summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--.cirrus.yml18
-rw-r--r--.gitattributes3
-rw-r--r--.github/ISSUE_TEMPLATE/Bug.yml108
-rw-r--r--.github/ISSUE_TEMPLATE/Code-Report.yml43
-rw-r--r--.github/ISSUE_TEMPLATE/Feature.yml50
-rw-r--r--.github/ISSUE_TEMPLATE/config.yml8
-rw-r--r--.github/errorfile209
-rw-r--r--.github/h2spec.config27
-rwxr-xr-x.github/matrix.py269
-rw-r--r--.github/vtest.json14
-rw-r--r--.github/workflows/aws-lc.yml66
-rw-r--r--.github/workflows/codespell.yml20
-rw-r--r--.github/workflows/compliance.yml57
-rw-r--r--.github/workflows/contrib.yml25
-rw-r--r--.github/workflows/coverity.yml49
-rw-r--r--.github/workflows/cross-zoo.yml110
-rw-r--r--.github/workflows/fedora-rawhide.yml58
-rw-r--r--.github/workflows/musl.yml62
-rw-r--r--.github/workflows/openssl-nodeprecated.yml33
-rw-r--r--.github/workflows/vtest.yml162
-rw-r--r--.github/workflows/windows.yml67
-rw-r--r--.gitignore59
-rw-r--r--.mailmap1
-rw-r--r--.travis.yml54
-rw-r--r--BRANCHES239
-rw-r--r--BSDmakefile10
-rw-r--r--CHANGELOG21931
-rw-r--r--CONTRIBUTING1020
-rw-r--r--INSTALL766
-rw-r--r--LICENSE37
-rw-r--r--MAINTAINERS152
-rw-r--r--Makefile1265
-rw-r--r--README22
-rw-r--r--SUBVERS2
-rw-r--r--VERDATE2
-rw-r--r--VERSION1
-rw-r--r--addons/51degrees/51d.c1179
-rw-r--r--addons/51degrees/dummy/cityhash/city.c4
-rw-r--r--addons/51degrees/dummy/pattern/51Degrees.c114
-rw-r--r--addons/51degrees/dummy/pattern/51Degrees.h147
-rw-r--r--addons/51degrees/dummy/threading.c4
-rw-r--r--addons/51degrees/dummy/trie/51Degrees.c89
-rw-r--r--addons/51degrees/dummy/trie/51Degrees.h112
-rw-r--r--addons/51degrees/dummy/v4hash/hash/fiftyone.h34
-rw-r--r--addons/51degrees/dummy/v4hash/hash/hash.c130
-rw-r--r--addons/51degrees/dummy/v4hash/hash/hash.h277
-rw-r--r--addons/deviceatlas/Makefile48
-rw-r--r--addons/deviceatlas/da.c501
-rw-r--r--addons/deviceatlas/dadwsch.c195
-rw-r--r--addons/deviceatlas/dummy/Makefile12
-rw-r--r--addons/deviceatlas/dummy/Os/daunix.c9
-rw-r--r--addons/deviceatlas/dummy/dac.c222
-rw-r--r--addons/deviceatlas/dummy/dac.h600
-rw-r--r--addons/deviceatlas/dummy/dadwcom.c1
-rw-r--r--addons/deviceatlas/dummy/dasch.c1
-rw-r--r--addons/deviceatlas/dummy/json.c1
-rw-r--r--addons/ot/AUTHORS1
-rw-r--r--addons/ot/MAINTAINERS1
-rw-r--r--addons/ot/Makefile73
-rw-r--r--addons/ot/README794
-rw-r--r--addons/ot/README-func298
-rw-r--r--addons/ot/README-pool25
-rw-r--r--addons/ot/include/cli.h50
-rw-r--r--addons/ot/include/conf.h228
-rw-r--r--addons/ot/include/config.h46
-rw-r--r--addons/ot/include/debug.h104
-rw-r--r--addons/ot/include/define.h107
-rw-r--r--addons/ot/include/event.h120
-rw-r--r--addons/ot/include/filter.h68
-rw-r--r--addons/ot/include/group.h61
-rw-r--r--addons/ot/include/http.h41
-rw-r--r--addons/ot/include/include.h66
-rw-r--r--addons/ot/include/opentracing.h86
-rw-r--r--addons/ot/include/parser.h172
-rw-r--r--addons/ot/include/pool.h39
-rw-r--r--addons/ot/include/scope.h126
-rw-r--r--addons/ot/include/util.h109
-rw-r--r--addons/ot/include/vars.h55
-rw-r--r--addons/ot/src/cli.c397
-rw-r--r--addons/ot/src/conf.c764
-rw-r--r--addons/ot/src/event.c338
-rw-r--r--addons/ot/src/filter.c1176
-rw-r--r--addons/ot/src/group.c354
-rw-r--r--addons/ot/src/http.c312
-rw-r--r--addons/ot/src/opentracing.c1067
-rw-r--r--addons/ot/src/parser.c1225
-rw-r--r--addons/ot/src/pool.c223
-rw-r--r--addons/ot/src/scope.c634
-rw-r--r--addons/ot/src/util.c815
-rw-r--r--addons/ot/src/vars.c834
-rw-r--r--addons/ot/test/README-speed-cmp111
-rw-r--r--addons/ot/test/README-speed-ctx111
-rw-r--r--addons/ot/test/README-speed-fe-be111
-rw-r--r--addons/ot/test/README-speed-sa111
-rw-r--r--addons/ot/test/be/cfg-dd.json5
-rw-r--r--addons/ot/test/be/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/be/cfg-zipkin.json4
-rw-r--r--addons/ot/test/be/haproxy.cfg37
-rw-r--r--addons/ot/test/be/ot.cfg62
-rw-r--r--addons/ot/test/cmp/cfg-dd.json5
-rw-r--r--addons/ot/test/cmp/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/cmp/cfg-zipkin.json4
-rw-r--r--addons/ot/test/cmp/haproxy.cfg36
-rw-r--r--addons/ot/test/cmp/ot.cfg83
-rw-r--r--addons/ot/test/ctx/cfg-dd.json5
-rw-r--r--addons/ot/test/ctx/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/ctx/cfg-zipkin.json4
-rw-r--r--addons/ot/test/ctx/haproxy.cfg38
-rw-r--r--addons/ot/test/ctx/ot.cfg197
-rw-r--r--addons/ot/test/empty/cfg-dd.json5
-rw-r--r--addons/ot/test/empty/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/empty/cfg-zipkin.json4
-rw-r--r--addons/ot/test/empty/haproxy.cfg30
-rw-r--r--addons/ot/test/empty/ot.cfg3
-rw-r--r--addons/ot/test/fe/cfg-dd.json5
-rw-r--r--addons/ot/test/fe/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/fe/cfg-zipkin.json4
-rw-r--r--addons/ot/test/fe/haproxy.cfg37
-rw-r--r--addons/ot/test/fe/ot.cfg74
-rwxr-xr-xaddons/ot/test/func-stat.sh5
-rwxr-xr-xaddons/ot/test/get-opentracing-plugins.sh45
-rw-r--r--addons/ot/test/index.html1
-rwxr-xr-xaddons/ot/test/run-cmp.sh13
-rwxr-xr-xaddons/ot/test/run-ctx.sh13
-rwxr-xr-xaddons/ot/test/run-fe-be.sh47
-rwxr-xr-xaddons/ot/test/run-sa.sh13
-rw-r--r--addons/ot/test/sa/cfg-dd.json5
-rw-r--r--addons/ot/test/sa/cfg-jaeger.yml34
-rw-r--r--addons/ot/test/sa/cfg-zipkin.json4
-rw-r--r--addons/ot/test/sa/haproxy.cfg40
-rw-r--r--addons/ot/test/sa/ot.cfg160
-rwxr-xr-xaddons/ot/test/test-speed.sh117
-rw-r--r--addons/promex/README356
-rw-r--r--addons/promex/service-prometheus.c1655
-rw-r--r--addons/wurfl/dummy/Makefile13
-rw-r--r--addons/wurfl/dummy/dummy-wurfl.c126
-rw-r--r--addons/wurfl/dummy/wurfl/wurfl.h409
-rw-r--r--addons/wurfl/wurfl.c779
-rw-r--r--admin/acme.sh/LICENSE674
-rw-r--r--admin/acme.sh/README13
-rw-r--r--admin/acme.sh/haproxy.sh403
-rw-r--r--admin/dyncookie/dyncookie.c56
-rw-r--r--admin/halog/README4
-rw-r--r--admin/halog/fgets2.c267
-rw-r--r--admin/halog/halog.c1915
-rw-r--r--admin/iprange/Makefile13
-rw-r--r--admin/iprange/ip6range.c397
-rw-r--r--admin/iprange/iprange.c202
-rw-r--r--admin/netsnmp-perl/README111
-rw-r--r--admin/netsnmp-perl/cacti_data_query_haproxy_backends.xml750
-rw-r--r--admin/netsnmp-perl/cacti_data_query_haproxy_frontends.xml750
-rw-r--r--admin/netsnmp-perl/haproxy.pl249
-rw-r--r--admin/netsnmp-perl/haproxy_backend.xml83
-rw-r--r--admin/netsnmp-perl/haproxy_frontend.xml83
-rw-r--r--admin/netsnmp-perl/haproxy_socket.xml90
-rw-r--r--admin/release-estimator/README.md68
-rwxr-xr-xadmin/release-estimator/release-estimator.py429
-rw-r--r--admin/selinux/README18
-rw-r--r--admin/selinux/haproxy.fc6
-rw-r--r--admin/selinux/haproxy.if2
-rw-r--r--admin/selinux/haproxy.te66
-rw-r--r--admin/syntax-highlight/haproxy.vim164
-rw-r--r--admin/systemd/Makefile8
-rw-r--r--admin/systemd/haproxy.service.in37
-rw-r--r--admin/wireshark-dissectors/peers/Makefile17
-rw-r--r--admin/wireshark-dissectors/peers/README78
-rw-r--r--admin/wireshark-dissectors/peers/packet-happp.c1679
-rw-r--r--admin/wireshark-dissectors/peers/wireshark.happp.dissector.patch24
-rw-r--r--dev/base64/base64rev-gen.c70
-rw-r--r--dev/coccinelle/bug_on.cocci7
-rw-r--r--dev/coccinelle/cs_endp_flags.cocci76
-rw-r--r--dev/coccinelle/endp_flags.cocci76
-rw-r--r--dev/coccinelle/ha_free.cocci6
-rw-r--r--dev/coccinelle/ist.cocci86
-rw-r--r--dev/coccinelle/realloc_leak.cocci6
-rw-r--r--dev/coccinelle/strcmp.cocci309
-rw-r--r--dev/coccinelle/xalloc_cast.cocci11
-rw-r--r--dev/coccinelle/xalloc_size.cocci41
-rw-r--r--dev/flags/README12
-rw-r--r--dev/flags/flags.c157
-rwxr-xr-xdev/flags/show-fd-to-flags.sh2
-rwxr-xr-xdev/flags/show-sess-to-flags.sh209
-rwxr-xr-xdev/h2/mkhdr.sh151
-rw-r--r--dev/haring/README9
-rw-r--r--dev/haring/haring.c266
-rw-r--r--dev/hpack/README4
-rw-r--r--dev/hpack/decode.c215
-rw-r--r--dev/hpack/gen-enc.c205
-rw-r--r--dev/hpack/gen-rht.c369
-rw-r--r--dev/plug_qdisc/README59
-rw-r--r--dev/plug_qdisc/plug_qdisc.c86
-rw-r--r--dev/poll/Makefile13
-rw-r--r--dev/poll/poll.c445
-rw-r--r--dev/qpack/decode.c171
-rw-r--r--dev/sslkeylogger/sslkeylogger.lua47
-rw-r--r--dev/tcploop/Makefile13
-rw-r--r--dev/tcploop/tcploop.c1055
-rwxr-xr-xdev/trace/trace.awk78
-rw-r--r--dev/udp/udp-perturb.c527
-rw-r--r--doc/51Degrees-device-detection.txt174
-rw-r--r--doc/DeviceAtlas-device-detection.txt82
-rw-r--r--doc/SOCKS4.protocol.txt1
-rw-r--r--doc/SPOE.txt1255
-rw-r--r--doc/WURFL-device-detection.txt71
-rw-r--r--doc/acl.fig229
-rw-r--r--doc/architecture.txt1448
-rw-r--r--doc/coding-style.txt1566
-rw-r--r--doc/configuration.txt26732
-rw-r--r--doc/cookie-options.txt25
-rw-r--r--doc/design-thoughts/binding-possibilities.txt167
-rw-r--r--doc/design-thoughts/connection-reuse.txt224
-rw-r--r--doc/design-thoughts/http_load_time.url5
-rw-r--r--doc/design-thoughts/pool-debugging.txt243
-rw-r--r--doc/design-thoughts/thread-group.txt655
-rw-r--r--doc/gpl.txt340
-rw-r--r--doc/haproxy.1227
-rw-r--r--doc/internals/acl.txt82
-rw-r--r--doc/internals/api/appctx.txt142
-rw-r--r--doc/internals/api/buffer-api.txt653
-rw-r--r--doc/internals/api/event_hdl.txt1015
-rw-r--r--doc/internals/api/filters.txt1188
-rw-r--r--doc/internals/api/htx-api.txt570
-rw-r--r--doc/internals/api/initcalls.txt366
-rw-r--r--doc/internals/api/ist.txt167
-rw-r--r--doc/internals/api/layers.txt190
-rw-r--r--doc/internals/api/list.txt195
-rw-r--r--doc/internals/api/pools.txt585
-rw-r--r--doc/internals/api/scheduler.txt228
-rw-r--r--doc/internals/body-parsing.txt165
-rw-r--r--doc/internals/connect-status.txt28
-rw-r--r--doc/internals/connection-header.txt196
-rw-r--r--doc/internals/connection-scale.txt44
-rw-r--r--doc/internals/fd-migration.txt138
-rw-r--r--doc/internals/hashing.txt83
-rw-r--r--doc/internals/list.fig599
-rw-r--r--doc/internals/list.pngbin0 -> 33618 bytes
-rw-r--r--doc/internals/listener-states.fig150
-rw-r--r--doc/internals/listener-states.pngbin0 -> 36142 bytes
-rw-r--r--doc/internals/lua_socket.fig113
-rw-r--r--doc/internals/lua_socket.pdfbin0 -> 14969 bytes
-rw-r--r--doc/internals/muxes.fig401
-rw-r--r--doc/internals/muxes.pdfbin0 -> 12984 bytes
-rw-r--r--doc/internals/muxes.pngbin0 -> 32209 bytes
-rw-r--r--doc/internals/muxes.svg911
-rw-r--r--doc/internals/notes-layers.txt330
-rw-r--r--doc/internals/notes-poll-connect.txt93
-rw-r--r--doc/internals/notes-pollhup.txt281
-rw-r--r--doc/internals/notes-polling.txt192
-rw-r--r--doc/internals/pattern.diabin0 -> 5631 bytes
-rw-r--r--doc/internals/pattern.pdfbin0 -> 37269 bytes
-rw-r--r--doc/internals/polling-states.fig59
-rw-r--r--doc/internals/sched.fig748
-rw-r--r--doc/internals/sched.pdfbin0 -> 25334 bytes
-rw-r--r--doc/internals/sched.pngbin0 -> 150457 bytes
-rw-r--r--doc/internals/sched.svg1204
-rw-r--r--doc/internals/ssl_cert.diabin0 -> 6700 bytes
-rw-r--r--doc/internals/stats-v2.txt8
-rw-r--r--doc/internals/stconn-close.txt74
-rw-r--r--doc/internals/stream-sock-states.fig535
-rw-r--r--doc/intro.txt1700
-rw-r--r--doc/lgpl.txt504
-rw-r--r--doc/linux-syn-cookies.txt106
-rw-r--r--doc/lua-api/Makefile153
-rw-r--r--doc/lua-api/_static/channel.fig55
-rw-r--r--doc/lua-api/_static/channel.pngbin0 -> 18457 bytes
-rw-r--r--doc/lua-api/conf.py242
-rw-r--r--doc/lua-api/index.rst4491
-rw-r--r--doc/lua.txt972
-rw-r--r--doc/management.txt4521
-rw-r--r--doc/netscaler-client-ip-insertion-protocol.txt55
-rw-r--r--doc/network-namespaces.txt106
-rw-r--r--doc/peers-v2.0.txt294
-rw-r--r--doc/peers.txt491
-rw-r--r--doc/proxy-protocol.txt1051
-rw-r--r--doc/queuing.fig192
-rw-r--r--doc/regression-testing.txt706
-rw-r--r--doc/seamless_reload.txt62
-rw-r--r--examples/basic-config-edge.cfg131
-rw-r--r--examples/content-sw-sample.cfg65
-rw-r--r--examples/errorfiles/400.http9
-rw-r--r--examples/errorfiles/403.http9
-rw-r--r--examples/errorfiles/408.http9
-rw-r--r--examples/errorfiles/500.http9
-rw-r--r--examples/errorfiles/502.http9
-rw-r--r--examples/errorfiles/503.http9
-rw-r--r--examples/errorfiles/504.http9
-rw-r--r--examples/errorfiles/README9
-rw-r--r--examples/haproxy.init137
-rw-r--r--examples/lua/README7
-rw-r--r--examples/lua/event_handler.lua28
-rw-r--r--examples/lua/mailers.lua426
-rw-r--r--examples/option-http_proxy.cfg54
-rw-r--r--examples/quick-test.cfg29
-rw-r--r--examples/socks4.cfg55
-rw-r--r--examples/transparent_proxy.cfg55
-rw-r--r--examples/wurfl-example.cfg41
-rw-r--r--include/haproxy/acl-t.h160
-rw-r--r--include/haproxy/acl.h157
-rw-r--r--include/haproxy/action-t.h217
-rw-r--r--include/haproxy/action.h124
-rw-r--r--include/haproxy/activity-t.h144
-rw-r--r--include/haproxy/activity.h47
-rw-r--r--include/haproxy/api-t.h40
-rw-r--r--include/haproxy/api.h38
-rw-r--r--include/haproxy/applet-t.h101
-rw-r--r--include/haproxy/applet.h270
-rw-r--r--include/haproxy/arg-t.h152
-rw-r--r--include/haproxy/arg.h94
-rw-r--r--include/haproxy/atomic.h897
-rw-r--r--include/haproxy/auth-t.h57
-rw-r--r--include/haproxy/auth.h40
-rw-r--r--include/haproxy/backend-t.h191
-rw-r--r--include/haproxy/backend.h158
-rw-r--r--include/haproxy/base64.h28
-rw-r--r--include/haproxy/buf-t.h62
-rw-r--r--include/haproxy/buf.h1161
-rw-r--r--include/haproxy/bug.h479
-rw-r--r--include/haproxy/capture-t.h43
-rw-r--r--include/haproxy/capture.h37
-rw-r--r--include/haproxy/cbuf-t.h45
-rw-r--r--include/haproxy/cbuf.h136
-rw-r--r--include/haproxy/cfgcond-t.h105
-rw-r--r--include/haproxy/cfgcond.h43
-rw-r--r--include/haproxy/cfgdiag.h11
-rw-r--r--include/haproxy/cfgparse.h149
-rw-r--r--include/haproxy/channel-t.h314
-rw-r--r--include/haproxy/channel.h1021
-rw-r--r--include/haproxy/check-t.h198
-rw-r--r--include/haproxy/check.h131
-rw-r--r--include/haproxy/chunk.h303
-rw-r--r--include/haproxy/cli-t.h100
-rw-r--r--include/haproxy/cli.h138
-rw-r--r--include/haproxy/clock.h59
-rw-r--r--include/haproxy/compat.h313
-rw-r--r--include/haproxy/compiler.h469
-rw-r--r--include/haproxy/compression-t.h109
-rw-r--r--include/haproxy/compression.h44
-rw-r--r--include/haproxy/connection-t.h722
-rw-r--r--include/haproxy/connection.h762
-rw-r--r--include/haproxy/counters-t.h128
-rw-r--r--include/haproxy/cpuset-t.h54
-rw-r--r--include/haproxy/cpuset.h76
-rw-r--r--include/haproxy/debug.h39
-rw-r--r--include/haproxy/defaults.h533
-rw-r--r--include/haproxy/dgram-t.h53
-rw-r--r--include/haproxy/dgram.h29
-rw-r--r--include/haproxy/dict-t.h46
-rw-r--r--include/haproxy/dict.h36
-rw-r--r--include/haproxy/dns-t.h179
-rw-r--r--include/haproxy/dns.h33
-rw-r--r--include/haproxy/dynbuf-t.h41
-rw-r--r--include/haproxy/dynbuf.h131
-rw-r--r--include/haproxy/errors.h139
-rw-r--r--include/haproxy/event_hdl-t.h295
-rw-r--r--include/haproxy/event_hdl.h512
-rw-r--r--include/haproxy/extcheck.h49
-rw-r--r--include/haproxy/fcgi-app-t.h123
-rw-r--r--include/haproxy/fcgi-app.h42
-rw-r--r--include/haproxy/fcgi.h133
-rw-r--r--include/haproxy/fd-t.h251
-rw-r--r--include/haproxy/fd.h542
-rw-r--r--include/haproxy/filters-t.h258
-rw-r--r--include/haproxy/filters.h187
-rw-r--r--include/haproxy/fix-t.h70
-rw-r--r--include/haproxy/fix.h97
-rw-r--r--include/haproxy/flt_http_comp.h28
-rw-r--r--include/haproxy/freq_ctr-t.h45
-rw-r--r--include/haproxy/freq_ctr.h402
-rw-r--r--include/haproxy/frontend.h38
-rw-r--r--include/haproxy/global-t.h251
-rw-r--r--include/haproxy/global.h98
-rw-r--r--include/haproxy/h1.h377
-rw-r--r--include/haproxy/h1_htx.h76
-rw-r--r--include/haproxy/h2.h351
-rw-r--r--include/haproxy/h3.h118
-rw-r--r--include/haproxy/h3_stats-t.h12
-rw-r--r--include/haproxy/h3_stats.h17
-rw-r--r--include/haproxy/hash.h33
-rw-r--r--include/haproxy/hlua-t.h243
-rw-r--r--include/haproxy/hlua.h81
-rw-r--r--include/haproxy/hlua_fcn.h41
-rw-r--r--include/haproxy/hpack-dec.h39
-rw-r--r--include/haproxy/hpack-enc.h261
-rw-r--r--include/haproxy/hpack-huff.h35
-rw-r--r--include/haproxy/hpack-tbl-t.h143
-rw-r--r--include/haproxy/hpack-tbl.h184
-rw-r--r--include/haproxy/hq_interop.h6
-rw-r--r--include/haproxy/http-hdr-t.h41
-rw-r--r--include/haproxy/http-hdr.h60
-rw-r--r--include/haproxy/http-t.h184
-rw-r--r--include/haproxy/http.h222
-rw-r--r--include/haproxy/http_ana-t.h264
-rw-r--r--include/haproxy/http_ana.h91
-rw-r--r--include/haproxy/http_client-t.h69
-rw-r--r--include/haproxy/http_client.h40
-rw-r--r--include/haproxy/http_ext-t.h149
-rw-r--r--include/haproxy/http_ext.h58
-rw-r--r--include/haproxy/http_fetch.h41
-rw-r--r--include/haproxy/http_htx-t.h95
-rw-r--r--include/haproxy/http_htx.h84
-rw-r--r--include/haproxy/http_rules.h56
-rw-r--r--include/haproxy/htx-t.h277
-rw-r--r--include/haproxy/htx.h885
-rw-r--r--include/haproxy/init-t.h64
-rw-r--r--include/haproxy/init.h79
-rw-r--r--include/haproxy/initcall.h257
-rw-r--r--include/haproxy/intops.h495
-rw-r--r--include/haproxy/istbuf.h162
-rw-r--r--include/haproxy/jwt-t.h86
-rw-r--r--include/haproxy/jwt.h37
-rw-r--r--include/haproxy/lb_chash-t.h40
-rw-r--r--include/haproxy/lb_chash.h41
-rw-r--r--include/haproxy/lb_fas-t.h39
-rw-r--r--include/haproxy/lb_fas.h40
-rw-r--r--include/haproxy/lb_fwlc-t.h39
-rw-r--r--include/haproxy/lb_fwlc.h40
-rw-r--r--include/haproxy/lb_fwrr-t.h50
-rw-r--r--include/haproxy/lb_fwrr.h40
-rw-r--r--include/haproxy/lb_map-t.h40
-rw-r--r--include/haproxy/lb_map.h41
-rw-r--r--include/haproxy/linuxcap.h7
-rw-r--r--include/haproxy/list-t.h73
-rw-r--r--include/haproxy/list.h907
-rw-r--r--include/haproxy/listener-t.h317
-rw-r--r--include/haproxy/listener.h246
-rw-r--r--include/haproxy/log-t.h277
-rw-r--r--include/haproxy/log.h195
-rw-r--r--include/haproxy/mailers-t.h83
-rw-r--r--include/haproxy/mailers.h42
-rw-r--r--include/haproxy/map-t.h34
-rw-r--r--include/haproxy/map.h39
-rw-r--r--include/haproxy/mqtt-t.h310
-rw-r--r--include/haproxy/mqtt.h118
-rw-r--r--include/haproxy/mux_fcgi-t.h175
-rw-r--r--include/haproxy/mux_h1-t.h160
-rw-r--r--include/haproxy/mux_h2-t.h222
-rw-r--r--include/haproxy/mux_quic-t.h204
-rw-r--r--include/haproxy/mux_quic.h116
-rw-r--r--include/haproxy/mworker-t.h51
-rw-r--r--include/haproxy/mworker.h48
-rw-r--r--include/haproxy/namespace-t.h39
-rw-r--r--include/haproxy/namespace.h47
-rw-r--r--include/haproxy/ncbuf-t.h104
-rw-r--r--include/haproxy/ncbuf.h54
-rw-r--r--include/haproxy/net_helper.h387
-rw-r--r--include/haproxy/obj_type-t.h56
-rw-r--r--include/haproxy/obj_type.h213
-rw-r--r--include/haproxy/openssl-compat.h487
-rw-r--r--include/haproxy/pattern-t.h235
-rw-r--r--include/haproxy/pattern.h273
-rw-r--r--include/haproxy/payload.h39
-rw-r--r--include/haproxy/peers-t.h160
-rw-r--r--include/haproxy/peers.h69
-rw-r--r--include/haproxy/pipe-t.h43
-rw-r--r--include/haproxy/pipe.h54
-rw-r--r--include/haproxy/pool-os.h109
-rw-r--r--include/haproxy/pool-t.h149
-rw-r--r--include/haproxy/pool.h368
-rw-r--r--include/haproxy/port_range-t.h40
-rw-r--r--include/haproxy/port_range.h105
-rw-r--r--include/haproxy/proto_quic.h35
-rw-r--r--include/haproxy/proto_rhttp-t.h14
-rw-r--r--include/haproxy/proto_rhttp.h21
-rw-r--r--include/haproxy/proto_sockpair.h32
-rw-r--r--include/haproxy/proto_tcp.h45
-rw-r--r--include/haproxy/proto_udp.h41
-rw-r--r--include/haproxy/proto_uxst.h34
-rw-r--r--include/haproxy/protobuf-t.h87
-rw-r--r--include/haproxy/protobuf.h577
-rw-r--r--include/haproxy/protocol-t.h148
-rw-r--r--include/haproxy/protocol.h111
-rw-r--r--include/haproxy/proxy-t.h547
-rw-r--r--include/haproxy/proxy.h264
-rw-r--r--include/haproxy/qmux_http.h17
-rw-r--r--include/haproxy/qmux_trace.h73
-rw-r--r--include/haproxy/qpack-dec.h51
-rw-r--r--include/haproxy/qpack-enc.h12
-rw-r--r--include/haproxy/qpack-t.h47
-rw-r--r--include/haproxy/qpack-tbl-t.h65
-rw-r--r--include/haproxy/qpack-tbl.h170
-rw-r--r--include/haproxy/queue-t.h59
-rw-r--r--include/haproxy/queue.h134
-rw-r--r--include/haproxy/quic_ack-t.h43
-rw-r--r--include/haproxy/quic_ack.h23
-rw-r--r--include/haproxy/quic_cc-t.h123
-rw-r--r--include/haproxy/quic_cc.h112
-rw-r--r--include/haproxy/quic_cid-t.h38
-rw-r--r--include/haproxy/quic_cid.h110
-rw-r--r--include/haproxy/quic_cli-t.h18
-rw-r--r--include/haproxy/quic_conn-t.h446
-rw-r--r--include/haproxy/quic_conn.h201
-rw-r--r--include/haproxy/quic_enc.h275
-rw-r--r--include/haproxy/quic_frame-t.h309
-rw-r--r--include/haproxy/quic_frame.h281
-rw-r--r--include/haproxy/quic_loss-t.h62
-rw-r--r--include/haproxy/quic_loss.h92
-rw-r--r--include/haproxy/quic_openssl_compat-t.h64
-rw-r--r--include/haproxy/quic_openssl_compat.h33
-rw-r--r--include/haproxy/quic_retransmit.h20
-rw-r--r--include/haproxy/quic_retry.h33
-rw-r--r--include/haproxy/quic_rx-t.h54
-rw-r--r--include/haproxy/quic_rx.h58
-rw-r--r--include/haproxy/quic_sock-t.h50
-rw-r--r--include/haproxy/quic_sock.h107
-rw-r--r--include/haproxy/quic_ssl-t.h21
-rw-r--r--include/haproxy/quic_ssl.h55
-rw-r--r--include/haproxy/quic_stats-t.h105
-rw-r--r--include/haproxy/quic_stats.h14
-rw-r--r--include/haproxy/quic_stream-t.h48
-rw-r--r--include/haproxy/quic_stream.h23
-rw-r--r--include/haproxy/quic_tls-t.h283
-rw-r--r--include/haproxy/quic_tls.h1116
-rw-r--r--include/haproxy/quic_tp-t.h118
-rw-r--r--include/haproxy/quic_tp.h124
-rw-r--r--include/haproxy/quic_trace-t.h103
-rw-r--r--include/haproxy/quic_trace.h40
-rw-r--r--include/haproxy/quic_tx-t.h56
-rw-r--r--include/haproxy/quic_tx.h92
-rw-r--r--include/haproxy/receiver-t.h106
-rw-r--r--include/haproxy/regex-t.h78
-rw-r--r--include/haproxy/regex.h144
-rw-r--r--include/haproxy/resolvers-t.h297
-rw-r--r--include/haproxy/resolvers.h66
-rw-r--r--include/haproxy/ring-t.h113
-rw-r--r--include/haproxy/ring.h53
-rw-r--r--include/haproxy/sample-t.h315
-rw-r--r--include/haproxy/sample.h186
-rw-r--r--include/haproxy/sample_data-t.h51
-rw-r--r--include/haproxy/sc_strm.h447
-rw-r--r--include/haproxy/server-t.h681
-rw-r--r--include/haproxy/server.h328
-rw-r--r--include/haproxy/session-t.h78
-rw-r--r--include/haproxy/session.h335
-rw-r--r--include/haproxy/shctx-t.h63
-rw-r--r--include/haproxy/shctx.h80
-rw-r--r--include/haproxy/show_flags-t.h99
-rw-r--r--include/haproxy/signal-t.h66
-rw-r--r--include/haproxy/signal.h52
-rw-r--r--include/haproxy/sink-t.h76
-rw-r--r--include/haproxy/sink.h97
-rw-r--r--include/haproxy/sock-t.h37
-rw-r--r--include/haproxy/sock.h62
-rw-r--r--include/haproxy/sock_inet.h49
-rw-r--r--include/haproxy/sock_unix.h36
-rw-r--r--include/haproxy/spoe-t.h413
-rw-r--r--include/haproxy/spoe.h351
-rw-r--r--include/haproxy/ssl_ckch-t.h161
-rw-r--r--include/haproxy/ssl_ckch.h75
-rw-r--r--include/haproxy/ssl_crtlist-t.h63
-rw-r--r--include/haproxy/ssl_crtlist.h48
-rw-r--r--include/haproxy/ssl_ocsp-t.h94
-rw-r--r--include/haproxy/ssl_ocsp.h70
-rw-r--r--include/haproxy/ssl_sock-t.h323
-rw-r--r--include/haproxy/ssl_sock.h191
-rw-r--r--include/haproxy/ssl_utils.h51
-rw-r--r--include/haproxy/stats-t.h617
-rw-r--r--include/haproxy/stats.h145
-rw-r--r--include/haproxy/stconn-t.h325
-rw-r--r--include/haproxy/stconn.h557
-rw-r--r--include/haproxy/stick_table-t.h250
-rw-r--r--include/haproxy/stick_table.h404
-rw-r--r--include/haproxy/stream-t.h301
-rw-r--r--include/haproxy/stream.h404
-rw-r--r--include/haproxy/task-t.h182
-rw-r--r--include/haproxy/task.h857
-rw-r--r--include/haproxy/tcp_rules.h52
-rw-r--r--include/haproxy/tcpcheck-t.h242
-rw-r--r--include/haproxy/tcpcheck.h125
-rw-r--r--include/haproxy/thread-t.h165
-rw-r--r--include/haproxy/thread.h489
-rw-r--r--include/haproxy/ticks.h157
-rw-r--r--include/haproxy/time.h520
-rw-r--r--include/haproxy/timeshift.h10
-rw-r--r--include/haproxy/tinfo-t.h180
-rw-r--r--include/haproxy/tinfo.h120
-rw-r--r--include/haproxy/tools-t.h166
-rw-r--r--include/haproxy/tools.h1179
-rw-r--r--include/haproxy/trace-t.h179
-rw-r--r--include/haproxy/trace.h216
-rw-r--r--include/haproxy/uri_auth-t.h56
-rw-r--r--include/haproxy/uri_auth.h44
-rw-r--r--include/haproxy/uri_normalizer-t.h31
-rw-r--r--include/haproxy/uri_normalizer.h44
-rw-r--r--include/haproxy/vars-t.h71
-rw-r--r--include/haproxy/vars.h72
-rw-r--r--include/haproxy/version.h86
-rw-r--r--include/haproxy/xref-t.h45
-rw-r--r--include/haproxy/xref.h105
-rw-r--r--include/haproxy/xxhash.h52
-rw-r--r--include/import/atomic-ops.h1991
-rw-r--r--include/import/eb32sctree.h121
-rw-r--r--include/import/eb32tree.h482
-rw-r--r--include/import/eb64tree.h575
-rw-r--r--include/import/ebimtree.h324
-rw-r--r--include/import/ebistree.h329
-rw-r--r--include/import/ebmbtree.h850
-rw-r--r--include/import/ebpttree.h156
-rw-r--r--include/import/ebsttree.h324
-rw-r--r--include/import/ebtree-t.h217
-rw-r--r--include/import/ebtree.h857
-rw-r--r--include/import/ist.h957
-rw-r--r--include/import/lru.h75
-rw-r--r--include/import/mjson.h209
-rw-r--r--include/import/plock.h1422
-rw-r--r--include/import/sha1.h35
-rw-r--r--include/import/slz-tables.h257
-rw-r--r--include/import/slz.h200
-rw-r--r--include/import/xxhash.h6773
-rw-r--r--include/make/compiler.mk42
-rw-r--r--include/make/options.mk52
-rw-r--r--include/make/verbose.mk30
-rw-r--r--reg-tests/README71
-rw-r--r--reg-tests/balance/balance-rr.vtc73
-rw-r--r--reg-tests/balance/balance-uri-path-only.vtc97
-rw-r--r--reg-tests/balance/balance-uri.vtc73
-rw-r--r--reg-tests/cache/basic.vtc53
-rw-r--r--reg-tests/cache/caching_rules.vtc320
-rw-r--r--reg-tests/cache/expires.vtc127
-rw-r--r--reg-tests/cache/if-modified-since.vtc144
-rw-r--r--reg-tests/cache/if-none-match.vtc89
-rw-r--r--reg-tests/cache/post_on_entry.vtc65
-rw-r--r--reg-tests/cache/sample_fetches.vtc137
-rw-r--r--reg-tests/cache/vary.vtc461
-rw-r--r--reg-tests/cache/vary_accept_encoding.vtc333
-rw-r--r--reg-tests/checks/1be_40srv_odd_health_checks.vtc117
-rw-r--r--reg-tests/checks/40be_2srv_odd_health_checks.vtc645
-rw-r--r--reg-tests/checks/4be_1srv_health_checks.vtc201
-rw-r--r--reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc100
-rw-r--r--reg-tests/checks/agent-check.vtc42
l---------reg-tests/checks/common.pem1
-rw-r--r--reg-tests/checks/http-check-expect.vtc64
-rw-r--r--reg-tests/checks/http-check-send.vtc165
-rw-r--r--reg-tests/checks/http-check.vtc157
-rw-r--r--reg-tests/checks/http-monitor-uri.vtc56
-rw-r--r--reg-tests/checks/ldap-check.vtc96
-rw-r--r--reg-tests/checks/mysql-check.vtc123
-rw-r--r--reg-tests/checks/pgsql-check.vtc93
-rw-r--r--reg-tests/checks/redis-check.vtc61
-rw-r--r--reg-tests/checks/smtp-check.vtc110
-rw-r--r--reg-tests/checks/spop-check.vtc94
-rw-r--r--reg-tests/checks/ssl-hello-check.vtc76
-rw-r--r--reg-tests/checks/tcp-check-ssl.vtc118
-rw-r--r--reg-tests/checks/tcp-check_min-recv.vtc68
-rw-r--r--reg-tests/checks/tcp-check_multiple_ports.vtc48
-rw-r--r--reg-tests/checks/tcp-checks-socks4.vtc60
-rw-r--r--reg-tests/checks/tls_health_checks.vtc120
-rw-r--r--reg-tests/compression/basic.vtc377
l---------reg-tests/compression/common.pem1
-rw-r--r--reg-tests/compression/etags_conversion.vtc230
-rw-r--r--reg-tests/compression/lua_validation.lua19
-rw-r--r--reg-tests/compression/lua_validation.vtc59
-rw-r--r--reg-tests/compression/vary.vtc308
l---------reg-tests/connection/ca-auth.crt1
-rw-r--r--reg-tests/connection/cli_src_dst.vtc290
l---------reg-tests/connection/client1.pem1
l---------reg-tests/connection/common.pem1
-rw-r--r--reg-tests/connection/dispatch.vtc42
-rw-r--r--reg-tests/connection/http_reuse_aggressive.vtc45
-rw-r--r--reg-tests/connection/http_reuse_always.vtc43
-rw-r--r--reg-tests/connection/http_reuse_be_transparent.vtc82
-rw-r--r--reg-tests/connection/http_reuse_conn_hash.vtc163
-rw-r--r--reg-tests/connection/http_reuse_dispatch.vtc79
-rw-r--r--reg-tests/connection/http_reuse_never.vtc79
-rw-r--r--reg-tests/connection/http_reuse_safe.vtc78
-rw-r--r--reg-tests/connection/proxy_protocol_random_fail.vtc59
-rw-r--r--reg-tests/connection/proxy_protocol_send_generic.vtc74
-rw-r--r--reg-tests/connection/proxy_protocol_send_unique_id.vtc42
-rw-r--r--reg-tests/connection/proxy_protocol_send_unique_id_alpn.vtc33
-rw-r--r--reg-tests/connection/proxy_protocol_tlv_validation.vtc142
-rw-r--r--reg-tests/connection/reverse_connect_full.vtc70
-rw-r--r--reg-tests/connection/reverse_server.vtc69
-rw-r--r--reg-tests/connection/reverse_server_name.vtc87
-rw-r--r--reg-tests/connection/tcp_to_http_upgrade.vtc169
-rw-r--r--reg-tests/contrib/prometheus.vtc113
-rw-r--r--reg-tests/converter/add_item.vtc50
-rw-r--r--reg-tests/converter/be2dec.vtc56
-rw-r--r--reg-tests/converter/be2hex.vtc60
-rw-r--r--reg-tests/converter/bytes.vtc156
-rw-r--r--reg-tests/converter/digest.vtc57
-rw-r--r--reg-tests/converter/field.vtc43
-rw-r--r--reg-tests/converter/fix.vtc235
-rw-r--r--reg-tests/converter/hmac.vtc55
-rw-r--r--reg-tests/converter/iif.vtc46
-rw-r--r--reg-tests/converter/json.vtc40
-rw-r--r--reg-tests/converter/json_query.vtc107
-rw-r--r--reg-tests/converter/mqtt.vtc238
-rw-r--r--reg-tests/converter/param.vtc80
-rw-r--r--reg-tests/converter/secure_memcmp.vtc143
-rw-r--r--reg-tests/converter/sha2.vtc57
-rw-r--r--reg-tests/converter/url_dec.vtc37
-rw-r--r--reg-tests/converter/url_enc.vtc43
-rw-r--r--reg-tests/converter/word.vtc43
-rw-r--r--reg-tests/filters/random-forwarding.vtc138
-rw-r--r--reg-tests/http-capture/multiple_headers.vtc91
-rw-r--r--reg-tests/http-cookies/cookie_insert_indirect.vtc54
-rw-r--r--reg-tests/http-cookies/h2_cookie_concat.vtc42
-rw-r--r--reg-tests/http-errorfiles/errorfiles.vtc51
-rw-r--r--reg-tests/http-errorfiles/errors/400-1.http9
-rw-r--r--reg-tests/http-errorfiles/errors/400-2.http9
-rw-r--r--reg-tests/http-errorfiles/errors/400-3.http9
-rw-r--r--reg-tests/http-errorfiles/errors/400.http9
-rw-r--r--reg-tests/http-errorfiles/errors/403-1.http9
-rw-r--r--reg-tests/http-errorfiles/errors/403-2.http9
-rw-r--r--reg-tests/http-errorfiles/errors/403.http9
-rw-r--r--reg-tests/http-errorfiles/errors/404-1.http9
-rw-r--r--reg-tests/http-errorfiles/errors/404-2.http9
-rw-r--r--reg-tests/http-errorfiles/errors/404-3.http9
-rw-r--r--reg-tests/http-errorfiles/errors/404.http9
-rw-r--r--reg-tests/http-errorfiles/errors/500-1.http9
-rw-r--r--reg-tests/http-errorfiles/errors/500.http9
-rw-r--r--reg-tests/http-errorfiles/errors/lf-403.txt1
-rw-r--r--reg-tests/http-errorfiles/http-error.vtc75
-rw-r--r--reg-tests/http-errorfiles/http_deny_errors.vtc77
-rw-r--r--reg-tests/http-errorfiles/http_errors.vtc134
-rw-r--r--reg-tests/http-errorfiles/http_return.vtc46
l---------reg-tests/http-messaging/common.pem1
-rw-r--r--reg-tests/http-messaging/h1_host_normalization.vtc762
-rw-r--r--reg-tests/http-messaging/h1_to_h1.vtc301
-rw-r--r--reg-tests/http-messaging/h2_desync_attacks.vtc167
-rw-r--r--reg-tests/http-messaging/h2_to_h1.vtc324
-rw-r--r--reg-tests/http-messaging/http_abortonclose.vtc226
-rw-r--r--reg-tests/http-messaging/http_bodyless_response.vtc128
-rw-r--r--reg-tests/http-messaging/http_bodyless_spliced_response.vtc89
-rw-r--r--reg-tests/http-messaging/http_msg_full_on_eom.vtc62
-rw-r--r--reg-tests/http-messaging/http_request_buffer.vtc135
-rw-r--r--reg-tests/http-messaging/http_splicing.vtc77
-rw-r--r--reg-tests/http-messaging/http_splicing_chunk.vtc74
-rw-r--r--reg-tests/http-messaging/http_transfer_encoding.vtc202
-rw-r--r--reg-tests/http-messaging/http_wait_for_body.vtc171
-rw-r--r--reg-tests/http-messaging/protocol_upgrade.vtc228
-rw-r--r--reg-tests/http-messaging/scheme_based_normalize.vtc125
-rw-r--r--reg-tests/http-messaging/srv_ws.vtc180
-rw-r--r--reg-tests/http-messaging/truncated.vtc101
-rw-r--r--reg-tests/http-messaging/websocket.vtc211
-rw-r--r--reg-tests/http-rules/1k.txt16
-rw-r--r--reg-tests/http-rules/acl_cli_spaces.vtc77
-rw-r--r--reg-tests/http-rules/agents.acl1
-rw-r--r--reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.map1
-rw-r--r--reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc231
-rw-r--r--reg-tests/http-rules/default_rules.vtc159
-rw-r--r--reg-tests/http-rules/del_header.vtc93
-rw-r--r--reg-tests/http-rules/except-forwardfor-originalto.vtc143
-rw-r--r--reg-tests/http-rules/forwarded-header-7239.vtc171
-rw-r--r--reg-tests/http-rules/h1or2_to_h1c.vtc233
-rw-r--r--reg-tests/http-rules/http_after_response.vtc192
-rw-r--r--reg-tests/http-rules/http_return.vtc99
-rw-r--r--reg-tests/http-rules/ifnone-forwardfor.vtc98
-rw-r--r--reg-tests/http-rules/lf-file.txt1
-rw-r--r--reg-tests/http-rules/map_ordering.map4
-rw-r--r--reg-tests/http-rules/map_ordering.vtc32
-rw-r--r--reg-tests/http-rules/map_redirect-be.map4
-rw-r--r--reg-tests/http-rules/map_redirect.map5
-rw-r--r--reg-tests/http-rules/map_redirect.vtc200
-rw-r--r--reg-tests/http-rules/map_regm_with_backref.map1
-rw-r--r--reg-tests/http-rules/map_regm_with_backref.vtc73
-rw-r--r--reg-tests/http-rules/normalize_uri.vtc549
-rw-r--r--reg-tests/http-rules/path_and_pathq.vtc64
-rw-r--r--reg-tests/http-rules/restrict_req_hdr_names.vtc185
-rw-r--r--reg-tests/http-rules/strict_rw_mode.vtc164
-rw-r--r--reg-tests/http-set-timeout/set_timeout.vtc214
-rwxr-xr-xreg-tests/jwt/build_token.py22
-rw-r--r--reg-tests/jwt/es256-public.pem4
-rw-r--r--reg-tests/jwt/es384-public.pem5
-rw-r--r--reg-tests/jwt/es512-public.pem6
-rw-r--r--reg-tests/jwt/jws_verify.vtc418
-rw-r--r--reg-tests/jwt/rsa-public.pem14
-rw-r--r--reg-tests/log/last_rule.vtc165
-rw-r--r--reg-tests/log/load_balancing.vtc159
-rw-r--r--reg-tests/log/log_backend.vtc185
-rw-r--r--reg-tests/log/log_forward.vtc57
-rw-r--r--reg-tests/log/log_uri.vtc61
-rw-r--r--reg-tests/log/wrong_ip_port_logging.vtc62
-rw-r--r--reg-tests/lua/bad_http_clt_req_duration.lua8
-rw-r--r--reg-tests/lua/bad_http_clt_req_duration.vtc76
-rw-r--r--reg-tests/lua/close_wait_lf.lua1
-rw-r--r--reg-tests/lua/close_wait_lf.vtc53
l---------reg-tests/lua/common.pem1
-rw-r--r--reg-tests/lua/h_txn_get_priv.lua15
-rw-r--r--reg-tests/lua/h_txn_get_priv.vtc33
-rw-r--r--reg-tests/lua/httpclient_action.lua8
-rw-r--r--reg-tests/lua/httpclient_action.vtc39
-rw-r--r--reg-tests/lua/lua_httpclient.lua49
-rw-r--r--reg-tests/lua/lua_httpclient.vtc68
-rw-r--r--reg-tests/lua/lua_socket.lua44
-rw-r--r--reg-tests/lua/lua_socket.vtc33
-rw-r--r--reg-tests/lua/set_var.lua25
-rw-r--r--reg-tests/lua/set_var.vtc83
-rw-r--r--reg-tests/lua/txn_get_priv-print_r.lua96
-rw-r--r--reg-tests/lua/txn_get_priv-thread.vtc69
-rw-r--r--reg-tests/lua/txn_get_priv.lua180
-rw-r--r--reg-tests/lua/txn_get_priv.vtc35
-rw-r--r--reg-tests/lua/wrong_types_usage.lua3
-rw-r--r--reg-tests/lua/wrong_types_usage.vtc77
-rw-r--r--reg-tests/mailers/healthcheckmail.lua70
-rw-r--r--reg-tests/mailers/healthcheckmail.vtc60
l---------reg-tests/mailers/mailers.lua1
-rw-r--r--reg-tests/mcli/mcli_show_info.vtc27
-rw-r--r--reg-tests/mcli/mcli_start_progs.vtc36
-rw-r--r--reg-tests/peers/basic_sync.vtc120
-rw-r--r--reg-tests/peers/basic_sync_wo_stkt_backend.vtc115
l---------reg-tests/peers/common.pem1
-rw-r--r--reg-tests/peers/tls_basic_sync.vtc157
-rw-r--r--reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc151
-rw-r--r--reg-tests/pki/README23
-rw-r--r--reg-tests/pki/certificates/www.test1.com-csr.json15
-rw-r--r--reg-tests/pki/certificates/www.test1.com-key.pem27
-rw-r--r--reg-tests/pki/certificates/www.test1.com.csr17
-rw-r--r--reg-tests/pki/certificates/www.test1.com.pem23
-rw-r--r--reg-tests/pki/config.json27
-rw-r--r--reg-tests/pki/intermediate/intermediate-ca-key.pem27
-rw-r--r--reg-tests/pki/intermediate/intermediate-ca.csr17
-rw-r--r--reg-tests/pki/intermediate/intermediate-ca.pem22
-rw-r--r--reg-tests/pki/intermediate/intermediate-csr.json14
-rw-r--r--reg-tests/pki/root/root-ca-key.pem27
-rw-r--r--reg-tests/pki/root/root-ca.csr17
-rw-r--r--reg-tests/pki/root/root-ca.pem22
-rw-r--r--reg-tests/pki/root/root-csr.json17
-rw-r--r--reg-tests/sample_fetches/cond_set_var.vtc362
-rw-r--r--reg-tests/sample_fetches/cook.vtc132
-rw-r--r--reg-tests/sample_fetches/hashes.vtc101
-rw-r--r--reg-tests/sample_fetches/so_name.vtc22
-rw-r--r--reg-tests/sample_fetches/srv_name.vtc46
-rw-r--r--reg-tests/sample_fetches/tcpinfo_rtt.vtc39
-rw-r--r--reg-tests/sample_fetches/tlvs.vtc57
-rw-r--r--reg-tests/sample_fetches/ubase64.vtc57
-rw-r--r--reg-tests/sample_fetches/vars.vtc84
-rw-r--r--reg-tests/seamless-reload/abns_socket.vtc55
-rw-r--r--reg-tests/server/cli_add_check_server.vtc161
-rw-r--r--reg-tests/server/cli_add_server.vtc87
-rw-r--r--reg-tests/server/cli_add_ssl_server.vtc110
-rw-r--r--reg-tests/server/cli_add_track_server.vtc242
-rw-r--r--reg-tests/server/cli_delete_dynamic_server.vtc94
-rw-r--r--reg-tests/server/cli_delete_server.vtc60
-rw-r--r--reg-tests/server/cli_set_fqdn.vtc57
-rw-r--r--reg-tests/server/cli_set_ssl.vtc60
l---------reg-tests/server/common.pem1
-rw-r--r--reg-tests/server/get_srv_stats.lua11
-rw-r--r--reg-tests/spoe/wrong_init.vtc22
-rw-r--r--reg-tests/ssl/README2
-rw-r--r--reg-tests/ssl/add_ssl_crt-list.vtc114
l---------reg-tests/ssl/bug-2265.crt1
-rw-r--r--reg-tests/ssl/ca-auth.crt33
-rw-r--r--reg-tests/ssl/cert1-example.com.pem.ecdsa17
-rw-r--r--reg-tests/ssl/cert1-example.com.pem.rsa80
-rw-r--r--reg-tests/ssl/cert2-example.com.pem.ecdsa17
-rw-r--r--reg-tests/ssl/cert2-example.com.pem.rsa80
-rw-r--r--reg-tests/ssl/client.ecdsa.pem28
-rw-r--r--reg-tests/ssl/client1.pem187
-rw-r--r--reg-tests/ssl/client2_expired.pem81
-rw-r--r--reg-tests/ssl/client3_revoked.pem81
-rw-r--r--reg-tests/ssl/common.4096.dh13
-rw-r--r--reg-tests/ssl/common.crt90
-rw-r--r--reg-tests/ssl/common.key28
-rw-r--r--reg-tests/ssl/common.pem72
-rw-r--r--reg-tests/ssl/crl-auth.pem18
-rw-r--r--reg-tests/ssl/del_ssl_crt-list.vtc102
-rw-r--r--reg-tests/ssl/dynamic_server_ssl.vtc113
-rw-r--r--reg-tests/ssl/ecdsa.crt12
-rw-r--r--reg-tests/ssl/ecdsa.key6
-rw-r--r--reg-tests/ssl/ecdsa.pem17
-rw-r--r--reg-tests/ssl/filters.crt-list2
-rw-r--r--reg-tests/ssl/generate_certificates/gen_cert_ca.pem23
-rw-r--r--reg-tests/ssl/generate_certificates/gen_cert_server.pem18
-rw-r--r--reg-tests/ssl/interCA1_crl.pem27
-rw-r--r--reg-tests/ssl/interCA1_crl_empty.pem27
-rw-r--r--reg-tests/ssl/interCA2_crl.pem27
-rw-r--r--reg-tests/ssl/interCA2_crl_empty.pem27
-rw-r--r--reg-tests/ssl/localhost.crt-list5
-rw-r--r--reg-tests/ssl/log_forward_ssl.vtc60
-rw-r--r--reg-tests/ssl/new_del_ssl_cafile.vtc157
-rw-r--r--reg-tests/ssl/new_del_ssl_crlfile.vtc139
-rw-r--r--reg-tests/ssl/ocsp_auto_update.vtc718
-rw-r--r--reg-tests/ssl/ocsp_update/index.txt2
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa33
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.issuer30
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.ocspbin0 -> 2281 bytes
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa56
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.issuer30
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.ocspbin0 -> 2298 bytes
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem63
-rw-r--r--reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem.ocspbin0 -> 2281 bytes
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_both_certs.crt-list2
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_ecdsa.crt-list1
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_ecdsa_no_update.crt-list1
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_ecdsa.pem63
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_rsa.pem86
-rw-r--r--reg-tests/ssl/ocsp_update/multicert_rsa.crt-list1
-rw-r--r--reg-tests/ssl/ocsp_update/ocsp.haproxy.com.pem84
-rw-r--r--reg-tests/ssl/ocsp_update/ocsp_update_rootca.crt30
-rw-r--r--reg-tests/ssl/rootCA_crl.pem16
-rw-r--r--reg-tests/ssl/set_cafile_client.pem95
-rw-r--r--reg-tests/ssl/set_cafile_interCA1.crt24
-rw-r--r--reg-tests/ssl/set_cafile_interCA2.crt24
-rw-r--r--reg-tests/ssl/set_cafile_rootCA.crt30
-rw-r--r--reg-tests/ssl/set_cafile_server.pem95
-rw-r--r--reg-tests/ssl/set_default_cert.crt-list2
-rw-r--r--reg-tests/ssl/set_default_cert.pem52
-rw-r--r--reg-tests/ssl/set_ssl_bug_2265.vtc90
-rw-r--r--reg-tests/ssl/set_ssl_cafile.vtc167
-rw-r--r--reg-tests/ssl/set_ssl_cert.vtc206
-rw-r--r--reg-tests/ssl/set_ssl_cert_bundle.vtc111
-rw-r--r--reg-tests/ssl/set_ssl_cert_noext.vtc90
-rw-r--r--reg-tests/ssl/set_ssl_crlfile.vtc146
-rw-r--r--reg-tests/ssl/set_ssl_server_cert.vtc129
-rw-r--r--reg-tests/ssl/show_ocsp_server.pem119
-rw-r--r--reg-tests/ssl/show_ocsp_server.pem.issuer30
-rw-r--r--reg-tests/ssl/show_ocsp_server.pem.ocspbin0 -> 2281 bytes
-rw-r--r--reg-tests/ssl/show_ocsp_server.pem.ocsp.revokedbin0 -> 2298 bytes
-rw-r--r--reg-tests/ssl/show_ssl_ocspresponse.vtc144
-rw-r--r--reg-tests/ssl/simple.crt-list5
-rw-r--r--reg-tests/ssl/ssl_alpn.vtc212
-rw-r--r--reg-tests/ssl/ssl_client_auth.vtc76
-rw-r--r--reg-tests/ssl/ssl_client_samples.vtc74
-rw-r--r--reg-tests/ssl/ssl_crt-list_filters.vtc124
-rw-r--r--reg-tests/ssl/ssl_curve_name.vtc51
-rw-r--r--reg-tests/ssl/ssl_curves.vtc134
-rw-r--r--reg-tests/ssl/ssl_default_server.vtc142
-rw-r--r--reg-tests/ssl/ssl_dh.vtc234
-rw-r--r--reg-tests/ssl/ssl_errors.vtc439
-rw-r--r--reg-tests/ssl/ssl_frontend_samples.vtc69
-rw-r--r--reg-tests/ssl/ssl_generate_certificate.vtc168
-rw-r--r--reg-tests/ssl/ssl_reuse.vtc141
-rw-r--r--reg-tests/ssl/ssl_server_samples.vtc73
-rw-r--r--reg-tests/ssl/ssl_simple_crt-list.vtc50
-rw-r--r--reg-tests/ssl/wrong_ctx_storage.vtc45
-rw-r--r--reg-tests/startup/automatic_maxconn.vtc104
-rw-r--r--reg-tests/startup/check_condition.vtc32
-rw-r--r--reg-tests/startup/common.pem117
-rw-r--r--reg-tests/startup/default_rules.vtc185
-rw-r--r--reg-tests/stick-table/converteers_ref_cnt_never_dec.vtc75
-rw-r--r--reg-tests/stick-table/src_conn_rate.vtc43
-rw-r--r--reg-tests/stick-table/unknown_key.vtc32
-rw-r--r--reg-tests/stickiness/lb-services.vtc292
-rw-r--r--reg-tests/stickiness/srvkey-addr.vtc263
-rw-r--r--reg-tests/stream/unique-id-from-proxy.vtc38
-rw-r--r--reg-tests/stream/unique-id.vtc47
-rw-r--r--reg-tests/tcp-rules/default_rules.vtc61
-rw-r--r--reg-tests/webstats/missing-stats-fields.vtc14
-rw-r--r--reg-tests/webstats/webstats-scope-and-post-change.vtc84
-rwxr-xr-xscripts/announce-release279
-rwxr-xr-xscripts/backport146
-rwxr-xr-xscripts/build-ot.sh27
-rwxr-xr-xscripts/build-ssl.sh208
-rwxr-xr-xscripts/build-vtest.sh10
-rwxr-xr-xscripts/create-release237
-rwxr-xr-xscripts/git-show-backports336
-rwxr-xr-xscripts/make-releases-json103
-rwxr-xr-xscripts/publish-release194
-rwxr-xr-xscripts/run-regtests.sh427
-rw-r--r--src/acl.c1377
-rw-r--r--src/action.c363
-rw-r--r--src/activity.c1248
-rw-r--r--src/applet.c501
-rw-r--r--src/arg.c479
-rw-r--r--src/auth.c316
-rw-r--r--src/backend.c3401
-rw-r--r--src/base64.c303
-rw-r--r--src/cache.c3014
-rw-r--r--src/calltrace.c286
-rw-r--r--src/cbuf.c59
-rw-r--r--src/cfgcond.c559
-rw-r--r--src/cfgdiag.c97
-rw-r--r--src/cfgparse-global.c1396
-rw-r--r--src/cfgparse-listen.c3073
-rw-r--r--src/cfgparse-quic.c292
-rw-r--r--src/cfgparse-ssl.c2382
-rw-r--r--src/cfgparse-tcp.c275
-rw-r--r--src/cfgparse-unix.c135
-rw-r--r--src/cfgparse.c4798
-rw-r--r--src/channel.c591
-rw-r--r--src/check.c2642
-rw-r--r--src/chunk.c311
-rw-r--r--src/cli.c3423
-rw-r--r--src/clock.c460
-rw-r--r--src/compression.c742
-rw-r--r--src/connection.c2748
-rw-r--r--src/cpuset.c296
-rw-r--r--src/debug.c2301
-rw-r--r--src/dgram.c79
-rw-r--r--src/dict.c127
-rw-r--r--src/dns.c1330
-rw-r--r--src/dynbuf.c129
-rw-r--r--src/eb32sctree.c472
-rw-r--r--src/eb32tree.c218
-rw-r--r--src/eb64tree.c218
-rw-r--r--src/ebimtree.c44
-rw-r--r--src/ebistree.c42
-rw-r--r--src/ebmbtree.c77
-rw-r--r--src/ebpttree.c208
-rw-r--r--src/ebsttree.c42
-rw-r--r--src/ebtree.c50
-rw-r--r--src/errors.c567
-rw-r--r--src/ev_epoll.c413
-rw-r--r--src/ev_evports.c441
-rw-r--r--src/ev_kqueue.c380
-rw-r--r--src/ev_poll.c348
-rw-r--r--src/ev_select.c335
-rw-r--r--src/event_hdl.c999
-rw-r--r--src/extcheck.c694
-rw-r--r--src/fcgi-app.c1133
-rw-r--r--src/fcgi.c294
-rw-r--r--src/fd.c1348
-rw-r--r--src/filters.c1125
-rw-r--r--src/fix.c264
-rw-r--r--src/flt_bwlim.c976
-rw-r--r--src/flt_http_comp.c1076
-rw-r--r--src/flt_spoe.c4739
-rw-r--r--src/flt_trace.c675
-rw-r--r--src/freq_ctr.c218
-rw-r--r--src/frontend.c339
-rw-r--r--src/h1.c1319
-rw-r--r--src/h1_htx.c1074
-rw-r--r--src/h2.c814
-rw-r--r--src/h3.c2403
-rw-r--r--src/h3_stats.c276
-rw-r--r--src/haproxy.c3962
-rw-r--r--src/hash.c190
-rw-r--r--src/hlua.c13961
-rw-r--r--src/hlua_fcn.c2721
-rw-r--r--src/hpack-dec.c475
-rw-r--r--src/hpack-enc.c210
-rw-r--r--src/hpack-huff.c861
-rw-r--r--src/hpack-tbl.c372
-rw-r--r--src/hq_interop.c174
-rw-r--r--src/http.c1433
-rw-r--r--src/http_acl.c185
-rw-r--r--src/http_act.c2501
-rw-r--r--src/http_ana.c5153
-rw-r--r--src/http_client.c1598
-rw-r--r--src/http_conv.c453
-rw-r--r--src/http_ext.c1881
-rw-r--r--src/http_fetch.c2368
-rw-r--r--src/http_htx.c3028
-rw-r--r--src/http_rules.c530
-rw-r--r--src/htx.c1099
-rw-r--r--src/init.c249
-rw-r--r--src/jwt.c478
-rw-r--r--src/lb_chash.c517
-rw-r--r--src/lb_fas.c348
-rw-r--r--src/lb_fwlc.c375
-rw-r--r--src/lb_fwrr.c623
-rw-r--r--src/lb_map.c281
-rw-r--r--src/linuxcap.c191
-rw-r--r--src/listener.c2487
-rw-r--r--src/log.c4659
-rw-r--r--src/lru.c305
-rw-r--r--src/mailers.c329
-rw-r--r--src/map.c1232
-rw-r--r--src/mjson.c1048
-rw-r--r--src/mqtt.c1281
-rw-r--r--src/mux_fcgi.c4268
-rw-r--r--src/mux_h1.c5374
-rw-r--r--src/mux_h2.c7598
-rw-r--r--src/mux_pt.c904
-rw-r--r--src/mux_quic.c3067
-rw-r--r--src/mworker-prog.c359
-rw-r--r--src/mworker.c821
-rw-r--r--src/namespace.c132
-rw-r--r--src/ncbuf.c986
-rw-r--r--src/pattern.c2683
-rw-r--r--src/payload.c1448
-rw-r--r--src/peers.c4231
-rw-r--r--src/pipe.c136
-rw-r--r--src/pool.c1539
-rw-r--r--src/proto_quic.c799
-rw-r--r--src/proto_rhttp.c464
-rw-r--r--src/proto_sockpair.c589
-rw-r--r--src/proto_tcp.c834
-rw-r--r--src/proto_udp.c247
-rw-r--r--src/proto_uxdg.c159
-rw-r--r--src/proto_uxst.c372
-rw-r--r--src/protocol.c309
-rw-r--r--src/proxy.c3451
-rw-r--r--src/qmux_http.c108
-rw-r--r--src/qmux_trace.c114
-rw-r--r--src/qpack-dec.c563
-rw-r--r--src/qpack-enc.c185
-rw-r--r--src/qpack-tbl.c415
-rw-r--r--src/queue.c761
-rw-r--r--src/quic_ack.c258
-rw-r--r--src/quic_cc.c49
-rw-r--r--src/quic_cc_cubic.c542
-rw-r--r--src/quic_cc_newreno.c220
-rw-r--r--src/quic_cc_nocc.c76
-rw-r--r--src/quic_cid.c286
-rw-r--r--src/quic_cli.c413
-rw-r--r--src/quic_conn.c1893
-rw-r--r--src/quic_frame.c1273
-rw-r--r--src/quic_loss.c312
-rw-r--r--src/quic_openssl_compat.c531
-rw-r--r--src/quic_retransmit.c252
-rw-r--r--src/quic_retry.c320
-rw-r--r--src/quic_rx.c2290
-rw-r--r--src/quic_sock.c1080
-rw-r--r--src/quic_ssl.c790
-rw-r--r--src/quic_stats.c215
-rw-r--r--src/quic_stream.c294
-rw-r--r--src/quic_tls.c1095
-rw-r--r--src/quic_tp.c714
-rw-r--r--src/quic_trace.c633
-rw-r--r--src/quic_tx.c2348
-rw-r--r--src/raw_sock.c489
-rw-r--r--src/regex.c459
-rw-r--r--src/resolvers.c3813
-rw-r--r--src/ring.c482
-rw-r--r--src/sample.c5173
-rw-r--r--src/server.c6765
-rw-r--r--src/server_state.c947
-rw-r--r--src/session.c528
-rw-r--r--src/sha1.c308
-rw-r--r--src/shctx.c320
-rw-r--r--src/signal.c284
-rw-r--r--src/sink.c1406
-rw-r--r--src/slz.c1421
-rw-r--r--src/sock.c1072
-rw-r--r--src/sock_inet.c521
-rw-r--r--src/sock_unix.c387
-rw-r--r--src/ssl_ckch.c3968
-rw-r--r--src/ssl_crtlist.c1577
-rw-r--r--src/ssl_ocsp.c1986
-rw-r--r--src/ssl_sample.c2389
-rw-r--r--src/ssl_sock.c8100
-rw-r--r--src/ssl_utils.c702
-rw-r--r--src/stats.c5521
-rw-r--r--src/stconn.c2050
-rw-r--r--src/stick_table.c5658
-rw-r--r--src/stream.c4045
-rw-r--r--src/task.c979
-rw-r--r--src/tcp_act.c749
-rw-r--r--src/tcp_rules.c1428
-rw-r--r--src/tcp_sample.c641
-rw-r--r--src/tcpcheck.c5150
-rw-r--r--src/thread.c1864
-rw-r--r--src/time.c147
-rw-r--r--src/tools.c6348
-rw-r--r--src/trace.c997
-rw-r--r--src/uri_auth.c318
-rw-r--r--src/uri_normalizer.c467
-rw-r--r--src/vars.c1454
-rw-r--r--src/version.c28
-rw-r--r--src/wdt.c193
-rw-r--r--src/xprt_handshake.c299
-rw-r--r--src/xprt_quic.c175
-rw-r--r--tests/conf/basic-check.cfg34
-rw-r--r--tests/conf/ext-check.cfg26
-rw-r--r--tests/conf/ports.cfg74
-rw-r--r--tests/conf/setstatus.lua26
-rw-r--r--tests/conf/tcp-check.cfg74
-rw-r--r--tests/conf/test-acl-args.cfg36
-rw-r--r--tests/conf/test-address-syntax.cfg84
-rw-r--r--tests/conf/test-backlog.cfg22
-rw-r--r--tests/conf/test-check-expect.cfg87
-rw-r--r--tests/conf/test-connection.cfg34
-rw-r--r--tests/conf/test-cookie-indirect.cfg47
-rw-r--r--tests/conf/test-cookie-insert.cfg35
-rw-r--r--tests/conf/test-cookie-passive.cfg35
-rw-r--r--tests/conf/test-cookie-prefix.cfg35
-rw-r--r--tests/conf/test-cookie-rewrite.cfg35
-rw-r--r--tests/conf/test-disable-404.cfg61
-rw-r--r--tests/conf/test-fsm.cfg346
-rw-r--r--tests/conf/test-fwlc.cfg61
-rw-r--r--tests/conf/test-fwrr.cfg51
-rw-r--r--tests/conf/test-handshakes-chk.cfg148
-rw-r--r--tests/conf/test-handshakes.cfg135
-rw-r--r--tests/conf/test-http-send-name-hdr.cfg33
-rw-r--r--tests/conf/test-http-set-status-lua.cfg31
-rw-r--r--tests/conf/test-http-set-status.cfg32
-rw-r--r--tests/conf/test-inspect-smtp.cfg44
-rw-r--r--tests/conf/test-inspect-ssl.cfg37
-rw-r--r--tests/conf/test-map-ports.cfg31
-rw-r--r--tests/conf/test-param-hash.cfg23
-rw-r--r--tests/conf/test-pollers.cfg15
-rw-r--r--tests/conf/test-redirect.cfg49
-rw-r--r--tests/conf/test-sample-fetch-args.cfg36
-rw-r--r--tests/conf/test-sample-fetch-conv.cfg42
-rw-r--r--tests/conf/test-sql.cfg29
-rw-r--r--tests/conf/test-srv-verify.cfg57
-rw-r--r--tests/conf/test-stats.cfg5045
-rw-r--r--tests/conf/test-str2sa.cfg60
-rw-r--r--tests/conf/test-time.cfg24
-rw-r--r--tests/conf/test-timeout.cfg27
-rw-r--r--tests/conf/test-url-hash.cfg40
-rw-r--r--tests/conf/test-valid-names.cfg37
-rw-r--r--tests/exp/blocksig.c16
-rw-r--r--tests/exp/filltab25.c399
-rw-r--r--tests/exp/hash_results.txt218
-rw-r--r--tests/exp/hashing-results.txt314
-rw-r--r--tests/exp/io_limits.txt116
-rw-r--r--tests/exp/ip-hash.c202
-rw-r--r--tests/exp/test_hashes.c559
-rw-r--r--tests/exp/testinet.c28
-rw-r--r--tests/exp/uri_hash.c377
-rw-r--r--tests/unit/ist.c700
-rw-r--r--tests/unit/test-1-among.c105
-rw-r--r--tests/unit/test-arg.c44
-rw-r--r--tests/unit/test-inherited-fd.py23
-rw-r--r--tests/unit/test-list.c98
-rw-r--r--tests/unit/test-sockpair.py28
1198 files changed, 490391 insertions, 0 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
new file mode 100644
index 0000000..4bf3fb6
--- /dev/null
+++ b/.cirrus.yml
@@ -0,0 +1,18 @@
+FreeBSD_task:
+ freebsd_instance:
+ matrix:
+ image_family: freebsd-13-2
+ only_if: $CIRRUS_BRANCH =~ 'master|next'
+ install_script:
+ - pkg update -f && pkg upgrade -y && pkg install -y openssl git gmake lua53 socat pcre
+ script:
+ - sudo sysctl kern.corefile=/tmp/%N.%P.core
+ - sudo sysctl kern.sugid_coredump=1
+ - scripts/build-vtest.sh
+ - gmake CC=clang V=1 ERR=1 TARGET=freebsd USE_ZLIB=1 USE_PCRE=1 USE_OPENSSL=1 USE_LUA=1 LUA_INC=/usr/local/include/lua53 LUA_LIB=/usr/local/lib LUA_LIB_NAME=lua-5.3
+ - ./haproxy -vv
+ - ldd haproxy
+ test_script:
+ - env VTEST_PROGRAM=../vtest/vtest gmake reg-tests REGTESTS_TYPES=default,bug,devel
+ on_failure:
+ debug_script: (for folder in /tmp/*regtest*/vtc.*; do cat $folder/INFO $folder/LOG; done && ls /tmp/haproxy.*.core && gdb -ex 'thread apply all bt full' ./haproxy /tmp/haproxy.*.core)
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..4b56645
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,3 @@
+SUBVERS export-subst
+VERDATE export-subst
+*.[ch] diff=cpp
diff --git a/.github/ISSUE_TEMPLATE/Bug.yml b/.github/ISSUE_TEMPLATE/Bug.yml
new file mode 100644
index 0000000..b56ecb7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/Bug.yml
@@ -0,0 +1,108 @@
+name: Bug Report
+description: Report a problem with HAProxy to help us resolve it.
+labels:
+- 'type: bug'
+- 'status: needs-triage'
+body:
+- type: markdown
+ attributes:
+ value: |
+ ## Welcome!
+
+ You are about to *report a bug* you encountered in HAProxy. Please use the 'Feature Request' template if you want to propose a new feature instead.
+
+ This issue tracker is used to track actual bugs. Please use [the forum](https://discourse.haproxy.org/) or mailing list if you have a question, e.g. to get help with building a configuration to achieve your desired behavior.
+
+ The forum is at: https://discourse.haproxy.org/
+
+ The mailing list (no need to subscribe) is: haproxy@formilux.org
+ Subscribe to the list: haproxy+subscribe@formilux.org
+ Unsubscribe from the list: haproxy+unsubscribe@formilux.org
+
+ Forum and mailing list are correct places for questions about HAProxy or general suggestions and topics, e.g. usage or documentation questions! This issue tracker is for tracking bugs and feature requests directly relating to the development of the software itself.
+
+ Thanks for understanding, and for contributing to the project!
+- type: textarea
+ id: description
+ attributes:
+ label: Detailed Description of the Problem
+ description: |
+ In this section, please try to concentrate on observations. Only describe what you observed directly.
+ validations:
+ required: true
+- type: textarea
+ id: expected-behavior
+ attributes:
+ label: Expected Behavior
+ description: |
+ Explain why you consider the described behavior (above) to be wrong. What did you expect instead?
+
+ Most likely this is a mismatch between HAProxy's documentation and HAProxy's behavior.
+ validations:
+ required: true
+- type: textarea
+ id: steps
+ attributes:
+ label: Steps to Reproduce the Behavior
+ description: |
+ The more time you spend describing an easy way to reproduce the behavior (if this is possible), the easier it is for the project developers to fix it!
+ placeholder: |
+ 1.
+ 2.
+ 3.
+ validations:
+ required: true
+- type: textarea
+ id: possible-cause
+ attributes:
+ label: Do you have any idea what may have caused this?
+ description: |
+ Simply leave this empty if you do not.
+- type: textarea
+ id: possible-solution
+ attributes:
+ label: Do you have an idea how to solve the issue?
+ description: |
+ Simply leave this empty if you do not.
+- type: textarea
+ id: configuration
+ attributes:
+ label: What is your configuration?
+ description: |
+ - Include as much configuration as possible, including global and default sections.
+ - Replace confidential data like domain names and IP addresses.
+ render: haproxy
+ validations:
+ required: true
+- type: textarea
+ id: haproxy-vv
+ attributes:
+ label: 'Output of `haproxy -vv`'
+ description: |
+ Please run `haproxy -vv` (with two `v`) and paste the output into this field.
+
+ Please also include the output of `uname -a` if you use HAProxy 2.1 or older.
+ render: plain
+ validations:
+ required: true
+- type: textarea
+ id: last-output
+ attributes:
+ label: Last Outputs and Backtraces
+ description: |
+ If HAProxy crashed then please provide:
+
+ 1. The last output from your HAProxy logs (e.g. from journalctl or syslog).
+ 2. A backtrace from a coredump (`t a a bt full`).
+ render: plain
+- type: textarea
+ id: additional
+ attributes:
+ label: Additional Information
+ description: |
+ Any additional information about your environment that may be useful to know about. For example:
+
+ - Any local patches applied
+ - Environment specificities
+ - Unusual workload
+ - Interesting observations or coincidences with events on other components
diff --git a/.github/ISSUE_TEMPLATE/Code-Report.yml b/.github/ISSUE_TEMPLATE/Code-Report.yml
new file mode 100644
index 0000000..41d1dd6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/Code-Report.yml
@@ -0,0 +1,43 @@
+name: Code Report
+description: File a Code Report (for example from coverity or valgrind)
+labels:
+- 'type: code-report'
+body:
+- type: markdown
+ attributes:
+ value: |
+ ## Welcome!
+
+ You are about to *report an issue found using an automated tool*. Please use the 'Bug Report' template if you encountered a regular bug.
+
+ Please use the forum or mailing list if you have a question, e.g. to get help with building a configuration to achieve your desired behavior.
+- type: input
+ id: tool
+ attributes:
+ label: Tool Name and Version
+ description: The name and version of the tool you used (e.g. valgrind-3.13.0, or Coverity)
+ validations:
+ required: true
+- type: textarea
+ id: code-report
+ attributes:
+ label: Code Report
+ description: Please provide the full output of the tool here.
+ render: plain
+ validations:
+ required: true
+- type: textarea
+ id: additional
+ attributes:
+ label: Additional Information
+ description: |
+ Any additional information about your environment (e.g. example configurations to trigger a memory leak).
+- type: textarea
+ id: haproxy-vv
+ attributes:
+ label: 'Output of `haproxy -vv`'
+ render: plain
+ description: |
+ Please add the output of `haproxy -vv` you are currently using here, this helps us later to see what has changed in HAProxy when we revisit this issue after some time.
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/Feature.yml b/.github/ISSUE_TEMPLATE/Feature.yml
new file mode 100644
index 0000000..8515256
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/Feature.yml
@@ -0,0 +1,50 @@
+name: Feature Request
+description: Suggest a new feature or enhancement for HAProxy.
+labels:
+- 'type: feature'
+body:
+- type: markdown
+ attributes:
+ value: |
+ ## Welcome!
+
+ You are about to *request a feature* you are missing in HAProxy.
+
+ This issue tracker is used to track feature requests and bug reports. Please use [the forum](https://discourse.haproxy.org/) or mailing list if you have a question, e.g. to get help with building a configuration to achieve your desired behavior.
+
+ The forum is at: https://discourse.haproxy.org/
+
+ The mailing list (no need to subscribe) is: haproxy@formilux.org
+ Subscribe to the list: haproxy+subscribe@formilux.org
+ Unsubscribe from the list: haproxy+unsubscribe@formilux.org
+
+ Forum and mailing list are correct places for questions about HAProxy or general suggestions and topics, e.g. usage or documentation questions! This issue tracker is for tracking bugs and feature requests directly relating to the development of the software itself.
+
+ Thanks for understanding, and for contributing to the project!
+- type: textarea
+ id: feature-request
+ attributes:
+ label: Your Feature Request
+ description: |
+ What should HAProxy do differently? Which functionality do you think we should add? Please describe the feature you would like us to add here.
+ validations:
+ required: true
+- type: textarea
+ id: feature-explanation
+ attributes:
+ label: What are you trying to do?
+ description: |
+ This section should contain a brief description what you're trying to do, which would be possible after implementing the new feature.
+ validations:
+ required: true
+- type: textarea
+ id: haproxy-vv
+ attributes:
+ label: 'Output of `haproxy -vv`'
+ render: plain
+ description: >
+ Please add the output of `haproxy -vv` you are currently using here, this helps us later to see what has changed in HAProxy when we revisit this issue after some time.
+
+ Please *do not* enter "future version" or something similar. We are interested in the version you are *currently using*.
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..4c1ddc6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,8 @@
+blank_issues_enabled: true
+contact_links:
+ - name: HAProxy Mailing List
+ url: https://www.mail-archive.com/haproxy@formilux.org/
+ about: Questions are best directed to HAProxy Mailing list or the HAProxy Forum.
+ - name: HAProxy Forum
+ url: https://discourse.haproxy.org/
+ about: Questions are best directed to HAProxy Mailing list or the HAProxy Forum.
diff --git a/.github/errorfile b/.github/errorfile
new file mode 100644
index 0000000..f15d8e0
--- /dev/null
+++ b/.github/errorfile
@@ -0,0 +1,209 @@
+HTTP/1.0 200 OK
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html>
+<body>
+b1z6lx9BLl3rSuonLqkIJAn9k9Hsah5qGfx9aq3qWSw6Nn37AZBJ1lxI0UyI7zvjXIEjSEVdCS4U
+k6rTW/LndurrbPieC6OcEBPMjzGtsfpR9IsZ3QH6/mtBGnvAtbhxAfhMZ/QQXkqfv0JPjuLdXBdM
+Z9cInHOr4ykoETgRbHaNt9ykBv32nIKt81YxLtTOMyyFAzH/AVSHUs6PanUKhxG11Csqn5RnlvSj
+PBCaF0lJAGvndF/1PTSIhzjEtXR3ZUzCfO03/j0q0/4+cduV5jf3XNFeICjY19OHKSMIWVN0XVht
+bY2eSMG0LoL8TqWyv6VSnclsVM5S5LJe7prJtWFobEpU3AMZzzjMPsxDiyMGJhjbJa0TnsDGAwln
+IVO5n56gtdUdhwWUnVn8ZYGZlFVjOQt++q6XL/Vhm+DFCArXZ6Xz8+mz1o109JpM28jHhxg6e7A1
+CF08n0mwN+adNFTi3Wg8D4RJOOQ90Q1bS/gmW7LtPjYxGuu8k27MjUspEHeeEr5rdAcBJbKiG8C9
+191DBDLxlJv/V4ZYG/FdGIqX/a2F7x03Uj7rsVnBPmOz7U0EbcHGcEEpZlSN9YLuUKvPXeZ8HWa9
+fbaGO39Yt+9DByPWC1Xyr65sPBH8eDURPdSDMh3Sr+16HH46anVI40tjK8NZC6jjFBQPfJBP6MVW
+HZF1F48rZXxesnHLHaMESCvwTruBf5R4JjYB1gt1Vv76e4Pew1MTK3/1ooNPV5kvoBV5PSLkMDqx
+XO6dxSC9Y3HzxhzkoRK56h7SWDbwxd5OmUHNvjm3k0QtVTAWsAEbJ5q/gp65ikG3+hGvp9xF80pU
+C1dAxK9+MHg7Ya3UiV6G/dB9prc3v92lEVqtK5CNKzlFiWQHSF3H+sz4qPGlB2Ub8H0T59TSZcyu
+oTFKi802BYc8UnPdUX+mf4Uda4Vad4dPE409UQ1XIEqI+2pCTgOCUm80xM2Hgyjpp8bi1mnv6rI1
+8jafv0e6S23Meb9d93E/MLm82KWfXHIjPkDFaTouGS78IJie3giG68U270AL1+gpUwNunW+Ez0Ch
+AwKUOM5BUL9pFfRMeDshy8KfiDGr7enLupqa2xe65Hbo47eioZZfIb0AD3P7yzlciIUXsy5JAwCF
+B+L0T+XuRUJuXCaJ+ioDmgW0PenJ6xfL/+BuJ9yVrMGYb0paL/cD7VSVDda1L7+VSbLW7sQ6BOHM
+ZgFV6+O81p48hGDquMb9+eURGrFKhQFipUPEi5sTQ7ocmyRZIAI3VeEOdBsX6zuwR9a2L5aV4yZc
+HoiQikOgAeF1O8FNoVBIKh6TvFIzG+JFxb64pfWiwku+njgQu/xXkhuDSnYh/tzzqwghzmKHpQQl
+7fbxF7jBihOr4qTcD/fUNPNGZYAZnZk/+wuA/6NOqwJl7nV2E7Ht7N13E6RZqGzfcL8KWldZbuFL
+cFUVZ7sxogftmAWhSrQ1Io4IcAqt19XL5uGFlAiELphh5v+3mWKVNh5kOaCIDcoOggaerDZMl05F
+C/d5veJxVLFBsSKFfdADmGh/8g85JDQC1UJuHYXbmPKuQ3pzUZRg6a3JYoi9ssLz7GijrSmmgkXs
+71+8TsvHFP193euCd9N981+Gp4NMxpVvWkrYFsjkdtkzSQBda4dUlJ/QhbbHoRAuzNl2zDkUU7SA
+P7LCNw3SKJlCQlwDEtqNYuO6jiQBZcUnajdk/UKwVwiH/p6q8rg8bh+NPV04hTZraUoaKumsPG+5
+tCBRj/WxPDKWfLjpgLx2gJPU4SKJrKwbfSot+VVO0Tc9viV8Jl0PPOkcW3ixx3Hc9WEqj0QZYHsB
+kUk4E/q8N5WDnvmCzp3t6tlpeNqqkXhvNOAg0XxVXmKE3pWEytX1iMdggdjnoo9dLLGcNwW5+tnw
+XdAlEVuqcvTeKJfYT/RMxpfdB7bXsg6OGEokMEkZHltLPmlYkB9i+J6zpWgo0WCwjawocVc7Y9Lj
+FfSezs/Fs2s8OJhFlrHQzh3SwZoyAXHgOPC1wJDanMZWjLASi6W7ds2H2FHuyKYfx/gJb02+d199
+1ac7QG3Vgi0QiNB+6D8vuj4r05jQgHj7REIvFwbvJX/eVCY2kle+nXjzOiTL9M4AoDdaW9Hfzoao
+YnwcKslhmHhRl/Q+9jA0YX7TCHh/VcKg6+lao3ScQ5F+MjwZewK0lwOlE9Z7Oz5rDNwTdBe6LhwA
+tTkeItrhm45IPdipFKRRcqY7OPV0GgHeqIg704hGnpzws0cJi++lpLi2c+387h28ymvUXArndPtF
+at7mboqJ7XCi2mYBOa73e7Q58R5UBhNzZB+M+SbNM3Xi5hcbXMH1UtnHGx8E8uNS5oXQvm4Cm4k0
+v1g0jU5xxU3m8j6ze99Z6sFZ3EJ7IrIdIkKHl0jkr+WZww64BEKmNsJfh0nWO+5Bm50ZK+sNkvDg
+PtjkehxsWaaERJ8aQeqzIVQTK81m6FqHYdcSsuxMiY2ZAQSnVRarmSJqyd2oPy5vEkCnS9yd2ha9
+bYqAREVHUEy//dw/XJAtttnZSgwAKdn+SRSQuDiWZ9GPs0k/zKuohKkSXkHPlhIDuer+lJ1Hs17m
+r0JTCt2LQVXLdCbKScAHOm4wdGeeIyMsV8MJv/PIWoySW8PIm3IjjzFinphnj6COvvVJUYg6zvPb
+1WN7ZU0UyI0nFklUVguc6RF2ByO9ZzZA7nmVXlFawnDc5UkotXSGYZJaiV9c44Mvqg8CgxbfLLk+
+OCOJgQF9xIEk0bUp/QAfKj6o5aP2qHr+YvKxxTxlEthlxdGyM+F8YX4WpR6wf2mbFjc6jWC67xk3
+F7zfTdXtmezimB6vUkRAf4P5yS8J6Q7m7JCE/V0CN0Z6fUG4Z/8cGxpVQwqD44McT257MqSdiwrp
+C9NiXxqWiLXcj0NbUI0rxAlzSwzuFAjMdLpPOm3zjm6I3SltMKn9BPSOyz5Q4wclInCx6yvZAqK3
+r95cvb72qVEt7YJKaM3b6Vb0oQRMpSWyoYHZQ75WTwcwd8PRecAXGPqgn0e0GYUSfRqiBi5Z3tUp
+eljOJvV9ujIs2rFeySLKVfkCfHvcCRVyFZwsiUO0W9NvvIy40lkHtFshFANYlDkJOznhLVSlUGNW
+lNwSTjEuG/bcGiiAm4NogFSmu6ijWrrJZbFAjH2CSKkKJUxCpAeasm5nDBqXY6fmS56WLv+mZmlJ
+qQx2aJ/yiGWg5h4auN4tyq7vM0S66X9+rg2c+vrxQBAaUbF+rGP6x44QE2FZS66Sptlh3/dY+CLn
+7fuVLv2/2ROBRPN06XmmBF0l5VA51wCmozvcHXGgSNndTws7naVH6pu4tNwYje+sKV3xzzspY5ob
+cZxtGGGfOMvGuZB8Utx7QgEXAM+RBTc3n1mCJdka2JL797YtSuysoiDmoKY16tQDzw5yQfkU5sbN
+pvUFJZLTDBXdBMMisEzyfICsuIyv6A72TWdo9hGnfk1EfofSCuDWVv8Zt1L+FwtM0lkm73t0ILRa
+EpmusX+2QFiJdjmpl744bCu4nsj/a/RQ4EmSdKHZXuCL8/pII+ebHY9LkruU4LgXsNa7ZCKCn8lo
+RUq7p5/Iwz6tbJ/n2Zp4q5UKeTGtlAqfPicg18AmOwJZx/UunhFpAoPr3Au7wfM7aYTAm+6XRgRx
+xXglcFDtGwipKH8Fkj2crhfaJ2GT2/4e0Abb70HW6yYlUHYKI8aMZ1fFXl0qzFbkOuq9I/4IEhLU
+auZS898sqj2H6EZ78LE9r/tV1TjC55LS9mVVbmGptjtjSnXQKzwz0ecgUHTJ/BqugyUflq61ZjLg
+6GX7+hisr1AJFs72g7aRJGXJHESmPb6fbsfWDqoLquR7NuYefaOsAJWQX8zkgocyIe9gGrHIE0Tt
+ybdP4WtbUwn53drOs5A6UbpiSt1XVHps2gZcSkeGIRu7fnX9ZU53758/LKuLdyAQn44aInztqJMn
+JVjFjZ4/3RwrDoPHyOwKbrjyWNWTqWBc6ewcLOFvYz4V35pvuXeT/JanX93foBv4O9aWgmjAjqC/
+j8SbRjEt145uKVP113AfvEgLFviOkKu1UOlRiOtqFsN/rSy/7AREhsH/7UO+qSYm3VsFiiUu5N00
+iENT45OeG2yGB/PcyOtSTiryHPZ0sBs9BaCmtCdHxXIRZg0oHk0As+GbsdyG+njF4DmNzrS0ypgt
+tT4BkDWqn2s9zwIvOjE8G/zRHffn9VfwqGGrNVenszmkpyoGsmpfl212tUHnvF5i1Ffivj5y9ZMw
+ESuq3BaH56cs4hEkq2qubjnhpcl41rSVCZLX0CNfS5uSBtPqknZTvWqH/i1y7NcA7jyj6muqR/Dk
+VqoVcaAI/Oey0250AAIpnzGO4Kiv48qWqXhZjRIJvWwtwflLrqU3N5Ec1oh0/vEe33nt61rNsB8P
+t0HZwz9aZfymYMRnzgA7tHDiRcGNIOWupWGnXRsisJdH/6T7HH3mup2lNklKvAwH9yM65cTeMDt1
+5/QZKTjlAKiJ8cXes6SE+HpKcTaDo1IQPXkYzv5i2x3D/Pteakic2djUXsQOB+V+Lsx2Koeb9yrq
+sHtDRsl7q2uMUlH/u6vMV86AZXmm63Iji3Ktpihc+Qno9GDbA4PQYrvEPtcLGY5UJTpKUhSi5icn
+RgQr1IKG0ALE9mH2nS/b9TJn34lzCaeiIMYfOe1Am9AR6bfXuI2riAT18xdGyZGOGzFjJOMtm7Ok
+4VhuPnoiuYsmeH3jI/HclKKm2nbrv1g2FOK5z/7cXTFtqqzoIR+/U+Cfixi/A19M4lJwmYnZSRhJ
+DMKtf4E8rrW1brFa+HJULcc775nyLTYrhPjio5ozar1/mLZm2eQiIFspHYL1oMh4IvhVXi2ABsq2
+NAyHSZ5UwxXg5jUUv9Z1I8CsNseXIGIiubTe1JQMeTrvzlnBpYhQv2W6LZ4V4EYk6/Af3ULGIQ7C
+b2iS/xaa5X+JWHl1MJ4N6debH9OryCj51sPC2o7rAWu9a07RqdzarIVZuJkgvvBSyhHfF9aoOAdb
+jOFW4wU79J1VDfVj3wTIGvY6qNru/s+Dapx4s9a0inqj6yX0i6uf1flNnSm+wesHLGhI1ae3uZuS
+GBeo+0GiooJHFT+E9BlR+4lZKmr4+waJvCRJmyJ7y57eUWJQbviH0vazlDbJGVqdjRTKV6UgPFrV
+xkVnbFBsNP40VnMfJbXiJN+iSkWPYtrQbEoFGpWhpqEcPmryiIApN2LllVGtGmkBBAM2+kQx9jud
+EEV+B7+hhhC4Is1ntkbYHFVbIWFgxjbpwP408v5LGkoEnfV1Wdp3d8UtkrShvkOmG/VGtZq6uv9w
+bZXMk8b1npQeN2eu7WK4cBDho4CuALLcUl+yO6BkPOibZCSEs2d3M/FMrGZfw+jm5EM30XLItvP4
+6Kg0NDudqumR3LQNtH0RGvlMmEHb47u2E97GgCiJomBP2/JxcFlSzTwLJL805UKWyISEfAvET+e9
+GkE+CoPOHbtsHUEybw3UGKfMUUMhPlC+oEUMwmyZD6JWfYSwygqL8aw0abtDMzOaEWrz0xfkd9ym
+BnK9mTd79oPvFAh9qvbdUdgldxXcvIG19jzQ/1IVVz8LZVMJhOKg/yskLFABiTDJ2JXbpznjH1BH
+zswQqmlcPqu/vNBFK59EYKhAZumQkjLxB/lPfJBzByPCCfM+dk7Pq5xTjbWLnrPHQoC5WaVrQiCj
+Wcosd66Kg9CrXrL98auVQVIX6Oh/Sl6h6W5fqD/iOPUZp13AacEIRA38rwbsyR4ohfSxyHGkd24+
+GUJ/s+Rj7KlwiJEDQdMbJ5Dr4yqIvPLqMLyuz4soneIMdA7RCztmlieIgNnwTI5nzlESRzXoIkzk
+KKvC2LPzypHnOhJ3XmZODFRs3Y/qKUIOdSBwDRayhHTnuVK1v21bB3EkFtWllqXf/qHUj19+YFuy
+rD4ZvPmIuTxqXb0wPlnr52aE2M2p5068lG6hAvCpZH/uuZsNuaYATcaeBm5J0uvFpKITTHriUEcs
+NtjSPf/Z7rn8VWBm6iGcTrrCC+w9LRagVdTxecg+NK5PVUoUVAGEhQXoL+ESGUanPOjFN7NwhM8J
+WUagxHH7ALK8LuxrXktdkk7jalBI7U66aFnMxh44M/3yEcUsGH94/jqsC31cCNp4kE97BedfGyRR
+jIkTpZAJg702y4k/LEkkcT+wzh0E3vtcpq1fx/3sXduBaxP8HrXSxAiIVDUqJcNy0PYkXLxxnVVW
+8z3EDoe0ZZa7PSvYHA1NQHO/74e1T1Alv/ImPo6l7a5M+f2tXZ+75cw7dQgYmnFuGlinfvq12oNw
+EwZhrs2WCAEzrjC/sFuPIn0IVqU8q6ve3BzD9YltbeEcJDLkRTOYNkvo8kF6s4uRchsig8FxFE4Q
+Jxwravj5xyJpFUF6ZbhagtX3LIBT2/IrWAZgpNu9f4fq9lQ3wnP1UEgkv8o07LyyEV52ZrH6wn19
+sqB9S/6TgvTqO4FwteoxKTzhNI+0oXwloAQrYsmmkhjROu+BncuzKuj6m67mPKrjkudYup/xfVCL
+ktYIcPIPCy0ahKdSD03CV2zBT8IF7B9skYoZxC0NvP3rPzl6w/h0wK0jJya12tVHZfqSu8WMD03C
+AieOYB1+KoHL3PZsArTAnPGPbAU2VnwC6zFV1LXiq3yvqrtERZsD+xsZxMG2hTGM4jUCFgVrRN3X
+Qh05FpC71nHR19z+vaSNJ5wJp6hWIQBTqjz/NWew908SGF1hAFAkzQAHb1a7RN/Cdq9l6WK4XmCg
+0g4Vuix2sa8AceNCcuQ9LBimoUEvglkwvCxaxh0Es7xIumigdavavAPvpR2pGESn3S2dOTanYFyG
+yJJfLVtH0VWgqzKRZJYg5jzHqcIE6Sh7VXroGIJezm0ykjMUVd3xOrTFW1lEqJI8YEwC5kVhjzhd
+sCJNvEIENH0roJX3YiH05jatoKRRhJNF9bN45LKL319pPbZox35rZD23wxZCrrnEDxKOVhVL/ibY
+8ITaT5qdDS+ngEEeZdzO8cMoPHbHKdZQgH6DivP5QTDcd9405pE41X4rSDbJaJ7fHonhQUbQR4Rb
+TKp4nYKhb1stz3hvP5JqbGD5Lj+0EdeOCXdGk9mbAymoIko6D82n0tjRvA6cFoHI7aaRmzWN2N05
+hYCnpcK/NBFud7usuyAPFLBvaO5Xnar32EGnw9ykCTFjNdfgJjvOxHiE34kiRq/kn1DL5WKwq9EP
+MRgEFZ+O7wElyfa6ZtRstJsdI9geoyogelNCpnv4MmRps91jNIjIqyznonsnjWv8+Nl1kzt6bGQR
+QjeTfDofWHG/RuIdq9BlKUsqe/VJI1jeCG2kNZ0muWaa7G6K42W4eKp3XPLLiAahNEthtGb6PWDt
+IjKhydJeLFWe2FCCBkffYflVZbDsUtS0OiFiBFXDxzKjv0lK1W+DkQ6Yyjh05bXWsdN4P8QRayx2
+SJRWqzhe05HFY75DZLnldGvP348mQmJPxsQQxiH0Gi94DajrNN5I4if1z71rLkyhbxpAu1y311AL
+rKNO0CKv27lH3KdpFQlLG3xWQFD86m0RICfDf+SYdugAIPiUPqvJ/QEhkqGLXFgmYdLDfsyp95O1
+YY8K66uS7hlc+sOKRqCJ2Tu0KZP7O10NssRuUhwH1nRLmPUhFX+AyRvE52NAQ+qHppMwFWN2CqPt
+YtrkBuzxWCpWuHREkvwoBFW0v8ahyAv33qZZOenJy3EY1R2XJis43tEGrh/40aMG8ktmF7mAS1d6
+ObU2MO0rqf6y7+l6OvnwhFaqokfwZxH3i/axRC3OAf2znpqDRDhsCQwFWzrgbC8ocCuOyntjx02v
+yMX5gy2rSXQ0nqqrDLZ4WywJ6cBAxl6ltSP3lgnAlxMRcvbpU6DmMn9QBlTyzsrTpTq6PWjsukqX
+0aARHpY5I0U334P6oTaWxDbn+N0IWNLm4MTFxMYsP04hajKfDTEtjhom4ZVy5O6mtc/UP2THyPwx
+zK7ETgLItLThg0eKZApOZXpVj3NGN0VorIfrMLx9K6O1vxrkwY3Ybpk1wuSxWO51sXc211XjCOMa
+r8axFnIynW2ngHy+hIVjEwG9NxK3ORYUN2I4spVJJGqLQ+b4wceUDSD/Cky1GkirdlWVkheAlkd2
+eLz9UIoJCWCx9BVhjY8ufW75pNsNjgcy27tpt49TJGpKyoXyoZGynksoCA/QzYVrCUn6VsNh0UjG
+LlPQ+zFh4iyIYpvVemcOsu8HQZC1YIoumBwu8ukqpo0FBALnVX7jDmxcYMjxPBL/xWJyMF6GOfF8
+yTsxk6YKhaWZb0qCNyEqUQ5OmvoP7aboWOVXWRofWj4CriT13UzeOL5EfCh5GAmlEK8w7GdLq1QW
+9LpedlGTHr/sSoeNPMqSS2KEZNvQAp1uff97M/Wr6ilWAQwQNXXESoJ8xgUTR/i8DUBVemedy6FU
+DdFwv0/FOFaUqyxE5yjqibZCj82TIimq9CPPEJjHfiYswb1dwlEIWWcxg7XRb/lt2pBCaljrVDWo
+64OUPoRICDbVGTiFXh2UuEpw6PDs4ouz+R+0eHeBV3VkFmO7n18nuP6CLFYTI+mz0L9E1eM5coNA
+vpX4j76tnagQ3GSm5FBjcTLOG2I0f7MS+AvA5cjzLYzTCVeF73eyjgBlVwFk+U6Nkk0VPvwL22gu
+sRQu0kthRNzkfd4sEFvF88vOEq1zuQEuI5zMVcNQ3eYgFXrKLnXQQJdYcJ95mkIbUfW95cNeruHc
+rNv9GtoHx4RyWHB5GnTJCLvIdYsBtCd0jlW1R88kf8JeUaAOeoDRV7ScbIKO3xNRpn2P3PzPD884
+chFMCCx7qODm4/6q7onWO3lEsUEiXVFB+rBsN9ZzYmEmXfLuureTWWRZUaXfPY8LNVb+Zu+LPzcf
+4kZ9+Fxowm9Q0s9r97K3fQKnxz7ptr/+D1McEKMXLGFCm1JA65PbyRsIo6drTYenzrn/QF4B+Shg
+E2Fq5hlOfzpmeUNcHT6EgM02J+4bfjzOA06G9ZxPY4ZNvevOuMA+ITiXQMLpIHXTpZNvKDMBEvD6
+vfin7laoyn7zB45THToL7D1SCdj9IxtOD4UrAVSPAtjZ517JVNyMcRVGT8tfRzT8/AYVCeYY1G9M
+GMv7fmTJ4FqvXQWfOIiQbQk5b6ZIkr7x55TasVxDZgDazhjgTy3gavKXTXB8NjSx6dLaP03I9A28
+44W32wh2KGCtv75P3b2WPaSt3oJVPp3SRrgt8a5QsPc21iDkAVLlOcWOqCnHXjiL5Ljtkots3idw
+5qDH7vpviIhcoqxnhBcaTc0NEKHX8WxZBamiuLiceCVHTBNpbZbS031yGwHIVu3ZE3oLeCTG6Gzr
+6/96lrWA9NW8NGxqS2LMlTZ6oR1pACLLnARSbhyUSpX4dmx7SjOcnBB/7tyAJ8wNHLreD3KK5Opo
+IHrQInd08AefjqTaW6BUKhAKYIdeRKLWka6QnrrIOeWioL+xQaImTU3KJ1Lfao4CL9wp7c8DukoP
+OYsCNtLgFNr70Yrb1jFYjYPsrtUw59NuSugmTotsCO+Etb2PsXgoCTO2j545sgSsm0y/qIXbZLAE
+oB7hReI8JrT00f6YLW79C7SpZQ3P9O77G1IDfhxr+D7tIrRgqelyTiH2gBOBbC4jbaPt/Z4gsrHq
+ncncllSvs6RxVHIEZbxz1eCCJDkTkbZqfzvHGh2X+RhG38Y9GaOdh+Ki26Cv6U1Wzds3t8W9DRSf
+RCjz7YqN33mGQVSbG0DPTQ8VAQyfDHcyOMTvJtsiRYn0z1sXl7UcULT37IfHb7NJncWiSU6AHOkT
+JaPbYs8Oh3eohYDzE6k3iSpUcSxxF0V3+kVkqAUrgFlUPRvoi65xYxqiMQ2DqAU8JbclkRPh7Gse
+DEttqQiZ5vDH894otB+WxdyxlKWF/e4SBKC0d3m/4jtYN9xn/Kwc6xStW9phP59/kU+1p9YoGFOz
+y8cHQSQ6aE+c9ySCYSpup2VHoI8vafA93kbZXsp8JqWgMO4hE3oqZo8y6QvapxiqWgLc3s6CeTMV
+1akLpWpDFnhzA7hkB5xRLuE/XTbVSjELRcKVPFfVw42UMkzf7UXXUz2Y/MwhlnLo5H7lWUoYwQNS
+mQ2bLA9jjbf/J7abcd6OgFnXtots4AnSpyY976Q589H8g0Godi2GEzZ6/lamDk7jmSaoA/r5LYVL
+ecuMHI+FWP5sp+FQb1gBhxfF6zC+KMyvaDyV15/jGVbAwn9JNM+B6YJ/sjkiukVzruBz/8i3htmB
+pp0RlgDfxGe5lpnVJSfoSkFi8psxkwzGraTnWhKvByrMGVJNZuchoV2Gis5ORnWLCFuok5jB43Lv
+e9t9xkyXhaig95z2yy6/7cVEsIlOGfKnU1SKVu6JJ4kqDLvWd2Sf9aYq37fO3U833vA9GFHuNX93
+9gJxnrMpQpA+2T88XR2LmoMqfSQ3fV8KGkT6YZuNYe2G8GQ4bnDzO1CWN7gBNXmEw1tgusQwoX1+
+1n2zWhZkgEbxQw8H8tRtyGNzsadxCwQAmcqbwvc29s2QVBGipPTzkzqHfP9WBKr1QB2y0TQPsXcf
+znTRIKSnk9/HYKkUpXHT8rdWcJQpuIpOQXpxDAb2hUOaIyOBPOHLdAVV0bbiS97DOeowytVIdr5l
+KvLdo367gsKBln240ZUYxwLxU5o2fvUEdwCCxAbkFCI1NdamkOaRMS/ASNeduMWMMuFXmW4gmna0
+nponYLK1drlECpJK1LIeszRqMRnlQ2bKIZaNnhARDLx8J+qTdpVKPY5rnKPi8RueD8w4bgmETLkj
+Pu7LVwGDM+KyLkfxiP5EHbjULcV23bOfucRGvUy0OP/Ciq8+IhibLohBnod6DPaJayPFN2xtYTii
+aYk3e4YyaqQ5JZGMtx9TkanAdlu28rd69oO+AVHsAj+pXWTwfr3Jd7rNMlE9OCM0CBewcEhVl8XG
+dERGMhSVPVARbfwaI67sFmHUr0X8mAsJ0yJoYqbLPIW5jiV1PbK6zkcaksDLLoMkIhJpASb/98+P
+q5SKERK5j0rUcpTtr83wfNTPmxxTJzPK4i8Dd2dfv1zy8BF9seOylVQ5Lxw6xXaCrgStUVl8Gz0p
+LJWduNunp8pqFiTK2uYeR1KcSabbEj9xPcbWw9zasQL8ycyUB9RoavKLf7MB+a8NK2OSXH5dfD5H
+Pl1jcgMu6C9X9YbUtMyhOBbXirYqv80wWDAhLn0GmOn1Stg+sWjGv3UyssE9JZtfH3ExYsS6pWTz
+XhFKN+Pvwrj6U2393x25W6MYqWWIIE8MVBVjB5z97WQ+B+A+jVrdno8XAWonBXwfSA7RX2+z8F+A
+2AbBpIgGNbW0v7YbPdRinHWbXvccWQ+stNa8Rg3rhf0Z+c3AqMSzJMo3G1fK6565z1ZjPmQya0Qv
+V0TRsXRtBPwJCaKOhweMRm/8cUoKBK0UT2D+xtbmag2hhlk+nX9rUAtAuH8zI15WZN+IzyImPNZg
+xtBm9hK9yEWQcze0+vLvlGduZb0jh5nMiJaeDg8fADk8I46ZtKkbA595X12HG4szWKNPoeo6M6eg
+lRRkV3UFyNqQv+EJspmiSWp3qwLN1+lAwdVRIf/58UrJCThn3FDe/EDThjMJD0/AmvEZ2QB6ptjD
+ku8mAKuIM90NSkDayS1/mhAEQCThrEFsEc3yWrKtkZGl01eOTyRvVQcfzEfiG5nYJSIj7zz9NQ1K
+JEoXNtz3NNmEYKxJ3IlaFcCl/RdWDcuHT3s86OoiuUb5FoQbxPsgkX75vUd0N7C4tBPFugzxDlho
+e6BXgBTLS5miqes9Sld5L4q02q44VR07NfmY3LX72vFDcWX8mEpLtSOPnyu0LtzT71kVpWlt9aV2
+LvtCY1P7fsE+Ux9CtwSzLczYzxpu6d7QVQtbQQJLNDsxcGKrfiWfoP0EpAleG4DlrFgKE7DnKXw6
+uV6henTS8UGOGEgFmkAM5ogsZLsNght/Ty5PHwTWYgm/nBc3s1UEqSCpbl8CKZyRQys76I3yP4+J
+bSgXCId3u2XZqYoUqfnaqcFaPzkn5evrIIelAbZQJRQioHN+++TsCh7RifuOSVMOswrFEtXfeSUI
+JCvljAZBFFtl3PjzmKdP64oBaW5nMglSG1lBqTQe40qajHS0EfAsaLEbh8HsciQNE2krCKCkGSN8
+UkzmtQO7ErwvGVrmIuYbgJqzDcE2I2a/Z3Lac//hwBAcBsF+yfWLGXmNQiNcaHnmoZCkekctfJFV
+lbPhaapODNLjhKoMXJkOvBk5MRJal0MMaBtUnxBCzeazIkkPxy1S6L1KGm85x+l6UtzHqPKuYF+w
+KoOUlb+9jckpStrgll/o5xGUU/hgWsMZtfYTNiq2ShlpjxDT4OuimS26O2jTlCJl8nhbSxc23iC7
+EOSsV8DolO2oUF2JmBpfqQWsOG/88CZhQqN7CQWM734bLXDrSdYE4WI+YgINTnQvM6nZr+U6+kqp
+2ORCVQ8huhM20uj0/+adTe1zowyijWi4aod6cLhQ7kZD4PFio5Y6omaHXLUNaPofPMFB3Bl9owKI
+bhJJ0GJ8V/abRegfRHNve9w74BDFoPs1BZJBz3H9OEwguFKSk/Jr4ep5U2op9d7Um87FtUwWVtBd
+bAyIanLdh3TGW1haNyhfiCk8aR5MlzAvBV9iuNK2aw5yxCgowa/VhuLQaIVFSrZrjvfwvFoYK1re
+ke6nSM3n1/rmbu+BY6qG5u+3SQs4Oob9dD+y4nN7N1IhUY0jf4hYDfsUrexVdEsStu54riCyNLIL
+czHtc+60dpqZFGFIbMJeDqgkUR6msurjr75bEwvuB80rMPbHTkbo9JNf4bBAxKf/IJiokE4J6Rjr
+tFZTDMh48Rjp/zGumLhRPstHZHoz6ETMDmC2c5IsAF77F6ofytqS1i+81W6jIzAUhyyMKCi+EgM7
+Lo3rFzyeRVBMf4sgPajI2NnUpMzm9Zn4p/HlxxWu+wUu6bv4IjQS12jdA7OuRA03LaVr7vIJhOIc
+g5lYj307khG0uIyZVa/Wh9/Bi0wCwecoACcebEeD+hIKvevZqR8VZBlB3DRoEzbOe1v9mnf7TzG4
+zReNRAT+TAuX19Ne8bmMT2ZYXsAGF3U+1aKNFuxaXVblO9HIKZ4g63/u0EK2ikt0zDpGIg0s7zD5
+dVA83t4xHTRa5H0S03336hd8FK9FgZ6+1tTCuxtdfOnGDCw53Uha0GHkK9O3GwBI7rdbP0bTvmx3
+iMawl6swyeSPAl1/yXZ6/ud8frt9V3tCiUiMO9FyN+F5E371JXHhfcY1D5/FQ0cAULzVUQtaxO/V
+u/KzIZDZ/LjWl/e0iBhtZG1DFVws0djmdkZdlddoDSJyQRU4XPGQr3J9EF8+AelTMo51bGUPVEC7
+PO00NRw4XW228K61C4FTeq4vvUoCQjr73rmG/iDPS/trXsYQjU160a3Jb/aMFHY0x7kw5vT1h5Cf
+ZR5sq/Ktlmattr81zgdYbN8/YHtFbISj9hctuHTTolv9zHgOJmuKMyS8UHQYrux8JFGKXjM4QNv0
+OOK09mIafWnAnlzHXwuvgdqZxUhua+Tn/C95Aps1SfwLxDmb8h++xwayZWAwGQ1/pjZGj2oneEQc
+e4yF5lR+rPIMN6ggNA66WdsHUVUH8nU12a2tsPV/xN/0ODFy3R5Lt81SgUsjdhwqWrOwDWYKx7Lw
+ULCsLvraD0LGdJnccojryAXkgLFRppVmwfHyEq61vAGkf6DDPPJC0nU6KogVthHztYPwjmUyaDkE
+Hm5IT6qto6YhU0SwIcqdSq6xIXgVx85zKhLyXQfp8wTsAifaRqNqFePNCicZsXROlXgWMKz2VNaA
+ppRsecTb+6fTbiO8GHslXR7uk5EZ/6Xa6CwKgOd5L+SzKSlVQb953TgLW16pW1yTDPrXE0ztQ2c0
+ACCm95j/y3DOhtU7/ZnijQb1VZlARiYrQ4jV5c5KkCStH5LYd2Im2PxUjfkloJQ8NCs9fTkmn7h7
+BeM6eVEXXX0J2KtLKyV4WeQxFuOuB7BSBoNKwSWV1Yt7jH2TqNQmqwp2RkHblR9ARxKPGpYDuds7
+ltfGDW/8EBPthF+ub5WNnYEjXHDxoV+OCoI9UvfNCO8gni8HJf1b+sTJhcc56S1tpcPziFcd6gTM
+Mwk1ljLaBYqOhInkDi9G6ok2bsdrB1FJuvIXsmezJpal8Gn1KA/7iKtqv/w4v/O0LgH78/OCtFfl
+aZgTbZ+ery6gsD7XqgB+KVBa6Ez4+5W03Q3Ocgbu1tAn1rmu+VmcVRAje/mo1ltvz3TuLDvGa9Te
+5Wpg4Do/OBYu0Dsb/tjVC3f4g14/bDhpK/PJkjqtUCArMSdQcpE1aOzxS4ROGPPX/BP8uLuWbGAG
+UOqbjhmn3y5AiinpTCC5BgUSvdX7igGf11d/+rktPCUtor50vzPfJFzI837MxhiWEf9Dfm9L7cV0
+HDu+PBHF1CE0gF+TaSeEgLf3cUdQfty2tfrqA+SUi8vebdaOBIVrdQwIf0MFYdxKSpYqhFshwIkx
+n84YYfYNM4J8V5qwnkGaVGITbVo1orebXFKCeDYSiN+yeuZld2vV5Zz8FaptZCU0CN+rTyIldIK0
+j81EDgYcUKvjyxOYUxbH6UsuXmi2vZgQfbDMyQ79p6K03JaQAeaDvOjQLl4FdE6HGA8O4uM2aJyL
+pZOnVjdj47zcX0Ah8F2TZ6YH76fMW+Qi2/s2RsVKnGLVWhzpSICmi5igcgeRSRsurUwUYUWsDl2E
+zUg6G+n2AJFaJQItSJwoiixxKgVaMiducsVJ+Sr12nUKH59AGaVyx+nmk3SIMtpigPZtlnv2MRHp
+3Fn9zV24EkGi6NkVLgslKEe8UOcYZOEZybzbkSz/24fiTyRAnaIb1PLH+zkwiXvPuXoaW/qRD+28
+mlCtWlm7vCSu6zcNk9Dp3AuDAB5HC5ruS1uPHHLQb6QSTElmMlYXV5UnyxQDbUBPybQ9R+5WzCp9
+A8gKZ4W3qAEALsmK6DfDayEepkLSz/1jseeq31ZkVxzytbZuGNtbVJn241QH0E/QoxPUQCfV133Z
+iNec7okJorscEM9m6EfGPhBi5D5Jm/Q8fOLz2iu399MiDKDZu9yt9qEV7mh7
+</body>
+</html>
+
diff --git a/.github/h2spec.config b/.github/h2spec.config
new file mode 100644
index 0000000..745a637
--- /dev/null
+++ b/.github/h2spec.config
@@ -0,0 +1,27 @@
+global
+ log stdout local0
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+
+defaults
+ mode http
+
+ timeout connect 5s
+ timeout client 30s
+ timeout client-fin 1s
+ timeout server 30s
+ timeout server-fin 1s
+ timeout http-request 10s
+ timeout http-keep-alive 300s
+
+ option logasap
+ option http-buffer-request
+
+frontend h2
+ mode http
+ bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
+ default_backend h2
+
+backend h2
+ errorfile 200 .github/errorfile
+ http-request deny deny_status 200
diff --git a/.github/matrix.py b/.github/matrix.py
new file mode 100755
index 0000000..856704d
--- /dev/null
+++ b/.github/matrix.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python3
+
+# Copyright 2019 Ilya Shipitsin <chipitsine@gmail.com>
+# Copyright 2020 Tim Duesterhus <tim@bastelstu.be>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+import functools
+import json
+import re
+import sys
+import urllib.request
+from os import environ
+from packaging import version
+
+#
+# this CI is used for both development and stable branches of HAProxy
+#
+# naming convention used, if branch name matches:
+#
+# "haproxy-" - stable branches
+# otherwise - development branch (i.e. "latest" ssl variants, "latest" github images)
+#
+
+def clean_ssl(ssl):
+ return ssl.replace("_VERSION", "").lower()
+
+def get_all_github_tags(url):
+ headers = {}
+ if environ.get("GITHUB_TOKEN") is not None:
+ headers["Authorization"] = "token {}".format(environ.get("GITHUB_TOKEN"))
+ request = urllib.request.Request(url, headers=headers)
+ try:
+ tags = urllib.request.urlopen(request)
+    except Exception:
+ return None
+ tags = json.loads(tags.read().decode("utf-8"))
+ return [tag['name'] for tag in tags]
+
+@functools.lru_cache(5)
+def determine_latest_openssl(ssl):
+ tags = get_all_github_tags("https://api.github.com/repos/openssl/openssl/tags")
+ if not tags:
+ return "OPENSSL_VERSION=failed_to_detect"
+ latest_tag = ""
+ for tag in tags:
+ if "openssl-" in tag:
+ if (not latest_tag) or (version.parse(tag[8:]) > version.parse(latest_tag[8:])):
+ latest_tag = tag
+ return "OPENSSL_VERSION={}".format(latest_tag[8:])
+
+def aws_lc_version_string_to_num(version_string):
+ return tuple(map(int, version_string[1:].split('.')))
+
+def aws_lc_version_valid(version_string):
+    return re.match(r'^v[0-9]+(\.[0-9]+)*$', version_string)
+
+@functools.lru_cache(5)
+def determine_latest_aws_lc(ssl):
+ tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags")
+ if not tags:
+ return "AWS_LC_VERSION=failed_to_detect"
+ valid_tags = list(filter(aws_lc_version_valid, tags))
+ latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
+ return "AWS_LC_VERSION={}".format(latest_tag[1:])
+
+@functools.lru_cache(5)
+def determine_latest_libressl(ssl):
+ try:
+ libressl_download_list = urllib.request.urlopen(
+ "https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/"
+ )
+    except Exception:
+ return "LIBRESSL_VERSION=failed_to_detect"
+ for line in libressl_download_list.readlines():
+ decoded_line = line.decode("utf-8")
+ if "libressl-" in decoded_line and ".tar.gz.asc" in decoded_line:
+            l = re.split(r"libressl-|\.tar\.gz\.asc", decoded_line)[1]
+ return "LIBRESSL_VERSION={}".format(l)
+
+
+def clean_compression(compression):
+ return compression.replace("USE_", "").lower()
+
+
+def get_asan_flags(cc):
+ return [
+ "USE_OBSOLETE_LINKER=1",
+ 'DEBUG_CFLAGS="-g -fsanitize=address"',
+ 'LDFLAGS="-fsanitize=address"',
+ 'CPU_CFLAGS.generic="-O1"',
+ ]
+
+def main(ref_name):
+ print("Generating matrix for branch '{}'.".format(ref_name))
+
+ matrix = []
+
+ # Ubuntu
+
+ if "haproxy-" in ref_name:
+ os = "ubuntu-22.04" # stable branch
+ else:
+ os = "ubuntu-latest" # development branch
+
+ TARGET = "linux-glibc"
+ for CC in ["gcc", "clang"]:
+ matrix.append(
+ {
+ "name": "{}, {}, no features".format(os, CC),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "FLAGS": [],
+ }
+ )
+
+ matrix.append(
+ {
+ "name": "{}, {}, all features".format(os, CC),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "FLAGS": [
+ "USE_ZLIB=1",
+ "USE_OT=1",
+ "OT_INC=${HOME}/opt-ot/include",
+ "OT_LIB=${HOME}/opt-ot/lib",
+ "OT_RUNPATH=1",
+ "USE_PCRE=1",
+ "USE_PCRE_JIT=1",
+ "USE_LUA=1",
+ "USE_OPENSSL=1",
+ "USE_SYSTEMD=1",
+ "USE_WURFL=1",
+ "WURFL_INC=addons/wurfl/dummy",
+ "WURFL_LIB=addons/wurfl/dummy",
+ "USE_DEVICEATLAS=1",
+ "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+ "USE_PROMEX=1",
+ "USE_51DEGREES=1",
+ "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+ ],
+ }
+ )
+
+ # ASAN
+
+ matrix.append(
+ {
+ "name": "{}, {}, ASAN, all features".format(os, CC),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "FLAGS": get_asan_flags(CC)
+ + [
+ "USE_ZLIB=1",
+ "USE_OT=1",
+ "OT_INC=${HOME}/opt-ot/include",
+ "OT_LIB=${HOME}/opt-ot/lib",
+ "OT_RUNPATH=1",
+ "USE_PCRE=1",
+ "USE_PCRE_JIT=1",
+ "USE_LUA=1",
+ "USE_OPENSSL=1",
+ "USE_SYSTEMD=1",
+ "USE_WURFL=1",
+ "WURFL_INC=addons/wurfl/dummy",
+ "WURFL_LIB=addons/wurfl/dummy",
+ "USE_DEVICEATLAS=1",
+ "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+ "USE_PROMEX=1",
+ "USE_51DEGREES=1",
+ "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+ ],
+ }
+ )
+
+ for compression in ["USE_ZLIB=1"]:
+ matrix.append(
+ {
+ "name": "{}, {}, gz={}".format(os, CC, clean_compression(compression)),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "FLAGS": [compression],
+ }
+ )
+
+ ssl_versions = [
+ "stock",
+ "OPENSSL_VERSION=1.0.2u",
+ "OPENSSL_VERSION=1.1.1s",
+ "QUICTLS=yes",
+ "WOLFSSL_VERSION=5.6.4",
+ "AWS_LC_VERSION=1.16.0",
+ # "BORINGSSL=yes",
+ ]
+
+ if "haproxy-" not in ref_name: # development branch
+ ssl_versions = ssl_versions + [
+ "OPENSSL_VERSION=latest",
+ "LIBRESSL_VERSION=latest",
+ ]
+
+ for ssl in ssl_versions:
+ flags = ["USE_OPENSSL=1"]
+ if ssl == "BORINGSSL=yes" or ssl == "QUICTLS=yes" or "LIBRESSL" in ssl or "WOLFSSL" in ssl or "AWS_LC" in ssl:
+ flags.append("USE_QUIC=1")
+ if "WOLFSSL" in ssl:
+ flags.append("USE_OPENSSL_WOLFSSL=1")
+ if "AWS_LC" in ssl:
+ flags.append("USE_OPENSSL_AWSLC=1")
+ if ssl != "stock":
+ flags.append("SSL_LIB=${HOME}/opt/lib")
+ flags.append("SSL_INC=${HOME}/opt/include")
+ if "LIBRESSL" in ssl and "latest" in ssl:
+ ssl = determine_latest_libressl(ssl)
+ if "OPENSSL" in ssl and "latest" in ssl:
+ ssl = determine_latest_openssl(ssl)
+
+ matrix.append(
+ {
+ "name": "{}, {}, ssl={}".format(os, CC, clean_ssl(ssl)),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "ssl": ssl,
+ "FLAGS": flags,
+ }
+ )
+
+ # macOS
+
+ if "haproxy-" in ref_name:
+ os = "macos-12" # stable branch
+ else:
+ os = "macos-latest" # development branch
+
+ TARGET = "osx"
+ for CC in ["clang"]:
+ matrix.append(
+ {
+ "name": "{}, {}, no features".format(os, CC),
+ "os": os,
+ "TARGET": TARGET,
+ "CC": CC,
+ "FLAGS": [],
+ }
+ )
+
+ # Print matrix
+
+ print(json.dumps(matrix, indent=4, sort_keys=True))
+
+ if environ.get("GITHUB_OUTPUT") is not None:
+ with open(environ.get("GITHUB_OUTPUT"), "a") as f:
+ print("matrix={}".format(json.dumps({"include": matrix})), file=f)
+
+if __name__ == "__main__":
+ if len(sys.argv) == 2:
+ ref_name = sys.argv[1]
+ main(ref_name)
+ else:
+ print("Usage: {} <ref_name>".format(sys.argv[0]), file=sys.stderr)
+ sys.exit(1)
diff --git a/.github/vtest.json b/.github/vtest.json
new file mode 100644
index 0000000..8e8165c
--- /dev/null
+++ b/.github/vtest.json
@@ -0,0 +1,14 @@
+{
+ "problemMatcher": [
+ {
+ "owner": "vtest",
+ "pattern": [
+ {
+ "regexp": "^#(\\s+top\\s+TEST\\s+(.*)\\s+FAILED.*)",
+ "file": 2,
+ "message": 1
+ }
+ ]
+ }
+ ]
+}
diff --git a/.github/workflows/aws-lc.yml b/.github/workflows/aws-lc.yml
new file mode 100644
index 0000000..e590000
--- /dev/null
+++ b/.github/workflows/aws-lc.yml
@@ -0,0 +1,66 @@
+name: AWS-LC
+
+on:
+ schedule:
+ - cron: "0 0 * * 4"
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install VTest
+ run: |
+ scripts/build-vtest.sh
+ - name: Determine latest AWS-LC release
+ id: get_aws_lc_release
+ run: |
+ result=$(cd .github && python3 -c "from matrix import determine_latest_aws_lc; print(determine_latest_aws_lc(''))")
+ echo $result
+ echo "result=$result" >> $GITHUB_OUTPUT
+ - name: Cache AWS-LC
+ id: cache_aws_lc
+ uses: actions/cache@v4
+ with:
+ path: '~/opt/'
+ key: ssl-${{ steps.get_aws_lc_release.outputs.result }}-Ubuntu-latest-gcc
+ - name: Install AWS-LC
+        if: ${{ steps.cache_aws_lc.outputs.cache-hit != 'true' }}
+ run: env ${{ steps.get_aws_lc_release.outputs.result }} scripts/build-ssl.sh
+ - name: Compile HAProxy
+ run: |
+ make -j$(nproc) CC=gcc TARGET=linux-glibc \
+ USE_OPENSSL_AWSLC=1 USE_QUIC=1 \
+ SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
+ DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+ ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+ sudo make install
+ - name: Show HAProxy version
+ id: show-version
+ run: |
+ ldd $(which haproxy)
+ haproxy -vv
+ echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+ - name: Install problem matcher for VTest
+ run: echo "::add-matcher::.github/vtest.json"
+ - name: Run VTest for HAProxy
+ id: vtest
+ run: |
+ # This is required for macOS which does not actually allow to increase
+ # the '-n' soft limit to the hard limit, thus failing to run.
+ ulimit -n 65536
+ make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+ - name: Show VTest results
+ if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+ run: |
+ for folder in ${TMPDIR}/haregtests-*/vtc.*; do
+ printf "::group::"
+ cat $folder/INFO
+ cat $folder/LOG
+ echo "::endgroup::"
+ done
+ exit 1
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 0000000..3d66f29
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,20 @@
+name: Spelling Check
+
+on:
+ schedule:
+ - cron: "0 0 * * 2"
+
+permissions:
+ contents: read
+
+jobs:
+ codespell:
+ runs-on: ubuntu-latest
+ if: ${{ github.repository_owner == 'haproxy' }}
+ steps:
+ - uses: actions/checkout@v4
+ - uses: codespell-project/codespell-problem-matcher@v1
+ - uses: codespell-project/actions-codespell@master
+ with:
+ skip: CHANGELOG,Makefile,*.fig,*.pem,./doc/design-thoughts,./doc/internals
+ ignore_words_list: ist,ists,hist,wan,ca,cas,que,ans,te,nd,referer,ot,uint,iif,fo,keep-alives,dosen,ifset,thrid,strack,ba,chck,hel,unx,mor,clen
diff --git a/.github/workflows/compliance.yml b/.github/workflows/compliance.yml
new file mode 100644
index 0000000..caf9624
--- /dev/null
+++ b/.github/workflows/compliance.yml
@@ -0,0 +1,57 @@
+
+name: Spec Compliance
+
+on:
+ schedule:
+ - cron: "0 0 * * 3"
+
+permissions:
+ contents: read
+
+jobs:
+ h2spec:
+ name: h2spec
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ include:
+ - TARGET: linux-glibc
+ CC: gcc
+ os: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install h2spec
+ id: install-h2spec
+ run: |
+ H2SPEC_VERSION=`curl --silent "https://api.github.com/repos/summerwind/h2spec/releases/latest" | jq -r -j '.tag_name'`
+ curl -fsSL https://github.com/summerwind/h2spec/releases/download/${H2SPEC_VERSION}/h2spec_linux_amd64.tar.gz -o h2spec.tar.gz
+ tar xvf h2spec.tar.gz
+ sudo install -m755 h2spec /usr/local/bin/h2spec
+ echo "version=${H2SPEC_VERSION}" >> $GITHUB_OUTPUT
+ - name: Compile HAProxy with ${{ matrix.CC }}
+ run: |
+ make -j$(nproc) all \
+ ERR=1 \
+ TARGET=${{ matrix.TARGET }} \
+ CC=${{ matrix.CC }} \
+ DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+ USE_OPENSSL=1
+ sudo make install
+ - name: Show HAProxy version
+ id: show-version
+ run: |
+ echo "::group::Show dynamic libraries."
+ if command -v ldd > /dev/null; then
+ # Linux
+ ldd $(which haproxy)
+ else
+ # macOS
+ otool -L $(which haproxy)
+ fi
+ echo "::endgroup::"
+ haproxy -vv
+ echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+ - name: Launch HAProxy ${{ steps.show-version.outputs.version }}
+ run: haproxy -f .github/h2spec.config -D
+ - name: Run h2spec ${{ steps.install-h2spec.outputs.version }}
+ run: h2spec -Svtk -h 127.0.0.1 -p 8443
diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml
new file mode 100644
index 0000000..4e14744
--- /dev/null
+++ b/.github/workflows/contrib.yml
@@ -0,0 +1,25 @@
+name: Contrib
+
+on:
+ push:
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Compile admin/halog/halog
+ run: |
+ make admin/halog/halog
+ - name: Compile dev/flags/flags
+ run: |
+ make dev/flags/flags
+ - name: Compile dev/poll/poll
+ run: |
+ make dev/poll/poll
+ - name: Compile dev/hpack
+ run: |
+ make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
new file mode 100644
index 0000000..0b3c4af
--- /dev/null
+++ b/.github/workflows/coverity.yml
@@ -0,0 +1,49 @@
+
+#
+# scan results: https://scan.coverity.com/projects/haproxy
+#
+
+name: Coverity
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+
+permissions:
+ contents: read
+
+jobs:
+ scan:
+ runs-on: ubuntu-latest
+ if: ${{ github.repository_owner == 'haproxy' }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install apt dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y \
+ liblua5.3-dev \
+ libsystemd-dev
+ - name: Install QUICTLS
+ run: |
+ QUICTLS=yes scripts/build-ssl.sh
+ - name: Download Coverity build tool
+ run: |
+ wget -c -N https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_SCAN_TOKEN }}&project=Haproxy" -O coverity_tool.tar.gz
+ mkdir coverity_tool
+ tar xzf coverity_tool.tar.gz --strip 1 -C coverity_tool
+ - name: Build WURFL
+ run: make -C addons/wurfl/dummy
+ - name: Build with Coverity build tool
+ run: |
+ export PATH=`pwd`/coverity_tool/bin:$PATH
+ cov-build --dir cov-int make CC=clang TARGET=linux-glibc USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_OPENSSL=1 USE_QUIC=1 USE_SYSTEMD=1 USE_WURFL=1 WURFL_INC=addons/wurfl/dummy WURFL_LIB=addons/wurfl/dummy USE_DEVICEATLAS=1 DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern ADDLIB=\"-Wl,-rpath,$HOME/opt/lib/\" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include DEBUG+=-DDEBUG_STRICT=1 DEBUG+=-DDEBUG_USE_ABORT=1
+ - name: Submit build result to Coverity Scan
+ run: |
+ tar czvf cov.tar.gz cov-int
+ curl --form token=${{ secrets.COVERITY_SCAN_TOKEN }} \
+ --form email=chipitsine@gmail.com \
+ --form file=@cov.tar.gz \
+ --form version="Commit $GITHUB_SHA" \
+ --form description="Build submitted via CI" \
+ https://scan.coverity.com/builds?project=Haproxy
diff --git a/.github/workflows/cross-zoo.yml b/.github/workflows/cross-zoo.yml
new file mode 100644
index 0000000..d9864e2
--- /dev/null
+++ b/.github/workflows/cross-zoo.yml
@@ -0,0 +1,110 @@
+#
+# this is named "zoo" after OpenSSL "cross zoo pipeline"
+#
+name: Cross Compile
+
+on:
+ schedule:
+ - cron: "0 0 21 * *"
+
+permissions:
+ contents: read
+
+jobs:
+ cross-compilation:
+ strategy:
+ matrix:
+ platform: [
+ {
+ arch: aarch64-linux-gnu,
+ libs: libc6-dev-arm64-cross,
+ target: linux-aarch64
+ }, {
+ arch: alpha-linux-gnu,
+ libs: libc6.1-dev-alpha-cross,
+ target: linux-alpha-gcc
+ }, {
+ arch: arm-linux-gnueabi,
+ libs: libc6-dev-armel-cross,
+ target: linux-armv4
+ }, {
+ arch: arm-linux-gnueabihf,
+ libs: libc6-dev-armhf-cross,
+ target: linux-armv4
+ }, {
+ arch: hppa-linux-gnu,
+ libs: libc6-dev-hppa-cross,
+ target: -static linux-generic32
+ }, {
+ arch: m68k-linux-gnu,
+ libs: libc6-dev-m68k-cross,
+ target: -static -m68040 linux-latomic
+ }, {
+ arch: mips-linux-gnu,
+ libs: libc6-dev-mips-cross,
+ target: -static linux-mips32
+ }, {
+ arch: mips64-linux-gnuabi64,
+ libs: libc6-dev-mips64-cross,
+ target: -static linux64-mips64
+ }, {
+ arch: mipsel-linux-gnu,
+ libs: libc6-dev-mipsel-cross,
+ target: linux-mips32
+ }, {
+ arch: powerpc64le-linux-gnu,
+ libs: libc6-dev-ppc64el-cross,
+ target: linux-ppc64le
+ }, {
+ arch: riscv64-linux-gnu,
+ libs: libc6-dev-riscv64-cross,
+ target: linux64-riscv64
+ }, {
+ arch: s390x-linux-gnu,
+ libs: libc6-dev-s390x-cross,
+ target: linux64-s390x
+ }, {
+ arch: sh4-linux-gnu,
+ libs: libc6-dev-sh4-cross,
+ target: no-async linux-latomic
+ }, {
+ arch: hppa-linux-gnu,
+ libs: libc6-dev-hppa-cross,
+ target: linux-generic32,
+ }, {
+ arch: m68k-linux-gnu,
+ libs: libc6-dev-m68k-cross,
+ target: -mcfv4e linux-latomic
+ }, {
+ arch: mips-linux-gnu,
+ libs: libc6-dev-mips-cross,
+ target: linux-mips32
+ }, {
+ arch: mips64-linux-gnuabi64,
+ libs: libc6-dev-mips64-cross,
+ target: linux64-mips64
+ }, {
+ arch: sparc64-linux-gnu,
+ libs: libc6-dev-sparc64-cross,
+ target: linux64-sparcv9
+ }
+ ]
+ runs-on: ubuntu-latest
+ steps:
+ - name: install packages
+ run: |
+ sudo apt-get update
+ sudo apt-get -yq --force-yes install \
+ gcc-${{ matrix.platform.arch }} \
+ ${{ matrix.platform.libs }}
+ - uses: actions/checkout@v4
+
+
+ - name: install quictls
+ run: |
+ QUICTLS_EXTRA_ARGS="--cross-compile-prefix=${{ matrix.platform.arch }}- ${{ matrix.platform.target }}" QUICTLS=yes scripts/build-ssl.sh
+
+ - name: Build
+ run: |
+ make ERR=1 CC=${{ matrix.platform.arch }}-gcc TARGET=linux-glibc USE_LIBCRYPT= USE_OPENSSL=1 USE_QUIC=1 USE_PROMEX=1 SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include ADDLIB="-Wl,-rpath,${HOME}/opt/lib"
+
diff --git a/.github/workflows/fedora-rawhide.yml b/.github/workflows/fedora-rawhide.yml
new file mode 100644
index 0000000..8f25781
--- /dev/null
+++ b/.github/workflows/fedora-rawhide.yml
@@ -0,0 +1,58 @@
+name: Fedora/Rawhide/QuicTLS
+
+on:
+ schedule:
+ - cron: "0 0 25 * *"
+
+permissions:
+ contents: read
+
+jobs:
+ build_and_test:
+ strategy:
+ matrix:
+ cc: [ gcc, clang ]
+ name: ${{ matrix.cc }}
+ runs-on: ubuntu-latest
+ container:
+ image: fedora:rawhide
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install dependencies
+ run: |
+ dnf -y install git pcre-devel zlib-devel pcre2-devel 'perl(FindBin)' perl-IPC-Cmd 'perl(File::Copy)' 'perl(File::Compare)' lua-devel socat findutils systemd-devel clang
+ - name: Install VTest
+ run: scripts/build-vtest.sh
+ - name: Install QuicTLS
+ run: QUICTLS=yes scripts/build-ssl.sh
+ - name: Build contrib tools
+ run: |
+ make admin/halog/halog
+ make dev/flags/flags
+ make dev/poll/poll
+ make dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
+ - name: Compile HAProxy with ${{ matrix.cc }}
+ run: |
+ make -j3 CC=${{ matrix.cc }} V=1 ERR=1 TARGET=linux-glibc USE_OPENSSL=1 USE_QUIC=1 USE_ZLIB=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_LUA=1 USE_SYSTEMD=1 ADDLIB="-Wl,-rpath,${HOME}/opt/lib" SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include
+ make install
+ - name: Show HAProxy version
+ id: show-version
+ run: |
+ echo "::group::Show dynamic libraries."
+ ldd $(command -v haproxy)
+ echo "::endgroup::"
+ haproxy -vv
+ echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+ - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
+ id: vtest
+ run: |
+ make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+ - name: Show VTest results
+ if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+ run: |
+ for folder in ${TMPDIR}/haregtests-*/vtc.*; do
+ printf "::group::"
+ cat $folder/INFO
+ cat $folder/LOG
+ echo "::endgroup::"
+ done
diff --git a/.github/workflows/musl.yml b/.github/workflows/musl.yml
new file mode 100644
index 0000000..930a22e
--- /dev/null
+++ b/.github/workflows/musl.yml
@@ -0,0 +1,62 @@
+name: alpine/musl
+
+on:
+ push:
+
+permissions:
+ contents: read
+
+jobs:
+ musl:
+ name: gcc
+ runs-on: ubuntu-latest
+ container:
+ image: alpine:latest
+ options: --privileged --ulimit core=-1 --security-opt seccomp=unconfined
+ volumes:
+ - /tmp/core:/tmp/core
+ steps:
+ - name: Setup coredumps
+ run: |
+ ulimit -c unlimited
+ echo '/tmp/core/core.%h.%e.%t' > /proc/sys/kernel/core_pattern
+ - uses: actions/checkout@v4
+ - name: Install dependencies
+ run: apk add gcc gdb make tar git python3 libc-dev linux-headers pcre-dev pcre2-dev openssl-dev lua5.3-dev grep socat curl musl-dbg lua5.3-dbg
+ - name: Install VTest
+ run: scripts/build-vtest.sh
+ - name: Build
+ run: make -j$(nproc) TARGET=linux-musl DEBUG_CFLAGS='-ggdb3' CC=cc V=1 USE_LUA=1 LUA_INC=/usr/include/lua5.3 LUA_LIB=/usr/lib/lua5.3 USE_OPENSSL=1 USE_PCRE2=1 USE_PCRE2_JIT=1 USE_PROMEX=1
+ - name: Show version
+ run: ./haproxy -vv
+ - name: Show linked libraries
+ run: ldd haproxy
+ - name: Install problem matcher for VTest
+ # This allows one to more easily see which tests fail.
+ run: echo "::add-matcher::.github/vtest.json"
+ - name: Run VTest
+ id: vtest
+ run: make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+ - name: Show coredumps
+ if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+ run: |
+ failed=false
+ ls /tmp/core/
+ for file in /tmp/core/core.*; do
+ failed=true
+ printf "::group::"
+ gdb -ex 'thread apply all bt full' ./haproxy $file
+ echo "::endgroup::"
+ done
+ if [ "$failed" = true ]; then
+ exit 1;
+ fi
+ - name: Show results
+ if: ${{ failure() }}
+ run: |
+ for folder in /tmp/haregtests-*/vtc.*; do
+ printf "::group::"
+ cat $folder/INFO
+ cat $folder/LOG
+ echo "::endgroup::"
+ done
diff --git a/.github/workflows/openssl-nodeprecated.yml b/.github/workflows/openssl-nodeprecated.yml
new file mode 100644
index 0000000..a04c6cb
--- /dev/null
+++ b/.github/workflows/openssl-nodeprecated.yml
@@ -0,0 +1,33 @@
+#
+# special purpose CI: test against OpenSSL built in "no-deprecated" mode
+# let us run those builds weekly
+#
+# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
+#
+#
+# some details might be found at NL: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
+# GH: https://github.com/haproxy/haproxy/issues/367
+
+name: openssl no-deprecated
+
+on:
+ schedule:
+ - cron: "0 0 * * 4"
+
+permissions:
+ contents: read
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install VTest
+ run: |
+ scripts/build-vtest.sh
+ - name: Compile HAProxy
+ run: |
+ make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
+ - name: Run VTest
+ run: |
+ make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
diff --git a/.github/workflows/vtest.yml b/.github/workflows/vtest.yml
new file mode 100644
index 0000000..7b5254b
--- /dev/null
+++ b/.github/workflows/vtest.yml
@@ -0,0 +1,162 @@
+# Copyright 2019 Ilya Shipitsin <chipitsine@gmail.com>
+# Copyright 2020 Tim Duesterhus <tim@bastelstu.be>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+name: VTest
+
+on:
+ push:
+
+permissions:
+ contents: read
+
+jobs:
+ # The generate-matrix job generates the build matrix using JSON output
+ # generated by .github/matrix.py.
+ generate-matrix:
+ name: Generate Build Matrix
+ runs-on: ubuntu-latest
+ outputs:
+ matrix: ${{ steps.set-matrix.outputs.matrix }}
+ steps:
+ - uses: actions/checkout@v4
+ - name: Generate Build Matrix
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ id: set-matrix
+ run: python3 .github/matrix.py "${{ github.ref_name }}"
+
+ # The Test job actually runs the tests.
+ Test:
+ name: ${{ matrix.name }}
+ needs: generate-matrix
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
+ fail-fast: false
+ env:
+ # Configure a short TMPDIR to prevent failures due to long unix socket
+ # paths.
+ TMPDIR: /tmp
+ # Force ASAN output into asan.log to make the output more readable.
+ ASAN_OPTIONS: log_path=asan.log
+ OT_CPP_VERSION: 1.6.0
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 100
+#
+# Github Action cache key cannot contain comma, so we calculate it based on job name
+#
+ - name: Generate cache key
+ id: generate-cache-key
+ run: |
+ echo "key=$(echo ${{ matrix.name }} | sha256sum | awk '{print $1}')" >> $GITHUB_OUTPUT
+
+ - name: Cache SSL libs
+ if: ${{ matrix.ssl && matrix.ssl != 'stock' && matrix.ssl != 'BORINGSSL=yes' && matrix.ssl != 'QUICTLS=yes' }}
+ id: cache_ssl
+ uses: actions/cache@v4
+ with:
+ path: '~/opt/'
+ key: ssl-${{ steps.generate-cache-key.outputs.key }}
+
+ - name: Cache OpenTracing
+ if: ${{ contains(matrix.FLAGS, 'USE_OT=1') }}
+ id: cache_ot
+ uses: actions/cache@v4
+ with:
+ path: '~/opt-ot/'
+ key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
+ - name: Install apt dependencies
+ if: ${{ startsWith(matrix.os, 'ubuntu-') }}
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y \
+ liblua5.3-dev \
+ libpcre2-dev \
+ libsystemd-dev \
+ ninja-build \
+ socat
+ - name: Install brew dependencies
+ if: ${{ startsWith(matrix.os, 'macos-') }}
+ run: |
+ brew install socat
+ brew install lua
+ - name: Install VTest
+ run: |
+ scripts/build-vtest.sh
+ - name: Install SSL ${{ matrix.ssl }}
+ if: ${{ matrix.ssl && matrix.ssl != 'stock' && steps.cache_ssl.outputs.cache-hit != 'true' }}
+ run: env ${{ matrix.ssl }} scripts/build-ssl.sh
+ - name: Install OpenTracing libs
+ if: ${{ contains(matrix.FLAGS, 'USE_OT=1') && steps.cache_ot.outputs.cache-hit != 'true' }}
+ run: |
+ OT_PREFIX=${HOME}/opt-ot scripts/build-ot.sh
+ - name: Build WURFL
+ if: ${{ contains(matrix.FLAGS, 'USE_WURFL=1') }}
+ run: make -C addons/wurfl/dummy
+ - name: Compile HAProxy with ${{ matrix.CC }}
+ run: |
+ echo "::group::Show compiler's version"
+ echo | ${{ matrix.CC }} -v
+ echo "::endgroup::"
+ echo "::group::Show platform specific defines"
+ echo | ${{ matrix.CC }} -dM -xc -E -
+ echo "::endgroup::"
+ make -j$(nproc) all \
+ ERR=1 \
+ TARGET=${{ matrix.TARGET }} \
+ CC=${{ matrix.CC }} \
+ DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+ ${{ join(matrix.FLAGS, ' ') }} \
+ ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
+ sudo make install
+ - name: Show HAProxy version
+ id: show-version
+ run: |
+ echo "::group::Show dynamic libraries."
+ if command -v ldd > /dev/null; then
+ # Linux
+ ldd $(which haproxy)
+ else
+ # macOS
+ otool -L $(which haproxy)
+ fi
+ echo "::endgroup::"
+ haproxy -vv
+ echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
+ - name: Install problem matcher for VTest
+ # This allows one to more easily see which tests fail.
+ run: echo "::add-matcher::.github/vtest.json"
+ - name: Run VTest for HAProxy ${{ steps.show-version.outputs.version }}
+ id: vtest
+ run: |
+ # This is required for macOS which does not actually allow to increase
+ # the '-n' soft limit to the hard limit, thus failing to run.
+ ulimit -n 65536
+ make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+ - name: Config syntax check memleak smoke testing
+ if: ${{ contains(matrix.name, 'ASAN') }}
+ run: |
+ ./haproxy -f .github/h2spec.config -c
+ - name: Show VTest results
+ if: ${{ failure() && steps.vtest.outcome == 'failure' }}
+ run: |
+ for folder in ${TMPDIR}/haregtests-*/vtc.*; do
+ printf "::group::"
+ cat $folder/INFO
+ cat $folder/LOG
+ echo "::endgroup::"
+ done
+ shopt -s nullglob
+ for asan in asan.log*; do
+ echo "::group::$asan"
+ cat $asan
+ echo "::endgroup::"
+ done
+ exit 1
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
new file mode 100644
index 0000000..b020d7c
--- /dev/null
+++ b/.github/workflows/windows.yml
@@ -0,0 +1,67 @@
+# Copyright 2019 Ilya Shipitsin <chipitsine@gmail.com>
+# Copyright 2020 Tim Duesterhus <tim@bastelstu.be>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+name: Windows
+
+on:
+ push:
+
+permissions:
+ contents: read
+
+jobs:
+ msys2:
+ name: ${{ matrix.name }}
+ runs-on: ${{ matrix.os }}
+ defaults:
+ run:
+ shell: msys2 {0}
+ strategy:
+ matrix:
+ include:
+ - name: "Windows, gcc, all features"
+ TARGET: cygwin
+ CC: gcc
+ os: windows-latest
+ FLAGS:
+ - USE_OPENSSL=1
+ - USE_PCRE=1
+ - USE_PCRE_JIT=1
+ - USE_THREAD=1
+ - USE_ZLIB=1
+ steps:
+ - uses: actions/checkout@v4
+ - uses: msys2/setup-msys2@v2
+ with:
+ install: >-
+ coreutils
+ curl
+ diffutils
+ gawk
+ gcc
+ make
+ tar
+ openssl-devel
+ pcre-devel
+ zlib-devel
+ - name: Compile HAProxy with ${{ matrix.CC }}
+ run: |
+ echo "::group::Show platform specific defines"
+ echo | ${{ matrix.CC }} -dM -xc -E -
+ echo "::endgroup::"
+ make -j$(nproc) all \
+ ERR=1 \
+ TARGET=${{ matrix.TARGET }} \
+ CC=${{ matrix.CC }} \
+ DEBUG="-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS -DDEBUG_POOL_INTEGRITY" \
+ ${{ join(matrix.FLAGS, ' ') }}
+ - name: Show HAProxy version
+ id: show-version
+ run: |
+ ./haproxy -vv
+ echo "version=$(./haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0e3b22d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,59 @@
+# Below we forbid everything and only allow what we know, that's much easier
+# than blocking about 500 different test files and bug report outputs.
+/.*
+/*
+!/.cirrus.yml
+!/.gitattributes
+!/.github
+!/.gitignore
+!/.travis.yml
+!/CHANGELOG
+!/LICENSE
+!/BRANCHES
+!/BSDmakefile
+!/Makefile
+!/README
+!/INSTALL
+!/CONTRIBUTING
+!/MAINTAINERS
+!/SUBVERS
+!/VERDATE
+!/VERSION
+!/addons
+!/admin
+!/dev
+!/doc
+!/ebtree
+!/examples
+!/include
+!/src
+!/tests
+!/debian
+!/scripts
+!/reg-tests
+# Reject some generic files
+*.o
+*.a
+*~
+*.rej
+*.orig
+*.bak
+*.sw[op]
+# And reject some specific files
+/admin/halog/halog
+/admin/dyncookie/dyncookie
+/admin/iprange/ip6range
+/admin/iprange/iprange
+/admin/systemd/haproxy.service
+dev/base64/base64rev-gen
+dev/flags/flags
+dev/poll/poll
+dev/tcploop/tcploop
+dev/haring/haring
+dev/hpack/decode
+dev/hpack/gen-rht
+dev/qpack/decode
+dev/udp/udp-perturb
+/src/dlmalloc.c
+/tests/test_hashes
+doc/lua-api/_build
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..f72077f
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1 @@
+Tim Duesterhus <tim@bastelstu.be>
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..7f5110e
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,54 @@
+dist: focal
+
+language: c
+
+branches:
+ only:
+ - master
+
+env:
+ global:
+ - FLAGS="USE_LUA=1 USE_OPENSSL=1 USE_PCRE=1 USE_PCRE_JIT=1 USE_SYSTEMD=1 USE_ZLIB=1"
+ - TMPDIR=/tmp
+
+addons:
+ apt:
+ update: true
+ packages: [ liblua5.3-dev, libsystemd-dev, libpcre2-dev, socat, libpcre3-dev ]
+
+matrix:
+ include:
+ - os: linux
+ arch: ppc64le
+ compiler: gcc
+ if: type == cron
+# - os: linux
+# arch: arm64
+# compiler: gcc
+# if: type == cron
+ - os: linux
+ arch: arm64-graviton2
+ group: edge
+ virt: vm
+ compiler: gcc
+ if: type == cron
+ - os: linux
+ arch: s390x
+ compiler: gcc
+ if: type == cron
+
+install:
+ - scripts/build-vtest.sh
+
+script:
+ - make -j$(nproc) ERR=1 TARGET=linux-glibc CC=$CC DEBUG=-DDEBUG_STRICT=1 $FLAGS
+ - ./haproxy -vv
+ - ldd haproxy
+ - make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
+
+after_failure:
+ - |
+ for folder in ${TMPDIR}/*regtest*/vtc.*; do
+ cat $folder/INFO
+ cat $folder/LOG
+ done
diff --git a/BRANCHES b/BRANCHES
new file mode 100644
index 0000000..53b2ee9
--- /dev/null
+++ b/BRANCHES
@@ -0,0 +1,239 @@
+HAProxy branches and life cycle
+===============================
+
+The HAProxy project evolves quickly to stay up to date with modern features
+found in web environments but also takes a great care of addressing bugs which
+may affect deployed versions without forcing such users to upgrade when not
+needed. For this reason the project is developed in branches.
+
+A branch is designated as two numbers separated by a dot, for example "1.8".
+This numbering is historical. Each new development cycle increases the second
+digit by one, and after it reaches '9' it goes back to zero and the first digit
+increases by one. It effectively grows as a decimal number increased by 0.1 per
+version.
+
+The complete version is made of the branch suffixed with "-dev" followed by a
+sequence number during development, then by "." followed by a number when the
+development of that branch is finished and the branch enters a maintenance
+phase. The first release of a branch starts at ".0". Immediately after ".0" is
+issued, the next branch is created as "-dev0" as an exact copy of the previous
+branch's ".0" version. Thus we observe the following development sequence:
+
+ ... 1.9-dev10 -> 1.9-dev11 -> 1.9.0 -> 2.0-dev0 -> 2.0-dev1 ... 2.0 -> ...
+
+Occasionally a series of "-rc" versions may be emitted between the latest -dev
+and the release to mark the end of development and start of stabilizing, though
+it's mostly a signal sent to users that the release is approaching rather than
+a change in the cycle as it is always hard to categorize patches.
+
+Very often the terms "branch" and "version" will be used interchangeably with
+only the first two digits to designate "the latest version of that branch". So
+when someone asks you "Could you please try the same on 1.8", it means "1.8.X"
+with X as high as possible, thus for example 1.8.20 if this one is available at
+this moment.
+
+During the maintenance phase, a maintenance branch is created for the just
+released version. The development version remains in the development branch
+called "master", or sometimes "-dev". If branches are represented vertically
+and time horizontally, this will look like this:
+
+ versions branch
+ 1.9-dev10 1.9-dev11 1.9.0 2.0-dev0 2.0-dev1 2.0-dev2
+ ----+--------+---------+-------+---------+---------+----------> master
+ \
+ \ 1.9.1 1.9.2
+ `-----------+-------------+---------> 1.9
+
+Each released version (e.g. 1.9.0 above) appears once in the master branch so
+that it is easy to list history of changes between versions.
+
+Before version 1.4, development and maintenance were inter-mixed in the same
+branch, which resulted in latest maintenance branches becoming unstable after
+some point. This is why versions 1.3.14 and 1.3.15 became maintenance branches
+on their own while the development pursued on 1.3 to stabilize again in the
+latest versions.
+
+Starting with version 1.4.0, a rule has been set not to create new features
+into a maintenance branch. It was not well respected and still created trouble
+with certain 1.4 versions causing regressions and confusing users.
+
+Since 1.5.0 this "no new feature" rule has become strict and maintenance
+versions only contain bug fixes that are necessary in this branch. This means
+that any version X.Y.Z is necessarily more stable than X.Y.W with W<Z.
+
+For this reason there is absolutely no excuse for not updating a version within
+your branch, as your version necessarily contains bugs that are fixed in any
+later version in that same branch. Obviously when a branch is just released,
+there will be some occasional bugs. And once in a while a fix for a recently
+discovered bug may have an undesired side effect called a regression. This must
+never happen but this will happen from time to time, especially on recently
+released versions. This is often presented as an excuse by some users for not
+updating but this is wrong, as the risk of staying with an older version is much
+higher than the risk of updating. If you fear there could be an issue with an
+update because you don't completely trust the version in your branch, it simply
+means you're using the wrong branch and need an older one.
+
+When a bug is reported in a branch, developers will systematically ask if the
+bug is present in the latest version of this branch (since developers don't
+like to work on bugs that were already fixed). It's a good practice to perform
+the update yourself and to test again before reporting the bug. Note, as long
+as you're using a supported branch, as indicated on the haproxy.org web site,
+you don't need to upgrade to another branch to report a bug. However from time
+to time it may happen that a developer will ask you if you can try it in order
+to help narrow the problem down. But this will never be a requirement, just a
+question.
+
+Once a bug is understood, it is tested on the development branch and fixed
+there. Then the fix will be applied in turn to older branches, jumping from
+one to the other in descending order. For example:
+
+ FIX
+ 2.0-dev4 HERE 2.0-dev5 2.0-dev6
+ -----+-------V-------------+-----------+--------------> master
+ 1.9.4 \ 1.9.5 1.9.6 1.9.7
+ --+------------o-------+---------+-------------+------> 1.9
+ 1.8.18 \ 1.8.19 1.8.20
+ -----+-----------o------------+-------------+---------> 1.8
+
+This principle ensures that you will always have a safe upgrade path from an
+older branch to a newer: under no circumstances a bug that was already fixed
+in an older branch will still be present in a newer one. In the diagram above,
+a bug reported for 1.8.18 would be fixed between 2.0-dev4 and 2.0-dev5. The
+fix will be backported into 1.9 and from there into 1.8. 1.9.5 will be issued
+with the fix before 1.8.19 will be issued. This guarantees that for any version
+1.8 having the fix, there always exists a version 1.9 with it as well. So if
+you would upgrade to 1.8.19 to benefit from the fix and the next day decide
+that for whatever new feature you need to upgrade to 1.9, you'll have 1.9.5
+available with the same set of fixes so you will not reintroduce a previously
+fixed problem.
+
+In practice, it takes longer to release older versions than newer ones. There
+are two reasons to this. One is technical: the fixes often require some
+adaptations to be done for older versions. The other reason is stability: in
+spite of the great care and the tests, there is always a faint risk that a fix
+introduces a regression. By leaving fixes exposed in more recent versions
+before appearing in older ones, there is a much smaller probability that such a
+regression remains undetected when the next version of the older branch is
+issued.
+
+So the rule for the best stability is very simple:
+
+ STICK TO THE BRANCH THAT SUITS YOUR NEEDS AND APPLY ALL UPDATES.
+
+With other projects, some people developed a culture of backporting only a
+selection of fixes into their own maintenance branch. Usually they consider
+these fixes are critical, or security-related only. THIS IS TERRIBLY WRONG.
+It is already very difficult for the developers who made the initial patch to
+figure if and how it must be backported to an older branch, what extra patches
+it depends on to be safe, as you can imagine it is impossible for anyone else
+to make a safe guess about what to pick.
+
+ A VERSION WHICH ONLY CONTAINS A SELECTION OF FIXES IS WAY MORE
+ DANGEROUS AND LESS STABLE THAN ONE WITHOUT ANY OF THESE FIXES.
+
+Branches up to 1.8 are all designated as "long-term supported" ("LTS" for
+short), which means that they are maintained for several years after the
+release. These branches were emitted at a pace of one per year since 1.5 in
+2014. As of 2019, 1.5 is still supported and widely used, even though it very
+rarely receives updates. After a few years these LTS branches enter a
+"critical fixes only" status, which means that they will rarely receive a fix
+but if a critical issue affects them, a release will be made, with or
+without any other fix. Once a version is not supported anymore, it will not
+receive any fix at all and it will really be time for you to upgrade to a more
+recent branch. Please note that even when an upgrade is needed, a great care is
+given to backwards compatibility so that most configs written for version 1.1
+still work with little to no modification 16 years later on version 2.0.
+
+Since 1.9, the release pacing has increased to match faster moving feature sets
+and a faster stabilization of the technical foundations. The principle is now
+the following:
+ - one release is emitted between October and December, with an odd version
+ number (such as "1.9"). This version heavily focuses on risky changes that
+ are considered necessary to develop new features. It can for example bring
+ nice performance improvements as well as invisible changes that will serve
+ later ; these versions will only be emitted for developers and highly
+ skilled users. They will not be maintained for a long time, they will
+ receive updates for 12 to 18 months only after which they will be marked
+ End-Of-Life ("EOL" for short). They may receive delicate fixes during their
+ maintenance cycle so users have to be prepared to see some breakage once in
+ a while as fixes are stabilizing. THESE VERSIONS MUST ABSOLUTELY NOT BE
+ PACKAGED BY OPERATING SYSTEM VENDORS.
+
+ - one release is emitted between May and June, with an even version number
+ (such as "2.0"). This version mostly relies on the technical foundations
+ brought by the previous release and tries hard not to apply risky changes.
+ Instead it will bring new user-visible features. Such versions will be
+ long-term supported and may be packaged by operating system vendors.
+
+This development model provides better stability for end users and better
+feedback for developers:
+ - regular users stick to LTS versions which rely on the same foundations
+ as the previous releases that had 6 months to stabilize. In terms of
+ stability it really means that the point zero version already accumulated
+ 6 months of fixes and that it is much safer to use even just after it is
+ released.
+
+ - for developers, given that the odd versions are solely used by highly
+ skilled users, it's easier to get advanced traces and captures, and there
+ is less pressure during bug reports because there is no doubt the user is
+ autonomous and knows how to work around the issue or roll back to the last
+ working version.
+
+Thus the release cycle from 1.8 to 2.2 should look like this:
+
+ 1.8.0 1.9.0 2.0.0 2.1.0 2.2.0
+ --+---------------+---------------+--------------+--------------+----> master
+ \ \ \ \ \
+ \ \ \ \ `--> 2.2 LTS
+ \ \ \ `--+--+--+---+---> 2.1
+ \ \ `----+-----+------+-------+----> 2.0 LTS
+ \ `--+-+-+--+---+------+--------+-----| EOL 1.9
+ `---+---+---+-----+-------+-----------+---------------+------> 1.8 LTS
+
+In short the non-LTS odd releases can be seen as technological previews of the
+next feature release, and will be terminated much earlier. The plan is to barely
+let them overlap with the next non-LTS release, allowing advanced users to
+always have the choice between the last two major releases.
+
+With all this in mind, what version should you use ? It's quite simple:
+ - if you're a first-time HAProxy user, just use the version provided by your
+ operating system. Just take a look at the "known bugs" section on the
+ haproxy.org web site to verify that it's not affected by bugs that could
+ have an impact for you.
+
+ - if you don't want or cannot use the version shipped with your operating
+ system, it is possible that other people (including the package maintainer)
+ provide alternate versions. This is the case for Debian and Ubuntu for
+ example, where you can choose your distribution and pick the branch you
+ need here: https://haproxy.debian.net/
+
+ - if you want to build with specific options, apply some patches, you'll
+ have to build from sources. If you have little experience or are not
+ certain to devote regular time to perform this task, take an "old" branch
+ (i.e. 1-2 years old max, for example 1.8 when 2.0 is emitted). You'll avoid
+ most bugs and will not have to work too often to update your local version.
+
+ - if you need a fresh version for application development, or to benefit from
+ latest improvements, take the most recent version of the most recent branch
+ and keep it up to date. You may even want to use the Git version or nightly
+ snapshots.
+
+ - if you want to develop on HAProxy, use the master from the Git tree.
+
+ - if you want to follow HAProxy's development by doing some tests without
+ the burden of entering too much into the development process, just use the
+ -dev versions of the master branch. At some point you'll feel the urge to
+ switch to the Git version anyway as it will ultimately simplify your work.
+
+ - if you're installing it on unmanaged servers with little to no hostile
+ exposure, or your home router, you should pick the latest version in one
+ of the oldest supported branches. While it doesn't guarantee that you will
+ never have to upgrade it, at least as long as you don't use too complex a
+ setup, it's unlikely that you will need to update it often.
+
+And as a general rule, do not put a non-LTS version on a server unless you are
+absolutely certain you are going to keep it up to date yourself and already
+plan to replace it once the following LTS version is issued. If you are not
+going to manage updates yourself, use pre-packaged versions exclusively and do
+not expect someone else to have to deal with the burden of building from
+sources.
diff --git a/BSDmakefile b/BSDmakefile
new file mode 100644
index 0000000..76a01af
--- /dev/null
+++ b/BSDmakefile
@@ -0,0 +1,10 @@
+# Makefile stub for FreeBSD, it checks BSDmakefile before Makefile so
+# we can be friendly to the user and tell them to use gmake.
+.BEGIN:
+ @echo "Please use GNU make instead. It is often called gmake on BSD systems."
+ @echo "Example:"
+ @echo ' gmake ${MAKEFLAGS} $(.TARGETS)' | sed -e 's, -J[ ]*[0-9,]*,,'
+ @echo
+
+all $(.TARGETS): .SILENT
+ @-
diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 0000000..6b4e1e6
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,21931 @@
+ChangeLog :
+===========
+
+2024/02/15 : 2.9.5
+ - BUG/MINOR: diag: always show the version before dumping a diag warning
+ - BUG/MINOR: diag: run the final diags before quitting when using -c
+ - BUG/MINOR: quic: Wrong ack ranges handling when reaching the limit.
+ - BUILD: quic: Variable name typo inside a BUG_ON().
+ - BUG/MINOR: ssl: Fix error message after ssl_sock_load_ocsp call
+ - CLEANUP: quic: Code clarifications for QUIC CUBIC (RFC 9438)
+ - BUG/MINOR: quic: fix possible integer wrap around in cubic window calculation
+ - MINOR: quic: Stop using 1024th of a second.
+ - MINOR: compiler: add a new DO_NOT_FOLD() macro to prevent code folding
+ - MINOR: debug: make sure calls to ha_crash_now() are never merged
+ - MINOR: debug: make ABORT_NOW() store the caller's line number when using abort
+ - BUILD: debug: remove leftover parentheses in ABORT_NOW()
+ - MINOR: debug: make BUG_ON() catch build errors even without DEBUG_STRICT
+ - BUG/MINOR: ssl: Duplicate ocsp update mode when dup'ing ckch
+ - BUG/MINOR: ssl: Clear the ckch instance when deleting a crt-list line
+ - MINOR: ssl: Use OCSP_CERTID instead of ckch_store in ckch_store_build_certid
+ - BUG/MEDIUM: ocsp: Separate refcount per instance and per store
+ - BUG/MINOR: ssl: Destroy ckch instances before the store during deinit
+ - BUG/MINOR: ssl: Reenable ocsp auto-update after an "add ssl crt-list"
+ - REGTESTS: ssl: Fix empty line in cli command input
+ - REGTESTS: ssl: Add OCSP related tests
+ - DOC: install: recommend pcre2
+ - DOC: config: fix misplaced "txn.conn_retries"
+ - DOC: config: fix typos for "bytes_{in,out}"
+ - DOC: config: fix misplaced "bytes_{in,out}"
+ - DOC: internal: update missing data types in peers-v2.0.txt
+ - BUG/MINOR: vars/cli: fix missing LF after "get var" output
+ - BUG/MEDIUM: cli: fix once for all the problem of missing trailing LFs
+ - CI: Update to actions/cache@v4
+ - BUILD: address a few remaining calloc(size, n) cases
+ - BUG/MEDIUM: pool: fix rare risk of deadlock in pool_flush()
+ - BUG/MEDIUM: ssl: Fix crash when calling "update ssl ocsp-response" when an update is ongoing
+ - BUG/MEDIUM: quic: Wrong K CUBIC calculation.
+ - MINOR: quic: Update K CUBIC calculation (RFC 9438)
+ - MINOR: quic: Dynamic packet reordering threshold
+ - MINOR: quic: Add a counter for reordered packets
+ - BUG/MEDIUM: stconn: Allow expiration update when READ/WRITE event is pending
+ - BUG/MEDIUM: stconn: Don't check pending shutdown to wake an applet up
+ - CLEANUP: stconn: Move SE flags set by app layer at the end of the bitfield
+ - MINOR: stconn: Rename SE_FL_MAY_FASTFWD and reorder bitfield
+ - MINOR: stconn: Add SE flag to announce zero-copy forwarding on consumer side
+ - MINOR: muxes: Announce support for zero-copy forwarding on consumer side
+ - BUG/MAJOR: stconn: Check support for zero-copy forwarding on both sides
+ - MINOR: muxes/applet: Simplify checks on options to disable zero-copy forwarding
+ - BUG/MEDIUM: mux-h2: Switch pending error to error if demux buffer is empty
+ - BUG/MEDIUM: mux-h2: Only Report H2C error on read error if demux buffer is empty
+ - BUG/MEDIUM: mux-h2: Don't report error on SE if error is only pending on H2C
+ - BUG/MEDIUM: mux-h2: Don't report error on SE for closed H2 streams
+
+2024/01/31 : 2.9.4
+ - BUG/MINOR: h3: fix checking on NULL Tx buffer
+ - DOC: configuration: fix set-dst in actions keywords matrix
+ - BUG/MEDIUM: mux-h2: refine connection vs stream error on headers
+ - MINOR: mux-h2/traces: add a missing trace on connection WU with negative inc
+ - BUG/MEDIUM: cli: some err/warn msg dumps add LR into CSV output on stat's CLI
+ - BUG/MINOR: jwt: fix jwt_verify crash on 32-bit archs
+ - BUG/MINOR: hlua: fix uninitialized var in hlua_core_get_var()
+ - BUG/MEDIUM: cache: Fix crash when deleting secondary entry
+ - BUG/MINOR: quic: newreno QUIC congestion control algorithm no more available
+ - CLEANUP: quic: Remove unused CUBIC_BETA_SCALE_FACTOR_SHIFT macro.
+ - MINOR: quic: Stop hardcoding a scale shifting value (CUBIC_BETA_SCALE_FACTOR_SHIFT)
+ - MINOR: quic: extract qc_stream_buf free in a dedicated function
+ - BUG/MEDIUM: quic: remove unsent data from qc_stream_desc buf
+ - DOC: configuration: clarify http-request wait-for-body
+ - BUG/MAJOR: ssl_sock: Always clear retry flags in read/write functions
+ - MINOR: h3: add traces for stream sending function
+ - BUG/MEDIUM: h3: do not crash on invalid response status code
+ - BUG/MEDIUM: qpack: allow 6xx..9xx status codes
+ - BUG/MEDIUM: quic: fix crash on invalid qc_stream_buf_free() BUG_ON
+ - BUG/MINOR: h1: Don't support LF only at the end of chunks
+ - BUG/MEDIUM: h1: Don't support LF only to mark the end of a chunk size
+ - DOC: httpclient: add dedicated httpclient section
+ - BUG/MINOR: h1-htx: properly initialize the err_pos field
+ - BUG/MEDIUM: h1: always reject the NUL character in header values
+
+2024/01/18 : 2.9.3
+ - BUILD: quic: missing include for quic_tp
+ - BUG/MINOR: mux-quic: do not prevent non-STREAM sending on flow control
+ - BUG/MINOR: mux-h2: also count streams for refused ones
+ - BUG/MEDIUM: quic: keylog callback not called (USE_OPENSSL_COMPAT)
+
+2024/01/11 : 2.9.2
+ - BUG/MINOR: resolvers: default resolvers fails when network not configured
+ - DOC: config: Update documentation about local haproxy response
+ - BUG/MINOR: server: Use the configured address family for the initial resolution
+ - BUG/MAJOR: stconn: Disable zero-copy forwarding if consumer is shut or in error
+ - MINOR: stats: store the parent proxy in stats ctx (http)
+ - BUG/MEDIUM: stats: unhandled switching rules with TCP frontend
+ - MINOR: server/event_hdl: add server_inetaddr struct to facilitate event data usage
+ - MINOR: server/event_hdl: update _srv_event_hdl_prepare_inetaddr prototype
+ - BUG/MINOR: server/event_hdl: propagate map port info through inetaddr event
+ - DOC: fix typo for fastfwd QUIC option
+ - BUG/MINOR: mux-quic: always report error to SC on RESET_STREAM emission
+ - BUG/MINOR: mux-quic: disable fast-fwd if connection on error
+ - BUG/MINOR: quic: Wrong keylog callback setting.
+ - BUG/MINOR: quic: Missing call to TLS message callbacks
+ - MINOR: h3: check connection error during sending
+ - BUG/MINOR: h3: close connection on header list too big
+ - MINOR: h3: add traces for connection init stage
+ - BUG/MINOR: h3: properly handle alloc failure on finalize
+ - BUG/MINOR: h3: close connection on sending alloc errors
+ - BUG/MINOR: h3: disable fast-forward on buffer alloc failure
+ - CI: use semantic version compare for determining "latest" OpenSSL
+ - MINOR: global: export a way to list build options
+ - MINOR: debug: add features and build options to "show dev"
+ - REGTESTS: check attach-srv out of order declaration
+ - CLEANUP: quic: Remaining useless code into server part
+ - BUILD: quic: Missing quic_ssl.h header protection
+ - BUG/MEDIUM: h3: fix incorrect snd_buf return value
+ - BUG/MEDIUM: stconn: Forward shutdown on write timeout only if it is forwardable
+ - BUG/MEDIUM: stconn: Set fsb date if zero-copy forwarding is blocked during nego
+ - BUG/MEDIUM: spoe: Never create new spoe applet if there is no server up
+ - MINOR: mux-h2: support limiting the total number of H2 streams per connection
+ - MINOR: ot: logsrv struct becomes logger
+ - MINOR: ssl: Update ssl_fc_curve/ssl_bc_curve to use SSL_get0_group_name
+ - CLEANUP: quic: Double quic_dgram_parse() prototype declaration.
+ - BUG/MINOR: map: list-based matching potential ordering regression
+ - REGTESTS: add a test to ensure map-ordering is preserved
+ - DOC: configuration: corrected description of keyword tune.ssl.ocsp-update.mindelay
+
+2023/12/15 : 2.9.1
+ - BUG/MINOR: ssl: Double free of OCSP Certificate ID
+ - MINOR: ssl/cli: Add ha_(warning|alert) msgs to CLI ckch callback
+ - BUG/MINOR: ssl: Wrong OCSP CID after modifying an SSL certificate
+ - BUG/MINOR: lua: Wrong OCSP CID after modifying an SSL certificate (LUA)
+ - DOC: configuration: typo req.ssl_hello_type
+ - BUG/MINOR: mworker/cli: fix set severity-output support
+ - BUG/MEDIUM: quic: Possible buffer overflow when building TLS records
+ - BUILD: ssl: update types in wolfssl cert selection callback
+ - BUG/MEDIUM: map/acl: pat_ref_{set,delete}_by_id regressions
+ - BUG/MINOR: ext-check: cannot use without preserve-env
+ - MINOR: version: mention that it's stable now
+ - BUG/MEDIUM: quic: QUIC CID removed from tree without locking
+ - BUG/MEDIUM: stconn: Block zero-copy forwarding if EOS/ERROR on consumer side
+ - BUG/MEDIUM: mux-h1: Count data from input buf during zero-copy forwarding
+ - BUG/MEDIUM: mux-h1: Explicitly skip request's C-L header if not set originally
+ - CLEANUP: mux-h1: Fix a trace message about C-L header addition
+ - BUG/MEDIUM: mux-h2: Report too large HEADERS frame only when rxbuf is empty
+ - BUG/MEDIUM: mux-quic: report early error on stream
+ - DOC: config: add arguments to sample fetch methods in the table
+ - DOC: config: also add arguments to the converters in the table
+
+2023/12/05 : 2.9.0
+ - DOC: config: add missing colon to "bytes_out" sample fetch keyword (2)
+ - BUG/MINOR: cfgparse-listen: fix warning being reported as an alert
+ - DOC: config: add matrix entry for "max-session-srv-conns"
+ - DOC: config: fix monitor-fail typo
+ - DOC: config: add context hint for proxy keywords
+ - DEBUG: stream: Report lra/fsb values for front end back SC in stream dump
+ - REGTESTS: sample: Test the behavior of consecutive delimiters for the field converter
+ - BUG/MINOR: sample: Make the `word` converter compatible with `-m found`
+ - DOC: Clarify the differences between field() and word()
+ - BUG/MINOR: server/event_hdl: properly handle AF_UNSPEC for INETADDR event
+ - BUILD: http_htx: silence uninitialized warning on some gcc versions
+ - MINOR: acme.sh: don't use '*' in the filename for wildcard domain
+ - MINOR: global: Use a dedicated bitfield to customize zero-copy fast-forwarding
+ - MINOR: mux-pt: Add global option to enable/disable zero-copy forwarding
+ - MINOR: mux-h1: Add global option to enable/disable zero-copy forwarding
+ - MINOR: mux-h2: Add global option to enable/disable zero-copy forwarding
+ - MINOR: mux-quic: Add global option to enable/disable zero-copy forwarding
+ - MINOR: mux-quic: Disable zero-copy forwarding for send by default
+ - DOC: config: update the reminder on the HTTP model and add some terminology
+ - DOC: config: add a few more differences between HTTP/1 and 2+
+ - DOC: config: clarify session vs stream
+ - DOC: config: fix typo abandonned -> abandoned
+ - DOC: management: fix two latest typos (optionally, exception)
+ - BUG/MEDIUM: peers: fix partial message decoding
+ - DOC: management: update stream vs session
+
+2023/11/30 : 2.9-dev12
+ - BUG/MINOR: global: Fix tune.disable-(fast-forward/zero-copy-forwarding) options
+ - DOC: config: removing "log-balance" references
+ - MINOR: server/event_hdl: add SERVER_INETADDR event
+ - MINOR: tools: use const for read only pointers in ip{cmp,cpy}
+ - MINOR: server/ip: centralize server ip updates
+ - MINOR: backend: remove invalid mode test for "hash-balance-factor"
+ - Revert "MINOR: cfgparse-listen: warn when use-server rules is used in wrong mode"
+ - MINOR: proxy: add free_logformat_list() helper function
+ - MINOR: proxy: add free_server_rules() helper function
+ - MINOR: log/backend: prevent "use-server" rules use with LOG mode
+ - MINOR: log/balance: set lbprm tot_weight on server on queue/dequeue
+ - DOC: config: specify supported sections for "max-session-srv-conns"
+ - DOC: config: fix timeout check inheritance restrictions
+ - REGTESTS: connection: disable http_reuse_be_transparent.vtc if !TPROXY
+ - DOC: lua: add sticktable class reference from Proxy.stktable
+ - DOC: lua: fix Proxy.get_mode() output
+ - DOC: lua: add "syslog" to Proxy.get_mode() output
+ - MEDIUM: ssl: implement rsa/ecdsa selection with WolfSSL
+ - MINOR: ssl: replace 'trash.area' by 'servername' in ssl_sock_switchctx_cbk()
+ - MINOR: ssl: move certificate selection in a dedicate function
+ - MEDIUM: ssl: use ssl_sock_chose_sni_ctx() in the clienthello callback
+ - MINOR: mworker/cli: implement hard-reload over the master CLI
+ - BUG/MEDIUM: mux-h1: Properly ignore trailers when a content-length is announced
+ - MINOR: task/profiling: do not record task_drop_running() as a caller
+ - OPTIM: pattern: save memory and time using ebst instead of ebis
+ - BUILD: map: fix build warning
+ - MINOR: trace: define simple -dt argument
+ - MINOR: trace: parse level in a function
+ - MINOR: trace: parse verbosity in a function
+ - MINOR: trace: support -dt optional format
+ - OPTIM: mux-h2/zero-copy: don't allocate more buffers per connections than streams
+ - BUG/MINOR: quic: fix CONNECTION_CLOSE_APP encoding
+ - BUG/MEDIUM: stconn: Don't perform zero-copy FF if opposite SC is blocked
+ - BUG/MEDIUM: mux-h2: Remove H2_SF_NOTIFIED flag for H2S blocked on fast-forward
+ - CLEANUP: quic: Remove dead definitions/declarations
+ - REORG: quic: Move some QUIC CLI code to its C file
+ - REORG: quic: Add a new module to handle QUIC connection IDs
+ - REORG: quic: QUIC connection types header cleaning
+ - BUILD: quic: Missing RX header inclusions
+ - REORG: quic: Move CRYPTO data buffer definitions to QUIC TLS module
+ - REORG: quic: Move QUIC CRYPTO stream definitions/declarations to QUIC TLS
+ - REORG: quic: Move several inlined functions from quic_conn.h
+ - REORG: quic: Move QUIC SSL BIO method related functions to quic_ssl.c
+ - REORG: quic: Move the QUIC DCID parser to quic_sock.c
+ - REORG: quic: Rename some functions used upon ACK receipt
+ - REORG: quic: Move QUIC path definitions/declarations to quic_cc module
+ - REORG: quic: Move qc_handle_conn_migration() to quic_conn.c
+ - REORG: quic: Move quic_build_post_handshake_frames() to quic_conn module
+ - REORG: quic: Move qc_may_probe_ipktns() to quic_tls.h
+ - REORG: quic: Move qc_pkt_long() to quic_rx.h
+ - REORG: quic: Rename some (quic|qc)_conn* objects to quic_conn_closed
+ - REORG: quic: Move NEW_CONNECTION_ID frame builder to quic_cid
+ - REORG: quic: Move ncbuf related function from quic_rx to quic_conn
+ - REORG: quic: Add a new module for QUIC retry
+ - BUILD: quic: Several compiler warns fixes after retry module creation
+ - REORG: quic: Move qc_notify_send() to quic_conn
+ - REORG: quic: Add a new module for retransmissions
+ - REORG: quic: Remove qc_pkt_insert() implementation
+ - REORG: quic: Move quic_increment_curr_handshake() to quic_sock
+ - BUG/MINOR: cache: Remove incomplete entries from the cache when stream is closed
+ - MEDIUM: cli: allow custom pattern for payload
+ - CLEANUP: mworker/cli: use a label to return errors
+ - MINOR: mworker/cli: implements the customized payload pattern for master CLI
+ - DOC: management: add documentation about customized payload pattern
+ - BUG/MEDIUM: server/event_hdl: memory overrun in _srv_event_hdl_prepare_inetaddr()
+ - MINOR: event_hdl: add global tunables
+ - BUG/MAJOR: server/addr: fix a race during server addr:svc_port updates
+ - MEDIUM: log/balance: support FQDN for UDP log servers
+ - BUG/MINOR: compression: possible NULL dereferences in comp_prepare_compress_request()
+ - BUG/MEDIUM: master/cli: Properly pin the master CLI on thread 1 / group 1
+ - BUG/MEDIUM: mux-quic: Stop zero-copy FF during nego if input is not empty
+ - CLEANUP: log: Fix %rc comment in sess_build_logline()
+ - BUG/MINOR: h3: fix TRAILERS encoding
+ - BUG/MINOR: h3: always reject PUSH_PROMISE
+ - MINOR: h3: use correct error code for missing SETTINGS
+ - MINOR: http-fetch: Add a sample to retrieve the server status code
+ - DOC: config: Improve 'status' sample documentation
+ - MINOR: http-fetch: Add a sample to get the transaction status code
+ - MEDIUM: http-ana: Set termination state before returning haproxy response
+ - MINOR: stream: Expose session terminate state via a new sample fetch
+ - MINOR: stream: add a sample fetch to get the number of connection retries
+ - MINOR: stream: Expose the stream's uniq_id via a new sample fetch
+ - MINOR: muxes: Rename mux_ctl_type values to use MUX_CTL_ prefix
+ - MINOR: muxes: Add a callback function to send commands to mux streams
+ - MINOR: muxes: Implement ->sctl() callback for muxes and return the stream id
+ - MINOR: Add sample fetches to get the frontend and backend stream ID
+ - BUG/MEDIUM: cli: Don't look for payload pattern on empty commands
+ - DOC: config: Add argument for tune.lua.maxmem
+ - DOC: config: fix mention of request slot in http-response capture
+ - DOC: config: fix remaining mention of @reverse for attach-srv action
+ - DOC: config: fix missing characters in set-spoe-group action
+ - DOC: config: reorganize actions into their own section
+ - BUG/MINOR: acme.sh: update the deploy script
+ - MINOR: rhttp: mark reverse HTTP as experimental
+ - CLEANUP: quic_cid: remove unused listener arg
+ - BUG/MINOR: quic_tp: fix preferred_address decoding
+ - MINOR: quic_tp: use in_addr/in6_addr for preferred_address
+ - MINOR: acme.sh: use the master CLI for hot update
+ - DOC: config: move the cache-use and cache-store actions to the proper section
+ - DOC: config: fix alphabetical ordering of converter keywords
+ - DOC: config: add missing colon to "bytes_out" sample fetch keyword
+ - DOC: config: add an index of converter keywords
+ - DOC: config: add an index of sample fetch keywords
+ - BUG/MINOR: config: Stopped parsing upon unmatched environment variables
+ - DEBUG: unstatify a few functions that are often present in backtraces
+ - BUILD: server: shut a bogus gcc warning on certain ubuntu
+
+2023/11/24 : 2.9-dev11
+ - BUG/MINOR: startup: set GTUNE_SOCKET_TRANSFER correctly
+ - BUG/MINOR: sock: mark abns sockets as non-suspendable and always unbind them
+ - BUILD: cache: fix build error on older compilers
+ - BUG/MAJOR: quic: complete thread migration before tcp-rules
+ - BUG/MEDIUM: quic: Possible crash for connections to be killed
+ - MINOR: quic: remove unneeded QUIC specific stopping function
+ - MINOR: acl: define explicit HTTP_3.0
+ - DEBUG: connection/flags: update flags for reverse HTTP
+ - BUILD: log: silence a build warning when threads are disabled
+ - MINOR: quic: Add traces to debug frames handling during retransmissions
+ - BUG/MEDIUM: quic: Possible crash during retransmissions and heavy load
+ - BUG/MINOR: quic: Possible leak of TX packets under heavy load
+ - BUG/MINOR: quic: Possible RX packet memory leak under heavy load
+ - BUG/MINOR: server: do not leak default-server in defaults sections
+ - DEBUG: tinfo: store the pthread ID and the stack pointer in tinfo
+ - MINOR: debug: start to create a new struct post_mortem
+ - MINOR: debug: add OS/hardware info to the post_mortem struct
+ - MINOR: debug: report in post_mortem whether a container was detected
+ - MINOR: debug: report in post_mortem if the container techno used is docker
+ - MINOR: debug: detect CPU model and store it in post_mortem
+ - MINOR: debug: report any detected hypervisor in post_mortem
+ - MINOR: debug: collect some boot-time info related to the process
+ - MINOR: debug: copy the thread info into the post_mortem struct
+ - MINOR: debug: dump the mapping of the libs into post_mortem
+ - MINOR: debug: add the ability to enter components in the post_mortem struct
+ - MINOR: init: add info about the main program to the post_mortem struct
+ - DOC: management: document "show dev"
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: limit codespell checks to main repo, not forks
+ - DOC: 51d: updated 51Degrees repo URL for v3.2.10
+ - DOC: install: update the list of openssl versions
+ - MINOR: ext-check: add an option to preserve environment variables
+ - BUG/MEDIUM: mux-h1: Don't set CO_SFL_MSG_MORE flag on last fast-forward send
+ - MINOR: rhttp: rename proto_reverse_connect
+ - MINOR: rhttp: large renaming to use rhttp prefix
+ - MINOR: rhttp: add count of active conns per thread
+ - MEDIUM: rhttp: support multi-thread active connect
+ - MINOR: listener: allow thread kw for rhttp bind
+ - DOC: rhttp: replace maxconn by nbconn
+ - MINOR: log/balance: rename "log-sticky" to "sticky"
+ - MEDIUM: mux-quic: Add consumer-side fast-forwarding support
+ - MAJOR: h3: Implement zero-copy support to send DATA frame
+
+2023/11/18 : 2.9-dev10
+ - CLEANUP: Re-apply xalloc_size.cocci (3)
+ - BUG/MEDIUM: stconn: Report send activity during mux-to-mux fast-forward
+ - BUG/MEDIUM: stconn: Don't report rcv/snd expiration date if SC cannot expire
+ - MINOR: stconn: Don't queue stream task in past in sc_notify()
+ - BUG/MEDIUM: Don't apply a max value on room_needed in sc_need_room()
+ - BUG/MINOR: stconn: Sanitize report for read activity
+ - CLEANUP: htx: Properly indent htx_reserve_max_data() function
+ - DOC: stconn: Improve comments about lra and fsb usage
+ - BUG/MEDIUM: quic: fix actconn on quic_conn alloc failure
+ - BUG/MEDIUM: quic: fix sslconns on quic_conn alloc failure
+ - BUG/MEDIUM: mux-h1: Be sure xprt support splicing to use it during fast-forward
+ - MINOR: proto_reverse_connect: use connect timeout
+ - BUG/MINOR: mux-h1: Release empty ibuf during data fast-forwarding
+ - BUG/MINOR: stick-table/cli: Check for invalid ipv4 key
+ - MEDIUM: stktable/cli: simplify entry key handling
+ - MINOR: stktable/cli: support v6tov4 and v4tov6 conversions
+ - BUG/MINOR: mux-h1: Properly handle http-request and http-keep-alive timeouts
+ - BUG/MEDIUM: freq-ctr: Don't report overshoot for long inactivity period
+ - BUG/MEDIUM: pool: fix releasable pool calculation when overloaded
+ - BUG/MINOR: pool: check one other random bucket on alloc conflict
+ - BUG/MEDIUM: pool: try once to allocate from another bucket if empty
+ - MEDIUM: stconn/muxes: Loop on data fast-forwarding to forward at least a buffer
+ - MINOR: stconn/mux-h2: Use a iobuf flag to report EOI to consumer side during FF
+ - MEDIUM: quic: Heavy task mode during handshake
+ - MEDIUM: quic: Heavy task mode with non contiguously bufferized CRYPTO data
+ - MINOR: quic: release the TLS context asap from quic_conn_release()
+ - MINOR: quic: Add idle timer task pointer to traces
+ - BUG/MINOR: quic: idle timer task requeued in the past
+ - CLEANUP: quic: Indentation fix in qc_do_build_pkt()
+ - MINOR: quic: Avoid zeroing frame structures
+ - BUG/MEDIUM: quic: Too short Initial packet sent (enc. level allocation failed)
+ - BUG/MEDIUM: quic: Avoid trying to send ACK frames from an empty ack ranges tree
+ - BUG/MEDIUM: quic: Possible crashes when sending too short Initial packets
+ - BUG/MEDIUM: quic: Avoid some crashes upon TX packet allocation failures
+ - BUG/MEDIUM: quic: Possible crashes during secrets allocations (heavy load)
+ - BUG/MEDIUM: stconn: Don't update stream expiration date if already expired
+ - MINOR: errors: ha_alert() and ha_warning() uses warn_exec_path()
+ - MINOR: errors: does not check MODE_STARTING for log emission
+ - MEDIUM: errors: move the MODE_QUIET test in print_message()
+ - DOC: management: -q is quiet all the time
+ - MEDIUM: mworker: -W is mandatory when using -S
+ - BUG/MEDIUM: mux-h1: Exit early if fast-forward is not supported by opposite SC
+ - MEDIUM: quic: adjust address validation
+ - MINOR: quic: reduce half open counters scope
+ - MEDIUM: quic: limit handshake per listener
+ - MEDIUM: quic: define an accept queue limit
+ - BUG/MINOR: quic: fix retry token check inconsistency
+ - MINOR: task/debug: explicitly support passing a null caller to wakeup functions
+ - MINOR: task/debug: make task_queue() and task_schedule() possible callers
+ - OPTIM: mux-h2: don't allocate more buffers per connections than streams
+ - BUG/MINOR: quic: remove dead code in error path
+ - MEDIUM: quic: respect closing state even on soft-stop
+ - MEDIUM: quic: release conn socket before using quic_cc_conn
+ - DOC: config: use the word 'backend' instead of 'proxy' in 'track' description
+ - BUG/MEDIUM: applet: Remove appctx from buffer wait list on release
+ - MINOR: tools: make str2sa_range() directly return type hints
+ - BUG/MEDIUM: server: invalid address (post)parsing checks
+ - BUG/MINOR: sink: don't learn srv port from srv addr
+ - CLEANUP: sink: bad indent in sink_new_from_logger()
+ - CLEANUP: sink: useless leftover in sink_add_srv()
+ - BUG/MINOR: quic: Useless use of non-contiguous buffer for in order CRYPTO data
+ - MINOR: server: always initialize pp_tlvs for default servers
+ - BUG/MEDIUM: proxy: always initialize the default settings after init
+ - MEDIUM: startup: 'haproxy -c' is quiet when valid
+ - BUG/MINOR: sample: Fix bytes converter if offset is bigger than sample length
+ - BUG/MINOR: log: keep the ref in dup_logger()
+ - BUG/MINOR: quic: fix crash on qc_new_conn alloc failure
+ - BUG/MINOR: quic: fix decrement of half_open counter on qc alloc failure
+ - BUG/MEDIUM: quic: fix FD for quic_cc_conn
+ - DOC: config: Fix name for tune.disable-zero-copy-forwarding global param
+ - REGTESTS: startup: -conf-OK requires -V with current VTest
+ - BUG/MEDIUM: quic: Non initialized CRYPTO data stream dereferencing
+ - MINOR: quic: Add a max window parameter to congestion control algorithms
+ - MINOR: quic: Maximum congestion control window for each algo
+ - DOC: quic: Wrong syntax for "quic-cc-algo" keyword.
+ - DOC: quic: Maximum congestion control window configuration
+ - BUG/MINOR: quic: maximum window limits do not match the doc
+ - BUG/MEDIUM: connection: report connection errors even when no mux is installed
+ - BUG/MINOR: stconn: Handle abortonclose if backend connection was already set up
+ - MINOR: connection: Add a CTL flag to notify mux it should wait for reads again
+ - MEDIUM: mux-h1: Handle MUX_SUBS_RECV flag in h1_ctl() and subscribe for reads
+ - BUG/MEDIUM: stream: Properly handle abortonclose when set on backend only
+ - MINOR: stconn: Use SC to detect frontend connections in sc_conn_recv()
+ - REGTESTS: http: Improve script testing abortonclose option
+ - MINOR: activity: report profiling duration and age in "show profiling"
+ - BUG/MEDIUM: mworker: set the master variable earlier
+ - BUG/MEDIUM: stream: Don't call mux .ctl() callback if not implemented
+ - MINOR: connection: update rhttp flags usage
+ - BUG/MINOR: mux_h2: reject passive reverse conn if error on add to idle
+ - MINOR: server: force add to idle on reverse
+ - MINOR: shctx: Set last_append to NULL when reserving block in hot list
+ - MEDIUM: shctx: Move list between hot and avail list in O(1)
+ - MEDIUM: shctx: Simplify shctx_row_reserve_hot loop
+ - MINOR: shctx: Remove explicit 'from' param from shctx_row_data_append
+ - MEDIUM: cache: Use dedicated cache tree lock alongside shctx lock
+ - MINOR: cache: Remove expired entry delete in "show cache" command
+ - MINOR: cache: Add option to avoid removing expired entries in lookup function
+ - MEDIUM: cache: Use rdlock on cache in cache_use
+ - MEDIUM: shctx: Remove 'hot' list from shared_context
+ - MINOR: cache: Use dedicated trash for "show cache" cli command
+ - MEDIUM: cache: Switch shctx spinlock to rwlock and restrict its scope
+ - MEDIUM: cache: Add refcount on cache_entry
+ - MEDIUM: shctx: Descend shctx_lock calls into the shctx_row_reserve_hot
+ - MINOR: shctx: Add new reserve_finish callback call to shctx_row_reserve_hot
+ - MAJOR: cache: Delay cache entry delete in reserve_hot function
+ - MINOR: shctx: Remove redundant arg from free_block callback
+ - MINOR: shctx: Remove 'use_shared_mem' variable
+ - DOC: cache: Specify when function expects a cache lock
+ - BUG/MEDIUM: stconn: Update fsb date on partial sends
+ - MINOR: htx: Use a macro for overhead induced by HTX
+ - MINOR: channel: Add functions to get info on buffers and deal with HTX streams
+ - BUG/MINOR: stconn: Fix streamer detection for HTX streams
+ - BUG/MINOR: stconn: Use HTX-aware channel's functions to get info on buffer
+ - BUG/MINOR: stconn/applet: Report send activity only if there was output data
+ - BUG/MINOR: stconn: Report read activity on non-indep streams for partial sends
+ - BUG/MINOR: shctx: Remove old HA_SPIN_INIT
+ - REGTESTS: try to activate again the seamless reload test with the master CLI
+ - MINOR: proxy: Add "handshake" new timeout (frontend side)
+ - MEDIUM: quic: Add support for "handshake" timeout setting.
+ - MINOR: quic: Dump the expiration date of the idle timer task
+ - BUG/MINOR: quic: Malformed CONNECTION_CLOSE frame
+ - MEDIUM: session: handshake timeout (TCP)
+ - DOC: proxy: Add "handshake" timeout documentation.
+ - MINOR: quic: Rename "handshake" timeout to "client-hs"
+ - CLEANUP: haproxy: remove old comment from 1.1 from the file header
+ - BUG/MEDIUM: mux-h2: fail earlier on malloc in takeover()
+ - BUG/MEDIUM: mux-h1: fail earlier on malloc in takeover()
+ - BUG/MEDIUM: mux-fcgi: fail earlier on malloc in takeover()
+ - MINOR: rhttp: remove the unused outgoing connect() function
+ - MINOR: backend: without ->connect(), allow to pick another thread's connection
+ - BUG/MINOR: stream/cli: report correct stream age in "show sess"
+ - MINOR: stream/cli: add an optional "older" filter for "show sess"
+ - MINOR: stream/cli: add another filter "susp" to "show sess"
+ - MINOR: stktable: add stktable_deinit function
+ - BUG/MINOR: proxy/stktable: missing frees on proxy cleanup
+ - CLEANUP: backend: removing unused LB param
+ - MEDIUM: lbprm: store algo params on 32bits
+ - MEDIUM: log/balance: merge tcp/http algo with log ones
+ - Revert "MINOR: proxy: report a warning for max_ka_queue in proxy_cfg_ensure_no_http()"
+ - Revert "MINOR: tcp_rules: tcp-{request,response} requires TCP or HTTP mode"
+ - Revert "MINOR: stktable: "stick" requires TCP or HTTP mode"
+ - Revert "MINOR: cfgparse-listen: "http-send-name-header" requires TCP or HTTP mode"
+ - Revert "MINOR: cfgparse-listen: "dynamic-cookie-key" requires TCP or HTTP mode"
+ - Revert "MINOR: cfgparse-listen: "http-reuse" requires TCP or HTTP mode"
+ - Revert "MINOR: fcgi-app: "use-fcgi-app" requires TCP or HTTP mode"
+ - Revert "MINOR: http_htx/errors: prevent the use of some keywords when not in tcp/http mode"
+ - Revert "MINOR: flt_http_comp: "compression" requires TCP or HTTP mode"
+ - Revert "MINOR: filter: "filter" requires TCP or HTTP mode"
+ - MINOR: log/backend: ensure log exclusive params are not used in other modes
+ - MINOR: log/backend: prevent tcp-{request,response} use with LOG mode
+ - MINOR: log/backend: prevent stick table and stick rules with LOG mode
+ - MINOR: log/backend: prevent "http-send-name-header" use with LOG mode
+ - MINOR: log/backend: prevent "dynamic-cookie-key" use with LOG mode
+ - REGTESTS: http: add a test to validate chunked responses delivery
+
+2023/11/04 : 2.9-dev9
+ - DOC: internal: filters: fix reference to entities.pdf
+ - BUG/MINOR: ssl: load correctly @system-ca when ca-base is define
+ - MINOR: lua: Add flags to configure logging behaviour
+ - MINOR: lua: change tune.lua.log.stderr default from 'on' to 'auto'
+ - BUG/MINOR: backend: fix wrong BUG_ON for avail conn
+ - BUG/MAJOR: backend: fix idle conn crash under low FD
+ - MINOR: backend: refactor insertion in avail conns tree
+ - DEBUG: mux-h2/flags: fix list of h2c flags used by the flags decoder
+ - BUG/MEDIUM: server/log: "mode log" after server keyword causes crash
+ - MINOR: connection: add conn_pr_mode_to_proto_mode() helper func
+ - BUG/MEDIUM: server: "proto" not working for dynamic servers
+ - MINOR: server: add helper function to detach server from proxy list
+ - DEBUG: add a tainted flag when ha_panic() is called
+ - DEBUG: lua: add tainted flags for stuck Lua contexts
+ - DEBUG: pools: detect that malloc_trim() is in progress
+ - BUG/MINOR: quic: do not consider idle timeout on CLOSING state
+ - MINOR: frontend: implement a dedicated actconn increment function
+ - BUG/MINOR: ssl: use a thread-safe sslconns increment
+ - MEDIUM: quic: count quic_conn instance for maxconn
+ - MEDIUM: quic: count quic_conn for global sslconns
+ - BUG/MINOR: ssl: suboptimal certificate selection with TLSv1.3 and dual ECDSA/RSA
+ - REGTESTS: ssl: update the filters test for TLSv1.3 and sigalgs
+ - BUG/MINOR: mux-quic: fix early close if unset client timeout
+ - BUG/MEDIUM: ssl: segfault when cipher is NULL
+ - BUG/MINOR: tcpcheck: Report hexstring instead of binary one on check failure
+ - MEDIUM: systemd: be more verbose about the reload
+ - MINOR: sample: Add fetcher for getting all cookie names
+ - BUG/MINOR: proto_reverse_connect: support SNI on active connect
+ - MINOR: proxy/stktable: add resolve_stick_rule helper function
+ - BUG/MINOR: stktable: missing free in parse_stick_table()
+ - BUG/MINOR: cfgparse/stktable: fix error message on stktable_init() failure
+ - MINOR: stktable: stktable_init() sets err_msg on error
+ - MINOR: stktable: check if a type should be used as-is
+ - MEDIUM: stktable/peers: "write-to" local table on peer updates
+ - CI: github: update wolfSSL to 5.6.4
+ - DOC: install: update the wolfSSL required version
+ - MINOR: server: Add parser support for set-proxy-v2-tlv-fmt
+ - MINOR: connection: Send out generic, user-defined server TLVs
+ - BUG/MEDIUM: pattern: don't trim pools under lock in pat_ref_purge_range()
+ - MINOR: mux-h2: always use h2_send() in h2_done_ff(), not h2_process()
+ - OPTIM: mux-h2: call h2_send() directly from h2_snd_buf()
+ - BUG/MINOR: server: remove some incorrect free() calls on null elements
+
+2023/10/20 : 2.9-dev8
+ - MINOR: ssl: add an explicit error when 'ciphersuites' are not supported
+ - BUILD: ssl: enable 'ciphersuites' for WolfSSL
+ - BUILD: ssl: add 'ssl_c_r_dn' fetch for WolfSSL
+ - BUILD: ssl: add 'secure_memcmp' converter for WolfSSL and awslc
+ - BUILD: ssl: enable keylog for awslc
+ - CLEANUP: ssl: remove compat functions for openssl < 1.0.0
+ - BUILD: ssl: enable keylog for WolfSSL
+ - REGTESTS: pki: add a pki for SSL tests
+ - REGTESTS: ssl: update common.pem with the new pki
+ - REGTESTS: ssl: disable ssl_dh.vtc for WolfSSL
+ - REGTESTS: wolfssl: temporarily disable some failing reg-tests
+ - CI: ssl: add wolfssl to build-ssl.sh
+ - CI: ssl: add git id support for wolfssl download
+ - CI: github: add a wolfssl entry to the CI
+ - CI: github: update wolfssl to git revision d83f2fa
+ - CI: github: add awslc 1.16.0 to the push CI
+ - BUG/MINOR: quic: Avoid crashing with unsupported cryptographic algos
+ - REORG: quic: cleanup traces definition
+ - BUG/MINOR: quic: reject packet with no frame
+ - BUG/MEDIUM: mux-quic: fix RESET_STREAM on send-only stream
+ - BUG/MINOR: mux-quic: support initial 0 max-stream-data
+ - BUG/MINOR: h3: strengthen host/authority header parsing
+ - CLEANUP: connection: drop an unneeded leftover cast
+ - BUG/MAJOR: connection: make sure to always remove a connection from the tree
+ - BUG/MINOR: quic: fix qc.cids access on quic-conn fail alloc
+ - BUG/MINOR: quic: fix free on quic-conn fail alloc
+ - BUG/MINOR: mux-quic: fix free on qcs-new fail alloc
+ - BUG/MEDIUM: quic-conn: free unsent frames on retransmit to prevent crash
+ - MEDIUM: tree-wide: logsrv struct becomes logger
+ - MEDIUM: log: introduce log target
+ - DOC: config: log <address> becomes log <target> in "log" related doc
+ - MEDIUM: sink/log: stop relying on AF_UNSPEC for rings
+ - MINOR: log: support explicit log target as argument in __do_send_log()
+ - MINOR: log: remove the logger dependency in do_send_log()
+ - MEDIUM: log/sink: simplify log header handling
+ - MEDIUM: sink: inherit from caller fmt in ring_write() when rings didn't set one
+ - MINOR: sink: add sink_new_from_srv() function
+ - MAJOR: log: introduce log backends
+ - MINOR: log/balance: support for the "sticky" lb algorithm
+ - MINOR: log/balance: support for the "random" lb algorithm
+ - MINOR: lbprm: support for the "none" hash-type function
+ - MINOR: lbprm: compute the hash avalanche in gen_hash()
+ - MINOR: sample: add sample_process_cnv() function
+ - MEDIUM: log/balance: support for the "hash" lb algorithm
+ - REGTEST: add a test for log-backend used as a log target
+ - MINOR: server: introduce "log-bufsize" kw
+ - BUG/MEDIUM: stconn: Report a send activity every time data were sent
+ - BUG/MEDIUM: applet: Report a send activity every time data were sent
+ - BUG/MINOR: mux-h1: Send a 400-bad-request on shutdown before the first request
+ - MINOR: support for http-response set-timeout
+ - BUG/MINOR: mux-h2: make up other blocked streams upon removal from list
+ - DEBUG: pool: store the memprof bin on alloc() and update it on free()
+ - BUG/MEDIUM: quic_conn: let the scheduler kill the task when needed
+ - CLEANUP: hlua: Remove dead-code on error path in hlua_socket_new()
+ - BUG/MEDIUM: mux-h1: do not forget TLR/EOT even when no data is sent
+ - BUG/MINOR: http-ana/stats: Specify that HTX redirect messages have a C-L header
+ - BUG/MEDIUM: mux-h2: Don't report an error on shutr if a shutw is pending
+ - MEDIUM: stconn/channel: Move pipes used for the splicing in the SE descriptors
+ - MINOR: stconn: Start to introduce mux-to-mux fast-forwarding notion
+ - MINOR: stconn: Extend iobuf to handle a buffer in addition to a pipe
+ - MINOR: connection: Add new mux callbacks to perform data fast-forwarding
+ - MINOR: stconn: Temporarily remove kernel splicing support
+ - MINOR: mux-pt: Temporarily remove splicing support
+ - MINOR: mux-h1: Temporarily remove splicing support
+ - MINOR: connection: Remove mux callbacks about splicing
+ - MEDIUM: stconn: Add mux-to-mux fast-forward support
+ - MINOR: mux-h1: Use HTX extra field only for responses with known length
+ - MEDIUM: mux-h1: Properly handle state transitions of chunked outgoing messages
+ - MEDIUM: raw-sock: Specify amount of data to send via snd_pipe callback
+ - MINOR: mux-h1: Add function to add size of a chunk to an outgoing message
+ - MEDIUM: mux-h1: Simplify zero-copy on sending path
+ - MEDIUM: mux-h1: Simplify payload formatting based on HTX blocks on sending path
+ - MEDIUM: mux-h1: Add fast-forwarding support
+ - MINOR: h2: Set the BODYLESS_RESP flag on the HTX start-line if necessary
+ - MEDIUM: mux-h2: Add consumer-side fast-forwarding support
+ - MEDIUM: channel: don't look at iobuf to report an empty channel
+ - MINOR: tree-wide: Only rely on co_data() to check channel emptyness
+ - REGTESTS: Reenable HTTP tests about splicing
+ - CLEAN: mux-h1: Remove useless __maybe_unused attribute on h1_make_chunk()
+ - MEDIUM: mux-pt: Add fast-forwarding support
+ - MINOR: global: Add an option to disable the zero-copy forwarding
+ - BUILD: mux-h1: Fix build without kernel splicing support
+ - REORG: stconn/muxes: Rename init step in fast-forwarding
+ - MINOR: dgram: allow to set rcv/sndbuf for dgram sockets as well
+ - BUG/MINOR: mux-h2: fix http-request and http-keep-alive timeouts again
+ - BUG/MINOR: trace: fix trace parser error reporting
+ - BUG/MEDIUM: peers: Be sure to always refresh reconnect timer in sync task
+ - BUG/MEDIUM: peers: Fix synchro for huge number of tables
+ - MINOR: cfgparse: forbid mixing reverse and standard listeners
+ - MINOR: listener: add nbconn kw for reverse connect
+ - MINOR: server: convert @reverse to rev@ standard format
+ - MINOR: cfgparse: rename "rev@" prefix to "rhttp@"
+ - REGTESTS: remove maxconn from rhttp bind line
+ - MINOR: listener: forbid most keywords for reverse HTTP bind
+ - MINOR: sample: Added support for Arrays in sample_conv_json_query in sample.c
+ - MINOR: mux-h2/traces: explicitly show the error/refused stream states
+ - MINOR: mux-h2/traces: clarify the "rejected H2 request" event
+ - BUG/MINOR: mux-h2: commit the current stream ID even on reject
+ - BUG/MINOR: mux-h2: update tracked counters with req cnt/req err
+
+2023/10/06 : 2.9-dev7
+ - MINOR: support for http-request set-timeout client
+ - BUG/MINOR: mux-quic: remove full demux flag on ncbuf release
+ - CLEANUP: freq_ctr: make all freq_ctr readers take a const
+ - CLEANUP: stream: make the dump code not depend on the CLI appctx
+ - MINOR: stream: split stats_dump_full_strm_to_buffer() in two
+ - CLEANUP: stream: use const filters in the dump function
+ - CLEANUP: stream: make strm_dump_to_buffer() take a const stream
+ - MINOR: stream: make strm_dump_to_buffer() take an arbitrary buffer
+ - MINOR: stream: make strm_dump_to_buffer() show the list of filters
+ - MINOR: stream: make stream_dump() always multi-line
+ - MINOR: streams: add support for line prefixes to strm_dump_to_buffer()
+ - MEDIUM: stream: now provide full stream dumps in case of loops
+ - MINOR: debug: use the more detailed stream dump in panics
+ - CLEANUP: stream: remove the now unused stream_dump() function
+ - Revert "BUG/MEDIUM: quic: missing check of dcid for init pkt including a token"
+ - MINOR: stream: fix output alignment of stuck thread dumps
+ - BUG/MINOR: proto_reverse_connect: fix FD leak on connection error
+ - BUG/MINOR: tcp_act: fix attach-srv rule ACL parsing
+ - MINOR: connection: define error for reverse connect
+ - MINOR: connection: define mux flag for reverse support
+ - MINOR: tcp_act: remove limitation on protocol for attach-srv
+ - BUG/MINOR: proto_reverse_connect: fix FD leak upon connect
+ - BUG/MAJOR: plock: fix major bug in pl_take_w() introduced with EBO
+ - Revert "MEDIUM: sample: Small fix in function check_operator for eror reporting"
+ - DOC: sample: Add a comment in 'check_operator' to explain why 'vars_check_arg' should ignore the 'err' buffer
+ - DEV: sslkeylogger: handle file opening error
+ - MINOR: quic: define quic-socket bind setting
+ - MINOR: quic: handle perm error on bind during runtime
+ - MINOR: backend: refactor specific source address allocation
+ - MINOR: proto_reverse_connect: support source address setting
+ - BUILD: pool: Fix GCC error about potential null pointer dereference
+ - MINOR: hlua: Set context's appctx when the lua socket is created
+ - MINOR: hlua: Don't perform operations on a not connected socket
+ - MINOR: hlua: Save the lua socket's timeout in its context
+ - MINOR: hlua: Save the lua socket's server in its context
+ - MINOR: hlua: Test the hlua struct first when the lua socket is connecting
+ - BUG/MEDIUM: hlua: Initialize appctx used by a lua socket on connect only
+ - DEBUG: mux-h1: Fix event label from trace messages about payload formatting
+ - BUG/MINOR: mux-h1: Handle read0 in rcv_pipe() only when data receipt was tried
+ - BUG/MINOR: mux-h1: Ignore C-L when sending H1 messages if T-E is also set
+ - BUG/MEDIUM: h1: Ignore C-L value in the H1 parser if T-E is also set
+ - REGTESTS: filters: Don't set C-L header in the successful response to CONNECT
+ - MINOR: mux-h1: Add flags if outgoing msg contains a header about its payload
+ - MINOR: mux-h1: Rely on H1S_F_HAVE_CHNK to add T-E in outgoing messages
+ - BUG/MEDIUM: mux-h1: Add C-L header in outgoing message if it was removed
+ - BUG/MEDIUM: mux-h1: Ignore headers modifications about payload representation
+ - BUG/MINOR: h1-htx: Keep flags about C-L/T-E during HEAD response parsing
+ - MINOR: h1-htx: Declare successful tunnel establishment as bodyless
+ - BUILD: quic: allow USE_QUIC to work with AWSLC
+ - CI: github: add USE_QUIC=1 to aws-lc build
+ - BUG/MINOR: hq-interop: simplify parser requirement
+ - MEDIUM: cache: Add "Origin" header to secondary cache key
+ - MINOR: haproxy: permit to register features during boot
+ - MINOR: tcp_rules: tcp-{request,response} requires TCP or HTTP mode
+ - MINOR: stktable: "stick" requires TCP or HTTP mode
+ - MINOR: filter: "filter" requires TCP or HTTP mode
+ - MINOR: backend/balance: "balance" requires TCP or HTTP mode
+ - MINOR: flt_http_comp: "compression" requires TCP or HTTP mode
+ - MINOR: http_htx/errors: prevent the use of some keywords when not in tcp/http mode
+ - MINOR: fcgi-app: "use-fcgi-app" requires TCP or HTTP mode
+ - MINOR: cfgparse-listen: "http-send-name-header" requires TCP or HTTP mode
+ - MINOR: cfgparse-listen: "dynamic-cookie-key" requires TCP or HTTP mode
+ - MINOR: proxy: dynamic-cookie CLIs require TCP or HTTP mode
+ - MINOR: cfgparse-listen: "http-reuse" requires TCP or HTTP mode
+ - MINOR: proxy: report a warning for max_ka_queue in proxy_cfg_ensure_no_http()
+ - MINOR: cfgparse-listen: warn when use-server rules is used in wrong mode
+ - DOC: config: unify "log" directive doc
+ - MINOR: sink/log: fix some typos around postparsing logic
+ - MINOR: sink: remove useless check after sink creation
+ - MINOR: sink: don't rely on p->parent in sink appctx
+ - MINOR: sink: don't rely on forward_px to init sink forwarding
+ - MINOR: sink: refine forward_px usage
+ - MINOR: sink: function to add new sink servers
+ - BUG/MEDIUM: stconn: Fix comparison sign in sc_need_room()
+ - BUG/MEDIUM: actions: always apply a longest match on prefix lookup
+
+2023/09/22 : 2.9-dev6
+ - BUG/MINOR: quic: fdtab array underflow access
+ - DEBUG: pools: always record the caller for uncached allocs as well
+ - DEBUG: pools: pass the caller pointer to the check functions and macros
+ - DEBUG: pools: make pool_check_pattern() take a pointer to the pool
+ - DEBUG: pools: inspect pools on fatal error and dump information found
+ - BUG/MEDIUM: quic: quic_cc_conn ->cntrs counters unreachable
+ - DEBUG: pools: also print the item's pointer when crashing
+ - DEBUG: pools: also print the value of the tag when it doesn't match
+ - DEBUG: pools: print the contents surrounding the expected tag location
+ - MEDIUM: pools: refine pool size rounding
+ - BUG/MEDIUM: hlua: don't pass stale nargs argument to lua_resume()
+ - BUG/MINOR: hlua/init: coroutine may not resume itself
+ - BUG/MEDIUM: mux-fcgi: Don't swap trash and dbuf when handling STDERR records
+ - BUG/MINOR: promex: fix backend_agg_check_status
+ - BUG/MEDIUM: master/cli: Pin the master CLI on the first thread of the group 1
+ - MAJOR: import: update mt_list to support exponential back-off
+ - CLEANUP: pools: simplify the pool expression when no pool was matched in dump
+ - MINOR: samples: implement bytes_in and bytes_out samples
+ - DOC: configuration: add %[req.ver] sample to %HV
+ - BUG/MINOR: quic: Leak of frames to send.
+ - DOC: configuration: add %[query] to %HQ
+ - BUG/MINOR: freq_ctr: fix possible negative rate with the scaled API
+ - BUG/MAJOR: mux-h2: Report a protocol error for any DATA frame before headers
+ - BUILD: quic: fix build on centos 8 and USE_QUIC_OPENSSL_COMPAT
+ - Revert "MAJOR: import: update mt_list to support exponential back-off"
+ - BUG/MINOR: server: add missing free for server->rdr_pfx
+ - REGTESTS: ssl: skip OCSP test w/ WolfSSL
+ - REGTESTS: ssl: skip generate-certificates test w/ wolfSSL
+ - MINOR: logs: clarify the check of the log range
+ - MINOR: log: remove the unused curr_idx in struct smp_log_range
+ - CLEANUP: logs: rename a confusing local variable "curr_rg" to "smp_rg"
+ - MINOR: logs: use a single index to store the current range and index
+ - MEDIUM: logs: atomically check and update the log sample index
+ - CLEANUP: ring: rename the ring lock "RING_LOCK" instead of "LOGSRV_LOCK"
+ - BUG/MEDIUM: http-ana: Try to handle response before handling server abort
+ - MEDIUM: tools/ip: v4tov6() and v6tov4() rework
+ - MINOR: pattern/ip: offload ip conversion logic to helper functions
+ - MINOR: pattern: fix pat_{parse,match}_ip() function comments
+ - MINOR: pattern/ip: simplify pat_match_ip() function
+ - BUG/MEDIUM: server/cli: don't delete a dynamic server that has streams
+ - MINOR: hlua: Add support for the "http-after-res" action
+ - BUG/MINOR: proto_reverse_connect: fix preconnect with startup name resolution
+ - MINOR: proto_reverse_connect: prevent transparent server for pre-connect
+ - CI: cirrus-ci: display gdb bt if any
+ - MEDIUM: sample: Enhances converter "bytes" to take variable names as arguments
+ - MEDIUM: sample: Small fix in function check_operator for eror reporting
+ - MINOR: quic: handle external extra CIDs generator.
+ - BUG/MINOR: proto_reverse_connect: set default maxconn
+ - MINOR: proto_reverse_connect: refactor preconnect failure
+ - MINOR: proto_reverse_connect: remove unneeded wakeup
+ - MINOR: proto_reverse_connect: emit log for preconnect
+
+2023/09/08 : 2.9-dev5
+ - BUG/MEDIUM: mux-h2: fix crash when checking for reverse connection after error
+ - BUILD: import: guard plock.h against multiple inclusion
+ - BUILD: pools: import plock.h to build even without thread support
+ - BUG/MINOR: ssl/cli: can't find ".crt" files when replacing a certificate
+ - BUG/MINOR: stream: protect stream_dump() against incomplete streams
+ - DOC: config: mention uid dependency on the tune.quic.socket-owner option
+ - MEDIUM: capabilities: enable support for Linux capabilities
+ - CLEANUP/MINOR: connection: Improve consistency of PPv2 related constants
+ - MEDIUM: connection: Generic, list-based allocation and look-up of PPv2 TLVs
+ - MEDIUM: sample: Add fetch for arbitrary TLVs
+ - MINOR: sample: Refactor fc_pp_authority by wrapping the generic TLV fetch
+ - MINOR: sample: Refactor fc_pp_unique_id by wrapping the generic TLV fetch
+ - MINOR: sample: Add common TLV types as constants for fc_pp_tlv
+ - MINOR: ssl_sock: avoid iterating realloc(+1) on stored context
+ - DOC: ssl: add some comments about the non-obvious session allocation stuff
+ - CLEANUP: ssl: keep a pointer to the server in ssl_sock_init()
+ - MEDIUM: ssl_sock: always use the SSL's server name, not the one from the tid
+ - MEDIUM: server/ssl: place an rwlock in the per-thread ssl server session
+ - MINOR: server/ssl: maintain an index of the last known valid SSL session
+ - MINOR: server/ssl: clear the shared good session index on failure
+ - MEDIUM: server/ssl: pick another thread's session when we have none yet
+ - MINOR: activity: report the current run queue size
+ - BUG/MINOR: checks: do not queue/wake a bounced check
+ - MINOR: checks: start the checks in sleeping state
+ - MINOR: checks: pin the check to its thread upon wakeup
+ - MINOR: check: remember when we migrate a check
+ - MINOR: check/activity: collect some per-thread check activity stats
+ - MINOR: checks: maintain counters of active checks per thread
+ - MINOR: check: also consider the random other thread's active checks
+ - MEDIUM: checks: search more aggressively for another thread on overload
+ - MEDIUM: checks: implement a queue in order to limit concurrent checks
+ - MINOR: checks: also consider the thread's queue for rebalancing
+ - DEBUG: applet: Properly report opposite SC expiration dates in traces
+ - BUG/MEDIUM: stconn: Update stream expiration date on blocked sends
+ - BUG/MINOR: stconn: Don't report blocked sends during connection establishment
+ - BUG/MEDIUM: stconn: Wake applets on sending path if there is a pending shutdown
+ - BUG/MEDIUM: stconn: Don't block sends if there is a pending shutdown
+ - BUG/MINOR: quic: Possible skipped RTT sampling
+ - MINOR: quic: Add a trace to quic_release_frm()
+ - BUG/MAJOR: quic: Really ignore malformed ACK frames.
+ - BUG/MINOR: quic: Unchecked pointer to packet number space dereferenced
+ - BUG/MEDIUM: connection: fix pool free regression with recent ppv2 TLV patches
+ - BUG/MEDIUM: h1-htx: Ensure chunked parsing with full output buffer
+ - BUG/MINOR: stream: further protect stream_dump() against incomplete sessions
+ - DOC: configuration: update examples for req.ver
+ - MINOR: properly mark the end of the CLI command in error messages
+ - BUILD: ssl: Build with new cryptographic library AWS-LC
+ - REGTESTS: ssl: skip ssl_dh test with AWS-LC
+ - BUILD: bug: make BUG_ON() void to avoid a rare warning
+ - BUILD: checks: shut up yet another stupid gcc warning
+ - MINOR: cpuset: add ha_cpuset_isset() to check for the presence of a CPU in a set
+ - MINOR: cpuset: add ha_cpuset_or() to bitwise-OR two CPU sets
+ - MINOR: cpuset: centralize a reliable bound cpu detection
+ - MEDIUM: threads: detect incomplete CPU bindings
+ - MEDIUM: threads: detect excessive thread counts vs cpu-map
+ - BUILD: quic: Compilation issue on 32-bits systems with quic_may_send_bytes()
+ - BUG/MINOR: quic: Unchecked pointer to Handshake packet number space
+ - MINOR: global: export the display_version() symbol
+ - MEDIUM: mworker: display a more accessible message when a worker crashes
+ - MINOR: httpclient: allow to configure the retries
+ - MINOR: httpclient: allow to configure the timeout.connect
+ - BUG/MINOR: quic: Wrong RTT adjusments
+ - BUG/MINOR: quic: Wrong RTT computation (srtt and rrt_var)
+ - BUG/MINOR: stconn: Don't inhibit shutdown on connection on error
+ - BUG/MEDIUM: applet: Fix API for function to push new data in channels buffer
+ - BUG/MEDIUM: stconn: Report read activity when a stream is attached to front SC
+ - BUG/MEDIUM: applet: Report an error if applet request more room on aborted SC
+ - BUG/MEDIUM: stconn/stream: Forward shutdown on write timeout
+ - BUG/MEDIUM: stconn: Always update stream's expiration date after I/O
+ - BUG/MINOR: applet: Always expect data when CLI is waiting for a new command
+ - BUG/MINOR: ring/cli: Don't expect input data when showing events
+ - BUG/MINOR: quic: Dereferenced unchecked pointer to Handshake packet number space
+ - BUG/MINOR: hlua/action: incorrect message on E_YIELD error
+ - MINOR: http_ana: position the FINAL flag for http_after_res execution
+ - CI: scripts: add support to build-ssl.sh to download and build AWS-LC
+ - CI: add support to matrix.py to determine the latest AWS-LC release
+ - CI: Update matrix.py so all code is contained in functions.
+ - CI: github: Add a weekly CI run building with AWS-LC
+ - MINOR: ring: add a function to compute max ring payload
+ - BUG/MEDIUM: ring: adjust maxlen consistency check
+ - MINOR: sink: simplify post_sink_resolve function
+ - MINOR: log/sink: detect when log maxlen exceeds sink size
+ - MINOR: sink: inform the user when logs will be implicitly truncated
+ - MEDIUM: sink: don't perform implicit truncations when maxlen is not set
+ - MINOR: log: move log-forwarders cleanup in log.c
+ - MEDIUM: httpclient/logs: rely on per-proxy post-check instead of global one
+ - MINOR: log: add dup_logsrv() helper function
+ - MEDIUM: log/sink: make logsrv postparsing more generic
+ - MEDIUM: fcgi-app: properly postresolve logsrvs
+ - MEDIUM: spoe-agent: properly postresolve log rings
+ - MINOR: sink: add helper function to deallocate sink struct
+ - MEDIUM: sink/ring: introduce high level ring creation helper function
+ - MEDIUM: sink: add sink_finalize() function
+ - CLEANUP: log: remove unnecessary trim in __do_send_log
+ - MINOR: cache: Change hash function in default normalizer used in case of "vary"
+ - MINOR: tasks/stats: report the number of niced tasks in "show info"
+ - CI: Update to actions/checkout@v4
+ - MINOR: ssl: add support for 'curves' keyword on server lines
+ - BUG/MINOR: quic: Wrong cluster secret initialization
+ - CLEANUP: quic: Remove useless free_quic_tx_pkts() function.
+ - MEDIUM: init: initialize the trash earlier
+ - MINOR: tools: add function read_line_to_trash() to read a line of a file
+ - MINOR: cfgparse: use read_line_to_trash() to read from /sys
+ - MEDIUM: cfgparse: assign NUMA affinity to cpu-maps
+ - MINOR: cpuset: dynamically allocate cpu_map
+ - REORG: cpuset: move parse_cpu_set() and parse_cpumap() to cpuset.c
+ - CI: musl: highlight section if there are coredumps
+ - CI: musl: drop shopt in workflow invocation
+
+2023/08/25 : 2.9-dev4
+ - DEV: flags/show-sess-to-flags: properly decode fd.state
+ - BUG/MINOR: stktable: allow sc-set-gpt(0) from tcp-request connection
+ - BUG/MINOR: stktable: allow sc-add-gpc from tcp-request connection
+ - DOC: typo: fix sc-set-gpt references
+ - SCRIPTS: git-show-backports: automatic ref and base detection with -m
+ - REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (3)
+ - DOC: jwt: Add explicit list of supported algorithms
+ - BUILD: Makefile: add the USE_QUIC option to make help
+ - BUILD: Makefile: add USE_QUIC_OPENSSL_COMPAT to make help
+ - BUILD: Makefile: realigned USE_* options in make help
+ - DEV: makefile: fix POSIX compatibility for "range" target
+ - IMPORT: plock: also support inlining the int code
+ - IMPORT: plock: always expose the inline version of the lock wait function
+ - IMPORT: lorw: support inlining the wait call
+ - MINOR: threads: inline the wait function for pthread_rwlock emulation
+ - MINOR: atomic: make sure to always relax after a failed CAS
+ - MINOR: pools: use EBO to wait for unlock during pool_flush()
+ - BUILD/IMPORT: fix compilation with PLOCK_DISABLE_EBO=1
+ - MINOR: quic+openssl_compat: Do not start without "limited-quic"
+ - MINOR: quic+openssl_compat: Emit an alert for "allow-0rtt" option
+ - BUG/MINOR: quic: allow-0rtt warning must only be emitted with quic bind
+ - BUG/MINOR: quic: ssl_quic_initial_ctx() uses error count not error code
+ - MINOR: pattern: do not needlessly lookup the LRU cache for empty lists
+ - IMPORT: xxhash: update xxHash to version 0.8.2
+ - MINOR: proxy: simplify parsing 'backend/server'
+ - MINOR: connection: centralize init/deinit of backend elements
+ - MEDIUM: connection: implement passive reverse
+ - MEDIUM: h2: reverse connection after SETTINGS reception
+ - MINOR: server: define reverse-connect server
+ - MINOR: backend: only allow reuse for reverse server
+ - MINOR: tcp-act: parse 'tcp-request attach-srv' session rule
+ - REGTESTS: provide a reverse-server test
+ - MINOR: tcp-act: define optional arg name for attach-srv
+ - MINOR: connection: use attach-srv name as SNI reuse parameter on reverse
+ - REGTESTS: provide a reverse-server test with name argument
+ - MINOR: proto: define dedicated protocol for active reverse connect
+ - MINOR: connection: extend conn_reverse() for active reverse
+ - MINOR: proto_reverse_connect: parse rev@ addresses for bind
+ - MINOR: connection: prepare init code paths for active reverse
+ - MEDIUM: proto_reverse_connect: bootstrap active reverse connection
+ - MINOR: proto_reverse_connect: handle early error before reversal
+ - MEDIUM: h2: implement active connection reversal
+ - MEDIUM: h2: prevent stream opening before connection reverse completed
+ - REGTESTS: write a full reverse regtest
+ - BUG/MINOR: h2: fix reverse if no timeout defined
+ - CI: fedora: fix "dnf" invocation syntax
+ - BUG/MINOR: hlua_fcn: potentially unsafe stktable_data_ptr usage
+ - DOC: lua: fix Sphinx warning from core.get_var()
+ - DOC: lua: fix core.register_action typo
+ - BUG/MINOR: ssl_sock: fix possible memory leak on OOM
+ - MEDIUM: map/acl: Improve pat_ref_set() efficiency (for "set-map", "add-acl" action perfs)
+ - MEDIUM: map/acl: Improve pat_ref_set_elt() efficiency (for "set-map", "add-acl"action perfs)
+ - MEDIUM: map/acl: Accelerate several functions using pat_ref_elt struct ->head list
+ - MEDIUM: map/acl: Replace map/acl spin lock by a read/write lock.
+ - DOC: map/acl: Remove the comments about map/acl performance issue
+ - DOC: Explanation of be_name and be_id fetches
+ - MINOR: connection: simplify removal of idle conns from their trees
+ - MINOR: server: move idle tree insert in a dedicated function
+ - MAJOR: connection: purge idle conn by last usage
+
+2023/08/12 : 2.9-dev3
+ - BUG/MINOR: ssl: OCSP callback only registered for first SSL_CTX
+ - BUG/MEDIUM: h3: Properly report a C-L header was found to the HTX start-line
+ - MINOR: sample: add pid sample
+ - MINOR: sample: implement act_conn sample fetch
+ - MINOR: sample: accept_date / request_date return %Ts / %tr timestamp values
+ - MEDIUM: sample: implement us and ms variant of utime and ltime
+ - BUG/MINOR: sample: check alloc_trash_chunk() in conv_time_common()
+ - DOC: configuration: describe Td in Timing events
+ - MINOR: sample: implement the T* timer tags from the log-format as fetches
+ - DOC: configuration: add sample fetches for timing events
+ - BUG/MINOR: quic: Possible crash when acknowledging Initial v2 packets
+ - MINOR: quic: Export QUIC traces code from quic_conn.c
+ - MINOR: quic: Export QUIC CLI code from quic_conn.c
+ - MINOR: quic: Move TLS related code to quic_tls.c
+ - MINOR: quic: Add new "QUIC over SSL" C module.
+ - MINOR: quic: Add a new quic_ack.c C module for QUIC acknowledgements
+ - CLEANUP: quic: Defined but no more used function (quic_get_tls_enc_levels())
+ - MINOR: quic: Split QUIC connection code into three parts
+ - CLEANUP: quic: quic_conn struct cleanup
+ - MINOR: quic: Move the QUIC frame pool to its proper location
+ - BUG/MINOR: chunk: fix chunk_appendf() to not write a zero if buffer is full
+ - BUG/MEDIUM: h3: Be sure to handle fin bit on the last DATA frame
+ - DOC: configuration: rework the custom log format table
+ - BUG/MINOR: quic+openssl_compat: Non initialized TLS encryption levels
+ - CLEANUP: acl: remove cache_idx from acl struct
+ - REORG: cfgparse: extract curproxy as a global variable
+ - MINOR: acl: add acl() sample fetch
+ - BUILD: cfgparse: keep a single "curproxy"
+ - BUG/MEDIUM: bwlim: Reset analyse expiration date when then channel analyse ends
+ - MEDIUM: stream: Reset response analyse expiration date if there is no analyzer
+ - BUG/MINOR: htx/mux-h1: Properly handle bodyless responses when splicing is used
+ - BUG/MEDIUM: quic: consume contig space on requeue datagram
+ - BUG/MINOR: http-client: Don't forget to commit changes on HTX message
+ - CLEANUP: stconn: Move comment about sedesc fields on the field line
+ - REGTESTS: http: Create a dedicated script to test spliced bodyless responses
+ - REGTESTS: Test SPLICE feature is enabled to execute script about splicing
+ - BUG/MINOR: quic: reappend rxbuf buffer on fake dgram alloc error
+ - BUILD: quic: fix wrong potential NULL dereference
+ - MINOR: h3: abort request if not completed before full response
+ - BUG/MAJOR: http-ana: Get a fresh trash buffer for each header value replacement
+ - CLEANUP: quic: Remove quic_path_room().
+ - MINOR: quic: Amplification limit handling sanitization.
+ - MINOR: quic: Move some counters from [rt]x quic_conn anonymous struct
+ - MEDIUM: quic: Send CONNECTION_CLOSE packets from a dedicated buffer.
+ - MINOR: quic: Use a pool for the connection ID tree.
+ - MEDIUM: quic: Allow the quic_conn memory to be asap released.
+ - MINOR: quic: Release asap quic_conn memory (application level)
+ - MINOR: quic: Release asap quic_conn memory from ->close() xprt callback.
+ - MINOR: quic: Warning for OpenSSL wrapper QUIC bindings without "limited-quic"
+ - REORG: http: move has_forbidden_char() from h2.c to http.h
+ - BUG/MAJOR: h3: reject header values containing invalid chars
+ - MINOR: mux-h2/traces: also suggest invalid header upon parsing error
+ - MINOR: ist: add new function ist_find_range() to find a character range
+ - MINOR: http: add new function http_path_has_forbidden_char()
+ - MINOR: h2: pass accept-invalid-http-request down the request parser
+ - REGTESTS: http-rules: add accept-invalid-http-request for normalize-uri tests
+ - BUG/MINOR: h1: do not accept '#' as part of the URI component
+ - BUG/MINOR: h2: reject more chars from the :path pseudo header
+ - BUG/MINOR: h3: reject more chars from the :path pseudo header
+ - REGTESTS: http-rules: verify that we block '#' by default for normalize-uri
+ - DOC: clarify the handling of URL fragments in requests
+ - BUG/MAJOR: http: reject any empty content-length header value
+ - BUG/MINOR: http: skip leading zeroes in content-length values
+ - BUG/MEDIUM: mux-h1: fix incorrect state checking in h1_process_mux()
+ - BUG/MEDIUM: mux-h1: do not forget EOH even when no header is sent
+ - BUILD: mux-h1: shut a build warning on clang from previous commit
+ - DEV: makefile: add a new "range" target to iteratively build all commits
+ - CI: do not use "groupinstall" for Fedora Rawhide builds
+ - CI: get rid of travis-ci wrapper for Coverity scan
+ - BUG/MINOR: quic: mux started when releasing quic_conn
+ - BUG/MINOR: quic: Possible crash in quic_cc_conn_io_cb() traces.
+ - MINOR: quic: Add a trace for QUIC conn fd ready for receive
+ - BUG/MINOR: quic: Possible crash when issuing "show fd/sess" CLI commands
+ - BUG/MINOR: quic: Missing tasklet (quic_cc_conn_io_cb) memory release (leak)
+ - BUG/MEDIUM: quic: fix tasklet_wakeup loop on connection closing
+ - BUG/MINOR: hlua: fix invalid use of lua_pop on error paths
+ - MINOR: hlua: add hlua_stream_ctx_prepare helper function
+ - BUG/MEDIUM: hlua: streams don't support mixing lua-load with lua-load-per-thread
+ - MAJOR: threads/plock: update the embedded library again
+ - MINOR: stick-table: move the task_queue() call outside of the lock
+ - MINOR: stick-table: move the task_wakeup() call outside of the lock
+ - MEDIUM: stick-table: change the ref_cnt atomically
+ - MINOR: stick-table: better organize the struct stktable
+ - MEDIUM: peers: update ->commitupdate out of the lock using a CAS
+ - MEDIUM: peers: drop then re-acquire the wrlock in peer_send_teachmsgs()
+ - MEDIUM: peers: only read-lock peer_send_teachmsgs()
+ - MEDIUM: stick-table: use a distinct lock for the updates tree
+ - MEDIUM: stick-table: touch updates under an upgradable read lock
+ - MEDIUM: peers: drop the stick-table lock before entering peer_send_teachmsgs()
+ - MINOR: stick-table: move the update lock into its own cache line
+ - CLEANUP: stick-table: slightly reorder the stktable struct
+ - BUILD: defaults: use __WORDSIZE not LONGBITS for MAX_THREADS_PER_GROUP
+ - MINOR: tools: make ptr_hash() support 0-bit outputs
+ - MINOR: tools: improve ptr hash distribution on 64 bits
+ - OPTIM: tools: improve hash distribution using a better prime seed
+ - OPTIM: pools: use exponential back-off on shared pool allocation/release
+ - OPTIM: pools: make pool_get_from_os() / pool_put_to_os() not update ->allocated
+ - MINOR: pools: introduce the use of multiple buckets
+ - MEDIUM: pools: spread the allocated counter over a few buckets
+ - MEDIUM: pools: move the used counter over a few buckets
+ - MEDIUM: pools: move the needed_avg counter over a few buckets
+ - MINOR: pools: move the failed allocation counter over a few buckets
+ - MAJOR: pools: move the shared pool's free_list over multiple buckets
+ - MINOR: pools: make pool_evict_last_items() use pool_put_to_os_no_dec()
+ - BUILD: pools: fix build error on clang with inline vs forceinline
+
+2023/07/21 : 2.9-dev2
+ - BUG/MINOR: quic: Possible leak when allocating an encryption level
+ - BUG/MINOR: quic: Missing QUIC connection path member initialization
+ - BUILD: quic: Compilation fixes for some gcc warnings with -O1
+ - DOC: ssl: Fix typo in 'ocsp-update' option
+ - DOC: ssl: Add ocsp-update troubleshooting clues and emphasize on crt-list only aspect
+ - BUG/MINOR: tcp_sample: bc_{dst,src} return IP not INT
+ - MEDIUM: acl/sample: unify sample conv parsing in a single function
+ - MINOR: sample: introduce c_pseudo() conv function
+ - MEDIUM: sample: add missing ADDR=>? compatibility matrix entries
+ - MINOR: sample: fix ipmask sample definition
+ - MEDIUM: tree-wide: fetches that may return IPV4+IPV6 now return ADDR
+ - MEDIUM: sample: introduce 'same' output type
+ - BUG/MINOR: quic: Possible crash in "show quic" dumping packet number spaces
 - BUG/MINOR: cache: A 'max-age=0' cache-control directive can be overridden by a s-maxage
+ - BUG/MEDIUM: sink: invalid server list in sink_new_from_logsrv()
+ - BUG/MINOR: http_ext: unhandled ERR_ABORT in proxy_http_parse_7239()
+ - BUG/MINOR: sink: missing sft free in sink_deinit()
+ - BUG/MINOR: ring: size warning incorrectly reported as fatal error
+ - BUG/MINOR: ring: maxlen warning reported as alert
+ - BUG/MINOR: log: LF upsets maxlen for UDP targets
+ - MINOR: sink/api: pass explicit maxlen parameter to sink_write()
+ - BUG/MEDIUM: log: improper use of logsrv->maxlen for buffer targets
+ - BUG/MINOR: log: fix missing name error message in cfg_parse_log_forward()
+ - BUG/MINOR: log: fix multiple error paths in cfg_parse_log_forward()
+ - BUG/MINOR: log: free errmsg on error in cfg_parse_log_forward()
+ - BUG/MINOR: sink: invalid sft free in sink_deinit()
+ - BUG/MINOR: sink: fix errors handling in cfg_post_parse_ring()
+ - BUG/MINOR: server: set rid default value in new_server()
+ - MINOR: hlua_fcn/mailers: handle timeout mail from mailers section
+ - BUG/MINOR: sink/log: properly deinit srv in sink_new_from_logsrv()
+ - EXAMPLES: maintain haproxy 2.8 retrocompatibility for lua mailers script
+ - BUG/MINOR: hlua_fcn/queue: use atomic load to fetch queue size
+ - BUG/MINOR: config: Remove final '\n' in error messages
+ - BUG/MINOR: config: Lenient port configuration parsing
+ - BUG/MEDIUM: quic: token IV was not computed using a strong secret
+ - BUG/MINOR: quic: retry token remove one useless intermediate expand
+ - BUG/MEDIUM: quic: missing check of dcid for init pkt including a token
+ - BUG/MEDIUM: quic: timestamp shared in token was using internal time clock
+ - CLEANUP: quic: remove useless parameter 'key' from quic_packet_encrypt
+ - BUG/MINOR: hlua: hlua_yieldk ctx argument should support pointers
+ - BUG/MEDIUM: hlua_fcn/queue: bad pop_wait sequencing
+ - DOC: config: Fix fc_src description to state the source address is returned
+ - BUG/MINOR: sample: Fix wrong overflow detection in add/sub conveters
+ - BUG/MINOR: http: Return the right reason for 302
+ - MEDIUM: ssl: new sample fetch method to get curve name
+ - CI: add naming convention documentation
 - CI: explicitly highlight VTest result section if there's something
 - BUG/MINOR: quic: Unchecked encryption levels availability
+ - BUILD: quic: fix warning during compilation using gcc-6.5
+ - BUG/MINOR: hlua: add check for lua_newstate
+ - BUG/MINOR: h1-htx: Return the right reason for 302 FCGI responses
+ - MINOR: lua: Allow reading "proc." scoped vars from LUA core.
+ - MINOR: cpuset: add cpu_map_configured() to know if a cpu-map was found
+ - BUG/MINOR: config: do not detect NUMA topology when cpu-map is configured
+ - BUG/MINOR: cpuset: remove the bogus "proc" from the cpu_map struct
+ - BUG/MINOR: init: set process' affinity even in foreground
+ - CLEANUP: cpuset: remove the unused proc_t1 field in cpu_map
+ - CLEANUP: config: make parse_cpu_set() return documented values
+ - BUG/MINOR: server: Don't warn on server resolution failure with init-addr none
+ - MINOR: peers: add peers keyword registration
+ - MINOR: quic: Stop storing the TX encoded transport parameters
+ - MINOR: quic: Dynamic allocation for negotiated Initial TLS cipher context.
+ - MINOR: quic: Release asap the negotiated Initial TLS context.
+ - MINOR: quic: Add traces to qc_may_build_pkt()
+ - MEDIUM: quic: Packet building rework.
+ - CLEANUP: quic: Remove a useless TLS related variable from quic_conn_io_cb().
+ - MEDIUM: quic: Handshake I/O handler rework.
+ - MINOR: quic: Add traces for qc_frm_free()
+ - MINOR: quic: add trace about pktns packet/frames releasing
+ - BUG/MINOR: quic: Missing parentheses around PTO probe variable.
+ - MINOR: quic: Ping from Initial pktns before reaching anti-amplification limit
+ - BUG/MINOR: server-state: Ignore empty files
+ - BUG/MINOR: server-state: Avoid warning on 'file not found'
+ - BUG/MEDIUM: listener: Acquire proxy's lock in relax_listener() if necessary
+ - MINOR: quic: QUIC openssl wrapper implementation
 - MINOR: quic: Include QUIC openssl wrapper header from TLS stacks compatibility header
+ - MINOR: quic: Do not enable O-RTT with USE_QUIC_OPENSSL_COMPAT
+ - MINOR: quic: Set the QUIC connection as extra data before calling SSL_set_quic_method()
+ - MINOR: quic: Do not enable 0RTT with SSL_set_quic_early_data_enabled()
+ - MINOR: quic: Add a compilation option for the QUIC OpenSSL wrapper
+ - MINOR: quic: Export some KDF functions (QUIC-TLS)
+ - MINOR: quic: Make ->set_encryption_secrets() be callable two times
+ - MINOR: quic: Initialize TLS contexts for QUIC openssl wrapper
+ - MINOR: quic: Call the keylog callback for QUIC openssl wrapper from SSL_CTX_keylog()
+ - MINOR: quic: Add a quic_openssl_compat struct to quic_conn struct
+ - MINOR: quic: Useless call to SSL_CTX_set_quic_method()
+ - MINOR: quic: SSL context initialization with QUIC OpenSSL wrapper.
+ - MINOR: quic: Missing encoded transport parameters for QUIC OpenSSL wrapper
+ - MINOR: quic: Add "limited-quic" new tuning setting
+ - DOC: quic: Add "limited-quic" new tuning setting
+ - DOC: install: Document how to build a limited support for QUIC
+
+2023/07/02 : 2.9-dev1
+ - BUG/MINOR: stats: Fix Lua's `get_stats` function
+ - MINOR: stats: protect against future stats fields omissions
+ - BUG/MINOR: stream: do not use client-fin/server-fin with HTX
+ - BUG/MINOR: quic: Possible crash when SSL session init fails
+ - CONTRIB: Add vi file extensions to .gitignore
+ - BUG/MINOR: spoe: Only skip sending new frame after a receive attempt
+ - BUG/MINOR: peers: Improve detection of config errors in peers sections
 - REG-TESTS: stickiness: Delay haproxy's start to properly resolve variables
+ - DOC: quic: fix misspelled tune.quic.socket-owner
+ - DOC: config: fix jwt_verify() example using var()
+ - DOC: config: fix rfc7239 converter examples (again)
+ - BUG/MINOR: cfgparse-tcp: leak when re-declaring interface from bind line
+ - BUG/MINOR: proxy: add missing interface bind free in free_proxy
+ - BUG/MINOR: proxy/server: free default-server on deinit
+ - BUG/MEDIUM: hlua: Use front SC to detect EOI in HTTP applets' receive functions
 - BUG/MINOR: ssl: log message non thread safe in SSL Handshake failure
+ - BUG/MINOR: quic: Wrong encryption level flags checking
+ - BUG/MINOR: quic: Address inversion in "show quic full"
+ - BUG/MINOR: server: inherit from netns in srv_settings_cpy()
+ - BUG/MINOR: namespace: missing free in netns_sig_stop()
+ - BUG/MINOR: quic: Missing initialization (packet number space probing)
+ - BUG/MINOR: quic: Possible crash in quic_conn_prx_cntrs_update()
+ - BUG/MINOR: quic: Possible endless loop in quic_lstnr_dghdlr()
+ - MINOR: quic: Remove pool_zalloc() from qc_new_conn()
+ - MINOR: quic: Remove pool_zalloc() from qc_conn_alloc_ssl_ctx()
+ - MINOR: quic: Remove pool_zalloc() from quic_dgram_parse()
+ - BUG/MINOR: quic: Missing transport parameters initializations
+ - BUG/MEDIUM: mworker: increase maxsock with each new worker
+ - BUG/MINOR: quic: ticks comparison without ticks API use
+ - BUG/MINOR: quic: Missing TLS secret context initialization
+ - DOC: Add tune.h2.be.* and tune.h2.fe.* options to table of contents
+ - DOC: Add tune.h2.max-frame-size option to table of contents
+ - DOC: Attempt to fix dconv parsing error for tune.h2.fe.initial-window-size
+ - REGTESTS: h1_host_normalization : Add a barrier to not mix up log messages
+ - MEDIUM: mux-h1: Split h1_process_mux() to make code more readable
+ - REORG: mux-h1: Rename functions to emit chunk size/crlf in the output buffer
+ - MINOR: mux-h1: Add function to append the chunk size to the output buffer
+ - MINOR: mux-h1: Add function to prepend the chunk crlf to the output buffer
+ - MEDIUM: filters/htx: Don't rely on HTX extra field if payload is filtered
 - MEDIUM: mux-h1: Add splicing support for chunked messages
+ - REGTESTS: Add a script to test the kernel splicing with chunked messages
+ - CLEANUP: mux-h1: Remove useless __maybe_unused statement
+ - BUG/MINOR: http_ext: fix if-none regression in forwardfor option
+ - REGTEST: add an extra testcase for ifnone-forwardfor
+ - BUG/MINOR: mworker: leak of a socketpair during startup failure
+ - BUG/MINOR: quic: Prevent deadlock with CID tree lock
+ - MEDIUM: ssl: handle the SSL_ERROR_ZERO_RETURN during the handshake
+ - BUG/MINOR: ssl: SSL_ERROR_ZERO_RETURN returns CO_ER_SSL_EMPTY
+ - BUILD: mux-h1: silence a harmless fallthrough warning
 - BUG/MEDIUM: quic: error checking buffer large enough to receive the retry tag
+ - MINOR: ssl: allow to change the server signature algorithm on server lines
+ - MINOR: ssl: allow to change the client-sigalgs on server lines
+ - BUG/MINOR: config: fix stick table duplicate name check
+ - BUG/MINOR: quic: Missing random bits in Retry packet header
 - BUG/MINOR: quic: Wrong Retry packet version field endianness
 - BUG/MINOR: quic: Wrong endianness for version field in Retry token
+ - IMPORT: slz: implement a synchronous flush() operation
+ - MINOR: compression/slz: add support for a pure flush of pending bytes
+ - MINOR: quic: Move QUIC TLS encryption level related code (quic_conn_enc_level_init())
+ - MINOR: quic: Move QUIC encryption level structure definition
+ - MINOR: quic: Implement a packet number space identification function
+ - MINOR: quic: Move packet number space related functions
+ - MEDIUM: quic: Dynamic allocations of packet number spaces
+ - CLEANUP: quic: Remove qc_list_all_rx_pkts() defined but not used
+ - MINOR: quic: Add a pool for the QUIC TLS encryption levels
+ - MEDIUM: quic: Dynamic allocations of QUIC TLS encryption levels
+ - MINOR: quic: Reduce the maximum length of TLS secrets
+ - CLEANUP: quic: Remove two useless pools a low QUIC connection level
+ - MEDIUM: quic: Handle the RX in one pass
+ - MINOR: quic: Remove call to qc_rm_hp_pkts() from I/O callback
+ - CLEANUP: quic: Remove server specific about Initial packet number space
+ - MEDIUM: quic: Release encryption levels and packet number spaces asap
+ - CLEANUP: quic: Remove a useless test about discarded pktns (qc_handle_crypto_frm())
+ - MINOR: quic: Move the packet number space status at quic_conn level
+ - MINOR: quic: Drop packet with type for discarded packet number space.
+ - BUILD: quic: Add a DISGUISE() to please some compiler to qc_prep_hpkts() 1st parameter
+ - BUILD: debug: avoid a build warning related to epoll_wait() in debug code
+
+2023/05/31 : 2.9-dev0
+ - MINOR: version: mention that it's development again
+
+2023/05/31 : 2.8.0
+ - MINOR: compression: Improve the way Vary header is added
+ - BUILD: makefile: search for SSL_INC/wolfssl before SSL_INC
+ - MINOR: init: pre-allocate kernel data structures on init
+ - DOC: install: add details about WolfSSL
+ - BUG/MINOR: ssl_sock: add check for ha_meth
+ - BUG/MINOR: thread: add a check for pthread_create
+ - BUILD: init: print rlim_cur as regular integer
+ - DOC: install: specify the minimum openssl version recommended
+ - CLEANUP: mux-quic: remove unneeded fields in qcc
+ - MINOR: mux-quic: remove nb_streams from qcc
+ - MINOR: quic: fix stats naming for flow control BLOCKED frames
+ - BUG/MEDIUM: mux-quic: only set EOI on FIN
+ - BUG/MEDIUM: threads: fix a tiny race in thread_isolate()
+ - DOC: config: fix rfc7239 converter examples
+ - DOC: quic: remove experimental status for QUIC
+ - CLEANUP: mux-quic: rename functions for mux_ops
+ - CLEANUP: mux-quic: rename internal functions
+ - BUG/MINOR: mux-h2: refresh the idle_timer when the mux is empty
+ - DOC: config: Fix bind/server/peer documentation in the peers section
+ - BUILD: Makefile: use -pthread not -lpthread when threads are enabled
+ - CLEANUP: doc: remove 21 totally obsolete docs
+ - DOC: install: mention the common strict-aliasing warning on older compilers
+ - DOC: install: clarify a few points on the wolfSSL build method
+ - MINOR: quic: Add QUIC connection statistical counters values to "show quic"
+ - EXAMPLES: update the basic-config-edge file for 2.8
+ - MINOR: quic/cli: clarify the "show quic" help message
+ - MINOR: version: mention that it's LTS now.
+
+2023/05/24 : 2.8-dev13
+ - DOC: add size format section to manual
+ - CLEANUP: mux-quic/h3: complete BUG_ON with comments
+ - MINOR: quic: remove return val of quic_aead_iv_build()
+ - MINOR: quic: use WARN_ON for encrypt failures
+ - BUG/MINOR: quic: handle Tx packet allocation failure properly
+ - MINOR: quic: fix alignment of oneline show quic
+ - MEDIUM: stconn/applet: Allow SF_SL_EOS flag alone
+ - MEDIUM: stconn: make the SE_FL_ERR_PENDING to ERROR transition systematic
+ - DOC: internal: add a bit of documentation for the stconn closing conditions
+ - DOC/MINOR: config: Fix typo in description for `ssl_bc` in configuration.txt
+ - BUILD: quic: re-enable chacha20_poly1305 for libressl
+ - MINOR: mux-quic: set both EOI EOS for stream fin
+ - MINOR: mux-quic: only set EOS on RESET_STREAM recv
+ - MINOR: mux-quic: report error on stream-endpoint earlier
+ - BUILD: makefile: fix build issue on GNU make < 3.82
+ - BUG/MINOR: mux-h2: Check H2_SF_BODY_TUNNEL on H2S flags and not demux frame ones
+ - MINOR: mux-h2: Set H2_SF_ES_RCVD flag when decoding the HEADERS frame
+ - MINOR: mux-h2: Add a function to propagate termination flags from h2s to SE
+ - BUG/MEDIUM: mux-h2: Propagate termination flags when frontend SC is created
+ - DEV: add a Lua helper script for SSL keys logging
+ - CLEANUP: makefile: don't display a dummy features list without a target
+ - BUILD: makefile: do not erase build options for some build options
+ - MINOR: quic: Add low level traces (addresses, DCID)
+ - BUG/MINOR: quic: Wrong token length check (quic_generate_retry_token())
+ - BUG/MINOR: quic: Missing Retry token length on receipt
+ - MINOR: quic: Align "show quic" command help information
+ - CLEANUP: quic: Indentation fix quic_rx_pkt_retrieve_conn()
+ - CLEANUP: quic: Useless tests in qc_rx_pkt_handle()
+ - MINOR: quic: Add some counters at QUIC connection level
+ - MINOR: quic: Add a counter for sent packets
+ - MINOR: hlua: hlua_smp2lua_str() may LJMP
+ - MINOR: hlua: hlua_smp2lua() may LJMP
+ - MINOR: hlua: hlua_arg2lua() may LJMP
+ - DOC: hlua: document hlua_lua2arg() function
+ - DOC: hlua: document hlua_lua2smp() function
+ - BUG/MINOR: hlua: unsafe hlua_lua2smp() usage
+ - BUILD: makefile: commit the tiny FreeBSD makefile stub
+ - BUILD: makefile: fix build options when building tools first
+ - BUILD: ist: do not put a cast in an array declaration
+ - BUILD: ist: use the literal declaration for ist_lc/ist_uc under TCC
+ - BUILD: compiler: systematically set USE_OBSOLETE_LINKER with TCC
+ - DOC: install: update reference to known supported versions
+ - SCRIPTS: publish-release: update the umask to keep group write access
+
+2023/05/17 : 2.8-dev12
+ - BUILD: mjson: Fix warning about unused variables
+ - MINOR: spoe: Don't stop disabled proxies
+ - BUG/MEDIUM: filters: Don't deinit filters for disabled proxies during startup
+ - BUG/MINOR: hlua_fcn/queue: fix broken pop_wait()
+ - BUG/MINOR: hlua_fcn/queue: fix reference leak
+ - CLEANUP: hlua_fcn/queue: make queue:push() easier to read
 - BUG/MINOR: quic: Buggy acknowledgments of acknowledgments function
+ - DEBUG: list: add DEBUG_LIST to purposely corrupt list heads after delete
+ - MINOR: stats: report the total number of warnings issued
+ - MINOR: stats: report the number of times the global maxconn was reached
+ - BUG/MINOR: mux-quic: do not prevent shutw on error
+ - BUG/MINOR: mux-quic: do not free frame already released by quic-conn
+ - BUG/MINOR: mux-quic: no need to subscribe for detach streams
+ - MINOR: mux-quic: add traces for stream wake
+ - MINOR: mux-quic: do not send STREAM frames if already subscribe
+ - MINOR: mux-quic: factorize send subscribing
+ - MINOR: mux-quic: simplify return path of qc_send()
+ - MEDIUM: quic: streamline error notification
+ - MEDIUM: mux-quic: adjust transport layer error handling
+ - MINOR: stats: report the listener's protocol along with the address in stats
+ - BUG/MEDIUM: mux-fcgi: Never set SE_FL_EOS without SE_FL_EOI or SE_FL_ERROR
+ - BUG/MEDIUM: mux-fcgi: Don't request more room if mux is waiting for more data
+ - MINOR: stconn: Add a cross-reference between SE descriptor
+ - BUG/MINOR: proxy: missing free in free_proxy for redirect rules
+ - MINOR: proxy: add http_free_redirect_rule() function
+ - BUG/MINOR: http_rules: fix errors paths in http_parse_redirect_rule()
+ - CLEANUP: http_act: use http_free_redirect_rule() to clean redirect act
+ - MINOR: tree-wide: use free_acl_cond() where relevant
+ - CLEANUP: acl: discard prune_acl_cond() function
+ - BUG/MINOR: cli: don't complain about empty command on empty lines
+ - MINOR: cli: add an option to display the uptime in the CLI's prompt
+ - MINOR: master/cli: also implement the timed prompt on the master CLI
+ - MINOR: cli: make "show fd" identify QUIC connections and listeners
+ - MINOR: httpclient: allow to disable the DNS resolvers of the httpclient
+ - BUILD: debug: fix build issue on 32-bit platforms in "debug dev task"
+ - MINOR: ncbuf: missing malloc checks in standalone code
+ - DOC: lua: fix core.{proxies,frontends,backends} visibility
+ - EXAMPLES: fix race condition in lua mailers script
+ - BUG/MINOR: errors: handle malloc failure in usermsgs_put()
+ - BUG/MINOR: log: fix memory error handling in parse_logsrv()
+ - BUG/MINOR: quic: Wrong redispatch for external data on connection socket
+ - MINOR: htx: add function to set EOM reliably
+ - MINOR: mux-quic: remove dedicated function to handle standalone FIN
+ - BUG/MINOR: mux-quic: properly handle buf alloc failure
+ - BUG/MINOR: mux-quic: handle properly recv ncbuf alloc failure
+ - BUG/MINOR: quic: do not alloc buf count on alloc failure
+ - BUG/MINOR: mux-quic: differentiate failure on qc_stream_desc alloc
+ - BUG/MINOR: mux-quic: free task on qc_init() app ops failure
+ - MEDIUM: session/ssl: return the SSL error string during a SSL handshake error
+ - CI: enable monthly Fedora Rawhide clang builds
+ - MEDIUM: mworker/cli: does not disconnect the master CLI upon error
+ - MINOR: stconn: Remove useless test on sedesc on detach to release the xref
+ - MEDIUM: proxy: stop emitting logs for internal proxies when stopping
+ - MINOR: ssl: add new sample ssl_c_r_dn
+ - BUG/MEDIUM: mux-h2: make sure control frames do not refresh the idle timeout
 - BUILD: ssl: ssl_c_r_dn fetches use functions only available since 1.1.1
+ - BUG/MINOR: mux-quic: handle properly Tx buf exhaustion
+ - BUG/MINOR: h3: missing goto on buf alloc failure
+ - BUILD: ssl: get0_verified chain is available on libreSSL
+ - BUG/MINOR: makefile: use USE_LIBATOMIC instead of USE_ATOMIC
+ - MINOR: mux-quic: add trace to stream rcv_buf operation
+ - MINOR: mux-quic: properly report end-of-stream on recv
+ - MINOR: mux-quic: uninline qc_attach_sc()
+ - BUG/MEDIUM: mux-quic: fix EOI for request without payload
+ - MINOR: checks: make sure spread-checks is used also at boot time
 - BUG/MINOR: tcp-rules: Don't shorten the inspect-delay when EOI is set
+ - REGTESTS: log: Reduce response inspect-delay for last_rule.vtc
+ - DOC: config: Clarify conditions to shorten the inspect-delay for TCP rules
+ - CLEANUP: server: remove useless tmptrash assigments in srv_update_status()
+ - BUG/MINOR: server: memory leak in _srv_update_status_op() on server DOWN
 - CLEANUP: check: Remove some useless assignments to NULL
+ - CLEANUP: stats: update the trash chunk where it's used
+ - MINOR: clock: measure the total boot time
+ - MINOR: stats: report the boot time in "show info"
+ - BUG/MINOR: checks: postpone the startup of health checks by the boot time
+ - MINOR: clock: provide a function to automatically adjust now_offset
+ - BUG/MINOR: clock: automatically adjust the internal clock with the boot time
 - CLEANUP: fcgi-app: Remove useless assignment to NULL
+ - REGTESTS: log: Reduce again response inspect-delay for last_rule.vtc
+ - CI: drop Fedora m32 pipeline in favour of cross matrix
+ - MEDIUM: checks: Stop scheduling healthchecks during stopping stage
+ - MEDIUM: resolvers: Stop scheduling resolution during stopping stage
+ - BUG/MINOR: hlua: SET_SAFE_LJMP misuse in hlua_event_runner()
+ - BUG/MINOR: debug: fix pointer check in debug_parse_cli_task()
+
+2023/05/11 : 2.8-dev11
+ - BUILD: debug: do not check the isolated_thread variable in non-threaded builds
+ - BUILD: quic: fix build warning when threads are disabled
+ - CI: more granular failure on generating build matrix
+ - CLEANUP: quic: No more used q_buf structure
+ - CLEANUP: quic: Rename several <buf> variables in quic_frame.(c|h)
+ - CLEANUP: quic: Typo fix for quic_connection_id pool
+ - BUG/MINOR: quic: Wrong key update cipher context initialization for encryption
+ - BUG/MEDIUM: cache: Don't request more room than the max allowed
+ - MEDIUM: stconn: Be sure to always be able to unblock a SC that needs room
+ - EXAMPLES: fix IPV6 support for lua mailers script
+ - BUILD: ssl: buggy -Werror=dangling-pointer since gcc 13.0
+ - DOC: stconn: Update comments about ABRT/SHUT for stconn structure
+ - BUG/MEDIUM: stats: Require more room if buffer is almost full
+ - DOC: configuration: add info about ssl-engine for 2.6
+ - BUG/MINOR: mux-quic: fix transport VS app CONNECTION_CLOSE
+ - BUG/MEDIUM: mux-quic: wakeup tasklet to close on error
+ - DEV: flags: add a script to decode most flags in the "show sess all" output
+ - BUG/MINOR: quic: Possible crash when dumping version information
+ - BUG/MINOR: config: make compression work again in defaults section
+ - BUG/MEDIUM: stream: Forward shutdowns when unhandled errors are caught
+ - MEDIUM: stream: Resync analyzers at the end of process_stream() on change
+ - DEV: flags: add missing stream flags to show-sess-to-flags
+ - DEV: flags/show-sess-to-flags: only retrieve hex digits from hex fields
+ - DEV: flags/show-sess-to-flags: add support for color output
+ - CLEANUP: src/listener.c: remove redundant NULL check
+
+2023/05/07 : 2.8-dev10
+ - BUG/MINOR: stats: fix typo in `TotalSplicedBytesOut` field name
+ - REGTESTS: add success test, "set server" via fqdn
+ - MINOR: ssl: disable CRL checks with WolfSSL when no CRL file
+ - BUG/MINOR: stream/cli: fix stream age calculation in "show sess"
+ - MINOR: debug: clarify "debug dev stream" help message
+ - DEBUG: cli: add "debug dev task" to show/wake/expire/kill tasks and tasklets
+ - BUG/MINOR: ssl/sample: x509_v_err_str converter output when not found
+ - REGTESTS: ssl: simplify X509_V code check in ssl_client_auth.vtc
+ - BUILD: cli: fix build on Windows due to isalnum() implemented as a macro
+ - MINOR: activity: use a single macro to iterate over all fields
+ - MINOR: activity: show the line header inside the SHOW_VAL macro
+ - MINOR: activity: iterate over all fields in a main loop for dumping
+ - MINOR: activity: allow "show activity" to restart dumping on any line
+ - MINOR: activity: allow "show activity" to restart in the middle of a line
+ - DEV: haring: automatically disable DEBUG_STRICT
+ - DEV: haring: update readme to suggest using the same build options for haring
+ - BUG/MINOR: debug: fix incorrect profiling status reporting in show threads
+ - MINOR: debug: permit the "debug dev loop" to run under isolation
+ - BUG/MEDIUM: mux-h2: Properly handle end of request to expect data from server
+ - BUG/MINOR: mux-quic: prevent quic_conn error code to be overwritten
+ - MINOR: mux-quic: add trace event for local error
+ - MINOR: mux-quic: wake up after recv only if avail data
+ - MINOR: mux-quic: adjust local error API
+ - MINOR: mux-quic: report local error on stream endpoint asap
+ - MINOR: mux-quic: close connection asap on local error
+ - BUG/MINOR: debug: do not emit empty lines in thread dumps
+ - BUG/MINOR: mux-h2: Also expect data when waiting for a tunnel establishment
+ - BUG/MINOR: time: fix NS_TO_TV macro
+ - MEDIUM: debug: simplify the thread dump mechanism
+ - MINOR: debug: write panic dump to stderr one thread at a time
+ - MINOR: debug: make "show threads" properly iterate over all threads
+ - CLEANUP: debug: remove the now unused ha_thread_dump_all_to_trash()
+ - MINOR: ssl: allow to change the server signature algorithm
+ - MINOR: ssl: allow to change the signature algorithm for client authentication
+ - MINOR: cli: Use applet API to write output message
+ - MINOR: stats: Use the applet API to write data
+ - MINOR: peers: Use the applet API to send message
+ - MINOR: stconn: Add a field to specify the room needed by the SC to progress
+ - MEDIUM: tree-wide: Change sc API to specify required free space to progress
 - BUG/MEDIUM: stconn: Unblock SC from stream if there is enough room to progress
+ - MEDIUM: applet: Check room needed to unblock opposite SC when data was consumed
+ - MEDIUM: stconn: Check room needed to unblock SC on fast-forward
+ - MEDIUM: stconn: Check room needed to unblock opposite SC when data was sent
+ - MINOR: hlua_fcn: fix Server.is_draining() return type
+ - MINOR: hlua_fcn: add Server.is_backup()
+ - MINOR: hlua_fcn: add Server.is_dynamic()
+ - MINOR: hlua_fcn: add Server.tracking()
+ - MINOR: hlua_fcn: add Server.get_trackers()
+ - MINOR: hlua_fcn: add Server.get_proxy()
+ - MINOR: hlua_fcn: add Server.get_pend_conn() and Server.get_cur_sess()
+ - MINOR: hlua_fcn: add Proxy.get_srv_act() and Proxy.get_srv_bck()
+ - DOC: lua/event: add ServerEvent class header
+ - MINOR: server/event_hdl: publish macro helper
+ - MINOR: server/event_hdl: add SERVER_STATE event
+ - OPTIM: server: publish UP/DOWN events from STATE change
+ - MINOR: hlua: expose SERVER_STATE event
+ - MINOR: server/event_hdl: add SERVER_ADMIN event
+ - MINOR: hlua: expose SERVER_ADMIN event
+ - MINOR: checks/event_hdl: SERVER_CHECK event
+ - MINOR: hlua/event_hdl: expose SERVER_CHECK event
+ - MINOR: mailers/hlua: disable email sending from lua
+ - MINOR: hlua: expose proxy mailers
+ - EXAMPLES: add lua mailers script to replace tcpcheck mailers
+ - BUG/MINOR: hlua: spinning loop in hlua_socket_handler()
+ - MINOR: server: fix message report when IDRAIN is set and MAINT is cleared
+ - CLEANUP: hlua: hlua_register_task() may longjmp
+ - REGTESTS: use lua mailer script for mailers tests
+ - MINOR: hlua: declare hlua_{ref,pushref,unref} functions
+ - MINOR: hlua: declare hlua_gethlua() function
+ - MINOR: hlua: declare hlua_yieldk() function
+ - MINOR: hlua_fcn: add Queue class
+ - EXAMPLES: mailqueue for lua mailers script
+ - MINOR: quic: add format argument for "show quic"
+ - MINOR: quic: implement oneline format for "show quic"
+ - MINOR: config: allow cpu-map to take commas in lists of ranges
+ - CLEANUP: fix a few reported typos in code comments
+ - DOC: fix a few reported typos in the config and install doc
+
+2023/04/28 : 2.8-dev9
+ - MINOR: quic: Move traces at proto level
+ - BUG/MINOR: quic: Possible memory leak from TX packets
+ - BUG/MINOR: quic: Possible leak during probing retransmissions
+ - BUG/MINOR: quic: Useless probing retransmission in draining or killing state
+ - BUG/MINOR: quic: Useless I/O handler task wakeups (draining, killing state)
+ - CLEANUP: quic: rename frame types with an explicit prefix
+ - CLEANUP: quic: rename frame variables
+ - CLEANUP: quic: Remove useless parameters passes to qc_purge_tx_buf()
+ - CLEANUP: quic: Rename <buf> variable to <token> in quic_generate_retry_token()
+ - CLEANUP: quic: Rename <buf> variable into quic_padding_check()
+ - CLEANUP: quic: Rename <buf> variable into quic_rx_pkt_parse()
+ - CLEANUP: quic: Rename <buf> variable for several low level functions
+ - CLEANUP: quic: Make qc_build_pkt() be more readable
+ - CLEANUP: quic: Rename quic_get_dgram_dcid() <buf> variable
+ - CLEANUP: quic: Rename several <buf> variables at low level
+ - CLEANUP: quic: Rename <buf> variable into quic_packet_read_long_header()
+ - CLEANUP: quic: Rename <buf> variable into qc_parse_hd_form()
+ - CLEANUP: quic: Rename several <buf> variables into quic_sock.c
+ - DEBUG: crash using an invalid opcode on x86/x86_64 instead of an invalid access
+ - DEBUG: crash using an invalid opcode on aarch64 instead of an invalid access
+ - DEV: h2: add a script "mkhdr" to build h2 frames from scratch
+ - DEV: h2: support reading frame payload from a file
+ - MINOR: acme.sh: add the deploy script for acme.sh in admin directory
+ - BUG/MEDIUM: mux-quic: do not emit RESET_STREAM for unknown length
+ - BUG/MEDIUM: mux-quic: improve streams fairness to prevent early timeout
+ - BUG/MINOR: quic: prevent buggy memcpy for empty STREAM
+ - MINOR: mux-quic: do not set buffer for empty STREAM frame
+ - MINOR: mux-quic: do not allocate Tx buf for empty STREAM frame
+ - MINOR: quic: finalize affinity change as soon as possible
+ - BUG/MINOR: quic: fix race on quic_conns list during affinity rebind
+ - CI: switch to Fastly CDN to download LibreSSL
+ - BUILD: ssl: switch LibreSSL to Fastly CDN
+ - BUG/MINOR: clock: fix a few occurrences of 'now' being used in place of 'date'
+ - BUG/MINOR: spoe: use "date" not "now" in debug messages
+ - BUG/MINOR: activity: show wall-clock date, not internal date in show activity
+ - BUG/MINOR: opentracing: use 'date' instead of 'now' in debug output
+ - Revert "BUG/MINOR: clock: fix a few occurrences of 'now' being used in place of 'date'"
+ - BUG/MINOR: calltrace: fix 'now' being used in place of 'date'
+ - BUG/MINOR: trace: show wall-clock date, not internal date in show activity
+ - BUG/MINOR: hlua: return wall-clock date, not internal date in core.now()
+ - BUG/MEDIUM: spoe: Don't start new applet if there are enough idle ones
+ - BUG/MINOR: stconn: Fix SC flags with same value
+ - BUG/MINOR: resolvers: Use sc_need_room() to wait more room when dumping stats
+ - BUG/MEDIUM: tcpcheck: Don't eval custom expect rule on an empty buffer
+ - BUG/MINOR: stats: report the correct start date in "show info"
+ - MINOR: time: add conversions to/from nanosecond timestamps
+ - MINOR: time: replace calls to tv_ms_elapsed() with a linear subtract
+ - MINOR: spoe: switch the timeval-based timestamps to nanosecond timestamps
+ - MEDIUM: tree-wide: replace timeval with nanoseconds in tv_accept and tv_request
+ - MINOR: stats: use nanoseconds, not timeval to compute uptime
+ - MINOR: activity: use nanoseconds, not timeval to compute uptime
+ - MINOR: checks: use a nanosecond counters instead of timeval for checks->start
+ - MINOR: clock: do not use now.tv_sec anymore
+ - MEDIUM: clock: replace timeval "now" with integer "now_ns"
+ - MINOR: clock: replace the timeval start_time with start_time_ns
+ - MINOR: sample: Add bc_rtt and bc_rttvar
+ - MINOR: quic: use real sending rate measurement
+ - MINOR: proxy: factorize send rate measurement
+
+2023/04/23 : 2.8-dev8
+ - BUG/MEDIUM: cli: Set SE_FL_EOI flag for '_getsocks' and 'quit' commands
+ - BUG/MEDIUM: cli: Eat output data when waiting for appctx shutdown
+ - BUG/MEDIUM: http-client: Eat output data when waiting for appctx shutdown
+ - BUG/MEDIUM: stats: Eat output data when waiting for appctx shutdown
+ - BUG/MEDIUM: log: Eat output data when waiting for appctx shutdown
+ - BUG/MEDIUM: dns: Kill idle DNS sessions during stopping stage
+ - BUG/MINOR: resolvers: Wakeup DNS idle task on stopping
+ - BUG/MEDIUM: resolvers: Force the connect timeout for DNS resolutions
+ - MINOR: hlua: Stop to check the SC state when executing a hlua cli command
+ - BUG/MEDIUM: mux-h1: Report EOI when a TCP connection is upgraded to H2
+ - BUG/MEDIUM: mux-h2: Never set SE_FL_EOS without SE_FL_EOI or SE_FL_ERROR
+ - MINOR: quic: Trace fix in quic_pto_pktns() (handshake status)
+ - BUG/MINOR: quic: Wrong packet number space probing before confirmed handshake
+ - MINOR: quic: Modify qc_try_rm_hp() traces
+ - MINOR: quic: Dump more information at proto level when building packets
+ - MINOR: quic: Add a trace for packet with an ACK frame
+ - MINOR: activity: add a line reporting the average CPU usage to "show activity"
+ - BUG/MINOR: stick_table: alert when type len has incorrect characters
+ - MINOR: thread: keep a bitmask of enabled groups in thread_set
+ - MINOR: fd: optimize fd_claim_tgid() for use in fd_insert()
+ - MINOR: fd: add a lock bit with the tgid
+ - MINOR: fd: implement fd_migrate_on() to migrate on a non-local thread
+ - MINOR: receiver: reserve special values for "shards"
+ - MINOR: bind-conf: support a new shards value: "by-group"
+ - BUG/MEDIUM: fd: don't wait for tmask to stabilize if we're not in it.
+ - MINOR: quic: Add packet loss and maximum cc window to "show quic"
+ - BUG/MINOR: quic: Ignored less than 1ms RTTs
+ - MINOR: quic: Add connection flags to traces
+ - BUG/MEDIUM: quic: Code sanitization about acknowledgements requirements
+ - BUG/MINOR: quic: Possible wrapped values used as ACK tree purging limit.
+ - BUG/MINOR: quic: SIGFPE in quic_cubic_update()
+ - MINOR: quic: Display the packet number space flags in traces
+ - MINOR: quic: Remove a useless test about probing in qc_prep_pkts()
+ - BUG/MINOR: quic: Wrong Application encryption level selection when probing
+ - CI: bump "actions/checkout" to v3 for cross zoo matrix
+ - CI: enable monthly test on Fedora Rawhide
+ - BUG/MINOR: stream: Fix test on SE_FL_ERROR on the wrong entity
+ - BUG/MEDIUM: stream: Report write timeouts before testing the flags
+ - BUG/MEDIUM: stconn: Do nothing in sc_conn_recv() when the SC needs more room
+ - MINOR: stream: Uninline and export sess_set_term_flags() function
+ - MINOR: filters: Review and simplify errors handling
+ - REGTESTS: fix the race conditions in log_uri.vtc
+ - MINOR: channel: Forward close to other side on abort
+ - MINOR: stream: Introduce stream_abort() to abort on both sides in same time
+ - MINOR: stconn: Rename SC_FL_SHUTR_NOW in SC_FL_ABRT_WANTED
+ - MINOR: channel/stconn: Replace channel_shutr_now() by sc_schedule_abort()
+ - MINOR: stconn: Rename SC_FL_SHUTW_NOW in SC_FL_SHUT_WANTED
+ - MINOR: channel/stconn: Replace channel_shutw_now() by sc_schedule_shutdown()
+ - MINOR: stconn: Rename SC_FL_SHUTR in SC_FL_ABRT_DONE
+ - MINOR: channel/stconn: Replace sc_shutr() by sc_abort()
+ - MINOR: stconn: Rename SC_FL_SHUTW in SC_FL_SHUT_DONE
+ - MINOR: channel/stconn: Replace sc_shutw() by sc_shutdown()
+ - MINOR: tree-wide: Replace several chn_cons() by the corresponding SC
+ - MINOR: tree-wide: Replace several chn_prod() by the corresponding SC
+ - BUG/MINOR: cli: Don't close when SE_FL_ERR_PENDING is set in cli analyzer
+ - MINOR: stconn: Stop to set SE_FL_ERROR on sending path
+ - MEDIUM: stconn: Forbid applets with more to deliver if EOI was reached
+ - MINOR: stconn: Don't clear SE_FL_ERROR when endpoint is reset
+ - MINOR: stconn: Add a flag to ack endpoint errors at SC level
+ - MINOR: backend: Set SC_FL_ERROR on connection error
+ - MINOR: stream: Set SC_FL_ERROR on channels' buffer allocation error
+ - MINOR: tree-wide: Test SC_FL_ERROR with SE_FL_ERROR from upper layer
+ - MEDIUM: tree-wide: Stop to set SE_FL_ERROR from upper layer
+ - MEDIUM: backend: Stop to use SE flags to detect connection errors
+ - MEDIUM: stream: Stop to use SE flags to detect read errors from analyzers
+ - MEDIUM: stream: Stop to use SE flags to detect endpoint errors
+ - MEDIUM: stconn: Rely on SC flags to handle errors instead of SE flags
+ - BUG/MINOR: stconn: Don't set SE_FL_ERROR at the end of sc_conn_send()
+ - BUG/MINOR: quic: Do not use ack delay during the handshakes
+ - CLEANUP: use "offsetof" where appropriate
+ - MINOR: ssl: remove OpenSSL 1.0.2 mention into certificate loading error
+ - BUG/MEDIUM: http-ana: Properly switch the request in tunnel mode on upgrade
+ - BUG/MEDIUM: log: Properly handle client aborts in syslog applet
+ - MINOR: stconn: Add a flag to report EOS at the stream-connector level
+ - MINOR: stconn: Propagate EOS from a mux to the attached stream-connector
+ - MINOR: stconn: Propagate EOS from an applet to the attached stream-connector
+ - MINOR: mux-h2: make the initial window size configurable per side
+ - MINOR: mux-h2: make the max number of concurrent streams configurable per side
+ - BUG/MINOR: task: allow to use tasklet_wakeup_after with tid -1
+ - CLEANUP: quic: remove unused QUIC_LOCK label
+ - CLEANUP: quic: remove unused scid_node
+ - CLEANUP: quic: remove unused qc param on stateless reset token
+ - CLEANUP: quic: rename quic_connection_id vars
+ - MINOR: quic: remove unneeded tasklet_wakeup after accept
+ - MINOR: quic: adjust Rx packet type parsing
+ - MINOR: quic: adjust quic CID derive API
+ - MINOR: quic: remove TID ref from quic_conn
+ - MEDIUM: quic: use a global CID trees list
+ - MINOR: quic: remove TID encoding in CID
+ - MEDIUM: quic: handle conn bootstrap/handshake on a random thread
+ - MINOR: quic: do not proceed to accept for closing conn
+ - MINOR: protocol: define new callback set_affinity
+ - MINOR: quic: delay post handshake frames after accept
+ - MEDIUM: quic: implement thread affinity rebinding
+ - BUG/MINOR: quic: transform qc_set_timer() as a reentrant function
+ - MINOR: quic: properly finalize thread rebinding
+ - MAJOR: quic: support thread balancing on accept
+ - MINOR: listener: remove unneeded local accept flag
+ - BUG/MINOR: http-ana: Update analyzers on both sides when switching in TUNNEL mode
+ - CLEANUP: backend: Remove useless debug message in assign_server()
+ - CLEANUP: cli: Remove useless debug message in cli_io_handler()
+ - BUG/MEDIUM: stconn: Propagate error on the SC on sending path
+ - MINOR: config: add "no-alpn" support for bind lines
+ - REGTESTS: add a new "ssl_alpn" test to test ALPN negotiation
+ - DOC: add missing documentation for "no-alpn" on bind lines
+ - MINOR: ssl: do not set ALPN callback with the empty string
+ - MINOR: ssl_crtlist: dump "no-alpn" on "show crtlist" when "no-alpn" was set
+ - MEDIUM: config: set useful ALPN defaults for HTTPS and QUIC
+ - BUG/MEDIUM: quic: prevent crash on Retry sending
+ - BUG/MINOR: cfgparse: make sure to include openssl-compat
+ - MINOR: clock: add now_mono_time_fast() function
+ - MINOR: clock: add now_cpu_time_fast() function
+ - MEDIUM: hlua: reliable timeout detection
+ - MEDIUM: hlua: introduce tune.lua.burst-timeout
+ - CLEANUP: hlua: avoid confusion between internal timers and tick based timers
+ - MINOR: hlua: hook yield on known lua state
+ - MINOR: hlua: safe coroutine.create()
+ - BUG/MINOR: quic: Stop removing ACK ranges when building packets
+ - MINOR: quic: Do not allocate too much ack ranges
+ - BUG/MINOR: quic: Unchecked buffer length when building the token
+ - BUG/MINOR: quic: Wrong Retry token generation timestamp computing
+ - BUG/MINOR: mux-quic: fix crash with app ops install failure
+ - BUG/MINOR: mux-quic: properly handle STREAM frame alloc failure
+ - BUG/MINOR: h3: fix crash on h3s alloc failure
+ - BUG/MINOR: quic: prevent crash on qc_new_conn() failure
+ - BUG/MINOR: quic: consume Rx datagram even on error
+ - CLEANUP: errors: fix obsolete function comments
+ - CLEANUP: server: fix update_status() function comment
+ - MINOR: server/event_hdl: add proxy_uuid to event_hdl_cb_data_server
+ - MINOR: hlua/event_hdl: rely on proxy_uuid instead of proxy_name for lookups
+ - MINOR: hlua/event_hdl: expose proxy_uuid variable in server events
+ - MINOR: hlua/event_hdl: fix return type for hlua_event_hdl_cb_data_push_args
+ - MINOR: server/event_hdl: prepare for upcoming refactors
+ - BUG/MINOR: event_hdl: don't waste 1 event subtype slot
+ - CLEANUP: event_hdl: updating obsolete comment for EVENT_HDL_CB_DATA
+ - CLEANUP: event_hdl: fix comment typo about _sync assertion
+ - MINOR: event_hdl: dynamically allocated event data members
+ - MINOR: event_hdl: provide event->when for advanced handlers
+ - MINOR: hlua/event_hdl: timestamp for events
+ - DOC: lua: restore 80 char limitation
+ - BUG/MINOR: server: incorrect report for tracking servers leaving drain
+ - MINOR: server: explicitly commit state change in srv_update_status()
+ - BUG/MINOR: server: don't miss proxy stats update on server state transitions
+ - BUG/MINOR: server: don't miss server stats update on server state transitions
+ - BUG/MINOR: server: don't use date when restoring last_change from state file
+ - MINOR: server: central update for server counters on state change
+ - MINOR: server: propagate server state change to lb through single function
+ - MINOR: server: propagate lb changes through srv_lb_propagate()
+ - MINOR: server: change adm_st_chg_cause storage type
+ - MINOR: server: srv_append_status refacto
+ - MINOR: server: change srv_op_st_chg_cause storage type
+ - CLEANUP: server: remove unused variables in srv_update_status()
+ - CLEANUP: server: fix srv_set_{running, stopping, stopped} function comment
+ - MINOR: server: pass adm and op cause to srv_update_status()
+ - MEDIUM: server: split srv_update_status() in two functions
+ - MINOR: server/event_hdl: prepare for server event data wrapper
+ - MINOR: quic: support migrating the listener as well
+ - MINOR: quic_sock: index li->per_thr[] on local thread id, not global one
+ - MINOR: listener: support another thread dispatch mode: "fair"
+ - MINOR: receiver: add a struct shard_info to store info about each shard
+ - MINOR: receiver: add RX_F_MUST_DUP to indicate that an rx must be duped
+ - MEDIUM: proto: duplicate receivers marked RX_F_MUST_DUP
+ - MINOR: proto: skip socket setup for duped FDs
+ - MEDIUM: config: permit to start a bind on multiple groups at once
+ - MINOR: listener: make accept_queue index atomic
+ - MEDIUM: listener: rework thread assignment to consider all groups
+ - MINOR: listener: use a common thr_idx from the reference listener
+ - MINOR: listener: resync with the thread index before heavy calculations
+ - MINOR: listener: make sure to avoid ABA updates in per-thread index
+ - MINOR: listener: always compare the local thread as well
+ - MINOR: Make `tasklet_free()` safe to be called with `NULL`
+ - CLEANUP: Stop checking the pointer before calling `tasklet_free()`
+ - CLEANUP: Stop checking the pointer before calling `pool_free()`
+ - CLEANUP: Stop checking the pointer before calling `task_free()`
+ - CLEANUP: Stop checking the pointer before calling `ring_free()`
+ - BUG/MINOR: cli: clarify error message about stats bind-process
+ - CI: cirrus-ci: bump FreeBSD image to 13-1
+ - REGTESTS: remove unsupported "stats bind-process" keyword
+ - CI: extend spellchecker whitelist, add "clen" as well
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: sock_inet: use SO_REUSEPORT_LB where available
+ - BUG/MINOR: tools: check libssl and libcrypto separately
+ - BUG/MINOR: config: fix NUMA topology detection on FreeBSD
+ - BUILD: sock_inet: forward-declare struct receiver
+ - BUILD: proto_tcp: export the correct names for proto_tcpv[46]
+ - CLEANUP: protocol: move the l3_addrlen to plug a hole in proto_fam
+ - CLEANUP: protocol: move the nb_receivers to plug a hole in protocol
+ - REORG: listener: move the bind_conf's thread setup code to listener.c
+ - MINOR: proxy: make proxy_type_str() recognize peers sections
+ - MEDIUM: peers: call bind_complete_thread_setup() to finish the config
+ - MINOR: protocol: add a flags field to store info about protocols
+ - MINOR: protocol: move the global reuseport flag to the protocols
+ - MINOR: listener: automatically adjust shards based on support for SO_REUSEPORT
+ - MINOR: protocol: add a function to check if some features are supported
+ - MINOR: sock: add a function to check for SO_REUSEPORT support at runtime
+ - MINOR: protocol: perform a live check for SO_REUSEPORT support
+ - MINOR: listener: do not restrict CLI to first group anymore
+ - MINOR: listener: add a new global tune.listener.default-shards setting
+ - MEDIUM: listener: switch the default sharding to by-group
+
+2023/04/08 : 2.8-dev7
+ - BUG/MINOR: stats: Don't replace sc_shutr() by SE_FL_EOS flag yet
+ - BUG/MEDIUM: mux-h2: Be able to detect connection error during handshake
+ - BUG/MINOR: quic: Missing padding in very short probe packets
+ - MINOR: proxy/pool: prevent unnecessary calls to pool_gc()
+ - CLEANUP: proxy: remove stop_time related dead code
+ - DOC/MINOR: reformat configuration.txt's "quoting and escaping" table
+ - MINOR: http_fetch: Add support for empty delim in url_param
+ - MINOR: http_fetch: add case insensitive support for smp_fetch_url_param
+ - MINOR: http_fetch: Add case-insensitive argument for url_param/urlp_val
+ - REGTESTS: Add test support for case insensitive for url_param
+ - BUG/MEDIUM: proxy/sktable: prevent watchdog trigger on soft-stop
+ - BUG/MINOR: backend: make be_usable_srv() consistent when stopping
+ - BUG/MINOR: ssl: Remove dead code in cli_parse_update_ocsp_response
+ - BUG/MINOR: ssl: Fix potential leak in cli_parse_update_ocsp_response
+ - BUG/MINOR: ssl: ssl-(min|max)-ver parameter not duplicated for bundles in crt-list
+ - BUG/MINOR: quic: Wrong use of now_ms timestamps (cubic algo)
+ - MINOR: quic: Add recovery related information to "show quic"
+ - BUG/MINOR: quic: Wrong use of now_ms timestamps (newreno algo)
+ - BUG/MINOR: quic: Missing max_idle_timeout initialization for the connection
+ - MINOR: quic: Implement cubic state trace callback
+ - MINOR: quic: Adjustments for generic control congestion traces
+ - MINOR: quic: Traces adjustments at proto level.
+ - MEDIUM: quic: Ack delay implementation
+ - BUG/MINOR: quic: Wrong rtt variance computing
+ - MINOR: cli: support filtering on FD types in "show fd"
+ - MINOR: quic: Add a fake congestion control algorithm named "nocc"
+ - CI: run smoke tests on config syntax to check memory related issues
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: exclude doc/{design-thoughts,internals} from spell check
+ - BUG/MINOR: quic: Remaining useless statements in cubic slow start callback
+ - BUG/MINOR: quic: Cubic congestion control window may wrap
+ - MINOR: quic: Add missing traces in cubic algorithm implementation
+ - BUG/MAJOR: quic: Congestion algorithms states shared between the connection
+ - BUG/MINOR: ssl: Undefined reference when building with OPENSSL_NO_DEPRECATED
+ - BUG/MINOR: quic: Remove useless BUG_ON() in newreno and cubic algo implementation
+ - MINOR: http-act: emit a warning when a header field name contains forbidden chars
+ - DOC: config: strict-sni allows to start without certificate
+ - MINOR: quic: Add trace to debug idle timer task issues
+ - BUG/MINOR: quic: Unexpected connection closures upon idle timer task execution
+ - BUG/MINOR: quic: Wrong idle timer expiration (during 20s)
+ - BUILD: quic: 32bits compilation issue in cli_io_handler_dump_quic()
+ - BUG/MINOR: quic: Possible wrong PTO computing
+ - BUG/MINOR: tcpcheck: Be able to expect an empty response
+ - BUG/MEDIUM: stconn: Add a missing return statement in sc_app_shutr()
+ - BUG/MINOR: stream: Fix test on channels flags to set clientfin/serverfin touts
+ - MINOR: applet: Uninline appctx_free()
+ - MEDIUM: applet/trace: Register a new trace source with its events
+ - CLEANUP: stconn: Remove remaining debug messages
+ - BUG/MEDIUM: channel: Improve reports for shut in co_getblk()
+ - BUG/MEDIUM: dns: Properly handle error when a response consumed
+ - MINOR: stconn: Remove unnecessary test on SE_FL_EOS before receiving data
+ - MINOR: stconn/channel: Move CF_READ_DONTWAIT into the SC and rename it
+ - MINOR: stconn/channel: Move CF_SEND_DONTWAIT into the SC and rename it
+ - MINOR: stconn/channel: Move CF_NEVER_WAIT into the SC and rename it
+ - MINOR: stconn/channel: Move CF_EXPECT_MORE into the SC and rename it
+ - MINOR: mux-pt: Report end-of-input with the end-of-stream after a read
+ - BUG/MINOR: mux-h1: Properly report EOI/ERROR on read0 in h1_rcv_pipe()
+ - CLEANUP: mux-h1/mux-pt: Remove useless test on SE_FL_SHR/SE_FL_SHW flags
+ - MINOR: mux-h1: Report an error to the SE descriptor on truncated message
+ - MINOR: stconn: Always ack EOS at the end of sc_conn_recv()
+ - MINOR: stconn/applet: Handle EOI in the applet .wake callback function
+ - MINOR: applet: No longer set EOI on the SC
+ - MINOR: stconn/applet: Handle EOS in the applet .wake callback function
+ - MEDIUM: cache: Use the sedesc to report and detect end of processing
+ - MEDIUM: cli: Use the sedesc to report and detect end of processing
+ - MINOR: dns: Remove the test on the opposite SC state to send requests
+ - MEDIUM: dns: Use the sedesc to report and detect end of processing
+ - MEDIUM: spoe: Use the sedesc to report and detect end of processing
+ - MEDIUM: hlua/applet: Use the sedesc to report and detect end of processing
+ - MEDIUM: log: Use the sedesc to report and detect end of processing
+ - MEDIUM: peers: Use the sedesc to report and detect end of processing
+ - MINOR: sink: Remove the tests on the opposite SC state to process messages
+ - MEDIUM: sink: Use the sedesc to report and detect end of processing
+ - MEDIUM: stats: Use the sedesc to report and detect end of processing
+ - MEDIUM: promex: Use the sedesc to report and detect end of processing
+ - MEDIUM: http_client: Use the sedesc to report and detect end of processing
+ - MINOR: stconn/channel: Move CF_EOI into the SC and rename it
+ - MEDIUM: tree-wide: Move flags about shut from the channel to the SC
+ - MINOR: tree-wide: Simplify some tests on SHUT flags by accessing SCs directly
+ - MINOR: stconn/applet: Add BUG_ON_HOT() to be sure SE_FL_EOS is never set alone
+ - MINOR: server: add SRV_F_DELETED flag
+ - BUG/MINOR: server/del: fix srv->next pointer consistency
+ - BUG/MINOR: stats: properly handle server stats dumping resumption
+ - BUG/MINOR: sink: free forward_px on deinit()
+ - BUG/MINOR: log: free log forward proxies on deinit()
+ - MINOR: server: always call ssl->destroy_srv when available
+ - MINOR: server: correctly free servers on deinit()
+ - BUG/MINOR: hlua: hook yield does not behave as expected
+ - MINOR: hlua: properly handle hlua_process_task HLUA_E_ETMOUT
+ - BUG/MINOR: hlua: enforce proper running context for register_x functions
+ - MINOR: hlua: Fix two functions that return nothing useful
+ - MEDIUM: hlua: Dynamic list of frontend/backend in Lua
+ - MINOR: hlua_fcn: alternative to old proxy and server attributes
+ - MEDIUM: hlua_fcn: dynamic server iteration and indexing
+ - MEDIUM: hlua_fcn/api: remove some old server and proxy attributes
+ - CLEANUP: hlua: fix conflicting comment in hlua_ctx_destroy()
+ - MINOR: hlua: add simple hlua reference handling API
+ - MINOR: hlua: fix return type for hlua_checkfunction() and hlua_checktable()
+ - BUG/MINOR: hlua: fix reference leak in core.register_task()
+ - BUG/MINOR: hlua: fix reference leak in hlua_post_init_state()
+ - BUG/MINOR: hlua: prevent function and table reference leaks on errors
+ - CLEANUP: hlua: use hlua_ref() instead of luaL_ref()
+ - CLEANUP: hlua: use hlua_pushref() instead of lua_rawgeti()
+ - CLEANUP: hlua: use hlua_unref() instead of luaL_unref()
+ - MINOR: hlua: simplify lua locking
+ - BUG/MEDIUM: hlua: prevent deadlocks with main lua lock
+ - MINOR: hlua_fcn: add server->get_rid() method
+ - MINOR: hlua: support for optional arguments to core.register_task()
+ - DOC: lua: silence "literal block ends without a blank line" Sphinx warnings
+ - DOC: lua: silence "Unexpected indentation" Sphinx warnings
+ - BUG/MINOR: event_hdl: fix rid storage type
+ - BUG/MINOR: event_hdl: make event_hdl_subscribe thread-safe
+ - MINOR: event_hdl: global sublist management clarification
+ - BUG/MEDIUM: event_hdl: clean soft-stop handling
+ - BUG/MEDIUM: event_hdl: fix async data refcount issue
+ - MINOR: event_hdl: normal tasks support for advanced async mode
+ - MINOR: event_hdl: add event_hdl_async_equeue_isempty() function
+ - MINOR: event_hdl: add event_hdl_async_equeue_size() function
+ - MINOR: event_hdl: pause/resume for subscriptions
+ - MINOR: proxy: add findserver_unique_id() and findserver_unique_name()
+ - MEDIUM: hlua/event_hdl: initial support for event handlers
+ - MINOR: hlua/event_hdl: per-server event subscription
+ - EXAMPLES: add basic event_hdl lua example script
+ - MINOR: http-ana: Add a HTTP_MSGF flag to state the Expect header was checked
+ - BUG/MINOR: http-ana: Don't switch message to DATA when waiting for payload
+ - BUG/MINOR: quic: Possible crashes in qc_idle_timer_task()
+ - MINOR: quic: derive first DCID from client ODCID
+ - MINOR: quic: remove ODCID dedicated tree
+ - MINOR: quic: remove address concatenation to ODCID
+ - BUG/MINOR: mworker: unset more internal variables from program section
+ - BUG/MINOR: errors: invalid use of memprintf in startup_logs_init()
+ - MINOR: applet: Use unsafe version to get stream from SC in the trace function
+ - BUG/MINOR: http-ana: Use an unsigned integer for http_msg flags
+ - MINOR: compression: Make compression offload a flag
+ - MINOR: compression: Prepare compression code for request compression
+ - MINOR: compression: Store algo and type for both request and response
+ - MINOR: compression: Count separately request and response compression
+ - MEDIUM: compression: Make it so we can compress requests as well.
+ - BUG/MINOR: lua: remove incorrect usage of strncat()
+ - CLEANUP: tcpcheck: remove the only occurrence of sprintf() in the code
+ - CLEANUP: ocsp: do no use strpcy() to copy a path!
+ - CLEANUP: tree-wide: remove strpcy() from constant strings
+ - CLEANUP: opentracing: remove the last two occurrences of strncat()
+ - BUILD: compiler: fix __equals_1() on older compilers
+ - MINOR: compiler: define a __attribute__warning() macro
+ - BUILD: bug.h: add a warning in the base API when unsafe functions are used
+ - BUG/MEDIUM: listeners: Use the right parameters for strlcpy2().
+
+2023/03/28 : 2.8-dev6
+ - BUG/MEDIUM: mux-pt: Set EOS on error on sending path if read0 was received
+ - MINOR: ssl: Change the ocsp update log-format
+ - MINOR: ssl: Use ocsp update task for "update ssl ocsp-response" command
+ - BUG/MINOR: ssl: Fix double free in ocsp update deinit
+ - MINOR: ssl: Accept certpath as param in "show ssl ocsp-response" CLI command
+ - MINOR: ssl: Add certificate path to 'show ssl ocsp-response' output
+ - BUG/MEDIUM: proxy: properly stop backends on soft-stop
+ - BUG/MEDIUM: resolvers: Properly stop server resolutions on soft-stop
+ - DEBUG: cli/show_fd: Display connection error code
+ - DEBUG: ssl-sock/show_fd: Display SSL error code
+ - BUG/MEDIUM: mux-h1: Don't block SE_FL_ERROR if EOS is not reported on H1C
+ - BUG/MINOR: tcp_sample: fix a bug in fc_dst_port and fc_dst_is_local sample fetches
+ - BUG/MINOR: quic: Missing STREAM frame length updates
+ - BUG/MEDIUM: connection: Preserve flags when a conn is removed from an idle list
+ - BUG/MINOR: mux-h2: make sure the h2c task exists before refreshing it
+ - MINOR: buffer: add br_count() to return the number of allocated bufs
+ - MINOR: buffer: add br_single() to check if a buffer ring has more than one buf
+ - BUG/MEDIUM: mux-h2: only restart sending when mux buffer is decongested
+ - BUG/MINOR: mux-h2: set CO_SFL_STREAMER when sending lots of data
+ - BUG/MINOR: quic: Missing STREAM frame data pointer updates
+ - MINOR: stick-table: add sc-add-gpc() to http-after-response
+ - MINOR: doc: missing entries for sc-add-gpc()
+ - BUG/MAJOR: qpack: fix possible read out of bounds in static table
+ - OPTIM: mux-h1: limit first read size to avoid wrapping
+ - MINOR: mux-h2: set CO_SFL_MSG_MORE when sending multiple buffers
+ - MINOR: ssl-sock: pass the CO_SFL_MSG_MORE info down the stack
+ - MINOR: quic: Stop stressing the acknowledgments process (RX ACK frames)
+ - BUG/MINOR: quic: Dysfunctional 01RTT packet number space probing
+ - BUG/MEDIUM: stream: do not try to free a failed stream-conn
+ - BUG/MEDIUM: mux-h2: do not try to free an unallocated h2s->sd
+ - BUG/MEDIUM: mux-h2: erase h2c->wait_event.tasklet on error path
+ - BUG/MEDIUM: stconn: don't set the type before allocation succeeds
+ - BUG/MINOR: stconn: fix sedesc memory leak on stream allocation failure
+ - MINOR: dynbuf: set POOL_F_NO_FAIL on buffer allocation
+ - MINOR: pools: preset the allocation failure rate to 1% with -dMfail
+ - BUG/MEDIUM: mux-h1: properly destroy a partially allocated h1s
+ - BUG/MEDIUM: applet: only set appctx->sedesc on successful allocation
+ - BUG/MINOR: quic: wake up MUX on probing only for 01RTT
+ - BUG/MINOR: quic: ignore congestion window on probing for MUX wakeup
+ - BUILD: thread: implement thread_harmless_end_sig() for threadless builds
+ - BUILD: thread: silence a build warning when threads are disabled
+ - MINOR: debug: support dumping the libs addresses when running in verbose mode
+ - BUG/MINOR: illegal use of the malloc_trim() function if jemalloc is used
+ - BUG/MINOR: trace: fix hardcoded level for TRACE_PRINTF
+ - BUG/MEDIUM: mux-quic: release data from conn flow-control on qcs reset
+ - MINOR: mux-quic: complete traces for qcs emission
+ - MINOR: mux-quic: adjust trace level for MAX_DATA/MAX_STREAM_DATA recv
+ - MINOR: mux-quic: add flow-control info to minimal trace level
+ - MINOR: pools: make sure 'no-memory-trimming' is always used
+ - MINOR: pools: intercept malloc_trim() instead of trying to plug holes
+ - MEDIUM: pools: move the compat code from trim_all_pools() to malloc_trim()
+ - MINOR: pools: export trim_all_pools()
+ - MINOR: pattern: use trim_all_pools() instead of a conditional malloc_trim()
+ - MINOR: tools: relax dlopen() on malloc/free checks
+ - MEDIUM: tools: further relax dlopen() checks to consider grouped symbols
+ - BUG/MINOR: pools: restore detection of built-in allocator
+ - MINOR: pools: report a replaced memory allocator instead of just malloc_trim()
+ - BUG/MINOR: h3: properly handle incomplete remote uni stream type
+ - BUG/MINOR: mux-quic: prevent CC status to be erased by shutdown
+ - MINOR: mux-quic: interrupt qcc_recv*() operations if CC scheduled
+ - MINOR: mux-quic: ensure CONNECTION_CLOSE is scheduled once per conn
+ - MINOR: mux-quic: close on qcs allocation failure
+ - MINOR: mux-quic: close on frame alloc failure
+ - BUG/MINOR: syslog: Request for more data if message was not fully received
+ - BUG/MEDIUM: stats: Consume the request except when parsing the POST payload
+ - DOC: config: set-var() dconv rendering issues
+ - BUG/MEDIUM: mux-h1: Wakeup H1C on shutw if there is no I/O subscription
+ - BUG/MINOR: applet/new: fix sedesc freeing logic
+ - BUG/MINOR: quic: Missing STREAM frame type updated
+ - BUILD: da: extends CFLAGS to support API v3 from 3.1.7 and onwards.
+ - BUG/MINOR: ssl: Stop leaking `err` in ssl_sock_load_ocsp()
+
+2023/03/10 : 2.8-dev5
+ - MINOR: ssl: rename confusing ssl_bind_kws
+ - BUG/MINOR: config: crt-list keywords mistaken for bind ssl keywords
+ - BUG/MEDIUM: http-ana: Detect closed SC on opposite side during body forwarding
+ - BUG/MEDIUM: stconn: Don't rearm the read expiration date if EOI was reached
+ - MINOR: global: Add an option to disable the data fast-forward
+ - MINOR: haproxy: Add an command option to disable data fast-forward
+ - REGTESTS: Remove unsupported feature command in http_splicing.vtc
+ - BUG/MEDIUM: wdt: fix wrong thread being checked for sleeping
+ - BUG/MINOR: sched: properly report long_rq when tasks remain in the queue
+ - BUG/MEDIUM: sched: allow a bit more TASK_HEAVY to be processed when needed
+ - MINOR: threads: add flags to know if a thread is started and/or running
+ - MINOR: h3/hq-interop: handle no data in decode_qcs() with FIN set
+ - BUG/MINOR: mux-quic: transfer FIN on empty STREAM frame
+ - BUG/MINOR: mworker: prevent incorrect values in uptime
+ - MINOR: h3: add traces on decode_qcs callback
+ - BUG/MINOR: quic: Possible unexpected counter incrementation on send*() errors
+ - MINOR: quic: Add new traces about by connection RX buffer handling
+ - MINOR: quic: Move code to wakeup the timer task to avoid anti-amplification deadlock
+ - BUG/MINOR: quic: Really cancel the connection timer from qc_set_timer()
+ - MINOR: quic: Simplification for qc_set_timer()
+ - MINOR: quic: Kill the connections on ICMP (port unreachable) packet receipt
+ - MINOR: quic: Add traces to qc_kill_conn()
+ - MINOR: quic: Make qc_dgrams_retransmit() return a status.
+ - BUG/MINOR: quic: Missing call to task_queue() in qc_idle_timer_do_rearm()
+ - MINOR: quic: Add a trace to identify connections which sent Initial packet.
+ - MINOR: quic: Add <pto_count> to the traces
+ - BUG/MINOR: quic: Do not probe with too little Initial packets
+ - BUG/MINOR: quic: Wrong initialization for io_cb_wakeup boolean
+ - BUG/MINOR: quic: Do not drop too small datagrams with Initial packets
+ - BUG/MINOR: quic: Missing padding for short packets
+ - MINOR: quic: adjust request reject when MUX is already freed
+ - BUG/MINOR: quic: also send RESET_STREAM if MUX released
+ - BUG/MINOR: quic: acknowledge STREAM frame even if MUX is released
+ - BUG/MINOR: h3: prevent hypothetical demux failure on int overflow
+ - MEDIUM: h3: enforce GOAWAY by resetting higher unhandled stream
+ - MINOR: mux-quic: define qc_shutdown()
+ - MINOR: mux-quic: define qc_process()
+ - MINOR: mux-quic: implement client-fin timeout
+ - MEDIUM: mux-quic: properly implement soft-stop
+ - MINOR: quic: mark quic-conn as jobs on socket allocation
+ - MEDIUM: quic: trigger fast connection closing on process stopping
+ - MINOR: mux-h2/traces: do not log h2s pointer for dummy streams
+ - MINOR: mux-h2/traces: add a missing TRACE_LEAVE() in h2s_frt_handle_headers()
+ - BUG/MEDIUM: quic: Missing TX buffer draining from qc_send_ppkts()
+ - DEBUG: stream: Add a BUG_ON to never exit process_stream with an expired task
+ - DOC: config: Fix description of options about HTTP connection modes
+ - MINOR: proxy: Only consider backend httpclose option for server connections
+ - BUG/MINOR: haproxy: Fix option to disable the fast-forward
+ - DOC: config: Add the missing tune.fail-alloc option from global listing
+ - MINOR: cfgcond: Implement strstr condition expression
+ - MINOR: cfgcond: Implement enabled condition expression
+ - REGTESTS: Skip http_splicing.vtc script if fast-forward is disabled
+ - REGTESTS: Fix ssl_errors.vtc script to wait for connections close
+ - BUG/MINOR: mworker: stop doing strtok directly from the env
+ - BUG/MEDIUM: mworker: prevent inconsistent reload when upgrading from old versions
+ - BUG/MEDIUM: mworker: don't register mworker_accept_wrapper() when master FD is wrong
+ - MINOR: startup: HAPROXY_STARTUP_VERSION contains the version used to start
+ - BUG/MINOR: cache: Cache response even if request has "no-cache" directive
+ - BUG/MINOR: cache: Check cache entry is complete in case of Vary
+ - MINOR: compiler: add a TOSTR() macro to turn a value into a string
+ - BUG/MINOR: lua/httpclient: missing free in hlua_httpclient_send()
+ - BUG/MEDIUM: httpclient/lua: fix a race between lua GC and hlua_ctx_destroy
+ - MEDIUM: channel: Remove CF_READ_NOEXP flag
+ - MAJOR: channel: Remove flags to report READ or WRITE errors
+ - DEBUG: stream/trace: Add sedesc flags in trace messages
+ - MINOR: channel/stconn: Move rto/wto from the channel to the stconn
+ - MEDIUM: channel/stconn: Move rex/wex timer from the channel to the sedesc
+ - MEDIUM: stconn: Don't requeue the stream's task after I/O
+ - MEDIUM: stconn: Replace read and write timeouts by a unique I/O timeout
+ - MEDIUM: stconn: Add two date to track successful reads and blocked sends
+ - MINOR: applet/stconn: Add a SE flag to specify an endpoint does not expect data
+ - MAJOR: stream: Use SE descriptor date to detect read/write timeouts
+ - MINOR: stream: Dump the task expiration date in trace messages
+ - MINOR: stream: Report rex/wex value using the sedesc date in trace messages
+ - MINOR: stream: Use relative expiration date in trace messages
+ - MINOR: stconn: Always report READ/WRITE event on shutr/shutw
+ - CLEANUP: stconn: Remove old read and write expiration dates
+ - MINOR: stconn: Set half-close timeout using proxy settings
+ - MINOR: stconn: Remove half-closed timeout
+ - REGTESTS: cache: Use rxresphdrs to only get headers for 304 responses
+ - MINOR: stconn: Add functions to set/clear SE_FL_EXP_NO_DATA flag from endpoint
+ - BUG/MINOR: proto_ux: report correct error when bind_listener fails
+ - BUG/MINOR: protocol: fix minor memory leak in protocol_bind_all()
+ - MINOR: proto_uxst: add resume method
+ - MINOR: listener/api: add lli hint to listener functions
+ - MINOR: listener: add relax_listener() function
+ - MINOR: listener: workaround for closing a tiny race between resume_listener() and stopping
+ - MINOR: listener: make sure we don't pause/resume bypassed listeners
+ - BUG/MEDIUM: listener: fix pause_listener() suspend return value handling
+ - BUG/MINOR: listener: fix resume_listener() resume return value handling
+ - BUG/MEDIUM: resume from LI_ASSIGNED in default_resume_listener()
+ - MINOR: listener: pause_listener() becomes suspend_listener()
+ - BUG/MEDIUM: listener/proxy: fix listeners notify for proxy resume
+ - BUG/MINOR: sock_unix: match finalname with tempname in sock_unix_addrcmp()
+ - MEDIUM: proto_ux: properly suspend named UNIX listeners
+ - MINOR: proto_ux: ability to dump ABNS names in error messages
+ - MINOR: haproxy: always protocol unbind on startup error path
+ - BUILD: quic: 32-bits compilation issue with %zu in quic_rx_pkts_del()
+ - BUG/MINOR: ring: do not realign ring contents on resize
+ - MEDIUM: ring: make the offset relative to the head/tail instead of absolute
+ - CLEANUP: ring: remove the now unused ring's offset
+ - MINOR: config: add HAPROXY_BRANCH environment variable
+ - BUILD: thread: Fix several 32 bits compilation issues with uint64_t variables
+ - BUG/MEDIUM: fd: avoid infinite loops in fd_add_to_fd_list and fd_rm_from_fd_list
+ - BUG/MEDIUM: h1-htx: Never copy more than the max data allowed during parsing
+ - BUG/MINOR: stream: Remove BUG_ON about the task expiration in process_stream()
+ - MINOR: stream: Handle stream's timeouts in a dedicated function
+ - MEDIUM: stream: Eventually handle stream timeouts when exiting process_stream()
+ - MINOR: stconn: Report a send activity when endpoint is willing to consume data
+ - BUG/MEDIUM: stconn: Report a blocked send if some output data are not consumed
+ - MEDIUM: mux-h1: Don't expect data from server as long as request is unfinished
+ - MEDIUM: mux-h2: Don't expect data from server as long as request is unfinished
+ - MEDIUM: mux-quic: Don't expect data from server as long as request is unfinished
+ - DOC: config: Clarify the meaning of 'hold' in the 'resolvers' section
+ - DOC: config: Replace TABs by spaces
+ - BUG/MINOR: fd: used the update list from the fd's group instead of tgid
+ - BUG/MEDIUM: fd: make fd_delete() support being called from a different group
+ - CLEANUP: listener: only store conn counts for local threads
+ - MINOR: tinfo: make thread_set functions return nth group/mask instead of first
+ - MEDIUM: quic: improve fatal error handling on send
+ - MINOR: quic: consider EBADF as critical on send()
+ - BUG/MEDIUM: connection: Clear flags when a conn is removed from an idle list
+ - BUG/MINOR: mux-h1: Don't report an error on an early response close
+ - BUG/MINOR: http-check: Don't set HTX_SL_F_BODYLESS flag with a log-format body
+ - BUG/MINOR: http-check: Skip C-L header for empty body when it's not mandatory
+ - BUG/MINOR: http-fetch: recognize IPv6 addresses in square brackets in req.hdr_ip()
+ - REGTEST: added tests covering smp_fetch_hdr_ip()
+ - MINOR: quic: simplify return path in send functions
+ - MINOR: quic: implement qc_notify_send()
+ - MINOR: quic: purge txbuf before preparing new packets
+ - MEDIUM: quic: implement poller subscribe on sendto error
+ - MINOR: quic: notify on send ready
+ - BUG/MINOR: http-ana: Don't increment conn_retries counter before the L7 retry
+ - BUG/MINOR: http-ana: Do a L7 retry on read error if there is no response
+ - BUG/MEDIUM: http-ana: Don't close request side when waiting for response
+ - BUG/MINOR: mux-h1: Report a parsing error on abort with pending data
+ - MINOR: ssl: Destroy ocsp update http_client during cleanup
+ - MINOR: ssl: Reinsert ocsp update entries later in case of unknown error
+ - MINOR: ssl: Add ocsp update success/failure counters
+ - MINOR: ssl: Store specific ocsp update errors in response and update ctx
+ - MINOR: ssl: Add certificate's path to certificate_ocsp structure
+ - MINOR: ssl: Add 'show ssl ocsp-updates' CLI command
+ - MINOR: ssl: Add sample fetches related to OCSP update
+ - MINOR: ssl: Use dedicated proxy and log-format for OCSP update
+ - MINOR: ssl: Reorder struct certificate_ocsp members
+ - MINOR: ssl: Increment OCSP update replay delay in case of failure
+ - MINOR: ssl: Add way to dump ocsp response in base64
+ - MINOR: ssl: Add global options to modify ocsp update min/max delay
+ - REGTESTS: ssl: Fix ocsp update crt-lists
+ - REGTESTS: ssl: Add test for new ocsp update cli commands
+ - MINOR: ssl: Add ocsp-update information to "show ssl crt-list"
+ - BUG/MINOR: ssl: Fix ocsp-update when using "add ssl crt-list"
+ - MINOR: ssl: Replace now.tv_sec with date.tv_sec in ocsp update task
+ - BUG/MINOR: ssl: Use 'date' instead of 'now' in ocsp stapling callback
+ - BUG/MEDIUM: quic: properly handle duplicated STREAM frames
+ - BUG/MINOR: cli: fix CLI handler "set anon global-key" call
+ - MINOR: http_ext: adding some documentation, forgot to inline function
+ - BUG/MINOR: quic: Do not send too small datagrams (with Initial packets)
+ - MINOR: quic: Add a BUG_ON_HOT() call for too small datagrams
+ - BUG/MINOR: quic: Ensure to be able to build datagrams to be retransmitted
+ - BUG/MINOR: quic: v2 Initial packets decryption failed
+ - MINOR: quic: Add traces about QUIC TLS key update
+ - BUG/MINOR: quic: Remove force_ack for Initial,Handshake packets
+ - BUG/MINOR: quic: Ensure not to retransmit packets with no ack-eliciting frames
+ - BUG/MINOR: quic: Do not resend already acked frames
+ - BUG/MINOR: quic: Missing detections of amplification limit reached
+ - MINOR: quic: Send PING frames when probing Initial packet number space
+ - BUG/MEDIUM: quic: do not crash when handling STREAM on released MUX
+ - BUG/MAJOR: fd/thread: fix race between updates and closing FD
+ - BUG/MEDIUM: dns: ensure ring offset is properly readjusted to head
+ - BUG/MINOR: mux-quic: properly init STREAM frame as not duplicated
+ - MINOR: quic: Do not accept wrong active_connection_id_limit values
+ - MINOR: quic: Store the next connection IDs sequence number in the connection
+ - MINOR: quic: Typo fix for ACK_ECN frame
+ - MINOR: quic: RETIRE_CONNECTION_ID frame handling (RX)
+ - MINOR: quic: Useless TLS context allocations in qc_do_rm_hp()
+ - MINOR: quic: Add spin bit support
+ - MINOR: quic: Add transport parameters to "show quic"
+ - BUG/MEDIUM: sink/forwarder: ensure ring offset is properly readjusted to head
+ - BUG/MINOR: dns: fix ring offset calculation on first read
+ - BUG/MINOR: dns: fix ring offset calculation in dns_resolve_send()
+ - MINOR: jwt: Add support for RSA-PSS signatures (PS256 algorithm)
+ - MINOR: h3: add traces on h3_init_uni_stream() error paths
+ - MINOR: quic: create a global list dedicated for closing QUIC conns
+ - MINOR: quic: handle new closing list in show quic
+ - MEDIUM: quic: release closing connections on stopping
+ - BUG/MINOR: quic: Wrong RETIRE_CONNECTION_ID sequence number check
+ - MINOR: fd/cli: report the polling mask in "show fd"
+ - CLEANUP: sock: always perform last connection updates before wakeup
+ - MINOR: quic: Do not stress the peer during retransmissions of lost packets
+ - BUG/MINOR: init: properly detect NUMA bindings on large systems
+ - BUG/MINOR: thread: report thread and group counts in the correct order
+ - BUG/MAJOR: fd/threads: close a race on closing connections after takeover
+ - MINOR: debug: add random delay injection with "debug dev delay-inj"
+ - BUG/MINOR: mworker: use MASTER_MAXCONN as default maxconn value
+ - BUG/MINOR: quic: Missing listener accept queue tasklet wakeups
+ - MINOR: quic_sock: un-statify quic_conn_sock_fd_iocb()
+ - DOC: config: fix typo "dependeing" in bind thread description
+ - DOC/CLEANUP: fix typos
+
+2023/02/14 : 2.8-dev4
+ - BUG/MINOR: stats: fix source buffer size for http dump
+ - BUG/MEDIUM: stats: fix resolvers dump
+ - BUG/MINOR: stats: fix ctx->field update in stats_dump_proxy_to_buffer()
+ - BUG/MINOR: stats: fix show stats field ctx for servers
+ - BUG/MINOR: stats: fix STAT_STARTED behavior with full htx
+ - MINOR: quic: Update version_information transport parameter to draft-14
+ - BUG/MINOR: stats: Prevent HTTP "other sessions" counter underflows
+ - BUG/MEDIUM: thread: fix extraneous shift in the thread_set parser
+ - BUG/MEDIUM: listener/thread: bypass shards setting on failed thread resolution
+ - BUG/MINOR: ssl/crt-list: warn when a line is malformed
+ - BUG/MEDIUM: stick-table: do not leave entries in end of window during purge
+ - BUG/MINOR: clock: do not mix wall-clock and monotonic time in uptime calculation
+ - BUG/MEDIUM: cache: use the correct time reference when comparing dates
+ - MEDIUM: clock: force internal time to wrap early after boot
+ - BUILD: ssl/ocsp: ssl_ocsp-t.h depends on ssl_sock-t.h
+ - MINOR: ssl/ocsp: add a function to check the OCSP update configuration
+ - MINOR: cfgparse/server: move (min/max)conn postparsing logic into dedicated function
+ - BUG/MINOR: server/add: ensure minconn/maxconn consistency when adding server
+ - BUG/MEDIUM: stconn: Schedule a shutw on shutr if data must be sent first
+ - BUG/MEDIUM: quic: fix crash when "option nolinger" is set in the frontend
+ - MINOR: quic: implement a basic "show quic" CLI handler
+ - MINOR: quic: display CIDs and state in "show quic"
+ - MINOR: quic: display socket info on "show quic"
+ - MINOR: quic: display infos about various encryption level on "show quic"
+ - MINOR: quic: display Tx stream info on "show quic"
+ - MINOR: quic: filter closing conn on "show quic"
+ - BUG/MINOR: quic: fix filtering of closing connections on "show quic"
+ - BUG/MEDIUM: stconn: Don't needlessly wake the stream on send during fast-forward
+ - BUG/MINOR: quic: fix type bug on "show quic" for 32-bits arch
+ - BUG/MINOR: mworker: fix uptime for master process
+ - BUG/MINOR: clock/stats: also use start_time not start_date in HTML info
+ - BUG/MEDIUM: stconn: stop to enable/disable reads from streams via si_update_rx
+ - BUG/MEDIUM: quic: Buffer overflow when looking through QUIC CLI keyword list
+ - DOC: proxy-protocol: fix wrong byte in provided example
+ - MINOR: ssl-ckch: Stop to test CF_WRITE_ERROR to commit CA/CRL file
+ - MINOR: bwlim: Remove useless test on CF_READ_ERROR to detect the last packet
+ - BUG/MINOR: http-ana: Fix condition to set LAST termination flag
+ - BUG/MINOR: mux-h1: Don't report an H1C error on client timeout
+ - BUG/MEDIUM: spoe: Don't set the default target for the SPOE agent frontend
+ - BUG/MINOR: quic: Wrong datagram dispatch because of qc_check_dcid()
+ - BUG/CRITICAL: http: properly reject empty http header field names
+
+2023/02/04 : 2.8-dev3
+ - BUG/MINOR: sink: make sure to always properly unmap a file-backed ring
+ - DEV: haring: add a new option "-r" to automatically repair broken files
+ - BUG/MINOR: ssl: Fix leaks in 'update ssl ocsp-response' CLI command
+ - MINOR: ssl: Remove debug fprintf in 'update ssl ocsp-response' cli command
+ - MINOR: connection: add a BUG_ON() to detect destroying connection in idle list
+ - MINOR: mux-quic/h3: send SETTINGS as soon as transport is ready
+ - BUG/MINOR: h3: fix GOAWAY emission
+ - BUG/MEDIUM: mux-quic: fix crash on H3 SETTINGS emission
+ - BUG/MEDIUM: hpack: fix incorrect huffman decoding of some control chars
+ - BUG/MINOR: log: release global log servers on exit
+ - BUG/MINOR: ring: release the backing store name on exit
+ - BUG/MINOR: sink: free the forwarding task on exit
+ - CLEANUP: trace: remove the QUIC-specific ifdefs
+ - MINOR: trace: add a TRACE_ENABLED() macro to determine if a trace is active
+ - MINOR: trace: add a trace_no_cb() dummy callback for when to use no callback
+ - MINOR: trace: add the long awaited TRACE_PRINTF()
+ - MINOR: h2: add h2_phdr_to_ist() to make ISTs from pseudo headers
+ - MEDIUM: mux-h2/trace: add tracing support for headers
+ - CLEANUP: mux-h2/trace: shorten the name of the header enc/dec functions
+ - DEV: hpack: fix `trash` build regression
+ - MINOR: http_htx: add http_append_header() to append value to header
+ - MINOR: http_htx: add http_prepend_header() to prepend value to header
+ - MINOR: sample: add ARGC_OPT
+ - MINOR: proxy: introduce http only options
+ - MINOR: proxy/http_ext: introduce proxy forwarded option
+ - REGTEST: add ifnone-forwardfor test
+ - MINOR: proxy: move 'forwardfor' option to http_ext
+ - MINOR: proxy: move 'originalto' option to http_ext
+ - MINOR: http_ext: introduce http ext converters
+ - MINOR: http_ext: add rfc7239_is_valid converter
+ - MINOR: http_ext: add rfc7239_field converter
+ - MINOR: http_ext: add rfc7239_n2nn converter
+ - MINOR: http_ext: add rfc7239_n2np converter
+ - REGTEST: add RFC7239 forwarded header tests
+ - OPTIM: http_ext/7239: introduce c_mode to save some space
+ - MINOR: http_ext/7239: warn the user when fetch is not available
+ - MEDIUM: proxy/http_ext: implement dynamic http_ext
+ - MINOR: cfgparse/http_ext: move post-parsing http_ext steps to http_ext
+ - DOC: config: fix option spop-check proxy compatibility
+ - BUG/MINOR: fcgi-app: prevent 'use-fcgi-app' in default section
+ - DOC: config: 'http-send-name-header' option may be used in default section
+ - BUG/MINOR: mux-h2: Fix possible null pointer deref on h2c in _h2_trace_header()
+ - BUG/MINOR: http_ext/7239: ipv6 dumping relies on out of scope variables
+ - BUG/MEDIUM: h3: do not crash if no buf space for trailers
+ - OPTIM: h3: skip buf realign if no trailer to encode
+ - MINOR: mux-quic/h3: define stream close callback
+ - BUG/MEDIUM: h3: handle STOP_SENDING on control stream
+ - BUG/MINOR: h3: reject RESET_STREAM received for control stream
+ - MINOR: h3: add missing traces on closure
+ - BUG/MEDIUM: ssl: wrong eviction from the session cache tree
+ - BUG/MINOR: h3: fix crash due to h3 traces
+ - BUG/MINOR: h3: fix crash due to h3 traces
+ - BUG/MEDIUM: thread: consider secondary threads as idle+harmless during boot
+ - BUG/MINOR: stats: use proper buffer size for http dump
+ - BUILD: makefile: fix PCRE overriding specific lib path
+ - MINOR: quic: remove fin from quic_stream frame type
+ - MINOR: quic: ensure offset is properly set for STREAM frames
+ - MINOR: quic: define new functions for frame alloc
+ - MINOR: quic: refactor frame deallocation
+ - MEDIUM: quic: implement a retransmit limit per frame
+ - MINOR: quic: add config for retransmit limit
+ - OPTIM: htx: inline the most common memcpy(8)
+ - CLEANUP: quic: no need for atomics on packet refcnt
+ - MINOR: stats: add by HTTP version cumulated number of sessions and requests
+ - BUG/MINOR: quic: Possible stream truncations under heavy loss
+ - BUG/MINOR: quic: Too big PTO during handshakes
+ - MINOR: quic: Add a trace about variable states in qc_prep_fast_retrans()
+ - BUG/MINOR: quic: Do not ignore coalesced packets in qc_prep_fast_retrans()
+ - MINOR: quic: When probing Handshake packet number space, also probe the Initial one
+ - BUG/MAJOR: quic: Possible crash when processing 1-RTT during 0-RTT session
+ - MEDIUM: quic: Remove qc_conn_finalize() from the ClientHello TLS callbacks
+ - BUG/MINOR: quic: Unchecked source connection ID
+ - MEDIUM: listener: move the analysers mask to the bind_conf
+ - MINOR: listener: move maxseg and tcp_ut to bind_conf
+ - MINOR: listener: move maxaccept from listener to bind_conf
+ - MINOR: listener: move the backlog setting from listener to bind_conf
+ - MINOR: listener: move the maxconn parameter to the bind_conf
+ - MINOR: listener: move the ->accept callback to the bind_conf
+ - MINOR: listener: remove the useless ->default_target field
+ - MINOR: listener: move the nice field to the bind_conf
+ - MINOR: listener: move the NOLINGER option to the bind_conf
+ - MINOR: listener: move the NOQUICKACK option to the bind_conf
+ - MINOR: listener: move the DEF_ACCEPT option to the bind_conf
+ - MINOR: listener: move TCP_FO to bind_conf
+ - MINOR: listener: move the ACC_PROXY and ACC_CIP options to bind_conf
+ - MINOR: listener: move LI_O_UNLIMITED and LI_O_NOSTOP to bind_conf
+ - MINOR: listener: get rid of LI_O_TCP_L4_RULES and LI_O_TCP_L5_RULES
+ - CLEANUP: listener: remove the now unused options field
+ - MINOR: listener: remove the now useless LI_F_QUIC_LISTENER flag
+ - CLEANUP: config: remove test for impossible case regarding bind thread mask
+ - MINOR: thread: add a simple thread_set API
+ - MEDIUM: listener/config: make the "thread" parser rely on thread_sets
+ - CLEANUP: config: stop using bind_tgroup and bind_thread
+ - CLEANUP: listener/thread: remove now unused bind_conf's bind_tgroup/bind_thread
+ - CLEANUP: listener/config: remove the special case for shards==1
+ - MEDIUM: config: restrict shards, not bind_conf to one group each
+ - BUG/MEDIUM: quic: do not split STREAM frames if no space
+ - BUILD: thread: fix build warnings with older gcc compilers
+
+2023/01/22 : 2.8-dev2
+ - CLEANUP: htx: fix a typo in an error message of http_str_to_htx
+ - DOC: config: added optional rst-ttl argument to silent-drop in action lists
+ - BUG/MINOR: ssl: Fix crash in 'update ssl ocsp-response' CLI command
+ - BUG/MINOR: ssl: Crash during cleanup because of ocsp structure pointer UAF
+ - MINOR: ssl: Create temp X509_STORE filled with cert chain when checking ocsp response
+ - MINOR: ssl: Only set ocsp->issuer if issuer not in cert chain
+ - MINOR: ssl: Release ssl_ocsp_task_ctx.cur_ocsp when destroying task
+ - MINOR: ssl: Detect more OCSP update inconsistencies
+ - BUG/MINOR: ssl: Fix OCSP_CERTID leak when same certificate is used multiple times
+ - MINOR: ssl: Limit ocsp_uri buffer size to minimum
+ - MINOR: ssl: Remove mention of ckch_store in error message of cli command
+ - MINOR: channel: Don't test CF_READ_NULL while CF_SHUTR is enough
+ - REORG: channel: Rename CF_READ_NULL to CF_READ_EVENT
+ - REORG: channel: Rename CF_WRITE_NULL to CF_WRITE_EVENT
+ - MEDIUM: channel: Use CF_READ_EVENT instead of CF_READ_PARTIAL
+ - MEDIUM: channel: Use CF_WRITE_EVENT instead of CF_WRITE_PARTIAL
+ - MINOR: channel: Remove CF_READ_ACTIVITY
+ - MINOR: channel: Remove CF_WRITE_ACTIVITY
+ - MINOR: channel: Remove CF_ANA_TIMEOUT and report CF_READ_EVENT instead
+ - MEDIUM: channel: Remove CF_READ_ATTACHED and report CF_READ_EVENT instead
+ - MINOR: channel: Stop to test CF_READ_ERROR flag if CF_SHUTR is enough
+ - MINOR: channel/applets: Stop to test CF_WRITE_ERROR flag if CF_SHUTW is enough
+ - DOC: management: add details on "Used" status
+ - DOC: management: add details about @system-ca in "show ssl ca-file"
+ - BUG/MINOR: mux-quic: fix transfer of empty HTTP response
+ - MINOR: mux-quic: add traces for flow-control limit reach
+ - MAJOR: mux-quic: rework stream sending priorization
+ - MEDIUM: h3: send SETTINGS before STREAM frames
+ - MINOR: mux-quic: use send-list for STOP_SENDING/RESET_STREAM emission
+ - MINOR: mux-quic: use send-list for immediate sending retry
+ - BUG/MINOR: h1-htx: Remove flags about protocol upgrade on non-101 responses
+ - BUG/MINOR: hlua: Fix Channel.line and Channel.data behavior regarding the doc
+ - BUG/MINOR: resolvers: Wait the resolution execution for a do_resolv action
+ - BUG/MINOR: ssl: Remove unneeded pointer check in ocsp cli release function
+ - BUG/MINOR: ssl: Missing ssl_conf pointer check when checking ocsp update inconsistencies
+ - DEV: tcploop: add minimal support for unix sockets
+ - BUG/MEDIUM: listener: duplicate inherited FDs if needed
+ - BUG/MINOR: ssl: OCSP minimum update threshold not properly set
+ - MINOR: ssl: Treat ocsp-update inconsistencies as fatal errors
+ - MINOR: ssl: Do not wake ocsp update task if update tree empty
+ - MINOR: ssl: Reinsert updated ocsp response later in tree in case of http error
+ - REGTEST: ssl: Add test for 'update ssl ocsp-response' CLI command
+ - OPTIM: global: move byte counts out of global and per-thread
+ - BUG/MEDIUM: peers: make "show peers" more careful about partial initialization
+ - BUG/MINOR: promex: Don't forget to consume the request on error
+ - MINOR: http-ana: Add a function to set HTTP termination flags
+ - MINOR: http-ana: Use http_set_term_flags() in most of HTTP analyzers
+ - BUG/MINOR: http-ana: Report SF_FINST_R flag on error waiting the request body
+ - MINOR: http-ana: Use http_set_term_flags() when waiting the request body
+ - BUG/MINOR: http-fetch: Don't block HTTP sample fetch eval in HTTP_MSG_ERROR state
+ - MAJOR: http-ana: Review error handling during HTTP payload forwarding
+ - CLEANUP: http-ana: Remove HTTP_MSG_ERROR state
+ - BUG/MEDIUM: mux-h2: Don't send CANCEL on shutw when response length is unknown
+ - MINOR: htx: Add an HTX value for the extra field is payload length is unknown
+ - BUG/MINOR: http-ana: make set-status also update txn->status
+ - BUG/MINOR: listeners: fix suspend/resume of inherited FDs
+ - DOC: config: fix wrong section number for "protocol prefixes"
+ - DOC: config: fix aliases for protocol prefixes "udp4@" and "udp6@"
+ - DOC: config: mention the missing "quic4@" and "quic6@" in protocol prefixes
+ - MINOR: listener: also support "quic+" as an address prefix
+ - CLEANUP: stconn: always use se_fl_set_error() to set the pending error
+ - BUG/MEDIUM: stconn: also consider SE_FL_EOI to switch to SE_FL_ERROR
+ - MINOR: quic: Useless test about datagram destination addresses
+ - MINOR: quic: Disable the active connection migrations
+ - MINOR: quic: Add "no-quic" global option
+ - MINOR: sample: Add "quic_enabled" sample fetch
+ - MINOR: quic: Replace v2 draft definitions by those of the final 2 version
+ - BUG/MINOR: mux-fcgi: Correctly set pathinfo
+ - DOC: config: fix "Address formats" chapter syntax
+ - BUG/MEDIUM: jwt: Properly process ecdsa signatures (concatenated R and S params)
+ - BUILD: ssl: add ECDSA_SIG_set0() for openssl < 1.1 or libressl < 2.7
+ - Revert "BUILD: ssl: add ECDSA_SIG_set0() for openssl < 1.1 or libressl < 2.7"
+ - BUG/MINOR: ssl: Fix compilation with OpenSSL 1.0.2 (missing ECDSA_SIG_set0)
+ - BUG/MINOR: listener: close tiny race between resume_listener() and stopping
+ - BUG/MINOR: h3: properly handle connection headers
+ - MINOR: h3: extend function for QUIC varint encoding
+ - MINOR: h3: implement TRAILERS encoding
+ - BUG/MINOR: bwlim: Check scope for period expr for set-bandwidth-limit actions
+ - MEDIUM: bwlim: Support constants limit or period on set-bandwidth-limit actions
+ - BUG/MINOR: bwlim: Fix parameters check for set-bandwidth-limit actions
+ - MINOR: h3: implement TRAILERS decoding
+ - BUG/MEDIUM: fd/threads: fix again incorrect thread selection in wakeup broadcast
+ - BUG/MINOR: thread: always reload threads_enabled in loops
+ - MINOR: threads: add a thread_harmless_end() version that doesn't wait
+ - BUG/MEDIUM: debug/thread: make the debug handler not wait for !rdv_requests
+ - BUG/MINOR: mux-h2: make sure to produce a log on invalid requests
+ - BUG/MINOR: mux-h2: add missing traces on failed headers decoding
+ - BUILD: hpack: include global.h for the trash that is needed in debug mode
+ - BUG/MINOR: jwt: Wrong return value checked
+ - BUG/MINOR: quic: Do not request h3 clients to close its unidirection streams
+ - MEDIUM: quic-sock: fix udp source address for send on listener socket
+
+2023/01/07 : 2.8-dev1
+ - MEDIUM: 51d: add support for 51Degrees V4 with Hash algorithm
+ - MINOR: debug: support pool filtering on "debug dev memstats"
+ - MINOR: debug: add a balance of alloc - free at the end of the memstats dump
+ - LICENSE: wurfl: clarify the dummy library license.
+ - MINOR: event_hdl: add event handler base api
+ - DOC/MINOR: api: add documentation for event_hdl feature
+ - MEDIUM: ssl: rename the struct "cert_key_and_chain" to "ckch_data"
+ - MINOR: quic: remove qc from quic_rx_packet
+ - MINOR: quic: complete traces in qc_rx_pkt_handle()
+ - MINOR: quic: extract datagram parsing code
+ - MINOR: tools: add port for ipcmp as optional criteria
+ - MINOR: quic: detect connection migration
+ - MINOR: quic: ignore address migration during handshake
+ - MINOR: quic: startup detect for quic-conn owned socket support
+ - MINOR: quic: test IP_PKTINFO support for quic-conn owned socket
+ - MINOR: quic: define config option for socket per conn
+ - MINOR: quic: allocate a socket per quic-conn
+ - MINOR: quic: use connection socket for emission
+ - MEDIUM: quic: use quic-conn socket for reception
+ - MEDIUM: quic: move receive out of FD handler to quic-conn io-cb
+ - MINOR: mux-quic: rename duplicate function names
+ - MEDIUM: quic: requeue datagrams received on wrong socket
+ - MINOR: quic: reconnect quic-conn socket on address migration
+ - MINOR: quic: activate socket per conn by default
+ - BUG/MINOR: ssl: initialize SSL error before parsing
+ - BUG/MINOR: ssl: initialize WolfSSL before parsing
+ - BUG/MINOR: quic: fix fd leak on startup check quic-conn owned socket
+ - BUG/MEDIUM: stconn: Flush output data before forwarding close to write side
+ - MINOR: server: add srv->rid (revision id) value
+ - MINOR: stats: add server revision id support
+ - MINOR: server/event_hdl: add support for SERVER_ADD and SERVER_DEL events
+ - MINOR: server/event_hdl: add support for SERVER_UP and SERVER_DOWN events
+ - BUG/MEDIUM: checks: do not reschedule a possibly running task on state change
+ - BUG/MINOR: checks: make sure fastinter is used even on forced transitions
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MINOR: mworker: display an alert upon a wait-mode exit
+ - BUG/MEDIUM: mworker: fix segv in early failure of mworker mode with peers
+ - BUG/MEDIUM: mworker: create the mcli_reload socketpairs in case of upgrade
+ - BUG/MINOR: checks: restore legacy on-error fastinter behavior
+ - MINOR: check: use atomic for s->consecutive_errors
+ - MINOR: stats: properly handle ST_F_CHECK_DURATION metric
+ - MINOR: mworker: remove unused legacy code in mworker_cleanlisteners
+ - MINOR: peers: unused code path in process_peer_sync
+ - BUG/MINOR: init/threads: continue to limit default thread count to max per group
+ - CLEANUP: init: remove useless assignment of nbthread
+ - BUILD: atomic: atomic.h may need compiler.h on ARMv8.2-a
+ - BUILD: makefile/da: also clean Os/ in Device Atlas dummy lib dir
+ - BUG/MEDIUM: httpclient/lua: double LIST_DELETE on end of lua task
+ - CLEANUP: pools: move the write before free to the uaf-only function
+ - CLEANUP: pool: only include pool-os from pool.c not pool.h
+ - REORG: pool: move all the OS specific code to pool-os.h
+ - CLEANUP: pools: get rid of CONFIG_HAP_POOLS
+ - DEBUG: pool: show a few examples in -dMhelp
+ - MINOR: pools: make DEBUG_UAF a runtime setting
+ - BUG/MINOR: promex: create haproxy_backend_agg_server_status
+ - MINOR: promex: introduce haproxy_backend_agg_check_status
+ - DOC: promex: Add missing backend metrics
+ - BUG/MAJOR: fcgi: Fix uninitialized reserved bytes
+ - REGTESTS: fix the race conditions in iff.vtc
+ - CI: github: reintroduce openssl 1.1.1
+ - BUG/MINOR: quic: properly handle alloc failure in qc_new_conn()
+ - BUG/MINOR: quic: handle alloc failure on qc_new_conn() for owned socket
+ - CLEANUP: mux-quic: remove unused attribute on qcs_is_close_remote()
+ - BUG/MINOR: mux-quic: remove qcs from opening-list on free
+ - BUG/MINOR: mux-quic: handle properly alloc error in qcs_new()
+ - CI: github: split ssl lib selection based on git branch
+ - REGTESTS: startup: check maxconn computation
+ - BUG/MINOR: startup: don't use internal proxies to compute the maxconn
+ - REGTESTS: startup: change the expected maxconn to 11000
+ - CI: github: set ulimit -n to a greater value
+ - REGTESTS: startup: activate automatic_maxconn.vtc
+ - MINOR: sample: add param converter
+ - CLEANUP: ssl: remove check on srv->proxy
+ - BUG/MEDIUM: freq-ctr: Don't compute overshoot value for empty counters
+ - BUG/MEDIUM: resolvers: Use tick_first() to update the resolvers task timeout
+ - REGTESTS: startup: add alternatives values in automatic_maxconn.vtc
+ - BUG/MEDIUM: h3: reject request with invalid header name
+ - BUG/MEDIUM: h3: reject request with invalid pseudo header
+ - MINOR: http: extract content-length parsing from H2
+ - BUG/MEDIUM: h3: parse content-length and reject invalid messages
+ - CI: github: remove redundant ASAN loop
+ - CI: github: split matrix for development and stable branches
+ - BUG/MEDIUM: mux-h1: Don't release H1 stream upgraded from TCP on error
+ - BUG/MINOR: mux-h1: Fix test instead a BUG_ON() in h1_send_error()
+ - MINOR: http-htx: add BUG_ON to prevent API error on http_cookie_register
+ - BUG/MEDIUM: h3: fix cookie header parsing
+ - BUG/MINOR: h3: fix memleak on HEADERS parsing failure
+ - MINOR: h3: check return values of htx_add_* on headers parsing
+ - MINOR: ssl: Remove unneeded buffer allocation in show ocsp-response
+ - MINOR: ssl: Remove unnecessary alloc'ed trash chunk in show ocsp-response
+ - BUG/MINOR: ssl: Fix memory leak of find_chain in ssl_sock_load_cert_chain
+ - MINOR: stats: provide ctx for dumping functions
+ - MINOR: stats: introduce stats field ctx
+ - BUG/MINOR: stats: fix show stat json buffer limitation
+ - MINOR: stats: make show info json future-proof
+ - BUG/MINOR: quic: fix crash on PTO rearm if anti-amplification reset
+ - BUILD: 51d: fix build issue with recent compilers
+ - REGTESTS: startup: disable automatic_maxconn.vtc
+ - BUILD: peers: peers-t.h depends on stick-table-t.h
+ - BUG/MEDIUM: tests: use tmpdir to create UNIX socket
+ - BUG/MINOR: mux-h1: Report EOS on parsing/internal error for not running stream
+ - BUG/MINOR: mux-h1: Never handle error at mux level for running connection
+ - BUG/MEDIUM: stats: Rely on a local trash buffer to dump the stats
+ - OPTIM: pool: split the read_mostly from read_write parts in pool_head
+ - MINOR: pool: make the thread-local hot cache size configurable
+ - MINOR: freq_ctr: add opportunistic versions of swrate_add()
+ - MINOR: pool: only use opportunistic versions of the swrate_add() functions
+ - REGTESTS: ssl: enable the ssl_reuse.vtc test for WolfSSL
+ - BUG/MEDIUM: mux-quic: fix double delete from qcc.opening_list
+ - BUG/MEDIUM: quic: properly take shards into account on bind lines
+ - BUG/MINOR: quic: do not allocate more rxbufs than necessary
+ - MINOR: ssl: Add a lock to the OCSP response tree
+ - MINOR: httpclient: Make the CLI flags public for future use
+ - MINOR: ssl: Add helper function that extracts an OCSP URI from a certificate
+ - MINOR: ssl: Add OCSP request helper function
+ - MINOR: ssl: Add helper function that checks the validity of an OCSP response
+ - MINOR: ssl: Add "update ssl ocsp-response" cli command
+ - MEDIUM: ssl: Add ocsp_certid in ckch structure and discard ocsp buffer early
+ - MINOR: ssl: Add ocsp_update_tree and helper functions
+ - MINOR: ssl: Add crt-list ocsp-update option
+ - MINOR: ssl: Store 'ocsp-update' mode in the ckch_data and check for inconsistencies
+ - MEDIUM: ssl: Insert ocsp responses in update tree when needed
+ - MEDIUM: ssl: Add ocsp update task main function
+ - MEDIUM: ssl: Start update task if at least one ocsp-update option is set to on
+ - DOC: ssl: Add documentation for ocsp-update option
+ - REGTESTS: ssl: Add tests for ocsp auto update mechanism
+ - MINOR: ssl: Move OCSP code to a dedicated source file
+ - BUG/MINOR: ssl/ocsp: check chunk_strcpy() in ssl_ocsp_get_uri_from_cert()
+ - CLEANUP: ssl/ocsp: add spaces around operators
+ - BUG/MEDIUM: mux-h2: Refuse interim responses with end-stream flag set
+ - BUG/MINOR: pool/stats: Use ullong to report total pool usage in bytes in stats
+ - BUG/MINOR: ssl/ocsp: httpclient blocked when doing a GET
+ - MINOR: httpclient: don't add body when istlen is empty
+ - MEDIUM: httpclient: change the default log format to skip duplicate proxy data
+ - BUG/MINOR: httpclient/log: free of invalid ptr with httpclient_log_format
+ - MEDIUM: mux-quic: implement shutw
+ - MINOR: mux-quic: do not count stream flow-control if already closed
+ - MINOR: mux-quic: handle RESET_STREAM reception
+ - MEDIUM: mux-quic: implement STOP_SENDING emission
+ - MINOR: h3: use stream error when needed instead of connection
+ - CI: github: enable github api authentication for OpenSSL tags read
+ - BUG/MINOR: mux-quic: ignore remote unidirectional stream close
+ - CI: github: use the GITHUB_TOKEN instead of a manually generated token
+ - BUILD: makefile: build the features list dynamically
+ - BUILD: makefile: move common options-oriented macros to include/make/options.mk
+ - BUILD: makefile: sort the features list
+ - BUILD: makefile: initialize all build options' variables at once
+ - BUILD: makefile: add a function to collect all options' CFLAGS/LDFLAGS
+ - BUILD: makefile: start to automatically collect CFLAGS/LDFLAGS
+ - BUILD: makefile: ensure that all USE_* handlers appear before CFLAGS are used
+ - BUILD: makefile: clean the wolfssl include and lib generation rules
+ - BUILD: makefile: make sure to also ignore SSL_INC when using wolfssl
+ - BUILD: makefile: reference libdl only once
+ - BUILD: makefile: make sure LUA_INC and LUA_LIB are always initialized
+ - BUILD: makefile: do not restrict Lua's prepend path to empty LUA_LIB_NAME
+ - BUILD: makefile: never force -latomic, set USE_LIBATOMIC instead
+ - BUILD: makefile: add an implicit USE_MATH variable for -lm
+ - BUILD: makefile: properly report USE_PCRE/USE_PCRE2 in features
+ - CLEANUP: makefile: properly indent ifeq/ifneq conditional blocks
+ - BUILD: makefile: rework 51D to split v3/v4
+ - BUILD: makefile: support LIBCRYPT_LDFLAGS
+ - BUILD: makefile: support RT_LDFLAGS
+ - BUILD: makefile: support THREAD_LDFLAGS
+ - BUILD: makefile: support BACKTRACE_LDFLAGS
+ - BUILD: makefile: support SYSTEMD_LDFLAGS
+ - BUILD: makefile: support ZLIB_CFLAGS and ZLIB_LDFLAGS
+ - BUILD: makefile: support ENGINE_CFLAGS
+ - BUILD: makefile: support OPENSSL_CFLAGS and OPENSSL_LDFLAGS
+ - BUILD: makefile: support WOLFSSL_CFLAGS and WOLFSSL_LDFLAGS
+ - BUILD: makefile: support LUA_CFLAGS and LUA_LDFLAGS
+ - BUILD: makefile: support DEVICEATLAS_CFLAGS and DEVICEATLAS_LDFLAGS
+ - BUILD: makefile: support PCRE[2]_CFLAGS and PCRE[2]_LDFLAGS
+ - BUILD: makefile: refactor support for 51DEGREES v3/v4
+ - BUILD: makefile: support WURFL_CFLAGS and WURFL_LDFLAGS
+ - BUILD: makefile: make all OpenSSL variants use the same settings
+ - BUILD: makefile: remove the special case of the SSL option
+ - BUILD: makefile: only consider settings from enabled options
+ - BUILD: makefile: also list per-option settings in 'make opts'
+ - BUG/MINOR: debug: don't mask the TH_FL_STUCK flag before dumping threads
+ - MINOR: cfgparse-ssl: avoid a possible crash on OOM in ssl_bind_parse_npn()
+ - BUG/MINOR: ssl: Missing goto in error path in ocsp update code
+ - BUG/MINOR: stick-table: report the correct action name in error message
+ - CI: Improve headline in matrix.py
+ - CI: Add in-memory cache for the latest OpenSSL/LibreSSL
+ - CI: Use proper `if` blocks instead of conditional expressions in matrix.py
+ - CI: Unify the `GITHUB_TOKEN` name across matrix.py and vtest.yml
+ - CI: Explicitly check environment variable against `None` in matrix.py
+ - CI: Reformat `matrix.py` using `black`
+ - MINOR: config: add environment variables for default log format
+ - REGTESTS: Remove REQUIRE_VERSION=1.9 from all tests
+ - REGTESTS: Remove REQUIRE_VERSION=2.0 from all tests
+ - REGTESTS: Remove tests with REQUIRE_VERSION_BELOW=1.9
+ - BUG/MINOR: http-fetch: Only fill txn status during prefetch if not already set
+ - BUG/MAJOR: buf: Fix copy of wrapping output data when a buffer is realigned
+ - DOC: config: fix alphabetical ordering of http-after-response rules
+ - MINOR: http-rules: Add missing actions in http-after-response ruleset
+ - DOC: config: remove duplicated "http-response sc-set-gpt0" directive
+ - BUG/MINOR: proxy: free orgto_hdr_name in free_proxy()
+ - REGTEST: fix the race conditions in json_query.vtc
+ - REGTEST: fix the race conditions in add_item.vtc
+ - REGTEST: fix the race conditions in digest.vtc
+ - REGTEST: fix the race conditions in hmac.vtc
+ - BUG/MINOR: fd: avoid bad tgid assertion in fd_delete() from deinit()
+ - BUG/MINOR: http: Memory leak of http redirect rules' format string
+ - MEDIUM: stick-table: set the track-sc limit at boottime via tune.stick-counters
+ - MINOR: stick-table: implement the sc-add-gpc() action
+
+2022/12/01 : 2.8-dev0
+ - MINOR: version: mention that it's development again
+
+2022/12/01 : 2.7.0
+ - MINOR: ssl: forgotten newline in error messages on ca-file
+ - BUG/MINOR: ssl: shut the ca-file errors emitted during httpclient init
+ - DOC: config: provide some configuration hints for "http-reuse"
+ - DOC: config: refer to section about quoting in the "add_item" converter
+ - DOC: halog: explain how to use -ac and -ad in the help message
+ - DOC: config: clarify the fact that SNI should not be used in HTTP scenarios
+ - DOC: config: mention that a single monitor-uri rule is supported
+ - DOC: config: explain how default matching method for ACL works
+ - DOC: config: clarify the fact that "retries" is not just for connections
+ - BUILD: halog: fix missing double-quote at end of help line
+ - DOC: config: clarify the -m dir and -m dom pattern matching methods
+ - MINOR: activity: report uptime in "show activity"
+ - REORG: activity/cli: move the "show activity" handler to activity.c
+ - DEV: poll: add support for epoll
+ - DEV: tcploop: centralize the polling code into wait_for_fd()
+ - DEV: tcploop: add support for POLLRDHUP when supported
+ - DEV: tcploop: do not report an error on POLLERR
+ - DEV: tcploop: add optional support for epoll
+ - SCRIPTS: announce-release: add a link to the data plane API
+ - CLEANUP: stick-table: fill alignment holes in the stktable struct
+ - MINOR: stick-table: store a per-table hash seed and use it
+ - MINOR: stick-table: show the shard number in each entry's "show table" output
+ - CLEANUP: ncbuf: remove ncb_blk args by value
+ - CLEANUP: ncbuf: inline small functions
+ - CLEANUP: ncbuf: use standard BUG_ON with DEBUG_STRICT
+ - BUG/MINOR: quic: Endless loop during retransmissions
+ - MINOR: mux-h2: add the expire task and its expiration date in "show fd"
+ - BUG/MINOR: peers: always initialize the stksess shard value
+ - REGTESTS: fix peers-related regtests regarding "show table"
+ - BUG/MEDIUM: mux-h1: Close client H1C on EOS when there is no output data
+ - MINOR: stick-table: change the API of the function used to calculate the shard
+ - CLEANUP: peers: factor out the key len calculation in received updates
+ - BUG/MINOR: peers: always update the stksess shard number on incoming updates
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MINOR: mux-h1: add the expire task and its expiration date in "show fd"
+ - MINOR: debug: improve error handling on the memstats command parser
+ - BUILD: quic: allow build with USE_QUIC and USE_OPENSSL_WOLFSSL
+ - CLEANUP: anon: clarify the help message on "debug dev hash"
+ - MINOR: debug: relax access restrictions on "debug dev hash" and "memstats"
+ - SCRIPTS: run-regtests: add a version check
+ - MINOR: version: mention that it's stable now
+
+2022/11/24 : 2.7-dev10
+ - MEDIUM: tcp-act: add parameter rst-ttl to silent-drop
+ - BUG/MAJOR: quic: Crash upon retransmission of dgrams with several packets
+ - MINOR: cli: print parsed command when not found
+ - BUG/MAJOR: quic: Crash after discarding packet number spaces
+ - CLEANUP: quic: replace "choosen" with "chosen" all over the code
+ - MINOR: cli/pools: store "show pools" results into a temporary array
+ - MINOR: cli/pools: add sorting capabilities to "show pools"
+ - MINOR: cli/pools: add pool name filtering capability to "show pools"
+ - DOC: configuration: fix quic prefix typo
+ - MINOR: quic: report error if force-retry without cluster-secret
+ - MINOR: global: generate random cluster.secret if not defined
+ - BUG/MINOR: resolvers: do not run the timeout task when there's no resolution
+ - BUG/MINOR: server/idle: at least use atomic stores when updating max_used_conns
+ - MINOR: server/idle: make the next_takeover index per-tgroup
+ - BUILD: listener: fix build warning on global_listener_rwlock without threads
+ - BUG/MAJOR: sched: protect task during removal from wait queue
+ - BUILD: sched: fix build with DEBUG_THREAD with the previous commit
+ - DOC: quic: add note on performance issue with listener contention
+ - BUG/MINOR: cfgparse-listen: fix ebpt_next_dup pointer dereference on proxy "from" inheritance
+ - BUG/MINOR: log: fix parse_log_message rfc5424 size check
+ - CLEANUP: arg: remove extra check in make_arg_list arg escaping
+ - CLEANUP: tools: extra check in utoa_pad
+ - MINOR: h1: Consider empty port as invalid in authority for CONNECT
+ - MINOR: http: Consider empty ports as valid default ports
+ - BUG/MINOR: http-htx: Normalized absolute URIs with an empty port
+ - BUG/MINOR: h1: Replace authority validation to conform RFC3986
+ - REG-TESTS: http: Add more tests about authority/host matching
+ - BUG/MINOR: http-htx: Don't consider an URI as normalized after a set-uri action
+ - BUG/MEDIUM: mux-h1: Don't release H1C on timeout if there is a SC attached
+ - BUG/MEDIUM: mux-h1: Subscribe for reads on error on sending path
+ - BUILD: http-htx: Silent build error about a possible NULL start-line
+ - DOC: configuration.txt: add default_value for table_idle signature
+ - BUILD: ssl-sock: Silent error about NULL deref in ssl_sock_bind_verifycbk()
+ - BUG/MEDIUM: mux-h1: Remove H1C_F_WAIT_NEXT_REQ flag on a next request
+ - BUG/MINOR: mux-h1: Fix handling of 408-Request-Time-Out
+ - MINOR: mux-h1: Remove H1C_F_WAIT_NEXT_REQ in functions handling errors
+ - MINOR: mux-h1: Avoid useless call to h1_send() if no error is sent
+ - DOC: configuration.txt: fix typo in table_idle signature
+ - BUILD: stick-tables: fix build breakage in xxhash on older compilers
+ - BUILD: compiler: include compiler's definitions before ours
+ - BUILD: quic: global.h is needed in cfgparse-quic
+ - CLEANUP: tools: do not needlessly include xxhash nor cli from tools.h
+ - BUILD: flags: really restrict the cases where flags are exposed
+ - BUILD: makefile: minor reordering of objects by build time
+ - BUILD: quic: silence two invalid build warnings at -O1 with gcc-6.5
+ - BUILD: quic: use openssl-compat.h instead of openssl/ssl.h
+ - MEDIUM: ssl: add minimal WolfSSL support with OpenSSL compatibility mode
+ - MINOR: sample: make the rand() sample fetch function use the statistical_prng
+ - MINOR: auth: silence null dereference warning in check_user()
+ - CLEANUP: peers: fix format string for status messages (int signedness)
+ - CLEANUP: qpack: fix format string in debugging code (int signedness)
+ - CLEANUP: qpack: properly use the QPACK macros not HPACK ones in debug code
+ - BUG/MEDIUM: quic: fix datagram dropping on queueing failed
+
+2022/11/18 : 2.7-dev9
+ - BUILD: quic: QUIC mux build fix for 32-bit build
+ - BUILD: scripts: disable tests build on QuicTLS build
+ - BUG/MEDIUM: httpclient: segfault when the httpclient parser fails
+ - BUILD: ssl_sock: fix null dereference for QUIC build
+ - BUILD: quic: Fix build for m68k cross-compilation
+ - BUG/MINOR: quic: fix buffer overflow on retry token generation
+ - MINOR: quic: add version field on quic_rx_packet
+ - MINOR: quic: extend pn_offset field from quic_rx_packet
+ - MINOR: quic: define first packet flag
+ - MINOR: quic: extract connection retrieval
+ - MINOR: quic: split and rename qc_lstnr_pkt_rcv()
+ - MINOR: quic: refactor packet drop on reception
+ - MINOR: quic: extend Retry token check function
+ - BUG/MINOR: log: Preserve message facility when the log target is a ring buffer
+ - BUG/MINOR: ring: Properly parse connect timeout
+ - BUG/MEDIUM: httpclient/lua: crash when the lua task timeout before the httpclient
+ - BUG/MEDIUM: httpclient: check if the httpclient was released in the IO handler
+ - REGTESTS: httpclient/lua: test the lua task timeout with the httpclient
+ - CI: github: dump the backtrace of coredumps in the alpine container
+ - BUILD: Makefile: add "USE_SHM_OPEN" on the linux-musl target
+ - DOC: lua: add a note about compression w/ httpclient
+ - CLEANUP: mworker/cli: rename the status function to loadstatus
+ - MINOR: mworker/cli: does no try to dump the startup-logs w/o USE_SHM_OPEN
+ - MINOR: list: fixing typo in MT_LIST_LOCK_ELT
+ - DOC/MINOR: list: fixing MT_LIST_LOCK_ELT macro documentation
+ - MINOR: list: adding MT_LIST_APPEND_LOCKED macro
+ - BUG/MINOR: mux-quic: complete flow-control for uni streams
+ - BUG/MEDIUM: compression: handle rewrite errors when updating response headers
+ - MINOR: quic: do not crash on unhandled sendto error
+ - MINOR: quic: display unknown error sendto counter on stat page
+ - MINOR: peers: Support for peer shards
+ - MINOR: peers: handle multiple resync requests using shards
+ - BUG/MINOR: sink: Only use backend capability for the sink proxies
+ - BUG/MINOR: sink: Set default connect/server timeout for implicit ring buffers
+ - MINOR: ssl: add the SSL error string when failing to load a certificate
+ - MINOR: ssl: add the SSL error string before the chain
+ - MEDIUM: ssl: be stricter about chain error
+ - BUG/MAJOR: stick-table: don't process store-response rules for applets
+ - MINOR: quic: remove unnecessary quic_session_accept()
+ - BUG/MINOR: quic: fix subscribe operation
+ - BUG/MINOR: log: fixing bug in tcp syslog_io_handler Octet-Counting
+ - MINOR: ssl: dump the SSL string error when SSL_CTX_use_PrivateKey() failed.
+ - MINOR: quic: add counter for interrupted reception
+ - BUG/MINOR: quic: fix race condition on datagram purging
+ - CI: add monthly gcc cross compile jobs
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CLEANUP: ssl: remove dead code in ssl_sock_load_pem_into_ckch()
+ - BUG/MINOR: httpclient: fixed memory allocation for the SSL ca_file
+ - BUG/MINOR: ssl: Memory leak of DH BIGNUM fields
+ - BUG/MINOR: ssl: Memory leak of AUTHORITY_KEYID struct when loading issuer
+ - BUG/MINOR: ssl: ocsp structure not freed properly in case of error
+ - CI: switch to the "latest" LibreSSL
+ - CI: enable QUIC for LibreSSL builds
+ - BUG/MEDIUM: ssl: Verify error codes can exceed 63
+ - MEDIUM: ssl: {ca,crt}-ignore-err can now use error constant name
+ - MINOR: ssl: x509_v_err_str converter transforms an integer to a X509_V_ERR name
+ - CLEANUP: cli: rename dynamic error printing state
+ - MINOR: cli: define usermsgs print context
+ - MINOR: server: clear prefix on stderr logs after add server
+ - BUG/MINOR: ssl: bind_conf is incorrectly accessed when using QUIC
+ - BUILD: ssl_utils: fix build on gcc versions before 8
+ - BUILD: debug: remove unnecessary quotes in HA_WEAK() calls
+ - CI: emit the compiler's version in the build reports
+ - IMPORT: xxhash: update xxHash to version 0.8.1
+ - IMPORT: slz: declare len to fix debug build when optimal match is enabled
+ - IMPORT: slz: mention the potential header in slz_finish()
+ - IMPORT: slz: define and use a __fallthrough statement for switch/case
+ - BUILD: compiler: add a macro to detect if another one is set and equals 1
+ - BUILD: compiler: add a default definition for __has_attribute()
+ - BUILD: compiler: define a __fallthrough statement for switch/case
+ - BUILD: sample: use __fallthrough in smp_is_rw() and smp_dup()
+ - BUILD: quic: use __fallthrough in quic_connect_server()
+ - BUILD: ssl/crt-list: use __fallthrough in cli_io_handler_add_crtlist()
+ - BUILD: ssl: use __fallthrough in cli_io_handler_commit_{cert,cafile_crlfile}()
+ - BUILD: ssl: use __fallthrough in cli_io_handler_tlskeys_files()
+ - BUILD: hlua: use __fallthrough in hlua_post_init_state()
+ - BUILD: stream: use __fallthrough in stats_dump_full_strm_to_buffer()
+ - BUILD: tcpcheck: use __fallthrough in check_proxy_tcpcheck()
+ - BUILD: stats: use __fallthrough in stats_dump_proxy_to_buffer()
+ - BUILD: peers: use __fallthrough in peer_io_handler()
+ - BUILD: hash: use __fallthrough in hash_djb2()
+ - BUILD: tools: use __fallthrough in url_decode()
+ - BUILD: args: use __fallthrough in make_arg_list()
+ - BUILD: acl: use __fallthrough in parse_acl_expr()
+ - BUILD: spoe: use __fallthrough in spoe_handle_appctx()
+ - BUILD: logs: use __fallthrough in build_log_header()
+ - BUILD: check: use __fallthrough in __health_adjust()
+ - BUILD: http_act: use __fallthrough in parse_http_del_header()
+ - BUILD: h1_htx: use __fallthrough in h1_parse_chunk()
+ - BUILD: vars: use __fallthrough in var_accounting_{diff,add}()
+ - BUILD: map: use __fallthrough in cli_io_handler_*()
+ - BUILD: compression: use __fallthrough in comp_http_payload()
+ - BUILD: stconn: use __fallthrough in various shutw() functions
+ - BUILD: prometheus: use __fallthrough in promex_dump_metrics() and IO handler()
+ - CLEANUP: ssl: remove printf in bind_parse_ignore_err
+ - BUG/MINOR: ssl: crt-ignore-err memory leak with 'all' parameter
+ - BUG/MINOR: ssl: Fix potential overflow
+ - CLEANUP: stick-table: remove the unused table->exp_next
+ - OPTIM: stick-table: avoid atomic ops in stktable_requeue_exp() when possible
+ - BUG/MEDIUM: stick-table: fix a race condition when updating the expiration task
+ - MEDIUM: http-ana: remove set-cookie2 support
+ - BUG/MEDIUM: wdt/clock: properly handle early task hangs
+ - MINOR: deinit: add a "quick-exit" option to bypass the deinit step
+ - OPTIM: ebtree: make ebmb_insert_prefix() keep a copy the new node's pfx
+ - OPTIM: ebtree: make ebmb_insert_prefix() keep a copy the new node's key
+ - MINOR: ssl: ssl_sock_load_cert_chain() display error strings
+ - MINOR: ssl: reintroduce ERR_GET_LIB(ret) == ERR_LIB_PEM in ssl_sock_load_pem_into_ckch()
+ - BUG/MINOR: http-htx: Fix error handling during parsing http replies
+ - BUG/MINOR: resolvers: Don't wait periodic resolution on healthcheck failure
+ - BUG/MINOR: resolvers: Set port before IP address when processing SRV records
+ - BUG/MINOR: mux-fcgi: Be sure to send empty STDIN record in case of zero-copy
+ - BUG/MEDIUM: mux-fcgi: Avoid value length overflow when it doesn't fit at once
+ - BUG/MINOR: ssl: SSL_load_error_strings might not be defined
+ - MINOR: pool/debug: create a new pool_alloc_flag() macro
+ - MINOR: dynbuf: switch allocation and release to macros to better track users
+ - BUG/MINOR: mux-h1: Do not send a last null chunk on body-less answers
+ - REG-TESTS: cache: Remove T-E header for 304-Not-Modified responses
+ - DOC: config: fix alphabetical ordering of global section
+ - MINOR: trace: split the CLI "trace" parser in CLI vs statement
+ - MEDIUM: trace: create a new "trace" statement in the "global" section
+ - BUG/MEDIUM: ring: fix creation of server in uninitialized ring
+ - BUILD: quic: fix dubious 0-byte overflow on qc_release_lost_pkts
+ - BUILD: makefile: mark poll and tcploop targets as phony
+ - BUILD: makefile: properly pass CC to sub-projects
+ - BUILD: makefile: move default verbosity settings to include/make/verbose.mk
+ - BUILD: makefile: use $(cmd_MAKE) in quiet mode
+ - BUILD: makefile: move the compiler option detection stuff to compiler.mk
+ - DEV: poll: make the connect() step an action as well
+ - DEV: poll: strip the "do_" prefix from reported function names
+ - DEV: poll: indicate the FD's side in front of its value
+ - BUG/MINOR: pool/cli: use ullong to report total pool usage in bytes
+ - MINOR: mux-h1: Remove useless code inside shutr callback
+ - CLEANUP: mux-h1: Rename H1S_F_ERROR flag into H1S_F_ERROR_MASK
+ - REORG: mux-h1: Reorg the H1C structure
+ - CLEANUP: mux-h1: Rename H1C_F_ST_ERROR and H1C_F_ST_SILENT_SHUT flags
+ - MINOR: mux-h1: Add a dedicated enum to deal with H1 connection state
+ - MEDIUM: mux-h1: Handle H1C states via its state field instead of H1C_F_ST_*
+ - MINOR: mux-h1: Don't handle subscribe for reads in h1_process_demux()
+ - CLEANUP: mux-h1: Rename H1C_F_ERR_PENDING into H1C_F_ABRT_PENDING
+ - MINOR: mux-h1: Add flag on H1 stream to deal with internal errors
+ - MEDIUM: mux-h1: Rely on the H1C to deal with shutdown for reads
+ - CLEANUP: mux-h1: Reorder H1 connection flags to avoid holes
+ - MEDIUM: mux-h1: Don't report a final error when a message is aborted
+ - MEDIUM: mux-pt: Don't always set a final error on SE on the sending path
+ - MEDIUM: mux-h2: Introduce flags to deal with connection read/write errors
+ - CLEANUP: mux-h2: Remove unused fields in h2c structures
+ - MEDIUM: mux-fcgi: Introduce flags to deal with connection read/write errors
+ - MINOR: sconn: Set SE_FL_ERROR only when there is no more data to read
+ - MINOR: mux-h1: Rely on a H1S flag to know a WS key was found or not
+ - DOC: lua-api: Remove warning about the lua filters
+ - BUG/MEDIUM: listener: Fix race condition when updating the global mngmt task
+ - CLEANUP: listener: Remove useless task_queue from manage_global_listener_queue
+ - BUG/MINOR: mux-h1: Fix error handling when H1S allocation failed on client side
+ - DOC: internal: commit notes about polling states and flags
+ - DOC: internal: commit notes about polling states and flags on connect()
+ - CLEANUP: mux-h1: Don't test h1c in h1_shutw_conn()
+ - BUG/MINOR: http_ana/txn: don't re-initialize txn and req var lists
+ - BUG/MEDIUM: raw-sock: Don't report connection error if something was received
+ - BUG/MINOR: ssl: don't initialize the keylog callback when not required
+ - BUILD: Makefile: enable USE_SHM_OPEN by default on freebsd
+ - BUG/MEDIUM: peers: messages about unknown tables not correctly ignored
+ - MINOR: cfgparse: Always check the section position
+ - MEDIUM: thread: Restrict nbthread/thread-group(s) to very first global sections
+ - BUILD: peers: Remove unused variables
+ - MINOR: ncbuf: complete doc for ncb_advance()
+ - BUG/MEDIUM: quic: fix unsuccessful handshakes on ncb_advance error
+ - BUG/MEDIUM: quic: fix memleak for out-of-order crypto data
+ - MINOR: quic: complete traces/debug for handshake
+
+2022/10/14 : 2.7-dev8
+ - BUG/MINOR: checks: update pgsql regex on auth packet
+ - DOC: config: Fix pgsql-check documentation to make user param mandatory
+ - CLEANUP: mux-quic: remove usage of non-standard ull type
+ - CLEANUP: quic: remove global var definition in quic_tls header
+ - BUG/MINOR: quic: adjust quic_tls prototypes
+ - CLEANUP: quic: fix headers
+ - CLEANUP: quic: remove unused function prototype
+ - CLEANUP: quic: remove duplicated varint code from xprt_quic.h
+ - CLEANUP: quic: create a dedicated quic_conn module
+ - BUG/MINOR: mux-quic: ignore STOP_SENDING for locally closed stream
+ - BUG/MEDIUM: lua: Don't crash in hlua_lua2arg_check on failure
+ - BUG/MEDIUM: lua: handle stick table implicit arguments right.
+ - BUILD: h1: silence an initiialized warning with gcc-4.7 and -Os
+ - MINOR: fd: add a new function to only raise RLIMIT_NOFILE
+ - MINOR: init: do not try to shrink existing RLIMIT_NOFILE
+ - BUG/MINOR: http-fetch: Update method after a prefetch in smp_fetch_meth()
+ - BUILD: http_fetch: silence an uninitialized warning with gcc-4/5/6 at -Os
+ - BUG/MINOR: hlua: hlua_channel_insert_data() behavior conflicts with documentation
+ - MINOR: quic: limit usage of ssl_sock_ctx in favor of quic_conn
+ - MINOR: mux-quic: check quic-conn return code on Tx
+ - CLEANUP: quic: fix indentation
+ - MEDIUM: quic: retrieve frontend destination address
+ - CLEANUP: Reapply ist.cocci (2)
+ - CLEANUP: Reapply strcmp.cocci
+ - CLEANUP: quic/receiver: remove the now unused tx_qring list
+ - BUG/MINOR: quic: set IP_PKTINFO socket option for QUIC receivers only
+ - MINOR: hlua: some luaL_checktype() calls were not guarded with MAY_LJMP
+ - DOC: configuration: missing 'if' in tcp-request content example
+ - MINOR: hlua: removing ambiguous lua_pushvalue with 0 index
+ - BUG/MAJOR: stick-tables: do not try to index a server name for applets
+ - MINOR: plock: support disabling exponential back-off
+ - MINOR: freq_ctr: use the thread's local time whenever possible
+ - MEDIUM: stick-table: switch the table lock to rwlock
+ - MINOR: stick-table: do not take an exclusive lock when downing ref_cnt
+ - MINOR: stick-table: move the write lock inside stktable_touch_with_exp()
+ - MEDIUM: stick-table: only take the lock when needed in stktable_touch_with_exp()
+ - MEDIUM: stick-table: make stksess_kill_if_expired() avoid the exclusive lock
+ - MEDIUM: stick-table: return inserted entry in __stktable_store()
+ - MEDIUM: stick-table: free newly allocated stkess if it couldn't be inserted
+ - MEDIUM: stick-table: switch to rdlock in stktable_lookup() and lookup_key()
+ - MEDIUM: stick-table: make stktable_get_entry() look up under a read lock
+ - MEDIUM: stick-table: do not take a lock to update t->current anymore.
+ - MEDIUM: stick-table: make stktable_set_entry() look up under a read lock
+ - MEDIUM: stick-table: requeue the expiration task out of the exclusive lock
+ - MINOR: stick-table: split stktable_store() between key and requeue
+ - MEDIUM: stick-table: always use atomic ops to requeue the table's task
+ - MEDIUM: stick-table: requeue the wakeup task out of the write lock
+ - BUG/MINOR: stick-table: fix build with DEBUG_THREAD
+ - REORG: mux-fcgi: Extract flags and enums into mux_fcgi-t.h
+ - MINOR: flags/mux-fcgi: Decode FCGI connection and stream flags
+ - BUG/MEDIUM: mux-h1: Add connection error handling when reading/sending on a pipe
+ - BUG/MEDIUM: mux-h1: Handle abort with an incomplete message during parsing
+ - BUG/MINOR: server: make sure "show servers state" hides private bits
+ - MINOR: checks: use the lighter PRNG for spread checks
+ - MEDIUM: checks: spread the checks load over random threads
+ - CI: SSL: use proper version generating when "latest" semantic is used
+ - CI: SSL: temporarily stick to LibreSSL=3.5.3
+ - MINOR: quic: New quic_cstream object implementation
+ - MINOR: quic: Extract CRYPTO frame parsing from qc_parse_pkt_frms()
+ - MINOR: quic: Use a non-contiguous buffer for RX CRYPTO data
+ - BUG/MINOR: quic: Stalled 0RTT connections with big ClientHello TLS message
+ - MINOR: quic: Split the secrets key allocation in two parts
+ - CLEANUP: quic: remove unused rxbufs member in receiver
+ - CLEANUP: quic: improve naming for rxbuf/datagrams handling
+ - MINOR: quic: implement datagram cleanup for quic_receiver_buf
+ - MINOR: ring: ring_cast_from_area() cast from an allocated area
+ - MINOR: buffers: split b_force_xfer() into b_cpy() and b_force_xfer()
+ - MINOR: logs: startup-logs can use a shm for logging the reload
+ - MINOR: mworker/cli: reload command displays the startup-logs
+ - MEDIUM: quic: respect the threads assigned to a bind line
+ - DOC: management: update the "reload" command of the master CLI
+ - BUILD: ssl_sock: bind_conf uninitialized in ssl_sock_bind_verifycbk()
+ - BUG/MEDIUM: httpclient: Don't set EOM flag on an empty HTX message
+ - MINOR: httpclient/lua: Don't set req_payload callback if body is empty
+ - DOC/CLEANUP: lua-api: some minor corrections
+ - DOC: lua-api: updating toolbox link
+ - DOC/CLEANUP: lua-api: removing duplicate core.proxies attribute
+ - DOC: management: add forgotten "show startup-logs"
+ - DOC: management: "show startup-logs" for master CLI
+ - CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in matrix.py
+ - CI: Replace the deprecated `::set-output` command by writing to $GITHUB_OUTPUT in workflow definition
+
+2022/10/03 : 2.7-dev7
+ - BUG/MEDIUM: mux-quic: fix nb_hreq decrement
+ - CLEANUP: httpclient: deleted unused variables
+ - MINOR: httpclient: enabled the use of SNI presets
+ - OPTIM: hpack-huff: reduce the cache footprint of the huffman decoder
+ - BUG/MINOR: mux-quic: do not keep detached qcs with empty Tx buffers
+ - REORG: mux-quic: extract traces in a dedicated source file
+ - REORG: mux-quic: export HTTP related function in a dedicated file
+ - MINOR: mux-quic: refactor snd_buf
+ - BUG/MEDIUM: mux-quic: properly trim HTX buffer on snd_buf reset
+ - BUG/MINOR: mux-h1: Account consumed output data on synchronous connection error
+ - BUG/MINOR: log: improper behavior when escaping log data
+ - CLEANUP: tools: removing escape_chunk() function
+ - MINOR: clock: split local and global date updates
+ - MINOR: pollers: only update the local date during busy polling
+ - MINOR: clock: do not update the global date too often
+ - REGTESTS: 4be_1srv_smtpchk_httpchk_layer47errors: Return valid SMTP replies
+ - MINOR: smtpchk: Update expect rule to fully match replies to EHLO commands
+ - BUG/MINOR: smtpchk: SMTP Service check should gracefully close SMTP transaction
+ - MINOR: list: documenting mt_list_for_each_entry_safe() macro
+ - CLEANUP: list: Fix mt_list_for_each_entry_safe indentation
+ - BUG/MINOR: hlua: Remove \n in Lua error message built with memprintf
+ - MINOR: hlua: Allow argument on lua-load(-per-thread) directives
+ - BUG/MINOR: anon: memory illegal accesses in tools.c with hash_anon and hash_ipanon
+ - MEDIUM: mworker/cli: keep the connection of the FD that ask for a reload
+ - BUG/MINOR: hlua: fixing ambiguous sizeof in hlua_load_per_thread
+ - MINOR: mworker/cli: replace close() by fd_delete()
+ - MINOR: mworker: store and shows loading status
+ - MINOR: mworker: mworker_cli_proxy_new_listener() returns a bind_conf
+ - MINOR: mworker: stores the mcli_reload bind_conf
+ - MINOR: mworker/cli: the mcli_reload bind_conf only send the reload status
+ - DOC: management: describe the new reload command behavior
+ - CLEANUP: list: fix again some style issues in the recent comments
+ - BUG/MINOR: stream: Perform errors handling in right order in stream_new()
+ - BUG/MEDIUM: stconn: Reset SE descriptor when we fail to create a stream
+ - BUG/MEDIUM: resolvers: Remove aborted resolutions from query_ids tree
+ - DOC: management: add timeout on the "reload" command
+ - BUG/MINOR: ring: fix the size check in ring_make_from_area()
+ - BUG/MINOR: config: don't count trailing spaces as empty arg
+ - Revert "BUG/MINOR: config: don't count trailing spaces as empty arg"
+ - BUG/MINOR: hlua: fixing hlua_http_msg_del_data behavior
+ - BUG/MINOR: hlua: fixing hlua_http_msg_insert_data behavior
+ - MINOR: cli: Add anonymization on a missed element for 'show sess all'
+ - MINOR: cli: remove error message with 'set anon on|off'
+ - MINOR: tools: modify hash_ipanon in order to use it in cli
+ - MINOR: cli: use hash_ipanon to anonymized address
+ - MINOR: cli: Add an anonymization on a missed element in 'show server state'
+ - MINOR: config: correct errors about argument number in condition in cfgparse.c
+ - MINOR: config: Add other keywords when dump the anonymized configuration file
+ - MINOR: config: Add option line when the configuration file is dumped
+ - MINOR: cli: correct commentary and replace 'set global-key' name
 + - MINOR: tools: Improve hash_ipanon to support dgram sockets and port offsets
 + - MINOR: tools: Improve hash_ipanon to not hash FD-based addresses
+ - BUG/MINOR: hlua: _hlua_http_msg_delete incorrect behavior when offset is used
+ - DOC: management: httpclient can resolve server names in URLs
+ - BUG/MINOR: hlua: prevent crash when loading numerous arguments using lua-load(per-thread)
+ - DOC/CLEANUP: lua-api: removing duplicate date functions doc
+ - MINOR: hlua: ambiguous lua_pushvalue with 0 index
+ - BUG/MINOR: config: don't count trailing spaces as empty arg (v2)
+ - BUG/MEDIUM: config: count line arguments without dereferencing the output
+ - BUG/MAJOR: conn-idle: fix hash indexing issues on idle conns
+ - BUG/MINOR: config: insufficient syntax check of the global "maxconn" value
+ - BUG/MINOR: backend: only enforce turn-around state when not redispatching
+
+2022/09/17 : 2.7-dev6
+ - MINOR: Revert part of clarifying samples support per os commit
+ - BUILD: makefile: enable crypt(3) for NetBSD
+ - BUG/MINOR: quic: Retransmitted frames marked as acknowledged
+ - BUG/MINOR: quic: Possible crash with "tls-ticket-keys" on QUIC bind lines
+ - MINOR: http-check: Remove support for headers/body in "option httpchk" version
+ - BUG/MINOR: h1: Support headers case adjustment for TCP proxies
+ - BUG/MINOR: quic: Possible crash when verifying certificates
+ - BUILD: quic: add some ifdef around the SSL_ERROR_* for libressl
+ - BUILD: ssl: fix ssl_sock_switchtx_cbk when no client_hello_cb
 + - BUILD: quic: temporarily ignore chacha20_poly1305 for libressl
+ - BUILD: quic: enable early data only with >= openssl 1.1.1
+ - BUILD: ssl: fix the ifdef mess in ssl_sock_initial_ctx
+ - BUILD: quic: fix the #ifdef in ssl_quic_initial_ctx()
+ - MINOR: quic: add QUIC support when no client_hello_cb
+ - MINOR: quic: Add traces about sent or resent TX frames
+ - MINOR: quic: No TRACE_LEAVE() in retrieve_qc_conn_from_cid()
+ - BUG/MINOR: quic: Wrong connection ID to thread ID association
+ - BUG/MINOR: task: always reset a new tasklet's call date
+ - BUG/MINOR: task: make task_instant_wakeup() work on a task not a tasklet
+ - MINOR: task: permanently enable latency measurement on tasklets
+ - CLEANUP: task: rename ->call_date to ->wake_date
+ - BUG/MINOR: sched: properly account for the CPU time of dying tasks
+ - MINOR: sched: store the current profile entry in the thread context
+ - BUG/MINOR: stream/sched: take into account CPU profiling for the last call
+ - MINOR: tasks: do not keep cpu and latency times in struct task
+ - MINOR: tools: add generic pointer hashing functions
+ - CLEANUP: activity: make memprof use the generic ptr_hash() function
+ - CLEANUP: activity: make taskprof use ptr_hash()
+ - MINOR: debug: add struct ha_caller to describe a calling location
+ - CLEANUP: debug: use struct ha_caller for memstat
+ - DEBUG: task: define a series of wakeup types for tasks and tasklets
+ - DEBUG: task: use struct ha_caller instead of arrays of file:line
+ - DEBUG: applet: instrument appctx_wakeup() to log the caller's location
+ - DEBUG: task: simplify the caller recording in DEBUG_TASK
+ - CLEANUP: task: move tid and wake_date into the common part
+ - CLEANUP: sched: remove duplicate code in run_tasks_from_list()
+ - CLEANUP: activity: make the number of sched activity entries more configurable
+ - DEBUG: resolvers: unstatify process_resolvers() to make it appear in profiling
+ - DEBUG: quic: export the few task handlers that often appear in task dumps
+ - MEDIUM: tasks/activity: combine the called function with the caller
+ - MINOR: tasks/activity: improve the caller-callee activity hash
+ - MINOR: activity/cli: support aggregating task profiling outputs
+ - MINOR: activity/cli: support sorting task profiling by total CPU time
+ - BUG/MINOR: signals/poller: set the poller timeout to 0 when there are signals
+ - BUG/MINOR: quic: Speed up the handshake completion only one time
+ - BUG/MINOR: quic: Trace fix about packet number space information.
+ - BUG/MINOR: h3: Crash when h3 trace verbosity is "minimal"
+ - MINOR: h3: Add the quic_conn object to h3 traces
+ - MINOR: h3: Missing connection argument for a TRACE_LEAVE() argument
+ - MINOR: h3: Send the h3 settings with others streams (requests)
+ - MINOR: dev/udp: Apply the corruption to both directions
+ - BUILD: udp-perturb: Add a make target for udp-perturb tool
+ - BUG/MINOR: signals/poller: ensure wakeup from signals
+ - CI: cirrus-ci: bump FreeBSD image to 13-1
+ - DEV: flags: fix usage message to reflect available options
+ - DEV: flags: add missing CO_FL_FDLESS connection flag
+ - MINOR: flags: add a new file to host flag dumping macros
+ - MINOR: flags: implement a macro used to dump enums inside masks
+ - MINOR: flags/channel: use flag dumping for channel flags and analysers
+ - MINOR: flags/connection: use flag dumping for connection flags
+ - MINOR: flags/stconn: use flag dumping for stconn and sedesc flags
+ - MINOR: flags/stream: use flag dumping for stream error type
+ - MINOR: flags/stream: use flag dumping for stream flags
+ - MINOR: flags/task: use flag dumping for task state
+ - MINOR: flags/http_ana: use flag dumping for txn flags
+ - DEV: flags: remove the now unused SHOW_FLAG() definition
+ - DEV: flags: remove the now useless intermediary functions
+ - MINOR: flags/htx: use flag dumping to show htx and start-line flags
+ - MINOR: flags/http_ana: use flag dumping to show http msg states
+ - BUG/MEDIUM: proxy: ensure pause_proxy() and resume_proxy() own PROXY_LOCK
+ - MINOR: listener: small API change
+ - MINOR: proxy/listener: support for additional PAUSED state
+ - BUG/MINOR: stats: fixing stat shows disabled frontend status as 'OPEN'
+ - BUILD: flags: fix build warning in some macros used by show_flags
+ - BUILD: flags: fix the fallback macros for missing stdio
+ - CLEANUP: pollers: remove dead code in the polling loop
+ - BUG/MINOR: mux-h1: Increment open_streams counter when H1 stream is created
+ - REGTESTS: healthcheckmail: Relax matching on the healthcheck log message
+ - CLEANUP: listener: function comment typo in stop_listener()
+ - BUG/MINOR: listener: null pointer dereference suspected by coverity
+ - MINOR: flags/fd: decode FD flags states
+ - REORG: mux-h2: extract flags and enums into mux_h2-t.h
+ - MINOR: flags/mux-h2: decode H2C and H2S flags
+ - REGTESTS: log: test the log-forward feature
+ - BUG/MEDIUM: sink: bad init sequence on tcp sink from a ring.
+ - REGTESTS: ssl/log: test the log-forward with SSL
+ - MEDIUM: httpclient: httpclient_create_proxy() creates a proxy for httpclient
+ - MEDIUM: httpclient: allow to use another proxy
+ - DOC: fix TOC in starter guide for subsection 3.3.8. Statistics
+ - MINOR: httpclient: export httpclient_create_proxy()
+ - MEDIUM: quic: separate path for rx and tx with set_encryption_secrets
+ - BUG/MEDIUM: mux-quic: fix crash on early app-ops release
+ - REORG: mux-h1: extract flags and enums into mux_h1-t.h
+ - MINOR: flags/mux-h1: decode H1C and H1S flags
+ - CLEANUP: mux-quic: remove stconn usage in h3/hq
+ - BUG/MINOR: mux-quic: do not remotely close stream too early
+ - CLEANUP: exclude udp-perturb with .gitignore
+ - BUG/MEDIUM: server: segv when adding server with hostname from CLI
+ - CLEANUP: quic,ssl: fix tiny typos in C comments
+ - BUG/MEDIUM: captures: free() an error capture out of the proxy lock
+ - BUILD: fd: fix a build warning on the DWCAS
+ - MINOR: anon: add new macros and functions to anonymize contents
+ - MINOR: anon: store the anonymizing key in the global structure
+ - MINOR: anon: store the anonymizing key in the CLI's appctx
+ - MINOR: cli: anonymize commands 'show sess' and 'show sess all'
+ - MINOR: cli: anonymize 'show servers state' and 'show servers conn'
+ - MINOR: config: add command-line -dC to dump the configuration file
+ - SCRIPTS: announce-release: update some URLs to https
+
+2022/09/02 : 2.7-dev5
+ - BUG/MINOR: mux-quic: Fix memleak on QUIC stream buffer for unacknowledged data
+ - BUG/MEDIUM: cpu-map: fix thread 1's affinity affecting all threads
+ - MINOR: cpu-map: remove obsolete diag warning about combined ranges
+ - BUG/MAJOR: mworker: fix infinite loop on master with no proxies.
+ - REGTESTS: launch http_reuse_always in mworker mode
+ - BUG/MINOR: quix: Memleak for non in flight TX packets
+ - BUG/MINOR: quic: Wrong list_for_each_entry() use when building packets from qc_do_build_pkt()
+ - BUG/MINOR: quic: Safer QUIC frame builders
+ - MINOR: quic: Replace MT_LISTs by LISTs for RX packets.
+ - BUG/MEDIUM: applet: fix incorrect check for abnormal return condition from handler
+ - BUG/MINOR: applet: make the call_rate only count the no-progress calls
+ - MEDIUM: peers: limit the number of updates sent at once
+ - BUILD: tcp_sample: fix build of get_tcp_info() on OpenBSD
+ - BUG/MINOR: resolvers: return the correct value in resolvers_finalize_config()
+ - BUG/MINOR: mworker: does not create the "default" resolvers in wait mode
+ - BUG/MINOR: tcpcheck: Disable QUICKACK only if data should be sent after connect
+ - REGTESTS: Fix prometheus script to perform HTTP health-checks
+ - MINOR: resolvers: shut the warning when "default" resolvers is implicit
+ - Revert "BUG/MINOR: quix: Memleak for non in flight TX packets"
+ - BUG/MINOR: quic: Leak in qc_release_lost_pkts() for non in flight TX packets
+ - BUG/MINOR: quic: Stalled connections (missing I/O handler wakeup)
+ - CLEANUP: quic: No more use ->rx_list MT_LIST entry point (quic_rx_packet)
+ - CLEANUP: quic: Remove a useless check in qc_lstnr_pkt_rcv()
+ - MINOR: quic: Remove useless traces about references to TX packets
+ - Revert "MINOR: quic: Remove useless traces about references to TX packets"
+ - DOC: configuration: do-resolve doesn't work with a port in the string
+ - MINOR: sample: add the host_only and port_only converters
+ - BUG/MINOR: httpclient: fix resolution with port
+ - DOC: configuration.txt: do-resolve must use host_only to remove its port.
+ - BUG/MINOR: quic: Null packet dereferencing from qc_dup_pkt_frms() trace
+ - BUG/MINOR: quic: Frames added to packets even if not built.
+ - BUG/MEDIUM: spoe: Properly update streams waiting for a ACK in async mode
 + - BUG/MEDIUM: peers: Add connect and server timeout to peers proxy
+ - BUG/MEDIUM: peers: Don't use resync timer when local resync is in progress
+ - BUG/MEDIUM: peers: Don't start resync on reload if local peer is not up-to-date
+ - BUG/MINOR: hlua: Rely on CF_EOI to detect end of message in HTTP applets
+ - BUG/MEDIUM: mux-h1: do not refrain from signaling errors after end of input
+ - BUG/MINOR: epoll: do not actively poll for Rx after an error
+ - MINOR: raw-sock: don't try to send if an error was already reported
+ - BUG/MINOR: quic: Missing header protection AES cipher context initialisations (draft-v2)
+ - MINOR: quic: Add a trace to distinguish the datagram from the packets inside
+ - BUG/MINOR: ssl: fix deinit of the ca-file tree
+ - BUG/MINOR: ssl: leak of ckch_inst_link in ckch_inst_free()
+ - BUG/MINOR: tcpcheck: Disable QUICKACK for default tcp-check (with no rule)
+ - BUG/MEDIUM: ssl: Fix a UAF when old ckch instances are released
+ - BUG/MINOR: ssl: revert two wrong fixes with ckhi_link
+ - BUG/MINOR: dev/udp: properly preset the rx address size
+ - BUILD: debug: make sure debug macros are never empty
+ - MINOR: quic: Move traces about RX/TX bytes from QUIC_EV_CONN_PRSAFRM event
+ - BUG/MINOR: quic: TX frames memleak
+ - BUG/MINOR: ssl: leak of ckch_inst_link in ckch_inst_free() v2
+ - MINOR: sink/ring: rotate non-empty file-backed contents only
+ - BUG/MINOR: regex: Properly handle PCRE2 lib compiled without JIT support
+ - REGTESTS: http_request_buffer: Add a barrier to not mix up log messages
+ - BUG/MEDIUM: mux-h1: always use RST to kill idle connections in pools
+ - MINOR: backend: always satisfy the first req reuse rule with l7 retries
+ - BUG/MINOR: quic: Do not ack when probing
+ - MINOR: quic: Add TX frames addresses to traces to several trace events
+ - MINOR: quic: Trace typo fix in qc_release_frm()
+ - BUG/MINOR: quic: Frames leak during retransmissions
+ - BUG/MINOR: h2: properly set the direction flag on HTX response
+ - BUG/MEDIUM: httpclient: always detach the caller before self-killing
+ - BUG/MINOR: httpclient: only ask for more room on failed writes
 + - BUG/MINOR: httpclient: keep-alive was accidentally disabled
+ - MEDIUM: httpclient: enable ALPN support on outgoing https connections
+ - BUG/MINOR: mux-h2: fix the "show fd" dest buffer for the subscriber
+ - BUG/MINOR: mux-h1: fix the "show fd" dest buffer for the subscriber
+ - BUG/MINOR: mux-fcgi: fix the "show fd" dest buffer for the subscriber
+ - DEBUG: stream: minor rearrangement of a few fields in struct stream.
+ - MINOR: debug: report applet pointer and handler in crashes when known
+ - MINOR: mux-h2: extract the stream dump function out of h2_show_fd()
+ - MINOR: mux-h2: extract the connection dump function out of h2_show_fd()
+ - MINOR: muxes: add a "show_sd" helper to complete "show sess" dumps
+ - MINOR: mux-h2: provide a "show_sd" helper to output stream debugging info
+ - MINOR: mux-h2: insert line breaks in "show sess all" output for legibility
+ - MINOR: mux-quic: provide a "show_sd" helper to output stream debugging info
+ - MINOR: mux-h1: split "show_fd" into connection and stream
+ - MINOR: mux-h1: provide a "show_sd" helper to output stream debugging info
+ - BUG/MINOR: http-act: initialize http fmt head earlier
+
+2022/08/20 : 2.7-dev4
+ - BUG/MEDIUM: quic: Wrong packet length check in qc_do_rm_hp()
+ - MINOR: quic: Too much useless traces in qc_build_frms()
+ - BUG/MEDIUM: quic: Missing AEAD TAG check after removing header protection
+ - MINOR: quic: Replace pool_zalloc() by pool_malloc() for fake datagrams
+ - MINOR: debug: make the mem_stats section aligned to void*
+ - MINOR: debug: store and report the pool's name in struct mem_stats
+ - MINOR: debug: also store the function name in struct mem_stats
+ - MINOR: debug/memstats: automatically determine first column size
+ - MINOR: debug/memstats: permit to pass the size to free()
+ - CLEANUP: mux-quic: remove loop on sending frames
+ - MINOR: quic: replace custom buf on Tx by default struct buffer
+ - MINOR: quic: release Tx buffer on each send
+ - MINOR: quic: refactor datagram commit in Tx buffer
+ - MINOR: quic: skip sending if no frame to send in io-cb
+ - BUG/MINOR: mux-quic: open stream on STOP_SENDING
+ - BUG/MINOR: quic: fix crash on handshake io-cb for null next enc level
+ - BUG/MEDIUM: quic: always remove the connection from the accept list on close
+ - BUG/MEDIUM: poller: use fd_delete() to release the poller pipes
+ - BUG/MEDIUM: task: relax one thread consistency check in task_unlink_wq()
+ - MEDIUM: quic: xprt traces rework
+ - BUILD: stconn: fix build warning at -O3 about possible null sc
+ - MINOR: quic: Remove useless lock for RX packets
+ - BUG/MINOR: quic: Possible infinite loop in quic_build_post_handshake_frames()
+ - CLEANUP: quic: Remove trailing spaces
+ - MINOR: mux-quic: adjust enter/leave traces
+ - MINOR: mux-quic: define protocol error traces
+ - CLEANUP: mux-quic: adjust traces level
+ - MINOR: mux-quic: define new traces
+ - BUG/MEDIUM: mux-quic: fix crash due to invalid trace arg
+ - BUG/MEDIUM: quic: Possible use of uninitialized <odcid> variable in qc_lstnr_params_init()
+ - BUG/MEDIUM: ring: fix too lax 'size' parser
+ - BUG/MEDIUM: quic: Wrong use of <token_odcid> in qc_lsntr_pkt_rcv()
+ - BUILD: ring: forward-declare struct appctx to avoid a build warning
+ - MINOR: ring: support creating a ring from a linear area
+ - MINOR: ring: add support for a backing-file
+ - DEV: haring: add a simple utility to read file-backed rings
+ - DEV: haring: support remapping LF in contents with CR VT
+ - BUG/MINOR: quic: memleak on wrong datagram receipt
+ - BUILD: sink: replace S_IRUSR, S_IWUSR with their octal value
+ - MINOR: ring: archive a previous file-backed ring on startup
+ - BUG/MINOR: mux-quic: fix crash with traces in qc_detach()
 + - BUG/MINOR: quic: Missing check when building TX packets
+ - BUG/MINOR: quic: Wrong status returned by qc_pkt_decrypt()
+ - MINOR: memprof: export the minimum definitions for memory profiling
+ - MINOR: pool/memprof: report pool alloc/free in memory profiling
+ - MINOR: pools/memprof: store and report the pool's name in each bin
+ - MINOR: chunk: inline alloc_trash_chunk()
+ - MINOR: stick-table: Add table_expire() and table_idle() new converters
+ - CLEANUP: exclude haring with .gitignore
+ - MINOR: quic: adjust quic_frame flag manipulation
+ - MINOR: h3: report error on control stream close
+ - MINOR: qpack: report error on enc/dec stream close
+ - BUG/MEDIUM: mux-quic: reject uni stream ID exceeding flow control
+ - MINOR: mux-quic: adjust traces on stream init
+ - MINOR: mux-quic: add missing args on some traces
+ - MINOR: quic: refactor application send
+ - BUG/MINOR: quic: do not notify MUX on frame retransmit
+ - BUG/MEDIUM: http-ana: fix crash or wrong header deletion by http-restrict-req-hdr-names
 + - BUG/MINOR: quic: Missing initializations for duplicated frames.
+ - BUG/MEDIUM: quic: fix crash on MUX send notification
+ - REORG: h2: extract cookies concat function in http_htx
+ - REGTESTS: add test for HTTP/2 cookies concatenation
+ - MEDIUM: h3: concatenate multiple cookie headers
+ - MINOR: applet: add a function to reset the svcctx of an applet
+ - BUG/MEDIUM: cli: always reset the service context between commands
+ - BUG/MEDIUM: mux-h2: do not fiddle with ->dsi to indicate demux is idle
+ - MINOR: mux-h2/traces: report transition to SETTINGS1 before not after
+ - MINOR: mux-h2: make streams know if they need to send more data
+ - BUG/MINOR: mux-h2: send a CANCEL instead of ES on truncated writes
+ - BUG/MINOR: quic: Possible crashes when dereferencing ->pkt quic_frame struct member
+ - MINOR: quic: Add frame addresses to QUIC_EV_CONN_PRSAFRM event traces
 + - BUG/MINOR: quic: Wrong split duplicated frames handling
+ - MINOR: quic: Add the QUIC connection to mux traces
+ - MINOR: quic: Trace fix in qc_release_frm()
+ - BUG/MAJOR: log-forward: Fix log-forward proxies not fully initialized
+ - BUG/MAJOR: log-forward: Fix ssl layer not initialized on bind even if configured
+ - MINOR: quic: Add reusable cipher contexts for header protection
+ - BUG/MINOR: ssl/cli: error when the ca-file is empty
+ - MINOR: ssl: handle ca-file appending in cafile_entry
+ - MINOR: ssl/cli: implement "add ssl ca-file"
+
+2022/08/07 : 2.7-dev3
+ - BUILD: makefile: Fix install(1) handling for OpenBSD/NetBSD/Solaris/AIX
+ - BUG/MEDIUM: tools: avoid calling dlsym() in static builds (try 2)
+ - MINOR: resolvers: resolvers_destroy() deinit and free a resolver
+ - BUG/MINOR: resolvers: shut off the warning for the default resolvers
+ - BUG/MINOR: ssl: allow duplicate certificates in ca-file directories
+ - BUG/MINOR: tools: fix statistical_prng_range()'s output range
+ - BUG/MINOR: quic: do not send CONNECTION_CLOSE_APP in initial/handshake
+ - BUILD: debug: Add braces to if statement calling only CHECK_IF()
+ - BUG/MINOR: fd: Properly init the fd state in fd_insert()
+ - BUG/MEDIUM: fd/threads: fix incorrect thread selection in wakeup broadcast
+ - MINOR: init: load OpenSSL error strings
+ - MINOR: ssl: enhance ca-file error emitting
+ - BUG/MINOR: mworker/cli: relative pid prefix not validated anymore
+ - BUG/MAJOR: mux_quic: fix invalid PROTOCOL_VIOLATION on POST data overlap
+ - BUG/MEDIUM: mworker: proc_self incorrectly set crashes upon reload
+ - BUILD: add detection for unsupported compiler models
+ - BUG/MEDIUM: stconn: Only reset connect expiration when processing backend side
+ - BUG/MINOR: backend: Fallback on RR algo if balance on source is impossible
+ - BUG/MEDIUM: master: force the thread count earlier
+ - BUG/MAJOR: poller: drop FD's tgid when masks don't match
+ - DEBUG: fd: detect possibly invalid tgid in fd_insert()
+ - BUG/MINOR: sockpair: wrong return value for fd_send_uxst()
+ - MINOR: sockpair: move send_fd_uxst() error message in caller
+ - Revert "BUG/MINOR: peers: set the proxy's name to the peers section name"
+ - DEBUG: fd: split the fd check
+ - MEDIUM: resolvers: continue startup if network is unavailable
+ - BUG/MINOR: fd: always remove late updates when freeing fd_updt[]
+ - MINOR: cli: emit a warning when _getsocks was used more than once
+ - BUG/MINOR: mworker: PROC_O_LEAVING used but not updated
+ - Revert "MINOR: cli: emit a warning when _getsocks was used more than once"
+ - MINOR: cli: warning on _getsocks when socket were closed
+ - BUG/MEDIUM: mux-quic: fix missing EOI flag to prevent streams leaks
+ - MINOR: quic: Congestion control architecture refactoring
+ - MEDIUM: quic: Cubic congestion control algorithm implementation
+ - MINOR: quic: New "quic-cc-algo" bind keyword
+ - BUG/MINOR: quic: loss time limit variable computed but not used
+ - MINOR: quic: Stop looking for packet loss asap
+ - BUG/MAJOR: quic: Useless resource intensive loop qc_ackrng_pkts()
+ - MINOR: quic: Send packets as much as possible from qc_send_app_pkts()
+ - BUG/MEDIUM: queue/threads: limit the number of entries dequeued at once
+ - MAJOR: threads/plock: update the embedded library
+ - MINOR: thread: provide an alternative to pthread's rwlock
+ - DEBUG: tools: provide a tree dump function for ebmbtrees as well
+ - MINOR: ebtree: add ebmb_lookup_shorter() to pursue lookups
+ - BUG/MEDIUM: pattern: only visit equivalent nodes when skipping versions
+ - BUG/MINOR: mux-quic: prevent crash if conn released during IO callback
+ - CLEANUP: mux-quic: remove useless app_ops is_active callback
+ - BUG/MINOR: mux-quic: do not free conn if attached streams
+ - MINOR: mux-quic: save proxy instance into qcc
+ - MINOR: mux-quic: use timeout server for backend conns
+ - MEDIUM: mux-quic: adjust timeout refresh
+ - MINOR: mux-quic: count in-progress requests
+ - MEDIUM: mux-quic: implement http-keep-alive timeout
+ - MINOR: peers: Add a warning about incompatible SSL config for the local peer
+ - MINOR: peers: Use a dedicated reconnect timeout when stopping the local peer
+ - BUG/MEDIUM: peers: limit reconnect attempts of the old process on reload
+ - BUG/MINOR: peers: Use right channel flag to consider the peer as connected
+ - BUG/MEDIUM: dns: Properly initialize new DNS session
+ - BUG/MINOR: backend: Don't increment conn_retries counter too early
+ - MINOR: server: Constify source server to copy its settings
+ - REORG: server: Export srv_settings_cpy() function
+ - BUG/MEDIUM: proxy: Perform a custom copy for default server settings
+ - BUG/MINOR: quic: Missing in flight ack eliciting packet counter decrement
+ - BUG/MEDIUM: quic: Floating point exception in cubic_root()
+ - MINOR: h3: support HTTP request framing state
+ - MINOR: mux-quic: refresh timeout on frame decoding
+ - MINOR: mux-quic: refactor refresh timeout function
+ - MEDIUM: mux-quic: implement http-request timeout
+ - BUG/MINOR: quic: Avoid sending truncated datagrams
+ - BUG/MINOR: ring/cli: fix a race condition between the writer and the reader
+ - BUG/MEDIUM: sink: Set the sink ref for forwarders created during ring parsing
+ - BUG/MINOR: sink: fix a race condition between the writer and the reader
+ - BUG/MINOR: quic: do not reject datagrams matching minimum permitted size
+ - MINOR: quic: Add two new stats counters for sendto() errors
+ - BUG/MINOR: quic: Missing Initial packet dropping case
 + - MINOR: quic: explicitly ignore sendto error
+ - BUG/MINOR: quic: adjust errno handling on sendto
+ - BUG/MEDIUM: quic: break out of the loop in quic_lstnr_dghdlr
+ - MINOR: threads: report the number of thread groups in build options
+ - MINOR: config: automatically preset MAX_THREADS based on MAX_TGROUPS
+ - BUILD: SSL: allow to pass additional configure args to QUICTLS
+ - CI: enable weekly "m32" builds on x86_64
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MEDIUM: fix DH length when EC key is used
+ - REGTESTS: ssl: adopt tests to OpenSSL-3.0.N
+ - REGTESTS: ssl: adopt tests to OpenSSL-3.0.N
+ - REGTESTS: ssl: fix grep invocation to use extended regex in ssl_generate_certificate.vtc
+ - BUILD: cfgparse: always defined _GNU_SOURCE for sched.h and crypt.h
+
+2022/07/16 : 2.7-dev2
+ - BUG/MINOR: qpack: fix build with QPACK_DEBUG
+ - MINOR: h3: handle errors on HEADERS parsing/QPACK decoding
+ - BUG/MINOR: qpack: abort on dynamic index field line decoding
+ - MINOR: qpack: properly handle invalid dynamic table references
+ - MINOR: task: Add tasklet_wakeup_after()
+ - BUG/MINOR: quic: Dropped packets not counted (with RX buffers full)
+ - MINOR: quic: Add new stats counter to diagnose RX buffer overrun
+ - MINOR: quic: Duplicated QUIC_RX_BUFSZ definition
+ - MINOR: quic: Improvements for the datagrams receipt
+ - CLEANUP: h2: Typo fix in h2_unsubcribe() traces
+ - MINOR: quic: Increase the QUIC connections RX buffer size (upto 64Kb)
+ - CLEANUP: mux-quic: adjust comment on qcs_consume()
+ - MINOR: ncbuf: implement ncb_is_fragmented()
+ - BUG/MINOR: mux-quic: do not signal FIN if gap in buffer
+ - MINOR: fd: add a new FD_DISOWN flag to prevent from closing a deleted FD
+ - BUG/MEDIUM: ssl/fd: unexpected fd close using async engine
+ - MINOR: tinfo: make tid temporarily still reflect global ID
+ - CLEANUP: config: remove unused proc_mask()
+ - MINOR: debug: remove mask support from "debug dev sched"
+ - MEDIUM: task: add and preset a thread ID in the task struct
+ - MEDIUM: task/debug: move the ->thread_mask integrity checks to ->tid
+ - MAJOR: task: use t->tid instead of ffsl(t->thread_mask) to take the thread ID
+ - MAJOR: task: replace t->thread_mask with 1<<t->tid when thread mask is needed
+ - CLEANUP: task: remove thread_mask from the struct task
+ - MEDIUM: applet: only keep appctx_new_*() and drop appctx_new()
+ - MEDIUM: task: only keep task_new_*() and drop task_new()
+ - MINOR: applet: always use task_new_on() on applet creation
+ - MEDIUM: task: remove TASK_SHARED_WQ and only use t->tid
+ - MINOR: task: replace task_set_affinity() with task_set_thread()
+ - CLEANUP: task: remove the unused task_unlink_rq()
+ - CLEANUP: task: remove the now unused TASK_GLOBAL flag
+ - MINOR: task: make rqueue_ticks atomic
+ - MEDIUM: task: move the shared runqueue to one per thread
+ - MEDIUM: task: replace the global rq_lock with a per-rq one
+ - MINOR: task: remove grq_total and use rq_total instead
+ - MINOR: task: replace global_tasks_mask with a check for tree's emptiness
+ - MEDIUM: task: use regular eb32 trees for the run queues
+ - MEDIUM: queue: revert to regular inter-task wakeups
+ - MINOR: thread: make wake_thread() take care of the sleeping threads mask
+ - MINOR: thread: move the flags to the shared cache line
+ - MINOR: thread: only use atomic ops to touch the flags
+ - MINOR: poller: centralize poll return handling
+ - MEDIUM: polling: make update_fd_polling() not care about sleeping threads
+ - MINOR: poller: update_fd_polling: wake a random other thread
+ - MEDIUM: thread: add a new per-thread flag TH_FL_NOTIFIED to remember wakeups
+ - MEDIUM: tasks/fd: replace sleeping_thread_mask with a TH_FL_SLEEPING flag
+ - MINOR: tinfo: add the tgid to the thread_info struct
+ - MINOR: tinfo: replace the tgid with tgid_bit in tgroup_info
+ - MINOR: tinfo: add the mask of enabled threads in each group
+ - MINOR: debug: use ltid_bit in ha_thread_dump()
+ - MINOR: wdt: use ltid_bit in wdt_handler()
+ - MINOR: clock: use ltid_bit in clock_report_idle()
+ - MINOR: thread: use ltid_bit in ha_tkillall()
+ - MINOR: thread: add a new all_tgroups_mask variable to know about active tgroups
+ - CLEANUP: thread: remove thread_sync_release() and thread_sync_mask
+ - MEDIUM: tinfo: add a dynamic thread-group context
+ - MEDIUM: thread: make stopping_threads per-group and add stopping_tgroups
+ - MAJOR: threads: change thread_isolate to support inter-group synchronization
+ - MINOR: thread: add is_thread_harmless() to know if a thread already is harmless
+ - MINOR: debug: mark oneself harmless while waiting for threads to finish
+ - MINOR: wdt: do not rely on threads_to_dump anymore
+ - MEDIUM: debug: make the thread dumper not rely on a thread mask anymore
+ - BUILD: debug: fix build issue on clang with previous commit
+ - BUILD: debug: re-export thread_dump_state
+ - BUG/MEDIUM: threads: fix incorrect thread group being used on soft-stop
+ - BUG/MEDIUM: thread: check stopping thread against local bit and not global one
+ - MINOR: proxy: use tg->threads_enabled in hard_stop() to detect stopped threads
+ - BUILD: Makefile: Add Lua 5.4 autodetect
+ - CI: re-enable gcc asan builds
+ - MEDIUM: mworker: set the iocb of the socketpair without using fd_insert()
+ - MINOR: fd: Add BUG_ON checks on fd_insert()
+ - CLEANUP: mworker: rename mworker_pipe to mworker_sockpair
+ - CLEANUP: mux-quic: do not export qc_get_ncbuf
+ - REORG: mux-quic: reorganize flow-control fields
+ - MINOR: mux-quic: implement accessor for sedesc
+ - MEDIUM: mux-quic: refactor streams opening
+ - MINOR: mux-quic: rename qcs flag FIN_RECV to SIZE_KNOWN
+ - MINOR: mux-quic: emit FINAL_SIZE_ERROR on invalid STREAM size
+ - BUG/MINOR: peers/config: always fill the bind_conf's argument
+ - BUG/MEDIUM: peers/config: properly set the thread mask
+ - CLEANUP: bwlim: Set pointers to NULL when memory is released
+ - BUG/MINOR: http-check: Preserve headers if not redefined by an implicit rule
+ - BUG/MINOR: http-act: Properly generate 103 responses when several rules are used
+ - BUG/MEDIUM: thread: mask stopping_threads with threads_enabled when checking it
+ - CLEANUP: thread: also remove a thread's bit from stopping_threads on stop
+ - BUG/MINOR: peers: fix possible NULL dereferences at config parsing
 + - BUG/MINOR: http-htx: Fix scheme based normalization for URIs with userinfo
+ - MINOR: http: Add function to get port part of a host
+ - MINOR: http: Add function to detect default port
 + - BUG/MEDIUM: h1: Improve authority validation for CONNECT request
+ - MINOR: http-htx: Use new HTTP functions for the scheme based normalization
+ - BUG/MEDIUM: http-fetch: Don't fetch the method if there is no stream
 + - REGTESTS: filters: Fix CONNECT request in random-forwarding script
+ - MEDIUM: mworker/systemd: send STATUS over sd_notify
+ - BUG/MINOR: mux-h1: Be sure to commit htx changes in the demux buffer
+ - BUG/MEDIUM: http-ana: Don't wait to have an empty buf to switch in TUNNEL state
+ - BUG/MEDIUM: mux-h1: Handle connection error after a synchronous send
+ - MEDIUM: epoll: don't synchronously delete migrated FDs
+ - BUILD: debug: silence warning on gcc-5
+ - BUILD: http: silence an uninitialized warning affecting gcc-5
+ - BUG/MEDIUM: mux-quic: fix server chunked encoding response
+ - REORG: mux-quic: rename stream initialization function
+ - MINOR: mux-quic: rename stream purge function
+ - MINOR: mux-quic: add traces on frame parsing functions
+ - MINOR: mux-quic: implement qcs_alert()
+ - MINOR: mux-quic: filter send/receive-only streams on frame parsing
+ - MINOR: mux-quic: do not ack STREAM frames on unrecoverable error
+ - MINOR: mux-quic: support stream opening via MAX_STREAM_DATA
+ - MINOR: mux-quic: define basic stream states
+ - MINOR: mux-quic: use stream states to mark as detached
+ - MEDIUM: mux-quic: implement RESET_STREAM emission
+ - MEDIUM: mux-quic: implement STOP_SENDING handling
+ - BUG/MEDIUM: debug: fix possible hang when multiple threads dump at once
+ - BUG/MINOR: quic: fix closing state on NO_ERROR code sent
+ - CLEANUP: quic: clean up include on quic_frame-t.h
+ - MINOR: quic: define a generic QUIC error type
+ - MINOR: mux-quic: support app graceful shutdown
+ - MINOR: mux-quic/h3: prepare CONNECTION_CLOSE on release
+ - MEDIUM: quic: send CONNECTION_CLOSE on released MUX
+ - CLEANUP: mux-quic: move qc_release()
+ - MINOR: mux-quic: send one last time before release
+ - MINOR: h3: store control stream in h3c
+ - MINOR: h3: implement graceful shutdown with GOAWAY
+ - BUG/MINOR: threads: produce correct global mask for tgroup > 1
+ - BUG/MEDIUM: cli/threads: make "show threads" more robust on applets
+ - BUG/MINOR: thread: use the correct thread's group in ha_tkillall()
+ - BUG/MINOR: debug: enter ha_panic() only once
+ - BUG/MEDIUM: debug: fix parallel thread dumps again
+ - MINOR: cli/streams: show a stream's tgid next to its thread ID
+ - DEBUG: cli: add a new "debug dev deadlock" expert command
+ - MINOR: cli/activity: add a thread number argument to "show activity"
+ - CLEANUP: applet: remove the obsolete command context from the appctx
+ - MEDIUM: config: remove deprecated "bind-process" directives from frontends
+ - MEDIUM: config: remove the "process" keyword on "bind" lines
+ - MINOR: listener/config: make "thread" always support up to LONGBITS
+ - CLEANUP: fd: get rid of the __GET_{NEXT,PREV} macros
+ - MEDIUM: debug/threads: make the lock debugging take tgroups into account
+ - MEDIUM: proto: stop protocols under thread isolation during soft stop
+ - MEDIUM: poller: program the update in fd_update_events() for a migrated FD
+ - MEDIUM: poller: disable thread-groups for poll() and select()
+ - MINOR: thread: remove MAX_THREADS limitation
+ - MEDIUM: cpu-map: replace the process number with the thread group number
+ - MINOR: mworker/threads: limit the mworker sockets to group 1
+ - MINOR: cli/threads: always bind CLI to thread group 1
+ - MINOR: fd/thread: get rid of thread_mask()
+ - MEDIUM: task/thread: move the task shared wait queues per thread group
+ - MINOR: task: move the niced_tasks counter to the thread group context
+ - DOC: design: add some thoughts about how to handle the update_list
+ - MEDIUM: conn: make conn_backend_get always scan the same group
+ - MAJOR: fd: remove pending updates upon real close
+ - MEDIUM: fd/poller: make the update-list per-group
+ - MINOR: fd: delete unused updates on close()
+ - MINOR: fd: make fd_insert() apply the thread mask itself
+ - MEDIUM: fd: add the tgid to the fd and pass it to fd_insert()
+ - MINOR: cli/fd: show fd's tgid and refcount in "show fd"
+ - MINOR: fd: add functions to manipulate the FD's tgid
+ - MINOR: fd: add fd_get_running() to atomically return the running mask
+ - MAJOR: fd: grab the tgid before manipulating running
+ - MEDIUM: fd/poller: turn polled_mask to group-local IDs
+ - MEDIUM: fd/poller: turn update_mask to group-local IDs
+ - MEDIUM: fd/poller: turn running_mask to group-local IDs
+ - MINOR: fd: make fd_clr_running() return the previous value instead
+ - MEDIUM: fd: make thread_mask now represent group-local IDs
+ - MEDIUM: fd: make fd_insert() take local thread masks
+ - MEDIUM: fd: make fd_insert/fd_delete atomically update fd.tgid
+ - MEDIUM: fd: quit fd_update_events() when FD is closed
+ - MEDIUM: thread: change thread_resolve_group_mask() to return group-local values
+ - MEDIUM: listener: switch bind_thread from global to group-local
+ - MINOR: fd: add fd_reregister_all() to deal with boot-time FDs
+ - MEDIUM: fd: support stopping FDs during starting
+ - MAJOR: pollers: rely on fd_reregister_all() at boot time
+ - MAJOR: poller: only touch/inspect the update_mask under tgid protection
+ - MEDIUM: fd: support broadcasting updates for foreign groups in updt_fd_polling
+ - CLEANUP: threads: remove the now unused all_threads_mask and tid_bit
+ - MINOR: config: change default MAX_TGROUPS to 16
+ - BUG/MEDIUM: tools: avoid calling dlsym() in static builds
+
+2022/06/24 : 2.7-dev1
+ - BUG/MINOR: ssl_ckch: Free error msg if commit changes on a cert entry fails
+ - BUG/MINOR: ssl_ckch: Free error msg if commit changes on a CA/CRL entry fails
+ - BUG/MEDIUM: ssl_ckch: Don't delete a cert entry if it is being modified
+ - BUG/MEDIUM: ssl_ckch: Don't delete CA/CRL entry if it is being modified
+ - BUG/MINOR: ssl_ckch: Don't duplicate path when replacing a cert entry
+ - BUG/MINOR: ssl_ckch: Don't duplicate path when replacing a CA/CRL entry
+ - BUG/MEDIUM: ssl_ckch: Rework 'commit ssl cert' to handle full buffer cases
+ - BUG/MEDIUM: ssl_ckch: Rework 'commit ssl ca-file' to handle full buffer cases
+ - BUG/MEDIUM: ssl/crt-list: Rework 'add ssl crt-list' to handle full buffer cases
+ - BUG/MEDIUM: httpclient: Don't remove HTX header blocks before duplicating them
+ - BUG/MEDIUM: httpclient: Rework CLI I/O handler to handle full buffer cases
+ - MEDIUM: httpclient: Don't close CLI applet at the end of a response
+ - MEDIUM: http-ana: Always report rewrite failures as PRXCOND in logs
+ - CLEANUP: Re-apply xalloc_size.cocci (2)
+ - REGTESTS: abortonclose: Add a barrier to not mix up log messages
+ - REGTESTS: http_request_buffer: Increase client timeout to wait "slow" clients
+ - CLEANUP: ssl_ckch: Use corresponding enum for commit_cacrlfile_ctx.cafile_type
+ - MINOR: ssl_ckch: Simplify I/O handler to commit changes on CA/CRL entry
+ - BUG/MINOR: ssl_ckch: Use right type for old entry in show_crlfile_ctx
+ - BUG/MINOR: ssl_ckch: Dump CRL transaction only once if show command yield
+ - BUG/MINOR: ssl_ckch: Dump CA transaction only once if show command yield
+ - BUG/MINOR: ssl_ckch: Dump cert transaction only once if show command yield
+ - BUG/MINOR: ssl_ckch: Init right field when parsing "commit ssl crl-file" cmd
+ - CLEANUP: ssl_ckch: Remove unused field in commit_cacrlfile_ctx structure
+ - MINOR: ssl_ckch: Simplify structure used to commit changes on CA/CRL entries
+ - MINOR: ssl_ckch: Remove service context for "set ssl cert" command
+ - MINOR: ssl_ckch: Remove service context for "set ssl ca-file" command
+ - MINOR: ssl_ckch: Remove service context for "set ssl crl-file" command
+ - BUG/MINOR: ssl_ckch: Fix possible uninitialized value in show_cert I/O handler
+ - BUG/MINOR: ssl_ckch: Fix possible uninitialized value in show_cafile I/O handler
+ - BUG/MINOR: ssl_ckch: Fix possible uninitialized value in show_crlfile I/O handler
+ - BUILD: ssl_ckch: Fix build error about a possible uninitialized value
+ - BUG/MINOR: ssl_ckch: Fix another possible uninitialized value
+ - REGTESTS: http_abortonclose: Extend supported versions
+ - REGTESTS: restrict_req_hdr_names: Extend supported versions
+ - MINOR: connection: support HTTP/3.0 for smp_*_http_major fetch
+ - MINOR: h3: add h3c pointer into h3s instance
+ - MINOR: mux-quic: simplify decode_qcs API
+ - MINOR: mux-quic/h3: adjust demuxing function return values
+ - BUG/MINOR: h3: fix return value on decode_qcs on error
+ - BUILD: quic: fix anonymous union for gcc-4.4
+ - BUILD: compiler: implement unreachable for older compilers too
+ - DEV: tcploop: reorder options in the usage message
+ - DEV: tcploop: make the current address the default address
+ - DEV: tcploop: make it possible to change the target address of a connect()
+ - DEV: tcploop: factor out the socket creation
+ - DEV: tcploop: permit port 0 to ease handling of default options
+ - DEV: tcploop: add a new "bind" command to bind to ip/port.
+ - DEV: tcploop: add minimal UDP support
+ - BUG/MINOR: trace: Test server existence for health-checks to get proxy
+ - BUG/MINOR: checks: Properly handle email alerts in trace messages
+ - BUG/MEDIUM: mailers: Set the object type for check attached to an email alert
+ - REGTESTS: healthcheckmail: Update the test to be functional again
+ - REGTESTS: healthcheckmail: Relax health-check failure condition
+ - BUG/MINOR: h3: fix incorrect BUG_ON assert on SETTINGS parsing
+ - MEDIUM: mux-h2: try to coalesce outgoing WINDOW_UPDATE frames
+ - OPTIM: mux-h2: increase h2_settings_initial_window_size default to 64k
+ - BUG/MINOR: h3: fix frame type definition
+ - BUG/MEDIUM: h3: fix SETTINGS parsing
+ - BUG/MINOR: cli/stats: add missing trailing LF after JSON outputs
+ - BUG/MINOR: server: do not enable DNS resolution on disabled proxies
+ - BUG/MINOR: cli/stats: add missing trailing LF after "show info json"
+ - DOC: design: update the notes on thread groups
+ - BUG/MEDIUM: mux-quic: fix flow control connection Tx level
+ - MINOR: mux-quic: complete BUG_ON on TX flow-control enforcing
+ - BUG/MINOR: mux-quic: fix memleak on frames rejected by transport
+ - BUG/MINOR: tcp-rules: Make action call final on read error and delay expiration
+ - CLEANUP: check: Remove useless tests on check's stream-connector
+ - BUG/MEDIUM: stconn: Don't wakeup applet for send if it won't consume data
+ - BUG/MEDIUM: cli: Notify cli applet won't consume data during request processing
+ - BUG/MEDIUM: mux-quic: fix segfault on flow-control frame cleanup
+ - MINOR: task: move profiling bit to per-thread
+ - CLEANUP: quic: use task_new_on() for single-threaded tasks
+ - MINOR: tinfo: remove the global thread ID bit (tid_bit)
+ - CLEANUP: hlua: check for at least 2 threads on a task
+ - MINOR: thread: get rid of MAX_THREADS_MASK
+ - OPTIM: task: do not consult shared WQ when we're already full
+ - DOC: design: update the task vs thread affinity requirements
+ - MINOR: qpack: add comments and remove a useless trace
+ - MINOR: qpack: reduce dependencies on other modules
+ - BUG/MINOR: qpack: support header literal name decoding
+ - MINOR: qpack: add ABORT_NOW on unimplemented decoding
+ - BUG/MINOR: h3/qpack: deal with too many headers
+ - MINOR: qpack: improve decoding function
+ - MINOR: qpack: implement standalone decoder tool
+ - BUG/BUILD: h3: fix wrong label name
+ - BUG/MINOR: quic: Stop hardcoding Retry packet Version field
+ - MINOR: quic: Add several nonce and key definitions for Retry tag
+ - BUG/MINOR: quic: Wrong PTO calculation
+ - MINOR: quic: Parse long packet version from qc_parse_hd_form()
+ - CLEANUP: quic: QUIC draft-28 no more supported
+ - MEDIUM: quic: Add QUIC v2 draft support
+ - MINOR: quic: Released QUIC TLS extension for QUIC v2 draft
+ - MEDIUM: quic: Compatible version negotiation implementation (draft-08)
+ - CLEANUP: quic: Remove any reference to boringssl
+ - BUG/MINOR: task: fix thread assignment in tasklet_kill()
+ - BUG/MEDIUM: stream: Properly handle destructive client connection upgrades
+ - MINOR: stream: Rely on stconn flags to abort stream destructive upgrade
+ - CLEANUP: stconn: Don't expect to have no sedesc on detach
+ - BUG/MINOR: log: Properly test connection retries to fix dontlog-normal option
+ - MINOR: hlua: don't dump empty entries in hlua_traceback()
+ - MINOR: hlua: add a new hlua_show_current_location() function
+ - MEDIUM: debug: add a tainted flag when a shared library is loaded
+ - MEDIUM: debug: detect redefinition of symbols upon dlopen()
+ - BUILD: quic: Wrong HKDF label constant variable initializations
+ - BUG/MINOR: quic: Unexpected half open connection counter wrapping
+ - BUG/MINOR: quic_stats: Duplicate "quic_streams_data_blocked_bidi" field name
+ - BUG/MINOR: quic: purge conn Rx packet list on release
+ - BUG/MINOR: quic: free rejected Rx packets
+ - BUG/MINOR: qpack: abort on dynamic index field line decoding
+ - BUG/MEDIUM: ssl/cli: crash when crt inserted into a crt-list
+ - REGTESTS: ssl: add the same cert for client/server
+ - BUG/MINOR: quic: Acknowledgement must be forced during handshake
+ - MINOR: quic: Dump version_information transport parameter
+ - BUG/MEDIUM: mworker: use default maxconn in wait mode
+ - MINOR: intops: add a function to return a valid bit position from a mask
+ - TESTS: add a unit test for one_among_mask()
+ - BUILD: ssl_ckch: fix "maybe-uninitialized" build error on gcc-9.4 + ARM
+ - BUG/MINOR: ssl: Do not look for key in extra files if already in pem
+ - BUG/MINOR: quic: Missing acknowledgments for trailing packets
+ - BUG/MINOR: http-ana: Set method to HTTP_METH_OTHER when an HTTP txn is created
+ - BUG/MINOR: http-fetch: Use integer value when possible in "method" sample fetch
+ - MINOR: freq_ctr: Add a function to get events excess over the current period
+ - BUG/MINOR: stream: only free the req/res captures when set
+ - CLEANUP: pool/tree-wide: remove suffix "_pool" from certain pool names
+ - MEDIUM: debug: improve DEBUG_MEM_STATS to also report pool alloc/free
+ - BUG/MINOR: quic: Wrong reuse of fulfilled dgram RX buffer
+ - BUG/MAJOR: quic: Big RX dgrams leak when fulfilling a buffer
+ - BUG/MAJOR: quic: Big RX dgrams leak with POST requests
+ - BUILD: quic+h3: 32-bit compilation errors fixes
+ - MEDIUM: bwlim: Add support of bandwidth limitation at the stream level
+
+2022/05/31 : 2.7-dev0
+ - MINOR: version: it's development again
+
+2022/05/31 : 2.6.0
+ - DOC: Fix formatting in configuration.txt to fix dconv
+ - CLEANUP: tcpcheck: Remove useless test on the stream-connector in tcpcheck_main
+ - CLEANUP: muxes: Consider stream's sd as defined in .show_fd callback functions
+ - MINOR: quic: Ignore out of packet padding.
+ - CLEANUP: quic: Useless QUIC_CONN_TX_BUF_SZ definition
+ - CLEANUP: quic: No more used handshake output buffer
+ - MINOR: quic: QUIC transport parameters split.
+ - MINOR: quic: Transport parameters dump
+ - DOC: quic: Update documentation for QUIC Retry
+ - MINOR: quic: Tunable "max_idle_timeout" transport parameter
+ - MINOR: quic: Tunable "initial_max_streams_bidi" transport parameter
+ - MINOR: quic: Clarifications about transport parameters value
+ - MINOR: quic_stats: add QUIC connection errors counters
+ - BUG/MINOR: quic: Largest RX packet numbers mixing
+ - MINOR: quic_stats: Add transport new counters (lost, stateless reset, drop)
+ - DOC: quic: Documentation update for QUIC
+ - MINOR: quic: Connection TX buffer setting renaming.
+ - MINOR: h3: Add a statistics module for h3
+ - MINOR: quic: Send STOP_SENDING frames if mux is released
+ - MINOR: quic: Do not drop packets with RESET_STREAM frames
+ - BUG/MINOR: qpack: fix buffer API usage on prefix integer encoding
+ - BUG/MINOR: qpack: support bigger prefix-integer encoding
+ - BUG/MINOR: h3: do not report bug on unknown method
+ - SCRIPTS: add make-releases-json to recreate a releases.json file in download dirs
+ - SCRIPTS: make publish-release try to launch make-releases-json
+ - MINOR: htx: add an unchecked version of htx_get_head_blk()
+ - BUILD: htx: use the unchecked version of htx_get_head_blk() where needed
+ - BUILD: quic: use inttypes.h instead of stdint.h
+ - DOC: internal: remove totally outdated diagrams
+ - DOC: remove the outdated ROADMAP file
+ - DOC: add maintainers for QUIC and HTTP/3
+ - MINOR: h3: define h3 trace module
+ - MINOR: h3: add traces on frame recv
+ - MINOR: h3: add traces on frame send
+ - MINOR: h3: add traces on h3s init/end
+ - EXAMPLES: remove completely outdated acl-content-sw.cfg
+ - BUILD: makefile: reorder objects by build time
+ - DOC: fix a few spelling mistakes in the docs
+ - BUG/MEDIUM: peers/cli: fix "show peers" crash
+ - CLEANUP: peers/cli: stop misusing the appctx local variable
+ - CLEANUP: peers/cli: make peers_dump_peer() take an appctx instead of an stconn
+ - BUG/MINOR: peers: set the proxy's name to the peers section name
+ - MINOR: server: indicate when no address was expected for a server
+ - BUG/MINOR: peers: detect and warn on init_addr/resolvers/check/agent-check
+ - DOC: peers: indicate that some server settings are not usable
+ - DOC: peers: clarify when entry expiration date is renewed.
+ - DOC: peers: fix port number and addresses on new peers section format
+ - DOC: gpc/gpt: add comments of gpc/gpt array definitions on stick tables.
+ - DOC: install: update supported OpenSSL versions in the INSTALL doc
+ - MINOR: ncbuf: adjust ncb_data with NCBUF_NULL
+ - BUG/MINOR: h3: fix frame demuxing
+ - BUG/MEDIUM: h3: fix H3_EXCESSIVE_LOAD when receiving H3 frame header only
+ - BUG/MINOR: quic: Fix QUIC_EV_CONN_PRSAFRM event traces
+ - CLEANUP: quic: remove useless check on local UNI stream reception
+ - BUG/MINOR: qpack: do not consider empty enc/dec stream as error
+ - DOC: intro: adjust the numbering of paragraphs to keep the output ordered
+ - MINOR: version: mention that it's LTS now.
+
+2022/05/27 : 2.6-dev12
+ - CLEANUP: tools: Clean up non-QUIC error message handling in str2sa_range()
+ - BUG/MEDIUM: tools: Fix `inet_ntop` usage in sa2str
+ - CLEANUP: tools: Crash if inet_ntop fails due to ENOSPC in sa2str
+ - BUG/MEDIUM: mux-quic: adjust buggy proxy closing support
+ - Revert "MINOR: quic: activate QUIC traces at compilation"
+ - Revert "MINOR: mux-quic: activate qmux traces on stdout via macro"
+ - CLEANUP: init: address a coverity warning about possible multiply overflow
+ - BUG/MEDIUM: http: Properly reject non-HTTP/1.x protocols
+ - MEDIUM: h1: enlarge the scope of accepted version chars with accept-invalid-http-request
+ - BUG/MEDIUM: resolvers: Don't defer resolutions release in deinit function
+ - BUG/MEDIUM: peers: fix segfault using multiple bind on peers sections
+ - BUG/MEDIUM: peers: prevent uninitialized multiple listeners on peers section
+ - BUG/MINOR: task: Don't defer tasks release when HAProxy is stopping
+ - MINOR: h3: mark ncbuf as const on h3_b_dup
+ - MINOR: mux-quic: do not alloc quic_stream_desc for uni remote stream
+ - MINOR: mux-quic: delay cs_endpoint allocation
+ - MINOR: mux-quic: add traces in qc_recv()
+ - MINOR: mux-quic: adjust return value of decode_qcs
+ - CLEANUP: h3: rename struct h3 -> h3c
+ - CLEANUP: h3: rename uni stream type constants
+ - BUG/MINOR: h3: prevent overflow when parsing SETTINGS
+ - MINOR: h3: refactor h3_control_send()
+ - MINOR: quic: support CONNECTION_CLOSE_APP emission
+ - MINOR: mux-quic: disable read on CONNECTION_CLOSE emission
+ - MINOR: h3: reject too big frames
+ - MINOR: mux-quic: emit STREAM_STATE_ERROR in qcc_recv
+ - BUG/MINOR: mux-quic: refactor uni streams TX/send H3 SETTINGS
+ - MINOR: h3/qpack: use qcs as type in decode callbacks
+ - MINOR: h3: define stream type
+ - MINOR: h3: refactor uni streams initialization
+ - MINOR: h3: check if frame is valid for stream type
+ - MINOR: h3: define non-h3 generic parsing function
+ - MEDIUM: quic: refactor uni streams RX
+ - CLEANUP: h3: remove h3 uni tasklet
+ - MINOR: h3: abort read on unknown uni stream
+ - MINOR: h3: refactor SETTINGS parsing/error reporting
+ - Revert "BUG/MINOR: task: Don't defer tasks release when HAProxy is stopping"
+ - DOC: configuration: add a warning for @system-ca on bind
+ - CLEANUP: init: address another coverity warning about a possible multiply overflow
+ - BUG/MINOR: ssl/lua: use correctly cert_ext in CertCache.set()
+ - BUG/MEDIUM: sample: Fix adjusting size in word converter
+ - REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+ (2)
+ - CLEANUP: conn_stream: remove unneeded exclusion of RX_WAIT_EP from RXBLK_ANY
+ - CLEANUP: conn_stream: rename the cs_endpoint's context to "conn"
+ - MINOR: conn_stream: add new sets of functions to set/get endpoint flags
+ - DEV: coccinelle: add cs_endp_flags.cocci
+ - CLEANUP: conn_stream: apply cs_endp_flags.cocci tree-wide
+ - DEV: coccinelle: add endp_flags.cocci
+ - CLEANUP: conn_stream: apply endp_flags.cocci tree-wide
+ - CLEANUP: conn_stream: rename the stream endpoint flags CS_EP_* to SE_FL_*
+ - CLEANUP: conn_stream: rename the cs_endpoint's target to "se"
+ - CLEANUP: conn_stream: rename cs_endpoint to sedesc (stream endpoint descriptor)
+ - CLEANUP: applet: rename the sedesc pointer from "endp" to "sedesc"
+ - CLEANUP: conn_stream: rename the conn_stream's endp to sedesc
+ - CLEANUP: conn_stream: rename cs_app_* to sc_app_*
+ - CLEANUP: conn_stream: tree-wide rename to stconn (stream connector)
+ - CLEANUP: mux-h1: add and use h1s_sc() to retrieve the stream connector
+ - CLEANUP: mux-h2: add and use h2s_sc() to retrieve the stream connector
+ - CLEANUP: mux-fcgi: add and use fcgi_strm_sc() to retrieve the stream connector
+ - CLEANUP: mux-pt: add and use pt_sc() to retrieve the stream connector
+ - CLEANUP: stdesc: rename the stream connector ->cs field to ->sc
+ - CLEANUP: stream: rename "csf" and "csb" to "scf" and "scb"
+ - CLEANUP: stconn: tree-wide rename stream connector flags CS_FL_* to SC_FL_*
+ - CLEANUP: stconn: tree-wide rename stconn states CS_ST/SB_* to SC_ST/SB_*
+ - MINOR: check: export wake_srv_chk()
+ - MINOR: conn_stream: test the various ops functions before calling them
+ - MEDIUM: stconn: merge the app_ops and the data_cb fields
+ - MINOR: applet: add new wrappers to put chk/blk/str/chr to channel from appctx
+ - CLEANUP: applet: use applet_put*() everywhere possible
+ - CLEANUP: stconn: rename cs_{i,o}{b,c} to sc_{i,o}{b,c}
+ - CLEANUP: stconn: rename cs_{check,strm,strm_task} to sc_strm_*
+ - CLEANUP: stconn: rename cs_conn() to sc_conn()
+ - CLEANUP: stconn: rename cs_mux() to sc_mux_strm()
+ - CLEANUP: stconn: rename cs_conn_mux() to sc_mux_ops()
+ - CLEANUP: stconn: rename cs_appctx() to sc_appctx()
+ - CLEANUP: stconn: rename __cs_endp_target() to __sc_endp()
+ - CLEANUP: stconn: rename cs_get_data_name() to sc_get_data_name()
+ - CLEANUP: stconn: rename cs_conn_*() to sc_conn_*()
+ - CLEANUP: stconn: rename cs_conn_get_first() to conn_get_first_sc()
+ - CLEANUP: stconn: rename cs_ep_set_error() to se_fl_set_error()
+ - CLEANUP: stconn: make a few functions take a const argument
+ - CLEANUP: stconn: use a single function to know if SC may send to SE
+ - MINOR: stconn: consider CF_SHUTW for sc_is_send_allowed()
+ - MINOR: stconn: remove calls to cs_done_get()
+ - MEDIUM: stconn: always rely on CF_SHUTR in addition to cs_rx_blocked()
+ - MEDIUM: stconn: remove SE_FL_RXBLK_SHUT
+ - MINOR: stconn: rename SE_FL_RXBLK_CONN to SE_FL_APPLET_NEED_CONN
+ - MEDIUM: stconn: take SE_FL_APPLET_NEED_CONN out of the RXBLK_ANY flags
+ - CLEANUP: stconn: rename cs_rx_room_{blk,rdy} to sc_{need,have}_room()
+ - CLEANUP: stconn: rename cs_rx_chan_{blk,rdy} to sc_{wont,will}_read()
+ - CLEANUP: stconn: rename cs_rx_buff_{blk,rdy} to sc_{need,have}_buff()
+ - MINOR: stconn: start to rename cs_rx_endp_{more,done}() to se_have_{no_,}more_data()
+ - MINOR: stconn: add sc_is_recv_allowed() to check for ability to receive
+ - CLEANUP: stconn: rename SE_FL_RX_WAIT_EP to SE_FL_HAVE_NO_DATA
+ - MEDIUM: stconn: move the RXBLK flags to the stream connector
+ - CLEANUP: stconn: rename SE_FL_WANT_GET to SE_FL_WILL_CONSUME
+ - CLEANUP: stconn: remove cs_tx_blocked() and cs_tx_endp_ready()
+ - CLEANUP: stconn: rename cs_{want,stop}_get() to se_{will,wont}_consume()
+ - CLEANUP: stconn: rename cs_cant_get() to se_need_more_data()
+ - CLEANUP: stconn: rename cs_{new,create,free,destroy}_* to sc_*
+ - CLEANUP: stconn: rename remaining management functions from cs_* to sc_*
+ - CLEANUP: stconn: rename cs{,_get}_{src,dst} to sc_*
+ - CLEANUP: stconn: rename cs_{shut,chk}* to sc_*
+ - CLEANUP: stconn: rename final state manipulation functions from cs_* to sc_*
+ - CLEANUP: quic: drop the name "conn_stream" from the pool variable names
+ - REORG: rename cs_utils.h to sc_strm.h
+ - REORG: stconn: rename conn_stream.{c,h} to stconn.{c,h}
+ - CLEANUP: muxes: rename "get_first_cs" to "get_first_sc"
+ - DEV: flags: use "sc" for stream conns instead of "cs"
+ - CLEANUP: check: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: connection: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: stconn: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: quic/h3: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: stream: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: promex: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: stats: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: cli: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: applet: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: cache: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: dns: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: spoe: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: hlua: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: log-forward: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: http-client: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: mux-fcgi: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: mux-h1: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: mux-h2: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: mux-pt: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: peers: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: sink: rename all occurrences of stconn "cs" to "sc"
+ - CLEANUP: sslsock: remove only occurrence of local variable "cs"
+ - CLEANUP: applet: rename appctx_cs() to appctx_sc()
+ - CLEANUP: stream: rename stream_upgrade_from_cs() to stream_upgrade_from_sc()
+ - CLEANUP: obj_type: rename OBJ_TYPE_CS to OBJ_TYPE_SC
+ - CLEANUP: stconn: replace a few remaining occurrences of CS in comments or traces
+ - DOC: internal: update the muxes doc to mention the stconn
+ - CLEANUP: mux-quic: rename the "endp" field to "sd"
+ - CLEANUP: mux-h1: rename the "endp" field to "sd"
+ - CLEANUP: mux-h2: rename the "endp" field to "sd"
+ - CLEANUP: mux-fcgi: rename the "endp" field to "sd"
+ - CLEANUP: mux-pt: rename the "endp" field to "sd"
+ - CLEANUP: stconn: rename a few "endp" arguments and variables to "sd"
+ - MINOR: stconn: turn SE_FL_WILL_CONSUME to SE_FL_WONT_CONSUME
+ - CLEANUP: stream: remove unneeded test on appctx during initialization
+ - CLEANUP: stconn: remove the new unneeded SE_FL_APP_MASK
+ - DEV: flags: fix "siet" shortcut name
+ - DEV: flags: rename the "endp" shortcut to "sd" for "stream descriptor"
+ - DEV: flags: reorder a few SC/SE flags
+ - DOC: internal: add a description of the stream connectors and descriptors
+
+2022/05/20 : 2.6-dev11
+ - CI: determine actual LibreSSL version dynamically
+ - BUG/MEDIUM: ncbuf: fix null buffer usage
+ - MINOR: ncbuf: fix warnings for testing build
+ - MEDIUM: http-ana: Add a proxy option to restrict chars in request header names
+ - MEDIUM: ssl: Delay random generator initialization after config parsing
+ - MINOR: ssl: Add 'ssl-propquery' global option
+ - MINOR: ssl: Add 'ssl-provider' global option
+ - CLEANUP: Add missing header to ssl_utils.c
+ - CLEANUP: Add missing header to hlua_fcn.c
+ - CLEANUP: Remove unused function hlua_get_top_error_string
+ - BUILD: fix build warning on solaris based systems with __maybe_unused.
+ - MINOR: tools: add get_exec_path implementation for solaris based systems.
+ - BUG/MINOR: ssl: Fix crash when no private key is found in pem
+ - CLEANUP: conn-stream: Remove cs_applet_shut declaration from header file
+ - MINOR: applet: Prepare appctx to own the session on frontend side
+ - MINOR: applet: Let the frontend appctx release the session
+ - MINOR: applet: Change return value for .init callback function
+ - MINOR: stream: Export stream_free()
+ - MINOR: applet: Add appctx_init() helper function
+ - MINOR: applet: Add a function to finalize frontend appctx startup
+ - MINOR: applet: Add function to release appctx on error during init stage
+ - MEDIUM: dns: Refactor dns appctx creation
+ - MEDIUM: spoe: Refactor SPOE appctx creation
+ - MEDIUM: lua: Refactor cosocket appctx creation
+ - MEDIUM: httpclient: Refactor http-client appctx creation
+ - MINOR: sink: Add a ref to sink in the sink_forward_target structure
+ - MEDIUM: sink: Refactor sink forwarder appctx creation
+ - MINOR: peers: Add a ref to peers section in the peer structure
+ - MEDIUM: peers: Refactor peer appctx creation
+ - MINOR: applet: Add API to start applet on a thread subset
+ - MEDIUM: applet: Add support for async appctx startup on a thread subset
+ - MINOR: peers: Track number of applets run by thread
+ - MEDIUM: peers: Balance applets across threads
+ - MINOR: conn-stream/applet: Stop setting appctx as the endpoint context
+ - CLEANUP: proxy: Remove dead code when parsing "http-restrict-req-hdr-names" option
+ - REGTESTS: abortonclose: Fix some race conditions
+ - MINOR: ssl: Add 'ssl-provider-path' global option
+ - CLEANUP: http_ana: Make use of the return value of stream_generate_unique_id()
+ - BUG/MINOR: spoe: Fix error handling in spoe_init_appctx()
+ - CLEANUP: peers: Remove unreachable code in peer_session_create()
+ - CLEANUP: httpclient: Remove useless test on ss_dst in httpclient_applet_init()
+ - BUG/MEDIUM: quic: fix Rx buffering
+ - OPTIM: quic: realign empty Rx buffer
+ - BUG/MINOR: ncbuf: fix ncb_is_empty()
+ - MINOR: ncbuf: refactor ncb_advance()
+ - BUG/MINOR: mux-quic: update session's idle delay before stream creation
+ - MINOR: h3: do not wait a complete frame for demuxing
+ - MINOR: h3: flag demux as full on HTX full
+ - MEDIUM: mux-quic: implement recv on io-cb
+ - MINOR: mux-quic: remove qcc_decode_qcs() call in XPRT
+ - MINOR: mux-quic: reorganize flow-control frames emission
+ - MINOR: mux-quic: implement MAX_STREAM_DATA emission
+ - MINOR: mux-quic: implement MAX_DATA emission
+ - BUG/MINOR: mux-quic: support nul buffer with qc_free_ncbuf()
+ - MINOR: mux-quic: free RX buf if empty
+ - BUG/MEDIUM: config: Reset outline buffer size on realloc error in readcfgfile()
+ - BUG/MINOR: check: Reinit the buffer wait list at the end of a check
+ - MEDIUM: check: No longer shutdown the connection in .wake callback function
+ - REORG: check: Rename and export I/O callback function
+ - MEDIUM: check: Use the CS to handle subscriptions for read/write events
+ - BUG/MINOR: quic: break for error on sendto
+ - MINOR: quic: abort on unlisted errno on sendto()
+ - MINOR: quic: detect EBADF on sendto()
+ - BUG/MEDIUM: quic: fix initialization for local/remote TPs
+ - CLEANUP: quic: adjust comment/coding style for TPs init
+ - BUG/MINOR: cfgparse: abort earlier in case of allocation error
+ - MINOR: quic: Dump initial derived secrets
+ - MINOR: quic_tls: Add quic_tls_derive_retry_token_secret()
+ - MINOR: quic_tls: Add quic_tls_decrypt2() implementation
+ - MINOR: quic: Retry implementation
+ - MINOR: cfgparse: Update for "cluster-secret" keyword for QUIC Retry
+ - MINOR: quic: Move quic_lstnr_dgram_dispatch() out of xprt_quic.c
+ - BUILD: stats: Missing headers inclusions from stats.h
+ - MINOR: quic_stats: Add a new stats module for QUIC
+ - MINOR: quic: Attach proxy QUIC stats counters to the QUIC connection
+ - BUG/MINOR: quic: Fix potential memory leak during QUIC connection allocations
+ - MINOR: quic: QUIC stats counters handling
+ - MINOR: quic: Add tune.quic.retry-threshold keyword
+ - MINOR: quic: Dynamic Retry implementation
+ - MINOR: quic/mux-quic: define CONNECTION_CLOSE send API
+ - MINOR: mux-quic: emit FLOW_CONTROL_ERROR
+ - MINOR: mux-quic: emit STREAM_LIMIT_ERROR
+ - MINOR: mux-quic: close connection on error if different data at offset
+ - BUG/MINOR: peers: fix error reporting of "bind" lines
+ - CLEANUP: config: improve address parser error report for unmatched protocols
+ - CLEANUP: config: provide clearer hints about unsupported QUIC addresses
+ - MINOR: protocol: replace ctrl_type with xprt_type and clarify it
+ - MINOR: listener: provide a function to process all of a bind_conf's arguments
+ - MINOR: config: use the new bind_parse_args_list() to parse a "bind" line
+ - CLEANUP: listener: add a comment about what the BC_SSL_O_* flags are for
+ - MINOR: listener: add a new "options" entry in bind_conf
+ - CLEANUP: listener: replace all uses of bind_conf->is_ssl with BC_O_USE_SSL
+ - CLEANUP: listener: replace bind_conf->generate_cers with BC_O_GENERATE_CERTS
+ - CLEANUP: listener: replace bind_conf->quic_force_retry with BC_O_QUIC_FORCE_RETRY
+ - CLEANUP: listener: store stream vs dgram at the bind_conf level
+ - MINOR: listener: detect stream vs dgram conflict during parsing
+ - MINOR: listener: set the QUIC xprt layer immediately after parsing the args
+ - MINOR: listener/ssl: set the SSL xprt layer only once the whole config is known
+ - MINOR: connection: add flag MX_FL_FRAMED to mark muxes relying on framed xprt
+ - MINOR: config: detect and report mux and transport incompatibilities
+ - MINOR: listener: automatically select a QUIC mux with a QUIC transport
+ - MINOR: listener: automatically enable SSL if a QUIC transport is found
+ - BUG/MINOR: quic: Fix a typo in qc_idle_timer_task()
+ - BUG/MINOR: quic: Missing <conn_opening> stats counter decrementation
+ - BUILD/MINOR: cpuset fix build for FreeBSD 13.1
+ - CI: determine actual OpenSSL version dynamically
+
+2022/05/14 : 2.6-dev10
+ - MINOR: ssl: ignore dotfiles when loading a dir w/ ca-file
+ - MEDIUM: ssl: ignore dotfiles when loading a dir w/ crt
+ - BUG/MINOR: ssl: Fix typos in crl-file related CLI commands
+ - MINOR: compiler: add a new macro to set an attribute on an enum when possible
+ - BUILD: stats: conditionally mark obsolete stats states as deprecated
+ - BUILD: ssl: work around bogus warning in gcc 12's -Wformat-truncation
+ - BUILD: debug: work around gcc-12 excessive -Warray-bounds warnings
+ - BUILD: listener: shut report of possible null-deref in listener_accept()
+ - BUG/MEDIUM: ssl: fix the gcc-12 broken fix :-(
+ - DOC: install: update gcc version requirements
+ - BUILD: makefile: add -Wfatal-errors to the default flags
+ - BUG/MINOR: server: Make SRV_STATE_LINE_MAXLEN value from 512 to 2kB (2000 bytes).
+ - BUG/MAJOR: dns: multi-thread concurrency issue on UDP socket
+ - BUG/MINOR: mux-h2: mark the stream as open before processing it not after
+ - MINOR: mux-h2: report a trace event when failing to create a new stream
+ - DOC: configuration: add the httpclient keywords to the global keywords index
+ - MINOR: quic: Add a debug counter for sendto() errors
+ - BUG/MINOR: quic: Dropped peer transport parameters
+ - BUG/MINOR: quic: Wrong unit for ack delay for incoming ACK frames
+ - MINOR: quic: Congestion controller event trace fix (loss)
+ - MINOR: quic: Add correct ack delay values to ACK frames
+ - MINOR: config: Add "cluster-secret" new global keyword
+ - MINOR: quic-tls: Add quic_hkdf_extract_and_expand() for HKDF
+ - MINOR: quic: new_quic_cid() code moving
+ - MINOR: quic: Initialize stateless reset tokens with HKDF secrets
+ - MINOR: qc_new_conn() rework for stateless reset
+ - MINOR: quic: Stateless reset token copy to transport parameters
+ - MINOR: quic: Send stateless reset tokens
+ - MINOR: quic: Short packets always embed a trailing AEAD TAG
+ - CLEANUP: quic: wrong use of eb*entry() macro
+ - CLEANUP: quic: Useless use of pointer for quic_hkdf_extract()
+ - CLEANUP: quic_tls: QUIC_TLS_IV_LEN defined two times
+ - MINOR: ncbuf: define non-contiguous buffer
+ - MINOR: ncbuf: complete API and define block internal abstraction
+ - MINOR: ncbuf: optimize storage for the last gap
+ - MINOR: ncbuf: implement insertion
+ - MINOR: ncbuf: define various insertion modes
+ - MINOR: ncbuf: implement advance
+ - MINOR: ncbuf: write unit tests
+ - BUG/MEDIUM: lua: fix argument handling in data removal functions
+ - DOC/MINOR: fix typos in the lua-api document
+ - BUG/MEDIUM: wdt: don't trigger the watchdog when p is uninitialized
+ - MINOR: mux-h1: Add global option accept payload for any HTTP/1.0 requests
+ - CLEANUP: mux-h1: Fix comments and error messages for global options
+ - MINOR: conn_stream: make cs_set_error() work on the endpoint instead
+ - CLEANUP: mux-h1: always take the endp from the h1s not the cs
+ - CLEANUP: mux-h2: always take the endp from the h2s not the cs
+ - CLEANUP: mux-pt: always take the endp from the context not the cs
+ - CLEANUP: mux-fcgi: always take the endp from the fstrm not the cs
+ - CLEANUP: mux-quic: always take the endp from the qcs not the cs
+ - CLEANUP: applet: use the appctx's endp instead of cs->endp
+ - MINOR: conn_stream: add a pointer back to the cs from the endpoint
+ - MINOR: mux-h1: remove the now unneeded h1s->cs
+ - MINOR: mux-h2: make sure any h2s always has an endpoint
+ - MINOR: mux-h2: remove the now unneeded conn_stream from the h2s
+ - MINOR: mux-fcgi: make sure any stream always has an endpoint
+ - MINOR: mux-fcgi: remove the now unneeded conn_stream from the fcgi_strm
+ - MINOR: mux-quic: remove the now unneeded conn_stream from the qcs
+ - MINOR: mux-pt: remove the now unneeded conn_stream from the context
+ - CLEANUP: muxes: make mux->attach/detach take a conn_stream endpoint
+ - MINOR: applet: replace cs_applet_shut() with appctx_shut()
+ - MINOR: applet: add appctx_strm() and appctx_cs() to access common fields
+ - CLEANUP: applet: remove the unneeded appctx->owner
+ - CLEANUP: conn_stream: merge cs_new_from_{mux,applet} into cs_new_from_endp()
+ - MINOR: ext-check: indicate the transport and protocol of a server
+ - BUG/MEDIUM: mux-quic: fix a thinko in the latest cs/endpoint cleanup
+ - MINOR: tools: improve error message accuracy in str2sa_range
+ - MINOR: config: make sure never to mix dgram and stream protocols on a bind line
+ - BUG/MINOR: ncbuf: fix coverity warning on uninit sz_data
+ - MINOR: xprt_quic: adjust flow-control according to bufsize
+ - MEDIUM: mux-quic/h3/hq-interop: use ncbuf for bidir streams
+ - MEDIUM: mux-quic/h3/qpack: use ncbuf for uni streams
+ - CLEANUP: mux-quic: remove unused fields for Rx
+ - CLEANUP: quic: remove unused quic_rx_strm_frm
+
+2022/05/08 : 2.6-dev9
+ - MINOR: mux-quic: support full request channel buffer
+ - BUG/MINOR: h3: fix parsing of unknown frame type with null length
+ - CLEANUP: backend: make alloc_{bind,dst}_address() idempotent
+ - MEDIUM: stream: remove the confusing SF_ADDR_SET flag
+ - MINOR: conn_stream: remove the now unused CS_FL_ADDR_*_SET flags
+ - CLEANUP: protocol: make sure the connect_* functions always receive a dst
+ - MINOR: connection: get rid of the CO_FL_ADDR_*_SET flags
+ - MINOR: session: get rid of the now unused SESS_FL_ADDR_*_SET flags
+ - CLEANUP: mux: Useless xprt_quic-t.h inclusion
+ - MINOR: quic: Make the quic_conn be aware of the number of streams
+ - BUG/MINOR: quic: Dropped retransmitted STREAM frames
+ - BUG/MINOR: mux_quic: Dropped packet upon retransmission for closed streams
+ - MEDIUM: httpclient: remove url2sa to use a more flexible parser
+ - MEDIUM: httpclient: http-request rules for resolving
+ - MEDIUM: httpclient: allow address and port change for resolving
+ - CLEANUP: httpclient: remove the comment about resolving
+ - MINOR: httpclient: handle unix and other socket types in dst
+ - MINOR: httpclient: rename dash by dot in global option
+ - MINOR: init: exit() after pre-check upon error
+ - MINOR: httpclient: cleanup the error handling in init
+ - MEDIUM: httpclient: hard-error when SSL is configured
+ - MINOR: httpclient: allow to configure the ca-file
+ - MINOR: httpclient: configure the resolvers section to use
+ - MINOR: httpclient: allow ipv4 or ipv6 preference for resolving
+ - DOC: configuration: httpclient global option
+ - MINOR: conn-stream: Add mask from flags set by endpoint or app layer
+ - BUG/MEDIUM: conn-stream: Only keep app layer flags of the endpoint on reset
+ - BUG/MEDIUM: mux-fcgi: Be sure to never set EOM flag on an empty HTX message
+ - BUG/MEDIUM: mux-h1: Be able to handle trailers when C-L header was specified
+ - DOC: config: Update doc for PR/PH session states to warn about rewrite failures
+ - MINOR: resolvers: cleanup alert/warning in parse-resolve-conf
+ - MINOR: resolvers: move the resolv.conf parser in parse_resolv_conf()
+ - MINOR: resolvers: resolvers_new() create a resolvers with default values
+ - BUILD: debug: unify the definition of ha_backtrace_to_stderr()
+ - BUG/MINOR: tcp/http: release the expr of set-{src,dst}[-port]
+ - MEDIUM: resolvers: create a "default" resolvers section at startup
+ - DOC: resolvers: default resolvers section
+ - BUG/MINOR: startup: usage() when no -cc arguments
+ - BUG/MEDIUM: resolvers: make "show resolvers" properly yield
+ - BUG/MEDIUM: cli: make "show cli sockets" really yield
+ - BUG/MINOR: proxy/cli: don't enumerate internal proxies on "show backend"
+ - BUG/MINOR: map/cli: protect the backref list during "show map" errors
+ - BUG/MINOR: map/cli: make sure patterns don't vanish under "show map"'s init
+ - BUG/MINOR: ssl/cli: fix "show ssl ca-file/crl-file" not to mix cli+ssl contexts
+ - BUG/MINOR: ssl/cli: fix "show ssl ca-file <name>" not to mix cli+ssl contexts
+ - BUG/MINOR: ssl/cli: fix "show ssl crl-file" not to mix cli+ssl contexts
+ - BUG/MINOR: ssl/cli: fix "show ssl cert" not to mix cli+ssl contexts
+ - CLEANUP: ssl/cli: do not loop on unknown states in "add ssl crt-list" handler
+ - MINOR: applet: reserve some generic storage in the applet's context
+ - CLEANUP: applet: make appctx_new() initialize the whole appctx
+ - CLEANUP: stream/cli: take the "show sess" context definition out of the appctx
+ - CLEANUP: stream/cli: stop using appctx->st2 for the dump state
+ - CLEANUP: stream/cli: remove the unneeded init state from "show sess"
+ - CLEANUP: stream/cli: remove the unneeded STATE_FIN state from "show sess"
+ - CLEANUP: stream/cli: remove the now unneeded dump state from "show sess"
+ - CLEANUP: proxy/cli: take the "show errors" context definition out of the appctx
+ - CLEANUP: stick-table/cli: take the "show table" context definition out of the appctx
+ - CLEANUP: stick-table/cli: stop using appctx->st2 for the dump state
+ - CLEANUP: stick-table/cli: remove the unneeded STATE_INIT for "show table"
+ - CLEANUP: map/cli: take the "show map" context definition out of the appctx
+ - CLEANUP: map/cli: stop using cli.i0/i1 to store the generation numbers
+ - CLEANUP: map/cli: stop using appctx->st2 for the dump state
+ - CLEANUP: map/cli: always detach the backref from the list after "show map"
+ - CLEANUP: peers/cli: take the "show peers" context definition out of the appctx
+ - CLEANUP: peers/cli: stop using appctx->st2 for the dump state
+ - CLEANUP: peers/cli: remove unneeded state STATE_INIT
+ - CLEANUP: cli: initialize the whole appctx->ctx, not just the stats part
+ - CLEANUP: promex: make the applet use its own context
+ - CLEANUP: promex: stop using appctx->st2
+ - CLEANUP: stats/cli: take the "show stat" context definition out of the appctx
+ - CLEANUP: stats/cli: stop using appctx->st2
+ - CLEANUP: hlua/cli: take the hlua_cli context definition out of the appctx
+ - CLEANUP: ssl/cli: use a local context for "show cafile"
+ - CLEANUP: ssl/cli: use a local context for "show crlfile"
+ - CLEANUP: ssl/cli: use a local context for "show ssl cert"
+ - CLEANUP: ssl/cli: use a local context for "commit ssl cert"
+ - CLEANUP: ssl/cli: stop using appctx->st2 for "commit ssl cert"
+ - CLEANUP: ssl/cli: use a local context for "set ssl cert"
+ - CLEANUP: ssl/cli: use a local context for "set ssl cafile"
+ - CLEANUP: ssl/cli: use a local context for "set ssl crlfile"
+ - CLEANUP: ssl/cli: use a local context for "commit ssl {ca|crl}file"
+ - CLEANUP: ssl/cli: stop using appctx->st2 for "commit ssl ca/crl"
+ - CLEANUP: ssl/cli: stop using ctx.cli.i0/i1/p0 for "show tls-keys"
+ - CLEANUP: ssl/cli: add a new "dump_entries" field to "show_keys_ref"
+ - CLEANUP: ssl/cli: make "show tlskeys" not use appctx->st2 anymore
+ - CLEANUP: ssl/cli: make "show ssl ocsp-response" not use cli.p0 anymore
+ - CLEANUP: ssl/cli: make "{show|dump} ssl crtlist" use its own context
+ - CLEANUP: ssl/cli: make "add ssl crtlist" use its own context
+ - CLEANUP: ssl/cli: make "add ssl crtlist" not use st2 anymore
+ - CLEANUP: dns: stop abusing the sink forwarder's context
+ - CLEANUP: sink: use the generic context to store the forwarder's context
+ - CLEANUP: activity/cli: make "show profiling" not use ctx.cli anymore
+ - CLEANUP: debug/cli: make "debug dev fd" not use ctx.cli anymore
+ - CLEANUP: debug/cli: make "debug dev memstats" not use ctx.cli anymore
+ - CLEANUP: ring: pass the ring watch flags to ring_attach_cli(), not in ctx.cli
+ - CLEANUP: ring/cli: use a locally-defined context instead of using ctx.cli
+ - CLEANUP: resolvers/cli: make "show resolvers" use a locally-defined context
+ - CLEANUP: resolvers/cli: remove the unneeded appctx->st2 from "show resolvers"
+ - CLEANUP: cache/cli: make use of a locally defined context for "show cache"
+ - CLEANUP: proxy/cli: make use of a locally defined context for "show servers"
+ - CLEANUP: proxy/cli: get rid of appctx->st2 in "show servers"
+ - CLEANUP: proxy/cli: make "show backend" only use the generic context
+ - CLEANUP: cli: make "show fd" use its own context
+ - CLEANUP: cli: make "show env" use its own context
+ - CLEANUP: cli: simplify the "show cli sockets" I/O handler
+ - CLEANUP: cli: make "show cli sockets" use its own context
+ - CLEANUP: httpclient/cli: use a locally-defined context instead of ctx.cli
+ - CLEANUP: httpclient: do not use the appctx.ctx anymore
+ - CLEANUP: peers: do not use appctx.ctx anymore
+ - CLEANUP: spoe: do not use appctx.ctx anymore
+ - BUILD: applet: mark the CLI's generic variables as deprecated
+ - BUILD: applet: mark the appctx's st2 variable as deprecated
+ - CLEANUP: cache: take the context out of appctx.ctx
+ - MEDIUM: lua: move the cosocket storage outside of appctx.ctx
+ - MINOR: lua: move the tcp service storage outside of appctx.ctx
+ - MINOR: lua: move the http service context out of appctx.ctx
+ - CLEANUP: cli: move the status print context into its own context
+ - CLEANUP: stats: rename the stats state values and mark the old ones deprecated
+ - DOC: internal: document the new cleaner approach to the appctx
+ - MINOR: tcp: socket translate TCP_KEEPIDLE for macOs equivalent
+ - DOC: fix typo "ant" for "and" in INSTALL
+ - CI: dynamically determine actual version of h2spec
+
+2022/04/30 : 2.6-dev8
+ - BUG/MINOR: quic: fix use-after-free with trace on ACK consume
+ - BUG/MINOR: rules: Forbid captures in defaults section if used by a backend
+ - BUG/MEDIUM: rules: Be able to use captures defined in defaults section
+ - BUG/MINOR: rules: Fix check_capture() function to use the right rule arguments
+ - BUG/MINOR: http-act: make release_http_redir() more robust
+ - BUG/MINOR: sample: add missing use_backend/use-server contexts in smp_resolve_args
+ - MINOR: sample: don't needlessly call c_none() in sample_fetch_as_type()
+ - MINOR: sample: make the bool type cast to bin
+ - MEDIUM: backend: add new "balance hash <expr>" algorithm
+ - MINOR: init: add global setting "fd-hard-limit" to bound system limits
+ - BUILD: pollers: use an initcall to register the pollers
+ - BUILD: xprt: use an initcall to register the transport layers
+ - BUILD: thread: use initcall instead of a constructor
+ - BUILD: http: remove the two unused constructors in rules and ana
+ - CLEANUP: compression: move the default setting of maxzlibmem to defaults
+ - MINOR: tree-wide: always consider EWOULDBLOCK in addition to EAGAIN
+ - BUG/MINOR: connection: "connection:close" header added despite 'close-spread-time'
+ - MINOR: fd: add functions to set O_NONBLOCK and FD_CLOEXEC
+ - CLEANUP: tree-wide: use fd_set_nonblock() and fd_set_cloexec()
+ - CLEANUP: tree-wide: remove 25 occurrences of unneeded fcntl.h
+ - REGTESTS: fix the race conditions in be2dec.vtc and field.vtc
+ - REGTESTS: webstats: remove unused stats socket in /tmp
+ - MEDIUM: httpclient: disable SSL when the ca-file couldn't be loaded
+ - BUG/MINOR: httpclient/lua: error when the httpclient_start() fails
+ - BUG/MINOR: ssl: free the cafile entries on deinit
+ - BUG/MINOR: ssl: memory leak when trying to load a directory with ca-file
+ - MEDIUM: httpclient: re-enable the verify by default
+ - BUG/MEDIUM: ssl/cli: fix yielding in show_cafile_detail
+ - BUILD: compiler: properly distinguish weak and global symbols
+ - MINOR: connection: Add way to disable active connection closing during soft-stop
+ - BUG/MEDIUM: http-ana: Fix memleak in redirect rules with ignore-empty option
+ - CLEANUP: Destroy `http_err_chunks` members during deinit
+ - BUG/MINOR: resolvers: Fix memory leak in resolvers_deinit()
+ - MINOR: Call deinit_and_exit(0) for `haproxy -vv`
+ - BUILD: fd: disguise the fd_set_nonblock/cloexec result
+ - BUG/MINOR: pools: make sure to also destroy shared pools in pool_destroy_all()
+ - MINOR: ssl: add a new global option "tune.ssl.hard-maxrecord"
+ - CLEANUP: errors: also call deinit_errors_buffers() on deinit()
+ - CLEANUP: chunks: release trash also in deinit
+ - CLEANUP: deinit: release the pre-check callbacks
+ - CLEANUP: deinit: release the config postparsers
+ - CLEANUP: listeners/deinit: release accept queue tasklets on deinit
+ - CLEANUP: connections/deinit: destroy the idle_conns tasks
+ - BUG/MINOR: mux-quic: fix build in release mode
+ - MINOR: mux-quic: adjust comment on emission function
+ - MINOR: mux-quic: remove unused bogus qcc_get_stream()
+ - BUG/MINOR: mux-quic: fix leak if cs alloc failure
+ - MINOR: mux-quic: count local flow-control stream limit on reception
+ - BUG/MINOR: h3: fix incomplete POST requests
+ - BUG/MEDIUM: h3: fix use-after-free on mux Rx buffer wrapping
+ - MINOR: mux-quic: partially copy Rx frame if almost full buf
+ - MINOR: h3: change frame demuxing API
+ - MINOR: mux-quic: add a app-layer context in qcs
+ - MINOR: h3: implement h3 stream context
+ - MINOR: h3: support DATA demux if buffer full
+ - MINOR: quic: decode as much STREAM as possible
+ - MINOR: quic: Improve qc_prep_pkts() flexibility
+ - MINOR: quic: Prepare quic_frame struct duplication
+ - MINOR: quic: Do not retransmit frames from coalesced packets
+ - MINOR: quic: Add traces about TX frame memory releasing
+ - MINOR: quic: process_timer() rework
+ - MEDIUM: quic: New functions for probing rework
+ - MEDIUM: quic: Retransmission functions rework
+ - MEDIUM: quic: qc_requeue_nacked_pkt_tx_frms() rework
+ - MINOR: quic: old data distinction for qc_send_app_pkt()
+ - MINOR: quic: Mark packets as probing with old data
+ - MEDIUM: quic: Mark copies of acknowledged frames as acknowledged
+ - MEDIUM: quic: Enable the new datagram probing process
+ - MINOR: quic: Do not send ACK frames when probing
+ - BUG/MINOR: quic: Wrong returned status by qc_build_frms()
+ - BUG/MINOR: quic: Avoid sending useless PADDING frame
+ - BUG/MINOR: quic: Traces fix about remaining frames upon packet build failure
+ - MINOR: quic: Wake up the mux to probe with new data
+ - BUG/MEDIUM: quic: Possible crash on STREAM frame loss
+ - BUG/MINOR: quic: Missing Initial packet length check
+ - CLEANUP: quic: Rely on the packet length set by qc_lstnr_pkt_rcv()
+ - MINOR: quic: Drop 0-RTT packets if not allowed
+ - BUG/MINOR: httpclient/ssl: use the correct verify constant
+ - BUG/MEDIUM: conn-stream: Don't erase endpoint flags on reset
+ - BUG/MEDIUM: httpclient: Fix loop consuming HTX blocks from the response channel
+ - BUG/MINOR: httpclient: Count metadata in size to transfer via htx_xfer_blks()
+ - MINOR: httpclient: Don't use co_set_data() to decrement output
+ - BUG/MINOR: conn_stream: do not confirm a connection from the frontend path
+ - MEDIUM: quic: do not ACK packet with STREAM if MUX not present
+ - MEDIUM: quic: do not ack packet with invalid STREAM
+ - MINOR: quic: Drop 0-RTT packets without secrets
+ - CLEANUP: quic: Remaining fprintf() debug trace
+ - MINOR: quic: moving code for QUIC loss detection
+ - BUG/MINOR: quic: Missing time threshold multiplifier for loss delay computation
+ - CI: github actions: update LibreSSL to 3.5.2
+ - SCRIPTS: announce-release: add URL of dev packages
+
+2022/04/23 : 2.6-dev7
+ - BUILD: calltrace: fix wrong include when building with TRACE=1
+ - MINOR: ssl: Use DH parameters defined in RFC7919 instead of hard coded ones
+ - MEDIUM: ssl: Disable DHE ciphers by default
+ - BUILD: ssl: Fix compilation with OpenSSL 1.0.2
+ - MINOR: mux-quic: split xfer and STREAM frames build
+ - REORG: quic: use a dedicated module for qc_stream_desc
+ - MINOR: quic-stream: use distinct tree nodes for quic stream and qcs
+ - MINOR: quic-stream: add qc field
+ - MEDIUM: quic: implement multi-buffered Tx streams
+ - MINOR: quic-stream: refactor ack management
+ - MINOR: quic: limit total stream buffers per connection
+ - MINOR: mux-quic: implement immediate send retry
+ - MINOR: cfg-quic: define tune.quic.conn-buf-limit
+ - MINOR: ssl: Add 'show ssl providers' cli command and providers list in -vv option
+ - REGTESTS: ssl: Update error messages that changed with OpenSSLv3.1.0-dev
+ - BUG/MEDIUM: quic: Possible crash with released mux
+ - BUG/MINOR: mux-quic: unsubscribe on release
+ - BUG/MINOR: mux-quic: handle null timeout
+ - BUG/MEDIUM: logs: fix http-client's log srv initialization
+ - BUG/MINOR: mux-quic: remove dead code in qcs_xfer_data()
+ - DEV: stream: Fix conn-streams dump in full stream message
+ - CLEANUP: conn-stream: Rename cs_conn_close() and cs_conn_drain_and_close()
+ - CLEANUP: conn-stream: Rename cs_applet_release()
+ - MINOR: conn-stream: Rely on endpoint shutdown flags to shutdown an applet
+ - BUG/MINOR: cache: Disable cache if applet creation fails
+ - BUG/MINOR: backend: Don't allow to change backend applet
+ - BUG/MEDIUM: conn-stream: Set back CS to RDY state when the appctx is created
+ - MINOR: stream: Don't needlessly detach server endpoint on early client abort
+ - MINOR: conn-stream: Make cs_detach_* private and use cs_destroy() from outside
+ - MINOR: init: add the pre-check callback
+ - MEDIUM: httpclient: change the init sequence
+ - MEDIUM: httpclient/ssl: verify required
+ - MINOR: httpclient/mworker: disable in the master process
+ - MEDIUM: httpclient/ssl: verify is configurable and disabled by default
+ - BUG/MAJOR: connection: Never remove connection from idle lists outside the lock
+ - BUG/MEDIUM: mux-quic: fix stalled POST requests
+ - BUG/MINOR: mux-quic: fix POST with abortonclose
+ - MINOR: task: add a new task_instant_wakeup() function
+ - MEDIUM: queue: use tasklet_instant_wakeup() to wake tasks
+ - DOC: remove my name from the config doc
+
+2022/04/16 : 2.6-dev6
+ - CLEANUP: connection: reduce the width of the mux dump output
+ - CI: Update to actions/checkout@v3
+ - CI: Update to actions/cache@v3
+ - DOC: adjust QUIC instruction in INSTALL
+ - BUG/MINOR: stats: define the description' background color in dark color scheme
+ - BUILD: ssl: add USE_ENGINE and disable the openssl engine by default
+ - BUILD: makefile: pass USE_ENGINE to cflags
+ - BUILD: xprt-quic: replace ERR_func_error_string() with ERR_peek_error_func()
+ - DOC: install: document the fact that SSL engines are not enabled by default
+ - CI: github actions: disable -Wno-deprecated
+ - BUILD: makefile: silence unbearable OpenSSL deprecation warnings
+ - MINOR: sock: check configured limits at the sock layer, not the listener's
+ - MINOR: connection: add a new flag CO_FL_FDLESS on fd-less connections
+ - MINOR: connection: add conn_fd() to retrieve the FD only when it exists
+ - MINOR: stream: only dump connections' FDs when they are valid
+ - MINOR: connection: use conn_fd() when displaying connection errors
+ - MINOR: connection: skip FD-based syscalls for FD-less connections
+ - MEDIUM: connection: panic when calling FD-specific functions on FD-less conns
+ - MINOR: mux-quic: properly set the flags and name fields
+ - MINOR: connection: rearrange conn_get_src/dst to be a bit more extensible
+ - MINOR: protocol: add get_src() and get_dst() at the protocol level
+ - MINOR: quic-sock: provide a pair of get_src/get_dst functions
+ - MEDIUM: ssl: improve retrieval of ssl_sock_ctx and SSL detection
+ - MEDIUM: ssl: stop using conn->xprt_ctx to access the ssl_sock_ctx
+ - MEDIUM: xprt-quic: implement get_ssl_sock_ctx()
+ - MEDIUM: quic: move conn->qc into conn->handle
+ - BUILD: ssl: fix build warning with previous changes to ssl_sock_ctx
+ - BUILD: ssl: add an unchecked version of __conn_get_ssl_sock_ctx()
+ - MINOR: ssl: refine the error testing for fc_err and fc_err_str
+ - BUG/MINOR: sock: do not double-close the accepted socket on the error path
+ - CI: cirrus: switch to FreeBSD-13.0
+ - MINOR: log: add '~' to frontend when the transport layer provides SSL
+ - BUILD/DEBUG: lru: fix printf format in debug code
+ - BUILD: peers: adjust some printf format to silence cppcheck
+ - BUILD/DEBUG: hpack-tbl: fix format string in standalone debug code
+ - BUILD/DEBUG: hpack: use unsigned int in printf format in debug code
+ - BUILD: halog: fix some incorrect signs in printf formats for integers
+ - BUG/MINOR: h3: fix build with DEBUG_H3
+ - BUG/MINOR: mux-h2: do not send GOAWAY if SETTINGS were not sent
+ - BUG/MINOR: cache: do not display expired entries in "show cache"
+ - BUG/MINOR: mux-h1: Don't release unallocated CS on error path
+ - MINOR: applet: Make .init callback more generic
+ - MINOR: conn-stream: Add flags to set the type of the endpoint
+ - MEDIUM: applet: Set the appctx owner during allocation
+ - MAJOR: conn-stream: Invert conn-stream endpoint and its context
+ - REORG: Initialize the conn-stream by hand in cs_init()
+ - MEDIUM: conn-stream: Add an endpoint structure in the conn-stream
+ - MINOR: conn-stream: Move some CS flags to the endpoint
+ - MEDIUM: conn-stream: Be able to pass endpoint to create a conn-stream
+ - MEDIUM: conn-stream: Pre-allocate endpoint to create CS from muxes and applets
+ - REORG: applet: Uninline appctx_new function
+ - MAJOR: conn-stream: Share endpoint struct between the CS and the mux/applet
+ - MEDIUM: conn-stream: Move remaning flags from CS to endpoint
+ - MINOR: mux-pt: Rely on the endpoint instead of the conn-stream when possible
+ - MINOR: conn-stream: Add ISBACK conn-stream flag
+ - MINOR: conn-stream: Add header file with util functions related to conn-streams
+ - MEDIUM: tree-wide: Use CS util functions instead of SI ones
+ - MINOR: stream-int/txn: Move buffer for L7 retries in the HTTP transaction
+ - CLEANUP: http-ana: Remove http_alloc_txn() function
+ - MINOR: stream-int/stream: Move conn_retries counter in the stream
+ - MINOR: stream: Simplify retries counter calculation
+ - MEDIUM: stream-int/conn-stream: Move src/dst addresses in the conn-stream
+ - MINOR: stream-int/conn-stream: Move half-close timeout in the conn-stream
+ - MEDIUM: stream-int/stream: Use connect expiration instead of SI expiration
+ - MINOR: stream-int/conn-stream: Report error to the CS instead of the SI
+ - MEDIUM: conn-stream: Use endpoint error instead of conn-stream error
+ - MINOR: channel: Use conn-streams as channel producer and consumer
+ - MINOR: stream-int: Remove SI_FL_KILL_CON to rely on conn-stream endpoint only
+ - MINOR: mux-h2/mux-fcgi: Fully rely on CS_EP_KILL_CONN
+ - MINOR: stream-int: Remove SI_FL_NOLINGER/NOHALF to rely on CS flags instead
+ - MINOR: stream-int: Remove SI_FL_DONT_WAKE to rely on CS flags instead
+ - MINOR: stream-int: Remove SI_FL_INDEP_STR to rely on CS flags instead
+ - MINOR: stream-int: Remove SI_FL_SRC_ADDR to rely on stream flags instead
+ - CLEANUP: stream-int: Remove unused SI_FL_CLEAN_ABRT flag
+ - MINOR: stream: Only save previous connection state for the server side
+ - MEDIUM: stream-int: Move SI err_type in the stream
+ - MEDIUM: stream-int/conn-stream: Move stream-interface state in the conn-stream
+ - MINOR: stream-int/stream: Move si_retnclose() in the stream scope
+ - MINOR: stream-int/backend: Move si_connect() in the backend scope
+ - MINOR: stream-int/conn-stream: Move si_conn_ready() in the conn-stream scope
+ - MINOR: conn-stream/connection: Move SHR/SHW modes in the connection scope
+ - MEDIUM: conn-stream: Be prepared to fail to attach a cs to a mux
+ - MEDIUM: stream-int/conn-stream: Handle I/O subscriptions in the conn-stream
+ - MINOR: conn-stream: Rename CS functions dedicated to connections
+ - MINOR: stream-int/conn-stream: Move si_shut* and si_chk* in conn-stream scope
+ - MEDIUM: stream-int/conn-stream: Move si_ops in the conn-stream scope
+ - MINOR: applet: Use the CS to register and release applets instead of SI
+ - MINOR: connection: unconst mux's get_fist_cs() callback function
+ - MINOR: stream-int/connection: Move conn_si_send_proxy() in the connection scope
+ - REORG: stream-int: Export si_cs_recv(), si_cs_send() and si_cs_process()
+ - REORG: stream-int: Move si_is_conn_error() in the header file
+ - REORG: conn-stream: Move cs_shut* and cs_chk* in cs_utils
+ - REORG: conn-stream: Move cs_app_ops in conn_stream.c
+ - MINOR: stream-int-conn-stream: Move si_update_* in conn-stream scope
+ - MINOR: stream-int/stream: Move si_update_both in stream scope
+ - MEDIUM: conn-stream/applet: Add a data callback for applets
+ - MINOR: stream-int/conn-stream: Move stream_int_read0() in the conn-stream scope
+ - MINOR: stream-int/conn-stream: Move stream_int_notify() in the conn-stream scope
+ - MINOR: stream-int/conn-stream: Move si_cs_io_cb() in the conn-stream scope
+ - MINOR: stream-int/conn-stream: Move si_sync_recv/send() in conn-stream scope
+ - MINOR: conn-stream: Move si_conn_cb in the conn-stream scope
+ - MINOR: stream-int/conn-stream Move si_is_conn_error() in the conn-stream scope
+ - MINOR: stream-int/conn-stream: Move si_alloc_ibuf() in the conn-stream scope
+ - CLEANUP: stream-int: Remove unused SI functions
+ - MEDIUM: stream-int/conn-stream: Move blocking flags from SI to CS
+ - MEDIUM: stream-int/conn-stream: Move I/O functions to conn-stream
+ - REORG: stream-int/conn-stream: Move remaining functions to conn-stream
+ - MINOR: stream: Use conn-stream to report server error
+ - MINOR: http-ana: Use CS to perform L7 retries
+ - MEDIUM: stream: Don't use the stream-int anymore in process_stream()
+ - MINOR: conn-stream: Remove the stream-interface from the conn-stream
+ - DEV: flags: No longer dump SI flags
+ - CLEANUP: tree-wide: Remove any ref to stream-interfaces
+ - CLEANUP: conn-stream: Don't export internal functions
+ - DOC: conn-stream: Add comments on functions of the new CS api
+ - MEDIUM: check: Use a new conn-stream for each health-check run
+ - CLEANUP: muxes: Remove MX_FL_CLEAN_ABRT flag
+ - MINOR: conn-stream: Use a dedicated function to conditionally remove a CS
+ - CLEANUP: conn-stream: rename cs_register_applet() to cs_applet_create()
+ - MINOR: muxes: Improve show_fd callbacks to dump endpoint flags
+ - MINOR: mux-h1: Rely on the endpoint instead of the conn-stream when possible
+ - BUG/MINOR: quic: Avoid starting the mux if no ALPN sent by the client
+ - BUILD: debug: mark the __start_mem_stats/__stop_mem_stats symbols as weak
+ - BUILD: initcall: mark the __start_i_* symbols as weak, not global
+ - BUG/MINOR: mux-h2: do not use timeout http-keep-alive on backend side
+ - BUG/MINOR: mux-h2: use timeout http-request as a fallback for http-keep-alive
+ - MINOR: muxes: Don't expect to have a mux without connection in destroy callback
+ - MINOR: muxes: Don't handle proto upgrade for muxes not supporting it
+ - MINOR: muxes: Don't expect to call release function with no mux defined
+ - MINOR: conn-stream: Use unsafe functions to get conn/appctx in cs_detach_endp
+ - BUG/MEDIUM: mux-h1: Don't request more room on partial trailers
+ - BUILD: http-client: Avoid dead code when compiled without SSL support
+ - BUG/MINOR: mux-quic: prevent a crash in session_free on mux.destroy
+ - BUG/MINOR: quic-sock: do not double free session on conn init failure
+ - BUG/MINOR: quic: fix return value for error in start
+ - MINOR: quic: emit CONNECTION_CLOSE on app init error
+ - BUILD: sched: workaround crazy and dangerous warning in Clang 14
+ - BUILD: compiler: use a more portable set of asm(".weak") statements
+ - BUG/MEDIUM: stream: do not abort connection setup too early
+ - CLEANUP: extcheck: do not needlessly preset the server's address/port
+ - MINOR: extcheck: fill in the server's UNIX socket address when known
+ - BUG/MEDIUM: connection: Don't crush context pointer location if it is a CS
+ - BUG/MEDIUM: quic: properly clean frames on stream free
+ - BUG/MEDIUM: fcgi-app: Use http_msg flags to know if C-L header can be added
+ - BUG/MEDIUM: compression: Don't forget to update htx_sl and http_msg flags
+ - MINOR: tcp_sample: clarifying samples support per os, for further expansion.
+ - MINOR: tcp_sample: extend support for get_tcp_info to macOS.
+ - SCRIPTS: announce-release: update the doc's URL
+ - DOC: lua: update a few doc URLs
+ - SCRIPTS: announce-release: add shortened links to pending issues
+
+2022/04/09 : 2.6-dev5
+ - DOC: reflect H2 timeout changes
+ - BUG/MEDIUM: mux-fcgi: Properly handle return value of headers/trailers parsing
+ - BUG/MEDIUM: mux-h1: Properly detect full buffer cases during message parsing
+ - BUG/MINOR: log: Initialize the list element when allocating a new log server
+ - BUG/MINOR: samples: add missing context names for sample fetch functions
+ - MINOR: management: add some basic keyword dump infrastructure
+ - MINOR: config: add a function to dump all known config keywords
+ - MINOR: filters: extend flt_dump_kws() to dump to stdout
+ - MINOR: services: extend list_services() to dump to stdout
+ - MINOR: cli: add a new keyword dump function
+ - MINOR: acl: add a function to dump the list of known ACL keywords
+ - MINOR: samples: add a function to list register sample fetch keywords
+ - MINOR: sample: list registered sample converter functions
+ - MINOR: tools: add strordered() to check whether strings are ordered
+ - MINOR: action: add a function to dump the list of actions for a ruleset
+ - MINOR: config: alphanumerically sort config keywords output
+ - MINOR: sample: alphanumerically sort sample & conv keyword dumps
+ - MINOR: acl: alphanumerically sort the ACL dump
+ - MINOR: cli: alphanumerically sort the dump of supported commands
+ - MINOR: filters: alphabetically sort the list of filter names
+ - MINOR: services: alphabetically sort service names
+ - MEDIUM: httpclient/lua: be stricter with httpclient parameters
+ - MINOR: ssl: split the cert commit io handler
+ - MINOR: ssl: move the cert_exts and the CERT_TYPE enum
+ - MINOR: ssl: simplify the certificate extensions array
+ - MINOR: ssl: export ckch_inst_rebuild()
+ - MINOR: ssl: add "crt" in the cert_exts array
+ - MINOR: ssl/lua: CertCache.set() allows to update an SSL certificate file
+ - BUILD: ssl/lua: CacheCert needs OpenSSL
+ - DOC: lua: CertCache class documentation
+ - BUG/MEDIUM: quic: do not use qcs from quic_stream on ACK parsing
+ - MINOR: mux-quic: return qcs instance from qcc_get_qcs
+ - MINOR: mux-quic: reorganize qcs free
+ - MINOR: mux-quic: define release app-ops
+ - BUG/MINOR: h3: release resources on close
+ - BUG/MINOR: mux-quic: ensure to free all qcs on MUX release
+ - CLEANUP: quic: complete comment on qcs_try_to_consume
+ - MINOR: quic: implement stream descriptor for transport layer
+ - MEDIUM: quic: move transport fields from qcs to qc_conn_stream
+ - MEDIUM: mux-quic: remove qcs tree node
+ - BUG/MINOR: cli/stream: fix "shutdown session" to iterate over all threads
+ - DOC: management: add missing dot in 9.4.1
+ - BUG/MAJOR: mux_pt: always report the connection error to the conn_stream
+ - DOC: remove double blanks in configuration.txt
+ - CI: github actions: update OpenSSL to 3.0.2
+ - BUG/MEDIUM: quic: Possible crash in ha_quic_set_encryption_secrets()
+ - CLEANUP: quic: Remove all atomic operations on quic_conn struct
+ - CLEANUP: quic: Remove all atomic operations on packet number spaces
+ - MEDIUM: quic: Send ACK frames asap
+ - BUG/MINOR: quic: Missing probing packets when coalescing
+ - BUG/MINOR: quic: Discard Initial packet number space only one time
+ - MINOR: quic: Do not display any timer value from process_timer()
+ - BUG/MINOR: quic: Do not probe from an already probing packet number space
+ - BUG/MINOR: quic: Non duplicated frames upon fast retransmission
+ - BUG/MINOR: quic: Too many prepared retransmissions due to anti-amplification
+ - MINOR: quic: Useless call to SSL_CTX_set_default_verify_paths()
+ - MINOR: quic: Add traces about list of frames
+ - BUG/MINOR: h3: Missing wait event struct field initialization
+ - BUG/MINOR: quic: QUIC TLS secrets memory leak
+ - BUG/MINOR: quic: Missing ACK range deallocations
+ - BUG/MINOR: quic: Missing TX packet deallocations
+ - CLEANUP: hpack: be careful about integer promotion from uint8_t
+ - OPTIM: hpack: read 32 bits at once when possible.
+ - MEDIUM: ssl: allow loading of a directory with the ca-file directive
+ - BUG/MINOR: ssl: continue upon error when opening a directory w/ ca-file
+ - MINOR: ssl: ca-file @system-ca loads the system trusted CA
+ - DOC: configuration: add the ca-file changes
+ - MINOR: sample: converter: Add add_item convertor
+ - BUG/MINOR: ssl: handle X509_get_default_cert_dir() returning NULL
+ - BUG/MINOR: ssl/cli: Remove empty lines from CLI output
+ - MINOR: httpclient: enable request buffering
+ - MEDIUM: httpclient: enable l7-retry
+ - BUG/MINOR: httpclient: end callback in applet release
+ - MINOR: quic: Add draining connection state.
+ - MINOR: quic: Add closing connection state
+ - BUG/MEDIUM: quic: ensure quic-conn survives to the MUX
+ - CLEANUP: quic: use static qualifer on quic_close
+ - CLEANUP: mux-quic: remove unused QC_CF_CC_RECV
+ - BUG/MINOR: fix memleak on quic-conn streams cleaning
+ - MINOR: mux-quic: factorize conn-stream attach
+ - MINOR: mux-quic: adjust timeout to accelerate closing
+ - MINOR: mux-quic: define is_active app-ops
+ - MINOR: mux-quic: centralize send operations in qc_send
+ - MEDIUM: mux-quic: report CO_FL_ERROR on send
+ - MEDIUM: mux-quic: report errors on conn-streams
+ - MEDIUM: quic: report closing state for the MUX
+ - BUG/MINOR: fcgi-app: Don't add C-L header on response to HEAD requests
+ - BUG/MEDIUM: stats: Be sure to never set EOM flag on an empty HTX message
+ - BUG/MEDIUM: hlua: Don't set EOM flag on an empty HTX message in HTTP applet
+ - BUG/MEDIUM: promex: Be sure to never set EOM flag on an empty HTX message
+ - BUG/MEDIUM: mux-h1: Set outgoing message to DONE when payload length is reached
+ - BUG/MINOR: http_client: Don't add input data on an empty request buffer
+ - BUG/MEDIUM: http-conv: Fix url_enc() to not crush const samples
+ - BUG/MEDIUM: http-act: Don't replace URI if path is not found or invalid
+ - CLEANUP: mux-quic: remove unneeded TODO in qc_detach
+ - BUG/MEDIUM: mux-quic: properly release conn-stream on detach
+ - BUG/MINOR: quic: set the source not the destination address on accept()
+ - BUG/MEDIUM: quic: Possible crash from quic_free_arngs()
+ - MINOR: quic_tls: Add reusable cipher contexts to QUIC TLS contexts
+ - MINOR: quic_tls: Stop hardcoding cipher IV lengths
+ - CLEANUP: quic: Do not set any cipher/group from ssl_quic_initial_ctx()
+ - MINOR: quic: Add short packet key phase bit values to traces
+ - MINOR: quic_tls: Make key update use of reusable cipher contexts
+ - BUG/MINOR: opentracing: setting the return value in function flt_ot_var_set()
+ - BUG/BUILD: opentracing: fixed OT_DEFINE variable setting
+ - EXAMPLES: opentracing: refined shell scripts for testing filter performance
+ - DOC: opentracing: corrected comments in function descriptions
+ - CLEANUP: opentracing: removed unused function flt_ot_var_unset()
+ - CLEANUP: opentracing: removed unused function flt_ot_var_get()
+ - Revert "MINOR: opentracing: change the scope of the variable 'ot.uuid' from 'sess' to 'txn'"
+ - MINOR: opentracing: only takes the variables lock on shared entries
+ - CLEANUP: opentracing: added flt_ot_smp_init() function
+ - CLEANUP: opentracing: added variable to store variable length
+ - MINOR: opentracing: improved normalization of context variable names
+ - DEBUG: opentracing: show return values of all functions in the debug output
+ - CLEANUP: opentracing: added FLT_OT_PARSE_INVALID_enum enum
+ - DEBUG: opentracing: display the contents of the err variable after setting
+ - MAJOR: opentracing: reenable usage of vars to transmit opentracing context
+ - Revert "BUILD: opentracing: display warning in case of using OT_USE_VARS at compile time"
+ - MEDIUM: global: Add a "close-spread-time" option to spread soft-stop on time window
+
+2022/03/26 : 2.6-dev4
+ - BUG/MEDIUM: httpclient: don't consume data before it was analyzed
+ - CLEANUP: htx: remove unused co_htx_remove_blk()
+ - BUG/MINOR: httpclient: consume partly the blocks when necessary
+ - BUG/MINOR: httpclient: remove the UNUSED block when parsing headers
+ - BUG/MEDIUM: httpclient: must manipulate head, not first
+ - REGTESTS: fix the race conditions in be2hex.vtc
+ - BUG/MEDIUM: quic: Blocked STREAM when retransmitted
+ - BUG/MAJOR: quic: Possible crash with full congestion control window
+ - BUG/MINOR: httpclient/lua: stuck when closing without data
+ - BUG/MEDIUM: applet: Don't call .release callback function twice
+ - BUG/MEDIUM: cli/debug: Properly get the stream-int in all debug I/O handlers
+ - BUG/MEDIUM: sink: Properly get the stream-int in appctx callback functions
+ - DEV: udp: switch parser to getopt() instead of positional arguments
+ - DEV: udp: add support for random packet corruption
+ - MINOR: server: export server_parse_sni_expr() function
+ - BUG/MINOR: httpclient: send the SNI using the host header
+ - BUILD: httpclient: fix build without SSL
+ - BUG/MINOR: server/ssl: free the SNI sample expression
+ - BUG/MINOR: logs: fix logsrv leaks on clean exit
+ - MINOR: actions: add new function free_act_rule() to free a single rule
+ - BUG/MINOR: tcp-rules: completely free incorrect TCP rules on error
+ - BUG/MINOR: http-rules: completely free incorrect TCP rules on error
+ - BUG/MINOR: httpclient: only check co_data() instead of HTTP_MSG_DATA
+ - BUG/MINOR: httpclient: process the response when received before the end of the request
+ - BUG/MINOR: httpclient: CF_SHUTW_NOW should be tested with channel_is_empty()
+ - CI: github actions: switch to LibreSSL-3.5.1
+ - BUG/MEDIUM: mux-h1: only turn CO_FL_ERROR to CS_FL_ERROR with empty ibuf
+ - BUG/MEDIUM: stream-int: do not rely on the connection error once established
+ - BUG/MEDIUM: trace: avoid race condition when retrieving session from conn->owner
+ - MEDIUM: mux-h2: slightly relax timeout management rules
+ - BUG/MEDIUM: mux-h2: make use of http-request and keep-alive timeouts
+ - BUG/MINOR: rules: Initialize the list element when allocating a new rule
+ - BUG/MINOR: http-rules: Don't free new rule on allocation failure
+ - DEV: coccinelle: Fix incorrect replacement in ist.cocci
+ - CLEANUP: Reapply ist.cocci with `--include-headers-for-types --recursive-includes`
+ - DEV: coccinelle: Add a new pattern to ist.cocci
+ - CLEANUP: Reapply ist.cocci
+ - REGTESTS: Do not use REQUIRE_VERSION for HAProxy 2.5+
+ - MINOR: quic: Code factorization (TX buffer reuse)
+ - CLEANUP: quic: "largest_acked_pn" pktns struct member moving
+ - MEDIUM: quic: Limit the number of ACK ranges
+ - MEDIUM: quic: Rework of the TX packets memory handling
+ - BUG/MINOR: quic: Possible crash in parse_retry_token()
+ - BUG/MINOR: quic: Possible leak in quic_build_post_handshake_frames()
+ - BUG/MINOR: quic: Unsent frame because of qc_build_frms()
+ - BUG/MINOR: mux-quic: Access to empty frame list from qc_send_frames()
+ - BUG/MINOR: mux-quic: Missing I/O handler events initialization
+ - BUG/MINOR: quic: Missing TX packet initializations
+ - BUG/MINOR: quic: 1RTT packets ignored after mux was released
+ - BUG/MINOR: quic: Incorrect peer address validation
+ - BUG/MINOR: quic: Non initialized variable in quic_build_post_handshake_frames()
+ - BUG/MINOR: quic: Wrong TX packet related counters handling
+ - MEDIUM: mqtt: support mqtt_is_valid and mqtt_field_value converters for MQTTv3.1
+ - DOC: config: Explicitly add supported MQTT versions
+ - MINOR: quic: Add traces about stream TX buffer consumption
+ - MINOR: quic: Add traces in qc_set_timer() (scheduling)
+ - CLEANUP: mux-quic: change comment style to not mess with git conflict
+ - CLEANUP: mux-quic: adjust comment for coding-style
+ - MINOR: mux-quic: complete trace when stream is not found
+ - MINOR: mux-quic: add comments for send functions
+ - MINOR: mux-quic: use shorter name for flow-control fields
+ - MEDIUM: mux-quic: respect peer bidirectional stream data limit
+ - MEDIUM: mux-quic: respect peer connection data limit
+ - MINOR: mux-quic: support MAX_STREAM_DATA frame parsing
+ - MINOR: mux-quic: support MAX_DATA frame parsing
+ - BUILD: stream-int: avoid a build warning when DEBUG is empty
+ - BUG/MINOR: quic: Wrong buffer length passed to generate_retry_token()
+ - BUG/MINOR: tools: fix url2sa return value with IPv4
+ - MINOR: mux-quic: convert fin on push-frame as boolean
+ - BUILD: quic: add missing includes
+ - REORG: quic: use a dedicated quic_loss.c
+ - MINOR: mux-quic: declare the qmux trace module
+ - MINOR: mux-quic: replace printfs by traces
+ - MINOR: mux-quic: add trace event for frame sending
+ - MINOR: mux-quic: add trace event for qcs_push_frame
+ - MINOR: mux-quic: activate qmux traces on stdout via macro
+ - BUILD: qpack: fix unused value when not using DEBUG_HPACK
+ - CLEANUP: qpack: suppress by default stdout traces
+ - CLEANUP: h3: suppress by default stdout traces
+ - BUG/MINOR: tools: url2sa reads too far when no port nor path
+
+2022/03/11 : 2.6-dev3
+ - DEBUG: rename WARN_ON_ONCE() to CHECK_IF()
+ - DEBUG: improve BUG_ON output message accuracy
+ - DEBUG: implement 4 levels of choices between warn and crash.
+ - DEBUG: add two new macros to enable debugging in hot paths
+ - DEBUG: buf: replace some sensitive BUG_ON() with BUG_ON_HOT()
+ - DEBUG: buf: add BUG_ON_HOT() to most buffer management functions
+ - MINOR: channel: don't use co_set_data() to decrement output
+ - DEBUG: channel: add consistency checks using BUG_ON_HOT() in some key functions
+ - MINOR: conn-stream: Improve API to have safe/unsafe accessors
+ - MEDIUM: tree-wide: Use unsafe conn-stream API when it is relevant
+ - CLEANUP: stream-int: Make si_cs_send() function static
+ - REORG: stream-int: Uninline si_sync_recv() and make si_cs_recv() private
+ - BUG/MEDIUM: mux-fcgi: Don't rely on SI src/dst addresses for FCGI health-checks
+ - BUG/MEDIUM: htx: Fix a possible null derefs in htx_xfer_blks()
+ - REGTESTS: fix the race conditions in normalize_uri.vtc
+ - DEBUG: stream-int: Fix BUG_ON used to test appctx in si_applet_ops callbacks
+ - BUILD: debug: fix build warning on older compilers around DEBUG_STRICT_ACTION
+ - CLEANUP: connection: Indicate unreachability to the compiler in conn_recv_proxy
+ - MINOR: connection: Transform safety check in PROXYv2 parsing into BUG_ON()
+ - DOC: install: it's DEBUG_CFLAGS, not DEBUG, which is set to -g
+ - DOC: install: describe the DEP variable
+ - DOC: install: describe how to choose options used in the DEBUG variable
+ - MINOR: queue: Replace if() + abort() with BUG_ON()
+ - CLEANUP: adjust indentation in bidir STREAM handling function
+ - MINOR: quic: simplify copy of STREAM frames to RX buffer
+ - MINOR: quic: handle partially received buffered stream frame
+ - MINOR: mux-quic: define flag for last received frame
+ - BUG/MINOR: quic: support FIN on Rx-buffered STREAM frames
+ - MEDIUM: quic: rearchitecture Rx path for bidirectional STREAM frames
+ - REGTESTS: fix the race conditions in secure_memcmp.vtc
+ - CLEANUP: stream: Remove useless tests on conn-stream in stream_dump()
+ - BUILD: ssl: another build warning on LIBRESSL_VERSION_NUMBER
+ - MINOR: quic: Ensure PTO timer is not set in the past
+ - MINOR: quic: Post handshake I/O callback switching
+ - MINOR: quic: Drop the packets of discarded packet number spaces
+ - CLEANUP: quic: Useless tests in qc_try_rm_hp()
+ - CLEANUP: quic: Indentation fix in qc_prep_pkts()
+ - MINOR: quic: Assemble QUIC TLS flags at the same level
+ - BUILD: conn_stream: avoid null-deref warnings on gcc 6
+ - BUILD: connection: do not declare register_mux_proto() inline
+ - BUILD: http_rules: do not declare http_*_keywords_register() inline
+ - BUILD: trace: do not declare trace_register_source() inline
+ - BUILD: tcpcheck: do not declare tcp_check_keywords_register() inline
+ - DEBUG: reduce the footprint of BUG_ON() calls
+ - BUG/MEDIUM: httpclient/lua: infinite appctx loop with POST
+ - BUG/MINOR: pool: always align pool_heads to 64 bytes
+ - DEV: udp: add a tiny UDP proxy for testing
+ - DEV: udp: implement pseudo-random reordering/loss
+ - DEV: udp: add an optional argument to set the prng seed
+ - BUG/MINOR: quic: fix segfault on CC if mux uninitialized
+ - BUG/MEDIUM: pools: fix ha_free() on area in the process of being freed
+ - CLEANUP: tree-wide: remove a few rare non-ASCII chars
+ - CI: coverity: simplify debugging options
+ - CLEANUP: quic: complete ABORT_NOW with a TODO comment
+ - MINOR: quic: qc_prep_app_pkts() implementation
+ - MINOR: quic: Send short packet from a frame list
+ - MINOR: quic: Make qc_build_frms() build ack-eliciting frames from a list
+ - MINOR: quic: Export qc_send_app_pkts()
+ - MINOR: mux-quic: refactor transport parameters init
+ - MINOR: mux-quic: complete functions to detect stream type
+ - MINOR: mux-quic: define new unions for flow-control fields
+ - MEDIUM: mux-quic: use direct send transport API for STREAMs
+ - MINOR: mux-quic: retry send opportunistically for remaining frames
+ - MEDIUM: mux-quic: implement MAX_STREAMS emission for bidir streams
+ - BUILD: fix kFreeBSD build.
+ - MINOR: quic: Retry on qc_build_pkt() failures
+ - BUG/MINOR: quic: Missing recovery start timer reset
+ - CLEANUP: quic: Remove QUIC path manipulations out of the congestion controller
+ - MINOR: quic: Add a "slow start" callback to congestion controller
+ - MINOR: quic: Persistent congestion detection outside of controllers
+ - CLEANUP: quic: Remove useless definitions from quic_cc_event struct
+ - BUG/MINOR: quic: Confusion between "in_flight" and "prep_in_flight" in quic_path_prep_data()
+ - MINOR: quic: More precise window update calculation
+ - CLEANUP: quic: Remove window redundant variable from NewReno algorithm state struct
+ - MINOR: quic: Add quic_max_int_by_size() function
+ - BUG/MAJOR: quic: Wrong quic_max_available_room() returned value
+ - MINOR: pools: add a new global option "no-memory-trimming"
+ - BUG/MINOR: add missing modes in proxy_mode_str()
+ - BUG/MINOR: cli: shows correct mode in "show sess"
+ - BUG/MEDIUM: quic: do not drop packet on duplicate stream/decoding error
+ - MINOR: stats: Add dark mode support for socket rows
+ - BUILD: fix recent build breakage of freebsd caused by kFreeBSD build fix
+ - BUG/MINOR: httpclient: Set conn-stream/channel EOI flags at the end of request
+ - BUG/MINOR: hlua: Set conn-stream/channel EOI flags at the end of request
+ - BUG/MINOR: stats: Set conn-stream/channel EOI flags at the end of request
+ - BUG/MINOR: cache: Set conn-stream/channel EOI flags at the end of request
+ - BUG/MINOR: promex: Set conn-stream/channel EOI flags at the end of request
+ - BUG/MEDIUM: stream: Use the front analyzers for new listener-less streams
+ - DEBUG: cache: Update underlying buffer when loading HTX message in cache applet
+ - BUG/MEDIUM: mcli: Properly handle errors and timeouts during response processing
+ - DEBUG: stream: Add the missing descriptions for stream trace events
+ - DEBUG: stream: Fix stream trace message to print response buffer state
+ - MINOR: proxy: Store monitor_uri as a `struct ist`
+ - MINOR: proxy: Store fwdfor_hdr_name as a `struct ist`
+ - MINOR: proxy: Store orgto_hdr_name as a `struct ist`
+ - MEDIUM: proxy: Store server_id_hdr_name as a `struct ist`
+ - CLEANUP: fcgi: Replace memcpy() on ist by istcat()
+ - CLEANUP: fcgi: Use `istadv()` in `fcgi_strm_send_params`
+ - BUG/MAJOR: mux-pt: Always destroy the backend connection on detach
+ - DOC: sample fetch methods: move distcc_* to the right locations
+ - MINOR: rules: record the last http/tcp rule that gave a final verdict
+ - MINOR: stream: add "last_rule_file" and "last_rule_line" samples
+ - BUG/MINOR: session: fix theoretical risk of memleak in session_accept_fd()
+ - MINOR: quic: Add max_idle_timeout advertisement handling
+ - MEDIUM: quic: Remove the QUIC connection reference counter
+ - BUG/MINOR: quic: ACK_REQUIRED and ACK_RECEIVED flag collision
+ - BUG/MINOR: quic: Missing check when setting the anti-amplification limit as reached
+ - MINOR: quic: Add a function to compute the current PTO
+ - MEDIUM: quic: Implement the idle timeout feature
+ - BUG/MEDIUM: quic: qc_prep_app_pkts() retries on qc_build_pkt() failures
+ - CLEANUP: quic: Comments fix for qc_prep_(app)pkts() functions
+ - MINOR: mux-quic: prevent push frame for unidir streams
+ - MINOR: mux-quic: improve opportunistic retry sending for STREAM frames
+ - MINOR: quic: implement sending confirmation
+ - MEDIUM: mux-quic: improve bidir STREAM frames sending
+ - MEDIUM: check: do not auto configure SSL/PROXY for dynamic servers
+ - REGTESTS: server: test SSL/PROXY with checks for dynamic servers
+ - MEDIUM: server: remove experimental-mode for dynamic servers
+ - BUG/MINOR: buffer: fix debugging condition in b_peek_varint()
+
+2022/02/25 : 2.6-dev2
+ - DOC: management: rework the Master CLI section
+ - DOC: management: add expert and experimental mode in 9.4.1
+ - CLEANUP: cleanup a commentary in pcli_parse_request()
+ - BUG/MINOR: mworker/cli: don't display help on master applet
+ - MINOR: mworker/cli: mcli-debug-mode enables every command
+ - MINOR: mworker/cli: add flags in the prompt
+ - BUG/MINOR: httpclient: Revisit HC request and response buffers allocation
+ - BUG/MEDIUM: httpclient: Xfer the request when the stream is created
+ - MINOR: httpclient: Don't limit data transfer to 1024 bytes
+ - BUILD: ssl: adjust guard for X509_get_X509_PUBKEY(x)
+ - REGTESTS: ssl: skip show_ssl_ocspresponse.vtc when BoringSSL is used
+ - MINOR: quic: Do not modify a marked as consumed datagram
+ - MINOR: quic: Wrong datagram buffer passed to quic_lstnr_dgram_dispatch()
+ - MINOR: quic: Remove a useless test in quic_get_dgram_dcid()
+ - BUG/MINOR: ssl: Remove empty lines from "show ssl ocsp-response <id>" output
+ - CLEANUP: ssl: Remove unused ssl_sock_create_cert function
+ - MINOR: ssl: Use high level OpenSSL APIs in sha2 converter
+ - MINOR: ssl: Remove EC_KEY related calls when preparing SSL context
+ - REGTESTS: ssl: Add test for "curves" and "ecdhe" SSL options
+ - MINOR: ssl: Remove EC_KEY related calls when creating a certificate
+ - REGTESTS: ssl: Add test for "generate-certificates" SSL option
+ - MINOR: ssl: Remove call to SSL_CTX_set_tlsext_ticket_key_cb with OpenSSLv3
+ - MINOR: ssl: Remove call to HMAC_Init_ex with OpenSSLv3
+ - MINOR: h3: hardcode the stream id of control stream
+ - MINOR: mux-quic: remove quic_transport_params_update
+ - MINOR: quic: rename local tid variable
+ - MINOR: quic: remove unused xprt rcv_buf operation
+ - MINOR: quic: take out xprt snd_buf operation
+ - CI: enable QUIC for Coverity scan
+ - BUG/MINOR: mworker: does not erase the pidfile upon reload
+ - MINOR: ssl: Remove call to ERR_func_error_string with OpenSSLv3
+ - MINOR: ssl: Remove call to ERR_load_SSL_strings with OpenSSLv3
+ - REGTESTS: ssl: Add tests for DH related options
+ - MINOR: ssl: Create HASSL_DH wrapper structure
+ - MINOR: ssl: Add ssl_sock_get_dh_from_bio helper function
+ - MINOR: ssl: Factorize ssl_get_tmp_dh and append a cbk to its name
+ - MINOR: ssl: Add ssl_sock_set_tmp_dh helper function
+ - MINOR: ssl: Add ssl_sock_set_tmp_dh_from_pkey helper function
+ - MINOR: ssl: Add ssl_new_dh_fromdata helper function
+ - MINOR: ssl: Build local DH of right size when needed
+ - MINOR: ssl: Set default dh size to 2048
+ - MEDIUM: ssl: Replace all DH objects by EVP_PKEY on OpenSSLv3 (via HASSL_DH type)
+ - MINOR: ssl: Remove calls to SSL_CTX_set_tmp_dh_callback on OpenSSLv3
+ - MINOR: quic: Remove an RX buffer useless lock
+ - MINOR: quic: Variable used before being checked in ha_quic_add_handshake_data()
+ - MINOR: quic: EINTR error ignored
+ - MINOR: quic: Potential overflow expression in qc_parse_frm()
+ - MINOR: quic: Possible overflow in qpack_get_varint()
+ - CLEANUP: h3: Unreachable target in h3_uqs_init()
+ - MINOR: quic: Possible memleak in qc_new_conn()
+ - MINOR: quic: Useless statement in quic_crypto_data_cpy()
+ - BUG/MEDIUM: pools: ensure items are always large enough for the pool_cache_item
+ - BUG/MINOR: pools: always flush pools about to be destroyed
+ - CLEANUP: pools: don't needlessly set a call mark during refilling of caches
+ - DEBUG: pools: add extra sanity checks when picking objects from a local cache
+ - DEBUG: pools: let's add reverse mapping from cache heads to thread and pool
+ - DEBUG: pools: replace the link pointer with the caller's address on pool_free()
+ - BUG/MAJOR: sched: prevent rare concurrent wakeup of multi-threaded tasks
+ - MINOR: quic: use a global dghlrs for each thread
+ - BUG/MEDIUM: quic: fix crash on CC if mux not present
+ - MINOR: qpack: fix typo in trace
+ - BUG/MINOR: quic: fix FIN stream signaling
+ - BUG/MINOR: h3: fix the header length for QPACK decoding
+ - MINOR: h3: remove transfer-encoding header
+ - MINOR: h3: add documentation on h3_decode_qcs
+ - MINOR: h3: set properly HTX EOM/BODYLESS on HEADERS parsing
+ - MINOR: mux-quic: implement rcv_buf
+ - MINOR: mux-quic: set EOS on rcv_buf
+ - MINOR: h3: set CS_FL_NOT_FIRST
+ - MINOR: h3: report frames bigger than rx buffer
+ - MINOR: h3: extract HEADERS parsing in a dedicated function
+ - MINOR: h3: implement DATA parsing
+ - MINOR: quic: Wrong smoothed rtt initialization
+ - MINOR: quic: Wrong loss delay computation
+ - MINOR: quic: Code never reached in qc_ssl_sess_init()
+ - MINOR: quic: ha_quic_set_encryption_secrets without server specific code
+ - MINOR: quic: Avoid warning about NULL pointer dereferences
+ - MINOR: quic: Useless test in quic_lstnr_dghdlr()
+ - MINOR: quic: Non checked returned value for cs_new() in hq_interop_decode_qcs()
+ - MINOR: h3: Dead code in h3_uqs_init()
+ - MINOR: quic: Non checked returned value for cs_new() in h3_decode_qcs()
+ - MINOR: quic: Possible frame parsers array overrun
+ - MINOR: quic: Do not retransmit too much packets.
+ - MINOR: quic: Move quic_rxbuf_pool pool out of xprt part
+ - MINOR: h3: report error on HEADERS/DATA parsing
+ - BUG/MINOR: jwt: Double free in deinit function
+ - BUG/MINOR: jwt: Missing pkey free during cleanup
+ - BUG/MINOR: jwt: Memory leak if same key is used in multiple jwt_verify calls
+ - BUG/MINOR: httpclient/cli: display junk characters in vsn
+ - MINOR: h3: remove unused return value on decode_qcs
+ - BUG/MAJOR: http/htx: prevent unbounded loop in http_manage_server_side_cookies
+ - BUG/MAJOR: spoe: properly detach all agents when releasing the applet
+ - REGTESTS: server: close an occasional race on dynamic_server_ssl.vtc
+ - REGTESTS: peers: leave a bit more time to peers to synchronize
+ - BUG/MEDIUM: h2/hpack: fix emission of HPACK DTSU after settings change
+ - BUG/MINOR: mux-h2: update the session's idle delay before creating the stream
+ - BUG/MINOR: httpclient: reinit flags in httpclient_start()
+ - BUG/MINOR: mailers: negotiate SMTP, not ESMTP
+ - MINOR: httpclient: sets an alternative destination
+ - MINOR: httpclient/lua: add 'dst' optional field
+ - BUG/MINOR: ssl: Add missing return value check in ssl_ocsp_response_print
+ - BUG/MINOR: ssl: Fix leak in "show ssl ocsp-response" CLI command
+ - BUG/MINOR: ssl: Missing return value check in ssl_ocsp_response_print
+ - CLEANUP: httpclient/cli: fix indentation alignment of the help message
+ - BUG/MINOR: tools: url2sa reads ipv4 too far
+ - BUG/MEDIUM: httpclient: limit transfers to the maximum available room
+ - DEBUG: buffer: check in __b_put_blk() whether the buffer room is respected
+ - MINOR: mux-quic: fix a possible null dereference in qc_timeout_task
+ - BUG/MEDIUM: htx: Be sure to have a buffer to perform a raw copy of a message
+ - BUG/MEDIUM: mux-h1: Don't wake h1s if mux is blocked on lack of output buffer
+ - BUG/MAJOR: mux-h2: Be sure to always report HTX parsing error to the app layer
+ - DEBUG: stream-int: Check CS_FL_WANT_ROOM is not set with an empty input buffer
+ - MINOR: quic: do not modify offset node if quic_rx_strm_frm in tree
+ - MINOR: h3: fix compiler warning variable set but not used
+ - MINOR: mux-quic: fix uninitialized return on qc_send
+ - MINOR: quic: fix handling of out-of-order received STREAM frames
+ - MINOR: pools: mark most static pool configuration variables as read-mostly
+ - CLEANUP: pools: remove the now unused pool_is_crowded()
+ - REGTESTS: fix the race conditions in 40be_2srv_odd_health_checks
+ - BUG/MEDIUM: stream: Abort processing if response buffer allocation fails
+ - MINOR: httpclient/lua: ability to set a server timeout
+ - BUG/MINOR: httpclient/lua: missing pop for new timeout parameter
+ - DOC: httpclient/lua: fix the type of the dst parameter
+ - CLEANUP: httpclient: initialize the client in stage INIT not REGISTER
+ - CLEANUP: muxes: do not use a dynamic trash in list_mux_protos()
+ - CLEANUP: vars: move the per-process variables initialization to vars.c
+ - CLEANUP: init: remove the ifdef on HAPROXY_MEMMAX
+ - MINOR: pools: disable redundant poisonning on pool_free()
+ - MINOR: pools: introduce a new pool_debugging global variable
+ - MINOR: pools: switch the fail-alloc test to runtime only
+ - MINOR: pools: switch DEBUG_DONT_SHARE_POOLS to runtime
+ - MINOR: pools: add a new debugging flag POOL_DBG_COLD_FIRST
+ - MINOR: pools: add a new debugging flag POOL_DBG_INTEGRITY
+ - MINOR: pools: make the global pools a runtime option.
+ - MEDIUM: pools: replace CONFIG_HAP_POOLS with a runtime "NO_CACHE" flag.
+ - MINOR: pools: store the allocated size for each pool
+ - MINOR: pools: get rid of POOL_EXTRA
+ - MINOR: pools: replace DEBUG_POOL_TRACING with runtime POOL_DBG_CALLER
+ - MINOR: pools: replace DEBUG_MEMORY_POOLS with runtime POOL_DBG_TAG
+ - MINOR: pools: add a debugging flag for memory poisonning option
+ - MEDIUM: initcall: move STG_REGISTER earlier
+ - MEDIUM: init: split the early initialization in its own function
+ - MINOR: init: extract args parsing to their own function
+ - MEDIUM: init: handle arguments earlier
+ - MINOR: pools: delegate parsing of command line option -dM to a new function
+ - MINOR: pools: support setting debugging options using -dM
+ - BUILD: makefile: enable both DEBUG_STRICT and DEBUG_MEMORY_POOLS by default
+ - CI: github: enable pool debugging by default
+ - DOC: Fix usage/examples of deprecated ACLs
+ - DOC: internal: update the pools API to mention boot-time settings
+ - DOC: design: add design thoughts for later simplification of the pools
+ - DOC: design: commit the temporary design notes on thread groups
+ - MINOR: stream-int: Handle appctx case first when releasing the endpoint
+ - MINOR: connection: Be prepared to handle conn-stream with no connection
+ - MINOR: stream: Handle appctx case first when creating a new stream
+ - MINOR: connection: Add a function to detach a conn-stream from the connection
+ - MINOR: stream-int: Add function to reset a SI endpoint
+ - MINOR: stream-int: Add function to attach a connection to a SI
+ - MINOR: stream-int: Be able to allocate a CS without connection
+ - MEDIUM: stream: No longer release backend conn-stream on connection retry
+ - MEDIUM: stream: Allocate backend CS when the stream is created
+ - REORG: conn_stream: move conn-stream stuff in dedicated files
+ - MEDIUM: conn-stream: No longer access connection field directly
+ - MEDIUM: conn-stream: Be prepared to use an appctx as conn-stream endpoint
+ - MAJOR: conn_stream/stream-int: move the appctx to the conn-stream
+ - MEDIUM: applet: Set the conn-stream as appctx owner instead of the stream-int
+ - MEDIUM: conn_stream: Add a pointer to the app object into the conn-stream
+ - MINOR: stream: Add pointer to front/back conn-streams into stream struct
+ - MINOR: stream: Slightly rework stream_new to separate CS/SI initialization
+ - MINOR: stream-int: Always access the stream-int via the conn-stream
+ - MINOR: backend: Always access the stream-int via the conn-stream
+ - MINOR: stream: Always access the stream-int via the conn-stream
+ - MINOR: http-ana: Always access the stream-int via the conn-stream
+ - MINOR: cli: Always access the stream-int via the conn-stream
+ - MINOR: log: Always access the stream-int via the conn-stream
+ - MINOR: frontend: Always access the stream-int via the conn-stream
+ - MINOR: proxy: Always access the stream-int via the conn-stream
+ - MINOR: peers: Always access the stream-int via the conn-stream
+ - MINOR: debug: Always access the stream-int via the conn-stream
+ - MINOR: hlua: Always access the stream-int via the conn-stream
+ - MINOR: cache: Always access the stream-int via the conn-stream
+ - MINOR: dns: Always access the stream-int via the conn-stream
+ - MINOR: http-act: Always access the stream-int via the conn-stream
+ - MINOR: httpclient: Always access the stream-int via the conn-stream
+ - MINOR: tcp-act: Always access the stream-int via the conn-stream
+ - MINOR: sink: Always access the stream-int via the conn-stream
+ - MINOR: conn-stream: Rename cs_detach() to cs_detach_endp()
+ - CLEANUP: conn-stream: Don't export conn-stream pool
+ - MAJOR: stream/conn_stream: Move the stream-interface into the conn-stream
+ - CLEANUP: stream-int: rename si_reset() to si_init()
+ - MINOR: conn-stream: Release a CS when both app and endp are detached
+ - MINOR: stream: Don't destroy conn-streams but detach app and endp
+ - MAJOR: check: Use a persistent conn-stream for health-checks
+ - CLEANUP: conn-stream: Remove cs_destroy()
+ - CLEANUP: backend: Don't export connect_server anymore
+ - BUG/MINOR: h3/hq_interop: Fix CS and stream creation
+ - BUILD: tree-wide: Avoid warnings about undefined entities retrieved from a CS
+ - BUG/MINOR: proxy: preset the error message pointer to NULL in parse_new_proxy()
+ - BUG/MEDIUM: quic: fix received ACK stream calculation
+ - BUILD: stream: fix build warning with older compilers
+ - BUG/MINOR: debug: fix get_tainted() to properly read an atomic value
+ - DEBUG: move the tainted stuff to bug.h for easier inclusion
+ - DEBUG: cleanup back trace generation
+ - DEBUG: cleanup BUG_ON() configuration
+ - DEBUG: mark ABORT_NOW() as unreachable
 - DEBUG: add a new WARN_ON() macro
+ - DEBUG: make the _BUG_ON() macro return the condition
+ - DEBUG: add a new WARN_ON_ONCE() macro
+ - DEBUG: report BUG_ON() and WARN_ON() in the tainted flags
+ - MINOR: quic: adjust buffer handling for STREAM transmission
+ - MINOR: quic: liberate the TX stream buffer after ACK processing
+ - MINOR: quic: add a TODO for a memleak frame on ACK consume
+
+2022/02/01 : 2.6-dev1
+ - BUG/MINOR: cache: Fix loop on cache entries in "show cache"
+ - BUG/MINOR: httpclient: allow to replace the host header
+ - BUG/MINOR: lua: don't expose internal proxies
+ - MEDIUM: mworker: seamless reload use the internal sockpairs
+ - BUG/MINOR: lua: remove loop initial declarations
+ - BUG/MINOR: mworker: does not add the -sf in wait mode
+ - BUG/MEDIUM: mworker: FD leak of the eventpoll in wait mode
+ - MINOR: quic: do not reject PADDING followed by other frames
+ - REORG: quic: add comment on rare thread concurrence during CID alloc
+ - CLEANUP: quic: add comments on CID code
+ - MEDIUM: quic: handle CIDs to rattach received packets to connection
 - MINOR: qpack: support literal field line with non-huff name
+ - MINOR: quic: activate QUIC traces at compilation
+ - MINOR: quic: use more verbose QUIC traces set at compile-time
+ - MEDIUM: pool: refactor malloc_trim/glibc and jemalloc api addition detections.
+ - MEDIUM: pool: support purging jemalloc arenas in trim_all_pools()
+ - BUG/MINOR: mworker: deinit of thread poller was called when not initialized
+ - BUILD: pools: only detect link-time jemalloc on ELF platforms
+ - CI: github actions: add the output of $CC -dM -E-
+ - BUG/MEDIUM: cli: Properly set stream analyzers to process one command at a time
+ - BUILD: evports: remove a leftover from the dead_fd cleanup
+ - MINOR: quic: Set "no_application_protocol" alert
+ - MINOR: quic: More accurate immediately close.
+ - MINOR: quic: Immediately close if no transport parameters extension found
+ - MINOR: quic: Rename qc_prep_hdshk_pkts() to qc_prep_pkts()
+ - MINOR: quic: Possible crash when inspecting the xprt context
 - MINOR: quic: Dynamically allocate the secret keys
+ - MINOR: quic: Add a function to derive the key update secrets
+ - MINOR: quic: Add structures to maintain key phase information
+ - MINOR: quic: Optional header protection key for quic_tls_derive_keys()
+ - MINOR: quic: Add quic_tls_key_update() function for Key Update
+ - MINOR: quic: Enable the Key Update process
+ - MINOR: quic: Delete the ODCIDs asap
+ - BUG/MINOR: vars: Fix the set-var and unset-var converters
+ - MEDIUM: pool: Following up on previous pool trimming update.
+ - BUG/MEDIUM: mux-h1: Fix splicing by properly detecting end of message
+ - BUG/MINOR: mux-h1: Fix splicing for messages with unknown length
+ - MINOR: mux-h1: Improve H1 traces by adding info about http parsers
+ - MINOR: mux-h1: register a stats module
+ - MINOR: mux-h1: add counters instance to h1c
+ - MINOR: mux-h1: count open connections/streams on stats
+ - MINOR: mux-h1: add stat for total count of connections/streams
+ - MINOR: mux-h1: add stat for total amount of bytes received and sent
+ - REGTESTS: h1: Add a script to validate H1 splicing support
+ - BUG/MINOR: server: Don't rely on last default-server to init server SSL context
+ - BUG/MEDIUM: resolvers: Detach query item on response error
+ - MEDIUM: resolvers: No longer store query items in a list into the response
+ - BUG/MAJOR: segfault using multiple log forward sections.
+ - BUG/MEDIUM: h1: Properly reset h1m flags when headers parsing is restarted
+ - BUG/MINOR: resolvers: Don't overwrite the error for invalid query domain name
+ - BUILD: bug: Fix error when compiling with -DDEBUG_STRICT_NOCRASH
+ - BUG/MEDIUM: sample: Fix memory leak in sample_conv_jwt_member_query
+ - DOC: spoe: Clarify use of the event directive in spoe-message section
+ - DOC: config: Specify %Ta is only available in HTTP mode
+ - BUILD: tree-wide: avoid warnings caused by redundant checks of obj_types
+ - IMPORT: slz: use the correct CRC32 instruction when running in 32-bit mode
+ - MINOR: quic: fix segfault on CONNECTION_CLOSE parsing
+ - MINOR: h3: add BUG_ON on control receive function
+ - MEDIUM: xprt-quic: finalize app layer initialization after ALPN nego
+ - MINOR: h3: remove duplicated FIN flag position
+ - MAJOR: mux-quic: implement a simplified mux version
+ - MEDIUM: mux-quic: implement release mux operation
+ - MEDIUM: quic: detect the stream FIN
+ - MINOR: mux-quic: implement subscribe on stream
+ - MEDIUM: mux-quic: subscribe on xprt if remaining data after send
+ - MEDIUM: mux-quic: wake up xprt on data transferred
+ - MEDIUM: mux-quic: handle when sending buffer is full
+ - MINOR: quic: RX buffer full due to wrong CRYPTO data handling
+ - MINOR: quic: Race issue when consuming RX packets buffer
+ - MINOR: quic: QUIC encryption level RX packets race issue
+ - MINOR: quic: Delete remaining RX handshake packets
+ - MINOR: quic: Remove QUIC TX packet length evaluation function
+ - MINOR: hq-interop: fix tx buffering
 - MINOR: mux-quic: remove unneeded code to check fin on TX
+ - MINOR: quic: add HTX EOM on request end
+ - BUILD: mux-quic: fix compilation with DEBUG_MEM_STATS
+ - MINOR: http-rules: Add capture action to http-after-response ruleset
+ - BUG/MINOR: cli/server: Don't crash when a server is added with a custom id
+ - MINOR: mux-quic: do not release qcs if there is remaining data to send
+ - MINOR: quic: notify the mux on CONNECTION_CLOSE
+ - BUG/MINOR: mux-quic: properly initialize flow control
+ - MINOR: quic: Compilation fix for quic_rx_packet_refinc()
+ - MINOR: h3: fix possible invalid dereference on htx parsing
+ - DOC: config: retry-on list is space-delimited
+ - DOC: config: fix error-log-format example
+ - BUG/MEDIUM: mworker/cli: crash when trying to access an old PID in prompt mode
+ - MINOR: hq-interop: refix tx buffering
+ - REGTESTS: ssl: use X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY for cert check
+ - MINOR: cli: "show version" displays the current process version
+ - CLEANUP: cfgparse: modify preprocessor guards around numa detection code
+ - MEDIUM: cfgparse: numa detect topology on FreeBSD.
+ - BUILD: ssl: unbreak the build with newer libressl
+ - MINOR: vars: Move UPDATEONLY flag test to vars_set_ifexist
+ - MINOR: vars: Set variable type to ANY upon creation
+ - MINOR: vars: Delay variable content freeing in var_set function
+ - MINOR: vars: Parse optional conditions passed to the set-var converter
+ - MINOR: vars: Parse optional conditions passed to the set-var actions
+ - MEDIUM: vars: Enable optional conditions to set-var converter and actions
+ - DOC: vars: Add documentation about the set-var conditions
+ - REGTESTS: vars: Add new test for conditional set-var
+ - MINOR: quic: Attach timer task to thread for the connection.
+ - CLEANUP: quic_frame: Remove a useless suffix to STOP_SENDING
+ - MINOR: quic: Add traces for STOP_SENDING frame and modify others
+ - CLEANUP: quic: Remove cdata_len from quic_tx_packet struct
+ - MINOR: quic: Enable TLS 0-RTT if needed
+ - MINOR: quic: No TX secret at EARLY_DATA encryption level
+ - MINOR: quic: Add quic_set_app_ops() function
+ - MINOR: ssl_sock: Set the QUIC application from ssl_sock_advertise_alpn_protos.
+ - MINOR: quic: Make xprt support 0-RTT.
+ - MINOR: qpack: Missing check for truncated QPACK fields
+ - CLEANUP: quic: Comment fix for qc_strm_cpy()
+ - MINOR: hq_interop: Stop BUG_ON() truncated streams
+ - MINOR: quic: Do not mix packet number space and connection flags
 - CLEANUP: quic: Shorten a little bit the traces in lstnr_rcv_pkt()
+ - MINOR: mux-quic: fix trace on stream creation
+ - CLEANUP: quic: fix spelling mistake in a trace
+ - CLEANUP: quic: rename quic_conn conn to qc in quic_conn_free
+ - MINOR: quic: add missing lock on cid tree
+ - MINOR: quic: rename constant for haproxy CIDs length
+ - MINOR: quic: refactor concat DCID with address for Initial packets
+ - MINOR: quic: compare coalesced packets by DCID
+ - MINOR: quic: refactor DCID lookup
+ - MINOR: quic: simplify the removal from ODCID tree
+ - REGTESTS: vars: Remove useless ssl tunes from conditional set-var test
+ - MINOR: ssl: Remove empty lines from "show ssl ocsp-response" output
+ - MINOR: quic: Increase the RX buffer for each connection
+ - MINOR: quic: Add a function to list remaining RX packets by encryption level
+ - MINOR: quic: Stop emptying the RX buffer asap.
 - MINOR: quic: Do not expect to receive only one 0-RTT packet
+ - MINOR: quic: Do not forget STREAM frames received in disorder
+ - MINOR: quic: Wrong packet refcount handling in qc_pkt_insert()
+ - DOC: fix misspelled keyword "resolve_retries" in resolvers
+ - CLEANUP: quic: rename quic_conn instances to qc
+ - REORG: quic: move mux function outside of xprt
+ - MINOR: quic: add reference to quic_conn in ssl context
+ - MINOR: quic: add const qualifier for traces function
+ - MINOR: trace: add quic_conn argument definition
+ - MINOR: quic: use quic_conn as argument to traces
+ - MINOR: quic: add quic_conn instance in traces for qc_new_conn
+ - MINOR: quic: Add stream IDs to qcs_push_frame() traces
+ - MINOR: quic: unchecked qc_retrieve_conn_from_cid() returned value
+ - MINOR: quic: Wrong dropped packet skipping
+ - MINOR: quic: Handle the cases of overlapping STREAM frames
+ - MINOR: quic: xprt traces fixes
+ - MINOR: quic: Drop asap Retry or Version Negotiation packets
+ - MINOR: pools: work around possibly slow malloc_trim() during gc
+ - DEBUG: ssl: make sure we never change a servername on established connections
+ - MINOR: quic: Add traces for RX frames (flow control related)
+ - MINOR: quic: Add CONNECTION_CLOSE phrase to trace
+ - REORG: quic: remove qc_ prefix on functions which not used it directly
+ - BUG/MINOR: quic: upgrade rdlock to wrlock for ODCID removal
+ - MINOR: quic: remove unnecessary call to free_quic_conn_cids()
+ - MINOR: quic: store ssl_sock_ctx reference into quic_conn
+ - MINOR: quic: remove unnecessary if in qc_pkt_may_rm_hp()
+ - MINOR: quic: replace usage of ssl_sock_ctx by quic_conn
+ - MINOR: quic: delete timer task on quic_close()
+ - MEDIUM: quic: implement refcount for quic_conn
+ - BUG/MINOR: quic: fix potential null dereference
+ - BUG/MINOR: quic: fix potential use of uninit pointer
+ - BUG/MEDIUM: backend: fix possible sockaddr leak on redispatch
+ - BUG/MEDIUM: peers: properly skip conn_cur from incoming messages
+ - CI: Github Actions: do not show VTest failures if build failed
+ - BUILD: opentracing: display warning in case of using OT_USE_VARS at compile time
+ - MINOR: compat: detect support for dl_iterate_phdr()
+ - MINOR: debug: add ability to dump loaded shared libraries
+ - MINOR: debug: add support for -dL to dump library names at boot
+ - BUG/MEDIUM: ssl: initialize correctly ssl w/ default-server
+ - REGTESTS: ssl: fix ssl_default_server.vtc
+ - BUG/MINOR: ssl: free the fields in srv->ssl_ctx
+ - BUG/MEDIUM: ssl: free the ckch instance linked to a server
+ - REGTESTS: ssl: update of a crt with server deletion
+ - BUILD/MINOR: cpuset FreeBSD 14 build fix.
+ - MINOR: pools: always evict oldest objects first in pool_evict_from_local_cache()
+ - DOC: pool: document the purpose of various structures in the code
+ - CLEANUP: pools: do not use the extra pointer to link shared elements
+ - CLEANUP: pools: get rid of the POOL_LINK macro
+ - MINOR: pool: allocate from the shared cache through the local caches
+ - CLEANUP: pools: group list updates in pool_get_from_cache()
+ - MINOR: pool: rely on pool_free_nocache() in pool_put_to_shared_cache()
+ - MINOR: pool: make pool_is_crowded() always true when no shared pools are used
+ - MINOR: pool: check for pool's fullness outside of pool_put_to_shared_cache()
+ - MINOR: pool: introduce pool_item to represent shared pool items
+ - MINOR: pool: add a function to estimate how many may be released at once
+ - MEDIUM: pool: compute the number of evictable entries once per pool
+ - MINOR: pools: prepare pool_item to support chained clusters
+ - MINOR: pools: pass the objects count to pool_put_to_shared_cache()
+ - MEDIUM: pools: centralize cache eviction in a common function
+ - MEDIUM: pools: start to batch eviction from local caches
+ - MEDIUM: pools: release cached objects in batches
+ - OPTIM: pools: reduce local pool cache size to 512kB
+ - CLEANUP: assorted typo fixes in the code and comments This is 29th iteration of typo fixes
+ - CI: github actions: update OpenSSL to 3.0.1
+ - BUILD/MINOR: tools: solaris build fix on dladdr.
+ - BUG/MINOR: cli: fix _getsocks with musl libc
+ - BUG/MEDIUM: http-ana: Preserve response's FLT_END analyser on L7 retry
+ - MINOR: quic: Wrong traces after rework
+ - MINOR: quic: Add trace about in flight bytes by packet number space
+ - MINOR: quic: Wrong first packet number space computation
+ - MINOR: quic: Wrong packet number space computation for PTO
+ - MINOR: quic: Wrong loss time computation in qc_packet_loss_lookup()
 - MINOR: quic: Wrong ack_delay computation before calling quic_loss_srtt_update()
+ - MINOR: quic: Remove nb_pto_dgrams quic_conn struct member
+ - MINOR: quic: Wrong packet number space trace in qc_prep_pkts()
+ - MINOR: quic: Useless test in qc_prep_pkts()
+ - MINOR: quic: qc_prep_pkts() code moving
+ - MINOR: quic: Speeding up Handshake Completion
+ - MINOR: quic: Probe Initial packet number space more often
+ - MINOR: quic: Probe several packet number space upon timer expiration
+ - MINOR: quic: Comment fix.
+ - MINOR: quic: Improve qc_prep_pkts() flexibility
+ - MINOR: quic: Do not drop secret key but drop the CRYPTO data
+ - MINOR: quic: Prepare Handshake packets asap after completed handshake
+ - MINOR: quic: Flag asap the connection having reached the anti-amplification limit
+ - MINOR: quic: PTO timer too often reset
+ - MINOR: quic: Re-arm the PTO timer upon datagram receipt
+ - MINOR: proxy: add option idle-close-on-response
+ - MINOR: cpuset: switch to sched_setaffinity for FreeBSD 14 and above.
+ - CI: refactor spelling check
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUILD: makefile: add -Wno-atomic-alignment to work around clang abusive warning
+ - MINOR: quic: Only one CRYPTO frame by encryption level
+ - MINOR: quic: Missing retransmission from qc_prep_fast_retrans()
+ - MINOR: quic: Non-optimal use of a TX buffer
+ - BUG/MEDIUM: mworker: don't use _getsocks in wait mode
+ - BUG/MINOR: ssl: Store client SNI in SSL context in case of ClientHello error
+ - BUG/MAJOR: mux-h1: Don't decrement .curr_len for unsent data
+ - DOC: internals: document the pools architecture and API
+ - CI: github actions: clean default step conditions
+ - BUILD: cpuset: fix build issue on macos introduced by previous change
 - MINOR: quic: Remaining TRACEs with connection as first arg
+ - MINOR: quic: Reset ->conn quic_conn struct member when calling qc_release()
+ - MINOR: quic: Flag the connection as being attached to a listener
+ - MINOR: quic: Wrong CRYPTO frame concatenation
 - MINOR: quic: Add traces quic_close() and quic_conn_io_cb()
+ - REGTESTS: ssl: Fix ssl_errors regtest with OpenSSL 1.0.2
+ - MINOR: quic: Do not dereference ->conn quic_conn struct member
+ - MINOR: quic: fix return of quic_dgram_read
+ - MINOR: quic: add config parse source file
+ - MINOR: quic: implement Retry TLS AEAD tag generation
+ - MEDIUM: quic: implement Initial token parsing
+ - MINOR: quic: define retry_source_connection_id TP
+ - MEDIUM: quic: implement Retry emission
+ - MINOR: quic: free xprt tasklet on its thread
+ - BUG/MEDIUM: connection: properly leave stopping list on error
+ - MINOR: pools: enable pools with DEBUG_FAIL_ALLOC as well
+ - MINOR: quic: As server, skip 0-RTT packet number space
+ - MINOR: quic: Do not wakeup the I/O handler before the mux is started
+ - BUG/MEDIUM: htx: Adjust length to add DATA block in an empty HTX buffer
+ - CI: github actions: use cache for OpenTracing
+ - BUG/MINOR: httpclient: don't send an empty body
+ - BUG/MINOR: httpclient: set default Accept and User-Agent headers
+ - BUG/MINOR: httpclient/lua: don't pop the lua stack when getting headers
+ - BUILD/MINOR: fix solaris build with clang.
+ - BUG/MEDIUM: server: avoid changing healthcheck ctx with set server ssl
+ - CI: refactor OpenTracing build script
+ - DOC: management: mark "set server ssl" as deprecated
+ - MEDIUM: cli: yield between each pipelined command
+ - MINOR: channel: add new function co_getdelim() to support multiple delimiters
+ - BUG/MINOR: cli: avoid O(bufsize) parsing cost on pipelined commands
+ - MEDIUM: h2/hpack: emit a Dynamic Table Size Update after settings change
+ - MINOR: quic: Retransmit the TX frames in the same order
+ - MINOR: quic: Remove the packet number space TX MT_LIST
+ - MINOR: quic: Splice the frames which could not be added to packets
+ - MINOR: quic: Add the number of TX bytes to traces
+ - CLEANUP: quic: Replace <nb_pto_dgrams> by <probe>
+ - MINOR: quic: Send two ack-eliciting packets when probing packet number spaces
+ - MINOR: quic: Probe regardless of the congestion control
+ - MINOR: quic: Speeding up handshake completion
+ - MINOR: quic: Release RX Initial packets asap
+ - MINOR: quic: Release asap TX frames to be transmitted
+ - MINOR: quic: Probe even if coalescing
+ - BUG/MEDIUM: cli: Never wait for more data on client shutdown
+ - BUG/MEDIUM: mcli: do not try to parse empty buffers
+ - BUG/MEDIUM: mcli: always realign wrapping buffers before parsing them
+ - BUG/MINOR: stream: make the call_rate only count the no-progress calls
+ - MINOR: quic: do not use quic_conn after dropping it
+ - MINOR: quic: adjust quic_conn refcount decrement
+ - MINOR: quic: fix race-condition on xprt tasklet free
+ - MINOR: quic: free SSL context on quic_conn free
+ - MINOR: quic: Add QUIC_FT_RETIRE_CONNECTION_ID parsing case
+ - MINOR: quic: Wrong packet number space selection
+ - DEBUG: pools: add new build option DEBUG_POOL_INTEGRITY
+ - MINOR: quic: add missing include in quic_sock
+ - MINOR: quic: fix indentation in qc_send_ppkts
+ - MINOR: quic: remove dereferencement of connection when possible
+ - MINOR: quic: set listener accept cb on parsing
+ - MEDIUM: quic/ssl: add new ex data for quic_conn
+ - MINOR: quic: initialize ssl_sock_ctx alongside the quic_conn
+ - MINOR: ssl: fix build in release mode
+ - MINOR: pools: partially uninline pool_free()
+ - MINOR: pools: partially uninline pool_alloc()
+ - MINOR: pools: prepare POOL_EXTRA to be split into multiple extra fields
+ - MINOR: pools: extend pool_cache API to pass a pointer to a caller
+ - DEBUG: pools: add new build option DEBUG_POOL_TRACING
+ - DEBUG: cli: add a new "debug dev fd" expert command
+ - MINOR: fd: register the write side of the poller pipe as well
+ - CI: github actions: use cache for SSL libs
+ - BUILD: debug/cli: condition test of O_ASYNC to its existence
+ - BUILD: pools: fix build error on DEBUG_POOL_TRACING
+ - MINOR: quic: refactor header protection removal
+ - MINOR: quic: handle app data according to mux/connection layer status
+ - MINOR: quic: refactor app-ops initialization
+ - MINOR: receiver: define a flag for local accept
+ - MEDIUM: quic: flag listener for local accept
+ - MINOR: quic: do not manage connection in xprt snd_buf
+ - MINOR: quic: remove wait handshake/L6 flags on init connection
+ - MINOR: listener: add flags field
+ - MINOR: quic: define QUIC flag on listener
+ - MINOR: quic: create accept queue for QUIC connections
+ - MINOR: listener: define per-thr struct
+ - MAJOR: quic: implement accept queue
+ - CLEANUP: mworker: simplify mworker_free_child()
+ - BUILD/DEBUG: lru: update the standalone code to support the revision
+ - DEBUG: lru: use a xorshift generator in the testing code
+ - BUG/MAJOR: compiler: relax alignment constraints on certain structures
+ - BUG/MEDIUM: fd: always align fdtab[] to 64 bytes
+ - MINOR: quic: No DCID length for datagram context
+ - MINOR: quic: Comment fix about the token found in Initial packets
+ - MINOR: quic: Get rid of a struct buffer in quic_lstnr_dgram_read()
+ - MINOR: quic: Remove the QUIC haproxy server packet parser
 - MINOR: quic: Add new definition about DCIDs offsets
+ - MINOR: quic: Add a list to QUIC sock I/O handler RX buffer
+ - MINOR: quic: Allocate QUIC datagrams from sock I/O handler
+ - MINOR: proto_quic: Allocate datagram handlers
+ - MINOR: quic: Pass CID as a buffer to quic_get_cid_tid()
+ - MINOR: quic: Convert quic_dgram_read() into a task
+ - CLEANUP: quic: Remove useless definition
+ - MINOR: proto_quic: Wrong allocations for TX rings and RX bufs
+ - MINOR: quic: Do not consume the RX buffer on QUIC sock i/o handler side
+ - MINOR: quic: Do not reset a full RX buffer
+ - MINOR: quic: Attach all the CIDs to the same connection
+ - MINOR: quic: Make usage of by datagram handler trees
+ - MEDIUM: da: new optional data file download scheduler service.
+ - MEDIUM: da: update doc and build for new scheduler mode service.
+ - MEDIUM: da: update module to handle schedule mode.
+ - MINOR: quic: Drop Initial packets with wrong ODCID
+ - MINOR: quic: Wrong RX buffer tail handling when no more contiguous data
+ - MINOR: quic: Iterate over all received datagrams
+ - MINOR: quic: refactor quic CID association with threads
+ - BUG/MEDIUM: resolvers: Really ignore trailing dot in domain names
+ - DEV: flags: Add missing flags
+ - BUG/MINOR: sink: Use the right field in appctx context in release callback
+ - MINOR: sock: move the unused socket cleaning code into its own function
+ - BUG/MEDIUM: mworker: close unused transferred FDs on load failure
+ - BUILD: atomic: make the old HA_ATOMIC_LOAD() support const pointers
+ - BUILD: cpuset: do not use const on the source of CPU_AND/CPU_ASSIGN
+ - BUILD: checks: fix inlining issue on set_srv_agent_[addr,port}
+ - BUILD: vars: avoid overlapping field initialization
+ - BUILD: server-state: avoid using not-so-portable isblank()
+ - BUILD: mux_fcgi: avoid aliasing of a const struct in traces
+ - BUILD: tree-wide: mark a few numeric constants as explicitly long long
+ - BUILD: tools: fix warning about incorrect cast with dladdr1()
+ - BUILD: task: use list_to_mt_list() instead of casting list to mt_list
+ - BUILD: mworker: include tools.h for platforms without unsetenv()
+ - BUG/MINOR: mworker: fix a FD leak of a sockpair upon a failed reload
+ - MINOR: mworker: set the master side of ipc_fd in the worker to -1
+ - MINOR: mworker: allocate and initialize a mworker_proc
+ - CI: Consistently use actions/checkout@v2
+ - REGTESTS: Remove REQUIRE_VERSION=1.8 from all tests
+ - MINOR: mworker: sets used or closed worker FDs to -1
+ - MINOR: quic: Try to accept 0-RTT connections
+ - MINOR: quic: Do not try to treat 0-RTT packets without started mux
+ - MINOR: quic: Do not try to accept a connection more than one time
+ - MINOR: quic: Initialize the connection timer asap
+ - MINOR: quic: Do not use connection struct xprt_ctx too soon
+ - Revert "MINOR: mworker: sets used or closed worker FDs to -1"
+ - BUILD: makefile: avoid testing all -Wno-* options when not needed
+ - BUILD: makefile: validate support for extra warnings by batches
+ - BUILD: makefile: only compute alternative options if required
+ - DEBUG: fd: make sure we never try to insert/delete an impossible FD number
+ - MINOR: mux-quic: add comment
+ - MINOR: mux-quic: properly initialize qcc flags
+ - MINOR: mux-quic: do not consider CONNECTION_CLOSE for the moment
+ - MINOR: mux-quic: create a timeout task
+ - MEDIUM: mux-quic: delay the closing with the timeout
+ - MINOR: mux-quic: release idle conns on process stopping
+ - MINOR: listener: replace the listener's spinlock with an rwlock
+ - BUG/MEDIUM: listener: read-lock the listener during accept()
+ - MINOR: mworker/cli: set expert/experimental mode from the CLI
+
+2021/11/23 : 2.6-dev0
+ - MINOR: version: it's development again
+
+2021/11/23 : 2.5.0
+ - BUILD: SSL: add quictls build to scripts/build-ssl.sh
+ - BUILD: SSL: add QUICTLS to build matrix
+ - CLEANUP: sock: Wrap `accept4_broken = 1` into additional parenthesis
+ - BUILD: cli: clear a maybe-unused warning on some older compilers
+ - BUG/MEDIUM: cli: make sure we can report a warning from a bind keyword
+ - BUG/MINOR: ssl: make SSL counters atomic
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: ssl: free correctly the sni in the backend SSL cache
+ - MINOR: version: mention that it's stable now
+
+2021/11/19 : 2.5-dev15
+ - BUG/MINOR: stick-table/cli: Check for invalid ipv6 key
+ - CLEANUP: peers: Remove useless test on peer variable in peer_trace()
+ - DOC: log: Add comments to specify when session's listener is defined or not
+ - BUG/MEDIUM: mux-h1: Handle delayed silent shut in h1_process() to release H1C
+ - REGTESTS: ssl_crt-list_filters: feature cmd incorrectly set
+ - DOC: internals: document the list API
+ - BUG/MINOR: h3: ignore unknown frame types
+ - MINOR: quic: redirect app_ops snd_buf through mux
+ - MEDIUM: quic: inspect ALPN to install app_ops
+ - MINOR: quic: support hq-interop
+ - MEDIUM: quic: send version negotiation packet on unknown version
+ - BUG/MEDIUM: mworker: cleanup the listeners when reexecuting
+ - DOC: internals: document the scheduler API
+ - BUG/MINOR: quic: fix version negotiation packet generation
+ - CLEANUP: ssl: fix wrong #else commentary
+ - MINOR: config: support default values for environment variables
+ - SCRIPTS: run-regtests: reduce the number of processes needed to check options
+ - SCRIPT: run-regtests: avoid several calls to grep to test for features
+ - SCRIPT: run-regtests: avoid calling awk to compute the version
 - REGTEST: set retries count to zero for all tests that expect a 503
+ - REGTESTS: make tcp-check_min-recv fail fast
+ - REGTESTS: extend the default I/O timeouts and make them overridable
+ - BUG/MEDIUM: ssl: backend TLS resumption with sni and TLSv1.3
+ - BUG/MEDIUM: ssl: abort with the correct SSL error when SNI not found
+ - REGTESTS: ssl: test the TLS resumption
+ - BUILD: makefile: stop opening sub-shells for each and every command
+ - BUILD: makefile: reorder objects by build time
+ - BUG/MEDIUM: mux-h2: always process a pending shut read
+ - MINOR: quic_sock: missing CO_FL_ADDR_TO_SET flag
+ - MINOR: quic: Possible wrong connection identification
+ - MINOR: quic: Correctly pad UDP datagrams
+ - MINOR: quic: Support transport parameters draft TLS extension
+ - MINOR: quic: Anti-amplification implementation
+ - MINOR: quic: Wrong Initial packet connection initialization
+ - MINOR: quic: Wrong ACK range building
+ - MINOR: quic: Update some QUIC protocol errors
+ - MINOR: quic: Send CONNECTION_CLOSE frame upon TLS alert
+ - MINOR: quic: Wrong largest acked packet number parsing
+ - MINOR: quic: Add minimalistic support for stream flow control frames
+ - MINOR: quic: Wrong value for version negotiation packet 'Unused' field
+ - MINOR: quic: Support draft-29 QUIC version
+ - BUG/MINOR: quic: fix segfault on trace for version negotiation
+ - BUG/MINOR: hq-interop: fix potential NULL dereference
+ - BUILD: quic: fix potential NULL dereference on xprt_quic
+ - DOC: lua: documentation about the httpclient API
+ - BUG/MEDIUM: cache/cli: make "show cache" thread-safe
+ - BUG/MEDIUM: shctx: leave the block allocator when enough blocks are found
+ - BUG/MINOR: shctx: do not look for available blocks when the first one is enough
+ - MINOR: shctx: add a few BUG_ON() for consistency checks
+
+2021/11/14 : 2.5-dev14
+ - DEV: coccinelle: Remove unused `expression e`
+ - DEV: coccinelle: Add rule to use `istend()` where possible
+ - CLEANUP: Apply ist.cocci
+ - CLEANUP: Re-apply xalloc_size.cocci
+ - CLEANUP: halog: make the default usage message fit in small screens
+ - MINOR: h3/qpack: fix gcc11 warnings
+ - MINOR: mux-quic: fix gcc11 warning
+ - MINOR: h3: fix potential NULL dereference
+ - MINOR: quic: Fix potential null pointer dereference
+ - CLEANUP: halog: remove unused strl2ui()
+ - OPTIM: halog: improve field parser speed for modern compilers
+ - OPTIM: halog: skip fields 64 bits at a time when supported
+ - DEV: coccinelle: Add rule to use `isttrim()` where possible
+ - CLEANUP: Apply ist.cocci
+ - DEV: coccinelle: Add rule to use `chunk_istcat()` instead of `chunk_memcat()`
+ - DEV: coccinelle: Add rule to use `chunk_istcat()` instead of `chunk_strncat()`
+ - CLEANUP: Apply ist.cocci
+ - CLEANUP: chunk: Remove duplicated chunk_Xcat implementation
+ - CLEANUP: chunk: remove misleading chunk_strncat() function
+ - BUG/MINOR: cache: properly ignore unparsable max-age in quotes
+ - Revert "DEV: coccinelle: Add rule to use `chunk_istcat()` instead of `chunk_strncat()`"
+ - DOC: stats: fix location of the text representation
+ - DOC: internals: document the IST API
+ - BUG/MINOR: httpclient/lua: rcv freeze when no request payload
+ - BUG/MEDIUM: httpclient: channel_add_input() must use htx->data
+ - MINOR: promex: backend aggregated server check status
+ - DOC: config: Fix typo in ssl_fc_unique_id description
+ - BUG/MINOR: http-ana: Apply stop to the current section for http-response rules
+ - Revert "BUG/MINOR: http-ana: Don't eval front after-response rules if stopped on back"
+ - DOC: config: Be more explicit in "allow" actions description
+ - DOC: lua: Be explicit with the Reply object limits
+ - MINOR: mux-h1: Slightly Improve H1 traces
+ - BUG/MEDIUM: conn-stream: Don't reset CS flags on close
+ - CLEANUP: mworker: remove any relative PID reference
+ - MEDIUM: mworker: reexec in waitpid mode after successful loading
+ - MINOR: mworker: clarify starting/failure messages
+ - MINOR: mworker: only increment the number of reload in wait mode
+ - MINOR: mworker: implement a reload failure counter
+ - MINOR: mworker: ReloadFailed shown depending on failedreload
+ - MINOR: mworker: change the way we set PROC_O_LEAVING
+ - BUG/MINOR: mworker: doesn't launch the program postparser
+ - DOC: management: edit the "show proc" example to show the current output
+ - BUG/MEDIUM: httpclient/cli: free of unallocated hc->req.uri
+ - REGTESTS: httpclient/lua: add greater body values
+ - BUG/MINOR: mux-h2: Fix H2_CF_DEM_SHORT_READ value
+ - BUG/MINOR: pools: don't mark ourselves as harmless in DEBUG_UAF mode
+ - BUG/MEDIUM: connection: make cs_shutr/cs_shutw//cs_close() idempotent
+ - BUILD: makefile: simplify detection of libatomic
+
+2021/11/06 : 2.5-dev13
+ - SCRIPTS: git-show-backports: re-enable file-based filtering
+ - MINOR: jwt: Make invalid static JWT algorithms an error in `jwt_verify` converter
+ - MINOR: mux-h2: add trace on extended connect usage
+ - BUG/MEDIUM: mux-h2: reject upgrade if no RFC8441 support
+ - MINOR: stream/mux: implement websocket stream flag
+ - MINOR: connection: implement function to update ALPN
+ - MINOR: connection: add alternative mux_ops param for conn_install_mux_be
+ - MEDIUM: server/backend: implement websocket protocol selection
+ - MINOR: server: add ws keyword
+ - BUG/MINOR: resolvers: fix sent messages were counted twice
+ - BUG/MINOR: resolvers: throw log message if trash not large enough for query
+ - MINOR: resolvers/dns: split dns and resolver counters in dns_counter struct
+ - MEDIUM: resolvers: rename dns extra counters to resolvers extra counters
+ - BUG/MINOR: jwt: Fix jwt_parse_alg incorrectly returning JWS_ALG_NONE
+ - DOC: add QUIC instruction in INSTALL
+ - CLEANUP: halog: Remove dead stores
+ - DEV: coccinelle: Add ha_free.cocci
+ - CLEANUP: Apply ha_free.cocci
+ - DEV: coccinelle: Add rule to use `istnext()` where possible
+ - CLEANUP: Apply ist.cocci
+ - REGTESTS: Use `feature cmd` for 2.5+ tests (2)
+ - DOC: internals: move some API definitions to an "api" subdirectory
+ - MINOR: quic: Allocate listener RX buffers
+ - CLEANUP: quic: Remove useless code
+ - MINOR: quic: Enhance the listener RX buffering part
+ - MINOR: quic: Remove a useless lock for CRYPTO frames
+ - MINOR: quic: Use QUIC_LOCK QUIC specific lock label.
+ - MINOR: backend: Get client dst address to set the server's one only if needful
+ - MINOR: compression: Warn for 'compression offload' in defaults sections
+ - MEDIUM: connection: rename fc_conn_err and bc_conn_err to fc_err and bc_err
+ - DOC: configuration: move the default log formats to their own section
+ - MINOR: ssl: make the ssl_fc_sni() sample-fetch function always available
+ - MEDIUM: log: add the client's SNI to the default HTTPS log format
+ - DOC: config: add an example of reasonably complete error-log-format
+ - DOC: config: move error-log-format before custom log format
+
+2021/11/02 : 2.5-dev12
+ - MINOR: httpclient: support payload within a buffer
+ - MINOR: httpclient/lua: support more HTTP methods
+ - MINOR: httpclient/lua: return an error when it can't generate the request
+ - CLEANUP: lua: Remove any ambiguities about lua txn execution context flags
+ - BUG/MEDIUM: lua: fix invalid return types in hlua_http_msg_get_body
+ - CLEANUP: connection: No longer export make_proxy_line_v1/v2 functions
+ - CLEANUP: tools: Use const address for get_net_port() and get_host_port()
+ - CLEANUP: lua: Use a const address to retrieve info about a connection
+ - MINOR: connection: Add function to get src/dst without updating the connection
+ - MINOR: session: Add src and dst addresses to the session
+ - MINOR: stream-int: Add src and dst addresses to the stream-interface
+ - MINOR: frontend: Rely on client src and dst addresses at stream level
+ - MINOR: log: Rely on client addresses at the appropriate level to log messages
+ - MINOR: session: Rely on client source address at session level to log error
+ - MINOR: http-ana: Rely on addresses at stream level to set xff and xot headers
+ - MINOR: http-fetch: Rely on addresses at stream level in HTTP sample fetches
+ - MINOR: mux-fcgi: Rely on client addresses at stream level to set default params
+ - MEDIUM: tcp-sample: Rely on addresses at the appropriate level in tcp samples
+ - MEDIUM: connection: Rely on addresses at stream level to make proxy line
+ - MEDIUM: backend: Rely on addresses at stream level to init server connection
+ - MEDIUM: connection: Assign session addresses when PROXY line is received
+ - MEDIUM: connection: Assign session addresses when NetScaler CIP proto is parsed
+ - MEDIUM: tcp-act: Set addresses at the appropriate level in set-(src/dst) actions
+ - MINOR: tcp-act: Add set-src/set-src-port for "tcp-request content" rules
+ - DOC: config: Fix alphabetical order of fc_* samples
+ - MINOR: tcp-sample: Add samples to get original info about client connection
+ - REGTESTS: Add script to test client src/dst manipulation at different levels
+ - MINOR: stream: Use backend stream-interface dst address instead of target_addr
+ - BUILD: log: Fix compilation without SSL support
+ - DEBUG: protocol: yell loudly during registration of invalid sock_domain
+ - MINOR: protocols: add a new protocol type selector
+ - MINOR: protocols: make use of the protocol type to select the protocol
+ - MINOR: protocols: replace protocol_by_family() with protocol_lookup()
+ - MINOR: halog: Add -qry parameter allowing to preserve the query string in -uX
+ - CLEANUP: jwt: Remove the use of a trash buffer in jwt_jwsverify_hmac()
+ - CLEANUP: jwt: Remove the use of a trash buffer in jwt_jwsverify_rsa_ecdsa()
+ - DEV: coccinelle: Add realloc_leak.cocci
+ - CLEANUP: hlua: Remove obsolete branch in `hlua_alloc()`
+ - BUILD: atomic: prefer __atomic_compare_exchange_n() for __ha_cas_dw()
+ - BUILD: atomic: fix build on mac/arm64
+ - MINOR: atomic: remove the memcpy() call and dependency on string.h
+ - MINOR: httpclient: request streaming with a callback
+ - MINOR: httpclient/lua: handle the streaming into the lua applet
+ - REGTESTS: lua: test httpclient with body streaming
+ - DOC: halog: Move the `-qry` parameter into the correct section in help text
+ - MINOR: halog: Rename -qry to -query
+ - CLEANUP: halog: Use consistent indentation in help()
+ - BUG/MINOR: halog: Add missing newlines in die() messages
+ - MINOR: halog: Add support for extracting captures using -hdr
+ - DOC: Typo fixed "it" should be "is"
+ - BUG/MINOR: mux-h1: Save shutdown mode if the shutdown is delayed
+ - BUG/MEDIUM: mux-h1: Perform a connection shutdown when the h1c is released
+ - BUG/MEDIUM: resolvers: Don't recursively perform requester unlink
+ - BUG/MEDIUM: http-ana: Drain request data waiting the tarpit timeout expiration
+ - BUG/MINOR: http: Authorization value can have multiple spaces after the scheme
+ - BUG/MINOR: http: http_auth_bearer fetch does not work on custom header name
+ - BUG/MINOR: httpclient/lua: misplaced luaL_buffinit()
+ - BUILD/MINOR: cpuset freebsd build fix
+ - BUG/MINOR: httpclient: use a placeholder value for Host header
+ - BUG/MEDIUM: stream-int: Block reads if channel cannot receive more data
+ - BUG/MEDIUM: resolvers: Track api calls with a counter to free resolutions
+ - MINOR: stream: Improve dump of bogus streams
+ - DOC/peers: some grammar fixes for peers 2.1 spec
+ - MEDIUM: vars: make the var() sample fetch function really return type ANY
+ - MINOR: vars: add "set-var" for "tcp-request connection" rules.
+
+2021/10/22 : 2.5-dev11
+ - DEV: coccinelle: Add strcmp.cocci
+ - CLEANUP: Apply strcmp.cocci
+ - CI: Add `permissions` to GitHub Actions
+ - CI: Clean up formatting in GitHub Action definitions
+ - MINOR: add ::1 to predefined LOCALHOST acl
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CLEANUP: Consistently `unsigned int` for bitfields
+ - MEDIUM: resolvers: lower-case labels when converting from/to DNS names
+ - MEDIUM: resolvers: replace bogus resolv_hostname_cmp() with memcmp()
+ - MINOR: jwt: Empty the certificate tree during deinit
+ - MINOR: jwt: jwt_verify returns negative values in case of error
+ - MINOR: jwt: Do not rely on enum order anymore
+ - BUG/MEDIUM: stream: Keep FLT_END analyzers if a stream detects a channel error
+ - MINOR: httpclient/cli: access should be only done from expert mode
+ - DOC: management: doc about the CLI httpclient
+ - BUG/MEDIUM: tcpcheck: Properly catch early HTTP parsing errors
+ - BUG/MAJOR: dns: tcp session can remain attached to a list after a free
+ - BUG/MAJOR: dns: attempt to lock globally for msg waiter list instead of use barrier
+ - CLEANUP: dns: always detach the appctx from the dns session on release
+ - DEBUG: dns: add a few more BUG_ON at sensitive places
+ - BUG/MAJOR: resolvers: add other missing references during resolution removal
+ - CLEANUP: resolvers: do not export resolv_purge_resolution_answer_records()
+ - BUILD: resolvers: avoid a possible warning on null-deref
+ - BUG/MEDIUM: resolvers: always check a valid item in query_list
+ - CLEANUP: always initialize the answer_list
+ - CLEANUP: resolvers: simplify resolv_link_resolution() regarding requesters
+ - CLEANUP: resolvers: replace all LIST_DELETE with LIST_DEL_INIT
+ - MEDIUM: resolvers: use a kill list to preserve the list consistency
+ - MEDIUM: resolvers: remove the last occurrences of the "safe" argument
+ - BUG/MEDIUM: checks: fix the starting thread for external checks
+ - MEDIUM: resolvers: replace the answer_list with a (flat) tree
+ - MEDIUM: resolvers: hash the records before inserting them into the tree
+ - BUG/MAJOR: buf: fix varint API post- vs pre- increment
+ - OPTIM: resolvers: move the eb32 node before the data in the answer_item
+ - MINOR: list: add new macro LIST_INLIST_ATOMIC()
+ - OPTIM: dns: use an atomic check for the list membership
+ - BUG/MINOR: task: do not set TASK_F_USR1 for no reason
+ - BUG/MINOR: mux-h2: do not prevent from sending a final GOAWAY frame
+ - MINOR: connection: add a new CO_FL_WANT_DRAIN flag to force drain on close
+ - MINOR: mux-h2: perform a full cycle shutdown+drain on close
+ - CLEANUP: resolvers: get rid of single-iteration loop in resolv_get_ip_from_response()
+ - MINOR: quic: Increase the size of handshake RX UDP datagrams
+ - BUG/MEDIUM: lua: fix memory leaks with realloc() on non-glibc systems
+ - MINOR: memprof: report the delta between alloc and free on realloc()
+ - MINOR: memprof: add one pointer size to the size of allocations
+ - BUILD: fix compilation on NetBSD
+ - MINOR: backend: add traces for idle connections reuse
+ - BUG/MINOR: backend: fix improper insert in avail tree for always reuse
+ - MINOR: backend: improve perf with tcp proxies skipping idle conns
+ - MINOR: connection: remove unneeded memset 0 for idle conns
+
+2021/10/16 : 2.5-dev10
+ - MINOR: initcall: Rename __GLOBL and __GLOBL1.
+ - MINOR: rules: add a new function new_act_rule() to allocate act_rules
+ - MINOR: rules: add a file name and line number to act_rules
+ - MINOR: stream: report the current rule in "show sess all" when known
+ - MINOR: stream: report the current filter in "show sess all" when known
+ - CLEANUP: stream: Properly indent current_rule line in "show sess all"
+ - BUG/MINOR: lua: Fix lua error handling in `hlua_config_prepend_path()`
+ - CI: github: switch to OpenSSL 3.0.0
+ - REGTESTS: ssl: Fix references to removed option in test description
+ - MINOR: ssl: Add ssllib_name_startswith precondition
+ - REGTESTS: ssl: Fix ssl_errors test for OpenSSL v3
+ - REGTESTS: ssl: Reenable ssl_errors test for OpenSSL only
+ - REGTESTS: ssl: Use mostly TLSv1.2 in ssl_errors test
+ - MEDIUM: mux-quic: rationalize tx buffers between qcc/qcs
+ - MEDIUM: h3: properly manage tx buffers for large data
+ - MINOR: mux-quic: standardize h3 settings sending
+ - CLEANUP: h3: remove dead code
+ - MINOR: mux-quic: implement standard method to detect if qcc is dead
+ - MEDIUM: mux-quic: defer stream shut if remaining tx data
+ - MINOR: mux: remove last occurrences of qcc ring buffer
+ - MINOR: quic: handle CONNECTION_CLOSE frame
+ - REGTESTS: ssl: re-enable set_ssl_cert_bundle.vtc
+ - MINOR: ssl: add ssl_fc_is_resumed to "option httpslog"
+ - MINOR: http: Add http_auth_bearer sample fetch
+ - MINOR: jwt: Parse JWT alg field
+ - MINOR: jwt: JWT tokenizing helper function
+ - MINOR: jwt: Insert public certificates into dedicated JWT tree
+ - MINOR: jwt: jwt_header_query and jwt_payload_query converters
+ - MEDIUM: jwt: Add jwt_verify converter to verify JWT integrity
+ - REGTESTS: jwt: Add tests for the jwt_verify converter
+ - BUILD: jwt: fix declaration of EVP_KEY in jwt-h.h
+ - MINOR: proto_tcp: use chunk_appendf() to output socket setup errors
+ - MINOR: proto_tcp: also report the attempted MSS values in error message
+ - MINOR: inet: report the faulty interface name in "bind" errors
+ - MINOR: protocol: report the file and line number for binding/listening errors
+ - MINOR: protocol: uniformize protocol errors
+ - MINOR: resolvers: fix the resolv_str_to_dn_label() API about trailing zero
+ - BUG/MEDIUM: resolver: make sure to always use the correct hostname length
+ - BUG/MINOR: resolvers: do not reject host names of length 255 in SRV records
+ - MINOR: resolvers: fix the resolv_dn_label_to_str() API about trailing zero
+ - MEDIUM: listeners: split the thread mask between receiver and bind_conf
+ - MINOR: listeners: add clone_listener() to duplicate listeners at boot time
+ - MEDIUM: listener: add the "shards" bind keyword
+ - BUG/MEDIUM: resolvers: use correct storage for the target address
+ - MINOR: resolvers: merge address and target into a union "data"
+ - BUG/MEDIUM: resolvers: fix truncated TLD consecutive to the API fix
+ - BUG/MEDIUM: jwt: fix base64 decoding error detection
+ - BUG/MINOR: jwt: use CRYPTO_memcmp() to compare HMACs
+ - DOC: jwt: fix a typo in the jwt_verify() keyword description
+ - BUG/MEDIUM: sample/jwt: fix another instance of base64 error detection
+ - BUG/MINOR: http-ana: Don't eval front after-response rules if stopped on back
+ - BUG/MINOR: sample: Fix 'fix_tag_value' sample when waiting for more data
+ - DOC: config: Move 'tcp-response content' at the right place
+ - BUG/MINOR: proxy: Use .disabled field as a bitfield as documented
+ - MINOR: proxy: Introduce proxy flags to replace disabled bitfield
+ - MINOR: sample/arg: Be able to resolve args found in defaults sections
+ - MEDIUM: proxy: Warn about ambiguous use of named defaults sections
+ - MINOR: proxy: Be able to reference the defaults section used by a proxy
+ - MINOR: proxy: Add PR_FL_READY flag on fully configured and usable proxies
+ - MINOR: config: Finish configuration for referenced default proxies
+ - MINOR: config: No longer remove previous anonymous defaults section
+ - MINOR: tcpcheck: Support 2-steps args resolution in defaults sections
+ - MEDIUM: rules/acl: Parse TCP/HTTP rules and acls defined in defaults sections
+ - MEDIUM: tcp-rules: Eval TCP rules defined in defaults sections
+ - MEDIUM: http-ana: Eval HTTP rules defined in defaults sections
+ - BUG/MEDIUM: sample: Cumulate frontend and backend sample validity flags
+ - REGTESTS: Add scripts to test support of TCP/HTTP rules in defaults sections
+ - DOC: config: Add documentation about TCP/HTTP rules in defaults section
+ - DOC: config: Rework and uniformize how TCP/HTTP rules are documented
+ - BUG/MINOR: proxy: Release ACLs and TCP/HTTP rules of default proxies
+ - BUG/MEDIUM: cpuset: fix cpuset size for FreeBSD
+ - BUG/MINOR: sample: fix backend direction flags consecutive to last fix
+ - BUG/MINOR: listener: fix incorrect return on out-of-memory
+ - BUG/MINOR: listener: add an error check for unallocatable trash
+ - CLEANUP: listeners: remove unreachable code in clone_listener()
+
+2021/10/08 : 2.5-dev9
+ - head-truc
+ - REGTESTS: lua: test the httpclient:get() feature
+ - Revert "head-truc"
+ - BUG/MEDIUM: httpclient: replace ist0 by istptr
+ - MINOR: config: use a standard parser for the "nbthread" keyword
+ - CLEANUP: init: remove useless test against MAX_THREADS in affinity loop
+ - MEDIUM: init: de-uglify the per-thread affinity setting
+ - MINOR: init: extract the setup and end of threads to their own functions
+ - MINOR: log: Try to get the status code when MUX_EXIT_STATUS is retrieved
+ - MINOR: mux-h1: Set error code if possible when MUX_EXIT_STATUS is returned
+ - MINOR: mux-h1: Be able to set custom status code on parsing error
+ - MEDIUM: mux-h1: Reject HTTP/1.0 GET/HEAD/DELETE requests with a payload
+ - MEDIUM: h1: Force close mode for invalid uses of T-E header
+ - BUG/MINOR: mux-h1/mux-fcgi: Sanitize TE header to only send "trailers"
+ - MINOR: http: Add 422-Unprocessable-Content error message
+ - MINOR: h1: Change T-E header parsing to fail if chunked encoding is found twice
+ - BUG/MEDIUM: mux-h1/mux-fcgi: Reject messages with unknown transfer encoding
+ - REGTESTS: Add script to validate T-E header parsing
+ - REORG: pools: move default settings to defaults.h
+ - DOC: peers: fix doc "enable" statement on "peers" sections
+ - MINOR: Makefile: add MEMORY_POOLS to the list of DEBUG_xxx options
+ - MINOR: ssl: Set connection error code in case of SSL read or write fatal failure
+ - MINOR: ssl: Rename ssl_bc_hsk_err to ssl_bc_err
+ - MINOR: ssl: Store the last SSL error code in case of read or write failure
+ - REGTESTS: ssl: enable show_ssl_ocspresponse.vtc again
+ - REGTESTS: ssl: enable ssl_crt-list_filters.vtc again
+ - BUG/MEDIUM: lua: fix wakeup condition from sleep()
+ - BUG/MAJOR: lua: use task_wakeup() to properly run a task once
+ - MINOR: arg: Be able to forbid unresolved args when building an argument list
+ - BUG/MINOR: tcpcheck: Don't use arg list for default proxies during parsing
+ - BUG/MINOR: tcp-rules: Stop content rules eval on read error and end-of-input
+ - MINOR: tasks: catch TICK_ETERNITY with BUG_ON() in __task_queue()
+ - REGTESTS: ssl: show_ssl_ocspresponse w/ freebsd won't use base64
+ - REGTESTS: ssl: wrong feature cmd in show_ssl_ocspresponse.vtc
+ - CLEANUP: tasks: remove the long-unused work_lists
+ - MINOR: task: provide 3 task_new_* wrappers to simplify the API
+ - MINOR: time: uninline report_idle() and move it to task.c
+ - REORG: sched: move idle time calculation from time.h to task.h
+ - REORG: sched: move the stolen CPU time detection to sched_entering_poll()
+ - BUG/MEDIUM: filters: Fix a typo when a filter is attached blocking the release
+ - BUG/MEDIUM: http-ana: Clear request analyzers when applying redirect rule
+ - MINOR: httpclient: destroy() must free the headers and the ists
+ - MINOR: httpclient: set HTTPCLIENT_F_ENDED only in release
+ - MINOR: httpclient: stop_and_destroy() ask the applet to autokill
+ - MINOR: httpclient: test if started during stop_and_destroy()
+ - MINOR: httpclient/lua: implement garbage collection
+ - BUG/MEDIUM: httpclient/lua: crash because of b_xfer and get_trash_chunk()
+ - MINOR: httpclient: destroy checks if a client was started but not stopped
+ - BUG/MINOR: httpclient/lua: does not process headers when failed
+ - MINOR: httpclient/lua: supports headers via named arguments
+ - CLEANUP: server: always include the storage for SSL settings
+ - CLEANUP: sample: rename sample_conv_var2smp() to *_sint
+ - CLEANUP: sample: uninline sample_conv_var2smp_str()
+ - MINOR: sample: provide a generic var-to-sample conversion function
+ - BUG/MEDIUM: sample: properly verify that variables cast to sample
+ - BUILD: action: add the relevant structures for function arguments
+ - BUILD: extcheck: needs to include stream-t.h
+ - BUILD: hlua: needs to include stream-t.h
+ - BUILD: stats: define several missing structures in stats.h
+ - BUILD: resolvers: define missing types in resolvers.h
+ - BUILD: httpclient: include missing ssl_sock-t
+ - BUILD: sample: include openssl-compat
+ - BUILD: http_ana: need to include proxy-t to get redirect_rule
+ - BUILD: http_rules: requires http_ana-t.h for REDIRECT_*
+ - BUILD: vars: need to include xxhash
+ - BUILD: peers: need to include eb{32/mb/pt}tree.h
+ - BUILD: ssl_ckch: include ebpttree.h in ssl_ckch.c
+ - BUILD: compiler: add the container_of() and container_of_safe() macros
+ - BUILD: idleconns: include missing ebmbtree.h at several places
+ - BUILD: connection: connection.h needs list.h and server.h
+ - BUILD: tree-wide: add missing http_ana.h from many places
+ - BUILD: cfgparse-ssl: add missing errors.h
+ - BUILD: tcp_sample: include missing errors.h and session-t.h
+ - BUILD: mworker: mworker-prog needs time.h for the 'now' variable
+ - BUILD: tree-wide: add several missing activity.h
+ - BUILD: compat: fix -Wundef on SO_REUSEADDR
+ - CLEANUP: pools: pools-t.h doesn't need to include thread-t.h
+ - REORG: pools: uninline the UAF allocator and force-inline the rest
+ - REORG: thread: uninline the lock-debugging code
+ - MINOR: thread/debug: replace nsec_now() with now_mono_time()
+ - CLEANUP: remove some unneeded includes from applet-t.h
+ - REORG: listener: move bind_conf_alloc() and listener_state_str() to listener.c
+ - CLEANUP: listeners: do not include openssl-compat
+ - CLEANUP: servers: do not include openssl-compat
+ - REORG: ssl: move ssl_sock_is_ssl() to connection.h and rename it
+ - CLEANUP: mux_fcgi: remove dependency on ssl_sock
+ - CLEANUP: ssl/server: move ssl_sock_set_srv() to srv_set_ssl() in server.c
+ - REORG: ssl-sock: move the sslconns/totalsslconns counters to global
+ - REORG: sample: move the crypto samples to ssl_sample.c
+ - REORG: sched: moved samp_time and idle_time to task.c as well
+ - REORG: time/ticks: move now_ms and global_now_ms definitions to ticks.h
+ - CLEANUP: tree-wide: remove unneeded include time.h in ~20 files
+ - REORG: activity: uninline activity_count_runtime()
+ - REORG: activity: uninline sched_activity_entry()
+ - CLEANUP: stream: remove many unneeded includes from stream-t.h
+ - CLEANUP: stick-table: no need to include socket nor in.h
+ - MINOR: connection: use uint64_t for the hashes
+ - REORG: connection: move the hash-related stuff to connection.c
+ - REORG: connection: uninline conn_notify_mux() and conn_delete_from_tree()
+ - REORG: server: uninline the idle conns management functions
+ - REORG: ebtree: split structures into their own file ebtree-t.h
+ - CLEANUP: tree-wide: only include ebtree-t from type files
+ - REORG: connection: move the largest inlines from connection.h to connection.c
+ - CLEANUP: connection: do not include http_ana!
+ - CLEANUP: connection: remove unneeded tcpcheck-t.h and use only session-t.h
+ - REORG: connection: uninline the rest of the alloc/free stuff
+ - REORG: task: uninline the loop time measurement code
+ - CLEANUP: time: move a few configurable defines to defaults.h
+ - CLEANUP: fd: do not include time.h
+ - REORG: fd: uninline compute_poll_timeout()
+ - CLEANUP: wdt: use ha_tkill() instead of accessing pthread directly
+ - REORG: thread: move the thread init/affinity/stop to thread.c
+ - REORG: thread: move ha_get_pthread_id() to thread.c
+ - MINOR: thread: use a dedicated static pthread_t array in thread.c
+ - CLEANUP: thread: uninline ha_tkill/ha_tkillall/ha_cpu_relax()
+ - DOC: configuration: add clarification on escaping in keyword arguments
+ - BUG/MINOR: task: fix missing include with DEBUG_TASK
+ - MINOR: pools: report the amount used by thread caches in "show pools"
+ - MINOR: quic: Distinguish packet and SSL read enc. level in traces
+ - MINOR: quic: Add a function to dump SSL stack errors
+ - MINOR: quic: BUG_ON() SSL errors.
+ - MINOR: quic: Fix SSL error issues (do not use ssl_bio_and_sess_init())
+ - BUG/MEDIUM: mux-quic: reinsert all streams in by_id tree
+ - BUG/MAJOR: xprt-quic: do not queue qc timer if not set
+ - MINOR: mux-quic: release connection if no more bidir streams
+ - BUG/MAJOR: quic: remove qc from receiver cids tree on free
+ - BUG/MEDIUM: mux_h2: Handle others remaining read0 cases on partial frames
+ - MINOR: qpack: do not encode invalid http status code
+ - MINOR: qpack: support non-indexed http status code encoding
+ - MINOR: qpack: fix memory leak on huffman decoding
+ - CLEANUP: mux-quic: remove unused code
+ - BUG/MINOR: quic: fix includes for compilation
+ - BUILD: connection: avoid a build warning on FreeBSD with SO_USER_COOKIE
+ - BUILD: init: avoid a build warning on FreeBSD with USE_PROCCTL
+ - REORG: time: move time-keeping code and variables to clock.c
+ - REORG: clock: move the updates of cpu/mono time to clock.c
+ - MINOR: activity: get the run_time from the clock updates
+ - CLEANUP: clock: stop exporting before_poll and after_poll
+ - REORG: clock: move the clock_id initialization to clock.c
+ - REORG: clock/wdt: move wdt timer initialization to clock.c
+ - MINOR: clock: move the clock_ids to clock.c
+ - MINOR: wdt: move wd_timer to wdt.c
+ - CLEANUP: wdt: do not remap SI_TKILL to SI_LWP, test the values directly
+ - REORG: thread/sched: move the task_per_thread stuff to thread_ctx
+ - REORG: thread/clock: move the clock parts of thread_info to thread_ctx
+ - REORG: thread/sched: move the thread_info flags to the thread_ctx
+ - REORG: thread/sched: move the last dynamic thread_info to thread_ctx
+ - MINOR: thread: make "ti" a const pointer and clean up thread_info a bit
+ - MINOR: threads: introduce a minimalistic notion of thread-group
+ - MINOR: global: add a new "thread-groups" directive
+ - MINOR: global: add a new "thread-group" directive
+ - MINOR: threads: make tg point to the current thread's group
+ - MEDIUM: threads: automatically assign threads to groups
+ - MINOR: threads: set the group ID and its bit in the thread group
+ - MINOR: threads: set the tid, ltid and their bit in thread_cfg
+ - MEDIUM: threads: replace ha_set_tid() with ha_set_thread()
+ - MINOR: threads: add the current group ID in thread-local "tgid" variable
+ - MINOR: debug: report the group and thread ID in the thread dumps
+ - MEDIUM: listeners: support the definition of thread groups on bind lines
+ - MINOR: threads: add a new function to resolve config groups and masks
+ - MEDIUM: config: resolve relative threads on bind lines to absolute ones
+ - MEDIUM: stick-table: never learn the "conn_cur" value from peers
+
+2021/09/24 : 2.5-dev8
+ - BUILD: compiler: fixed a missing test on defined(__GNUC__)
+ - BUILD: halog: fix a -Wundef warning on non-glibc systems
+ - BUILD: threads: fix -Wundef for _POSIX_PRIORITY_SCHEDULING on libmusl
+ - BUG/MINOR: compat: make sure __WORDSIZE is always defined
+ - BUILD: sample: fix format warning on 32-bit archs in sample_conv_be2dec_check()
+ - CLEANUP: pools: factor all malloc_trim() calls into trim_all_pools()
+ - MINOR: pools: automatically disable malloc_trim() with external allocators
+ - MINOR: pools: report it when malloc_trim() is enabled
+ - DOC: Add .mailmap
+ - CLEANUP: tree-wide: fix prototypes for functions taking no arguments.
+ - CLEANUP: Remove prototype for non-existent thread_get_default_count()
+ - CLEANUP: acl: Remove unused variable when releasing an acl expression
+ - BUG/MAJOR: mux-h1: Don't eval input data if an error was reported
+ - DOC: update Tim's address in .mailmap
+ - MINOR: pools: use mallinfo2() when available instead of mallinfo()
+ - BUG/MINOR: tcpcheck: Improve LDAP response parsing to fix LDAP check
+ - DOC: management: certificate files must be sanitized before injection
+ - BUG/MINOR: connection: prevent null deref on mux cleanup task allocation
+ - BUILD: ist: prevent gcc11 maybe-uninitialized warning on istalloc
+ - BUG/MINOR: cli/payload: do not search for args inside payload
+ - BUILD: sockpair: do not set unused flag
+ - BUILD: proto_uxst: do not set unused flag
+ - BUILD: fd: remove unused variable totlen in fd_write_frag_line()
+ - MINOR: applet: remove the thread mask from appctx_new()
+ - REORG: threads: move ha_get_pthread_id() to tinfo.h
+ - CLEANUP: Apply ist.cocci
+ - DEV: coccinelle: Add ist.cocci
+ - CLEANUP: Apply bug_on.cocci
+ - DEV: coccinelle: Add xalloc_size.cocci
+ - DEV: coccinelle: Add bug_on.cocci
+ - CLEANUP: Apply xalloc_size.cocci
+ - DEV: coccinelle: Add xalloc_cast.cocci
+ - BUG/MINOR: flt-trace: fix an infinite loop when random-parsing is set
+ - MINOR: httpclient: add the EOH when no headers were provided
+ - CLEANUP: Include check.h in flt_spoe.c
+ - CLEANUP: Remove unreachable `break` from parse_time_err()
+ - BUG/MINOR: server: allow 'enable health' only if check configured
+ - BUG/MINOR: server: alloc dynamic srv ssl ctx if proxy uses ssl chk rule
+ - MINOR: server: enable more keywords for ssl checks for dynamic servers
+ - MINOR: server: enable more check related keywords for dynamic servers
+ - REORG: server: move slowstart init outside of checks
+ - MINOR: server: enable slowstart for dynamic server
+ - MEDIUM: listener: deprecate "process" in favor of "thread" on bind lines
+ - BUG/MEDIUM: leastconn: fix rare possibility of divide by zero
+ - BUG/MINOR: quic: Possible NULL pointer dereferencing when dumping streams.
+ - MINOR: quic: Move transport parameters to anonymous struct.
+ - MINOR: mux_quic: Add QUIC mux layer.
+ - MINOR: connection: Add callbacks definitions for QUIC.
+ - MINOR: quic: Attach QUIC mux connection object to QUIC connection.
+ - MINOR: quic: Add a new definition to store STREAM frames.
+ - MINOR: h3: Add HTTP/3 definitions.
+ - MINOR: qpack: Add QPACK compression.
+ - MINOR: quic_sock: Finalize the QUIC connections.
+ - MINOR: quic: Disable the action of ->rcv_buf() xprt callback
+ - MINOR: quic: Add callbacks for (un)subscribing to QUIC xprt.
+ - MINOR: quic: Variable-length integer encoding/decoding into/from buffer struct.
+ - BUG/MINOR: quic: Wrong ->accept() error handling
+ - MINOR: quic: Add a wrapper function to update transport parameters.
+ - MINOR: quic: Update the streams transport parameters.
+ - MINOR: quic: Avoid header collisions
+ - MINOR: quic: Replace max_packet_size by max_udp_payload size.
+ - MINOR: quic: Enable some quic, h3 and qpack modules compilation.
+ - MINOR: quic: Move an SSL func call from QUIC I/O handler to the xprt init.
+ - MINOR: quic: Initialize the session before starting the xprt.
+ - BUG/MINOR: quic: Do not check the acception of a new conn from I/O handler.
+ - MINOR: quic: QUIC conn initialization from I/O handler
+ - MINOR: quic: Remove header protection for conn with context
+ - MINOR: quic: Derive the initial secrets asap
+ - MINOR: quic: Remove header protection also for Initial packets
+ - BUG/MINOR: quic: Wrong memory free in quic_update_ack_ranges_list()
+ - MINOR: quic: quic_update_ack_ranges_list() code factorization
+ - MINOR: quic: Useless test in quic_update_ack_ranges_list()
+ - MINOR: quic: Remove a useless variable in quic_update_ack_ranges_list()
+ - BUG/MINOR: quic: Missing cases treatment when updating ACK ranges
+ - CLEANUP: quic: Usage of a useless variable in qc_treat_rx_pkts()
+ - BUG/MINOR: quic: Wrong RX packet reference counter usage
+ - MINOR: quic: Do not stop the packet parsing too early in qc_treat_rx_packets()
+ - MINOR: quic: Add a lock for RX packets
+ - MINOR: quic: Move the connection state
+ - MINOR: quic: Replace quic_conn_ctx struct by ssl_sock_ctx struct
+ - MINOR: quic: Replace the RX list of packet by a thread safety one.
+ - MINOR: quic: Replace the RX unprotected packet list by a thread safety one.
+ - MINOR: quic: Add useful traces for I/O dgram handler
+ - MINOR: quic: Do not wakeup the xprt task on ACK receipt
+ - MINOR: quic: Connection allocations rework
+ - MINOR: quic: Move conn_prepare() to ->accept_conn() callback
+ - MINOR: quic: Make qc_lstnr_pkt_rcv() be thread safe.
+ - MINOR: quic: Add a ring buffer implementation for QUIC
+ - MINOR: quic: Prefer x25519 as ECDH preferred parameters.
+ - MINOR: quic: Add the QUIC v1 initial salt.
+ - BUG/MINOR: quic: Too much reduced computed space to build handshake packets
+ - MINOR: net_helper: add functions for pointers
+ - MINOR: quic: Add ring buffer definition (struct qring) for QUIC
+ - MINOR: proto_quic: Allocate TX ring buffers for listeners
+ - MINOR: quic: Initialize pointers to TX ring buffer list
+ - MINOR: quic: Make use of TX ring buffers to send QUIC packets
+ - MINOR: quic_tls: Make use of the QUIC V1 salt.
+ - MINOR: quic: Remove old TX buffer implementation
+ - MINOR: Add function for TX packets reference counting
+ - MINOR: quic: Add TX packets at the very last time to their tree.
+ - MINOR: quic: Uninitialized mux context upon Client Hello message receipt.
+ - MINOR: quic: Missing encryption level rx.crypto member initialization and lock.
+ - MINOR: quic: Rename ->rx.rwlock of quic_enc_level struct to ->rx.pkts_rwlock
+ - MINOR: quic: Make qc_treat_rx_pkts() be thread safe.
+ - MINOR: quic: Make ->tx.frms quic_pktns struct member be thread safe
+ - MINOR: quic: Replace quic_tx_frm struct by quic_frame struct
+ - MINOR: quic: Add a mask for TX frame builders and their authorized packet types
+ - MINOR: quic: Add a useful function to compute any frame length.
+ - MINOR: quic: Add the QUIC connection state to traces
+ - MINOR: quic: Store post handshake frame in ->pktns.tx.frms MT_LIST
+ - MINOR: quic: Add the packet type to quic_tx_packet struct
+ - MINOR: quic: Modify qc_do_build_hdshk_pkt() to accept any packet type
+ - MINOR: quic: Atomically handle packet number space ->largest_acked_pn variable
+ - MINOR: quic: Modify qc_build_cfrms() to support any frame
+ - MINOR: quic: quic_conn_io_cb() task rework
+ - MINOR: quic: Make qc_build_hdshk_pkt() atomically consume a packet number
+ - MINOR: quic: qc_do_build_hdshk_pkt() does not need to pass a copy of CRYPTO frame
+ - MINOR: quic: Remove Application level related functions
+ - MINOR: quic: Rename functions which do not build only Handshake packets
+ - MINOR: quic: Make circular buffer internal buffers be variable-sized.
+ - MINOR: quic: Add a pool for TX ring buffer internal buffer
+ - MINOR: quic: Make use of the last cbuf API when initializing TX ring buffers
+ - MINOR: quic: Missing acks encoded size updates.
+ - MINOR: quic: Evaluate the packet lengths in advance
+ - MINOR: quic: Update the TLS extension for QUIC transport parameters
+ - MINOR: quic: Fix handshake state debug strings
+ - MINOR: quic: Atomically get/set the connection state
+ - MINOR: quic: Missing QUIC encryption level for qc_build_pkt()
+ - MINOR: quic: Coalesce Application level packets with Handshake packets.
+ - MINOR: quic: Wrong flags handling for acks
+ - MINOR: quic: Missing case when discarding HANDSHAKE secrets
+ - MINOR: quic: Post handshake packet building improvements
+ - MINOR: quic: Prepare Application level packet asap.
+ - MINOR: h3: Send h3 settings asap
+ - MINOR: quic: Wrong STREAM frame length computing
+ - MINOR: quic: Wrong short packet minimum length
+ - MINOR: quic: Prepare STREAM frames to fill QUIC packets
+ - MINOR: h3: change default settings
+ - MINOR: quic-enc: fix varint encoding
+ - MINOR: qpack: fix wrong comment
+ - MINOR: qpack: generate headers list on decoder
+ - MINOR: h3: parse headers to htx
+ - MINOR: h3: allocate stream on headers
+ - MEDIUM: mux-quic: implement ring buffer on stream tx
+ - MINOR: mux-quic: send SETTINGS on uni stream
+ - MINOR: h3: define snd_buf callback and divert mux ops
+ - MINOR: mux-quic: define FIN stream flag
+ - MINOR: qpack: create qpack-enc module
+ - MINOR: qpack: encode headers functions
+ - MINOR: h3: encode htx headers to QPACK
+ - MINOR: h3: send htx data
+ - MINOR: h3/mux: detect fin on last h3 frame of the stream
+ - MINOR: quic: Shorten some handshakes
+ - MINOR: quic: Make QUIC-TLS support at least two initial salts
+ - MINOR: quic: Attach the QUIC connection to a thread.
+ - MINOR: quic: Missing active_connection_id_limit default value
+ - MINOR: quic_sock: Do not flag QUIC connections as being set
+ - MINOR: buf: Add b_force_xfer() function
+ - MINOR: quic: Make use of buffer structs to handle STREAM frames
+ - MINOR: mux_quic: move qc_process() code to qc_send()
+ - MINOR: quic: Add a typedef for unsigned long long
+ - MINOR: quic: Confusion between TX/RX for the frame builders
+ - MINOR: quic: Wrong packet flags settings during frame building
+ - MINOR: quic: Constantness fixes for frame builders/parsers.
+ - MINOR: quic_tls: Client/server state reordering
+ - MINOR: quic: Wrong packet loss detection due to wrong pktns order
+ - MINOR: quic: Wrong packet number space selection in quic_loss_pktns()
+ - MINOR: quic: Initial packet number spaced not discarded
+ - MINOR: quic: Add useful trace about pktns discarding
+ - MINOR: mux_quic: Export the mux related flags
+ - MINOR: quic: Implement quic_conn_subscribe()
+ - MINOR: quic: Wake up the mux upon ACK receipt
+ - MINOR: quic: Stream FIN bit fix in qcs_push_frame()
+ - MINOR: quic: Implement qc_process_mux()
+ - MINOR: quic: Wake up the xprt from mux
+ - CLEANUP: quic: Remove useless inline functions
+ - MINOR: quic: RX packets memory leak
+ - MINOR: quic: Possible endless loop in qc_treat_rx_pkts()
+ - MINOR: quic: Crash upon too big packets receipt
+ - MINOR: quic: define close handler
+ - MEDIUM: quic: implement mux release/conn free
+ - MINOR: quic: fix qcc subs initialization
+ - BUG/MINOR: h1-htx: Fix a typo when request parser is reset
+ - BUG/MEDIUM: mux-h1: Adjust conditions to ask more space in the channel buffer
+ - BUG/MEDIUM: stream-int: Notify stream that the mux wants more room to xfer data
+ - BUG/MEDIUM: stream: Stop waiting for more data if SI is blocked on RXBLK_ROOM
+ - MINOR: stream-int: Set CO_RFL transient/persistent flags apart in si_cs_rcv()
+ - MINOR: htx: Add an HTX flag to know when a message is fragmented
+ - MINOR: htx: Add a function to know if the free space wraps
+ - BUG/MEDIUM: stream-int: Defrag HTX message in si_cs_recv() if necessary
+ - MINOR: stream-int: Notify mux when the buffer is not stuck when calling rcv_buf
+ - BUG/MINOR: http-ana: increment internal_errors counter on response error
+ - MINOR: stats: Enable dark mode on stat web page
+ - CLEANUP: stats: Fix some alignment mistakes
+ - MINOR: httpclient: httpclient_data() returns the available data
+ - MINOR: httpclient: httpclient_ended() returns 1 if the client ended
+ - MINOR: httpclient/lua: httpclient:get() API in lua
+ - MINOR: httpclient/lua: implement the headers in the response object
+ - BUG/MINOR: httpclient/lua: return an error on argument check
+ - CLEANUP: slz: Mark `reset_refs` as static
+
+2021/09/12 : 2.5-dev7
+ - BUG/MINOR: config: reject configs using HTTP with bufsize >= 256 MB
+ - CLEANUP: htx: remove comments about "must be < 256 MB"
+ - BUG/MAJOR: htx: fix missing header name length check in htx_add_header/trailer
+ - Revert "BUG/MINOR: stream-int: Don't block reads in si_update_rx() if chn may receive"
+ - MINOR: proxy: add a global "grace" directive to postpone soft-stop
+ - MINOR: vars: rename vars_init() to vars_init_head()
+ - CLEANUP: vars: rename sample_clear_stream() to var_unset()
+ - REORG: vars: remerge sample_store{,_stream}() into var_set()
+ - MEDIUM: vars: make the ifexist variant of set-var only apply to the proc scope
+ - MINOR: vars: add a VF_CREATEONLY flag for creation
+ - MINOR: vars: support storing empty sample data with a variable
+ - MINOR: vars: store flags into variables and add VF_PERMANENT
+ - MEDIUM: vars: make var_clear() only reset VF_PERMANENT variables
+ - MEDIUM: vars: pre-create parsed SCOPE_PROC variables as permanent ones
+ - MINOR: vars: preset a random seed to hash variables names
+ - MEDIUM: vars: replace the global name index with a hash
+ - CLEANUP: vars: remove the now unused var_names array
+ - MINOR: vars: centralize the lock/unlock into static inlines
+ - OPTIM: vars: only takes the variables lock on shared entries
+ - OPTIM: vars: remove internal bookkeeping for vars_global_size
+ - OPTIM: vars: do not keep variables usage stats if no limit is set
+ - BUILD: fix dragonfly build again on __read_mostly
+ - CI: Github Actions: temporarily disable Opentracing
+ - BUG/MEDIUM: mux-h1: Remove "Upgrade:" header for requests with payload
+ - MINOR: htx: Skip headers with no value when adding a header list to a message
+ - CLEANUP: mux-h1: Remove condition rejecting upgrade requests with payload
+ - BUG/MEDIUM: stream-int: Don't block SI on a channel policy if EOI is reached
+ - BUG/MEDIUM: http-ana: Reset channels analysers when returning an error
+ - BUG/MINOR: filters: Set right FLT_END analyser depending on channel
+ - CLEANUP: Add haproxy/xxhash.h to avoid modifying import/xxhash.h
+ - CLEANUP: ebmbtree: Replace always-taken elseif by else
+ - CLEANUP: Move XXH3 macro from haproxy/compat.h to haproxy/xxhash.h
+ - BUILD: opentracing: exclude the use of haproxy variables for the OpenTracing context
+ - BUG/MINOR: opentracing: enable the use of http headers without a set value
+ - CLEANUP: opentracing: use the haproxy function to generate uuid
+ - MINOR: opentracing: change the scope of the variable 'ot.uuid' from 'sess' to 'txn'
+ - CI: Github Actions: re-enable Opentracing
+ - CLEANUP: opentracing: simplify the condition on the empty header
+ - BUG/MEDIUM: lua: Add missing call to RESET_SAFE_LJMP in hlua_filter_new()
+
+2021/09/03 : 2.5-dev6
+ - BUG/MINOR: threads: Use get_(local|gm)time instead of (local|gm)time
+ - BUG/MINOR: tools: Fix loop condition in dump_text()
+ - BUILD: ssl: next round of build warnings on LIBRESSL_VERSION_NUMBER
+ - BUILD: ssl: fix two remaining occurrences of #if USE_OPENSSL
+ - BUILD: tools: properly guard __GLIBC__ with defined()
+ - BUILD: globally enable -Wundef
+ - MINOR: log: Remove log-error-via-logformat option
+ - MINOR: log: Add new "error-log-format" option
+ - BUG/MAJOR: queue: better protect a pendconn being picked from the proxy
+ - CLEANUP: Add missing include guard to signal.h
+ - MINOR: ssl: Add new ssl_bc_hsk_err sample fetch
+ - MINOR: connection: Add a connection error code sample fetch for backend side
+ - REGTESTS: ssl: Add tests for bc_conn_err and ssl_bc_hsk_err sample fetches
+ - MINOR: http-rules: add a new "ignore-empty" option to redirects.
+ - CI: Github Actions: temporarily disable BoringSSL builds
+ - BUG/MINOR: vars: fix set-var/unset-var exclusivity in the keyword parser
+ - BUG/MINOR: vars: improve accuracy of the rules used to check expression validity
+ - MINOR: sample: add missing ARGC_ entries
+ - BUG/MINOR: vars: properly set the argument parsing context in the expression
+ - DOC: configuration: remove wrong tcp-request examples in tcp-response
+ - MEDIUM: vars: add a new "set-var-fmt" action
+ - BUG/MEDIUM: vars: run over the correct list in release_store_rules()
+ - BUG/MINOR: vars: truncate the variable name in error reports about scope.
+ - BUG/MINOR: vars: do not talk about global section in CLI errors for set-var
+ - CLEANUP: vars: name the temporary proxy "CFG" instead of "CLI" for global vars
+ - MINOR: log: make log-format expressions completely usable outside of req/resp
+ - MINOR: vars: add a "set-var-fmt" directive to the global section
+ - MEDIUM: vars: also support format strings in CLI's "set var" command
+ - CLEANUP: vars: factor out common code from vars_get_by_{desc,name}
+ - MINOR: vars: make vars_get_by_* support an optional default value
+ - MINOR: vars: make the vars() sample fetch function support a default value
+ - BUILD: ot: add argument for default value to vars_get_by_name()
+
+2021/08/28 : 2.5-dev5
+ - MINOR: httpclient: initialize the proxy
+ - MINOR: httpclient: implement a simple HTTP Client API
+ - MINOR: httpclient/cli: implement a simple client over the CLI
+ - MINOR: httpclient/cli: change the User-Agent to "HAProxy"
+ - MEDIUM: ssl: Keep a reference to the client's certificate for use in logs
+ - BUG/MEDIUM: h2: match absolute-path not path-absolute for :path
+ - BUILD/MINOR: ssl: Fix compilation with OpenSSL 1.0.2
+ - MINOR: server: check if srv is NULL in free_server()
+ - MINOR: proxy: check if p is NULL in free_proxy()
+ - BUG/MEDIUM: cfgparse: do not allocate IDs to automatic internal proxies
+ - BUG/MINOR: http_client: make sure to preset the proxy's default settings
+ - REGTESTS: http_upgrade: fix incorrect expectation on TCP->H1->H2
+ - REGTESTS: abortonclose: after retries, 503 is expected, not close
+ - REGTESTS: server: fix agent-check syntax and expectation
+ - BUG/MINOR: httpclient: fix uninitialized sl variable
+ - BUG/MINOR: httpclient/cli: change the appctx test in the callbacks
+ - BUG/MINOR: httpclient: check if hdr_num is not 0
+ - MINOR: httpclient: cleanup the include files
+ - MINOR: hlua: take the global Lua lock inside a global function
+ - MINOR: tools: add FreeBSD support to get_exec_path()
+ - BUG/MINOR: systemd: ExecStartPre must use -Ws
+ - MINOR: systemd: remove the ExecStartPre line in the unit file
+ - MINOR: ssl: add an openssl version string parser
+ - MINOR: cfgcond: implements openssl_version_atleast and openssl_version_before
+ - CLEANUP: ssl: remove useless check on p in openssl_version_parser()
+ - BUG/MINOR: stick-table: fix the sc-set-gpt* parser when using expressions
+ - BUG/MINOR: httpclient: remove deinit of the httpclient
+ - BUG/MEDIUM: base64: check output boundaries within base64{dec,urldec}
+ - MINOR: httpclient: set verify none on the https server
+ - MINOR: httpclient: add the server to the proxy
+ - BUG/MINOR: httpclient: fix Host header
+ - BUILD: httpclient: fix build without OpenSSL
+ - CI: github-actions: remove obsolete options
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MINOR: proc: setting the process to produce a core dump on FreeBSD.
+ - BUILD: adopt script/build-ssl.sh for OpenSSL-3.0.0beta2
+ - MINOR: server: return the next srv instance on free_server
+ - BUG/MINOR: stats: use refcount to protect dynamic server on dump
+ - MEDIUM: server: extend refcount for all servers
+ - MINOR: server: define non purgeable server flag
+ - MINOR: server: mark referenced servers as non purgeable
+ - MINOR: server: mark servers referenced by LUA script as non purgeable
+ - MEDIUM: server: allow to remove servers at runtime except non purgeable
+ - BUG/MINOR: base64: base64urldec() ignores padding in output size check
+ - REGTEST: add missing lua requirements on server removal test
+ - REGTEST: fix haproxy required version for server removal test
+ - BUG/MINOR: proxy: don't dump servers of internal proxies
+ - REGTESTS: Use `feature cmd` for 2.5+ tests
+ - REGTESTS: Remove REQUIRE_VERSION=1.5 from all tests
+ - BUG/MINOR: resolvers: mark servers with name-resolution as non purgeable
+ - MINOR: compiler: implement an ONLY_ONCE() macro
+ - BUG/MINOR: lua: use strlcpy2() not strncpy() to copy sample keywords
+ - MEDIUM: ssl: Capture more info from Client Hello
+ - MINOR: sample: Expose SSL captures using new fetchers
+ - MINOR: sample: Add be2dec converter
+ - MINOR: sample: Add be2hex converter
+ - MEDIUM: config: Deprecate tune.ssl.capture-cipherlist-size
+ - BUG/MINOR: time: fix idle time computation for long sleeps
+ - MINOR: time: add report_idle() to report process-wide idle time
+ - BUG/MINOR: ebtree: remove dependency on incorrect macro for bits per long
+ - BUILD: activity: use #ifdef not #if on USE_MEMORY_PROFILING
+ - BUILD/MINOR: defaults: eliminate warning on MAXHOSTNAMELEN with -Wundef
+ - BUILD/MINOR: ssl: avoid a build warning on LIBRESSL_VERSION with -Wundef
+ - IMPORT: slz: silence a build warning with -Wundef
+ - BUILD/MINOR: regex: avoid a build warning on USE_PCRE2 with -Wundef
+
+2021/08/17 : 2.5-dev4
+ - MINOR: log: rename 'dontloglegacyconnerr' to 'log-error-via-logformat'
+ - MINOR: doc: rename conn_status in `option httpslog`
+ - MINOR: proxy: disabled takes a stopping and a disabled state
+ - MINOR: stats: shows proxy in a stopped state
+ - BUG/MINOR: server: fix race on error path of 'add server' CLI if track
+ - CLEANUP: thread: fix fantaisist indentation of thread_harmless_till_end()
+ - MINOR: threads: make thread_release() not wait for other ones to complete
+ - MEDIUM: threads: add a stronger thread_isolate_full() call
+ - MEDIUM: servers: make the server deletion code run under full thread isolation
+ - BUG/MINOR: server: remove srv from px list on CLI 'add server' error
+ - MINOR: activity/fd: remove the dead_fd counter
+ - MAJOR: fd: get rid of the DWCAS when setting the running_mask
+ - CLEANUP: fd: remove the now unused fd_set_running()
+ - CLEANUP: fd: remove the now unneeded fd_mig_lock
+ - BUG/MINOR: server: update last_change on maint->ready transitions too
+ - MINOR: spoe: Add a pointer on the filter config in the spoe_agent structure
+ - BUG/MEDIUM: spoe: Create a SPOE applet if necessary when the last one is released
+ - BUG/MEDIUM: spoe: Fix policy to close applets when SPOE connections are queued
+ - MINOR: server: unmark deprecated on enable health/agent cli
+ - MEDIUM: task: implement tasklet kill
+ - MINOR: server: initialize fields for dynamic server check
+ - MINOR: check: allocate default check ruleset for every backends
+ - MINOR: check: export check init functions
+ - MINOR: check: do not increment global maxsock at runtime
+ - MINOR: server: implement a refcount for dynamic servers
+ - MEDIUM: check: implement check deletion for dynamic servers
+ - MINOR: check: enable safe keywords for dynamic servers
+ - MEDIUM: server: implement check for dynamic servers
+ - MEDIUM: server: implement agent check for dynamic servers
+ - REGTESTS: server: add dynamic check server test
+ - MINOR: doc: specify ulimit-n usage for dynamic servers
+ - REGTESTS: server: fix dynamic server with checks test
+ - CI: travis-ci: temporarily disable arm64 builds
+ - BUG/MINOR: check: test if server is not null in purge
+ - MINOR: global: define MODE_STOPPING
+ - BUG/MINOR: server: do not use refcount in free_server in stopping mode
+ - ADMIN: dyncookie: implement a simple dynamic cookie calculator
+ - BUG/MINOR: check: do not reset check flags on purge
+ - BUG/MINOR: check: fix leak on add dynamic server with agent-check error
+ - BUG/MEDIUM: check: fix leak on agent-check purge
+ - BUG/MEDIUM: server: support both check/agent-check on a dynamic instance
+ - BUG/MINOR: buffer: fix buffer_dump() formatting
+ - MINOR: channel: remove an htx block from a channel
+ - BUG/MINOR: tcpcheck: Properly detect pending HTTP data in output buffer
+ - BUG/MINOR: stream: Don't release a stream if FLT_END is still registered
+ - MINOR: lua: Add a flag on lua context to know the yield capability at run time
+ - BUG/MINOR: lua: Yield in channel functions only if lua context can yield
+ - BUG/MINOR: lua: Don't yield in channel.append() and channel.set()
+ - MINOR: filters/lua: Release filters before the lua context
+ - MINOR: lua: Add a function to get a reference on a table in the stack
+ - MEDIUM: lua: Process buffer data using an offset and a length
+ - MEDIUM: lua: Improve/revisit the lua api to manipulate channels
+ - DOC: Improve the lua documentation
+ - MEDIUM: filters/lua: Add support for dummy filters written in lua
+ - MINOR: lua: Add a function to get a filter attached to a channel class
+ - MINOR: lua: Add flags on the lua TXN to know the execution context
+ - MEDIUM: filters/lua: Be prepared to filter TCP payloads
+ - MEDIUM: filters/lua: Support declaration of some filter callback functions in lua
+ - MEDIUM: filters/lua: Add HTTPMessage class to help HTTP filtering
+ - MINOR: filters/lua: Add request and response HTTP messages in the lua TXN
+ - MINOR: filters/lua: Support the HTTP filtering from filters written in lua
+ - DOC: config: Fix 'http-response send-spoe-group' documentation
+ - BUG/MINOR: lua: Properly check negative offset in Channel/HttpMessage functions
+ - BUG/MINOR: lua: Properly catch alloc errors when parsing lua filter directives
+ - BUG/MEDIUM: cfgcheck: verify existing log-forward listeners during config check
+ - MINOR: cli: declare the CLI frontend as an internal proxy
+ - MINOR: proxy: disable warnings for internal proxies
+ - BUG/MINOR: filters: Always set FLT_END analyser when CF_FLT_ANALYZE flag is set
+ - BUG/MINOR: lua/filters: Return right code when txn:done() is called
+ - DOC: lua-api: Add documentation about lua filters
+ - CI: Remove obsolete USE_SLZ=1 CI job
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: github actions: relax OpenSSL-3.0.0 version comparison
+ - BUILD: tools: get the absolute path of the current binary on NetBSD.
+ - DOC: Minor typo fix - 'question mark' -> 'exclamation mark'
+ - DOC/MINOR: fix typo in management document
+ - MINOR: http: add a new function http_validate_scheme() to validate a scheme
+ - BUG/MAJOR: h2: verify early that non-http/https schemes match the valid syntax
+ - BUG/MAJOR: h2: verify that :path starts with a '/' before concatenating it
+ - BUG/MAJOR: h2: enforce stricter syntax checks on the :method pseudo-header
+ - BUG/MEDIUM: h2: give :authority precedence over Host
+ - REGTESTS: add a test to prevent h2 desync attacks
+
+2021/08/01 : 2.5-dev3
+ - BUG/MINOR: arg: free all args on make_arg_list()'s error path
+ - BUG/MINOR: cfgcond: revisit the condition freeing mechanism to avoid a leak
+ - MEDIUM: proxy: remove long-broken 'option http_proxy'
+ - CLEANUP: http_ana: Remove now unused label from http_process_request()
+ - MINOR: deinit: always deinit the init_mutex on failed initialization
+ - BUG/MEDIUM: cfgcond: limit recursion level in the condition expression parser
+ - BUG/MEDIUM: mworker: do not register an exit handler if exit is expected
+ - BUG/MINOR: mworker: do not export HAPROXY_MWORKER_REEXEC across programs
+ - BUILD/MINOR: memprof: fix macOS build.
+ - BUG/MEDIUM: ssl_sample: fix segfault for srv samples on invalid request
+ - BUG/MINOR: stats: Add missing agent stats on servers
+ - BUG/MINOR: check: fix the condition to validate a port-less server
+ - BUILD: threads: fix pthread_mutex_unlock when !USE_THREAD
+ - BUG/MINOR: resolvers: Use a null-terminated string to lookup in servers tree
+ - MINOR: ssl: use __objt_* variant when retrieving counters
+ - BUG/MINOR: systemd: must check the configuration using -Ws
+ - BUG/MINOR: mux-h1: Obey dontlognull option for empty requests
+ - BUG/MINOR: mux-h2: Obey dontlognull option during the preface
+ - BUG/MINOR: mux-h1: Be sure to swap H1C to splice mode when rcv_pipe() is called
+ - BUG/MEDIUM: mux-h2: Handle remaining read0 cases on partial frames
+ - MINOR: proxy: rename PR_CAP_LUA to PR_CAP_INT
+ - MINOR: mworker: the mworker CLI proxy is internal
+ - MINOR: stats: don't output internal proxies (PR_CAP_INT)
+ - CLEANUP: mworker: use the proxy helper functions in mworker_cli_proxy_create()
+ - CLEANUP: mworker: PR_CAP already initialized with alloc_new_proxy()
+ - BUG/MINOR: connection: Add missing error labels to conn_err_code_str
+ - MINOR: connection: Add a connection error code sample fetch
+ - MINOR: ssl: Enable error fetches in case of handshake error
+ - MINOR: ssl: Add new ssl_fc_hsk_err sample fetch
+ - MINOR: ssl: Define a default https log format
+ - MEDIUM: connection: Add option to disable legacy error log
+ - REGTESTS: ssl: Add tests for the connection and SSL error fetches
+ - REGTESTS: ssl: ssl_errors.vtc does not work with old openssl version
+ - BUG/MEDIUM: connection: close a rare race between idle conn close and takeover
+ - BUG/MEDIUM: pollers: clear the sleeping bit after waking up, not before
+ - BUG/MINOR: select: fix excess number of dead/skip reported
+ - BUG/MINOR: poll: fix abnormally high skip_fd counter
+ - BUG/MINOR: pollers: always program an update for migrated FDs
+ - BUG/MINOR: fd: protect fd state harder against a concurrent takeover
+ - DOC: internals: document the FD takeover process
+ - MINOR: fd: update flags only once in fd_update_events()
+ - MINOR: poll/epoll: move detection of RDHUP support earlier
+ - REORG: fd: uninline fd_update_events()
+ - MEDIUM: fd: rely more on fd_update_events() to detect changes
+ - BUG/MINOR: freq_ctr: use stricter barriers between updates and readings
+ - MEDIUM: atomic: simplify the atomic load/store/exchange operations
+ - MEDIUM: atomic: relax the load/store barriers on x86_64
+ - BUILD: opentracing: fixed build when using pkg-config utility
+
+2021/07/17 : 2.5-dev2
+ - BUILD/MEDIUM: tcp: set-mark support for OpenBSD
+ - DOC: config: use CREATE USER for mysql-check
+ - BUG/MINOR: stick-table: fix several printf sign errors dumping tables
+ - BUG/MINOR: peers: fix data_type bit computation more than 32 data_types
+ - MINOR: stick-table: make skttable_data_cast to use only std types
+ - MEDIUM: stick-table: handle arrays of standard types into stick-tables
+ - MEDIUM: peers: handle arrays of std types in peers protocol
+ - DOC: stick-table: add missing documentation about gpt0 stored type
+ - MEDIUM: stick-table: add the new array of gpt data_type
+ - MEDIUM: stick-table: make the use of 'gpt' excluding the use of 'gpt0'
+ - MEDIUM: stick-table: add the new arrays of gpc and gpc_rate
+ - MEDIUM: stick-table: make the use of 'gpc' excluding the use of 'gpc0/1''
+ - BUG/MEDIUM: sock: make sure to never miss early connection failures
+ - BUG/MINOR: cli: fix server name output in "show fd"
+ - Revert "MINOR: tcp-act: Add set-src/set-src-port for "tcp-request content" rules"
+ - MEDIUM: stats: include disabled proxies that hold active sessions to stats
+ - BUILD: stick-table: shut up invalid "uninitialized" warning in gcc 8.3
+ - MINOR: http: implement http_get_scheme
+ - MEDIUM: http: implement scheme-based normalization
+ - MEDIUM: h1-htx: apply scheme-based normalization on h1 requests
+ - MEDIUM: h2: apply scheme-based normalization on h2 requests
+ - REGTESTS: add http scheme-based normalization test
+ - BUILD: http_htx: fix ci compilation error with isdigit for Windows
+ - MINOR: http: implement http uri parser
+ - MINOR: http: use http uri parser for scheme
+ - MINOR: http: use http uri parser for authority
+ - REORG: http_ana: split conditions for monitor-uri in wait for request
+ - MINOR: http: use http uri parser for path
+ - BUG/MEDIUM: http_ana: fix crash for http_proxy mode during uri rewrite
+ - MINOR: mux_h2: define config to disable h2 websocket support
+ - CLEANUP: applet: remove unused thread_mask
+ - BUG/MINOR: ssl: Default-server configuration ignored by server
+ - BUILD: add detection of missing important CFLAGS
+ - BUILD: lua: silence a build warning with TCC
+ - MINOR: srv: extract tracking server config function
+ - MINOR: srv: do not allow to track a dynamic server
+ - MEDIUM: server: support track keyword for dynamic servers
+ - REGTESTS: test track support for dynamic servers
+ - MINOR: init: verify that there is a single word on "-cc"
+ - MINOR: init: make -cc support environment variables expansion
+ - MINOR: arg: add a free_args() function to free an args array
+ - CLEANUP: config: use free_args() to release args array in cfg_eval_condition()
+ - CLEANUP: hlua: use free_args() to release args arrays
+ - REORG: config: move the condition preprocessing code to its own file
+ - MINOR: cfgcond: start to split the condition parser to introduce terms
+ - MEDIUM: cfgcond: report invalid trailing chars after expressions
+ - MINOR: cfgcond: remerge all arguments into a single line
+ - MINOR: cfgcond: support negating conditional expressions
+ - MINOR: cfgcond: make the conditional term parser automatically allocate nodes
+ - MINOR: cfgcond: insert an expression between the condition and the term
+ - MINOR: cfgcond: support terms made of parenthesis around expressions
+ - REGTEST: make check_condition.vtc fail as soon as possible
+ - REGTESTS: add more complex check conditions to check_conditions.vtc
+ - BUG/MEDIUM: init: restore behavior of command-line "-m" for memory limitation
+
+2021/06/30 : 2.5-dev1
+ - CLEANUP: ssl: Move ssl_store related code to ssl_ckch.c
+ - MINOR: ssl: Allow duplicated entries in the cafile_tree
+ - MEDIUM: ssl: Chain ckch instances in ca-file entries
+ - MINOR: ssl: Add reference to default ckch instance in bind_conf
+ - MINOR: ssl: Add helper functions to create/delete cafile entries
+ - MEDIUM: ssl: Add a way to load a ca-file content from memory
+ - MINOR: ssl: Add helper function to add cafile entries
+ - MINOR: ssl: Ckch instance rebuild and cleanup factorization in CLI handler
+ - MEDIUM: ssl: Add "set+commit ssl ca-file" CLI commands
+ - REGTESTS: ssl: Add new ca-file update tests
+ - MINOR: ssl: Add "abort ssl ca-file" CLI command
+ - MINOR: ssl: Add a cafile_entry type field
+ - MINOR: ssl: Refactorize the "show certificate details" code
+ - MEDIUM: ssl: Add "show ssl ca-file" CLI command
+ - MEDIUM: ssl: Add "new ssl ca-file" CLI command
+ - MINOR: ssl: Add "del ssl ca-file" CLI command
+ - REGTESTS: ssl: Add "new/del ssl ca-file" tests
+ - DOC: ssl: Add documentation about CA file hot update commands
+ - DOC: internals: update the SSL architecture schema
+ - MINOR: ssl: Chain instances in ca-file entries
+ - MEDIUM: ssl: Add "set+commit ssl crl-file" CLI commands
+ - MEDIUM: ssl: Add "new+del crl-file" CLI commands
+ - MINOR: ssl: Add "abort ssl crl-file" CLI command
+ - MEDIUM: ssl: Add "show ssl crl-file" CLI command
+ - REGTESTS: ssl: Add "new/del ssl crl-file" tests
+ - REGTESTS: ssl: Add "set/commit ssl crl-file" test
+ - DOC: ssl: Add documentation about CRL file hot update commands
+ - BUILD/MINOR: ssl: Fix compilation with SSL enabled
+ - BUILD/MINOR: ssl: Fix compilation with OpenSSL 1.0.2
+ - CI: introduce scripts/build-vtest.sh for installing VTest
+ - CLEANUP: ssl: Fix coverity issues found in CA file hot update code
+ - CI: github actions: add OpenTracing builds
+ - BUG/MEDIUM: ebtree: Invalid read when looking for dup entry
+ - BUG/MAJOR: server: prevent deadlock when using 'set maxconn server'
+ - BUILD/MINOR: opentracing: fixed build when using clang
+ - BUG/MEDIUM: filters: Exec pre/post analysers only one time per filter
+ - BUG/MINOR: http-comp: Preserve HTTP_MSGF_COMPRESSIONG flag on the response
+ - MINOR: map/acl: print the count of all the map/acl entries in "show map/acl"
+ - CLEANUP: pattern: remove export of non-existent function pattern_delete()
+ - MINOR: h1-htx: Update h1 parsing functions to return result as a size_t
+ - MEDIUM: h1-htx: Adapt H1 data parsing to copy wrapping data in one call
+ - MINOR: mux-h1/mux-fcgi: Don't needlessly loop on data parsing
+ - MINOR: h1-htx: Move HTTP chunks parsing into a dedicated function
+ - MEDIUM: h1-htx: Split function to parse a chunk and the loop on the buffer
+ - MEDIUM: h1-htx: Add a function to parse contiguous small chunks
+ - MINOR: h1-htx: Use a correlation table to speed-up small chunks parsing
+ - MINOR: buf: Add function to realign a buffer with a specific head position
+ - MINOR: muxes/h1-htx: Realign input buffer using b_slow_realign_ofs()
+ - CLEANUP: mux-h1: Rename functions parsing input buf and filling output buf
+ - Revert "MEDIUM: http-ana: Deal with L7 retries in HTTP analysers"
+ - BUG/MINOR: http-ana: Send the right error if max retries is reached on L7 retry
+ - BUG/MINOR: http-ana: Handle L7 retries on refused early data before K/A aborts
+ - MINOR: http-ana: Perform L7 retries because of status codes in response analyser
+ - MINOR: cfgparse: Fail when encountering extra arguments in macro
+ - DOC: intro: Fix typo in starter guide
+ - BUG/MINOR: server: Missing calloc return value check in srv_parse_source
+ - BUG/MINOR: peers: Missing calloc return value check in peers_register_table
+ - BUG/MINOR: ssl: Missing calloc return value check in ssl_init_single_engine
+ - BUG/MINOR: http: Missing calloc return value check in parse_http_req_capture
+ - BUG/MINOR: proxy: Missing calloc return value check in proxy_parse_declare
+ - BUG/MINOR: proxy: Missing calloc return value check in proxy_defproxy_cpy
+ - BUG/MINOR: http: Missing calloc return value check while parsing tcp-request/tcp-response
+ - BUG/MINOR: http: Missing calloc return value check while parsing tcp-request rule
+ - BUG/MINOR: compression: Missing calloc return value check in comp_append_type/algo
+ - BUG/MINOR: worker: Missing calloc return value check in mworker_env_to_proc_list
+ - BUG/MINOR: http: Missing calloc return value check while parsing redirect rule
+ - BUG/MINOR: http: Missing calloc return value check in make_arg_list
+ - BUG/MINOR: proxy: Missing calloc return value check in chash_init_server_tree
+ - CLEANUP: http-ana: Remove useless if statement about L7 retries
+ - BUG/MAJOR: stream-int: Release SI endpoint on server side ASAP on retry
+ - MINOR: backend: Don't release SI endpoint anymore in connect_server()
+ - BUG/MINOR: vars: Be sure to have a session to get checks variables
+ - DOC/MINOR: move uuid in the configuration to the right alphabetical order
+ - CLEANUP: mux-fcgi: Don't needlessly store result of data/trailers parsing
+ - BUILD: fix compilation for OpenSSL-3.0.0-alpha17
+ - MINOR: http-ana: Use -1 status for client aborts during queuing and connect
+ - REGTESTS: Fix http_abortonclose.vtc to support -1 status for some client aborts
+ - CLEANUP: backend: fix incorrect comments on locking conditions for lb functions
+ - CLEANUP: reg-tests: Remove obsolete no-htx parameter for reg-tests
+ - CI: github actions: add OpenSSL-3.0.0 builds
+ - CI: github actions: -Wno-deprecated-declarations with OpenSSL 3.0.0
+ - MINOR: errors: allow empty va_args for diag variadic macro
+ - REORG: errors: split errors reporting function from log.c
+ - CLEANUP: server: fix cosmetic of error message on sni parsing
+ - MEDIUM: errors: implement user messages buffer
+ - MINOR: log: do not discard stderr when starting is over
+ - MEDIUM: errors: implement parsing context type
+ - MINOR: errors: use user messages context in print_message
+ - MINOR: log: display exec path on first warning
+ - MINOR: errors: specify prefix "config" for parsing output
+ - MINOR: log: define server user message format
+ - REORG: server: use parsing ctx for server parsing
+ - REORG: config: use parsing ctx for server config check
+ - MINOR: server: use parsing ctx for server init addr
+ - MINOR: server: use ha_alert in server parsing functions
+ - DOC: use the req.ssl_sni in examples
+ - CLEANUP: cfgparse: Remove duplication of `MAX_LINE_ARGS + 1`
+ - CLEANUP: tools: Make errptr const in `parse_line()`
+ - MINOR: haproxy: Add `-cc` argument
+ - BUG: errors: remove printf positional args for user messages context
+ - CI: Make matrix.py executable and add shebang
+ - BUILD: make tune.ssl.keylog available again
+ - BUG/MINOR: ssl: OCSP stapling does not work if expire too far in the future
+ - Revert "BUG/MINOR: opentracing: initialization after establishing daemon mode"
+ - BUG/MEDIUM: opentracing: initialization before establishing daemon and/or chroot mode
+ - SCRIPTS: opentracing: enable parallel builds in build-ot.sh
+ - BUG/MEDIUM: compression: Fix loop skipping unused blocks to get the next block
+ - BUG/MEDIUM: compression: Properly get the next block to iterate on payload
+ - BUG/MEDIUM: compression: Add a flag to know the filter is still processing data
+ - MINOR: ssl: Keep the actual key length in the certificate_ocsp structure
+ - MINOR: ssl: Add new "show ssl ocsp-response" CLI command
+ - MINOR: ssl: Add the OCSP entry key when displaying the details of a certificate
+ - MINOR: ssl: Add the "show ssl cert foo.pem.ocsp" CLI command
+ - REGTESTS: ssl: Add "show ssl ocsp-response" test
+ - BUG/MINOR: server: explicitly set "none" init-addr for dynamic servers
+ - BUG/MINOR: pools: fix a possible memory leak in the lockless pool_flush()
+ - BUG/MINOR: pools: make DEBUG_UAF always write to the to-be-freed location
+ - MINOR: pools: do not maintain the lock during pool_flush()
+ - MINOR: pools: call malloc_trim() under thread isolation
+ - MEDIUM: pools: use a single pool_gc() function for locked and lockless
+ - BUG/MAJOR: pools: fix possible race with free() in the lockless variant
+ - CLEANUP: pools: remove now unused seq and pool_free_list
+ - MEDIUM: pools: remove the locked pools implementation
+ - BUILD: ssl: Fix compilation with BoringSSL
+ - BUG/MEDIUM: errors: include missing obj_type file
+ - REGTESTS: ssl: show_ssl_ocspresponce.vtc is broken with BoringSSL
+ - BUG/MAJOR: htx: Fix htx_defrag() when an HTX block is expanded
+ - BUG/MINOR: mux-fcgi: Expose SERVER_SOFTWARE parameter by default
+ - BUG/MINOR: h1-htx: Fix a signess bug with char data type when parsing chunk size
+ - CLEANUP: l7-retries: do not test the buffer before calling b_alloc()
+ - BUG/MINOR: resolvers: answer item list was randomly purged on errors
+ - MEDIUM: resolvers: add a ref on server to the used A/AAAA answer item
+ - MEDIUM: resolvers: add a ref between servers and srv request or used SRV record
+ - BUG/MINOR: server-state: load SRV resolution only if params match the config
+ - MINOR: config: remove support for deprecated option "tune.chksize"
+ - MINOR: config: completely remove support for "no option http-use-htx"
+ - MINOR: log: remove the long-deprecated early log-format tags
+ - MINOR: http: remove the long deprecated "set-cookie()" sample fetch function
+ - MINOR: config: reject long-deprecated "option forceclose"
+ - MINOR: config: remove deprecated option "http-tunnel"
+ - MEDIUM: proxy: remove the deprecated "grace" keyword
+ - MAJOR: config: remove parsing of the global "nbproc" directive
+ - BUILD: init: remove initialization of multi-process thread mappings
+ - BUILD: log: remove unused fmt_directive()
+ - REGTESTS: Remove REQUIRE_VERSION=1.6 from all tests
+ - REGTESTS: Remove REQUIRE_VERSION=1.7 from all tests
+ - CI: github actions: enable alpine/musl builds
+ - BUG/MAJOR: resolvers: segfault using server template without SRV RECORDs
+ - DOC: lua: Add a warning about buffers modification in HTTP
+ - MINOR: ssl: Use OpenSSL's ASN1_TIME convertor when available
+ - BUG/MINOR: stick-table: insert srv in used_name tree even with fixed id
+ - BUG/MEDIUM: server: extend thread-isolate over much of CLI 'add server'
+ - BUG/MEDIUM: server: clear dynamic srv on delete from proxy id/name trees
+ - BUG/MEDIUM: server: do not forget to generate the dynamic servers ids
+ - BUG/MINOR: server: do not keep an invalid dynamic server in px ids tree
+ - BUG/MEDIUM: server: do not auto insert a dynamic server in px addr_node
+ - BUG/MEDIUM: shctx: use at least thread-based locking on USE_PRIVATE_CACHE
+ - BUG/MINOR: ssl: use atomic ops to update global shctx stats
+ - BUG/MINOR: mworker: fix typo in chroot error message
+ - CLEANUP: global: remove unused definition of stopping_task[]
+ - MEDIUM: init: remove the loop over processes during init
+ - MINOR: mworker: remove the initialization loop over processes
+ - CLEANUP: global: remove the nbproc field from the global structure
+ - CLEANUP: global: remove pid_bit and all_proc_mask
+ - MEDIUM: global: remove dead code from nbproc/bind_proc removal
+ - MEDIUM: config: simplify cpu-map handling
+ - MEDIUM: cpu-set: make the proc a single bit field and not an array
+ - CLEANUP: global: remove unused definition of MAX_PROCS
+ - MEDIUM: global: remove the relative_pid from global and mworker
+ - DOC: update references to process numbers in cpu-map and bind-process
+ - MEDIUM: config: warn about "bind-process" deprecation
+ - CLEANUP: shctx: remove the different inter-process locking techniques
+ - BUG/MAJOR: queue: set SF_ASSIGNED when setting strm->target on dequeue
+ - MINOR: backend: only skip LB when there are actual connections
+ - BUG/MINOR: mux-h1: do not skip the error response on bad requests
+ - MINOR: connection: add helper conn_append_debug_info()
+ - MINOR: mux-h2/trace: report a few connection-level info during h2_init()
+ - CLEANUP: mux-h2/traces: better align user messages
+ - BUG/MINOR: stats: make "show stat typed desc" work again
+ - MINOR: mux-h2: obey http-ignore-probes during the preface
+ - BUG/MINOR: mux-h2/traces: bring back the lost "rcvd H2 REQ" trace
+ - BUG/MINOR: mux-h2/traces: bring back the lost "sent H2 REQ/RES" traces
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: Replace the requirement for 'sudo' with a call to 'ulimit -n'
+ - REGTESTS: Replace REQUIRE_VERSION=2.5 with 'haproxy -cc'
+ - REGTESTS: Replace REQUIRE_OPTIONS with 'haproxy -cc' for 2.5+ tests
+ - REGTESTS: Replace REQUIRE_BINARIES with 'command -v'
+ - REGTESTS: Remove support for REQUIRE_BINARIES
+ - CI: ssl: enable parallel builds for OpenSSL on Linux
+ - CI: ssl: do not needlessly build the OpenSSL docs
+ - CI: ssl: keep the old method for ancient OpenSSL versions
+ - CLEANUP: server: a separate function for initializing the per_thr field
+ - BUG/MINOR: server: Forbid to set fqdn on the CLI if SRV resolution is enabled
+ - BUG/MEDIUM: server/cli: Fix ABBA deadlock when fqdn is set from the CLI
+ - MINOR: resolvers: Clean server in a dedicated function when removing a SRV item
+ - MINOR: resolvers: Remove server from named_servers tree when removing a SRV item
+ - BUG/MEDIUM: resolvers: Add a task on servers to check SRV resolution status
+ - BUG/MINOR: backend: restore the SF_SRV_REUSED flag original purpose
+ - BUG/MINOR: backend: do not set sni on connection reuse
+ - BUG/MINOR: resolvers: Use resolver's lock in resolv_srvrq_expire_task()
+ - BUG/MINOR: server/cli: Fix locking in function processing "set server" command
+ - BUG/MINOR: cache: Correctly handle existing-but-empty 'accept-encoding' header
+ - MINOR: ssl: fix typo in usage for 'new ssl ca-file'
+ - MINOR: ssl: always initialize random generator
+ - MINOR: ssl: check allocation in ssl_sock_init_srv
+ - MINOR: ssl: check allocation in parse ciphers/ciphersuites/verifyhost
+ - MINOR: ssl: check allocation in parse npn/sni
+ - MINOR: server: disable CLI 'set server ssl' for dynamic servers
+ - MINOR: ssl: render file-access optional on server crt loading
+ - MINOR: ssl: split parse functions for alpn/check-alpn
+ - MINOR: ssl: support ca-file arg for dynamic servers
+ - MINOR: ssl: support crt arg for dynamic servers
+ - MINOR: ssl: support crl arg for dynamic servers
+ - MINOR: ssl: enable a series of ssl keywords for dynamic servers
+ - MINOR: ssl: support ssl keyword for dynamic servers
+ - REGTESTS: server: test ssl support for dynamic servers
+ - MINOR: queue: update the stream's pend_pos before queuing it
+ - CLEANUP: Prevent channel-t.h from being detected as C++ by GitHub
+ - BUG/MAJOR: server: fix deadlock when changing maxconn via agent-check
+ - REGTESTS: fix maxconn update with agent-check
+ - MEDIUM: queue: make pendconn_process_next_strm() only return the pendconn
+ - MINOR: queue: update proxy->served once out of the loop
+ - MEDIUM: queue: refine the locking in process_srv_queue()
+ - MINOR: lb/api: remove the locked argument from take_conn/drop_conn
+ - MINOR: queue: create a new structure type "queue"
+ - MINOR: proxy: replace the pendconns-related stuff with a struct queue
+ - MINOR: server: replace the pendconns-related stuff with a struct queue
+ - MEDIUM: queue: use a dedicated lock for the queues
+ - MEDIUM: queue: simplify again the process_srv_queue() API
+ - MINOR: queue: factor out the proxy/server queuing code
+ - MINOR: queue: use atomic-ops to update the queue's index
+ - MEDIUM: queue: determine in process_srv_queue() if the proxy is usable
+ - MEDIUM: queue: move the queue lock manipulation to pendconn_process_next_strm()
+ - MEDIUM: queue: unlock as soon as possible
+ - MINOR: queue: make pendconn_first() take the lock by itself
+ - CLEANUP: backend: remove impossible case of round-robin + consistent hash
+ - MINOR: tcp-act: Add set-src/set-src-port for "tcp-request content" rules
+ - DOC: config: Add missing actions in "tcp-request session" documentation
+ - CLEANUP: dns: Remove a forgotten debug message
+ - DOC: Replace issue templates by issue forms
+ - Revert "MINOR: queue: make pendconn_first() take the lock by itself"
+ - Revert "MEDIUM: queue: unlock as soon as possible"
+ - Revert "MEDIUM: queue: move the queue lock manipulation to pendconn_process_next_strm()"
+ - Revert "MEDIUM: queue: determine in process_srv_queue() if the proxy is usable"
+ - Revert "MINOR: queue: use atomic-ops to update the queue's index"
+ - Revert "MINOR: queue: factor out the proxy/server queuing code"
+ - Revert "MEDIUM: queue: simplify again the process_srv_queue() API"
+ - Revert "MEDIUM: queue: use a dedicated lock for the queues"
+ - Revert "MEDIUM: queue: refine the locking in process_srv_queue()"
+ - Revert "MINOR: queue: update proxy->served once out of the loop"
+ - Revert "MEDIUM: queue: make pendconn_process_next_strm() only return the pendconn"
+ - MEDIUM: queue: update px->served and lb's take_conn once per loop
+ - MEDIUM: queue: use a dedicated lock for the queues (v2)
+ - MEDIUM: queue: simplify again the process_srv_queue() API (v2)
+ - MEDIUM: queue: determine in process_srv_queue() if the proxy is usable (v2)
+ - MINOR: queue: factor out the proxy/server queuing code (v2)
+ - MINOR: queue: use atomic-ops to update the queue's index (v2)
+ - MEDIUM: queue: take the proxy lock only during the px queue accesses
+ - MEDIUM: queue: use a trylock on the server's queue
+ - MINOR: queue: add queue_init() to initialize a queue
+ - MINOR: queue: add a pointer to the server and the proxy in the queue
+ - MINOR: queue: store a pointer to the queue into the pendconn
+ - MINOR: queue: remove the px/srv fields from pendconn
+ - MINOR: queue: simplify pendconn_unlink() regarding srv vs px
+ - BUG: backend: stop looking for queued connections once there's no more
+ - BUG/MINOR: queue/debug: use the correct lock labels on the queue lock
+ - BUG/MINOR: resolvers: Always attach server on matching record on resolution
+ - BUG/MINOR: resolvers: Reset server IP when no ip is found in the response
+ - MINOR: resolvers: Reset server IP on error in resolv_get_ip_from_response()
+ - BUG/MINOR: checks: return correct error code for srv_parse_agent_check
+ - BUILD: Makefile: fix linkage for Haiku.
+ - BUG/MINOR: tcpcheck: Fix numbering of implicit HTTP send/expect rules
+ - MINOR: http-act/tcp-act: Add "set-log-level" for tcp content rules
+ - MINOR: http-act/tcp-act: Add "set-nice" for tcp content rules
+ - MINOR: http-act/tcp-act: Add "set-mark" and "set-tos" for tcp content rules
+ - CLEANUP: tcp-act: Sort action lists
+ - BUILD/MEDIUM: tcp: set-mark setting support for FreeBSD.
+ - BUILD: tcp-act: avoid warning when set-mark / set-tos are not supported
+ - BUG/MINOR: mqtt: Fix parser for string with more than 127 characters
+ - BUG/MINOR: mqtt: Support empty client ID in CONNECT message
+ - BUG/MEDIUM: resolvers: Make 1st server of a template take part to SRV resolution
+ - CLEANUP: peers: re-write intdecode function comment.
+
+2021/05/14 : 2.5-dev0
+ - MINOR: version: it's development again
+
+2021/05/14 : 2.4.0
+ - BUG/MINOR: http_fetch: fix possible uninit sockaddr in fetch_url_ip/port
+ - CLEANUP: cli/activity: Remove double spacing in set profiling command
+ - CI: Build VTest with clang
+ - CI: extend spellchecker whitelist, add "ists" as well
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: memprof: properly account for differences for realloc()
+ - MINOR: memprof: also report the method used by each call
+ - MINOR: memprof: also report the totals and delta alloc-free
+ - CLEANUP: pattern: remove the unused and dangerous pat_ref_reload()
+ - BUG/MINOR: http_act: Fix normalizer names in error messages
+ - MINOR: uri_normalizer: Add `fragment-strip` normalizer
+ - MINOR: uri_normalizer: Add `fragment-encode` normalizer
+ - IMPORT: slz: use the generic function for the last bytes of the crc32
+ - IMPORT: slz: do not produce the crc32_fast table when CRC is natively supported
+ - BUILD/MINOR: opentracing: fixed compilation with filter enabled
+ - BUILD: makefile: add a few popular ARMv8 CPU targets
+ - BUG/MEDIUM: stick_table: fix crash when using tcp smp_fetch_src
+ - REGTESTS: stick-table: add src_conn_rate test
+ - CLEANUP: stick-table: remove a leftover of an old keyword declaration
+ - BUG/MINOR: stats: fix lastchk metric that got accidentally lost
+ - EXAMPLES: add a "basic-config-edge" example config
+ - EXAMPLES: add a trivial config for quick testing
+ - DOC: management: Correct example reload command in the document
+ - Revert "CI: Build VTest with clang"
+ - MINOR: activity/cli: optionally support sorting by address on "show profiling"
+ - DEBUG: ssl: export ssl_sock_close() to see its symbol resolved in profiling
+ - BUG/MINOR: lua/vars: prevent get_var() from allocating a new name
+ - DOC: config: Fix configuration example for mqtt
+ - BUG/MAJOR: config: properly initialize cpu_map.thread[] up to MAX_THREADS
+ - BUILD: config: avoid a build warning on numa_detect_topology() without threads
+ - DOC: update min requirements in INSTALL
+ - IMPORT: slz: use inttypes.h instead of stdint.h
+ - BUILD: sample: use strtoll() instead of atoll()
+ - MINOR: version: mention that it's LTS now.
+
+2021/05/10 : 2.4-dev19
+ - BUG/MINOR: hlua: Don't rely on top of the stack when using Lua buffers
+ - BUG/MEDIUM: cli: prevent memory leak on write errors
+ - BUG/MINOR: ssl/cli: fix a lock leak when no memory available
+ - MINOR: debug: add a new "debug dev sym" command in expert mode
+ - MINOR: pools/debug: slightly relax DEBUG_DONT_SHARE_POOLS
+ - CI: Github Actions: switch to LibreSSL-3.3.3
+ - MINOR: srv: close all idle connections on shutdown
+ - MINOR: connection: move session_list member in a union
+ - MEDIUM: mux_h1: release idling frontend conns on soft-stop
+ - MEDIUM: connection: close front idling connection on soft-stop
+ - MINOR: tools: add functions to retrieve the address of a symbol
+ - CLEANUP: activity: mark the profiling and task_profiling_mask __read_mostly
+ - MINOR: activity: add a "memory" entry to "profiling"
+ - MINOR: activity: declare the storage for memory usage statistics
+ - MEDIUM: activity: collect memory allocator statistics with USE_MEMORY_PROFILING
+ - MINOR: activity: clean up the show profiling io_handler a little bit
+ - MINOR: activity: make "show profiling" support a few arguments
+ - MINOR: activity: make "show profiling" also dump the memory usage
+ - MINOR: activity: add the profiling.memory global setting
+ - BUILD: makefile: add new option USE_MEMORY_PROFILING
+ - MINOR: channel: Rely on HTX version if appropriate in channel_may_recv()
+ - BUG/MINOR: stream-int: Don't block reads in si_update_rx() if chn may receive
+ - MINOR: conn-stream: Force mux to wait for read events if abortonclose is set
+ - MEDIUM: mux-h1: Don't block reads when waiting for the other side
+ - BUG/MEDIUM: mux-h1: Properly report client close if abortonclose option is set
+ - REGTESTS: Add script to test abortonclose option
+ - MINOR: mux-h1: clean up conditions to enabled and disabled splicing
+ - MINOR: mux-h1: Subscribe for sends if output buffer is not empty in h1_snd_pipe
+ - MINOR: mux-h1: Always subscribe for reads when splicing is disabled
+ - MEDIUM: mux-h1: Wake H1 stream when both sides are synchronized
+ - CLEANUP: mux-h1: rename WAIT_INPUT/WAIT_OUTPUT flags
+ - MINOR: mux-h1: Manage processing blocking flags on the H1 stream
+ - BUG/MINOR: stream: Decrement server current session counter on L7 retry
+ - BUG/MINOR: config: fix uninitialized initial state in ".if" block evaluator
+ - BUG/MINOR: config: add a missing "ELIF_TAKE" test for ".elif" condition evaluator
+ - BUG/MINOR: config: .if/.elif should also accept negative integers
+ - MINOR: config: centralize the ".if"/".elif" condition parser and evaluator
+ - MINOR: config: keep up-to-date current file/line/section in the global struct
+ - MINOR: config: support some pseudo-variables for file/line/section
+ - BUILD: activity: do not include malloc.h
+ - MINOR: arg: improve the error message on missing closing parenthesis
+ - MINOR: global: export the build features string list
+ - MINOR: global: add version comparison functions
+ - MINOR: config: improve .if condition error reporting
+ - MINOR: config: make cfg_eval_condition() support predicates with arguments
+ - MINOR: config: add predicate "defined()" to conditional expression blocks
+ - MINOR: config: add predicates "streq()" and "strneq()" to conditional expressions
+ - MINOR: config: add predicate "feature" to detect certain built-in features
+ - MINOR: config: add predicates "version_atleast" and "version_before" to cond blocks
+ - BUG/MINOR: activity: use the new pointer to calculate the new size in realloc()
+ - BUG/MINOR: stream: properly clear the previous error mask on L7 retries
+ - MEDIUM: log: slightly refine the output format of alerts/warnings/etc
+ - MINOR: config: add a new message directive: .diag
+ - CLEANUP: cli/tree-wide: properly re-align the CLI commands' help messages
+ - BUG/MINOR: stream: Reset stream final state and si error type on L7 retry
+ - BUG/MINOR: checks: Handle synchronous connect when a tcpcheck is started
+ - BUG/MINOR: checks: Reschedule check on observe mode only if fastinter is set
+ - MINOR: global: define tainted flag
+ - MINOR: cfgparse: add a new field flags in cfg_keyword
+ - MINOR: cfgparse: implement experimental config keywords
+ - MINOR: action: replace match_pfx by a keyword flags field
+ - MINOR: action: implement experimental actions
+ - MINOR: cli: set tainted when using CLI expert/experimental mode
+ - MINOR: stats: report tainted on show info
+ - MINOR: http_act: mark normalize-uri as experimental
+ - BUILD: fix usage of ha_alert without format string
+ - MINOR: proxy: define PR_CAP_LB
+ - BUG/MINOR: server: do not report diag for peer servers with null weight
+ - DOC: ssl: Extra files loading now works for backends too
+ - ADDONS: make addons/ discoverable by git via .gitignore
+ - DOC: ssl: Add information about crl-file option
+ - MINOR: sample: improve error reporting on missing arg to strcmp() converter
+ - DOC: management: mention that some fields may be emitted as floats
+ - MINOR: tools: implement trimming of floating point numbers
+ - MINOR: tools: add a float-to-ascii conversion function
+ - MINOR: freq_ctr: add new functions to report float measurements
+ - MINOR: stats: avoid excessive padding of float values with trailing zeroes
+ - MINOR: stats: add the HTML conversion for float types
+ - MINOR: stats: pass the appctx flags to stats_fill_info()
+ - MINOR: stats: support an optional "float" option to "show info"
+ - MINOR: stats: use tv_remain() to precisely compute the uptime
+ - MINOR: stats: report uptime and start time as floats with subsecond resolution
+ - MINOR: stats: make "show info" able to report rates as floats when asked
+ - MINOR: config: mark tune.fd.edge-triggered as experimental
+ - REORG: vars: move the "proc" scope variables out of the global struct
+ - REORG: threads: move all_thread_mask() to thread.h
+ - BUILD: wdt: include signal-t.h
+ - BUILD: auth: include missing list.h
+ - REORG: mworker: move proc_self from global to mworker
+ - BUILD: ssl: ssl_utils requires chunk.h
+ - BUILD: config: cfgparse-ssl.c needs tools.h
+ - BUILD: wurfl: wurfl.c needs tools.h
+ - BUILD: spoe: flt_spoe.c needs tools.h
+ - BUILD: promex: service-prometheus.c needs tools.h
+ - BUILD: resolvers: include tools.h
+ - BUILD: config: include tools.h in cfgparse-listen.c
+ - BUILD: htx: include tools.h in http_htx.c
+ - BUILD: proxy: include tools.h in proxy.c
+ - BUILD: session: include tools.h in session.c
+ - BUILD: cache: include tools.h in cache.c
+ - BUILD: sink: include tools.h in sink.c
+ - BUILD: connection: include tools.h in connection.c
+ - BUILD: server-state: include tools.h from server_state.c
+ - BUILD: dns: include tools.h in dns.c
+ - BUILD: payload: include tools.h in payload.c
+ - BUILD: vars: include tools.h in vars.c
+ - BUILD: compression: include tools.h in compression.c
+ - BUILD: mworker: include tools.h from mworker.c
+ - BUILD: queue: include tools.h from queue.c
+ - BUILD: udp: include tools.h from proto_udp.c
+ - BUILD: stick-table: include freq_ctr.h from stick_table.h
+ - BUILD: server: include tools.h from server.c
+ - BUILD: server: include missing proxy.h in server.c
+ - BUILD: sink: include proxy.h in sink.c
+ - BUILD: mworker: include proxy.h in mworker.c
+ - BUILD: filters: include proxy.h in filters.c
+ - BUILD: fcgi-app: include proxy.h in fcgi-app.c
+ - BUILD: connection: move list_mux_proto() to connection.c
+ - REORG: stick-table: uninline stktable_alloc_data_type()
+ - REORG: stick-table: move composite address functions to stick_table.h
+ - REORG: config: uninline warnifnotcap() and failifnotcap()
+ - BUILD: task: remove unused includes from task.c
+ - MINOR: task: stop including stream.h from task.c
+ - BUILD: connection: stop including listener-t.h
+ - BUILD: hlua: include proxy.h from hlua.c
+ - BUILD: mux-h1: include proxy.h from mux-h1.c
+ - BUILD: mux-fcgi: include proxy.h from mux-fcgi.c
+ - BUILD: listener: include proxy.h from listener.c
+ - BUILD: http-rules: include proxy.h from http_rules.c
+ - BUILD: thread: include log.h from thread.c
+ - BUILD: comp: include proxy.h from flt_http_comp.c
+ - BUILD: fd: include log.h from fd.c
+ - BUILD: config: do not include proxy.h nor errors.h anymore in cfgparse.h
+ - BUILD: makefile: reorder object files by build time
+ - DOC: Fix a few grammar/spelling issues and casing of HAProxy
+ - REGTESTS: run-regtests: match both "HAProxy" and "HA-Proxy" in the version
+ - MINOR: version: report "HAProxy" not "HA-Proxy" in the version output
+ - DOC: remove last occurrences of "HA-Proxy" syntax
+ - DOC: peers: fix the protocol tag name in the doc
+ - ADMIN: netsnmp: report "HAProxy" and not "Haproxy" in output descriptions
+ - MEDIUM: mailers: use "HAProxy" not "HAproxy" in the subject of messages
+ - DOC: fix a few remaining cases of "Haproxy" and "HAproxy" in doc and comments
+ - MINOR: tools/rnd: compute the result outside of the CAS loop
+ - BUILD: http_fetch: address a few aliasing warnings with older compilers
+ - BUILD: ssl: define HAVE_CRYPTO_memcmp() based on the library version
+ - BUILD: errors: include stdarg in errors.h
+ - REGTESTS: disable inter-thread idle connection sharing on sensitive tests
+ - MINOR: cli: make "help" support a command in argument
+ - MINOR: cli: sort the output of the "help" keywords
+ - CLEANUP: cli/mworker: properly align the help messages
+ - BUILD: memprof: make the old caller pointer a const in get_prof_bin()
+ - BUILD: compat: include malloc_np.h for USE_MEMORY_PROFILING on FreeBSD
+ - CI: Github Actions: enable USE_QUIC=1 for BoringSSL builds
+ - BUG/MEDIUM: quic: fix null deref on error path in qc_conn_init()
+ - BUILD: cli: appease a null-deref warning in cli_gen_usage_msg()
+
+2021/05/01 : 2.4-dev18
+ - DOC: Fix indentation for `path-strip-dot` normalizer
+ - DOC: Fix RFC reference for the percent-to-uppercase normalizer
+ - DOC: Add RFC references for the path-strip-dot(dot)? normalizers
+ - MINOR: uri_normalizer: Add a `percent-decode-unreserved` normalizer
+ - BUG/MINOR: mux-fcgi: Don't send normalized uri to FCGI application
+ - REORG: htx: Inline htx functions to add HTX blocks in a message
+ - CLEANUP: assorted typo fixes in the code and comments
+ - DOC: general: fix white spaces for HTML converter
+ - BUG/MINOR: ssl: ssl_sock_prepare_ssl_ctx does not return an error code
+ - BUG/MINOR: cpuset: move include guard at the very beginning
+ - BUG/MAJOR: fix build on musl with cpu_set_t support
+ - BUG/MEDIUM: cpuset: fix build on MacOS
+ - BUG/MINOR: htx: Preserve HTX flags when draining data from an HTX message
+ - MEDIUM: htx: Refactor htx_xfer_blks() to not rely on hdrs_bytes field
+ - CLEANUP: htx: Remove unused hdrs_bytes field from the HTX start-line
+ - BUG/MINOR: mux-h2: Don't encroach on the reserve when decoding headers
+ - MEDIUM: http-ana: handle read error on server side if waiting for response
+ - MINOR: htx: Limit length of headers name/value when a HTX message is dumped
+ - BUG/MINOR: applet: Notify the other side if data were consumed by an applet
+ - BUG/MINOR: hlua: Don't consume headers when starting an HTTP lua service
+ - BUG/MEDIUM: mux-h2: Handle EOM flag when sending a DATA frame with zero-copy
+ - CLEANUP: channel: No longer notify the producer in co_skip()/co_htx_skip()
+ - DOC: general: fix example in set-timeout
+ - CLEANUP: cfgparse: de-uglify early file error handling in readcfgfile()
+ - MINOR: config: add a new "default-path" global directive
+ - BUG/MEDIUM: peers: initialize resync timer to get an initial full resync
+ - BUG/MEDIUM: peers: register last acked value as origin receiving a resync req
+ - BUG/MEDIUM: peers: stop considering ack messages teaching a full resync
+ - BUG/MEDIUM: peers: reset starting point if peers appears longly disconnected
+ - BUG/MEDIUM: peers: reset commitupdate value in new conns
+ - BUG/MEDIUM: peers: re-work updates lookup during the sync on the fly
+ - BUG/MEDIUM: peers: reset tables stage flags stages on new conns
+ - MINOR: peers: add informative flags about resync process for debugging
+ - BUG/MEDIUM: time: fix updating of global_now upon clock drift
+ - CLEANUP: freq_ctr: make arguments of freq_ctr_total() const
+ - CLEANUP: hlua: rename hlua_appctx* appctx to luactx
+ - MINOR: server: fix doc/trace on lb algo for dynamic server creation
+ - REGTESTS: server: fix cli_add_server due to previous trace update
+ - REGTESTS: add minimal CLI "add map" tests
+ - DOC: management: move "set var" to the proper place
+ - CLEANUP: map: slightly reorder the add map function
+ - MINOR: map: get rid of map_add_key_value()
+ - MINOR: map: show the current and next pattern version in "show map"
+ - MINOR: map/acl: add the possibility to specify the version in "show map/acl"
+ - MINOR: pattern: support purging arbitrary ranges of generations
+ - MINOR: map/acl: add the possibility to specify the version in "clear map/acl"
+ - MINOR: map/acl: add the "prepare map/acl" CLI command
+ - MINOR: map/acl: add the "commit map/acl" CLI command
+ - MINOR: map/acl: make "add map/acl" support an optional version number
+ - CLEANUP: map/cli: properly align the map/acl help
+ - BUILD: compiler: do not use already defined __read_mostly on dragonfly
+
+2021/04/23 : 2.4-dev17
+ - MINOR: mux-pt/trace: Register a new trace source with its events
+ - BUG/MINOR: mux-pt: Fix a possible UAF because of traces in mux_pt_io_cb
+ - CI: travis: Drastically clean up .travis.yml
+ - CLEANUP: pattern: make all pattern tables read-only
+ - MINOR: trace: replace the trace() inline function with an equivalent macro
+ - MINOR: initcall: uniformize the section names between MacOS and other unixes
+ - CLEANUP: initcall: rename HA_SECTION to HA_INIT_SECTION
+ - MINOR: compiler: add macros to declare section names
+ - CLEANUP: initcall: rely on HA_SECTION_* instead of defining its own
+ - MINOR: global: declare a read_mostly section
+ - MINOR: fd: move a few read-mostly variables to their own section
+ - MINOR: epoll: move epoll_fd to read_mostly
+ - MINOR: kqueue: move kqueue_fd to read_mostly
+ - MINOR: pool: move pool declarations to read_mostly
+ - MINOR: threads: mark all_threads_mask as read_mostly
+ - MINOR: server: move idle_conn_task to read_mostly
+ - MINOR: protocol: move __protocol_by_family to read_mostly
+ - MINOR: pattern: make the pat_lru_seed read_mostly
+ - MINOR: trace: make trace sources read_mostly
+ - MINOR: freq_ctr: add a generic function to report the total value
+ - MEDIUM: freq_ctr: make read_freq_ctr_period() use freq_ctr_total()
+ - MEDIUM: freq_ctr: reimplement freq_ctr_remain_period() from freq_ctr_total()
+ - MINOR: freq_ctr: add the missing next_event_delay_period()
+ - MINOR: freq_ctr: unify freq_ctr and freq_ctr_period into freq_ctr
+ - MEDIUM: freq_ctr: replace the per-second counters with the generic ones
+ - MINOR: freq_ctr: add cpu_relax in the rotation loop of update_freq_ctr_period()
+ - MINOR: freq_ctr: simplify and improve the update function
+ - CLEANUP: time: remove the now unused ms_left_scaled
+ - MINOR: time: move the time initialization out of tv_update_date()
+ - MINOR: time: remove useless variable copies in tv_update_date()
+ - MINOR: time: change the global timeval and the global tick at once
+ - MEDIUM: time: make the clock offset global and not per-thread
+ - MINOR: atomic: reimplement the relaxed version of x86 BTS/BTR
+ - MINOR: trace: Add the checks as a possible trace source
+ - MINOR: checks/trace: Register a new trace source with its events
+ - MINOR: hlua: Add function to release a lua function
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering a task
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering a converter
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering a fetch
+ - BUG/MINOR: hlua: Fix memory leaks on error path when parsing a lua action
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering an action
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering a service
+ - BUG/MINOR: hlua: Fix memory leaks on error path when registering a cli keyword
+ - BUG/MINOR: cfgparse/proxy: Fix some leaks during proxy section parsing
+ - BUG/MINOR: listener: Handle allocation error when allocating a new bind_conf
+ - BUG/MINOR: cfgparse/proxy: Handle allocation errors during proxy section parsing
+ - MINOR: cfgparse/proxy: Group alloc error handling during proxy section parsing
+ - DOC: internals: update the SSL architecture schema
+ - BUG/MEDIUM: sample: Fix adjusting size in field converter
+ - MINOR: sample: add ub64dec and ub64enc converters
+ - CLEANUP: sample: align samples list in sample.c
+ - MINOR: ist: Add `istclear(struct ist*)`
+ - CI: cirrus: install "pcre" package
+ - MINOR: opentracing: correct calculation of the number of arguments in the args[]
+ - MINOR: opentracing: transfer of context names without prefix
+ - MINOR: sample: converter: Add mjson library.
+ - MINOR: sample: converter: Add json_query converter
+ - CI: travis-ci: enable weekly graviton2 builds
+ - DOC: ssl: Certificate hot update only works on frontend certificates
+ - DOC: ssl: Certificate hot update works on server certificates
+ - BUG/MEDIUM: threads: Ignore current thread to end its harmless period
+ - MINOR: threads: Only consider running threads to end a thread harmless period
+ - BUG/MINOR: checks: Set missing id to the dummy checks frontend
+ - MINOR: logs: Add support of checks as session origin to format lf strings
+ - BUG/MINOR: connection: Fix fc_http_major and bc_http_major for TCP connections
+ - MINOR: connection: Make bc_http_major compatible with tcp-checks
+ - BUG/MINOR: ssl-samples: Fix ssl_bc_* samples when called from a health-check
+ - BUG/MINOR: http-fetch: Make method smp safe if headers were already forwarded
+ - MINOR: tcp_samples: Add samples to get src/dst info of the backend connection
+ - MINOR: tcp_samples: Be able to call bc_src/bc_dst from the health-checks
+ - BUG/MINOR: http_htx: Remove BUG_ON() from http_get_stline() function
+ - BUG/MINOR: logs: Report the true number of retries if there was no connection
+ - BUILD: makefile: Redirect stderr to /dev/null when probing options
+ - MINOR: uri_normalizer: Add uri_normalizer module
+ - MINOR: uri_normalizer: Add `enum uri_normalizer_err`
+ - MINOR: uri_normalizer: Add `http-request normalize-uri`
+ - MINOR: uri_normalizer: Add a `merge-slashes` normalizer to http-request normalize-uri
+ - MINOR: uri_normalizer: Add a `dotdot` normalizer to http-request normalize-uri
+ - MINOR: uri_normalizer: Add support for suppressing leading `../` for dotdot normalizer
+ - MINOR: uri_normalizer: Add a `sort-query` normalizer
+ - MINOR: uri_normalizer: Add a `percent-upper` normalizer
+ - MEDIUM: http_act: Rename uri-normalizers
+ - DOC: Add introduction to http-request normalize-uri
+ - DOC: Note that URI normalization is experimental
+ - BUG/MINOR: pools: maintain consistent ->allocated count on alloc failures
+ - BUG/MINOR: pools/buffers: make sure to always reserve the required buffers
+ - MINOR: pools: drop the unused static history of artificially failed allocs
+ - CLEANUP: pools: remove unused arguments to pool_evict_from_cache()
+ - MEDIUM: pools: move the cache into the pool header
+ - MINOR: pool: remove the size field from pool_cache_head
+ - MINOR: pools: rename CONFIG_HAP_LOCAL_POOLS to CONFIG_HAP_POOLS
+ - MINOR: pools: enable the fault injector in all allocation modes
+ - MINOR: pools: make the basic pool_refill_alloc()/pool_free() update needed_avg
+ - MEDIUM: pools: unify pool_refill_alloc() across all models
+ - CLEANUP: pools: re-merge pool_refill_alloc() and __pool_refill_alloc()
+ - MINOR: pools: call pool_alloc_nocache() out of the pool's lock
+ - CLEANUP: pools: move the lock to the only __pool_get_first() that needs it
+ - CLEANUP: pools: rename __pool_get_first() to pool_get_from_shared_cache()
+ - CLEANUP: pools: rename pool_*_{from,to}_cache() to *_local_cache()
+ - CLEANUP: pools: rename __pool_free() to pool_put_to_shared_cache()
+ - MINOR: tools: add statistical_prng_range() to get a random number over a range
+ - MINOR: pools: use cheaper randoms for fault injections
+ - MINOR: pools: move the fault injector to __pool_alloc()
+ - MINOR: pools: split the OS-based allocator in two
+ - MINOR: pools: always use atomic ops to maintain counters
+ - MINOR: pools: move pool_free_area() out of the lock in the locked version
+ - MINOR: pools: factor the release code into pool_put_to_os()
+ - MEDIUM: pools: make CONFIG_HAP_POOLS control both local and shared pools
+ - MINOR: pools: create unified pool_{get_from,put_to}_cache()
+ - MINOR: pools: evict excess objects using pool_evict_from_local_cache()
+ - MEDIUM: pools: make pool_put_to_cache() always call pool_put_to_local_cache()
+ - CLEANUP: pools: make the local cache allocator fall back to the shared cache
+ - CLEANUP: pools: merge pool_{get_from,put_to}_local_caches with generic ones
+ - CLEANUP: pools: uninline pool_put_to_cache()
+ - CLEANUP: pools: declare dummy pool functions to remove some ifdefs
+ - BUILD: pools: fix build with DEBUG_FAIL_ALLOC
+ - BUG/MINOR: server: make srv_alloc_lb() allocate lb_nodes for consistent hash
+ - CONTRIB: mod_defender: import the minimal number of includes
+ - CONTRIB: mod_defender: make the code build with the embedded includes
+ - CONTRIB: modsecurity: import the minimal number of includes
+ - CONTRIB: modsecurity: make the code build with the embedded includes
+ - CLEANUP: sample: Improve local variables in sample_conv_json_query
+ - CLEANUP: sample: Explicitly handle all possible enum values from mjson
+ - CLEANUP: sample: Use explicit return for successful `json_query`s
+ - CLEANUP: lists/tree-wide: rename some list operations to avoid some confusion
+ - CONTRIB: move spoa_example out of the tree
+ - BUG/MINOR: server: free srv.lb_nodes in free_server
+ - BUG/MINOR: logs: free logsrv.conf.file on exit
+ - BUG/MEDIUM: server: ensure thread-safety of server runtime creation
+ - MINOR: server: add log on dynamic server creation
+ - MINOR: server: implement delete server cli command
+ - CONTRIB: move spoa_server out of the tree
+ - CONTRIB: move modsecurity out of the tree
+ - BUG/MINOR: server: fix potential null gcc error in delete server
+ - BUG/MAJOR: mux-h2: Properly detect too large frames when decoding headers
+ - BUG/MEDIUM: mux-h2: Fix dfl calculation when merging CONTINUATION frames
+ - BUG/MINOR: uri_normalizer: Use delim parameter when building the sorted query in uri_normalizer_query_sort
+ - CLEANUP: uri_normalizer: Remove trailing whitespace
+ - MINOR: uri_normalizer: Add a `strip-dot` normalizer
+ - CONTRIB: move mod_defender out of the tree
+ - CLEANUP: contrib: remove the last references to the now dead contrib/ directory
+ - BUG/MEDIUM: config: fix cpu-map notation with both process and threads
+ - MINOR: config: add a diag for invalid cpu-map statement
+ - BUG/MINOR: mworker/init: don't reset nb_oldpids in non-mworker cases
+ - BUG/MINOR: mworker: don't use oldpids[] anymore for reload
+ - BUILD: makefile: fix the "make clean" target on strict bourne shells
+ - IMPORT: slz: import slz into the tree
+ - BUILD: compression: switch SLZ from out-of-tree to in-tree
+ - CI: github: do not build libslz any more
+ - CLEANUP: compression: remove calls to SLZ init functions
+ - BUG/MEDIUM: mux-h2: Properly handle shutdowns when received with data
+ - MINOR: cpuset: define a platform-independent cpuset type
+ - MINOR: cfgparse: use hap_cpuset for parse_cpu_set
+ - MEDIUM: config: use platform independent type hap_cpuset for cpu-map
+ - MINOR: thread: implement the detection of forced cpu affinity
+ - MINOR: cfgparse: support the comma separator on parse_cpu_set
+ - MEDIUM: cfgparse: detect numa and set affinity if needed
+ - MINOR: global: add option to disable numa detection
+ - BUG/MINOR: haproxy: fix compilation on macOS
+ - BUG/MINOR: cpuset: fix compilation on platform without cpu affinity
+ - MINOR: time: avoid unneeded updates to now_offset
+ - MINOR: time: avoid overwriting the same values of global_now
+ - CLEANUP: time: use __tv_to_ms() in tv_update_date() instead of open-coding
+ - MINOR: time: avoid u64 needlessly expensive computations for the 32-bit now_ms
+ - BUG/MINOR: peers: remove useless table check if initial resync is finished
+ - BUG/MEDIUM: peers: re-work connection to new process during reload.
+ - BUG/MEDIUM: peers: re-work refcnt on table to protect against flush
+ - BUG/MEDIUM: config: fix missing initialization in numa_detect_topology()
+
+2021/04/09 : 2.4-dev16
+ - CLEANUP: dev/flags: remove useless test in the stdin number parser
+ - MINOR: No longer rely on deprecated sample fetches for predefined ACLs
+ - MINOR: acl: Add HTTP_2.0 predefined macro
+ - BUG/MINOR: hlua: Detect end of request when reading data for an HTTP applet
+ - BUG/MINOR: tools: fix parsing "us" unit for timers
+ - MINOR: server/bind: add support of new prefixes for addresses.
+ - MINOR: log: register config file and line number on log servers.
+ - MEDIUM: log: support tcp or stream addresses on log lines.
+ - BUG/MEDIUM: log: fix config parse error logging on stdout/stderr or any raw fd
+ - CLEANUP: fd: remove FD_POLL_DATA and FD_POLL_STICKY
+ - MEDIUM: fd: prepare FD_POLL_* to move to bits 8-15
+ - MEDIUM: fd: merge fdtab[].ev and state for FD_EV_* and FD_POLL_* into state
+ - MINOR: fd: move .linger_risk into fdtab[].state
+ - MINOR: fd: move .cloned into fdtab[].state
+ - MINOR: fd: move .initialized into fdtab[].state
+ - MINOR: fd: move .et_possible into fdtab[].state
+ - MINOR: fd: move .exported into fdtab[].state
+ - MINOR: fd: implement an exclusive syscall bit to remove the ugly "log" lock
+ - MINOR: cli/show-fd: slightly reorganize the FD status flags
+ - MINOR: atomic/arm64: detect and use builtins for the double-word CAS
+ - CLEANUP: atomic: add an explicit _FETCH variant for add/sub/and/or
+ - CLEANUP: atomic: make all standard add/or/and/sub operations return void
+ - CLEANUP: atomic: add a fetch-and-xxx variant for common operations
+ - CLEANUP: atomic: add HA_ATOMIC_INC/DEC for unit increments
+ - CLEANUP: atomic/tree-wide: replace single increments/decrements with inc/dec
+ - CLEANUP: atomic: use the __atomic variant of BTS/BTR on modern compilers
+ - MINOR: atomic: implement native BTS/BTR for x86
+ - MINOR: ist: Add `istappend(struct ist, char)`
+ - MINOR: ist: Add `istshift(struct ist*)`
+ - MINOR: ist: Add `istsplit(struct ist*, char)`
+ - BUG/MAJOR: fd: switch temp values to uint in fd_stop_both()
+ - MINOR: opentracing: register config file and line number on log servers
+ - MEDIUM: resolvers: add support of tcp address on nameserver line.
+ - MINOR: ist: Rename istappend() to __istappend()
+ - CLEANUP: htx: Make http_get_stline take a `const struct`
+ - CLEANUP: ist: Remove unused `count` argument from `ist2str*`
+ - CLEANUP: Remove useless malloc() casts
+
+2021/04/02 : 2.4-dev15
+ - BUG/MINOR: payload: Wait for more data if buffer is empty in payload/payload_lv
+ - BUG/MINOR: stats: Apply proper styles in HTML status page.
+ - BUG/MEDIUM: time: make sure to always initialize the global tick
+ - BUG/MINOR: tcp: fix silent-drop workaround for IPv6
+ - BUILD: tcp: use IPPROTO_IPV6 instead of SOL_IPV6 on FreeBSD/MacOS
+ - CLEANUP: socket: replace SOL_IP/IPV6/TCP with IPPROTO_IP/IPV6/TCP
+ - BUG/MINOR: http_fetch: make hdr_ip() resistant to empty fields
+ - BUG/MINOR: mux-h2: Don't emit log twice if an error occurred on the preface
+ - MINOR: stream: Don't trigger errors on destructive HTTP upgrades
+ - MINOR: frontend: Create HTTP txn for HTX streams
+ - MINOR: stream: Be sure to set HTTP analysers when creating an HTX stream
+ - BUG/MINOR: stream: Properly handle TCP>H1>H2 upgrades in http_wait_for_request
+ - BUG/MINOR: config: Add warning for http-after-response rules in TCP mode
+ - MINOR: muxes: Add a flag to notify a mux does not support any upgrade
+ - MINOR: mux-h1: Don't perform implicit HTTP/2 upgrade if not supported by mux
+ - MINOR: mux-pt: Don't perform implicit HTTP upgrade if not supported by mux
+ - MEDIUM: mux-h1: Expose h1 in the list of supported mux protocols
+ - MEDIUM: mux-pt: Expose passthrough in the list of supported mux protocols
+ - MINOR: muxes: Show muxes flags when the mux list is displayed
+ - DOC: config: Improve documentation about proto/check-proto keywords
+ - MINOR: stream: Use stream type instead of proxy mode when appropriate
+ - MINOR: filters/http-ana: Decide to filter HTTP headers in HTTP analysers
+ - MINOR: http-ana: Simplify creation/destruction of HTTP transactions
+ - MINOR: stream: Handle stream HTTP upgrade in a dedicated function
+ - MEDIUM: Add tcp-request switch-mode action to perform HTTP upgrade
+ - MINOR: config/proxy: Don't warn for HTTP rules in TCP if 'switch-mode http' set
+ - MINOR: config/proxy: Warn if a TCP proxy without backend is upgradable to HTTP
+ - DOC: config: Add documentation about TCP to HTTP upgrades
+ - REGTESTS: Add script to tests TCP to HTTP upgrades
+ - BUG/MINOR: payload/htx: Ignore L6 sample fetches for HTX streams/checks
+ - MINOR: htx: Make internal.strm.is_htx an internal sample fetch
+ - MINOR: action: Use a generic function to check validity of an action rule list
+ - MINOR: payload/config: Warn if a L6 sample fetch is used from an HTTP proxy
+ - MEDIUM: http-rules: Add wait-for-body action on request and response side
+ - REGTESTS: Add script to tests the wait-for-body HTTP action
+ - BUG/MINOR: http-fetch: Fix test on message state to capture the version
+ - CLEANUP: vars: always pre-initialize smp in vars_parse_cli_get_var()
+ - MINOR: global: define diagnostic mode of execution
+ - MINOR: cfgparse: diag for multiple nbthread statements
+ - MINOR: server: diag for 0 weight server
+ - MINOR: diag: create cfgdiag module
+ - MINOR: diag: diag if servers use the same cookie value
+ - MINOR: config: diag if global section after non-global
+ - TESTS: slightly reorganize the code in the tests/ directory
+ - TESTS: move tests/*.cfg to tests/config
+ - REGTESTS: ssl: "set ssl cert" and multi-certificates bundle
+ - REGTESTS: ssl: mark set_ssl_cert_bundle.vtc as broken
+ - CONTRIB: halog: fix issue with array of type char
+ - CONTRIB: tcploop: add a shutr command
+ - CONTRIB: debug: add the show-fd-to-flags script
+ - CONTRIB: debug: split poll from flags
+ - CONTRIB: move some dev-specific tools to dev/
+ - BUILD: makefile: always build the flags utility
+ - DEV: flags: replace the unneeded makefile with a README
+ - BUILD: makefile: integrate the hpack tools
+ - CONTRIB: merge ip6range with iprange
+ - CONTRIB: move some admin-related sub-projects to admin/
+ - CONTRIB: move halog to admin/
+ - ADMIN: halog: automatically enable USE_MEMCHR on the right glibc version
+ - BUILD: makefile: build halog with the correct flags
+ - BUILD: makefile: add a "USE_PROMEX" variable to ease building prometheus-exporter
+ - CONTRIB: move prometheus-exporter to addons/promex
+ - DOC: add a few words about USE_* and the addons directory
+ - CONTRIB: move 51Degrees to addons/51degrees
+ - CONTRIB: move src/da.c and contrib/deviceatlas to addons/deviceatlas
+ - CONTRIB: move src/wurfl.c and contrib/wurfl to addons/wurfl
+ - CONTRIB: move contrib/opentracing to addons/ot
+ - BUG/MINOR: opentracing: initialization after establishing daemon mode
+ - DOC: clarify that compression works for HTTP/2
+
+2021/03/27 : 2.4-dev14
+ - MEDIUM: quic: Fix build.
+ - MEDIUM: quic: Fix build.
+ - CI: codespell: whitelist "Dragan Dosen"
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: github actions: update LibreSSL to 3.2.5
+ - REGTESTS: revert workaround for a crash with recent libressl on http-reuse sni
+ - CLEANUP: mark defproxy as const on parse tune.fail-alloc
+ - REGTESTS: remove unneeded experimental-mode in cli add server test
+ - REGTESTS: wait for proper return of enable server in cli add server test
+ - MINOR: compression: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: spoe: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: fcgi-app: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: cache: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: ssl: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: opentracing: use pool_alloc(), not pool_alloc_dirty()
+ - MINOR: dynbuf: make b_alloc() always check if the buffer is allocated
+ - CLEANUP: compression: do not test for buffer before calling b_alloc()
+ - CLEANUP: l7-retries: do not test the buffer before calling b_alloc()
+ - MINOR: channel: simplify the channel's buffer allocation
+ - MEDIUM: dynbuf: remove last usages of b_alloc_margin()
+ - CLEANUP: dynbuf: remove b_alloc_margin()
+ - CLEANUP: dynbuf: remove the unused b_alloc_fast() function
+ - CLEANUP: pools: remove the unused pool_get_first() function
+ - MINOR: pools: make the pool allocator support a few flags
+ - MINOR: pools: add pool_zalloc() to return a zeroed area
+ - CLEANUP: connection: use pool_zalloc() in conn_alloc_hash_node()
+ - CLEANUP: filters: use pool_zalloc() in flt_stream_add_filter()
+ - CLEANUP: spoe: use pool_zalloc() instead of pool_alloc+memset
+ - CLEANUP: frontend: use pool_zalloc() in frontend_accept()
+ - CLEANUP: mailers: use pool_zalloc() in enqueue_one_email_alert()
+ - CLEANUP: resolvers: use pool_zalloc() in resolv_link_resolution()
+ - CLEANUP: ssl: use pool_zalloc() in ssl_init_keylog()
+ - CLEANUP: tcpcheck: use pool_zalloc() instead of pool_alloc+memset
+ - CLEANUP: quic: use pool_zalloc() instead of pool_alloc+memset
+ - MINOR: time: also provide a global, monotonic global_now_ms timer
+ - BUG/MEDIUM: freq_ctr/threads: use the global_now_ms variable
+ - MINOR: tools: introduce new option PA_O_DEFAULT_DGRAM on str2sa_range.
+ - BUILD: tools: fix build error with new PA_O_DEFAULT_DGRAM
+ - BUG/MINOR: ssl: Prevent disk access when using "add ssl crt-list"
+ - CLEANUP: ssl: remove unused definitions
+ - BUILD: ssl: guard ecdh functions with SSL_CTX_set_tmp_ecdh macro
+ - MINOR: lua: Slightly improve function dumping the lua traceback
+ - BUG/MEDIUM: debug/lua: Use internal hlua function to dump the lua traceback
+ - BUG/MEDIUM: lua: Always init the lua stack before referencing the context
+ - MINOR: fd: make fd_clr_running() return the remaining running mask
+ - MINOR: fd: remove the unneeded running bit from fd_insert()
+ - BUG/MEDIUM: fd: do not wait on FD removal in fd_delete()
+ - CLEANUP: fd: remove unused fd_set_running_excl()
+ - CLEANUP: fd: slightly simplify up _fd_delete_orphan()
+ - BUG/MEDIUM: fd: Take the fd_mig_lock when closing if no DWCAS is available.
+ - BUG/MEDIUM: release lock on idle conn killing on reached pool high count
+ - BUG/MEDIUM: thread: Fix a deadlock if an isolated thread is marked as harmless
+ - MINOR: tools: make url2ipv4 return the exact number of bytes parsed
+ - BUG/MINOR: http_fetch: make hdr_ip() reject trailing characters
+ - BUG/MEDIUM: mux-h1: make h1_shutw_conn() idempotent
+ - BUG/MINOR: ssl: Fix update of default certificate
+ - BUG/MINOR: ssl: Prevent removal of crt-list line if the instance is a default one
+ - BUILD: ssl: introduce fine guard for ssl random extraction functions
+ - REORG: global: move initcall register code in a dedicated file
+ - REORG: global: move free acl/action in their related source files
+ - REORG: split proxy allocation functions
+ - MINOR: proxy: implement a free_proxy function
+ - MINOR: proxy: define cap PR_CAP_LUA
+ - MINOR: lua: properly allocate the lua Socket proxy
+ - MINOR: lua: properly allocate the lua Socket servers
+ - MINOR: vars: make get_vars() allow the session to be null
+ - MINOR: vars: make the var() sample fetch keyword depend on nothing
+ - CLEANUP: sample: remove duplicate "stopping" sample fetch keyword
+ - MINOR: sample: make smp_resolve_args() return an allocate error message
+ - MINOR: sample: add a new SMP_SRC_CONST sample capability
+ - MINOR: sample: mark the truly constant sample fetch keywords as such
+ - MINOR: sample: add a new CFG_PARSER context for samples
+ - MINOR: action: add a new ACT_F_CFG_PARSER origin designation
+ - MEDIUM: vars: add support for a "set-var" global directive
+ - REGTESTS: add a basic reg-test for some "set-var" commands
+ - MINOR: sample: add a new CLI_PARSER context for samples
+ - MINOR: action: add a new ACT_F_CLI_PARSER origin designation
+ - MINOR: vars/cli: add a "get var" CLI command to retrieve global variables
+ - MEDIUM: cli: add a new experimental "set var" command
+ - MINOR: compat: add short aliases for a few very commonly used types
+ - BUILD: ssl: use EVP_CIPH_GCM_MODE macro instead of HA_OPENSSL_VERSION
+ - MEDIUM: backend: use a trylock to grab a connection on high FD counts as well
+
+2021/03/19 : 2.4-dev13
+ - BUG/MEDIUM: cli: fix "help" crashing since recent spelling fixes
+ - BUG/MINOR: cfgparse: use the GLOBAL not LISTEN keywords list for spell checking
+ - MINOR: tools: improve word fingerprinting by counting presence
+ - MINOR: tools: do not sum squares of differences for word fingerprints
+ - MINOR: cli: improve fuzzy matching to work on all remaining words at once
+ - MINOR: cli: sort the suggestions by order of relevance
+ - MINOR: cli: limit spelling suggestions to 5
+ - MINOR: cfgparse/proxy: also support spelling fixes on options
+ - BUG/MINOR: resolvers: Add missing case-insensitive comparisons of DNS hostnames
+ - MINOR: time: export the global_now variable
+ - BUG/MINOR: freq_ctr/threads: make use of the last updated global time
+ - MINOR: freq_ctr/threads: relax when failing to update a sliding window value
+ - MINOR/BUG: mworker/cli: do not use the unix_bind prefix for the master CLI socket
+ - MINOR: mworker/cli: alert the user if we enabled a master CLI but not the master-worker mode
+ - MINOR: cli: implement experimental-mode
+ - REORG: server: add a free server function
+ - MINOR: cfgparse: always alloc idle conns task
+ - REORG: server: move keywords in srv_kws
+ - MINOR: server: remove fastinter from mistyped kw list
+ - REORG: server: split parse_server
+ - REORG: server: move alert traces in parse_server
+ - REORG: server: rename internal functions from parse_server
+ - REORG: server: attach servers in parse_server
+ - REORG: server: use flags for parse_server
+ - MINOR: server: prepare parsing for dynamic servers
+ - MINOR: stats: export function to allocate extra proxy counters
+ - MEDIUM: server: implement 'add server' cli command
+ - REGTESTS: implement test for 'add server' cli
+ - MINOR: server: enable standard options for dynamic servers
+ - MINOR: server: support keyword proto in 'add server' cli
+ - BUG/MINOR: protocol: add missing support of dgram unix socket.
+ - CLEANUP: Fix a typo in fix_is_valid description
+ - MINOR: raw_sock: Add a close method.
+ - MEDIUM: connections: Introduce a new XPRT method, start().
+ - MEDIUM: connections: Implement a start() method for xprt_handshake.
+ - MEDIUM: connections: Implement a start() method in ssl_sock.
+ - MINOR: muxes: garbage collect the reset() method.
+ - CLEANUP: tcp-rules: Fix a typo in error messages about expect-netscaler-cip
+ - MEDIUM: lua: Use a per-thread counter to track some non-reentrant parts of lua
+ - BUG/MEDIUM: debug/lua: Don't dump the lua stack if not dumpable
+
+2021/03/13 : 2.4-dev12
+ - CLEANUP: connection: Use `VAR_ARRAY` in `struct tlv` definition
+ - CLEANUP: connection: Remove useless test for NULL before calling `pool_free()`
+ - CLEANUP: connection: Use istptr / istlen for proxy_unique_id
+ - MINOR: connection: Use a `struct ist` to store proxy_authority
+ - CLEANUP: connection: Consistently use `struct ist` to process all TLV types
+ - BUILD: task: fix build at -O0 with threads disabled
+ - BUILD: bug: refine HA_LINK_ERROR() to only be used on gcc and derivatives
+ - CLEANUP: config: make the cfg_keyword parsers take a const for the defproxy
+ - BUILD: connection: do not use VAR_ARRAY in struct tlv
+ - BUG/MEDIUM: session: NULL dereference possible when accessing the listener
+ - MINOR: build: force CC to set a return code when probing options
+ - CLEANUP: stream: rename a few remaining occurrences of "stream *sess"
+ - BUG/MEDIUM: resolvers: handle huge responses over tcp servers.
+ - CLEANUP: config: also address the cfg_keyword API change in the compression code
+ - BUG/MEDIUM: ssl: properly remove the TASK_HEAVY flag at end of handshake
+ - BUG/MINOR: sample: Rename SenderComID/TargetComID to SenderCompID/TargetCompID
+ - MINOR: task: give the scheduler a bit more flexibility in the runqueue size
+ - OPTIM: task: automatically adjust the default runqueue-depth to the threads
+ - BUG/MINOR: connection: Missing QUIC initialization
+ - BUG/MEDIUM: stick-tables: fix ref counter in table entry using multiple http track-sc.
+ - BUILD: atomic/arm64: force the register pairs to use in __ha_cas_dw()
+ - BUG/MEDIUM: filters: Set CF_FL_ANALYZE on channels when filters are attached
+ - BUG/MINOR: tcpcheck: Update .health threshold of agent inside an agent-check
+ - BUG/MINOR: proxy/session: Be sure to have a listener to increment its counters
+ - BUG/MINOR: tcpcheck: Fix double free on error path when parsing tcp/http-check
+ - BUG/MINOR: server-state: properly handle the case where the base is not set
+ - BUG/MINOR: server-state: use the argument, not the global state
+ - CLEANUP: tcp-rules: add missing actions in the tcp-request error message
+ - CLEANUP: vars: make the error message clearer on missing arguments for set-var
+ - CLEANUP: http-rules: remove the unexpected comma before the list of action keywords
+ - CLEANUP: actions: the keyword must always be const from the rule
+ - MINOR: tools: add simple word fingerprinting to find similar-looking words
+ - MINOR: cfgparse: add cfg_find_best_match() to suggest an existing word
+ - MINOR: cfgparse: suggest correct spelling for unknown words in proxy sections
+ - MINOR: cfgparse: suggest correct spelling for unknown words in global section
+ - MINOR: cfgparse/server: try to fix spelling mistakes on server lines
+ - MINOR: cfgparse/bind: suggest correct spelling for unknown bind keywords
+ - MINOR: actions: add a function to suggest an action resembling a given word
+ - MINOR: http-rules: suggest approaching action names on mismatch
+ - MINOR: tcp-rules: suggest approaching action names on mismatch
+ - BUG/MINOR: cfgparse/server: increment the extra keyword counter one at a time
+ - Revert "BUG/MINOR: resolvers: Only renew TTL for SRV records with an additional record"
+ - BUG/MINOR: resolvers: Consider server to have no IP on DNS resolution error
+ - BUG/MINOR: resolvers: Reset server address on DNS error only on status change
+ - BUG/MINOR: resolvers: Unlink DNS resolution to set RMAINT on SRV resolution
+ - BUG/MEDIUM: resolvers: Don't set an address-less server as UP
+ - BUG/MEDIUM: resolvers: Fix the loop looking for an existing ADD item
+ - MINOR: resolvers: new function find_srvrq_answer_record()
+ - BUG/MINOR: resolvers: Ignore DNS resolution for expired SRV item
+ - BUG/MEDIUM: resolvers: Trigger a DNS resolution if an ADD item is obsolete
+ - MINOR: resolvers: Use a function to remove answers attached to a resolution
+ - MINOR: resolvers: Purge answer items when a SRV resolution triggers an error
+ - MINOR: resolvers: Add function to change the srv status based on SRV resolution
+ - MINOR: resolvers: Directly call srvrq_update_srv_state() when possible
+ - BUG/MEDIUM: resolvers: Don't release resolution from a requester callbacks
+ - BUG/MEDIUM: resolvers: Skip DNS resolution at startup if SRV resolution is set
+ - MINOR: resolvers: Use milliseconds for cached items in resolver responses
+ - MINOR: resolvers: Don't try to match immediately renewed ADD items
+ - CLEANUP: resolvers: Use ha_free() in srvrq_resolution_error_cb()
+ - CLEANUP: resolvers: Perform unsafe loop on requester list when possible
+ - BUG/MINOR: cli: make sure "help", "prompt", "quit" are enabled at master level
+ - CLEANUP: cli: fix misleading comment and better indent the access level flags
+ - MINOR: cli: set the ACCESS_MASTER* bits on the master bind_conf
+ - MINOR: cli: test the appctx level for master access instead of comparing pointers
+ - MINOR: cli: print the error message in the parser function itself
+ - MINOR: cli: filter the list of commands to the matching part
+ - MEDIUM: cli: apply spelling fixes for known commands before listing them
+ - MINOR: tools: add the ability to update a word fingerprint
+ - MINOR: cli: apply the fuzzy matching on the whole command instead of words
+ - CLEANUP: cli: rename MAX_STATS_ARGS to MAX_CLI_ARGS
+ - CLEANUP: cli: rename the last few "stats_" to "cli_"
+ - CLEANUP: task: make sure tasklet handlers always indicate their statuses
+ - CLEANUP: assorted typo fixes in the code and comments
+
+2021/03/05 : 2.4-dev11
+ - CI: codespell: skip Makefile for spell check
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: tcp-act: Don't forget to set the original port for IPv4 set-dst rule
+ - BUG/MINOR: connection: Use the client's dst family for addressless servers
+ - BUG/MEDIUM: spoe: Kill applets if there are pending connections and nbthread > 1
+ - CLEANUP: Use ist2(const void*, size_t) whenever possible
+ - CLEANUP: Use IST_NULL whenever possible
+ - BUILD: proxy: Missing header inclusion for quic_transport_params_init()
+ - BUILD: quic: Implicit conversion between SSL related enums.
+ - DOC: spoe: Add a note about fragmentation support in HAProxy
+ - MINOR: contrib: add support for heartbeat control messages.
+ - MINOR: contrib: Enhance peers dissector heuristic.
+ - BUG/MINOR: mux-h2: Fix typo in scheme adjustment
+ - CLEANUP: Reapply the ist2() replacement patch
+ - CLEANUP: Use istadv(const struct ist, const size_t) whenever possible
+ - CLEANUP: Use isttest(const struct ist) whenever possible
+ - Revert "CI: Pin VTest to a known good commit"
+ - CLEANUP: backend: fix a wrong comment
+ - BUG/MINOR: backend: free allocated bind_addr if reuse conn
+ - MINOR: backend: handle reuse for conns with no server as target
+ - REGTESTS: test http-reuse if no server target
+ - BUG/MINOR: hlua: Don't strip last non-LWS char in hlua_pushstrippedstring()
+ - BUG/MINOR: server-state: Don't load server-state file for disabled backends
+ - CLEANUP: dns: Use DISGUISE() on a never-failing ring_attach() call
+ - CLEANUP: dns: Remove useless test on ns->dgram in dns_connect_nameserver()
+ - DOC: fix originalto except clause on destination address
+ - CLEANUP: Use the ist() macro whenever possible
+ - CLEANUP: Replace for loop with only a condition by while
+ - REORG: atomic: reimplement pl_cpu_relax() from atomic-ops.h
+ - BUG/MINOR: mt-list: always perform a cpu_relax call on failure
+ - MINOR: atomic: add armv8.1-a atomics variant for cas-dw
+ - MINOR: atomic: implement a more efficient arm64 __ha_cas_dw() using pairs
+ - BUG/MINOR: ssl: don't truncate the file descriptor to 16 bits in debug mode
+ - MEDIUM: pools: add CONFIG_HAP_NO_GLOBAL_POOLS and CONFIG_HAP_GLOBAL_POOLS
+ - MINOR: pools: double the local pool cache size to 1 MB
+ - MINOR: stream: use ABORT_NOW() and not abort() in stream_dump_and_crash()
+ - CLEANUP: stream: explain why we queue the stream at the head of the server list
+ - MEDIUM: backend: use a trylock when trying to grab an idle connection
+ - REORG: tools: promote the debug PRNG to more general use as a statistical one
+ - OPTIM: lb-random: use a cheaper PRNG to pick a server
+ - MINOR: task: stop abusing the nice field to detect a tasklet
+ - MINOR: task: move the nice field to the struct task only
+ - MEDIUM: task: extend the state field to 32 bits
+ - MINOR: task: add an application specific flag to the state: TASK_F_USR1
+ - MEDIUM: muxes: mark idle conns tasklets with TASK_F_USR1
+ - MINOR: xprt: add new xprt_set_idle and xprt_set_used methods
+ - MEDIUM: ssl: implement xprt_set_used and xprt_set_idle to relax context checks
+ - MINOR: server: don't read curr_used_conns multiple times
+ - CLEANUP: global: reorder some fields to respect cache lines
+ - CLEANUP: sockpair: silence a coverity check about fcntl()
+ - CLEANUP: lua: set a dummy file name and line number on the dummy servers
+ - MINOR: server: add a global list of all known servers
+ - MINOR: cfgparse: finish to set up servers outside of the proxy setup loop
+ - MINOR: server: allocate a per-thread struct for the per-thread connections stuff
+ - MINOR: server: move actconns to the per-thread structure
+ - CLEANUP: server: reorder some fields in the server struct to respect cache lines
+ - MINOR: backend: add a BUG_ON if conn mux NULL in connect_server
+ - BUG/MINOR: backend: fix condition for reuse on mode HTTP
+ - BUILD: Fix build when using clang without optimizing.
+ - CLEANUP: assorted typo fixes in the code and comments
+
+2021/02/26 : 2.4-dev10
+ - BUILD: SSL: introduce fine guard for RAND_keep_random_devices_open
+ - MINOR: Configure the `cpp` userdiff driver for *.[ch] in .gitattributes
+ - BUG/MINOR: ssl/cli: potential null pointer dereference in "set ssl cert"
+ - BUG/MINOR: sample: secure convs that accept base64 string and var name as args
+ - BUG/MEDIUM: vars: make functions vars_get_by_{name,desc} thread-safe
+ - CLEANUP: vars: make smp_fetch_var() to reuse vars_get_by_desc()
+ - DOC: muxes: add a diagram of the exchanges between muxes and outer world
+ - BUG/MEDIUM: proxy: use thread-safe stream killing on hard-stop
+ - BUG/MEDIUM: cli/shutdown sessions: make it thread-safe
+ - BUG/MINOR: proxy: wake up all threads when sending the hard-stop signal
+ - MINOR: stream: add an "epoch" to figure which streams appeared when
+ - MINOR: cli/streams: make "show sess" dump all streams till the new epoch
+ - MINOR: streams: use one list per stream instead of a global one
+ - MEDIUM: streams: do not use the streams lock anymore
+ - BUILD: dns: avoid a build warning when threads are disabled (dss unused)
+ - MEDIUM: task: remove the tasks_run_queue counter and have one per thread
+ - MINOR: tasks: do not maintain the rqueue_size counter anymore
+ - CLEANUP: tasks: use a less confusing name for task_list_size
+ - CLEANUP: task: move the tree root detection from __task_wakeup() to task_wakeup()
+ - MINOR: task: limit the remote thread wakeup to the global runqueue only
+ - MINOR: task: move the allocated tasks counter to the per-thread struct
+ - CLEANUP: task: split the large tasklet_wakeup_on() function in two
+ - BUG/MINOR: fd: properly wait for !running_mask in fd_set_running_excl()
+ - BUG/MINOR: resolvers: Fix condition to release received ARs if not assigned
+ - BUG/MINOR: resolvers: Only renew TTL for SRV records with an additional record
+ - BUG/MINOR: resolvers: new callback to properly handle SRV record errors
+ - BUG/MEDIUM: resolvers: Reset server address and port for obsolete SRV records
+ - BUG/MEDIUM: resolvers: Reset address for unresolved servers
+ - DOC: Update the module list in MAINTAINERS file
+ - MINOR: htx: Add function to reserve the max possible size for an HTX DATA block
+ - DOC: Update the HTX API documentation
+ - DOC: Update the filters guide
+ - BUG/MEDIUM: contrib/prometheus-exporter: fix segfault in listener name dump
+ - MINOR: task: split the counts of local and global tasks picked
+ - MINOR: task: do not use __task_unlink_rq() from process_runnable_tasks()
+ - MINOR: task: don't decrement then increment the local run queue
+ - CLEANUP: task: re-merge __task_unlink_rq() with task_unlink_rq()
+ - MINOR: task: make grq_total atomic to move it outside of the grq_lock
+ - MINOR: tasks: also compute the tasklet latency when DEBUG_TASK is set
+ - MINOR: task: make tasklet wakeup latency measurements more accurate
+ - MINOR: server: Be more strict on the server-state line parsing
+ - MINOR: server: Only fill one array when parsing a server-state line
+ - MEDIUM: server: Refactor apply_server_state() to make it more readable
+ - CLEANUP: server: Rename state_line node to node instead of name_name
+ - CLEANUP: server: Rename state_line structure into server_state_line
+ - CLEANUP: server: Use a local eb-tree to store lines of the global server-state file
+ - MINOR: server: Be more strict when reading the version of a server-state file
+ - MEDIUM: server: Store parsed params of a server-state line in the tree
+ - MINOR: server: Remove cached line from global server-state tree when found
+ - MINOR: server: Move loading state of servers in a dedicated function
+ - MEDIUM: server: Use a tree to store local server-state lines
+ - MINOR: server: Parse and store server-state lines in a dedicated function
+ - MEDIUM: server: Don't load server-state file if a line is corrupted
+ - REORG: server: Export and rename some functions updating server info
+ - REORG: server-state: Move functions to deal with server-state in its own file
+ - MINOR: server-state: Don't load server-state file for serverless proxies
+ - CLEANUP: muxes: Remove useless if condition in show_fd function
+ - BUG/MINOR: stats: fix compare of no-maint url suffix
+ - MINOR: task: limit the number of subsequent heavy tasks with flag TASK_HEAVY
+ - MINOR: ssl: mark the SSL handshake tasklet as heavy
+ - CLEANUP: server: rename srv_cleanup_{idle,toremove}_connections()
+ - BUG/MINOR: ssl: potential null pointer dereference in ckchs_dup()
+ - MINOR: task: add one extra tasklet class: TL_HEAVY
+ - MINOR: task: place the heavy elements in TL_HEAVY
+ - MINOR: task: only limit TL_HEAVY tasks but not others
+ - BUG/MINOR: http-ana: Only consider dst address to process originalto option
+ - MINOR: tools: Add net_addr structure describing a network address
+ - MINOR: tools: Add function to compare an address to a network address
+ - MEDIUM: http-ana: Add IPv6 support for forwardfor and originalto options
+ - CLEANUP: hlua: Use net_addr structure internally to parse and compare addresses
+ - REGTESTS: Add script to test except param for forwardfor/originalto options
+ - DOC: scheduler: add a diagram showing the different queues and their usages
+ - CLEANUP: tree-wide: replace free(x);x=NULL with ha_free(&x)
+ - CLEANUP: config: replace a few free() with ha_free()
+ - CLEANUP: vars: always zero the pointers after a free()
+ - CLEANUP: ssl: remove a useless "if" before freeing an error message
+ - CLEANUP: ssl: make ssl_sock_free_srv_ctx() zero the pointers after free
+ - CLEANUP: ssl: use realloc() instead of free()+malloc()
+
+2021/02/20 : 2.4-dev9
+ - BUG/MINOR: server: Remove RMAINT from admin state when loading server state
+ - CLEANUP: check: fix get_check_status_info declaration
+ - CLEANUP: contrib/prometheus-exporter: align for with srv status case
+ - MEDIUM: stats: allow to select one field in `stats_fill_li_stats`
+ - MINOR: stats: add helper to get status string
+ - MEDIUM: contrib/prometheus-exporter: add listen stats
+ - BUG/MINOR: dns: add test on result getting value from buffer into ring.
+ - BUG/MINOR: dns: dns_connect_server must return -1 unsupported nameserver's type
+ - BUG/MINOR: dns: missing test writing in output channel in session handler
+ - BUG/MINOR: dns: fix ring attach control on dns_session_new
+ - BUG/MEDIUM: dns: fix multiple double close on fd in dns.c
+ - BUG/MAJOR: connection: prevent double free if conn selected for removal
+ - BUG/MINOR: session: atomically increment the tracked sessions counter
+ - REGTESTS: fix http_reuse_conn_hash proxy test
+ - BUG/MINOR: backend: do not call smp_make_safe for sni conn hash
+ - MINOR: connection: remove pointers for prehash in conn_hash_params
+ - BUG/MINOR: checks: properly handle wrapping time in __health_adjust()
+ - BUG/MEDIUM: checks: don't needlessly take the server lock in health_adjust()
+ - DEBUG: thread: add 5 extra lock labels for statistics and debugging
+ - OPTIM: server: switch the actconn list to an mt-list
+ - Revert "MINOR: threads: change lock_t to an unsigned int"
+ - MINOR: lb/api: let callers of take_conn/drop_conn tell if they have the lock
+ - OPTIM: lb-first: do not take the server lock on take_conn/drop_conn
+ - OPTIM: lb-leastconn: do not take the server lock on take_conn/drop_conn
+ - OPTIM: lb-leastconn: do not unlink the server if it did not change
+ - MINOR: tasks: add DEBUG_TASK to report caller info in a task
+ - MINOR: tasks/debug: add some extra controls of use-after-free in DEBUG_TASK
+ - BUG/MINOR: sample: Always consider zero size string samples as unsafe
+ - MINOR: cli: add missing agent commands for set server
+ - BUILD/MEDIUM: da Adding pcre2 support.
+ - BUILD: ssl: introduce fine guard for OpenSSL specific SCTL functions
+ - REGTESTS: reorder reuse conn proxy protocol test
+ - DOC: explain the relation between pool-low-conn and tune.idle-pool.shared
+ - MINOR: tasks: refine the default run queue depth
+ - MINOR: listener: refine the default MAX_ACCEPT from 64 to 4
+ - MINOR: mux_h2: do not try to remove front conn from idle trees
+ - REGTESTS: workaround for a crash with recent libressl on http-reuse sni
+ - BUG/MEDIUM: lists: Avoid an infinite loop in MT_LIST_TRY_ADDQ().
+ - MINOR: connection: allocate dynamically hash node for backend conns
+ - DOC: DeviceAtlas documentation typo fix.
+ - BUG/MEDIUM: spoe: Resolve the sink if a SPOE logs in a ring buffer
+ - BUG/MINOR: http-rules: Always replace the response status on a return action
+ - BUG/MINOR: server: Init params before parsing a new server-state line
+ - BUG/MINOR: server: Be sure to cut the last parsed field of a server-state line
+ - MEDIUM: server: Don't introduce a new server-state file version
+ - DOC: contrib/prometheus-exporter: remove htx reference
+ - REGTESTS: contrib/prometheus-exporter: test NaN values
+ - REGTESTS: contrib/prometheus-exporter: test well known labels
+ - CI: github actions: switch to stable LibreSSL release
+ - BUG/MINOR: server: Fix test on number of fields allowed in a server-state line
+ - MINOR: dynbuf: make the buffer wait queue per thread
+ - MINOR: dynbuf: use regular lists instead of mt_lists for buffer_wait
+ - MINOR: dynbuf: pass offer_buffers() the number of buffers instead of a threshold
+ - MINOR: sched: have one runqueue ticks counter per thread
+
+2021/02/13 : 2.4-dev8
+ - BUILD: ssl: fix typo in HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT macro
+ - BUILD: ssl: guard SSL_CTX_add_server_custom_ext with special macro
+ - BUG/MINOR: mux-h1: Don't emit extra CRLF for empty chunked messages
+ - MINOR: contrib/prometheus-exporter: use stats desc when possible followup
+ - MEDIUM: contrib/prometheus-exporter: export base stick table stats
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CLEANUP: check: fix some typo in comments
+ - CLEANUP: tools: typo in `strl2irc` mention
+ - BUILD: ssl: guard SSL_CTX_set_msg_callback with SSL_CTRL_SET_MSG_CALLBACK macro
+ - MEDIUM: ssl: add a rwlock for SSL server session cache
+ - BUG/MINOR: intops: fix mul32hi()'s off-by-one
+ - BUG/MINOR: freq_ctr: fix a wrong delay calculation in next_event_delay()
+ - MINOR: stick-tables/counters: add http_fail_cnt and http_fail_rate data types
+ - MINOR: ssl: add SSL_SERVER_LOCK label in threads.h
+ - BUG/MINOR: mux-h1: Don't increment HTTP error counter for 408/500/501 errors
+ - BUG/MINOR: http-ana: Don't increment HTTP error counter on internal errors
+ - BUG/MEDIUM: mux-h1: Always set CS_FL_EOI for response in MSG_DONE state
+ - BUG/MINOR: mux-h1: Fix data skipping for bodyless responses
+ - BUG/MINOR: mux-h1: Don't blindly skip EOT block for non-chunked messages
+ - BUG/MEDIUM: mux-h2: Add EOT block when EOM flag is set on an empty HTX message
+ - MINOR: mux-h1: Be sure EOM flag is set when processing end of outgoing message
+ - REGTESTS: Add a script to test payload skipping for bodyless HTTP responses
+ - BUG/MINOR: server: re-align state file fields number
+ - CLEANUP: muxes: Remove useless calls to b_realign_if_empty()
+ - BUG/MINOR: tools: Fix a memory leak on error path in parse_dotted_uints()
+ - CLEANUP: remove unused variable assigned found by Coverity
+ - CLEANUP: queue: Remove useless tests on p or pp in pendconn_process_next_strm()
+ - BUG/MINOR: backend: hold correctly lock when killing idle conn
+ - MEDIUM: connection: protect idle conn lists with locks
+ - MEDIUM: connection: replace idle conn lists by eb trees
+ - MINOR: backend: search conn in idle/safe trees after available
+ - MINOR: backend: search conn in idle tree after safe on always reuse
+ - MINOR: connection: prepare hash calculation for server conns
+ - MINOR: connection: use the srv pointer for the srv conn hash
+ - MINOR: backend: compare conn hash for session conn reuse
+ - MINOR: connection: use sni as parameter for srv conn hash
+ - MINOR: reg-tests: test http-reuse with sni
+ - MINOR: backend: rewrite alloc of stream target address
+ - MINOR: connection: use dst addr as parameter for srv conn hash
+ - MINOR: reg-test: test http-reuse with specific dst addr
+ - MINOR: backend: rewrite alloc of connection src address
+ - MINOR: connection: use src addr as parameter for srv conn hash
+ - MINOR: connection: use proxy protocol as parameter for srv conn hash
+ - MINOR: reg-tests: test http-reuse with proxy protocol
+ - MINOR: doc: update http reuse for new eligible connections
+ - BUG/MINOR: backend: fix compilation without ssl
+ - REGTESTS: adjust http_reuse_conn_hash requirements
+ - REGTESTS: deactivate a failed test on CI in http_reuse_conn_hash
+ - REGTESTS: fix sni used in http_reuse_conn_hash for libressl 3.3.0
+ - CI: cirrus: update FreeBSD image to 12.2
+ - MEDIUM: cli: add check-addr command
+ - MEDIUM: cli: add agent-port command
+ - MEDIUM: server: add server-states version 2
+ - MEDIUM: server: support {check,agent}_addr, agent_port in server state
+ - MINOR: server: enhance error precision when applying server state
+ - BUG/MINOR: server: Fix server-state-file-name directive
+ - CLEANUP: deinit: release global and per-proxy server-state variables on deinit
+ - BUG/MEDIUM: config: don't pick unset values from last defaults section
+ - BUG/MINOR: stats: revert the change on ST_CONVDONE
+ - BUG/MINOR: cfgparse: do not mention "addr:port" as supported on proxy lines
+ - BUG/MINOR: http-htx: defpx must be a const in proxy_dup_default_conf_errors()
+ - BUG/MINOR: tcpheck: the source list must be a const in dup_tcpcheck_var()
+ - BUILD: proxy: add missing compression-t.h to proxy-t.h
+ - REORG: move init_default_instance() to proxy.c and pass it the defproxy pointer
+ - REORG: proxy: centralize the proxy allocation code into alloc_new_proxy()
+ - MEDIUM: proxy: only take defaults when a default proxy is passed.
+ - MINOR: proxy: move the defproxy freeing code to proxy.c
+ - MINOR: proxy: always properly reset the just freed default instance pointers
+ - BUG/MINOR: extcheck: proxy_parse_extcheck() must take a const for the defproxy
+ - BUG/MINOR: tcpcheck: proxy_parse_*check*() must take a const for the defproxy
+ - BUG/MINOR: server: parse_server() must take a const for the defproxy
+ - MINOR: cfgparse: move defproxy to cfgparse-listen as a static
+ - MINOR: proxy: add a new capability PR_CAP_DEF
+ - MINOR: cfgparse: check PR_CAP_DEF instead of comparing pointer against defproxy
+ - MINOR: cfgparse: use a pointer to the current default proxy
+ - MINOR: proxy: also store the name for a defaults section
+ - MINOR: proxy: support storing defaults sections into their own tree
+ - MEDIUM: proxy: store the default proxies in a tree by name
+ - MEDIUM: cfgparse: allow a proxy to designate the defaults section to use
+ - MINOR: http: add baseq sample fetch
+ - CLEANUP: tcpcheck: Remove a useless test on port variable
+ - BUG/MINOR: server: Don't call fopen() with server-state filepath set to NULL
+ - CLEANUP: server: Remove useless "filepath" variable in apply_server_state()
+ - MINOR: peers/cli: do not dump the peers dictionaries by default on "show peers"
+ - MINOR: cfgparse: implement a simple if/elif/else/endif macro block handler
+ - DOC: tune: explain the origin of block size for ssl.cachesize
+ - MINOR: tcp: add support for defer-accept on FreeBSD.
+ - MINOR: ring: adds new ring_init function.
+ - CLEANUP: channel: fix comment in ci_putblk.
+ - BUG/MINOR: dns: add missing sent counter and parent id to dns counters.
+ - BUG/MINOR: resolvers: fix attribute packed struct for dns
+ - MINOR: resolvers: renames some resolvers internal types and removes dns prefix
+ - MINOR: resolvers: renames type dns_resolvers to resolvers.
+ - MINOR: resolvers: renames some resolvers specific types to not use dns prefix
+ - MINOR: resolvers: renames some dns prefixed types using resolv prefix.
+ - MINOR: resolvers: renames resolvers DNS_RESP_* errcodes RSLV_RESP_*
+ - MINOR: resolvers: renames resolvers DNS_UPD_* returncodes to RSLV_UPD_*
+ - MINOR: resolvers: rework prototype suffixes to split resolving and dns.
+ - MEDIUM: resolvers: move resolvers section parsing from cfgparse.c to dns.c
+ - MINOR: resolvers: replace nameserver's resolver ref by generic parent pointer
+ - MINOR: resolvers: rework dns stats prototype because specific to resolvers
+ - MEDIUM: resolvers: split resolving and dns message exchange layers.
+ - MEDIUM: resolvers/dns: split dns.c into dns.c and resolvers.c
+ - MEDIUM: dns: adds code to support pipelined DNS requests over TCP.
+ - MEDIUM: resolvers: add supports of TCP nameservers in resolvers.
+
+2021/02/05 : 2.4-dev7
+ - BUG/MINOR: stats: Continue to fill frontend stats on unimplemented metric
+ - BUILD: ssl: guard Client Hello callbacks with HAVE_SSL_CLIENT_HELLO_CB macro instead of openssl version
+ - BUG/MINOR: stats: Init the metric variable when frontend stats are filled
+ - MINOR: contrib/prometheus-exporter: better output of Not-a-Number
+ - CLEANUP: stats: improve field selection for frontend http fields
+ - CLEANUP: assorted typo fixes in the code and comments
+ - DOC: Improve documentation of the various hdr() fetches
+ - MEDIUM: stats: allow to select one field in `stats_fill_be_stats`
+ - MINOR: contrib/prometheus-exporter: use fill_be_stats for backend dump
+ - MEDIUM: stats: allow to select one field in `stats_fill_sv_stats`
+ - MINOR: contrib/prometheus-exporter: use fill_sv_stats for server dump
+ - MINOR: abort() on my_unreachable() when DEBUG_USE_ABORT is set.
+ - BUG/MEDIUM: filters/htx: Fix data forwarding when payload length is unknown
+ - BUG/MINOR: config: fix leak on proxy.conn_src.bind_hdr_name
+ - MINOR: reg-tests: add http-reuse test
+ - CLEANUP: srv: fix comment for pool-max-conn
+ - CLEANUP: backend: remove an obsolete comment on conn_backend_get
+ - REORG: backend: simplify conn_backend_get
+ - MINOR: ssl: Server ssl context prepare function refactoring
+ - MINOR: ssl: Certificate chain loading refactorization
+ - MEDIUM: ssl: Load client certificates in a ckch for backend servers
+ - MEDIUM: ssl: Enable backend certificate hot update
+ - MINOR: ssl: Remove client_crt member of the server's ssl context
+ - CLEANUP: ssl/cli: rework free in cli_io_handler_commit_cert()
+ - CLEANUP: ssl: remove SSL_CTX function parameter
+ - CLEANUP: ssl: make load_srv_{ckchs,cert} match their bind counterpart
+ - BUILD: Include stdlib.h in compiler.h if DEBUG_USE_ABORT is set
+ - CI: Fix DEBUG_STRICT definition for Coverity
+ - BUG/MINOR: stats: Remove a break preventing ST_F_QCUR to be set for servers
+ - BUG/MINOR: stats: Add a break after filling ST_F_MODE field for servers
+ - CLEANUP: ssl: remove dead code in ckch_inst_new_load_srv_store()
+ - BUG/MINOR: ssl: init tmp chunk correctly in ssl_sock_load_sctl_from_file()
+ - BUG/MEDIUM: session: only retrieve ready idle conn from session
+ - BUG/MEDIUM: backend: never reuse a connection for tcp mode
+ - REGTESTS: set_ssl_server_cert.vtc: remove the abort command
+ - REGTESTS: set_ssl_server_cert.vtc: check the Sha1 Fingerprint
+ - REGTESTS: set_ssl_server_cert.vtc: check the sha1 from the server
+ - MEDIUM: stream-int: Take care of EOS if the SI wake callback function
+ - MINOR: mux-h1: Try to wake up data layer first before calling its wake callback
+ - MINOR: mux-h1: Wake up H1C after its creation if input buffer is not empty
+ - MEDIUM: mux-h1: Add ST_READY state for the H1 connections
+ - MINOR: stream: Add a function to validate TCP to H1 upgrades
+ - MEDIUM: http-ana: Do nothing in wait-for-request analyzer if not htx
+ - BUG/MEDIUM: stream: Don't immediately ack the TCP to H1 upgrades
+ - BUG/MAJOR: mux-h1: Properly handle TCP to H1 upgrades
+ - MINOR: htx/http-ana: Save info about Upgrade option in the Connection header
+ - MEDIUM: http-ana: Refuse invalid 101-switching-protocols responses
+ - BUG/MINOR: h2/mux-h2: Reject 101 responses with a PROTOCOL_ERROR h2s error
+ - MINOR: mux-h1/mux-fcgi: Don't set TUNNEL mode if payload length is unknown
+ - MINOR: mux-h1: Split H1C_F_WAIT_OPPOSITE flag to separate input/output sides
+ - MINOR: mux-h2: Add 2 flags to help to properly handle tunnel mode
+ - MEDIUM: mux-h2: Block client data on server side waiting tunnel establishment
+ - MEDIUM: mux-h2: Close streams when processing data for an aborted tunnel
+ - MEDIUM: mux-h1: Properly handle tunnel establishments and aborts
+ - BUG/MAJOR: mux-h1/mux-h2/htx: Fix HTTP tunnel management at the mux level
+ - MINOR: htx: Rename HTX_FL_EOI flag into HTX_FL_EOM
+ - REGTESTS: Don't run http_msg_full_on_eom script on the 2.4 anymore
+ - MINOR: htx: Add a function to know if a block is the only one in a message
+ - MAJOR: htx: Remove the EOM block type and use HTX_FL_EOM instead
+ - MINOR: mux-h1: Add a flag on H1 streams with a response known to be bodyless
+ - MEDIUM: mux-h1: Don't emit any payload for bodyless responses
+ - MINOR: mux-h1: Don't emit C-L and T-E headers for 204 and 1xx responses
+ - MINOR: mux-h1: Don't add Connection close/keep-alive header for 1xx messages
+ - MINOR: h2/mux-h2: Add flags to notify the response is known to have no body
+ - MEDIUM: mux-h2: Don't emit DATA frame for bodyless responses
+ - MEDIUM: http-ana: Deal with L7 retries in HTTP analysers
+ - MINOR: h1: reject websocket handshake if missing key
+ - MEDIUM: h1: generate WebSocket key on response if needed
+ - MINOR: mux_h2: define H2_SF_EXT_CONNECT_SENT stream flag
+ - MEDIUM: h2: parse Extended CONNECT response to htx
+ - MEDIUM: mux_h2: generate Extended CONNECT from htx upgrade
+ - MEDIUM: h1: add a WebSocket key on handshake if needed
+ - MEDIUM: mux_h2: generate Extended CONNECT response
+ - MEDIUM: h2: parse Extended CONNECT request to htx
+ - MEDIUM: h2: send connect protocol h2 settings
+ - MINOR: vtc: add test for h1/h2 protocol upgrade translation
+ - MINOR: vtc: add websocket test
+ - REGTESTS: Fix required versions for several scripts
+ - REGTEST: Don't use the websocket to validate http-check
+ - MINOR: mux-h1/trace: add traces at level ERROR for all kind of errors
+ - MINOR: mux-fcgi/trace: add traces at level ERROR for all kind of errors
+ - MINOR: h1: Raise the chunk size limit up to (2^52 - 1)
+ - BUG/MEDIUM: listener: do not accept connections faster than we can process them
+ - REGTESTS: set_ssl_server_cert.vtc: set as broken
+ - Revert "BUG/MEDIUM: listener: do not accept connections faster than we can process them"
+ - BUG/MINOR: backend: check available list allocation for reuse
+ - CI: Fix the coverity builds
+ - DOC: management: fix "show resolvers" alphabetical ordering
+ - MINOR: tools: add print_time_short() to print a condensed duration value
+ - MINOR: activity: make profiling more manageable
+ - MINOR: activity: declare a new structure to collect per-function activity
+ - MEDIUM: tasks/activity: collect per-task statistics when profiling is enabled
+ - MINOR: activity: also report collected tasks stats in "show profiling"
+ - MINOR: activity: flush scheduler stats on "set profiling tasks on"
+ - MINOR: activity: add a new "show tasks" command to list currently active tasks
+ - MINOR: listener: export accept_queue_process
+ - MINOR: session: export session_expire_embryonic()
+ - MINOR: muxes: export the timeout and shutr task handlers
+ - MINOR: checks: export a few functions that appear often in trace dumps
+ - MINOR: peers: export process_peer_sync() to improve traces
+ - MINOR: stick-tables: export process_table_expire()
+ - MINOR: mux-h1: Remove first useless test on count in h1_process_output()
+ - BUG/MINOR: stick-table: Always call smp_fetch_src() with a valid arg list
+ - MINOR: http-fetch: Don't check if argument list is set in sample fetches
+ - MINOR: http-conv: Don't check if argument list is set in sample converters
+ - MINOR: sample: Don't check if argument list is set in sample fetches
+ - MINOR: ssl-sample: Don't check if argument list is set in sample fetches
+ - MINOR: mux-h2: Don't tests the start-line when sending HEADERS frame
+ - MINOR: mux-h2: Slightly improve request HEADERS frames sending
+ - MINOR: contrib/prometheus-exporter: declare states for objects
+ - MAJOR: contrib/prometheus-exporter: move ftd/bkd/srv states to labels
+ - MEDIUM: contrib/prometheus-exporter: Use dynamic labels instead of static ones
+ - MINOR: listener: export manage_global_listener_queue()
+ - BUG/MINOR: activity: take care of late wakeups in "show tasks"
+ - REGTESTS: set_ssl_server_cert.vtc: remove SSL caching and set as working
+ - REGTESTS: set_ssl_server_cert: cleanup the SSL caching option
+ - MINOR: checks: Add function to get the result code corresponding to a status
+ - MAJOR: contrib/prometheus-exporter: move health check status to labels
+ - MINOR: contrib/prometheus-exporter: improve service status description field
+ - MINOR: stats: improve pending connections description
+ - MINOR: stats: improve max stats descriptions
+ - MINOR: contrib/prometheus-exporter: use stats desc when possible
+ - MINOR: contrib/prometheus-exporter: add uweight field
+ - MINOR: contrib/prometheus-exporter: add recv logs_logs_total field
+ - CLEANUP: contrib/prometheus-exporter: remove unused includes
+ - CLEANUP: contrib/prometheus-exporter: align and reorder fields
+ - CLEANUP: contrib/prometheus-exporter: remove description in README
+ - DOC: contrib/prometheus-exporter: Add missing metrics in README
+ - BUG/MINOR: contrib/prometheus-exporter: Add missing label for ST_F_HRSP_1XX
+ - BUG/MINOR: contrib/prometheus-exporter: Restart labels dump at the right pos
+ - BUG/MEDIUM: ssl/cli: abort ssl cert is freeing the old store
+ - BUG/MEDIUM: ssl: check a connection's status before computing a handshake
+ - BUG/MINOR: mux_h2: fix incorrect stat titles
+ - MINOR: ssl/cli: flush the server session cache upon 'commit ssl cert'
+ - BUG/MINOR: cli: fix set server addr/port coherency with health checks
+ - MINOR: server: Don't set the check port during the update from a state file
+ - MINOR: dns: Don't set the check port during a server dns resolution
+ - MEDIUM: check: remove checkport checkaddr flag
+ - MEDIUM: server: adding support for check_port in server state
+ - BUG/MINOR: check: consistent way to set agentaddr
+ - MEDIUM: check: align agentaddr and agentport behaviour
+ - DOC: server: Add missing params in comment of the server state line parsing
+ - BUG/MINOR: xxhash: make sure armv6 uses memcpy()
+ - REGTESTS: mark http-check-send.vtc as 2.4-only
+ - REGTESTS: mark sample_fetches/hashes.vtc as 2.4-only
+ - BUG/MINOR: ssl: do not try to use early data if not configured
+ - REGTESTS: unbreak http-check-send.vtc
+ - MINOR: cli/show_fd: report local and report ports when known
+ - BUILD: Makefile: move REGTESTST_TYPE default setting
+ - BUG/MEDIUM: mux-h2: handle remaining read0 cases
+ - CLEANUP: http-htx: Set buffer area to NULL instead of malloc(0)
+ - BUG/MINOR: sock: Unclosed fd in case of connection allocation failure
+ - BUG/MEDIUM: mux-h2: do not quit the demux loop before setting END_REACHED
+
+2021/01/22 : 2.4-dev6
+ - MINOR: converter: adding support for url_enc
+ - BUILD: SSL: guard TLS13 ciphersuites with HAVE_SSL_CTX_SET_CIPHERSUITES
+ - BUILD: ssl: guard EVP_PKEY_get_default_digest_nid with ASN1_PKEY_CTRL_DEFAULT_MD_NID
+ - BUILD: ssl: guard openssl specific with SSL_READ_EARLY_DATA_SUCCESS
+ - BUILD: Makefile: exclude broken tests by default
+ - CLEANUP: cfgparse: replace "realloc" with "my_realloc2" to fix to memory leak on error
+ - BUG/MINOR: hlua: Fix memory leak in hlua_alloc
+ - MINOR: contrib/prometheus-exporter: export build_info
+ - DOC: fix some spelling issues over multiple files
+ - CLEANUP: Fix spelling errors in comments
+ - SCRIPTS: announce-release: fix typo in help message
+ - CI: github: add a few more words to the codespell ignore list
+ - DOC: Add maintainers for the Prometheus exporter
+ - BUG/MINOR: sample: fix concat() converter's corruption with non-string variables
+ - BUG/MINOR: server: Memory leak of proxy.used_server_addr during deinit
+ - CLEANUP: sample: remove unneeded check in json validation
+ - MINOR: reg-tests: add a way to add service dependency
+ - BUG/MINOR: sample: check alloc_trash_chunk return value in concat()
+ - BUG/MINOR: reg-tests: fix service dependency script
+ - MINOR: reg-tests: add base prometheus test
+ - Revert "BUG/MINOR: dns: SRV records ignores duplicated AR records"
+ - BUG/MINOR: sample: Memory leak of sample_expr structure in case of error
+ - BUG/MINOR: check: Don't perform any check on servers defined in a frontend
+ - BUG/MINOR: init: enforce strict-limits when using master-worker
+ - MINOR: contrib/prometheus-exporter: avoid connection close header
+ - MINOR: contrib/prometheus-exporter: use fill_info for process dump
+ - BUG/MINOR: init: Use a dynamic buffer to set HAPROXY_CFGFILES env variable
+ - MINOR: config: Add failifnotcap() to emit an alert on proxy capabilities
+ - MINOR: server: Forbid server definitions in frontend sections
+ - BUG/MINOR: threads: Fixes the number of possible cpus report for Mac.
+ - CLEANUP: pattern: rename pat_ref_commit() to pat_ref_commit_elt()
+ - MINOR: pattern: add the missing generation ID manipulation functions
+ - MINOR: peers: Add traces for peer control messages.
+ - BUG/MINOR: dns: SRV records ignores duplicated AR records (v2)
+ - BUILD: peers: fix build warning about unused variable
+ - BUG/MEDIUM: stats: add missing INF_BUILD_INFO definition
+ - MINOR: cache: Do not store responses with an unknown encoding
+ - BUG/MINOR: peers: Possible appctx pointer dereference.
+ - MINOR: build: discard echoing in help target
+ - MINOR: cache: Remove the `hash` part of the accept-encoding secondary key
+ - CLEANUP: cache: Use proper data types in secondary_key_cmp()
+ - CLEANUP: Rename accept_encoding_hash_cmp to accept_encoding_bitmap_cmp
+ - BUG/MINOR: peers: Wrong "new_conn" value for "show peers" CLI command.
+ - MINOR: contrib: Make the wireshark peers dissector compile for more distribs.
+ - BUG/MINOR: mux_h2: missing space between "st" and ".flg" in the "show fd" helper
+ - CLEANUP: tools: make resolve_sym_name() take a const pointer
+ - CLEANUP: cli: make "show fd" use a const connection to access other fields
+ - MINOR: cli: make "show fd" also report the xprt and xprt_ctx
+ - MINOR: xprt: add a new show_fd() helper to complete some "show fd" dumps.
+ - MINOR: ssl: provide a "show fd" helper to report important SSL information
+ - MINOR: xprt/mux: export all *_io_cb functions so that "show fd" resolves them
+ - MINOR: mux-h2: make the "show fd" helper also decode the h2s subscriber when known
+ - MINOR: mux-h1: make the "show fd" helper also decode the h1s subscriber when known
+ - MINOR: mux-fcgi: make the "show fd" helper also decode the fstrm subscriber when known
+ - CI: Pin VTest to a known good commit
+ - MINOR: cli: give the show_fd helpers the ability to report a suspicious entry
+ - MINOR: cli/show_fd: report some easily detectable suspicious states
+ - MINOR: ssl/show_fd: report some FDs as suspicious when possible
+ - MINOR: mux-h2/show_fd: report as suspicious an entry with too many calls
+ - MINOR: mux-h1/show_fd: report as suspicious an entry with too many calls
+ - BUG/MINOR: mworker: define _GNU_SOURCE for strsignal()
+ - BUG/MEDIUM: tcpcheck: Don't destroy connection in the wake callback context
+ - BUG/MEDIUM: mux-h2: Xfer rxbuf to the upper layer when creating a front stream
+ - MINOR: http: Add HTTP 501-not-implemented error message
+ - MINOR: muxes: Add exit status for errors about not implemented features
+ - MINOR: mux-h1: Be prepared to return 501-not-implemented error during parsing
+ - MEDIUM: mux-h1: Return a 501-not-implemented for upgrade requests with a body
+ - DOC: Remove space after comma in converter signature
+ - DOC: Rename '<var name>' to '<var>' in converter signature
+ - MINOR: stats: duplicate 3 fields in bytes in info
+ - MINOR: stats: add new start time field
+ - MINOR: contrib/prometheus-exporter: merge info description from stats
+ - MEDIUM: stats: allow to select one field in `stats_fill_fe_stats`
+ - MINOR: contrib/prometheus-exporter: use fill_fe_stats for frontend dump
+ - MINOR: contrib/prometheus-exporter: Don't needlessly set empty label for metrics
+ - MINOR: contrib/prometheus-exporter: Split the PROMEX_FL_STATS_METRIC flag
+ - MINOR: contrib/prometheus-exporter: Add promex_metric struct defining a metric
+ - MEDIUM: contrib/prometheus-exporter: Rework matrices defining Promex metrics
+ - BUG/MINOR: stream: Don't update counters when TCP to H2 upgrades are performed
+ - BUG/MEDIUM: mux-h2: fix read0 handling on partial frames
+ - MINOR: debug: always export the my_backtrace function
+ - MINOR: debug: extract the backtrace dumping code to its own function
+ - MINOR: debug: create ha_backtrace_to_stderr() to dump an instant backtrace
+ - MEDIUM: debug: now always print a backtrace on CRASH_NOW() and friends
+ - MINOR: debug: let ha_dump_backtrace() dump a bit further for some callers
+ - BUILD: debug: fix build warning by consuming the write() result
+ - MINOR: lua: remove unused variable
+ - BUILD/MINOR: lua: define _GNU_SOURCE for LLONG_MAX
+
+2021/01/06 : 2.4-dev5
+ - BUG/MEDIUM: mux_h2: Add missing braces in h2_snd_buf() around trace+wakeup
+ - BUILD: hpack: hpack-tbl-t.h uses VAR_ARRAY but does not include compiler.h
+ - MINOR: time: increase the minimum wakeup interval to 60s
+ - MINOR: check: do not ignore a connection header for http-check send
+ - REGTESTS: complete http-check test
+ - CI: travis-ci: drop coverity scan builds
+ - MINOR: atomic: don't use ; to separate instruction on aarch64.
+ - IMPORT: xxhash: update to v0.8.0 that introduces stable XXH3 variant
+ - MEDIUM: xxhash: use the XXH3 functions to generate 64-bit hashes
+ - MEDIUM: xxhash: use the XXH_INLINE_ALL macro to inline all functions
+ - CLEANUP: xxhash: remove the unused src/xxhash.c
+ - MINOR: sample: add the xxh3 converter
+ - REGTESTS: add tests for the xxh3 converter
+ - MINOR: protocol: Create proto_quic QUIC protocol layer.
+ - MINOR: connection: Attach a "quic_conn" struct to "connection" struct.
+ - MINOR: quic: Redefine control layer callbacks which are QUIC specific.
+ - MINOR: ssl_sock: Initialize BIO and SSL objects outside of ssl_sock_init()
+ - MINOR: connection: Add a new xprt to connection.
+ - MINOR: ssl: Export definitions required by QUIC.
+ - MINOR: cfgparse: Do not modify the QUIC xprt when parsing "ssl".
+ - MINOR: tools: Add support for QUIC addresses parsing.
+ - MINOR: quic: Add definitions for QUIC protocol.
+ - MINOR: quic: Import C source code files for QUIC protocol.
+ - MINOR: listener: Add QUIC info to listeners and receivers.
+ - MINOR: server: Add QUIC definitions to servers.
+ - MINOR: ssl: SSL CTX initialization modifications for QUIC.
+ - MINOR: ssl: QUIC transport parameters parsing.
+ - MINOR: quic: QUIC socket management finalization.
+ - MINOR: cfgparse: QUIC default server transport parameters init.
+ - MINOR: quic: Enable the compilation of QUIC modules.
+ - MAJOR: quic: Make usage of ebtrees to store QUIC ACK ranges.
+ - MINOR: quic: Attempt to make trace more readable
+ - MINOR: quic: Make usage of the congestion control window.
+ - MINOR: quic: Flag RX packet as ack-eliciting from the generic parser.
+ - MINOR: quic: Code reordering to help in reviewing/modifying.
+ - MINOR: quic: Add traces to congestion avoidance NewReno callback.
+ - MINOR: quic: Display the SSL alert in ->ssl_send_alert() callback.
+ - MINOR: quic: Update the initial salt to that of draft-29.
+ - MINOR: quic: Add traces for in flight ack-eliciting packet counter.
+ - MINOR: quic: make a packet build fail when qc_build_frm() fails.
+ - MINOR: quic: Add traces for quic_packet_encrypt().
+ - MINOR: cache: Refactoring of secondary_key building functions
+ - MINOR: cache: Avoid storing responses whose secondary key was not correctly calculated
+ - BUG/MINOR: cache: Manage multiple headers in accept-encoding normalization
+ - MINOR: cache: Add specific secondary key comparison mechanism
+ - MINOR: http: Add helper functions to trim spaces and tabs
+ - MEDIUM: cache: Manage a subset of encodings in accept-encoding normalizer
+ - REGTESTS: cache: Simplify vary.vtc file
+ - REGTESTS: cache: Add a specific test for the accept-encoding normalizer
+ - MINOR: cache: Remove redundant test in http_action_req_cache_use
+ - MINOR: cache: Replace the "process-vary" option's expected values
+ - CI: GitHub Actions: enable daily Coverity scan
+ - BUG/MEDIUM: cache: Fix hash collision in `accept-encoding` handling for `Vary`
+ - MEDIUM: stick-tables: Add srvkey option to stick-table
+ - REGTESTS: add test for stickiness using "srvkey addr"
+ - BUILD: Makefile: disable -Warray-bounds until it's fixed in gcc 11
+ - BUG/MINOR: sink: Return an allocation failure in __sink_new if strdup() fails
+ - BUG/MINOR: lua: Fix memory leak error cases in hlua_config_prepend_path
+ - MINOR: lua: Use consistent error message 'memory allocation failed'
+ - CLEANUP: Compare the return value of `XXXcmp()` functions with zero
+ - CLEANUP: Apply the coccinelle patch for `XXXcmp()` on include/
+ - CLEANUP: Apply the coccinelle patch for `XXXcmp()` on contrib/
+ - MINOR: qpack: Add static header table definitions for QPACK.
+ - CLEANUP: qpack: Wrong comment about the draft for QPACK static header table.
+ - CLEANUP: quic: Remove useless QUIC event trace definitions.
+ - BUG/MINOR: quic: Possible CRYPTO frame building errors.
+ - MINOR: quic: Pass quic_conn struct to frame parsers.
+ - BUG/MINOR: quic: Wrong STREAM frames parsing.
+ - MINOR: quic: Drop packets with STREAM frames with wrong direction.
+ - CLEANUP: ssl: Remove useless loop in tlskeys_list_get_next()
+ - CLEANUP: ssl: Remove useless local variable in tlskeys_list_get_next()
+ - MINOR: ssl: make tlskeys_list_get_next() take a list element
+ - Revert "BUILD: Makefile: disable -Warray-bounds until it's fixed in gcc 11"
+ - BUG/MINOR: cfgparse: Fail if the strdup() for `rule->be.name` for `use_backend` fails
+ - CLEANUP: mworker: remove duplicate pointer tests in cfg_parse_program()
+ - CLEANUP: Reduce scope of `header_name` in http_action_store_cache()
+ - CLEANUP: Reduce scope of `hdr_age` in http_action_store_cache()
+ - CLEANUP: spoe: fix typo on `var_check_arg` comment
+ - BUG/MINOR: tcpcheck: Report a L7OK if the last evaluated rule is a send rule
+ - CI: github actions: build several popular "contrib" tools
+ - DOC: Improve the message printed when running `make` w/o `TARGET`
+ - BUG/MEDIUM: server: srv_set_addr_desc() crashes when a server has no address
+ - REGTESTS: add unresolvable servers to srvkey-addr
+ - BUG/MINOR: stats: Make stat_l variable used to dump a stat line thread local
+ - BUG/MINOR: quic: NULL pointer dereferences when building post handshake frames.
+ - SCRIPTS: improve announce-release to support different tag and versions
+ - SCRIPTS: make announce release support preparing announces before tag exists
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: srv: do not init address if backend is disabled
+ - BUG/MINOR: srv: do not cleanup idle conns if pool max is null
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CLEANUP: few extra typo and fixes over last one ("ot" -> "to")
+
+2020/12/21 : 2.4-dev4
+ - BUG/MEDIUM: lb-leastconn: Reposition a server using the right eweight
+ - BUG/MEDIUM: mux-h1: Fix a deadlock when a 408 error is pending for a client
+ - BUG/MEDIUM: ssl/crt-list: bad behavior with "commit ssl cert"
+ - BUG/MAJOR: cache: Crash because of disabled entry not removed from the tree
+ - BUILD: SSL: fine guard for SSL_CTX_add_server_custom_ext call
+ - MEDIUM: cache: Add a secondary entry counter and insertion limitation
+ - MEDIUM: cache: Avoid going over duplicates lists too often
+ - MINOR: cache: Add a max-secondary-entries cache option
+ - CI: cirrus: drop CentOS 6 builds
+ - BUILD: Makefile: have "make clean" destroy .o/.a/.s in contrib subdirs as well
+ - MINOR: vars: replace static functions with global ones
+ - MINOR: opentracing: add ARGC_OT enum
+ - CONTRIB: opentracing: add the OpenTracing filter
+ - DOC: opentracing: add the OpenTracing filter section
+ - REGTESTS: make use of HAPROXY_ARGS and pass -dM by default
+ - BUG/MINOR: http: Establish a tunnel for all 2xx responses to a CONNECT
+ - BUG/MINOR: mux-h1: Don't set CS_FL_EOI too early for protocol upgrade requests
+ - BUG/MEDIUM: http-ana: Never for sending data in TUNNEL mode
+ - CLEANUP: mux-h2: Rename h2s_frt_make_resp_data() to be generic
+ - CLEANUP: mux-h2: Rename h2c_frt_handle_data() to be generic
+ - BUG/MEDIUM: mux-h1: Handle h1_process() failures on a pipelined request
+ - CLEANUP: debug: mark the RNG's seed as unsigned
+ - CONTRIB: halog: fix build issue caused by %L printf format
+ - CONTRIB: halog: mark the has_zero* functions unused
+ - CONTRIB: halog: fix signed/unsigned build warnings on counts and timestamps
+ - CONTRIB: debug: address "poll" utility build on non-linux platforms
+ - BUILD: plock: remove dead code that causes a warning in gcc 11
+ - BUILD: ssl: fine guard for SSL_CTX_get0_privatekey call
+ - BUG/MINOR: dns: SRV records ignores duplicated AR records
+ - DOC: fix "smp_size" vs "sample_size" in "log" directive arguments
+ - CLEANUP: assorted typo fixes in the code and comments
+ - DOC: assorted typo fixes in the documentation
+ - CI: codespell: whitelist "te" and "nd" words
+
+2020/12/11 : 2.4-dev3
+ - MINOR: log: Logging HTTP path only with %HPO
+ - BUG/MINOR: mux-h2/stats: make stream/connection proto errors more accurate
+ - MINOR: traces: add a new level "error" below the "user" level
+ - MINOR: mux-h2/trace: add traces at level ERROR for protocol errors
+ - BUG/MINOR: mux-h2/stats: not all GOAWAY frames are errors
+ - BUG/MINOR: lua: missing "\n" in error message
+ - BUG/MINOR: lua: lua-load doesn't check its parameters
+ - BUG/MINOR: lua: Post init register function are not executed beyond the first one
+ - BUG/MINOR: lua: Some lua init operation are processed unsafe
+ - MINOR: actions: Export actions lookup functions
+ - MINOR: actions: add a function returning a service pointer from its name
+ - MINOR: cli: add a function to look up a CLI service description
+ - BUG/MINOR: lua: warn when registering action, conv, sf, cli or applet multiple times
+ - MINOR: cache: Improve accept_encoding_normalizer
+ - MINOR: cache: Add entry to the tree as soon as possible
+ - BUG/MINOR: trace: Wrong displayed trace level
+ - BUG/MAJOR: ring: tcp forward on ring can break the reader counter.
+ - MINOR: lua: simplify hlua_alloc() to only rely on realloc()
+ - MEDIUM: lua-thread: use atomics for memory accounting
+ - MINOR: lua-thread: remove struct hlua from function hlua_prepend_path()
+ - MEDIUM: lua-thread: make hlua_post_init() no longer use the runtime execution function
+ - MINOR: lua-thread: hlua_ctx_renew() is never called with main gL lua state
+ - MINOR: lua-thread: Use NULL context for main lua state
+ - MINOR: lua-thread: Stop usage of struct hlua for the global lua state
+ - MINOR: lua-thread: Replace embedded struct hlua_function by a pointer
+ - MINOR: lua-thread: Split hlua_init() function in two parts
+ - MINOR: lua-thread: make hlua_ctx_init() get L from its caller
+ - MINOR: lua-thread: Split hlua_load function in two parts
+ - MINOR: lua-thread: Split hlua_post_init() function in two parts
+ - MINOR: lua-thread: Add the "thread" core variable
+ - MEDIUM: lua-thread: No longer use locked context in initialization parts
+ - MEDIUM: lua-thread: Apply lock only if the parent state is the main thread
+ - MINOR: lua-thread: Replace global gL var with an array of states
+ - MINOR: lua-thread: Replace "struct hlua_function" allocation by dedicated function
+ - MINOR: lua-thread: Replace state_from by state_id
+ - MINOR: lua-thread: Store each function reference and init reference in array
+ - MEDIUM: lua-thread: Add the lua-load-per-thread directive
+ - MINOR: lua-thread: Add verbosity in errors
+ - REGTESTS: add a test for the threaded Lua code
+ - BUILD/MINOR: haproxy DragonFlyBSD affinity build update.
+ - DOC/MINOR: Fix formatting in Management Guide
+ - MINOR: cache: Do not store stale entry
+ - MINOR: cache: Add extra "cache-control" value checks
+ - MEDIUM: cache: Remove cache entry in case of POST on the same resource
+ - MINOR: cache: Consider invalid Age values as stale
+ - BUG/MEDIUM: lua-thread: some parts must be initialized once
+ - BUG/MINOR: lua-thread: close all states on deinit
+ - BUG/MINOR: listener: use sockaddr_in6 for IPv6
+ - BUG/MINOR: mux-h1: Handle keep-alive timeout for idle frontend connections
+ - MINOR: session: Add the idle duration field into the session
+ - MINOR: mux-h1: Update session idle duration when data are received
+ - MINOR: mux-h1: Reset session dates and durations info when the CS is detached
+ - MINOR: logs: Use session idle duration when no stream is provided
+ - MINOR: stream: Always get idle duration from the session
+ - MINOR: stream: Don't retrieve anymore timing info from the mux csinfo
+ - MINOR: mux-h1: Don't provide anymore timing info using cs_info structure
+ - MINOR: muxes: Remove get_cs_info callback function now useless
+ - MINOR: stream: Pass an optional input buffer when a stream is created
+ - MINOR: mux-h1: Add a flag to disable reads to wait opposite side
+ - MEDIUM: mux-h1: Use a h1c flag to block reads when splicing is in-progress
+ - MINOR: mux-h1: Introduce H1C_F_IS_BACK flag on the H1 connection
+ - MINOR: mux-h1: Separate parsing and formatting errors at H1 stream level
+ - MINOR: mux-h1: Split front/back h1 stream creation in 2 functions
+ - MINOR: mux-h1: Add a rxbuf into the H1 stream
+ - MINOR: mux-h1: Don't set CS flags in internal parsing functions
+ - MINOR: mux-h1: Add embryonic and attached states on the H1 connection
+ - MINOR: mux-h1: rework the h1_timeout_task() function
+ - MINOR: mux-h1: Reset more H1C flags when a H1 stream is destroyed
+ - MINOR: mux-h1: Disable reads if an error was reported on the H1 stream
+ - MINOR: mux-h1: Rework how shutdowns are handled
+ - MINOR: mux-h1: Rework h1_refresh_timeout to be easier to read
+ - MINOR: mux-h1: Process next request for IDLE connection only
+ - MINOR: mux-h1: Add an idle expiration date on the H1 connection
+ - MINOR: stick-tables: Add functions to update some values of a tracked counter
+ - MINOR: session: Add functions to increase http values of tracked counters
+ - MINOR: mux: Add a ctl parameter to get the exit status of the multiplexers
+ - MINOR: logs: Get the multiplexer exit status when no stream is provided
+ - MINOR: mux-h1: Add functions to send HTTP errors from the mux
+ - MAJOR: mux-h1: Create the client stream as late as possible
+ - DOC: config: Add notes about errors emitted by H1 mux
+ - CLEANUP: mux-h1: Rename H1C_F_CS_* flags and reorder H1C flags
+ - MINOR: http-ana: Remove useless update of t_idle duration of the stream
+ - CLEANUP: htx: Remove HTX_FL_UPGRADE unused flag
+ - MEDIUM: http-ana: Don't process partial or empty request anymore
+ - CLEANUP: http-ana: Remove TX_WAIT_NEXT_RQ unused flag
+ - CLEANUP: connection: Remove CS_FL_READ_PARTIAL flag
+ - REGTESTS: Fix proxy_protocol_tlv_validation
+ - MINOR: http-ana: Properly set message flags from the start-line flags
+ - MINOR: h1-htx/http-ana: Set BODYLESS flag on message in TUNNEL state
+ - MINOR: protocol: add a ->set_port() helper to address families
+ - MINOR: listener: automatically set the port when creating listeners
+ - MINOR: listener: now use a generic add_listener() function
+ - MEDIUM: ssl: fatal error with bundle + openssl < 1.1.1
+ - BUG/MEDIUM: stream: Xfer the input buffer to a fully created stream
+ - BUG/MINOR: stream: Don't use input buffer after the ownership xfer
+ - MINOR: protocol: remove the redundant ->sock_domain field
+ - MINOR: protocol: export protocol definitions
+ - CLEANUP: protocol: group protocol struct members by usage
+ - MINOR: protocol: add a set of ctrl_init/ctrl_close methods for setup/teardown
+ - MINOR: connection: use the control layer's init/close
+ - MINOR: udp: export udp_suspend_receiver() and udp_resume_receiver()
+ - BUG/MAJOR: spoa/python: Fixing return None
+ - DOC: spoa/python: Fixing typo in IP related error messages
+ - DOC: spoa/python: Rephrasing memory related error messages
+ - DOC: spoa/python: Fixing typos in comments
+ - BUG/MINOR: spoa/python: Cleanup references for failed Module Addobject operations
+ - BUG/MINOR: spoa/python: Cleanup ipaddress objects if initialization fails
+ - BUG/MEDIUM: spoa/python: Fixing PyObject_Call positional arguments
+ - BUG/MEDIUM: spoa/python: Fixing references to None
+ - DOC: email change of the DeviceAtlas maintainer
+ - MINOR: cache: Dump secondary entries in "show cache"
+ - CLEANUP: connection: use fd_stop_both() instead of conn_stop_polling()
+ - MINOR: stream-int: don't touch polling anymore on shutdown
+ - MINOR: connection: implement cs_drain_and_close()
+ - MINOR: mux-pt: take care of CS_SHR_DRAIN in shutr()
+ - MINOR: checks: use cs_drain_and_close() instead of draining the connection
+ - MINOR: checks: don't call conn_cond_update_polling() anymore
+ - CLEANUP: connection: open-code conn_cond_update_polling() and update the comment
+ - CLEANUP: connection: remove the unused conn_{stop,cond_update}_polling()
+ - BUG/MINOR: http-check: Use right condition to consider HTX message as full
+ - BUG/MINOR: tcpcheck: Don't rearm the check timeout on each read
+ - MINOR: tcpcheck: Only wait for more payload data on HTTP expect rules
+ - BUG/MINOR: tools: make parse_time_err() more strict on the timer validity
+ - BUG/MINOR: tools: Reject size format not starting by a digit
+ - MINOR: action: define enum for timeout type of the set-timeout rule
+ - MINOR: stream: prepare the hot refresh of timeouts
+ - MEDIUM: stream: support a dynamic server timeout
+ - MEDIUM: stream: support a dynamic tunnel timeout
+ - MEDIUM: http_act: define set-timeout server/tunnel action
+ - MINOR: frontend: add client timeout sample fetch
+ - MINOR: backend: add timeout sample fetches
+ - MINOR: stream: add sample fetches
+ - MINOR: stream: add timeout sample fetches
+ - REGTESTS: add regtest for http-request set-timeout
+ - CLEANUP: remove the unused fd_stop_send() in conn_xprt_shutw{,_hard}()
+ - CLEANUP: connection: remove the unneeded fd_stop_{recv,send} on read0/shutw
+ - MINOR: connection: remove sock-specific code from conn_sock_send()
+ - REORG: connection: move the socket iocb (conn_fd_handler) to sock.c
+ - MINOR: protocol: add a ->drain() function at the connection control layer
+ - MINOR: connection: make conn_sock_drain() use the control layer's ->drain()
+ - MINOR: protocol: add a pair of check_events/ignore_events functions at the ctrl layer
+ - MEDIUM: connection: make use of the control layer check_events/ignore_events
+
+2020/12/01 : 2.4-dev2
+ - BUILD: Make DEBUG part of .build_opts
+ - BUILD: Show the value of DEBUG= in haproxy -vv
+ - CI: Set DEBUG=-DDEBUG_STRICT=1 in GitHub Actions
+ - MINOR: stream: Add level 7 retries on http error 401, 403
+ - CLEANUP: remove unused function "ssl_sock_is_ckch_valid"
+ - BUILD: SSL: add BoringSSL guarding to "RAND_keep_random_devices_open"
+ - BUILD: SSL: do not "update" BoringSSL version equivalent anymore
+ - BUG/MEDIUM: http_act: Restore init of log-format list
+ - DOC: better describes how to configure a fallback crt
+ - BUG/MAJOR: filters: Always keep all offsets up to date during data filtering
+ - MINOR: cache: Prepare helper functions for Vary support
+ - MEDIUM: cache: Add the Vary header support
+ - MINOR: cache: Add a process-vary option that can enable/disable Vary processing
+ - BUG/CRITICAL: cache: Fix trivial crash by sending accept-encoding header
+ - BUG/MAJOR: peers: fix partial message decoding
+ - DOC: cache: Add new caching limitation information
+ - DOC: cache: Add information about Vary support
+ - DOC: better document the config file format and escaping/quoting rules
+ - DOC: Clarify %HP description in log-format
+ - CI: github actions: update LibreSSL to 3.3.0
+ - CI: github actions: enable 51degrees feature
+ - MINOR: fd/threads: silence a build warning with threads disabled
+ - BUG/MINOR: tcpcheck: Don't forget to reset tcp-check flags on new kind of check
+ - MINOR: tcpcheck: Don't handle anymore in-progress send rules in tcpcheck_main
+ - BUG/MAJOR: tcpcheck: Allocate input and output buffers from the buffer pool
+ - MINOR: tcpcheck: Don't handle anymore in-progress connect rules in tcpcheck_main
+ - MINOR: config: Deprecate and ignore tune.chksize global option
+ - MINOR: config: Add a warning if tune.chksize is used
+ - REORG: tcpcheck: Move check option parsing functions based on tcp-check
+ - MINOR: check: Always increment check health counter on CONPASS
+ - MINOR: tcpcheck: Add support of L7OKC on expect rules error-status argument
+ - DOC: config: Make disable-on-404 option clearer on transition conditions
+ - DOC: config: Move req.hdrs and req.hdrs_bin in L7 samples fetches section
+ - BUG/MINOR: http-fetch: Fix smp_fetch_body() when called from a health-check
+ - MINOR: plock: use an ARMv8 instruction barrier for the pause instruction
+ - MINOR: debug: add "debug dev sched" to stress the scheduler.
+ - MINOR: debug: add a trivial PRNG for scheduler stress-tests
+ - BUG/MEDIUM: lists: Lock the element while we check if it is in a list.
+ - MINOR: task: remove tasklet_insert_into_tasklet_list()
+ - MINOR: task: perform atomic counter increments only once per wakeup
+ - MINOR: task: remove __tasklet_remove_from_tasklet_list()
+ - BUG/MEDIUM: task: close a possible data race condition on a tasklet's list link
+ - BUG/MEDIUM: local log format regression.
+
+2020/11/21 : 2.4-dev1
+ - MINOR: ist: Add istend() function to return a pointer to the end of the string
+ - MINOR: sample: Add converters to parse FIX messages
+ - REGTEST: converter: Add a regtest for fix converters
+ - MINOR: sample: Add converters to parse MQTT messages
+ - REGTEST: converter: Add a regtest for MQTT converters
+ - MINOR: compat: automatically include malloc.h on glibc
+ - MEDIUM: pools: call malloc_trim() from pool_gc()
+ - MEDIUM: pattern: call malloc_trim() on pat_ref_reload()
+ - MINOR: pattern: move the update revision to the pat_ref, not the expression
+ - CLEANUP: pattern: delete the back refs at once during pat_ref_reload()
+ - MINOR: pattern: new sflag PAT_SF_REGFREE indicates regex_free() is needed
+ - MINOR: pattern: make the delete and prune functions more generic
+ - MEDIUM: pattern: link all final elements from the reference
+ - MEDIUM: pattern: change the pat_del_* functions to delete from the references
+ - MINOR: pattern: remerge the list and tree deletion functions
+ - MINOR: pattern: perform a single call to pat_delete_gen() under the expression
+ - CLEANUP: acl: don't reference the generic pattern deletion function anymore
+ - CLEANUP: pattern: remove pat_delete_fcts[] and pattern_head->delete()
+ - MINOR: pattern: introduce pat_ref_delete_by_ptr() to delete a valid reference
+ - MINOR: pattern: store a generation number in the reference patterns
+ - MEDIUM: pattern: only match patterns that match the current generation
+ - MINOR: pattern: add pat_ref_commit() to commit a previously inserted element
+ - MINOR: pattern: implement pat_ref_load() to load a pattern at a given generation
+ - MINOR: pattern: add pat_ref_purge_older() to purge old entries
+ - MEDIUM: pattern: make pat_ref_prune() rely on pat_ref_purge_older()
+ - MINOR: pattern: during reload, delete elements from the ref, not the expression
+ - MINOR: pattern: prepare removal of a pattern from the list head
+ - MEDIUM: pattern: turn the pattern chaining to single-linked list
+ - CLEANUP: cfgparse: remove duplicate registration for transparent build options
+ - BUG/MINOR: ssl: don't report 1024 bits DH param load error when it's higher
+ - MINOR: http-htx: Add understandable errors for the errorfiles parsing
+ - MINOR: ssl: instantiate stats module
+ - MINOR: ssl: count client hello for stats
+ - MINOR: ssl: add counters for ssl sessions
+ - DOC: config: Fix a typo on ssl_c_chain_der
+ - MINOR: server: remove idle lock in srv_cleanup_connections
+ - BUILD: ssl: silence build warning on uninitialised counters
+ - BUILD: http-htx: fix build warning regarding long type in printf
+ - REGTEST: ssl: test wildcard and multi-type + exclusions
+ - BUG/MEDIUM: ssl/crt-list: correctly insert crt-list line if crt already loaded
+ - CI: Expand use of GitHub Actions for CI
+ - REGTEST: ssl: mark reg-tests/ssl/ssl_crt-list_filters.vtc as broken
+ - BUG/MINOR: pattern: a sample marked as const could be written
+ - BUG/MINOR: lua: set buffer size during map lookups
+ - MEDIUM: cache: Change caching conditions
+ - BUG/MINOR: stats: free dynamically stats fields/lines on shutdown
+ - BUG/MEDIUM: stats: prevent crash if counters not alloc with dummy one
+ - MINOR: peers: Add traces to peer_treat_updatemsg().
+ - BUG/MINOR: peers: Do not ignore a protocol error for dictionary entries.
+ - BUG/MINOR: peers: Missing TX cache entries reset.
+ - BUG/MEDIUM: peers: fix decoding of multi-byte length in stick-table messages
+ - BUG/MINOR: http-fetch: Extract cookie value even when no cookie name
+ - BUG/MINOR: http-fetch: Fix calls w/o parentheses of the cookie sample fetches
+ - BUG/MEDIUM: check: reuse srv proto only if using same mode
+ - MINOR: check: report error on incompatible proto
+ - MINOR: check: report error on incompatible connect proto
+ - BUG/MINOR: http-htx: Handle warnings when parsing http-error and http-errors
+ - BUG/MAJOR: spoe: Be sure to remove all references on a released spoe applet
+ - MINOR: spoe: Don't close connection in sync mode on processing timeout
+ - BUG/MINOR: tcpcheck: Don't warn on unused rules if check option is after
+ - MINOR: init: Fix the prototype for per-thread free callbacks
+ - MINOR: config/mux-h2: Return ERR_ flags from init_h2() instead of a status
+ - CLEANUP: config: Return ERR_NONE from config callbacks instead of 0
+ - MINOR: cfgparse: tighten the scope of newnameserver variable, free it on error.
+ - REGTEST: make ssl_client_samples and ssl_server_samples require to 2.2
+ - REGTESTS: Add sample_fetches/cook.vtc
+ - BUG/MEDIUM: filters: Forward all filtered data at the end of http filtering
+ - BUG/MINOR: http-ana: Don't wait for the body of CONNECT requests
+ - CLEANUP: flt-trace: Remove unused random-parsing option
+ - MINOR: flt-trace: Add an option to inhibits trace messages
+ - MINOR: flt-trace: Use a bitfield for the trace options
+ - REGTESTS: Add a script to test the random forwarding with several filters
+ - REGTESTS: mark the abns test as broken again
+ - REGTESTS: converter: add url_dec test
+ - CI: Stop hijacking the hosts file
+ - CI: Make the h2spec workflow more consistent with the VTest workflow
+ - CI: travis-ci: remove amd64, osx builds
+ - CI: travis-ci: arm64 are not allowed to fail anymore
+ - DOC: add missing 3.10 in the summary
+ - MINOR: ssl: remove client hello counters
+ - MEDIUM: stats: add counters for failed handshake
+ - MINOR: ssl: create common ssl_ctx init
+ - MEDIUM: cli/ssl: configure ssl on server at runtime
+ - REGTEST: server/cli_set_ssl.vtc requires OpenSSL
+ - DOC: coding-style: update a few rules about pointers
+ - BUG/MINOR: ssl: segv on startup when AKID but no keyid
+ - BUILD: ssl: use SSL_MODE_ASYNC macro instead of OPENSSL_VERSION
+ - BUG/MEDIUM: http-ana: Don't eval http-after-response ruleset on empty messages
+ - BUG/MEDIUM: ssl/crt-list: bundle support broken in crt-list
+ - BUG/MEDIUM: ssl: error when no certificate are found
+ - BUG/MINOR: ssl/crt-list: load bundle in crt-list only if activated
+ - BUG/MEDIUM: ssl/crt-list: fix error when no file found
+ - CI: Github Actions: enable prometheus exporter
+ - CI: Github Actions: remove LibreSSL-3.0.2 builds
+ - CI: Github Actions: enable BoringSSL builds
+ - CI: travis-ci: remove builds migrated to GH actions
+ - BUILD: makefile: enable crypt(3) for OpenBSD
+ - CI: Github Action: run "apt-get update" before packages restore
+ - BUILD: SSL: guard TLS13 ciphersuites with HAVE_SSL_CTX_SET_CIPHERSUITES
+ - CI: Pass the github.event_name to matrix.py
+ - CI: Clean up Windows CI
+ - DOC: clarify how to create a fallback crt
+ - CLEANUP: connection: do not use conn->owner when the session is known
+ - BUG/MAJOR: connection: reset conn->owner when detaching from session list
+ - REGTESTS: mark proxy_protocol_random_fail as broken
+ - BUG/MINOR: http_htx: Fix searching headers by substring
+ - MINOR: http_act: Add -m flag for del-header name matching method
+
+2020/11/05 : 2.4-dev0
+ - MINOR: version: it's development again.
+ - DOC: mention in INSTALL that it's development again
+
+2020/11/05 : 2.3.0
+ - CLEANUP: pattern: remove unused entry "tree" in pattern.val
+ - BUILD: ssl: use SSL_CTRL_GET_RAW_CIPHERLIST instead of OpenSSL versions
+ - BUG/MEDIUM: filters: Don't try to init filters for disabled proxies
+ - BUG/MINOR: proxy/server: Skip per-proxy/server post-check for disabled proxies
+ - BUG/MINOR: checks: Report a socket error before any connection attempt
+ - BUG/MINOR: server: Set server without addr but with dns in RMAINT on startup
+ - MINOR: server: Copy configuration file and line for server templates
+ - BUG/MEDIUM: mux-pt: Release the tasklet during an HTTP upgrade
+ - BUILD: ssl: use HAVE_OPENSSL_KEYLOG instead of OpenSSL versions
+ - MINOR: debug: don't count free(NULL) in memstats
+ - BUG/MINOR: filters: Skip disabled proxies during startup only
+ - MINOR: mux_h2: capitalize frame type in stats
+ - MINOR: mux_h2: add stat for total count of connections/streams
+ - MINOR: stats: do not display empty stat module title on html
+ - BUG/MEDIUM: stick-table: limit the time spent purging old entries
+ - BUG/MEDIUM: listener: only enable a listening listener if needed
+ - BUG/MEDIUM: listener: never suspend inherited sockets
+ - BUG/MEDIUM: listener: make the master also keep workers' inherited FDs
+ - MINOR: fd: add fd_want_recv_safe()
+ - MEDIUM: listeners: make use of fd_want_recv_safe() to enable early receivers
+ - REGTESTS: mark abns_socket as working now
+ - CLEANUP: mux-h2: Remove the h1 parser state from the h2 stream
+ - MINOR: sock: add a check against cross worker<->master socket activities
+ - CI: github actions: limit OpenSSL no-deprecated builds to "default,bug,devel" reg-tests
+ - BUG/MEDIUM: server: make it possible to kill last idle connections
+ - MINOR: mworker/cli: the master CLI use its own applet
+ - MINOR: ssl: define SSL_CTX_set1_curves_list to itself on BoringSSL
+ - BUILD: ssl: use feature macros for detecting ec curves manipulation support
+ - DOC: Add dns as an available domain to show stat
+ - BUILD: makefile: usual reorder of objects for faster builds
+ - DOC: update INSTALL to mention that TCC is supported
+ - DOC: mention in INSTALL that haproxy 2.3 is a stable version
+ - MINOR: version: mention that it's stable now
+
+2020/10/31 : 2.3-dev9
+ - CLEANUP: http_ana: remove unused assignation of `att_beg`
+ - BUG/MEDIUM: ssl: OCSP must work with BoringSSL
+ - BUG/MINOR: log: fix memory leak on logsrv parse error
+ - BUG/MINOR: log: fix risk of null deref on error path
+ - BUILD: ssl: more elegant OpenSSL early data support check
+ - CI: github actions: update h2spec to 2.6.0
+ - BUG/MINOR: cache: Check the return value of http_replace_res_status
+ - MINOR: cache: Store the "Last-Modified" date in the cache_entry
+ - MINOR: cache: Process the If-Modified-Since header in conditional requests
+ - MINOR: cache: Create res.cache_hit and res.cache_name sample fetches
+ - MINOR: mux-h2: register a stats module
+ - MINOR: mux-h2: add counters instance to h2c
+ - MINOR: mux-h2: add stats for received frame types
+ - MINOR: mux-h2: report detected error on stats
+ - MINOR: mux-h2: count open connections/streams on stats
+ - BUG/MINOR: server: fix srv downtime calculation on starting
+ - BUG/MINOR: server: fix down_time report for stats
+ - BUG/MINOR: lua: initialize sample before using it
+ - MINOR: cache: Add Expires header value parsing
+ - MINOR: ist: Add a case insensitive istmatch function
+ - BUG/MINOR: cache: Manage multiple values in cache-control header value
+ - BUG/MINOR: cache: Inverted variables in http_calc_maxage function
+ - MINOR: pattern: make pat_ref_append() return the newly added element
+ - MINOR: pattern: make pat_ref_add() rely on pat_ref_append()
+ - MINOR: pattern: export pat_ref_push()
+ - CLEANUP: pattern: use calloc() rather than malloc for structures
+ - CLEANUP: pattern: fix spelling/grammatical/copy-paste in comments
+
+2020/10/24 : 2.3-dev8
+ - MINOR: backend: replace the lbprm lock with an rwlock
+ - MINOR: lb/map: use seek lock and read locks where appropriate
+ - MINOR: lb/leastconn: only take a read lock in fwlc_get_next_server()
+ - MINOR: lb/first: use a read lock in fas_get_next_server()
+ - MINOR: lb/chash: use a read lock in chash_get_server_hash()
+ - BUG/MINOR: disable dynamic OCSP load with BoringSSL
+ - BUILD: ssl: make BoringSSL use its own version numbers
+ - CLEANUP: threads: don't register an initcall when not debugging
+ - MINOR: threads: change lock_t to an unsigned int
+ - CLEANUP: tree-wide: reorder a few structures to plug some holes around locks
+ - CLEANUP: task: remove the unused and mishandled global_rqueue_size
+ - BUG/MEDIUM: connection: Never cleanup server lists when freeing private conns
+ - MEDIUM: config: report that "nbproc" is deprecated
+ - BUG/MINOR: listener: close before free in `listener_accept`
+ - MINOR: ssl: 'ssl-load-extra-del-ext' removes the certificate extension
+ - BUG/MINOR: queue: properly report redistributed connections
+ - CONTRIB: tcploop: remove unused local variables in tcp_pause()
+ - BUILD: makefile: add entries to build common debugging tools
+ - BUG/MEDIUM: server: support changing the slowstart value from state-file
+ - MINOR: http: Add `enum etag_type http_get_etag_type(const struct ist)`
+ - MINOR: http: Add etag comparison function
+ - MEDIUM: cache: Store the ETag information in the cache_entry
+ - MEDIUM: cache: Add support for 'If-None-Match' request header
+ - REGTEST: cache: Add if-none-match test case
+ - CLEANUP: compression: Make use of http_get_etag_type()
+ - BUG/MINOR: http-ana: Don't send payload for internal responses to HEAD requests
+ - BUG/MAJOR: mux-h2: Don't try to send data if we know it is no longer possible
+ - MINOR: threads/debug: only report used lock stats
+ - MINOR: threads/debug: only report lock stats for used operations
+ - MINOR: proxy: replace the spinlock with an rwlock
+ - MINOR: server: read-lock the cookie during srv_set_dyncookie()
+ - MINOR: proxy/cli: only take a read lock in "show errors"
+ - OPTIM: queue: don't call pendconn_unlink() when the pendconn is not queued
+ - MINOR: queue: split __pendconn_unlink() in per-srv and per-prx
+ - MINOR: queue: reduce the locked area in pendconn_add()
+ - OPTIM: queue: make the nbpend counters atomic
+ - OPTIM: queue: decrement the nbpend and totpend counters outside of the lock
+ - MINOR: leastconn: take the queue length into account when queuing servers
+ - MEDIUM: fwlc: re-enable per-server queuing up to maxqueue
+ - Revert "OPTIM: queue: don't call pendconn_unlink() when the pendconn is not queued"
+ - MINOR: stats: support the "up" output modifier for "show stat"
+ - MINOR: stats: also support a "no-maint" show stat modifier
+ - MINOR: stats: indicate the number of servers in a backend's status
+ - MEDIUM: ssl: ssl-load-extra-del-ext work only with .crt
+ - REGTEST: ssl: test "set ssl cert" with separate key / crt
+ - DOC: management: apply the "show stat" modifiers to "show stat", not "show info"
+ - MINOR: stats: report server's user-configured weight next to effective weight
+ - CI: travis-ci: switch to Ubuntu 20.04
+ - CONTRIB: release-estimator: Add release estimating tool
+ - BUG/MEDIUM: queue: fix unsafe proxy pointer when counting nbpend
+ - BUG/MINOR: extcheck: add missing checks on extchk_setenv()
+
+2020/10/17 : 2.3-dev7
+ - CI: travis-ci: replace not defined SSL_LIB, SSL_INC for BoringSSL builds
+ - BUG/MINOR: init: only keep rlim_fd_cur if max is unlimited
+ - BUG/MINOR: mux-h2: do not stop outgoing connections on stopping
+ - MINOR: fd: report an error message when failing initial allocations
+ - MINOR: proto-tcp: make use of connect(AF_UNSPEC) for the pause
+ - MINOR: sock: add sock_accept_conn() to test a listening socket
+ - MINOR: protocol: make proto_tcp & proto_uxst report listening sockets
+ - MINOR: sockpair: implement the .rx_listening function
+ - CLEANUP: tcp: make use of sock_accept_conn() where relevant
+ - CLEANUP: unix: make use of sock_accept_conn() where relevant
+ - BUG/MINOR: listener: detect and handle shared sockets stopped in other processes
+ - CONTRIB: tcploop: implement a disconnect operation 'D'
+ - CLEANUP: protocol: initialize all of the sockaddr when disconnecting
+ - BUG/MEDIUM: deinit: check fdtab before fdtab[fd].owner
+ - BUG/MINOR: connection: fix loop iter on connection takeover
+ - BUG/MEDIUM: connection: fix srv idle count on conn takeover
+ - MINOR: connection: improve list api usage
+ - MINOR: mux/connection: add a new mux flag for HOL risk
+ - MINOR: connection: don't check priv flag on free
+ - MEDIUM: backend: add new conn to session if mux marked as HOL blocking
+ - MEDIUM: backend: add reused conn to sess if mux marked as HOL blocking
+ - MEDIUM: h2: remove conn from session on detach
+ - MEDIUM: fcgi: remove conn from session on detach
+ - DOC: Describe reuse safe for HOL handling
+ - MEDIUM: proxy: remove obsolete "mode health"
+ - MEDIUM: proxy: remove obsolete "monitor-net"
+ - CLEANUP: protocol: remove the ->drain() function
+ - CLEANUP: fd: finally get rid of fd_done_recv()
+ - MINOR: connection: make sockaddr_alloc() take the address to be copied
+ - MEDIUM: listener: allocate the connection before queuing a new connection
+ - MINOR: session: simplify error path in session_accept_fd()
+ - MINOR: connection: add new error codes for accept_conn()
+ - MINOR: sock: rename sock_accept_conn() to sock_accepting_conn()
+ - MINOR: protocol: add a new function accept_conn()
+ - MINOR: sock: implement sock_accept_conn() to accept a connection
+ - MINOR: sockpair: implement sockpair_accept_conn() to accept a connection
+ - MEDIUM: listener: use protocol->accept_conn() to accept a connection
+ - MEDIUM: listener: remove the second pass of fd manipulation at the end
+ - MINOR: protocol: add a default I/O callback and put it into the receiver
+ - MINOR: log: set the UDP receiver's I/O handler in the receiver
+ - MINOR: protocol: register the receiver's I/O handler and not the protocol's
+ - CLEANUP: protocol: remove the now unused <handler> field of proto_fam->bind()
+ - DOC: improve the documentation for "option nolinger"
+ - BUG/MEDIUM: proxy: properly stop backends
+ - BUG/MEDIUM: task: bound the number of tasks picked from the wait queue at once
+ - MINOR: threads: augment rwlock debugging stats to report seek lock stats
+ - MINOR: threads: add the transitions to/from the seek state
+ - MEDIUM: task: use an upgradable seek lock when scanning the wait queue
+ - BUILD: listener: avoid a build warning when threads are disabled
+ - BUG/MINOR: peers: Possible unexpected peer session reset after collisions.
+ - MINOR: ssl: add volatile flags to ssl samples
+ - MEDIUM: backend: reuse connection if using a static sni
+ - BUG/MEDIUM: spoe: Unset variable instead of set it if no data provided
+ - BUG/MEDIUM: mux-h1: Get the session from the H1S when capturing bad messages
+ - BUG/MEDIUM: lb: Always lock the server when calling server_{take,drop}_conn
+ - DOC: fix typo in MAX_SESS_STKCTR
+
+2020/10/10 : 2.3-dev6
+ - REGTESTS: use "command" instead of "which" for better POSIX compatibility
+ - BUILD: makefile: Update feature flags for OpenBSD
+ - DOC: agent-check: fix typo in "fail" word expected reply
+ - DOC: crt: advise to move away from cert bundle
+ - BUG/MINOR: ssl/crt-list: exit on warning out of crtlist_parse_line()
+ - REGTEST: fix host part in balance-uri-path-only.vtc
+ - REGTEST: make ssl_client_samples and ssl_server_samples require to 2.3
+ - REGTEST: the iif converter test requires 2.3
+ - REGTEST: make agent-check.vtc require 1.8
+ - REGTEST: make abns_socket.vtc require 1.8
+ - REGTEST: make map_regm_with_backref require 1.7
+ - BUILD: makefile: Update feature flags for FreeBSD
+ - OPTIM: backend/random: never queue on the server, always on the backend
+ - OPTIM: backend: skip LB when we know the backend is full
+ - BUILD: makefile: Fix building with closefrom() support enabled
+ - BUILD: makefile: add an EXTRAVERSION variable to ease local naming
+ - MINOR: tools: support for word expansion of environment in parse_line
+ - BUILD: tools: fix minor build issue on isspace()
+ - BUILD: makefile: Enable closefrom() support on Solaris
+ - CLEANUP: ssl: Use structured format for error line report during crt-list parsing
+ - MINOR: ssl: Add error if a crt-list might be truncated
+ - MINOR: ssl: remove unneeded check in crtlist_parse_file
+ - BUG/MINOR: Fix several leaks of 'log_tag' in init().
+ - DOC: tcp-rules: Refresh details about L7 matching for tcp-request content rules
+ - MEDIUM: tcp-rules: Warn if a track-sc* content rule doesn't depend on content
+ - BUG/MINOR: tcpcheck: Set socks4 and send-proxy flags before the connect call
+ - DOC: ssl: new "cert bundle" behavior
+ - BUG/MEDIUM: queue: make pendconn_cond_unlink() really thread-safe
+ - CLEANUP: ssl: "bundle" is not an OpenSSL wording
+ - MINOR: counters: fix a typo in comment
+ - BUG/MINOR: stats: fix validity of the json schema
+ - REORG: stats: export some functions
+ - MINOR: stats: add stats size as a parameter for csv/json dump
+ - MINOR: stats: hide px/sv/li fields in applet struct
+ - REORG: stats: extract proxy json dump
+ - REORG: stats: extract proxies dump loop in a function
+ - MINOR: hlua: Display debug messages on stderr only in debug mode
+ - MINOR: stats: define the concept of domain for statistics
+ - MINOR: stats: define additional flag px cap on domain
+ - MEDIUM: stats: add delimiter for static proxy stats on csv
+ - MEDIUM: stats: define an API to register stat modules
+ - MEDIUM: stats: add abstract type to store counters
+ - MEDIUM: stats: integrate static proxies stats in new stats
+ - MINOR: stats: support clear counters for dynamic stats
+ - MINOR: stats: display extra proxy stats on the html page
+ - MINOR: stats: add config "stats show modules"
+ - MINOR: dns/stats: integrate dns counters in stats
+ - MINOR: stats: remove for loop declaration
+ - DOC: ssl: fix typo about ocsp files
+ - BUG/MINOR: peers: Inconsistency when dumping peer status codes.
+ - DOC: update INSTALL with supported OpenBSD / FreeBSD versions
+ - BUG/MINOR: proto_tcp: Report warning messages when listeners are bound
+ - CLEANUP: cache: Fix leak of cconf->c.name during config check
+ - CLEANUP: ssl: Release cached SSL sessions on deinit
+ - BUG/MINOR: mux-h1: Be sure to only set CO_RFL_READ_ONCE for the first read
+ - BUG/MINOR: mux-h1: Always set the session on frontend h1 stream
+ - MINOR: mux-h1: Don't wakeup the H1C when output buffer become available
+ - CLEANUP: sock-unix: Remove an unreachable goto clause
+ - BUG/MINOR: proxy: inc req counter on new syslog messages.
+ - BUG/MEDIUM: log: old processes with log forward section don't die on soft stop.
+ - MINOR: stats: inc req counter on listeners.
+ - MINOR: channel: new getword and getchar functions on channel.
+ - MEDIUM: log: syslog TCP support on log forward section.
+ - BUG/MINOR: proxy/log: frontend/backend and log forward names must differ
+ - DOC: re-work log forward bind statement documentation.
+ - DOC: fix a confusing typo on a regsub example
+ - BUILD: Add a DragonFlyBSD target
+ - BUG/MINOR: makefile: fix a tiny typo in the target list
+ - BUILD: makefile: Update feature flags for NetBSD
+ - CI: travis-ci: help Coverity to detect BUG_ON() as a real stop
+ - DOC: Add missing stats fields in the management doc
+ - BUG/MEDIUM: mux-fcgi: Don't handle pending read0 too early on streams
+ - BUG/MEDIUM: mux-h2: Don't handle pending read0 too early on streams
+ - DOC: Fix typos in configuration.txt
+ - BUG/MINOR: http: Fix content-length of the default 500 error
+ - BUG/MINOR: http-htx: Expect no body for 204/304 internal HTTP responses
+ - REGTESTS: mark abns_socket as broken
+ - MEDIUM: fd: always wake up one thread when enabling a foreign FD
+ - MEDIUM: listeners: don't bounce listeners management between queues
+ - MEDIUM: init: stop disabled proxies after initializing fdtab
+ - MEDIUM: listeners: make unbind_listener() converge if needed
+ - MEDIUM: deinit: close all receivers/listeners before scanning proxies
+ - MEDIUM: listeners: remove the now unused ZOMBIE state
+ - MINOR: listeners: do not uselessly try to close zombie listeners in soft_stop()
+ - CLEANUP: proxy: remove the first_to_listen hack in zombify_proxy()
+ - MINOR: listeners: introduce listener_set_state()
+ - MINOR: proxy: maintain per-state counters of listeners
+ - MEDIUM: proxy: remove the unused PR_STFULL state
+ - MEDIUM: proxy: remove the PR_STERROR state
+ - MEDIUM: proxy: remove state PR_STPAUSED
+ - MINOR: startup: don't rely on PR_STNEW to check for listeners
+ - CLEANUP: peers: don't use the PR_ST* states to mark enabled/disabled
+ - MEDIUM: proxy: replace proxy->state with proxy->disabled
+ - MEDIUM: proxy: remove start_proxies()
+ - MEDIUM: proxy: merge zombify_proxy() with stop_proxy()
+ - MINOR: listeners: check the current listener state in pause_listener()
+ - MINOR: listeners: check the current listener earlier state in resume_listener()
+ - MEDIUM: listener/proxy: make the listeners notify about proxy pause/resume
+ - MINOR: protocol: introduce protocol_{pause,resume}_all()
+ - MAJOR: signals: use protocol_pause_all() and protocol_resume_all()
+ - CLEANUP: proxy: remove the now unused pause_proxies() and resume_proxies()
+ - MEDIUM: proto_tcp: make the pause() more robust in multi-process
+ - BUG/MEDIUM: listeners: correctly report pause() errors
+ - MINOR: listeners: move fd_stop_recv() to the receiver's socket code
+ - CLEANUP: protocol: remove the ->disable_all method
+ - CLEANUP: listeners: remove unused disable_listener and disable_all_listeners
+ - MINOR: listeners: export enable_listener()
+ - MINOR: protocol: directly call enable_listener() from protocol_enable_all()
+ - CLEANUP: protocol: remove the ->enable_all method
+ - CLEANUP: listeners: remove the now unused enable_all_listeners()
+ - MINOR: protocol: rename the ->listeners field to ->receivers
+ - MINOR: protocol: replace ->pause(listener) with ->rx_suspend(receiver)
+ - MINOR: protocol: implement an ->rx_resume() method
+ - MINOR: listener: use the protocol's ->rx_resume() method when available
+ - MINOR: sock: provide a set of generic enable/disable functions
+ - MINOR: protocol: add a new pair of rx_enable/rx_disable methods
+ - MINOR: protocol: add a new pair of enable/disable methods for listeners
+ - MEDIUM: listeners: now use the listener's ->enable/disable
+ - MINOR: listeners: split delete_listener() in two versions
+ - MINOR: listeners: count unstoppable jobs on creation, not deletion
+ - MINOR: listeners: add a new stop_listener() function
+ - MEDIUM: proxy: make stop_proxy() now use stop_listener()
+ - MEDIUM: proxy: add mode PR_MODE_PEERS to flag peers frontends
+ - MEDIUM: proxy: centralize proxy status update and reporting
+ - MINOR: protocol: add protocol_stop_now() to instant-stop listeners
+ - MEDIUM: proxy: make soft_stop() stop most listeners using protocol_stop_now()
+ - MEDIUM: udp: implement udp_suspend() and udp_resume()
+ - MINOR: listener: add a few BUG_ON() statements to detect inconsistencies
+ - MEDIUM: listeners: always close master vs worker listeners
+ - BROKEN/MEDIUM: listeners: rework the unbind logic to make it idempotent
+ - MEDIUM: listener: let do_unbind_listener() decide whether to close or not
+ - CLEANUP: listeners: remove the do_close argument to unbind_listener()
+ - MINOR: listeners: move the LI_O_MWORKER flag to the receiver
+ - MEDIUM: receivers: add an rx_unbind() method in the protocols
+ - MINOR: listeners: split do_unbind_listener() in two
+ - MEDIUM: listeners: implement protocol level ->suspend/resume() calls
+ - MEDIUM: config: mark "grace" as deprecated
+ - MEDIUM: config: remove the deprecated and dangerous global "debug" directive
+ - BUG/MINOR: proxy: respect the proper format string in sig_pause/sig_listen
+ - MINOR: peers: heartbeat, collisions and handshake information for "show peers" command.
+ - BUILD: makefile: Enable getaddrinfo() on OS/X
+
+2020/09/25 : 2.3-dev5
+ - DOC: Fix typo in iif() example
+ - CLEANUP: Update .gitignore
+ - BUILD: introduce possibility to define ABORT_NOW() conditionally
+ - CI: travis-ci: help Coverity to recognize abort()
+ - BUG/MINOR: Fix type passed of sizeof() for calloc()
+ - CLEANUP: Do not use a fixed type for 'sizeof' in 'calloc'
+ - CLEANUP: tree-wide: use VAR_ARRAY instead of [0] in various definitions
+ - BUILD: connection: fix build on clang after the VAR_ARRAY cleanup
+ - BUG/MINOR: ssl: verifyhost is case sensitive
+ - BUILD: makefile: change default value of CC from gcc to cc
+ - CI: travis-ci: split asan step out of running tests
+ - BUG/MINOR: server: report correct error message for invalid port on "socks4"
+ - BUG/MEDIUM: ssl: Don't call ssl_sock_io_cb() directly.
+ - BUG/MINOR: ssl/crt-list: crt-list could end without a \n
+ - BUG/MINOR: log-forward: fail on unknown keywords
+ - MEDIUM: log-forward: use "dgram-bind" instead of "bind" for the listener
+ - BUG/MEDIUM: log-forward: always quit on parsing errors
+ - MEDIUM: ssl: remove bundle support in crt-list and directories
+ - MEDIUM: ssl/cli: remove support for multi certificates bundle
+ - MINOR: ssl: crtlist_dup_ssl_conf() duplicates a ssl_bind_conf
+ - MINOR: ssl: crtlist_entry_dup() duplicates a crtlist_entry
+ - MEDIUM: ssl: emulates the multi-cert bundles in the crtlist
+ - MEDIUM: ssl: emulate multi-cert bundles loading in standard loading
+ - CLEANUP: ssl: remove test on "multi" variable in ckch functions
+ - CLEANUP: ssl/cli: remove test on 'multi' variable in CLI functions
+ - CLEANUP: ssl: remove utility functions for bundle
+ - DOC: explain bundle emulation in configuration.txt
+ - BUILD: fix build with openssl < 1.0.2 since bundle removal
+ - BUG/MINOR: log: gracefully handle the "udp@" address format for log servers
+ - BUG/MINOR: dns: gracefully handle the "udp@" address format for nameservers
+ - MINOR: listener: create a new struct "settings" in bind_conf
+ - MINOR: listener: move bind_proc and bind_thread to struct settings
+ - MINOR: listener: move the interface to the struct settings
+ - MINOR: listener: move the network namespace to the struct settings
+ - REORG: listener: create a new struct receiver
+ - REORG: listener: move the listening address to a struct receiver
+ - REORG: listener: move the receiving FD to struct receiver
+ - REORG: listener: move the listener's proto to the receiver
+ - MINOR: listener: make sock_find_compatible_fd() check the socket type
+ - REORG: listener: move the receiver part to a new file
+ - MINOR: receiver: link the receiver to its settings
+ - MINOR: receiver: link the receiver to its owner
+ - MINOR: listener: prefer to retrieve the socket's settings via the receiver
+ - MINOR: receiver: add a receiver-specific flag to indicate the socket is bound
+ - MINOR: listener: move the INHERITED flag down to the receiver
+ - MINOR: receiver: move the FOREIGN and V6ONLY options from listener to settings
+ - MINOR: sock: make sock_find_compatible_fd() only take a receiver
+ - MINOR: protocol: rename the ->bind field to ->listen
+ - MINOR: protocol: add a new ->bind() entry to bind the receiver
+ - MEDIUM: sock_inet: implement sock_inet_bind_receiver()
+ - MEDIUM: tcp: make use of sock_inet_bind_receiver()
+ - MEDIUM: udp: make use of sock_inet_bind_receiver()
+ - MEDIUM: sock_unix: implement sock_unix_bind_receiver()
+ - MEDIUM: uxst: make use of sock_unix_bind_receiver()
+ - MEDIUM: sockpair: implement sockpair_bind_receiver()
+ - MEDIUM: proto_sockpair: make use of sockpair_bind_receiver()
+ - MEDIUM: protocol: explicitly start the receiver before the listener
+ - MEDIUM: protocol: do not call proto->bind() anymore from bind_listener()
+ - MINOR: protocol: add a new proto_fam structure for protocol families
+ - MINOR: protocol: retrieve the family-specific fields from the family
+ - CLEANUP: protocol: remove family-specific fields from struct protocol
+ - MINOR: protocol: add a real family for existing FDs
+ - CLEANUP: tools: make str2sa_range() less awful for fd@ and sockpair@
+ - MINOR: tools: make str2sa_range() take more options than just resolve
+ - MINOR: tools: add several PA_O_PORT_* flags in str2sa_range() callers
+ - MEDIUM: tools: make str2sa_range() validate callers' port specifications
+ - MEDIUM: config: remove all checks for missing/invalid ports/ranges
+ - MINOR: tools: add several PA_O_* flags in str2sa_range() callers
+ - MINOR: listener: remove the inherited arg to create_listener()
+ - MINOR: tools: make str2sa_range() optionally return the fd
+ - MINOR: log: detect LOG_TARGET_FD from the fd and not from the syntax
+ - MEDIUM: tools: make str2sa_range() resolve pre-bound listeners
+ - MINOR: config: do not test an inherited socket again
+ - MEDIUM: tools: make str2sa_range() check for the sockpair's FD usability
+ - MINOR: tools: start to distinguish stream and dgram in str2sa_range()
+ - MEDIUM: tools: make str2sa_range() only report AF_CUST_UDP on listeners
+ - MINOR: tools: remove the central test for "udp" in str2sa_range()
+ - MINOR: cfgparse: add str2receiver() to parse dgram receivers
+ - MINOR: log-forward: use str2receiver() to parse the dgram-bind address
+ - MEDIUM: config: make str2listener() not accept datagram sockets anymore
+ - MINOR: listener: pass the chosen protocol to create_listeners()
+ - MINOR: tools: make str2sa_range() directly return the protocol
+ - MEDIUM: tools: make str2sa_range() check that the protocol has ->connect()
+ - MINOR: protocol: add the control layer type in the protocol struct
+ - MEDIUM: protocol: store the socket and control type in the protocol array
+ - MEDIUM: tools: make str2sa_range() use protocol_lookup()
+ - MEDIUM: proto_udp: replace last AF_CUST_UDP* with AF_INET*
+ - MINOR: tools: drop listener detection hack from str2sa_range()
+ - BUILD: sock_unix: add missing errno.h
+ - MINOR: sock_inet: report the errno string in binding errors
+ - MINOR: sock_unix: report the errno string in binding errors
+ - BUILD: sock_inet: include errno.h
+ - MINOR: h2/trace: also display the remaining frame length in traces
+ - BUG/MINOR: h2/trace: do not display "stream error" after a frame ACK
+ - BUG/MEDIUM: h2: report frame bits only for handled types
+ - BUG/MINOR: http-fetch: Don't set the sample type during the htx prefetch
+ - BUG/MINOR: Fix memory leaks cfg_parse_peers
+ - BUG/MINOR: config: Fix memory leak on config parse listen
+ - MINOR: backend: make the "whole" option of balance uri take only one bit
+ - MINOR: backend: add a new "path-only" option to "balance uri"
+ - REGTESTS: add a few load balancing tests
+ - BUG/MEDIUM: listeners: do not pause foreign listeners
+ - BUG/MINOR: listeners: properly close listener FDs
+ - BUILD: trace: include tools.h
+
+2020/09/11 : 2.3-dev4
+ - MINOR: hlua: Add error message relative to the Channel manipulation and HTTP mode
+ - BUG/MEDIUM: ssl: crt-list negative filters don't work
+ - DOC: overhauling github issue templates
+ - MEDIUM: cfgparse: Emit hard error on truncated lines
+ - DOC: cache: Use '<name>' instead of '<id>' in error message
+ - MINOR: cache: Reject duplicate cache names
+ - REGTEST: remove stray leading spaces in converteers_ref_cnt_never_dec.vtc
+ - MINOR: stats: prevent favicon.ico requests for stats page
+ - BUILD: tools: include auxv a bit later
+ - BUILD: task: work around a bogus warning in gcc 4.7/4.8 at -O1
+ - MEDIUM: ssl: Support certificate chaining for certificate generation
+ - MINOR: ssl: Support SAN extension for certificate generation
+ - MINOR: tcp: don't try to set/clear v6only on inherited sockets
+ - BUG/MINOR: reload: detect the OS's v6only status before choosing an old socket
+ - MINOR: reload: determine the foreign binding status from the socket
+ - MEDIUM: reload: stop passing listener options along with FDs
+ - BUG/MEDIUM: ssl: fix ssl_bind_conf double free w/ wildcards
+ - MEDIUM: fd: replace usages of fd_remove() with fd_stop_both()
+ - CLEANUP: fd: remove fd_remove() and rename fd_dodelete() to fd_delete()
+ - MINOR: fd: add a new "exported" flag and use it for all regular listeners
+ - MEDIUM: reload: pass all exportable FDs, not just listeners
+ - DOC: add description of pidfile in master-worker mode
+ - BUG/MINOR: reload: do not fail when no socket is sent
+ - REORG: tcp: move TCP actions from proto_tcp.c to tcp_act.c
+ - CLEANUP: tcp: stop exporting smp_fetch_src()
+ - REORG: tcp: move TCP sample fetches from proto_tcp.c to tcp_sample.c
+ - REORG: tcp: move TCP bind/server keywords from proto_tcp.c to cfgparse-tcp.c
+ - REORG: unix: move UNIX bind/server keywords from proto_uxst.c to cfgparse-unix.c
+ - REORG: sock: start to move some generic socket code to sock.c
+ - MINOR: sock: introduce sock_inet and sock_unix
+ - MINOR: tcp/udp/unix: make use of proto->addrcmp() to compare addresses
+ - MINOR: sock_inet: implement sock_inet_get_dst()
+ - REORG: inet: replace tcp_is_foreign() with sock_inet_is_foreign()
+ - REORG: sock_inet: move v6only_default from proto_tcp.c to sock_inet.c
+ - REORG: sock_inet: move default_tcp_maxseg from proto_tcp.c
+ - REORG: listener: move xfer_sock_list to sock.{c,h}.
+ - MINOR: sock: add interface and namespace length to xfer_sock_list
+ - MINOR: sock: implement sock_find_compatible_fd()
+ - MINOR: sock_inet: move the IPv4/v6 transparent mode code to sock_inet
+ - REORG: sock: move get_old_sockets() from haproxy.c
+ - MINOR: sock: do not use LI_O_* in xfer_sock_list anymore
+ - MINOR: sock: distinguish dgram from stream types when retrieving old sockets
+ - BUILD: sock_unix: fix build issue with isdigit()
+ - BUG/MEDIUM: http-ana: Don't wait to send 1xx responses received from servers
+ - MINOR: http-htx: Add an option to eval query-string when the path is replaced
+ - BUG/MINOR: http-rules: Replace path and query-string in "replace-path" action
+ - MINOR: http-htx: Handle an optional reason when replacing the response status
+ - MINOR: contrib/spoa-server: allow MAX_FRAME_SIZE override
+ - BUG/MAJOR: contrib/spoa-server: Fix unhandled python call leading to memory leak
+ - BUG/MINOR: contrib/spoa-server: Ensure ip address references are freed
+ - BUG/MINOR: contrib/spoa-server: Do not free reference to NULL
+ - BUG/MINOR: contrib/spoa-server: Updating references to free in case of failure
+ - BUG/MEDIUM: contrib/spoa-server: Fix ipv4_address used instead of ipv6_address
+ - CLEANUP: http: silence a cppcheck warning in get_http_auth()
+ - REGTEST: increase some short timeouts to make tests more reliable
+ - BUG/MINOR: threads: work around a libgcc_s issue with chrooting
+ - BUILD: thread: limit the libgcc_s workaround to glibc only
+ - MINOR: protocol: do not call proto->bind_all() anymore
+ - MINOR: protocol: do not call proto->unbind_all() anymore
+ - CLEANUP: protocol: remove all ->bind_all() and ->unbind_all() functions
+ - MAJOR: init: start all listeners via protocols and not via proxies anymore
+ - BUG/MINOR: startup: haproxy -s cause 100% cpu
+ - Revert "BUG/MINOR: http-rules: Replace path and query-string in "replace-path" action"
+ - BUG/MEDIUM: doc: Fix replace-path action description
+ - MINOR: http-rules: Add set-pathq and replace-pathq actions
+ - MINOR: http-fetch: Add pathq sample fetch
+ - REGTEST: Add a test for request path manipulations, with and without the QS
+ - MINOR: Commit .gitattributes
+ - CLEANUP: Update .gitignore
+ - BUG/MEDIUM: dns: Don't store additional records in a linked-list
+ - BUG/MEDIUM: dns: Be sure to renew IP address for already known servers
+ - MINOR: server: Improve log message sent when server address is updated
+ - DOC: ssl-load-extra-files only applies to certificates on bind lines
+ - BUG/MINOR: auth: report valid crypto(3) support depending on build options
+ - BUG/MEDIUM: mux-h1: always apply the timeout on half-closed connections
+ - BUILD: threads: better workaround for late loading of libgcc_s
+ - BUILD: compiler: reserve the gcc version checks to the gcc compiler
+ - BUILD: compiler: workaround a glibc madness around __attribute__()
+ - BUILD: intops: on x86_64, the bswap instruction is called bswapq
+ - BUILD: trace: always have an argument before variadic args in macros
+ - BUILD: traces: don't pass an empty argument for missing ones
+ - BUG/MINOR: haproxy: Free uri_auth->scope during deinit
+ - CLEANUP: Free old_argv on deinit
+ - CLEANUP: haproxy: Free post_proxy_check_list in deinit()
+ - CLEANUP: haproxy: Free per_thread_*_list in deinit()
+ - CLEANUP: haproxy: Free post_check_list in deinit()
+ - BUG/MEDIUM: pattern: Renew the pattern expression revision when it is pruned
+ - REORG: tools: move PARSE_OPT_* from tools.h to tools-t.h
+ - MINOR: sample: Add iif(<true>,<false>) converter
+
+2020/08/14 : 2.3-dev3
+ - SCRIPTS: git-show-backports: make -m most only show the left branch
+ - SCRIPTS: git-show-backports: emit the shell command to backport a commit
+ - BUILD: Makefile: require SSL_LIB, SSL_INC to be explicitly set
+ - CI: travis-ci: specify SLZ_LIB, SLZ_INC for travis builds
+ - BUG/MEDIUM: mux-h1: Refresh H1 connection timeout after a synchronous send
+ - CLEANUP: dns: typo in reported error message
+ - BUG/MAJOR: dns: disabled servers through SRV records never recover
+ - BUG/MINOR: spoa-server: fix size_t format printing
+ - DOC: spoa-server: fix false friends `actually`
+ - BUG/MINOR: ssl: fix memory leak at OCSP loading
+ - BUG/MEDIUM: ssl: memory leak of ocsp data at SSL_CTX_free()
+ - BUG/MEDIUM: map/lua: Return an error if a map is loaded during runtime
+ - MINOR: arg: Add an argument type to keep a reference on opaque data
+ - BUG/MINOR: converters: Store the sink in an arg pointer for debug() converter
+ - BUG/MINOR: lua: Duplicate map name to load it when a new Map object is created
+ - BUG/MINOR: arg: Fix leaks during arguments validation for fetches/converters
+ - BUG/MINOR: lua: Check argument type to convert it to IPv4/IPv6 arg validation
+ - BUG/MINOR: lua: Check argument type to convert it to IP mask in arg validation
+ - MINOR: hlua: Don't needlessly copy lua strings in trash during args validation
+ - BUG/MINOR: lua: Duplicate lua strings in sample fetches/converters arg array
+ - MEDIUM: lua: Don't filter exported fetches and converters
+ - MINOR: lua: Add support for userlist as fetches and converters arguments
+ - MINOR: lua: Add support for regex as fetches and converters arguments
+ - MINOR: arg: Use chunk_destroy() to release string arguments
+ - BUG/MINOR: snapshots: leak of snapshots on deinit()
+ - CLEANUP: ssl: ssl_sock_crt2der semicolon and spaces
+ - MINOR: ssl: add ssl_{c,s}_chain_der fetch methods
+ - CLEANUP: fix all duplicated semicolons
+ - BUG/MEDIUM: ssl: fix the ssl-skip-self-issued-ca option
+ - BUG/MINOR: ssl: ssl-skip-self-issued-ca requires >= 1.0.2
+ - BUG/MINOR: stats: use strncmp() instead of memcmp() on health states
+ - BUILD: makefile: don't disable -Wstringop-overflow anymore
+ - BUG/MINOR: ssl: double free w/ smp_fetch_ssl_x_chain_der()
+ - BUG/MEDIUM: htx: smp_prefetch_htx() must always validate the direction
+ - BUG/MEDIUM: ssl: never generates the chain from the verify store
+ - OPTIM: regex: PCRE2 use JIT match when JIT optimisation occurred.
+ - BUG/MEDIUM: ssl: does not look for all SNIs before choosing a certificate
+ - CLEANUP: ssl: remove poorly readable nested ternary
+
+2020/07/31 : 2.3-dev2
+ - DOC: ssl: req_ssl_sni needs implicit TLS
+ - BUG/MEDIUM: arg: empty args list must be dropped
+ - BUG/MEDIUM: resolve: fix init resolving for ring and peers section.
+ - BUG/MAJOR: tasks: don't requeue global tasks into the local queue
+ - MINOR: tasks/debug: make the thread affinity BUG_ON check a bit stricter
+ - MINOR: tasks/debug: add a few BUG_ON() to detect use of wrong timer queue
+ - MINOR: tasks/debug: add a BUG_ON() check to detect requeued task on free
+ - BUG/MAJOR: dns: Make the do-resolve action thread-safe
+ - BUG/MEDIUM: dns: Release answer items when a DNS resolution is freed
+ - MEDIUM: htx: Add a flag on a HTX message when no more data are expected
+ - BUG/MEDIUM: stream-int: Don't set MSG_MORE flag if no more data are expected
+ - BUG/MEDIUM: http-ana: Only set CF_EXPECT_MORE flag on data filtering
+ - CLEANUP: dns: remove 45 "return" statements from dns_validate_dns_response()
+ - BUG/MINOR: htx: add two missing HTX_FL_EOI and remove an unexpected one
+ - BUG/MINOR: mux-fcgi: Don't url-decode the QUERY_STRING parameter anymore
+ - BUILD: tools: fix build with static only toolchains
+ - DOC: Use gender neutral language
+ - BUG/MINOR: debug: Don't dump the lua stack if it is not initialized
+ - BUG/MAJOR: dns: fix null pointer dereference in snr_update_srv_status
+ - BUG/MAJOR: dns: don't treat Authority records as an error
+ - CI : travis-ci : prepare for using stock OpenSSL
+ - CI: travis-ci : switch to stock openssl when openssl-1.1.1 is used
+ - MEDIUM: lua: Add support for the Lua 5.4
+ - BUG/MEDIUM: dns: Don't yield in do-resolve action on a final evaluation
+ - BUG/MINOR: lua: Abort execution of actions that yield on a final evaluation
+ - MINOR: tcp-rules: Return an internal error if an action yields on a final eval
+ - BUG/MINOR: tcp-rules: Preserve the right filter analyser on content eval abort
+ - BUG/MINOR: tcp-rules: Set the inspect-delay when a tcp-response action yields
+ - MEDIUM: tcp-rules: Use a dedicated expiration date for tcp ruleset
+ - MEDIUM: lua: Set the analyse expiration date with smaller wake_time only
+ - BUG/MEDIUM: connection: Be sure to always install a mux for sync connect
+ - MINOR: connection: Preinstall the mux for non-ssl connect
+ - MINOR: stream-int: Be sure to have a mux to do sends and receives
+ - BUG/MINOR: lua: Fix a possible null pointer deref on lua ctx
+ - SCRIPTS: announce-release: add the link to the wiki in the announce messages
+ - CI: travis-ci: use better name for Coverity scan job
+ - CI: travis-ci: use proper linking flags for SLZ build
+ - BUG/MEDIUM: backend: always attach the transport before installing the mux
+ - BUG/MEDIUM: tcp-checks: always attach the transport before installing the mux
+ - MINOR: connection: avoid a useless recvfrom() on outgoing connections
+ - MINOR: mux-h1: do not even try to receive if the connection is not fully set up
+ - MINOR: mux-h1: do not try to receive on backend before sending a request
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MEDIUM: ssl: check OCSP calloc in ssl_sock_load_ocsp()
+
+2020/07/17 : 2.3-dev1
+ - MINOR: config: make strict limits enabled by default
+ - BUG/MINOR: acl: Fix freeing of expr->smp in prune_acl_expr
+ - BUG/MINOR: sample: Fix freeing of conv_exprs in release_sample_expr
+ - BUG/MINOR: haproxy: Free proxy->format_unique_id during deinit
+ - BUG/MINOR: haproxy: Add missing free of server->(hostname|resolvers_id)
+ - BUG/MINOR: haproxy: Free proxy->unique_id_header during deinit
+ - BUG/MINOR: haproxy: Free srule->file during deinit
+ - BUG/MINOR: haproxy: Free srule->expr during deinit
+ - BUG/MINOR: sample: Free str.area in smp_check_const_bool
+ - BUG/MINOR: sample: Free str.area in smp_check_const_meth
+ - CLEANUP: haproxy: Free proxy_deinit_list in deinit()
+ - CLEANUP: haproxy: Free post_deinit_list in deinit()
+ - CLEANUP: haproxy: Free server_deinit_list in deinit()
+ - CLEANUP: haproxy: Free post_server_check_list in deinit()
+ - CLEANUP: Add static void vars_deinit()
+ - CLEANUP: Add static void hlua_deinit()
+ - CLEANUP: contrib/prometheus-exporter: typo fixes for ssl reuse metric
+ - BUG/MEDIUM: lists: add missing store barrier on MT_LIST_BEHEAD()
+ - BUG/MEDIUM: lists: add missing store barrier in MT_LIST_ADD/MT_LIST_ADDQ
+ - MINOR: tcp: Support TCP keepalive parameters customization
+ - BUILD: tcp: condition TCP keepalive settings to platforms providing them
+ - MINOR: lists: rename some MT_LIST operations to clarify them
+ - MINOR: buffer: use MT_LIST_ADDQ() for buffer_wait lists additions
+ - MINOR: connection: use MT_LIST_ADDQ() to add connections to idle lists
+ - MINOR: tasks: use MT_LIST_ADDQ() when killing tasks.
+ - CONTRIB: da: fix memory leak in dummy function da_atlas_open()
+ - CI: travis-ci: speed up osx build by running brew scripted, switch to latest osx image
+ - BUG/MEDIUM: mux-h2: Don't add private connections in available connection list
+ - BUG/MEDIUM: mux-fcgi: Don't add private connections in available connection list
+ - MINOR: connection: Set the SNI on server connections before installing the mux
+ - MINOR: connection: Set new connection as private on reuse never
+ - MINOR: connection: Add a wrapper to mark a connection as private
+ - MEDIUM: connection: Add private connections synchronously in session server list
+ - MINOR: connection: Use a dedicated function to look for a session's connection
+ - MINOR: connection: Set the connection target during its initialisation
+ - MINOR: session: Take care to decrement idle_conns counter in session_unown_conn
+ - MINOR: server: Factorize code to deal with reuse of server idle connections
+ - MINOR: server: Factorize code to deal with connections removed from an idle list
+ - CLEANUP: connection: remove unused field idle_time from the connection struct
+ - BUG/MEDIUM: mux-h1: Continue to process request when switching in tunnel mode
+ - MINOR: raw_sock: Report the number of bytes emitted using the splicing
+ - MINOR: contrib/prometheus-exporter: Add missing global and per-server metrics
+ - MINOR: backend: Add sample fetches to get the server's weight
+ - BUG/MINOR: mux-fcgi: Handle empty STDERR record
+ - BUG/MINOR: mux-fcgi: Set conn state to RECORD_P when skipping the record padding
+ - BUG/MINOR: mux-fcgi: Set flags on the right stream field for empty FCGI_STDOUT
+ - BUG/MINOR: backend: fix potential null deref on srv_conn
+ - BUG/MEDIUM: log: issue mixing sampled to not sampled log servers.
+ - MEDIUM: udp: adds minimal proto udp support for message listeners.
+ - MEDIUM: log/sink: re-work and merge of build message API.
+ - MINOR: log: adds syslog udp message handler and parsing.
+ - MEDIUM: log: adds log forwarding section.
+ - MINOR: log: adds counters on received syslog messages.
+ - BUG/MEDIUM: fcgi-app: fix memory leak in fcgi_flt_http_headers
+ - BUG/MEDIUM: server: resolve state file handle leak on reload
+ - BUG/MEDIUM: server: fix possibly uninitialized state file on close
+ - BUG/MEDIUM: channel: Be aware of SHUTW_NOW flag when output data are peeked
+ - BUILD: config: address build warning on raspbian+rpi4
+ - BUG/MAJOR: tasks: make sure to always lock the shared wait queue if needed
+ - BUILD: config: fix again bugs gcc warnings on calloc
+
+2020/07/07 : 2.3-dev0
+ - [RELEASE] Released version 2.3-dev0
+ - MINOR: version: back to development, update status message
+
+2020/07/07 : 2.3-dev0
+ - exact copy of 2.2.0
+
+2020/07/07 : 2.2.0
+ - BUILD: mux-h2: fix typo breaking build when using DEBUG_LOCK
+ - CLEANUP: makefile: update the outdated list of DEBUG_xxx options
+ - BUILD: tools: make resolve_sym_name() return a const
+ - CLEANUP: auth: fix useless self-include of auth-t.h
+ - BUILD: tree-wide: cast arguments to tolower/toupper to unsigned char
+ - CLEANUP: assorted typo fixes in the code and comments
+ - WIP/MINOR: ssl: add sample fetches for keylog in frontend
+ - DOC: fix tune.ssl.keylog sample fetches array
+ - BUG/MINOR: ssl: check conn in keylog sample fetch
+ - DOC: configuration: various typo fixes
+ - MINOR: log: Remove unused case statement during the log-format string parsing
+ - BUG/MINOR: mux-h1: Fix the splicing in TUNNEL mode
+ - BUG/MINOR: mux-h1: Don't read data from a pipe if the mux is unable to receive
+ - BUG/MINOR: mux-h1: Disable splicing only if input data was processed
+ - BUG/MEDIUM: mux-h1: Disable splicing for the conn-stream if read0 is received
+ - MINOR: mux-h1: Improve traces about the splicing
+ - BUG/MINOR: backend: Remove CO_FL_SESS_IDLE if a client remains on the last server
+ - BUG/MEDIUM: connection: Don't consider new private connections as available
+ - BUG/MINOR: connection: See new connection as available only on reuse always
+ - DOC: configuration: remove obsolete mentions of H2 being converted to HTTP/1.x
+ - CLEANUP: ssl: remove unrelevant comment in smp_fetch_ssl_x_keylog()
+ - DOC: update INSTALL with new compiler versions
+ - DOC: minor update to coding style file
+ - MINOR: version: mention that it's an LTS release now
+
+2020/07/04 : 2.2-dev12
+ - BUG/MINOR: mux_h2: don't lose the leaving trace in h2_io_cb()
+ - MINOR: cli: make "show sess" stop at the last known session
+ - CLEANUP: buffers: remove unused buffer_wq_lock lock
+ - BUG/MEDIUM: buffers: always allocate from the local cache first
+ - MINOR: connection: align toremove_{lock,connections} and cleanup into idle_conns
+ - CONTRIB: debug: add missing flags SI_FL_L7_RETRY & SI_FL_D_L7_RETRY
+ - BUG/MEDIUM: connections: Don't increase curr_used_conns for shared connections.
+ - BUG/MEDIUM: checks: Increment the server's curr_used_conns
+ - REORG: buffer: rename buffer.c to dynbuf.c
+ - REORG: includes: create tinfo.h for the thread_info struct
+ - CLEANUP: pool: only include the type files from types
+ - MINOR: pools: move the LRU cache heads to thread_info
+ - BUG/MINOR: debug: fix "show fd" null-deref when built with DEBUG_FD
+ - MINOR: stats: add 3 new output values for the per-server idle conn state
+ - MINOR: activity: add per-thread statistics on FD takeover
+ - BUG/MINOR: server: start cleaning idle connections from various points
+ - MEDIUM: server: improve estimate of the need for idle connections
+ - MINOR: stats: add the estimated need of concurrent connections per server
+ - BUG/MINOR: threads: Don't forget to init each thread toremove_lock.
+ - BUG/MEDIUM: lists: Lock the element while we check if it is in a list.
+ - Revert "BUG/MEDIUM: lists: Lock the element while we check if it is in a list."
+ - BUG/MINOR: haproxy: don't wake already stopping threads on exit
+ - BUG/MINOR: server: always count one idle slot for current thread
+ - MEDIUM: server: use the two thresholds for the connection release algorithm
+ - BUG/MINOR: http-rules: Fix ACLs parsing for http deny rules
+ - BUG/MINOR: sched: properly cover for a rare MT_LIST_ADDQ() race
+ - MINOR: mux-h1: avoid taking the toremove_lock in on dying tasks
+ - MINOR: mux-h2: avoid taking the toremove_lock in on dying tasks
+ - MINOR: mux-fcgi: avoid taking the toremove_lock in on dying tasks
+ - MINOR: pools: increase MAX_BASE_POOLS to 64
+ - DOC: ssl: add "allow-0rtt" and "ciphersuites" in crt-list
+ - BUG/MEDIUM: pattern: Add a trailing \0 to match strings only if possible
+ - BUG/MEDIUM: log-format: fix possible endless loop in parse_logformat_string()
+ - BUG/MINOR: proxy: fix dump_server_state()'s misuse of the trash
+ - BUG/MINOR: proxy: always initialize the trash in show servers state
+ - MINOR: cli/proxy: add a new "show servers conn" command
+ - MINOR: server: skip servers with no idle conns earlier
+ - BUG/MINOR: server: fix the connection release logic regarding nearly full conditions
+ - MEDIUM: server: add a new pool-low-conn server setting
+ - BUG/MEDIUM: backend: always search in the safe list after failing on the idle one
+ - MINOR: backend: don't always takeover from the same threads
+ - MINOR: sched: make sched->task_list_size atomic
+ - MEDIUM: sched: create a new TASK_KILLED task flag
+ - MEDIUM: sched: implement task_kill() to kill a task
+ - MEDIUM: mux-h1: use task_kill() during h1_takeover() instead of task_wakeup()
+ - MEDIUM: mux-h2: use task_kill() during h2_takeover() instead of task_wakeup()
+ - MEDIUM: mux-fcgi: use task_kill() during fcgi_takeover() instead of task_wakeup()
+ - MINOR: list: Add MT_LIST_DEL_SAFE_NOINIT() and MT_LIST_ADDQ_NOCHECK()
+ - CLEANUP: connections: rename the toremove_lock to takeover_lock
+ - MEDIUM: connections: Don't use a lock when moving connections to remove.
+ - DOC: configuration: add missing index entries for tune.pool-{low,high}-fd-ratio
+ - DOC: configuration: fix alphabetical ordering for tune.pool-{high,low}-fd-ratio
+ - MINOR: config: add a new tune.idle-pool.shared global setting.
+ - MINOR: 51d: silence a warning about null pointer dereference
+ - MINOR: debug: add a new "debug dev memstats" command
+ - MINOR: log-format: allow to preserve spacing in log format strings
+ - BUILD: debug: avoid build warnings with DEBUG_MEM_STATS
+ - BUG/MAJOR: sched: make sure task_kill() always queues the task
+ - BUG/MEDIUM: muxes: Make sure nobody stole the connection before using it.
+ - BUG/MEDIUM: cli/proxy: don't try to dump idle connection state if there's none
+ - BUILD: haproxy: fix build error when RLIMIT_AS is not set
+ - BUG/MAJOR: sched: make it work also when not building with DEBUG_STRICT
+ - MINOR: log: add time second fraction field to rfc5424 log timestamp.
+ - BUG/MINOR: log: missing timezone on iso dates.
+ - BUG/MEDIUM: server: don't kill all idle conns when there are not enough
+ - MINOR: sched: split tasklet_wakeup() into tasklet_wakeup_on()
+ - BUG/MEDIUM: connections: Set the tid for the old tasklet on takeover.
+ - BUG/MEDIUM: connections: Let the xprt layer know a takeover happened.
+ - BUG/MINOR: http_act: don't check capture id in backend (2)
+ - BUILD: makefile: disable threads by default on OpenBSD
+ - BUILD: peers: fix build warning with gcc 4.2.1
+ - CI: cirrus-ci: exclude slow reg-tests
+
+2020/06/26 : 2.2-dev11
+ - REGTEST: Add a simple script to tests errorfile directives in proxy sections
+ - BUG/MEDIUM: fcgi-app: Resolve the sink if a fcgi-app logs in a ring buffer
+ - BUG/MINOR: spoe: correction of setting bits for analyzer
+ - BUG/MINOR: cfgparse: Support configurations without newline at EOF
+ - MINOR: cfgparse: Warn on truncated lines / files
+ - BUG/MINOR: http_ana: clarify connection pointer check on L7 retry
+ - MINOR: debug: add a new DEBUG_FD build option
+ - BUG/MINOR: tasks: make sure never to exceed max_processed
+ - MINOR: task: add a new pointer to current tasklet queue
+ - BUG/MEDIUM: task: be careful not to run too many tasks at TL_URGENT
+ - BUG/MINOR: cfgparse: Fix argument reference in PARSE_ERR_TOOMANY message
+ - BUG/MINOR: cfgparse: Fix calculation of position for PARSE_ERR_TOOMANY message
+ - BUG/MEDIUM: ssl: fix ssl_bind_conf double free
+ - MINOR: ssl: free bind_conf_node in crtlist_free()
+ - MINOR: ssl: free the crtlist and the ckch during the deinit()
+ - BUG/MINOR: ssl: fix build with ckch_deinit() and crtlist_deinit()
+ - BUG/MINOR: ssl/cli: certs added from the CLI can't be deleted
+ - MINOR: ssl: move the ckch/crtlist deinit to ssl_sock.c
+ - MEDIUM: tasks: apply a fair CPU distribution between tasklet classes
+ - MINOR: tasks: make current_queue an index instead of a pointer
+ - MINOR: tasks: add a mask of the queues with active tasklets
+ - MINOR: tasks: pass the queue index to run_task_from_list()
+ - MINOR: tasks: make run_tasks_from_lists() scan the queues itself
+ - MEDIUM: tasks: add a tune.sched.low-latency option
+ - BUG/MEDIUM: ssl/cli: 'commit ssl cert' crashes when no private key
+ - BUG/MINOR: cfgparse: don't increment linenum on incomplete lines
+ - MINOR: tools: make parse_line() always terminate the args list
+ - BUG/MINOR: cfgparse: report extraneous args *after* the string is allocated
+ - MINOR: cfgparse: sanitize the output a little bit
+ - MINOR: cli/ssl: handle trailing slashes in crt-list commands
+ - MINOR: ssl: add the ssl_s_* sample fetches for server side certificate
+ - BUG/MEDIUM: http-ana: Don't loop trying to generate a malformed 500 response
+ - BUG/MINOR: stream-int: Don't wait to send truncated HTTP messages
+ - BUG/MINOR: http-ana: Set CF_EOI on response channel for generated responses
+ - BUG/MINOR: http-ana: Don't wait to send 1xx responses generated by HAProxy
+ - MINOR: spoe: Don't systematically create new applets if processing rate is low
+ - DOC: fix some typos in the ssl_s_{s|i}_dn documentation
+ - BUILD: fix ssl_sample.c when building against BoringSSL
+ - CI: travis-ci: switch BoringSSL builds to ninja
+ - CI: extend spellchecker whitelist
+ - DOC: assorted typo fixes in the documentation
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MINOR: http: Add support for http 413 status
+ - REGTEST: ssl: tests the ssl_f_* sample fetches
+ - REGTEST: ssl: add some ssl_c_* sample fetches test
+ - DOC: ssl: update the documentation of "commit ssl cert"
+ - BUG/MINOR: cfgparse: correctly deal with empty lines
+ - BUG/MEDIUM: fetch: Fix hdr_ip misparsing IPv4 addresses due to missing NUL
+
+2020/06/19 : 2.2-dev10
+ - BUILD: include: add sys/types before netinet/tcp.h
+ - BUG/MEDIUM: log: don't hold the log lock during writev() on a file descriptor
+ - BUILD: Remove nowarn for warnings that do not trigger
+ - BUG/MEDIUM: pattern: fix thread safety of pattern matching
+ - BUILD: Re-enable -Wimplicit-fallthrough
+ - BUG/MINOR: ssl: fix ssl-{min,max}-ver with openssl < 1.1.0
+ - BUILD: thread: add parenthesis around values of locking macros
+ - BUILD: proto_uxst: shut up yet another gcc's absurd warning
+ - BUG/MEDIUM: checks: Fix off-by-one in allocation of SMTP greeting cmd
+ - CI: travis-ci: use "-O1" for clang builds
+ - MINOR: haproxy: Add void deinit_and_exit(int)
+ - MINOR: haproxy: Make use of deinit_and_exit() for clean exits
+ - BUG/MINOR: haproxy: Free rule->arg.vars.expr during deinit_act_rules
+ - BUILD: compression: make gcc 10 happy with free_zlib()
+ - BUILD: atomic: add string.h for memcpy() on ARM64
+ - BUG/MINOR: http: make smp_fetch_body() report that the contents may change
+ - BUG/MINOR: tcp-rules: tcp-response must check the buffer's fullness
+ - BUILD: haproxy: mark deinit_and_exit() as noreturn
+ - BUG/MAJOR: vars: Fix bogus free() during deinit() for http-request rules
+ - BUG/MEDIUM: ebtree: use a byte-per-byte memcmp() to compare memory blocks
+ - MINOR: tools: add a new configurable line parse, parse_line()
+ - BUG/MEDIUM: cfgparse: use parse_line() to expand/unquote/unescape config lines
+ - BUG/MEDIUM: cfgparse: stop after a reasonable amount of fatal error
+ - MINOR: http: do not close connections anymore after internal responses
+ - BUG/MINOR: cfgparse: Add missing fatal++ in PARSE_ERR_HEX case
+ - BUG/MINOR: spoe: add missing key length check before checking key names
+ - MINOR: version: put the compiler version output into version.c not haproxy.c
+ - MINOR: compiler: always define __has_feature()
+ - MINOR: version: report the presence of the compiler's address sanitizer
+ - BUILD: Fix build by including haproxy/global.h
+ - BUG/MAJOR: connection: always disable ready events once reported
+ - CLEANUP: activity: remove unused counter fd_lock
+ - DOC: fd: make it clear that some fields ordering must absolutely be respected
+ - MINOR: activity: report the number of times poll() reports I/O
+ - MINOR: activity: rename confusing poll_* fields in the output
+ - MINOR: fd: Fix a typo in a comment.
+ - BUG/MEDIUM: fd: Don't fd_stop_recv() a fd we don't own.
+ - BUG/MEDIUM: fd: Call fd_stop_recv() when we just got a fd.
+ - MINOR: activity: group the per-loop counters at the top
+ - MINOR: activity: rename the "stream" field to "stream_calls"
+ - MEDIUM: fd: refine the fd_takeover() migration lock
+ - MINOR: fd: slightly optimize the fd_takeover double-CAS loop
+ - MINOR: fd: factorize the fd_takeover() exit path to make it safer
+ - MINOR: peers: do not use localpeer as an array anymore
+ - MEDIUM: peers: add the "localpeer" global option
+ - MEDIUM: fd: add experimental support for edge-triggered polling
+ - CONTRIB: debug: add the missing flags CO_FL_SAFE_LIST and CO_FL_IDLE_LIST
+ - MINOR: haproxy: process signals before runnable tasks
+ - MEDIUM: tasks: clean up the front side of the wait queue in wake_expired_tasks()
+ - MEDIUM: tasks: also process late wakeups in process_runnable_tasks()
+ - BUG/MINOR: cli: allow space escaping on the CLI
+ - BUG/MINOR: mworker/cli: fix the escaping in the master CLI
+ - BUG/MINOR: mworker/cli: fix semicolon escaping in master CLI
+ - REGTEST: http-rules: test spaces in ACLs
+ - REGTEST: http-rules: test spaces in ACLs with master CLI
+ - BUG/MAJOR: init: properly compute the default global.maxpipes value
+ - MEDIUM: map: make the "clear map" operation yield
+ - BUG/MEDIUM: stream-int: fix loss of CO_SFL_MSG_MORE flag in forwarding
+ - MINOR: mux_h1: Set H1_F_CO_MSG_MORE if we know we have more to send.
+ - BUG/MINOR: systemd: Wait for network to be online
+ - DOC: configuration: Unindent non-code sentences in the protobuf example
+ - DOC: configuration: http-check send was missing from matrix
+
+2020/06/11 : 2.2-dev9
+ - BUG/MINOR: http-htx: Don't forget to release the http reply in release function
+ - BUG/MINOR: http-htx: Fix a leak on error path during http reply parsing
+ - MINOR: checks: Remove dead code from process_chk_conn()
+ - REGTESTS: checks: Fix tls_health_checks when IPv6 addresses are used
+ - REGTESTS: Add missing OPENSSL to REQUIRE_OPTIONS for lua/txn_get_priv
+ - MINOR: lua: Use vars_unset_by_name_ifexist()
+ - CLEANUP: vars: Remove void vars_unset_by_name(const char*, size_t, struct sample*)
+ - MINOR: vars: Make vars_(un|)set_by_name(_ifexist|) return a success value
+ - MINOR: lua: Make `set_var()` and `unset_var()` return success
+ - MEDIUM: lua: Add `ifexist` parameter to `set_var`
+ - MEDIUM: ring: new section ring to declare custom ring buffers.
+ - REGTESTS: Add missing OPENSSL to REQUIRE_OPTIONS for compression/lua_validation
+ - REGTESTS: Require the version 2.2 to execute lua/set_var
+ - BUG/MEDIUM: checks: Refresh the conn-stream and the connection after a connect
+ - MINOR: checks: Remove useless tests on the connection and conn-stream
+ - BUG/MEDIUM: contrib/spoa: do not register python3.8 if --embed fail
+ - BUG/MEDIUM: connection: Ignore PP2 unique ID for stream-less connections
+ - BUG/MINOR: connection: Always get the stream when available to send PP2 line
+ - BUG/MEDIUM: backend: set the connection owner to the session when using alpn.
+ - MINOR: pools: compute an estimate of each pool's average needed objects
+ - MEDIUM: pools: directly free objects when pools are too much crowded
+ - REGTEST: Add connection/proxy_protocol_send_unique_id_alpn
+ - MINOR: http-ana: Make the function http_reply_to_htx() public
+ - MINOR: http-ana: Use proxy's error replies to emit 401/407 responses
+ - MINOR: http-rules: Use an action function to eval http-request auth rules
+ - CLEANUP: http: Remove unused HTTP message templates
+ - BUG/MEDIUM: checks: Don't blindly subscribe for receive if waiting for connect
+ - MINOR: checks: I/O callback function only rely on the data layer wake callback
+ - BUG/MINOR: lua: Add missing string length for lua sticktable lookup
+ - BUG/MEDIUM: logs: fix trailing zeros on log message.
+ - CI: cirrus-ci: skip reg-tests/connection/proxy_protocol_send_unique_id_alpn.vtc on CentOS 6
+ - BUG/MINOR: nameservers: fix error handling in parsing of resolv.conf
+ - BUG/MEDIUM: checks: Don't add a tcpcheck ruleset twice in the shared tree
+ - MEDIUM: ssl: use TLSv1.2 as the minimum default on bind lines
+ - CLEANUP: pools: use the regular lock for the flush operation on lockless pools
+ - SCRIPTS: publish-release: pass -n to gzip to remove timestamp
+ - MINOR: ring: re-work ring attach generic API.
+ - BUG/MINOR: error on unknown statement in ring section.
+ - MEDIUM: ring: add server statement to forward messages from a ring
+ - MEDIUM: ring: add new srv statement to support octet counting forward
+ - MINOR: ssl: set ssl-min-ver in ambiguous configurations
+ - CLEANUP: ssl: remove comment from dump_crtlist_sslconf()
+ - BUILD: sink: address build warning on 32-bit architectures
+ - BUG/MINOR: peers: fix internal/network key type mapping.
+ - CLEANUP: regex: remove outdated support for regex actions
+ - Revert "MINOR: ssl: rework add cert chain to CTX to be libssl independent"
+ - MINOR: mux-h1/proxy: Add a proxy option to disable clear h2 upgrade
+ - BUG/MEDIUM: lua: Reset analyse expiration timeout before executing a lua action
+ - DOC: add a line about comments in crt-list
+ - BUG/MEDIUM: hlua: Lock pattern references to perform set/add/del operations
+ - BUG/MINOR: checks: Fix test on http-check rulesets during config validity check
+ - BUG/MEDIUM: contrib/prometheus-exporter: Properly set flags to dump metrics
+ - BUG/MEDIUM: mworker: fix the copy of options in copy_argv()
+ - BUG/MINOR: init: -x can have a parameter starting with a dash
+ - BUG/MINOR: init: -S can have a parameter starting with a dash
+ - BUG/MEDIUM: mworker: fix the reload with an -- option
+ - BUG/MINOR: ssl: fix a trash buffer leak in some error cases
+ - BUG/MINOR: mworker: fix a memleak when execvp() failed
+ - MINOR: sample: Add secure_memcmp converter
+ - REORG: ebtree: move the C files from ebtree/ to src/
+ - REORG: ebtree: move the include files from ebtree to include/import/
+ - REORG: ebtree: clean up remains of the ebtree/ directory
+ - REORG: include: create new file haproxy/api-t.h
+ - REORG: include: create new file haproxy/api.h
+ - REORG: include: update all files to use haproxy/api.h or api-t.h if needed
+ - CLEANUP: include: remove common/config.h
+ - CLEANUP: include: remove unused template.h
+ - REORG: include: move MIN/MAX from tools.h to compat.h
+ - REORG: include: move SWAP/MID_RANGE/MAX_RANGE from tools.h to standard.h
+ - CLEANUP: include: remove unused common/tools.h
+ - REORG: include: move the base files from common/ to haproxy/
+ - REORG: include: move version.h to haproxy/
+ - REORG: include: move base64.h, errors.h and hash.h from common to to haproxy/
+ - REORG: include: move openssl-compat.h from common/ to haproxy/
+ - REORG: include: move ist.h from common/ to import/
+ - REORG: include: move the BUG_ON() code to haproxy/bug.h
+ - REORG: include: move debug.h from common/ to haproxy/
+ - CLEANUP: debug: drop unused function p_malloc()
+ - REORG: include: split buf.h into haproxy/buf-t.h and haproxy/buf.h
+ - REORG: include: move istbuf.h to haproxy/
+ - REORG: include: split mini-clist into haproxy/list and list-t.h
+ - REORG: threads: extract atomic ops from hathreads.h
+ - CLEANUP: threads: remove a few needless includes of hathreads.h
+ - REORG: include: split hathreads into haproxy/thread.h and haproxy/thread-t.h
+ - CLEANUP: thread: rename __decl_hathreads() to __decl_thread()
+ - REORG: include: move time.h from common/ to haproxy/
+ - REORG: include: move integer manipulation functions from standard.h to intops.h
+ - CLEANUP: include: remove excessive includes of common/standard.h
+ - REORG: include: move freq_ctr to haproxy/
+ - CLEANUP: pool: include freq_ctr.h and remove locally duplicated functions
+ - REORG: memory: move the pool type definitions to haproxy/pool-t.h
+ - REORG: memory: move the OS-level allocator to haproxy/pool-os.h
+ - MINOR: memory: don't let __pool_get_first() pick from the cache
+ - MEDIUM: memory: don't let pool_put_to_cache() free the objects itself
+ - MINOR: memory: move pool-specific path of the locked pool_free() to __pool_free()
+ - MEDIUM: memory: make local pools independent on lockless pools
+ - REORG: include: move common/memory.h to haproxy/pool.h
+ - REORG: include: move common/chunk.h to haproxy/chunk.h
+ - REORG: include: move activity to haproxy/
+ - REORG: include: move common/buffer.h to haproxy/dynbuf{,-t}.h
+ - REORG: include: move common/net_helper.h to haproxy/net_helper.h
+ - REORG: include: move common/namespace.h to haproxy/namespace{,-t}.h
+ - REORG: include: split common/regex.h into haproxy/regex{,-t}.h
+ - REORG: include: split common/xref.h into haproxy/xref{,-t}.h
+ - REORG: include: move common/ticks.h to haproxy/ticks.h
+ - REORG: include: split common/http.h into haproxy/http{,-t}.h
+ - REORG: include: split common/http-hdr.h into haproxy/http-hdr{,-t}.h
+ - REORG: include: move common/h1.h to haproxy/h1.h
+ - REORG: include: split common/htx.h into haproxy/htx{,-t}.h
+ - REORG: include: move hpack*.h to haproxy/ and split hpack-tbl
+ - REORG: include: move common/h2.h to haproxy/h2.h
+ - REORG: include: move common/fcgi.h to haproxy/
+ - REORG: include: move protocol.h to haproxy/protocol{,-t}.h
+ - REORG: tools: split common/standard.h into haproxy/tools{,-t}.h
+ - REORG: include: move dict.h to haproxy/dict{,-t}.h
+ - REORG: include: move shctx to haproxy/shctx{,-t}.h
+ - REORG: include: move port_range.h to haproxy/port_range{,-t}.h
+ - REORG: include: move fd.h to haproxy/fd{,-t}.h
+ - REORG: include: move ring to haproxy/ring{,-t}.h
+ - REORG: include: move sink.h to haproxy/sink{,-t}.h
+ - REORG: include: move pipe.h to haproxy/pipe{,-t}.h
+ - CLEANUP: include: remove empty raw_sock.h
+ - REORG: include: move proto_udp.h to haproxy/proto_udp{,-t}.h
+ - REORG: include: move proto/proto_sockpair.h to haproxy/proto_sockpair.h
+ - REORG: include: move compression.h to haproxy/compression{,-t}.h
+ - REORG: include: move h1_htx.h to haproxy/h1_htx.h
+ - REORG: include: move http_htx.h to haproxy/http_htx{,-t}.h
+ - REORG: include: move hlua.h to haproxy/hlua{,-t}.h
+ - REORG: include: move hlua_fcn.h to haproxy/hlua_fcn.h
+ - REORG: include: move action.h to haproxy/action{,-t}.h
+ - REORG: include: move arg.h to haproxy/arg{,-t}.h
+ - REORG: include: move auth.h to haproxy/auth{,-t}.h
+ - REORG: include: move dns.h to haproxy/dns{,-t}.h
+ - REORG: include: move flt_http_comp.h to haproxy/
+ - REORG: include: move counters.h to haproxy/counters-t.h
+ - REORG: include: split mailers.h into haproxy/mailers{,-t}.h
+ - REORG: include: move capture.h to haproxy/capture{,-t}.h
+ - REORG: include: move frontend.h to haproxy/frontend.h
+ - REORG: include: move obj_type.h to haproxy/obj_type{,-t}.h
+ - REORG: include: move http_rules.h to haproxy/http_rules.h
+ - CLEANUP: include: remove unused mux_pt.h
+ - REORG: include: move mworker.h to haproxy/mworker{,-t}.h
+ - REORG: include: move ssl_utils.h to haproxy/ssl_utils.h
+ - REORG: include: move ssl_ckch.h to haproxy/ssl_ckch{,-t}.h
+ - REORG: move ssl_crtlist.h to haproxy/ssl_crtlist{,-t}.h
+ - REORG: include: move lb_chash.h to haproxy/lb_chash{,-t}.h
+ - REORG: include: move lb_fas.h to haproxy/lb_fas{,-t}.h
+ - REORG: include: move lb_fwlc.h to haproxy/lb_fwlc{,-t}.h
+ - REORG: include: move lb_fwrr.h to haproxy/lb_fwrr{,-t}.h
+ - REORG: include: move listener.h to haproxy/listener{,-t}.h
+ - REORG: include: move pattern.h to haproxy/pattern{,-t}.h
+ - REORG: include: move map to haproxy/map{,-t}.h
+ - REORG: include: move payload.h to haproxy/payload.h
+ - REORG: include: move sample.h to haproxy/sample{,-t}.h
+ - REORG: include: move protocol_buffers.h to haproxy/protobuf{,-t}.h
+ - REORG: include: move vars.h to haproxy/vars{,-t}.h
+ - REORG: include: split global.h into haproxy/global{,-t}.h
+ - REORG: include: move task.h to haproxy/task{,-t}.h
+ - REORG: include: move proto_tcp.h to haproxy/proto_tcp.h
+ - REORG: include: move signal.h to haproxy/signal{,-t}.h
+ - REORG: include: move tcp_rules.h to haproxy/tcp_rules.h
+ - REORG: include: move connection.h to haproxy/connection{,-t}.h
+ - REORG: include: move checks.h to haproxy/check{,-t}.h
+ - REORG: include: move http_fetch.h to haproxy/http_fetch.h
+ - REORG: include: move peers.h to haproxy/peers{,-t}.h
+ - REORG: include: move stick_table.h to haproxy/stick_table{,-t}.h
+ - REORG: include: move session.h to haproxy/session{,-t}.h
+ - REORG: include: move trace.h to haproxy/trace{,-t}.h
+ - REORG: include: move acl.h to haproxy/acl.h{,-t}.h
+ - REORG: include: split common/uri_auth.h into haproxy/uri_auth{,-t}.h
+ - REORG: move applet.h to haproxy/applet{,-t}.h
+ - REORG: include: move stats.h to haproxy/stats{,-t}.h
+ - REORG: include: move cli.h to haproxy/cli{,-t}.h
+ - REORG: include: move lb_map.h to haproxy/lb_map{,-t}.h
+ - REORG: include: move ssl_sock.h to haproxy/ssl_sock{,-t}.h
+ - REORG: include: move stream_interface.h to haproxy/stream_interface{,-t}.h
+ - REORG: include: move channel.h to haproxy/channel{,-t}.h
+ - REORG: include: move http_ana.h to haproxy/http_ana{,-t}.h
+ - REORG: include: move filters.h to haproxy/filters{,-t}.h
+ - REORG: include: move fcgi-app.h to haproxy/fcgi-app{,-t}.h
+ - REORG: include: move log.h to haproxy/log{,-t}.h
+ - REORG: include: move proxy.h to haproxy/proxy{,-t}.h
+ - REORG: include: move spoe.h to haproxy/spoe{,-t}.h
+ - REORG: include: move backend.h to haproxy/backend{,-t}.h
+ - REORG: include: move queue.h to haproxy/queue{,-t}.h
+ - REORG: include: move server.h to haproxy/server{,-t}.h
+ - REORG: include: move stream.h to haproxy/stream{,-t}.h
+ - REORG: include: move cfgparse.h to haproxy/cfgparse.h
+ - CLEANUP: hpack: export debug functions and move inlines to .h
+ - REORG: check: move the e-mail alerting code to mailers.c
+ - REORG: check: move tcpchecks away from check.c
+ - REORG: check: move email_alert* from proxy-t.h to mailers-t.h
+ - REORG: check: extract the external checks from check.{c,h}
+ - CLEANUP: include: don't include stddef.h directly
+ - CLEANUP: include: don't include proxy-t.h in global-t.h
+ - CLEANUP: include: move sample_data out of sample-t.h
+ - REORG: include: move the error reporting functions to from log.h to errors.h
+ - BUILD: reorder objects in the Makefile for faster builds
+ - CLEANUP: compiler: add a THREAD_ALIGNED macro and use it where appropriate
+ - CLEANUP: include: make atomic.h part of the base API
+ - REORG: include: move MAX_THREADS to defaults.h
+ - REORG: include: move THREAD_LOCAL and __decl_thread() to compiler.h
+ - CLEANUP: include: tree-wide alphabetical sort of include files
+ - REORG: include: make list-t.h part of the base API
+ - REORG: dgram: rename proto_udp to dgram
+
+2020/05/22 : 2.2-dev8
+ - MINOR: checks: Improve report of unexpected errors for expect rules
+ - MEDIUM: checks: Add matching on log-format string for expect rules
+ - DOC: Fix req.body and co documentation to be accurate
+ - MEDIUM: checks: Remove dedicated sample fetches and use response ones instead
+ - CLEANUP: checks: sort and rename tcpcheck_expect_type types
+ - MINOR: checks: Use dedicated actions to send log-format strings in send rules
+ - MINOR: checks: Simplify matching on HTTP headers in HTTP expect rules
+ - MINOR: checks/sample: Remove unnecessary tests on the sample session
+ - REGTEST: checks: Adapt SSL error message reported when connection is rejected
+ - MINOR: mworker: replace ha_alert by ha_warning when exiting successfully
+ - MINOR: checks: Support log-format string to set the URI for HTTP send rules
+ - MINOR: checks: Support log-format string to set the body for HTTP send rules
+ - DOC: Be more explicit about configurable check ok/error/timeout status
+ - MINOR: checks: Make matching on HTTP headers for expect rules less obscure
+ - BUG/MEDIUM: lua: Fix dumping of stick table entries for STD_T_DICT
+ - BUG/MINOR: config: Make use_backend and use-server post-parsing less obscure
+ - REGTESTS: make the http-check-send test require version 2.2
+ - BUG/MINOR: http-ana: fix NTLM response parsing again
+ - BUG/MEDIUM: http_ana: make the detection of NTLM variants safer
+ - BUG/MINOR: cfgparse: Abort parsing the current line if an invalid \x sequence is encountered
+ - MINOR: cfgparse: Improve error message for invalid \x sequences
+ - CI: travis-ci: enable arm64 builds again
+ - MEDIUM: ssl: increase default-dh-param to 2048
+ - CI: travis-ci: skip pcre2 on arm64 build
+ - CI: travis-ci: extend the build time for SSL to 60 minutes
+ - CLEANUP: config: drop unused setting CONFIG_HAP_MEM_OPTIM
+ - CLEANUP: config: drop unused setting CONFIG_HAP_INLINE_FD_SET
+ - CLEANUP: config: move CONFIG_HAP_LOCKLESS_POOLS out of config.h
+ - CLEANUP: remove THREAD_LOCAL from config.h
+ - CI: travis-ci: upgrade LibreSSL versions
+ - DOC: assorted typo fixes in the documentation
+ - CI: extend spellchecker whitelist
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MAJOR: contrib: porting spoa_server to support python3
+ - BUG/MEDIUM: checks: Subscribe to I/O events on an unfinished connect
+ - BUG/MINOR: checks: Don't subscribe to I/O events if it is already done
+ - BUG/MINOR: checks: Rely on next I/O oriented rule when waiting for a connection
+ - MINOR: checks: Don't try to send outgoing data if waiting to be able to send
+ - MINOR: sample: Move aes_gcm_dec implementation into sample.c
+ - MINOR: sample: Add digest and hmac converters
+ - BUG/MEDIUM: checks: Subscribe to I/O events only if a mux was installed
+ - BUG/MINOR: sample/ssl: Fix digest converter for openssl < 1.1.0
+ - BUG/MINOR: pools: use %u not %d to report pool stats in "show pools"
+ - BUG/MINOR: pollers: remove unneeded free in global init
+ - CLEANUP: select: enhance readability in init
+ - BUG/MINOR: soft-stop: always wake up waiting threads on stopping
+ - MINOR: soft-stop: let the first stopper only signal other threads
+ - BUILD: select: only declare existing local labels to appease clang
+ - BUG/MEDIUM: streams: Remove SF_ADDR_SET if we're retrying due to L7 retry.
+ - BUG/MEDIUM: stream: Only allow L7 retries when using HTTP.
+ - DOC: retry-on can only be used with mode http
+ - MEDIUM: ssl: allow to register callbacks for SSL/TLS protocol messages
+ - MEDIUM: ssl: split ssl_sock_msgcbk() and use a new callback mechanism
+ - MINOR: ssl: add a new function ssl_sock_get_ssl_object()
+ - MEDIUM: ssl: use ssl_sock_get_ssl_object() in fetchers where appropriate
+ - REORG: ssl: move macros and structure definitions to ssl_sock.h
+ - CLEANUP: ssl: remove the shsess_* macros
+ - REORG: move the crt-list structures in their own .h
+ - REORG: ssl: move the ckch structures to types/ssl_ckch.h
+ - CLEANUP: ssl: add ckch prototypes in proto/ssl_ckch.h
+ - REORG: ssl: move crtlist functions to src/ssl_crtlist.c
+ - CLEANUP: ssl: avoid circular dependencies in ssl_crtlist.h
+ - REORG: ssl: move the ckch_store related functions to src/ssl_ckch.c
+ - REORG: ssl: move ckch_inst functions to src/ssl_ckch.c
+ - REORG: ssl: move the crt-list CLI functions in src/ssl_crtlist.c
+ - REORG: ssl: move the CLI 'cert' functions to src/ssl_ckch.c
+ - REORG: ssl: move ssl configuration to cfgparse-ssl.c
+ - MINOR: ssl: remove static keyword in some SSL utility functions
+ - REORG: ssl: move ssl_sock_ctx and fix cross-dependencies issues
+ - REORG: ssl: move sample fetches to src/ssl_sample.c
+ - REORG: ssl: move utility functions to src/ssl_utils.c
+ - DOC: ssl: update MAINTAINERS file
+ - CI: travis-ci: switch arm64 builds to use openssl from distro
+ - MINOR: stats: Prepare for more accurate moving averages
+ - MINOR: stats: Expose native cum_req metric for a server
+ - MEDIUM: stats: Enable more accurate moving average calculation for stats
+ - BUILD: ssl: include buffer common headers for ssl_sock_ctx
+ - BUILD: ssl: include errno.h in ssl_crtlist.c
+ - CLEANUP: acl: remove unused assignment
+ - DOC/MINOR: halog: Add long help info for ic flag
+ - BUILD: ssl: fix build without OPENSSL_NO_ENGINE
+ - DOC: SPOE is no longer experimental
+ - BUG/MINOR: cache: Don't needlessly test "cache" keyword in parse_cache_flt()
+ - MINOR: config: Don't dump keywords if argument is NULL
+ - MEDIUM: checks: Make post-41 the default mode for mysql checks
+ - BUG/MINOR: logs: prevent double line returns in some events.
+ - MEDIUM: sink: build header in sink_write for log formats
+ - MEDIUM: logs: buffer targets now rely on new sink_write
+ - MEDIUM: sink: add global statement to create a new ring (sink buffer)
+ - MEDIUM: hpack: use a pool for the hpack table
+ - BUG/MAJOR: mux-fcgi: Stop sending loop if FCGI stream is blocked for any reason
+ - BUG/MEDIUM: ring: write-lock the ring while attaching/detaching
+ - MINOR: applet: adopt the wait list entry from the CLI
+ - MINOR: ring: make the applet code not depend on the CLI
+ - Revert "MEDIUM: sink: add global statement to create a new ring (sink buffer)"
+ - CI: travis-ci: fix libslz download URL
+ - MINOR: ssl: split config and runtime variable for ssl-{min,max}-ver
+ - CLEANUP: http_ana: Remove unused TXN flags
+ - BUG/MINOR: http-rules: Mark http return rules as final
+ - MINOR: http-htx: Add http_reply type based on what is used for http return rules
+ - CLEANUP: http-htx: Rename http_error structure into http_error_msg
+ - MINOR: http-rules: Use http_reply structure for http return rules
+ - MINOR: http-htx: Use a dedicated function to release http_reply objects
+ - MINOR: http-htx: Use a dedicated function to parse http reply arguments
+ - MINOR: http-htx: Use a dedicated function to check http reply validity
+ - MINOR: http-ana: Use a dedicated function to send a response from an http reply
+ - MEDIUM: http-rules: Rely on http reply for http deny/tarpit rules
+ - MINOR: http-htx: Store default error messages in a global http reply array
+ - MINOR: http-htx: Store messages of an http-errors section in a http reply array
+ - MINOR: http-htx: Store errorloc/errorfile messages in http replies
+ - MINOR: proxy: Add references on http replies for proxy error messages
+ - MINOR: http-htx: Use http reply from the http-errors section
+ - MINOR: http-ana: Use a TXN flag to prevent after-response ruleset evaluation
+ - MEDIUM: http-ana: Use http replies for HTTP error messages
+ - CLEANUP: http-htx: Remove unused storage of error messages in buffers
+ - MINOR: htx: Add a function to copy a buffer in an HTX message
+ - CLEANUP: channel: Remove channel_htx_copy_msg() function
+ - MINOR: http-ana: Add a function to write an http reply in an HTX message
+ - MINOR: http-htx/proxy: Add http-error directive using http return syntax
+ - DOC: Fix "errorfile" description in the configuration manual
+ - BUG/MINOR: checks: Respect check-ssl param when a port or an addr is specified
+ - BUILD: hpack: make sure the hpack table can still be built standalone
+ - CONTRIB: hpack: make use of the simplified standalone HPACK API
+ - MINOR: connection: add pp2-never-send-local to support old PP2 behavior
+
+2020/05/05 : 2.2-dev7
+ - MINOR: version: Show uname output in display_version()
+ - CI: run weekly OpenSSL "no-deprecated" builds
+ - CLEANUP: log: fix comment of parse_logformat_string()
+ - DOC: Improve documentation on http-request set-src
+ - MINOR: ssl/cli: disallow SSL options for directory in 'add ssl crt-list'
+ - MINOR: ssl/cli: restrain certificate path when inserting into a directory
+ - MINOR: ssl: add ssl-skip-self-issued-ca global option
+ - BUG/MINOR: ssl: default settings for ssl server options are not used
+ - MINOR: config: add a global directive to set default SSL curves
+ - BUG/MEDIUM: http-ana: Handle NTLM messages correctly.
+ - DOC: internals: update the SSL architecture schema
+ - BUG/MINOR: tools: fix the i386 version of the div64_32 function
+ - BUG/MINOR: mux-fcgi/trace: fix wrong set of trace flags in fcgi_strm_add_eom()
+ - BUG/MINOR: http: make url_decode() optionally convert '+' to SP
+ - DOC: option logasap does not depend on mode
+ - MEDIUM: memory: make pool_gc() run under thread isolation
+ - MINOR: contrib: make the peers wireshark dissector a plugin
+ - BUG/MINOR: http-ana: Throw a 500 error if after-response ruleset fails on errors
+ - BUG/MINOR: check: Update server address and port to execute an external check
+ - MINOR: mini-clist: Add functions to iterate backward on a list
+ - MINOR: checks: Add a way to send custom headers and payload during http checks
+ - MINOR: server: respect warning and alert semantic
+ - BUG/MINOR: checks: Respect the no-check-ssl option
+ - BUG/MEDIUM: server/checks: Init server check during config validity check
+ - CLEANUP: checks: Don't export anymore init_check and srv_check_healthcheck_port
+ - BUG/MINOR: checks: chained expect will not properly wait for enough data
+ - BUG/MINOR: checks: Forbid tcp-check lines in default section as documented
+ - MINOR: checks: Use an enum to describe the tcp-check rule type
+ - MINOR: checks: Simplify connection flag parsing in tcp-check connect
+ - MEDIUM: checks: rewind to the first inverse expect rule of a chain on new data
+ - MINOR: checks: simplify tcp expect config parser
+ - MINOR: checks: add min-recv tcp-check expect option
+ - MINOR: checks: add linger option to tcp connect
+ - MINOR: checks: define a tcp expect type
+ - MEDIUM: checks: rewrite tcp-check expect block
+ - MINOR: checks: Stop xform buffers to null-terminated string for tcp-check rules
+ - MINOR: checks: add rbinary expect match type
+ - MINOR: checks: Simplify functions to get step id and comment
+ - MEDIUM: checks: capture groups in expect regexes
+ - MINOR: checks: Don't use a static tcp rule list head
+ - MEDIUM: checks: Use a non-comment rule iterator to get next rule
+ - MEDIUM: proxy/checks: Register a keyword to parse tcp-check rules
+ - MINOR: checks: Set the tcp-check rule index during parsing
+ - MINOR: checks: define tcp-check send type
+ - MINOR: checks: define a tcp-check connect type
+ - MEDIUM: checks: Add implicit tcp-check connect rule
+ - MAJOR: checks: Refactor and simplify the tcp-check loop
+ - MEDIUM: checks: Associate a session to each tcp-check healthcheck
+ - MINOR: checks/vars: Add a check scope for variables
+ - MEDIUM: checks: Parse custom action rules in tcp-checks
+ - MINOR: checks: Add support to set-var and unset-var rules in tcp-checks
+ - MINOR: checks: Add the sni option for tcp-check connect rules
+ - MINOR: checks: Add the via-socks4 option for tcp-check connect rules
+ - MINOR: checks: Add the alpn option for tcp-check connect rules
+ - MINOR: ssl: Export a generic function to parse an alpn string
+ - MINOR: checks: Add the default option for tcp-check connect rules
+ - MINOR: checks: Add the addr option for tcp-check connect rule
+ - MEDIUM: checks: Support expression to set the port
+ - MEDIUM: checks: Support log-format strings for tcp-check send rules
+ - MINOR: log: Don't depends on a stream to process samples in log-format string
+ - MINOR: log: Don't systematically set LW_REQ when a sample expr is added
+ - MEDIUM: checks: Add a shared list of tcp-check rules
+ - MINOR: sample: add htonl converter
+ - MINOR: sample: add cut_crlf converter
+ - MINOR: sample: add ltrim converter
+ - MINOR: sample: add rtrim converter
+ - MINOR: checks: Use a name for the healthcheck status enum
+ - MINOR: checks: Add option to tcp-check expect rules to customize error status
+ - MINOR: checks: Merge tcp-check comment rules with the others at config parsing
+ - MINOR: checks: Add a sample fetch to extract a block from the input check buffer
+ - MEDIUM: checks: Add on-error/on-success option on tcp-check expect rules
+ - MEDIUM: checks: Add status-code sample expression on tcp-check expect rules
+ - MINOR: checks: Relax the default option for tcp-check connect rules
+ - MEDIUM: checks: Add a list of vars to set before executing a tcp-check ruleset
+ - MINOR: checks: Export the tcpcheck_eval_ret enum
+ - MINOR: checks: Use dedicated function to handle onsuccess/onerror messages
+ - MINOR: checks: Support custom functions to eval a tcp-check expect rules
+ - MEDIUM: checks: Implement redis check using tcp-check rules
+ - MEDIUM: checks: Implement ssl-hello check using tcp-check rules
+ - MEDIUM: checks: Implement smtp check using tcp-check rules
+ - MEDIUM: checks: Implement postgres check using tcp-check rules
+ - MEDIUM: checks: Implement MySQL check using tcp-check rules
+ - MEDIUM: checks: Implement LDAP check using tcp-check rules
+ - MEDIUM: checks: Implement SPOP check using tcp-check rules
+ - MINOR: server/checks: Move parsing of agent keywords in checks.c
+ - MINOR: server/checks: Move parsing of server check keywords in checks.c
+ - MEDIUM: checks: Implement agent check using tcp-check rules
+ - REGTEST: Adapt regtests about checks to recent changes
+ - MINOR: Produce tcp-check info message for pure tcp-check rules only
+ - MINOR: checks: Add an option to set success status of tcp-check expect rules
+ - MINOR: checks: Improve log message of tcp-checks on success
+ - MINOR: proxy/checks: Move parsing of httpchk option in checks.c
+ - MINOR: proxy/checks: Move parsing of tcp-check option in checks.c
+ - MINOR: proxy/checks: Register a keyword to parse http-check rules
+ - MINOR: proxy/checks: Move parsing of external-check option in checks.c
+ - MINOR: proxy/checks: Register a keyword to parse external-check rules
+ - MEDIUM: checks: Use a shared ruleset to store tcp-check rules
+ - MINOR: checks: Use an indirect string to represent the expect matching string
+ - MINOR: checks: Introduce flags to configure in tcp-check expect rules
+ - MINOR: standard: Add my_memspn and my_memcspn
+ - MINOR: checks: Add a reverse non-comment rule iterator to get last rule
+ - MAJOR: checks: Implement HTTP check using tcp-check rules
+ - MINOR: checks: Make resume conditions more explicit in tcpcheck_main()
+ - MINOR: connection: Add macros to know if a conn or a cs uses an HTX mux
+ - MEDIUM: checks: Refactor how data are received in tcpcheck_main()
+ - MINOR: checks/obj_type: Add a new object type for checks
+ - BUG/MINOR: obj_type: Handle stream object in obj_base_ptr() function
+ - MINOR: checks: Use the check as origin when a session is created
+ - MINOR: checks: Add a mux proto to health-check and tcp-check connect rule
+ - MINOR: connection: Add a function to install a mux for a health-check
+ - MAJOR: checks: Use the best mux depending on the protocol for health checks
+ - MEDIUM: checks: Implement default TCP check using tcp-check rules
+ - MINOR: checks: Remove unused code about pure TCP checks
+ - CLEANUP: checks: Reorg checks.c file to be more readable
+ - REGTEST: Fix reg-tests about health-checks to adapt them to recent changes
+ - MINOR: ist: Add a function to retrieve the ist pointer
+ - MINOR: checks: Use ist API as far as possible
+ - BUG/MEDIUM: checks: Be sure to subscribe for sends if outgoing data remains
+ - MINOR: checks: Use a tree instead of a list to store tcp-check rulesets
+ - BUG/MINOR: checks: Send the right amount of outgoing data for HTTP checks
+ - REGTEST: Add scripts to test based tcp-check health-checks
+ - Revert "MEDIUM: checks: capture groups in expect regexes"
+ - DOC: Add documentation about comments for tcp-check and http-check directives
+ - DOC: Fix the tcp-check and http-check directives layout
+ - BUG/MEDIUM: checks: Use the mux protocol specified on the server line
+ - MINOR: checks: Support mux protocol definition for tcp and http health checks
+ - BUG/MINOR: mux-fcgi: Be sure to have a connection as session's origin to use it
+ - MINOR: checks: Support list of status codes on http-check expect rules
+ - BUG/MEDIUM: checks: Unsubscribe to mux events when a conn-stream is destroyed
+ - REGTEST: Add a script to validate agent checks
+ - BUG/MINOR: server: Fix server_finalize_init() to avoid unused variable
+ - BUG/MEDIUM: checks: unsubscribe for events on the old conn-stream on connect
+ - BUG/MINOR: checks: Only use ssl_sock_is_ssl() if compiled with SSL support
+ - BUG/MINOR: checks/server: use_ssl member must be signed
+ - BUG/MEDIUM: sessions: Always pass the mux context as argument to destroy a mux
+ - BUG/MEDIUM: checks: Destroy the conn-stream before the session
+ - BUG/MINOR: checks: Fix PostgreSQL regex on the authentication packet
+ - CI: cirrus-ci: remove reg-tests/checks/tcp-check-ssl.vtc on CentOS 6
+ - MINOR: checks: Support HTTP/2 version (without '.0') for http-check send rules
+ - MINOR: checks: Use ver keyword to specify the HTTP version for http checks
+ - BUG/MINOR: checks: Remove wrong variable redeclaration
+ - BUG/MINOR: checks: Properly handle truncated mysql server messages
+ - CLEANUP: checks: Remove unused code when ldap server message is parsed
+ - MINOR: checks: Make the use of the check's server more explicit on connect
+ - BUG/MINOR: checks: Avoid incompatible cast when a binary string is parsed
+ - BUG/MINOR: checks: Remove bad call to free() when an expect rule is parsed
+ - BUG/MINOR: checks: Don't lose warning on proxy capability
+ - MINOR: log: Add "Tu" timer
+ - BUG/MINOR: checks: Set the output buffer length before calling parse_binary()
+ - BUG/MEDIUM: mux-h1: make sure we always have a timeout on front connections
+ - REGTEST: ssl: test the client certificate authentication
+ - DOC: give a more accurate description of what check does
+ - BUG/MEDIUM: capture: capture-req/capture-res converters crash without a stream
+ - BUG/MEDIUM: capture: capture.{req,res}.* crash without a stream
+ - BUG/MEDIUM: http: the "http_first_req" sample fetch could crash without a stream
+ - BUG/MEDIUM: http: the "unique-id" sample fetch could crash without a stream
+ - CLEANUP: http: add a few comments on certain functions' assumptions about streams
+ - BUG/MEDIUM: sample: make the CPU and latency sample fetches check for a stream
+ - MINOR: http-htx: Export functions to update message authority and host
+ - MINOR: checks: Don't support multiple host header for http-check send rule
+ - MINOR: checks: Skip some headers for http-check send rules
+ - MINOR: checks: Keep the Host header and the request uri synchronized
+ - CLEANUP: checks: Fix checks includes
+ - DOC: Fix send rules in the http-check connect example
+ - DOC: Add more info about request formatting in http-check send description
+ - REGTEST: http-rules: Require PCRE or PCRE2 option to run map_redirect script
+ - REGTEST: ssl: remove curl from the "add ssl crt-list" test
+ - REGTEST: ssl: improve the "set ssl cert" test
+ - CLEANUP: ssl: silence a build warning when threads are disabled
+ - BUG/MEDIUM: listener: mark the thread as not stuck inside the loop
+ - MINOR: threads: export the POSIX thread ID in panic dumps
+ - BUG/MINOR: debug: properly use long long instead of long for the thread ID
+ - BUG/MEDIUM: shctx: really check the lock's value while waiting
+ - BUG/MEDIUM: shctx: bound the number of loops that can happen around the lock
+ - MINOR: stream: report the list of active filters on stream crashes
+ - BUG/MEDIUM: mux-fcgi: Return from detach if server don't keep the connection
+ - BUG/MEDIUM: mux_fcgi: Free the FCGI connection at the end of fcgi_release()
+ - BUG/MEDIUM: mux-fcgi: Fix wrong test on FCGI_CF_KEEP_CONN in fcgi_detach()
+ - BUG/MEDIUM: connections: force connections cleanup on server changes
+ - BUG/MEDIUM: h1: Don't compare host and authority if only h1 headers are parsed
+ - BUG/MEDIUM: ssl: fix the id length check within smp_fetch_ssl_fc_session_id()
+ - CLEANUP: connections: align function declaration
+ - BUG/MINOR: sample: Set the correct type when a binary is converted to a string
+ - MEDIUM: checks/http-fetch: Support htx prefetch from a check for HTTP samples
+ - DOC: Document the log-format parameter for tcp-check send/send-binary rules
+ - MINOR: checks: Add support of payload-based sample fetches
+ - MINOR: checks: Add support of be_id, be_name, srv_id and srv_name sample fetches
+ - MINOR: checks: Add support of server side ssl sample fetches
+ - MINOR: checks: Add support of HTTP response sample fetches
+ - MINOR: http-htx: Support different methods to look for header names
+ - MINOR: checks: Set by default expect rule status to UNKNOWN during parsing
+ - BUG/MINOR: checks: Support multiple HTTP expect rules
+ - REGTEST: checks: Fix sync condition for agent-check
+ - MEDIUM: checks: Support matching on headers for http-check expect rules
+ - MINOR: lua: allow changing port with set_addr
+ - BUG/MINOR: da: Fix HTX message prefetch
+ - BUG/MINOR: wurfl: Fix HTX message prefetch
+ - BUG/MINOR: 51d: Fix HTX message prefetch
+ - MINOR: ist: add istadv() function
+ - MINOR: ist: add istissame() function
+ - MINOR: istbuf: add ist2buf() function
+ - BUG/MINOR: threads: fix multiple use of argument inside HA_ATOMIC_CAS()
+ - BUG/MINOR: threads: fix multiple use of argument inside HA_ATOMIC_UPDATE_{MIN,MAX}()
+ - DOC: update intro.txt for 2.2
+ - DOC: intro: add a contacts section
+
+2020/04/17 : 2.2-dev6
+ - BUG/MINOR: ssl: memory leak when find_chain is NULL
+ - CLEANUP: ssl: rename ssl_get_issuer_chain to ssl_get0_issuer_chain
+ - MINOR: ssl: rework add cert chain to CTX to be libssl independent
+ - BUG/MINOR: peers: init bind_proc to 1 if it wasn't initialized
+ - BUG/MINOR: peers: avoid an infinite loop with peers_fe is NULL
+ - BUG/MINOR: peers: Use after free of "peers" section.
+ - CI: github actions: add weekly h2spec test
+ - BUG/MEDIUM: mux_h1: Process a new request if we already received it.
+ - MINOR: build: Fix build in mux_h1
+ - CLEANUP: remove obsolete comments
+ - BUG/MEDIUM: dns: improper parsing of additional records
+ - MINOR: ssl: skip self issued CA in cert chain for ssl_ctx
+ - MINOR: listener: add so_name sample fetch
+ - MEDIUM: stream: support use-server rules with dynamic names
+ - MINOR: servers: Add a counter for the number of currently used connections.
+ - MEDIUM: connections: Revamp the way idle connections are killed
+ - MINOR: cli: add a general purpose pointer in the CLI struct
+ - MINOR: ssl: add a list of bind_conf in struct crtlist
+ - REORG: ssl: move SETCERT enum to ssl_sock.h
+ - BUG/MINOR: ssl: ckch_inst wrongly inserted in crtlist_entry
+ - REORG: ssl: move some functions above crtlist_load_cert_dir()
+ - MINOR: ssl: use crtlist_free() upon error in directory loading
+ - MINOR: ssl: add a list of crtlist_entry in ckch_store
+ - MINOR: ssl: store a ptr to crtlist in crtlist_entry
+ - MINOR: ssl/cli: update pointer to store in 'commit ssl cert'
+ - MEDIUM: ssl/cli: 'add ssl crt-list' command
+ - REGTEST: ssl/cli: test the 'add ssl crt-list' command
+ - BUG/MINOR: ssl: entry->ckch_inst not initialized
+ - REGTEST: ssl/cli: change test type to devel
+ - REGTEST: make the PROXY TLV validation depend on version 2.2
+ - CLEANUP: assorted typo fixes in the code and comments
+ - BUG/MINOR: stats: Fix color of draining servers on stats page
+ - DOC: internals: Fix spelling errors in filters.txt
+ - MINOR: connections: Don't mark conn flags 0x00000001 and 0x00000002 as unused.
+ - REGTEST: make the unique-id test depend on version 2.0
+ - BUG/MEDIUM: dns: Consider the fact that dns answers are case-insensitive
+ - MINOR: ssl: split the line parsing of the crt-list
+ - MINOR: ssl/cli: support filters and options in add ssl crt-list
+ - MINOR: ssl: add a comment above the ssl_bind_conf keywords
+ - REGTEST: ssl/cli: tests options and filters w/ add ssl crt-list
+ - REGTEST: ssl: pollute the crt-list file
+ - BUG/CRITICAL: hpack: never index a header into the headroom after wrapping
+ - BUG/MINOR: protocol_buffer: Wrong maximum shifting.
+ - CLEANUP: src/fd.c: mask setsockopt with DISGUISE
+ - BUG/MINOR: ssl/cli: initialize fcount in crtlist_entry
+ - REGTEST: ssl/cli: add other cases of 'add ssl crt-list'
+ - CLEANUP: assorted typo fixes in the code and comments
+ - DOC: management: add the new crt-list CLI commands
+ - BUG/MINOR: ssl/cli: fix spaces in 'show ssl crt-list'
+ - MINOR: ssl/cli: 'del ssl crt-list' delete an entry
+ - MINOR: ssl/cli: replace dump/show ssl crt-list by '-n' option
+ - CI: use better SSL library definition
+ - CI: travis-ci: enable DEBUG_STRICT=1 for CI builds
+ - CI: travis-ci: upgrade openssl to 1.1.1f
+ - MINOR: ssl: improve the errors when a crt can't be open
+ - CI: cirrus-ci: rename openssl package after it is renamed in FreeBSD
+ - CI: adopt openssl download script to download all versions
+ - BUG/MINOR: ssl/cli: lock the ckch structures during crt-list delete
+ - MINOR: ssl/cli: improve error for bundle in add/del ssl crt-list
+ - MINOR: ssl/cli: 'del ssl cert' deletes a certificate
+ - BUG/MINOR: ssl: trailing slashes in directory names wrongly cached
+ - BUG/MINOR: ssl/cli: memory leak in 'set ssl cert'
+ - CLEANUP: ssl: use the refcount for the SSL_CTX'
+ - CLEANUP: ssl/cli: use the list of filters in the crtlist_entry
+ - BUG/MINOR: ssl: memleak of the struct cert_key_and_chain
+ - CLEANUP: ssl: remove a commentary in struct ckch_inst
+ - MINOR: ssl: initialize all list in ckch_inst_new()
+ - MINOR: ssl: free instances and SNIs with ckch_inst_free()
+ - MINOR: ssl: replace ckchs_free() by ckch_store_free()
+ - BUG/MEDIUM: ssl/cli: trying to access to free'd memory
+ - MINOR: ssl: ckch_store_new() alloc and init a ckch_store
+ - MINOR: ssl: crtlist_new() alloc and initialize a struct crtlist
+ - REORG: ssl: move some free/new functions
+ - MINOR: ssl: crtlist_entry_{new, free}
+ - BUG/MINOR: ssl: ssl_conf always set to NULL on crt-list parsing
+ - MINOR: ssl: don't alloc ssl_conf if no option found
+ - BUG/MINOR: connection: always send address-less LOCAL PROXY connections
+ - BUG/MINOR: peers: Incomplete peers sections should be validated.
+ - MINOR: init: report in "haproxy -c" whether there were warnings or not
+ - MINOR: init: add -dW and "zero-warning" to reject configs with warnings
+ - MINOR: init: report the compiler version in haproxy -vv
+ - CLEANUP: assorted typo fixes in the code and comments
+ - MINOR: init: report the haproxy version and executable path once on errors
+ - DOC: Make how "option redispatch" works more explicit
+ - BUILD: Makefile: add linux-musl to TARGET
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CLEANUP: http: Fixed small typo in parse_http_return
+ - DOC: hashing: update link to hashing functions
+
+2020/03/23 : 2.2-dev5
+ - CLEANUP: ssl: is_default is a bit in ckch_inst
+ - BUG/MINOR: ssl/cli: sni_ctx' mustn't always be used as filters
+ - DOC: ssl: clarify security implications of TLS tickets
+ - CLEANUP: remove support for Linux i686 vsyscalls
+ - CLEANUP: drop support for USE_MY_ACCEPT4
+ - CLEANUP: remove support for USE_MY_EPOLL
+ - CLEANUP: remove support for USE_MY_SPLICE
+ - CLEANUP: remove the now unused common/syscall.h
+ - BUILD: make dladdr1 depend on glibc version and not __USE_GNU
+ - BUILD: wdt: only test for SI_TKILL when compiled with thread support
+ - BUILD: Makefile: the compiler-specific flags should all be in SPEC_CFLAGS
+ - CLEANUP: ssl: separate the directory loading in a new function
+ - BUG/MINOR: buffers: MT_LIST_DEL_SAFE() expects the temporary pointer.
+ - BUG/MEDIUM: mt_lists: Make sure we set the deleted element to NULL;
+ - MINOR: init: move the maxsock calculation code to compute_ideal_maxsock()
+ - MEDIUM: init: always try to push the FD limit when maxconn is set from -m
+ - BUG/MAJOR: list: fix invalid element address calculation
+ - BUILD: stream-int: fix a few includes dependencies
+ - MINOR: mt_lists: Appease gcc.
+ - MINOR: lists: Implement function to convert list => mt_list and mt_list => list
+ - MINOR: servers: Kill priv_conns.
+ - MINOR: lists: fix indentation.
+ - BUG/MEDIUM: random: align the state on 2*64 bits for ARM64
+ - BUG/MEDIUM: connections: Don't assume the connection has a valid session.
+ - BUG/MEDIUM: pools: Always update free_list in pool_gc().
+ - BUG/MINOR: haproxy: always initialize sleeping_thread_mask
+ - BUG/MINOR: listener/mq: do not dispatch connections to remote threads when stopping
+ - BUG/MINOR: haproxy/threads: try to make all threads leave together
+ - Revert "BUILD: travis-ci: enable s390x builds"
+ - BUILD: travis-ci: enable regular s390x builds
+ - DOC: proxy_protocol: Reserve TLV type 0x05 as PP2_TYPE_UNIQUE_ID
+ - MINOR: proxy_protocol: Ingest PP2_TYPE_UNIQUE_ID on incoming connections
+ - MEDIUM: proxy_protocol: Support sending unique IDs using PPv2
+ - CLEANUP: connection: Add blank line after declarations in PP handling
+ - CLEANUP: assorted typo fixes in the code and comments
+ - CI: add spellcheck github action
+ - DOC: correct typo in alert message about rspirep
+ - CI: travis: switch linux builds to clang-9
+ - MINOR: debug: add a new DISGUISE() macro to pass a value as identity
+ - MINOR: debug: consume the write() result in BUG_ON() to silence a warning
+ - MINOR: use DISGUISE() everywhere we deliberately want to ignore a result
+ - BUILD: pools: silence build warnings with DEBUG_MEMORY_POOLS and DEBUG_UAF
+ - CLEANUP: connection: Stop directly setting an ist's .ptr
+ - CI: travis: revert to clang-7 for BoringSSL tests
+ - BUILD: on ARM, must be linked to libatomic.
+ - BUILD: makefile: fix regex syntax in ARM platform detection
+ - BUG/MEDIUM: peers: resync ended with RESYNC_PARTIAL in wrong cases.
+ - REORG: ssl: move ssl_sock_load_cert()
+ - MINOR: ssl: pass ckch_inst to ssl_sock_load_ckchs()
+ - MEDIUM: ssl: allow crt-list caching
+ - MINOR: ssl: directories are loaded like crt-list
+ - BUG/MINOR: ssl: can't open directories anymore
+ - BUG/MEDIUM: spoe: dup agent's engine_id string from trash.area
+ - MINOR: fd: Use a separate lock for logs instead of abusing the fd lock.
+ - MINOR: mux_pt: Don't try to remove the connection from the idle list.
+ - MINOR: ssl/cli: show/dump ssl crt-list
+ - BUG/MINOR: ssl/cli: free the trash chunk in dump_crtlist
+ - MEDIUM: fd: Introduce a running mask, and use it instead of the spinlock.
+ - BUG/MINOR: ssl: memory leak in crtlist_parse_file()
+ - MINOR: tasks: Provide the tasklet to the callback.
+ - BUG/MINOR: ssl: memleak of struct crtlist_entry
+ - BUG/MINOR: pattern: Do not pass len = 0 to calloc()
+ - BUILD: makefile: fix expression again to detect ARM platform
+ - CI: travis: re-enable ASAN on clang
+ - CI: travis: proper group output redirection together with travis_wait
+ - DOC: assorted typo fixes in the documentation
+ - MINOR: wdt: Move the definitions of WDTSIG and DEBUGSIG into types/signal.h.
+ - BUG/MEDIUM: wdt: Don't ignore WDTSIG and DEBUGSIG in __signal_process_queue().
+ - MINOR: memory: Change the flush_lock to a spinlock, and don't get it in alloc.
+ - MINOR: ssl/cli: 'new ssl cert' command
+ - MINOR: ssl/cli: show certificate status in 'show ssl cert'
+ - MEDIUM: sessions: Don't be responsible for connections anymore.
+ - MEDIUM: servers: Split the connections into idle, safe, and available.
+ - MINOR: fd: Implement fd_takeover().
+ - MINOR: connections: Add a new mux method, "takeover".
+ - MINOR: connections: Make the "list" element a struct mt_list instead of list.
+ - MINOR: connections: Add a flag to know if we're in the safe or idle list.
+ - MEDIUM: connections: Attempt to get idle connections from other threads.
+ - MEDIUM: mux_h1: Implement the takeover() method.
+ - MEDIUM: mux_h2: Implement the takeover() method.
+ - MEDIUM: mux_fcgi: Implement the takeover() method.
+ - MEDIUM: connections: Kill connections even if we are reusing one.
+ - BUG/MEDIUM: connections: Don't forget to decrement idle connection counters.
+ - BUG/MINOR: ssl: Do not free garbage pointers on memory allocation failure
+ - BUG/MINOR: ssl: Correctly add the 1 for the sentinel to the number of elements
+ - BUG/MINOR: ssl: crtlist_dup_filters() must return NULL with fcount == 0
+ - BUG/MEDIUM: build: Fix compilation by spelling decl correctly.
+ - BUILD/MEDIUM: fd: Declare fd_mig_lock as extern.
+ - CI: run travis-ci builds on push only, skip pull requests
+ - CI: temporarily disable unstable travis arm64 builds
+ - BUG/MINOR: ssl/cli: free BIO upon error in 'show ssl cert'
+ - BUG/MINOR: connections: Make sure we free the connection on failure.
+ - BUG/MINOR: ssl/cli: fix a potential NULL dereference
+ - BUG/MEDIUM: h1: Make sure we subscribe before going into idle list.
+ - BUG/MINOR: connections: Set idle_time before adding to idle list.
+ - MINOR: muxes: Note that we can't use a connection when added to the srv idle.
+ - REGTEST: increase timeouts on the seamless-reload test
+ - BUG/MINOR: haproxy/threads: close a possible race in soft-stop detection
+ - CLEANUP: haproxy/threads: don't check global_tasks_mask twice
+
+2020/03/09 : 2.2-dev4
+ - MEDIUM: buffer: remove the buffer_wq lock
+ - MINOR: ssl: move find certificate chain code to its own function
+ - MINOR: ssl: resolve issuers chain later
+ - MINOR: ssl: resolve ocsp_issuer later
+ - MINOR: ssl/cli: "show ssl cert" command should print the "Chain Filename:"
+ - BUG/MINOR: h2: reject again empty :path pseudo-headers
+ - MINOR: wdt: always clear sigev_value to make valgrind happy
+ - MINOR: epoll: always initialize all of epoll_event to please valgrind
+ - BUG/MINOR: sample: Make sure to return stable IDs in the unique-id fetch
+ - BUG/MEDIUM: ssl: chain must be initialized with sk_X509_new_null()
+ - BUILD: cirrus-ci: suppress OS version check when installing packages
+ - BUG/MINOR: http_ana: make sure redirect flags don't have overlapping bits
+ - CLEANUP: fd: remove the FD_EV_STATUS aggregate
+ - CLEANUP: fd: remove some unneeded definitions of FD_EV_* flags
+ - MINOR: fd: merge the read and write error bits into RW error
+ - BUG/MINOR: dns: ignore trailing dot
+ - MINOR: contrib/prometheus-exporter: Add the last healthcheck duration metric
+ - BUG/MINOR: http-htx: Do case-insensive comparisons on Host header name
+ - MINOR: mux-h1: Remove useless case-insensitive comparisons
+ - MINOR: rawsock: always mark the FD not ready when we're certain it happens
+ - MEDIUM: connection: make the subscribe() call able to wakeup if ready
+ - MEDIUM: connection: don't stop receiving events in the FD handler
+ - MEDIUM: mux-h1: do not blindly wake up the tasklet at end of request anymore
+ - BUG/MINOR: arg: don't reject missing optional args
+ - MINOR: tools: make sure to correctly check the returned 'ms' in date2std_log
+ - MINOR: debug: report the task handler's pointer relative to main
+ - BUG/MEDIUM: debug: make the debug_handler check for the thread in threads_to_dump
+ - MINOR: haproxy: export main to ease access from debugger
+ - MINOR: haproxy: export run_poll_loop
+ - MINOR: task: export run_tasks_from_list
+ - BUILD: tools: remove obsolete and conflicting trace() from standard.c
+ - MINOR: tools: add new function dump_addr_and_bytes()
+ - MINOR: tools: add resolve_sym_name() to resolve function pointers
+ - MINOR: debug: use resolve_sym_name() to dump task handlers
+ - MINOR: cli: make "show fd" rely on resolve_sym_name()
+ - MEDIUM: debug: add support for dumping backtraces of stuck threads
+ - MINOR: debug: call backtrace() once upon startup
+ - MINOR: ssl: add "ca-verify-file" directive
+ - BUG/MINOR: wdt: do not return an error when the watchdog couldn't be enabled
+ - BUILD: Makefile: include librt before libpthread
+ - MEDIUM: wdt: fall back to CLOCK_REALTIME if CLOCK_THREAD_CPUTIME is not available
+ - MINOR: wdt: do not depend on USE_THREAD
+ - MINOR: debug: report the number of entries in the backtrace
+ - MINOR: debug: improve backtrace() on aarch64 and possibly other systems
+ - MINOR: debug: use our own backtrace function on clang+x86_64
+ - MINOR: debug: dump the whole trace if we can't spot the starting point
+ - BUILD: tools: unbreak resolve_sym_name() on non-GNU platforms
+ - BUILD: tools: rely on __ELF__ not USE_DL to enable use of dladdr()
+ - CLEANUP: contrib/spoa_example: Fix several typos
+ - BUILD: makefile: do not modify the build options during make reg-tests
+ - BUG/MEDIUM: connection: stop polling for sending when the event is ready
+ - MEDIUM: stream-int: make sure to try to immediately validate the connection
+ - MINOR: tcp/uxst/sockpair: only ask for I/O when really waiting for a connect()
+ - MEDIUM: connection: only call ->wake() for connect() without I/O
+ - OPTIM: connection: disable receiving on disabled events when the run queue is too high
+ - OPTIM: mux-h1: subscribe rather than waking up at a few other places
+ - REGTEST: Add unique-id reg-test
+ - MINOR: stream: Add stream_generate_unique_id function
+ - MINOR: stream: Use stream_generate_unique_id
+ - BUG/MINOR: connection/debug: do not enforce !event_type on subscribe() anymore
+ - MINOR: ssl/cli: support crt-list filters
+ - MINOR: ssl: reach a ckch_store from a sni_ctx
+ - DOC: fix incorrect indentation of http_auth_*
+ - BUG/MINOR: ssl-sock: do not return an uninitialized pointer in ckch_inst_sni_ctx_to_sni_filters
+ - MINOR: debug: add CLI command "debug dev write" to write an arbitrary size
+ - MINOR: ist: Add `IST_NULL` macro
+ - MINOR: ist: Add `int isttest(const struct ist)`
+ - MINOR: ist: Add `struct ist istalloc(size_t)` and `void istfree(struct ist*)`
+ - CLEANUP: Use `isttest()` and `istfree()`
+ - MINOR: ist: Add `struct ist istdup(const struct ist)`
+ - MINOR: proxy: Make `header_unique_id` a `struct ist`
+ - MEDIUM: stream: Make the `unique_id` member of `struct stream` a `struct ist`
+ - OPTIM: startup: fast unique_id allocation for acl.
+ - DOC: configuration.txt: fix various typos
+ - DOC: assorted typo fixes in the documentation and Makefile
+ - BUG/MINOR: init: make the automatic maxconn consider the max of soft/hard limits
+ - BUG/MAJOR: proxy_protocol: Properly validate TLV lengths
+ - CLEANUP: proxy_protocol: Use `size_t` when parsing TLVs
+ - MINOR: buf: Add function to insert a string at an absolute offset in a buffer
+ - MINOR: htx: Add a function to return a block at a specific offset
+ - MINOR: htx: Use htx_find_offset() to truncate an HTX message
+ - MINOR: flt_trace: Use htx_find_offset() to get the available payload length
+ - BUG/MINOR: filters: Use filter offset to deduce the amount of forwarded data
+ - BUG/MINOR: filters: Forward everything if no data filters are called
+ - BUG/MEDIUM: cache/filters: Fix loop on HTX blocks caching the response payload
+ - BUG/MEDIUM: compression/filters: Fix loop on HTX blocks compressing the payload
+ - BUG/MINOR: http-ana: Reset request analysers on a response side error
+ - BUG/MINOR: lua: Abort when txn:done() is called from a Lua action
+ - BUG/MINOR: lua: Ignore the reserve to know if a channel is full or not
+ - MINOR: lua: Add function to know if a channel is a response one
+ - MINOR: lua: Stop using the lua txn in hlua_http_get_headers()
+ - MINOR: lua: Stop using the lua txn in hlua_http_rep_hdr()
+ - MINOR: lua: Stop using lua txn in hlua_http_del_hdr() and hlua_http_add_hdr()
+ - MINOR: lua: Remove the flag HLUA_TXN_HTTP_RDY
+ - MINOR: lua: Rename hlua_action_wake_time() to hlua_set_wake_time()
+ - BUG/MINOR: lua: Init the lua wake_time value before calling a lua function
+ - BUG/MINOR: http-rules: Return ACT_RET_ABRT to abort a transaction
+ - BUG/MINOR: http-rules: Preserve FLT_END analyzers on reject action
+ - BUG/MINOR: http-rules: Fix a typo in the reject action function
+ - MINOR: cache/filters: Initialize the cache filter when stream is created
+ - MINOR: compression/filters: Initialize the comp filter when stream is created
+ - BUG/MINOR: rules: Preserve FLT_END analyzers on silent-drop action
+ - BUG/MINOR: rules: Return ACT_RET_ABRT when a silent-drop action is executed
+ - BUG/MINOR: rules: Increment be_counters if backend is assigned for a silent-drop
+ - BUG/MINOR: http-rules: Abort transaction when a redirect is applied on response
+ - BUILD: buffer: types/{ring.h,checks.h} should include buf.h, not buffer.h
+ - BUILD: ssl: include mini-clist.h
+ - BUILD: global: must not include common/standard.h but only types/freq_ctr.h
+ - BUILD: freq_ctr: proto/freq_ctr needs to include common/standard.h
+ - BUILD: listener: types/listener.h must not include standard.h
+ - BUG/MEDIUM: random: initialize the random pool a bit better
+ - BUG/MEDIUM: random: implement per-thread and per-process random sequences
+ - Revert "BUG/MEDIUM: random: implement per-thread and per-process random sequences"
+ - BUILD: cirrus-ci: get rid of unstable freebsd images
+ - MINOR: tools: add 64-bit rotate operators
+ - BUG/MEDIUM: random: implement a thread-safe and process-safe PRNG
+ - MINOR: backend: use a single call to ha_random32() for the random LB algo
+ - BUG/MINOR: checks/threads: use ha_random() and not rand()
+ - MINOR: sample: make all bits random on the rand() sample fetch
+ - MINOR: tools: add a generic function to generate UUIDs
+ - DOC: fix typo about no-tls-tickets
+ - DOC: improve description of no-tls-tickets
+ - DOC: assorted typo fixes in the documentation
+ - CLEANUP: remove unused code in 'my_ffsl/my_flsl' functions
+
+2020/02/25 : 2.2-dev3
+ - SCRIPTS: announce-release: place the send command in the mail's header
+ - SCRIPTS: announce-release: allow the user to force to overwrite old files
+ - SCRIPTS: backport: fix the master branch detection
+ - BUG/MINOR: http-act: Set stream error flag before returning an error
+ - BUG/MINOR: http-act: Fix bugs on error path during parsing of return actions
+ - BUG/MEDIUM: ssl/cli: 'commit ssl cert' wrong SSL_CTX init
+ - BUG/MEDIUM: tcp-rules: Fix track-sc* actions for L4/L5 TCP rules
+ - DOC: schematic of the SSL certificates architecture
+ - BUG/MAJOR: mux-h2: don't wake streams after connection was destroyed
+ - BUG/MINOR: unix: better catch situations where the unix socket path length is close to the limit
+ - BUILD: cirrus-ci: switch to "snap" images to unify openssl naming
+ - BUILD: cirrus-ci: workaround "pkg install" bug
+ - BUILD: cirrus-ci: add ERR=1 to freebsd builds
+ - BUG/MINOR: connection: correctly retry I/O on signals
+ - CLEANUP: mini-clist: simplify nested do { while(1) {} } while (0)
+ - BUILD: http_act: cast file sizes when reporting file size error
+ - BUG/MEDIUM: listener: only consider running threads when resuming listeners
+ - BUG/MINOR: listener: enforce all_threads_mask on bind_thread on init
+ - BUG/MINOR: tcp: avoid closing fd when socket failed in tcp_bind_listener
+ - MINOR: build: add aix72-gcc build TARGET and power{8,9} CPUs
+ - BUILD: travis-ci: no more allowed failures for openssl-1.0.2
+ - BUILD: travis-ci: harden builds, add ERR=1 (warning ought to be errors)
+ - BUILD: scripts/build-ssl.sh: use "uname" instead of ${TRAVIS_OS_NAME}
+ - BUG/MINOR: tcp: don't try to set defaultmss when value is negative
+ - SCRIPTS: make announce-release executable again
+ - BUG/MINOR: namespace: avoid closing fd when socket failed in my_socketat
+ - BUG/MEDIUM: muxes: Use the right argument when calling the destroy method.
+ - BUG/MINOR: mux-fcgi: Forbid special characters when matching PATH_INFO param
+ - CLEANUP: ssl: remove unused functions in openssl-compat.h
+ - MINOR: mux-fcgi: Make the capture of the path-info optional in pathinfo regex
+ - MINOR: tools: add is_idchar() to tell if a char may belong to an identifier
+ - MINOR: chunk: implement chunk_strncpy() to copy partial strings
+ - MINOR: sample/acl: use is_idchar() to locate the fetch/conv name
+ - MEDIUM: arg: make make_arg_list() stop after its own arguments
+ - MEDIUM: arg: copy parsed arguments into the trash instead of allocating them
+ - MEDIUM: arg: make make_arg_list() support quotes in arguments
+ - MINOR: sample: make sample_parse_expr() able to return an end pointer
+ - MEDIUM: log-format: make the LF parser aware of sample expressions' end
+ - BUG/MINOR: arg: report an error if an argument is larger than bufsize
+ - SCRIPTS: announce-release: use mutt -H instead of -i to include the draft
+ - BUILD: enable ERR=1 in github cygwin builds
+ - BUG/MINOR: arg: fix again incorrect argument length check
+ - MINOR: sample: regsub now supports backreferences
+ - BUG/MINOR: tools: also accept '+' as a valid character in an identifier
+ - MINOR: http-htx: Add a function to retrieve the headers size of an HTX message
+ - MINOR: filters: Forward data only if the last filter forwards something
+ - BUG/MINOR: filters: Count HTTP headers as filtered data but don't forward them
+ - BUG/MINOR: http-htx: Don't return error if authority is updated without changes
+ - BUG/MINOR: stream: Don't incr frontend cum_req counter when stream is closed
+ - BUG/MINOR: sample: exit regsub() in case of trash allocation error
+ - MINOR: ssl: add "issuers-chain-path" directive.
+ - REGTESTS: use "command -v" instead of "which"
+ - BUG/MINOR: http-ana: Matching on monitor-uri should be case-sensitive
+ - MINOR: http-ana: Match on the path if the monitor-uri starts by a /
+ - BUG/MINOR: ssl: Stop passing dynamic strings as format arguments
+ - BUG/MAJOR: http-ana: Always abort the request when a tarpit is triggered
+ - BUG/MINOR: mux: do not call conn_xprt_stop_recv() on buffer shortage
+ - MINOR: checks: do not call conn_xprt_stop_send() anymore
+ - CLEANUP: epoll: place the struct epoll_event in the stack
+ - MEDIUM: connection: remove the intermediary polling state from the connection
+ - MINOR: raw_sock: directly call fd_stop_send() and not conn_xprt_stop_send()
+ - MINOR: tcp/uxst/sockpair: use fd_want_send() instead of conn_xprt_want_send()
+ - MINOR: connection: remove the last calls to conn_xprt_{want,stop}_*
+ - CLEANUP: connection: remove the definitions of conn_xprt_{stop,want}_{send,recv}
+ - MINOR: connection: introduce a new receive flag: CO_RFL_READ_ONCE
+ - MINOR: mux-h1: pass CO_RFL_READ_ONCE to the lower layers when relevant
+ - MINOR: ist: add an iststop() function
+ - BUG/MINOR: http: http-request replace-path duplicates the query string
+ - CLEANUP: sample: use iststop instead of a for loop
+ - BUG/MEDIUM: shctx: make sure to keep all blocks aligned
+ - MINOR: compiler: move CPU capabilities definition from config.h and complete them
+ - BUG/MEDIUM: ebtree: don't set attribute packed without unaligned access support
+ - CLEANUP: http/h1: rely on HA_UNALIGNED_LE instead of checking for CPU families
+ - BUILD: fix recent build failure on unaligned archs
+ - MINOR: ssl: load the key from a dedicated file
+ - BUG/MINOR: ssl: load .key in a directory only after PEM
+ - MINOR: compiler: drop special cases of likely/unlikely for older compilers
+ - CLEANUP: conn: Do not pass a pointer to likely
+ - CLEANUP: net_helper: Do not negate the result of unlikely
+ - BUILD: remove obsolete support for -mregparm / USE_REGPARM
+ - CLEANUP: cfgparse: Fix type of second calloc() parameter
+ - BUILD: ssl: only pass unsigned chars to isspace()
+ - BUILD: general: always pass unsigned chars to is* functions
+ - BUG/MINOR: sample: fix the json converter's endian-sensitivity
+ - BUG/MEDIUM: ssl: fix several bad pointer aliases in a few sample fetch functions
+ - CLEANUP: fd: use a union in fd_rm_from_fd_list() to shut aliasing warnings
+ - CLEANUP: cache: use read_u32/write_u32 to access the cache entry's hash
+ - CLEANUP: stick-tables: use read_u32() to display a node's key
+ - CLEANUP: sample: use read_u64() in ipmask() to apply an IPv6 mask
+ - MINOR: pattern: fix all remaining strict aliasing issues
+ - CLEANUP: lua: fix aliasing issues in the address matching code
+ - CLEANUP: connection: use read_u32() instead of a cast in the netscaler parser
+ - BUILD: makefile: re-enable strict aliasing
+ - BUG/MINOR: connection: make sure to correctly tag local PROXY connections
+ - MINOR: compiler: add new alignment macros
+ - BUILD: ebtree: improve architecture-specific alignment
+ - MINOR: config: mark global.debug as deprecated
+ - BUILD: travis-ci: enable s390x builds
+ - MINOR: ssl/cli: 'show ssl cert' displays the chain
+ - MINOR: ssl/cli: 'show ssl cert' displays the issuer in the chain
+ - MINOR: ssl/cli: reorder 'show ssl cert' output
+ - CLEANUP: ssl: move issuer_chain tree and definition
+ - DOC: proxy-protocol: clarify IPv6 address representation in the spec
+
+2020/02/07 : 2.2-dev2
+ - BUILD: CI: temporarily mark openssl-1.0.2 as allowed failure
+ - MEDIUM: cli: Allow multiple filter entries for "show table"
+ - BUG/MEDIUM: netscaler: Don't forget to allocate storage for conn->src/dst.
+ - BUG/MINOR: ssl: ssl_sock_load_pem_into_ckch is not consistent
+ - BUILD: stick-table: fix build errors introduced by last stick-table change
+ - BUG/MINOR: cli: Missing arg offset for filter data values.
+ - MEDIUM: streams: Always create a conn_stream in connect_server().
+ - MEDIUM: connections: Get rid of the xprt_done callback.
+ - CLEANUP: changelog: remove the duplicate entry for 2.2-dev1
+ - BUILD: CI: move cygwin builds to Github Actions
+ - MINOR: cli: Report location of errors or any extra data for "show table"
+ - BUG/MINOR: ssl/cli: free the previous ckch content once a PEM is loaded
+ - CLEANUP: backend: remove useless test for inexistent connection
+ - CLEANUP: backend: shut another false null-deref in back_handle_st_con()
+ - CLEANUP: stats: shut up a wrong null-deref warning from gcc 9.2
+ - BUG/MINOR: ssl: increment issuer refcount if in chain
+ - BUG/MINOR: ssl: memory leak w/ the ocsp_issuer
+ - BUG/MINOR: ssl: typo in previous patch
+ - BUG/MEDIUM: connections: Set CO_FL_CONNECTED in conn_complete_session().
+ - BUG/MINOR: ssl/cli: ocsp_issuer must be set w/ "set ssl cert"
+ - MEDIUM: connection: remove CO_FL_CONNECTED and only rely on CO_FL_WAIT_*
+ - BUG/MEDIUM: 0rtt: Only consider the SSL handshake.
+ - MINOR: stream-int: always report received shutdowns
+ - MINOR: connection: remove CO_FL_SSL_WAIT_HS from CO_FL_HANDSHAKE
+ - MEDIUM: connection: use CO_FL_WAIT_XPRT more consistently than L4/L6/HANDSHAKE
+ - MINOR: connection: remove checks for CO_FL_HANDSHAKE before I/O
+ - MINOR: connection: do not check for CO_FL_SOCK_RD_SH too early
+ - MINOR: connection: don't check for CO_FL_SOCK_WR_SH too early in handshakes
+ - MINOR: raw-sock: always check for CO_FL_SOCK_WR_SH before sending
+ - MINOR: connection: remove some unneeded checks for CO_FL_SOCK_WR_SH
+ - BUG/MINOR: stktable: report the current proxy name in error messages
+ - BUG/MEDIUM: mux-h2: make sure we don't emit TE headers with anything but "trailers"
+ - MINOR: lua: Add hlua_prepend_path function
+ - MINOR: lua: Add lua-prepend-path configuration option
+ - MINOR: lua: Add HLUA_PREPEND_C?PATH build option
+ - BUILD: cfgparse: silence a bogus gcc warning on 32-bit machines
+ - BUG/MINOR: http-ana: Increment the backend counters on the backend
+ - BUG/MINOR: stream: Be sure to have a listener to increment its counters
+ - BUG/MEDIUM: streams: Move the conn_stream allocation outside #IF USE_OPENSSL.
+ - REGTESTS: make the set_ssl_cert test require version 2.2
+ - BUG/MINOR: ssl: Possible memleak when allowing the 0RTT data buffer.
+ - MINOR: ssl: Remove dead code.
+ - BUG/MEDIUM: ssl: Don't forget to free ctx->ssl on failure.
+ - BUG/MEDIUM: stream: Don't install the mux in back_handle_st_con().
+ - MEDIUM: streams: Don't close the connection in back_handle_st_con().
+ - MEDIUM: streams: Don't close the connection in back_handle_st_rdy().
+ - BUILD: CI: disable slow regtests on Travis
+ - BUG/MINOR: tcpchecks: fix the connect() flags regarding delayed ack
+ - BUG/MINOR: http-rules: Always init log-format expr for common HTTP actions
+ - BUG/MINOR: connection: fix ip6 dst_port copy in make_proxy_line_v2
+ - BUG/MINOR: dns: allow 63 char in hostname
+ - MINOR: proxy: clarify number of connections log when stopping
+ - DOC: word converter ignores delimiters at the start or end of input string
+ - MEDIUM: raw-sock: remove obsolete calls to fd_{cant,cond,done}_{send,recv}
+ - BUG/MINOR: ssl/cli: fix unused variable with openssl < 1.0.2
+ - MEDIUM: pipe/thread: reduce the locking overhead
+ - MEDIUM: pipe/thread: maintain a per-thread local cache of recently used pipes
+ - BUG/MEDIUM: pipe/thread: fix atomicity of pipe counters
+ - MINOR: tasks: move the list walking code to its own function
+ - MEDIUM: tasks: implement 3 different tasklet classes with their own queues
+ - MEDIUM: tasks: automatically requeue into the bulk queue an already running tasklet
+ - OPTIM: task: refine task classes default CPU bandwidth ratios
+ - BUG/MEDIUM: connections: Don't forget to unlock when killing a connection.
+ - MINOR: task: permanently flag tasklets waking themselves up
+ - MINOR: task: make sched->current also reflect tasklets
+ - MINOR: task: detect self-wakeups on tl==sched->current instead of TASK_RUNNING
+ - OPTIM: task: readjust CPU bandwidth distribution since last update
+ - MINOR: task: don't set TASK_RUNNING on tasklets
+ - BUG/MEDIUM: memory_pool: Update the seq number in pool_flush().
+ - MINOR: memory: Only init the pool spinlock once.
+ - BUG/MEDIUM: memory: Add a rwlock before freeing memory.
+ - BUG/MAJOR: memory: Don't forget to unlock the rwlock if the pool is empty.
+ - MINOR: ssl: ssl-load-extra-files configure loading of files
+ - SCRIPTS: add a new "backport" script to simplify long series of backports
+ - BUG/MINOR: ssl: we may only ignore the first 64 errors
+ - SCRIPTS: use /usr/bin/env bash instead of /bin/bash for scripts
+ - BUG/MINOR: ssl: clear the SSL errors on DH loading failure
+ - CLEANUP: hpack: remove a redundant test in the decoder
+ - CLEANUP: peers: Remove unused static function `free_dcache`
+ - CLEANUP: peers: Remove unused static function `free_dcache_tx`
+ - CONTRIB: debug: add missing flags SF_HTX and SF_MUX
+ - CONTRIB: debug: add the possibility to decode the value as certain types only
+ - CONTRIB: debug: support reporting multiple values at once
+ - BUG/MINOR: http-act: Use the good message to test strict rewriting mode
+ - MINOR: global: Set default tune.maxrewrite value during global structure init
+ - MINOR: http-rules: Set SF_ERR_PRXCOND termination flag when a header rewrite fails
+ - MINOR: http-htx: Emit a warning if an error file runs over the buffer's reserve
+ - MINOR: htx: Add a function to append an HTX message to another one
+ - MINOR: htx/channel: Add a function to copy an HTX message in a channel's buffer
+ - BUG/MINOR: http-ana: Don't overwrite outgoing data when an error is reported
+ - MINOR: dns: Dynamically allocate dns options to reduce the act_rule size
+ - MINOR: dns: Add function to release memory allocated for a do-resolve rule
+ - BUG/MINOR: http-ana: Reset HTX first index when HAProxy sends a response
+ - BUG/MINOR: http-ana: Set HTX_FL_PROXY_RESP flag if a server perform a redirect
+ - MINOR: http-rules: Add a flag on redirect rules to know the rule direction
+ - MINOR: http-rules: Handle the rule direction when a redirect is evaluated
+ - MINOR: http-ana: Rely on http_reply_and_close() to handle server error
+ - MINOR: http-ana: Add a function for forward internal responses
+ - MINOR: http-ana/http-rules: Use dedicated function to forward internal responses
+ - MEDIUM: http: Add a ruleset evaluated on all responses just before forwarding
+ - MEDIUM: http-rules: Add the return action to HTTP rules
+ - MEDIUM: http-rules: Support extra headers for HTTP return actions
+ - CLEANUP: lua: Remove consistency check for sample fetches and actions
+ - BUG/MINOR: http-ana: Increment failed_resp counters on invalid response
+ - MINOR: lua: Get the action return code on the stack when an action finishes
+ - MINOR: lua: Create the global 'act' object to register all action return codes
+ - MINOR: lua: Add act:wake_time() function to set a timeout when an action yields
+ - MEDIUM: lua: Add ability for actions to intercept HTTP messages
+ - REGTESTS: Add reg tests for the HTTP return action
+ - REGTESTS: Add a reg test for http-after-response rulesets
+ - BUILD: lua: silence a warning on systems where longjmp is not marked as noreturn
+ - MINOR: acl: Warn when an ACL is named 'or'
+ - CONTRIB: debug: also support reading values from stdin
+ - SCRIPTS: backport: use short revs and resolve the initial commit
+ - BUG/MINOR: acl: Fix type of log message when an acl is named 'or'
+
+2020/01/22 : 2.2-dev1
+ - DOC: this is development again
+ - MINOR: version: this is development again, update the status
+ - SCRIPTS: update create-release to fix the changelog on new branches
+ - CLEANUP: ssl: Clean up error handling
+ - BUG/MINOR: contrib/prometheus-exporter: decode parameter and value only
+ - BUG/MINOR: h1: Don't test the host header during response parsing
+ - BUILD/MINOR: trace: fix use of long type in a few printf format strings
+ - DOC: Clarify behavior of server maxconn in HTTP mode
+ - MINOR: ssl: deduplicate ca-file
+ - MINOR: ssl: compute ca-list from deduplicate ca-file
+ - MINOR: ssl: deduplicate crl-file
+ - CLEANUP: dns: resolution can never be null
+ - BUG/MINOR: http-htx: Don't make http_find_header() fail if the value is empty
+ - DOC: ssl/cli: set/commit/abort ssl cert
+ - BUG/MINOR: ssl: fix SSL_CTX_set1_chain compatibility for openssl < 1.0.2
+ - BUG/MINOR: fcgi-app: Make the directive pass-header case insensitive
+ - BUG/MINOR: stats: Fix HTML output for the frontends heading
+ - BUG/MINOR: ssl: fix X509 compatibility for openssl < 1.1.0
+ - DOC: clarify matching strings on binary fetches
+ - DOC: Fix ordered list in summary
+ - DOC: move the "group" keyword at the right place
+ - MEDIUM: init: prevent process and thread creation at runtime
+ - BUG/MINOR: ssl/cli: 'ssl cert' cmd only usable w/ admin rights
+ - BUG/MEDIUM: stream-int: don't subscribed for recv when we're trying to flush data
+ - BUG/MINOR: stream-int: avoid calling rcv_buf() when splicing is still possible
+ - BUG/MINOR: ssl/cli: don't overwrite the filters variable
+ - BUG/MEDIUM: listener/thread: fix a race when pausing a listener
+ - BUG/MINOR: ssl: certificate choice can be unexpected with openssl >= 1.1.1
+ - BUG/MEDIUM: mux-h1: Never reuse H1 connection if a shutw is pending
+ - BUG/MINOR: mux-h1: Don't rely on CO_FL_SOCK_RD_SH to set H1C_F_CS_SHUTDOWN
+ - BUG/MINOR: mux-h1: Fix conditions to know whether or not we may receive data
+ - BUG/MEDIUM: tasks: Make sure we switch wait queues in task_set_affinity().
+ - BUG/MEDIUM: checks: Make sure we set the task affinity just before connecting.
+ - MINOR: debug: replace popen() with pipe+fork() in "debug dev exec"
+ - MEDIUM: init: set NO_NEW_PRIVS by default when supported
+ - BUG/MINOR: mux-h1: Be sure to set CS_FL_WANT_ROOM when EOM can't be added
+ - BUG/MEDIUM: mux-fcgi: Handle cases where the HTX EOM block cannot be inserted
+ - BUG/MINOR: proxy: make soft_stop() also close FDs in LI_PAUSED state
+ - BUG/MINOR: listener/threads: always use atomic ops to clear the FD events
+ - BUG/MINOR: listener: also clear the error flag on a paused listener
+ - BUG/MEDIUM: listener/threads: fix a remaining race in the listener's accept()
+ - MINOR: listener: make the wait paths cleaner and more reliable
+ - MINOR: listener: split dequeue_all_listener() in two
+ - REORG: listener: move the global listener queue code to listener.c
+ - DOC: document the listener state transitions
+ - BUG/MEDIUM: kqueue: Make sure we report read events even when no data.
+ - BUG/MAJOR: dns: add minimalist error processing on the Rx path
+ - BUG/MEDIUM: proto_udp/threads: recv() and send() must not be exclusive.
+ - DOC: listeners: add a few missing transitions
+ - BUG/MINOR: tasks: only requeue a task if it was already in the queue
+ - MINOR: tasks: split wake_expired_tasks() in two parts to avoid useless wakeups
+ - DOC: proxies: HAProxy only supports 3 connection modes
+ - DOC: remove references to the outdated architecture.txt
+ - BUG/MINOR: log: fix minor resource leaks on logformat error path
+ - BUG/MINOR: mworker: properly pass SIGTTOU/SIGTTIN to workers
+ - BUG/MINOR: listener: do not immediately resume on transient error
+ - BUG/MINOR: server: make "agent-addr" work on default-server line
+ - BUG/MINOR: listener: fix off-by-one in state name check
+ - BUILD/MINOR: unix sockets: silence an absurd gcc warning about strncpy()
+ - MEDIUM: h1-htx: Add HTX EOM block when the message is in H1_MSG_DONE state
+ - MINOR: http-htx: Add some htx sample fetches for debugging purpose
+ - REGTEST: Add an HTX reg-test to check an edge case
+ - DOC: clarify the fact that replace-uri works on a full URI
+ - BUG/MINOR: sample: fix the closing bracket and LF in the debug converter
+ - BUG/MINOR: sample: always check converters' arguments
+ - MINOR: sample: Validate the number of bits for the sha2 converter
+ - BUG/MEDIUM: ssl: Don't set the max early data we can receive too early.
+ - MINOR: ssl/cli: 'show ssl cert' give information on the certificates
+ - BUG/MINOR: ssl/cli: fix build for openssl < 1.0.2
+ - MINOR: debug: support logging to various sinks
+ - MINOR: http: add a new "replace-path" action
+ - REGTEST: ssl: test the "set ssl cert" CLI command
+ - REGTEST: run-regtests: implement #REQUIRE_BINARIES
+ - MINOR: task: only check TASK_WOKEN_ANY to decide to requeue a task
+ - BUG/MAJOR: task: add a new TASK_SHARED_WQ flag to fix foreign requeuing
+ - BUG/MEDIUM: ssl: Revamp the way early data are handled.
+ - MINOR: fd/threads: make _GET_NEXT()/_GET_PREV() use the volatile attribute
+ - BUG/MEDIUM: fd/threads: fix a concurrency issue between add and rm on the same fd
+ - REGTEST: make the "set ssl cert" require version 2.1
+ - BUG/MINOR: ssl: openssl-compat: Fix getm_ defines
+ - BUG/MEDIUM: state-file: do not allocate a full buffer for each server entry
+ - BUG/MINOR: state-file: do not store duplicates in the global tree
+ - BUG/MINOR: state-file: do not leak memory on parse errors
+ - BUG/MAJOR: mux-h1: Don't pretend the input channel's buffer is full if empty
+ - BUG/MEDIUM: stream: Be sure to never assign a TCP backend to an HTX stream
+ - BUILD: ssl: improve SSL_CTX_set_ecdh_auto compatibility
+ - BUILD: travis-ci: link with ssl libraries using rpath instead of LD_LIBRARY_PATH/DYLD_LIBRARY_PATH
+ - BUILD: travis-ci: reenable address sanitizer for clang builds
+ - BUG/MINOR: checks: refine which errno values are really errors.
+ - BUG/MINOR: connection: only wake send/recv callbacks if the FD is active
+ - CLEANUP: connection: conn->xprt is never NULL
+ - MINOR: pollers: add a new flag to indicate pollers reporting ERR & HUP
+ - MEDIUM: tcp: make tcp_connect_probe() consider ERR/HUP
+ - REORG: connection: move tcp_connect_probe() to conn_fd_check()
+ - MINOR: connection: check for connection validation earlier
+ - MINOR: connection: remove the double test on xprt_done_cb()
+ - CLEANUP: connection: merge CO_FL_NOTIFY_DATA and CO_FL_NOTIFY_DONE
+ - MINOR: poller: do not call the IO handler if the FD is not active
+ - OPTIM: epoll: always poll for recv if neither active nor ready
+ - OPTIM: polling: do not create update entries for FD removal
+ - BUG/MEDIUM: checks: Only attempt to do handshakes if the connection is ready.
+ - BUG/MEDIUM: connections: Hold the lock when wanting to kill a connection.
+ - BUILD: CI: modernize cirrus-ci
+ - MINOR: config: disable busy polling on old processes
+ - MINOR: ssl: Remove unused variable "need_out".
+ - BUG/MINOR: h1: Report the right error position when a header value is invalid
+ - BUG/MINOR: proxy: Fix input data copy when an error is captured
+ - BUG/MEDIUM: http-ana: Truncate the response when a redirect rule is applied
+ - BUG/MINOR: channel: inject output data at the end of output
+ - BUG/MEDIUM: session: do not report a failure when rejecting a session
+ - MEDIUM: dns: implement synchronous send
+ - MINOR: raw_sock: make sure to disable polling once everything is sent
+ - MINOR: http: Add 410 to http-request deny
+ - MINOR: http: Add 404 to http-request deny
+ - CLEANUP: mux-h2: remove unused goto "out_free_h2s"
+ - BUILD: cirrus-ci: choose proper openssl package name
+ - BUG/MAJOR: listener: do not schedule a task-less proxy
+ - CLEANUP: server: remove unused err section in server_finalize_init
+ - REGTEST: set_ssl_cert.vtc: replace "echo" with "printf"
+ - BUG/MINOR: stream-int: Don't trigger L7 retry if max retries is already reached
+ - BUG/MEDIUM: tasks: Use the MT macros in tasklet_free().
+ - BUG/MINOR: mux-h2: use a safe list_for_each_entry in h2_send()
+ - BUG/MEDIUM: mux-h2: fix missing test on sending_list in previous patch
+ - CLEANUP: ssl: remove opendir call in ssl_sock_load_cert
+ - MEDIUM: lua: don't call the GC as often when dealing with outgoing connections
+ - BUG/MEDIUM: mux-h2: don't stop sending when crossing a buffer boundary
+ - BUG/MINOR: cli/mworker: can't start haproxy with 2 programs
+ - REGTEST: mcli/mcli_start_progs: start 2 programs
+ - BUG/MEDIUM: mworker: remain in mworker mode during reload
+ - DOC: clarify crt-base usage
+ - CLEANUP: compression: remove unused deinit_comp_ctx section
+ - BUG/MEDIUM: mux_h1: Don't call h1_send if we subscribed().
+ - BUG/MEDIUM: raw_sock: Make sure the fd and conn are sync.
+ - CLEANUP: proxy: simplify proxy_parse_rate_limit proxy checks
+ - BUG/MAJOR: hashes: fix the signedness of the hash inputs
+ - REGTEST: add sample_fetches/hashes.vtc to validate hashes
+ - BUG/MEDIUM: cli: _getsocks must send the peers sockets
+ - CLEANUP: cli: deduplicate the code in _getsocks
+ - BUG/MINOR: stream: don't mistake match rules for store-request rules
+ - BUG/MEDIUM: connection: add a mux flag to indicate splice usability
+ - BUG/MINOR: pattern: handle errors from fgets when trying to load patterns
+ - MINOR: connection: move the CO_FL_WAIT_ROOM cleanup to the reader only
+ - MINOR: stream-int: remove dependency on CO_FL_WAIT_ROOM for rcv_buf()
+ - MEDIUM: connection: get rid of CO_FL_CURR_* flags
+ - BUILD: pattern: include errno.h
+ - MEDIUM: mux-h2: do not try to stop sending streams on blocked mux
+ - MEDIUM: mux-fcgi: do not try to stop sending streams on blocked mux
+ - MEDIUM: mux-h2: do not make an h2s subscribe to itself on deferred shut
+ - MEDIUM: mux-fcgi: do not make an fstrm subscribe to itself on deferred shut
+ - REORG: stream/backend: move backend-specific stuff to backend.c
+ - MEDIUM: backend: move the connection finalization step to back_handle_st_con()
+ - MEDIUM: connection: merge the send_wait and recv_wait entries
+ - MEDIUM: xprt: merge recv_wait and send_wait in xprt_handshake
+ - MEDIUM: ssl: merge recv_wait and send_wait in ssl_sock
+ - MEDIUM: mux-h1: merge recv_wait and send_wait
+ - MEDIUM: mux-h2: merge recv_wait and send_wait event notifications
+ - MEDIUM: mux-fcgi: merge recv_wait and send_wait event notifications
+ - MINOR: connection: make the last arg of subscribe() a struct wait_event*
+ - MINOR: ssl: Add support for returning the dn samples from ssl_(c|f)_(i|s)_dn in LDAP v3 (RFC2253) format.
+ - DOC: Fix copy and paste mistake in http-response replace-value doc
+ - BUG/MINOR: cache: Fix leak of cache name in error path
+ - BUG/MINOR: dns: Make dns_query_id_seed unsigned
+ - BUG/MINOR: 51d: Fix bug when HTX is enabled
+ - MINOR: http-htx: Move htx sample fetches in the scope "internal"
+ - MINOR: http-htx: Rename 'internal.htx_blk.val' to 'internal.htx_blk.data'
+ - MINOR: http-htx: Make 'internal.htx_blk_data' return a binary string
+ - DOC: Add a section to document the internal sample fetches
+ - MINOR: mux-h1: Inherit send flags from the upper layer
+ - MINOR: contrib/prometheus-exporter: Add healthcheck status/code in server metrics
+ - BUG/MINOR: http-ana/filters: Wait end of the http_end callback for all filters
+ - BUG/MINOR: http-rules: Remove buggy deinit functions for HTTP rules
+ - BUG/MINOR: stick-table: Use MAX_SESS_STKCTR as the max track ID during parsing
+ - MEDIUM: http-rules: Register an action keyword for all http rules
+ - MINOR: tcp-rules: Always set from which ruleset a rule comes from
+ - MINOR: actions: Use ACT_RET_CONT code to ignore an error from a custom action
+ - MINOR: tcp-rules: Kill connections when custom actions return ACT_RET_ERR
+ - MINOR: http-rules: Return an error when custom actions return ACT_RET_ERR
+ - MINOR: counters: Add a counter to report internal processing errors
+ - MEDIUM: http-ana: Properly handle internal processing errors
+ - MINOR: http-rules: Add a rule result to report internal error
+ - MINOR: http-rules: Handle internal errors during HTTP rules evaluation
+ - MINOR: http-rules: Add more return codes to let custom actions act as normal ones
+ - MINOR: tcp-rules: Handle denied/aborted/invalid connections from TCP rules
+ - MINOR: http-rules: Handle denied/aborted/invalid connections from HTTP rules
+ - MINOR: stats: Report internal errors in the proxies/listeners/servers stats
+ - MINOR: contrib/prometheus-exporter: Export internal errors per proxy/server
+ - MINOR: counters: Remove failed_secu counter and use denied_resp instead
+ - MINOR: counters: Review conditions to increment counters from analysers
+ - MINOR: http-ana: Add a txn flag to support soft/strict message rewrites
+ - MINOR: http-rules: Handle all message rewrites the same way
+ - MINOR: http-rules: Add a rule to enable or disable the strict rewriting mode
+ - MEDIUM: http-rules: Enable the strict rewriting mode by default
+ - REGTEST: Fix format of set-uri HTTP request rule in h1or2_to_h1c.vtc
+ - MINOR: actions: Add a function pointer to release args used by actions
+ - MINOR: actions: Regroup some info about HTTP rules in the same struct
+ - MINOR: http-rules/tcp-rules: Call the defined action function first if defined
+ - MINOR: actions: Rename the act_flag enum into act_opt
+ - MINOR: actions: Add flags to configure the action behaviour
+ - MINOR: actions: Use an integer to set the action type
+ - MINOR: http-rules: Use a specific action type for some custom HTTP actions
+ - MINOR: http-rules: Make replace-header and replace-value custom actions
+ - MINOR: http-rules: Make set-header and add-header custom actions
+ - MINOR: http-rules: Make set/del-map and add/del-acl custom actions
+ - MINOR: http-rules: Group all processing of early-hint rule in its case clause
+ - MEDIUM: http-rules: Make early-hint custom actions
+ - MINOR: http-rule/tcp-rules: Make track-sc* custom actions
+ - MINOR: tcp-rules: Make tcp-request capture a custom action
+ - MINOR: http-rules: Add release functions for existing HTTP actions
+ - BUG/MINOR: http-rules: Fix memory releases on error path during action parsing
+ - MINOR: tcp-rules: Add release functions for existing TCP actions
+ - BUG/MINOR: tcp-rules: Fix memory releases on error path during action parsing
+ - MINOR: http-htx: Add functions to read a raw error file and convert it in HTX
+ - MINOR: http-htx: Add functions to create HTX redirect message
+ - MINOR: config: Use dedicated function to parse proxy's errorfiles
+ - MINOR: config: Use dedicated function to parse proxy's errorloc
+ - MEDIUM: http-htx/proxy: Use a global and centralized storage for HTTP error messages
+ - MINOR: proxy: Register keywords to parse errorfile and errorloc directives
+ - MINOR: http-htx: Add a new section to create groups of custom HTTP errors
+ - MEDIUM: proxy: Add a directive to reference an http-errors section in a proxy
+ - MINOR: http-rules: Update txn flags and status when a deny rule is executed
+ - MINOR: http-rules: Support an optional status on deny rules for http responses
+ - MINOR: http-rules: Use same function to parse request and response deny actions
+ - MINOR: http-ana: Add an error message in the txn and send it when defined
+ - MEDIUM: http-rules: Support an optional error message in http deny rules
+ - REGTEST: Add a strict rewriting mode reg test
+ - REGTEST: Add reg tests about error files
+ - MINOR: ssl: accept 'verify' bind option with 'set ssl cert'
+ - BUG/MINOR: ssl: ssl_sock_load_ocsp_response_from_file memory leak
+ - BUG/MINOR: ssl: ssl_sock_load_issuer_file_into_ckch memory leak
+ - BUG/MINOR: ssl: ssl_sock_load_sctl_from_file memory leak
+ - BUG/MINOR: http_htx: Fix some leaks on error path when error files are loaded
+ - CLEANUP: http-ana: Remove useless test on txn when the error message is retrieved
+ - BUILD: CI: introduce ARM64 builds
+ - BUILD: ssl: more elegant anti-replay feature presence check
+ - MINOR: proxy/http-ana: Add support of extra attributes for the cookie directive
+ - MEDIUM: dns: use Additional records from SRV responses
+ - CLEANUP: Consistently `unsigned int` for bitfields
+ - CLEANUP: pattern: remove the pat_time definition
+ - BUG/MINOR: http_act: don't check capture id in backend
+ - BUG/MINOR: ssl: fix build on development versions of openssl-1.1.x
+
+2019/11/25 : 2.2-dev0
+ - exact copy of 2.1.0
+
+2019/11/25 : 2.1.0
+ - BUG/MINOR: init: fix set-dumpable when using uid/gid
+ - MINOR: init: avoid code duplication while setting identity
+ - BUG/MINOR: ssl: ssl_pkey_info_index ex_data can store a dereferenced pointer
+ - BUG/MINOR: ssl: fix crt-list neg filter for openssl < 1.1.1
+ - MINOR: peers: Always show the table info for disconnected peers.
+ - MINOR: peers: Add TX/RX heartbeat counters.
+ - MINOR: peers: Add debugging information to "show peers".
+ - BUG/MINOR: peers: Wrong null "server_name" data field handling.
+ - MINOR: ssl/cli: 'abort ssl cert' deletes an on-going transaction
+ - BUG/MEDIUM: mworker: don't fill the -sf argument with -1 during the reexec
+ - BUG/MINOR: peers: "peer alive" flag not reset when disconnecting.
+ - BUILD/MINOR: ssl: fix compiler warning about useless statement
+ - BUG/MEDIUM: stream-int: Don't lose events on the CS when an EOS is reported
+ - MINOR: contrib/prometheus-exporter: filter exported metrics by scope
+ - MINOR: contrib/prometheus-exporter: Add a param to ignore servers in maintenance
+ - BUILD: debug: Avoid warnings in dev mode with -02 because of some BUG_ON tests
+ - BUG/MINOR: mux-h1: Fix tunnel mode detection on the response path
+ - BUG/MINOR: http-ana: Properly catch aborts during the payload forwarding
+ - DOC: Update http-buffer-request description to remove the part about chunks
+ - BUG/MINOR: stream-int: Fix si_cs_recv() return value
+ - DOC: internal: document the init calls
+ - MEDIUM: dns: Add resolve-opts "ignore-weight"
+ - MINOR: ssl: ssl_sock_prepare_ctx() return an error code
+ - MEDIUM: ssl/cli: apply SSL configuration on SSL_CTX during commit
+ - MINOR: ssl/cli: display warning during 'commit ssl cert'
+ - MINOR: version: report the version status in "haproxy -v"
+ - MINOR: version: emit the link to the known bugs in output of "haproxy -v"
+ - DOC: Add documentation about the use-service action
+ - MINOR: ssl: fix possible null dereference in error handling
+ - BUG/MINOR: ssl: fix curve setup with LibreSSL
+ - BUG/MINOR: ssl: Stop passing dynamic strings as format arguments
+ - CLEANUP: ssl: check if a transaction exists once before setting it
+ - BUG/MINOR: cli: fix out of bounds in -S parser
+ - MINOR: ist: add ist_find_ctl()
+ - BUG/MAJOR: h2: reject header values containing invalid chars
+ - BUG/MAJOR: h2: make header field name filtering stronger
+ - BUG/MAJOR: mux-h2: don't try to decode a response HEADERS frame in idle state
+ - MINOR: h2: add a function to report H2 error codes as strings
+ - MINOR: mux-h2/trace: report the connection and/or stream error code
+ - SCRIPTS: create-release: show the correct origin name in suggested commands
+ - SCRIPTS: git-show-backports: add "-s" to proposed cherry-pick commands
+ - BUG/MEDIUM: trace: fix a typo causing an incorrect startup error
+ - BUILD: reorder the objects in the makefile
+ - DOC: mention in INSTALL haproxy 2.1 is a stable version
+ - MINOR: version: indicate that this version is stable
+
+2019/11/15 : 2.1-dev5
+ - BUG/MEDIUM: ssl/cli: don't alloc path when cert not found
+ - BUG/MINOR: ssl/cli: unable to update a certificate without bundle extension
+ - BUG/MINOR: ssl/cli: fix an error when a file is not found
+ - MINOR: ssl/cli: replace the default_ctx during 'commit ssl cert'
+ - DOC: fix date and http_date keywords syntax
+ - MINOR: peers: Add "log" directive to "peers" section.
+ - BUG/MEDIUM: mux-h1: Disable splicing for chunked messages
+ - BUG/MEDIUM: stream: Be sure to support splicing at the mux level to enable it
+ - MINOR: flt_trace: Rename macros to print trace messages
+ - MINOR: trace: Add a set of macros to trace events if HA is compiled with debug
+ - MEDIUM: stream/trace: Register a new trace source with its events
+ - MINOR: doc: http-reuse connection pool fix
+ - BUG/MEDIUM: stream: Be sure to release allocated captures for TCP streams
+ - MINOR: http-ana: Remove the unused function http_reset_txn()
+ - BUG/MINOR: action: do-resolve now use cached response
+ - BUG: dns: timeout resolve not applied for valid resolutions
+ - DOC: management: fix typo on "cache_lookups" stats output
+ - BUG/MINOR: stream: init variables when the list is empty
+ - BUG/MEDIUM: tasks: Make tasklet_remove_from_tasklet_list() no matter the tasklet.
+ - BUG/MINOR: queue/threads: make the queue unlinking atomic
+ - BUG/MEDIUM: Make sure we leave the session list in session_free().
+ - CLEANUP: session: slightly simplify idle connection cleanup logic
+ - MINOR: memory: also poison the area on freeing
+ - CLEANUP: cli: use srv_shutdown_streams() instead of open-coding it
+ - CLEANUP: stats: use srv_shutdown_streams() instead of open-coding it
+ - BUG/MEDIUM: listeners: always pause a listener on out-of-resource condition
+ - BUILD: contrib/da: remove an "unused" warning
+ - BUG/MEDIUM: filters: Don't call TCP callbacks for HTX streams
+ - MEDIUM: filters: Adapt filters API to allow again TCP filtering on HTX streams
+ - MINOR: freq_ctr: Make the sliding window sums thread-safe
+ - MINOR: stream: Remove the lock on the proxy to update time stats
+ - MINOR: counters: Add fields to store the max observed for {q,c,d,t}_time
+ - MINOR: stats: Report max times in addition of the averages for sessions
+ - MINOR: contrib/prometheus-exporter: Report metrics about max times for sessions
+ - BUG/MINOR: contrib/prometheus-exporter: Rename some metrics
+ - MINOR: contrib/prometheus-exporter: report the number of idle conns per server
+ - DOC: Add missing stats fields in the management manual
+ - BUG/MINOR: mux-h1: Properly catch parsing errors on payload and trailers
+ - BUG/MINOR: mux-h1: Don't set CS_FL_EOS on a read0 when receiving data to pipe
+ - MINOR: mux-h1: Set EOI on the conn-stream when EOS is reported in TUNNEL state
+ - MINOR: sink: Set the default max length for a message to BUFSIZE
+ - MINOR: ring: make the parse function automatically set the handler/release
+ - BUG/MINOR: log: make "show startup-log" use a ring buffer instead
+ - MINOR: stick-table: allow sc-set-gpt0 to set value from an expression
+
+2019/11/03 : 2.1-dev4
+ - BUG/MINOR: cli: don't call the kw->io_release if kw->parse failed
+ - BUG/MINOR: mux-h2: Don't pretend mux buffers aren't full anymore if nothing sent
+ - BUG/MAJOR: stream-int: Don't receive data from mux until SI_ST_EST is reached
+ - DOC: remove obsolete section about header manipulation
+ - BUG/MINOR: ssl/cli: cleanup on cli_parse_set_cert error
+ - MINOR: ssl/cli: rework the 'set ssl cert' IO handler
+ - BUILD: CI: comment out cygwin build, upgrade various ssl libraries
+ - DOC: Improve documentation of http-re(quest|sponse) replace-(header|value|uri)
+ - BUILD/MINOR: tools: shut up the format truncation warning in get_gmt_offset()
+ - BUG/MINOR: spoe: fix off-by-one length in UUID format string
+ - BUILD/MINOR: ssl: shut up a build warning about format truncation
+ - BUILD: do not disable -Wformat-truncation anymore
+ - MINOR: chunk: add chunk_istcat() to concatenate an ist after a chunk
+ - Revert "MINOR: istbuf: add b_fromist() to make a buffer from an ist"
+ - MINOR: mux: Add a new method to get information about a mux.
+ - BUG/MEDIUM: stream_interface: Only use SI_ST_RDY when the mux is ready.
+ - BUG/MEDIUM: servers: Only set SF_SRV_REUSED if the connection if fully ready.
+ - MINOR: doc: fix busy-polling performance reference
+ - MINOR: config: allow no set-dumpable config option
+ - MINOR: init: always fail when setrlimit fails
+ - MINOR: ssl/cli: rework 'set ssl cert' as 'set/commit'
+ - CLEANUP: ssl/cli: remove leftovers of bundle/certs (it < 2)
+ - REGTEST: vtest can now enable mcli with its own flag
+ - BUG/MINOR: config: Update cookie domain warn to RFC6265
+ - MINOR: sample: add us/ms support to date/http_date
+ - BUG/MINOR: ssl/cli: check trash allocation in cli_io_handler_commit_cert()
+ - BUG/MEDIUM: mux-h2: report no available stream on a connection having errors
+ - BUG/MEDIUM: mux-h2: immediately remove a failed connection from the idle list
+ - BUG/MEDIUM: mux-h2: immediately report connection errors on streams
+ - BUG/MINOR: stats: properly check the path and not the whole URI
+ - BUG/MINOR: ssl: segfault in cli_parse_set_cert with old openssl/boringssl
+ - BUG/MINOR: ssl: ckch->chain must be initialized
+ - BUG/MINOR: ssl: double free on error for ckch->{key,cert}
+ - MINOR: ssl: BoringSSL ocsp_response does not need issuer
+ - BUG/MEDIUM: ssl/cli: fix dot research in cli_parse_set_cert
+ - MINOR: backend: Add srv_name sample fetch
+ - DOC: Add GitHub issue config.yml
+
+2019/10/25 : 2.1-dev3
+ - MINOR: mux-h2/trace: missing conn pointer in demux full message
+ - MINOR: mux-h2: add a per-connection list of blocked streams
+ - BUILD: ebtree: make eb_is_empty() and eb_is_dup() take a const
+ - BUG/MEDIUM: mux-h2: do not enforce timeout on long connections
+ - BUG/MEDIUM: tasks: Don't forget to decrement tasks_run_queue.
+ - BUG/MINOR: peers: crash on reload without local peer.
+ - BUG/MINOR: mux-h2/trace: Fix traces on h2c initialization
+ - MINOR: h1-htx: Update h1_copy_msg_data() to ease the traces in the mux-h1
+ - MINOR: htx: Adapt htx_dump() to be used from traces
+ - MINOR: mux-h1/trace: register a new trace source with its events
+ - MINOR: proxy: Store http-send-name-header in lower case
+ - MINOR: http: Remove headers matching the name of http-send-name-header option
+ - BUG/MINOR: mux-h1: Adjust header case when the server name is add to a request
+ - BUG/MINOR: mux-h1: Adjust header case when chunked encoding is add to a message
+ - MINOR: mux-h1: Try to wakeup the stream on output buffer allocation
+ - MINOR: fcgi: Add function to get the string representation of a record type
+ - MINOR: mux-fcgi/trace: Register a new trace source with its events
+ - BUG/MEDIUM: cache: make sure not to cache requests with absolute-uri
+ - DOC: clarify some points around http-send-name-header's behavior
+ - MEDIUM: mux-h2: support emitting CONTINUATION frames after HEADERS
+ - BUG/MINOR: mux-h1/mux-fcgi/trace: Fix position of the 4th arg in some traces
+ - DOC: fix typo in Prometheus exporter doc
+ - MINOR: h2: clarify the rules for how to convert an H2 request to HTX
+ - MINOR: htx: Add 2 flags on the start-line to have more info about the uri
+ - MINOR: http: Add a function to get the authority into a URI
+ - MINOR: h1-htx: Set the flag HTX_SL_F_HAS_AUTHORITY during the request parsing
+ - MEDIUM: http-htx: Keep the Host header and the request start-line synchronized
+ - MINOR: h1-htx: Only use the path of a normalized URI to format a request line
+ - MEDIUM: h2: make the request parser rebuild a complete URI
+ - MINOR: h2: report in the HTX flags when the request has an authority
+ - MEDIUM: mux-h2: do not map Host to :authority on output
+ - MEDIUM: h2: use the normalized URI encoding for absolute form requests
+ - MINOR: stats: mention in the help message support for "json" and "typed"
+ - MINOR: stats: get rid of the ST_CONVDONE flag
+ - MINOR: stats: replace the ST_* uri_auth flags with STAT_*
+ - MINOR: stats: always merge the uri_auth flags into the appctx flags
+ - MINOR: stats: set the appctx flags when initializing the applet only
+ - MINOR: stats: get rid of the STAT_SHOWADMIN flag
+ - MINOR: stats: make stats_dump_fields_json() directly take flags
+ - MINOR: stats: uniformize the calling convention of the dump functions
+ - MINOR: stats: support the "desc" output format modifier for info and stat
+ - MINOR: stats: prepare to add a description with each stat/info field
+ - MINOR: stats: make "show stat" and "show info"
+ - MINOR: stats: fill all the descriptions for "show info" and "show stat"
+ - BUG/MEDIUM: applet: always check a fast running applet's activity before killing
+ - BUILD: stats: fix missing '=' sign in array declaration
+ - MINOR: lists: add new macro LIST_SPLICE_END_DETACHED
+ - MINOR: list: add new macro MT_LIST_BEHEAD
+ - MEDIUM: task: Split the tasklet list into two lists.
+ - MINOR: h2: Document traps to be avoided on multithread.
+ - MINOR: lists: Try to use local variables instead of macro arguments.
+ - MINOR: lists: Fix alignment of \ when relevant.
+ - MINOR: mux-h2: also support emitting CONTINUATION on trailers
+ - MINOR: ssl: crt-list do ckchn_lookup
+ - REORG: ssl: rename ckch_node to ckch_store
+ - REORG: ssl: move structures to ssl_sock.h
+ - MINOR: ssl: initialize the sni_keytypes_map as EB_ROOT
+ - MINOR: ssl: initialize explicitly the sni_ctx trees
+ - BUG/MINOR: ssl: abort on sni allocation failure
+ - BUG/MINOR: ssl: free the sni_keytype nodes
+ - BUG/MINOR: ssl: abort on sni_keytypes allocation failure
+ - MEDIUM: ssl: introduce the ckch instance structure
+ - MEDIUM: ssl: split ssl_sock_add_cert_sni()
+ - MINOR: ssl: ssl_sock_load_ckchn() can properly fail
+ - MINOR: ssl: ssl_sock_load_multi_ckchs() can properly fail
+ - MEDIUM: ssl: ssl_sock_load_ckchs() alloc a ckch_inst
+ - MINOR: ssl: ssl_sock_load_crt_file_into_ckch() is filling from a BIO
+ - MEDIUM: ssl/cli: 'set ssl cert' updates a certificate from the CLI
+ - MINOR: ssl: load the sctl in/from the ckch
+ - MINOR: ssl: load the ocsp in/from the ckch
+ - BUG/MEDIUM: ssl: NULL dereference in ssl_sock_load_cert_sni()
+ - BUG/MINOR: ssl: fix build without SSL
+ - BUG/MINOR: ssl: fix build without multi-cert bundles
+ - BUILD: ssl: wrong #ifdef for SSL engines code
+ - BUG/MINOR: ssl: fix OCSP build with BoringSSL
+ - BUG/MEDIUM: htx: Catch chunk_memcat() failures when HTX data are formatted to h1
+ - BUG/MINOR: chunk: Fix tests on the chunk size in functions copying data
+ - BUG/MINOR: mux-h1: Mark the output buffer as full when the xfer is interrupted
+ - MINOR: mux-h1: Xfer as much payload data as possible during output processing
+ - CLEANUP: h1-htx: Move htx-to-h1 formatting functions from htx.c to h1_htx.c
+ - BUG/MINOR: mux-h1: Capture ignored parsing errors
+ - MINOR: h1: Reject requests with different occurrences of the header host
+ - MINOR: h1: Reject requests if the authority does not match the header host
+ - REGTESTS: Send valid URIs in peers reg-tests and fix HA config to avoid warnings
+ - REGTESTS: Adapt proxy_protocol_random_fail.vtc to match normalized URI too
+ - BUG/MINOR: WURFL: fix send_log() function arguments
+ - BUG/MINOR: ssl: fix error messages for OCSP loading
+ - BUG/MINOR: ssl: can't load ocsp files
+ - MINOR: version: make the version strings variables, not constants
+ - BUG/MINOR: http-htx: Properly set htx flags on error files to support keep-alive
+ - MINOR: htx: Add a flag on HTX to known when a response was generated by HAProxy
+ - MINOR: mux-h1: Force close mode for proxy responses with an unfinished request
+ - BUILD: travis-ci: limit build to branches "master" and "next"
+ - BUILD/MEDIUM: threads: rename thread_info struct to ha_thread_info
+ - BUILD/SMALL: threads: enable threads on osx
+ - BUILD/MEDIUM: threads: enable cpu_affinity on osx
+ - MINOR: istbuf: add b_fromist() to make a buffer from an ist
+ - BUG/MINOR: cache: also cache absolute URIs
+ - BUG/MINOR: mworker/ssl: close openssl FDs unconditionally
+ - BUG/MINOR: tcp: Don't alter counters returned by tcp info fetchers
+ - BUG/MEDIUM: lists: Handle 1-element-lists in MT_LIST_BEHEAD().
+ - BUG/MEDIUM: mux_pt: Make sure we don't have a conn_stream before freeing.
+ - BUG/MEDIUM: tasklet: properly compute the sleeping threads mask in tasklet_wakeup()
+ - BUG/MAJOR: idle conns: schedule the cleanup task on the correct threads
+ - BUG/MEDIUM: task: make tasklets either local or shared but not both at once
+ - Revert e8826ded5fea3593d89da2be5c2d81c522070995.
+ - BUG/MEDIUM: mux_pt: Don't destroy the connection if we have a stream attached.
+ - BUG/MEDIUM: mux_pt: Only call the wake method if nobody subscribed to receive.
+ - REGTEST: mcli/mcli_show_info: launch a 'show info' on the master CLI
+ - CLEANUP: ssl: make ssl_sock_load_cert*() return real error codes
+ - CLEANUP: ssl: make ssl_sock_load_ckchs() return a set of ERR_*
+ - CLEANUP: ssl: make cli_parse_set_cert handle errcode and warnings.
+ - CLEANUP: ssl: make ckch_inst_new_load_(multi_)store handle errcode/warn
+ - CLEANUP: ssl: make ssl_sock_put_ckch_into_ctx handle errcode/warn
+ - CLEANUP: ssl: make ssl_sock_load_dh_params handle errcode/warn
+ - CLEANUP: bind: handle warning label on bind keywords parsing.
+ - BUG/MEDIUM: ssl: 'tune.ssl.default-dh-param' value ignored with openssl > 1.1.1
+ - BUG/MINOR: mworker/cli: reload fail with inherited FD
+ - BUG/MINOR: ssl: Fix fd leak on error path when a TLS ticket keys file is parsed
+ - BUG/MINOR: stick-table: Never exceed (MAX_SESS_STKCTR-1) when fetching a stkctr
+ - BUG/MINOR: cache: alloc shctx after check config
+ - BUG/MINOR: sample: Make the `field` converter compatible with `-m found`
+ - BUG/MINOR: server: check return value of fopen() in apply_server_state()
+ - REGTESTS: make seamless-reload depend on 1.9 and above
+ - REGTESTS: server/cli_set_fqdn requires version 1.8 minimum
+ - BUG/MINOR: dns: allow srv record weight set to 0
+ - BUG/MINOR: ssl: fix memcpy overlap without consequences.
+ - BUG/MINOR: stick-table: fix an incorrect 32 to 64 bit key conversion
+ - BUG/MEDIUM: pattern: make the pattern LRU cache thread-local and lockless
+ - BUG/MINOR: mux-h2: do not emit logs on backend connections
+ - CLEANUP: ssl: remove old TODO commentary
+ - CLEANUP: ssl: fix SNI/CKCH lock labels
+ - MINOR: ssl: OCSP functions can load from file or buffer
+ - MINOR: ssl: load sctl from buf OR from a file
+ - MINOR: ssl: load issuer from file or from buffer
+ - MINOR: ssl: split ssl_sock_load_crt_file_into_ckch()
+ - BUG/MINOR: ssl/cli: fix looking up for a bundle
+ - MINOR: ssl/cli: update ocsp/issuer/sctl file from the CLI
+ - MINOR: ssl: update ssl_sock_free_cert_key_and_chain_contents
+ - MINOR: ssl: copy a ckch from src to dst
+ - MINOR: ssl: new functions duplicate and free a ckch_store
+ - MINOR: ssl/cli: assign a new ckch_store
+ - MEDIUM: cli/ssl: handle the creation of SSL_CTX in an IO handler
+ - BUG/MINOR: ssl/cli: fix build of SCTL and OCSP
+ - BUG/MINOR: ssl/cli: out of bounds when built without ocsp/sctl
+ - BUG/MINOR: ssl: fix build with openssl < 1.1.0
+ - BUG/MINOR: ssl: fix build of X509_chain_up_ref() w/ libreSSL
+ - MINOR: tcp: avoid confusion in time parsing init
+ - MINOR: debug: add a new "debug dev stream" command
+ - MINOR: cli/debug: validate addresses using may_access() in "debug dev stream"
+ - REORG: move CLI access level definitions to cli.h
+ - MINOR: cli: add an expert mode to hide dangerous commands
+ - MINOR: debug: make most debug CLI commands accessible in expert mode
+ - MINOR: stats/debug: maintain a counter of debug commands issued
+ - BUG/MEDIUM: debug: address a possible null pointer dereference in "debug dev stream"
+
+2019/10/01 : 2.1-dev2
+ - DOC: management: document reuse and connect counters in the CSV format
+ - DOC: management: document cache_hits and cache_lookups in the CSV format
+ - BUG/MINOR: dns: remove irrelevant dependency on a client connection
+ - MINOR: applet: make appctx use their own pool
+ - BUG/MEDIUM: checks: Don't attempt to receive data if we already subscribed.
+ - BUG/MEDIUM: http/htx: unbreak option http_proxy
+ - BUG/MINOR: backend: do not try to install a mux when the connection failed
+ - MINOR: mux-h2: Don't adjust anymore the amount of data sent in h2_snd_buf()
+ - BUG/MINOR: http_fetch: Fix http_auth/http_auth_group when called from TCP rules
+ - BUG/MINOR: http_htx: Initialize HTX error messages for TCP proxies
+ - BUG/MINOR: cache/htx: Make maxage calculation HTX aware
+ - BUG/MINOR: hlua: Make the function txn:done() HTX aware
+ - MINOR: proto_htx: Directly call htx_check_response_for_cacheability()
+ - MINOR: proto_htx: Rely on the HTX function to apply a redirect rules
+ - MINOR: proto_htx: Add the function htx_return_srv_error()
+ - MINOR: backend/htx: Don't rewind output data to set the sni on a srv connection
+ - MINOR: proto_htx: Don't stop forwarding when there is a post-connect processing
+ - DOC: htx: Update comments in HTX files
+ - CLEANUP: htx: Remove the unused function htx_add_blk_type_size()
+ - MINOR: htx: Deduce the number of used blocks from tail and head values
+ - MINOR: htx: Use an array of char to store HTX blocks
+ - MINOR: htx: Slightly update htx_dump() to report better messages
+ - DOC: htx: Add internal documentation about the HTX
+ - MAJOR: http: Deprecate and ignore the option "http-use-htx"
+ - MEDIUM: mux-h2: Remove support of the legacy HTTP mode
+ - CLEANUP: h2: Remove functions converting h2 requests to raw HTTP/1.1 ones
+ - MINOR: connection: Remove the multiplexer protocol PROTO_MODE_HTX
+ - MINOR: stream: Rely on HTX analyzers instead of legacy HTTP ones
+ - MEDIUM: http_fetch: Remove code relying on HTTP legacy mode
+ - MINOR: config: Remove tests on the option 'http-use-htx'
+ - MINOR: stream: Remove tests on the option 'http-use-htx' in stream_new()
+ - MINOR: proxy: Remove tests on the option 'http-use-htx' during H1 upgrade
+ - MINOR: hlua: Remove tests on the option 'http-use-htx' to reject TCP applets
+ - MINOR: cache: Remove tests on the option 'http-use-htx'
+ - MINOR: contrib/prometheus-exporter: Remove tests on the option 'http-use-htx'
+ - CLEANUP: proxy: Remove the flag PR_O2_USE_HTX
+ - MINOR: proxy: Don't adjust connection mode of HTTP proxies anymore
+ - MEDIUM: backend: Remove code relying on the HTTP legacy mode
+ - MEDIUM: hlua: Remove code relying on the legacy HTTP mode
+ - MINOR: http_act: Remove code relying on the legacy HTTP mode
+ - MEDIUM: cache: Remove code relying on the legacy HTTP mode
+ - MEDIUM: compression: Remove code relying on the legacy HTTP mode
+ - MINOR: flt_trace: Remove code relying on the legacy HTTP mode
+ - MINOR: stats: Remove code relying on the legacy HTTP mode
+ - MAJOR: filters: Remove code relying on the legacy HTTP mode
+ - MINOR: stream: Remove code relying on the legacy HTTP mode
+ - MAJOR: http: Remove the HTTP legacy code
+ - MINOR: hlua: Remove useless test on TX_CON_WANT_* flags
+ - MINOR: proto_http: Remove unused http txn flags
+ - MINOR: proto_http: Remove the unused flag HTTP_MSGF_WAIT_CONN
+ - CLEANUP: proto_http: Group remaining flags of the HTTP transaction
+ - CLEANUP: channel: Remove the unused flag CF_WAKE_CONNECT
+ - CLEANUP: proto_http: Remove unnecessary includes and comments
+ - CLEANUP: proto_http: Move remaining code from proto_http.c to proto_htx.c
+ - REORG: proto_htx: Move HTX analyzers & co to http_ana.{c,h} files
+ - BUG/MINOR: debug: Remove flags CO_FL_SOCK_WR_ENA/CO_FL_SOCK_RD_ENA
+ - MINOR: proxy: Remove support of the option 'http-tunnel'
+ - DOC: config: Update as a result of the legacy HTTP removal
+ - MEDIUM: config: Remove parsing of req* and rsp* directives
+ - MINOR: proxy: Remove the unused list of block rules
+ - MINOR: proxy/http_ana: Remove unused req_exp/rsp_exp and req_add/rsp_add lists
+ - DOC: config: Remove unsupported req* and rsp* keywords
+ - MINOR: global: Preset tune.max_http_hdr to its default value
+ - MINOR: http: Don't store raw HTTP errors in chunks anymore
+ - BUG/MINOR: session: Emit an HTTP error if accept fails only for H1 connection
+ - BUG/MINOR: session: Send a default HTTP error if accept fails for a H1 socket
+ - CLEANUP: mux-h2: Remove unused flags H2_SF_CHNK_*
+ - BUG/MINOR: checks: do not exit tcp-checks from the middle of the loop
+ - MINOR: config: Warn only if the option http-use-htx is used with "no" prefix
+ - BUG/MEDIUM: mux-h1: Trim excess server data at the end of a transaction
+ - MINOR: connection: add conn_get_src() and conn_get_dst()
+ - MINOR: frontend: switch to conn_get_{src,dst}() for logging and debugging
+ - MINOR: backend: switch to conn_get_{src,dst}() for port and address mapping
+ - MINOR: ssl: switch to conn_get_dst() to retrieve the destination address
+ - MINOR: tcp: replace various calls to conn_get_{from,to}_addr with conn_get_{src,dst}
+ - MINOR: stream-int: use conn_get_{src,dst} in conn_si_send_proxy()
+ - MINOR: stream/cli: use conn_get_{src,dst} in "show sess" and "show peers" output
+ - MINOR: log: use conn_get_{dst,src}() to retrieve the cli/frt/bck/srv/ addresses
+ - MINOR: http/htx: use conn_get_dst() to retrieve the destination address
+ - MINOR: lua: use conn_get_{src,dst} to retrieve connection addresses
+ - MINOR: http: check the source address via conn_get_src() in sample fetch functions
+ - CLEANUP: connection: remove the now unused conn_get_{from,to}_addr()
+ - MINOR: connection: add new src and dst fields
+ - MINOR: connection: use conn->{src,dst} instead of &conn->addr.{from,to}
+ - MINOR: ssl-sock: use conn->dst instead of &conn->addr.to
+ - MINOR: lua: switch to conn->dst for a connection's target address
+ - MINOR: peers: use conn->dst for the peer's target address
+ - MINOR: htx: switch from conn->addr.{from,to} to conn->{src,dst}
+ - MINOR: stream: switch from conn->addr.{from,to} to conn->{src,dst}
+ - MINOR: proxy: switch to conn->src in error snapshots
+ - MINOR: session: use conn->src instead of conn->addr.from
+ - MINOR: tcp: replace conn->addr.{from,to} with conn->{src,dst}
+ - MINOR: unix: use conn->dst for the target address in ->connect()
+ - MINOR: sockpair: use conn->dst for the target address in ->connect()
+ - MINOR: log: use conn->{src,dst} instead of conn->addr.{from,to}
+ - MINOR: checks: replace conn->addr.to with conn->dst
+ - MINOR: frontend: switch from conn->addr.{from,to} to conn->{src,dst}
+ - MINOR: http: convert conn->addr.from to conn->src in sample fetches
+ - MEDIUM: backend: turn all conn->addr.{from,to} to conn->{src,dst}
+ - MINOR: connection: create a new pool for struct sockaddr_storage
+ - MEDIUM: connection: make sure all address producers allocate their address
+ - MAJOR: connection: remove the addr field
+ - MINOR: connection: don't use clear_addr() anymore, just release the address
+ - MINOR: stream: add a new target_addr entry in the stream structure
+ - MAJOR: stream: store the target address into s->target_addr
+ - MINOR: peers: now remove the remote connection setup code
+ - MEDIUM: lua: do not allocate the remote connection anymore
+ - MEDIUM: backend: always release any existing prior connection in connect_server()
+ - MEDIUM: backend: remove impossible cases from connect_server()
+ - BUG/MINOR: mux-h1: Close server connection if input data remains in h1_detach()
+ - BUG/MEDIUM: tcp-checks: do not dereference inexisting conn_stream
+ - BUG/MINOR: http_ana: Be sure to have an allocated buffer to generate an error
+ - BUG/MINOR: http_htx: Support empty errorfiles
+ - BUG/CRITICAL: http_ana: Fix parsing of malformed cookies which start by a delimiter
+ - BUG/MEDIUM: protocols: add a global lock for the init/deinit stuff
+ - BUG/MINOR: proxy: always lock stop_proxy()
+ - MEDIUM: mux-h1: Add the support of headers adjustment for bogus HTTP/1 apps
+ - BUILD: threads: add the definition of PROTO_LOCK
+ - BUG/MEDIUM: lb-chash: Fix the realloc() when the number of nodes is increased
+ - BUG/MEDIUM: streams: Don't switch the SI to SI_ST_DIS if we have data to send.
+ - BUG/MINOR: log: make sure writev() is not interrupted on a file output
+ - DOC: improve the wording in CONTRIBUTING about how to document a bug fix
+ - MEDIUM: h1: Don't try to subscribe if we managed to read data.
+ - MEDIUM: h1: Don't wake the H1 tasklet if we got the whole request.
+ - REGTESTS: checks: exclude freebsd target for tcp-check_multiple_ports.vtc
+ - BUG/MINOR: hlua/htx: Reset channels analyzers when txn:done() is called
+ - BUG/MEDIUM: hlua: Check the calling direction in lua functions of the HTTP class
+ - MINOR: hlua: Don't set request analyzers on response channel for lua actions
+ - MINOR: hlua: Add a flag on the lua txn to know in which context it can be used
+ - BUG/MINOR: hlua: Only execute functions of HTTP class if the txn is HTTP ready
+ - BUG/MINOR: htx: Fix free space addresses calculation during a block expansion
+ - MINOR: ssl: merge ssl_sock_load_cert_file() and ssl_sock_load_cert_chain_file()
+ - MEDIUM: ssl: use cert_key_and_chain struct in ssl_sock_load_cert_file()
+ - MEDIUM: ssl: split the loading of the certificates
+ - MEDIUM: ssl: lookup and store in a ckch_node tree
+ - MEDIUM: ssl: load DH param in struct cert_key_and_chain
+ - BUG/MAJOR: queue/threads: avoid an AB/BA locking issue in process_srv_queue()
+ - MINOR: ssl: use STACK_OF for chain certs
+ - MINOR: ssl: add extra chain compatibility
+ - MINOR: ssl: check private key consistency in loading
+ - MINOR: ssl: do not look at DHparam with OPENSSL_NO_DH
+ - CLEANUP: ssl: ssl_sock_load_crt_file_into_ckch
+ - MINOR: ssl: clean ret variable in ssl_sock_load_ckchn
+ - MAJOR: fd: Get rid of the fd cache.
+ - MEDIUM: pollers: Remember the state for read and write for each threads.
+ - MEDIUM: mux-h2: don't try to read more than needed
+ - BUG/BUILD: ssl: fix build with openssl < 1.0.2
+ - BUG/MEDIUM: ssl: does not try to free a DH in a ckch
+ - BUG/MINOR: debug: fix a small race in the thread dumping code
+ - MINOR: wdt: also consider that waiting in the thread dumper is normal
+ - REGTESTS: checks: make 4be_1srv_health_checks more reliable
+ - BUILD: ssl: BoringSSL add EVP_PKEY_base_id
+ - BUG/MEDIUM: ssl: don't free the ckch in multi-cert bundle
+ - BUG/MINOR: ssl: fix resource leaks on error
+ - BUG/MEDIUM: lb-chash: Ensure the tree integrity when server weight is increased
+ - BUG/MAJOR: http/sample: use a static buffer for raw -> htx conversion
+ - BUG/MINOR: stream-int: make sure to always release empty buffers after sending
+ - BUG/MEDIUM: ssl: open the right path for multi-cert bundle
+ - BUG/MINOR: stream-int: also update analysers timeouts on activity
+ - BUG/MEDIUM: mux-h2: unbreak receipt of large DATA frames
+ - BUG/MEDIUM: mux-h2: split the stream's and connection's window sizes
+ - BUG/MEDIUM: proxy: Make sure to destroy the stream on upgrade from TCP to H2
+ - DOC: Add 'Question.md' issue template, discouraging asking questions
+ - BUG/MEDIUM: fd: Always reset the polled_mask bits in fd_dodelete().
+ - BUG/MEDIUM: pollers: Clear the poll_send bits as well.
+ - BUILD: travis-ci: enable daily Coverity scan
+ - BUG/MINOR: mux-h2: don't refrain from sending an RST_STREAM after another one
+ - BUG/MINOR: mux-h2: use CANCEL, not STREAM_CLOSED in h2c_frt_handle_data()
+ - BUG/MINOR: mux-h2: do not send REFUSED_STREAM on aborted uploads
+ - BUG/MEDIUM: mux-h2: do not recheck a frame type after a state transition
+ - BUG/MINOR: mux-h2: always send stream window update before connection's
+ - BUG/MINOR: mux-h2: always reset rcvd_s when switching to a new frame
+ - BUG/MEDIUM: checks: make sure to close nicely when we're the last to speak
+ - BUG/MEDIUM: stick-table: Wrong stick-table backends parsing.
+ - CLEANUP: mux-h2: move the demuxed frame check code in its own function
+ - MINOR: cache: add method to cache hash
+ - MINOR: cache: allow caching of OPTIONS request
+ - BUG/MINOR: ssl: fix 0-RTT for BoringSSL
+ - MINOR: ssl: ssl_fc_has_early should work for BoringSSL
+ - BUG/MINOR: pools: don't mark the thread harmless if already isolated
+ - BUG/MINOR: buffers/threads: always clear a buffer's head before releasing it
+ - CLEANUP: buffer: replace b_drop() with b_free()
+ - CLEANUP: task: move the cpu_time field to the task-only part
+ - MINOR: cli: add two new states to print messages on the CLI
+ - MINOR: cli: add cli_msg(), cli_err(), cli_dynmsg(), cli_dynerr()
+ - CLEANUP: cli: replace all occurrences of manual handling of return messages
+ - BUG/MEDIUM: proxy: Don't forget the SF_HTX flag when upgrading TCP=>H1+HTX.
+ - BUG/MEDIUM: proxy: Don't use cs_destroy() when freeing the conn_stream.
+ - BUG/MINOR: lua: fix setting netfilter mark
+ - BUG/MINOR: Fix prometheus '# TYPE' and '# HELP' headers
+ - BUG/MEDIUM: lua: Fix test on the direction to set the channel exp timeout
+ - BUG/MINOR: stats: Wait the body before processing POST requests
+ - MINOR: fd: make sure to mark the thread as not stuck in fd_update_events()
+ - BUG/MEDIUM: mux_pt: Don't call unsubscribe if we did not subscribe.
+ - BUILD: travis-ci: trigger non-mainstream configurations only on daily builds.
+ - MINOR: debug: indicate the applet name when the task is task_run_applet()
+ - MINOR: tools: add append_prefixed_str()
+ - MINOR: lua: export applet and task handlers
+ - MEDIUM: debug: make the thread dump code show Lua backtraces
+ - BUG/MEDIUM: h1: Always try to receive more in h1_rcv_buf().
+ - MINOR: list: add LIST_SPLICE() to merge one list into another
+ - MINOR: tools: add a DEFNULL() macro to use NULL for empty args
+ - REORG: trace: rename trace.c to calltrace.c and mention it's not thread-safe
+ - MINOR: sink: create definitions a minimal code for event sinks
+ - MINOR: sink: add a support for file descriptors
+ - MINOR: trace: start to create a new trace subsystem
+ - MINOR: trace: add allocation of buffer-sized trace buffers
+ - MINOR: trace/cli: register the "trace" CLI keyword to list the sources
+ - MINOR: trace/cli: parse the "level" argument to configure the trace verbosity
+ - MINOR: trace/cli: add "show trace" to report trace state and statistics
+ - MINOR: trace: implement a very basic trace() function
+ - MINOR: trace: add the file name and line number in the prefix
+ - MINOR: trace: make trace() now also take a level in argument
+ - MINOR: trace: implement a call to a decode function
+ - MINOR: trace: add per-level macros to produce traces
+ - MINOR: trace: add a definition of typed arguments to trace()
+ - MINOR: trace: make sure to always stop the locking when stopping or pausing
+ - MINOR: trace: add the possibility to lock on some arguments
+ - MINOR: trace: parse the "lock" argument to trace
+ - MINOR: trace: retrieve useful pointers and enforce lock-on
+ - DOC: management: document the "trace" and "show trace" commands
+ - BUILD: trace: make the lockon_ptr const to silence a warning without threads
+ - BUG/MEDIUM: mux-h1: do not truncate trailing 0CRLF on buffer boundary
+ - BUG/MEDIUM: mux-h1: do not report errors on transfers ending on buffer full
+ - DOC: fixed typo in management.txt
+ - BUG/MINOR: mworker: disable SIGPROF on re-exec
+ - BUG/MEDIUM: listener/threads: fix an AB/BA locking issue in delete_listener()
+ - BUG/MEDIUM: url32 does not take the path part into account in the returned hash.
+ - MINOR: backend: Add srv_queue converter
+ - MINOR: sink: set the fd-type sinks to non-blocking
+ - MINOR: tools: add a function varint_bytes() to report the size of a varint
+ - MINOR: buffer: add functions to read/write varints from/to buffers
+ - MINOR: fd: add fd_write_frag_line() to send a fragmented line to an fd
+ - MINOR: sink: now call the generic fd write function
+ - MINOR: ring: add a new mechanism for retrieving/storing ring data in buffers
+ - MINOR: ring: add a ring_write() function
+ - MINOR: ring: add a generic CLI io_handler to dump a ring buffer
+ - MINOR: sink: add support for ring buffers
+ - MINOR: sink: implement "show events" to show supported sinks and dump the rings
+ - MINOR: sink: now report the number of dropped events on output
+ - MINOR: trace: support a default callback for the source
+ - MINOR: trace: extend the source location to 13 chars
+ - MINOR: trace: show thread number and source name in the trace
+ - MINOR: trace: change the TRACE() calling convention to put the args and cb last
+ - MINOR: connection: add the fc_pp_authority fetch -- authority TLV, from PROXYv2
+ - MINOR: tools: add a generic struct "name_desc" for name-description pairs
+ - MINOR: trace: replace struct trace_lockon_args with struct name_desc
+ - MINOR: trace: change the "payload" level to "data" and move it
+ - MINOR: trace: prepend the function name for developer level traces
+ - MINOR: trace: also report the trace level in the output
+ - MINOR: trace: change the detail_level to per-source verbosity
+ - MINOR: mux-h2/trace: register a new trace source with its events
+ - MINOR: mux-h2/trace: add the default decoding callback
+ - MEDIUM: mux-h2/trace: add lots of traces all over the code
+ - MINOR: mux-h2: add functions to convert an h2c/h2s state to a string
+ - MINOR: mux-h2/trace: add a new verbosity level "clean"
+ - MINOR: mux-h2/trace: only decode the start-line at verbosity other than "minimal"
+ - MINOR: mux-h2/trace: always report the h2c/h2s state and flags
+ - MINOR: mux-h2/trace: report h2s->id before h2c->dsi for the stream ID
+ - CLEANUP: mux-h2/trace: reformat the "received" messages for better alignment
+ - CLEANUP: mux-h2/trace: lower-case event names
+ - MINOR: trace: extend default event names to 12 chars
+ - BUG/MINOR: ring: fix the way watchers are counted
+ - MINOR: cli: extend the CLI context with a list and two offsets
+ - MINOR: mux-h2/trace: report the connection pointer and state before FRAME_H
+ - MEDIUM: ring: implement a wait mode for watchers
+ - BUG/MEDIUM: mux-h2/trace: do not dereference h2c->conn after failed idle
+ - BUG/MEDIUM: mux-h2/trace: fix missing braces added with traces
+ - BUG/MINOR: ring: b_peek_varint() returns a uint64_t, not a size_t
+ - CLEANUP: fd: remove leftovers of the fdcache
+ - MINOR: fd: add a new "initialized" bit in the fdtab struct
+ - MINOR: fd/log/sink: make the non-blocking initialization depend on the initialized bit
+ - MEDIUM: log: use the new generic fd_write_frag_line() function
+ - MINOR: log: add a target type instead of hacking the address family
+ - MEDIUM: log: add support for logging to a ring buffer
+ - MINOR: send-proxy-v2: sends authority TLV according to TLV received
+ - MINOR: build: add linux-glibc-legacy build TARGET
+ - BUG/MEDIUM: peers: local peer socket not bound.
+ - BUILD: connection: silence gcc warning with extra parentheses
+ - BUG/MINOR: http-ana: Reset response flags when 1xx messages are handled
+ - BUG/MINOR: h1: Properly reset h1m when parsing is restarted
+ - BUG/MINOR: mux-h1: Fix size evaluation of HTX messages after headers parsing
+ - BUG/MINOR: mux-h1: Don't stop anymore input processing when the max is reached
+ - BUG/MINOR: mux-h1: Be sure to update the count before adding EOM after trailers
+ - BUG/MEDIUM: cache: Properly copy headers splitted on several shctx blocks
+ - BUG/MEDIUM: cache: Don't cache objects if the size of headers is too big
+ - BUG/MINOR: mux-h1: Fix a possible null pointer dereference in h1_subscribe()
+ - MEDIUM: fd: remove the FD_EV_POLLED status bit
+ - MEDIUM: fd: simplify the fd_*_{recv,send} functions using BTS/BTR
+ - MINOR: fd: make updt_fd_polling() a normal function
+ - CONTRIB: debug: add new program "poll" to test poll() events
+ - BUG/MINOR: checks: stop polling for write when we have nothing left to send
+ - BUG/MINOR: checks: start sending the request right after connect()
+ - BUG/MINOR: checks: make __event_chk_srv_r() report success before closing
+ - BUG/MINOR: checks: do not uselessly poll for reads before the connection is up
+ - BUG/MINOR: mux-h1: Fix a UAF in cfg_h1_headers_case_adjust_postparser()
+ - BUILD: CI: add basic CentOS 6 cirrus build
+ - MINOR: contrib/prometheus-exporter: Report DRAIN/MAINT/NOLB status for servers
+ - BUG/MINOR: lb/leastconn: ignore the server weights for empty servers
+ - BUG/MAJOR: ssl: ssl_sock was not fully initialized.
+ - MEDIUM: fd: mark the FD as ready when it's inserted
+ - MINOR: fd: add two new calls fd_cond_{recv,send}()
+ - MEDIUM: connection: enable reading only once the connection is confirmed
+ - MINOR: fd: add two flags ERR and SHUT to describe FD states
+ - MEDIUM: fd: do not use the FD_POLL_* flags in the pollers anymore
+ - BUG/MEDIUM: connection: don't keep more idle connections than ever needed
+ - MINOR: stats: report the number of idle connections for each server
+ - BUILD: CI: skip reg-tests/connection/proxy_protocol_random_fail.vtc on CentOS 6
+ - BUILD/MINOR: auth: enabling for osx
+ - BUG/MINOR: listener: Fix a possible null pointer dereference
+ - BUG/MINOR: ssl: always check for ssl connection before getting its XPRT context
+ - MINOR: stats: Add JSON export from the stats page
+ - BUG/MINOR: filters: Properly set the HTTP status code on analysis error
+ - MINOR: sample: Add UUID-fetch
+ - CLEANUP: mux-h2: Remove unused flag H2_SF_DATA_CHNK
+ - BUG/MINOR: acl: Fix memory leaks when an ACL expression is parsed
+ - BUG/MINOR: backend: Fix a possible null pointer dereference
+ - BUG/MINOR: Missing stat_field_names (since f21d17bb)
+ - BUG/MEDIUM: stick-table: Properly handle "show table" with a data type argument
+ - BUILD: CI: temporarily disable ASAN
+ - MINOR: htx: Add a flag on HTX message to report processing errors
+ - MINOR: mux-h1: Report a processing error during output processing
+ - MINOR: http-ana: Handle HTX errors first during message analysis
+ - MINOR: http-ana: Remove err_state field from http_msg
+ - MINOR: config: Support per-proxy and per-server deinit functions callbacks
+ - MINOR: config: Support per-proxy and per-server post-check functions callbacks
+ - MINOR: http_fetch: Add sample fetches to get auth method/user/pass
+ - MINOR: istbuf: Add the function b_isteqi()
+ - MINOR: log: Provide a function to emit a log for an application
+ - MINOR: http: Add function to parse value of the header Status
+ - MEDIUM: mux-h1/h1-htx: move HTX conversion of H1 messages in dedicated file
+ - MINOR: h1-htx: Use the same function to copy message payload in all cases
+ - MINOR: muxes/htx: Ignore pseudo header during message formatting
+ - MINOR: fcgi: Add code related to FCGI protocol
+ - MEDIUM: fcgi-app: Add FCGI application and filter
+ - MEDIUM: mux-fcgi: Add the FCGI multiplexer
+ - MINOR: doc: Add documentation about the FastCGI support
+ - BUG/MINOR: build: Fix compilation of mux_fcgi.c when compiled without SSL
+ - BUILD: CI: install golang-1.13 when building BoringSSL
+ - BUG/MINOR: mux-h2: Be sure to have a connection to unsubscribe
+ - BUG/MINOR: mux-fcgi: Be sure to have a connection to unsubscribe
+ - CLEANUP: fcgi-app: Remove useless test on fcgi_conf pointer
+ - BUG/MINOR: mux-fcgi: Don't compare the filter name in its parsing callback
+ - BUG/MAJOR: mux-h2: Handle HEADERS frames received after a RST_STREAM frame
+ - BUG/MEDIUM: check/threads: make external checks run exclusively on thread 1
+ - MEDIUM: list: Separate "locked" list from regular list.
+ - MINOR: mt_lists: Add new macroes.
+ - MEDIUM: servers: Use LIST_DEL_INIT() instead of LIST_DEL().
+ - MINOR: mt_lists: Do nothing in MT_LIST_ADD/MT_LIST_ADDQ if already in list.
+ - MINOR: mt_lists: Give MT_LIST_ADD, MT_LIST_ADDQ and MT_LIST_DEL a return value.
+ - MEDIUM: tasklets: Make the tasklet list a struct mt_list.
+ - TESTS: Add a stress-test for mt_lists.
+ - BUILD: travis-ci: add PCRE2, SLZ build
+ - BUG/MINOR: build: fix event ports (Solaris)
+ - BUG/MEDIUM: namespace: fix fd leak in master-worker mode
+ - OPTIM: listeners: use tasklets for the multi-queue rings
+ - BUILD: makefile: work around yet another GCC fantasy (-Wstring-plus-int)
+ - BUG/MINOR: stream-int: Process connection/CS errors first in si_cs_send()
+ - BUG/MEDIUM: stream-int: Process connection/CS errors during synchronous sends
+ - BUG/MEDIUM: checks: make sure the connection is ready before trying to recv
+ - CLEANUP: task: remove impossible test
+ - CLEANUP: task: cache the task_per_thread pointer
+ - MINOR: task: split the tasklet vs task code in process_runnable_tasks()
+ - MINOR: task: introduce a thread-local "sched" variable for local scheduler stuff
+ - CLEANUP: mux-fcgi: Remove the unused function fcgi_strm_id()
+ - BUG/MINOR: mux-fcgi: Use a literal string as format in app_log()
+ - BUG/MEDIUM: tasklets: Make sure we're waking the target thread if it sleeps.
+ - MINOR: h2/trace: indicate 'F' or 'B' to locate the side of an h2c in traces
+ - MINOR: h2/trace: report the frame type when known
+ - BUG/MINOR: mux-h2: do not wake up blocked streams before the mux is ready
+ - BUG/MEDIUM: namespace: close open namespaces during soft shutdown
+ - MINOR: time: add timeofday_as_iso_us() to return instant time as ISO
+ - MINOR: sink: finally implement support for SINK_FMT_{TIMED,ISO}
+ - MINOR: sink: change ring buffer "buf0"'s format to "timed"
+ - BUG/MEDIUM: mux-h2: don't reject valid frames on closed streams
+ - BUG/MINOR: mux-fcgi: silence a gcc warning about null dereference
+ - BUG/MINOR: mux-h2: Fix missing braces because of traces in h2_detach()
+ - BUG/MINOR: mux-h2: Use the dummy error when decoding headers for a closed stream
+ - BUG/MAJOR: mux_h2: Don't consume more payload than received for skipped frames
+ - BUG/MINOR: mux-h1: Do h2 upgrade only on the first request
+ - BUG/MEDIUM: spoe: Use a different engine-id per process
+ - MINOR: spoe: Improve generation of the engine-id
+ - MINOR: spoe: Support the async mode with several threads
+ - MINOR: http: Add server name header from HTTP multiplexers
+ - CLEANUP: http-ana: Remove the unused function http_send_name_header()
+ - MINOR: stats: Add the support of float fields in stats
+ - BUG/MINOR: contrib/prometheus-exporter: Return the time averages in seconds
+ - DOC: Fix documentation about the cli command to get resolver stats
+ - BUG/MEDIUM: fcgi: fix missing list tail in sample fetch registration
+ - BUG/MINOR: stats: Add a missing break in a switch statement
+ - BUG/MINOR: lua: Properly initialize the buffer's fields for string samples in hlua_lua2(smp|arg)
+ - CLEANUP: lua: Get rid of obsolete (size_t *) cast in hlua_lua2(smp|arg)
+ - BUG/MEDIUM: lua: Store stick tables into the sample's `t` field
+ - CLEANUP: proxy: Remove `proxy_tbl_by_name`
+ - BUILD: ssl: fix a warning when built with openssl < 1.0.2
+ - DOC: replace utf-8 quotes by ascii ones
+ - BUG/MEDIUM: fd: HUP is an error only when write is active
+ - BUG/MINOR: action: do-resolve does not yield on requests with body
+ - Revert "MINOR: cache: allow caching of OPTIONS request"
+
+2019/07/16 : 2.1-dev1
+ - BUG/MEDIUM: h2/htx: Update data length of the HTX when the cookie list is built
+ - DOC: this is a development branch again.
+ - MEDIUM: Make 'block' directive fatal
+ - MEDIUM: Make 'redispatch' directive fatal
+ - MEDIUM: Make '(cli|con|srv)timeout' directive fatal
+ - MEDIUM: Remove 'option independant-streams'
+ - MINOR: sample: Add sha2([<bits>]) converter
+ - MEDIUM: server: server-state global file stored in a tree
+ - BUG/MINOR: lua/htx: Make txn.req_req_* and txn.res_rep_* HTX aware
+ - BUG/MINOR: mux-h1: Add the header connection in lower case in outgoing messages
+ - BUG/MEDIUM: compression: Set Vary: Accept-Encoding for compressed responses
+ - MINOR: htx: Add the function htx_change_blk_value_len()
+ - BUG/MEDIUM: htx: Fully update HTX message when the block value is changed
+ - BUG/MEDIUM: mux-h2: Reset padlen when several frames are demux
+ - BUG/MEDIUM: mux-h2: Remove the padding length when a DATA frame size is checked
+ - BUG/MEDIUM: lb_fwlc: Don't test the server's lb_tree from outside the lock
+ - BUG/MAJOR: sample: Wrong stick-table name parsing in "if/unless" ACL condition.
+ - BUILD: mworker: silence two printf format warnings around getpid()
+ - BUILD: makefile: use :space: instead of digits to count commits
+ - BUILD: makefile: adjust the sed expression of "make help" for solaris
+ - BUILD: makefile: do not rely on shell substitutions to determine git version
+ - BUG/MINOR: mworker-prog: Fix segmentation fault during cfgparse
+ - BUG/MINOR: spoe: Fix memory leak if failing to allocate memory
+ - BUG/MEDIUM: mworker: don't call the thread and fdtab deinit
+ - BUG/MEDIUM: stream_interface: Don't add SI_FL_ERR if the state is < SI_ST_CON.
+ - BUG/MEDIUM: connections: Always add the xprt handshake if needed.
+ - BUG/MEDIUM: ssl: Don't do anything in ssl_subscribe if we have no ctx.
+ - BUG/MEDIUM: mworker/cli: command pipelining doesn't work anymore
+ - BUG/MINOR: htx: Save hdrs_bytes when the HTX start-line is replaced
+ - BUG/MAJOR: mux-h1: Don't crush trash chunk area when outgoing message is formatted
+ - BUG/MINOR: memory: Set objects size for pools in the per-thread cache
+ - BUG/MINOR: log: Detect missing sampling ranges in config
+ - BUG/MEDIUM: proto_htx: Don't add EOM on 1xx informational messages
+ - BUG/MEDIUM: mux-h1: Use buf_room_for_htx_data() to detect too large messages
+ - BUG/MINOR: mux-h1: Make format errors during output formatting fatal
+ - BUG/MEDIUM: ssl: Don't attempt to set alpn if we're not using SSL.
+ - BUG/MEDIUM: mux-h1: Always release H1C if a shutdown for writes was reported
+ - BUG/MINOR: mworker/cli: don't output a \n before the response
+ - BUG/MEDIUM: checks: unblock signals in external checks
+ - BUG/MINOR: mux-h1: Skip trailers for non-chunked outgoing messages
+ - BUG/MINOR: mux-h1: Don't return the empty chunk on HEAD responses
+ - BUG/MEDIUM: connections: Always call shutdown, with no linger.
+ - BUG/MEDIUM: checks: Make sure the tasklet won't run if the connection is closed.
+ - BUG/MINOR: contrib/prometheus-exporter: Don't use channel_htx_recv_max()
+ - BUG/MINOR: hlua: Don't use channel_htx_recv_max()
+ - BUG/MEDIUM: channel/htx: Use the total HTX size in channel_htx_recv_limit()
+ - BUG/MINOR: hlua/htx: Respect the reserve when HTX data are sent
+ - BUG/MINOR: contrib/prometheus-exporter: Respect the reserve when data are sent
+ - BUG/MEDIUM: connections: Make sure we're unsubscribe before upgrading the mux.
+ - BUG/MEDIUM: servers: Authorize tfo in default-server.
+ - BUG/MEDIUM: sessions: Don't keep an extra idle connection in sessions.
+ - MINOR: server: Add "no-tfo" option.
+ - BUG/MINOR: contrib/prometheus-exporter: Don't try to add empty data blocks
+ - MINOR: action: Add the return code ACT_RET_DONE for actions
+ - BUG/MEDIUM: http/applet: Finish request processing when a service is registered
+ - BUG/MEDIUM: lb_fas: Don't test the server's lb_tree from outside the lock
+ - BUG/MEDIUM: mux-h1: Handle TUNNEL state when outgoing messages are formatted
+ - BUG/MINOR: mux-h1: Don't process input or output if an error occurred
+ - MINOR: stream-int: Factorize processing done after sending data in si_cs_send()
+ - BUG/MEDIUM: stream-int: Don't rely on CF_WRITE_PARTIAL to unblock opposite si
+ - DOC: contrib: spoa_server Add some hints for building spoa_server
+ - DOC: Fix typo in intro.txt
+ - BUG/MEDIUM: servers: Don't forget to set srv_cs to NULL if we can't reuse it.
+ - BUG/MINOR: ssl: revert empty handshake detection in OpenSSL <= 1.0.2
+ - MINOR: pools: release the pool's lock during the malloc/free calls
+ - MINOR: pools: always pre-initialize allocated memory outside of the lock
+ - MINOR: pools: make the thread harmless during the mmap/munmap syscalls
+ - BUG/MEDIUM: fd/threads: fix excessive CPU usage on multi-thread accept
+ - BUG/MINOR: server: Be really able to keep "pool-max-conn" idle connections
+ - BUG/MEDIUM: checks: Don't attempt to read if we destroyed the connection.
+ - BUG/MEDIUM: da: cast the chunk to string.
+ - DOC: Fix typos and grammar in configuration.txt
+ - CLEANUP: proto_tcp: Remove useless header inclusions.
+ - BUG/MEDIUM: servers: Fix a race condition with idle connections.
+ - MINOR: task: introduce work lists
+ - BUG/MAJOR: listener: fix thread safety in resume_listener()
+ - BUG/MEDIUM: mux-h1: Don't release h1 connection if there is still data to send
+ - BUG/MINOR: mux-h1: Correctly report Ti timer when HTX and keepalives are used
+ - BUG/MEDIUM: streams: Don't give up if we couldn't send the request.
+ - BUG/MEDIUM: streams: Don't redispatch with L7 retries if redispatch isn't set.
+ - BUG/MINOR: mux-pt: do not pretend there's more data after a read0
+ - BUG/MEDIUM: tcp-check: unbreak multiple connect rules again
+ - MEDIUM: mworker-prog: Add user/group options to program section
+ - REGTESTS: checks: tcp-check connect to multiple ports
+ - BUG/MEDIUM: threads: cpu-map designating a single thread/process are ignored
+
+2019/06/16 : 2.1-dev0
+ - exact copy of 2.0.0
+
+2019/06/16 : 2.0.0
+ - MINOR: fd: Don't use atomic operations when it's not needed.
+ - DOC: mworker-prog: documentation for the program section
+ - MINOR: http: add a new "http-request replace-uri" action
+ - BUG/MINOR: 51d/htx: The _51d_fetch method, and the methods it calls are now HTX aware.
+ - MINOR: 51d: Added dummy libraries for the 51Degrees module for testing.
+ - MINOR: mworker: change formatting in uptime field of "show proc"
+ - MINOR: mworker: add the HAProxy version in "show proc"
+ - MINOR: doc: Remove -Ds option in man page
+ - MINOR: doc: add master-worker in the man page
+ - MINOR: doc: mention HAPROXY_LOCALPEER in the man
+ - BUILD: Silence gcc warning about unused return value
+ - CLEANUP: 51d: move the 51d dummy lib to contrib/51d/src to match the real lib
+ - BUILD: travis-ci: add 51Degree device detection, update openssl to 1.1.1c
+ - MINOR: doc: update the manpage and usage message about -S
+ - BUILD/MINOR: 51d: Updated build registration output to indicate whether the library is a dummy one or not.
+ - BUG/MEDIUM: h1: Don't wait for handshake if we had an error.
+ - BUG/MEDIUM: h1: Wait for the connection if the handshake didn't complete.
+ - BUG/MINOR: task: prevent schedulable tasks from starving under high I/O activity
+ - BUG/MINOR: fl_trace/htx: Be sure to always forward trailers and EOM
+ - BUG/MINOR: channel/htx: Call channel_htx_full() from channel_full()
+ - BUG/MINOR: http: Use the global value to limit the number of parsed headers
+ - BUG/MINOR: htx: Detect when tail_addr meet end_addr to maximize free rooms
+ - BUG/MEDIUM: htx: Don't change position of the first block during HTX analysis
+ - CLEANUP: channel: Remove channel_htx_fwd_payload() and channel_htx_fwd_all()
+ - BUG/MEDIUM: proto_htx: Introduce the state ENDING during forwarding
+ - MINOR: htx: Add 3 flags on the start-line to deal with the request schemes
+ - MINOR: h2: Set flags about the request's scheme on the start-line
+ - MINOR: mux-h1: Set flags about the request's scheme on the start-line
+ - MINOR: mux-h2: Forward clients scheme to servers checking start-line flags
+ - MEDIUM: server: server-state only rely on server name
+ - CLEANUP: connection: rename the wait_event.task field to .tasklet
+ - CLEANUP: tasks: rename task_remove_from_tasklet_list() to tasklet_remove_*
+ - BUG/MEDIUM: connections: Don't call shutdown() if we want to disable linger.
+ - DOC: add some environment variables in section 2.3
+ - BUILD: makefile: clarify the "help" output and list options
+ - BUG/MINOR: mux-h1: Wake busy mux for I/O when message is fully sent
+ - BUG: tasks: fix bug introduced by latest scheduler cleanup
+ - BUG/MEDIUM: mux-h2: fix early close with option abortonclose
+ - BUG/MEDIUM: connections: Don't use ALPN to pick mux when in mode TCP.
+ - BUG/MEDIUM: connections: Don't try to send early data if we have no mux.
+ - BUG/MEDIUM: mux-h2: properly account for the appended data in HTX
+ - BUILD: makefile: further clarify the "help" output and list targets
+ - BUILD: makefile: rename "linux2628" to "linux-glibc" and remove older targets
+ - BUILD: travis-ci: switch to linux-glibc instead of linux2628
+ - DOC: update few references to the linux* targets and change them to linux-glibc
+ - BUILD: makefile: detect and reject recently removed linux targets
+ - BUILD: makefile: enable linux namespaces by default on linux
+ - BUILD: makefile: enable TFO on linux platforms
+ - BUILD: makefile: enable getaddrinfo on the linux-glibc target
+ - DOC: small updates to the CONTRIBUTING file
+ - BUG/MEDIUM: ssl: Make sure we initiate the handshake after using early data.
+ - CLEANUP: removed obsolete examples an move a few to better places
+ - DOC: Fix typos in CONTRIBUTING
+ - DOC: update the outdated ROADMAP file
+ - DOC: create a BRANCHES file to explain the life cycle
+ - DOC: mention in INSTALL haproxy 2.0 is a long-term supported stable version
+ - BUILD: travis-ci: TFO and GETADDRINFO are now enabled by default
+ - BUILD: makefile: make the obsolete target detection compatible with make-3.80
+ - BUILD: tools: work around an internal compiler bug in gcc-3.4
+ - BUILD: pattern: work around an internal compiler bug in gcc-3.4
+ - BUILD: makefile: enable USE_RT on Solaris
+ - BUILD: makefile: do not use echo -n
+ - DOC: mention a few common build errors in the INSTALL file
+
+2019/06/11 : 2.0-dev7
+ - BUG/MEDIUM: mux-h2: make sure the connection timeout is always set
+ - MINOR: tools: add new bitmap manipulation functions
+ - MINOR: logs: use the new bitmap functions instead of fd_sets for encoding maps
+ - MINOR: chunks: Make sure trash_size is only set once.
+ - Revert "MINOR: chunks: Make sure trash_size is only set once."
+ - MINOR: threads: serialize threads initialization
+ - MINOR: peers: data structure simplifications for server names dictionary cache.
+ - DOC: peers: Update for dictionary cache entries for peers protocol.
+ - MINOR: dict: Store the length of the dictionary entries.
+ - MINOR: peers: A bit of optimization when encoding cached server names.
+ - MINOR: peers: Optimization for dictionary cache lookup.
+ - MEDIUM: tools: improve time format error detection
+ - BUG/MEDIUM: H1: When upgrading, make sure we don't free the buffer too early.
+ - BUG/MEDIUM: stream_interface: Make sure we call si_cs_process() if CS_FL_EOI.
+ - MINOR: threads: avoid clearing harmless twice in thread_release()
+ - MEDIUM: threads: add thread_sync_release() to synchronize steps
+ - BUG/MEDIUM: init/threads: prevent initialized threads from starting before others
+ - OPTIM/MINOR: init/threads: only call protocol_enable_all() on first thread
+ - BUG/MINOR: dict: race condition fix when inserting dictionary entries.
+ - MEDIUM: init/threads: don't use spinlocks during the init phase
+ - BUG/MINOR: cache/htx: Fix the counting of data already sent by the cache applet
+ - BUG/MEDIUM: compression/htx: Fix the adding of the last data block
+ - MINOR: flt_trace: Don't scrash the original offset during the random forwarding
+ - MAJOR: htx: Rework how free rooms are tracked in an HTX message
+ - MINOR: htx: Add the function htx_move_blk_before()
+ - Revert "BUG/MEDIUM: H1: When upgrading, make sure we don't free the buffer too early."
+ - BUG/MINOR: http-rules: mention "deny_status" for "deny" in the error message
+ - MINOR: http: turn default error files to HTTP/1.1
+ - BUG/MEDIUM: h1: Don't try to subscribe if we had a connection error.
+ - BUG/MEDIUM: h1: Don't consider we're connected if the handshake isn't done.
+ - MINOR: contrib/spoa_server: Upgrade SPOP to 2.0
+ - BUG/MEDIUM: contrib/spoa_server: Set FIN flag on agent frames
+ - MINOR: contrib/spoa_server: Add random IP score
+ - DOC/MINOR: contrib/spoa_server: Fix typo in README
+
+2019/06/07 : 2.0-dev6
+ - BUG/MEDIUM: connection: fix multiple handshake polling issues
+ - MINOR: connection: also stop receiving after a SOCKS4 response
+ - MINOR: mux-h1: don't try to recv() before the connection is ready
+ - BUG/MEDIUM: mux-h1: only check input data for the current stream, not next one
+ - MEDIUM: mux-h1: don't use CS_FL_REOS anymore
+ - CLEANUP: connection: remove the now unused CS_FL_REOS flag
+ - CONTRIB: debug: add 4 missing connection/conn_stream flags
+ - MEDIUM: stream: make a full process_stream() loop when completing I/O on exit
+ - MINOR: server: increase the default pool-purge-delay to 5 seconds
+ - BUILD: tools: do not use the weak attribute for trace() on obsolete linkers
+ - BUG/MEDIUM: vars: make sure the scope is always valid when accessing vars
+ - BUG/MEDIUM: vars: make the tcp/http unset-var() action support conditions
+ - BUILD: task: fix a build warning when threads are disabled
+ - CLEANUP: peers: Remove tabs characters.
+ - CLEANUP: peers: Replace hard-coded values by macros.
+ - BUG/MINOR: peers: Wrong stick-table update message building.
+ - MINOR: dict: Add dictionary new data structure.
+ - MINOR: peers: Add a LRU cache implementation for dictionaries.
+ - MINOR: stick-table: Add "server_name" new data type.
+ - MINOR: cfgparse: Space allocation for "server_name" stick-table data type.
+ - MINOR: proxy: Add a "server by name" tree to proxy.
+ - MINOR: server: Add a dictionary for server names.
+ - MINOR: stream: Stickiness server lookup by name.
+ - MINOR: peers: Make peers protocol support new "server_name" data type.
+ - MINOR: stick-table: Make the CLI stick-table handler support dictionary entry data type.
+ - REGTEST: Add a basic server by name stickiness reg test.
+ - MINOR: peers: Add dictionary cache information to "show peers" CLI command.
+ - MINOR: peers: Replace hard-coded for peer protocol 64-bits value encoding by macros.
+ - MINOR: peers: Replace hard-coded values for peer protocol messaging by macros.
+ - CLEANUP: ssl: remove unneeded defined(OPENSSL_IS_BORINGSSL)
+ - BUILD: travis-ci improvements
+ - MINOR: SSL: add client/server random sample fetches
+ - BUG/MINOR: channel/htx: Don't alter channel during forward for empty HTX message
+ - BUG/MINOR: contrib/prometheus-exporter: Add HTX data block in one time
+ - BUG/MINOR: mux-h1: errflag must be set on H1S and not H1M during output processing
+ - MEDIUM: mux-h1: refactor output processing
+ - MINOR: mux-h1: Add the flag HAVE_O_CONN on h1s
+ - MINOR: mux-h1: Add h1_eval_htx_hdrs_size() to estimate size of the HTX headers
+ - MINOR: mux-h1: Don't count the EOM in the estimated size of headers
+ - MEDIUM: cache/htx: Always store info about HTX blocks in the cache
+ - MEDIUM: htx: Add the parsing of trailers of chunked messages
+ - MINOR: htx: Don't use end-of-data blocks anymore
+ - BUG/MINOR: mux-h1: Don't send more data than expected
+ - BUG/MINOR: flt_trace/htx: Only apply the random forwarding on the message body.
+ - BUG/MINOR: peers: Wrong "server_name" decoding.
+ - BUG/MEDIUM: servers: Don't attempt to destroy idle connections if disabled.
+ - MEDIUM: checks: Make sure we unsubscribe before calling cs_destroy().
+ - MEDIUM: connections: Wake the upper layer even if sending/receiving is disabled.
+ - MEDIUM: ssl: Handle subscribe by itself.
+ - MINOR: ssl: Make ssl_sock_handshake() static.
+ - MINOR: connections: Add a new xprt method, remove_xprt.
+ - MINOR: connections: Add a new xprt method, add_xprt().
+ - MEDIUM: connections: Introduce a handshake pseudo-XPRT.
+ - MEDIUM: connections: Remove CONN_FL_SOCK*
+ - BUG/MEDIUM: ssl: Don't forget to initialize ctx->send_recv and ctx->recv_wait.
+ - BUG/MINOR: peers: Wrong server name parsing.
+ - MINOR: server: really increase the pool-purge-delay default to 5 seconds
+ - BUG/MINOR: stream: don't emit a send-name-header in conn error or disconnect states
+ - MINOR: stream-int: use bit fields to match multiple stream-int states at once
+ - MEDIUM: stream-int: remove dangerous interval checks for stream-int states
+ - MEDIUM: stream-int: introduce a new state SI_ST_RDY
+ - MAJOR: stream-int: switch from SI_ST_CON to SI_ST_RDY on I/O
+ - MEDIUM: stream-int: make idle-conns switch to ST_RDY
+ - MEDIUM: stream: re-arrange the connection setup status reporting
+ - MINOR: stream-int: split si_update() into si_update_rx() and si_update_tx()
+ - MINOR: stream-int: make si_sync_send() from the send code of si_update_both()
+ - MEDIUM: stream: rearrange the events to remove the loop
+ - MEDIUM: stream: only loop on flags relevant to the analysers
+ - MEDIUM: stream: don't abusively loop back on changes on CF_SHUT*_NOW
+ - BUILD: stream-int: avoid a build warning in dev mode in si_state_bit()
+ - BUILD: peers: fix a build warning about an incorrect initialization
+ - BUG/MINOR: time: make sure only one thread sets global_now at boot
+ - BUG/MEDIUM: tcp: Make sure we keep the polling consistent in tcp_probe_connect.
+
+2019/06/02 : 2.0-dev5
+ - BUILD: watchdog: use si_value.sival_int, not si_int for the timer's value
+ - BUILD: signals: FreeBSD has SI_LWP instead of SI_TKILL
+ - BUILD: watchdog: condition it to USE_RT
+ - MINOR: raw_sock: report global traffic statistics
+ - MINOR: stats: report the global output bit rate in human readable form
+ - BUG/MINOR: proto-htx: Try to keep connections alive on redirect
+ - BUG/MEDIUM: spoe: Don't use the SPOE applet after releasing it
+ - BUG/MINOR: lua: Set right direction and flags on new HTTP objects
+ - BUG/MINOR: mux-h2: Count EOM in bytes sent when a HEADERS frame is formatted
+ - BUG/MINOR: mux-h1: Report EOI instead EOS on parsing error or H2 upgrade
+ - BUG/MEDIUM: proto-htx: Not forward too much data when 1xx responses are handled
+ - BUG/MINOR: htx: Remove a forgotten while loop in htx_defrag()
+ - DOC: fix typos
+ - BUG/MINOR: ssl_sock: Fix memory leak when disabling compression
+ - OPTIM: freq-ctr: don't take the date lock for most updates
+ - MEDIUM: mux-h2: avoid doing expensive buffer realigns when not absolutely needed
+ - CLEANUP: debug: remove the TRACE() macro
+ - MINOR: buffer: introduce b_make() to make a buffer from its parameters
+ - MINOR: buffer: add a new buffer ring API to manipulate rings of buffers
+ - MEDIUM: mux-h2: replace all occurrences of mbuf with a buffer ring
+ - MEDIUM: mux-h2: make the conditions to send based on mbuf, not just its tail
+ - MINOR: mux-h2: introduce h2_release_mbuf() to release all buffers in the mbuf ring
+ - MEDIUM: mux-h2: make the send() function iterate over all mux buffers
+ - CLEANUP: mux-h2: consistently use a local variable for the mbuf
+ - MINOR: mux-h2: report the mbuf's head and tail in "show fd"
+ - MAJOR: mux-h2: switch to next mux buffer on buffer full condition.
+ - BUILD: connections: shut up gcc about impossible out-of-bounds warning
+ - BUILD: ssl: fix latest LibreSSL reg-test error
+ - MINOR: cli/activity: remove "fd_del" and "fd_skip" from show activity
+ - MINOR: cli/activity: add 3 general purpose counters in development mode
+ - BUG/MAJOR: lb/threads: make sure the avoided server is not full on second pass
+ - BUG/MEDIUM: queue: fix the tree walk in pendconn_redistribute.
+ - BUG/MEDIUM: threads: fix double-word CAS on non-optimized 32-bit platforms
+ - MEDIUM: config: now alert when two servers have the same name
+ - MINOR: htx: Remove the macro IS_HTX_SMP() and always use IS_HTX_STRM() instead
+ - MINOR: htx: Move the macro IS_HTX_STRM() in proto/stream.h
+ - MINOR: htx: Store the head position instead of the wrap one
+ - MINOR: htx: Store start-line block's position instead of address of its payload
+ - MINOR: htx: Add functions to get the first block of an HTX message
+ - MINOR: mux-h2/htx: Get the start-line from the head when HEADERS frame is built
+ - MINOR: htx: Replace the function http_find_stline() by http_get_stline()
+ - CLEANUP: htx: Remove unused function htx_get_stline()
+ - MINOR: http/htx: Use sl_pos directly to replace the start-line
+ - MEDIUM: http/htx: Perform analysis relatively to the first block
+ - MINOR: channel/htx: Call channel_htx_recv_max() from channel_recv_max()
+ - MINOR: htx: Add function htx_get_max_blksz()
+ - BUG/MINOR: htx: Change htx_xfer_blk() to also count metadata
+ - MEDIUM: mux-h1: Use the count value received from the SI in h1_rcv_buf()
+ - MINOR: mux-h2: Use the count value received from the SI in h2_rcv_buf()
+ - MINOR: stream-int: Don't use the flag CO_RFL_KEEP_RSV anymore in si_cs_recv()
+ - MINOR: connection: Remove the unused flag CO_RFL_KEEP_RSV
+ - MINOR: mux-h2/htx: Support zero-copy when possible in h2_rcv_buf()
+ - MINOR: htx: Add a field to set the memory used by headers in the HTX start-line
+ - MINOR: h2/htx: Set hdrs_bytes on the SL when an HTX message is produced
+ - MINOR: mux-h1: Set hdrs_bytes on the SL when an HTX message is produced
+ - MINOR: htx: Be sure to xfer all headers in one time in htx_xfer_blks()
+ - MEDIUM: htx: 1xx messages are now part of the final responses
+ - MINOR: channel/htx: Add function to forward headers of an HTX message
+ - MINOR: filters/htx: Use channel_htx_fwd_headers() after headers filtering
+ - MINOR: proto-htx: Use channel_htx_fwd_headers() to forward 1xx responses
+ - MEDIUM: htx: Store the first block position instead of the start-line one
+ - MINOR: stats/htx: don't use the first block position but the head one
+ - MINOR: channel/htx: Add functions to forward a part or all HTX payload
+ - MINOR: proto-htx: Use channel_htx_fwd_all() when unfiltered body are forwarded
+ - MEDIUM: filters/htx: Filter body relatively to the first block
+ - MINOR: htx: Optimize htx_drain() when all data are drained
+ - MINOR: htx: don't rely on htx_find_blk() anymore in the function htx_truncate()
+ - MINOR: htx: remove the unused function htx_find_blk()
+ - MINOR: htx: Remove support of pseudo headers because it is unused
+ - BUG/MEDIUM: http: fix "http-request reject" when not final
+ - MINOR: ssl: Make sure the underlying xprt's init method doesn't fail.
+ - MINOR: ssl: Don't forget to call the close method of the underlying xprt.
+ - MINOR: htx: rename htx_append_blk_value() to htx_add_data_atonce()
+ - MINOR: htx: make htx_add_data() return the transmitted byte count
+ - MEDIUM: htx: make htx_add_data() never defragment the buffer
+ - MINOR: activity: write totals on the "show activity" output
+ - MINOR: activity: report totals and average separately
+ - MEDIUM: poller: separate the wait time from the wake events
+ - MINOR: activity: report the number of failed pool/buffer allocations
+ - MEDIUM: buffers: relax the buffer lock a little bit
+ - MINOR: task: turn the WQ lock to an RW_LOCK
+ - MEDIUM: task: don't grab the WR lock just to check the WQ
+ - BUG/MEDIUM: mux-h1: Don't skip the TCP splicing when there is no more data to read
+ - MEDIUM: sessions: Introduce session flags.
+ - BUG/MEDIUM: h2: Don't forget to set h2s->cs to NULL after having free'd cs.
+ - BUG/MEDIUM: mux-h2: fix the conditions to end the h2_send() loop
+ - BUG/MEDIUM: mux-h2: don't refrain from offering oneself a used buffer
+ - BUG/MEDIUM: connection: Use the session to get the origin address if needed.
+ - MEDIUM: tasks: Get rid of active_tasks_mask.
+ - MEDIUM: connection: Upstream SOCKS4 proxy support
+ - BUILD: contrib/prometheus: fix build breakage caused by move of idle_pct
+ - BUG/MINOR: deinit/threads: make hard-stop-after perform a clean exit
+
+2019/05/22 : 2.0-dev4
+ - BUILD: enable freebsd builds on cirrus-ci
+ - BUG/MINOR: http_fetch: Rely on the smp direction for "cookie()" and "hdr()"
+ - MEDIUM: Make 'option forceclose' actually warn
+ - MEDIUM: Make 'resolution_pool_size' directive fatal
+ - DOC: management: place "show activity" at the right place
+ - MINOR: cli/activity: show the dumping thread ID starting at 1
+ - MINOR: task: export global_task_mask
+ - MINOR: cli/debug: add a thread dump function
+ - BUG/MEDIUM: streams: Don't use CF_EOI to decide if the request is complete.
+ - BUG/MEDIUM: streams: Try to L7 retry before aborting the connection.
+ - BUG/MINOR: debug: make ha_task_dump() always check the task before dumping it
+ - BUG/MINOR: debug: make ha_task_dump() actually dump the requested task
+ - MINOR: debug: make ha_thread_dump() and ha_task_dump() take a buffer
+ - BUG/MINOR: debug: don't check the call date on tasklets
+ - MINOR: thread: implement ha_thread_relax()
+ - MINOR: task: put barriers after each write to curr_task
+ - MINOR: task: always reset curr_task when freeing a task or tasklet
+ - MINOR: stream: detach the stream from its own task on stream_free()
+ - MEDIUM: debug/threads: implement an advanced thread dump system
+ - REGTEST: extend the check duration on tls_health_checks and mark it slow
+ - DOC: fix "successful" typo
+ - MINOR: init: setenv HAPROXY_CFGFILES
+ - MINOR: threads/init: synchronize the threads startup
+ - MEDIUM: init/mworker: make the pipe register function a regular initcall
+ - CLEANUP: memory: make the fault injection code use the OTHER_LOCK label
+ - CLEANUP: threads: remove the now unused START_LOCK label
+ - MINOR: init/threads: make the global threads an array of structs
+ - MINOR: threads: add each thread's clockid into the global thread_info
+ - CLEANUP: stream: remove an obsolete debugging test
+ - MINOR: tools: add dump_hex()
+ - MINOR: debug: implement ha_panic()
+ - MINOR: debug/cli: add some debugging commands for developers
+ - MINOR: tools: provide a may_access() function and make dump_hex() use it
+ - MINOR: debug: make ha_panic() report threads starting at 1
+ - REORG: compat: move some integer limit definitions from standard.h to compat.h
+ - REORG: threads: move the struct thread_info from global.h to hathreads.h
+ - MINOR: compat: make sure to always define clockid_t
+ - MINOR: threads: always place the clockid in the struct thread_info
+ - MINOR: threads: add a thread-local thread_info pointer "ti"
+ - MINOR: time: move the cpu, mono, and idle time to thread_info
+ - MINOR: time: add a function to retrieve another thread's cputime
+ - MINOR: debug: report each thread's cpu usage in "show thread"
+ - BUILD: threads: only assign the clock_id when supported
+ - BUILD: makefile: use USE_OBSOLETE_LINKER for solaris
+ - BUILD: makefile: remove -fomit-frame-pointer optimisation (solaris)
+ - MAJOR: polling: add event ports support (Solaris)
+ - BUG/MEDIUM: streams: Don't switch from SI_ST_CON to SI_ST_DIS on read0.
+ - CLEANUP: time: refine the test on _POSIX_TIMERS
+ - MINOR: compat: define a new empty type empty_t for non-implemented fields
+ - CLEANUP: time: switch clockid_t to empty_t when not available
+ - BUG/MINOR: mworker: Fix memory leak of mworker_proc members
+ - CLEANUP: objtype: make obj_type() and obj_type_name() take consts
+ - MINOR: debug: switch to SIGURG for thread dumps
+ - CLEANUP: threads: really move thread_info to hathreads.c
+ - MINOR: threads: make threads_{harmless|want_rdv}_mask constant 0 without threads
+ - CLEANUP: debug: always report harmless/want_rdv even without threads
+ - MINOR: threads: implement ha_tkill() and ha_tkillall()
+ - CLEANUP: debug: make use of ha_tkill() and remove ifdefs
+ - MINOR: stream: introduce a stream_dump() function and use it in stream_dump_and_crash()
+ - MINOR: debug: dump streams when an applet, iocb or stream is known
+ - MINOR: threads: add a "stuck" flag to the thread_info struct
+ - MINOR: threads: add a timer_t per thread in thread_info
+ - MAJOR: watchdog: implement a thread lockup detection mechanism
+ - MINOR: stream: remove the cpu time detection from process_stream()
+ - MINOR: connection: report the mux names in "haproxy -vv"
+ - CLEANUP: mux-h1: use "H1" and not "h1" as the mux's name
+ - BUG/MEDIUM: WURFL: segfault in wurfl-get() with missing info.
+ - MINOR: WURFL: call header_retireve_callback() in dummy library
+ - MINOR: WURFL: fixed Engine load failed error when wurfl-information-list contains wurfl_root_id
+ - MINOR: WURFL: shows log messages during module initialization
+ - MINOR: WURFL: removes heading wurfl-information-separator from wurfl-get-all() and wurfl-get() results
+ - MINOR: WURFL: wurfl_get() and wurfl_get_all() now return an empty string if device detection fails
+ - MEDIUM: WURFL: HTX awareness.
+ - MINOR: WURFL: module version bump to 2.0
+ - MINOR: WURFL: do not emit warnings when not configured
+ - CONTRIB: wurfl: address 3 build issues in the wurfl dummy library
+ - BUG/MEDIUM: init/threads: provide per-thread alloc/free function callbacks
+ - BUILD: travis: add sanitizers to travis-ci builds
+ - BUILD: time: remove the test on _POSIX_C_SOURCE
+ - CLEANUP: build: rename some build macros to use the USE_* ones
+ - CLEANUP: raw_sock: remove support for very old linux splice bug workaround
+ - BUG/MEDIUM: dns: make the port numbers unsigned
+ - MEDIUM: config: deprecate the antique req* and rsp* commands
+
+2019/05/15 : 2.0-dev3
+ - BUG/MINOR: peers: Really close the sessions with no heartbeat.
+ - CLEANUP: peers: remove useless annoying tabulations.
+ - CLEANUP: peers: replace timeout constants by macros.
+ - REGTEST: Enable again reg tests with HEAD HTTP method usage.
+ - DOC: The option httplog is no longer valid in a backend.
+ - DOC: peers: Peers protocol documentation update.
+ - REGTEST: remove unexpected "nbthread" statement from Lua test cases
+ - BUILD: Makefile: remove 11-years old workarounds for deprecated options
+ - BUILD: remove 10-years old error message for obsolete option USE_TCPSPLICE
+ - BUILD: Makefile: remove outdated support for dlmalloc
+ - BUILD: Makefile: consider a variable's origin and not its value for the options list
+ - BUILD: Makefile: also report disabled options in the BUILD_OPTIONS variable
+ - BUILD: Makefile: shorten default settings declaration
+ - BUILD: Makefile: clean up the target declarations
+ - BUILD: report the whole feature set with their status in haproxy -vv
+ - BUILD: pass all "USE_*" variables as -DUSE_* to the compiler
+ - REGTEST: script: make the script use the new features list
+ - REGTEST: script: remove platform-specific assignments of OPTIONS
+ - BUG/MINOR: peers: Missing initializations after peer session shutdown.
+ - BUG/MINOR: contrib/prometheus-exporter: Fix applet accordingly to recent changes
+ - BUILD/MINOR: listener: Silent a few signedness warnings.
+ - BUG/MINOR: mux-h1: Only skip invalid C-L headers on output
+ - BUG/MEDIUM: mworker: don't free the wrong child when not found
+ - BUG/MEDIUM: checks: Don't bother subscribing if we have a connection error.
+ - BUG/MAJOR: checks: segfault during tcpcheck_main
+ - BUILD: makefile: work around an old bug in GNU make-3.80
+ - BUILD: makefile: work around another bug in make 3.80
+ - BUILD: http: properly mark some struct as extern
+ - BUILD: chunk: properly declare pool_head_trash as extern
+ - BUILD: cache: avoid a build warning with some compilers/linkers
+ - MINOR: tools: make memvprintf() never pass a NULL target to vsnprintf()
+ - MINOR: tools: add an unsetenv() implementation
+ - BUILD: re-implement an initcall variant without using executable sections
+ - BUILD: use inttypes.h instead of stdint.h
+ - BUILD: connection: fix naming of ip_v field
+ - BUILD: makefile: fix build of IPv6 header on aix51
+ - BUILD: makefile: add _LINUX_SOURCE_COMPAT to build on AIX-51
+ - BUILD: define unsetenv on AIX 5.1
+ - BUILD: Makefile: disable shared cache on AIX 5.1
+ - MINOR: ssl: Add aes_gcm_dec converter
+ - REORG: mworker: move serializing functions to mworker.c
+ - REORG: mworker: move signals functions to mworker.c
+ - REORG: mworker: move IPC functions to mworker.c
+ - REORG: mworker: move signal handlers and related functions
+ - REORG: mworker: move mworker_cleanlisteners to mworker.c
+ - MINOR: mworker: calloc mworker_proc structures
+ - MINOR: mworker: don't use children variable anymore
+ - MINOR: cli: export cli_parse_default() definition in cli.h
+ - REORG: mworker/cli: move CLI functions to mworker.c
+ - MEDIUM: mworker-prog: implement program for master-worker
+ - MINOR: mworker/cli: show programs in 'show proc'
+ - BUG/MINOR: cli: correctly handle abns in 'show cli sockets'
+ - MINOR: cli: start addresses by a prefix in 'show cli sockets'
+ - MINOR: cli: export HAPROXY_CLI environment variable
+ - BUG/MINOR: htx: Preserve empty HTX messages with an unprocessed parsing error
+ - BUG/MINOR: proto_htx: Reset to_forward value when a message is set to DONE
+ - REGTEST: http-capture/h00000: Relax a regex matching the log message
+ - REGTEST: http-messaging/h00000: Fix the test when the HTX is enabled
+ - REGTEST: http-rules/h00003: Use a different client for requests expecting a 301
+ - REGTEST: log/b00000: Be sure the client always hits its timeout
+ - REGTEST: lua/b00003: Relax the regex matching the log message
+ - REGTEST: lua/b00003: Specify the HAProxy pid when the command ss is executed
+ - BUG/MEDIUM: peers: fix a case where peer session is not cleanly reset on release.
+ - BUG/MEDIUM: h2: Don't attempt to recv from h2_process_demux if we subscribed.
+ - BUG/MEDIUM: htx: fix random premature abort of data transfers
+ - BUG/MEDIUM: streams: Don't remove the SI_FL_ERR flag in si_update_both().
+ - BUG/MEDIUM: streams: Store prev_state before calling si_update_both().
+ - BUG/MEDIUM: stream: Don't clear the stream_interface flags in si_update_both.
+ - MINOR: initcall: Don't forget to define the __start/stop_init_##stg symbols.
+ - MINOR: threads: Implement thread_cpus_enabled() for FreeBSD.
+ - BUG/MEDIUM: pattern: assign pattern IDs after checking the config validity
+ - MINOR: skip get_gmtime where tm is unused
+ - MINOR: ssl: Activate aes_gcm_dec converter for BoringSSL
+ - BUG/MEDIUM: streams: Only re-run process_stream if we're in a connected state.
+ - BUG/MEDIUM: stream_interface: Don't bother doing chk_rcv/snd if not connected.
+ - BUG/MEDIUM: task/threads: address a fairness issue between local and global tasks
+ - BUG/MINOR: tasks: make sure the first task to be queued keeps its nice value
+ - BUG/MINOR: listener: renice the accept ring processing task
+ - MINOR: cli/listener: report the number of accepts on "show activity"
+ - MINOR: cli/activity: report the accept queue sizes in "show activity"
+ - BUG/MEDIUM: spoe: Queue message only if no SPOE applet is attached to the stream
+ - BUG/MEDIUM: spoe: Return an error if nothing is encoded for fragmented messages
+ - BUG/MINOR: spoe: Be sure to set tv_request when each message fragment is encoded
+ - BUG/MEDIUM: htx: Defrag if blocks position is changed and the payloads wrap
+ - BUG/MEDIUM: htx: Don't crush blocks payload when append is done on a data block
+ - MEDIUM: htx: Deprecate the option 'http-tunnel' and ignore it in HTX
+ - MINOR: proto_htx: Don't adjust transaction mode anymore in HTX analyzers
+ - BUG/MEDIUM: htx: Fix the process of HTTP CONNECT with h2 connections
+ - MINOR: mux-h1: Simplify handling of 1xx responses
+ - MINOR: stats/htx: Don't add "Connection: close" header anymore in stats responses
+ - MEDIUM: h1: Add an option to sanitize connection headers during parsing
+ - MEDIUM: mux-h1: Simplify the connection mode management by sanitizing headers
+ - MINOR: mux-h1: Don't release the conn_stream anymore when h1s is destroyed
+ - BUG/MINOR: mux-h1: Handle the flag CS_FL_KILL_CONN during a shutdown read/write
+ - MINOR: mux-h2: Add a mux_ops dedicated to the HTX mode
+ - MINOR: muxes: Add a flag to specify a multiplexer uses the HTX
+ - MINOR: stream: Set a flag when the stream uses the HTX
+ - MINOR: http: update the macro IS_HTX_STRM() to check the stream flag SF_HTX
+ - MINOR: http_fetch/htx: Use stream flags instead of px mode in smp_prefetch_htx
+ - MINOR: filters/htx: Use stream flags instead of px mode to instantiate a filter
+ - MINOR: muxes: Rely on conn_is_back() during init to handle front/back conn
+ - MEDIUM: muxes: Add an optional input buffer during mux initialization
+ - MINOR: muxes: Pass the context of the mux to destroy() instead of the connection
+ - MEDIUM: muxes: Be prepared to don't own connection during the release
+ - MEDIUM: connection: Add conn_upgrade_mux_fe() to handle mux upgrades
+ - MEDIUM: htx: Allow the option http-use-htx to be used on TCP proxies too
+ - MAJOR: proxy/htx: Handle mux upgrades from TCP to HTTP in HTX mode
+ - MAJOR: muxes/htx: Handle implicit upgrades from h1 to h2
+ - MAJOR: htx: Enable the HTX mode by default for all proxies
+ - REGTEST: Use HTX by default and add '--no-htx' option to disable it
+ - BUG/MEDIUM: muxes: Don't dereference mux context if null in release functions
+ - CLEANUP: task: do not export rq_next anymore
+ - MEDIUM: tasks: improve fairness between the local and global queues
+ - MEDIUM: tasks: only base the nice offset on the run queue depth
+ - MINOR: tasks: restore the lower latency scheduling when niced tasks are present
+ - BUG/MEDIUM: map: Fix memory leak in the map converter
+ - BUG/MINOR: ssl: Fix 48 byte TLS ticket key rotation
+ - BUILD: task/thread: fix single-threaded build of task.c
+ - BUILD: cli/threads: fix build in single-threaded mode
+ - BUG/MEDIUM: muxes: Make sure we unsubscribed when destroying mux ctx.
+ - BUG/MEDIUM: h2: Make sure we're not already in the send_list in h2_subscribe().
+ - BUG/MEDIUM: h2: Revamp the way send subscriptions works.
+ - MINOR: connections: Remove the SUB_CALL_UNSUBSCRIBE flag.
+ - BUG/MEDIUM: Threads: Only use the gcc >= 4.7 builtins when using gcc >= 4.7.
+ - BUILD: address a few cases of "static <type> inline foo()"
+ - BUILD: do not specify "const" on functions returning structs or scalars
+ - BUILD: htx: fix a used uninitialized warning on is_cookie2
+ - MINOR: peers: Add a new command to the CLI for peers.
+ - DOC: update for "show peers" CLI command.
+ - BUG/MAJOR: lb/threads: fix insufficient locking on round-robin LB
+ - MEDIUM: mworker: store the leaving state of a process
+ - MEDIUM: mworker-prog: implements 'option start-on-reload'
+ - CLEANUP: mworker: remove the type field in mworker_proc
+ - MEDIUM: mworker/cli: export the HAPROXY_MASTER_CLI variable
+ - MINOR: cli: don't add a semicolon at the end of HAPROXY_CLI
+ - MINOR: mworker: export HAPROXY_MWORKER=1 when running in mworker mode
+ - MINOR: init: add a "set-dumpable" global directive to enable core dumps
+ - BUG/MINOR: listener/mq: correctly scan all bound threads under low load
+ - BUG/MINOR: mworker: mworker_kill should apply to every child
+ - BUG/MINOR: mworker: don't exit with an ambiguous value
+ - BUG/MINOR: mworker: ensure that we still quit with SIGINT
+ - REGTESTS: exclude tests that require ssl, pcre if no such feature is enabled
+ - BUG/MINOR: mux-h1: Process input even if the input buffer is empty
+ - BUG/MINOR: mux-h1: Don't switch the parser in busy mode if other side has done
+ - BUG/MEDIUM: mux-h1: Notify the stream waiting for TCP splicing if ibuf is empty
+ - BUG/MEDIUM: mux-h1: Enable TCP splicing to exchange data only
+ - MINOR: mux-h1: Handle read0 during TCP splicing
+ - BUG/MEDIUM: htx: Don't return the start-line if the HTX message is empty
+ - BUG/MAJOR: http_fetch: Get the channel depending on the keyword used
+ - BUG/MINOR: http_fetch/htx: Allow permissive sample prefetch for the HTX
+ - BUG/MINOR: http_fetch/htx: Use HTX versions if the proxy enables the HTX mode
+ - BUG/MEDIUM: tasks: Make sure we set TASK_QUEUED before adding a task to the rq.
+ - BUG/MEDIUM: tasks: Make sure we modify global_tasks_mask with the rq_lock.
+ - MINOR: tasks: Don't consider we can wake task with tasklet_wakeup().
+ - MEDIUM: tasks: No longer use rq.node.leaf_p as a lock.
+ - MINOR: tasks: Don't set the TASK_RUNNING flag when adding in the tasklet list.
+ - BUG/MEDIUM: applets: Don't use task_in_rq().
+ - BUG/MAJOR: task: make sure never to delete a queued task
+ - MINOR: task/thread: factor out a wake-up condition
+ - CLEANUP: task: remain consistent when using the task's handler
+ - MEDIUM: tasks: Merge task_delete() and task_free() into task_destroy().
+ - MEDIUM: tasks: Don't account a destroyed task as a run task.
+ - BUG/MINOR: contrib/prometheus-exporter: Fix a typo in the run-queue metric type
+ - MINOR: contrib/prometheus-exporter: Remove useless rate metrics
+ - MINOR: contrib/prometheus-exporter: Rename some metrics to be more usable
+ - MINOR: contrib/prometheus-exporter: Follow best practices about metrics type
+ - BUG/MINOR: mworker: disable busy polling in the master process
+ - MEDIUM: tasks: Use __ha_barrier_store after modifying global_tasks_mask.
+ - MEDIUM: ssl: Give ssl_sock its own context.
+ - MEDIUM: connections: Move some fields from struct connection to ssl_sock_ctx.
+ - MEDIUM: ssl: provide its own subscribe/unsubscribe function.
+ - MEDIUM: connections: Provide a xprt_ctx for each xprt method.
+ - MEDIUM: ssl: provide our own BIO.
+ - BUILD/medium: ssl: Fix build with OpenSSL < 1.1.0
+ - MINOR: peers: adds counters on show peers about tasks calls.
+ - MEDIUM: enable travis-ci builds
+ - MINOR: fd: Add a counter of used fds.
+ - MEDIUM: connections: Add a way to control the number of idling connections.
+ - BUG/MEDIUM: maps: only try to parse the default value when it's present
+ - BUG/MINOR: acl: properly detect pattern type SMP_T_ADDR
+ - REGTEST: Missing REQUIRE_VERSION declarations.
+ - MINOR: proto_tcp: tcp-request content: enable set-dst and set-dst-var
+ - BUG/MEDIUM: h1: Don't parse chunks CRLF if not enough data are available
+ - BUG/MEDIUM: thread/http: Add missing locks in set-map and add-acl HTTP rules
+ - BUG/MEDIUM: stream: Don't request a server connection if a shutw was scheduled
+ - BUG/MINOR: 51d: Get the request channel to call CHECK_HTTP_MESSAGE_FIRST()
+ - BUG/MINOR: da: Get the request channel to call CHECK_HTTP_MESSAGE_FIRST()
+ - MINOR: gcc: Fix a silly gcc warning in connect_server()
+ - MINOR: ssl/cli: async fd io-handlers printable on show fd
+ - Revert "CLEANUP: wurfl: remove dead, broken and unmaintained code"
+ - BUILD: add USE_WURFL to the list of known build options
+ - MINOR: wurfl: indicate in haproxy -vv the wurfl version in use
+ - BUILD: wurfl: build fix for 1.9/2.0 code base
+ - CLEANUP: wurfl: removed deprecated methods
+ - DOC: wurfl: added point of contact in MAINTAINERS file
+ - MINOR: wurfl: enabled multithreading mode
+ - MINOR: contrib: dummy wurfl library
+ - MINOR: dns: dns_requester structures are now in a memory pool
+ - MINOR: dns: move callback affection in dns_link_resolution()
+ - MINOR: obj_type: new object type for struct stream
+ - MINOR: action: new '(http-request|tcp-request content) do-resolve' action
+ - MINOR: log: Extract some code to send syslog messages.
+ - REGTEST: replace LEVEL option by a more human readable one.
+ - REGTEST: rename the reg test files.
+ - REGTEST: adapt some reg tests after renaming.
+ - REGTEST: make the "run-regtests" script search for tests in reg-tests by default
+ - BUG/MAJOR: stream: Missing DNS context initializations.
+ - BUG/MEDIUM: stream: Fix the way early aborts on the client side are handled
+ - BUG/MINOR: spoe: Don't systematically wakeup SPOE stream in the applet handler
+ - BUG/MEDIUM: ssl: Return -1 on recv/send if we got EAGAIN.
+ - BUG/MAJOR: lb/threads: fix AB/BA locking issue in round-robin LB
+ - BUG/MAJOR: muxes: Use the HTX mode to find the best mux for HTTP proxies only
+ - BUG/MINOR: htx: Exclude TCP proxies when the HTX mode is handled during startup
+ - CLEANUP: task: report calls as unsigned in show sess
+ - MINOR: tasks/activity: report the context switch and task wakeup rates
+ - MINOR: stream: measure and report a stream's call rate in "show sess"
+ - MINOR: applet: measure and report an appctx's call rate in "show sess"
+ - BUILD: extend Travis CI config to support more platforms
+ - REGTEST: exclude osx and generic targets for 40be_2srv_odd_health_checks
+ - REGTEST: relax the IPv6 address format checks in converters_ipmask_concat_strcmp_field_word
+ - REGTEST: exclude OSX and generic targets from abns_socket.vtc
+ - BUILD: travis: remove the "allow_failures" entry
+ - BUG/MINOR: activity: always initialize the profiling variable
+ - MINOR: activity: make the profiling status per thread and not global
+ - MINOR: activity: enable automatic profiling turn on/off
+ - CLEANUP: standard: use proper const to addr_to_str() and port_to_str()
+ - BUG/MINOR: proto_http: properly reset the stream's call rate on keep-alive
+ - MINOR: connection: make the debugging helper functions safer
+ - MINOR: stream/debug: make a stream dump and crash function
+ - MEDIUM: appctx/debug: force a crash if an appctx spins over itself forever
+ - MEDIUM: stream/debug: force a crash if a stream spins over itself forever
+ - MEDIUM: streams: measure processing time and abort when detecting bugs
+ - BUILD/MEDIUM: contrib: Dummy DeviceAtlas API.
+ - MEDIUM: da: HTX mode support.
+ - BUG/MEDIUM: mux-h2: properly deal with too large headers frames
+ - BUG/MINOR: http: Call stream_inc_be_http_req_ctr() only one time per request
+ - BUG/MEDIUM: spoe: arg len encoded in previous frag frame but len changed
+ - MINOR: spoe: Use the sample context to pass frag_ctx info during encoding
+ - DOC: contrib/modsecurity: Typos and fix the reject example
+ - BUG/MEDIUM: contrib/modsecurity: If host header is NULL, don't try to strdup it
+ - MINOR: log: Add "sample" new keyword to "log" lines.
+ - MINOR: log: Enable the log sampling and load-balancing feature.
+ - DOC: log: Document the sampling and load-balancing logging feature.
+ - REGTEST: Add a new reg test for log load-balancing feature.
+ - BUG/MAJOR: map/acl: real fix segfault during show map/acl on CLI
+ - REGTEST: Make this reg test be Linux specific.
+ - CLEANUP: task: move the task_per_thread definition to task.h
+ - MINOR: activity: report context switch counts instead of rates
+ - MINOR: threads: Implement HA_ATOMIC_LOAD().
+ - BUG/MEDIUM: port_range: Make the ring buffer lock-free.
+ - BUG/MEDIUM: listener: Fix how unlimited number of consecutive accepts is handled
+ - MINOR: config: Test validity of tune.maxaccept during the config parsing
+ - CLEANUP: config: Don't alter listener->maxaccept when nbproc is set to 1
+ - BUG/MEDIUM: servers: fix typo "src" instead of "srv"
+ - BUG/MEDIUM: ssl: Don't pretend we can retry a recv/send if we got a shutr/w.
+ - BUG/MINOR: haproxy: fix rule->file memory leak
+ - BUG/MINOR: log: properly free memory on logformat parse error and deinit()
+ - BUG/MINOR: checks: free memory allocated for tasklets
+ - BUG/MEDIUM: pattern: fix memory leak in regex pattern functions
+ - BUG/MEDIUM: channels: Don't forget to reset output in channel_erase().
+ - BUG/MEDIUM: connections: Make sure we remove CO_FL_SESS_IDLE on disown.
+ - MINOR: threads: flatten the per-thread cpu-map
+ - MINOR: init/threads: remove the useless tids[] array
+ - MINOR: init/threads: make the threads array global
+ - BUG/MEDIUM: ssl: Use the early_data API the right way.
+ - BUG/MEDIUM: streams: Don't add CF_WRITE_ERROR if early data were rejected.
+ - MEDIUM: streams: Add the ability to retry a request on L7 failure.
+ - MEDIUM: streams: Add a way to replay failed 0rtt requests.
+ - MEDIUM: streams: Add a new keyword for retry-on, "junk-response"
+ - BUG/MINOR: stream: also increment the retry stats counter on L7 retries
+ - BUG/MEDIUM: checks: make sure the warmup task takes the server lock
+ - BUG/MINOR: logs/threads: properly split the log area upon startup
+ - BUILD: extend travis-ci matrix
+ - CLEANUP: Remove appsession documentation
+ - DOC: Fix typo in keyword matrix
+ - BUILD: remove "build_libressl" duplicate declaration
+ - BUILD: travis-ci: get back to osx without openssl support
+ - BUILD: enable several LibreSSL hacks, including
+ - BUILD: temporarily mark LibreSSL builds as allowed to fail
+ - BUILD: travis: TMPDIR replacement.
+ - BUG/MEDIUM: ssl: Don't attempt to use early data with libressl.
+ - MINOR: doc: Document allow-0rtt on the server line.
+ - MINOR: doc: Document the interaction of allow-0rtt and retry-on 0rtt-rejected.
+ - MEDIUM: proto: Change the prototype of the connect() method.
+ - MEDIUM: tcp: add the "tfo" option to support TCP fastopen on the server
+ - MINOR: config: Extract the code of "stick-table" line parsing.
+ - BUILD/MINOR: stick-table: Compilation fix.
+ - MEDIUM: stick-table: Stop handling stick-tables as proxies.
+ - MINOR: stick-tables: Add peers process binding computing.
+ - MINOR: stick-table: Add prefixes to stick-table names.
+ - MINOR: peers: Do not emit global stick-table names.
+ - DOC: Update for "table" lines in "peers" section.
+ - REGTEST: Add reg tests for "table" lines in "peers" sections.
+ - MEDIUM: regex: modify regex_comp() to atomically allocate/free the my_regex struct
+ - REGTEST: make the tls_health_checks test much faster
+ - REGTEST: make the "table in peers" test require v2.0
+ - BUG/MINOR: mux-h2: rely on trailers output not input to turn them to empty data
+ - BUG/MEDIUM: h2/htx: always fail on too large trailers
+ - MEDIUM: mux-h2: discard contents that are to be sent after a shutdown
+ - BUG/MEDIUM: mux-h2/htx: never wait for EOM when processing trailers
+ - BUG/MEDIUM: h2/htx: never leave a trailers block alone with no EOM block
+ - REGTEST: Flag some slow reg tests.
+ - REGTEST: Reg tests file renaming.
+ - REGTEST: Wrong renaming for one reg test.
+ - REGTEST: Wrong assumption in IP:port logging test.
+ - BUG/MINOR: mworker/ssl: close OpenSSL FDs on reload
+ - MINOR: systemd: Use the variables from /etc/default/haproxy
+ - MINOR: systemd: Make use of master socket in systemd unit
+ - MINOR: systemd: support /etc/sysconfig/ for redhat based distrib
+ - BUG/MEDIUM: stick-table: fix regression caused by a change in proxy struct
+ - BUG/MEDIUM: tasks: fix possible segfault on task_destroy()
+ - CLEANUP: task: remove unneeded tests before task_destroy()
+ - MINOR: mworker: support a configurable maximum number of reloads
+ - BUG/MINOR: mux-h2: fix the condition to close a cs-less h2s on the backend
+ - BUG/MEDIUM: spoe: Be sure the sample is found before setting its context
+ - BUG/MINOR: mux-h1: Fix the parsing of trailers
+ - BUG/MINOR: htx: Never transfer more than expected in htx_xfer_blks()
+ - MINOR: htx: Split on DATA blocks only when blocks are moved to an HTX message
+ - MINOR: htx: Don't try to append a trailer block with the previous one
+ - MINOR: htx: Remove support for unused OOB HTX blocks
+ - BUILD: travis-ci bugfixes and improvements
+ - BUG/MEDIUM: servers: Don't use the same srv flag for cookie-set and TFO.
+ - BUG/MEDIUM: h2: Make sure we set send_list to NULL in h2_detach().
+ - BUILD: ssl: fix again a libressl build failure after the openssl FD leak fix
+ - CLEANUP: ssl-sock: use HA_OPENSSL_VERSION_NUMBER instead of OPENSSL_VERSION_NUMBER
+ - BUILD: ssl: make libressl use its own version numbers
+ - CLEANUP: ssl: remove 57 occurrences of useless tests on LIBRESSL_VERSION_NUMBER
+ - MINOR: ssl: enable aes_gcm_dec on LibreSSL
+ - BUILD: ssl: fix libressl build again after aes-gcm-enc
+ - REORG: ssl: move openssl-compat from proto to common
+ - REORG: ssl: move some OpenSSL defines from ssl_sock to openssl-compat
+ - CLEANUP: ssl: never include openssl/*.h outside of openssl-compat.h anymore
+ - CLEANUP: ssl: make inclusion of openssl headers safe
+ - BUILD: add BoringSSL to travis-ci build matrix
+ - BUILD: threads: Add __ha_cas_dw fallback for single threaded builds
+ - BUG/MINOR: stream: Attach the read side on the response as soon as possible
+ - BUG/MEDIUM: http: Use pointer to the begining of input to parse message headers
+ - BUG/MEDIUM: h2: Don't check send_wait to know if we're in the send_list.
+ - BUG/MEDIUM: streams: Make sure SI_FL_L7_RETRY is set before attempting a retry.
+ - MEDIUM: streams: Add a new http action, disable-l7-retry.
+ - MINOR: streams: Introduce a new retry-on keyword, all-retryable-errors.
+ - BUG/MINOR: vars: Fix memory leak in vars_check_arg
+ - BUILD: travis-ci: make TMPDIR global variable in travis-ci
+ - CLEANUP: ssl: move the SSL_OP_* and SSL_MODE_* definitions to openssl-compat
+ - CLEANUP: ssl: remove ifdef around SSL_CTX_get_extra_chain_certs()
+ - CLEANUP: ssl: move all BIO_* definitions to openssl-compat
+ - BUILD: threads: fix again the __ha_cas_dw() definition
+ - BUG/MAJOR: mux-h2: do not add a stream twice to the send list
+ - Revert "BUG/MINOR: vars: Fix memory leak in vars_check_arg"
+ - BUG/MINOR: peers: Fix memory leak in cfg_parse_peers
+ - BUG/MINOR: htx: make sure to always initialize the HTTP method when parsing a buffer
+ - REGTEST: fix tls_health_checks random failures on MacOS in Travis-CI
+ - MINOR: spoe: Set the argument chunk size to 0 when SPOE variables are checked
+ - BUG/MINOR: vars: Fix memory leak in vars_check_arg
+ - BUG/MAJOR: ssl: segfault upon an heartbeat request
+ - MINOR: spoa-server: Clone the v1.7 spoa-example project
+ - MINOR: spoa-server: move some definition from spoa_server.c to spoa_server.h
+ - MINOR: spoa-server: Externalise debug functions
+ - MINOR: spoe-server: rename "worker" functions
+ - MINOR: spoa-server: Replace the thread init system by processes
+ - MINOR: spoa-server: With debug mode, start only one process
+ - MINOR: spoa-server: Allow registering external processes
+ - MINOR: spoa-server: Allow registering message processors
+ - MINOR: spoa-server: Load files
+ - MINOR: spoa-server: Prepare responses
+ - MINOR: spoa-server: Execute registered callbacks
+ - MINOR: spoa-server: Add Lua processing
+ - MINOR: spoa-server: Add python
+ - MINOR/DOC: spoe-server: Add documentation
+ - BUG/MEDIUM: connections: Don't forget to set xprt_ctx to NULL on close.
+ - MINOR: lists: add LIST_ADDED() to check if an element belongs to a list
+ - CLEANUP: mux-h2: use LIST_ADDED() instead of LIST_ISEMPTY() where relevant
+ - MINOR: mux-h2: add two H2S flags to report the need for shutr/shutw
+ - CLEANUP: mux-h2: simply use h2s->flags instead of ret in h2_deferred_shut()
+ - CLEANUP: connection: remove the handle field from the wait_event struct
+ - BUG/MINOR: log: Wrong log format initialization.
+ - BUG/MINOR: mux-h2: make the do_shut{r,w} functions more robust against retries
+ - BUG/MINOR: mworker: use after free when the PID not assigned
+ - MINOR: mux-h2: remove useless test on stream ID vs last in wake function
+ - MINOR: mux-h2: make h2_wake_some_streams() not depend on the CS flags
+ - MINOR: mux-h2: make h2s_wake_one_stream() the only function to deal with CS
+ - MINOR: mux-h2: make h2s_wake_one_stream() not depend on temporary CS flags
+ - BUG/MINOR: mux-h2: make sure to honor KILL_CONN in do_shut{r,w}
+ - CLEANUP: mux-h2: don't test for impossible CS_FL_REOS conditions
+ - MINOR: mux-h2: add macros to check multiple stream states at once
+ - MINOR: mux-h2: stop relying on CS_FL_REOS
+ - BUG/MEDIUM: mux-h2: Set EOI on the conn_stream during h2_rcv_buf()
+ - BUILD: debug: make gcc not complain on the ABORT_NOW() macro
+ - MINOR: debug: add a new BUG_ON macro
+ - MINOR: h2: Use BUG_ON() to enforce rules in subscribe/unsubscribe.
+ - MINOR: h1: Use BUG_ON() to enforce rules in subscribe/unsubscribe.
+ - MINOR: connections: Use BUG_ON() to enforce rules in subscribe/unsubscribe.
+ - BUILD: ist: turn the lower/upper case tables to literal on obsolete linkers
+
+2019/03/26 : 2.0-dev2
+ - CLEANUP: http: Remove unreachable code in parse_http_req_capture
+ - CLEANUP: stream: Remove bogus loop in conn_si_send_proxy
+ - MINOR: lists: Implement locked variations.
+ - MEDIUM: servers: Used a locked list for idle_orphan_conns.
+ - MEDIUM: servers: Reorganize the way idle connections are cleaned.
+ - BUG/MEDIUM: lists: Properly handle the case we're removing the first elt.
+ - MINOR: cfgparse: Add a cast to make gcc happier.
+ - BUG/MEDIUM: standard: Wrong reallocation size.
+ - BUG/MINOR: listener: keep accept rate counters accurate under saturation
+ - DOC: fix alphabetic ordering for "tune.fail-alloc" setting
+ - MAJOR: config: disable support for nbproc and nbthread in parallel
+ - MEDIUM: listener: keep a single thread-mask and warn on "process" misuse
+ - MAJOR: listener: do not hold the listener lock in listener_accept()
+ - MINOR: listener: maintain a per-thread count of the number of connections on a listener
+ - MINOR: tools: implement functions to look up the nth bit set in a mask
+ - MINOR: listener: pre-compute some thread counts per bind_conf
+ - MINOR: listener: implement multi-queue accept for threads
+ - MAJOR: listener: use the multi-queue for multi-thread listeners
+ - MINOR: activity: add accept queue counters for pushed and overflows
+ - MINOR: config: add global tune.listener.multi-queue setting
+ - MAJOR: threads: enable one thread per CPU by default
+ - DOC: update management.txt to reflect that threads are used by default
+ - BUG/MINOR: config: don't over-count the global maxsock value
+ - BUG/MEDIUM: list: fix the rollback on addq in the locked list
+ - BUG/MEDIUM: list: fix LIST_POP_LOCKED's removal of the last pointer
+ - BUG/MEDIUM: list: add missing store barriers when updating elements and head
+ - MINOR: list: make the delete and pop operations idempotent
+ - MINOR: server: remove a few unneeded LIST_INIT calls after LIST_DEL_LOCKED
+ - BUG/MEDIUM: listener: use a self-locked list for the dequeue lists
+ - BUG/MEDIUM: listener: make sure the listener never accepts too many conns
+ - BUG/MEDIUM: list: correct fix for LIST_POP_LOCKED's removal of last element
+ - MINOR: listener: introduce listener_backlog() to report the backlog value
+ - MINOR: listener: do not needlessly set l->maxconn
+ - MINOR: proxy: do not change the listeners' maxconn when updating the frontend's
+ - MEDIUM: config: don't enforce a low frontend maxconn value anymore
+ - MINOR: peers: Add a message for heartbeat.
+ - MINOR: global: keep a copy of the initial rlim_fd_cur and rlim_fd_max values
+ - BUG/MINOR: init: never lower rlim_fd_max
+ - BUG/MINOR: checks: make external-checks restore the original rlim_fd_cur/max
+ - BUG/MINOR: mworker: be careful to restore the original rlim_fd_cur/max on reload
+ - MINOR: init: make the maxpipe computation more accurate
+ - MINOR: init: move some maxsock updates earlier
+ - MEDIUM: init: make the global maxconn default to what rlim_fd_cur permits
+ - REGTEST: fix a spurious "nbthread 4" in the connection test
+ - DOC: update the text related to the global maxconn value
+ - BUG/MAJOR: mux-h2: fix race condition between close on both ends
+ - MINOR: sample: Replace "req.ungrpc" smp fetch by a "ungrpc" converter.
+ - BUG/MEDIUM: list: fix again LIST_ADDQ_LOCKED
+ - MINOR: htx: unconditionally handle parsing errors in requests or responses
+ - MINOR: mux-h2: always pass HTX_FL_PARSING_ERROR between h2s and buf on RX
+ - BUG/MEDIUM: h2/htx: verify that :path doesn't contain invalid chars
+ - MINOR: sample: Code factorization "ungrpc" converter.
+ - MINOR: sample: Rework gRPC converter code.
+ - CLEANUP: wurfl: remove dead, broken and unmaintained code
+ - MINOR: config: relax the range checks on cpu-map
+ - BUG/MINOR: ssl: fix warning about ssl-min/max-ver support
+ - MINOR: sample: Extract some protocol buffers specific code.
+ - DOC: Remove tabs and fixed punctuation.
+ - MINOR: sample: Add a protocol buffers specific converter.
+ - REGTEST: Peers reg tests.
+ - REGTEST: Enable reg tests with HEAD HTTP method usage.
+ - MINOR: lists: add a LIST_DEL_INIT() macro
+ - MINOR: task: use LIST_DEL_INIT() to remove a task from the queue
+ - MINOR: listener: improve incoming traffic distribution
+ - MINOR: tools: implement my_flsl()
+ - MEDIUM: listener: change the LB algorithm again to use two round robins instead
+ - CLEANUP: listener: remove old thread bit mapping
+ - MINOR: listener: move thr_idx from the bind_conf to the listener
+ - BUG/MEDIUM: logs: Only attempt to free startup_logs once.
+ - BUG/MAJOR: config: Wrong maxconn adjustment.
+ - BUG/MEDIUM: 51d: fix possible segfault on deinit_51degrees()
+ - OPTIM: task: limit the impact of memory barriers in task_remove_from_task_list()
+ - MINOR: fd: Remove debugging code.
+ - BUG/MEDIUM: listeners: Don't call fd_stop_recv() if fd_updt is NULL.
+ - MINOR: threads: Implement __ha_barrier_atomic*.
+ - MEDIUM: threads: Use __ATOMIC_SEQ_CST when using the newer atomic API.
+ - MINOR: threads: Add macros to do atomic operation with no memory barrier.
+ - MEDIUM: various: Use __ha_barrier_atomic* when relevant.
+ - MEDIUM: applets: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: xref: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: fd: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: freq_ctr: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: proxy: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: server: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: task: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: activity: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: backend: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: cache: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: checks: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: pollers: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: compression: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: spoe: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: threads: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: http: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: lb/threads: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: listeners: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: logs: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: memory: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: peers: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: proto_tcp: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: queues: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: sessions: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: ssl: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: stream: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: tcp_rules: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: time: Use the new _HA_ATOMIC_* macros.
+ - MEDIUM: vars: Use the new _HA_ATOMIC_* macros.
+ - MINOR: config: remove obsolete use of DEFAULT_MAXCONN at various places
+ - MINOR: config: continue to rely on DEFAULT_MAXCONN to set the minimum maxconn
+ - BUG/MEDIUM: list: fix incorrect pointer unlocking in LIST_DEL_LOCKED()
+ - BUG/MEDIUM: listener: make sure we don't pick stopped threads
+ - MEDIUM: list: Remove useless barriers.
+ - MEDIUM: list: Use _HA_ATOMIC_*
+ - MEDIUM: connections: Use _HA_ATOMIC_*
+ - BUG/MAJOR: tasks: Use the TASK_GLOBAL flag to know if we're in the global rq.
+ - BUG/MEDIUM: threads/fd: do not forget to take into account epoll_fd/pipes
+ - BUG/MEDIUM: init/threads: consider epoll_fd/pipes for automatic maxconn calculation
+ - BUG/MEDIUM: tasks: Make sure we wake sleeping threads if needed.
+ - BUG/MINOR: mux-h1: Don't report an error on EOS if no message was received
+ - BUG/MINOR: stats/htx: Call channel_add_input() when response headers are sent
+ - BUG/MINOR: lua/htx: Use channel_add_input() when response data are added
+ - BUG/MINOR: lua/htx: Don't forget to call htx_to_buf() when appropriate
+ - MINOR: stats: Add the status code STAT_STATUS_IVAL to handle invalid requests
+ - MINOR: stats: Move stuff about the stats status codes in stats files
+ - BUG/MINOR: stats: Be more strict on what is a valid request to the stats applet
+ - Revert "REGTEST: Enable reg tests with HEAD HTTP method usage."
+ - BUILD: listener: shut up a build warning when threads are disabled
+ - BUILD: Makefile: allow the reg-tests target to be verbose
+ - BUILD: Makefile: resolve LEVEL before calling run-regtests
+ - BUG/MAJOR: spoe: Fix initialization of thread-dependent fields
+ - BUG/MAJOR: stats: Fix how huge POST data are read from the channel
+ - BUG/MINOR: http/counters: fix missing increment of fe->srv_aborts
+ - BUG/MEDIUM: mux-h2: Always wakeup streams with no id to avoid frozen streams
+ - MINOR: mux-h2: Set REFUSED_STREAM error to reset a stream if no data was ever sent
+ - MINOR: muxes: Report the Last read with a dedicated flag
+ - MINOR: proto-http/proto-htx: Make error handling clearer during data forwarding
+ - BUILD: tools: fix a build warning on some 32-bit archs
+ - MINOR: init: report the list of optionally available services
+ - MEDIUM: proto_htx: Switch to infinite forwarding if there is no data filter
+ - BUG/MINOR: cache: Fully consume large requests in the cache applet
+ - BUG/MINOR: stats: Fully consume large requests in the stats applet
+ - BUG/MEDIUM: lua: Fully consume large requests when an HTTP applet ends
+ - MINOR: proto_http: Add function to handle the header "Expect: 100-continue"
+ - MINOR: proto_htx: Add function to handle the header "Expect: 100-continue"
+ - MINOR: stats/cache: Handle the header Expect when applets are registered
+ - MINOR: http/applets: Handle all applets intercepting HTTP requests the same way
+ - CLEANUP: cache: don't export http_cache_applet anymore
+ - MINOR: lua: Don't handle the header Expect in lua HTTP applets anymore
+ - BUG/MINOR: doc: Be accurate on the behavior on pool-purge-delay.
+ - Revert "MEDIUM: proto_htx: Switch to infinite forwarding if there is no data filter"
+ - BUG/MEDIUM: mux-h2: Make sure we destroyed the h2s once shutr/shutw is done.
+ - BUG/MEDIUM: mux-h2: Don't bother keeping the h2s if detaching and nothing to send.
+ - BUG/MEDIUM: mux-h2: Use the right list in h2_stop_senders().
+ - MINOR: mux-h2: copy small data blocks more often and reduce the number of pauses
+ - CLEANUP: mux-h2: add some comments to help understand the code
+ - BUG/MEDIUM: ssl: ability to set TLS 1.3 ciphers using ssl-default-server-ciphersuites
+ - BUG/MINOR: log: properly format IPv6 address when LOG_OPT_HEXA modifier is used.
+ - BUG/MEDIUM: h2: Try to be fair when sending data.
+ - BUG/MINOR: proto-http: Don't forward request body anymore on error
+ - MINOR: mux-h2: Remove useless test on ES flag in h2_frt_transfer_data()
+ - MINOR: connection: and new flag to mark end of input (EOI)
+ - MINOR: channel: Report EOI on the input channel if it was reached in the mux
+ - MEDIUM: mux-h2: Don't mix the end of the message with the end of stream
+ - MINOR: mux-h1: Set CS_FL_EOI the end of the message is reached
+ - BUG/MEDIUM: http/htx: Fix handling of the option abortonclose
+ - CLEANUP: muxes/stream-int: Remove flags CS_FL_READ_NULL and SI_FL_READ_NULL
+ - MEDIUM: proto_htx: Reintroduce the infinite forwarding on data
+ - BUG/MEDIUM: h2: only destroy the h2s if h2s->cs is NULL.
+ - BUG/MEDIUM: h2: Use the new sending_list in h2s_notify_send().
+ - BUG/MEDIUM: h2: Follow the same logic in h2_deferred_shut than in h2_snd_buf.
+ - BUG/MEDIUM: h2: Remove the tasklet from the task list if unsubscribing.
+ - BUG/MEDIUM: task/h2: add an idempotent task removal function
+ - CLEANUP: task: only perform a LIST_DEL() when the list is not empty
+ - BUG/MEDIUM: mux-h2: make sure to always notify streams of EOS condition
+ - CONTRIB: debug: report the CS and CF's EOI flags
+ - MINOR: channel: don't unset CF_SHUTR_NOW after shutting down.
+
+2019/02/26 : 2.0-dev1
+ - MINOR: mux-h2: only increase the connection window with the first update
+ - REGTESTS: remove the expected window updates from H2 handshakes
+ - BUG/MINOR: mux-h2: make empty HEADERS frame return a connection error
+ - BUG/MEDIUM: mux-h2: mark that we have too many CS once we have more than the max
+ - MEDIUM: mux-h2: remove padlen during headers phase
+ - MINOR: h2: add a bit-based frame type representation
+ - MINOR: mux-h2: remove useless check for empty frame length in h2s_decode_headers()
+ - MEDIUM: mux-h2: decode HEADERS frames before allocating the stream
+ - MINOR: mux-h2: make h2c_send_rst_stream() use the dummy stream's error code
+ - MINOR: mux-h2: add a new dummy stream for the REFUSED_STREAM error code
+ - MINOR: mux-h2: fail stream creation more cleanly using RST_STREAM
+ - MINOR: buffers: add a new b_move() function
+ - MINOR: mux-h2: make h2_peek_frame_hdr() support an offset
+ - MEDIUM: mux-h2: handle decoding of CONTINUATION frames
+ - CLEANUP: mux-h2: remove misleading comments about CONTINUATION
+ - BUG/MEDIUM: servers: Don't try to reuse connection if we switched server.
+ - BUG/MEDIUM: tasks: Decrement tasks_run_queue in tasklet_free().
+ - BUG/MINOR: htx: send the proper authenticate header when using http-request auth
+ - BUG/MEDIUM: mux_h2: Don't add to the idle list if we're full.
+ - BUG/MEDIUM: servers: Fail if we fail to allocate a conn_stream.
+ - BUG/MAJOR: servers: Use the list api correctly to avoid crashes.
+ - BUG/MAJOR: servers: Correctly use LIST_ELEM().
+ - BUG/MAJOR: sessions: Use an unlimited number of servers for the conn list.
+ - BUG/MEDIUM: servers: Flag the stream_interface on handshake error.
+ - MEDIUM: servers: Be smarter when switching connections.
+ - MEDIUM: sessions: Keep track of which connections are idle.
+ - MINOR: payload: add sample fetch for TLS ALPN
+ - BUG/MEDIUM: log: don't mark log FDs as non-blocking on terminals
+ - MINOR: channel: Add the function channel_add_input
+ - MINOR: stats/htx: Call channel_add_input instead of updating channel state by hand
+ - BUG/MEDIUM: cache: Be sure to end the forwarding when XFER length is unknown
+ - BUG/MAJOR: htx: Return the good block address after a defrag
+ - MINOR: lb: allow redispatch when using consistent hash
+ - CLEANUP: mux-h2: fix end-of-stream flag name when processing headers
+ - BUG/MEDIUM: mux-h2: always restart reading if data are available
+ - BUG/MINOR: mux-h2: set the stream-full flag when leaving h2c_decode_headers()
+ - BUG/MINOR: mux-h2: don't check the CS count in h2c_bck_handle_headers()
+ - BUG/MINOR: mux-h2: mark end-of-stream after processing response HEADERS, not before
+ - BUG/MINOR: mux-h2: only update rxbuf's length for H1 headers
+ - BUG/MEDIUM: mux-h1: use per-direction flags to indicate transitions
+ - BUG/MEDIUM: mux-h1: make HTX chunking consistent with H2
+ - BUG/MAJOR: stream-int: Update the stream expiration date in stream_int_notify()
+ - BUG/MEDIUM: proto-htx: Set SI_FL_NOHALF on server side when request is done
+ - BUG/MEDIUM: mux-h1: Add a task to handle connection timeouts
+ - MINOR: mux-h2: make h2c_decode_headers() return a status, not a count
+ - MINOR: mux-h2: add a new dummy stream : h2_error_stream
+ - MEDIUM: mux-h2: make h2c_decode_headers() support recoverable errors
+ - BUG/MINOR: mux-h2: detect when the HTX EOM block cannot be added after headers
+ - MINOR: mux-h2: remove a misleading and impossible test
+ - CLEANUP: mux-h2: clean the stream error path on HEADERS frame processing
+ - MINOR: mux-h2: check for too many streams only for idle streams
+ - MINOR: mux-h2: set H2_SF_HEADERS_RCVD when a HEADERS frame was decoded
+ - BUG/MEDIUM: mux-h2: decode trailers in HEADERS frames
+ - MINOR: h2: add h2_make_h1_trailers to turn H2 headers to H1 trailers
+ - MEDIUM: mux-h2: pass trailers to H1 (legacy mode)
+ - MINOR: htx: add a new function to add a block without filling it
+ - MINOR: h2: add h2_make_htx_trailers to turn H2 headers to HTX trailers
+ - MEDIUM: mux-h2: pass trailers to HTX
+ - MINOR: mux-h1: parse the content-length header on output and set H1_MF_CLEN
+ - BUG/MEDIUM: mux-h1: don't enforce chunked encoding on requests
+ - MINOR: mux-h2: make HTX_BLK_EOM processing idempotent
+ - MINOR: h1: make the H1 headers block parser able to parse headers only
+ - MEDIUM: mux-h2: emit HEADERS frames when facing HTX trailers blocks
+ - MINOR: stream/htx: Add info about the HTX structs in "show sess all" command
+ - MINOR: stream: Add the subscription events of SIs in "show sess all" command
+ - MINOR: mux-h1: Add the subscription events in "show fd" command
+ - BUG/MEDIUM: h1: Get the h1m state when restarting the headers parsing
+ - BUG/MINOR: cache/htx: Be sure to count partial trailers
+ - BUG/MEDIUM: h1: In h1_init(), wake the tasklet instead of calling h1_recv().
+ - BUG/MEDIUM: server: Defer the mux init until after xprt has been initialized.
+ - MINOR: connections: Remove a stall comment.
+ - BUG/MEDIUM: cli: make "show sess" really thread-safe
+ - BUILD: add a new file "version.c" to carry version updates
+ - MINOR: stream/htx: add the HTX flags output in "show sess all"
+ - MINOR: stream/cli: fix the location of the waiting flag in "show sess all"
+ - MINOR: stream/cli: report more info about the HTTP messages on "show sess all"
+ - BUG/MINOR: lua: bad args are returned for Lua actions
+ - BUG/MEDIUM: lua: dead lock when Lua tasks are triggered
+ - MINOR: htx: Add an helper function to get the max space usable for a block
+ - MINOR: channel/htx: Add HTX version for some helper functions
+ - BUG/MEDIUM: cache/htx: Respect the reserve when cached objects are served
+ - BUG/MINOR: stats/htx: Respect the reserve when the stats page is dumped
+ - DOC: regtest: make it clearer what the purpose of the "broken" series is
+ - REGTEST: mailers: add new test for 'mailers' section
+ - REGTEST: Add a reg test for health-checks over SSL/TLS.
+ - BUG/MINOR: mux-h1: Close connection on shutr only when shutw was really done
+ - MEDIUM: mux-h1: Clarify how shutr/shutw are handled
+ - BUG/MINOR: compression: Disable it if another one is already in progress
+ - BUG/MINOR: filters: Detect cache+compression config on legacy HTTP streams
+ - BUG/MINOR: cache: Disable the cache if any compression filter precedes it
+ - REGTEST: Add some information to test results.
+ - MINOR: htx: Add a function to truncate all blocks after a specific offset
+ - MINOR: channel/htx: Add the HTX version of channel_truncate/erase
+ - BUG/MINOR: proto_htx: Use HTX versions to truncate or erase a buffer
+ - BUG/CRITICAL: mux-h2: re-check the frame length when PRIORITY is used
+ - DOC: Fix typo in req.ssl_alpn example (commit 4afdd138424ab...)
+ - DOC: http-request cache-use / http-response cache-store expects cache name
+ - REGTEST: "capture (request|response)" regtest.
+ - BUG/MINOR: lua/htx: Respect the reserve when data are send from an HTX applet
+ - REGTEST: filters: add compression test
+ - BUG/MEDIUM: init: Initialize idle_orphan_conns for first server in server-template
+ - BUG/MEDIUM: ssl: Disable anti-replay protection and set max data with 0RTT.
+ - DOC: Be a bit more explicit about allow-0rtt security implications.
+ - MINOR: mux-h1: make the mux_h1_ops struct static
+ - BUILD: makefile: add an EXTRA_OBJS variable to help build optional code
+ - BUG/MEDIUM: connection: properly unregister the mux on failed initialization
+ - BUG/MAJOR: cache: fix confusion between zero and uninitialized cache key
+ - REGTESTS: test case for map_regm commit 271022150d
+ - REGTESTS: Basic tests for concat,strcmp,word,field,ipmask converters
+ - REGTESTS: Basic tests for using maps to redirect requests / select backend
+ - DOC: REGTESTS README varnishtest -Dno-htx= define.
+ - MINOR: spoe: Make the SPOE filter compatible with HTX proxies
+ - MINOR: checks: Store the proxy in checks.
+ - BUG/MEDIUM: checks: Avoid having an associated server for email checks.
+ - REGTEST: Switch to vtest.
+ - REGTEST: Adapt reg test doc files to vtest.
+ - BUG/MEDIUM: h1: Make sure we destroy an inactive connection that did shutw.
+ - BUG/MINOR: base64: dec func ignores padding for output size checking
+ - BUG/MEDIUM: ssl: missing allocation failure checks loading tls key file
+ - MINOR: ssl: add support of aes256 bits ticket keys on file and cli.
+ - BUG/MINOR: backend: don't use url_param_name as a hint for BE_LB_ALGO_PH
+ - BUG/MINOR: backend: balance uri specific options were lost across defaults
+ - BUG/MINOR: backend: BE_LB_LKUP_CHTREE is a value, not a bit
+ - MINOR: backend: move url_param_name/len to lbprm.arg_str/len
+ - MINOR: backend: make headers and RDP cookie also use arg_str/len
+ - MINOR: backend: add new fields in lbprm to store more LB options
+ - MINOR: backend: make the header hash use arg_opt1 for use_domain_only
+ - MINOR: backend: remap the balance uri settings to lbprm.arg_opt{1,2,3}
+ - MINOR: backend: move hash_balance_factor out of chash
+ - MEDIUM: backend: move all LB algo parameters into an union
+ - MINOR: backend: make the random algorithm support a number of draws
+ - BUILD/MEDIUM: da: Necessary code changes for new buffer API.
+ - BUG/MINOR: stick_table: Prevent conn_cur from underflowing
+ - BUG: 51d: Changes to the buffer API in 1.9 were not applied to the 51Degrees code.
+ - BUG/MEDIUM: stats: Get the right scope pointer depending on HTX is used or not
+ - DOC: add a missing space in the documentation for bc_http_major
+ - REGTEST: checks basic stats webpage functionality
+ - BUG/MEDIUM: servers: Make assign_tproxy_address work when ALPN is set.
+ - BUG/MEDIUM: connections: Add the CO_FL_CONNECTED flag if a send succeeded.
+ - DOC: add github issue templates
+ - MINOR: cfgparse: Extract some code to be re-used.
+ - CLEANUP: cfgparse: Return asap from cfg_parse_peers().
+ - CLEANUP: cfgparse: Code reindentation.
+ - MINOR: cfgparse: Useless frontend initialization in "peers" sections.
+ - MINOR: cfgparse: Rework peers frontend init.
+ - MINOR: cfgparse: Simplification.
+ - MINOR: cfgparse: Make "peer" lines be parsed as "server" lines.
+ - MINOR: peers: Make outgoing connection to SSL/TLS peers work.
+ - MINOR: cfgparse: SSL/TLS binding in "peers" sections.
+ - DOC: peers: SSL/TLS documentation for "peers"
+ - BUG/MINOR: startup: certain goto paths in init_pollers fail to free
+ - BUG/MEDIUM: checks: fix recent regression on agent-check making it crash
+ - BUG/MINOR: server: don't always trust srv_check_health when loading a server state
+ - BUG/MINOR: check: Wake the check task if the check is finished in wake_srv_chk()
+ - BUG/MEDIUM: ssl: Fix handling of TLS 1.3 KeyUpdate messages
+ - DOC: mention the effect of nf_conntrack_tcp_loose on src/dst
+ - BUG/MINOR: proto-htx: Return an error if all headers cannot be received at once
+ - BUG/MEDIUM: mux-h2/htx: Respect the channel's reserve
+ - BUG/MINOR: mux-h1: Apply the reserve on the channel's buffer only
+ - BUG/MINOR: mux-h1: avoid copying output over itself in zero-copy
+ - BUG/MAJOR: mux-h2: don't destroy the stream on failed allocation in h2_snd_buf()
+ - BUG/MEDIUM: backend: also remove from idle list muxes that have no more room
+ - BUG/MEDIUM: mux-h2: properly abort on trailers decoding errors
+ - MINOR: h2: declare new sets of frame types
+ - BUG/MINOR: mux-h2: CONTINUATION in closed state must always return GOAWAY
+ - BUG/MINOR: mux-h2: headers-type frames in HREM are always a connection error
+ - BUG/MINOR: mux-h2: make it possible to set the error code on an already closed stream
+ - BUG/MINOR: hpack: return a compression error on invalid table size updates
+ - MINOR: server: make sure pool-max-conn is >= -1
+ - BUG/MINOR: stream: take care of synchronous errors when trying to send
+ - CLEANUP: server: fix indentation mess on idle connections
+ - BUG/MINOR: mux-h2: always check the stream ID limit in h2_avail_streams()
+ - BUG/MINOR: mux-h2: refuse to allocate a stream with too high an ID
+ - BUG/MEDIUM: backend: never try to attach to a mux having no more stream available
+ - MINOR: server: add a max-reuse parameter
+ - MINOR: mux-h2: always consider a server's max-reuse parameter
+ - MEDIUM: stream-int: always mark pending outgoing SI_ST_CON
+ - MINOR: stream: don't wait before retrying after a failed connection reuse
+ - MEDIUM: h2: always parse and deduplicate the content-length header
+ - BUG/MINOR: mux-h2: always compare content-length to the sum of DATA frames
+ - CLEANUP: h2: Remove debug printf in mux_h2.c
+ - MINOR: cfgparse: make the process/thread parser support a maximum value
+ - MINOR: threads: make MAX_THREADS configurable at build time
+ - DOC: nbthread is no longer experimental.
+ - BUG/MINOR: listener: always fill the source address for accepted socketpairs
+ - BUG/MINOR: mux-h2: do not report available outgoing streams after GOAWAY
+ - BUG/MINOR: spoe: corrected fragmentation string size
+ - BUG/MINOR: task: fix possibly missed event in inter-thread wakeups
+ - BUG/MEDIUM: servers: Attempt to reuse an unfinished connection on retry.
+ - BUG/MEDIUM: backend: always call si_detach_endpoint() on async connection failure
+ - SCRIPTS: add the issue tracker URL to the announce script
+ - MINOR: peers: Extract some code to be reused.
+ - CLEANUP: peers: Indentation fixes.
+ - MINOR: peers: send code factorization.
+ - MINOR: peers: Add new functions to send code and reduce the I/O handler.
+ - MEDIUM: peers: synchronization code factorization to reduce the size of the I/O handler.
+ - MINOR: peers: Move update receive code to reduce the size of the I/O handler.
+ - MINOR: peers: Move ack, switch and definition receive code to reduce the size of the I/O handler.
+ - MINOR: peers: Move high level receive code to reduce the size of I/O handler.
+ - CLEANUP: peers: Be more generic.
+ - MINOR: peers: move error handling to reduce the size of the I/O handler.
+ - MINOR: peers: move messages treatment code to reduce the size of the I/O handler.
+ - MINOR: peers: move send code to reduce the size of the I/O handler.
+ - CLEANUP: peers: Remove useless statements.
+ - MINOR: peers: move "hello" message treatment code to reduce the size of the I/O handler.
+ - MINOR: peers: move peer initializations code to reduce the size of the I/O handler.
+ - CLEANUP: peers: factor the error handling code in peer_treat_updatemsg()
+ - CLEANUP: peers: factor error handling in peer_treat_definedmsg()
+ - BUILD/MINOR: peers: shut up a build warning introduced during last cleanup
+ - BUG/MEDIUM: mux-h2: only close connection on request frames on closed streams
+ - CLEANUP: mux-h2: remove two useless but misleading assignments
+ - BUG/MEDIUM: checks: Check that conn_install_mux succeeded.
+ - BUG/MEDIUM: servers: Only destroy a conn_stream we just allocated.
+ - BUG/MEDIUM: servers: Don't add an incomplete conn to the server idle list.
+ - BUG/MEDIUM: checks: Don't try to set ALPN if connection failed.
+ - BUG/MEDIUM: h2: In h2_send(), stop the loop if we failed to alloc a buf.
+ - BUG/MEDIUM: peers: Handle mux creation failure.
+ - BUG/MEDIUM: servers: Close the connection if we failed to install the mux.
+ - BUG/MEDIUM: compression: Rewrite strong ETags
+ - BUG/MINOR: deinit: tcp_rep.inspect_rules not deinit, add to deinit
+ - CLEANUP: mux-h2: remove misleading leftover test on h2s' nullity
+ - BUG/MEDIUM: mux-h2: wake up flow-controlled streams on initial window update
+ - BUG/MEDIUM: mux-h2: fix two half-closed to closed transitions
+ - BUG/MEDIUM: mux-h2: make sure never to send GOAWAY on too old streams
+ - BUG/MEDIUM: mux-h2: do not abort HEADERS frame before decoding them
+ - BUG/MINOR: mux-h2: make sure response HEADERS are not received in other states than OPEN and HLOC
+ - MINOR: h2: add a generic frame checker
+ - MEDIUM: mux-h2: check the frame validity before considering the stream state
+ - CLEANUP: mux-h2: remove stream ID and frame length checks from the frame parsers
+ - BUG/MINOR: mux-h2: make sure request trailers on aborted streams don't break the connection
+ - DOC: compression: Update the reasons for disabled compression
+ - BUG/MEDIUM: buffer: Make sure b_is_null handles buffers waiting for allocation.
+ - DOC: htx: make it clear that htxbuf() and htx_from_buf() always return valid pointers
+ - MINOR: htx: never check for null htx pointer in htx_is_{,not_}empty()
+ - MINOR: mux-h2: consistently rely on the htx variable to detect the mode
+ - BUG/MEDIUM: peers: Peer addresses parsing broken.
+ - BUG/MEDIUM: mux-h1: Don't add "transfer-encoding" if message-body is forbidden
+ - BUG/MEDIUM: connections: Don't forget to remove CO_FL_SESS_IDLE.
+ - BUG/MINOR: stream: don't close the front connection when facing a backend error
+ - BUG/MEDIUM: mux-h2: wait for the mux buffer to be empty before closing the connection
+ - MINOR: stream-int: add a new flag to mention that we want the connection to be killed
+ - MINOR: connstream: have a new flag CS_FL_KILL_CONN to kill a connection
+ - BUG/MEDIUM: mux-h2: do not close the connection on aborted streams
+ - BUG/MINOR: server: fix logic flaw in idle connection list management
+ - MINOR: mux-h2: max-concurrent-streams should be unsigned
+ - MINOR: mux-h2: make sure to only check concurrency limit on the frontend
+ - MINOR: mux-h2: learn and store the peer's advertised MAX_CONCURRENT_STREAMS setting
+ - BUG/MEDIUM: mux-h2: properly consider the peer's advertised max-concurrent-streams
+ - MINOR: xref: Add missing barriers.
+ - MINOR: muxes: Don't bother to LIST_DEL(&conn->list) before calling conn_free().
+ - MINOR: debug: Add an option that causes random allocation failures.
+ - BUG/MEDIUM: backend: always release the previous connection into its own target srv_list
+ - BUG/MEDIUM: htx: check the HTX compatibility in dynamic use-backend rules
+ - BUG/MINOR: tune.fail-alloc: Don't forget to initialize ret.
+ - BUG/MINOR: backend: check srv_conn before dereferencing it
+ - BUG/MEDIUM: mux-h2: always omit :scheme and :path for the CONNECT method
+ - BUG/MEDIUM: mux-h2: always set :authority on request output
+ - BUG/MEDIUM: stream: Don't forget to free s->unique_id in stream_free().
+ - BUG/MINOR: threads: fix the process range of thread masks
+ - BUG/MINOR: config: fix bind line thread mask validation
+ - CLEANUP: threads: fix misleading comment about all_threads_mask
+ - CLEANUP: threads: use nbits to calculate the thread mask
+ - OPTIM: listener: optimize cache-line packing for struct listener
+ - MINOR: tools: improve the popcount() operation
+ - MINOR: config: keep an all_proc_mask like we have all_threads_mask
+ - MINOR: global: add proc_mask() and thread_mask()
+ - MINOR: config: simplify bind_proc processing using proc_mask()
+ - MINOR: threads: make use of thread_mask() to simplify some thread calculations
+ - BUG/MINOR: compression: properly report compression stats in HTX mode
+ - BUG/MINOR: task: close a tiny race in the inter-thread wakeup
+ - BUG/MAJOR: config: verify that targets of track-sc and stick rules are present
+ - BUG/MAJOR: spoe: verify that backends used by SPOE cover all their callers' processes
+ - BUG/MAJOR: htx/backend: Make all tests on HTTP messages compatible with HTX
+ - BUG/MINOR: config: make sure to count the error on incorrect track-sc/stick rules
+ - DOC: ssl: Clarify when pre TLSv1.3 cipher can be used
+ - DOC: ssl: Stop documenting ciphers example to use
+ - BUG/MINOR: spoe: do not assume agent->rt is valid on exit
+ - BUG/MINOR: lua: initialize the correct idle conn lists for the SSL sockets
+ - BUG/MEDIUM: spoe: initialization depending on nbthread must be done last
+ - BUG/MEDIUM: server: initialize the idle conns list after parsing the config
+ - BUG/MEDIUM: server: initialize the orphaned conns lists and tasks at the end
+ - MINOR: config: make MAX_PROCS configurable at build time
+ - BUG/MAJOR: spoe: Don't try to get agent config during SPOP healthcheck
+ - BUG/MINOR: config: Reinforce validity check when a process number is parsed
+ - BUG/MEDIUM: peers: check that p->srv actually exists before using p->srv->use_ssl
+ - CONTRIB: contrib/prometheus-exporter: Add a Prometheus exporter for HAProxy
+ - BUG/MINOR: mux-h1: verify the request's version before dropping connection: keep-alive
+ - BUG: 51d: In Hash Trie, multi header matching was affected by the header names stored globally.
+ - MEDIUM: 51d: Enabled multi threaded operation in the 51Degrees module.
+ - BUG/MAJOR: stream: avoid double free on unique_id
+ - BUILD/MINOR: stream: avoid a build warning with threads disabled
+ - BUILD/MINOR: tools: fix build warning in the date conversion functions
+ - BUILD/MINOR: peers: remove an impossible null test in intencode()
+ - BUILD/MINOR: htx: fix some potential null-deref warnings with http_find_stline
+ - BUG/MEDIUM: peers: Missing peer initializations.
+ - BUG/MEDIUM: http_fetch: fix the "base" and "base32" fetch methods in HTX mode
+ - BUG/MEDIUM: proto_htx: Fix data size update if end of the cookie is removed
+ - BUG/MEDIUM: http_fetch: fix "req.body_len" and "req.body_size" fetch methods in HTX mode
+ - BUILD/MEDIUM: initcall: Fix build on MacOS.
+ - BUG/MEDIUM: mux-h2/htx: Always set CS flags before exiting h2_rcv_buf()
+ - MINOR: h2/htx: Set the flag HTX_SL_F_BODYLESS for messages without body
+ - BUG/MINOR: mux-h1: Add "transfer-encoding" header on outgoing requests if needed
+ - BUG/MINOR: mux-h2: Don't add ":status" pseudo-header on trailers
+ - BUG/MINOR: proto-htx: Consider a XFER_LEN message as chunked by default
+ - BUG/MEDIUM: h2/htx: Correctly handle interim responses when HTX is enabled
+ - MINOR: mux-h2: Set HTX extra value when possible
+ - BUG/MEDIUM: htx: count the amount of copied data towards the final count
+ - MINOR: mux-h2: make the H2 MAX_FRAME_SIZE setting configurable
+ - BUG/MEDIUM: mux-h2/htx: send an empty DATA frame on empty HTX trailers
+ - BUG/MEDIUM: servers: Use atomic operations when handling curr_idle_conns.
+ - BUG/MEDIUM: servers: Add a per-thread counter of idle connections.
+ - MINOR: fd: add a new my_closefrom() function to close all FDs
+ - MINOR: checks: use my_closefrom() to close all FDs
+ - MINOR: fd: implement an optimised my_closefrom() function
+ - BUG/MINOR: fd: make sure my_closefrom() doesn't miss some FDs
+ - BUG/MAJOR: fd/threads, task/threads: ensure all spin locks are unlocked
+ - BUG/MAJOR: listener: Make sure the listener exists before using it.
+ - MINOR: fd: Use closefrom() as my_closefrom() if supported.
+ - BUG/MEDIUM: mux-h1: Report the right amount of data xferred in h1_rcv_buf()
+ - BUG/MINOR: channel: Set CF_WROTE_DATA when outgoing data are skipped
+ - MINOR: htx: Add function to drain data from an HTX message
+ - MINOR: channel/htx: Add function to skips output bytes from an HTX channel
+ - BUG/MAJOR: cache/htx: Set the start-line offset when a cached object is served
+ - BUG/MEDIUM: cache: Get objects from the cache only for GET and HEAD requests
+ - BUG/MINOR: cache/htx: Return only the headers of cached objects to HEAD requests
+ - BUG/MINOR: mux-h1: Always initialize h1m variable in h1_process_input()
+ - BUG/MEDIUM: proto_htx: Fix functions applying regex filters on HTX messages
+ - BUG/MEDIUM: h2: advertise to servers that we don't support push
+ - MINOR: standard: Add a function to parse uints (dotted notation).
+ - MINOR: arg: Add support for ARGT_PBUF_FNUM arg type.
+ - MINOR: http_fetch: add "req.ungrpc" sample fetch for gRPC.
+ - MINOR: sample: Add two sample converters for protocol buffers.
+ - DOC: sample: Add gRPC related documentation.
+
+2018/12/22 : 2.0-dev0
+ - BUG/MAJOR: connections: Close the connection before freeing it.
+ - REGTEST: Require the option LUA to run lua tests
+ - REGTEST: script: Process script arguments before everything else
+ - REGTEST: script: Evaluate the varnishtest command to allow quoted parameters
+ - REGTEST: script: Add the option --clean to remove previous log directories
+ - REGTEST: script: Add the option --debug to show logs on standard output
+ - REGTEST: script: Add the option --keep-logs to keep all log directories
+ - REGTEST: script: Add the option --use-htx to enable the HTX in regtests
+ - REGTEST: script: Print only errors in the results report
+ - REGTEST: Add option to use HTX prefixed by the macro 'no-htx'
+ - REGTEST: Make reg-tests target support argument.
+ - REGTEST: Fix a typo about barrier type.
+ - REGTEST: Be less Linux specific with a syslog regex.
+ - REGTEST: Missing enclosing quotes for ${tmpdir} macro.
+ - REGTEST: Exclude freebsd target for some reg tests.
+ - BUG/MEDIUM: h2: Don't forget to quit the sending_list if SUB_CALL_UNSUBSCRIBE.
+ - BUG/MEDIUM: mux-h2: Don't forget to quit the send list on error reports
+ - BUG/MEDIUM: dns: Don't prevent reading the last byte of the payload in dns_validate_response()
+ - BUG/MEDIUM: dns: overflowed dns name start position causing invalid dns error
+ - BUG/MINOR: compression/htx: Don't compress responses with unknown body length
+ - BUG/MINOR: compression/htx: Don't add the last block of data if it is empty
+ - MEDIUM: mux_h1: Implement h1_show_fd.
+ - REGTEST: script: Add support of alternatives in required options list
+ - REGTEST: Add a basic test for the compression
+ - BUG/MEDIUM: mux-h2: don't needlessly wake up the demux on short frames
+ - REGTEST: A basic test for "http-buffer-request"
+ - BUG/MEDIUM: server: Also copy "check-sni" for server templates.
+ - MINOR: ssl: Add ssl_sock_set_alpn().
+ - MEDIUM: checks: Add check-alpn.
+ - wip
+
+2018/12/19 : 1.9.0
+ - BUG/MEDIUM: compression: Use the right buffer pointers to compress input data
+ - BUG/MINOR: mux_pt: Set CS_FL_WANT_ROOM when count is zero in rcv_buf() callback
+ - BUG/MEDIUM: connection: Add a new CS_FL_ERR_PENDING flag to conn_streams.
+ - CONTRIB: debug: teach the "flags" utility about new conn_stream flags
+ - BUG/MEDIUM: stream-int: always clear CS_FL_WANT_ROOM before receiving
+ - BUG/MEDIUM: mux-h2: also restart demuxing when data are pending in demux
+ - BUG/MEDIUM: mux-h2: restart demuxing as soon as demux data are available
+ - BUG/MEDIUM: h2: fix aggregated cookie length computation in HTX mode
+ - MINOR: mux-h2: report more h2c, last h2s and cs information on "show fd"
+ - CONTRIB: debug: report stream-int's flag SI_FL_CLEAN_ABRT
+ - MINOR: cli/stream: add the conn_stream in "show sess" output
+ - BUG/MINOR: mux-h2: don't report a fantom h2s in "show fd"
+ - BUG/MINOR: cli/fd: don't isolate the thread for each individual fd
+ - MINOR: objtype: report a few missing types in names and base pointers
+ - BUG/MEDIUM: mux-h2: make sure to report synchronous errors after EOS
+ - BUG/MEDIUM: mux-h2: report asynchronous errors in h2_wake_some_streams()
+ - BUG/MEDIUM: mux-h2: make sure the demux also wakes streams up on errors
+ - BUG/MINOR: mux-h1: report the correct frontend in error captures
+ - BUG/MEDIUM: stream-int: also wake the stream up on end of transfer
+ - MEDIUM: h2: properly check and deduplicate the content-length header in HTX
+ - BUG/MEDIUM: stream: Forward the right amount of data before infinite forwarding
+ - BUG/MINOR: proto_htx: Call the HTX version of the function managing client cookies
+ - BUG/MEDIUM: lua/htx: Handle EOM in receive/get_line calls in HTTP applets
+ - BUG/MINOR: lua: Return an error if a legacy HTTP applet doesn't send anything
+ - MINOR: compression: Remove the thread_local variable buf_output
+ - CLEANUP: connection: rename subscription events values and event field
+ - CLEANUP: connection: rename conn->mux_ctx to conn->ctx
+ - MINOR: connection: remove an unwelcome dependency on struct stream
+ - CLEANUP: stream-int: consistently call the si/stream_int functions
+ - BUG/MEDIUM: h1: Don't shutw/shutr the connection if we have keepalive.
+ - BUG/MEDIUM: H2: Make sure htx is set even on empty frames.
+ - BUG/MEDIUM: mux-h2: pass CS_FL_ERR_PENDING to h2_wake_some_streams()
+ - MEDIUM: stream-int: always consider all CS errors on the send side
+ - BUG/MEDIUM: h2: Make sure we don't set CS_FL_ERROR if there's still data.
+ - CLEANUP: mux-h2: implement h2s_notify_{send,recv} to report events to subscribers
+ - MINOR: mux-h2: add a new function h2s_alert() to call the data layer
+ - BUG/MEDIUM: mux-h2: make use of h2s_alert() to report aborts
+ - MINOR: connection: add cs_set_error() to set the error bits
+ - CLEANUP: mux-h2: make use of cs_set_error()
+ - BUG/MINOR: mux-h2: make sure we check the conn_stream in early data
+ - BUG/MEDIUM: h2: Don't wait for flow control if the connection had a shutr.
+ - MINOR: cli/show_fd: report that a connection is back or not
+ - SCRIPTS: add the slack channel URL to the announce script
+ - CLEANUP: remove my name and address from the copyright banner
+ - DOC: mention in the readme that 1.9 is a stable version now
+
+2018/12/16 : 1.9-dev11
+ - BUG/MEDIUM: connection: Don't use the provided conn_stream if it was tried.
+ - REGTEST/MINOR: remove double body specification for server txresp
+ - BUG/MEDIUM: connections: Remove error flags when retrying.
+ - REGTEST/MINOR: skip seamless-reload test with abns socket on freebsd
+ - REGTEST/MINOR: remove health-check that can make the test fail
+ - DOC: clarify that check-sni needs an argument.
+ - DOC: refer to check-sni in the documentation of sni
+ - BUG/MEDIUM: mux-h2: fix encoding of non-GET/POST methods
+ - BUG/MINOR: mux-h1: Fix conn_mode processing for headerless outgoing messages
+ - BUG/MEDIUM: mux-h1: Add a BUSY mode to not loop on pipelinned requests
+ - BUG/MEDIUM: mux-h1: Don't loop on the headers parsing if the read0 was received
+ - BUG/MEDIUM: htx: Always do a defrag if a block value is replace by a bigger one
+ - BUG/MEDIUM: mux-h2: Don't forget to set the CS_FL_EOS flag with htx.
+ - BUG/MINOR: hpack: fix off-by-one in header name encoding length calculation
+ - CLEANUP: hpack: no need to include chunk.h, only include buf.h
+ - MINOR: hpack: simplify the len to bytes conversion
+ - MINOR: hpack: use ist2bin() to copy header names in hpack_encode_header()
+ - MINOR: hpack: optimize header encoding for short names
+ - CONTRIB: hpack: add a compressed stream generator for the encoder
+ - MEDIUM: hpack: make it possible to encode any static header name
+ - MINOR: hpack: move the length computation and encoding functions to .h
+ - MINOR: hpack: provide a function to encode a short indexed header
+ - MINOR: hpack: provide a function to encode a long indexed header
+ - MINOR: hpack: provide new functions to encode the ":status" header
+ - MEDIUM: mux-h2: make use of standard HPACK encoding functions for the status
+ - MINOR: hpack: provide a function to encode an HTTP method
+ - MEDIUM: mux-h2: make use of hpack_encode_method() to encode the method
+ - MINOR: hpack: provide a function to encode an HTTP scheme
+ - MEDIUM: mux-h2: make use of hpack_encode_scheme() to encode the scheme
+ - MINOR: hpack: provide a function to encode an HTTP path
+ - MEDIUM: mux-h2: make use of hpack_encode_path() to encode the path
+ - REGTEST: add the HTTP rules test involving HTX processing
+ - REORG: connection: centralize the conn_set_{tos,mark,quickack} functions
+ - MEDIUM: cli: rework the CLI proxy parser
+ - MINOR: cli: parse prompt command in the CLI proxy
+ - MINOR: cli: implements 'quit' in the CLI proxy
+ - BUG/MINOR: cli: wait for payload data even without prompt
+ - MEDIUM: cli: handle payload in CLI proxy
+ - MINOR: cli: use pcli_flags for prompt activation
+ - MINOR: compression: Rename the function check_legacy_http_comp_flt()
+ - MINOR: cache/htx: Don't use the same cache on HTX and legacy HTTP proxies
+ - MINOR: cache: Register the cache as a data filter only if response is cacheable
+ - MEDIUM: cache/htx: Add the HTX support into the cache
+ - MINOR: cache: Improve and simplify the cache configuration check
+ - MINOR: filters: Export the name of known filters
+ - MEDIUM: cache/compression: Add a way to safely combined compression and cache
+ - MEDIUM: cache: Require an explicit filter declaration if other filters are used
+ - REORG: htx: merge types+proto into common/htx.h
+ - REORG: http: create http_msg.c to place there some legacy HTTP parts
+ - REORG: h1: move legacy http functions to http_msg.c
+ - REORG: h1: move the h1_state definition to proto_http
+ - CLEANUP: h1: remove some occurrences of unneeded h1.h inclusions
+ - REORG: h1: merge types+proto into common/h1.h
+ - CLEANUP: stream: remove SF_TUNNEL, SF_INITIALIZED, SF_CONN_TAR
+ - MEDIUM: mux-h1: implement true zero-copy of DATA blocks
+ - MINOR: config: round up global.tune.bufsize to the next multiple of 2 void*
+ - BUG/MINOR: mux-h2: refrain from muxing during the preface
+ - BUG/MINOR: mux-h2: advertise a larger connection window size
+ - DOC: master CLI documentation in management.txt
+ - MINOR: mux-h2: avoid copying large blocks into full buffers
+ - MEDIUM: mux-h2: implement true zero-copy send of large HTX DATA blocks
+ - MINOR: mux-h2: force reads to be HTX-aligned in HTX mode
+ - MINOR: cli: change 'show proc' output of old processes
+ - BUG/MEDIUM: mux-h1: Fix the zero-copy on output for chunked messages
+ - BUG: dns: Prevent stack-exhaustion via recursion loop in dns_read_name
+ - BUG: dns: Prevent out-of-bounds read in dns_read_name()
+ - BUG: dns: Prevent out-of-bounds read in dns_validate_dns_response()
+ - BUG: dns: Fix out-of-bounds read via signedness error in dns_validate_dns_response()
+ - BUG: dns: Fix off-by-one write in dns_validate_dns_response()
+ - REGTEST: the cache regtest requires haproxy 1.9
+ - MEDIUM: cli: store CLI level in the appctx
+ - MEDIUM: cli: show and change CLI permissions
+ - CLEANUP: cli: use dedicated define instead of appctx ones
+ - MEDIUM: cli: handle CLI level from the master CLI
+ - BUG/MEDIUM: cli: handle correctly prefix and payload
+ - BUILD: Makefile: Implements the help target
+ - REGTESTS: adjust the http-rules regtest to support window updates
+ - BUG/MEDIUM: connections: Remove CS_FL_EOS | CS_FL_REOS on retry.
+ - BUG/MEDIUM: stream_interface: Don't report read0 if we were not connected.
+ - BUG/MEDIUM: connection: Just make sure we closed the fd on connection failure.
+ - MEDIUM: mux: Add an optional "reset" method.
+ - BUG/MEDIUM: mux-h1: Fix loop if server closes its connection with unparsed data
+ - MINOR: mux-h1: Add helper functions to wake a stream from recv or send
+ - BUG/MEDIUM: mux-h1: Wake the stream for send once the connection is established
+ - BUG/MEDIUM: connections: Don't attempt to reuse an unusable connection.
+ - MEDIUM: htx: Try to take a connection over if it has no owner.
+ - REGTEST: Reg testing improvements.
+ - REGTEST: Add a first test for health-checks.
+ - REGTEST: Reg test for "check" health-check option.
+ - REGTEST: level 1 health-check test 2.
+ - REGTEST: Add miscellaneous reg tests for health-checks.
+ - REGTEST: add a few HTTP messaging tests
+ - MINOR: lb: make the leastconn algorithm more accurate
+ - REGTEST: fix missing space in checks/s00001
+ - REGTEST: http-messaging: add "option http-buffer-request" for H2 tests
+ - BUG/MEDIUM: cache: fix random crash on filter parser's error path
+ - MINOR: connection: realign empty buffers in muxes, not transport layers
+ - MINOR: mux_h1/h2: simplify the zero-copy Rx alignment
+ - MINOR: backend: count the number of connect and reuse per server and per backend
+ - BUG/MINOR: stats: fix inversion of failed header rewrites and other statuses
+ - MINOR: tools: increase the number of ITOA strings to 16
+ - MINOR: cache: report the number of cache lookups and cache hits
+ - MEDIUM: tasks: check the global task mask instead of the thread number
+ - MINOR: mworker: set all_threads_mask and pid_bit to 1
+ - BUG/MINOR: proto_htx: Fix htx_res_set_status to also set the reason
+ - BUG/MINOR: stats: Parse post data for HTX streams
+ - MINOR: payload/htx: Adapt smp_fetch_len to be HTX aware
+ - MINOR: http_fetch: Implement body_len and body_size sample fetches for the HTX
+ - MAJOR: lua: Forbid calls to Channel functions for LUA scripts in HTTP proxies
+ - MEDIUM: lua/htx: Adapt functions of the HTTP to be compatible with HTX
+ - MINOR: lua/htx: Adapt the functions get_in_length and is_full to be HTX aware
+ - MAJOR: lua/htx: Adapt HTTP applets to support HTX messages
+ - MINOR: lua: Remove useless check on the messages state in HTTP functions
+ - BUG/MEDIUM: htx: When performing zero-copy, start from the right offset.
+ - BUG/MINOR: mworker: don't use uninitialized mworker_proc struct
+ - MINOR: mworker/cli: indicate in the master prompt when a reload failed
+ - MINOR: cli: implements 'reload' on master CLI
+ - BUG/MEDIUM: log: Don't call sample_fetch_as_type if we don't have a stream.
+ - BUG/MEDIUM: mux-h1: make sure we always have at least one HTX block to send
+ - BUG/MAJOR: backend: only update server's counters when the server exists
+ - MINOR: tools: preset the port of fd-based "sockets" to zero
+ - BUG/MINOR: log: fix logging to both FD and IP
+ - REGTEST: Add a reg test for HTTP cookies.
+ - BUILD: ssl: Fix compilation without deprecated OpenSSL 1.1 APIs
+ - BUILD: thread: properly report multi-thread support
+ - BUG/MINOR: logs: leave startup-logs global and not per-thread
+ - BUG/MEDIUM: threads: don't close the thread waker pipe if not init
+ - BUG/MAJOR: compression/cache: Make it really works with these both filters
+ - BUG/MEDIUM: h2: Don't forget to destroy the h2s after deferred shut.
+ - MEDIUM: proxy: Set http-reuse safe as default.
+ - MEDIUM: servers: Add a command to limit the number of idling connections.
+ - MEDIUM: servers: Replace idle-timeout with pool-purge-delay.
+ - MEDIUM: mux: Destroy the stream before trying to add the conn to the idle list.
+ - MEDIUM: mux: provide the session to the init() and attach() method.
+ - MEDIUM: sessions: Don't keep an infinite number of idling connections.
+ - MEDIUM: servers: Be more aggressive when adding H2 connection to idle lists.
+ - MEDIUM: mux_h2: Always set CS_FL_NOT_FIRST for new conn_streams.
+ - BUG/MEDIUM: htx/cache: use the correct class of error codes on abort
+ - BUG/MINOR: cache: also consider CF_SHUTR to abort delivery
+ - MINOR: pools: Cast to volatile int * instead of int *.
+ - MINOR: debug: make the ABORT_NOW macro use a volatile int
+ - BUG/MEDIUM: h2: Don't destroy the h2s if it still has a cs attached.
+ - BUG/MEDIUM: mux-h1: don't try to process an empty input buffer
+ - DOC: clarify the agent-check status line syntax
+ - BUG/MAJOR: hpack: fix length check for short names encoding
+ - DOC: split the README into README + INSTALL
+
+2018/12/08 : 1.9-dev10
+ - MINOR: htx: Rename functions htx_*_to_str() to be H1 specific
+ - BUG/MINOR: htx: Force HTTP/1.1 on H1 formatting when version is 1.1 or above
+ - BUG/MINOR: fix ssl_fc_alpn and actually add ssl_bc_alpn
+ - BUG/MEDIUM: mworker: stop proxies which have no listener in the master
+ - BUG/MEDIUM: h1: Destroy a connection after detach if it has no owner.
+ - BUG/MEDIUM: h2: Don't forget to wake the tasklet after shutr/shutw.
+ - BUG/MINOR: flt_trace/compression: Use the right flag to add the HTX support
+ - BUG/MEDIUM: stream_interface: Make REALLY sure we read all the data.
+ - MEDIUM: mux-h1: Revamp the way subscriptions are handled.
+ - BUG/MEDIUM: mux-h1: Always set CS_FL_RCV_MORE when data are received in h1_recv()
+ - MINOR: mux-h1: Make sure to return 1 in h1_recv() when needed
+ - BUG/MEDIUM: mux-h1: Release the mux H1 in h1_process() if there is no h1s
+ - BUG/MINOR: proto_htx: Truncate the request when an error is detected
+ - BUG/MEDIUM: h2: When sending in HTX, make sure the caller knows we sent all.
+ - BUG/MEDIUM: mux-h2: properly update the window size in HTX mode
+ - BUG/MEDIUM: mux-h2: make sure to always report HTX EOM when consumed by headers
+ - BUG/MEDIUM: mux-h2: stop sending HTX once the mux is blocked
+ - BUG/MEDIUM: mux-h2: don't send more HTX data than requested
+ - MINOR: mux-h2: stop on non-DATA and non-EOM HTX blocks
+ - BUG/MEDIUM: h1: Correctly report used data with no len.
+ - MEDIUM: h1: Realign the ibuf before calling rcv_buf if needed.
+ - BUG/MEDIUM: mux_pt: Always set CS_FL_RCV_MORE.
+ - MINOR: htx: make htx_from_buf() adjust the size only on new buffers
+ - MINOR: htx: add buf_room_for_htx_data() to help optimize buffer transfers
+ - MEDIUM: mux-h1: make use of buf_room_for_htx_data() instead of b_room()
+ - MEDIUM: mux-h1: attempt to zero-copy Rx DATA transfers
+ - MEDIUM: mux-h1: avoid a double copy on the Tx path whenever possible
+ - BUG/MEDIUM: stream-int: don't mark as blocked an empty buffer on Rx
+ - BUG/MINOR: mux-h1: Check h1m flags to set the server conn_mode on request path
+ - MEDIUM: htx: Rework conversion from a buffer to an htx structure
+ - MEDIUM: channel/htx: Add functions for forward HTX data
+ - MINOR: mux-h1: Don't adjust anymore the amount of data sent in h1_snd_buf()
+ - CLEANUP: htx: Fix indentation here and there in HTX files
+ - MINOR: mux-h1: Allow partial data consumption during outgoing data processing
+ - BUG/MEDIUM: mux-h2: use the correct offset for the HTX start line
+ - BUG/MEDIUM: mux-h2: stop sending using HTX on errors
+ - MINOR: mux-h1: Drain obuf if the output is closed after sending data
+ - BUG/MEDIUM: mworker: stop every tasks in the master
+ - BUG/MEDIUM: htx: Set the right start-line offset after a defrag
+ - BUG/MEDIUM: stream: Don't dereference s->txn when it is not there yet.
+ - BUG/MEDIUM: connections: Reuse an already attached conn_stream.
+ - MINOR: stream-int: add a new blocking condition on the remote connection
+ - BUG/MEDIUM: stream-int: don't attempt to receive if the connection is not established
+ - BUG/MEDIUM: lua: block on remote connection establishment
+ - BUG/MEDIUM: mworker: fix several typos in mworker_cleantasks()
+ - SCRIPTS/REGTEST: merge grep+sed into sed in run-regtests
+ - BUG/MEDIUM: connections: Split CS_FL_RCV_MORE into 2 flags.
+ - BUG/MEDIUM: h1: Don't free the connection if it's an outgoing connection.
+ - BUG/MEDIUM: h1: Set CS_FL_REOS if we had a read0.
+ - BUG/MEDIUM: mux-h1: Be sure to have a conn_stream to set CS_FL_REOS in h1_recv
+ - REGTEST: Move LUA reg test 4 to level 1.
+ - MINOR: ist: add functions to copy/uppercase/lowercase into a buffer or string
+ - MEDIUM: ist: always turn header names to lower case
+ - MINOR: h2: don't turn HTX header names to lower case anymore
+ - MEDIUM: ist: use local conversion arrays to case conversion
+ - MINOR: htx: switch to case sensitive search of lower case header names
+ - MINOR: mux-h1: Set CS_FL_EOS when read0 is detected and no data are pending
+ - BUG/MINOR: stream-int: Process read0 even if no data was received in si_cs_recv
+ - REGTEST: fix the Lua test file name in test lua/h00002 :-)
+ - REGTEST: add a basic test for HTTP rules manipulating headers
+ - BUG/MEDIUM: sample: Don't treat SMP_T_METH as SMP_T_STR.
+ - MINOR: sample: add bc_http_major
+ - BUG/MEDIUM: htx: fix typo in htx_replace_stline() making it fail all the time
+ - REGTEST: make the HTTP rules test compatible with HTTP/2 as well
+ - BUG/MEDIUM: h2: Don't try to chunk data when using HTX.
+ - MINOR: compiler: add a new macro ALREADY_CHECKED()
+ - BUILD: h2: mark the start line already checked to avoid warnings
+ - BUG/MINOR: mux-h1: Remove the connection header when it is useless
+
+2018/12/02 : 1.9-dev9
+ - BUILD/MINOR: ssl: fix build with non-alpn/non-npn libssl
+ - BUG/MINOR: mworker: Do not attempt to close(2) fd -1
+ - BUILD: compression: fix build error with DEFAULT_MAXZLIBMEM
+ - MINOR: compression: always create the compression pool
+ - BUG/MEDIUM: mworker: fix FD leak upon reload
+ - BUILD: htx: fix fprintf format inconsistency on 32-bit platforms
+ - BUILD: buffers: buf.h requires unistd to get ssize_t on libmusl
+ - MINOR: initcall: introduce a way to register init functions to call at boot
+ - MINOR: init: process all initcalls in order at boot time
+ - MEDIUM: init: convert all trivial registration calls to initcalls
+ - MINOR: thread: provide a set of lock initialisers
+ - MINOR: threads: add new macros to declare self-initializing locks
+ - MEDIUM: init: use self-initializing spinlocks and rwlocks
+ - MINOR: initcall: apply initcall to all register_build_opts() calls
+ - MINOR: initcall: use initcalls for most post_{check,deinit} and per_thread*
+ - MINOR: initcall: use initcalls for section parsers
+ - MINOR: memory: add a callback function to create a pool
+ - MEDIUM: init: use initcall for all fixed size pool creations
+ - MEDIUM: memory: use pool_destroy_all() to destroy all pools on deinit()
+ - MEDIUM: initcall: use initcalls for a few initialization functions
+ - MEDIUM: memory: make the pool cache an array and not a thread_local
+ - MINOR: ssl: free ctx when libssl doesn't support NPN
+ - BUG/MINOR: proto_htx: only mark connections private if NTLM is detected
+ - MINOR: h2: make struct h2_ops static
+ - BUG/MEDIUM: mworker: avoid leak of client socket
+ - REORG: mworker: declare master variable in global.h
+ - BUG/MEDIUM: listeners: CLOEXEC flag is not correctly set
+ - CLEANUP: http: Fix typo in init_http's comment
+ - BUILD: Makefile: Disable -Wcast-function-type if it exists.
+ - BUG/MEDIUM: h2: Don't bogusly error if the previous stream was closed.
+ - REGTEST/MINOR: script: add run-regtests.sh script
+ - REGTEST: Add a basic test for the cache.
+ - BUG/MEDIUM: mux_pt: Don't forget to unsubscribe() on attach.
+ - BUG/MINOR: ssl: ssl_sock_parse_clienthello ignores session id
+ - BUG/MEDIUM: connections: Wake the stream once the mux is chosen.
+ - BUG/MEDIUM: connections: Don't forget to detach the connection from the SI.
+ - BUG/MEDIUM: stream_interface: Don't check if the handshake is done.
+ - BUG/MEDIUM: stream_interface: Make sure we read all the data available.
+ - BUG/MEDIUM: h2: Call h2_process() if there's an error on the connection.
+ - REGTEST: Fix several issues.
+ - REGTEST: lua: check socket functionality from a lua-task
+ - BUG/MEDIUM: session: Remove the session from the session_list in session_free.
+ - BUG/MEDIUM: streams: Don't assume we have a CS in sess_update_st_con_tcp.
+ - BUG/MEDIUM: connections: Don't assume we have a mux in connect_server().
+ - BUG/MEDIUM: connections: Remove the connection from the idle list before destroy.
+ - BUG/MEDIUM: session: properly clean the outgoing connection before freeing.
+ - BUG/MEDIUM: mux_pt: Don't try to send if handshake is not done.
+ - MEDIUM: connections: Put H2 connections in the idle list if http-reuse always.
+ - MEDIUM: h2: Destroy a connection with no stream if it has no owner.
+ - MAJOR: sessions: Store multiple outgoing connections in the session.
+ - MEDIUM: session: Steal owner-less connections on end of transaction.
+ - MEDIUM: server: Be smarter about deciding to reuse the last server.
+ - BUG/MEDIUM: Special-case http_proxy when dealing with outgoing connections.
+ - BUG/MINOR: cfgparse: Fix transition between 2 sections with the same name
+ - BUG/MINOR: http: Use out buffer instead of trash to display error snapshot
+ - BUG/MINOR: htx: Fix block size calculation when a start-line is added/replaced
+ - BUG/MINOR: mux-h1: Fix processing of "Connection: " header on outgoing messages
+ - BUG/MEDIUM: mux-h1: Reset the H1 parser when an outgoing message is processed
+ - BUG/MINOR: proto_htx: Send outgoing data to client to start response processing
+ - BUG/MINOR: htx: Stop a header or a start line lookup on the first EOH or EOM
+ - BUG/MINOR: connection: report mux modes when HTX is supported
+ - MINOR: htx: add a function to cut the beginning of a DATA block
+ - MEDIUM: conn_stream: Add a way to get mux's info on a CS from the upper layer
+ - MINOR: mux-h1: Implement get_cs_info() callback
+ - MINOR: stream: Rely on CS's info if it exists and fallback on session's ones
+ - MINOR: proto_htx: Use conn_stream's info to set t_idle duration when possible
+ - MINOR: mux-h1: Don't rely on the stream anymore in h1_set_srv_conn_mode()
+ - MINOR: mux-h1: Write last chunk and trailers if not found in the HTX message
+ - MINOR: mux-h1: Be prepared to fail when EOM is added during trailers parsing
+ - MINOR: mux-h1: Subscribe to send in h1_snd_buf() when not all data have been sent
+ - MINOR: mux-h1: Consume channel's data in a loop in h1_snd_buf()
+ - MEDIUM: mux-h1: Add keep-alive outgoing connections in connections list
+ - MINOR: htx: Add function to add an HTX block just before another one
+ - MINOR: htx: Add function to iterate on an HTX message using HTX blocks
+ - MINOR: htx: Add a function to find the HTX block corresponding to a data offset
+ - MINOR: stats: Don't add end-of-data marker and trailers in the HTX response
+ - MEDIUM: htx: Change htx_sl to be a struct instead of an union
+ - MINOR: htx: Add the start-line offset for the HTX message in the HTX structure
+ - MEDIUM: htx: Don't rely on h1_sl anymore except during H1 header parsing
+ - MINOR: proto-htx: Use the start-line flags to set the HTTP message ones
+ - MINOR: htx: Add BODYLESS flags on the HTX start-line and the HTTP message
+ - MINOR: proto_htx: Use full HTX messages to send 100-Continue responses
+ - MINOR: proto_htx: Use full HTX messages to send 103-Early-Hints responses
+ - MINOR: proto_htx: Use full HTX messages to send 401 and 407 responses
+ - MINOR: proto_htx: Send valid HTX message when redir mode is enabled on a server
+ - MINOR: proto_htx: Send valid HTX message to send 30x responses
+ - MEDIUM: proto_htx: Convert all HTTP error messages into HTX
+ - MINOR: mux-h1: Process conn_mode on the EOH when no connection header is found
+ - MINOR: mux-h1: Change client conn_mode on an explicit close for the response
+ - MINOR: mux-h1: Capture bad H1 messages
+ - MAJOR: filters: Adapt filters API to be compatible with the HTX representation
+ - MEDIUM: proto_htx/filters: Add data filtering during the forwarding
+ - MINOR: flt_trace: Adapt to be compatible with the HTX representation
+ - MEDIUM: compression: Adapt to be compatible with the HTX representation
+ - MINOR: h2: implement H2->HTX request header frame transcoding
+ - MEDIUM: mux-h2: register mux for both HTTP and HTX modes
+ - MEDIUM: mux-h2: make h2_rcv_buf() support HTX transfers
+ - MEDIUM: mux-h2: make h2_snd_buf() HTX-aware
+ - MEDIUM: mux-h2: add basic H2->HTX transcoding support for headers
+ - MEDIUM: mux-h2: implement emission of H2 headers frames from HTX blocks
+ - MEDIUM: mux-h2: implement the emission of DATA frames from HTX DATA blocks
+ - MEDIUM: mux-h2: support passing H2 DATA frames to HTX blocks
+ - BUG/MINOR: cfgparse: Fix the call to post parser of the last sections parsed
+ - BUG/MEDIUM: mux-h2: don't lose the first response header in HTX mode
+ - BUG/MEDIUM: mux-h2: remove the HTX EOM block on H2 response headers
+ - MINOR: listener: the mux_proto entry in the bind_conf is const
+ - MINOR: connection: create conn_get_best_mux_entry()
+ - MINOR: server: the mux_proto entry in the server is const
+ - MINOR: config: make sure to associate the proper mux to bind and servers
+ - MINOR: hpack: add ":path" to the list of common header fields
+ - MINOR: h2: add new functions to produce an HTX message from an H2 response
+ - MINOR: mux-h2: mention that the mux is compatible with both sides
+ - MINOR: mux-h2: implement an outgoing stream allocator : h2c_bck_stream_new()
+ - MEDIUM: mux-h2: start to create the outgoing mux
+ - MEDIUM: mux-h2: implement encoding of H2 request on the backend side
+ - MEDIUM: mux-h2: make h2_frt_decode_headers() direction-agnostic
+ - MEDIUM: mux-h2: make h2_process_demux() capable of processing responses as well
+ - MEDIUM: mux-h2: Implement h2_attach().
+ - MEDIUM: mux-h2: Don't bother flagging outgoing connections as TOOMANY.
+ - REGTEST: Fix LEVEL 4 script 0 of "connection" module.
+ - MINOR: connection: Fix a comment.
+ - MINOR: mux: add a "max_streams" method.
+ - MEDIUM: servers: Add a way to keep idle connections alive.
+ - CLEANUP: fix typos in the htx subsystem
+ - CLEANUP: Fix typo in the chunk headers file
+ - CLEANUP: Fix typos in the h1 subsystem
+ - CLEANUP: Fix typos in the h2 subsystem
+ - CLEANUP: Fix a typo in the mini-clist header
+ - CLEANUP: Fix a typo in the proto_htx subsystem
+ - CLEANUP: Fix typos in the proto_tcp subsystem
+ - CLEANUP: Fix a typo in the signal subsystem
+ - CLEANUP: Fix a typo in the session subsystem
+ - CLEANUP: Fix a typo in the queue subsystem
+ - CLEANUP: Fix typos in the shctx subsystem
+ - CLEANUP: Fix typos in the socket pair protocol subsystem
+ - CLEANUP: Fix typos in the map management functions
+ - CLEANUP: Fix typo in the fwrr subsystem
+ - CLEANUP: Fix typos in the cli subsystem
+ - CLEANUP: Fix typo in the 51d subsystem
+ - CLEANUP: Fix a typo in the base64 subsystem
+ - CLEANUP: Fix a typo in the connection subsystem
+ - CLEANUP: Fix a typo in the protocol header file
+ - CLEANUP: Fix a typo in the checks header file
+ - CLEANUP: Fix typos in the file descriptor subsystem
+ - CLEANUP: Fix a typo in the listener subsystem
+ - BUG/MINOR: lb-map: fix unprotected update to server's score
+ - BUILD: threads: fix minor build warnings when threads are disabled
+
+2018/11/25 : 1.9-dev8
+ - REORG: config: extract the global section parser into cfgparse-global
+ - REORG: config: extract the proxy parser into cfgparse-listen.c
+ - BUILD: update the list of supported targets and compilers in makefile and readme
+ - BUILD: reorder the objects in the makefile
+ - BUILD: Makefile: make "V=1" show some of the commands that are executed
+ - BUILD: Makefile: add the quiet mode to a few more targets
+ - BUILD: Makefile: add "$(Q)" to clean, tags and cscope targets
+ - BUILD: Makefile: switch to quiet mode by default for CC/LD/AR
+ - MINOR: cli: format `show proc` to be more readable
+ - MINOR: cli: displays uptime in `show proc`
+ - MINOR: cli: show master information in 'show proc'
+ - BUG/MEDIUM: hpack: fix encoding of "accept-ranges" field
+ - MAJOR: mux-h1: Remove the rxbuf and decode HTTP messages in channel's buffer
+ - BUG/MINOR: mux-h1: Enable keep-alive on server side
+ - BUG/MEDIUM: mux-h1: Fix freeze when the kernel splicing is used
+ - BUG/MEDIUM: mux-h1: Don't set the flag CS_FL_RCV_MORE when nothing was parsed
+ - BUG/MINOR: stats/htx: Remove channel's output when the request is eaten
+ - BUG/MINOR: proto_htx: Fix request/response synchronisation on error
+ - MINOR: stream-int: Notify caller when an error is reported after a rcv_pipe()
+ - MINOR: stream-int: Notify caller when an error is reported after a rcv_buf()
+ - BUG/MINOR: stream-int: Don't call snd_buf() if there are still data in the pipe
+ - MINOR: stream-int: remove useless checks on CS and conn flags in si_cs_send()
+ - BUG/MINOR: config: Be aware of the HTX during the check of mux protocols
+ - BUG/MINOR: mux-htx: Fix bad test on h1c flags in h1_recv_allowed()
+ - MEDIUM: mworker: wait mode use standard init code path
+ - MINOR: log: introduce ha_notice()
+ - MINOR: mworker: use ha_notice to announce a new worker
+ - BUG/MEDIUM: http_fetch: Make sure name is initialized before http_find_header.
+ - MINOR: cli: add mworker_accept_wrapper to 'show fd'
+ - MEDIUM: signal: signal_unregister() removes every handlers
+ - BUG/MEDIUM: mworker: unregister the signals of main()
+ - MINOR: cli: add a few missing includes in proto/cli.h
+ - REORG: time/activity: move activity measurements to activity.{c,h}
+ - MINOR: activity: report the average loop time in "show activity"
+ - MINOR: activity: add configuration and CLI support for "profiling.tasks"
+ - MEDIUM: tasks: collect per-task CPU time and latency
+ - MINOR: sample: add cpu_calls, cpu_ns_avg, cpu_ns_tot, lat_ns_avg, lat_ns_tot
+ - MINOR: cli/activity: rename the stolen CPU time fields to mention milliseconds
+ - BUG/MINOR: cli: Fix memory leak
+ - BUG/MINOR: mworker: fix FD leak and memory leak in error path
+ - MINOR: poller: move the call of tv_update_date() back to the pollers
+ - MINOR: polling: add an option to support busy polling
+ - MINOR: server: Add "alpn" and "npn" keywords.
+ - MEDIUM: connection: Don't bother reactivating polling after connection retry.
+ - MAJOR: connections: Defer mux creation for outgoing connection if alpn is set.
+ - MEDIUM: ssl: Add ssl_bc_alpn and ssl_bc_npn sample fetches.
+ - MINOR: servers: Free [idle|safe|priv]_conns on exit.
+ - REGTEST: add the option to test only a specific set of files
+ - REGTEST: add a test for connections to a "dispatch" address
+ - BUG/MEDIUM: connections: Don't reset the conn flags in *connect_server().
+ - MINOR: server: Only defined conn_complete_server if USE_OPENSSL is set.
+ - BUG/MEDIUM: servers: Don't check if we have a conn_stream too soon.
+ - BUG/MEDIUM: sessions: Set sess->origin to NULL if the origin was destroyed.
+ - MEDIUM: servers: Store the connection in the SI until we have a mux.
+ - BUG/MEDIUM: h2: wake the processing task up after demuxing
+ - BUG/MEDIUM: h2: restart demuxing after releasing buffer space
+
+2018/11/18 : 1.9-dev7
+ - BUILD: cache: fix a build warning regarding too large an integer for the age
+ - CLEANUP: fix typos in the comments of the Makefile
+ - CLEANUP: fix a typo in a comment for the contrib/halog subsystem
+ - CLEANUP: fix typos in comments for the contrib/modsecurity subsystem
+ - CLEANUP: fix typos in comments for contrib/spoa_example
+ - CLEANUP: fix typos in comments for contrib/wireshark-dissectors
+ - DOC: Fix typos in README and CONTRIBUTING
+ - MINOR: log: slightly improve error message syntax on log failure
+ - DOC: logs: the format directive was missing from the second log part
+ - MINOR: log: report the number of dropped logs in the stats
+ - MEDIUM: log: add support for logging to existing file descriptors
+ - MEDIUM: log: support a new "short" format
+ - MEDIUM: log: add a new "raw" format
+ - BUG/MEDIUM: stream-int: change the way buffer room is requested by a stream-int
+ - BUG/MEDIUM: stream-int: convert some co_data() checks to channel_is_empty()
+ - MINOR: namespaces: don't build namespace.c if disabled
+ - BUILD/MEDIUM: threads/affinity: DragonFly build fix
+ - MINOR: http: Add new "early-hint" http-request action.
+ - MINOR: http: Make new "early-hint" http-request action really be parsed.
+ - MINOR: http: Implement "early-hint" http request rules.
+ - MINOR: doc: Add information about "early-hint" http-request action.
+ - DOC: early-hints: fix truncated line.
+ - MINOR: mworker: only close std{in,out,err} in daemon mode
+ - BUG/MEDIUM: log: don't CLOEXEC the inherited FDs
+ - BUG/MEDIUM: Make sure stksess is properly aligned.
+ - BUG/MEDIUM: stream-int: make failed splice_in always subscribe to recv
+ - BUG/MEDIUM: stream-int: clear CO_FL_WAIT_ROOM after splicing data in
+ - BUG/MINOR: stream-int: make sure not to go through the rcv_buf path after splice()
+ - CONTRIB: debug: fix build related to conn_stream flags change
+ - REGTEST: fix scripts 1 and 3 to accept development version
+ - BUG/MINOR: http_fetch: Remove the version part when capturing the request uri
+ - MINOR: http: Regroup return statements of http_req_get_intercept_rule at the end
+ - MINOR: http: Regroup return statements of http_res_get_intercept_rule at the end
+ - BUG/MINOR: http: Be sure to sent fully formed HTTP 103 responses
+ - MEDIUM: jobs: support unstoppable jobs for soft stop
+ - MEDIUM: listeners: support unstoppable listener
+ - MEDIUM: cli: worker socketpair is unstoppable
+ - BUG/MINOR: stream-int: set SI_FL_WANT_PUT in sess_establish()
+ - MINOR: stream: move the conn_stream specific calls to the stream-int
+ - BUG/MINOR: config: Copy default error messages when parsing of a backend starts
+ - CLEANUP: h2: minimum documentation for recent API changes
+ - MINOR: mux: implement a get_first_cs() method
+ - MINOR: stream-int: make conn_si_send_proxy() use cs_get_first()
+ - MINOR: stream-int: relax the forwarding rules in stream_int_notify()
+ - MINOR: stream-int: expand the flags to 32-bit
+ - MINOR: stream-int: rename SI_FL_WAIT_ROOM to SI_FL_RXBLK_ROOM
+ - MINOR: stream-int: introduce new SI_FL_RXBLK flags
+ - MINOR: stream-int: add new functions si_{rx,tx}_{blocked,endp_ready}()
+ - MINOR: stream-int: replace SI_FL_WANT_PUT with !SI_FL_RX_WAIT_EP
+ - MINOR: stream-int: use si_rx_blocked()/si_tx_blocked() to check readiness
+ - MEDIUM: stream-int: use si_rx_buff_{rdy,blk} to report buffer readiness
+ - MINOR: stream-int: replace si_{want,stop}_put() with si_rx_endp_{more,done}()
+ - MEDIUM: stream-int: update the endp polling status only at the end of si_cs_recv()
+ - MINOR: stream-int: make si_sync_recv() simply check ENDP before si_cs_recv()
+ - MINOR: stream-int: automatically mark applets as ready if they block on the channel
+ - MEDIUM: stream-int: fix the si_cant_put() calls used for end point readiness
+ - MEDIUM: stream-int: fix the si_cant_put() calls used for buffer readiness
+ - MEDIUM: stream-int: use si_rx_shut_blk() to indicate the SI is closed
+ - MEDIUM: stream-int: unconditionally call si_chk_rcv() in update and notify
+ - MEDIUM: stream-int: make use of si_rx_chan_{rdy,blk} to control the stream-int from the channel
+ - MINOR: stream-int: replace si_cant_put() with si_rx_room_{blk,rdy}()
+ - MEDIUM: connections: Wait until the connection is established to try to recv.
+ - MEDIUM: mux: Teach the mux_pt how to deal with idle connections.
+ - MINOR: mux: Add a new "avail_streams" method.
+ - MINOR: mux: Add a destroy() method.
+ - MINOR: sessions: Start to store the outgoing connection in sessions.
+ - MAJOR: connections: Detach connections from streams.
+ - MINOR: conn_stream: Add a flag to notify the mux it should flush its buffers
+ - MINOR: htx: Add proto_htx.c file
+ - MINOR: conn_stream: Add a flag to notify the mux it must respect the reserve
+ - MINOR: http: Add standalone functions to parse a start-line or a header
+ - MINOR: http: Call http_send_name_header with the stream instead of the txn
+ - MINOR: conn_stream: Add a flag to notify the SI some data were received
+ - MINOR: http: Add macros to check if a stream uses the HTX representation
+ - MEDIUM: proto_htx: Add HTX analyzers and use it when the mux H1 is used
+ - MEDIUM: mux-h1: Add dummy mux to handle HTTP/1.1 connections
+ - MEDIUM: mux-h1: Add parsing of incoming and outgoing HTTP messages
+ - MAJOR: mux-h1/proto_htx: Handle keep-alive connections in the mux
+ - MEDIUM: mux-h1: Add support of the kernel TCP splicing to forward data
+ - MEDIUM: htx: Add API to deal with the internal representation of HTTP messages
+ - MINOR: http_htx: Add functions to manipulate HTX messages in http_htx.c
+ - MINOR: proto_htx: Add some functions to handle HTX messages
+ - MAJOR: mux-h1/proto_htx: Switch mux-h1 and HTX analyzers on the HTX representation
+ - MINOR: http_htx: Add functions to replace part of the start-line
+ - MINOR: http_htx: Add functions to retrieve a specific occurrence of a header
+ - MINOR: proto_htx: Rewrite htx_apply_redirect_rule to handle HTX messages
+ - MINOR: proto_htx: Add the internal function htx_del_hdr_value
+ - MINOR: proto_htx: Add the internal function htx_fmt_res_line
+ - MINOR: proto_htx: Add functions htx_transform_header and htx_transform_header_str
+ - MINOR: proto_htx: Add functions htx_req_replace_stline and htx_res_set_status
+ - MINOR: proto_htx: Add function to build and send HTTP 103 responses
+ - MINOR: proto_htx: Add functions htx_req_get_intercept_rule and htx_res_get_intercept_rule
+ - MINOR: proto_htx: Add functions to apply req* and rsp* rules on HTX messages
+ - MINOR: proto_htx: Add functions to manage cookies on HTX messages
+ - MINOR: proto_htx: Add functions to check the cacheability of HTX messages
+ - MINOR: proto_htx: Add functions htx_send_name_header
+ - MINOR: proto_htx: Add functions htx_perform_server_redirect
+ - MINOR: proto_htx: Add functions to handle the stats applet
+ - MEDIUM: proto_htx: Adapt htx_process_req_common to handle HTX messages
+ - MEDIUM: proto_htx: Adapt htx_process_request to handle HTX messages
+ - MINOR: proto_htx: Adapt htx_process_tarpit to handle HTX messages
+ - MEDIUM: proto_htx: Adapt htx_wait_for_request_body to handle HTX messages
+ - MEDIUM: proto_htx: Adapt htx_process_res_common to handle HTX messages
+ - MINOR: http_fetch: Add smp_prefetch_htx
+ - MEDIUM: http_fetch: Adapt all fetches to handle HTX messages
+ - MEDIUM: mux-h1: Wait for connection establishment before consuming channel's data
+ - MINOR: stats/htx: Adapt the stats applet to handle HTX messages
+ - MINOR: stream: Don't reset sov value with HTX messages
+ - MEDIUM: mux-h1: Handle errors and timeouts in the stream
+ - MINOR: filters/htx: Forbid filters when the HTX is enabled on a proxy
+ - MINOR: lua/htx: Forbid lua usage when the HTX is enabled on a proxy
+ - CLEANUP: Fix some typos in the haproxy subsystem
+ - CLEANUP: Fix typos in the dns subsystem
+ - CLEANUP: Fix typos in the pattern subsystem
+ - CLEANUP: fix 2 typos in the xxhash subsystem
+ - CLEANUP: fix a few typos in the comments of the server subsystem
+ - CLEANUP: fix a misspell in tests/filltab25.c
+ - CLEANUP: fix a typo found in the stream subsystem
+ - CLEANUP: fix typos in comments in ebtree
+ - CLEANUP: fix typos in reg-tests
+ - CLEANUP: fix typos in the comments of the vars subsystem
+ - CLEANUP: fix typos in the hlua_fcn subsystem
+ - CLEANUP: fix typos in the proto_http subsystem
+ - CLEANUP: fix typos in the proxy subsystem
+ - CLEANUP: fix typos in the ssl_sock subsystem
+ - DOC: Fix typos in different subsections of the documentation
+ - DOC: fix a few typos in the documentation
+ - MINOR: Fix an error message thrown when we run out of memory
+ - MINOR: Fix typos in error messages in the proxy subsystem
+ - MINOR: fix typos in the examples files
+ - CLEANUP: Fix a typo in the stats subsystem
+ - CLEANUP: Fix typos in the acl subsystem
+ - CLEANUP: Fix typos in the cache subsystem
+ - CLEANUP: Fix typos in the cfgparse subsystem
+ - CLEANUP: Fix typos in the filters subsystem
+ - CLEANUP: Fix typos in the http subsystem
+ - CLEANUP: Fix typos in the log subsystem
+ - CLEANUP: Fix typos in the peers subsystem
+ - CLEANUP: Fix typos in the regex subsystem
+ - CLEANUP: Fix typos in the sample subsystem
+ - CLEANUP: Fix typos in the spoe subsystem
+ - CLEANUP: Fix typos in the standard subsystem
+ - CLEANUP: Fix typos in the stick_table subsystem
+ - CLEANUP: Fix typos in the task subsystem
+ - MINOR: Fix typo in error message in the standard subsystem
+ - CLEANUP: fix typos in the comments of hlua
+ - MINOR: Fix typo in the error 500 output of hlua
+ - MINOR: Fix a typo in a warning message in the spoe subsystem
+
+2018/11/11 : 1.9-dev6
+ - BUG/MEDIUM: tools: fix direction of my_ffsl()
+ - BUG/MINOR: cli: forward the whole command on master CLI
+ - BUG/MEDIUM: auth/threads: use of crypt() is not thread-safe
+ - MINOR: compat: automatically detect support for crypt_r()
+ - MEDIUM: auth/threads: make use of crypt_r() on systems supporting it
+ - DOC: split the http-request actions in their own section
+ - DOC: split the http-response actions in their own section
+ - BUG/MAJOR: stream-int: don't call si_cs_recv() in stream_int_chk_rcv_conn()
+ - BUG/MINOR: tasks: make sure wakeup events are properly reported to subscribers
+ - MINOR: stats: report the number of active jobs and listeners in "show info"
+ - MINOR: stats: report the number of active peers in "show info"
+ - MINOR: stats: report the number of currently connected peers
+ - MINOR: cli: show the number of reload in 'show proc'
+ - MINOR: cli: can't connect to the target CLI
+ - MEDIUM: mworker: does not create the CLI proxy when no listener
+ - MINOR: mworker: displays more information when leaving
+ - MEDIUM: mworker: exit with the incriminated exit code
+ - MINOR: mworker: displays a message when a worker is forked
+ - MEDIUM: mworker: leave when the master dies
+ - CLEANUP: stream-int: retro-document si_cs_io_cb()
+ - BUG/MEDIUM: mworker: does not abort() in mworker_pipe_register()
+ - BUG/MEDIUM: stream-int: don't wake up for nothing during SI_ST_CON
+ - BUG/MEDIUM: cli: crash when trying to access a worker
+ - DOC: restore note about "independant" typo
+ - MEDIUM: stream: implement stream_buf_available()
+ - MEDIUM: appctx: check for allocation attempts in buffer allocation callbacks
+ - MINOR: stream-int: rename si_applet_{want|stop|cant}_{get|put}
+ - MINOR: stream-int: add si_done_{get,put} to indicate that we won't do it anymore
+ - MINOR: stream-int: use si_cant_put() instead of setting SI_FL_WAIT_ROOM
+ - MINOR: stream-int: make use of si_done_{get,put}() in shut{w,r}
+ - MINOR: stream-int: make it clear that si_ops cannot be null
+ - MEDIUM: stream-int: temporarily make si_chk_rcv() take care of SI_FL_WAIT_ROOM
+ - MINOR: stream-int: factor the SI_ST_EST state test into si_chk_rcv()
+ - MEDIUM: stream-int: make SI_FL_WANT_PUT reflect CF_DONT_READ
+ - MEDIUM: stream-int: always call si_chk_rcv() when we make room in the buffer
+ - MEDIUM: stream-int: make si_chk_rcv() check that SI_FL_WAIT_ROOM is cleared
+ - MINOR: stream-int: replace si_update() with si_update_both()
+ - MEDIUM: stream-int: make stream_int_update() aware of the lower layers
+ - CLEANUP: stream-int: remove the now unused si->update() function
+ - MEDIUM: stream-int: Rely only on SI_FL_WAIT_ROOM to stop data receipt
+ - MEDIUM: stream-int: Try to read data even if channel's buffer seems to be full
+ - BUG/MINOR: config: better detect the presence of the h2 pattern in npn/alpn
+
+2018/10/28 : 1.9-dev5
+ - BUILD: Makefile: add the new ERR variable to force -Werror
+ - MINOR: freq_ctr: add swrate_add_scaled() to work with large samples
+ - MINOR: stream_interface: Avoid calling si_cs_send/recv if not needed.
+ - CLEANUP: http: Remove the unused function http_find_header
+ - MINOR: h1: Export some functions parsing the value of some HTTP headers
+ - BUG/MEDIUM: stream-int: don't set SI_FL_WAIT_ROOM on CF_READ_DONTWAIT
+ - MINOR: proxy: add a new option "http-use-htx"
+ - BUG/MEDIUM: pools: fix the minimum allocation size
+ - MINOR: shctx: Shared objects block by block allocation.
+ - MINOR: cache: Larger HTTP objects caching.
+ - MINOR: shctx: Add a maximum object size parameter.
+ - MINOR: cache: Add "max-object-size" option.
+ - DOC: Update about the cache support for big objects.
+ - BUG/MINOR: cache: Crashes with "total-max-size" > 2047(MB).
+ - BUG/MINOR: cache: Wrong usage of shctx_init().
+ - BUG/MINOR: ssl: Wrong usage of shctx_init().
+ - MINOR: cache: Avoid usage of atoi() when parsing "max-object-size".
+ - MINOR: shctx: Change max. object size type to unsigned int.
+ - DOC: cache: Missing information about "total-max-size" and "max-object-size"
+ - CLEANUP: tools: fix misleading comment above function LIM2A
+ - MEDIUM: channel: merge back flags CF_WRITE_PARTIAL and CF_WRITE_EVENT
+ - BUG/MINOR: only mark connections private if NTLM is detected
+ - BUG/MINOR: only auto-prefer last server if lb-alg is non-deterministic
+ - MINOR: stream: don't prune variables if the list is empty
+ - MINOR: stream-int: add si_alloc_ibuf() to ease input buffer allocation
+ - MEDIUM: stream-int: replace channel_alloc_buffer() with si_alloc_ibuf() everywhere
+ - MEDIUM: stream: always call si_cs_recv() after a failed buffer allocation
+ - MEDIUM: stream: don't try to send first in process_stream()
+ - MEDIUM: stream-int: make si_update() synchronize flag changes before the I/O
+ - MEDIUM: stream-int: call si_cs_process() in stream_int_update_conn
+ - MINOR: stream-int: don't needlessly call tasklet_wakeup() in stream_int_chk_snd_conn()
+ - MINOR: stream-int: make stream_int_notify() not wake the tasklet up
+ - MINOR: stream-int: don't needlessly call si_cs_send() in si_cs_process()
+ - MINOR: mworker: number of reload in the life of a worker
+ - MEDIUM: mworker: each worker socketpair is a CLI listener
+ - REORG: mworker: move struct mworker_proc to global.h
+ - MINOR: server: export new_server() function
+ - MEDIUM: mworker: move proc_list gen before proxies startup
+ - MEDIUM: mworker: add proc_list in global.h
+ - MEDIUM: mworker: proxy for the master CLI
+ - MEDIUM: mworker: create CLI listeners from argv[]
+ - MEDIUM: cli: disable some keywords in the master
+ - MEDIUM: mworker: find the server ptr using a CLI prefix
+ - MEDIUM: cli: 'show proc' displays processes
+ - MEDIUM: cli: implement 'mode cli' proxy analyzers
+ - MINOR: cli: displays sockpair@ in "show cli sockets"
+ - MEDIUM: cli: enable "show cli sockets" for the master
+ - MINOR: cli: put @master @<relative pid> @!<pid> in the help
+ - MEDIUM: listeners: set O_CLOEXEC on the accepted FDs
+ - MEDIUM: mworker: stop the master proxy in the workers
+ - MEDIUM: channel: reorder the channel analyzers for the cli
+ - MEDIUM: cli: write a prompt for the CLI proxy of the master
+ - MINOR: cli: helper to write a response message and close
+ - MINOR: cache: Add "Age" header.
+ - REGTEST: make the IP+port logging test more reliable
+ - BUG/MINOR: memory: make the thread-local cache allocator set the debugging link
+ - BUG/MAJOR: http: http_txn_get_path() may deference an inexisting buffer
+ - BUG/MINOR: backend: assign the wait list after the error check
+
+2018/10/21 : 1.9-dev4
+ - BUILD: Allow configuration of pcre-config path
+ - DOC: clarify force-private-cache is an option
+ - BUG/MINOR: connection: avoid null pointer dereference in send-proxy-v2
+ - REORG: http: move the code to different files
+ - REORG: http: move HTTP rules parsing to http_rules.c
+ - CLEANUP: http: remove some leftovers from recent cleanups
+ - BUILD: Makefile: add a "make opts" target to simply show the build options
+ - BUILD: Makefile: speed up compiler options detection
+ - BUG/MINOR: backend: check that the mux installed properly
+ - BUG/MEDIUM: h2: check that the connection is still valid at the end of init()
+ - BUG/MEDIUM: h2: make h2_stream_new() return an error on memory allocation failure
+ - REGTEST/MINOR: compatibility: use unix@ instead of abns@ sockets
+ - MINOR: ssl: cleanup old openssl API call
+ - MINOR: ssl: generate-certificates for BoringSSL
+ - BUG/MEDIUM: buffers: Make sure we don't wrap in ci_insert_line2/b_rep_blk.
+ - MEDIUM: ssl: add support for ciphersuites option for TLSv1.3
+ - CLEANUP: haproxy: Remove unused variable
+ - CLEANUP: h1: Fix debug warnings for h1 headers
+ - CLEANUP: stick-tables: Remove unneeded double (()) around conditional clause
+ - MEDIUM: task: perform a single tree lookup per run queue batch
+ - BUG/MEDIUM: Cur/CumSslConns counters not threadsafe.
+ - BUG/MINOR: threads: move declaration of capabilities to config.h
+ - OPTIM: tools: optimize my_ffsl() for x86_64
+ - BUG/MINOR: h2: null-deref
+ - BUG/MINOR: checks: queues null-deref
+ - MINOR: connections: Introduce an unsubscribe method.
+ - MEDIUM: connections: Change struct wait_list to wait_event.
+ - BUG/MEDIUM: h2: Make sure we're not in the send list on flow control.
+ - BUG/MEDIUM: mworker: segfault receiving SIGUSR1 followed by SIGTERM.
+ - BUG/MEDIUM: stream: Make sure to unsubscribe before si_release_endpoint.
+ - MINOR: http: Move comment about some HTTP macros in the right header file
+ - MINOR: stats: Add missing include
+ - MINOR: http: Export some functions and do cleanup to prepare HTTP refactoring
+ - MEDIUM: http: Ignore http-pretend-keepalive option on frontend
+ - MEDIUM: http: Ignore http-tunnel option on backend
+ - MINOR: http: Use same flag for httpclose and forceclose options
+ - MINOR: h1: Add EOH marker during headers parsing
+ - MINOR: conn-stream: Add CL_FL_NOT_FIRST flag
+ - MINOR: h1: Change the union h1_sl to use indirect strings to store infos
+ - MINOR: h1: Add the flag H1_MF_NO_PHDR to not add pseudo-headers during parsing
+ - MINOR: log: make sess_log() support sess=NULL
+ - MINOR: chunk: add chunk_cpy() and chunk_cat()
+ - MEDIUM: h2: stop relying on H2_SS_IDLE / H2_SS_CLOSED
+ - CLEANUP: h2: rename h2c_snd_settings() to h2c_send_settings()
+ - MINOR: h2: don't try to send data before preface
+ - MINOR: h2: unify the mux init function
+ - MINOR: h2: retrieve the front proxy from the caller instead of the session
+ - MINOR: h2: split h2c_stream_new() into h2s_new() + h2c_frt_stream_new()
+ - MINOR: h2: add a new flag to quickly distinguish front vs back connection
+ - BUG/MEDIUM: mworker: don't poll on LI_O_INHERITED listeners
+ - BUG/MEDIUM: stream: don't crash on out-of-memory
+ - BUILD: compiler: add a new statement "__unreachable()"
+ - BUILD: lua: silence some compiler warnings about potential null derefs
+ - BUILD: ssl: fix null-deref warning in ssl_fc_cipherlist_str sample fetch
+ - BUILD: ssl: fix another null-deref warning in ssl_sock_switchctx_cbk()
+ - BUILD: stick-table: make sure not to fail on task_new() during initialization
+ - BUILD: peers: check allocation error during peers_init_sync()
+ - MINOR: tools: add a new function atleast2() to test masks for more than 1 bit
+ - MINOR: config: use atleast2() instead of my_popcountl() where relevant
+ - MEDIUM: fd/threads: only grab the fd's lock if the FD has more than one thread
+ - MAJOR: tasks: create per-thread wait queues
+ - OPTIM: tasks: group all tree roots per cache line
+ - DOC: Fix a few typos
+ - MINOR: pools: allocate most memory pools from an array
+ - MINOR: pools: split pool_free() in the lockfree variant
+ - MEDIUM: pools: implement a thread-local cache for pool entries
+ - BUG/MEDIUM: threads: fix thread_release() at the end of the rendez-vous point
+ - Revert "BUILD: lua: silence some compiler warnings about potential null derefs"
+ - BUILD: lua: silence some compiler warnings about potential null derefs (#2)
+ - MINOR: lua: all functions calling lua_yieldk() may return
+ - BUILD: lua: silence some compiler warnings after WILL_LJMP
+ - BUILD: Makefile: silence an option conflict warning with clang
+ - MINOR: server: Use memcpy() instead of strncpy().
+ - CLEANUP: state-file: make the path concatenation code a bit more consistent
+ - MINOR: build: Disable -Wstringop-overflow.
+ - MINOR: cfgparse: Write 130 as 128 as 0x82 and 0x80.
+ - MINOR: peers: use defines instead of enums to appease clang.
+ - DOC: fix reference to map files in MAINTAINERS
+ - MINOR: fd: centralize poll timeout computation in compute_poll_timeout()
+ - MINOR: poller: move time and date computation out of the pollers
+ - BUILD: memory: fix pointer declaration for atomic CAS
+ - BUILD: Makefile: add USE_RT to pass -lrt for clock_gettime() and friends
+ - MINOR: time: add now_mono_time() and now_cpu_time()
+ - MEDIUM: time: measure the time stolen by other threads
+ - BUILD: memory: fix free_list pointer declaration again for atomic CAS
+ - BUILD: compiler: rename __unreachable() to my_unreachable()
+ - BUG/MEDIUM: pools: Fix the usage of mmap() with DEBUG_UAF.
+ - BUILD: memory: fix free_list pointer declaration again for atomic CAS
+ - BUG/MEDIUM: h2: Close connection if no stream is left and GOAWAY was sent.
+ - BUG/MEDIUM: connections: Remove subscription if going in idle mode.
+ - BUG/MEDIUM: stream: Make sure polling is right on retry.
+ - MINOR: h2: Make sure to return 1 in h2_recv() when needed.
+ - MEDIUM: connections: Don't directly mess with the polling from the upper layers.
+ - MINOR: streams: Call tasklet_free() after si_release_endpoint().
+ - MINOR: connection: Add a SUB_CALL_UNSUBSCRIBE event.
+ - MINOR: h2: Don't run tasks that are waiting to send if mux in full.
+ - MINOR: ebtree: save 8 bytes in struct eb32sc_node
+
+2018/09/29 : 1.9-dev3
+ - BUG/MINOR: h1: don't consider the status for each header
+ - MINOR: h1: report in the h1m struct if the HTTP version is 1.1 or above
+ - MINOR: h1: parse the Connection header field
+ - DOC: Fix typos in lua documentation
+ - MINOR: h1: Add H1_MF_XFER_LEN flag
+ - MINOR: http: add http_hdr_del() to remove a header from a list
+ - MINOR: h1: add headers to the list after controls, not before
+ - MEDIUM: h1: better handle transfer-encoding vs content-length
+ - MEDIUM: h1: deduplicate the content-length header
+ - BUG/MEDIUM: patterns: fix possible double free when reloading a pattern list
+ - BUG/MEDIUM: h1: Really skip all updates when incomplete messages are parsed
+ - CLEANUP/CONTRIB: hpack: remove some h1 build warnings
+ - BUG/MINOR: tools: fix set_net_port() / set_host_port() on IPv4
+ - BUG/MINOR: cli: make sure the "getsock" command is only called on connections
+ - MINOR: stktable: provide an unchecked version of stktable_data_ptr()
+ - MINOR: stream-int: make si_appctx() never fail
+ - BUILD: ssl_sock: remove build warnings on potential null-derefs
+ - BUILD: stats: remove build warnings on potential null-derefs
+ - BUILD: stream: address null-deref build warnings at -Wextra
+ - BUILD: http: address a couple of null-deref warnings at -Wextra
+ - BUILD: log: silent build warnings due to unchecked __objt_{server,applet}
+ - BUILD: dns: fix null-deref build warning at -Wextra
+ - BUILD: checks: silence a null-deref build warning at -Wextra
+ - BUILD: connection: silence a couple of null-deref build warnings at -Wextra
+ - BUILD: backend: fix 3 build warnings related to null-deref at -Wextra
+ - BUILD: sockpair: silence a build warning at -Wextra
+ - BUILD: build with -Wextra and sort out certain warnings
+ - BUG/CRITICAL: hpack: fix improper sign check on the header index value
+ - BUG/MEDIUM: http: Don't parse chunked body if there is no input data
+ - DOC: Update configuration doc about the maximum number of stick counters.
+ - BUG/MEDIUM: process_stream: Don't use si_cs_io_cb() in process_stream().
+ - MINOR: h2/stream_interface: Reintroduce the wake() method.
+ - BUG/MEDIUM: h2: Wake the task instead of calling h2_recv()/h2_process().
+ - BUG/MEDIUM: process_stream(): Don't wake the task if no new data was received.
+ - MEDIUM: lua: Add stick table support for Lua.
+
+2018/09/12 : 1.9-dev2
+ - BUG/MINOR: buffers: Fix b_slow_realign when a buffer is realigned without output
+ - BUG/MEDIUM: threads: fix the no-thread case after the change to the sync point
+ - BUG/MEDIUM: servers: check the queues once enabling a server
+ - BUG/MEDIUM: queue: prevent a backup server from draining the proxy's connections
+ - MEDIUM: mux: Remove const on the buffer in mux->snd_buf()
+ - CLEANUP: backend: Move mux install to call it at only one place
+ - MINOR: conn_stream: add a tx buffer to the conn_stream
+ - MINOR: conn_stream: add cs_send() as a default snd_buf() function
+ - MINOR: backend: Try to find the best mux for outgoing connections
+ - MEDIUM: backend: don't rely on mux_pt_ops in connect_server()
+ - MINOR: mux: Add info about the supported side in alpn_mux_list structure
+ - MINOR: mux: Unlink ALPN and multiplexers to rather speak of mux protocols
+ - MINOR: mux: Print the list of existing mux protocols during HA startup
+ - MEDIUM: checks: use the new rendez-vous point to spread check result
+ - MEDIUM: haproxy: don't use sync_poll_loop() anymore in the main loop
+ - MINOR: threads: remove the previous synchronization point
+ - MAJOR: server: make server state changes synchronous again
+ - CLEANUP: server: remove the update list and the update lock
+ - BUG/MINOR: threads: Remove the unexisting lock label "UPDATED_SERVERS_LOCK"
+ - BUG/MEDIUM: stream_int: Don't check CO_FL_SOCK_RD_SH flag to trigger cs receive
+ - MINOR: mux: Change get_mux_proto to get an ist as parameter
+ - MINOR: mux: Improve the message with the list of existing mux protocols
+ - MINOR: mux/frontend: Add 'proto' keyword to force the mux protocol
+ - MINOR: mux/server: Add 'proto' keyword to force the multiplexer's protocol
+ - MEDIUM: mux: Use the mux protocol specified on bind/server lines
+ - BUG/MEDIUM: connection/mux: take care of serverless proxies
+ - MINOR: queue: make sure the pendconn is released before logging
+ - MINOR: stream: rename {srv,prx}_queue_size to *_queue_pos
+ - MINOR: queue: store the queue index in the stream when enqueuing
+ - MINOR: queue: replace the linked list with a tree
+ - MEDIUM: add set-priority-class and set-priority-offset
+ - MEDIUM: queue: adjust position based on priority-class and priority-offset
+ - DOC: update the roadmap about priority queues
+ - BUG/MINOR: ssl: empty connections reported as errors.
+ - MINOR: connections: Make rcv_buf mandatory and nuke cs_recv().
+ - MINOR: connections: Move rxbuf from the conn_stream to the h2s.
+ - MINOR: connections: Get rid of txbuf.
+ - MINOR: tasks: Allow tasklet_wakeup() to wakeup a task.
+ - MINOR: connections/mux: Add the wait reason(s) to wait_list.
+ - MINOR: stream_interface: Don't use si_cs_send() as a task handler.
+ - MINOR: stream_interface: Give stream_interface its own wait_list.
+ - MINOR: mux_h2: Don't use h2_send() as a callback.
+ - MINOR: checks: Add event_srv_chk_io().
+ - BUG/MEDIUM: tasks: Don't insert in the global rqueue if nbthread == 1
+ - BUG/MEDIUM: sessions: Don't use t->state.
+ - BUG/MEDIUM: ssl: fix missing error loading a keytype cert from a bundle.
+ - BUG/MEDIUM: ssl: loading dh param from certifile causes unpredictable error.
+ - BUG/MINOR: map: fix map_regm with backref
+ - DOC: dns: explain set server ... fqdn requires resolver
+ - DOC: add documentation for prio_class and prio_offset sample fetches.
+ - DOC: ssl: Use consistent naming for TLS protocols
+ - DOC: update the layering design notes
+ - MINOR: tasks: Don't special-case when nbthreads == 1
+ - MINOR: fd cache: And the thread_mask with all_threads_mask.
+ - BUG/MEDIUM: lua: socket timeouts are not applied
+ - BUG/MINOR: lua: fix extra 500ms added to socket timeouts
+ - BUG/MEDIUM: server: update our local state before propagating changes
+ - BUG/MEDIUM: cli/threads: protect all "proxy" commands against concurrent updates
+ - DOC: server/threads: document which functions need to be called with/without locks
+ - BUG/MEDIUM: cli/threads: protect some server commands against concurrent operations
+ - BUG/MEDIUM: streams: Don't forget to remove the si from the wait list.
+ - BUG/MEDIUM: tasklets: Add the thread as active when waking a tasklet.
+ - BUG/MEDIUM: stream-int: Check if the conn_stream exist in si_cs_io_cb.
+ - BUG/MEDIUM: H2: Activate polling after successful h2_snd_buf().
+ - BUG/MEDIUM: stream_interface: Call the wake callback after sending.
+ - BUG/MAJOR: queue/threads: make pendconn_redistribute not lock the server
+ - BUG/MEDIUM: connection: don't forget to always delete the list's head
+ - BUG/MEDIUM: lb/threads: always properly lock LB algorithms on maintenance operations
+ - BUG/MEDIUM: check/threads: do not involve the rendez-vous point for status updates
+ - BUG/MINOR: chunks: do not store -1 into chunk_printf() in case of error
+ - BUG/MEDIUM: http: don't store exp_replace() result in the trash's length
+ - BUG/MEDIUM: http: don't store url_decode() result in the samples's length
+ - BUG/MEDIUM: dns: don't store dns_build_query() result in the trash's length
+ - BUG/MEDIUM: map: don't store exp_replace() result in the trash's length
+ - BUG/MEDIUM: connection: don't store recv() result into trash.data
+ - BUG/MEDIUM: cli/ssl: don't store base64dec() result in the trash's length
+ - MINOR: chunk: remove impossible tests on negative chunk->data
+ - MINOR: sample: remove impossible tests on negative smp->data.u.str.data
+ - DOC: Fix spelling error in configuration doc
+ - REGTEST/MINOR: Missing mandatory "ignore_unknown_macro".
+ - REGTEST/MINOR: Add a new class of regression testing files.
+ - BUG/MEDIUM: unix: provide a ->drain() function
+ - MINOR: connection: make conn_sock_drain() work for all socket families
+ - BUG/MINOR: lua: Bad HTTP client request duration.
+ - REGTEST/MINOR: Add reg testing files.
+ - BUG/MEDIUM: mux_pt: dereference the connection with care in mux_pt_wake()
+ - REGTEST/MINOR: Add a reg testing file for b406b87 commit.
+ - BUG/MEDIUM: lua: reset lua transaction between http requests
+ - MINOR: add be_conn_free sample fetch
+ - MINOR: Add srv_conn_free sample fetch
+ - BUG/MEDIUM: hlua: Make sure we drain the output buffer when done.
+ - MINOR: checks: Call wake_srv_chk() when we can finally send data.
+ - BUG/MEDIUM: stream_interface: try to call si_cs_send() earlier.
+ - BUG/MAJOR: thread: lua: Wrong SSL context initialization.
+ - REGTEST/MINOR: Add a reg testing file for 3e60b11.
+ - BUG/MEDIUM: hlua: Don't call RESET_SAFE_LJMP if SET_SAFE_LJMP returns 0.
+ - REGTEST/MINOR: lua: Add reg testing files for 70d318c.
+ - BUG/MEDIUM: dns/server: fix incompatibility between SRV resolution and server state file
+ - BUG/MEDIUM: ECC cert should work with TLS < v1.2 and openssl >= 1.1.1
+ - MINOR: tools: make date2str_log() take some consts
+ - MINOR: thread: implement HA_ATOMIC_XADD()
+ - BUG/MINOR: stream: use atomic increments for the request counter
+ - BUG/MEDIUM: session: fix reporting of handshake processing time in the logs
+ - BUG/MEDIUM: h2: fix risk of memory leak on malformed wrapped frames
+ - BUG/MAJOR: buffer: fix incorrect check in __b_putblk()
+ - MINOR: log: move the log code to sess_build_logline() to add extra arguments
+ - MINOR: log: make the backend fall back to the frontend when there's no stream
+ - MINOR: log: make sess_build_logline() not dereference a NULL stream for txn
+ - MINOR: log: don't unconditionally pick log info from s->logs
+ - CLEANUP: log: make the low_level lf_{ip,port,text,text_len} functions take consts
+ - MINOR: log: keep a copy of the backend connection early in sess_build_logline()
+ - MINOR: log: do not dereference a null stream to access captures
+ - MINOR: log: be sure not to dereference a null stream for a target
+ - MINOR: log: don't check the stream-int's conn_retries if the stream is NULL
+ - MINOR: log: use NULL for the unique_id if there is no stream
+ - MINOR: log: keep a copy of s->flags early to avoid a dereference
+ - MINOR: log: use zero as the request counter if there is no stream
+ - MEDIUM: log: make sess_build_logline() support being called with no stream
+ - MINOR: log: provide a function to emit a log for a session
+ - MEDIUM: h2: produce some logs on early errors that prevent streams from being created
+ - BUG/MINOR: h1: fix buffer shift after realignment
+ - MINOR: connection: make the initialization more consistent
+ - MINOR: connection: add new function conn_get_proxy()
+ - MINOR: connection: add new function conn_is_back()
+ - MINOR: log: One const should be enough.
+ - BUG/MINOR: dns: check and link servers' resolvers right after config parsing
+ - BUG/MINOR: http/threads: atomically increment the error snapshot ID
+ - MINOR: snapshot: restart on the event ID and not the stream ID
+ - MINOR: snapshot: split the error snapshots into common and proto-specific parts
+ - MEDIUM: snapshot: start to reorder the HTTP snapshot output a little bit
+ - MEDIUM: snapshot: implement a show() callback and use it for HTTP
+ - MINOR: proxy: add a new generic proxy_capture_error()
+ - MINOR: http: make the HTTP error capture rely on the generic proxy code
+ - MINOR: http: remove the pointer to the error snapshot in http_capture_bad_message()
+ - REORG: cli: move the "show errors" handler from http to proxy
+ - BUG/MEDIUM: snapshot: take the proxy's lock while dumping errors
+ - MEDIUM: snapshots: dynamically allocate the snapshots
+ - MEDIUM: snapshot: merge the captured data after the descriptor
+ - MEDIUM: mworker: remove register/unregister signal functions
+ - MEDIUM: mworker: use the haproxy poll loop
+ - BUG/MINOR: mworker: no need to stop peers for each proxy
+ - MINOR: mworker: mworker_cleanlisteners() delete the listeners
+ - MEDIUM: mworker: block SIGCHLD until the master is ready
+ - MEDIUM: mworker: never block SIG{TERM,INT} during reload
+ - MEDIUM: startup: unify signal init between daemon and mworker mode
+ - MINOR: mworker: don't deinit the poller fd when in wait mode
+ - MEDIUM: mworker: master wait mode use its own initialization
+ - MEDIUM: mworker: replace the master pipe by socketpairs
+ - MINOR: mworker: keep and clean the listeners
+ - MEDIUM: threads: close the thread-waker pipe during deinit
+ - MEDIUM: mworker: call per_thread deinit in mworker_reload()
+ - REORG: http: move the HTTP semantics definitions to http.h/http.c
+ - REORG: http: move http_get_path() to http.c
+ - REORG: http: move error codes production and processing to http.c
+ - REORG: http: move the log encoding tables to log.c
+ - REORG: http: move some header value processing functions to http.c
+ - BUG/MAJOR: kqueue: Don't reset the changes number by accident.
+ - MEDIUM: protocol: use a custom AF_MAX to help protocol parser
+ - MEDIUM: protocol: sockpair protocol
+ - TESTS: add a python wrapper for sockpair@
+ - BUG/MINOR: server: Crash when setting FQDN via CLI.
+ - BUG/MINOR: h2: report asynchronous end of stream on closed connections
+ - BUILD: fix build without thread
+ - BUG/MEDIUM: tasks: Don't forget to decrement task_list_size in tasklet_free().
+ - MEDIUM: connections: Don't reset the polling flags in conn_fd_handler().
+ - MEDIUM: connections/mux: Add a recv and a send+recv wait list.
+ - MEDIUM: connections: Get rid of the recv() method.
+ - MINOR: h2: Let user of h2_recv() and h2_send() know xfer has been done.
+ - MEDIUM: h2: always subscribe to receive if allowed.
+ - MEDIUM: h2: Don't use a wake() method anymore.
+ - MEDIUM: stream_interface: Make recv() subscribe when more data is needed.
+ - MINOR: connections: Add a "handle" field to wait_list.
+ - MEDIUM: mux_h2: Revamp the send path when blocking.
+ - MEDIUM: stream_interfaces: Starts receiving from the upper layers.
+ - MINOR: checks: Give checks their own wait_list.
+ - MINOR: conn_streams: Remove wait_list from conn_streams.
+ - REORG: h1: create a new h1m_state
+ - MINOR: h1: add the restart offsets into struct h1m
+ - MINOR: h1: remove the unused states from h1m_state
+ - MINOR: h1: provide a distinct init() function for request and response
+ - MINOR: h1: add a message flag to indicate that a message carries a response
+ - MINOR: h2: make sure h1m->err_pos field is correct on chunk error
+ - MINOR: h1: properly pre-initialize err_pos to -2
+ - MINOR: mux_h2: replace the req,res h1 messages with a single h1 message
+ - MINOR: h2: pre-initialize h1m->err_pos to -1 on the output path
+ - MEDIUM: h1: consider err_pos before deciding to accept a header name or not
+ - MEDIUM: h1: make the parser support a pointer to a start line
+ - MEDIUM: h1: let the caller pass the initial parser's state
+ - MINOR: h1: make the message parser support a null <hdr> argument
+ - MEDIUM: h1: support partial message parsing
+ - MEDIUM: h1: remove the useless H1_MSG_BODY state
+ - MINOR: h2: store the HTTP status into the H2S, not the H1M
+ - MINOR: h1: remove the HTTP status from the H1M struct
+ - MEDIUM: h1: implement the request parser as well
+ - MINOR: h1: add H1_MF_TOLOWER to decide when to turn header names to lower case
+ - MINOR: connection: pass the proxy when creating a connection
+ - BUG/MEDIUM: h2: Don't forget to empty the wait lists on destroy.
+ - BUG/MEDIUM: h2: Don't forget to set recv_wait_list to NULL in h2_detach.
+ - BUG/MAJOR: h2: reset the parser's state on mux buffer full
+
+2018/08/02 : 1.9-dev1
+ - BUG/MEDIUM: kqueue: Don't bother closing the kqueue after fork.
+ - DOC: cache: update sections and fix some typos
+ - BUILD/MINOR: deviceatlas: enable thread support
+ - BUG/MEDIUM: tcp-check: Don't lock the server in tcpcheck_main
+ - BUG/MEDIUM: ssl: don't allocate shctx several time
+ - BUG/MEDIUM: cache: bad computation of the remaining size
+ - BUILD: checks: don't include server.h
+ - BUG/MEDIUM: stream: fix session leak on applet-initiated connections
+ - BUILD/MINOR: haproxy : FreeBSD/cpu affinity needs pthread_np header
+ - BUILD/MINOR: Makefile : enabling USE_CPU_AFFINITY
+ - BUG/MINOR: ssl: CO_FL_EARLY_DATA removal is managed by stream
+ - BUG/MEDIUM: threads/peers: decrement, not increment jobs on quitting
+ - BUG/MEDIUM: h2: don't report an error after parsing a 100-continue response
+ - BUG/MEDIUM: peers: fix some track counter rules don't register entries for sync.
+ - BUG/MAJOR: thread/peers: fix deadlock on peers sync.
+ - BUILD/MINOR: haproxy: compiling config cpu parsing handling when needed
+ - MINOR: config: report when "monitor fail" rules are misplaced
+ - BUG/MINOR: mworker: fix validity check for the pipe FDs
+ - BUG/MINOR: mworker: detach from tty when in daemon mode
+ - MINOR: threads: Fix pthread_setaffinity_np on FreeBSD.
+ - BUG/MAJOR: thread: Be sure to request a sync between threads only once at a time
+ - BUILD: Fix LDFLAGS vs. LIBS re linking order in various makefiles
+ - BUG/MEDIUM: checks: Be sure we have a mux if we created a cs.
+ - BUG/MINOR: hpack: fix debugging output of pseudo header names
+ - BUG/MINOR: hpack: must reject huffman literals padded with more than 7 bits
+ - BUG/MINOR: hpack: reject invalid header index
+ - BUG/MINOR: hpack: dynamic table size updates are only allowed before headers
+ - BUG/MAJOR: h2: correctly check the request length when building an H1 request
+ - BUG/MINOR: h2: immediately close if receiving GOAWAY after the last stream
+ - BUG/MINOR: h2: try to abort closed streams as soon as possible
+ - BUG/MINOR: h2: ":path" must not be empty
+ - BUG/MINOR: h2: fix a typo causing PING/ACK to be responded to
+ - BUG/MINOR: h2: the TE header if present may only contain trailers
+ - BUG/MEDIUM: h2: enforce the per-connection stream limit
+ - BUG/MINOR: h2: do not accept SETTINGS_ENABLE_PUSH other than 0 or 1
+ - BUG/MINOR: h2: reject incorrect stream dependencies on HEADERS frame
+ - BUG/MINOR: h2: properly check PRIORITY frames
+ - BUG/MINOR: h2: reject response pseudo-headers from requests
+ - BUG/MEDIUM: h2: remove connection-specific headers from request
+ - BUG/MEDIUM: h2: do not accept upper case letters in request header names
+ - BUG/MINOR: h2: use the H2_F_DATA_* macros for DATA frames
+ - BUG/MINOR: action: Don't check http capture rules when no id is defined
+ - BUG/MAJOR: hpack: don't pretend large headers fit in empty table
+ - BUG/MINOR: ssl: support tune.ssl.cachesize 0 again
+ - BUG/MEDIUM: mworker: also close peers sockets in the master
+ - BUG/MEDIUM: ssl engines: Fix async engines fds were not considered to fix fd limit automatically.
+ - BUG/MEDIUM: checks: a down server going to maint remains definitely stuck in down state.
+ - BUG/MEDIUM: peers: set NOLINGER on the outgoing stream interface
+ - BUG/MEDIUM: h2: fix handling of end of stream again
+ - MINOR: mworker: Update messages referencing exit-on-failure
+ - MINOR: mworker: Improve wording in `void mworker_wait()`
+ - CONTRIB: halog: Add help text for -s switch in halog program
+ - BUG/MEDIUM: email-alert: don't set server check status from a email-alert task
+ - BUG/MEDIUM: threads/vars: Fix deadlock in register_name
+ - MINOR: systemd: remove comment about HAPROXY_STATS_SOCKET
+ - DOC: notifications: add precisions about thread usage
+ - BUG/MEDIUM: lua/notification: memory leak
+ - MINOR: conn_stream: add new flag CS_FL_RCV_MORE to indicate pending data
+ - BUG/MEDIUM: stream-int: always set SI_FL_WAIT_ROOM on CS_FL_RCV_MORE
+ - BUG/MEDIUM: h2: automatically set CS_FL_RCV_MORE when the output buffer is full
+ - BUG/MEDIUM: h2: enable recv polling whenever demuxing is possible
+ - BUG/MEDIUM: h2: work around a connection API limitation
+ - BUG/MEDIUM: h2: debug incoming traffic in h2_wake()
+ - MINOR: h2: store the demux padding length in the h2c struct
+ - BUG/MEDIUM: h2: support uploading partial DATA frames
+ - MINOR: h2: don't demand that a DATA frame is complete before processing it
+ - BUG/MEDIUM: h2: don't switch the state to HREM before end of DATA frame
+ - BUG/MEDIUM: h2: don't close after the first DATA frame on tunnelled responses
+ - BUG/MEDIUM: http: don't disable lingering on requests with tunnelled responses
+ - BUG/MEDIUM: h2: fix stream limit enforcement
+ - BUG/MINOR: stream-int: don't try to receive again after receiving an EOS
+ - MINOR: sample: add len converter
+ - BUG: MAJOR: lb_map: server map calculation broken
+ - BUG: MINOR: http: don't check http-request capture id when len is provided
+ - MINOR: sample: rename the "len" converter to "length"
+ - BUG/MEDIUM: mworker: Set FD_CLOEXEC flag on log fd
+ - DOC/MINOR: intro: typo, wording, formatting fixes
+ - MINOR: netscaler: respect syntax
+ - MINOR: netscaler: remove the use of cip_magic only used once
+ - MINOR: netscaler: rename cip_len to clarify its usage
+ - BUG/MEDIUM: netscaler: use the appropriate IPv6 header size
+ - BUG/MAJOR: netscaler: address truncated CIP header detection
+ - MINOR: netscaler: check in one-shot if buffer is large enough for IP and TCP header
+ - MEDIUM: netscaler: do not analyze original IP packet size
+ - MEDIUM: netscaler: add support for standard NetScaler CIP protocol
+ - MINOR: spoe: add force-set-var option in spoe-agent configuration
+ - CONTRIB: iprange: Fix compiler warning in iprange.c
+ - CONTRIB: halog: Fix compiler warnings in halog.c
+ - BUG/MINOR: h2: properly report a stream error on RST_STREAM
+ - MINOR: mux: add flags to describe a mux's capabilities
+ - MINOR: stream-int: set flag SI_FL_CLEAN_ABRT when mux supports clean aborts
+ - BUG/MEDIUM: stream: don't consider abortonclose on muxes which close cleanly
+ - BUG/MEDIUM: checks: a server passed in maint state was not forced down.
+ - BUG/MEDIUM: lua: fix crash when using bogus mode in register_service()
+ - MINOR: http: adjust the list of supposedly cacheable methods
+ - MINOR: http: update the list of cacheable status codes as per RFC7231
+ - MINOR: http: start to compute the transaction's cacheability from the request
+ - BUG/MINOR: http: do not ignore cache-control: public
+ - BUG/MINOR: http: properly detect max-age=0 and s-maxage=0 in responses
+ - BUG/MINOR: cache: do not force the TX_CACHEABLE flag before checking cacheability
+ - MINOR: http: add a function to check request's cache-control header field
+ - BUG/MEDIUM: cache: do not try to retrieve host-less requests from the cache
+ - BUG/MEDIUM: cache: replace old object on store
+ - BUG/MEDIUM: cache: respect the request cache-control header
+ - BUG/MEDIUM: cache: don't cache the response on no-cache="set-cookie"
+ - BUG/MAJOR: connection: refine the situations where we don't send shutw()
+ - BUG/MEDIUM: checks: properly set servers to stopping state on 404
+ - BUG/MEDIUM: h2: properly handle and report some stream errors
+ - BUG/MEDIUM: h2: improve handling of frames received on closed streams
+ - DOC/MINOR: configuration: typo, formatting fixes
+ - BUG/MEDIUM: h2: ensure we always know the stream before sending a reset
+ - BUG/MEDIUM: mworker: don't close stdio several time
+ - MINOR: don't close stdio anymore
+ - BUG/MEDIUM: http: don't automatically forward request close
+ - BUG/MAJOR: hpack: don't return direct references to the dynamic headers table
+ - MINOR: h2: add a function to report pseudo-header names
+ - DEBUG: hpack: make hpack_dht_dump() expose the output file
+ - DEBUG: hpack: add more traces to the hpack decoder
+ - CONTRIB: hpack: add an hpack decoder
+ - MEDIUM: h2: prepare a graceful shutdown when the frontend is stopped
+ - BUG/MEDIUM: h2: properly handle the END_STREAM flag on empty DATA frames
+ - BUILD: ssl: silence a warning when building without NPN nor ALPN support
+ - CLEANUP: rbtree: remove
+ - BUG/MEDIUM: ssl: cache doesn't release shctx blocks
+ - BUG/MINOR: lua: Fix default value for pattern in Socket.receive
+ - DOC: lua: Fix typos in comments of hlua_socket_receive
+ - BUG/MEDIUM: lua: Fix IPv6 with separate port support for Socket.connect
+ - BUG/MINOR: lua: Fix return value of Socket.settimeout
+ - MINOR: dns: Handle SRV record weight correctly.
+ - BUG/MEDIUM: mworker: execvp failure depending on argv[0]
+ - MINOR: hathreads: add support for gcc < 4.7
+ - BUILD/MINOR: ancient gcc versions atomic fix
+ - BUG/MEDIUM: stream: properly handle client aborts during redispatch
+ - MINOR: spoe: add register-var-names directive in spoe-agent configuration
+ - MINOR: spoe: Don't queue a SPOE context if nothing is sent
+ - DOC: clarify the scope of ssl_fc_is_resumed
+ - CONTRIB: debug: fix a few flags definitions
+ - BUG/MINOR: poll: too large size allocation for FD events
+ - MINOR: sample: add date_us sample
+ - BUG/MEDIUM: peers: fix expire date wasn't updated if entry is modified remotely.
+ - MINOR: servers: Don't report duplicate dyncookies for disabled servers.
+ - MINOR: global/threads: move cpu_map at the end of the global struct
+ - MINOR: threads: add a MAX_THREADS define instead of LONGBITS
+ - MINOR: global: add some global activity counters to help debugging
+ - MINOR: threads/fd: Use a bitfield to know if there are FDs for a thread in the FD cache
+ - BUG/MEDIUM: threads/polling: Use fd_cache_mask instead of fd_cache_num
+ - BUG/MEDIUM: fd: maintain a per-thread update mask
+ - MINOR: fd: add a bitmask to indicate that an FD is known by the poller
+ - BUG/MEDIUM: epoll/threads: use one epoll_fd per thread
+ - BUG/MEDIUM: kqueue/threads: use one kqueue_fd per thread
+ - BUG/MEDIUM: threads/mworker: fix a race on startup
+ - BUG/MINOR: mworker: only write to pidfile if it exists
+ - MINOR: threads: Fix build when we're not compiling with threads.
+ - BUG/MINOR: threads: always set an owner to the thread_sync pipe
+ - BUG/MEDIUM: threads/server: Fix deadlock in srv_set_stopping/srv_set_admin_flag
+ - BUG/MEDIUM: checks: Don't try to release undefined conn_stream when a check is freed
+ - BUG/MINOR: kqueue/threads: Don't forget to close kqueue_fd[tid] on each thread
+ - MINOR: threads: Use __decl_hathreads instead of #ifdef/#endif
+ - BUILD: epoll/threads: Add test on MAX_THREADS to avoid warnings when compiled without threads
+ - BUILD: kqueue/threads: Add test on MAX_THREADS to avoid warnings when compiled without threads
+ - CLEANUP: sample: Fix comment encoding of sample.c
+ - CLEANUP: sample: Fix outdated comment about sample casts functions
+ - BUG/MINOR: sample: Fix output type of c_ipv62ip
+ - CLEANUP: Fix typo in ARGT_MSK6 comment
+ - CLEANUP: standard: Use len2mask4 in str2mask
+ - MINOR: standard: Add str2mask6 function
+ - MINOR: config: Add support for ARGT_MSK6
+ - MEDIUM: sample: Add IPv6 support to the ipmask converter
+ - MINOR: config: Enable tracking of up to MAX_SESS_STKCTR stick counters.
+ - BUG/MINOR: cli: use global.maxsock and not maxfd to list all FDs
+ - MINOR: polling: make epoll and kqueue not depend on maxfd anymore
+ - MINOR: fd: don't report maxfd in alert messages
+ - MEDIUM: polling: start to move maxfd computation to the pollers
+ - CLEANUP: fd/threads: remove the now unused fdtab_lock
+ - MINOR: poll: more accurately compute the new maxfd in the loop
+ - CLEANUP: fd: remove the unused "new" field
+ - MINOR: fd: move the hap_fd_{clr,set,isset} functions to fd.h
+ - MEDIUM: select: make use of hap_fd_* functions
+ - MEDIUM: fd: use atomic ops for hap_fd_{clr,set} and remove poll_lock
+ - MEDIUM: select: don't use the old FD state anymore
+ - MEDIUM: poll: don't use the old FD state anymore
+ - MINOR: fd: pass the iocb and owner to fd_insert()
+ - BUG/MINOR: threads: Update labels array because of changes in lock_label enum
+ - MINOR: stick-tables: Adds support for new "gpc1" and "gpc1_rate" counters.
+ - BUG/MINOR: epoll/threads: only call epoll_ctl(DEL) on polled FDs
+ - DOC: don't suggest using http-server-close
+ - MINOR: introduce proxy-v2-options for send-proxy-v2
+ - BUG/MEDIUM: spoe: Always try to receive or send the frame to detect shutdowns
+ - BUG/MEDIUM: spoe: Allow producer to read and to forward shutdown on request side
+ - MINOR: spoe: Remove check on min_applets number when a SPOE context is queued
+ - MINOR: spoe: Always link a SPOE context with the applet processing it
+ - MINOR: spoe: Replace sending_rate by a frequency counter
+ - MINOR: spoe: Count the number of frames waiting for an ack for each applet
+ - MEDIUM: spoe: Use an ebtree to manage idle applets
+ - MINOR: spoa_example: Count the number of frames processed by each worker
+ - MINOR: spoe: Add max-waiting-frames directive in spoe-agent configuration
+ - MINOR: init: make stdout unbuffered
+ - MINOR: early data: Don't rely on CO_FL_EARLY_DATA to wake up streams.
+ - MINOR: early data: Never remove the CO_FL_EARLY_DATA flag.
+ - MINOR: compiler: introduce offsetoff().
+ - MINOR: threads: Introduce double-width CAS on x86_64 and arm.
+ - MINOR: threads: add test and set/reset operations
+ - MINOR: pools/threads: Implement lockless memory pools.
+ - MAJOR: fd/threads: Make the fdcache mostly lockless.
+ - MEDIUM: fd/threads: Make sure we don't miss a fd cache entry.
+ - MAJOR: fd: compute the new fd polling state out of the fd lock
+ - MINOR: epoll: get rid of the now useless fd_compute_new_polled_status()
+ - MINOR: kqueue: get rid of the now useless fd_compute_new_polled_status()
+ - MINOR: poll: get rid of the now useless fd_compute_new_polled_status()
+ - MINOR: select: get rid of the now useless fd_compute_new_polled_status()
+ - CLEANUP: fd: remove the now unused fd_compute_new_polled_status() function
+ - MEDIUM: fd: make updt_fd_polling() use atomics
+ - MEDIUM: poller: use atomic ops to update the fdtab mask
+ - MINOR: fd: move the fd_{add_to,rm_from}_fdlist functions to fd.c
+ - BUG/MINOR: fd/threads: properly dereference fdcache as volatile
+ - MINOR: fd: remove the unneeded last CAS when adding an fd to the list
+ - MINOR: fd: reorder fd_add_to_fd_list()
+ - BUG/MINOR: time/threads: ensure the adjusted time is always correct
+ - BUG/MEDIUM: standard: Fix memory leak in str2ip2()
+ - MINOR: init: emit warning when -sf/-sd cannot parse argument
+ - BUILD: fd/threads: fix breakage build breakage without threads
+ - DOC: Describe routing impact of using interface keyword on bind lines
+ - DOC: Mention -Ws in the list of available options
+ - BUG/MINOR: config: don't emit a warning when global stats is incompletely configured
+ - BUG/MINOR: fd/threads: properly lock the FD before adding it to the fd cache.
+ - BUG/MEDIUM: threads: fix the double CAS implementation for ARMv7
+ - BUG/MEDIUM: ssl: Don't always treat SSL_ERROR_SYSCALL as unrecoverable.
+ - BUILD/MINOR: memory: stdint is needed for uintptr_t
+ - BUG/MINOR: init: Add missing brackets in the code parsing -sf/-st
+ - DOC: lua: new prototype for function "register_action()"
+ - DOC: cfgparse: Warn on option (tcp|http)log in backend
+ - BUG/MINOR: ssl/threads: Make management of the TLS ticket keys files thread-safe
+ - MINOR: sample: add a new "concat" converter
+ - BUG/MEDIUM: ssl: Shutdown the connection for reading on SSL_ERROR_SYSCALL
+ - BUG/MEDIUM: http: Switch the HTTP response in tunnel mode as earlier as possible
+ - BUG/MEDIUM: ssl/sample: ssl_bc_* fetch keywords are broken.
+ - MINOR: ssl/sample: adds ssl_bc_is_resumed fetch keyword.
+ - CLEANUP: cfgparse: Remove unused label end
+ - CLEANUP: spoe: Remove unused label retry
+ - CLEANUP: h2: Remove unused labels from mux_h2.c
+ - CLEANUP: pools: Remove unused end label in memory.h
+ - CLEANUP: standard: Fix typo in IPv6 mask example
+ - BUG/MINOR: pools/threads: don't ignore DEBUG_UAF on double-word CAS capable archs
+ - BUG/MINOR: debug/pools: properly handle out-of-memory when building with DEBUG_UAF
+ - MINOR: debug/pools: make DEBUG_UAF also detect underflows
+ - MINOR: stats: display the number of threads in the statistics.
+ - BUG/MINOR: h2: Set the target of dbuf_wait to h2c
+ - BUG/MEDIUM: h2: always consume any trailing data after end of output buffers
+ - BUG/MEDIUM: buffer: Fix the wrapping case in bo_putblk
+ - BUG/MEDIUM: buffer: Fix the wrapping case in bi_putblk
+ - BUG/MEDIUM: spoe: Remove idle applets from idle list when HAProxy is stopping
+ - Revert "BUG/MINOR: send-proxy-v2: string size must include ('\0')"
+ - MINOR: ssl: extract full pkey info in load_certificate
+ - MINOR: ssl: add ssl_sock_get_pkey_algo function
+ - MINOR: ssl: add ssl_sock_get_cert_sig function
+ - MINOR: connection: add proxy-v2-options ssl-cipher,cert-sig,cert-key
+ - MINOR: connection: add proxy-v2-options authority
+ - MINOR: systemd: Add section for SystemD sandboxing to unit file
+ - MINOR: systemd: Add SystemD's Protect*= options to the unit file
+ - MINOR: systemd: Add SystemD's SystemCallFilter option to the unit file
+ - CLEANUP: h2: rename misleading h2c_stream_close() to h2s_close()
+ - MINOR: h2: provide and use h2s_detach() and h2s_free()
+ - MEDIUM: h2: use a single buffer allocator
+ - MINOR/BUILD: fix Lua build on Mac OS X
+ - BUILD/MINOR: fix Lua build on Mac OS X (again)
+ - BUG/MINOR: session: Fix tcp-request session failure if handshake.
+ - CLEANUP: .gitignore: Ignore binaries from the contrib directory
+ - BUG/MINOR: unix: Don't mess up when removing the socket from the xfer_sock_list.
+ - DOC: buffers: clarify the purpose of the <from> pointer in offer_buffers()
+ - BUG/MEDIUM: h2: also arm the h2 timeout when sending
+ - BUG/MINOR: cli: Fix a crash when passing a negative or too large value to "show fd"
+ - CLEANUP: ssl: Remove a duplicated #include
+ - CLEANUP: cli: Remove a leftover debug message
+ - BUG/MINOR: cli: Fix a typo in the 'set rate-limit' usage
+ - BUG/MEDIUM: fix a 100% cpu usage with cpu-map and nbthread/nbproc
+ - BUG/MINOR: force-persist and ignore-persist only apply to backends
+ - BUG/MEDIUM: threads/unix: Fix a deadlock when a listener is temporarily disabled
+ - BUG/MAJOR: threads/queue: Fix thread-safety issues on the queues management
+ - BUG/MINOR: dns: don't downgrade DNS accepted payload size automatically
+ - TESTS: Add a testcase for multi-port + multi-server listener issue
+ - CLEANUP: dns: remove duplicate code in src/dns.c
+ - BUG/MINOR: seamless reload: Fix crash when an interface is specified.
+ - BUG/MINOR: cli: Ensure all command outputs end with a LF
+ - BUG/MINOR: cli: Fix a crash when sending a command with too many arguments
+ - BUILD: ssl: Fix build with OpenSSL without NPN capability
+ - BUG/MINOR: spoa-example: unexpected behavior for more than 127 args
+ - BUG/MINOR: lua: return bad error messages
+ - CLEANUP: lua/syntax: lua is a name and not an acronym
+ - BUG/MEDIUM: tcp-check: single connect rule can't detect DOWN servers
+ - BUG/MINOR: tcp-check: use the server's service port as a fallback
+ - BUG/MEDIUM: threads/queue: wake up other threads upon dequeue
+ - MINOR: log: stop emitting alerts when it's not possible to write on the socket
+ - BUILD/BUG: enable -fno-strict-overflow by default
+ - BUG/MEDIUM: fd/threads: ensure the fdcache_mask always reflects the cache contents
+ - DOC: log: more than 2 log servers are allowed
+ - MINOR: hash: add new function hash_crc32c
+ - MINOR: proxy-v2-options: add crc32c
+ - MINOR: accept-proxy: support proxy protocol v2 CRC32c checksum
+ - REORG: compact "struct server"
+ - MINOR: samples: add crc32c converter
+ - BUG/MEDIUM: h2: properly account for DATA padding in flow control
+ - BUG/MINOR: h2: ensure we can never send an RST_STREAM in response to an RST_STREAM
+ - BUG/MINOR: listener: Don't decrease actconn twice when a new session is rejected
+ - CLEANUP: map, stream: remove duplicate code in src/map.c, src/stream.c
+ - BUG/MINOR: lua: the function returns anything
+ - BUG/MINOR: lua function hlua_socket_settimeout don't check negative values
+ - CLEANUP: lua: typo fix in comments
+ - BUILD/MINOR: fix build when USE_THREAD is not defined
+ - MINOR: lua: allow socket api settimeout to accept integers, float, and doubles
+ - BUG/MINOR: hpack: fix harmless use of uninitialized value in hpack_dht_insert
+ - MINOR: cli/threads: make "show fd" report thread_sync_io_handler instead of "unknown"
+ - MINOR: cli: make "show fd" report the mux and mux_ctx pointers when available
+ - BUILD/MINOR: cli: fix a build warning introduced by last commit
+ - BUG/MAJOR: h2: remove orphaned streams from the send list before closing
+ - MINOR: h2: always call h2s_detach() in h2_detach()
+ - MINOR: h2: fuse h2s_detach() and h2s_free() into h2s_destroy()
+ - BUG/MEDIUM: h2/threads: never release the task outside of the task handler
+ - BUG/MEDIUM: h2: don't consider pending data on detach if connection is in error
+ - BUILD/MINOR: threads: always export thread_sync_io_handler()
+ - MINOR: mux: add a "show_fd" function to dump debugging information for "show fd"
+ - MINOR: h2: implement a basic "show_fd" function
+ - MINOR: cli: report cache indexes in "show fd"
+ - BUG/MINOR: h2: remove accidental debug code introduced with show_fd function
+ - BUG/MEDIUM: h2: always add a stream to the send or fctl list when blocked
+ - BUG/MINOR: checks: check the conn_stream's readiness and not the connection
+ - BUG/MINOR: fd: Don't clear the update_mask in fd_insert.
+ - BUG/MINOR: email-alert: Set the mailer port during alert initialization
+ - BUG/MINOR: cache: fix "show cache" output
+ - BUG/MAJOR: cache: fix random crashes caused by incorrect delete() on non-first blocks
+ - BUG/MINOR: spoe: Initialize variables used during conf parsing before any check
+ - BUG/MINOR: spoe: Don't release the context buffer in .check_timeouts callback
+ - BUG/MINOR: spoe: Register the variable to set when an error occurred
+ - BUG/MINOR: spoe: Don't forget to decrement fpa when a processing is interrupted
+ - MINOR: spoe: Add metrics in to know time spent in the SPOE
+ - MINOR: spoe: Add options to store processing times in variables
+ - MINOR: log: move 'log' keyword parsing in dedicated function
+ - MINOR: log: Keep the ref when a log server is copied to avoid duplicate entries
+ - MINOR: spoe: Add loggers dedicated to the SPOE agent
+ - MINOR: spoe: Add support for option dontlog-normal in the SPOE agent section
+ - MINOR: spoe: use agent's logger to log SPOE messages
+ - MINOR: spoe: Add counters to log info about SPOE agents
+ - BUG/MAJOR: cache: always initialize newly created objects
+ - MINOR: servers: Support alphanumeric characters for the server templates names
+ - BUG/MEDIUM: threads: Fix the max/min calculation because of name clashes
+ - BUG/MEDIUM: connection: Make sure we have a mux before calling detach().
+ - BUG/MINOR: http: Return an error in proxy mode when url2sa fails
+ - MINOR: proxy: Add fe_defbe fetcher
+ - MINOR: config: Warn if resolvers has no nameservers
+ - BUG/MINOR: cli: Guard against NULL messages when using CLI_ST_PRINT_FREE
+ - MINOR: cli: Ensure the CLI always outputs an error when it should
+ - MEDIUM: sample: Extend functionality for field/word converters
+ - MINOR: export localpeer as an environment variable
+ - BUG/MEDIUM: kqueue: When adding new events, provide an output to get errors.
+ - BUILD: sample: avoid build warning in sample.c
+ - BUG/CRITICAL: h2: fix incorrect frame length check
+ - DOC: lua: update the links to the config and Lua API
+ - BUG/MINOR: pattern: Add a missing HA_SPIN_INIT() in pat_ref_newid()
+ - BUG/MAJOR: channel: Fix crash when trying to read from a closed socket
+ - BUG/MINOR: log: t_idle (%Ti) is not set for some requests
+ - BUG/MEDIUM: lua: Fix segmentation fault if a Lua task exits
+ - MINOR: h2: detect presence of CONNECT and/or content-length
+ - BUG/MEDIUM: h2: implement missing support for chunked encoded uploads
+ - BUG/MINOR: spoe: Fix counters update when processing is interrupted
+ - BUG/MINOR: spoe: Fix parsing of dontlog-normal option
+ - MEDIUM: cli: Add payload support
+ - MINOR: map: Add payload support to "add map"
+ - MINOR: ssl: Add payload support to "set ssl ocsp-response"
+ - BUG/MINOR: lua/threads: Make lua's tasks sticky to the current thread
+ - MINOR: sample: Add strcmp sample converter
+ - MINOR: http: Add support for 421 Misdirected Request
+ - BUG/MINOR: config: disable http-reuse on TCP proxies
+ - MINOR: ssl: disable SSL sample fetches when unsupported
+ - MINOR: ssl: add fetch 'ssl_fc_session_key' and 'ssl_bc_session_key'
+ - BUG/MINOR: checks: Fix check->health computation for flapping servers
+ - BUG/MEDIUM: threads: Fix the sync point for more than 32 threads
+ - BUG/MINOR, BUG/MINOR: lua: Put tasks to sleep when waiting for data
+ - MINOR: backend: implement random-based load balancing
+ - DOC/MINOR: clean up LUA documentation re: servers & array/table.
+ - MINOR: lua: Add server name & puid to LUA Server class.
+ - MINOR: lua: add get_maxconn and set_maxconn to LUA Server class.
+ - BUG/MINOR: map: correctly track reference to the last ref_elt being dumped
+ - BUG/MEDIUM: task: Don't free a task that is about to be run.
+ - MINOR: fd: Make the lockless fd list work with multiple lists.
+ - BUG/MEDIUM: pollers: Use a global list for fd shared between threads.
+ - MINOR: pollers: move polled_mask outside of struct fdtab.
+ - BUG/MINOR: lua: schedule socket task upon lua connect()
+ - BUG/MINOR: lua: ensure large proxy IDs can be represented
+ - BUG/MEDIUM: pollers/kqueue: use incremented position in event list
+ - BUG/MINOR: cli: don't stop cli_gen_usage_msg() when kw->usage == NULL
+ - BUG/MEDIUM: http: don't always abort transfers on CF_SHUTR
+ - BUG/MEDIUM: ssl: properly protect SSL cert generation
+ - BUG/MINOR: lua: Socket.send threw runtime error: 'close' needs 1 arguments.
+ - BUG/MINOR: spoe: Mistake in error message about SPOE configuration
+ - BUG/MEDIUM: spoe: Flags are not encoded in network order
+ - CLEANUP: spoe: Remove unused variables from the agent structure
+ - DOC: spoe: fix a typo
+ - BUG/MEDIUM: contrib/mod_defender: Use network order to encode/decode flags
+ - BUG/MEDIUM: contrib/modsecurity: Use network order to encode/decode flags
+ - DOC: add some description of the pending rework of the buffer structure
+ - BUG/MINOR: ssl/lua: prevent lua from affecting automatic maxconn computation
+ - MINOR: lua: Improve error message
+ - BUG/MEDIUM: cache: don't cache when an Authorization header is present
+ - MINOR: ssl: set SSL_OP_PRIORITIZE_CHACHA
+ - BUG/MEDIUM: dns: Delay the attempt to run a DNS resolution on check failure.
+ - BUG/BUILD: threads: unbreak build without threads
+ - BUG/MEDIUM: servers: Add srv_addr default placeholder to the state file
+ - BUG/MEDIUM: lua/socket: Length required read doesn't work
+ - MINOR: tasks: Change the task API so that the callback takes 3 arguments.
+ - MAJOR: tasks: Create a per-thread runqueue.
+ - MAJOR: tasks: Introduce tasklets.
+ - MINOR: tasks: Make the number of tasks to run at once configurable.
+ - MAJOR: applets: Use tasks, instead of rolling our own scheduler.
+ - BUG/MEDIUM: stick-tables: Decrement ref_cnt in table_* converters
+ - MINOR: http: Log warning if (add|set)-header fails
+ - DOC: management: add the new wrew stats column
+ - MINOR: stats: also report the failed header rewrites warnings on the stats page
+ - BUG/MEDIUM: tasks: Don't forget to increase/decrease tasks_run_queue.
+ - BUG/MEDIUM: task: Don't forget to decrement max_processed after each task.
+ - MINOR: task: Also consider the task list size when getting global tasks.
+ - MINOR: dns: Implement `parse-resolv-conf` directive
+ - BUG/MEDIUM: spoe: Return an error when the wrong ACK is received in sync mode
+ - MINOR: task/notification: Is notifications registered ?
+ - BUG/MEDIUM: lua/socket: wrong scheduling for sockets
+ - BUG/MAJOR: lua: Dead lock with sockets
+ - BUG/MEDIUM: lua/socket: Notification error
+ - BUG/MEDIUM: lua/socket: Scheduling error on write: may dead-lock
+ - BUG/MEDIUM: lua/socket: Buffer error, may segfault
+ - DOC: contrib/modsecurity: few typo fixes
+ - DOC: SPOE.txt: fix a typo
+ - MAJOR: spoe: upgrade the SPOP version to 2.0 and remove the support for 1.0
+ - BUG/MINOR: contrib/spoa_example: Don't reset the status code during disconnect
+ - BUG/MINOR: contrib/mod_defender: Don't reset the status code during disconnect
+ - BUG/MINOR: contrib/modsecurity: Don't reset the status code during disconnect
+ - BUG/MINOR: contrib/mod_defender: update pointer on the end of the frame
+ - BUG/MINOR: contrib/modsecurity: update pointer on the end of the frame
+ - MINOR: task: Fix a compiler warning by adding a cast.
+ - MINOR: stats: also report the nice and number of calls for applets
+ - MINOR: applet: assign the same nice value to a new appctx as its owner task
+ - MINOR: task: Fix compiler warning.
+ - BUG/MEDIUM: tasks: Use the local runqueue when building without threads.
+ - MINOR: tasks: Don't define rqueue if we're building without threads.
+ - BUG/MINOR: unix: Make sure we can transfer abns sockets on seamless reload.
+ - MINOR: lua: Increase debug information
+ - BUG/MEDIUM: threads: handle signal queue only in thread 0
+ - BUG/MINOR: don't ignore SIG{BUS,FPE,ILL,SEGV} during signal processing
+ - BUG/MINOR: signals: ha_sigmask macro for multithreading
+ - BUG/MAJOR: map: fix a segfault when using http-request set-map
+ - DOC: regression testing: Add a short starting guide.
+ - MINOR: tasks: Make sure we correctly init and deinit a tasklet.
+ - BUG/MINOR: tasklets: Just make sure we don't pass a tasklet to the handler.
+ - BUG/MINOR: lua: Segfaults with wrong usage of types.
+ - BUG/MAJOR: ssl: Random crash with cipherlist capture
+ - BUG/MAJOR: ssl: OpenSSL context is stored in non-reserved memory slot
+ - BUG/MEDIUM: ssl: do not store pkinfo with SSL_set_ex_data
+ - MINOR: tests: First regression testing file.
+ - MINOR: reg-tests: Add reg-tests/README file.
+ - MINOR: reg-tests: Add a few regression testing files.
+ - DOC: Add new REGTEST tag info about reg testing.
+ - BUG/MEDIUM: fd: Don't modify the update_mask in fd_dodelete().
+ - MINOR: Some spelling cleanup in the comments.
+ - BUG/MEDIUM: threads: Use the sync point to check active jobs and exit
+ - MINOR: threads: Be sure to remove threads from all_threads_mask on exit
+ - REGTEST/MINOR: Wrong URI in a reg test for SSL/TLS.
+ - REGTEST/MINOR: Set HAPROXY_PROGRAM default value.
+ - REGTEST/MINOR: Add levels to reg-tests target.
+ - BUG/MAJOR: Stick-tables crash with segfault when the key is not in the stick-table
+ - BUG/BUILD: threads: unbreak build without threads
+ - BUG/MAJOR: stick_table: Complete incomplete SEGV fix
+ - MINOR: stick-tables: make stktable_release() do nothing on NULL
+ - BUG/MEDIUM: lua: possible CLOSE-WAIT state with '\n' headers
+ - MINOR: startup: change session/process group settings
+ - MINOR: systemd: consider exit status 143 as successful
+ - REGTEST/MINOR: Wrong URI syntax.
+ - CLEANUP: dns: remove obsolete macro DNS_MAX_IP_REC
+ - CLEANUP: dns: inaccurate comment about preferred IP score
+ - MINOR: dns: fix wrong score computation in dns_get_ip_from_response
+ - MINOR: dns: new DNS options to allow/prevent IP address duplication
+ - REGTEST/MINOR: Unexpected curl URL globbing.
+ - BUG/MINOR: ssl: properly ref-count the tls_keys entries
+ - MINOR: h2: keep a count of the number of conn_streams attached to the mux
+ - BUG/MEDIUM: h2: don't accept new streams if conn_streams are still in excess
+ - MINOR: h2: add the mux and demux buffer lengths on "show fd"
+ - BUG/MEDIUM: h2: never leave pending data in the output buffer on close
+ - BUG/MEDIUM: h2: make sure the last stream closes the connection after a timeout
+ - MINOR: tasklet: Set process to NULL.
+ - MINOR: buffer: implement a new file for low-level buffer manipulation functions
+ - MINOR: buffer: switch buffer sizes and offsets to size_t
+ - MINOR: buffer: add a few basic functions for the new API
+ - MINOR: buffer: Introduce b_sub(), b_add(), and bo_add()
+ - MINOR: buffer: Add b_set_data().
+ - MINOR: buffer: introduce b_realign_if_empty()
+ - MINOR: compression: pass the channel to http_compression_buffer_end()
+ - MINOR: channel: add a few basic functions for the new buffer API
+ - MINOR: channel/buffer: use c_realign_if_empty() instead of buffer_realign()
+ - MINOR: channel/buffer: replace buffer_slow_realign() with channel_slow_realign() and b_slow_realign()
+ - MEDIUM: channel: make channel_slow_realign() take a swap buffer
+ - MINOR: h2: use b_slow_realign() with the trash as a swap buffer
+ - MINOR: buffer: remove buffer_slow_realign() and the swap_buffer allocation code
+ - MINOR: channel/buffer: replace b_{adv,rew} with c_{adv,rew}
+ - MINOR: buffer: replace calls to buffer_space_wraps() with b_space_wraps()
+ - MINOR: buffer: remove bi_getblk() and bi_getblk_nc()
+ - MINOR: buffer: split bi_contig_data() into ci_contig_data and b_config_data()
+ - MINOR: buffer: remove bi_ptr()
+ - MINOR: buffer: remove bo_ptr()
+ - MINOR: buffer: remove bo_end()
+ - MINOR: buffer: remove bi_end()
+ - MINOR: buffer: remove bo_contig_data()
+ - MINOR: buffer: merge b{i,o}_contig_space()
+ - MINOR: buffer: replace bo_getblk() with direction agnostic b_getblk()
+ - MINOR: buffer: replace bo_getblk_nc() with b_getblk_nc() which takes an offset
+ - MINOR: buffer: replace bi_del() and bo_del() with b_del()
+ - MINOR: buffer: convert most b_ptr() calls to c_ptr()
+ - MINOR: h1: make h1_measure_trailers() take the byte count in argument
+ - MINOR: h2: clarify the fact that the send functions are unsigned
+ - MEDIUM: h2: prevent the various mux encoders from modifying the buffer
+ - MINOR: h1: make h1_skip_chunk_crlf() not depend on b_ptr() anymore
+ - MINOR: h1: make h1_parse_chunk_size() not depend on b_ptr() anymore
+ - MINOR: h1: make h1_measure_trailers() use an offset and a count
+ - MEDIUM: h2: do not use buf->o anymore inside h2_snd_buf's loop
+ - MEDIUM: h2: don't use b_ptr() nor b_end() anymore
+ - MINOR: buffer: get rid of b_end() and b_to_end()
+ - MINOR: buffer: make b_getblk_nc() take const pointers
+ - MINOR: buffer: make b_getblk_nc() take size_t for the block sizes
+ - MEDIUM: connection: make xprt->snd_buf() take the byte count in argument
+ - MEDIUM: mux: make mux->snd_buf() take the byte count in argument
+ - MEDIUM: connection: make xprt->rcv_buf() use size_t for the count
+ - MEDIUM: mux: make mux->rcv_buf() take a size_t for the count
+ - MINOR: connection: add a flags argument to rcv_buf()
+ - MINOR: connection: add a new receive flag : CO_RFL_BUF_WET
+ - MINOR: buffer: get rid of b_ptr() and convert its last users
+ - MINOR: buffer: use b_room() to determine available space in a buffer
+ - MINOR: buffer: replace buffer_not_empty() with b_data() or c_data()
+ - MINOR: buffer: replace buffer_empty() with b_empty() or c_empty()
+ - MINOR: buffer: make bo_putchar() use b_tail()
+ - MINOR: buffer: replace buffer_full() with channel_full()
+ - MINOR: buffer: replace bi_space_for_replace() with ci_space_for_replace()
+ - MINOR: buffer: replace buffer_pending() with ci_data()
+ - MINOR: buffer: replace buffer_flush() with c_adv(chn, ci_data(chn))
+ - MINOR: buffer: use c_head() instead of buffer_wrap_sub(c->buf, p-o)
+ - MINOR: buffer: use b_orig() to replace most references to b->data
+ - MINOR: buffer: Use b_add()/bo_add() instead of accessing b->i/b->o.
+ - MINOR: channel: remove almost all references to buf->i and buf->o
+ - MINOR: channel: Add co_set_data().
+ - MEDIUM: channel: adapt to the new buffer API
+ - MINOR: checks: adapt to the new buffer API
+ - MEDIUM: h2: update to the new buffer API
+ - MINOR: buffer: remove unused bo_add()
+ - MEDIUM: spoe: use the new buffer API for the SPOE buffer
+ - MINOR: stats: adapt to the new buffers API
+ - MINOR: cli: use the new buffer API
+ - MINOR: cache: use the new buffer API
+ - MINOR: stream-int: use the new buffer API
+ - MINOR: stream: use wrappers instead of directly manipulating buffers
+ - MINOR: backend: use new buffer API
+ - MEDIUM: http: use wrappers instead of directly manipulating buffers states
+ - MINOR: filters: convert to the new buffer API
+ - MINOR: payload: convert to the new buffer API
+ - MEDIUM: h1: port to new buffer API.
+ - MINOR: flt_trace: adapt to the new buffer API
+ - MEDIUM: compression: start to move to the new buffer API
+ - MINOR: lua: use the wrappers instead of directly manipulating buffer states
+ - MINOR: buffer: convert part bo_putblk() and bi_putblk() to the new API
+ - MINOR: buffer: adapt buffer_slow_realign() and buffer_dump() to the new API
+ - MAJOR: start to change buffer API
+ - MINOR: buffer: remove the check for output on b_del()
+ - MINOR: buffer: b_set_data() doesn't truncate output data anymore
+ - MINOR: buffer: rename the "data" field to "area"
+ - MEDIUM: buffers: move "output" from struct buffer to struct channel
+ - MINOR: buffer: replace bi_fast_delete() with b_del()
+ - MINOR: buffer: replace b{i,o}_put* with b_put*
+ - MINOR: buffer: add a new file for ist + buffer manipulation functions
+ - MINOR: checks: use b_putist() instead of b_putstr()
+ - MINOR: buffers: remove b_putstr()
+ - CLEANUP: buffer: minor cleanups to buffer.h
+ - MINOR: buffers/channel: replace buffer_insert_line2() with ci_insert_line2()
+ - MINOR: buffer: replace buffer_replace2() with b_rep_blk()
+ - MINOR: buffer: rename the data length member to '->data'
+ - MAJOR: buffer: finalize buffer detachment
+ - MEDIUM: chunks: make the chunk struct's fields match the buffer struct
+ - MAJOR: chunks: replace struct chunk with struct buffer
+ - DOC: buffers: document the new buffers API
+ - DOC: buffers: remove obsolete docs about buffers
+ - MINOR: tasklets: Don't attempt to add a tasklet in the list twice.
+ - MINOR: connections/mux: Add a new "subscribe" method.
+ - MEDIUM: connections/mux: Revamp the send direction.
+ - MINOR: connection: simplify subscription by adding a registration function
+ - BUG/MINOR: http: Set brackets for the unlikely macro at the right place
+ - BUG/MINOR: build: Fix compilation with debug mode enabled
+ - BUILD: Generate sha256 checksums in publish-release
+ - MINOR: debug: Add check for CO_FL_WILL_UPDATE
+ - MINOR: debug: Add checks for conn_stream flags
+ - MINOR: ist: Add the function isteqi
+ - BUG/MEDIUM: threads: Fix the exit condition of the thread barrier
+ - BUG/MEDIUM: mux_h2: Call h2_send() before updating polling.
+ - MINOR: buffers: simplify b_contig_space()
+ - MINOR: buffers: split b_putblk() into __b_putblk()
+ - MINOR: buffers: add b_xfer() to transfer data between buffers
+ - DOC: add some design notes about the new layering model
+ - MINOR: conn_stream: add a new CS_FL_REOS flag
+ - MINOR: conn_stream: add an rx buffer to the conn_stream
+ - MEDIUM: conn_stream: add cs_recv() as a default rcv_buf() function
+ - MEDIUM: stream-int: automatically call si_cs_recv_cb() if the cs has data on wake()
+ - MINOR: h2: make each H2 stream support an intermediary input buffer
+ - MEDIUM: h2: make h2_frt_decode_headers() use an intermediary buffer
+ - MEDIUM: h2: make h2_frt_transfer_data() copy via an intermediary buffer
+ - MEDIUM: h2: centralize transfer of decoded frames in h2_rcv_buf()
+ - MEDIUM: h2: move headers and data frame decoding to their respective parsers
+ - MEDIUM: buffers: make b_xfer() automatically swap buffers when possible
+ - MEDIUM: h2: perform a single call to the data layer in demux()
+ - MEDIUM: h2: don't call data_cb->recv() anymore
+ - MINOR: h2: make use of CS_FL_REOS to indicate that end of stream was seen
+ - MEDIUM: h2: use the default conn_stream's receive function
+ - DOC: add more design feedback on the new layering model
+ - MINOR: h2: add the error code and the max/last stream IDs to "show fd"
+ - BUG/MEDIUM: stream-int: don't immediately enable reading when the buffer was reportedly full
+ - BUG/MEDIUM: stats: don't ask for more data as long as we're responding
+ - BUG/MINOR: servers: Don't make "server" in a frontend fatal.
+ - BUG/MEDIUM: tasks: make sure we pick all tasks in the run queue
+ - BUG/MEDIUM: tasks: Decrement rqueue_size at the right time.
+ - BUG/MEDIUM: tasks: use atomic ops for active_tasks_mask
+ - BUG/MEDIUM: tasks: Make sure there's no task left before considering inactive.
+ - MINOR: signal: don't pass the signal number anymore as the wakeup reason
+ - MINOR: tasks: extend the state bits from 8 to 16 and remove the reason
+ - MINOR: tasks: Add a flag that tells if we're in the global runqueue.
+ - BUG/MEDIUM: tasks: make __task_unlink_rq responsible for the rqueue size.
+ - MINOR: queue: centralize dequeuing code a bit better
+ - MEDIUM: queue: make pendconn_free() work on the stream instead
+ - DOC: queue: document the expected locking model for the server's queue
+ - MINOR: queue: make sure pendconn->strm->pend_pos is always valid
+ - MINOR: queue: use a distinct variable for the assigned server and the queue
+ - MINOR: queue: implement pendconn queue locking functions
+ - MEDIUM: queue: get rid of the pendconn lock
+ - MINOR: tasks: Make active_tasks_mask volatile.
+ - MINOR: tasks: Make global_tasks_mask volatile.
+ - MINOR: pollers: Add a way to wake a thread sleeping in the poller.
+ - MINOR: threads/queue: Get rid of THREAD_WANT_SYNC in the queue code.
+ - BUG/MEDIUM: threads/sync: use sched_yield when available
+ - MINOR: ssl: BoringSSL matches OpenSSL 1.1.0
+ - BUG/MEDIUM: h2: prevent orphaned streams from blocking a connection forever
+ - BUG/MINOR: config: stick-table is not supported in defaults section
+ - BUILD/MINOR: threads: unbreak build with threads disabled
+ - BUG/MINOR: threads: Handle nbthread == MAX_THREADS.
+ - BUG/MEDIUM: threads: properly fix nbthreads == MAX_THREADS
+ - MINOR: threads: move "nbthread" parsing to hathreads.c
+ - BUG/MEDIUM: threads: unbreak "bind" referencing an incorrect thread number
+ - MEDIUM: proxy_protocol: Convert IPs to v6 when protocols are mixed
+ - BUILD/MINOR: compiler: fix offsetof() on older compilers
+ - SCRIPTS: git-show-backports: add missing quotes to "echo"
+ - MINOR: threads: add more consistency between certain variables in no-thread case
+ - MEDIUM: hathreads: implement a more flexible rendez-vous point
+ - BUG/MEDIUM: cli: make "show fd" thread-safe
+
+2017/11/26 : 1.9-dev0
+
+2017/11/26 : 1.8.0
+ - BUG/MEDIUM: stream: don't automatically forward connect nor close
+ - BUG/MAJOR: stream: ensure analysers are always called upon close
+ - BUG/MINOR: stream-int: don't try to read again when CF_READ_DONTWAIT is set
+ - MEDIUM: mworker: Add systemd `Type=notify` support
+ - BUG/MEDIUM: cache: free callback to remove from tree
+ - CLEANUP: cache: remove unused struct
+ - MEDIUM: cache: enable the HTTP analysers
+ - CLEANUP: cache: remove wrong comment
+ - MINOR: threads/atomic: rename local variables in macros to avoid conflicts
+ - MINOR: threads/plock: rename local variables in macros to avoid conflicts
+ - MINOR: threads/atomic: implement pl_mb() in asm on x86
+ - MINOR: threads/atomic: implement pl_bts() on non-x86
+ - MINOR: threads/build: atomic: replace the few inlines with macros
+ - BUILD: threads/plock: fix a build issue on Clang without optimization
+ - BUILD: ebtree: don't redefine types u32/s32 in scope-aware trees
+ - BUILD: compiler: add a new type modifier __maybe_unused
+ - BUILD: h2: mark some inlined functions "unused"
+ - BUILD: server: check->desc always exists
+ - BUG/MEDIUM: h2: properly report connection errors in headers and data handlers
+ - MEDIUM: h2: add a function to emit an HTTP/1 request from a headers list
+ - MEDIUM: h2: change hpack_decode_headers() to only provide a list of headers
+ - BUG/MEDIUM: h2: always reassemble the Cookie request header field
+ - BUG/MINOR: systemd: ignore daemon mode
+ - CONTRIB: spoa_example: allow to compile outside HAProxy.
+ - CONTRIB: spoa_example: remove bref, wordlist, cond_wordlist
+ - CONTRIB: spoa_example: remove last dependencies on type "sample"
+ - CONTRIB: spoa_example: remove SPOE enums that are useless for clients
+ - CLEANUP: cache: reorder includes
+ - MEDIUM: shctx: use unsigned int for len and block_count
+ - MEDIUM: cache: "show cache" on the cli
+ - BUG/MEDIUM: cache: use key=0 as a condition for freeing
+ - BUG/MEDIUM: cache: refcount forbids to free the objects
+ - BUG/MEDIUM: cache fix cli_kws structure
+ - BUG/MEDIUM: deinit: correctly deinitialize the proxy and global listener tasks
+ - BUG/MINOR: ssl: Always start the handshake if we can't send early data.
+ - MINOR: ssl: Don't disable early data handling if we could not write.
+ - MINOR: pools: prepare functions to override malloc/free in pools
+ - MINOR: pools: implement DEBUG_UAF to detect use after free
+ - BUG/MEDIUM: threads/time: fix time drift correction
+ - BUG/MEDIUM: threads/time: maintain a common time reference between all threads
+ - MINOR: sample: Add "thread" sample fetch
+ - BUG/MINOR: Use crt_base instead of ca_base when crt is parsed on a server line
+ - BUG/MINOR: stream: fix tv_request calculation for applets
+ - BUG/MAJOR: h2: always remove a stream from the send list before freeing it
+ - BUG/MAJOR: threads/task: dequeue expired tasks under the WQ lock
+ - MINOR: ssl: Handle reading early data after writing better.
+ - MINOR: mux: Make sure every string is woken up after the handshake.
+ - MEDIUM: cache: store sha1 for hashing the cache key
+ - MINOR: http: implement the "http-request reject" rule
+ - MINOR: h2: send RST_STREAM before GOAWAY on reject
+ - MEDIUM: h2: don't gracefully close the connection anymore on Connection: close
+ - MINOR: h2: make use of client-fin timeout after GOAWAY
+ - MEDIUM: config: ensure that tune.bufsize is at least 16384 when using HTTP/2
+ - MINOR: ssl: Handle early data with BoringSSL
+ - BUG/MEDIUM: stream: always release the stream-interface on abort
+ - BUG/MEDIUM: cache: free resources in chn_end_analyze
+ - MINOR: cache: move the refcount decrease in the applet release
+ - BUG/MINOR: listener: Allow multiple "process" options on "bind" lines
+ - MINOR: config: Support a range to specify processes in "cpu-map" parameter
+ - MINOR: config: Slightly change how parse_process_number works
+ - MINOR: config: Export parse_process_number and use it wherever it's applicable
+ - MINOR: standard: Add my_ffsl function to get the position of the bit set to one
+ - MINOR: config: Add auto-increment feature for cpu-map
+ - MINOR: config: Support partial ranges in cpu-map directive
+ - MINOR: config: Remove thread-map directive
+ - MINOR: config: Add the threads support in cpu-map directive
+ - MINOR: config: Add threads support for "process" option on "bind" lines
+ - MEDIUM: listener: Bind listeners on a thread subset if specified
+ - CLEANUP: debug: Use DPRINTF instead of fprintf into #ifdef DEBUG_FULL/#endif
+ - CLEANUP: log: Rename Alert/Warning in ha_alert/ha_warning
+ - MINOR/CLEANUP: proxy: rename "proxy" to "proxies_list"
+ - CLEANUP: pools: rename all pool functions and pointers to remove this "2"
+ - DOC: update the roadmap file with the latest changes merged in 1.8
+ - DOC: fix mangled version in peers protocol documentation
+ - DOC: add initial peers protocol v2.0 documentation.
+ - DOC: mention William as maintainer of the cache and master-worker
+ - DOC: add Christopher and Emeric as maintainers of the threads
+ - MINOR: cache: replace a fprint() by an abort()
+ - MEDIUM: cache: max-age configuration keyword
+ - DOC: explain HTTP2 timeout behavior
+ - DOC: cache: configuration and management
+ - MAJOR: mworker: exits the master on failure
+ - BUG/MINOR: threads: don't drop "extern" on the lock in include files
+ - MINOR: task: keep a pointer to the currently running task
+ - MINOR: task: align the rq and wq locks
+ - MINOR: fd: cache-align fdtab and fdcache locks
+ - MINOR: buffers: cache-align buffer_wq_lock
+ - CLEANUP: server: reorder some fields in struct server to save 40 bytes
+ - CLEANUP: proxy: slightly reorder the struct proxy to reduce holes
+ - CLEANUP: checks: remove 16 bytes of holes in struct check
+ - CLEANUP: cache: more efficiently pack the struct cache
+ - CLEANUP: fd: place the lock at the beginning of struct fdtab
+ - CLEANUP: pools: align pools on a cache line
+ - DOC: config: add a few bits about how to configure HTTP/2
+ - BUG/MAJOR: threads/queue: avoid recursive locking in pendconn_get_next_strm()
+ - BUILD: Makefile: reorder object files by size
+
+2017/11/19 : 1.8-rc4
+ - BUG/MEDIUM: cache: does not cache if no Content-Length
+ - BUILD: thread/pipe: fix build without threads
+ - BUG/MINOR: spoe: check buffer size before acquiring or releasing it
+ - MINOR: debug/flags: Add missing flags
+ - MINOR: threads: Use __decl_hathreads to declare locks
+ - BUG/MINOR: buffers: Fix b_alloc_margin to be "functionally" thread-safe
+ - BUG/MAJOR: ebtree/scope: fix insertion and removal of duplicates in scope-aware trees
+ - BUG/MAJOR: ebtree/scope: fix lookup of next node in scope-aware trees
+ - MINOR: ebtree/scope: add a function to find next node from a parent
+ - MINOR: ebtree/scope: simplify the lookup functions by using eb32sc_next_with_parent()
+ - BUG/MEDIUM: mworker: Fix re-exec when haproxy is started from PATH
+ - BUG/MEDIUM: cache: use msg->sov to forward header
+ - MINOR: cache: forward data with headers
+ - MINOR: cache: disable cache if shctx_row_data_append fail
+ - BUG/MINOR: threads: tid_bit must be an unsigned long
+ - CLEANUP: tasks: Remove useless double test on rq_next
+ - BUG/MEDIUM: standard: itoa_str/idx and quote_str/idx must be thread-local
+ - MINOR: tools: add a function to dump a scope-aware tree to a file
+ - MINOR: tools: improve the DOT dump of the ebtree
+ - MINOR: tools: emphasize the node being worked on in the tree dump
+ - BUG/MAJOR: ebtree/scope: properly tag upper nodes during insertion
+ - DOC: peers: Add a first version of peers protocol v2.1.
+ - CONTRIB: Wireshark dissector for HAProxy Peer Protocol.
+ - MINOR: mworker: display an accurate error when the reexec fail
+ - BUG/MEDIUM: mworker: wait again for signals when execvp fail
+ - BUG/MEDIUM: mworker: does not deinit anymore
+ - BUG/MEDIUM: mworker: does not close inherited FD
+ - MINOR: tests: add a python wrapper to test inherited fd
+ - BUG/MINOR: Allocate the log buffers before the proxies startup
+ - MINOR: tasks: Use a bitfield to track tasks activity per-thread
+ - MAJOR: polling: Use active_tasks_mask instead of tasks_run_queue
+ - MINOR: applets: Use a bitfield to track applets activity per-thread
+ - MAJOR: polling: Use active_applets_mask instead of applets_active_queue
+ - MEDIUM: applets: Don't process more than 200 active applets at once
+ - MINOR: stream: Add thread-mask of tasks/FDs/applets in "show sess all" command
+ - MINOR: SSL: Store the ASN1 representation of client sessions.
+ - MINOR: ssl: Make sure we don't shutw the connection before the handshake.
+ - BUG/MEDIUM: deviceatlas: ignore not valuable HTTP request data
+
+2017/11/11 : 1.8-rc3
+ - BUILD: use MAXPATHLEN instead of NAME_MAX.
+ - BUG/MAJOR: threads/checks: add 4 missing spin_unlock() in various functions
+ - BUG/MAJOR: threads/server: missing unlock in CLI fqdn parser
+ - BUG/MINOR: cli: do not perform an invalid action on "set server check-port"
+ - BUG/MAJOR: threads/checks: wrong use of SPIN_LOCK instead of SPIN_UNLOCK
+ - CLEANUP: checks: remove return statements in locked functions
+ - BUG/MINOR: cli: add severity in "set server addr" parser
+ - CLEANUP: server: get rid of return statements in the CLI parser
+ - BUG/MAJOR: cli/streams: missing unlock on exit "show sess"
+ - BUG/MAJOR: threads/dns: add missing unlock on allocation failure path
+ - BUG/MAJOR: threads/lb: fix missing unlock on consistent hash LB
+ - BUG/MAJOR: threads/lb: fix missing unlock on map-based hash LB
+ - BUG/MEDIUM: threads/stick-tables: close a race condition on stktable_trash_expired()
+ - BUG/MAJOR: h2: set the connection's task to NULL when no client timeout is set
+ - BUG/MAJOR: thread/listeners: enable_listener must not call unbind_listener()
+ - BUG/MEDIUM: threads: don't try to free build option message on exit
+ - MINOR: applets: no need to check for runqueue's emptiness in appctx_res_wakeup()
+ - MINOR: add master-worker in the warning about nbproc
+ - MINOR: mworker: allow pidfile in mworker + foreground
+ - MINOR: mworker: write parent pid in the pidfile
+ - MINOR: mworker: do not store child pid anymore in the pidfile
+ - MINOR: ebtree: implement the scope-aware functions for eb32
+ - MEDIUM: ebtree: specify the scope of every node inserted via eb32sc
+ - MINOR: ebtree: update the eb32sc parent node's scope on delete
+ - MEDIUM: ebtree: only consider the branches matching the scope in lookups
+ - MINOR: ebtree: implement eb32sc_lookup_ge_or_first()
+ - MAJOR: task: make use of the scope-aware ebtree functions
+ - MINOR: task: simplify wake_expired_tasks() to avoid unlocking in the loop
+ - MEDIUM: task: change the construction of the loop in process_runnable_tasks()
+ - MINOR: threads: use faster locks for the spin locks
+ - MINOR: tasks: only visit filled task slots after processing them
+ - MEDIUM: tasks: implement a lockless scheduler for single-thread usage
+ - BUG/MINOR: dns: Don't try to get the server lock if it's already held.
+ - BUG/MINOR: dns: Don't lock the server lock in snr_check_ip_callback().
+ - DOC: Add note about encrypted password CPU usage
+ - BUG/MINOR: h2: set the "HEADERS_SENT" flag on stream, not connection
+ - BUG/MEDIUM: h2: properly send an RST_STREAM on mux stream error
+ - BUG/MEDIUM: h2: properly send the GOAWAY frame in the mux
+ - BUG/MEDIUM: h2: don't try (and fail) to send non-existing data in the mux
+ - MEDIUM: h2: remove the H2_SS_RESET intermediate state
+ - BUG/MEDIUM: h2: fix some wrong error codes on connections
+ - BUILD: threads: Rename SPIN/RWLOCK macros using HA_ prefix
+ - BUILD: enable USE_THREAD for Solaris build.
+ - BUG/MEDIUM: h2: don't close the connection if there are data left
+ - MINOR: h2: don't re-enable the connection's task when we're closing
+ - BUG/MEDIUM: h2: properly set H2_SF_ES_SENT when sending the final frame
+ - BUG/MINOR: h2: correctly check for H2_SF_ES_SENT before closing
+ - MINOR: h2: add new stream flag H2_SF_OUTGOING_DATA
+ - BUG/MINOR: h2: don't send GOAWAY on failed response
+ - BUG/MEDIUM: splice/threads: pipe reuse list was not protected.
+ - BUG/MINOR: comp: fix compilation warning compiling without compression.
+ - BUG/MINOR: stream-int: don't set MSG_MORE on closed request path
+ - BUG/MAJOR: threads/tasks: fix the scheduler again
+ - BUG/MINOR: ssl: Don't assume we have a ssl_bind_conf because a SNI is matched.
+ - MINOR: ssl: Handle session resumption with TLS 1.3
+ - MINOR: ssl: Spell 0x10101000L correctly.
+ - MINOR: ssl: Handle sending early data to server.
+ - BUILD: ssl: fix build of backend without ssl
+ - BUILD: shctx: do not depend on openssl anymore
+ - BUG/MINOR: h1: the HTTP/1 make status code parser check for digits
+ - BUG/MEDIUM: h2: reject non-3-digit status codes
+ - BUG/MEDIUM: stream-int: Don't lose write's notifs when a stream is woken up
+ - BUG/MINOR: pattern: Rely on the sample type to copy it in pattern_exec_match
+ - BUG/MEDIUM: h2: split the function to send RST_STREAM
+ - BUG/MEDIUM: h1: ensure the chunk size parser can deal with full buffers
+ - MINOR: tools: don't use unlikely() in hex2i()
+ - BUG/MEDIUM: h2: support orphaned streams
+ - BUG/MEDIUM: threads/cli: fix "show sess" locking on release
+ - CLEANUP: mux: remove the unused "release()" function
+ - MINOR: cli: make "show fd" report the fd's thread mask
+ - BUG/MEDIUM: stream: don't ignore res.analyse_exp anymore
+ - CLEANUP: global: introduce variable pid_bit to avoid shifts with relative_pid
+ - MEDIUM: http: always reject the "PRI" method
+
+2017/11/03 : 1.8-rc2
+ - BUG/MINOR: send-proxy-v2: fix dest_len in make_tlv call
+ - BUG/MINOR: send-proxy-v2: string size must include ('\0')
+ - MINOR: mux: Only define pipe functions on linux.
+ - MINOR: cache: Remove useless test for nonzero.
+ - MINOR: cache: Don't confuse act_return and act_parse_ret.
+ - BUG/MEDIUM: h2: don't try to parse incomplete H1 responses
+ - BUG/MEDIUM: checks/mux: always enable send-polling after connecting
+ - BUG/MAJOR: fix deadlock on healthchecks.
+ - BUG/MINOR: thread: fix a typo in the debug code
+ - BUILD: shctx: allow to be built without openssl
+ - BUG/MEDIUM: cache: don't try to resolve wrong filters
+ - BUG/MAJOR: buffers: fix get_buffer_nc() for data at end of buffer
+ - BUG/MINOR: freq: fix infinite loop on freq_ctr_period.
+ - BUG/MINOR: stdarg.h inclusion
+ - BUG/MINOR: dns: fix missing lock protection on server.
+ - BUG/MINOR: lua: fix missing lock protection on server.
+ - BUILD: enable USE_THREAD for OpenBSD build.
+ - BUG/MAJOR: mux_pt: don't dereference a connstream after ->wake()
+ - MINOR: thread: report multi-thread support in haproxy -vv
+
+2017/10/31 : 1.8-rc1
+ - BUG/MEDIUM: server: Allocate tmptrash before using it.
+ - CONTRIB: trace: add the possibility to place trace calls in the code
+ - CONTRIB: trace: try to display the function's return value on exit
+ - CONTRIB: trace: report the base name only for file names
+ - BUILD: ssl: support OPENSSL_NO_ASYNC #define
+ - MINOR: ssl: build with recent BoringSSL library
+ - BUG/MINOR: ssl: OCSP_single_get0_status can return -1
+ - BUG/MINOR: cli: restore "set ssl tls-key" command
+ - CLEANUP: cli: remove undocumented "set ssl tls-keys" command
+ - IMPORT: sha1: import SHA1 functions
+ - MINOR: sample: add the sha1 converter
+ - MINOR: sample: add the hex2i converter
+ - MINOR: stream-int: stop checking for useless connection flags in chk_snd_conn
+ - MINOR: ssl: don't abort after sending 16kB
+ - MINOR: connection: move the cleanup of flag CO_FL_WAIT_ROOM
+ - MINOR: connection: add flag CO_FL_WILL_UPDATE to indicate when updates are granted
+ - MEDIUM: connection: make use of CO_FL_WILL_UPDATE in conn_sock_shutw()
+ - MINOR: raw_sock: make use of CO_FL_WILL_UPDATE
+ - MINOR: ssl_sock: make use of CO_FL_WILL_UPDATE
+ - BUG/MINOR: checks: Don't forget to release the connection on error case.
+ - MINOR: buffer: add the buffer input manipulation functions
+ - BUG/MEDIUM: prevent buffers being overwritten during build_logline() execution
+ - MEDIUM: cfgparse: post section callback
+ - MEDIUM: cfgparse: post parsing registration
+ - MINOR: lua: add uuid to the Class Proxy
+ - MINOR: hlua: Add regex class
+ - MINOR: http: Mark the 425 code as "Too Early".
+ - MEDIUM: ssl: convert CBS (BoringSSL api) usage to neutral code
+ - MINOR: ssl: support Openssl 1.1.1 early callback for switchctx
+ - MINOR: ssl: generated certificate is missing in switchctx early callback
+ - MEDIUM: ssl: Handle early data with OpenSSL 1.1.1
+ - BUILD: Makefile: disable -Wunused-label
+ - MINOR: ssl/proto_http: Add keywords to take care of early data.
+ - BUG/MINOR: lua: const attribute of a string is overridden
+ - MINOR: ssl: Don't abuse ssl_options.
+ - MINOR: update proxy-protocol-v2 #define
+ - MINOR: merge ssl_sock_get calls for log and ppv2
+ - MINOR: add ALPN information to send-proxy-v2
+ - MEDIUM: h1: ensure that 1xx, 204 and 304 don't have a payload body
+ - CLEANUP: shctx: get ride of the shsess_packet{_hdr} structures
+ - MEDIUM: lists: list_for_each_entry{_safe}_from functions
+ - REORG: shctx: move lock functions and struct
+ - MEDIUM: shctx: allow the use of multiple shctx
+ - REORG: shctx: move ssl functions to ssl_sock.c
+ - MEDIUM: shctx: separate ssl and shctx
+ - MINOR: shctx: rename lock functions
+ - MINOR: h1: store the status code in the H1 message
+ - BUG/MINOR: spoe: Don't compare engine name and SPOE scope when both are NULL
+ - BUG/MINOR: spoa: Update pointer on the end of the frame when a reply is encoded
+ - MINOR: action: Add trk_idx inline function
+ - MINOR: action: Use trk_idx instead of tcp/http_trk_idx
+ - MINOR: action: Add a function pointer in act_rule struct to check its validity
+ - MINOR: action: Add function to check rules using an action ACT_ACTION_TRK_*
+ - MINOR: action: Add a functions to check http capture rules
+ - MINOR: action: Factorize checks on rules calling check_ptr if defined
+ - MINOR: acl: Pass the ACLs as an explicit parameter of build_acl_cond
+ - MEDIUM: spoe: Add support of ACLS to enable or disable sending of SPOE messages
+ - MINOR: spoe: Check uniqness of SPOE engine names during config parsing
+ - MEDIUM: spoe: Parse new "spoe-group" section in SPOE config file
+ - MEDIUM: spoe/rules: Add "send-spoe-group" action for tcp/http rules
+ - MINOR: spoe: Move message encoding in its own function
+ - MINOR: spoe: Add a type to qualify the message list during encoding
+ - MINOR: spoe: Add a generic function to encode a list of SPOE message
+ - MEDIUM: spoe/rules: Process "send-spoe-group" action
+ - BUG/MINOR: dns: Fix CLI keyword declaration
+ - MAJOR: dns: Refactor the DNS code
+ - BUG/MINOR: mailers: Fix a memory leak when email alerts are released
+ - MEDIUM: mailers: Init alerts during conf parsing and refactor their processing
+ - MINOR: mailers: Use pools to allocate email alerts and its tcpcheck_rules
+ - MINOR: standard: Add memvprintf function
+ - MINOR: log: Save alerts and warnings emitted during HAProxy startup
+ - MINOR: cli: Add "show startup-logs" command
+ - MINOR: startup: Extend the scope the MODE_STARTING flag
+ - MINOR: threads: Prepare makefile to link with pthread
+ - MINOR: threads: Add THREAD_LOCAL macro
+ - MINOR: threads: Add atomic-ops and plock includes in import dir
+ - MEDIUM: threads: Add hathreads header file
+ - MINOR: threads: Add mechanism to register per-thread init/deinit functions
+ - MINOR: threads: Add nbthread parameter
+ - MEDIUM: threads: Adds a set of functions to handle sync-point
+ - MAJOR: threads: Start threads to experiment multithreading
+ - MINOR: threads: Define the sync-point inside run_poll_loop
+ - MEDIUM: threads/buffers: Define and register per-thread init/deinit functions
+ - MEDIUM: threads/chunks: Transform trash chunks in thread-local variables
+ - MEDIUM: threads/time: Many global variables from time.h are now thread-local
+ - MEDIUM: threads/logs: Make logs thread-safe
+ - MEDIUM: threads/pool: Make pool thread-safe by locking all access to a pool
+ - MAJOR: threads/fd: Make fd stuffs thread-safe
+ - MINOR: threads/fd: Add a mask of threads allowed to process on each fd in fdtab array
+ - MEDIUM: threads/fd: Initialize the process mask during the call to fd_insert
+ - MINOR: threads/fd: Process cached events of FDs depending on the process mask
+ - MINOR: threads/polling: pollers now handle FDs depending on the process mask
+ - WIP: SQUASH WITH SYNC POINT
+ - MAJOR: threads/task: handle multithread on task scheduler
+ - MEDIUM: threads/signal: Add a lock to make signals thread-safe
+ - MEDIUM: threads/listeners: Make listeners thread-safe
+ - MEDIUM: threads/proxy: Add a lock per proxy and atomically update proxy vars
+ - MEDIUM: threads/server: Make connection list (priv/idle/safe) thread-safe
+ - MEDIUM: threads/server: Add a lock per server and atomically update server vars
+ - MINOR: threads/server: Add a lock to deal with insert in updates_servers list
+ - MEDIUM: threads/lb: Make LB algorithms (lb_*.c) thread-safe
+ - MEDIUM: threads/stick-tables: handle multithreads on stick tables
+ - MINOR: threads/sample: Change temp_smp into a thread local variable
+ - MEDIUM: threads/http: Make http_capture_bad_message thread-safe
+ - MINOR: threads/regex: Change Regex trash buffer into a thread local variable
+ - MAJOR: threads/applet: Handle multithreading for applets
+ - MAJOR: threads/peers: Make peers thread safe
+ - MAJOR: threads/buffer: Make buffer wait queue thread safe
+ - MEDIUM: threads/stream: Make streams list thread safe
+ - MAJOR: threads/ssl: Make SSL part thread-safe
+ - MEDIUM: threads/queue: Make queues thread-safe
+ - MAJOR: threads/map: Make acls/maps thread safe
+ - MEDIUM: threads/freq_ctr: Make the frequency counters thread-safe
+ - MEDIUM: thread/vars: Make vars thread-safe
+ - MEDIUM: threads/filters: Add init/deinit callback per thread
+ - MINOR: threads/filters: Update trace filter to add _per_thread callbacks
+ - MEDIUM: threads/compression: Make HTTP compression thread-safe
+ - MEDIUM: threads/lua: Makes the jmpbuf and some other buffers local to the current thread.
+ - MEDIUM: threads/lua: Add locks around the Lua execution parts.
+ - MEDIUM: threads/lua: Ensure that the launched tasks runs on the same threads than me
+ - MEDIUM: threads/lua: Cannot access the socket if we try to access from another thread.
+ - MEDIUM: threads/xref: Convert xref function to a thread safe model
+ - MEDIUM: threads/tasks: Add lock around notifications
+ - MEDIUM: thread/spoe: Make the SPOE thread-safe
+ - MEDIUM: thread/dns: Make DNS thread-safe
+ - MINOR: threads: Add thread-map config parameter in the global section
+ - MINOR: threads/checks: Add a lock to protect the pid list used by external checks
+ - MINOR: threads/checks: Set the task process_mask when a check is executed
+ - MINOR: threads/mailers: Add a lock to protect queues of email alerts
+ - MEDIUM: threads/server: Use the server lock to protect health check and cli concurrency
+ - MINOR: threads: Don't start when a device detection module is used
+ - BUG/MEDIUM: threads: Run the poll loop on the main thread too
+ - BUG/MINOR: threads: Add missing THREAD_LOCAL on static here and there
+ - MAJOR: threads: Officially enable the threads support in HAProxy
+ - BUG/MAJOR: threads/freq_ctr: fix lock on freq counters.
+ - BUG/MAJOR: threads/time: Store the time deviation in an 64-bits integer
+ - BUILD: stick-tables: silence an uninitialized variable warning
+ - BUG/MINOR: dns: Fix SRV records with the new thread code.
+ - MINOR: ssl: Remove the global allow-0rtt option.
+ - CLEANUP: threads: replace the last few 1UL<<tid with tid_bit
+ - CLEANUP: threads: rename process_mask to thread_mask
+ - MINOR: h1: add a function to measure the trailers length
+ - MINOR: threads: add a portable barrier for threads and non-threads
+ - BUG/MAJOR: threads/freq_ctr: use a memory barrier to detect changes
+ - BUG/MEDIUM: threads: Initialize the sync-point
+ - MEDIUM: connection: start to introduce a mux layer between xprt and data
+ - MINOR: connection: implement alpn registration of muxes
+ - MINOR: mux: register the pass-through mux for any ALPN string
+ - MEDIUM: session: use the ALPN token and proxy mode to select the mux
+ - MINOR: connection: report the major HTTP version from the MUX for logging (fc_http_major)
+ - MINOR: connection: introduce conn_stream
+ - MINOR: mux: add more methods to mux_ops
+ - MINOR: connection: introduce the conn_stream manipulation functions
+ - MINOR: mux_pt: implement remaining mux_ops methods
+ - MAJOR: connection : Split struct connection into struct connection and struct conn_stream.
+ - MINOR: connection: make conn_stream users also check for per-stream error flag
+ - MINOR: conn_stream: new shutr/w status flags
+ - MINOR: conn_stream: modify cs_shut{r,w} API to pass the desired mode
+ - MEDIUM: connection: make conn_sock_shutw() aware of lingering
+ - MINOR: connection: add cs_close() to close a conn_stream
+ - MEDIUM: mux_pt: make cs_shutr() / cs_shutw() properly close the connection
+ - MEDIUM: connection: replace conn_full_close() with cs_close()
+ - MEDIUM: connection: make mux->detach() release the connection
+ - MEDIUM: stream: do not forcefully close the client connection anymore
+ - MEDIUM: checks: exclusively use cs_destroy() to release a connection
+ - MEDIUM: connection: add a destroy callback
+ - MINOR: session: release the listener with the session, not the stream
+ - MEDIUM: session: make use of the connection's destroy callback
+ - CONTRIB: hpack: implement a reverse huffman table generator for hpack
+ - MINOR: hpack: implement the HPACK Huffman table decoder
+ - MINOR: hpack: implement the header tables management
+ - MINOR: hpack: implement the decoder
+ - MEDIUM: hpack: implement basic hpack encoding
+ - MINOR: h2: centralize all HTTP/2 protocol elements and constants
+ - MINOR: h2: create a very minimalistic h2 mux
+ - MINOR: h2: expose tune.h2.header-table-size to configure the table size
+ - MINOR: h2: expose tune.h2.initial-window-size to configure the window size
+ - MINOR: h2: expose tune.h2.max-concurrent-streams to limit the number of streams
+ - MINOR: h2: create the h2c struct and allocate its pool
+ - MINOR: h2: create the h2s struct and the associated pool
+ - MINOR: h2: handle two extra stream states for errors
+ - MINOR: h2: add a frame header descriptor for incoming frames
+ - MEDIUM: h2: allocate and release the h2c context on connection init/end
+ - MEDIUM: h2: implement basic recv/send/wake functions
+ - MEDIUM: h2: dynamically allocate the demux buffer on Rx
+ - MEDIUM: h2: implement the mux buffer allocator
+ - MINOR: h2: add the connection and stream flags listing the causes for blocking
+ - MINOR: h2: add function h2s_id() to report a stream's ID
+ - MINOR: h2: small function to know when the mux is busy
+ - MINOR: h2: new function h2c_error to mark an error on the connection
+ - MINOR: h2: new function h2s_error() to mark an error on a stream
+ - MINOR: h2: add h2_set_frame_size() to update the size in a binary frame
+ - MINOR: h2: new function h2_peek_frame_hdr() to retrieve a new frame header
+ - MINOR: h2: add a few functions to retrieve contents from a wrapping buffer
+ - MINOR: h2: add stream lookup function based on the stream ID
+ - MINOR: h2: create dummy idle and closed streams
+ - MINOR: h2: add the function to create a new stream
+ - MINOR: h2: update the {MUX,DEM}_{M,D}ALLOC flags on buffer availability
+ - MEDIUM: h2: start to consider the H2_CF_{MUX,DEM}_* flags for polling
+ - MINOR: h2: also terminate the connection on shutr
+ - MEDIUM: h2: properly consider all conditions for end of connection
+ - MEDIUM: h2: wake the connection up for send on pending streams
+ - MEDIUM: h2: start to implement the frames processing loop
+ - MINOR: h2: add a function to send a GOAWAY error frame
+ - MINOR: h2: match the H2 connection preface on init
+ - MEDIUM: h2: enable connection polling for send when a cs wants to emit
+ - MEDIUM: h2: enable reading again on the connection if it was blocked on stream buffer full
+ - MEDIUM: h2: process streams pending for sending
+ - MINOR: h2: send a real SETTINGS frame based on the configuration
+ - MEDIUM: h2: detect the presence of the first settings frame
+ - MINOR: h2: create a stream parser for the demuxer
+ - MINOR: h2: implement PING frames
+ - MEDIUM: h2: decode SETTINGS frames and extract relevant settings
+ - MINOR: h2: lookup the stream during demuxing
+ - MEDIUM: h2: honor WINDOW_UPDATE frames
+ - MINOR: h2: implement h2_send_rst_stream() to send RST_STREAM frames
+ - MINOR: h2: handle CONTINUATION frames
+ - MEDIUM: h2: partial implementation of h2_detach()
+ - MEDIUM: h2: unblock a connection when its current stream detaches
+ - MEDIUM: h2: basic processing of HEADERS frame
+ - MEDIUM: h2: don't use trash to decode headers!
+ - MEDIUM: h2: implement the response HEADERS frame to encode the H1 response
+ - MEDIUM: h2: send the H1 response body as DATA frames
+ - MEDIUM: h2: skip the response trailers if any
+ - MEDIUM: h2: properly continue to parse header block when facing a 1xx response
+ - MEDIUM: h2: send WINDOW_UPDATE frames for connection
+ - MEDIUM: h2: handle request body in DATA frames
+ - MINOR: h2: handle RST_STREAM frames
+ - MEDIUM: h2: send DATA+ES or RST_STREAM on shutw/shutr
+ - MINOR: h2: use a common function to signal some and all streams.
+ - MEDIUM: h2: handle GOAWAY frames
+ - MINOR: h2: centralize the check for the idle streams
+ - MINOR: h2: centralize the check for the half-closed(remote) streams
+ - MEDIUM: h2: silently ignore frames higher than last_id after GOAWAY
+ - MINOR: h2: properly reject PUSH_PROMISE frames coming from the client
+ - MEDIUM: h2: perform a graceful shutdown on "Connection: close"
+ - MEDIUM: h2: send a GOAWAY frame when dealing with an empty response
+ - MEDIUM: h2: apply a timeout to h2 connections
+ - BUG/MEDIUM: h2: fix incorrect timeout handling on the connection
+ - MEDIUM: shctx: forbid shctx to read more than expected
+ - MEDIUM: cache: configuration parsing and initialization
+ - MEDIUM: cache: store objects in cache
+ - MEDIUM: cache: deliver objects from cache
+
+2017/10/22 : 1.8-dev3
+ - REORG: ssl: move defines and methodVersions table upper
+ - MEDIUM: ssl: ctx_set_version/ssl_set_version func for methodVersions table
+ - MINOR: ssl: support ssl-min-ver and ssl-max-ver with crt-list
+ - MEDIUM: ssl: disable SSLv3 per default for bind
+ - BUG/MAJOR: ssl: fix segfault on connection close using async engines.
+ - BUG/MAJOR: ssl: buffer overflow using offloaded ciphering on async engine
+ - BUG/MINOR: ssl: do not call directly the conn_fd_handler from async_fd_handler
+ - BUG/MINOR: haproxy/cli : fix for solaris/illumos distros for CMSG* macros
+ - BUG/MEDIUM: build without openssl broken
+ - BUG/MINOR: warning: need_resend may be used uninitialized
+ - BUG/MEDIUM: misplaced exit and wrong exit code
+ - BUG/MINOR: Makefile: fix compile error with USE_LUA=1 in ubuntu16.04
+ - BUILD: scripts: make publish-release support bare repositories
+ - BUILD: scripts: add an automatic mode for publish-release
+ - BUILD: scripts: add a "quiet" mode to publish-release
+ - BUG/MAJOR: http: call manage_client_side_cookies() before erasing the buffer
+ - BUG/MINOR: buffers: Fix bi/bo_contig_space to handle full buffers
+ - CONTRIB: plug qdiscs: Plug queuing disciplines mini HOWTO.
+ - BUG/MINOR: acls: Set the right refflag when patterns are loaded from a map
+ - BUG/MINOR: ssl: Be sure that SSLv3 connection methods exist for openssl < 1.1.0
+ - BUG/MINOR: http/filters: Be sure to wait if a filter loops in HTTP_MSG_ENDING
+ - BUG/MEDIUM: peers: Peers CLOSE_WAIT issue.
+ - BUG/MAJOR: server: Segfault after parsing server state file.
+ - BUG/MEDIUM: unix: never unlink a unix socket from the file system
+ - scripts: create-release pass -n to tail
+ - SCRIPTS: create-release: enforce GIT_COMMITTER_{NAME|EMAIL} validity
+ - BUG/MEDIUM: fix segfault when no argument to -x option
+ - MINOR: warning on multiple -x
+ - MINOR: mworker: don't copy -x argument anymore in copy_argv()
+ - BUG/MEDIUM: mworker: don't reuse PIDs passed to the master
+ - BUG/MINOR: Wrong peer task expiration handling during synchronization processing.
+ - BUG/MINOR: cfgparse: Check if tune.http.maxhdr is in the range 1..32767
+ - BUG/MINOR: log: pin the front connection when front ip/ports are logged
+ - DOC: fix references to the section about the unix socket
+ - BUG/MINOR: stream: flag TASK_WOKEN_RES not set if task in runqueue
+ - MAJOR: task: task scheduler rework.
+ - MINOR: task/stream: tasks related to a stream must be init by the caller.
+ - MINOR: queue: Change pendconn_get_next_strm into private function
+ - MINOR: backends: Change get_server_sh/get_server_uh into private function
+ - MINOR: queue: Change pendconn_from_srv/pendconn_from_px into private functions
+ - MEDIUM: stream: make stream_new() always set the target and analysers
+ - MINOR: frontend: initialize HTTP layer after the debugging code
+ - MINOR: connection: add a .get_alpn() method to xprt_ops
+ - MINOR: ssl: add a get_alpn() method to ssl_sock
+ - MINOR: frontend: retrieve the ALPN name when available
+ - MINOR: frontend: report the connection's ALPN in the debug output
+ - MINOR: stream: don't set backend's nor response analysers on SF_TUNNEL
+ - MINOR: connection: send data before receiving
+ - MAJOR: applet: applet scheduler rework.
+ - BUG/MAJOR: frontend: don't dereference a null conn on outgoing connections
+ - BUG/MAJOR: cli: fix custom io_release was crushed by NULL.
+ - BUG/MAJOR: map: fix segfault during 'show map/acl' on cli.
+ - BUG/MAJOR: compression: Be sure to release the compression state in all cases
+ - MINOR: compression: Use a memory pool to allocate compression states
+ - BUG/MAJOR: applet: fix a freeze if data is immediately forwarded.
+ - DOC: fix references to the section about time format.
+ - BUG/MEDIUM: map/acl: fix unwanted flags inheritance.
+ - BUG/MAJOR: http: fix buffer overflow on loguri buffer.
+ - MINOR: ssl: compare server certificate names to the SNI on outgoing connections
+ - BUG/MINOR: stream: Don't forget to remove CF_WAKE_ONCE flag on response channel
+ - BUG/MINOR: http: Don't reset the transaction if there are still data to send
+ - BUG/MEDIUM: filters: Be sure to call flt_end_analyze for both channels
+ - MINOR: peers: Add additional information to stick-table definition messages.
+ - BUG/MINOR: http: properly handle all 1xx informational responses
+ - OPTIM: ssl: don't consider a small ssl_read() as an indication of end of buffer
+ - BUG/MINOR: peers: peer synchronization issue (with several peers sections).
+ - CLEANUP: hdr_idx: make some function arguments const where possible
+ - BUG/MINOR: Prevent a use-after-free on error scenario on option "-x".
+ - BUG/MINOR: lua: In error case, the safe mode is not removed
+ - BUG/MINOR: lua: executes the function destroying the Lua session in safe mode
+ - BUG/MAJOR: lua/socket: resources not destroyed when the socket is aborted
+ - BUG/MEDIUM: lua: bad memory access
+ - BUG/MINOR: Lua: variable already initialized
+ - DOC: update CONTRIBUTING regarding optional parts and message format
+ - DOC: update the list of OpenSSL versions in the README
+ - BUG/MINOR: http: Set the response error state in http_sync_res_state
+ - MINOR: http: Reorder/rewrite checks in http_resync_states
+ - MINOR: http: Switch requests/responses in TUNNEL mode only by checking txn flags
+ - BUG/MEDIUM: http: Switch HTTP responses in TUNNEL mode when body length is undefined
+ - MINOR: http: Rely on analyzers mask to end processing in forward_body functions
+ - BUG/MINOR: http: Fix bug introduced in previous patch in http_resync_states
+ - BUG/MINOR: contrib/modsecurity: BSD build fix
+ - BUG/MINOR: contrib/mod_defender: build fix
+ - BUG/MINOR: ssl: remove haproxy SSLv3 support when ssl lib have no SSLv3
+ - MINOR: ssl: remove an unnecessary SSL_OP_NO_* dependency
+ - BUILD: ssl: fix compatibility with openssl without TLSEXT_signature_*
+ - MINOR: tools: add a portable timegm() alternative
+ - BUILD: lua: replace timegm() with my_timegm() to fix build on Solaris 10
+ - DOC: Updated 51Degrees git URL to point to a stable version.
+ - BUG/MAJOR: http: Fix possible infinity loop in http_sync_(req|res)_state
+ - MINOR: memory: remove macros
+ - BUG/MINOR: lua: Fix Server.get_addr() port values
+ - BUG/MINOR: lua: Correctly use INET6_ADDRSTRLEN in Server.get_addr()
+ - MINOR: samples: Handle the type SMP_T_METH when we duplicate a sample in smp_dup
+ - MINOR: samples: Handle the type SMP_T_METH in smp_is_safe and smp_is_rw
+ - MINOR: samples: Don't allocate memory for SMP_T_METH sample when method is known
+ - BUG/MINOR: lua: always detach the tcp/http tasks before freeing them
+ - MINOR: task: always preinitialize the task's timeout in task_init()
+ - CLEANUP: task: remove all initializations to TICK_ETERNITY after task_new()
+ - BUG/MAJOR: lua: properly dequeue hlua_applet_wakeup() for new scheduler
+ - MINOR: lua: Add proxy as member of proxy object.
+ - DOC: lua: Proxy class doc update
+ - MINOR: lua: Add lists of frontends and backends
+ - BUG/MINOR: ssl: Fix check against SNI during server certificate verification
+ - BUG/MINOR: ssl: make use of the name in SNI before verifyhost
+ - MINOR: ssl: add a new error codes for wrong server certificates
+ - BUG/MEDIUM: stream: don't retry SSL connections which fail the SNI name check
+ - MINOR: ssl: add "no-ca-names" parameter for bind
+ - BUG/MINOR: lua: Fix bitwise logic for hlua_server_check_* functions.
+ - DOC: fix alphabetical order of "show commands" in management.txt
+ - MINOR: listener: add a function to return a listener's state as a string
+ - MINOR: cli: add a new "show fd" command
+ - BUG/MEDIUM: ssl: Fix regression about certificates generation
+ - MINOR: Add server port field to server state file.
+ - MINOR: ssl: allow to start without certificate if strict-sni is set
+ - MINOR: dns: Cache previous DNS answers.
+ - MINOR: obj: Add a new type of object, OBJ_TYPE_SRVRQ.
+ - Add a few functions to do unaligned access.
+ - MINOR: dns: Handle SRV records.
+ - MINOR: check: Fix checks when using SRV records.
+ - MINOR: doc: Document SRV label usage.
+ - BUILD/MINOR: cli: shut a minor gcc warning in "show fd"
+ - BUILD: ssl: replace SSL_CTX_get0_privatekey for openssl < 1.0.2
+ - BUILD/MINOR: build without openssl still broken
+ - BUG/MAJOR: stream: in stream_free(), close the front endpoint and not the origin
+ - CLEANUP: raw_sock: Use a better name for the constructor than __ssl_sock_deinit()
+ - MINOR: init: Fix CPU affinity setting on FreeBSD.
+ - MINOR: dns: Update analysis of TRUNCATED response for SRV records
+ - MINOR: dns: update record dname matching for SRV query types
+ - MINOR: dns: update dns response buffer reading pointer due to SRV record
+ - MINOR: dns: duplicate entries in resolution wait queue for SRV records
+ - MINOR: dns: make debugging function dump_dns_config() compatible with SRV records
+ - MINOR: dns: ability to use a SRV resolution for multiple backends
+ - MINOR: dns: enable caching of responses for server set by a SRV record
+ - MINOR: dns: new dns record type (RTYPE) for OPT
+ - MINOR: dns: enabled edns0 extension and make accepted payload size tunable
+ - MINOR: dns: default "hold obsolete" timeout set to 0
+ - MINOR: chunks: add chunk_memcpy() and chunk_memcat()
+ - MINOR: session: add a streams field to the session struct
+ - MINOR: stream: link the stream to its session
+ - MEDIUM: session: do not free a session until no stream references it
+ - MINOR: ist: implement very simple indirect strings
+ - TESTS: ist: add a test file for the functions
+ - MINOR: http: export some of the HTTP parser macros
+ - BUG/MINOR: Wrong type used as argument for spoe_decode_buffer().
+ - BUG/MINOR: dns: server set by SRV records stay in "no resolution" status
+ - MINOR: dns: Maximum DNS udp payload set to 8192
+ - MINOR: dns: automatic reduction of DNS accepted payload size
+ - MINOR: dns: make SRV record processing more verbose
+ - CLEANUP: dns: remove duplicated code in dns_resolve_recv()
+ - CLEANUP: dns: remove duplicated code in dns_validate_dns_response()
+ - BUG/MINOR: dns: wrong resolution interval lead to 100% CPU
+ - BUG/MEDIUM: dns: fix accepted_payload_size parser to avoid integer overflow
+ - BUG/MAJOR: lua: fix the impact of the scheduler changes again
+ - BUG/MEDIUM: lua: HTTP services must take care of body-less status codes
+ - MINOR: lua: properly process the contents of the content-length field
+ - BUG/MEDIUM: stream: properly set the required HTTP analysers on use-service
+ - OPTIM: lua: don't use expensive functions to parse headers in the HTTP applet
+ - OPTIM: lua: don't add "Connection: close" on the response
+ - REORG/MEDIUM: connection: introduce the notion of connection handle
+ - BUG/MINOR: stream-int: don't check the CO_FL_CURR_WR_ENA flag
+ - MEDIUM: connection: get rid of data->init() which was not for data
+ - MEDIUM: stream: make stream_new() allocate its own task
+ - CLEANUP: listener: remove the unused handler field
+ - MEDIUM: session: add a pointer to a struct task in the session
+ - MINOR: stream: provide a new stream creation function for connections
+ - MEDIUM: connection: remove useless flag CO_FL_DATA_RD_SH
+ - CLEANUP: connection: remove the unused conn_sock_shutw_pending()
+ - MEDIUM: connection: remove useless flag CO_FL_DATA_WR_SH
+ - DOC: add CLI info on privilege levels
+ - DOC: Refer to Mozilla TLS info / config generator
+ - MINOR: ssl: remove duplicate ssl_methods in struct bind_conf
+ - BUG/MEDIUM: http: Fix a regression bug when a HTTP response is in TUNNEL mode
+ - DOC: Add note about "* " prefix in CSV stats
+ - CLEANUP: memory: Remove unused function pool_destroy
+ - MINOR: listeners: Change listener_full and limit_listener into private functions
+ - MINOR: listeners: Change enable_listener and disable_listener into private functions
+ - MINOR: fd: Don't forget to reset fdtab[fd].update when a fd is added/removed
+ - MINOR: fd: Set owner and iocb field before inserting a new fd in the fdtab
+ - MINOR: backends: Make get_server_* functions explicitly static
+ - MINOR: applet: Check applets_active_queue before processing applets queue
+ - MINOR: chunks: Use dedicated function to init/deinit trash buffers
+ - MEDIUM: chunks: Realloc trash buffers only after the config is parsed and checked
+ - MINOR: logs: Use dedicated function to init/deinit log buffers
+ - MINOR: logs: Realloc log buffers only after the config is parsed and checked
+ - MINOR: buffers: Move swap_buffer into buffer.c and add deinit_buffer function
+ - MINOR: stick-tables: Make static_table_key a struct variable instead of a pointer
+ - MINOR: http: Use a trash chunk to store decoded string of the HTTP auth header
+ - MINOR: fd: Add fd_active function
+ - MINOR: fd: Use inlined functions to check fd state in fd_*_send/recv functions
+ - MINOR: fd: Move (de)allocation of fdtab and fdinfo in (de)init_pollers
+ - MINOR: freq_ctr: Return the new value after an update
+ - MEDIUM: check: server states and weight propagation re-work
+ - BUG/MEDIUM: epoll: ensure we always consider HUP and ERR
+ - MINOR: fd: Add fd_update_events function
+ - MINOR: polling: Use fd_update_events to update events seen for a fd
+ - BUG/MINOR: server: Remove FQDN requirement for using init-addr and state file
+ - Revert "BUG/MINOR: server: Remove FQDN requirement for using init-addr and state file"
+ - MINOR: ssl: rework smp_fetch_ssl_fc_cl_str without internal ssl use
+ - BUG/MEDIUM: http: Close streams for connections closed before a redirect
+ - BUG/MINOR: Lua: The socket may be destroyed when we try to access.
+ - MINOR: xref: Add a new xref system
+ - MEDIUM: xref/lua: Use xref for referencing cosocket relation between stream and lua
+ - MINOR: tasks: Move Lua notification from Lua to tasks
+ - MINOR: net_helper: Inline functions meant to be inlined.
+ - MINOR: cli: add socket commands and config to prepend informational messages with severity
+ - MINOR: add severity information to cli feedback messages
+ - BUILD: Makefile: add a function to detect support by the compiler of certain options
+ - BUILD: Makefile: shut certain gcc/clang stupid warnings
+ - BUILD: Makefile: improve detection of support for compiler warnings
+ - MINOR: peers: don't reference the incoming listener on outgoing connections
+ - MINOR: frontend: don't retrieve ALPN on the critical path
+ - MINOR: protocols: always pass a "port" argument to the listener creation
+ - MINOR: protocols: register the ->add function and stop calling them directly
+ - MINOR: unix: remove the now unused proto_uxst.h file
+ - MINOR: listeners: new function create_listeners
+ - MINOR: listeners: make listeners count consistent with reality
+ - MEDIUM: session: take care of incrementing/decrementing jobs
+ - MINOR: listener: new function listener_release
+ - MINOR: session: small cleanup of conn_complete_session()
+ - MEDIUM: session: factor out duplicated code for conn_complete_session
+ - MEDIUM: session: count the frontend's connections at a single place
+ - BUG/MEDIUM: compression: Fix check on txn in smp_fetch_res_comp_algo
+ - BUG/MINOR: compression: Check response headers before http-response rules eval
+ - BUG/MINOR: spoe: Don't rely on SPOE ctx in debug message when its creation failed
+ - BUG/MINOR: dns: Fix check on nameserver in snr_resolution_cb
+ - MINOR: ssl: Remove useless checks on bind_conf or bind_conf->is_ssl
+ - BUG/MINOR: contrib/mod_defender: close the va_list argp before return
+ - BUG/MINOR: contrib/modsecurity: close the va_list ap before return
+ - MINOR: tools: make my_htonll() more efficient on x86_64
+ - MINOR: buffer: add b_del() to delete a number of characters
+ - MINOR: buffer: add b_end() and b_to_end()
+ - MINOR: net_helper: add functions to read from vectors
+ - MINOR: net_helper: add write functions
+ - MINOR: net_helper: add 64-bit read/write functions
+ - MINOR: connection: adjust CO_FL_NOTIFY_DATA after removal of flags
+ - MINOR: ist: add a macro to ease const array initialization
+ - BUG/MEDIUM: server: unwanted behavior leaving maintenance mode on tracked stopping server
+ - BUG/MEDIUM: server: unwanted behavior leaving maintenance mode on tracked stopping server (take2)
+ - BUG/MINOR: log: fixing small memory leak in error code path.
+ - BUG/MINOR: contrib/halog: fixing small memory leak
+ - BUG/MEDIUM: tcp/http: set-dst-port action broken
+ - CLEANUP: checks: don't set conn->handle.fd to -1
+ - BUG/MEDIUM: tcp-check: properly indicate polling state before performing I/O
+ - BUG/MINOR: tcp-check: don't quit with pending data in the send buffer
+ - BUG/MEDIUM: tcp-check: don't call tcpcheck_main() from the I/O handlers!
+ - BUG/MINOR: unix: properly check for octal digits in the "mode" argument
+ - MINOR: checks: make chk_report_conn_err() take a check, not a connection
+ - CLEANUP: checks: remove misleading comments and statuses for external process
+ - CLEANUP: checks: don't report the fork() error twice
+ - CLEANUP: checks: do not allocate a connection for process checks
+ - TESTS: checks: add a simple test config for external checks
+ - BUG/MINOR: tcp-check: don't initialize then break a connection starting with a comment
+ - TESTS: checks: add a simple test config for tcp-checks
+ - MINOR: tcp-check: make tcpcheck_main() take a check, not a connection
+ - MINOR: checks: don't create then kill a dummy connection before tcp-checks
+ - MEDIUM: checks: make tcpcheck_main() indicate if it recycled a connection
+ - MEDIUM: checks: do not allocate a permanent connection anymore
+ - BUG/MEDIUM: cli: fix "show fd" crash when dumping closed FDs
+ - BUG/MEDIUM: http: Return an error when url_dec sample converter failed
+ - BUG/MAJOR: stream-int: don't re-arm recv if send fails
+ - BUILD/MINOR: 51d: fix warning when building with 51Degrees release version 3.2.12.12
+ - DOC: 51d: add 51Degrees git URL that points to release version 3.2.12.12
+ - DOC: 51d: Updated git URL and instructions for getting Hash Trie data files.
+ - MINOR: compiler: restore the likely() wrapper for gcc 5.x
+ - MINOR: session: remove the list of streams from struct session
+ - DOC: fix some typos
+ - MINOR: server: add the srv_queue() sample fetch method
+ - MINOR: payload: add new sample fetch functions to process distcc protocol
+ - MAJOR: servers: propagate server status changes asynchronously.
+ - BUG/MEDIUM: ssl: fix OCSP expiry calculation
+ - BUG/MINOR: stream-int: don't set MSG_MORE on SHUTW_NOW without AUTO_CLOSE
+ - MINOR: server: Handle weight increase in consistent hash.
+ - MINOR: checks: Add a new keyword to specify a SNI when doing SSL checks.
+ - BUG/MINOR: tools: fix my_htonll() on x86_64
+ - BUG/MINOR: stats: Clear a bit more counters with in cli_parse_clear_counters().
+ - BUG/MAJOR: lua: scheduled task is freezing.
+ - MINOR: buffer: add bo_del() to delete a number of characters from output
+ - MINOR: buffer: add a function to match against string patterns
+ - MINOR: buffer: add two functions to inject data into buffers
+ - MINOR: buffer: add buffer_space_wraps()
+ - REORG: channel: finally rename the last bi_* / bo_* functions
+ - MINOR: buffer: add bo_getblk() and bo_getblk_nc()
+ - MINOR: channel: make use of bo_getblk{,_nc} for their channel equivalents
+ - MINOR: channel: make the channel be a const in all {ci,co}_get* functions
+ - MINOR: ist: add ist0() to add a trailing zero to a string.
+ - BUG/MEDIUM: log: check result details truncated.
+ - MINOR: buffer: make bo_getblk_nc() not return 2 for a full buffer
+ - REORG: http: move some very http1-specific parts to h1.{c,h}
+ - REORG: http: move the HTTP/1 chunk parser to h1.{c,h}
+ - REORG: http: move the HTTP/1 header block parser to h1.c
+ - MEDIUM: http: make the chunk size parser only depend on the buffer
+ - MEDIUM: http: make the chunk crlf parser only depend on the buffer
+ - MINOR: h1: add struct h1m for basic HTTP/1 messages
+ - MINOR: http: add very simple header management based on double strings
+ - MEDIUM: h1: reimplement the http/1 response parser for the gateway
+ - REORG: connection: rename CO_FL_DATA_* -> CO_FL_XPRT_*
+ - MEDIUM: connection: make conn_sock_shutw() aware of lingering
+ - MINOR: connection: ensure conn_ctrl_close() also resets the fd
+ - MINOR: connection: add conn_stop_tracking() to disable tracking
+ - MINOR: tcp: use conn_full_close() instead of conn_force_close()
+ - MINOR: unix: use conn_full_close() instead of conn_force_close()
+ - MINOR: checks: use conn_full_close() instead of conn_force_close()
+ - MINOR: session: use conn_full_close() instead of conn_force_close()
+ - MINOR: stream: use conn_full_close() instead of conn_force_close()
+ - MINOR: stream: use conn_full_close() instead of conn_force_close()
+ - MINOR: backend: use conn_full_close() instead of conn_force_close()
+ - MINOR: stream-int: use conn_full_close() instead of conn_force_close()
+ - MINOR: connection: remove conn_force_close()
+ - BUG/MINOR: ssl: ocsp response with 'revoked' status is correct
+
+2017/06/02 : 1.8-dev2
+ - CLEANUP: server: moving netinet/tcp.h inclusion
+ - DOC: changed "block"(deprecated) examples to http-request deny
+ - DOC: add few comments to examples.
+ - DOC: update sample code for PROXY protocol
+ - DOC: mention lighttpd 1.4.46 implements PROXY
+ - MINOR: server: Restrict dynamic cookie check to the same proxy.
+ - DOC: stick-table is available in frontend sections
+ - BUG/MINOR: server : no transparent proxy for DragonflyBSD
+ - BUILD/MINOR: stats: remove unexpected argument to stats_dump_json_header()
+ - BUILD/MINOR: tools: fix build warning in debug_hexdump()
+ - BUG/MINOR: dns: Wrong address family used when creating IPv6 sockets.
+ - BUG/MINOR: config: missing goto out after parsing an incorrect ACL character
+ - BUG/MINOR: arg: don't try to add an argument on failed memory allocation
+ - MEDIUM: server: Inherit CLI weight changes and agent-check weight responses
+ - BUG/MEDIUM: arg: ensure that we properly unlink unresolved arguments on error
+ - BUG/MEDIUM: acl: don't free unresolved args in prune_acl_expr()
+ - BUG/MEDIUM: servers: unbreak server weight propagation
+ - MINOR: lua: ensure the memory allocator is used all the time
+ - MINOR: cli: Add a command to send listening sockets.
+ - MINOR: global: Add an option to get the old listening sockets.
+ - MINOR: tcp: When binding socket, attempt to reuse one from the old proc.
+ - MINOR: doc: document the -x flag
+ - MINOR: proxy: Don't close FDs if not our proxy.
+ - MINOR: socket transfer: Set a timeout on the socket.
+ - MINOR: systemd wrapper: add support for passing the -x option.
+ - BUG/MINOR: server: Fix a wrong error message during 'usesrc' keyword parsing.
+ - BUG/MAJOR: Broken parsing for valid keywords provided after 'source' setting.
+ - CLEANUP: logs: typo: simgle => single
+ - BUG/MEDIUM: acl: properly release unused args in prune_acl_expr()
+ - MEDIUM: config: don't check config validity when there are fatal errors
+ - BUG/MAJOR: Use -fwrapv.
+ - BUG/MINOR: server: don't use "proxy" when px is really meant.
+ - BUG/MEDIUM: http: Drop the connection establishment when a redirect is performed
+ - BUG/MINOR: server: missing default server 'resolvers' setting duplication.
+ - MINOR: server: Extract the code responsible of copying default-server settings.
+ - MINOR: server: Extract the code which finalizes server initializations after 'server' lines parsing.
+ - MINOR: server: Add 'server-template' new keyword supported in backend sections.
+ - MINOR: server: Add server_template_init() function to initialize servers from a templates.
+ - DOC: Add documentation for new "server-template" keyword.
+ - DOC: add layer 4 links/cross reference to "block" keyword.
+ - DOC: errloc/errorloc302/errorloc303 missing status codes.
+ - BUG/MEDIUM: lua: memory leak
+ - CLEANUP: lua: remove test
+ - BUG/MINOR: hash-balance-factor isn't effective in certain circumstances
+ - BUG/MINOR: change header-declared function to static inline
+ - REORG: spoe: move spoe_encode_varint / spoe_decode_varint from spoe to common
+ - MINOR: Add binary encoding request header sample fetch
+ - MINOR: proto-http: Add sample fetch wich returns all HTTP headers
+ - MINOR: Add ModSecurity wrapper as contrib
+ - BUG/MINOR: ssl: fix warnings about methods for opensslv1.1.
+ - DOC: update RFC references
+ - CONTRIB: tcploop: add action "X" to execute a command
+ - MINOR: server: cli: Add server FQDNs to server-state file and stats socket.
+ - BUG/MINOR: contrib/mod_security: fix build on FreeBSD
+ - BUG/MINOR: checks: don't send proxy protocol with agent checks
+ - MINOR: ssl: add prefer-client-ciphers
+ - MEDIUM: ssl: revert ssl/tls version settings relative to default-server.
+ - MEDIUM: ssl: ssl_methods implementation is reworked and factored for min/max tlsxx
+ - MEDIUM: ssl: calculate the real min/max TLS version and find holes
+ - MINOR: ssl: support TLSv1.3 for bind and server
+ - MINOR: ssl: show methods supported by openssl
+ - MEDIUM: ssl: add ssl-min-ver and ssl-max-ver parameters for bind and server
+ - MEDIUM: ssl: ssl-min-ver and ssl-max-ver compatibility.
+ - CLEANUP: retire obsoleted USE_GETSOCKNAME build option
+ - BUG/MAJOR: dns: Broken kqueue events handling (BSD systems).
+ - MINOR: sample: Add b64dec sample converter
+ - BUG/MEDIUM: lua: segfault if a converter or a sample doesn't return anything
+ - MINOR: cli: add ACCESS_LVL_MASK to store the access level
+ - MINOR: cli: add 'expose-fd listeners' to pass listeners FDs
+ - MEDIUM: proxy: zombify proxies only when the expose-fd socket is bound
+ - MEDIUM: ssl: add basic support for OpenSSL crypto engine
+ - MAJOR: ssl: add openssl async mode support
+ - MEDIUM: ssl: handle multiple async engines
+ - MINOR: boringssl: basic support for OCSP Stapling
+ - MEDIUM: mworker: replace systemd mode by master worker mode
+ - MEDIUM: mworker: handle reload and signals
+ - MEDIUM: mworker: wait mode on reload failure
+ - MEDIUM: mworker: try to guess the next stats socket to use with -x
+ - MEDIUM: mworker: exit-on-failure option
+ - MEDIUM: mworker: workers exit when the master leaves
+ - DOC: add documentation for the master-worker mode
+ - MEDIUM: systemd: Type=forking in unit file
+ - MAJOR: systemd-wrapper: get rid of the wrapper
+ - MINOR: log: Add logurilen tunable.
+ - CLEANUP: server.c: missing prototype of srv_free_dns_resolution
+ - MINOR: dns: smallest DNS fqdn size
+ - MINOR: dns: functions to manage memory for a DNS resolution structure
+ - MINOR: dns: parse_server() now uses srv_alloc_dns_resolution()
+ - REORG: dns: dns_option structure, storage of hostname_dn
+ - MINOR: dns: new snr_check_ip_callback function
+ - MAJOR: dns: save a copy of the DNS response in struct resolution
+ - MINOR: dns: implement a LRU cache for DNS resolutions
+ - MINOR: dns: make 'ancount' field to match the number of saved records
+ - MINOR: dns: introduce roundrobin into the internal cache (WIP)
+ - MAJOR/REORG: dns: DNS resolution task and requester queues
+ - BUILD: ssl: fix build with OPENSSL_NO_ENGINE
+ - MINOR: Add Mod Defender integration as contrib
+ - CLEANUP: str2mask return code comment: non-zero -> zero.
+ - MINOR: tools: make debug_hexdump() use a const char for the string
+ - MINOR: tools: make debug_hexdump() take a string prefix
+ - CLEANUP: connection: remove unused CO_FL_WAIT_DATA
+
+2017/04/03 : 1.8-dev1
+ - BUG/MEDIUM: proxy: return "none" and "unknown" for unknown LB algos
+ - BUG/MINOR: stats: make field_str() return an empty string on NULL
+ - DOC: Spelling fixes
+ - BUG/MEDIUM: http: Fix tunnel mode when the CONNECT method is used
+ - BUG/MINOR: http: Keep the same behavior between 1.6 and 1.7 for tunneled txn
+ - BUG/MINOR: filters: Protect args in macros HAS_DATA_FILTERS and IS_DATA_FILTER
+ - BUG/MINOR: filters: Invert evaluation order of HTTP_XFER_BODY and XFER_DATA analyzers
+ - BUG/MINOR: http: Call XFER_DATA analyzer when HTTP txn is switched in tunnel mode
+ - BUG/MAJOR: stream: fix session abort on resource shortage
+ - OPTIM: stream-int: don't disable polling anymore on DONT_READ
+ - BUG/MINOR: cli: allow the backslash to be escaped on the CLI
+ - BUG/MEDIUM: cli: fix "show stat resolvers" and "show tls-keys"
+ - DOC: Fix map table's format
+ - DOC: Added 51Degrees conv and fetch functions to documentation.
+ - BUG/MINOR: http: don't send an extra CRLF after a Set-Cookie in a redirect
+ - DOC: mention that req_tot is for both frontends and backends
+ - BUG/MEDIUM: variables: some variable name can hide another ones
+ - MINOR: lua: Allow argument for actions
+ - BUILD: rearrange target files by build time
+ - CLEANUP: hlua: just indent functions
+ - MINOR: lua: give HAProxy variable access to the applets
+ - BUG/MINOR: stats: fix be/sessions/max output in html stats
+ - MINOR: proxy: Add fe_name/be_name fetchers next to existing fe_id/be_id
+ - DOC: lua: Documentation about some entry missing
+ - DOC: lua: Add documentation about variable manipulation from applet
+ - MINOR: Do not forward the header "Expect: 100-continue" when the option http-buffer-request is set
+ - DOC: Add undocumented argument of the trace filter
+ - DOC: Fix some typo in SPOE documentation
+ - MINOR: cli: Remove useless call to bi_putchk
+ - BUG/MINOR: cli: be sure to always warn the cli applet when input buffer is full
+ - MINOR: applet: Count number of (active) applets
+ - MINOR: task: Rename run_queue and run_queue_cur counters
+ - BUG/MEDIUM: stream: Save unprocessed events for a stream
+ - BUG/MAJOR: Fix how the list of entities waiting for a buffer is handled
+ - BUILD/MEDIUM: Fixing the build using LibreSSL
+ - BUG/MEDIUM: lua: In some case, the return of sample-fetches is ignored (2)
+ - SCRIPTS: git-show-backports: fix a harmless typo
+ - SCRIPTS: git-show-backports: add -H to use the hash of the commit message
+ - BUG/MINOR: stream-int: automatically release SI_FL_WAIT_DATA on SHUTW_NOW
+ - CLEANUP: applet/lua: create a dedicated ->fcn entry in hlua_cli context
+ - CLEANUP: applet/table: add an "action" entry in ->table context
+ - CLEANUP: applet: remove the now unused appctx->private field
+ - DOC: lua: documentation about time parser functions
+ - DOC: lua: improve links
+ - DOC: lua: section declared twice
+ - MEDIUM: cli: 'show cli sockets' list the CLI sockets
+ - BUG/MINOR: cli: "show cli sockets" wouldn't list all processes
+ - BUG/MINOR: cli: "show cli sockets" would always report process 64
+ - CLEANUP: lua: rename one of the lua appctx union
+ - BUG/MINOR: lua/cli: bad error message
+ - MEDIUM: lua: use memory pool for hlua struct in applets
+ - MINOR: lua/signals: Remove Lua part from signals.
+ - DOC: cli: show cli sockets
+ - MINOR: cli: automatically enable a CLI I/O handler when there's no parser
+ - CLEANUP: memory: remove the now unused cli_parse_show_pools() function
+ - CLEANUP: applet: group all CLI contexts together
+ - CLEANUP: stats: move a misplaced stats context initialization
+ - MINOR: cli: add two general purpose pointers and integers in the CLI struct
+ - MINOR: appctx/cli: remove the cli_socket entry from the appctx union
+ - MINOR: appctx/cli: remove the env entry from the appctx union
+ - MINOR: appctx/cli: remove the "be" entry from the appctx union
+ - MINOR: appctx/cli: remove the "dns" entry from the appctx union
+ - MINOR: appctx/cli: remove the "server_state" entry from the appctx union
+ - MINOR: appctx/cli: remove the "tlskeys" entry from the appctx union
+ - CONTRIB: tcploop: add limits.h to fix build issue with some compilers
+ - MINOR/DOC: lua: just precise one thing
+ - DOC: fix small typo in fe_id (backend instead of frontend)
+ - BUG/MINOR: Fix the sending function in Lua's cosocket
+ - BUG/MINOR: lua: memory leak executing tasks
+ - BUG/MINOR: lua: bad return code
+ - BUG/MINOR: lua: memleak when Lua/cli fails
+ - MEDIUM: lua: remove Lua struct from session, and allocate it with memory pools
+ - CLEANUP: haproxy: statify unexported functions
+ - MINOR: haproxy: add a registration for build options
+ - CLEANUP: wurfl: use the build options list to report it
+ - CLEANUP: 51d: use the build options list to report it
+ - CLEANUP: da: use the build options list to report it
+ - CLEANUP: namespaces: use the build options list to report it
+ - CLEANUP: tcp: use the build options list to report transparent modes
+ - CLEANUP: lua: use the build options list to report it
+ - CLEANUP: regex: use the build options list to report the regex type
+ - CLEANUP: ssl: use the build options list to report the SSL details
+ - CLEANUP: compression: use the build options list to report the algos
+ - CLEANUP: auth: use the build options list to report its support
+ - MINOR: haproxy: add a registration for post-check functions
+ - CLEANUP: checks: make use of the post-init registration to start checks
+ - CLEANUP: filters: use the function registration to initialize all proxies
+ - CLEANUP: wurfl: make use of the late init registration
+ - CLEANUP: 51d: make use of the late init registration
+ - CLEANUP: da: make use of the late init registration code
+ - MINOR: haproxy: add a registration for post-deinit functions
+ - CLEANUP: wurfl: register the deinit function via the dedicated list
+ - CLEANUP: 51d: register the deinitialization function
+ - CLEANUP: da: register the deinitialization function
+ - CLEANUP: wurfl: move global settings out of the global section
+ - CLEANUP: 51d: move global settings out of the global section
+ - CLEANUP: da: move global settings out of the global section
+ - MINOR: cfgparse: add two new functions to check arguments count
+ - MINOR: cfgparse: move parsing of "ca-base" and "crt-base" to ssl_sock
+ - MEDIUM: cfgparse: move all tune.ssl.* keywords to ssl_sock
+ - MEDIUM: cfgparse: move maxsslconn parsing to ssl_sock
+ - MINOR: cfgparse: move parsing of ssl-default-{bind,server}-ciphers to ssl_sock
+ - MEDIUM: cfgparse: move ssl-dh-param-file parsing to ssl_sock
+ - MEDIUM: compression: move the zlib-specific stuff from global.h to compression.c
+ - BUG/MEDIUM: ssl: properly reset the reused_sess during a forced handshake
+ - BUG/MEDIUM: ssl: avoid double free when releasing bind_confs
+ - BUG/MINOR: stats: fix be/sessions/current out in typed stats
+ - MINOR: tcp-rules: check that the listener exists before updating its counters
+ - MEDIUM: spoe: don't create a dummy listener for outgoing connections
+ - MINOR: listener: move the transport layer pointer to the bind_conf
+ - MEDIUM: move listener->frontend to bind_conf->frontend
+ - MEDIUM: ssl: remove the proxy argument from most functions
+ - MINOR: connection: add a new prepare_bind_conf() entry to xprt_ops
+ - MEDIUM: ssl_sock: implement ssl_sock_prepare_bind_conf()
+ - MINOR: connection: add a new destroy_bind_conf() entry to xprt_ops
+ - MINOR: ssl_sock: implement ssl_sock_destroy_bind_conf()
+ - MINOR: server: move the use_ssl field out of the ifdef USE_OPENSSL
+ - MINOR: connection: add a minimal transport layer registration system
+ - CLEANUP: connection: remove all direct references to raw_sock and ssl_sock
+ - CLEANUP: connection: unexport raw_sock and ssl_sock
+ - MINOR: connection: add new prepare_srv()/destroy_srv() entries to xprt_ops
+ - MINOR: ssl_sock: implement and use prepare_srv()/destroy_srv()
+ - CLEANUP: ssl: move tlskeys_finalize_config() to a post_check callback
+ - CLEANUP: ssl: move most ssl-specific global settings to ssl_sock.c
+ - BUG/MINOR: backend: nbsrv() should return 0 if backend is disabled
+ - BUG/MEDIUM: ssl: force a handshake when server-side SNI changes
+ - BUG/MINOR: systemd: potential zombie processes
+ - DOC: Add timings events schemas
+ - BUILD: lua: build failed on FreeBSD.
+ - MINOR: samples: add xx-hash functions
+ - MEDIUM: regex: pcre2 support
+ - BUG/MINOR: option prefer-last-server must be ignored in some case
+ - MINOR: stats: Support "select all" for backend actions
+ - BUG/MINOR: sample-fetches/stick-tables: bad type for the sample fetches sc*_get_gpt0
+ - BUG/MAJOR: channel: Fix the definition order of channel analyzers
+ - BUG/MINOR: http: report real parser state in error captures
+ - BUILD: scripts: automatically update the branch in version.h when releasing
+ - MINOR: tools: add a generic hexdump function for debugging
+ - BUG/MAJOR: http: fix risk of getting invalid reports of bad requests
+ - MINOR: http: custom status reason.
+ - MINOR: connection: add sample fetch "fc_rcvd_proxy"
+ - BUG/MINOR: config: emit a warning if http-reuse is enabled with incompatible options
+ - BUG/MINOR: tools: fix off-by-one in port size check
+ - BUG/MEDIUM: server: consider AF_UNSPEC as a valid address family
+ - MEDIUM: server: split the address and the port into two different fields
+ - MINOR: tools: make str2sa_range() return the port in a separate argument
+ - MINOR: server: take the destination port from the port field, not the addr
+ - MEDIUM: server: disable protocol validations when the server doesn't resolve
+ - BUG/MEDIUM: tools: do not force an unresolved address to AF_INET:0.0.0.0
+ - BUG/MINOR: ssl: EVP_PKEY must be freed after X509_get_pubkey usage
+ - BUG/MINOR: ssl: assert on SSL_set_shutdown with BoringSSL
+ - MINOR: Use "500 Internal Server Error" for 500 error/status code message.
+ - MINOR: proto_http.c 502 error txt typo.
+ - DOC: add deprecation notice to "block"
+ - MINOR: compression: fix -vv output without zlib/slz
+ - BUG/MINOR: Reset errno variable before calling strtol(3)
+ - MINOR: ssl: don't show prefer-server-ciphers output
+ - OPTIM/MINOR: config: Optimize fullconn automatic computation loading configuration
+ - BUG/MINOR: stream: Fix how backend-specific analyzers are set on a stream
+ - MAJOR: ssl: bind configuration per certificate
+ - MINOR: ssl: add curve suite for ECDHE negotiation
+ - MINOR: checks: Add agent-addr config directive
+ - MINOR: cli: Add possibility to change agent config via CLI/socket
+ - MINOR: doc: Add docs for agent-addr configuration variable
+ - MINOR: doc: Add docs for agent-addr and agent-send CLI commands
+ - BUILD: ssl: fix to build (again) with boringssl
+ - BUILD: ssl: fix build on OpenSSL 1.0.0
+ - BUILD: ssl: silence a warning reported for ERR_remove_state()
+ - BUILD: ssl: eliminate warning with OpenSSL 1.1.0 regarding RAND_pseudo_bytes()
+ - BUILD: ssl: kill a build warning introduced by BoringSSL compatibility
+ - BUG/MEDIUM: tcp: don't poll for write when connect() succeeds
+ - BUG/MINOR: unix: fix connect's polling in case no data are scheduled
+ - MINOR: server: extend the flags to 32 bits
+ - BUG/MINOR: lua: Map.end are not reliable because "end" is a reserved keyword
+ - MINOR: dns: give ability to dns_init_resolvers() to close a socket when requested
+ - BUG/MAJOR: dns: restart sockets after fork()
+ - MINOR: chunks: implement a simple dynamic allocator for trash buffers
+ - BUG/MEDIUM: http: prevent redirect from overwriting a buffer
+ - BUG/MEDIUM: filters: Do not truncate HTTP response when body length is undefined
+ - BUG/MEDIUM: http: Prevent replace-header from overwriting a buffer
+ - BUG/MINOR: http: Return an error when a replace-header rule failed on the response
+ - BUG/MINOR: sendmail: The return of vsnprintf is not cleanly tested
+ - BUG/MAJOR: ssl: fix a regression in ssl_sock_shutw()
+ - BUG/MAJOR: lua segmentation fault when the request is like 'GET ?arg=val HTTP/1.1'
+ - BUG/MEDIUM: config: reject anything but "if" or "unless" after a use-backend rule
+ - MINOR: http: don't close when redirect location doesn't start with "/"
+ - MEDIUM: boringssl: support native multi-cert selection without bundling
+ - BUG/MEDIUM: ssl: fix verify/ca-file per certificate
+ - BUG/MEDIUM: ssl: switchctx should not return SSL_TLSEXT_ERR_ALERT_WARNING
+ - MINOR: ssl: removes SSL_CTX_set_ssl_version call and cleanup CTX creation.
+ - BUILD: ssl: fix build with -DOPENSSL_NO_DH
+ - MEDIUM: ssl: add new sample-fetch which captures the cipherlist
+ - MEDIUM: ssl: remove ssl-options from crt-list
+ - BUG/MEDIUM: ssl: in bind line, ssl-options after 'crt' are ignored.
+ - BUG/MINOR: ssl: fix cipherlist captures with sustainable SSL calls
+ - MINOR: ssl: improved cipherlist captures
+ - BUG/MINOR: spoe: Fix soft stop handler using a specific id for spoe filters
+ - BUG/MINOR: spoe: Fix parsing of arguments in spoe-message section
+ - MAJOR: spoe: Add support of pipelined and asynchronous exchanges with agents
+ - MINOR: spoe: Add support for pipelining/async capabilities in the SPOA example
+ - MINOR: spoe: Remove SPOE details from the appctx structure
+ - MINOR: spoe: Add status code in error variable instead of hardcoded value
+ - MINOR: spoe: Send a log message when an error occurred during event processing
+ - MINOR: spoe: Check the scope of sample fetches used in SPOE messages
+ - MEDIUM: spoe: Be sure to wakeup the good entity waiting for a buffer
+ - MINOR: spoe: Use the min of all known max_frame_size to encode messages
+ - MAJOR: spoe: Add support of payload fragmentation in NOTIFY frames
+ - MINOR: spoe: Add support for fragmentation capability in the SPOA example
+ - MAJOR: spoe: refactor the filter to clean up the code
+ - MINOR: spoe: Handle NOTIFY frames cancellation using ABORT bit in ACK frames
+ - REORG: spoe: Move struct and enum definitions in dedicated header file
+ - REORG: spoe: Move low-level encoding/decoding functions in dedicated header file
+ - MINOR: spoe: Improve implementation of the payload fragmentation
+ - MINOR: spoe: Add support of negation for options in SPOE configuration file
+ - MINOR: spoe: Add "pipelining" and "async" options in spoe-agent section
+ - MINOR: spoe: Rely on alertif_too_many_arg during configuration parsing
+ - MINOR: spoe: Add "send-frag-payload" option in spoe-agent section
+ - MINOR: spoe: Add "max-frame-size" statement in spoe-agent section
+ - DOC: spoe: Update SPOE documentation to reflect recent changes
+ - MINOR: config: warn when some HTTP rules are used in a TCP proxy
+ - BUG/MEDIUM: ssl: Clear OpenSSL error stack after trying to parse OCSP file
+ - BUG/MEDIUM: cli: Prevent double free in CLI ACL lookup
+ - BUG/MINOR: Fix "get map <map> <value>" CLI command
+ - MINOR: Add nbsrv sample converter
+ - CLEANUP: Replace repeated code to count usable servers with be_usable_srv()
+ - MINOR: Add hostname sample fetch
+ - CLEANUP: Remove comment that's no longer valid
+ - MEDIUM: http_error_message: txn->status / http_get_status_idx.
+ - MINOR: http-request tarpit deny_status.
+ - CLEANUP: http: make http_server_error() not set the status anymore
+ - MEDIUM: stats: Add JSON output option to show (info|stat)
+ - MEDIUM: stats: Add show json schema
+ - BUG/MAJOR: connection: update CO_FL_CONNECTED before calling the data layer
+ - MINOR: server: Add dynamic session cookies.
+ - MINOR: cli: Let configure the dynamic cookies from the cli.
+ - BUG/MINOR: checks: attempt clean shutw for SSL check
+ - CONTRIB: tcploop: make it build on FreeBSD
+ - CONTRIB: tcploop: fix time format to silence build warnings
+ - CONTRIB: tcploop: report action 'K' (kill) in usage message
+ - CONTRIB: tcploop: fix connect's address length
+ - CONTRIB: tcploop: use the trash instead of NULL for recv()
+ - BUG/MEDIUM: listener: do not try to rebind another process' socket
+ - BUG/MEDIUM: server: Fix crash when dynamic is defined, but no key is provided.
+ - CLEANUP: config: Typo in comment.
+ - BUG/MEDIUM: filters: Fix channels synchronization in flt_end_analyze
+ - TESTS: add a test configuration to stress handshake combinations
+ - BUG/MAJOR: stream-int: do not depend on connection flags to detect connection
+ - BUG/MEDIUM: connection: ensure to always report the end of handshakes
+ - MEDIUM: connection: don't test for CO_FL_WAKE_DATA
+ - CLEANUP: connection: completely remove CO_FL_WAKE_DATA
+ - BUG: payload: fix payload not retrieving arbitrary lengths
+ - BUILD: ssl: simplify SSL_CTX_set_ecdh_auto compatibility
+ - BUILD: ssl: fix OPENSSL_NO_SSL_TRACE for boringssl and libressl
+ - BUG/MAJOR: http: fix typo in http_apply_redirect_rule
+ - MINOR: doc: 2.4. Examples should be 2.5. Examples
+ - BUG/MEDIUM: stream: fix client-fin/server-fin handling
+ - MINOR: fd: add a new flag HAP_POLL_F_RDHUP to struct poller
+ - BUG/MINOR: raw_sock: always perform the last recv if RDHUP is not available
+ - OPTIM: poll: enable support for POLLRDHUP
+ - MINOR: kqueue: exclusively rely on the kqueue returned status
+ - MEDIUM: kqueue: take care of EV_EOF to improve polling status accuracy
+ - MEDIUM: kqueue: only set FD_POLL_IN when there are pending data
+ - DOC/MINOR: Fix typos in proxy protocol doc
+ - DOC: Protocol doc: add checksum, TLV type ranges
+ - DOC: Protocol doc: add SSL TLVs, rename CHECKSUM
+ - DOC: Protocol doc: add noop TLV
+ - MEDIUM: global: add a 'hard-stop-after' option to cap the soft-stop time
+ - MINOR: dns: improve DNS response parsing to use as many available records as possible
+ - BUG/MINOR: cfgparse: loop in tracked servers lists not detected by check_config_validity().
+ - MINOR: server: irrelevant error message with 'default-server' config file keyword.
+ - MINOR: server: Make 'default-server' support 'backup' keyword.
+ - MINOR: server: Make 'default-server' support 'check-send-proxy' keyword.
+ - CLEANUP: server: code alignment.
+ - MINOR: server: Make 'default-server' support 'non-stick' keyword.
+ - MINOR: server: Make 'default-server' support 'send-proxy' and 'send-proxy-v2 keywords.
+ - MINOR: server: Make 'default-server' support 'check-ssl' keyword.
+ - MINOR: server: Make 'default-server' support 'force-sslv3' and 'force-tlsv1[0-2]' keywords.
+ - CLEANUP: server: code alignment.
+ - MINOR: server: Make 'default-server' support 'no-ssl*' and 'no-tlsv*' keywords.
+ - MINOR: server: Make 'default-server' support 'ssl' keyword.
+ - MINOR: server: Make 'default-server' support 'send-proxy-v2-ssl*' keywords.
+ - CLEANUP: server: code alignment.
+ - MINOR: server: Make 'default-server' support 'verify' keyword.
+ - MINOR: server: Make 'default-server' support 'verifyhost' setting.
+ - MINOR: server: Make 'default-server' support 'check' keyword.
+ - MINOR: server: Make 'default-server' support 'track' setting.
+ - MINOR: server: Make 'default-server' support 'ca-file', 'crl-file' and 'crt' settings.
+ - MINOR: server: Make 'default-server' support 'redir' keyword.
+ - MINOR: server: Make 'default-server' support 'observe' keyword.
+ - MINOR: server: Make 'default-server' support 'cookie' keyword.
+ - MINOR: server: Make 'default-server' support 'ciphers' keyword.
+ - MINOR: server: Make 'default-server' support 'tcp-ut' keyword.
+ - MINOR: server: Make 'default-server' support 'namespace' keyword.
+ - MINOR: server: Make 'default-server' support 'source' keyword.
+ - MINOR: server: Make 'default-server' support 'sni' keyword.
+ - MINOR: server: Make 'default-server' support 'addr' keyword.
+ - MINOR: server: Make 'default-server' support 'disabled' keyword.
+ - MINOR: server: Add 'no-agent-check' server keyword.
+ - DOC: server: Add docs for "server" and "default-server" new "no-*" and other settings.
+ - MINOR: doc: fix use-server example (imap vs mail)
+ - BUG/MEDIUM: tcp: don't require privileges to bind to device
+ - BUILD: make the release script use shortlog for the final changelog
+ - BUILD: scripts: fix typo in announce-release error message
+ - CLEANUP: time: curr_sec_ms doesn't need to be exported
+ - BUG/MEDIUM: server: Wrong server default CRT filenames initialization.
+ - BUG/MEDIUM: peers: fix buffer overflow control in intdecode.
+ - BUG/MEDIUM: buffers: Fix how input/output data are injected into buffers
+ - BUG/MINOR: http: Fix conditions to clean up a txn and to handle the next request
+ - CLEANUP: http: Remove channel_congested function
+ - CLEANUP: buffers: Remove buffer_bounce_realign function
+ - CLEANUP: buffers: Remove buffer_contig_area and buffer_work_area functions
+ - MINOR: http: remove useless check on HTTP_MSGF_XFER_LEN for the request
+ - MINOR: http: Add debug messages when HTTP body analyzers are called
+ - BUG/MEDIUM: http: Fix blocked HTTP/1.0 responses when compression is enabled
+ - BUG/MINOR: filters: Don't force the stream's wakeup when we wait in flt_end_analyze
+ - DOC: fix parenthesis and add missing "Example" tags
+ - DOC: update the contributing file
+ - DOC: log-format/tcplog/httplog update
+ - MINOR: config parsing: add warning when log-format/tcplog/httplog is overridden in "defaults" sections
+
+2016/11/25 : 1.8-dev0
+
+2016/11/25 : 1.7.0
+ - SCRIPTS: make publish-release also copy the new SPOE doc
+ - BUILD: http: include types/sample.h in proto_http.h
+ - BUILD: debug/flags: remove test for SF_COMP_READY
+ - CONTRIB: debug/flags: add check for SF_ERR_CHK_PORT
+ - MINOR: lua: add function which return true if the channel is full.
+ - MINOR: lua: add ip addresses and network manipulation function
+ - CONTRIB: tcploop: scriptable TCP I/O for debugging purposes
+ - CONTRIB: tcploop: implement fork()
+ - CONTRIB: tcploop: implement logging when called with -v
+ - CONTRIB: tcploop: update the usage output
+ - CONTRIB: tcploop: support sending plain strings
+ - CONTRIB: tcploop: don't report failed send() or recv()
+ - CONTRIB: tcploop: add basic loops via a jump instruction
+ - BUG/MEDIUM: channel: bad unlikely macro
+ - CLEANUP: lua: move comment
+ - CLEANUP: lua: control executed twice
+ - BUG/MEDIUM: ssl: Store certificate filename in a variable
+ - BUG/MINOR: ssl: Print correct filename when error occurs reading OCSP
+ - CLEANUP: ssl: Remove goto after return dead code
+ - CLEANUP: ssl: Fix bind keywords name in comments
+ - DOC: ssl: Use correct wording for ca-sign-pass
+ - CLEANUP: lua: avoid directly calling getsockname/getpeername()
+ - BUG/MINOR: stick-table: handle out-of-memory condition gracefully
+ - MINOR: cli: add private pointer and release function
+ - MEDIUM: lua: Add cli handler for Lua
+ - BUG/MEDIUM: connection: check the control layer before stopping polling
+ - DEBUG: connection: mark the closed FDs with a value that is easier to detect
+ - BUG/MEDIUM: stick-table: fix regression caused by recent fix for out-of-memory
+ - BUG/MINOR: cli: properly decrement ref count on tables during failed dumps
+ - BUG/MEDIUM: lua: In some case, the return of sample-fetch is ignored
+ - MINOR: filters: Add check_timeouts callback to handle timers expiration on streams
+ - MINOR: spoe: Add 'timeout processing' option to limit time to process an event
+ - MINOR: spoe: Remove useless 'timeout ack' option
+ - MINOR: spoe: Add 'option continue-on-error' statement in spoe-agent section
+ - MINOR: spoe: Add "maxconnrate" and "maxerrrate" statements
+ - MINOR: spoe: Add "option set-on-error" statement
+ - MINOR: stats: correct documentation of process ID for typed output
+ - BUILD: contrib: fix ip6range build on Centos 7
+ - BUILD: fix build on Solaris 10/11
+ - BUG/MINOR: cli: fix pointer size when reporting data/transport layer name
+ - BUG/MINOR: cli: dequeue from the proxy when changing a maxconn
+ - BUG/MINOR: cli: wake up the CLI's task after a timeout update
+ - MINOR: connection: add a few functions to report the data and xprt layers' names
+ - MINOR: connection: add names for transport and data layers
+ - REORG: cli: split dumpstats.c in src/cli.c and src/stats.c
+ - REORG: cli: split dumpstats.h in stats.h and cli.h
+ - REORG: cli: move ssl CLI functions to ssl_sock.c
+ - REORG: cli: move map and acl code to map.c
+ - REORG: cli: move show stat resolvers to dns.c
+ - MINOR: cli: create new function cli_has_level() to validate permissions
+ - MINOR: server: create new function cli_find_server() to find a server
+ - MINOR: proxy: create new function cli_find_frontend() to find a frontend
+ - REORG: cli: move 'set server' to server.c
+ - REORG: cli: move 'show pools' to memory.c
+ - REORG: cli: move 'show servers' to proxy.c
+ - REORG: cli: move 'show sess' to stream.c
+ - REORG: cli: move 'show backend' to proxy.c
+ - REORG: cli: move get/set weight to server.c
+ - REORG: cli: move "show stat" to stats.c
+ - REORG: cli: move "show info" to stats.c
+ - REORG: cli: move dump_text(), dump_text_line(), and dump_binary() to standard.c
+ - REORG: cli: move table dump/clear/set to stick_table.c
+ - REORG: cli: move "show errors" out of cli.c
+ - REORG: cli: make "show env" also use the generic keyword registration
+ - REORG: cli: move "set timeout" to its own handler
+ - REORG: cli: move "clear counters" to stats.c
+ - REORG: cli: move "set maxconn global" to its own handler
+ - REORG: cli: move "set maxconn server" to server.c
+ - REORG: cli: move "set maxconn frontend" to proxy.c
+ - REORG: cli: move "shutdown sessions server" to stream.c
+ - REORG: cli: move "shutdown session" to stream.c
+ - REORG: cli: move "shutdown frontend" to proxy.c
+ - REORG: cli: move "{enable|disable} frontend" to proxy.c
+ - REORG: cli: move "{enable|disable} server" to server.c
+ - REORG: cli: move "{enable|disable} health" to server.c
+ - REORG: cli: move "{enable|disable} agent" to server.c
+ - REORG: cli: move the "set rate-limit" functions to their own parser
+ - CLEANUP: cli: rename STAT_CLI_* to CLI_ST_*
+ - CLEANUP: cli: simplify the request parser a little bit
+ - CLEANUP: cli: remove assignments to st0 and st2 in keyword parsers
+ - BUILD: server: remove a build warning introduced by latest series
+ - BUG/MINOR: log-format: uncatched memory allocation functions
+ - CLEANUP: log-format: useless file and line in json converter
+ - CLEANUP/MINOR: log-format: unexport functions parse_logformat_var_args() and parse_logformat_var()
+ - CLEANUP: log-format: fix return code of the function parse_logformat_var()
+ - CLEANUP: log-format: fix return code of function parse_logformat_var_args()
+ - CLEANUP: log-format: remove unused arguments
+ - MEDIUM: log-format: strict parsing and enable fail
+ - MEDIUM: log-format/conf: take into account the parse_logformat_string() return code
+ - BUILD: ssl: make the SSL layer build again with openssl 0.9.8
+ - BUILD: vars: remove a build warning on vars.c
+ - MINOR: lua: add utility function for check boolean argument
+ - MINOR: lua: Add tokenize function.
+ - BUG/MINOR: conf: calloc untested
+ - MINOR: http/conf: store the use_backend configuration file and line for logs
+ - MEDIUM: log-format: Use standard HAProxy log system to report errors
+ - CLEANUP: sample: report "converter" instead of "conv method" in error messages
+ - BUG: spoe: Fix parsing of SPOE actions in ACK frames
+ - MINOR: cli: make "show stat" support a proxy name
+ - MINOR: cli: make "show errors" support a proxy name
+ - MINOR: cli: make "show errors" capable of dumping only request or response
+ - BUG/MINOR: freq-ctr: make swrate_add() support larger values
+ - CLEANUP: counters: move from 3 types to 2 types
+ - CLEANUP: cfgparse: cascade the warnif_misplaced_* rules
+ - REORG: tcp-rules: move tcp rules processing to their own file
+ - REORG: stkctr: move all the stick counters processing to stick-tables.c
+ - DOC: update the roadmap file with the latest changes
+
+2016/11/09 : 1.7-dev6
+ - DOC: fix the entry for hash-balance-factor config option
+ - DOC: Fix typo in description of `-st` parameter in man page
+ - CLEANUP: cfgparse: Very minor spelling correction
+ - MINOR: examples: Update haproxy.spec URLs to haproxy.org
+ - BUG/MEDIUM: peers: on shutdown, wake up the appctx, not the stream
+ - BUG/MEDIUM: peers: fix use after free in peer_session_create()
+ - MINOR: peers: make peer_session_forceshutdown() use the appctx and not the stream
+ - MINOR: peers: remove the pointer to the stream
+ - BUG/MEDIUM: systemd-wrapper: return correct exit codes
+ - DOC: stats: provide state details for show servers state
+ - MEDIUM: tools: make str2ip2() preserve existing ports
+ - CLEANUP: tools: make ipcpy() preserve the original port
+ - OPTIM: http: move all http character class tables into a single one
+ - OPTIM: http: improve parsing performance of long header lines
+ - OPTIM: http: improve parsing performance of long URIs
+ - OPTIM: http: optimize lookup of comma and quote in header values
+ - BUG/MEDIUM: srv-state: properly restore the DRAIN state
+ - BUG/MINOR: srv-state: allow to have both CMAINT and FDRAIN flags
+ - MINOR: server: do not emit warnings/logs/alerts on server state changes at boot
+ - BUG/MEDIUM: servers: properly propagate the maintenance states during startup
+ - MEDIUM: wurfl: add Scientiamobile WURFL device detection module
+ - DOC: move the device detection modules documentation to their own files
+ - CLEANUP: wurfl: reduce exposure in the rest of the code
+ - MEDIUM: ssl: Add support for OpenSSL 1.1.0
+ - MINOR: stream: make option contstats usable again
+ - MEDIUM: tools: make str2sa_range() return the FQDN even when not resolving
+ - MINOR: init: move apply_server_state in haproxy.c before MODE_CHECK
+ - MAJOR: server: postpone address resolution
+ - MINOR: new srv_admin flag: SRV_ADMF_RMAINT
+ - MINOR: server: indicate in the logs when RMAINT is cleared
+ - MINOR: stats: indicate it when a server is down due to resolution
+ - MINOR: server: make srv_set_admin_state() capable of telling why this happens
+ - MINOR: dns: implement extra 'hold' timers.
+ - MAJOR: dns: runtime resolution can change server admin state
+ - MEDIUM: cli: leave the RMAINT state when setting an IP address on the CLI
+ - MEDIUM: server: add a new init-addr server line setting
+ - MEDIUM: server: make use of init-addr
+ - MINOR: server: implement init-addr none
+ - MEDIUM: server: make libc resolution failure non-fatal
+ - MINOR: server: add support for explicit numeric address in init-addr
+ - DOC: add some documentation for the "init-addr" server keyword
+ - MINOR: init: add -dr to ignore server address resolution failures
+ - MEDIUM: server: do not restrict anymore usage of IP address from the state file
+ - BUG: vars: Fix 'set-var' converter because of a typo
+ - CLEANUP: remove last references to 'ruleset' section
+ - MEDIUM: filters: Add attch/detach and stream_set_backend callbacks
+ - MINOR: filters: Update filters documentation accordingly to recent changes
+ - MINOR: filters: Call stream_set_backend callbacks before updating backend stats
+ - MINOR: filters: Remove backend filters attached to a stream only for HTTP streams
+ - MINOR: flt_trace: Add hexdump option to dump forwarded data
+ - MINOR: cfgparse: Add functions to backup and restore registered sections
+ - MINOR: cfgparse: Parse scope lines and save the last one parsed
+ - REORG: sample: move code to release a sample expression in sample.c
+ - MINOR: vars: Allow '.' in variable names
+ - MINOR: vars: Add vars_set_by_name_ifexist function
+ - MEDIUM: vars: Add a per-process scope for variables
+ - MINOR: vars: Add 'unset-var' action/converter
+ - MAJOR: spoe: Add an experimental Stream Processing Offload Engine
+ - MINOR: spoe: add random ip-reputation service as SPOA example
+ - MINOR: spoe/checks: Add support for SPOP health checks
+ - DOC: update ROADMAP file
+
+2016/10/25 : 1.7-dev5
+ - MINOR: cfgparse: few memory leaks fixes.
+ - MEDIUM: log: Decompose %Tq in %Th %Ti %TR
+ - CLEANUP: logs: remove unused log format field definitions
+ - BUILD/MAJOR: updated 51d Trie implementation to incorporate latest update to 51Degrees.c
+ - BUG/MAJOR: stream: properly mark the server address as unset on connect retry
+ - CLEANUP: proto_http: Removing useless variable assignation
+ - CLEANUP: dumpstats: Removing useless variables allocation
+ - CLEANUP: dns: Removing useless variable & assignation
+ - BUG/MINOR: payload: fix SSLv2 version parser
+ - MINOR: cli: allow the semi-colon to be escaped on the CLI
+ - MINOR: cli: change a server health check port through the stats socket
+ - BUG/MINOR: Fix OSX compilation errors
+ - MAJOR: check: find out which port to use for health check at run time
+ - MINOR: server: introduction of 3 new server flags
+ - MINOR: new update_server_addr_port() function to change both server's ADDR and service PORT
+ - MINOR: cli: ability to change a server's port
+ - CLEANUP/MINOR: dns: comment does not follow up code update
+ - MINOR: chunk: new strncat function
+ - MINOR: dns: wrong DNS_MAX_UDP_MESSAGE value
+ - MINOR: dns: new MAX values
+ - MINOR: dns: new macro to compute DNS header size
+ - MINOR: dns: new DNS structures to store received packets
+ - MEDIUM: dns: new DNS response parser
+ - MINOR: dns: query type change when last record is a CNAME
+ - MINOR: dns: proper domain name validation when receiving DNS response
+ - MINOR: dns: comments in types/dns.h about structures endianness
+ - BUG/MINOR: displayed PCRE version is running release
+ - MINOR: show Built with PCRE version
+ - MINOR: show Running on zlib version
+ - MEDIUM: make SO_REUSEPORT configurable
+ - MINOR: enable IP_BIND_ADDRESS_NO_PORT on backend connections
+ - BUG/MEDIUM: http/compression: Fix how chunked data are copied during the HTTP body parsing
+ - BUG/MINOR: stats: report the correct conn_time in backend's html output
+ - BUG/MEDIUM: dns: don't randomly crash on out-of-memory
+ - MINOR: Add fe_req_rate sample fetch
+ - MEDIUM: peers: Fix a peer stick-tables synchronization issue.
+ - MEDIUM: cli: register CLI keywords with cli_register_kw()
+ - BUILD: Make use of accept4() on OpenBSD.
+ - MINOR: tcp: make set-src/set-src-port and set-dst/set-dst-port commutative
+ - DOC: fix missed entry for "set-{src,dst}{,-port}"
+ - BUG/MINOR: vars: use sess and not s->sess in action_store()
+ - BUG/MINOR: vars: make smp_fetch_var() more robust against misuses
+ - BUG/MINOR: vars: smp_fetch_var() doesn't depend on HTTP but on the session
+ - MINOR: stats: output dcon
+ - CLEANUP: tcp rules: mention everywhere that tcp-conn rules are L4
+ - MINOR: counters: add new fields for denied_sess
+ - MEDIUM: tcp: add registration and processing of TCP L5 rules
+ - MINOR: stats: emit dses
+ - DOC: document tcp-request session
+ - MINOR: ssl: add debug traces
+ - BUILD/CLEANUP: ssl: Check BIO_reset() return code
+ - BUG/MINOR: ssl: Check malloc return code
+ - BUG/MINOR: ssl: prevent multiple entries for the same certificate
+ - BUG/MINOR: systemd: make the wrapper return a non-null status code on error
+ - BUG/MINOR: systemd: always restore signals before execve()
+ - BUG/MINOR: systemd: check return value of calloc()
+ - MINOR: systemd: report it when execve() fails
+ - BUG/MEDIUM: systemd: let the wrapper know that haproxy has completed or failed
+ - MINOR: proxy: add 'served' field to proxy, equal to total of all servers'
+ - MINOR: backend: add hash-balance-factor option for hash-type consistent
+ - MINOR: server: compute a "cumulative weight" to allow chash balancing to hit its target
+ - MEDIUM: server: Implement bounded-load hash algorithm
+ - SCRIPTS: make git-show-backports also dump a "git show" command
+ - MINOR: build: Allow linking to device-atlas library file
+ - MINOR: stats: Escape equals sign on socket dump
+
+2016/08/14 : 1.7-dev4
+ - MINOR: add list_append_word function
+ - MEDIUM: init: use list_append_word in haproxy.c
+ - MEDIUM: init: allow directory as argument of -f
+ - CLEANUP: config: detect double registration of a config section
+ - MINOR: log: add the %Td log-format specifier
+ - MEDIUM: filters: Move HTTP headers filtering in its own callback
+ - MINOR: filters: Simplify calls to analyzers using 2 new macros
+ - MEDIUM: filters: Add pre and post analyzer callbacks
+ - DOC: filters: Update the filters documentation accordingly to recent changes
+ - BUG/MEDIUM: init: don't use environment locale
+ - SCRIPTS: teach git-show-backports how to report upstream commits
+ - SCRIPTS: make git-show-backports capable of limiting its history
+ - BUG/MAJOR: fix listening IP address storage for frontends
+ - BUG/MINOR: fix listening IP address storage for frontends (cont)
+ - DOC: Fix typo so fetch is properly parsed by Cyril's converter
+ - BUG/MAJOR: http: fix breakage of "reqdeny" causing random crashes
+ - BUG/MEDIUM: stick-tables: fix breakage in table converters
+ - MINOR: stick-table: change all stick-table converters' inputs to SMP_T_ANY
+ - BUG/MEDIUM: dns: unbreak DNS resolver after header fix
+ - BUILD: fix build on Solaris 11
+ - BUG/MEDIUM: config: fix multiple declaration of section parsers
+ - BUG/MEDIUM: stats: show servers state may show servers from another backend
+ - BUG/MEDIUM: fix risk of segfault with "show tls-keys"
+ - MEDIUM: dumpstats: 'show tls-keys' is now able to show secrets
+ - DOC: update doc about tls-tickets-keys dump
+ - MEDIUM: tcp: add 'set-src' to 'tcp-request connection'
+ - MINOR: set the CO_FL_ADDR_FROM_SET flags with 'set-src'
+ - MEDIUM: tcp/http: add 'set-src-port' action
+ - MEDIUM: tcp/http: new set-dst/set-dst-port actions
+ - BUG/MEDIUM: sticktables: segfault in some configuration error cases
+ - BUILD/MEDIUM: rebuild everything when an include file is changed
+ - BUILD/MEDIUM: force a full rebuild if some build options change
+ - BUG/MEDIUM: lua: converters doesn't work
+ - BUG/MINOR: http: add-header: header name copied twice
+ - BUG/MEDIUM: http: add-header: buffer overwritten
+ - BUG/MINOR: ssl: fix potential memory leak in ssl_sock_load_dh_params()
+ - MINOR: stream: export the function 'smp_create_src_stkctr'
+ - BUG/MEDIUM: dumpstats: undefined behavior in stats_tlskeys_list()
+ - MEDIUM: dumpstats: make stats_tlskeys_list() yield-aware during tls-keys dump
+ - BUG/MINOR: http: url32+src should use the big endian version of url32
+ - BUG/MINOR: http: url32+src should check cli_conn before using it
+ - DOC: http: add documentation for url32 and url32+src
+ - BUG/MINOR: fix http-response set-log-level parsing error
+ - MINOR: systemd: Use variable for config and pidfile paths
+ - MINOR: systemd: Perform sanity check on config before reload
+ - MEDIUM: ssl: support SNI filters with multicerts
+ - MINOR: ssl: crt-list parsing factor
+ - BUILD: ssl: fix typo causing a build failure in the multicert patch
+ - MINOR: listener: add the "accept-netscaler-cip" option to the "bind" keyword
+ - MINOR: tcp: add "tcp-request connection expect-netscaler-cip layer4"
+ - BUG/MINOR: init: always ensure that global.rlimit_nofile matches actual limits
+ - BUG/MINOR: init: ensure that FD limit is raised to the max allowed
+ - BUG/MEDIUM: external-checks: close all FDs right after the fork()
+ - BUG/MAJOR: external-checks: use asynchronous signal delivery
+ - BUG/MINOR: external-checks: do not unblock undesired signals
+ - CLEANUP: external-check: don't block/unblock SIGCHLD when manipulating the list
+ - BUG/MEDIUM: filters: Fix data filtering when data are modified
+ - BUG/MINOR: filters: Fix HTTP parsing when a filter loops on data forwarding
+ - BUG/MINOR: srv-state: fix incorrect output of state file
+ - BUG/MINOR: ssl: close ssl key file on error
+ - BUG/MINOR: http: fix misleading error message for response captures
+ - BUG/BUILD: don't automatically run "make" on "make install"
+ - DOC: add missing doc for http-request deny [deny_status <status>]
+ - CLEANUP: dumpstats: u64 field is an unsigned type.
+ - BUG/MEDIUM: http: unbreak uri/header/url_param hashing
+ - BUG/MINOR: Rework slightly commit 9962f8fc to clean code and avoid mistakes
+ - MINOR: new function my_realloc2 = realloc + free upon failure
+ - CLEANUP: fixed some usages of realloc leading to memory leak
+ - Revert "BUG/MINOR: ssl: fix potential memory leak in ssl_sock_load_dh_params()"
+ - CLEANUP: connection: using internal struct to hold source and dest port.
+ - DOC: spelling fixes
+ - BUG/MINOR: ssl: fix potential memory leak in ssl_sock_load_dh_params()
+ - BUG/MEDIUM: dns: fix alignment issues in the DNS response parser
+ - BUG/MINOR: Fix endiness issue in DNS header creation code
+ - BUG/MEDIUM: lua: the function txn_done() from sample fetches can crash
+ - BUG/MEDIUM: lua: the function txn_done() from action wrapper can crash
+ - MEDIUM: http: implement http-response track-sc* directive
+ - BUG/MINOR: peers: Fix peers data decoding issue
+ - BUG/MINOR: peers: don't count track-sc multiple times on errors
+ - MINOR: standard: add function "escape_string"
+ - BUG/MEDIUM: log: use function "escape_string" instead of "escape_chunk"
+ - MINOR: tcp: Return TCP statistics like RTT and RTT variance
+ - DOC: lua: remove old functions
+ - BUG/MEDIUM: lua: some HTTP manipulation functions are called without valid requests
+ - DOC: fix json converter example and error message
+ - BUG/MEDIUM: stream-int: completely detach connection on connect error
+ - DOC: minor typo fixes to improve HTML parsing by haproxy-dconv
+ - BUILD: make proto_tcp.c compatible with musl library
+ - BUG/MAJOR: compression: initialize avail_in/next_in even during flush
+ - BUG/MEDIUM: samples: make smp_dup() always duplicate the sample
+ - MINOR: sample: implement smp_is_safe() and smp_make_safe()
+ - MINOR: sample: provide smp_is_rw() and smp_make_rw()
+ - BUG/MAJOR: server: the "sni" directive could randomly cause trouble
+ - BUG/MEDIUM: stick-tables: do not fail on string keys with no allocated size
+ - BUG/MEDIUM: stick-table: properly convert binary samples to keys
+ - MINOR: sample: use smp_make_rw() in upper/lower converters
+ - MINOR: tcp: add dst_is_local and src_is_local
+ - BUG/MINOR: peers: some updates are pushed twice after a resync.
+ - BUILD: protocol: fix some build errors on OpenBSD
+ - BUILD: log: iovec requires to include sys/uio.h on OpenBSD
+ - BUILD: tcp: do not include netinet/ip.h for IP_TTL
+ - BUILD: connection: fix build breakage on openbsd due to missing in_systm.h
+ - BUILD: checks: remove the last strcat and eliminate a warning on OpenBSD
+ - BUILD: tcp: define SOL_TCP when only IPPROTO_TCP exists
+ - BUILD: compression: remove a warning when no compression lib is used
+ - BUILD: poll: remove unused hap_fd_isset() which causes a warning with clang
+ - MINOR: tcp: add further tcp info fetchers
+ - BUG/MINOR: peers: empty chunks after a resync.
+ - BUG/MAJOR: stick-counters: possible crash when using sc_trackers with wrong table
+ - MINOR: standard.c: ipcmp() function to compare 2 IP addresses stored in 2 struct sockaddr_storage
+ - MINOR: standard.c: ipcpy() function to copy an IP address from a struct sockaddr_storage into an other one
+ - MAJOR: listen section: don't use first bind port anymore when no server ports are provided
+
+2016/05/10 : 1.7-dev3
+ - MINOR: sample: Moves ARGS underlying type from 32 to 64 bits.
+ - BUG/MINOR: log: Don't use strftime() which can clobber timezone if chrooted
+ - BUILD: namespaces: fix a potential build warning in namespaces.c
+ - MINOR: da: Using ARG12 macro for the sample fetch and the convertor.
+ - DOC: add encoding to json converter example
+ - BUG/MINOR: conf: "listener id" expects integer, but it's not checked
+ - DOC: Clarify tunes.vars.xxx-max-size settings
+ - CLEANUP: chunk: adding NULL check to chunk_dup allocation.
+ - CLEANUP: connection: fix double negation on memcmp()
+ - BUG/MEDIUM: peers: fix incorrect age in frequency counters
+ - BUG/MEDIUM: Fix RFC5077 resumption when more than TLS_TICKETS_NO are present
+ - BUG/MAJOR: Fix crash in http_get_fhdr with exactly MAX_HDR_HISTORY headers
+ - BUG/MINOR: lua: can't load external libraries
+ - BUG/MINOR: prevent the dump of uninitialized vars
+ - CLEANUP: map: it seems that the map were planned to be chained
+ - MINOR: lua: move class registration facilities
+ - MINOR: lua: remove some useless checks
+ - CLEANUP: lua: Remove two same functions
+ - MINOR: lua: refactor the Lua object registration
+ - MINOR: lua: precise message when a critical error is caught
+ - MINOR: lua: post initialization
+ - MINOR: lua: Add internal function which strip spaces
+ - MINOR: lua: convert field to lua type
+ - DOC: "addr" parameter applies to both health and agent checks
+ - DOC: timeout client: pointers to timeout http-request
+ - DOC: typo on stick-store response
+ - DOC: stick-table: amend paragraph blaming the loss of table upon reload
+ - DOC: typo: ACL subdir match
+ - DOC: typo: maxconn paragraph is wrong due to a wrong buffer size
+ - DOC: regsub: parser limitation about the inability to use closing square brackets
+ - DOC: typo: req.uri is now replaced by capture.req.uri
+ - DOC: name set-gpt0 mismatch with the expected keyword
+ - MINOR: http: sample fetch which returns unique-id
+ - MINOR: dumpstats: extract stats fields enum and names
+ - MINOR: dumpstats: split stats_dump_info_to_buffer() in two parts
+ - MINOR: dumpstats: split stats_dump_fe_stats() in two parts
+ - MINOR: dumpstats: split stats_dump_li_stats() in two parts
+ - MINOR: dumpstats: split stats_dump_sv_stats() in two parts
+ - MINOR: dumpstats: split stats_dump_be_stats() in two parts
+ - MINOR: lua: dump general info
+ - MINOR: lua: add class proxy
+ - MINOR: lua: add class server
+ - MINOR: lua: add class listener
+ - BUG/MEDIUM: stick-tables: some sample-fetch doesn't work in the connection state.
+ - MEDIUM: proxy: use dynamic allocation for error dumps
+ - CLEANUP: remove unneeded casts
+ - CLEANUP: uniformize last argument of malloc/calloc
+ - DOC: fix "needed" typo
+ - BUG/MINOR: dumpstats: fix write to global chunk
+ - BUG/MINOR: dns: inappropriate way out after a resolution timeout
+ - BUG/MINOR: dns: trigger a DNS query type change on resolution timeout
+ - CLEANUP: proto_http: few corrections for gcc warnings.
+ - BUG/MINOR: DNS: resolution structure change
+ - BUG/MINOR: allow to log cookie for tarpit and denied request
+ - BUG/MEDIUM: ssl: rewind the BIO when reading certificates
+ - OPTIM/MINOR: session: abort if possible before connecting to the backend
+ - DOC: http: rename the unique-id sample and add the documentation
+ - BUG/MEDIUM: trace.c: rdtsc() is defined in two files
+ - BUG/MEDIUM: channel: fix miscalculation of available buffer space (2nd try)
+ - BUG/MINOR: server: risk of over reading the pref_net array.
+ - BUG/MINOR: cfgparse: couple of small memory leaks.
+ - BUG/MEDIUM: sample: initialize the pointer before parse_binary call.
+ - DOC: fix discrepancy in the example for http-request redirect
+ - MINOR: acl: Add predefined METH_DELETE, METH_PUT
+ - CLEANUP: .gitignore cleanup
+ - DOC: Clarify IPv4 address / mask notation rules
+ - CLEANUP: fix inconsistency between fd->iocb, proto->accept and accept()
+ - BUG/MEDIUM: fix maxaccept computation on per-process listeners
+ - BUG/MINOR: listener: stop unbound listeners on startup
+ - BUG/MINOR: fix maxaccept computation according to the frontend process range
+ - TESTS: add blocksig.c to run tests with all signals blocked
+ - MEDIUM: unblock signals on startup.
+ - MINOR: filters: Print the list of existing filters during HA startup
+ - MINOR: filters: Typo in an error message
+ - MINOR: filters: Filters must define the callbacks struct during config parsing
+ - DOC: filters: Add filters documentation
+ - BUG/MEDIUM: channel: don't allow to overwrite the reserve until connected
+ - BUG/MEDIUM: channel: incorrect polling condition may delay event delivery
+ - BUG/MEDIUM: channel: fix miscalculation of available buffer space (3rd try)
+ - BUG/MEDIUM: log: fix risk of segfault when logging HTTP fields in TCP mode
+ - MINOR: Add ability for agent-check to set server maxconn
+ - CLEANUP: Use server_parse_maxconn_change_request for maxconn CLI updates
+ - MINOR: filters: add opaque data
+ - BUG/MEDIUM: lua: protects the upper boundary of the argument list for converters/fetches.
+ - MINOR: lua: migrate the argument mask to 64 bits type.
+ - BUG/MINOR: dumpstats: Fix the "Total bytes saved" counter in backends stats
+ - BUG/MINOR: log: fix a typo that would cause %HP to log <BADREQ>
+ - BUG/MEDIUM: http: fix incorrect reporting of server errors
+ - MINOR: channel: add new function channel_congested()
+ - BUG/MEDIUM: http: fix risk of CPU spikes with pipelined requests from dead client
+ - BUG/MAJOR: channel: fix miscalculation of available buffer space (4th try)
+ - BUG/MEDIUM: stream: ensure the SI_FL_DONT_WAKE flag is properly cleared
+ - BUG/MEDIUM: channel: fix inconsistent handling of 4GB-1 transfers
+ - BUG/MEDIUM: stats: show servers state may show an empty or incomplete result
+ - BUG/MEDIUM: stats: show backend may show an empty or incomplete result
+ - MINOR: stats: fix typo in help messages
+ - MINOR: stats: show stat resolvers missing in the help message
+ - BUG/MINOR: dns: fix DNS header definition
+ - BUG/MEDIUM: dns: fix alignment issue when building DNS queries
+ - CLEANUP: don't ignore scripts in .gitignore
+ - BUILD: add a few release and backport scripts in scripts/
+
+2016/03/14 : 1.7-dev2
+ - DOC: lua: fix lua API
+ - DOC: mailers: typo in 'hostname' description
+ - DOC: compression: missing mention of libslz for compression algorithm
+ - BUILD/MINOR: regex: missing header
+ - BUG/MINOR: stream: bad return code
+ - DOC: lua: fix some errors and add implicit types
+ - MINOR: lua: add set/get priv for applets
+ - BUG/MINOR: http: fix several off-by-one errors in the url_param parser
+ - BUG/MINOR: http: Be sure to process all the data received from a server
+ - MINOR: filters/http: Use a wrapper function instead of stream_int_retnclose
+ - BUG/MINOR: chunk: make chunk_dup() always check and set dst->size
+ - DOC: ssl: fixed some formatting errors in crt tag
+ - MINOR: chunks: ensure that chunk_strcpy() adds a trailing zero
+ - MINOR: chunks: add chunk_strcat() and chunk_newstr()
+ - MINOR: chunk: make chunk_initstr() take a const string
+ - MEDIUM: tools: add csv_enc_append() to preserve the original chunk
+ - MINOR: tools: make csv_enc_append() always start at the first byte of the chunk
+ - MINOR: lru: new function to delete <nb> least recently used keys
+ - DOC: add Ben Shillito as the maintainer of 51d
+ - BUG/MINOR: 51d: Ensures a unique domain for each configuration
+ - BUG/MINOR: 51d: Aligns Pattern cache implementation with HAProxy best practices.
+ - BUG/MINOR: 51d: Releases workset back to pool.
+ - BUG/MINOR: 51d: Aligned const pointers to changes in 51Degrees.
+ - CLEANUP: 51d: Aligned if statements with HAProxy best practices and removed casts from malloc.
+ - MINOR: rename master process name in -Ds (systemd mode)
+ - DOC: fix a few spelling mistakes
+ - DOC: fix "workaround" spelling
+ - BUG/MINOR: examples: Fixing haproxy.spec to remove references to .cfg files
+ - MINOR: fix the return type for dns_response_get_query_id() function
+ - MINOR: server state: missing LF (\n) on error message printed when parsing server state file
+ - BUG/MEDIUM: dns: no DNS resolution happens if no ports provided to the nameserver
+ - BUG/MAJOR: servers state: server port is erased when dns resolution is enabled on a server
+ - BUG/MEDIUM: servers state: server port is used uninitialized
+ - BUG/MEDIUM: config: Adding validation to stick-table expire value.
+ - BUG/MEDIUM: sample: http_date() doesn't provide the right day of the week
+ - BUG/MEDIUM: channel: fix miscalculation of available buffer space.
+ - MEDIUM: pools: add a new flag to avoid rounding pool size up
+ - BUG/MEDIUM: buffers: do not round up buffer size during allocation
+ - BUG/MINOR: stream: don't force retries if the server is DOWN
+ - BUG/MINOR: counters: make the sc-inc-gpc0 and sc-set-gpt0 touch the table
+ - MINOR: unix: don't mention free ports on EAGAIN
+ - BUG/CLEANUP: CLI: report the proper field states in "show sess"
+ - MINOR: stats: send content-length with the redirect to allow keep-alive
+ - BUG: stream_interface: Reuse connection even if the output channel is empty
+ - DOC: remove old tunnel mode assumptions
+ - BUG/MAJOR: http-reuse: fix risk of orphaned connections
+ - BUG/MEDIUM: http-reuse: do not share private connections across backends
+ - BUG/MINOR: ssl: Be sure to use unique serial for regenerated certificates
+ - BUG/MINOR: stats: fix missing comma in stats on agent drain
+ - MAJOR: filters: Add filters support
+ - MINOR: filters: Do not reset stream analyzers if the client is gone
+ - REORG: filters: Prepare creation of the HTTP compression filter
+ - MAJOR: filters/http: Rewrite the HTTP compression as a filter
+ - MEDIUM: filters: Use macros to call filters callbacks to speed-up processing
+ - MEDIUM: filters: remove http_start_chunk, http_last_chunk and http_chunk_end
+ - MEDIUM: filters: Replace filter_http_headers callback by an analyzer
+ - MEDIUM: filters/http: Move body parsing of HTTP messages in dedicated functions
+ - MINOR: filters: Add stream_filters structure to hide filters info
+ - MAJOR: filters: Require explicit registration to filter HTTP body and TCP data
+ - MINOR: filters: Remove unused or useless stuff and do small optimizations
+ - MEDIUM: filters: Optimize the HTTP compression for chunk encoded response
+ - MINOR: filters/http: Slightly update the parsing of chunks
+ - MINOR: filters/http: Forward remaining data when a channel has no "data" filters
+ - MINOR: filters: Add an filter example
+ - MINOR: filters: Extract proxy stuff from the struct filter
+ - MINOR: map: Add regex matching replacement
+ - BUG/MINOR: lua: unsafe initialization
+ - DOC: lua: fix some errors
+ - MINOR: lua: file dedicated to unsafe functions
+ - MINOR: lua: add "now" time function
+ - MINOR: standard: add RFC HTTP date parser
+ - MINOR: lua: Add date functions
+ - MINOR: lua: move common function
+ - MINOR: lua: merge function
+ - MINOR: lua: Add concat class
+ - MINOR: standard: add function "escape_chunk"
+ - MEDIUM: log: add a new log format flag "E"
+ - DOC: add server name at rate-limit sessions example
+ - BUG/MEDIUM: ssl: fix off-by-one in ALPN list allocation
+ - BUG/MEDIUM: ssl: fix off-by-one in NPN list allocation
+ - DOC: LUA: fix some typos and syntax errors
+ - MINOR: cli: add a new "show env" command
+ - MEDIUM: config: allow to manipulate environment variables in the global section
+ - MEDIUM: cfgparse: reject incorrect 'timeout retry' keyword spelling in resolvers
+ - MINOR: mailers: increase default timeout to 10 seconds
+ - MINOR: mailers: use <CRLF> for all line endings
+ - BUG/MAJOR: lua: segfault using Concat object
+ - DOC: lua: copyrights
+ - MINOR: common: mask conversion
+ - MEDIUM: dns: extract options
+ - MEDIUM: dns: add a "resolve-net" option which allow to prefer an ip in a network
+ - MINOR: mailers: make it possible to configure the connection timeout
+ - BUG/MAJOR: lua: applets can't sleep.
+ - BUG/MINOR: server: some prototypes are renamed
+ - BUG/MINOR: lua: Useless copy
+ - BUG/MEDIUM: stats: stats bind-process doesn't propagate the process mask correctly
+ - BUG/MINOR: server: fix the format of the warning on address change
+ - CLEANUP: server: add "const" to some message strings
+ - MINOR: server: generalize the "updater" source
+ - BUG/MEDIUM: chunks: always reject negative-length chunks
+ - BUG/MINOR: systemd: ensure we don't miss signals
+ - BUG/MINOR: systemd: report the correct signal in debug message output
+ - BUG/MINOR: systemd: propagate the correct signal to haproxy
+ - MINOR: systemd: ensure a reload doesn't mask a stop
+ - BUG/MEDIUM: cfgparse: wrong argument offset after parsing server "sni" keyword
+ - CLEANUP: stats: Avoid computation with uninitialized bits.
+ - CLEANUP: pattern: Ignore unknown samples in pat_match_ip().
+ - CLEANUP: map: Avoid memory leak in out-of-memory condition.
+ - BUG/MINOR: tcpcheck: fix incorrect list usage resulting in failure to load certain configs
+ - BUG/MAJOR: samples: check smp->strm before using it
+ - MINOR: sample: add a new helper to initialize the owner of a sample
+ - MINOR: sample: always set a new sample's owner before evaluating it
+ - BUG/MAJOR: vars: always retrieve the stream and session from the sample
+ - CLEANUP: payload: remove useless and confusing nullity checks for channel buffer
+ - BUG/MINOR: ssl: fix usage of the various sample fetch functions
+ - MINOR: stats: create fields types suitable for all CSV output data
+ - MINOR: stats: add all the "show info" fields in a table
+ - MEDIUM: stats: fill all the show info elements prior to displaying them
+ - MINOR: stats: add a function to emit fields into a chunk
+ - MINOR: stats: add stats_dump_info_fields() to dump one field per line
+ - MEDIUM: stats: make use of stats_dump_info_fields() for "show info"
+ - MINOR: stats: add a declaration of all stats fields
+ - MINOR: stats: don't hard-code the CSV fields list anymore
+ - MINOR: stats: create stats fields storage and CSV dump function
+ - MEDIUM: stats: convert stats_dump_fe_stats() to use stats_dump_fields_csv()
+ - MEDIUM: stats: make stats_dump_fe_stats() use stats fields for HTML dump
+ - MEDIUM: stats: convert stats_dump_li_stats() to use stats_dump_fields_csv()
+ - MEDIUM: stats: make stats_dump_li_stats() use stats fields for HTML dump
+ - MEDIUM: stats: convert stats_dump_be_stats() to use stats_dump_fields_csv()
+ - MEDIUM: stats: make stats_dump_be_stats() use stats fields for HTML dump
+ - MEDIUM: stats: convert stats_dump_sv_stats() to use stats_dump_fields_csv()
+ - MEDIUM: stats: make stats_dump_sv_stats() use the stats field for HTML
+ - MEDIUM: stats: move the server state coloring logic to the server dump function
+ - MINOR: stats: do not use srv->admin & STATS_ADMF_MAINT in HTML dumps
+ - MINOR: stats: do not check srv->state for SRV_ST_STOPPED in HTML dumps
+ - MINOR: stats: make CSV report server check status only when enabled
+ - MINOR: stats: only report backend's down time if it has servers
+ - MINOR: stats: prepend '*' in front of the check status when in progress
+ - MINOR: stats: make HTML stats dump rely on the table for the check status
+ - MINOR: stats: add agent_status, agent_code, agent_duration to output
+ - MINOR: stats: add check_desc and agent_desc to the output fields
+ - MINOR: stats: add check and agent's health values in the output
+ - MEDIUM: stats: make the HTML server state dump use the CSV states
+ - MEDIUM: stats: only report observe errors when observe is set
+ - MEDIUM: stats: expose the same flags for CLI and HTTP accesses
+ - MEDIUM: stats: report server's address in the CSV output
+ - MEDIUM: stats: report the cookie value in the server & backend CSV dumps
+ - MEDIUM: stats: compute the color code only in the HTML form
+ - MEDIUM: stats: report the listeners' address in the CSV output
+ - MEDIUM: stats: make it possible to report the WAITING state for listeners
+ - REORG: stats: dump the frontend's HTML stats via a generic function
+ - REORG: stats: dump the socket stats via the generic function
+ - REORG: stats: dump the server stats via the generic function
+ - REORG: stats: dump the backend stats via the generic function
+ - MEDIUM: stats: add a new "mode" column to report the proxy mode
+ - MINOR: stats: report the load balancing algorithm in CSV output
+ - MINOR: stats: add 3 fields to report the frontend-specific connection stats
+ - MINOR: stats: report number of intercepted requests for frontend and backends
+ - MINOR: stats: introduce stats_dump_one_line() to dump one stats line
+ - CLEANUP: stats: make stats_dump_fields_html() not rely on proxy anymore
+ - MINOR: stats: add ST_SHOWADMIN to pass the admin info in the regular flags
+ - MINOR: stats: make stats_dump_fields_html() not use &trash by default
+ - MINOR: stats: add functions to emit typed fields into a chunk
+ - MEDIUM: stats: support "show info typed" on the CLI
+ - MEDIUM: stats: implement a typed output format for stats
+ - DOC: document the "show info typed" and "show stat typed" output formats
+ - MINOR: cfgparse: warn when uid parameter is not a number
+ - MINOR: cfgparse: warn when gid parameter is not a number
+ - BUG/MINOR: standard: Avoid free of non-allocated pointer
+ - BUG/MINOR: pattern: Avoid memory leak on out-of-memory condition
+ - CLEANUP: http: fix a build warning introduced by a recent fix
+ - BUG/MINOR: log: GMT offset not updated when entering/leaving DST
+
+2015/12/20 : 1.7-dev1
+ - DOC: specify that stats socket doc (section 9.2) is in management
+ - BUILD: install only relevant and existing documentation
+ - CLEANUP: don't ignore debian/ directory if present
+ - BUG/MINOR: dns: parsing error of some DNS response
+ - BUG/MEDIUM: namespaces: don't fail if no namespace is used
+ - BUG/MAJOR: ssl: free the generated SSL_CTX if the LRU cache is disabled
+ - MEDIUM: dns: Don't use the ANY query type
+ - BUILD: ssl: fix build error introduced in commit 7969a3 with OpenSSL < 1.0.0
+ - DOC: fix a typo for a "deviceatlas" keyword
+ - FIX: small typo in an example using the "Referer" header
+ - MINOR: cli: ability to set per-server maxconn
+ - DEBUG/MINOR: memory: add a build option to disable memory pools sharing
+ - DEBUG/MEDIUM: memory: optionally protect free data in pools
+ - DEBUG/MEDIUM: memory: add optional control pool memory operations
+ - MEDIUM: memory: add accounting for failed allocations
+ - BUG/MEDIUM: config: count memory limits on 64 bits, not 32
+ - BUG/MAJOR: dns: first DNS response packet not matching queried hostname may lead to a loop
+ - BUG/MINOR: dns: unable to parse CNAMEs response
+ - BUG/MINOR: examples/haproxy.init: missing brace in quiet_check()
+ - DOC: deviceatlas: more example use cases.
+ - MINOR: config: allow IPv6 bracketed literals
+ - BUG/BUILD: replace haproxy-systemd-wrapper with $(EXTRA) in install-bin.
+ - BUILD: add Haiku as supported target.
+ - BUG/MAJOR: http: don't requeue an idle connection that is already queued
+ - DOC: typo on capture.res.hdr and capture.req.hdr
+ - BUG/MINOR: dns: check for duplicate nameserver id in a resolvers section was missing
+ - CLEANUP: use direction names in place of numeric values
+ - BUG/MEDIUM: lua: sample fetches based on response doesn't work
+ - MINOR: check: add agent-send server parameter
+ - BUG/MINOR: http rule: http capture 'id' rule points to a non existing id
+ - BUG/MINOR: server: check return value of fgets() in apply_server_state()
+ - BUG/MINOR: acl: don't use record layer in req_ssl_ver
+ - BUILD: freebsd: double declaration
+ - BUG/MEDIUM: lua: clean output buffer
+ - BUILD: check for libressl to be able to build against it
+ - DOC: lua-api/index.rst small example fixes, spelling correction.
+ - DOC: lua: architecture and first steps
+ - DOC: relation between timeout http-request and option http-buffer-request
+ - BUILD: Make deviceatlas require PCRE
+ - BUG: http: do not abort keep-alive connections on server timeout
+ - BUG/MEDIUM: http: switch the request channel to no-delay once done.
+ - BUG/MINOR: lua: don't force-sslv3 LUA's SSL socket
+ - BUILD/MINOR: http: proto_http.h needs sample.h
+ - BUG/MEDIUM: http: don't enable auto-close on the response side
+ - BUG/MEDIUM: stream: fix half-closed timeout handling
+ - CLEANUP: compression: don't allocate DEFAULT_MAXZLIBMEM without USE_ZLIB
+ - BUG/MEDIUM: cli: changing compression rate-limiting must require admin level
+ - BUG/MEDIUM: sample: urlp can't match an empty value
+ - BUILD: dumpstats: silencing warning for printf format specifier / time_t
+ - CLEANUP: proxy: calloc call inverted arguments
+ - MINOR: da: silent logging by default and displaying DeviceAtlas support if built.
+ - BUG/MEDIUM: da: stop DeviceAtlas processing in the convertor if there is no input.
+ - DOC: Edited 51Degrees section of README/
+ - BUG/MEDIUM: checks: email-alert not working when declared in defaults
+ - BUG/MINOR: checks: email-alert causes a segfault when an unknown mailers section is configured
+ - BUG/MINOR: checks: typo in an email-alert error message
+ - BUG/MINOR: tcpcheck: conf parsing error when no port configured on server and last rule is a CONNECT with no port
+ - BUG/MINOR: tcpcheck: conf parsing error when no port configured on server and first rule(s) is (are) COMMENT
+ - BUG/MEDIUM: http: fix http-reuse when frontend and backend differ
+ - DOC: prefer using http-request/response over reqXXX/rspXXX directives
+ - CLEANUP: haproxy: using _GNU_SOURCE instead of __USE_GNU macro.
+ - MINOR: ssl: Added cert_key_and_chain struct
+ - MEDIUM: ssl: Added support for creating SSL_CTX with multiple certs
+ - MINOR: ssl: Added multi cert support for crt-list config keyword
+ - MEDIUM: ssl: Added multi cert support for loading crt directories
+ - MEDIUM: ssl: Added support for Multi-Cert OCSP Stapling
+ - BUILD: ssl: set SSL_SOCK_NUM_KEYTYPES with openssl < 1.0.2
+ - MINOR: config: make tune.recv_enough configurable
+ - BUG/MEDIUM: config: properly adjust maxconn with nbproc when memmax is forced
+ - DOC: ssl: Adding docs for Multi-Cert bundling
+ - BUG/MEDIUM: peers: table entries learned from a remote are pushed to others after a random delay.
+ - BUG/MEDIUM: peers: old stick table updates could be repushed.
+ - MINOR: lua: service/applet can have access to the HTTP headers when a POST is received
+ - REORG/MINOR: lua: convert boolean "int" to bitfield
+ - BUG/MEDIUM: lua: Lua applets must not fetch samples using http_txn
+ - BUG/MINOR: lua: Lua applets must not use http_txn
+ - BUG/MEDIUM: lua: Forbid HTTP applets from being called from tcp rulesets
+ - BUG/MAJOR: lua: Do not force the HTTP analysers in use-services
+ - CLEANUP: lua: bad error messages
+ - CONTRIB: initiate a debugging suite to make debugging easier
+
+2015/10/13 : 1.7-dev0
+ - exact copy of 1.6.0
+
+2015/10/13 : 1.6.0
+ - BUG/MINOR: Handle interactive mode in cli handler
+ - DOC: global section missing parameters
+ - DOC: backend section missing parameters
+ - DOC: stats parameters available in frontend
+ - MINOR: lru: do not allocate useless memory in lru64_lookup
+ - BUG/MINOR: http: Add OPTIONS in supported http methods (found by find_http_meth)
+ - BUG/MINOR: ssl: fix management of the cache where forged certificates are stored
+ - MINOR: ssl: Release Servers SSL context when HAProxy is shut down
+ - MINOR: ssl: Read the file used to generate certificates in any order
+ - MINOR: ssl: Add support for EC for the CA used to sign generated certificates
+ - MINOR: ssl: Add callbacks to set DH/ECDH params for generated certificates
+ - BUG/MEDIUM: logs: fix time zone offset format in RFC5424
+ - BUILD: Fix the build on OSX (htonll/ntohll)
+ - BUILD: enable build on Linux/s390x
+ - BUG/MEDIUM: lua: direction test failed
+ - MINOR: lua: fix a spelling error in some error messages
+ - CLEANUP: cli: ensure we can never double-free error messages
+ - BUG/MEDIUM: lua: force server-close mode on Lua services
+ - MEDIUM: init: support more command line arguments after pid list
+ - MEDIUM: init: support a list of files on the command line
+ - MINOR: debug: enable memory poisoning to use byte 0
+ - BUILD: ssl: fix build error introduced by recent commit
+ - BUG/MINOR: config: make the stats socket pass the correct proxy to the parsers
+ - MEDIUM: server: implement TCP_USER_TIMEOUT on the server
+ - DOC: mention the "namespace" options for bind and server lines
+ - DOC: add the "management" documentation
+ - DOC: move the stats socket documentation from config to management
+ - MINOR: examples: update haproxy.spec to mention new docs
+ - DOC: mention management.txt in README
+ - DOC: remove haproxy-{en,fr}.txt
+ - BUILD: properly report when USE_ZLIB and USE_SLZ are used together
+ - MINOR: init: report use of libslz instead of "no compression"
+ - CLEANUP: examples: remove some obsolete and confusing files
+ - CLEANUP: examples: remove obsolete configuration file samples
+ - CLEANUP: examples: fix the example file content-sw-sample.cfg
+ - CLEANUP: examples: update sample file option-http_proxy.cfg
+ - CLEANUP: examples: update sample file ssl.cfg
+ - CLEANUP: tests: move a test file from examples/ to tests/
+ - CLEANUP: examples: shut up warnings in transparent proxy example
+ - CLEANUP: tests: removed completely obsolete test files
+ - DOC: update ROADMAP to remove what was done in 1.6
+ - BUG/MEDIUM: pattern: fixup use_after_free in the pat_ref_delete_by_id
+
+2015/10/06 : 1.6-dev7
+ - MINOR: cli: Dump all resolvers stats if no resolver section is given
+ - BUG: config: external-check command validation is checking for incorrect arguments.
+ - DOC: documentation format cleanups
+ - DOC: lua: few typos.
+ - BUG/MEDIUM: str2ip: make getaddrinfo() consider local address selection policy
+ - BUG/MEDIUM: logs: segfault writing to log from Lua
+ - DOC: fix lua use-service example
+ - MINOR: payload: add support for tls session ticket ext
+ - MINOR: lua: remove the run flag
+ - MEDIUM: lua: change the timeout execution
+ - MINOR: lua: rename the tune.lua.applet-timeout
+ - DOC: lua: update Lua doc
+ - DOC: lua: update doc according with the last Lua changes
+ - MINOR: http/tcp: fill the available actions
+ - DOC: reorder misplaced res.ssl_hello_type in the doc
+ - BUG/MINOR: tcp: make silent-drop always force a TCP reset
+ - CLEANUP: tcp: silent-drop: only drain the connection when quick-ack is disabled
+ - BUILD: tcp: use IPPROTO_IP when SOL_IP is not available
+ - BUILD: server: fix build warnings introduced by load-server-state
+ - BUG/MEDIUM: server: fix misuse of format string in load-server-state's warnings
+
+2015/09/28 : 1.6-dev6
+ - BUG/MAJOR: can't enable a server through the stat socket
+ - MINOR: server: Macro definition for server-state
+ - MINOR: cli: new stats socket command: show servers state
+ - DOC: stats socket command: show servers state
+ - MINOR: config: new global directive server-state-base
+ - DOC: global directive server-state-base
+ - MINOR: config: new global section directive: server-state-file
+ - DOC: new global directive: server-state-file
+ - MINOR: config: new backend directives: load-server-state-from-file and server-state-file-name
+ - DOC: load-server-state-from-file
+ - MINOR: init: server state loaded from file
+ - MINOR: server: startup slowstart task when using seamless reload of HAProxy
+ - MINOR: cli: new stats socket command: show backend
+ - DOC: servers state seamless reload example
+ - BUG: dns: can't connect UDP socket on FreeBSD
+ - MINOR: cfgparse: New function cfg_unregister_sections()
+ - MINOR: chunk: New function free_trash_buffers()
+ - BUG/MEDIUM: main: Freeing a bunch of static pointers
+ - MINOR: proto_http: Externalisation of previously internal functions
+ - MINOR: global: Few new struct fields for da module
+ - MAJOR: da: Update of the DeviceAtlas API module
+ - DOC: DeviceAtlas new keywords
+ - DOC: README: DeviceAtlas sample configuration updates
+ - MEDIUM: log: replace sendto() with sendmsg() in __send_log()
+ - MEDIUM: log: use a separate buffer for the header and for the message
+ - MEDIUM: logs: remove the hostname, tag and pid part from the logheader
+ - MEDIUM: logs: add support for RFC5424 header format per logger
+ - MEDIUM: logs: add a new RFC5424 log-format for the structured-data
+ - DOC: mention support for the RFC5424 syslog message format
+ - MEDIUM: logs: have global.log_send_hostname not contain the trailing space
+ - MEDIUM: logs: pass the trailing "\n" as an iovec
+ - BUG/MEDIUM: peers: some table updates are randomly not pushed.
+ - BUG/MEDIUM: peers: same table updates re-pushed after a re-connect
+ - BUG/MINOR: fct peer_prepare_ackmsg should not use trash.
+ - MINOR: http: made CHECK_HTTP_MESSAGE_FIRST accessible to other functions
+ - MINOR: global: Added new fields for 51Degrees device detection
+ - DOC: Added more explanation for 51Degrees V3.2
+ - BUILD: Changed 51Degrees option to support V3.2
+ - MAJOR: 51d: Upgraded to support 51Degrees V3.2 and new features
+ - MINOR: 51d: Improved string handling for LRU cache
+ - DOC: add references to rise/fall for the fastinter explanation
+ - MINOR: support cpu-map feature through the compile option USE_CPU_AFFINITY on FreeBSD
+ - BUG/MAJOR: lua: potential unexpected aborts()
+ - BUG/MINOR: lua: breaks the log message if his size exceed one buffer
+ - MINOR: action: add private configuration
+ - MINOR: action: add reference to the original keyword matched for the called parser.
+ - MINOR: lua: change actions registration
+ - MEDIUM: proto_http: smp_prefetch_http initialize txn
+ - MINOR: channel: rename function chn_sess to chn_strm
+ - CLEANUP: lua: align defines
+ - MINOR: http: export http_get_path() function
+ - MINOR: http: export the get_reason() function
+ - MINOR: http: export function http_msg_analyzer()
+ - MINOR: http: split initialization
+ - MINOR: lua: reset pointer after use
+ - MINOR: lua: identify userdata objects
+ - MEDIUM: lua: use the function lua_rawset in place of lua_settable
+ - BUG/MAJOR: lua: segfault after the channel data is modified by some Lua action.
+ - CLEANUP: lua: use calloc in place of malloc
+ - BUG/MEDIUM: lua: longjmp function must be unregistered
+ - BUG/MEDIUM: lua: forces a garbage collection
+ - BUG/MEDIUM: lua: wakeup task on bad conditions
+ - MINOR: standard: avoid DNS resolution from the function str2sa_range()
+ - MINOR: lua: extend socket address to support non-IP families
+ - MINOR: lua/applet: the cosocket applet should use appctx_wakeup in place of task_wakeup
+ - BUG/MEDIUM: lua: socket destroy before reading pending data
+ - MEDIUM: lua: change the GC policy
+ - OPTIM/MEDIUM: lua: executes the garbage collector only when using cosocket
+ - BUG/MEDIUM: lua: don't reset undesired flags in hlua_ctx_resume
+ - MINOR: applet: add init function
+ - MINOR: applet: add an execution timeout
+ - MINOR: stream/applet: add use-service action
+ - MINOR: lua: add AppletTCP class and service
+ - MINOR: lua: add AppletHTTP class and service
+ - DOC: lua: some documentation update
+ - DOC: add the documentation about internal circular lists
+ - DOC: add a CONTRIBUTING file
+ - DOC: add a MAINTAINERS file
+ - BUG/MAJOR: peers: fix a crash when stopping peers on unbound processes
+ - DOC: update coding-style to reference checkpatch.pl
+ - BUG/MEDIUM: stick-tables: fix double-decrement of tracked entries
+ - BUG/MINOR: args: add name for ARGT_VAR
+ - DOC: add more entries to MAINTAINERS
+ - DOC: add more entries to MAINTAINERS
+ - CLEANUP: stream-int: remove obsolete function si_applet_call()
+ - BUG/MAJOR: cli: do not dereference strm_li()->proto->name
+ - BUG/MEDIUM: http: do not dereference strm_li(stream)
+ - BUG/MEDIUM: proxy: do not dereference strm_li(stream)
+ - BUG/MEDIUM: stream: do not dereference strm_li(stream)
+ - MINOR: stream-int: use si_release_endpoint() to close idle conns
+ - BUG/MEDIUM: payload: make req.payload and payload_lv aware of dynamic buffers
+ - BUG/MEDIUM: acl: always accept match "found"
+ - MINOR: applet: rename applet_runq to applet_active_queue
+ - BUG/MAJOR: applet: use a separate run queue to maintain list integrity
+ - MEDIUM: stream-int: split stream_int_update_conn() into si- and conn-specific parts
+ - MINOR: stream-int: implement a new stream_int_update() function
+ - MEDIUM: stream-int: factor out the stream update functions
+ - MEDIUM: stream-int: call stream_int_update() from si_update()
+ - MINOR: stream-int: export stream_int_update_*
+ - MINOR: stream-int: move the applet_pause call out of the stream updates
+ - MEDIUM: stream-int: clean up the conditions to enable reading in si_conn_wake_cb
+ - MINOR: stream-int: implement the stream_int_notify() function
+ - MEDIUM: stream-int: use the same stream notification function for applets and conns
+ - MEDIUM: stream-int: completely remove stream_int_update_embedded()
+ - MINOR: stream-int: rename si_applet_done() to si_applet_wake_cb()
+ - BUG/MEDIUM: applet: fix reporting of broken write situation
+ - BUG/MINOR: stats: do not call cli_release_handler 3 times
+ - BUG/MEDIUM: cli: properly handle closed output
+ - MINOR: cli: do not call the release handler on internal error.
+ - BUG/MEDIUM: stream-int: avoid double-call to applet->release
+ - DEBUG: add p_malloc() to return a poisoned memory area
+ - CLEANUP: lua: remove unneeded memset(0) after calloc()
+ - MINOR: lua: use the proper applet wakeup mechanism
+ - BUG/MEDIUM: lua: better fix for the protocol check
+ - BUG/MEDIUM: lua: properly set the target on the connection
+ - MEDIUM: actions: pass a new "flags" argument to custom actions
+ - MEDIUM: actions: add new flag ACT_FLAG_FINAL to notify about last call
+ - MEDIUM: http: pass ACT_FLAG_FINAL to custom actions
+ - MEDIUM: lua: only allow actions to yield if not in a final call
+ - DOC: clarify how to make use of abstract sockets in socat
+ - CLEANUP: config: make the errorloc/errorfile messages less confusing
+ - MEDIUM: action: add a new flag ACT_FLAG_FIRST
+ - BUG/MINOR: config: check that tune.bufsize is always positive
+ - MEDIUM: config: set tune.maxrewrite to 1024 by default
+ - DOC: add David Carlier as maintainer of da.c
+ - DOC: fix some broken unexpected unicode chars in the Lua doc.
+ - BUG/MEDIUM: proxy: ignore stopped peers
+ - BUG/MEDIUM: proxy: do not wake stopped proxies' tasks during soft_stop()
+ - MEDIUM: init: completely deallocate unused peers
+ - BUG/MEDIUM: tcp: fix inverted condition to call custom actions
+ - DOC: remove outdated actions lists on tcp-request/response
+ - MEDIUM: tcp: add new tcp action "silent-drop"
+ - DOC: add URLs to optional libraries in the README
+
+2015/09/14 : 1.6-dev5
+ - MINOR: dns: dns_resolution structure update: time_t to unsigned int
+ - BUG/MEDIUM: dns: DNS resolution doesn't start
+ - BUG/MAJOR: dns: dns client resolution infinite loop
+ - MINOR: dns: coding style update
+ - MINOR: dns: new bitmasks to use against DNS flags
+ - MINOR: dns: dns_nameserver structure update: new counter for truncated response
+ - MINOR: dns: New DNS response analysis code: DNS_RESP_TRUNCATED
+ - MEDIUM: dns: handling of truncated response
+ - MINOR: DNS client query type failover management
+ - MINOR: dns: no expected DNS record type found
+ - MINOR: dns: new flag to report that no IP can be found in a DNS response packet
+ - BUG/MINOR: DNS request retry counter used for retry only
+ - DOC: DNS documentation updated
+ - MEDIUM: actions: remove ACTION_STOP
+ - BUG/MEDIUM: lua: outgoing connection was broken since 1.6-dev2 (bis)
+ - BUG/MINOR: lua: last log character truncated.
+ - CLEANUP: typo: bad indent
+ - CLEANUP: actions: misplaced includes
+ - MINOR: build: missing header
+ - CLEANUP: lua: Merge log functions
+ - BUG/MAJOR: http: don't manipulate the server connection if it's killed
+ - BUG/MINOR: http: remove stupid HTTP_METH_NONE entry
+ - BUG/MAJOR: http: don't call http_send_name_header() after an error
+ - MEDIUM: tools: make str2sa_range() optionally return the FQDN
+ - BUG/MINOR: tools: make str2sa_range() report unresolvable addresses
+ - BUG/MEDIUM: dns: use the correct server hostname when resolving
+
+2015/08/30 : 1.6-dev4
+ - MINOR: log: Add log-format variable %HQ, to log HTTP query strings
+ - DOC: typo in 'redirect', 302 code meaning
+ - DOC: typos in tcp-check expect examples
+ - DOC: resolve-prefer default value and default-server update
+ - MINOR: DNS counters: increment valid counter
+ - BUG/MEDIUM: DNS resolution response parsing broken
+ - MINOR: server: add new SRV_ADMF_CMAINT flag
+ - MINOR: server SRV_ADMF_CMAINT flag doesn't imply SRV_ADMF_FMAINT
+ - BUG/MEDIUM: dns: wrong first time DNS resolution
+ - BUG/MEDIUM: lua: Lua tasks fail to start.
+ - BUILD: add USE_LUA to BUILD_OPTIONS when it's used
+ - DOC/MINOR: fix OpenBSD versions where haproxy works
+ - MINOR: 51d: unable to start haproxy without "51degrees-data-file"
+ - BUG/MEDIUM: peers: fix wrong message id on stick table updates acknowledgement.
+ - BUG/MAJOR: peers: fix current table pointer not re-initialized on session release.
+ - BUILD: ssl: Allow building against libssl without SSLv3.
+ - DOC: clarify some points about SSL and the proxy protocol
+ - DOC: mention support for RFC 5077 TLS Ticket extension in starter guide
+ - BUG/MEDIUM: mailer: DATA part must be terminated with <CRLF>.<CRLF>
+ - DOC: match several lua configuration option names to those implemented in code
+ - MINOR: cfgparse: Correct the mailer warning text to show the right names to the user
+ - BUG/MINOR: ssl: TLS Ticket Key rotation broken via socket command
+ - MINOR: stream: initialize the current_rule field to NULL on stream init
+ - BUG/MEDIUM: lua: timeout error with converters, wrapper and actions.
+ - CLEANUP: proto_http: remove useless initialisation
+ - CLEANUP: http/tcp actions: remove the scope member
+ - BUG/MINOR: proto_tcp: custom action continue is ignored
+ - MINOR: proto_tcp: add session in the action prototype
+ - MINOR: vars: reduce the code size of some wrappers
+ - MINOR: Move http method enum from proto_http to sample
+ - MINOR: sample: Add ipv6 to ipv4 and sint to ipv6 casts
+ - MINOR: sample/proto_tcp: export "smp_fetch_src"
+ - MEDIUM: cli: rely on the map's output type instead of the sample type
+ - BUG/MEDIUM: stream: The stream doesn't inherit SC from the session
+ - BUG/MEDIUM: vars: segfault during the configuration parsing
+ - BUG/MEDIUM: stick-tables: refcount error after copying SC for the session to the stream
+ - BUG/MEDIUM: lua: bad error processing
+ - MINOR: samples: rename a struct from sample_storage to sample_data
+ - MINOR: samples: rename some struct member from "smp" to "data"
+ - MEDIUM: samples: Use the "struct sample_data" in the "struct sample"
+ - MINOR: samples: extract the anonymous union and create the union sample_value
+ - MINOR: samples: rename union from "data" to "u"
+ - MEDIUM: 51degrees: Adapt the 51Degrees library
+ - MINOR: samples: data assignation simplification
+ - MEDIUM: pattern/map: Maps can returns various types
+ - MINOR: map: The map can return IPv4 and IPv6
+ - MEDIUM: actions: Merge (http|tcp)-(request|response) action structs
+ - MINOR: actions: Remove the data opaque pointer
+ - MINOR: lua: use the hlua_rule type in place of opaque type
+ - MINOR: vars: use the vars types as argument in place of opaque type
+ - MINOR: proto_http: use an "expr" type in place of generic opaque type.
+ - MINOR: proto_http: replace generic opaque types by real used types for the actions on thr request line
+ - MINOR: proto_http: replace generic opaque types by real used types in "http_capture"
+ - MINOR: proto_http: replace generic opaque types by real used types in "http_capture" by id
+ - MEDIUM: track-sc: Move the track-sc configuration storage in the union
+ - MEDIUM: capture: Move the capture configuration storage in the union
+ - MINOR: actions: add "from" information
+ - MINOR: actions: remove the mark indicating the last entry in enum
+ - MINOR: actions: Declare all the embedded actions in the same header file
+ - MINOR: actions: change actions names
+ - MEDIUM: actions: Add standard return code for the action API
+ - MEDIUM: actions: Merge (http|tcp)-(request|response) keywords structs
+ - MINOR: proto_tcp: proto_tcp.h is now useless
+ - MINOR: actions: mutualise the action keyword lookup
+ - MEDIUM: actions: Normalize the return code of the configuration parsers
+ - MINOR: actions: Remove wrappers
+ - MAJOR: stick-tables: use sample types in place of dedicated types
+ - MEDIUM: stick-tables: use the sample type names
+ - MAJOR: stick-tables: remove key storage from the key struct
+ - MEDIUM: stick-tables: Add GPT0 in the stick tables
+ - MINOR: stick-tables: Add GPT0 access
+ - MINOR: stick-tables: Add GPC0 actions
+ - BUG/MEDIUM: lua: the lua function Channel:close() causes a segfault
+ - DOC: ssl: missing LF
+ - MINOR: lua: add core.done() function
+ - DOC: fix function name
+ - BUG/MINOR: lua: in some case a sample may remain undefined
+ - DOC: fix "http_action_set_req_line()" comments
+ - MINOR: http: Action for manipulating the returned status code.
+ - MEDIUM: lua: turns txn:close into txn:done
+ - BUG/MEDIUM: lua: cannot process more Lua hooks after a "done()" function call
+ - BUILD: link with libdl if needed for Lua support
+ - CLEANUP: backend: factor out objt_server() in connect_server()
+ - MEDIUM: backend: don't call si_alloc_conn() when we reuse a valid connection
+ - MEDIUM: stream-int: simplify si_alloc_conn()
+ - MINOR: stream-int: add new function si_detach_endpoint()
+ - MINOR: server: add a list of private idle connections
+ - MINOR: connection: add a new list member in the connection struct
+ - MEDIUM: stream-int: queue idle connections at the server
+ - MINOR: stream-int: make si_idle_conn() only accept valid connections
+ - MINOR: server: add a list of already used idle connections
+ - MINOR: connection: add a new flag CO_FL_PRIVATE
+ - MINOR: config: add new setting "http-reuse"
+ - MAJOR: backend: initial work towards connection reuse
+ - MAJOR: backend: improve the connection reuse mechanism
+ - MEDIUM: backend: implement "http-reuse safe"
+ - MINOR: server: add a list of safe, already reused idle connections
+ - MEDIUM: backend: add the "http-reuse aggressive" strategy
+ - DOC: document the new http-reuse directive
+ - DOC: internals: document next steps for HTTP connection reuse
+ - DOC: mention that %ms is left-padded with zeroes.
+ - MINOR: init: indicate to check 'bind' lines when no listeners were found.
+ - MAJOR: http: remove references to appsession
+ - CLEANUP: config: remove appsession initialization
+ - CLEANUP: appsession: remove appsession.c and sessionhash.c
+ - CLEANUP: tests: remove sessionhash_test.c and test-cookie-appsess.cfg
+ - CLEANUP: proxy: remove last references to appsession
+ - CLEANUP: appsession: remove the last include files
+ - DOC: remove documentation about appsession
+ - CLEANUP: .gitignore: ignore more test files
+ - CLEANUP: .gitignore: finally ignore everything but what is known.
+ - MEDIUM: config: emit a warning on a frontend without listener
+ - DOC: add doc/internals/entities-v2.txt
+ - DOC: add doc/linux-syn-cookies.txt
+ - DOC: add design thoughts on HTTP/2
+ - DOC: add some thoughts on connection sharing for HTTP/2
+ - DOC: add design thoughts on dynamic buffer allocation
+ - BUG/MEDIUM: counters: ensure that src_{inc,clr}_gpc0 creates a missing entry
+ - DOC: add new file intro.txt
+ - MAJOR: tproxy: remove support for cttproxy
+ - BUG/MEDIUM: lua: outgoing connection was broken since 1.6-dev2
+ - DOC: lua: replace txn:close with txn:done in lua-api
+ - DOC: intro: minor updates and fixes
+ - DOC: intro: fix too long line.
+ - DOC: fix example of http-request using ssl_fc_session_id
+ - BUG/MEDIUM: lua: txn:done() still causes a segfault in TCP mode
+ - CLEANUP: lua: fix some indent issues
+ - BUG/MEDIUM: lua: fix a segfault in txn:done() if called twice
+ - DOC: lua: mention than txn:close was renamed txn:done.
+
+2015/07/22 : 1.6-dev3
+ - CLEANUP: sample: generalize sample_fetch_string() as sample_fetch_as_type()
+ - MEDIUM: http: Add new 'set-src' option to http-request
+ - DOC: usesrc root privileges requirements
+ - BUG/MINOR: dns: wrong time unit for some DNS default parameters
+ - MINOR: proxy: bit field for proxy_find_best_match diff status
+ - MINOR: server: new server flag: SRV_F_FORCED_ID
+ - MINOR: server: server_find functions: id, name, best_match
+ - DOC: dns: fix chapters syntax
+ - BUILD/MINOR: tools: rename popcount to my_popcountl
+ - BUILD: add netbsd TARGET
+ - MEDIUM: 51Degrees code refactoring and cleanup
+ - MEDIUM: 51d: add LRU-based cache on User-Agent string detection
+ - DOC: add notes about the "51degrees-cache-size" parameter
+ - BUG/MEDIUM: 51d: possible incorrect operations on smp->data.str.str
+ - BUG/MAJOR: connection: fix TLV offset calculation for proxy protocol v2 parsing
+ - MINOR: Add sample fetch to detect Supported Elliptic Curves Extension
+ - BUG/MINOR: payload: Add volatile flag to smp_fetch_req_ssl_ec_ext
+ - BUG/MINOR: lua: type error in the arguments wrapper
+ - CLEANUP: vars: remove unused struct
+ - BUG/MINOR: http/sample: gmtime/localtime can fail
+ - MINOR: standard: add 64 bits conversion functions
+ - MAJOR: sample: converts uint and sint in 64 bits signed integer
+ - MAJOR: arg: converts uint and sint in sint
+ - MEDIUM: sample: switch to saturated arithmetic
+ - MINOR: vars: returns variable content
+ - MEDIUM: vars/sample: operators can use variables as parameter
+ - BUG/MINOR: ssl: fix smp_fetch_ssl_fc_session_id
+ - BUILD/MINOR: lua: fix a harmless build warning
+ - BUILD/MINOR: stats: fix build warning due to condition always true
+ - BUG/MAJOR: lru: fix unconditional call to free due to unexpected semi-colon
+ - BUG/MEDIUM: logs: fix improper systematic use of quotes with a few tags
+ - BUILD/MINOR: lua: ensure that hlua_ctx_destroy is properly defined
+ - BUG/MEDIUM: lru: fix possible memory leak when ->free() is used
+ - MINOR: vars: make the accounting not depend on the stream
+ - MEDIUM: vars: move the session variables to the session, not the stream
+ - BUG/MEDIUM: vars: do not freeze the connection when the expression cannot be fetched
+ - BUG/MAJOR: buffers: make the buffer_slow_realign() function respect output data
+ - BUG/MAJOR: tcp: tcp rulesets were still broken
+ - MINOR: stats: improve compression stats reporting
+ - MINOR: ssl: make self-generated certs also work with raw IPv6 addresses
+ - CLEANUP: ssl: make ssl_sock_generated_cert_serial() take a const
+ - CLEANUP: ssl: make ssl_sock_generate_certificate() use ssl_sock_generated_cert_serial()
+ - BUG/MINOR: log: missing some ARGC_* entries in fmt_directives()
+ - MINOR: args: add new context for servers
+ - MINOR: stream: maintain consistence between channel_forward and HTTP forward
+ - MINOR: ssl: provide a function to set the SNI extension on a connection
+ - MEDIUM: ssl: add sni support on the server lines
+ - CLEANUP: stream: remove a useless call to si_detach()
+ - CLEANUP: stream-int: fix a few outdated comments about stream_int_register_handler()
+ - CLEANUP: stream-int: remove stream_int_unregister_handler() and si_detach()
+ - MINOR: stream-int: only use si_release_endpoint() to release a connection
+ - MINOR: standard: provide htonll() and ntohll()
+ - CLEANUP/MINOR: dns: dns_str_to_dn_label() only needs a const char
+ - BUG/MAJOR: dns: fix the length of the string to be copied
+
+2015/06/17 : 1.6-dev2
+ - BUG/MINOR: ssl: Display correct filename in error message
+ - MEDIUM: logs: Add HTTP request-line log format directives
+ - BUG/MEDIUM: check: tcpcheck regression introduced by e16c1b3f
+ - BUG/MINOR: check: fix tcpcheck error message
+ - MINOR: use an int instead of calling tcpcheck_get_step_id
+ - MINOR: tcpcheck_rule structure update
+ - MINOR: include comment in tcpcheck error log
+ - DOC: tcpcheck comment documentation
+ - MEDIUM: server: add support for changing a server's address
+ - MEDIUM: server: change server ip address from stats socket
+ - MEDIUM: protocol: add minimalist UDP protocol client
+ - MEDIUM: dns: implement a DNS resolver
+ - MAJOR: server: add DNS-based server name resolution
+ - DOC: server name resolution + proto DNS
+ - MINOR: dns: add DNS statistics
+ - MEDIUM: http: configurable http result codes for http-request deny
+ - BUILD: Compile clean when debug options defined
+ - MINOR: lru: Add the possibility to free data when an item is removed
+ - MINOR: lru: Add lru64_lookup function
+ - MEDIUM: ssl: Add options to forge SSL certificates
+ - MINOR: ssl: Export functions to manipulate generated certificates
+ - MEDIUM: config: add DeviceAtlas global keywords
+ - MEDIUM: global: add the DeviceAtlas required elements to struct global
+ - MEDIUM: sample: add the da-csv converter
+ - MEDIUM: init: DeviceAtlas initialization
+ - BUILD: Makefile: add options to build with DeviceAtlas
+ - DOC: README: explain how to build with DeviceAtlas
+ - BUG/MEDIUM: http: fix the url_param fetch
+ - BUG/MEDIUM: init: segfault if global._51d_property_names is not initialized
+ - MAJOR: peers: peers protocol version 2.0
+ - MINOR: peers: avoid re-scheduling of pending stick-table's updates still not pushed.
+ - MEDIUM: peers: re-schedule stick-table's entry for sync when data is modified.
+ - MEDIUM: peers: support of any stick-table data-types for sync
+ - BUG/MAJOR: sample: regression on sample cast to stick table types.
+ - CLEANUP: deinit: remove codes for cleaning p->block_rules
+ - DOC: Fix L4TOUT typo in documentation
+ - DOC: set-log-level in Logging section preamble
+ - BUG/MEDIUM: compat: fix segfault on FreeBSD
+ - MEDIUM: check: include server address and port in the send-state header
+ - MEDIUM: backend: Allow redispatch on retry intervals
+ - MINOR: Add TLS ticket keys reference and use it in the listener struct
+ - MEDIUM: Add support for updating TLS ticket keys via socket
+ - DOC: Document new socket commands "show tls-keys" and "set ssl tls-key"
+ - MINOR: Add sample fetch which identifies if the SSL session has been resumed
+ - DOC: Update doc about weight, act and bck fields in the statistics
+ - BUG/MEDIUM: ssl: fix tune.ssl.default-dh-param value being overwritten
+ - MINOR: ssl: add a destructor to free allocated SSL ressources
+ - MEDIUM: ssl: add the possibility to use a global DH parameters file
+ - MEDIUM: ssl: replace standards DH groups with custom ones
+ - MEDIUM: stats: Add enum srv_stats_state
+ - MEDIUM: stats: Separate server state and colour in stats
+ - MEDIUM: stats: Only report drain state in stats if server has SRV_ADMF_DRAIN set
+ - MEDIUM: stats: Differentiate between DRAIN and DRAIN (agent)
+ - MEDIUM: Lower priority of email alerts for log-health-checks messages
+ - MEDIUM: Send email alerts when servers are marked as UP or enter the drain state
+ - MEDIUM: Document when email-alerts are sent
+ - BUG/MEDIUM: lua: bad argument number in analyser and in error message
+ - MEDIUM: lua: automatically converts strings in proxy, tables, server and ip
+ - BUG/MINOR: utf8: remove compilator warning
+ - MEDIUM: map: uses HAProxy facilities to store default value
+ - BUG/MINOR: lua: error in detection of mandatory arguments
+ - BUG/MINOR: lua: set current proxy as default value if it is possible
+ - BUG/MEDIUM: http: the action set-{method|path|query|uri} doesn't run.
+ - BUG/MEDIUM: lua: undetected infinite loop
+ - BUG/MAJOR: http: don't read past buffer's end in http_replace_value
+ - BUG/MEDIUM: http: the function "(req|res)-replace-value" doesn't respect the HTTP syntax
+ - MEDIUM/CLEANUP: http: rewrite and lighten http_transform_header() prototype
+ - BUILD: lua: it miss the '-ldl' directive
+ - MEDIUM: http: allows 'R' and 'S' in the protocol alphabet
+ - MINOR: http: split the function http_action_set_req_line() in two parts
+ - MINOR: http: split http_transform_header() function in two parts.
+ - MINOR: http: export function inet_set_tos()
+ - MINOR: lua: txn: add function set_(loglevel|tos|mark)
+ - MINOR: lua: create and register HTTP class
+ - DOC: lua: fix some typos
+ - MINOR: lua: add log functions
+ - BUG/MINOR: lua: Fix SSL initialisation
+ - DOC: lua: some fixes
+ - MINOR: lua: (req|res)_get_headers return more than one header value
+ - MINOR: lua: map system integration in Lua
+ - BUG/MEDIUM: http: functions set-{path,query,method,uri} breaks the HTTP parser
+ - MINOR: sample: add url_dec converter
+ - MEDIUM: sample: fill the struct sample with the session, proxy and stream pointers
+ - MEDIUM: sample change the prototype of sample-fetches and converters functions
+ - MINOR: sample: fill the struct sample with the options.
+ - MEDIUM: sample: change the prototype of sample-fetches functions
+ - MINOR: http: split the url_param in two parts
+ - CLEANUP: http: bad indentation
+ - MINOR: http: add body_param fetch
+ - MEDIUM: http: url-encoded parsing function can run through wrapped buffer
+ - DOC: http: req.body_param documentation
+ - MINOR: proxy: custom capture declaration
+ - MINOR: capture: add two "capture" converters
+ - MEDIUM: capture: Allow capture with slot identifier
+ - MINOR: http: add array of generic pointers in http_res_rules
+ - MEDIUM: capture: adds http-response capture
+ - MINOR: common: escape CSV strings
+ - MEDIUM: stats: escape some strings in the CSV dump
+ - MINOR: tcp: add custom actions that can continue tcp-(request|response) processing
+ - MINOR: lua: Lua tcp action are not final action
+ - DOC: lua: schematics about lua socket organization
+ - BUG/MINOR: debug: display (null) in place of "meth"
+ - DOC: mention the "lua action" in documentation
+ - MINOR: standard: add function that converts signed int to a string
+ - BUG/MINOR: sample: wrong conversion of signed values
+ - MEDIUM: sample: Add type any
+ - MINOR: debug: add a special converter which display its input sample content.
+ - MINOR: tcp: increase the opaque data array
+ - MINOR: tcp/http/conf: extends the keyword registration options
+ - MINOR: build: fix build dependency
+ - MEDIUM: vars: adds support of variables
+ - MINOR: vars: adds get and set functions
+ - MINOR: lua: Variable access
+ - MINOR: samples: add samples which returns constants
+ - BUG/MINOR: vars/compil: fix some warnings
+ - BUILD: add 51degrees options to makefile.
+ - MINOR: global: add several 51Degrees members to global
+ - MINOR: config: add 51Degrees config parsing.
+ - MINOR: init: add 51Degrees initialisation code
+ - MEDIUM: sample: add fiftyone_degrees converter.
+ - MEDIUM: deinit: add cleanup for 51Degrees to deinit
+ - MEDIUM: sample: add trie support to 51Degrees
+ - DOC: add 51Degrees notes to configuration.txt.
+ - DOC: add build indications for 51Degrees to README.
+ - MEDIUM: cfgparse: introduce weak and strong quoting
+ - BUG/MEDIUM: cfgparse: incorrect memmove in quotes management
+ - MINOR: cfgparse: remove line size limitation
+ - MEDIUM: cfgparse: expand environment variables
+ - BUG/MINOR: cfgparse: fix typo in 'option httplog' error message
+ - BUG/MEDIUM: cfgparse: segfault when userlist is misused
+ - CLEANUP: cfgparse: remove reference to 'ruleset' section
+ - MEDIUM: cfgparse: check section maximum number of arguments
+ - MEDIUM: cfgparse: max arguments check in the global section
+ - MEDIUM: cfgparse: check max arguments in the proxies sections
+ - CLEANUP: stream-int: remove a redundant clearing of the linger_risk flag
+ - MINOR: connection: make conn_sock_shutw() actually perform the shutdown() call
+ - MINOR: stream-int: use conn_sock_shutw() to shutdown a connection
+ - MINOR: connection: perform the call to xprt->shutw() in conn_data_shutw()
+ - MEDIUM: stream-int: replace xprt->shutw calls with conn_data_shutw()
+ - MINOR: checks: use conn_data_shutw_hard() instead of call via xprt
+ - MINOR: connection: implement conn_sock_send()
+ - MEDIUM: stream-int: make conn_si_send_proxy() use conn_sock_send()
+ - MEDIUM: connection: make conn_drain() perform more controls
+ - REORG: connection: move conn_drain() to connection.c and rename it
+ - CLEANUP: stream-int: remove inclusion of fd.h that is not used anymore
+ - MEDIUM: channel: don't always set CF_WAKE_WRITE on bi_put*
+ - CLEANUP: lua: don't use si_ic/si_oc on known stream-ints
+ - BUG/MEDIUM: peers: correctly configure the client timeout
+ - MINOR: peers: centralize configuration of the peers frontend
+ - MINOR: proxy: store the default target into the frontend's configuration
+ - MEDIUM: stats: use frontend_accept() as the accept function
+ - MEDIUM: peers: use frontend_accept() instead of peer_accept()
+ - CLEANUP: listeners: remove unused timeout
+ - MEDIUM: listener: store the default target per listener
+ - BUILD: fix automatic inclusion of libdl.
+ - MEDIUM: lua: implement a simple memory allocator
+ - MEDIUM: compression: postpone buffer adjustments after compression
+ - MEDIUM: compression: don't send leading zeroes with chunk size
+ - BUG/MINOR: compression: consider the expansion factor in init
+ - MINOR: http: check the algo name "identity" instead of the function pointer
+ - CLEANUP: compression: statify all algo-specific functions
+ - MEDIUM: compression: add a distinction between UA- and config- algorithms
+ - MEDIUM: compression: add new "raw-deflate" compression algorithm
+ - MEDIUM: compression: split deflate_flush() into flush and finish
+ - CLEANUP: compression: remove unused reset functions
+ - MAJOR: compression: integrate support for libslz
+ - BUG/MEDIUM: http: hdr_cnt would not count any header when called without name
+ - BUG/MAJOR: http: null-terminate the http actions keywords list
+ - CLEANUP: lua: remove the unused hlua_sleep memory pool
+ - BUG/MAJOR: lua: use correct object size when initializing a new converter
+ - CLEANUP: lua: remove hard-coded sizeof() in object creations and mallocs
+ - CLEANUP: lua: fix confusing local variable naming in hlua_txn_new()
+ - CLEANUP: hlua: stop using variable name "s" alternately for hlua_txn and hlua_smp
+ - CLEANUP: lua: get rid of the last "*ht" for struct hlua_txn.
+ - CLEANUP: lua: rename last occurrences of "*s" to "*htxn" for hlua_txn
+ - CLEANUP: lua: rename variable "sc" for struct hlua_smp
+ - CLEANUP: lua: get rid of the last two "*hs" for hlua_smp
+ - REORG/MAJOR: session: rename the "session" entity to "stream"
+ - REORG/MEDIUM: stream: rename stream flags from SN_* to SF_*
+ - MINOR: session: start to reintroduce struct session
+ - MEDIUM: stream: allocate the session when a stream is created
+ - MEDIUM: stream: move the listener's pointer to the session
+ - MEDIUM: stream: move the frontend's pointer to the session
+ - MINOR: session: add a pointer to the session's origin
+ - MEDIUM: session: use the pointer to the origin instead of s->si[0].end
+ - CLEANUP: sample: remove useless tests in fetch functions for l4 != NULL
+ - MEDIUM: http: move header captures from http_txn to struct stream
+ - MINOR: http: create a dedicated pool for http_txn
+ - MAJOR: http: move http_txn out of struct stream
+ - MAJOR: sample: don't pass l7 anymore to sample fetch functions
+ - CLEANUP: lua: remove unused hlua_smp->l7 and hlua_txn->l7
+ - MEDIUM: http: remove the now useless http_txn from {req/res} rules
+ - CLEANUP: lua: don't pass http_txn anymore to hlua_request_act_wrapper()
+ - MAJOR: sample: pass a pointer to the session to each sample fetch function
+ - MINOR: stream: provide a few helpers to retrieve frontend, listener and origin
+ - CLEANUP: stream: don't set ->target to the incoming connection anymore
+ - MINOR: stream: move session initialization before the stream's
+ - MINOR: session: store the session's accept date
+ - MINOR: session: don't rely on s->logs.logwait in embryonic sessions
+ - MINOR: session: implement session_free() and use it everywhere
+ - MINOR: session: add stick counters to the struct session
+ - REORG: stktable: move the stkctr_* functions from stream to sticktable
+ - MEDIUM: streams: support looking up stkctr in the session
+ - MEDIUM: session: update the session's stick counters upon session_free()
+ - MEDIUM: proto_tcp: track the session's counters in the connection ruleset
+ - MAJOR: tcp: make tcp_exec_req_rules() only rely on the session
+ - MEDIUM: stream: don't call stream_store_counters() in kill_mini_session() nor session_accept()
+ - MEDIUM: stream: move all the session-specific stuff of stream_accept() earlier
+ - MAJOR: stream: don't initialize the stream anymore in stream_accept
+ - MEDIUM: session: remove the task pointer from the session
+ - REORG: session: move the session parts out of stream.c
+ - MINOR: stream-int: make appctx_new() take the applet in argument
+ - MEDIUM: peers: move the appctx initialization earlier
+ - MINOR: session: introduce session_new()
+ - MINOR: session: make use of session_new() when creating a new session
+ - MINOR: peers: make use of session_new() when creating a new session
+ - MEDIUM: peers: initialize the task before the stream
+ - MINOR: session: set the CO_FL_CONNECTED flag on the connection once ready
+ - CLEANUP: stream.c: do not re-attach the connection to the stream
+ - MEDIUM: stream: isolate connection-specific initialization code
+ - MEDIUM: stream: also accept appctx as origin in stream_accept_session()
+ - MEDIUM: peers: make use of stream_accept_session()
+ - MEDIUM: frontend: make ->accept only return +/-1
+ - MEDIUM: stream: return the stream upon accept()
+ - MEDIUM: frontend: move some stream initialisation to stream_new()
+ - MEDIUM: frontend: move the fd-specific settings to session_accept_fd()
+ - MEDIUM: frontend: don't restrict frontend_accept() to connections anymore
+ - MEDIUM: frontend: move some remaining stream settings to stream_new()
+ - CLEANUP: frontend: remove one useless local variable
+ - MEDIUM: stream: don't rely on the session's listener anymore in stream_new()
+ - MEDIUM: lua: make use of stream_new() to create an outgoing connection
+ - MINOR: lua: minor cleanup in hlua_socket_new()
+ - MINOR: lua: no need for setting timeouts / conn_retries in hlua_socket_new()
+ - MINOR: peers: no need for setting timeouts / conn_retries in peer_session_create()
+ - CLEANUP: stream-int: swap stream-int and appctx declarations
+ - CLEANUP: namespaces: fix protection against multiple inclusions
+ - MINOR: session: maintain the session count stats in the session, not the stream
+ - MEDIUM: session: adjust the connection flags before stream_new()
+ - MINOR: stream: pass the pointer to the origin explicitly to stream_new()
+ - CLEANUP: poll: move the conditions for waiting out of the poll functions
+ - BUG/MEDIUM: listener: don't report an error when resuming unbound listeners
+ - BUG/MEDIUM: init: don't limit cpu-map to the first 32 processes only
+ - BUG/MAJOR: tcp/http: fix current_rule assignment when restarting over a ruleset
+ - BUG/MEDIUM: stream-int: always reset si->ops when si->end is nullified
+ - DOC: update the entities diagrams
+ - BUG/MEDIUM: http: properly retrieve the front connection
+ - MINOR: applet: add a new "owner" pointer in the appctx
+ - MEDIUM: applet: make the applet not depend on a stream interface anymore
+ - REORG: applet: move the applet definitions out of stream_interface
+ - CLEANUP: applet: rename struct si_applet to applet
+ - REORG: stream-int: create si_applet_ops dedicated to applets
+ - MEDIUM: applet: add basic support for an applet run queue
+ - MEDIUM: applet: implement a run queue for active appctx
+ - MEDIUM: stream-int: add a new function si_applet_done()
+ - MAJOR: applet: now call si_applet_done() instead of si_update() in I/O handlers
+ - MAJOR: stream: use a regular ->update for all stream interfaces
+ - MEDIUM: dumpstats: don't unregister the applet anymore
+ - MEDIUM: applet: centralize the call to si_applet_done() in the I/O handler
+ - MAJOR: stream: do not allocate request buffers anymore when the left side is an applet
+ - MINOR: stream-int: add two flags to indicate an applet's wishes regarding I/O
+ - MEDIUM: applet: make the applets only use si_applet_{cant|want|stop}_{get|put}
+ - MEDIUM: stream-int: pause the appctx if the task is woken up
+ - BUG/MAJOR: tcp: only call registered actions when they're registered
+ - BUG/MEDIUM: peers: fix applet scheduling
+ - BUG/MEDIUM: peers: recent applet changes broke peers updates scheduling
+ - MINOR: tools: provide an rdtsc() function for time comparisons
+ - IMPORT: lru: import simple ebtree-based LRU functions
+ - IMPORT: hash: import xxhash-r39
+ - MEDIUM: pattern: add a revision to all pattern expressions
+ - MAJOR: pattern: add LRU-based cache on pattern matching
+ - BUG/MEDIUM: http: remove content-length from chunked messages
+ - DOC: http: update the comments about the rules for determining transfer-length
+ - BUG/MEDIUM: http: do not restrict parsing of transfer-encoding to HTTP/1.1
+ - BUG/MEDIUM: http: incorrect transfer-coding in the request is a bad request
+ - BUG/MEDIUM: http: remove content-length from responses with bad transfer-encoding
+ - MEDIUM: http: restrict the HTTP version token to 1 digit as per RFC7230
+ - MEDIUM: http: disable support for HTTP/0.9 by default
+ - MEDIUM: http: add option-ignore-probes to get rid of the floods of 408
+ - BUG/MINOR: config: clear proxy->table.peers.p for disabled proxies
+ - MEDIUM: init: don't stop proxies in parent process when exiting
+ - MINOR: stick-table: don't attach to peers in stopped state
+ - MEDIUM: config: initialize stick-tables after peers, not before
+ - MEDIUM: peers: add the ability to disable a peers section
+ - MINOR: peers: store the pointer to the signal handler
+ - MEDIUM: peers: unregister peers that were never started
+ - MEDIUM: config: propagate the table's process list to the peers sections
+ - MEDIUM: init: stop any peers section not bound to the correct process
+ - MEDIUM: config: validate that peers sections are bound to exactly one process
+ - MAJOR: peers: allow peers section to be used with nbproc > 1
+ - DOC: relax the peers restriction to single-process
+ - DOC: document option http-ignore-probes
+ - DOC: fix the comments about the meaning of msg->sol in HTTP
+ - BUG/MEDIUM: http: wait for the exact amount of body bytes in wait_for_request_body
+ - BUG/MAJOR: http: prevent risk of reading past end with balance url_param
+ - MEDIUM: stream: move HTTP request body analyser before process_common
+ - MEDIUM: http: add a new option http-buffer-request
+ - MEDIUM: http: provide 3 fetches for the body
+ - DOC: update the doc on the proxy protocol
+ - BUILD: pattern: fix build warnings introduced in the LRU cache
+ - BUG/MEDIUM: stats: properly initialize the scope before dumping stats
+ - CLEANUP: config: fix misleading information in error message.
+ - MINOR: config: report the number of processes using a peers section in the error case
+ - BUG/MEDIUM: config: properly compute the default number of processes for a proxy
+ - MEDIUM: http: add new "capture" action for http-request
+ - BUG/MEDIUM: http: fix the http-request capture parser
+ - BUG/MEDIUM: http: don't forward client shutdown without NOLINGER except for tunnels
+ - BUILD/MINOR: ssl: fix build failure introduced by recent patch
+ - BUG/MAJOR: check: fix breakage of inverted tcp-check rules
+ - CLEANUP: checks: fix double usage of cur / current_step in tcp-checks
+ - BUG/MEDIUM: checks: do not dereference head of a tcp-check at the end
+ - CLEANUP: checks: simplify the loop processing of tcp-checks
+ - BUG/MAJOR: checks: always check for end of list before proceeding
+ - BUG/MEDIUM: checks: do not dereference a list as a tcpcheck struct
+ - BUG/MAJOR: checks: break infinite loops when tcp-checks starts with comment
+ - MEDIUM: http: make url_param iterate over multiple occurrences
+ - BUG/MEDIUM: peers: apply a random reconnection timeout
+ - MEDIUM: config: reject invalid config with name duplicates
+ - MEDIUM: config: reject conflicts in table names
+ - CLEANUP: proxy: make the proxy lookup functions more user-friendly
+ - MINOR: proxy: simply ignore duplicates in proxy name lookups
+ - MINOR: config: don't open-code proxy name lookups
+ - MEDIUM: config: clarify the conflicting modes detection for backend rules
+ - CLEANUP: proxy: remove now unused function findproxy_mode()
+ - MEDIUM: stick-table: remove the now duplicate find_stktable() function
+ - MAJOR: config: remove the deprecated reqsetbe / reqisetbe actions
+ - MINOR: proxy: add a new function proxy_find_by_id()
+ - MINOR: proxy: add a flag to memorize that the proxy's ID was forced
+ - MEDIUM: proxy: add a new proxy_find_best_match() function
+ - CLEANUP: http: explicitly reference request in http_apply_redirect_rules()
+ - MINOR: http: prepare support for parsing redirect actions on responses
+ - MEDIUM: http: implement http-response redirect rules
+ - MEDIUM: http: no need to close the request on redirect if data was parsed
+ - BUG/MEDIUM: http: fix body processing for the stats applet
+ - BUG/MINOR: da: fix log-level comparison to remove annoying warning
+ - CLEANUP: global: remove one ifdef USE_DEVICEATLAS
+ - CLEANUP: da: move the converter registration to da.c
+ - CLEANUP: da: register the config keywords in da.c
+ - CLEANUP: adjust the envelope name in da.h to reflect the file name
+ - CLEANUP: da: remove ifdef USE_DEVICEATLAS from da.c
+ - BUILD: make 51D easier to build by defaulting to 51DEGREES_SRC
+ - BUILD: fix build warning when not using 51degrees
+ - BUILD: make DeviceAtlas easier to build by defaulting to DEVICEATLAS_SRC
+ - BUILD: ssl: fix recent build breakage on older SSL libs
+
+2015/03/11 : 1.6-dev1
+ - CLEANUP: extract temporary $CFG to eliminate duplication
+ - CLEANUP: extract temporary $BIN to eliminate duplication
+ - CLEANUP: extract temporary $PIDFILE to eliminate duplication
+ - CLEANUP: extract temporary $LOCKFILE to eliminate duplication
+ - CLEANUP: extract quiet_check() to avoid duplication
+ - BUG/MINOR: don't start haproxy on reload
+ - DOC: Address issue where documentation is excluded due to a gitignore rule.
+ - BUG/MEDIUM: systemd: set KillMode to 'mixed'
+ - BUILD: fix "make install" to support spaces in the install dirs
+ - BUG/MINOR: config: http-request replace-header arg typo
+ - BUG: config: error in http-response replace-header number of arguments
+ - DOC: missing track-sc* in http-request rules
+ - BUILD: lua: missing ifdef related to SSL when enabling LUA
+ - BUG/MEDIUM: regex: fix pcre_study error handling
+ - MEDIUM: regex: Use pcre_study always when PCRE is used, regardless of JIT
+ - BUG/MINOR: Fix search for -p argument in systemd wrapper.
+ - MEDIUM: Improve signal handling in systemd wrapper.
+ - DOC: fix typo in Unix Socket commands
+ - BUG/MEDIUM: checks: external checks can't change server status to UP
+ - BUG/MEDIUM: checks: segfault with external checks in a backend section
+ - BUG/MINOR: checks: external checks shouldn't wait for timeout to return the result
+ - BUG/MEDIUM: auth: fix segfault with http-auth and a configuration with an unknown encryption algorithm
+ - BUG/MEDIUM: config: userlists should ensure that encrypted passwords are supported
+ - BUG/MINOR: config: don't propagate process binding for dynamic use_backend
+ - BUG/MINOR: log: fix request flags when keep-alive is enabled
+ - BUG/MEDIUM: checks: fix conflicts between agent checks and ssl healthchecks
+ - MINOR: checks: allow external checks in backend sections
+ - MEDIUM: checks: provide environment variables to the external checks
+ - MINOR: checks: update dynamic environment variables in external checks
+ - DOC: checks: environment variables used by "external-check command"
+ - BUG/MEDIUM: backend: correctly detect the domain when use_domain_only is used
+ - MINOR: ssl: load certificates in alphabetical order
+ - BUG/MINOR: checks: prevent http keep-alive with http-check expect
+ - MINOR: lua: typo in an error message
+ - MINOR: report the Lua version in -vv
+ - MINOR: lua: add a compilation error message when compiled with an incompatible version
+ - BUG/MEDIUM: lua: segfault when calling haproxy sample fetches from lua
+ - BUILD: try to automatically detect the Lua library name
+ - BUILD/CLEANUP: systemd: avoid a warning due to mixed code and declaration
+ - BUG/MEDIUM: backend: Update hash to use unsigned int throughout
+ - BUG/MEDIUM: connection: fix memory corruption when building a proxy v2 header
+ - MEDIUM: connection: add new bit in Proxy Protocol V2
+ - BUG/MINOR: ssl: rejects OCSP response without nextupdate.
+ - BUG/MEDIUM: ssl: Fix to not serve expired OCSP responses.
+ - BUG/MINOR: ssl: Fix OCSP resp update fails with the same certificate configured twice.
+ - BUG/MINOR: ssl: Fix external function in order not to return a pointer on an internal trash buffer.
+ - MINOR: add fetchs 'ssl_c_der' and 'ssl_f_der' to return DER formatted certs
+ - MINOR: ssl: add statement to force some ssl options in global.
+ - BUG/MINOR: ssl: correctly initialize ssl ctx for invalid certificates
+ - BUG/MEDIUM: ssl: fix bad ssl context init can cause segfault in case of OOM.
+ - BUG/MINOR: samples: fix unnecessary memcopy converting binary to string.
+ - MINOR: samples: adds the bytes converter.
+ - MINOR: samples: adds the field converter.
+ - MINOR: samples: add the word converter.
+ - BUG/MINOR: server: move the directive #endif to the end of file
+ - BUG/MAJOR: buffer: check the space left is enough or not when input data in a buffer is wrapped
+ - DOC: fix a few typos
+ - CLEANUP: epoll: epoll_events should be allocated according to global.tune.maxpollevents
+ - BUG/MINOR: http: fix typo: "401 Unauthorized" => "407 Unauthorized"
+ - BUG/MINOR: parse: refer curproxy instead of proxy
+ - BUG/MINOR: parse: check the validity of size string in a more strict way
+ - BUILD: add new target 'make uninstall' to support uninstalling haproxy from OS
+ - DOC: expand the docs for the provided stats.
+ - BUG/MEDIUM: unix: do not unlink() abstract namespace sockets upon failure.
+ - MEDIUM: ssl: Certificate Transparency support
+ - MEDIUM: stats: proxied stats admin forms fix
+ - MEDIUM: http: Compress HTTP responses with status codes 201,202,203 in addition to 200
+ - BUG/MEDIUM: connection: sanitize PPv2 header length before parsing address information
+ - MAJOR: namespace: add Linux network namespace support
+ - MINOR: systemd: Check configuration before start
+ - BUILD: ssl: handle boringssl in openssl version detection
+ - BUILD: ssl: disable OCSP when using boringssl
+ - BUILD: ssl: don't call get_rfc2409_prime when using boringssl
+ - MINOR: ssl: don't use boringssl's cipher_list
+ - BUILD: ssl: use OPENSSL_NO_OCSP to detect OCSP support
+ - MINOR: stats: fix minor typo in HTML page
+ - MINOR: Also accept SIGHUP/SIGTERM in systemd-wrapper
+ - MEDIUM: Add support for configurable TLS ticket keys
+ - DOC: Document the new tls-ticket-keys bind keyword
+ - DOC: clearly state that the "show sess" output format is not fixed
+ - MINOR: stats: fix minor typo fix in stats_dump_errors_to_buffer()
+ - DOC: httplog does not support 'no'
+ - BUG/MEDIUM: ssl: Fix a memory leak in DHE key exchange
+ - MINOR: ssl: use SSL_get_ciphers() instead of directly accessing the cipher list.
+ - BUG/MEDIUM: Consistently use 'check' in process_chk
+ - MEDIUM: Add external check
+ - BUG/MEDIUM: Do not set agent health to zero if server is disabled in config
+ - MEDIUM/BUG: Only explicitly report "DOWN (agent)" if the agent health is zero
+ - MEDIUM: Remove connect_chk
+ - MEDIUM: Refactor init_check and move to checks.c
+ - MEDIUM: Add free_check() helper
+ - MEDIUM: Move proto and addr fields struct check
+ - MEDIUM: Attach tcpcheck_rules to check
+ - MEDIUM: Add parsing of mailers section
+ - MEDIUM: Allow configuration of email alerts
+ - MEDIUM: Support sending email alerts
+ - DOC: Document email alerts
+ - MINOR: Remove trailing '.' from email alert messages
+ - MEDIUM: Allow suppression of email alerts by log level
+ - BUG/MEDIUM: Do not consider an agent check as failed on L7 error
+ - MINOR: deinit: fix memory leak
+ - MINOR: http: export the function 'smp_fetch_base32'
+ - BUG/MEDIUM: http: tarpit timeout is reset
+ - MINOR: sample: add "json" converter
+ - BUG/MEDIUM: pattern: don't load more than once a pattern list.
+ - MINOR: map/acl/dumpstats: remove the "Done." message
+ - BUG/MAJOR: ns: HAProxy segfault if the cli_conn is not from a network connection
+ - BUG/MINOR: pattern: error message missing
+ - BUG/MEDIUM: pattern: some entries are not deleted with case insensitive match
+ - BUG/MINOR: ARG6 and ARG7 don't fit in a 32 bits word
+ - MAJOR: poll: only rely on wake_expired_tasks() to compute the wait delay
+ - MEDIUM: task: call session analyzers if the task is woken by a message.
+ - MEDIUM: protocol: automatically pick the proto associated to the connection.
+ - MEDIUM: channel: wake up any request analyzer on response activity
+ - MINOR: converters: add a "void *private" argument to converters
+ - MINOR: converters: give the session pointer as converter argument
+ - MINOR: sample: add private argument to the struct sample_fetch
+ - MINOR: global: export function and permits to not resolve DNS names
+ - MINOR: sample: add function for browsing samples.
+ - MINOR: global: export many symbols.
+ - MINOR: includes: fix a lot of missing or useless includes
+ - MEDIUM: tcp: add register keyword system.
+ - MEDIUM: buffer: make bo_putblk/bo_putstr/bo_putchk return the number of bytes copied.
+ - MEDIUM: http: change the code returned by the response processing rule functions
+ - MEDIUM: http/tcp: permit to resume http and tcp custom actions
+ - MINOR: channel: functions to get data from a buffer without copy
+ - MEDIUM: lua: lua integration in the build and init system.
+ - MINOR: lua: add ease functions
+ - MINOR: lua: add runtime execution context
+ - MEDIUM: lua: "com" signals
+ - MINOR: lua: add the configuration directive "lua-load"
+ - MINOR: lua: core: create "core" class and object
+ - MINOR: lua: post initialisation bindings
+ - MEDIUM: lua: add coroutine as tasks.
+ - MINOR: lua: add sample and args type converters
+ - MINOR: lua: txn: create class TXN associated with the transaction.
+ - MINOR: lua: add shared context in the lua stack
+ - MINOR: lua: txn: import existing sample-fetches in the class TXN
+ - MINOR: lua: txn: add lua function in TXN that returns an array of http headers
+ - MINOR: lua: register and execute sample-fetches in LUA
+ - MINOR: lua: register and execute converters in LUA
+ - MINOR: lua: add bindings for tcp and http actions
+ - MINOR: lua: core: add sleep functions
+ - MEDIUM: lua: socket: add "socket" class for TCP I/O
+ - MINOR: lua: core: pattern and acl manipulation
+ - MINOR: lua: channel: add "channel" class
+ - MINOR: lua: txn: object "txn" provides two objects "channel"
+ - MINOR: lua: core: can set the nice of the current task
+ - MINOR: lua: core: can yield an execution stack
+ - MINOR: lua: txn: add binding for closing the client connection.
+ - MEDIUM: lua: Lua initialisation "on demand"
+ - BUG/MAJOR: lua: send function fails and return bad bytes
+ - MINOR: remove unused declaration.
+ - MINOR: lua: remove some #define
+ - MINOR: lua: use bitfield and macro in place of integer and enum
+ - MINOR: lua: set skeleton for Lua execution expiration
+ - MEDIUM: lua: each yielding function returns a wake up time.
+ - MINOR: lua: adds "forced yield" flag
+ - MEDIUM: lua: interrupt the Lua execution for running other process
+ - MEDIUM: lua: change the sleep function core
+ - BUG/MEDIUM: lua: the execution timeout is ignored in yield case
+ - DOC: lua: Lua configuration documentation
+ - MINOR: lua: add the struct session in the lua channel struct
+ - BUG/MINOR: lua: set buffer if it is not available.
+ - BUG/MEDIUM: lua: reset flags before resuming execution
+ - BUG/MEDIUM: lua: fix infinite loop about channel
+ - BUG/MEDIUM: lua: the Lua process is not waked up after sending data on requests side
+ - BUG/MEDIUM: lua: many errors when we try to send data with the channel API
+ - MEDIUM: lua: use the Lua-5.3 version of the library
+ - BUG/MAJOR: lua: some function are not yieldable, the forced yield causes errors
+ - BUG/MEDIUM: lua: can't handle the response bytes
+ - BUG/MEDIUM: lua: segfault with buffer_replace2
+ - BUG/MINOR: lua: check buffers before initializing socket
+ - BUG/MINOR: log: segfault if there are no proxy reference
+ - BUG/MEDIUM: lua: sockets don't have buffer to write data
+ - BUG/MEDIUM: lua: cannot connect socket
+ - BUG/MINOR: lua: sockets receive behavior doesn't follow the specs
+ - BUG/BUILD: lua: The strict Lua 5.3 version check is not done.
+ - BUG/MEDIUM: buffer: one byte miss in buffer free space check
+ - MEDIUM: lua: make the functions hlua_gethlua() and hlua_sethlua() faster
+ - MINOR: replace the Core object by a simple model.
+ - MEDIUM: lua: change the objects configuration
+ - MEDIUM: lua: create a namespace for the fetches
+ - MINOR: converters: add function to browse converters
+ - MINOR: lua: wrapper for converters
+ - MINOR: lua: replace function (req|get)_channel by a variable
+ - MINOR: lua: fetches and converters can return an empty string in place of nil
+ - DOC: lua api
+ - BUG/MEDIUM: sample: fix random number upper-bound
+ - BUG/MINOR: stats:Fix incorrect printf type.
+ - BUG/MAJOR: session: revert all the crappy client-side timeout changes
+ - BUG/MINOR: logs: properly initialize and count log sockets
+ - BUG/MEDIUM: http: fetch "base" is not compatible with set-header
+ - BUG/MINOR: counters: do not untrack counters before logging
+ - BUG/MAJOR: sample: correctly reinitialize sample fetch context before calling sample_process()
+ - MINOR: stick-table: make stktable_fetch_key() indicate why it failed
+ - BUG/MEDIUM: counters: fix track-sc* to wait on unstable contents
+ - BUILD: remove TODO from the spec file and add README
+ - MINOR: log: make MAX_SYSLOG_LEN overridable at build time
+ - MEDIUM: log: support a user-configurable max log line length
+ - DOC: provide an example of how to use ssl_c_sha1
+ - BUILD: checks: external checker needs signal.h
+ - BUILD: checks: kill a minor warning on Solaris in external checks
+ - BUILD: http: fix isdigit & isspace warnings on Solaris
+ - BUG/MINOR: listener: set the listener's fd to -1 after deletion
+ - BUG/MEDIUM: unix: failed abstract socket binding is retryable
+ - MEDIUM: listener: implement a per-protocol pause() function
+ - MEDIUM: listener: support rebinding during resume()
+ - BUG/MEDIUM: unix: completely unbind abstract sockets during a pause()
+ - DOC: explicitly mention the limits of abstract namespace sockets
+ - DOC: minor fix on {sc,src}_kbytes_{in,out}
+ - DOC: fix alphabetical sort of converters
+ - MEDIUM: stick-table: implement lookup from a sample fetch
+ - MEDIUM: stick-table: add new converters to fetch table data
+ - MINOR: samples: add two converters for the date format
+ - BUG/MAJOR: http: correctly rewind the request body after start of forwarding
+ - DOC: remove references to CPU=native in the README
+ - DOC: mention that "compression offload" is ignored in defaults section
+ - DOC: mention that Squid correctly responds 400 to PPv2 header
+ - BUILD: fix dependencies between config and compat.h
+ - MINOR: session: export the function 'smp_fetch_sc_stkctr'
+ - MEDIUM: stick-table: make it easier to register extra data types
+ - BUG/MINOR: http: base32+src should use the big endian version of base32
+ - MINOR: sample: allow IP address to cast to binary
+ - MINOR: sample: add new converters to hash input
+ - MINOR: sample: allow integers to cast to binary
+ - BUILD: report commit ID in git versions as well
+ - CLEANUP: session: move the stick counters declarations to stick_table.h
+ - MEDIUM: http: add the track-sc* actions to http-request rules
+ - BUG/MEDIUM: connection: fix proxy v2 header again!
+ - BUG/MAJOR: tcp: fix a possible busy spinning loop in content track-sc*
+ - OPTIM/MINOR: proxy: reduce struct proxy by 48 bytes on 64-bit archs
+ - MINOR: log: add a new field "%lc" to implement a per-frontend log counter
+ - BUG/MEDIUM: http: fix inverted condition in pat_match_meth()
+ - BUG/MEDIUM: http: fix improper parsing of HTTP methods for use with ACLs
+ - BUG/MINOR: pattern: remove useless allocation of unused trash in pat_parse_reg()
+ - BUG/MEDIUM: acl: correctly compute the output type when a converter is used
+ - CLEANUP: acl: cleanup some of the redundancy and spaghetti after last fix
+ - BUG/CRITICAL: http: don't update msg->sov once data start to leave the buffer
+ - MEDIUM: http: enable header manipulation for 101 responses
+ - BUG/MEDIUM: config: propagate frontend to backend process binding again.
+ - MEDIUM: config: properly propagate process binding between proxies
+ - MEDIUM: config: make the frontends automatically bind to the listeners' processes
+ - MEDIUM: config: compute the exact bind-process before listener's maxaccept
+ - MEDIUM: config: only warn if stats are attached to multi-process bind directives
+ - MEDIUM: config: report it when tcp-request rules are misplaced
+ - DOC: indicate in the doc that track-sc* can wait if data are missing
+ - MINOR: config: detect the case where a tcp-request content rule has no inspect-delay
+ - MEDIUM: systemd-wrapper: support multiple executable versions and names
+ - BUG/MEDIUM: remove debugging code from systemd-wrapper
+ - BUG/MEDIUM: http: adjust close mode when switching to backend
+ - BUG/MINOR: config: don't propagate process binding on fatal errors.
+ - BUG/MEDIUM: check: rule-less tcp-check must detect connect failures
+ - BUG/MINOR: tcp-check: report the correct failed step in the status
+ - DOC: indicate that weight zero is reported as DRAIN
+ - BUG/MEDIUM: config: avoid skipping disabled proxies
+ - BUG/MINOR: config: do not accept more track-sc than configured
+ - BUG/MEDIUM: backend: fix URI hash when a query string is present
+ - BUG/MEDIUM: http: don't dump debug headers on MSG_ERROR
+ - BUG/MAJOR: cli: explicitly call cli_release_handler() upon error
+ - BUG/MEDIUM: tcp: fix outgoing polling based on proxy protocol
+ - BUILD/MINOR: ssl: de-constify "ciphers" to avoid a warning on openssl-0.9.8
+ - BUG/MEDIUM: tcp: don't use SO_ORIGINAL_DST on non-AF_INET sockets
+ - BUG/BUILD: revert accidental change in the makefile from latest SSL fix
+ - BUG/MEDIUM: ssl: force a full GC in case of memory shortage
+ - MEDIUM: ssl: add support for smaller SSL records
+ - MINOR: session: release a few other pools when stopping
+ - MINOR: task: release the task pool when stopping
+ - BUG/MINOR: config: don't inherit the default balance algorithm in frontends
+ - BUG/MAJOR: frontend: initialize capture pointers earlier
+ - BUG/MINOR: stats: correctly set the request/response analysers
+ - MAJOR: polling: centralize calls to I/O callbacks
+ - DOC: fix typo in the body parser documentation for msg.sov
+ - BUG/MINOR: peers: the buffer size is global.tune.bufsize, not trash.size
+ - MINOR: sample: add a few basic internal fetches (nbproc, proc, stopping)
+ - DEBUG: pools: apply poisoning on every allocated pool
+ - BUG/MAJOR: sessions: unlink session from list on out of memory
+ - BUG/MEDIUM: patterns: previous fix was incomplete
+ - BUG/MEDIUM: payload: ensure that a request channel is available
+ - BUG/MINOR: tcp-check: don't condition data polling on check type
+ - BUG/MEDIUM: tcp-check: don't rely on random memory contents
+ - BUG/MEDIUM: tcp-checks: disable quick-ack unless next rule is an expect
+ - BUG/MINOR: config: fix typo in condition when propagating process binding
+ - BUG/MEDIUM: config: do not propagate processes between stopped processes
+ - BUG/MAJOR: stream-int: properly check the memory allocation return
+ - BUG/MEDIUM: memory: fix freeing logic in pool_gc2()
+ - BUG/MAJOR: namespaces: conn->target is not necessarily a server
+ - BUG/MEDIUM: compression: correctly report zlib_mem
+ - CLEANUP: lists: remove dead code
+ - CLEANUP: memory: remove dead code
+ - CLEANUP: memory: replace macros pool_alloc2/pool_free2 with functions
+ - MINOR: memory: cut pool allocator in 3 layers
+ - MEDIUM: memory: improve pool_refill_alloc() to pass a refill count
+ - MINOR: stream-int: retrieve session pointer from stream-int
+ - MINOR: buffer: reset a buffer in b_reset() and not channel_init()
+ - MEDIUM: buffer: use b_alloc() to allocate and initialize a buffer
+ - MINOR: buffer: move buffer initialization after channel initialization
+ - MINOR: buffer: only use b_free to release buffers
+ - MEDIUM: buffer: always assign a dummy empty buffer to channels
+ - MEDIUM: buffer: add a new buf_wanted dummy buffer to report failed allocations
+ - MEDIUM: channel: do not report full when buf_empty is present on a channel
+ - MINOR: session: group buffer allocations together
+ - MINOR: buffer: implement b_alloc_fast()
+ - MEDIUM: buffer: implement b_alloc_margin()
+ - MEDIUM: session: implement a basic atomic buffer allocator
+ - MAJOR: session: implement a wait-queue for sessions who need a buffer
+ - MAJOR: session: only allocate buffers when needed
+ - MINOR: stats: report a "waiting" flags for sessions
+ - MAJOR: session: only wake up as many sessions as available buffers permit
+ - MINOR: config: implement global setting tune.buffers.reserve
+ - MINOR: config: implement global setting tune.buffers.limit
+ - MEDIUM: channel: implement a zero-copy buffer transfer
+ - MEDIUM: stream-int: support splicing from applets
+ - OPTIM: stream-int: try to send pending spliced data
+ - CLEANUP: session: remove session_from_task()
+ - DOC: add missing entry for log-format and clarify the text
+ - MINOR: logs: add a new per-proxy "log-tag" directive
+ - BUG/MEDIUM: http: fix header removal when previous header ends with pure LF
+ - MINOR: config: extend the default max hostname length to 64 and beyond
+ - BUG/MEDIUM: channel: fix possible integer overflow on reserved size computation
+ - BUG/MINOR: channel: compare to_forward with buf->i, not buf->size
+ - MINOR: channel: add channel_in_transit()
+ - MEDIUM: channel: make buffer_reserved() use channel_in_transit()
+ - MEDIUM: channel: make bi_avail() use channel_in_transit()
+ - BUG/MEDIUM: channel: don't schedule data in transit for leaving until connected
+ - CLEANUP: channel: rename channel_reserved -> channel_is_rewritable
+ - MINOR: channel: rename channel_full() to !channel_may_recv()
+ - MINOR: channel: rename buffer_reserved() to channel_reserved()
+ - MINOR: channel: rename buffer_max_len() to channel_recv_limit()
+ - MINOR: channel: rename bi_avail() to channel_recv_max()
+ - MINOR: channel: rename bi_erase() to channel_truncate()
+ - BUG/MAJOR: log: don't try to emit a log if no logger is set
+ - MINOR: tools: add new round_2dig() function to round integers
+ - MINOR: global: always export some SSL-specific metrics
+ - MINOR: global: report information about the cost of SSL connections
+ - MAJOR: init: automatically set maxconn and/or maxsslconn when possible
+ - MINOR: http: add a new fetch "query" to extract the request's query string
+ - MINOR: hash: add new function hash_crc32
+ - MINOR: samples: provide a "crc32" converter
+ - MEDIUM: backend: add the crc32 hash algorithm for load balancing
+ - BUG/MINOR: args: add missing entry for ARGT_MAP in arg_type_names
+ - BUG/MEDIUM: http: make http-request set-header compute the string before removal
+ - MEDIUM: args: use #define to specify the number of bits used by arg types and counts
+ - MEDIUM: args: increase arg type to 5 bits and limit arg count to 5
+ - MINOR: args: add type-specific flags for each arg in a list
+ - MINOR: args: implement a new arg type for regex : ARGT_REG
+ - MEDIUM: regex: add support for passing regex flags to regex_exec_match()
+ - MEDIUM: samples: add a regsub converter to perform regex-based transformations
+ - BUG/MINOR: sample: fix case sensitivity for the regsub converter
+ - MEDIUM: http: implement http-request set-{method,path,query,uri}
+ - DOC: fix missing closing bracket on regsub
+ - MEDIUM: samples: provide basic arithmetic and bitwise operators
+ - MEDIUM: init: continue to enforce SYSTEM_MAXCONN with auto settings if set
+ - BUG/MINOR: http: fix incorrect header value offset in replace-hdr/replace-value
+ - BUG/MINOR: http: abort request processing on filter failure
+ - MEDIUM: tcp: implement tcp-ut bind option to set TCP_USER_TIMEOUT
+ - MINOR: ssl/server: add the "no-ssl-reuse" server option
+ - BUG/MAJOR: peers: initialize s->buffer_wait when creating the session
+ - MINOR: http: add a new function to iterate over each header line
+ - MINOR: http: add the new sample fetches req.hdr_names and res.hdr_names
+ - MEDIUM: task: always ensure that the run queue is consistent
+ - BUILD: Makefile: add -Wdeclaration-after-statement
+ - BUILD/CLEANUP: ssl: avoid a warning due to mixed code and declaration
+ - BUILD/CLEANUP: config: silent 3 warnings about mixed declarations with code
+ - MEDIUM: protocol: use a family array to index the protocol handlers
+ - BUILD: lua: cleanup many mixed occurrences declarations & code
+ - BUG/MEDIUM: task: fix recently introduced scheduler skew
+ - BUG/MINOR: lua: report the correct function name in an error message
+ - BUG/MAJOR: http: fix stats regression consecutive to HTTP_RULE_RES_YIELD
+ - Revert "BUG/MEDIUM: lua: can't handle the response bytes"
+ - MINOR: lua: convert IP addresses to type string
+ - CLEANUP: lua: use the same function names in C and Lua
+ - REORG/MAJOR: move session's req and resp channels back into the session
+ - CLEANUP: remove now unused channel pool
+ - REORG/MEDIUM: stream-int: introduce si_ic/si_oc to access channels
+ - MEDIUM: stream-int: add a flag indicating which side the SI is on
+ - MAJOR: stream-int: only rely on SI_FL_ISBACK to find the requested channel
+ - MEDIUM: stream-interface: remove now unused pointers to channels
+ - MEDIUM: stream-int: make si_sess() use the stream int's side
+ - MEDIUM: stream-int: use si_task() to retrieve the task from the stream int
+ - MEDIUM: stream-int: remove any reference to the owner
+ - CLEANUP: stream-int: add si_ib/si_ob to dereference the buffers
+ - CLEANUP: stream-int: add si_opposite() to find the other stream interface
+ - REORG/MEDIUM: channel: only use chn_prod / chn_cons to find stream-interfaces
+ - MEDIUM: channel: add a new flag "CF_ISRESP" for the response channel
+ - MAJOR: channel: only rely on the new CF_ISRESP flag to find the SI
+ - MEDIUM: channel: remove now unused ->prod and ->cons pointers
+ - CLEANUP: session: simplify references to chn_{prod,cons}(&s->{req,res})
+ - CLEANUP: session: use local variables to access channels / stream ints
+ - CLEANUP: session: don't needlessly pass a pointer to the stream-int
+ - CLEANUP: session: don't use si_{ic,oc} when we know the session.
+ - CLEANUP: stream-int: limit usage of si_ic/si_oc
+ - CLEANUP: lua: limit usage of si_ic/si_oc
+ - MINOR: channel: add chn_sess() helper to retrieve session from channel
+ - MEDIUM: session: simplify receive buffer allocator to only use the channel
+ - MEDIUM: lua: use CF_ISRESP to detect the channel's side
+ - CLEANUP: lua: remove the session pointer from hlua_channel
+ - CLEANUP: lua: hlua_channel_new() doesn't need the pointer to the session anymore
+ - MEDIUM: lua: remove struct hlua_channel
+ - MEDIUM: lua: remove hlua_sample_fetch
+
+2014/06/19 : 1.6-dev0
+ - exact copy of 1.5.0
+
+2014/06/19 : 1.5.0
+ - MEDIUM: ssl: ignored file names ending as '.issuer' or '.ocsp'.
+ - MEDIUM: ssl: basic OCSP stapling support.
+ - MINOR: ssl/cli: Fix inappropriate comment in code on 'set ssl ocsp-response'
+ - MEDIUM: ssl: add 300s supported time skew on OCSP response update.
+ - MINOR: checks: mysql-check: Add support for v4.1+ authentication
+ - MEDIUM: ssl: Add the option to use standardized DH parameters >= 1024 bits
+ - MEDIUM: ssl: fix detection of ephemeral diffie-hellman key exchange by using the cipher description.
+ - MEDIUM: http: add actions "replace-header" and "replace-values" in http-req/resp
+ - MEDIUM: Break out check establishment into connect_chk()
+ - MEDIUM: Add port_to_str helper
+ - BUG/MEDIUM: fix ignored values for half-closed timeouts (client-fin and server-fin) in defaults section.
+ - BUG/MEDIUM: Fix unhandled connections problem with systemd daemon mode and SO_REUSEPORT.
+ - MINOR: regex: fix a little configuration memory leak.
+ - MINOR: regex: Create JIT compatible function that return match strings
+ - MEDIUM: regex: replace all standard regex function by own functions
+ - MEDIUM: regex: Remove null terminated strings.
+ - MINOR: regex: Use native PCRE API.
+ - MINOR: missing regex.h include
+ - DOC: Add Exim as Proxy Protocol implementer.
+ - BUILD: don't use type "uint" which is not portable
+ - BUILD: stats: workaround stupid and bogus -Werror=format-security behaviour
+ - BUG/MEDIUM: http: clear CF_READ_NOEXP when preparing a new transaction
+ - CLEANUP: http: don't clear CF_READ_NOEXP twice
+ - DOC: fix proxy protocol v2 decoder example
+ - DOC: fix remaining occurrences of "pattern extraction"
+ - MINOR: log: allow the HTTP status code to be logged even in TCP frontends
+ - MINOR: logs: don't limit HTTP header captures to HTTP frontends
+ - MINOR: sample: improve sample_fetch_string() to report partial contents
+ - MINOR: capture: extend the captures to support non-header keys
+ - MINOR: tcp: prepare support for the "capture" action
+ - MEDIUM: tcp: add a new tcp-request capture directive
+ - MEDIUM: session: allow shorter retry delay if timeout connect is small
+ - MEDIUM: session: don't apply the retry delay when redispatching
+ - MEDIUM: session: redispatch earlier when possible
+ - MINOR: config: warn when tcp-check rules are used without option tcp-check
+ - BUG/MINOR: connection: make proxy protocol v1 support the UNKNOWN protocol
+ - DOC: proxy protocol example parser was still wrong
+ - DOC: minor updates to the proxy protocol doc
+ - CLEANUP: connection: merge proxy proto v2 header and address block
+ - MEDIUM: connection: add support for proxy protocol v2 in accept-proxy
+ - MINOR: tools: add new functions to quote-encode strings
+ - DOC: clarify the CSV format
+ - MEDIUM: stats: report the last check and last agent's output on the CSV status
+ - MINOR: freq_ctr: introduce a new averaging method
+ - MEDIUM: session: maintain per-backend and per-server time statistics
+ - MEDIUM: stats: report per-backend and per-server time stats in HTML and CSV outputs
+ - BUG/MINOR: http: fix typos in previous patch
+ - DOC: remove the ultra-obsolete TODO file
+ - DOC: update roadmap
+ - DOC: minor updates to the README
+ - DOC: mention the maxconn limitations with the select poller
+ - DOC: commit a few old design thoughts files
+
+2014/05/28 : 1.5-dev26
+ - BUG/MEDIUM: polling: fix possible CPU hogging of worker processes after receiving SIGUSR1.
+ - BUG/MINOR: stats: fix a typo on a closing tag for a server tracking another one
+ - OPTIM: stats: avoid the calculation of a useless link on tracking servers in maintenance
+ - MINOR: fix a few memory usage errors
+ - CONTRIB: halog: Filter input lines by date and time through timestamp
+ - MINOR: ssl: SSL_CTX_set_options() and SSL_CTX_set_mode() take a long, not an int
+ - BUG/MEDIUM: regex: fix risk of buffer overrun in exp_replace()
+ - MINOR: acl: set "str" as default match for strings
+ - DOC: Add some precisions about acl default matching method
+ - MEDIUM: acl: strengthen the option parser to report invalid options
+ - BUG/MEDIUM: config: a stats-less config crashes in 1.5-dev25
+ - BUG/MINOR: checks: tcp-check must not stop on '\0' for binary checks
+ - MINOR: stats: improve alignment of color codes to save one line of header
+ - MINOR: checks: simplify and improve reporting of state changes when using log-health-checks
+ - MINOR: server: remove the SRV_DRAIN flag which can always be deduced
+ - MINOR: server: use functions to detect state changes and to update them
+ - MINOR: server: create srv_was_usable() from srv_is_usable() and use a pointer
+ - BUG/MINOR: stats: do not report "100%" in the throttle column when server is draining
+ - BUG/MAJOR: config: don't free valid regex memory
+ - BUG/MEDIUM: session: don't clear CF_READ_NOEXP if analysers are not called
+ - BUG/MINOR: stats: tracking servers may incorrectly report an inherited DRAIN status
+ - MEDIUM: proxy: make timeout parser a bit stricter
+ - REORG/MEDIUM: server: split server state and flags in two different variables
+ - REORG/MEDIUM: server: move the maintenance bits out of the server state
+ - MAJOR: server: use states instead of flags to store the server state
+ - REORG: checks: put the functions in the appropriate files !
+ - MEDIUM: server: properly support and propagate the maintenance status
+ - MEDIUM: server: allow multi-level server tracking
+ - CLEANUP: checks: rename the server_status_printf function
+ - MEDIUM: checks: simplify server up/down/nolb transitions
+ - MAJOR: checks: move health checks changes to set_server_check_status()
+ - MINOR: server: make the status reporting function support a reason
+ - MINOR: checks: simplify health check reporting functions
+ - MINOR: server: implement srv_set_stopped()
+ - MINOR: server: implement srv_set_running()
+ - MINOR: server: implement srv_set_stopping()
+ - MEDIUM: checks: simplify failure notification using srv_set_stopped()
+ - MEDIUM: checks: simplify success notification using srv_set_running()
+ - MEDIUM: checks: simplify stopping mode notification using srv_set_stopping()
+ - MEDIUM: stats: report a server's own state instead of the tracked one's
+ - MINOR: server: make use of srv_is_usable() instead of checking eweight
+ - MAJOR: checks: add support for a new "drain" administrative mode
+ - MINOR: stats: use the admin flags for soft enable/disable/stop/start on the web page
+ - MEDIUM: stats: introduce new actions to simplify admin status management
+ - MINOR: cli: introduce a new "set server" command
+ - MINOR: stats: report a distinct output for DOWN caused by agent
+ - MINOR: checks: support specific check reporting for the agent
+ - MINOR: checks: support a neutral check result
+ - BUG/MINOR: cli: "agent" was missing from the "enable"/"disable" help message
+ - MEDIUM: cli: add support for enabling/disabling health checks.
+ - MEDIUM: stats: report down caused by agent prior to reporting up
+ - MAJOR: agent: rework the response processing and support additional actions
+ - MINOR: stats: improve the stats web page to support more actions
+ - CONTRIB: halog: avoid calling time/localtime/mktime for each line
+ - DOC: document the workarounds for Google Chrome's bogus pre-connect
+ - MINOR: stats: report SSL key computations per second
+ - MINOR: stats: add counters for SSL cache lookups and misses
+
+2014/05/10 : 1.5-dev25
+ - MEDIUM: connection: Implement and extended PROXY Protocol V2
+ - MINOR: ssl: clean unused ACLs declarations
+ - MINOR: ssl: adds fetchs and ACLs for ssl back connection.
+ - MINOR: ssl: merge client's and frontend's certificate functions.
+ - MINOR: ssl: adds ssl_f_sha1 fetch to return frontend's certificate fingerprint
+ - MINOR: ssl: adds sample converter base64 for binary type.
+ - MINOR: ssl: convert to binary ssl_fc_unique_id and ssl_bc_unique_id.
+ - BUG/MAJOR: ssl: Fallback to private session cache if current lock mode is not supported.
+ - MAJOR: ssl: Change default locks on ssl session cache.
+ - BUG/MINOR: chunk: Fix function chunk_strcmp and chunk_strcasecmp match a substring.
+ - MINOR: ssl: add global statement tune.ssl.force-private-cache.
+ - MINOR: ssl: remove fallback to SSL session private cache if lock init fails.
+ - BUG/MEDIUM: patterns: last fix was still not enough
+ - MINOR: http: export the smp_fetch_cookie function
+ - MINOR: http: generic pointer to rule argument
+ - BUG/MEDIUM: pattern: a typo breaks automatic acl/map numbering
+ - BUG/MAJOR: patterns: -i and -n are ignored for inlined patterns
+ - BUG/MINOR: proxy: unsafe initialization of HTTP transaction when switching from TCP frontend
+ - BUG/MINOR: http: log 407 in case of proxy auth
+ - MINOR: http: rely on the message body parser to send 100-continue
+ - MEDIUM: http: move reqadd after execution of http_request redirect
+ - MEDIUM: http: jump to dedicated labels after http-request processing
+ - BUG/MINOR: http: block rules forgot to increment the denied_req counter
+ - BUG/MINOR: http: block rules forgot to increment the session's request counter
+ - MEDIUM: http: move Connection header processing earlier
+ - MEDIUM: http: remove even more of the spaghetti in the request path
+ - MINOR: http: silently support the "block" action for http-request
+ - CLEANUP: proxy: rename "block_cond" to "block_rules"
+ - MEDIUM: http: emulate "block" rules using "http-request" rules
+ - MINOR: http: remove the now unused loop over "block" rules
+ - MEDIUM: http: factorize the "auth" action of http-request and stats
+ - MEDIUM: http: make http-request rules processing return a verdict instead of a rule
+ - MINOR: config: add minimum support for emitting warnings only once
+ - MEDIUM: config: inform the user about the deprecatedness of "block" rules
+ - MEDIUM: config: inform the user that "reqsetbe" is deprecated
+ - MEDIUM: config: inform the user only once that "redispatch" is deprecated
+ - MEDIUM: config: warn that '{cli,con,srv}timeout' are deprecated
+ - BUG/MINOR: auth: fix wrong return type in pat_match_auth()
+ - BUILD: config: remove a warning with clang
+ - BUG/MAJOR: http: connection setup may stall on balance url_param
+ - BUG/MEDIUM: http/session: disable client-side expiration only after body
+ - BUG/MEDIUM: http: correctly report request body timeouts
+ - BUG/MEDIUM: http: disable server-side expiration until client has sent the body
+ - MEDIUM: listener: make the accept function more robust against pauses
+ - BUILD: syscalls: remove improper inline statement in front of syscalls
+ - BUILD: ssl: SSL_CTX_set_msg_callback() needs openssl >= 0.9.7
+ - BUG/MAJOR: session: recover the correct connection pointer in half-initialized sessions
+ - DOC: add some explanation on the shared cache build options in the readme.
+ - MEDIUM: proxy: only adjust the backend's bind-process when already set
+ - MEDIUM: config: limit nbproc to the machine's word size
+ - MEDIUM: config: check the bind-process settings according to nbproc
+ - MEDIUM: listener: parse the new "process" bind keyword
+ - MEDIUM: listener: inherit the process mask from the proxy
+ - MAJOR: listener: only start listeners bound to the same processes
+ - MINOR: config: only report a warning when stats sockets are bound to more than 1 process
+ - CLEANUP: config: set the maxaccept value for peers listeners earlier
+ - BUG/MINOR: backend: only match IPv4 addresses with RDP cookies
+ - BUG/MINOR: checks: correctly configure the address family and protocol
+ - MINOR: tools: split is_addr() and is_inet_addr()
+ - MINOR: protocols: use is_inet_addr() when only INET addresses are desired
+ - MEDIUM: unix: add preliminary support for connecting to servers over UNIX sockets
+ - MEDIUM: checks: only complain about the missing port when the check uses TCP
+ - MEDIUM: unix: implement support for Linux abstract namespace sockets
+ - DOC: map_beg was missing from the table of map_* converters
+ - DOC: ebtree: indicate that prefix insertion/lookup may be used with strings
+ - MEDIUM: pattern: use ebtree's longest match to index/lookup string beginning
+ - BUILD: remove the obsolete BSD and OSX makefiles
+ - MEDIUM: unix: avoid a double connect probe when no data are sent
+ - DOC: stop referencing the slow git repository in the README
+ - BUILD: only build the systemd wrapper on Linux 2.6 and above
+ - DOC: update roadmap with completed tasks
+ - MEDIUM: session: implement half-closed timeouts (client-fin and server-fin)
+
+2014/04/26 : 1.5-dev24
+ - MINOR: pattern: find element in a reference
+ - MEDIUM: http: ACL and MAP updates through http-(request|response) rules
+ - MEDIUM: ssl: explicitly log failed handshakes after a heartbeat
+ - DOC: Full section dedicated to the converters
+ - MEDIUM: http: register http-request and http-response keywords
+ - BUG/MINOR: compression: correctly report incoming byte count
+ - BUG/MINOR: http: don't report server aborts as client aborts
+ - BUG/MEDIUM: channel: bi_putblk() must not wrap before the end of buffer
+ - CLEANUP: buffers: remove unused function buffer_contig_space_with_res()
+ - MEDIUM: stats: reimplement HTTP keep-alive on the stats page
+ - BUG/MAJOR: http: fix timeouts during data forwarding
+ - BUG/MEDIUM: http: 100-continue responses must process the next part immediately
+ - MEDIUM: http: move skipping of 100-continue earlier
+ - BUILD: stats: let gcc know that last_fwd cannot be used uninitialized...
+ - CLEANUP: general: get rid of all old occurrences of "session *t"
+ - CLEANUP: http: remove the useless "if (1)" inherited from version 1.4
+ - BUG/MEDIUM: stats: mismatch between behaviour and doc about front/back
+ - MEDIUM: http: enable analysers to have keep-alive on stats
+ - REORG: http: move HTTP Connection response header parsing earlier
+ - MINOR: stats: always emit HTTP/1.1 in responses
+ - MINOR: http: add capture.req.ver and capture.res.ver
+ - MINOR: checks: add a new global max-spread-checks directive
+ - BUG/MAJOR: http: fix the 'next' pointer when performing a redirect
+ - MINOR: http: implement the max-keep-alive-queue setting
+ - DOC: fix alphabetic order of tcp-check
+ - MINOR: connection: add a new error code for SSL with heartbeat
+ - MEDIUM: ssl: implement a workaround for the OpenSSL heartbleed attack
+ - BUG/MEDIUM: Revert "MEDIUM: ssl: Add standardized DH parameters >= 1024 bits"
+ - BUILD: http: remove a warning on strndup
+ - BUILD: ssl: avoid a warning about conn not used with OpenSSL < 1.0.1
+ - BUG/MINOR: ssl: really block OpenSSL's response to heartbleed attack
+ - MINOR: ssl: finally catch the heartbeats missing the padding
+
+2014/04/23 : 1.5-dev23
+ - BUG/MINOR: reject malformed HTTP/0.9 requests
+ - MINOR: systemd wrapper: re-execute on SIGUSR2
+ - MINOR: systemd wrapper: improve logging
+ - MINOR: systemd wrapper: propagate exit status
+ - BUG/MINOR: tcpcheck connect wrong behavior
+ - MEDIUM: proxy: support use_backend with dynamic names
+ - MINOR: stats: Enhancement to stats page to provide information of last session time.
+ - BUG/MEDIUM: peers: fix key consistency for integer stick tables
+ - DOC: fix a typo on http-server-close and encapsulate options with double-quotes
+ - DOC: fix fetching samples syntax
+ - MINOR: ssl: add ssl_fc_unique_id to fetch TLS Unique ID
+ - MEDIUM: ssl: Use ALPN support as it will be available in OpenSSL 1.0.2
+ - DOC: fix typo
+ - CLEANUP: code style: use tabs to indent codes instead of spaces
+ - DOC: fix a few config typos.
+ - BUG/MINOR: raw_sock: also consider ENOTCONN in addition to EAGAIN for recv()
+ - DOC: lowercase format string in unique-id
+ - MINOR: set IP_FREEBIND on IPv6 sockets in transparent mode
+ - BUG/MINOR: acl: req_ssl_sni fails with SSLv3 record version
+ - BUG/MINOR: build: add missing objects in osx and bsd Makefiles
+ - BUG/MINOR: build: handle whitespaces in wc -l output
+ - BUG/MINOR: Fix name lookup ordering when compiled with USE_GETADDRINFO
+ - MEDIUM: ssl: Add standardized DH parameters >= 1024 bits
+ - BUG/MEDIUM: map: The map parser includes blank lines.
+ - BUG/MINOR: log: The log of quoted capture header has been terminated by 2 quotes.
+ - MINOR: standard: add function "encode_chunk"
+ - BUG/MINOR: http: fix encoding of samples used in http headers
+ - MINOR: sample: add hex converter
+ - MEDIUM: sample: change the behavior of the bin2str cast
+ - MAJOR: auth: Change the internal authentication system.
+ - MEDIUM: acl/pattern: standardisation "of pat_parse_int()" and "pat_parse_dotted_ver()"
+ - MEDIUM: pattern: The pattern parser no more uses <opaque> and just takes one string.
+ - MEDIUM: pattern: Change the prototype of the function pattern_register().
+ - CONTRIB: ip6range: add a network IPv6 range to mask converter
+ - MINOR: pattern: separate list element from the data part.
+ - MEDIUM: pattern: add indexation function.
+ - MEDIUM: pattern: The parse functions just return "struct pattern" without memory allocation
+ - MINOR: pattern: Rename "pat_idx_elt" to "pattern_tree"
+ - MINOR: sample: don't call the sample cast function "c_none"
+ - MINOR: standard: Add function for converting cidr to network mask.
+ - MEDIUM: sample: Remove types SMP_T_CSTR and SMP_T_CBIN, replace it by SMP_F_CONST flags
+ - MEDIUM: sample/http_proto: Add new type called method
+ - MINOR: dumpstats: Group map inline help
+ - MEDIUM: pattern: The function pattern_exec_match() returns "struct pattern" if the pattern match.
+ - MINOR: dumpstats: change map inline sentences
+ - MINOR: dumpstats: change the "get map" display management
+ - MINOR: map/dumpstats: The cli cmd "get map ..." display the "int" format.
+ - MEDIUM: pattern: The match function browse itself the list or the tree.
+ - MEDIUM: pattern: Index IPv6 addresses in a tree.
+ - MEDIUM: pattern: add delete functions
+ - MEDIUM: pattern: add prune function
+ - MEDIUM: pattern: add sample lookup function.
+ - MEDIUM: pattern/dumpstats: The function pattern_lookup() is no longer used
+ - MINOR: map/pattern: The sample parser is stored in the pattern
+ - MAJOR: pattern/map: Extends the map edition system in the patterns
+ - MEDIUM: pattern: merge same pattern
+ - MEDIUM: pattern: The expected type is stored in the pattern head, and conversion is executed once.
+ - MINOR: pattern: Each pattern is identified by unique id.
+ - MINOR: pattern/acl: Each pattern of each acl can be load with specified id
+ - MINOR: pattern: The function "pattern_register()" is no longer used.
+ - MINOR: pattern: Merge function pattern_add() with pat_ref_push().
+ - MINOR: pattern: store configuration reference for each acl or map pattern.
+ - MINOR: pattern: Each pattern expression element store the reference struct.
+ - MINOR: dumpstats: display the reference for the key/pattern and value.
+ - MEDIUM: pattern: delete() function uses the pat_ref_elt to find the element to be removed
+ - MEDIUM: pattern_find_smp: functions find_smp uses the pat_ref_elt to find the element to be removed
+ - MEDIUM: dumpstats/pattern: display and use each pointer of each pattern dumped
+ - MINOR: pattern/map/acl: Centralization of the file parsers
+ - MINOR: pattern: Check if the file reference is not used with acl and map
+ - MINOR: acl/pattern: Acl "-M" option force to load file as map file with two columns
+ - MEDIUM: dumpstats: Display error message during add of values.
+ - MINOR: pattern: The function pat_ref_set() have now atomic behavior
+ - MINOR: regex: The pointer regstr in the struc regex is no longer used.
+ - MINOR: cli: Block the usage of the command "acl add" in many cases.
+ - MINOR: doc: Update the documentation about the map and acl
+ - MINOR: pattern: index duplicates
+ - MINOR: configuration: File and line propagation
+ - MINOR: dumpstat/conf: display all the configuration lines that using pattern reference
+ - MINOR: standard: Disable ip resolution during the runtime
+ - MINOR: pattern: Remove the flag "PAT_F_FROM_FILE".
+ - MINOR: pattern: forbid dns resolutions
+ - DOC: document "get map" / "get acl" on the CLI
+ - MEDIUM: acl: Change the acl register struct
+ - BUG/MEDIUM: acl: boolean only matches were broken by recent changes
+ - DOC: pattern: pattern organisation schematics
+ - MINOR: pattern/cli: Update used terms in documentation and cli
+ - MINOR: cli: remove information about acl or map owner.
+ - MINOR: session: don't always assume there's a listener
+ - MINOR: pattern: Add function to prune and reload pattern list.
+ - MINOR: standard: Add ipv6 support in the function url2sa().
+ - MEDIUM: config: Dynamic sections.
+ - BUG/MEDIUM: stick-table: fix IPv4-to-IPv6 conversion in src_* fetches
+ - MINOR: http: Add the "language" converter for use with accept-language
+ - BUG/MINOR: log: Don't dump empty unique-id
+ - BUG/MAJOR: session: fix a possible crash with src_tracked
+ - DOC: Update "language" documentation
+ - MINOR: http: add the function "del-header" to the directives http-request and http-response
+ - DOC: add some information on capture.(req|res).hdr
+ - MINOR: http: capture.req.method and capture.req.uri
+ - MINOR: http: optimize capture.req.method and capture.req.uri
+ - MINOR: session: clean up the connection free code
+ - BUG/MEDIUM: checks: immediately report a connection success
+ - MEDIUM: connection: don't use real send() flags in snd_buf()
+ - OPTIM: ssl: implement dynamic record size adjustment
+ - MINOR: stats: report exact last session time in backend too
+ - BUG/MEDIUM: stats: the "lastsess" field must appear last in the CSV.
+ - BUG/MAJOR: check: fix memory leak in "tcp-check connect" over SSL
+ - BUG/MINOR: channel: initialize xfer_small/xfer_large on new buffers
+ - MINOR: channel: add the date of last read in the channel
+ - MEDIUM: stream-int: automatically disable CF_STREAMER flags after idle
+ - MINOR: ssl: add DEFAULT_SSL_MAX_RECORD to set the record size at build time
+ - MINOR: config: make the stream interface idle timer user-configurable
+ - MINOR: config: add global directives to set default SSL ciphers
+ - MINOR: sample: add a rand() sample fetch to return a sample.
+ - BUG/MEDIUM: config: immediately abort if peers section has no name
+ - BUG/MINOR: ssl: fix syntax in config error message
+ - BUG/MEDIUM: ssl: always send a full buffer after EAGAIN
+ - BUG/MINOR: config: server on-marked-* statement is ignored in default-server
+ - BUG/MEDIUM: backend: prefer-last-server breaks redispatch
+ - BUG/MEDIUM: http: continue to emit 503 on keep-alive to different server
+ - MEDIUM: acl: fix pattern type for payload / payload_lv
+ - BUG/MINOR: config: fix a crash on startup when a disabled backend references a peer
+ - BUG/MEDIUM: compression: fix the output type of the compressor name
+ - BUG/MEDIUM: http: don't start to forward request data before the connect
+ - MINOR: http: release compression context only in http_end_txn()
+ - MINOR: protect ebimtree/ebistree against multiple inclusions
+ - MEDIUM: proxy: create a tree to store proxies by name
+ - MEDIUM: proxy: make findproxy() use trees to look up proxies
+ - MEDIUM: proxy: make get_backend_server() use findproxy() to lookup proxies
+ - MEDIUM: stick-table: lookup table names using trees.
+ - MEDIUM: config: faster lookup for duplicated proxy name
+ - CLEANUP: acl: remove obsolete test in parse_acl_expr()
+ - MINOR: sample: move smp_to_type to sample.c
+ - MEDIUM: compression: consider the "q=" attribute in Accept-Encoding
+ - REORG: cfgparse: move server keyword parsing to server.c
+ - BUILD: adjust makefile for AIX 5.1
+ - BUG/MEDIUM: pattern: fix wrong definition of the pat_prune_fcts array
+ - CLEANUP: pattern: move array definitions to proto/ and not types/
+ - BUG/MAJOR: counters: check for null-deref when looking up an alternate table
+ - BUILD: ssl: previous patch failed
+ - BUILD/MEDIUM: standard: get rid of the last strcpy()
+ - BUILD/MEDIUM: standard: get rid of sprintf()
+ - BUILD/MEDIUM: cfgparse: get rid of sprintf()
+ - BUILD/MEDIUM: checks: get rid of sprintf()
+ - BUILD/MEDIUM: http: remove calls to sprintf()
+ - BUG/MEDIUM: systemd-wrapper: fix locating of haproxy binary
+ - BUILD/MINOR: ssl: remove one call to sprintf()
+ - MEDIUM: http: don't reject anymore message bodies not containing the url param
+ - MEDIUM: http: wait for the first chunk or message body length in http_process_body
+ - CLEANUP: http: rename http_process_request_body()
+ - CLEANUP: http: prepare dedicated processing for chunked encoded message bodies
+ - MINOR: http: make msg->eol carry the last CRLF length
+ - MAJOR: http: do not use msg->sol while processing messages or forwarding data
+ - MEDIUM: http: http_parse_chunk_crlf() must not advance the buffer pointer
+ - MAJOR: http: don't update msg->sov anymore while processing the body
+ - MINOR: http: add a small helper to compute the amount of body bytes present
+ - MEDIUM: http: add a small helper to compute how far to rewind to find headers
+ - MINOR: http: add a small helper to compute how far to rewind to find URI
+ - MEDIUM: http: small helpers to compute how far to rewind to find BODY and DATA
+ - MAJOR: http: reset msg->sov after headers are forwarded
+ - MEDIUM: http: forward headers again while waiting for connection to complete
+ - BUG/MINOR: http: deinitialize compression after a parsing error
+ - BUG/MINOR: http: deinitialize compression after a compression error
+ - MEDIUM: http: headers must be forwarded even if data was already inspected
+ - MAJOR: http: re-enable compression on chunked encoding
+ - MAJOR: http/compression: fix chunked-encoded response processing
+ - MEDIUM: http: cleanup: centralize a little bit HTTP compression end
+ - MEDIUM: http: start to centralize the forwarding code
+ - MINOR: http: further cleanups of response forwarding function
+ - MEDIUM: http: only allocate the temporary compression buffer when needed
+ - MAJOR: http: centralize data forwarding in the request path
+ - CLEANUP: http: document the response forwarding states
+ - CLEANUP: http: remove all calls to http_silent_debug()
+ - DOC: internal: add some reminders about HTTP parsing and pointer states
+ - BUG/MAJOR: http: fix bug in parse_qvalue() when selecting compression algo
+ - BUG/MINOR: stats: last session was not always set
+ - DOC: add pointer to the Cyril's HTML doc in the README
+ - MEDIUM: config: relax use_backend check to make the condition optional
+ - MEDIUM: config: report misplaced http-request rules
+ - MEDIUM: config: report misplaced use-server rules
+ - DOC: update roadmap with what was done.
+
+2014/02/03 : 1.5-dev22
+ - MEDIUM: tcp-check new feature: connect
+ - MEDIUM: ssl: Set verify 'required' as global default for servers side.
+ - MINOR: ssl: handshake optim for long certificate chains.
+ - BUG/MINOR: pattern: pattern comparison executed twice
+ - BUG/MEDIUM: map: segmentation fault with the stats's socket command "set map ..."
+ - BUG/MEDIUM: pattern: Segfault in binary parser
+ - MINOR: pattern: move functions for grouping pat_match_* and pat_parse_* and add documentation.
+ - MINOR: standard: The parse_binary() returns the length consumed and its documentation is updated
+ - BUG/MINOR: payload: the patterns of the acl "req.ssl_ver" are not parsed with the correct function.
+ - BUG/MEDIUM: pattern: "pat_parse_dotted_ver()" set bad expect_type.
+ - BUG/MINOR: sample: The c_str2int converter does not fail if the entry is not an integer
+ - BUG/MEDIUM: http/auth: Sometimes the authentication credentials can be mixed between two requests
+ - MINOR: doc: Bad cli function name.
+ - MINOR: http: smp_fetch_capture_header_* fetch captured headers
+ - BUILD: last release inadvertently prepended a "+" in front of the date
+ - BUG/MEDIUM: stream-int: fix the keep-alive idle connection handler
+ - BUG/MEDIUM: backend: do not re-initialize the connection's context upon reuse
+ - BUG: Revert "OPTIM/MEDIUM: epoll: fuse active events into polled ones during polling changes"
+ - BUG/MINOR: checks: successful check completion must not re-enable MAINT servers
+ - MINOR: http: try to stick to same server after status 401/407
+ - BUG/MINOR: http: always disable compression on HTTP/1.0
+ - OPTIM: poll: restore polling after a poll/stop/want sequence
+ - OPTIM: http: don't stop polling for read on the client side after a request
+ - BUG/MEDIUM: checks: unchecked servers could not be enabled anymore
+ - BUG/MEDIUM: stats: the web interface must check the tracked servers before enabling
+ - BUG/MINOR: channel: CHN_INFINITE_FORWARD must be unsigned
+ - BUG/MINOR: stream-int: do not clear the owner upon unregister
+ - MEDIUM: stats: add support for HTTP keep-alive on the stats page
+ - BUG/MEDIUM: stats: fix HTTP/1.0 breakage introduced in previous patch
+ - Revert "MEDIUM: stats: add support for HTTP keep-alive on the stats page"
+ - MAJOR: channel: add a new flag CF_WAKE_WRITE to notify the task of writes
+ - OPTIM: session: set the READ_DONTWAIT flag when connecting
+ - BUG/MINOR: http: don't clear the SI_FL_DONT_WAKE flag between requests
+ - MINOR: session: factor out the connect time measurement
+ - MEDIUM: session: prepare to support earlier transitions to the established state
+ - MEDIUM: stream-int: make si_connect() return an established state when possible
+ - MINOR: checks: use an inline function for health_adjust()
+ - OPTIM: session: put unlikely() around the freewheeling code
+ - MEDIUM: config: report a warning when multiple servers have the same name
+ - BUG: Revert "OPTIM: poll: restore polling after a poll/stop/want sequence"
+ - BUILD/MINOR: listener: remove a glibc warning on accept4()
+ - BUG/MAJOR: connection: fix mismatch between rcv_buf's API and usage
+ - BUILD: listener: fix recent accept4() again
+ - BUG/MAJOR: ssl: fix breakage caused by recent fix abf08d9
+ - BUG/MEDIUM: polling: ensure we update FD status when there's no more activity
+ - MEDIUM: listener: fix polling management in the accept loop
+ - MINOR: protocol: improve the proto->drain() API
+ - MINOR: connection: add a new conn_drain() function
+ - MEDIUM: tcp: report in tcp_drain() that lingering is already disabled on close
+ - MEDIUM: connection: update callers of ctrl->drain() to use conn_drain()
+ - MINOR: connection: add more error codes to report connection errors
+ - MEDIUM: tcp: report connection error at the connection level
+ - MEDIUM: checks: make use of chk_report_conn_err() for connection errors
+ - BUG/MEDIUM: unique_id: HTTP request counter is not stable
+ - DOC: fix misleading information about SIGQUIT
+ - BUG/MAJOR: fix freezes during compression
+ - BUG/MEDIUM: stream-interface: don't wake the task up before end of transfer
+ - BUILD: fix VERDATE exclusion regex
+ - CLEANUP: polling: rename "spec_e" to "state"
+ - DOC: add a diagram showing polling state transitions
+ - REORG: polling: rename "spec_e" to "state" and "spec_p" to "cache"
+ - REORG: polling: rename "fd_spec" to "fd_cache"
+ - REORG: polling: rename the cache allocation functions
+ - REORG: polling: rename "fd_process_spec_events()" to "fd_process_cached_events()"
+ - MAJOR: polling: rework the whole polling system
+ - MAJOR: connection: remove the CO_FL_WAIT_{RD,WR} flags
+ - MEDIUM: connection: remove conn_{data,sock}_poll_{recv,send}
+ - MEDIUM: connection: add check for readiness in I/O handlers
+ - MEDIUM: stream-interface: the polling flags must always be updated in chk_snd_conn
+ - MINOR: stream-interface: no need to call fd_stop_both() on error
+ - MEDIUM: connection: no need to recheck FD state
+ - CLEANUP: connection: use conn_ctrl_ready() instead of checking the flag
+ - CLEANUP: connection: use conn_xprt_ready() instead of checking the flag
+ - CLEANUP: connection: fix comments in connection.h to reflect new behaviour.
+ - OPTIM: raw-sock: don't speculate after a short read if polling is enabled
+ - MEDIUM: polling: centralize polled events processing
+ - MINOR: polling: create function fd_compute_new_polled_status()
+ - MINOR: cli: add more information to the "show info" output
+ - MEDIUM: listener: add support for limiting the session rate in addition to the connection rate
+ - MEDIUM: listener: apply a limit on the session rate submitted to SSL
+ - REORG: stats: move the stats socket states to dumpstats.c
+ - MINOR: cli: add the new "show pools" command
+ - BUG/MEDIUM: counters: flush content counters after each request
+ - BUG/MEDIUM: counters: fix stick-table entry leak when using track-sc2 in connection
+ - MINOR: tools: add very basic support for composite pointers
+ - MEDIUM: counters: stop relying on session flags at all
+ - BUG/MINOR: cli: fix missing break in command line parser
+ - BUG/MINOR: config: correctly report when log-format headers require HTTP mode
+ - MAJOR: http: update connection mode configuration
+ - MEDIUM: http: make keep-alive + httpclose be passive mode
+ - MAJOR: http: switch to keep-alive mode by default
+ - BUG/MEDIUM: http: fix regression caused by recent switch to keep-alive by default
+ - BUG/MEDIUM: listener: improve detection of non-working accept4()
+ - BUILD: listener: add fcntl.h and unistd.h
+ - BUG/MINOR: raw_sock: correctly set the MSG_MORE flag
+
+2013/12/17 : 1.5-dev21
+ - MINOR: stats: don't use a monospace font to report numbers
+ - MINOR: session: remove debugging code
+ - BUG/MAJOR: patterns: fix double free caused by loading strings from files
+ - MEDIUM: http: make option http_proxy automatically rewrite the URL
+ - BUG/MEDIUM: http: cook_cnt() forgets to set its output type
+ - BUG/MINOR: stats: correctly report throttle rate of low weight servers
+ - BUG/MEDIUM: checks: servers must not start in slowstart mode
+ - BUG/MINOR: acl: parser must also stop at comma on ACL-only keywords
+ - MEDIUM: stream-int: implement a very simplistic idle connection manager
+ - DOC: update the ROADMAP file
+
+2013/12/16 : 1.5-dev20
+ - DOC: add missing options to the manpage
+ - DOC: add manpage references to all system calls
+ - DOC: update manpage reference to haproxy-en.txt
+ - DOC: remove -s and -l options from the manpage
+ - DOC: missing information for the "description" keyword
+ - DOC: missing http-send-name-header keyword in keyword table
+ - MINOR: tools: function my_memmem() to lookup binary contents
+ - MEDIUM: checks: add send/expect tcp based check
+ - MEDIUM: backend: Enhance hash-type directive with algorithm options
+ - MEDIUM: backend: Implement avalanche as a modifier of the hashing functions.
+ - DOC: Documentation for hashing function, with test results.
+ - BUG/MEDIUM: ssl: potential memory leak using verifyhost
+ - BUILD: ssl: compilation issue with openssl v0.9.6.
+ - BUG/MINOR: ssl: potential memory leaks using ssl_c_key_alg or ssl_c_sig_alg.
+ - MINOR: ssl: optimization of verifyhost on wildcard certificates.
+ - BUG/MINOR: ssl: verifyhost does not match empty strings on wildcard.
+ - MINOR: ssl: Add statement 'verifyhost' to "server" statements
+ - CLEANUP: session: remove event_accept() which was not used anymore
+ - BUG/MINOR: deinit: free fdinfo while doing cleanup
+ - DOC: minor typo fix in documentation
+ - BUG/MEDIUM: server: set the macro for server's max weight SRV_UWGHT_MAX to SRV_UWGHT_RANGE
+ - BUG/MINOR: use the same check condition for server as other algorithms
+ - DOC: fix typo in comments
+ - BUG/MINOR: deinit: free server map which is allocated in init_server_map()
+ - CLEANUP: stream_interface: cleanup loop information in si_conn_send_loop()
+ - MINOR: buffer: align the last output line of buffer_dump()
+ - MINOR: buffer: align the last output line if there are less than 8 characters left
+ - DOC: stick-table: modify the description
+ - OPTIM: stream_interface: return directly if the connection flag CO_FL_ERROR has been set
+ - CLEANUP: code style: use tabs to indent codes
+ - DOC: checkcache: block responses with cacheable cookies
+ - BUG/MINOR: check_config_validity: check the returned value of stktable_init()
+ - MEDIUM: haproxy-systemd-wrapper: Use haproxy in same directory
+ - MEDIUM: systemd-wrapper: Kill child processes when interrupted
+ - LOW: systemd-wrapper: Write debug information to stdout
+ - BUG/MINOR: http: fix "set-tos" not working in certain configurations
+ - MEDIUM: http: add IPv6 support for "set-tos"
+ - DOC: ssl: update build instructions to use new SSL_* variables
+ - BUILD/MINOR: systemd: fix compiler warning about unused result
+ - url32+src - like base32+src but whole url including parameters
+ - BUG/MINOR: fix forcing fastinter in "on-error"
+ - CLEANUP: Make parameters of srv_downtime and srv_getinter const
+ - CLEANUP: Remove unused 'last_slowstart_change' field from struct peer
+ - MEDIUM: Split up struct server's check element
+ - MEDIUM: Move result element to struct check
+ - MEDIUM: Parametrise functions over the check of a server
+ - MEDIUM: cfgparse: Factor out check initialisation
+ - MEDIUM: Add state to struct check
+ - MEDIUM: Move health element to struct check
+ - MEDIUM: Add helper for task creation for checks
+ - MEDIUM: Add helper function for failed checks
+ - MEDIUM: Log agent fail, stopped or down as info
+ - MEDIUM: Remove option lb-agent-chk
+ - MEDIUM: checks: Add supplementary agent checks
+ - MEDIUM: Do not mark a server as down if the agent is unavailable
+ - MEDIUM: Set rise and fall of agent checks to 1
+ - MEDIUM: Add enable and disable agent unix socket commands
+ - MEDIUM: Add DRAIN state and report it on the stats page
+ - BUILD/MINOR: missing header file
+ - CLEANUP: regex: Create regex_comp function that compiles regex using compilation options
+ - CLEANUP: The function "regex_exec" needs the string length but in many cases a null-terminated string is expected.
+ - MINOR: http: some exported functions were not in the header file
+ - MINOR: http: change url_decode to return the size of the decoded string.
+ - BUILD/MINOR: missing header file
+ - BUG/MEDIUM: sample: The function v4tov6 cannot support input and output overlap
+ - BUG/MINOR: arg: fix error reporting for add-header/set-header sample fetch arguments
+ - MINOR: sample: export the generic sample conversion parser
+ - MINOR: sample: export sample_casts
+ - MEDIUM: acl: use the fetch syntax 'fetch(args),conv(),conv()' into the ACL keyword
+ - MINOR: stick-table: use smp_expr_output_type() to retrieve the output type of a "struct sample_expr"
+ - MINOR: sample: provide the original sample_conv descriptor struct to the argument checker function.
+ - MINOR: tools: Add a function to convert buffer to an ipv6 address
+ - MINOR: acl: export acl arrays
+ - MINOR: acl: Extract the pattern parsing and indexation from the "acl_read_patterns_from_file()" function
+ - MINOR: acl: Extract the pattern matching function
+ - MINOR: sample: Define new struct sample_storage
+ - MEDIUM: acl: associate "struct sample_storage" to each "struct acl_pattern"
+ - REORG: acl/pattern: extract pattern matching from the acl file and create pattern.c
+ - MEDIUM: pattern: create pattern expression
+ - MEDIUM: pattern: rename "acl" prefix to "pat"
+ - MEDIUM: sample: let the cast functions set their output type
+ - MINOR: sample: add a private field to the struct sample_conv
+ - MINOR: map: Define map types
+ - MEDIUM: sample: add the "map" converter
+ - MEDIUM: http: The redirect strings follows the log format rules.
+ - BUG/MINOR: acl: acl parser does not recognize empty converter list
+ - BUG/MINOR: map: The map list was declared in the map.h file
+ - MINOR: map: Cleanup the initialisation of map descriptors.
+ - MEDIUM: map: merge identical maps
+ - BUG/MEDIUM: pattern: Pattern node has type of "struct pat_idx_elt" in place of "struct eb_node"
+ - BUG/MEDIUM: map: Bad map file parser
+ - CLEANUP/MINOR: standard: use the system define INET6_ADDRSTRLEN in place of MAX_IP6_LEN
+ - BUG/MEDIUM: sample: conversion from str to ipv6 may read data past end
+ - MINOR: map: export map_get_reference() function
+ - MINOR: pattern: Each pattern sets the expected input type
+ - MEDIUM: acl: Last patch change the output type
+ - MEDIUM: pattern: Extract the index process from the pat_parse_*() functions
+ - MINOR: standard: The function parse_binary() can use preallocated buffer
+ - MINOR: regex: Change the struct containing regex
+ - MINOR: regex: Copy the original regex expression into string.
+ - MINOR: pattern: add support for compiling patterns for lookups
+ - MINOR: pattern: make the pattern matching function return a pointer to the matched element
+ - MINOR: map: export parse output sample functions
+ - MINOR: pattern: add function to lookup a specific entry in pattern list
+ - MINOR: pattern/map: Each pattern must free the associated sample
+ - MEDIUM: dumpstat: make the CLI parser understand the backslash as an escape char
+ - MEDIUM: map: dynamic manipulation of maps
+ - BUG/MEDIUM: unique_id: junk in log on empty unique_id
+ - BUG/MINOR: log: junk at the end of syslog packet
+ - MINOR: Makefile: provide cscope rule
+ - DOC: compression: chunk are not compressed anymore
+ - MEDIUM: session: disable lingering on the server when the client aborts
+ - BUG/MEDIUM: prevent gcc from moving empty keywords lists into BSS
+ - DOC: remove the comment saying that SSL certs are not checked on the server side
+ - BUG: counters: third counter was not stored if others unset
+ - BUG/MAJOR: http: don't emit the send-name-header when no server is available
+ - BUG/MEDIUM: http: "option checkcache" fails with the no-cache header
+ - BUG/MAJOR: http: sample prefetch code was not properly migrated
+ - BUG/MEDIUM: splicing: fix abnormal CPU usage with splicing
+ - BUG/MINOR: stream_interface: don't call chk_snd() on polled events
+ - OPTIM: splicing: use splice() for the last block when relevant
+ - MEDIUM: sample: handle comma-delimited converter list
+ - MINOR: sample: fix sample_process handling of unstable data
+ - CLEANUP: acl: move the 3 remaining sample fetches to samples.c
+ - MINOR: sample: add a new "date" fetch to return the current date
+ - MINOR: samples: add the http_date([<offset>]) sample converter.
+ - DOC: minor improvements to the part on the stats socket.
+ - MEDIUM: sample: systematically pass the keyword pointer to the keyword
+ - MINOR: payload: split smp_fetch_rdp_cookie()
+ - MINOR: counters: factor out smp_fetch_sc*_tracked
+ - MINOR: counters: provide a generic function to retrieve a stkctr for sc* and src.
+ - MEDIUM: counters: factor out smp_fetch_sc*_get_gpc0
+ - MEDIUM: counters: factor out smp_fetch_sc*_gpc0_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_inc_gpc0
+ - MEDIUM: counters: factor out smp_fetch_sc*_clr_gpc0
+ - MEDIUM: counters: factor out smp_fetch_sc*_conn_cnt
+ - MEDIUM: counters: factor out smp_fetch_sc*_conn_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_conn_cur
+ - MEDIUM: counters: factor out smp_fetch_sc*_sess_cnt
+ - MEDIUM: counters: factor out smp_fetch_sc*_sess_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_http_req_cnt
+ - MEDIUM: counters: factor out smp_fetch_sc*_http_req_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_http_err_cnt
+ - MEDIUM: counters: factor out smp_fetch_sc*_http_err_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_kbytes_in
+ - MEDIUM: counters: factor out smp_fetch_sc*_bytes_in_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_kbytes_out
+ - MEDIUM: counters: factor out smp_fetch_sc*_bytes_out_rate
+ - MEDIUM: counters: factor out smp_fetch_sc*_trackers
+ - MINOR: session: make the number of stick counter entries more configurable
+ - MEDIUM: counters: support passing the counter number as a fetch argument
+ - MEDIUM: counters: support looking up a key in an alternate table
+ - MEDIUM: cli: adjust the method for feeding frequency counters in tables
+ - MINOR: cli: make it possible to enter multiple values at once with "set table"
+ - MINOR: payload: allow the payload sample fetches to retrieve arbitrary lengths
+ - BUG/MINOR: cli: "clear table" must not kill entries that don't match condition
+ - MINOR: ssl: use MAXPATHLEN instead of PATH_MAX
+ - MINOR: config: warn when a server with no specific port uses rdp-cookie
+ - BUG/MEDIUM: unique_id: HTTP request counter must be unique!
+ - DOC: add a mention about the limited chunk size
+ - BUG/MEDIUM: fix broken send_proxy on FreeBSD
+ - MEDIUM: stick-tables: flush old entries upon soft-stop
+ - MINOR: tcp: add new "close" action for tcp-response
+ - MINOR: payload: provide the "res.len" fetch method
+ - BUILD: add SSL_INC/SSL_LIB variables to force the path to openssl
+ - MINOR: http: compute response time before processing headers
+ - BUG/MINOR: acl: fix improper string size assignment in proxy argument
+ - BUG/MEDIUM: http: accept full buffers on smp_prefetch_http
+ - BUG/MINOR: acl: implicit arguments of ACL keywords were not properly resolved
+ - BUG/MEDIUM: session: risk of crash on out of memory conditions
+ - BUG/MINOR: peers: set the accept date in outgoing connections
+ - BUG/MEDIUM: tcp: do not skip tracking rules on second pass
+ - BUG/MEDIUM: acl: do not evaluate next terms after a miss
+ - MINOR: acl: add a warning when an ACL keyword is used without any value
+ - MINOR: tcp: don't use tick_add_ifset() when timeout is known to be set
+ - BUG/MINOR: acl: remove patterns from the tree before freeing them
+ - MEDIUM: backend: add support for the wt6 hash
+ - OPTIM/MEDIUM: epoll: fuse active events into polled ones during polling changes
+ - OPTIM/MINOR: mark the source address as already known on accept()
+ - BUG/MINOR: stats: don't count tarpitted connections twice
+ - CLEANUP: http: homogenize processing of denied req counter
+ - CLEANUP: http: merge error handling for req* and http-request *
+ - BUG/MEDIUM: http: fix possible parser crash when parsing erroneous "http-request redirect" rules
+ - BUG/MINOR: http: fix build warning introduced with url32/url32_src
+ - BUG/MEDIUM: checks: fix slow start regression after fix attempt
+ - BUG/MAJOR: server: weight calculation fails for map-based algorithms
+ - MINOR: stats: report correct throttling percentage for servers in slowstart
+ - OPTIM: connection: fold the error handling with handshake handling
+ - MINOR: peers: accept to learn strings of different lengths
+ - BUG/MAJOR: fix haproxy crash when using server tracking instead of checks
+ - BUG/MAJOR: check: fix haproxy crash during soft-stop/soft-start
+ - BUG/MINOR: stats: do not report "via" on tracking servers in maintenance
+ - BUG/MINOR: connection: fix typo in error message report
+ - BUG/MINOR: backend: fix target address retrieval in transparent mode
+ - BUG/MINOR: config: report the correct track-sc number in tcp-rules
+ - BUG/MINOR: log: fix log-format parsing errors
+ - DOC: add some information about how to apply converters to samples
+ - MINOR: acl/pattern: use types different from int to clarify who does what.
+ - MINOR: pattern: import acl_find_match_name() into pattern.h
+ - MEDIUM: stick-tables: support automatic conversion from ipv4<->ipv6
+ - MEDIUM: log-format: relax parsing of '%' followed by unsupported characters
+ - BUG/MINOR: http: usual deinit stuff in last commit
+ - BUILD: log: silent a warning about isblank() with latest patches
+ - BUG/MEDIUM: checks: fix health check regression causing them to depend on declaration order
+ - BUG/MEDIUM: checks: fix a long-standing issue with reporting connection errors
+ - BUG/MINOR: checks: don't consider errno and use conn->err_code
+ - BUG/MEDIUM: checks: also update the DRAIN state from the web interface
+ - MINOR: stats: remove some confusion between the DRAIN state and NOLB
+ - BUG/MINOR: tcp: check that no error is pending during a connect probe
+ - BUG/MINOR: connection: check EINTR when sending a PROXY header
+ - MEDIUM: connection: set the socket shutdown flags on socket errors
+ - BUG/MEDIUM: acl: fix regression introduced by latest converters support
+ - MINOR: connection: clear errno prior to checking for errors
+ - BUG/MINOR: checks: do not trust errno in write event before any syscall
+ - MEDIUM: checks: centralize error reporting
+ - OPTIM: checks: don't poll on recv when using plain TCP connects
+ - OPTIM: checks: avoid setting SO_LINGER twice
+ - MINOR: tools: add a generic binary hex string parser
+ - BUG/MEDIUM: checks: tcp-check: do not poll when there's nothing to send
+ - BUG/MEDIUM: check: tcp-check might miss some outgoing data when socket buffers are full
+ - BUG/MEDIUM: args: fix double free on error path in argument expression parser
+ - BUG/MINOR: acl: fix sample expression error reporting
+ - BUG/MINOR: checks: tcp-check actions are enums, not flags
+ - MEDIUM: checks: make tcp-check perform multiple send() at once
+ - BUG/MEDIUM: stick: completely remove the unused flag from the store entries
+ - OPTIM: ebtree: pack the struct eb_node to avoid holes on 64-bit
+ - BUG/MEDIUM: stick-tables: complete the latest fix about store-responses
+ - CLEANUP: stream_interface: remove unused field err_loc
+ - MEDIUM: stats: don't use conn->xprt_st anymore
+ - MINOR: session: add a simple function to retrieve a session from a task
+ - MEDIUM: stats: don't use conn->xprt_ctx anymore
+ - MEDIUM: peers: don't rely on conn->xprt_ctx anymore
+ - MINOR: http: prevent smp_fetch_url_{ip,port} from using si->conn
+ - MINOR: connection: make it easier to emit proxy protocol for unknown addresses
+ - MEDIUM: stats: prepare the HTTP stats I/O handler to support more states
+ - MAJOR: stats: move the HTTP stats handling to its applet
+ - MEDIUM: stats: move request argument processing to the final step
+ - MEDIUM: session: detect applets from the session by using s->target
+ - MAJOR: session: check for a connection to an applet in sess_prepare_conn_req()
+ - MAJOR: session: pass applet return traffic through the response analysers
+ - MEDIUM: stream-int: split the shutr/shutw functions between applet and conn
+ - MINOR: stream-int: make the shutr/shutw functions void
+ - MINOR: obj: provide a safe and an unsafe access to pointed objects
+ - MINOR: connection: add a field to store an object type
+ - MINOR: connection: always initialize conn->objt_type to OBJ_TYPE_CONN
+ - MEDIUM: stream interface: move the peers' ptr into the applet context
+ - MINOR: stream-interface: move the applet context to its own struct
+ - MINOR: obj: introduce a new type appctx
+ - MINOR: stream-int: rename ->applet to ->appctx
+ - MINOR: stream-int: split si_prepare_embedded into si_prepare_none and si_prepare_applet
+ - MINOR: stream-int: add a new pointer to the end point
+ - MEDIUM: stream-interface: set the pointer to the applet into the applet context
+ - MAJOR: stream interface: remove the ->release function pointer
+ - MEDIUM: stream-int: make ->end point to the connection or the appctx
+ - CLEANUP: stream-int: remove obsolete si_ctrl function
+ - MAJOR: stream-int: stop using si->conn and use si->end instead
+ - MEDIUM: stream-int: do not allocate a connection in parallel to applets
+ - MEDIUM: session: attach incoming connection to target on embryonic sessions
+ - MINOR: connection: add conn_init() to (re)initialize a connection
+ - MINOR: checks: call conn_init() to properly initialize the connection.
+ - MINOR: peers: make use of conn_init() to initialize the connection
+ - MINOR: session: use conn_init() to initialize the connections
+ - MINOR: http: use conn_init() to reinitialize the server connection
+ - MEDIUM: connection: replace conn_prepare with conn_assign
+ - MINOR: get rid of si_takeover_conn()
+ - MINOR: connection: add conn_new() / conn_free()
+ - MAJOR: connection: add two new flags to indicate readiness of control/transport
+ - MINOR: stream-interface: introduce si_reset() and si_set_state()
+ - MINOR: connection: reintroduce conn_prepare to set the protocol and transport
+ - MINOR: connection: replace conn_assign with conn_attach
+ - MEDIUM: stream-interface: introduce si_attach_conn to replace si_prepare_conn
+ - MAJOR: stream interface: dynamically allocate the outgoing connection
+ - MEDIUM: connection: move the send_proxy offset to the connection
+ - MINOR: connection: check for send_proxy during the connect(), not the SI
+ - MEDIUM: connection: merge the send_proxy and local_send_proxy calls
+ - MEDIUM: stream-int: replace occurrences of si->appctx with si_appctx()
+ - MEDIUM: stream-int: return the allocated appctx in stream_int_register_handler()
+ - MAJOR: stream-interface: dynamically allocate the applet context
+ - MEDIUM: session: automatically register the applet designated by the target
+ - MEDIUM: stats: delay appctx initialization
+ - CLEANUP: peers: use less confusing state/status code names
+ - MEDIUM: peers: delay appctx initialization
+ - MINOR: stats: provide some appctx information in "show sess all"
+ - DIET/MINOR: obj: pack the obj_type enum to 8 bits
+ - DIET/MINOR: connection: rearrange a few fields to save 8 bytes in the struct
+ - DIET/MINOR: listener: rearrange a few fields in struct listener to save 16 bytes
+ - DIET/MINOR: proxy: rearrange a few fields in struct proxy to save 16 bytes
+ - DIET/MINOR: session: reduce the struct session size by 8 bytes
+ - DIET/MINOR: stream-int: rearrange a few fields in struct stream_interface to save 8 bytes
+ - DIET/MINOR: http: reduce the size of struct http_txn by 8 bytes
+ - MINOR: http: switch the http state to an enum
+ - MINOR: http: use an enum for the auth method in http_auth_data
+ - DIET/MINOR: task: reduce struct task size by 8 bytes
+ - MINOR: stream_interface: add reporting of resource allocation errors
+ - MINOR: session: report lack of resources using the new stream-interface's error code
+ - BUILD: simplify the date and version retrieval in the makefile
+ - BUILD: prepare the makefile to skip format lines in SUBVERS and VERDATE
+ - BUILD: use format tags in VERDATE and SUBVERS files
+ - BUG/MEDIUM: channel: bo_getline() must wait for \n until buffer is full
+ - CLEANUP: check: server port is unsigned
+ - BUG/MEDIUM: checks: agent doesn't get the response if the server does not close
+ - MINOR: tools: buf2ip6 must not modify output on failure
+ - MINOR: pattern: do not assign SMP_TYPES by default to patterns
+ - MINOR: sample: make sample_parse_expr() use memprintf() to report parse errors
+ - MINOR: arg: improve wording on error reporting
+ - BUG/MEDIUM: sample: simplify and fix the argument parsing
+ - MEDIUM: acl: fix the argument parser to let the lower layer report detailed errors
+ - MEDIUM: acl: fix the initialization order of the ACL expression
+ - CLEANUP: acl: remove useless blind copy-paste from sample converters
+ - TESTS: add regression tests for ACL and sample expression parsers
+ - BUILD: time: adapt the type of TV_ETERNITY to the local system
+ - MINOR: chunks: allocate the trash chunks before parsing the config
+ - BUILD: definitely silence some stupid GCC warnings
+ - MINOR: chunks: always initialize the output chunk in get_trash_chunk()
+ - MINOR: checks: improve handling of the servers tracking chain
+ - REORG: checks: retrieve the check-specific defines from server.h to checks.h
+ - MINOR: checks: use an enum instead of flags to report a check result
+ - MINOR: checks: rename the state flags
+ - MINOR: checks: replace state DISABLED with CONFIGURED and ENABLED
+ - MINOR: checks: use check->state instead of srv->state & SRV_CHECKED
+ - MINOR: checks: fix agent check interval computation
+ - MINOR: checks: add a PAUSED state for the checks
+ - MINOR: checks: create the agent tasks even when no check is configured
+ - MINOR: checks: add a flag to indicate what check is an agent
+ - MEDIUM: checks: enable agent checks even if health checks are disabled
+ - BUG/MEDIUM: checks: ensure we can enable a server after boot
+ - BUG/MEDIUM: checks: tracking servers must not inherit the MAINT flag
+ - BUG/MAJOR: session: repair tcp-request connection rules
+ - BUILD: fix SUBVERS extraction in the Makefile
+ - BUILD: pattern: silence a warning about uninitialized value
+ - BUILD: log: fix build warning on Solaris
+ - BUILD: dumpstats: fix build error on Solaris
+ - DOC: move option pgsql-check to the correct place
+ - DOC: move option tcp-check to the proper place
+ - MINOR: connection: add simple functions to report connection readiness
+ - MEDIUM: connection: centralize handling of nolinger in fd management
+ - OPTIM: http: set CF_READ_DONTWAIT on response message
+ - OPTIM: http: do not re-enable reading on client side while closing the server side
+ - MINOR: config: add option http-keep-alive
+ - MEDIUM: connection: inform si_alloc_conn() whether existing conn is OK or not
+ - MAJOR: stream-int: handle the connection reuse in si_connect()
+ - MAJOR: http: add the keep-alive transition on the server side
+ - MAJOR: backend: enable connection reuse
+ - MINOR: http: add option prefer-last-server
+ - MEDIUM: http: do not report connection errors for second and further requests
+
+2013/06/17 : 1.5-dev19
+ - MINOR: stats: remove the autofocus on the scope input field
+ - BUG/MEDIUM: Fix crt-list file parsing error: filtered name was ignored.
+ - BUG/MEDIUM: ssl: EDH ciphers are not usable if no DH parameters present in pem file.
+ - BUG/MEDIUM: shctx: makes the code independent on SSL runtime version.
+ - MEDIUM: ssl: improve crt-list format to support negation
+ - BUG: ssl: fix crt-list for clients not supporting SNI
+ - MINOR: stats: show soft-stopped servers in different color
+ - BUG/MINOR: config: "source" does not work in defaults section
+ - BUG: regex: fix pcre compile error when using JIT
+ - MINOR: ssl: add pattern fetch 'ssl_c_sha1'
+ - BUG: ssl: send payload gets corrupted if tune.ssl.maxrecord is used
+ - MINOR: show PCRE version and JIT status in -vv
+ - BUG/MINOR: jit: don't rely on USE flag to detect support
+ - DOC: readme: add suggestion to link against static openssl
+ - DOC: examples: provide simplified ssl configuration
+ - REORG: tproxy: prepare the transparent proxy defines for accepting other OSes
+ - MINOR: tproxy: add support for FreeBSD
+ - MINOR: tproxy: add support for OpenBSD
+ - DOC: examples: provide an example of transparent proxy configuration for FreeBSD 8
+ - CLEANUP: fix minor typo in error message.
+ - CLEANUP: fix missing include <string.h> in proto/listener.h
+ - CLEANUP: protect checks.h from multiple inclusions
+ - MINOR: compression: acl "res.comp" and fetch "res.comp_algo"
+ - BUG/MINOR: http: add-header/set-header did not accept the ACL condition
+ - BUILD: mention in the Makefile that USE_PCRE_JIT is for libpcre >= 8.32
+ - BUG/MEDIUM: splicing is broken since 1.5-dev12
+ - BUG/MAJOR: acl: add implicit arguments to the resolve list
+ - BUG/MINOR: tcp: fix error reporting for TCP rules
+ - CLEANUP: peers: remove a bit of spaghetti to prepare for the next bugfix
+ - MINOR: stick-table: allow to allocate an entry without filling it
+ - BUG/MAJOR: peers: fix an overflow when syncing strings larger than 16 bytes
+ - MINOR: session: only call http_send_name_header() when changing the server
+ - MINOR: tcp: report the erroneous word in tcp-request track*
+ - BUG/MAJOR: backend: consistent hash can loop forever in certain circumstances
+ - BUG/MEDIUM: log: fix regression on log-format handling
+ - MEDIUM: log: report file name, line number, and directive name with log-format errors
+ - BUG/MINOR: cli: "clear table" did not work anymore without a key
+ - BUG/MINOR: cli: "clear table xx data.xx" does not work anymore
+ - BUG/MAJOR: http: compression still has defects on chunked responses
+ - BUG/MINOR: stats: fix confirmation links on the stats interface
+ - BUG/MINOR: stats: the status bar does not appear anymore after a change
+ - BUG/MEDIUM: stats: allocate the stats frontend also on "stats bind-process"
+ - BUG/MEDIUM: stats: fix a regression when dealing with POST requests
+ - BUG/MINOR: fix unterminated ACL array in compression
+ - BUILD: last fix broke non-linux platforms
+ - MINOR: init: indicate the SSL runtime version on -vv.
+ - BUG/MEDIUM: compression: the deflate algorithm must use global settings as well
+ - BUILD: stdbool is not portable (again)
+ - DOC: readme: add a small reminder about restrictions to respect in the code
+ - MINOR: ebtree: add new eb_next_dup/eb_prev_dup() functions to visit duplicates
+ - BUG/MINOR: acl: fix a double free during exit when using PCRE_JIT
+ - DOC: fix wrong copy-paste in the rspdel example
+ - MINOR: counters: make it easier to extend the amount of tracked counters
+ - MEDIUM: counters: add support for tracking a third counter
+ - MEDIUM: counters: add a new "gpc0_rate" counter in stick-tables
+ - BUG/MAJOR: http: always ensure response buffer has some room for a response
+ - MINOR: counters: add fetch/acl sc*_tracked to indicate whether a counter is tracked
+ - MINOR: defaults: allow REQURI_LEN and CAPTURE_LEN to be redefined
+ - MINOR: log: add a new flag 'L' for locally processed requests
+ - MINOR: http: add full-length header fetch methods
+ - MEDIUM: protocol: implement a "drain" function in protocol layers
+ - MEDIUM: http: add a new "http-response" ruleset
+ - MEDIUM: http: add the "set-nice" action to http-request and http-response
+ - MEDIUM: log: add a log level override value in struct session
+ - MEDIUM: http: add support for action "set-log-level" in http-request/http-response
+ - MEDIUM: http: add support for "set-tos" in http-request/http-response
+ - MEDIUM: http: add the "set-mark" action on http-request/http-response rules
+ - MEDIUM: tcp: add "tcp-request connection expect-proxy layer4"
+ - MEDIUM: acl: automatically detect the type of certain fetches
+ - MEDIUM: acl: remove a lot of useless ACLs that are equivalent to their fetches
+ - MEDIUM: acl: remove 15 additional useless ACLs that are equivalent to their fetches
+ - DOC: major reorg of ACL + sample fetch
+ - CLEANUP: http: remove the bogus urlp_ip ACL match
+ - MINOR: acl: add the new "env()" fetch method to retrieve an environment variable
+ - BUG/MINOR: acl: correctly consider boolean fetches when doing casts
+ - BUG/CRITICAL: fix a possible crash when using negative header occurrences
+ - DOC: update ROADMAP file
+ - MEDIUM: counters: use sc0/sc1/sc2 instead of sc1/sc2/sc3
+ - MEDIUM: stats: add proxy name filtering on the statistic page
+
+2013/04/03 : 1.5-dev18
 - DOCS: Add explanation of intermediate certs to crt parameter
+ - DOC: typo and minor fixes in compression paragraph
+ - MINOR: config: http-request configuration error message misses new keywords
+ - DOC: minor typo fix in documentation
+ - BUG/MEDIUM: ssl: ECDHE ciphers not usable without named curve configured.
+ - MEDIUM: ssl: add bind-option "strict-sni"
+ - MEDIUM: ssl: add mapping from SNI to cert file using "crt-list"
+ - MEDIUM: regex: Use PCRE JIT in acl
+ - DOC: simplify bind option "interface" explanation
+ - DOC: tfo: bump required kernel to linux-3.7
+ - BUILD: add explicit support for TFO with USE_TFO
+ - MEDIUM: New cli option -Ds for systemd compatibility
+ - MEDIUM: add haproxy-systemd-wrapper
+ - MEDIUM: add systemd service
+ - BUG/MEDIUM: systemd-wrapper: don't leak zombie processes
+ - BUG/MEDIUM: remove supplementary groups when changing gid
+ - BUG/MEDIUM: config: fix parser crash with bad bind or server address
+ - BUG/MINOR: Correct logic in cut_crlf()
+ - CLEANUP: checks: Make desc argument to set_server_check_status const
+ - CLEANUP: dumpstats: Make cli_release_handler() static
+ - MEDIUM: server: Break out set weight processing code
+ - MEDIUM: server: Allow relative weights greater than 100%
+ - MEDIUM: server: Tighten up parsing of weight string
+ - MEDIUM: checks: Add agent health check
+ - BUG/MEDIUM: ssl: openssl 0.9.8 doesn't open /dev/random before chroot
+ - BUG/MINOR: time: frequency counters are not totally accurate
+ - BUG/MINOR: http: don't process abortonclose when request was sent
+ - BUG/MEDIUM: stream_interface: don't close outgoing connections on shutw()
+ - BUG/MEDIUM: checks: ignore late resets after valid responses
+ - DOC: fix bogus recommendation on usage of gpc0 counter
+ - BUG/MINOR: http-compression: lookup Cache-Control in the response, not the request
+ - MINOR: signal: don't block SIGPROF by default
+ - OPTIM: epoll: make use of EPOLLRDHUP
+ - OPTIM: splice: detect shutdowns and avoid splice() == 0
+ - OPTIM: splice: assume by default that splice is working correctly
+ - BUG/MINOR: log: temporary fix for lost SSL info in some situations
+ - BUG/MEDIUM: peers: only the last peers section was used by tables
+ - BUG/MEDIUM: config: verbosely reject peers sections with multiple local peers
+ - BUG/MINOR: epoll: use a fix maxevents argument in epoll_wait()
+ - BUG/MINOR: config: fix improper check for failed memory alloc in ACL parser
+ - BUG/MINOR: config: free peer's address when exiting upon parsing error
+ - BUG/MINOR: config: check the proper variable when parsing log minlvl
+ - BUG/MEDIUM: checks: ensure the health_status is always within bounds
+ - BUG/MINOR: cli: show sess should always validate s->listener
+ - BUG/MINOR: log: improper NULL return check on utoa_pad()
+ - CLEANUP: http: remove a useless null check
+ - CLEANUP: tcp/unix: remove useless NULL check in {tcp,unix}_bind_listener()
+ - BUG/MEDIUM: signal: signal handler does not properly check for signal bounds
+ - BUG/MEDIUM: tools: off-by-one in quote_arg()
+ - BUG/MEDIUM: uri_auth: missing NULL check and memory leak on memory shortage
+ - BUG/MINOR: unix: remove the 'level' field from the ux struct
+ - CLEANUP: http: don't try to deinitialize http compression if it fails before init
+ - CLEANUP: config: slowstart is never negative
+ - CLEANUP: config: maxcompcpuusage is never negative
+ - BUG/MEDIUM: log: emit '-' for empty fields again
+ - BUG/MEDIUM: checks: fix a race condition between checks and observe layer7
+ - BUILD: fix a warning emitted by isblank() on non-c99 compilers
+ - BUILD: improve the makefile's support for libpcre
+ - MEDIUM: halog: add support for counting per source address (-ic)
+ - MEDIUM: tools: make str2sa_range support all address syntaxes
+ - MEDIUM: config: make use of str2sa_range() instead of str2sa()
+ - MEDIUM: config: use str2sa_range() to parse server addresses
+ - MEDIUM: config: use str2sa_range() to parse peers addresses
+ - MINOR: tests: add a config file to ease address parsing tests.
+ - MINOR: ssl: add a global tunable for the max SSL/TLS record size
+ - BUG/MINOR: syscall: fix NR_accept4 system call on sparc/linux
+ - BUILD/MINOR: syscall: add definition of NR_accept4 for ARM
+ - MINOR: config: report missing peers section name
+ - BUG/MEDIUM: tools: fix bad character handling in str2sa_range()
+ - BUG/MEDIUM: stats: never apply "unix-bind prefix" to the global stats socket
+ - MINOR: tools: prepare str2sa_range() to return an error message
+ - BUG/MEDIUM: checks: don't call connect() on unsupported address families
+ - MINOR: tools: prepare str2sa_range() to accept a prefix
+ - MEDIUM: tools: make str2sa_range() parse unix addresses too
+ - MEDIUM: config: make str2listener() use str2sa_range() to parse unix addresses
+ - MEDIUM: config: use a single str2sa_range() call to parse bind addresses
+ - MEDIUM: config: use str2sa_range() to parse log addresses
+ - CLEANUP: tools: remove str2sun() which is not used anymore.
+ - MEDIUM: config: add complete support for str2sa_range() in dispatch
+ - MEDIUM: config: add complete support for str2sa_range() in server addr
+ - MEDIUM: config: add complete support for str2sa_range() in 'server'
+ - MEDIUM: config: add complete support for str2sa_range() in 'peer'
+ - MEDIUM: config: add complete support for str2sa_range() in 'source' and 'usesrc'
+ - CLEANUP: minor cleanup in str2sa_range() and str2ip()
+ - CLEANUP: config: do not use multiple errmsg at once
+ - MEDIUM: tools: support specifying explicit address families in str2sa_range()
+ - MAJOR: listener: support inheriting a listening fd from the parent
+ - MAJOR: tools: support environment variables in addresses
+ - BUG/MEDIUM: http: add-header should not emit "-" for empty fields
+ - BUG/MEDIUM: config: ACL compatibility check on "redirect" was wrong
+ - BUG/MEDIUM: http: fix another issue caused by http-send-name-header
 - DOC: mention the new HTTP 307 and 308 redirect statuses
+ - MEDIUM: poll: do not use FD_* macros anymore
+ - BUG/MAJOR: ev_select: disable the select() poller if maxsock > FD_SETSIZE
+ - BUG/MINOR: acl: ssl_fc_{alg,use}_keysize must parse integers, not strings
+ - BUG/MINOR: acl: ssl_c_used, ssl_fc{,_has_crt,_has_sni} take no pattern
+ - BUILD: fix usual isdigit() warning on solaris
+ - BUG/MEDIUM: tools: vsnprintf() is not always reliable on Solaris
+ - OPTIM: buffer: remove one jump in buffer_count()
+ - OPTIM: http: improve branching in chunk size parser
+ - OPTIM: http: optimize the response forward state machine
+ - BUILD: enable poll() by default in the makefile
+ - BUILD: add explicit support for Mac OS/X
+ - BUG/MAJOR: http: use a static storage for sample fetch context
+ - BUG/MEDIUM: ssl: improve error processing and reporting in ssl_sock_load_cert_list_file()
+ - BUG/MAJOR: http: fix regression introduced by commit a890d072
+ - BUG/MAJOR: http: fix regression introduced by commit d655ffe
+ - BUG/CRITICAL: using HTTP information in tcp-request content may crash the process
+ - MEDIUM: acl: remove flag ACL_MAY_LOOKUP which is improperly used
+ - MEDIUM: samples: use new flags to describe compatibility between fetches and their usages
+ - MINOR: log: indicate it when some unreliable sample fetches are logged
+ - MEDIUM: samples: move payload-based fetches and ACLs to their own file
+ - MINOR: backend: rename sample fetch functions and declare the sample keywords
+ - MINOR: frontend: rename sample fetch functions and declare the sample keywords
+ - MINOR: listener: rename sample fetch functions and declare the sample keywords
+ - MEDIUM: http: unify acl and sample fetch functions
+ - MINOR: session: rename sample fetch functions and declare the sample keywords
+ - MAJOR: acl: make all ACLs reference the fetch function via a sample.
+ - MAJOR: acl: remove the arg_mask from the ACL definition and use the sample fetch's
+ - MAJOR: acl: remove fetch argument validation from the ACL struct
+ - MINOR: http: add new direction-explicit sample fetches for headers and cookies
+ - MINOR: payload: add new direction-explicit sample fetches
+ - CLEANUP: acl: remove ACL hooks which were never used
+ - MEDIUM: proxy: remove acl_requires and just keep a flag "http_needed"
+ - MINOR: sample: provide a function to report the name of a sample check point
+ - MAJOR: acl: convert all ACL requires to SMP use+val instead of ->requires
+ - CLEANUP: acl: remove unused references to ACL_USE_*
+ - MINOR: http: replace acl_parse_ver with acl_parse_str
+ - MEDIUM: acl: move the ->parse, ->match and ->smp fields to acl_expr
+ - MAJOR: acl: add option -m to change the pattern matching method
+ - MINOR: acl: remove the use_count in acl keywords
+ - MEDIUM: acl: have a pointer to the keyword name in acl_expr
+ - MEDIUM: acl: support using sample fetches directly in ACLs
+ - MEDIUM: http: remove val_usr() to validate user_lists
+ - MAJOR: sample: maintain a per-proxy list of the fetch args to resolve
+ - MINOR: ssl: add support for the "alpn" bind keyword
+ - MINOR: http: status code 303 is HTTP/1.1 only
+ - MEDIUM: http: implement redirect 307 and 308
+ - MINOR: http: status 301 should not be marked non-cacheable
+
+2012/12/28 : 1.5-dev17
+ - MINOR: ssl: Setting global tune.ssl.cachesize value to 0 disables SSL session cache.
+ - BUG/MEDIUM: stats: fix stats page regression introduced by commit 20b0de5
+ - BUG/MINOR: stats: last fix was still wrong
+ - BUG/MINOR: stats: http-request rules still don't cope with stats
+ - BUG/MINOR: http: http-request add-header emits a corrupted header
+ - BUG/MEDIUM: stats: disable request analyser when processing POST or HEAD
 - BUG/MINOR: log: make log-format, unique-id-format and add-header more independent
+ - BUILD: log: unused variable svid
+ - CLEANUP: http: rename the misleading http_check_access_rule
+ - MINOR: http: move redirect rule processing to its own function
+ - REORG: config: move the http redirect rule parser to proto_http.c
+ - MEDIUM: http: add support for "http-request redirect" rules
+ - MEDIUM: http: add support for "http-request tarpit" rule
+
+2012/12/24 : 1.5-dev16
+ - BUG/MEDIUM: ssl: Prevent ssl error from affecting other connections.
+ - BUG/MINOR: ssl: error is not reported if it occurs simultaneously with peer close detection.
+ - MINOR: ssl: add fetch and acl "ssl_c_used" to check if current SSL session uses a client certificate.
+ - MINOR: contrib: make the iprange tool grep for addresses
+ - CLEANUP: polling: gcc doesn't always optimize constants away
+ - OPTIM: poll: optimize fd management functions for low register count CPUs
+ - CLEANUP: poll: remove a useless double-check on fdtab[fd].owner
+ - OPTIM: epoll: use a temp variable for intermediary flag computations
+ - OPTIM: epoll: current fd does not count as a new one
+ - BUG/MINOR: poll: the I/O handler was called twice for polled I/Os
+ - MINOR: http: make resp_ver and status ACLs check for the presence of a response
+ - BUG/MEDIUM: stream-interface: fix possible stalls during transfers
+ - BUG/MINOR: stream_interface: don't return when the fd is already set
+ - BUG/MEDIUM: connection: always update connection flags prior to computing polling
+ - CLEANUP: buffer: use buffer_empty() instead of buffer_len()==0
+ - BUG/MAJOR: stream_interface: fix occasional data transfer freezes
+ - BUG/MEDIUM: stream_interface: fix another case where the reader might not be woken up
+ - BUG/MINOR: http: don't abort client connection on premature responses
+ - BUILD: no need to clean up when making git-tar
+ - MINOR: log: add a tag for amount of bytes uploaded from client to server
+ - BUG/MEDIUM: log: fix possible segfault during config parsing
+ - MEDIUM: log: change a few log tokens to make them easier to remember
+ - BUG/MINOR: log: add_to_logformat_list() used the wrong constants
+ - MEDIUM: log-format: make the format parser more robust and more extensible
+ - MINOR: sample: support cast from bool to string
+ - MINOR: samples: add a function to fetch and convert any sample to a string
+ - MINOR: log: add lf_text_len
+ - MEDIUM: log: add the ability to include samples in logs
+ - REORG: stats: massive code reorg and cleanup
+ - REORG: stats: move the HTTP header injection to proto_http
+ - REORG: stats: functions are now HTTP/CLI agnostic
+ - BUG/MINOR: log: fix regression introduced by commit 8a3f52
+ - MINOR: chunks: centralize the trash chunk allocation
+ - MEDIUM: stats: use hover boxes instead of title to report details
+ - MEDIUM: stats: use multi-line tips to display detailed counters
+ - MINOR: tools: simplify the use of the int to ascii macros
+ - MINOR: stats: replace STAT_FMT_CSV with STAT_FMT_HTML
+ - MINOR: http: prepare to support more http-request actions
+ - MINOR: log: make parse_logformat_string() take a const char *
+ - MEDIUM: http: add http-request 'add-header' and 'set-header' to build headers
+
+2012/12/12 : 1.5-dev15
+ - DOC: add a few precisions on compression
+ - BUG/MEDIUM: ssl: Fix handshake failure on session resumption with client cert.
+ - BUG/MINOR: ssl: One free session in cache remains unused.
+ - BUG/MEDIUM: ssl: first outgoing connection would fail with {ca,crt}-ignore-err
+ - MEDIUM: ssl: manage shared cache by blocks for huge sessions.
+ - MINOR: acl: add fetch for server session rate
+ - BUG/MINOR: compression: Content-Type is case insensitive
+ - MINOR: compression: disable on multipart or status != 200
+ - BUG/MINOR: http: don't report client aborts as server errors
+ - MINOR: stats: compute the ratio of compressed response based on 2xx responses
+ - MINOR: http: factor out the content-type checks
+ - BUG/MAJOR: stats: correctly check for a possible divide error when showing compression ratios
 - BUILD: ssl: OpenSSL 0.9.6 has no renegotiation
+ - BUG/MINOR: http: disable compression when message has no body
+ - MINOR: compression: make the stats a bit more robust
+ - BUG/MEDIUM: comp: DEFAULT_MAXZLIBMEM was expressed in bytes and not megabytes
+ - MINOR: connection: don't remove failed handshake flags
+ - MEDIUM: connection: add an error code in connections
+ - MEDIUM: connection: add minimal error reporting in logs for incomplete connections
+ - MEDIUM: connection: add error reporting for the PROXY protocol header
+ - MEDIUM: connection: add error reporting for the SSL
+ - DOC: document the connection error format in logs
+ - BUG/MINOR: http: don't log a 503 on client errors while waiting for requests
+ - BUILD: stdbool is not portable
+ - BUILD: ssl: NAME_MAX is not portable, use MAXPATHLEN instead
+ - BUG/MAJOR: raw_sock: must check error code on hangup
+ - BUG/MAJOR: polling: do not set speculative events on ERR nor HUP
+ - BUG/MEDIUM: session: fix FD leak when transport layer logging is enabled
+ - MINOR: stats: add a few more information on session dump
+ - BUG/MINOR: tcp: set the ADDR_TO_SET flag on outgoing connections
+ - CLEANUP: connection: remove unused server/proxy/task/si_applet declarations
 - BUG/MEDIUM: tcp: process could theoretically crash on lack of source ports
+ - MINOR: cfgparse: mention "interface" in the list of allowed "source" options
+ - MEDIUM: connection: introduce "struct conn_src" for servers and proxies
+ - CLEANUP: proto_tcp: use the same code to bind servers and backends
+ - CLEANUP: backend: use the same tproxy address selection code for servers and backends
+ - BUG/MEDIUM: stick-tables: conversions to strings were broken in dev13
+ - MEDIUM: proto_tcp: add support for tracking L7 information
+ - MEDIUM: counters: add sc1_trackers/sc2_trackers
+ - MINOR: http: add the "base32" pattern fetch function
+ - MINOR: http: add the "base32+src" fetch method.
+ - CLEANUP: session: use an array for the stick counters
+ - BUG/MINOR: proto_tcp: fix parsing of "table" in track-sc1/2
+ - BUG/MINOR: proto_tcp: bidirectional fetches not supported anymore in track-sc1/2
+ - BUG/MAJOR: connection: always recompute polling status upon I/O
+ - BUG/MINOR: connection: remove a few synchronous calls to polling updates
+ - MINOR: config: improve error checking on TCP stick-table tracking
+ - DOC: add some clarifications to the readme
+
+2012/11/26 : 1.5-dev14
+ - DOC: fix minor typos
+ - BUG/MEDIUM: compression: does not forward trailers
+ - MINOR: buffer_dump with ASCII
+ - BUG/MEDIUM: checks: mark the check as stopped after a connect error
+ - BUG/MEDIUM: checks: ensure we completely disable polling upon success
+ - BUG/MINOR: checks: don't mark the FD as closed before transport close
+ - MEDIUM: checks: avoid accumulating TIME_WAITs during checks
+ - MINOR: cli: report the msg state in full text in "show sess $PTR"
+ - CLEANUP: checks: rename some server check flags
+ - MAJOR: checks: rework completely bogus state machine
+ - BUG/MINOR: checks: slightly clean the state machine up
+ - MEDIUM: checks: avoid waking the application up for pure TCP checks
+ - MEDIUM: checks: close the socket as soon as we have a response
+ - BUG/MAJOR: checks: close FD on all timeouts
+ - MINOR: checks: fix recv polling after connect()
+ - MEDIUM: connection: provide a common conn_full_close() function
+ - BUG/MEDIUM: checks: prevent TIME_WAITs from appearing also on timeouts
+ - BUG/MAJOR: peers: the listener's maxaccept was not set and caused loops
+ - MINOR: listeners: make the accept loop more robust when maxaccept==0
+ - BUG/MEDIUM: acl: correctly resolve all args, not just the first one
 - BUG/MEDIUM: acl: make prune_acl_expr() correctly free ACL expressions upon exit
+ - BUG/MINOR: stats: fix inversion of the report of a check in progress
+ - MEDIUM: tcp: add explicit support for delayed ACK in connect()
+ - BUG/MEDIUM: connection: always disable polling upon error
+ - MINOR: connection: abort earlier when errors are detected
+ - BUG/MEDIUM: checks: report handshake failures
+ - BUG/MEDIUM: connection: local_send_proxy must wait for connection to establish
+ - MINOR: tcp: add support for the "v6only" bind option
+ - MINOR: stats: also report the computed compression savings in html stats
+ - MINOR: stats: report the total number of compressed responses per front/back
+ - MINOR: tcp: add support for the "v4v6" bind option
+ - DOC: stats: document the comp_rsp stats column
+ - BUILD: buffer: fix another isprint() warning on solaris
+ - MINOR: cli: add support for the "show sess all" command
+ - BUG/MAJOR: cli: show sess <id> may randomly corrupt the back-ref list
+ - MINOR: cli: improve output format for show sess $ptr
+
+2012/11/22 : 1.5-dev13
+ - BUILD: fix build issue without USE_OPENSSL
+ - BUILD: fix compilation error with DEBUG_FULL
+ - DOC: ssl: remove prefer-server-ciphers documentation
+ - DOC: ssl: surround keywords with quotes
+ - DOC: fix minor typo on http-send-name-header
+ - BUG/MEDIUM: acls using IPv6 subnets patterns incorrectly match IPs
+ - BUG/MAJOR: fix a segfault on option http_proxy and url_ip acl
+ - MEDIUM: http: accept IPv6 values with (s)hdr_ip acl
+ - BUILD: report zlib support in haproxy -vv
+ - DOC: compression: add some details and clean up the formatting
+ - DOC: Change is_ssl acl to ssl_fc acl in example
+ - DOC: make it clear what the HTTP request size is
+ - MINOR: ssl: try to load Diffie-Hellman parameters from cert file
+ - DOC: ssl: update 'crt' statement on 'bind' about Diffie-Hellman parameters loading
+ - MINOR: ssl: add elliptic curve Diffie-Hellman support for ssl key generation
+ - DOC: ssl: add 'ecdhe' statement on 'bind'
+ - MEDIUM: ssl: add client certificate authentication support
+ - DOC: ssl: add 'verify', 'cafile' and 'crlfile' statements on 'bind'
+ - MINOR: ssl: add fetch and ACL 'client_crt' to test a client cert is present
+ - DOC: ssl: add fetch and ACL 'client_cert'
+ - MINOR: ssl: add ignore verify errors options
+ - DOC: ssl: add 'ca-ignore-err' and 'crt-ignore-err' statements on 'bind'
+ - MINOR: ssl: add fetch and ACL 'ssl_verify_result'
+ - DOC: ssl: add fetch and ACL 'ssl_verify_result'
+ - MINOR: ssl: add fetches and ACLs to return verify errors
+ - DOC: ssl: add fetches and ACLs 'ssl_verify_crterr', 'ssl_verify_caerr', and 'ssl_verify_crterr_depth'
+ - MINOR: ssl: disable shared memory and locks on session cache if nbproc == 1
+ - MINOR: ssl: add build param USE_PRIVATE_CACHE to build cache without shared memory
+ - MINOR: ssl : add statements 'notlsv11' and 'notlsv12' and rename 'notlsv1' to 'notlsv10'.
+ - DOC: ssl : add statements 'notlsv11' and 'notlsv12' and rename 'notlsv1' to 'notlsv10'.
+ - MEDIUM: config: authorize frontend and listen without bind.
+ - MINOR: ssl: add statement 'no-tls-tickets' on bind to disable stateless session resumption
+ - DOC: ssl: add 'no-tls-tickets' statement documentation.
+ - BUG/MINOR: ssl: Fix CRL check was not enabled when crlfile was specified.
+ - BUG/MINOR: build: Fix compilation issue on openssl 0.9.6 due to missing CRL feature.
+ - BUG/MINOR: conf: Fix 'maxsslconn' statement error if built without OPENSSL.
+ - BUG/MINOR: build: Fix failure with USE_OPENSSL=1 and USE_FUTEX=1 on archs i486 and i686.
+ - MINOR: ssl: remove prefer-server-ciphers statement and set it as the default on ssl listeners.
+ - BUG/MEDIUM: ssl: subsequent handshakes fail after server configuration changes
+ - MINOR: ssl: add 'crt-base' and 'ca-base' global statements.
+ - MEDIUM: conf: rename 'nosslv3' and 'notlsvXX' statements 'no-sslv3' and 'no-tlsvXX'.
+ - MEDIUM: conf: rename 'cafile' and 'crlfile' statements 'ca-file' and 'crl-file'
+ - MINOR: ssl: use bit fields to store ssl options instead of one int each
+ - MINOR: ssl: add 'force-sslv3' and 'force-tlsvXX' statements on bind.
+ - MINOR: ssl: add 'force-sslv3' and 'force-tlsvXX' statements on server
+ - MINOR: ssl: add defines LISTEN_DEFAULT_CIPHERS and CONNECT_DEFAULT_CIPHERS.
+ - BUG/MINOR: ssl: Fix issue on server statements 'no-tls*' and 'no-sslv3'
+ - MINOR: ssl: move ssl context init for servers from cfgparse.c to ssl_sock.c
+ - MEDIUM: ssl: reject ssl server keywords in default-server statement
+ - MINOR: ssl: add statement 'no-tls-tickets' on server side.
+ - MINOR: ssl: add statements 'verify', 'ca-file' and 'crl-file' on servers.
+ - DOC: Fix rename of options cafile and crlfile to ca-file and crl-file.
 - MINOR: sample: manage binary to string type conversion in stick-table and samples.
+ - MINOR: acl: add parse and match primitives to use binary type on ACLs
+ - MINOR: sample: export 'sample_get_trash_chunk(void)'
+ - MINOR: conf: rename all ssl modules fetches using prefix 'ssl_fc' and 'ssl_c'
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_fc_protocol', 'ssl_fc_cipher', 'ssl_fc_use_keysize' and 'ssl_fc_alg_keysize'
+ - MINOR: ssl: add pattern fetch 'ssl_fc_session_id'
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_c_version' and 'ssl_f_version'
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_c_s_dn', 'ssl_c_i_dn', 'ssl_f_s_dn' and 'ssl_c_i_dn'
+ - MINOR: ssl: add pattern and ACLs 'ssl_c_sig_alg' and 'ssl_f_sig_alg'
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_c_key_alg' and 'ssl_f_key_alg'
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_c_notbefore', 'ssl_c_notafter', 'ssl_f_notbefore' and 'ssl_f_notafter'
+ - MINOR: ssl: add 'crt' statement on server.
+ - MINOR: ssl: checks the consistency of a private key with the corresponding certificate
+ - BUG/MEDIUM: ssl: review polling on reneg.
+ - BUG/MEDIUM: ssl: Fix some reneg cases not correctly handled.
+ - BUG/MEDIUM: ssl: Fix sometimes reneg fails if requested by server.
+ - MINOR: build: allow packagers to specify the ssl cache size
+ - MINOR: conf: add warning if ssl is not enabled and a certificate is present on bind.
+ - MINOR: ssl: Add tune.ssl.lifetime statement in global.
+ - MINOR: compression: Enable compression for IE6 w/SP2, IE7 and IE8
+ - BUG: http: revert broken optimisation from 82fe75c1a79dac933391501b9d293bce34513755
+ - DOC: duplicate ssl_sni section
+ - MEDIUM: HTTP compression (zlib library support)
+ - CLEANUP: use struct comp_ctx instead of union
+ - BUILD: remove dependency to zlib.h
+ - MINOR: compression: memlevel and windowsize
+ - MEDIUM: use pool for zlib
+ - MINOR: compression: try init in cfgparse.c
+ - MINOR: compression: init before deleting headers
+ - MEDIUM: compression: limit RAM usage
+ - MINOR: compression: tune.comp.maxlevel
+ - MINOR: compression: maximum compression rate limit
+ - MINOR: log-format: check number of arguments in cfgparse.c
+ - BUG/MEDIUM: compression: no Content-Type header but type in configuration
+ - BUG/MINOR: compression: deinit zlib only when required
+ - MEDIUM: compression: don't compress when no data
+ - MEDIUM: compression: use pool for comp_ctx
+ - MINOR: compression: rate limit in 'show info'
+ - MINOR: compression: report zlib memory usage
+ - BUG/MINOR: compression: dynamic level increase
+ - DOC: compression: unsupported cases.
+ - MINOR: compression: CPU usage limit
+ - MEDIUM: http: add "redirect scheme" to ease HTTP to HTTPS redirection
+ - BUG/MAJOR: ssl: missing tests in ACL fetch functions
+ - MINOR: config: add a function to indent error messages
+ - REORG: split "protocols" files into protocol and listener
+ - MEDIUM: config: replace ssl_conf by bind_conf
+ - CLEANUP: listener: remove unused conf->file and conf->line
+ - MEDIUM: listener: add a minimal framework to register "bind" keyword options
+ - MEDIUM: config: move the "bind" TCP parameters to proto_tcp
+ - MEDIUM: move bind SSL parsing to ssl_sock
+ - MINOR: config: improve error reporting for "bind" lines
+ - MEDIUM: config: move the common "bind" settings to listener.c
+ - MEDIUM: config: move all unix-specific bind keywords to proto_uxst.c
+ - MEDIUM: config: enumerate full list of registered "bind" keywords upon error
+ - MINOR: listener: add a scope field in the bind keyword lists
+ - MINOR: config: pass the file and line to config keyword parsers
+ - MINOR: stats: fill the file and line numbers in the stats frontend
+ - MINOR: config: set the bind_conf entry on listeners created from a "listen" line.
+ - MAJOR: listeners: use dual-linked lists to chain listeners with frontends
+ - REORG: listener: move unix perms from the listener to the bind_conf
+ - BUG: backend: balance hdr was broken since 1.5-dev11
+ - MINOR: standard: make memprintf() support a NULL destination
+ - MINOR: config: make str2listener() use memprintf() to report errors.
+ - MEDIUM: stats: remove the stats_sock struct from the global struct
+ - MINOR: ssl: set the listeners' data layer to ssl during parsing
+ - MEDIUM: stats: make use of the standard "bind" parsers to parse global socket
+ - DOC: move bind options to their own section
+ - DOC: stats: refer to "bind" section for "stats socket" settings
+ - DOC: fix index to reference bind and server options
+ - BUG: http: do not print garbage on invalid requests in debug mode
+ - BUG/MINOR: config: check the proper pointer to report unknown protocol
+ - CLEANUP: connection: offer conn_prepare() to set up a connection
+ - CLEANUP: config: fix typo inteface => interface
+ - BUG: stats: fix regression introduced by commit 4348fad1
+ - MINOR: cli: allow to set frontend maxconn to zero
+ - BUG/MAJOR: http: chunk parser was broken with buffer changes
+ - MEDIUM: monitor: simplify handling of monitor-net and mode health
+ - MINOR: connection: add a pointer to the connection owner
+ - MEDIUM: connection: make use of the owner instead of container_of
+ - BUG/MINOR: ssl: report the L4 connection as established when possible
+ - BUG/MEDIUM: proxy: must not try to stop disabled proxies upon reload
+ - BUG/MINOR: config: use a copy of the file name in proxy configurations
+ - BUG/MEDIUM: listener: don't pause protocols that do not support it
+ - MEDIUM: proxy: add the global frontend to the list of normal proxies
+ - BUG/MINOR: epoll: correctly disable FD polling in fd_rem()
+ - MINOR: signal: really ignore signals configured with no handler
+ - MINOR: buffers: add a few functions to write chars, strings and blocks
+ - MINOR: raw_sock: always report asynchronous connection errors
+ - MEDIUM: raw_sock: improve connection error reporting
+ - REORG: connection: rename the data layer the "transport layer"
+ - REORG: connection: rename app_cb "data"
+ - MINOR: connection: provide a generic data layer wakeup callback
+ - MINOR: connection: split conn_prepare() in two functions
+ - MINOR: connection: add an init callback to the data_cb struct
+ - MEDIUM: session: use a specific data_cb for embryonic sessions
+ - MEDIUM: connection: use a generic data-layer init() callback
+ - MEDIUM: connection: reorganize connection flags
+ - MEDIUM: connection: only call the data->wake callback on activity
+ - MEDIUM: connection: make it possible for data->wake to return an error
+ - MEDIUM: session: register a data->wake callback to process errors
+ - MEDIUM: connection: don't call the data->init callback upon error
+ - MEDIUM: connection: it's not the data layer's role to validate the connection
+ - MEDIUM: connection: automatically disable polling on error
+ - REORG: connection: move the PROXY protocol management to connection.c
+ - MEDIUM: connection: add a new local send-proxy transport callback
+ - MAJOR: checks: make use of the connection layer to send checks
+ - REORG: server: move the check-specific parts into a check subsection
+ - MEDIUM: checks: use real buffers to store requests and responses
+ - MEDIUM: check: add the ctrl and transport layers in the server check structure
+ - MAJOR: checks: completely use the connection transport layer
+ - MEDIUM: checks: add the "check-ssl" server option
+ - MEDIUM: checks: enable the PROXY protocol with health checks
+ - CLEANUP: checks: remove minor warnings for assigned but not used variables
+ - MEDIUM: tcp: enable TCP Fast Open on systems which support it
+ - BUG: connection: fix regression from commit 9e272bf9
+ - CLEANUP: cttproxy: remove a warning on undeclared close()
+ - BUG/MAJOR: ensure that hdr_idx is always reserved when L7 fetches are used
+ - MEDIUM: listener: add support for linux's accept4() syscall
+ - MINOR: halog: sort output by cookie code
+ - BUG/MINOR: halog: -ad/-ac report the correct number of output lines
+ - BUG/MINOR: halog: fix help message for -ut/-uto
+ - MINOR: halog: add a parameter to limit output line count
+ - BUILD: accept4: move the socketcall declaration outside of accept4()
+ - MINOR: server: add minimal infrastructure to parse keywords
+ - MINOR: standard: make indent_msg() support empty messages
+ - MEDIUM: server: check for registered keywords when parsing unknown keywords
+ - MEDIUM: server: move parsing of keyword "id" to server.c
+ - BUG/MEDIUM: config: check-send-proxy was ignored if SSL was not builtin
+ - MEDIUM: ssl: move "server" keyword SSL options parsing to ssl_sock.c
+ - MEDIUM: log: suffix the frontend's name with '~' when using SSL
+ - MEDIUM: connection: always unset the transport layer upon close
+ - BUG/MINOR: session: fix some leftover from debug code
+ - BUG/MEDIUM: session: enable the conn_session_update() callback
+ - MEDIUM: connection: add a flag to hold the transport layer
+ - MEDIUM: log: add a new LW_XPRT flag to pin the transport layer
+ - MINOR: log: make lf_text use a const char *
+ - MEDIUM: log: report SSL ciphers and version in logs using logformat %sslc/%sslv
+ - REORG: http: rename msg->buf to msg->chn since it's a channel
+ - CLEANUP: http: use 'chn' to name channel variables, not 'buf'
+ - CLEANUP: channel: use 'chn' instead of 'buf' as local variable names
+ - CLEANUP: tcp: use 'chn' instead of 'buf' or 'b' for channel pointer names
+ - CLEANUP: stream_interface: use 'chn' instead of 'b' to name channel pointers
+ - CLEANUP: acl: use 'chn' instead of 'b' to name channel pointers
+ - MAJOR: channel: replace the struct buffer with a pointer to a buffer
+ - OPTIM: channel: reorganize struct members to improve cache efficiency
+ - CLEANUP: session: remove term_trace which is not used anymore
+ - OPTIM: session: reorder struct session fields
+ - OPTIM: connection: pack the struct target
+ - DOC: document relations between internal entities
+ - MINOR: ssl: add 'ssl_npn' sample/acl to extract TLS/NPN information
+ - BUILD: ssl: fix shctx build on older compilers
+ - MEDIUM: ssl: add support for the "npn" bind keyword
+ - BUG: ssl: fix ssl_sni ACLs to correctly process regular expressions
+ - MINOR: chunk: provide string compare functions
+ - MINOR: sample: accept fetch keywords without parenthesis
+ - MEDIUM: sample: pass an empty list instead of a null for fetch args
+ - MINOR: ssl: improve socket behaviour upon handshake abort.
+ - BUG/MEDIUM: http: set DONTWAIT on data when switching to tunnel mode
+ - MEDIUM: listener: provide a fallback for accept4() when not supported
+ - BUG/MAJOR: connection: risk of crash on certain tricky close scenario
+ - MEDIUM: cli: allow the stats socket to be bound to a specific set of processes
+ - OPTIM: channel: inline channel_forward's fast path
+ - OPTIM: http: inline http_parse_chunk_size() and http_skip_chunk_crlf()
+ - OPTIM: tools: inline hex2i()
+ - CLEANUP: http: rename HTTP_MSG_DATA_CRLF state
+ - MINOR: compression: automatically disable compression for older browsers
+ - MINOR: compression: optimize memLevel to improve byte rate
+ - BUG/MINOR: http: compression should consider all Accept-Encoding header values
+ - BUILD: fix coexistence of openssl and zlib
+ - MINOR: ssl: add pattern and ACLs fetches 'ssl_c_serial' and 'ssl_f_serial'
+ - BUG/MEDIUM: command-line option -D must have precedence over "debug"
+ - MINOR: tools: add a clear_addr() function to unset an address
+ - BUG/MEDIUM: tcp: transparent bind to the source only when address is set
+ - CLEANUP: remove trashlen
+ - MAJOR: session: detach the connections from the stream interfaces
+ - DOC: update document describing relations between internal entities
+ - BUILD: make it possible to specify ZLIB path
+ - MINOR: compression: add an offload option to remove the Accept-Encoding header
+ - BUG: compression: disable auto-close and enable MSG_MORE during transfer
+ - CLEANUP: completely remove trashlen
+ - MINOR: chunk: add a function to reset a chunk
+ - CLEANUP: replace chunk_printf() with chunk_appendf()
+ - MEDIUM: make the trash be a chunk instead of a char *
+ - MEDIUM: remove remains of BUFSIZE in HTTP auth and sample conversions
+ - MEDIUM: stick-table: allocate the table key of size buffer size
+ - BUG/MINOR: stream_interface: don't loop over ->snd_buf()
+ - BUG/MINOR: session: ensure that we don't retry connection if some data were sent
+ - OPTIM: session: don't process the whole session when only timers need a refresh
+ - BUG/MINOR: session: mark the handshake as complete earlier
+ - MAJOR: connection: remove the CO_FL_CURR_*_POL flag
+ - BUG/MAJOR: always clear the CO_FL_WAIT_* flags after updating polling flags
+ - MAJOR: sepoll: make the poller totally event-driven
+ - OPTIM: stream_interface: disable reading when CF_READ_DONTWAIT is set
+ - BUILD: compression: remove a build warning
+ - MEDIUM: fd: don't unset fdtab[].updated upon delete
+ - REORG: fd: move the speculative I/O management from ev_sepoll
+ - REORG: fd: move the fd state management from ev_sepoll
+ - REORG: fd: centralize the processing of speculative events
+ - BUG: raw_sock: also consider ENOTCONN in addition to EAGAIN
+ - BUILD: stream_interface: remove si_fd() and its references
+ - BUILD: compression: enable build in BSD and OSX Makefiles
+ - MAJOR: ev_select: make the poller support speculative events
+ - MAJOR: ev_poll: make the poller support speculative events
+ - MAJOR: ev_kqueue: make the poller support speculative events
+ - MAJOR: polling: replace epoll with sepoll and remove sepoll
+ - MAJOR: polling: remove unused callbacks from the poller struct
+ - MEDIUM: http: refrain from sending "Connection: close" when Upgrade is present
+ - CLEANUP: channel: remove any reference of the hijackers
+ - CLEANUP: stream_interface: remove the external task type target
+ - MAJOR: connection: replace struct target with a pointer to an enum
+ - BUG: connection: fix typo in previous commit
+ - BUG: polling: don't skip polled events in the spec list
+ - MINOR: splice: disable it when the system returns EBADF
+ - MINOR: build: allow packagers to specify the default maxzlibmem
+ - BUG: halog: fix broken output limitation
+ - BUG: proxy: fix server name lookup in get_backend_server()
+ - BUG: compression: do not always increment the round counter on allocation failure
+ - BUG/MEDIUM: compression: release the zlib pools between keep-alive requests
+ - MINOR: global: don't prevent nbproc from being redefined
+ - MINOR: config: support process ranges for "bind-process"
+ - MEDIUM: global: add support for CPU binding on Linux ("cpu-map")
+ - MINOR: ssl: rename and document the tune.ssl.cachesize option
+ - DOC: update the PROXY protocol spec to support v2
+ - MINOR: standard: add a simple popcount function
+ - MEDIUM: adjust the maxaccept per listener depending on the number of processes
+ - BUG: compression: properly disable compression when content-type does not match
+ - MINOR: cli: report connection status in "show sess xxx"
+ - BUG/MAJOR: stream_interface: certain workloads could cause get stuck
+ - BUILD: cli: fix build when SSL is enabled
+ - MINOR: cli: report the fd state in "show sess xxx"
+ - MINOR: cli: report an error message on missing argument to compression rate
+ - MINOR: http: add some debugging functions to pretty-print msg state names
+ - BUG/MAJOR: stream_interface: read0 not always handled since dev12
+ - DOC: documentation on http header capture is wrong
+ - MINOR: http: allow the cookie capture size to be changed
+ - DOC: http header capture has not been limited in size for a long time
+ - DOC: update readme with build methods for BSD
+ - BUILD: silence a warning on Solaris about usage of isdigit()
+ - MINOR: stats: report HTTP compression stats per frontend and per backend
+ - MINOR: log: add '%Tl' to log-format
+ - MINOR: samples: update the url_param fetch to match parameters in the path
+
+2012/09/10 : 1.5-dev12
+ - CONTRIB: halog: sort URLs by avg bytes_read or total bytes_read
+ - MEDIUM: ssl: add support for prefer-server-ciphers option
+ - MINOR: IPv6 support for transparent proxy
+ - MINOR: protocol: add SSL context to listeners if USE_OPENSSL is defined
+ - MINOR: server: add SSL context to servers if USE_OPENSSL is defined
+ - MEDIUM: connection: add a new handshake flag for SSL (CO_FL_SSL_WAIT_HS).
+ - MEDIUM: ssl: add new files ssl_sock.[ch] to provide the SSL data layer
+ - MEDIUM: config: add the 'ssl' keyword on 'bind' lines
+ - MEDIUM: config: add support for the 'ssl' option on 'server' lines
 + - MEDIUM: ssl: protect against client-initiated renegotiation
+ - BUILD: add optional support for SSL via the USE_OPENSSL flag
+ - MEDIUM: ssl: add shared memory session cache implementation.
+ - MEDIUM: ssl: replace OpenSSL's session cache with the shared cache
 + - MINOR: ssl: add global setting tune.sslcachesize to set SSL session cache size.
+ - MEDIUM: ssl: add support for SNI and wildcard certificates
+ - DOC: Typos cleanup
+ - DOC: fix name for "option independant-streams"
+ - DOC: specify the default value for maxconn in the context of a proxy
+ - BUG/MINOR: to_log erased with unique-id-format
+ - LICENSE: add licence exception for OpenSSL
+ - BUG/MAJOR: cookie prefix doesn't support cookie-less servers
+ - BUILD: add an AIX 5.2 (and later) target.
+ - MEDIUM: fd/si: move peeraddr from struct fdinfo to struct connection
+ - MINOR: halog: use the more recent dual-mode fgets2 implementation
+ - BUG/MEDIUM: ebtree: ebmb_insert() must not call cmp_bits on full-length matches
+ - CLEANUP: halog: make clean should also remove .o files
+ - OPTIM: halog: make use of memchr() on platforms which provide a fast one
+ - OPTIM: halog: improve cold-cache behaviour when loading a file
+ - BUG/MINOR: ACL implicit arguments must be created with unresolved flag
+ - MINOR: replace acl_fetch_{path,url}* with smp_fetch_*
+ - MEDIUM: pattern: add the "base" sample fetch method
+ - OPTIM: i386: make use of kernel-mode-linux when available
+ - BUG/MINOR: tarpit: fix condition to return the HTTP 500 message
+ - BUG/MINOR: polling: some events were not set in various pollers
+ - MINOR: http: add the urlp_val ACL match
+ - BUG: stktable: tcp_src_to_stktable_key() must return NULL on invalid families
+ - MINOR: stats/cli: add plans to support more stick-table actions
+ - MEDIUM: stats/cli: add support for "set table key" to enter values
+ - REORG/MEDIUM: fd: remove FD_STCLOSE from struct fdtab
+ - REORG/MEDIUM: fd: remove checks for FD_STERROR in ev_sepoll
+ - REORG/MEDIUM: fd: get rid of FD_STLISTEN
+ - REORG/MINOR: connection: move declaration to its own include file
+ - REORG/MINOR: checks: put a struct connection into the server
+ - MINOR: connection: add flags to the connection struct
+ - MAJOR: get rid of fdtab[].state and use connection->flags instead
+ - MINOR: fd: add a new I/O handler to fdtab
+ - MEDIUM: polling: prepare to call the iocb() function when defined.
+ - MEDIUM: checks: make use of fdtab->iocb instead of cb[]
+ - MEDIUM: protocols: use the generic I/O callback for accept callbacks
+ - MINOR: connection: add a handler for fd-based connections
+ - MAJOR: connection: replace direct I/O callbacks with the connection callback
+ - MINOR: fd: make fdtab->owner a connection and not a stream_interface anymore
+ - MEDIUM: connection: remove the FD_POLL_* flags only once
+ - MEDIUM: connection: extract the send_proxy callback from proto_tcp
+ - MAJOR: tcp: remove the specific I/O callbacks for TCP connection probes
+ - CLEANUP: remove the now unused fdtab direct I/O callbacks
+ - MAJOR: remove the stream interface and task management code from sock_*
+ - MEDIUM: stream_interface: pass connection instead of fd in sock_ops
+ - MEDIUM: stream_interface: centralize the SI_FL_ERR management
+ - MAJOR: connection: add a new CO_FL_CONNECTED flag
+ - MINOR: rearrange tcp_connect_probe() and fix wrong return codes
+ - MAJOR: connection: call data layer handshakes from the handler
+ - MEDIUM: fd: remove the EV_FD_COND_* primitives
+ - MINOR: sock_raw: move calls to si_data_close upper
+ - REORG: connection: replace si_data_close() with conn_data_close()
+ - MEDIUM: sock_raw: introduce a read0 callback that is different from shutr
+ - MAJOR: stream_int: use a common stream_int_shut*() functions regardless of the data layer
+ - MAJOR: fd: replace all EV_FD_* macros with new fd_*_* inline calls
+ - MEDIUM: fd: add fd_poll_{recv,send} for use when explicit polling is required
+ - MEDIUM: connection: add definitions for dual polling mechanisms
+ - MEDIUM: connection: make use of the new polling functions
+ - MAJOR: make use of conn_{data|sock}_{poll|stop|want}* in connection handlers
+ - MEDIUM: checks: don't use FD_WAIT_* anymore
+ - MINOR: fd: get rid of FD_WAIT_*
+ - MEDIUM: stream_interface: offer a generic function for connection updates
+ - MEDIUM: stream-interface: offer a generic chk_rcv function for connections
+ - MEDIUM: stream-interface: add a snd_buf() callback to sock_ops
+ - MEDIUM: stream-interface: provide a generic stream_int_chk_snd_conn() function
+ - MEDIUM: stream-interface: provide a generic si_conn_send_cb callback
+ - MEDIUM: stream-interface: provide a generic stream_sock_read0() function
+ - REORG/MAJOR: use "struct channel" instead of "struct buffer"
+ - REORG/MAJOR: extract "struct buffer" from "struct channel"
+ - MINOR: connection: provide conn_{data|sock}_{read0|shutw} functions
+ - REORG: sock_raw: rename the files raw_sock*
+ - MAJOR: raw_sock: extract raw_sock_to_buf() from raw_sock_read()
+ - MAJOR: raw_sock: temporarily disable splicing
+ - MINOR: stream-interface: add an rcv_buf callback to sock_ops
+ - REORG: stream-interface: move sock_raw_read() to si_conn_recv_cb()
+ - MAJOR: connection: split the send call into connection and stream interface
+ - MAJOR: stream-interface: restore splicing mechanism
+ - MAJOR: stream-interface: make conn_notify_si() more robust
+ - MEDIUM: proxy-proto: don't use buffer flags in conn_si_send_proxy()
+ - MAJOR: stream-interface: don't commit polling changes in every callback
+ - MAJOR: stream-interface: fix splice not to call chk_snd by itself
+ - MEDIUM: stream-interface: don't remove WAIT_DATA when a handshake is in progress
+ - CLEANUP: connection: split sock_ops into data_ops, app_cp and si_ops
+ - REORG: buffers: split buffers into chunk,buffer,channel
+ - MAJOR: channel: remove the BF_OUT_EMPTY flag
+ - REORG: buffer: move buffer_flush, b_adv and b_rew to buffer.h
+ - MINOR: channel: rename bi_full to channel_full as it checks the whole channel
+ - MINOR: buffer: provide a new buffer_full() function
+ - MAJOR: channel: stop relying on BF_FULL to take action
+ - MAJOR: channel: remove the BF_FULL flag
+ - REORG: channel: move buffer_{replace,insert_line}* to buffer.{c,h}
 + - CLEANUP: channel: use CF_/CHN_ prefixes instead of BF_/BUF_
+ - CLEANUP: channel: use "channel" instead of "buffer" in function names
+ - REORG: connection: move the target pointer from si to connection
+ - MAJOR: connection: move the addr field from the stream_interface
+ - MEDIUM: stream_interface: remove CAP_SPLTCP/CAP_SPLICE flags
+ - MEDIUM: proto_tcp: remove any dependence on stream_interface
+ - MINOR: tcp: replace tcp_src_to_stktable_key with addr_to_stktable_key
+ - MEDIUM: connection: add an ->init function to data layer
+ - MAJOR: session: introduce embryonic sessions
+ - MAJOR: connection: make the PROXY decoder a handshake handler
+ - CLEANUP: frontend: remove the old proxy protocol decoder
+ - MAJOR: connection: rearrange the polling flags.
+ - MEDIUM: connection: only call tcp_connect_probe when nothing was attempted yet
+ - MEDIUM: connection: complete the polling cleanups
+ - MEDIUM: connection: avoid calling handshakes when polling is required
+ - MAJOR: stream_interface: continue to update data polling flags during handshakes
+ - CLEANUP: fd: remove fdtab->flags
+ - CLEANUP: fdtab: flatten the struct and merge the spec struct with the rest
+ - CLEANUP: includes: fix includes for a number of users of fd.h
+ - MINOR: ssl: disable TCP quick-ack by default on SSL listeners
+ - MEDIUM: config: add a "ciphers" keyword to set SSL cipher suites
+ - MEDIUM: config: add "nosslv3" and "notlsv1" on bind and server lines
+ - BUG: ssl: mark the connection as waiting for an SSL connection during the handshake
+ - BUILD: http: rename error_message http_error_message to fix conflicts on RHEL
+ - BUILD: ssl: fix shctx build on RHEL with futex
+ - BUILD: include sys/socket.h to fix build failure on FreeBSD
+ - BUILD: fix build error without SSL (ssl_cert)
+ - BUILD: ssl: use MAP_ANON instead of MAP_ANONYMOUS
+ - BUG/MEDIUM: workaround an eglibc bug which truncates the pidfiles when nbproc > 1
+ - MEDIUM: config: support per-listener backlog and maxconn
+ - MINOR: session: do not send an HTTP/500 error on SSL sockets
+ - MEDIUM: config: implement maxsslconn in the global section
+ - BUG: tcp: close socket fd upon connect error
+ - MEDIUM: connection: improve error handling around the data layer
+ - MINOR: config: make the tasks "nice" value configurable on "bind" lines.
+ - BUILD: shut a gcc warning introduced by commit 269ab31
+ - MEDIUM: config: centralize handling of SSL config per bind line
+ - BUILD: makefile: report USE_OPENSSL status in build options
+ - BUILD: report openssl build settings in haproxy -vv
+ - MEDIUM: ssl: add sample fetches for is_ssl, ssl_has_sni, ssl_sni_*
+ - DOC: add a special acknowledgement for the stud project
+ - DOC: add missing SSL options for servers and listeners
+ - BUILD: automatically add -lcrypto for SSL
+ - DOC: add some info about openssl build in the README
+
+2012/06/04 : 1.5-dev11
+ - BUG/MEDIUM: option forwardfor if-none doesn't work with some configurations
+ - BUG/MAJOR: trash must always be the size of a buffer
+ - DOC: fix minor regex example issue and improve doc on stats
+ - MINOR: stream_interface: add a pointer to the listener for TARG_TYPE_CLIENT
+ - MEDIUM: protocol: add a pointer to struct sock_ops to the listener struct
+ - MINOR: checks: add on-marked-up option
+ - MINOR: balance uri: added 'whole' parameter to include query string in hash calculation
+ - MEDIUM: stream_interface: remove the si->init
+ - MINOR: buffers: add a rewind function
+ - BUG/MAJOR: fix regression on content-based hashing and http-send-name-header
+ - MAJOR: http: stop using msg->sol outside the parsers
+ - CLEANUP: http: make it more obvious that msg->som is always null outside of chunks
+ - MEDIUM: http: get rid of msg->som which is not used anymore
+ - MEDIUM: http: msg->sov and msg->sol will never wrap
+ - BUG/MAJOR: checks: don't call set_server_status_* when no LB algo is set
+ - BUG/MINOR: stop connect timeout when connect succeeds
+ - REORG: move the send-proxy code to tcp_connect_write()
+ - REORG/MINOR: session: detect the TCP monitor checks at the protocol accept
+ - MINOR: stream_interface: introduce a new "struct connection" type
+ - REORG/MINOR: stream_interface: move si->fd to struct connection
+ - REORG/MEDIUM: stream_interface: move applet->state and private to connection
+ - MINOR: stream_interface: add a data channel close function
+ - MEDIUM: stream_interface: call si_data_close() before releasing the si
+ - MINOR: peers: use the socket layer operations from the peer instead of sock_raw
+ - BUG/MINOR: checks: expire on timeout.check if smaller than timeout.connect
+ - MINOR: add a new function call tracer for debugging purposes
+ - BUG/MINOR: perform_http_redirect also needs to rewind the buffer
+ - BUG/MAJOR: b_rew() must pass a signed offset to b_ptr()
+ - BUG/MEDIUM: register peer sync handler in the proper order
+ - BUG/MEDIUM: buffers: fix bi_putchr() to correctly advance the pointer
+ - BUG/MINOR: fix option httplog validation with TCP frontends
+ - BUG/MINOR: log: don't report logformat errors in backends
+ - REORG/MINOR: use dedicated proxy flags for the cookie handling
+ - BUG/MINOR: config: do not report twice the incompatibility between cookie and non-http
+ - MINOR: http: add support for "httponly" and "secure" cookie attributes
+ - BUG/MEDIUM: ensure that unresolved arguments are freed exactly once
+ - BUG/MINOR: commit 196729ef used wrong condition resulting in freeing constants
+ - MEDIUM: stats: add support for soft stop/soft start in the admin interface
+ - MEDIUM: stats: add the ability to kill sessions from the admin interface
+ - BUILD: add support for linux kernels >= 2.6.28
+
+2012/05/14 : 1.5-dev10
+ - BUG/MINOR: stats admin: "Unexpected result" was displayed unconditionally
+ - BUG/MAJOR: acl: http_auth_group() must not accept any user from the userlist
+ - CLEANUP: auth: make the code build again with DEBUG_AUTH
+ - BUG/MEDIUM: config: don't crash at config load time on invalid userlist names
+ - REORG: use the name sock_raw instead of stream_sock
+ - MINOR: stream_interface: add a client target : TARG_TYPE_CLIENT
+ - BUG/MEDIUM: stream_interface: restore get_src/get_dst
+ - CLEANUP: sock_raw: remove last references to stream_sock
+ - CLEANUP: stream_interface: stop exporting socket layer functions
+ - MINOR: stream_interface: add an init callback to sock_ops
+ - MEDIUM: stream_interface: derive the socket operations from the target
+ - MAJOR: fd: remove the need for the socket layer to recheck the connection
+ - MINOR: session: call the socket layer init function when a session establishes
+ - MEDIUM: session: add support for tunnel timeouts
+ - MINOR: standard: add a new debug macro : fddebug()
+ - CLEANUP: fd: remove unused cb->b pointers in the struct fdtab
+ - OPTIM: proto_http: don't enable quick-ack on empty buffers
+ - OPTIM/MAJOR: ev_sepoll: process spec events after polled events
+ - OPTIM/MEDIUM: stream_interface: add a new SI_FL_NOHALF flag
+
+2012/05/08 : 1.5-dev9
+ - MINOR: Add release callback to si_applet
+ - CLEANUP: Fix some minor typos
+ - MINOR: Add TO/FROM_SET flags to struct stream_interface
+ - CLEANUP: Fix some minor whitespace issues
+ - MINOR: stats admin: allow unordered parameters in POST requests
+ - CLEANUP: fix typo in findserver() log message
+ - MINOR: stats admin: use the backend id instead of its name in the form
+ - MINOR: stats admin: reduce memcmp()/strcmp() calls on status codes
+ - DOC: cleanup indentation, alignment, columns and chapters
+ - DOC: fix some keywords arguments documentation
+ - MINOR: cli: display the 4 IP addresses and ports on "show sess XXX"
+ - BUG/MAJOR: log: possible segfault with logformat
+ - MEDIUM: log: split of log_format generation
+ - MEDIUM: log: New format-log flags: %Fi %Fp %Si %Sp %Ts %rt %H %pid
+ - MEDIUM: log: Unique ID
+ - MINOR: log: log-format: usable without httplog and tcplog
+ - BUG/MEDIUM: balance source did not properly hash IPv6 addresses
+ - MINOR: contrib/iprange: add a network IP range to mask converter
+ - MEDIUM: session: implement the "use-server" directive
+ - MEDIUM: log: add a new cookie flag 'U' to report situations where cookie is not used
+ - MEDIUM: http: make extract_cookie_value() iterate over cookie values
+ - MEDIUM: http: add cookie and scookie ACLs
+ - CLEANUP: lb_first: add reference to a paper describing the original idea
+ - MEDIUM: stream_sock: add a get_src and get_dst callback and remove SN_FRT_ADDR_SET
+ - BUG/MINOR: acl: req_ssl_sni would randomly fail if a session ID is present
+ - BUILD: http: make extract_cookie_value() return an int not size_t
+ - BUILD: http: stop gcc-4.1.2 from complaining about possibly uninitialized values
+ - CLEANUP: http: message parser must ignore HTTP_MSG_ERROR
+ - MINOR: standard: add a memprintf() function to build formatted error messages
+ - CLEANUP: remove a few warning about unchecked return values in debug code
+ - MEDIUM: move message-related flags from transaction to message
+ - DOC: add a diagram to explain how circular buffers work
+ - MAJOR: buffer rework: replace ->send_max with ->o
+ - MAJOR: buffer: replace buf->l with buf->{o+i}
+ - MINOR: buffers: provide simple pointer normalization functions
+ - MINOR: buffers: remove unused function buffer_contig_data()
+ - MAJOR: buffers: replace buf->w with buf->p - buf->o
+ - MAJOR: buffers: replace buf->r with buf->p + buf->i
+ - MAJOR: http: move buffer->lr to http_msg->next
+ - MAJOR: http: change msg->{som,col,sov,eoh} to be relative to buffer origin
+ - CLEANUP: http: remove unused http_msg->col
+ - MAJOR: http: turn http_msg->eol to a buffer-relative offset
+ - MEDIUM: http: add a pointer to the buffer in http_msg
+ - MAJOR: http: make http_msg->sol relative to buffer's origin
+ - MEDIUM: http: http_send_name_header: remove references to msg and buffer
+ - MEDIUM: http: remove buffer arg in a few header manipulation functions
+ - MEDIUM: http: remove buffer arg in http_capture_bad_message
+ - MEDIUM: http: remove buffer arg in http_msg_analyzer
+ - MEDIUM: http: remove buffer arg in http_upgrade_v09_to_v10
+ - MEDIUM: http: remove buffer arg in http_buffer_heavy_realign
+ - MEDIUM: http: remove buffer arg in chunk parsing functions
+ - MINOR: http: remove useless wrapping checks in http_msg_analyzer
+ - MEDIUM: buffers: fix unsafe use of buffer_ignore at some places
+ - MEDIUM: buffers: add new pointer wrappers and get rid of almost all buffer_wrap_add calls
+ - MEDIUM: buffers: implement b_adv() to advance a buffer's pointer
+ - MEDIUM: buffers: rename a number of buffer management functions
+ - MEDIUM: http: add a prefetch function for ACL pattern fetch
+ - MEDIUM: http: make all ACL fetch function use acl_prefetch_http()
+ - BUG/MINOR: http_auth: ACLs are volatile, not permanent
+ - MEDIUM: http/acl: merge all request and response ACL fetches of headers and cookies
+ - MEDIUM: http/acl: make acl_fetch_hdr_{ip,val} rely on acl_fetch_hdr()
+ - MEDIUM: add a new typed argument list parsing framework
+ - MAJOR: acl: make use of the new argument parsing framework
+ - MAJOR: acl: store the ACL argument types in the ACL keyword declaration
+ - MEDIUM: acl: acl_find_target() now resolves arguments based on their types
+ - MAJOR: acl: make acl_find_targets also resolve proxy names at config time
+ - MAJOR: acl: ensure that implicit table and proxies are valid
+ - MEDIUM: acl: remove unused tests for missing args when args are mandatory
+ - MEDIUM: pattern: replace type pattern_arg with type arg
+ - MEDIUM: pattern: get rid of arg_i in all functions making use of arguments
+ - MEDIUM: pattern: use the standard arg parser
+ - MEDIUM: pattern: add an argument validation callback to pattern descriptors
+ - MEDIUM: pattern: report the precise argument parsing error when known.
+ - MEDIUM: acl: remove the ACL_TEST_F_NULL_MATCH flag
+ - MINOR: pattern: add a new 'sample' type to store fetched data
+ - MEDIUM: pattern: add new sample types to replace pattern types
+ - MAJOR: acl: make use of the new sample struct and get rid of acl_test
+ - MEDIUM: pattern/acl: get rid of temp_pattern in ACLs
+ - MEDIUM: acl: get rid of the SET_RES flags
+ - MEDIUM: get rid of SMP_F_READ_ONLY and SMP_F_MUST_FREE
+ - MINOR: pattern: replace struct pattern with struct sample
+ - MEDIUM: pattern: integrate pattern_data into sample and use sample everywhere
+ - MEDIUM: pattern: retrieve the sample type in the sample, not in the keyword description
+ - MEDIUM: acl/pattern: switch rdp_cookie functions stack up-down
+ - MEDIUM: acl: replace acl_expr with args in acl fetch_* functions
+ - MINOR: tcp: replace acl_fetch_rdp_cookie with smp_fetch_rdp_cookie
+ - MEDIUM: acl/pattern: use the same direction scheme
+ - MEDIUM: acl/pattern: start merging common sample fetch functions
+ - MEDIUM: pattern: ensure that sample types always cast into other types.
+ - MEDIUM: acl/pattern: factor out the src/dst address fetches
+ - MEDIUM: acl: implement payload and payload_lv
+ - CLEANUP: pattern: ensure that payload and payload_lv always stay in the buffer
+ - MINOR: stick_table: centralize the handling of empty keys
+ - MINOR: pattern: centralize handling of unstable data in pattern_process()
+ - MEDIUM: pattern: use smp_fetch_rdp_cookie instead of the pattern specific version
+ - MINOR: acl: set SMP_OPT_ITERATE on fetch functions
+ - MINOR: acl: add a val_args field to keywords
+ - MINOR: proto_tcp: validate arguments of payload and payload_lv ACLs
+ - MEDIUM: http: merge acl and pattern header fetch functions
+ - MEDIUM: http: merge ACL and pattern cookie fetches into a single one
+ - MEDIUM: acl: report parsing errors to the caller
+ - MINOR: arg: improve error reporting on invalid arguments
+ - MINOR: acl: report errors encountered when loading patterns from files
+ - MEDIUM: acl: extend the pattern parsers to report meaningful errors
+ - REORG: use the name "sample" instead of "pattern" to designate extracted data
+ - REORG: rename "pattern" files
+ - MINOR: acl: add types to ACL patterns
+ - MINOR: standard: add an IPv6 parsing function (str62net)
+ - MEDIUM: acl: support IPv6 address matching
+ - REORG: stream_interface: create a struct sock_ops to hold socket operations
+ - REORG/MEDIUM: move protocol->{read,write} to sock_ops
+ - REORG/MEDIUM: stream_interface: initialize socket ops from descriptors
+ - REORG/MEDIUM: replace stream interface protocol functions by a proto pointer
+ - REORG/MEDIUM: move the default accept function from sockstream to protocols.c
+ - MEDIUM: proto_tcp: remove src6 and dst6 pattern fetch methods
+ - BUG/MINOR: http: error snapshots are wrong if buffer wraps
+ - BUG/MINOR: http: ensure that msg->err_pos is always relative to buf->p
+ - MEDIUM: http: improve error capture reports
+ - MINOR: acl: add the cook_val() match to match a cookie against an integer
+ - BUG/MEDIUM: send_proxy: fix initialisation of send_proxy_ofs
+ - MEDIUM: memory: add the ability to poison memory at run time
+ - BUG/MEDIUM: log: ensure that unique_id is properly initialized
+ - MINOR: cfgparse: use a common errmsg pointer for all parsers
+ - MEDIUM: cfgparse: make backend_parse_balance() use memprintf to report errors
+ - MEDIUM: cfgparse: use the new error reporting framework for remaining cfg_keywords
+ - MINOR: http: replace http_message_realign() with buffer_slow_realign()
+
+2012/03/26 : 1.5-dev8
+ - MINOR: patch for minor typo (ressources/resources)
+ - MEDIUM: http: add support for sending the server's name in the outgoing request
+ - DOC: mention that default checks are TCP connections
+ - BUG/MINOR: fix options forwardfor if-none when an alternative header name is specified
+ - CLEANUP: Make check_statuses, analyze_statuses and process_chk static
+ - CLEANUP: Fix HCHK spelling errors
+ - BUG/MINOR: fix typo in processing of http-send-name-header
+ - MEDIUM: log: Use linked lists for loggers
+ - BUILD: fix declaration inside a scope block
+ - REORG: log: split send_log function
+ - MINOR: config: Parse the string of the log-format config keyword
+ - MINOR: add ultoa, ulltoa, ltoa, lltoa implementations
+ - MINOR: Date and time functions that don't use snprintf
+ - MEDIUM: log: make http_sess_log use log_format
+ - DOC: log-format documentation
+ - MEDIUM: log: use log_format for mode tcplog
+ - MEDIUM: log-format: backend source address %Bi %Bp
+ - BUG/MINOR: log-format: fix %o flag
+ - BUG/MEDIUM: bad length in log_format and __send_log
+ - MINOR: logformat %st is signed
+ - BUILD/MINOR: fix the source URL in the spec file
+ - DOC: acl is http_first_req, not http_req_first
+ - BUG/MEDIUM: don't trim last spaces from headers consisting only of spaces
+ - MINOR: acl: add new matches for header/path/url length
+ - BUILD: halog: make halog build on solaris
+ - BUG/MINOR: don't use a wrong port when connecting to a server with mapped ports
+ - MINOR: remove the client/server side distinction in SI addresses
+ - MINOR: halog: add support for matching queued requests
+ - DOC: indicate that cookie "prefix" and "indirect" should not be mixed
+ - OPTIM/MINOR: move struct sockaddr_storage to the tail of structs
+ - OPTIM/MINOR: make it possible to change pipe size (tune.pipesize)
+ - BUILD/MINOR: silent a build warning in src/pipe.c (fcntl)
+ - OPTIM/MINOR: move the hdr_idx pools out of the proxy struct
+ - MEDIUM: tune.http.maxhdr makes it possible to configure the maximum number of HTTP headers
+ - BUG/MINOR: fix a segfault when parsing a config with undeclared peers
+ - CLEANUP: rename possibly confusing struct field "tracked"
+ - BUG/MEDIUM: checks: fix slowstart behaviour when server tracking is in use
+ - MINOR: config: tolerate server "cookie" setting in non-HTTP mode
+ - MEDIUM: buffers: add some new primitives and rework existing ones
+ - BUG: buffers: don't return a negative value on buffer_total_space_res()
+ - MINOR: buffers: make buffer_pointer() support negative pointers too
+ - CLEANUP: kill buffer_replace() and use an inline instead
+ - BUG: tcp: option nolinger does not work on backends
+ - CLEANUP: ebtree: remove a few annoying signedness warnings
+ - CLEANUP: ebtree: clarify licence and update to 6.0.6
+ - CLEANUP: ebtree: remove 4-year old harmless typo in duplicates insertion code
+ - CLEANUP: ebtree: remove another typo, a wrong initialization in insertion code
+ - BUG: ebtree: ebst_lookup() could return the wrong entry
+ - OPTIM: stream_sock: reduce the amount of in-flight spliced data
+ - OPTIM: stream_sock: save a failed recv syscall when splice returns EAGAIN
+ - MINOR: acl: add support for TLS server name matching using SNI
+ - BUG: http: re-enable TCP quick-ack upon incomplete HTTP requests
+ - BUG: proto_tcp: don't try to bind to a foreign address if sin_family is unknown
+ - MINOR: pattern: export the global temporary pattern
+ - CLEANUP: patterns: get rid of pattern_data_setstring()
+ - MEDIUM: acl: use temp_pattern to store fetched information in the "method" match
+ - MINOR: acl: include pattern.h to make pattern migration more transparent
+ - MEDIUM: pattern: change the pattern data integer from unsigned to signed
+ - MEDIUM: acl: use temp_pattern to store any integer-type information
+ - MEDIUM: acl: use temp_pattern to store any address-type information
+ - CLEANUP: acl: integer part of acl_test is not used anymore
+ - MEDIUM: acl: use temp_pattern to store any string-type information
+ - CLEANUP: acl: remove last data fields from the acl_test struct
+ - MEDIUM: http: replace get_ip_from_hdr2() with http_get_hdr()
+ - MEDIUM: patterns: the hdr() pattern is now of type string
+ - DOC: add minimal documentation on how ACLs work internally
+ - DOC: add a coding-style file
+ - OPTIM: halog: keep a fast path for the lines-count only
+ - CLEANUP: silence a warning when building on sparc
+ - BUG: http: tighten the list of allowed characters in a URI
+ - MEDIUM: http: block non-ASCII characters in URIs by default
+ - DOC: add some documentation from RFC3986 about URI format
+ - BUG/MINOR: cli: correctly remove the whole table on "clear table"
+ - BUG/MEDIUM: correctly disable servers tracking another disabled servers.
+ - BUG/MEDIUM: zero-weight servers must not dequeue requests from the backend
+ - MINOR: halog: add some help on the command line
+ - BUILD: fix build error on FreeBSD
+ - BUG: fix double free in peers config error path
+ - MEDIUM: improve config check return codes
+ - BUILD: make it possible to look for pcre in the default system paths
+ - MINOR: config: emit a warning when 'default_backend' masks servers
+ - MINOR: backend: rework the LC definition to support other connection-based algos
+ - MEDIUM: backend: add the 'first' balancing algorithm
+ - BUG: fix httplog trailing LF
+ - MEDIUM: increase chunk-size limit to 2GB-1
+ - BUG: queue: fix dequeueing sequence on HTTP keep-alive sessions
+ - BUG: http: disable TCP delayed ACKs when forwarding content-length data
+ - BUG: checks: fix server maintenance exit sequence
+ - BUG/MINOR: stream_sock: don't remove BF_EXPECT_MORE and BF_SEND_DONTWAIT on partial writes
+ - DOC: enumerate valid status codes for "observe layer7"
+ - MINOR: buffer: switch a number of buffer args to const
+ - CLEANUP: silence signedness warning in acl.c
+ - BUG: stream_sock: si->release was not called upon shutw()
+ - MINOR: log: use "%ts" to log term status only and "%tsc" to log with cookie
+ - BUG/CRITICAL: log: fix risk of crash in development snapshot
+ - BUG/MAJOR: possible crash when using capture headers on TCP frontends
+ - MINOR: config: disable header captures in TCP mode and complain
+
+2011/09/10 : 1.5-dev7
+ - [BUG] fix binary stick-tables
+ - [MINOR] http: *_dom matching header functions now also split on ":"
+ - [BUG] checks: fix support of Mysqld >= 5.5 for mysql-check
+ - [MINOR] acl: add srv_conn acl to count connections on a specific backend server
+ - [MINOR] check: add redis check support
+ - [DOC] small fixes to clearly distinguish between keyword and variables
+ - [MINOR] halog: add support for termination code matching (-tcn/-TCN)
+ - [DOC] Minor spelling fixes and grammatical enhancements
+ - [CLEANUP] dumpstats: make symbols static where possible
+ - [MINOR] Break out dumping table
+ - [MINOR] Break out processing of clear table
+ - [MINOR] Allow listing of stick table by key
+ - [MINOR] Break out all stick table socat command parsing
+ - [MINOR] More flexible clearing of stick table
+ - [MINOR] Allow showing and clearing by key of ipv6 stick tables
+ - [MINOR] Allow showing and clearing by key of integer stick tables
+ - [MINOR] Allow showing and clearing by key of string stick tables
+ - [CLEANUP] Remove assigned but unused variables
+ - [CLEANUP] peers.h: fix declarations
+ - [CLEANUP] session.c: Make functions static where possible
+ - [MINOR] Add active connection list to server
+ - [MINOR] Allow shutdown of sessions when a server becomes unavailable
+ - [MINOR] Add down termination condition
+ - [MINOR] Make appsess{,ion}_refresh static
+ - [MINOR] Add rdp_cookie pattern fetch function
+ - [CLEANUP] Remove unnecessary casts
+ - [MINOR] Add non-stick server option
+ - [MINOR] Consistently use error in tcp_parse_tcp_req()
+ - [MINOR] Consistently free expr on error in cfg_parse_listen()
+ - [MINOR] Free rdp_cookie_name on deinit()
+ - [MINOR] Free tcp rules on deinit()
+ - [MINOR] Free stick table pool on deinit()
+ - [MINOR] Free stick rules on deinit()
+ - [MEDIUM] Fix stick-table replication on soft-restart
+ - [MEDIUM] Correct ipmask() logic
+ - [MINOR] Correct type in table dump examples
+ - [MINOR] Fix build error in stream_int_register_handler()
+ - [MINOR] Use DPRINTF in assign_server()
+ - [BUG] checks: http-check expect could fail a check on multi-packet responses
+ - [DOC] fix minor typo in the "dispatch" doc
+ - [BUG] proto_tcp: fix address binding on remote source
+ - [MINOR] http: don't report the "haproxy" word on the monitoring response
+ - [REORG] http: move HTTP error codes back to proto_http.h
+ - [MINOR] http: make the "HTTP 200" status code configurable.
+ - [MINOR] http: partially revert the chunking optimization for now
+ - [MINOR] stream_sock: always clear BF_EXPECT_MORE upon complete transfer
+ - [CLEANUP] stream_sock: remove unneeded FL_TCP and factor out test
+ - [MEDIUM] http: add support for "http-no-delay"
+ - [OPTIM] http: optimize chunking again in non-interactive mode
+ - [OPTIM] stream_sock: avoid fast-forwarding of partial data
+ - [OPTIM] stream_sock: don't use splice on too small payloads
+ - [MINOR] config: make it possible to specify a cookie even without a server
+ - [BUG] stats: support url-encoded forms
+ - [MINOR] config: automatically compute a default fullconn value
+ - [CLEANUP] config: remove some left-over printf debugging code from previous patch
+ - [DOC] add missing entry or stick store-response
+ - [MEDIUM] http: add support for 'cookie' and 'set-cookie' patterns
+ - [BUG] halog: correctly handle truncated last line
+ - [MINOR] halog: make SKIP_CHAR stop on field delimiters
+ - [MINOR] halog: add support for HTTP log matching (-H)
+ - [MINOR] halog: gain back performance before SKIP_CHAR fix
+ - [OPTIM] halog: cache some common fields positions
+ - [OPTIM] halog: check once for correct line format and reuse the pointer
+ - [OPTIM] halog: remove many 'if' by using a function pointer for the filters
+ - [OPTIM] halog: remove support for tab delimiters in input data
+ - [BUG] session: risk of crash on out of memory (1.5-dev regression)
+ - [MINOR] session: try to emit a 500 response on memory allocation errors
+ - [OPTIM] stream_sock: reduce the default number of accepted connections at once
+ - [BUG] stream_sock: disable listener when system resources are exhausted
+ - [MEDIUM] proxy: add a PAUSED state to listeners and move socket tricks out of proxy.c
+ - [BUG] stream_sock: ensure orphan listeners don't accept too many connections
+ - [MINOR] listeners: add listen_full() to mark a listener full
+ - [MINOR] listeners: add support for queueing resource limited listeners
+ - [MEDIUM] listeners: put listeners in queue upon resource shortage
+ - [MEDIUM] listeners: queue proxy-bound listeners at the proxy's
+ - [MEDIUM] listeners: don't stop proxies when global maxconn is reached
+ - [MEDIUM] listeners: don't change listeners states anymore in maintain_proxies
+ - [CLEANUP] proxy: rename a few proxy states (PR_STIDLE and PR_STRUN)
+ - [MINOR] stats: report a "WAITING" state for sockets waiting for resource
+ - [MINOR] proxy: make session rate-limit more accurate
+ - [MINOR] sessions: only wake waiting listeners up if rate limit is OK
+ - [BUG] proxy: peers must only be stopped once, not upon every call to maintain_proxies
+ - [CLEANUP] proxy: merge maintain_proxies() operation inside a single loop
+ - [MINOR] task: new function task_schedule() to schedule a wake up
+ - [MAJOR] proxy: finally get rid of maintain_proxies()
+ - [BUG] proxy: stats frontend and peers were missing many initializers
+ - [MEDIUM] listeners: add a global listener management task
+ - [MINOR] proxy: make findproxy() return proxies from numeric IDs too
+ - [DOC] fix typos, "#" is a sharp, not a dash
+ - [MEDIUM] stats: add support for changing frontend's maxconn at runtime
+ - [MEDIUM] checks: group health checks methods by values and save option bits
+ - [MINOR] session-counters: add the ability to clear the counters
+ - [BUG] check: http-check expect + regex would crash in defaults section
+ - [MEDIUM] http: make x-forwarded-for addition conditional
+ - [REORG] build: move syscall redefinition to specific places
+ - [CLEANUP] update the year in the copyright banner
+ - [BUG] possible crash in 'show table' on stats socket
+ - [BUG] checks: use the correct destination port for sending checks
+ - [BUG] backend: risk of picking a wrong port when mapping is used with crossed families
+ - [MINOR] make use of set_host_port() and get_host_port() to get rid of family mismatches
+ - [DOC] fixed a few "sensible" -> "sensitive" errors
+ - [MINOR] make use of addr_to_str() and get_host_port() to replace many inet_ntop()
+ - [BUG] http: trailing white spaces must also be trimmed after headers
+ - [MINOR] stats: display "<NONE>" instead of the frontend name when unknown
+ - [MINOR] http: take a capture of too large requests and responses
+ - [MINOR] http: take a capture of truncated responses
+ - [MINOR] http: take a capture of bad content-lengths.
+ - [DOC] add a few old and uncommitted docs
+ - [CLEANUP] cfgparse: fix reported options for the "bind" keyword
+ - [MINOR] halog: add -hs/-HS to filter by HTTP status code range
+ - [MINOR] halog: support backslash-escaped quotes
+ - [CLEANUP] remove dirty left-over of a debugging message
+ - [MEDIUM] stats: disable complex socket reservation for stats socket
+ - [CLEANUP] remove a useless test in manage_global_listener_queue()
+ - [MEDIUM] stats: add the "set maxconn" setting to the command line interface
+ - [MEDIUM] add support for global.maxconnrate to limit the per-process conn rate.
+ - [MINOR] stats: report the current and max global connection rates
+ - [MEDIUM] stats: add the ability to adjust the global maxconnrate
+ - [BUG] peers: don't pre-allocate 65000 connections to each peer
+ - [MEDIUM] don't limit peers nor stats socket to maxconn nor maxconnrate
+ - [BUG] peers: the peer frontend must not emit any log
+ - [CLEANUP] proxy: make pause_proxy() perform the required controls and emit the logs
+ - [BUG] peers: don't keep a peers section which has a NULL frontend
+ - [BUG] peers: ensure the peers are resumed if they were paused
+ - [MEDIUM] stats: add the ability to enable/disable/shutdown a frontend at runtime
+ - [MEDIUM] session: make session_shutdown() an independent function
+ - [MEDIUM] stats: offer the possibility to kill a session from the CLI
+ - [CLEANUP] stats: centralize tests for backend/server inputs on the CLI
+ - [MEDIUM] stats: offer the possibility to kill sessions by server
+ - [MINOR] halog: do not consider byte 0x8A as end of line
+ - [MINOR] frontend: ensure debug message length is always initialized
+ - [OPTIM] halog: make fgets parse more bytes by blocks
+ - [OPTIM] halog: add assembly version of the field lookup code
+ - [MEDIUM] poll: add a measurement of idle vs work time
+ - [CLEANUP] startup: report only the basename in the usage message
+ - [MINOR] startup: add an option to change to a new directory
+ - [OPTIM] task: don't scan the run queue if we know it's empty
+ - [BUILD] stats: stdint is not present on solaris
+ - [DOC] update the README file to reflect new naming rules for patches
+ - [MINOR] stats: report the number of requests intercepted by the frontend
+ - [DOC] update ROADMAP file
+
+2011/04/08 : 1.5-dev6
+ - [BUG] stream_sock: use get_addr_len() instead of sizeof() on sockaddr_storage
+ - [BUG] TCP source tracking was broken with IPv6 changes
+ - [BUG] stick-tables did not work when converting IPv6 to IPv4
+ - [CRITICAL] fix risk of crash when dealing with space in response cookies
+
+2011/03/29 : 1.5-dev5
+ - [BUG] standard: is_addr return value for IPv4 was inverted
+ - [MINOR] update comment about IPv6 support for server
+ - [MEDIUM] use getaddrinfo to resolve names if gethostbyname fail
+ - [DOC] update IPv6 support for bind
+ - [DOC] document IPv6 support for server
+ - [DOC] fix a minor typo
+ - [MEDIUM] IPv6 support for syslog
+ - [DOC] document IPv6 support for syslog
+ - [MEDIUM] IPv6 support for stick-tables
+ - [DOC] document IPv6 support for stick-tables
+ - [DOC] update ROADMAP file
+ - [BUG] session: src_conn_cur was returning src_conn_cnt instead
+ - [MINOR] frontend: add a make_proxy_line function
+ - [MEDIUM] stream_sock: add support for sending the proxy protocol header line
+ - [MEDIUM] server: add support for the "send-proxy" option
+ - [DOC] update the spec on the proxy protocol
+ - [BUILD] proto_tcp: fix build issue with CTTPROXY
+ - [DOC] update ROADMAP file
+ - [MEDIUM] config: rework the IPv4/IPv6 address parser to support host-only addresses
+ - [MINOR] cfgparse: better report wrong listening addresses and make use of str2sa_range
+ - [BUILD] add the USE_GETADDRINFO build option
+ - [TESTS] provide a test case for various address formats
+ - [BUG] session: conn_retries was not always initialized
+ - [BUG] log: retrieve the target from the session, not the SI
+ - [BUG] http: fix possible incorrect forwarded wrapping chunk size (take 2)
+ - [MINOR] tools: add two macros MID_RANGE and MAX_RANGE
+ - [BUG] http: fix content-length handling on 32-bit platforms
+ - [OPTIM] buffers: uninline buffer_forward()
+ - [BUG] stream_sock: fix handling for server side PROXY protocol
+ - [MINOR] acl: add support for table_cnt and table_avl matches
+ - [DOC] update ROADMAP file
+
+2011/03/13 : 1.5-dev4
+ - [MINOR] cfgparse: Check whether the path given for the stats socket actually fits into the sockaddr_un structure to avoid truncation.
+ - [MINOR] unix sockets : inherits the backlog size from the listener
+ - [CLEANUP] unix sockets : move create_uxst_socket() in uxst_bind_listener()
+ - [DOC] fix a minor typo
+ - [DOC] fix ignore-persist documentation
+ - [MINOR] add warnings on features not compatible with multi-process mode
+ - [BUG] http: fix http-pretend-keepalive and httpclose/tunnel mode
+ - [MINOR] stats: add support for several packets in stats admin
+ - [BUG] stats: admin commands must check the proxy state
+ - [BUG] stats: admin web interface must check the proxy state
+ - [MINOR] http: add pattern extraction method to stick on query string parameter
+ - [MEDIUM] add internal support for IPv6 server addresses
+ - [MINOR] acl: add be_id/srv_id to match backend's and server's id
+ - [MINOR] log: add support for passing the forwarded hostname
+ - [MINOR] log: ability to override the syslog tag
+ - [MINOR] checks: add PostgreSQL health check
+ - [DOC] update ROADMAP file
+ - [BUILD] pattern: use 'int' instead of 'int32_t'
+ - [OPTIM] linux: add support for bypassing libc to force using vsyscalls
+ - [BUG] debug: report the correct poller list in verbose mode
+ - [BUG] capture: do not capture a cookie if there is no memory left
+ - [BUG] appsession: fix possible double free in case of out of memory
+ - [CRITICAL] cookies: mixing cookies in indirect mode and appsession can crash the process
+ - [BUG] http: correctly update the header list when removing two consecutive headers
+ - [BUILD] add the CPU=native and ARCH=32/64 build options
+ - [BUILD] add -fno-strict-aliasing to fix warnings with gcc >= 4.4
+ - [CLEANUP] hash: move the avalanche hash code globally available
+ - [MEDIUM] hash: add support for an 'avalanche' hash-type
+ - [DOC] update roadmap file
+ - [BUG] http: do not re-enable the PROXY analyser on keep-alive
+ - [OPTIM] http: don't send each chunk in a separate packet
+ - [DOC] fix minor typos reported recently in the peers section
+ - [DOC] fix another typo in the doc
+ - [MINOR] stats: report HTTP message state and buffer flags in error dumps
+ - [BUG] http chunking: don't report a parsing error on connection errors
+ - [BUG] stream_interface: truncate buffers when sending error messages
+ - [MINOR] http: support wrapping messages in error captures
+ - [MINOR] http: capture incorrectly chunked message bodies
+ - [MINOR] stats: add global event ID and count
+ - [BUG] http: analyser optimizations broke pipelining
+ - [CLEANUP] frontend: only apply TCP-specific settings to TCP/TCP6 sockets
+ - [BUG] http: fix incorrect error reporting during data transfers
+ - [CRITICAL] session: correctly leave turn-around and queue states on abort
+ - [BUG] session: release slot before processing pending connections
+ - [MINOR] tcp: add support for dynamic MSS setting
+ - [BUG] stick-table: correctly terminate string keys during lookups
+ - [BUG] acl: fix handling of empty lines in pattern files
+ - [BUG] stick-table: use the private buffer when padding strings
+ - [BUG] ebtree: fix ebmb_lookup() with len smaller than the tree's keys
+ - [OPTIM] ebtree: ebmb_lookup: reduce stack usage by moving the return code out of the loop
+ - [OPTIM] ebtree: inline ebst_lookup_len and ebis_lookup_len
+ - [REVERT] undo the stick-table string key lookup fixes
+ - [MINOR] http: improve url_param pattern extraction to ignore empty values
+ - [BUILD] frontend: shut a warning with TCP_MAXSEG
+ - [BUG] http: update the header list's tail when removing the last header
+ - [DOC] fix minor typo in the proxy protocol doc
+ - [DOC] fix typos (http-request instead of http-check)
+ - [BUG] http: use correct ACL pointer when evaluating authentication
+ - [BUG] cfgparse: correctly count one socket per port in ranges
+ - [BUG] startup: set the rlimits before binding ports, not after.
+ - [BUG] acl: srv_id must return no match when the server is NULL
+ - [MINOR] acl: add ability to check for internal response-only parameters
+ - [MINOR] acl: srv_id is only valid in responses
+ - [MINOR] config: warn if response-only conditions are used in "redirect" rules
+ - [BUG] acl: fd leak when reading patterns from file
+ - [DOC] fix minor typo in "usesrc"
+ - [BUG] http: fix possible incorrect forwarded wrapping chunk size
+ - [BUG] http: fix computation of message body length after forwarding has started
+ - [BUG] http: balance url_param did not work with first parameters on POST
+ - [TESTS] update the url_param regression test to test check_post too
+ - [DOC] update ROADMAP
+ - [DOC] internal: reflect the fact that SI_ST_ASS is transient
+ - [BUG] config: don't crash on empty pattern files.
+ - [MINOR] stream_interface: make use of an applet descriptor for IO handlers
+ - [REORG] stream_interface: move the st0, st1 and private members to the applet
+ - [REORG] stream_interface: split the struct members in 3 parts
+ - [REORG] session: move client and server address to the stream interface
+ - [REORG] tcp: make tcpv4_connect_server() take the target address from the SI
+ - [MEDIUM] stream_interface: store the target pointer and type
+ - [CLEANUP] stream_interface: remove the applet.handler pointer
+ - [MEDIUM] log: take the logged server name from the stream interface
+ - [CLEANUP] session: remove data_source from struct session
+ - [CLEANUP] stats: make all dump functions only rely on the stream interface
+ - [REORG] session: move the data_ctx struct to the stream interface's applet
+ - [MINOR] proxy: add PR_O2_DISPATCH to detect dispatch mode
+ - [MINOR] cfgparse: only keep one of dispatch, transparent, http_proxy
+ - [MINOR] session: add a pointer to the new target into the session
+ - [MEDIUM] session: remove s->prev_srv which is not needed anymore
+ - [CLEANUP] stream_interface: use inline functions to manipulate targets
+ - [MAJOR] session: remove the ->srv pointer from struct session
+ - [MEDIUM] stats: split frontend and backend stats
+ - [MEDIUM] http: always evaluate http-request rules before stats http-request
+ - [REORG] http: move the http-request rules to proto_http
+ - [BUG] http: stats were not incremented on http-request deny
+ - [MINOR] checks: report it if checks fail due to socket creation error
+
+2010/11/11 : 1.5-dev3
+ - [DOC] fix http-request documentation
+ - [MEDIUM] enable/disable servers from the stats web interface
+ - [MEDIUM] stats: add an admin level
+ - [DOC] stats: document the "stats admin" statement
+ - [MINOR] startup: print the proxy socket which caused an error
+ - [CLEANUP] Remove unneeded chars allocation
+ - [MINOR] config: detect options not supported due to compilation options
+ - [MINOR] Add pattern's fetchs payload and payload_lv
+ - [MINOR] frontend: improve accept-proxy header parsing
+ - [MINOR] frontend: add tcpv6 support on accept-proxy bind
+ - [MEDIUM] Enhance message errors management on binds
+ - [MINOR] Manage unix socket source field on logs
+ - [MINOR] Manage unix socket source field on session dump on sock stats
+ - [MINOR] Support of unix listener sockets for debug and log event messages on frontend.c
+ - [MINOR] Add some tests on sockets family for port remapping and mode transparent.
+ - [MINOR] Manage socket type unix for some logs
+ - [MINOR] Enhance controls of socket's family on acls and pattern fetch
+ - [MINOR] Support listener's sockets unix on http logs.
+ - [MEDIUM] Add supports of bind on unix sockets.
+ - [BUG] stick table purge failure if size less than 255
+ - [BUG] stick table entries expire on counters updates/read or show table, even if there is no "expire" parameter
+ - [MEDIUM] Implement tcp inspect response rules
+ - [DOC] tcp-response content and inspect
+ - [MINOR] new acls fetch req_ssl_hello_type and rep_ssl_hello_type
+ - [DOC] acls rep_ssl_hello and req_ssl_hello
+ - [MEDIUM] Create new protected pattern types CONSTSTRING and CONSTDATA to force memcpy if data from protected areas need to be manipulated.
+ - [DOC] new type binary in stick-table
+ - [DOC] stick store-response and new patterns payload and payload_lv
+ - [MINOR] Manage all types (ip, integer, string, binary) on cli "show table" command
+ - [MEDIUM] Create updates tree on stick table to manage sync.
+ - [MAJOR] Add new files src/peer.c, include/proto/peers.h and include/types/peers.h for sync stick table management
+ - [MEDIUM] Manage peers section parsing and stick table registration on peers.
+ - [MEDIUM] Manage soft stop on peers proxy
+ - [DOC] add documentation for peers section
+ - [MINOR] checks: add support for LDAPv3 health checks
+ - [MINOR] add better support to "mysql-check"
+ - [BUG] Restore info about available active/backup servers
+ - [CONTRIB] Update haproxy.pl
+ - [CONTRIB] Update Cacti Templates
+ - [CONTRIB] add templates for Cacti.
+ - [BUG] http: don't consider commas as a header delimiter within quotes
+ - [MINOR] support a global jobs counter
+ - [DOC] add a summary about cookie incompatibilities between specs and browsers
+ - [DOC] fix description of cookie "insert" and "indirect" modes
+ - [MEDIUM] http: fix space handling in the request cookie parser
+ - [MEDIUM] http: fix space handling in the response cookie parser
+ - [DOC] fix typo in the queue() definition (backend, not frontend)
+ - [BUG] deinit: unbind listeners before freeing them
+ - [BUG] stream_interface: only call si->release when both dirs are closed
+ - [MEDIUM] buffers: rework the functions to exchange between SI and buffers
+ - [DOC] fix typo in the avg_queue() and be_conn() definition (backend, not frontend)
+ - [MINOR] halog: add '-tc' to sort by termination codes
+ - [MINOR] halog: skip non-traffic logs for -st and -tc
+ - [BUG] stream_sock: cleanly disable the listener in case of resource shortage
+ - [BUILD] stream_sock: previous fix lacked the #include, causing a warning.
+ - [DOC] bind option is "defer-accept", not "defer_accept"
+ - [DOC] missing index entry for http-check send-state
+ - [DOC] tcp-request inspect-delay is for backends too
+ - [BUG] ebtree: string_equal_bits() could return garbage on identical strings
+ - [BUG] stream_sock: try to flush any extra pending request data after a POST
+ - [BUILD] proto_http: eliminate some build warnings with gcc-2.95
+ - [MEDIUM] make it possible to combine http-pretend-keepalive with httpclose
+ - [MEDIUM] tcp-request : don't wait for inspect-delay to expire when the buffer is full
+ - [MEDIUM] checks: add support for HTTP contents lookup
+ - [TESTS] add test-check-expect to test various http-check methods
+ - [MINOR] global: add "tune.chksize" to change the default check buffer size
+ - [MINOR] cookie: add options "maxidle" and "maxlife"
+ - [MEDIUM] cookie: support client cookies with some contents appended to their value
+ - [MINOR] http: make some room in the transaction flags to extend cookies
+ - [MINOR] cookie: add the expired (E) and old (O) flags for request cookies
+ - [MEDIUM] cookie: reassign set-cookie status flags to store more states
+ - [MINOR] add encode/decode function for 30-bit integers from/to base64
+ - [MEDIUM] cookie: check for maxidle and maxlife for incoming dated cookies
+ - [MEDIUM] cookie: set the date in the cookie if needed
+ - [DOC] document the cookie maxidle and maxlife parameters
+ - [BUG] checks: don't log backend down for all zero-weight servers
+ - [MEDIUM] checks: set server state to one state from failure when leaving maintenance
+ - [BUG] config: report correct keywords for "observe"
+ - [MINOR] checks: ensure that we can inherit binary checks from the defaults section
+ - [MINOR] acl: add the http_req_first match
+ - [DOC] fix typos about bind-process syntax
+ - [BUG] cookie: correctly unset default cookie parameters
+ - [MINOR] cookie: add support for the "preserve" option
+ - [BUG] ebtree: fix duplicate strings insertion
+ - [CONTRIB] halog: report per-url counts, errors and times
+ - [CONTRIB] halog: minor speed improvement in timer parser
+ - [MINOR] buffers: add a new request analyser flag for PROXY mode
+ - [MINOR] listener: add the "accept-proxy" option to the "bind" keyword
+ - [MINOR] standard: add read_uint() to parse a delimited unsigned integer
+ - [MINOR] standard: change arg type from const char* to char*
+ - [MINOR] frontend: add a new analyser to parse a proxied connection
+ - [MEDIUM] session: call the frontend_decode_proxy analyser on proxied connections
+ - [DOC] add the proxy protocol's specifications
+ - [DOC] document the 'accept-proxy' bind option
+ - [MINOR] cfgparse: report support of <path> for the 'bind' statements
+ - [DOC] add references to unix socket handling
+ - [MINOR] move MAXPATHLEN definition to compat.h
+ - [MEDIUM] unix sockets: cleanup the error reporting path
+ - [BUG] session: don't stop forwarding of data upon last packet
+ - [CLEANUP] accept: replace some inappropriate Alert() calls with send_log()
+ - [BUILD] peers: shut a printf format warning (key_size is a size_t)
+ - [BUG] accept: don't close twice upon error
+ - [OPTIM] session: don't recheck analysers when buffer flags have not changed
+ - [OPTIM] stream_sock: don't clear FDs that are already cleared
+ - [BUG] proto_tcp: potential bug on pattern fetch dst and dport
+
+2010/08/28 : 1.5-dev2
+ - [MINOR] startup: release unused structs after forking
+ - [MINOR] startup: don't wait for nothing when no old pid remains
+ - [CLEANUP] reference product branch 1.5
+ - [MEDIUM] signals: add support for registering functions and tasks
+ - [MEDIUM] signals: support redistribution of signal zero when stopping
+ - [BUG] http: don't set auto_close if more data are expected
+
+2010/08/25 : 1.5-dev1
+ - [BUG] stats: session rate limit gets garbaged in the stats
+ - [DOC] mention 'option http-server-close' effect in Tq section
+ - [DOC] summarize and highlight persistent connections behaviour
+ - [DOC] add configuration samples
+ - [BUG] http: dispatch and http_proxy modes were broken for a long time
+ - [BUG] http: the transaction must be initialized even in TCP mode
+ - [BUG] tcp: dropped connections must be counted as "denied" not "failed"
+ - [BUG] consistent hash: balance on all servers, not only 2 !
+ - [CONTRIB] halog: report per-server status codes, errors and response times
+ - [BUG] http: the transaction must be initialized even in TCP mode (part 2)
+ - [BUG] client: always ensure to zero rep->analysers
+ - [BUG] session: clear BF_READ_ATTACHED before next I/O
+ - [BUG] http: automatically close response if req is aborted
+ - [BUG] proxy: connection rate limiting was eating lots of CPU
+ - [BUG] http: report correct flags in case of client aborts during body
+ - [TESTS] refine non-regression tests and add 4 new tests
+ - [BUG] debug: wrong pointer was used to report a status line
+ - [BUG] debug: correctly report truncated messages
+ - [DOC] document the "dispatch" keyword
+ - [BUG] stick_table: fix possible memory leak in case of connection error
+ - [CLEANUP] acl: use 'L6' instead of 'L4' in ACL flags relying on contents
+ - [MINOR] accept: count the incoming connection earlier
+ - [CLEANUP] tcp: move some non tcp-specific layer6 processing out of proto_tcp
+ - [CLEANUP] client: move some ACLs away to their respective locations
+ - [CLEANUP] rename client -> frontend
+ - [MEDIUM] separate protocol-level accept() from the frontend's
+ - [MINOR] proxy: add a list to hold future layer 4 rules
+ - [MEDIUM] config: parse tcp layer4 rules (tcp-request accept/reject)
+ - [MEDIUM] tcp: check for pure layer4 rules immediately after accept()
+ - [OPTIM] frontend: tell the compiler that errors are unlikely to occur
+ - [MEDIUM] frontend: check for LI_O_TCP_RULES in the listener
+ - [MINOR] frontend: only check for monitor-net rules if LI_O_CHK_MONNET is set
+ - [CLEANUP] buffer->cto is not used anymore
+ - [MEDIUM] session: finish session establishment sequence with I/O handlers
+ - [MEDIUM] session: initialize server-side timeouts after connect()
+ - [MEDIUM] backend: initialize the server stream_interface upon connect()
+ - [MAJOR] frontend: don't initialize the server-side stream_int anymore
+ - [MEDIUM] session: move the conn_retries attribute to the stream interface
+ - [MEDIUM] session: don't assign conn_retries upon accept() anymore
+ - [MINOR] frontend: rely on the frontend and not the backend for INDEPSTR
+ - [MAJOR] frontend: reorder the session initialization upon accept
+ - [MINOR] proxy: add an accept() callback for the application layer
+ - [MAJOR] frontend: split accept() into frontend_accept() and session_accept()
+ - [MEDIUM] stats: rely on the standard session_accept() function
+ - [MINOR] buffer: refine the flags that may wake an analyser up.
+ - [MINOR] stream_sock: don't dereference a non-existing frontend
+ - [MINOR] session: differentiate between accepted connections and received connections
+ - [MEDIUM] frontend: count the incoming connection earlier
+ - [MINOR] frontend: count denied TCP requests separately
+ - [CLEANUP] stick_table: add/clarify some comments
+ - [BUILD] memory: add a few missing parenthesis to the pool management macros
+ - [MINOR] stick_table: add support for variable-sized data
+ - [CLEANUP] stick_table: rename some stksess struct members to avoid confusion
+ - [CLEANUP] stick_table: move pattern to key functions to stick_table.c
+ - [MEDIUM] stick_table: add room for extra data types
+ - [MINOR] stick_table: add support for "conn_cum" data type.
+ - [MEDIUM] stick_table: don't overwrite data when storing an entry
+ - [MINOR] config: initialize stick tables after all the parsing
+ - [MINOR] stick_table: provide functions to return stksess data from a type
+ - [MEDIUM] stick_table: move the server ID to a generic data type
+ - [MINOR] stick_table: enable it for frontends too
+ - [MINOR] stick_table: export the stick_table_key
+ - [MINOR] tcp: add per-source connection rate limiting
+ - [MEDIUM] stick_table: separate storage and update of session entries
+ - [MEDIUM] stick-tables: add a reference counter to each entry
+ - [MINOR] session: add a pointer to the tracked counters for the source
+ - [CLEANUP] proto_tcp: make the config parser a little bit more flexible
+ - [BUG] config: report the correct proxy type in tcp-request errors
+ - [MINOR] config: provide a function to quote args in a more friendly way
+ - [BUG] stick_table: the fix for the memory leak caused a regression
+ - [MEDIUM] backend: support servers on 0.0.0.0
+ - [BUG] stick-table: correctly refresh expiration timers
+ - [MEDIUM] stream-interface: add a ->release callback
+ - [MINOR] proxy: add a "parent" member to the structure
+ - [MEDIUM] session: make it possible to call an I/O handler on both SI
+ - [MINOR] tools: add a fast div64_32 function
+ - [MINOR] freq_ctr: add new types and functions for periods different from 1s
+ - [MINOR] errors: provide new status codes for config parsing functions
+ - [BUG] http: denied requests must not be counted as denied resps in listeners
+ - [MINOR] tools: add a get_std_op() function to parse operators
+ - [MEDIUM] acl: make use of get_std_op() to parse integer ranges
+ - [MAJOR] stream_sock: better wakeup conditions on read()
+ - [BUG] session: analysers must be checked when SI state changes
+ - [MINOR] http: reset analysers to listener's, not frontend's
+ - [MEDIUM] session: support "tcp-request content" rules in backends
+ - [BUILD] always match official tags when doing git-tar
+ - [MAJOR] stream_interface: fix the wakeup conditions for embedded iohandlers
+ - [MEDIUM] buffer: make buffer_feed* support writing non-contiguous chunks
+ - [MINOR] tcp: src_count acl does not have a permanent result
+ - [MAJOR] session: add track-counters to track counters related to the session
+ - [MINOR] stick-table: provide a table lookup function
+ - [MINOR] stick-table: use suffix "_cnt" for cumulated counts
+ - [MEDIUM] session: move counter ACL fetches from proto_tcp
+ - [MEDIUM] session: add concurrent connections counter
+ - [MEDIUM] session: add data in and out volume counters
+ - [MINOR] session: add the trk_conn_cnt ACL keyword to track connection counts
+ - [MEDIUM] session-counters: automatically update tracked connection count
+ - [MINOR] session: add the trk_conn_cur ACL keyword to track concurrent connection
+ - [MINOR] session: add trk_kbytes_* ACL keywords to track data size
+ - [MEDIUM] session: add a counter on the cumulated number of sessions
+ - [MINOR] config: support a comma-separated list of store data types in stick-table
+ - [MEDIUM] stick-tables: add support for arguments to data_types
+ - [MEDIUM] stick-tables: add stored data argument type checking
+ - [MEDIUM] session counters: add conn_rate and sess_rate counters
+ - [MEDIUM] session counters: add bytes_in_rate and bytes_out_rate counters
+ - [MINOR] stktable: add a stktable_update_key() function
+ - [MINOR] session-counters: add a general purpose counter (gpc0)
+ - [MEDIUM] session-counters: add HTTP req/err tracking
+ - [MEDIUM] stats: add "show table [<name>]" to dump a stick-table
+ - [MEDIUM] stats: add "clear table <name> key <value>" to clear table entries
+ - [CLEANUP] stick-table: declare stktable_data_types as extern
+ - [MEDIUM] stick-table: make use of generic types for stored data
+ - [MINOR] stats: correctly report errors on "show table" and "clear table"
+ - [MEDIUM] stats: add the ability to dump table entries matching criteria
+ - [DOC] configuration: document all the new tracked counters
+ - [DOC] stats: document "show table" and "clear table"
+ - [MAJOR] session-counters: split FE and BE track counters
+ - [MEDIUM] tcp: accept the "track-counters" in "tcp-request content" rules
+ - [MEDIUM] session counters: automatically remove expired entries.
+ - [MEDIUM] config: replace 'tcp-request <action>' with "tcp-request connection"
+ - [MEDIUM] session-counters: make it possible to count connections from frontend
+ - [MINOR] session-counters: use "track-sc{1,2}" instead of "track-{fe,be}-counters"
+ - [MEDIUM] session-counters: correctly unbind the counters tracked by the backend
+ - [CLEANUP] stats: use stksess_kill() to remove table entries
+ - [DOC] update the references to session counters and to tcp-request connection
+ - [DOC] cleanup: split a few long lines
+ - [MEDIUM] http: forward client's close when abortonclose is set
+ - [BUG] queue: don't dequeue proxy-global requests on disabled servers
+ - [BUG] stats: global stats timeout may be specified before stats socket.
+ - [BUG] conf: add tcp-request content rules to the correct list
+
+2010/05/23 : 1.5-dev0
+ - exact copy of 1.4.6
+
+2010/05/16 : 1.4.6
+ - [BUILD] ebtree: update to v6.0.1 to remove references to dprintf()
+ - [CLEANUP] acl: make use of eb_is_empty() instead of open coding the tree's emptiness test
+ - [MINOR] acl: add srv_is_up() to check that a specific server is up or not
+ - [DOC] add a few precisions about the use of RDP cookies
+
+2010/05/13 : 1.4.5
+ - [DOC] report minimum kernel version for tproxy in the Makefile
+ - [MINOR] add the "ignore-persist" option to conditionally ignore persistence
+ - [DOC] add the "ignore-persist" option to conditionally ignore persistence
+ - [DOC] fix ignore-persist/force-persist documentation
+ - [BUG] cttproxy: socket fd leakage in check_cttproxy_version
+ - [DOC] doc/configuration.txt: fix typos
+ - [MINOR] option http-pretend-keepalive is both for FEs and BEs
+ - [MINOR] fix possible crash in debug mode with invalid responses
+ - [MINOR] halog: add support for statistics on status codes
+ - [OPTIM] halog: use a faster zero test in fgets()
+ - [OPTIM] halog: minor speedup by using unlikely()
+ - [OPTIM] halog: speed up fgets2-64 by about 10%
+ - [DOC] refresh the README file and merge the CONTRIB file into it
+ - [MINOR] acl: support loading values from files
+ - [MEDIUM] ebtree: upgrade to version 6.0
+ - [MINOR] acl trees: add flags and union members to store values in trees
+ - [MEDIUM] acl: add ability to insert patterns in trees
+ - [MEDIUM] acl: add tree-based lookups of exact strings
+ - [MEDIUM] acl: add tree-based lookups of networks
+ - [MINOR] acl: ignore empty lines and comments in pattern files
+ - [MINOR] stick-tables: add support for "stick on hdr"
+
+2010/04/07 : 1.4.4
+ - [BUG] appsession should match the whole cookie name
+ - [CLEANUP] proxy: move PR_O_SSL3_CHK to options2 to release one flag
+ - [MEDIUM] backend: move the transparent proxy address selection to backend
+ - [MINOR] add very fast IP parsing functions
+ - [MINOR] add new tproxy flags for dynamic source address binding
+ - [MEDIUM] add ability to connect to a server from an IP found in a header
+ - [BUILD] config: last patch breaks build without CONFIG_HAP_LINUX_TPROXY
+ - [MINOR] http: make it possible to pretend keep-alive when doing close
+ - [MINOR] config: report "default-server" instead of "(null)" in error messages
+
+2010/03/30 : 1.4.3
+ - [CLEANUP] stats: remove printf format warning in stats_dump_full_sess_to_buffer()
+ - [MEDIUM] session: better fix for connection to servers with closed input
+ - [DOC] indicate in the doc how to bind to port ranges
+ - [BUG] backend: L7 hashing must not be performed on incomplete requests
+ - [TESTS] add a simple program to test connection resets
+ - [MINOR] cli: "show errors" should display "backend <NONE>" when backend was not used
+ - [MINOR] config: emit warnings when HTTP-only options are used in TCP mode
+ - [MINOR] config: allow "slowstart 0s"
+ - [BUILD] 'make tags' did not consider files ending in '.c'
+ - [MINOR] checks: add the ability to disable a server in the config
+
+2010/03/17 : 1.4.2
+ - [CLEANUP] product branch update
+ - [DOC] Some more documentation cleanups
+ - [BUG] clf logs segfault when capturing a non-existent header
+ - [OPTIM] config: only allocate check buffer when checks are enabled
+ - [MEDIUM] checks: support multi-packet health check responses
+ - [CLEANUP] session: remove duplicate test
+ - [BUG] http: don't wait for response data to leave buffer if client has left
+ - [MINOR] proto_uxst: set accept_date upon accept() to the wall clock time
+ - [MINOR] stats: don't send empty lines in "show errors"
+ - [MINOR] stats: make the data dump function reusable for other purposes
+ - [MINOR] stats socket: add show sess <id> to dump details about a session
+ - [BUG] stats: connection reset counters must be plain ascii, not HTML
+ - [BUG] url_param hash may return a down server
+ - [MINOR] force null-termination of hostname
+ - [MEDIUM] connect to servers even when the input has already been closed
+ - [BUG] don't merge anonymous ACLs !
+ - [BUG] config: fix endless loop when parsing "on-error"
+ - [MINOR] http: don't mark a server as failed when it returns 501/505
+ - [OPTIM] checks: try to detect the end of response without polling again
+ - [BUG] checks: don't report an error when recv() returns an error after data
+ - [BUG] checks: don't abort when second poll returns an error
+ - [MINOR] checks: make shutdown() silently fail
+ - [BUG] http: fix truncated responses on chunk encoding when size divides buffer size
+ - [BUG] init: unconditionally catch SIGPIPE
+ - [BUG] checks: don't wait for a close to start parsing the response
+
+2010/03/04 : 1.4.1
+ - [BUG] Clear-cookie path issue
+ - [DOC] fix typo on stickiness rules
+ - [BUILD] fix BSD and OSX makefiles for missing files
+ - [BUILD] includes order breaks OpenBSD build
+ - [BUILD] fix some build warnings on Solaris with is* macros
+ - [BUG] logs: don't report "last data" when we have just closed after an error
+ - [BUG] logs: don't report "proxy request" when server closes early
+ - [BUILD] fix platform-dependent build issues related to crypt()
+ - [STATS] count transfer aborts caused by client and by server
+ - [STATS] frontend requests were not accounted for failed requests
+ - [MINOR] report total number of processed connections when stopping a proxy
+ - [DOC] be more clear about the limitation to one single monitor-net entry
+
+2010/02/26 : 1.4.0
+ - [MINOR] stats: report maint state for tracking servers too
+ - [DOC] fix summary to add pattern extraction
+ - [DOC] Documentation cleanups
+ - [BUG] cfgparse memory leak and missing free calls in deinit()
+ - [BUG] pxid/puid/luid: don't shift IDs when some of them are forced
+ - [EXAMPLES] add auth.cfg
+ - [BUG] uri_auth: ST_SHLGNDS should be 0x00000008 not 0x0000008
+ - [BUG] uri_auth: do not attempt to convert uri_auth -> http-request more than once
+ - [BUILD] auth: don't use unnamed unions
+ - [BUG] config: report unresolvable host names as errors
+ - [BUILD] fix build breakage with DEBUG_FULL
+ - [DOC] fix a typo about timeout check and clarify the explanation.
+ - [MEDIUM] http: don't use trash to realign large buffers
+ - [STATS] report HTTP requests (total and rate) in frontends
+ - [STATS] separate frontend and backend HTTP stats
+ - [MEDIUM] http: revert to use a swap buffer for realignment
+ - [MINOR] stats: report the request rate in frontends as cell titles
+ - [MINOR] stats: mark areas with an underline when tooltips are available
+ - [DOC] reorder some entries to maintain the alphabetical order
+ - [DOC] cleanup of the keyword matrix
+
+2010/02/02 : 1.4-rc1
+ - [MEDIUM] add a maintenance mode to servers
+ - [MINOR] http-auth: last fix was wrong
+ - [CONTRIB] add base64rev-gen.c that was used to generate the base64rev table.
+ - [MINOR] Base64 decode
+ - [MINOR] generic auth support with groups and encrypted passwords
+ - [MINOR] add ACL_TEST_F_NULL_MATCH
+ - [MINOR] http-request: allow/deny/auth support for frontend/backend/listen
+ - [MINOR] acl: add http_auth and http_auth_group
+ - [MAJOR] use the new auth framework for http stats
+ - [DOC] add info about userlists, http-request and http_auth/http_auth_group acls
+ - [STATS] make it possible to change a CLI connection timeout
+ - [BUG] patterns: copy-paste typo in type conversion arguments
+ - [MINOR] pattern: make the converter more flexible by supporting void* and int args
+ - [MINOR] standard: str2mask: string to netmask converter
+ - [MINOR] pattern: add support for argument parsers for converters
+ - [MINOR] pattern: add the "ipmask()" converting function
+ - [MINOR] config: off-by-one in "stick-table" after list of converters
+ - [CLEANUP] acl, patterns: make use of my_strndup() instead of malloc+memcpy
+ - [BUG] restore accidentally removed line in last patch !
+ - [MINOR] checks: make the HTTP check code add the CRLF itself
+ - [MINOR] checks: add the server's status in the checks
+ - [BUILD] halog: make without arch-specific optimizations
+ - [BUG] halog: fix segfault in case of empty log in PCT mode (cherry picked from commit fe362fe4762151d209b9656639ee1651bc2b329d)
+ - [MINOR] http: disable keep-alive when process is going down
+ - [MINOR] acl: add build_acl_cond() to make it easier to add ACLs in config
+ - [CLEANUP] config: use build_acl_cond() instead of parse_acl_cond()
+ - [CLEANUP] config: use warnif_cond_requires_resp() to check for bad ACLs
+ - [MINOR] prepare req_*/rsp_* to receive a condition
+ - [CLEANUP] config: specify correct const char types to warnif_* functions
+ - [MEDIUM] config: factor out the parsing of 20 req*/rsp* keywords
+ - [MEDIUM] http: make the request filter loop check for optional conditions
+ - [MEDIUM] http: add support for conditional request filter execution
+ - [DOC] add some build info about the AIX platform (cherry picked from commit e41914c77edbc40aebf827b37542d37d758e371e)
+ - [MEDIUM] http: add support for conditional request header addition
+ - [MEDIUM] http: add support for conditional response header rewriting
+ - [DOC] add some missing ACLs about response header matching
+ - [MEDIUM] http: add support for proxy authentication
+ - [MINOR] http-auth: make the 'unless' keyword work as expected
+ - [CLEANUP] config: use build_acl_cond() to simplify http-request ACL parsing
+ - [MEDIUM] add support for anonymous ACLs
+ - [MEDIUM] http: switch to tunnel mode after status 101 responses
+ - [MEDIUM] http: stricter processing of the CONNECT method
+ - [BUG] config: reset check request to avoid double free when switching to ssl/sql
+ - [MINOR] config: fix too large ssl-hello-check message.
+ - [BUG] fix error response in case of server error
+
+2010/01/25 : 1.4-dev8
+ - [CLEANUP] Keep in sync "defaults" support between documentation and code
+ - [MEDIUM] http: add support for Proxy-Connection header
+ - [CRITICAL] buffers: buffer_insert_line2 must not change the ->w entry
+ - [MINOR] http: remove a copy-paste typo in transaction cleaning
+ - [BUG] http: trim any excess buffer data when recycling a connection
+
+2010/01/25 : 1.4-dev7
+ - [BUG] appsession: possible memory leak in case of out of memory condition
+ - [MINOR] config: don't accept 'appsession' in defaults section
+ - [MINOR] Add function to parse a size in configuration
+ - [MEDIUM] Add stick table (persistence) management functions and types
+ - [MEDIUM] Add pattern fetch management types and functions
+ - [MEDIUM] Add src dst and dport pattern fetches.
+ - [MEDIUM] Add stick table configuration and init.
+ - [MEDIUM] Add stick and store rules analysers.
+ - [MINOR] add option "mysql-check" to use MySQL health checks
+ - [BUG] health checks: fix requeued message
+ - [OPTIM] remove SSP_O_VIA and SSP_O_STATUS
+ - [BUG] checks: fix newline termination
+ - [MINOR] acl: add fe_id/so_id to match frontend's and socket's id
+ - [BUG] appsession's sessid must be reset at end of transaction
+ - [BUILD] appsession did not build anymore under gcc-2.95
+ - [BUG] server redirection used an uninitialized string.
+ - [MEDIUM] http: fix handling of message pointers
+ - [MINOR] http: fix double slash prefix with server redirect
+ - [MINOR] http redirect: add the ability to append a '/' to the URL
+ - [BUG] stream_interface: fix retnclose and remove cond_close
+ - [MINOR] http redirect: don't explicitly state keep-alive on 1.1
+ - [MINOR] http: move appsession 'sessid' from session to http_txn
+ - [OPTIM] reorder http_txn to optimize cache lines placement
+ - [MINOR] http: differentiate waiting for new request and waiting for a complete request
+ - [MINOR] http: add a separate "http-keep-alive" timeout
+ - [MINOR] config: remove undocumented and buggy 'timeout appsession'
+ - [DOC] fix various too large lines
+ - [DOC] remove several trailing spaces
+ - [DOC] add the doc about stickiness
+ - [BUILD] remove a warning in standard.h on AIX
+ - [BUG] checks: chars are unsigned on AIX, check was always true
+ - [CLEANUP] stream_sock: MSG_NOSIGNAL is only for send(), not recv()
+ - [BUG] check: we must not check for error before reading a response
+ - [BUG] buffers: remove remains of wrong obsolete length check
+ - [OPTIM] stream_sock: don't shutdown(write) when the socket is in error
+ - [BUG] http: don't count req errors on client resets or t/o during keep-alive
+ - [MEDIUM] http: don't switch to tunnel mode upon close
+ - [DOC] add documentation about connection header processing
+ - [MINOR] http: add http_remove_header2() to remove a header value.
+ - [MINOR] tools: add a "word_match()" function to match words and ignore spaces
+ - [MAJOR] http: rework request Connection header handling
+ - [MAJOR] http: rework response Connection header handling
+ - [MINOR] add the ability to force kernel socket buffer size.
+ - [BUG] http_server_error() must not purge a previous pending response
+ - [OPTIM] http: don't delay response if next request is incomplete
+ - [MINOR] add the "force-persist" statement to force persistence on down servers
+ - [MINOR] http: logs must report persistent connections to down servers
+ - [BUG] buffer_replace2 must never change the ->w entry
+
+2010/01/08 : 1.4-dev6
+ - [BUILD] warning in stream_interface.h
+ - [BUILD] warning ultoa_r returns char *
+ - [MINOR] hana: only report stats if it is enabled
+ - [MINOR] stats: add "a link" & "a href" for sockets
+ - [MINOR]: stats: add show-legends to report additional information
+ - [MEDIUM] default-server support
+ - [BUG]: add 'observer', 'on-error', 'error-limit' to supported options list
+ - [MINOR] stats: add href to tracked server
+ - [BUG] stats: show UP/DOWN status also in tracking servers
+ - [DOC] Restore ability to search a keyword at the beginning of a line
+ - [BUG] stats: cookie should be reported under backend not under proxy
+ - [BUG] cfgparser/stats: fix error message
+ - [BUG] http: disable auto-closing during chunk analysis
+ - [BUG] http: fix hopefully last closing issue on data forwarding
+ - [DEBUG] add an http_silent_debug function to debug HTTP states
+ - [MAJOR] http: fix again the forward analysers
+ - [BUG] http_process_res_common() must not skip the forward analyser
+ - [BUG] http: some possible missed close remain in the forward chain
+ - [BUG] http: redirect needed to be updated after recent changes
+ - [BUG] http: don't set no-linger on response in case of forced close
+ - [MEDIUM] http: restore the original behaviour of option httpclose
+ - [TESTS] add a file to test various connection modes
+ - [BUG] http: check options before the connection header
+ - [MAJOR] session: fix the order by which the analysers are run
+ - [MEDIUM] session: also consider request analysers added during response
+ - [MEDIUM] http: make safer use of the DONT_READ and AUTO_CLOSE flags
+ - [BUG] http: memory leak with captures when using keep-alive
+ - [BUG] http: fix for capture memory leak was incorrect
+ - [MINOR] http redirect: use proper call to return last response
+ - [MEDIUM] http: wait for some flush of the response buffer before a new request
+ - [MEDIUM] session: limit the number of analyser loops
+
+2010/01/03 : 1.4-dev5
+ - [MINOR] server tracking: don't care about the tracked server's mode
+ - [MEDIUM] appsession: add "len", "prefix" and "mode" options
+ - [MEDIUM] appsession: add the "request-learn" option
+ - [BUG] Configuration parser bug when escaping characters
+ - [MINOR] CSS & HTML fun
+ - [MINOR] Collect & provide http response codes received from servers
+ - [BUG] Fix silly typo: hspr_other -> hrsp_other
+ - [MINOR] Add "a name" to stats page
+ - [MINOR] add additional "a href"s to stats page
+ - [MINOR] Collect & provide http response codes for frontends, fix backends
+ - [DOC] some small spell fixes and unifications
+ - [MEDIUM] Decrease server health based on http responses / events, version 3
+ - [BUG] format '%d' expects type 'int', but argument 5 has type 'long int'
+ - [BUG] config: fix erroneous check on cookie domain names, again
+ - [BUG] Healthchecks: get a proper error code if connection cannot be completed immediately
+ - [DOC] trivial fix for man page
+ - [MINOR] config: report all supported options for the "bind" keyword
+ - [MINOR] tcp: add support for the defer_accept bind option
+ - [MINOR] unix socket: report the socket path in case of bind error
+ - [CONTRIB] halog: support searching by response time
+ - [DOC] add a reminder about obsolete documents
+ - [DOC] point to 1.4 doc, not 1.3
+ - [DOC] option tcp-smart-connect was missing from index
+ - [MINOR] http: detect connection: close earlier
+ - [CLEANUP] sepoll: clean up the fd_clr/fd_set functions
+ - [OPTIM] move some rarely used fields out of fdtab
+ - [MEDIUM] fd: merge fd_list into fdtab
+ - [MAJOR] buffer: flag BF_DONT_READ to disable reads when not required
+ - [MINOR] http: add new transaction flags for keep-alive and content-length
+ - [MEDIUM] http request: parse connection, content-length and transfer-encoding
+ - [MINOR] http request: update the TX_SRV_CONN_KA flag on rewrite
+ - [MINOR] http request: simplify the test of no-data
+ - [MEDIUM] http request: simplify POST length detection
+ - [MEDIUM] http request: make use of pre-parsed transfer-encoding header
+ - [MAJOR] http: create the analyser which waits for a response
+ - [MINOR] http: pre-set the persistent flags in the transaction
+ - [MEDIUM] http response: check body length and set transaction flags
+ - [MINOR] http response: update the TX_CLI_CONN_KA flag on rewrite
+ - [MINOR] http: remove the last call to stream_int_return
+ - [IMPORT] import ebtree v5.0 into directory ebtree/
+ - [MEDIUM] build: switch ebtree users to use new ebtree version
+ - [CLEANUP] ebtree: remove old unused files
+ - [BUG] definitely fix regparm issues between haproxy core and ebtree
+ - [CLEANUP] ebtree: cast to char * to get rid of gcc warning
+ - [BUILD] missing #ifndef in ebmbtree.h
+ - [BUILD] missing #ifndef in ebsttree.h
+ - [MINOR] tools: add hex2i() function to convert hex char to int
+ - [MINOR] http: create new MSG_BODY sub-states
+ - [BUG] stream_sock: BUF_INFINITE_FORWARD broke splice on 64-bit platforms
+ - [DOC] option is "defer-accept", not "defer_accept"
+ - [MINOR] http: keep pointer to beginning of data
+ - [BUG] x-original-to: name was not set in default instance
+ - [MINOR] http: detect tunnel mode and set it in the session
+ - [BUG] config: fix error message when config file is not found
+ - [BUG] config: fix wrong handling of too large argument count
+ - [BUG] config: disable 'option httplog' on TCP proxies
+ - [BUG] config: fix erroneous check on cookie domain names
+ - [BUG] config: cookie domain was ignored in defaults sections
+ - [MINOR] config: support passing multiple "domain" statements to cookies
+ - [MINOR] ebtree: add functions to lookup non-null terminated strings
+ - [MINOR] config: don't report error on all subsequent files on failure
+ - [BUG] second fix for the printf format warning
+ - [BUG] check_post: limit analysis to the buffer length
+ - [MEDIUM] http: process request body in a specific analyser
+ - [MEDIUM] backend: remove HTTP POST parsing from get_server_ph_post()
+ - [MAJOR] http: completely process the "connection" header
+ - [MINOR] http: only consider chunk encoding with HTTP/1.1
+ - [MAJOR] buffers: automatically compute the maximum buffer length
+ - [MINOR] http: move the http transaction init/cleanup code to proto_http
+ - [MINOR] http: move 1xx handling earlier to eliminate a lot of ifs
+ - [MINOR] http: introduce a new synchronisation state : HTTP_MSG_DONE
+ - [MEDIUM] http: rework chunk-size parser
+ - [MEDIUM] http: add a new transaction flags indicating if we know the transfer length
+ - [MINOR] buffers: add buffer_ignore() to skip some bytes
+ - [BUG] http: offsets are relative to the buffer, not to ->som
+ - [MEDIUM] http: automatically re-align request buffer
+ - [BUG] http: body parsing must consider the start of message
+ - [MINOR] new function stream_int_cond_close()
+ - [MAJOR] http: implement body parser
+ - [BUG] http: typos on several unlikely() around header insertion
+ - [BUG] stream_sock: wrong max computation on recv
+ - [MEDIUM] http: rework the buffer alignment logic
+ - [BUG] buffers: wrong size calculation for displaced data
+ - [MINOR] stream_sock: prepare for closing when all pending data are sent
+ - [MEDIUM] http: add two more states for the closing period
+ - [MEDIUM] http: properly handle "option forceclose"
+ - [MINOR] stream_sock: add SI_FL_NOLINGER for faster close
+ - [MEDIUM] http: make forceclose use SI_FL_NOLINGER
+ - [MEDIUM] session: set SI_FL_NOLINGER when aborting on write timeouts
+ - [MEDIUM] http: add some SI_FL_NOLINGER around server errors
+ - [MINOR] config: option forceclose is valid in frontends too
+ - [BUILD] halog: insufficient include path in makefile
+ - [MEDIUM] http: make the analyser not rely on msg being initialized anymore
+ - [MEDIUM] http: make the parsers able to wait for a buffer flush
+ - [MAJOR] http: add support for option http-server-close
+ - [BUG] http: ensure we abort data transfer on write error
+ - [BUG] last fix was overzealous and disabled server-close
+ - [BUG] http: fix erroneous trailers size computation
+ - [MINOR] stream_sock: enable MSG_MORE when forwarding finite amount of data
+ - [OPTIM] http: set MSG_MORE on response when a pipelined request is pending
+ - [BUG] http: redirects were broken by chunk changes
+ - [BUG] http: the request URI pointer is relative to the buffer
+ - [OPTIM] http: don't immediately enable reading on request
+ - [MINOR] http: move redirect messages to HTTP/1.1 with a content-length
+ - [BUG] http: take care of errors, timeouts and aborts during the data phase
+ - [MINOR] http: don't wait for sending requests to the server
+ - [MINOR] http: make the conditional redirect support keep-alive
+ - [BUG] http: fix cookie parser to support spaces and commas in values
+ - [MINOR] config: some options were missing for "redirect"
+ - [MINOR] redirect: add support for unconditional rules
+ - [MINOR] config: centralize proxy struct initialization
+ - [MEDIUM] config: remove the limitation of 10 reqadd/rspadd statements
+ - [MEDIUM] config: remove the limitation of 10 config files
+ - [CLEANUP] http: remove a remaining impossible condition
+ - [OPTIM] http: optimize a bit the construct of the forward loops
+
+2009/10/12 : 1.4-dev4
+ - [DOC] add missing rate_lim and rate_max
+ - [MAJOR] struct chunk rework
+ - [MEDIUM] Health check reporting code rework + health logging, v3
+ - [BUG] check if rise/fall has an argument and it is > 0
+ - [MINOR] health checks logging unification
 - [MINOR] add "description", "node" and "show-node"/"show-desc", remove "node-name", v2
+ - [MINOR] Allow dots in show-node & add "white-space: nowrap" in th.pxname.
+ - [DOC] Add information about http://haproxy.1wt.eu/contrib.html
+ - [MINOR] Introduce include/types/counters.h
+ - [CLEANUP] Move counters to dedicated structures
+ - [MINOR] Add "clear counters" to clear statistics counters
+ - [MEDIUM] Collect & provide separate statistics for sockets, v2
+ - [BUG] Fix NULL pointer dereference in stats_check_uri_auth(), v2
+ - [MINOR] acl: don't report valid acls as potential mistakes
+ - [MINOR] Add cut_crlf(), ltrim(), rtrim() and alltrim()
+ - [MINOR] Add chunk_htmlencode and chunk_asciiencode
+ - [MINOR] Capture & display more data from health checks, v2
 - [BUG] task.c: don't assign last_timer to node-less entries
+ - [BUG] http stats: large outputs sometimes got some parts chopped off
+ - [MINOR] backend: export some functions to recount servers
+ - [MINOR] backend: uninline some LB functions
 - [MINOR] include time.h from freq_ctr.h as it uses "now".
+ - [CLEANUP] backend: move LB algos to individual files
+ - [MINOR] lb_map: reorder code in order to ease integration of new hash functions
+ - [CLEANUP] proxy: move last lb-specific bits to their respective files
+ - [MINOR] backend: separate declarations of LB algos from their lookup method
+ - [MINOR] backend: reorganize the LB algorithm selection
+ - [MEDIUM] backend: introduce the "static-rr" LB algorithm
+ - [MINOR] report list of supported pollers with -vv
+ - [DOC] log-health-checks is an option, not a directive
+ - [MEDIUM] new option "independant-streams" to stop updating read timeout on writes
+ - [BUG] stats: don't call buffer_shutw(), but ->shutw() instead
+ - [MINOR] stats: strip CR and LF from the input command line
+ - [BUG] don't refresh timeouts late after detected activity
+ - [MINOR] stats_dump_errors_to_buffer: use buffer_feed_chunk()
+ - [MINOR] stats_dump_sess_to_buffer: use buffer_feed_chunk()
+ - [MINOR] stats: make stats_dump_raw_to_buffer() use buffer_feed_chunk
+ - [MEDIUM] stats: don't use s->ana_state anymore
+ - [MINOR] remove now obsolete ana_state from the session struct
+ - [MEDIUM] stats: make HTTP stats use an I/O handler
+ - [MEDIUM] stream_int: adjust WAIT_ROOM handling
+ - [BUG] config: look for ID conflicts in all sockets, not only last ones.
+ - [MINOR] config: reference file and line with any listener/proxy/server declaration
+ - [MINOR] config: report places of duplicate names or IDs
+ - [MINOR] config: add pointer to file name in block/redirect/use_backend/monitor rules
+ - [MINOR] tools: add a new get_next_id() function
+ - [MEDIUM] config: automatically find unused IDs for proxies, servers and listeners
+ - [OPTIM] counters: move some max numbers to the counters struct
+ - [BUG] counters: fix segfault on missing counters for a listener
+ - [MEDIUM] backend: implement consistent hashing variation
+ - [MINOR] acl: add fe_conn, be_conn, queue, avg_queue
+ - [MINOR] stats: use 'clear counters all' to clear all values
+ - [MEDIUM] add access restrictions to the stats socket
+ - [MINOR] buffers: add buffer_feed2() and make buffer_feed() measure string length
+ - [MINOR] proxy: provide function to retrieve backend/server pointers
+ - [MINOR] add the "initial weight" to the server struct.
+ - [MEDIUM] stats: add the "get weight" command to report a server's weight
+ - [MEDIUM] stats: add the "set weight" command
+ - [BUILD] add a 'make tags' target
+ - [MINOR] stats: add support for numeric IDs in set weight/get weight
+ - [MINOR] stats: use a dedicated state to output static data
+ - [OPTIM] stats: check free space before trying to print
+
+2009/09/24 : 1.4-dev3
+ - [BUILD] compilation of haproxy-1.4-dev2 on FreeBSD
+ - [MEDIUM] Collect & show information about last health check, v3
+ - [MINOR] export the hostname variable so that all the code can access it
+ - [MINOR] stats: add a new node-name setting
+ - [MEDIUM] remove old experimental tcpsplice option
+ - [BUILD] fix build for systems without SOL_TCP
+ - [MEDIUM] move connection establishment from backend to the SI.
+ - [MEDIUM] make the global stats socket part of a frontend
+ - [MEDIUM] session: account per-listener connections
+ - [MINOR] session: switch to established state if no connect function
+ - [MEDIUM] make the unix stats sockets use the generic session handler
+ - [CLEANUP] unix: remove uxst_process_session()
+ - [CLEANUP] move remaining stats sockets code to dumpstats
+ - [MINOR] move the initial task's nice value to the listener
+ - [MINOR] cleanup set_session_backend by using pre-computed analysers
+ - [MINOR] set s->srv_error according to the analysers
+ - [MEDIUM] set rep->analysers from fe and be analysers
+ - [MEDIUM] replace BUFSIZE with buf->size in computations
+ - [MEDIUM] make it possible to change the buffer size in the configuration
+ - [MEDIUM] report error on buffer writes larger than buffer size
+ - [MEDIUM] stream_interface: add and use ->update function to resync
+ - [CLEANUP] remove ifdef MSG_NOSIGNAL and define it instead
+ - [MEDIUM] remove TCP_CORK and make use of MSG_MORE instead
+ - [BUG] tarpit did not work anymore
+ - [MINOR] acl: add support for hdr_ip to match IP addresses in headers
+ - [MAJOR] buffers: fix misuse of the BF_SHUTW_NOW flag
+ - [MINOR] buffers: provide more functions to handle buffer data
+ - [MEDIUM] buffers: provide new buffer_feed*() function
+ - [MINOR] buffers: add peekchar and peekline functions for stream interfaces
+ - [MINOR] buffers: provide buffer_si_putchar() to send a char from a stream interface
+ - [BUG] buffer_forward() would not correctly consider data already scheduled
+ - [MINOR] buffers: add buffer_cut_tail() to cut only unsent data
+ - [MEDIUM] stream_interface: make use of buffer_cut_tail() to report errors
+ - [MAJOR] http: add support for HTTP 1xx informational responses
+ - [MINOR] buffers: inline buffer_si_putchar()
+ - [MAJOR] buffers: split BF_WRITE_ENA into BF_AUTO_CONNECT and BF_AUTO_CLOSE
+ - [MAJOR] buffers: fix the BF_EMPTY flag's meaning
+ - [BUG] stream_interface: SI_ST_CLO must have buffers SHUT
+ - [MINOR] stream_sock: don't set SI_FL_WAIT_DATA if BF_SHUTW_NOW is set
+ - [MEDIUM] add support for infinite forwarding
+ - [BUILD] stream_interface: fix conflicting declaration
+ - [BUG] buffers: buffer_forward() must not always clear BF_OUT_EMPTY
+ - [BUG] variable buffer size ignored at initialization time
+ - [MINOR] ensure that buffer_feed() and buffer_skip() set BF_*_PARTIAL
+ - [BUG] fix buffer_skip() and buffer_si_getline() to correctly handle wrap-arounds
+ - [MINOR] stream_interface: add SI_FL_DONT_WAKE flag
+ - [MINOR] stream_interface: add iohandler callback
+ - [MINOR] stream_interface: add functions to support running as internal/external tasks
+ - [MEDIUM] session: call iohandler for embedded tasks (applets)
+ - [MINOR] add a ->private member to the stream_interface
+ - [MEDIUM] stats: prepare the connection for closing before dumping
+ - [MEDIUM] stats: replace the stats socket analyser with an SI applet
+
+2009/08/09 : 1.4-dev2
+ - [BUG] task: fix possible crash when some timeouts are not configured
+ - [BUG] log: option tcplog would log to global if no logger was defined
+
+2009/07/29 : 1.4-dev1
+ - [MINOR] acl: add support for matching of RDP cookies
+ - [MEDIUM] add support for RDP cookie load-balancing
+ - [MEDIUM] add support for RDP cookie persistence
+ - [MINOR] add a new CLF log format
+ - [MINOR] startup: don't imply -q with -D
+ - [BUG] ensure that we correctly re-start old process in case of error
+ - [MEDIUM] add support for binding to source port ranges during connect
+ - [MINOR] config: track "no option"/"option" changes
 - [MINOR] config: support resetting options to default values
+ - [MEDIUM] implement option tcp-smart-accept at the frontend
+ - [MEDIUM] stream_sock: implement tcp-cork for use during shutdowns on Linux
+ - [MEDIUM] implement tcp-smart-connect option at the backend
+ - [MEDIUM] add support for TCP MSS adjustment for listeners
+ - [MEDIUM] support setting a server weight to zero
+ - [MINOR] make DEFAULT_MAXCONN user-configurable at build time
+ - [MAJOR] session: don't clear buffer status flags anymore
+ - [MAJOR] session: only check for timeouts when they have just occurred.
+ - [MAJOR] session: simplify buffer error handling
+ - [MEDIUM] config: split parser and checker in two functions
+ - [MEDIUM] config: support loading multiple configuration files
+ - [MEDIUM] stream_sock: don't close prematurely when nolinger is set
+ - [MEDIUM] session: rework buffer analysis to permit permanent analysers
+ - [MEDIUM] splice: set the capability on each stream_interface
+ - [BUG] http: redirect rules were processed too early
+ - [CLEANUP] remove unused DEBUG_PARSE_NO_SPEEDUP define
+ - [MEDIUM] http: split request waiter from request processor
+ - [MEDIUM] session: tell analysers what bit they were called for
+ - [MAJOR] http: complete splitting of the remaining stages
+ - [MINOR] report in the proxies the requirements for ACLs
+ - [MINOR] http: rely on proxy->acl_requires to allocate hdr_idx
+ - [MINOR] acl: add HTTP protocol detection (req_proto_http)
+ - [MINOR] prepare callers of session_set_backend to handle errors
+ - [BUG] default ACLs did not properly set the ->requires flag
+ - [MEDIUM] allow a TCP frontend to switch to an HTTP backend
 - [MINOR] ensure we can jump from switching rules to http without data
+ - [MINOR] http: take http request timeout from the backend
+ - [MINOR] allow TCP inspection rules to make use of HTTP ACLs
+ - [BUILD] report commit date and not author's date as build date
+ - [MINOR] acl: don't complain anymore when using L7 acls in TCP
+ - [BUG] stream_sock: always shutdown(SHUT_WR) before closing
+ - [BUG] stream_sock: don't stop reading when the poller reports an error
+ - [BUG] config: tcp-request content only accepts "if" or "unless"
+ - [BUG] task: fix possible timer drift after update
+ - [MINOR] apply tcp-smart-connect option for the checks too
+ - [MINOR] stats: better displaying in MSIE
+ - [MINOR] config: improve error reporting in global section
+ - [MINOR] config: improve error reporting in listen sections
+ - [MINOR] config: the "capture" keyword is not allowed in backends
+ - [MINOR] config: improve error reporting when checking configuration
+ - [BUILD] fix a minor build warning on AIX
+ - [BUILD] use "git cmd" instead of "git-cmd"
+ - [CLEANUP] report 2009 not 2008 in the copyright banner.
+ - [MINOR] print usage on the stats sockets upon invalid commands
+ - [MINOR] acl: detect and report potential mistakes in ACLs
+ - [BUILD] fix incorrect printf arg count with tcp_splice
+ - [BUG] fix random pauses on last segment of a series
+ - [BUILD] add support for build under Cygwin
+
+2009/06/09 : 1.4-dev0
+ - exact copy of 1.3.18
+
+2009/05/10 : 1.3.18
+ - [MEDIUM] add support for "balance hdr(name)"
+ - [CLEANUP] give a little bit more information in error message
+ - [MINOR] add X-Original-To: header
+ - [BUG] x-original-to: fix missing initialization to default value
+ - [BUILD] spec file: fix broken pipe during rpmbuild and add man file
+ - [MINOR] improve reporting of misplaced acl/reqxxx rules
+ - [MEDIUM] http: add options to ignore invalid header names
+ - [MEDIUM] http: capture invalid requests/responses even if accepted
+ - [BUILD] add format(printf) to printf-like functions
+ - [MINOR] fix several printf formats and missing arguments
+ - [BUG] stats: total and lbtot are unsigned
+ - [MINOR] fix a few remaining printf-like formats on 64-bit platforms
+ - [CLEANUP] remove unused make option from haproxy.spec
+ - [BUILD] make it possible to pass alternative arch at build time
+ - [MINOR] switch all stat counters to 64-bit
+ - [MEDIUM] ensure we don't recursively call pool_gc2()
+ - [CRITICAL] uninitialized response field can sometimes cause crashes
+ - [BUG] fix wrong pointer arithmetics in HTTP message captures
+ - [MINOR] rhel init script : support the reload operation
+ - [MINOR] add basic signal handling functions
+ - [BUILD] add signal.o to all makefiles
+ - [MEDIUM] call signal_process_queue from run_poll_loop
+ - [MEDIUM] pollers: don't wait if a signal is pending
+ - [MEDIUM] convert all signals to asynchronous signals
+ - [BUG] O(1) pollers should check their FD before closing it
+ - [MINOR] don't close stdio fds twice
+ - [MINOR] add options dontlog-normal and log-separate-errors
+ - [DOC] minor fixes and rearrangements
+ - [BUG] fix parser crash on unconditional tcp content rules
+ - [DOC] rearrange the configuration manual and add a summary
+ - [MINOR] standard: provide a new 'my_strndup' function
+ - [MINOR] implement per-logger log level limitation
+ - [MINOR] compute the max of sessions/s on fe/be/srv
+ - [MINOR] stats: report max sessions/s and limit in CSV export
+ - [MINOR] stats: report max sessions/s and limit in HTML stats
+ - [MINOR] stats/html: use the arial font before helvetica
+
+2009/03/29 : 1.3.17
+ - Update specfile to build for v2.6 kernel.
+ - [BUG] reset the stream_interface connect timeout upon connect or error
+ - [BUG] reject unix accepts when connection limit is reached
+ - [MINOR] show sess: report number of calls to each task
+ - [BUG] don't call epoll_ctl() on closed sockets
+ - [BUG] stream_sock: disable I/O on fds reporting an error
+ - [MINOR] sepoll: don't count two events on the same FD.
+ - [MINOR] show sess: report a lot more information about sessions
+ - [BUG] stream_sock: check for shut{r,w} before refreshing some timeouts
+ - [BUG] don't set an expiration date directly from now_ms
+ - [MINOR] implement ulltoh() to write HTML-formatted numbers
+ - [MINOR] stats/html: group digits by 3 to clarify numbers
+ - [BUILD] remove haproxy-small.spec
+ - [BUILD] makefile: remove unused references to linux24eold and EPOLL_CTL_WORKAROUND
+
+2009/03/22 : 1.3.16
+ - [BUILD] Fixed Makefile for linking pcre
+ - [CONTRIB] selinux policy for haproxy
+ - [MINOR] show errors: encode backslash as well as non-ascii characters
+ - [MINOR] cfgparse: some cleanups in the consistency checks
+ - [MINOR] cfgparse: set backends to "balance roundrobin" by default
+ - [MINOR] tcp-inspect: permit the use of no-delay inspection
+ - [MEDIUM] reverse internal proxy declaration order to match configuration
+ - [CLEANUP] config: catch and report some possibly wrong rule ordering
+ - [BUG] connect timeout is in the stream interface, not the buffer
+ - [BUG] session: errors were not reported in termination flags in TCP mode
+ - [MINOR] tcp_request: let the caller take care of errors and timeouts
+ - [CLEANUP] http: remove some commented out obsolete code in process_response
+ - [MINOR] update ebtree to version 4.1
+ - [MEDIUM] scheduler: get rid of the 4 trees thanks and use ebtree v4.1
 - [BUG] sched: don't leave 3 last tasks unprocessed when niced tasks are present
+ - [BUG] scheduler: fix improper handling of duplicates __task_queue()
+ - [MINOR] sched: permit a task to stay up between calls
+ - [MINOR] task: keep a task count and clean up task creators
+ - [MINOR] stats: report number of tasks (active and running)
+ - [BUG] server check intervals must not be null
+ - [OPTIM] stream_sock: don't retry to read after a large read
+ - [OPTIM] buffer: new BF_READ_DONTWAIT flag reduces EAGAIN rates
+ - [MEDIUM] session: don't resync FSMs on non-interesting changes
+ - [BUG] check for global.maxconn before doing accept()
+ - [OPTIM] sepoll: do not re-check whole list upon accepts
+
+2009/03/09 : 1.3.16-rc2
+ - [BUG] stream_sock: write timeout must be updated when forwarding !
+
+2009/03/09 : 1.3.16-rc1
+ - appsessions: cleanup DEBUG_HASH and initialize request_counter
+ - [MINOR] acl: add new keyword "connslots"
+ - [MINOR] cfgparse: fix off-by 2 in error message size
+ - [BUILD] fix build with gcc 4.3
+ - [BUILD] fix MANDIR default location to match documentation
+ - [TESTS] add a debug patch to help trigger the stats bug
+ - [BUG] Flush buffers also where there are exactly 0 bytes left
+ - [MINOR] Allow to specify a domain for a cookie
+ - [BUG/CLEANUP] cookiedomain -> cookie_domain rename + free(p->cookie_domain)
+ - [MEDIUM] Fix memory freeing at exit
+ - [MEDIUM] Fix memory freeing at exit, part 2
+ - [BUG] Fix listen & more of 2 couples <ip>:<port>
+ - [DOC] remove buggy comment for use_backend
+ - [CRITICAL] fix server state tracking: it was O(n!) instead of O(n)
+ - [MEDIUM] add support for URI hash depth and length limits
+ - [MINOR] permit renaming of x-forwarded-for header
+ - [BUILD] fix Makefile.bsd and Makefile.osx for stream_interface
+ - [BUILD] Haproxy won't compile if DEBUG_FULL is defined
+ - [MEDIUM] upgrade to ebtree v4.0
+ - [DOC] update the README file with new build options
+ - [MEDIUM] reduce risk of event starvation in ev_sepoll
+ - [MEDIUM] detect streaming buffers and tag them as such
+ - [MEDIUM] add support for conditional HTTP redirection
+ - [BUILD] make install should depend on haproxy not "all"
+ - [DEBUG] add a TRACE macro to facilitate runtime data extraction
+ - [BUG] event pollers must not wait if a task exists in the run queue
+ - [BUG] queue management: wake oldest request in queues
 - [BUG] log: reported queue position was off-by-one
+ - [BUG] fix the dequeuing logic to ensure that all requests get served
+ - [DOC] documentation for the "retries" parameter was missing.
+ - [MEDIUM] implement a monotonic internal clock
+ - [MEDIUM] further improve monotonic clock by check forward jumps
+ - [OPTIM] add branch prediction hints in list manipulations
+ - [MAJOR] replace ultree with ebtree in wait-queues
+ - [BUG] we could segfault during exit while freeing uri_auths
+ - [BUG] wqueue: perform proper timeout comparisons with wrapping values
+ - [MINOR] introduce now_ms, the current date in milliseconds
+ - [BUG] disable buffer read timeout when reading stats
+ - [MEDIUM] rework the wait queue mechanism
+ - [BUILD] change declaration of base64tab to fix build with Intel C++
+ - [OPTIM] shrink wake_expired_tasks() by using task_wakeup()
+ - [MAJOR] use an ebtree instead of a list for the run queue
+ - [MEDIUM] introduce task->nice and boot access to statistics
+ - [OPTIM] task_queue: assume most consecutive timers are equal
+ - [BUILD] silent a warning in unlikely() with gcc 4.x
+ - [MAJOR] convert all expiration timers from timeval to ticks
+ - [BUG] use_backend would not correctly consider "unless"
+ - [TESTS] added test-acl.cfg to test some ACL combinations
+ - [MEDIUM] add support for configuration keyword registration
+ - [MEDIUM] modularize the global "stats" keyword configuration parser
+ - [MINOR] cfgparse: add support for warnings in external functions
+ - [MEDIUM] modularize the "timeout" keyword configuration parser
+ - [MAJOR] implement tcp request content inspection
+ - [MINOR] acl: add a new parsing function: parse_dotted_ver
+ - [MINOR] acl: add req_ssl_ver in TCP, to match an SSL version
+ - [CLEANUP] remove unused include/types/client.h
+ - [CLEANUP] remove many #include <types/xxx> from C files
+ - [CLEANUP] remove dependency on obsolete INTBITS macro
+ - [DOC] document the new "tcp-request" keyword and associated ACLs
+ - [MINOR] acl: add REQ_CONTENT to the list of default acls
+ - [MEDIUM] acl: permit fetch() functions to set the result themselves
+ - [MEDIUM] acl: get rid of dummy values in always_true/always_false
+ - [MINOR] acl: add the "wait_end" acl verb
+ - [MEDIUM] acl: enforce ACL type checking
+ - [MEDIUM] acl: set types on all currently known ACL verbs
+ - [MEDIUM] acl: when possible, report the name and requirements of ACLs in warnings
+ - [CLEANUP] remove 65 useless NULL checks before free
+ - [MEDIUM] memory: update pool_free2() to support NULL pointers
+ - [MEDIUM] buffers: ensure buffer_shut* are properly called upon shutdowns
+ - [MEDIUM] process_srv: rely on buffer flags for client shutdown
+ - [MEDIUM] process_srv: don't rely at all on client state
+ - [MEDIUM] process_cli: don't rely at all on server state
+ - [BUG] fix segfault with url_param + check_post
+ - [BUG] server timeout was not considered in some circumstances
+ - [BUG] client timeout incorrectly rearmed while waiting for server
+ - [MAJOR] kill CL_STINSPECT and CL_STHEADERS (step 1)
+ - [MAJOR] get rid of SV_STANALYZE (step 2)
+ - [MEDIUM] simplify and centralize request timeout cancellation and request forwarding
+ - [MAJOR] completely separate HTTP and TCP states on the request path
+ - [BUG] fix recently introduced loop when client closes early
+ - [MAJOR] get rid of the SV_STHEADERS state
+ - [MAJOR] better separation of response processing and server state
+ - [MAJOR] clearly separate HTTP response processing from TCP server state
+ - [MEDIUM] remove unused references to {CL|SV}_STSHUT*
+ - [MINOR] term_trace: add better instrumentations to trace the code
+ - [BUG] ev_sepoll: closed file descriptors could persist in the spec list
+ - [BUG] process_response must not enable the read FD
+ - [BUG] buffers: remove BF_MAY_CONNECT and fix forwarding issue
+ - [BUG] process_response: do not touch srv_state
+ - [BUG] maintain_proxies must not disable backends
+ - [CLEANUP] get rid of BF_SHUT*_PENDING
+ - [MEDIUM] buffers: add BF_EMPTY and BF_FULL to remove dependency on req/rep->l
+ - [MAJOR] process_session: rely only on buffer flags
+ - [MEDIUM] use buffer->wex instead of buffer->cex for connect timeout
+ - [MEDIUM] centralize buffer timeout checks at the top of process_session
+ - [MINOR] ensure the termination flags are set by process_xxx
+ - [MEDIUM] session: move the analysis bit field to the buffer
+ - [OPTIM] process_cli/process_srv: reduce the number of tests
+ - [BUG] regparm is broken on gcc < 3
+ - [BUILD] fix warning in proto_tcp.c with gcc >= 4
+ - [MEDIUM] merge inspect_exp and txn->exp into request buffer
+ - [BUG] process_cli/process_srv: don't call shutdown when already done
+ - [BUG] process_request: HTTP body analysis must return zero if missing data
+ - [TESTS] test-fsm: 22 regression tests for state machines
+ - [BUG] Fix empty X-Forwarded-For header name when set in defaults section
+ - [BUG] fix harmless but wrong fd insertion sequence
+ - [MEDIUM] make it possible for analysers to follow the whole session
+ - [MAJOR] rework of the server FSM
+ - [OPTIM] remove useless fd_set(read) upon shutdown(write)
+ - [MEDIUM] massive cleanup of process_srv()
+ - [MEDIUM] second level of code cleanup for process_srv_data
+ - [MEDIUM] third cleanup and optimization of process_srv_data()
+ - [MEDIUM] process_srv_data: ensure that we always correctly re-arm timeouts
+ - [MEDIUM] stream_sock_process_data moved to stream_sock.c
+ - [MAJOR] make the client side use stream_sock_process_data()
+ - [MEDIUM] split stream_sock_process_data
+ - [OPTIM] stream_sock_read must check for null-reads more often
+ - [MINOR] only call flow analysers when their read side is connected.
+ - [MEDIUM] reintroduce BF_HIJACK with produce_content
+ - [MINOR] re-arrange buffer flags and rename some of them
+ - [MINOR] do not check for BF_SHUTR when computing write timeout
+ - [OPTIM] ev_sepoll: detect newly created FDs and check them once
+ - [OPTIM] reduce the number of calls to task_wakeup()
+ - [OPTIM] force inlining of large functions with gcc >= 3
+ - [MEDIUM] indicate a reason for a task wakeup
+ - [MINOR] change type of fdtab[]->owner to void*
+ - [MAJOR] make stream sockets aware of the stream interface
+ - [MEDIUM] stream interface: add the ->shutw method as well as in and out buffers
+ - [MEDIUM] buffers: add BF_READ_ATTACHED and BF_ANA_TIMEOUT
+ - [MEDIUM] process_session: make use of the new buffer flags
+ - [CLEANUP] process_session: move debug outputs out of the critical loop
+ - [MEDIUM] move QUEUE and TAR timers to stream interfaces
+ - [OPTIM] add compiler hints in tick_is_expired()
+ - [MINOR] add buffer_check_timeouts() to check what timeouts have fired.
+ - [MEDIUM] use buffer_check_timeouts instead of stream_sock_check_timeouts()
+ - [MINOR] add an expiration flag to the stream_sock_interface
+ - [MAJOR] migrate the connection logic to stream interface
+ - [MAJOR] add a connection error state to the stream_interface
+ - [MEDIUM] add the SN_CURR_SESS flag to the session to track open sessions
+ - [MEDIUM] continue layering cleanups.
+ - [MEDIUM] stream_interface: added a DISconnected state between CON/EST and CLO
+ - [MEDIUM] remove stream_sock_update_data()
+ - [MINOR] maintain a global session list in order to ease debugging
+ - [BUG] shutw must imply close during a connect
+ - [MEDIUM] process shutw during connection attempt
+ - [MEDIUM] make the stream interface control the SHUT{R,W} bits
+ - [MAJOR] complete layer4/7 separation
+ - [CLEANUP] move the session-related functions to session.c
+ - [MINOR] call session->do_log() for logging
+ - [MINOR] replace the ambiguous client_return function by stream_int_return
+ - [MINOR] replace client_retnclose() with stream_int_retnclose()
+ - [MINOR] replace srv_close_with_err() with http_server_error()
+ - [MEDIUM] make the http server error function a pointer in the session
+ - [CLEANUP] session.c: removed some migration left-overs in sess_establish()
+ - [MINOR] stream_sock_data_finish() should not expose fd
+ - [MEDIUM] extract TCP request processing from HTTP
+ - [MEDIUM] extract the HTTP tarpit code from process_request().
+ - [MEDIUM] move the HTTP request body analyser out of process_request().
+ - [MEDIUM] rename process_request to http_process_request
+ - [BUG] fix forgotten server session counter
+ - [MINOR] declare process_session in session.h, not proto_http.h
+ - [MEDIUM] first pass of lifting to proto_uxst.c:uxst_event_accept()
+ - [MINOR] add an analyser code for UNIX stats request
+ - [MINOR] pre-set analyser flags on the listener at registration time
+ - [BUG] do not forward close from cons to prod with analysers
+ - [MEDIUM] ensure that sock->shutw() also closes read for init states
+ - [MINOR] add an analyser state in struct session
+ - [MAJOR] make unix sockets work again with stats
+ - [MEDIUM] remove cli_fd, srv_fd, cli_state and srv_state from the session
+ - [MINOR] move the listener reference from fd to session
+ - [MEDIUM] reference the current hijack function in the buffer itself
+ - [MINOR] slightly rebalance stats_dump_{raw,http}
+ - [MINOR] add a new back-reference type : struct bref
+ - [MINOR] add back-references to sessions for later use by a dumper.
+ - [MEDIUM] add support for "show sess" in unix stats socket
+ - [BUG] do not release the connection slot during a retry
+ - [BUG] dynamic connection throttling could return a max of zero conns
+ - [BUG] do not try to pause backends during reload
+ - [BUG] ensure that listeners from disabled proxies are correctly unbound.
+ - [BUG] acl-related keywords are not allowed in defaults sections
+ - [BUG] cookie capture is declared in the frontend but checked on the backend
+ - [BUG] critical errors should be reported even in daemon mode
+ - [MINOR] redirect: add support for the "drop-query" option
+ - [MINOR] redirect: add support for "set-cookie" and "clear-cookie"
+ - [MINOR] redirect: in prefix mode a "/" means not to change the URI
+ - [BUG] do not dequeue requests on a dead server
+ - [BUG] do not dequeue the backend's pending connections on a dead server
+ - [MINOR] stats: indicate if a task is running in "show sess"
+ - [BUG] check timeout must not be changed if timeout.check is not set
+ - [BUG] "option transparent" is for backend, not frontend !
+ - [MINOR] transfer errors were not reported anymore in data phase
+ - [MEDIUM] add a send limit to a buffer
+ - [MEDIUM] don't report buffer timeout when there is I/O activity
+ - [MEDIUM] indicate when we don't care about read timeout
+ - [MINOR] add flags to indicate when a stream interface is waiting for space/data
+ - [MEDIUM] enable inter-stream_interface wakeup calls
+ - [MAJOR] implement autonomous inter-socket forwarding
+ - [MINOR] add the splice_len member to the buffer struct in preparation of splice support
+ - [MEDIUM] stream_sock: factor out the return path in case of no-writes
+ - [MEDIUM] i/o: rework ->to_forward and ->send_max
+ - [OPTIM] stream_sock: do not ask for polling on EAGAIN if we have read
+ - [OPTIM] buffer: replace rlim by max_len
+ - [OPTIM] stream_sock: factor out the buffer full handling out of the loop
+ - [CLEANUP] replace a few occurrences of (flags & X) && !(flags & Y)
+ - [CLEANUP] stream_sock: move the write-nothing condition out of the loop
+ - [MEDIUM] split stream_sock_write() into callback and core functions
+ - [MEDIUM] stream_sock_read: call ->chk_snd whenever there are data pending
+ - [MINOR] stream_sock: fix a few wrong empty calculations
+ - [MEDIUM] stream_sock: try to send pending data on chk_snd()
+ - [MINOR] global.maxpipes: add the ability to reserve file descriptors for pipes
+ - [MEDIUM] splice: add configuration options and set global.maxpipes
+ - [MINOR] introduce structures required to support Linux kernel splicing
+ - [MEDIUM] add definitions for Linux kernel splicing
+ - [MAJOR] complete support for linux 2.6 kernel splicing
+ - [BUG] reserve some pipes for backends with splice enabled
+ - [MEDIUM] splice: add hints to support older buggy kernels
+ - [MEDIUM] introduce pipe pools
+ - [MEDIUM] splice: make use of pipe pools
+ - [STATS] report pipe usage in the statistics
+ - [OPTIM] make global.maxpipes default to global.maxconn/4 when not specified
+ - [BUILD] fix snapshot date extraction with negative timezones
+ - [MEDIUM] move global tuning options to the global structure
+ - [MEDIUM] splice: add the global "nosplice" option
+ - [BUILD] add USE_LINUX_SPLICE to enable LINUX_SPLICE on linux 2.6
+ - [BUG] we must not exit if protocol binding only returns a warning
+ - [MINOR] add support for bind interface name
+ - [BUG] inform the user when root is expected but not set
+ - [MEDIUM] add support for source interface binding
+ - [MEDIUM] add support for source interface binding at the server level
+ - [MEDIUM] implement bind-process to limit service presence by process
+ - [DOC] document maxpipes, nosplice, option splice-{auto,request,response}
+ - [DOC] filled the logging section of the configuration manual
+ - [DOC] document HTTP status codes
+ - [DOC] document a few missing info about errorfile
+ - [BUG] fix random memory corruption using "show sess"
+ - [BUG] fix unix socket processing of interrupted output
+ - [DOC] add diagrams of queuing and future ACL design
+ - [BUILD] proto_http did not build on gcc-2.95
+ - [BUG] the "source" keyword must first clear optional settings
+ - [BUG] global.tune.maxaccept must be limited even in mono-process mode
+ - [MINOR] ensure that http_msg_analyzer updates pointer to invalid char
+ - [MEDIUM] store a complete dump of request and response errors in proxies
+ - [MEDIUM] implement error dump on unix socket with "show errors"
+ - [DOC] document "show errors"
+ - [MINOR] errors dump must use user-visible date, not internal date.
+ - [MINOR] time: add __usec_to_1024th to convert usecs to 1024th of second
+ - [MINOR] add curr_sec_ms and curr_sec_ms_scaled for current second.
+ - [MEDIUM] measure and report session rate on frontend, backends and servers
+ - [BUG] the "connslots" keyword was matched as "connlots"
+ - [MINOR] acl: add 2 new verbs: fe_sess_rate and be_sess_rate
+ - [MEDIUM] implement "rate-limit sessions" for the frontend
+ - [BUG] interface binding: length must include the trailing zero
+ - [BUG] typo in timeout error reporting : report *res and not *err
+ - [OPTIM] maintain_proxies: only wake up when the frontend will be ready
+ - [OPTIM] rate-limit: cleaner behaviour on low rates and reduce consumption
+ - [BUG] switch server-side stream interface to close in case of abort
+ - [CLEANUP] remove last references to term_trace
+ - [OPTIM] freq_ctr: do not rotate the counters when reading
+ - [BUG] disable any analysers for monitoring requests
+ - [BUG] rate-limit in defaults section was ignored
+ - [BUG] task: fix handling of duplicate keys
+ - [OPTIM] task: don't unlink a task from a wait queue when waking it up
+ - [OPTIM] displace tasks in the wait queue only if absolutely needed
+ - [MEDIUM] minor update to the task api: let the scheduler queue itself
+ - [BUG] event_accept() must always wake the task up, even in health mode
+ - [CLEANUP] task: distinguish between clock ticks and timers
+ - [OPTIM] task: reduce the number of calls to task_queue()
+ - [OPTIM] do not re-check req buffer when only response has changed
+ - [CLEANUP] don't enable kernel splicing when socket is closed
+ - [CLEANUP] buffer_flush() was misleading, rename it as buffer_erase
+ - [MINOR] buffers: implement buffer_flush()
+ - [MEDIUM] rearrange forwarding condition to enable splice during analysis
+ - [BUILD] build fixes for Solaris
+ - [BUILD] proto_http did not build on gcc-2.95 (again)
+ - [CONTRIB] halog: fast log parser for haproxy
+ - [CONTRIB] halog: faster fgets() and add support for percentile reporting
+
+2008/04/19 : 1.3.15
+ - [BUILD] Added support for 'make install'
+ - [BUILD] Added 'install-man' make target for installing the man page
+ - [BUILD] Added 'install-bin' make target
+ - [BUILD] Added 'install-doc' make target
+ - [BUILD] Removed "/" after '$(DESTDIR)' in install targets
+ - [BUILD] Changed 'install' target to install the binaries first
+ - [BUILD] Replace hardcoded 'LD = gcc' with 'LD = $(CC)'
+ - [MEDIUM]: Inversion for options
+ - [MEDIUM]: Count retries and redispatches also for servers, fix redistribute_pending, extend logs, %d->%u cleanup
+ - [BUG]: Restore clearing t->logs.bytes
+ - [MEDIUM]: rework checks handling
+    - [DOC] Update a "contrib" file with a hint about a scheme used for formatting subjects
+ - [MEDIUM] Implement "track [<backend>/]<server>"
+ - [MINOR] Implement persistent id for proxies and servers
+ - [BUG] Don't increment server connections too much + fix retries
+ - [MEDIUM]: Prevent redispatcher from selecting the same server, version #3
+ - [MAJOR] proto_uxst rework -> SNMP support
+ - [BUG] appsession lookup in URL does not work
+ - [BUG] transparent proxy address was ignored in backend
+ - [BUG] hot reconfiguration failed because of a wrong error check
+ - [DOC] big update to the configuration manual
+ - [DOC] large update to the configuration manual
+ - [DOC] document more options
+ - [BUILD] major rework of the GNU Makefile
+ - [STATS] add support for "show info" on the unix socket
+ - [DOC] document options forwardfor to logasap
+ - [MINOR] add support for the "backlog" parameter
+ - [OPTIM] introduce global parameter "tune.maxaccept"
+ - [MEDIUM] introduce "timeout http-request" in frontends
+ - [MINOR] tarpit timeout is also allowed in backends
+ - [BUG] increment server connections for each connect()
+ - [MEDIUM] add a turn-around state of one second after a connection failure
+ - [BUG] fix typo in redispatched connection
+ - [DOC] document options nolinger to ssl-hello-chk
+ - [DOC] added documentation for "option tcplog" to "use_backend"
+ - [BUG] connect_server: server might not exist when sending error report
+ - [MEDIUM] support fully transparent proxy on Linux (USE_LINUX_TPROXY)
+ - [MEDIUM] add non-local bind to connect() on Linux
+ - [MINOR] add transparent proxy support for balabit's Tproxy v4
+ - [BUG] use backend's source and not server's source with tproxy
+ - [BUG] fix overlapping server flags
+ - [MEDIUM] fix server health checks source address selection
+ - [BUG] build failed on CONFIG_HAP_LINUX_TPROXY without CONFIG_HAP_CTTPROXY
+ - [DOC] added "server", "source" and "stats" keywords
+ - [DOC] all server parameters have been documented
+ - [DOC] document all req* and rsp* keywords.
+ - [DOC] added documentation about HTTP header manipulations
+ - [BUG] log response byte count, not request
+ - [BUILD] code did not build in full debug mode
+ - [BUG] fix truncated responses with sepoll
+ - [MINOR] use s->frt_addr as the server's address in transparent proxy
+ - [MINOR] fix configuration hint about timeouts
+ - [DOC] minor cleanup of the doc and notice to contributors
+ - [MINOR] report correct section type for unknown keywords.
+ - [BUILD] update MacOS Makefile to build on newer versions
+ - [DOC] fix erroneous "useallbackups" option in the doc
+ - [DOC] applied small fixes from early readers
+ - [MINOR] add configuration support for "redir" server keyword
+ - [MEDIUM] completely implement the server redirection method
+ - [TESTS] add a test case for the server redirection mechanism
+ - [DOC] add a configuration entry for "server ... redir <prefix>"
+ - [BUILD] backend.c and checks.c did not build without tproxy !
+ - Revert "[BUILD] backend.c and checks.c did not build without tproxy !"
+ - [BUILD] backend.c and checks.c did not build without tproxy !
+ - [OPTIM] used unsigned ints for HTTP state and message offsets
+ - [OPTIM] GCC4's builtin_expect() is suboptimal
+ - [BUG] failed conns were sometimes incremented in the frontend!
+ - [BUG] timeout.check was not pre-set to eternity
+ - [TESTS] add test-pollers.cfg to easily report pollers in use
+ - [BUG] do not apply timeout.connect in checks if unset
+ - [BUILD] ensure that makefile understands USE_DLMALLOC=1
+ - [MINOR] silent gcc for a wrong warning
+ - [CLEANUP] update .gitignore to ignore more temporary files
+    - [CLEANUP] report dlmalloc's source path only if explicitly specified
+ - [BUG] str2sun could leak a small buffer in case of error during parsing
+ - [BUG] option allbackups was not working anymore in roundrobin mode
+ - [MAJOR] implementation of the "leastconn" load balancing algorithm
+ - [BUILD] ensure that users don't build without setting the target anymore.
+ - [DOC] document the leastconn LB algo
+ - [MEDIUM] fix stats socket limitation to 16 kB
+ - [DOC] fix unescaped space in httpchk example.
+ - [BUG] fix double-decrement of server connections
+ - [TESTS] add a test case for port mapping
+ - [TESTS] add a benchmark for integer hashing
+ - [TESTS] add new methods in ip-hash test file
+ - [MAJOR] implement parameter hashing for POST requests
+
+2007/12/06 : 1.3.14
+ - New option http_proxy (Alexandre Cassen)
+ - add support for "maxqueue" to limit server queue overload (Elijah Epifanov)
+ - Check for duplicated conflicting proxies (Krzysztof Oledzki)
+ - stats: report server and backend cumulated downtime (Krzysztof Oledzki)
+ - use backends only with use_backend directive (Krzysztof Oledzki)
+ - Handle long lines properly (Krzysztof Oledzki)
+ - Implement and use generic findproxy and relax duplicated proxy check (Krzysztof Oledzki)
+    - continuous statistics (Krzysztof Oledzki)
+ - add support for logging via a UNIX socket (Robert Tsai)
+ - fix error checking in strl2ic/strl2uic()
+ - fix calls to localtime()
+ - provide easier-to-use ultoa_* functions
+ - provide easy-to-use limit_r and LIM2A* macros
+ - add a simple test for the status page
+ - move error codes to common/errors.h
+ - silent warning about LIST_* being redefined on OpenBSD
+ - add socket address length to the protocols
+ - group PR_O_BALANCE_* bits into a checkable value
+ - externalize the "balance" option parser to backend.c
+ - introduce the "url_param" balance method
+ - make default_backend work in TCP mode too
+ - disable warning about localtime_r on Solaris
+ - adjust error messages about conflicting proxies
+ - avoid calling some layer7 functions if not needed
+ - simplify error path in event_accept()
+ - add an options field to the listeners
+ - added a new state to listeners
+ - unbind_listener() must use fd_delete() and not close()
+ - add a generic unbind_listener() primitive
+ - add a generic delete_listener() primitive
+ - add a generic unbind_all_listeners() primitive
+ - create proto_tcp and move initialization of proxy listeners
+ - stats: report numerical process ID, proxy ID and server ID
+ - relative_pid was not initialized
+ - missing header names in raw stats output
+ - fix missing parenthesis in check_response_for_cacheability
+ - small optimization on session_process_counters()
+ - merge ebtree version 3.0
+ - make ebtree headers multiple-include compatible
+ - ebtree: include config.h for REGPRM*
+ - differentiate between generic LB params and map-specific ones
+ - add a weight divisor to the struct proxy
+ - implement the Fast Weighted Round Robin (FWRR) algo
+ - include filltab25.c to experiment on FWRR for dynamic weights
+ - merge test-fwrr.cfg to validate dynamic weights
+ - move the load balancing algorithm to be->lbprm.algo
+ - change server check result to a bit field
+ - implement "http-check disable-on-404" for graceful shutdown
+ - secure the calling conditions of ->set_server_status_{up,down}
+ - report disabled servers as "NOLB" when they are still UP
+ - document the "http-check disable-on-404" option
+ - http-check disable-on-404 is not limited to HTTP mode
+ - add a test file for disable-on-404
+ - use distinct bits per load-balancing algorithm type
+ - implement the slowstart parameter for servers
+ - document the server's slowstart parameter
+ - stats: report the server warm up status in a "throttle" column
+ - fix 2 minor issues on AIX
+ - add the "nbsrv" ACL verb
+ - add the "fail" condition to monitor requests
+ - remove a warning from gcc due to htons() in standard.c
+ - fwrr: ensure that we never overflow in placements
+ - store the build options to report with -vv
+ - fix the status return of the init script (R.I. Pienaar)
+ - stats: real time monitoring script for unix socket (Prizee)
+ - document "nbsrv" and "monitor fail"
+ - restrict the set of allowed characters for identifiers
+ - implement a time parsing function
+ - add support for time units in the configuration
+ - add a bit of documentation about timers
+ - introduce separation between contimeout, and tarpit + queue
+ - introduce the "timeout" keyword
+ - grouped all timeouts in one structure
+ - slowstart is in ms, not seconds
+ - slowstart: ensure we don't start with a null weight
+ - report the number of times each server was selected
+ - fix build on AIX due to recent log changes
+ - fix build on Solaris due to recent log changes
+
+2007/10/18 : 1.3.13
+ - replace the code under O'Reilly license (Arnaud Cornet)
+ - add a small man page (Arnaud Cornet)
+ - stats: report haproxy's version by default (Krzysztof Oledzki)
+ - stats: count server retries and redispatches (Krzysztof Oledzki)
+ - core: added easy support for Doug Lea's malloc (dlmalloc)
+ - core: fade out memory usage when stopping proxies
+ - core: moved the sockaddr pointer to the fdtab structure
+ - core: add generic protocol support
+ - core: implement client-side support for PF_UNIX sockets
+ - stats: implement the CSV output
+ - stats: add a link to the CSV export HTML page
+ - stats: implement the statistics output on a unix socket
+ - config: introduce the "stats" keyword in global section
+ - build: centralize version and date into one file for each
+ - tests: added a new hash algorithm
+
+2007/10/18 : 1.3.12.3
+ - add the "nolinger" option to disable data lingering (Alexandre Cassen)
+ - fix double-free during clean exit (Krzysztof Oledzki)
+ - prevent the system from sending an RST when closing health-checks
+ (Krzysztof Oledzki)
+ - do not add a cache-control header when on non-cacheable responses
+ (Krzysztof Oledzki)
+ - spread health checks even more (Krzysztof Oledzki)
+ - stats: scope "." must match the backend and not the frontend
+ - fixed call to chroot() during startup
+ - fix wrong timeout computation in event_accept()
+ - remove condition for exit() under fork() failure
+
+2007/09/20 : 1.3.12.2
+ - fix configuration sanity checks for TCP listeners
+ - set the log socket receive window to zero bytes
+ - pre-initialize timeouts to infinity, not zero
+ - fix the SIGHUP message not to alert on server-less proxies
+ - timeouts and retries could be ignored when switching backend
+ - added a file to check that "retries" works.
+ - O'Reilly has clarified its license
+
+2007/09/05 : 1.3.12.1
+ - spec I/O: fix allocations of spec entries for an FD
+ - ensure we never overflow in chunk_printf()
+ - improve behaviour with large number of servers per proxy
+ - add support for "stats refresh <interval>"
+ - stats page: added links for 'refresh' and 'hide down'
+ - fix backend's weight in the stats page.
+ - the "stats" keyword is not allowed in a pure frontend.
+ - provide a test configuration file for stats and checks
+
+2007/06/17 : 1.3.12
+ - fix segfault at exit when using captures
+ - bug: negation in ACL conds was not cleared between terms
+ - errorfile: use a local file to feed error messages
+ - acl: support '-i' to ignore case when matching
+ - acl: smarter integer comparison with operators eq,lt,gt,le,ge
+    - acl: support matching on 'path' component
+ - acl: implement matching on header values
+ - acl: distinguish between request and response headers
+ - acl: permit to return any header when no name specified
+ - acl: provide default ACLs
+ - added the 'use_backend' keyword for full content-switching
+ - acl: specify the direction during fetches
+ - acl: provide the argument length for fetch functions
+ - acl: provide a reference to the expr to fetch()
+ - improve memory freeing upon exit
+ - str2net() must not change the const char *
+ - shut warnings 'is*' macros from ctype.h on solaris
+
+2007/06/03 : 1.3.11.4
+ - do not re-arm read timeout in SHUTR state !
+ - optimize I/O by detecting system starvation
+ - the epoll FD must not be shared between processes
+ - limit the number of events returned by *poll*
+
+2007/05/14 : 1.3.11.3
+ - pre-initialize timeouts with tv_eternity during parsing
+
+2007/05/14 : 1.3.11.2
+ - fixed broken health-checks since switch to timeval
+
+2007/05/14 : 1.3.11.1
+ - fixed ev_kqueue which was forgotten during the switch to timeval
+ - allowed null timeouts for past events in select
+
+2007/05/14 : 1.3.11
+ - fixed ev_sepoll again by rewriting the state machine
+ - switched all timeouts to timevals instead of milliseconds
+ - improved memory management using mempools v2.
+ - several minor optimizations
+
+2007/05/09 : 1.3.10.2
+ - fixed build on OpenBSD (missing types.h)
+
+2007/05/09 : 1.3.10.1
+ - fixed sepoll transition matrix (two states were missing)
+
+2007/05/08 : 1.3.10
+ - several fixes in ev_sepoll
+ - fixed some expiration dates on some tasks
+ - fixed a bug in connection establishment detection due to speculative I/O
+    - fixed rare bug occurring on TCP with early close (reported by Andy Smith)
+ - implemented URI hashing algorithm (Guillaume Dallaire)
+ - implemented SMTP health checks (Peter van Dijk)
+ - replaced the rbtree with ul2tree from old scheduler project
+ - new framework for generic ACL support
+ - added the 'acl' and 'block' keywords to the config language
+ - added several ACL criteria and matches (IP, port, URI, ...)
+ - cleaned up and better modularization for some time functions
+ - fixed list macros
+ - fixed useless memory allocation in str2net()
+ - store the original destination address in the session
+
+2007/04/15 : 1.3.9
+ - modularized the polling mechanisms and use function pointers instead
+ of macros at many places
+ - implemented support for FreeBSD's kqueue() polling mechanism
+ - fixed a warning on OpenBSD : MIN/MAX redefined
+    - change socket registration order at startup to accommodate kqueue.
+ - several makefile cleanups to support old shells
+ - fix build with limits.h once for all
+ - ev_epoll: do not rely on fd_sets anymore, use changes stacks instead.
+ - fdtab now holds the results of polling
+ - implemented support for speculative I/O processing with epoll()
+ - remove useless calls to shutdown(SHUT_RD), resulting in small speed boost
+ - auto-registering of pollers at load time
+
+2007/04/03 : 1.3.8.2
+ - rewriting either the status line or request line could crash the
+ process due to a pointer which ought to be reset before parsing.
+ - rewriting the status line in the response did not work, it caused
+ a 502 Bad Gateway due to an erroneous state during parsing
+
+2007/04/01 : 1.3.8.1
+ - fix reqadd when no option httpclose is used.
+ - removed now unused fiprm and beprm from proxies
+ - split logs into two versions : TCP and HTTP
+ - added some docs about http headers storage and acls
+ - added a VIM script for syntax color highlighting (Bruno Michel)
+
+2007/03/25 : 1.3.8
+ - fixed several bugs which might have caused a crash with bad configs
+ - several optimizations in header processing
+ - many progresses towards transaction-based processing
+ - option forwardfor may be used in frontends
+ - completed HTTP response processing
+ - some code refactoring between request and response processing
+ - new HTTP header manipulation functions
+ - optimizations on the recv() patch to reduce CPU usage under very
+ high data rates.
+ - more user-friendly help about the 'usesrc' keyword (CTTPROXY)
+ - username/groupname support from Marcus Rueckert
+ - added the "except" keyword to the "forwardfor" option (Bryan German)
+ - support for health-checks on other addresses (Fabrice Dulaunoy)
+ - makefile for MacOS 10.4 / Darwin (Dan Zinngrabe)
+ - do not insert "Connection: close" in HTTP/1.0 messages
+
+2007/01/26 : 1.3.7
+ - fix critical bug introduced with 1.3.6 : an empty request header
+ may lead to a crash due to missing pointer assignment
+ - hdr_idx might be left uninitialized in debug mode
+ - fixed build on FreeBSD due to missing fd_set declaration
+
+2007/01/22 : 1.3.6.1
+ - change in the header chaining broke cookies and authentication
+
+2007/01/22 : 1.3.6
+ - stats now support the HEAD method too
+ - extracted http request from the session
+ - huge rework of the HTTP parser which is now a 28-state FSM.
+ - linux-style likely/unlikely macros for optimization hints
+ - do not create a server socket when there's no server
+ - imported lots of docs
+
+2007/01/07 : 1.3.5
+ - stats: swap color sets for active and backup servers
+ - try to guess server check port when unset
+ - added complete support and doc for TCP Splicing
+ - replace the wait-queue linked list with an rbtree.
+ - a few bugfixes and cleanups
+
+2007/01/02 : 1.3.4
+ - support for cttproxy on the server side to present the client
+ address to the server.
+ - added support for SO_REUSEPORT on Linux (needs kernel patch)
+ - new RFC2616-compliant HTTP request parser with header indexing
+ - split proxies in frontends, rulesets and backends
+ - implemented the 'req[i]setbe' to select a backend depending
+ on the contents
+ - added the 'default_backend' keyword to select a default BE.
+ - new stats page featuring FEs and BEs + bytes in both dirs
+ - improved log format to indicate the backend and the time in ms.
+ - lots of cleanups
+
+2006/10/15 : 1.3.3
+ - fix broken redispatch option in case the connection has already
+ been marked "in progress" (ie: nearly always).
+ - support regparm on x86 to speed up some often called functions
+ - removed a few useless calls to gettimeofday() in log functions.
+ - lots of 'const char*' cleanups
+ - turn every FD_* into functions which are faster on recent CPUs
+
+2006/09/03 : 1.3.2
+ - started the changes towards I/O completion callbacks. stream_sock* have
+ replaced event_*.
+ - added the new "reqtarpit" and "reqitarpit" protection features
+
+2006/07/09 : 1.3.1 (1.2.15)
+ - now, haproxy warns about missing timeout during startup to try to
+ eliminate all those buggy configurations.
+ - added "Content-Type: text/html" in responses wherever appropriate, as
+ suggested by Cameron Simpson.
+ - implemented "option ssl-hello-chk" to use SSLv3 CLIENT HELLO messages to
+ test server's health
+ - implemented "monitor-uri" so that haproxy can reply to a specific URI with
+ an "HTTP/1.0 200 OK" response. This is useful to validate multiple proxies
+ at once.
+
+2006/06/29 : 1.3.0
+    - exploded the whole file into multiple .c and .h. No functional
+      difference is expected at all.
+ - fixed a bug by which neither stats nor error messages could be returned if
+ 'clitimeout' was missing.
+
+2006/05/21 : 1.2.14
+ - new HTML status report with the 'stats' keyword.
+ - added the 'abortonclose' option to better resist traffic surges
+ - implemented dynamic traffic regulation with the 'minconn' option
+ - show request time on denied requests
+ - definitely fixed hot reconf on OpenBSD by the use of SO_REUSEPORT
+ - now a proxy instance is allowed to run without servers, which is
+ useful to dedicate one instance to stats
+ - added lots of error counters
+    - a missing parenthesis prevented matching of cacheable cookies
+ - a missing parenthesis in poll_loop() might have caused missed events.
+
+2006/05/14 : 1.2.13.1
+ - an uninitialized field in the struct session could cause a crash when
+ the session was freed. This has been encountered on Solaris only.
+    - Solaris and OpenBSD do not support shutdown() on listening socket. Let's
+      be nice to them by performing a soft stop if pause fails.
+
+2006/05/13 : 1.2.13
+ - 'maxconn' server parameter to do per-server session limitation
+ - queueing to support non-blocking session limitation
+ - fixed removal of cookies for cookie-less servers such as backup servers
+ - two separate wait queues for expirable and non-expirable tasks provide
+ better performance with lots of sessions.
+ - some code cleanups and performance improvements
+ - made state dumps a bit more verbose
+ - fixed missing checks for NULL srv in dispatch mode
+ - load balancing on backup servers was not possible in source hash mode.
+ - two session flags shared the same bit, but fortunately they were not
+ compatible.
+
+2006/04/15 : 1.2.12
+ Very few changes preparing for more important changes to support per-server
+ session limitations and queueing :
+ - ignore leading empty lines in HTTP requests as suggested by RFC2616.
+ - added the 'weight' parameter to the servers, limited to 1..256. It applies
+ to roundrobin and source hash.
+ - the optional '-s' option could clobber '-st' and '-sf' if compiled in.
+
+2006/03/30 : 1.2.11.1
+ - under some conditions, it might have been possible that when the
+ last dead server became available, it would not have been used
+ till another one would have changed state. Could not be reproduced
+ at all, however seems possible from the code.
+
+2006/03/25 : 1.2.11
+ - added the '-db' command-line option to disable backgrounding.
+ - added the -sf/-st command-line arguments which are used to specify
+ a list of pids to send a FINISH or TERMINATE signal upon startup.
+ They will also be asked to release their port if a bind fails.
+ - reworked the startup mechanism to allow the sending of a signal to a list
+ of old pids if a socket cannot be bound, with a retry for a limited amount
+ of time (1 second by default).
+ - added the ability to enforce limits on memory usage.
+ - added the 'source' load-balancing algorithm which uses the source IP(v4|v6)
+ - re-architectured the server round-robin mechanism to ease integration of
+ other algorithms. It now relies on the number of active and backup servers.
+ - added a counter for the number of active and backup servers, and report
+ these numbers upon SIGHUP or state change.
+
+2006/03/23 : 1.2.10.1
+ - while fixing the backup server round-robin "feature", a new bug was
+ introduced which could miss some backup servers.
+ - the displayed proxy name was wrong when dumping upon SIGHUP.
+
+2006/03/19 : 1.2.10
+ - assert.h is needed when DEBUG is defined.
+ - ENORMOUS long standing bug affecting the epoll polling system :
+ event_data is a union, not a structure !
+ - Make fd management more robust and easier to debug. Also some
+ micro-optimisations.
+ - Limit the number of consecutive accept() in multi-process mode.
+ This produces a more evenly distributed load across the processes and
+ slightly improves performance by reducing bottlenecks.
+ - Make health-checks be more regular, and faster to retry after a timeout.
+ - Fixed some messages to ease parsing of alerts.
+ - provided a patch to enable epoll on RHEL3 kernels.
+ - Separated OpenBSD build from the main Makefile into a new one.
+
+2006/03/15 : 1.2.9
+ - haproxy could not be stopped after being paused, it had to be woken up
+ first. This has been fixed.
+ - the 'ulimit-n' parameter is now optional and by default computed from
+ maxconn + the number of listeners + the number of health-checks.
+ - it is now possible to specify a maximum number of connections at build
+ time with the SYSTEM_MAXCONN define. The value set in the configuration
+ file will then be limited to this value, and only the command-line '-n'
+ option will be able to bypass it. It will prevent against accidental
+ high memory usage on small systems.
+ - RFC2616 expects that any HTTP agent accepts multi-line headers. Earlier
+ versions did not detect a line beginning with a space as the continuation
+ of previous header. It is now correct.
+ - health checks sent to servers configured with identical intervals were
+ sent in perfect synchronisation because the initial time was the same
+ for all. This could induce high load peaks when fragile servers were
+ hosting tens of instances for the same application. Now the load is
+ spread evenly across the smallest interval amongst a listener.
+ - a new 'forceclose' option was added to make the proxy close the outgoing
+ channel to the server once it has sent all its headers and the server
+ starts responding. This helps some servers which don't close upon the
+ 'Connection: close' header. It implies 'option httpclose'.
+ - there was a bug in the way the backup servers were handled. They were
+ erroneously load-balanced while the doc said the opposite. Since
+ load-balanced backup servers is one of the features some people have
+ been asking for, the problem was fixed to reflect the documented
+ behaviour and a new option 'allbackups' was introduced to provide the
+ feature to those who need it.
+ - a never ending connect() could lead to a fast select() loop if its
+ timeout times the number of retransmits exceeded the server read or write
+ timeout, because the later was used to compute select()'s timeout while
+ the connection timeout was not reached.
+ - now we initialize the libc's localtime structures very early so that even
+ under OOM conditions, we can still send dated error messages without
+ segfaulting.
+ - the 'daemon' mode implies 'quiet' and disables 'verbose' because file
+ descriptors are closed.
+
+2006/01/29 : 1.2.8
+ - fixed a nasty bug affecting poll/epoll which could return unmodified data
+ from the server to the client, and sometimes lead to memory corruption
+ crashing the process.
+ - added the new pause/play mechanism with SIGTTOU/SIGTTIN for hot-reconf.
+
+2005/12/18 : 1.2.7.1
+ - the "retries" option was ignored because connect() could not return an
+ error if the connection failed before the timeout.
+ - TCP health-checks could not detect a connection refused in poll/epoll
+ mode.
+
+2005/11/13 : 1.2.7
+ - building with -DUSE_PCRE should include PCRE headers and not regex.h. At
+ least on Solaris, this caused the libc's regex primitives to be used instead
+ of PCRE, which caused trouble on group references. This is now fixed.
+ - delayed the quiet mode during startup so that most of the startup alerts can
+ be displayed even in quiet mode.
+ - display an alert when a listener has no address, invalid or no port, or when
+ there are no enabled listeners upon startup.
+ - added "static-pcre" to the list of supported regex options in the Makefile.
+
+2005/10/09 : 1.2.7rc (1.1.33rc)
+ - second batch of socklen_t changes.
+ - clean-ups from Cameron Simpson.
+ - because tv_remain() does not know about eternity, using no timeout can
+ make select() spin around a null time-out. Bug reported by Cameron Simpson.
+    - client read timeout was not properly initialized to eternity after an
+      accept() if it was not set in the config. It remained undetected so long
+      because eternity is 0 and newly allocated pages are zeroed by the system.
+ - do not call get_original_dst() when not in transparent mode.
+ - implemented a workaround for a bug in certain epoll() implementations on
+ linux-2.4 kernels (epoll-lt <= 0.21).
+ - implemented TCP keepalive with new options : tcpka, clitcpka, srvtcpka.
+
+2005/08/07 : 1.2.6
+ - clean-up patch from Alexander Lazic fixes build on Debian 3.1 (socklen_t).
+
+2005/07/06 : 1.2.6-pre5 (1.1.32)
+ - added the number of active sessions (proxy/process) in the logs
+
+2005/07/06 : 1.2.6-pre4 (1.1.32-pre4)
+ - the time-out fix introduced in 1.1.25 caused a corner case where it was
+ possible for a client to keep a connection maintained regardless of the
+ timeout if the server closed the connection during the HEADER phase,
+ while the client ignored the close request while doing nothing in the
+ other direction. This has been fixed now by ensuring that read timeouts
+ are re-armed when switching to any SHUTW state.
+
+2005/07/05 : 1.2.6-pre3 (1.1.32-pre3)
+ - enhanced error reporting in the logs. Now the proxy will precisely detect
+ various error conditions related to the system and/or process limits, and
+ generate LOG_EMERG logs indicating that a resource has been exhausted.
+ - logs will contain two new characters for the error cause : 'R' indicates
+ a resource exhausted, and 'I' indicates an internal error, though this
+ one should never happen.
+ - server connection timeouts can now be reported in the logs (sC), as well
+ as connections refused because of maxconn limitations (PC).
+
+2005/07/05 : 1.2.6-pre2 (1.1.32-pre2)
+ - new global configuration keyword "ulimit-n" may be used to raise the FD
+ limit to usable values.
+ - a warning is now displayed on startup if the FD limit is lower than the
+ configured maximum number of sockets.
+
+2005/07/05 : 1.2.6-pre1 (1.1.32-pre1)
+ - new configuration keyword "monitor-net" makes it possible to be monitored
+ by external devices which connect to the proxy without being logged nor
+ forwarded to any server. Particularly useful on generic TCPv4 relays.
+
+2005/06/21 : 1.2.5.2
+ - fixed build on PPC where chars are unsigned by default
+
+2005/05/02 : 1.2.5.1
+ - dirty hack to fix a bug introduced with epoll : if we close an FD and
+ immediately reassign it to another session through a connect(), the
+ Prev{Read,Write}Events are not updated, which causes trouble detecting
+ changes, thus leading to many timeouts at high loads.
+
+2005/04/30 : 1.2.5 (1.1.31)
+ - changed the runtime argument to disable epoll() to '-de'
+ - changed the runtime argument to disable poll() to '-dp'
+ - added global options 'nopoll' and 'noepoll' to do the same at the
+ configuration level.
+ - added a 'linux24e' target to the Makefile for Linux 2.4 systems patched to
+ support epoll().
+ - changed default FD_SETSIZE to 65536 on Solaris (default=1024)
+ - conditioned signals redirection to #ifdef DEBUG_MEMORY
+
+2005/04/26 : 1.2.5-pre4
+ - made epoll() support a compile-time option : ENABLE_EPOLL
+ - provided a very little libc replacement for a possibly missing epoll()
+ implementation which can be enabled by -DUSE_MY_EPOLL
+ - implemented the poll() poller, which can be enabled with -DENABLE_POLL.
+ The equivalent runtime argument becomes '-P'. A few tests show that it
+ performs like select() with many fds, but slightly slower (certainly
+ because of the higher amount of memory involved).
+ - separated the 3 polling methods and the tasks scheduler into 4 distinct
+ functions which makes the code a lot more modular.
+ - moved some event tables to private static declarations inside the poller
+ functions.
+ - the poller functions can now initialize themselves, run, and cleanup.
+ - changed the runtime argument to enable epoll() to '-E'.
+ - removed buggy epoll_ctl() code in the client_retnclose() function. This
+ function was never meant to remove anything.
+ - fixed a typo which caused glibc to yell about a double free on exit.
+ - removed error checking after epoll_ctl(DEL) because we can never know if
+ the fd is still active or already closed.
+ - added a few entries in the makefile
+
+2005/04/25 : 1.2.5-pre3
+ - experimental epoll() support (use temporary '-e' argument)
+
+2005/04/24 : 1.2.5-pre2
+ - implemented the HTTP 303 code for error redirection. This forces the
+ browser to fetch the given URI with a GET request. The new keyword for
+ this is 'errorloc303', and a new 'errorloc302' keyword has been created
+ to make them easily distinguishable.
+ - added more controls in the parser for valid use of '\x' sequence.
+ - few fixes from Alex & Klaus
+
+2005/02/17 : 1.2.5-pre1
+ - fixed a few errors in the documentation
+
+2005/02/13
+ - do not pre-initialize unused file-descriptors before select() anymore.
+
+2005/01/22 : 1.2.4
+ - merged Alexander Lazic's and Klaus Wagner's work on application
+ cookie-based persistence. Since this is the first merge, this version is
+ not intended for general use and reports are more than welcome. Some
+ documentation is really needed though.
+
+2005/01/22 : 1.2.3 (1.1.30)
+ - add an architecture guide to the documentation
+ - released without any changes
+
+2004/12/26 : 1.2.3-pre1 (1.1.30-pre1)
+ - increased default BUFSIZE to 16 kB to accept max headers of 8 kB which is
+ compatible with Apache. This limit can be configured in the makefile now.
+ Thanks to Eric Fehr for the checks.
+ - added a per-server "source" option which now makes it possible to bind to
+ a different source for each (potentially identical) server.
+ - changed cookie-based server selection slightly to allow several servers to
+ share the same cookie, thus making it possible to associate backup servers to
+ live servers and ease soft-stop for maintenance periods. (Alexander Lazic)
+ - added the cookie 'prefix' mode which makes it possible to use persistence
+ with thin clients which support only one cookie. The server name is prefixed
+ before the application cookie, and restore back.
+ - fixed the order of servers within an instance to match documentation. Now
+ the servers are *really* used in the order of their declaration. This is
+ particularly important when multiple backup servers are in use.
+
+2004/10/18 : 1.2.2 (1.1.29)
+ - fixed a bug where a TCP connection would be logged twice if the 'logasap'
+ option was enabled without the 'tcplog' option.
+ - encode_string() would use hdr_encode_map instead of the map argument.
+
+2004/08/10 : (1.1.29-pre2)
+ - the logged request is now encoded with '#XX' for unprintable characters
+ - new keywords 'capture request header' and 'capture response header' enable
+ logging of arbitrary HTTP headers in requests and responses
+ - removed "-DSOLARIS" after replacing the last inet_aton() with inet_pton()
+
+2004/06/06 : 1.2.1 (1.1.28)
+ - added the '-V' command line option to verbosely report errors even though
+ the -q or 'quiet' options are specified. This is useful with '-c'.
+ - added a Red Hat init script and a .spec from Simon Matter <simon.matter@invoca.ch>
+
+2004/06/05 :
+ - added the "logasap" option which produces a log without waiting for the data
+ to be transferred from the server to the client.
+ - added the "httpclose" option which removes any "connection:" header and adds
+ "Connection: close" in both directions.
+ - added the 'checkcache' option which blocks cacheable responses containing
+ dangerous headers, such as 'set-cookie'.
+ - added 'rspdeny' and 'rspideny' to block certain responses to avoid sensitive
+ information leaks from servers.
+
+2004/04/18 :
+ - send an EMERG log when no server is available for a given proxy
+ - added the '-c' command line option to syntactically check the
+ configuration file without starting the service.
+
+2003/11/09 : 1.2.0
+ - the same as 1.1.27 + IPv6 support on the client side
+
+2003/10/27 : 1.1.27
+ - the configurable HTTP health check introduced in 1.1.23 revealed a shameful
+ bug : the code still assumed that HTTP requests were the same size as the
+ original ones (22 bytes), and failed if they were not.
+ - added support for pidfiles.
+
+2003/10/22 : 1.1.26
+ - the fix introduced in 1.1.25 for client timeouts while waiting for servers
+ broke almost all compatibility with POST requests, because the proxy
+ stopped to read anything from the client as soon as it got all of its
+ headers.
+
+2003/10/15 : 1.1.25
+ - added the 'tcplog' option, which provides enhanced, HTTP-like logs for
+ generic TCP proxies, or lighter logs for HTTP proxies.
+ - fixed a time-out condition wrongly reported as client time-out in data
+ phase if the client timeout was lower than the connect timeout times the
+ number of retries.
+
+2003/09/21 : 1.1.24
+ - if a client sent a full request then shut its write connection down, then
+ the request was aborted. This case was detected only when using haproxy
+ both as health-check client and as a server.
+ - if 'option httpchk' is used in a 'health' mode server, then responses will
+ change from 'OK' to 'HTTP/1.0 200 OK'.
+ - fixed a Linux-only bug in case of HTTP server health-checks, where a single
+ server response followed by a close could be ignored, and the server seen
+ as failed.
+
+2003/09/19 : 1.1.23
+ - fixed a stupid bug introduced in 1.1.22 which caused second and subsequent
+ 'default' sections to keep previous parameters, and not initialize logs
+ correctly.
+ - fixed a second stupid bug introduced in 1.1.22 which caused configurations
+ relying on 'dispatch' mode to segfault at the first connection.
+ - 'option httpchk' now supports method, HTTP version and a few headers.
+ - now, 'option httpchk', 'cookie' and 'capture' can be specified in
+ 'defaults' section
+
+2003/09/10 : 1.1.22
+ - 'listen' now supports optional address:port-range lists
+ - 'bind' introduced to add new listen addresses
+ - fixed a bug which caused a session to be kept established on a server till
+ it timed out if the client closed during the DATA phase.
+ - the port part of each server address can now be empty to make the proxy
+ connect to the server on the same port it was connected to, be an absolute
+ unsigned number to reflect a single port (as in older versions), or an
+ explicitly signed number (+N/-N) to indicate that this offset must be
+ applied to the port the proxy was connected to, when connecting to the
+ server.
+ - the 'port' server option allows the user to specify a different
+ health-check port than the service one. It is mandatory when only relative
+ ports have been specified and check is required. By default, the checks are
+ sent to the service port.
+ - new 'defaults' section which is rather similar to 'listen' except that all
+ values are only used as default values for future 'listen' sections, until
+ a new 'defaults' resets them. At the moment, server options, regexes,
+ cookie names and captures cannot be set in the 'defaults' section.
+
+2003/05/06 : 1.1.21
+ - changed the debug output format so that it now includes the session unique
+ ID followed by the instance name at the beginning of each line.
+ - in debug mode, accept now shows the client's IP and port.
+ - added 3 small debugging scripts to search and pretty print debug output
+ - changed the default health check request to "OPTIONS /" instead of
+ "OPTIONS *" since not all servers implement the latter one.
+ - "option httpchk" now accepts an optional parameter allowing the user to
+ specify a URI other than '/' during health-checks.
+
+2003/04/21 : 1.1.20
+ - fixed two problems with time-outs, one where a server would be logged as
+ timed out during transfers that take longer to complete than the fixed
+ time-out, and one where clients were logged as timed-out during the data
+ phase because they didn't have anything to send. This sometimes caused
+ slow client connections to close too early while in fact there was no
+ problem. The proper fix would be to have a per-fd time-out with
+ conditions depending on the state of the HTTP FSM.
+
+2003/04/16 : 1.1.19
+ - haproxy was NOT RFC compliant because it was case-sensitive on HTTP
+ "Cookie:" and "Set-Cookie:" headers. This caused JVM 1.4 to fail on
+ cookie persistence because it uses "cookie:". Two memcmp() have been
+ replaced with strncasecmp().
+
+2003/04/02 : 1.1.18
+ - Haproxy can be compiled with PCRE regex instead of libc regex, by setting
+ REGEX=pcre on the make command line.
+ - HTTP health-checks now use "OPTIONS *" instead of "OPTIONS /".
+ - when explicit source address binding is required, it is now also used for
+ health-checks.
+ - added 'reqpass' and 'reqipass' to allow certain headers but not the request
+ itself.
+ - factored several strings to reduce binary size by about 2 kB.
+ - replaced setreuid() and setregid() with more standard setuid() and setgid().
+ - added 4 status flags to the log line indicating who ended the connection
+ first, the sessions state, the validity of the cookie, and action taken on
+ the set-cookie header.
+
+2002/10/18 : 1.1.17
+ - add the notion of "backup" servers, which are used only when all other
+ servers are down.
+ - make Set-Cookie return "" instead of "(null)" when the server has no
+ cookie assigned (useful for backup servers).
+ - "log" now supports an optional level name (info, notice, err ...) above
+ which nothing is sent.
+ - replaced some strncmp() with memcmp() for better efficiency.
+ - added "capture cookie" option which logs client and/or server cookies
+ - cleaned up/down messages and dump servers states upon SIGHUP
+ - added a redirection feature for errors : "errorloc <errnum> <url>"
+ - now we won't insist on connecting to a dead server, even with a cookie,
+ unless option "persist" is specified.
+ - added HTTP/408 response for client request time-out and HTTP/50[234] for
+ server reply time-out or errors.
+
+2002/09/01 : 1.1.16
+ - implement HTTP health checks when option "httpchk" is specified.
+
+2002/08/07 : 1.1.15
+ - replaced setpgid()/setpgrp() with setsid() for better portability, because
+ setpgrp() doesn't have the same meaning under Solaris, Linux, and OpenBSD.
+
+2002/07/20 : 1.1.14
+ - added "postonly" cookie mode
+
+2002/07/15 : 1.1.13
+ - tv_diff used inverted parameters which led to negative times !
+
+2002/07/13 : 1.1.12
+ - fixed stats monitoring, and optimized some tv_* for most common cases.
+ - replaced temporary 'newhdr' with 'trash' to reduce stack size
+ - made HTTP errors more HTML-friendly.
+ - renamed strlcpy() to strlcpy2() because of a slight difference between
+ their behaviour (return value), to avoid confusion.
+ - restricted HTTP messages to HTTP proxies only
+ - added a 502 message when the connection has been refused by the server,
+ to prevent clients from believing this is a zero-byte HTTP 0.9 reply.
+ - changed 'Cache-control:' from 'no-cache="set-cookie"' to 'private' when
+ inserting a cookie, because some caches (apache) don't understand it.
+ - fixed processing of server headers when client is in SHUTR state
+
+2002/07/04 :
+ - automatically close fd's 0,1 and 2 when going daemon ; setpgrp() after
+ setpgid()
+
+2002/06/04 : 1.1.11
+ - fixed multi-cookie handling in client request to allow clean deletion
+ in insert+indirect mode. Now, only the server cookie is deleted and not
+ all the header. Should now be compliant to RFC2965.
+ - added a "nocache" option to "cookie" to specify that we explicitly want
+ to add a "cache-control" header when we add a cookie.
+ It is also possible to add an "Expires: <old-date>" to keep compatibility
+ with old/broken caches.
+
+2002/05/10 : 1.1.10
+ - if a cookie is used in insert+indirect mode, it's desirable that
+ the servers don't see it. It was not possible to remove it correctly
+ with regexps, so now it's removed automatically.
+
+2002/04/19 : 1.1.9
+ - don't use snprintf()'s return value as an end of message since it may
+ be larger. This caused bus errors and segfaults in internal libc's
+ getenv() during localtime() in send_log().
+ - removed dead insecure send_syslog() function and all references to it.
+ - fixed warnings on Solaris due to buggy implementation of isXXXX().
+
+2002/04/18 : 1.1.8
+ - option "dontlognull"
+ - fixed "double space" bug in config parser
+ - fixed an uninitialized server field in case of dispatch
+ with no existing server which could cause a segfault during
+ logging.
+ - the pid logged was always the father's, which was wrong for daemons.
+ - fixed wrong level "LOG_INFO" for message "proxy started".
+
+2002/04/13 :
+ - http logging is now complete :
+ - ip:port, date, proxy, server
+ - req_time, conn_time, hdr_time, tot_time
+ - status, size, request
+ - source address
+
+2002/04/12 : 1.1.7
+ - added option forwardfor
+ - added reqirep, reqidel, reqiallow, reqideny, rspirep, rspidel
+ - added "log global" in "listen" section.
+
+2002/04/09 :
+ - added a new "global" section :
+ - logs
+ - debug, quiet, daemon modes
+ - uid, gid, chroot, nbproc, maxconn
+
+2002/04/08 : 1.1.6
+ - regex are now chained and not limited anymore.
+ - unavailable server now returns HTTP/502.
+ - increased per-line args limit to 40
+ - added reqallow/reqdeny to block some requests on matches
+ - added HTTP 400/403 responses
+
+2002/04/03 : 1.1.5
+ - connection logging displayed incorrect source address.
+ - added proxy start/stop and server up/down log events.
+ - replaced log message short buffers with larger trash.
+ - enlarged buffer to 8 kB and replace buffer to 4 kB.
+
+2002/03/25 : 1.1.4
+ - made rise/fall/interval time configurable
+
+2002/03/22 : 1.1.3
+ - fixed a bug : cr_expire and cw_expire were inverted in CL_STSHUT[WR]
+ which could lead to loops.
+
+2002/03/21 : 1.1.2
+ - fixed a bug in buffer management where we could have a loop
+ between event_read() and process_{cli|srv} if R==BUFSIZE-MAXREWRITE.
+ => implemented an adjustable buffer limit.
+ - fixed a bug : expiration of tasks in wait queue timeout is used again,
+ and running tasks are skipped.
+ - added some debug lines for accept events.
+ - send warnings for servers up/down.
+
+2002/03/12 : 1.1.1
+ - fixed a bug in total failure handling
+ - fixed a bug in timestamp comparison within same second (tv_cmp_ms)
+
+2002/03/10 : 1.1.0
+ - fixed a few timeout bugs
+ - rearranged the task scheduler subsystem to improve performance,
+ add new tasks, and make it easier to later port to librt ;
+ - allow multiple accept() for one select() wake up ;
+ - implemented internal load balancing with basic health-check ;
+ - cookie insertion and header add/replace/delete, with better strings
+ support.
+
+2002/03/08
+ - reworked buffer handling to fix a few rewrite bugs, and
+ improve overall performance.
+ - implement the "purge" option to delete server cookies in direct mode.
+
+2002/03/07
+ - fixed some error cases where the maxfd was not decreased.
+
+2002/02/26
+ - now supports transparent proxying, at least on linux 2.4.
+
+2002/02/12
+ - soft stop works again (fixed select timeout computation).
+ - it seems that TCP proxies sometimes cannot timeout.
+ - added a "quiet" mode.
+ - enforce file descriptor limitation on socket() and accept().
+
+2001/12/30 : release of version 1.0.2 : fixed a bug in header processing
+2001/12/19 : release of version 1.0.1 : no MSG_NOSIGNAL on solaris
+2001/12/16 : release of version 1.0.0.
+2001/12/16 : added syslog capability for each accepted connection.
+2001/11/19 : corrected premature end of files and occasional SIGPIPE.
+2001/10/31 : added health-check type servers (mode health) which replies OK then closes.
+2001/10/30 : added the ability to support standard TCP proxies and HTTP proxies
+ with or without cookies (use keyword http for this).
+2001/09/01 : added client/server header replacing with regexps.
+ eg:
+ cliexp ^(Host:\ [^:]*).* Host:\ \1:80
+ srvexp ^Server:\ .* Server:\ Apache
+2000/11/29 : first fully working release with complete FSMs and timeouts.
+2000/11/28 : major rewrite
+2000/11/26 : first write
diff --git a/CONTRIBUTING b/CONTRIBUTING
new file mode 100644
index 0000000..60a78ba
--- /dev/null
+++ b/CONTRIBUTING
@@ -0,0 +1,1020 @@
+ HOW TO GET YOUR CODE ACCEPTED IN HAPROXY
+ READ THIS CAREFULLY BEFORE SUBMITTING CODE
+
+THIS DOCUMENT PROVIDES SOME RULES TO FOLLOW WHEN SENDING CONTRIBUTIONS. PATCHES
+NOT FOLLOWING THESE RULES WILL SIMPLY BE IGNORED IN ORDER TO PROTECT ALL OTHER
+RESPECTFUL CONTRIBUTORS' VALUABLE TIME.
+
+
+Abstract
+--------
+
+If you have never contributed to HAProxy before, or if you did so and noticed
+that nobody seems to be interested in reviewing your submission, please do read
+this long document carefully. HAProxy maintainers are particularly demanding on
+respecting certain simple rules related to general code and documentation style
+as well as splitting your patches and providing high quality commit messages.
+The reason behind this is that your patch will be met multiple times in the
+future, when doing some backporting work or when bisecting a bug, and it is
+critical that anyone can quickly decide if the patch is right, wrong, if it
+misses something, if it must be reverted or needs to be backported. Maintainers
+are generally benevolent with newcomers and will help them provided their work
+indicates they have at least read this document. Some have improved over time,
+to the point of being totally trusted and gaining commit access so they don't
+need to depend on anyone to pick their code. On the opposite, those who insist
+on not making minimal efforts, however, will simply be ignored.
+
+
+Background
+----------
+
+HAProxy is a community-driven project. But like most highly technical projects
+it takes a lot of time to develop the skills necessary to be autonomous in the
+project, and there is a very small core team helped by a small set of very
+active participants. While most of the core team members work on the code as
+part of their day job, most participants do it on a voluntary basis during
+their spare time. The ideal model for developers is to spend their time:
+ 1) developing new features
+ 2) fixing bugs
+ 3) doing maintenance backports
+ 4) reviewing other people's code
+
+It turns out that on a project like HAProxy, like many other similarly complex
+projects, the time spent is exactly the opposite:
+ 1) reviewing other people's code
+ 2) doing maintenance backports
+ 3) fixing bugs
+ 4) developing new features
+
+A large part of the time spent reviewing code often consists in giving basic
+recommendations that are already explained in this file. In addition to taking
+time, it is not appealing for people willing to spend one hour helping others
+to do the same thing over and over instead of discussing the code design, and
+it tends to delay the start of code reviews.
+
+Regarding backports, they are necessary to provide a set of stable branches
+that are deployed in production at many places. Load balancers are complex and
+new features often induce undesired side effects in other areas, which we will
+call bugs. Thus it's common for users to stick to a branch featuring everything
+they need and not to upgrade too often. This backporting job is critical to the
+ecosystem's health and must be done regularly. Very often the person devoting
+some time on backports has little to no information about the relevance (let
+alone importance) of a patch and is unlikely to be an expert in the area
+affected by the patch. It's the role of the commit message to explain WHAT
+problem the patch tries to solve, WHY it is estimated that it is a problem, and
+HOW it tries to address it. With these elements, the person in charge of the
+backports can decide whether or not to pick the patch. And if the patch does
+not apply (which is common for older versions) they have information in the
+commit message about the principle and choices that the initial developer made
+and will try to adapt the patch sticking to these principles. Thus, the time
+spent backporting patches solely depends on the code quality and the commit
+message details and accuracy.
+
+When it turns to fixing bugs, before declaring a bug, there is an analysis
+phase. It starts with "is this behaviour expected", "is it normal", "under what
+circumstances does it happen", "when did it start to happen", "was it intended",
+"was it just overlooked", and "how to fix it without breaking the initial
+intent". A utility called "git bisect" is usually involved in determining when
+the behaviour started to happen. It determines the first patch which introduced
+the new behaviour. If the patch is huge, touches many areas, is really difficult
+to read because it needlessly reindents code or adds/removes line breaks out of
+context, it will be very difficult to figure out what part of this patch broke the
+behaviour. Then once that part is identified, if the commit message doesn't provide
+a detailed description about the intent of the patch, i.e. the problem it was
+trying to solve, why and how, the developer landing on that patch will really
+feel powerless. And very often in this case, the fix for the problem will break
+something else or something that depended on the original patch.
+
+But contrary to what it could look like, providing great quality patches is not
+difficult, and developers will always help contributors improve their patches
+quality because it's in their interest as well. History has shown that first
+time contributors can provide an excellent work when they have carefully read
+this document, and that people coming from projects with different practices
+can grow from first-time contributor to trusted committer in about 6 months.
+
+
+Preparation
+-----------
+
+It is possible that you'll want to add a specific feature to satisfy your needs
+or one of your customers'. Contributions are welcome, however maintainers are
+often very picky about changes. Patches that change massive parts of the code,
+or that touch the core parts without any good reason will generally be rejected
+if those changes have not been discussed first.
+
+The proper place to discuss your changes is the HAProxy Mailing List. There are
+enough skilled readers to catch hazardous mistakes and to suggest improvements.
+There is no other place where you'll find as many skilled people on the project,
+and these people can help you get your code integrated quickly. You can
+subscribe to it by sending an empty e-mail at the following address :
+
+ haproxy+subscribe@formilux.org
+
+It is not even necessary to subscribe, you can post there and verify via the
+public list archives that your message was properly delivered. In this case you
+should indicate in your message that you'd like responders to keep you CCed.
+Please visit http://haproxy.org/ to figure available options to join the list.
+
+If you have an idea about something to implement, *please* discuss it on the
+list first. It has already happened several times that two persons did the same
+thing simultaneously. This is a waste of time for both of them. It's also very
+common to see some changes rejected because they're done in a way that will
+conflict with future evolutions, or that does not leave a good feeling. It's
+always unpleasant for the person who did the work, and it is unpleasant in
+general because people's time and efforts are valuable and would be better
+spent working on something else. That would not happen if these were discussed
+first. There is no problem posting work in progress to the list, it happens
+quite often in fact. Just prefix your mail subject with "RFC" (it stands for
+"request for comments") and everyone will understand you'd like some opinion
+on your work in progress. Also, don't waste your time with the doc when
+submitting patches for review, only add the doc with the patch you consider
+ready to merge (unless you need some help on the doc itself, of course).
+
+Another important point concerns code portability. HAProxy requires gcc as the
+C compiler, and may or may not work with other compilers. However it's known to
+build using gcc 2.95 or any later version. As such, it is important to keep in
+mind that certain facilities offered by recent versions must not be used in the
+code:
+
+ - declarations mixed in the code (requires gcc >= 3.x and is a bad practice)
+ - GCC builtins without checking for their availability based on version and
+ architecture ;
+ - assembly code without any alternate portable form for other platforms
+ - use of stdbool.h, "bool", "false", "true" : simply use "int", "0", "1"
+ - in general, anything which requires C99 (such as declaring variables in
+ "for" statements)
+
+Since most of these restrictions are just a matter of coding style, it is
+normally not a problem to comply. Please read doc/coding-style.txt for all the
+details.
+
+When modifying some optional subsystem (SSL, Lua, compression, device detection
+engines), please make sure the code continues to build (and to work) when these
+features are disabled. Similarly, when modifying the SSL stack, please always
+ensure that supported OpenSSL versions continue to build and to work, especially
+if you modify support for alternate libraries. Clean support for the legacy
+OpenSSL libraries is mandatory, support for its derivatives is a bonus and may
+occasionally break even though a great care is taken. In other words, if you
+provide a patch for OpenSSL you don't need to test its derivatives, but if you
+provide a patch for a derivative you also need to test with OpenSSL.
+
+If your work is very confidential and you can't publicly discuss it, you can
+also mail willy@haproxy.org directly about it, but your mail may be waiting
+several days in the queue before you get a response, if you get a response at
+all. Retransmit if you don't get a response by one week. Please note that
+direct sent e-mails to this address for non-confidential subjects may simply
+be forwarded to the list or be deleted without notification. An auto-responder
+bot is in place to try to detect e-mails from people asking for help and to
+redirect them to the mailing list. Do not be surprised if this happens to you.
+
+If you'd like a feature to be added but you think you don't have the skills to
+implement it yourself, you should follow these steps :
+
+ 1. discuss the feature on the mailing list. It is possible that someone
+ else has already implemented it, or that someone will tell you how to
+ proceed without it, or even why not to do it. It is also possible that
+ in fact it's quite easy to implement and people will guide you through
+ the process. That way you'll finally have YOUR patch merged, providing
+ the feature YOU need.
+
+ 2. if you really can't code it yourself after discussing it, then you may
+ consider contacting someone to do the job for you. Some people on the
+ list might sometimes be OK with trying to do it.
+
+The version control system used by the project (Git) keeps authorship
+information in the form of the patch author's e-mail address. This way you will
+be credited for your work in the project's history. If you contract with
+someone to implement your idea you may have to discuss such modalities with
+the person doing the work as by default this person will be mentioned as the
+work's author.
+
+
+Rules: the 12 laws of patch contribution
+----------------------------------------
+
+People contributing patches must apply the following rules. That may sound heavy
+at the beginning but it's common sense more than anything else and contributors
+do not think about them anymore after a few patches.
+
+1) Comply with the license
+
+ Before modifying some code, you have read the LICENSE file ("main license")
+ coming with the sources, and all the files this file references. Certain
+ files may be covered by different licenses, in which case it will be
+ indicated in the files themselves. In any case, you agree to respect these
+ licenses and to contribute your changes under the same licenses. If you want
+ to create new files, they will be under the main license, or any license of
+ your choice that you have verified to be compatible with the main license,
+ and that will be explicitly mentioned in the affected files. The project's
+ maintainers are free to reject contributions proposing license changes they
+ feel are not appropriate or could cause future trouble.
+
+2) Develop on development branch, not stable ones
+
+ Your work may only be based on the latest development version. No development
+ is made on a stable branch. If your work needs to be applied to a stable
+ branch, it will first be applied to the development branch and only then will
+ be backported to the stable branch. You are responsible for ensuring that
+ your work correctly applies to the development version. If at any moment you
+ are going to work on restructuring something important which may impact other
+ contributors, the rule that applies is that the first sent is the first
+ served. However it is considered good practice and politeness to warn others
+ in advance if you know you're going to make changes that may force them to
+ re-adapt their code, because they did probably not expect to have to spend
+ more time discovering your changes and rebasing their work.
+
+3) Read and respect the coding style
+
+ You have read and understood "doc/coding-style.txt", and you're actively
+ determined to respect it and to enforce it on your coworkers if you're going
+ to submit a team's work. We don't care what text editor you use, whether it's
+ a hex editor, cat, vi, emacs, Notepad, Word, or even Eclipse. The editor is
+ only the interface between you and the text file. What matters is what is in
+ the text file in the end. The editor is not an excuse for submitting poorly
+ indented code, which only proves that the person has no consideration for
+ quality and/or has done it in a hurry (probably worse). Please note that most
+ bugs were found in low-quality code. Reviewers know this and tend to be much
+ more reluctant to accept poorly formatted code because by experience they
+ won't trust their author's ability to write correct code. It is also worth
+ noting that poor quality code is painful to read and may result in nobody
+ willing to waste their time even reviewing your work.
+
+4) Present clean work
+
+ The time it takes for you to polish your code is always much smaller than the
+ time it takes others to do it for you, because they always have to wonder if
+ what they see is intended (meaning they didn't understand something) or if it
+ is a mistake that needs to be fixed. And since there are less reviewers than
+ submitters, it is vital to spread the effort closer to where the code is
+ written and not closer to where it gets merged. For example if you have to
+ write a report for a customer that your boss wants to review before you send
+ it to the customer, will you throw on his desk a pile of paper with stains,
+ typos and copy-pastes everywhere ? Will you say "come on, OK I made a mistake
+ in the company's name but they will find it by themselves, it's obvious it
+ comes from us" ? No. When in doubt, simply ask for help on the mailing list.
+
+5) Documentation is very important
+
+ There are four levels of importance of quality in the project :
+
+ - The most important one, and by far, is the quality of the user-facing
+ documentation. This is the first contact for most users and it immediately
+ gives them an accurate idea of how the project is maintained. Dirty docs
+ necessarily belong to a dirty project. Be careful about the way the text you
+ add is presented and indented. Be very careful about typos, usual mistakes
+ such as double consonants when only one is needed or "it's" instead of
+ "its", don't mix US English and UK English in the same paragraph, etc.
+ When in doubt, check in a dictionary. Fixes for existing typos in the doc
+ are always welcome and chasing them is a good way to become familiar with
+ the project and to get other participants' respect and consideration.
+
+ - The second most important level is user-facing messages emitted by the
+ code. You must try to see all the messages your code produces to ensure
+ they are understandable outside of the context where you wrote them,
+ because the user often doesn't expect them. That's true for warnings, and
+ that's even more important for errors which prevent the program from
+ working and which require an immediate and well understood fix in the
+ configuration. It's much better to say "line 35: compression level must be
+ an integer between 1 and 9" than "invalid argument at line 35". In HAProxy,
+ error handling roughly represents half of the code, and that's about 3/4 of
+ the configuration parser. Take the time to do something you're proud of. A
+ good rule of thumb is to keep in mind that your code talks to a human and
+ tries to teach them how to proceed. It must then speak like a human.
+
+ - The third most important level is the code and its accompanying comments,
+ including the commit message which is a complement to your code and
+ comments. It's important for all other contributors that the code is
+ readable, fluid, understandable and that the commit message describes what
+ was done, the choices made, the possible alternatives you thought about,
+ the reason for picking this one and its limits if any. Comments should be
+ written where it's easy to have a doubt or after some error cases have been
+ wiped out and you want to explain what possibilities remain. All functions
+ must have a comment indicating what they take on input and what they
+ provide on output. Please adjust the comments when you copy-paste a
+ function or change its prototype, this type of lazy mistake is too common
+ and very confusing when reading code later to debug an issue. Do not forget
+ that others will feel really angry at you when they have to dig into your
+ code for a bug that your code caused and they feel like this code is dirty
+ or confusing, that the commit message doesn't explain anything useful and
+ that the patch should never have been accepted in the first place. That
+ will strongly impact your reputation and will definitely affect your
+ chances to contribute again!
+
+ - The fourth level of importance is in the technical documentation that you
+ may want to add with your code. Technical documentation is always welcome
+ as it helps others make the best use of your work and to go exactly in the
+ direction you thought about during the design. This is also what reduces
+ the risk that your design gets changed in the near future due to a misuse
+ and/or a poor understanding. All such documentation is actually considered
+ as a bonus. It is more important that this documentation exists than that
+ it looks clean. Sometimes just copy-pasting your draft notes in a file to
+ keep a record of design ideas is better than losing them. Please do your
+ best so that other ones can read your doc. If these docs require a special
+ tool such as a graphics utility, ensure that the file name makes it
+ unambiguous how to process it. So there are no rules here for the contents,
+ except one. Please write the date in your file. Design docs tend to stay
+ forever and to remain long after they become obsolete. At this point that
+ can cause harm more than it can help. Writing the date in the document
+ helps developers guess the degree of validity and/or compare them with the
+ date of certain commits touching the same area.
+
+6) US-ASCII only!
+
+ All text files and commit messages are written using the US-ASCII charset.
+ Please be careful that your contributions do not contain any character not
+ printable using this charset, as they will render differently in different
+ editors and/or terminals. Avoid latin1 and more importantly UTF-8 which some
+ editors tend to abuse to replace some US-ASCII characters with their
+ typographic equivalent which aren't readable anymore in other editors. The
+ only place where alternative charsets are tolerated is in your name in the
+ commit message, but it's at your own risk as it can be mangled during the
+ merge. Anyway if you have an e-mail address, you probably have a valid
+ US-ASCII representation for it as well.
+
+7) Comments
+
+ Be careful about comments when you move code around. It's not acceptable that
+ a block of code is moved to another place leaving irrelevant comments at the
+ old place, just like it's not acceptable that a function is duplicated without
+ the comments being adjusted. The example below started to become quite common
+ during the 1.6 cycle, it is not acceptable and wastes everyone's time :
+
+ /* Parse switching <str> to build rule <rule>. Returns 0 on error. */
+ int parse_switching_rule(const char *str, struct rule *rule)
+ {
+ ...
+ }
+
+ /* Parse switching <str> to build rule <rule>. Returns 0 on error. */
+ void execute_switching_rule(struct rule *rule)
+ {
+ ...
+ }
+
+ This patch is not acceptable either (and it's unfortunately not that rare) :
+
+ + if (!session || !arg || list_is_empty(&session->rules->head))
+ + return 0;
+ +
+ /* Check if session->rules is valid before dereferencing it */
+ if (!session->rules_allocated)
+ return 0;
+
+ - if (!arg || list_is_empty(&session->rules->head))
+ - return 0;
+ -
+
+8) Short, readable identifiers
+
+ Limit the length of your identifiers in the code. When your identifiers start
+ to sound like sentences, it's very hard for the reader to keep on track with
+ what operation they are observing. Also long names force expressions to fit
+ on several lines which also cause some difficulties to the reader. See the
+ example below :
+
+ int file_name_len_including_global_path;
+ int file_name_len_without_global_path;
+ int global_path_len_or_zero_if_default;
+
+ if (global_path)
+ global_path_len_or_zero_if_default = strlen(global_path);
+ else
+ global_path_len_or_zero_if_default = 0;
+
+ file_name_len_without_global_path = strlen(file_name);
+ file_name_len_including_global_path =
+ file_name_len_without_global_path + 1 + /* for '/' */
+ global_path_len_or_zero_if_default ?
+ global_path_len_or_zero_if_default : default_path_len;
+
+ Compare it to this one :
+
+ int f, p;
+
+ p = global_path ? strlen(global_path) : default_path_len;
+ f = p + 1 + strlen(file_name); /* 1 for '/' */
+
+ A good rule of thumb is that if your identifiers start to contain more than
+ 3 words or more than 15 characters, they can become confusing. For function
+ names it's less important especially if these functions are rarely used or
+ are used in a complex context where it is important to differentiate between
+ their multiple variants.
+
+9) Unified diff only
+
+ The best way to build your patches is to use "git format-patch". This means
+ that you have committed your patch to a local branch, with an appropriate
+ subject line and a useful commit message explaining what the patch attempts
+ to do. It is not strictly required to use git, but what is strictly required
+ is to have all these elements in the same mail, easily distinguishable, and
+ a patch in "diff -up" format (which is also the format used by Git). This
+ means the "unified" diff format must be used exclusively, and with the
+ function name printed in the diff header of each block. That significantly
+ helps during reviews. Keep in mind that most reviews are done on the patch
+ and not on the code after applying the patch. Your diff must keep some
+ context (3 lines above and 3 lines below) so that there's no doubt where the
+ code has to be applied. Don't change code outside of the context of your
+ patch (eg: take care of not adding/removing empty lines once you remove
+ your debugging code). If you are using Git (which is strongly recommended),
+ always use "git show" after doing a commit to ensure it looks good, and
+ enable syntax coloring that will automatically report in red the trailing
+ spaces or tabs that your patch added to the code and that must absolutely be
+ removed. These ones cause a real pain to apply patches later because they
+ mangle the context in an invisible way. Such patches with trailing spaces at
+ end of lines will be rejected.
+
+10) One patch per feature
+
+ Please cut your work in series of patches that can be independently reviewed
+ and merged. Each patch must do something on its own that you can explain to
+ someone without being ashamed of what you did. For example, you must not say
+ "This is the patch that implements SSL, it was tricky". There's clearly
+ something wrong there, your patch will be huge, will definitely break things
+ and nobody will be able to figure what exactly introduced the bug. However
+ it's much better to say "I needed to add some fields in the session to store
+ the SSL context so this patch does this and doesn't touch anything else, so
+ it's safe". Also when dealing with series, you will sometimes fix a bug that
+ one of your patches introduced. Please do merge these fixes (eg: using git
+ rebase -i and squash or fixup), as it is not acceptable to see patches which
+ introduce known bugs even if they're fixed later. Another benefit of cleanly
+ splitting patches is that if some of your patches need to be reworked after
+ a review, the other ones can still be merged so that you don't need to care
+ about them anymore. When sending multiple patches for review, prefer to send
+ one e-mail per patch than all patches in a single e-mail. The reason is that
+ not everyone is skilled in all areas nor has the time to review everything
+ at once. With one patch per e-mail, it's easy to comment on a single patch
+ without giving an opinion on the other ones, especially if a long thread
+ starts about one specific patch on the mailing list. "git send-email" does
+ that for you though it requires a few trials before getting it right.
+
+ If you can, please always put all the bug fixes at the beginning of the
+ series. This often makes it easier to backport them because they will not
+ depend on context that your other patches changed. As a hint, if you can't
+ do this, there is little chance that your bug fix can be backported.
+
+11) Real commit messages please!
+
+ The commit message is how you're trying to convince a maintainer to adopt
+ your work and maintain it as long as possible. A dirty commit message almost
+ always comes with dirty code. Too short a commit message indicates that too
+ short an analysis was done and that side effects are extremely likely to be
+ encountered. It's the maintainer's job to decide to accept this work in its
+ current form or not, with the known constraints. Some patches which rework
+ architectural parts or fix sensitive bugs come with 20-30 lines of design
+ explanations, limitations, hypothesis or even doubts, and despite this it
+ happens when reading them 6 months later while trying to identify a bug that
+ developers still miss some information about corner cases.
+
+ So please properly format your commit messages. To get an idea, just run
+ "git log" on the file you've just modified. Patches always have the format
+ of an e-mail made of a subject, a description and the actual patch. If you
+ are sending a patch as an e-mail formatted this way, it can quickly be
+ applied with limited effort so that's acceptable :
+
+ - A subject line (may wrap to the next line, but please read below)
+ - an empty line (subject delimiter)
+ - a non-empty description (the body of the e-mail)
+ - the patch itself
+
+ The subject describes the "What" of the change ; the description explains
+ the "why", the "how" and sometimes "what next". For example a commit message
+ looking like this will be rejected :
+
+ | From: Mr Foobar <foobar@example.com>
+ | Subject: BUG: fix typo in ssl_sock
+ |
+
+ This one as well (too long subject, not the right place for the details) :
+
+ | From: Mr Foobar <foobar@example.com>
+ | Subject: BUG/MEDIUM: ssl: use an error flag to prevent ssl_read() from
+ | returning 0 when dealing with large buffers because that can cause
+ | an infinite loop
+ |
+
+ This one ought to be used instead :
+
+ | From: Mr Foobar <foobar@example.com>
+ | Subject: BUG/MEDIUM: ssl: fix risk of infinite loop in ssl_sock
+ |
+ | ssl_read() must not return 0 on error or the caller may loop forever.
+ | Instead we add a flag to the connection to notify about the error and
+ | check it at all call places. This situation can only happen with large
+ | buffers so a workaround is to limit buffer sizes. Another option would
+ | have been to return -1 but it required to use signed ints everywhere
+ | and would have made the patch larger and riskier. This fix should be
+ | backported to versions 1.2 and upper.
+
+ It is important to understand that for any reader to guess the text above
+ when it's absent, it will take a huge amount of time. If you made the
+ analysis leading to your patch, you must explain it, including the ideas
+ you dropped if you had a good reason for this.
+
+ While it's not strictly required to use Git, it is strongly recommended
+ because it helps you do the cleanest job with the least effort. But if you
+ are comfortable with writing clean e-mails and inserting your patches, you
+ don't need to use Git.
+
+ But in any case, it is important that there is a clean description of what
+ the patch does, the motivation for what it does, why it's the best way to do
+ it, its impacts, and what it does not yet cover. And this is particularly
+ important for bugs. A patch tagged "BUG" must absolutely explain what the
+ problem is, why it is considered as a bug. Anybody, even non-developers,
+ should be able to tell whether or not a patch is likely to address an issue
+ they are facing. Indicating what the code will do after the fix doesn't help
+ if it does not say what problem is encountered without the patch. Note that
+ in some cases the bug is purely theoretical and observed by reading the code.
+ In this case it's perfectly fine to provide an estimate about possible
+ effects. Also, in HAProxy, like many projects which take a great care of
+ maintaining stable branches, patches are reviewed later so that some of them
+ can be backported to stable releases.
+
+ While reviewing hundreds of patches can seem cumbersome, with a proper
+ formatting of the subject line it actually becomes very easy. For example,
+ here's how one can find patches that need to be reviewed for backports (bugs
+ and doc) since commit ID 827752e :
+
+ $ git log --oneline 827752e.. | grep 'BUG\|DOC'
+ 0d79cf6 DOC: fix function name
+ bc96534 DOC: ssl: missing LF
+ 10ec214 BUG/MEDIUM: lua: the lua function Channel:close() causes a segf
+ bdc97a8 BUG/MEDIUM: lua: outgoing connection was broken since 1.6-dev2
+ ba56d9c DOC: mention support for RFC 5077 TLS Ticket extension in start
+ f1650a8 DOC: clarify some points about SSL and the proxy protocol
+ b157d73 BUG/MAJOR: peers: fix current table pointer not re-initialized
+ e1ab808 BUG/MEDIUM: peers: fix wrong message id on stick table updates
+ cc79b00 BUG/MINOR: ssl: TLS Ticket Key rotation broken via socket comma
+ d8e42b6 DOC: add new file intro.txt
+ c7d7607 BUG/MEDIUM: lua: bad error processing
+ 386a127 DOC: match several lua configuration option names to those impl
+ 0f4eadd BUG/MEDIUM: counters: ensure that src_{inc,clr}_gpc0 creates a
+
+ It is made possible by the fact that subject lines are properly formatted and
+ always respect the same principle : one part indicating the nature and
+ severity of the patch, another one to indicate which subsystem is affected,
+ and the last one is a succinct description of the change, with the important
+ part at the beginning so that it's obvious what it does even when lines are
+ truncated like above. The whole stable maintenance process relies on this.
+ For this reason, it is mandatory to respect some easy rules regarding the
+ way the subject is built. Please see the section below for more information
+ regarding this formatting.
+
+ As a rule of thumb, your patch MUST NEVER be made only of a subject line,
+ it *must* contain a description. Even one or two lines, or indicating
+ whether a backport is desired or not. It turns out that single-line commits
+ are so rare in the Git world that they require special manual (hence
+ painful) handling when they are backported, and at least for this reason
+ it's important to keep this in mind.
+
+ Maintainers who pick your patch may slightly adjust the description as they
+ see fit. Do not see this as a failure to do a clean job, it just means they
+ think it will help them do their daily job this way. The code may also be
+ slightly adjusted before being merged (non-functional changes only, fix for
+ typos, tabs vs spaces for example), unless your patch contains a
+ Signed-off-By tag, in which case they will either modify it and mention the
+ changes after your Signed-off-By line, or (more likely) ask you to perform
+ these changes yourself. This ability to slightly adjust a patch before
+ merging is the main reason for not using pull requests which do not
+ provide this facility and will require to iterate back and forth with the
+ submitter and significantly delay the patch inclusion.
+
+ Each patch fixing a bug MUST be tagged with "BUG", a severity level, an
+ indication of the affected subsystem and a brief description of the nature
+ of the issue in the subject line, and a detailed analysis in the message
+ body. The explanation of the user-visible impact and the need for
+ backporting to stable branches or not are MANDATORY. Bug fixes with no
+ indication will simply be rejected as they are very likely to cause more
+ harm when nobody is able to tell whether or not the patch needs to be
+ backported or can be reverted in case of regression.
+
+ When fixing a bug which is reproducible, if possible, the contributors are
+ strongly encouraged to write a regression testing VTC file for varnishtest
+ to add to reg-tests directory. More information about varnishtest may be
+ found in README file of reg-tests directory and in doc/regression-testing.txt
+ file.
+
+12) Discuss on the mailing list
+
+ Note, some first-time contributors might feel impressed or scared by posting
+ to a list. This list is frequented only by nice people who are willing to
+ help you polish your work so that it is perfect and can last long. What you
+ think could be perceived as a proof of incompetence or lack of care will
+ instead be a proof of your ability to work with a community. You will not be
+ judged nor blamed for making mistakes. The project maintainers are the ones
+ creating the most bugs and mistakes anyway, and nobody knows the project in
+ its entirety anymore so you're just like anyone else. And people who have no
+ consideration for others' work are quickly ejected from the list so the
+ place is as safe and welcoming to new contributors as it is to long time
+ ones.
+
+ When submitting changes, please always CC the mailing list address so that
+ everyone gets a chance to spot any issue in your code. It will also serve
+ as an advertisement for your work, you'll get more testers quicker and
+ you'll feel better knowing that people really use your work. It's often
+ convenient to prepend "[PATCH]" in front of your mail's subject to mention
+ that this e-mail contains a patch (or a series of patches), because it will
+ easily catch reviewer's attention. It's automatically done by tools such as
+ "git format-patch" and "git send-email". If you don't want your patch to be
+ merged yet and prefer to show it for discussion, better tag it as "[RFC]"
+ (stands for "Request For Comments") and it will be reviewed but not merged
+ without your approval. It is also important to CC any author mentioned in
+ the file you change, or a subsystem maintainer whose address is mentioned
+ in a MAINTAINERS file. Not everyone reads the list on a daily basis so it's
+ very easy to miss some changes. Don't consider it as a failure when a
+ reviewer tells you you have to modify your patch, actually it's a success
+ because now you know what is missing for your work to get accepted. That's
+ why you should not hesitate to CC enough people. Don't copy people who have
+ no deal with your work area just because you found their address on the
+ list. That's the best way to appear careless about their time and make them
+ reject your changes in the future.
+
+
+Patch classifying rules
+-----------------------
+
+There are 3 criteria of particular importance in any patch :
+ - its nature (is it a fix for a bug, a new feature, an optimization, ...)
+ - its importance, which generally reflects the risk of merging/not merging it
+ - what area it applies to (eg: http, stats, startup, config, doc, ...)
+
+It's important to make these 3 criteria easy to spot in the patch's subject,
+because it's the first (and sometimes the only) thing which is read when
+reviewing patches to find which ones need to be backported to older versions.
+It also helps when trying to find which patch is the most likely to have caused
+a regression.
+
+Specifically, bugs must be clearly easy to spot so that they're never missed.
+Any patch fixing a bug must have the "BUG" tag in its subject. Most common
+patch types include :
+
+ - BUG fix for a bug. The severity of the bug should also be indicated
+ when known. Similarly, if a backport is needed to older versions,
+ it should be indicated on the last line of the commit message. The
+ commit message MUST ABSOLUTELY describe the problem and its impact
+ to non-developers. Any user must be able to guess if this patch is
+ likely to fix a problem they are facing. Even if the bug was
+ discovered by accident while reading the code or running an
+ automated tool, it is mandatory to try to estimate what potential
+ issue it might cause and under what circumstances. There may even
+ be security implications sometimes so a minimum analysis is really
+ required. Also please think about stable maintainers who have to
+ build the release notes, they need to have enough input about the
+ bug's impact to explain it. If the bug has been identified as a
+ regression brought by a specific patch or version, this indication
+ will be appreciated too. New maintenance releases are generally
+ emitted when a few of these patches are merged. If the bug is a
+ vulnerability for which a CVE identifier was assigned before you
+ publish the fix, you can mention it in the commit message, it will
+ help distro maintainers.
+
+ - CLEANUP code cleanup, silence of warnings, etc... theoretically no impact.
+ These patches will rarely be seen in stable branches, though they
+ may appear when they remove some annoyance or when they make
+ backporting easier. By nature, a cleanup is always of minor
+ importance and it's not needed to mention it.
+
+ - DOC updates to any of the documentation files, including README. Many
+ documentation updates are backported since they don't impact the
+ product's stability and may help users avoid bugs. So please
+ indicate in the commit message if a backport is desired. When a
+ feature gets documented, it's preferred that the doc patch appears
+ in the same patch or after the feature patch, but not before, as it
+ becomes confusing when someone working on a code base including
+ only the doc patch won't understand why a documented feature does
+ not work as documented.
+
+ - REORG code reorganization. Some blocks may be moved to other places,
+ some important checks might be swapped, etc... These changes
+ always present a risk of regression. For this reason, they should
+ never be mixed with any bug fix nor functional change. Code is
+ only moved as-is. Indicating the risk of breakage is highly
+ recommended. Minor breakage is tolerated in such patches if trying
+ to fix it at once makes the whole change even more confusing. That
+ may happen for example when some #ifdefs need to be propagated in
+ every file consecutive to the change.
+
+ - BUILD updates or fixes for build issues. Changes to makefiles also fall
+ into this category. The risk of breakage should be indicated if
+ known. It is also appreciated to indicate what platforms and/or
+ configurations were tested after the change.
+
+ - OPTIM some code was optimised. Sometimes if the regression risk is very
+ low and the gains significant, such patches may be merged in the
+ stable branch. Depending on the amount of code changed or replaced
+ and the level of trust the author has in the change, the risk of
+ regression should be indicated. If the optimization depends on the
+ architecture or on build options, it is important to verify that
+ the code continues to work without it.
+
+ - RELEASE release of a new version (development or stable).
+
+ - LICENSE licensing updates (may impact distro packagers).
+
+ - REGTEST updates to any of the regression testing files found in reg-tests
+ directory, including README or any documentation file.
+
+
+When the patch cannot be categorized, it's best not to put any type tag, and to
+only use risk or complexity information as below. This is commonly the
+case for new features, which development versions are mostly made of.
+
+The importance, complexity of the patch, or severity of the bug it fixes must
+be indicated when relevant. A single upper-case word is preferred, among :
+
+ - MINOR minor change, very low risk of impact. It is often the case for
+ code additions that don't touch live code. As a rule of thumb, a
+ patch tagged "MINOR" is safe enough to be backported to stable
+ branches. For a bug, it generally indicates an annoyance, nothing
+ more.
+
+ - MEDIUM medium risk, may cause unexpected regressions of low importance or
+ which may quickly be discovered. In short, the patch is safe but
+ touches working areas and it is always possible that you missed
+ something you didn't know existed (eg: adding a "case" entry or
+ an error message after adding an error code to an enum). For a bug,
+ it generally indicates something odd which requires changing the
+ configuration in an undesired way to work around the issue.
+
+ - MAJOR major risk of hidden regression. This happens when large parts of
+ the code are rearranged, when new timeouts are introduced, when
+ sensitive parts of the session scheduling are touched, etc... We
+ should only exceptionally find such patches in stable branches when
+ there is no other option to fix a design issue. For a bug, it
+ indicates severe reliability issues for which workarounds are
+ identified with or without performance impacts.
+
+ - CRITICAL medium-term reliability or security is at risk and workarounds,
+ if they exist, might not always be acceptable. An upgrade is
+ absolutely required. A maintenance release may be emitted even if
+ only one of these bugs is fixed. Note that this tag is only used
+ with bugs. Such patches must indicate what is the first version
+ affected, and if known, the commit ID which introduced the issue.
+
+The expected length of the commit message grows with the importance of the
+change. While a MINOR patch may sometimes be described in 1 or 2 lines, MAJOR
+or CRITICAL patches cannot have less than 10-15 lines to describe exactly the
+impacts otherwise the submitter's work will be considered as rough sabotage.
+If you are sending a new patch series after a review, it is generally good to
+enumerate at the end of the commit description what changed from the previous
+one as it helps reviewers quickly glance over such changes and not re-read the
+rest.
+
+For BUILD, DOC and CLEANUP types, this tag is not always relevant and may be
+omitted.
+
+The area the patch applies to is quite important, because some areas are known
+to be similar in older versions, suggesting a backport might be desirable, and
+conversely, some areas are known to be specific to one version. The area is a
+single-word lowercase name the contributor finds clear enough to describe what
+part is being touched. The following list of tags is suggested but not
+exhaustive:
+
+ - examples example files. Be careful, sometimes these files are packaged.
+
+ - tests regression test files. No code is affected, no need to upgrade.
+
+ - reg-tests regression test files for varnishtest. No code is affected, no
+ need to upgrade.
+
+ - init initialization code, arguments parsing, etc...
+
+ - config configuration parser, mostly used when adding new config keywords
+
+ - http the HTTP engine
+
+ - stats the stats reporting engine
+
+ - cli the stats socket CLI
+
+ - checks the health checks engine (eg: when adding new checks)
+
+ - sample the sample fetch system (new fetch or converter functions)
+
+ - acl the ACL processing core or some ACLs from other areas
+
+ - filters everything related to the filters core
+
+ - peers the peer synchronization engine
+
+ - lua the Lua scripting engine
+
+ - listeners everything related to incoming connection settings
+
+ - frontend everything related to incoming connection processing
+
+ - backend everything related to LB algorithms and server farm
+
+ - session session processing and flags (very sensitive, be careful)
+
+ - server server connection management, queueing
+
+ - spoe SPOE code
+
+ - ssl the SSL/TLS interface
+
+ - proxy proxy maintenance (start/stop)
+
+ - log log management
+
+ - poll any of the pollers
+
+ - halog the halog sub-component in the admin directory
+
+ - htx general HTX subsystem
+
+ - mux-h1 HTTP/1.x multiplexer/demultiplexer
+
+ - mux-h2 HTTP/2 multiplexer/demultiplexer
+
+ - h1 general HTTP/1.x protocol parser
+
+ - h2 general HTTP/2 protocol parser
+
+Other names may be invented when more precise indications are meaningful, for
+instance : "cookie" which indicates cookie processing in the HTTP core. Last,
+indicating the name of the affected file is also a good way to quickly spot
+changes. Many commits were already tagged with "stream_sock" or "cfgparse" for
+instance.
+
+It is required that the type of change and the severity when relevant are
+indicated, as well as the touched area when relevant as well in the patch
+subject. Most often, all 3 of them will be present. The two first criteria should
+be present before a first colon (':'). If both are present, then they should be
+delimited with a slash ('/'). The 3rd criterion (area) should appear next, also
+followed by a colon. Thus, all of the following subject lines are valid :
+
+Examples of subject lines :
+ - DOC: document options forwardfor to logasap
+ - DOC/MAJOR: reorganize the whole document and change indenting
+ - BUG: stats: connection reset counters must be plain ascii, not HTML
+ - BUG/MINOR: stats: connection reset counters must be plain ascii, not HTML
+ - MEDIUM: checks: support multi-packet health check responses
+ - RELEASE: Released version 1.4.2
+ - BUILD: stats: stdint is not present on solaris
+ - OPTIM/MINOR: halog: make fgets parse more bytes by blocks
+ - REORG/MEDIUM: move syscall redefinition to specific places
+
+Please do not use square brackets anymore around the tags, because they induce
+more work when merging patches, which need to be hand-edited not to lose the
+enclosed part.
+
+In fact, one of the only square bracket tags that still makes sense is '[RFC]'
+at the beginning of the subject, when you're asking for someone to review your
+change before getting it merged. If the patch is OK to be merged, then it can
+be merge as-is and the '[RFC]' tag will automatically be removed. If you don't
+want it to be merged at all, you can simply state it in the message, or use an
+alternate 'WIP/' prefix in front of your tag ("work in progress").
+
+The tags are not rigid, follow your intuition first, and they may be readjusted
+when your patch is merged. It may happen that a same patch has a different tag
+in two distinct branches. The reason is that a bug in one branch may just be a
+cleanup or safety measure in the other one because the code cannot be triggered.
+
+
+Working with Git
+----------------
+
+For a more efficient interaction between the mainline code and your code, you
+are strongly encouraged to try the Git version control system :
+
+ http://git-scm.com/
+
+It's very fast, lightweight and lets you undo/redo your work as often as you
+want, without making your mistakes visible to the rest of the world. It will
+definitely help you contribute quality code and take other people's feedback
+in consideration. In order to clone the HAProxy Git repository :
+
+ $ git clone http://git.haproxy.org/git/haproxy.git/ (development)
+
+If you decide to use Git for your developments, then your commit messages will
+have the subject line in the format described above, then the whole description
+of your work (mainly why you did it) will be in the body. You can directly send
+your commits to the mailing list, the format is convenient to read and process.
+
+It is recommended to create a branch for your work that is based on the master
+branch :
+
+ $ git checkout -b 20150920-fix-stats master
+
+You can then do your work and even experiment with multiple alternatives if you
+are not completely sure that your solution is the best one :
+
+ $ git checkout -b 20150920-fix-stats-v2
+
+Then reorder/merge/edit your patches :
+
+ $ git rebase -i master
+
+When you think you're ready, reread your whole patchset to ensure there is no
+formatting or style issue :
+
+ $ git show master..
+
+And once you're satisfied, you should update your master branch to be sure that
+nothing changed during your work (only needed if you left it unattended for days
+or weeks) :
+
+ $ git checkout -b 20150920-fix-stats-rebased
+ $ git fetch origin master:master
+ $ git rebase master
+
+You can build a list of patches ready for submission like this :
+
+ $ git format-patch master
+
+The output files are the patches ready to be sent over e-mail, either via a
+regular e-mail or via git send-email (carefully check the man page). Don't
+destroy your other work branches until your patches get merged, it may happen
+that earlier designs will be preferred for various reasons. Patches should be
+sent to the mailing list : haproxy@formilux.org and CCed to relevant subsystem
+maintainers or authors of the modified files if their address appears at the
+top of the file.
+
+Please don't send pull requests, they are really inconvenient as they make it
+much more complicated to perform minor adjustments, and nobody benefits from
+any comment on the code while on a list all subscribers learn a little bit on
+each review of anyone else's code.
+
+
+What to do if your patch is ignored
+-----------------------------------
+
+All patches merged are acknowledged by the maintainer who picked it. If you
+didn't get an acknowledgement, check the mailing list archives to see if your
+mail was properly delivered there and possibly if anyone responded and you did
+not get their response (please look at http://haproxy.org/ for the mailing list
+archive's address).
+
+If you see that your mail is there but nobody responded, please recheck:
+ - was the subject clearly indicating that it was a patch and/or that you were
+ seeking some review?
+
+ - was your email mangled by your mail agent? If so it's possible that
+ nobody had the willingness yet to mention it.
+
+ - was your email sent as HTML? If so it definitely ended in spam boxes
+ regardless of the archives.
+
+ - did the patch violate some of the principles explained in this document?
+
+If none of these cases matches, it might simply be that everyone was busy when
+your patch was sent and that it was overlooked. In this case it's fine to
+either resubmit it or respond to your own email asking if anything's wrong
+about it. In general don't expect a response after one week of silence, just
+because your email will not appear in anyone else's current window. So after
+one week it's time to resubmit.
+
+Among the mistakes that tend to make reviewers not respond is sending
+multiple versions of a patch in a row. It's natural for others then to wait for
+the series to stabilize. And once it doesn't move anymore, everyone has
+forgotten about it. As a rule of thumb, if you have to update your original
+email more than
+twice, first double-check that your series is really ready for submission, and
+second, start a new thread and stop responding to the previous one. In this
+case it is well appreciated to mention a version of your patch set in the
+subject such as "[PATCH v2]", so that reviewers can immediately spot the new
+version and not waste their time on the old one.
+
+If you still do not receive any response, it is possible that you've already
+played your last card by not respecting the basic principles multiple times
+despite being told about it several times, and that nobody is willing to spend
+more of their time than normally needed with your work anymore. Your best
+option at this point probably is to ask "did I do something wrong" rather than
+to resend the same patches.
+
+
+How to be sure to irritate everyone
+-----------------------------------
+
+Among the best ways to quickly lose everyone's respect, there is this small
+selection, which should help you improve the way you work with others, if
+you notice you're already practising some of them:
+ - repeatedly send improperly formatted commit messages, with no type or
+ severity, or with no commit message body. These ones require manual
+ editing; maintainers will quickly learn to recognize your name.
+
+ - repeatedly send patches which break something, and disappear or take a long
+ time to provide a fix.
+
+ - fail to respond to questions related to features you have contributed in
+ the past, which can further lead to the feature being declared unmaintained
+ and removed in a future version.
+
+ - send a new patch iteration without taking *all* comments from previous
+ review into consideration, so that the reviewer discovers they have to do
+ the exact same work again.
+
+ - "hijack" an existing thread to discuss something different or promote your
+ work. This will generally make you look like a fool so that everyone wants
+ to stay away from your e-mails.
+
+ - continue to send pull requests after having been told why they are not
+ welcome.
+
+ - give wrong advice to people asking for help, or send them patches to
+ try which make no sense, waste their time, and give them a bad impression
+ of the people working on the project.
+
+ - be disrespectful to anyone asking for help or contributing some work. This
+ may actually even get you kicked out of the list and banned from it.
+
+-- end
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..28a1b2e
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,766 @@
+Installation instructions for HAProxy
+=====================================
+
+HAProxy 2.9 is a stable version, which means that it will get fixes for bugs as
+they are discovered till around Q1 2025 and should not receive new features.
+This version is mostly suited to experienced users who are willing to quickly
+follow updates. New users are encouraged to use long term supported versions
+such as the ones provided by their software vendor or Linux distribution, as
+such versions require far less common updates.
+
+If for any reason you'd prefer to use a different version than the one packaged
+for your system, you want to be certain to have all the fixes or to get some
+commercial support, other choices are available at http://www.haproxy.com/.
+
+
+Areas covered in this document
+==============================
+
+1) Quick build & install
+2) Basic principles
+3) Build environment
+4) Dependencies
+5) Advanced build options
+6) How to install HAProxy
+
+
+1) Quick build & install
+========================
+
+If you've already built HAProxy and are just looking for a quick reminder, here
+are a few build examples :
+
+ - recent Linux system with all options, make and install :
+ $ make clean
+ $ make -j $(nproc) TARGET=linux-glibc \
+ USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1 USE_SYSTEMD=1
+ $ sudo make install
+
+ - FreeBSD and OpenBSD, build with all options :
+ $ gmake -j 4 TARGET=freebsd USE_OPENSSL=1 USE_LUA=1 USE_PCRE2=1
+
+ - embedded Linux, build using a cross-compiler :
+ $ make -j $(nproc) TARGET=linux-glibc USE_OPENSSL=1 USE_PCRE2=1 \
+ CC=/opt/cross/gcc730-arm/bin/gcc ADDLIB=-latomic
+
+ - Build with static PCRE on Solaris / UltraSPARC :
+ $ make TARGET=solaris CPU=ultrasparc USE_STATIC_PCRE2=1
+
+For more advanced build options or if a command above reports an error, please
+read the following sections.
+
+
+2) Basic principles
+===================
+
+HAProxy uses a single GNU Makefile which supports options on the command line,
+so that there is no need to hack a "configure" file to work on your system. The
+makefile totally supports parallel build using "make -j <jobs>" where <jobs>
+matches the number of usable processors, which on some platforms is returned by
+the "nproc" utility. The explanations below may occasionally refer to some
+options, usually in the form "name=value", which have to be passed to the
+command line. This means that the option has to be passed after the "make"
+command. For example :
+
+ $ make -j $(nproc) TARGET=generic USE_GZIP=1
+
+One required option is TARGET, it must be set to a target platform name, which
+provides a number of presets. The list of known platforms is displayed when no
+target is specified. It is not strictly required to use the exact target, you
+can use a relatively similar one and adjust specific variables by hand.
+
+Most configuration variables are in fact booleans. Some options are detected and
+enabled by default if available on the target platform. This is the case for all
+those named "USE_<feature>". These booleans are enabled by "USE_<feature>=1"
+and are disabled by "USE_<feature>=" (with no value). An exhaustive list of the
+supported USE_* features is located at the top of the main Makefile. The last
+occurrence of such an option on the command line overrides any previous one.
+Example :
+
+ $ make TARGET=generic USE_THREAD=
+
+In case of error or missing TARGET, a help screen is displayed. It is also
+possible to display a list of all known options using "make help".
+
+Some optional components which may depend on third-party libraries, are used
+with popular tools which are not necessarily standard implementations, or are
+maintained at slower pace than the core of the project, are located in the
+"addons/" directory. These ones may disappear in a future version if the
+product they depend on disappears or if their maintainers do not assign enough
+resources to maintain them any more. For this reason they are not built by
+default, but some USE_* options are usually provided for them, and their build
+is routinely tested anyway.
+
+
+3) Build environment
+====================
+
+HAProxy requires a working GCC or Clang toolchain and GNU make :
+
+ - GNU make >= 3.80. Note that neither Solaris nor OpenBSD's make work with
+ the GNU Makefile. If you get many syntax errors when running "make", you
+ may want to retry with "gmake" which is the name commonly used for GNU make
+ on BSD systems.
+
+ - GCC >= 4.2 (up to 13 tested). Older versions can be made to work with a
+ few minor adaptations if really needed. Newer versions may sometimes break
+ due to compiler regressions or behaviour changes. The version shipped with
+ your operating system is very likely to work with no trouble. Clang >= 3.0
+ is also known to work as an alternative solution. Recent versions may emit
+ a bit more warnings that are worth reporting as they may reveal real bugs.
+ TCC (https://repo.or.cz/tinycc.git) is also usable for developers but will
+ not support threading and was found at least once to produce bad code in
+ some rare corner cases (since fixed). But it builds extremely quickly
+ (typically half a second for the whole project) and is very convenient to
+ run quick tests during API changes or code refactoring.
+
+ - GNU ld (binutils package), with no particular version. Other linkers might
+ work but were not tested.
+
+On debian or Ubuntu systems and their derivatives, you may get all these tools
+at once by issuing the two following commands :
+
+ $ sudo apt-get update
+ $ sudo apt-get install build-essential
+
+On Fedora, CentOS, RHEL and derivatives, you may get the equivalent packages
+with the following command :
+
+ $ sudo yum groupinstall "Development Tools"
+
+Please refer to your operating system's documentation for other systems.
+
+It is also possible to build HAProxy for another system or platform using a
+cross-compiler but in this case you probably already have installed these
+tools.
+
+Building HAProxy may require between 60 and 80 MB of free space in the
+directory where the sources have been extracted, depending on the debugging
+options involved.
+
+
+4) Dependencies
+===============
+
+HAProxy in its basic form does not depend on anything beyond a working libc.
+However a number of options are enabled by default, or are highly recommended,
+and these options will typically involve some external components or libraries,
+depending on the targeted platform.
+
+Optional dependencies may be split into several categories :
+
+ - memory allocation
+ - regular expressions
+ - multi-threading
+ - password encryption
+ - cryptography
+ - compression
+ - lua
+ - device detection
+ - miscellaneous
+
+
+4.1) Memory allocation
+----------------------
+By default, HAProxy uses the standard malloc() call provided by the libc. It
+may also be built to use jemalloc, which is fast and thread-safe. In order to
+use it, please add "-ljemalloc" to the ADDLIB variable. You may possibly also
+need to append "-lpthread" and/or "-ldl" depending on the operating system.
+
+
+4.2) Regular expressions
+------------------------
+HAProxy may make use of regular expressions (regex) to match certain patterns. The
+regex engine is provided by default in the libc. On some operating systems, it
+might happen that the original regex library provided by the libc is too slow,
+too limited or even bogus. For example, on older Solaris versions up to 8, the
+default regex used not to properly extract group references, without reporting
+compilation errors. Also, some early versions of the GNU libc used to include a
+regex engine which could be slow or even crash on certain patterns.
+
+If you plan on importing a particularly heavy configuration involving a lot of
+regex, you may benefit from using some alternative regex implementations such as
+PCRE. HAProxy natively supports PCRE and PCRE2 (recommended), both in standard
+and JIT flavors (Just In Time). The following options are available depending on
+the library version provided on your system :
+
+ - "USE_PCRE=1" : enable PCRE version 1, dynamic linking
+ - "USE_STATIC_PCRE=1" : enable PCRE version 1, static linking
+ - "USE_PCRE_JIT=1" : enable PCRE version 1 in JIT mode
+ - "USE_PCRE2=1" : enable PCRE version 2, dynamic linking
+ - "USE_STATIC_PCRE2=1" : enable PCRE version 2, static linking
+ - "USE_PCRE2_JIT=1" : enable PCRE version 2 in JIT mode
+
+Both of these libraries may be downloaded from https://www.pcre.org/.
+
+By default, the include and library paths are figured from the "pcre-config"
+and "pcre2-config" utilities. If these ones are not installed or inaccurate
+(for example when cross-compiling), it is possible to force the path to include
+files using "PCRE_INC" and "PCRE2_INC" respectively, and the path to library
+files using "PCRE_LIB" and "PCRE2_LIB" respectively. For example :
+
+ $ make TARGET=generic \
+ USE_PCRE2_JIT=1 PCRE2_INC=/opt/cross/include PCRE2_LIB=/opt/cross/lib
+
+
+4.3) Multi-threading
+--------------------
+On some systems for which positive feedback was reported, multi-threading will
+be enabled by default. When multi-threading is used, the libpthread library
+(POSIX threading) will be used. If the target system doesn't contain such a
+library, it is possible to forcefully disable multi-threading by adding
+"USE_THREAD=" on the command line.
+
+
+4.4) Password encryption
+------------------------
+Many systems provide password encryption functions used for authentication. On
+some systems these functions are part of the libc. On others, they're part of a
+separate library called "libcrypt". The default targets are pre-configured
+based on which system needs the library. It is possible to forcefully disable
+the linkage against libcrypt by adding "USE_LIBCRYPT=" on the command line, or
+to forcefully enable it using "USE_LIBCRYPT=1".
+
+
+4.5) Cryptography
+-----------------
+For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
+supports the OpenSSL library, and is known to build and work with branches
+1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, 3.0, 3.1 and 3.2. It is recommended to use
+at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
+in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
+and each of the branches above receives its own fixes, without forcing you to
+upgrade to another branch. There is no excuse for staying vulnerable by not
+applying a fix available for your version. There is always a small risk of
+regression when jumping from one branch to another one, especially when it's
+very new, so it's preferable to observe for a while if you use a different
+version than your system's defaults. Specifically, it has been well established
+that OpenSSL 3.0 can be 2 to 20 times slower than earlier versions on
+multiprocessor systems due to design issues that cannot be fixed without a
+major redesign, so in this case upgrading should be carefully thought about
+(please see https://github.com/openssl/openssl/issues/20286 and
+https://github.com/openssl/openssl/issues/17627). If a migration to 3.x is
+mandated by support reasons, at least 3.1 recovers a small fraction of this
+important loss.
+
+Four OpenSSL derivatives called LibreSSL, BoringSSL, QUICTLS, and AWS-LC are
+reported to work as well. While there are some efforts from the community to
+ensure they work well, OpenSSL remains the primary target and this means that
+in case of conflicting choices, OpenSSL support will be favored over other
+options. Note that QUIC is not fully supported when haproxy is built with
+OpenSSL. In this case, QUICTLS is the preferred alternative. As of writing
+this, the QuicTLS project follows OpenSSL very closely and provides updates
+simultaneously, but being a volunteer-driven project, its long-term future does
+not look certain enough to convince operating systems to package it, so it
+needs to be built locally. See the section about QUIC in this document.
+
+A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
+supported alternative stack not based on OpenSSL, yet which implements almost
+all of its API and natively supports QUIC. At the time of writing, the vast
+majority of SSL features are well supported by wolfSSL though not everything is
+exposed in haproxy yet, advanced users might notice tiny differences that the
+wolfSSL and HAProxy teams are working on together to address in the wolfSSL
+code base. Features like ecdsa/rsa dual stack, crt-list and client auth might
+not work as expected. As of November 2023, wolfSSL support is considered
+experimental. This stack is not affected by OpenSSL's design issue regarding
+multi-processor systems and is viewed by the HAProxy team as the most promising
+mid-term solution for general deployments and QUIC deployments.
+
+In order to enable SSL/TLS support, simply pass "USE_OPENSSL=1" on the command
+line and the default library present on your system will be used :
+
+ $ make TARGET=generic USE_OPENSSL=1
+
+If you want to use a different version from the one provided by your system
+(which is not recommended due to the risk of missing security fixes), it is
+possible to indicate the path to the SSL include files using SSL_INC, and the
+SSL library files using SSL_LIB. Example :
+
+ $ make TARGET=generic \
+ USE_OPENSSL=1 SSL_INC=/opt/ssl-1.1.1/include SSL_LIB=/opt/ssl-1.1.1/lib
+
+To use HAProxy with WolfSSL, WolfSSL must be built with haproxy support, at
+least WolfSSL 5.6.4 is needed, but a development version might be needed for
+some of the features:
+
+ $ cd ~/build/wolfssl
+ $ ./configure --enable-haproxy --enable-quic --prefix=/opt/wolfssl-5.6.4/
+ $ make -j $(nproc)
+ $ make install
+
+Please also note that wolfSSL supports many platform-specific features that may
+affect performance, and that for production uses it might be a good idea to
+check them using "./configure --help". Please refer to the lib's documentation.
+
+Building HAProxy with wolfSSL requires to specify the API variant on the "make"
+command line, for example:
+
+ $ cd ~/build/haproxy
+ $ make -j $(nproc) TARGET=generic USE_OPENSSL_WOLFSSL=1 USE_QUIC=1 \
+ SSL_INC=/opt/wolfssl-5.6.4/include SSL_LIB=/opt/wolfssl-5.6.4/lib
+
+To use HAProxy with AWS-LC you must have version v1.13.0 or newer of AWS-LC
+built and installed locally.
+ $ cd ~/build/aws-lc
+ $ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/opt/aws-lc
+ $ make -j $(nproc)
+ $ make install
+
+Building HAProxy with AWS-LC requires you to enable AWS-LC support, and specify
+the path it was installed to when running make for HAProxy.
+
+ $ cd ~/build/haproxy
+ $ make -j $(nproc) TARGET=generic USE_OPENSSL_AWSLC=1 \
+ SSL_INC=/opt/aws-lc/include SSL_LIB=/opt/aws-lc/lib
+
+In order to link OpenSSL statically against HAProxy, first download OpenSSL
+from https://www.openssl.org/ then build it with the "no-shared" keyword and
+install it to a local directory, so your system is not affected :
+
+ $ export STATICLIBSSL=/tmp/staticlibssl
+ $ ./config --prefix=$STATICLIBSSL no-shared
+ $ make && make install_sw
+
+Then when building haproxy, pass that path via SSL_INC and SSL_LIB :
+
+ $ make TARGET=generic \
+ USE_OPENSSL=1 SSL_INC=$STATICLIBSSL/include SSL_LIB=$STATICLIBSSL/lib
+
+When building with OpenSSL on some systems, you may also need to enable support
+for the "libz" library, which is visible if the linker complains about function
+"deflateInit()" not being found. In this case, simply append "ADDLIB=-lz" to
+the command line.
+
+It is worth mentioning that asynchronous cryptography engines are supported on
+OpenSSL 1.1.0 and above. Such engines are used to access hardware cryptography
+acceleration that might be present on your system. Due to API changes that
+appeared with OpenSSL 3.0 and cause lots of build warnings, engines are not
+enabled by default anymore in HAProxy 2.6. It is required to pass USE_ENGINE=1
+if they are desired.
+
+If for any reason you are forced to use OpenSSL 3.x and the performance is not
+acceptable at all, you may want to try replacing the pthread locks that OpenSSL
+uses with HAProxy's much lighter locks that are able to emulate them:
+
+ $ make TARGET=generic \
+ USE_OPENSSL=1 USE_PTHREAD_EMULATION=1
+
+On large multi-processor systems, this may result in a performance increase of
+50 to 100% on OpenSSL 3.0 depending on the level of contention, but this will
+of course not recover everything. It should not be used by distro packagers as
+it is a bit less observable.
+
+
+4.6) Compression
+----------------
+HAProxy can compress HTTP responses before delivering them to clients, in order
+to save network bandwidth. Two compression options are available. The first one
+relies on the libslz library (http://libslz.org) that is embedded in haproxy.
+It is enabled by default as it is very fast and does not keep a copy of the
+contents in memory. It is possible to disable it, for example for very small
+systems, by passing "USE_SLZ=" to the "make" command.
+
+Please note that SLZ will benefit from some CPU-specific instructions like the
+availability of the CRC32 extension on some ARM processors. Thus it can further
+improve its performance to build with "CPU=native" on the target system, or
+"CPU=armv81" (modern systems such as Graviton2 or A55/A75 and beyond),
+"CPU=a72" (e.g. for RPi4, or AWS Graviton), "CPU=a53" (e.g. for RPi3), or
+"CPU=armv8-auto" (automatic detection with minor runtime penalty).
+
+A second option involves the widely known zlib library, which is very likely
+installed on your system. In order to use zlib, simply pass "USE_ZLIB=1" to the
+"make" command line, which will also automatically disable SLZ. If the library
+is not installed in your default system's path, it is possible to specify the
+path to the include files using ZLIB_INC, and the path to the library files
+using ZLIB_LIB :
+
+ $ make TARGET=generic \
+ USE_ZLIB=1 ZLIB_INC=/opt/zlib-1.2.11/include ZLIB_LIB=/opt/zlib-1.2.11/lib
+
+Zlib is commonly found on most systems, otherwise updates can be retrieved from
+http://www.zlib.net/. It is easy and fast to build, and new versions sometimes
+provide better performance so it might be worth using an up-to-date one.
+
+Zlib compresses a bit better than libslz but at the expense of more CPU usage
+(about 3.5 times more minimum), and a huge memory usage (~260 kB per compressed
+stream). The only valid reason for using Zlib instead of SLZ here usually is to
+deal with a very limited internet bandwidth while CPU and RAM are abundant so
+that the last few percent of compression ratio are worth the invested hardware.
+
+
+4.7) Lua
+--------
+Lua is an embedded programming language supported by HAProxy to provide more
+advanced scripting capabilities. Only versions 5.3 and above are supported.
+In order to enable Lua support, please specify "USE_LUA=1" on the command line.
+Some systems provide this library under various names to avoid conflicts with
+previous versions. By default, HAProxy looks for "lua5.4", "lua54", "lua5.3",
+"lua53", "lua". If your system uses a different naming, you may need to set the
+library name in the "LUA_LIB_NAME" variable.
+
+If Lua is not provided on your system, it can be very simply built locally. It
+can be downloaded from https://www.lua.org/, extracted and built, for example :
+
+ $ cd /opt/lua-5.4.6
+ $ make linux
+
+The path to the include files and library files may be set using "LUA_INC" and
+"LUA_LIB" respectively. For example :
+
+ $ make TARGET=generic \
+ USE_LUA=1 LUA_INC=/opt/lua-5.4.6/src LUA_LIB=/opt/lua-5.4.6/src
+
+
+4.8) Device detection
+---------------------
+HAProxy supports several device detection modules relying on third party
+products. Some of them may provide free code, others free libs, others free
+evaluation licenses. Please read about their respective details in the
+following files :
+
+ doc/DeviceAtlas-device-detection.txt for DeviceAtlas
+ doc/51Degrees-device-detection.txt for 51Degrees
+ doc/WURFL-device-detection.txt for Scientiamobile WURFL
+
+
+4.9) Miscellaneous
+------------------
+Some systems have specificities. Usually these specificities are known and/or
+detected and properly set for you. If you need to adjust the behaviour, here
+are the extra libraries that may be referenced at build time :
+
+ - USE_RT=1 build with librt, which is sometimes needed on some systems
+ when using threads. It is set by default on Linux platforms,
+ and may be disabled using "USE_RT=" if your system doesn't
+ have one. You may have to set it as well if you face an error
+ indicating that clock_gettime() was not found.
+
+ - USE_DL=1 build with libdl, which is usually needed for Lua and OpenSSL
+ on Linux. It is automatically detected and may be disabled
+ using "USE_DL=", though it should never harm.
+
+ - USE_SYSTEMD=1 enables support for the sdnotify features of systemd,
+ allowing better integration with systemd on Linux systems
+ which come with it. It is never enabled by default so there
+ is no need to disable it.
+
+
+4.10) Common errors
+-------------------
+Some build errors may happen depending on the options combinations or the
+selected target. When facing build errors, if you know that your system is a
+bit special or particularly old, start from TARGET=generic, it is easier to
+start from there and fix the remaining issues than trying to degrade another
+target. Common issues may include:
+
+ - clock_gettime() not found
+ => your system needs USE_RT=1
+
+ - many __sync_<something> errors in many files
+ => your gcc is too old, build without threads.
+
+ - many openssl errors
+ => your OpenSSL version really is too old, do not enable OpenSSL
+
+ - quic_conn-t.h: field 'level' has incomplete type
+ => you tried to build QUIC with the legacy OpenSSL library, which does
+ not support QUIC. Either disable QUIC with "USE_QUIC=" or use any
+ other supported compatible library.
+
+ - many "dereferencing pointer 'sa.985' does break strict-aliasing rules"
+ => these warnings happen on old compilers (typically gcc-4.4), and may
+ safely be ignored; newer ones are better on these.
+
+
+4.11) QUIC
+----------
+QUIC is the new transport layer protocol and is required for HTTP/3. This
+protocol stack is currently supported as an experimental feature in haproxy on
+the frontend side. In order to enable it, use "USE_QUIC=1 USE_OPENSSL=1".
+
+Note that QUIC is not fully supported by the OpenSSL library. Indeed QUIC 0-RTT
+cannot be supported by OpenSSL contrary to other libraries with full QUIC
+support. The preferred option is to use QUICTLS. This is a fork of OpenSSL with
+a QUIC-compatible API. Its repository is available at this location:
+
+ https://github.com/quictls/openssl
+
+You can use the following instruction to build a functional QUICTLS.
+
+ $ ./config --libdir=lib [--prefix=/opt/quictls]
+ $ make
+ $ make install
+
+On a development environment, use SSL_INC and SSL_LIB when building haproxy to
+point to the correct cryptographic library. It may be useful to specify QUICTLS
+location via rpath for haproxy execution. Example :
+
+ $ make -j $(nproc) TARGET=generic \
+ USE_QUIC=1 \
+ USE_OPENSSL=1 SSL_INC=/opt/quictls/include SSL_LIB=/opt/quictls/lib \
+ LDFLAGS="-Wl,-rpath,/opt/quictls/lib"
+
+Alternately, building against wolfSSL is supported as well, for example this
+way assuming that wolfSSL was installed in /opt/wolfssl-5.6.0 as shown in 4.5:
+
+ $ make -j $(nproc) TARGET=generic \
+ USE_QUIC=1 \
+ USE_OPENSSL_WOLFSSL=1 \
+ SSL_INC=/opt/wolfssl-5.6.0/include SSL_LIB=/opt/wolfssl-5.6.0/lib
+ LDFLAGS="-Wl,-rpath,/opt/wolfssl-5.6.0/lib"
+
+As last resort, haproxy may be compiled against OpenSSL as follows:
+
+ $ make TARGET=generic USE_OPENSSL=1 USE_QUIC=1 USE_QUIC_OPENSSL_COMPAT=1
+
+Note that QUIC 0-RTT is not supported by haproxy QUIC stack when built against
+OpenSSL. In addition to these compilation requirements, the QUIC listener
+bindings must be explicitly enabled with a specific QUIC tuning parameter.
+(see "limited-quic" global parameter of haproxy Configuration Manual).
+
+
+5) How to build HAProxy
+=======================
+
+This section assumes that you have already read section 2 (basic principles)
+and section 3 (build environment). It often refers to section 4 (dependencies).
+
+To build haproxy, you have to choose your target OS amongst the following ones
+and assign it to the TARGET variable :
+
+ - linux-glibc for Linux kernel 2.6.28 and above
+ - linux-glibc-legacy for Linux kernel 2.6.28 and above without new features
+ - linux-musl for Linux kernel 2.6.28 and above with musl libc
+ - solaris for Solaris 10 and above
+ - freebsd for FreeBSD 10 and above
+ - dragonfly for DragonFlyBSD 4.3 and above
+ - netbsd for NetBSD 8 and above
+ - osx for Mac OS/X
+ - openbsd for OpenBSD 6.3 and above
+ - aix51 for AIX 5.1
+ - aix52 for AIX 5.2
+ - aix72-gcc for AIX 7.2 (using gcc)
+ - cygwin for Cygwin
+ - haiku for Haiku
+ - generic for any other OS or version.
+ - custom to manually adjust every setting
+
+You may also choose your CPU to benefit from some optimizations. This is
+particularly important on UltraSparc machines. For this, you can assign
+one of the following choices to the CPU variable :
+
+ - i686 for intel PentiumPro, Pentium 2 and above, AMD Athlon (32 bits)
+ - i586 for intel Pentium, AMD K6, VIA C3.
+ - ultrasparc : Sun UltraSparc I/II/III/IV processor
+ - power8 : IBM POWER8 processor
+ - power9 : IBM POWER9 processor
+ - armv81 : modern ARM cores (Cortex A55/A75/A76/A78/X1, Neoverse, Graviton2)
+ - a72 : ARM Cortex-A72 or A73 (e.g. RPi4, Odroid N2, AWS Graviton)
+ - a53 : ARM Cortex-A53 or any of its successors in 64-bit mode (e.g. RPi3)
+ - armv8-auto : support both older and newer armv8 cores with a minor penalty,
+ thanks to gcc 10's outline atomics (default with gcc 10.2).
+ - native : use the build machine's specific processor optimizations. Use with
+ extreme care, and never in virtualized environments (known to break).
+ - generic : any other processor or no CPU-specific optimization. (default)
+
+Alternatively, you may just set the CPU_CFLAGS value to the optimal GCC options
+for your platform. A second variable named SMALL_OPTS also supports passing a
+number of defines and compiler options usually for small systems. For better
+clarity it's recommended to pass the options which result in a smaller binary
+(like memory limits or -Os) into this variable.
+
+If you are building for a different system than the one you're building on,
+this is called "cross-compiling". HAProxy supports cross-compilation pretty
+well and tries to ease it by letting you adjust paths to all libraries (please
+read section 4 on dependencies for more details). When cross-compiling, you
+just need to pass the path to your compiler in the "CC" variable, and the path
+to the linker in the "LD" variable. Most of the time, setting the CC variable
+is enough since LD points to it by default.
+
+By default the build process runs in quiet mode and hides the details of the
+commands that are executed. This makes it easier to catch build warnings and
+to see what is happening. However it is not convenient at all to observe what
+flags are passed to the compiler nor what compiler is involved. Simply append
+"V=1" to the "make" command line to switch to verbose mode and display the
+details again. It is recommended to use this option when cross-compiling to
+verify that the paths are correct and that /usr/include is never involved.
+
+You may want to build specific target binaries which do not match your native
+compiler's target. This is particularly true on 64-bit systems when you want
+to build a 32-bit binary. Use the ARCH variable for this purpose. Right now
+it only knows about a few x86 variants (i386,i486,i586,i686,x86_64), two
+generic ones (32,64) and sets -m32/-m64 as well as -march=<arch> accordingly.
+This variable is only used to set ARCH_FLAGS to preset values, so if you know
+the arch-specific flags that your system needs, you may prefer to set
+ARCH_FLAGS instead. Note that these flags are passed both to the compiler and
+to the linker. For example, in order to build a 32-bit binary on an x86_64
+Linux system with SSL support without support for compression but when OpenSSL
+requires ZLIB anyway :
+
+ $ make TARGET=linux-glibc ARCH=i386 USE_OPENSSL=1 ADDLIB=-lz
+
+Recent systems can resolve IPv6 host names using getaddrinfo(). This primitive
+is not present in all libcs and does not work in all of them either. Support in
+glibc was broken before 2.3. Some embedded libs may not properly work either,
+thus, support is disabled by default, meaning that some host names which only
+resolve as IPv6 addresses will not resolve and configs might emit an error
+during parsing. If you know that your OS libc has reliable support for
+getaddrinfo(), you can add USE_GETADDRINFO=1 on the make command line to enable
+it. This is the recommended option for most Linux distro packagers since it's
+working fine on all recent mainstream distros. It is automatically enabled on
+Solaris 8 and above, as it's known to work.
+
+If your system supports PCRE (Perl Compatible Regular Expressions), then you
+really should build with libpcre which is between 2 and 10 times faster than
+other libc implementations. Regex are used for header processing (deletion,
+rewriting, allow, deny). Please see section 4 about dependencies to figure
+how to build with PCRE support.
+
+It is possible to add native support for SSL, by passing "USE_OPENSSL=1" on the
+make command line. The libssl and libcrypto will automatically be linked with
+HAProxy. Some systems also require libz, so if the build fails due to missing
+symbols such as deflateInit(), then try again with "ADDLIB=-lz". Please check
+section 4 about dependencies for more information on how to build with OpenSSL.
+
+HAProxy can compress HTTP responses to save bandwidth. Please see section 4
+about dependencies to see the available libraries and associated options.
+
+By default, the DEBUG_CFLAGS variable is set to '-g' to enable debug symbols.
+It is not wise to disable it on uncommon systems, because it's often the only
+way to get a usable core when you need one. Otherwise, you can set DEBUG to
+'-s' to strip the binary.
+
+If the ERR variable is set to any non-empty value, then -Werror will be added
+to the compiler so that any build warning will trigger an error. This is the
+recommended way to build when developing, and it is expected that contributed
+patches were tested with ERR=1.
+
+The DEBUG variable is used to extend the CFLAGS and is preset to a list of
+build-time options that are known for providing significant reliability
+improvements and a barely perceptible performance cost. Unless instructed to do
+so by some project developers, or trying to save the last ounce of performance,
+these options should not be changed. Among the usable ones are:
+ - -DDEBUG_STRICT: enable some runtime assertions at key places in the code.
+ The goal is to emit a warning or stop the program if certain expected
+ conditions are not met, and whose violation will result in a misbehaving
+ process due to memory corruption or other significant trouble, possibly
+ caused by an attempt to exploit a bug in the program or a library it relies
+ on. The option knows 3 values: 0 (disable all such assertions, the default
+ when the option is not set), 1 (enable all inexpensive assertions), and
+ 2 (enable all assertions even in fast paths). Setting the option with no
+ value corresponds to 1, which is the recommended value for production.
+
+ - -DDEBUG_STRICT_ACTION: indicates how to react to a check violation. There
+ are 3 types of checks: BUG (condition that is known to have serious
+ consequences), WARN (warning about a highly suspicious condition which the
+ process may recover from, but whose unknown cause may also have serious
+ consequences), CHECK (verification whether a condition that developers now
+ consider impossible still happens). The variable takes a value from 0 to 3,
+ that adjusts the behavior on these 3 violations:
+
+ BUG WARN CHECK
+ 0 warn warn warn
+ 1 stop warn warn
+ 2 stop stop warn
+ 3 stop stop stop
+
+ The default value is 1, which is the best balance for production in that it
+ will do its best to prevent a known bogus process from running away, but
+ will let it run if it believes it can recover. Users running the process in
+ sensitive environments (finance etc) may prefer to run at level 2 to make
+ sure to stop any detected anomaly before it may have an impact. Level 3
+ should only be used at the request of developers. In any case, any emitted
+ warning should be reported to developers.
+
+ - -DDEBUG_MEMORY_POOLS: this enables by default extra controls around memory
+ allocation that will help detect coding errors such as double-frees and
+ freeing a bad memory location. It will also detect earlier risks of memory
+ overflows, which may have security implications. The cost is extremely low
+ (less than 1% increase in memory footprint). This is equivalent to adding
+ "-dMtag" on the command line. This option is enabled in the default build
+ options.
+
+ - -DDEBUG_DONT_SHARE_POOLS: this will keep separate pools for same-sized
+ objects of different types. Using this increases the memory usage a little
+ bit but further reduces the risk of memory management related bugs and will
+ lead to more accurate traces in case of error. It is equivalent to adding
+ "-dMno-merge" on the command line. It is not enabled in the default build
+ options.
+
+ - -DDEBUG_POOL_INTEGRITY: this will enable runtime detection and stopping of
+ a class of bugs known as "use after free", which consists in modifying a
+ memory area after freeing it while it was reused for something else. This
+ option is quite powerful but such bugs are fortunately extremely rare, and
+ it will cause a measurable performance degradation (a few percent). This is
+ equivalent to adding "-dMcold-first,integrity" on the command line. This
+ option is not enabled by default but users running development versions on
+ moderate performance sites in order to participate to reliability testing
+ are encouraged to use it, in combination with -DDEBUG_DONT_SHARE_POOLS and
+ -DDEBUG_MEMORY_POOLS, as this could catch dangerous regressions.
+
+As such, for regular production, "-DDEBUG_STRICT -DDEBUG_MEMORY_POOLS" is
+recommended. For security sensitive environments, it is recommended to use
+"-DDEBUG_STRICT -DDEBUG_STRICT_ACTION=2 -DDEBUG_MEMORY_POOLS \
+-DDEBUG_DONT_SHARE_POOLS". For deployments dedicated to testing new versions or
+when trying to nail a bug down, use "-DDEBUG_STRICT=2 -DDEBUG_STRICT_ACTION=2 \
+-DDEBUG_MEMORY_POOLS -DDEBUG_DONT_SHARE_POOLS -DDEBUG_POOL_INTEGRITY".
+
+The DEP variable is automatically set to the list of include files and also
+designates a file that contains the last build options used. It is used during
+the build process to compute dependencies and decide whether or not to rebuild
+everything (we do rebuild everything when .h files are touched or when build
+options change). Sometimes when performing fast build iterations on inline
+functions it may be desirable to avoid a full rebuild. Forcing this variable
+to be empty will be sufficient to achieve this. This variable must never be
+forced to produce final binaries, and must not be used during bisect sessions,
+as it will often lead to the wrong commit.
+
+If you need to pass other defines, includes, libraries, etc... then please
+check the Makefile to see which ones will be available in your case, and
+use/override the USE_* variables from the Makefile.
+
+AIX 5.3 is known to work with the generic target. However, for the binary to
+also run on 5.2 or earlier, you need to build with DEFINE="-D_MSGQSUPPORT",
+otherwise __fd_select() will be used while not being present in the libc, but
+this is easily addressed using the "aix52" target. If you get build errors
+because of strange symbols or section mismatches, simply remove -g from
+DEBUG_CFLAGS.
+
+Building on AIX 7.2 works fine using the "aix72-gcc" TARGET. It adds two
+special CFLAGS to prevent the loading of AIX's xmem.h and var.h. This is done
+by defining the corresponding include-guards _H_XMEM and _H_VAR. Without
+excluding those header-files the build fails because of redefinition errors.
+Furthermore, the atomic library is added to the LDFLAGS to allow for
+multithreading via USE_THREAD.
+
+You can easily define your own target with the GNU Makefile. Unknown targets
+are processed with no default option except USE_POLL=default. So you can very
+well use that property to define your own set of options. USE_POLL and USE_SLZ
+can even be disabled by setting them to an empty string. For example :
+
+ $ gmake TARGET=tiny USE_POLL="" USE_SLZ="" TARGET_CFLAGS=-fomit-frame-pointer
+
+If you need to pass some defines to the preprocessor or compiler, you may pass
+them all in the DEFINE variable. Example:
+
+ $ make TARGET=generic DEFINE="-DDEBUG_DONT_SHARE_POOLS -DDEBUG_MEMORY_POOLS"
+
+The ADDINC variable may be used to add some extra include paths; this is
+sometimes needed when cross-compiling. Similarly the ADDLIB variable may be
+used to specify extra paths to library files. Example :
+
+ $ make TARGET=generic ADDINC=-I/opt/cross/include ADDLIB=-L/opt/cross/lib64
+
+
+6) How to install HAProxy
+=========================
+
+To install haproxy, you can either copy the single resulting binary to the
+place you want, or run :
+
+ $ sudo make install
+
+If you're packaging it for another system, you can specify its root directory
+in the usual DESTDIR variable.
+
+-- end
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..717e303
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,37 @@
+HAPROXY's license - 2006/06/15
+
+Historically, haproxy has been covered by GPL version 2. However, an issue
+appeared in GPL which will prevent external non-GPL code from being built
+using the headers provided with haproxy. My long-term goal is to build a core
+system able to load external modules to support specific application protocols.
+
+Since some protocols are found in rare environments (finance, industry, ...),
+some of them might be accessible only after signing an NDA. Enforcing GPL on
+such modules would only prevent them from ever being implemented, while not
+providing anything useful to ordinary users.
+
+For this reason, I *want* to be able to support binary only external modules
+when needed, with a GPL core and GPL modules for standard protocols, so that
+people fixing bugs don't keep them secretly to try to stay over competition.
+
+The solution was then to apply the LGPL license to the exportable include
+files, while keeping the GPL for all the rest. This way, it still is mandatory
+to redistribute modified code under customer request, but at the same time, it
+is expressly permitted to write, compile, link and load non-GPL code using the
+LGPL header files and not to distribute them if it causes a legal problem.
+
+Of course, users are strongly encouraged to continue the work under GPL as long
+as possible, since this license has allowed useful enhancements, contributions
+and fixes from talented people around the world.
+
+Due to the incompatibility between the GPL and the OpenSSL licence, you must
+apply the GPL/LGPL licence with the following exception:
+This program is released under the GPL with the additional exemption that
+compiling, linking, and/or using OpenSSL is allowed.
+
+The text of the licenses lies in the "doc" directory. All the files provided in
+this package are covered by the GPL unless expressly stated otherwise in them.
+Every patch or contribution provided by external people will by default comply
+with the license of the files it affects, or be rejected.
+
+Willy Tarreau - w@1wt.eu
diff --git a/MAINTAINERS b/MAINTAINERS
new file mode 100644
index 0000000..d4b7980
--- /dev/null
+++ b/MAINTAINERS
@@ -0,0 +1,152 @@
+This file contains a list of people who are responsible for certain parts of
+the HAProxy project and who have authority on them. This means that these
+people have to be consulted before doing any change in the parts they maintain,
+including when fixing bugs. These persons are allowed to reject any change on
+the parts they maintain, and in parallel they try their best to ensure these
+parts work well. Similarly, any change to these parts not being validated by
+them will be rejected.
+
+The best way to deal with such subsystems when sending patches is to send the
+patches to the mailing list and to CC these people. When no maintainer is
+listed for a subsystem, you can simply send your changes the usual way, and it
+is also a sign that if you want to strengthen your skills on certain parts you
+can become yourself a maintainer of the parts you care a lot about.
+
+Please do not ask them to troubleshoot your bugs; it's not their job even
+though they may occasionally help as time permits.
+
+List of maintainers
+-------------------
+
+51Degrees device identification
+Maintainer: Ben Shillito <ben@51degrees.com>
+Files: addons/51degrees, doc/51Degrees-device-detection.txt
+
+Cache
+Maintainer: William Lallemand <wlallemand@haproxy.com>
+Files: src/cache.c, include/haproxy/cache*.h
+
+DeviceAtlas device identification
+Maintainer: David Carlier <dcarlier@deviceatlas.com>
+Files: addons/deviceatlas, doc/DeviceAtlas-device-detection.txt
+
+DNS and Resolvers
+Maintainer: Emeric Brun <ebrun@haproxy.com>
+Maintainer: Baptiste Assmann <bedis9@gmail.com>
+Files: src/dns.c, include/haproxy/dns*.h
+Files: src/resolvers.c, include/haproxy/resolvers*.h
+
+Doc to HTML converter (dconv)
+Maintainer: Cyril Bonté <cyril.bonte@free.fr>
+Files: doc/*.txt
+Note: ask Cyril before changing any doc's format or structure.
+
+EBTree
+Maintainer: Willy Tarreau <w@1wt.eu>
+Files: src/eb*.c, include/import/eb*.h
+
+FCGI:
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/mux_fcgi.c, src/fcgi*.c, include/haproxy/fcgi*.h
+
+Filters:
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/filters.c, src/flt_*.c, include/haproxy/filters*.h
+Files: doc/internals/filters.txt
+
+H1 and HTX
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/mux_h1.c src/h1_htx.c, include/haproxy/h1_htx.h
+
+H2 and HPACK
+Maintainer: Willy Tarreau <w@1wt.eu>
+Files: src/mux_h2.c src/h2.c src/hpack*.c
+Files: include/haproxy/h2.h, include/haproxy/hpack*.h
+
+Health checks
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/checks.c, include/haproxy/check*.h
+Files: src/tcpcheck.c, include/haproxy/tcpcheck*.h
+Note: health checks are fragile and have been broken many times, so please
+ consult the relevant maintainers if you want to change these specific
+ parts.
+
+HTTP
+Maintainer: Willy Tarreau <w@1wt.eu>
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/http*.h, include/haproxy/http*.h
+
+HTX
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/htx.c, include/haproxy/htx*.h, doc/internals/htx-api.txt
+
+Lua
+Maintainer: Thierry Fournier <tfournier@arpalert.org>
+Files: src/hlua.c, include/haproxy/hlua*.h
+
+Mailers
+Maintainer: Simon Horman <horms@verge.net.au>
+Files: src/mailers.c, include/haproxy/mailers*.h
+
+Maps and pattern matching
+Maintainer: Thierry Fournier <tfournier@arpalert.org>
+Files: src/map.c, include/haproxy/map*.h
+Files: src/pattern.c, include/haproxy/pattern*.h
+
+Master-worker
+Maintainer: William Lallemand <wlallemand@haproxy.com>
+Note: Please CC William when touching src/haproxy.c and admin/systemd
+
+Multi-threading
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Maintainer: Emeric Brun <ebrun@haproxy.com>
+Files: src/thread.c, include/haproxy/thread*.h
+Note: every change around the locking or synchronization point will require
+ approval from one of these maintainers. Problems which only appear when
+ nbthread is greater than 1 and which disappear otherwise are also
+ relevant.
+
+Multi-threaded task scheduler
+Maintainer: Willy Tarreau <w@1wt.eu>
+Maintainer: Olivier Houchard <cognet@ci0.org>
+Files: include/haproxy/task*.h src/task.c
+
+Peers
+Maintainer: Emeric Brun <ebrun@haproxy.com>
+Maintainer: Frédéric Lécaille <flecaille@haproxy.com>
+Files: src/peers.c, include/haproxy/peers*.h
+
+Prometheus Exporter
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Maintainer: William Dauchy <wdauchy@gmail.com>
+Files: addons/promex
+Note: William is the referent regarding Prometheus. He should be consulted for
+ all additions and modifications of features. Christopher is the referent
+ for the code itself. He should be consulted for questions regarding the
+ exporter integration into HAProxy, as well as for the bugs.
+
+QUIC and HTTP/3
+Maintainer: Frédéric Lécaille <flecaille@haproxy.com>
+Maintainer: Amaury Denoyelle <adenoyelle@haproxy.com>
+Files: src/quic*.c, src/cfgparse-quic.c, include/haproxy/quic*.h
+Files: src/mux_quic.c, include/haproxy/mux_quic.h,
+Files: src/proto_quic.c, include/haproxy/proto_quic.h
+Files: src/xprt_quic.c, include/haproxy/xprt_quic.h
+Files: src/h3*.c, include/haproxy/h3*.h
+
+ScientiaMobile WURFL Device Detection
+Maintainer: Paul Borile, Massimiliano Bellomi <wurfl-haproxy-support@scientiamobile.com>
+Files: addons/wurfl, doc/WURFL-device-detection.txt
+
+SPOE
+Maintainer: Christopher Faulet <cfaulet@haproxy.com>
+Files: src/flt_spoe.c, include/haproxy/spoe*.h, doc/SPOE.txt
+
+SSL
+Maintainer: Emeric Brun <ebrun@haproxy.com>
+Maintainer: William Lallemand <wlallemand@haproxy.com>
+Files: src/cfgparse-ssl.c, src/ssl_*.c, include/haproxy/ssl_*.h
+
+Thread-safe lists
+Maintainer: Olivier Houchard <cognet@ci0.org>
+Files: include/haproxy/list*.h
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7b546a5
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,1265 @@
+# This GNU Makefile supports different OS and CPU combinations.
+#
+# You should use it this way :
+# [g]make TARGET=os [ARCH=arch] [CPU=cpu] USE_xxx=1 ...
+#
+# When in doubt, invoke help, possibly with a known target :
+# [g]make help
+# [g]make help TARGET=linux-glibc
+#
+# By default the detailed commands are hidden for a cleaner output, but you may
+# see them by appending "V=1" to the make command.
+#
+# Valid USE_* options are enumerated in the "use_opts" variable and are listed
+# below. Most of them are automatically set by the TARGET, others have to be
+# explicitly specified :
+# USE_EPOLL : enable epoll() on Linux 2.6. Automatic.
+# USE_KQUEUE : enable kqueue() on BSD. Automatic.
+# USE_EVPORTS : enable event ports on SunOS systems. Automatic.
+# USE_NETFILTER : enable netfilter on Linux. Automatic.
+# USE_PCRE : enable use of libpcre for regex.
+# USE_PCRE_JIT : enable JIT for faster regex on libpcre >= 8.32
+# USE_PCRE2 : enable use of libpcre2 for regex. Recommended.
+# USE_PCRE2_JIT : enable JIT for faster regex on libpcre2
+# USE_POLL : enable poll(). Automatic.
+# USE_THREAD : enable threads support.
+# USE_STATIC_PCRE : enable static libpcre.
+# USE_STATIC_PCRE2 : enable static libpcre2. Recommended.
+# USE_TPROXY : enable transparent proxy. Automatic.
+# USE_LINUX_TPROXY : enable full transparent proxy. Automatic.
+# USE_LINUX_SPLICE : enable kernel 2.6 splicing. Automatic.
+# USE_LINUX_CAP : enable Linux capabilities.
+# USE_LIBCRYPT : enable encrypted passwords using -lcrypt
+# USE_CRYPT_H : set it if your system requires including crypt.h
+# USE_GETADDRINFO : use getaddrinfo() to resolve IPv6 host names.
+# USE_OPENSSL : enable use of OpenSSL. Recommended, but see below.
+# USE_OPENSSL_AWSLC : enable use of AWS-LC
+# USE_OPENSSL_WOLFSSL : enable use of wolfSSL with the OpenSSL API
+# USE_QUIC : enable use of QUIC with the quictls API (quictls, libressl, boringssl)
+# USE_QUIC_OPENSSL_COMPAT : enable use of QUIC with the standard openssl API (limited features)
+# USE_ENGINE : enable use of OpenSSL Engine.
+# USE_LUA : enable Lua support.
+# USE_ACCEPT4 : enable use of accept4() on linux. Automatic.
+# USE_CLOSEFROM : enable use of closefrom() on *bsd, solaris. Automatic.
+# USE_PRCTL : enable use of prctl(). Automatic.
+# USE_PROCCTL : enable use of procctl(). Automatic.
+# USE_ZLIB : enable zlib library support and disable SLZ
+# USE_SLZ : enable slz library instead of zlib (default=enabled)
+# USE_CPU_AFFINITY : enable pinning processes to CPU on Linux. Automatic.
+# USE_TFO : enable TCP fast open. Supported on Linux >= 3.7.
+# USE_NS : enable network namespace support. Supported on Linux >= 2.6.24.
+# USE_DL : enable it if your system requires -ldl. Automatic on Linux.
+# USE_MATH : enable use of -lm. Automatic.
+# USE_RT : enable it if your system requires -lrt. Automatic on Linux.
+# USE_BACKTRACE : enable backtrace(). Automatic on Linux.
+# USE_PROMEX : enable the Prometheus exporter
+# USE_DEVICEATLAS : enable DeviceAtlas api.
+# USE_51DEGREES : enable third party device detection library from 51Degrees
+# USE_WURFL : enable WURFL detection library from Scientiamobile
+# USE_SYSTEMD : enable sd_notify() support.
+# USE_OBSOLETE_LINKER : use when the linker fails to emit __start_init/__stop_init
+# USE_THREAD_DUMP : use the more advanced thread state dump system. Automatic.
+# USE_OT : enable the OpenTracing filter
+# USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
+# USE_LIBATOMIC : force to link with/without libatomic. Automatic.
+# USE_PTHREAD_EMULATION : replace pthread's rwlocks with ours
+# USE_SHM_OPEN : use shm_open() for the startup-logs
+#
+# Options can be forced by specifying "USE_xxx=1" or can be disabled by using
+# "USE_xxx=" (empty string). The list of enabled and disabled options for a
+# given TARGET is enumerated at the end of "make help". Most of these options
+# support specific xxx_CFLAGS and/or xxx_LDFLAGS that can be individually
+# forced. The currently active ones for a given set of options are listed in
+# "make opts USE_xxx=1 ...".
+#
+# Variables useful for packagers :
+# CC is set to "cc" by default and is used for compilation only.
+# LD is set to "cc" by default and is used for linking only.
+# ARCH may be useful to force build of 32-bit binary on 64-bit systems
+# CFLAGS is automatically set for the specified CPU and may be overridden.
+# LDFLAGS is automatically set to -g and may be overridden.
+# DEP may be cleared to ignore changes to include files during development
+# SMALL_OPTS may be used to specify some options to shrink memory usage.
+# DEBUG may be used to set some internal debugging options.
+# ERR may be set to non-empty to pass -Werror to the compiler
+# ADDINC may be used to complete the include path in the form -Ipath.
+# ADDLIB may be used to complete the library list in the form -Lpath -llib.
+# DEFINE may be used to specify any additional define, which will be reported
+# by "haproxy -vv" in CFLAGS.
+# SILENT_DEFINE may be used to specify other defines which will not be
+# reported by "haproxy -vv".
+# EXTRA is used to force building or not building some extra tools.
+# DESTDIR is not set by default and is used for installation only.
+# It might be useful to set DESTDIR if you want to install haproxy
+# in a sandbox.
+# INSTALL is set to "install" by default and is used to provide the name of
+# the install binary used by the install targets and any additional
+# flags.
+# PREFIX is set to "/usr/local" by default and is used for installation only.
+# SBINDIR is set to "$(PREFIX)/sbin" by default and is used for installation
+# only.
+# MANDIR is set to "$(PREFIX)/share/man" by default and is used for
+# installation only.
+# DOCDIR is set to "$(PREFIX)/doc/haproxy" by default and is used for
+# installation only.
+# HLUA_PREPEND_PATH may be used to prepend a folder to Lua's default package.path.
+# HLUA_PREPEND_CPATH may be used to prepend a folder to Lua's default package.cpath.
+#
+# Other variables :
+# PCRE_CONFIG : force the binary path to get pcre config (by default
+# pcre-config)
+# PCREDIR : force the path to libpcre.
+# PCRE_LIB : force the lib path to libpcre (defaults to $PCREDIR/lib).
+# PCRE_INC : force the include path to libpcre ($PCREDIR/inc)
+# PCRE2_CONFIG : force the binary path to get pcre2 config (by default
+# pcre2-config)
+# SSL_LIB : force the lib path to libssl/libcrypto
+# SSL_INC : force the include path to libssl/libcrypto
+# LUA_LIB : force the lib path to lua
+# LUA_INC : force the include path to lua
+# LUA_LIB_NAME : force the lib name (or automatically evaluated, by order of
+# priority : lua5.4, lua54, lua5.3, lua53, lua).
+# OT_DEBUG : compile the OpenTracing filter in debug mode
+# OT_INC : force the include path to libopentracing-c-wrapper
+# OT_LIB : force the lib path to libopentracing-c-wrapper
+# OT_RUNPATH : add RUNPATH for libopentracing-c-wrapper to haproxy executable
+# OT_USE_VARS : allows the use of variables for the OpenTracing context
+# IGNOREGIT : ignore GIT commit versions if set.
+# VERSION : force haproxy version reporting.
+# SUBVERS : add a sub-version (eg: platform, model, ...).
+# EXTRAVERSION : local version string to append (e.g. build number etc)
+# VERDATE : force haproxy's release date.
+# VTEST_PROGRAM : location of the vtest program to run reg-tests.
+# DEBUG_USE_ABORT: use abort() for program termination, see include/haproxy/bug.h for details
+
+include include/make/verbose.mk
+include include/make/compiler.mk
+include include/make/options.mk
+
+#### Installation options.
+DESTDIR =
+INSTALL = install
+PREFIX = /usr/local
+SBINDIR = $(PREFIX)/sbin
+MANDIR = $(PREFIX)/share/man
+DOCDIR = $(PREFIX)/doc/haproxy
+
+#### TARGET system
+# Use TARGET=<target_name> to optimize for a specific target OS among the
+# following list (use the default "generic" if uncertain) :
+# linux-glibc, linux-glibc-legacy, linux-musl, solaris, freebsd, freebsd-glibc,
+# dragonfly, openbsd, netbsd, cygwin, haiku, aix51, aix52, aix72-gcc, osx, generic,
+# custom
+TARGET =
+
+#### TARGET CPU
+# Use CPU=<cpu_name> to optimize for a particular CPU, among the following
+# list :
+# generic, native, i586, i686, ultrasparc, power8, power9, custom,
+# a53, a72, armv81, armv8-auto
+CPU = generic
+
+#### Architecture, used when not building for native architecture
+# Use ARCH=<arch_name> to force build for a specific architecture. Known
+# architectures will lead to "-m32" or "-m64" being added to CFLAGS and
+# LDFLAGS. This can be required to build 32-bit binaries on 64-bit targets.
+# Currently, only 32, 64, x86_64, i386, i486, i586 and i686 are understood.
+ARCH =
+
+#### Toolchain options.
+CC = cc
+LD = $(CC)
+
+#### Debug flags (typically "-g").
+# Those flags only feed CFLAGS so it is not mandatory to use this form.
+DEBUG_CFLAGS = -g
+
+#### Add -Werror when set to non-empty
+ERR =
+
+#### May be used to force running a specific set of reg-tests
+REG_TEST_FILES =
+REG_TEST_SCRIPT=./scripts/run-regtests.sh
+
+#### Compiler-specific flags that may be used to disable some negative over-
+# optimization or to silence some warnings.
+# We rely on signed integer wraparound on overflow, however clang thinks it
+# can do whatever it wants since it's undefined behavior, so use -fwrapv
+# to be sure we get the intended behavior.
+WARN_CFLAGS := -Wtype-limits -Wshift-negative-value -Wshift-overflow=2 \
+ -Wduplicated-cond -Wnull-dereference
+SPEC_CFLAGS := -Wall -Wextra -Wundef -Wdeclaration-after-statement -Wfatal-errors
+SPEC_CFLAGS += $(call cc-all-fast,$(WARN_CFLAGS))
+
+SPEC_CFLAGS += $(call cc-opt-alt,-fwrapv,-fno-strict-overflow)
+SPEC_CFLAGS += $(cc-wnouwo)
+SPEC_CFLAGS += $(call cc-nowarn,address-of-packed-member)
+SPEC_CFLAGS += $(call cc-nowarn,unused-label)
+SPEC_CFLAGS += $(call cc-nowarn,sign-compare)
+SPEC_CFLAGS += $(call cc-nowarn,unused-parameter)
+SPEC_CFLAGS += $(call cc-nowarn,clobbered)
+SPEC_CFLAGS += $(call cc-nowarn,missing-field-initializers)
+SPEC_CFLAGS += $(call cc-nowarn,cast-function-type)
+SPEC_CFLAGS += $(call cc-nowarn,string-plus-int)
+SPEC_CFLAGS += $(call cc-nowarn,atomic-alignment)
+
+ifneq ($(ERR),)
+ SPEC_CFLAGS += -Werror
+endif
+
+#### Memory usage tuning
+# If small memory footprint is required, you can reduce the buffer size. There
+# are 2 buffers per concurrent session, so 16 kB buffers will eat 32 MB memory
+# with 1000 concurrent sessions. Putting it slightly lower than a page size
+# will prevent the additional parameters to go beyond a page. 8030 bytes is
+# exactly 5.5 TCP segments of 1460 bytes and is generally good. Useful tuning
+# macros include :
+# SYSTEM_MAXCONN, BUFSIZE, MAXREWRITE, REQURI_LEN, CAPTURE_LEN.
+# Example: SMALL_OPTS = -DBUFSIZE=8030 -DMAXREWRITE=1030 -DSYSTEM_MAXCONN=1024
+SMALL_OPTS =
+
+#### Debug settings
+# You can enable debugging on specific code parts by setting DEBUG=-DDEBUG_xxx.
+# Use quotes and spaces if multiple options are needed (the DEBUG variable is
+# passed as-is to CFLAGS). Please check sources for their exact meaning or do
+# not use them at all. Some even more obscure ones might also be available
+# without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
+# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
+# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_ACTION=[0-3], DEBUG_HPACK,
+# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
+# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING, DEBUG_QPACK, DEBUG_LIST.
+DEBUG = -DDEBUG_STRICT -DDEBUG_MEMORY_POOLS
+
+#### Trace options
+# Use TRACE=1 to trace function calls to file "trace.out" or to stderr if not
+# possible.
+TRACE =
+
+#### Additional include and library dirs
+# Redefine this if you want to add some special PATH to include/libs
+ADDINC =
+ADDLIB =
+
+#### Specific macro definitions
+# Use DEFINE=-Dxxx to set any tunable macro. Anything declared here will appear
+# in the build options reported by "haproxy -vv". Use SILENT_DEFINE if you do
+# not want to pollute the report with complex defines.
+# The following settings might be of interest when SSL is enabled :
+# LISTEN_DEFAULT_CIPHERS is a cipher suite string used to set the default SSL
+# ciphers on "bind" lines instead of using OpenSSL's defaults.
+# CONNECT_DEFAULT_CIPHERS is a cipher suite string used to set the default
+# SSL ciphers on "server" lines instead of using OpenSSL's defaults.
+DEFINE =
+SILENT_DEFINE =
+
+#### extra programs to build
+# Force this to enable building extra programs or to disable them.
+# It's automatically appended depending on the targets.
+EXTRA =
+
+#### CPU dependent optimizations
+# Some CFLAGS are set by default depending on the target CPU. Those flags only
+# feed CPU_CFLAGS, which in turn feed CFLAGS, so it is not mandatory to use
+# them. You should not have to change these options. Better use CPU_CFLAGS or
+# even CFLAGS instead.
+CPU_CFLAGS.generic = -O2
+CPU_CFLAGS.native = -O2 -march=native
+CPU_CFLAGS.i586 = -O2 -march=i586
+CPU_CFLAGS.i686 = -O2 -march=i686
+# Note: "-O6" was historically used for ultrasparc but is not a documented
+# GCC level (anything above -O3 is silently clamped to -O3), so state -O3
+# explicitly to make the effective optimization level obvious.
+CPU_CFLAGS.ultrasparc = -O3 -mcpu=v9 -mtune=ultrasparc
+CPU_CFLAGS.power8 = -O2 -mcpu=power8 -mtune=power8
+CPU_CFLAGS.power9 = -O2 -mcpu=power9 -mtune=power9
+CPU_CFLAGS.a53 = -O2 -mcpu=cortex-a53
+CPU_CFLAGS.a72 = -O2 -mcpu=cortex-a72
+CPU_CFLAGS.armv81 = -O2 -march=armv8.1-a
+CPU_CFLAGS.armv8-auto = -O2 -march=armv8-a+crc -moutline-atomics
+# Pick the flag set matching $(CPU); expands empty when CPU is unset/unknown.
+CPU_CFLAGS = $(CPU_CFLAGS.$(CPU))
+
+#### ARCH dependent flags, may be overridden by CPU flags
+ARCH_FLAGS.32 = -m32
+ARCH_FLAGS.64 = -m64
+ARCH_FLAGS.i386 = -m32 -march=i386
+ARCH_FLAGS.i486 = -m32 -march=i486
+ARCH_FLAGS.i586 = -m32 -march=i586
+ARCH_FLAGS.i686 = -m32 -march=i686
+ARCH_FLAGS.x86_64 = -m64 -march=x86-64
+ARCH_FLAGS = $(ARCH_FLAGS.$(ARCH))
+
+#### Common CFLAGS
+# These CFLAGS contain general optimization options, CPU-specific optimizations
+# and debug flags. They may be overridden by some distributions which prefer to
+# set all of them at once instead of playing with the CPU and DEBUG variables.
+CFLAGS = $(ARCH_FLAGS) $(CPU_CFLAGS) $(DEBUG_CFLAGS) $(SPEC_CFLAGS)
+
+#### Common LDFLAGS
+# These LDFLAGS are used as the first "ld" options, regardless of any library
+# path or any other option. They may be changed to add any linker-specific
+# option at the beginning of the ld command line.
+LDFLAGS = $(ARCH_FLAGS) -g
+
+#### list of all "USE_*" options. These ones must be updated if new options are
+# added, so that the relevant options are properly added to the CFLAGS and to
+# the reported build options.
+#
+# Relevant *_CFLAGS/*_LDFLAGS will be concatenated in the order defined here.
+# Note that keeping PCRE in last position is advisable, as it relies on the
+# pcre configuration detection tool, which may generate default include/lib
+# paths overriding more specific entries placed before them.
+use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
+ USE_THREAD USE_PTHREAD_EMULATION USE_BACKTRACE \
+ USE_TPROXY USE_LINUX_TPROXY USE_LINUX_CAP \
+ USE_LINUX_SPLICE USE_LIBCRYPT USE_CRYPT_H USE_ENGINE \
+ USE_GETADDRINFO USE_OPENSSL USE_OPENSSL_WOLFSSL USE_OPENSSL_AWSLC \
+ USE_SSL USE_LUA USE_ACCEPT4 USE_CLOSEFROM USE_ZLIB USE_SLZ \
+ USE_CPU_AFFINITY USE_TFO USE_NS USE_DL USE_RT USE_LIBATOMIC \
+ USE_MATH USE_DEVICEATLAS USE_51DEGREES \
+ USE_WURFL USE_SYSTEMD USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
+ USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
+ USE_MEMORY_PROFILING USE_SHM_OPEN \
+ USE_STATIC_PCRE USE_STATIC_PCRE2 \
+ USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
+
+# preset all variables for all supported build options among use_opts
+$(reset_opts_vars)
+
+#### Target system options
+
+# poll() is always supported, unless explicitly disabled by passing USE_POLL=""
+# on the make command line.
+USE_POLL = default
+
+# SLZ is always supported unless explicitly disabled by passing USE_SLZ=""
+# or disabled by enabling ZLIB using USE_ZLIB=1
+ifeq ($(USE_ZLIB),)
+ USE_SLZ = default
+endif
+
+# generic system target has nothing specific
+ifeq ($(TARGET),generic)
+ set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY)
+endif
+
+# Haiku
+ifeq ($(TARGET),haiku)
+ TARGET_LDFLAGS = -lnetwork
+ set_target_defaults = $(call default_opts,USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
+endif
+
+# For linux >= 2.6.28 and glibc
+ifeq ($(TARGET),linux-glibc)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
+ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
+ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
+ USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN)
+ INSTALL = install -v
+endif
+
+# For linux >= 2.6.28, glibc without new features
+ifeq ($(TARGET),linux-glibc-legacy)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
+ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
+ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_GETADDRINFO)
+ INSTALL = install -v
+endif
+
+# For linux >= 2.6.28 and musl
+ifeq ($(TARGET),linux-musl)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
+ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
+ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
+ USE_GETADDRINFO USE_SHM_OPEN)
+ INSTALL = install -v
+endif
+
+# Solaris 10 and above
+ifeq ($(TARGET),solaris)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_CRYPT_H USE_GETADDRINFO USE_THREAD \
+ USE_RT USE_OBSOLETE_LINKER USE_EVPORTS USE_CLOSEFROM)
+ TARGET_CFLAGS = -DFD_SETSIZE=65536 -D_REENTRANT -D_XOPEN_SOURCE=600 -D__EXTENSIONS__
+ TARGET_LDFLAGS = -lnsl -lsocket
+endif
+
+# FreeBSD 10 and above
+ifeq ($(TARGET),freebsd)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
+ USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO USE_PROCCTL USE_SHM_OPEN)
+endif
+
+# kFreeBSD glibc
+ifeq ($(TARGET),freebsd-glibc)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
+ USE_ACCEPT4 USE_GETADDRINFO USE_CRYPT_H USE_DL)
+endif
+
+# DragonFlyBSD 4.3 and above
+ifeq ($(TARGET),dragonfly)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
+ USE_ACCEPT4 USE_CLOSEFROM USE_GETADDRINFO)
+endif
+
+# Mac OS/X
+ifeq ($(TARGET),osx)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_CPU_AFFINITY USE_KQUEUE \
+ USE_GETADDRINFO)
+ EXPORT_SYMBOL = -export_dynamic
+endif
+
+# OpenBSD 6.3 and above
+ifeq ($(TARGET),openbsd)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_KQUEUE USE_ACCEPT4 \
+ USE_CLOSEFROM USE_GETADDRINFO)
+endif
+
+# NetBSD 8 and above
+ifeq ($(TARGET),netbsd)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_KQUEUE USE_ACCEPT4 \
+ USE_CLOSEFROM USE_GETADDRINFO)
+endif
+
+# AIX 5.1 only
+ifeq ($(TARGET),aix51)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_LIBCRYPT USE_OBSOLETE_LINKER)
+ TARGET_CFLAGS = -Dss_family=__ss_family -Dip6_hdr=ip6hdr -DSTEVENS_API -D_LINUX_SOURCE_COMPAT -Dunsetenv=my_unsetenv
+ DEBUG_CFLAGS =
+endif
+
+# AIX 5.2
+ifeq ($(TARGET),aix52)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_LIBCRYPT USE_OBSOLETE_LINKER)
+ TARGET_CFLAGS = -D_MSGQSUPPORT
+ DEBUG_CFLAGS =
+endif
+
+# AIX 7.2 and above
+ifeq ($(TARGET),aix72-gcc)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_THREAD USE_LIBCRYPT USE_OBSOLETE_LINKER USE_GETADDRINFO)
+ TARGET_CFLAGS = -D_H_XMEM -D_H_VAR
+ USE_LIBATOMIC = implicit
+endif
+
+# Cygwin
+ifeq ($(TARGET),cygwin)
+ set_target_defaults = $(call default_opts, \
+ USE_POLL USE_TPROXY USE_OBSOLETE_LINKER)
+  # Cygwin only gained IPv6 support in version 1.7, hence this 1.5 workaround.
+ TARGET_CFLAGS = $(if $(filter 1.5.%, $(shell uname -r)), -DUSE_IPV6 -DAF_INET6=23 -DINET6_ADDRSTRLEN=46, )
+endif
+
+# set the default settings according to the target above
+$(set_target_defaults)
+
+# Some architectures require to link with libatomic for atomics of certain
+# sizes. These ones are reported as value 1 in the *_LOCK_FREE macros. Value
+# 2 indicates that the builtin is native thus doesn't require libatomic. Hence
+# any occurrence of 1 indicates libatomic is necessary. It's better to avoid
+# linking with it by default as it's not always available nor deployed
+# (especially on archs which do not need it).
+ifneq ($(USE_THREAD),)
+  # Dump the compiler's predefined macros and count any *_LOCK_FREE macro
+  # valued 1 (builtin atomic not native for that size on this arch). Any hit
+  # means threaded builds must link with libatomic, so enable it implicitly.
+  # Errors (e.g. $(CC) missing) are silenced and count as 0 (no libatomic).
+  ifneq ($(shell $(CC) $(CFLAGS) -dM -E -xc - </dev/null 2>/dev/null | grep -c 'LOCK_FREE.*1'),0)
+    USE_LIBATOMIC = implicit
+  endif
+endif
+
+#### Determine version, sub-version and release date.
+# If GIT is found, and IGNOREGIT is not set, VERSION, SUBVERS and VERDATE are
+# extracted from the last commit. Otherwise, use the contents of the files
+# holding the same names in the current directory.
+
+ifeq ($(IGNOREGIT),)
+  # Latest "v*" tag with the leading 'v' stripped (e.g. v2.9.1 -> 2.9.1);
+  # empty when there is no .git directory or git fails.
+  VERSION := $(shell [ -d .git/. ] && (git describe --tags --match 'v*' --abbrev=0 | cut -c 2-) 2>/dev/null)
+  ifneq ($(VERSION),)
+    # OK git is there and works. SUBVERS becomes "-<commit>-<count>" only when
+    # commits exist past the tag; VERDATE is the last commit date as yyyy/mm/dd.
+    SUBVERS := $(shell comms=`git log --format=oneline --no-merges v$(VERSION).. 2>/dev/null | wc -l | tr -d '[:space:]'`; commit=`(git log -1 --pretty=%h --abbrev=6) 2>/dev/null`; [ $$comms -gt 0 ] && echo "-$$commit-$$comms")
+    VERDATE := $(shell git log -1 --pretty=format:%ci | cut -f1 -d' ' | tr '-' '/')
+  endif
+endif
+
+# Last commit version not found, take it from the files.
+ifeq ($(VERSION),)
+ VERSION := $(shell cat VERSION 2>/dev/null || touch VERSION)
+endif
+ifeq ($(SUBVERS),)
+ SUBVERS := $(shell (grep -v '\$$Format' SUBVERS 2>/dev/null || touch SUBVERS) | head -n 1)
+endif
+ifeq ($(VERDATE),)
+ VERDATE := $(shell (grep -v '^\$$Format' VERDATE 2>/dev/null || touch VERDATE) | head -n 1 | cut -f1 -d' ' | tr '-' '/')
+endif
+
+# this one is always empty by default and appended verbatim
+EXTRAVERSION =
+
+#### Build options
+# Do not change these ones, enable USE_* variables instead.
+OPTIONS_CFLAGS =
+OPTIONS_LDFLAGS =
+OPTIONS_OBJS =
+
+#### Extra objects to be built and integrated (used only for development)
+EXTRA_OBJS =
+
+# This variable collects all USE_* values except those set to "implicit". This
+# is used to report a list of all flags which were used to build this version.
+# Do not assign anything to it.
+BUILD_OPTIONS := $(call build_options)
+
+# All USE_* options have their equivalent macro defined in the code (some might
+# possibly be unused though)
+OPTIONS_CFLAGS += $(call opts_as_defines)
+
+ifneq ($(USE_LIBCRYPT),)
+ ifneq ($(TARGET),openbsd)
+ ifneq ($(TARGET),osx)
+ LIBCRYPT_LDFLAGS = -lcrypt
+ endif
+ endif
+endif
+
+ifneq ($(USE_ZLIB),)
+ # Use ZLIB_INC and ZLIB_LIB to force path to zlib.h and libz.{a,so} if needed.
+ ZLIB_CFLAGS = $(if $(ZLIB_INC),-I$(ZLIB_INC))
+ ZLIB_LDFLAGS = $(if $(ZLIB_LIB),-L$(ZLIB_LIB)) -lz
+endif
+
+ifneq ($(USE_SLZ),)
+ OPTIONS_OBJS += src/slz.o
+endif
+
+ifneq ($(USE_POLL),)
+ OPTIONS_OBJS += src/ev_poll.o
+endif
+
+ifneq ($(USE_EPOLL),)
+ OPTIONS_OBJS += src/ev_epoll.o
+endif
+
+ifneq ($(USE_KQUEUE),)
+ OPTIONS_OBJS += src/ev_kqueue.o
+endif
+
+ifneq ($(USE_EVPORTS),)
+ OPTIONS_OBJS += src/ev_evports.o
+endif
+
+ifneq ($(USE_RT),)
+ RT_LDFLAGS = -lrt
+endif
+
+ifneq ($(USE_THREAD),)
+ THREAD_LDFLAGS = -pthread
+endif
+
+ifneq ($(USE_BACKTRACE),)
+ BACKTRACE_LDFLAGS = -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic)
+endif
+
+ifneq ($(USE_CPU_AFFINITY),)
+ OPTIONS_OBJS += src/cpuset.o
+endif
+
+# OpenSSL is packaged in various forms and with various dependencies.
+# In general -lssl is enough, but on some platforms, -lcrypto may be needed,
+# reason why it's added by default. Some even need -lz, then you'll need to
+# pass it in the "ADDLIB" variable if needed. If your SSL libraries are not
+# in the usual path, use SSL_INC=/path/to/inc and SSL_LIB=/path/to/lib.
+
+# This is for the WolfSSL variant of the OpenSSL API. Setting it implies
+# OPENSSL so it's not necessary to set the latter.
+ifneq ($(USE_OPENSSL_WOLFSSL),)
+ SSL_CFLAGS := $(if $(SSL_INC),-I$(SSL_INC)/wolfssl -I$(SSL_INC))
+ SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lwolfssl
+ # always automatically set USE_OPENSSL
+ USE_OPENSSL := $(if $(USE_OPENSSL),$(USE_OPENSSL),implicit)
+endif
+
+# This is for the AWS-LC variant of the OpenSSL API. Setting it implies
+# OPENSSL so it's not necessary to set the latter.
+ifneq ($(USE_OPENSSL_AWSLC),)
+ # always automatically set USE_OPENSSL
+ USE_OPENSSL := $(if $(USE_OPENSSL),$(USE_OPENSSL),implicit)
+endif
+
+# This is for any variant of the OpenSSL API. By default it uses OpenSSL.
+ifneq ($(USE_OPENSSL),)
+ # only preset these for the regular openssl
+ ifeq ($(USE_OPENSSL_WOLFSSL),)
+ SSL_CFLAGS := $(if $(SSL_INC),-I$(SSL_INC))
+ SSL_LDFLAGS := $(if $(SSL_LIB),-L$(SSL_LIB)) -lssl -lcrypto
+ endif
+ USE_SSL := $(if $(USE_SSL),$(USE_SSL),implicit)
+ OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_sample.o src/ssl_crtlist.o src/cfgparse-ssl.o src/ssl_utils.o src/jwt.o src/ssl_ocsp.o
+endif
+
+ifneq ($(USE_ENGINE),)
+ # OpenSSL 3.0 emits loud deprecation warnings by default when building with
+ # engine support, and this option is made to silence them. Better use it
+ # only when absolutely necessary, until there's a viable alternative to the
+ # engine API.
+ ENGINE_CFLAGS = -DOPENSSL_SUPPRESS_DEPRECATED
+endif
+
+ifneq ($(USE_QUIC),)
+OPTIONS_OBJS += src/quic_conn.o src/mux_quic.o src/h3.o src/xprt_quic.o \
+ src/quic_frame.o src/quic_tls.o src/quic_tp.o \
+ src/quic_stats.o src/quic_sock.o src/proto_quic.o \
+ src/qmux_trace.o src/quic_loss.o src/qpack-enc.o \
+ src/quic_cc_newreno.o src/quic_cc_cubic.o src/qpack-tbl.o \
+ src/qpack-dec.o src/hq_interop.o src/quic_stream.o \
+ src/h3_stats.o src/qmux_http.o src/cfgparse-quic.o \
+ src/cbuf.o src/quic_cc.o src/quic_cc_nocc.o src/quic_ack.o \
+ src/quic_trace.o src/quic_cli.o src/quic_ssl.o \
+ src/quic_rx.o src/quic_tx.o src/quic_cid.o src/quic_retry.o\
+ src/quic_retransmit.o
+endif
+
+ifneq ($(USE_QUIC_OPENSSL_COMPAT),)
+OPTIONS_OBJS += src/quic_openssl_compat.o
+endif
+
+ifneq ($(USE_LUA),)
+ check_lua_inc = $(shell if [ -d $(2)$(1) ]; then echo $(2)$(1); fi;)
+ LUA_INC := $(firstword $(foreach lib,lua5.4 lua54 lua5.3 lua53 lua,$(call check_lua_inc,$(lib),"/usr/include/")))
+
+ check_lua_lib = $(shell echo "int main(){}" | $(CC) -o /dev/null -x c - $(2) -l$(1) 2>/dev/null && echo $(1))
+ LUA_LD_FLAGS := -Wl,$(if $(EXPORT_SYMBOL),$(EXPORT_SYMBOL),--export-dynamic) $(if $(LUA_LIB),-L$(LUA_LIB))
+
+ # Try to automatically detect the Lua library if not set
+ ifeq ($(LUA_LIB_NAME),)
+ LUA_LIB_NAME := $(firstword $(foreach lib,lua5.4 lua54 lua5.3 lua53 lua,$(call check_lua_lib,$(lib),$(LUA_LD_FLAGS))))
+ endif
+
+ # Lua lib name must be set now (forced/detected above)
+ ifeq ($(LUA_LIB_NAME),)
+ $(error unable to automatically detect the Lua library name, you can enforce its name with LUA_LIB_NAME=<name> (where <name> can be lua5.4, lua54, lua, ...))
+ endif
+
+ ifneq ($(HLUA_PREPEND_PATH),)
+ LUA_CFLAGS += -DHLUA_PREPEND_PATH=$(HLUA_PREPEND_PATH)
+ BUILD_OPTIONS += HLUA_PREPEND_PATH=$(HLUA_PREPEND_PATH)
+ endif # HLUA_PREPEND_PATH
+
+ ifneq ($(HLUA_PREPEND_CPATH),)
+ LUA_CFLAGS += -DHLUA_PREPEND_CPATH=$(HLUA_PREPEND_CPATH)
+ BUILD_OPTIONS += HLUA_PREPEND_CPATH=$(HLUA_PREPEND_CPATH)
+ endif # HLUA_PREPEND_CPATH
+
+ USE_MATH = implicit
+ LUA_CFLAGS += $(if $(LUA_INC),-I$(LUA_INC))
+ LUA_LDFLAGS = $(LUA_LD_FLAGS) -l$(LUA_LIB_NAME)
+ OPTIONS_OBJS += src/hlua.o src/hlua_fcn.o
+endif # USE_LUA
+
+ifneq ($(USE_PROMEX),)
+ OPTIONS_OBJS += addons/promex/service-prometheus.o
+endif
+
+ifneq ($(USE_DEVICEATLAS),)
+ # Use DEVICEATLAS_SRC and possibly DEVICEATLAS_INC and DEVICEATLAS_LIB to force path
+ # to DeviceAtlas headers and libraries if needed.
+ DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
+ DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
+ ifeq ($(DEVICEATLAS_SRC),)
+ DEVICEATLAS_LDFLAGS += -lda
+ else
+ ifeq ($(USE_PCRE),)
+ ifeq ($(USE_PCRE2),)
+ $(error the DeviceAtlas module needs the PCRE or the PCRE2 library in order to compile)
+ endif
+ endif
+ ifneq ($(USE_PCRE2),)
+ DEVICEATLAS_CFLAGS += -DDA_REGEX_HDR=\"dac_pcre2.c\" -DDA_REGEX_TAG=2
+ endif
+ OPTIONS_OBJS += $(DEVICEATLAS_LIB)/Os/daunix.o
+ OPTIONS_OBJS += $(DEVICEATLAS_LIB)/dadwcom.o
+ OPTIONS_OBJS += $(DEVICEATLAS_LIB)/dasch.o
+ OPTIONS_OBJS += $(DEVICEATLAS_LIB)/json.o
+ OPTIONS_OBJS += $(DEVICEATLAS_LIB)/dac.o
+ endif
+ OPTIONS_OBJS += addons/deviceatlas/da.o
+ DEVICEATLAS_CFLAGS += $(if $(DEVICEATLAS_INC),-I$(DEVICEATLAS_INC)) $(if $(DEVICEATLAS_SRC),-DDATLAS_DA_NOCACHE)
+endif
+
+# Use 51DEGREES_SRC and possibly 51DEGREES_INC and 51DEGREES_LIB to force path
+# to 51degrees v3/v4 headers and libraries if needed. Note that the SRC/INC/
+# LIB/CFLAGS/LDFLAGS variables names all use 51DEGREES as the prefix,
+# regardless of the version since they are mutually exclusive. The version
+# (51DEGREES_VER) must be either 3 or 4, and defaults to 3 if not set.
+51DEGREES_INC = $(51DEGREES_SRC)
+51DEGREES_LIB = $(51DEGREES_SRC)
+51DEGREES_VER = 3
+
+ifneq ($(USE_51DEGREES),)
+ ifeq ($(51DEGREES_VER),4) # v4 here
+ _51DEGREES_SRC = $(shell find $(51DEGREES_LIB) -maxdepth 2 -name '*.c')
+ OPTIONS_OBJS += $(_51DEGREES_SRC:%.c=%.o)
+ 51DEGREES_CFLAGS += -DUSE_51DEGREES_V4
+ ifeq ($(USE_THREAD),)
+ 51DEGREES_CFLAGS += -DFIFTYONEDEGREES_NO_THREADING -DFIFTYONE_DEGREES_NO_THREADING
+ endif
+ USE_LIBATOMIC = implicit
+ endif # 51DEGREES_VER==4
+
+ ifeq ($(51DEGREES_VER),3) # v3 here
+ OPTIONS_OBJS += $(51DEGREES_LIB)/../cityhash/city.o
+ OPTIONS_OBJS += $(51DEGREES_LIB)/51Degrees.o
+ ifeq ($(USE_THREAD),)
+ 51DEGREES_CFLAGS += -DFIFTYONEDEGREES_NO_THREADING
+ else
+ OPTIONS_OBJS += $(51DEGREES_LIB)/../threading.o
+ endif
+ else
+ ifneq ($(51DEGREES_VER),4)
+ $(error 51Degrees version (51DEGREES_VER) must be either 3 or 4)
+ endif
+ endif # 51DEGREES_VER==3
+
+ OPTIONS_OBJS += addons/51degrees/51d.o
+ 51DEGREES_CFLAGS += $(if $(51DEGREES_INC),-I$(51DEGREES_INC))
+ 51DEGREES_LDFLAGS += $(if $(51DEGREES_LIB),-L$(51DEGREES_LIB))
+ USE_MATH = implicit
+endif # USE_51DEGREES
+
+ifneq ($(USE_WURFL),)
+ # Use WURFL_SRC and possibly WURFL_INC and WURFL_LIB to force path
+ # to WURFL headers and libraries if needed.
+ WURFL_INC = $(WURFL_SRC)
+ WURFL_LIB = $(WURFL_SRC)
+ OPTIONS_OBJS += addons/wurfl/wurfl.o
+ WURFL_CFLAGS = $(if $(WURFL_INC),-I$(WURFL_INC))
+ ifneq ($(WURFL_DEBUG),)
+ WURFL_CFLAGS += -DWURFL_DEBUG
+ endif
+ ifneq ($(WURFL_HEADER_WITH_DETAILS),)
+ WURFL_CFLAGS += -DWURFL_HEADER_WITH_DETAILS
+ endif
+ WURFL_LDFLAGS = $(if $(WURFL_LIB),-L$(WURFL_LIB)) -lwurfl
+endif
+
+ifneq ($(USE_SYSTEMD),)
+ SYSTEMD_LDFLAGS = -lsystemd
+endif
+
+ifneq ($(USE_PCRE)$(USE_STATIC_PCRE)$(USE_PCRE_JIT),)
+ ifneq ($(USE_PCRE2)$(USE_STATIC_PCRE2)$(USE_PCRE2_JIT),)
+ $(error cannot compile both PCRE and PCRE2 support)
+ endif
+ # PCREDIR is used to automatically construct the PCRE_INC and PCRE_LIB paths,
+ # by appending /include and /lib respectively. If your system does not use the
+ # same sub-directories, simply force these variables instead of PCREDIR. It is
+ # automatically detected but can be forced if required (for cross-compiling).
+ # Forcing PCREDIR to an empty string will let the compiler use the default
+ # locations.
+
+ # in case only USE_STATIC_PCRE/USE_PCRE_JIT were set
+ USE_PCRE := $(if $(USE_PCRE),$(USE_PCRE),implicit)
+ PCRE_CONFIG := pcre-config
+ PCREDIR := $(shell $(PCRE_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+ ifneq ($(PCREDIR),)
+ PCRE_INC := $(PCREDIR)/include
+ PCRE_LIB := $(PCREDIR)/lib
+ endif
+
+ PCRE_CFLAGS := $(if $(PCRE_INC),-I$(PCRE_INC))
+ ifeq ($(USE_STATIC_PCRE),)
+ PCRE_LDFLAGS := $(if $(PCRE_LIB),-L$(PCRE_LIB)) -lpcreposix -lpcre
+ else
+ PCRE_LDFLAGS := $(if $(PCRE_LIB),-L$(PCRE_LIB)) -Wl,-Bstatic -lpcreposix -lpcre -Wl,-Bdynamic
+ endif
+endif # USE_PCRE
+
+ifneq ($(USE_PCRE2)$(USE_STATIC_PCRE2)$(USE_PCRE2_JIT),)
+ # in case only USE_STATIC_PCRE2/USE_PCRE2_JIT were set
+ USE_PCRE2 := $(if $(USE_PCRE2),$(USE_PCRE2),implicit)
+ PCRE2_CONFIG := pcre2-config
+ PCRE2DIR := $(shell $(PCRE2_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+ ifneq ($(PCRE2DIR),)
+ PCRE2_INC := $(PCRE2DIR)/include
+ PCRE2_LIB := $(PCRE2DIR)/lib
+
+ ifeq ($(PCRE2_WIDTH),)
+ PCRE2_WIDTH = 8
+ endif
+
+ ifneq ($(PCRE2_WIDTH),8)
+ ifneq ($(PCRE2_WIDTH),16)
+ ifneq ($(PCRE2_WIDTH),32)
+ $(error PCRE2_WIDTH needs to be set to either 8,16 or 32)
+ endif
+ endif
+ endif
+
+ PCRE2_CFLAGS := -DPCRE2_CODE_UNIT_WIDTH=$(PCRE2_WIDTH) $(if $(PCRE2_INC), -I$(PCRE2_INC))
+ PCRE2_LDFLAGS := $(shell $(PCRE2_CONFIG) --libs$(PCRE2_WIDTH) 2>/dev/null || echo -L/usr/local/lib -lpcre2-$(PCRE2_WIDTH))
+
+ ifeq ($(PCRE2_LDFLAGS),)
+ $(error libpcre2-$(PCRE2_WIDTH) not found)
+ else
+ ifeq ($(PCRE2_WIDTH),8)
+ PCRE2_LDFLAGS += -lpcre2-posix
+ endif
+ endif
+
+  ifneq ($(USE_STATIC_PCRE2),)
+    # Static linking: wrap the PCRE2 libs in -Wl,-Bstatic/-Bdynamic. The
+    # library path is already emitted once by the $(if ...) guard; the former
+    # extra bare -L$(PCRE2_LIB) was redundant and has been dropped.
+    PCRE2_LDFLAGS := $(if $(PCRE2_LIB),-L$(PCRE2_LIB)) -Wl,-Bstatic $(PCRE2_LDFLAGS) -Wl,-Bdynamic
+  else
+    # Dynamic linking: just prepend the library path to the detected libs.
+    PCRE2_LDFLAGS := $(if $(PCRE2_LIB),-L$(PCRE2_LIB)) $(PCRE2_LDFLAGS)
+  endif
+ endif # PCRE2DIR
+endif # USE_PCRE2
+
+ifneq ($(USE_NS),)
+ OPTIONS_OBJS += src/namespace.o
+endif
+
+ifneq ($(USE_LINUX_CAP),)
+ OPTIONS_OBJS += src/linuxcap.o
+endif
+
+ifneq ($(USE_OT),)
+ include addons/ot/Makefile
+endif
+
+# better keep this one close to the end, as several libs above may need it
+ifneq ($(USE_DL),)
+ DL_LDFLAGS = -ldl
+endif
+
+ifneq ($(USE_MATH),)
+ MATH_LDFLAGS = -lm
+endif
+
+ifneq ($(USE_LIBATOMIC),)
+ LIBATOMIC_LDFLAGS = -latomic
+endif
+
+#### End of the USE_* options handling, any such option that would be added
+#### below could be silently ignored.
+
+# appends all foo_{C,LD}FLAGS to OPTIONS_{C,LD}FLAGS
+$(collect_opts_flags)
+
+#### Global compile options
+VERBOSE_CFLAGS = $(CFLAGS) $(TARGET_CFLAGS) $(SMALL_OPTS) $(DEFINE)
+COPTS = -Iinclude
+
+COPTS += $(CFLAGS) $(TARGET_CFLAGS) $(SMALL_OPTS) $(DEFINE) $(SILENT_DEFINE)
+COPTS += $(DEBUG) $(OPTIONS_CFLAGS) $(ADDINC)
+
+ifneq ($(VERSION)$(SUBVERS)$(EXTRAVERSION),)
+ COPTS += -DCONFIG_HAPROXY_VERSION=\"$(VERSION)$(SUBVERS)$(EXTRAVERSION)\"
+endif
+
+ifneq ($(VERDATE),)
+ COPTS += -DCONFIG_HAPROXY_DATE=\"$(VERDATE)\"
+endif
+
+ifneq ($(TRACE),)
+ # if tracing is enabled, we want it to be as fast as possible
+ TRACE_COPTS := $(filter-out -O0 -O1 -O2 -pg -finstrument-functions,$(COPTS)) -O3 -fomit-frame-pointer
+ COPTS += -finstrument-functions
+endif
+
+#### Global link options
+# These options are added at the end of the "ld" command line. Use LDFLAGS to
+# add options at the beginning of the "ld" command line if needed.
+LDOPTS = $(TARGET_LDFLAGS) $(OPTIONS_LDFLAGS) $(ADDLIB)
+
+ifeq ($(TARGET),)
+all:
+ @echo "Building HAProxy without specifying a TARGET is not supported."
+ @echo
+ @echo "Usage:"
+ @echo
+ @echo " $ make help # To print a full explanation."
+ @echo " $ make TARGET=xxx USE_<feature>=1 # To build HAProxy."
+ @echo
+ @echo "The most commonly used targets are:"
+ @echo
+ @echo " linux-glibc - Modern Linux with glibc"
+ @echo " linux-musl - Modern Linux with musl"
+ @echo " freebsd - FreeBSD"
+ @echo " openbsd - OpenBSD"
+ @echo " netbsd - NetBSD"
+ @echo " osx - macOS"
+ @echo " solaris - Solaris"
+ @echo
+ @echo "Choose the target which matches your OS the most in order to"
+ @echo "gain the maximum performance out of it."
+ @echo
+ @echo "Common features you might want to include in your build are:"
+ @echo
+ @echo " USE_OPENSSL=1 - Support for TLS encrypted connections"
+ @echo " USE_ZLIB=1 - Support for HTTP response compression"
+ @echo " USE_PCRE=1 - Support for PCRE regular expressions"
+ @echo " USE_LUA=1 - Support for dynamic processing using Lua"
+ @echo
+ @echo "Use 'make help' to print a full explanation of supported targets"
+ @echo "and features, and 'make ... opts' to show the variables in use"
+ @echo "for a given set of build options, in a reusable form."
+ @echo
+ @exit 1
+else
+ifneq ($(filter $(TARGET), linux linux22 linux24 linux24e linux26 linux2628),)
+all:
+ @echo
+ @echo "Target '$(TARGET)' was removed from HAProxy 2.0 due to being irrelevant and"
+ @echo "often wrong. Please use 'linux-glibc' instead or define your custom target"
+ @echo "by checking available options using 'make help TARGET=<your-target>'."
+ @echo
+ @exit 1
+else
+all: haproxy dev/flags/flags $(EXTRA)
+endif # obsolete targets
+endif # TARGET
+
+OBJS =
+
+ifneq ($(EXTRA_OBJS),)
+ OBJS += $(EXTRA_OBJS)
+endif
+
+OBJS += src/mux_h2.o src/mux_fcgi.o src/mux_h1.o src/tcpcheck.o \
+ src/stream.o src/stats.o src/http_ana.o src/server.o \
+ src/stick_table.o src/sample.o src/flt_spoe.o src/tools.o \
+ src/log.o src/cfgparse.o src/peers.o src/backend.o src/resolvers.o \
+ src/cli.o src/connection.o src/proxy.o src/http_htx.o \
+ src/cfgparse-listen.o src/pattern.o src/check.o src/haproxy.o \
+ src/cache.o src/stconn.o src/http_act.o src/http_fetch.o \
+ src/http_client.o src/listener.o src/dns.o src/vars.o src/debug.o \
+ src/tcp_rules.o src/sink.o src/h1_htx.o src/task.o src/mjson.o \
+ src/h2.o src/filters.o src/server_state.o src/payload.o \
+ src/fcgi-app.o src/map.o src/htx.o src/h1.o src/pool.o \
+ src/cfgparse-global.o src/trace.o src/tcp_sample.o src/http_ext.o \
+ src/flt_http_comp.o src/mux_pt.o src/flt_trace.o src/mqtt.o \
+ src/acl.o src/sock.o src/mworker.o src/tcp_act.o src/ring.o \
+ src/session.o src/proto_tcp.o src/fd.o src/channel.o src/activity.o \
+ src/queue.o src/lb_fas.o src/http_rules.o src/extcheck.o \
+ src/flt_bwlim.o src/thread.o src/http.o src/lb_chash.o src/applet.o \
+ src/compression.o src/raw_sock.o src/ncbuf.o src/frontend.o \
+ src/errors.o src/uri_normalizer.o src/http_conv.o src/lb_fwrr.o \
+ src/sha1.o src/proto_sockpair.o src/mailers.o src/lb_fwlc.o \
+ src/ebmbtree.o src/cfgcond.o src/action.o src/xprt_handshake.o \
+ src/protocol.o src/proto_uxst.o src/proto_udp.o src/lb_map.o \
+ src/fix.o src/ev_select.o src/arg.o src/sock_inet.o src/event_hdl.o \
+ src/mworker-prog.o src/hpack-dec.o src/cfgparse-tcp.o \
+ src/sock_unix.o src/shctx.o src/proto_uxdg.o src/fcgi.o \
+ src/eb64tree.o src/clock.o src/chunk.o src/cfgdiag.o src/signal.o \
+ src/regex.o src/lru.o src/eb32tree.o src/eb32sctree.o \
+ src/cfgparse-unix.o src/hpack-tbl.o src/ebsttree.o src/ebimtree.o \
+ src/base64.o src/auth.o src/uri_auth.o src/time.o src/ebistree.o \
+ src/dynbuf.o src/wdt.o src/pipe.o src/init.o src/http_acl.o \
+ src/hpack-huff.o src/hpack-enc.o src/dict.o src/freq_ctr.o \
+ src/ebtree.o src/hash.o src/dgram.o src/version.o src/proto_rhttp.o
+
+ifneq ($(TRACE),)
+ OBJS += src/calltrace.o
+endif
+
+# Used only for forced dependency checking. May be cleared during development.
+INCLUDES = $(wildcard include/*/*.h)
+DEP = $(INCLUDES) .build_opts
+
+help:
+ @sed -ne "/^[^#]*$$/q;s/^# \{0,1\}\(.*\)/\1/;p" Makefile
+ @echo; \
+ if [ -n "$(TARGET)" ]; then \
+ if [ -n "$(set_target_defaults)" ]; then \
+ echo "Current TARGET: $(TARGET)"; \
+ else \
+ echo "Current TARGET: $(TARGET) (custom target)"; \
+ fi; \
+ echo;echo "Enabled features for TARGET '$(TARGET)' (disable with 'USE_xxx=') :"; \
+ set -- $(enabled_opts); echo " $$*" | (fmt || cat) 2>/dev/null; \
+ echo;echo "Disabled features for TARGET '$(TARGET)' (enable with 'USE_xxx=1') :"; \
+ set -- $(disabled_opts); echo " $$*" | (fmt || cat) 2>/dev/null; \
+ else \
+ echo "TARGET not set, you should pass 'TARGET=xxx' to set one among :";\
+ echo " linux-glibc, linux-glibc-legacy, solaris, freebsd, dragonfly, netbsd,"; \
+ echo " osx, openbsd, aix51, aix52, aix72-gcc, cygwin, haiku, generic,"; \
+ echo " custom"; \
+ fi
+
+# Used only to force a rebuild if some build options change, but we don't do
+# it for certain build targets which take no build options nor when the
+# TARGET variable is not set since we're not building, by definition.
+IGNORE_OPTS=help install install-man install-doc install-bin \
+ uninstall clean tags cscope tar git-tar version update-version \
+ opts reg-tests reg-tests-help admin/halog/halog dev/flags/flags \
+ dev/haring/haring dev/poll/poll dev/tcploop/tcploop
+
+ifneq ($(TARGET),)
+ifeq ($(filter $(firstword $(MAKECMDGOALS)),$(IGNORE_OPTS)),)
+# Write the current build options to .build_opts.new, and replace .build_opts
+# only when they actually differ, so that objects depending on it via $(DEP)
+# are rebuilt exactly when the build options change (cmp -s keeps the old
+# timestamp when nothing changed).
+build_opts = $(shell rm -f .build_opts.new; echo \'$(TARGET) $(BUILD_OPTIONS) $(VERBOSE_CFLAGS) $(DEBUG)\' > .build_opts.new; if cmp -s .build_opts .build_opts.new; then rm -f .build_opts.new; else mv -f .build_opts.new .build_opts; fi)
+.build_opts: $(build_opts)
+else
+# goal listed in IGNORE_OPTS: nothing is built, don't touch .build_opts
+.build_opts:
+endif # ignore_opts
+else
+# no TARGET set: not building by definition, no option tracking needed
+.build_opts:
+endif # non-empty target
+
+haproxy: $(OPTIONS_OBJS) $(OBJS)
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+objsize: haproxy
+ $(Q)objdump -t $^|grep ' g '|grep -F '.text'|awk '{print $$5 FS $$6}'|sort
+
+%.o: %.c $(DEP)
+ $(cmd_CC) $(COPTS) -c -o $@ $<
+
+admin/halog/halog: admin/halog/halog.o admin/halog/fgets2.o src/ebtree.o src/eb32tree.o src/eb64tree.o src/ebmbtree.o src/ebsttree.o src/ebistree.o src/ebimtree.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+admin/dyncookie/dyncookie: admin/dyncookie/dyncookie.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+dev/flags/flags: dev/flags/flags.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+dev/haring/haring: dev/haring/haring.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+dev/hpack/%: dev/hpack/%.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+dev/poll/poll:
+ $(cmd_MAKE) -C dev/poll poll CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'
+
+dev/qpack/decode: dev/qpack/decode.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+dev/tcploop/tcploop:
+ $(cmd_MAKE) -C dev/tcploop tcploop CC='$(CC)' OPTIMIZE='$(COPTS)' V='$(V)'
+
+dev/udp/udp-perturb: dev/udp/udp-perturb.o
+ $(cmd_LD) $(LDFLAGS) -o $@ $^ $(LDOPTS)
+
+# rebuild it every time
+.PHONY: src/version.c dev/poll/poll dev/tcploop/tcploop
+
+src/calltrace.o: src/calltrace.c $(DEP)
+ $(cmd_CC) $(TRACE_COPTS) -c -o $@ $<
+
+src/haproxy.o: src/haproxy.c $(DEP)
+ $(cmd_CC) $(COPTS) \
+ -DBUILD_TARGET='"$(strip $(TARGET))"' \
+ -DBUILD_ARCH='"$(strip $(ARCH))"' \
+ -DBUILD_CPU='"$(strip $(CPU))"' \
+ -DBUILD_CC='"$(strip $(CC))"' \
+ -DBUILD_CFLAGS='"$(strip $(VERBOSE_CFLAGS))"' \
+ -DBUILD_OPTIONS='"$(strip $(BUILD_OPTIONS))"' \
+ -DBUILD_DEBUG='"$(strip $(DEBUG))"' \
+ -DBUILD_FEATURES='"$(strip $(build_features))"' \
+ -c -o $@ $<
+
+# Installation targets. DESTDIR/MANDIR/DOCDIR/SBINDIR allow staged installs.
+install-man:
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(MANDIR)"/man1
+	$(Q)$(INSTALL) -m 644 doc/haproxy.1 "$(DESTDIR)$(MANDIR)"/man1
+
+# Install all doc/*.txt files except license and contributor-only documents.
+EXCLUDE_DOCUMENTATION = lgpl gpl coding-style
+DOCUMENTATION = $(filter-out $(EXCLUDE_DOCUMENTATION),$(patsubst doc/%.txt,%,$(wildcard doc/*.txt)))
+
+install-doc:
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(DOCDIR)"
+	$(Q)for x in $(DOCUMENTATION); do \
+		$(INSTALL) -m 644 doc/$$x.txt "$(DESTDIR)$(DOCDIR)" ; \
+	done
+
+# Refuse to install before the binaries have been built.
+install-bin:
+	$(Q)for i in haproxy $(EXTRA); do \
+		if ! [ -e "$$i" ]; then \
+			echo "Please run 'make' before 'make install'."; \
+			exit 1; \
+		fi; \
+	done
+	$(Q)$(INSTALL) -d "$(DESTDIR)$(SBINDIR)"
+	$(Q)$(INSTALL) haproxy $(EXTRA) "$(DESTDIR)$(SBINDIR)"
+
+install: install-bin install-man install-doc
+
+uninstall:
+	$(Q)rm -f "$(DESTDIR)$(MANDIR)"/man1/haproxy.1
+	$(Q)for x in $(DOCUMENTATION); do \
+		rm -f "$(DESTDIR)$(DOCDIR)"/$$x.txt ; \
+	done
+	$(Q)-rmdir "$(DESTDIR)$(DOCDIR)"
+	$(Q)rm -f "$(DESTDIR)$(SBINDIR)"/haproxy
+
+# Remove all build products, editor backups and generated tarballs, including
+# the objects of every addon and dev/admin tool.
+clean:
+	$(Q)rm -f *.[oas] src/*.[oas] haproxy test .build_opts .build_opts.new
+	$(Q)for dir in . src dev/* admin/* addons/* include/* doc; do rm -f $$dir/*~ $$dir/*.rej $$dir/core; done
+	$(Q)rm -f haproxy-$(VERSION).tar.gz haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
+	$(Q)rm -f haproxy-$(VERSION) haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION) nohup.out gmon.out
+	$(Q)rm -f addons/promex/*.[oas]
+	$(Q)rm -f addons/51degrees/*.[oas] addons/51degrees/dummy/*.[oas] addons/51degrees/dummy/*/*.[oas]
+	$(Q)rm -f addons/deviceatlas/*.[oas] addons/deviceatlas/dummy/*.[oas] addons/deviceatlas/dummy/*.o
+	$(Q)rm -f addons/deviceatlas/dummy/Os/*.o
+	$(Q)rm -f addons/ot/src/*.[oas]
+	$(Q)rm -f addons/wurfl/*.[oas] addons/wurfl/dummy/*.[oas]
+	$(Q)rm -f admin/*/*.[oas] admin/*/*/*.[oas]
+	$(Q)rm -f admin/iprange/iprange admin/iprange/ip6range admin/halog/halog
+	$(Q)rm -f admin/dyncookie/dyncookie
+	$(Q)rm -f dev/*/*.[oas]
+	$(Q)rm -f dev/flags/flags dev/haring/haring dev/poll/poll dev/tcploop/tcploop
+	$(Q)rm -f dev/hpack/decode dev/hpack/gen-enc dev/hpack/gen-rht
+	$(Q)rm -f dev/qpack/decode
+
+# Source-indexing helpers for editors (etags / cscope databases).
+tags:
+	$(Q)find src include \( -name '*.c' -o -name '*.h' \) -print0 | \
+	     xargs -0 etags --declarations --members
+
+cscope:
+	$(Q)find src include -name "*.[ch]" -print | cscope -q -b -i -
+
+# Build a release tarball from the working tree. A symlink named after the
+# full version is used so paths inside the archive carry the version prefix;
+# it is removed afterwards.
+tar:	clean
+	$(Q)ln -s . haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)
+	$(Q)tar --exclude=haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)/.git \
+	    --exclude=haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)/haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION) \
+	    --exclude=haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)/haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz \
+	    -cf - haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)/* | gzip -c9 >haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
+	$(Q)echo haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
+	$(Q)rm -f haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)
+
+# Same as "tar" but built straight from HEAD via git-archive (clean tree not
+# required).
+git-tar:
+	$(Q)git archive --format=tar --prefix="haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION)/" HEAD | gzip -9 > haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
+	$(Q)echo haproxy-$(VERSION)$(SUBVERS)$(EXTRAVERSION).tar.gz
+
+# Print the version information derived from VERSION/SUBVERS/VERDATE or git.
+version:
+	@echo "VERSION: $(VERSION)"
+	@echo "SUBVERS: $(SUBVERS)"
+	@echo "VERDATE: $(VERDATE)"
+
+# never use this one if you don't know what it is used for.
+update-version:
+	@echo "Ready to update the following versions :"
+	@echo "VERSION: $(VERSION)"
+	@echo "SUBVERS: $(SUBVERS)"
+	@echo "VERDATE: $(VERDATE)"
+	@echo "Press [ENTER] to continue or Ctrl-C to abort now.";read
+	echo "$(VERSION)" > VERSION
+	echo "$(SUBVERS)" > SUBVERS
+	echo "$(VERDATE)" > VERDATE
+
+# just display the build options. The "USE_*" options and their respective
+# settings are also listed if they're explicitly set on the command line, or if
+# they are not empty. Implicit "USE_*" are not listed.
+# Note: $(origin ...) distinguishes command-line assignments from defaults,
+# which is why the foreach below tests it before printing each option.
+opts:
+	@echo -n 'Using: '
+	@echo -n 'TARGET="$(strip $(TARGET))" '
+	@echo -n 'ARCH="$(strip $(ARCH))" '
+	@echo -n 'CPU="$(strip $(CPU))" '
+	@echo -n 'CC="$(strip $(CC))" '
+	@echo -n 'ARCH_FLAGS="$(strip $(ARCH_FLAGS))" '
+	@echo -n 'CPU_CFLAGS="$(strip $(CPU_CFLAGS))" '
+	@echo -n 'DEBUG_CFLAGS="$(strip $(DEBUG_CFLAGS))" '
+	@#echo "$(strip $(BUILD_OPTIONS))"
+	@$(foreach opt,$(enabled_opts),\
+		$(if $(subst command line,,$(origin USE_$(opt))),,\
+			echo -n 'USE_$(opt)=$(USE_$(opt)) ';) \
+		$(if $(subst command line,,$(origin $(opt)_CFLAGS)),\
+			$(if $($(opt)_CFLAGS),echo -n '$(opt)_CFLAGS="$($(opt)_CFLAGS)" ';),\
+			echo -n '$(opt)_CFLAGS="$($(opt)_CFLAGS)" ';) \
+		$(if $(subst command line,,$(origin $(opt)_LDFLAGS)),\
+			$(if $($(opt)_LDFLAGS),echo -n '$(opt)_LDFLAGS="$($(opt)_LDFLAGS)" ';),\
+			echo -n '$(opt)_LDFLAGS="$($(opt)_LDFLAGS)" ';))
+	@echo
+	@echo 'COPTS="$(strip $(COPTS))"'
+	@echo 'LDFLAGS="$(strip $(LDFLAGS))"'
+	@echo 'LDOPTS="$(strip $(LDOPTS))"'
+	@echo 'OPTIONS_OBJS="$(strip $(OPTIONS_OBJS))"'
+	@echo 'OBJS="$(strip $(OBJS))"'
+
+# When "reg-tests" is the first goal, treat all remaining goals as arguments
+# for the test script instead of make targets (each gets a no-op recipe).
+ifeq (reg-tests, $(firstword $(MAKECMDGOALS)))
+  REGTEST_ARGS := $(wordlist 2, $(words $(MAKECMDGOALS)), $(MAKECMDGOALS))
+  $(eval $(REGTEST_ARGS):;@true)
+endif
+
+# Target to run the regression testing script files.
+reg-tests:
+	$(Q)$(REG_TEST_SCRIPT) --type "$(REGTESTS_TYPES)" $(REGTEST_ARGS) $(REG_TEST_FILES)
+.PHONY: $(REGTEST_ARGS)
+
+# Interactive help describing how to run the regression test suite.
+reg-tests-help:
+	@echo
+	@echo "To launch the reg tests for haproxy, first export to your environment "
+	@echo "VTEST_PROGRAM variable to point to your vtest program:"
+	@echo "    $$ export VTEST_PROGRAM=/opt/local/bin/vtest"
+	@echo "or"
+	@echo "    $$ setenv VTEST_PROGRAM /opt/local/bin/vtest"
+	@echo
+	@echo "The same thing may be done to set your haproxy program with HAPROXY_PROGRAM "
+	@echo "but with ./haproxy as default value."
+	@echo
+	@echo "To run all the tests:"
+	@echo "    $$ make reg-tests"
+	@echo
+	@echo "You can also set the programs to be used on the command line:"
+	@echo "    $$ VTEST_PROGRAM=<...> HAPROXY_PROGRAM=<...> make reg-tests"
+	@echo
+	@echo "To run tests with specific types:"
+	@echo "    $$ REGTESTS_TYPES=slow,default make reg-tests"
+	@echo
+	@echo "with 'default,bug,devel,slow' as default value for REGTESTS_TYPES variable."
+	@echo
+	@echo "About the reg test types:"
+	@echo "    any         : all the tests without distinction (this is the default"
+	@echo "                  value of REGTESTS_TYPES)."
+	@echo "    default     : dedicated to pure haproxy compliance tests."
+	@echo "    slow        : scripts which take non negligible time to run."
+	@echo "    bug         : scripts in relation with bugs they help to reproduce."
+	@echo "    broken      : scripts triggering known broken behaviors for which"
+	@echo "                  there is still no fix."
+	@echo "    experimental: for scripts which are experimental, typically used to"
+	@echo "                  develop new scripts."
+	@echo
+	@echo "Note that the 'reg-tests' target runs the '"$(REG_TEST_SCRIPT)"' script"
+	@echo "(see --help option of this script for more information)."
+
+.PHONY: reg-tests reg-tests-help
+
+# "make range" iteratively builds using "make all" and the exact same build
+# options for all commits within RANGE. RANGE may be either a git range
+# such as ref1..ref2 or a single commit, in which case all commits from
+# the master branch to this one will be tested.
+
+range:
+	$(Q)[ -d .git/. ] || { echo "## Fatal: \"make $@\" may only be used inside a Git repository."; exit 1; }
+
+	$(Q)if git diff-index --name-only HEAD 2>/dev/null | grep -q ^; then \
+		echo "Fatal: \"make $@\" requires a clean working tree."; exit 1; fi
+
+	$(Q)[ -n "$(RANGE)" ] || { echo "## Fatal: \"make $@\" requires a git commit range in RANGE."; exit 1; }
+	$(Q)[ -n "$(TARGET)" ] || { echo "## Fatal: \"make $@\" needs the same variables as \"all\" (TARGET etc)."; exit 1; }
+
+	$(Q) ( die() { echo;echo "## Stopped in error at index [ $$index/$$count ] commit $$commit";\
+		echo "Previous branch was $$BRANCH"; exit $$1; }; \
+	BRANCH=$$(git branch --show-current HEAD 2>/dev/null); \
+	[ -n "$$BRANCH" ] || { echo "Fatal: \"make $@\" may only be used inside a checked out branch."; exit 1; }; \
+	[ -z "$${RANGE##*..*}" ] || RANGE="master..$${RANGE}"; \
+	COMMITS=$$(git rev-list --abbrev-commit --reverse "$${RANGE}"); \
+	index=1; count=$$(echo $$COMMITS | wc -w); \
+	[ "$${count}" -gt 0 ] || { echo "## Fatal: no commit(s) found in range $${RANGE}."; exit 1; }; \
+	echo "Found $${count} commit(s) in range $${RANGE}." ; \
+	echo "Current branch is $$BRANCH"; \
+	echo "Starting to build now..."; \
+	for commit in $$COMMITS; do \
+		echo "[ $$index/$$count ]   $$commit #############################"; \
+		git checkout -q $$commit || die 1; \
+		$(MAKE) all || die 1; \
+		index=$$((index + 1)); \
+	done; \
+	echo;echo "Done! $${count} commit(s) built successfully for RANGE $${RANGE}" ; \
+	git checkout -q "$$BRANCH"; \
+	)
diff --git a/README b/README
new file mode 100644
index 0000000..a2c8b19
--- /dev/null
+++ b/README
@@ -0,0 +1,22 @@
+The HAProxy documentation has been split into a number of different files for
+ease of use.
+
+Please refer to the following files depending on what you're looking for :
+
+ - INSTALL for instructions on how to build and install HAProxy
+ - BRANCHES to understand the project's life cycle and what version to use
+ - LICENSE for the project's license
+ - CONTRIBUTING for the process to follow to submit contributions
+
+The more detailed documentation is located in the doc/ directory :
+
+ - doc/intro.txt for a quick introduction on HAProxy
+ - doc/configuration.txt for the configuration's reference manual
+ - doc/lua.txt for the Lua's reference manual
+ - doc/SPOE.txt for how to use the SPOE engine
+ - doc/network-namespaces.txt for how to use network namespaces under Linux
+ - doc/management.txt for the management guide
+ - doc/regression-testing.txt for how to use the regression testing suite
+ - doc/peers.txt for the peers protocol reference
+ - doc/coding-style.txt for how to adopt HAProxy's coding style
+ - doc/internals for developer-specific documentation (not all up to date)
diff --git a/SUBVERS b/SUBVERS
new file mode 100644
index 0000000..9c899fd
--- /dev/null
+++ b/SUBVERS
@@ -0,0 +1,2 @@
+-260dbb8
+
diff --git a/VERDATE b/VERDATE
new file mode 100644
index 0000000..8127140
--- /dev/null
+++ b/VERDATE
@@ -0,0 +1,2 @@
+2024-02-15 14:53:05 +0100
+2024/02/15
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..1acd4da
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2.9.5
diff --git a/addons/51degrees/51d.c b/addons/51degrees/51d.c
new file mode 100644
index 0000000..a23b468
--- /dev/null
+++ b/addons/51degrees/51d.c
@@ -0,0 +1,1179 @@
+#include <stdio.h>
+
+#include <import/lru.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/sample.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+#ifdef USE_51DEGREES_V4
+#include <hash/hash.h>
+#undef MAP_TYPE
+#include <hash/fiftyone.h>
+#else
+#include <51Degrees.h>
+#endif
+
+/* One entry of the linked list of 51Degrees property names to load into the
+ * data set (built by the "51degrees-property-name-list" global keyword).
+ */
+struct _51d_property_names {
+	struct list list;
+	char *name;
+};
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+/* LRU cache of detection results, shared by all threads and protected by
+ * _51d_lru_lock; the seed randomizes the per-request hash keys.
+ */
+static struct lru64_head *_51d_lru_tree = NULL;
+static unsigned long long _51d_lru_seed;
+
+__decl_spinlock(_51d_lru_lock);
+#endif
+
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+#define _51D_HEADERS_BUFFER_SIZE BUFSIZE
+
+/* Per-thread NUL-terminated copies of the HTTP header values passed as
+ * evidence to the v4 hash engine (filled by _51d_init_internal()).
+ */
+static THREAD_LOCAL struct {
+	char **buf;   /* array of <max> buffers of _51D_HEADERS_BUFFER_SIZE bytes */
+	int max;      /* number of allocated buffer slots */
+	int count;    /* number of buffers actually allocated/usable */
+} _51d_headers;
+
+/* per-thread results handle for the v4 hash engine */
+static THREAD_LOCAL fiftyoneDegreesResultsHash *_51d_results = NULL;
+#endif
+
+/* Global 51Degrees configuration, filled by the config keywords and used by
+ * the sample fetches/converters. The engine-specific members depend on which
+ * 51Degrees library flavor (pattern, trie, or v4 hash) was compiled in.
+ */
+static struct {
+	char property_separator;    /* the separator to use in the response for the values. this is taken from 51degrees-property-separator from config. */
+	struct list property_names; /* list of properties to load into the data set. this is taken from 51degrees-property-name-list from config. */
+	char *data_file_path;
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+	int header_count; /* number of HTTP headers related to device detection. */
+	struct buffer *header_names; /* array of HTTP header names. */
+	fiftyoneDegreesDataSet data_set; /* data set used with the pattern and trie detection methods. */
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesWorksetPool *pool; /* pool of worksets to avoid creating a new one for each request. */
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+	int32_t *header_offsets; /* offsets to the HTTP header name string. */
+#ifdef FIFTYONEDEGREES_NO_THREADING
+	fiftyoneDegreesDeviceOffsets device_offsets; /* Memory used for device offsets. */
+#endif
+#endif
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+	fiftyoneDegreesResourceManager manager; /* v4 resource manager owning the data set */
+	int use_perf_graph;  /* tri-state: -1 = unset, 0 = off, 1 = on */
+	int use_pred_graph;  /* tri-state: -1 = unset, 0 = off, 1 = on */
+	int drift;           /* -1 = unset, otherwise the configured drift */
+	int difference;      /* -1 = unset, otherwise the configured difference */
+	int allow_unmatched; /* tri-state: -1 = unset, 0 = off, 1 = on */
+#endif
+	int cache_size;
+} global_51degrees = {
+	.property_separator = ',',
+	.property_names = LIST_HEAD_INIT(global_51degrees.property_names),
+	.data_file_path = NULL,
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	.data_set = { },
+#endif
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+	.manager = { },
+	.use_perf_graph = -1,
+	.use_pred_graph = -1,
+	.drift = -1,
+	.difference = -1,
+	.allow_unmatched = -1,
+#endif
+	.cache_size = 0,
+};
+
+/* Config parser for "51degrees-data-file": records the path of the 51Degrees
+ * data file to be loaded at init time. The last occurrence wins. Returns 0
+ * on success, -1 on error with <err> filled.
+ */
+static int _51d_data_file(char **args, int section_type, struct proxy *curpx,
+                          const struct proxy *defpx, const char *file, int line,
+                          char **err)
+{
+	if (*(args[1]) == 0) {
+		memprintf(err,
+		          "'%s' expects a filepath to a 51Degrees trie or pattern data file.",
+		          args[0]);
+		return -1;
+	}
+
+	/* free(NULL) is a no-op, no need to guard against a first occurrence */
+	free(global_51degrees.data_file_path);
+	global_51degrees.data_file_path = strdup(args[1]);
+
+	return 0;
+}
+
+/* Config parser for "51degrees-property-name-list": appends each argument to
+ * the global list of property names to load into the data set. Returns 0 on
+ * success, -1 on error (missing argument or allocation failure) with <err>
+ * filled.
+ */
+static int _51d_property_name_list(char **args, int section_type, struct proxy *curpx,
+                                   const struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	int cur_arg = 1;
+	struct _51d_property_names *name;
+
+	if (*(args[cur_arg]) == 0) {
+		memprintf(err,
+		          "'%s' expects at least one 51Degrees property name.",
+		          args[0]);
+		return -1;
+	}
+
+	while (*(args[cur_arg])) {
+		name = calloc(1, sizeof(*name));
+		/* the previous code dereferenced the calloc() result without
+		 * checking it; report allocation failures cleanly instead.
+		 */
+		if (!name || !(name->name = strdup(args[cur_arg]))) {
+			free(name);
+			memprintf(err, "'%s' : out of memory.", args[0]);
+			return -1;
+		}
+		LIST_APPEND(&global_51degrees.property_names, &name->list);
+		++cur_arg;
+	}
+
+	return 0;
+}
+
+/* Config parser for "51degrees-property-separator": the single character
+ * inserted between property values in the response string. Returns 0 on
+ * success, -1 on error with <err> filled.
+ */
+static int _51d_property_separator(char **args, int section_type, struct proxy *curpx,
+                                   const struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	const char *arg = args[1];
+
+	if (!*arg) {
+		memprintf(err,
+		          "'%s' expects a single character.",
+		          args[0]);
+		return -1;
+	}
+	if (arg[1]) {
+		/* more than one character was provided */
+		memprintf(err,
+		          "'%s' expects a single character, got '%s'.",
+		          args[0], arg);
+		return -1;
+	}
+
+	global_51degrees.property_separator = *arg;
+
+	return 0;
+}
+
+/* Config parser for "51degrees-cache-size": number of entries of the LRU
+ * cache of detection results. Returns 0 on success, -1 on error with <err>
+ * filled.
+ */
+static int _51d_cache_size(char **args, int section_type, struct proxy *curpx,
+                           const struct proxy *defpx, const char *file, int line,
+                           char **err)
+{
+	char *end;
+	long size;
+
+	if (*(args[1]) == 0) {
+		memprintf(err,
+		          "'%s' expects a positive numeric value.",
+		          args[0]);
+		return -1;
+	}
+
+	/* use strtol() instead of atoi() so that a non-numeric argument or
+	 * trailing garbage (e.g. "10k") is rejected instead of being silently
+	 * read as zero or truncated.
+	 */
+	size = strtol(args[1], &end, 10);
+	if (*end != '\0' || size < 0 || size > INT_MAX) {
+		memprintf(err,
+		          "'%s' expects a positive numeric value, got '%s'.",
+		          args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.cache_size = size;
+
+	return 0;
+}
+
+/* Sample-fetch validation callback: the 51Degrees fetches are only usable
+ * once a data file has been configured. Returns 1 if OK, else 0 with
+ * <err_msg> filled.
+ */
+static int _51d_fetch_check(struct arg *arg, char **err_msg)
+{
+	if (!global_51degrees.data_file_path) {
+		memprintf(err_msg, "51Degrees data file is not specified (parameter '51degrees-data-file')");
+		return 0;
+	}
+	return 1;
+}
+
+/* Converter validation callback: same requirement as the fetch, a data file
+ * must have been configured. Returns 1 if OK, else 0 with <err_msg> filled.
+ */
+static int _51d_conv_check(struct arg *arg, struct sample_conv *conv,
+                           const char *file, int line, char **err_msg)
+{
+	if (!global_51degrees.data_file_path) {
+		memprintf(err_msg, "51Degrees data file is not specified (parameter '51degrees-data-file')");
+		return 0;
+	}
+	return 1;
+}
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+static void _51d_lru_free(void *cache_entry)
+{
+ struct buffer *ptr = cache_entry;
+
+ if (!ptr)
+ return;
+
+ free(ptr->area);
+ free(ptr);
+}
+
+/* Allocates <size> bytes. On failure, evicts the 10 oldest entries from the
+ * LRU cache to reclaim memory and retries once. May still return NULL.
+ */
+static void *_51d_malloc(int size)
+{
+	void *area;
+
+	area = malloc(size);
+	if (area)
+		return area;
+
+	/* free the oldest 10 entries from lru to free up some memory
+	 * then try allocating memory again */
+	lru64_kill_oldest(_51d_lru_tree, 10);
+	return malloc(size);
+}
+
+/* Inserts a NUL-terminated copy of the sample's string into the LRU cache
+ * under <lru>, committing it under the cache lock. Gives up silently on
+ * allocation failure.
+ */
+static void _51d_insert_cache_entry(struct sample *smp, struct lru64 *lru, void* domain)
+{
+	size_t len = smp->data.u.str.data;
+	struct buffer *entry;
+
+	entry = _51d_malloc(sizeof(*entry));
+	if (!entry)
+		return;
+
+	entry->area = _51d_malloc(len + 1);
+	if (!entry->area) {
+		free(entry);
+		return;
+	}
+
+	memcpy(entry->area, smp->data.u.str.area, len);
+	entry->area[len] = 0;
+	entry->data = len;
+
+	HA_SPIN_LOCK(OTHER_LOCK, &_51d_lru_lock);
+	lru64_commit(lru, entry, domain, 0, _51d_lru_free);
+	HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+}
+
+/* Points the sample's string at the cached buffer attached to <lru>. The
+ * sample then references memory owned by the cache (hence SMP_F_CONST set
+ * by the callers).
+ */
+static void _51d_retrieve_cache_entry(struct sample *smp, struct lru64 *lru)
+{
+	struct buffer *entry = lru->data;
+
+	smp->data.u.str.area = entry->area;
+	smp->data.u.str.data = entry->data;
+}
+#endif
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+/* Sets the important HTTP headers ahead of the detection
+ */
+static void _51d_set_headers(struct sample *smp, fiftyoneDegreesWorkset *ws)
+{
+	struct channel *chn;
+	struct htx *htx;
+	struct http_hdr_ctx ctx;
+	struct ist name;
+	int i;
+
+	ws->importantHeadersCount = 0;
+	chn = (smp->strm ? &smp->strm->req : NULL);
+
+	// No need to null check as this has already been carried out in the
+	// calling method
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	ALREADY_CHECKED(htx);
+
+	/* for each detection-relevant header name, look it up in the request
+	 * and reference its value directly from the HTX (no copy is made, so
+	 * the workset must be used before the message is released).
+	 */
+	for (i = 0; i < global_51degrees.header_count; i++) {
+		name = ist2((global_51degrees.header_names + i)->area,
+			    (global_51degrees.header_names + i)->data);
+		ctx.blk = NULL;
+
+		if (http_find_header(htx, name, &ctx, 1)) {
+			ws->importantHeaders[ws->importantHeadersCount].header = ws->dataSet->httpHeaders + i;
+			ws->importantHeaders[ws->importantHeadersCount].headerValue = ctx.value.ptr;
+			ws->importantHeaders[ws->importantHeadersCount].headerValueLength = ctx.value.len;
+			ws->importantHeadersCount++;
+		}
+	}
+}
+#endif
+
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+/* Resets the userAgent pointers of a device-offsets structure so stale
+ * pointers from a previous detection cannot be reused.
+ */
+static void _51d_init_device_offsets(fiftyoneDegreesDeviceOffsets *offsets) {
+	int i;
+	for (i = 0; i < global_51degrees.data_set.uniqueHttpHeaders.count; i++) {
+		offsets->firstOffset[i].userAgent = NULL;
+	}
+}
+
+/* Fills <offsets> with one entry per detection-relevant header found in the
+ * request, ready for the trie lookup.
+ */
+static void _51d_set_device_offsets(struct sample *smp, fiftyoneDegreesDeviceOffsets *offsets)
+{
+	struct channel *chn;
+	struct htx *htx;
+	struct http_hdr_ctx ctx;
+	struct ist name;
+	int i;
+
+	offsets->size = 0;
+	chn = (smp->strm ? &smp->strm->req : NULL);
+
+	// No need to null check as this has already been carried out in the
+	// calling method
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	ALREADY_CHECKED(htx);
+
+	for (i = 0; i < global_51degrees.header_count; i++) {
+		name = ist2((global_51degrees.header_names + i)->area,
+			    (global_51degrees.header_names + i)->data);
+		ctx.blk = NULL;
+
+		if (http_find_header(htx, name, &ctx, 1)) {
+			(offsets->firstOffset + offsets->size)->httpHeaderOffset = *(global_51degrees.header_offsets + i);
+			(offsets->firstOffset + offsets->size)->deviceOffset = fiftyoneDegreesGetDeviceOffset(&global_51degrees.data_set, ctx.value.ptr);
+			offsets->size++;
+		}
+	}
+
+}
+#endif
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+/* Provides a hash code for the important HTTP headers.
+ */
+unsigned long long _51d_req_hash(const struct arg *args, fiftyoneDegreesWorkset* ws)
+{
+	/* the args pointer is mixed into the seed so that distinct fetch
+	 * argument lists never share cache keys.
+	 */
+	unsigned long long seed = _51d_lru_seed ^ (long)args;
+	unsigned long long hash = 0;
+	int i;
+	for(i = 0; i < ws->importantHeadersCount; i++) {
+		hash ^= ws->importantHeaders[i].header->headerNameOffset;
+		hash ^= XXH3(ws->importantHeaders[i].headerValue,
+		             ws->importantHeaders[i].headerValueLength,
+		             seed);
+	}
+	return hash;
+}
+#endif
+
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+/* Config parser for "51degrees-use-performance-graph" ("on"/"off").
+ * Returns 0 on success, -1 on error with <err> filled.
+ */
+static int _51d_use_perf_graph(char **args, int section_type, struct proxy *curpx,
+                               const struct proxy *defpx, const char *file, int line,
+                               char **err)
+{
+	int state;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (strcmp(args[1], "on") == 0)
+		state = 1;
+	else if (strcmp(args[1], "off") == 0)
+		state = 0;
+	else {
+		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.use_perf_graph = state;
+	return 0;
+}
+
+/* Config parser for "51degrees-use-predictive-graph" ("on"/"off").
+ * Returns 0 on success, -1 on error with <err> filled.
+ */
+static int _51d_use_pred_graph(char **args, int section_type, struct proxy *curpx,
+                               const struct proxy *defpx, const char *file, int line,
+                               char **err)
+{
+	int state;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (strcmp(args[1], "on") == 0)
+		state = 1;
+	else if (strcmp(args[1], "off") == 0)
+		state = 0;
+	else {
+		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.use_pred_graph = state;
+	return 0;
+}
+
+/* Config parser for "51degrees-drift": allowed drift for the hash engine.
+ * Returns 0 on success, -1 on error with <err> filled.
+ */
+static int _51d_drift(char **args, int section_type, struct proxy *curpx,
+                      const struct proxy *defpx, const char *file, int line,
+                      char **err)
+{
+	char *end;
+	long drift;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+
+	/* strtol() instead of atoi() so that non-numeric input or trailing
+	 * garbage is rejected instead of silently parsed as zero.
+	 */
+	drift = strtol(args[1], &end, 10);
+	if (*end != '\0' || drift < 0 || drift > INT_MAX) {
+		memprintf(err, "'%s' expects a positive numeric value, got '%s'.",
+		          args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.drift = drift;
+
+	return 0;
+}
+
+/* Config parser for "51degrees-difference": allowed difference for the hash
+ * engine. Returns 0 on success, -1 on error with <err> filled.
+ */
+static int _51d_difference(char **args, int section_type, struct proxy *curpx,
+                           const struct proxy *defpx, const char *file, int line,
+                           char **err)
+{
+	char *end;
+	long diff;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+
+	/* strtol() instead of atoi() so that non-numeric input or trailing
+	 * garbage is rejected instead of silently parsed as zero.
+	 */
+	diff = strtol(args[1], &end, 10);
+	if (*end != '\0' || diff < 0 || diff > INT_MAX) {
+		memprintf(err, "'%s' expects a positive numeric value, got '%s'.",
+		          args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.difference = diff;
+
+	return 0;
+}
+
+/* Config parser for "51degrees-allow-unmatched" ("on"/"off").
+ * Returns 0 on success, -1 on error with <err> filled.
+ */
+static int _51d_allow_unmatched(char **args, int section_type, struct proxy *curpx,
+                                const struct proxy *defpx, const char *file, int line,
+                                char **err)
+{
+	int state;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (strcmp(args[1], "on") == 0)
+		state = 1;
+	else if (strcmp(args[1], "off") == 0)
+		state = 0;
+	else {
+		memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+		return -1;
+	}
+
+	global_51degrees.allow_unmatched = state;
+	return 0;
+}
+
+/* Per-thread initialization for the v4 hash engine: creates the thread-local
+ * results handle and allocates one header-value buffer per unique header
+ * (capped at _51d_headers.max). Returns 1 on success, 0 on failure; on
+ * failure, buffers allocated so far are kept and accounted in
+ * _51d_headers.count (presumably released by the deinit path - not visible
+ * here, TODO confirm).
+ */
+static int _51d_init_internal()
+{
+	fiftyoneDegreesDataSetHash *ds;
+	int hdr_count;
+	int i, ret = 0;
+
+	ds = (fiftyoneDegreesDataSetHash *)fiftyoneDegreesDataSetGet(&global_51degrees.manager);
+
+	hdr_count = ds->b.b.uniqueHeaders->count;
+	if (hdr_count > _51d_headers.max)
+		hdr_count = _51d_headers.max;
+
+	_51d_results = fiftyoneDegreesResultsHashCreate(&global_51degrees.manager, hdr_count, 0);
+	if (!_51d_results)
+		goto out;
+
+	for (i = 0; i < hdr_count; i++) {
+		_51d_headers.buf[i] = malloc(_51D_HEADERS_BUFFER_SIZE);
+		if (!_51d_headers.buf[i])
+			goto out;
+		_51d_headers.count++;
+	}
+
+	/* success */
+	ret = 1;
+
+out:
+	/* always release the data set reference taken above */
+	fiftyoneDegreesDataSetRelease((fiftyoneDegreesDataSetBase *)ds);
+	return ret;
+}
+
+/* Builds a v4 evidence array from the request's detection-relevant headers.
+ * Each header value is copied (truncated to _51D_HEADERS_BUFFER_SIZE-1 and
+ * NUL-terminated) into the thread-local buffers so the evidence remains
+ * valid independently of the HTX message. Returns NULL on allocation
+ * failure; the caller must free the result with fiftyoneDegreesEvidenceFree().
+ */
+static fiftyoneDegreesEvidenceKeyValuePairArray * _51d_get_evidence(struct sample *smp)
+{
+	fiftyoneDegreesEvidenceKeyValuePairArray *evidence;
+	fiftyoneDegreesDataSetHash *ds;
+	size_t size;
+	struct channel *chn;
+	struct htx *htx;
+	struct http_hdr_ctx ctx;
+	struct ist name;
+	int i;
+
+	chn = (smp->strm ? &smp->strm->req : NULL);
+
+	// No need to null check as this has already been carried out in the
+	// calling method
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	ALREADY_CHECKED(htx);
+
+	ds = (fiftyoneDegreesDataSetHash *)_51d_results->b.b.dataSet;
+	size = _51d_headers.count * 2;
+
+	evidence = fiftyoneDegreesEvidenceCreate(size);
+	if (!evidence)
+		return NULL;
+
+	for (i = 0; i < _51d_headers.count; i++) {
+		fiftyoneDegreesHeader *hdr = &ds->b.b.uniqueHeaders->items[i];
+		name = ist2(hdr->name, hdr->nameLength);
+		ctx.blk = NULL;
+
+		if (http_find_header(htx, name, &ctx, 1)) {
+			size_t len = ctx.value.len;
+
+			/* truncate oversized header values to fit the
+			 * thread-local buffer, keeping room for the NUL.
+			 */
+			if (unlikely(len >= _51D_HEADERS_BUFFER_SIZE))
+				len = _51D_HEADERS_BUFFER_SIZE - 1;
+
+			memcpy(_51d_headers.buf[i], ctx.value.ptr, len);
+			_51d_headers.buf[i][len] = '\0';
+
+			fiftyoneDegreesEvidenceAddString(
+				evidence,
+				FIFTYONE_DEGREES_EVIDENCE_HTTP_HEADER_STRING,
+				name.ptr,
+				_51d_headers.buf[i]);
+		}
+	}
+
+	return evidence;
+}
+#endif
+
+/* Builds the response string from the detection results. Three alternative
+ * signatures are assembled by the preprocessor depending on the compiled
+ * 51Degrees engine (pattern workset, trie offsets, or v4 hash results).
+ * For each requested property (the converter/fetch arguments), the matching
+ * value(s) are appended to a trash chunk, separated by the configured
+ * property separator; "NoData" is used when a property cannot be resolved.
+ * The sample's string is then pointed at the trash chunk (valid only for
+ * the current task cycle).
+ */
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+static void _51d_process_match(const struct arg *args, struct sample *smp, fiftyoneDegreesWorkset* ws)
+{
+	char *methodName;
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+static void _51d_process_match(const struct arg *args, struct sample *smp, fiftyoneDegreesDeviceOffsets *offsets)
+{
+	char valuesBuffer[1024];
+	const char **requiredProperties = fiftyoneDegreesGetRequiredPropertiesNames(&global_51degrees.data_set);
+	int requiredPropertiesCount = fiftyoneDegreesGetRequiredPropertiesCount(&global_51degrees.data_set);
+#endif
+	const char* property_name;
+	int j;
+
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+static void _51d_process_match(const struct arg *args, struct sample *smp)
+{
+	char valuesBuffer[1024];
+#endif
+
+	char no_data[] = "NoData"; /* response when no data could be found */
+	struct buffer *temp = get_trash_chunk();
+	int i = 0, found;
+
+#if defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+	FIFTYONE_DEGREES_EXCEPTION_CREATE;
+#endif
+
+	/* Loop through property names passed to the filter and fetch them from the dataset. */
+	while (args[i].data.str.area) {
+		/* Try to find request property in dataset. */
+		found = 0;
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+		/* "Method", "Difference" and "Rank" are pseudo-properties
+		 * resolved from the match metadata rather than the dataset.
+		 */
+		if (strcmp("Method", args[i].data.str.area) == 0) {
+			switch(ws->method) {
+				case EXACT: methodName = "Exact"; break;
+				case NUMERIC: methodName = "Numeric"; break;
+				case NEAREST: methodName = "Nearest"; break;
+				case CLOSEST: methodName = "Closest"; break;
+				default:
+				case NONE: methodName = "None"; break;
+			}
+			chunk_appendf(temp, "%s", methodName);
+			found = 1;
+		}
+		else if (strcmp("Difference", args[i].data.str.area) == 0) {
+			chunk_appendf(temp, "%d", ws->difference);
+			found = 1;
+		}
+		else if (strcmp("Rank", args[i].data.str.area) == 0) {
+			chunk_appendf(temp, "%d", fiftyoneDegreesGetSignatureRank(ws));
+			found = 1;
+		}
+		else {
+			for (j = 0; j < ws->dataSet->requiredPropertyCount; j++) {
+				property_name = fiftyoneDegreesGetPropertyName(ws->dataSet, ws->dataSet->requiredProperties[j]);
+				if (strcmp(property_name, args[i].data.str.area) == 0) {
+					found = 1;
+					fiftyoneDegreesSetValues(ws, j);
+					chunk_appendf(temp, "%s", fiftyoneDegreesGetValueName(ws->dataSet, *ws->values));
+					break;
+				}
+			}
+		}
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+		found = 0;
+		for (j = 0; j < requiredPropertiesCount; j++) {
+			property_name = requiredProperties[j];
+			if (strcmp(property_name, args[i].data.str.area) == 0 &&
+				fiftyoneDegreesGetValueFromOffsets(&global_51degrees.data_set, offsets, j, valuesBuffer, 1024) > 0) {
+				found = 1;
+				chunk_appendf(temp, "%s", valuesBuffer);
+				break;
+			}
+		}
+#endif
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+		FIFTYONE_DEGREES_EXCEPTION_CLEAR;
+
+		/* multiple values for a property are joined with '|' */
+		found = fiftyoneDegreesResultsHashGetValuesString(
+			_51d_results, args[i].data.str.area,
+			valuesBuffer, 1024, "|",
+			exception);
+
+		if (FIFTYONE_DEGREES_EXCEPTION_FAILED || found <= 0)
+			found = 0;
+		else
+			chunk_appendf(temp, "%s", valuesBuffer);
+#endif
+		if (!found)
+			chunk_appendf(temp, "%s", no_data);
+
+		/* Add separator. */
+		chunk_appendf(temp, "%c", global_51degrees.property_separator);
+		++i;
+	}
+
+	/* drop the trailing separator and NUL-terminate the result */
+	if (temp->data) {
+		--temp->data;
+		temp->area[temp->data] = '\0';
+	}
+
+	smp->data.u.str.area = temp->area;
+	smp->data.u.str.data = temp->data;
+}
+
+/* Marks the sample as a constant string so that downstream processing
+ * neither modifies nor frees the memory it references.
+ */
+static void _51d_set_smp(struct sample *smp)
+{
+	/* the output must be typed as a string to be handled correctly */
+	smp->data.type = SMP_T_STR;
+	/* the sample points at memory it does not own (cache or trash chunk) */
+	smp->flags |= SMP_F_CONST;
+}
+
+/* Sample fetch "51d.all": runs device detection on the request's HTTP
+ * headers and returns the requested property values as a string. Depending
+ * on the compiled engine it uses a pattern workset (with an optional LRU
+ * cache), trie offsets, or the v4 hash evidence API. Returns 1 with the
+ * sample set on success, 0 on failure (message not ready, allocation
+ * failure or engine exception).
+ */
+static int _51d_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct channel *chn;
+	struct htx *htx;
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesWorkset* ws; /* workset for detection */
+	struct lru64 *lru = NULL;
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+	fiftyoneDegreesDeviceOffsets *offsets; /* Offsets for detection */
+#endif
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+	fiftyoneDegreesEvidenceKeyValuePairArray *evidence = NULL;
+	FIFTYONE_DEGREES_EXCEPTION_CREATE;
+#endif
+
+	/* the request message must be available before any header lookup */
+	chn = (smp->strm ? &smp->strm->req : NULL);
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	if (!htx)
+		return 0;
+
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+
+	/* Get only the headers needed for device detection so they can be used
+	 * with the cache to return previous results. Pattern is slower than
+	 * Trie so caching will help improve performance.
+	 */
+
+	/* Get a workset from the pool which will later contain detection results. */
+	ws = fiftyoneDegreesWorksetPoolGet(global_51degrees.pool);
+	if (!ws)
+		return 0;
+
+	/* Set the important HTTP headers for this request in the workset. */
+	_51d_set_headers(smp, ws);
+
+	/* Check the cache to see if there's results for these headers already. */
+	if (_51d_lru_tree) {
+		HA_SPIN_LOCK(OTHER_LOCK, &_51d_lru_lock);
+
+		lru = lru64_get(_51d_req_hash(args, ws),
+		                _51d_lru_tree, (void*)args, 0);
+
+		if (lru && lru->domain) {
+			/* cache hit: release the workset and serve the cached string */
+			fiftyoneDegreesWorksetPoolRelease(global_51degrees.pool, ws);
+			_51d_retrieve_cache_entry(smp, lru);
+			HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+
+			_51d_set_smp(smp);
+			return 1;
+		}
+		HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+	}
+
+	fiftyoneDegreesMatchForHttpHeaders(ws);
+
+	_51d_process_match(args, smp, ws);
+
+#endif
+
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+#ifndef FIFTYONEDEGREES_NO_THREADING
+	offsets = fiftyoneDegreesCreateDeviceOffsets(&global_51degrees.data_set);
+	_51d_init_device_offsets(offsets);
+#else
+	/* single-threaded build: reuse the statically allocated offsets */
+	offsets = &global_51degrees.device_offsets;
+#endif
+
+	/* Trie is very fast so all the headers can be passed in and the result
+	 * returned faster than the hashing algorithm process.
+	 */
+	_51d_set_device_offsets(smp, offsets);
+	_51d_process_match(args, smp, offsets);
+
+#ifndef FIFTYONEDEGREES_NO_THREADING
+	fiftyoneDegreesFreeDeviceOffsets(offsets);
+#endif
+
+#endif
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesWorksetPoolRelease(global_51degrees.pool, ws);
+	if (lru)
+		_51d_insert_cache_entry(smp, lru, (void*)args);
+#endif
+
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+	evidence = _51d_get_evidence(smp);
+	if (!evidence)
+		return 0;
+
+	fiftyoneDegreesResultsHashFromEvidence(
+		_51d_results, evidence, exception);
+	fiftyoneDegreesEvidenceFree(evidence);
+
+	if (FIFTYONE_DEGREES_EXCEPTION_FAILED)
+		return 0;
+
+	_51d_process_match(args, smp);
+#endif
+
+	_51d_set_smp(smp);
+	return 1;
+}
+
+/* Converter "51d.single": runs device detection on the input sample string
+ * (typically a User-Agent) and replaces it with the requested property
+ * values. With the pattern engine an LRU cache keyed on the input string
+ * (plus the argument list pointer) may short-circuit the detection.
+ * Returns 1 with the sample set on success, 0 on failure.
+ */
+static int _51d_conv(const struct arg *args, struct sample *smp, void *private)
+{
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesWorkset* ws; /* workset for detection */
+	struct lru64 *lru = NULL;
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+	fiftyoneDegreesDeviceOffsets *offsets; /* Offsets for detection */
+#endif
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+	FIFTYONE_DEGREES_EXCEPTION_CREATE;
+#endif
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+
+	/* Look in the list. */
+	if (_51d_lru_tree) {
+		unsigned long long seed = _51d_lru_seed ^ (long)args;
+
+		HA_SPIN_LOCK(OTHER_LOCK, &_51d_lru_lock);
+		lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
+		                _51d_lru_tree, (void*)args, 0);
+		if (lru && lru->domain) {
+			/* cache hit: serve the previously computed string */
+			_51d_retrieve_cache_entry(smp, lru);
+			HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+			return 1;
+		}
+		HA_SPIN_UNLOCK(OTHER_LOCK, &_51d_lru_lock);
+	}
+
+	/* Create workset. This will later contain detection results. */
+	ws = fiftyoneDegreesWorksetPoolGet(global_51degrees.pool);
+	if (!ws)
+		return 0;
+#endif
+
+	/* Duplicate the data and remove the "const" flag before device detection. */
+	if (!smp_dup(smp))
+		return 0;
+
+	/* the engines expect a NUL-terminated string */
+	smp->data.u.str.area[smp->data.u.str.data] = '\0';
+
+	/* Perform detection. */
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesMatch(ws, smp->data.u.str.area);
+	_51d_process_match(args, smp, ws);
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+#ifndef FIFTYONEDEGREES_NO_THREADING
+	offsets = fiftyoneDegreesCreateDeviceOffsets(&global_51degrees.data_set);
+	_51d_init_device_offsets(offsets);
+#else
+	offsets = &global_51degrees.device_offsets;
+#endif
+
+	/* single-header detection: only the input string is matched */
+	offsets->firstOffset->deviceOffset = fiftyoneDegreesGetDeviceOffset(&global_51degrees.data_set,
+	                                                                    smp->data.u.str.area);
+	offsets->size = 1;
+	_51d_process_match(args, smp, offsets);
+#endif
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	fiftyoneDegreesWorksetPoolRelease(global_51degrees.pool, ws);
+	if (lru)
+		_51d_insert_cache_entry(smp, lru, (void*)args);
+#endif
+
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+#ifndef FIFTYONEDEGREES_NO_THREADING
+	fiftyoneDegreesFreeDeviceOffsets(offsets);
+#endif
+#endif
+
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+	fiftyoneDegreesResultsHashFromUserAgent(_51d_results, smp->data.u.str.area,
+	                                        smp->data.u.str.data, exception);
+	if (FIFTYONE_DEGREES_EXCEPTION_FAILED)
+		return 0;
+
+	_51d_process_match(args, smp);
+#endif
+
+	_51d_set_smp(smp);
+	return 1;
+}
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+/* Caches the data set's important HTTP header names into <header_names> so
+ * per-request header matching avoids repeated library string lookups.
+ * Pattern variant. Called once from init_51degrees() after a successful
+ * data set load.
+ */
+void _51d_init_http_headers()
+{
+ int index = 0;
+ const fiftyoneDegreesAsciiString *headerName;
+ fiftyoneDegreesDataSet *ds = &global_51degrees.data_set;
+ global_51degrees.header_count = ds->httpHeadersCount;
+ /* NOTE(review): malloc() result is used unchecked below — an allocation
+  * failure at startup would crash. Confirm whether this is acceptable
+  * here as in the rest of the init path.
+  */
+ global_51degrees.header_names = malloc(global_51degrees.header_count * sizeof(struct buffer));
+ for (index = 0; index < global_51degrees.header_count; index++) {
+ headerName = fiftyoneDegreesGetString(ds, ds->httpHeaders[index].headerNameOffset);
+ /* The buffer aliases the library's in-memory string; nothing to free.
+  * <length> includes the terminating NUL, hence the -1.
+  */
+ (global_51degrees.header_names + index)->area = (char*)&headerName->firstByte;
+ (global_51degrees.header_names + index)->data = headerName->length - 1;
+ (global_51degrees.header_names + index)->size = (global_51degrees.header_names + index)->data;
+ }
+}
+#endif
+
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+/* Caches the data set's important HTTP header names and offsets so
+ * per-request header matching avoids repeated library lookups. Trie
+ * variant. Called once from init_51degrees() after a successful load.
+ */
+void _51d_init_http_headers()
+{
+	int index = 0;
+	fiftyoneDegreesDataSet *ds = &global_51degrees.data_set;
+	global_51degrees.header_count = fiftyoneDegreesGetHttpHeaderCount(ds);
+#ifdef FIFTYONEDEGREES_NO_THREADING
+	/* Single-threaded build: one shared offsets array, sized for every
+	 * possible header, allocated once and reused for all requests.
+	 */
+	global_51degrees.device_offsets.firstOffset = malloc(
+		global_51degrees.header_count * sizeof(fiftyoneDegreesDeviceOffset));
+	_51d_init_device_offsets(&global_51degrees.device_offsets);
+#endif
+	/* NOTE(review): malloc() results are used unchecked here, as in the
+	 * Pattern variant — an allocation failure at startup would crash.
+	 */
+	global_51degrees.header_names = malloc(global_51degrees.header_count * sizeof(struct buffer));
+	global_51degrees.header_offsets = malloc(global_51degrees.header_count * sizeof(int32_t));
+	for (index = 0; index < global_51degrees.header_count; index++) {
+		global_51degrees.header_offsets[index] = fiftyoneDegreesGetHttpHeaderNameOffset(ds, index);
+		global_51degrees.header_names[index].area = (char*)fiftyoneDegreesGetHttpHeaderNamePointer(ds, index);
+		global_51degrees.header_names[index].data = strlen(global_51degrees.header_names[index].area);
+		/* BUG FIX: <size> must be this entry's own length. It previously
+		 * read header_names->data, i.e. element 0's length for every
+		 * entry (the Pattern variant correctly uses the per-index value).
+		 */
+		global_51degrees.header_names[index].size = global_51degrees.header_names[index].data;
+	}
+}
+#endif
+
+/*
+ * module init / deinit functions. Returns 0 if OK, or a combination of ERR_*.
+ */
+static int init_51degrees(void)
+{
+	int i = 0;
+	struct _51d_property_names *name;
+	char **_51d_property_list = NULL;
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+	struct buffer *temp;
+	fiftyoneDegreesDataSetInitStatus _51d_dataset_status = DATA_SET_INIT_STATUS_NOT_SET;
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+	fiftyoneDegreesConfigHash config = fiftyoneDegreesHashInMemoryConfig;
+	fiftyoneDegreesPropertiesRequired properties = fiftyoneDegreesPropertiesDefault;
+	fiftyoneDegreesMemoryReader reader;
+	fiftyoneDegreesStatusCode status;
+	FIFTYONE_DEGREES_EXCEPTION_CREATE;
+#endif
+
+	/* Module not configured: nothing to do. */
+	if (!global_51degrees.data_file_path)
+		return ERR_NONE;
+
+	if (global.nbthread < 1) {
+		ha_alert("51Degrees: The thread count cannot be zero or negative.\n");
+		return (ERR_FATAL | ERR_ALERT);
+	}
+
+	/* Flatten the configured property-name list into an array for the
+	 * library init call; <i> ends up holding the entry count.
+	 */
+	if (!LIST_ISEMPTY(&global_51degrees.property_names)) {
+		i = 0;
+		list_for_each_entry(name, &global_51degrees.property_names, list)
+			++i;
+		_51d_property_list = calloc(i, sizeof(*_51d_property_list));
+		if (!_51d_property_list) {
+			/* BUG FIX: the calloc() result was previously used
+			 * unchecked in the loop below.
+			 */
+			ha_alert("51Degrees Setup - Out of memory.\n");
+			return (ERR_FATAL | ERR_ALERT);
+		}
+
+		i = 0;
+		list_for_each_entry(name, &global_51degrees.property_names, list)
+			_51d_property_list[i++] = name->name;
+	}
+
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+	_51d_dataset_status = fiftyoneDegreesInitWithPropertyArray(global_51degrees.data_file_path, &global_51degrees.data_set, (const char**)_51d_property_list, i);
+
+	/* <temp> collects a human-readable reason for a failed load. */
+	temp = get_trash_chunk();
+	chunk_reset(temp);
+
+	switch (_51d_dataset_status) {
+	case DATA_SET_INIT_STATUS_SUCCESS:
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+		global_51degrees.pool = fiftyoneDegreesWorksetPoolCreate(&global_51degrees.data_set, NULL, global.nbthread);
+#endif
+		_51d_init_http_headers();
+		break;
+	case DATA_SET_INIT_STATUS_INSUFFICIENT_MEMORY:
+		chunk_printf(temp, "Insufficient memory.");
+		break;
+	case DATA_SET_INIT_STATUS_CORRUPT_DATA:
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+		chunk_printf(temp, "Corrupt data file. Check that the data file provided is uncompressed and Pattern data format.");
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+		chunk_printf(temp, "Corrupt data file. Check that the data file provided is uncompressed and Trie data format.");
+#endif
+		break;
+	case DATA_SET_INIT_STATUS_INCORRECT_VERSION:
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+		chunk_printf(temp, "Incorrect version. Check that the data file provided is uncompressed and Pattern data format.");
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+		chunk_printf(temp, "Incorrect version. Check that the data file provided is uncompressed and Trie data format.");
+#endif
+		break;
+	case DATA_SET_INIT_STATUS_FILE_NOT_FOUND:
+		chunk_printf(temp, "File not found.");
+		break;
+	case DATA_SET_INIT_STATUS_NULL_POINTER:
+		chunk_printf(temp, "Null pointer to the existing dataset or memory location.");
+		break;
+	case DATA_SET_INIT_STATUS_POINTER_OUT_OF_BOUNDS:
+		chunk_printf(temp, "Allocated continuous memory containing 51Degrees data file appears to be smaller than expected. Most likely"
+		             " because the data file was not fully loaded into the allocated memory.");
+		break;
+	case DATA_SET_INIT_STATUS_NOT_SET:
+		chunk_printf(temp, "Data set not initialised.");
+		break;
+	default:
+		chunk_printf(temp, "Other error.");
+		break;
+	}
+	if (_51d_dataset_status != DATA_SET_INIT_STATUS_SUCCESS) {
+		if (temp->data)
+			ha_alert("51Degrees Setup - Error reading 51Degrees data file. %s\n",
+				 temp->area);
+		else
+			ha_alert("51Degrees Setup - Error reading 51Degrees data file.\n");
+		/* BUG FIX: the property list was previously leaked on this
+		 * error path (it was only freed on the success path below).
+		 */
+		free(_51d_property_list);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	free(_51d_property_list);
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+	/* Seed the UA cache key hash and create the LRU cache if configured. */
+	_51d_lru_seed = ha_random();
+	if (global_51degrees.cache_size) {
+		_51d_lru_tree = lru64_new(global_51degrees.cache_size);
+	}
+#endif
+
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+	config.b.b.freeData = true;
+
+	/* Apply only the options the user explicitly set (-1 / 0 = default). */
+	if (global_51degrees.use_perf_graph != -1)
+		config.usePerformanceGraph = global_51degrees.use_perf_graph;
+	if (global_51degrees.use_pred_graph != -1)
+		config.usePredictiveGraph = global_51degrees.use_pred_graph;
+
+	if (global_51degrees.drift > 0)
+		config.drift = global_51degrees.drift;
+	if (global_51degrees.difference > 0)
+		config.difference = global_51degrees.difference;
+
+	if (global_51degrees.allow_unmatched != -1)
+		config.b.allowUnmatched = global_51degrees.allow_unmatched;
+
+	/* Every collection must support one concurrent reader per thread. */
+	config.strings.concurrency =
+		config.properties.concurrency =
+		config.values.concurrency =
+		config.profiles.concurrency =
+		config.nodes.concurrency =
+		config.profileOffsets.concurrency =
+		config.maps.concurrency =
+		config.components.concurrency =
+		config.rootNodes.concurrency = global.nbthread;
+
+	properties.array = (const char **)_51d_property_list;
+	properties.count = i;
+
+	status = fiftyoneDegreesFileReadToByteArray(global_51degrees.data_file_path, &reader);
+	if (status == FIFTYONE_DEGREES_STATUS_SUCCESS && !FIFTYONE_DEGREES_EXCEPTION_FAILED) {
+		FIFTYONE_DEGREES_EXCEPTION_CLEAR;
+
+		status = fiftyoneDegreesHashInitManagerFromMemory(
+			&global_51degrees.manager,
+			&config,
+			&properties,
+			reader.startByte,
+			reader.length,
+			exception);
+	}
+
+	/* The manager keeps its own copy; the flat list is no longer needed. */
+	free(_51d_property_list);
+	_51d_property_list = NULL;
+	i = 0;
+
+	if (status != FIFTYONE_DEGREES_STATUS_SUCCESS || FIFTYONE_DEGREES_EXCEPTION_FAILED) {
+		const char *message = fiftyoneDegreesStatusGetMessage(status, global_51degrees.data_file_path);
+		if (message)
+			ha_alert("51Degrees Setup - Error reading 51Degrees data file. %s\n",
+				 message);
+		else
+			ha_alert("51Degrees Setup - Error reading 51Degrees data file.\n");
+		return ERR_ALERT | ERR_FATAL;
+	}
+#endif
+
+	return ERR_NONE;
+}
+
+/* Releases everything allocated by init_51degrees() and the config parser:
+ * header caches, workset pool / device offsets, the data set, the data file
+ * path and the configured property-name list. Registered as a post-deinit
+ * handler.
+ */
+static void deinit_51degrees(void)
+{
+ struct _51d_property_names *_51d_prop_name, *_51d_prop_nameb;
+
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED) || defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+ /* header_names entries alias library strings, so only the array itself
+  * is freed; the strings go away with the data set below.
+  */
+ free(global_51degrees.header_names);
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+ if (global_51degrees.pool)
+ fiftyoneDegreesWorksetPoolFree(global_51degrees.pool);
+#endif
+#ifdef FIFTYONEDEGREES_H_TRIE_INCLUDED
+#ifdef FIFTYONEDEGREES_NO_THREADING
+ free(global_51degrees.device_offsets.firstOffset);
+#endif
+ free(global_51degrees.header_offsets);
+#endif
+ fiftyoneDegreesDataSetFree(&global_51degrees.data_set);
+#endif
+
+ ha_free(&global_51degrees.data_file_path);
+ /* Tear down the config-time property-name list node by node. */
+ list_for_each_entry_safe(_51d_prop_name, _51d_prop_nameb, &global_51degrees.property_names, list) {
+ LIST_DELETE(&_51d_prop_name->list);
+ free(_51d_prop_name);
+ }
+
+#ifdef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+ /* lru64_destroy() releases entries in batches; loop until it reports
+  * the tree is fully gone. Safe on a NULL tree.
+  */
+ while (lru64_destroy(_51d_lru_tree));
+#endif
+}
+
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+/* Per-thread initialization for the V4 Hash engine: allocates this thread's
+ * header-value slots and the thread-local detection state. Returns 1 on
+ * success, 0 on allocation/init failure.
+ */
+static int init_51degrees_per_thread()
+{
+	/* Nothing to set up when no 51Degrees data file was configured. */
+	if (!global_51degrees.data_file_path)
+		return 1;
+
+	/* One slot per possible HTTP header for this thread. */
+	_51d_headers.count = 0;
+	_51d_headers.max = global.tune.max_http_hdr;
+	_51d_headers.buf = calloc(_51d_headers.max, sizeof(*_51d_headers.buf));
+
+	/* Short-circuits: the internal init only runs if the slots exist. */
+	return _51d_headers.buf && _51d_init_internal();
+}
+
+/* Per-thread teardown for the V4 Hash engine: frees this thread's results
+ * handle and header-value slots, then resets the bookkeeping counters.
+ */
+static void deinit_51degrees_per_thread()
+{
+	int idx;
+
+	/* Release the thread-local results handle, if any. */
+	if (_51d_results) {
+		fiftyoneDegreesResultsHashFree(_51d_results);
+		_51d_results = NULL;
+	}
+
+	/* Free every collected header value, then the slot array itself. */
+	if (_51d_headers.buf) {
+		for (idx = 0; idx < _51d_headers.max; idx++)
+			free(_51d_headers.buf[idx]);
+		free(_51d_headers.buf);
+		_51d_headers.buf = NULL;
+	}
+
+	_51d_headers.count = 0;
+	_51d_headers.max = 0;
+}
+#endif
+
+/* Global-section configuration keywords provided by this module. The Hash
+ * (V4) engine exposes extra tuning keywords that Pattern/Trie do not have.
+ */
+static struct cfg_kw_list _51dcfg_kws = {{ }, {
+ { CFG_GLOBAL, "51degrees-data-file", _51d_data_file },
+ { CFG_GLOBAL, "51degrees-property-name-list", _51d_property_name_list },
+ { CFG_GLOBAL, "51degrees-property-separator", _51d_property_separator },
+ { CFG_GLOBAL, "51degrees-cache-size", _51d_cache_size },
+#ifdef FIFTYONE_DEGREES_HASH_INCLUDED
+ { CFG_GLOBAL, "51degrees-use-performance-graph", _51d_use_perf_graph },
+ { CFG_GLOBAL, "51degrees-use-predictive-graph", _51d_use_pred_graph },
+ { CFG_GLOBAL, "51degrees-drift", _51d_drift },
+ { CFG_GLOBAL, "51degrees-difference", _51d_difference },
+ { CFG_GLOBAL, "51degrees-allow-unmatched", _51d_allow_unmatched },
+#endif
+ { 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &_51dcfg_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ { "51d.all", _51d_fetch, ARG5(1,STR,STR,STR,STR,STR), _51d_fetch_check, SMP_T_STR, SMP_USE_HRQHV },
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+static struct sample_conv_kw_list conv_kws = {ILH, {
+ { "51d.single", _51d_conv, ARG5(1,STR,STR,STR,STR,STR), _51d_conv_check, SMP_T_STR, SMP_T_STR },
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &conv_kws);
+
+/* Module lifecycle hooks: data set load after config check, teardown at
+ * deinit. Per-thread hooks exist only for the V4 Hash engine below.
+ */
+REGISTER_POST_CHECK(init_51degrees);
+REGISTER_POST_DEINIT(deinit_51degrees);
+
+/* Advertise which 51Degrees engine (and whether the dummy test library)
+ * this binary was built with, via "haproxy -vv".
+ */
+#if defined(FIFTYONEDEGREES_H_PATTERN_INCLUDED)
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+ REGISTER_BUILD_OPTS("Built with 51Degrees Pattern support.");
+#else
+ REGISTER_BUILD_OPTS("Built with 51Degrees Pattern support (dummy library).");
+#endif
+#elif defined(FIFTYONEDEGREES_H_TRIE_INCLUDED)
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+ REGISTER_BUILD_OPTS("Built with 51Degrees Trie support.");
+#else
+ REGISTER_BUILD_OPTS("Built with 51Degrees Trie support (dummy library).");
+#endif
+#elif defined(FIFTYONE_DEGREES_HASH_INCLUDED)
+ REGISTER_PER_THREAD_INIT(init_51degrees_per_thread);
+ REGISTER_PER_THREAD_DEINIT(deinit_51degrees_per_thread);
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+ REGISTER_BUILD_OPTS("Built with 51Degrees V4 Hash support.");
+#else
+ REGISTER_BUILD_OPTS("Built with 51Degrees V4 Hash support (dummy library).");
+#endif
+#endif
diff --git a/addons/51degrees/dummy/cityhash/city.c b/addons/51degrees/dummy/cityhash/city.c
new file mode 100644
index 0000000..b6b08bf
--- /dev/null
+++ b/addons/51degrees/dummy/cityhash/city.c
@@ -0,0 +1,4 @@
+// Dummy stand-in for the CityHash source: exists only so the dummy
+// library build produces a city.o for the linker.
+typedef struct cityhash_t {
+ // This is an empty structure to ensure a city.o is generated
+ // by the dummy library, it is never used.
+} dummyCityHash; \ No newline at end of file
diff --git a/addons/51degrees/dummy/pattern/51Degrees.c b/addons/51degrees/dummy/pattern/51Degrees.c
new file mode 100644
index 0000000..c002e5c
--- /dev/null
+++ b/addons/51degrees/dummy/pattern/51Degrees.c
@@ -0,0 +1,114 @@
+/* *********************************************************************
+ * This Source Code Form is copyright of 51Degrees Mobile Experts Limited.
+ * Copyright 2019 51Degrees Mobile Experts Limited, 5 Charlotte Close,
+ * Caversham, Reading, Berkshire, United Kingdom RG4 7BY
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
+ *
+ * If a copy of the MPL was not distributed with this file, You can obtain
+ * one at http://mozilla.org/MPL/2.0/.
+ *
+ * This Source Code Form is "Incompatible With Secondary Licenses", as
+ * defined by the Mozilla Public License, v. 2.0.
+ * *********************************************************************/
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ *
+ * Every function below is a stub: it performs no detection and returns
+ * a fixed value or a pointer to a static dummy object.
+ * *********************************************************************/
+#include "51Degrees.h"
+#include <stdlib.h>
+
+/* Stub: always reports rank 0. */
+int32_t fiftyoneDegreesGetSignatureRank(fiftyoneDegreesWorkset *ws) {
+ return 0;
+}
+
+/* Stub: every property is named "dummy-property". */
+const char* fiftyoneDegreesGetPropertyName(
+ const fiftyoneDegreesDataSet *dataSet,
+ const fiftyoneDegreesProperty *property) {
+ return "dummy-property";
+}
+
+/* Stub: reports zero values set. */
+int32_t fiftyoneDegreesSetValues(
+ fiftyoneDegreesWorkset *ws,
+ int32_t requiredPropertyIndex) {
+ return 0;
+}
+
+/* Stub: every value is named "dummy-value". */
+const char* fiftyoneDegreesGetValueName(
+ const fiftyoneDegreesDataSet *dataSet,
+ const fiftyoneDegreesValue *value) {
+ return "dummy-value";
+}
+
+/* Shared static objects handed out by the pool stubs below. */
+static fiftyoneDegreesDataSet dummyDataSet = {
+ 0,
+ (fiftyoneDegreesHttpHeader*)NULL,
+ 0,
+ (const fiftyoneDegreesProperty**)NULL
+};
+
+static fiftyoneDegreesWorkset dummyWorkset = {
+ &dummyDataSet,
+ 0,
+ (fiftyoneDegreesHttpHeaderWorkset*)NULL,
+ EXACT,
+ 0,
+ (const fiftyoneDegreesValue **)NULL
+};
+
+/* Stub: always returns the single shared static workset. */
+fiftyoneDegreesWorkset *fiftyoneDegreesWorksetPoolGet(
+ fiftyoneDegreesWorksetPool *pool) {
+ return &dummyWorkset;
+}
+
+void fiftyoneDegreesWorksetPoolRelease(
+ fiftyoneDegreesWorksetPool *pool,
+ fiftyoneDegreesWorkset *ws) {
+ return;
+}
+
+void fiftyoneDegreesMatchForHttpHeaders(fiftyoneDegreesWorkset *ws) {
+ return;
+}
+
+void fiftyoneDegreesMatch(
+ fiftyoneDegreesWorkset *ws,
+ const char* userAgent) {
+ return;
+}
+
+/* Stub: always reports a successful data set load. */
+fiftyoneDegreesDataSetInitStatus fiftyoneDegreesInitWithPropertyArray(
+ const char *fileName,
+ fiftyoneDegreesDataSet *dataSet,
+ const char** properties,
+ int32_t count) {
+ return DATA_SET_INIT_STATUS_SUCCESS;
+}
+
+static fiftyoneDegreesWorksetPool dummyWorksetPool;
+
+fiftyoneDegreesWorksetPool *fiftyoneDegreesWorksetPoolCreate(
+ fiftyoneDegreesDataSet *dataSet,
+ fiftyoneDegreesResultsetCache *cache,
+ int32_t size) {
+ return &dummyWorksetPool;
+}
+
+void fiftyoneDegreesWorksetPoolFree(
+ const fiftyoneDegreesWorksetPool *pool) {
+ return;
+}
+
+void fiftyoneDegreesDataSetFree(const fiftyoneDegreesDataSet *dataSet) {
+ return;
+}
+
+/* Stub string: zero length, NUL first byte. */
+static fiftyoneDegreesAsciiString dummyAsciiString = {0, 0};
+
+const fiftyoneDegreesAsciiString* fiftyoneDegreesGetString(
+ const fiftyoneDegreesDataSet *dataSet,
+ int32_t offset) {
+ return &dummyAsciiString;
+} \ No newline at end of file
diff --git a/addons/51degrees/dummy/pattern/51Degrees.h b/addons/51degrees/dummy/pattern/51Degrees.h
new file mode 100644
index 0000000..9aaf949
--- /dev/null
+++ b/addons/51degrees/dummy/pattern/51Degrees.h
@@ -0,0 +1,147 @@
+/* *********************************************************************
+ * This Source Code Form is copyright of 51Degrees Mobile Experts Limited.
+ * Copyright 2019 51Degrees Mobile Experts Limited, 5 Charlotte Close,
+ * Caversham, Reading, Berkshire, United Kingdom RG4 7BY
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
+ *
+ * If a copy of the MPL was not distributed with this file, You can obtain
+ * one at http://mozilla.org/MPL/2.0/.
+ *
+ * This Source Code Form is "Incompatible With Secondary Licenses", as
+ * defined by the Mozilla Public License, v. 2.0.
+ * *********************************************************************/
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ * *********************************************************************/
+#ifndef FIFTYONEDEGREES_H_INCLUDED
+#define FIFTYONEDEGREES_H_INCLUDED
+
+#ifndef FIFTYONEDEGREES_H_PATTERN_INCLUDED
+#define FIFTYONEDEGREES_H_PATTERN_INCLUDED
+#endif
+
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+#define FIFTYONEDEGREES_DUMMY_LIB
+#endif
+
+#include <stdint.h>
+
+typedef enum e_fiftyoneDegrees_MatchMethod {
+ NONE,
+ EXACT,
+ NUMERIC,
+ NEAREST,
+ CLOSEST
+} fiftyoneDegreesMatchMethod;
+
+typedef enum e_fiftyoneDegrees_DataSetInitStatus {
+ DATA_SET_INIT_STATUS_SUCCESS,
+ DATA_SET_INIT_STATUS_INSUFFICIENT_MEMORY,
+ DATA_SET_INIT_STATUS_CORRUPT_DATA,
+ DATA_SET_INIT_STATUS_INCORRECT_VERSION,
+ DATA_SET_INIT_STATUS_FILE_NOT_FOUND,
+ DATA_SET_INIT_STATUS_NOT_SET,
+ DATA_SET_INIT_STATUS_POINTER_OUT_OF_BOUNDS,
+ DATA_SET_INIT_STATUS_NULL_POINTER
+} fiftyoneDegreesDataSetInitStatus;
+
+typedef struct fiftyoneDegrees_ascii_string_t {
+ const int16_t length;
+ const char firstByte;
+} fiftyoneDegreesAsciiString;
+
+typedef struct fiftyoneDegrees_dataset_header_t {
+} fiftyoneDegreesDataSetHeader;
+
+typedef struct fiftyoneDegrees_workset_pool_t {
+} fiftyoneDegreesWorksetPool;
+
+typedef struct fiftyoneDegrees_property_t {
+} fiftyoneDegreesProperty;
+
+typedef struct fiftyoneDegrees_value_t {
+} fiftyoneDegreesValue;
+
+typedef struct fiftyoneDegrees_resultset_cache_t {
+} fiftyoneDegreesResultsetCache;
+
+typedef struct fiftyoneDegrees_http_header_t {
+ int32_t headerNameOffset;
+ const char *headerName;
+} fiftyoneDegreesHttpHeader;
+
+typedef struct fiftyoneDegrees_http_header_workset_t {
+ fiftyoneDegreesHttpHeader *header;
+ const char *headerValue;
+ int headerValueLength;
+} fiftyoneDegreesHttpHeaderWorkset;
+
+
+typedef struct fiftyoneDegrees_dataset_t {
+ int32_t httpHeadersCount;
+ fiftyoneDegreesHttpHeader *httpHeaders;
+ int32_t requiredPropertyCount;
+ const fiftyoneDegreesProperty **requiredProperties;
+} fiftyoneDegreesDataSet;
+
+typedef struct fiftyoneDegrees_workset_t {
+ fiftyoneDegreesDataSet *dataSet;
+ int32_t importantHeadersCount;
+ fiftyoneDegreesHttpHeaderWorkset *importantHeaders;
+ fiftyoneDegreesMatchMethod method;
+ int32_t difference;
+ const fiftyoneDegreesValue **values;
+} fiftyoneDegreesWorkset;
+
+int32_t fiftyoneDegreesGetSignatureRank(fiftyoneDegreesWorkset *ws);
+
+const char* fiftyoneDegreesGetPropertyName(
+ const fiftyoneDegreesDataSet *dataSet,
+ const fiftyoneDegreesProperty *property);
+
+int32_t fiftyoneDegreesSetValues(
+ fiftyoneDegreesWorkset *ws,
+ int32_t requiredPropertyIndex);
+
+const char* fiftyoneDegreesGetValueName(
+ const fiftyoneDegreesDataSet *dataSet,
+ const fiftyoneDegreesValue *value);
+
+fiftyoneDegreesWorkset *fiftyoneDegreesWorksetPoolGet(
+ fiftyoneDegreesWorksetPool *pool);
+
+void fiftyoneDegreesWorksetPoolRelease(
+ fiftyoneDegreesWorksetPool *pool,
+ fiftyoneDegreesWorkset *ws);
+
+void fiftyoneDegreesMatchForHttpHeaders(fiftyoneDegreesWorkset *ws);
+
+void fiftyoneDegreesMatch(
+ fiftyoneDegreesWorkset *ws,
+ const char* userAgent);
+
+fiftyoneDegreesDataSetInitStatus fiftyoneDegreesInitWithPropertyArray(
+ const char *fileName,
+ fiftyoneDegreesDataSet *dataSet,
+ const char** properties,
+ int32_t count);
+
+fiftyoneDegreesWorksetPool *fiftyoneDegreesWorksetPoolCreate(
+ fiftyoneDegreesDataSet *dataSet,
+ fiftyoneDegreesResultsetCache *cache,
+ int32_t size);
+
+void fiftyoneDegreesWorksetPoolFree(
+ const fiftyoneDegreesWorksetPool *pool);
+
+void fiftyoneDegreesDataSetFree(const fiftyoneDegreesDataSet *dataSet);
+
+const fiftyoneDegreesAsciiString* fiftyoneDegreesGetString(
+ const fiftyoneDegreesDataSet *dataSet,
+ int32_t offset);
+
+#endif \ No newline at end of file
diff --git a/addons/51degrees/dummy/threading.c b/addons/51degrees/dummy/threading.c
new file mode 100644
index 0000000..e65678d
--- /dev/null
+++ b/addons/51degrees/dummy/threading.c
@@ -0,0 +1,4 @@
+// Dummy stand-in for the 51Degrees threading source: exists only so the
+// dummy library build produces a threading.o for the linker.
+typedef struct fiftyoneDegrees_threading_t {
+ // This is an empty structure to ensure a threading.o is generated
+ // by the dummy library, it is never used.
+} dummyFiftyoneDegreesThreading; \ No newline at end of file
diff --git a/addons/51degrees/dummy/trie/51Degrees.c b/addons/51degrees/dummy/trie/51Degrees.c
new file mode 100644
index 0000000..7453061
--- /dev/null
+++ b/addons/51degrees/dummy/trie/51Degrees.c
@@ -0,0 +1,89 @@
+/* *********************************************************************
+ * This Source Code Form is copyright of 51Degrees Mobile Experts Limited.
+ * Copyright 2019 51Degrees Mobile Experts Limited, 5 Charlotte Close,
+ * Caversham, Reading, Berkshire, United Kingdom RG4 7BY
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
+ *
+ * If a copy of the MPL was not distributed with this file, You can obtain
+ * one at http://mozilla.org/MPL/2.0/.
+ *
+ * This Source Code Form is "Incompatible With Secondary Licenses", as
+ * defined by the Mozilla Public License, v. 2.0.
+ * *********************************************************************/
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ *
+ * Every function below is a stub: it performs no detection and returns
+ * a fixed value or a pointer to a static dummy object.
+ * *********************************************************************/
+#include "51Degrees.h"
+#include <stdlib.h>
+
+/* Stub: every user agent maps to device offset 0. */
+int fiftyoneDegreesGetDeviceOffset(
+ fiftyoneDegreesDataSet *dataSet,
+ const char *userAgent) {
+ return 0;
+}
+
+const char** fiftyoneDegreesGetRequiredPropertiesNames(
+ fiftyoneDegreesDataSet *dataSet) {
+ return NULL;
+}
+
+int fiftyoneDegreesGetRequiredPropertiesCount(
+ fiftyoneDegreesDataSet *dataSet) {
+ return 0;
+}
+
+int fiftyoneDegreesGetValueFromOffsets(
+ fiftyoneDegreesDataSet *dataSet,
+ fiftyoneDegreesDeviceOffsets* deviceOffsets,
+ int requiredPropertyIndex,
+ char* values,
+ int size) {
+ return 0;
+}
+
+/* Shared static offsets handed out by the create stub below. */
+static fiftyoneDegreesDeviceOffset dummyOffset = { 0, 0, "dummy-user-agent" };
+
+static fiftyoneDegreesDeviceOffsets dummyOffsets = { 1, &dummyOffset, NULL };
+
+fiftyoneDegreesDeviceOffsets* fiftyoneDegreesCreateDeviceOffsets(
+ fiftyoneDegreesDataSet *dataSet) {
+ return &dummyOffsets;
+}
+
+void fiftyoneDegreesFreeDeviceOffsets(
+ fiftyoneDegreesDeviceOffsets* offsets) {
+ return;
+}
+
+/* Stub: the dummy data set exposes no HTTP headers. */
+int fiftyoneDegreesGetHttpHeaderCount(
+ fiftyoneDegreesDataSet *dataSet) {
+ return 0;
+}
+
+int fiftyoneDegreesGetHttpHeaderNameOffset(
+ fiftyoneDegreesDataSet *dataSet,
+ int httpHeaderIndex) {
+ return 0;
+}
+
+const char* fiftyoneDegreesGetHttpHeaderNamePointer(
+ fiftyoneDegreesDataSet *dataSet,
+ int httpHeaderIndex) {
+ return "dummy-header-name";
+}
+
+/* Stub: always reports a successful data set load. */
+fiftyoneDegreesDataSetInitStatus fiftyoneDegreesInitWithPropertyArray(
+ const char* fileName,
+ fiftyoneDegreesDataSet *dataSet,
+ const char** properties,
+ int propertyCount) {
+ return DATA_SET_INIT_STATUS_SUCCESS;
+}
+
+void fiftyoneDegreesDataSetFree(fiftyoneDegreesDataSet *dataSet) {
+ return;
+} \ No newline at end of file
diff --git a/addons/51degrees/dummy/trie/51Degrees.h b/addons/51degrees/dummy/trie/51Degrees.h
new file mode 100644
index 0000000..bedcfd7
--- /dev/null
+++ b/addons/51degrees/dummy/trie/51Degrees.h
@@ -0,0 +1,112 @@
+/* *********************************************************************
+ * This Source Code Form is copyright of 51Degrees Mobile Experts Limited.
+ * Copyright 2019 51Degrees Mobile Experts Limited, 5 Charlotte Close,
+ * Caversham, Reading, Berkshire, United Kingdom RG4 7BY
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
+ *
+ * If a copy of the MPL was not distributed with this file, You can obtain
+ * one at http://mozilla.org/MPL/2.0/.
+ *
+ * This Source Code Form is "Incompatible With Secondary Licenses", as
+ * defined by the Mozilla Public License, v. 2.0.
+ * *********************************************************************/
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ * *********************************************************************/
+#ifndef FIFTYONEDEGREES_H_INCLUDED
+#define FIFTYONEDEGREES_H_INCLUDED
+
+#ifndef FIFTYONEDEGREES_H_TRIE_INCLUDED
+#define FIFTYONEDEGREES_H_TRIE_INCLUDED
+#endif
+
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+#define FIFTYONEDEGREES_DUMMY_LIB
+#endif
+
+#include <stdint.h>
+
+typedef enum e_fiftyoneDegrees_DataSetInitStatus {
+ DATA_SET_INIT_STATUS_SUCCESS,
+ DATA_SET_INIT_STATUS_INSUFFICIENT_MEMORY,
+ DATA_SET_INIT_STATUS_CORRUPT_DATA,
+ DATA_SET_INIT_STATUS_INCORRECT_VERSION,
+ DATA_SET_INIT_STATUS_FILE_NOT_FOUND,
+ DATA_SET_INIT_STATUS_NOT_SET,
+ DATA_SET_INIT_STATUS_POINTER_OUT_OF_BOUNDS,
+ DATA_SET_INIT_STATUS_NULL_POINTER
+} fiftyoneDegreesDataSetInitStatus;
+
+typedef struct fiftyoneDegrees_integers_t {
+ int32_t *firstElement;
+ unsigned int count;
+ int freeMemory;
+} fiftyoneDegreesIntegers;
+
+typedef struct fiftyoneDegrees_dataset_t {
+ fiftyoneDegreesIntegers uniqueHttpHeaders;
+} fiftyoneDegreesDataSet;
+
+typedef struct fiftyoneDegrees_active_dataset_t {
+
+} fiftyoneDegreesActiveDataSet;
+
+typedef struct fiftyoneDegrees_device_offset_t {
+ int httpHeaderOffset;
+ int deviceOffset;
+ char *userAgent;
+} fiftyoneDegreesDeviceOffset;
+
+typedef struct fiftyoneDegrees_device_offsets_t {
+ int size;
+ fiftyoneDegreesDeviceOffset *firstOffset;
+ fiftyoneDegreesActiveDataSet *active;
+} fiftyoneDegreesDeviceOffsets;
+
+int fiftyoneDegreesGetDeviceOffset(
+ fiftyoneDegreesDataSet *dataSet,
+ const char *userAgent);
+
+const char** fiftyoneDegreesGetRequiredPropertiesNames(
+ fiftyoneDegreesDataSet *dataSet);
+
+int fiftyoneDegreesGetRequiredPropertiesCount(
+ fiftyoneDegreesDataSet *dataSet);
+
+int fiftyoneDegreesGetValueFromOffsets(
+ fiftyoneDegreesDataSet *dataSet,
+ fiftyoneDegreesDeviceOffsets* deviceOffsets,
+ int requiredPropertyIndex,
+ char* values,
+ int size);
+
+fiftyoneDegreesDeviceOffsets* fiftyoneDegreesCreateDeviceOffsets(
+ fiftyoneDegreesDataSet *dataSet);
+
+void fiftyoneDegreesFreeDeviceOffsets(
+ fiftyoneDegreesDeviceOffsets* offsets);
+
+int fiftyoneDegreesGetHttpHeaderCount(
+ fiftyoneDegreesDataSet *dataSet);
+
+int fiftyoneDegreesGetHttpHeaderNameOffset(
+ fiftyoneDegreesDataSet *dataSet,
+ int httpHeaderIndex);
+
+const char* fiftyoneDegreesGetHttpHeaderNamePointer(
+ fiftyoneDegreesDataSet *dataSet,
+ int httpHeaderIndex);
+
+fiftyoneDegreesDataSetInitStatus fiftyoneDegreesInitWithPropertyArray(
+ const char* fileName,
+ fiftyoneDegreesDataSet *dataSet,
+ const char** properties,
+ int propertyCount);
+
+void fiftyoneDegreesDataSetFree(fiftyoneDegreesDataSet *dataSet);
+
+#endif \ No newline at end of file
diff --git a/addons/51degrees/dummy/v4hash/hash/fiftyone.h b/addons/51degrees/dummy/v4hash/hash/fiftyone.h
new file mode 100644
index 0000000..fe9da87
--- /dev/null
+++ b/addons/51degrees/dummy/v4hash/hash/fiftyone.h
@@ -0,0 +1,34 @@
+/* *********************************************************************
+ * This Original Work is copyright of 51 Degrees Mobile Experts Limited.
+ * Copyright 2022 51 Degrees Mobile Experts Limited, Davidson House,
+ * Forbury Square, Reading, Berkshire, United Kingdom RG1 3EU.
+ *
+ * This Original Work is licensed under the European Union Public Licence
+ * (EUPL) v.1.2 and is subject to its terms as set out below.
+ *
+ * If a copy of the EUPL was not distributed with this file, You can obtain
+ * one at https://opensource.org/licenses/EUPL-1.2.
+ *
+ * The 'Compatible Licences' set out in the Appendix to the EUPL (as may be
+ * amended by the European Commission) shall be deemed incompatible for
+ * the purposes of the Work and the provisions of the compatibility
+ * clause in Article 5 of the EUPL shall not apply.
+ *
+ * If using the Work as, or as part of, a network application, by
+ * including the attribution notice(s) required under Article 5 of the EUPL
+ * in the end user terms of the application under an appropriate heading,
+ * such notice(s) shall fulfill the requirements of that article.
+ * ********************************************************************* */
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ * *********************************************************************/
+#ifndef FIFTYONE_DEGREES_SYNONYM_HASH_INCLUDED
+#define FIFTYONE_DEGREES_SYNONYM_HASH_INCLUDED
+
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+#define FIFTYONEDEGREES_DUMMY_LIB
+#endif
+
+#endif
diff --git a/addons/51degrees/dummy/v4hash/hash/hash.c b/addons/51degrees/dummy/v4hash/hash/hash.c
new file mode 100644
index 0000000..e9a739e
--- /dev/null
+++ b/addons/51degrees/dummy/v4hash/hash/hash.c
@@ -0,0 +1,130 @@
+/* *********************************************************************
+ * This Original Work is copyright of 51 Degrees Mobile Experts Limited.
+ * Copyright 2022 51 Degrees Mobile Experts Limited, Davidson House,
+ * Forbury Square, Reading, Berkshire, United Kingdom RG1 3EU.
+ *
+ * This Original Work is the subject of the following patents and patent
+ * applications, owned by 51 Degrees Mobile Experts Limited of 5 Charlotte
+ * Close, Caversham, Reading, Berkshire, United Kingdom RG4 7BY:
+ * European Patent No. 3438848; and
+ * United States Patent No. 10,482,175.
+ *
+ * This Original Work is licensed under the European Union Public Licence
+ * (EUPL) v.1.2 and is subject to its terms as set out below.
+ *
+ * If a copy of the EUPL was not distributed with this file, You can obtain
+ * one at https://opensource.org/licenses/EUPL-1.2.
+ *
+ * The 'Compatible Licences' set out in the Appendix to the EUPL (as may be
+ * amended by the European Commission) shall be deemed incompatible for
+ * the purposes of the Work and the provisions of the compatibility
+ * clause in Article 5 of the EUPL shall not apply.
+ *
+ * If using the Work as, or as part of, a network application, by
+ * including the attribution notice(s) required under Article 5 of the EUPL
+ * in the end user terms of the application under an appropriate heading,
+ * such notice(s) shall fulfill the requirements of that article.
+ * ********************************************************************* */
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ * *********************************************************************/
+#include "hash.h"
+#include "fiftyone.h"
+
+static fiftyoneDegreesHeaders dummyHeaders = { };
+static fiftyoneDegreesDataSetBase dummyDataSet = { &dummyHeaders };
+
+fiftyoneDegreesConfigHash fiftyoneDegreesHashInMemoryConfig;
+fiftyoneDegreesPropertiesRequired fiftyoneDegreesPropertiesDefault;
+
+fiftyoneDegreesDataSetBase* fiftyoneDegreesDataSetGet(
+ fiftyoneDegreesResourceManager *manager) {
+ return &dummyDataSet;
+}
+
+void fiftyoneDegreesResultsHashFree(
+ fiftyoneDegreesResultsHash* results) {
+ return;
+}
+
+static fiftyoneDegreesResultsHash dummyResults = { };
+
+fiftyoneDegreesResultsHash* fiftyoneDegreesResultsHashCreate(
+ fiftyoneDegreesResourceManager *manager,
+ uint32_t userAgentCapacity,
+ uint32_t overridesCapacity) {
+ return &dummyResults;
+}
+
+void fiftyoneDegreesDataSetRelease(fiftyoneDegreesDataSetBase *dataSet) {
+ return;
+}
+
+static fiftyoneDegreesEvidenceKeyValuePairArray dummyEvidence = { };
+
+fiftyoneDegreesEvidenceKeyValuePairArray*
+fiftyoneDegreesEvidenceCreate(uint32_t capacity) {
+ return &dummyEvidence;
+}
+
+fiftyoneDegreesEvidenceKeyValuePair* fiftyoneDegreesEvidenceAddString(
+ fiftyoneDegreesEvidenceKeyValuePairArray *evidence,
+ fiftyoneDegreesEvidencePrefix prefix,
+ const char *field,
+ const char *originalValue) {
+ return NULL;
+}
+
+size_t fiftyoneDegreesResultsHashGetValuesString(
+ fiftyoneDegreesResultsHash* results,
+ const char *propertyName,
+ char *buffer,
+ size_t bufferLength,
+ const char *separator,
+ fiftyoneDegreesException *exception) {
+ return 0;
+}
+
+void fiftyoneDegreesResultsHashFromEvidence(
+ fiftyoneDegreesResultsHash *results,
+ fiftyoneDegreesEvidenceKeyValuePairArray *evidence,
+ fiftyoneDegreesException *exception) {
+ return;
+}
+
+void fiftyoneDegreesEvidenceFree(fiftyoneDegreesEvidenceKeyValuePairArray *evidence) {
+ return;
+}
+
+void fiftyoneDegreesResultsHashFromUserAgent(
+ fiftyoneDegreesResultsHash *results,
+ const char* userAgent,
+ size_t userAgentLength,
+ fiftyoneDegreesException *exception) {
+ return;
+}
+
+fiftyoneDegreesStatusCode fiftyoneDegreesFileReadToByteArray(
+ const char *fileName,
+ fiftyoneDegreesMemoryReader *reader) {
+ return FIFTYONE_DEGREES_STATUS_SUCCESS;
+}
+
+fiftyoneDegreesStatusCode
+fiftyoneDegreesHashInitManagerFromMemory(
+ fiftyoneDegreesResourceManager *manager,
+ fiftyoneDegreesConfigHash *config,
+ fiftyoneDegreesPropertiesRequired *properties,
+ void *memory,
+ long size,
+ fiftyoneDegreesException *exception) {
+ return FIFTYONE_DEGREES_STATUS_SUCCESS;
+}
+
+const char* fiftyoneDegreesStatusGetMessage(
+ fiftyoneDegreesStatusCode status,
+ const char *fileName) {
+ return NULL;
+}
diff --git a/addons/51degrees/dummy/v4hash/hash/hash.h b/addons/51degrees/dummy/v4hash/hash/hash.h
new file mode 100644
index 0000000..5d04d17
--- /dev/null
+++ b/addons/51degrees/dummy/v4hash/hash/hash.h
@@ -0,0 +1,277 @@
+/* *********************************************************************
+ * This Original Work is copyright of 51 Degrees Mobile Experts Limited.
+ * Copyright 2022 51 Degrees Mobile Experts Limited, Davidson House,
+ * Forbury Square, Reading, Berkshire, United Kingdom RG1 3EU.
+ *
+ * This Original Work is the subject of the following patents and patent
+ * applications, owned by 51 Degrees Mobile Experts Limited of 5 Charlotte
+ * Close, Caversham, Reading, Berkshire, United Kingdom RG4 7BY:
+ * European Patent No. 3438848; and
+ * United States Patent No. 10,482,175.
+ *
+ * This Original Work is licensed under the European Union Public Licence
+ * (EUPL) v.1.2 and is subject to its terms as set out below.
+ *
+ * If a copy of the EUPL was not distributed with this file, You can obtain
+ * one at https://opensource.org/licenses/EUPL-1.2.
+ *
+ * The 'Compatible Licences' set out in the Appendix to the EUPL (as may be
+ * amended by the European Commission) shall be deemed incompatible for
+ * the purposes of the Work and the provisions of the compatibility
+ * clause in Article 5 of the EUPL shall not apply.
+ *
+ * If using the Work as, or as part of, a network application, by
+ * including the attribution notice(s) required under Article 5 of the EUPL
+ * in the end user terms of the application under an appropriate heading,
+ * such notice(s) shall fulfill the requirements of that article.
+ * ********************************************************************* */
+
+/* *********************************************************************
+ * Dummy library for HAProxy. This does not function, and is designed
+ * solely for HAProxy testing purposes.
+ * *********************************************************************/
+#ifndef FIFTYONE_DEGREES_HASH_INCLUDED
+#define FIFTYONE_DEGREES_HASH_INCLUDED
+
+#ifndef FIFTYONEDEGREES_DUMMY_LIB
+#define FIFTYONEDEGREES_DUMMY_LIB
+#endif
+
+#include <stdlib.h>
+#include <inttypes.h>
+
+typedef int bool;
+enum { false, true };
+
+typedef unsigned char byte;
+
+typedef enum e_fiftyone_degrees_status_code {
+ FIFTYONE_DEGREES_STATUS_SUCCESS,
+ FIFTYONE_DEGREES_STATUS_NOT_SET,
+} fiftyoneDegreesStatusCode;
+
+typedef struct fiftyone_degrees_exception_t {
+ unsigned int status;
+} fiftyoneDegreesException;
+
+#define FIFTYONE_DEGREES_EXCEPTION_CLEAR \
+ exception->status = FIFTYONE_DEGREES_STATUS_NOT_SET;
+
+#define FIFTYONE_DEGREES_EXCEPTION_OKAY \
+ (exception == NULL || exception->status == FIFTYONE_DEGREES_STATUS_NOT_SET)
+
+#define FIFTYONE_DEGREES_EXCEPTION_FAILED \
+ (!FIFTYONE_DEGREES_EXCEPTION_OKAY)
+
+#define FIFTYONE_DEGREES_EXCEPTION_CREATE \
+ fiftyoneDegreesException exceptionValue; \
+ fiftyoneDegreesException *exception = &exceptionValue; \
+ FIFTYONE_DEGREES_EXCEPTION_CLEAR
+
+#define FIFTYONE_DEGREES_ARRAY_TYPE(t, m) \
+typedef struct fiftyone_degrees_array_##t##_t { \
+ uint32_t count; \
+ uint32_t capacity; \
+ t *items; \
+ m \
+} t##Array;
+
+typedef struct fiftyone_degrees_results_base_t {
+ void *dataSet;
+} fiftyoneDegreesResultsBase;
+
+typedef struct fiftyone_degrees_results_device_detection_t {
+ fiftyoneDegreesResultsBase b;
+} fiftyoneDegreesResultsDeviceDetection;
+
+typedef struct fiftyone_degrees_collection_item_t {
+
+} fiftyoneDegreesCollectionItem;
+
+typedef struct fiftyone_degrees_list_t {
+
+} fiftyoneDegreesList;
+
+typedef struct fiftyone_degrees_evidence_key_value_pair_t {
+
+} fiftyoneDegreesEvidenceKeyValuePair;
+
+#define EVIDENCE_KEY_VALUE_MEMBERS \
+ struct fiftyone_degrees_array_fiftyoneDegreesEvidenceKeyValuePair_t* pseudoEvidence;
+
+FIFTYONE_DEGREES_ARRAY_TYPE(
+ fiftyoneDegreesEvidenceKeyValuePair,
+ EVIDENCE_KEY_VALUE_MEMBERS)
+
+#define FIFTYONE_DEGREES_RESULTS_HASH_MEMBERS \
+ fiftyoneDegreesResultsDeviceDetection b; \
+ fiftyoneDegreesCollectionItem propertyItem; \
+ fiftyoneDegreesList values; \
+ fiftyoneDegreesEvidenceKeyValuePairArray* pseudoEvidence;
+
+typedef struct fiftyone_degrees_result_hash_t {
+
+} fiftyoneDegreesResultHash;
+
+FIFTYONE_DEGREES_ARRAY_TYPE(
+ fiftyoneDegreesResultHash,
+ FIFTYONE_DEGREES_RESULTS_HASH_MEMBERS)
+
+typedef fiftyoneDegreesResultHashArray fiftyoneDegreesResultsHash;
+
+typedef struct fiftyone_degrees_resource_manager_t {
+
+} fiftyoneDegreesResourceManager;
+
+typedef struct fiftyone_degrees_header_t {
+ const char* name;
+ size_t nameLength;
+} fiftyoneDegreesHeader;
+
+#define FIFTYONE_DEGREES_HEADERS_MEMBERS \
+ bool expectUpperPrefixedHeaders; \
+ uint32_t pseudoHeadersCount;
+
+FIFTYONE_DEGREES_ARRAY_TYPE(
+ fiftyoneDegreesHeader,
+ FIFTYONE_DEGREES_HEADERS_MEMBERS);
+
+typedef fiftyoneDegreesHeaderArray fiftyoneDegreesHeaders;
+
+typedef struct fiftyone_degrees_dataset_base_t {
+ fiftyoneDegreesHeaders *uniqueHeaders;
+} fiftyoneDegreesDataSetBase;
+
+typedef struct fiftyone_degrees_dataset_device_detection_t {
+ fiftyoneDegreesDataSetBase b;
+} fiftyoneDegreesDataSetDeviceDetection;
+
+typedef struct fiftyone_degrees_dataset_hash_t {
+ fiftyoneDegreesDataSetDeviceDetection b;
+} fiftyoneDegreesDataSetHash;
+
+typedef enum e_fiftyone_degrees_evidence_prefix {
+ FIFTYONE_DEGREES_EVIDENCE_HTTP_HEADER_STRING = 1 << 0,
+ FIFTYONE_DEGREES_EVIDENCE_HTTP_HEADER_IP_ADDRESSES = 1 << 1,
+ FIFTYONE_DEGREES_EVIDENCE_SERVER = 1 << 2,
+ FIFTYONE_DEGREES_EVIDENCE_QUERY = 1 << 3,
+ FIFTYONE_DEGREES_EVIDENCE_COOKIE = 1 << 4,
+ FIFTYONE_DEGREES_EVIDENCE_IGNORE = 1 << 7,
+} fiftyoneDegreesEvidencePrefix;
+
+typedef struct fiftyone_degrees_config_base_t {
+ bool freeData;
+} fiftyoneDegreesConfigBase;
+
+typedef struct fiftyone_degrees_config_device_detecton_t {
+ fiftyoneDegreesConfigBase b;
+ bool allowUnmatched;
+} fiftyoneDegreesConfigDeviceDetection;
+
+typedef struct fiftyone_degrees_collection_config_t {
+ uint16_t concurrency;
+} fiftyoneDegreesCollectionConfig;
+
+typedef struct fiftyone_degrees_config_hash_t {
+ fiftyoneDegreesConfigDeviceDetection b;
+ fiftyoneDegreesCollectionConfig strings;
+ fiftyoneDegreesCollectionConfig components;
+ fiftyoneDegreesCollectionConfig maps;
+ fiftyoneDegreesCollectionConfig properties;
+ fiftyoneDegreesCollectionConfig values;
+ fiftyoneDegreesCollectionConfig profiles;
+ fiftyoneDegreesCollectionConfig rootNodes;
+ fiftyoneDegreesCollectionConfig nodes;
+ fiftyoneDegreesCollectionConfig profileOffsets;
+ int32_t difference;
+ int32_t drift;
+ bool usePerformanceGraph;
+ bool usePredictiveGraph;
+} fiftyoneDegreesConfigHash;
+
+extern fiftyoneDegreesConfigHash fiftyoneDegreesHashInMemoryConfig;
+
+typedef struct fiftyone_degrees_property_available_t {
+
+} fiftyoneDegreesPropertyAvailable;
+
+FIFTYONE_DEGREES_ARRAY_TYPE(fiftyoneDegreesPropertyAvailable,)
+
+typedef fiftyoneDegreesPropertyAvailableArray fiftyoneDegreesPropertiesAvailable;
+
+typedef struct fiftyone_degrees_properties_required_t {
+ const char **array;
+ int count;
+ const char *string;
+ fiftyoneDegreesPropertiesAvailable *existing;
+} fiftyoneDegreesPropertiesRequired;
+
+extern fiftyoneDegreesPropertiesRequired fiftyoneDegreesPropertiesDefault;
+
+typedef struct fiftyone_degrees_memory_reader_t {
+ byte *startByte;
+ byte *current;
+ byte *lastByte;
+ long length;
+} fiftyoneDegreesMemoryReader;
+
+fiftyoneDegreesDataSetBase* fiftyoneDegreesDataSetGet(
+ fiftyoneDegreesResourceManager *manager);
+
+void fiftyoneDegreesResultsHashFree(
+ fiftyoneDegreesResultsHash* results);
+
+fiftyoneDegreesResultsHash* fiftyoneDegreesResultsHashCreate(
+ fiftyoneDegreesResourceManager *manager,
+ uint32_t userAgentCapacity,
+ uint32_t overridesCapacity);
+
+void fiftyoneDegreesDataSetRelease(fiftyoneDegreesDataSetBase *dataSet);
+
+fiftyoneDegreesEvidenceKeyValuePairArray* fiftyoneDegreesEvidenceCreate(uint32_t capacity);
+
+fiftyoneDegreesEvidenceKeyValuePair* fiftyoneDegreesEvidenceAddString(
+ fiftyoneDegreesEvidenceKeyValuePairArray *evidence,
+ fiftyoneDegreesEvidencePrefix prefix,
+ const char *field,
+ const char *originalValue);
+
+size_t fiftyoneDegreesResultsHashGetValuesString(
+ fiftyoneDegreesResultsHash* results,
+ const char *propertyName,
+ char *buffer,
+ size_t bufferLength,
+ const char *separator,
+ fiftyoneDegreesException *exception);
+
+void fiftyoneDegreesResultsHashFromEvidence(
+ fiftyoneDegreesResultsHash *results,
+ fiftyoneDegreesEvidenceKeyValuePairArray *evidence,
+ fiftyoneDegreesException *exception);
+
+void fiftyoneDegreesEvidenceFree(fiftyoneDegreesEvidenceKeyValuePairArray *evidence);
+
+void fiftyoneDegreesResultsHashFromUserAgent(
+ fiftyoneDegreesResultsHash *results,
+ const char* userAgent,
+ size_t userAgentLength,
+ fiftyoneDegreesException *exception);
+
+fiftyoneDegreesStatusCode fiftyoneDegreesFileReadToByteArray(
+ const char *fileName,
+ fiftyoneDegreesMemoryReader *reader);
+
+fiftyoneDegreesStatusCode
+fiftyoneDegreesHashInitManagerFromMemory(
+ fiftyoneDegreesResourceManager *manager,
+ fiftyoneDegreesConfigHash *config,
+ fiftyoneDegreesPropertiesRequired *properties,
+ void *memory,
+ long size,
+ fiftyoneDegreesException *exception);
+
+const char* fiftyoneDegreesStatusGetMessage(
+ fiftyoneDegreesStatusCode status,
+ const char *fileName);
+
+#endif
diff --git a/addons/deviceatlas/Makefile b/addons/deviceatlas/Makefile
new file mode 100644
index 0000000..fbcffca
--- /dev/null
+++ b/addons/deviceatlas/Makefile
@@ -0,0 +1,48 @@
+# DEVICEATLAS_SRC : DeviceAtlas API source root path
+
+
+OS := $(shell uname -s)
+OBJS := dadwsch.o
+CFLAGS := -g -O2
+LDFLAGS :=
+
+CURL_CONFIG := curl-config
+CURLDIR := $(shell $(CURL_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+CURL_INC := $(CURLDIR)/include
+CURL_LIB := $(CURLDIR)/lib
+CURL_LDFLAGS := $(shell $(CURL_CONFIG) --libs 2>/dev/null || echo -L /usr/local/lib -lcurl)
+
+PCRE2_CONFIG := pcre2-config
+PCRE2DIR := $(shell $(PCRE2_CONFIG) --prefix 2>/dev/null || echo /usr/local)
+PCRE2_INC := $(PCRE2DIR)/include
+PCRE2_LIB := $(PCRE2DIR)/lib
+PCRE2_LDFLAGS := $(shell $(PCRE2_CONFIG) --libs8 2>/dev/null || echo /usr/local)
+
+ifeq ($(DEVICEATLAS_SRC),)
+dadwsch: dadwsch.c
+ $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+
+LDFLAGS += -lda
+else
+DEVICEATLAS_INC = $(DEVICEATLAS_SRC)
+DEVICEATLAS_LIB = $(DEVICEATLAS_SRC)
+CFLAGS += -DDA_REGEX_HDR=\"dac_pcre2.c\" -DDA_REGEX_TAG=2
+CFLAGS += -DMOBI_CURL -DMOBI_CURLSSET -DMOBI_GZ -DMOBI_ZIP
+CFLAGS += -I$(DEVICEATLAS_INC) -I$(CURL_INC) -I$(PCRE2DIR)
+LDFLAGS += $(CURL_LDFLAGS) $(PCRE2_LDFLAGS) -lz -lzip -lpthread
+
+dadwsch: dadwsch.c $(DEVICEATLAS_SRC)/dac.c $(DEVICEATLAS_SRC)/dasch.c $(DEVICEATLAS_SRC)/dadwarc.c $(DEVICEATLAS_SRC)/dadwcom.c $(DEVICEATLAS_SRC)/dadwcurl.c $(DEVICEATLAS_SRC)/json.c $(DEVICEATLAS_SRC)/Os/daunix.c
+ $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS)
+endif
+
+ifeq ($(OS), Linux)
+LDFLAGS += -lrt
+endif
+ifeq ($(OS), SunOS)
+LDFLAGS += -lrt
+endif
+
+clean:
+ rm -f *.o
+ rm -f $(DEVICEATLAS_LIB)*.o
+ rm -f dadwsch
diff --git a/addons/deviceatlas/da.c b/addons/deviceatlas/da.c
new file mode 100644
index 0000000..969dfaa
--- /dev/null
+++ b/addons/deviceatlas/da.c
@@ -0,0 +1,501 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/sample.h>
+#include <haproxy/tools.h>
+#include <dac.h>
+
+#define ATLASTOKSZ PATH_MAX
+#define ATLASMAPNM "/hapdeviceatlas"
+
+static struct {
+ void *atlasimgptr;
+ void *atlasmap;
+ char *jsonpath;
+ char *cookiename;
+ size_t cookienamelen;
+ int atlasfd;
+ da_atlas_t atlas;
+ da_evidence_id_t useragentid;
+ da_severity_t loglevel;
+ char separator;
+ unsigned char daset:1;
+} global_deviceatlas = {
+ .loglevel = 0,
+ .jsonpath = 0,
+ .cookiename = 0,
+ .cookienamelen = 0,
+ .atlasmap = NULL,
+ .atlasfd = -1,
+ .useragentid = 0,
+ .daset = 0,
+ .separator = '|',
+};
+
+__decl_thread(HA_SPINLOCK_T dadwsch_lock);
+
+static int da_json_file(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "deviceatlas json file : expects a json path.\n");
+ return -1;
+ }
+ global_deviceatlas.jsonpath = strdup(args[1]);
+ return 0;
+}
+
+static int da_log_level(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int loglevel;
+ if (*(args[1]) == 0) {
+ memprintf(err, "deviceatlas log level : expects an integer argument.\n");
+ return -1;
+ }
+
+ loglevel = atol(args[1]);
+ if (loglevel < 0 || loglevel > 3) {
+ memprintf(err, "deviceatlas log level : expects a log level between 0 and 3, %s given.\n", args[1]);
+ } else {
+ global_deviceatlas.loglevel = (da_severity_t)loglevel;
+ }
+
+ return 0;
+}
+
+static int da_property_separator(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "deviceatlas property separator : expects a character argument.\n");
+ return -1;
+ }
+ global_deviceatlas.separator = *args[1];
+ return 0;
+}
+
+static int da_properties_cookie(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "deviceatlas cookie name : expects a string argument.\n");
+ return -1;
+ } else {
+ global_deviceatlas.cookiename = strdup(args[1]);
+ }
+ global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
+ return 0;
+}
+
+static size_t da_haproxy_read(void *ctx, size_t len, char *buf)
+{
+ return fread(buf, 1, len, ctx);
+}
+
+static da_status_t da_haproxy_seek(void *ctx, off_t off)
+{
+ return fseek(ctx, off, SEEK_SET) != -1 ? DA_OK : DA_SYS;
+}
+
+static void da_haproxy_log(da_severity_t severity, da_status_t status,
+ const char *fmt, va_list args)
+{
+ if (global_deviceatlas.loglevel && severity <= global_deviceatlas.loglevel) {
+ char logbuf[256];
+ vsnprintf(logbuf, sizeof(logbuf), fmt, args);
+ ha_warning("deviceatlas : %s.\n", logbuf);
+ }
+}
+
+#define DA_COOKIENAME_DEFAULT "DAPROPS"
+
+/*
+ * module init / deinit functions. Returns 0 if OK, or a combination of ERR_*.
+ */
+static int init_deviceatlas(void)
+{
+ int err_code = ERR_NONE;
+
+ if (global_deviceatlas.jsonpath != 0) {
+ FILE *jsonp;
+ da_property_decl_t extraprops[] = {{0, 0}};
+ size_t atlasimglen;
+ da_status_t status;
+
+ jsonp = fopen(global_deviceatlas.jsonpath, "r");
+ if (jsonp == 0) {
+ ha_alert("deviceatlas : '%s' json file has invalid path or is not readable.\n",
+ global_deviceatlas.jsonpath);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ da_init();
+ da_seterrorfunc(da_haproxy_log);
+ status = da_atlas_compile(jsonp, da_haproxy_read, da_haproxy_seek,
+ &global_deviceatlas.atlasimgptr, &atlasimglen);
+ fclose(jsonp);
+ if (status != DA_OK) {
+ ha_alert("deviceatlas : '%s' json file is invalid.\n",
+ global_deviceatlas.jsonpath);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ status = da_atlas_open(&global_deviceatlas.atlas, extraprops,
+ global_deviceatlas.atlasimgptr, atlasimglen);
+
+ if (status != DA_OK) {
+ ha_alert("deviceatlas : data could not be compiled.\n");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (global_deviceatlas.cookiename == 0) {
+ global_deviceatlas.cookiename = strdup(DA_COOKIENAME_DEFAULT);
+ global_deviceatlas.cookienamelen = strlen(global_deviceatlas.cookiename);
+ }
+
+ global_deviceatlas.useragentid = da_atlas_header_evidence_id(&global_deviceatlas.atlas,
+ "user-agent");
+ if ((global_deviceatlas.atlasfd = shm_open(ATLASMAPNM, O_RDWR, 0660)) != -1) {
+ global_deviceatlas.atlasmap = mmap(NULL, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlas.atlasfd, 0);
+ if (global_deviceatlas.atlasmap == MAP_FAILED) {
+ close(global_deviceatlas.atlasfd);
+ global_deviceatlas.atlasfd = -1;
+ global_deviceatlas.atlasmap = NULL;
+ } else {
+ fprintf(stdout, "Deviceatlas : scheduling support enabled.\n");
+ }
+ }
+ global_deviceatlas.daset = 1;
+
+ fprintf(stdout, "Deviceatlas module loaded.\n");
+ }
+
+out:
+ return err_code;
+}
+
+static void deinit_deviceatlas(void)
+{
+ if (global_deviceatlas.jsonpath != 0) {
+ free(global_deviceatlas.jsonpath);
+ }
+
+ if (global_deviceatlas.daset == 1) {
+ free(global_deviceatlas.cookiename);
+ da_atlas_close(&global_deviceatlas.atlas);
+ free(global_deviceatlas.atlasimgptr);
+ }
+
+ if (global_deviceatlas.atlasfd != -1) {
+ munmap(global_deviceatlas.atlasmap, ATLASTOKSZ);
+ close(global_deviceatlas.atlasfd);
+ shm_unlink(ATLASMAPNM);
+ }
+
+ da_fini();
+}
+
+static void da_haproxy_checkinst(void)
+{
+ if (global_deviceatlas.atlasmap != 0) {
+ char *base;
+ base = (char *)global_deviceatlas.atlasmap;
+
+ if (base[0] != 0) {
+ void *cnew;
+ size_t atlassz;
+ char atlasp[ATLASTOKSZ] = {0};
+ da_atlas_t inst;
+ da_property_decl_t extraprops[1] = {{NULL, 0}};
+#ifdef USE_THREAD
+ HA_SPIN_LOCK(OTHER_LOCK, &dadwsch_lock);
+#endif
+ strlcpy2(atlasp, base, sizeof(atlasp));
+ if (da_atlas_read_mapped(atlasp, NULL, &cnew, &atlassz) == DA_OK) {
+ if (da_atlas_open(&inst, extraprops, cnew, atlassz) == DA_OK) {
+ char jsonbuf[26];
+ time_t jsond;
+
+ da_atlas_close(&global_deviceatlas.atlas);
+ free(global_deviceatlas.atlasimgptr);
+ global_deviceatlas.atlasimgptr = cnew;
+ global_deviceatlas.atlas = inst;
+ memset(base, 0, ATLASTOKSZ);
+ jsond = da_getdatacreation(&global_deviceatlas.atlas);
+ ctime_r(&jsond, jsonbuf);
+ jsonbuf[24] = 0;
+ printf("deviceatlas: new instance, data file date `%s`.\n", jsonbuf);
+ } else {
+ ha_warning("deviceatlas: instance update failed.\n");
+ memset(base, 0, ATLASTOKSZ);
+ free(cnew);
+ }
+ }
+#ifdef USE_THREAD
+ HA_SPIN_UNLOCK(OTHER_LOCK, &dadwsch_lock);
+#endif
+ }
+ }
+}
+
+static int da_haproxy(const struct arg *args, struct sample *smp, da_deviceinfo_t *devinfo)
+{
+ struct buffer *tmp;
+ da_propid_t prop, *pprop;
+ da_status_t status;
+ da_type_t proptype;
+ const char *propname;
+ int i;
+
+ tmp = get_trash_chunk();
+ chunk_reset(tmp);
+
+ propname = (const char *) args[0].data.str.area;
+ i = 0;
+
+ for (; propname != 0; i ++,
+ propname = (const char *) args[i].data.str.area) {
+ status = da_atlas_getpropid(&global_deviceatlas.atlas,
+ propname, &prop);
+ if (status != DA_OK) {
+ chunk_appendf(tmp, "%c", global_deviceatlas.separator);
+ continue;
+ }
+ pprop = &prop;
+ da_atlas_getproptype(&global_deviceatlas.atlas, *pprop, &proptype);
+
+ switch (proptype) {
+ case DA_TYPE_BOOLEAN: {
+ bool val;
+ status = da_getpropboolean(devinfo, *pprop, &val);
+ if (status == DA_OK) {
+ chunk_appendf(tmp, "%d", val);
+ }
+ break;
+ }
+ case DA_TYPE_INTEGER:
+ case DA_TYPE_NUMBER: {
+ long val;
+ status = da_getpropinteger(devinfo, *pprop, &val);
+ if (status == DA_OK) {
+ chunk_appendf(tmp, "%ld", val);
+ }
+ break;
+ }
+ case DA_TYPE_STRING: {
+ const char *val;
+ status = da_getpropstring(devinfo, *pprop, &val);
+ if (status == DA_OK) {
+ chunk_appendf(tmp, "%s", val);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ chunk_appendf(tmp, "%c", global_deviceatlas.separator);
+ }
+
+ da_close(devinfo);
+
+ if (tmp->data) {
+ --tmp->data;
+ tmp->area[tmp->data] = 0;
+ }
+
+ smp->data.u.str.area = tmp->area;
+ smp->data.u.str.data = tmp->data;
+ smp->data.type = SMP_T_STR;
+
+ return 1;
+}
+
+static int da_haproxy_conv(const struct arg *args, struct sample *smp, void *private)
+{
+ da_deviceinfo_t devinfo;
+ da_status_t status;
+ const char *useragent;
+ char useragentbuf[1024] = { 0 };
+ int i;
+
+ if (global_deviceatlas.daset == 0 || smp->data.u.str.data == 0) {
+ return 1;
+ }
+
+ da_haproxy_checkinst();
+
+ i = smp->data.u.str.data > sizeof(useragentbuf) ? sizeof(useragentbuf) : smp->data.u.str.data;
+ memcpy(useragentbuf, smp->data.u.str.area, i - 1);
+ useragentbuf[i - 1] = 0;
+
+ useragent = (const char *)useragentbuf;
+
+ status = da_search(&global_deviceatlas.atlas, &devinfo,
+ global_deviceatlas.useragentid, useragent, 0);
+
+ return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
+}
+
+#define DA_MAX_HEADERS 24
+
+static int da_haproxy_fetch(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ da_evidence_t ev[DA_MAX_HEADERS];
+ da_deviceinfo_t devinfo;
+ da_status_t status;
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ char vbuf[DA_MAX_HEADERS][1024] = {{ 0 }};
+ int i, nbh = 0;
+
+ if (global_deviceatlas.daset == 0) {
+ return 0;
+ }
+
+ da_haproxy_checkinst();
+
+ chn = (smp->strm ? &smp->strm->req : NULL);
+ htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ if (!htx)
+ return 0;
+
+ i = 0;
+ for (blk = htx_get_first_blk(htx); nbh < DA_MAX_HEADERS && blk; blk = htx_get_next_blk(htx, blk)) {
+ size_t vlen;
+ char *pval;
+ da_evidence_id_t evid;
+ enum htx_blk_type type;
+ struct ist n, v;
+ char hbuf[24] = { 0 };
+ char tval[1024] = { 0 };
+
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_HDR) {
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+ } else if (type == HTX_BLK_EOH) {
+ break;
+ } else {
+ continue;
+ }
+
+ /* The HTTP headers used by the DeviceAtlas API are not longer */
+ if (n.len >= sizeof(hbuf)) {
+ continue;
+ }
+
+ memcpy(hbuf, n.ptr, n.len);
+ hbuf[n.len] = 0;
+ pval = v.ptr;
+ vlen = v.len;
+ evid = -1;
+ i = v.len > sizeof(tval) - 1 ? sizeof(tval) - 1 : v.len;
+ memcpy(tval, v.ptr, i);
+ tval[i] = 0;
+ pval = tval;
+
+ if (strcasecmp(hbuf, "Accept-Language") == 0) {
+ evid = da_atlas_accept_language_evidence_id(&global_deviceatlas.atlas);
+ } else if (strcasecmp(hbuf, "Cookie") == 0) {
+ char *p, *eval;
+ size_t pl;
+
+ eval = pval + vlen;
+ /**
+ * The cookie value, if it exists, is located between the current header's
+ * value position and the next one
+ */
+ if (http_extract_cookie_value(pval, eval, global_deviceatlas.cookiename,
+ global_deviceatlas.cookienamelen, 1, &p, &pl) == NULL) {
+ continue;
+ }
+
+ vlen -= global_deviceatlas.cookienamelen - 1;
+ pval = p;
+ evid = da_atlas_clientprop_evidence_id(&global_deviceatlas.atlas);
+ } else {
+ evid = da_atlas_header_evidence_id(&global_deviceatlas.atlas, hbuf);
+ }
+
+ if (evid == -1) {
+ continue;
+ }
+
+ i = vlen > sizeof(vbuf[nbh]) - 1 ? sizeof(vbuf[nbh]) - 1 : vlen;
+ memcpy(vbuf[nbh], pval, i);
+ vbuf[nbh][i] = 0;
+ ev[nbh].key = evid;
+ ev[nbh].value = vbuf[nbh];
+ ++ nbh;
+ }
+
+ status = da_searchv(&global_deviceatlas.atlas, &devinfo,
+ ev, nbh);
+
+ return status != DA_OK ? 0 : da_haproxy(args, smp, &devinfo);
+}
+
+static struct cfg_kw_list dacfg_kws = {{ }, {
+ { CFG_GLOBAL, "deviceatlas-json-file", da_json_file },
+ { CFG_GLOBAL, "deviceatlas-log-level", da_log_level },
+ { CFG_GLOBAL, "deviceatlas-property-separator", da_property_separator },
+ { CFG_GLOBAL, "deviceatlas-properties-cookie", da_properties_cookie },
+ { 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &dacfg_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+static struct sample_fetch_kw_list fetch_kws = {ILH, {
+ { "da-csv-fetch", da_haproxy_fetch, ARG12(1,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR), NULL, SMP_T_STR, SMP_USE_HRQHV },
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &fetch_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+static struct sample_conv_kw_list conv_kws = {ILH, {
+ { "da-csv-conv", da_haproxy_conv, ARG12(1,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR), NULL, SMP_T_STR, SMP_T_STR },
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+static void da_haproxy_register_build_options()
+{
+ char *ptr = NULL;
+
+#ifdef MOBI_DA_DUMMY_LIBRARY
+ memprintf(&ptr, "Built with DeviceAtlas support (dummy library only).");
+#else
+ memprintf(&ptr, "Built with DeviceAtlas support (library version %u.%u).", MOBI_DA_MAJOR, MOBI_DA_MINOR);
+#endif
+ hap_register_build_opts(ptr, 1);
+}
+
+INITCALL1(STG_REGISTER, sample_register_convs, &conv_kws);
+
+REGISTER_POST_CHECK(init_deviceatlas);
+REGISTER_POST_DEINIT(deinit_deviceatlas);
+INITCALL0(STG_REGISTER, da_haproxy_register_build_options);
diff --git a/addons/deviceatlas/dadwsch.c b/addons/deviceatlas/dadwsch.c
new file mode 100644
index 0000000..e35566a
--- /dev/null
+++ b/addons/deviceatlas/dadwsch.c
@@ -0,0 +1,195 @@
+#define _GNU_SOURCE
+#include <dac.h>
+#include <dadwcurl.h>
+#include <dadwarc.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#define ATLASTOKSZ PATH_MAX
+#define ATLASMAPNM "/hapdeviceatlas"
+
+const char *__pgname;
+
+static struct {
+ da_dwatlas_t o;
+ int ofd;
+ void* atlasmap;
+} global_deviceatlassch = {
+ .ofd = -1,
+ .atlasmap = NULL
+};
+
+
+void usage(void)
+{
+ fprintf(stderr, "%s -u download URL [-d hour (in H:M:S format) current hour by default] [-p path for the downloaded file, /tmp by default]\n", __pgname);
+ exit(EXIT_FAILURE);
+}
+
+static size_t jsonread(void *ctx, size_t count, char *buf)
+{
+ return fread(buf, 1, count, ctx);
+}
+
+static da_status_t jsonseek(void *ctx, off_t pos)
+{
+ return fseek(ctx, pos, SEEK_SET) != -1 ? DA_OK : DA_SYS;
+}
+
+static void dadwlog(dw_config_t cfg, const char* msg)
+{
+ time_t now = time(NULL);
+ char buf[26] = {0};
+ ctime_r(&now, buf);
+ buf[24] = 0;
+ fprintf(stderr, "%s: %s\n", buf, msg);
+}
+
+static dw_status_t dadwnot(void *a, dw_config_t *cfg)
+{
+ da_dwatlas_t *o = (da_dwatlas_t *)a;
+ if (!o)
+ return DW_ERR;
+ char *e;
+ char jsondbuf[26] = {0}, buf[26] = {0}, atlasp[ATLASTOKSZ] = {0};
+ time_t now = time(NULL);
+ time_t jsond;
+ int fd = -1;
+ (void)a;
+ jsond = da_getdatacreation(&o->atlas);
+ dwgetfinalp(o->dcfg.info, atlasp, sizeof(atlasp));
+ ctime_r(&jsond, jsondbuf);
+ ctime_r(&now, buf);
+ jsondbuf[24] = 0;
+ buf[24] = 0;
+
+ printf("%s: data file generated on `%s`\n", buf, jsondbuf);
+ int val = 1;
+ unsigned char *ptr = (unsigned char *)global_deviceatlassch.atlasmap;
+ memset(ptr, 0, sizeof(atlasp));
+ strcpy(ptr, atlasp);
+ return DW_OK;
+}
+
+static da_status_t dadwinit(void)
+{
+ if ((global_deviceatlassch.ofd = shm_open(ATLASMAPNM, O_RDWR | O_CREAT, 0660)) == -1) {
+ fprintf(stderr, "%s\n", strerror(errno));
+ return DA_SYS;
+ }
+
+ if (ftruncate(global_deviceatlassch.ofd, ATLASTOKSZ) == -1) {
+ close(global_deviceatlassch.ofd);
+ return DA_SYS;
+ }
+ lseek(global_deviceatlassch.ofd, 0, SEEK_SET);
+ global_deviceatlassch.atlasmap = mmap(0, ATLASTOKSZ, PROT_READ | PROT_WRITE, MAP_SHARED, global_deviceatlassch.ofd, 0);
+ if (global_deviceatlassch.atlasmap == MAP_FAILED) {
+ fprintf(stderr, "%s\n", strerror(errno));
+ return DA_SYS;
+ } else {
+ memset(global_deviceatlassch.atlasmap, 0, ATLASTOKSZ);
+ return DA_OK;
+ }
+}
+
+static void dadwexit(int sig __attribute__((unused)), siginfo_t *s __attribute__((unused)), void *ctx __attribute__((unused)))
+{
+ ssize_t w;
+
+ fprintf(stderr, "%s: exit\n", __pgname);
+ dw_daatlas_close(&global_deviceatlassch.o);
+ da_fini();
+ munmap(global_deviceatlassch.atlasmap, ATLASTOKSZ);
+ close(global_deviceatlassch.ofd);
+ shm_unlink(ATLASMAPNM);
+ exit(EXIT_SUCCESS);
+}
+
+int main(int argc, char **argv)
+{
+ const char *opts = "u:p:d:h";
+ bool dset = false;
+ size_t i;
+ int ch;
+
+ da_property_decl_t extraprops[1] = {
+ { 0, 0 }
+ };
+
+ __pgname = argv[0];
+
+ dw_df_dainit_fn = curldwinit;
+ dw_df_dacleanup_fn = curldwcleanup;
+
+ da_init();
+ memset(&global_deviceatlassch.o.dcfg, 0, sizeof(global_deviceatlassch.o.dcfg));
+ while ((ch = getopt(argc, argv, opts)) != -1) {
+ switch (ch) {
+ case 'u':
+ global_deviceatlassch.o.dcfg.info.url = strdup(optarg);
+ break;
+ case 'p':
+ global_deviceatlassch.o.dcfg.info.path = strdup(optarg);
+ break;
+ case 'd':
+ if (strptime(optarg, "%H:%M:%S", &global_deviceatlassch.o.dcfg.info.rtm) != NULL)
+ dset = true;
+ else
+ usage();
+ break;
+ case 'h':
+ default:
+ usage();
+ }
+ }
+
+ if (!dset) {
+ time_t now = time(NULL);
+ struct tm *cnow = gmtime(&now);
+ memcpy(&global_deviceatlassch.o.dcfg.info.rtm, cnow, offsetof(struct tm, tm_mday));
+ }
+
+ if (!global_deviceatlassch.o.dcfg.info.url)
+ usage();
+
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
+ sa.sa_sigaction = dadwexit;
+
+ global_deviceatlassch.o.dcfg.info.datatm = 1;
+ global_deviceatlassch.o.dcfg.info.chksum = 1;
+ global_deviceatlassch.o.dcfg.info.reload = 1;
+ global_deviceatlassch.o.dcfg.info.tobin = 1;
+ global_deviceatlassch.o.dcfg.ep = extraprops;
+ global_deviceatlassch.o.dcfg.dwproc = curldwproc;
+ global_deviceatlassch.o.dcfg.dwextract = dadwextract;
+ global_deviceatlassch.o.dcfg.lptr = (void *)stderr;
+ global_deviceatlassch.o.dcfg.dwlog = &dadwlog;
+ global_deviceatlassch.o.dcfg.dwnotify_n = &dadwnot;
+ global_deviceatlassch.o.rfn = jsonread;
+ global_deviceatlassch.o.posfn = jsonseek;
+
+ if (dadwinit() != DA_OK) {
+ fprintf(stderr, "%s init failed\n", __pgname);
+ exit(EXIT_FAILURE);
+ }
+
+ if (da_atlas_open_schedule(&global_deviceatlassch.o) != DA_OK) {
+ fprintf(stderr, "%s scheduling failed\n", __pgname);
+ exit(EXIT_FAILURE);
+ }
+
+ sigaction(SIGINT, &sa, NULL);
+ sigaction(SIGQUIT, &sa, NULL);
+ sigaction(SIGTERM, &sa, NULL);
+
+ while (true) sleep(1);
+
+ return 0;
+}
diff --git a/addons/deviceatlas/dummy/Makefile b/addons/deviceatlas/dummy/Makefile
new file mode 100644
index 0000000..8bba840
--- /dev/null
+++ b/addons/deviceatlas/dummy/Makefile
@@ -0,0 +1,12 @@
+# makefile for dummy DeviceAtlas library
+#
+# To enable the DeviceAtlas module support, the following are needed
+# make TARGET=<target> DEVICEATLAS_SRC=addons/deviceatlas/dummy USE_PCRE=1 USE_DEVICEATLAS=1
+
+build: libda.a
+
+libda.a: dac.o
+ ar rv $@ $<
+
+clean:
+ rm -rf *.a *.o
diff --git a/addons/deviceatlas/dummy/Os/daunix.c b/addons/deviceatlas/dummy/Os/daunix.c
new file mode 100644
index 0000000..ca696f9
--- /dev/null
+++ b/addons/deviceatlas/dummy/Os/daunix.c
@@ -0,0 +1,9 @@
+#include "dac.h"
+
+static char const __attribute__((unused)) rcsid[] = "$Id: dac.c, v dummy 1970/01/01 00:00:01 dcarlier Exp $";
+
+da_status_t
+da_atlas_read_mapped(const char *path, void *m, void **p, size_t *l)
+{
+ return DA_SYS;
+}
diff --git a/addons/deviceatlas/dummy/dac.c b/addons/deviceatlas/dummy/dac.c
new file mode 100644
index 0000000..720dc6a
--- /dev/null
+++ b/addons/deviceatlas/dummy/dac.c
@@ -0,0 +1,222 @@
+#include "dac.h"
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+static char const __attribute__((unused)) rcsid[] = "$Id: dac.c, v dummy 1970/01/01 00:00:01 dcarlier Exp $";
+
+struct da_bitset {
+ unsigned long bits[8];
+ size_t bit_count;
+};
+
+/*
+ * Constructor/Destructor for possible globals.
+ */
+
+void
+da_init()
+{
+}
+
+void
+da_fini()
+{
+}
+
+
+void
+da_seterrorfunc(da_errorfunc_t callback)
+{
+}
+
+const char *
+da_typename(da_type_t fieldtype)
+{
+ return "none";
+}
+
+char *
+da_getdataversion(da_atlas_t *atlas)
+{
+ return "dummy library version 1.0";
+}
+
+time_t
+da_getdatacreation(da_atlas_t *atlas)
+{
+ return time(NULL);
+}
+
+int
+da_getdatarevision(da_atlas_t *atlas)
+{
+ return 1;
+}
+
+da_status_t
+da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn rewind, void **ptr, size_t *size)
+{
+ return DA_OK;
+}
+
+da_status_t
+da_atlas_open(da_atlas_t *atlas, da_property_decl_t *extraprops, const void *ptr, size_t len)
+{
+ void *ptr2 = malloc(len);
+ free(ptr2);
+ return ptr2 ? DA_OK : DA_NOMEM;
+}
+
+void
+da_atlas_close(da_atlas_t *atlas)
+{
+}
+
+da_evidence_id_t
+da_atlas_clientprop_evidence_id(const da_atlas_t *atlas)
+{
+ return (da_evidence_id_t)2;
+}
+
+da_evidence_id_t
+da_atlas_accept_language_evidence_id(const da_atlas_t *atlas)
+{
+ return (da_evidence_id_t)3;
+}
+
+da_evidence_id_t
+da_atlas_header_evidence_id(const da_atlas_t *atlas, const char *evidence_name)
+{
+ return (da_evidence_id_t)1;
+}
+
+da_status_t
+da_atlas_getproptype(const da_atlas_t *atlas, da_propid_t propid, da_type_t *type)
+{
+ *type = DA_TYPE_BOOLEAN;
+ return DA_OK;
+}
+
+da_status_t
+da_atlas_getpropname(const da_atlas_t *atlas, da_propid_t propid, const char **name)
+{
+ *name = "isRobot";
+ return DA_OK;
+}
+
+da_status_t
+da_atlas_getpropid(const da_atlas_t *atlas, const char *propname, da_propid_t *property)
+{
+ *property = (da_propid_t)1;
+ return DA_OK;
+}
+
+size_t
+da_atlas_getpropcount(const da_atlas_t *atlas)
+{
+ return 1;
+}
+
+void
+da_atlas_setconfig(da_atlas_t *atlas, da_config_t *config)
+{
+}
+
+da_status_t
+da_searchv(const da_atlas_t *atlas, da_deviceinfo_t *result, da_evidence_t *evidence, size_t count)
+{
+ memset(result, 0, sizeof(*result));
+ result->propcount = count;
+ return DA_OK;
+}
+
+da_status_t
+da_search(const da_atlas_t *atlas, da_deviceinfo_t *result, ...)
+{
+ da_evidence_t vec[4]; /* XXX: this will have to grow if more evidence is supported. */
+ size_t i;
+ va_list args;
+ va_start(args, result);
+ for (i = 0; i < sizeof vec / sizeof vec[0];) {
+ vec[i].key = va_arg(args, da_evidence_id_t);
+ if (vec[i].key == 0)
+ break;
+ vec[i++].value = va_arg(args, char *);
+ }
+ va_end(args);
+ return da_searchv(atlas, result, vec, i);
+}
+
+/*
+ * Search-result centric functions.
+ */
+size_t
+da_getpropcount(const da_deviceinfo_t *info)
+{
+ return info->propcount;
+}
+
+da_status_t
+da_getfirstprop(const da_deviceinfo_t *info, da_propid_t **propid)
+{
+ if (info->propcount == 0)
+ return DA_NOMORE;
+ *propid = &info->proplist[0];
+ return DA_OK;
+}
+
+da_status_t
+da_getnextprop(const da_deviceinfo_t *info, da_propid_t **propid)
+{
+ if (*propid - info->proplist >= info->propcount - 1)
+ return DA_NOMORE;
+ ++*propid;
+ return DA_OK;
+}
+
+void
+da_close(da_deviceinfo_t *sr)
+{
+}
+
+da_status_t
+da_getpropname(const da_deviceinfo_t *info, da_propid_t propid, const char **name)
+{
+ *name = "isRobot";
+ return DA_OK;
+}
+
+da_status_t
+da_getproptype(const da_deviceinfo_t *info, da_propid_t propid, da_type_t *type)
+{
+ *type = DA_TYPE_BOOLEAN;
+ return DA_OK;
+}
+
+da_status_t
+da_getpropinteger(const da_deviceinfo_t *info, da_propid_t property, long *vp)
+{
+ *vp = -1;
+ return DA_OK;
+}
+
+da_status_t
+da_getpropstring(const da_deviceinfo_t *info, da_propid_t property, const char **vp)
+{
+ *vp = NULL;
+ return DA_OK;
+}
+
+da_status_t
+da_getpropboolean(const da_deviceinfo_t *info, da_propid_t property, bool *vp)
+{
+ *vp = true;
+ return DA_OK;
+}
+
+const char *
+da_get_property_name(const da_atlas_t *atlas, da_propid_t property)
+{
+ return "isRobot";
+}
diff --git a/addons/deviceatlas/dummy/dac.h b/addons/deviceatlas/dummy/dac.h
new file mode 100644
index 0000000..bf166ae
--- /dev/null
+++ b/addons/deviceatlas/dummy/dac.h
@@ -0,0 +1,600 @@
+#ifndef MOBI_DA_DAC_H
+#define MOBI_DA_DAC_H
+
+/**
+ * @file dac.h
+ * @author Afilias Technologies
+ *
+ * @brief API main header file
+ */
+
+#include <sys/types.h>
+#include <limits.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifndef __cplusplus
+#ifndef true
+#ifdef HAVE_NO_BUILTIN__BOOL
+typedef int _Bool;
+#endif
+#define bool _Bool
+
+#define true 1
+#define false 0
+#endif
+#endif
+
+#define MOBI_DA_MAJOR 2
+#define MOBI_DA_MINOR 1
+#define MOBI_DA_DUMMY_LIBRARY 1
+
+
+/**
+ * @brief All values returned by the API have one of these types.
+ * da_getprop*() return data in the appropriate C type for the given da_type.
+ */
+enum da_type {
+ DA_TYPE_NONE,
+ DA_TYPE_BOOLEAN,
+ DA_TYPE_INTEGER,
+ DA_TYPE_NUMBER,
+ DA_TYPE_STRING,
+ DA_TYPE_ARRAY,
+ DA_TYPE_OBJECT,
+ DA_TYPE_NULL
+};
+
+/**
+ * Any method that returns a da_status may potentially fail for one of these reasons.
+ * XXX: Error reporting needs to be improved.
+ */
+enum da_status {
+ DA_OK, /* Success. */
+ DA_INVALID_JSON, /* The JSON format is invalid, or the content is unexpected in a given context. */
+ DA_OVERFLOW, /* Overflow occurred. Note this is used to indicate an unfinished string parse in JSON */
+ DA_FORMAT_ERROR, /* The data supplied is formatted incorrectly. */
+ DA_NOMEM, /* There was not enough space to complete the operation */
+ DA_SYS, /* A system error occurred - consult the OS for more details (eg, check errno) */
+ DA_NOTIMPL, /* This method is not implemented */
+ DA_NOTFOUND, /* The requested item was not found. */
+ DA_REGEXBAD, /* An invalid regex was provided. */
+ DA_NOMORE, /* Used to indicate the end of an iterator. */
+ DA_INVALID_COOKIE, /* Cookie value supplied was invalid */
+ DA_INVALID_TYPE, /* A value of an unexpected type was found. */
+ DA_INTERNAL_ERROR,
+ DA_STATUS_LAST /* Placeholder to indicate highest possible error value. (value will change as API matures) */
+};
+
+enum da_severity {
+ DA_SEV_FATAL, /* The operation will not continue, and the operation will return an error. */
+ DA_SEV_ERROR, /* An error occurred, but the API call will return at least some valid information */
+ DA_SEV_WARN, /* An unexpected event occurred, but the system dealt with it */
+ DA_SEV_INFO /* An informational message. */
+};
+/* Forward references to tagged types */
+struct atlas_image;
+struct da_atlas;
+struct da_deviceinfo;
+struct da_jsonparser;
+struct da_node;
+struct da_propset;
+union da_value;
+struct da_evidence;
+struct da_bitset;
+struct da_allocator;
+struct da_config;
+
+/**
+ * @brief Primary types of the interface.
+ * Primary types used by API client.
+ * Non-typedef structures and unions are considered private to the API.
+ *
+ */
+typedef enum da_severity da_severity_t; /* A severity for the error callback. */
+typedef enum da_status da_status_t; /* An error code - returned from most API calls. */
+typedef da_status_t (*da_setpos_fn)(void *ctx, off_t off); /* callback provided to API to rewind input stream */
+typedef enum da_type da_type_t; /* A value type (integer, string, etc) */
+
+/**
+ * @brief An operation on an atlas involves converting a set of evidence strings into a set of property/value pairs.
+ * The ID for a particular type of evidence is extract from the atlas (eg, for a specific HTTP header, use:
+ *
+ * da_evidence_id_t evidence = da_atlas_header_evidence_id(atlas, "User-Agent");
+ *
+ */
+typedef int da_evidence_id_t;
+
+/**
+ * @brief The search result encompasses a key/value set. Keys are handles retrieved via
+ * _either_ da_atlas_getpropid() or da_getpropid().
+ * Some search results may have keys not available when the atlas is opened (eg,
+ * when the name of the property itself is contained within the evidence)
+ * Such properties by necessity are given a "local" da_propid_t
+ *
+ * You can ensure any properties you are interested in get a global propid by
+ * passing a list of interesting named properties to da_atlas_open()
+ */
+typedef int da_propid_t;
+typedef size_t (*da_read_fn)(void *ctx, size_t maxlen, char *ptr);
+typedef struct da_atlas da_atlas_t;
+typedef struct da_deviceinfo da_deviceinfo_t;
+typedef struct da_evidence da_evidence_t;
+typedef struct da_jsonparser da_jsonparser_t;
+typedef struct da_node da_node_t;
+typedef struct da_property_decl da_property_decl_t;
+typedef struct da_propset da_propset_t;
+typedef struct da_config da_config_t;
+typedef void *(*da_alloc_fn)(void *ctx, size_t);
+typedef void (*da_free_fn)(void *ctx, void *);
+typedef void *(*da_realloc_fn)(void *ctx, void *, size_t);
+typedef void (*da_errorfunc_t)(da_severity_t severity, da_status_t status, const char *msg, va_list args);
+
+
+/* Manifest constants. */
+enum {
+ /*
+ * used as the initial guess for the compiled size of an atlas.
+ * If atlas sizes grow more beyond this, it can be expanded to avoid multiple scans of the data.
+ */
+ DA_INITIAL_MEMORY_ESTIMATE = 1024 * 1024 * 14
+};
+
+struct da_config {
+ unsigned int ua_props;
+ unsigned int lang_props;
+ unsigned int __reserved[14]; /* enough reserved keywords for future use */
+};
+
+/**
+ * Functional interface.
+ */
+
+/**
+ * @brief Initialize process to use the DA API.
+ */
+void da_init(void);
+
+
+/**
+ * @brief Release all resources used by the API
+ */
+void da_fini(void);
+
+/**
+ * @brief User-supplied callback to be invoked with information about an error.
+ * Note this may use thread-local storage etc to store the info on return from the current call
+ * It is guaranteed that an error-reporting function returning an error-code will have called
+ * this function at least once.
+ * @param callback function
+ */
+void da_seterrorfunc(da_errorfunc_t callback);
+
+/**
+ * @brief Given a specific HTTP header, return the associated ID for that header.
+ * When passing evidence to the API, its type is identified using its da_evidence_id_t.
+ * @param atlas atlas instance
+ * @param header_name Header's name
+ * @return evidence id
+ */
+da_evidence_id_t da_atlas_header_evidence_id(const da_atlas_t *atlas, const char *header_name);
+/**
+ * @brief Return the associated ID of the client side properties evidence
+ * @param atlas Atlas instance
+ * @return evidence id
+ */
+da_evidence_id_t da_atlas_clientprop_evidence_id(const da_atlas_t *atlas);
+/**
+ * @brief Return the associated ID of the accept language header evidence
+ * @param atlas Atlas instance
+ * @return evidence id
+ */
+da_evidence_id_t da_atlas_accept_language_evidence_id(const da_atlas_t *atlas);
+
+/**
+ * @brief readfn should present JSON content from ctx.
+ * atlasp points to an uninitialized da_atlas structure.
+ * Result is a compiled atlas at atlasp.
+ * Result is allocated via normal memory-allocation methods, malloc/calloc/realloc, so should be
+ * Free'd with free()
+ * XXX TODO: Change this to take a da_allocator
+ * @param ctx pointer given to read the json file
+ * @param readfn function pointer, set accordingly to the attended given pointer
+ * @param setposfn function pointer
+ * @param ptr Pointer dynamically allocated if the json parsing happened normally
+ * @param len size of the atlas image
+ * @return status of atlas compilation
+ */
+da_status_t da_atlas_compile(void *ctx, da_read_fn readfn, da_setpos_fn setposfn, void **ptr, size_t *len);
+
+/**
+ * @brief opens a previously compiled atlas for operations. extra_props will be available in calls to
+ * da_getpropid on the atlas, and if generated by the search, the ID will be consistent across
+ * different calls to search.
+ * Properties added by a search that are neither in the compiled atlas, nor in the extra_props list
+ * Are assigned an ID within the context that is not transferrable through different search results
+ * within the same atlas.
+ * @param atlas Atlas instance
+ * @param extra_props properties
+ * @param ptr given pointer from previously compiled atlas
+ * @param pos atlas image size
+ * @return status of atlas data opening
+ */
+da_status_t da_atlas_open(da_atlas_t *atlas, da_property_decl_t *extra_props, const void *ptr, size_t pos);
+
+/**
+ * @brief read from a mapped data which then replace da_atlas_compile call
+ *
+ * @param path dump path, anonymous if NULL
+ * @param m for anonymous, the mapping (and its size); it is the responsibility
+ * of the caller to unmap it, ignored otherwise
+ * @param p Pointer dynamically allocated if the mapping happened normally
+ * @param l size of the atlas image
+ * @return status of mapping
+ */
+da_status_t da_atlas_read_mapped(const char *path, void *m, void **p, size_t *l);
+/**
+ * @brief Release any resources associated with the atlas structure atlas, which was previously generated from
+ * da_read_atlas or da_compile_atlas.
+ * @param atlas instance
+ */
+void da_atlas_close(da_atlas_t *atlas);
+
+/**
+ * @brief Find device properties given a set of evidence.
+ * Search results are returned in da_deviceinfo_t, and must be cleaned using da_close
+ * "Evidence" is an array of length count, of string data tagged with an evidence ID.
+ * @param atlas Atlas instance
+ * @param info Device info
+ * @param ev Array of evidences
+ * @param count Number of evidence given
+ * @return status of the search
+ */
+da_status_t da_searchv(const da_atlas_t *atlas, da_deviceinfo_t *info, da_evidence_t *ev, size_t count);
+
+/**
+ * @brief As da_search, but unrolls the evidence array into variable arguments for simpler calling
+ * convention with known evidence types.
+ * varargs are pairs of (da_evidence_id, string), terminated with da_evidence_id DA_END
+ * @code da_search(&myAtlas, &deviceInfo, da_atlas_header_evidence_id(&myAtlas, "User-Agent"),
+ * "Mozilla/5.0 (Linux...", DA_END);
+ * @endcode
+ * @param atlas Atlas instance
+ * @param info given device info which holds on device properties
+ * @param pairs of evidence id / evidence value
+ * @return status of the search
+ */
+da_status_t da_search(const da_atlas_t *atlas, da_deviceinfo_t *info, ...);
+
+/**
+ * @brief After finishing with a search result, release resources associated with it.
+ * @param info Device info previously allocated by search functions
+ */
+void da_close(da_deviceinfo_t *info);
+
+/**
+ * @brief Given a property name (Eg, "displayWidth"), return the property ID associated with it for the
+ * specified atlas.
+ * @param atlas Atlas instance
+ * @param propname Property name
+ * @param propid Property id
+ * @return status of the property id search
+ */
+da_status_t da_atlas_getpropid(const da_atlas_t *atlas, const char *propname, da_propid_t *propid);
+
+/**
+ * @brief Given a property ID, return the type of that property.
+ * @code
+ * da_getproptype(&myAtlas, da_getpropid(&myAtlas, "displayWidth"), &propertyType);
+ * assert(propertyType == DA_TYPE_INT);
+ * @endcode
+ * @param atlas Atlas instance
+ * @param propid Property id
+ * @param type Type id of the property
+ * @return status of the type id search
+ */
+da_status_t da_atlas_getproptype(const da_atlas_t *atlas, da_propid_t propid, da_type_t *type);
+
+/**
+ * @brief Given a property ID, return the name of that property.
+ * @code
+ * da_atlas_getpropname(&myAtlas, da_getpropid(&myAtlas, "displayWidth"), &propertyName);
+ * assert(strcmp("displayWidth", propertyName) == 0);
+ * @endcode
+ * @param atlas Atlas instance
+ * @param propid property id
+ * @param propname property name returned
+ * @return status of the property name search
+ */
+da_status_t da_atlas_getpropname(const da_atlas_t *atlas, da_propid_t propid, const char **propname);
+
+
+/**
+ * @brief Given an atlas instance, return its counters + the builtins
+ * @code
+ * da_atlas_getpropcount(&myAtlas);
+ * @endcode
+ * @param atlas Atlas instance
+ * @return counters
+ */
+size_t da_atlas_getpropcount(const da_atlas_t *atlas);
+
+/**
+ * @brief Given an atlas instance, set the detection config
+ * @param atlas Atlas instance
+ * @param config instance
+ */
+void da_atlas_setconfig(da_atlas_t *atlas, da_config_t *config);
+
+/**
+ * @brief Given a search result, find the value of a specific property.
+ * @code
+ * long displayWidth; // width of display in pixels.
+ * da_getpropinteger(&deviceInfo, da_getpropid(&myAtlas, "displayWidth"), &displayWidth);
+ * @endcode
+ * String contents are owned by the search result, and are valid until the search is closed.
+ */
+/**
+ * @brief returns a property value as a string from a given string typed property id
+ * @param info Device info
+ * @param propid Property id
+ * @param value Value of the property
+ * @return status of property value search
+ */
+da_status_t da_getpropstring(const da_deviceinfo_t *info, da_propid_t propid, const char **value);
+/**
+ * @brief returns a property value as a long from a given long typed property id
+ * @param info Device info
+ * @param propid Property id
+ * @param value Value of the property
+ * @return status of property value search
+ */
+da_status_t da_getpropinteger(const da_deviceinfo_t *info, da_propid_t propid, long *value);
+/**
+ * @brief returns a property value as a boolean from a given boolean typed property id
+ * @param info Device info
+ * @param propid Property id
+ * @param value Value of the property
+ * @return status of property value search
+ */
+da_status_t da_getpropboolean(const da_deviceinfo_t *info, da_propid_t propid, bool *value);
+/**
+ * @brief returns a property value as a float from a given float typed property id
+ * @param info Device info
+ * @param propid Property id
+ * @param value Value of the property
+ * @return status of property value search
+ */
+da_status_t da_getpropfloat(const da_deviceinfo_t *info, da_propid_t propid, double *value);
+
+/**
+ * @brief Some properties may not be not known to the atlas before the search commences.
+ * Such properties cannot have a da_propid_t assigned to them on the atlas, but will
+ * have a local property assigned during search. The name and type of such properties
+ * can be discovered here.
+ *
+ * Properties that are used in the atlas source and properties specifically registered
+ * with da_atlas_open() will always be assigned to a property discovered during search.
+ * Therefore, if there are specific properties that you want to use, and are unsure
+ * if they are in your device atlas source, registering them with da_atlas_open will
+ * make access to them easier and more efficient
+ */
+/**
+ * @brief returns the type of a given device property from the search functions
+ * @param info Device info
+ * @param propid Property id
+ * @param type Type id
+ * @return status of property type search
+ */
+da_status_t da_getproptype(const da_deviceinfo_t *info, da_propid_t propid, da_type_t *type);
+/**
+ * @brief returns the name of a given device property from the search functions
+ * @param info Device info
+ * @param propid Property id
+ * @param propname Property name
+ * @return status of property type search
+ */
+da_status_t da_getpropname(const da_deviceinfo_t *info, da_propid_t propid, const char **propname);
+
+/**
+ * @brief da_getfirstprop/da_getnextprop provide iteration over all properties
+ * in a search result.
+ * Both will return DA_OK if there is a result available, and DA_NOMORE
+ * if the search is complete.
+ * @code
+ *
+ * da_propid_t *propidp;
+ * for (da_status_t status = da_getfirstprop(&result, &propidp);
+ * status == DA_OK;
+ * status = da_getnextprop(&result, &propidp)) {
+ * const char *propname;
+ * if (da_getpropname(&result, *propidp, &propname) == DA_OK)
+ * fprintf("found property %s\n", propname);
+ * }
+ * @endcode
+ */
+
+/**
+ * @brief returns the first property from device info
+ * @param info Device info
+ * @param propid Property
+ * @return status
+ */
+da_status_t da_getfirstprop(const da_deviceinfo_t *info, da_propid_t **propid);
+/**
+ * @brief device info properties iterator
+ * @param info Device info
+ * @param propid Property
+ * @return status
+ */
+da_status_t da_getnextprop(const da_deviceinfo_t *info, da_propid_t **propid);
+
+/**
+ * @brief Report an error, as per a report from the API to the user-callback.
+ * @param severity Severity level of the error
+ * @param fmt format error message
+ * @param va_list
+ * @return status
+ */
+da_status_t da_reporterror(da_status_t severity, const char *fmt, ...);
+
+/**
+ * @brief returns a textual description of the type "type".
+ * @param type Type id
+ * @return type name
+ */
+const char *da_typename(da_type_t type);
+
+/**
+ * @brief returns the version from the JSON in memory
+ * @param atlas
+ * @return version
+ */
+char *da_getdataversion(da_atlas_t *atlas);
+
+/**
+ * @brief returns the date creation's timestamp from the JSON in memory
+ * @param atlas
+ * @return version
+ */
+time_t da_getdatacreation(da_atlas_t *atlas);
+
+/**
+ * @brief returns the revision's number from the JSON in memory
+ * @param atlas
+ * @return version
+ */
+int da_getdatarevision(da_atlas_t *atlas);
+
+/**
+ * @brief returns the name of a global property
+ * @param atlas Atlas instance
+ * @param propid Property id
+ * @return property name
+ */
+const char *da_get_property_name(const da_atlas_t *atlas, da_propid_t propid);
+
+/**
+ * @brief returns the number of properties in a result.
+ * @param info Device info
+ * @return properties count
+ */
+size_t da_getpropcount(const da_deviceinfo_t *info);
+
+/*
+ * Details below should not be required for usage of the API
+ */
+
+/**
+ * @brief Represents a usable device atlas interface.
+ *
+ * No user servicable parts inside: access should
+ * be via the functional API.
+ */
+struct da_atlas {
+ const struct atlas_image *image;
+ struct header_evidence_entry *header_priorities;
+ size_t header_evidence_count;
+
+ struct pcre_regex_info *uar_regexes;
+ size_t uar_regex_count;
+
+ struct pcre_regex_info *replacement_regexes;
+ size_t replacement_regex_count;
+
+ da_evidence_id_t user_agent_evidence;
+ da_evidence_id_t clientprops_evidence;
+ da_evidence_id_t accept_language_evidence;
+ da_evidence_id_t next_evidence;
+
+ da_propset_t *properties;
+ da_propid_t id_propid;
+ da_propid_t id_proplang;
+ da_propid_t id_proplang_locale;
+
+ da_config_t config;
+
+ da_deviceinfo_t **cpr_props;
+ size_t cpr_count;
+};
+
+/* fixed constants. */
+enum {
+ DA_BUFSIZE = 16000
+};
+
+/**
+ * Represents a chunk of memory. See comments on da_deviceinfo.
+ * This is presented here to allow aggregation in da_deviceinfo:
+ * Not for public consumption.
+ */
+struct da_buf {
+ struct da_buf *next;
+ char *cur;
+ char *limit;
+ char buf[DA_BUFSIZE];
+};
+
+/**
+ * A callback interface for allocating memory from some source
+ * Not for public consumption.
+ */
+struct da_allocator {
+ da_alloc_fn alloc;
+ da_free_fn free;
+ da_realloc_fn realloc;
+ void *context;
+};
+
+
+/**
+ * Represents a search result
+ * Can be used to retrieve values of known properties discovered from the evidence,
+ * iterate over the properties with known values, and query property types that are
+ * local to this result.
+ *
+ * The atlas the search is carried out on must survive any da_deviceinfo results
+ * it provides.
+ */
+struct da_deviceinfo {
+ struct da_allocator allocator;
+ const da_atlas_t *atlas; /* reference to the atlas the search was carried out on. */
+ struct da_bitset *present; /* property received from tree */
+ struct da_bitset *localprop; /* property was received from UAR rule or CPR */
+ struct da_bitset *cprprop; /* property was received from CPR */
+ union da_value *properties; /* properties - indexed by property id. */
+ da_propid_t *proplist; /* list of properties present in this result. */
+ size_t propcount; /* size of proplist */
+ da_propset_t *local_types; /* property descriptors local to this search result. */
+
+ /**
+ * The per-deviceinfo heap is stored here. Allocations for data in the result
+ * come from the raw data in these buffers. The size of the fixed-size buffer
+ * built in to da_buf is sized such that all known search results will not
+ * require memory allocation via malloc()
+ */
+ struct da_buf *heap;
+ struct da_buf initial_heap;
+};
+
+/**
+ * Used to pass evidence to da_searchv()
+ */
+struct da_evidence {
+ da_evidence_id_t key;
+ char *value;
+};
+
+/**
+ * Used to pass properties the API intends to query to the da_atlas_open function
+ * This can be used to improve performance of lookup on properties well-known
+ * to the API user, but not present in the JSON database.
+ */
+struct da_property_decl {
+ const char *name;
+ da_type_t type;
+};
+
+
+#endif /* MOBI_DA_DAC_H */
diff --git a/addons/deviceatlas/dummy/dadwcom.c b/addons/deviceatlas/dummy/dadwcom.c
new file mode 100644
index 0000000..53c5fdf
--- /dev/null
+++ b/addons/deviceatlas/dummy/dadwcom.c
@@ -0,0 +1 @@
+#include <stdio.h>
diff --git a/addons/deviceatlas/dummy/dasch.c b/addons/deviceatlas/dummy/dasch.c
new file mode 100644
index 0000000..53c5fdf
--- /dev/null
+++ b/addons/deviceatlas/dummy/dasch.c
@@ -0,0 +1 @@
+#include <stdio.h>
diff --git a/addons/deviceatlas/dummy/json.c b/addons/deviceatlas/dummy/json.c
new file mode 100644
index 0000000..53c5fdf
--- /dev/null
+++ b/addons/deviceatlas/dummy/json.c
@@ -0,0 +1 @@
+#include <stdio.h>
diff --git a/addons/ot/AUTHORS b/addons/ot/AUTHORS
new file mode 100644
index 0000000..92b2831
--- /dev/null
+++ b/addons/ot/AUTHORS
@@ -0,0 +1 @@
+Miroslav Zagorac <mzagorac@haproxy.com>
diff --git a/addons/ot/MAINTAINERS b/addons/ot/MAINTAINERS
new file mode 100644
index 0000000..92b2831
--- /dev/null
+++ b/addons/ot/MAINTAINERS
@@ -0,0 +1 @@
+Miroslav Zagorac <mzagorac@haproxy.com>
diff --git a/addons/ot/Makefile b/addons/ot/Makefile
new file mode 100644
index 0000000..5bf8d9e
--- /dev/null
+++ b/addons/ot/Makefile
@@ -0,0 +1,73 @@
+# USE_OT : enable the OpenTracing filter
+# OT_DEBUG : compile the OpenTracing filter in debug mode
+# OT_INC : force the include path to libopentracing-c-wrapper
+# OT_LIB : force the lib path to libopentracing-c-wrapper
+# OT_RUNPATH : add libopentracing-c-wrapper RUNPATH to haproxy executable
+# OT_USE_VARS : allows the use of variables for the OpenTracing context
+
+OT_DEFINE =
+OT_CFLAGS =
+OT_LDFLAGS =
+OT_DEBUG_EXT =
+OT_PKGSTAT =
+OTC_WRAPPER = opentracing-c-wrapper
+
+ifneq ($(OT_DEBUG),)
+OT_DEBUG_EXT = _dbg
+OT_DEFINE = -DDEBUG_OT
+endif
+
+ifeq ($(OT_INC),)
+OT_PKGSTAT = $(shell pkg-config --exists $(OTC_WRAPPER)$(OT_DEBUG_EXT); echo $$?)
+OT_CFLAGS = $(shell pkg-config --silence-errors --cflags $(OTC_WRAPPER)$(OT_DEBUG_EXT))
+else
+ifneq ($(wildcard $(OT_INC)/$(OTC_WRAPPER)/.*),)
+OT_CFLAGS = -I$(OT_INC) $(if $(OT_DEBUG),-DOTC_DBG_MEM)
+endif
+endif
+
+ifeq ($(OT_PKGSTAT),)
+ifeq ($(OT_CFLAGS),)
+$(error OpenTracing C wrapper : can't find headers)
+endif
+else
+ifneq ($(OT_PKGSTAT),0)
+$(error OpenTracing C wrapper : can't find package)
+endif
+endif
+
+ifeq ($(OT_LIB),)
+OT_LDFLAGS = $(shell pkg-config --silence-errors --libs $(OTC_WRAPPER)$(OT_DEBUG_EXT))
+else
+ifneq ($(wildcard $(OT_LIB)/lib$(OTC_WRAPPER).*),)
+OT_LDFLAGS = -L$(OT_LIB) -l$(OTC_WRAPPER)$(OT_DEBUG_EXT)
+ifneq ($(OT_RUNPATH),)
+OT_LDFLAGS += -Wl,--rpath,$(OT_LIB)
+endif
+endif
+endif
+
+ifeq ($(OT_LDFLAGS),)
+$(error OpenTracing C wrapper : can't find library)
+endif
+
+OPTIONS_OBJS += \
+ addons/ot/src/cli.o \
+ addons/ot/src/conf.o \
+ addons/ot/src/event.o \
+ addons/ot/src/filter.o \
+ addons/ot/src/group.o \
+ addons/ot/src/http.o \
+ addons/ot/src/opentracing.o \
+ addons/ot/src/parser.o \
+ addons/ot/src/pool.o \
+ addons/ot/src/scope.o \
+ addons/ot/src/util.o
+
+ifneq ($(OT_USE_VARS),)
+OT_DEFINE += -DUSE_OT_VARS
+OPTIONS_OBJS += \
+ addons/ot/src/vars.o
+endif
+
+OT_CFLAGS := $(OT_CFLAGS) -Iaddons/ot/include $(OT_DEFINE)
diff --git a/addons/ot/README b/addons/ot/README
new file mode 100644
index 0000000..a08f471
--- /dev/null
+++ b/addons/ot/README
@@ -0,0 +1,794 @@
+ -----------------------------------------
+ The HAProxy OpenTracing filter (OT)
+ Version 1.0
+ ( Last update: 2020-12-10 )
+ -----------------------------------------
+ Author : Miroslav Zagorac
+ Contact : mzagorac at haproxy dot com
+
+
+SUMMARY
+--------
+
+ 0. Terms
+ 1. Introduction
+ 2. Build instructions
+ 3. Basic concepts in OpenTracing
+ 4. OT configuration
+ 4.1. OT scope
+ 4.2. "ot-tracer" section
+ 4.3. "ot-scope" section
+ 4.4. "ot-group" section
+ 5. Examples
+ 5.1 Benchmarking results
+ 6. OT CLI
+ 7. Known bugs and limitations
+
+
+0. Terms
+---------
+
+* OT: The HAProxy OpenTracing filter
+
+ OT is the HAProxy filter that allows you to send data to distributed
+ tracing systems via the OpenTracing API.
+
+
+1. Introduction
+----------------
+
+Nowadays there is a growing need to divide a process into microservices and
+there is a problem of monitoring the work of the same process. One way to
+solve this problem is to use distributed tracing service in a central location,
+the so-called tracer.
+
+OT is a feature introduced in HAProxy 2.4. This filter enables communication
+via the OpenTracing API with OpenTracing compatible servers (tracers).
+Currently, tracers that support this API include Datadog, Jaeger, LightStep
+and Zipkin.
+
+The OT filter was primarily tested with the Jaeger tracer, while configurations
+for both Datadog and Zipkin tracers were also set in the test directory.
+
+The OT filter is a standard HAProxy filter, so what applies to others also
+applies to this one (of course, by that I mean what is described in the
+documentation, more precisely in the doc/internals/filters.txt file).
+
+The OT filter activation is done explicitly by specifying it in the HAProxy
+configuration. If this is not done, the OT filter in no way participates
+in the work of HAProxy.
+
+As for the impact on HAProxy speed, this is documented with several tests
+located in the test directory, and the result is found in the README-speed-*
+files. In short, the speed of operation depends on the way it is used and
+the complexity of the configuration, from an almost immeasurable impact to
+a significant deceleration (5x and more). I think that in some normal use
+the speed of HAProxy with the filter on will be quite satisfactory with a
+slowdown of less than 4% (provided that no more than 10% of requests are
+sent to the tracer, which is determined by the keyword 'rate-limit').
+
+The OT filter allows intensive use of ACLs, which can be defined anywhere in
+the configuration. Thus, it is possible to use the filter only for those
+connections that are of interest to us.
+
+
+2. Build instructions
+----------------------
+
+OT is the HAProxy filter and as such is compiled together with HAProxy.
+
+To communicate with some OpenTracing compatible tracer, the OT filter uses the
+OpenTracing C Wrapper library (which again uses the OpenTracing CPP library).
+This means that we must have both libraries installed on the system on which
+we want to compile or use HAProxy.
+
+Instructions for compiling and installing both required libraries can be
+found at https://github.com/haproxytech/opentracing-c-wrapper .
+
+Also, to use the OT filter when running HAProxy we need to have an OpenTracing
+plugin for the tracer we want to use. We will return to this later, in
+section 5.
+
+The OT filter can be more easily compiled using the pkg-config tool, if we
+have the OpenTracing C Wrapper library installed so that it contains pkg-config
+files (which have the .pc extension). If the pkg-config tool cannot be used,
+then the path to the directory where the include files and libraries are
+located can be explicitly specified.
+
+Below are examples of the two ways to compile HAProxy with the OT filter, the
+first using the pkg-config tool and the second explicitly specifying the path
+to the OpenTracing C Wrapper include and library.
+
+Note: prompt '%' indicates that the command is executed under an unprivileged
+ user, while prompt '#' indicates that the command is executed under the
+ root user.
+
+Example of compiling HAProxy using the pkg-config tool (assuming the
+OpenTracing C Wrapper library is installed in the /opt directory):
+
+ % PKG_CONFIG_PATH=/opt/lib/pkgconfig make USE_OT=1 TARGET=linux-glibc
+
+The OT filter can also be compiled in debug mode as follows:
+
+ % PKG_CONFIG_PATH=/opt/lib/pkgconfig make USE_OT=1 OT_DEBUG=1 TARGET=linux-glibc
+
+HAProxy compilation example explicitly specifying path to the OpenTracing C
+Wrapper include and library:
+
+ % make USE_OT=1 OT_INC=/opt/include OT_LIB=/opt/lib TARGET=linux-glibc
+
+In case we want to use debug mode, then it looks like this:
+
+ % make USE_OT=1 OT_DEBUG=1 OT_INC=/opt/include OT_LIB=/opt/lib TARGET=linux-glibc
+
+If the library we want to use is not installed on a unix system, then a locally
+installed library can be used (say, which is compiled and installed in the user
+home directory). In this case instead of /opt/include and /opt/lib the
+equivalent paths to the local installation should be specified. Of course,
+in that case the pkg-config tool can also be used if we have a complete
+installation (with .pc files).
+
+Last but not least, if the pkg-config tool is not used when compiling, then
+the HAProxy executable may not be able to find the OpenTracing C Wrapper library
+at startup. This can be solved in several ways, for example using the
+LD_LIBRARY_PATH environment variable which should be set to the path where the
+library is located before starting the HAProxy.
+
+ % LD_LIBRARY_PATH=/opt/lib /path-to/haproxy ...
+
+Another way is to add RUNPATH to HAProxy executable that contains the path to
+the library in question.
+
+ % make USE_OT=1 OT_RUNPATH=1 OT_INC=/opt/include OT_LIB=/opt/lib TARGET=linux-glibc
+
+After HAProxy is compiled, we can check if the OT filter is enabled:
+
+ % ./haproxy -vv | grep opentracing
+ --- command output ----------
+ [ OT] opentracing
+ --- command output ----------
+
+
+3. Basic concepts in OpenTracing
+---------------------------------
+
+Basic concepts of OpenTracing can be read on the OpenTracing documentation
+website https://opentracing.io/docs/overview/.
+
+Here we will list only the most important elements of distributed tracing and
+these are 'trace', 'span' and 'span context'. Trace is a description of the
+complete transaction we want to record in the tracing system. A span is an
+operation that represents a unit of work that is recorded in a tracing system.
+Span context is a group of information related to a particular span that is
+passed on to the system (from service to service). Using this context, we can
+add new spans to already open trace (or supplement data in already open spans).
+
+An individual span may contain one or more tags, logs and baggage items.
+The tag is a key-value element that is valid for the entire span. Log is a
+key-value element that allows you to write some data at a certain time, it
+can be used for debugging. A baggage item is a key-value data pair that can
+be used for the duration of an entire trace, from the moment it is added to
+the span.
+
+
+4. OT configuration
+--------------------
+
+In order for the OT filter to be used, it must be included in the HAProxy
+configuration, in the proxy section (frontend / listen / backend):
+
+ frontend ot-test
+ ...
+ filter opentracing [id <id>] config <file>
+ ...
+
+If no filter id is specified, 'ot-filter' is used as default. The 'config'
+parameter must be specified and it contains the path of the file used to
+configure the OT filter.
+
+
+4.1 OT scope
+-------------
+
+If the filter id is defined for the OT filter, then the OT scope with
+the same name should be defined in the configuration file. In the same
+configuration file we can have several defined OT scopes.
+
+Each OT scope must have a defined (only one) "ot-tracer" section that is
+used to configure the operation of the OT filter and define the used groups
+and scopes.
+
+OT scope starts with the id of the filter specified in square brackets and
+ends with the end of the file or when a new OT scope is defined.
+
+For example, this defines two OT scopes in the same configuration file:
+ [my-first-ot-filter]
+ ot-tracer tracer1
+ ...
+ ot-group group1
+ ...
+ ot-scope scope1
+ ...
+
+ [my-second-ot-filter]
+ ...
+
+
+4.2. "ot-tracer" section
+-------------------------
+
+Only one "ot-tracer" section must be defined for each OT scope.
+
+There are several keywords that must be defined for the OT filter to work.
+These are 'config' which defines the configuration file for the OpenTracing
+API, and 'plugin' which defines the OpenTracing plugin used.
+
+Through optional keywords can be defined ACLs, logging, rate limit, and groups
+and scopes that define the tracing model.
+
+
+ot-tracer <name>
+ A new OT tracer with the name <name> is created.
+
+ Arguments :
+ name - the name of the tracer section
+
+
+ The following keywords are supported in this section:
+ - mandatory keywords:
+ - config
+ - plugin
+
+ - optional keywords:
+ - acl
+ - debug-level
+ - groups
+ - [no] log
+ - [no] option disabled
+ - [no] option dontlog-normal
+ - [no] option hard-errors
+ - rate-limit
+ - scopes
+
+
+acl <aclname> <criterion> [flags] [operator] <value> ...
+ Declare or complete an access list.
+
+ To configure and use the ACL, see section 7 of the HAProxy Configuration
+ Manual.
+
+
+config <file>
+ 'config' is one of the two mandatory keywords associated with the OT tracer
+ configuration. This keyword sets the path of the configuration file for the
+ OpenTracing tracer plugin. To set the contents of this configuration file,
+ it is best to look at the documentation related to the OpenTracing tracer we
+ want to use.
+
+ Arguments :
+ file - the path of the configuration file
+
+
+debug-level <value>
+ This keyword sets the value of the debug level related to the display of
+ debug messages in the OT filter. The 'debug-level' value is binary, ie
+ a single value bit enables or disables the display of the corresponding
+ debug message that uses that bit. The default value is set via the
+ FLT_OT_DEBUG_LEVEL macro in the include/config.h file. Debug level value
+ is used only if the OT filter is compiled with the debug mode enabled,
+ otherwise it is ignored.
+
+ Arguments :
+ value - binary value ranging from 0 to 255 (8 bits)
+
+
+groups <name> ...
+ A list of "ot-group" groups used for the currently defined tracer is declared.
+ Several groups can be specified in one line.
+
+ Arguments :
+ name - the name of the OT group
+
+
+log global
+log <addr> [len <len>] [format <fmt>] <facility> [<level> [<minlevel>]]
+no log
+ Enable per-instance logging of events and traffic.
+
+ To configure and use the logging system, see section 4.2 of the HAProxy
+ Configuration Manual.
+
+
+option disabled
+no option disabled
+ Keyword which turns the operation of the OT filter on or off. By default
+ the filter is on.
+
+
+option dontlog-normal
+no option dontlog-normal
+ Enable or disable logging of normal, successful processing. By default,
+ this option is disabled. For this option to be considered, logging must
+ be turned on.
+
+ See also: 'log' keyword description.
+
+
+option hard-errors
+no option hard-errors
+ During the operation of the filter, some errors may occur, caused by
+ incorrect configuration of the tracer or some error related to the operation
+ of HAProxy. By default, such an error will not interrupt the filter
+ operation for the stream in which the error occurred. If the 'hard-errors'
+ option is enabled, the operation error prohibits all further processing of
+ events and groups in the stream in which the error occurred.
+
+
+plugin <file>
+ 'plugin' is one of the two mandatory keywords associated with the OT tracer
+ configuration. This keyword sets the path of the OpenTracing tracer plugin.
+
+ Arguments :
+ file - the name of the plugin used
+
+
+rate-limit <value>
+ This option allows limiting the use of the OT filter, ie it can be influenced
+ whether the OT filter is activated for a stream or not. Determining whether
+ or not a filter is activated depends on the value of this option that is
+ compared to a randomly selected value when attaching the filter to the stream.
+ By default, the value of this option is set to 100.0, ie the OT filter is
+ activated for each stream.
+
+ Arguments :
+ value - floating point value ranging from 0.0 to 100.0
+
+
+scopes <name> ...
+ This keyword declares a list of "ot-scope" definitions used for the currently
+ defined tracer. Multiple scopes can be specified in the same line.
+
+ Arguments :
+ name - the name of the OT scope
+
+
+4.3. "ot-scope" section
+------------------------
+
+Stream processing begins with filter attachment, then continues with the
+processing of a number of defined events and groups, and ends with filter
+detachment. The "ot-scope" section is used to define actions related to
+individual events. However, this section may be part of a group, so the
+event does not have to be part of the definition.
+
+
+ot-scope <name>
+ Creates a new OT scope definition named <name>.
+
+ Arguments :
+ name - the name of the OT scope
+
+
+ The following keywords are supported in this section:
+ - acl
+ - baggage
+ - event
+ - extract
+ - finish
+ - inject
+ - log
+ - span
+ - tag
+
+
+acl <aclname> <criterion> [flags] [operator] <value> ...
+ Declare or complete an access list.
+
+ To configure and use the ACL, see section 7 of the HAProxy Configuration
+ Manual.
+
+
+baggage <name> <sample> ...
+ Baggage items allow the propagation of data between spans, ie allow the
+ assignment of metadata that is propagated to future child spans.
+ This data is formatted in the style of key-value pairs and is part of
+ the context that can be transferred between processes that are part of
+ a server architecture.
+
+ This keyword allows setting the baggage for the currently active span. The
+ data type is always a string, ie any sample type is converted to a string.
+ The exception is a binary value that is not supported by the OT filter.
+
+ See the 'tag' keyword description for the data type conversion table.
+
+ Arguments :
+ name - key part of a data pair
+ sample - sample expression (value part of a data pair), at least one
+ sample must be present
+
+
+event <name> [{ if | unless } <condition>]
+ Set the event that triggers the 'ot-scope' to which it is assigned.
+ Optionally, it can be followed by an ACL-based condition, in which case it
+ will only be evaluated if the condition is true.
+
+ ACL-based conditions are executed in the context of a stream that processes
+ the client and server connections. To configure and use the ACL, see
+ section 7 of the HAProxy Configuration Manual.
+
+ Arguments :
+ name - the event name
+ condition - a standard ACL-based condition
+
+ Supported events are (the table gives the names of the events in the OT
+ filter and the corresponding equivalent in the SPOE filter):
+
+ -------------------------------------|------------------------------
+ the OT filter | the SPOE filter
+ -------------------------------------|------------------------------
+ on-client-session-start | on-client-session
+ on-frontend-tcp-request | on-frontend-tcp-request
+ on-http-wait-request | -
+ on-http-body-request | -
+ on-frontend-http-request | on-frontend-http-request
+ on-switching-rules-request | -
+ on-backend-tcp-request | on-backend-tcp-request
+ on-backend-http-request | on-backend-http-request
+ on-process-server-rules-request | -
+ on-http-process-request | -
+ on-tcp-rdp-cookie-request | -
+ on-process-sticking-rules-request | -
+ on-client-session-end | -
+ on-server-unavailable | -
+ -------------------------------------|------------------------------
+ on-server-session-start | on-server-session
+ on-tcp-response | on-tcp-response
+ on-http-wait-response | -
+ on-process-store-rules-response | -
+ on-http-response | on-http-response
+ on-server-session-end | -
+ -------------------------------------|------------------------------
+
+
+extract <name-prefix> [use-vars | use-headers]
+ For a more detailed description of the propagation process of the span
+ context, see the description of the keyword 'inject'. Only the process
+ of extracting data from the carrier is described here.
+
+ Arguments :
+ name-prefix - data name prefix (ie key element prefix)
+ use-vars - data is extracted from HAProxy variables
+ use-headers - data is extracted from the HTTP header
+
+
+ Below is an example of using HAProxy variables to transfer span context data:
+
+ --- test/ctx/ot.cfg --------------------------------------------------------
+ ...
+ ot-scope client_session_start_2
+ extract "ot_ctx_1" use-vars
+ span "Client session" child-of "ot_ctx_1"
+ ...
+ ----------------------------------------------------------------------------
+
+
+finish <name> ...
+ Closing a particular span or span context. Instead of the name of the span,
+ there are several specially predefined names with which we can finish certain
+ groups of spans. So it can be used as the name '*req*' for all open spans
+ related to the request channel, '*res*' for all open spans related to the
+ response channel and '*' for all open spans regardless of which channel they
+ are related to. Several spans and/or span contexts can be specified in one
+ line.
+
+ Arguments :
+ name - the name of the span or span context
+
+
+inject <name-prefix> [use-vars] [use-headers]
+ In OpenTracing, the transfer of data related to the tracing process between
+ microservices that are part of a larger service is done through the
+ propagation of the span context. The basic operations that allow us to
+ access and transfer this data are 'inject' and 'extract'.
+
+ 'inject' allows us to extract span context so that the obtained data can
+ be forwarded to another process (microservice) via the selected carrier.
+ 'inject' in the name actually means inject data into carrier. Carrier is
+ an interface here (ie a data structure) that allows us to transfer tracing
+ state from one process to another.
+
+ Data transfer can take place via one of two selected storage methods, the
+ first is by adding data to the HTTP header and the second is by using HAProxy
+ variables. Only data transfer via HTTP header can be used to transfer data
+ to another process (ie microservice). All data is organized in the form of
+ key-value data pairs.
+
+ No matter which data transfer method you use, we need to specify a prefix
+ for the key element. All alphanumerics (lowercase only) and underline
+ character can be used to construct the data name prefix. Uppercase letters
+ can actually be used, but they will be converted to lowercase when creating
+ the prefix.
+
+ Arguments :
+ name-prefix - data name prefix (ie key element prefix)
+ use-vars - HAProxy variables are used to store and transfer data
+ use-headers - HTTP headers are used to store and transfer data
+
+
+ Below is an example of using HTTP headers and variables, and how this is
+ reflected in the internal data of the HAProxy process.
+
+ --- test/ctx/ot.cfg --------------------------------------------------------
+ ...
+ ot-scope client_session_start_1
+ span "HAProxy session" root
+ inject "ot_ctx_1" use-headers use-vars
+ ...
+ ----------------------------------------------------------------------------
+
+ - generated HAProxy variable (key -> value):
+ txn.ot_ctx_1.uberDtraceDid -> 8f1a05a3518d2283:8f1a05a3518d2283:0:1
+
+ - generated HTTP header (key: value):
+ ot_ctx_1-uber-trace-id: 8f1a05a3518d2283:8f1a05a3518d2283:0:1
+
+ Because HAProxy does not allow the '-' character in the variable name (which
+ is automatically generated by the OpenTracing API and on which we have no
+ influence), it is converted to the letter 'D'. We can see that there is no
+ such conversion in the name of the HTTP header because the '-' sign is allowed
+ there. Due to this conversion, initially all uppercase letters are converted
+ to lowercase because otherwise we would not be able to distinguish whether
+ the disputed sign '-' is used or not.
+
+ Thus created HTTP headers and variables are deleted when executing the
+ 'finish' keyword or when detaching the stream from the filter.
+
+
+log <name> <sample> ...
+ This keyword allows setting the log for the currently active span. The
+ data type is always a string, ie any sample type is converted to a string.
+ The exception is a binary value that is not supported by the OT filter.
+
+ See the 'tag' keyword description for the data type conversion table.
+
+ Arguments :
+ name - key part of a data pair
+ sample - sample expression (value part of a data pair), at least one
+ sample must be present
+
+
+span <name> [<reference>]
+ Creating a new span (or referencing an already opened one). If a new span
+ is created, it can be a child of the referenced span, follow from the
+ referenced span, or be root 'span'. In case we did not specify a reference
+ to the previously created span, the new span will become the root span.
+ We need to pay attention to the fact that in one trace there can be only
+ one root span. In case we have specified a non-existent span as a reference,
+ a new span will not be created.
+
+ Arguments :
+ name - the name of the span being created or referenced (operation
+ name)
+ reference - span or span context to which the created span is referenced
+
+
+tag <name> <sample> ...
+ This keyword allows setting a tag for the currently active span. The first
+ argument is the name of the tag (tag ID) and the second its value. A value
+ can consist of one or more data. If the value is only one data, then the
+ type of that data depends on the type of the HAProxy sample. If the value
+ contains more data, then the data type is string. The data conversion table
+ is below:
+
+ HAProxy sample data type | the OpenTracing data type
+ --------------------------+---------------------------
+ NULL | NULL
+ BOOL | BOOL
+ INT32 | INT64
+ UINT32 | UINT64
+ INT64 | INT64
+ UINT64 | UINT64
+ IPV4 | STRING
+ IPV6 | STRING
+ STRING | STRING
+ BINARY | UNSUPPORTED
+ --------------------------+---------------------------
+
+ Arguments :
+ name - key part of a data pair
+ sample - sample expression (value part of a data pair), at least one
+ sample must be present
+
+
+4.4. "ot-group" section
+------------------------
+
+This section allows us to define a group of OT scopes, that is not activated
+via an event but is triggered from TCP or HTTP rules. More precisely, these
+are the following rules: 'tcp-request', 'tcp-response', 'http-request',
+'http-response' and 'http-after-response'. These rules can be defined in the
+HAProxy configuration file.
+
+
+ot-group <name>
+ Creates a new OT group definition named <name>.
+
+ Arguments :
+ name - the name of the OT group
+
+
+ The following keywords are supported in this section:
+ - scopes
+
+
+scopes <name> ...
+ 'ot-scope' sections that are part of the specified group are defined. If
+ the mentioned 'ot-scope' sections are used only in some OT group, they do
+ not have to have defined events. Several 'ot-scope' sections can be
+ specified in one line.
+
+ Arguments :
+ name - the name of the 'ot-scope' section
+
+
+5. Examples
+------------
+
+Several examples of the OT filter configuration can be found in the test
+directory. A brief description of the prepared configurations follows:
+
+cmp - the configuration very similar to that of the spoa-opentracing project.
+ It was made to compare the speed of the OT filter with the
+ implementation of distributed tracing via spoa-opentracing application.
+
+sa - the configuration in which all possible events are used.
+
+ctx - the configuration is very similar to the previous one, with the only
+ difference that the spans are opened using the span context as a span
+ reference.
+
+fe be - a slightly more complicated example of the OT filter configuration
+ that uses two cascaded HAProxy services. The span context between
+ HAProxy processes is transmitted via the HTTP header.
+
+empty - the empty configuration in which the OT filter is initialized but
+ no event is triggered. It is not very usable, except to check the
+ behavior of the OT filter in the case of a similar configuration.
+
+
+In order to be able to collect data (and view results via the web interface)
+we need to install some of the supported tracers. We will use the Jaeger
+tracer as an example. Installation instructions can be found on the website
+https://www.jaegertracing.io/download/. For the impatient, here we will list
+how the image to test the operation of the tracer system can be installed
+without much reading of the documentation.
+
+ # docker pull jaegertracing/all-in-one:latest
+ # docker run -d --name jaeger -e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
+ -p 5775:5775/udp -p 6831:6831/udp -p 6832:6832/udp -p 5778:5778 \
+ -p 16686:16686 -p 14268:14268 -p 9411:9411 jaegertracing/all-in-one:latest
+
+The last command will also initialize and run the Jaeger container. If we
+want to use that container later, it can be started and stopped in the classic
+way, using the 'docker container start/stop' commands.
+
+
+In order to be able to use any of the configurations from the test directory,
+we must also have a tracer plugin in that directory (all examples use the
+Jaeger tracer plugin). The simplest way is to download the tracer plugin
+using the already prepared shell script get-opentracing-plugins.sh.
+The script accepts one argument, the directory in which the download is made.
+If run without an argument, the script downloads all plugins to the current
+directory.
+
+ % ./get-opentracing-plugins.sh
+
+After that, we can run one of the pre-configured configurations using the
+provided script run-xxx.sh (where xxx is the name of the configuration being
+tested). For example:
+
+ % ./run-sa.sh
+
+The script will create a new log file each time it is run (because part of the
+log file name is the start time of the script).
+
+Eh, someone will surely notice that all test configurations use the Jaeger
+tracing plugin that cannot be downloaded using the get-opentracing-plugins.sh
+script. Unfortunately, the latest precompiled version that can be downloaded
+is 0.4.2, for newer ones only the source code can be found. Version 0.4.2 has
+a bug that can cause the operation of the OT filter to get stuck, so it is
+better not to use this version. Here is the procedure by which we can compile
+a newer version of the plugin (in our example it is 0.5.0).
+
+Important note: the GCC version must be at least 4.9 or later.
+
+ % wget https://github.com/jaegertracing/jaeger-client-cpp/archive/v0.5.0.tar.gz
+ % tar xf v0.5.0.tar.gz
+ % cd jaeger-client-cpp-0.5.0
+ % mkdir build
+ % cd build
+ % cmake -DCMAKE_INSTALL_PREFIX=/opt -DJAEGERTRACING_PLUGIN=ON -DHUNTER_CONFIGURATION_TYPES=Release -DHUNTER_BUILD_SHARED_LIBS=OFF ..
+ % make
+
+After the plugin is compiled, it will be in the current directory. The name
+of the plugin is libjaegertracing_plugin.so.
+
+
+5.1. Benchmarking results
+--------------------------
+
+To check the operation of the OT filter, several different test configurations
+have been made which are located in the test directory. The test results of
+the same configurations (with the names README-speed-xxx, where xxx is the name
+of the configuration being tested) are also in the directory of the same name.
+
+All tests were performed on the same debian 9.13 system, CPU i7-4770, 32 GB RAM.
+For the purpose of testing, the thttpd web server on port 8000 was used.
+Testing was done with the wrk utility running via run-xxx.sh scripts; that is,
+via the test-speed.sh script that is run as follows:
+
+ % ./test-speed.sh all
+
+The above mentioned thttpd web server is run from that script and it should be
+noted that we need to have the same installed on the system (or change the path
+to the thttpd server in that script if it is installed elsewhere).
+
+Each test is performed several times over a period of 5 minutes per individual
+test. The only difference when running the tests for the same configuration
+was in changing the 'rate-limit' parameter (and the 'option disabled' option),
+which is set to the following values: 100.0, 50.0, 10.0, 2.5 and 0.0 percent.
+Then a test is performed with the OT filter active but disabled for request
+processing ('option disabled' is included in the ot.cfg configuration). In
+the last test, the OT filter is not used at all, ie it is not active and does
+not affect the operation of HAProxy in any way.
+
+
+6. OT CLI
+----------
+
+Via the HAProxy CLI interface we can find out the current status of the OT
+filter and change several of its settings.
+
+All supported CLI commands can be found in the following way, using the
+socat utility with the assumption that the HAProxy CLI socket path is set
+to /tmp/haproxy.sock (of course, instead of socat, nc or other utility can
+be used with a change in arguments when running the same):
+
+ % echo "help" | socat - UNIX-CONNECT:/tmp/haproxy.sock | grep flt-ot
+ --- command output ----------
+ flt-ot debug [level] : set the OT filter debug level (default: get current debug level)
+ flt-ot disable : disable the OT filter
+ flt-ot enable : enable the OT filter
+ flt-ot soft-errors : turning off hard-errors mode
+ flt-ot hard-errors : enabling hard-errors mode
+ flt-ot logging [state] : set logging state (default: get current logging state)
+ flt-ot rate [value] : set the rate limit (default: get current rate value)
+ flt-ot status : show the OT filter status
+ --- command output ----------
+
+'flt-ot debug' can only be used in case the OT filter is compiled with the
+debug mode enabled.
+
+
+7. Known bugs and limitations
+------------------------------
+
+The name of the span context definition can contain only letters, numbers and
+characters '_' and '-'. Also, all uppercase letters in the name are converted
+to lowercase. The character '-' is converted internally to the 'D' character,
+and since a HAProxy variable is generated from that name, this should be taken
+into account if we want to use it somewhere in the HAProxy configuration.
+The above mentioned span context is used in the 'inject' and 'extract' keywords.
+
+Let's look a little at the example test/fe-be (configurations are in the
+test/fe and test/be directories, 'fe' is here the abbreviation for frontend
+and 'be' for backend). In case we have the 'rate-limit' set to a value less
+than 100.0, then distributed tracing will not be started with each new HTTP
+request. It also means that the span context will not be delivered (via the
+HTTP header) to the backend HAProxy process. The 'rate-limit' on the backend
+HAProxy must be set to 100.0, but because the frontend HAProxy does not send
+a span context every time, all such cases will cause an error to be reported
+on the backend server. Therefore, the 'hard-errors' option must be set on the
+backend server, so that processing on that stream is stopped as soon as the
+first error occurs. Such cases will slow down the backend server's response
+a bit (in the example in question it is about 3%).
diff --git a/addons/ot/README-func b/addons/ot/README-func
new file mode 100644
index 0000000..273c7f9
--- /dev/null
+++ b/addons/ot/README-func
@@ -0,0 +1,298 @@
+Here I will write down some specifics of certain parts of the source, these are
+just some of my thoughts and clues and they are probably not too important for
+a wider audience.
+
+src/parser.c
+------------------------------------------------------------------------------
+The first thing to run when starting the HAProxy is the flt_ot_parse() function
+which actually parses the filter configuration.
+
+In case of correct configuration, the function returns ERR_NONE (or 0), while
+in case of incorrect configuration it returns the combination of ERR_* flags
+(ERR_NONE here does not belong to that bit combination because its value is 0).
+
+One of the parameters of the function is <char **err> in which an error message
+can be returned, if it exists. In that case the return value of the function
+should have some of the ERR_* flags set.
+
+Let's look at an example of the following filter configuration to see what
+the function call sequence looks like.
+
+Filter configuration line:
+ filter opentracing [id <id>] config <file>
+
+Function call sequence:
+ flt_ot_parse(<err>) {
+ /* Initialization of the filter configuration data. */
+ flt_ot_conf_init() {
+ }
+
+ /* Setting the filter name. */
+ flt_ot_parse_keyword(<err>) {
+ flt_ot_parse_strdup(<err>) {
+ }
+ }
+
+ /* Setting the filter configuration file name. */
+ flt_ot_parse_keyword(<err>) {
+ flt_ot_parse_strdup(<err>) {
+ }
+ }
+
+ /* Checking the configuration of the filter. */
+ flt_ot_parse_cfg(<err>) {
+ flt_ot_parse_cfg_tracer() {
+ }
+ ...
+ flt_ot_post_parse_cfg_tracer() {
+ }
+ flt_ot_parse_cfg_group() {
+ }
+ ...
+ flt_ot_post_parse_cfg_group() {
+ }
+ flt_ot_parse_cfg_scope() {
+ }
+ ...
+ flt_ot_post_parse_cfg_scope() {
+ }
+ }
+ }
+
+Checking the filter configuration is actually much more complicated, only the
+name of the main function flt_ot_parse_cfg() that does it is listed here.
+
+All functions that use the <err> parameter should set the error status using
+that pointer. All other functions (actually these are all functions called
+by the flt_ot_parse_cfg() function) should set the error message using the
+ha_warning()/ha_alert() HAProxy functions. Of course, the return value (the
+mentioned combination of ERR_* bits) is set in all these functions and it
+indicates whether the filter configuration is correct or not.
+
+
+src/group.c
+------------------------------------------------------------------------------
+The OT filter allows the use of groups within which one or more 'ot-scope'
+declarations can be found. These groups can be used using several HAProxy
+rules, more precisely 'http-request', 'http-response', 'tcp-request',
+'tcp-response' and 'http-after-response' rules.
+
+Configuration example for the specified rules:
+ <rule> ot-group <filter-id> <group-name> [ { if | unless } <condition> ]
+
+Parsing each of these rules is performed by the flt_ot_group_parse() function.
+After parsing the configuration, its verification is performed via the
+flt_ot_group_check() function. One parsing function and one configuration
+check function are called for each defined rule.
+
+ flt_ot_group_parse(<err>) {
+ }
+ ...
+ flt_ot_group_check() {
+ }
+ ...
+
+
+When deinitializing the module, the function flt_ot_group_release() is called
+(which is actually a release_ptr callback function from one of the above
+rules). One callback function is called for each defined rule.
+
+ flt_ot_group_release() {
+ }
+ ...
+
+
+src/filter.c
+------------------------------------------------------------------------------
+After parsing and checking the configuration, the flt_ot_check() function is
+called which associates the 'ot-group' and 'ot-scope' definitions with their
+declarations. This procedure concludes the configuration of the OT filter and
+after that its initialization is possible.
+
+ flt_ops.check = flt_ot_check;
+ flt_ot_check() {
+ }
+
+
+The initialization of the OT filter is done via the flt_ot_init() callback
+function. In this function the OpenTracing API library is also initialized.
+It is also possible to initialize for each thread individually, but nothing
+is being done here for now.
+
+ flt_ops.init = flt_ot_init;
+ flt_ot_init() {
+ flt_ot_cli_init() {
+ }
+ /* Initialization of the OpenTracing API. */
+ ot_init(<err>) {
+ }
+ }
+
+ flt_ops.init_per_thread = flt_ot_init_per_thread;
+ flt_ot_init_per_thread() {
+ }
+ ...
+
+
+After the filter instance is created and attached to the stream, the
+flt_ot_attach() function is called. In this function a new OT runtime
+context is created, and flags are set that define which analyzers are used.
+
+ flt_ops.attach = flt_ot_attach;
+ flt_ot_attach() {
+ /* In case OT is disabled, nothing is done on this stream further. */
+ flt_ot_runtime_context_init(<err>) {
+ flt_ot_pool_alloc() {
+ }
+ /* Initializing and setting the variable 'sess.ot.uuid'. */
+ if (flt_ot_var_register(<err>) != -1) {
+ flt_ot_var_set(<err>) {
+ }
+ }
+ }
+ }
+
+
+When a stream is started, this function is called. At the moment, nothing
+is being done in it.
+
+ flt_ops.stream_start = flt_ot_stream_start;
+ flt_ot_stream_start() {
+ }
+
+
+Channel analyzers are called when executing individual filter events.
+For each of the four analyzer functions, the events associated with them
+are listed.
+
+ Events:
+ - 1 'on-client-session-start'
+ - 15 'on-server-session-start'
+------------------------------------------------------------------------
+ flt_ops.channel_start_analyze = flt_ot_channel_start_analyze;
+ flt_ot_channel_start_analyze() {
+ flt_ot_event_run() {
+ /* Run event. */
+ flt_ot_scope_run() {
+ /* Processing of all ot-scopes defined for the current event. */
+ }
+ }
+ }
+
+
+ Events:
+ - 2 'on-frontend-tcp-request'
+ - 4 'on-http-body-request'
+ - 5 'on-frontend-http-request'
+ - 6 'on-switching-rules-request'
+ - 7 'on-backend-tcp-request'
+ - 8 'on-backend-http-request'
+ - 9 'on-process-server-rules-request'
+ - 10 'on-http-process-request'
+ - 11 'on-tcp-rdp-cookie-request'
+ - 12 'on-process-sticking-rules-request'
+ - 16 'on-tcp-response'
+ - 18 'on-process-store-rules-response'
+ - 19 'on-http-response'
+------------------------------------------------------------------------
+ flt_ops.channel_pre_analyze = flt_ot_channel_pre_analyze;
+ flt_ot_channel_pre_analyze() {
+ flt_ot_event_run() {
+ /* Run event. */
+ flt_ot_scope_run() {
+ /* Processing of all ot-scopes defined for the current event. */
+ }
+ }
+ }
+
+
+ Events:
+ - 3 'on-http-wait-request'
+ - 17 'on-http-wait-response'
+------------------------------------------------------------------------
+ flt_ops.channel_post_analyze = flt_ot_channel_post_analyze;
+ flt_ot_channel_post_analyze() {
+ flt_ot_event_run() {
+ /* Run event. */
+ flt_ot_scope_run() {
+ /* Processing of all ot-scopes defined for the current event. */
+ }
+ }
+ }
+
+
+ Events:
+ - 13 'on-client-session-end'
+ - 14 'on-server-unavailable'
+ - 20 'on-server-session-end'
+------------------------------------------------------------------------
+ flt_ops.channel_end_analyze = flt_ot_channel_end_analyze;
+ flt_ot_channel_end_analyze() {
+ flt_ot_event_run() {
+ /* Run event. */
+ flt_ot_scope_run() {
+ /* Processing of all ot-scopes defined for the current event. */
+ }
+ }
+
+ /* In case the backend server does not work, event 'on-server-unavailable'
+ is called here before event 'on-client-session-end'. */
+ if ('on-server-unavailable') {
+ flt_ot_event_run() {
+ /* Run event. */
+ flt_ot_scope_run() {
+ /* Processing of all ot-scopes defined for the current event. */
+ }
+ }
+ }
+ }
+
+
+After the stream has stopped, this function is called. At the moment, nothing
+is being done in it.
+
+ flt_ops.stream_stop = flt_ot_stream_stop;
+ flt_ot_stream_stop() {
+ }
+
+
+Then, before the filter instance is detached from the stream, the following
+function is called. It deallocates the runtime context of the OT filter.
+
+ flt_ops.detach = flt_ot_detach;
+ flt_ot_detach() {
+ flt_ot_runtime_context_free() {
+ flt_ot_pool_free() {
+ }
+ }
+ }
+
+
+Module deinitialization begins with deinitialization of individual threads
+(as many threads as configured for the HAProxy process). Because nothing
+special is connected to the process threads, nothing is done in this function.
+
+ flt_ops.deinit_per_thread = flt_ot_deinit_per_thread;
+ flt_ot_deinit_per_thread() {
+ }
+ ...
+
+
+For this function see the above description related to the src/group.c file.
+
+ flt_ot_group_release() {
+ }
+ ...
+
+
+Module deinitialization ends with the flt_ot_deinit() function, in which all
+memory occupied by module operation (and OpenTracing API operation, of course)
+is freed.
+
+ flt_ops.deinit = flt_ot_deinit;
+ flt_ot_deinit() {
+ ot_close() {
+ }
+ flt_ot_conf_free() {
+ }
+ }
diff --git a/addons/ot/README-pool b/addons/ot/README-pool
new file mode 100644
index 0000000..8164b04
--- /dev/null
+++ b/addons/ot/README-pool
@@ -0,0 +1,25 @@
+Used pools:
+
+-------------------------------+-----------------------------+-----------------------------
+ head / name | size | define
+-------------------------------+-----------------------------+-----------------------------
+ pool_head_ buffer | global.tune.bufsize = 16384 | USE_POOL_BUFFER
+ pool_head_ trash | 32 + 16384 | USE_TRASH_CHUNK
+-------------------------------+-----------------------------+-----------------------------
+ pool_head_ ot_scope_span | 96 | USE_POOL_OT_SCOPE_SPAN
+ pool_head_ ot_scope_context | 64 | USE_POOL_OT_SCOPE_CONTEXT
+ pool_head_ ot_runtime_context | 128 | USE_POOL_OT_RUNTIME_CONTEXT
+ pool_head_ ot_span_context | 96 | USE_POOL_OT_SPAN_CONTEXT
+-------------------------------+-----------------------------+-----------------------------
+
+By defining individual definitions in file include/config.h, it is possible to
+switch individual pools on / off. If a particular pool is not used, memory is
+used in a 'normal' way instead, using malloc()/free() functions.
+
+This is intended only as a debugging aid for the program, i.e. comparing the
+speed of operation using different methods of working with memory.
+
+In general, it would be better to use memory pools, due to less fragmentation
+of memory space after long operation of the program. The speed of operation
+is similar to when using standard allocation functions (when testing it was
+shown that pool use was faster by about 1%).
diff --git a/addons/ot/include/cli.h b/addons/ot/include/cli.h
new file mode 100644
index 0000000..80ed6e8
--- /dev/null
+++ b/addons/ot/include/cli.h
@@ -0,0 +1,50 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_CLI_H_
+#define _OPENTRACING_CLI_H_
+
+#define FLT_OT_CLI_CMD "flt-ot"
+
+#define FLT_OT_CLI_LOGGING_OFF "off"
+#define FLT_OT_CLI_LOGGING_ON "on"
+#define FLT_OT_CLI_LOGGING_NOLOGNORM "dontlog-normal"
+#define FLT_OT_CLI_LOGGING_STATE(a) ((a) & FLT_OT_LOGGING_ON) ? (((a) & FLT_OT_LOGGING_NOLOGNORM) ? "enabled, " FLT_OT_CLI_LOGGING_NOLOGNORM : "enabled") : "disabled"
+
+#define FLT_OT_CLI_MSG_CAT(a) ((a) == NULL) ? "" : (a), ((a) == NULL) ? "" : "\n"
+
+enum FLT_OT_LOGGING_enum {
+ FLT_OT_LOGGING_OFF = 0,
+ FLT_OT_LOGGING_ON = 1 << 0,
+ FLT_OT_LOGGING_NOLOGNORM = 1 << 1,
+};
+
+
+void flt_ot_cli_init(void);
+
+#endif /* _OPENTRACING_CLI_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/conf.h b/addons/ot/include/conf.h
new file mode 100644
index 0000000..c9c4863
--- /dev/null
+++ b/addons/ot/include/conf.h
@@ -0,0 +1,228 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_CONF_H_
+#define _OPENTRACING_CONF_H_
+
+#define FLT_OT_CONF(f) ((struct flt_ot_conf *)FLT_CONF(f))
+#define FLT_OT_CONF_HDR_FMT "%p:{ { '%.*s' %zu %d } "
+#define FLT_OT_CONF_HDR_ARGS(a,b) (a), (int)(a)->b##_len, (a)->b, (a)->b##_len, (a)->cfg_line
+#define FLT_OT_STR_HDR_ARGS(a,b) (a)->b, (a)->b##_len
+
+#define FLT_OT_DBG_CONF_SAMPLE_EXPR(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ '%s' %p }", (f), (a), (a)->value, (a)->expr)
+
+#define FLT_OT_DBG_CONF_SAMPLE(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ '%s' '%s' %s %d }", \
+ (f), (a), (a)->key, (a)->value, flt_ot_list_debug(&((a)->exprs)), (a)->num_exprs)
+
+#define FLT_OT_DBG_CONF_STR(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "}", FLT_OT_CONF_HDR_ARGS(a, str))
+
+#define FLT_OT_DBG_CONF_CONTEXT(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "0x%02hhx }", FLT_OT_CONF_HDR_ARGS(a, id), (a)->flags)
+
+#define FLT_OT_DBG_CONF_SPAN(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "'%s' %zu %d '%s' %zu %hhu 0x%02hhx %s %s %s }", \
+ FLT_OT_CONF_HDR_ARGS(a, id), FLT_OT_STR_HDR_ARGS(a, ref_id), (a)->ref_type, \
+ FLT_OT_STR_HDR_ARGS(a, ctx_id), (a)->flag_root, (a)->ctx_flags, flt_ot_list_debug(&((a)->tags)), \
+ flt_ot_list_debug(&((a)->logs)), flt_ot_list_debug(&((a)->baggages)))
+
+#define FLT_OT_DBG_CONF_SCOPE(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "%hhu %d %s %p %s %s %s }", \
+ FLT_OT_CONF_HDR_ARGS(a, id), (a)->flag_used, (a)->event, flt_ot_list_debug(&((a)->acls)), \
+ (a)->cond, flt_ot_list_debug(&((a)->contexts)), flt_ot_list_debug(&((a)->spans)), \
+ flt_ot_list_debug(&((a)->finish)))
+
+#define FLT_OT_DBG_CONF_GROUP(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "%hhu %s }", \
+ FLT_OT_CONF_HDR_ARGS(a, id), (a)->flag_used, flt_ot_list_debug(&((a)->ph_scopes)))
+
+#define FLT_OT_DBG_CONF_PH(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "%p }", FLT_OT_CONF_HDR_ARGS(a, id), (a)->ptr)
+
+#define FLT_OT_DBG_CONF_TRACER(f,a) \
+ FLT_OT_DBG(3, f FLT_OT_CONF_HDR_FMT "'%s' %p '%s' %p %u %hhu %hhu 0x%02hhx %p:%s 0x%08x %s %s %s }", \
+ FLT_OT_CONF_HDR_ARGS(a, id), (a)->config, (a)->cfgbuf, (a)->plugin, (a)->tracer, (a)->rate_limit, (a)->flag_harderr, \
+ (a)->flag_disabled, (a)->logging, &((a)->proxy_log), flt_ot_list_debug(&((a)->proxy_log.loggers)), (a)->analyzers, \
+ flt_ot_list_debug(&((a)->acls)), flt_ot_list_debug(&((a)->ph_groups)), flt_ot_list_debug(&((a)->ph_scopes)))
+
+#define FLT_OT_DBG_CONF(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ %p '%s' '%s' %p %s %s }", \
+ (f), (a), (a)->proxy, (a)->id, (a)->cfg_file, (a)->tracer, \
+ flt_ot_list_debug(&((a)->groups)), flt_ot_list_debug(&((a)->scopes)))
+
+#define FLT_OT_STR_HDR(a) \
+ struct { \
+ char *a; \
+ size_t a##_len; \
+ }
+
+#define FLT_OT_CONF_HDR(a) \
+ struct { \
+ FLT_OT_STR_HDR(a); \
+ int cfg_line; \
+ struct list list; \
+ }
+
+
+struct flt_ot_conf_hdr {
+ FLT_OT_CONF_HDR(id);
+};
+
+/* flt_ot_conf_sample->exprs */
+struct flt_ot_conf_sample_expr {
+ FLT_OT_CONF_HDR(value); /* The sample value. */
+ struct sample_expr *expr; /* The sample expression. */
+};
+
+/*
+ * flt_ot_conf_span->tags
+ * flt_ot_conf_span->logs
+ * flt_ot_conf_span->baggages
+ */
+struct flt_ot_conf_sample {
+ FLT_OT_CONF_HDR(key); /* The sample name. */
+ char *value; /* The sample content. */
+ struct list exprs; /* Used to chain sample expressions. */
+ int num_exprs; /* Number of defined expressions. */
+};
+
+/* flt_ot_conf_scope->finish */
+struct flt_ot_conf_str {
+ FLT_OT_CONF_HDR(str); /* String content/length. */
+};
+
+/* flt_ot_conf_scope->contexts */
+struct flt_ot_conf_context {
+ FLT_OT_CONF_HDR(id); /* The name of the context. */
+ uint8_t flags; /* The type of storage from which the span context is extracted. */
+};
+
+/* flt_ot_conf_scope->spans */
+struct flt_ot_conf_span {
+ FLT_OT_CONF_HDR(id); /* The name of the span. */
+ FLT_OT_STR_HDR(ref_id); /* The reference name, if used. */
+ int ref_type; /* The reference type. */
+ FLT_OT_STR_HDR(ctx_id); /* The span context name, if used. */
+ uint8_t ctx_flags; /* The type of storage used for the span context. */
+ bool flag_root; /* Whether this is a root span. */
+ struct list tags; /* The set of key:value tags. */
+ struct list logs; /* The set of key:value logs. */
+ struct list baggages; /* The set of key:value baggage items. */
+};
+
+struct flt_ot_conf_scope {
+ FLT_OT_CONF_HDR(id); /* The scope name. */
+ bool flag_used; /* The indication that the scope is being used. */
+ int event; /* FLT_OT_EVENT_* */
+ struct list acls; /* ACLs declared on this scope. */
+ struct acl_cond *cond; /* ACL condition to meet. */
+ struct list contexts; /* Declared contexts. */
+ struct list spans; /* Declared spans. */
+ struct list finish; /* The list of spans to be finished. */
+};
+
+struct flt_ot_conf_group {
+ FLT_OT_CONF_HDR(id); /* The group name. */
+ bool flag_used; /* The indication that the group is being used. */
+ struct list ph_scopes; /* List of all used scopes. */
+};
+
+struct flt_ot_conf_ph {
+ FLT_OT_CONF_HDR(id); /* The scope/group name. */
+ void *ptr; /* Pointer to real placeholder structure. */
+};
+#define flt_ot_conf_ph_group flt_ot_conf_ph
+#define flt_ot_conf_ph_scope flt_ot_conf_ph
+
+struct flt_ot_conf_tracer {
+ FLT_OT_CONF_HDR(id); /* The tracer name. */
+ char *config; /* The OpenTracing configuration file name. */
+ char *cfgbuf; /* The OpenTracing configuration. */
+ char *plugin; /* The OpenTracing plugin library file name. */
+ struct otc_tracer *tracer; /* The OpenTracing tracer handle. */
+ uint32_t rate_limit; /* [0 2^32-1] <-> [0.0 100.0] */
+ bool flag_harderr; /* [0 1] */
+ bool flag_disabled; /* [0 1] */
+ uint8_t logging; /* [0 1 3] */
+ struct proxy proxy_log; /* The log server list. */
+ uint analyzers; /* Defined channel analyzers. */
+ struct list acls; /* ACLs declared on this tracer. */
+ struct list ph_groups; /* List of all used groups. */
+ struct list ph_scopes; /* List of all used scopes. */
+};
+
+struct flt_ot_counters {
+#ifdef DEBUG_OT
+ struct {
+ bool flag_used; /* Whether this event is used. */
+ uint64_t htx[2]; /* htx_is_empty() function result counter. */
+ } event[FLT_OT_EVENT_MAX];
+#endif
+
+ uint64_t disabled[2]; /* How many times stream processing is disabled. */
+};
+
+/* The OpenTracing filter configuration. */
+struct flt_ot_conf {
+ struct proxy *proxy; /* Proxy owning the filter. */
+ char *id; /* The OpenTracing filter id. */
+ char *cfg_file; /* The OpenTracing filter configuration file name. */
+ struct flt_ot_conf_tracer *tracer; /* There can only be one tracer. */
+ struct list groups; /* List of all available groups. */
+ struct list scopes; /* List of all available scopes. */
+ struct flt_ot_counters cnt; /* Various counters related to filter operation. */
+};
+
+
+#define flt_ot_conf_ph_group_free flt_ot_conf_ph_free
+#define flt_ot_conf_ph_scope_free flt_ot_conf_ph_free
+
+struct flt_ot_conf_ph *flt_ot_conf_ph_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_ph_free(struct flt_ot_conf_ph **ptr);
+struct flt_ot_conf_sample_expr *flt_ot_conf_sample_expr_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_sample_expr_free(struct flt_ot_conf_sample_expr **ptr);
+struct flt_ot_conf_sample *flt_ot_conf_sample_init(char **args, int linenum, struct list *head, char **err);
+void flt_ot_conf_sample_free(struct flt_ot_conf_sample **ptr);
+struct flt_ot_conf_str *flt_ot_conf_str_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_str_free(struct flt_ot_conf_str **ptr);
+struct flt_ot_conf_context *flt_ot_conf_context_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_context_free(struct flt_ot_conf_context **ptr);
+struct flt_ot_conf_span *flt_ot_conf_span_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_span_free(struct flt_ot_conf_span **ptr);
+struct flt_ot_conf_scope *flt_ot_conf_scope_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_scope_free(struct flt_ot_conf_scope **ptr);
+struct flt_ot_conf_group *flt_ot_conf_group_init(const char *id, int linenum, struct list *head, char **err);
+void flt_ot_conf_group_free(struct flt_ot_conf_group **ptr);
+struct flt_ot_conf_tracer *flt_ot_conf_tracer_init(const char *id, int linenum, char **err);
+void flt_ot_conf_tracer_free(struct flt_ot_conf_tracer **ptr);
+struct flt_ot_conf *flt_ot_conf_init(struct proxy *px);
+void flt_ot_conf_free(struct flt_ot_conf **ptr);
+
+#endif /* _OPENTRACING_CONF_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/config.h b/addons/ot/include/config.h
new file mode 100644
index 0000000..3b26365
--- /dev/null
+++ b/addons/ot/include/config.h
@@ -0,0 +1,46 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_CONFIG_H_
+#define _OPENTRACING_CONFIG_H_
+
+#undef DEBUG_OT_SYSTIME
+#define USE_POOL_BUFFER
+#define USE_POOL_OT_SPAN_CONTEXT
+#define USE_POOL_OT_SCOPE_SPAN
+#define USE_POOL_OT_SCOPE_CONTEXT
+#define USE_POOL_OT_RUNTIME_CONTEXT
+#define USE_TRASH_CHUNK
+
+#define FLT_OT_ID_MAXLEN 64
+#define FLT_OT_MAXTAGS 8
+#define FLT_OT_MAXBAGGAGES 8
+#define FLT_OT_RATE_LIMIT_MAX 100.0
+#define FLT_OT_DEBUG_LEVEL 0b00001111
+
+#endif /* _OPENTRACING_CONFIG_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/debug.h b/addons/ot/include/debug.h
new file mode 100644
index 0000000..c749960
--- /dev/null
+++ b/addons/ot/include/debug.h
@@ -0,0 +1,104 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_DEBUG_H_
+#define _OPENTRACING_DEBUG_H_
+
+#ifdef DEBUG_FULL
+# define DEBUG_OT
+#endif
+
+#ifdef DEBUG_OT
+# ifdef DEBUG_OT_SYSTIME
+# define FLT_OT_DBG_FMT(f) "[% 2d] %ld.%06ld [" FLT_OT_SCOPE "]: " f, tid, date.tv_sec, date.tv_usec
+# else
+# define FLT_OT_DBG_FMT(f) "[% 2d] %11.6f [" FLT_OT_SCOPE "]: " f, tid, FLT_OT_TV_UDIFF(&(flt_ot_debug.start), &date) / 1e6
+# endif
+# define FLT_OT_DBG_INDENT " "
+# define FLT_OT_DBG(l,f, ...) \
+ do { \
+ if (!(l) || (flt_ot_debug.level & (1 << (l)))) \
+ (void)fprintf(stderr, FLT_OT_DBG_FMT("%.*s" f "\n"), \
+ dbg_indent_level, FLT_OT_DBG_INDENT, ##__VA_ARGS__); \
+ } while (0)
+# define FLT_OT_FUNC(f, ...) do { FLT_OT_DBG(1, "%s(" f ") {", __func__, ##__VA_ARGS__); dbg_indent_level += 3; } while (0)
+# define FLT_OT_RETURN(a) do { dbg_indent_level -= 3; FLT_OT_DBG(1, "}"); return a; } while (0)
+# define FLT_OT_RETURN_EX(a,t,f) do { dbg_indent_level -= 3; { t _r = (a); FLT_OT_DBG(1, "} = " f, _r); return _r; } } while (0)
+# define FLT_OT_RETURN_INT(a) FLT_OT_RETURN_EX((a), int, "%d")
+# define FLT_OT_RETURN_PTR(a) FLT_OT_RETURN_EX((a), void *, "%p")
+# define FLT_OT_DBG_IFDEF(a,b) a
+# define FLT_OT_DBG_ARGS(a, ...) a, ##__VA_ARGS__
+# define FLT_OT_DBG_BUF(a,b) do { FLT_OT_DBG((a), "%p:{ %zu %p %zu %zu }", (b), (b)->size, (b)->area, (b)->data, (b)->head); } while (0)
+
+struct flt_ot_debug {
+#ifndef DEBUG_OT_SYSTIME
+ struct timeval start;
+#endif
+ uint8_t level;
+};
+
+
+extern THREAD_LOCAL int dbg_indent_level;
+extern struct flt_ot_debug flt_ot_debug;
+
+#else
+
+# define FLT_OT_DBG(...) while (0)
+# define FLT_OT_FUNC(...) while (0)
+# define FLT_OT_RETURN(a) return a
+# define FLT_OT_RETURN_EX(a,t,f) return a
+# define FLT_OT_RETURN_INT(a) return a
+# define FLT_OT_RETURN_PTR(a) return a
+# define FLT_OT_DBG_IFDEF(a,b) b
+# define FLT_OT_DBG_ARGS(...)
+# define FLT_OT_DBG_BUF(a,b) while (0)
+#endif /* DEBUG_OT */
+
+/*
+ * ON | NOLOGNORM |
+ * -----+-----------+-------------
+ * 0 | 0 | no log
+ * 0 | 1 | no log
+ * 1 | 0 | log all
+ * 1 | 1 | log errors
+ * -----+-----------+-------------
+ */
+#define FLT_OT_LOG(l,f, ...) \
+ do { \
+ if (!(conf->tracer->logging & FLT_OT_LOGGING_ON)) \
+ FLT_OT_DBG(3, "NOLOG[%d]: [" FLT_OT_SCOPE "]: [%s] " f, (l), conf->id, ##__VA_ARGS__); \
+ else if ((conf->tracer->logging & FLT_OT_LOGGING_NOLOGNORM) && ((l) > LOG_ERR)) \
+ FLT_OT_DBG(2, "NOLOG[%d]: [" FLT_OT_SCOPE "]: [%s] " f, (l), conf->id, ##__VA_ARGS__); \
+ else { \
+ send_log(&(conf->tracer->proxy_log), (l), "[" FLT_OT_SCOPE "]: [%s] " f "\n", conf->id, ##__VA_ARGS__); \
+ \
+ FLT_OT_DBG(1, "LOG[%d]: %s", (l), logline); \
+ } \
+ } while (0)
+
+#endif /* _OPENTRACING_DEBUG_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/define.h b/addons/ot/include/define.h
new file mode 100644
index 0000000..3c3e4a3
--- /dev/null
+++ b/addons/ot/include/define.h
@@ -0,0 +1,107 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_DEFINE_H_
+#define _OPENTRACING_DEFINE_H_
+
+#define FLT_OT_DEREF(a,m,v) (((a) != NULL) ? (a)->m : (v))
+#define FLT_OT_DDEREF(a,m,v) ((((a) != NULL) && (*(a) != NULL)) ? (*(a))->m : (v))
+#define FLT_OT_TABLESIZE(a) (sizeof(a) / sizeof((a)[0]))
+#define FLT_OT_IN_RANGE(v,a,b) (((v) >= (a)) && ((v) <= (b)))
+#define FLT_OT_DPTR_ARGS(a) (a), ((a) == NULL) ? NULL : *(a)
+#define FLT_OT_ARG_ISVALID(n) ((args[n] != NULL) && *args[n])
+#define FLT_OT_TV_UDIFF(a,b) (((b)->tv_sec - (a)->tv_sec) * 1000000 + (b)->tv_usec - (a)->tv_usec)
+#define FLT_OT_U32_FLOAT(a,b) ((a) * (double)(b) / UINT32_MAX)
+#define FLT_OT_FLOAT_U32(a,b) ((uint32_t)((a) / (double)(b) * UINT32_MAX + 0.5))
+
+#define FLT_OT_STR_DASH_72 "------------------------------------------------------------------------"
+#define FLT_OT_STR_DASH_78 FLT_OT_STR_DASH_72 "------"
+#define FLT_OT_STR_FLAG_YN(a) (a) ? "yes" : "no"
+
+#define FLT_OT_STR_SIZE(a) (sizeof(a) - 1)
+#define FLT_OT_STR_ADDRSIZE(a) (a), FLT_OT_STR_SIZE(a)
+#define FLT_OT_STR_ISVALID(a) (((a) != NULL) && (*(a) != '\0'))
+#define FLT_OT_STR_CMP(S,s,l) (((l) == FLT_OT_STR_SIZE(S)) && (memcmp((s), FLT_OT_STR_ADDRSIZE(S)) == 0))
+#define FLT_OT_STR_ELLIPSIS(a,n) do { if ((a) != NULL) { if ((n) > 0) (a)[(n) - 1] = '\0'; if ((n) > 3) (a)[(n) - 2] = (a)[(n) - 3] = (a)[(n) - 4] = '.'; } } while (0)
+#define FLT_OT_NIBBLE_TO_HEX(a) ((a) + (((a) < 10) ? '0' : ('a' - 10)))
+
+#define FLT_OT_FREE(a) do { if ((a) != NULL) OTC_DBG_FREE(a); } while (0)
+#define FLT_OT_FREE_VOID(a) do { if ((a) != NULL) OTC_DBG_FREE((void *)(a)); } while (0)
+#define FLT_OT_FREE_CLEAR(a) do { if ((a) != NULL) { OTC_DBG_FREE(a); (a) = NULL; } } while (0)
+#define FLT_OT_STRDUP(s) OTC_DBG_STRDUP(s)
+#define FLT_OT_STRNDUP(s,n) OTC_DBG_STRNDUP((s), (n))
+#define FLT_OT_CALLOC(n,e) OTC_DBG_CALLOC((n), (e))
+#define FLT_OT_MALLOC(s) OTC_DBG_MALLOC((s))
+#define FLT_OT_MEMINFO() OTC_DBG_MEMINFO()
+
+#define FLT_OT_RUN_ONCE(f) do { static bool __f = 1; if (__f) { __f = 0; f; } } while (0)
+
+#define FLT_OT_LIST_ISVALID(a) (((a) != NULL) && ((a)->n != NULL) && ((a)->p != NULL))
+#define FLT_OT_LIST_DEL(a) do { if (FLT_OT_LIST_ISVALID(a)) LIST_DELETE(a); } while (0)
+#define FLT_OT_LIST_DESTROY(t,h) \
+ do { \
+ struct flt_ot_conf_##t *_ptr, *_back; \
+ \
+ if (!FLT_OT_LIST_ISVALID(h) || LIST_ISEMPTY(h)) \
+ break; \
+ \
+ FLT_OT_DBG(2, "- deleting " #t " list %s", flt_ot_list_debug(h)); \
+ \
+ list_for_each_entry_safe(_ptr, _back, (h), list) \
+ flt_ot_conf_##t##_free(&_ptr); \
+ } while (0)
+
+#define FLT_OT_BUFFER_THR(b,m,n,p) \
+ static THREAD_LOCAL char b[m][n]; \
+ static THREAD_LOCAL size_t __idx = 0; \
+ char *p = b[__idx]; \
+ __idx = (__idx + 1) % (m)
+
+#define FLT_OT_ERR(f, ...) \
+ do { \
+ if ((err != NULL) && (*err == NULL)) { \
+ (void)memprintf(err, f, ##__VA_ARGS__); \
+ \
+ FLT_OT_DBG(3, "%d err: '%s'", __LINE__, *err); \
+ } \
+ } while (0)
+#define FLT_OT_ERR_APPEND(f, ...) \
+ do { \
+ if (err != NULL) \
+ (void)memprintf(err, f, ##__VA_ARGS__); \
+ } while (0)
+#define FLT_OT_ERR_FREE(p) \
+ do { \
+ if ((p) == NULL) \
+ break; \
+ \
+ FLT_OT_DBG(0, "%s:%d: ERROR: %s", __func__, __LINE__, (p)); \
+ FLT_OT_FREE_CLEAR(p); \
+ } while (0)
+
+#endif /* _OPENTRACING_DEFINE_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/event.h b/addons/ot/include/event.h
new file mode 100644
index 0000000..8d59163
--- /dev/null
+++ b/addons/ot/include/event.h
@@ -0,0 +1,120 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_EVENT_H_
+#define _OPENTRACING_EVENT_H_
+
+/*
+ * This must be defined in order for macro FLT_OT_EVENT_DEFINES
+ * and structure flt_ot_event_data to have the correct contents.
+ */
+#define AN_REQ_NONE 0
+#define AN_REQ_CLIENT_SESS_START 0
+#define AN_REQ_SERVER_UNAVAILABLE 0
+#define AN_REQ_CLIENT_SESS_END 0
+#define AN_RES_SERVER_SESS_START 0
+#define AN_RES_SERVER_SESS_END 0
+#define SMP_VAL_FE_ 0
+#define SMP_VAL_BE_ 0
+
+/*
+ * Event names are selected to be somewhat compatible with the SPOE filter,
+ * from which the following names are taken:
+ * - on-client-session -> on-client-session-start
+ * - on-frontend-tcp-request
+ * - on-frontend-http-request
+ * - on-backend-tcp-request
+ * - on-backend-http-request
+ * - on-server-session -> on-server-session-start
+ * - on-tcp-response
+ * - on-http-response
+ *
+ * FLT_OT_EVENT_NONE is used as an index for 'ot-scope' sections that do not
+ * have an event defined. The 'ot-scope' sections thus defined can be used
+ * within the 'ot-group' section.
+ *
+ * A description of the macro arguments can be found in the structure
+ * flt_ot_event_data definition
+ */
+#define FLT_OT_EVENT_DEFINES \
+ FLT_OT_EVENT_DEF( NONE, REQ, , , 0, "") \
+ FLT_OT_EVENT_DEF( CLIENT_SESS_START, REQ, CON_ACC, , 1, "on-client-session-start") \
+ FLT_OT_EVENT_DEF( INSPECT_FE, REQ, REQ_CNT, , 1, "on-frontend-tcp-request") \
+ FLT_OT_EVENT_DEF( WAIT_HTTP, REQ, , , 1, "on-http-wait-request") \
+ FLT_OT_EVENT_DEF( HTTP_BODY, REQ, , , 1, "on-http-body-request") \
+ FLT_OT_EVENT_DEF( HTTP_PROCESS_FE, REQ, HRQ_HDR, , 1, "on-frontend-http-request") \
+ FLT_OT_EVENT_DEF( SWITCHING_RULES, REQ, , , 1, "on-switching-rules-request") \
+ FLT_OT_EVENT_DEF( INSPECT_BE, REQ, REQ_CNT, REQ_CNT, 1, "on-backend-tcp-request") \
+ FLT_OT_EVENT_DEF( HTTP_PROCESS_BE, REQ, HRQ_HDR, HRQ_HDR, 1, "on-backend-http-request") \
+/* FLT_OT_EVENT_DEF( HTTP_TARPIT, REQ, , , 1, "on-http-tarpit-request") */ \
+ FLT_OT_EVENT_DEF( SRV_RULES, REQ, , , 1, "on-process-server-rules-request") \
+ FLT_OT_EVENT_DEF( HTTP_INNER, REQ, , , 1, "on-http-process-request") \
+ FLT_OT_EVENT_DEF( PRST_RDP_COOKIE, REQ, , , 1, "on-tcp-rdp-cookie-request") \
+ FLT_OT_EVENT_DEF( STICKING_RULES, REQ, , , 1, "on-process-sticking-rules-request") \
+ FLT_OT_EVENT_DEF( CLIENT_SESS_END, REQ, , , 0, "on-client-session-end") \
+ FLT_OT_EVENT_DEF(SERVER_UNAVAILABLE, REQ, , , 0, "on-server-unavailable") \
+ \
+ FLT_OT_EVENT_DEF( SERVER_SESS_START, RES, , SRV_CON, 0, "on-server-session-start") \
+ FLT_OT_EVENT_DEF( INSPECT, RES, RES_CNT, RES_CNT, 0, "on-tcp-response") \
+ FLT_OT_EVENT_DEF( WAIT_HTTP, RES, , , 1, "on-http-wait-response") \
+ FLT_OT_EVENT_DEF( STORE_RULES, RES, , , 1, "on-process-store-rules-response") \
+ FLT_OT_EVENT_DEF( HTTP_PROCESS_BE, RES, HRS_HDR, HRS_HDR, 1, "on-http-response") \
+ FLT_OT_EVENT_DEF( SERVER_SESS_END, RES, , , 0, "on-server-session-end")
+
+enum FLT_OT_EVENT_enum {
+#define FLT_OT_EVENT_DEF(a,b,c,d,e,f) FLT_OT_EVENT_##b##_##a,
+ FLT_OT_EVENT_DEFINES
+ FLT_OT_EVENT_MAX
+#undef FLT_OT_EVENT_DEF
+};
+
+enum FLT_OT_EVENT_SAMPLE_enum {
+ FLT_OT_EVENT_SAMPLE_TAG = 0,
+ FLT_OT_EVENT_SAMPLE_LOG,
+ FLT_OT_EVENT_SAMPLE_BAGGAGE,
+};
+
+struct flt_ot_event_data {
+ uint an_bit; /* Used channel analyser. */
+ uint smp_opt_dir; /* Fetch direction (request/response). */
+ uint smp_val_fe; /* Valid FE fetch location. */
+ uint smp_val_be; /* Valid BE fetch location. */
+ bool flag_http_inject; /* Span context injection allowed. */
+ const char *name; /* Filter event name. */
+};
+
+struct flt_ot_conf_scope;
+
+
+extern const struct flt_ot_event_data flt_ot_event_data[FLT_OT_EVENT_MAX];
+
+
+int flt_ot_scope_run(struct stream *s, struct filter *f, struct channel *chn, struct flt_ot_conf_scope *conf_scope, const struct timespec *ts, uint dir, char **err);
+int flt_ot_event_run(struct stream *s, struct filter *f, struct channel *chn, int event, char **err);
+
+#endif /* _OPENTRACING_EVENT_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/filter.h b/addons/ot/include/filter.h
new file mode 100644
index 0000000..c97a0cc
--- /dev/null
+++ b/addons/ot/include/filter.h
@@ -0,0 +1,68 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_FILTER_H_
+#define _OPENTRACING_FILTER_H_
+
+#define FLT_OT_FMT_NAME "'" FLT_OT_OPT_NAME "' : "
+#define FLT_OT_FMT_TYPE "'filter' : "
+#define FLT_OT_VAR_UUID "sess", "ot", "uuid"
+#define FLT_OT_ALERT(f, ...) ha_alert(FLT_OT_FMT_TYPE FLT_OT_FMT_NAME f "\n", ##__VA_ARGS__)
+
+#define FLT_OT_CONDITION_IF "if"
+#define FLT_OT_CONDITION_UNLESS "unless"
+
+enum FLT_OT_RET_enum {
+ FLT_OT_RET_ERROR = -1,
+ FLT_OT_RET_WAIT = 0,
+ FLT_OT_RET_IGNORE = 0,
+ FLT_OT_RET_OK = 1,
+};
+
+#define FLT_OT_DBG_LIST(d,m,p,t,v,f) \
+ do { \
+ if (LIST_ISEMPTY(&((d)->m##s))) { \
+ FLT_OT_DBG(3, p "- no " #m "s " t); \
+ } else { \
+ const struct flt_ot_conf_##m *v; \
+ \
+ FLT_OT_DBG(3, p "- " t " " #m "s: %s", \
+ flt_ot_list_debug(&((d)->m##s))); \
+ list_for_each_entry(v, &((d)->m##s), list) \
+ do { f; } while (0); \
+ } \
+ } while (0)
+
+
+extern const char *ot_flt_id;
+extern struct flt_ops flt_ot_ops;
+
+
+bool flt_ot_is_disabled(const struct filter *f FLT_OT_DBG_ARGS(, int event));
+
+#endif /* _OPENTRACING_FILTER_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/group.h b/addons/ot/include/group.h
new file mode 100644
index 0000000..a9bfcc6
--- /dev/null
+++ b/addons/ot/include/group.h
@@ -0,0 +1,61 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_GROUP_H_
+#define _OPENTRACING_GROUP_H_
+
+#define FLT_OT_ACTION_GROUP "ot-group"
+
+enum FLT_OT_ARG_enum {
+ FLT_OT_ARG_FILTER_ID = 0,
+ FLT_OT_ARG_GROUP_ID,
+
+ FLT_OT_ARG_FLT_CONF = 0,
+ FLT_OT_ARG_CONF,
+ FLT_OT_ARG_GROUP,
+};
+
+/*
+ * A description of the macro arguments can be found in the structure
+ * flt_ot_group_data definition
+ */
+#define FLT_OT_GROUP_DEFINES \
+ FLT_OT_GROUP_DEF(ACT_F_TCP_REQ_CON, SMP_VAL_FE_CON_ACC, SMP_OPT_DIR_REQ) \
+ FLT_OT_GROUP_DEF(ACT_F_TCP_REQ_SES, SMP_VAL_FE_SES_ACC, SMP_OPT_DIR_REQ) \
+ FLT_OT_GROUP_DEF(ACT_F_TCP_REQ_CNT, SMP_VAL_FE_REQ_CNT, SMP_OPT_DIR_REQ) \
+ FLT_OT_GROUP_DEF(ACT_F_TCP_RES_CNT, SMP_VAL_BE_RES_CNT, SMP_OPT_DIR_RES) \
+ FLT_OT_GROUP_DEF(ACT_F_HTTP_REQ, SMP_VAL_FE_HRQ_HDR, SMP_OPT_DIR_REQ) \
+ FLT_OT_GROUP_DEF(ACT_F_HTTP_RES, SMP_VAL_BE_HRS_HDR, SMP_OPT_DIR_RES)
+
+struct flt_ot_group_data {
+ enum act_from act_from; /* ACT_F_* */
+ uint smp_val; /* Valid FE/BE fetch location. */
+ uint smp_opt_dir; /* Fetch direction (request/response). */
+};
+
+#endif /* _OPENTRACING_GROUP_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/http.h b/addons/ot/include/http.h
new file mode 100644
index 0000000..c323cde
--- /dev/null
+++ b/addons/ot/include/http.h
@@ -0,0 +1,41 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_HTTP_H_
+#define _OPENTRACING_HTTP_H_
+
+#ifndef DEBUG_OT
+# define flt_ot_http_headers_dump(...) while (0)
+#else
+void flt_ot_http_headers_dump(const struct channel *chn);
+#endif
+struct otc_text_map *flt_ot_http_headers_get(struct channel *chn, const char *prefix, size_t len, char **err);
+int flt_ot_http_header_set(struct channel *chn, const char *prefix, const char *name, const char *value, char **err);
+int flt_ot_http_headers_remove(struct channel *chn, const char *prefix, char **err);
+
+#endif /* _OPENTRACING_HTTP_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/include.h b/addons/ot/include/include.h
new file mode 100644
index 0000000..f1a5672
--- /dev/null
+++ b/addons/ot/include/include.h
@@ -0,0 +1,66 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_INCLUDE_H_
+#define _OPENTRACING_INCLUDE_H_
+
+#include <errno.h>
+#include <stdbool.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/acl.h>
+#include <haproxy/cli.h>
+#include <haproxy/clock.h>
+#include <haproxy/filters.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+
+#include "config.h"
+#include "debug.h"
+#include "define.h"
+#include "cli.h"
+#include "event.h"
+#include "conf.h"
+#include "filter.h"
+#include "group.h"
+#include "http.h"
+#include "opentracing.h"
+#include "parser.h"
+#include "pool.h"
+#include "scope.h"
+#include "util.h"
+#include "vars.h"
+
+#endif /* _OPENTRACING_INCLUDE_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/opentracing.h b/addons/ot/include/opentracing.h
new file mode 100644
index 0000000..2b88a33
--- /dev/null
+++ b/addons/ot/include/opentracing.h
@@ -0,0 +1,86 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_OT_H_
+#define _OPENTRACING_OT_H_
+
+#include <opentracing-c-wrapper/include.h>
+
+
+#define FLT_OT_VSET(p,t,v) \
+ do { (p)->type = otc_value_##t; (p)->value.t##_value = (v); } while (0)
+
+#define FLT_OT_DBG_TEXT_MAP(a) \
+ FLT_OT_DBG(3, "%p:{ %p %p %zu/%zu %hhu }", \
+ (a), (a)->key, (a)->value, (a)->count, (a)->size, (a)->is_dynamic)
+
+#define FLT_OT_DBG_TEXT_CARRIER(a,f) \
+ FLT_OT_DBG(3, "%p:{ { %p %p %zu/%zu %hhu } %p }", \
+ (a), (a)->text_map.key, (a)->text_map.value, (a)->text_map.count, \
+ (a)->text_map.size, (a)->text_map.is_dynamic, (a)->f)
+
+#define FLT_OT_DBG_CUSTOM_CARRIER(a,f) \
+ FLT_OT_DBG(3, "%p:{ { %p %zu %hhu } %p }", \
+ (a), (a)->binary_data.data, (a)->binary_data.size, \
+ (a)->binary_data.is_dynamic, (a)->f)
+
+#define FLT_OT_DBG_SPAN_CONTEXT(a) \
+ FLT_OT_DBG(3, "%p:{ %" PRId64 " %p %p }", (a), (a)->idx, (a)->span, (a)->destroy)
+
+
+#ifndef DEBUG_OT
+# define ot_debug() while (0)
+# define ot_text_map_show(...) while (0)
+#else
+void ot_text_map_show(const struct otc_text_map *text_map);
+void ot_debug(void);
+#endif
+int ot_init(struct otc_tracer **tracer, const char *plugin, char **err);
+int ot_start(struct otc_tracer *tracer, const char *cfgbuf, char **err);
+struct otc_span *ot_span_init(struct otc_tracer *tracer, const char *operation_name, const struct timespec *ts_steady, const struct timespec *ts_system, int ref_type, int ref_ctx_idx, const struct otc_span *ref_span, const struct otc_tag *tags, int num_tags, char **err);
+int ot_span_tag(struct otc_span *span, const struct otc_tag *tags, int num_tags);
+int ot_span_log(struct otc_span *span, const struct otc_log_field *log_fields, int num_fields);
+int ot_span_set_baggage(struct otc_span *span, const struct otc_text_map *baggage);
+struct otc_span_context *ot_inject_http_headers(struct otc_tracer *tracer, const struct otc_span *span, struct otc_http_headers_writer *carrier, char **err);
+struct otc_span_context *ot_extract_http_headers(struct otc_tracer *tracer, struct otc_http_headers_reader *carrier, const struct otc_text_map *text_map, char **err);
+void ot_span_finish(struct otc_span **span, const struct timespec *ts_finish, const struct timespec *log_ts, const char *log_key, const char *log_value, ...);
+void ot_close(struct otc_tracer **tracer);
+
+/* Unused code. */
+struct otc_span *ot_span_init_va(struct otc_tracer *tracer, const char *operation_name, const struct timespec *ts_steady, const struct timespec *ts_system, int ref_type, int ref_ctx_idx, const struct otc_span *ref_span, char **err, const char *tag_key, const char *tag_value, ...);
+int ot_span_tag_va(struct otc_span *span, const char *key, int type, ...);
+int ot_span_log_va(struct otc_span *span, const char *key, const char *value, ...);
+int ot_span_log_fmt(struct otc_span *span, const char *key, const char *format, ...) __attribute__ ((format(printf, 3, 4)));
+int ot_span_set_baggage_va(struct otc_span *span, const char *key, const char *value, ...);
+struct otc_text_map *ot_span_baggage_va(const struct otc_span *span, const char *key, ...);
+struct otc_span_context *ot_inject_text_map(struct otc_tracer *tracer, const struct otc_span *span, struct otc_text_map_writer *carrier);
+struct otc_span_context *ot_inject_binary(struct otc_tracer *tracer, const struct otc_span *span, struct otc_custom_carrier_writer *carrier);
+struct otc_span_context *ot_extract_text_map(struct otc_tracer *tracer, struct otc_text_map_reader *carrier, const struct otc_text_map *text_map);
+struct otc_span_context *ot_extract_binary(struct otc_tracer *tracer, struct otc_custom_carrier_reader *carrier, const struct otc_binary_data *binary_data);
+
+#endif /* _OPENTRACING_OT_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/parser.h b/addons/ot/include/parser.h
new file mode 100644
index 0000000..53e414b
--- /dev/null
+++ b/addons/ot/include/parser.h
@@ -0,0 +1,172 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_PARSER_H_
+#define _OPENTRACING_PARSER_H_
+
+#define FLT_OT_SCOPE "OT"
+
+/*
+ * filter FLT_OT_OPT_NAME FLT_OT_OPT_FILTER_ID <FLT_OT_OPT_FILTER_ID_DEFAULT> FLT_OT_OPT_CONFIG <file>
+ */
+#define FLT_OT_OPT_NAME "opentracing"
+#define FLT_OT_OPT_FILTER_ID "id"
+#define FLT_OT_OPT_FILTER_ID_DEFAULT "ot-filter"
+#define FLT_OT_OPT_CONFIG "config"
+
+#define FLT_OT_PARSE_SECTION_TRACER_ID "ot-tracer"
+#define FLT_OT_PARSE_SECTION_GROUP_ID "ot-group"
+#define FLT_OT_PARSE_SECTION_SCOPE_ID "ot-scope"
+
+#define FLT_OT_PARSE_SPAN_ROOT "root"
+#define FLT_OT_PARSE_SPAN_REF_CHILD "child-of"
+#define FLT_OT_PARSE_SPAN_REF_FOLLOWS "follows-from"
+#define FLT_OT_PARSE_CTX_AUTONAME "-"
+#define FLT_OT_PARSE_CTX_IGNORE_NAME '-'
+#define FLT_OT_PARSE_CTX_USE_HEADERS "use-headers"
+#define FLT_OT_PARSE_CTX_USE_VARS "use-vars"
+#define FLT_OT_PARSE_OPTION_HARDERR "hard-errors"
+#define FLT_OT_PARSE_OPTION_DISABLED "disabled"
+#define FLT_OT_PARSE_OPTION_NOLOGNORM "dontlog-normal"
+
+/*
+ * A description of the macro arguments can be found in the structure
+ * flt_ot_parse_data definition
+ */
+#define FLT_OT_PARSE_TRACER_DEFINES \
+ FLT_OT_PARSE_TRACER_DEF( ID, 0, CHAR, 2, 2, "ot-tracer", " <name>") \
+ FLT_OT_PARSE_TRACER_DEF( ACL, 0, CHAR, 3, 0, "acl", " <name> <criterion> [flags] [operator] <value> ...") \
+ FLT_OT_PARSE_TRACER_DEF( LOG, 0, CHAR, 2, 0, "log", " { global | <addr> [len <len>] [format <fmt>] <facility> [<level> [<minlevel>]] }") \
+ FLT_OT_PARSE_TRACER_DEF( CONFIG, 0, NONE, 2, 2, "config", " <file>") \
+ FLT_OT_PARSE_TRACER_DEF( PLUGIN, 0, NONE, 2, 2, "plugin", " <file>") \
+ FLT_OT_PARSE_TRACER_DEF( GROUPS, 0, NONE, 2, 0, "groups", " <name> ...") \
+ FLT_OT_PARSE_TRACER_DEF( SCOPES, 0, NONE, 2, 0, "scopes", " <name> ...") \
+ FLT_OT_PARSE_TRACER_DEF( RATE_LIMIT, 0, NONE, 2, 2, "rate-limit", " <value>") \
+ FLT_OT_PARSE_TRACER_DEF( OPTION, 0, NONE, 2, 2, "option", " { disabled | dontlog-normal | hard-errors }") \
+ FLT_OT_PARSE_TRACER_DEF(DEBUG_LEVEL, 0, NONE, 2, 2, "debug-level", " <value>")
+
+#define FLT_OT_PARSE_GROUP_DEFINES \
+ FLT_OT_PARSE_GROUP_DEF( ID, 0, CHAR, 2, 2, "ot-group", " <name>") \
+ FLT_OT_PARSE_GROUP_DEF(SCOPES, 0, NONE, 2, 0, "scopes", " <name> ...")
+
+#ifdef USE_OT_VARS
+# define FLT_OT_PARSE_SCOPE_INJECT_HELP " <name-prefix> [use-vars] [use-headers]"
+# define FLT_OT_PARSE_SCOPE_EXTRACT_HELP " <name-prefix> [use-vars | use-headers]"
+#else
+# define FLT_OT_PARSE_SCOPE_INJECT_HELP " <name-prefix> [use-headers]"
+# define FLT_OT_PARSE_SCOPE_EXTRACT_HELP " <name-prefix> [use-headers]"
+#endif
+
+/*
+ * In case the possibility of working with OpenTracing context via HAProxy
+ * variables is not used, args_max member of the structure flt_ot_parse_data
+ * should be reduced for 'inject' keyword. However, this is not critical
+ * because in this case the 'use-vars' argument cannot be entered anyway,
+ * so I will not complicate it here with additional definitions.
+ */
+#define FLT_OT_PARSE_SCOPE_DEFINES \
+ FLT_OT_PARSE_SCOPE_DEF( ID, 0, CHAR, 2, 2, "ot-scope", " <name>") \
+ FLT_OT_PARSE_SCOPE_DEF( SPAN, 0, NONE, 2, 5, "span", " <name> [<reference>] [root]") \
+ FLT_OT_PARSE_SCOPE_DEF( TAG, 1, NONE, 3, 0, "tag", " <name> <sample> ...") \
+ FLT_OT_PARSE_SCOPE_DEF( LOG, 1, NONE, 3, 0, "log", " <name> <sample> ...") \
+ FLT_OT_PARSE_SCOPE_DEF(BAGGAGE, 1, VAR, 3, 0, "baggage", " <name> <sample> ...") \
+ FLT_OT_PARSE_SCOPE_DEF( INJECT, 1, CTX, 2, 4, "inject", FLT_OT_PARSE_SCOPE_INJECT_HELP) \
+ FLT_OT_PARSE_SCOPE_DEF(EXTRACT, 0, CTX, 2, 3, "extract", FLT_OT_PARSE_SCOPE_EXTRACT_HELP) \
+ FLT_OT_PARSE_SCOPE_DEF( FINISH, 0, NONE, 2, 0, "finish", " <name> ...") \
+ FLT_OT_PARSE_SCOPE_DEF( ACL, 0, CHAR, 3, 0, "acl", " <name> <criterion> [flags] [operator] <value> ...") \
+ FLT_OT_PARSE_SCOPE_DEF( EVENT, 0, NONE, 2, 0, "event", " <name> [{ if | unless } <condition>]")
+
+enum FLT_OT_PARSE_INVCHAR_enum {
+ FLT_OT_PARSE_INVALID_NONE,
+ FLT_OT_PARSE_INVALID_CHAR,
+ FLT_OT_PARSE_INVALID_DOM,
+ FLT_OT_PARSE_INVALID_CTX,
+ FLT_OT_PARSE_INVALID_VAR,
+};
+
+enum FLT_OT_PARSE_TRACER_enum {
+#define FLT_OT_PARSE_TRACER_DEF(a,b,c,d,e,f,g) FLT_OT_PARSE_TRACER_##a,
+ FLT_OT_PARSE_TRACER_DEFINES
+#undef FLT_OT_PARSE_TRACER_DEF
+};
+
+enum FLT_OT_PARSE_GROUP_enum {
+#define FLT_OT_PARSE_GROUP_DEF(a,b,c,d,e,f,g) FLT_OT_PARSE_GROUP_##a,
+ FLT_OT_PARSE_GROUP_DEFINES
+#undef FLT_OT_PARSE_GROUP_DEF
+};
+
+enum FLT_OT_PARSE_SCOPE_enum {
+#define FLT_OT_PARSE_SCOPE_DEF(a,b,c,d,e,f,g) FLT_OT_PARSE_SCOPE_##a,
+ FLT_OT_PARSE_SCOPE_DEFINES
+#undef FLT_OT_PARSE_SCOPE_DEF
+};
+
+enum FLT_OT_CTX_USE_enum {
+ FLT_OT_CTX_USE_VARS = 1 << 0,
+ FLT_OT_CTX_USE_HEADERS = 1 << 1,
+};
+
+struct flt_ot_parse_data {
+ int keyword; /* Keyword index. */
+ bool flag_check_id; /* Whether the group ID must be defined for the keyword. */
+ int check_name; /* Checking allowed characters in the name. */
+ int args_min; /* The minimum number of arguments required. */
+ int args_max; /* The maximum number of arguments allowed. */
+ const char *name; /* Keyword name. */
+ const char *usage; /* Usage text to be printed in case of an error. */
+};
+
+#define FLT_OT_PARSE_WARNING(f, ...) \
+ ha_warning("parsing [%s:%d] : " FLT_OT_FMT_TYPE FLT_OT_FMT_NAME "'" f "'\n", ##__VA_ARGS__);
+#define FLT_OT_PARSE_ALERT(f, ...) \
+ do { \
+ ha_alert("parsing [%s:%d] : " FLT_OT_FMT_TYPE FLT_OT_FMT_NAME "'" f "'\n", ##__VA_ARGS__); \
+ \
+ retval |= ERR_ABORT | ERR_ALERT; \
+ } while (0)
+#define FLT_OT_POST_PARSE_ALERT(f, ...) \
+ FLT_OT_PARSE_ALERT(f, flt_ot_current_config->cfg_file, ##__VA_ARGS__)
+
+#define FLT_OT_PARSE_ERR(e,f, ...) \
+ do { \
+ if (*(e) == NULL) \
+ (void)memprintf((e), f, ##__VA_ARGS__); \
+ \
+ retval |= ERR_ABORT | ERR_ALERT; \
+ } while (0)
+#define FLT_OT_PARSE_IFERR_ALERT() \
+ do { \
+ if (err == NULL) \
+ break; \
+ \
+ FLT_OT_PARSE_ALERT("%s", file, linenum, err); \
+ FLT_OT_ERR_FREE(err); \
+ } while (0)
+
+#endif /* _OPENTRACING_PARSER_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/pool.h b/addons/ot/include/pool.h
new file mode 100644
index 0000000..df72c84
--- /dev/null
+++ b/addons/ot/include/pool.h
@@ -0,0 +1,39 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_POOL_H_
+#define _OPENTRACING_POOL_H_
+
+void *flt_ot_pool_alloc(struct pool_head *pool, size_t size, bool flag_clear, char **err);
+void *flt_ot_pool_strndup(struct pool_head *pool, const char *s, size_t size, char **err);
+void flt_ot_pool_free(struct pool_head *pool, void **ptr);
+
+struct buffer *flt_ot_trash_alloc(bool flag_clear, char **err);
+void flt_ot_trash_free(struct buffer **ptr);
+
+#endif /* _OPENTRACING_POOL_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/scope.h b/addons/ot/include/scope.h
new file mode 100644
index 0000000..7a3a776
--- /dev/null
+++ b/addons/ot/include/scope.h
@@ -0,0 +1,126 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_SCOPE_H_
+#define _OPENTRACING_SCOPE_H_
+
+#define FLT_OT_SCOPE_SPAN_FINISH_REQ "*req*"
+#define FLT_OT_SCOPE_SPAN_FINISH_RES "*res*"
+#define FLT_OT_SCOPE_SPAN_FINISH_ALL "*"
+
+#define FLT_OT_RT_CTX(a) ((struct flt_ot_runtime_context *)(a))
+
+#define FLT_OT_DBG_SCOPE_SPAN(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ '%s' %zu %u %hhu %p %d %p %p }", \
+ (f), (a), FLT_OT_STR_HDR_ARGS(a, id), (a)->smp_opt_dir, \
+ (a)->flag_finish, (a)->span, (a)->ref_type, (a)->ref_span, (a)->ref_ctx)
+
+#define FLT_OT_DBG_SCOPE_CONTEXT(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ '%s' %zu %u %hhu %p }", \
+ (f), (a), FLT_OT_STR_HDR_ARGS(a, id), (a)->smp_opt_dir, \
+ (a)->flag_finish, (a)->context)
+
+#define FLT_OT_DBG_SCOPE_DATA(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ %p %d %p %p %d }", \
+ (f), (a), (a)->tags, (a)->num_tags, (a)->baggage, (a)->log_fields, (a)->num_log_fields)
+
+#define FLT_OT_DBG_RUNTIME_CONTEXT(f,a) \
+ FLT_OT_DBG(3, "%s%p:{ %p %p '%s' %hhu %hhu 0x%02hhx 0x%08x %s %s }", \
+ (f), (a), (a)->stream, (a)->filter, (a)->uuid, (a)->flag_harderr, \
+ (a)->flag_disabled, (a)->logging, (a)->analyzers, flt_ot_list_debug(&((a)->spans)), \
+ flt_ot_list_debug(&((a)->contexts)))
+
+#define FLT_OT_CONST_STR_HDR(a) \
+ struct { \
+ const char *a; \
+ size_t a##_len; \
+ }
+
+
+struct flt_ot_scope_data {
+ struct otc_tag tags[FLT_OT_MAXTAGS]; /* Defined tags. */
+ int num_tags; /* The number of tags used. */
+ struct otc_text_map *baggage; /* Defined baggage. */
+ struct otc_log_field log_fields[OTC_MAXLOGFIELDS]; /* Defined logs. */
+ int num_log_fields; /* The number of log fields used. */
+};
+
+/* flt_ot_runtime_context->spans */
+struct flt_ot_scope_span {
+ FLT_OT_CONST_STR_HDR(id); /* The span operation name/len. */
+ uint smp_opt_dir; /* SMP_OPT_DIR_RE(Q|S) */
+ bool flag_finish; /* Whether the span is marked for completion. */
+ struct otc_span *span; /* The current span. */
+ otc_span_reference_type_t ref_type; /* Span reference type. */
+ struct otc_span *ref_span; /* Span to which the current span refers. */
+ struct otc_span_context *ref_ctx; /* Span context to which the current span refers. */
+ struct list list; /* Used to chain this structure. */
+};
+
+/* flt_ot_runtime_context->contexts */
+struct flt_ot_scope_context {
+ FLT_OT_CONST_STR_HDR(id); /* The span context name/len. */
+ uint smp_opt_dir; /* SMP_OPT_DIR_RE(Q|S) */
+ bool flag_finish; /* Whether the span context is marked for completion. */
+ struct otc_span_context *context; /* The current span context. */
+ struct list list; /* Used to chain this structure. */
+};
+
+/* The runtime filter context attached to a stream. */
+struct flt_ot_runtime_context {
+ struct stream *stream; /* The stream to which the filter is attached. */
+ struct filter *filter; /* The OpenTracing filter. */
+ char uuid[40]; /* Randomly generated UUID. */
+ bool flag_harderr; /* [0 1] */
+ bool flag_disabled; /* [0 1] */
+ uint8_t logging; /* [0 1 3] */
+ uint analyzers; /* Executed channel analyzers. */
+ struct list spans; /* The scope spans. */
+ struct list contexts; /* The scope contexts. */
+};
+
+
+#ifndef DEBUG_OT
+# define flt_ot_pools_info() while (0)
+#else
+void flt_ot_pools_info(void);
+#endif
+struct flt_ot_runtime_context *flt_ot_runtime_context_init(struct stream *s, struct filter *f, char **err);
+void flt_ot_runtime_context_free(struct filter *f);
+
+struct flt_ot_scope_span *flt_ot_scope_span_init(struct flt_ot_runtime_context *rt_ctx, const char *id, size_t id_len, otc_span_reference_type_t ref_type, const char *ref_id, size_t ref_id_len, uint dir, char **err);
+void flt_ot_scope_span_free(struct flt_ot_scope_span **ptr);
+struct flt_ot_scope_context *flt_ot_scope_context_init(struct flt_ot_runtime_context *rt_ctx, struct otc_tracer *tracer, const char *id, size_t id_len, const struct otc_text_map *text_map, uint dir, char **err);
+void flt_ot_scope_context_free(struct flt_ot_scope_context **ptr);
+void flt_ot_scope_data_free(struct flt_ot_scope_data *ptr);
+
+int flt_ot_scope_finish_mark(const struct flt_ot_runtime_context *rt_ctx, const char *id, size_t id_len);
+void flt_ot_scope_finish_marked(const struct flt_ot_runtime_context *rt_ctx, const struct timespec *ts_finish);
+void flt_ot_scope_free_unused(struct flt_ot_runtime_context *rt_ctx, struct channel *chn);
+
+#endif /* _OPENTRACING_SCOPE_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/util.h b/addons/ot/include/util.h
new file mode 100644
index 0000000..776ddd2
--- /dev/null
+++ b/addons/ot/include/util.h
@@ -0,0 +1,109 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_UTIL_H_
+#define _OPENTRACING_UTIL_H_
+
+#define HTTP_METH_STR_OPTIONS "OPTIONS"
+#define HTTP_METH_STR_GET "GET"
+#define HTTP_METH_STR_HEAD "HEAD"
+#define HTTP_METH_STR_POST "POST"
+#define HTTP_METH_STR_PUT "PUT"
+#define HTTP_METH_STR_DELETE "DELETE"
+#define HTTP_METH_STR_TRACE "TRACE"
+#define HTTP_METH_STR_CONNECT "CONNECT"
+
+/* Defined in include/haproxy/channel-t.h. */
+#define FLT_OT_AN_DEFINES \
+ FLT_OT_AN_DEF(AN_REQ_INSPECT_FE) \
+ FLT_OT_AN_DEF(AN_REQ_WAIT_HTTP) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_BODY) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_PROCESS_FE) \
+ FLT_OT_AN_DEF(AN_REQ_SWITCHING_RULES) \
+ FLT_OT_AN_DEF(AN_REQ_INSPECT_BE) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_PROCESS_BE) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_TARPIT) \
+ FLT_OT_AN_DEF(AN_REQ_SRV_RULES) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_INNER) \
+ FLT_OT_AN_DEF(AN_REQ_PRST_RDP_COOKIE) \
+ FLT_OT_AN_DEF(AN_REQ_STICKING_RULES) \
+ FLT_OT_AN_DEF(AN_REQ_HTTP_XFER_BODY) \
+ FLT_OT_AN_DEF(AN_REQ_WAIT_CLI) \
+ FLT_OT_AN_DEF(AN_RES_INSPECT) \
+ FLT_OT_AN_DEF(AN_RES_WAIT_HTTP) \
+ FLT_OT_AN_DEF(AN_RES_STORE_RULES) \
+ FLT_OT_AN_DEF(AN_RES_HTTP_PROCESS_BE) \
+ FLT_OT_AN_DEF(AN_RES_HTTP_PROCESS_FE) \
+ FLT_OT_AN_DEF(AN_RES_HTTP_XFER_BODY) \
+ FLT_OT_AN_DEF(AN_RES_WAIT_CLI)
+
+#define FLT_OT_PROXIES_LIST_START() \
+ do { \
+ struct flt_conf *fconf; \
+ struct proxy *px; \
+ \
+ for (px = proxies_list; px != NULL; px = px->next) \
+ list_for_each_entry(fconf, &(px->filter_configs), list) \
+ if (fconf->id == ot_flt_id) { \
+ struct flt_ot_conf *conf = fconf->conf;
+#define FLT_OT_PROXIES_LIST_END() \
+ } \
+ } while (0)
+
+#ifdef DEBUG_OT
+# define FLT_OT_ARGS_DUMP() do { if (flt_ot_debug.level & (1 << 2)) flt_ot_args_dump(args); } while (0)
+#else
+# define FLT_OT_ARGS_DUMP() while (0)
+#endif
+
+
+#ifndef DEBUG_OT
+# define flt_ot_filters_dump() while (0)
+#else
+void flt_ot_args_dump(char **args);
+void flt_ot_filters_dump(void);
+const char *flt_ot_chn_label(const struct channel *chn);
+const char *flt_ot_pr_mode(const struct stream *s);
+const char *flt_ot_stream_pos(const struct stream *s);
+const char *flt_ot_type(const struct filter *f);
+const char *flt_ot_analyzer(uint an_bit);
+const char *flt_ot_str_hex(const void *data, size_t size);
+const char *flt_ot_str_ctrl(const void *data, size_t size);
+const char *flt_ot_list_debug(const struct list *head);
+#endif
+
+ssize_t flt_ot_chunk_add(struct buffer *chk, const void *src, size_t n, char **err);
+int flt_ot_args_count(char **args);
+void flt_ot_args_to_str(char **args, int idx, char **str);
+double flt_ot_strtod(const char *nptr, double limit_min, double limit_max, char **err);
+int64_t flt_ot_strtoll(const char *nptr, int64_t limit_min, int64_t limit_max, char **err);
+int flt_ot_sample_to_str(const struct sample_data *data, char *value, size_t size, char **err);
+int flt_ot_sample_to_value(const char *key, const struct sample_data *data, struct otc_value *value, char **err);
+int flt_ot_sample_add(struct stream *s, uint dir, struct flt_ot_conf_sample *sample, struct flt_ot_scope_data *data, int type, char **err);
+
+#endif /* _OPENTRACING_UTIL_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/include/vars.h b/addons/ot/include/vars.h
new file mode 100644
index 0000000..550cc89
--- /dev/null
+++ b/addons/ot/include/vars.h
@@ -0,0 +1,55 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef _OPENTRACING_VARS_H_
+#define _OPENTRACING_VARS_H_
+
+#define FLT_OT_VARS_SCOPE "txn"
+#define FLT_OT_VAR_CTX_SIZE int8_t
+#define FLT_OT_VAR_CHAR_DASH 'D'
+#define FLT_OT_VAR_CHAR_SPACE 'S'
+
+struct flt_ot_ctx {
+ char value[BUFSIZ];
+ int value_len;
+};
+
+typedef int (*flt_ot_ctx_loop_cb)(struct sample *, size_t, const char *, const char *, const char *, FLT_OT_VAR_CTX_SIZE, char **, void *);
+
+
+#ifndef DEBUG_OT
+# define flt_ot_vars_dump(...) while (0)
+#else
+void flt_ot_vars_dump(struct stream *s);
+#endif
+int flt_ot_var_register(const char *scope, const char *prefix, const char *name, char **err);
+int flt_ot_var_set(struct stream *s, const char *scope, const char *prefix, const char *name, const char *value, uint opt, char **err);
+int flt_ot_vars_unset(struct stream *s, const char *scope, const char *prefix, uint opt, char **err);
+struct otc_text_map *flt_ot_vars_get(struct stream *s, const char *scope, const char *prefix, uint opt, char **err);
+
+#endif /* _OPENTRACING_VARS_H_ */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/cli.c b/addons/ot/src/cli.c
new file mode 100644
index 0000000..0080dbd
--- /dev/null
+++ b/addons/ot/src/cli.c
@@ -0,0 +1,397 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/***
+ * NAME
+ *   cmn_cli_set_msg -
+ *
+ * ARGUMENTS
+ * appctx -
+ * err -
+ * msg -
+ * cli_state -
+ *
+ * DESCRIPTION
+ *   Store an error or informational message in the CLI applet context and set the applet state accordingly.  NOTE(review): applet_reserve_svcctx() dereferences appctx before the NULL check below — confirm appctx can never be NULL here.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void cmn_cli_set_msg(struct appctx *appctx, char *err, char *msg, int cli_state)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ FLT_OT_FUNC("%p, %p, %p, %d", appctx, err, msg, cli_state);
+
+ if ((appctx == NULL) || ((err == NULL) && (msg == NULL)))
+ FLT_OT_RETURN();
+
+ ctx->err = (err == NULL) ? msg : err;
+ appctx->st0 = (ctx->err == NULL) ? CLI_ST_PROMPT : cli_state;
+
+ FLT_OT_DBG(1, "err(%d): \"%s\"", appctx->st0, ctx->err);
+
+ FLT_OT_RETURN();
+}
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ * flt_ot_cli_parse_debug -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_debug(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL, *msg = NULL;
+ uint8_t value;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+
+ if (FLT_OT_ARG_ISVALID(2)) {
+ value = flt_ot_strtoll(args[2], 0, 255, &err);
+ if (err == NULL) {
+ _HA_ATOMIC_STORE(&(flt_ot_debug.level), value);
+
+ (void)memprintf(&msg, FLT_OT_CLI_CMD " : debug level set to %hhu", value);
+ } else {
+ retval = 1;
+ }
+ } else {
+ value = _HA_ATOMIC_LOAD(&(flt_ot_debug.level));
+
+ (void)memprintf(&msg, FLT_OT_CLI_CMD " : current debug level is %hhu", value);
+ }
+
+ cmn_cli_set_msg(appctx, err, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ * flt_ot_cli_parse_disabled -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_disabled(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *msg = NULL;
+ bool value = (uintptr_t)private;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+
+ FLT_OT_PROXIES_LIST_START() {
+ _HA_ATOMIC_STORE(&(conf->tracer->flag_disabled), value);
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : filter %sabled", FLT_OT_CLI_MSG_CAT(msg), value ? "dis" : "en");
+ } FLT_OT_PROXIES_LIST_END();
+
+ cmn_cli_set_msg(appctx, NULL, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_cli_parse_option -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_option(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *msg = NULL;
+ bool value = (uintptr_t)private;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+
+ FLT_OT_PROXIES_LIST_START() {
+ _HA_ATOMIC_STORE(&(conf->tracer->flag_harderr), value);
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : filter set %s-errors", FLT_OT_CLI_MSG_CAT(msg), value ? "hard" : "soft");
+ } FLT_OT_PROXIES_LIST_END();
+
+ cmn_cli_set_msg(appctx, NULL, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_cli_parse_logging -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_logging(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL, *msg = NULL;
+ uint8_t value;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+
+ if (FLT_OT_ARG_ISVALID(2)) {
+ if (strcasecmp(args[2], FLT_OT_CLI_LOGGING_OFF) == 0) {
+ value = FLT_OT_LOGGING_OFF;
+ }
+ else if (strcasecmp(args[2], FLT_OT_CLI_LOGGING_ON) == 0) {
+ value = FLT_OT_LOGGING_ON;
+ }
+ else if (strcasecmp(args[2], FLT_OT_CLI_LOGGING_NOLOGNORM) == 0) {
+ value = FLT_OT_LOGGING_ON | FLT_OT_LOGGING_NOLOGNORM;
+ }
+ else {
+ (void)memprintf(&err, "'%s' : invalid value, use <" FLT_OT_CLI_LOGGING_OFF " | " FLT_OT_CLI_LOGGING_ON " | " FLT_OT_CLI_LOGGING_NOLOGNORM ">", args[2]);
+
+ retval = 1;
+ }
+
+ if (retval == 0) {
+ FLT_OT_PROXIES_LIST_START() {
+ _HA_ATOMIC_STORE(&(conf->tracer->logging), value);
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : logging is %s", FLT_OT_CLI_MSG_CAT(msg), FLT_OT_CLI_LOGGING_STATE(value));
+ } FLT_OT_PROXIES_LIST_END();
+ }
+ } else {
+ FLT_OT_PROXIES_LIST_START() {
+ value = _HA_ATOMIC_LOAD(&(conf->tracer->logging));
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : logging is currently %s", FLT_OT_CLI_MSG_CAT(msg), FLT_OT_CLI_LOGGING_STATE(value));
+ } FLT_OT_PROXIES_LIST_END();
+ }
+
+ cmn_cli_set_msg(appctx, err, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_cli_parse_rate -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_rate(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL, *msg = NULL;
+ uint32_t value;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+
+ if (FLT_OT_ARG_ISVALID(2)) {
+ value = FLT_OT_FLOAT_U32(flt_ot_strtod(args[2], 0.0, FLT_OT_RATE_LIMIT_MAX, &err), FLT_OT_RATE_LIMIT_MAX);
+ if (err == NULL) {
+ FLT_OT_PROXIES_LIST_START() {
+ _HA_ATOMIC_STORE(&(conf->tracer->rate_limit), value);
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : rate limit set to %.2f", FLT_OT_CLI_MSG_CAT(msg), FLT_OT_U32_FLOAT(value, FLT_OT_RATE_LIMIT_MAX));
+ } FLT_OT_PROXIES_LIST_END();
+ } else {
+ retval = 1;
+ }
+ } else {
+ FLT_OT_PROXIES_LIST_START() {
+ value = _HA_ATOMIC_LOAD(&(conf->tracer->rate_limit));
+
+ (void)memprintf(&msg, "%s%s" FLT_OT_CLI_CMD " : current rate limit is %.2f", FLT_OT_CLI_MSG_CAT(msg), FLT_OT_U32_FLOAT(value, FLT_OT_RATE_LIMIT_MAX));
+ } FLT_OT_PROXIES_LIST_END();
+ }
+
+ cmn_cli_set_msg(appctx, err, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_cli_parse_status -
+ *
+ * ARGUMENTS
+ * args -
+ * payload -
+ * appctx -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_cli_parse_status(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ const char *nl = "";
+ char *msg = NULL;
+ int retval = 0;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p", args, payload, appctx, private);
+
+ FLT_OT_ARGS_DUMP();
+ flt_ot_filters_dump();
+
+ (void)memprintf(&msg, " " FLT_OT_OPT_NAME " filter status\n" FLT_OT_STR_DASH_78);
+#ifdef DEBUG_OT
+ (void)memprintf(&msg, "%s\n debug level: 0x%02hhx\n", msg, flt_ot_debug.level);
+#endif
+
+ FLT_OT_PROXIES_LIST_START() {
+ (void)memprintf(&msg, "%s\n%s filter %s\n", msg, nl, conf->id);
+ (void)memprintf(&msg, "%s configuration: %s\n", msg, conf->cfg_file);
+ (void)memprintf(&msg, "%s disable count: %" PRIu64 " %" PRIu64 "\n\n", msg, conf->cnt.disabled[0], conf->cnt.disabled[1]);
+ (void)memprintf(&msg, "%s tracer %s\n", msg, conf->tracer->id);
+ (void)memprintf(&msg, "%s configuration: %s\n", msg, conf->tracer->config);
+ (void)memprintf(&msg, "%s plugin: %s\n", msg, conf->tracer->plugin);
+ (void)memprintf(&msg, "%s rate limit: %.2f %%\n", msg, FLT_OT_U32_FLOAT(conf->tracer->rate_limit, FLT_OT_RATE_LIMIT_MAX));
+ (void)memprintf(&msg, "%s hard errors: %s\n", msg, FLT_OT_STR_FLAG_YN(conf->tracer->flag_harderr));
+ (void)memprintf(&msg, "%s disabled: %s\n", msg, FLT_OT_STR_FLAG_YN(conf->tracer->flag_disabled));
+ (void)memprintf(&msg, "%s logging: %s\n", msg, FLT_OT_CLI_LOGGING_STATE(conf->tracer->logging));
+ (void)memprintf(&msg, "%s analyzers: %08x", msg, conf->tracer->analyzers);
+
+ nl = "\n";
+ } FLT_OT_PROXIES_LIST_END();
+
+ cmn_cli_set_msg(appctx, NULL, msg, CLI_ST_PRINT_DYNERR);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+static struct cli_kw_list cli_kws = { { }, {
+#ifdef DEBUG_OT
+ { { FLT_OT_CLI_CMD, "debug", NULL }, FLT_OT_CLI_CMD " debug [level] : set the OT filter debug level (default: get current debug level)", flt_ot_cli_parse_debug, NULL, NULL, NULL, 0 },
+#endif
+ { { FLT_OT_CLI_CMD, "disable", NULL }, FLT_OT_CLI_CMD " disable : disable the OT filter", flt_ot_cli_parse_disabled, NULL, NULL, (void *)1, 0 },
+ { { FLT_OT_CLI_CMD, "enable", NULL }, FLT_OT_CLI_CMD " enable : enable the OT filter", flt_ot_cli_parse_disabled, NULL, NULL, (void *)0, 0 },
+ { { FLT_OT_CLI_CMD, "soft-errors", NULL }, FLT_OT_CLI_CMD " soft-errors : turning off hard-errors mode", flt_ot_cli_parse_option, NULL, NULL, (void *)0, 0 },
+ { { FLT_OT_CLI_CMD, "hard-errors", NULL }, FLT_OT_CLI_CMD " hard-errors : enabling hard-errors mode", flt_ot_cli_parse_option, NULL, NULL, (void *)1, 0 },
+ { { FLT_OT_CLI_CMD, "logging", NULL }, FLT_OT_CLI_CMD " logging [state] : set logging state (default: get current logging state)", flt_ot_cli_parse_logging, NULL, NULL, NULL, 0 },
+ { { FLT_OT_CLI_CMD, "rate", NULL }, FLT_OT_CLI_CMD " rate [value] : set the rate limit (default: get current rate value)", flt_ot_cli_parse_rate, NULL, NULL, NULL, 0 },
+ { { FLT_OT_CLI_CMD, "status", NULL }, FLT_OT_CLI_CMD " status : show the OT filter status", flt_ot_cli_parse_status, NULL, NULL, NULL, 0 },
+ { /* END */ }
+}};
+
+
+/***
+ * NAME
+ * flt_ot_cli_init -
+ *
+ * ARGUMENTS
+ * This function takes no arguments.
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_cli_init(void)
+{
+ FLT_OT_FUNC("");
+
+ /* Register CLI keywords. */
+ cli_register_kw(&cli_kws);
+
+ FLT_OT_RETURN();
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/conf.c b/addons/ot/src/conf.c
new file mode 100644
index 0000000..c6375a6
--- /dev/null
+++ b/addons/ot/src/conf.c
@@ -0,0 +1,764 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/***
+ * NAME
+ * flt_ot_conf_hdr_init -
+ *
+ * ARGUMENTS
+ * size -
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static void *flt_ot_conf_hdr_init(size_t size, const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_hdr *retptr = NULL, *ptr;
+
+ FLT_OT_FUNC("%zu, \"%s\", %d, %p, %p:%p", size, id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ if (head != NULL)
+ list_for_each_entry(ptr, head, list)
+ if (strcmp(ptr->id, id) == 0) {
+ FLT_OT_ERR("'%s' : already defined", id);
+
+ FLT_OT_RETURN_PTR(retptr);
+ }
+
+ retptr = FLT_OT_CALLOC(1, size);
+ if (retptr != NULL) {
+ retptr->id_len = strlen(id);
+ if (retptr->id_len >= FLT_OT_ID_MAXLEN)
+ FLT_OT_ERR("'%s' : name too long", id);
+ else
+ retptr->id = FLT_OT_STRDUP(id);
+
+ if (retptr->id == NULL)
+ FLT_OT_FREE_CLEAR(retptr);
+ }
+
+ if (retptr != NULL) {
+ retptr->cfg_line = linenum;
+
+ if (head != NULL)
+ LIST_APPEND(head, &(retptr->list));
+ } else {
+ FLT_OT_ERR("out of memory");
+ }
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_ph_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_ph *flt_ot_conf_ph_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_ph *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr != NULL)
+ FLT_OT_DBG_CONF_PH("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_ph_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_ph_free(struct flt_ot_conf_ph **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_PH("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_sample_expr_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_sample_expr *flt_ot_conf_sample_expr_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_sample_expr *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr != NULL)
+ FLT_OT_DBG_CONF_SAMPLE_EXPR("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_sample_expr_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_sample_expr_free(struct flt_ot_conf_sample_expr **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_SAMPLE_EXPR("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->value);
+ release_sample_expr((*ptr)->expr);
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_sample_init -
+ *
+ * ARGUMENTS
+ * args -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_sample *flt_ot_conf_sample_init(char **args, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_sample *retptr;
+
+ FLT_OT_FUNC("%p, %d, %p, %p:%p", args, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), args[1], linenum, head, err);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ flt_ot_args_to_str(args, 2, &(retptr->value));
+ if (retptr->value == NULL) {
+ FLT_OT_FREE_CLEAR(retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+ }
+
+ retptr->num_exprs = flt_ot_args_count(args) - 2;
+ LIST_INIT(&(retptr->exprs));
+
+ FLT_OT_DBG_CONF_SAMPLE("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_sample_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_sample_free(struct flt_ot_conf_sample **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_SAMPLE("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->key);
+ FLT_OT_FREE((*ptr)->value);
+ FLT_OT_LIST_DESTROY(sample_expr, &((*ptr)->exprs));
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_str_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_str *flt_ot_conf_str_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_str *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr != NULL)
+ FLT_OT_DBG_CONF_STR("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_str_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_str_free(struct flt_ot_conf_str **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_STR("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->str);
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_context_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_context *flt_ot_conf_context_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_context *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr != NULL)
+ FLT_OT_DBG_CONF_CONTEXT("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_context_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_context_free(struct flt_ot_conf_context **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_CONTEXT("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_span_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_span *flt_ot_conf_span_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_span *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ LIST_INIT(&(retptr->tags));
+ LIST_INIT(&(retptr->logs));
+ LIST_INIT(&(retptr->baggages));
+
+ FLT_OT_DBG_CONF_SPAN("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_span_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_span_free(struct flt_ot_conf_span **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_SPAN("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_FREE((*ptr)->ref_id);
+ FLT_OT_FREE((*ptr)->ctx_id);
+ FLT_OT_LIST_DESTROY(sample, &((*ptr)->tags));
+ FLT_OT_LIST_DESTROY(sample, &((*ptr)->logs));
+ FLT_OT_LIST_DESTROY(sample, &((*ptr)->baggages));
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_scope_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_scope *flt_ot_conf_scope_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_scope *retptr = NULL;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ LIST_INIT(&(retptr->acls));
+ LIST_INIT(&(retptr->contexts));
+ LIST_INIT(&(retptr->spans));
+ LIST_INIT(&(retptr->finish));
+
+ FLT_OT_DBG_CONF_SCOPE("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+/***
+ * NAME
+ * flt_ot_conf_scope_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_scope_free(struct flt_ot_conf_scope **ptr)
+{
+ struct acl *acl, *aclback;
+
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_SCOPE("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ list_for_each_entry_safe(acl, aclback, &((*ptr)->acls), list) {
+ prune_acl(acl);
+ FLT_OT_LIST_DEL(&(acl->list));
+ FLT_OT_FREE(acl);
+ }
+ free_acl_cond((*ptr)->cond);
+ FLT_OT_LIST_DESTROY(context, &((*ptr)->contexts));
+ FLT_OT_LIST_DESTROY(span, &((*ptr)->spans));
+ FLT_OT_LIST_DESTROY(str, &((*ptr)->finish));
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_group_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * head -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_group *flt_ot_conf_group_init(const char *id, int linenum, struct list *head, char **err)
+{
+ struct flt_ot_conf_group *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p:%p", id, linenum, head, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, head, err);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ LIST_INIT(&(retptr->ph_scopes));
+
+ FLT_OT_DBG_CONF_GROUP("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_group_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_group_free(struct flt_ot_conf_group **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_GROUP("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_LIST_DESTROY(ph_scope, &((*ptr)->ph_scopes));
+ FLT_OT_LIST_DEL(&((*ptr)->list));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_tracer_init -
+ *
+ * ARGUMENTS
+ * id -
+ * linenum -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf_tracer *flt_ot_conf_tracer_init(const char *id, int linenum, char **err)
+{
+ struct flt_ot_conf_tracer *retptr;
+
+ FLT_OT_FUNC("\"%s\", %d, %p:%p", id, linenum, FLT_OT_DPTR_ARGS(err));
+
+ retptr = flt_ot_conf_hdr_init(sizeof(*retptr), id, linenum, NULL, err);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ retptr->rate_limit = FLT_OT_FLOAT_U32(FLT_OT_RATE_LIMIT_MAX, FLT_OT_RATE_LIMIT_MAX);
+ init_new_proxy(&(retptr->proxy_log));
+ LIST_INIT(&(retptr->acls));
+ LIST_INIT(&(retptr->ph_groups));
+ LIST_INIT(&(retptr->ph_scopes));
+
+ FLT_OT_DBG_CONF_TRACER("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_tracer_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_tracer_free(struct flt_ot_conf_tracer **ptr)
+{
+ struct acl *acl, *aclback;
+ struct logger *logger, *loggerback;
+
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF_TRACER("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_FREE((*ptr)->config);
+ FLT_OT_FREE((*ptr)->cfgbuf);
+ FLT_OT_FREE((*ptr)->plugin);
+ FLT_OT_DBG(2, "- deleting acls list %s", flt_ot_list_debug(&((*ptr)->acls)));
+ list_for_each_entry_safe(acl, aclback, &((*ptr)->acls), list) {
+ prune_acl(acl);
+ FLT_OT_LIST_DEL(&(acl->list));
+ FLT_OT_FREE(acl);
+ }
+ FLT_OT_DBG(2, "- deleting proxy_log.loggers list %s", flt_ot_list_debug(&((*ptr)->proxy_log.loggers)));
+ list_for_each_entry_safe(logger, loggerback, &((*ptr)->proxy_log.loggers), list) {
+ LIST_DELETE(&(logger->list));
+ FLT_OT_FREE(logger);
+ }
+ FLT_OT_LIST_DESTROY(ph_group, &((*ptr)->ph_groups));
+ FLT_OT_LIST_DESTROY(ph_scope, &((*ptr)->ph_scopes));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_init -
+ *
+ * ARGUMENTS
+ * px -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct flt_ot_conf *flt_ot_conf_init(struct proxy *px)
+{
+ struct flt_ot_conf *retptr;
+
+ FLT_OT_FUNC("%p", px);
+
+ retptr = FLT_OT_CALLOC(1, sizeof(*retptr));
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ retptr->proxy = px;
+ LIST_INIT(&(retptr->groups));
+ LIST_INIT(&(retptr->scopes));
+
+ FLT_OT_DBG_CONF("- init ", retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_conf_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_conf_free(struct flt_ot_conf **ptr)
+{
+ FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+ if ((ptr == NULL) || (*ptr == NULL))
+ FLT_OT_RETURN();
+
+ FLT_OT_DBG_CONF("- free ", *ptr);
+
+ FLT_OT_FREE((*ptr)->id);
+ FLT_OT_FREE((*ptr)->cfg_file);
+ flt_ot_conf_tracer_free(&((*ptr)->tracer));
+ FLT_OT_LIST_DESTROY(group, &((*ptr)->groups));
+ FLT_OT_LIST_DESTROY(scope, &((*ptr)->scopes));
+ FLT_OT_FREE_CLEAR(*ptr);
+
+ FLT_OT_RETURN();
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/event.c b/addons/ot/src/event.c
new file mode 100644
index 0000000..dc29d52
--- /dev/null
+++ b/addons/ot/src/event.c
@@ -0,0 +1,338 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
/*
 * Per-event descriptor table, expanded from the FLT_OT_EVENT_DEFINES
 * X-macro list.  For each event it pastes together the analyzer bit
 * (AN_<dir>_<name>), the sample direction (SMP_OPT_DIR_<dir>) and the
 * FE/BE sample validity masks; the trailing (e, f) fields are copied
 * verbatim and include the event name used by flt_ot_event_run().
 */
#define FLT_OT_EVENT_DEF(a,b,c,d,e,f) { AN_##b##_##a, SMP_OPT_DIR_##b, SMP_VAL_FE_##c, SMP_VAL_BE_##d, e, f },
const struct flt_ot_event_data flt_ot_event_data[FLT_OT_EVENT_MAX] = { FLT_OT_EVENT_DEFINES };
#undef FLT_OT_EVENT_DEF
+
+
+/***
+ * NAME
+ * flt_ot_scope_run_span -
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * chn -
+ * dir -
+ * span -
+ * data -
+ * conf_span -
+ * ts -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise.
+ */
+static int flt_ot_scope_run_span(struct stream *s, struct filter *f, struct channel *chn, uint dir, struct flt_ot_scope_span *span, struct flt_ot_scope_data *data, const struct flt_ot_conf_span *conf_span, const struct timespec *ts, char **err)
+{
+ struct flt_ot_conf *conf = FLT_OT_CONF(f);
+ int retval = FLT_OT_RET_OK;
+
+ FLT_OT_FUNC("%p, %p, %p, %u, %p, %p, %p, %p, %p:%p", s, f, chn, dir, span, data, conf_span, ts, FLT_OT_DPTR_ARGS(err));
+
+ if (span == NULL)
+ FLT_OT_RETURN_INT(retval);
+
+ if (span->span == NULL) {
+ span->span = ot_span_init(conf->tracer->tracer, span->id, ts, NULL, span->ref_type, FLT_OT_DEREF(span->ref_ctx, idx, -1), span->ref_span, data->tags, data->num_tags, err);
+ if (span->span == NULL)
+ retval = FLT_OT_RET_ERROR;
+ }
+ else if (data->num_tags > 0)
+ if (ot_span_tag(span->span, data->tags, data->num_tags) == -1)
+ retval = FLT_OT_RET_ERROR;
+
+ if ((span->span != NULL) && (data->baggage != NULL))
+ if (ot_span_set_baggage(span->span, data->baggage) == -1)
+ retval = FLT_OT_RET_ERROR;
+
+ if ((span->span != NULL) && (data->num_log_fields > 0))
+ if (ot_span_log(span->span, data->log_fields, data->num_log_fields) == -1)
+ retval = FLT_OT_RET_ERROR;
+
+ if ((span->span != NULL) && (conf_span->ctx_id != NULL)) {
+ struct otc_http_headers_writer writer;
+ struct otc_text_map *text_map = NULL;
+ struct otc_span_context *span_ctx;
+
+ span_ctx = ot_inject_http_headers(conf->tracer->tracer, span->span, &writer, err);
+ if (span_ctx != NULL) {
+ int i = 0;
+
+ if (conf_span->ctx_flags & (FLT_OT_CTX_USE_VARS | FLT_OT_CTX_USE_HEADERS)) {
+ for (text_map = &(writer.text_map); i < text_map->count; i++) {
+#ifdef USE_OT_VARS
+ if (!(conf_span->ctx_flags & FLT_OT_CTX_USE_VARS))
+ /* Do nothing. */;
+ else if (flt_ot_var_register(FLT_OT_VARS_SCOPE, conf_span->ctx_id, text_map->key[i], err) == -1)
+ retval = FLT_OT_RET_ERROR;
+ else if (flt_ot_var_set(s, FLT_OT_VARS_SCOPE, conf_span->ctx_id, text_map->key[i], text_map->value[i], dir, err) == -1)
+ retval = FLT_OT_RET_ERROR;
+#endif
+
+ if (!(conf_span->ctx_flags & FLT_OT_CTX_USE_HEADERS))
+ /* Do nothing. */;
+ else if (flt_ot_http_header_set(chn, conf_span->ctx_id, text_map->key[i], text_map->value[i], err) == -1)
+ retval = FLT_OT_RET_ERROR;
+ }
+ }
+
+ span_ctx->destroy(&span_ctx);
+ otc_text_map_destroy(&text_map, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+ }
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_scope_run -
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * chn -
+ * conf_scope -
+ * ts -
+ * dir -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise.
+ */
+int flt_ot_scope_run(struct stream *s, struct filter *f, struct channel *chn, struct flt_ot_conf_scope *conf_scope, const struct timespec *ts, uint dir, char **err)
+{
+ struct flt_ot_conf *conf = FLT_OT_CONF(f);
+ struct flt_ot_conf_context *conf_ctx;
+ struct flt_ot_conf_span *conf_span;
+ struct flt_ot_conf_str *finish;
+ struct timespec ts_now;
+ int retval = FLT_OT_RET_OK;
+
+ FLT_OT_FUNC("%p, %p, %p, %p, %p, %u, %p:%p", s, f, chn, conf_scope, ts, dir, FLT_OT_DPTR_ARGS(err));
+
+ FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+ FLT_OT_DBG(3, "run scope '%s' %d", conf_scope->id, conf_scope->event);
+ FLT_OT_DBG_CONF_SCOPE("run scope ", conf_scope);
+
+ if (ts == NULL) {
+ (void)clock_gettime(CLOCK_MONOTONIC, &ts_now);
+
+ ts = &ts_now;
+ }
+
+ if (conf_scope->cond != NULL) {
+ enum acl_test_res res;
+ int rc;
+
+ res = acl_exec_cond(conf_scope->cond, s->be, s->sess, s, dir | SMP_OPT_FINAL);
+ rc = acl_pass(res);
+ if (conf_scope->cond->pol == ACL_COND_UNLESS)
+ rc = !rc;
+
+ FLT_OT_DBG(3, "the ACL rule %s", rc ? "matches" : "does not match");
+
+ /*
+ * If the rule does not match, the current scope is skipped.
+ *
+ * If it is a root span, further processing of the session is
+ * disabled. As soon as the first span is encountered which
+ * is marked as root, further search is interrupted.
+ */
+ if (!rc) {
+ list_for_each_entry(conf_span, &(conf_scope->spans), list)
+ if (conf_span->flag_root) {
+ FLT_OT_DBG(0, "session disabled");
+
+ FLT_OT_RT_CTX(f->ctx)->flag_disabled = 1;
+
+ _HA_ATOMIC_ADD(conf->cnt.disabled + 0, 1);
+
+ break;
+ }
+
+ FLT_OT_RETURN_INT(retval);
+ }
+ }
+
+ list_for_each_entry(conf_ctx, &(conf_scope->contexts), list) {
+ struct otc_text_map *text_map = NULL;
+
+ FLT_OT_DBG(3, "run context '%s' -> '%s'", conf_scope->id, conf_ctx->id);
+ FLT_OT_DBG_CONF_CONTEXT("run context ", conf_ctx);
+
+ /*
+ * The OpenTracing context is read from the HTTP header
+ * or from HAProxy variables.
+ */
+ if (conf_ctx->flags & FLT_OT_CTX_USE_HEADERS)
+ text_map = flt_ot_http_headers_get(chn, conf_ctx->id, conf_ctx->id_len, err);
+#ifdef USE_OT_VARS
+ else
+ text_map = flt_ot_vars_get(s, FLT_OT_VARS_SCOPE, conf_ctx->id, dir, err);
+#endif
+
+ if (text_map != NULL) {
+ if (flt_ot_scope_context_init(f->ctx, conf->tracer->tracer, conf_ctx->id, conf_ctx->id_len, text_map, dir, err) == NULL)
+ retval = FLT_OT_RET_ERROR;
+
+ otc_text_map_destroy(&text_map, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+ } else {
+ retval = FLT_OT_RET_ERROR;
+ }
+ }
+
+ list_for_each_entry(conf_span, &(conf_scope->spans), list) {
+ struct flt_ot_scope_data data;
+ struct flt_ot_scope_span *span;
+ struct flt_ot_conf_sample *sample;
+
+ FLT_OT_DBG(3, "run span '%s' -> '%s'", conf_scope->id, conf_span->id);
+ FLT_OT_DBG_CONF_SPAN("run span ", conf_span);
+
+ (void)memset(&data, 0, sizeof(data));
+
+ span = flt_ot_scope_span_init(f->ctx, conf_span->id, conf_span->id_len, conf_span->ref_type, conf_span->ref_id, conf_span->ref_id_len, dir, err);
+ if (span == NULL)
+ retval = FLT_OT_RET_ERROR;
+
+ list_for_each_entry(sample, &(conf_span->tags), list) {
+ FLT_OT_DBG(3, "adding tag '%s' -> '%s'", sample->key, sample->value);
+
+ if (flt_ot_sample_add(s, dir, sample, &data, FLT_OT_EVENT_SAMPLE_TAG, err) == FLT_OT_RET_ERROR)
+ retval = FLT_OT_RET_ERROR;
+ }
+
+ list_for_each_entry(sample, &(conf_span->logs), list) {
+ FLT_OT_DBG(3, "adding log '%s' -> '%s'", sample->key, sample->value);
+
+ if (flt_ot_sample_add(s, dir, sample, &data, FLT_OT_EVENT_SAMPLE_LOG, err) == FLT_OT_RET_ERROR)
+ retval = FLT_OT_RET_ERROR;
+ }
+
+ list_for_each_entry(sample, &(conf_span->baggages), list) {
+ FLT_OT_DBG(3, "adding baggage '%s' -> '%s'", sample->key, sample->value);
+
+ if (flt_ot_sample_add(s, dir, sample, &data, FLT_OT_EVENT_SAMPLE_BAGGAGE, err) == FLT_OT_RET_ERROR)
+ retval = FLT_OT_RET_ERROR;
+ }
+
+ if (retval != FLT_OT_RET_ERROR)
+ if (flt_ot_scope_run_span(s, f, chn, dir, span, &data, conf_span, ts, err) == FLT_OT_RET_ERROR)
+ retval = FLT_OT_RET_ERROR;
+
+ flt_ot_scope_data_free(&data);
+ }
+
+ list_for_each_entry(finish, &(conf_scope->finish), list)
+ if (flt_ot_scope_finish_mark(f->ctx, finish->str, finish->str_len) == -1)
+ retval = FLT_OT_RET_ERROR;
+
+ flt_ot_scope_finish_marked(f->ctx, ts);
+ flt_ot_scope_free_unused(f->ctx, chn);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_event_run -
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * chn -
+ * event -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise.
+ */
+int flt_ot_event_run(struct stream *s, struct filter *f, struct channel *chn, int event, char **err)
+{
+ struct flt_ot_conf *conf = FLT_OT_CONF(f);
+ struct flt_ot_conf_scope *conf_scope;
+ struct timespec ts;
+ int retval = FLT_OT_RET_OK;
+
+ FLT_OT_FUNC("%p, %p, %p, %d, %p:%p", s, f, chn, event, FLT_OT_DPTR_ARGS(err));
+
+ FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+ FLT_OT_DBG(3, "run event '%s' %d", flt_ot_event_data[event].name, event);
+
+#ifdef DEBUG_OT
+ _HA_ATOMIC_ADD(conf->cnt.event[event].htx + (htx_is_empty(htxbuf(&(chn->buf))) ? 1 : 0), 1);
+#endif
+
+ FLT_OT_RT_CTX(f->ctx)->analyzers |= flt_ot_event_data[event].an_bit;
+
+ /* All spans should be created/completed at the same time. */
+ (void)clock_gettime(CLOCK_MONOTONIC, &ts);
+
+ /*
+ * It is possible that there are defined multiple scopes that use the
+ * same event. Therefore, there must not be a 'break' here, ie an
+ * exit from the 'for' loop.
+ */
+ list_for_each_entry(conf_scope, &(conf->scopes), list) {
+ if (conf_scope->event != event)
+ /* Do nothing. */;
+ else if (!conf_scope->flag_used)
+ FLT_OT_DBG(3, "scope '%s' %d not used", conf_scope->id, conf_scope->event);
+ else if (flt_ot_scope_run(s, f, chn, conf_scope, &ts, flt_ot_event_data[event].smp_opt_dir, err) == FLT_OT_RET_ERROR)
+ retval = FLT_OT_RET_ERROR;
+ }
+
+#ifdef USE_OT_VARS
+ flt_ot_vars_dump(s);
+#endif
+ flt_ot_http_headers_dump(chn);
+
+ FLT_OT_DBG(3, "event = %d, chn = %p, s->req = %p, s->res = %p", event, chn, &(s->req), &(s->res));
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/filter.c b/addons/ot/src/filter.c
new file mode 100644
index 0000000..cf67fd2
--- /dev/null
+++ b/addons/ot/src/filter.c
@@ -0,0 +1,1176 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/*
+ * OpenTracing filter id, used to identify OpenTracing filters.
+ * The name of this variable is consistent with the other filter names
+ * declared in include/haproxy/filters.h .
+ */
+const char *ot_flt_id = "the OpenTracing filter";
+
+
+/***
+ * NAME
+ * flt_ot_is_disabled -
+ *
+ * ARGUMENTS
+ * f -
+ * event -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+bool flt_ot_is_disabled(const struct filter *f FLT_OT_DBG_ARGS(, int event))
+{
+#ifdef DEBUG_OT
+ const struct flt_ot_conf *conf = FLT_OT_CONF(f);
+ const char *msg;
+#endif
+ bool retval;
+
+ retval = FLT_OT_RT_CTX(f->ctx)->flag_disabled ? 1 : 0;
+
+#ifdef DEBUG_OT
+ msg = retval ? " (disabled)" : "";
+
+ if (FLT_OT_IN_RANGE(event, 0, FLT_OT_EVENT_MAX - 1))
+ FLT_OT_DBG(2, "filter '%s', type: %s, event: '%s' %d%s", conf->id, flt_ot_type(f), flt_ot_event_data[event].name, event, msg);
+ else
+ FLT_OT_DBG(2, "filter '%s', type: %s%s", conf->id, flt_ot_type(f), msg);
+#endif
+
+ return retval;
+}
+
+
+/***
+ * NAME
+ * flt_ot_return_int -
+ *
+ * ARGUMENTS
+ * f -
+ * err -
+ * retval -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_return_int(const struct filter *f, char **err, int retval)
+{
+ struct flt_ot_runtime_context *rt_ctx = f->ctx;
+
+ if ((retval == FLT_OT_RET_ERROR) || ((err != NULL) && (*err != NULL))) {
+ if (rt_ctx->flag_harderr) {
+ FLT_OT_DBG(1, "WARNING: filter hard-error (disabled)");
+
+ rt_ctx->flag_disabled = 1;
+
+ _HA_ATOMIC_ADD(FLT_OT_CONF(f)->cnt.disabled + 1, 1);
+ } else {
+ FLT_OT_DBG(1, "WARNING: filter soft-error");
+ }
+
+ retval = FLT_OT_RET_OK;
+ }
+
+ FLT_OT_ERR_FREE(*err);
+
+ return retval;
+}
+
+
+/***
+ * NAME
+ * flt_ot_return_void -
+ *
+ * ARGUMENTS
+ * f -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_return_void(const struct filter *f, char **err)
+{
+ struct flt_ot_runtime_context *rt_ctx = f->ctx;
+
+ if ((err != NULL) && (*err != NULL)) {
+ if (rt_ctx->flag_harderr) {
+ FLT_OT_DBG(1, "WARNING: filter hard-error (disabled)");
+
+ rt_ctx->flag_disabled = 1;
+
+ _HA_ATOMIC_ADD(FLT_OT_CONF(f)->cnt.disabled + 1, 1);
+ } else {
+ FLT_OT_DBG(1, "WARNING: filter soft-error");
+ }
+ }
+
+ FLT_OT_ERR_FREE(*err);
+}
+
+
+/***
+ * NAME
+ * flt_ot_init - Initialize the filter.
+ *
+ * ARGUMENTS
+ * p -
+ * fconf -
+ *
+ * DESCRIPTION
+ * It initializes the filter for a proxy. You may define this callback
+ * if you need to complete your filter configuration.
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_init(struct proxy *p, struct flt_conf *fconf)
+{
+ struct flt_ot_conf *conf = FLT_OT_DEREF(fconf, conf, NULL);
+ char *err = NULL;
+ int retval = FLT_OT_RET_ERROR;
+
+ FLT_OT_FUNC("%p, %p", p, fconf);
+
+ if (conf == NULL)
+ FLT_OT_RETURN_INT(retval);
+
+ flt_ot_cli_init();
+
+ /*
+ * Initialize the OpenTracing library.
+ */
+ retval = ot_init(&(conf->tracer->tracer), conf->tracer->plugin, &err);
+ if (retval != FLT_OT_RET_ERROR)
+ /* Do nothing. */;
+ else if (err != NULL) {
+ FLT_OT_ALERT("%s", err);
+
+ FLT_OT_ERR_FREE(err);
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_deinit - Free resources allocated by the filter.
+ *
+ * ARGUMENTS
+ * p -
+ * fconf -
+ *
+ * DESCRIPTION
+ * It cleans up what the parsing function and the init callback have done.
+ * This callback is useful to release memory allocated for the filter
+ * configuration.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_deinit(struct proxy *p, struct flt_conf *fconf)
+{
+ struct flt_ot_conf **conf = (fconf == NULL) ? NULL : (typeof(conf))&(fconf->conf);
+#ifdef DEBUG_OT
+ int i;
+#endif
+
+ FLT_OT_FUNC("%p, %p", p, fconf);
+
+ if (conf == NULL)
+ FLT_OT_RETURN();
+
+ ot_debug();
+ ot_close(&((*conf)->tracer->tracer));
+
+#ifdef DEBUG_OT
+ FLT_OT_DBG(0, "--- used events ----------");
+ for (i = 0; i < FLT_OT_TABLESIZE((*conf)->cnt.event); i++)
+ if ((*conf)->cnt.event[i].flag_used)
+ FLT_OT_DBG(0, " %02d: %" PRIu64 " / %" PRIu64, i, (*conf)->cnt.event[i].htx[0], (*conf)->cnt.event[i].htx[1]);
+#endif
+
+ flt_ot_conf_free(conf);
+
+ FLT_OT_MEMINFO();
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_check - Check configuration of the filter for the specified proxy.
+ *
+ * ARGUMENTS
+ * p -
+ * fconf -
+ *
+ * DESCRIPTION
+ * Optionally, by implementing the flt_ot_check() callback, you add a
+ * step to check the internal configuration of your filter after the
+ * parsing phase, when the HAProxy configuration is fully defined.
+ *
+ * RETURN VALUE
+ * Returns the number of encountered errors.
+ */
+static int flt_ot_check(struct proxy *p, struct flt_conf *fconf)
+{
+ struct proxy *px;
+ struct flt_ot_conf *conf = FLT_OT_DEREF(fconf, conf, NULL);
+ struct flt_ot_conf_group *conf_group;
+ struct flt_ot_conf_scope *conf_scope;
+ struct flt_ot_conf_ph *ph_group, *ph_scope;
+ int retval = 0, scope_unused_cnt = 0, span_root_cnt = 0;
+
+ FLT_OT_FUNC("%p, %p", p, fconf);
+
+ if (conf == NULL)
+ FLT_OT_RETURN_INT(++retval);
+
+ /*
+ * If only the proxy specified with the <p> parameter is checked, then
+ * no duplicate filters can be found that are not defined in the same
+ * configuration sections.
+ */
+ for (px = proxies_list; px != NULL; px = px->next) {
+ struct flt_conf *fconf_tmp;
+
+ FLT_OT_DBG(2, "proxy '%s'", px->id);
+
+ /*
+ * The names of all OT filters (filter ID) should be checked,
+ * they must be unique.
+ */
+ list_for_each_entry(fconf_tmp, &(px->filter_configs), list)
+ if ((fconf_tmp != fconf) && (fconf_tmp->id == ot_flt_id)) {
+ struct flt_ot_conf *conf_tmp = fconf_tmp->conf;
+
+ FLT_OT_DBG(2, " OT filter '%s'", conf_tmp->id);
+
+ if (strcmp(conf_tmp->id, conf->id) == 0) {
+ FLT_OT_ALERT("''%s' : duplicated filter ID'", conf_tmp->id);
+
+ retval++;
+ }
+ }
+ }
+
+ if (FLT_OT_DEREF(conf->tracer, id, NULL) == NULL) {
+ FLT_OT_ALERT("''%s' : no tracer found'", conf->id);
+
+ retval++;
+ }
+
+ /*
+ * Checking that all defined 'ot-group' sections have correctly declared
+ * 'ot-scope' sections (ie whether the declared 'ot-scope' sections have
+ * corresponding definitions).
+ */
+ list_for_each_entry(conf_group, &(conf->groups), list)
+ list_for_each_entry(ph_scope, &(conf_group->ph_scopes), list) {
+ bool flag_found = 0;
+
+ list_for_each_entry(conf_scope, &(conf->scopes), list)
+ if (strcmp(ph_scope->id, conf_scope->id) == 0) {
+ ph_scope->ptr = conf_scope;
+ conf_scope->flag_used = 1;
+ flag_found = 1;
+
+ break;
+ }
+
+ if (!flag_found) {
+ FLT_OT_ALERT("'" FLT_OT_PARSE_SECTION_GROUP_ID " '%s' : try to use undefined " FLT_OT_PARSE_SECTION_SCOPE_ID " '%s''", conf_group->id, ph_scope->id);
+
+ retval++;
+ }
+ }
+
+ if (conf->tracer != NULL) {
+ /*
+ * Checking that all declared 'groups' keywords have correctly
+ * defined 'ot-group' sections.
+ */
+ list_for_each_entry(ph_group, &(conf->tracer->ph_groups), list) {
+ bool flag_found = 0;
+
+ list_for_each_entry(conf_group, &(conf->groups), list)
+ if (strcmp(ph_group->id, conf_group->id) == 0) {
+ ph_group->ptr = conf_group;
+ conf_group->flag_used = 1;
+ flag_found = 1;
+
+ break;
+ }
+
+ if (!flag_found) {
+ FLT_OT_ALERT("'" FLT_OT_PARSE_SECTION_TRACER_ID " '%s' : try to use undefined " FLT_OT_PARSE_SECTION_GROUP_ID " '%s''", conf->tracer->id, ph_group->id);
+
+ retval++;
+ }
+ }
+
+ /*
+ * Checking that all declared 'scopes' keywords have correctly
+ * defined 'ot-scope' sections.
+ */
+ list_for_each_entry(ph_scope, &(conf->tracer->ph_scopes), list) {
+ bool flag_found = 0;
+
+ list_for_each_entry(conf_scope, &(conf->scopes), list)
+ if (strcmp(ph_scope->id, conf_scope->id) == 0) {
+ ph_scope->ptr = conf_scope;
+ conf_scope->flag_used = 1;
+ flag_found = 1;
+
+ break;
+ }
+
+ if (!flag_found) {
+ FLT_OT_ALERT("'" FLT_OT_PARSE_SECTION_TRACER_ID " '%s' : try to use undefined " FLT_OT_PARSE_SECTION_SCOPE_ID " '%s''", conf->tracer->id, ph_scope->id);
+
+ retval++;
+ }
+ }
+ }
+
+ FLT_OT_DBG(3, "--- filter '%s' configuration ----------", conf->id);
+ FLT_OT_DBG(3, "- defined spans ----------");
+
+ list_for_each_entry(conf_scope, &(conf->scopes), list) {
+ if (conf_scope->flag_used) {
+ struct flt_ot_conf_span *conf_span;
+
+ /*
+ * In principle, only one span should be labeled
+ * as a root span.
+ */
+ list_for_each_entry(conf_span, &(conf_scope->spans), list) {
+ FLT_OT_DBG_CONF_SPAN(" ", conf_span);
+
+ span_root_cnt += conf_span->flag_root ? 1 : 0;
+ }
+
+#ifdef DEBUG_OT
+ conf->cnt.event[conf_scope->event].flag_used = 1;
+#endif
+
+ /* Set the flags of the analyzers used. */
+ conf->tracer->analyzers |= flt_ot_event_data[conf_scope->event].an_bit;
+ } else {
+ FLT_OT_ALERT("''%s' : unused " FLT_OT_PARSE_SECTION_SCOPE_ID " '%s''", conf->id, conf_scope->id);
+
+ scope_unused_cnt++;
+ }
+ }
+
+ /*
+ * Unused scopes or a number of root spans other than one do not
+ * necessarily have to be errors, but it is good to print it when
+ * starting HAProxy.
+ */
+ if (scope_unused_cnt > 0)
+ FLT_OT_ALERT("''%s' : %d scope(s) not in use'", conf->id, scope_unused_cnt);
+
+ if (LIST_ISEMPTY(&(conf->scopes)))
+ /* Do nothing. */;
+ else if (span_root_cnt == 0)
+ FLT_OT_ALERT("''%s' : no span is marked as the root span'", conf->id);
+ else if (span_root_cnt > 1)
+ FLT_OT_ALERT("''%s' : multiple spans are marked as the root span'", conf->id);
+
+ FLT_OT_DBG_LIST(conf, group, "", "defined", _group,
+ FLT_OT_DBG_CONF_GROUP(" ", _group);
+ FLT_OT_DBG_LIST(_group, ph_scope, " ", "used", _scope, FLT_OT_DBG_CONF_PH(" ", _scope)));
+ FLT_OT_DBG_LIST(conf, scope, "", "defined", _scope, FLT_OT_DBG_CONF_SCOPE(" ", _scope));
+
+ if (conf->tracer != NULL) {
+ FLT_OT_DBG(3, " --- tracer '%s' configuration ----------", conf->tracer->id);
+ FLT_OT_DBG_LIST(conf->tracer, ph_group, " ", "used", _group, FLT_OT_DBG_CONF_PH(" ", _group));
+ FLT_OT_DBG_LIST(conf->tracer, ph_scope, " ", "used", _scope, FLT_OT_DBG_CONF_PH(" ", _scope));
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_init_per_thread -
+ *
+ * ARGUMENTS
+ * p -
+ * fconf -
+ *
+ * DESCRIPTION
+ * It initializes the filter for each thread. It works the same way than
+ * flt_ot_init() but in the context of a thread. This callback is called
+ * after the thread creation.
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_init_per_thread(struct proxy *p, struct flt_conf *fconf)
+{
+ struct flt_ot_conf *conf = FLT_OT_DEREF(fconf, conf, NULL);
+ char *err = NULL;
+ int retval = FLT_OT_RET_ERROR;
+
+ FLT_OT_FUNC("%p, %p", p, fconf);
+
+ if (conf == NULL)
+ FLT_OT_RETURN_INT(retval);
+
+ /*
+ * Start the OpenTracing library tracer thread.
+ * Enable HTX streams filtering.
+ */
+ if (!(fconf->flags & FLT_CFG_FL_HTX)) {
+ retval = ot_start(conf->tracer->tracer, conf->tracer->cfgbuf, &err);
+ if (retval != FLT_OT_RET_ERROR)
+ fconf->flags |= FLT_CFG_FL_HTX;
+ else if (err != NULL) {
+ FLT_OT_ALERT("%s", err);
+
+ FLT_OT_ERR_FREE(err);
+ }
+ } else {
+ retval = FLT_OT_RET_OK;
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ * flt_ot_deinit_per_thread -
+ *
+ * ARGUMENTS
+ * p -
+ * fconf -
+ *
+ * DESCRIPTION
+ * It cleans up what the init_per_thread callback have done. It is called
+ * in the context of a thread, before exiting it.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_deinit_per_thread(struct proxy *p, struct flt_conf *fconf)
+{
+ FLT_OT_FUNC("%p, %p", p, fconf);
+
+ FLT_OT_RETURN();
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ * flt_ot_attach - Called when a filter instance is created and attach to a stream.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ *
+ * DESCRIPTION
+ * It is called after a filter instance creation, when it is attached to a
+ * stream. This happens when the stream is started for filters defined on
+ * the stream's frontend and when the backend is set for filters declared
+ * on the stream's backend. It is possible to ignore the filter, if needed,
+ * by returning 0. This could be useful to have conditional filtering.
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 to ignore the filter,
+ * any other value otherwise.
+ */
+static int flt_ot_attach(struct stream *s, struct filter *f)
+{
+ const struct flt_ot_conf *conf = FLT_OT_CONF(f);
+ char *err = NULL;
+
+ FLT_OT_FUNC("%p, %p", s, f);
+
+ if (conf->tracer->flag_disabled) {
+ FLT_OT_DBG(2, "filter '%s', type: %s (disabled)", conf->id, flt_ot_type(f));
+
+ FLT_OT_RETURN_INT(FLT_OT_RET_IGNORE);
+ }
+ else if (conf->tracer->rate_limit < FLT_OT_FLOAT_U32(FLT_OT_RATE_LIMIT_MAX, FLT_OT_RATE_LIMIT_MAX)) {
+ uint32_t rnd = ha_random32();
+
+ if (conf->tracer->rate_limit <= rnd) {
+ FLT_OT_DBG(2, "filter '%s', type: %s (ignored: %u <= %u)", conf->id, flt_ot_type(f), conf->tracer->rate_limit, rnd);
+
+ FLT_OT_RETURN_INT(FLT_OT_RET_IGNORE);
+ }
+ }
+
+ FLT_OT_DBG(2, "filter '%s', type: %s (run)", conf->id, flt_ot_type(f));
+
+ f->ctx = flt_ot_runtime_context_init(s, f, &err);
+ FLT_OT_ERR_FREE(err);
+ if (f->ctx == NULL) {
+ FLT_OT_LOG(LOG_EMERG, "failed to create context");
+
+ FLT_OT_RETURN_INT(FLT_OT_RET_IGNORE);
+ }
+
+ /*
+ * AN_REQ_WAIT_HTTP and AN_RES_WAIT_HTTP analyzers can only be used
+ * in the .channel_post_analyze callback function.
+ */
+ f->pre_analyzers |= conf->tracer->analyzers & ((AN_REQ_ALL & ~AN_REQ_WAIT_HTTP & ~AN_REQ_HTTP_TARPIT) | (AN_RES_ALL & ~AN_RES_WAIT_HTTP));
+ f->post_analyzers |= conf->tracer->analyzers & (AN_REQ_WAIT_HTTP | AN_RES_WAIT_HTTP);
+
+ FLT_OT_LOG(LOG_INFO, "%08x %08x", f->pre_analyzers, f->post_analyzers);
+
+#ifdef USE_OT_VARS
+ flt_ot_vars_dump(s);
+#endif
+ flt_ot_http_headers_dump(&(s->req));
+
+ FLT_OT_RETURN_INT(FLT_OT_RET_OK);
+}
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ * flt_ot_stream_start - Called when a stream is created.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ *
+ * DESCRIPTION
+ * It is called when a stream is started. This callback can fail by
+ * returning a negative value. It will be considered as a critical error
+ * by HAProxy which disabled the listener for a short time.
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_stream_start(struct stream *s, struct filter *f)
+{
+ char *err = NULL;
+ int retval = FLT_OT_RET_OK;
+
+ FLT_OT_FUNC("%p, %p", s, f);
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+ FLT_OT_RETURN_INT(retval);
+
+ FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ * flt_ot_stream_set_backend - Called when a backend is set for a stream.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * be -
+ *
+ * DESCRIPTION
+ * It is called when a backend is set for a stream. This callbacks will be
+ * called for all filters attached to a stream (frontend and backend). Note
+ * this callback is not called if the frontend and the backend are the same.
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_stream_set_backend(struct stream *s, struct filter *f, struct proxy *be)
+{
+ char *err = NULL;
+ int retval = FLT_OT_RET_OK;
+
+ FLT_OT_FUNC("%p, %p, %p", s, f, be);
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+ FLT_OT_RETURN_INT(retval);
+
+ FLT_OT_DBG(3, "backend: %s", be->id);
+
+ FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ * flt_ot_stream_stop - Called when a stream is destroyed.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ *
+ * DESCRIPTION
+ * It is called when a stream is stopped. This callback always succeed.
+ * Anyway, it is too late to return an error.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_stream_stop(struct stream *s, struct filter *f)
+{
+ char *err = NULL;
+
+ FLT_OT_FUNC("%p, %p", s, f);
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+ FLT_OT_RETURN();
+
+ flt_ot_return_void(f, &err);
+
+ FLT_OT_RETURN();
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ * flt_ot_detach - Called when a filter instance is detach from a stream, just before its destruction.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ *
+ * DESCRIPTION
+ * It is called when a filter instance is detached from a stream, before its
+ * destruction. This happens when the stream is stopped for filters defined
+ * on the stream's frontend and when the analyze ends for filters defined on
+ * the stream's backend.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_detach(struct stream *s, struct filter *f)
+{
+ FLT_OT_FUNC("%p, %p", s, f);
+
+ FLT_OT_DBG(2, "filter '%s', type: %s", FLT_OT_CONF(f)->id, flt_ot_type(f));
+
+ flt_ot_runtime_context_free(f);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_check_timeouts - Called when a stream is woken up because of an expired timer.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_check_timeouts(struct stream *s, struct filter *f)
+{
+ char *err = NULL;
+
+ FLT_OT_FUNC("%p, %p", s, f);
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+ FLT_OT_RETURN();
+
+ s->pending_events |= TASK_WOKEN_MSG;
+
+ flt_ot_return_void(f, &err);
+
+ FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_channel_start_analyze - Called when analyze starts for a given channel.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * chn -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise.
+ */
+static int flt_ot_channel_start_analyze(struct stream *s, struct filter *f, struct channel *chn)
+{
+ char *err = NULL;
+ int retval;
+
+ FLT_OT_FUNC("%p, %p, %p", s, f, chn);
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, (chn->flags & CF_ISRESP) ? FLT_OT_EVENT_RES_SERVER_SESS_START : FLT_OT_EVENT_REQ_CLIENT_SESS_START)))
+ FLT_OT_RETURN_INT(FLT_OT_RET_OK);
+
+ FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+
+ if (chn->flags & CF_ISRESP) {
+ /* The response channel. */
+ chn->analysers |= f->pre_analyzers & AN_RES_ALL;
+
+ /* The event 'on-server-session-start'. */
+ retval = flt_ot_event_run(s, f, chn, FLT_OT_EVENT_RES_SERVER_SESS_START, &err);
+ if (retval == FLT_OT_RET_WAIT) {
+ channel_dont_read(chn);
+ channel_dont_close(chn);
+ }
+ } else {
+ /* The request channel. */
+ chn->analysers |= f->pre_analyzers & AN_REQ_ALL;
+
+ /* The event 'on-client-session-start'. */
+ retval = flt_ot_event_run(s, f, chn, FLT_OT_EVENT_REQ_CLIENT_SESS_START, &err);
+ }
+
+// register_data_filter(s, chn, f);
+
+ FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ * flt_ot_channel_pre_analyze - Called before a processing happens on a given channel.
+ *
+ * ARGUMENTS
+ * s -
+ * f -
+ * chn - the channel on which the analyzing is done
+ * an_bit - the analyzer id
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise.
+ */
+static int flt_ot_channel_pre_analyze(struct stream *s, struct filter *f, struct channel *chn, uint an_bit)
+{
+ char *err = NULL;
+ int i, event = -1, retval;
+
+ FLT_OT_FUNC("%p, %p, %p, 0x%08x", s, f, chn, an_bit);
+
+ for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_event_data); i++)
+ if (flt_ot_event_data[i].an_bit == an_bit) {
+ event = i;
+
+ break;
+ }
+
+ if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, event)))
+ FLT_OT_RETURN_INT(FLT_OT_RET_OK);
+
+ FLT_OT_DBG(3, "channel: %s, mode: %s (%s), analyzer: %s", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s), flt_ot_analyzer(an_bit));
+
+ retval = flt_ot_event_run(s, f, chn, event, &err);
+
+ if ((retval == FLT_OT_RET_WAIT) && (chn->flags & CF_ISRESP)) {
+ channel_dont_read(chn);
+ channel_dont_close(chn);
+ }
+
+ FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ *   flt_ot_channel_post_analyze - Called after a processing happens on a given channel.
+ *
+ * ARGUMENTS
+ *   s      - the stream to which the filter is attached
+ *   f      - the OpenTracing filter instance
+ *   chn    - the channel on which the analyzing was done
+ *   an_bit - the analyzer id
+ *
+ * DESCRIPTION
+ *   This function, for its part, is not resumable. It is called when a
+ *   filterable analyzer finishes its processing. So it called once for
+ *   the same analyzer. Like flt_ot_channel_pre_analyze(), it maps
+ *   <an_bit> to a filter event and runs it.
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, 0 if it needs to wait,
+ *   any other value otherwise.
+ */
+static int flt_ot_channel_post_analyze(struct stream *s, struct filter *f, struct channel *chn, uint an_bit)
+{
+	char *err = NULL;
+	int i, event = -1, retval;
+
+	FLT_OT_FUNC("%p, %p, %p, 0x%08x", s, f, chn, an_bit);
+
+	/* Translate the analyzer bit into an index into flt_ot_event_data[]. */
+	for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_event_data); i++)
+		if (flt_ot_event_data[i].an_bit == an_bit) {
+			event = i;
+
+			break;
+		}
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, event)))
+		FLT_OT_RETURN_INT(FLT_OT_RET_OK);
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s), analyzer: %s", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s), flt_ot_analyzer(an_bit));
+
+	retval = flt_ot_event_run(s, f, chn, event, &err);
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
<doc_update>
+/***
+ * NAME
+ *   flt_ot_channel_end_analyze - Called when analyze ends for a given channel.
+ *
+ * ARGUMENTS
+ *   s   - the stream to which the filter is attached
+ *   f   - the OpenTracing filter instance
+ *   chn - the channel whose analysis has ended
+ *
+ * DESCRIPTION
+ *   Runs the session-end event matching the channel direction:
+ *   'on-server-session-end' for the response channel,
+ *   'on-client-session-end' for the request channel.  On the request
+ *   side it may additionally fire 'on-server-unavailable' (see below).
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, 0 if it needs to wait,
+ *   any other value otherwise.
+ */
+static int flt_ot_channel_end_analyze(struct stream *s, struct filter *f, struct channel *chn)
+{
+	char *err = NULL;
+	int rc, retval;
+
+	FLT_OT_FUNC("%p, %p, %p", s, f, chn);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, (chn->flags & CF_ISRESP) ? FLT_OT_EVENT_RES_SERVER_SESS_END : FLT_OT_EVENT_REQ_CLIENT_SESS_END)))
+		FLT_OT_RETURN_INT(FLT_OT_RET_OK);
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+
+	if (chn->flags & CF_ISRESP) {
+		/* The response channel, event 'on-server-session-end'. */
+		retval = flt_ot_event_run(s, f, chn, FLT_OT_EVENT_RES_SERVER_SESS_END, &err);
+	} else {
+		/* The request channel, event 'on-client-session-end'. */
+		retval = flt_ot_event_run(s, f, chn, FLT_OT_EVENT_REQ_CLIENT_SESS_END, &err);
+
+		/*
+		 * In case an event using server response is defined and not
+		 * executed, event 'on-server-unavailable' is called here.
+		 * An error from that run only overrides <retval> when the
+		 * session-end event itself succeeded.
+		 */
+		if ((FLT_OT_CONF(f)->tracer->analyzers & AN_RES_ALL) && !(FLT_OT_RT_CTX(f->ctx)->analyzers & AN_RES_ALL)) {
+			rc = flt_ot_event_run(s, f, chn, FLT_OT_EVENT_REQ_SERVER_UNAVAILABLE, &err);
+			if ((retval == FLT_OT_RET_OK) && (rc != FLT_OT_RET_OK))
+				retval = rc;
+		}
+	}
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ *   flt_ot_http_headers - Debug-only callback run when all HTTP headers are parsed.
+ *
+ * ARGUMENTS
+ *   s   - the stream to which the filter is attached
+ *   f   - the OpenTracing filter instance
+ *   msg - the HTTP message being filtered
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: logs the start line (method/URI/version or
+ *   version/status/reason) of the message and otherwise does nothing.
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, 0 if it needs to wait,
+ *   any other value otherwise.
+ */
+static int flt_ot_http_headers(struct stream *s, struct filter *f, struct http_msg *msg)
+{
+	char *err = NULL;
+	struct htx *htx = htxbuf(&(msg->chn->buf));
+	struct htx_sl *sl = http_get_stline(htx);
+	int retval = FLT_OT_RET_OK;
+
+	FLT_OT_FUNC("%p, %p, %p", s, f, msg);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN_INT(retval);
+
+	/* Dump the three parts of the HTX start line. */
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s), %.*s %.*s %.*s", flt_ot_chn_label(msg->chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s), HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl), HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl), HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_payload - Debug-only callback run when some HTTP payload is available.
+ *
+ * ARGUMENTS
+ *   s      - the stream to which the filter is attached
+ *   f      - the OpenTracing filter instance
+ *   msg    - the HTTP message carrying the payload
+ *   offset - offset of the available payload data
+ *   len    - length of the available payload data
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: logs the payload window and forwards all
+ *   of it (<retval> is initialized to <len> and never modified here, so
+ *   the task_wakeup() branch below is currently dead; it is kept as a
+ *   template for partial-forwarding handling).
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_http_payload(struct stream *s, struct filter *f, struct http_msg *msg, uint offset, uint len)
+{
+	char *err = NULL;
+	int retval = len;
+
+	FLT_OT_FUNC("%p, %p, %p, %u, %u", s, f, msg, offset, len);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN_INT(len);
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s), offset: %u, len: %u, forward: %d", flt_ot_chn_label(msg->chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s), offset, len, retval);
+
+	/* If less than <len> were forwarded, wake the stream up to retry. */
+	if (retval != len)
+		task_wakeup(s->task, TASK_WOKEN_MSG);
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_end - Debug-only callback run when an HTTP message ends.
+ *
+ * ARGUMENTS
+ *   s   - the stream to which the filter is attached
+ *   f   - the OpenTracing filter instance
+ *   msg - the HTTP message that ended
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: traces the call and returns success.
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, 0 if it needs to wait,
+ *   any other value otherwise.
+ */
+static int flt_ot_http_end(struct stream *s, struct filter *f, struct http_msg *msg)
+{
+	char *err = NULL;
+	int retval = FLT_OT_RET_OK;
+
+	FLT_OT_FUNC("%p, %p, %p", s, f, msg);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN_INT(retval);
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(msg->chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_reset - Debug-only callback run when an HTTP message is reset.
+ *
+ * ARGUMENTS
+ *   s   - the stream to which the filter is attached
+ *   f   - the OpenTracing filter instance
+ *   msg - the HTTP message being reset
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: traces the call; any pending error is
+ *   consumed via flt_ot_return_void().
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+static void flt_ot_http_reset(struct stream *s, struct filter *f, struct http_msg *msg)
+{
+	char *err = NULL;
+
+	FLT_OT_FUNC("%p, %p, %p", s, f, msg);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s)", flt_ot_chn_label(msg->chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+
+	flt_ot_return_void(f, &err);
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_reply - Debug-only callback run when HAProxy itself replies.
+ *
+ * ARGUMENTS
+ *   s      - the stream to which the filter is attached
+ *   f      - the OpenTracing filter instance
+ *   status - the HTTP status code of the local reply
+ *   msg    - the reply payload buffer
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: traces the call; any pending error is
+ *   consumed via flt_ot_return_void().
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+static void flt_ot_http_reply(struct stream *s, struct filter *f, short status, const struct buffer *msg)
+{
+	char *err = NULL;
+
+	FLT_OT_FUNC("%p, %p, %hd, %p", s, f, status, msg);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG(3, "channel: -, mode: %s (%s)", flt_ot_pr_mode(s), flt_ot_stream_pos(s));
+
+	flt_ot_return_void(f, &err);
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_tcp_payload - Debug-only callback run when some TCP payload is available.
+ *
+ * ARGUMENTS
+ *   s      - the stream to which the filter is attached
+ *   f      - the OpenTracing filter instance
+ *   chn    - the channel carrying the payload
+ *   offset - offset of the available payload data
+ *   len    - length of the available payload data
+ *
+ * DESCRIPTION
+ *   Compiled only with DEBUG_OT: logs the payload window and forwards all
+ *   of it.  The empty SF_HTX branch is a placeholder for HTX- vs raw-mode
+ *   specific handling, and since <retval> stays equal to <len> the
+ *   task_wakeup() branch is currently dead code.
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, any other value otherwise.
+ */
+static int flt_ot_tcp_payload(struct stream *s, struct filter *f, struct channel *chn, uint offset, uint len)
+{
+	char *err = NULL;
+	int retval = len;
+
+	FLT_OT_FUNC("%p, %p, %p, %u, %u", s, f, chn, offset, len);
+
+	if (flt_ot_is_disabled(f FLT_OT_DBG_ARGS(, -1)))
+		FLT_OT_RETURN_INT(len);
+
+	FLT_OT_DBG(3, "channel: %s, mode: %s (%s), offset: %u, len: %u, forward: %d", flt_ot_chn_label(chn), flt_ot_pr_mode(s), flt_ot_stream_pos(s), offset, len, retval);
+
+	/* Placeholder: HTX streams vs raw TCP streams. */
+	if (s->flags & SF_HTX) {
+	} else {
+	}
+
+	/* If less than <len> were forwarded, wake the stream up to retry. */
+	if (retval != len)
+		task_wakeup(s->task, TASK_WOKEN_MSG);
+
+	FLT_OT_RETURN_INT(flt_ot_return_int(f, &err, retval));
+}
+
+#endif /* DEBUG_OT */
+
+
+/*
+ * Callback table of the OpenTracing filter.  FLT_OT_DBG_IFDEF(a, b)
+ * presumably expands to <a> in DEBUG_OT builds and <b> otherwise (the
+ * functions passed as its first argument are all defined under
+ * #ifdef DEBUG_OT), so the debug-only callbacks are registered as NULL
+ * in production builds.
+ */
+struct flt_ops flt_ot_ops = {
+	/* Callbacks to manage the filter lifecycle. */
+	.init              = flt_ot_init,
+	.deinit            = flt_ot_deinit,
+	.check             = flt_ot_check,
+	.init_per_thread   = flt_ot_init_per_thread,
+	.deinit_per_thread = FLT_OT_DBG_IFDEF(flt_ot_deinit_per_thread, NULL),
+
+	/* Stream callbacks. */
+	.attach             = flt_ot_attach,
+	.stream_start       = FLT_OT_DBG_IFDEF(flt_ot_stream_start, NULL),
+	.stream_set_backend = FLT_OT_DBG_IFDEF(flt_ot_stream_set_backend, NULL),
+	.stream_stop        = FLT_OT_DBG_IFDEF(flt_ot_stream_stop, NULL),
+	.detach             = flt_ot_detach,
+	.check_timeouts     = flt_ot_check_timeouts,
+
+	/* Channel callbacks. */
+	.channel_start_analyze = flt_ot_channel_start_analyze,
+	.channel_pre_analyze   = flt_ot_channel_pre_analyze,
+	.channel_post_analyze  = flt_ot_channel_post_analyze,
+	.channel_end_analyze   = flt_ot_channel_end_analyze,
+
+	/* HTTP callbacks. */
+	.http_headers = FLT_OT_DBG_IFDEF(flt_ot_http_headers, NULL),
+	.http_payload = FLT_OT_DBG_IFDEF(flt_ot_http_payload, NULL),
+	.http_end     = FLT_OT_DBG_IFDEF(flt_ot_http_end, NULL),
+	.http_reset   = FLT_OT_DBG_IFDEF(flt_ot_http_reset, NULL),
+	.http_reply   = FLT_OT_DBG_IFDEF(flt_ot_http_reply, NULL),
+
+	/* TCP callbacks. */
+	.tcp_payload = FLT_OT_DBG_IFDEF(flt_ot_tcp_payload, NULL)
+};
+
+
+REGISTER_BUILD_OPTS("Built with OpenTracing support.");
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/group.c b/addons/ot/src/group.c
new file mode 100644
index 0000000..52b872d
--- /dev/null
+++ b/addons/ot/src/group.c
@@ -0,0 +1,354 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/*
+ * Table of the rule sets in which the 'ot-group' action may appear,
+ * expanded from the FLT_OT_GROUP_DEFINES X-macro list; entries are matched
+ * against rule->from via the act_from member (see flt_ot_group_action()).
+ */
+#define FLT_OT_GROUP_DEF(a,b,c)   { a, b, c },
+const struct flt_ot_group_data flt_ot_group_data[] = { FLT_OT_GROUP_DEFINES };
+#undef FLT_OT_GROUP_DEF
+
+
+/***
+ * NAME
+ *   flt_ot_group_action -
+ *
+ * ARGUMENTS
+ *   rule - the rule being executed
+ *   px   - the proxy in which the rule is declared
+ *   sess - the session
+ *   s    - the stream on which the action runs
+ *   opts - action options (ACT_OPT_* flags)
+ *
+ * DESCRIPTION
+ *   This is the action_ptr callback of a rule associated to the
+ *   FLT_OT_ACTION_GROUP action.  It locates the OpenTracing filter
+ *   instance attached to the stream and runs every scope of the
+ *   configured group.
+ *
+ * RETURN VALUE
+ *   The function returns ACT_RET_CONT if processing is finished (with error or
+ *   not), otherwise, it returns ACT_RET_YIELD if the action is in progress.
+ */
+static enum act_return flt_ot_group_action(struct act_rule *rule, struct proxy *px, struct session *sess, struct stream *s, int opts)
+{
+	const struct filter                *filter;
+	const struct flt_conf              *fconf;
+	const struct flt_ot_conf           *conf;
+	const struct flt_ot_conf_group     *conf_group;
+	const struct flt_ot_runtime_context *rt_ctx = NULL;
+	const struct flt_ot_conf_ph        *ph_scope;
+	char                               *err = NULL;
+	int                                 i, rc;
+
+	FLT_OT_FUNC("%p, %p, %p, %p, %d", rule, px, sess, s, opts);
+
+	FLT_OT_DBG(3, "from: %d, arg.act %p:{ %p %p %p %p }", rule->from, &(rule->arg.act), rule->arg.act.p[0], rule->arg.act.p[1], rule->arg.act.p[2], rule->arg.act.p[3]);
+
+	/* Pointers resolved once at config check time, see flt_ot_group_check(). */
+	fconf      = rule->arg.act.p[FLT_OT_ARG_FLT_CONF];
+	conf       = rule->arg.act.p[FLT_OT_ARG_CONF];
+	conf_group = ((const struct flt_ot_conf_ph *)(rule->arg.act.p[FLT_OT_ARG_GROUP]))->ptr;
+
+	if ((fconf == NULL) || (conf == NULL) || (conf_group == NULL)) {
+		FLT_OT_LOG(LOG_ERR, FLT_OT_ACTION_GROUP ": internal error, invalid group action");
+
+		FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+	}
+
+	if (conf->tracer->flag_disabled) {
+		FLT_OT_DBG(1, "filter '%s' disabled, group action '%s' ignored", conf->id, conf_group->id);
+
+		FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+	}
+
+	/* Find the OpenTracing filter instance from the current stream. */
+	list_for_each_entry(filter, &(s->strm_flt.filters), list)
+		if (filter->config == fconf) {
+			rt_ctx = filter->ctx;
+
+			break;
+		}
+
+	if (rt_ctx == NULL) {
+		FLT_OT_DBG(1, "cannot find filter, probably not attached to the stream");
+
+		FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+	}
+	else if (flt_ot_is_disabled(filter FLT_OT_DBG_ARGS(, -1))) {
+		FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+	}
+	else {
+		FLT_OT_DBG(3, "run group '%s'", conf_group->id);
+		FLT_OT_DBG_CONF_GROUP("run group ", conf_group);
+	}
+
+	/*
+	 * Check the value of rule->from; in case it is incorrect,
+	 * report an error.
+	 */
+	for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_group_data); i++)
+		if (flt_ot_group_data[i].act_from == rule->from)
+			break;
+
+	if (i >= FLT_OT_TABLESIZE(flt_ot_group_data)) {
+		FLT_OT_LOG(LOG_ERR, FLT_OT_ACTION_GROUP ": internal error, invalid rule->from=%d", rule->from);
+
+		FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+	}
+
+	/*
+	 * NOTE(review): all scopes are run on the response channel with
+	 * SMP_OPT_DIR_RES regardless of which ruleset (rule->from) the
+	 * action came from -- confirm this is intended for request-side
+	 * rules.  Scope errors are only acted upon when ACT_OPT_FINAL is
+	 * set, and even then the handling is left unimplemented (XXX).
+	 */
+	list_for_each_entry(ph_scope, &(conf_group->ph_scopes), list) {
+		rc = flt_ot_scope_run(s, rt_ctx->filter, &(s->res), ph_scope->ptr, NULL, SMP_OPT_DIR_RES, &err);
+		if ((rc == FLT_OT_RET_ERROR) && (opts & ACT_OPT_FINAL)) {
+			/* XXX */
+		}
+	}
+
+	FLT_OT_RETURN_EX(ACT_RET_CONT, enum act_return, "%d");
+}
+
+
+/***
+ * NAME
+ *   flt_ot_group_check -
+ *
+ * ARGUMENTS
+ *   rule - the rule to check
+ *   px   - the proxy in which the rule is declared
+ *   err  - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   This is the check_ptr callback of a rule associated to the
+ *   FLT_OT_ACTION_GROUP action.  It resolves the filter-id/group-id
+ *   strings stored by flt_ot_group_parse() into the filter config, the
+ *   OT config and the group placeholder, and stores those pointers back
+ *   into rule->arg.act.p[] for use at run time.
+ *
+ * RETURN VALUE
+ *   The function returns 1 in success case, otherwise,
+ *   it returns 0 and err is filled.
+ */
+static int flt_ot_group_check(struct act_rule *rule, struct proxy *px, char **err)
+{
+	struct flt_conf       *fconf_tmp, *fconf = NULL;
+	struct flt_ot_conf    *conf;
+	struct flt_ot_conf_ph *ph_group;
+	const char            *filter_id;
+	const char            *group_id;
+	bool                   flag_found = 0;
+	int                    i;
+
+	FLT_OT_FUNC("%p, %p, %p:%p", rule, px, FLT_OT_DPTR_ARGS(err));
+
+	filter_id = rule->arg.act.p[FLT_OT_ARG_FILTER_ID];
+	group_id  = rule->arg.act.p[FLT_OT_ARG_GROUP_ID];
+
+	FLT_OT_DBG(2, "checking filter_id='%s', group_id='%s'", filter_id, group_id);
+
+	/*
+	 * Check the value of rule->from; in case it is incorrect,
+	 * report an error.
+	 */
+	for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_group_data); i++)
+		if (flt_ot_group_data[i].act_from == rule->from)
+			break;
+
+	if (i >= FLT_OT_TABLESIZE(flt_ot_group_data)) {
+		FLT_OT_ERR("internal error, unexpected rule->from=%d, please report this bug!", rule->from);
+
+		FLT_OT_RETURN_INT(0);
+	}
+
+	/*
+	 * Try to find the OpenTracing filter by checking all filters
+	 * for the proxy <px>.
+	 */
+	list_for_each_entry(fconf_tmp, &(px->filter_configs), list) {
+		conf = fconf_tmp->conf;
+
+		if (fconf_tmp->id != ot_flt_id) {
+			/* This is not an OpenTracing filter. */
+			continue;
+		}
+		else if (strcmp(conf->id, filter_id) == 0) {
+			/* This is the good filter ID. */
+			fconf = fconf_tmp;
+
+			break;
+		}
+	}
+
+	if (fconf == NULL) {
+		FLT_OT_ERR("unable to find the OpenTracing filter '%s' used by the " FLT_OT_ACTION_GROUP " '%s'", filter_id, group_id);
+
+		FLT_OT_RETURN_INT(0);
+	}
+
+	/*
+	 * Attempt to find if the group is defined in the OpenTracing filter
+	 * configuration.
+	 */
+	list_for_each_entry(ph_group, &(conf->tracer->ph_groups), list)
+		if (strcmp(ph_group->id, group_id) == 0) {
+			flag_found = 1;
+
+			break;
+		}
+
+	if (!flag_found) {
+		FLT_OT_ERR("unable to find group '%s' in the OpenTracing filter '%s' configuration", group_id, filter_id);
+
+		FLT_OT_RETURN_INT(0);
+	}
+
+	/*
+	 * The string ids are no longer needed; replace them with the
+	 * resolved pointers (FLT_OT_ARG_FILTER_ID/GROUP_ID slots are
+	 * reused as FLT_OT_ARG_FLT_CONF/CONF/GROUP).
+	 */
+	FLT_OT_FREE_CLEAR(rule->arg.act.p[FLT_OT_ARG_FILTER_ID]);
+	FLT_OT_FREE_CLEAR(rule->arg.act.p[FLT_OT_ARG_GROUP_ID]);
+
+	rule->arg.act.p[FLT_OT_ARG_FLT_CONF] = fconf;
+	rule->arg.act.p[FLT_OT_ARG_CONF]     = conf;
+	rule->arg.act.p[FLT_OT_ARG_GROUP]    = ph_group;
+
+	FLT_OT_RETURN_INT(1);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_group_release -
+ *
+ * ARGUMENTS
+ *   rule - the rule being released
+ *
+ * DESCRIPTION
+ *   This is the release_ptr callback of a rule associated to the
+ *   FLT_OT_ACTION_GROUP action.  After flt_ot_group_check() the
+ *   rule arguments hold borrowed pointers only, so there is nothing
+ *   to free here.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+static void flt_ot_group_release(struct act_rule *rule)
+{
+	FLT_OT_FUNC("%p", rule);
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_group_parse -
+ *
+ * ARGUMENTS
+ *   args    - the configuration line arguments
+ *   cur_arg - index of the first argument of the action; advanced past
+ *             the consumed arguments on success
+ *   px      - the proxy in which the rule is declared
+ *   rule    - the rule being built
+ *   err     - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   Parses 'ot-group <filter-id> <group-id> [{ if | unless } ...]',
+ *   duplicates both ids into the rule arguments (resolved later by
+ *   flt_ot_group_check()) and installs the action callbacks.
+ *
+ * RETURN VALUE
+ *   Returns ACT_RET_PRS_ERR if an error occurs, ACT_RET_PRS_OK otherwise.
+ */
+static enum act_parse_ret flt_ot_group_parse(const char **args, int *cur_arg, struct proxy *px, struct act_rule *rule, char **err)
+{
+	FLT_OT_FUNC("%p, %p, %p, %p, %p:%p", args, cur_arg, px, rule, FLT_OT_DPTR_ARGS(err));
+
+	/* Exactly two arguments, optionally followed by 'if'/'unless'. */
+	if (!FLT_OT_ARG_ISVALID(*cur_arg) ||
+	    !FLT_OT_ARG_ISVALID(*cur_arg + 1) ||
+	    (FLT_OT_ARG_ISVALID(*cur_arg + 2) &&
+	     (strcmp(args[*cur_arg + 2], FLT_OT_CONDITION_IF) != 0) &&
+	     (strcmp(args[*cur_arg + 2], FLT_OT_CONDITION_UNLESS) != 0))) {
+		FLT_OT_ERR("expects: <filter-id> <group-id> [{ if | unless } ...]");
+
+		FLT_OT_RETURN_EX(ACT_RET_PRS_ERR, enum act_parse_ret, "%d");
+	}
+
+	/* Copy the OpenTracing filter id. */
+	rule->arg.act.p[FLT_OT_ARG_FILTER_ID] = FLT_OT_STRDUP(args[*cur_arg]);
+	if (rule->arg.act.p[FLT_OT_ARG_FILTER_ID] == NULL) {
+		FLT_OT_ERR("%s : out of memory", args[*cur_arg]);
+
+		FLT_OT_RETURN_EX(ACT_RET_PRS_ERR, enum act_parse_ret, "%d");
+	}
+
+	/* Copy the OpenTracing group id. */
+	rule->arg.act.p[FLT_OT_ARG_GROUP_ID] = FLT_OT_STRDUP(args[*cur_arg + 1]);
+	if (rule->arg.act.p[FLT_OT_ARG_GROUP_ID] == NULL) {
+		FLT_OT_ERR("%s : out of memory", args[*cur_arg + 1]);
+
+		/* Do not leak the already-duplicated filter id. */
+		FLT_OT_FREE_CLEAR(rule->arg.act.p[FLT_OT_ARG_FILTER_ID]);
+
+		FLT_OT_RETURN_EX(ACT_RET_PRS_ERR, enum act_parse_ret, "%d");
+	}
+
+	rule->action      = ACT_CUSTOM;
+	rule->action_ptr  = flt_ot_group_action;
+	rule->check_ptr   = flt_ot_group_check;
+	rule->release_ptr = flt_ot_group_release;
+
+	*cur_arg += 2;
+
+	FLT_OT_RETURN_EX(ACT_RET_PRS_OK, enum act_parse_ret, "%d");
+}
+
+
+/*
+ * Register the FLT_OT_ACTION_GROUP keyword with every ruleset in which
+ * the action may be used: tcp-request/tcp-response content,
+ * http-request, http-response and http-after-response.
+ */
+static struct action_kw_list tcp_req_action_kws = { ILH, {
+		{ FLT_OT_ACTION_GROUP, flt_ot_group_parse },
+		{ /* END */ },
+	}
+};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_action_kws);
+
+static struct action_kw_list tcp_res_action_kws = { ILH, {
+		{ FLT_OT_ACTION_GROUP, flt_ot_group_parse },
+		{ /* END */ },
+	}
+};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_action_kws);
+
+static struct action_kw_list http_req_action_kws = { ILH, {
+		{ FLT_OT_ACTION_GROUP, flt_ot_group_parse },
+		{ /* END */ },
+	}
+};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_action_kws);
+
+static struct action_kw_list http_res_action_kws = { ILH, {
+		{ FLT_OT_ACTION_GROUP, flt_ot_group_parse },
+		{ /* END */ },
+	}
+};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_action_kws);
+
+static struct action_kw_list http_after_res_actions_kws = { ILH, {
+		{ FLT_OT_ACTION_GROUP, flt_ot_group_parse },
+		{ /* END */ },
+	}
+};
+
+INITCALL1(STG_REGISTER, http_after_res_keywords_register, &http_after_res_actions_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/http.c b/addons/ot/src/http.c
new file mode 100644
index 0000000..517bd0d
--- /dev/null
+++ b/addons/ot/src/http.c
@@ -0,0 +1,312 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ *   flt_ot_http_headers_dump -
+ *
+ * ARGUMENTS
+ *   chn - the channel whose HTX headers should be dumped (may be NULL)
+ *
+ * DESCRIPTION
+ *   Debug helper (DEBUG_OT only): walks the HTX blocks of the channel
+ *   buffer and logs every header as 'name: value', stopping at the
+ *   end-of-headers marker.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_http_headers_dump(const struct channel *chn)
+{
+	const struct htx *htx;
+	int32_t           pos;
+
+	FLT_OT_FUNC("%p", chn);
+
+	if (chn == NULL)
+		FLT_OT_RETURN();
+
+	htx = htxbuf(&(chn->buf));
+
+	if (htx_is_empty(htx))
+		FLT_OT_RETURN();
+
+	for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+		struct htx_blk    *blk  = htx_get_blk(htx, pos);
+		enum htx_blk_type  type = htx_get_blk_type(blk);
+
+		if (type == HTX_BLK_HDR) {
+			struct ist n = htx_get_blk_name(htx, blk);
+			struct ist v = htx_get_blk_value(htx, blk);
+
+			FLT_OT_DBG(2, "'%.*s: %.*s'", (int)n.len, n.ptr, (int)v.len, v.ptr);
+		}
+		else if (type == HTX_BLK_EOH)
+			/* End of headers reached; nothing more to dump. */
+			break;
+	}
+
+	FLT_OT_RETURN();
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ *   flt_ot_http_headers_get -
+ *
+ * ARGUMENTS
+ *   chn    - the channel whose HTX headers should be collected (may be NULL)
+ *   prefix - header-name prefix used to select span-context headers
+ *   len    - length of <prefix>
+ *   err    - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   This function is very similar to function http_action_set_header(), from
+ *   the HAProxy source.  It collects HTTP headers into a newly allocated
+ *   otc_text_map: all headers when no (usable) prefix is given, otherwise
+ *   only those whose name starts with <prefix>, with the prefix and one
+ *   separator character stripped from the stored key.
+ *
+ * RETURN VALUE
+ *   A pointer to the allocated text map (caller frees it via
+ *   otc_text_map_destroy()), or NULL if nothing was collected or on error.
+ */
+struct otc_text_map *flt_ot_http_headers_get(struct channel *chn, const char *prefix, size_t len, char **err)
+{
+	const struct htx    *htx;
+	size_t               prefix_len = (!FLT_OT_STR_ISVALID(prefix) || (len == 0)) ? 0 : (len + 1);
+	int32_t              pos;
+	struct otc_text_map *retptr = NULL;
+
+	FLT_OT_FUNC("%p, \"%s\", %zu, %p:%p", chn, prefix, len, FLT_OT_DPTR_ARGS(err));
+
+	if (chn == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+
+	/*
+	 * The keyword 'inject' allows you to define the name of the OpenTracing
+	 * context without using a prefix. In that case all HTTP headers are
+	 * transferred because it is not possible to separate them from the
+	 * OpenTracing context (this separation is usually done via a prefix).
+	 *
+	 * When using the 'extract' keyword, the context name must be specified.
+	 * To allow all HTTP headers to be extracted, the first character of
+	 * that name must be set to FLT_OT_PARSE_CTX_IGNORE_NAME.
+	 */
+	if (FLT_OT_STR_ISVALID(prefix) && (*prefix == FLT_OT_PARSE_CTX_IGNORE_NAME))
+		prefix_len = 0;
+
+	htx = htxbuf(&(chn->buf));
+
+	for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+		struct htx_blk    *blk  = htx_get_blk(htx, pos);
+		enum htx_blk_type  type = htx_get_blk_type(blk);
+
+		if (type == HTX_BLK_HDR) {
+			struct ist v, n = htx_get_blk_name(htx, blk);
+
+			/*
+			 * NOTE(review): only the first <len> characters are
+			 * compared, so the separator character between the
+			 * prefix and the header name is not verified --
+			 * confirm headers that merely start with <prefix>
+			 * cannot collide.
+			 */
+			if ((prefix_len == 0) || ((n.len >= prefix_len) && (strncasecmp(n.ptr, prefix, len) == 0))) {
+				/* Allocate the text map lazily, on the first match. */
+				if (retptr == NULL) {
+					retptr = otc_text_map_new(NULL, 8);
+					if (retptr == NULL) {
+						FLT_OT_ERR("failed to create HTTP header data");
+
+						break;
+					}
+				}
+
+				v = htx_get_blk_value(htx, blk);
+
+				/*
+				 * In case the data of the HTTP header is not
+				 * specified, v.ptr will have some non-null
+				 * value and v.len will be equal to 0. The
+				 * otc_text_map_add() function will not
+				 * interpret this well. In this case v.ptr
+				 * is set to an empty string.
+				 */
+				if (v.len == 0)
+					v = ist("");
+
+				/*
+				 * Here, an HTTP header (which is actually part
+				 * of the span context) is added to the text_map.
+				 *
+				 * Before adding, the prefix is removed from the
+				 * HTTP header name.
+				 */
+				if (otc_text_map_add(retptr, n.ptr + prefix_len, n.len - prefix_len, v.ptr, v.len, OTC_TEXT_MAP_DUP_KEY | OTC_TEXT_MAP_DUP_VALUE) == -1) {
+					FLT_OT_ERR("failed to add HTTP header data");
+
+					otc_text_map_destroy(&retptr, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+
+					break;
+				}
+			}
+		}
+		else if (type == HTX_BLK_EOH)
+			break;
+	}
+
+	ot_text_map_show(retptr);
+
+	/* An allocated but empty map is useless; release it and return NULL. */
+	if ((retptr != NULL) && (retptr->count == 0)) {
+		FLT_OT_DBG(2, "WARNING: no HTTP headers found");
+
+		otc_text_map_destroy(&retptr, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+	}
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_header_set -
+ *
+ * ARGUMENTS
+ *   chn    - the channel whose HTX message is modified (may be NULL)
+ *   prefix - optional header-name prefix
+ *   name   - optional header name (combined as "<prefix>-<name>" when
+ *            both are set)
+ *   value  - header value to set; NULL means delete-only
+ *   err    - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   This function is very similar to function http_action_set_header(), from
+ *   the HAProxy source.  It first removes all matching occurrences of the
+ *   header (all headers starting with <prefix> when <name> is not set),
+ *   then adds the header with <value> unless <value> is NULL.
+ *
+ * RETURN VALUE
+ *   Returns 0 on success, -1 on error (invalid arguments, empty HTX,
+ *   allocation failure or failure to add the header).
+ */
+int flt_ot_http_header_set(struct channel *chn, const char *prefix, const char *name, const char *value, char **err)
+{
+	struct http_hdr_ctx  ctx = { .blk = NULL };
+	struct ist           ist_name;
+	struct buffer       *buffer = NULL;
+	struct htx          *htx;
+	int                  retval = -1;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", \"%s\", %p:%p", chn, prefix, name, value, FLT_OT_DPTR_ARGS(err));
+
+	if ((chn == NULL) || (!FLT_OT_STR_ISVALID(prefix) && !FLT_OT_STR_ISVALID(name)))
+		FLT_OT_RETURN_INT(retval);
+
+	htx = htxbuf(&(chn->buf));
+
+	/*
+	 * Very rare (about 1% of cases), htx is empty.
+	 * In order to avoid segmentation fault, we exit this function.
+	 */
+	if (htx_is_empty(htx)) {
+		FLT_OT_ERR("HTX is empty");
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	/* Build the full header name: <name>, <prefix> or "<prefix>-<name>". */
+	if (!FLT_OT_STR_ISVALID(prefix)) {
+		ist_name = ist2((char *)name, strlen(name));
+	}
+	else if (!FLT_OT_STR_ISVALID(name)) {
+		ist_name = ist2((char *)prefix, strlen(prefix));
+	}
+	else {
+		buffer = flt_ot_trash_alloc(0, err);
+		if (buffer == NULL)
+			FLT_OT_RETURN_INT(retval);
+
+		(void)chunk_printf(buffer, "%s-%s", prefix, name);
+
+		ist_name = ist2(buffer->area, buffer->data);
+	}
+
+	/*
+	 * Remove all occurrences of the header.  Searching for the empty
+	 * name ist("") walks every header; the match against <ist_name>
+	 * is done manually below.
+	 */
+	while (http_find_header(htx, ist(""), &ctx, 1) == 1) {
+		struct ist n = htx_get_blk_name(htx, ctx.blk);
+#ifdef DEBUG_OT
+		/* <v> is only referenced from FLT_OT_DBG(), which is
+		   assumed to compile away when DEBUG_OT is unset. */
+		struct ist v = htx_get_blk_value(htx, ctx.blk);
+#endif
+
+		/*
+		 * If the <name> parameter is not set, then remove all headers
+		 * that start with the contents of the <prefix> parameter.
+		 */
+		if (!FLT_OT_STR_ISVALID(name))
+			n.len = ist_name.len;
+
+		if (isteqi(n, ist_name))
+			if (http_remove_header(htx, &ctx) == 1)
+				FLT_OT_DBG(3, "HTTP header '%.*s: %.*s' removed", (int)n.len, n.ptr, (int)v.len, v.ptr);
+	}
+
+	/*
+	 * If the value pointer has a value of NULL, the HTTP header is not set
+	 * after deletion.
+	 */
+	if (value == NULL) {
+		/* Do nothing. */
+	}
+	else if (http_add_header(htx, ist_name, ist(value)) == 1) {
+		retval = 0;
+
+		FLT_OT_DBG(3, "HTTP header '%s: %s' added", ist_name.ptr, value);
+	}
+	else {
+		FLT_OT_ERR("failed to set HTTP header '%s: %s'", ist_name.ptr, value);
+	}
+
+	flt_ot_trash_free(&buffer);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_http_headers_remove -
+ *
+ * ARGUMENTS
+ *   chn    - the channel whose HTX message is modified
+ *   prefix - prefix of the header names to remove
+ *   err    - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   Convenience wrapper around flt_ot_http_header_set(): removes all
+ *   HTTP headers whose name starts with <prefix> without adding a
+ *   replacement (name and value both NULL).
+ *
+ * RETURN VALUE
+ *   Returns 0 on success, -1 on error (see flt_ot_http_header_set()).
+ */
+int flt_ot_http_headers_remove(struct channel *chn, const char *prefix, char **err)
+{
+	int retval;
+
+	FLT_OT_FUNC("%p, \"%s\", %p:%p", chn, prefix, FLT_OT_DPTR_ARGS(err));
+
+	retval = flt_ot_http_header_set(chn, prefix, NULL, NULL, err);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/opentracing.c b/addons/ot/src/opentracing.c
new file mode 100644
index 0000000..8ae5f02
--- /dev/null
+++ b/addons/ot/src/opentracing.c
@@ -0,0 +1,1067 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+static struct pool_head *pool_head_ot_span_context __read_mostly = NULL;
+
+#ifdef USE_POOL_OT_SPAN_CONTEXT
+REGISTER_POOL(&pool_head_ot_span_context, "ot_span_context", MAX(sizeof(struct otc_span), sizeof(struct otc_span_context)));
+#endif
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ *   ot_text_map_show -
+ *
+ * ARGUMENTS
+ *   text_map - the text map to dump (may be NULL)
+ *
+ * DESCRIPTION
+ *   Debug helper (DEBUG_OT only): logs the text map structure and every
+ *   key/value pair it holds.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void ot_text_map_show(const struct otc_text_map *text_map)
+{
+	FLT_OT_FUNC("%p", text_map);
+
+	if (text_map == NULL)
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG_TEXT_MAP(text_map);
+
+	/* Dump the pairs only when both arrays are present and non-empty. */
+	if ((text_map->key != NULL) && (text_map->value != NULL) && (text_map->count > 0)) {
+		size_t i;
+
+		for (i = 0; i < text_map->count; i++)
+			FLT_OT_DBG(3, "  \"%s\" -> \"%s\"", text_map->key[i], text_map->value[i]);
+	}
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   ot_debug -
+ *
+ * ARGUMENTS
+ *   This function takes no arguments.
+ *
+ * DESCRIPTION
+ *   Debug helper (DEBUG_OT only): fetches the OpenTracing library
+ *   statistics via otc_statistics() and logs them.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void ot_debug(void)
+{
+	char buffer[BUFSIZ];
+
+	FLT_OT_FUNC("");
+
+	otc_statistics(buffer, sizeof(buffer));
+	FLT_OT_DBG(0, "%s", buffer);
+
+	FLT_OT_RETURN();
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ *   ot_mem_malloc -
+ *
+ * ARGUMENTS
+ *   func - caller function name (debug builds only)
+ *   line - caller line number (debug builds only)
+ *   size - number of bytes to allocate
+ *
+ * DESCRIPTION
+ *   Allocation hook handed to the OpenTracing library (see otc_ext_init()
+ *   in ot_init()); allocates from the ot_span_context pool.
+ *
+ * RETURN VALUE
+ *   A pointer to the allocated memory, or NULL on failure.
+ */
+static void *ot_mem_malloc(FLT_OT_DBG_ARGS(const char *func, int line, ) size_t size)
+{
+	return flt_ot_pool_alloc(pool_head_ot_span_context, size, 1, NULL);
+}
+
+
+/***
+ * NAME
+ *   ot_mem_free -
+ *
+ * ARGUMENTS
+ *   func - caller function name (debug builds only)
+ *   line - caller line number (debug builds only)
+ *   ptr  - memory to release
+ *
+ * DESCRIPTION
+ *   Deallocation hook handed to the OpenTracing library (see
+ *   otc_ext_init() in ot_init()); returns memory to the
+ *   ot_span_context pool.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+static void ot_mem_free(FLT_OT_DBG_ARGS(const char *func, int line, ) void *ptr)
+{
+	flt_ot_pool_free(pool_head_ot_span_context, &ptr);
+}
+
+
+/***
+ * NAME
+ *   ot_init -
+ *
+ * ARGUMENTS
+ *   tracer - used to return the loaded tracer instance
+ *   plugin - path of the tracer plugin, relative to the current working
+ *            directory
+ *   err    - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   Loads the OpenTracing tracer plugin from <cwd>/<plugin> and, on
+ *   success, installs the pool-backed memory hooks via otc_ext_init().
+ *
+ * RETURN VALUE
+ *   Returns 0 on success, -1 on failure (err is filled).
+ */
+int ot_init(struct otc_tracer **tracer, const char *plugin, char **err)
+{
+	char cwd[PATH_MAX], path[PATH_MAX], errbuf[BUFSIZ] = "";
+	int  rc, retval = -1;
+
+	FLT_OT_FUNC("%p:%p, \"%s\", %p:%p", FLT_OT_DPTR_ARGS(tracer), plugin, FLT_OT_DPTR_ARGS(err));
+
+	flt_ot_pools_info();
+#ifdef USE_POOL_OT_SPAN_CONTEXT
+	FLT_OT_DBG(2, "sizeof_pool(ot_span_context) = %u", pool_head_ot_span_context->size);
+#endif
+
+	if (getcwd(cwd, sizeof(cwd)) == NULL) {
+		FLT_OT_ERR("failed to get current working directory");
+
+		FLT_OT_RETURN_INT(retval);
+	}
+	/* Build the absolute plugin path; reject error and truncation. */
+	rc = snprintf(path, sizeof(path), "%s/%s", cwd, plugin);
+	if ((rc == -1) || (rc >= sizeof(path))) {
+		FLT_OT_ERR("failed to construct the OpenTracing plugin path");
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	*tracer = otc_tracer_load(path, errbuf, sizeof(errbuf));
+	if (*tracer == NULL) {
+		FLT_OT_ERR("%s", (*errbuf == '\0') ? "failed to initialize tracing library" : errbuf);
+	} else {
+		/* Route the library's allocations through our pool hooks. */
+		otc_ext_init(ot_mem_malloc, ot_mem_free);
+
+		retval = 0;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_start -
+ *
+ * ARGUMENTS
+ *   tracer - the tracer instance (only traced here; otc_tracer_start()
+ *            is called with a NULL config file argument)
+ *   cfgbuf - the tracer configuration as an in-memory buffer; NULL means
+ *            nothing to start
+ *   err    - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   Starts the tracer from the in-memory configuration <cfgbuf>.
+ *
+ * RETURN VALUE
+ *   Returns 0 on success, -1 on failure (err is filled).
+ */
+int ot_start(struct otc_tracer *tracer, const char *cfgbuf, char **err)
+{
+	char errbuf[BUFSIZ] = "";
+	int  retval = -1;
+
+	FLT_OT_FUNC("%p, %p, %p:%p", tracer, cfgbuf, FLT_OT_DPTR_ARGS(err));
+
+	if (cfgbuf == NULL)
+		FLT_OT_RETURN_INT(retval);
+
+	retval = otc_tracer_start(NULL, cfgbuf, errbuf, sizeof(errbuf));
+	if (retval == -1)
+		FLT_OT_ERR("%s", (*errbuf == '\0') ? "failed to start tracer" : errbuf);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_close -
+ *
+ * ARGUMENTS
+ *   tracer - address of the tracer pointer; set to NULL after closing
+ *
+ * DESCRIPTION
+ *   Closes the tracer via its close() method and clears the caller's
+ *   pointer.  Safe to call with a NULL pointer or a NULL tracer.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void ot_close(struct otc_tracer **tracer)
+{
+	FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(tracer));
+
+	if ((tracer == NULL) || (*tracer == NULL))
+		FLT_OT_RETURN();
+
+	(*tracer)->close(*tracer);
+
+	*tracer = NULL;
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   ot_span_init -
+ *
+ * ARGUMENTS
+ *   tracer         - the tracer used to start the span
+ *   operation_name - name of the new span (required)
+ *   ts_steady      - optional steady-clock start time
+ *   ts_system      - optional system-clock start time
+ *   ref_type       - span reference type (child-of / follows-from),
+ *                    or out-of-range for no reference
+ *   ref_ctx_idx    - index of the referenced span context
+ *   ref_span       - the referenced span
+ *   tags           - optional array of tags set on the span
+ *   num_tags       - number of elements in <tags>
+ *   err            - pointer used to return an error message
+ *
+ * DESCRIPTION
+ *   Builds an otc_start_span_options structure from the arguments and
+ *   starts a new span via start_span_with_options().
+ *
+ * RETURN VALUE
+ *   A pointer to the new span, or NULL on failure.
+ */
+struct otc_span *ot_span_init(struct otc_tracer *tracer, const char *operation_name, const struct timespec *ts_steady, const struct timespec *ts_system, int ref_type, int ref_ctx_idx, const struct otc_span *ref_span, const struct otc_tag *tags, int num_tags, char **err)
+{
+	struct otc_start_span_options options;
+	struct otc_span_context       context = { .idx = ref_ctx_idx, .span = ref_span };
+	struct otc_span_reference     references = { ref_type, &context };
+	struct otc_span              *retptr = NULL;
+
+	FLT_OT_FUNC("%p, \"%s\", %p, %p, %d, %d, %p, %p, %d, %p:%p", tracer, operation_name, ts_steady, ts_system, ref_type, ref_ctx_idx, ref_span, tags, num_tags, FLT_OT_DPTR_ARGS(err));
+
+	if (operation_name == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+	else if (tracer == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+
+	(void)memset(&options, 0, sizeof(options));
+
+	if (ts_steady != NULL)
+		(void)memcpy(&(options.start_time_steady.value), ts_steady, sizeof(options.start_time_steady.value));
+
+	if (ts_system != NULL)
+		(void)memcpy(&(options.start_time_system.value), ts_system, sizeof(options.start_time_system.value));
+
+	/* Attach the parent/predecessor reference only for valid types. */
+	if (FLT_OT_IN_RANGE(ref_type, otc_span_reference_child_of, otc_span_reference_follows_from)) {
+		options.references     = &references;
+		options.num_references = 1;
+	}
+
+	options.tags     = tags;
+	options.num_tags = num_tags;
+
+	retptr = tracer->start_span_with_options(tracer, operation_name, &options);
+	if (retptr == NULL)
+		FLT_OT_ERR("failed to init new span");
+	else
+		FLT_OT_DBG(2, "span %p:%zd initialized", retptr, retptr->idx);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_span_init_va - create a new span with a variable tag list.
+ *
+ * ARGUMENTS
+ *   tracer         - tracer used to create the span
+ *   operation_name - span operation name
+ *   ts_steady      - optional steady-clock start time
+ *   ts_system      - optional system-clock start time
+ *   ref_type       - span reference type
+ *   ref_ctx_idx    - context index of the referenced span
+ *   ref_span       - referenced (parent) span, if any
+ *   err            - pointer through which an error message is returned
+ *   tag_key        - first tag key, or NULL for no tags
+ *   tag_value      - first tag value (string)
+ *   ...            - further key/value string pairs, terminated by a
+ *                    NULL key
+ *
+ * DESCRIPTION
+ *   Collects up to FLT_OT_MAXTAGS string tags from the variable
+ *   argument list (a NULL key ends the list; no value is read after a
+ *   NULL key) and delegates to ot_span_init().  If <tag_key> is NULL,
+ *   num_tags stays 0 and the (uninitialized) tags array is never read.
+ *
+ * RETURN VALUE
+ *   Pointer to the new span, or NULL on failure (see ot_span_init()).
+ */
+struct otc_span *ot_span_init_va(struct otc_tracer *tracer, const char *operation_name, const struct timespec *ts_steady, const struct timespec *ts_system, int ref_type, int ref_ctx_idx, const struct otc_span *ref_span, char **err, const char *tag_key, const char *tag_value, ...)
+{
+ struct otc_tag tags[FLT_OT_MAXTAGS];
+ int num_tags = 0;
+ struct otc_span *retptr;
+
+ FLT_OT_FUNC("%p, \"%s\", %p, %p, %d, %d, %p, %p:%p, \"%s\", \"%s\", ...", tracer, operation_name, ts_steady, ts_system, ref_type, ref_ctx_idx, ref_span, FLT_OT_DPTR_ARGS(err), tag_key, tag_value);
+
+ if (tag_key != NULL) {
+ va_list ap;
+
+ va_start(ap, tag_value);
+ for (num_tags = 0; (num_tags < FLT_OT_TABLESIZE(tags)) && (tag_key != NULL) && (tag_value != NULL); num_tags++) {
+ tags[num_tags].key = (char *)tag_key;
+ FLT_OT_VSET(&(tags[num_tags].value), string, tag_value);
+
+ tag_key = va_arg(ap, typeof(tag_key));
+ if (tag_key != NULL)
+ tag_value = va_arg(ap, typeof(tag_value));
+ }
+ va_end(ap);
+ }
+
+ retptr = ot_span_init(tracer, operation_name, ts_steady, ts_system, ref_type, ref_ctx_idx, ref_span, tags, num_tags, err);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_span_tag - set an array of tags on a span.
+ *
+ * ARGUMENTS
+ *   span     - span to tag; must not be NULL
+ *   tags     - array of tags; must not be NULL
+ *   num_tags - number of elements in <tags>
+ *
+ * DESCRIPTION
+ *   Calls span->set_tag() for each element of <tags> in order.
+ *
+ * RETURN VALUE
+ *   Number of tags set (== num_tags), or -1 on invalid arguments.
+ */
+int ot_span_tag(struct otc_span *span, const struct otc_tag *tags, int num_tags)
+{
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, %p, %d", span, tags, num_tags);
+
+ if ((span == NULL) || (tags == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ for (retval = 0; retval < num_tags; retval++)
+ span->set_tag(span, tags[retval].key, &(tags[retval].value));
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_tag_va - set a variable list of typed tags on a span.
+ *
+ * ARGUMENTS
+ *   span - span to tag; must not be NULL
+ *   key  - first tag key; must not be NULL
+ *   type - otc_value type of the first tag value
+ *   ...  - first value, then further key/type/value triples,
+ *          terminated by a NULL key
+ *
+ * DESCRIPTION
+ *   Walks the variable argument list reading (value, next key, next
+ *   type) per iteration and calls span->set_tag() for each tag.  The
+ *   loop stops at a NULL key or at a type outside the valid
+ *   otc_value_bool..otc_value_null range (the out-of-range tag is not
+ *   set).  Note: for otc_value_null a string argument is still
+ *   consumed from the list, so callers must pass a placeholder value.
+ *
+ * RETURN VALUE
+ *   Number of tags set, or -1 on invalid arguments.
+ */
+int ot_span_tag_va(struct otc_span *span, const char *key, int type, ...)
+{
+ va_list ap;
+ struct otc_value ot_value;
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, \"%s\", %d, ...", span, key, type);
+
+ if ((span == NULL) || (key == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ va_start(ap, type);
+ for (retval = 0; (key != NULL) && FLT_OT_IN_RANGE(type, otc_value_bool, otc_value_null); retval++) {
+ ot_value.type = type;
+ if (type == otc_value_bool)
+ ot_value.value.bool_value = va_arg(ap, typeof(ot_value.value.bool_value));
+ else if (type == otc_value_double)
+ ot_value.value.double_value = va_arg(ap, typeof(ot_value.value.double_value));
+ else if (type == otc_value_int64)
+ ot_value.value.int64_value = va_arg(ap, typeof(ot_value.value.int64_value));
+ else if (type == otc_value_uint64)
+ ot_value.value.uint64_value = va_arg(ap, typeof(ot_value.value.uint64_value));
+ else if (type == otc_value_string)
+ ot_value.value.string_value = va_arg(ap, typeof(ot_value.value.string_value));
+ else if (type == otc_value_null)
+ ot_value.value.string_value = va_arg(ap, typeof(ot_value.value.string_value));
+ span->set_tag(span, key, &ot_value);
+
+ key = va_arg(ap, typeof(key));
+ if (key != NULL)
+ type = va_arg(ap, typeof(type));
+ }
+ va_end(ap);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_log - record an array of log fields on a span.
+ *
+ * ARGUMENTS
+ *   span       - span to log on; must not be NULL
+ *   log_fields - array of log fields; must not be NULL
+ *   num_fields - number of elements in <log_fields>
+ *
+ * DESCRIPTION
+ *   Passes at most OTC_MAXLOGFIELDS fields to span->log_fields();
+ *   excess fields are silently dropped.
+ *
+ * RETURN VALUE
+ *   Number of fields actually logged, or -1 on invalid arguments.
+ */
+int ot_span_log(struct otc_span *span, const struct otc_log_field *log_fields, int num_fields)
+{
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, %p, %d", span, log_fields, num_fields);
+
+ if ((span == NULL) || (log_fields == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ retval = MIN(OTC_MAXLOGFIELDS, num_fields);
+
+ span->log_fields(span, log_fields, retval);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_log_va - record a variable list of string log fields.
+ *
+ * ARGUMENTS
+ *   span  - span to log on; must not be NULL
+ *   key   - first log field key; must not be NULL
+ *   value - first log field value (string); must not be NULL
+ *   ...   - further key/value string pairs, terminated by a NULL key
+ *
+ * DESCRIPTION
+ *   Collects up to OTC_MAXLOGFIELDS string-valued log fields from the
+ *   variable argument list (NULL key ends the list) and passes them to
+ *   span->log_fields() in a single call.
+ *
+ * RETURN VALUE
+ *   Number of fields logged, or -1 on invalid arguments.
+ */
+int ot_span_log_va(struct otc_span *span, const char *key, const char *value, ...)
+{
+ va_list ap;
+ struct otc_log_field log_field[OTC_MAXLOGFIELDS];
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, \"%s\", \"%s\", ...", span, key, value);
+
+ if ((span == NULL) || (key == NULL) || (value == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ va_start(ap, value);
+ for (retval = 0; (retval < FLT_OT_TABLESIZE(log_field)) && (key != NULL); retval++) {
+ log_field[retval].key = key;
+ log_field[retval].value.type = otc_value_string;
+ log_field[retval].value.value.string_value = value;
+
+ key = va_arg(ap, typeof(key));
+ if (key != NULL)
+ value = va_arg(ap, typeof(value));
+ }
+ va_end(ap);
+
+ span->log_fields(span, log_field, retval);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_log_fmt - record a single printf-formatted log field.
+ *
+ * ARGUMENTS
+ *   span   - span to log on; must not be NULL
+ *   key    - log field key; must not be NULL
+ *   format - printf-style format string; must not be NULL
+ *   ...    - format arguments
+ *
+ * DESCRIPTION
+ *   Formats the value into a BUFSIZ stack buffer and logs it via
+ *   ot_span_log_va().  If the formatted value does not fit, a debug
+ *   warning is emitted and the value is truncated with an ellipsis
+ *   marker (FLT_OT_STR_ELLIPSIS) rather than discarded.
+ *
+ * RETURN VALUE
+ *   Number of fields logged (1), or -1 on invalid arguments.
+ */
+int ot_span_log_fmt(struct otc_span *span, const char *key, const char *format, ...)
+{
+ va_list ap;
+ char value[BUFSIZ];
+ int n;
+
+ FLT_OT_FUNC("%p, \"%s\", \"%s\", ...", span, key, format);
+
+ if ((span == NULL) || (key == NULL) || (format == NULL))
+ FLT_OT_RETURN_INT(-1);
+
+ va_start(ap, format);
+ n = vsnprintf(value, sizeof(value), format, ap);
+ if (!FLT_OT_IN_RANGE(n, 0, sizeof(value) - 1)) {
+ FLT_OT_DBG(2, "WARNING: log buffer too small (%d > %zu)", n, sizeof(value));
+
+ FLT_OT_STR_ELLIPSIS(value, sizeof(value));
+ }
+ va_end(ap);
+
+ FLT_OT_RETURN_INT(ot_span_log_va(span, key, value, NULL));
+}
+
+
+/***
+ * NAME
+ *   ot_span_set_baggage - set baggage items from a text map.
+ *
+ * ARGUMENTS
+ *   span    - span to update; must not be NULL
+ *   baggage - text map of key/value pairs; must not be NULL and must
+ *             have both key and value arrays allocated
+ *
+ * DESCRIPTION
+ *   Calls span->set_baggage_item() for every entry of the text map
+ *   whose key and value are both non-NULL; entries with a NULL key or
+ *   value are skipped (but still logged by the debug trace above the
+ *   check).
+ *
+ * RETURN VALUE
+ *   Number of baggage items actually set, or -1 on invalid arguments.
+ */
+int ot_span_set_baggage(struct otc_span *span, const struct otc_text_map *baggage)
+{
+ size_t i;
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, %p", span, baggage);
+
+ if ((span == NULL) || (baggage == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ if ((baggage->key == NULL) || (baggage->value == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ for (retval = i = 0; i < baggage->count; i++) {
+ FLT_OT_DBG(3, "set baggage: \"%s\" -> \"%s\"", baggage->key[i], baggage->value[i]);
+
+ if ((baggage->key[i] != NULL) && (baggage->value[i] != NULL)) {
+ span->set_baggage_item(span, baggage->key[i], baggage->value[i]);
+
+ retval++;
+ }
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_set_baggage_va - set a variable list of baggage items.
+ *
+ * ARGUMENTS
+ *   span  - span to update; must not be NULL
+ *   key   - first baggage key; must not be NULL
+ *   value - first baggage value; must not be NULL
+ *   ...   - further key/value string pairs, terminated by a NULL key
+ *
+ * DESCRIPTION
+ *   Calls span->set_baggage_item() for each key/value pair in the
+ *   variable argument list until a NULL key is encountered.
+ *
+ * RETURN VALUE
+ *   Number of baggage items set, or -1 on invalid arguments.
+ */
+int ot_span_set_baggage_va(struct otc_span *span, const char *key, const char *value, ...)
+{
+ va_list ap;
+ int retval = -1;
+
+ FLT_OT_FUNC("%p, \"%s\", \"%s\", ...", span, key, value);
+
+ if ((span == NULL) || (key == NULL) || (value == NULL))
+ FLT_OT_RETURN_INT(retval);
+
+ va_start(ap, value);
+ for (retval = 0; (key != NULL); retval++) {
+ FLT_OT_DBG(3, "set baggage: \"%s\" -> \"%s\"", key, value);
+
+ span->set_baggage_item(span, key, value);
+
+ key = va_arg(ap, typeof(key));
+ if (key != NULL)
+ value = va_arg(ap, typeof(value));
+ }
+ va_end(ap);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   ot_span_baggage_va - read a variable list of baggage items.
+ *
+ * ARGUMENTS
+ *   span - span to query; must not be NULL
+ *   key  - first baggage key; must not be NULL
+ *   ...  - further keys, terminated by a NULL key
+ *
+ * DESCRIPTION
+ *   First counts the keys in the variable argument list, then
+ *   allocates a text map sized for that count and fills it with the
+ *   span's baggage values for each key (keys without a value are
+ *   skipped).  Keys and values are duplicated into the map
+ *   (OTC_TEXT_MAP_DUP_KEY | OTC_TEXT_MAP_DUP_VALUE), so the caller
+ *   owns and must free the returned map.
+ *   NOTE(review): the debug trace indexes retptr->key[i]/value[i]
+ *   with the loop counter; if an earlier key had no value, the map
+ *   index lags <i> — presumably harmless since it is debug-only, but
+ *   worth confirming.
+ *
+ * RETURN VALUE
+ *   Newly allocated text map with the found baggage, or NULL on
+ *   invalid arguments or allocation failure.
+ */
+struct otc_text_map *ot_span_baggage_va(const struct otc_span *span, const char *key, ...)
+{
+ va_list ap;
+ struct otc_text_map *retptr = NULL;
+ int i, n;
+
+ FLT_OT_FUNC("%p, \"%s\", ...", span, key);
+
+ if ((span == NULL) || (key == NULL))
+ FLT_OT_RETURN_PTR(retptr);
+
+ va_start(ap, key);
+ for (n = 1; va_arg(ap, typeof(key)) != NULL; n++);
+ va_end(ap);
+
+ retptr = otc_text_map_new(NULL, n);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ va_start(ap, key);
+ for (i = 0; (i < n) && (key != NULL); i++) {
+ char *value = (char *)span->baggage_item(span, key);
+
+ if (value != NULL) {
+ (void)otc_text_map_add(retptr, key, 0, value, 0, OTC_TEXT_MAP_DUP_KEY | OTC_TEXT_MAP_DUP_VALUE);
+
+ FLT_OT_DBG(3, "get baggage[%d]: \"%s\" -> \"%s\"", i, retptr->key[i], retptr->value[i]);
+ } else {
+ FLT_OT_DBG(3, "get baggage[%d]: \"%s\" -> invalid key", i, key);
+ }
+
+ key = va_arg(ap, typeof(key));
+ }
+ va_end(ap);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_inject_text_map - inject a span context into a text map carrier.
+ *
+ * ARGUMENTS
+ *   tracer  - tracer performing the injection; must not be NULL
+ *   span    - span whose context is injected; must not be NULL
+ *   carrier - text map writer filled by the injection; must not be NULL
+ *
+ * DESCRIPTION
+ *   Obtains the span's context, zeroes the carrier and injects the
+ *   context into it via tracer->inject_text_map().  On injection
+ *   failure the context is freed and NULL is returned.
+ *
+ * RETURN VALUE
+ *   The span context on success (caller frees), NULL otherwise.
+ */
+struct otc_span_context *ot_inject_text_map(struct otc_tracer *tracer, const struct otc_span *span, struct otc_text_map_writer *carrier)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p", tracer, span, carrier);
+
+ if ((span == NULL) || (carrier == NULL))
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ retptr = span->span_context((struct otc_span *)span);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ (void)memset(carrier, 0, sizeof(*carrier));
+
+ rc = tracer->inject_text_map(tracer, carrier, retptr);
+ if (rc != otc_propagation_error_code_success) {
+ FLT_OT_FREE_CLEAR(retptr);
+ } else {
+#ifdef DEBUG_OT
+ FLT_OT_DBG_TEXT_CARRIER(carrier, set);
+ ot_text_map_show(&(carrier->text_map));
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+#endif
+ }
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_inject_http_headers - inject a span context into an HTTP
+ *                            headers carrier.
+ *
+ * ARGUMENTS
+ *   tracer  - tracer performing the injection; must not be NULL
+ *   span    - span whose context is injected; must not be NULL
+ *   carrier - HTTP headers writer filled by the injection
+ *   err     - pointer through which an error message is returned
+ *
+ * DESCRIPTION
+ *   Obtains the span's context, zeroes the carrier and injects the
+ *   context into it via tracer->inject_http_headers().  Unlike the
+ *   text map variant, failures are reported through <err>.
+ *
+ * RETURN VALUE
+ *   The span context on success (caller frees), NULL otherwise.
+ */
+struct otc_span_context *ot_inject_http_headers(struct otc_tracer *tracer, const struct otc_span *span, struct otc_http_headers_writer *carrier, char **err)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p, %p:%p", tracer, span, carrier, FLT_OT_DPTR_ARGS(err));
+
+ if ((span == NULL) || (carrier == NULL))
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ retptr = span->span_context((struct otc_span *)span);
+ if (retptr == NULL) {
+ FLT_OT_ERR("failed to create span context");
+
+ FLT_OT_RETURN_PTR(retptr);
+ }
+
+ (void)memset(carrier, 0, sizeof(*carrier));
+
+ rc = tracer->inject_http_headers(tracer, carrier, retptr);
+ if (rc != otc_propagation_error_code_success) {
+ FLT_OT_ERR("failed to inject HTTP headers data");
+
+ FLT_OT_FREE_CLEAR(retptr);
+ } else {
+#ifdef DEBUG_OT
+ FLT_OT_DBG_TEXT_CARRIER(carrier, set);
+ ot_text_map_show(&(carrier->text_map));
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+#endif
+ }
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_inject_binary - inject a span context into a binary carrier.
+ *
+ * ARGUMENTS
+ *   tracer  - tracer performing the injection; must not be NULL
+ *   span    - span whose context is injected; must not be NULL
+ *   carrier - custom (binary) carrier writer filled by the injection
+ *
+ * DESCRIPTION
+ *   Obtains the span's context, zeroes the carrier and injects the
+ *   context into it via tracer->inject_binary().  On injection failure
+ *   the context is freed and NULL is returned.  The DEBUG_OT block
+ *   dumps the binary payload interpreted as a Jaeger trace context.
+ *
+ * RETURN VALUE
+ *   The span context on success (caller frees), NULL otherwise.
+ */
+struct otc_span_context *ot_inject_binary(struct otc_tracer *tracer, const struct otc_span *span, struct otc_custom_carrier_writer *carrier)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p", tracer, span, carrier);
+
+ if ((span == NULL) || (carrier == NULL))
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ retptr = span->span_context((struct otc_span *)span);
+ if (retptr == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ (void)memset(carrier, 0, sizeof(*carrier));
+
+ rc = tracer->inject_binary(tracer, carrier, retptr);
+ if (rc != otc_propagation_error_code_success) {
+ FLT_OT_FREE_CLEAR(retptr);
+ } else {
+#ifdef DEBUG_OT
+ struct otc_jaeger_trace_context *ctx = carrier->binary_data.data;
+
+ FLT_OT_DBG_CUSTOM_CARRIER(carrier, inject);
+ FLT_OT_DBG(3, "trace context: %016" PRIx64 "%016" PRIx64 ":%016" PRIx64 ":%016" PRIx64 ":%02hhx <%s> <%s>",
+ ctx->trace_id[0], ctx->trace_id[1], ctx->span_id, ctx->parent_span_id, ctx->flags,
+ flt_ot_str_hex(ctx->baggage, carrier->binary_data.size - sizeof(*ctx)),
+ flt_ot_str_ctrl(ctx->baggage, carrier->binary_data.size - sizeof(*ctx)));
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+#endif
+ }
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_extract_text_map - extract a span context from a text map.
+ *
+ * ARGUMENTS
+ *   tracer   - tracer performing the extraction; must not be NULL
+ *   carrier  - text map reader used for the extraction; must not be NULL
+ *   text_map - optional source text map; when non-NULL it is copied
+ *              (by value, shallow) into the carrier first
+ *
+ * DESCRIPTION
+ *   Optionally seeds the carrier with <text_map>, then calls
+ *   tracer->extract_text_map() to reconstruct the propagated span
+ *   context.  On failure any partially built context is freed.
+ *
+ * RETURN VALUE
+ *   The extracted span context (caller frees), or NULL on failure.
+ */
+struct otc_span_context *ot_extract_text_map(struct otc_tracer *tracer, struct otc_text_map_reader *carrier, const struct otc_text_map *text_map)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p", tracer, carrier, text_map);
+
+ if (carrier == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ if (text_map != NULL) {
+ (void)memset(carrier, 0, sizeof(*carrier));
+ (void)memcpy(&(carrier->text_map), text_map, sizeof(carrier->text_map));
+
+ FLT_OT_DBG_TEXT_CARRIER(carrier, foreach_key);
+ }
+
+ rc = tracer->extract_text_map(tracer, carrier, &retptr);
+ if (rc != otc_propagation_error_code_success)
+ FLT_OT_FREE_CLEAR(retptr);
+ else if (retptr != NULL)
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_extract_http_headers - extract a span context from HTTP headers.
+ *
+ * ARGUMENTS
+ *   tracer   - tracer performing the extraction; must not be NULL
+ *   carrier  - HTTP headers reader used for the extraction
+ *   text_map - optional source text map copied into the carrier first
+ *   err      - pointer through which an error message is returned
+ *
+ * DESCRIPTION
+ *   Optionally seeds the carrier with <text_map>, then calls
+ *   tracer->extract_http_headers() to reconstruct the propagated span
+ *   context.  Failures are reported through <err> and any partially
+ *   built context is freed.
+ *
+ * RETURN VALUE
+ *   The extracted span context (caller frees), or NULL on failure.
+ */
+struct otc_span_context *ot_extract_http_headers(struct otc_tracer *tracer, struct otc_http_headers_reader *carrier, const struct otc_text_map *text_map, char **err)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p, %p:%p", tracer, carrier, text_map, FLT_OT_DPTR_ARGS(err));
+
+ if (carrier == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ if (text_map != NULL) {
+ (void)memset(carrier, 0, sizeof(*carrier));
+ (void)memcpy(&(carrier->text_map), text_map, sizeof(carrier->text_map));
+
+ FLT_OT_DBG_TEXT_CARRIER(carrier, foreach_key);
+ }
+
+ rc = tracer->extract_http_headers(tracer, carrier, &retptr);
+ if (rc != otc_propagation_error_code_success) {
+ FLT_OT_ERR("failed to extract HTTP headers data");
+
+ FLT_OT_FREE_CLEAR(retptr);
+ }
+ else if (retptr != NULL)
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_extract_binary - extract a span context from binary data.
+ *
+ * ARGUMENTS
+ *   tracer      - tracer performing the extraction; must not be NULL
+ *   carrier     - custom (binary) carrier reader used for extraction
+ *   binary_data - optional source buffer; copied (shallow) into the
+ *                 carrier when its data pointer is set and size > 0
+ *
+ * DESCRIPTION
+ *   Optionally seeds the carrier with <binary_data>, then calls
+ *   tracer->extract_binary() to reconstruct the propagated span
+ *   context.  On failure any partially built context is freed.
+ *
+ * RETURN VALUE
+ *   The extracted span context (caller frees), or NULL on failure.
+ */
+struct otc_span_context *ot_extract_binary(struct otc_tracer *tracer, struct otc_custom_carrier_reader *carrier, const struct otc_binary_data *binary_data)
+{
+ struct otc_span_context *retptr = NULL;
+ int rc;
+
+ FLT_OT_FUNC("%p, %p, %p", tracer, carrier, binary_data);
+
+ if (carrier == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+ else if (tracer == NULL)
+ FLT_OT_RETURN_PTR(retptr);
+
+ if ((FLT_OT_DEREF(binary_data, data, NULL) != NULL) && (binary_data->size > 0)) {
+ (void)memset(carrier, 0, sizeof(*carrier));
+ (void)memcpy(&(carrier->binary_data), binary_data, sizeof(carrier->binary_data));
+
+ FLT_OT_DBG_CUSTOM_CARRIER(carrier, extract);
+ }
+
+ rc = tracer->extract_binary(tracer, carrier, &retptr);
+ if (rc != otc_propagation_error_code_success)
+ FLT_OT_FREE_CLEAR(retptr);
+ else if (retptr != NULL)
+ FLT_OT_DBG_SPAN_CONTEXT(retptr);
+
+ FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   ot_span_finish - finish a span, optionally logging final fields.
+ *
+ * ARGUMENTS
+ *   span      - address of the span handle; the handle is consumed
+ *               and reset to NULL
+ *   ts_finish - optional finish timestamp (NULL = now)
+ *   log_ts    - optional timestamp for the final log record
+ *   log_key   - first final log field key, or NULL for no log record
+ *   log_value - first final log field value (string)
+ *   ...       - further key/value string pairs, terminated by a NULL
+ *               key
+ *
+ * DESCRIPTION
+ *   Builds otc_finish_span_options from the finish timestamp and an
+ *   optional single log record assembled from the variable argument
+ *   list (up to OTC_MAXLOGFIELDS fields), then finishes the span.
+ *   The span's memory is released by finish_with_options(), so the
+ *   caller's handle is nulled afterwards.  <idx> is captured before
+ *   the finish call, under DEBUG_OT only, for the post-finish trace
+ *   (presumably FLT_OT_DBG compiles to a no-op otherwise — the
+ *   reference would not build without DEBUG_OT if it evaluated args).
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void ot_span_finish(struct otc_span **span, const struct timespec *ts_finish, const struct timespec *log_ts, const char *log_key, const char *log_value, ...)
+{
+ struct otc_finish_span_options options;
+ struct otc_log_field log_field[OTC_MAXLOGFIELDS];
+ struct otc_log_record log_records = { .fields = log_field, .num_fields = 0 };
+#ifdef DEBUG_OT
+ typeof((*span)->idx) idx = FLT_OT_DDEREF(span, idx, 0);
+#endif
+
+ FLT_OT_FUNC("%p:%p, %p, %p, \"%s\", \"%s\", ...", FLT_OT_DPTR_ARGS(span), ts_finish, log_ts, log_key, log_value);
+
+ if ((span == NULL) || (*span == NULL))
+ FLT_OT_RETURN();
+
+ (void)memset(&options, 0, sizeof(options));
+
+ if (ts_finish != NULL)
+ (void)memcpy(&(options.finish_time.value), ts_finish, sizeof(options.finish_time.value));
+
+ if (log_key != NULL) {
+ va_list ap;
+ int i;
+
+ if (log_ts != NULL)
+ (void)memcpy(&(log_records.timestamp.value), log_ts, sizeof(log_records.timestamp.value));
+
+ va_start(ap, log_value);
+ for (i = 0; (i < FLT_OT_TABLESIZE(log_field)) && (log_key != NULL); i++) {
+ log_field[i].key = log_key;
+ log_field[i].value.type = otc_value_string;
+ log_field[i].value.value.string_value = log_value;
+
+ log_key = va_arg(ap, typeof(log_key));
+ if (log_key != NULL)
+ log_value = va_arg(ap, typeof(log_value));
+ }
+ va_end(ap);
+
+ log_records.num_fields = i;
+ options.log_records = &log_records;
+ options.num_log_records = 1;
+ }
+
+ /*
+ * Caution: memory allocated for the span is released
+ * in the function finish_with_options().
+ */
+ (*span)->finish_with_options(*span, &options);
+
+ FLT_OT_DBG(2, "span %p:%zu finished", *span, idx);
+
+ *span = NULL;
+
+ FLT_OT_RETURN();
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/parser.c b/addons/ot/src/parser.c
new file mode 100644
index 0000000..f4f3e04
--- /dev/null
+++ b/addons/ot/src/parser.c
@@ -0,0 +1,1225 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+#ifdef DEBUG_OT
+/* Global debug configuration and per-thread trace indentation depth. */
+struct flt_ot_debug flt_ot_debug;
+THREAD_LOCAL int dbg_indent_level = 0;
+#endif
+
+#ifdef OTC_DBG_MEM
+/* Bookkeeping for the opentracing-c-wrapper memory debugger. */
+static struct otc_dbg_mem_data dbg_mem_data[1000000];
+static struct otc_dbg_mem dbg_mem;
+#endif
+
+/*
+ * Configuration parser state: the element (config / tracer / group /
+ * scope / span) currently being parsed.  Set and cleared as the
+ * corresponding configuration sections are entered and left.
+ */
+static struct flt_ot_conf *flt_ot_current_config = NULL;
+static struct flt_ot_conf_tracer *flt_ot_current_tracer = NULL;
+static struct flt_ot_conf_group *flt_ot_current_group = NULL;
+static struct flt_ot_conf_scope *flt_ot_current_scope = NULL;
+static struct flt_ot_conf_span *flt_ot_current_span = NULL;
+
+
+/***
+ * NAME
+ *   flt_ot_parse_strdup - duplicate a string, reporting parse errors.
+ *
+ * ARGUMENTS
+ *   ptr     - where the duplicated string is stored
+ *   str     - string to duplicate
+ *   err     - pointer through which an error message is returned
+ *   err_msg - keyword name used in the error message
+ *
+ * DESCRIPTION
+ *   Wraps FLT_OT_STRDUP(), formatting an out-of-memory parse error
+ *   mentioning <err_msg> on failure.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_strdup(char **ptr, const char *str, char **err, const char *err_msg)
+{
+ int retval = ERR_NONE;
+
+ FLT_OT_FUNC("%p:%p, %p, %p:%p, \"%s\"", FLT_OT_DPTR_ARGS(ptr), str, FLT_OT_DPTR_ARGS(err), err_msg);
+
+ *ptr = FLT_OT_STRDUP(str);
+ if (*ptr == NULL) {
+ FLT_OT_PARSE_ERR(err, "'%s' : out of memory", err_msg);
+
+ retval |= ERR_ABORT | ERR_ALERT;
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_keyword - store a single-valued keyword argument.
+ *
+ * ARGUMENTS
+ *   ptr     - where the keyword's value is stored; must be NULL on
+ *             entry (a non-NULL value means "already set")
+ *   args    - configuration line arguments
+ *   cur_arg - index of the keyword being parsed
+ *   pos     - index whose following argument (<pos>+1) is the value
+ *   err     - pointer through which an error message is returned
+ *   err_msg - description of the keyword for error messages
+ *
+ * DESCRIPTION
+ *   Validates that the keyword has not been set before and that its
+ *   value argument is present, then duplicates the value into <ptr>.
+ *   The error text differs depending on whether the keyword is the
+ *   section-type argument itself (cur_arg == pos) or a named keyword.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_keyword(char **ptr, char **args, int cur_arg, int pos, char **err, const char *err_msg)
+{
+ int retval = ERR_NONE;
+
+ FLT_OT_FUNC("%p:%p, %p, %d, %d, %p:%p, \"%s\"", FLT_OT_DPTR_ARGS(ptr), args, cur_arg, pos, FLT_OT_DPTR_ARGS(err), err_msg);
+
+ if (*ptr != NULL) {
+ if (cur_arg == pos)
+ FLT_OT_PARSE_ERR(err, FLT_OT_FMT_TYPE "%s already set", err_msg);
+ else
+ FLT_OT_PARSE_ERR(err, "'%s' : %s already set", args[cur_arg], err_msg);
+ }
+ else if (!FLT_OT_ARG_ISVALID(pos + 1)) {
+ if (cur_arg == pos)
+ FLT_OT_PARSE_ERR(err, FLT_OT_FMT_TYPE "no %s set", err_msg);
+ else
+ FLT_OT_PARSE_ERR(err, "'%s' : no %s set", args[cur_arg], err_msg);
+ }
+ else {
+ retval = flt_ot_parse_strdup(ptr, args[pos + 1], err, args[cur_arg]);
+ }
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_invalid_char - find the first invalid character in a
+ *                               name.
+ *
+ * ARGUMENTS
+ *   name - string to validate
+ *   type - validation mode, one of the FLT_OT_PARSE_INVALID_* rules
+ *          (generic name, domain name, prefix/context, variable name)
+ *
+ * DESCRIPTION
+ *   Dispatches to the matching HAProxy validator (invalid_char(),
+ *   invalid_domainchar(), invalid_prefix_char()), or for variable
+ *   names checks locally that the string contains only letters,
+ *   digits and '_' and does not start with a digit.
+ *   NOTE(review): the ctype calls receive a plain char; presumably
+ *   names are ASCII here — confirm, since negative chars passed to
+ *   isdigit()/isalnum() are undefined behavior.
+ *
+ * RETURN VALUE
+ *   Pointer to the first invalid character, or NULL if the name is
+ *   valid (or empty/NULL, which is treated as "nothing to flag").
+ */
+static const char *flt_ot_parse_invalid_char(const char *name, int type)
+{
+ const char *retptr = NULL;
+
+ FLT_OT_FUNC("\"%s\", %d", name, type);
+
+ if (!FLT_OT_STR_ISVALID(name))
+ FLT_OT_RETURN_EX(retptr, const char *, "%p");
+
+ if (type == FLT_OT_PARSE_INVALID_CHAR) {
+ retptr = invalid_char(name);
+ }
+ else if (type == FLT_OT_PARSE_INVALID_DOM) {
+ retptr = invalid_domainchar(name);
+ }
+ else if (type == FLT_OT_PARSE_INVALID_CTX) {
+ retptr = invalid_prefix_char(name);
+ }
+ else if (type == FLT_OT_PARSE_INVALID_VAR) {
+ retptr = name;
+
+ /*
+ * Allowed characters are letters, numbers and '_', the first
+ * character in the string must not be a number.
+ */
+ if (!isdigit(*retptr))
+ for (++retptr; (*retptr == '_') || isalnum(*retptr); retptr++);
+
+ if (*retptr == '\0')
+ retptr = NULL;
+ }
+
+ FLT_OT_RETURN_EX(retptr, const char *, "%p");
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg_check - common validation for a configuration
+ *                            line.
+ *
+ * ARGUMENTS
+ *   file            - configuration file name (for messages)
+ *   linenum         - configuration line number (for messages)
+ *   args            - configuration line arguments; args[0] is the
+ *                     keyword
+ *   id              - identifier of the section element being parsed
+ *                     (NULL means no section-level checks)
+ *   parse_data      - table of recognized keywords for this section
+ *   parse_data_size - number of entries in <parse_data>
+ *   pdata           - where the matched keyword descriptor is returned
+ *   err             - pointer through which an error message is
+ *                     returned
+ *
+ * DESCRIPTION
+ *   Looks up args[0] in <parse_data>, then verifies: that a tracer is
+ *   defined when required, the argument count against the keyword's
+ *   min/max, that the first argument contains only allowed characters
+ *   (per the keyword's check_name rule), and that the enclosing
+ *   group/scope ID is set when the keyword requires one.  Errors are
+ *   accumulated into <retval> via FLT_OT_PARSE_ERR.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_check(const char *file, int linenum, char **args, const void *id, const struct flt_ot_parse_data *parse_data, size_t parse_data_size, const struct flt_ot_parse_data **pdata, char **err)
+{
+ int i, argc, retval = ERR_NONE;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p, %p, %zu, %p:%p, %p:%p", file, linenum, args, id, parse_data, parse_data_size, FLT_OT_DPTR_ARGS(pdata), FLT_OT_DPTR_ARGS(err));
+
+ FLT_OT_ARGS_DUMP();
+
+ *pdata = NULL;
+
+ /* First check here if args[0] is the correct keyword. */
+ for (i = 0; (*pdata == NULL) && (i < parse_data_size); i++)
+ if (strcmp(parse_data[i].name, args[0]) == 0)
+ *pdata = parse_data + i;
+
+ if (*pdata == NULL)
+ FLT_OT_PARSE_ERR(err, "'%s' : unknown keyword", args[0]);
+ else
+ argc = flt_ot_args_count(args);
+
+ if ((retval & ERR_CODE) || (id == NULL))
+ /* Do nothing. */;
+ else if ((id != flt_ot_current_tracer) && (flt_ot_current_config->tracer == NULL))
+ FLT_OT_PARSE_ERR(err, "tracer not defined");
+
+ /*
+ * Checking that fewer arguments are specified in the configuration
+ * line than is required.
+ */
+ if (!(retval & ERR_CODE))
+ if (argc < (*pdata)->args_min)
+ FLT_OT_PARSE_ERR(err, "'%s' : too few arguments (use '%s%s')", args[0], (*pdata)->name, (*pdata)->usage);
+
+ /*
+ * Checking that more arguments are specified in the configuration
+ * line than the maximum allowed.
+ */
+ if (!(retval & ERR_CODE) && ((*pdata)->args_max > 0))
+ if (argc > (*pdata)->args_max)
+ FLT_OT_PARSE_ERR(err, "'%s' : too many arguments (use '%s%s')", args[0], (*pdata)->name, (*pdata)->usage);
+
+ /* Checking that the first argument has only allowed characters. */
+ if (!(retval & ERR_CODE) && ((*pdata)->check_name != FLT_OT_PARSE_INVALID_NONE)) {
+ const char *ic;
+
+ ic = flt_ot_parse_invalid_char(args[1], (*pdata)->check_name);
+ if (ic != NULL)
+ FLT_OT_PARSE_ERR(err, "%s '%s' : invalid character '%c'", args[0], args[1], *ic);
+ }
+
+ /* Checking that the data group name is defined. */
+ if (!(retval & ERR_CODE) && (*pdata)->flag_check_id && (id == NULL))
+ FLT_OT_PARSE_ERR(err, "'%s' : %s ID not set (use '%s%s')", args[0], parse_data[1].name, parse_data[1].name, parse_data[1].usage);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg_sample_expr - parse one sample expression.
+ *
+ * ARGUMENTS
+ *   file    - configuration file name
+ *   linenum - configuration line number
+ *   args    - configuration line arguments
+ *   idx     - in/out index of the argument to parse; advanced by
+ *             sample_parse_expr()
+ *   head    - list the new expression is appended to
+ *   err     - pointer through which an error message is returned
+ *
+ * DESCRIPTION
+ *   Allocates a flt_ot_conf_sample_expr element, then compiles the
+ *   expression text at args[*idx] with HAProxy's sample_parse_expr().
+ *   On any failure the element is freed again (it was already linked
+ *   into <head> by the init call).
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_sample_expr(const char *file, int linenum, char **args, int *idx, struct list *head, char **err)
+{
+ struct flt_ot_conf_sample_expr *expr;
+ int retval = ERR_NONE;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p, %p, %p:%p", file, linenum, args, idx, head, FLT_OT_DPTR_ARGS(err));
+
+ expr = flt_ot_conf_sample_expr_init(args[*idx], linenum, head, err);
+ if (expr != NULL) {
+ expr->expr = sample_parse_expr(args, idx, file, linenum, err, &(flt_ot_current_config->proxy->conf.args), NULL);
+ if (expr->expr != NULL)
+ FLT_OT_DBG(3, "sample expression '%s' added", expr->value);
+ else
+ retval |= ERR_ABORT | ERR_ALERT;
+ } else {
+ retval |= ERR_ABORT | ERR_ALERT;
+ }
+
+ if (retval & ERR_CODE)
+ flt_ot_conf_sample_expr_free(&expr);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg_sample - parse a sample definition line.
+ *
+ * ARGUMENTS
+ *   file    - configuration file name
+ *   linenum - configuration line number
+ *   args    - configuration line arguments; args[1] is the sample
+ *             name, args[2..] the sample expressions
+ *   head    - list the new sample is appended to
+ *   err     - pointer through which an error message is returned
+ *
+ * DESCRIPTION
+ *   Allocates the sample element, then parses each remaining argument
+ *   as a sample expression with flt_ot_parse_cfg_sample_expr(), with
+ *   the proxy argument context temporarily pointed at this file/line
+ *   so HAProxy resolves expression arguments correctly.  On error the
+ *   whole sample (including any expressions parsed so far) is freed.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_sample(const char *file, int linenum, char **args, struct list *head, char **err)
+{
+ struct flt_ot_conf_sample *sample;
+ int idx = 2, retval = ERR_NONE;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p, %p:%p", file, linenum, args, head, FLT_OT_DPTR_ARGS(err));
+
+ sample = flt_ot_conf_sample_init(args, linenum, head, err);
+ if (sample == NULL)
+ FLT_OT_PARSE_ERR(err, "'%s' : out of memory", args[0]);
+
+ if (!(retval & ERR_CODE)) {
+ flt_ot_current_config->proxy->conf.args.ctx = ARGC_OT;
+ flt_ot_current_config->proxy->conf.args.file = file;
+ flt_ot_current_config->proxy->conf.args.line = linenum;
+
+ while (!(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(idx))
+ retval = flt_ot_parse_cfg_sample_expr(file, linenum, args, &idx, &(sample->exprs), err);
+
+ flt_ot_current_config->proxy->conf.args.file = NULL;
+ flt_ot_current_config->proxy->conf.args.line = 0;
+ }
+
+ if (retval & ERR_CODE)
+ flt_ot_conf_sample_free(&sample);
+ else
+ FLT_OT_DBG(3, "sample '%s' -> '%s' added", sample->key, sample->value);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg_str - parse a list of string arguments.
+ *
+ * ARGUMENTS
+ *   file    - configuration file name (unused except for tracing)
+ *   linenum - configuration line number
+ *   args    - configuration line arguments; args[1..] are the strings
+ *   head    - list each string element is appended to
+ *   err     - pointer through which an error message is returned
+ *
+ * DESCRIPTION
+ *   Adds one flt_ot_conf_str element per argument to <head>.
+ *   NOTE(review): <str> is never assigned, so the error-path free
+ *   call always receives NULL; elements already linked into <head>
+ *   are presumably released by the caller's cleanup — confirm.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_str(const char *file, int linenum, char **args, struct list *head, char **err)
+{
+ struct flt_ot_conf_str *str = NULL;
+ int i, retval = ERR_NONE;
+
+ FLT_OT_FUNC("\"%s\", %d, %p, %p, %p:%p", file, linenum, args, head, FLT_OT_DPTR_ARGS(err));
+
+ for (i = 1; !(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(i); i++)
+ if (flt_ot_conf_str_init(args[i], linenum, head, err) == NULL)
+ retval |= ERR_ABORT | ERR_ALERT;
+
+ if (retval & ERR_CODE)
+ flt_ot_conf_str_free(&str);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg_file - parse a file-path keyword argument.
+ *
+ * ARGUMENTS
+ *   ptr     - where the duplicated path is stored
+ *   file    - configuration file name
+ *   linenum - configuration line number
+ *   args    - configuration line arguments; args[1] is the path
+ *   err     - pointer through which an error message is returned
+ *   err_msg - description of the expected file for error messages
+ *
+ * DESCRIPTION
+ *   Validates that exactly one path argument is present and that the
+ *   file exists and is readable (access(R_OK)), then stores the path
+ *   via flt_ot_parse_keyword().
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_file(char **ptr, const char *file, int linenum, char **args, char **err, const char *err_msg)
+{
+ int retval = ERR_NONE;
+
+ FLT_OT_FUNC("%p:%p, \"%s\", %d, %p, %p:%p, \"%s\"", FLT_OT_DPTR_ARGS(ptr), file, linenum, args, FLT_OT_DPTR_ARGS(err), err_msg);
+
+ if (!FLT_OT_ARG_ISVALID(1))
+ FLT_OT_PARSE_ERR(err, "'%s' : no %s specified", flt_ot_current_tracer->id, err_msg);
+ else if (alertif_too_many_args(1, file, linenum, args, &retval))
+ retval |= ERR_ABORT | ERR_ALERT;
+ else if (access(args[1], R_OK) == -1)
+ FLT_OT_PARSE_ERR(err, "'%s' : %s", args[1], strerror(errno));
+ else
+ retval = flt_ot_parse_keyword(ptr, args, 0, 0, err, err_msg);
+
+ FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_check_scope - check the active configuration scope.
+ *
+ * ARGUMENTS
+ *   This function takes no arguments.
+ *
+ * DESCRIPTION
+ *   Compares the global cfg_scope against the ID of the configuration
+ *   currently being parsed; a mismatch means the current section
+ *   belongs to another scope and should be skipped.
+ *
+ * RETURN VALUE
+ *   Returns TRUE in case the configuration is not in the currently
+ *   defined scope, FALSE otherwise.
+ */
+static bool flt_ot_parse_check_scope(void)
+{
+ bool retval = 0;
+
+ if ((cfg_scope != NULL) && (flt_ot_current_config->id != NULL) && (strcmp(flt_ot_current_config->id, cfg_scope) != 0)) {
+ FLT_OT_DBG(1, "cfg_scope: '%s', id: '%s'", cfg_scope, flt_ot_current_config->id);
+
+ retval = 1;
+ }
+
+ return retval;
+}
+
+
+/***
+ * NAME
+ * flt_ot_parse_cfg_tracer -
+ *
+ * ARGUMENTS
+ * file -
+ * linenum -
+ * args -
+ * kw_mod -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_tracer(const char *file, int linenum, char **args, int kw_mod)
+{
+#define FLT_OT_PARSE_TRACER_DEF(a,b,c,d,e,f,g) { FLT_OT_PARSE_TRACER_##a, b, FLT_OT_PARSE_INVALID_##c, d, e, f, g },
+	static const struct flt_ot_parse_data parse_data[] = { FLT_OT_PARSE_TRACER_DEFINES };
+#undef FLT_OT_PARSE_TRACER_DEF
+	const struct flt_ot_parse_data *pdata = NULL;
+	char *err = NULL, *err_log = NULL;
+	int i, retval = ERR_NONE;
+
+	FLT_OT_FUNC("\"%s\", %d, %p, 0x%08x", file, linenum, args, kw_mod);
+
+	/* Nothing to do if this line is outside the filter's parsing scope. */
+	if (flt_ot_parse_check_scope())
+		FLT_OT_RETURN_INT(retval);
+
+	/* Validate the keyword and its argument count against the parse table. */
+	retval = flt_ot_parse_cfg_check(file, linenum, args, flt_ot_current_tracer, parse_data, FLT_OT_TABLESIZE(parse_data), &pdata, &err);
+	if (retval & ERR_CODE) {
+		FLT_OT_PARSE_IFERR_ALERT();
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	if (pdata->keyword == FLT_OT_PARSE_TRACER_ID) {
+		/* Only one tracer may be configured per filter configuration. */
+		if (flt_ot_current_config->tracer != NULL) {
+			FLT_OT_PARSE_ERR(&err, "'%s' : tracer can be defined only once", args[1]);
+		} else {
+			flt_ot_current_tracer = flt_ot_conf_tracer_init(args[1], linenum, &err);
+			if (flt_ot_current_tracer == NULL)
+				retval |= ERR_ABORT | ERR_ALERT;
+		}
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_LOG) {
+		/* 'no log ...' (kw_mod == KWM_NO) disables the logger being parsed. */
+		if (parse_logger(args, &(flt_ot_current_tracer->proxy_log.loggers), kw_mod == KWM_NO, file, linenum, &err_log) == 0) {
+			FLT_OT_PARSE_ERR(&err, "'%s %s ...' : %s", args[0], args[1], err_log);
+			FLT_OT_FREE_CLEAR(err_log);
+
+			retval |= ERR_ABORT | ERR_ALERT;
+		} else {
+			flt_ot_current_tracer->logging |= FLT_OT_LOGGING_ON;
+		}
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_CONFIG) {
+		retval = flt_ot_parse_cfg_file(&(flt_ot_current_tracer->config), file, linenum, args, &err, "configuration file");
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_PLUGIN) {
+		retval = flt_ot_parse_cfg_file(&(flt_ot_current_tracer->plugin), file, linenum, args, &err, "plugin library");
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_GROUPS) {
+		/* Each remaining argument is the name of a group placeholder. */
+		for (i = 1; !(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(i); i++)
+			if (flt_ot_conf_ph_init(args[i], linenum, &(flt_ot_current_tracer->ph_groups), &err) == NULL)
+				retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_SCOPES) {
+		/* Each remaining argument is the name of a scope placeholder. */
+		for (i = 1; !(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(i); i++)
+			if (flt_ot_conf_ph_init(args[i], linenum, &(flt_ot_current_tracer->ph_scopes), &err) == NULL)
+				retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_ACL) {
+		/* 'or' would be ambiguous with the ACL condition syntax. */
+		if (strcasecmp(args[1], "or") == 0)
+			FLT_OT_PARSE_ERR(&err, "'%s %s ...' : invalid ACL name", args[0], args[1]);
+		else if (parse_acl((const char **)args + 1, &(flt_ot_current_tracer->acls), &err, &(flt_ot_current_config->proxy->conf.args), file, linenum) == NULL)
+			retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_RATE_LIMIT) {
+		/* Rate limit is parsed as a float in [0, FLT_OT_RATE_LIMIT_MAX] and stored scaled to a u32. */
+		flt_ot_current_tracer->rate_limit = FLT_OT_FLOAT_U32(flt_ot_strtod(args[1], 0.0, FLT_OT_RATE_LIMIT_MAX, &err), FLT_OT_RATE_LIMIT_MAX);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_OPTION) {
+		/* Boolean options; the 'no' keyword prefix (kw_mod == KWM_NO) clears them. */
+		if (strcmp(args[1], FLT_OT_PARSE_OPTION_DISABLED) == 0) {
+			flt_ot_current_tracer->flag_disabled = (kw_mod == KWM_NO) ? 0 : 1;
+		}
+		else if (strcmp(args[1], FLT_OT_PARSE_OPTION_HARDERR) == 0) {
+			flt_ot_current_tracer->flag_harderr = (kw_mod == KWM_NO) ? 0 : 1;
+		}
+		else if (strcmp(args[1], FLT_OT_PARSE_OPTION_NOLOGNORM) == 0) {
+			if (kw_mod == KWM_NO)
+				flt_ot_current_tracer->logging &= ~FLT_OT_LOGGING_NOLOGNORM;
+			else
+				flt_ot_current_tracer->logging |= FLT_OT_LOGGING_NOLOGNORM;
+		}
+		else
+			FLT_OT_PARSE_ERR(&err, "'%s' : unknown option '%s'", args[0], args[1]);
+	}
+#ifdef DEBUG_OT
+	else if (pdata->keyword == FLT_OT_PARSE_TRACER_DEBUG_LEVEL) {
+		/* Debug level is an integer in [0, 255]. */
+		flt_ot_debug.level = flt_ot_strtoll(args[1], 0, 255, &err);
+	}
+#else
+	else {
+		/* Without DEBUG_OT the 'debug-level' keyword is accepted but ignored. */
+		FLT_OT_PARSE_WARNING("'%s' : keyword ignored", file, linenum, args[0]);
+	}
+#endif
+
+	FLT_OT_PARSE_IFERR_ALERT();
+
+	/* On error, release the partially built tracer definition. */
+	if ((retval & ERR_CODE) && (flt_ot_current_tracer != NULL))
+		flt_ot_conf_tracer_free(&flt_ot_current_tracer);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_post_parse_cfg_tracer -
+ *
+ * ARGUMENTS
+ * This function takes no arguments.
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_post_parse_cfg_tracer(void)
+{
+	char errbuf[BUFSIZ] = "";
+	int retval = ERR_NONE;
+
+	FLT_OT_FUNC("");
+
+	/* Nothing to check if no 'ot-tracer' section was being parsed. */
+	if (flt_ot_current_tracer == NULL)
+		FLT_OT_RETURN_INT(retval);
+
+	/* Hand the finished tracer over to the filter configuration. */
+	flt_ot_current_config->tracer = flt_ot_current_tracer;
+
+	if (flt_ot_current_tracer->id == NULL)
+		FLT_OT_RETURN_INT(retval);
+
+	if (flt_ot_current_tracer->config == NULL) {
+		FLT_OT_POST_PARSE_ALERT("tracer '%s' has no configuration file specified", flt_ot_current_tracer->cfg_line, flt_ot_current_tracer->id);
+	} else {
+		/* Load the tracer plugin configuration; lines starting with '#' are treated as comments. */
+		flt_ot_current_tracer->cfgbuf = otc_file_read(flt_ot_current_tracer->config, "#", errbuf, sizeof(errbuf));
+		if (flt_ot_current_tracer->cfgbuf == NULL)
+			FLT_OT_POST_PARSE_ALERT("tracer '%s' %s", flt_ot_current_tracer->cfg_line, flt_ot_current_tracer->id, (*errbuf == '\0') ? "cannot load configuration file" : errbuf);
+	}
+
+	if (flt_ot_current_tracer->plugin == NULL)
+		FLT_OT_POST_PARSE_ALERT("tracer '%s' has no plugin library specified", flt_ot_current_tracer->cfg_line, flt_ot_current_tracer->id);
+
+	/* The section is finished; drop the parsing-state pointer. */
+	flt_ot_current_tracer = NULL;
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_parse_cfg_group -
+ *
+ * ARGUMENTS
+ * file -
+ * linenum -
+ * args -
+ * kw_mod -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_group(const char *file, int linenum, char **args, int kw_mod)
+{
+#define FLT_OT_PARSE_GROUP_DEF(a,b,c,d,e,f,g) { FLT_OT_PARSE_GROUP_##a, b, FLT_OT_PARSE_INVALID_##c, d, e, f, g },
+	static const struct flt_ot_parse_data parse_data[] = { FLT_OT_PARSE_GROUP_DEFINES };
+#undef FLT_OT_PARSE_GROUP_DEF
+	const struct flt_ot_parse_data *pdata = NULL;
+	char *err = NULL;
+	int i, retval = ERR_NONE;
+
+	FLT_OT_FUNC("\"%s\", %d, %p, 0x%08x", file, linenum, args, kw_mod);
+
+	/* Nothing to do if this line is outside the filter's parsing scope. */
+	if (flt_ot_parse_check_scope())
+		FLT_OT_RETURN_INT(retval);
+
+	/* Validate the keyword and its argument count against the parse table. */
+	retval = flt_ot_parse_cfg_check(file, linenum, args, flt_ot_current_group, parse_data, FLT_OT_TABLESIZE(parse_data), &pdata, &err);
+	if (retval & ERR_CODE) {
+		FLT_OT_PARSE_IFERR_ALERT();
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	if (pdata->keyword == FLT_OT_PARSE_GROUP_ID) {
+		flt_ot_current_group = flt_ot_conf_group_init(args[1], linenum, &(flt_ot_current_config->groups), &err);
+		/*
+		 * BUG FIX: check the result of the group allocation above, not
+		 * flt_ot_current_config (which is always non-NULL while the OT
+		 * configuration is being parsed); otherwise an out-of-memory
+		 * failure here went completely unnoticed.
+		 */
+		if (flt_ot_current_group == NULL)
+			retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_GROUP_SCOPES) {
+		/* Each remaining argument is the name of a scope placeholder. */
+		for (i = 1; !(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(i); i++)
+			if (flt_ot_conf_ph_init(args[i], linenum, &(flt_ot_current_group->ph_scopes), &err) == NULL)
+				retval |= ERR_ABORT | ERR_ALERT;
+	}
+
+	FLT_OT_PARSE_IFERR_ALERT();
+
+	/* On error, release the partially built group definition. */
+	if ((retval & ERR_CODE) && (flt_ot_current_group != NULL))
+		flt_ot_conf_group_free(&flt_ot_current_group);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_post_parse_cfg_group -
+ *
+ * ARGUMENTS
+ * This function takes no arguments.
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_post_parse_cfg_group(void)
+{
+	int retval = ERR_NONE;
+
+	FLT_OT_FUNC("");
+
+	if (flt_ot_current_group != NULL) {
+		/* A group without at least one scope is a configuration error. */
+		if (LIST_ISEMPTY(&(flt_ot_current_group->ph_scopes)))
+			FLT_OT_POST_PARSE_ALERT("group '%s' has no defined scope(s)", flt_ot_current_group->cfg_line, flt_ot_current_group->id);
+
+		/* The section is finished; drop the parsing-state pointer. */
+		flt_ot_current_group = NULL;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_parse_cfg_scope_ctx -
+ *
+ * ARGUMENTS
+ * args -
+ * cur_arg -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_scope_ctx(char **args, int cur_arg, char **err)
+{
+	uint8_t ctx_flag = 0;
+	int retval = ERR_NONE;
+
+	FLT_OT_FUNC("%p, %d, %p:%p", args, cur_arg, FLT_OT_DPTR_ARGS(err));
+
+	/* Map the argument to one of the supported context storage flags. */
+	if (strcmp(args[cur_arg], FLT_OT_PARSE_CTX_USE_HEADERS) == 0) {
+		ctx_flag = FLT_OT_CTX_USE_HEADERS;
+	}
+#ifdef USE_OT_VARS
+	else if (strcmp(args[cur_arg], FLT_OT_PARSE_CTX_USE_VARS) == 0) {
+		ctx_flag = FLT_OT_CTX_USE_VARS;
+	}
+#endif
+	else {
+		FLT_OT_PARSE_ERR(err, "'%s' : invalid context storage type", args[0]);
+	}
+
+	/* A valid storage type may only be specified once per span. */
+	if (ctx_flag != 0) {
+		if (flt_ot_current_span->ctx_flags & ctx_flag)
+			FLT_OT_PARSE_ERR(err, "'%s' : %s already used", args[0], args[cur_arg]);
+		else
+			flt_ot_current_span->ctx_flags |= ctx_flag;
+	}
+
+	FLT_OT_DBG(2, "ctx_flags: 0x%02hhx (0x%02hhx)", flt_ot_current_span->ctx_flags, ctx_flag);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_acl -
+ *
+ * ARGUMENTS
+ *   file    - configuration file name (for error reporting)
+ *   linenum - line number within the configuration file
+ *   px      - proxy the ACL condition is built for
+ *   args    - condition arguments ('if'/'unless' followed by ACL terms)
+ *   err     - variable to store the error message in, can be NULL
+ *   head    - first ACL list to try, followed by additional lists passed
+ *             as variadic arguments and terminated by a NULL pointer
+ *
+ * DESCRIPTION
+ *   Builds an ACL condition by trying each of the supplied ACL lists in
+ *   turn until build_acl_cond() succeeds.  Only the first attempt records
+ *   an error message; it is discarded if a later list succeeds.
+ *
+ * RETURN VALUE
+ *   Returns the built ACL condition, or NULL in case of an error.
+ */
+static struct acl_cond *flt_ot_parse_acl(const char *file, int linenum, struct proxy *px, const char **args, char **err, struct list *head, ...)
+{
+	va_list ap;
+	int n = 0;
+	struct acl_cond *retptr = NULL;
+
+	FLT_OT_FUNC("\"%s\", %d, %p, %p, %p:%p, %p, ...", file, linenum, px, args, FLT_OT_DPTR_ARGS(err), head);
+
+	/*
+	 * Try each supplied ACL list in turn until the condition builds.
+	 * Note the loop increment: the next list is fetched from the
+	 * variadic arguments only AFTER an attempt fails, and the variadic
+	 * list must be terminated with a NULL pointer.  The error message
+	 * is recorded only for the first (local) list.
+	 */
+	for (va_start(ap, head); (retptr == NULL) && (head != NULL); head = va_arg(ap, typeof(head)), n++) {
+		retptr = build_acl_cond(file, linenum, head, px, args, (n == 0) ? err : NULL);
+		if (retptr != NULL)
+			FLT_OT_DBG(2, "ACL build done, using list %p %d", head, n);
+	}
+	va_end(ap);
+
+	/* A later list succeeded: discard the error left by the first attempt. */
+	if ((retptr != NULL) && (err != NULL))
+		FLT_OT_FREE_CLEAR(*err);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_parse_cfg_scope -
+ *
+ * ARGUMENTS
+ * file -
+ * linenum -
+ * args -
+ * kw_mod -
+ *
+ * DESCRIPTION
+ * Function used to load the scope block configuration.
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg_scope(const char *file, int linenum, char **args, int kw_mod)
+{
+#define FLT_OT_PARSE_SCOPE_DEF(a,b,c,d,e,f,g) { FLT_OT_PARSE_SCOPE_##a, b, FLT_OT_PARSE_INVALID_##c, d, e, f, g },
+	static const struct flt_ot_parse_data parse_data[] = { FLT_OT_PARSE_SCOPE_DEFINES };
+#undef FLT_OT_PARSE_SCOPE_DEF
+	const struct flt_ot_parse_data *pdata = NULL;
+	char *err = NULL;
+	int i, retval = ERR_NONE;
+
+	FLT_OT_FUNC("\"%s\", %d, %p, 0x%08x", file, linenum, args, kw_mod);
+
+	/* Nothing to do if this line is outside the filter's parsing scope. */
+	if (flt_ot_parse_check_scope())
+		FLT_OT_RETURN_INT(retval);
+
+	/* Validate the keyword and its argument count against the parse table. */
+	retval = flt_ot_parse_cfg_check(file, linenum, args, flt_ot_current_span, parse_data, FLT_OT_TABLESIZE(parse_data), &pdata, &err);
+	if (retval & ERR_CODE) {
+		FLT_OT_PARSE_IFERR_ALERT();
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	if (pdata->keyword == FLT_OT_PARSE_SCOPE_ID) {
+		/* Initialization of a new scope. */
+		flt_ot_current_scope = flt_ot_conf_scope_init(args[1], linenum, &(flt_ot_current_config->scopes), &err);
+		if (flt_ot_current_scope == NULL)
+			retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_SPAN) {
+		/*
+		 * Checking if this is the beginning of the definition of
+		 * a new span.
+		 */
+		if (flt_ot_current_span != NULL) {
+			FLT_OT_DBG(3, "span '%s' (done)", flt_ot_current_span->id);
+
+			flt_ot_current_span = NULL;
+		}
+
+		/* Initialization of a new span. */
+		flt_ot_current_span = flt_ot_conf_span_init(args[1], linenum, &(flt_ot_current_scope->spans), &err);
+
+		/*
+		 * In case the span has a defined reference,
+		 * the correctness of the arguments is checked here.
+		 */
+		if (flt_ot_current_span == NULL) {
+			retval |= ERR_ABORT | ERR_ALERT;
+		}
+		else if (FLT_OT_ARG_ISVALID(2)) {
+			/* Optional arguments: 'root' and/or one span reference. */
+			for (i = 2; (i < pdata->args_max) && FLT_OT_ARG_ISVALID(i); i++)
+				if (strcmp(args[i], FLT_OT_PARSE_SPAN_ROOT) == 0) {
+					if (flt_ot_current_span->flag_root)
+						FLT_OT_PARSE_ERR(&err, "'%s' : already set (use '%s%s')", args[i], pdata->name, pdata->usage);
+					else
+						flt_ot_current_span->flag_root = 1;
+				}
+				else if ((strcmp(args[i], FLT_OT_PARSE_SPAN_REF_CHILD) == 0) || (strcmp(args[i], FLT_OT_PARSE_SPAN_REF_FOLLOWS) == 0)) {
+					if (!FLT_OT_ARG_ISVALID(i + 1)) {
+						FLT_OT_PARSE_ERR(&err, "'%s' : too few arguments (use '%s%s')", args[i], pdata->name, pdata->usage);
+					}
+					/* NOTE: args[i++] below advances i onto the referenced span id. */
+					else if (strcmp(args[i++], FLT_OT_PARSE_SPAN_REF_CHILD) == 0) {
+						flt_ot_current_span->ref_type   = otc_span_reference_child_of;
+						flt_ot_current_span->ref_id_len = strlen(args[i]);
+
+						retval = flt_ot_parse_strdup(&(flt_ot_current_span->ref_id), args[i], &err, args[1]);
+					}
+					else {
+						flt_ot_current_span->ref_type   = otc_span_reference_follows_from;
+						flt_ot_current_span->ref_id_len = strlen(args[i]);
+
+						retval = flt_ot_parse_strdup(&(flt_ot_current_span->ref_id), args[i], &err, args[1]);
+					}
+				}
+				else {
+					FLT_OT_PARSE_ERR(&err, "'%s' : invalid argument (use '%s%s')", args[i], pdata->name, pdata->usage);
+				}
+		}
+		else {
+			/*
+			 * This is not a faulty configuration, only such a case
+			 * will be logged.
+			 */
+			FLT_OT_DBG(3, "new span '%s' without reference", flt_ot_current_span->id);
+		}
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_TAG) {
+		/* 'tag', 'log' and 'baggage' all take a key plus sample expression. */
+		retval = flt_ot_parse_cfg_sample(file, linenum, args, &(flt_ot_current_span->tags), &err);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_LOG) {
+		retval = flt_ot_parse_cfg_sample(file, linenum, args, &(flt_ot_current_span->logs), &err);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_BAGGAGE) {
+		retval = flt_ot_parse_cfg_sample(file, linenum, args, &(flt_ot_current_span->baggages), &err);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_INJECT) {
+		/*
+		 * Automatic context name generation can be specified here
+		 * if the contents of the FLT_OT_PARSE_CTX_AUTONAME macro
+		 * are used as the name. In that case, if the context is
+		 * after a particular event, it gets its name; otherwise
+		 * it gets the name of the current span.
+		 */
+		if (flt_ot_current_span->ctx_id != NULL)
+			FLT_OT_PARSE_ERR(&err, "'%s' : only one context per span is allowed", args[1]);
+		else if (strcmp(args[1], FLT_OT_PARSE_CTX_AUTONAME) != 0)
+			retval = flt_ot_parse_strdup(&(flt_ot_current_span->ctx_id), args[1], &err, args[0]);
+		else if (flt_ot_current_scope->event != FLT_OT_EVENT_REQ_NONE)
+			retval = flt_ot_parse_strdup(&(flt_ot_current_span->ctx_id), flt_ot_event_data[flt_ot_current_scope->event].name, &err, args[0]);
+		else
+			retval = flt_ot_parse_strdup(&(flt_ot_current_span->ctx_id), flt_ot_current_span->id, &err, args[0]);
+
+		if (flt_ot_current_span->ctx_id != NULL) {
+			flt_ot_current_span->ctx_id_len = strlen(flt_ot_current_span->ctx_id);
+
+			/*
+			 * Here is checked the context storage type; which, if
+			 * not explicitly specified, is set to HTTP headers.
+			 *
+			 * It is possible to use both types of context storage
+			 * at the same time.
+			 */
+			if (FLT_OT_ARG_ISVALID(2)) {
+				retval = flt_ot_parse_cfg_scope_ctx(args, 2, &err);
+				if (!(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(3))
+					retval = flt_ot_parse_cfg_scope_ctx(args, 3, &err);
+			} else {
+				flt_ot_current_span->ctx_flags = FLT_OT_CTX_USE_HEADERS;
+			}
+		}
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_EXTRACT) {
+		struct flt_ot_conf_context *conf_ctx;
+
+		/*
+		 * Here is checked the context storage type; which, if
+		 * not explicitly specified, is set to HTTP headers.
+		 */
+		conf_ctx = flt_ot_conf_context_init(args[1], linenum, &(flt_ot_current_scope->contexts), &err);
+		if (conf_ctx == NULL)
+			retval |= ERR_ABORT | ERR_ALERT;
+		else if (!FLT_OT_ARG_ISVALID(2))
+			conf_ctx->flags = FLT_OT_CTX_USE_HEADERS;
+		else if (strcmp(args[2], FLT_OT_PARSE_CTX_USE_HEADERS) == 0)
+			conf_ctx->flags = FLT_OT_CTX_USE_HEADERS;
+#ifdef USE_OT_VARS
+		else if (strcmp(args[2], FLT_OT_PARSE_CTX_USE_VARS) == 0)
+			conf_ctx->flags = FLT_OT_CTX_USE_VARS;
+#endif
+		else
+			FLT_OT_PARSE_ERR(&err, "'%s' : invalid context storage type", args[2]);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_FINISH) {
+		retval = flt_ot_parse_cfg_str(file, linenum, args, &(flt_ot_current_scope->finish), &err);
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_ACL) {
+		/* 'or' would be ambiguous with the ACL condition syntax. */
+		if (strcasecmp(args[1], "or") == 0)
+			FLT_OT_PARSE_ERR(&err, "'%s %s ...' : invalid ACL name", args[0], args[1]);
+		else if (parse_acl((const char **)args + 1, &(flt_ot_current_scope->acls), &err, &(flt_ot_current_config->proxy->conf.args), file, linenum) == NULL)
+			retval |= ERR_ABORT | ERR_ALERT;
+	}
+	else if (pdata->keyword == FLT_OT_PARSE_SCOPE_EVENT) {
+		/* Scope can only have one event defined. */
+		if (flt_ot_current_scope->event != FLT_OT_EVENT_REQ_NONE) {
+			FLT_OT_PARSE_ERR(&err, "'%s' : event already set", flt_ot_current_scope->id);
+		} else {
+			/* Check the event name. */
+			for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_event_data); i++)
+				if (strcmp(flt_ot_event_data[i].name, args[1]) == 0) {
+					flt_ot_current_scope->event = i;
+
+					break;
+				}
+
+			/*
+			 * The event can have some condition defined and this
+			 * is checked here.
+			 */
+			if (flt_ot_current_scope->event == FLT_OT_EVENT_REQ_NONE) {
+				FLT_OT_PARSE_ERR(&err, "'%s' : unknown event", args[1]);
+			}
+			else if (!FLT_OT_ARG_ISVALID(2)) {
+				/* Do nothing. */
+			}
+			else if ((strcmp(args[2], FLT_OT_CONDITION_IF) == 0) || (strcmp(args[2], FLT_OT_CONDITION_UNLESS) == 0)) {
+				/*
+				 * We will first try to build ACL condition using
+				 * local settings and then if that fails, using
+				 * global settings (from tracer block). If it
+				 * also fails, then try to use ACL defined in
+				 * the HAProxy configuration.
+				 */
+				flt_ot_current_scope->cond = flt_ot_parse_acl(file, linenum, flt_ot_current_config->proxy, (const char **)args + 2, &err, &(flt_ot_current_scope->acls), &(flt_ot_current_config->tracer->acls), &(flt_ot_current_config->proxy->acl), NULL);
+				if (flt_ot_current_scope->cond == NULL)
+					retval |= ERR_ABORT | ERR_ALERT;
+			}
+			else {
+				FLT_OT_PARSE_ERR(&err, "'%s' : expects either 'if' or 'unless' followed by a condition but found '%s'", args[1], args[2]);
+			}
+
+			if (!(retval & ERR_CODE))
+				FLT_OT_DBG(3, "event '%s'", args[1]);
+		}
+	}
+
+	FLT_OT_PARSE_IFERR_ALERT();
+
+	/* On error, release the partially built scope (and its spans). */
+	if ((retval & ERR_CODE) && (flt_ot_current_scope != NULL)) {
+		flt_ot_conf_scope_free(&flt_ot_current_scope);
+
+		flt_ot_current_span = NULL;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_post_parse_cfg_scope -
+ *
+ * ARGUMENTS
+ * This function takes no arguments.
+ *
+ * DESCRIPTION
+ * In this function the correctness of the complete scope block is examined.
+ * This does not mean that all elements are checked here, but only those for
+ * which it has not been possible to establish their complete correctness in
+ * the function flt_ot_parse_cfg_scope().
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_post_parse_cfg_scope(void)
+{
+	struct flt_ot_conf_span *span;
+	int retval = ERR_NONE;
+
+	FLT_OT_FUNC("");
+
+	if (flt_ot_current_scope != NULL) {
+		/* If span context inject is used, check that this is possible. */
+		list_for_each_entry(span, &(flt_ot_current_scope->spans), list) {
+			if ((span->ctx_id != NULL) && (span->ctx_flags & FLT_OT_CTX_USE_HEADERS) && !flt_ot_event_data[flt_ot_current_scope->event].flag_http_inject)
+				FLT_OT_POST_PARSE_ALERT("inject '%s' : cannot use on this event", span->cfg_line, span->ctx_id);
+		}
+
+		/* On error, release the scope and all of its contents. */
+		if (retval & ERR_CODE)
+			flt_ot_conf_scope_free(&flt_ot_current_scope);
+
+		/* The section is finished; drop the parsing-state pointers. */
+		flt_ot_current_scope = NULL;
+		flt_ot_current_span  = NULL;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_parse_cfg -
+ *
+ * ARGUMENTS
+ *   conf     - filter configuration (conf->cfg_file names the OT config file)
+ *   flt_name - name of the filter instance being configured
+ *   err      - variable to store the error message in, can be NULL
+ *
+ * DESCRIPTION
+ *   Parses the OpenTracing filter configuration file: temporarily replaces
+ *   HAProxy's registered configuration sections with the OT-specific
+ *   'ot-tracer', 'ot-group' and 'ot-scope' sections, runs readcfgfile()
+ *   on the file, then restores the previously registered sections.
+ *
+ * RETURN VALUE
+ *   Returns ERR_NONE (== 0) in case of success,
+ *   or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse_cfg(struct flt_ot_conf *conf, const char *flt_name, char **err)
+{
+	struct list backup_sections;
+	/* Pessimistic default: any early exit below reports a fatal error. */
+	int retval = ERR_ABORT | ERR_ALERT;
+
+	FLT_OT_FUNC("%p, \"%s\", %p:%p", conf, flt_name, FLT_OT_DPTR_ARGS(err));
+
+	flt_ot_current_config = conf;
+
+	/* Backup sections (so the global section registry can be restored afterwards). */
+	LIST_INIT(&backup_sections);
+	cfg_backup_sections(&backup_sections);
+
+	/* Register new OT sections and parse the OT filter configuration file. */
+	if (!cfg_register_section(FLT_OT_PARSE_SECTION_TRACER_ID, flt_ot_parse_cfg_tracer, flt_ot_post_parse_cfg_tracer))
+		/* Do nothing. */;
+	else if (!cfg_register_section(FLT_OT_PARSE_SECTION_GROUP_ID, flt_ot_parse_cfg_group, flt_ot_post_parse_cfg_group))
+		/* Do nothing. */;
+	else if (!cfg_register_section(FLT_OT_PARSE_SECTION_SCOPE_ID, flt_ot_parse_cfg_scope, flt_ot_post_parse_cfg_scope))
+		/* Do nothing. */;
+	else if (access(conf->cfg_file, R_OK) == -1)
+		FLT_OT_PARSE_ERR(err, "'%s' : %s", conf->cfg_file, strerror(errno));
+	else
+		/* Parse the OT config with the section handlers registered above. */
+		retval = readcfgfile(conf->cfg_file);
+
+	/* Unregister OT sections and restore previous sections. */
+	cfg_unregister_sections();
+	cfg_restore_sections(&backup_sections);
+
+	flt_ot_current_config = NULL;
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_parse -
+ *
+ * ARGUMENTS
+ * args -
+ * cur_arg -
+ * px -
+ * fconf -
+ * err -
+ * private -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * Returns ERR_NONE (== 0) in case of success,
+ * or a combination of ERR_* flags if an error is encountered.
+ */
+static int flt_ot_parse(char **args, int *cur_arg, struct proxy *px, struct flt_conf *fconf, char **err, void *private)
+{
+	struct flt_ot_conf *conf = NULL;
+	int pos, retval = ERR_NONE;
+
+#ifdef DEBUG_OT
+	/* One-time initialization of the debug state (start time and level). */
+	FLT_OT_RUN_ONCE(
+#	ifndef DEBUG_OT_SYSTIME
+		(void)memcpy(&(flt_ot_debug.start), &date, sizeof(flt_ot_debug.start));
+#	endif
+
+		flt_ot_debug.level = FLT_OT_DEBUG_LEVEL;
+	);
+#endif
+
+	FLT_OT_FUNC("%p, %p, %p, %p, %p:%p, %p", args, cur_arg, px, fconf, FLT_OT_DPTR_ARGS(err), private);
+
+#ifdef OTC_DBG_MEM
+	/* One-time initialization of the memory debugger. */
+	FLT_OT_RUN_ONCE(
+		if (otc_dbg_mem_init(&dbg_mem, dbg_mem_data, FLT_OT_TABLESIZE(dbg_mem_data), 0xff) == -1) {
+			FLT_OT_PARSE_ERR(err, "cannot initialize memory debugger");
+
+			FLT_OT_RETURN_INT(retval);
+		}
+	);
+#endif
+
+	FLT_OT_ARGS_DUMP();
+
+	conf = flt_ot_conf_init(px);
+	if (conf == NULL) {
+		FLT_OT_PARSE_ERR(err, "'%s' : out of memory", args[*cur_arg]);
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	/* Walk the 'filter opentracing ...' keywords following the filter name. */
+	for (pos = *cur_arg + 1; !(retval & ERR_CODE) && FLT_OT_ARG_ISVALID(pos); pos++) {
+		FLT_OT_DBG(3, "args[%d:2] : { '%s' '%s' }", pos, args[pos], args[pos + 1]);
+
+		if (strcmp(args[pos], FLT_OT_OPT_FILTER_ID) == 0) {
+			retval = flt_ot_parse_keyword(&(conf->id), args, *cur_arg, pos, err, "name");
+			pos++;
+		}
+		else if (strcmp(args[pos], FLT_OT_OPT_CONFIG) == 0) {
+			retval = flt_ot_parse_keyword(&(conf->cfg_file), args, *cur_arg, pos, err, "configuration file");
+			/* The OT configuration file is parsed as soon as it is named. */
+			if (!(retval & ERR_CODE))
+				retval = flt_ot_parse_cfg(conf, args[*cur_arg], err);
+			pos++;
+		}
+		else {
+			FLT_OT_PARSE_ERR(err, "'%s' : unknown keyword '%s'", args[*cur_arg], args[pos]);
+		}
+	}
+
+	/* If the OpenTracing filter ID is not set, use default name. */
+	if (!(retval & ERR_CODE) && (conf->id == NULL)) {
+		/* NOTE(review): the leading apostrophe before "no filter id set" looks
+		   unbalanced -- verify against the FLT_OT_FMT_TYPE/FLT_OT_FMT_NAME contents. */
+		ha_warning("parsing : " FLT_OT_FMT_TYPE FLT_OT_FMT_NAME "'no filter id set, using default id '%s'\n", FLT_OT_OPT_FILTER_ID_DEFAULT);
+
+		retval = flt_ot_parse_strdup(&(conf->id), FLT_OT_OPT_FILTER_ID_DEFAULT, err, args[*cur_arg]);
+	}
+
+	/* A configuration file is mandatory. */
+	if (!(retval & ERR_CODE) && (conf->cfg_file == NULL))
+		FLT_OT_PARSE_ERR(err, "'%s' : no configuration file specified", args[*cur_arg]);
+
+	if (retval & ERR_CODE) {
+		flt_ot_conf_free(&conf);
+	} else {
+		/* Hand the finished configuration over to the filter framework. */
+		fconf->id   = ot_flt_id;
+		fconf->ops  = &flt_ot_ops;
+		fconf->conf = conf;
+
+		*cur_arg = pos;
+
+		FLT_OT_DBG(3, "filter set: id '%s', config '%s'", conf->id, conf->cfg_file);
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/* Declare the filter parser for FLT_OT_OPT_NAME keyword. */
+static struct flt_kw_list flt_kws = { FLT_OT_SCOPE, { }, {
+		{ FLT_OT_OPT_NAME, flt_ot_parse, NULL },
+		{ NULL, NULL, NULL },
+	}
+};
+
+/* Register the filter keyword list with HAProxy at the STG_REGISTER init stage. */
+INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/pool.c b/addons/ot/src/pool.c
new file mode 100644
index 0000000..fbcdbfc
--- /dev/null
+++ b/addons/ot/src/pool.c
@@ -0,0 +1,223 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/***
+ * NAME
+ * flt_ot_pool_alloc -
+ *
+ * ARGUMENTS
+ * pool -
+ * size -
+ * flag_clear -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+void *flt_ot_pool_alloc(struct pool_head *pool, size_t size, bool flag_clear, char **err)
+{
+	void *retptr = NULL;
+
+	FLT_OT_FUNC("%p, %zu, %hhu, %p:%p", pool, size, flag_clear, FLT_OT_DPTR_ARGS(err));
+
+	/* Without a pool, fall back to a plain heap allocation. */
+	if (pool == NULL) {
+		retptr = FLT_OT_MALLOC(size);
+	} else {
+		retptr = pool_alloc(pool);
+		if (retptr != NULL)
+			FLT_OT_DBG(2, "POOL_ALLOC: %s:%d(%p %zu)", __func__, __LINE__, retptr, FLT_OT_DEREF(pool, size, size));
+	}
+
+	if (retptr == NULL) {
+		FLT_OT_ERR("out of memory");
+	} else if (flag_clear) {
+		/* The caller asked for zero-initialized memory. */
+		(void)memset(retptr, 0, size);
+	}
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_pool_strndup -
+ *
+ * ARGUMENTS
+ * pool -
+ * s -
+ * size -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+void *flt_ot_pool_strndup(struct pool_head *pool, const char *s, size_t size, char **err)
+{
+	void *retptr;
+
+	FLT_OT_FUNC("%p, \"%.*s\", %zu, %p:%p", pool, (int)size, s, size, FLT_OT_DPTR_ARGS(err));
+
+	/* Without a pool, fall back to a plain heap duplication. */
+	if (pool == NULL) {
+		retptr = FLT_OT_STRNDUP(s, size);
+	} else {
+		retptr = pool_alloc(pool);
+		if (retptr != NULL) {
+			/* The copy is truncated to fit the pool object, NUL included. */
+			size_t len = MIN(pool->size - 1, size);
+
+			(void)memcpy(retptr, s, len);
+			((uint8_t *)retptr)[len] = '\0';
+		}
+	}
+
+	if (retptr == NULL)
+		FLT_OT_ERR("out of memory");
+	else
+		FLT_OT_DBG(2, "POOL_STRNDUP: %s:%d(%p %zu)", __func__, __LINE__, retptr, FLT_OT_DEREF(pool, size, size));
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_pool_free -
+ *
+ * ARGUMENTS
+ * pool -
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_pool_free(struct pool_head *pool, void **ptr)
+{
+	FLT_OT_FUNC("%p, %p:%p", pool, FLT_OT_DPTR_ARGS(ptr));
+
+	/* Tolerate both a NULL handle and an already-released pointer. */
+	if ((ptr != NULL) && (*ptr != NULL)) {
+		FLT_OT_DBG(2, "POOL_FREE: %s:%d(%p %u)", __func__, __LINE__, *ptr, FLT_OT_DEREF(pool, size, 0));
+
+		if (pool == NULL)
+			FLT_OT_FREE(*ptr);
+		else
+			pool_free(pool, *ptr);
+
+		/* Clear the caller's pointer to prevent double free / use-after-free. */
+		*ptr = NULL;
+	}
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ * flt_ot_trash_alloc -
+ *
+ * ARGUMENTS
+ * flag_clear -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct buffer *flt_ot_trash_alloc(bool flag_clear, char **err)
+{
+	struct buffer *retptr;
+
+	FLT_OT_FUNC("%hhu, %p:%p", flag_clear, FLT_OT_DPTR_ARGS(err));
+
+#ifdef USE_TRASH_CHUNK
+	/* Take a chunk from HAProxy's trash pool. */
+	retptr = alloc_trash_chunk();
+	if (retptr != NULL)
+		FLT_OT_DBG(2, "TRASH_ALLOC: %s:%d(%p %zu)", __func__, __LINE__, retptr, retptr->size);
+#else
+	/* Fallback: allocate a private buffer of global.tune.bufsize bytes. */
+	retptr = FLT_OT_MALLOC(sizeof(*retptr));
+	if (retptr != NULL) {
+		chunk_init(retptr, FLT_OT_MALLOC(global.tune.bufsize), global.tune.bufsize);
+		/* If the area allocation failed, release the buffer struct as well. */
+		if (retptr->area == NULL)
+			FLT_OT_FREE_CLEAR(retptr);
+		else
+			*(retptr->area) = '\0';
+	}
+#endif
+
+	if (retptr == NULL)
+		FLT_OT_ERR("out of memory");
+	else if (flag_clear)
+		(void)memset(retptr->area, 0, retptr->size);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ * flt_ot_trash_free -
+ *
+ * ARGUMENTS
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_trash_free(struct buffer **ptr)
+{
+	FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+	/* Tolerate both a NULL handle and an already-released buffer. */
+	if ((ptr != NULL) && (*ptr != NULL)) {
+		FLT_OT_DBG(2, "TRASH_FREE: %s:%d(%p %zu)", __func__, __LINE__, *ptr, (*ptr)->size);
+
+#ifdef USE_TRASH_CHUNK
+		free_trash_chunk(*ptr);
+#else
+		/* Release the data area first, then the buffer struct itself. */
+		FLT_OT_FREE((*ptr)->area);
+		FLT_OT_FREE(*ptr);
+#endif
+
+		/* Clear the caller's pointer to prevent double free / use-after-free. */
+		*ptr = NULL;
+	}
+
+	FLT_OT_RETURN();
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/scope.c b/addons/ot/src/scope.c
new file mode 100644
index 0000000..efe8fe2
--- /dev/null
+++ b/addons/ot/src/scope.c
@@ -0,0 +1,634 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+/*
+ * Memory pools for the OT scope structures.  Each pool is registered only
+ * when the corresponding USE_POOL_OT_* macro is defined; otherwise the
+ * pool head stays NULL and the allocation helpers fall back to the plain
+ * allocator (see the pool==NULL branches in pool.c).
+ */
+static struct pool_head *pool_head_ot_scope_span __read_mostly = NULL;
+static struct pool_head *pool_head_ot_scope_context __read_mostly = NULL;
+static struct pool_head *pool_head_ot_runtime_context __read_mostly = NULL;
+
+#ifdef USE_POOL_OT_SCOPE_SPAN
+REGISTER_POOL(&pool_head_ot_scope_span, "ot_scope_span", sizeof(struct flt_ot_scope_span));
+#endif
+#ifdef USE_POOL_OT_SCOPE_CONTEXT
+REGISTER_POOL(&pool_head_ot_scope_context, "ot_scope_context", sizeof(struct flt_ot_scope_context));
+#endif
+#ifdef USE_POOL_OT_RUNTIME_CONTEXT
+REGISTER_POOL(&pool_head_ot_runtime_context, "ot_runtime_context", sizeof(struct flt_ot_runtime_context));
+#endif
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ *   flt_ot_pools_info - print the size of the memory pools used
+ *
+ * ARGUMENTS
+ *   This function takes no arguments.
+ *
+ * DESCRIPTION
+ *   Debug helper: prints the element size of every memory pool the OT
+ *   filter may use, depending on which USE_POOL_* / USE_TRASH_CHUNK
+ *   macros are defined.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_pools_info(void)
+{
+	/*
+	 * In case we have some error in the configuration file,
+	 * it is possible that this pool was not initialized.
+	 */
+#ifdef USE_POOL_BUFFER
+	FLT_OT_DBG(2, "sizeof_pool(buffer) = %u", FLT_OT_DEREF(pool_head_buffer, size, 0));
+#endif
+#ifdef USE_TRASH_CHUNK
+	FLT_OT_DBG(2, "sizeof_pool(trash) = %u", FLT_OT_DEREF(pool_head_trash, size, 0));
+#endif
+
+#ifdef USE_POOL_OT_SCOPE_SPAN
+	FLT_OT_DBG(2, "sizeof_pool(ot_scope_span) = %u", pool_head_ot_scope_span->size);
+#endif
+#ifdef USE_POOL_OT_SCOPE_CONTEXT
+	FLT_OT_DBG(2, "sizeof_pool(ot_scope_context) = %u", pool_head_ot_scope_context->size);
+#endif
+#ifdef USE_POOL_OT_RUNTIME_CONTEXT
+	FLT_OT_DBG(2, "sizeof_pool(ot_runtime_context) = %u", pool_head_ot_runtime_context->size);
+#endif
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ *   flt_ot_runtime_context_init - initialize the filter runtime context
+ *
+ * ARGUMENTS
+ *   s   - stream the filter is attached to
+ *   f   - filter instance
+ *   err - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   Allocates and initializes the per-stream runtime context: copies the
+ *   relevant tracer flags from the filter configuration, initializes the
+ *   span and context lists, and generates a UUID for the stream.
+ *
+ * RETURN VALUE
+ *   Pointer to the new runtime context, or NULL if out of memory.
+ */
+struct flt_ot_runtime_context *flt_ot_runtime_context_init(struct stream *s, struct filter *f, char **err)
+{
+	const struct flt_ot_conf *conf = FLT_OT_CONF(f);
+	struct buffer uuid;
+	struct flt_ot_runtime_context *retptr = NULL;
+
+	FLT_OT_FUNC("%p, %p, %p:%p", s, f, FLT_OT_DPTR_ARGS(err));
+
+	retptr = flt_ot_pool_alloc(pool_head_ot_runtime_context, sizeof(*retptr), 1, err);
+	if (retptr == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+
+	retptr->stream        = s;
+	retptr->filter        = f;
+	retptr->flag_harderr  = conf->tracer->flag_harderr;
+	retptr->flag_disabled = conf->tracer->flag_disabled;
+	retptr->logging       = conf->tracer->logging;
+	LIST_INIT(&(retptr->spans));
+	LIST_INIT(&(retptr->contexts));
+
+	/* Generate the UUID directly into the context's embedded buffer. */
+	uuid = b_make(retptr->uuid, sizeof(retptr->uuid), 0, 0);
+	ha_generate_uuid(&uuid);
+
+#ifdef USE_OT_VARS
+	/*
+	 * The HAProxy variable 'sess.ot.uuid' is registered here,
+	 * after which its value is set to runtime context UUID.
+	 */
+	if (flt_ot_var_register(FLT_OT_VAR_UUID, err) != -1)
+		(void)flt_ot_var_set(s, FLT_OT_VAR_UUID, retptr->uuid, SMP_OPT_DIR_REQ, err);
+#endif
+
+	FLT_OT_DBG_RUNTIME_CONTEXT("session context: ", retptr);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_runtime_context_free - release the filter runtime context
+ *
+ * ARGUMENTS
+ *   f - filter instance whose context (f->ctx) is to be released
+ *
+ * DESCRIPTION
+ *   Finishes all remaining spans (with a single, common finish timestamp),
+ *   frees all remaining scope contexts and finally returns the runtime
+ *   context itself to its pool.  Does nothing if f->ctx is NULL.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_runtime_context_free(struct filter *f)
+{
+	struct flt_ot_runtime_context *rt_ctx = f->ctx;
+
+	FLT_OT_FUNC("%p", f);
+
+	if (rt_ctx == NULL)
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG_RUNTIME_CONTEXT("session context: ", rt_ctx);
+
+	if (!LIST_ISEMPTY(&(rt_ctx->spans))) {
+		struct timespec           ts;
+		struct flt_ot_scope_span *span, *span_back;
+
+		/* All spans should be completed at the same time. */
+		(void)clock_gettime(CLOCK_MONOTONIC, &ts);
+
+		list_for_each_entry_safe(span, span_back, &(rt_ctx->spans), list) {
+			ot_span_finish(&(span->span), &ts, NULL, NULL, NULL);
+			flt_ot_scope_span_free(&span);
+		}
+	}
+
+	if (!LIST_ISEMPTY(&(rt_ctx->contexts))) {
+		struct flt_ot_scope_context *ctx, *ctx_back;
+
+		list_for_each_entry_safe(ctx, ctx_back, &(rt_ctx->contexts), list)
+			flt_ot_scope_context_free(&ctx);
+	}
+
+	flt_ot_pool_free(pool_head_ot_runtime_context, &(f->ctx));
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_span_init - find or create a scope span
+ *
+ * ARGUMENTS
+ *   rt_ctx     - runtime context of the stream
+ *   id         - span name
+ *   id_len     - length of the span name
+ *   ref_type   - type of the reference to the parent span/context
+ *   ref_id     - name of the referenced span/context (may be NULL)
+ *   ref_id_len - length of the referenced name
+ *   dir        - channel direction (SMP_OPT_DIR_REQ/RES)
+ *   err        - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   If a span with the given name already exists in the runtime context
+ *   it is returned as is.  Otherwise a new span descriptor is allocated,
+ *   the referenced span or context (if any) is looked up by name, and
+ *   the descriptor is linked into the runtime context's span list.
+ *
+ * RETURN VALUE
+ *   Pointer to the (existing or new) span descriptor, or NULL on error.
+ */
+struct flt_ot_scope_span *flt_ot_scope_span_init(struct flt_ot_runtime_context *rt_ctx, const char *id, size_t id_len, otc_span_reference_type_t ref_type, const char *ref_id, size_t ref_id_len, uint dir, char **err)
+{
+	struct otc_span             *ref_span = NULL;
+	struct otc_span_context     *ref_ctx = NULL;
+	struct flt_ot_scope_span    *span, *retptr = NULL;
+	struct flt_ot_scope_context *ctx;
+
+	FLT_OT_FUNC("%p, \"%s\", %zu, %d, \"%s\", %zu, %u, %p:%p", rt_ctx, id, id_len, ref_type, ref_id, ref_id_len, dir, FLT_OT_DPTR_ARGS(err));
+
+	if ((rt_ctx == NULL) || (id == NULL))
+		FLT_OT_RETURN_PTR(retptr);
+
+	/* Reuse an already existing span with the same name, if any. */
+	list_for_each_entry(span, &(rt_ctx->spans), list)
+		if ((span->id_len == id_len) && (memcmp(span->id, id, id_len) == 0)) {
+			FLT_OT_DBG(2, "found span %p", span);
+
+			FLT_OT_RETURN_PTR(span);
+		}
+
+	if (ref_id != NULL) {
+		/* The reference is searched first among the spans, then
+		 * among the contexts; it is an error if it is not found. */
+		list_for_each_entry(span, &(rt_ctx->spans), list)
+			if ((span->id_len == ref_id_len) && (memcmp(span->id, ref_id, ref_id_len) == 0)) {
+				ref_span = span->span;
+
+				break;
+			}
+
+		if (ref_span != NULL) {
+			FLT_OT_DBG(2, "found referenced span %p", span);
+		} else {
+			list_for_each_entry(ctx, &(rt_ctx->contexts), list)
+				if ((ctx->id_len == ref_id_len) && (memcmp(ctx->id, ref_id, ref_id_len) == 0)) {
+					ref_ctx = ctx->context;
+
+					break;
+				}
+
+			if (ref_ctx != NULL) {
+				FLT_OT_DBG(2, "found referenced context %p", ctx);
+			} else {
+				FLT_OT_ERR("cannot find referenced span/context '%s'", ref_id);
+
+				FLT_OT_RETURN_PTR(retptr);
+			}
+		}
+	}
+
+	retptr = flt_ot_pool_alloc(pool_head_ot_scope_span, sizeof(*retptr), 1, err);
+	if (retptr == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+
+	retptr->id          = id;
+	retptr->id_len      = id_len;
+	retptr->smp_opt_dir = dir;
+	retptr->ref_type    = ref_type;
+	retptr->ref_span    = ref_span;
+	retptr->ref_ctx     = ref_ctx;
+	LIST_INSERT(&(rt_ctx->spans), &(retptr->list));
+
+	FLT_OT_DBG_SCOPE_SPAN("new span ", retptr);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_span_free - release a scope span descriptor
+ *
+ * ARGUMENTS
+ *   ptr - address of the span descriptor pointer; set to NULL on return
+ *
+ * DESCRIPTION
+ *   Unlinks the descriptor from the runtime context's span list and
+ *   returns it to its pool.  A descriptor whose underlying OT span is
+ *   still active (span != NULL) is left untouched.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_scope_span_free(struct flt_ot_scope_span **ptr)
+{
+	FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+	if ((ptr == NULL) || (*ptr == NULL))
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG_SCOPE_SPAN("", *ptr);
+
+	/* If the span is still active, do nothing. */
+	if ((*ptr)->span != NULL) {
+		FLT_OT_DBG(2, "cannot finish active span");
+
+		FLT_OT_RETURN();
+	}
+
+	FLT_OT_LIST_DEL(&((*ptr)->list));
+	flt_ot_pool_free(pool_head_ot_scope_span, (void **)ptr);
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_context_init - find or create a scope context
+ *
+ * ARGUMENTS
+ *   rt_ctx   - runtime context of the stream
+ *   tracer   - tracer used to extract the span context
+ *   id       - context name
+ *   id_len   - length of the context name
+ *   text_map - HTTP headers from which the span context is extracted
+ *   dir      - channel direction (SMP_OPT_DIR_REQ/RES)
+ *   err      - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   If a context with the given name already exists in the runtime
+ *   context it is returned as is.  Otherwise a new descriptor is
+ *   allocated, the span context is extracted from the supplied HTTP
+ *   headers, and the descriptor is linked into the context list.
+ *
+ * RETURN VALUE
+ *   Pointer to the (existing or new) context descriptor, or NULL on error.
+ */
+struct flt_ot_scope_context *flt_ot_scope_context_init(struct flt_ot_runtime_context *rt_ctx, struct otc_tracer *tracer, const char *id, size_t id_len, const struct otc_text_map *text_map, uint dir, char **err)
+{
+	struct otc_http_headers_reader  reader;
+	struct otc_span_context        *span_ctx;
+	struct flt_ot_scope_context    *retptr = NULL;
+
+	FLT_OT_FUNC("%p, %p, \"%s\", %zu, %p, %u, %p:%p", rt_ctx, tracer, id, id_len, text_map, dir, FLT_OT_DPTR_ARGS(err));
+
+	if ((rt_ctx == NULL) || (tracer == NULL) || (id == NULL) || (text_map == NULL))
+		FLT_OT_RETURN_PTR(retptr);
+
+	/* Reuse an already existing context with the same name, if any. */
+	list_for_each_entry(retptr, &(rt_ctx->contexts), list)
+		if ((retptr->id_len == id_len) && (memcmp(retptr->id, id, id_len) == 0)) {
+			FLT_OT_DBG(2, "found context %p", retptr);
+
+			FLT_OT_RETURN_PTR(retptr);
+		}
+
+	retptr = flt_ot_pool_alloc(pool_head_ot_scope_context, sizeof(*retptr), 1, err);
+	if (retptr == NULL)
+		FLT_OT_RETURN_PTR(retptr);
+
+	span_ctx = ot_extract_http_headers(tracer, &reader, text_map, err);
+	if (span_ctx == NULL) {
+		/* Extraction failed: undo the allocation and return NULL. */
+		flt_ot_scope_context_free(&retptr);
+
+		FLT_OT_RETURN_PTR(retptr);
+	}
+
+	retptr->id          = id;
+	retptr->id_len      = id_len;
+	retptr->smp_opt_dir = dir;
+	retptr->context     = span_ctx;
+	LIST_INSERT(&(rt_ctx->contexts), &(retptr->list));
+
+	FLT_OT_DBG_SCOPE_CONTEXT("new context ", retptr);
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_context_free - release a scope context descriptor
+ *
+ * ARGUMENTS
+ *   ptr - address of the context descriptor pointer; set to NULL on return
+ *
+ * DESCRIPTION
+ *   Destroys the embedded OT span context (if any), unlinks the
+ *   descriptor from the runtime context's list and returns it to its
+ *   pool.  Accepts a NULL pointer or a pointer to NULL.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_scope_context_free(struct flt_ot_scope_context **ptr)
+{
+	FLT_OT_FUNC("%p:%p", FLT_OT_DPTR_ARGS(ptr));
+
+	if ((ptr == NULL) || (*ptr == NULL))
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG_SCOPE_CONTEXT("", *ptr);
+
+	if ((*ptr)->context != NULL)
+		(*ptr)->context->destroy(&((*ptr)->context));
+
+	FLT_OT_LIST_DEL(&((*ptr)->list));
+	flt_ot_pool_free(pool_head_ot_scope_context, (void **)ptr);
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_data_free - release the contents of a scope data block
+ *
+ * ARGUMENTS
+ *   ptr - scope data whose contents are to be released
+ *
+ * DESCRIPTION
+ *   Frees the string values of all tags and log fields, destroys the
+ *   baggage text map and zeroes the whole structure.  The structure
+ *   itself is not freed (it is embedded in its owner).
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_scope_data_free(struct flt_ot_scope_data *ptr)
+{
+	int i;
+
+	FLT_OT_FUNC("%p", ptr);
+
+	if (ptr == NULL)
+		FLT_OT_RETURN();
+
+	FLT_OT_DBG_SCOPE_DATA("", ptr);
+
+	/* Only string values own heap memory; other value types are inline. */
+	for (i = 0; i < ptr->num_tags; i++)
+		if (ptr->tags[i].value.type == otc_value_string)
+			FLT_OT_FREE_VOID(ptr->tags[i].value.value.string_value);
+	otc_text_map_destroy(&(ptr->baggage), OTC_TEXT_MAP_FREE_VALUE);
+	for (i = 0; i < ptr->num_log_fields; i++)
+		if (ptr->log_fields[i].value.type == otc_value_string)
+			FLT_OT_FREE_VOID(ptr->log_fields[i].value.value.string_value);
+
+	(void)memset(ptr, 0, sizeof(*ptr));
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_finish_mark - mark spans/contexts to be finished
+ *
+ * ARGUMENTS
+ *   rt_ctx - runtime context of the stream
+ *   id     - span/context name, or one of the FLT_OT_SCOPE_SPAN_FINISH_*
+ *            group keywords (all / REQuest channel / RESponse channel)
+ *   id_len - length of the name
+ *
+ * DESCRIPTION
+ *   Sets the flag_finish member on the matching span(s) and context(s);
+ *   the actual finishing is performed later by
+ *   flt_ot_scope_finish_marked().
+ *
+ * RETURN VALUE
+ *   Total number of marked spans and contexts.
+ */
+int flt_ot_scope_finish_mark(const struct flt_ot_runtime_context *rt_ctx, const char *id, size_t id_len)
+{
+	struct flt_ot_scope_span    *span;
+	struct flt_ot_scope_context *ctx;
+	int                          span_cnt = 0, ctx_cnt = 0, retval;
+
+	FLT_OT_FUNC("%p, \"%s\", %zu", rt_ctx, id, id_len);
+
+	if (FLT_OT_STR_CMP(FLT_OT_SCOPE_SPAN_FINISH_ALL, id, id_len)) {
+		list_for_each_entry(span, &(rt_ctx->spans), list) {
+			span->flag_finish = 1;
+			span_cnt++;
+		}
+
+		list_for_each_entry(ctx, &(rt_ctx->contexts), list) {
+			ctx->flag_finish = 1;
+			ctx_cnt++;
+		}
+
+		FLT_OT_DBG(2, "marked %d span(s), %d context(s)", span_cnt, ctx_cnt);
+	}
+	else if (FLT_OT_STR_CMP(FLT_OT_SCOPE_SPAN_FINISH_REQ, id, id_len)) {
+		list_for_each_entry(span, &(rt_ctx->spans), list)
+			if (span->smp_opt_dir == SMP_OPT_DIR_REQ) {
+				span->flag_finish = 1;
+				span_cnt++;
+			}
+
+		/* BUG FIX: this loop previously incremented span_cnt, which
+		 * inflated the span count and left ctx_cnt at 0 (compare the
+		 * RESponse branch below). */
+		list_for_each_entry(ctx, &(rt_ctx->contexts), list)
+			if (ctx->smp_opt_dir == SMP_OPT_DIR_REQ) {
+				ctx->flag_finish = 1;
+				ctx_cnt++;
+			}
+
+		FLT_OT_DBG(2, "marked REQuest channel %d span(s), %d context(s)", span_cnt, ctx_cnt);
+	}
+	else if (FLT_OT_STR_CMP(FLT_OT_SCOPE_SPAN_FINISH_RES, id, id_len)) {
+		list_for_each_entry(span, &(rt_ctx->spans), list)
+			if (span->smp_opt_dir == SMP_OPT_DIR_RES) {
+				span->flag_finish = 1;
+				span_cnt++;
+			}
+
+		list_for_each_entry(ctx, &(rt_ctx->contexts), list)
+			if (ctx->smp_opt_dir == SMP_OPT_DIR_RES) {
+				ctx->flag_finish = 1;
+				ctx_cnt++;
+			}
+
+		FLT_OT_DBG(2, "marked RESponse channel %d span(s), %d context(s)", span_cnt, ctx_cnt);
+	}
+	else {
+		/* A single span/context selected by name. */
+		list_for_each_entry(span, &(rt_ctx->spans), list)
+			if ((span->id_len == id_len) && (memcmp(span->id, id, id_len) == 0)) {
+				span->flag_finish = 1;
+				span_cnt++;
+
+				break;
+			}
+
+		list_for_each_entry(ctx, &(rt_ctx->contexts), list)
+			if ((ctx->id_len == id_len) && (memcmp(ctx->id, id, id_len) == 0)) {
+				ctx->flag_finish = 1;
+				ctx_cnt++;
+
+				break;
+			}
+
+		if (span_cnt > 0)
+			FLT_OT_DBG(2, "marked span '%s'", id);
+		if (ctx_cnt > 0)
+			FLT_OT_DBG(2, "marked context '%s'", id);
+		if ((span_cnt + ctx_cnt) == 0)
+			FLT_OT_DBG(2, "cannot find span/context '%s'", id);
+	}
+
+	retval = span_cnt + ctx_cnt;
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_finish_marked - finish marked spans and contexts
+ *
+ * ARGUMENTS
+ *   rt_ctx    - runtime context of the stream
+ *   ts_finish - finish timestamp applied to all marked spans
+ *
+ * DESCRIPTION
+ *   Finish marked spans.  Every span/context whose flag_finish member
+ *   was set (see flt_ot_scope_finish_mark()) is finished/destroyed and
+ *   its flag is cleared; the descriptors themselves remain in the lists.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_scope_finish_marked(const struct flt_ot_runtime_context *rt_ctx, const struct timespec *ts_finish)
+{
+	struct flt_ot_scope_span    *span;
+	struct flt_ot_scope_context *ctx;
+
+	FLT_OT_FUNC("%p, %p", rt_ctx, ts_finish);
+
+	list_for_each_entry(span, &(rt_ctx->spans), list)
+		if (span->flag_finish) {
+			FLT_OT_DBG_SCOPE_SPAN("finishing span ", span);
+
+			ot_span_finish(&(span->span), ts_finish, NULL, NULL, NULL);
+
+			span->flag_finish = 0;
+		}
+
+	list_for_each_entry(ctx, &(rt_ctx->contexts), list)
+		if (ctx->flag_finish) {
+			FLT_OT_DBG_SCOPE_CONTEXT("finishing context ", ctx);
+
+			if (ctx->context != NULL)
+				ctx->context->destroy(&(ctx->context));
+
+			ctx->flag_finish = 0;
+		}
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_scope_free_unused - release finished spans and contexts
+ *
+ * ARGUMENTS
+ *   rt_ctx - runtime context of the stream
+ *   chn    - channel used to remove the HTTP headers associated with
+ *            released contexts
+ *
+ * DESCRIPTION
+ *   Releases every span descriptor whose OT span has been finished
+ *   (span == NULL) and every context descriptor whose OT context has
+ *   been destroyed (context == NULL), together with the HTTP headers
+ *   and variables associated with the released contexts.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_scope_free_unused(struct flt_ot_runtime_context *rt_ctx, struct channel *chn)
+{
+	/* Log both arguments (the original trace omitted <chn>). */
+	FLT_OT_FUNC("%p, %p", rt_ctx, chn);
+
+	if (rt_ctx == NULL)
+		FLT_OT_RETURN();
+
+	if (!LIST_ISEMPTY(&(rt_ctx->spans))) {
+		struct flt_ot_scope_span *span, *span_back;
+
+		list_for_each_entry_safe(span, span_back, &(rt_ctx->spans), list)
+			if (span->span == NULL)
+				flt_ot_scope_span_free(&span);
+	}
+
+	if (!LIST_ISEMPTY(&(rt_ctx->contexts))) {
+		struct flt_ot_scope_context *ctx, *ctx_back;
+
+		list_for_each_entry_safe(ctx, ctx_back, &(rt_ctx->contexts), list)
+			if (ctx->context == NULL) {
+				/*
+				 * All headers and variables associated with
+				 * the context in question should be deleted.
+				 */
+				(void)flt_ot_http_headers_remove(chn, ctx->id, NULL);
+#ifdef USE_OT_VARS
+				(void)flt_ot_vars_unset(rt_ctx->stream, FLT_OT_VARS_SCOPE, ctx->id, ctx->smp_opt_dir, NULL);
+#endif
+
+				flt_ot_scope_context_free(&ctx);
+			}
+	}
+
+	FLT_OT_DBG_RUNTIME_CONTEXT("session context: ", rt_ctx);
+
+	FLT_OT_RETURN();
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/util.c b/addons/ot/src/util.c
new file mode 100644
index 0000000..fd04016
--- /dev/null
+++ b/addons/ot/src/util.c
@@ -0,0 +1,815 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ *   flt_ot_args_dump - print the configuration line arguments
+ *
+ * ARGUMENTS
+ *   args - array of configuration arguments (may contain NULL entries
+ *          for blank arguments, see flt_ot_args_count())
+ *
+ * DESCRIPTION
+ *   Debug helper: prints all arguments of a configuration line on stderr.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_args_dump(char **args)
+{
+	int i, argc;
+
+	/* Guard against a NULL array: flt_ot_args_count() would return 0,
+	 * but args[0] below would still be dereferenced. */
+	if (args == NULL)
+		return;
+
+	argc = flt_ot_args_count(args);
+
+	/* Individual entries may be NULL within the argc range (blank
+	 * arguments); passing NULL to '%s' is undefined behavior, so
+	 * print them as empty strings (as flt_ot_args_to_str() does). */
+	(void)fprintf(stderr, FLT_OT_DBG_FMT("%.*sargs[%d]: { '%s' "), dbg_indent_level, FLT_OT_DBG_INDENT, argc, (args[0] == NULL) ? "" : args[0]);
+
+	for (i = 1; i < argc; i++)
+		(void)fprintf(stderr, "'%s' ", (args[i] == NULL) ? "" : args[i]);
+
+	(void)fprintf(stderr, "}\n");
+}
+
+
+/***
+ * NAME
+ *   flt_ot_filters_dump - print all configured OT filters
+ *
+ * ARGUMENTS
+ *   This function takes no arguments.
+ *
+ * DESCRIPTION
+ *   Debug helper: walks the global proxy list and prints, for each proxy,
+ *   the OT filters attached to it (identified via ot_flt_id).
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_filters_dump(void)
+{
+	struct flt_conf *fconf;
+	struct proxy    *px;
+
+	FLT_OT_FUNC("");
+
+	for (px = proxies_list; px != NULL; px = px->next) {
+		FLT_OT_DBG(2, "proxy '%s'", px->id);
+
+		list_for_each_entry(fconf, &(px->filter_configs), list)
+			if (fconf->id == ot_flt_id) {
+				struct flt_ot_conf *conf = fconf->conf;
+
+				FLT_OT_DBG(2, "  OT filter '%s'", conf->id);
+			}
+	}
+
+	FLT_OT_RETURN();
+}
+
+
+/***
+ * NAME
+ *   flt_ot_chn_label - channel direction label
+ *
+ * ARGUMENTS
+ *   chn - channel to classify
+ *
+ * DESCRIPTION
+ *   Returns a human-readable label for the channel direction, used in
+ *   debug output.
+ *
+ * RETURN VALUE
+ *   "RESponse" for a response channel, "REQuest" otherwise.
+ */
+const char *flt_ot_chn_label(const struct channel *chn)
+{
+	if (chn->flags & CF_ISRESP)
+		return "RESponse";
+
+	return "REQuest";
+}
+
+
+/***
+ * NAME
+ *   flt_ot_pr_mode - proxy mode label
+ *
+ * ARGUMENTS
+ *   s - stream whose effective proxy mode is queried
+ *
+ * DESCRIPTION
+ *   Picks the backend proxy when one has been assigned to the stream,
+ *   the frontend otherwise, and reports its mode as a string.
+ *
+ * RETURN VALUE
+ *   "HTTP" if the proxy runs in HTTP mode, "TCP" otherwise.
+ */
+const char *flt_ot_pr_mode(const struct stream *s)
+{
+	struct proxy *px;
+
+	px = (s->flags & SF_BE_ASSIGNED) ? s->be : strm_fe(s);
+	if (px->mode == PR_MODE_HTTP)
+		return "HTTP";
+
+	return "TCP";
+}
+
+
+/***
+ * NAME
+ *   flt_ot_stream_pos - stream position label
+ *
+ * ARGUMENTS
+ *   s - stream to classify
+ *
+ * DESCRIPTION
+ *   Reports whether the stream has already been assigned a backend.
+ *
+ * RETURN VALUE
+ *   "backend" if a backend is assigned, "frontend" otherwise.
+ */
+const char *flt_ot_stream_pos(const struct stream *s)
+{
+	if (s->flags & SF_BE_ASSIGNED)
+		return "backend";
+
+	return "frontend";
+}
+
+
+/***
+ * NAME
+ *   flt_ot_type - filter attachment label
+ *
+ * ARGUMENTS
+ *   f - filter to classify
+ *
+ * DESCRIPTION
+ *   Reports whether the filter is attached on the backend or the
+ *   frontend side.
+ *
+ * RETURN VALUE
+ *   "backend" for a backend filter, "frontend" otherwise.
+ */
+const char *flt_ot_type(const struct filter *f)
+{
+	if (f->flags & FLT_FL_IS_BACKEND_FILTER)
+		return "backend";
+
+	return "frontend";
+}
+
+
+/***
+ * NAME
+ *   flt_ot_analyzer - analyzer bit name
+ *
+ * ARGUMENTS
+ *   an_bit - analyzer bit to translate
+ *
+ * DESCRIPTION
+ *   Translates an analyzer bit into its symbolic name, using a lookup
+ *   table generated from the FLT_OT_AN_DEFINES macro list.
+ *
+ * RETURN VALUE
+ *   The analyzer name, or "invalid an_bit" if the bit is unknown.
+ */
+const char *flt_ot_analyzer(uint an_bit)
+{
+#define FLT_OT_AN_DEF(a)   { a, #a },
+	static const struct {
+		uint        an_bit;
+		const char *str;
+	} flt_ot_an[] = { FLT_OT_AN_DEFINES };
+#undef FLT_OT_AN_DEF
+	const char *retptr = "invalid an_bit";
+	int         i;
+
+	for (i = 0; i < FLT_OT_TABLESIZE(flt_ot_an); i++)
+		if (flt_ot_an[i].an_bit == an_bit) {
+			retptr = flt_ot_an[i].str;
+
+			break;
+		}
+
+	return retptr;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_str_hex - hexadecimal dump of a memory block
+ *
+ * ARGUMENTS
+ *   data - data to convert
+ *   size - number of bytes to convert
+ *
+ * DESCRIPTION
+ *   Converts up to sizeof(retbuf)-2 hex digits worth of <data> into a
+ *   thread-local static buffer (two hex digits per input byte); longer
+ *   input is silently truncated.
+ *
+ * RETURN VALUE
+ *   The hex string, "(null)" for NULL data, or "()" for zero size.
+ *   The returned buffer is overwritten by the next call on this thread.
+ */
+const char *flt_ot_str_hex(const void *data, size_t size)
+{
+	static THREAD_LOCAL char  retbuf[BUFSIZ];
+	const uint8_t            *ptr = data;
+	size_t                    i;
+
+	if (data == NULL)
+		return "(null)";
+	else if (size == 0)
+		return "()";
+
+	/* <size> is doubled here because each byte produces two digits. */
+	for (i = 0, size <<= 1; (i < (sizeof(retbuf) - 2)) && (i < size); ptr++) {
+		retbuf[i++] = FLT_OT_NIBBLE_TO_HEX(*ptr >> 4);
+		retbuf[i++] = FLT_OT_NIBBLE_TO_HEX(*ptr & 0x0f);
+	}
+
+	retbuf[i] = '\0';
+
+	return retbuf;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_str_ctrl - printable dump of a memory block
+ *
+ * ARGUMENTS
+ *   data - data to convert
+ *   size - number of bytes to convert
+ *
+ * DESCRIPTION
+ *   Copies <data> into a thread-local static buffer, replacing every
+ *   non-printable byte (outside 0x20..0x7e) with a dot; longer input is
+ *   silently truncated.
+ *
+ * RETURN VALUE
+ *   The printable string, "(null)" for NULL data, or "()" for zero size.
+ *   The returned buffer is overwritten by the next call on this thread.
+ */
+const char *flt_ot_str_ctrl(const void *data, size_t size)
+{
+	static THREAD_LOCAL char  retbuf[BUFSIZ];
+	const uint8_t            *ptr = data;
+	size_t                    i, n = 0;
+
+	if (data == NULL)
+		return "(null)";
+	else if (size == 0)
+		return "()";
+
+	for (i = 0; (n < (sizeof(retbuf) - 1)) && (i < size); i++)
+		retbuf[n++] = ((ptr[i] >= 0x20) && (ptr[i] <= 0x7e)) ? ptr[i] : '.';
+
+	retbuf[n] = '\0';
+
+	return retbuf;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_list_debug - describe a list head for debug output
+ *
+ * ARGUMENTS
+ *   head - list to describe
+ *
+ * DESCRIPTION
+ *   Formats a short description of the list: NULL, empty, a single
+ *   element, or the prev/next pointers plus the element count.  Uses a
+ *   small ring of thread-local buffers (FLT_OT_BUFFER_THR) so several
+ *   results can appear in a single debug statement.
+ *
+ * RETURN VALUE
+ *   Pointer to a thread-local buffer with the description.
+ */
+const char *flt_ot_list_debug(const struct list *head)
+{
+	FLT_OT_BUFFER_THR(retbuf, 4, 64, retptr);
+
+	if ((head == NULL) || LIST_ISEMPTY(head)) {
+		(void)strncpy(retptr, (head == NULL) ? "{ null list }" : "{ empty list }", sizeof(retbuf[0]));
+	}
+	else if (head->p == head->n) {
+		(void)snprintf(retptr, sizeof(retbuf[0]), "{ %p * 1 }", head->p);
+	}
+	else {
+		const struct list *ptr;
+		size_t             count = 0;
+
+		/* Walk the list only to count its elements. */
+		for (ptr = head->n; ptr != head; ptr = ptr->n, count++);
+
+		(void)snprintf(retptr, sizeof(retbuf[0]), "{ %p %p %zu }", head->p, head->n, count);
+	}
+
+	return (retptr);
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ *   flt_ot_chunk_add - append data to a chunk
+ *
+ * ARGUMENTS
+ *   chk - chunk to append to; its area is lazily allocated on first use
+ *   src - data to append
+ *   n   - number of bytes to append
+ *   err - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   Appends <n> bytes of <src> to the chunk, allocating an area of
+ *   global.tune.bufsize bytes if the chunk does not have one yet.
+ *   Fails if the remaining space is insufficient.
+ *
+ * RETURN VALUE
+ *   The new amount of data in the chunk, or -1 on error.
+ */
+ssize_t flt_ot_chunk_add(struct buffer *chk, const void *src, size_t n, char **err)
+{
+	FLT_OT_FUNC("%p, %p, %zu, %p:%p", chk, src, n, FLT_OT_DPTR_ARGS(err));
+
+	if ((chk == NULL) || (src == NULL))
+		FLT_OT_RETURN_EX(-1, ssize_t, "%ld");
+
+	if (chk->area == NULL)
+		chunk_init(chk, FLT_OT_CALLOC(1, global.tune.bufsize), global.tune.bufsize);
+
+	if (chk->area == NULL) {
+		FLT_OT_ERR("out of memory");
+
+		FLT_OT_RETURN_EX(-1, ssize_t, "%ld");
+	}
+	else if (n > (chk->size - chk->data)) {
+		FLT_OT_ERR("chunk size too small");
+
+		FLT_OT_RETURN_EX(-1, ssize_t, "%ld");
+	}
+
+	(void)memcpy(chk->area + chk->data, src, n);
+	chk->data += n;
+
+	FLT_OT_RETURN_EX(chk->data, ssize_t, "%ld");
+}
+
+
+/***
+ * NAME
+ *   flt_ot_args_count - number of arguments on a configuration line
+ *
+ * ARGUMENTS
+ *   args - array of configuration arguments (up to MAX_LINE_ARGS entries)
+ *
+ * DESCRIPTION
+ *   Counts the arguments of a configuration line, tolerating embedded
+ *   blank arguments (NULL entries) between valid ones.
+ *
+ * RETURN VALUE
+ *   The number of arguments (index of the last valid argument plus one),
+ *   or 0 if <args> is NULL.
+ */
+int flt_ot_args_count(char **args)
+{
+	int i, retval = 0;
+
+	if (args == NULL)
+		return retval;
+
+	/*
+	 * It is possible that some arguments within the configuration line
+	 * are not specified; that is, they are set to a blank string.
+	 *
+	 * For example:
+	 *    keyword '' arg_2
+	 *
+	 * In that case the content of the args field will be like this:
+	 *    args[0]:              'keyword'
+	 *    args[1]:              NULL pointer
+	 *    args[2]:              'arg_2'
+	 *    args[3 .. MAX_LINE_ARGS): NULL pointers
+	 *
+	 * The total number of arguments is the index of the last argument
+	 * (increased by 1) that is not a NULL pointer.
+	 */
+	for (i = 0; i < MAX_LINE_ARGS; i++)
+		if (FLT_OT_ARG_ISVALID(i))
+			retval = i + 1;
+
+	return retval;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_args_to_str - concatenate configuration arguments into a string
+ *
+ * ARGUMENTS
+ *   args - array of configuration arguments
+ *   idx  - index of the first argument to include
+ *   str  - destination string, (re)allocated via memprintf()
+ *
+ * DESCRIPTION
+ *   Appends arguments args[idx..argc-1] to *str, separated by spaces;
+ *   NULL (blank) arguments are rendered as empty strings.  The caller
+ *   owns and must free *str.
+ *
+ * RETURN VALUE
+ *   This function does not return a value.
+ */
+void flt_ot_args_to_str(char **args, int idx, char **str)
+{
+	int i, argc;
+
+	if ((args == NULL) || (*args == NULL))
+		return;
+
+	argc = flt_ot_args_count(args);
+
+	for (i = idx; i < argc; i++)
+		(void)memprintf(str, "%s%s%s", (*str == NULL) ? "" : *str, (i == idx) ? "" : " ", (args[i] == NULL) ? "" : args[i]);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_strtod - convert a string to a double with range checking
+ *
+ * ARGUMENTS
+ *   nptr      - string to convert
+ *   limit_min - lower bound of the accepted range (inclusive)
+ *   limit_max - upper bound of the accepted range (inclusive)
+ *   err       - error message pointer, set on invalid or out-of-range input
+ *
+ * DESCRIPTION
+ *   Wrapper around strtod(3) that additionally requires the whole string
+ *   to be consumed and the result to lie within the given range.
+ *
+ * RETURN VALUE
+ *   The converted value; on error, whatever strtod() returned, with *err
+ *   set accordingly (callers must check *err).
+ */
+double flt_ot_strtod(const char *nptr, double limit_min, double limit_max, char **err)
+{
+	char   *endptr = NULL;
+	double  retval;
+
+	errno = 0;
+
+	retval = strtod(nptr, &endptr);
+	/* Any trailing characters after the number are treated as an error. */
+	if ((errno != 0) || FLT_OT_STR_ISVALID(endptr))
+		FLT_OT_ERR("'%s' : invalid value", nptr);
+	else if (!FLT_OT_IN_RANGE(retval, limit_min, limit_max))
+		FLT_OT_ERR("'%s' : value out of range [%.2f, %.2f]", nptr, limit_min, limit_max);
+
+	return retval;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_strtoll - convert a string to an int64_t with range checking
+ *
+ * ARGUMENTS
+ *   nptr      - string to convert (base auto-detected, see strtoll(3))
+ *   limit_min - lower bound of the accepted range (inclusive)
+ *   limit_max - upper bound of the accepted range (inclusive)
+ *   err       - error message pointer, set on invalid or out-of-range input
+ *
+ * DESCRIPTION
+ *   Wrapper around strtoll(3) that additionally requires the whole string
+ *   to be consumed and the result to lie within the given range.
+ *
+ * RETURN VALUE
+ *   The converted value; on error, whatever strtoll() returned, with *err
+ *   set accordingly (callers must check *err).
+ */
+int64_t flt_ot_strtoll(const char *nptr, int64_t limit_min, int64_t limit_max, char **err)
+{
+	char    *endptr = NULL;
+	int64_t  retval;
+
+	errno = 0;
+
+	retval = strtoll(nptr, &endptr, 0);
+	/* Any trailing characters after the number are treated as an error. */
+	if ((errno != 0) || FLT_OT_STR_ISVALID(endptr))
+		FLT_OT_ERR("'%s' : invalid value", nptr);
+	else if (!FLT_OT_IN_RANGE(retval, limit_min, limit_max))
+		FLT_OT_ERR("'%s' : value out of range [%" PRId64 ", %" PRId64 "]", nptr, limit_min, limit_max);
+
+	return retval;
+}
+
+
+/***
+ * NAME
+ *   flt_ot_sample_to_str - convert sample data to a string
+ *
+ * ARGUMENTS
+ *   data  - sample data to convert
+ *   value - destination buffer
+ *   size  - size of the destination buffer
+ *   err   - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   Renders the sample into <value> as a NUL-terminated string according
+ *   to its type: boolean, signed integer, IPv4/IPv6 address, string, or
+ *   HTTP method.  Types with no textual representation (SMP_T_ANY,
+ *   SMP_T_ADDR, SMP_T_BIN and unknown types) fail.
+ *
+ * RETURN VALUE
+ *   The length of the resulting string, or -1 on error.
+ */
+int flt_ot_sample_to_str(const struct sample_data *data, char *value, size_t size, char **err)
+{
+	int retval = -1;
+
+	FLT_OT_FUNC("%p, %p, %zu, %p:%p", data, value, size, FLT_OT_DPTR_ARGS(err));
+
+	if ((data == NULL) || (value == NULL) || (size == 0))
+		FLT_OT_RETURN_INT(retval);
+
+	*value = '\0';
+
+	if (data->type == SMP_T_ANY) {
+		FLT_OT_ERR("invalid sample data type %d", data->type);
+	}
+	else if (data->type == SMP_T_BOOL) {
+		value[0] = data->u.sint ? '1' : '0';
+		value[1] = '\0';
+
+		retval = 1;
+	}
+	else if (data->type == SMP_T_SINT) {
+		retval = snprintf(value, size, "%lld", data->u.sint);
+	}
+	else if (data->type == SMP_T_ADDR) {
+		/* This type is never used to qualify a sample. */
+	}
+	else if (data->type == SMP_T_IPV4) {
+		if (INET_ADDRSTRLEN > size)
+			FLT_OT_ERR("sample data size too large");
+		else if (inet_ntop(AF_INET, &(data->u.ipv4), value, INET_ADDRSTRLEN) == NULL)
+			FLT_OT_ERR("invalid IPv4 address");
+		else
+			retval = strlen(value);
+	}
+	else if (data->type == SMP_T_IPV6) {
+		if (INET6_ADDRSTRLEN > size)
+			FLT_OT_ERR("sample data size too large");
+		else if (inet_ntop(AF_INET6, &(data->u.ipv6), value, INET6_ADDRSTRLEN) == NULL)
+			FLT_OT_ERR("invalid IPv6 address");
+		else
+			retval = strlen(value);
+	}
+	else if (data->type == SMP_T_STR) {
+		if (data->u.str.data >= size) {
+			FLT_OT_ERR("sample data size too large");
+		}
+		else if (data->u.str.data > 0) {
+			retval = data->u.str.data;
+			memcpy(value, data->u.str.area, retval);
+			value[retval] = '\0';
+		}
+		else {
+			/*
+			 * There is no content to add but we will still return
+			 * the correct status.
+			 */
+			retval = 0;
+		}
+	}
+	else if (data->type == SMP_T_BIN) {
+		FLT_OT_ERR("invalid sample data type %d", data->type);
+	}
+	else if (data->type != SMP_T_METH) {
+		FLT_OT_ERR("invalid sample data type %d", data->type);
+	}
+	/* From here on the sample is an HTTP method (SMP_T_METH). */
+	else if (data->u.meth.meth == HTTP_METH_OPTIONS) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_OPTIONS);
+
+		(void)memcpy(value, HTTP_METH_STR_OPTIONS, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_GET) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_GET);
+
+		(void)memcpy(value, HTTP_METH_STR_GET, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_HEAD) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_HEAD);
+
+		(void)memcpy(value, HTTP_METH_STR_HEAD, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_POST) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_POST);
+
+		(void)memcpy(value, HTTP_METH_STR_POST, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_PUT) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_PUT);
+
+		(void)memcpy(value, HTTP_METH_STR_PUT, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_DELETE) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_DELETE);
+
+		(void)memcpy(value, HTTP_METH_STR_DELETE, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_TRACE) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_TRACE);
+
+		(void)memcpy(value, HTTP_METH_STR_TRACE, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_CONNECT) {
+		retval = FLT_OT_STR_SIZE(HTTP_METH_STR_CONNECT);
+
+		(void)memcpy(value, HTTP_METH_STR_CONNECT, retval + 1);
+	}
+	else if (data->u.meth.meth == HTTP_METH_OTHER) {
+		if (data->u.meth.str.data >= size) {
+			FLT_OT_ERR("sample data size too large");
+		} else {
+			retval = data->u.meth.str.data;
+			memcpy(value, data->u.meth.str.area, retval);
+			value[retval] = '\0';
+		}
+	}
+	else {
+		FLT_OT_ERR("invalid HTTP method");
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_sample_to_value - convert sample data to an OT value
+ *
+ * ARGUMENTS
+ *   key   - name of the value (used by the caller; not read here)
+ *   data  - sample data to convert
+ *   value - destination OT value
+ *   err   - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   Boolean and signed integer samples are converted to the native OT
+ *   value types; any other sample type is rendered as a string (via
+ *   flt_ot_sample_to_str()) into a freshly allocated buffer of
+ *   global.tune.bufsize bytes, which the resulting value then owns.
+ *
+ * RETURN VALUE
+ *   The size/length of the stored value, or -1 on error.
+ */
+int flt_ot_sample_to_value(const char *key, const struct sample_data *data, struct otc_value *value, char **err)
+{
+	int retval = -1;
+
+	FLT_OT_FUNC("\"%s\", %p, %p, %p:%p", key, data, value, FLT_OT_DPTR_ARGS(err));
+
+	if ((data == NULL) || (value == NULL))
+		FLT_OT_RETURN_INT(retval);
+
+	if (data->type == SMP_T_BOOL) {
+		value->type             = otc_value_bool;
+		value->value.bool_value = data->u.sint ? 1 : 0;
+
+		retval = sizeof(value->value.bool_value);
+	}
+	else if (data->type == SMP_T_SINT) {
+		value->type              = otc_value_int64;
+		value->value.int64_value = data->u.sint;
+
+		retval = sizeof(value->value.int64_value);
+	}
+	else {
+		value->type               = otc_value_string;
+		value->value.string_value = FLT_OT_MALLOC(global.tune.bufsize);
+
+		if (value->value.string_value == NULL)
+			FLT_OT_ERR("out of memory");
+		else
+			retval = flt_ot_sample_to_str(data, (char *)value->value.string_value, global.tune.bufsize, err);
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ *   flt_ot_sample_add - add a tag/log/baggage value to scope data
+ *
+ * ARGUMENTS
+ *   s      - stream the samples are fetched from
+ *   dir    - channel direction (SMP_OPT_DIR_REQ/RES)
+ *   sample - configured sample (key + list of fetch expressions)
+ *   data   - scope data block receiving the result
+ *   type   - FLT_OT_EVENT_SAMPLE_TAG / _LOG / (otherwise baggage)
+ *   err    - error message pointer, set on failure
+ *
+ * DESCRIPTION
+ *   Evaluates all sample expressions and stores the combined result in
+ *   the scope data block as a tag, a log field or a baggage item.
+ *
+ * RETURN VALUE
+ *   Returns a negative value if an error occurs, 0 if it needs to wait,
+ *   any other value otherwise.
+ */
+int flt_ot_sample_add(struct stream *s, uint dir, struct flt_ot_conf_sample *sample, struct flt_ot_scope_data *data, int type, char **err)
+{
+	const struct flt_ot_conf_sample_expr *expr;
+	struct sample                         smp;
+	struct otc_value                      value;
+	struct buffer                         buffer;
+	int                                   idx = 0, rc, retval = FLT_OT_RET_OK;
+
+	/* BUG FIX: the trace arguments previously passed <data> and <sample>
+	 * swapped with respect to the parameter order, mislabeling the two
+	 * pointers in the debug output. */
+	FLT_OT_FUNC("%p, %u, %p, %p, %d, %p:%p", s, dir, sample, data, type, FLT_OT_DPTR_ARGS(err));
+
+	FLT_OT_DBG_CONF_SAMPLE("sample ", sample);
+
+	(void)memset(&buffer, 0, sizeof(buffer));
+
+	list_for_each_entry(expr, &(sample->exprs), list) {
+		FLT_OT_DBG_CONF_SAMPLE_EXPR("sample expression ", expr);
+
+		(void)memset(&smp, 0, sizeof(smp));
+
+		/*
+		 * If we have only one expression to process, then the data
+		 * type that is the result of the expression is converted to
+		 * an equivalent data type (if possible) that is written to
+		 * the tracer.
+		 *
+		 * If conversion is not possible, or if we have multiple
+		 * expressions to process, then the result is converted to
+		 * a string and as such sent to the tracer.
+		 */
+		if (sample_process(s->be, s->sess, s, dir | SMP_OPT_FINAL, expr->expr, &smp) != NULL) {
+			FLT_OT_DBG(3, "data type %d: '%s'", smp.data.type, expr->value);
+		} else {
+			FLT_OT_DBG(2, "WARNING: failed to fetch '%s' value", expr->value);
+
+			/*
+			 * In case the fetch failed, we will set the result
+			 * (sample) to an empty static string.
+			 */
+			(void)memset(&(smp.data), 0, sizeof(smp.data));
+			smp.data.type       = SMP_T_STR;
+			smp.data.u.str.area = "";
+		}
+
+		if ((sample->num_exprs == 1) && (type == FLT_OT_EVENT_SAMPLE_TAG)) {
+			if (flt_ot_sample_to_value(sample->key, &(smp.data), &value, err) == -1)
+				retval = FLT_OT_RET_ERROR;
+		} else {
+			/* Multiple expressions (or non-tag use): accumulate the
+			 * string forms into one buffer that the final value will
+			 * take ownership of.
+			 * NOTE(review): on the error paths below the buffer area
+			 * appears to remain owned by this function — confirm it
+			 * is released by the caller or cannot leak. */
+			if (buffer.area == NULL) {
+				chunk_init(&buffer, FLT_OT_CALLOC(1, global.tune.bufsize), global.tune.bufsize);
+				if (buffer.area == NULL) {
+					FLT_OT_ERR("out of memory");
+
+					retval = FLT_OT_RET_ERROR;
+
+					break;
+				}
+			}
+
+			rc = flt_ot_sample_to_str(&(smp.data), buffer.area + buffer.data, buffer.size - buffer.data, err);
+			if (rc == -1) {
+				retval = FLT_OT_RET_ERROR;
+			} else {
+				buffer.data += rc;
+
+				if (sample->num_exprs == ++idx) {
+					value.type               = otc_value_string;
+					value.value.string_value = buffer.area;
+				}
+			}
+		}
+	}
+
+	if (retval == FLT_OT_RET_ERROR) {
+		/* Do nothing. */
+	}
+	else if (type == FLT_OT_EVENT_SAMPLE_TAG) {
+		struct otc_tag *tag = data->tags + data->num_tags++;
+
+		tag->key = sample->key;
+		(void)memcpy(&(tag->value), &value, sizeof(tag->value));
+	}
+	else if (type == FLT_OT_EVENT_SAMPLE_LOG) {
+		struct otc_log_field *log_field = data->log_fields + data->num_log_fields++;
+
+		log_field->key = sample->key;
+		(void)memcpy(&(log_field->value), &value, sizeof(log_field->value));
+	}
+	else {
+		if (data->baggage == NULL)
+			data->baggage = otc_text_map_new(NULL, FLT_OT_MAXBAGGAGES);
+
+		if (data->baggage == NULL) {
+			FLT_OT_ERR("out of memory");
+
+			retval = FLT_OT_RET_ERROR;
+		}
+		else if (otc_text_map_add(data->baggage, sample->key, 0, value.value.string_value, 0, 0) == -1) {
+			FLT_OT_ERR("out of memory");
+
+			retval = FLT_OT_RET_ERROR;
+		}
+		else
+			FLT_OT_DBG(3, "baggage[%zu]: '%s' -> '%s'", data->baggage->count - 1, data->baggage->key[data->baggage->count - 1], data->baggage->value[data->baggage->count - 1]);
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/src/vars.c b/addons/ot/src/vars.c
new file mode 100644
index 0000000..e99bab1
--- /dev/null
+++ b/addons/ot/src/vars.c
@@ -0,0 +1,834 @@
+/***
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This file is part of the HAProxy OpenTracing filter.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "include.h"
+
+
+#ifdef DEBUG_OT
+
+/***
+ * NAME
+ * flt_ot_vars_scope_dump -
+ *
+ * ARGUMENTS
+ * vars -
+ * scope -
+ *
+ * DESCRIPTION
+ * Function prints the contents of all variables defined for a particular
+ * scope.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static void flt_ot_vars_scope_dump(struct vars *vars, const char *scope)
+{
+	const struct var *var;
+
+	if (vars == NULL)
+		return;
+
+	/* Hold the read lock while walking the list so entries cannot be
+	 * modified or removed while they are being printed.  Only the hash
+	 * of each variable name is available here, not the name itself.
+	 */
+	vars_rdlock(vars);
+	list_for_each_entry(var, &(vars->head), l)
+		FLT_OT_DBG(2, "'%s.%016" PRIx64 "' -> '%.*s'", scope, var->name_hash, (int)b_data(&(var->data.u.str)), b_orig(&(var->data.u.str)));
+	vars_rdunlock(vars);
+}
+
+
+/***
+ * NAME
+ * flt_ot_vars_dump -
+ *
+ * ARGUMENTS
+ * s -
+ *
+ * DESCRIPTION
+ * Function prints the contents of all variables grouped by individual
+ * scope.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+void flt_ot_vars_dump(struct stream *s)
+{
+	FLT_OT_FUNC("%p", s);
+
+	/*
+	 * It would be nice if we could use the get_vars() function from HAProxy
+	 * source here to get the value of the 'vars' pointer, but it is defined
+	 * as 'static inline', so unfortunately none of this is possible.
+	 */
+	/* Dump all four scopes reachable from the stream: process-wide,
+	 * session, transaction and request/response.
+	 */
+	flt_ot_vars_scope_dump(&(proc_vars), "PROC");
+	flt_ot_vars_scope_dump(&(s->sess->vars), "SESS");
+	flt_ot_vars_scope_dump(&(s->vars_txn), "TXN");
+	flt_ot_vars_scope_dump(&(s->vars_reqres), "REQ/RES");
+
+	FLT_OT_RETURN();
+}
+
+#endif /* DEBUG_OT */
+
+
+/***
+ * NAME
+ * flt_ot_smp_init -
+ *
+ * ARGUMENTS
+ * s -
+ * smp -
+ * opt -
+ * type -
+ * data -
+ *
+ * DESCRIPTION
+ * The function initializes the value of the 'smp' structure. If the 'data'
+ * argument is set, then the 'sample_data' member of the 'smp' structure is
+ * also initialized.
+ *
+ * RETURN VALUE
+ * This function does not return a value.
+ */
+static inline void flt_ot_smp_init(struct stream *s, struct sample *smp, uint opt, int type, const char *data)
+{
+	(void)memset(smp, 0, sizeof(*smp));
+	(void)smp_set_owner(smp, s->be, s->sess, s, opt | SMP_OPT_FINAL);
+
+	if (data != NULL) {
+		smp->data.type = type;
+
+		/* chunk_initstr() only references 'data', no copy is made;
+		 * the caller must keep 'data' alive while 'smp' is in use.
+		 */
+		chunk_initstr(&(smp->data.u.str), data);
+	}
+}
+
+
+/***
+ * NAME
+ * flt_ot_smp_add -
+ *
+ * ARGUMENTS
+ *   data - sample data holding the binary context buffer
+ *   name - context variable name to append (the header said 'blk', but
+ *          the parameter is 'name')
+ *   len  - length of the name, without the terminating null byte
+ *   err  - error message output pointer
+ *
+ * DESCRIPTION
+ *   Appends a length-prefixed variable name record to the binary context
+ *   buffer, allocating the buffer on first use.
+ *
+ * RETURN VALUE
+ *   Offset of the added record within the buffer, or FLT_OT_RET_ERROR (-1)
+ *   in case of an error.
+ */
+static int flt_ot_smp_add(struct sample_data *data, const char *name, size_t len, char **err)
+{
+	bool flag_alloc = 0;
+	int retval = FLT_OT_RET_ERROR;
+
+	FLT_OT_FUNC("%p, \"%.*s\", %zu, %p:%p", data, (int)len, name, len, FLT_OT_DPTR_ARGS(err));
+
+	FLT_OT_DBG_BUF(2, &(data->u.str));
+
+	/* Allocate the binary context buffer on first use. */
+	if (b_orig(&(data->u.str)) == NULL) {
+		data->type = SMP_T_BIN;
+		chunk_init(&(data->u.str), FLT_OT_MALLOC(global.tune.bufsize), global.tune.bufsize);
+
+		flag_alloc = (b_orig(&(data->u.str)) != NULL);
+	}
+
+	if (b_orig(&(data->u.str)) == NULL) {
+		FLT_OT_ERR("failed to add ctx '%.*s', not enough memory", (int)len, name);
+	}
+	else if (len > ((UINT64_C(1) << ((sizeof(FLT_OT_VAR_CTX_SIZE) << 3) - 1)) - 1)) {
+		/* The length must fit into the signed FLT_OT_VAR_CTX_SIZE prefix. */
+		FLT_OT_ERR("failed to add ctx '%.*s', too long name", (int)len, name);
+	}
+	else if ((len + sizeof(FLT_OT_VAR_CTX_SIZE)) > b_room(&(data->u.str))) {
+		FLT_OT_ERR("failed to add ctx '%.*s', too many names", (int)len, name);
+	}
+	else {
+		/* Remember the offset of the new record, then append the length
+		 * prefix followed by the (unterminated) name bytes.
+		 * NOTE(review): b_putchr() stores a single char, so this assumes
+		 * FLT_OT_VAR_CTX_SIZE is a one-byte type — confirm its definition.
+		 */
+		retval = b_data(&(data->u.str));
+
+		b_putchr(&(data->u.str), len);
+		(void)__b_putblk(&(data->u.str), name, len);
+
+		FLT_OT_DBG_BUF(2, &(data->u.str));
+	}
+
+	/* On failure, release the buffer only if it was allocated above. */
+	if ((retval == FLT_OT_RET_ERROR) && flag_alloc)
+		FLT_OT_FREE(b_orig(&(data->u.str)));
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_normalize_name -
+ *
+ * ARGUMENTS
+ * var_name -
+ * size -
+ * len -
+ * name -
+ * flag_cpy -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_normalize_name(char *var_name, size_t size, int *len, const char *name, bool flag_cpy, char **err)
+{
+	int retval = 0;
+
+	FLT_OT_FUNC("%p, %zu, %p, \"%s\", %hhu, %p:%p", var_name, size, len, name, flag_cpy, FLT_OT_DPTR_ARGS(err));
+
+	/* An empty or missing element is silently skipped. */
+	if (!FLT_OT_STR_ISVALID(name))
+		FLT_OT_RETURN_INT(retval);
+
+	/*
+	 * In case the name of the variable consists of several elements,
+	 * the character '.' is added between them.
+	 */
+	if ((*len == 0) || (var_name[*len - 1] == '.'))
+		/* Do nothing. */;
+	else if (*len < (size - 1))
+		var_name[(*len)++] = '.';
+	else {
+		FLT_OT_ERR("failed to normalize variable name, buffer too small");
+
+		retval = -1;
+	}
+
+	if (flag_cpy) {
+		/* Copy variable name without modification. */
+		retval = strlen(name);
+		if ((*len + retval + 1) > size) {
+			FLT_OT_ERR("failed to normalize variable name, buffer too small");
+
+			retval = -1;
+		} else {
+			/* 'retval + 1' so the terminating null byte is copied too. */
+			(void)memcpy(var_name + *len, name, retval + 1);
+
+			*len += retval;
+		}
+	} else {
+		/*
+		 * HAProxy does not allow the use of variable names containing '-'
+		 * or ' '. This of course applies to HTTP header names as well.
+		 * Also, here the capital letters are converted to lowercase.
+		 */
+		while (retval != -1)
+			if (*len >= (size - 1)) {
+				FLT_OT_ERR("failed to normalize variable name, buffer too small");
+
+				retval = -1;
+			} else {
+				uint8_t ch = name[retval];
+
+				if (ch == '\0')
+					break;
+				else if (ch == '-')
+					ch = FLT_OT_VAR_CHAR_DASH;
+				else if (ch == ' ')
+					ch = FLT_OT_VAR_CHAR_SPACE;
+				else if (isupper(ch))
+					ch = ist_lc[ch];
+
+				var_name[(*len)++] = ch;
+				retval++;
+			}
+
+		var_name[*len] = '\0';
+	}
+
+	/* 'retval' is the number of characters consumed from 'name',
+	 * '*len' the total length of the constructed name so far.
+	 */
+	FLT_OT_DBG(3, "var_name: \"%s\" %d/%d", var_name, retval, *len);
+
+	/* On error the accumulated length is invalidated as well. */
+	if (retval == -1)
+		*len = retval;
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_var_name -
+ *
+ * ARGUMENTS
+ * scope -
+ * prefix -
+ * name -
+ * flag_cpy -
+ * var_name -
+ * size -
+ * err -
+ *
+ * DESCRIPTION
+ *   The function constructs the full variable name from the 'scope',
+ *   'prefix' and 'name' elements (dot separated), normalized so that it
+ *   is acceptable to HAProxy, and stores it in the 'var_name' buffer.
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_var_name(const char *scope, const char *prefix, const char *name, bool flag_cpy, char *var_name, size_t size, char **err)
+{
+	int retval = 0;
+
+	FLT_OT_FUNC("\"%s\", \"%s\", \"%s\", %hhu, %p, %zu, %p:%p", scope, prefix, name, flag_cpy, var_name, size, FLT_OT_DPTR_ARGS(err));
+
+	/* 'retval' is passed as the in/out length accumulator: after the three
+	 * calls it holds the total length of the dot-separated name, or -1 on
+	 * error.  Only the last element may be copied verbatim (flag_cpy).
+	 */
+	if (flt_ot_normalize_name(var_name, size, &retval, scope, 0, err) >= 0)
+		if (flt_ot_normalize_name(var_name, size, &retval, prefix, 0, err) >= 0)
+			(void)flt_ot_normalize_name(var_name, size, &retval, name, flag_cpy, err);
+
+	if (retval == -1)
+		FLT_OT_ERR("failed to construct variable name '%s.%s.%s'", scope, prefix, name);
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_ctx_loop -
+ *
+ * ARGUMENTS
+ * smp -
+ * scope -
+ * prefix -
+ * err -
+ * func -
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_ctx_loop(struct sample *smp, const char *scope, const char *prefix, char **err, flt_ot_ctx_loop_cb func, void *ptr)
+{
+	FLT_OT_VAR_CTX_SIZE var_ctx_size;
+	char var_name[BUFSIZ], var_ctx[BUFSIZ];
+	int i, var_name_len, var_ctx_len, rc, n = 1, retval = 0;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", %p:%p, %p, %p", smp, scope, prefix, FLT_OT_DPTR_ARGS(err), func, ptr);
+
+	/*
+	 * The variable in which we will save the name of the OpenTracing
+	 * context variable.
+	 */
+	var_name_len = flt_ot_var_name(scope, prefix, NULL, 0, var_name, sizeof(var_name), err);
+	if (var_name_len == -1)
+		FLT_OT_RETURN_INT(FLT_OT_RET_ERROR);
+
+	/*
+	 * Here we will try to find all the previously recorded variables from
+	 * the currently set OpenTracing context. If we find the required
+	 * variable and it is marked as deleted, we will mark it as active.
+	 * If we do not find it, then it is added to the end of the previously
+	 * saved names.
+	 */
+	if (vars_get_by_name(var_name, var_name_len, smp, NULL) == 0) {
+		FLT_OT_DBG(2, "ctx '%s' no variable found", var_name);
+	}
+	else if (smp->data.type != SMP_T_BIN) {
+		FLT_OT_ERR("ctx '%s' invalid data type %d", var_name, smp->data.type);
+
+		retval = FLT_OT_RET_ERROR;
+	}
+	else {
+		FLT_OT_DBG_BUF(2, &(smp->data.u.str));
+
+		/* Each record in the binary buffer is a FLT_OT_VAR_CTX_SIZE
+		 * length prefix followed by that many name bytes; 'n' counts
+		 * records starting at 1.
+		 */
+		for (i = 0; i < b_data(&(smp->data.u.str)); i += sizeof(var_ctx_size) + var_ctx_len, n++) {
+			var_ctx_size = *((typeof(var_ctx_size) *)(b_orig(&(smp->data.u.str)) + i));
+			var_ctx_len = abs(var_ctx_size);
+
+			/* Reject a record whose declared length runs past the buffer. */
+			if ((i + sizeof(var_ctx_size) + var_ctx_len) > b_data(&(smp->data.u.str))) {
+				FLT_OT_ERR("ctx '%s' invalid data size", var_name);
+
+				retval = FLT_OT_RET_ERROR;
+
+				break;
+			}
+
+			(void)memcpy(var_ctx, b_orig(&(smp->data.u.str)) + i + sizeof(var_ctx_size), var_ctx_len);
+			var_ctx[var_ctx_len] = '\0';
+
+			rc = func(smp, i, scope, prefix, var_ctx, var_ctx_size, err, ptr);
+			if (rc == FLT_OT_RET_ERROR) {
+				retval = FLT_OT_RET_ERROR;
+
+				break;
+			}
+			else if (rc > 0) {
+				/* Callback reported a match: return the 1-based
+				 * index of the matching record.
+				 */
+				retval = n;
+
+				break;
+			}
+		}
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_ctx_set_cb -
+ *
+ * ARGUMENTS
+ * smp -
+ * idx -
+ * scope -
+ * prefix -
+ * name -
+ * name_len -
+ * err -
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_ctx_set_cb(struct sample *smp, size_t idx, const char *scope, const char *prefix, const char *name, FLT_OT_VAR_CTX_SIZE name_len, char **err, void *ptr)
+{
+	struct flt_ot_ctx *ctx = ptr;
+	int retval = 0;
+
+	FLT_OT_FUNC("%p, %zu, \"%s\", \"%s\", \"%s\", %hhd, %p:%p, %p", smp, idx, scope, prefix, name, name_len, FLT_OT_DPTR_ARGS(err), ptr);
+
+	/* Report a match (1) when the stored record equals the name being set;
+	 * flt_ot_ctx_loop() then stops and returns the record's index.
+	 * The stray '\n' was dropped from the debug message: no other
+	 * FLT_OT_DBG() call in this file carries a trailing newline.
+	 */
+	if ((name_len == ctx->value_len) && (strncmp(name, ctx->value, name_len) == 0)) {
+		FLT_OT_DBG(2, "ctx '%s' found", name);
+
+		retval = 1;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_ctx_set -
+ *
+ * ARGUMENTS
+ * s -
+ * scope -
+ * prefix -
+ * name -
+ * opt -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_ctx_set(struct stream *s, const char *scope, const char *prefix, const char *name, uint opt, char **err)
+{
+	struct flt_ot_ctx ctx;
+	struct sample smp_ctx;
+	char var_name[BUFSIZ];
+	bool flag_alloc = 0;
+	int rc, var_name_len, retval = FLT_OT_RET_ERROR;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", \"%s\", %u, %p:%p", s, scope, prefix, name, opt, FLT_OT_DPTR_ARGS(err));
+
+	/*
+	 * The variable in which we will save the name of the OpenTracing
+	 * context variable.
+	 */
+	var_name_len = flt_ot_var_name(scope, prefix, NULL, 0, var_name, sizeof(var_name), err);
+	if (var_name_len == -1)
+		FLT_OT_RETURN_INT(retval);
+
+	/* Normalized name of the OpenTracing context variable. */
+	ctx.value_len = flt_ot_var_name(name, NULL, NULL, 0, ctx.value, sizeof(ctx.value), err);
+	if (ctx.value_len == -1)
+		FLT_OT_RETURN_INT(retval);
+
+	flt_ot_smp_init(s, &smp_ctx, opt, 0, NULL);
+
+	/* 0 from the loop means the name is not recorded yet: append it. */
+	retval = flt_ot_ctx_loop(&smp_ctx, scope, prefix, err, flt_ot_ctx_set_cb, &ctx);
+	if (retval == 0) {
+		rc = flt_ot_smp_add(&(smp_ctx.data), ctx.value, ctx.value_len, err);
+		if (rc == FLT_OT_RET_ERROR)
+			retval = FLT_OT_RET_ERROR;
+
+		/* rc == 0: the record went to offset 0, i.e. the buffer was
+		 * freshly allocated by flt_ot_smp_add() and is owned (and
+		 * released) here.
+		 */
+		flag_alloc = (rc == 0);
+	}
+
+	if (retval == FLT_OT_RET_ERROR) {
+		/* Do nothing. */
+	}
+	else if (retval > 0) {
+		/* Name already recorded, nothing to store. */
+		FLT_OT_DBG(2, "ctx '%s' data found", ctx.value);
+	}
+	else if (vars_set_by_name_ifexist(var_name, var_name_len, &smp_ctx) == 0) {
+		FLT_OT_ERR("failed to set ctx '%s'", var_name);
+
+		retval = FLT_OT_RET_ERROR;
+	}
+	else {
+		FLT_OT_DBG(2, "ctx '%s' -> '%.*s' set", var_name, (int)b_data(&(smp_ctx.data.u.str)), b_orig(&(smp_ctx.data.u.str)));
+
+		retval = b_data(&(smp_ctx.data.u.str));
+	}
+
+	/* NOTE(review): assumes vars_set_by_name_ifexist() copied the sample
+	 * data, so the local buffer can be freed here — confirm.
+	 */
+	if (flag_alloc)
+		FLT_OT_FREE(b_orig(&(smp_ctx.data.u.str)));
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_var_register -
+ *
+ * ARGUMENTS
+ * scope -
+ * prefix -
+ * name -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+int flt_ot_var_register(const char *scope, const char *prefix, const char *name, char **err)
+{
+	struct arg arg;
+	char var_name[BUFSIZ];
+	int retval = -1, var_name_len;
+
+	FLT_OT_FUNC("\"%s\", \"%s\", \"%s\", %p:%p", scope, prefix, name, FLT_OT_DPTR_ARGS(err));
+
+	var_name_len = flt_ot_var_name(scope, prefix, name, 0, var_name, sizeof(var_name), err);
+	if (var_name_len == -1)
+		FLT_OT_RETURN_INT(retval);
+
+	/* Set <size> to 0 to not release var_name memory in vars_check_arg(). */
+	(void)memset(&arg, 0, sizeof(arg));
+	arg.type = ARGT_STR;
+	arg.data.str.area = var_name;
+	arg.data.str.data = var_name_len;
+
+	if (vars_check_arg(&arg, err) == 0) {
+		FLT_OT_ERR_APPEND("failed to register variable '%s': %s", var_name, *err);
+	} else {
+		FLT_OT_DBG(2, "variable '%s' registered", var_name);
+
+		/* On success the length of the constructed name is returned. */
+		retval = var_name_len;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_var_set -
+ *
+ * ARGUMENTS
+ * s -
+ * scope -
+ * prefix -
+ * name -
+ * value -
+ * opt -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+int flt_ot_var_set(struct stream *s, const char *scope, const char *prefix, const char *name, const char *value, uint opt, char **err)
+{
+	struct sample smp;
+	char var_name[BUFSIZ];
+	int retval = -1, var_name_len;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", \"%s\", \"%s\", %u, %p:%p", s, scope, prefix, name, value, opt, FLT_OT_DPTR_ARGS(err));
+
+	var_name_len = flt_ot_var_name(scope, prefix, name, 0, var_name, sizeof(var_name), err);
+	if (var_name_len == -1)
+		FLT_OT_RETURN_INT(retval);
+
+	flt_ot_smp_init(s, &smp, opt, SMP_T_STR, value);
+
+	if (vars_set_by_name_ifexist(var_name, var_name_len, &smp) == 0) {
+		FLT_OT_ERR("failed to set variable '%s'", var_name);
+	} else {
+		FLT_OT_DBG(2, "variable '%s' set", var_name);
+
+		retval = var_name_len;
+
+		/* Variables in the context scope are additionally recorded in
+		 * the per-context name list (see flt_ot_ctx_set()), so that
+		 * they can later be enumerated and unset as a group.
+		 */
+		if (strcmp(scope, FLT_OT_VARS_SCOPE) == 0)
+			retval = flt_ot_ctx_set(s, scope, prefix, name, opt, err);
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_vars_unset_cb -
+ *
+ * ARGUMENTS
+ * smp -
+ * idx -
+ * scope -
+ * prefix -
+ * name -
+ * name_len -
+ * err -
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_vars_unset_cb(struct sample *smp, size_t idx, const char *scope, const char *prefix, const char *name, FLT_OT_VAR_CTX_SIZE name_len, char **err, void *ptr)
+{
+	struct sample smp_ctx;
+	char var_ctx[BUFSIZ];
+	int var_ctx_len, retval = FLT_OT_RET_ERROR;
+
+	FLT_OT_FUNC("%p, %zu, \"%s\", \"%s\", \"%s\", %hhd, %p:%p, %p", smp, idx, scope, prefix, name, name_len, FLT_OT_DPTR_ARGS(err), ptr);
+
+	/* Rebuild the full variable name; the last element is copied verbatim
+	 * (flag_cpy set) because it was already normalized when it was stored.
+	 */
+	var_ctx_len = flt_ot_var_name(scope, prefix, name, 1, var_ctx, sizeof(var_ctx), err);
+	if (var_ctx_len == -1) {
+		FLT_OT_ERR("ctx '%s' invalid", name);
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	flt_ot_smp_init(smp->strm, &smp_ctx, smp->opt, 0, NULL);
+
+	if (vars_unset_by_name_ifexist(var_ctx, var_ctx_len, &smp_ctx) == 0) {
+		FLT_OT_ERR("ctx '%s' no variable found", var_ctx);
+	} else {
+		FLT_OT_DBG(2, "ctx '%s' unset", var_ctx);
+
+		/* 0 keeps flt_ot_ctx_loop() iterating over the remaining records. */
+		retval = 0;
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_vars_unset -
+ *
+ * ARGUMENTS
+ * s -
+ * scope -
+ * prefix -
+ * opt -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+int flt_ot_vars_unset(struct stream *s, const char *scope, const char *prefix, uint opt, char **err)
+{
+	struct sample smp_ctx;
+	char var_name[BUFSIZ];
+	int var_name_len, retval;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", %u, %p:%p", s, scope, prefix, opt, FLT_OT_DPTR_ARGS(err));
+
+	flt_ot_smp_init(s, &smp_ctx, opt, 0, NULL);
+
+	/* Unset every context variable recorded in the name list. */
+	retval = flt_ot_ctx_loop(&smp_ctx, scope, prefix, err, flt_ot_vars_unset_cb, NULL);
+	if (retval != FLT_OT_RET_ERROR) {
+		/*
+		 * After all ctx variables have been unset, the variable used
+		 * to store their names should also be unset.
+		 */
+		var_name_len = flt_ot_var_name(scope, prefix, NULL, 0, var_name, sizeof(var_name), err);
+		if (var_name_len == -1)
+			FLT_OT_RETURN_INT(FLT_OT_RET_ERROR);
+
+		flt_ot_smp_init(s, &smp_ctx, opt, 0, NULL);
+
+		if (vars_unset_by_name_ifexist(var_name, var_name_len, &smp_ctx) == 0) {
+			/* Not an error: there may simply be no recorded context. */
+			FLT_OT_DBG(2, "variable '%s' not found", var_name);
+		} else {
+			FLT_OT_DBG(2, "variable '%s' unset", var_name);
+
+			retval = 1;
+		}
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_vars_get_cb -
+ *
+ * ARGUMENTS
+ * smp -
+ * idx -
+ * scope -
+ * prefix -
+ * name -
+ * name_len -
+ * err -
+ * ptr -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+static int flt_ot_vars_get_cb(struct sample *smp, size_t idx, const char *scope, const char *prefix, const char *name, FLT_OT_VAR_CTX_SIZE name_len, char **err, void *ptr)
+{
+	struct otc_text_map **map = ptr;
+	struct sample smp_ctx;
+	char var_ctx[BUFSIZ], ot_var_name[BUFSIZ], ch;
+	int var_ctx_len, ot_var_name_len, retval = FLT_OT_RET_ERROR;
+
+	FLT_OT_FUNC("%p, %zu, \"%s\", \"%s\", \"%s\", %hhd, %p:%p, %p", smp, idx, scope, prefix, name, name_len, FLT_OT_DPTR_ARGS(err), ptr);
+
+	/* Rebuild the full variable name from the stored (already normalized)
+	 * element; flag_cpy set so it is taken verbatim.
+	 */
+	var_ctx_len = flt_ot_var_name(scope, prefix, name, 1, var_ctx, sizeof(var_ctx), err);
+	if (var_ctx_len == -1) {
+		FLT_OT_ERR("ctx '%s' invalid", name);
+
+		FLT_OT_RETURN_INT(retval);
+	}
+
+	flt_ot_smp_init(smp->strm, &smp_ctx, smp->opt, 0, NULL);
+
+	if (vars_get_by_name(var_ctx, var_ctx_len, &smp_ctx, NULL) != 0) {
+		FLT_OT_DBG(2, "'%s' -> '%.*s'", var_ctx, (int)b_data(&(smp_ctx.data.u.str)), b_orig(&(smp_ctx.data.u.str)));
+
+		/* Allocate the result map on first use. */
+		if (*map == NULL) {
+			*map = otc_text_map_new(NULL, 8);
+			if (*map == NULL) {
+				FLT_OT_ERR("failed to create map data");
+
+				FLT_OT_RETURN_INT(FLT_OT_RET_ERROR);
+			}
+		}
+
+		/*
+		 * Eh, because the use of some characters is not allowed
+		 * in the variable name, the conversion of the replaced
+		 * characters to the original is performed here.
+		 */
+		for (ot_var_name_len = 0; (ch = name[ot_var_name_len]) != '\0'; ot_var_name_len++)
+			if (ot_var_name_len >= (FLT_OT_TABLESIZE(ot_var_name) - 1)) {
+				FLT_OT_ERR("failed to reverse variable name, buffer too small");
+
+				/* NOTE(review): presumably resets *map to NULL; the
+				 * '*map == NULL' check below relies on it — confirm.
+				 */
+				otc_text_map_destroy(map, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+
+				break;
+			} else {
+				ot_var_name[ot_var_name_len] = (ch == FLT_OT_VAR_CHAR_DASH) ? '-' : ((ch == FLT_OT_VAR_CHAR_SPACE) ? ' ' : ch);
+			}
+		ot_var_name[ot_var_name_len] = '\0';
+
+		if (*map == NULL) {
+			retval = FLT_OT_RET_ERROR;
+		}
+		else if (otc_text_map_add(*map, ot_var_name, ot_var_name_len, b_orig(&(smp_ctx.data.u.str)), b_data(&(smp_ctx.data.u.str)), OTC_TEXT_MAP_DUP_KEY | OTC_TEXT_MAP_DUP_VALUE) == -1) {
+			FLT_OT_ERR("failed to add map data");
+
+			otc_text_map_destroy(map, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+
+			retval = FLT_OT_RET_ERROR;
+		}
+		else {
+			/* 0 keeps flt_ot_ctx_loop() iterating over all records. */
+			retval = 0;
+		}
+	} else {
+		FLT_OT_DBG(2, "ctx '%s' no variable found", var_ctx);
+	}
+
+	FLT_OT_RETURN_INT(retval);
+}
+
+
+/***
+ * NAME
+ * flt_ot_vars_get -
+ *
+ * ARGUMENTS
+ * s -
+ * scope -
+ * prefix -
+ * opt -
+ * err -
+ *
+ * DESCRIPTION
+ * -
+ *
+ * RETURN VALUE
+ * -
+ */
+struct otc_text_map *flt_ot_vars_get(struct stream *s, const char *scope, const char *prefix, uint opt, char **err)
+{
+	struct sample smp_ctx;
+	struct otc_text_map *retptr = NULL;
+
+	FLT_OT_FUNC("%p, \"%s\", \"%s\", %u, %p:%p", s, scope, prefix, opt, FLT_OT_DPTR_ARGS(err));
+
+	flt_ot_smp_init(s, &smp_ctx, opt, 0, NULL);
+
+	/* The callback allocates the map and fills it with every recorded
+	 * context variable; the caller owns (and must destroy) the result.
+	 */
+	(void)flt_ot_ctx_loop(&smp_ctx, scope, prefix, err, flt_ot_vars_get_cb, &retptr);
+
+	ot_text_map_show(retptr);
+
+	/* An allocated but empty map is useless: release it and return NULL. */
+	if ((retptr != NULL) && (retptr->count == 0)) {
+		FLT_OT_DBG(2, "WARNING: no variables found");
+
+		otc_text_map_destroy(&retptr, OTC_TEXT_MAP_FREE_KEY | OTC_TEXT_MAP_FREE_VALUE);
+	}
+
+	FLT_OT_RETURN_PTR(retptr);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ *
+ * vi: noexpandtab shiftwidth=8 tabstop=8
+ */
diff --git a/addons/ot/test/README-speed-cmp b/addons/ot/test/README-speed-cmp
new file mode 100644
index 0000000..9251faa
--- /dev/null
+++ b/addons/ot/test/README-speed-cmp
@@ -0,0 +1,111 @@
+--- rate-limit 100.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 650.95us 431.15us 46.44ms 96.67%
+ Req/Sec 1.44k 51.39 2.57k 74.89%
+ Latency Distribution
+ 50% 608.00us
+ 75% 760.00us
+ 90% 0.91ms
+ 99% 1.31ms
+ 3434836 requests in 5.00m, 0.89GB read
+Requests/sec: 11446.99
+Transfer/sec: 3.03MB
+----------------------------------------------------------------------
+
+--- rate-limit 50.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 398.00us 371.39us 22.56ms 97.23%
+ Req/Sec 2.32k 84.01 2.76k 74.84%
+ Latency Distribution
+ 50% 350.00us
+ 75% 467.00us
+ 90% 593.00us
+ 99% 1.03ms
+ 5530848 requests in 5.00m, 1.43GB read
+Requests/sec: 18434.31
+Transfer/sec: 4.89MB
+----------------------------------------------------------------------
+
+--- rate-limit 10.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 316.75us 351.92us 23.00ms 98.57%
+ Req/Sec 2.87k 94.02 3.22k 79.30%
+ Latency Distribution
+ 50% 273.00us
+ 75% 342.00us
+ 90% 424.00us
+ 99% 0.94ms
+ 6859293 requests in 5.00m, 1.78GB read
+Requests/sec: 22862.16
+Transfer/sec: 6.06MB
+----------------------------------------------------------------------
+
+--- rate-limit 2.5 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 307.90us 368.64us 26.08ms 98.71%
+ Req/Sec 2.96k 103.84 3.23k 83.76%
+ Latency Distribution
+ 50% 264.00us
+ 75% 327.00us
+ 90% 402.00us
+ 99% 0.97ms
+ 7065667 requests in 5.00m, 1.83GB read
+Requests/sec: 23550.37
+Transfer/sec: 6.24MB
+----------------------------------------------------------------------
+
+--- rate-limit 0.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 304.60us 376.36us 30.26ms 98.74%
+ Req/Sec 2.99k 106.93 3.24k 83.08%
+ Latency Distribution
+ 50% 262.00us
+ 75% 323.00us
+ 90% 396.00us
+ 99% 0.95ms
+ 7136261 requests in 5.00m, 1.85GB read
+Requests/sec: 23785.77
+Transfer/sec: 6.31MB
+----------------------------------------------------------------------
+
+--- rate-limit disabled --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 300.90us 342.35us 22.13ms 98.74%
+ Req/Sec 3.00k 95.67 3.33k 81.11%
+ Latency Distribution
+ 50% 261.00us
+ 75% 322.00us
+ 90% 394.00us
+ 99% 806.00us
+ 7159525 requests in 5.00m, 1.85GB read
+Requests/sec: 23863.05
+Transfer/sec: 6.33MB
+----------------------------------------------------------------------
+
+--- rate-limit off --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 302.51us 371.99us 30.26ms 98.77%
+ Req/Sec 3.00k 104.43 3.73k 83.74%
+ Latency Distribution
+ 50% 260.00us
+ 75% 321.00us
+ 90% 394.00us
+ 99% 0.89ms
+ 7170345 requests in 5.00m, 1.86GB read
+Requests/sec: 23898.19
+Transfer/sec: 6.34MB
+----------------------------------------------------------------------
diff --git a/addons/ot/test/README-speed-ctx b/addons/ot/test/README-speed-ctx
new file mode 100644
index 0000000..fa8fc2c
--- /dev/null
+++ b/addons/ot/test/README-speed-ctx
@@ -0,0 +1,111 @@
+--- rate-limit 100.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 2.49ms 799.87us 43.00ms 70.90%
+ Req/Sec 393.01 20.61 696.00 71.68%
+ Latency Distribution
+ 50% 2.50ms
+ 75% 3.00ms
+ 90% 3.38ms
+ 99% 4.23ms
+ 939237 requests in 5.00m, 249.01MB read
+Requests/sec: 3130.01
+Transfer/sec: 849.75KB
+----------------------------------------------------------------------
+
+--- rate-limit 50.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 1.27ms 0.97ms 40.77ms 56.91%
+ Req/Sec 778.22 70.30 1.36k 69.10%
+ Latency Distribution
+ 50% 1.36ms
+ 75% 1.80ms
+ 90% 2.49ms
+ 99% 3.51ms
+ 1859055 requests in 5.00m, 492.88MB read
+Requests/sec: 6195.58
+Transfer/sec: 1.64MB
+----------------------------------------------------------------------
+
+--- rate-limit 10.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 442.00us 481.47us 31.61ms 90.27%
+ Req/Sec 2.25k 130.05 2.73k 72.83%
+ Latency Distribution
+ 50% 287.00us
+ 75% 526.00us
+ 90% 0.92ms
+ 99% 1.76ms
+ 5380213 requests in 5.00m, 1.39GB read
+Requests/sec: 17930.27
+Transfer/sec: 4.75MB
+----------------------------------------------------------------------
+
+--- rate-limit 2.5 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 346.65us 414.65us 28.50ms 95.63%
+ Req/Sec 2.75k 159.74 3.23k 84.68%
+ Latency Distribution
+ 50% 271.00us
+ 75% 353.00us
+ 90% 505.00us
+ 99% 1.55ms
+ 6560093 requests in 5.00m, 1.70GB read
+Requests/sec: 21864.43
+Transfer/sec: 5.80MB
+----------------------------------------------------------------------
+
+--- rate-limit 0.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 313.32us 402.25us 24.73ms 98.55%
+ Req/Sec 2.95k 145.03 3.21k 88.99%
+ Latency Distribution
+ 50% 264.00us
+ 75% 327.00us
+ 90% 403.00us
+ 99% 1.33ms
+ 7050847 requests in 5.00m, 1.83GB read
+Requests/sec: 23501.14
+Transfer/sec: 6.23MB
+----------------------------------------------------------------------
+
+--- rate-limit disabled --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 310.19us 384.76us 22.18ms 98.66%
+ Req/Sec 2.96k 115.62 3.37k 84.30%
+ Latency Distribution
+ 50% 265.00us
+ 75% 327.00us
+ 90% 402.00us
+ 99% 1.10ms
+ 7058682 requests in 5.00m, 1.83GB read
+Requests/sec: 23526.70
+Transfer/sec: 6.24MB
+----------------------------------------------------------------------
+
+--- rate-limit off --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 305.86us 367.56us 25.76ms 98.65%
+ Req/Sec 2.99k 116.93 3.43k 85.59%
+ Latency Distribution
+ 50% 261.00us
+ 75% 322.00us
+ 90% 396.00us
+ 99% 1.09ms
+ 7137173 requests in 5.00m, 1.85GB read
+Requests/sec: 23788.84
+Transfer/sec: 6.31MB
+----------------------------------------------------------------------
diff --git a/addons/ot/test/README-speed-fe-be b/addons/ot/test/README-speed-fe-be
new file mode 100644
index 0000000..ab2b7af
--- /dev/null
+++ b/addons/ot/test/README-speed-fe-be
@@ -0,0 +1,111 @@
+--- rate-limit 100.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 0.89ms 466.84us 35.44ms 94.39%
+ Req/Sec 1.09k 39.30 1.32k 72.60%
+ Latency Distribution
+ 50% 823.00us
+ 75% 1.00ms
+ 90% 1.20ms
+ 99% 2.14ms
+ 2594524 requests in 5.00m, 687.86MB read
+Requests/sec: 8645.83
+Transfer/sec: 2.29MB
+----------------------------------------------------------------------
+
+--- rate-limit 50.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 681.74us 463.28us 20.45ms 95.46%
+ Req/Sec 1.41k 54.00 1.60k 68.97%
+ Latency Distribution
+ 50% 613.00us
+ 75% 785.00us
+ 90% 0.98ms
+ 99% 2.06ms
+ 3367473 requests in 5.00m, 0.87GB read
+Requests/sec: 11222.76
+Transfer/sec: 2.98MB
+----------------------------------------------------------------------
+
+--- rate-limit 10.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 558.32us 458.54us 29.40ms 97.73%
+ Req/Sec 1.72k 60.67 2.05k 73.10%
+ Latency Distribution
+ 50% 494.00us
+ 75% 610.00us
+ 90% 743.00us
+ 99% 2.08ms
+ 4105420 requests in 5.00m, 1.06GB read
+Requests/sec: 13683.36
+Transfer/sec: 3.63MB
+----------------------------------------------------------------------
+
+--- rate-limit 2.5 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 542.66us 440.31us 22.63ms 97.88%
+ Req/Sec 1.76k 60.02 2.00k 72.27%
+ Latency Distribution
+ 50% 481.00us
+ 75% 588.00us
+ 90% 710.00us
+ 99% 2.05ms
+ 4214525 requests in 5.00m, 1.09GB read
+Requests/sec: 14046.76
+Transfer/sec: 3.72MB
+----------------------------------------------------------------------
+
+--- rate-limit 0.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 529.06us 414.38us 30.09ms 97.97%
+ Req/Sec 1.80k 59.34 2.05k 74.47%
+ Latency Distribution
+ 50% 473.00us
+ 75% 576.00us
+ 90% 692.00us
+ 99% 1.79ms
+ 4287428 requests in 5.00m, 1.11GB read
+Requests/sec: 14290.45
+Transfer/sec: 3.79MB
+----------------------------------------------------------------------
+
+--- rate-limit disabled --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 517.81us 463.10us 36.81ms 98.25%
+ Req/Sec 1.85k 62.39 2.21k 75.65%
+ Latency Distribution
+ 50% 458.00us
+ 75% 558.00us
+ 90% 670.00us
+ 99% 1.96ms
+ 4416273 requests in 5.00m, 1.14GB read
+Requests/sec: 14719.43
+Transfer/sec: 3.90MB
+----------------------------------------------------------------------
+
+--- rate-limit off --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 511.67us 428.18us 27.68ms 98.15%
+ Req/Sec 1.86k 60.67 2.05k 75.44%
+ Latency Distribution
+ 50% 455.00us
+ 75% 554.00us
+ 90% 666.00us
+ 99% 1.81ms
+ 4441271 requests in 5.00m, 1.15GB read
+Requests/sec: 14803.32
+Transfer/sec: 3.92MB
+----------------------------------------------------------------------
diff --git a/addons/ot/test/README-speed-sa b/addons/ot/test/README-speed-sa
new file mode 100644
index 0000000..ea8749d
--- /dev/null
+++ b/addons/ot/test/README-speed-sa
@@ -0,0 +1,111 @@
+--- rate-limit 100.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 1.24ms 522.78us 35.59ms 79.12%
+ Req/Sec 767.71 38.72 3.02k 72.19%
+ Latency Distribution
+ 50% 1.20ms
+ 75% 1.51ms
+ 90% 1.78ms
+ 99% 2.37ms
+ 1834067 requests in 5.00m, 486.25MB read
+Requests/sec: 6111.57
+Transfer/sec: 1.62MB
+----------------------------------------------------------------------
+
+--- rate-limit 50.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 593.11us 476.81us 43.00ms 91.27%
+ Req/Sec 1.59k 81.15 2.07k 71.14%
+ Latency Distribution
+ 50% 549.00us
+ 75% 788.00us
+ 90% 1.03ms
+ 99% 1.62ms
+ 3795987 requests in 5.00m, 0.98GB read
+Requests/sec: 12650.65
+Transfer/sec: 3.35MB
+----------------------------------------------------------------------
+
+--- rate-limit 10.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 326.02us 355.00us 29.23ms 98.05%
+ Req/Sec 2.80k 88.05 3.30k 75.36%
+ Latency Distribution
+ 50% 277.00us
+ 75% 356.00us
+ 90% 456.00us
+ 99% 0.97ms
+ 6675563 requests in 5.00m, 1.73GB read
+Requests/sec: 22249.78
+Transfer/sec: 5.90MB
+----------------------------------------------------------------------
+
+--- rate-limit 2.5 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 311.77us 357.45us 24.11ms 98.62%
+ Req/Sec 2.91k 94.70 3.18k 78.52%
+ Latency Distribution
+ 50% 268.00us
+ 75% 334.00us
+ 90% 413.00us
+ 99% 0.94ms
+ 6960933 requests in 5.00m, 1.80GB read
+Requests/sec: 23201.07
+Transfer/sec: 6.15MB
+----------------------------------------------------------------------
+
+--- rate-limit 0.0 --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 302.51us 330.50us 25.84ms 98.69%
+ Req/Sec 2.98k 91.46 3.40k 78.84%
+ Latency Distribution
+ 50% 263.00us
+ 75% 325.00us
+ 90% 397.00us
+ 99% 812.00us
+ 7112084 requests in 5.00m, 1.84GB read
+Requests/sec: 23705.14
+Transfer/sec: 6.28MB
+----------------------------------------------------------------------
+
+--- rate-limit disabled --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 303.01us 353.98us 28.03ms 98.76%
+ Req/Sec 2.99k 93.97 3.34k 81.12%
+ Latency Distribution
+ 50% 262.00us
+ 75% 323.00us
+ 90% 395.00us
+ 99% 838.00us
+ 7133837 requests in 5.00m, 1.85GB read
+Requests/sec: 23777.95
+Transfer/sec: 6.30MB
+----------------------------------------------------------------------
+
+--- rate-limit off --------------------------------------------------
+Running 5m test @ http://localhost:10080/index.html
+ 8 threads and 8 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 302.61us 349.74us 25.48ms 98.75%
+ Req/Sec 2.99k 94.85 3.49k 80.75%
+ Latency Distribution
+ 50% 262.00us
+ 75% 323.00us
+ 90% 395.00us
+ 99% 822.00us
+ 7132714 requests in 5.00m, 1.85GB read
+Requests/sec: 23773.35
+Transfer/sec: 6.30MB
+----------------------------------------------------------------------
diff --git a/addons/ot/test/be/cfg-dd.json b/addons/ot/test/be/cfg-dd.json
new file mode 100644
index 0000000..8b69b03
--- /dev/null
+++ b/addons/ot/test/be/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "BE",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/be/cfg-jaeger.yml b/addons/ot/test/be/cfg-jaeger.yml
new file mode 100644
index 0000000..0893166
--- /dev/null
+++ b/addons/ot/test/be/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ BE
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/be/cfg-zipkin.json b/addons/ot/test/be/cfg-zipkin.json
new file mode 100644
index 0000000..f0e30d5
--- /dev/null
+++ b/addons/ot/test/be/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "BE",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/be/haproxy.cfg b/addons/ot/test/be/haproxy.cfg
new file mode 100644
index 0000000..c225a2f
--- /dev/null
+++ b/addons/ot/test/be/haproxy.cfg
@@ -0,0 +1,37 @@
+global
+# nbthread 1
+ maxconn 5000
+ hard-stop-after 10s
+# log localhost:514 local7 debug
+# debug
+ stats socket /tmp/haproxy-be.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ retries 3
+ maxconn 4000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8002
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-be-frontend
+ bind *:11080
+ mode http
+ default_backend servers-backend
+
+ filter opentracing id ot-test-be config be/ot.cfg
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:8000
diff --git a/addons/ot/test/be/ot.cfg b/addons/ot/test/be/ot.cfg
new file mode 100644
index 0000000..edd3f76
--- /dev/null
+++ b/addons/ot/test/be/ot.cfg
@@ -0,0 +1,62 @@
+[ot-test-be]
+ ot-tracer ot-test-tracer
+ config be/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
+# log localhost:514 local7 debug
+ option dontlog-normal
+ option hard-errors
+ no option disabled
+
+ scopes frontend_http_request
+ scopes backend_tcp_request
+ scopes backend_http_request
+ scopes client_session_end
+
+ scopes server_session_start
+ scopes tcp_response
+ scopes http_response
+ scopes server_session_end
+
+ ot-scope frontend_http_request
+ extract "ot-ctx" use-headers
+ span "HAProxy session" child-of "ot-ctx" root
+ baggage "haproxy_id" var(sess.ot.uuid)
+ span "Client session" child-of "HAProxy session"
+ span "Frontend HTTP request" child-of "Client session"
+ tag "http.method" method
+ tag "http.url" url
+ tag "http.version" str("HTTP/") req.ver
+ event on-frontend-http-request
+
+ ot-scope backend_tcp_request
+ span "Backend TCP request" follows-from "Frontend HTTP request"
+ finish "Frontend HTTP request"
+ event on-backend-tcp-request
+
+ ot-scope backend_http_request
+ span "Backend HTTP request" follows-from "Backend TCP request"
+ finish "Backend TCP request"
+ event on-backend-http-request
+
+ ot-scope client_session_end
+ finish "Client session"
+ event on-client-session-end
+
+ ot-scope server_session_start
+ span "Server session" child-of "HAProxy session"
+ finish "Backend HTTP request"
+ event on-server-session-start
+
+ ot-scope tcp_response
+ span "TCP response" child-of "Server session"
+ event on-tcp-response
+
+ ot-scope http_response
+ span "HTTP response" follows-from "TCP response"
+ tag "http.status_code" status
+ finish "TCP response"
+ event on-http-response
+
+ ot-scope server_session_end
+ finish *
+ event on-server-session-end
diff --git a/addons/ot/test/cmp/cfg-dd.json b/addons/ot/test/cmp/cfg-dd.json
new file mode 100644
index 0000000..a931f45
--- /dev/null
+++ b/addons/ot/test/cmp/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "CMP",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/cmp/cfg-jaeger.yml b/addons/ot/test/cmp/cfg-jaeger.yml
new file mode 100644
index 0000000..78efc2d
--- /dev/null
+++ b/addons/ot/test/cmp/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ CMP
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/cmp/cfg-zipkin.json b/addons/ot/test/cmp/cfg-zipkin.json
new file mode 100644
index 0000000..7e9d3dd
--- /dev/null
+++ b/addons/ot/test/cmp/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "CMP",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/cmp/haproxy.cfg b/addons/ot/test/cmp/haproxy.cfg
new file mode 100644
index 0000000..9d22725
--- /dev/null
+++ b/addons/ot/test/cmp/haproxy.cfg
@@ -0,0 +1,36 @@
+global
+# nbthread 1
+ maxconn 5000
+ hard-stop-after 10s
+ stats socket /tmp/haproxy.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ retries 3
+ maxconn 4000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8001
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-cmp-frontend
+ bind *:10080
+ mode http
+ default_backend servers-backend
+
+ acl acl-http-status-ok status 100:399
+ filter opentracing id ot-test-cmp config cmp/ot.cfg
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:8000
diff --git a/addons/ot/test/cmp/ot.cfg b/addons/ot/test/cmp/ot.cfg
new file mode 100644
index 0000000..21b15dd
--- /dev/null
+++ b/addons/ot/test/cmp/ot.cfg
@@ -0,0 +1,83 @@
+[ot-test-cmp]
+ ot-tracer ot-test-tracer
+ config cmp/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
+# log localhost:514 local7 debug
+ option dontlog-normal
+ option hard-errors
+ no option disabled
+ rate-limit 100.0
+
+ scopes client_session_start
+ scopes frontend_tcp_request
+ scopes frontend_http_request
+ scopes backend_tcp_request
+ scopes backend_http_request
+ scopes server_unavailable
+
+ scopes server_session_start
+ scopes tcp_response
+ scopes http_response http_response-error server_session_end client_session_end
+
+ ot-scope client_session_start
+ span "HAProxy session" root
+ baggage "haproxy_id" var(sess.ot.uuid)
+ span "Client session" child-of "HAProxy session"
+ event on-client-session-start
+
+ ot-scope frontend_tcp_request
+ span "Frontend TCP request" child-of "Client session"
+ event on-frontend-tcp-request
+
+ ot-scope frontend_http_request
+ span "Frontend HTTP request" follows-from "Frontend TCP request"
+ tag "http.method" method
+ tag "http.url" url
+ tag "http.version" str("HTTP/") req.ver
+ finish "Frontend TCP request"
+ event on-frontend-http-request
+
+ ot-scope backend_tcp_request
+ span "Backend TCP request" follows-from "Frontend HTTP request"
+ finish "Frontend HTTP request"
+ event on-backend-tcp-request
+
+ ot-scope backend_http_request
+ span "Backend HTTP request" follows-from "Backend TCP request"
+ finish "Backend TCP request"
+ event on-backend-http-request
+
+ ot-scope server_unavailable
+ span "HAProxy session"
+ tag "error" bool(true)
+ log "status" str("503 Service Unavailable")
+ finish *
+ event on-server-unavailable
+
+ ot-scope server_session_start
+ span "Server session" child-of "HAProxy session"
+ finish "Backend HTTP request"
+ event on-server-session-start
+
+ ot-scope tcp_response
+ span "TCP response" child-of "Server session"
+ event on-tcp-response
+
+ ot-scope http_response
+ span "HTTP response" follows-from "TCP response"
+ tag "http.status_code" status
+ finish "TCP response"
+ event on-http-response
+
+ ot-scope http_response-error
+ span "HTTP response"
+ tag "error" bool(true)
+ event on-http-response if !acl-http-status-ok
+
+ ot-scope server_session_end
+ finish "HTTP response" "Server session"
+ event on-http-response
+
+ ot-scope client_session_end
+ finish "*"
+ event on-http-response
diff --git a/addons/ot/test/ctx/cfg-dd.json b/addons/ot/test/ctx/cfg-dd.json
new file mode 100644
index 0000000..f68d97a
--- /dev/null
+++ b/addons/ot/test/ctx/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "CTX",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/ctx/cfg-jaeger.yml b/addons/ot/test/ctx/cfg-jaeger.yml
new file mode 100644
index 0000000..659724a
--- /dev/null
+++ b/addons/ot/test/ctx/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ CTX
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/ctx/cfg-zipkin.json b/addons/ot/test/ctx/cfg-zipkin.json
new file mode 100644
index 0000000..3a3a257
--- /dev/null
+++ b/addons/ot/test/ctx/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "CTX",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/ctx/haproxy.cfg b/addons/ot/test/ctx/haproxy.cfg
new file mode 100644
index 0000000..d240a99
--- /dev/null
+++ b/addons/ot/test/ctx/haproxy.cfg
@@ -0,0 +1,38 @@
+global
+# nbthread 1
+ maxconn 5000
+ hard-stop-after 10s
+ stats socket /tmp/haproxy.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ retries 3
+ maxconn 4000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8001
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-ctx-frontend
+ bind *:10080
+ mode http
+ default_backend servers-backend
+
+ acl acl-http-status-ok status 100:399
+ filter opentracing id ot-test-ctx config ctx/ot.cfg
+ http-response ot-group ot-test-ctx http_response_group if acl-http-status-ok
+ http-after-response ot-group ot-test-ctx http_after_response_group if !acl-http-status-ok
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:8000
diff --git a/addons/ot/test/ctx/ot.cfg b/addons/ot/test/ctx/ot.cfg
new file mode 100644
index 0000000..a06a4e0
--- /dev/null
+++ b/addons/ot/test/ctx/ot.cfg
@@ -0,0 +1,197 @@
+[ot-test-ctx]
+ ot-tracer ot-test-tracer
+ log localhost:514 local7 debug
+ config ctx/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
+ option dontlog-normal
+ option hard-errors
+ no option disabled
+ rate-limit 100.0
+
+ groups http_response_group
+ groups http_after_response_group
+
+ scopes client_session_start_1
+ scopes client_session_start_2
+ scopes frontend_tcp_request
+ scopes http_wait_request
+ scopes http_body_request
+ scopes frontend_http_request
+ scopes switching_rules_request
+ scopes backend_tcp_request
+ scopes backend_http_request
+ scopes process_server_rules_request
+ scopes http_process_request
+ scopes tcp_rdp_cookie_request
+ scopes process_sticking_rules_request
+ scopes client_session_end
+ scopes server_unavailable
+
+ scopes server_session_start
+ scopes tcp_response
+ scopes http_wait_response
+ scopes process_store_rules_response
+ scopes http_response http_response-error
+ scopes server_session_end
+
+ ot-group http_response_group
+ scopes http_response_1
+ scopes http_response_2
+
+ ot-scope http_response_1
+ span "HTTP response"
+ log "hdr.content" res.hdr("content-type") str("; length: ") res.hdr("content-length") str(" bytes")
+
+ ot-scope http_response_2
+ span "HTTP response"
+ log "hdr.date" res.hdr("date") str(" / ") res.hdr("last-modified")
+
+ ot-group http_after_response_group
+ scopes http_after_response
+
+ ot-scope http_after_response
+ span "HAProxy response" child-of "HAProxy session"
+ tag "error" bool(true)
+ tag "http.status_code" status
+
+ ot-scope client_session_start_1
+ span "HAProxy session" root
+ inject "ot_ctx_1" use-headers use-vars
+ baggage "haproxy_id" var(sess.ot.uuid)
+ event on-client-session-start
+
+ ot-scope client_session_start_2
+ extract "ot_ctx_1" use-vars
+ span "Client session" child-of "ot_ctx_1"
+ inject "ot_ctx_2" use-headers use-vars
+ event on-client-session-start
+
+ ot-scope frontend_tcp_request
+ extract "ot_ctx_2" use-vars
+ span "Frontend TCP request" child-of "ot_ctx_2"
+ inject "ot_ctx_3" use-headers use-vars
+ event on-frontend-tcp-request
+
+ ot-scope http_wait_request
+ extract "ot_ctx_3" use-vars
+ span "HTTP wait request" follows-from "ot_ctx_3"
+ inject "ot_ctx_4" use-headers use-vars
+ finish "Frontend TCP request" "ot_ctx_3"
+ event on-http-wait-request
+
+ ot-scope http_body_request
+ extract "ot_ctx_4" use-vars
+ span "HTTP body request" follows-from "ot_ctx_4"
+ inject "ot_ctx_5" use-headers use-vars
+ finish "HTTP wait request" "ot_ctx_4"
+ event on-http-body-request
+
+ ot-scope frontend_http_request
+ extract "ot_ctx_5" use-vars
+ span "Frontend HTTP request" follows-from "ot_ctx_5"
+ tag "http.method" method
+ tag "http.url" url
+ tag "http.version" str("HTTP/") req.ver
+ inject "ot_ctx_6" use-headers use-vars
+ finish "HTTP body request" "ot_ctx_5"
+ event on-frontend-http-request
+
+ ot-scope switching_rules_request
+ extract "ot_ctx_6" use-vars
+ span "Switching rules request" follows-from "ot_ctx_6"
+ inject "ot_ctx_7" use-headers use-vars
+ finish "Frontend HTTP request" "ot_ctx_6"
+ event on-switching-rules-request
+
+ ot-scope backend_tcp_request
+ extract "ot_ctx_7" use-vars
+ span "Backend TCP request" follows-from "ot_ctx_7"
+ inject "ot_ctx_8" use-headers use-vars
+ finish "Switching rules request" "ot_ctx_7"
+ event on-backend-tcp-request
+
+ ot-scope backend_http_request
+ extract "ot_ctx_8" use-vars
+ span "Backend HTTP request" follows-from "ot_ctx_8"
+ inject "ot_ctx_9" use-headers use-vars
+ finish "Backend TCP request" "ot_ctx_8"
+ event on-backend-http-request
+
+ ot-scope process_server_rules_request
+ extract "ot_ctx_9" use-vars
+ span "Process server rules request" follows-from "ot_ctx_9"
+ inject "ot_ctx_10" use-headers use-vars
+ finish "Backend HTTP request" "ot_ctx_9"
+ event on-process-server-rules-request
+
+ ot-scope http_process_request
+ extract "ot_ctx_10" use-vars
+ span "HTTP process request" follows-from "ot_ctx_10"
+ inject "ot_ctx_11" use-headers use-vars
+ finish "Process server rules request" "ot_ctx_10"
+ event on-http-process-request
+
+ ot-scope tcp_rdp_cookie_request
+ extract "ot_ctx_11" use-vars
+ span "TCP RDP cookie request" follows-from "ot_ctx_11"
+ inject "ot_ctx_12" use-headers use-vars
+ finish "HTTP process request" "ot_ctx_11"
+ event on-tcp-rdp-cookie-request
+
+ ot-scope process_sticking_rules_request
+ extract "ot_ctx_12" use-vars
+ span "Process sticking rules request" follows-from "ot_ctx_12"
+ inject "ot_ctx_13" use-headers use-vars
+ finish "TCP RDP cookie request" "ot_ctx_12"
+ event on-process-sticking-rules-request
+
+ ot-scope client_session_end
+ finish "Client session" "ot_ctx_2"
+ event on-client-session-end
+
+ ot-scope server_unavailable
+ finish *
+ event on-server-unavailable
+
+ ot-scope server_session_start
+ span "Server session" child-of "ot_ctx_1"
+ inject "ot_ctx_14" use-vars
+ extract "ot_ctx_13" use-vars
+ finish "Process sticking rules request" "ot_ctx_13"
+ event on-server-session-start
+
+ ot-scope tcp_response
+ extract "ot_ctx_14" use-vars
+ span "TCP response" child-of "ot_ctx_14"
+ inject "ot_ctx_15" use-vars
+ event on-tcp-response
+
+ ot-scope http_wait_response
+ extract "ot_ctx_15" use-vars
+ span "HTTP wait response" follows-from "ot_ctx_15"
+ inject "ot_ctx_16" use-headers use-vars
+ finish "TCP response" "ot_ctx_15"
+ event on-http-wait-response
+
+ ot-scope process_store_rules_response
+ extract "ot_ctx_16" use-vars
+ span "Process store rules response" follows-from "ot_ctx_16"
+ inject "ot_ctx_17" use-headers use-vars
+ finish "HTTP wait response" "ot_ctx_16"
+ event on-process-store-rules-response
+
+ ot-scope http_response
+ extract "ot_ctx_17" use-vars
+ span "HTTP response" follows-from "ot_ctx_17"
+ tag "http.status_code" status
+ finish "Process store rules response" "ot_ctx_17"
+ event on-http-response
+
+ ot-scope http_response-error
+ span "HTTP response"
+ tag "error" bool(true)
+ event on-http-response if !acl-http-status-ok
+
+ ot-scope server_session_end
+ finish *
+ event on-server-session-end
diff --git a/addons/ot/test/empty/cfg-dd.json b/addons/ot/test/empty/cfg-dd.json
new file mode 100644
index 0000000..38b65f1
--- /dev/null
+++ b/addons/ot/test/empty/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "EMPTY",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/empty/cfg-jaeger.yml b/addons/ot/test/empty/cfg-jaeger.yml
new file mode 100644
index 0000000..08fadd8
--- /dev/null
+++ b/addons/ot/test/empty/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ EMPTY
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/empty/cfg-zipkin.json b/addons/ot/test/empty/cfg-zipkin.json
new file mode 100644
index 0000000..55fde9f
--- /dev/null
+++ b/addons/ot/test/empty/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "EMPTY",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/empty/haproxy.cfg b/addons/ot/test/empty/haproxy.cfg
new file mode 100644
index 0000000..9d40db9
--- /dev/null
+++ b/addons/ot/test/empty/haproxy.cfg
@@ -0,0 +1,30 @@
+global
+ stats socket /tmp/haproxy.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8001
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-empty
+ bind *:10080
+ mode http
+ default_backend servers-backend
+
+ filter opentracing id ot-test-empty config empty/ot.cfg
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:8000
diff --git a/addons/ot/test/empty/ot.cfg b/addons/ot/test/empty/ot.cfg
new file mode 100644
index 0000000..961c8bc
--- /dev/null
+++ b/addons/ot/test/empty/ot.cfg
@@ -0,0 +1,3 @@
+ot-tracer ot-test-tracer
+ config empty/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
diff --git a/addons/ot/test/fe/cfg-dd.json b/addons/ot/test/fe/cfg-dd.json
new file mode 100644
index 0000000..84afe56
--- /dev/null
+++ b/addons/ot/test/fe/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "FE",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/fe/cfg-jaeger.yml b/addons/ot/test/fe/cfg-jaeger.yml
new file mode 100644
index 0000000..1365efa
--- /dev/null
+++ b/addons/ot/test/fe/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ FE
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/fe/cfg-zipkin.json b/addons/ot/test/fe/cfg-zipkin.json
new file mode 100644
index 0000000..1546b10
--- /dev/null
+++ b/addons/ot/test/fe/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "FE",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/fe/haproxy.cfg b/addons/ot/test/fe/haproxy.cfg
new file mode 100644
index 0000000..bfc0ec9
--- /dev/null
+++ b/addons/ot/test/fe/haproxy.cfg
@@ -0,0 +1,37 @@
+global
+# nbthread 1
+ maxconn 5000
+ hard-stop-after 10s
+# log localhost:514 local7 debug
+# debug
+ stats socket /tmp/haproxy-fe.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ retries 3
+ maxconn 4000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8001
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-fe-frontend
+ bind *:10080
+ mode http
+ default_backend servers-backend
+
+ filter opentracing id ot-test-fe config fe/ot.cfg
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:11080
diff --git a/addons/ot/test/fe/ot.cfg b/addons/ot/test/fe/ot.cfg
new file mode 100644
index 0000000..11de828
--- /dev/null
+++ b/addons/ot/test/fe/ot.cfg
@@ -0,0 +1,74 @@
+[ot-test-fe]
+ ot-tracer ot-test-tracer
+ config fe/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
+# log localhost:514 local7 debug
+ option dontlog-normal
+ option hard-errors
+ no option disabled
+ rate-limit 100.0
+
+ scopes client_session_start
+ scopes frontend_tcp_request
+ scopes frontend_http_request
+ scopes backend_tcp_request
+ scopes backend_http_request
+ scopes client_session_end
+
+ scopes server_session_start
+ scopes tcp_response
+ scopes http_response
+ scopes server_session_end
+
+ ot-scope client_session_start
+ span "HAProxy session" root
+ baggage "haproxy_id" var(sess.ot.uuid)
+ span "Client session" child-of "HAProxy session"
+ event on-client-session-start
+
+ ot-scope frontend_tcp_request
+ span "Frontend TCP request" child-of "Client session"
+ event on-frontend-tcp-request
+
+ ot-scope frontend_http_request
+ span "Frontend HTTP request" follows-from "Frontend TCP request"
+ tag "http.method" method
+ tag "http.url" url
+ tag "http.version" str("HTTP/") req.ver
+ finish "Frontend TCP request"
+ event on-frontend-http-request
+
+ ot-scope backend_tcp_request
+ span "Backend TCP request" follows-from "Frontend HTTP request"
+ finish "Frontend HTTP request"
+ event on-backend-tcp-request
+
+ ot-scope backend_http_request
+ span "Backend HTTP request" follows-from "Backend TCP request"
+ finish "Backend TCP request"
+ span "HAProxy session"
+ inject "ot-ctx" use-headers
+ event on-backend-http-request
+
+ ot-scope client_session_end
+ finish "Client session"
+ event on-client-session-end
+
+ ot-scope server_session_start
+ span "Server session" child-of "HAProxy session"
+ finish "Backend HTTP request"
+ event on-server-session-start
+
+ ot-scope tcp_response
+ span "TCP response" child-of "Server session"
+ event on-tcp-response
+
+ ot-scope http_response
+ span "HTTP response" follows-from "TCP response"
+ tag "http.status_code" status
+ finish "TCP response"
+ event on-http-response
+
+ ot-scope server_session_end
+ finish *
+ event on-server-session-end
diff --git a/addons/ot/test/func-stat.sh b/addons/ot/test/func-stat.sh
new file mode 100755
index 0000000..cf5bd9e
--- /dev/null
+++ b/addons/ot/test/func-stat.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+#
+test ${#} -lt 1 && exit 1
+
+awk '/ {$/ { sub(/\(.*/, "", $5); print $5 }' "${@}" | sort | uniq -c
diff --git a/addons/ot/test/get-opentracing-plugins.sh b/addons/ot/test/get-opentracing-plugins.sh
new file mode 100755
index 0000000..f2fe2d6
--- /dev/null
+++ b/addons/ot/test/get-opentracing-plugins.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+#
+_ARG_DIR="${1:-.}"
+
+
+get ()
+{
+ local _arg_tracer="${1}"
+ local _arg_version="${2}"
+ local _arg_url="${3}"
+ local _arg_file="${4}"
+ local _var_tmpfile="_tmpfile_"
+ local _var_plugin="lib${_arg_tracer}_opentracing_plugin-${_arg_version}.so"
+
+ test -e "${_var_plugin}" && return 0
+
+ wget "https://github.com/${_arg_url}/releases/download/v${_arg_version}/${_arg_file}" -O "${_var_tmpfile}" || {
+ rm "${_var_tmpfile}"
+ return 1
+ }
+
+ case "$(file ${_var_tmpfile})" in
+ *shared\ object*)
+ mv "${_var_tmpfile}" "${_var_plugin}" ;;
+
+ *gzip\ compressed\ data*)
+ gzip -cd "${_var_tmpfile}" > "${_var_plugin}"
+ rm "${_var_tmpfile}" ;;
+ esac
+}
+
+
+mkdir -p "${_ARG_DIR}" && cd "${_ARG_DIR}" || exit 1
+
+get dd 1.1.2 DataDog/dd-opentracing-cpp linux-amd64-libdd_opentracing_plugin.so.gz
+get dd 1.2.0 DataDog/dd-opentracing-cpp linux-amd64-libdd_opentracing_plugin.so.gz
+
+get jaeger 0.4.2 jaegertracing/jaeger-client-cpp libjaegertracing_plugin.linux_amd64.so
+#et jaeger 0.5.0 jaegertracing/jaeger-client-cpp libjaegertracing_plugin.linux_amd64.so
+#et jaeger 0.6.0 jaegertracing/jaeger-client-cpp libjaegertracing_plugin.linux_amd64.so
+
+get lightstep 0.12.0 lightstep/lightstep-tracer-cpp linux-amd64-liblightstep_tracer_plugin.so.gz
+get lightstep 0.13.0 lightstep/lightstep-tracer-cpp linux-amd64-liblightstep_tracer_plugin.so.gz
+
+get zipkin 0.5.2 rnburn/zipkin-cpp-opentracing linux-amd64-libzipkin_opentracing_plugin.so.gz
diff --git a/addons/ot/test/index.html b/addons/ot/test/index.html
new file mode 100644
index 0000000..09ed6fa
--- /dev/null
+++ b/addons/ot/test/index.html
@@ -0,0 +1 @@
+<html><body><p>Did I err?</p></body></html>
diff --git a/addons/ot/test/run-cmp.sh b/addons/ot/test/run-cmp.sh
new file mode 100755
index 0000000..8e678b7
--- /dev/null
+++ b/addons/ot/test/run-cmp.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+_ARG_HAPROXY="${1:-$(realpath -L ${PWD}/../../../haproxy)}"
+ _ARGS="-f cmp/haproxy.cfg"
+ _LOG_DIR="_logs"
+ _LOG="${_LOG_DIR}/_log-$(basename "${0}" .sh)-$(date +%s)"
+
+
+test -x "${_ARG_HAPROXY}" || exit 1
+mkdir -p "${_LOG_DIR}" || exit 2
+
+echo "executing: ${_ARG_HAPROXY} ${_ARGS} > ${_LOG}"
+"${_ARG_HAPROXY}" ${_ARGS} >"${_LOG}" 2>&1
diff --git a/addons/ot/test/run-ctx.sh b/addons/ot/test/run-ctx.sh
new file mode 100755
index 0000000..bfac617
--- /dev/null
+++ b/addons/ot/test/run-ctx.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+_ARG_HAPROXY="${1:-$(realpath -L ${PWD}/../../../haproxy)}"
+ _ARGS="-f ctx/haproxy.cfg"
+ _LOG_DIR="_logs"
+ _LOG="${_LOG_DIR}/_log-$(basename "${0}" .sh)-$(date +%s)"
+
+
+test -x "${_ARG_HAPROXY}" || exit 1
+mkdir -p "${_LOG_DIR}" || exit 2
+
+echo "executing: ${_ARG_HAPROXY} ${_ARGS} > ${_LOG}"
+"${_ARG_HAPROXY}" ${_ARGS} >"${_LOG}" 2>&1
diff --git a/addons/ot/test/run-fe-be.sh b/addons/ot/test/run-fe-be.sh
new file mode 100755
index 0000000..68b250c
--- /dev/null
+++ b/addons/ot/test/run-fe-be.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+_ARG_HAPROXY="${1:-$(realpath -L ${PWD}/../../../haproxy)}"
+ _ARGS_FE="-f fe/haproxy.cfg"
+ _ARGS_BE="-f be/haproxy.cfg"
+ _TIME="$(date +%s)"
+ _LOG_DIR="_logs"
+ _LOG_FE="${_LOG_DIR}/_log-$(basename "${0}" fe-be.sh)fe-${_TIME}"
+ _LOG_BE="${_LOG_DIR}/_log-$(basename "${0}" fe-be.sh)be-${_TIME}"
+
+
+__exit ()
+{
+ test -z "${2}" && {
+ echo
+ echo "Script killed!"
+
+ echo "Waiting for jobs to complete..."
+ pkill --signal SIGUSR1 haproxy
+ wait
+ }
+
+ test -n "${1}" && {
+ echo
+ echo "${1}"
+ echo
+ }
+
+ exit ${2:-100}
+}
+
+
+trap __exit INT TERM
+
+test -x "${_ARG_HAPROXY}" || __exit "${_ARG_HAPROXY}: executable does not exist" 1
+mkdir -p "${_LOG_DIR}" || __exit "${_ARG_HAPROXY}: cannot create log directory" 2
+
+echo "\n------------------------------------------------------------------------"
+echo "--- executing: ${_ARG_HAPROXY} ${_ARGS_BE} > ${_LOG_BE}"
+"${_ARG_HAPROXY}" ${_ARGS_BE} >"${_LOG_BE}" 2>&1 &
+
+echo "--- executing: ${_ARG_HAPROXY} ${_ARGS_FE} > ${_LOG_FE}"
+"${_ARG_HAPROXY}" ${_ARGS_FE} >"${_LOG_FE}" 2>&1 &
+echo "------------------------------------------------------------------------\n"
+
+echo "Press CTRL-C to quit..."
+wait
diff --git a/addons/ot/test/run-sa.sh b/addons/ot/test/run-sa.sh
new file mode 100755
index 0000000..04a303a
--- /dev/null
+++ b/addons/ot/test/run-sa.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+_ARG_HAPROXY="${1:-$(realpath -L ${PWD}/../../../haproxy)}"
+ _ARGS="-f sa/haproxy.cfg"
+ _LOG_DIR="_logs"
+ _LOG="${_LOG_DIR}/_log-$(basename "${0}" .sh)-$(date +%s)"
+
+
+test -x "${_ARG_HAPROXY}" || exit 1
+mkdir -p "${_LOG_DIR}" || exit 2
+
+echo "executing: ${_ARG_HAPROXY} ${_ARGS} > ${_LOG}"
+"${_ARG_HAPROXY}" ${_ARGS} >"${_LOG}" 2>&1
diff --git a/addons/ot/test/sa/cfg-dd.json b/addons/ot/test/sa/cfg-dd.json
new file mode 100644
index 0000000..0c476f7
--- /dev/null
+++ b/addons/ot/test/sa/cfg-dd.json
@@ -0,0 +1,5 @@
+{
+ "service": "SA",
+ "agent_host": "localhost",
+ "agent_port": 8126
+}
diff --git a/addons/ot/test/sa/cfg-jaeger.yml b/addons/ot/test/sa/cfg-jaeger.yml
new file mode 100644
index 0000000..e14f91e
--- /dev/null
+++ b/addons/ot/test/sa/cfg-jaeger.yml
@@ -0,0 +1,34 @@
+service_name:
+ SA
+
+###
+# When using configuration object to instantiate the tracer, the type of
+# sampling can be selected via sampler.type and sampler.param properties.
+# Jaeger libraries support the following samplers:
+#
+# - Constant (sampler.type=const) sampler always makes the same decision for
+# all traces. It either samples all traces (sampler.param=1) or none of
+# them (sampler.param=0).
+#
+# - Probabilistic (sampler.type=probabilistic) sampler makes a random sampling
+# decision with the probability of sampling equal to the value of
+# sampler.param property. For example, with sampler.param=0.1 approximately
+# 1 in 10 traces will be sampled.
+#
+# - Rate Limiting (sampler.type=ratelimiting) sampler uses a leaky bucket rate
+# limiter to ensure that traces are sampled with a certain constant rate.
+# For example, when sampler.param=2.0 it will sample requests with the rate
+# of 2 traces per second.
+#
+# - Remote (sampler.type=remote, which is also the default) sampler consults
+# Jaeger agent for the appropriate sampling strategy to use in the current
+# service. This allows controlling the sampling strategies in the services
+# from a central configuration in Jaeger backend, or even dynamically.
+#
+sampler:
+ type: ratelimiting
+ param: 10.0
+
+reporter:
+ logSpans: true
+ localAgentHostPort: localhost:6831
diff --git a/addons/ot/test/sa/cfg-zipkin.json b/addons/ot/test/sa/cfg-zipkin.json
new file mode 100644
index 0000000..9d155ba
--- /dev/null
+++ b/addons/ot/test/sa/cfg-zipkin.json
@@ -0,0 +1,4 @@
+{
+ "service_name": "SA",
+ "collector_host": "localhost"
+}
diff --git a/addons/ot/test/sa/haproxy.cfg b/addons/ot/test/sa/haproxy.cfg
new file mode 100644
index 0000000..988e3ab
--- /dev/null
+++ b/addons/ot/test/sa/haproxy.cfg
@@ -0,0 +1,40 @@
+global
+# nbthread 1
+ maxconn 5000
+ hard-stop-after 10s
+# log localhost:514 local7 debug
+# debug
+ stats socket /tmp/haproxy.sock mode 666 level admin
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ option httpclose
+ retries 3
+ maxconn 4000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen stats
+ mode http
+ bind *:8001
+ stats uri /
+ stats admin if TRUE
+ stats refresh 10s
+
+frontend ot-test-sa-frontend
+ bind *:10080
+ mode http
+ default_backend servers-backend
+
+ acl acl-http-status-ok status 100:399
+ filter opentracing id ot-test-sa config sa/ot.cfg
+ http-response ot-group ot-test-sa http_response_group if acl-http-status-ok
+ http-after-response ot-group ot-test-sa http_after_response_group if !acl-http-status-ok
+
+backend servers-backend
+ mode http
+ server server-1 127.0.0.1:8000
diff --git a/addons/ot/test/sa/ot.cfg b/addons/ot/test/sa/ot.cfg
new file mode 100644
index 0000000..ae7413b
--- /dev/null
+++ b/addons/ot/test/sa/ot.cfg
@@ -0,0 +1,160 @@
+[ot-test-sa]
+ ot-tracer ot-test-tracer
+ log localhost:514 local7 debug
+ config sa/cfg-jaeger.yml
+ plugin libjaeger_opentracing_plugin-0.5.0.so
+ option dontlog-normal
+ option hard-errors
+ no option disabled
+ rate-limit 100.0
+
+ groups http_response_group
+ groups http_after_response_group
+
+ scopes client_session_start
+ scopes frontend_tcp_request
+ scopes http_wait_request
+ scopes http_body_request
+ scopes frontend_http_request
+ scopes switching_rules_request
+ scopes backend_tcp_request
+ scopes backend_http_request
+ scopes process_server_rules_request
+ scopes http_process_request
+ scopes tcp_rdp_cookie_request
+ scopes process_sticking_rules_request
+ scopes client_session_end
+ scopes server_unavailable
+
+ scopes server_session_start
+ scopes tcp_response
+ scopes http_wait_response
+ scopes process_store_rules_response
+ scopes http_response http_response-error
+ scopes server_session_end
+
+ ot-group http_response_group
+ scopes http_response_1
+ scopes http_response_2
+
+ ot-scope http_response_1
+ span "HTTP response"
+ log "hdr.content" res.hdr("content-type") str("; length: ") res.hdr("content-length") str(" bytes")
+
+ ot-scope http_response_2
+ span "HTTP response"
+ log "hdr.date" res.hdr("date") str(" / ") res.hdr("last-modified")
+
+ ot-group http_after_response_group
+ scopes http_after_response
+
+ ot-scope http_after_response
+ span "HAProxy response" child-of "HAProxy session"
+ tag "error" bool(true)
+ tag "http.status_code" status
+
+ ot-scope client_session_start
+ span "HAProxy session" root
+ baggage "haproxy_id" var(sess.ot.uuid)
+ span "Client session" child-of "HAProxy session"
+ acl acl-test-src-ip src 127.0.0.1
+ event on-client-session-start if acl-test-src-ip
+
+ ot-scope frontend_tcp_request
+ span "Frontend TCP request" child-of "Client session"
+ event on-frontend-tcp-request
+
+ ot-scope http_wait_request
+ span "HTTP wait request" follows-from "Frontend TCP request"
+ finish "Frontend TCP request"
+ event on-http-wait-request
+
+ ot-scope http_body_request
+ span "HTTP body request" follows-from "HTTP wait request"
+ finish "HTTP wait request"
+ event on-http-body-request
+
+ ot-scope frontend_http_request
+ span "Frontend HTTP request" follows-from "HTTP body request"
+ tag "http.method" method
+ tag "http.url" url
+ tag "http.version" str("HTTP/") req.ver
+ finish "HTTP body request"
+ event on-frontend-http-request
+
+ ot-scope switching_rules_request
+ span "Switching rules request" follows-from "Frontend HTTP request"
+ finish "Frontend HTTP request"
+ event on-switching-rules-request
+
+ ot-scope backend_tcp_request
+ span "Backend TCP request" follows-from "Switching rules request"
+ finish "Switching rules request"
+ event on-backend-tcp-request
+
+ ot-scope backend_http_request
+ span "Backend HTTP request" follows-from "Backend TCP request"
+ finish "Backend TCP request"
+ event on-backend-http-request
+
+ ot-scope process_server_rules_request
+ span "Process server rules request" follows-from "Backend HTTP request"
+ finish "Backend HTTP request"
+ event on-process-server-rules-request
+
+ ot-scope http_process_request
+ span "HTTP process request" follows-from "Process server rules request"
+ finish "Process server rules request"
+ event on-http-process-request
+
+ ot-scope tcp_rdp_cookie_request
+ span "TCP RDP cookie request" follows-from "HTTP process request"
+ finish "HTTP process request"
+ event on-tcp-rdp-cookie-request
+
+ ot-scope process_sticking_rules_request
+ span "Process sticking rules request" follows-from "TCP RDP cookie request"
+ finish "TCP RDP cookie request"
+ event on-process-sticking-rules-request
+
+ ot-scope client_session_end
+ finish "Client session"
+ event on-client-session-end
+
+ ot-scope server_unavailable
+ finish *
+ event on-server-unavailable
+
+ ot-scope server_session_start
+ span "Server session" child-of "HAProxy session"
+ finish "Process sticking rules request"
+ event on-server-session-start
+
+ ot-scope tcp_response
+ span "TCP response" child-of "Server session"
+ event on-tcp-response
+
+ ot-scope http_wait_response
+ span "HTTP wait response" follows-from "TCP response"
+ finish "TCP response"
+ event on-http-wait-response
+
+ ot-scope process_store_rules_response
+ span "Process store rules response" follows-from "HTTP wait response"
+ finish "HTTP wait response"
+ event on-process-store-rules-response
+
+ ot-scope http_response
+ span "HTTP response" follows-from "Process store rules response"
+ tag "http.status_code" status
+ finish "Process store rules response"
+ event on-http-response
+
+ ot-scope http_response-error
+ span "HTTP response"
+ tag "error" bool(true)
+ event on-http-response if !acl-http-status-ok
+
+ ot-scope server_session_end
+ finish *
+ event on-server-session-end
diff --git a/addons/ot/test/test-speed.sh b/addons/ot/test/test-speed.sh
new file mode 100755
index 0000000..f2ac514
--- /dev/null
+++ b/addons/ot/test/test-speed.sh
@@ -0,0 +1,117 @@
+#!/bin/sh
+#
+ _ARG_CFG="${1}"
+ _ARG_DIR="${2:-${1}}"
+ _LOG_DIR="_logs"
+_HTTPD_PIDFILE="${_LOG_DIR}/thttpd.pid"
+ _USAGE_MSG="usage: $(basename "${0}") cfg [dir]"
+
+
+sh_exit ()
+{
+ test -z "${2}" && {
+ echo
+ echo "Script killed!"
+ }
+
+ test -n "${1}" && {
+ echo
+ echo "${1}"
+ echo
+ }
+
+ exit ${2:-64}
+}
+
+httpd_run ()
+{
+
+ test -e "${_HTTPD_PIDFILE}" && return
+
+ thttpd -p 8000 -d . -nos -nov -l /dev/null -i "${_HTTPD_PIDFILE}"
+}
+
+httpd_stop ()
+{
+ test -e "${_HTTPD_PIDFILE}" || return
+
+ kill -TERM "$(cat ${_HTTPD_PIDFILE})"
+ rm "${_HTTPD_PIDFILE}"
+}
+
+haproxy_run ()
+{
+ _arg_ratio="${1}"
+ _var_sed_ot=
+ _var_sed_haproxy=
+
+ if test "${_arg_ratio}" = "disabled"; then
+ _var_sed_ot="s/no \(option disabled\)/\1/"
+ elif test "${_arg_ratio}" = "off"; then
+ _var_sed_haproxy="s/^\(.* filter opentracing .*\)/#\1/g; s/^\(.* ot-group .*\)/#\1/g"
+ else
+ _var_sed_ot="s/\(rate-limit\) 100.0/\1 ${_arg_ratio}/"
+ fi
+
+ sed "${_var_sed_haproxy}" "${_ARG_DIR}/haproxy.cfg.in" > "${_ARG_DIR}/haproxy.cfg"
+ sed "${_var_sed_ot}" "${_ARG_DIR}/ot.cfg.in" > "${_ARG_DIR}/ot.cfg"
+
+ if test "${_ARG_DIR}" = "fe"; then
+ if test "${_arg_ratio}" = "disabled" -o "${_arg_ratio}" = "off"; then
+ sed "${_var_sed_haproxy}" "be/haproxy.cfg.in" > "be/haproxy.cfg"
+ sed "${_var_sed_ot}" "be/ot.cfg.in" > "be/ot.cfg"
+ fi
+ fi
+
+ ./run-${_ARG_CFG}.sh &
+ sleep 5
+}
+
+wrk_run ()
+{
+ _arg_ratio="${1}"
+
+ echo "--- rate-limit ${_arg_ratio} --------------------------------------------------"
+ wrk -c8 -d300 -t8 --latency http://localhost:10080/index.html
+ echo "----------------------------------------------------------------------"
+ echo
+
+ sleep 10
+}
+
+
+command -v thttpd >/dev/null 2>&1 || sh_exit "thttpd: command not found" 5
+command -v wrk >/dev/null 2>&1 || sh_exit "wrk: command not found" 6
+
+mkdir -p "${_LOG_DIR}" || sh_exit "${_LOG_DIR}: Cannot create log directory" 1
+
+if test "${_ARG_CFG}" = "all"; then
+ "${0}" fe-be fe > "${_LOG_DIR}/README-speed-fe-be"
+ "${0}" sa sa > "${_LOG_DIR}/README-speed-sa"
+ "${0}" cmp cmp > "${_LOG_DIR}/README-speed-cmp"
+ "${0}" ctx ctx > "${_LOG_DIR}/README-speed-ctx"
+ exit 0
+fi
+
+test -z "${_ARG_CFG}" -o -z "${_ARG_DIR}" && sh_exit "${_USAGE_MSG}" 4
+test -f "run-${_ARG_CFG}.sh" || sh_exit "run-${_ARG_CFG}.sh: No such configuration script" 2
+test -d "${_ARG_DIR}" || sh_exit "${_ARG_DIR}: No such directory" 3
+
+test -e "${_ARG_DIR}/haproxy.cfg.in" || cp -af "${_ARG_DIR}/haproxy.cfg" "${_ARG_DIR}/haproxy.cfg.in"
+test -e "${_ARG_DIR}/ot.cfg.in" || cp -af "${_ARG_DIR}/ot.cfg" "${_ARG_DIR}/ot.cfg.in"
+if test "${_ARG_DIR}" = "fe"; then
+ test -e "be/haproxy.cfg.in" || cp -af "be/haproxy.cfg" "be/haproxy.cfg.in"
+ test -e "be/ot.cfg.in" || cp -af "be/ot.cfg" "be/ot.cfg.in"
+fi
+
+httpd_run
+
+for _var_ratio in 100.0 50.0 10.0 2.5 0.0 disabled off; do
+ haproxy_run "${_var_ratio}"
+ wrk_run "${_var_ratio}"
+
+ pkill --signal SIGUSR1 haproxy
+ wait
+done
+
+httpd_stop
diff --git a/addons/promex/README b/addons/promex/README
new file mode 100644
index 0000000..4e29e23
--- /dev/null
+++ b/addons/promex/README
@@ -0,0 +1,356 @@
+PROMEX: A Prometheus exporter for HAProxy
+-------------------------------------------
+
+Prometheus is a monitoring and alerting system. More and more people use it to
+monitor their environment (this is written February 2019). It collects metrics
+from monitored targets by scraping metrics HTTP endpoints on these targets. For
+HAProxy, the Prometheus team officially supports an exporter written in Go
+(https://github.com/prometheus/haproxy_exporter). But it requires extra
+software to deploy and monitor. PROMEX, on its side, is a built-in Prometheus
+exporter for HAProxy. It was developed as a service and is directly available in
+HAProxy, like the stats applet.
+
+However, PROMEX is not built by default with HAProxy. It is provided as an extra
+component for everyone who wants to use it. So you need to explicitly build HAProxy
+with the PROMEX service, setting the Makefile variable "USE_PROMEX" to "1". For
+instance:
+
+ > make TARGET=linux-glibc USE_PROMEX=1
+
+If HAProxy provides the PROMEX service, the following build option will be
+reported by the command "haproxy -vv":
+
+ Built with the Prometheus exporter as a service
+
+To be used, it must be enabled in the configuration with an "http-request" rule
+and the corresponding HTTP proxy must enable the HTX support. For instance:
+
+ frontend test
+ mode http
+ ...
+ http-request use-service prometheus-exporter if { path /metrics }
+ ...
+
+
+This service has been developed as a third-party component because it could
+become obsolete, depending on how much time Prometheus will remain heavily
+used. This is said with no ulterior motive of course. Prometheus is a great
+piece of software and I wish it all the best. But we evolve in an environment
+that moves quickly, and a solution that seems obvious today could be deprecated
+next year. And because PROMEX is not integrated by default into the HAProxy
+codebase, it will need some interest to be actively supported. All
+contributions of any kind are welcome.
+
+You must also be careful if you use it with huge configurations. Unlike the stats
+applet, all metrics are not grouped by service (proxy, listener or server). With
+PROMEX, all lines for a given metric are provided as one single group. So
+instead of collecting all metrics for a proxy before moving to the next one, we
+must loop on all proxies for each metric. Same for the servers. Thus, it will
+spend much more resources to produce the Prometheus metrics than the CSV export
+through the stats page. To give an order of magnitude, quick benchmarks showed
+that a PROMEX dump is 5x slower and 20x more verbose than a CSV export.
+
+
+metrics filtering
+-------------------
+
+It is possible to dynamically select the metrics to export if you don't use all
+of them, by passing parameters in the query-string.
+
+* Filtering on scopes
+
+The metrics may be filtered by scopes. Multiple parameters with "scope" as name
+may be passed in the query-string to filter exported metrics, with one of those
+values: global, frontend, backend, server or '*' (means all). A scope parameter
+with no value means to filter out all scopes (nothing is returned). The scope
+parameters are parsed in their appearance order in the query-string. So an empty
+scope will reset all scopes already parsed. But it can be overridden by
+following scope parameters in the query-string. By default everything is
+exported. Here are examples:
+
+ /metrics?scope=server # ==> server metrics will be exported
+ /metrics?scope=frontend&scope=backend # ==> Frontend and backend metrics will be exported
+ /metrics?scope=listener # ==> listener metrics will be exported
+ /metrics?scope=*&scope= # ==> no metrics will be exported
+ /metrics?scope=&scope=global # ==> global metrics will be exported
+ /metrics?scope=sticktable # ==> stick tables metrics will be exported
+
+* How do I prevent my Prometheus instance from exploding?
+
+** Filtering on servers state
+
+It is possible to exclude from the returned metrics all servers in maintenance
+mode by passing the parameter "no-maint" in the query-string. This parameter may
+help to solve performance issues of configurations that use server templates to
+manage dynamic provisioning. Note there is no consistency check on the server
+state. So, if the state of a server changes while the exporter is running, only
+a part of the metrics for this server will be dumped.
+
+prometheus example config:
+
+For server-template users:
+- <job>
+ params:
+ no-maint:
+ - empty
+
+** Scrape server health check status only
+
+All health check statuses are dumped through `state` label values. If you want
+to scrape server health check status but prevent all other server metrics from
+being saved, except server_check_status, you may configure Prometheus this way:
+
+- <job>
+ metric_relabel_configs:
+ - source_labels: ['__name__']
+ regex: 'haproxy_(process_|frontend_|listener_|backend_|server_check_status).*'
+ action: keep
+
+Exported metrics
+------------------
+
+See the Prometheus export for a description of each field.
+
+* Globals metrics
+
++------------------------------------------------+
+| Metric name |
++------------------------------------------------+
+| haproxy_process_nbthread |
+| haproxy_process_nbproc |
+| haproxy_process_relative_process_id |
+| haproxy_process_uptime_seconds |
+| haproxy_process_pool_failures_total |
+| haproxy_process_max_fds |
+| haproxy_process_max_sockets |
+| haproxy_process_max_connections |
+| haproxy_process_hard_max_connections |
+| haproxy_process_current_connections |
+| haproxy_process_connections_total |
+| haproxy_process_requests_total |
+| haproxy_process_max_ssl_connections |
+| haproxy_process_current_ssl_connections |
+| haproxy_process_ssl_connections_total |
+| haproxy_process_max_pipes |
+| haproxy_process_pipes_used_total |
+| haproxy_process_pipes_free_total |
+| haproxy_process_current_connection_rate |
+| haproxy_process_limit_connection_rate |
+| haproxy_process_max_connection_rate |
+| haproxy_process_current_session_rate |
+| haproxy_process_limit_session_rate |
+| haproxy_process_max_session_rate |
+| haproxy_process_current_ssl_rate |
+| haproxy_process_limit_ssl_rate |
+| haproxy_process_max_ssl_rate |
+| haproxy_process_current_frontend_ssl_key_rate |
+| haproxy_process_max_frontend_ssl_key_rate |
+| haproxy_process_frontend_ssl_reuse |
+| haproxy_process_current_backend_ssl_key_rate |
+| haproxy_process_max_backend_ssl_key_rate |
+| haproxy_process_ssl_cache_lookups_total |
+| haproxy_process_ssl_cache_misses_total |
+| haproxy_process_http_comp_bytes_in_total |
+| haproxy_process_http_comp_bytes_out_total |
+| haproxy_process_limit_http_comp |
+| haproxy_process_current_zlib_memory |
+| haproxy_process_max_zlib_memory |
+| haproxy_process_current_tasks |
+| haproxy_process_current_run_queue |
+| haproxy_process_idle_time_percent |
+| haproxy_process_stopping |
+| haproxy_process_jobs |
+| haproxy_process_unstoppable_jobs |
+| haproxy_process_listeners |
+| haproxy_process_active_peers |
+| haproxy_process_connected_peers |
+| haproxy_process_dropped_logs_total |
+| haproxy_process_busy_polling_enabled |
+| haproxy_process_failed_resolutions |
+| haproxy_process_bytes_out_total |
+| haproxy_process_spliced_bytes_out_total |
+| haproxy_process_bytes_out_rate |
+| haproxy_process_recv_logs_total |
+| haproxy_process_build_info |
+| haproxy_process_max_memory_bytes |
+| haproxy_process_pool_allocated_bytes |
+| haproxy_process_pool_used_bytes |
+| haproxy_process_start_time_seconds |
++------------------------------------------------+
+
+* Frontend metrics
+
++-------------------------------------------------+
+| Metric name |
++-------------------------------------------------+
+| haproxy_frontend_current_sessions |
+| haproxy_frontend_max_sessions |
+| haproxy_frontend_limit_sessions |
+| haproxy_frontend_sessions_total |
+| haproxy_frontend_bytes_in_total |
+| haproxy_frontend_bytes_out_total |
+| haproxy_frontend_requests_denied_total |
+| haproxy_frontend_responses_denied_total |
+| haproxy_frontend_request_errors_total |
+| haproxy_frontend_status |
+| haproxy_frontend_limit_session_rate |
+| haproxy_frontend_max_session_rate |
+| haproxy_frontend_http_responses_total |
+| haproxy_frontend_http_requests_rate_max |
+| haproxy_frontend_http_requests_total |
+| haproxy_frontend_http_comp_bytes_in_total |
+| haproxy_frontend_http_comp_bytes_out_total |
+| haproxy_frontend_http_comp_bytes_bypassed_total |
+| haproxy_frontend_http_comp_responses_total |
+| haproxy_frontend_connections_rate_max |
+| haproxy_frontend_connections_total |
+| haproxy_frontend_intercepted_requests_total |
+| haproxy_frontend_denied_connections_total |
+| haproxy_frontend_denied_sessions_total |
+| haproxy_frontend_failed_header_rewriting_total |
+| haproxy_frontend_http_cache_lookups_total |
+| haproxy_frontend_http_cache_hits_total |
+| haproxy_frontend_internal_errors_total |
++-------------------------------------------------+
+
+* Listener metrics
+
++-------------------------------------------------+
+| Metric name |
++-------------------------------------------------+
+| haproxy_listener_current_sessions |
+| haproxy_listener_max_sessions |
+| haproxy_listener_limit_sessions |
+| haproxy_listener_sessions_total |
+| haproxy_listener_bytes_in_total |
+| haproxy_listener_bytes_out_total |
+| haproxy_listener_requests_denied_total |
+| haproxy_listener_responses_denied_total |
+| haproxy_listener_request_errors_total |
+| haproxy_listener_status |
+| haproxy_listener_denied_connections_total |
+| haproxy_listener_denied_sessions_total |
+| haproxy_listener_failed_header_rewriting_total |
+| haproxy_listener_internal_errors_total |
++-------------------------------------------------+
+
+* Backend metrics
+
++-----------------------------------------------------+
+| Metric name |
++-----------------------------------------------------+
+| haproxy_backend_current_queue |
+| haproxy_backend_max_queue |
+| haproxy_backend_current_sessions |
+| haproxy_backend_max_sessions |
+| haproxy_backend_limit_sessions |
+| haproxy_backend_sessions_total |
+| haproxy_backend_bytes_in_total |
+| haproxy_backend_bytes_out_total |
+| haproxy_backend_requests_denied_total |
+| haproxy_backend_responses_denied_total |
+| haproxy_backend_connection_errors_total |
+| haproxy_backend_response_errors_total |
+| haproxy_backend_retry_warnings_total |
+| haproxy_backend_redispatch_warnings_total |
+| haproxy_backend_status |
+| haproxy_backend_weight |
+| haproxy_backend_active_servers |
+| haproxy_backend_backup_servers |
+| haproxy_backend_check_up_down_total |
+| haproxy_backend_check_last_change_seconds |
+| haproxy_backend_downtime_seconds_total |
+| haproxy_backend_loadbalanced_total |
+| haproxy_backend_max_session_rate |
+| haproxy_backend_http_responses_total |
+| haproxy_backend_http_requests_total |
+| haproxy_backend_client_aborts_total |
+| haproxy_backend_server_aborts_total |
+| haproxy_backend_http_comp_bytes_in_total |
+| haproxy_backend_http_comp_bytes_out_total |
+| haproxy_backend_http_comp_bytes_bypassed_total |
+| haproxy_backend_http_comp_responses_total |
+| haproxy_backend_last_session_seconds |
+| haproxy_backend_queue_time_average_seconds |
+| haproxy_backend_connect_time_average_seconds |
+| haproxy_backend_response_time_average_seconds |
+| haproxy_backend_total_time_average_seconds |
+| haproxy_backend_failed_header_rewriting_total |
+| haproxy_backend_connection_attempts_total |
+| haproxy_backend_connection_reuses_total |
+| haproxy_backend_http_cache_lookups_total |
+| haproxy_backend_http_cache_hits_total |
+| haproxy_backend_max_queue_time_seconds |
+| haproxy_backend_max_connect_time_seconds |
+| haproxy_backend_max_response_time_seconds |
+| haproxy_backend_max_total_time_seconds |
+| haproxy_backend_internal_errors_total |
+| haproxy_backend_uweight |
+| haproxy_backend_agg_server_status |
+| haproxy_backend_agg_check_status |
++-----------------------------------------------------+
+
+* Server metrics
+
++----------------------------------------------------+
+| Metric name |
++----------------------------------------------------+
+| haproxy_server_current_queue |
+| haproxy_server_max_queue |
+| haproxy_server_current_sessions |
+| haproxy_server_max_sessions |
+| haproxy_server_limit_sessions |
+| haproxy_server_sessions_total |
+| haproxy_server_bytes_in_total |
+| haproxy_server_bytes_out_total |
+| haproxy_server_responses_denied_total |
+| haproxy_server_connection_errors_total |
+| haproxy_server_response_errors_total |
+| haproxy_server_retry_warnings_total |
+| haproxy_server_redispatch_warnings_total |
+| haproxy_server_status |
+| haproxy_server_weight |
+| haproxy_server_check_failures_total |
+| haproxy_server_check_up_down_total |
+| haproxy_server_check_last_change_seconds |
+| haproxy_server_downtime_seconds_total |
+| haproxy_server_queue_limit |
+| haproxy_server_current_throttle |
+| haproxy_server_loadbalanced_total |
+| haproxy_server_max_session_rate |
+| haproxy_server_check_status |
+| haproxy_server_check_code |
+| haproxy_server_check_duration_seconds |
+| haproxy_server_http_responses_total |
+| haproxy_server_client_aborts_total |
+| haproxy_server_server_aborts_total |
+| haproxy_server_last_session_seconds |
+| haproxy_server_queue_time_average_seconds |
+| haproxy_server_connect_time_average_seconds |
+| haproxy_server_response_time_average_seconds |
+| haproxy_server_total_time_average_seconds |
+| haproxy_server_failed_header_rewriting_total |
+| haproxy_server_connection_attempts_total |
+| haproxy_server_connection_reuses_total |
+| haproxy_server_idle_connections_current |
+| haproxy_server_idle_connections_limit |
+| haproxy_server_max_queue_time_seconds |
+| haproxy_server_max_connect_time_seconds |
+| haproxy_server_max_response_time_seconds |
+| haproxy_server_max_total_time_seconds |
+| haproxy_server_internal_errors_total |
+| haproxy_server_unsafe_idle_connections_current |
+| haproxy_server_safe_idle_connections_current |
+| haproxy_server_used_connections_current |
+| haproxy_server_need_connections_current |
+| haproxy_server_uweight |
++----------------------------------------------------+
+
+* Stick table metrics
+
++----------------------------------------------------+
+| Metric name |
++----------------------------------------------------+
+| haproxy_sticktable_size |
+| haproxy_sticktable_used |
++----------------------------------------------------+
diff --git a/addons/promex/service-prometheus.c b/addons/promex/service-prometheus.c
new file mode 100644
index 0000000..6885d20
--- /dev/null
+++ b/addons/promex/service-prometheus.c
@@ -0,0 +1,1655 @@
+/*
+ * Promex is a Prometheus exporter for HAProxy
+ *
+ * It is highly inspired by the official Prometheus exporter.
+ * See: https://github.com/prometheus/haproxy_exporter
+ *
+ * Copyright 2019 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/backend.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+/* Prometheus exporter applet states (appctx->st0) */
+enum {
+ PROMEX_ST_INIT = 0, /* initialized */
+ PROMEX_ST_HEAD, /* send headers before dump */
+ PROMEX_ST_DUMP, /* dumping stats */
+ PROMEX_ST_DONE, /* finished */
+ PROMEX_ST_END, /* treatment terminated */
+};
+
+/* Prometheus exporter dumper states (appctx->st1) */
+enum {
+ PROMEX_DUMPER_INIT = 0, /* initialized */
+ PROMEX_DUMPER_GLOBAL, /* dump metrics of globals */
+ PROMEX_DUMPER_FRONT, /* dump metrics of frontend proxies */
+ PROMEX_DUMPER_BACK, /* dump metrics of backend proxies */
+ PROMEX_DUMPER_LI, /* dump metrics of listeners */
+ PROMEX_DUMPER_SRV, /* dump metrics of servers */
+ PROMEX_DUMPER_STICKTABLE, /* dump metrics of stick tables */
+ PROMEX_DUMPER_DONE, /* finished */
+};
+
+/* Prometheus exporter flags (ctx->flags) */
+#define PROMEX_FL_METRIC_HDR 0x00000001
+#define PROMEX_FL_INFO_METRIC 0x00000002
+#define PROMEX_FL_FRONT_METRIC 0x00000004
+#define PROMEX_FL_BACK_METRIC 0x00000008
+#define PROMEX_FL_SRV_METRIC 0x00000010
+#define PROMEX_FL_LI_METRIC 0x00000020
+#define PROMEX_FL_STICKTABLE_METRIC 0x00000040
+#define PROMEX_FL_SCOPE_GLOBAL 0x00000080
+#define PROMEX_FL_SCOPE_FRONT 0x00000100
+#define PROMEX_FL_SCOPE_BACK 0x00000200
+#define PROMEX_FL_SCOPE_SERVER 0x00000400
+#define PROMEX_FL_SCOPE_LI 0x00000800
+#define PROMEX_FL_SCOPE_STICKTABLE 0x00001000
+#define PROMEX_FL_NO_MAINT_SRV 0x00002000
+
+#define PROMEX_FL_SCOPE_ALL (PROMEX_FL_SCOPE_GLOBAL | PROMEX_FL_SCOPE_FRONT | \
+ PROMEX_FL_SCOPE_LI | PROMEX_FL_SCOPE_BACK | \
+ PROMEX_FL_SCOPE_SERVER | PROMEX_FL_SCOPE_STICKTABLE)
+
+/* the context of the applet */
+struct promex_ctx {
+ struct proxy *px; /* current proxy */
+ struct stktable *st; /* current table */
+ struct listener *li; /* current listener */
+ struct server *sv; /* current server */
+ unsigned int flags; /* PROMEX_FL_* */
+ unsigned field_num; /* current field number (ST_F_* etc) */
+ int obj_state; /* current state among PROMEX_{FRONT|BACK|SRV|LI}_STATE_* */
+};
+
+/* Prometheus metric type (gauge or counter) */
+enum promex_mt_type {
+ PROMEX_MT_GAUGE = 1,
+ PROMEX_MT_COUNTER = 2,
+};
+
+/* The max length for metrics name. It is a hard limit but it should be
+ * enough.
+ */
+#define PROMEX_MAX_NAME_LEN 128
+
+/* The expected max length for a metric dump, including its header lines. It is
+ * just a soft limit to avoid extra work. We don't try to dump a metric if less
+ * than this size is available in the HTX.
+ */
+#define PROMEX_MAX_METRIC_LENGTH 512
+
+/* The max number of labels per metric */
+#define PROMEX_MAX_LABELS 8
+
+/* Describe a prometheus metric */
+struct promex_metric {
+ const struct ist n; /* The metric name */
+ enum promex_mt_type type; /* The metric type (gauge or counter) */
+ unsigned int flags; /* PROMEX_FL_* flags */
+};
+
+/* Describe a prometheus metric label. It is just a key/value pair */
+struct promex_label {
+ struct ist name;
+ struct ist value;
+};
+
+/* Global metrics */
+const struct promex_metric promex_global_metrics[INF_TOTAL_FIELDS] = {
+ //[INF_NAME] ignored
+ //[INF_VERSION], ignored
+ //[INF_RELEASE_DATE] ignored
+ [INF_NBTHREAD] = { .n = IST("nbthread"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_NBPROC] = { .n = IST("nbproc"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_PROCESS_NUM] = { .n = IST("relative_process_id"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_PID] ignored
+ //[INF_UPTIME] ignored
+ [INF_UPTIME_SEC] = { .n = IST("uptime_seconds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_START_TIME_SEC] = { .n = IST("start_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_MEMMAX_MB] ignored
+ [INF_MEMMAX_BYTES] = { .n = IST("max_memory_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_POOL_ALLOC_MB] ignored
+ [INF_POOL_ALLOC_BYTES] = { .n = IST("pool_allocated_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_POOL_USED_MB] ignored
+ [INF_POOL_USED_BYTES] = { .n = IST("pool_used_bytes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_POOL_FAILED] = { .n = IST("pool_failures_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_ULIMIT_N] = { .n = IST("max_fds"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAXSOCK] = { .n = IST("max_sockets"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAXCONN] = { .n = IST("max_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_HARD_MAXCONN] = { .n = IST("hard_max_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CURR_CONN] = { .n = IST("current_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CUM_CONN] = { .n = IST("connections_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CUM_REQ] = { .n = IST("requests_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAX_SSL_CONNS] = { .n = IST("max_ssl_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CURR_SSL_CONNS] = { .n = IST("current_ssl_connections"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CUM_SSL_CONNS] = { .n = IST("ssl_connections_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAXPIPES] = { .n = IST("max_pipes"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_PIPES_USED] = { .n = IST("pipes_used_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_PIPES_FREE] = { .n = IST("pipes_free_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CONN_RATE] = { .n = IST("current_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CONN_RATE_LIMIT] = { .n = IST("limit_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAX_CONN_RATE] = { .n = IST("max_connection_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SESS_RATE] = { .n = IST("current_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SESS_RATE_LIMIT] = { .n = IST("limit_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAX_SESS_RATE] = { .n = IST("max_session_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_RATE] = { .n = IST("current_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_RATE_LIMIT] = { .n = IST("limit_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAX_SSL_RATE] = { .n = IST("max_ssl_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_FRONTEND_KEY_RATE] = { .n = IST("current_frontend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_FRONTEND_MAX_KEY_RATE] = { .n = IST("max_frontend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_FRONTEND_SESSION_REUSE_PCT] = { .n = IST("frontend_ssl_reuse"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_BACKEND_KEY_RATE] = { .n = IST("current_backend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_BACKEND_MAX_KEY_RATE] = { .n = IST("max_backend_ssl_key_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_CACHE_LOOKUPS] = { .n = IST("ssl_cache_lookups_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_SSL_CACHE_MISSES] = { .n = IST("ssl_cache_misses_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_COMPRESS_BPS_IN] = { .n = IST("http_comp_bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_COMPRESS_BPS_OUT] = { .n = IST("http_comp_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_COMPRESS_BPS_RATE_LIM] = { .n = IST("limit_http_comp"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_ZLIB_MEM_USAGE] = { .n = IST("current_zlib_memory"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_MAX_ZLIB_MEM_USAGE] = { .n = IST("max_zlib_memory"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_TASKS] = { .n = IST("current_tasks"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_RUN_QUEUE] = { .n = IST("current_run_queue"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_IDLE_PCT] = { .n = IST("idle_time_percent"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_NODE] ignored
+ //[INF_DESCRIPTION] ignored
+ [INF_STOPPING] = { .n = IST("stopping"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_JOBS] = { .n = IST("jobs"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_UNSTOPPABLE_JOBS] = { .n = IST("unstoppable_jobs"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_LISTENERS] = { .n = IST("listeners"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_ACTIVE_PEERS] = { .n = IST("active_peers"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_CONNECTED_PEERS] = { .n = IST("connected_peers"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_DROPPED_LOGS] = { .n = IST("dropped_logs_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_BUSY_POLLING] = { .n = IST("busy_polling_enabled"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_FAILED_RESOLUTIONS] = { .n = IST("failed_resolutions"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_TOTAL_BYTES_OUT] = { .n = IST("bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_TOTAL_SPLICED_BYTES_OUT] = { .n = IST("spliced_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_BYTES_OUT_RATE] = { .n = IST("bytes_out_rate"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+ //[INF_DEBUG_COMMANDS_ISSUED] ignored
+ [INF_CUM_LOG_MSGS] = { .n = IST("recv_logs_total"), .type = PROMEX_MT_COUNTER, .flags = PROMEX_FL_INFO_METRIC },
+ [INF_BUILD_INFO] = { .n = IST("build_info"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_INFO_METRIC },
+};
+
+/* frontend/backend/server fields */
+const struct promex_metric promex_st_metrics[ST_F_TOTAL_FIELDS] = {
+ //[ST_F_PXNAME] ignored
+ //[ST_F_SVNAME] ignored
+ [ST_F_QCUR] = { .n = IST("current_queue"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_QMAX] = { .n = IST("max_queue"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_SCUR] = { .n = IST("current_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_SMAX] = { .n = IST("max_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_SLIM] = { .n = IST("limit_sessions"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_STOT] = { .n = IST("sessions_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_BIN] = { .n = IST("bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_BOUT] = { .n = IST("bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_DREQ] = { .n = IST("requests_denied_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_DRESP] = { .n = IST("responses_denied_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_EREQ] = { .n = IST("request_errors_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
+ [ST_F_ECON] = { .n = IST("connection_errors_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_ERESP] = { .n = IST("response_errors_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_WRETR] = { .n = IST("retry_warnings_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_WREDIS] = { .n = IST("redispatch_warnings_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_STATUS] = { .n = IST("status"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_WEIGHT] = { .n = IST("weight"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_ACT] = { .n = IST("active_servers"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
+ [ST_F_BCK] = { .n = IST("backup_servers"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
+ [ST_F_CHKFAIL] = { .n = IST("check_failures_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_CHKDOWN] = { .n = IST("check_up_down_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_LASTCHG] = { .n = IST("check_last_change_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_DOWNTIME] = { .n = IST("downtime_seconds_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_QLIMIT] = { .n = IST("queue_limit"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ //[ST_F_PID] ignored
+ //[ST_F_IID] ignored
+ //[ST_F_SID] ignored
+ [ST_F_THROTTLE] = { .n = IST("current_throttle"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_LBTOT] = { .n = IST("loadbalanced_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ //[ST_F_TRACKED] ignored
+ //[ST_F_TYPE] ignored
+ //[ST_F_RATE] ignored
+ [ST_F_RATE_LIM] = { .n = IST("limit_session_rate"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
+ [ST_F_RATE_MAX] = { .n = IST("max_session_rate"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_CHECK_STATUS] = { .n = IST("check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_CHECK_CODE] = { .n = IST("check_code"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_CHECK_DURATION] = { .n = IST("check_duration_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_1XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_2XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_3XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_4XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_5XX] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_HRSP_OTHER] = { .n = IST("http_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ //[ST_F_HANAFAIL] ignored
+ //[ST_F_REQ_RATE] ignored
+ [ST_F_REQ_RATE_MAX] = { .n = IST("http_requests_rate_max"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
+ [ST_F_REQ_TOT] = { .n = IST("http_requests_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_CLI_ABRT] = { .n = IST("client_aborts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_SRV_ABRT] = { .n = IST("server_aborts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_COMP_IN] = { .n = IST("http_comp_bytes_in_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_COMP_OUT] = { .n = IST("http_comp_bytes_out_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_COMP_BYP] = { .n = IST("http_comp_bytes_bypassed_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_COMP_RSP] = { .n = IST("http_comp_responses_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_LASTSESS] = { .n = IST("last_session_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ //[ST_F_LAST_CHK] ignored
+ //[ST_F_LAST_AGT] ignored
+ [ST_F_QTIME] = { .n = IST("queue_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_CTIME] = { .n = IST("connect_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_RTIME] = { .n = IST("response_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_TTIME] = { .n = IST("total_time_average_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ //[ST_F_AGENT_STATUS] ignored
+ //[ST_F_AGENT_CODE] ignored
+ //[ST_F_AGENT_DURATION] ignored
+ //[ST_F_CHECK_DESC] ignored
+ //[ST_F_AGENT_DESC] ignored
+ //[ST_F_CHECK_RISE] ignored
+ //[ST_F_CHECK_FALL] ignored
+ //[ST_F_CHECK_HEALTH] ignored
+ //[ST_F_AGENT_RISE] ignored
+ //[ST_F_AGENT_FALL] ignored
+ //[ST_F_AGENT_HEALTH] ignored
+ //[ST_F_ADDR] ignored
+ //[ST_F_COOKIE] ignored
+ //[ST_F_MODE] ignored
+ //[ST_F_ALGO] ignored
+ //[ST_F_CONN_RATE] ignored
+ [ST_F_CONN_RATE_MAX] = { .n = IST("connections_rate_max"), .type = PROMEX_MT_GAUGE, .flags = (PROMEX_FL_FRONT_METRIC ) },
+ [ST_F_CONN_TOT] = { .n = IST("connections_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC ) },
+ [ST_F_INTERCEPTED] = { .n = IST("intercepted_requests_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC ) },
+ [ST_F_DCON] = { .n = IST("denied_connections_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
+ [ST_F_DSES] = { .n = IST("denied_sessions_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC ) },
+ [ST_F_WREW] = { .n = IST("failed_header_rewriting_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_CONNECT] = { .n = IST("connection_attempts_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_REUSE] = { .n = IST("connection_reuses_total"), .type = PROMEX_MT_COUNTER, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_CACHE_LOOKUPS] = { .n = IST("http_cache_lookups_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_CACHE_HITS] = { .n = IST("http_cache_hits_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_BACK_METRIC ) },
+ [ST_F_SRV_ICUR] = { .n = IST("idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_SRV_ILIM] = { .n = IST("idle_connections_limit"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_QT_MAX] = { .n = IST("max_queue_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_CT_MAX] = { .n = IST("max_connect_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_RT_MAX] = { .n = IST("max_response_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_TT_MAX] = { .n = IST("max_total_time_seconds"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_EINT] = { .n = IST("internal_errors_total"), .type = PROMEX_MT_COUNTER, .flags = (PROMEX_FL_FRONT_METRIC | PROMEX_FL_LI_METRIC | PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_IDLE_CONN_CUR] = { .n = IST("unsafe_idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_SAFE_CONN_CUR] = { .n = IST("safe_idle_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_USED_CONN_CUR] = { .n = IST("used_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_NEED_CONN_EST] = { .n = IST("need_connections_current"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_SRV_METRIC) },
+ [ST_F_UWEIGHT] = { .n = IST("uweight"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC | PROMEX_FL_SRV_METRIC) },
+ [ST_F_AGG_SRV_CHECK_STATUS] = { .n = IST("agg_server_check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
+ [ST_F_AGG_SRV_STATUS ] = { .n = IST("agg_server_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
+ [ST_F_AGG_CHECK_STATUS] = { .n = IST("agg_check_status"), .type = PROMEX_MT_GAUGE, .flags = ( PROMEX_FL_BACK_METRIC ) },
+};
+
+/* Description of overridden stats fields */
+const struct ist promex_st_metric_desc[ST_F_TOTAL_FIELDS] = {
+ [ST_F_STATUS] = IST("Current status of the service, per state label value."),
+ [ST_F_CHECK_STATUS] = IST("Status of last health check, per state label value."),
+ [ST_F_CHECK_CODE] = IST("layer5-7 code, if available of the last health check."),
+ [ST_F_CHECK_DURATION] = IST("Total duration of the latest server health check, in seconds."),
+ [ST_F_QTIME] = IST("Avg. queue time for last 1024 successful connections."),
+ [ST_F_CTIME] = IST("Avg. connect time for last 1024 successful connections."),
+ [ST_F_RTIME] = IST("Avg. response time for last 1024 successful connections."),
+ [ST_F_TTIME] = IST("Avg. total time for last 1024 successful connections."),
+ [ST_F_QT_MAX] = IST("Maximum observed time spent in the queue"),
+ [ST_F_CT_MAX] = IST("Maximum observed time spent waiting for a connection to complete"),
+ [ST_F_RT_MAX] = IST("Maximum observed time spent waiting for a server response"),
+ [ST_F_TT_MAX] = IST("Maximum observed total request+response time (request+queue+connect+response+processing)"),
+};
+
+/* stick table base fields */
+enum sticktable_field {
+ STICKTABLE_SIZE = 0,
+ STICKTABLE_USED,
+ /* must always be the last one */
+ STICKTABLE_TOTAL_FIELDS
+};
+
+const struct promex_metric promex_sticktable_metrics[STICKTABLE_TOTAL_FIELDS] = {
+ [STICKTABLE_SIZE] = { .n = IST("size"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_STICKTABLE_METRIC },
+ [STICKTABLE_USED] = { .n = IST("used"), .type = PROMEX_MT_GAUGE, .flags = PROMEX_FL_STICKTABLE_METRIC },
+};
+
+/* stick table base description */
+const struct ist promex_sticktable_metric_desc[STICKTABLE_TOTAL_FIELDS] = {
+ [STICKTABLE_SIZE] = IST("Stick table size."),
+ [STICKTABLE_USED] = IST("Number of entries used in this stick table."),
+};
+
+/* Specific labels for all ST_F_HRSP_* fields */
+const struct ist promex_hrsp_code[1 + ST_F_HRSP_OTHER - ST_F_HRSP_1XX] = {
+ [ST_F_HRSP_1XX - ST_F_HRSP_1XX] = IST("1xx"),
+ [ST_F_HRSP_2XX - ST_F_HRSP_1XX] = IST("2xx"),
+ [ST_F_HRSP_3XX - ST_F_HRSP_1XX] = IST("3xx"),
+ [ST_F_HRSP_4XX - ST_F_HRSP_1XX] = IST("4xx"),
+ [ST_F_HRSP_5XX - ST_F_HRSP_1XX] = IST("5xx"),
+ [ST_F_HRSP_OTHER - ST_F_HRSP_1XX] = IST("other"),
+};
+
+enum promex_front_state {
+ PROMEX_FRONT_STATE_DOWN = 0,
+ PROMEX_FRONT_STATE_UP,
+
+ PROMEX_FRONT_STATE_COUNT /* must be last */
+};
+
+const struct ist promex_front_st[PROMEX_FRONT_STATE_COUNT] = {
+ [PROMEX_FRONT_STATE_DOWN] = IST("DOWN"),
+ [PROMEX_FRONT_STATE_UP] = IST("UP"),
+};
+
+enum promex_back_state {
+ PROMEX_BACK_STATE_DOWN = 0,
+ PROMEX_BACK_STATE_UP,
+
+ PROMEX_BACK_STATE_COUNT /* must be last */
+};
+
+const struct ist promex_back_st[PROMEX_BACK_STATE_COUNT] = {
+ [PROMEX_BACK_STATE_DOWN] = IST("DOWN"),
+ [PROMEX_BACK_STATE_UP] = IST("UP"),
+};
+
+enum promex_srv_state {
+ PROMEX_SRV_STATE_DOWN = 0,
+ PROMEX_SRV_STATE_UP,
+ PROMEX_SRV_STATE_MAINT,
+ PROMEX_SRV_STATE_DRAIN,
+ PROMEX_SRV_STATE_NOLB,
+
+ PROMEX_SRV_STATE_COUNT /* must be last */
+};
+
+const struct ist promex_srv_st[PROMEX_SRV_STATE_COUNT] = {
+ [PROMEX_SRV_STATE_DOWN] = IST("DOWN"),
+ [PROMEX_SRV_STATE_UP] = IST("UP"),
+ [PROMEX_SRV_STATE_MAINT] = IST("MAINT"),
+ [PROMEX_SRV_STATE_DRAIN] = IST("DRAIN"),
+ [PROMEX_SRV_STATE_NOLB] = IST("NOLB"),
+};
+
+/* Return the server status. */
+enum promex_srv_state promex_srv_status(struct server *sv)
+{
+ int state = PROMEX_SRV_STATE_DOWN;
+
+ if (sv->cur_state == SRV_ST_RUNNING || sv->cur_state == SRV_ST_STARTING) {
+ state = PROMEX_SRV_STATE_UP;
+ if (sv->cur_admin & SRV_ADMF_DRAIN)
+ state = PROMEX_SRV_STATE_DRAIN;
+ }
+ else if (sv->cur_state == SRV_ST_STOPPING)
+ state = PROMEX_SRV_STATE_NOLB;
+
+ if (sv->cur_admin & SRV_ADMF_MAINT)
+ state = PROMEX_SRV_STATE_MAINT;
+
+ return state;
+}
+
+/* Convert a field to its string representation and write it in <out>, followed
+ * by a newline, if there is enough space. Non-numeric values are converted to
+ * "NaN" because Prometheus only supports numerical values (but it is unexpected
+ * to process this kind of value). It returns 1 on success. Otherwise, it
+ * returns 0. The buffer's length must not exceed <max> value.
+ */
+static int promex_metric_to_str(struct buffer *out, struct field *f, size_t max)
+{
+ int ret = 0;
+
+ switch (field_format(f, 0)) {
+ case FF_EMPTY: ret = chunk_strcat(out, "NaN\n"); break;
+ case FF_S32: ret = chunk_appendf(out, "%d\n", f->u.s32); break;
+ case FF_U32: ret = chunk_appendf(out, "%u\n", f->u.u32); break;
+ case FF_S64: ret = chunk_appendf(out, "%lld\n", (long long)f->u.s64); break;
+ case FF_U64: ret = chunk_appendf(out, "%llu\n", (unsigned long long)f->u.u64); break;
+ case FF_FLT: ret = chunk_appendf(out, "%f\n", f->u.flt); break;
+ case FF_STR: ret = chunk_strcat(out, "NaN\n"); break;
+ default: ret = chunk_strcat(out, "NaN\n"); break;
+ }
+ if (!ret || out->data > max)
+ return 0;
+ return 1;
+}
+
+/* Dump the header lines for <metric>. It is its #HELP and #TYPE strings. It
+ * returns 1 on success. Otherwise, if <out> length exceeds <max>, it returns 0.
+ */
+static int promex_dump_metric_header(struct appctx *appctx, struct htx *htx,
+ const struct promex_metric *metric, const struct ist name,
+ struct ist *out, size_t max)
+{
+ struct promex_ctx *ctx = appctx->svcctx;
+ struct ist type;
+ struct ist desc;
+
+ switch (metric->type) {
+ case PROMEX_MT_COUNTER:
+ type = ist("counter");
+ break;
+ default:
+ type = ist("gauge");
+ }
+
+ if (istcat(out, ist("# HELP "), max) == -1 ||
+ istcat(out, name, max) == -1 ||
+ istcat(out, ist(" "), max) == -1)
+ goto full;
+
+ if (metric->flags & PROMEX_FL_INFO_METRIC)
+ desc = ist(info_fields[ctx->field_num].desc);
+ else if (metric->flags & PROMEX_FL_STICKTABLE_METRIC)
+ desc = promex_sticktable_metric_desc[ctx->field_num];
+ else if (!isttest(promex_st_metric_desc[ctx->field_num]))
+ desc = ist(stat_fields[ctx->field_num].desc);
+ else
+ desc = promex_st_metric_desc[ctx->field_num];
+
+ if (istcat(out, desc, max) == -1 ||
+ istcat(out, ist("\n# TYPE "), max) == -1 ||
+ istcat(out, name, max) == -1 ||
+ istcat(out, ist(" "), max) == -1 ||
+ istcat(out, type, max) == -1 ||
+ istcat(out, ist("\n"), max) == -1)
+ goto full;
+
+ return 1;
+
+ full:
+ return 0;
+}
+
+/* Dump the line for <metric>. It starts by the metric name followed by its
+ * labels (proxy name, server name...) between braces and finally its value. If
+ * not already done, the header lines are dumped first. It returns 1 on
+ * success. Otherwise, if <out> length exceeds <max>, it returns 0.
+ */
+static int promex_dump_metric(struct appctx *appctx, struct htx *htx, struct ist prefix,
+ const struct promex_metric *metric, struct field *val,
+ struct promex_label *labels, struct ist *out, size_t max)
+{
+ struct ist name = { .ptr = (char[PROMEX_MAX_NAME_LEN]){ 0 }, .len = 0 };
+ struct promex_ctx *ctx = appctx->svcctx;
+ size_t len = out->len;
+
+ if (out->len + PROMEX_MAX_METRIC_LENGTH > max)
+ return 0;
+
+ /* Fill the metric name */
+ istcat(&name, prefix, PROMEX_MAX_NAME_LEN);
+ istcat(&name, metric->n, PROMEX_MAX_NAME_LEN);
+
+
+ if ((ctx->flags & PROMEX_FL_METRIC_HDR) &&
+ !promex_dump_metric_header(appctx, htx, metric, name, out, max))
+ goto full;
+
+ if (istcat(out, name, max) == -1)
+ goto full;
+
+ if (isttest(labels[0].name)) {
+ int i;
+
+ if (istcat(out, ist("{"), max) == -1)
+ goto full;
+
+ for (i = 0; isttest(labels[i].name); i++) {
+ if (!isttest(labels[i].value))
+ continue;
+
+ if ((i && istcat(out, ist(","), max) == -1) ||
+ istcat(out, labels[i].name, max) == -1 ||
+ istcat(out, ist("=\""), max) == -1 ||
+ istcat(out, labels[i].value, max) == -1 ||
+ istcat(out, ist("\""), max) == -1)
+ goto full;
+ }
+
+ if (istcat(out, ist("}"), max) == -1)
+ goto full;
+
+ }
+
+ if (istcat(out, ist(" "), max) == -1)
+ goto full;
+
+ trash.data = out->len;
+ if (!promex_metric_to_str(&trash, val, max))
+ goto full;
+ out->len = trash.data;
+
+ ctx->flags &= ~PROMEX_FL_METRIC_HDR;
+ return 1;
+ full:
+ // Restore previous length
+ out->len = len;
+ return 0;
+
+}
+
+
/* Dump global metrics (prefixed by "haproxy_process_"). It returns 1 on success,
 * 0 if <htx> is full and -1 in case of any error.
 *
 * The dump is resumable: <ctx->field_num> keeps the current position in the
 * info fields, so the function may be re-entered after a "full" interruption
 * and carry on where it stopped.
 */
static int promex_dump_global_metrics(struct appctx *appctx, struct htx *htx)
{
	static struct ist prefix = IST("haproxy_process_");
	struct promex_ctx *ctx = appctx->svcctx;
	struct field val;
	struct channel *chn = sc_ic(appctx_sc(appctx));
	struct ist out = ist2(trash.area, 0);       /* metrics are formatted into the trash chunk */
	size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
	int ret = 1;

	/* refresh the global info fields before dumping them */
	if (!stats_fill_info(info, INF_TOTAL_FIELDS, 0))
		return -1;

	for (; ctx->field_num < INF_TOTAL_FIELDS; ctx->field_num++) {
		struct promex_label labels[PROMEX_MAX_LABELS-1] = {};

		/* skip fields not exported in the current scope */
		if (!(promex_global_metrics[ctx->field_num].flags & ctx->flags))
			continue;

		switch (ctx->field_num) {
			case INF_BUILD_INFO:
				/* synthetic gauge carrying the version as a label */
				labels[0].name  = ist("version");
				labels[0].value = ist(HAPROXY_VERSION);
				val = mkf_u32(FN_GAUGE, 1);
				break;

			default:
				val = info[ctx->field_num];
		}

		if (!promex_dump_metric(appctx, htx, prefix, &promex_global_metrics[ctx->field_num],
					&val, labels, &out, max))
			goto full;

		ctx->flags |= PROMEX_FL_METRIC_HDR;
	}

  end:
	/* flush whatever was formatted, even on a "full" interruption */
	if (out.len) {
		if (!htx_add_data_atonce(htx, out))
			return -1; /* Unexpected and unrecoverable error */
		channel_add_input(chn, out.len);
	}
	return ret;
  full:
	ret = 0;
	goto end;
}
+
/* Dump frontends metrics (prefixed by "haproxy_frontend_"). It returns 1 on success,
 * 0 if <htx> is full and -1 in case of any error.
 *
 * The dump is resumable: the outer loop walks the metrics (<ctx->field_num>)
 * and the inner loop walks the frontends (<ctx->px>), so on a "full"
 * interruption the function restarts at the same metric/proxy pair.
 */
static int promex_dump_front_metrics(struct appctx *appctx, struct htx *htx)
{
	static struct ist prefix = IST("haproxy_frontend_");
	struct promex_ctx *ctx = appctx->svcctx;
	struct proxy *px;
	struct field val;
	struct channel *chn = sc_ic(appctx_sc(appctx));
	struct ist out = ist2(trash.area, 0);
	size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
	struct field *stats = stat_l[STATS_DOMAIN_PROXY];
	int ret = 1;
	enum promex_front_state state;

	for (;ctx->field_num < ST_F_TOTAL_FIELDS; ctx->field_num++) {
		/* skip fields not exported in the current scope */
		if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
			continue;

		while (ctx->px) {
			struct promex_label labels[PROMEX_MAX_LABELS-1] = {};

			px = ctx->px;

			labels[0].name = ist("proxy");
			labels[0].value = ist2(px->id, strlen(px->id));

			/* skip the disabled proxies, global frontend and non-networked ones */
			if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
				goto next_px;

			if (!stats_fill_fe_stats(px, stats, ST_F_TOTAL_FIELDS, &(ctx->field_num)))
				return -1;

			switch (ctx->field_num) {
				case ST_F_STATUS:
					/* one time-series per known state, value 1 for the
					 * current one and 0 for the others. <ctx->obj_state>
					 * is the resume point inside this sub-loop.
					 */
					state = !(px->flags & PR_FL_STOPPED);
					for (; ctx->obj_state < PROMEX_FRONT_STATE_COUNT; ctx->obj_state++) {
						labels[1].name = ist("state");
						labels[1].value = promex_front_st[ctx->obj_state];
						val = mkf_u32(FO_STATUS, state == ctx->obj_state);
						if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
									&val, labels, &out, max))
							goto full;
					}
					ctx->obj_state = 0;
					goto next_px;
				case ST_F_REQ_RATE_MAX:
				case ST_F_REQ_TOT:
				case ST_F_INTERCEPTED:
				case ST_F_CACHE_LOOKUPS:
				case ST_F_CACHE_HITS:
				case ST_F_COMP_IN:
				case ST_F_COMP_OUT:
				case ST_F_COMP_BYP:
				case ST_F_COMP_RSP:
					/* HTTP-only counters */
					if (px->mode != PR_MODE_HTTP)
						goto next_px;
					val = stats[ctx->field_num];
					break;
				case ST_F_HRSP_1XX:
				case ST_F_HRSP_2XX:
				case ST_F_HRSP_3XX:
				case ST_F_HRSP_4XX:
				case ST_F_HRSP_5XX:
				case ST_F_HRSP_OTHER:
					if (px->mode != PR_MODE_HTTP)
						goto next_px;
					/* all http_responses_total series share one header */
					if (ctx->field_num != ST_F_HRSP_1XX)
						ctx->flags &= ~PROMEX_FL_METRIC_HDR;
					labels[1].name = ist("code");
					labels[1].value = promex_hrsp_code[ctx->field_num - ST_F_HRSP_1XX];
					val = stats[ctx->field_num];
					break;

				default:
					val = stats[ctx->field_num];
			}

			if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
						&val, labels, &out, max))
				goto full;
		  next_px:
			ctx->px = px->next;
		}
		/* current metric done for all proxies: re-arm the header and rewind */
		ctx->flags |= PROMEX_FL_METRIC_HDR;
		ctx->px = proxies_list;
	}

  end:
	if (out.len) {
		if (!htx_add_data_atonce(htx, out))
			return -1; /* Unexpected and unrecoverable error */
		channel_add_input(chn, out.len);
	}
	return ret;
  full:
	ret = 0;
	goto end;
}
+
/* Dump listener metrics (prefixed by "haproxy_listener_"). It returns 1 on
 * success, 0 if <htx> is full and -1 in case of any error.
 *
 * The dump is resumable: <ctx->field_num>, <ctx->px> and <ctx->li> keep the
 * current metric/frontend/listener position across re-entries.
 */
static int promex_dump_listener_metrics(struct appctx *appctx, struct htx *htx)
{
	static struct ist prefix = IST("haproxy_listener_");
	struct promex_ctx *ctx = appctx->svcctx;
	struct proxy *px;
	struct field val;
	struct channel *chn = sc_ic(appctx_sc(appctx));
	struct ist out = ist2(trash.area, 0);
	size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
	struct field *stats = stat_l[STATS_DOMAIN_PROXY];
	struct listener *li;
	int ret = 1;
	enum li_status status;

	for (;ctx->field_num < ST_F_TOTAL_FIELDS; ctx->field_num++) {
		/* skip fields not exported in the current scope */
		if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
			continue;

		while (ctx->px) {
			struct promex_label labels[PROMEX_MAX_LABELS-1] = {};

			px = ctx->px;

			labels[0].name = ist("proxy");
			labels[0].value = ist2(px->id, strlen(px->id));

			/* skip the disabled proxies, global frontend and non-networked ones */
			if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_FE))
				goto next_px;

			/* resume from the saved listener, if any */
			li = ctx->li;
			list_for_each_entry_from(li, &px->conf.listeners, by_fe) {

				if (!li->counters)
					continue;

				labels[1].name = ist("listener");
				labels[1].value = ist2(li->name, strlen(li->name));

				if (!stats_fill_li_stats(px, li, 0, stats,
							 ST_F_TOTAL_FIELDS, &(ctx->field_num)))
					return -1;

				switch (ctx->field_num) {
					case ST_F_STATUS:
						/* one time-series per listener state */
						status = get_li_status(li);
						for (; ctx->obj_state < LI_STATE_COUNT; ctx->obj_state++) {
							val = mkf_u32(FO_STATUS, status == ctx->obj_state);
							labels[2].name = ist("state");
							labels[2].value = ist(li_status_st[ctx->obj_state]);
							if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
										&val, labels, &out, max))
								goto full;
						}
						ctx->obj_state = 0;
						continue;
					default:
						val = stats[ctx->field_num];
				}

				if (!promex_dump_metric(appctx, htx, prefix,
							&promex_st_metrics[ctx->field_num],
							&val, labels, &out, max))
					goto full;
			}

		  next_px:
			px = px->next;
			ctx->px = px;
			ctx->li = (px ? LIST_NEXT(&px->conf.listeners, struct listener *, by_fe) : NULL);
		}
		/* current metric done for all listeners: re-arm the header and rewind */
		ctx->flags |= PROMEX_FL_METRIC_HDR;
		ctx->px = proxies_list;
		ctx->li = LIST_NEXT(&proxies_list->conf.listeners, struct listener *, by_fe);
	}

  end:
	if (out.len) {
		if (!htx_add_data_atonce(htx, out))
			return -1; /* Unexpected and unrecoverable error */
		channel_add_input(chn, out.len);
	}
	return ret;
  full:
	/* save the current listener so the dump resumes from it; all "goto full"
	 * sites are inside the listener loop, where <li> is always set */
	ctx->li = li;
	ret = 0;
	goto end;
}
+
/* Dump backends metrics (prefixed by "haproxy_backend_"). It returns 1 on success,
 * 0 if <htx> is full and -1 in case of any error.
 *
 * The dump is resumable: <ctx->field_num> and <ctx->px> keep the current
 * metric/backend position, and <ctx->obj_state> the position inside the
 * per-state sub-loops (status / aggregated server states).
 */
static int promex_dump_back_metrics(struct appctx *appctx, struct htx *htx)
{
	static struct ist prefix = IST("haproxy_backend_");
	struct promex_ctx *ctx = appctx->svcctx;
	struct proxy *px;
	struct server *sv;
	struct field val;
	struct channel *chn = sc_ic(appctx_sc(appctx));
	struct ist out = ist2(trash.area, 0);
	size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
	struct field *stats = stat_l[STATS_DOMAIN_PROXY];
	int ret = 1;
	double secs;
	enum promex_back_state bkd_state;
	enum promex_srv_state srv_state;
	enum healthcheck_status srv_check_status;

	for (;ctx->field_num < ST_F_TOTAL_FIELDS; ctx->field_num++) {
		/* skip fields not exported in the current scope */
		if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
			continue;

		while (ctx->px) {
			struct promex_label labels[PROMEX_MAX_LABELS-1] = {};
			unsigned int srv_state_count[PROMEX_SRV_STATE_COUNT] = { 0 };
			unsigned int srv_check_count[HCHK_STATUS_SIZE] = { 0 };
			const char *check_state;

			px = ctx->px;

			labels[0].name = ist("proxy");
			labels[0].value = ist2(px->id, strlen(px->id));

			/* skip the disabled proxies, global frontend and non-networked ones */
			if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
				goto next_px;

			if (!stats_fill_be_stats(px, 0, stats, ST_F_TOTAL_FIELDS, &(ctx->field_num)))
				return -1;

			switch (ctx->field_num) {
				case ST_F_AGG_SRV_CHECK_STATUS: // DEPRECATED
				case ST_F_AGG_SRV_STATUS:
					if (!px->srv)
						goto next_px;
					/* count this backend's servers per state, then emit
					 * one gauge per state */
					sv = px->srv;
					while (sv) {
						srv_state = promex_srv_status(sv);
						srv_state_count[srv_state] += 1;
						sv = sv->next;
					}
					for (; ctx->obj_state < PROMEX_SRV_STATE_COUNT; ctx->obj_state++) {
						val = mkf_u32(FN_GAUGE, srv_state_count[ctx->obj_state]);
						labels[1].name = ist("state");
						labels[1].value = promex_srv_st[ctx->obj_state];
						if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
									&val, labels, &out, max))
							goto full;
					}
					ctx->obj_state = 0;
					goto next_px;
				case ST_F_AGG_CHECK_STATUS:
					if (!px->srv)
						goto next_px;
					/* count servers per health-check status; only servers
					 * with an enabled, non-paused check are considered */
					sv = px->srv;
					while (sv) {
						if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED) {
							srv_check_status = sv->check.status;
							srv_check_count[srv_check_status] += 1;
						}
						sv = sv->next;
					}
					for (; ctx->obj_state < HCHK_STATUS_SIZE; ctx->obj_state++) {
						/* only final check results are exported */
						if (get_check_status_result(ctx->obj_state) < CHK_RES_FAILED)
							continue;
						val = mkf_u32(FO_STATUS, srv_check_count[ctx->obj_state]);
						check_state = get_check_status_info(ctx->obj_state);
						labels[1].name = ist("state");
						labels[1].value = ist(check_state);
						if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
									&val, labels, &out, max))
							goto full;
					}
					ctx->obj_state = 0;
					goto next_px;
				case ST_F_STATUS:
					/* a backend is "up" when it has usable weight or no server at all */
					bkd_state = ((px->lbprm.tot_weight > 0 || !px->srv) ? 1 : 0);
					for (; ctx->obj_state < PROMEX_BACK_STATE_COUNT; ctx->obj_state++) {
						labels[1].name = ist("state");
						labels[1].value = promex_back_st[ctx->obj_state];
						val = mkf_u32(FO_STATUS, bkd_state == ctx->obj_state);
						if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
									&val, labels, &out, max))
							goto full;
					}
					ctx->obj_state = 0;
					goto next_px;
				/* times are converted from milliseconds to seconds */
				case ST_F_QTIME:
					secs = (double)swrate_avg(px->be_counters.q_time, TIME_STATS_SAMPLES) / 1000.0;
					val = mkf_flt(FN_AVG, secs);
					break;
				case ST_F_CTIME:
					secs = (double)swrate_avg(px->be_counters.c_time, TIME_STATS_SAMPLES) / 1000.0;
					val = mkf_flt(FN_AVG, secs);
					break;
				case ST_F_RTIME:
					secs = (double)swrate_avg(px->be_counters.d_time, TIME_STATS_SAMPLES) / 1000.0;
					val = mkf_flt(FN_AVG, secs);
					break;
				case ST_F_TTIME:
					secs = (double)swrate_avg(px->be_counters.t_time, TIME_STATS_SAMPLES) / 1000.0;
					val = mkf_flt(FN_AVG, secs);
					break;
				case ST_F_QT_MAX:
					secs = (double)px->be_counters.qtime_max / 1000.0;
					val = mkf_flt(FN_MAX, secs);
					break;
				case ST_F_CT_MAX:
					secs = (double)px->be_counters.ctime_max / 1000.0;
					val = mkf_flt(FN_MAX, secs);
					break;
				case ST_F_RT_MAX:
					secs = (double)px->be_counters.dtime_max / 1000.0;
					val = mkf_flt(FN_MAX, secs);
					break;
				case ST_F_TT_MAX:
					secs = (double)px->be_counters.ttime_max / 1000.0;
					val = mkf_flt(FN_MAX, secs);
					break;
				case ST_F_REQ_TOT:
				case ST_F_CACHE_LOOKUPS:
				case ST_F_CACHE_HITS:
				case ST_F_COMP_IN:
				case ST_F_COMP_OUT:
				case ST_F_COMP_BYP:
				case ST_F_COMP_RSP:
					/* HTTP-only counters */
					if (px->mode != PR_MODE_HTTP)
						goto next_px;
					val = stats[ctx->field_num];
					break;
				case ST_F_HRSP_1XX:
				case ST_F_HRSP_2XX:
				case ST_F_HRSP_3XX:
				case ST_F_HRSP_4XX:
				case ST_F_HRSP_5XX:
				case ST_F_HRSP_OTHER:
					if (px->mode != PR_MODE_HTTP)
						goto next_px;
					/* all http_responses_total series share one header */
					if (ctx->field_num != ST_F_HRSP_1XX)
						ctx->flags &= ~PROMEX_FL_METRIC_HDR;
					labels[1].name = ist("code");
					labels[1].value = promex_hrsp_code[ctx->field_num - ST_F_HRSP_1XX];
					val = stats[ctx->field_num];
					break;

				default:
					val = stats[ctx->field_num];
			}

			if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
						&val, labels, &out, max))
				goto full;
		  next_px:
			ctx->px = px->next;
		}
		/* current metric done for all backends: re-arm the header and rewind */
		ctx->flags |= PROMEX_FL_METRIC_HDR;
		ctx->px = proxies_list;
	}

  end:
	if (out.len) {
		if (!htx_add_data_atonce(htx, out))
			return -1; /* Unexpected and unrecoverable error */
		channel_add_input(chn, out.len);
	}
	return ret;
  full:
	ret = 0;
	goto end;
}
+
/* Dump servers metrics (prefixed by "haproxy_server_"). It returns 1 on success,
 * 0 if <htx> is full and -1 in case of any error.
 *
 * The dump is resumable: <ctx->field_num>, <ctx->px> and <ctx->sv> keep the
 * current metric/backend/server position across re-entries.
 */
static int promex_dump_srv_metrics(struct appctx *appctx, struct htx *htx)
{
	static struct ist prefix = IST("haproxy_server_");
	struct promex_ctx *ctx = appctx->svcctx;
	struct proxy *px;
	struct server *sv;
	struct field val;
	struct channel *chn = sc_ic(appctx_sc(appctx));
	struct ist out = ist2(trash.area, 0);
	size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
	struct field *stats = stat_l[STATS_DOMAIN_PROXY];
	int ret = 1;
	double secs;
	enum promex_srv_state state;
	const char *check_state;

	for (;ctx->field_num < ST_F_TOTAL_FIELDS; ctx->field_num++) {
		/* skip fields not exported in the current scope */
		if (!(promex_st_metrics[ctx->field_num].flags & ctx->flags))
			continue;

		while (ctx->px) {
			struct promex_label labels[PROMEX_MAX_LABELS-1] = {};

			px = ctx->px;

			labels[0].name = ist("proxy");
			labels[0].value = ist2(px->id, strlen(px->id));

			/* skip the disabled proxies, global frontend and non-networked ones */
			if ((px->flags & PR_FL_DISABLED) || px->uuid <= 0 || !(px->cap & PR_CAP_BE))
				goto next_px;

			while (ctx->sv) {
				sv = ctx->sv;

				labels[1].name = ist("server");
				labels[1].value = ist2(sv->id, strlen(sv->id));

				if (!stats_fill_sv_stats(px, sv, 0, stats, ST_F_TOTAL_FIELDS, &(ctx->field_num)))
					return -1;

				/* honor the "no-maint" URI parameter */
				if ((ctx->flags & PROMEX_FL_NO_MAINT_SRV) && (sv->cur_admin & SRV_ADMF_MAINT))
					goto next_sv;

				switch (ctx->field_num) {
					case ST_F_STATUS:
						/* one time-series per server state */
						state = promex_srv_status(sv);
						for (; ctx->obj_state < PROMEX_SRV_STATE_COUNT; ctx->obj_state++) {
							val = mkf_u32(FO_STATUS, state == ctx->obj_state);
							labels[2].name = ist("state");
							labels[2].value = promex_srv_st[ctx->obj_state];
							if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
										&val, labels, &out, max))
								goto full;
						}
						ctx->obj_state = 0;
						goto next_sv;
					/* times are converted from milliseconds to seconds */
					case ST_F_QTIME:
						secs = (double)swrate_avg(sv->counters.q_time, TIME_STATS_SAMPLES) / 1000.0;
						val = mkf_flt(FN_AVG, secs);
						break;
					case ST_F_CTIME:
						secs = (double)swrate_avg(sv->counters.c_time, TIME_STATS_SAMPLES) / 1000.0;
						val = mkf_flt(FN_AVG, secs);
						break;
					case ST_F_RTIME:
						secs = (double)swrate_avg(sv->counters.d_time, TIME_STATS_SAMPLES) / 1000.0;
						val = mkf_flt(FN_AVG, secs);
						break;
					case ST_F_TTIME:
						secs = (double)swrate_avg(sv->counters.t_time, TIME_STATS_SAMPLES) / 1000.0;
						val = mkf_flt(FN_AVG, secs);
						break;
					case ST_F_QT_MAX:
						secs = (double)sv->counters.qtime_max / 1000.0;
						val = mkf_flt(FN_MAX, secs);
						break;
					case ST_F_CT_MAX:
						secs = (double)sv->counters.ctime_max / 1000.0;
						val = mkf_flt(FN_MAX, secs);
						break;
					case ST_F_RT_MAX:
						secs = (double)sv->counters.dtime_max / 1000.0;
						val = mkf_flt(FN_MAX, secs);
						break;
					case ST_F_TT_MAX:
						secs = (double)sv->counters.ttime_max / 1000.0;
						val = mkf_flt(FN_MAX, secs);
						break;
					case ST_F_CHECK_STATUS:
						/* only servers with an enabled, non-paused check */
						if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
							goto next_sv;

						for (; ctx->obj_state < HCHK_STATUS_SIZE; ctx->obj_state++) {
							/* only final check results are exported */
							if (get_check_status_result(ctx->obj_state) < CHK_RES_FAILED)
								continue;
							val = mkf_u32(FO_STATUS, sv->check.status == ctx->obj_state);
							check_state = get_check_status_info(ctx->obj_state);
							labels[2].name = ist("state");
							labels[2].value = ist(check_state);
							if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
										&val, labels, &out, max))
								goto full;
						}
						ctx->obj_state = 0;
						goto next_sv;
					case ST_F_CHECK_CODE:
						if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) != CHK_ST_ENABLED)
							goto next_sv;
						/* layer 5-7 code only exists once the check reached that stage */
						val = mkf_u32(FN_OUTPUT, (sv->check.status < HCHK_STATUS_L57DATA) ? 0 : sv->check.code);
						break;
					case ST_F_CHECK_DURATION:
						if (sv->check.status < HCHK_STATUS_CHECKED)
							goto next_sv;
						secs = (double)sv->check.duration / 1000.0;
						val = mkf_flt(FN_DURATION, secs);
						break;
					case ST_F_REQ_TOT:
						/* HTTP-only counter: skip the whole proxy */
						if (px->mode != PR_MODE_HTTP)
							goto next_px;
						val = stats[ctx->field_num];
						break;
					case ST_F_HRSP_1XX:
					case ST_F_HRSP_2XX:
					case ST_F_HRSP_3XX:
					case ST_F_HRSP_4XX:
					case ST_F_HRSP_5XX:
					case ST_F_HRSP_OTHER:
						if (px->mode != PR_MODE_HTTP)
							goto next_px;
						/* all http_responses_total series share one header */
						if (ctx->field_num != ST_F_HRSP_1XX)
							ctx->flags &= ~PROMEX_FL_METRIC_HDR;
						labels[2].name = ist("code");
						labels[2].value = promex_hrsp_code[ctx->field_num - ST_F_HRSP_1XX];
						val = stats[ctx->field_num];
						break;

					default:
						val = stats[ctx->field_num];
				}

				if (!promex_dump_metric(appctx, htx, prefix, &promex_st_metrics[ctx->field_num],
							&val, labels, &out, max))
					goto full;
			  next_sv:
				ctx->sv = sv->next;
			}

		  next_px:
			ctx->px = px->next;
			ctx->sv = (ctx->px ? ctx->px->srv : NULL);
		}
		/* current metric done for all servers: re-arm the header and rewind */
		ctx->flags |= PROMEX_FL_METRIC_HDR;
		ctx->px = proxies_list;
		ctx->sv = (ctx->px ? ctx->px->srv : NULL);
	}


  end:
	if (out.len) {
		if (!htx_add_data_atonce(htx, out))
			return -1; /* Unexpected and unrecoverable error */
		channel_add_input(chn, out.len);
	}
	return ret;
  full:
	ret = 0;
	goto end;
}
+
+/* Dump stick table metrics (prefixed by "haproxy_sticktable_"). It returns 1 on success,
+ * 0 if <htx> is full and -1 in case of any error. */
+static int promex_dump_sticktable_metrics(struct appctx *appctx, struct htx *htx)
+{
+ static struct ist prefix = IST("haproxy_sticktable_");
+ struct promex_ctx *ctx = appctx->svcctx;
+ struct field val;
+ struct channel *chn = sc_ic(appctx_sc(appctx));
+ struct ist out = ist2(trash.area, 0);
+ size_t max = htx_get_max_blksz(htx, channel_htx_recv_max(chn, htx));
+ int ret = 1;
+ struct stktable *t;
+
+ for (; ctx->field_num < STICKTABLE_TOTAL_FIELDS; ctx->field_num++) {
+ if (!(promex_sticktable_metrics[ctx->field_num].flags & ctx->flags))
+ continue;
+
+ while (ctx->st) {
+ struct promex_label labels[PROMEX_MAX_LABELS - 1] = {};
+
+ t = ctx->st;
+ if (!t->size)
+ goto next_px;
+
+ labels[0].name = ist("name");
+ labels[0].value = ist2(t->id, strlen(t->id));
+ labels[1].name = ist("type");
+ labels[1].value = ist2(stktable_types[t->type].kw, strlen(stktable_types[t->type].kw));
+ switch (ctx->field_num) {
+ case STICKTABLE_SIZE:
+ val = mkf_u32(FN_GAUGE, t->size);
+ break;
+ case STICKTABLE_USED:
+ val = mkf_u32(FN_GAUGE, t->current);
+ break;
+ default:
+ goto next_px;
+ }
+
+ if (!promex_dump_metric(appctx, htx, prefix,
+ &promex_sticktable_metrics[ctx->field_num],
+ &val, labels, &out, max))
+ goto full;
+
+ next_px:
+ ctx->st = t->next;
+ }
+ ctx->flags |= PROMEX_FL_METRIC_HDR;
+ ctx->st = stktables_list;
+ }
+
+ end:
+ if (out.len) {
+ if (!htx_add_data_atonce(htx, out))
+ return -1; /* Unexpected and unrecoverable error */
+ channel_add_input(chn, out.len);
+ }
+ return ret;
+ full:
+ ret = 0;
+ goto end;
+}
+
+/* Dump all metrics (global, frontends, backends and servers) depending on the
+ * dumper state (appctx->st1). It returns 1 on success, 0 if <htx> is full and
+ * -1 in case of any error.
+ * Uses <appctx.ctx.stats.px> as a pointer to the current proxy and <sv>/<li>
+ * as pointers to the current server/listener respectively.
+ */
+static int promex_dump_metrics(struct appctx *appctx, struct stconn *sc, struct htx *htx)
+{
+ struct promex_ctx *ctx = appctx->svcctx;
+ int ret;
+
+ switch (appctx->st1) {
+ case PROMEX_DUMPER_INIT:
+ ctx->px = NULL;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_INFO_METRIC);
+ ctx->obj_state = 0;
+ ctx->field_num = INF_NAME;
+ appctx->st1 = PROMEX_DUMPER_GLOBAL;
+ __fallthrough;
+
+ case PROMEX_DUMPER_GLOBAL:
+ if (ctx->flags & PROMEX_FL_SCOPE_GLOBAL) {
+ ret = promex_dump_global_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = proxies_list;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags &= ~PROMEX_FL_INFO_METRIC;
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_FRONT_METRIC);
+ ctx->obj_state = 0;
+ ctx->field_num = ST_F_PXNAME;
+ appctx->st1 = PROMEX_DUMPER_FRONT;
+ __fallthrough;
+
+ case PROMEX_DUMPER_FRONT:
+ if (ctx->flags & PROMEX_FL_SCOPE_FRONT) {
+ ret = promex_dump_front_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = proxies_list;
+ ctx->st = NULL;
+ ctx->li = LIST_NEXT(&proxies_list->conf.listeners, struct listener *, by_fe);
+ ctx->sv = NULL;
+ ctx->flags &= ~PROMEX_FL_FRONT_METRIC;
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_LI_METRIC);
+ ctx->obj_state = 0;
+ ctx->field_num = ST_F_PXNAME;
+ appctx->st1 = PROMEX_DUMPER_LI;
+ __fallthrough;
+
+ case PROMEX_DUMPER_LI:
+ if (ctx->flags & PROMEX_FL_SCOPE_LI) {
+ ret = promex_dump_listener_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = proxies_list;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags &= ~PROMEX_FL_LI_METRIC;
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_BACK_METRIC);
+ ctx->obj_state = 0;
+ ctx->field_num = ST_F_PXNAME;
+ appctx->st1 = PROMEX_DUMPER_BACK;
+ __fallthrough;
+
+ case PROMEX_DUMPER_BACK:
+ if (ctx->flags & PROMEX_FL_SCOPE_BACK) {
+ ret = promex_dump_back_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = proxies_list;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = ctx->px ? ctx->px->srv : NULL;
+ ctx->flags &= ~PROMEX_FL_BACK_METRIC;
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_SRV_METRIC);
+ ctx->obj_state = 0;
+ ctx->field_num = ST_F_PXNAME;
+ appctx->st1 = PROMEX_DUMPER_SRV;
+ __fallthrough;
+
+ case PROMEX_DUMPER_SRV:
+ if (ctx->flags & PROMEX_FL_SCOPE_SERVER) {
+ ret = promex_dump_srv_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = NULL;
+ ctx->st = stktables_list;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags &= ~(PROMEX_FL_METRIC_HDR|PROMEX_FL_SRV_METRIC);
+ ctx->flags |= (PROMEX_FL_METRIC_HDR|PROMEX_FL_STICKTABLE_METRIC);
+ ctx->field_num = STICKTABLE_SIZE;
+ appctx->st1 = PROMEX_DUMPER_STICKTABLE;
+ __fallthrough;
+
+ case PROMEX_DUMPER_STICKTABLE:
+ if (ctx->flags & PROMEX_FL_SCOPE_STICKTABLE) {
+ ret = promex_dump_sticktable_metrics(appctx, htx);
+ if (ret <= 0) {
+ if (ret == -1)
+ goto error;
+ goto full;
+ }
+ }
+
+ ctx->px = NULL;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags &= ~(PROMEX_FL_METRIC_HDR|PROMEX_FL_STICKTABLE_METRIC);
+ ctx->field_num = 0;
+ appctx->st1 = PROMEX_DUMPER_DONE;
+ __fallthrough;
+
+ case PROMEX_DUMPER_DONE:
+ default:
+ break;
+ }
+
+ return 1;
+
+ full:
+ sc_need_room(sc, channel_htx_recv_max(sc_ic(appctx_sc(appctx)), htx) + 1);
+ return 0;
+ error:
+ /* unrecoverable error */
+ ctx->px = NULL;
+ ctx->st = NULL;
+ ctx->li = NULL;
+ ctx->sv = NULL;
+ ctx->flags = 0;
+ ctx->field_num = 0;
+ appctx->st1 = PROMEX_DUMPER_DONE;
+ return -1;
+}
+
/* Parse the query string of request URI to filter the metrics. It returns 1 on
 * success and -1 on error (in which case a 400 response is pushed to the
 * client). Supported parameters: "scope" (repeatable) and "no-maint".
 */
static int promex_parse_uri(struct appctx *appctx, struct stconn *sc)
{
	struct promex_ctx *ctx = appctx->svcctx;
	struct channel *req = sc_oc(sc);
	struct channel *res = sc_ic(sc);
	struct htx *req_htx, *res_htx;
	struct htx_sl *sl;
	char *p, *key, *value;
	const char *end;
	struct buffer *err;
	int default_scopes = PROMEX_FL_SCOPE_ALL;
	int len;

	/* Get the query-string */
	req_htx = htxbuf(&req->buf);
	sl = http_get_stline(req_htx);
	if (!sl)
		goto error;
	p = http_find_param_list(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), '?');
	if (!p)
		goto end;
	end = HTX_SL_REQ_UPTR(sl) + HTX_SL_REQ_ULEN(sl);

	/* copy the query-string into the trash chunk so it can be parsed and
	 * url-decoded in place.
	 * NOTE(review): no check that <len> fits in trash.size — presumably the
	 * URI is already bounded by the buffer size; confirm against callers.
	 */
	len = end - p;
	chunk_reset(&trash);
	memcpy(trash.area, p, len);
	trash.area[len] = 0;
	p = trash.area;
	end = trash.area + len;

	/* Parse the query-string: split on '&', stop at '#'. Separators are
	 * overwritten with NULs so <key> and <value> become C strings. */
	while (p < end && *p && *p != '#') {
		value = NULL;

		/* decode parameter name */
		key = p;
		while (p < end && *p != '=' && *p != '&' && *p != '#')
			++p;
		/* found a value */
		if (*p == '=') {
			*(p++) = 0;
			value = p;
		}
		else if (*p == '&')
			*(p++) = 0;
		else if (*p == '#')
			*p = 0;
		len = url_decode(key, 1);
		if (len == -1)
			goto error;

		/* decode value */
		if (value) {
			while (p < end && *p != '=' && *p != '&' && *p != '#')
				++p;
			if (*p == '=')
				goto error; /* a second '=' in the same pair is invalid */
			if (*p == '&')
				*(p++) = 0;
			else if (*p == '#')
				*p = 0;
			len = url_decode(value, 1);
			if (len == -1)
				goto error;
		}

		if (strcmp(key, "scope") == 0) {
			default_scopes = 0; /* at least a scope defined, unset default scopes */
			if (!value)
				goto error;
			else if (*value == 0)
				ctx->flags &= ~PROMEX_FL_SCOPE_ALL;
			else if (*value == '*')
				ctx->flags |= PROMEX_FL_SCOPE_ALL;
			else if (strcmp(value, "global") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_GLOBAL;
			else if (strcmp(value, "server") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_SERVER;
			else if (strcmp(value, "backend") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_BACK;
			else if (strcmp(value, "frontend") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_FRONT;
			else if (strcmp(value, "listener") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_LI;
			else if (strcmp(value, "sticktable") == 0)
				ctx->flags |= PROMEX_FL_SCOPE_STICKTABLE;
			else
				goto error;
		}
		else if (strcmp(key, "no-maint") == 0)
			ctx->flags |= PROMEX_FL_NO_MAINT_SRV;
	}

  end:
	/* no explicit scope means all scopes */
	ctx->flags |= default_scopes;
	return 1;

  error:
	/* reply a 400 Bad Request from the pre-built error chunks */
	err = &http_err_chunks[HTTP_ERR_400];
	channel_erase(res);
	res->buf.data = b_data(err);
	memcpy(res->buf.area, b_head(err), b_data(err));
	res_htx = htx_from_buf(&res->buf);
	channel_add_input(res, res_htx->data);
	return -1;
}
+
/* Send HTTP headers of the response. It returns 1 on success and 0 if <htx> is
 * full (in which case the partially-built headers are discarded and the
 * stream connector is asked for more room so the applet is called again).
 */
static int promex_send_headers(struct appctx *appctx, struct stconn *sc, struct htx *htx)
{
	struct channel *chn = sc_ic(sc);
	struct htx_sl *sl;
	unsigned int flags;

	/* chunked response: the total length is unknown when headers are sent */
	flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_ENC|HTX_SL_F_XFER_LEN|HTX_SL_F_CHNK);
	sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), ist("200"), ist("OK"));
	if (!sl)
		goto full;
	sl->info.res.status = 200;
	/* "version=0.0.4" is the Prometheus text exposition format version */
	if (!htx_add_header(htx, ist("Cache-Control"), ist("no-cache")) ||
	    !htx_add_header(htx, ist("Content-Type"), ist("text/plain; version=0.0.4")) ||
	    !htx_add_header(htx, ist("Transfer-Encoding"), ist("chunked")) ||
	    !htx_add_endof(htx, HTX_BLK_EOH))
		goto full;

	channel_add_input(chn, htx->data);
	return 1;
  full:
	htx_reset(htx);
	sc_need_room(sc, 0);
	return 0;
}
+
/* Initialize the promex applet context: reserve the service context storage
 * and set the initial applet state. Always returns 0 (success) — the old
 * comment describing 1/0/-1 return values did not match this code.
 */
static int promex_appctx_init(struct appctx *appctx)
{
	applet_reserve_svcctx(appctx, sizeof(struct promex_ctx));
	appctx->st0 = PROMEX_ST_INIT;
	return 0;
}
+
/* The main I/O handler for the promex applet. It drives the response state
 * machine in appctx->st0 (INIT -> HEAD -> DUMP -> DONE -> END), yielding
 * whenever the response buffer is full and resuming on the next call.
 */
static void promex_appctx_handle_io(struct appctx *appctx)
{
	struct stconn *sc = appctx_sc(appctx);
	struct stream *s = __sc_strm(sc);
	struct channel *req = sc_oc(sc);
	struct channel *res = sc_ic(sc);
	struct htx *req_htx, *res_htx;
	int ret;

	res_htx = htx_from_buf(&res->buf);

	/* abort if the endpoint was closed or hit an error */
	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
		goto out;

	/* Check if the input buffer is available. */
	if (!b_size(&res->buf)) {
		sc_need_room(sc, 0);
		goto out;
	}

	switch (appctx->st0) {
		case PROMEX_ST_INIT:
			ret = promex_parse_uri(appctx, sc);
			if (ret <= 0) {
				if (ret == -1)
					goto error;
				goto out;
			}
			appctx->st0 = PROMEX_ST_HEAD;
			appctx->st1 = PROMEX_DUMPER_INIT;
			__fallthrough;

		case PROMEX_ST_HEAD:
			if (!promex_send_headers(appctx, sc, res_htx))
				goto out;
			/* a HEAD request gets headers only, no body */
			appctx->st0 = ((s->txn->meth == HTTP_METH_HEAD) ? PROMEX_ST_DONE : PROMEX_ST_DUMP);
			__fallthrough;

		case PROMEX_ST_DUMP:
			ret = promex_dump_metrics(appctx, sc, res_htx);
			if (ret <= 0) {
				if (ret == -1)
					goto error;
				goto out;
			}
			appctx->st0 = PROMEX_ST_DONE;
			__fallthrough;

		case PROMEX_ST_DONE:
			/* no more data are expected. If the response buffer is
			 * empty, be sure to add something (EOT block in this
			 * case) to have something to send. It is important to
			 * be sure the EOM flags will be handled by the
			 * endpoint.
			 */
			if (htx_is_empty(res_htx)) {
				if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
					sc_need_room(sc, sizeof(struct htx_blk) + 1);
					goto out;
				}
				channel_add_input(res, 1);
			}
			res_htx->flags |= HTX_FL_EOM;
			se_fl_set(appctx->sedesc, SE_FL_EOI);
			appctx->st0 = PROMEX_ST_END;
			__fallthrough;

		case PROMEX_ST_END:
			se_fl_set(appctx->sedesc, SE_FL_EOS);
	}

  out:
	htx_to_buf(res_htx, &res->buf);

	/* eat the whole request */
	if (co_data(req)) {
		req_htx = htx_from_buf(&req->buf);
		co_htx_skip(req, req_htx, co_data(req));
	}
	return;

  error:
	se_fl_set(appctx->sedesc, SE_FL_ERROR);
	goto out;
}
+
/* The promex applet descriptor: <init> prepares the per-connection context
 * and <fct> is the I/O handler invoked on each applet wakeup. */
struct applet promex_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<PROMEX>", /* used for logging */
	.init = promex_appctx_init,
	.fct = promex_appctx_handle_io,
};
+
+static enum act_parse_ret service_parse_prometheus_exporter(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ /* Prometheus exporter service is only available on "http-request" rulesets */
+ if (rule->from != ACT_F_HTTP_REQ) {
+ memprintf(err, "Prometheus exporter service only available on 'http-request' rulesets");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* Add applet pointer in the rule. */
+ rule->applet = promex_applet;
+
+ return ACT_RET_PRS_OK;
+}
+static void promex_register_build_options(void)
+{
+ char *ptr = NULL;
+
+ memprintf(&ptr, "Built with the Prometheus exporter as a service");
+ hap_register_build_opts(ptr, 1);
+}
+
+
/* Register the "prometheus-exporter" service keyword and the build-options
 * string at startup. */
static struct action_kw_list service_actions = { ILH, {
	{ "prometheus-exporter", service_parse_prometheus_exporter },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, service_keywords_register, &service_actions);
INITCALL0(STG_REGISTER, promex_register_build_options);
diff --git a/addons/wurfl/dummy/Makefile b/addons/wurfl/dummy/Makefile
new file mode 100644
index 0000000..df08288
--- /dev/null
+++ b/addons/wurfl/dummy/Makefile
@@ -0,0 +1,13 @@
# makefile for the dummy wurfl library
# builds a static archive (libwurfl.a) from dummy-wurfl.o
# (the old comments claimed a shared library and an install step in
# /usr/lib and /usr/include/wurfl; neither exists in this makefile)

build: libwurfl.a

libwurfl.a: dummy-wurfl.o
	ar rv $@ $<

clean:
	rm -rf *.a *.o
diff --git a/addons/wurfl/dummy/dummy-wurfl.c b/addons/wurfl/dummy/dummy-wurfl.c
new file mode 100644
index 0000000..0d5f068
--- /dev/null
+++ b/addons/wurfl/dummy/dummy-wurfl.c
@@ -0,0 +1,126 @@
+/*
+ * InFuze C API - HAPROXY Dummy library version of include
+ *
+ * Author : Paul Stephen Borile, Mon Apr 8, 2019
+ * Copyright (c) ScientiaMobile, Inc.
+ * http://www.scientiamobile.com
+ *
+ * This is a dummy implementation of the wurfl C API that builds and runs
+ * like the normal API simply without returning device detection data
+ *
+ *
+ */
+
+#include "wurfl/wurfl.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
/* Return the API version string; the trailing ".100" marks the dummy build. */
const char *wurfl_get_api_version(void)
{
	static const char dummy_api_version[] = "1.11.2.100";

	return dummy_api_version;
}
+
/* Dummy engine constructor: returns a fixed, non-NULL fake handle so callers
 * can proceed as if creation succeeded. */
wurfl_handle wurfl_create(void)
{
	return (void*) 0xbeffa;
}

/* Dummy destructor: nothing to release. */
void wurfl_destroy(wurfl_handle handle)
{
	return;
}

/* The following configuration setters all accept and ignore their input,
 * unconditionally reporting success. */
wurfl_error wurfl_set_root(wurfl_handle hwurfl, const char* root)
{
	return WURFL_OK;
}
wurfl_error wurfl_add_patch(wurfl_handle hwurfl, const char *patch)
{
	return WURFL_OK;
}

wurfl_error wurfl_add_requested_capability(wurfl_handle hwurfl, const char *requested_capability)
{
	return WURFL_OK;
}

/* Fixed error message, returned regardless of the actual engine state. */
const char *wurfl_get_error_message(wurfl_handle hwurfl)
{
	return "wurfl dummy library error message";
}

/* Always reports "capability not supported". */
int wurfl_has_virtual_capability(wurfl_handle hwurfl, const char *virtual_capability)
{
	return 0;
}

wurfl_error wurfl_set_cache_provider(wurfl_handle hwurfl, wurfl_cache_provider cache_provider, const char *config)
{
	return WURFL_OK;
}
+
/* Dummy load: no data file is read, success is always reported. */
wurfl_error wurfl_load(wurfl_handle hwurfl)
{
	return WURFL_OK;
}

/* Dummy lookup: exercises the caller-supplied header callback (once with a
 * header that is likely present, once with one that is not) so the callback
 * path gets tested, then returns a fixed fake device handle. */
wurfl_device_handle wurfl_lookup(wurfl_handle hwurfl, wurfl_header_retrieve_callback header_retrieve_callback, const void *header_retrieve_callback_data)
{
	// call callback, on a probably existing header
	const char *hvalue = header_retrieve_callback("User-Agent", header_retrieve_callback_data);
	// and on a non existing one
	hvalue = header_retrieve_callback("Non-Existing-Header", header_retrieve_callback_data);
	(void)hvalue;
	return (void *) 0xdeffa;
}
+
/* Dummy device accessors: each returns a fixed placeholder string (or value)
 * regardless of the device handle or requested capability. */
const char *wurfl_device_get_capability(wurfl_device_handle hwurfldevice, const char *capability)
{
	return "dummy_cap_val";
}

const char *wurfl_device_get_virtual_capability(wurfl_device_handle hwurfldevice, const char *capability)
{
	return "dummy_vcap_val";
}

/* Dummy destructor: nothing to release. */
void wurfl_device_destroy(wurfl_device_handle handle)
{
	return;
}

const char *wurfl_device_get_id(wurfl_device_handle hwurfldevice)
{
	return "generic_dummy_device";
}

const char *wurfl_device_get_root_id(wurfl_device_handle hwurfldevice)
{
	return "generic_dummy_device";
}

const char *wurfl_device_get_original_useragent(wurfl_device_handle hwurfldevice)
{
	return "original_useragent";
}
const char *wurfl_device_get_normalized_useragent(wurfl_device_handle hwurfldevice)
{
	return "normalized_useragent";
}
int wurfl_device_is_actual_device_root(wurfl_device_handle hwurfldevice)
{
	return 1;
}

const char *wurfl_get_wurfl_info(wurfl_handle hwurfl)
{
	return "dummy wurfl info";
}

const char *wurfl_get_last_load_time_as_string(wurfl_handle hwurfl)
{
	return "dummy wurfl last load time";
}
+
+#pragma GCC diagnostic pop
diff --git a/addons/wurfl/dummy/wurfl/wurfl.h b/addons/wurfl/dummy/wurfl/wurfl.h
new file mode 100644
index 0000000..7659561
--- /dev/null
+++ b/addons/wurfl/dummy/wurfl/wurfl.h
@@ -0,0 +1,409 @@
+/*
+ * InFuze C API - HAPROXY Dummy library version of include
+ *
+ * Copyright (c) ScientiaMobile, Inc.
+ * http://www.scientiamobile.com
+ *
+ * This software package is the property of ScientiaMobile Inc. and is distributed under
+ * a dual licensing scheme:
+ *
+ * 1) commercially according to a contract between the Licensee and ScientiaMobile Inc. (Licensor).
+ * If you represent the Licensee, please refer to the licensing agreement which has been signed
+ * between the two parties. If you do not represent the Licensee, you are not authorized to use
+ * this software in any way.
+ *
+ * 2) LGPL when used in the context of the HAProxy project with the purpose of testing compatibility
+ * of HAProxy with ScientiaMobile software.
+ *
+ */
+
+#ifndef _WURFL_H_
+#define _WURFL_H_
+
+#include <time.h>
+
+#if defined (__GNUC__) || defined (__clang__)
+#define DEPRECATED __attribute__((deprecated))
+#elif defined(_MSC_VER)
+#define DEPRECATED __declspec(deprecated)
+#else
+#pragma message("WARNING: You need to implement DEPRECATED for this compiler")
+#define DEPRECATED
+#endif
+
+// WURFL error enumeration
+typedef enum {
+ WURFL_OK = 0, //!< no error
+ WURFL_ERROR_INVALID_HANDLE = 1, //!< handle passed to the function is invalid
+ WURFL_ERROR_ALREADY_LOAD = 2, //!< wurfl_load has already been invoked on the specific wurfl_handle
+ WURFL_ERROR_FILE_NOT_FOUND = 3, //!< file not found during wurfl_load or remote data file update
+ WURFL_ERROR_UNEXPECTED_END_OF_FILE = 4, //!< unexpected end of file or parsing error during wurfl_load
+ WURFL_ERROR_INPUT_OUTPUT_FAILURE = 5, //!< error reading stream during wurfl_load or updater accessing local updated data file
+ WURFL_ERROR_DEVICE_NOT_FOUND = 6, //!< specified device is missing
+ WURFL_ERROR_CAPABILITY_NOT_FOUND = 7, //!< specified capability is missing
+ WURFL_ERROR_INVALID_CAPABILITY_VALUE = 8, //!< invalid capability value
+ WURFL_ERROR_VIRTUAL_CAPABILITY_NOT_FOUND = 9, //!< specified virtual capability is missing
+ WURFL_ERROR_CANT_LOAD_CAPABILITY_NOT_FOUND = 10, //!< specified capability is missing
+ WURFL_ERROR_CANT_LOAD_VIRTUAL_CAPABILITY_NOT_FOUND = 11, //!< specified virtual capability is missing
+ WURFL_ERROR_EMPTY_ID = 12, //!< missing id in searching device
+ WURFL_ERROR_CAPABILITY_GROUP_NOT_FOUND = 13, //!< specified capability is missing in its group
+ WURFL_ERROR_CAPABILITY_GROUP_MISMATCH = 14, //!< specified capability mismatch in its group
+ WURFL_ERROR_DEVICE_ALREADY_DEFINED = 15, //!< specified device is already defined
+ WURFL_ERROR_USERAGENT_ALREADY_DEFINED = 16, //!< specified user agent is already defined
+ WURFL_ERROR_DEVICE_HIERARCHY_CIRCULAR_REFERENCE = 17, //!< circular reference in device hierarchy
+ WURFL_ERROR_UNKNOWN = 18, //!< unknown error
+ WURFL_ERROR_INVALID_USERAGENT_PRIORITY = 19, //!< specified override sideloaded browser user agent configuration not valid
+ WURFL_ERROR_INVALID_PARAMETER = 20, //!< invalid parameter
+ WURFL_ERROR_INVALID_CACHE_SIZE = 21, //!< specified an invalid cache size, 0 or a negative value.
+ WURFL_ERROR_XML_CONSISTENCY = 22, //!< WURFL data file is out of date or wrong - some needed device_id/capability is missing
+ WURFL_ERROR_INTERNAL = 23, //!< internal error. If this is an updater issue, please enable and check updater log using wurfl_updater_set_log_path()
+ WURFL_ERROR_VIRTUAL_CAPABILITY_NOT_AVAILABLE = 24, //!< the requested virtual capability has not been licensed
+ WURFL_ERROR_MISSING_USERAGENT = 25, // an XML device definition without mandatory UA has been detected
+ WURFL_ERROR_XML_PARSE = 26, // the XML data file is malformed
+ WURFL_ERROR_UPDATER_INVALID_DATA_URL = 27, // updater data URL is missing or invalid (note: only .zip and .gz formats allowed)
+ WURFL_ERROR_UPDATER_INVALID_LICENSE = 28, // client license is invalid, expired etc
+ WURFL_ERROR_UPDATER_NETWORK_ERROR = 29, // updater request returned an HTTP response != 200, or SSL error, etc. Please enable and check updater log using wurfl_updater_set_log_path()
+ WURFL_ERROR_ENGINE_NOT_INITIALIZED = 30, // prerequisite for executing an update is that the engine has been initialized (i.e., wurfl_load() has been called)
+ WURFL_ERROR_UPDATER_ALREADY_RUNNING = 31, // wurfl_updater_start() can be called just once, when the updater is not running
+ WURFL_ERROR_UPDATER_NOT_RUNNING = 32, // wurfl_updater_stop() can be called just once, when the updater is running
+ WURFL_ERROR_UPDATER_TOO_MANY_REQUESTS = 33, // Updater encountered HTTP 429 error
+ WURFL_ERROR_UPDATER_CMDLINE_DOWNLOADER_UNAVAILABLE = 34, // Curl executable not found. Please check path, etc
+ WURFL_ERROR_UPDATER_TIMEDOUT = 35, // Curl operation timed out.
+ WURFL_ERROR_ROOT_NOT_SET = 36, // set_root() must be called before any load() / reload() and update attempt
+ WURFL_ERROR_WRONG_ENGINE_TARGET = 37, // set_engine_target() was called with a wrong/unrecognized parameter
+ // new errors added in
+
+ WURFL_ERROR_CANNOT_FILTER_STATIC_CAP = 38,
+ WURFL_ENGINE_UNABLE_TO_ALLOCATE_MEMORY = 39,
+ WURFL_ENGINE_NOT_LOADED = 40,
+ WURFL_ERROR_UPDATER_CANNOT_START_THREAD = 41,
+ WURFL_ERROR_ENUM_EMPTY_SET = 42,
+
+ // update when adding errors
+ WURFL_ERROR_LAST = 43
+} wurfl_error;
+
+typedef enum {
+ WURFL_ENGINE_TARGET_HIGH_ACCURACY = 0,
+ WURFL_ENGINE_TARGET_HIGH_PERFORMANCE = 1,
+ WURFL_ENGINE_TARGET_DEFAULT = 2,
+ WURFL_ENGINE_TARGET_FAST_DESKTOP_BROWSER_MATCH = 3,
+} wurfl_engine_target;
+
+typedef enum {
+ WURFL_USERAGENT_PRIORITY_OVERRIDE_SIDELOADED_BROWSER_USERAGENT,
+ WURFL_USERAGENT_PRIORITY_USE_PLAIN_USERAGENT,
+ WURFL_USERAGENT_PRIORITY_INVALID,
+} wurfl_useragent_priority;
+
+typedef enum {
+ WURFL_CACHE_PROVIDER_NONE,
+ WURFL_CACHE_PROVIDER_LRU,
+ WURFL_CACHE_PROVIDER_DOUBLE_LRU,
+} wurfl_cache_provider;
+
+typedef enum {
+ WURFL_MATCH_TYPE_EXACT = 0,
+ WURFL_MATCH_TYPE_CONCLUSIVE = 1,
+ WURFL_MATCH_TYPE_RECOVERY = 2,
+ WURFL_MATCH_TYPE_CATCHALL = 3,
+ WURFL_MATCH_TYPE_HIGHPERFORMANCE = 4, // deprecated. See hereunder.
+ WURFL_MATCH_TYPE_NONE = 5,
+ WURFL_MATCH_TYPE_CACHED = 6,
+ WURFL_MATCH_TYPE_FAST_DESKTOP_BROWSER_MATCH = 7
+} wurfl_match_type;
+
+
+typedef enum {
+ WURFL_UPDATER_FREQ_DAILY = 0,
+ WURFL_UPDATER_FREQ_WEEKLY = 1,
+} wurfl_updater_frequency;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// typedef struct _we_h * wurfl_handle;
+// typedef struct _en_t * wurfl_enum_handle;
+// typedef struct _en_t * wurfl_device_capability_enumerator_handle;
+// typedef struct _en_t * wurfl_capability_enumerator_handle;
+// typedef struct _en_t * wurfl_device_id_enumerator_handle;
+// typedef struct _md_t * wurfl_device_handle;
+
+typedef void * wurfl_handle;
+typedef void * wurfl_enum_handle;
+typedef void * wurfl_device_capability_enumerator_handle;
+typedef void * wurfl_capability_enumerator_handle;
+typedef void * wurfl_device_id_enumerator_handle;
+typedef void * wurfl_device_handle;
+
+const char *wurfl_get_api_version(void);
+wurfl_handle wurfl_create(void);
+void wurfl_destroy(wurfl_handle handle);
+
+// NEW : enable/set api logfile
+wurfl_error wurfl_set_log_path(wurfl_handle hwurfl, const char *log_path);
+// allow writing user stuff on logs : mesg will be prepended by a "USER LOG :" string
+wurfl_error wurfl_log_print(wurfl_handle hwurfl, char *msg);
+
+// Errors
+
+const char *wurfl_get_error_message(wurfl_handle hwurfl);
+wurfl_error wurfl_get_error_code(wurfl_handle hwurfl);
+int wurfl_has_error_message(wurfl_handle hwurfl);
+// deprecated
+void wurfl_clear_error_message(wurfl_handle hwurfl);
+
+const char *wurfl_get_wurfl_info(wurfl_handle hwurfl);
+wurfl_error wurfl_set_root(wurfl_handle hwurfl, const char* root);
+wurfl_error wurfl_add_patch(wurfl_handle hwurfl, const char *patch);
+wurfl_error wurfl_add_requested_capability(wurfl_handle hwurfl, const char *requested_capability);
+DEPRECATED wurfl_error wurfl_set_engine_target(wurfl_handle hwurfl, wurfl_engine_target target);
+DEPRECATED wurfl_engine_target wurfl_get_engine_target(wurfl_handle hwurfl);
+DEPRECATED const char *wurfl_get_engine_target_as_string(wurfl_handle hwurfl);
+DEPRECATED wurfl_error wurfl_set_useragent_priority(wurfl_handle hwurfl, wurfl_useragent_priority useragent_priority);
+DEPRECATED wurfl_useragent_priority wurfl_get_useragent_priority(wurfl_handle hwurfl);
+DEPRECATED const char *wurfl_get_useragent_priority_as_string(wurfl_handle hwurfl);
+wurfl_error wurfl_set_cache_provider(wurfl_handle hwurfl, wurfl_cache_provider cache_provider, const char *config);
+wurfl_error wurfl_load(wurfl_handle hwurfl);
+struct tm *wurfl_get_last_load_time(wurfl_handle hwurfl);
+const char *wurfl_get_last_load_time_as_string(wurfl_handle hwurfl);
+int wurfl_has_capability(wurfl_handle hwurfl, const char *capability);
+int wurfl_has_virtual_capability(wurfl_handle hwurfl, const char *virtual_capability);
+
+/*
+ * enumerators
+ */
+
+/*
+ * new enumerators implementation
+ *
+ * a selector is used to indicate which enumerator we needed
+ WURFL_ENUM_VIRTUAL_CAPABILITIES, WURFL_ENUM_STATIC_CAPABILITIES, WURFL_ENUM_MANDATORY_CAPABILITIES, WURFL_ENUM_WURFLID,
+ */
+
+typedef enum {
+ WURFL_ENUM_STATIC_CAPABILITIES,
+ WURFL_ENUM_VIRTUAL_CAPABILITIES,
+ WURFL_ENUM_MANDATORY_CAPABILITIES,
+ WURFL_ENUM_WURFLID,
+} wurfl_enum_type;
+
+wurfl_enum_handle wurfl_enum_create(wurfl_handle, wurfl_enum_type);
+const char *wurfl_enum_get_name(wurfl_enum_handle handle);
+int wurfl_enum_is_valid(wurfl_enum_handle handle);
+void wurfl_enum_move_next(wurfl_enum_handle handle);
+void wurfl_enum_destroy(wurfl_enum_handle handle);
+
+/* deprecated enumerators */
+// virtual caps
+//DEPRECATED wurfl_capability_enumerator_handle wurfl_get_virtual_capability_enumerator(wurfl_handle hwurfl);
+wurfl_capability_enumerator_handle wurfl_get_virtual_capability_enumerator(wurfl_handle hwurfl);
+
+// all mandatories
+//DEPRECATED wurfl_capability_enumerator_handle wurfl_get_mandatory_capability_enumerator(wurfl_handle hwurfl);
+wurfl_capability_enumerator_handle wurfl_get_mandatory_capability_enumerator(wurfl_handle hwurfl);
+
+// all capabilities
+//DEPRECATED wurfl_capability_enumerator_handle wurfl_get_capability_enumerator(wurfl_handle hwurfl);
+wurfl_capability_enumerator_handle wurfl_get_capability_enumerator(wurfl_handle hwurfl);
+//DEPRECATED const char *wurfl_capability_enumerator_get_name(wurfl_capability_enumerator_handle hwurflcapabilityenumeratorhandle);
+const char *wurfl_capability_enumerator_get_name(wurfl_capability_enumerator_handle hwurflcapabilityenumeratorhandle);
+//DEPRECATED int wurfl_capability_enumerator_is_valid(wurfl_capability_enumerator_handle handle);
+int wurfl_capability_enumerator_is_valid(wurfl_capability_enumerator_handle handle);
+//DEPRECATED void wurfl_capability_enumerator_move_next(wurfl_capability_enumerator_handle handle);
+void wurfl_capability_enumerator_move_next(wurfl_capability_enumerator_handle handle);
+//DEPRECATED void wurfl_capability_enumerator_destroy(wurfl_capability_enumerator_handle handle);
+void wurfl_capability_enumerator_destroy(wurfl_capability_enumerator_handle handle);
+
+// device id enumerator
+//DEPRECATED wurfl_device_id_enumerator_handle wurfl_get_device_id_enumerator(wurfl_handle hwurfl);
+wurfl_device_id_enumerator_handle wurfl_get_device_id_enumerator(wurfl_handle hwurfl);
+//DEPRECATED const char *wurfl_device_id_enumerator_get_device_id(wurfl_device_id_enumerator_handle hwurfldeviceidenumeratorhandle);
+const char *wurfl_device_id_enumerator_get_device_id(wurfl_device_id_enumerator_handle hwurfldeviceidenumeratorhandle);
+//DEPRECATED int wurfl_device_id_enumerator_is_valid(wurfl_device_id_enumerator_handle handle);
+int wurfl_device_id_enumerator_is_valid(wurfl_device_id_enumerator_handle handle);
+//DEPRECATED void wurfl_device_id_enumerator_move_next(wurfl_device_id_enumerator_handle handle);
+void wurfl_device_id_enumerator_move_next(wurfl_device_id_enumerator_handle handle);
+//DEPRECATED void wurfl_device_id_enumerator_destroy(wurfl_device_id_enumerator_handle handle);
+void wurfl_device_id_enumerator_destroy(wurfl_device_id_enumerator_handle handle);
+
+/*
+ * deprecated device enumerators
+ */
+
+//DEPRECATED wurfl_device_capability_enumerator_handle wurfl_device_get_capability_enumerator(wurfl_device_handle hwurfldevice);
+wurfl_device_capability_enumerator_handle wurfl_device_get_capability_enumerator(wurfl_device_handle hwurfldevice);
+//DEPRECATED wurfl_device_capability_enumerator_handle wurfl_device_get_virtual_capability_enumerator(wurfl_device_handle hwurfldevice);
+wurfl_device_capability_enumerator_handle wurfl_device_get_virtual_capability_enumerator(wurfl_device_handle hwurfldevice);
+//DEPRECATED const char *wurfl_device_capability_enumerator_get_name(wurfl_device_capability_enumerator_handle);
+const char *wurfl_device_capability_enumerator_get_name(wurfl_device_capability_enumerator_handle);
+//DEPRECATED int wurfl_device_capability_enumerator_is_valid(wurfl_device_capability_enumerator_handle);
+int wurfl_device_capability_enumerator_is_valid(wurfl_device_capability_enumerator_handle);
+//DEPRECATED void wurfl_device_capability_enumerator_move_next(wurfl_device_capability_enumerator_handle);
+void wurfl_device_capability_enumerator_move_next(wurfl_device_capability_enumerator_handle);
+//DEPRECATED void wurfl_device_capability_enumerator_destroy(wurfl_device_capability_enumerator_handle);
+void wurfl_device_capability_enumerator_destroy(wurfl_device_capability_enumerator_handle);
+
+//DEPRECATED const char *wurfl_device_capability_enumerator_get_value(wurfl_device_capability_enumerator_handle);
+const char *wurfl_device_capability_enumerator_get_value(wurfl_device_capability_enumerator_handle);
+//DEPRECATED int wurfl_device_capability_enumerator_get_value_as_int(wurfl_device_capability_enumerator_handle hwurfldevicecapabilityenumeratorhandle);
+int wurfl_device_capability_enumerator_get_value_as_int(wurfl_device_capability_enumerator_handle hwurfldevicecapabilityenumeratorhandle);
+//DEPRECATED int wurfl_device_capability_enumerator_get_value_as_bool(wurfl_device_capability_enumerator_handle hwurfldevicecapabilityenumeratorhandle);
+int wurfl_device_capability_enumerator_get_value_as_bool(wurfl_device_capability_enumerator_handle hwurfldevicecapabilityenumeratorhandle);
+
+
+/*
+ * Device lookup methods
+ */
+
+typedef const char *(*wurfl_header_retrieve_callback)(const char *header_name, const void *callback_data);
+
+wurfl_device_handle wurfl_lookup(wurfl_handle hwurfl, wurfl_header_retrieve_callback header_retrieve_callback, const void *header_retrieve_callback_data);
+wurfl_device_handle wurfl_lookup_useragent(wurfl_handle hwurfl, const char *useragent);
+wurfl_device_handle wurfl_get_device(wurfl_handle hwurfl, const char *deviceid);
+wurfl_device_handle wurfl_get_device_with_headers(wurfl_handle hwurfl, const char *deviceid, wurfl_header_retrieve_callback header_retrieve_callback, const void *header_retrieve_callback_data);
+
+/*
+ * device related methods
+ */
+
+const char *wurfl_device_get_id(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_root_id(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_useragent(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_original_useragent(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_normalized_useragent(wurfl_device_handle hwurfldevice);
+int wurfl_device_is_actual_device_root(wurfl_device_handle hwurfldevice);
+wurfl_match_type wurfl_device_get_match_type(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_matcher_name(wurfl_device_handle hwurfldevice);
+const char *wurfl_device_get_bucket_matcher_name(wurfl_device_handle hwurfldevice);
+void wurfl_device_destroy(wurfl_device_handle handle);
+
+
+/*
+ * static capability, virtual capability methods
+ */
+
+int wurfl_device_has_capability(wurfl_device_handle hwurfldevice, const char *capability);
+
+const char *wurfl_device_get_capability(wurfl_device_handle hwurfldevice, const char *capability);
+int wurfl_device_get_capability_as_int(wurfl_device_handle hwurfldevice, const char *capability);
+int wurfl_device_get_capability_as_bool(wurfl_device_handle hwurfldevice, const char *capability);
+
+int wurfl_device_has_virtual_capability(wurfl_device_handle hwurfldevice, const char *capability);
+
+const char *wurfl_device_get_virtual_capability(wurfl_device_handle hwurfldevice, const char *capability);
+int wurfl_device_get_virtual_capability_as_int(wurfl_device_handle hwurfldevice, const char *capability);
+int wurfl_device_get_virtual_capability_as_bool(wurfl_device_handle hwurfldevice, const char *capability);
+
+/*
+ * static capability, virtual capability NEW methods
+ */
+
+const char *wurfl_device_get_static_cap(wurfl_device_handle hwdev, const char *cap, wurfl_error *err);
+int wurfl_device_get_static_cap_as_int(wurfl_device_handle hwdev, const char *cap, wurfl_error *err);
+int wurfl_device_get_static_cap_as_bool(wurfl_device_handle hwdev, const char *cap, wurfl_error *err);
+
+const char *wurfl_device_get_virtual_cap(wurfl_device_handle hwdev, const char *vcap, wurfl_error *err);
+int wurfl_device_get_virtual_cap_as_int(wurfl_device_handle hwdev, const char *vcap, wurfl_error *err);
+int wurfl_device_get_virtual_cap_as_bool(wurfl_device_handle hwdev, const char *vcap, wurfl_error *err);
+
+/*
+ * Updater methods
+ */
+
+// Instruct the updater module to log to file any operation/error. If not used, the updater will not log anything.
+// Returns: WURFL_OK if no errors, WURFL_ERROR_INPUT_OUTPUT_FAILURE if the log file cannot be created (no write access rights?)
+// or if you try to reopen the log file anywhere else, i.e. this call can be made just once, any attempt to reopen a different log file will fail.
+wurfl_error wurfl_updater_set_log_path(wurfl_handle hwurfl, const char *log_path);
+
+// Set remote data file URL for downloading via internal updater. Will execute various validation tests
+// eventually returning WURFL_ERROR_UPDATER_XXX errors for various error conditions and logging detailed infos if
+// update logger is enabled.
+wurfl_error wurfl_updater_set_data_url(wurfl_handle hwurfl, const char *data_url);
+
+// Set the updater frequency of automatic updates. Will run a background task with given update frequency.
+wurfl_error wurfl_updater_set_data_frequency(wurfl_handle hwurfl, wurfl_updater_frequency freq);
+
+// Set updater timeouts.
+// There are two timeouts, both in milliseconds : connection timeout and operation timeout.
+// The values are mapped to CURL --connect-timeout and --max-time parameters
+// (after millisecs-to-secs conversion). Note that CURL sub millisecond timeouts don't work for
+// lack of a way to specify decimal values for timeout to curl (using 0.05 for example fails to work
+// on docker machines with "POSIX" locale installed).
+// Connection timeout has a default value of 10 seconds (10000 ms) and refers only to connection phase. Passing 0 will use CURL value "no timeout used".
+// Data transfer timeout has a default value of 600 seconds (600000 ms). Passing 0 will use CURL default value "no timeout used"
+// So, pass 0 to either parameter to set it to "no timeout used"
+// Pass -1 to either parameter to use default values (10 secs, 600 secs)
+// The specified timeouts (if any) are used just in the synchronous (i.e., wurfl_updater_runonce()) API call.
+// The asynchronous background updater always runs with default (CURL) timeouts (i.e., it will wait "as long as needed" for a new data file to be downloaded)
+wurfl_error wurfl_updater_set_data_url_timeouts(wurfl_handle hwurfl, int connection_timeout, int data_transfer_timeout);
+
+// Call a synchronous update. This is a blocking call and will execute the whole process
+// of downloading the new data file, checking for correctness, replacing the data file and restarting the engine.
+// Will keep all old configurations (patches, cache, etc)
+// Returns WURFL_OK if no errors,
+// or WURFL_ERROR_UPDATER_XXX errors for various error conditions, eventually logging detailed infos if
+// update logger is enabled.
+wurfl_error wurfl_updater_runonce(wurfl_handle hwurfl);
+
+// Start the asynchronous update thread. Can be called just once when the updater is stopped;
+// Subsequent/wrong calls will return WURFL_ERROR_UPDATER_ALREADY_RUNNING
+// Will also return WURFL_ERROR_UPDATER_XXX errors for various initialization error conditions (see above), eventually logging detailed infos if
+// update logger is enabled.
+// On success will return WURFL_OK
+wurfl_error wurfl_updater_start(wurfl_handle hwurfl);
+
+// Stop the asynchronous update thread. Can be called just once when the updater is started;
+// Subsequent/wrong calls will return WURFL_ERROR_UPDATER_NOT_RUNNING.
+// On success will return WURFL_OK
+wurfl_error wurfl_updater_stop(wurfl_handle hwurfl);
+
+// Reload and reboot the engine with the given data file. Basically, the same process of a wurfl_updater_runonce but without the file download.
+// Will synchronously load the new root testing for errors, restart the engine with the new data file and overwrite the old data file with the new one.
+// Will keep old configuration (patches, cache, etc)
+// Preconditions: wurfl_set_root() and wurfl_load() must have been called and the new root must be of the same kind (i.e, same extension) as the actual root
+// You can force a reload of the actual set_root() file passing NULL as the newroot
+wurfl_error wurfl_updater_reload_root(wurfl_handle hwurfl, const char *newroot);
+
+// Alternative API for passing headers to lookup functions
+
+// An opaque type representing a name/value headers map
+// You can create, fill and destroy this object directly.
+typedef struct _ih_h * wurfl_important_header_handle;
+wurfl_important_header_handle wurfl_important_header_create(wurfl_handle);
+wurfl_error wurfl_important_header_set(wurfl_important_header_handle, const char *name, const char *value);
+void wurfl_important_header_destroy(wurfl_important_header_handle);
+
+// Alternative lookup functions using the above wurfl_important_header_handle object.
+// Once called, you can destroy the wurfl_important_header_handle object. Headers values are cached internally in the wurfl_device_handle.
+wurfl_device_handle wurfl_lookup_with_important_header(wurfl_handle, wurfl_important_header_handle);
+wurfl_device_handle wurfl_get_device_with_important_header(wurfl_handle, const char *deviceid, wurfl_important_header_handle);
+
+// Enumerator of all headers that should be passed to a lookup function. Returns a null-terminated list of const char*
+//
+// Example usage:
+//
+// const char** importantHeadersNames = wurfl_get_important_header_names();
+// int i = 0;
+// while (importantHeadersNames[i])
+// {
+// printf("important header %i: %s\n", i, importantHeadersNames[i]);
+// i++;
+// }
+const char **wurfl_get_important_header_names(void);
+
+// classic WURFL iterator version of the enumerator hereabove.
+typedef void *wurfl_important_header_enumerator_handle;
+wurfl_important_header_enumerator_handle wurfl_get_important_header_enumerator(wurfl_handle hwurfl);
+void wurfl_important_header_enumerator_destroy(wurfl_important_header_enumerator_handle);
+const char *wurfl_important_header_enumerator_get_value(wurfl_important_header_enumerator_handle);
+int wurfl_important_header_enumerator_is_valid(wurfl_important_header_enumerator_handle);
+void wurfl_important_header_enumerator_move_next(wurfl_important_header_enumerator_handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // _WURFL_H_
diff --git a/addons/wurfl/wurfl.c b/addons/wurfl/wurfl.c
new file mode 100644
index 0000000..4df6473
--- /dev/null
+++ b/addons/wurfl/wurfl.c
@@ -0,0 +1,779 @@
+#include <stdio.h>
+#include <stdarg.h>
+
+#include <import/ebmbtree.h>
+#include <import/ebsttree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/log.h>
+#include <haproxy/sample.h>
+#include <haproxy/tools.h>
+
+#include <wurfl/wurfl.h>
+
/* Module-wide WURFL configuration/state, filled in by the "wurfl-*" global
 * directive parsers below and finalized by ha_wurfl_init(). Static storage,
 * so members without an explicit initializer (e.g. btree, re-set to EB_ROOT
 * at init time anyway) start out zeroed.
 */
static struct {
	char *data_file; /* the WURFL data file */
	char *cache_size; /* the WURFL cache parameters */
	struct list patch_file_list; /* the list of WURFL patch file to use */
	char information_list_separator; /* the separator used in request to separate values */
	struct list information_list; /* the list of WURFL data to return into request */
	void *handle; /* the handle to WURFL engine */
	struct eb_root btree; /* btree containing info (name/type) on WURFL data to return */
} global_wurfl = {
	.data_file = NULL,
	.cache_size = NULL,
	.information_list_separator = ',',
	.information_list = LIST_HEAD_INIT(global_wurfl.information_list),
	.patch_file_list = LIST_HEAD_INIT(global_wurfl.patch_file_list),
	.handle = NULL,
};
+
#ifdef WURFL_DEBUG
/* Debug build: format the message and emit it through HAProxy's logging
 * machinery at LOG_NOTICE level. Messages longer than the local buffer
 * (256 bytes) are silently truncated by vsnprintf.
 */
inline static void ha_wurfl_log(char * message, ...)
{
	char logbuf[256];
	va_list argp;

	va_start(argp, message);
	vsnprintf(logbuf, sizeof(logbuf), message, argp);
	va_end(argp);
	send_log(NULL, LOG_NOTICE, "%s", logbuf);
}
#else
/* Release build: debug logging compiles down to a no-op. */
inline static void ha_wurfl_log(char * message, ...)
{
}
#endif
+
+#define HA_WURFL_MAX_HEADER_LENGTH 1024
+
/* Signature of the callbacks that return engine-level "properties"
 * (see wurfl_properties_function_map below).
 */
typedef char *(*PROP_CALLBACK_FUNC)(wurfl_handle wHandle, wurfl_device_handle dHandle);

/* Kind of datum a configured wurfl-information-list entry resolves to.
 * Resolution happens in ha_wurfl_init(): engine property first, then
 * virtual capability, otherwise assumed to be a (static) capability.
 */
enum wurfl_data_type {
	HA_WURFL_DATA_TYPE_UNKNOWN = 0,
	HA_WURFL_DATA_TYPE_CAP = 100,
	HA_WURFL_DATA_TYPE_VCAP = 200,
	HA_WURFL_DATA_TYPE_PROPERTY = 300
};

/* One requested WURFL datum: configured name, resolved type, the property
 * callback (used only for HA_WURFL_DATA_TYPE_PROPERTY entries) and the
 * ebtree node used to look up duplicates in global_wurfl.btree.
 */
typedef struct {
	char *name;
	enum wurfl_data_type type;
	PROP_CALLBACK_FUNC func_callback;
	struct ebmb_node nd;
} wurfl_data_t;
+
+static const char HA_WURFL_MODULE_VERSION[] = "2.0";
+static const char HA_WURFL_ISDEVROOT_FALSE[] = "FALSE";
+static const char HA_WURFL_ISDEVROOT_TRUE[] = "TRUE";
+
+static const char HA_WURFL_DATA_TYPE_UNKNOWN_STRING[] = "unknown";
+static const char HA_WURFL_DATA_TYPE_CAP_STRING[] = "capability";
+static const char HA_WURFL_DATA_TYPE_VCAP_STRING[] = "virtual_capability";
+static const char HA_WURFL_DATA_TYPE_PROPERTY_STRING[] = "property";
+
+static const char *ha_wurfl_retrieve_header(const char *header_name, const void *wh);
+static const char *ha_wurfl_get_wurfl_root_id (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_id (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_isdevroot (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_useragent (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_api_version (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_engine_target (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_info (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_last_load_time (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_normalized_useragent (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *ha_wurfl_get_wurfl_useragent_priority (wurfl_handle wHandle, wurfl_device_handle dHandle);
+static const char *(*ha_wurfl_get_property_callback(char *name)) (wurfl_handle wHandle, wurfl_device_handle dHandle);
+
+// ordered property=>function map, suitable for binary search
+static const struct {
+ const char *name;
+ const char *(*func)(wurfl_handle wHandle, wurfl_device_handle dHandle);
+} wurfl_properties_function_map [] = {
+ {"wurfl_api_version", ha_wurfl_get_wurfl_api_version},
+ {"wurfl_engine_target", ha_wurfl_get_wurfl_engine_target}, // kept for backward conf file compat
+ {"wurfl_id", ha_wurfl_get_wurfl_id },
+ {"wurfl_info", ha_wurfl_get_wurfl_info },
+ {"wurfl_isdevroot", ha_wurfl_get_wurfl_isdevroot},
+ {"wurfl_last_load_time", ha_wurfl_get_wurfl_last_load_time},
+ {"wurfl_normalized_useragent", ha_wurfl_get_wurfl_normalized_useragent},
+ {"wurfl_root_id", ha_wurfl_get_wurfl_root_id},
+ {"wurfl_useragent", ha_wurfl_get_wurfl_useragent},
+ {"wurfl_useragent_priority", ha_wurfl_get_wurfl_useragent_priority }, // kept for backward conf file compat
+};
+static const int HA_WURFL_PROPERTIES_NBR = 10;
+
/* Node of global_wurfl.information_list: one WURFL datum requested via
 * the "wurfl-information-list" directive.
 */
typedef struct {
	struct list list;
	wurfl_data_t data;
} wurfl_information_t;

/* Node of global_wurfl.patch_file_list: path of one WURFL patch file
 * given with "wurfl-patch-file".
 */
typedef struct {
	struct list list;
	char *patch_file_path;
} wurfl_patches_t;

/* Per-lookup context: the sample being evaluated plus a scratch buffer
 * receiving a header value. NOTE(review): presumably this is the opaque
 * pointer handed to ha_wurfl_retrieve_header() — confirm at the lookup
 * call site (outside this chunk).
 */
typedef struct {
	struct sample *wsmp;
	char header_value[HA_WURFL_MAX_HEADER_LENGTH + 1];
} ha_wurfl_header_t;
+
+/*
+ * configuration parameters parsing functions
+ */
+static int ha_wurfl_cfg_data_file(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "WURFL: %s expects a value.\n", args[0]);
+ return -1;
+ }
+
+ global_wurfl.data_file = strdup(args[1]);
+ return 0;
+}
+
+static int ha_wurfl_cfg_cache(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "WURFL: %s expects a value.\n", args[0]);
+ return -1;
+ }
+
+ global_wurfl.cache_size = strdup(args[1]);
+ return 0;
+}
+
/* "wurfl-engine-mode" directive parser. The setting is a no-op nowadays;
 * the directive is still accepted (and ignored) so that old configuration
 * files keep loading.
 */
static int ha_wurfl_cfg_engine_mode(char **args, int section_type, struct proxy *curpx,
                                    const struct proxy *defpx, const char *file, int line,
                                    char **err)
{
	// kept for backward conf file compat
	return 0;
}
+
+static int ha_wurfl_cfg_information_list_separator(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "WURFL: %s expects a single character.\n", args[0]);
+ return -1;
+ }
+
+ if (strlen(args[1]) > 1) {
+ memprintf(err, "WURFL: %s expects a single character, got %s.\n", args[0], args[1]);
+ return -1;
+ }
+
+ global_wurfl.information_list_separator = *args[1];
+ return 0;
+}
+
+static int ha_wurfl_cfg_information_list(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int argIdx = 1;
+ wurfl_information_t *wi;
+
+ if (*(args[argIdx]) == 0) {
+ memprintf(err, "WURFL: %s expects a value.\n", args[0]);
+ return -1;
+ }
+
+ while (*(args[argIdx])) {
+ wi = calloc(1, sizeof(*wi));
+
+ if (wi == NULL) {
+ memprintf(err, "WURFL: Error allocating memory for %s element.\n", args[0]);
+ return -1;
+ }
+
+ wi->data.name = strdup(args[argIdx]);
+ wi->data.type = HA_WURFL_DATA_TYPE_UNKNOWN;
+ wi->data.func_callback = NULL;
+ LIST_APPEND(&global_wurfl.information_list, &wi->list);
+ ++argIdx;
+ }
+
+ return 0;
+}
+
+static int ha_wurfl_cfg_patch_file_list(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int argIdx = 1;
+ wurfl_patches_t *wp;
+
+ if (*(args[argIdx]) == 0) {
+ memprintf(err, "WURFL: %s expects a value.\n", args[0]);
+ return -1;
+ }
+
+ while (*(args[argIdx])) {
+ wp = calloc(1, sizeof(*wp));
+
+ if (wp == NULL) {
+ memprintf(err, "WURFL: Error allocating memory for %s element.\n", args[0]);
+ return -1;
+ }
+
+ wp->patch_file_path = strdup(args[argIdx]);
+ LIST_APPEND(&global_wurfl.patch_file_list, &wp->list);
+ ++argIdx;
+ }
+
+ return 0;
+}
+
/* "wurfl-useragent-priority" directive parser. Deprecated feature: the
 * directive is parsed and ignored so existing configuration files that
 * still use it do not break.
 */
static int ha_wurfl_cfg_useragent_priority(char **args, int section_type, struct proxy *curpx,
                                           const struct proxy *defpx, const char *file, int line,
                                           char **err)
{
	// this feature is deprecated, keeping only not to break compatibility
	// with old configuration files.
	return 0;
}
+
+/*
+ * module init / deinit functions. Returns 0 if OK, or a combination of ERR_*.
+ */
+
+/*
+ * POST_CHECK callback: configures and loads the WURFL engine from the
+ * settings collected by the configuration keyword parsers above.
+ * Returns ERR_NONE on success (or when wurfl-data-file is not configured,
+ * in which case WURFL is simply disabled), ERR_WARN on any failure.
+ */
+static int ha_wurfl_init(void)
+{
+	wurfl_information_t *wi;
+	wurfl_patches_t *wp;
+	wurfl_data_t * wn;
+	int wurfl_result_code = WURFL_OK;
+	int len;
+
+	// wurfl-data-file not configured, WURFL is not used so don't try to
+	// configure it.
+	if (global_wurfl.data_file == NULL)
+		return ERR_NONE;
+
+	ha_notice("WURFL: Loading module v.%s\n", HA_WURFL_MODULE_VERSION);
+	// creating WURFL handler
+	global_wurfl.handle = wurfl_create();
+
+	if (global_wurfl.handle == NULL) {
+		ha_warning("WURFL: Engine handler creation failed\n");
+		return ERR_WARN;
+	}
+
+	ha_notice("WURFL: Engine handler created - API version %s\n", wurfl_get_api_version() );
+
+	// set wurfl data file
+	if (wurfl_set_root(global_wurfl.handle, global_wurfl.data_file) != WURFL_OK) {
+		ha_warning("WURFL: Engine setting root file failed - %s\n", wurfl_get_error_message(global_wurfl.handle));
+		return ERR_WARN;
+	}
+
+	ha_notice("WURFL: Engine root file set to %s\n", global_wurfl.data_file);
+	// just a log to inform which separator char has to be used
+	ha_notice("WURFL: Information list separator set to '%c'\n", global_wurfl.information_list_separator);
+
+	// load wurfl data needed ( and filter whose are supposed to be capabilities )
+	if (LIST_ISEMPTY(&global_wurfl.information_list)) {
+		ha_warning("WURFL: missing wurfl-information-list parameter in global configuration\n");
+		return ERR_WARN;
+	} else {
+		// ebtree initialization
+		global_wurfl.btree = EB_ROOT;
+
+		// Each configured information (stored as UNKNOWN by the cfg
+		// parser) is now classified as property, virtual capability or
+		// capability, and indexed by name in the ebtree for the
+		// per-request lookups done by ha_wurfl_get().
+		// checking if information is valid WURFL data ( cap, vcaps, properties )
+		list_for_each_entry(wi, &global_wurfl.information_list, list) {
+			// check if information is already loaded looking into btree
+			if (ebst_lookup(&global_wurfl.btree, wi->data.name) == NULL) {
+				if ((wi->data.func_callback = (PROP_CALLBACK_FUNC) ha_wurfl_get_property_callback(wi->data.name)) != NULL) {
+					wi->data.type = HA_WURFL_DATA_TYPE_PROPERTY;
+#ifdef WURFL_DEBUG
+					ha_notice("WURFL: [%s] is a valid wurfl data [property]\n",wi->data.name);
+#endif
+				} else if (wurfl_has_virtual_capability(global_wurfl.handle, wi->data.name)) {
+					wi->data.type = HA_WURFL_DATA_TYPE_VCAP;
+#ifdef WURFL_DEBUG
+					ha_notice("WURFL: [%s] is a valid wurfl data [virtual capability]\n",wi->data.name);
+#endif
+				} else {
+					// by default a cap type is assumed to be and we control it on engine load
+					wi->data.type = HA_WURFL_DATA_TYPE_CAP;
+
+					if (wurfl_add_requested_capability(global_wurfl.handle, wi->data.name) != WURFL_OK) {
+						ha_warning("WURFL: capability filtering failed - %s\n", wurfl_get_error_message(global_wurfl.handle));
+						return ERR_WARN;
+					}
+
+					ha_notice("WURFL: [%s] treated as wurfl capability. Will check its validity later, on engine load\n",wi->data.name);
+				}
+
+				// ebtree insert here
+				len = strlen(wi->data.name);
+
+				// wn embeds the ebmb node; the lookup key (the name) is
+				// copied inline in the trailing flexible storage
+				wn = malloc(sizeof(wurfl_data_t) + len + 1);
+
+				if (wn == NULL) {
+					ha_warning("WURFL: Error allocating memory for information tree element.\n");
+					return ERR_WARN;
+				}
+
+				// NOTE(review): wn->name aliases the strdup'ed string
+				// owned by wi — it must not be freed independently
+				wn->name = wi->data.name;
+				wn->type = wi->data.type;
+				wn->func_callback = wi->data.func_callback;
+				memcpy(wn->nd.key, wi->data.name, len);
+				wn->nd.key[len] = 0;
+
+				if (!ebst_insert(&global_wurfl.btree, &wn->nd)) {
+					ha_warning("WURFL: [%s] not inserted in btree\n",wn->name);
+					return ERR_WARN;
+				}
+
+			} else {
+#ifdef WURFL_DEBUG
+				// duplicate name in the configuration: keep the first entry
+				ha_notice("WURFL: [%s] already loaded\n",wi->data.name);
+#endif
+			}
+
+		}
+
+	}
+
+
+	// adding WURFL patches if needed
+	if (!LIST_ISEMPTY(&global_wurfl.patch_file_list)) {
+
+		list_for_each_entry(wp, &global_wurfl.patch_file_list, list) {
+			if (wurfl_add_patch(global_wurfl.handle, wp->patch_file_path) != WURFL_OK) {
+				ha_warning("WURFL: Engine adding patch file failed - %s\n", wurfl_get_error_message(global_wurfl.handle));
+				return ERR_WARN;
+			}
+			ha_notice("WURFL: Engine patch file added %s\n", wp->patch_file_path);
+
+		}
+
+	}
+
+	// setting cache provider if specified in cfg, otherwise let engine choose
+	// a "x,y" value selects the double-LRU provider, a non-zero single value
+	// the plain LRU one, and "0" disables caching entirely
+	if (global_wurfl.cache_size != NULL) {
+		if (strpbrk(global_wurfl.cache_size, ",") != NULL) {
+			wurfl_result_code = wurfl_set_cache_provider(global_wurfl.handle, WURFL_CACHE_PROVIDER_DOUBLE_LRU, global_wurfl.cache_size) ;
+		} else {
+			if (strcmp(global_wurfl.cache_size, "0")) {
+				wurfl_result_code = wurfl_set_cache_provider(global_wurfl.handle, WURFL_CACHE_PROVIDER_LRU, global_wurfl.cache_size) ;
+			} else {
+				wurfl_result_code = wurfl_set_cache_provider(global_wurfl.handle, WURFL_CACHE_PROVIDER_NONE, 0);
+			}
+
+		}
+
+		if (wurfl_result_code != WURFL_OK) {
+			ha_warning("WURFL: Setting cache to [%s] failed - %s\n", global_wurfl.cache_size, wurfl_get_error_message(global_wurfl.handle));
+			return ERR_WARN;
+		}
+
+		ha_notice("WURFL: Cache set to [%s]\n", global_wurfl.cache_size);
+	}
+
+	// loading WURFL engine
+	if (wurfl_load(global_wurfl.handle) != WURFL_OK) {
+		ha_warning("WURFL: Engine load failed - %s\n", wurfl_get_error_message(global_wurfl.handle));
+		return ERR_WARN;
+	}
+
+	ha_notice("WURFL: Engine loaded\n");
+	ha_notice("WURFL: Module load completed\n");
+	return ERR_NONE;
+}
+
+/*
+ * POST_DEINIT callback: destroys the WURFL engine handle and releases the
+ * configuration lists built by the keyword parsers.
+ */
+static void ha_wurfl_deinit(void)
+{
+	wurfl_information_t *wi, *wi2;
+	wurfl_patches_t *wp, *wp2;
+
+	send_log(NULL, LOG_NOTICE, "WURFL: Unloading module v.%s\n", HA_WURFL_MODULE_VERSION);
+	wurfl_destroy(global_wurfl.handle);
+	global_wurfl.handle = NULL;
+	ha_free(&global_wurfl.data_file);
+	ha_free(&global_wurfl.cache_size);
+
+	// the safe iterator variant is required since entries are deleted
+	// while walking the lists.
+	// NOTE(review): wi->data.name / wp->patch_file_path (strdup'ed) and the
+	// btree nodes built in ha_wurfl_init() are not freed here — presumably
+	// acceptable at process exit, but verify against the allocators.
+	list_for_each_entry_safe(wi, wi2, &global_wurfl.information_list, list) {
+		LIST_DELETE(&wi->list);
+		free(wi);
+	}
+
+	list_for_each_entry_safe(wp, wp2, &global_wurfl.patch_file_list, list) {
+		LIST_DELETE(&wp->list);
+		free(wp);
+	}
+
+	send_log(NULL, LOG_NOTICE, "WURFL: Module unloaded\n");
+}
+
+/*
+ * Sample fetch "wurfl-get-all": performs a WURFL device lookup on the
+ * request headers and returns every configured information (from
+ * wurfl-information-list) concatenated with the configured separator.
+ * Returns 1 with an SMP_T_STR sample, or 0 when no HTX message is
+ * available yet.
+ */
+static int ha_wurfl_get_all(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	wurfl_device_handle dHandle;
+	struct buffer *temp;
+	wurfl_information_t *wi;
+	ha_wurfl_header_t wh;
+	struct channel *chn;
+	struct htx *htx;
+
+	ha_wurfl_log("WURFL: starting ha_wurfl_get_all\n");
+
+	chn = (smp->strm ? &smp->strm->req : NULL);
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	if (!htx)
+		return 0;
+
+	wh.wsmp = smp;
+
+	// the engine pulls the request headers it needs through the
+	// ha_wurfl_retrieve_header() callback
+	dHandle = wurfl_lookup(global_wurfl.handle, &ha_wurfl_retrieve_header, &wh);
+
+	temp = get_trash_chunk();
+	chunk_reset(temp);
+
+	if (!dHandle) {
+		ha_wurfl_log("WURFL: unable to retrieve device from request %s\n", wurfl_get_error_message(global_wurfl.handle));
+		goto wurfl_get_all_completed;
+	}
+
+	list_for_each_entry(wi, &global_wurfl.information_list, list) {
+
+		switch(wi->data.type) {
+		case HA_WURFL_DATA_TYPE_UNKNOWN :
+			ha_wurfl_log("WURFL: %s is of an %s type\n", wi->data.name, HA_WURFL_DATA_TYPE_UNKNOWN_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+			// write WURFL property type and name before its value...
+			chunk_appendf(temp, "%s=%s", HA_WURFL_DATA_TYPE_UNKNOWN_STRING, wi->data.name);
+#endif
+			break;
+		case HA_WURFL_DATA_TYPE_CAP :
+			ha_wurfl_log("WURFL: %s is a %s\n", wi->data.name, HA_WURFL_DATA_TYPE_CAP_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+			// write WURFL property type and name before its value...
+			chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_CAP_STRING, wi->data.name);
+#endif
+			chunk_appendf(temp, "%s", wurfl_device_get_capability(dHandle, wi->data.name));
+			break;
+		case HA_WURFL_DATA_TYPE_VCAP :
+			ha_wurfl_log("WURFL: %s is a %s\n", wi->data.name, HA_WURFL_DATA_TYPE_VCAP_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+			// write WURFL property type and name before its value...
+			chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_VCAP_STRING, wi->data.name);
+#endif
+			chunk_appendf(temp, "%s", wurfl_device_get_virtual_capability(dHandle, wi->data.name));
+			break;
+		case HA_WURFL_DATA_TYPE_PROPERTY :
+			ha_wurfl_log("WURFL: %s is a %s\n", wi->data.name, HA_WURFL_DATA_TYPE_PROPERTY_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+			// write WURFL property type and name before its value...
+			chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_PROPERTY_STRING, wi->data.name);
+#endif
+			chunk_appendf(temp, "%s", wi->data.func_callback(global_wurfl.handle, dHandle));
+			break;
+		}
+
+		// append wurfl-information-list-separator
+		chunk_appendf(temp, "%c", global_wurfl.information_list_separator);
+	}
+
+wurfl_get_all_completed:
+
+	// do not hand a NULL device handle to the engine on the lookup-failure path
+	if (dHandle)
+		wurfl_device_destroy(dHandle);
+	smp->data.u.str.area = temp->area;
+	smp->data.u.str.data = temp->data;
+
+	// remove trailing wurfl-information-list-separator: the last appended
+	// character is always a separator, so overwrite it (at index data - 1)
+	// with the NUL terminator and shrink the sample length accordingly
+	if (temp->data) {
+		temp->area[temp->data - 1] = '\0';
+		--smp->data.u.str.data;
+	}
+
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/*
+ * Sample fetch "wurfl-get(<name>,...)": performs a WURFL device lookup on
+ * the request headers and returns the requested informations (which must
+ * belong to wurfl-information-list) concatenated with the configured
+ * separator. Returns 1 with an SMP_T_STR sample, or 0 when no HTX message
+ * is available yet.
+ */
+static int ha_wurfl_get(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	wurfl_device_handle dHandle;
+	struct buffer *temp;
+	wurfl_data_t *wn = NULL;
+	struct ebmb_node *node;
+	ha_wurfl_header_t wh;
+	int i = 0;
+	struct channel *chn;
+	struct htx *htx;
+
+	ha_wurfl_log("WURFL: starting ha_wurfl_get\n");
+
+	chn = (smp->strm ? &smp->strm->req : NULL);
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	if (!htx)
+		return 0;
+
+	wh.wsmp = smp;
+
+	// the engine pulls the request headers it needs through the
+	// ha_wurfl_retrieve_header() callback
+	dHandle = wurfl_lookup(global_wurfl.handle, &ha_wurfl_retrieve_header, &wh);
+
+	temp = get_trash_chunk();
+	chunk_reset(temp);
+
+	if (!dHandle) {
+		ha_wurfl_log("WURFL: unable to retrieve device from request %s\n", wurfl_get_error_message(global_wurfl.handle));
+		goto wurfl_get_completed;
+	}
+
+	// each argument is resolved through the btree built by ha_wurfl_init();
+	// unknown names are skipped (logged only)
+	while (args[i].data.str.area) {
+		node = ebst_lookup(&global_wurfl.btree, args[i].data.str.area);
+
+		if (node) {
+
+			wn = container_of(node, wurfl_data_t, nd);
+
+			switch(wn->type) {
+			case HA_WURFL_DATA_TYPE_UNKNOWN :
+				ha_wurfl_log("WURFL: %s is of an %s type\n", wn->name, HA_WURFL_DATA_TYPE_UNKNOWN_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+				// write WURFL property type and name before its value...
+				chunk_appendf(temp, "%s=%s", HA_WURFL_DATA_TYPE_UNKNOWN_STRING, wn->name);
+#endif
+				break;
+			case HA_WURFL_DATA_TYPE_CAP :
+				ha_wurfl_log("WURFL: %s is a %s\n", wn->name, HA_WURFL_DATA_TYPE_CAP_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+				// write WURFL property type and name before its value...
+				chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_CAP_STRING, wn->name);
+#endif
+				chunk_appendf(temp, "%s", wurfl_device_get_capability(dHandle, wn->name));
+				break;
+			case HA_WURFL_DATA_TYPE_VCAP :
+				ha_wurfl_log("WURFL: %s is a %s\n", wn->name, HA_WURFL_DATA_TYPE_VCAP_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+				// write WURFL property type and name before its value...
+				chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_VCAP_STRING, wn->name);
+#endif
+				chunk_appendf(temp, "%s", wurfl_device_get_virtual_capability(dHandle, wn->name));
+				break;
+			case HA_WURFL_DATA_TYPE_PROPERTY :
+				ha_wurfl_log("WURFL: %s is a %s\n", wn->name, HA_WURFL_DATA_TYPE_PROPERTY_STRING);
+#ifdef WURFL_HEADER_WITH_DETAILS
+				// write WURFL property type and name before its value...
+				chunk_appendf(temp, "%s=%s=", HA_WURFL_DATA_TYPE_PROPERTY_STRING, wn->name);
+#endif
+				chunk_appendf(temp, "%s", wn->func_callback(global_wurfl.handle, dHandle));
+				break;
+			}
+
+			// append wurfl-information-list-separator
+			chunk_appendf(temp, "%c", global_wurfl.information_list_separator);
+
+		} else {
+			ha_wurfl_log("WURFL: %s not in wurfl-information-list \n",
+				     args[i].data.str.area);
+		}
+
+		i++;
+	}
+
+wurfl_get_completed:
+
+	// do not hand a NULL device handle to the engine on the lookup-failure path
+	if (dHandle)
+		wurfl_device_destroy(dHandle);
+	smp->data.u.str.area = temp->area;
+	smp->data.u.str.data = temp->data;
+
+	// remove trailing wurfl-information-list-separator: the last appended
+	// character is always a separator, so overwrite it (at index data - 1)
+	// with the NUL terminator and shrink the sample length accordingly
+	if (temp->data) {
+		temp->area[temp->data - 1] = '\0';
+		--smp->data.u.str.data;
+	}
+
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/* Global-section configuration keywords handled by this module. */
+static struct cfg_kw_list wurflcfg_kws = {{ }, {
+		{ CFG_GLOBAL, "wurfl-data-file", ha_wurfl_cfg_data_file },
+		{ CFG_GLOBAL, "wurfl-information-list-separator", ha_wurfl_cfg_information_list_separator },
+		{ CFG_GLOBAL, "wurfl-information-list", ha_wurfl_cfg_information_list },
+		{ CFG_GLOBAL, "wurfl-patch-file", ha_wurfl_cfg_patch_file_list },
+		{ CFG_GLOBAL, "wurfl-cache-size", ha_wurfl_cfg_cache },
+		{ CFG_GLOBAL, "wurfl-engine-mode", ha_wurfl_cfg_engine_mode },
+		{ CFG_GLOBAL, "wurfl-useragent-priority", ha_wurfl_cfg_useragent_priority },
+		{ 0, NULL, NULL },
+	}
+};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &wurflcfg_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+/* Sample fetches exported by this module (request header volatile). */
+static struct sample_fetch_kw_list fetch_kws = {ILH, {
+		{ "wurfl-get-all", ha_wurfl_get_all, 0, NULL, SMP_T_STR, SMP_USE_HRQHV },
+		{ "wurfl-get", ha_wurfl_get, ARG12(1,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR), NULL, SMP_T_STR, SMP_USE_HRQHV },
+		{ NULL, NULL, 0, 0, 0 },
+	}
+};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &fetch_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+/* No sample converters are exported; the list only carries its terminator. */
+static struct sample_conv_kw_list conv_kws = {ILH, {
+		{ NULL, NULL, 0, 0, 0 },
+	}
+};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &conv_kws);
+
+// WURFL properties wrapper functions
+/*
+ * Property wrapper: returns the device root id of the matched device, or an
+ * empty string when the engine reports none (avoids returning NULL to the
+ * chunk_appendf() callers). Queries the engine only once.
+ */
+static const char *ha_wurfl_get_wurfl_root_id (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	const char *root_id = wurfl_device_get_root_id(dHandle);
+
+	return root_id ? root_id : "";
+}
+
+/* Property wrapper: returns the WURFL id of the matched device. */
+static const char *ha_wurfl_get_wurfl_id (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_device_get_id(dHandle);
+}
+
+/* Property wrapper: "TRUE"/"FALSE" string telling whether the matched
+ * device is an actual device root.
+ */
+static const char *ha_wurfl_get_wurfl_isdevroot (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	if (wurfl_device_is_actual_device_root(dHandle))
+		return HA_WURFL_ISDEVROOT_TRUE;
+	else
+		return HA_WURFL_ISDEVROOT_FALSE;
+}
+
+/* Property wrapper: returns the original User-Agent used for the lookup. */
+static const char *ha_wurfl_get_wurfl_useragent (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_device_get_original_useragent(dHandle);
+}
+
+/* Property wrapper: returns the WURFL API version string. */
+static const char *ha_wurfl_get_wurfl_api_version (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_get_api_version();
+}
+
+/* Property wrapper: engine target is deprecated, a fixed "default" is
+ * returned for compatibility.
+ */
+static const char *ha_wurfl_get_wurfl_engine_target (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return "default";
+}
+
+/* Property wrapper: returns the loaded WURFL data file information string. */
+static const char *ha_wurfl_get_wurfl_info (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_get_wurfl_info(wHandle);
+}
+
+/* Property wrapper: returns the engine's last data load time as a string. */
+static const char *ha_wurfl_get_wurfl_last_load_time (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_get_last_load_time_as_string(wHandle);
+}
+
+/* Property wrapper: returns the normalized User-Agent computed by the engine. */
+static const char *ha_wurfl_get_wurfl_normalized_useragent (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return wurfl_device_get_normalized_useragent(dHandle);
+}
+
+/* Property wrapper: useragent priority is deprecated, a fixed "default" is
+ * returned for compatibility.
+ */
+static const char *ha_wurfl_get_wurfl_useragent_priority (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	return "default";
+}
+
+// call function for WURFL properties
+/*
+ * Resolves a property name to its wrapper function with a binary search
+ * over wurfl_properties_function_map, which must therefore be kept sorted
+ * by name in strcmp() order. Returns NULL when the name is not a wrapped
+ * property (the caller then tries virtual capabilities / capabilities).
+ */
+static const char *(*ha_wurfl_get_property_callback(char *name)) (wurfl_handle wHandle, wurfl_device_handle dHandle)
+{
+	int position;
+	int begin = 0;
+	int end = HA_WURFL_PROPERTIES_NBR - 1;
+	int cond = 0;
+
+	while(begin <= end) {
+		position = (begin + end) / 2;
+
+		if((cond = strcmp(wurfl_properties_function_map[position].name, name)) == 0) {
+			ha_wurfl_log("WURFL: ha_wurfl_get_property_callback match %s\n", wurfl_properties_function_map[position].name );
+			return wurfl_properties_function_map[position].func;
+		} else if(cond < 0)
+			// map entry sorts before the requested name: search upper half
+			begin = position + 1;
+		else
+			// map entry sorts after the requested name: search lower half
+			end = position - 1;
+
+	}
+
+	return NULL;
+}
+
+/*
+ * Header retrieval callback passed to wurfl_lookup(): fetches the request
+ * header <header_name> from the HTX message attached to the sample carried
+ * in <wh> and copies its value (truncated to HA_WURFL_MAX_HEADER_LENGTH)
+ * into the per-lookup buffer. Returns the buffer, or NULL when the header
+ * (or the HTX message) is not available.
+ */
+static const char *ha_wurfl_retrieve_header(const char *header_name, const void *wh)
+{
+	struct sample *smp;
+	struct channel *chn;
+	struct htx *htx;
+	struct http_hdr_ctx ctx;
+	struct ist name;
+	int header_len = HA_WURFL_MAX_HEADER_LENGTH;
+
+	smp = ((ha_wurfl_header_t *)wh)->wsmp;
+	chn = (smp->strm ? &smp->strm->req : NULL);
+
+	ha_wurfl_log("WURFL: retrieve header (HTX) request [%s]\n", header_name);
+
+	//the header is searched from the beginning
+	ctx.blk = NULL;
+
+	// We could skip this check since ha_wurfl_retrieve_header is called from inside
+	// ha_wurfl_get()/ha_wurfl_get_all() that already perform the same check
+	// We choose to keep it in case ha_wurfl_retrieve_header will be called directly
+	htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	if (!htx) {
+		return NULL;
+	}
+
+	name = ist2((char *)header_name, strlen(header_name));
+
+	// If 4th param is set, it works on full-line headers in whose comma is not a delimiter but is
+	// part of the syntax
+	if (!http_find_header(htx, name, &ctx, 1)) {
+		return NULL;
+	}
+
+	if (header_len > ctx.value.len)
+		header_len = ctx.value.len;
+
+	// NOTE(review): writing header_value[header_len] after copying up to
+	// header_len bytes assumes the buffer holds at least
+	// HA_WURFL_MAX_HEADER_LENGTH + 1 bytes — verify against the
+	// ha_wurfl_header_t declaration.
+	strncpy(((ha_wurfl_header_t *)wh)->header_value, ctx.value.ptr, header_len);
+
+	((ha_wurfl_header_t *)wh)->header_value[header_len] = '\0';
+
+	ha_wurfl_log("WURFL: retrieve header request returns [%s]\n", ((ha_wurfl_header_t *)wh)->header_value);
+	return ((ha_wurfl_header_t *)wh)->header_value;
+}
+
+/*
+ * Registers the WURFL build option string shown by "haproxy -vv". The
+ * "dummy library" tag is added when the reported API version matches the
+ * one hardcoded in the bundled stub library.
+ */
+static void ha_wurfl_register_build_options()
+{
+	const char *ver = wurfl_get_api_version();
+	char *ptr = NULL;
+
+	memprintf(&ptr, "Built with WURFL support (%sversion %s)",
+		  strcmp(ver, "1.11.2.100") ? "" : "dummy library ",
+		  ver);
+	// second argument (1) hands ownership of <ptr> to the registry
+	hap_register_build_opts(ptr, 1);
+}
+
+REGISTER_POST_CHECK(ha_wurfl_init);
+REGISTER_POST_DEINIT(ha_wurfl_deinit);
+INITCALL0(STG_REGISTER, ha_wurfl_register_build_options);
diff --git a/admin/acme.sh/LICENSE b/admin/acme.sh/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/admin/acme.sh/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/admin/acme.sh/README b/admin/acme.sh/README
new file mode 100644
index 0000000..7f8cef4
--- /dev/null
+++ b/admin/acme.sh/README
@@ -0,0 +1,13 @@
+This directory contains a fork of the acme.sh deploy script for haproxy which
+allows acme.sh to run as non-root and does not require reloading haproxy.
+
+The content of this directory is licensed under GPLv3 as explained in the
+LICENSE file.
+
+This was originally written for this pull request
+https://github.com/acmesh-official/acme.sh/pull/4581.
+
+The documentation is available on the haproxy wiki:
+https://github.com/haproxy/wiki/wiki/Letsencrypt-integration-with-HAProxy-and-acme.sh
+
+The haproxy.sh script must replace the one provided by acme.sh.
diff --git a/admin/acme.sh/haproxy.sh b/admin/acme.sh/haproxy.sh
new file mode 100644
index 0000000..4b6ca0e
--- /dev/null
+++ b/admin/acme.sh/haproxy.sh
@@ -0,0 +1,403 @@
+#!/usr/bin/env sh
+
+# Script for acme.sh to deploy certificates to haproxy
+#
+# The following variables can be exported:
+#
+# export DEPLOY_HAPROXY_PEM_NAME="${domain}.pem"
+#
+# Defines the name of the PEM file.
+# Defaults to "<domain>.pem"
+#
+# export DEPLOY_HAPROXY_PEM_PATH="/etc/haproxy"
+#
+# Defines location of PEM file for HAProxy.
+# Defaults to /etc/haproxy
+#
+# export DEPLOY_HAPROXY_RELOAD="systemctl reload haproxy"
+#
+# OPTIONAL: Reload command used post deploy
+# This defaults to be a no-op (ie "true").
+# It is strongly recommended to set this something that makes sense
+# for your distro.
+#
+# export DEPLOY_HAPROXY_ISSUER="no"
+#
+# OPTIONAL: Places CA file as "${DEPLOY_HAPROXY_PEM}.issuer"
+# Note: Required for OCSP stapling to work
+#
+# export DEPLOY_HAPROXY_BUNDLE="no"
+#
+# OPTIONAL: Deploy this certificate as part of a multi-cert bundle
+# This adds a suffix to the certificate based on the certificate type
+# eg RSA certificates will have .rsa as a suffix to the file name
+# HAProxy will load all certificates and provide one or the other
+# depending on client capabilities
+# Note: This functionality requires HAProxy was compiled against
+# a version of OpenSSL that supports this.
+#
+# export DEPLOY_HAPROXY_HOT_UPDATE="yes"
+# export DEPLOY_HAPROXY_STATS_SOCKET="UNIX:/run/haproxy/admin.sock"
+#
+# OPTIONAL: Deploy the certificate over the HAProxy stats socket without
+# needing to reload HAProxy. Default is "no".
+#
+# Requires the socat binary. The DEPLOY_HAPROXY_STATS_SOCKET variable uses the socat
+# address format.
+#
+# export DEPLOY_HAPROXY_MASTER_CLI="UNIX:/run/haproxy-master.sock"
+#
+# OPTIONAL: To use the master CLI with DEPLOY_HAPROXY_HOT_UPDATE="yes" instead
+# of a stats socket, use this variable.
+
+######## Public functions #####################
+
+#domain keyfile certfile cafile fullchain
+haproxy_deploy() {
+ _cdomain="$1"
+ _ckey="$2"
+ _ccert="$3"
+ _cca="$4"
+ _cfullchain="$5"
+ _cmdpfx=""
+
+ # Some defaults
+ DEPLOY_HAPROXY_PEM_PATH_DEFAULT="/etc/haproxy"
+ DEPLOY_HAPROXY_PEM_NAME_DEFAULT="${_cdomain}.pem"
+ DEPLOY_HAPROXY_BUNDLE_DEFAULT="no"
+ DEPLOY_HAPROXY_ISSUER_DEFAULT="no"
+ DEPLOY_HAPROXY_RELOAD_DEFAULT="true"
+ DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT="no"
+ DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT="UNIX:/run/haproxy/admin.sock"
+
+ _debug _cdomain "${_cdomain}"
+ _debug _ckey "${_ckey}"
+ _debug _ccert "${_ccert}"
+ _debug _cca "${_cca}"
+ _debug _cfullchain "${_cfullchain}"
+
+ # PEM_PATH is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_PEM_PATH
+ _debug2 DEPLOY_HAPROXY_PEM_PATH "${DEPLOY_HAPROXY_PEM_PATH}"
+ if [ -n "${DEPLOY_HAPROXY_PEM_PATH}" ]; then
+ Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH}"
+ _savedomainconf Le_Deploy_haproxy_pem_path "${Le_Deploy_haproxy_pem_path}"
+ elif [ -z "${Le_Deploy_haproxy_pem_path}" ]; then
+ Le_Deploy_haproxy_pem_path="${DEPLOY_HAPROXY_PEM_PATH_DEFAULT}"
+ fi
+
+ # Ensure PEM_PATH exists
+ if [ -d "${Le_Deploy_haproxy_pem_path}" ]; then
+ _debug "PEM_PATH ${Le_Deploy_haproxy_pem_path} exists"
+ else
+ _err "PEM_PATH ${Le_Deploy_haproxy_pem_path} does not exist"
+ return 1
+ fi
+
+ # PEM_NAME is optional. If not provided then assume "${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_PEM_NAME
+ _debug2 DEPLOY_HAPROXY_PEM_NAME "${DEPLOY_HAPROXY_PEM_NAME}"
+ if [ -n "${DEPLOY_HAPROXY_PEM_NAME}" ]; then
+ Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME}"
+ _savedomainconf Le_Deploy_haproxy_pem_name "${Le_Deploy_haproxy_pem_name}"
+ elif [ -z "${Le_Deploy_haproxy_pem_name}" ]; then
+ Le_Deploy_haproxy_pem_name="${DEPLOY_HAPROXY_PEM_NAME_DEFAULT}"
+ # We better not have '*' as the first character
+ if [ "${Le_Deploy_haproxy_pem_name%%"${Le_Deploy_haproxy_pem_name#?}"}" = '*' ]; then
+ # removes the first characters and add a _ instead
+ Le_Deploy_haproxy_pem_name="_${Le_Deploy_haproxy_pem_name#?}"
+ fi
+ fi
+
+ # BUNDLE is optional. If not provided then assume "${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_BUNDLE
+ _debug2 DEPLOY_HAPROXY_BUNDLE "${DEPLOY_HAPROXY_BUNDLE}"
+ if [ -n "${DEPLOY_HAPROXY_BUNDLE}" ]; then
+ Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE}"
+ _savedomainconf Le_Deploy_haproxy_bundle "${Le_Deploy_haproxy_bundle}"
+ elif [ -z "${Le_Deploy_haproxy_bundle}" ]; then
+ Le_Deploy_haproxy_bundle="${DEPLOY_HAPROXY_BUNDLE_DEFAULT}"
+ fi
+
+ # ISSUER is optional. If not provided then assume "${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_ISSUER
+ _debug2 DEPLOY_HAPROXY_ISSUER "${DEPLOY_HAPROXY_ISSUER}"
+ if [ -n "${DEPLOY_HAPROXY_ISSUER}" ]; then
+ Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER}"
+ _savedomainconf Le_Deploy_haproxy_issuer "${Le_Deploy_haproxy_issuer}"
+ elif [ -z "${Le_Deploy_haproxy_issuer}" ]; then
+ Le_Deploy_haproxy_issuer="${DEPLOY_HAPROXY_ISSUER_DEFAULT}"
+ fi
+
+ # RELOAD is optional. If not provided then assume "${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_RELOAD
+ _debug2 DEPLOY_HAPROXY_RELOAD "${DEPLOY_HAPROXY_RELOAD}"
+ if [ -n "${DEPLOY_HAPROXY_RELOAD}" ]; then
+ Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD}"
+ _savedomainconf Le_Deploy_haproxy_reload "${Le_Deploy_haproxy_reload}"
+ elif [ -z "${Le_Deploy_haproxy_reload}" ]; then
+ Le_Deploy_haproxy_reload="${DEPLOY_HAPROXY_RELOAD_DEFAULT}"
+ fi
+
+ # HOT_UPDATE is optional. If not provided then assume "${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_HOT_UPDATE
+ _debug2 DEPLOY_HAPROXY_HOT_UPDATE "${DEPLOY_HAPROXY_HOT_UPDATE}"
+ if [ -n "${DEPLOY_HAPROXY_HOT_UPDATE}" ]; then
+ Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE}"
+ _savedomainconf Le_Deploy_haproxy_hot_update "${Le_Deploy_haproxy_hot_update}"
+ elif [ -z "${Le_Deploy_haproxy_hot_update}" ]; then
+ Le_Deploy_haproxy_hot_update="${DEPLOY_HAPROXY_HOT_UPDATE_DEFAULT}"
+ fi
+
+ # STATS_SOCKET is optional. If not provided then assume "${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
+ _getdeployconf DEPLOY_HAPROXY_STATS_SOCKET
+ _debug2 DEPLOY_HAPROXY_STATS_SOCKET "${DEPLOY_HAPROXY_STATS_SOCKET}"
+ if [ -n "${DEPLOY_HAPROXY_STATS_SOCKET}" ]; then
+ Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET}"
+ _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
+ elif [ -z "${Le_Deploy_haproxy_stats_socket}" ]; then
+ Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_STATS_SOCKET_DEFAULT}"
+ fi
+
+ # MASTER_CLI is optional. No defaults are used. When the master CLI is used,
+ # all commands are sent with a prefix.
+ _getdeployconf DEPLOY_HAPROXY_MASTER_CLI
+ _debug2 DEPLOY_HAPROXY_MASTER_CLI "${DEPLOY_HAPROXY_MASTER_CLI}"
+ if [ -n "${DEPLOY_HAPROXY_MASTER_CLI}" ]; then
+ Le_Deploy_haproxy_stats_socket="${DEPLOY_HAPROXY_MASTER_CLI}"
+ _savedomainconf Le_Deploy_haproxy_stats_socket "${Le_Deploy_haproxy_stats_socket}"
+ _cmdpfx="@1 " # command prefix used for master CLI only.
+ fi
+
+ # Set the suffix depending if we are creating a bundle or not
+ if [ "${Le_Deploy_haproxy_bundle}" = "yes" ]; then
+ _info "Bundle creation requested"
+    # Initialise $Le_Keylength if it's not already set
+ if [ -z "${Le_Keylength}" ]; then
+ Le_Keylength=""
+ fi
+ if _isEccKey "${Le_Keylength}"; then
+ _info "ECC key type detected"
+ _suffix=".ecdsa"
+ else
+ _info "RSA key type detected"
+ _suffix=".rsa"
+ fi
+ else
+ _suffix=""
+ fi
+ _debug _suffix "${_suffix}"
+
+ # Set variables for later
+ _pem="${Le_Deploy_haproxy_pem_path}/${Le_Deploy_haproxy_pem_name}${_suffix}"
+ _issuer="${_pem}.issuer"
+ _ocsp="${_pem}.ocsp"
+ _reload="${Le_Deploy_haproxy_reload}"
+ _statssock="${Le_Deploy_haproxy_stats_socket}"
+
+ _info "Deploying PEM file"
+ # Create a temporary PEM file
+ _temppem="$(_mktemp)"
+ _debug _temppem "${_temppem}"
+ cat "${_ccert}" "${_cca}" "${_ckey}" | grep . >"${_temppem}"
+ _ret="$?"
+
+ # Check that we could create the temporary file
+ if [ "${_ret}" != "0" ]; then
+ _err "Error code ${_ret} returned during PEM file creation"
+ [ -f "${_temppem}" ] && rm -f "${_temppem}"
+ return ${_ret}
+ fi
+
+ # Move PEM file into place
+ _info "Moving new certificate into place"
+ _debug _pem "${_pem}"
+ cat "${_temppem}" >"${_pem}"
+ _ret=$?
+
+ # Clean up temp file
+ [ -f "${_temppem}" ] && rm -f "${_temppem}"
+
+ # Deal with any failure of moving PEM file into place
+ if [ "${_ret}" != "0" ]; then
+ _err "Error code ${_ret} returned while moving new certificate into place"
+ return ${_ret}
+ fi
+
+ # Update .issuer file if requested
+ if [ "${Le_Deploy_haproxy_issuer}" = "yes" ]; then
+ _info "Updating .issuer file"
+ _debug _issuer "${_issuer}"
+ cat "${_cca}" >"${_issuer}"
+ _ret="$?"
+
+ if [ "${_ret}" != "0" ]; then
+ _err "Error code ${_ret} returned while copying issuer/CA certificate into place"
+ return ${_ret}
+ fi
+ else
+ [ -f "${_issuer}" ] && _err "Issuer file update not requested but .issuer file exists"
+ fi
+
+ # Update .ocsp file if certificate was requested with --ocsp/--ocsp-must-staple option
+ if [ -z "${Le_OCSP_Staple}" ]; then
+ Le_OCSP_Staple="0"
+ fi
+ if [ "${Le_OCSP_Staple}" = "1" ]; then
+ _info "Updating OCSP stapling info"
+ _debug _ocsp "${_ocsp}"
+ _info "Extracting OCSP URL"
+ _ocsp_url=$(${ACME_OPENSSL_BIN:-openssl} x509 -noout -ocsp_uri -in "${_pem}")
+ _debug _ocsp_url "${_ocsp_url}"
+
+ # Only process OCSP if URL was present
+ if [ "${_ocsp_url}" != "" ]; then
+ # Extract the hostname from the OCSP URL
+ _info "Extracting OCSP URL"
+ _ocsp_host=$(echo "${_ocsp_url}" | cut -d/ -f3)
+ _debug _ocsp_host "${_ocsp_host}"
+
+ # Only process the certificate if we have a .issuer file
+ if [ -r "${_issuer}" ]; then
+ # Check if issuer cert is also a root CA cert
+ _subjectdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -subject -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
+ _debug _subjectdn "${_subjectdn}"
+ _issuerdn=$(${ACME_OPENSSL_BIN:-openssl} x509 -in "${_issuer}" -issuer -noout | cut -d'/' -f2,3,4,5,6,7,8,9,10)
+ _debug _issuerdn "${_issuerdn}"
+ _info "Requesting OCSP response"
+ # If the issuer is a CA cert then our command line has "-CAfile" added
+ if [ "${_subjectdn}" = "${_issuerdn}" ]; then
+ _cafile_argument="-CAfile \"${_issuer}\""
+ else
+ _cafile_argument=""
+ fi
+ _debug _cafile_argument "${_cafile_argument}"
+ # if OpenSSL/LibreSSL is v1.1 or above, the format for the -header option has changed
+ _openssl_version=$(${ACME_OPENSSL_BIN:-openssl} version | cut -d' ' -f2)
+ _debug _openssl_version "${_openssl_version}"
+ _openssl_major=$(echo "${_openssl_version}" | cut -d '.' -f1)
+ _openssl_minor=$(echo "${_openssl_version}" | cut -d '.' -f2)
+ if [ "${_openssl_major}" -eq "1" ] && [ "${_openssl_minor}" -ge "1" ] || [ "${_openssl_major}" -ge "2" ]; then
+ _header_sep="="
+ else
+ _header_sep=" "
+ fi
+ # Request the OCSP response from the issuer and store it
+ _openssl_ocsp_cmd="${ACME_OPENSSL_BIN:-openssl} ocsp \
+ -issuer \"${_issuer}\" \
+ -cert \"${_pem}\" \
+ -url \"${_ocsp_url}\" \
+ -header Host${_header_sep}\"${_ocsp_host}\" \
+ -respout \"${_ocsp}\" \
+ -verify_other \"${_issuer}\" \
+ ${_cafile_argument} \
+ | grep -q \"${_pem}: good\""
+ _debug _openssl_ocsp_cmd "${_openssl_ocsp_cmd}"
+ eval "${_openssl_ocsp_cmd}"
+ _ret=$?
+ else
+ # Non fatal: No issuer file was present so no OCSP stapling file created
+ _err "OCSP stapling in use but no .issuer file was present"
+ fi
+ else
+        # Non fatal: No OCSP URL was found in the certificate
+ _err "OCSP update requested but no OCSP URL was found in certificate"
+ fi
+
+ # Non fatal: Check return code of openssl command
+ if [ "${_ret}" != "0" ]; then
+ _err "Updating OCSP stapling failed with return code ${_ret}"
+ fi
+ else
+ # An OCSP file was already present but certificate did not have OCSP extension
+ if [ -f "${_ocsp}" ]; then
+ _err "OCSP was not requested but .ocsp file exists."
+ # Could remove the file at this step, although HAProxy just ignores it in this case
+ # rm -f "${_ocsp}" || _err "Problem removing stale .ocsp file"
+ fi
+ fi
+
+ if [ "${Le_Deploy_haproxy_hot_update}" = "yes" ]; then
+ # set the socket name for messages
+ if [ -n "${_cmdpfx}" ]; then
+ _socketname="master CLI"
+ else
+ _socketname="stats socket"
+ fi
+
+ # Update certificate over HAProxy stats socket or master CLI.
+ if _exists socat; then
+      # look for the certificate on the stats socket, to choose between updating or creating one
+ _socat_cert_cmd="echo '${_cmdpfx}show ssl cert' | socat '${_statssock}' - | grep -q '^${_pem}$'"
+ _debug _socat_cert_cmd "${_socat_cert_cmd}"
+ eval "${_socat_cert_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _newcert="1"
+ _info "Creating new certificate '${_pem}' over HAProxy ${_socketname}."
+ # certificate wasn't found, it's a new one. We should check if the crt-list exists and creates/inserts the certificate.
+ _socat_crtlist_show_cmd="echo '${_cmdpfx}show ssl crt-list' | socat '${_statssock}' - | grep -q '^${Le_Deploy_haproxy_pem_path}$'"
+ _debug _socat_crtlist_show_cmd "${_socat_crtlist_show_cmd}"
+ eval "${_socat_crtlist_show_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Couldn't find '${Le_Deploy_haproxy_pem_path}' in haproxy 'show ssl crt-list'"
+ return "${_ret}"
+ fi
+ # create a new certificate
+ _socat_new_cmd="echo '${_cmdpfx}new ssl cert ${_pem}' | socat '${_statssock}' - | grep -q 'New empty'"
+ _debug _socat_new_cmd "${_socat_new_cmd}"
+ eval "${_socat_new_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Couldn't create '${_pem}' in haproxy"
+ return "${_ret}"
+ fi
+ else
+ _info "Update existing certificate '${_pem}' over HAProxy ${_socketname}."
+ fi
+ _socat_cert_set_cmd="echo -e '${_cmdpfx}set ssl cert ${_pem} <<\n$(cat "${_pem}")\n' | socat '${_statssock}' - | grep -q 'Transaction created'"
+ _debug _socat_cert_set_cmd "${_socat_cert_set_cmd}"
+ eval "${_socat_cert_set_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Can't update '${_pem}' in haproxy"
+ return "${_ret}"
+ fi
+ _socat_cert_commit_cmd="echo '${_cmdpfx}commit ssl cert ${_pem}' | socat '${_statssock}' - | grep -q '^Success!$'"
+ _debug _socat_cert_commit_cmd "${_socat_cert_commit_cmd}"
+ eval "${_socat_cert_commit_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Can't commit '${_pem}' in haproxy"
+ return ${_ret}
+ fi
+ if [ "${_newcert}" = "1" ]; then
+        # if this is a new certificate, it needs to be inserted into the crt-list
+ _socat_cert_add_cmd="echo '${_cmdpfx}add ssl crt-list ${Le_Deploy_haproxy_pem_path} ${_pem}' | socat '${_statssock}' - | grep -q 'Success!'"
+ _debug _socat_cert_add_cmd "${_socat_cert_add_cmd}"
+ eval "${_socat_cert_add_cmd}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Can't update '${_pem}' in haproxy"
+ return "${_ret}"
+ fi
+ fi
+ else
+ _err "'socat' is not available, couldn't update over ${_socketname}"
+ fi
+ else
+ # Reload HAProxy
+ _debug _reload "${_reload}"
+ eval "${_reload}"
+ _ret=$?
+ if [ "${_ret}" != "0" ]; then
+ _err "Error code ${_ret} during reload"
+ return ${_ret}
+ else
+ _info "Reload successful"
+ fi
+ fi
+
+ return 0
+}
diff --git a/admin/dyncookie/dyncookie.c b/admin/dyncookie/dyncookie.c
new file mode 100644
index 0000000..ddb71a7
--- /dev/null
+++ b/admin/dyncookie/dyncookie.c
@@ -0,0 +1,56 @@
+/*
+ * Dynamic server cookie calculator
+ *
+ * Copyright 2021 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <arpa/inet.h>
+
+#include <haproxy/xxhash.h>
+
+__attribute__((noreturn)) void die(int code, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+ exit(code);
+}
+
+int main(int argc, char **argv)
+{
+ size_t key_len;
+ int addr_len;
+ char *buf;
+ int port;
+
+ if (argc < 4)
+ die(1, "Usage: %s <key> <ip> <port>\n", argv[0]);
+
+ key_len = strlen(argv[1]);
+ buf = realloc(strdup(argv[1]), key_len + 16 + 4);
+ if (!buf)
+ die(2, "Not enough memory\n");
+
+ if (inet_pton(AF_INET, argv[2], buf + key_len) > 0)
+ addr_len = 4;
+ else if (inet_pton(AF_INET6, argv[2], buf + key_len) > 0)
+ addr_len = 16;
+ else
+ die(3, "Cannot parse address <%s> as IPv4/IPv6\n", argv[2]);
+
+ port = htonl(atoi(argv[3]));
+ memcpy(buf + key_len + addr_len, &port, 4);
+ printf("%016llx\n", (long long)XXH64(buf, key_len + addr_len + 4, 0));
+ return 0;
+}
diff --git a/admin/halog/README b/admin/halog/README
new file mode 100644
index 0000000..ff1bb12
--- /dev/null
+++ b/admin/halog/README
@@ -0,0 +1,4 @@
+This needs to be built from the top makefile, for example :
+
+ make admin/halog/halog
+
diff --git a/admin/halog/fgets2.c b/admin/halog/fgets2.c
new file mode 100644
index 0000000..7fbe16b
--- /dev/null
+++ b/admin/halog/fgets2.c
@@ -0,0 +1,267 @@
+/*
+ * fast fgets() replacement for log parsing
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * This function manages its own buffer and returns a pointer to that buffer
+ * in order to avoid expensive memory copies. It also checks for line breaks
+ * 32 or 64 bits at a time. It could be improved a lot using mmap() but we
+ * would not be allowed to replace trailing \n with zeroes and we would be
+ * limited to small log files on 32-bit machines.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#ifndef FGETS2_BUFSIZE
+#define FGETS2_BUFSIZE (256*1024)
+#endif
+
+/* memchr() is faster in glibc with SSE since commit 093ecf92998de2 */
+#if defined(__x86_64__) && defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 15))
+#define USE_MEMCHR
+#endif
+
+/* return non-zero if the integer contains at least one zero byte */
+static inline __attribute__((unused)) unsigned int has_zero32(unsigned int x)
+{
+ unsigned int y;
+
+ /* Principle: we want to perform 4 tests on one 32-bit int at once. For
+ * this, we have to simulate an SIMD instruction which we don't have by
+ * default. The principle is that a zero byte is the only one which
+ * will cause a 1 to appear on the upper bit of a byte/word/etc... when
+ * we subtract 1. So we can detect a zero byte if a one appears at any
+ * of the bits 7, 15, 23 or 31 where it was not. It takes only one
+ * instruction to test for the presence of any of these bits, but it is
+ * still complex to check for their initial absence. Thus, we'll
+ * proceed differently : we first save and clear only those bits, then
+ * we check in the final result if one of them is present and was not.
+ * The order of operations below is important to save registers and
+ * tests. The result is used as a boolean, so the last test must apply
+ * on the constant so that it can efficiently be inlined.
+ */
+#if defined(__i386__)
+ /* gcc on x86 loves copying registers over and over even on code that
+ * simple, so let's do it by hand to prevent it from doing so :-(
+ */
+ asm("lea -0x01010101(%0),%1\n"
+ "not %0\n"
+ "and %1,%0\n"
+ : "=a" (x), "=r"(y)
+ : "0" (x)
+ );
+ return x & 0x80808080;
+#else
+ y = x - 0x01010101; /* generate a carry */
+ x = ~x & y; /* clear the bits that were already set */
+ return x & 0x80808080;
+#endif
+}
+
+/* return non-zero if the argument contains at least one zero byte. See principle above. */
+static inline __attribute__((unused)) unsigned long long has_zero64(unsigned long long x)
+{
+ unsigned long long y;
+
+ y = x - 0x0101010101010101ULL; /* generate a carry */
+ y &= ~x; /* clear the bits that were already set */
+ return y & 0x8080808080808080ULL;
+}
+
+static inline __attribute__((unused)) unsigned long has_zero(unsigned long x)
+{
+ return (sizeof(x) == 8) ? has_zero64(x) : has_zero32(x);
+}
+
+/* find a '\n' between <next> and <end>. Warning: may read slightly past <end>.
+ * If no '\n' is found, <end> is returned.
+ */
+static char *find_lf(char *next, char *end)
+{
+#if defined USE_MEMCHR
+ /* some recent libc use platform-specific optimizations to provide more
+ * efficient byte search than below (eg: glibc 2.11 on x86_64).
+ */
+ next = memchr(next, '\n', end - next);
+ if (!next)
+ next = end;
+#else
+ if (sizeof(long) == 4) { /* 32-bit system */
+ /* this is a speed-up, we read 32 bits at once and check for an
+ * LF character there. We stop if found then continue one at a
+ * time.
+ */
+ while (next < end && (((unsigned long)next) & 3) && *next != '\n')
+ next++;
+
+ /* Now next is multiple of 4 or equal to end. We know we can safely
+ * read up to 32 bytes past end if needed because they're allocated.
+ */
+ while (next < end) {
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ if (has_zero32(*(unsigned int *)next ^ 0x0A0A0A0A))
+ break;
+ next += 4;
+ }
+ }
+ else { /* 64-bit system */
+ /* this is a speed-up, we read 64 bits at once and check for an
+ * LF character there. We stop if found then continue one at a
+ * time.
+ */
+ if (next <= end) {
+ /* max 3 bytes tested here */
+ while ((((unsigned long)next) & 3) && *next != '\n')
+ next++;
+
+			/* maybe we can skip 4 more bytes */
+ if ((((unsigned long)next) & 4) && !has_zero32(*(unsigned int *)next ^ 0x0A0A0A0AU))
+ next += 4;
+ }
+
+ /* now next is multiple of 8 or equal to end */
+ while (next <= (end-68)) {
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ if (has_zero64(*(unsigned long long *)next ^ 0x0A0A0A0A0A0A0A0AULL))
+ break;
+ next += 8;
+ }
+
+ /* maybe we can skip 4 more bytes */
+ if (!has_zero32(*(unsigned int *)next ^ 0x0A0A0A0AU))
+ next += 4;
+ }
+
+ /* We finish if needed : if <next> is below <end>, it means we
+ * found an LF in one of the 4 following bytes.
+ */
+ while (next < end) {
+ if (*next == '\n')
+ break;
+ next++;
+ }
+#endif
+ return next;
+}
+
+const char *fgets2(FILE *stream)
+{
+ static char buffer[FGETS2_BUFSIZE + 68]; /* Note: +32 is enough on 32-bit systems */
+ static char *end = buffer;
+ static char *line = buffer;
+ char *next;
+ int ret;
+
+ next = line;
+
+ while (1) {
+ next = find_lf(next, end);
+ if (next < end) {
+ const char *start = line;
+ *next = '\0';
+ line = next + 1;
+ return start;
+ }
+
+ /* we found an incomplete line. First, let's move the
+ * remaining part of the buffer to the beginning, then
+ * try to complete the buffer with a new read. We can't
+ * rely on <next> anymore because it went past <end>.
+ */
+ if (line > buffer) {
+ if (end != line)
+ memmove(buffer, line, end - line);
+ end = buffer + (end - line);
+ next = end;
+ line = buffer;
+ } else {
+ if (end == buffer + FGETS2_BUFSIZE)
+ return NULL;
+ }
+
+ ret = read(fileno(stream), end, buffer + FGETS2_BUFSIZE - end);
+
+ if (ret <= 0) {
+ if (end == line)
+ return NULL;
+
+ *end = '\0';
+ end = line; /* ensure we stop next time */
+ return line;
+ }
+
+ end += ret;
+ *end = '\n'; /* make parser stop ASAP */
+ /* search for '\n' again */
+ }
+}
+
+#ifdef BENCHMARK
+int main() {
+ const char *p;
+ unsigned int lines = 0;
+
+ while ((p=fgets2(stdin)))
+ lines++;
+ printf("lines=%u\n", lines);
+ return 0;
+}
+#endif
diff --git a/admin/halog/halog.c b/admin/halog/halog.c
new file mode 100644
index 0000000..884a606
--- /dev/null
+++ b/admin/halog/halog.c
@@ -0,0 +1,1915 @@
+/*
+ * haproxy log statistics reporter
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <string.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <time.h>
+
+#include <haproxy/compiler.h>
+
+#include <import/eb32tree.h>
+#include <import/eb64tree.h>
+#include <import/ebistree.h>
+#include <import/ebsttree.h>
+
+#define SOURCE_FIELD 5
+#define ACCEPT_FIELD 6
+#define SERVER_FIELD 8
+#define TIME_FIELD 9
+#define STATUS_FIELD 10
+#define BYTES_SENT_FIELD 11
+#define TERM_CODES_FIELD 14
+#define CONN_FIELD 15
+#define QUEUE_LEN_FIELD 16
+#define METH_FIELD 17
+#define URL_FIELD 18
+#define MAXLINE 16384
+#define QBITS 4
+
+#define SEP(c) ((unsigned char)(c) <= ' ')
+#define SKIP_CHAR(p,c) do { while (1) { int __c = (unsigned char)*p++; if (__c == c) break; if (__c <= ' ') { p--; break; } } } while (0)
+
+/* [0] = err/date, [1] = req, [2] = conn, [3] = resp, [4] = data */
+static struct eb_root timers[5] = {
+ EB_ROOT_UNIQUE, EB_ROOT_UNIQUE, EB_ROOT_UNIQUE,
+ EB_ROOT_UNIQUE, EB_ROOT_UNIQUE,
+};
+
+struct timer {
+ struct eb32_node node;
+ unsigned int count;
+};
+
+struct srv_st {
+ unsigned int st_cnt[6]; /* 0xx to 5xx */
+ unsigned int nb_ct, nb_rt, nb_ok;
+ unsigned long long cum_ct, cum_rt;
+ struct ebmb_node node;
+ /* don't put anything else here, the server name will be there */
+};
+
+struct url_stat {
+ union {
+ struct ebpt_node url;
+ struct eb64_node val;
+ } node;
+ char *url;
+ unsigned long long total_time; /* sum(all reqs' times) */
+ unsigned long long total_time_ok; /* sum(all OK reqs' times) */
+ unsigned long long total_bytes_sent; /* sum(all bytes sent) */
+ unsigned int nb_err, nb_req;
+};
+
+#define FILT_COUNT_ONLY 0x01
+#define FILT_INVERT 0x02
+#define FILT_QUIET 0x04
+#define FILT_ERRORS_ONLY 0x08
+#define FILT_ACC_DELAY 0x10
+#define FILT_ACC_COUNT 0x20
+#define FILT_GRAPH_TIMERS 0x40
+#define FILT_PERCENTILE 0x80
+#define FILT_TIME_RESP 0x100
+
+#define FILT_INVERT_ERRORS 0x200
+#define FILT_INVERT_TIME_RESP 0x400
+
+#define FILT_COUNT_STATUS 0x800
+#define FILT_COUNT_SRV_STATUS 0x1000
+#define FILT_COUNT_TERM_CODES 0x2000
+
+#define FILT_COUNT_URL_ONLY 0x004000
+#define FILT_COUNT_URL_COUNT 0x008000
+#define FILT_COUNT_URL_ERR 0x010000
+#define FILT_COUNT_URL_TTOT 0x020000
+#define FILT_COUNT_URL_TAVG 0x040000
+#define FILT_COUNT_URL_TTOTO 0x080000
+#define FILT_COUNT_URL_TAVGO 0x100000
+
+#define FILT_HTTP_ONLY 0x200000
+#define FILT_TERM_CODE_NAME 0x400000
+#define FILT_INVERT_TERM_CODE_NAME 0x800000
+
+#define FILT_HTTP_STATUS 0x1000000
+#define FILT_INVERT_HTTP_STATUS 0x2000000
+#define FILT_QUEUE_ONLY 0x4000000
+#define FILT_QUEUE_SRV_ONLY 0x8000000
+
+#define FILT_COUNT_URL_BAVG 0x10000000
+#define FILT_COUNT_URL_BTOT 0x20000000
+
+#define FILT_COUNT_URL_ANY (FILT_COUNT_URL_ONLY|FILT_COUNT_URL_COUNT|FILT_COUNT_URL_ERR| \
+ FILT_COUNT_URL_TTOT|FILT_COUNT_URL_TAVG|FILT_COUNT_URL_TTOTO|FILT_COUNT_URL_TAVGO| \
+ FILT_COUNT_URL_BAVG|FILT_COUNT_URL_BTOT)
+
+#define FILT_COUNT_COOK_CODES 0x40000000
+#define FILT_COUNT_IP_COUNT 0x80000000
+
+#define FILT2_TIMESTAMP 0x01
+#define FILT2_PRESERVE_QUERY 0x02
+#define FILT2_EXTRACT_CAPTURE 0x04
+
+unsigned int filter = 0;
+unsigned int filter2 = 0;
+unsigned int filter_invert = 0;
+const char *line;
+int linenum = 0;
+int parse_err = 0;
+int lines_out = 0;
+int lines_max = -1;
+
+const char *fgets2(FILE *stream);
+
+void filter_count_url(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_srv_status(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_count_status(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_graphs(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_output_line(const char *accept_field, const char *time_field, struct timer **tptr);
+void filter_extract_capture(const char *accept_field, const char *time_field, unsigned int, unsigned int);
+void filter_accept_holes(const char *accept_field, const char *time_field, struct timer **tptr);
+
+void usage(FILE *output, const char *msg)
+{
+ fprintf(output,
+ "%s"
+ "Usage:\n"
+ " halog [-h|--help] for long help\n"
+ " halog [input_filters]* [modifiers]* [output_format] < log\n"
+ " inp = [-e|-E] [-H] [-Q|-QS] [-rt|-RT <time>] [-ad <delay>] [-ac <count>]\n"
+ " [-hs|-HS [min][:[max]]] [-tcn|-TCN <termcode>] [-time [min][:[max]]]\n"
+ " mod = [-q] [-v] [-m <lines>] [-s <skipflds>] [-query]\n"
+ " out = {-c|-u|-uc|-ue|-ua|-ut|-uao|-uto|-uba|-ubt|-hdr <block>:<field>|\n"
+ " -cc|-gt|-pct|-st|-tc|-srv|-ic}\n"
+ "\n",
+ msg ? msg : ""
+ );
+}
+
+void die(const char *msg)
+{
+ usage(stderr, msg);
+ exit(1);
+}
+
+void help()
+{
+ usage(stdout, NULL);
+ printf(
+ "Input filters - several filters may be combined\n"
+ " -H only match lines containing HTTP logs (ignore TCP)\n"
+ " -E only match lines without any error (no 5xx status)\n"
+ " -e only match lines with errors (status 5xx or negative)\n"
+ " -rt|-RT <time> only match response times larger|smaller than <time>\n"
+ " -Q|-QS only match queued requests (any queue|server queue)\n"
+ " -tcn|-TCN <code> only match requests with/without termination code <code>\n"
+ " -hs|-HS <[min][:][max]> only match requests with HTTP status codes within/not\n"
+ " within min..max. Any of them may be omitted. Exact\n"
+ " code is checked for if no ':' is specified.\n"
+ " -time <[min][:max]> only match requests recorded between timestamps.\n"
+ " Any of them may be omitted.\n"
+ "Modifiers\n"
+ " -v invert the input filtering condition\n"
+ " -q don't report errors/warnings\n"
+ " -m <lines> limit output to the first <lines> lines\n"
+ " -s <skip_n_fields> skip n fields from the beginning of a line (default %d)\n"
+ " you can also use -n to start from earlier then field %d\n"
+ " -query preserve the query string for per-URL (-u*) statistics\n"
+ "\n"
+ "Output format - only one may be used at a time\n"
+ " -c only report the number of lines that would have been printed\n"
+ " -pct output connect and response times percentiles\n"
+ " -st output number of requests per HTTP status code\n"
+ " -cc output number of requests per cookie code (2 chars)\n"
+ " -tc output number of requests per termination code (2 chars)\n"
+ " -srv output statistics per server (time, requests, errors)\n"
+ " -ic output statistics per ip count (time, requests, errors)\n"
+ " -u* output statistics per URL (time, requests, errors)\n"
+ " Additional characters indicate the output sorting key :\n"
+ " -u : by URL, -uc : request count, -ue : error count\n"
+ " -ua : average response time, -ut : average total time\n"
+ " -uao, -uto: average times computed on valid ('OK') requests\n"
+ " -uba, -ubt: average bytes returned, total bytes returned\n"
+ " -hdr output captured header at the given <block>:<field>\n"
+ " -ac <count> -ad <delay>:\n"
+ " Report periods corresponding to a grouped accept of <count> requests at\n"
+ " the same millisecond after a delay of at least <ad> milliseconds with no\n"
+ " incoming accept (used to spot network outages). Output format contains:\n"
+ " <accept_date> <date_ms> <delta_ms from previous one> <nb_entries>\n",
+ (int)SOURCE_FIELD, (int)SOURCE_FIELD
+ );
+ exit(0);
+}
+
+
+/* return pointer to first char not part of current field starting at <p>. */
+
+#if defined(__i386__)
+/* this one is always faster on 32-bits */
+static inline const char *field_stop(const char *p)
+{
+ asm(
+ /* Look for spaces */
+ "4: \n\t"
+ "inc %0 \n\t"
+ "cmpb $0x20, -1(%0) \n\t"
+ "ja 4b \n\t"
+ "jz 3f \n\t"
+
+ /* we only get there for control chars 0..31. Leave if we find '\0' */
+ "cmpb $0x0, -1(%0) \n\t"
+ "jnz 4b \n\t"
+
+ /* return %0-1 = position of the last char we checked */
+ "3: \n\t"
+ "dec %0 \n\t"
+ : "=r" (p)
+ : "0" (p)
+ );
+ return p;
+}
+#else
+const char *field_stop(const char *p)
+{
+ unsigned char c;
+
+ while (1) {
+ c = *(p++);
+ if (c > ' ')
+ continue;
+ if (c == ' ' || c == 0)
+ break;
+ }
+ return p - 1;
+}
+#endif
+
+/* return non-zero if the argument contains at least one zero byte. See principle above. */
+static inline __attribute__((unused)) unsigned long long has_zero64(unsigned long long x)
+{
+ unsigned long long y;
+
+ y = x - 0x0101010101010101ULL; /* generate a carry */
+ y &= ~x; /* clear the bits that were already set */
+ return y & 0x8080808080808080ULL;
+}
+
+/* return field <field> (starting from 1) in string <p>. Only consider
+ * contiguous spaces (or tabs) as one delimiter. May return pointer to
+ * last char if field is not found. Equivalent to awk '{print $field}'.
+ */
+const char *field_start(const char *p, int field)
+{
+#ifndef PREFER_ASM
+ unsigned char c;
+ while (1) {
+ /* skip spaces */
+ while (1) {
+ c = *(p++);
+ if (!c) /* end of line */
+ return p-1;
+ if (c == ' ')
+ continue;
+ /* other char => new field */
+ break;
+ }
+
+ /* start of field */
+ field--;
+ if (!field)
+ return p-1;
+
+ /* skip this field */
+ while (1) {
+#if defined(HA_UNALIGNED_LE64)
+ unsigned long long l = *(unsigned long long *)p;
+ if (!has_zero64(l)) {
+ l ^= 0x2020202020202020;
+ l = has_zero64(l);
+ if (!l) {
+ p += 8;
+ continue;
+ }
+ /* there is at least one space, find it and
+ * skip it now. The lowest byte in <l> with
+ * a 0x80 is the right one, but checking for
+ * it remains slower than testing each byte,
+ * probably due to the numerous short fields.
+ */
+ while (*(p++) != ' ')
+ ;
+ break;
+ }
+#endif
+ c = *(p++);
+ if (c == '\0')
+ return p - 1;
+ if (c == ' ')
+ break;
+ }
+ }
+#else
+ /* This version works optimally on i386 and x86_64 but the code above
+ * shows similar performance. However, depending on the version of GCC
+ * used, inlining rules change and it may have difficulties to make
+ * efficient use of this code at other locations and could result in
+ * worse performance (eg: gcc 4.4). You may want to experience.
+ */
+ asm(
+ /* skip spaces */
+ "1: \n\t"
+ "inc %0 \n\t"
+ "cmpb $0x20, -1(%0) \n\t"
+ "ja 2f \n\t"
+ "jz 1b \n\t"
+
+ /* we only get there for control chars 0..31. Leave if we find '\0' */
+ "cmpb $0x0, -1(%0) \n\t"
+ "jz 3f \n\t"
+
+ /* start of field at [%0-1]. Check if we need to skip more fields */
+ "2: \n\t"
+ "dec %1 \n\t"
+ "jz 3f \n\t"
+
+ /* Look for spaces */
+ "4: \n\t"
+ "inc %0 \n\t"
+ "cmpb $0x20, -1(%0) \n\t"
+ "jz 1b \n\t"
+ "ja 4b \n\t"
+
+ /* we only get there for control chars 0..31. Leave if we find '\0' */
+ "cmpb $0x0, -1(%0) \n\t"
+ "jnz 4b \n\t"
+
+ /* return %0-1 = position of the last char we checked */
+ "3: \n\t"
+ "dec %0 \n\t"
+ : "=r" (p)
+ : "r" (field), "0" (p)
+ );
+ return p;
+#endif
+}
+
+/* keep only the <bits> higher bits of <i> */
+static inline unsigned int quantify_u32(unsigned int i, int bits)
+{
+ int high;
+
+ if (!bits)
+ return 0;
+
+ if (i)
+ high = fls_auto(i); // 1 to 32
+ else
+ high = 0;
+
+ if (high <= bits)
+ return i;
+
+ return i & ~((1 << (high - bits)) - 1);
+}
+
+/* keep only the <bits> higher bits of the absolute value of <i>, as well as
+ * its sign. */
+static inline int quantify(int i, int bits)
+{
+ if (i >= 0)
+ return quantify_u32(i, bits);
+ else
+ return -quantify_u32(-i, bits);
+}
+
+/* Insert timer value <v> into tree <r>. A pre-allocated node must be passed
+ * in <alloc>. It may be NULL, in which case the function will allocate it
+ * itself. It will be reset to NULL once consumed. The caller is responsible
+ * for freeing the node once not used anymore. The node where the value was
+ * inserted is returned.
+ */
+struct timer *insert_timer(struct eb_root *r, struct timer **alloc, int v)
+{
+ struct timer *t = *alloc;
+ struct eb32_node *n;
+
+ if (!t) {
+ t = calloc(1, sizeof(*t));
+ if (unlikely(!t)) {
+ fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
+ exit(1);
+ }
+ }
+ t->node.key = quantify(v, QBITS); // keep only the higher QBITS bits
+
+ n = eb32i_insert(r, &t->node);
+ if (n == &t->node)
+ t = NULL; /* node inserted, will malloc next time */
+
+ *alloc = t;
+ return container_of(n, struct timer, node);
+}
+
+/* Insert value value <v> into tree <r>. A pre-allocated node must be passed
+ * in <alloc>. It may be NULL, in which case the function will allocate it
+ * itself. It will be reset to NULL once consumed. The caller is responsible
+ * for freeing the node once not used anymore. The node where the value was
+ * inserted is returned.
+ */
+struct timer *insert_value(struct eb_root *r, struct timer **alloc, int v)
+{
+ struct timer *t = *alloc;
+ struct eb32_node *n;
+
+ if (!t) {
+ t = calloc(1, sizeof(*t));
+ if (unlikely(!t)) {
+ fprintf(stderr, "%s: not enough memory\n", __FUNCTION__);
+ exit(1);
+ }
+ }
+ t->node.key = v;
+
+ n = eb32i_insert(r, &t->node);
+ if (n == &t->node)
+ t = NULL; /* node inserted, will malloc next time */
+
+ *alloc = t;
+ return container_of(n, struct timer, node);
+}
+
+int str2ic(const char *s)
+{
+ int i = 0;
+ int j, k;
+
+ if (*s != '-') {
+ /* positive number */
+ while (1) {
+ j = (*s++) - '0';
+ k = i * 10;
+ if ((unsigned)j > 9)
+ break;
+ i = k + j;
+ }
+ } else {
+ /* negative number */
+ s++;
+ while (1) {
+ j = (*s++) - '0';
+ k = i * 10;
+ if ((unsigned)j > 9)
+ break;
+ i = k - j;
+ }
+ }
+
+ return i;
+}
+
+
+/* Convert "[04/Dec/2008:09:49:40.555]" to an integer equivalent to the time of
+ * the day in milliseconds. It returns -1 for all unparsable values. The parser
+ * looks ugly but gcc emits far better code that way.
+ */
+int convert_date(const char *field)
+{
+ unsigned int h, m, s, ms;
+ unsigned char c;
+ const char *e;
+
+ h = m = s = ms = 0;
+ e = field;
+
+ /* skip the date */
+ while (1) {
+ c = *(e++);
+ if (c == ':')
+ break;
+ if (!c)
+ goto out_err;
+ }
+
+ /* hour + ':' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ h = h * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+
+ /* minute + ':' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ m = m * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+
+ /* second + '.' or ']' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ s = s * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+
+ /* if there's a '.', we have milliseconds */
+ if (c == (unsigned char)('.' - '0')) {
+ /* millisecond second + ']' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ ms = ms * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+ }
+ return (((h * 60) + m) * 60 + s) * 1000 + ms;
+ out_err:
+ return -1;
+}
+
+/* Convert "[04/Dec/2008:09:49:40.555]" to an unix timestamp.
+ * It returns -1 for all unparsable values. The parser
+ * looks ugly but gcc emits far better code that way.
+ */
+int convert_date_to_timestamp(const char *field)
+{
+ unsigned int d, mo, y, h, m, s;
+ unsigned char c;
+ const char *e;
+ time_t rawtime;
+ static struct tm * timeinfo;
+ static int last_res;
+
+ d = mo = y = h = m = s = 0;
+ e = field;
+
+ e++; // remove '['
+
+ /* day + '/' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ d = d * 10 + c;
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+ }
+
+ /* month + '/' */
+ c = *(e++);
+ if (c =='F') {
+ mo = 2;
+ e = e+3;
+ } else if (c =='S') {
+ mo = 9;
+ e = e+3;
+ } else if (c =='O') {
+ mo = 10;
+ e = e+3;
+ } else if (c =='N') {
+ mo = 11;
+ e = e+3;
+ } else if (c == 'D') {
+ mo = 12;
+ e = e+3;
+ } else if (c == 'A') {
+ c = *(e++);
+ if (c == 'p') {
+ mo = 4;
+ e = e+2;
+ } else if (c == 'u') {
+ mo = 8;
+ e = e+2;
+ } else
+ goto out_err;
+ } else if (c == 'J') {
+ c = *(e++);
+ if (c == 'a') {
+ mo = 1;
+ e = e+2;
+ } else if (c == 'u') {
+ c = *(e++);
+ if (c == 'n') {
+ mo = 6;
+ e = e+1;
+ } else if (c == 'l') {
+ mo = 7;
+ e++;
+ }
+ } else
+ goto out_err;
+ } else if (c == 'M') {
+ e++;
+ c = *(e++);
+ if (c == 'r') {
+ mo = 3;
+ e = e+1;
+ } else if (c == 'y') {
+ mo = 5;
+ e = e+1;
+ } else
+ goto out_err;
+ } else
+ goto out_err;
+
+ /* year + ':' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ y = y * 10 + c;
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+ }
+
+ /* hour + ':' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ h = h * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+
+ /* minute + ':' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ m = m * 10 + c;
+ }
+ if (c == (unsigned char)(0 - '0'))
+ goto out_err;
+
+ /* second + '.' or ']' */
+ while (1) {
+ c = *(e++) - '0';
+ if (c > 9)
+ break;
+ s = s * 10 + c;
+ }
+
+ if (likely(timeinfo)) {
+ if ((unsigned)timeinfo->tm_min == m &&
+ (unsigned)timeinfo->tm_hour == h &&
+ (unsigned)timeinfo->tm_mday == d &&
+ (unsigned)timeinfo->tm_mon == mo - 1 &&
+ (unsigned)timeinfo->tm_year == y - 1900)
+ return last_res + s;
+ }
+ else {
+ time(&rawtime);
+ timeinfo = localtime(&rawtime);
+ }
+
+ timeinfo->tm_sec = 0;
+ timeinfo->tm_min = m;
+ timeinfo->tm_hour = h;
+ timeinfo->tm_mday = d;
+ timeinfo->tm_mon = mo - 1;
+ timeinfo->tm_year = y - 1900;
+ last_res = mktime(timeinfo);
+
+ return last_res + s;
+ out_err:
+ return -1;
+}
+
+void truncated_line(int linenum, const char *line)
+{
+ if (!(filter & FILT_QUIET))
+ fprintf(stderr, "Truncated line %d: %s\n", linenum, line);
+}
+
+int main(int argc, char **argv)
+{
+ const char *b, *p, *time_field, *accept_field, *source_field;
+ const char *filter_term_code_name = NULL;
+ const char *output_file = NULL;
+ int f, last;
+ struct timer *t = NULL;
+ struct eb32_node *n;
+ struct url_stat *ustat = NULL;
+ int val, test;
+ unsigned int uval;
+ unsigned int filter_acc_delay = 0, filter_acc_count = 0;
+ int filter_time_resp = 0;
+ int filt_http_status_low = 0, filt_http_status_high = 0;
+ unsigned int filt2_timestamp_low = 0, filt2_timestamp_high = 0;
+ unsigned int filt2_capture_block = 0, filt2_capture_field = 0;
+ int skip_fields = 1;
+
+ void (*line_filter)(const char *accept_field, const char *time_field, struct timer **tptr) = NULL;
+
+ argc--; argv++;
+ while (argc > 0) {
+ if (*argv[0] != '-')
+ break;
+
+ if (strcmp(argv[0], "-ad") == 0) {
+ if (argc < 2) die("missing option for -ad\n");
+ argc--; argv++;
+ filter |= FILT_ACC_DELAY;
+ filter_acc_delay = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-ac") == 0) {
+ if (argc < 2) die("missing option for -ac\n");
+ argc--; argv++;
+ filter |= FILT_ACC_COUNT;
+ filter_acc_count = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-rt") == 0) {
+ if (argc < 2) die("missing option for -rt\n");
+ argc--; argv++;
+ filter |= FILT_TIME_RESP;
+ filter_time_resp = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-RT") == 0) {
+ if (argc < 2) die("missing option for -RT\n");
+ argc--; argv++;
+ filter |= FILT_TIME_RESP | FILT_INVERT_TIME_RESP;
+ filter_time_resp = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-s") == 0) {
+ if (argc < 2) die("missing option for -s\n");
+ argc--; argv++;
+ skip_fields = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-m") == 0) {
+ if (argc < 2) die("missing option for -m\n");
+ argc--; argv++;
+ lines_max = atol(*argv);
+ }
+ else if (strcmp(argv[0], "-e") == 0)
+ filter |= FILT_ERRORS_ONLY;
+ else if (strcmp(argv[0], "-E") == 0)
+ filter |= FILT_ERRORS_ONLY | FILT_INVERT_ERRORS;
+ else if (strcmp(argv[0], "-H") == 0)
+ filter |= FILT_HTTP_ONLY;
+ else if (strcmp(argv[0], "-Q") == 0)
+ filter |= FILT_QUEUE_ONLY;
+ else if (strcmp(argv[0], "-QS") == 0)
+ filter |= FILT_QUEUE_SRV_ONLY;
+ else if (strcmp(argv[0], "-c") == 0)
+ filter |= FILT_COUNT_ONLY;
+ else if (strcmp(argv[0], "-q") == 0)
+ filter |= FILT_QUIET;
+ else if (strcmp(argv[0], "-v") == 0)
+ filter_invert = !filter_invert;
+ else if (strcmp(argv[0], "-gt") == 0)
+ filter |= FILT_GRAPH_TIMERS;
+ else if (strcmp(argv[0], "-pct") == 0)
+ filter |= FILT_PERCENTILE;
+ else if (strcmp(argv[0], "-st") == 0)
+ filter |= FILT_COUNT_STATUS;
+ else if (strcmp(argv[0], "-srv") == 0)
+ filter |= FILT_COUNT_SRV_STATUS;
+ else if (strcmp(argv[0], "-cc") == 0)
+ filter |= FILT_COUNT_COOK_CODES;
+ else if (strcmp(argv[0], "-tc") == 0)
+ filter |= FILT_COUNT_TERM_CODES;
+ else if (strcmp(argv[0], "-tcn") == 0) {
+ if (argc < 2) die("missing option for -tcn\n");
+ argc--; argv++;
+ filter |= FILT_TERM_CODE_NAME;
+ filter_term_code_name = *argv;
+ }
+ else if (strcmp(argv[0], "-TCN") == 0) {
+ if (argc < 2) die("missing option for -TCN\n");
+ argc--; argv++;
+ filter |= FILT_TERM_CODE_NAME | FILT_INVERT_TERM_CODE_NAME;
+ filter_term_code_name = *argv;
+ }
+ else if (strcmp(argv[0], "-hs") == 0 || strcmp(argv[0], "-HS") == 0) {
+ char *sep, *str;
+
+ if (argc < 2) die("missing option for -hs/-HS ([min]:[max])\n");
+ filter |= FILT_HTTP_STATUS;
+ if (argv[0][1] == 'H')
+ filter |= FILT_INVERT_HTTP_STATUS;
+
+ argc--; argv++;
+ str = *argv;
+ sep = strchr(str, ':'); /* [min]:[max] */
+ if (!sep)
+ sep = str; /* make max point to min */
+ else
+ *sep++ = 0;
+ filt_http_status_low = *str ? atol(str) : 0;
+ filt_http_status_high = *sep ? atol(sep) : 65535;
+ }
+ else if (strcmp(argv[0], "-time") == 0) {
+ char *sep, *str;
+
+ if (argc < 2) die("missing option for -time ([min]:[max])\n");
+ filter2 |= FILT2_TIMESTAMP;
+
+ argc--; argv++;
+ str = *argv;
+ sep = strchr(str, ':'); /* [min]:[max] */
+ filt2_timestamp_low = *str ? atol(str) : 0;
+ if (!sep)
+ filt2_timestamp_high = 0xFFFFFFFF;
+ else
+ filt2_timestamp_high = atol(++sep);
+ }
+ else if (strcmp(argv[0], "-u") == 0)
+ filter |= FILT_COUNT_URL_ONLY;
+ else if (strcmp(argv[0], "-uc") == 0)
+ filter |= FILT_COUNT_URL_COUNT;
+ else if (strcmp(argv[0], "-ue") == 0)
+ filter |= FILT_COUNT_URL_ERR;
+ else if (strcmp(argv[0], "-ua") == 0)
+ filter |= FILT_COUNT_URL_TAVG;
+ else if (strcmp(argv[0], "-ut") == 0)
+ filter |= FILT_COUNT_URL_TTOT;
+ else if (strcmp(argv[0], "-uao") == 0)
+ filter |= FILT_COUNT_URL_TAVGO;
+ else if (strcmp(argv[0], "-uto") == 0)
+ filter |= FILT_COUNT_URL_TTOTO;
+ else if (strcmp(argv[0], "-uba") == 0)
+ filter |= FILT_COUNT_URL_BAVG;
+ else if (strcmp(argv[0], "-ubt") == 0)
+ filter |= FILT_COUNT_URL_BTOT;
+ else if (strcmp(argv[0], "-query") == 0)
+ filter2 |= FILT2_PRESERVE_QUERY;
+ else if (strcmp(argv[0], "-ic") == 0)
+ filter |= FILT_COUNT_IP_COUNT;
+ else if (strcmp(argv[0], "-hdr") == 0) {
+ char *sep, *str;
+
+ if (argc < 2) die("missing option for -hdr (<block>:<field>)\n");
+ filter2 |= FILT2_EXTRACT_CAPTURE;
+
+ argc--; argv++;
+ str = *argv;
+ sep = strchr(str, ':');
+ if (!sep)
+ die("missing colon in -hdr (<block>:<field>)\n");
+ else
+ *sep++ = 0;
+
+ filt2_capture_block = *str ? atol(str) : 1;
+ filt2_capture_field = *sep ? atol(sep) : 1;
+
+ if (filt2_capture_block < 1 || filt2_capture_field < 1)
+ die("block and field must be at least 1 for -hdr (<block>:<field>)\n");
+ }
+ else if (strcmp(argv[0], "-o") == 0) {
+ if (output_file)
+ die("Fatal: output file name already specified.\n");
+ if (argc < 2)
+ die("Fatal: missing output file name.\n");
+ output_file = argv[1];
+ }
+ else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0)
+ help();
+ argc--;
+ argv++;
+ }
+
+ if (!filter && !filter2)
+ die("No action specified.\n");
+
+ if (filter & FILT_ACC_COUNT && !filter_acc_count)
+ filter_acc_count=1;
+
+ if (filter & FILT_ACC_DELAY && !filter_acc_delay)
+ filter_acc_delay = 1;
+
+
+ /* by default, all lines are printed */
+ line_filter = filter_output_line;
+ if (filter & (FILT_ACC_COUNT|FILT_ACC_DELAY))
+ line_filter = filter_accept_holes;
+ else if (filter & (FILT_GRAPH_TIMERS|FILT_PERCENTILE))
+ line_filter = filter_graphs;
+ else if (filter & FILT_COUNT_STATUS)
+ line_filter = filter_count_status;
+ else if (filter & FILT_COUNT_COOK_CODES)
+ line_filter = filter_count_cook_codes;
+ else if (filter & FILT_COUNT_TERM_CODES)
+ line_filter = filter_count_term_codes;
+ else if (filter & FILT_COUNT_SRV_STATUS)
+ line_filter = filter_count_srv_status;
+ else if (filter & FILT_COUNT_URL_ANY)
+ line_filter = filter_count_url;
+ else if (filter & FILT_COUNT_ONLY)
+ line_filter = NULL;
+
+#if defined(POSIX_FADV_SEQUENTIAL)
+ /* around 20% performance improvement is observed on Linux with this
+ * on cold-cache. Surprisingly, WILLNEED is less performant. Don't
+ * use NOREUSE as it flushes the cache and prevents easy data
+ * manipulation on logs!
+ */
+ posix_fadvise(0, 0, 0, POSIX_FADV_SEQUENTIAL);
+#endif
+
+ if (!line_filter && /* FILT_COUNT_ONLY ( see above), and no input filter (see below) */
+ !(filter & (FILT_HTTP_ONLY|FILT_TIME_RESP|FILT_ERRORS_ONLY|FILT_HTTP_STATUS|FILT_QUEUE_ONLY|FILT_QUEUE_SRV_ONLY|FILT_TERM_CODE_NAME)) &&
+ !(filter2 & (FILT2_TIMESTAMP))) {
+ /* read the whole file at once first, ignore it if inverted output */
+ if (!filter_invert)
+ while ((lines_max < 0 || lines_out < lines_max) && fgets2(stdin) != NULL)
+ lines_out++;
+
+ goto skip_filters;
+ }
+
+ while ((line = fgets2(stdin)) != NULL) {
+ linenum++;
+ time_field = NULL; accept_field = NULL;
+ source_field = NULL;
+
+ test = 1;
+
+ /* for any line we process, we first ensure that there is a field
+ * looking like the accept date field (beginning with a '[').
+ */
+ if (filter & FILT_COUNT_IP_COUNT) {
+ /* we need the IP first */
+ source_field = field_start(line, SOURCE_FIELD + skip_fields);
+ accept_field = field_start(source_field, ACCEPT_FIELD - SOURCE_FIELD + 1);
+ }
+ else
+ accept_field = field_start(line, ACCEPT_FIELD + skip_fields);
+
+ if (unlikely(*accept_field != '[')) {
+ parse_err++;
+ continue;
+ }
+
+ /* the day of month field is begin 01 and 31 */
+ if (accept_field[1] < '0' || accept_field[1] > '3') {
+ parse_err++;
+ continue;
+ }
+
+ if (filter2 & FILT2_TIMESTAMP) {
+ uval = convert_date_to_timestamp(accept_field);
+ test &= (uval>=filt2_timestamp_low && uval<=filt2_timestamp_high) ;
+ }
+
+ if (filter & FILT_HTTP_ONLY) {
+ /* only report lines with at least 4 timers */
+ if (!time_field) {
+ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1);
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ continue;
+ }
+ }
+
+ field_stop(time_field + 1);
+ /* we have field TIME_FIELD in [time_field]..[e-1] */
+ p = time_field;
+ f = 0;
+ while (!SEP(*p)) {
+ if (++f == 4)
+ break;
+ SKIP_CHAR(p, '/');
+ }
+ test &= (f >= 4);
+ }
+
+ if (filter & FILT_TIME_RESP) {
+ int tps;
+
+ /* only report lines with response times larger than filter_time_resp */
+ if (!time_field) {
+ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1);
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ continue;
+ }
+ }
+
+ field_stop(time_field + 1);
+ /* we have field TIME_FIELD in [time_field]..[e-1], let's check only the response time */
+
+ p = time_field;
+ f = 0;
+ while (!SEP(*p)) {
+ tps = str2ic(p);
+ if (tps < 0) {
+ tps = -1;
+ }
+ if (++f == 4)
+ break;
+ SKIP_CHAR(p, '/');
+ }
+
+ if (unlikely(f < 4)) {
+ parse_err++;
+ continue;
+ }
+
+ test &= (tps >= filter_time_resp) ^ !!(filter & FILT_INVERT_TIME_RESP);
+ }
+
+ if (filter & (FILT_ERRORS_ONLY | FILT_HTTP_STATUS)) {
+ /* Check both error codes (-1, 5xx) and status code ranges */
+ if (time_field)
+ b = field_start(time_field, STATUS_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, STATUS_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ continue;
+ }
+
+ val = str2ic(b);
+ if (filter & FILT_ERRORS_ONLY)
+ test &= (val < 0 || (val >= 500 && val <= 599)) ^ !!(filter & FILT_INVERT_ERRORS);
+
+ if (filter & FILT_HTTP_STATUS)
+ test &= (val >= filt_http_status_low && val <= filt_http_status_high) ^ !!(filter & FILT_INVERT_HTTP_STATUS);
+ }
+
+ if (filter & (FILT_QUEUE_ONLY|FILT_QUEUE_SRV_ONLY)) {
+ /* Check if the server's queue is non-nul */
+ if (time_field)
+ b = field_start(time_field, QUEUE_LEN_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, QUEUE_LEN_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ continue;
+ }
+
+ if (*b == '0') {
+ if (filter & FILT_QUEUE_SRV_ONLY) {
+ test = 0;
+ }
+ else {
+ do {
+ b++;
+ if (*b == '/') {
+ b++;
+ break;
+ }
+ } while (*b);
+ test &= ((unsigned char)(*b - '1') < 9);
+ }
+ }
+ }
+
+ if (filter & FILT_TERM_CODE_NAME) {
+ /* only report corresponding termination code name */
+ if (time_field)
+ b = field_start(time_field, TERM_CODES_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, TERM_CODES_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ continue;
+ }
+
+ test &= (b[0] == filter_term_code_name[0] && b[1] == filter_term_code_name[1]) ^ !!(filter & FILT_INVERT_TERM_CODE_NAME);
+ }
+
+
+ test ^= filter_invert;
+ if (!test)
+ continue;
+
+ /************** here we process inputs *******************/
+
+ if (line_filter) {
+ if (filter & FILT_COUNT_IP_COUNT)
+ filter_count_ip(source_field, accept_field, time_field, &t);
+ else if (filter2 & FILT2_EXTRACT_CAPTURE)
+ filter_extract_capture(accept_field, time_field, filt2_capture_block, filt2_capture_field);
+ else
+ line_filter(accept_field, time_field, &t);
+ }
+ else
+ lines_out++; /* FILT_COUNT_ONLY was used, so we're just counting lines */
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ }
+
+ skip_filters:
+ /*****************************************************
+ * Here we've finished reading all input. Depending on the
+ * filters, we may still have some analysis to run on the
+ * collected data and to output data in a new format.
+ *************************************************** */
+
+ if (t)
+ free(t);
+
+ if (filter & FILT_COUNT_ONLY) {
+ printf("%d\n", lines_out);
+ exit(0);
+ }
+
+ if (filter & (FILT_ACC_COUNT|FILT_ACC_DELAY)) {
+ /* sort and count all timers. Output will look like this :
+ * <accept_date> <delta_ms from previous one> <nb entries>
+ */
+ n = eb32_first(&timers[0]);
+
+ if (n)
+ last = n->key;
+ while (n) {
+ unsigned int d, h, m, s, ms;
+
+ t = container_of(n, struct timer, node);
+ h = n->key;
+ d = h - last;
+ last = h;
+
+ if (d >= filter_acc_delay && t->count >= filter_acc_count) {
+ ms = h % 1000; h = h / 1000;
+ s = h % 60; h = h / 60;
+ m = h % 60; h = h / 60;
+ printf("%02u:%02u:%02u.%03u %d %u %u\n", h, m, s, ms, last, d, t->count);
+ lines_out++;
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ }
+ n = eb32_next(n);
+ }
+ }
+ else if (filter & FILT_GRAPH_TIMERS) {
+ /* sort all timers */
+ for (f = 0; f < 5; f++) {
+ struct eb32_node *n;
+
+ n = eb32_first(&timers[f]);
+ while (n) {
+ int i;
+ double d;
+ int val;
+
+ t = container_of(n, struct timer, node);
+ last = n->key;
+ val = t->count;
+
+ i = (last < 0) ? -last : last;
+ i = fls_auto(i) - QBITS;
+
+ if (i > 0)
+ d = val / (double)(1 << i);
+ else
+ d = val;
+
+ if (d > 0.0)
+ printf("%d %d %f\n", f, last, d+1.0);
+
+ n = eb32_next(n);
+ }
+ }
+ }
+ else if (filter & FILT_PERCENTILE) {
+ /* report timers by percentile :
+ * <percent> <total> <max_req_time> <max_conn_time> <max_resp_time> <max_data_time>
+ * We don't count errs.
+ */
+ struct eb32_node *n[5];
+ unsigned long cum[5];
+ double step;
+
+ if (!lines_out)
+ goto empty;
+
+ for (f = 1; f < 5; f++) {
+ n[f] = eb32_first(&timers[f]);
+ cum[f] = container_of(n[f], struct timer, node)->count;
+ }
+
+ for (step = 1; step <= 1000;) {
+ unsigned int thres = lines_out * (step / 1000.0);
+
+ printf("%3.1f %u ", step/10.0, thres);
+ for (f = 1; f < 5; f++) {
+ struct eb32_node *next;
+ while (cum[f] < thres) {
+ /* need to find other keys */
+ next = eb32_next(n[f]);
+ if (!next)
+ break;
+ n[f] = next;
+ cum[f] += container_of(next, struct timer, node)->count;
+ }
+
+ /* value still within $step % of total */
+ printf("%d ", n[f]->key);
+ }
+ putchar('\n');
+ if (step >= 100 && step < 900)
+ step += 50; // jump 5% by 5% between those steps.
+ else if (step >= 20 && step < 980)
+ step += 10;
+ else
+ step += 1;
+ }
+ }
+ else if (filter & FILT_COUNT_STATUS) {
+ /* output all statuses in the form of <status> <occurrences> */
+ n = eb32_first(&timers[0]);
+ while (n) {
+ t = container_of(n, struct timer, node);
+ printf("%d %u\n", n->key, t->count);
+ lines_out++;
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ n = eb32_next(n);
+ }
+ }
+ else if (filter & FILT_COUNT_SRV_STATUS) {
+ struct ebmb_node *srv_node;
+ struct srv_st *srv;
+
+ printf("#srv_name 1xx 2xx 3xx 4xx 5xx other tot_req req_ok pct_ok avg_ct avg_rt\n");
+
+ srv_node = ebmb_first(&timers[0]);
+ while (srv_node) {
+ int tot_rq;
+
+ srv = container_of(srv_node, struct srv_st, node);
+
+ tot_rq = 0;
+ for (f = 0; f <= 5; f++)
+ tot_rq += srv->st_cnt[f];
+
+ printf("%s %u %u %u %u %u %u %d %u %.1f %d %d\n",
+ srv_node->key, srv->st_cnt[1], srv->st_cnt[2],
+ srv->st_cnt[3], srv->st_cnt[4], srv->st_cnt[5], srv->st_cnt[0],
+ tot_rq,
+ srv->nb_ok, (double)srv->nb_ok * 100.0 / (tot_rq?tot_rq:1),
+ (int)(srv->cum_ct / (srv->nb_ct?srv->nb_ct:1)), (int)(srv->cum_rt / (srv->nb_rt?srv->nb_rt:1)));
+ srv_node = ebmb_next(srv_node);
+ lines_out++;
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ }
+ }
+ else if (filter & (FILT_COUNT_TERM_CODES|FILT_COUNT_COOK_CODES)) {
+ /* output all statuses in the form of <code> <occurrences> */
+ n = eb32_first(&timers[0]);
+ while (n) {
+ t = container_of(n, struct timer, node);
+ printf("%c%c %u\n", (n->key >> 8), (n->key) & 255, t->count);
+ lines_out++;
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ n = eb32_next(n);
+ }
+ }
+ else if (filter & (FILT_COUNT_URL_ANY|FILT_COUNT_IP_COUNT)) {
+ struct eb_node *node, *next;
+
+ if (!(filter & FILT_COUNT_URL_ONLY)) {
+ /* we have to sort on another criterion. We'll use timers[1] for the
+ * destination tree.
+ */
+
+ timers[1] = EB_ROOT; /* reconfigure to accept duplicates */
+ for (node = eb_first(&timers[0]); node; node = next) {
+ next = eb_next(node);
+ eb_delete(node);
+
+ ustat = container_of(node, struct url_stat, node.url.node);
+
+ if (filter & (FILT_COUNT_URL_COUNT|FILT_COUNT_IP_COUNT))
+ ustat->node.val.key = ustat->nb_req;
+ else if (filter & FILT_COUNT_URL_ERR)
+ ustat->node.val.key = ustat->nb_err;
+ else if (filter & FILT_COUNT_URL_TTOT)
+ ustat->node.val.key = ustat->total_time;
+ else if (filter & FILT_COUNT_URL_TAVG)
+ ustat->node.val.key = ustat->nb_req ? ustat->total_time / ustat->nb_req : 0;
+ else if (filter & FILT_COUNT_URL_TTOTO)
+ ustat->node.val.key = ustat->total_time_ok;
+ else if (filter & FILT_COUNT_URL_TAVGO)
+ ustat->node.val.key = (ustat->nb_req - ustat->nb_err) ? ustat->total_time_ok / (ustat->nb_req - ustat->nb_err) : 0;
+ else if (filter & FILT_COUNT_URL_BAVG)
+ ustat->node.val.key = ustat->nb_req ? ustat->total_bytes_sent / ustat->nb_req : 0;
+ else if (filter & FILT_COUNT_URL_BTOT)
+ ustat->node.val.key = ustat->total_bytes_sent;
+ else
+ ustat->node.val.key = 0;
+
+ eb64_insert(&timers[1], &ustat->node.val);
+ }
+ /* switch trees */
+ timers[0] = timers[1];
+ }
+
+ if (FILT_COUNT_IP_COUNT)
+ printf("#req err ttot tavg oktot okavg bavg btot src\n");
+ else
+ printf("#req err ttot tavg oktot okavg bavg btot url\n");
+
+ /* scan the tree in its reverse sorting order */
+ node = eb_last(&timers[0]);
+ while (node) {
+ ustat = container_of(node, struct url_stat, node.url.node);
+ printf("%u %u %llu %llu %llu %llu %llu %llu %s\n",
+ ustat->nb_req,
+ ustat->nb_err,
+ ustat->total_time,
+ ustat->nb_req ? ustat->total_time / ustat->nb_req : 0,
+ ustat->total_time_ok,
+ (ustat->nb_req - ustat->nb_err) ? ustat->total_time_ok / (ustat->nb_req - ustat->nb_err) : 0,
+ ustat->nb_req ? ustat->total_bytes_sent / ustat->nb_req : 0,
+ ustat->total_bytes_sent,
+ ustat->url);
+
+ node = eb_prev(node);
+ lines_out++;
+ if (lines_max >= 0 && lines_out >= lines_max)
+ break;
+ }
+ }
+
+ empty:
+ if (!(filter & FILT_QUIET))
+ fprintf(stderr, "%d lines in, %d lines out, %d parsing errors\n",
+ linenum, lines_out, parse_err);
+ exit(0);
+}
+
+void filter_output_line(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ puts(line);
+ lines_out++;
+}
+
/* Extract and print field <field> of capture block <block> (both 1-based)
 * from the current line. Prints an empty line when the requested block or
 * field does not exist on the line, so the output stays aligned with the
 * input. Truncated lines are reported and produce no output.
 */
void filter_extract_capture(const char *accept_field, const char *time_field, unsigned int block, unsigned int field)
{
	const char *e, *f;

	/* start scanning from the method field, which follows the captures */
	if (time_field)
		e = field_start(time_field, METH_FIELD - TIME_FIELD + 1);
	else
		e = field_start(accept_field, METH_FIELD - ACCEPT_FIELD + 1);

	/* skip forward block by block until we reach the requested one */
	while (block-- > 0) {
		/* Scan until the start of a capture block ('{') until the URL ('"'). */
		while ((*e != '"' && *e != '{') && *e) {
			/* Note: some syslog servers escape quotes ! */
			if (*e == '\\' && e[1] == '"')
				break;

			e = field_start(e, 2);
		}

		if (unlikely(!*e)) {
			truncated_line(linenum, line);
			return;
		}

		/* We reached the URL, no more captures will follow. */
		if (*e != '{') {
			puts("");
			lines_out++;
			return;
		}

		/* e points to the opening brace of the capture block. */

		e++;
	}

	/* We are in the first field of the selected capture block. */

	/* fields inside a capture block are separated by '|' and the block
	 * ends at '}': skip field by field until the requested one.
	 */
	while (--field > 0) {
		while ((*e != '|' && *e != '}') && *e)
			e++;

		if (unlikely(!*e)) {
			truncated_line(linenum, line);
			return;
		}

		if (*e != '|') {
			puts("");
			lines_out++;
			return;
		}

		/* e points to the pipe. */

		e++;
	}

	/* [e]..[f-1] delimits the requested field's contents */
	f = e;

	while ((*f != '|' && *f != '}') && *f)
		f++;

	if (unlikely(!*f)) {
		truncated_line(linenum, line);
		return;
	}

	fwrite(e, f - e, 1, stdout);
	putchar('\n');
	lines_out++;
}
+
+void filter_accept_holes(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct timer *t2;
+ int val;
+
+ val = convert_date(accept_field);
+ if (unlikely(val < 0)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ t2 = insert_value(&timers[0], tptr, val);
+ t2->count++;
+ return;
+}
+
+void filter_count_status(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct timer *t2;
+ const char *b;
+ int val;
+
+ if (time_field)
+ b = field_start(time_field, STATUS_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, STATUS_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ val = str2ic(b);
+
+ t2 = insert_value(&timers[0], tptr, val);
+ t2->count++;
+}
+
+void filter_count_cook_codes(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct timer *t2;
+ const char *b;
+ int val;
+
+ if (time_field)
+ b = field_start(time_field, TERM_CODES_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, TERM_CODES_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ val = 256 * b[2] + b[3];
+
+ t2 = insert_value(&timers[0], tptr, val);
+ t2->count++;
+}
+
+void filter_count_term_codes(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct timer *t2;
+ const char *b;
+ int val;
+
+ if (time_field)
+ b = field_start(time_field, TERM_CODES_FIELD - TIME_FIELD + 1);
+ else
+ b = field_start(accept_field, TERM_CODES_FIELD - ACCEPT_FIELD + 1);
+
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ val = 256 * b[0] + b[1];
+
+ t2 = insert_value(&timers[0], tptr, val);
+ t2->count++;
+}
+
+void filter_count_srv_status(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ const char *b, *e, *p;
+ int f, err, array[5];
+ struct ebmb_node *srv_node;
+ struct srv_st *srv;
+ int val;
+
+ /* the server field is before the status field, so let's
+ * parse them in the proper order.
+ */
+ b = field_start(accept_field, SERVER_FIELD - ACCEPT_FIELD + 1);
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ e = field_stop(b + 1); /* we have the server name in [b]..[e-1] */
+
+ /* the chance that a server name already exists is extremely high,
+ * so let's perform a normal lookup first.
+ */
+ srv_node = ebst_lookup_len(&timers[0], b, e - b);
+ srv = container_of(srv_node, struct srv_st, node);
+
+ if (!srv_node) {
+ /* server not yet in the tree, let's create it */
+ srv = (void *)calloc(1, sizeof(struct srv_st) + e - b + 1);
+ srv_node = &srv->node;
+ memcpy(&srv_node->key, b, e - b);
+ srv_node->key[e - b] = '\0';
+ ebst_insert(&timers[0], srv_node);
+ }
+
+ /* let's collect the connect and response times */
+ if (!time_field) {
+ time_field = field_start(e, TIME_FIELD - SERVER_FIELD);
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ return;
+ }
+ }
+
+ e = field_stop(time_field + 1);
+ /* we have field TIME_FIELD in [time_field]..[e-1] */
+
+ p = time_field;
+ err = 0;
+ f = 0;
+ while (!SEP(*p)) {
+ array[f] = str2ic(p);
+ if (array[f] < 0) {
+ array[f] = -1;
+ err = 1;
+ }
+ if (++f == 5)
+ break;
+ SKIP_CHAR(p, '/');
+ }
+
+ if (unlikely(f < 5)){
+ parse_err++;
+ return;
+ }
+
+ /* OK we have our timers in array[2,3] */
+ if (!err)
+ srv->nb_ok++;
+
+ if (array[2] >= 0) {
+ srv->cum_ct += array[2];
+ srv->nb_ct++;
+ }
+
+ if (array[3] >= 0) {
+ srv->cum_rt += array[3];
+ srv->nb_rt++;
+ }
+
+ /* we're interested in the 5 HTTP status classes (1xx ... 5xx), and
+ * the invalid ones which will be reported as 0.
+ */
+ b = field_start(e, STATUS_FIELD - TIME_FIELD);
+ if (unlikely(!*b)) {
+ truncated_line(linenum, line);
+ return;
+ }
+
+ val = 0;
+ if (*b >= '1' && *b <= '5')
+ val = *b - '0';
+
+ srv->st_cnt[val]++;
+}
+
+void filter_count_url(const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct url_stat *ustat = NULL;
+ struct ebpt_node *ebpt_old;
+ const char *b, *e;
+ int f, err, array[5];
+ int val;
+
+ /* let's collect the response time */
+ if (!time_field) {
+ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1); // avg 115 ns per line
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ return;
+ }
+ }
+
+ /* we have the field TIME_FIELD starting at <time_field>. We'll
+ * parse the 5 timers to detect errors, it takes avg 55 ns per line.
+ */
+ e = time_field; err = 0; f = 0;
+ while (!SEP(*e)) {
+ array[f] = str2ic(e);
+ if (array[f] < 0) {
+ array[f] = -1;
+ err = 1;
+ }
+ if (++f == 5)
+ break;
+ SKIP_CHAR(e, '/');
+ }
+ if (f < 5) {
+ parse_err++;
+ return;
+ }
+
+ /* OK we have our timers in array[3], and err is >0 if at
+ * least one -1 was seen. <e> points to the first char of
+ * the last timer. Let's prepare a new node with that.
+ */
+ if (unlikely(!ustat))
+ ustat = calloc(1, sizeof(*ustat));
+
+ ustat->nb_err = err;
+ ustat->nb_req = 1;
+
+ /* use array[4] = total time in case of error */
+ ustat->total_time = (array[3] >= 0) ? array[3] : array[4];
+ ustat->total_time_ok = (array[3] >= 0) ? array[3] : 0;
+
+ e = field_start(e, BYTES_SENT_FIELD - TIME_FIELD + 1);
+ val = str2ic(e);
+ ustat->total_bytes_sent = val;
+
+ /* the line may be truncated because of a bad request or anything like this,
+ * without a method. Also, if it does not begin with an quote, let's skip to
+ * the next field because it's a capture. Let's fall back to the "method" itself
+ * if there's nothing else.
+ */
+ e = field_start(e, METH_FIELD - BYTES_SENT_FIELD + 1);
+ while (*e != '"' && *e) {
+ /* Note: some syslog servers escape quotes ! */
+ if (*e == '\\' && e[1] == '"')
+ break;
+ e = field_start(e, 2);
+ }
+
+ if (unlikely(!*e)) {
+ truncated_line(linenum, line);
+ free(ustat);
+ return;
+ }
+
+ b = field_start(e, URL_FIELD - METH_FIELD + 1); // avg 40 ns per line
+ if (!*b)
+ b = e;
+
+ /* stop at end of field or first ';' or '?', takes avg 64 ns per line */
+ e = b;
+ do {
+ if (*e == ' '||
+ (!(filter2 & FILT2_PRESERVE_QUERY) && (*e == '?' || *e == ';'))) {
+ *(char *)e = 0;
+ break;
+ }
+ e++;
+ } while (*e);
+
+ /* now instead of copying the URL for a simple lookup, we'll link
+ * to it from the node we're trying to insert. If it returns a
+ * different value, it was already there. Otherwise we just have
+ * to dynamically realloc an entry using strdup().
+ */
+ ustat->node.url.key = (char *)b;
+ ebpt_old = ebis_insert(&timers[0], &ustat->node.url);
+
+ if (ebpt_old != &ustat->node.url) {
+ struct url_stat *ustat_old;
+ /* node was already there, let's update previous one */
+ ustat_old = container_of(ebpt_old, struct url_stat, node.url);
+ ustat_old->nb_req ++;
+ ustat_old->nb_err += ustat->nb_err;
+ ustat_old->total_time += ustat->total_time;
+ ustat_old->total_time_ok += ustat->total_time_ok;
+ ustat_old->total_bytes_sent += ustat->total_bytes_sent;
+ } else {
+ ustat->url = ustat->node.url.key = strdup(ustat->node.url.key);
+ ustat = NULL; /* node was used */
+ }
+}
+
+void filter_count_ip(const char *source_field, const char *accept_field, const char *time_field, struct timer **tptr)
+{
+ struct url_stat *ustat = NULL;
+ struct ebpt_node *ebpt_old;
+ const char *b, *e;
+ int f, err, array[5];
+ int val;
+
+ /* let's collect the response time */
+ if (!time_field) {
+ time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1); // avg 115 ns per line
+ if (unlikely(!*time_field)) {
+ truncated_line(linenum, line);
+ return;
+ }
+ }
+
+ /* we have the field TIME_FIELD starting at <time_field>. We'll
+ * parse the 5 timers to detect errors, it takes avg 55 ns per line.
+ */
+ e = time_field; err = 0; f = 0;
+ while (!SEP(*e)) {
+ if (f == 0 || f == 4) {
+ array[f] = str2ic(e);
+ if (array[f] < 0) {
+ array[f] = -1;
+ err = 1;
+ }
+ }
+ if (++f == 5)
+ break;
+ SKIP_CHAR(e, '/');
+ }
+ if (f < 5) {
+ parse_err++;
+ return;
+ }
+
+ /* OK we have our timers in array[0], and err is >0 if at
+ * least one -1 was seen. <e> points to the first char of
+ * the last timer. Let's prepare a new node with that.
+ */
+ if (unlikely(!ustat))
+ ustat = calloc(1, sizeof(*ustat));
+
+ ustat->nb_err = err;
+ ustat->nb_req = 1;
+
+ /* use array[4] = total time in case of error */
+ ustat->total_time = (array[0] >= 0) ? array[0] : array[4];
+ ustat->total_time_ok = (array[0] >= 0) ? array[0] : 0;
+
+ e = field_start(e, BYTES_SENT_FIELD - TIME_FIELD + 1);
+ val = str2ic(e);
+ ustat->total_bytes_sent = val;
+
+ /* the source might be IPv4 or IPv6, so we always strip the port by
+ * removing the last colon.
+ */
+ b = source_field;
+ e = field_stop(b + 1);
+ while (e > b && e[-1] != ':')
+ e--;
+ *(char *)(e - 1) = '\0';
+
+ /* now instead of copying the src for a simple lookup, we'll link
+ * to it from the node we're trying to insert. If it returns a
+ * different value, it was already there. Otherwise we just have
+ * to dynamically realloc an entry using strdup(). We're using the
+ * <url> field of the node to store the source address.
+ */
+ ustat->node.url.key = (char *)b;
+ ebpt_old = ebis_insert(&timers[0], &ustat->node.url);
+
+ if (ebpt_old != &ustat->node.url) {
+ struct url_stat *ustat_old;
+ /* node was already there, let's update previous one */
+ ustat_old = container_of(ebpt_old, struct url_stat, node.url);
+ ustat_old->nb_req ++;
+ ustat_old->nb_err += ustat->nb_err;
+ ustat_old->total_time += ustat->total_time;
+ ustat_old->total_time_ok += ustat->total_time_ok;
+ ustat_old->total_bytes_sent += ustat->total_bytes_sent;
+ } else {
+ ustat->url = ustat->node.url.key = strdup(ustat->node.url.key);
+ ustat = NULL; /* node was used */
+ }
+}
+
/* Per-line handler for the -gt (graph timers) and -pct (percentile)
 * modes. Parses the 5 '/'-separated timers of the line, then stores
 * them either quantized (insert_timer, -gt) or exact (insert_value,
 * -pct) into timers[0..4]. Errored lines only contribute their total
 * time to timers[0].
 */
void filter_graphs(const char *accept_field, const char *time_field, struct timer **tptr)
{
	struct timer *t2;
	const char *p;
	int f, err, array[5];

	if (!time_field) {
		time_field = field_start(accept_field, TIME_FIELD - ACCEPT_FIELD + 1);
		if (unlikely(!*time_field)) {
			truncated_line(linenum, line);
			return;
		}
	}

	field_stop(time_field + 1);
	/* we have field TIME_FIELD in [time_field]..[e-1] */

	/* parse the 5 timers; any negative value flags the line as errored */
	p = time_field;
	err = 0;
	f = 0;
	while (!SEP(*p)) {
		array[f] = str2ic(p);
		if (array[f] < 0) {
			array[f] = -1;
			err = 1;
		}
		if (++f == 5)
			break;
		SKIP_CHAR(p, '/');
	}

	if (unlikely(f < 5)) {
		parse_err++;
		return;
	}

	/* if we find at least one negative time, we count one error
	 * with a time equal to the total session time. This will
	 * emphasize quantum timing effects associated to known
	 * timeouts. Note that on some buggy machines, it is possible
	 * that the total time is negative, hence the reason to reset
	 * it.
	 */

	if (filter & FILT_GRAPH_TIMERS) {
		if (err) {
			if (array[4] < 0)
				array[4] = -1;
			t2 = insert_timer(&timers[0], tptr, array[4]);  // total time
			t2->count++;
		} else {
			int v;

			t2 = insert_timer(&timers[1], tptr, array[0]); t2->count++;  // req
			t2 = insert_timer(&timers[2], tptr, array[2]); t2->count++;  // conn
			t2 = insert_timer(&timers[3], tptr, array[3]); t2->count++;  // resp

			/* data time = total - req - queue - conn - resp */
			v = array[4] - array[0] - array[1] - array[2] - array[3]; // data time
			if (v < 0 && !(filter & FILT_QUIET))
				fprintf(stderr, "ERR: %s (%d %d %d %d %d => %d)\n",
					line, array[0], array[1], array[2], array[3], array[4], v);
			t2 = insert_timer(&timers[4], tptr, v); t2->count++;
			lines_out++;
		}
	} else {  /* percentile */
		if (err) {
			if (array[4] < 0)
				array[4] = -1;
			t2 = insert_value(&timers[0], tptr, array[4]);  // total time
			t2->count++;
		} else {
			int v;

			t2 = insert_value(&timers[1], tptr, array[0]); t2->count++;  // req
			t2 = insert_value(&timers[2], tptr, array[2]); t2->count++;  // conn
			t2 = insert_value(&timers[3], tptr, array[3]); t2->count++;  // resp

			/* data time = total - req - queue - conn - resp */
			v = array[4] - array[0] - array[1] - array[2] - array[3]; // data time
			if (v < 0 && !(filter & FILT_QUIET))
				fprintf(stderr, "ERR: %s (%d %d %d %d %d => %d)\n",
					line, array[0], array[1], array[2], array[3], array[4], v);
			t2 = insert_value(&timers[4], tptr, v); t2->count++;
			lines_out++;
		}
	}
}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/admin/iprange/Makefile b/admin/iprange/Makefile
new file mode 100644
index 0000000..13598d8
--- /dev/null
+++ b/admin/iprange/Makefile
@@ -0,0 +1,13 @@
# Build rules for the iprange and ip6range conversion tools.
CC = cc
OPTIMIZE = -O3
# -s: strip symbol information from the resulting binaries
LDFLAGS = -s

# OBJS actually lists the final binaries, not intermediate object files
OBJS = iprange ip6range

all: $(OBJS)

# each tool is built directly from its single source file
%: %.c
	$(CC) $(LDFLAGS) $(OPTIMIZE) -o $@ $^

clean:
	rm -f $(OBJS) *.o *.a *~
diff --git a/admin/iprange/ip6range.c b/admin/iprange/ip6range.c
new file mode 100644
index 0000000..2ffd490
--- /dev/null
+++ b/admin/iprange/ip6range.c
@@ -0,0 +1,397 @@
+/*
+ * network range to IP+mask converter
+ *
+ * Copyright 2011-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program reads lines starting by two IP addresses and outputs them with
+ * the two IP addresses replaced by a netmask covering the range between these
+ * IPs (inclusive). When multiple ranges are needed, as many lines are emitted.
+ * The IP addresses may be delimited by spaces, tabs or commas. Quotes are
+ * stripped, and lines beginning with a sharp character ('#') are ignored.
+ * The IP addresses must be written in the standard IPv6 text representation
+ * (this tool is the IPv6 counterpart of iprange).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define MAXLINE 1024
+
+static inline void in6_bswap(struct in6_addr *a)
+{
+ a->s6_addr32[0] = ntohl(a->s6_addr32[0]);
+ a->s6_addr32[1] = ntohl(a->s6_addr32[1]);
+ a->s6_addr32[2] = ntohl(a->s6_addr32[2]);
+ a->s6_addr32[3] = ntohl(a->s6_addr32[3]);
+}
+
/* returns a string version of an IPv6 address in host order. The result is
 * a static buffer overwritten on each call.
 */
static const char *get_ipv6_addr(struct in6_addr *addr)
{
	static char out[INET6_ADDRSTRLEN + 1];
	struct in6_addr tmp;

	tmp = *addr;		/* keep the caller's copy intact */
	in6_bswap(&tmp);	/* back to network order for inet_ntop() */
	return inet_ntop(AF_INET6, &tmp, out, sizeof(out));
}
+
+static const char *get_addr(struct in6_addr *addr)
+{
+ static char out[50];
+ snprintf(out, 50, "%08x:%08x:%08x:%08x",
+ addr->s6_addr32[0],
+ addr->s6_addr32[1],
+ addr->s6_addr32[2],
+ addr->s6_addr32[3]);
+ return out;
+}
+
+/* a <= b */
+static inline int a_le_b(struct in6_addr *a, struct in6_addr *b)
+{
+ if (a->s6_addr32[0] < b->s6_addr32[0]) return 1;
+ if (a->s6_addr32[0] > b->s6_addr32[0]) return 0;
+ if (a->s6_addr32[1] < b->s6_addr32[1]) return 1;
+ if (a->s6_addr32[1] > b->s6_addr32[1]) return 0;
+ if (a->s6_addr32[2] < b->s6_addr32[2]) return 1;
+ if (a->s6_addr32[2] > b->s6_addr32[2]) return 0;
+ if (a->s6_addr32[3] < b->s6_addr32[3]) return 1;
+ if (a->s6_addr32[3] > b->s6_addr32[3]) return 0;
+ return 1;
+}
+
+/* a == b */
+static inline int a_eq_b(struct in6_addr *a, struct in6_addr *b)
+{
+ if (a->s6_addr32[0] != b->s6_addr32[0]) return 0;
+ if (a->s6_addr32[1] != b->s6_addr32[1]) return 0;
+ if (a->s6_addr32[2] != b->s6_addr32[2]) return 0;
+ if (a->s6_addr32[3] != b->s6_addr32[3]) return 0;
+ return 1;
+}
+
+/* a > b */
+static inline int a_gt_b(struct in6_addr *a, struct in6_addr *b)
+{
+ if (a->s6_addr32[0] > b->s6_addr32[0]) return 1;
+ if (a->s6_addr32[0] < b->s6_addr32[0]) return 0;
+ if (a->s6_addr32[1] > b->s6_addr32[1]) return 1;
+ if (a->s6_addr32[1] < b->s6_addr32[1]) return 0;
+ if (a->s6_addr32[2] > b->s6_addr32[2]) return 1;
+ if (a->s6_addr32[2] < b->s6_addr32[2]) return 0;
+ if (a->s6_addr32[3] > b->s6_addr32[3]) return 1;
+ if (a->s6_addr32[3] < b->s6_addr32[3]) return 0;
+ return 0;
+}
+
+/* ( 1 << m ) - 1 -> r */
+static inline struct in6_addr *hmask(unsigned int b, struct in6_addr *r)
+{
+
+ if (b < 32) {
+ r->s6_addr32[3] = (1 << b) - 1;
+ r->s6_addr32[2] = 0;
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 64) {
+ r->s6_addr32[3] = 0xffffffff;
+ r->s6_addr32[2] = (1 << (b - 32)) - 1;
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 96) {
+ r->s6_addr32[3] = 0xffffffff;
+ r->s6_addr32[2] = 0xffffffff;
+ r->s6_addr32[1] = (1 << (b - 64)) - 1;
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 128) {
+ r->s6_addr32[3] = 0xffffffff;
+ r->s6_addr32[2] = 0xffffffff;
+ r->s6_addr32[1] = 0xffffffff;
+ r->s6_addr32[0] = (1 << (b - 96)) - 1;
+ }
+ else {
+ r->s6_addr32[3] = 0xffffffff;
+ r->s6_addr32[2] = 0xffffffff;
+ r->s6_addr32[1] = 0xffffffff;
+ r->s6_addr32[0] = 0xffffffff;
+ }
+ return r;
+}
+
+/* 1 << b -> r */
+static inline struct in6_addr *one_ls_b(unsigned int b, struct in6_addr *r)
+{
+ if (b < 32) {
+ r->s6_addr32[3] = 1 << b;
+ r->s6_addr32[2] = 0;
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 64) {
+ r->s6_addr32[3] = 0;
+ r->s6_addr32[2] = 1 << (b - 32);
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 96) {
+ r->s6_addr32[3] = 0;
+ r->s6_addr32[2] = 0;
+ r->s6_addr32[1] = 1 << (b - 64);
+ r->s6_addr32[0] = 0;
+ }
+ else if (b < 128) {
+ r->s6_addr32[3] = 0;
+ r->s6_addr32[2] = 0;
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 1 << (b - 96);
+ }
+ else {
+ r->s6_addr32[3] = 0;
+ r->s6_addr32[2] = 0;
+ r->s6_addr32[1] = 0;
+ r->s6_addr32[0] = 0;
+ }
+ return r;
+}
+
+/* a + b -> r */
+static inline struct in6_addr *a_plus_b(struct in6_addr *a, struct in6_addr *b, struct in6_addr *r)
+{
+ unsigned long long int c = 0;
+ int i;
+
+ for (i=3; i>=0; i--) {
+ c = (unsigned long long int)a->s6_addr32[i] +
+ (unsigned long long int)b->s6_addr32[i] + c;
+ r->s6_addr32[i] = c;
+ c >>= 32;
+ }
+
+ return r;
+}
+
+/* a - b -> r */
+static inline struct in6_addr *a_minus_b(struct in6_addr *a, struct in6_addr *b, struct in6_addr *r)
+{
+ signed long long int c = 0;
+ signed long long int d;
+ int i;
+
+ /* Check sign. Return 0xff..ff (-1) if the result is less than 0. */
+ if (a_gt_b(b, a)) {
+ r->s6_addr32[3] = 0xffffffff;
+ r->s6_addr32[2] = 0xffffffff;
+ r->s6_addr32[1] = 0xffffffff;
+ r->s6_addr32[0] = 0xffffffff;
+ return r;
+ }
+
+ for (i=3; i>=0; i--) {
+ d = (unsigned long long int)b->s6_addr32[i] + c;
+ c = (unsigned long long int)a->s6_addr32[i];
+ if (c < d)
+ c += 0x100000000ULL;
+ c -= d;
+ r->s6_addr32[i] = c;
+ c >>= 32;
+ }
+
+ return r;
+}
+
+/* a & b -> r */
+static inline struct in6_addr *a_and_b(struct in6_addr *a, struct in6_addr *b, struct in6_addr *r)
+{
+ r->s6_addr32[0] = a->s6_addr32[0] & b->s6_addr32[0];
+ r->s6_addr32[1] = a->s6_addr32[1] & b->s6_addr32[1];
+ r->s6_addr32[2] = a->s6_addr32[2] & b->s6_addr32[2];
+ r->s6_addr32[3] = a->s6_addr32[3] & b->s6_addr32[3];
+ return r;
+}
+
+/* a != 0 */
+int is_set(struct in6_addr *a)
+{
+ return a->s6_addr32[0] ||
+ a->s6_addr32[1] ||
+ a->s6_addr32[2] ||
+ a->s6_addr32[3];
+}
+
/* the 128-bit constant 1, used for the +1/-1 adjustments on addresses */
static struct in6_addr one = { .s6_addr32 = {0, 0, 0, 1} };
+
/* print all networks present between address <low> and address <high> in
 * cidr format, followed by <eol>. When <pfx> is non-NULL, each output line
 * is prefixed with <pfx> and a space. <low> and <high> are in host byte
 * order and the objects they point to are modified during the walk (they
 * may also be swapped so that <low> is the smaller bound).
 */
static void convert_range(struct in6_addr *low, struct in6_addr *high, const char *eol, const char *pfx)
{
	int bit;
	struct in6_addr r0;	/* scratch storage for the 128-bit helpers */
	struct in6_addr r1;

	if (a_eq_b(low, high)) {
		/* single value */
		printf("%s%s%s%s\n", pfx?pfx:"", pfx?" ":"", get_ipv6_addr(low), eol);
		return;
	}
	else if (a_gt_b(low, high)) {
		/* ensure low <= high for the rest of the walk */
		struct in6_addr *swap = low;
		low = high;
		high = swap;
	}

	if (a_eq_b(low, a_plus_b(high, &one, &r0))) {
		/* low == high + 1 modulo 2^128: the range wraps, i.e. full range */
		printf("%s%s::/0%s\n", pfx?pfx:"", pfx?" ":"", eol);
		return;
	}
	//printf("low=%08x high=%08x\n", low, high);

	/* first pass: walk up from <low>, emitting blocks of increasing size
	 * as long as a block of (1 << bit) addresses starting at <low> still
	 * fits entirely below <high>.
	 */
	bit = 0;
	while (bit < 128 && a_le_b(a_plus_b(low, hmask(bit, &r0), &r0), high)) {

		/* enlarge mask */
		if (is_set(a_and_b(low, one_ls_b(bit, &r0), &r0))) {
			/* can't aggregate anymore, dump and retry from the same bit */
			printf("%s%s%s/%d%s\n", pfx?pfx:"", pfx?" ":"", get_ipv6_addr(low), 128-bit, eol);
			a_plus_b(low, one_ls_b(bit, &r0), low);
		}
		else {
			/* try to enlarge the mask as much as possible first */
			bit++;
			//printf("  ++bit=%d\n", bit);
		}
	}
	//printf("stopped 1 at low=%08x, bit=%d\n", low, bit);

	/* second pass: emit blocks of decreasing size until the remaining
	 * address count (high - low + 1) reaches zero.
	 */
	bit = 127;
	while (bit >= 0 && is_set(a_plus_b(a_minus_b(high, low, &r0), &one, &r0))) {

		/* shrink mask */
		if (is_set(a_and_b(a_plus_b(a_minus_b(high, low, &r0), &one, &r0), one_ls_b(bit, &r1), &r1))) {
			/* large bit accepted, dump and go on from the same bit */
			//printf("max: %08x/%d\n", low, 32-bit);
			printf("%s%s%s/%d%s\n", pfx?pfx:"", pfx?" ":"", get_ipv6_addr(low), 128-bit, eol);
			a_plus_b(low, one_ls_b(bit, &r0), low);
		}
		else {
			bit--;
			//printf("  --bit=%d, low=%08x\n", bit, low);
		}
	}
	//printf("stopped at low=%08x\n", low);
}
+
/* Print command-line help for <argv0> to stderr. The previous text was
 * inherited from the IPv4 iprange tool and wrongly mentioned dotted or
 * 32-bit integer input formats: this tool only parses IPv6 text addresses
 * via inet_pton(AF_INET6, ...).
 */
static void usage(const char *argv0)
{
	fprintf(stderr,
		"Usage: %s [<addr> ...] < iplist.csv\n"
		"\n"
		"This program reads lines starting by two IPv6 addresses and outputs them\n"
		"with the two addresses replaced by networks in CIDR format covering the\n"
		"range between these addresses (inclusive). When multiple networks are\n"
		"needed, as many lines are emitted. The addresses may be delimited by\n"
		"spaces, tabs or commas. Quotes are stripped, and lines beginning with a\n"
		"sharp character ('#') are ignored. The addresses must be written in the\n"
		"standard IPv6 text representation.\n"
		"\n"
		"For each optional <addr> specified, only the network it belongs to is\n"
		"returned, prefixed with the <addr> value.\n"
		"\n", argv0);
}
+
+int main(int argc, char **argv)
+{
+ char line[MAXLINE];
+ int l, lnum;
+ char *lb, *le, *hb, *he, *err;
+ struct in6_addr sa, da, ta;
+
+ if (argc > 1 && *argv[1] == '-') {
+ usage(argv[0]);
+ exit(1);
+ }
+
+ lnum = 0;
+ while (fgets(line, sizeof(line), stdin) != NULL) {
+ l = strlen(line);
+ if (l && line[l - 1] == '\n')
+ line[--l] = '\0';
+
+ lnum++;
+ /* look for the first field which must be the low address of a range,
+ * in dotted IPv4 format or as an integer. spaces and commas are
+ * considered as delimiters, quotes are removed.
+ */
+ for (lb = line; *lb == ' ' || *lb == '\t' || *lb == ',' || *lb == '"'; lb++);
+ if (!*lb || *lb == '#')
+ continue;
+ for (le = lb + 1; *le != ' ' && *le != '\t' && *le != ',' && *le != '"' && *le; le++);
+ if (!*le)
+ continue;
+ /* we have the low address between lb(included) and le(excluded) */
+ *(le++) = 0;
+
+ for (hb = le; *hb == ' ' || *hb == '\t' || *hb == ',' || *hb == '"'; hb++);
+ if (!*hb || *hb == '#')
+ continue;
+ for (he = hb + 1; *he != ' ' && *he != '\t' && *he != ',' && *he != '"' && *he; he++);
+ if (!*he)
+ continue;
+ /* we have the high address between hb(included) and he(excluded) */
+ *(he++) = 0;
+
+ /* we want to remove a possible ending quote and a possible comma,
+ * not more.
+ */
+ while (*he == '"')
+ *(he++) = ' ';
+ while (*he == ',' || *he == ' ' || *he == '\t')
+ *(he++) = ' ';
+
+ /* if the trailing string is not empty, prefix it with a space */
+ if (*(he-1) == ' ')
+ he--;
+
+ if (inet_pton(AF_INET6, lb, &sa) <= 0) {
+ fprintf(stderr, "Failed to parse source address <%s> at line %d, skipping line\n", lb, lnum);
+ continue;
+ }
+
+ if (inet_pton(AF_INET6, hb, &da) <= 0) {
+ fprintf(stderr, "Failed to parse destination address <%s> at line %d, skipping line\n", hb, lnum);
+ continue;
+ }
+
+ in6_bswap(&sa);
+ in6_bswap(&da);
+
+ if (argc > 1) {
+ for (l = 1; l < argc; l++) {
+ if (inet_pton(AF_INET6, argv[l], &da) <= 0)
+ continue;
+ in6_bswap(&ta);
+ if ((a_le_b(&sa, &ta) && a_le_b(&ta, &da)) || (a_le_b(&da, &ta) && a_le_b(&ta, &sa)))
+ convert_range(&sa, &da, he, argv[l]);
+ }
+ }
+ else {
+ convert_range(&sa, &da, he, NULL);
+ }
+ }
+ return 0;
+}
diff --git a/admin/iprange/iprange.c b/admin/iprange/iprange.c
new file mode 100644
index 0000000..abae007
--- /dev/null
+++ b/admin/iprange/iprange.c
@@ -0,0 +1,202 @@
+/*
+ * network range to IP+mask converter
+ *
+ * Copyright 2011-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program reads lines starting by two IP addresses and outputs them with
+ * the two IP addresses replaced by a netmask covering the range between these
+ * IPs (inclusive). When multiple ranges are needed, as many lines are emitted.
+ * The IP addresses may be delimited by spaces, tabs or commas. Quotes are
+ * stripped, and lines beginning with a sharp character ('#') are ignored. The
+ * IP addresses may be either in the dotted format or represented as a 32-bit
+ * integer value in network byte order.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define MAXLINE 1024
+
/* returns a string version of the host-order IPv4 address <addr> in dotted
 * format. The result is the static buffer owned by inet_ntoa(), overwritten
 * on each call.
 */
static const char *get_ipv4_addr(unsigned int addr)
{
	struct in_addr in;

	in.s_addr = ntohl(addr); /* back to network order for inet_ntoa() */
	return inet_ntoa(in);
}
+
/* print all networks present between address <low> and address <high> (both
 * in host byte order) in cidr format, followed by <eol>. When <pfx> is
 * non-NULL, each output line is prefixed with <pfx> and a space.
 */
static void convert_range(unsigned int low, unsigned int high, const char *eol, const char *pfx)
{
	int bit;

	if (low == high) {
		/* single value */
		printf("%s%s%s%s\n", pfx?pfx:"", pfx?" ":"", get_ipv4_addr(low), eol);
		return;
	}
	else if (low > high) {
		/* an unsigned swap variable is required: an int would not hold
		 * addresses above 0x7fffffff
		 */
		unsigned int swap = low;
		low = high;
		high = swap;
	}

	if (low == high + 1) {
		/* the range wraps around zero: full range */
		printf("%s%s0.0.0.0/0%s\n", pfx?pfx:"", pfx?" ":"", eol);
		return;
	}

	/* first pass: walk up from <low>, emitting blocks of increasing size
	 * as long as a block of (1 << bit) addresses starting at <low> still
	 * fits below <high>. 1U avoids the undefined behaviour of a signed
	 * shift by 31 (C11 6.5.7).
	 */
	bit = 0;
	while (bit < 32 && low + (1U << bit) - 1 <= high) {
		/* enlarge mask */
		if (low & (1U << bit)) {
			/* can't aggregate anymore, dump and retry from the same bit */
			printf("%s%s%s/%d%s\n", pfx?pfx:"", pfx?" ":"", get_ipv4_addr(low), 32-bit, eol);
			low += (1U << bit);
		}
		else {
			/* try to enlarge the mask as much as possible first */
			bit++;
		}
	}

	/* second pass: emit blocks of decreasing size until the remaining
	 * address count (high - low + 1) reaches zero.
	 */
	bit = 31;
	while (bit >= 0 && high - low + 1 != 0) {
		/* shrink mask */
		if ((high - low + 1) & (1U << bit)) {
			/* large bit accepted, dump and go on from the same bit */
			printf("%s%s%s/%d%s\n", pfx?pfx:"", pfx?" ":"", get_ipv4_addr(low), 32-bit, eol);
			low += (1U << bit);
		}
		else {
			bit--;
		}
	}
}
+
/* Print command-line help for <argv0> to stderr. */
static void usage(const char *argv0)
{
	fprintf(stderr,
	        "Usage: %s [<addr> ...] < iplist.csv\n"
	        "\n"
	        "This program reads lines starting by two IP addresses and outputs them with\n"
	        "the two IP addresses replaced by a netmask covering the range between these\n"
	        "IPs (inclusive). When multiple ranges are needed, as many lines are emitted.\n"
	        "The IP addresses may be delimited by spaces, tabs or commas. Quotes are\n"
	        "stripped, and lines beginning with a sharp character ('#') are ignored. The\n"
	        "IP addresses may be either in the dotted format or represented as a 32-bit\n"
	        "integer value in network byte order.\n"
	        "\n"
	        "For each optional <addr> specified, only the network it belongs to is returned,\n"
	        "prefixed with the <addr> value.\n"
	        "\n", argv0);
}
+
/* Read "<low> <high> [trailer]" lines from stdin and write the equivalent
 * CIDR networks on stdout. When extra <addr> arguments are given, only the
 * ranges containing one of them are converted, and each output line is
 * prefixed with the matching address. Always exits with status 0.
 */
int main(int argc, char **argv)
{
	char line[MAXLINE];
	int l, lnum;
	char *lb, *le, *hb, *he, *err;
	struct in_addr src_addr, dst_addr;
	unsigned int sa, da, ta;

	/* any option-looking first argument just prints the help */
	if (argc > 1 && *argv[1] == '-') {
		usage(argv[0]);
		exit(1);
	}

	lnum = 0;
	while (fgets(line, sizeof(line), stdin) != NULL) {
		l = strlen(line);
		if (l && line[l - 1] == '\n')
			line[--l] = '\0';

		lnum++;
		/* look for the first field which must be the low address of a range,
		 * in dotted IPv4 format or as an integer. spaces and commas are
		 * considered as delimiters, quotes are removed.
		 */
		for (lb = line; *lb == ' ' || *lb == '\t' || *lb == ',' || *lb == '"'; lb++);
		if (!*lb || *lb == '#')
			continue;
		for (le = lb + 1; *le != ' ' && *le != '\t' && *le != ',' && *le != '"' && *le; le++);
		if (!*le)
			continue;
		/* we have the low address between lb(included) and le(excluded) */
		*(le++) = 0;

		for (hb = le; *hb == ' ' || *hb == '\t' || *hb == ',' || *hb == '"'; hb++);
		if (!*hb || *hb == '#')
			continue;
		for (he = hb + 1; *he != ' ' && *he != '\t' && *he != ',' && *he != '"' && *he; he++);
		if (!*he)
			continue;
		/* we have the high address between hb(included) and he(excluded) */
		*(he++) = 0;

		/* we want to remove a possible ending quote and a possible comma,
		 * not more. <he> then points to the trailer kept on output lines.
		 */
		while (*he == '"')
			*(he++) = ' ';
		while (*he == ',' || *he == ' ' || *he == '\t')
			*(he++) = ' ';

		/* if the trailing string is not empty, prefix it with a space */
		if (*(he-1) == ' ')
			he--;

		if (inet_pton(AF_INET, lb, &src_addr) <= 0) {
			/* parsing failed, retry with a plain numeric IP */
			src_addr.s_addr = ntohl(strtoul(lb, &err, 10));
			if (err && *err) {
				fprintf(stderr, "Failed to parse source address <%s> at line %d, skipping line\n", lb, lnum);
				continue;
			}
		}

		if (inet_pton(AF_INET, hb, &dst_addr) <= 0) {
			/* parsing failed, retry with a plain numeric IP */
			dst_addr.s_addr = ntohl(strtoul(hb, &err, 10));
			if (err && *err) {
				fprintf(stderr, "Failed to parse destination address <%s> at line %d, skipping line\n", hb, lnum);
				continue;
			}
		}

		/* convert both bounds to host byte order; on the numeric path the
		 * htonl() cancels the ntohl() above and keeps the raw value.
		 */
		sa = htonl(src_addr.s_addr);
		da = htonl(dst_addr.s_addr);
		if (argc > 1) {
			/* filter mode: only convert ranges containing one of the
			 * addresses passed on the command line.
			 */
			for (l = 1; l < argc; l++) {
				if (inet_pton(AF_INET, argv[l], &dst_addr) <= 0)
					continue;
				ta = htonl(dst_addr.s_addr);
				if ((sa <= ta && ta <= da) || (da <= ta && ta <= sa))
					convert_range(sa, da, he, argv[l]);
			}
		}
		else {
			convert_range(sa, da, he, NULL);
		}
	}
	exit(0);
}
diff --git a/admin/netsnmp-perl/README b/admin/netsnmp-perl/README
new file mode 100644
index 0000000..f44eb5b
--- /dev/null
+++ b/admin/netsnmp-perl/README
@@ -0,0 +1,111 @@
+SNMP support for HAProxy
+Copyright 2007-2008 Krzysztof Piotr Oledzki <ole@ans.pl>
+
+Root OID: 1.3.6.1.4.1.29385.106
+
+Files:
+ - README: this file
+ - haproxy.pl: Net-SNMP embedded perl module
+ - haproxy_backend.xml: Cacti snmp-query definition for backends
+ - haproxy_frontend.xml: Cacti snmp-query definition for frontends
+
+Install:
+ cp haproxy.pl /etc/snmp/
+ grep -q "disablePerl false" /etc/snmp/snmpd.conf || echo "disablePerl false" >> /etc/snmp/snmpd.conf
+ echo "perl do '/etc/snmp/haproxy.pl';" >> /etc/snmp/snmpd.conf
+
+Supported commands:
+ - GET (snmpget, snmpbulkget): quite fast.
+ - GETNEXT (snmpwalk, snmpbulkwalk): not so fast as requires to transfer
+ and parse a lot of data during each step. Always use "get" instead of "walk"
+ if that's possible.
+
+Supported OIDs:
+ - 1.3.6.1.4.1.29385.106.1: get a variable from stats
+ Usage: 1.3.6.1.4.1.29385.106.1.$type.$field.$iid.$sid
+
+ - type is one of:
+ 0) frontend
+ 1) backend
+ 2) server
+
+ - field is one of:
+ 0..32) CSV format variable
+ 10001) index
+ 10002) unique name
+
+ - iid is a proxy id
+
+ - sid is a service id (sid): 0 for frontends and backends, >= 1 for servers
+
+ - 1.3.6.1.4.1.29385.106.2: get a variable from info
+ Usage: 1.3.6.1.4.1.29385.106.2.$req.$varnr
+
+ - req is one of:
+ 0) get variable name
+ 1) get variable value
+
+Examples:
+
+- Get a list of frontends (type: 0) with status (field: 17):
+$ snmpbulkwalk -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.1.0.17
+SNMPv2-SMI::enterprises.29385.106.1.0.17.1.0 = STRING: "OPEN"
+SNMPv2-SMI::enterprises.29385.106.1.0.17.47.0 = STRING: "OPEN"
+
+- Get a list of backends (type: 1) with index (field: 10001):
+$ snmpbulkwalk -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.1.1.10001
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1.0 = STRING: "1.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1100.0 = STRING: "1100.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1101.0 = STRING: "1101.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1200.0 = STRING: "1200.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1201.0 = STRING: "1201.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1300.0 = STRING: "1300.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1400.0 = STRING: "1400.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1401.0 = STRING: "1401.0"
+SNMPv2-SMI::enterprises.29385.106.1.1.10001.1500.0 = STRING: "1500.0"
+(...)
+
+- Get a list of servers (type: 2) with unique name (field: 10002):
+$ snmpbulkwalk -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.1.2.10002
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1100.1001 = STRING: "backend1/s2"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1100.1002 = STRING: "backend1/s5"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1100.1003 = STRING: "backend1/s6"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1100.1012 = STRING: "backend1/s7"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1101.1001 = STRING: "backend2/s9"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1101.1002 = STRING: "backend2/s10"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1101.1003 = STRING: "backend2/s11"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1101.1012 = STRING: "backend2/s12"
+SNMPv2-SMI::enterprises.29385.106.1.2.10002.1200.1001 = STRING: "backend3/s8"
+(...)
+
+- Get a list of servers (type: 2) with weight (field: 18) in proxy 4300:
+$ snmpbulkwalk -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.1.2.18.4300
+SNMPv2-SMI::enterprises.29385.106.1.2.18.4300.1001 = STRING: "40"
+SNMPv2-SMI::enterprises.29385.106.1.2.18.4300.1002 = STRING: "25"
+SNMPv2-SMI::enterprises.29385.106.1.2.18.4300.1003 = STRING: "40"
+SNMPv2-SMI::enterprises.29385.106.1.2.18.4300.1012 = STRING: "80"
+
+- Get total sessions count (field: 7) in frontend (type: 0), iid.sid: 47.0 (proxy #47):
+snmpget -c public -v2c 192.168.0.1 enterprises.29385.106.1.0.7.47.0
+SNMPv2-SMI::enterprises.29385.106.1.0.7.47.0 = STRING: "1014019"
+
+- Get a list of available variables (req: 0):
+$ snmpbulkwalk -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.2.0
+SNMPv2-SMI::enterprises.29385.106.2.0.0 = STRING: "Name"
+SNMPv2-SMI::enterprises.29385.106.2.0.1 = STRING: "Version"
+SNMPv2-SMI::enterprises.29385.106.2.0.2 = STRING: "Release_date"
+SNMPv2-SMI::enterprises.29385.106.2.0.3 = STRING: "Nbproc"
+SNMPv2-SMI::enterprises.29385.106.2.0.4 = STRING: "Process_num"
+SNMPv2-SMI::enterprises.29385.106.2.0.5 = STRING: "Pid"
+SNMPv2-SMI::enterprises.29385.106.2.0.6 = STRING: "Uptime"
+SNMPv2-SMI::enterprises.29385.106.2.0.7 = STRING: "Uptime_sec"
+SNMPv2-SMI::enterprises.29385.106.2.0.8 = STRING: "Memmax_MB"
+SNMPv2-SMI::enterprises.29385.106.2.0.9 = STRING: "Ulimit-n"
+SNMPv2-SMI::enterprises.29385.106.2.0.10 = STRING: "Maxsock"
+SNMPv2-SMI::enterprises.29385.106.2.0.11 = STRING: "Maxconn"
+SNMPv2-SMI::enterprises.29385.106.2.0.12 = STRING: "CurrConns"
+
+- Get a variable (req: 1), varnr: 7 (Uptime_sec):
+$ snmpget -c public -v2c 192.168.0.1 1.3.6.1.4.1.29385.106.2.1.7
+SNMPv2-SMI::enterprises.29385.106.2.1.7 = STRING: "18761"
+
diff --git a/admin/netsnmp-perl/cacti_data_query_haproxy_backends.xml b/admin/netsnmp-perl/cacti_data_query_haproxy_backends.xml
new file mode 100644
index 0000000..9c4ea38
--- /dev/null
+++ b/admin/netsnmp-perl/cacti_data_query_haproxy_backends.xml
@@ -0,0 +1,750 @@
+<cacti>
+ <hash_040013d1dd43e3e5cee941860ea277826c4fe2>
+ <name>HaProxy Backends</name>
+ <description></description>
+ <xml_path>&lt;path_cacti&gt;/resource/snmp_queries/haproxy_backend.xml</xml_path>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <graphs>
+ <hash_1100134d2954fa52f51ed186916f2cf624a8b9>
+ <name>HAProxy Backend Sessions</name>
+ <graph_template_id>hash_000013cdbf9accfcd57d9e0a7c97896313ddee</graph_template_id>
+ <rrd>
+ <item_000>
+ <snmp_field_name>beSTot</snmp_field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <data_template_rrd_id>hash_080013230e04055a4228154123e74c6586d435</data_template_rrd_id>
+ </item_000>
+ <item_001>
+ <snmp_field_name>beEResp</snmp_field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <data_template_rrd_id>hash_080013088549c8d7e8cdc80f19bae4d78dc296</data_template_rrd_id>
+ </item_001>
+ </rrd>
+ <sv_graph>
+ <hash_12001368ff8a0bfc447cb94d02e0d17cc3e252>
+ <field_name>ResponseErrors</field_name>
+ <sequence>1</sequence>
+ <text>ResponseErrors</text>
+ </hash_12001368ff8a0bfc447cb94d02e0d17cc3e252>
+ <hash_120013c2e81996ac5a70f67fa4a07e95eea035>
+ <field_name>TotalSessions</field_name>
+ <sequence>1</sequence>
+ <text>TotalSessions</text>
+ </hash_120013c2e81996ac5a70f67fa4a07e95eea035>
+ </sv_graph>
+ <sv_data_source>
+ <hash_130013169b7ea71d2aa3a8abaece19de7feeff>
+ <field_name>ResponseErrors</field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <sequence>1</sequence>
+ <text>ResponseErrors</text>
+ </hash_130013169b7ea71d2aa3a8abaece19de7feeff>
+ <hash_130013a61ea1bb051f2162ba635c815324678d>
+ <field_name>TotalSessions</field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <sequence>1</sequence>
+ <text>TotalSessions</text>
+ </hash_130013a61ea1bb051f2162ba635c815324678d>
+ </sv_data_source>
+ </hash_1100134d2954fa52f51ed186916f2cf624a8b9>
+ <hash_110013abc35ade0aae030d90f817dfd91486f4>
+ <name>HAProxy Backend Traffic</name>
+ <graph_template_id>hash_000013b6d238ff2532fcc19ab498043c7c65c2</graph_template_id>
+ <rrd>
+ <item_000>
+ <snmp_field_name>beBOut</snmp_field_name>
+ <data_template_id>hash_010013a63ddba34026d2c07d73c0ef2ae64b54</data_template_id>
+ <data_template_rrd_id>hash_0800136c0e4debeb9b084231d858faabd82f8f</data_template_rrd_id>
+ </item_000>
+ <item_001>
+ <snmp_field_name>beBIn</snmp_field_name>
+ <data_template_id>hash_010013a63ddba34026d2c07d73c0ef2ae64b54</data_template_id>
+ <data_template_rrd_id>hash_0800132f5283f17a7cde63137189d4d3ea7e4e</data_template_rrd_id>
+ </item_001>
+ </rrd>
+ <sv_graph>
+ <hash_1200133ba4a6c8aacf161f3e2411afd7053b8d>
+ <field_name>BytesIn</field_name>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_1200133ba4a6c8aacf161f3e2411afd7053b8d>
+ <hash_1200130f8f674b52f6ea2e09608b505abfb3a1>
+ <field_name>BytesOut</field_name>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_1200130f8f674b52f6ea2e09608b505abfb3a1>
+ </sv_graph>
+ <sv_data_source>
+ <hash_130013d9fb3064081d77e553c5ce732f15c909>
+ <field_name>BytesIn</field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_130013d9fb3064081d77e553c5ce732f15c909>
+ <hash_1300134fc96e4392a7a86d05fda31c2d5d334c>
+ <field_name>BytesOut</field_name>
+ <data_template_id>hash_010013fa4d4fff334b60e9064e89082173fe34</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_1300134fc96e4392a7a86d05fda31c2d5d334c>
+ <hash_130013a7aad3557880ac197539a1d658f5d5da>
+ <field_name>BytesIn</field_name>
+ <data_template_id>hash_010013a63ddba34026d2c07d73c0ef2ae64b54</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_130013a7aad3557880ac197539a1d658f5d5da>
+ <hash_130013acb469b673f6adbaa21ad5c634c3683f>
+ <field_name>BytesOut</field_name>
+ <data_template_id>hash_010013a63ddba34026d2c07d73c0ef2ae64b54</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_130013acb469b673f6adbaa21ad5c634c3683f>
+ </sv_data_source>
+ </hash_110013abc35ade0aae030d90f817dfd91486f4>
+ </graphs>
+ </hash_040013d1dd43e3e5cee941860ea277826c4fe2>
+ <hash_030013bf566c869ac6443b0c75d1c32b5a350e>
+ <name>Get SNMP Data (Indexed)</name>
+ <type_id>3</type_id>
+ <input_string></input_string>
+ <fields>
+ <hash_070013617cdc8a230615e59f06f361ef6e7728>
+ <name>SNMP IP Address</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>hostname</type_code>
+ <input_output>in</input_output>
+ <data_name>management_ip</data_name>
+ </hash_070013617cdc8a230615e59f06f361ef6e7728>
+ <hash_070013acb449d1451e8a2a655c2c99d31142c7>
+ <name>SNMP Community</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_community</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_community</data_name>
+ </hash_070013acb449d1451e8a2a655c2c99d31142c7>
+ <hash_070013f4facc5e2ca7ebee621f09bc6d9fc792>
+ <name>SNMP Username (v3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls>on</allow_nulls>
+ <type_code>snmp_username</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_username</data_name>
+ </hash_070013f4facc5e2ca7ebee621f09bc6d9fc792>
+ <hash_0700131cc1493a6781af2c478fa4de971531cf>
+ <name>SNMP Password (v3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls>on</allow_nulls>
+ <type_code>snmp_password</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_password</data_name>
+ </hash_0700131cc1493a6781af2c478fa4de971531cf>
+ <hash_070013b5c23f246559df38662c255f4aa21d6b>
+ <name>SNMP Version (1, 2, or 3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_version</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_version</data_name>
+ </hash_070013b5c23f246559df38662c255f4aa21d6b>
+ <hash_0700136027a919c7c7731fbe095b6f53ab127b>
+ <name>Index Type</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>index_type</type_code>
+ <input_output>in</input_output>
+ <data_name>index_type</data_name>
+ </hash_0700136027a919c7c7731fbe095b6f53ab127b>
+ <hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f>
+ <name>Index Value</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>index_value</type_code>
+ <input_output>in</input_output>
+ <data_name>index_value</data_name>
+ </hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f>
+ <hash_070013e6deda7be0f391399c5130e7c4a48b28>
+ <name>Output Type ID</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>output_type</type_code>
+ <input_output>in</input_output>
+ <data_name>output_type</data_name>
+ </hash_070013e6deda7be0f391399c5130e7c4a48b28>
+ <hash_070013c1f36ee60c3dc98945556d57f26e475b>
+ <name>SNMP Port</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_port</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_port</data_name>
+ </hash_070013c1f36ee60c3dc98945556d57f26e475b>
+ </fields>
+ </hash_030013bf566c869ac6443b0c75d1c32b5a350e>
+ <hash_000013cdbf9accfcd57d9e0a7c97896313ddee>
+ <name>HAProxy Backend Sessions</name>
+ <graph>
+ <t_title></t_title>
+ <title>|host_description| - HaProxy - |query_bePxName| Backend Sessions</title>
+ <t_image_format_id></t_image_format_id>
+ <image_format_id>1</image_format_id>
+ <t_height></t_height>
+ <height>120</height>
+ <t_width></t_width>
+ <width>500</width>
+ <t_auto_scale></t_auto_scale>
+ <auto_scale>on</auto_scale>
+ <t_auto_scale_opts></t_auto_scale_opts>
+ <auto_scale_opts>2</auto_scale_opts>
+ <t_auto_scale_log></t_auto_scale_log>
+ <auto_scale_log></auto_scale_log>
+ <t_auto_scale_rigid></t_auto_scale_rigid>
+ <auto_scale_rigid></auto_scale_rigid>
+ <t_auto_padding></t_auto_padding>
+ <auto_padding>on</auto_padding>
+ <t_export></t_export>
+ <export>on</export>
+ <t_upper_limit></t_upper_limit>
+ <upper_limit>10000</upper_limit>
+ <t_lower_limit></t_lower_limit>
+ <lower_limit>0</lower_limit>
+ <t_base_value></t_base_value>
+ <base_value>1000</base_value>
+ <t_unit_value></t_unit_value>
+ <unit_value></unit_value>
+ <t_unit_exponent_value></t_unit_exponent_value>
+ <unit_exponent_value></unit_exponent_value>
+ <t_vertical_label></t_vertical_label>
+ <vertical_label></vertical_label>
+ </graph>
+ <items>
+ <hash_1000131ecaf3728447913a30dfa80cdd9cdff4>
+ <task_item_id>hash_080013230e04055a4228154123e74c6586d435</task_item_id>
+ <color_id>0000FF</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Total Sessions:</text_format>
+ <hard_return></hard_return>
+ <sequence>5</sequence>
+ </hash_1000131ecaf3728447913a30dfa80cdd9cdff4>
+ <hash_1000132171a00b34d33f99ef24bcc235fbb6a3>
+ <task_item_id>hash_080013230e04055a4228154123e74c6586d435</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>6</sequence>
+ </hash_1000132171a00b34d33f99ef24bcc235fbb6a3>
+ <hash_1000132129590e72a46480422f85e063d8cf4d>
+ <task_item_id>hash_080013230e04055a4228154123e74c6586d435</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>7</sequence>
+ </hash_1000132129590e72a46480422f85e063d8cf4d>
+ <hash_1000138d11fec869f88ccf2fa3227bcffadfc3>
+ <task_item_id>hash_080013230e04055a4228154123e74c6586d435</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>8</sequence>
+ </hash_1000138d11fec869f88ccf2fa3227bcffadfc3>
+ <hash_100013783d295131617ad996e4699533a134ea>
+ <task_item_id>hash_080013088549c8d7e8cdc80f19bae4d78dc296</task_item_id>
+ <color_id>EA8F00</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Response Errors:</text_format>
+ <hard_return></hard_return>
+ <sequence>9</sequence>
+ </hash_100013783d295131617ad996e4699533a134ea>
+ <hash_1000139bc04e5072b25ca992ee0b0eec981b95>
+ <task_item_id>hash_080013088549c8d7e8cdc80f19bae4d78dc296</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>10</sequence>
+ </hash_1000139bc04e5072b25ca992ee0b0eec981b95>
+ <hash_1000136333a9334fa0dc0d2f75c031dee1dcc5>
+ <task_item_id>hash_080013088549c8d7e8cdc80f19bae4d78dc296</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>11</sequence>
+ </hash_1000136333a9334fa0dc0d2f75c031dee1dcc5>
+ <hash_10001386e0e18d79915cd21ff123fb830e150e>
+ <task_item_id>hash_080013088549c8d7e8cdc80f19bae4d78dc296</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>12</sequence>
+ </hash_10001386e0e18d79915cd21ff123fb830e150e>
+ <hash_100013206b0b016daf267ff0a1daa7733ecf25>
+ <task_item_id>0</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>1</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Graph Last Updated: |date_time|</text_format>
+ <hard_return>on</hard_return>
+ <sequence>13</sequence>
+ </hash_100013206b0b016daf267ff0a1daa7733ecf25>
+ </items>
+ <inputs>
+ <hash_090013871102d568ae1a0d7d79aa4b0d3a6411>
+ <name>Data Source [TotalSessions]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_0000131ecaf3728447913a30dfa80cdd9cdff4|hash_0000132171a00b34d33f99ef24bcc235fbb6a3|hash_0000132129590e72a46480422f85e063d8cf4d|hash_0000138d11fec869f88ccf2fa3227bcffadfc3</items>
+ </hash_090013871102d568ae1a0d7d79aa4b0d3a6411>
+ <hash_090013320fd0edeb30465be51274fa3ecbe168>
+ <name>Data Source [ResponseErrors]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_000013783d295131617ad996e4699533a134ea|hash_0000139bc04e5072b25ca992ee0b0eec981b95|hash_0000136333a9334fa0dc0d2f75c031dee1dcc5|hash_00001386e0e18d79915cd21ff123fb830e150e</items>
+ </hash_090013320fd0edeb30465be51274fa3ecbe168>
+ </inputs>
+ </hash_000013cdbf9accfcd57d9e0a7c97896313ddee>
+ <hash_000013b6d238ff2532fcc19ab498043c7c65c2>
+ <name>HAProxy Backend Traffic</name>
+ <graph>
+ <t_title></t_title>
+ <title>|host_description| - HaProxy |query_bePxName| Backend Traffic</title>
+ <t_image_format_id></t_image_format_id>
+ <image_format_id>1</image_format_id>
+ <t_height></t_height>
+ <height>120</height>
+ <t_width></t_width>
+ <width>500</width>
+ <t_auto_scale></t_auto_scale>
+ <auto_scale>on</auto_scale>
+ <t_auto_scale_opts></t_auto_scale_opts>
+ <auto_scale_opts>2</auto_scale_opts>
+ <t_auto_scale_log></t_auto_scale_log>
+ <auto_scale_log></auto_scale_log>
+ <t_auto_scale_rigid></t_auto_scale_rigid>
+ <auto_scale_rigid></auto_scale_rigid>
+ <t_auto_padding></t_auto_padding>
+ <auto_padding>on</auto_padding>
+ <t_export></t_export>
+ <export>on</export>
+ <t_upper_limit></t_upper_limit>
+ <upper_limit>10000000000</upper_limit>
+ <t_lower_limit></t_lower_limit>
+ <lower_limit>0</lower_limit>
+ <t_base_value></t_base_value>
+ <base_value>1024</base_value>
+ <t_unit_value></t_unit_value>
+ <unit_value></unit_value>
+ <t_unit_exponent_value></t_unit_exponent_value>
+ <unit_exponent_value></unit_exponent_value>
+ <t_vertical_label></t_vertical_label>
+ <vertical_label>bytes</vertical_label>
+ </graph>
+ <items>
+ <hash_100013184e60d8dac2421c2787887fe07f6d25>
+ <task_item_id>hash_0800132f5283f17a7cde63137189d4d3ea7e4e</task_item_id>
+ <color_id>6EA100</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Ingress Traffic:</text_format>
+ <hard_return></hard_return>
+ <sequence>2</sequence>
+ </hash_100013184e60d8dac2421c2787887fe07f6d25>
+ <hash_100013f3889b4094b935798483e489b5f5e16e>
+ <task_item_id>hash_0800132f5283f17a7cde63137189d4d3ea7e4e</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>3</sequence>
+ </hash_100013f3889b4094b935798483e489b5f5e16e>
+ <hash_1000134bbdf263db6461f5d76717c12564c42c>
+ <task_item_id>hash_0800132f5283f17a7cde63137189d4d3ea7e4e</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>4</sequence>
+ </hash_1000134bbdf263db6461f5d76717c12564c42c>
+ <hash_1000131b708578244e36caba0f4dea67230c80>
+ <task_item_id>hash_0800132f5283f17a7cde63137189d4d3ea7e4e</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>5</sequence>
+ </hash_1000131b708578244e36caba0f4dea67230c80>
+ <hash_1000133e2f02edb1a55bcdd20e925a3849fd37>
+ <task_item_id>hash_0800136c0e4debeb9b084231d858faabd82f8f</task_item_id>
+ <color_id>FF0000</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Egress Traffic:</text_format>
+ <hard_return></hard_return>
+ <sequence>6</sequence>
+ </hash_1000133e2f02edb1a55bcdd20e925a3849fd37>
+ <hash_1000134517c9799c71e03dcd2278681858d70f>
+ <task_item_id>hash_0800136c0e4debeb9b084231d858faabd82f8f</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>7</sequence>
+ </hash_1000134517c9799c71e03dcd2278681858d70f>
+ <hash_1000132edf24a4592c9537d2341ec20c588fc2>
+ <task_item_id>hash_0800136c0e4debeb9b084231d858faabd82f8f</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>8</sequence>
+ </hash_1000132edf24a4592c9537d2341ec20c588fc2>
+ <hash_100013150e680935bfccc75f1f88c7c60030f7>
+ <task_item_id>hash_0800136c0e4debeb9b084231d858faabd82f8f</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>9</sequence>
+ </hash_100013150e680935bfccc75f1f88c7c60030f7>
+ <hash_1000135dcb7625a1a21d8d94fdf2f97d302a42>
+ <task_item_id>0</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>1</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Graph Last Updated: |date_time|</text_format>
+ <hard_return>on</hard_return>
+ <sequence>10</sequence>
+ </hash_1000135dcb7625a1a21d8d94fdf2f97d302a42>
+ </items>
+ <inputs>
+ <hash_090013952f2971b58b10f88a55d63a0388a429>
+ <name>Data Source [BytesIn]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_000013184e60d8dac2421c2787887fe07f6d25|hash_000013f3889b4094b935798483e489b5f5e16e|hash_0000134bbdf263db6461f5d76717c12564c42c|hash_0000131b708578244e36caba0f4dea67230c80</items>
+ </hash_090013952f2971b58b10f88a55d63a0388a429>
+ <hash_09001393a65aa111654d6801846a6cb523580b>
+ <name>Data Source [BytesOut]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_0000133e2f02edb1a55bcdd20e925a3849fd37|hash_0000134517c9799c71e03dcd2278681858d70f|hash_0000132edf24a4592c9537d2341ec20c588fc2|hash_000013150e680935bfccc75f1f88c7c60030f7</items>
+ </hash_09001393a65aa111654d6801846a6cb523580b>
+ </inputs>
+ </hash_000013b6d238ff2532fcc19ab498043c7c65c2>
+ <hash_010013fa4d4fff334b60e9064e89082173fe34>
+ <name>HAProxy Backend Session Stats</name>
+ <ds>
+ <t_name></t_name>
+ <name>|host_description| - HAProxy - |query_bePxName| Backend Session Stats</name>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <t_rra_id></t_rra_id>
+ <t_rrd_step></t_rrd_step>
+ <rrd_step>300</rrd_step>
+ <t_active></t_active>
+ <active>on</active>
+ <rra_items>hash_150013c21df5178e5c955013591239eb0afd46|hash_1500130d9c0af8b8acdc7807943937b3208e29|hash_1500136fc2d038fb42950138b0ce3e9874cc60|hash_150013e36f3adb9f152adfa5dc50fd2b23337e|hash_15001352829408ab566127eede2c74d201c678|hash_150013e73fb797d3ab2a9b97c3ec29e9690910</rra_items>
+ </ds>
+ <items>
+ <hash_080013230e04055a4228154123e74c6586d435>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>TotalSessions</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_080013230e04055a4228154123e74c6586d435>
+ <hash_080013088549c8d7e8cdc80f19bae4d78dc296>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>ResponseErrors</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_080013088549c8d7e8cdc80f19bae4d78dc296>
+ </items>
+ <data>
+ <item_000>
+ <data_input_field_id>hash_070013c1f36ee60c3dc98945556d57f26e475b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_000>
+ <item_001>
+ <data_input_field_id>hash_070013e6deda7be0f391399c5130e7c4a48b28</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_001>
+ <item_002>
+ <data_input_field_id>hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_002>
+ <item_003>
+ <data_input_field_id>hash_0700136027a919c7c7731fbe095b6f53ab127b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_003>
+ <item_004>
+ <data_input_field_id>hash_070013b5c23f246559df38662c255f4aa21d6b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_004>
+ <item_005>
+ <data_input_field_id>hash_0700131cc1493a6781af2c478fa4de971531cf</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_005>
+ <item_006>
+ <data_input_field_id>hash_070013f4facc5e2ca7ebee621f09bc6d9fc792</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_006>
+ <item_007>
+ <data_input_field_id>hash_070013acb449d1451e8a2a655c2c99d31142c7</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_007>
+ <item_008>
+ <data_input_field_id>hash_070013617cdc8a230615e59f06f361ef6e7728</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_008>
+ </data>
+ </hash_010013fa4d4fff334b60e9064e89082173fe34>
+ <hash_010013a63ddba34026d2c07d73c0ef2ae64b54>
+ <name>HAProxy Backend Traffic Stats</name>
+ <ds>
+ <t_name></t_name>
+ <name>|host_description| - HAProxy - |query_bePxName| Backend Traffic Stats</name>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <t_rra_id></t_rra_id>
+ <t_rrd_step></t_rrd_step>
+ <rrd_step>300</rrd_step>
+ <t_active></t_active>
+ <active>on</active>
+ <rra_items>hash_150013c21df5178e5c955013591239eb0afd46|hash_1500130d9c0af8b8acdc7807943937b3208e29|hash_1500136fc2d038fb42950138b0ce3e9874cc60|hash_150013e36f3adb9f152adfa5dc50fd2b23337e|hash_150013a4aa6f4de84eaa00008f88d3f5bd8520|hash_150013e73fb797d3ab2a9b97c3ec29e9690910</rra_items>
+ </ds>
+ <items>
+ <hash_0800136c0e4debeb9b084231d858faabd82f8f>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>BytesOut</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000000000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_0800136c0e4debeb9b084231d858faabd82f8f>
+ <hash_0800132f5283f17a7cde63137189d4d3ea7e4e>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>BytesIn</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000000000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_0800132f5283f17a7cde63137189d4d3ea7e4e>
+ </items>
+ <data>
+ <item_000>
+ <data_input_field_id>hash_070013c1f36ee60c3dc98945556d57f26e475b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_000>
+ <item_001>
+ <data_input_field_id>hash_070013e6deda7be0f391399c5130e7c4a48b28</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_001>
+ <item_002>
+ <data_input_field_id>hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_002>
+ <item_003>
+ <data_input_field_id>hash_0700136027a919c7c7731fbe095b6f53ab127b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_003>
+ <item_004>
+ <data_input_field_id>hash_070013b5c23f246559df38662c255f4aa21d6b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_004>
+ <item_005>
+ <data_input_field_id>hash_0700131cc1493a6781af2c478fa4de971531cf</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_005>
+ <item_006>
+ <data_input_field_id>hash_070013f4facc5e2ca7ebee621f09bc6d9fc792</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_006>
+ <item_007>
+ <data_input_field_id>hash_070013acb449d1451e8a2a655c2c99d31142c7</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_007>
+ <item_008>
+ <data_input_field_id>hash_070013617cdc8a230615e59f06f361ef6e7728</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_008>
+ </data>
+ </hash_010013a63ddba34026d2c07d73c0ef2ae64b54>
+ <hash_150013c21df5178e5c955013591239eb0afd46>
+ <name>Daily (5 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>1</steps>
+ <rows>600</rows>
+ <timespan>86400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_150013c21df5178e5c955013591239eb0afd46>
+ <hash_1500130d9c0af8b8acdc7807943937b3208e29>
+ <name>Weekly (30 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>6</steps>
+ <rows>700</rows>
+ <timespan>604800</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500130d9c0af8b8acdc7807943937b3208e29>
+ <hash_1500136fc2d038fb42950138b0ce3e9874cc60>
+ <name>Monthly (2 Hour Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>24</steps>
+ <rows>775</rows>
+ <timespan>2678400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500136fc2d038fb42950138b0ce3e9874cc60>
+ <hash_150013e36f3adb9f152adfa5dc50fd2b23337e>
+ <name>Yearly (1 Day Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>288</steps>
+ <rows>797</rows>
+ <timespan>33053184</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_150013e36f3adb9f152adfa5dc50fd2b23337e>
+ <hash_1500130028a19ed71b758898eaa55ab1c59694>
+ <name>Three days (5 minutes average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>6</steps>
+ <rows>700</rows>
+ <timespan>302400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500130028a19ed71b758898eaa55ab1c59694>
+ <hash_150013e73fb797d3ab2a9b97c3ec29e9690910>
+ <name>Hourly (1 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>1</steps>
+ <rows>500</rows>
+ <timespan>14400</timespan>
+ <cf_items>1|3</cf_items>
+ </hash_150013e73fb797d3ab2a9b97c3ec29e9690910>
+ <hash_060013e9c43831e54eca8069317a2ce8c6f751>
+ <name>Normal</name>
+ <gprint_text>%8.2lf %s</gprint_text>
+ </hash_060013e9c43831e54eca8069317a2ce8c6f751>
+</cacti>
diff --git a/admin/netsnmp-perl/cacti_data_query_haproxy_frontends.xml b/admin/netsnmp-perl/cacti_data_query_haproxy_frontends.xml
new file mode 100644
index 0000000..1429b07
--- /dev/null
+++ b/admin/netsnmp-perl/cacti_data_query_haproxy_frontends.xml
@@ -0,0 +1,750 @@
+<cacti>
+ <hash_0400138cb70c1064bd60742726af23828c4b05>
+ <name>HAProxy Frontends</name>
+ <description></description>
+ <xml_path>&lt;path_cacti&gt;/resource/snmp_queries/haproxy_frontend.xml</xml_path>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <graphs>
+ <hash_110013c1c2bca3af0ae4e2ce0de096aa79dba5>
+ <name>HAProxy Frontend Sessions</name>
+ <graph_template_id>hash_00001328b6727aa54dde6bb3f5dde939ae03aa</graph_template_id>
+ <rrd>
+ <item_000>
+ <snmp_field_name>feSTot</snmp_field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <data_template_rrd_id>hash_080013f9c76e05d0a87b2d32f9a5b014e17aab</data_template_rrd_id>
+ </item_000>
+ <item_001>
+ <snmp_field_name>feEReq</snmp_field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <data_template_rrd_id>hash_080013c137bec94d7220e65a5b3dfa4049c242</data_template_rrd_id>
+ </item_001>
+ </rrd>
+ <sv_graph>
+ <hash_1200130f0e4ffcd11f807d23794ab805d7901a>
+ <field_name>TotalSessions</field_name>
+ <sequence>1</sequence>
+ <text>TotalSessions</text>
+ </hash_1200130f0e4ffcd11f807d23794ab805d7901a>
+ <hash_1200134fc506db9ce45c0e5cb38a429ad8e077>
+ <field_name>RequestErrors</field_name>
+ <sequence>1</sequence>
+ <text>RequestErrors</text>
+ </hash_1200134fc506db9ce45c0e5cb38a429ad8e077>
+ </sv_graph>
+ <sv_data_source>
+ <hash_1300138a5efc51c95b400c3139b352ce110969>
+ <field_name>RequestErrors</field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <sequence>1</sequence>
+ <text>RequestErrors</text>
+ </hash_1300138a5efc51c95b400c3139b352ce110969>
+ <hash_130013e374903ab025bc2728f2f9abeb412ac3>
+ <field_name>TotalSessions</field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <sequence>1</sequence>
+ <text>TotalSessions</text>
+ </hash_130013e374903ab025bc2728f2f9abeb412ac3>
+ </sv_data_source>
+ </hash_110013c1c2bca3af0ae4e2ce0de096aa79dba5>
+ <hash_1100130838495d5d82f25f4a675ee7c56543a5>
+ <name>HAProxy Frontend Traffic</name>
+ <graph_template_id>hash_000013d0fe9e9efc2746de488fdede0419b051</graph_template_id>
+ <rrd>
+ <item_000>
+ <snmp_field_name>feBOut</snmp_field_name>
+ <data_template_id>hash_010013a88327df77ea19e333ddd96096c34751</data_template_id>
+ <data_template_rrd_id>hash_0800137db81cd58fbbbd203af0f55c15c2081a</data_template_rrd_id>
+ </item_000>
+ <item_001>
+ <snmp_field_name>feBIn</snmp_field_name>
+ <data_template_id>hash_010013a88327df77ea19e333ddd96096c34751</data_template_id>
+ <data_template_rrd_id>hash_08001305772980bb6de1f12223d7ec53e323c4</data_template_rrd_id>
+ </item_001>
+ </rrd>
+ <sv_graph>
+ <hash_120013934d1311136bccb4d9ca5a67e240afeb>
+ <field_name>BytesIn</field_name>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_120013934d1311136bccb4d9ca5a67e240afeb>
+ <hash_12001399a6e6fb09b025bc60a214cb00e6d1f0>
+ <field_name>BytesOut</field_name>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_12001399a6e6fb09b025bc60a214cb00e6d1f0>
+ </sv_graph>
+ <sv_data_source>
+ <hash_1300135f35cdaeda1a1169be21e52a85af339e>
+ <field_name>BytesOut</field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_1300135f35cdaeda1a1169be21e52a85af339e>
+ <hash_1300136ee916a0c0ce8dad133b9dfcf32e2581>
+ <field_name>BytesIn</field_name>
+ <data_template_id>hash_0100139f985697a7530256b4e35c95ef03db20</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_1300136ee916a0c0ce8dad133b9dfcf32e2581>
+ <hash_13001382c5a3b953f8d1583b168d15beed6e9c>
+ <field_name>BytesOut</field_name>
+ <data_template_id>hash_010013a88327df77ea19e333ddd96096c34751</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesOut</text>
+ </hash_13001382c5a3b953f8d1583b168d15beed6e9c>
+ <hash_1300132c486fa1a5e875179031ea9f5328614b>
+ <field_name>BytesIn</field_name>
+ <data_template_id>hash_010013a88327df77ea19e333ddd96096c34751</data_template_id>
+ <sequence>1</sequence>
+ <text>BytesIn</text>
+ </hash_1300132c486fa1a5e875179031ea9f5328614b>
+ </sv_data_source>
+ </hash_1100130838495d5d82f25f4a675ee7c56543a5>
+ </graphs>
+ </hash_0400138cb70c1064bd60742726af23828c4b05>
+ <hash_030013bf566c869ac6443b0c75d1c32b5a350e>
+ <name>Get SNMP Data (Indexed)</name>
+ <type_id>3</type_id>
+ <input_string></input_string>
+ <fields>
+ <hash_070013617cdc8a230615e59f06f361ef6e7728>
+ <name>SNMP IP Address</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>hostname</type_code>
+ <input_output>in</input_output>
+ <data_name>management_ip</data_name>
+ </hash_070013617cdc8a230615e59f06f361ef6e7728>
+ <hash_070013acb449d1451e8a2a655c2c99d31142c7>
+ <name>SNMP Community</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_community</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_community</data_name>
+ </hash_070013acb449d1451e8a2a655c2c99d31142c7>
+ <hash_070013f4facc5e2ca7ebee621f09bc6d9fc792>
+ <name>SNMP Username (v3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls>on</allow_nulls>
+ <type_code>snmp_username</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_username</data_name>
+ </hash_070013f4facc5e2ca7ebee621f09bc6d9fc792>
+ <hash_0700131cc1493a6781af2c478fa4de971531cf>
+ <name>SNMP Password (v3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls>on</allow_nulls>
+ <type_code>snmp_password</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_password</data_name>
+ </hash_0700131cc1493a6781af2c478fa4de971531cf>
+ <hash_070013b5c23f246559df38662c255f4aa21d6b>
+ <name>SNMP Version (1, 2, or 3)</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_version</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_version</data_name>
+ </hash_070013b5c23f246559df38662c255f4aa21d6b>
+ <hash_0700136027a919c7c7731fbe095b6f53ab127b>
+ <name>Index Type</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>index_type</type_code>
+ <input_output>in</input_output>
+ <data_name>index_type</data_name>
+ </hash_0700136027a919c7c7731fbe095b6f53ab127b>
+ <hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f>
+ <name>Index Value</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>index_value</type_code>
+ <input_output>in</input_output>
+ <data_name>index_value</data_name>
+ </hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f>
+ <hash_070013e6deda7be0f391399c5130e7c4a48b28>
+ <name>Output Type ID</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>output_type</type_code>
+ <input_output>in</input_output>
+ <data_name>output_type</data_name>
+ </hash_070013e6deda7be0f391399c5130e7c4a48b28>
+ <hash_070013c1f36ee60c3dc98945556d57f26e475b>
+ <name>SNMP Port</name>
+ <update_rra></update_rra>
+ <regexp_match></regexp_match>
+ <allow_nulls></allow_nulls>
+ <type_code>snmp_port</type_code>
+ <input_output>in</input_output>
+ <data_name>snmp_port</data_name>
+ </hash_070013c1f36ee60c3dc98945556d57f26e475b>
+ </fields>
+ </hash_030013bf566c869ac6443b0c75d1c32b5a350e>
+ <hash_00001328b6727aa54dde6bb3f5dde939ae03aa>
+ <name>HAProxy Frontend Sessions</name>
+ <graph>
+ <t_title></t_title>
+ <title>|host_description| - HaProxy - |query_fePxName| Frontend Sessions</title>
+ <t_image_format_id></t_image_format_id>
+ <image_format_id>1</image_format_id>
+ <t_height></t_height>
+ <height>120</height>
+ <t_width></t_width>
+ <width>500</width>
+ <t_auto_scale></t_auto_scale>
+ <auto_scale>on</auto_scale>
+ <t_auto_scale_opts></t_auto_scale_opts>
+ <auto_scale_opts>2</auto_scale_opts>
+ <t_auto_scale_log></t_auto_scale_log>
+ <auto_scale_log></auto_scale_log>
+ <t_auto_scale_rigid></t_auto_scale_rigid>
+ <auto_scale_rigid></auto_scale_rigid>
+ <t_auto_padding></t_auto_padding>
+ <auto_padding>on</auto_padding>
+ <t_export></t_export>
+ <export>on</export>
+ <t_upper_limit></t_upper_limit>
+ <upper_limit>10000</upper_limit>
+ <t_lower_limit></t_lower_limit>
+ <lower_limit>0</lower_limit>
+ <t_base_value></t_base_value>
+ <base_value>1000</base_value>
+ <t_unit_value></t_unit_value>
+ <unit_value></unit_value>
+ <t_unit_exponent_value></t_unit_exponent_value>
+ <unit_exponent_value></unit_exponent_value>
+ <t_vertical_label></t_vertical_label>
+ <vertical_label></vertical_label>
+ </graph>
+ <items>
+ <hash_100013b1ecfd75df9c17c0ba11acc5e9b7d8f8>
+ <task_item_id>hash_080013f9c76e05d0a87b2d32f9a5b014e17aab</task_item_id>
+ <color_id>0000FF</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Total Sessions:</text_format>
+ <hard_return></hard_return>
+ <sequence>5</sequence>
+ </hash_100013b1ecfd75df9c17c0ba11acc5e9b7d8f8>
+ <hash_100013fa878148199aee5bb2a10b7693318347>
+ <task_item_id>hash_080013f9c76e05d0a87b2d32f9a5b014e17aab</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>6</sequence>
+ </hash_100013fa878148199aee5bb2a10b7693318347>
+ <hash_1000137d834c383afa4863974edc19a337e260>
+ <task_item_id>hash_080013f9c76e05d0a87b2d32f9a5b014e17aab</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>7</sequence>
+ </hash_1000137d834c383afa4863974edc19a337e260>
+ <hash_1000138b0422b293230883462cfbfe32144d47>
+ <task_item_id>hash_080013f9c76e05d0a87b2d32f9a5b014e17aab</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>8</sequence>
+ </hash_1000138b0422b293230883462cfbfe32144d47>
+ <hash_1000131c87ed4e76c026cd131418d792822944>
+ <task_item_id>hash_080013c137bec94d7220e65a5b3dfa4049c242</task_item_id>
+ <color_id>EA8F00</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Request Errors:</text_format>
+ <hard_return></hard_return>
+ <sequence>9</sequence>
+ </hash_1000131c87ed4e76c026cd131418d792822944>
+ <hash_100013a9993114514cb1abea4b929f984222ea>
+ <task_item_id>hash_080013c137bec94d7220e65a5b3dfa4049c242</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>10</sequence>
+ </hash_100013a9993114514cb1abea4b929f984222ea>
+ <hash_1000131bc67adbaa8b77cd6c73d9622c7eebc1>
+ <task_item_id>hash_080013c137bec94d7220e65a5b3dfa4049c242</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>12</sequence>
+ </hash_1000131bc67adbaa8b77cd6c73d9622c7eebc1>
+ <hash_1000138840d17711368b90a61132ba83e9edb8>
+ <task_item_id>hash_080013c137bec94d7220e65a5b3dfa4049c242</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>13</sequence>
+ </hash_1000138840d17711368b90a61132ba83e9edb8>
+ <hash_100013e8ddbe92933ba99b2d2ebc8f76a06e2e>
+ <task_item_id>0</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>1</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Graph Last Updated: |date_time|</text_format>
+ <hard_return>on</hard_return>
+ <sequence>14</sequence>
+ </hash_100013e8ddbe92933ba99b2d2ebc8f76a06e2e>
+ </items>
+ <inputs>
+ <hash_0900134bedc49c15c9557fc95cdbc8850a5cb1>
+ <name>Data Source [TotalSessions]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_000013b1ecfd75df9c17c0ba11acc5e9b7d8f8|hash_000013fa878148199aee5bb2a10b7693318347|hash_0000138b0422b293230883462cfbfe32144d47|hash_0000137d834c383afa4863974edc19a337e260</items>
+ </hash_0900134bedc49c15c9557fc95cdbc8850a5cb1>
+ <hash_090013f3f3dfd39bb035006de08df94415e828>
+ <name>Data Source [RequestErrors]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_0000131c87ed4e76c026cd131418d792822944|hash_000013a9993114514cb1abea4b929f984222ea|hash_0000131bc67adbaa8b77cd6c73d9622c7eebc1|hash_0000138840d17711368b90a61132ba83e9edb8</items>
+ </hash_090013f3f3dfd39bb035006de08df94415e828>
+ </inputs>
+ </hash_00001328b6727aa54dde6bb3f5dde939ae03aa>
+ <hash_000013d0fe9e9efc2746de488fdede0419b051>
+ <name>HAProxy Frontend Traffic</name>
+ <graph>
+ <t_title></t_title>
+ <title>|host_description| - HaProxy |query_fePxName| Frontend Traffic</title>
+ <t_image_format_id></t_image_format_id>
+ <image_format_id>1</image_format_id>
+ <t_height></t_height>
+ <height>120</height>
+ <t_width></t_width>
+ <width>500</width>
+ <t_auto_scale></t_auto_scale>
+ <auto_scale>on</auto_scale>
+ <t_auto_scale_opts></t_auto_scale_opts>
+ <auto_scale_opts>2</auto_scale_opts>
+ <t_auto_scale_log></t_auto_scale_log>
+ <auto_scale_log></auto_scale_log>
+ <t_auto_scale_rigid></t_auto_scale_rigid>
+ <auto_scale_rigid></auto_scale_rigid>
+ <t_auto_padding></t_auto_padding>
+ <auto_padding>on</auto_padding>
+ <t_export></t_export>
+ <export>on</export>
+ <t_upper_limit></t_upper_limit>
+ <upper_limit>10000000000</upper_limit>
+ <t_lower_limit></t_lower_limit>
+ <lower_limit>0</lower_limit>
+ <t_base_value></t_base_value>
+ <base_value>1024</base_value>
+ <t_unit_value></t_unit_value>
+ <unit_value></unit_value>
+ <t_unit_exponent_value></t_unit_exponent_value>
+ <unit_exponent_value></unit_exponent_value>
+ <t_vertical_label></t_vertical_label>
+ <vertical_label>bytes</vertical_label>
+ </graph>
+ <items>
+ <hash_100013d5c13ff711cbd645e9f88697b2c5e61b>
+ <task_item_id>hash_08001305772980bb6de1f12223d7ec53e323c4</task_item_id>
+ <color_id>6EA100</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Ingress Traffic:</text_format>
+ <hard_return></hard_return>
+ <sequence>2</sequence>
+ </hash_100013d5c13ff711cbd645e9f88697b2c5e61b>
+ <hash_10001353cff0cd64c4d70574ef9da42f62c86a>
+ <task_item_id>hash_08001305772980bb6de1f12223d7ec53e323c4</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>3</sequence>
+ </hash_10001353cff0cd64c4d70574ef9da42f62c86a>
+ <hash_1000136788d44f6207ce323ad40ccc8f15d462>
+ <task_item_id>hash_08001305772980bb6de1f12223d7ec53e323c4</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>4</sequence>
+ </hash_1000136788d44f6207ce323ad40ccc8f15d462>
+ <hash_100013d4cb02a8fb7fa37ef1e37d8b78333ea3>
+ <task_item_id>hash_08001305772980bb6de1f12223d7ec53e323c4</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>5</sequence>
+ </hash_100013d4cb02a8fb7fa37ef1e37d8b78333ea3>
+ <hash_1000137d82a7f3c82c698fe4e9cecc03d680b1>
+ <task_item_id>hash_0800137db81cd58fbbbd203af0f55c15c2081a</task_item_id>
+ <color_id>FF0000</color_id>
+ <graph_type_id>5</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Egress Traffic:</text_format>
+ <hard_return></hard_return>
+ <sequence>6</sequence>
+ </hash_1000137d82a7f3c82c698fe4e9cecc03d680b1>
+ <hash_100013d2d059378b521327426b451324bbb608>
+ <task_item_id>hash_0800137db81cd58fbbbd203af0f55c15c2081a</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>4</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Current:</text_format>
+ <hard_return></hard_return>
+ <sequence>7</sequence>
+ </hash_100013d2d059378b521327426b451324bbb608>
+ <hash_1000132eef0fae129ef21ad2d73e5e80814a23>
+ <task_item_id>hash_0800137db81cd58fbbbd203af0f55c15c2081a</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Average:</text_format>
+ <hard_return></hard_return>
+ <sequence>8</sequence>
+ </hash_1000132eef0fae129ef21ad2d73e5e80814a23>
+ <hash_1000138365462951b1f4e6b1a76f20b91be65d>
+ <task_item_id>hash_0800137db81cd58fbbbd203af0f55c15c2081a</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>9</graph_type_id>
+ <consolidation_function_id>3</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Maximum:</text_format>
+ <hard_return>on</hard_return>
+ <sequence>9</sequence>
+ </hash_1000138365462951b1f4e6b1a76f20b91be65d>
+ <hash_100013eefc9c0f83c57d6d6f8d75fbd45965a3>
+ <task_item_id>0</task_item_id>
+ <color_id>0</color_id>
+ <graph_type_id>1</graph_type_id>
+ <consolidation_function_id>1</consolidation_function_id>
+ <cdef_id>0</cdef_id>
+ <value></value>
+ <gprint_id>hash_060013e9c43831e54eca8069317a2ce8c6f751</gprint_id>
+ <text_format>Graph Last Updated: |date_time|</text_format>
+ <hard_return>on</hard_return>
+ <sequence>10</sequence>
+ </hash_100013eefc9c0f83c57d6d6f8d75fbd45965a3>
+ </items>
+ <inputs>
+ <hash_090013384e7b730bb32653e3fbce5ce509977d>
+ <name>Data Source [BytesOut]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_000013d2d059378b521327426b451324bbb608|hash_0000137d82a7f3c82c698fe4e9cecc03d680b1|hash_0000132eef0fae129ef21ad2d73e5e80814a23|hash_0000138365462951b1f4e6b1a76f20b91be65d</items>
+ </hash_090013384e7b730bb32653e3fbce5ce509977d>
+ <hash_090013e5ff60e3069b2d28d905f6affa63250e>
+ <name>Data Source [BytesIn]</name>
+ <description></description>
+ <column_name>task_item_id</column_name>
+ <items>hash_00001353cff0cd64c4d70574ef9da42f62c86a|hash_000013d5c13ff711cbd645e9f88697b2c5e61b|hash_0000136788d44f6207ce323ad40ccc8f15d462|hash_000013d4cb02a8fb7fa37ef1e37d8b78333ea3</items>
+ </hash_090013e5ff60e3069b2d28d905f6affa63250e>
+ </inputs>
+ </hash_000013d0fe9e9efc2746de488fdede0419b051>
+ <hash_0100139f985697a7530256b4e35c95ef03db20>
+ <name>HAProxy Frontend Session Stats</name>
+ <ds>
+ <t_name></t_name>
+ <name>|host_description| - HAProxy - |query_fePxName| Frontend Session Stats</name>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <t_rra_id></t_rra_id>
+ <t_rrd_step></t_rrd_step>
+ <rrd_step>300</rrd_step>
+ <t_active></t_active>
+ <active>on</active>
+ <rra_items>hash_150013c21df5178e5c955013591239eb0afd46|hash_1500130d9c0af8b8acdc7807943937b3208e29|hash_1500136fc2d038fb42950138b0ce3e9874cc60|hash_150013e36f3adb9f152adfa5dc50fd2b23337e|hash_1500139b529013942d5a6891d05a84d17175e0|hash_150013e73fb797d3ab2a9b97c3ec29e9690910</rra_items>
+ </ds>
+ <items>
+ <hash_080013c137bec94d7220e65a5b3dfa4049c242>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>RequestErrors</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_080013c137bec94d7220e65a5b3dfa4049c242>
+ <hash_080013f9c76e05d0a87b2d32f9a5b014e17aab>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>TotalSessions</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_080013f9c76e05d0a87b2d32f9a5b014e17aab>
+ </items>
+ <data>
+ <item_000>
+ <data_input_field_id>hash_070013c1f36ee60c3dc98945556d57f26e475b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_000>
+ <item_001>
+ <data_input_field_id>hash_070013e6deda7be0f391399c5130e7c4a48b28</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_001>
+ <item_002>
+ <data_input_field_id>hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_002>
+ <item_003>
+ <data_input_field_id>hash_0700136027a919c7c7731fbe095b6f53ab127b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_003>
+ <item_004>
+ <data_input_field_id>hash_070013b5c23f246559df38662c255f4aa21d6b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_004>
+ <item_005>
+ <data_input_field_id>hash_0700131cc1493a6781af2c478fa4de971531cf</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_005>
+ <item_006>
+ <data_input_field_id>hash_070013f4facc5e2ca7ebee621f09bc6d9fc792</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_006>
+ <item_007>
+ <data_input_field_id>hash_070013acb449d1451e8a2a655c2c99d31142c7</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_007>
+ <item_008>
+ <data_input_field_id>hash_070013617cdc8a230615e59f06f361ef6e7728</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_008>
+ </data>
+ </hash_0100139f985697a7530256b4e35c95ef03db20>
+ <hash_010013a88327df77ea19e333ddd96096c34751>
+ <name>HAProxy Frontend Traffic Stats</name>
+ <ds>
+ <t_name></t_name>
+ <name>|host_description| - HAProxy - |query_fePxName| Frontend Traffic Stats</name>
+ <data_input_id>hash_030013bf566c869ac6443b0c75d1c32b5a350e</data_input_id>
+ <t_rra_id></t_rra_id>
+ <t_rrd_step></t_rrd_step>
+ <rrd_step>300</rrd_step>
+ <t_active></t_active>
+ <active>on</active>
+ <rra_items>hash_150013c21df5178e5c955013591239eb0afd46|hash_1500130d9c0af8b8acdc7807943937b3208e29|hash_1500136fc2d038fb42950138b0ce3e9874cc60|hash_150013e36f3adb9f152adfa5dc50fd2b23337e|hash_15001369b0abdb84cea4d93762fd5a5d0c2777|hash_150013e73fb797d3ab2a9b97c3ec29e9690910</rra_items>
+ </ds>
+ <items>
+ <hash_0800137db81cd58fbbbd203af0f55c15c2081a>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>BytesOut</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000000000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_0800137db81cd58fbbbd203af0f55c15c2081a>
+ <hash_08001305772980bb6de1f12223d7ec53e323c4>
+ <t_data_source_name></t_data_source_name>
+ <data_source_name>BytesIn</data_source_name>
+ <t_rrd_minimum></t_rrd_minimum>
+ <rrd_minimum>0</rrd_minimum>
+ <t_rrd_maximum></t_rrd_maximum>
+ <rrd_maximum>10000000000</rrd_maximum>
+ <t_data_source_type_id></t_data_source_type_id>
+ <data_source_type_id>2</data_source_type_id>
+ <t_rrd_heartbeat></t_rrd_heartbeat>
+ <rrd_heartbeat>600</rrd_heartbeat>
+ <t_data_input_field_id></t_data_input_field_id>
+ <data_input_field_id>0</data_input_field_id>
+ </hash_08001305772980bb6de1f12223d7ec53e323c4>
+ </items>
+ <data>
+ <item_000>
+ <data_input_field_id>hash_070013c1f36ee60c3dc98945556d57f26e475b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_000>
+ <item_001>
+ <data_input_field_id>hash_070013e6deda7be0f391399c5130e7c4a48b28</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_001>
+ <item_002>
+ <data_input_field_id>hash_070013cbbe5c1ddfb264a6e5d509ce1c78c95f</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_002>
+ <item_003>
+ <data_input_field_id>hash_0700136027a919c7c7731fbe095b6f53ab127b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_003>
+ <item_004>
+ <data_input_field_id>hash_070013b5c23f246559df38662c255f4aa21d6b</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_004>
+ <item_005>
+ <data_input_field_id>hash_0700131cc1493a6781af2c478fa4de971531cf</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_005>
+ <item_006>
+ <data_input_field_id>hash_070013f4facc5e2ca7ebee621f09bc6d9fc792</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_006>
+ <item_007>
+ <data_input_field_id>hash_070013acb449d1451e8a2a655c2c99d31142c7</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_007>
+ <item_008>
+ <data_input_field_id>hash_070013617cdc8a230615e59f06f361ef6e7728</data_input_field_id>
+ <t_value></t_value>
+ <value></value>
+ </item_008>
+ </data>
+ </hash_010013a88327df77ea19e333ddd96096c34751>
+ <hash_150013c21df5178e5c955013591239eb0afd46>
+ <name>Daily (5 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>1</steps>
+ <rows>600</rows>
+ <timespan>86400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_150013c21df5178e5c955013591239eb0afd46>
+ <hash_1500130d9c0af8b8acdc7807943937b3208e29>
+ <name>Weekly (30 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>6</steps>
+ <rows>700</rows>
+ <timespan>604800</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500130d9c0af8b8acdc7807943937b3208e29>
+ <hash_1500136fc2d038fb42950138b0ce3e9874cc60>
+ <name>Monthly (2 Hour Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>24</steps>
+ <rows>775</rows>
+ <timespan>2678400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500136fc2d038fb42950138b0ce3e9874cc60>
+ <hash_150013e36f3adb9f152adfa5dc50fd2b23337e>
+ <name>Yearly (1 Day Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>288</steps>
+ <rows>797</rows>
+ <timespan>33053184</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_150013e36f3adb9f152adfa5dc50fd2b23337e>
+ <hash_1500136399acb234c65ef56054d5a82b23bc20>
+ <name>Three days (5 minutes average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>6</steps>
+ <rows>700</rows>
+ <timespan>302400</timespan>
+ <cf_items>1|2|3|4</cf_items>
+ </hash_1500136399acb234c65ef56054d5a82b23bc20>
+ <hash_150013e73fb797d3ab2a9b97c3ec29e9690910>
+ <name>Hourly (1 Minute Average)</name>
+ <x_files_factor>0.5</x_files_factor>
+ <steps>1</steps>
+ <rows>500</rows>
+ <timespan>14400</timespan>
+ <cf_items>1|3</cf_items>
+ </hash_150013e73fb797d3ab2a9b97c3ec29e9690910>
+ <hash_060013e9c43831e54eca8069317a2ce8c6f751>
+ <name>Normal</name>
+ <gprint_text>%8.2lf %s</gprint_text>
+ </hash_060013e9c43831e54eca8069317a2ce8c6f751>
+</cacti>
diff --git a/admin/netsnmp-perl/haproxy.pl b/admin/netsnmp-perl/haproxy.pl
new file mode 100644
index 0000000..64684ad
--- /dev/null
+++ b/admin/netsnmp-perl/haproxy.pl
@@ -0,0 +1,249 @@
+#
+# Net-SNMP perl plugin for HAProxy
+# Version 0.30
+#
+# Copyright 2007-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+#
+# 1. get a variable from "show stat":
+# 1.3.6.1.4.1.29385.106.1.$type.$field.$iid.$sid
+# type: 0->frontend, 1->backend, 2->server, 3->socket
+#
+# 2. get a variable from "show info":
+# 1.3.6.1.4.1.29385.106.2.$req.$varnr
+#
+# TODO:
+# - implement read timeout
+#
+
+use NetSNMP::agent (':all');
+use NetSNMP::ASN qw(:all);
+use IO::Socket::UNIX;
+
+use strict;
+
+my $agent = new NetSNMP::agent('Name' => 'HAProxy');
+my $sa = "/var/run/haproxy.stat";
+
+use constant OID_HAPROXY => '1.3.6.1.4.1.29385.106';
+use constant OID_HAPROXY_STATS => OID_HAPROXY . '.1';
+use constant OID_HAPROXY_INFO => OID_HAPROXY . '.2';
+
+my $oid_stat = new NetSNMP::OID(OID_HAPROXY_STATS);
+my $oid_info = new NetSNMP::OID(OID_HAPROXY_INFO);
+
+use constant STATS_PXNAME => 0;
+use constant STATS_SVNAME => 1;
+use constant STATS_IID => 27;
+use constant STATS_SID => 28;
+use constant STATS_TYPE => 32;
+
+use constant FIELD_INDEX => 10001;
+use constant FIELD_NAME => 10002;
+
+my %info_vars = (
+ 0 => 'Name',
+ 1 => 'Version',
+ 2 => 'Release_date',
+ 3 => 'Nbproc',
+ 4 => 'Process_num',
+ 5 => 'Pid',
+ 6 => 'Uptime',
+ 7 => 'Uptime_sec',
+ 8 => 'Memmax_MB',
+ 9 => 'Ulimit-n',
+ 10 => 'Maxsock',
+ 11 => 'Maxconn',
+ 12 => 'Maxpipes',
+ 13 => 'CurrConns',
+ 14 => 'PipesUsed',
+ 15 => 'PipesFree',
+ 16 => 'Tasks',
+ 17 => 'Run_queue',
+ 18 => 'node',
+ 19 => 'description',
+);
+
+sub find_next_stat_id {
+ my($type, $field, $proxyid, $sid) = @_;
+
+ my $obj = 1 << $type;
+
+ my $np = -1;
+ my $nl = -1;
+
+ my $sock = new IO::Socket::UNIX (Peer => $sa, Type => SOCK_STREAM, Timeout => 1);
+ next if !$sock;
+
+ print $sock "show stat -1 $obj -1\n";
+
+ while(<$sock>) {
+ chomp;
+ my @d = split(',');
+
+ last if !$d[$field] && $field != FIELD_INDEX && $field != FIELD_NAME && /^#/;
+ next if /^#/;
+
+ next if $d[STATS_TYPE] != $type;
+
+ next if ($d[STATS_IID] < $proxyid) || ($d[STATS_IID] == $proxyid && $d[STATS_SID] <= $sid);
+
+ if ($np == -1 || $d[STATS_IID] < $np || ($d[STATS_IID] == $np && $d[STATS_SID] < $nl)) {
+ $np = $d[STATS_IID];
+ $nl = $d[STATS_SID];
+ next;
+ }
+ }
+
+ close($sock);
+
+ return 0 if ($np == -1);
+
+ return "$type.$field.$np.$nl"
+}
+
+sub haproxy_stat {
+ my($handler, $registration_info, $request_info, $requests) = @_;
+
+ for(my $request = $requests; $request; $request = $request->next()) {
+ my $oid = $request->getOID();
+
+ $oid =~ s/$oid_stat//;
+ $oid =~ s/^\.//;
+
+ my $mode = $request_info->getMode();
+
+ my($type, $field, $proxyid, $sid, $or) = split('\.', $oid, 5);
+
+ next if $type > 3 || defined($or);
+
+ if ($mode == MODE_GETNEXT) {
+
+ $type = 0 if !$type;
+ $field = 0 if !$field;
+ $proxyid = 0 if !$proxyid;
+ $sid = 0 if !$sid;
+
+ my $nextid = find_next_stat_id($type, $field, $proxyid, $sid);
+ $nextid = find_next_stat_id($type, $field+1, 0, 0) if !$nextid;
+ $nextid = find_next_stat_id($type+1, 0, 0, 0) if !$nextid;
+
+ if ($nextid) {
+ ($type, $field, $proxyid, $sid) = split('\.', $nextid);
+ $request->setOID(sprintf("%s.%s", OID_HAPROXY_STATS, $nextid));
+ $mode = MODE_GET;
+ }
+ }
+
+ if ($mode == MODE_GET) {
+ next if !defined($proxyid) || !defined($type) || !defined($sid) || !defined($field);
+
+ my $obj = 1 << $type;
+
+ my $sock = new IO::Socket::UNIX (Peer => $sa, Type => SOCK_STREAM, Timeout => 1);
+ next if !$sock;
+
+ print $sock "show stat $proxyid $obj $sid\n";
+
+ while(<$sock>) {
+ chomp;
+ my @data = split(',');
+
+ last if !defined($data[$field]) && $field != FIELD_INDEX && $field != FIELD_NAME;
+
+ if ($proxyid) {
+ next if $data[STATS_IID] ne $proxyid;
+ next if $data[STATS_SID] ne $sid;
+ next if $data[STATS_TYPE] ne $type;
+ }
+
+ if ($field == FIELD_INDEX) {
+ $request->setValue(ASN_OCTET_STR,
+ sprintf("%s.%s", $data[STATS_IID],
+ $data[STATS_SID]));
+ } elsif ($field == FIELD_NAME) {
+ $request->setValue(ASN_OCTET_STR,
+ sprintf("%s/%s", $data[STATS_PXNAME],
+ $data[STATS_SVNAME]));
+ } else {
+ $request->setValue(ASN_OCTET_STR, $data[$field]);
+ }
+
+ close($sock);
+ last;
+ }
+
+ close($sock);
+ next;
+ }
+
+ }
+}
+
+sub haproxy_info {
+ my($handler, $registration_info, $request_info, $requests) = @_;
+
+ for(my $request = $requests; $request; $request = $request->next()) {
+ my $oid = $request->getOID();
+
+ $oid =~ s/$oid_info//;
+ $oid =~ s/^\.//;
+
+ my $mode = $request_info->getMode();
+
+ my($req, $nr, $or) = split('\.', $oid, 3);
+
+ next if $req >= 2 || defined($or);
+
+ if ($mode == MODE_GETNEXT) {
+ $req = 0 if !defined($req);
+ $nr = -1 if !defined($nr);
+
+ if (!defined($info_vars{$nr+1})) {
+ $req++;
+ $nr = -1;
+ }
+
+ next if $req >= 2;
+
+ $request->setOID(sprintf("%s.%s.%s", OID_HAPROXY_INFO, $req, ++$nr));
+ $mode = MODE_GET;
+
+ }
+
+ if ($mode == MODE_GET) {
+
+ next if !defined($req) || !defined($nr);
+
+ if ($req == 0) {
+ next if !defined($info_vars{$nr});
+ $request->setValue(ASN_OCTET_STR, $info_vars{$nr});
+ next;
+ }
+
+ if ($req == 1) {
+ next if !defined($info_vars{$nr});
+
+ my $sock = new IO::Socket::UNIX (Peer => $sa, Type => SOCK_STREAM, Timeout => 1);
+ next if !$sock;
+
+ print $sock "show info\n";
+
+ while(<$sock>) {
+ chomp;
+ my ($key, $val) = /(.*):\s*(.*)/;
+
+ next if $info_vars{$nr} ne $key;
+
+ $request->setValue(ASN_OCTET_STR, $val);
+ last;
+ }
+
+ close($sock);
+ }
+ }
+ }
+}
+
+$agent->register('HAProxy stat', OID_HAPROXY_STATS, \&haproxy_stat);
+$agent->register('HAProxy info', OID_HAPROXY_INFO, \&haproxy_info);
+
diff --git a/admin/netsnmp-perl/haproxy_backend.xml b/admin/netsnmp-perl/haproxy_backend.xml
new file mode 100644
index 0000000..26ce63f
--- /dev/null
+++ b/admin/netsnmp-perl/haproxy_backend.xml
@@ -0,0 +1,83 @@
+<interface>
+ <name>HAProxy - backend</name>
+ <oid_index>.1.3.6.1.4.1.29385.106.1.1.10001</oid_index>
+ <fields>
+ <beIID>
+ <name>Proxy ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.27</oid>
+ </beIID>
+ <beSID>
+ <name>Service ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.28</oid>
+ </beSID>
+ <bePxName>
+ <name>Proxy Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.0</oid>
+ </bePxName>
+ <beSvName>
+ <name>Service Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.1</oid>
+ </beSvName>
+ <beSTot>
+ <name>Total Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.7</oid>
+ </beSTot>
+ <beBIn>
+ <name>Bytes In</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.8</oid>
+ </beBIn>
+ <beBOut>
+ <name>Bytes Out</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.9</oid>
+ </beBOut>
+ <beEConn>
+ <name>Connection Errors</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.13</oid>
+ </beEConn>
+ <beEResp>
+ <name>Response Errors</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.14</oid>
+ </beEResp>
+ <beLBTot>
+ <name>LB Total</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.30</oid>
+ </beLBTot>
+ <beDReq>
+ <name>Denied Requests</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.1.10</oid>
+ </beDReq>
+ </fields>
+</interface>
diff --git a/admin/netsnmp-perl/haproxy_frontend.xml b/admin/netsnmp-perl/haproxy_frontend.xml
new file mode 100644
index 0000000..ade2a77
--- /dev/null
+++ b/admin/netsnmp-perl/haproxy_frontend.xml
@@ -0,0 +1,83 @@
+<interface>
+ <name>HAProxy - frontend</name>
+ <oid_index>.1.3.6.1.4.1.29385.106.1.0.10001</oid_index>
+ <fields>
+ <feIID>
+ <name>Proxy ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.27</oid>
+ </feIID>
+ <feSID>
+ <name>Service ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.28</oid>
+ </feSID>
+ <fePxName>
+ <name>Proxy Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.0</oid>
+ </fePxName>
+ <feSvName>
+ <name>Service Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.1</oid>
+ </feSvName>
+ <feSCur>
+ <name>Current Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.4</oid>
+ </feSCur>
+ <feSMax>
+ <name>Maximum Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.5</oid>
+ </feSMax>
+ <feSTot>
+ <name>Total Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.7</oid>
+ </feSTot>
+ <feEReq>
+ <name>Request Errors</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.12</oid>
+ </feEReq>
+ <feBIn>
+ <name>Bytes In</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.8</oid>
+ </feBIn>
+ <feBOut>
+ <name>Bytes Out</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.9</oid>
+ </feBOut>
+ <feDReq>
+ <name>Denied Requests</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.0.10</oid>
+ </feDReq>
+ </fields>
+</interface>
diff --git a/admin/netsnmp-perl/haproxy_socket.xml b/admin/netsnmp-perl/haproxy_socket.xml
new file mode 100644
index 0000000..63ae110
--- /dev/null
+++ b/admin/netsnmp-perl/haproxy_socket.xml
@@ -0,0 +1,90 @@
+<interface>
+ <name>HAProxy - socket</name>
+ <oid_index>.1.3.6.1.4.1.29385.106.1.3.10001</oid_index>
+ <fields>
+ <feseID>
+ <name>Unique Index</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.10001</oid>
+ </feseID>
+ <feIID>
+ <name>Proxy ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.27</oid>
+ </feIID>
+ <feSID>
+ <name>Service ID</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.28</oid>
+ </feSID>
+ <fePxName>
+ <name>Proxy Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.0</oid>
+ </fePxName>
+ <feSvName>
+ <name>Service Name</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>input</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.1</oid>
+ </feSvName>
+ <feSCur>
+ <name>Current Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.4</oid>
+ </feSCur>
+ <feSMax>
+ <name>Maximum Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.5</oid>
+ </feSMax>
+ <feSTot>
+ <name>Total Sessions</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.7</oid>
+ </feSTot>
+ <feEReq>
+ <name>Request Errors</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.12</oid>
+ </feEReq>
+ <feBIn>
+ <name>Bytes In</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.8</oid>
+ </feBIn>
+ <feBOut>
+ <name>Bytes Out</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.9</oid>
+ </feBOut>
+ <feDReq>
+ <name>Denied Requests</name>
+ <method>get</method>
+ <source>value</source>
+ <direction>output</direction>
+ <oid>.1.3.6.1.4.1.29385.106.1.3.10</oid>
+ </feDReq>
+ </fields>
+</interface>
diff --git a/admin/release-estimator/README.md b/admin/release-estimator/README.md
new file mode 100644
index 0000000..a51f731
--- /dev/null
+++ b/admin/release-estimator/README.md
@@ -0,0 +1,68 @@
+# Release Estimator
+This tool monitors the HAProxy stable branches and calculates a proposed
+release date for the next minor release based on the bug fixes that are in
+the queue.
+
+
+## Requirements
+ - Python 3.x
+ - [lxml](https://lxml.de/installation.html)
+
+
+## Usage
+ release-estimator.py [-h] [--print] [--to-email TO_EMAIL]
+ [--from-email FROM_EMAIL] [--send-mail]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --print Print email only
+ --to-email TO_EMAIL Send email to <email>
+ --from-email FROM_EMAIL
+ Send email from <email>
+ --send-mail Send email
+
+
+## Examples
+
+
+### Print only:
+ ./release-estimator.py --print
+
+
+### Send email:
+ ./release-estimator.py --send-mail --from-email from@domain.local --to-email to@domain.local
+
+
+## How it works
+For each version we check the age and apply the following logic:
+ - Skip the release if it's:
+ - older than MAX_VERSION_AGE
+ - older than MAX_VERSION_AGE_NONLTS days and an odd numbered release
+ (1.9,2.1,2.3)
+
+ - For all other valid releases we will then collect the number of bug fixes
+ in queue for each of the defined severity levels:
+ - BUG
+ - BUILD
+ - MINOR
+ - MEDIUM
+ - MAJOR
+ - CRITICAL
+
+ We'll then begin calculating the proposed release date based on the last
+ release date plus the first commit date of the first bug fix for the defined
+ severity level.
+
+ By default the proposed release dates use the following padding:
+ (Can be modified in THRESHOLDS)
+ - BUG/BUILD/MINOR - 28 days
+ - MEDIUM - 30 days
+ - MAJOR - 14 days
+ - CRITICAL - 2 days
+
+ After we have a proposed release date we will assign a release urgency
+ to it. As we get closer to the proposed release date the urgency level changes.
+ By default the urgency levels and their times are:
+ - WARNING - proposed date is 7 days or less
+ - NOTICE - proposed date is 21 days or less
+ - INFO - proposed date is longer than the above
diff --git a/admin/release-estimator/release-estimator.py b/admin/release-estimator/release-estimator.py
new file mode 100755
index 0000000..bf005df
--- /dev/null
+++ b/admin/release-estimator/release-estimator.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python3
+#
+# Release estimator for HAProxy
+#
+# A tool that monitors the HAProxy stable branches and calculates a proposed
+# release date for the next minor release based on the bug fixes that are in
+# the queue.
+#
+# Copyright 2020 HAProxy Technologies, Daniel Corbett <dcorbett@haproxy.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 3 of the License, or (at your option) any later version.
+#
+#
+
+from lxml import html
+import requests
+import traceback
+import smtplib
+import math
+import copy
+import time
+import sys
+import argparse
+from datetime import datetime
+from datetime import timedelta
+from email.mime.text import MIMEText
+
+# Do not report on versions older than
+# MAX_VERSION_AGE.
+MAX_VERSION_AGE = 1095 # days
+
+# Do not report on non-lts releases (odd releases) that
+# are older than MAX_VERSION_AGE_NONLTS
+MAX_VERSION_AGE_NONLTS = 547 # days
+
+# For each severity/issue type, set thresholds
+# count - indicates how many bugs for this issue type should be in the queue
+# time - indicates how many days should be added to the release date
+THRESHOLDS = {
+ 'BUG' :{ 'count' : 1, 'time' : 28},
+ 'BUILD' :{ 'count' : 1, 'time' : 28},
+ 'MINOR' : { 'count' : 1, 'time' : 28},
+ 'MEDIUM' : { 'count' : 1, 'time' : 30},
+ 'MAJOR' : { 'count' : 1, 'time' : 14 },
+ 'CRITICAL' : { 'count' : 1, 'time' : 2 }
+}
+
+# Increase the urgency of a release as estimated time (in days) gets closer.
+RELEASE_URGENCY = { 'WARNING' : 7, 'NOTICE' : 21, 'INFO' : '' }
+
+def search_set(s, f):
+ for t in s:
+ if f in t:
+ return True
+
+def check_for_email(s, parser):
+ if "@" not in s:
+ parser.print_help()
+ sys.exit()
+
+def main():
+ global MAX_VERSION_AGE
+ global MAX_VERSION_AGE_NONLTS
+ global THRESHOLDS
+ global RELEASE_URGENCY
+
+ SEND_MAIL=False
+ VERSIONS = []
+ issues = {}
+ BUGQUEUE = {}
+ BUGS = { "bugs" :[] }
+ email_message = """Hi,
+
+This is a friendly bot that watches fixes pending for the next haproxy-stable release! One such e-mail is sent periodically once patches are waiting in the last maintenance branch, and an ideal release date is computed based on the severity of these fixes and their merge date. Responses to this mail must be sent to the mailing list.
+
+"""
+
+ parser = argparse.ArgumentParser(description='HAProxy Stable Release Estimator')
+ parser.add_argument('--print', action="store_true",
+ help='Print email only')
+ parser.add_argument('--to-email', nargs=1, required=False,
+ help='Send email to <email>')
+ parser.add_argument('--from-email', nargs=1, required=False,
+ help='Send email from <email>')
+ parser.add_argument('--send-mail', action="store_true",
+ help='Send email')
+ args = parser.parse_args()
+
+ if not args.print and not args.send_mail and not args.to_email and not args.from_email:
+ parser.print_help()
+ sys.exit()
+
+ if args.send_mail and (not args.to_email or not args.from_email):
+ parser.print_help()
+ sys.exit()
+
+ if args.to_email:
+ check_for_email(args.to_email[0], parser)
+ TO_EMAIL = args.to_email[0]
+
+ if args.from_email:
+ check_for_email(args.from_email[0], parser)
+ FROM_EMAIL = args.from_email[0]
+
+ if args.send_mail:
+ SEND_MAIL = True
+
+ if SEND_MAIL:
+ try:
+ TO_EMAIL
+ FROM_EMAIL
+ except:
+ parser.print_help()
+ sys.exit()
+
+ #
+ # Let's get the list of the current stable versions
+ #
+
+ page = requests.get('http://www.haproxy.org/bugs/')
+ tree = html.fromstring(page.content)
+
+ for x in (tree.xpath('//th')):
+ if x.xpath('./a/text()'):
+ VERSIONS.append(x.xpath('./a/text()')[0])
+
+
+ #
+ # For each version let's check it's age. We'll apply the following logic:
+ # - Skip the release if it's:
+ # * older than MAX_VERSION_AGE days
+ # * older than MAX_VERSION_AGE_NONLTS days and an odd numbered release (1.9,2.1,2.3)
+ #
+ # For all other valid releases we will then collect the number of bug fixes
+ # in queue for each of the defined severity levels:
+ # - BUG
+ # - BUILD
+ # - MINOR
+ # - MEDIUM
+ # - MAJOR
+ # - CRITICAL
+ #
+ # We'll then begin calculating the proposed release date based on the last
+ # release date plus the first commit date of the first bug fix for the defined
+ # severity level.
+ #
+ # By default the proposed release dates use the following padding:
+ # (Can be modified in THRESHOLDS)
+ # - BUG/BUILD/MINOR - 28 days
+ # - MEDIUM - 30 days
+ # - MAJOR - 14 days
+ # - CRITICAL - 2 days
+ #
+ # After we have a proposed release date we will assign a release urgency
+ # to it. As we get closer to the proposed release date the urgency level changes.
+ # By default the urgency levels and their times are:
+ # - WARNING - proposed date is 7 days or less
+ # - NOTICE - proposed date is 21 days or less
+ # - INFO - proposed date is longer than the above
+ #
+
+ for version in VERSIONS:
+ BUGQUEUE[version] = { "total" : 0, "last": "" }
+ VERSION_THRESHOLDS = copy.deepcopy(THRESHOLDS)
+ print("Collecting information on %s" % (version))
+ page = requests.get('http://www.haproxy.org/bugs/bugs-%s.html' % (version))
+ tree = html.fromstring(page.content)
+
+ issues[version] = {}
+ issues_count = {}
+ release_soon = False
+ num_to_word = {
+ 1 : 'one',
+ 2 : 'two',
+ 3 : 'three',
+ 4 : 'four',
+ 5 : 'five',
+ 6 : 'six',
+ 7 : 'seven',
+ 8 : 'eight',
+ 9 : 'nine',
+ 10 : 'ten',
+ 11 : 'eleven',
+ 12 : 'twelve',
+ 13 : 'thirteen',
+ }
+
+ # parse out the CHANGELOG link
+ CHANGELOG = tree.xpath('//a[contains(@href,"CHANGELOG")]/@href')[0]
+
+ last_version = tree.xpath('//td[contains(text(), "last")]/../td/a/text()')[0]
+ first_version = "%s.0" % (version)
+
+ # Get CHANGELOG for release
+ changelog_page = requests.get(CHANGELOG)
+ try:
+ for l in changelog_page.content.decode('utf-8').split('\n'):
+ # the below is a bit of a hack to parse out valid years in the CHANGELOG
+ if (last_version in l) and ('201' in l or '202' in l or '200' in l) and '/' in l:
+ # set the date in which this version was last released
+ last_release_date = l.split(' ')[0]
+ last_release_datetime = datetime.strptime(last_release_date.strip(), '%Y/%m/%d')
+ BUGQUEUE[version]['last'] = last_release_date
+ break
+ for l in changelog_page.content.decode('utf-8').split('\n'):
+ # the below is a bit of a hack to parse out valid years in the CHANGELOG
+ if (first_version in l) and ('201' in l or '202' in l or '200' in l) and '/' in l:
+ # set the date in which this version was first released
+ first_release_date = l.split(' ')[0]
+ first_release_datetime = datetime.strptime(first_release_date.strip(), '%Y/%m/%d')
+ BUGQUEUE[version]['first'] = first_release_datetime
+ break
+ except:
+ print(traceback.format_exc())
+ last_release_date = False
+
+ # get unix timestamp for today and timestamp of first release date
+ today_ts = datetime.today().timestamp()
+ first_version_ts = BUGQUEUE[version]['first'].timestamp()
+
+ # calculate the age of this version in days and years
+ version_age = math.ceil((today_ts-first_version_ts)/86400)
+ version_age_years = math.ceil(version_age/365)
+
+ # We do not want to monitor versions that are older
+ # than MAX_VERSION_AGE or MAX_VERSION_AGE_NONLTS
+ if version_age >= MAX_VERSION_AGE:
+ print("\t - Version: %s is older than %d days, skipping" % (version, MAX_VERSION_AGE))
+ continue
+
+ if version_age > MAX_VERSION_AGE_NONLTS:
+ if int(version.split('.')[1]) % 2 > 0:
+ print("\t - Version: %s is not LTS and is older than %d days, skipping" % (version, MAX_VERSION_AGE_NONLTS))
+ continue
+
+ # If the release is older than 1 year let's increase the time until
+ # a release is due. <base time threshold> * <version age years>
+ if version_age_years > 1:
+ for k in VERSION_THRESHOLDS.keys():
+ VERSION_THRESHOLDS[k]['time'] *= int(version_age_years)
+
+ # Let's capture the bug table which contains each bug & their severity
+ bug_table = tree.xpath('//th[contains(text(), "Severity")]/ancestor::table[last()]')[0]
+
+ # Loop through bug table and parse out the title of each bug
+ # found within the links and their merge date.
+ # Example is: 2020-10-19 BUG/MINOR: disable dynamic OCSP load with BoringSSL
+ for x in bug_table.xpath('.//a[contains(@href,"commitdiff")]'):
+ # Capture the bug label
+ # Example: BUG/MINOR: disable dynamic OCSP load with BoringSSL
+ issue_tmp = x.xpath('./text()')[0]
+ # Capture the date
+ # Example: 2020-10-19
+ date_tmp = x.xpath('../preceding-sibling::td/text()')[0]
+
+ # Split the bug into a severity
+ if "/" in issue_tmp:
+ bug_type = issue_tmp.split(':')[0].split('/')[1].strip()
+ else:
+ bug_type = issue_tmp.split(':')[0].strip()
+ bug_text = ":".join(issue_tmp.split(':')[1:]).strip()
+ if bug_type not in issues[version].keys():
+ issues[version][bug_type] = set()
+ issues[version][bug_type].add("%s|%s" % (date_tmp, bug_text))
+
+ # Loop through the issue_types (severities) (MINOR, MEDIUM, MAJOR, etc.)
+ # We'll check if the severity has already been accounted for
+ # If not, we'll set the timestamp to the timestamp of the current issue
+ # If so, we'll check if the current bugs timestamp is less than the
+ # previous one. This will help us to determine when we first saw this
+ # severity type as calculations are based on the first time seeing a
+ # severity type. We'll then set the number of issues for each severity.
+ for issue_type in issues[version]:
+ issues_count[issue_type] = {}
+ for k in issues[version][issue_type]:
+ if 'timestamp' not in issues_count[issue_type].keys():
+ issues_count[issue_type]['timestamp'] = int(time.mktime(datetime.strptime(k.split('|')[0], "%Y-%m-%d").timetuple()))
+ else:
+ if issues_count[issue_type]['timestamp'] > int(time.mktime(datetime.strptime(k.split('|')[0], "%Y-%m-%d").timetuple())):
+ issues_count[issue_type]['timestamp'] = int(time.mktime(datetime.strptime(k.split('|')[0], "%Y-%m-%d").timetuple()))
+ issues_count[issue_type]['count'] = len(issues[version][issue_type])
+
+ release_date = None
+ total_count = 0
+
+ # Let's check the count for each severity type and see if they
+ # are greater than our thresholds count. This can be used to
+ # hold off on calculating release estimates until a certain number of
+ # MINOR bugs have accumulated.
+ for issue_type in issues_count.keys():
+ if issues_count[issue_type]['count'] >= VERSION_THRESHOLDS[issue_type]['count']:
+ # If the total number of issues is greater than the threshold
+ # for a severity we'll attempt to set a release date.
+ # We'll use the timestamp from the first time an issue was
+ # seen and add on the number of days specified within the
+ # THRESHOLDS for that issue type. We'll also increment
+ # the total number of issues that have been fixed in this
+ # version across all severities/issue types.
+ total_count += issues_count[issue_type]['count']
+ issue_timestamp_delta = datetime.fromtimestamp(int(issues_count[issue_type]['timestamp'])) + timedelta(days=int(VERSION_THRESHOLDS[issue_type]['time']))
+ if not release_date: release_date = issue_timestamp_delta
+ elif release_date > issue_timestamp_delta: release_date = issue_timestamp_delta
+
+ if release_date: release_soon = True
+ if release_soon:
+ time_until_release = release_date - datetime.now()
+
+ # If a release date has been sent, let's calculate how long
+ # in words until that release. i.e. "less than 2 weeks"
+ if release_soon:
+ for k in sorted(RELEASE_URGENCY.keys()):
+ if not RELEASE_URGENCY[k]:
+ release_urgency_msg = k
+ elif time_until_release.days <= RELEASE_URGENCY[k]:
+ release_urgency_msg = k
+ rounded_week_time = math.ceil(time_until_release.days/7.0)
+ if abs(rounded_week_time) > 1:
+ week_word = 'weeks'
+ else:
+ week_word = 'week'
+ try:
+ # We now have all of the required information for building
+ # the email message.
+ # TODO: Fix alignment
+ email_message = """%s
+ Last release %s was issued on %s. There are currently %d patches in the queue cut down this way:
+""" % (email_message, last_version, last_release_datetime.strftime("%Y-%m-%d"), total_count)
+ for issue_type in sorted(issues_count.keys()):
+ email_message = "%s - %d %s, first one merged on %s\n" % (email_message, issues_count[issue_type]['count'],issue_type,datetime.fromtimestamp(int(issues_count[issue_type]['timestamp'])).strftime("%Y-%m-%d"))
+ email_message = "%s\nThus the computed ideal release date for %s would be %s, " % (email_message, ".".join(last_version.split(".")[:-1])+"."+str(int(last_version.split(".")[-1])+1), release_date.strftime("%Y-%m-%d"))
+ if rounded_week_time < 0:
+ email_message = "%swhich was %s %s ago.\n" % (email_message, num_to_word[abs(rounded_week_time)], week_word)
+ elif rounded_week_time == 0:
+ email_message = "%swhich was within the last week.\n" % (email_message)
+ else:
+ email_message = "%swhich is in %s %s or less.\n" % (email_message, num_to_word[rounded_week_time], week_word)
+ except Exception as err:
+ print(traceback.format_exc())
+ sys.exit()
+ # Total number of bugs fixed in this version
+ # since last release.
+ BUGQUEUE[version]['total'] = total_count
+
+ email_subject = "stable-bot: Bugfixes waiting for a release "
+
+ # Add each version & their number of bugs to the subject
+ for k in sorted(BUGQUEUE.keys(), reverse=True):
+ if BUGQUEUE[k]['total'] > 0:
+ email_subject = "%s %s (%d)," % ( email_subject, k, BUGQUEUE[k]['total'])
+
+ email_subject = email_subject.rstrip(",")
+ email_message = "%s\nThe current list of patches in the queue is:\n" % (email_message)
+ uniq_issues = set()
+
+ # Parse out unique issues across all versions so that we can
+ # print them once with the list of affected versions.
+ for k in BUGQUEUE.keys():
+ for issue_type in sorted(issues[k].keys()):
+ for issue in issues[k][issue_type]:
+ uniq_issues.add("%s|%s" % (issue_type,issue.split('|')[1]))
+
+ # Loop through the unique issues and determine which versions
+ # are affected.
+ for i in uniq_issues:
+ affected_versions = []
+ for k in BUGQUEUE.keys():
+ try:
+ if search_set(issues[k][i.split('|')[0]], i.split('|')[1]):
+ affected_versions.append(k)
+ except Exception as e:
+ pass
+ if affected_versions:
+ affected_versions.sort()
+ try:
+ BUGS["bugs"].append({ "affected_versions" : affected_versions, "bug":i.split('|')[1], "severity":i.split('|')[0] })
+ except:
+ BUGS["bugs"] = [ { "affected_versions" : affected_versions, "bug":i.split('|')[1], "severity":i.split('|')[0] } ]
+
+ BUGS["bugs"] = sorted(BUGS["bugs"], key = lambda i: i['severity'])
+
+ # Add each issue with affected versions to email message
+ # Example:
+ # - 1.8, 2.0, 2.1, 2.2 - MINOR : stats: fix validity of the json schema
+ for bug in BUGS["bugs"]:
+ email_message = "%s - %s %s %s : %s\n" % (email_message, ", ".join(bug["affected_versions"]).ljust(14), "-".rjust(12), bug["severity"].ljust(7), bug["bug"])
+
+ email_message="%s\n-- \nThe haproxy stable-bot is freely provided by HAProxy Technologies to help improve the quality of each HAProxy release. If you have any issue with these emails or if you want to suggest some improvements, please post them on the list so that the solutions suiting the most users can be found.\n" % (email_message)
+
+ # If a message with actual issues exists let's either print it or send out
+ # an email.
+ if "first one merged on" in email_message:
+ if args.print:
+ print(email_subject)
+ print(email_message)
+ if SEND_MAIL:
+ print('Send email to:%s from:%s' % (TO_EMAIL, FROM_EMAIL), end="")
+ msg = MIMEText(email_message)
+ msg['to'] = TO_EMAIL
+ msg['from'] = FROM_EMAIL
+ msg['subject'] = email_subject
+ msg.add_header('reply-to', TO_EMAIL)
+ try:
+ server = smtplib.SMTP('127.0.0.1', timeout=10)
+ server.sendmail(msg['from'], [msg['to']], msg.as_string())
+ print(" - Email sent")
+ except (ConnectionRefusedError, smtplib.SMTPConnectError):
+ print("- Error: SMTP Connection Error")
+ sys.exit()
+ except smtplib.SMTPServerDisconnected:
+ print('- Error: SMTP Server Disconnect (possible timeout)')
+ sys.exit()
+ except (smtplib.SMTPRecipientsRefused, smtplib.SMTPSenderRefused):
+ print('- Error: Recipients or Sender Refused')
+ sys.exit()
+ except (smtplib.SMTPHeloError, smtplib.SMTPAuthenticationError):
+ print('- Error: SMTP rejected HELO or requires Authentication')
+ sys.exit()
+ except:
+ print(traceback.format_exc())
+ sys.exit()
+
+
+if __name__ == "__main__":
+ main()
+
+sys.exit()
diff --git a/admin/selinux/README b/admin/selinux/README
new file mode 100644
index 0000000..7ad924d
--- /dev/null
+++ b/admin/selinux/README
@@ -0,0 +1,18 @@
+This directory includes an selinux policy for haproxy. It assumes
+the following file locations:
+
+ /usr/sbin/haproxy -- binary
+ /etc/haproxy/haproxy\.cfg -- configuration
+ /var/run/haproxy\.pid -- pid-file
+ /var/run/haproxy\.sock(.*) -- stats socket
+ /var/empty/haproxy -- chroot dir
+
+To build and load it on RHEL5 you'll need the "selinux-policy-devel" package,
+and from within this directory run:
+
+ make -f /usr/share/selinux/devel/Makefile
+ sudo semodule -i haproxy.pp
+ restorecon /usr/sbin/haproxy /etc/haproxy/haproxy.cfg /var/run/haproxy.pid /var/run/haproxy.sock*
+
+
+Feedback to Jan-Frode Myklebust <janfrode@tanso.no> is much appreciated,
diff --git a/admin/selinux/haproxy.fc b/admin/selinux/haproxy.fc
new file mode 100644
index 0000000..63a0828
--- /dev/null
+++ b/admin/selinux/haproxy.fc
@@ -0,0 +1,6 @@
+# haproxy labeling policy
+# file: haproxy.fc
+/usr/sbin/haproxy -- gen_context(system_u:object_r:haproxy_exec_t, s0)
+/etc/haproxy/haproxy\.cfg -- gen_context(system_u:object_r:haproxy_conf_t, s0)
+/var/run/haproxy\.pid -- gen_context(system_u:object_r:haproxy_var_run_t, s0)
+/var/run/haproxy\.sock(.*) -- gen_context(system_u:object_r:haproxy_var_run_t, s0)
diff --git a/admin/selinux/haproxy.if b/admin/selinux/haproxy.if
new file mode 100644
index 0000000..236ad38
--- /dev/null
+++ b/admin/selinux/haproxy.if
@@ -0,0 +1,2 @@
+## <summary>selinux policy module for haproxy</summary>
+
diff --git a/admin/selinux/haproxy.te b/admin/selinux/haproxy.te
new file mode 100644
index 0000000..bc124fb
--- /dev/null
+++ b/admin/selinux/haproxy.te
@@ -0,0 +1,66 @@
+policy_module(haproxy,1.0.0)
+
+########################################
+#
+# Declarations
+#
+
+type haproxy_t;
+type haproxy_exec_t;
+type haproxy_port_t;
+init_daemon_domain(haproxy_t, haproxy_exec_t)
+
+type haproxy_var_run_t;
+files_pid_file(haproxy_var_run_t)
+
+type haproxy_conf_t;
+files_config_file(haproxy_conf_t)
+
+########################################
+#
+# Local policy
+#
+
+# Configuration files - read
+allow haproxy_t haproxy_conf_t : dir list_dir_perms;
+allow haproxy_t haproxy_conf_t : file read_file_perms;
+allow haproxy_t haproxy_conf_t : lnk_file read_file_perms;
+
+# PID and socket file - create, read, and write
+files_pid_filetrans(haproxy_t, haproxy_var_run_t, { file sock_file })
+allow haproxy_t haproxy_var_run_t:file manage_file_perms;
+allow haproxy_t haproxy_var_run_t:sock_file { create rename link setattr unlink };
+
+allow haproxy_t self : tcp_socket create_stream_socket_perms;
+allow haproxy_t self: udp_socket create_socket_perms;
+allow haproxy_t self: capability { setgid setuid sys_chroot sys_resource kill };
+allow haproxy_t self: process { setrlimit signal };
+
+
+logging_send_syslog_msg(haproxy_t)
+
+corenet_tcp_bind_all_ports(haproxy_t)
+corenet_tcp_connect_all_ports(haproxy_t)
+corenet_tcp_bind_all_nodes(haproxy_t)
+corenet_tcp_sendrecv_all_ports(haproxy_t)
+corenet_tcp_recvfrom_unlabeled(haproxy_t)
+
+# use shared libraries
+libs_use_ld_so(haproxy_t)
+libs_use_shared_libs(haproxy_t)
+
+# Read /etc/localtime:
+miscfiles_read_localization(haproxy_t)
+# Read /etc/passwd and more.
+files_read_etc_files(haproxy_t)
+
+# RHEL5 specific:
+require {
+ type unlabeled_t;
+ type haproxy_t;
+ class packet send;
+ class packet recv;
+}
+
+allow haproxy_t unlabeled_t:packet { send recv };
+
diff --git a/admin/syntax-highlight/haproxy.vim b/admin/syntax-highlight/haproxy.vim
new file mode 100644
index 0000000..f559df0
--- /dev/null
+++ b/admin/syntax-highlight/haproxy.vim
@@ -0,0 +1,164 @@
+" Vim syntax file
+" Language: HAProxy
+" Maintainer: Bruno Michel <brmichel@free.fr>
+" Last Change: Mar 30, 2007
+" Version: 0.3
+" URL: http://haproxy.1wt.eu/
+" URL: http://vim.sourceforge.net/scripts/script.php?script_id=1845
+
+" It is suggested to add the following line to $HOME/.vimrc :
+" au BufRead,BufNewFile haproxy* set ft=haproxy
+
+" For version 5.x: Clear all syntax items
+" For version 6.x: Quit when a syntax file was already loaded
+if version < 600
+ syntax clear
+elseif exists("b:current_syntax")
+ finish
+endif
+
+if version >= 600
+ setlocal iskeyword=_,-,a-z,A-Z,48-57
+else
+ set iskeyword=_,-,a-z,A-Z,48-57
+endif
+
+
+" Escaped chars
+syn match hapEscape +\\\(\\\| \|n\|r\|t\|#\|x\x\x\)+
+
+" Comments
+syn match hapComment /#.*$/ contains=hapTodo
+syn keyword hapTodo contained TODO FIXME XXX
+syn case ignore
+
+" Sections
+syn match hapSection /^\s*\(global\|defaults\)/
+syn match hapSection /^\s*\(listen\|frontend\|backend\|ruleset\)/ skipwhite nextgroup=hapSectLabel
+syn match hapSectLabel /\S\+/ skipwhite nextgroup=hapIp1 contained
+syn match hapIp1 /\(\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}\)\?:\d\{1,5}/ nextgroup=hapIp2 contained
+syn match hapIp2 /,\(\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}\)\?:\d\{1,5}/hs=s+1 nextgroup=hapIp2 contained
+
+" Parameters
+syn keyword hapParam chroot cliexp
+syn keyword hapParam daemon debug disabled
+syn keyword hapParam enabled
+syn keyword hapParam fullconn
+syn keyword hapParam gid group
+syn keyword hapParam maxconn monitor-uri
+syn keyword hapParam noepoll nopoll
+syn keyword hapParam pidfile
+syn keyword hapParam quiet
+syn keyword hapParam redispatch retries
+syn keyword hapParam reqallow reqdel reqdeny reqpass reqtarpit skipwhite nextgroup=hapRegexp
+syn keyword hapParam reqiallow reqidel reqideny reqipass reqitarpit skipwhite nextgroup=hapRegexp
+syn keyword hapParam rspdel rspdeny skipwhite nextgroup=hapRegexp
+syn keyword hapParam rspidel rspideny skipwhite nextgroup=hapRegexp
+syn keyword hapParam reqsetbe reqisetbe skipwhite nextgroup=hapRegexp2
+syn keyword hapParam reqadd reqiadd rspadd rspiadd
+syn keyword hapParam server source srvexp
+syn keyword hapParam uid ulimit-n user
+syn keyword hapParam reqrep reqirep rsprep rspirep skipwhite nextgroup=hapRegexp
+syn keyword hapParam errorloc errorloc302 errorloc303 skipwhite nextgroup=hapStatus
+syn keyword hapParam default_backend skipwhite nextgroup=hapSectLabel
+syn keyword hapParam appsession skipwhite nextgroup=hapAppSess
+syn keyword hapParam bind skipwhite nextgroup=hapIp1
+syn keyword hapParam balance skipwhite nextgroup=hapBalance
+syn keyword hapParam cookie skipwhite nextgroup=hapCookieNam
+syn keyword hapParam capture skipwhite nextgroup=hapCapture
+syn keyword hapParam dispatch skipwhite nextgroup=hapIpPort
+syn keyword hapParam source skipwhite nextgroup=hapIpPort
+syn keyword hapParam mode skipwhite nextgroup=hapMode
+syn keyword hapParam monitor-net skipwhite nextgroup=hapIPv4Mask
+syn keyword hapParam option skipwhite nextgroup=hapOption
+syn keyword hapParam stats skipwhite nextgroup=hapStats
+syn keyword hapParam server skipwhite nextgroup=hapServerN
+syn keyword hapParam source skipwhite nextgroup=hapServerEOL
+syn keyword hapParam log skipwhite nextgroup=hapGLog,hapLogIp
+
+" Options and additional parameters
+syn keyword hapAppSess contained len timeout
+syn keyword hapBalance contained roundrobin source
+syn keyword hapLen contained len
+syn keyword hapGLog contained global
+syn keyword hapMode contained http tcp health
+syn keyword hapOption contained abortonclose allbackups checkcache clitcpka dontlognull forwardfor
+syn keyword hapOption contained httpchk httpclose httplog keepalive logasap persist srvtcpka ssl-hello-chk
+syn keyword hapOption contained tcplog tcpka tcpsplice
+syn keyword hapOption contained except skipwhite nextgroup=hapIPv4Mask
+syn keyword hapStats contained uri realm auth scope enable
+syn keyword hapLogFac contained kern user mail daemon auth syslog lpr news nextgroup=hapLogLvl skipwhite
+syn keyword hapLogFac contained uucp cron auth2 ftp ntp audit alert cron2 nextgroup=hapLogLvl skipwhite
+syn keyword hapLogFac contained local0 local1 local2 local3 local4 local5 local6 local7 nextgroup=hapLogLvl skipwhite
+syn keyword hapLogLvl contained emerg alert crit err warning notice info debug
+syn keyword hapCookieKey contained rewrite insert nocache postonly indirect prefix nextgroup=hapCookieKey skipwhite
+syn keyword hapCapture contained cookie nextgroup=hapNameLen skipwhite
+syn keyword hapCapture contained request response nextgroup=hapHeader skipwhite
+syn keyword hapHeader contained header nextgroup=hapNameLen skipwhite
+syn keyword hapSrvKey contained backup cookie check inter rise fall port source minconn maxconn weight usesrc
+syn match hapStatus contained /\d\{3}/
+syn match hapIPv4Mask contained /\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}\(\/\d\{1,2}\)\?/
+syn match hapLogIp contained /\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}/ nextgroup=hapLogFac skipwhite
+syn match hapIpPort contained /\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}:\d\{1,5}/
+syn match hapServerAd contained /\d\{1,3}\.\d\{1,3}\.\d\{1,3}\.\d\{1,3}\(:[+-]\?\d\{1,5}\)\?/ nextgroup=hapSrvEOL skipwhite
+syn match hapNameLen contained /\S\+/ nextgroup=hapLen skipwhite
+syn match hapCookieNam contained /\S\+/ nextgroup=hapCookieKey skipwhite
+syn match hapServerN contained /\S\+/ nextgroup=hapServerAd skipwhite
+syn region hapSrvEOL contained start=/\S/ end=/$/ contains=hapSrvKey
+syn region hapRegexp contained start=/\S/ end=/\(\s\|$\)/ skip=/\\ / nextgroup=hapRegRepl skipwhite
+syn region hapRegRepl contained start=/\S/ end=/$/ contains=hapComment,hapEscape,hapBackRef
+syn region hapRegexp2 contained start=/\S/ end=/\(\s\|$\)/ skip=/\\ / nextgroup=hapSectLabel skipwhite
+syn match hapBackref contained /\\\d/
+
+
+" Transparent is a Vim keyword, so we need a regexp to match it
+syn match hapParam +transparent+
+syn match hapOption +transparent+ contained
+
+
+" Define the default highlighting.
+" For version 5.7 and earlier: only when not done already
+" For version 5.8 and later: only when an item doesn't have highlighting yet
+if version < 508
+ command -nargs=+ HiLink hi link <args>
+else
+ command -nargs=+ HiLink hi def link <args>
+endif
+
+HiLink hapEscape SpecialChar
+HiLink hapBackRef Special
+HiLink hapComment Comment
+HiLink hapTodo Todo
+HiLink hapSection Constant
+HiLink hapSectLabel Identifier
+HiLink hapParam Keyword
+
+HiLink hapRegexp String
+HiLink hapRegexp2 hapRegexp
+HiLink hapIp1 Number
+HiLink hapIp2 hapIp1
+HiLink hapLogIp hapIp1
+HiLink hapIpPort hapIp1
+HiLink hapIPv4Mask hapIp1
+HiLink hapServerAd hapIp1
+HiLink hapStatus Number
+
+HiLink hapOption Operator
+HiLink hapAppSess hapOption
+HiLink hapBalance hapOption
+HiLink hapCapture hapOption
+HiLink hapCookieKey hapOption
+HiLink hapHeader hapOption
+HiLink hapGLog hapOption
+HiLink hapLogFac hapOption
+HiLink hapLogLvl hapOption
+HiLink hapMode hapOption
+HiLink hapStats hapOption
+HiLink hapLen hapOption
+HiLink hapSrvKey hapOption
+
+
+delcommand HiLink
+
+let b:current_syntax = "haproxy"
+" vim: ts=8
diff --git a/admin/systemd/Makefile b/admin/systemd/Makefile
new file mode 100644
index 0000000..e542c23
--- /dev/null
+++ b/admin/systemd/Makefile
@@ -0,0 +1,8 @@
+PREFIX = /usr/local
+SBINDIR = $(PREFIX)/sbin
+
+haproxy.service: haproxy.service.in
+ sed -e 's:@SBINDIR@:'$(strip $(SBINDIR))':' $< > $@
+
+clean:
+ rm -f haproxy.service
diff --git a/admin/systemd/haproxy.service.in b/admin/systemd/haproxy.service.in
new file mode 100644
index 0000000..22a53d8
--- /dev/null
+++ b/admin/systemd/haproxy.service.in
@@ -0,0 +1,37 @@
+[Unit]
+Description=HAProxy Load Balancer
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+EnvironmentFile=-/etc/default/haproxy
+EnvironmentFile=-/etc/sysconfig/haproxy
+Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
+ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
+ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
+ExecReload=/bin/kill -USR2 $MAINPID
+KillMode=mixed
+Restart=always
+SuccessExitStatus=143
+Type=notify
+
+# The following lines leverage SystemD's sandboxing options to provide
+# defense in depth protection at the expense of restricting some flexibility
+# in your setup (e.g. placement of your configuration files) or possibly
+# reduced performance. See systemd.service(5) and systemd.exec(5) for further
+# information.
+
+# NoNewPrivileges=true
+# ProtectHome=true
+# If you want to use 'ProtectSystem=strict' you should whitelist the PIDFILE,
+# any state files and any other files written using 'ReadWritePaths' or
+# 'RuntimeDirectory'.
+# ProtectSystem=true
+# ProtectKernelTunables=true
+# ProtectKernelModules=true
+# ProtectControlGroups=true
+# If your SystemD version supports them, you can add: @reboot, @swap, @sync
+# SystemCallFilter=~@cpu-emulation @keyring @module @obsolete @raw-io
+
+[Install]
+WantedBy=multi-user.target
diff --git a/admin/wireshark-dissectors/peers/Makefile b/admin/wireshark-dissectors/peers/Makefile
new file mode 100644
index 0000000..95149e7
--- /dev/null
+++ b/admin/wireshark-dissectors/peers/Makefile
@@ -0,0 +1,17 @@
+CFLAGS = `pkg-config --cflags wireshark` -g -fPIC $(OPTS)
+LDFLAGS = `pkg-config --libs wireshark`
+
+NAME = packet-happp.so
+OBJS = packet-happp.o
+
+plugins=$(HOME)/.wireshark/plugins/
+
+$(NAME): $(OBJS)
+ $(CC) -shared $(LDFLAGS) $(OBJS) -o $@
+
+install: $(NAME)
+ install -d $(DESTDIR)$(plugins)
+ install -m 0755 $(NAME) $(DESTDIR)$(plugins)
+
+clean:
+ rm $(NAME) $(OBJS)
diff --git a/admin/wireshark-dissectors/peers/README b/admin/wireshark-dissectors/peers/README
new file mode 100644
index 0000000..95a3603
--- /dev/null
+++ b/admin/wireshark-dissectors/peers/README
@@ -0,0 +1,78 @@
+------------------------------------------------------------------------
+How to build wireshark with HAProxy Peers Protocol dissection support.
+------------------------------------------------------------------------
+
+Please note that at this time, HAProxy Peers Protocol dissection is not supported
+on Windows systems (could not be tested).
+
+1) File list
+-------------
+ - packet-happp.c: source code for HAProxy Peers Protocol (HAPPP) dissection
+ support.
+ - wireshark.happp.dissector.patch: a patch file for wireshark sources to enable HAPPP
+ dissection support. Note that this patch file modifies only two files:
+ (epan/dissectors/CMakeLists.txt and epan/dissectors/Makefile.am) to add
+ packet-happp.c file DISSECTOR_SRC variable which list all wireshark
+ - README: this file.
+
+2a) To build wireshark with HAPPP dissection support
+---------------------------------------------------
+ - Download wireshark sources:
+ $ git clone https://code.wireshark.org/review/wireshark
+ - copy packet-happp.c file to epan/dissectors/ directory.
+ - apply wireshark.happp.dissector.patch file to wireshark source directory.
+ - build wireshark (see https://www.wireshark.org/docs/wsdg_html_chunked/):
+ $ ./autogen.sh
+ $ ./configure
+ $ make
+
+2b) Alternative: build the HAPPP dissector as a wireshark plugin
+-----------------------------------------------------------------
+If you don't want to build wireshark completely, you can build the dissector as
+a plugin.
+
+You will need the development package of your distribution, which is
+"libwireshark-dev" for debian based distribution and "wireshark-dev" for
+redhat-based ones.
+
+$ make
+
+It is possible that depending on your distribution the compilation may fail
+with such an error:
+
+ packet-happp.c:40:10: fatal error: ws_version.h: No such file or directory
+ #include <ws_version.h>
+
+In this case, try to build this plugin with this OPTS variable:
+
+$ OPTS=-DWITHOUT_WS_VERSION make
+
+To install it in your home directory:
+
+$ make install
+
+The plugin will be installed in ~/.wireshark/plugins/ by default, but you can
+change this path by setting the "plugins" variable. If it didn't work, check
+the paths in "Help > About Wireshark > Folders > Personal Plugins" which should
+give you the right path to use.
+
+In some distributions it will be in ~/.local/lib/wireshark/ so you will need to
+install it this way:
+
+$ make install plugins=~/.local/lib/wireshark/plugins/3.2/epan/
+
+If you want to install it in the system directory you can do it this way, the
+right path is also in the Folder window. Change the plugins variable this way:
+
+$ sudo make install plugins=/usr/lib64/wireshark/plugins/3.2/epan/
+
+Be careful to use the right version number in the path.
+
+3) Check if you have the dissector in wireshark
+-----------------------------------------------
+To verify if the protocol was well loaded by your wireshark, open the Supported
+Protocols window in "View > Internals > Supported Protocols" and look for
+"HAPPP".
+
+In the case of a plugin, you should see your plugin loaded in "Help > About
+Wireshark > Plugins".
diff --git a/admin/wireshark-dissectors/peers/packet-happp.c b/admin/wireshark-dissectors/peers/packet-happp.c
new file mode 100644
index 0000000..581262f
--- /dev/null
+++ b/admin/wireshark-dissectors/peers/packet-happp.c
@@ -0,0 +1,1679 @@
+/* packet-happp.c
+ * Routines for HAProxy Peers Protocol (HAPPP) dissection
+ * Copyright 2016, Frédéric Lécaille <flecaille@haproxy.com>
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <inttypes.h>
+#include <arpa/inet.h>
+
+#include <config.h>
+
+#include <epan/to_str.h>
+#include <epan/packet.h>
+#include <epan/prefs.h>
+#include <epan/conversation.h>
+#include <epan/strutil.h>
+#include <epan/dissectors/packet-tcp.h>
+#include <epan/tvbuff.h>
+
+#ifndef WITHOUT_WS_VERSION
+#include <ws_version.h>
+#endif
+
+#ifndef WIRESHARK_VERSION_MAJOR
+#define WIRESHARK_VERSION_MAJOR VERSION_MAJOR
+#endif
+#ifndef WIRESHARK_VERSION_MINOR
+#define WIRESHARK_VERSION_MINOR VERSION_MINOR
+#endif
+#ifndef WIRESHARK_VERSION_MICRO
+#define WIRESHARK_VERSION_MICRO VERSION_MICRO
+#endif
+
+#define HAPP_STR(str) #str
+#define HAPP_XSTR(str) HAPP_STR(str)
+
+WS_DLL_PUBLIC_DEF const gchar plugin_version[] = "0.0.1";
+WS_DLL_PUBLIC_DEF const gchar plugin_release[] = HAPP_XSTR(WIRESHARK_VERSION_MAJOR.WIRESHARK_VERSION_MINOR);
+WS_DLL_PUBLIC_DEF const int plugin_want_major = WIRESHARK_VERSION_MAJOR;
+WS_DLL_PUBLIC_DEF const int plugin_want_minor = WIRESHARK_VERSION_MINOR;
+WS_DLL_PUBLIC void plugin_register(void);
+
+
+#define HAPPP_PROTOCOL "HAProxyS"
+#define HAPPP_MSG_MIN_LEN 2
+
+/* Status messages are the shortest ones (3 digits followed by a LF character) */
+#define STATUS_HANDSHAKE_SUCCEEDED "200"
+#define STATUS_TRY_AGAIN_LATER "300"
+#define STATUS_PROTOCOL_ERROR "501"
+#define STATUS_BAD_VERSION "502"
+#define STATUS_LOCAL_PEER_NAME_MISMATCH "503"
+#define STATUS_REMOTE_PEER_NAME_MISMATCH "504"
+
+#include <stdio.h>
+#include <ctype.h>
+#include <stdarg.h>
+
+
+#ifdef DEBUG
+static unsigned char dbg_buf[16 << 10];
+
+__attribute__((format (printf, 3, 4)))
+void hexdump(const unsigned char *buf, size_t buflen, const char *title_fmt, ...)
+{
+ size_t i;
+ va_list ap;
+ const unsigned char *p;
+ char str_buf[2 + 1 + 16 + 1 + 1];
+
+ va_start(ap, title_fmt);
+ vfprintf(stderr, title_fmt, ap);
+ va_end(ap);
+
+ p = buf;
+ str_buf[0] = str_buf[1] = ' ';
+ str_buf[2] = '|';
+
+ for (i = 0; i < buflen; i++) {
+ if (!(i & 0xf))
+ fprintf(stderr, "%08zX: ", i);
+ fprintf(stderr, " %02x", *p);
+ if (isalnum(*p))
+ str_buf[(i & 0xf) + 3] = *p;
+ else
+ str_buf[(i & 0xf) + 3] = '.';
+ if ((i & 0xf) == 0xf || i == buflen -1) {
+ size_t k;
+
+ for (k = 0; k < (0x10 - (i & 0xf) - 1); k++)
+ fprintf(stderr, " ");
+ str_buf[(i & 0xf) + 4] = '|';
+ str_buf[(i & 0xf) + 5 ] = '\0';
+ fprintf(stderr, "%s\n", str_buf);
+ }
+ p++;
+ }
+}
+
+void hexdump_tvb(tvbuff_t *tvb, const gint offset, size_t len)
+{
+ len = len > sizeof dbg_buf ? sizeof dbg_buf : len;
+ if (tvb_memcpy(tvb, dbg_buf, offset, len)) {
+ hexdump(dbg_buf, len, "tvb buff (%zu bytes):\n", len);
+ } else
+ fprintf(stderr, "tvb buff COPY FAILED\n");
+}
+#endif
+
+/* HAPPP message classes */
+enum {
+ PEER_MSG_CLASS_CONTROL = 0,
+ PEER_MSG_CLASS_ERROR,
+ PEER_MSG_CLASS_STICKTABLE = 0x0a,
+ PEER_MSG_CLASS_RESERVED = 0xff,
+};
+
+enum {
+ CONTROL_CLASS_INDEX,
+ ERROR_CLASS_INDEX,
+ STICK_TABLE_CLASS_INDEX,
+ RESERVED_CLASS_INDEX,
+};
+
+/* Control messages */
+enum {
+ PEER_MSG_CTRL_RESYNCREQ = 0,
+ PEER_MSG_CTRL_RESYNCFINISHED,
+ PEER_MSG_CTRL_RESYNCPARTIAL,
+ PEER_MSG_CTRL_RESYNCCONFIRM,
+ PEER_MSG_CTRL_HEARTBEAT,
+};
+
+/* Error messages */
+enum {
+ PEER_MSG_ERR_PROTOCOL = 0,
+ PEER_MSG_ERR_SIZELIMIT,
+};
+
+/* Stick table messages */
+enum {
+ PEER_MSG_STKT_UPDATE = 0x80,
+ PEER_MSG_STKT_INCUPDATE,
+ PEER_MSG_STKT_DEFINE,
+ PEER_MSG_STKT_SWITCH,
+ PEER_MSG_STKT_ACK,
+ PEER_MSG_STKT_UPDATE_TIMED,
+ PEER_MSG_STKT_INCUPDATE_TIMED,
+};
+
+/* This is the different key types of the stick tables.
+ * Same definitions as in HAProxy sources.
+ */
+enum {
+ SMP_T_ANY, /* any type */
+ SMP_T_BOOL, /* boolean */
+ SMP_T_SINT, /* signed 64bits integer type */
+ SMP_T_ADDR, /* ipv4 or ipv6, only used for input type compatibility */
+ SMP_T_IPV4, /* ipv4 type */
+ SMP_T_IPV6, /* ipv6 type */
+ SMP_T_STR, /* char string type */
+ SMP_T_BIN, /* buffer type */
+ SMP_T_METH, /* contain method */
+ SMP_TYPES /* number of types, must always be last */
+};
+
+/* The types of data we can store in a stick table.
+ * Same definitions as in HAProxy sources.
+ */
+enum {
+ STKT_DT_SERVER_ID, /* the server ID to use with this stream if > 0 */
+ STKT_DT_GPT0, /* General Purpose Flag 0. */
+ STKT_DT_GPC0, /* General Purpose Counter 0 (unsigned 32-bit integer) */
+ STKT_DT_GPC0_RATE, /* General Purpose Counter 0's event rate */
+ STKT_DT_CONN_CNT, /* cumulated number of connections */
+ STKT_DT_CONN_RATE, /* incoming connection rate */
+ STKT_DT_CONN_CUR, /* concurrent number of connections */
+ STKT_DT_SESS_CNT, /* cumulated number of sessions (accepted connections) */
+ STKT_DT_SESS_RATE, /* accepted sessions rate */
+ STKT_DT_HTTP_REQ_CNT, /* cumulated number of incoming HTTP requests */
+ STKT_DT_HTTP_REQ_RATE, /* incoming HTTP request rate */
+ STKT_DT_HTTP_ERR_CNT, /* cumulated number of HTTP requests errors (4xx) */
+ STKT_DT_HTTP_ERR_RATE, /* HTTP request error rate */
+ STKT_DT_BYTES_IN_CNT, /* cumulated bytes count from client to servers */
+ STKT_DT_BYTES_IN_RATE, /* bytes rate from client to servers */
+ STKT_DT_BYTES_OUT_CNT, /* cumulated bytes count from servers to client */
+ STKT_DT_BYTES_OUT_RATE, /* bytes rate from servers to client */
+ STKT_STATIC_DATA_TYPES, /* number of types above */
+};
+
+/* The types of data in stick stored in stick tables.
+ * Same definitions as in HAProxy sources.
+ */
+enum {
+ STD_T_SINT = 0, /* signed int */
+ STD_T_UINT, /* unsigned int */
+ STD_T_ULL, /* unsigned long long */
+ STD_T_FRQP, /* freq_ctr structure made of three unsigned int */
+};
+
+/* Prototypes */
+void proto_reg_handoff_happp(void);
+void proto_register_happp(void);
+
+/* Initialize the protocol and registered fields */
+static int proto_happp = -1;
+static int hf_happp_fake = -1;
+static int hf_happp_version = -1;
+static int hf_happp_remotepeerid = -1;
+static int hf_happp_localpeerid = -1;
+static int hf_happp_processpid = -1;
+static int hf_happp_relativepid = -1;
+static int hf_happp_status = -1;
+static int hf_happp_msg = -1;
+static int hf_happp_msg_class = -1;
+static int hf_happp_msg_type = -1;
+static int hf_happp_msg_len = -1;
+static int hf_happp_stkt_def_id = -1;
+static int hf_happp_stkt_def_name_len = -1;
+static int hf_happp_stkt_def_name_value = -1;
+static int hf_happp_stkt_def_key_type = -1;
+static int hf_happp_stkt_def_key_len = -1;
+static int hf_happp_stkt_def_data_types = -1;
+static int hf_happp_stkt_updt_update_id = -1;
+static int hf_happp_stkt_updt_expire = -1;
+static int hf_happp_stkt_updt_key_len = -1;
+static int hf_happp_stkt_updt_key_ipv4_value = -1;
+static int hf_happp_stkt_updt_key_str_value = -1;
+static int hf_happp_stkt_updt_key_int_value = -1;
+static int hf_happp_stkt_updt_key_bytes_value = -1;
+static int hf_happp_stkt_updt_data_server_id = -1;
+static int hf_happp_stkt_updt_data_gpt0 = -1;
+static int hf_happp_stkt_updt_data_gpc0 = -1;
+static int hf_happp_stkt_updt_data_gpc0_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_gpc0_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_gpc0_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_data_conn_cnt = -1;
+static int hf_happp_stkt_updt_data_conn_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_conn_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_conn_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_data_conn_cur = -1;
+static int hf_happp_stkt_updt_data_sess_cnt = -1;
+static int hf_happp_stkt_updt_data_sess_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_sess_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_sess_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_data_http_req_cnt = -1;
+static int hf_happp_stkt_updt_data_http_req_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_http_req_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_http_req_rate_prev_ctr= -1;
+static int hf_happp_stkt_updt_data_http_err_cnt = -1;
+static int hf_happp_stkt_updt_data_http_err_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_http_err_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_http_err_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_data_bytes_in_cnt = -1;
+static int hf_happp_stkt_updt_data_bytes_in_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_bytes_in_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_bytes_in_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_data_bytes_out_cnt = -1;
+static int hf_happp_stkt_updt_data_bytes_out_rate_curr_tick = -1;
+static int hf_happp_stkt_updt_data_bytes_out_rate_curr_ctr = -1;
+static int hf_happp_stkt_updt_data_bytes_out_rate_prev_ctr = -1;
+static int hf_happp_stkt_updt_ack_table_id = -1;
+static int hf_happp_stkt_updt_ack_update_id = -1;
+
+struct happp_cv_data_t {
+ /* Same thing for the type of the the stick table keys */
+ uint64_t stkt_key_type;
+
+ /* Same thing for the length of the stick table keys.
+ * Note that this is true only for key types different of SMT_T_STR (strings)
+ * and SMT_T_SINT (signed ints).
+ */
+ uint64_t stkt_key_len;
+
+ /* Same thing for the types of the stick table data */
+ uint64_t stkt_data_types;
+ void *data;
+};
+
+struct hf_stkt_data_type {
+ const char *name;
+ unsigned int type;
+ int *hf_ids[3];
+ size_t hf_ids_len;
+};
+
+struct hf_stkt_data_type hf_stkt_data_types[] = {
+ [STKT_DT_SERVER_ID] = {
+ .name = "server_id",
+ .type = STD_T_SINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_server_id,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_GPT0] = {
+ .name = "gpt0",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_gpt0,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_GPC0] = {
+ .name = "gpc0",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_gpc0,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_GPC0_RATE] = {
+ .name = "gpc0_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_gpc0_rate_curr_tick,
+ &hf_happp_stkt_updt_data_gpc0_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_gpc0_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_CONN_CNT] = {
+ .name = "conn_cnt",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_conn_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_CONN_RATE] = {
+ .name = "conn_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_conn_rate_curr_tick,
+ &hf_happp_stkt_updt_data_conn_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_conn_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_CONN_CUR] = {
+ .name = "conn_cur",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_conn_cur,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_SESS_CNT] = {
+ .name = "sess_cnt",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_sess_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_SESS_RATE] = {
+ .name = "sess_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_sess_rate_curr_tick,
+ &hf_happp_stkt_updt_data_sess_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_sess_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_HTTP_REQ_CNT] = {
+ .name = "http_req_cnt",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_http_req_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_HTTP_REQ_RATE] = {
+ .name = "http_req_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_http_req_rate_curr_tick,
+ &hf_happp_stkt_updt_data_http_req_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_http_req_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_HTTP_ERR_CNT] = {
+ .name = "http_err_cnt",
+ .type = STD_T_UINT,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_http_err_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_HTTP_ERR_RATE] = {
+ .name = "http_err_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_http_err_rate_curr_tick,
+ &hf_happp_stkt_updt_data_http_err_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_http_err_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_BYTES_IN_CNT] = {
+ .name = "bytes_in_cnt",
+ .type = STD_T_ULL,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_bytes_in_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_BYTES_IN_RATE] = {
+ .name = "bytes_in_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_bytes_in_rate_curr_tick,
+ &hf_happp_stkt_updt_data_bytes_in_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_bytes_in_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+ [STKT_DT_BYTES_OUT_CNT] = {
+ .name = "bytes_out_cnt",
+ .type = STD_T_ULL,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_bytes_out_cnt,
+ },
+ .hf_ids_len = 1,
+ },
+ [STKT_DT_BYTES_OUT_RATE] = {
+ .name = "bytes_out_rate",
+ .type = STD_T_FRQP,
+ .hf_ids = {
+ &hf_happp_stkt_updt_data_bytes_out_rate_curr_tick,
+ &hf_happp_stkt_updt_data_bytes_out_rate_curr_ctr,
+ &hf_happp_stkt_updt_data_bytes_out_rate_prev_ctr,
+ },
+ .hf_ids_len = 3,
+ },
+};
+
+
+/* Initialize the subtree pointers */
+static gint ett_happp = -1;
+static gint ett_happp_msg = -1;
+
+static dissector_handle_t happp_tcp_handle;
+
+static const char *control_msg_type_str_from_byte(guint8 c);
+static const char *error_msg_type_str_from_byte(guint8 c);
+static const char *stkt_msg_type_str_from_byte(guint8 c);
+
+struct class_def_t {
+ const char *class_str;
+ const char *col_info_str;
+ const char *(*msg_type_str_func)(guint8 c);
+ unsigned int count;
+};
+
+static struct class_def_t class_def_tab[] = {
+ [CONTROL_CLASS_INDEX] = {
+ .class_str = "Control Class Message",
+ .col_info_str = "Ctl",
+ .msg_type_str_func = control_msg_type_str_from_byte,
+ },
+ [ERROR_CLASS_INDEX] = {
+ .class_str = "Error Class Message",
+ .col_info_str = "Err",
+ .msg_type_str_func = error_msg_type_str_from_byte,
+ },
+ [STICK_TABLE_CLASS_INDEX] = {
+ .class_str = "Stick Table Class Message",
+ .col_info_str = "Stkt",
+ .msg_type_str_func = stkt_msg_type_str_from_byte,
+ },
+ [RESERVED_CLASS_INDEX] = {
+ .class_str = "Reserved Class Message",
+ .col_info_str = "Res",
+ }
+};
+
+static int control_class_index_from_byte(guint8 c)
+{
+ switch (c) {
+ case PEER_MSG_CLASS_CONTROL:
+ return CONTROL_CLASS_INDEX;
+ case PEER_MSG_CLASS_ERROR:
+ return ERROR_CLASS_INDEX;
+ case PEER_MSG_CLASS_STICKTABLE:
+ return STICK_TABLE_CLASS_INDEX;
+ case PEER_MSG_CLASS_RESERVED:
+ return RESERVED_CLASS_INDEX;
+ default:
+ return -1;
+ };
+}
+
+static const char *class_str_from_byte(guint8 c)
+{
+ int class_idx;
+
+ class_idx = control_class_index_from_byte(c);
+ if (class_idx == -1)
+ return "N/A";
+
+ return class_def_tab[class_idx].class_str;
+}
+
+static const char *control_msg_type_str_from_byte(guint8 c)
+{
+ switch (c) {
+ case PEER_MSG_CTRL_RESYNCREQ:
+ return "resync. request";
+ case PEER_MSG_CTRL_RESYNCFINISHED:
+ return "resync. finished";
+ case PEER_MSG_CTRL_RESYNCPARTIAL:
+ return "resync. partial";
+ case PEER_MSG_CTRL_RESYNCCONFIRM:
+ return "resync. confirm";
+ case PEER_MSG_CTRL_HEARTBEAT:
+ return "heartbeat";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *stkt_msg_type_str_from_byte(guint8 c)
+{
+ switch (c) {
+ case PEER_MSG_STKT_UPDATE:
+ return "update";
+ case PEER_MSG_STKT_INCUPDATE:
+ return "inc. update";
+ case PEER_MSG_STKT_DEFINE:
+ return "definition";
+ case PEER_MSG_STKT_SWITCH:
+ return "switch";
+ case PEER_MSG_STKT_ACK:
+ return "ack";
+ case PEER_MSG_STKT_UPDATE_TIMED:
+ return "update (with expiration)";
+ case PEER_MSG_STKT_INCUPDATE_TIMED:
+ return "inc. update (with expiration)";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *error_msg_type_str_from_byte(guint8 c)
+{
+ switch (c) {
+ case PEER_MSG_ERR_PROTOCOL:
+ return "protocol error";
+ case PEER_MSG_ERR_SIZELIMIT:
+ return "limit size error";
+ default:
+ return "Unknown";
+ }
+}
+
+#define MAX_ENC_LEN 10
+static uint64_t intdecode(unsigned char **str, size_t len) {
+ int i = 0;
+ uint64_t ret;
+
+ if (len < 1 || len > MAX_ENC_LEN) {
+ *str = NULL;
+ return 0;
+ }
+
+ ret = *(*str)++;
+ len--;
+ if ((ret & 0xf0) != 0xf0 || !len)
+ return ret;
+
+ do {
+ /* As shifting value may be greater than 8 (size of **str in bits),
+ * uint64_t cast is required.
+ */
+ ret += (uint64_t)**str << (4 + 7 * i++);
+ } while (len-- && (*(*str)++ & 0x80) == 0x80);
+
+ return ret;
+}
+
+static int dissect_happp_handshake_pdu(tvbuff_t *tvb, packet_info *pinfo,
+ proto_tree *happp_tree)
+{
+ int line_len, token_len;
+ gint offset = 0, next_offset;
+ const guchar *line, *line_end, *next_token;
+ size_t protocol_strlen;
+
+ line_len = tvb_find_line_end(tvb, offset, -1, &next_offset, TRUE);
+ /* XXX TO DO */
+ if (line_len == -1)
+ return -1;
+
+ protocol_strlen = strlen(HAPPP_PROTOCOL);
+
+ line = tvb_get_ptr(tvb, offset, line_len);
+ line_end = line + (next_offset - offset);
+ /* The line must contain at least HAPPP_PROTOCOL string followed by a space,
+ * then version string (at least one character) and a '\n' character.
+ */
+ if (line_len >= (int)protocol_strlen + 3 &&
+ !tvb_strncaseeql(tvb, 0, HAPPP_PROTOCOL, protocol_strlen)) {
+ /* This is an Hello message */
+ col_set_str(pinfo->cinfo, COL_INFO, "Hello message");
+
+ token_len = get_token_len(line + protocol_strlen + 1, line_end, &next_token);
+ proto_tree_add_item(happp_tree, hf_happp_version, tvb,
+ offset + protocol_strlen + 1, token_len,
+ ENC_ASCII | ENC_NA);
+
+ offset = next_offset;
+ line_len = tvb_find_line_end(tvb, offset, -1, &next_offset, TRUE);
+ /* XXX TO DO */
+ if (line_len == -1)
+ return -1;
+
+ line = tvb_get_ptr(tvb, offset, line_len);
+ line_end = line + (next_offset - offset);
+ /* Get next token: remotepeerid */
+ token_len = get_token_len(line, line_end, &next_token);
+ if (!token_len)
+ return -1;
+
+ proto_tree_add_item(happp_tree, hf_happp_remotepeerid, tvb, offset,
+ token_len, ENC_ASCII | ENC_NA);
+
+ /* Retrieve next line */
+ offset = next_offset;
+ line_len = tvb_find_line_end(tvb, offset, -1, &next_offset, TRUE);
+ /* XXX TO DO */
+ if (line_len == -1)
+ return -1;
+
+ line = tvb_get_ptr(tvb, offset, line_len);
+ line_end = line + (next_offset - offset);
+ /* Get next token: localpeerid */
+ token_len = get_token_len(line, line_end, &next_token);
+ if (!token_len)
+ return -1;
+
+ proto_tree_add_item(happp_tree, hf_happp_localpeerid, tvb, offset,
+ token_len, ENC_ASCII | ENC_NA);
+ offset += next_token - line;
+ line = next_token;
+
+ /* Get next token: processpid */
+ token_len = get_token_len(line, line_end, &next_token);
+ if (!token_len)
+ return -1;
+
+ proto_tree_add_item(happp_tree, hf_happp_processpid, tvb, offset,
+ token_len, ENC_ASCII | ENC_NA);
+ offset += next_token - line;
+ line = next_token;
+
+ /* Get next token: relativepid */
+ token_len = get_token_len(line, line_end, &next_token);
+ if (!token_len)
+ return -1;
+
+ proto_tree_add_item(happp_tree, hf_happp_relativepid, tvb, offset,
+ token_len, ENC_ASCII | ENC_NA);
+ offset += next_token - line;
+ line = next_token;
+
+ }
+ else if (line_len == 3) {
+ col_set_str(pinfo->cinfo, COL_INFO, "Status message");
+ token_len = get_token_len(line, line_end, &next_token);
+ if (!token_len)
+ return -1;
+
+ proto_tree_add_item(happp_tree, hf_happp_status, tvb, offset,
+ token_len, ENC_ASCII | ENC_NA);
+ }
+
+ return tvb_captured_length(tvb);
+}
+
+/* Reset to zero all statistics counters of class_def_array */
+static void init_class_def_tab(struct class_def_t *class_def_array, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ class_def_array[i].count = 0;
+}
+
+/* Add statistics counting information about HAPPP message classes to
+ * info column (numbers of messages found in an HAPPP PDU by class).
+ */
+static inline void col_info_append_class(packet_info *pinfo, int class_index,
+ int *first_class)
+{
+ if (!class_def_tab[class_index].count)
+ return;
+
+ col_append_fstr(pinfo->cinfo, COL_INFO, "%s%s=%u",
+ *first_class ? "" : " ",
+ class_def_tab[class_index].col_info_str,
+ class_def_tab[class_index].count);
+ class_def_tab[class_index].count = 0;
+ *first_class = 0;
+}
+
+
+static int intdecode_from_tvbuff(tvbuff_t *tvb, uint64_t *dec_val,
+ guint *offset, guint total)
+{
+ unsigned char *p, enc_buf[MAX_ENC_LEN];
+ size_t max_enc_buf_len, left;
+
+ left = total - *offset;
+ max_enc_buf_len = left < sizeof enc_buf ? left : sizeof enc_buf;
+ if (!tvb_memcpy(tvb, enc_buf, *offset, max_enc_buf_len))
+ return -1;
+
+ p = enc_buf;
+ *dec_val = intdecode(&p, max_enc_buf_len);
+ if (!p)
+ return -1;
+
+ *offset += p - enc_buf;
+
+ return 0;
+}
+
+static int add_enc_field_to_happp_tree(int field_id, proto_tree *tree, tvbuff_t *tvb,
+ guint *offset, guint total, uint64_t *val)
+{
+ uint64_t dec_val;
+ size_t dec_val_len;
+ guint saved_offset;
+
+ saved_offset = *offset;
+ if (intdecode_from_tvbuff(tvb, &dec_val, offset, total) < 0)
+ return -1;
+
+ dec_val_len = *offset - saved_offset;
+ proto_tree_add_uint64_format_value(tree, field_id, tvb, saved_offset,
+ dec_val_len, dec_val, "%" PRIu64, dec_val);
+
+ if (val)
+ *val = dec_val;
+
+ return 0;
+}
+
+static int add_int_field_to_happp_tree(int field_id,
+ tvbuff_t *tvb, proto_tree *tree,
+ guint *offset, guint total _U_)
+{
+ uint32_t val;
+
+ if (!tvb_memcpy(tvb, &val, *offset, sizeof val))
+ return -1;
+
+ val = ntohl(val);
+ proto_tree_add_int_format_value(tree, field_id, tvb, *offset,
+ sizeof val, val, "%" PRId32, val);
+ *offset += sizeof val;
+
+ return 0;
+}
+
+static void dissect_happp_stkt_define_msg(tvbuff_t *tvb, packet_info *pinfo _U_,
+ proto_tree *tree, guint offset, guint total)
+{
+ uint64_t dec_val;
+ uint64_t stkt_key_type;
+ uint64_t stkt_key_len;
+ uint64_t stkt_data_types;
+ struct happp_cv_data_t *happp_cv_data;
+ conversation_t *cv;
+
+ if (add_enc_field_to_happp_tree(hf_happp_stkt_def_id, tree,
+ tvb, &offset, total, NULL) < 0 ||
+ add_enc_field_to_happp_tree(hf_happp_stkt_def_name_len, tree,
+ tvb, &offset, total, &dec_val) < 0)
+ return;
+
+ /* Add the stick table name to HAPPP proto tree */
+ proto_tree_add_item(tree, hf_happp_stkt_def_name_value, tvb, offset, dec_val,
+ ENC_ASCII | ENC_NA);
+ offset += dec_val;
+
+ if (add_enc_field_to_happp_tree(hf_happp_stkt_def_key_type, tree,
+ tvb, &offset, total, &stkt_key_type) < 0 ||
+ add_enc_field_to_happp_tree(hf_happp_stkt_def_key_len, tree,
+ tvb, &offset, total, &stkt_key_len) < 0 ||
+ add_enc_field_to_happp_tree(hf_happp_stkt_def_data_types, tree,
+ tvb, &offset, total, &stkt_data_types) < 0)
+ return;
+
+ cv = find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
+ pinfo->ptype, pinfo->srcport, pinfo->destport, 0);
+ if (!cv)
+ return;
+
+ /*
+ * According to the documentation, it is not our responsibility
+ * to free this allocated memory.
+ */
+ happp_cv_data = (struct happp_cv_data_t *)wmem_alloc(wmem_file_scope(),
+ sizeof *happp_cv_data);
+ if (!happp_cv_data)
+ return;
+
+ happp_cv_data->stkt_key_type = stkt_key_type;
+ happp_cv_data->stkt_key_len = stkt_key_len;
+ happp_cv_data->stkt_data_types = stkt_data_types;
+
+ conversation_add_proto_data(cv, proto_happp, happp_cv_data);
+}
+
+static void dissect_happp_stkt_update_msg(tvbuff_t *tvb, packet_info *pinfo _U_,
+ proto_tree *tree, guint offset, guint total,
+ unsigned char msg_type_byte)
+{
+ unsigned int data_type;
+ uint64_t *stkt_key_type;
+ uint64_t *stkt_key_len;
+ struct happp_cv_data_t *happp_cv_data;
+ int has_update_id, has_exp;
+ conversation_t *cv;
+
+ cv = find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
+ pinfo->ptype, pinfo->srcport, pinfo->destport, 0);
+ if (!cv)
+ return;
+
+ happp_cv_data = (struct happp_cv_data_t *)conversation_get_proto_data(cv, proto_happp);
+ if (!happp_cv_data)
+ return;
+
+ has_update_id = msg_type_byte == PEER_MSG_STKT_UPDATE ||
+ msg_type_byte == PEER_MSG_STKT_UPDATE_TIMED;
+ has_exp = msg_type_byte == PEER_MSG_STKT_UPDATE_TIMED ||
+ msg_type_byte == PEER_MSG_STKT_INCUPDATE_TIMED;
+ /* Add the stick table update ID to HAPPP tree */
+ if (has_update_id &&
+ add_int_field_to_happp_tree(hf_happp_stkt_updt_update_id, tvb, tree,
+ &offset, total) < 0)
+ return;
+
+ if (has_exp &&
+ add_int_field_to_happp_tree(hf_happp_stkt_updt_expire, tvb, tree,
+ &offset, total) < 0)
+ return;
+
+
+ stkt_key_type = &happp_cv_data->stkt_key_type;
+ stkt_key_len = &happp_cv_data->stkt_key_len;
+
+ switch(*stkt_key_type) {
+ case SMP_T_STR:
+ if (add_enc_field_to_happp_tree(hf_happp_stkt_updt_key_len, tree, tvb,
+ &offset, total, stkt_key_len) < 0)
+ return;
+
+ proto_tree_add_item(tree, hf_happp_stkt_updt_key_str_value, tvb,
+ offset, *stkt_key_len, ENC_ASCII | ENC_NA);
+ offset += *stkt_key_len;
+ break;
+ case SMP_T_SINT:
+ if (add_int_field_to_happp_tree(hf_happp_stkt_updt_key_int_value, tvb, tree,
+ &offset, total) < 0)
+ return;
+
+ break;
+ case SMP_T_IPV4:
+ proto_tree_add_ipv4(tree, hf_happp_stkt_updt_key_ipv4_value,
+ tvb, offset, 4, tvb_get_ipv4(tvb, offset));
+ offset += 4;
+ break;
+ default:
+ proto_tree_add_item(tree, hf_happp_stkt_updt_key_bytes_value,
+ tvb, offset, *stkt_key_len, ENC_NA);
+ offset += *stkt_key_len;
+ break;
+ }
+
+ /* Data dissection */
+ for (data_type = 0;
+ data_type < sizeof hf_stkt_data_types / sizeof *hf_stkt_data_types;
+ data_type++) {
+ struct hf_stkt_data_type *hf_stkt_dtype;
+ size_t i;
+
+ if (!(happp_cv_data->stkt_data_types & (1 << data_type)))
+ continue;
+
+ hf_stkt_dtype = &hf_stkt_data_types[data_type];
+
+ for (i = 0; i < hf_stkt_dtype->hf_ids_len; i++)
+ if (add_enc_field_to_happp_tree(*hf_stkt_dtype->hf_ids[i], tree, tvb,
+ &offset, total, NULL) < 0)
+ return;
+ }
+}
+
+static void dissect_happp_stkt_ack_msg(tvbuff_t *tvb, packet_info *pinfo _U_,
+ proto_tree *tree, guint offset, guint total)
+{
+ if (add_enc_field_to_happp_tree(hf_happp_stkt_updt_ack_table_id, tree, tvb,
+ &offset, total, NULL) < 0)
+ return;
+
+ if (add_int_field_to_happp_tree(hf_happp_stkt_updt_ack_update_id, tvb, tree,
+ &offset, total) < 0)
+ return;
+}
+
+static void dissect_happp_stk_msg(tvbuff_t *tvb, packet_info *pinfo _U_,
+ proto_tree *tree, guint8 msg_type_byte,
+ guint offset, guint total)
+{
+ switch (msg_type_byte) {
+ case PEER_MSG_STKT_DEFINE:
+ dissect_happp_stkt_define_msg(tvb, pinfo, tree, offset, total);
+ break;
+ case PEER_MSG_STKT_UPDATE:
+ case PEER_MSG_STKT_INCUPDATE:
+ case PEER_MSG_STKT_UPDATE_TIMED:
+ case PEER_MSG_STKT_INCUPDATE_TIMED:
+ dissect_happp_stkt_update_msg(tvb, pinfo, tree, offset, total, msg_type_byte);
+ break;
+ case PEER_MSG_STKT_ACK:
+ dissect_happp_stkt_ack_msg(tvb, pinfo, tree, offset, total);
+ break;
+ };
+
+}
+
+static void
+dissect_happp_msg(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ guint8 msg_class_byte, guint8 msg_type_byte,
+ guint *offset, guint total)
+{
+ unsigned char *p, enc_buf[MAX_ENC_LEN];
+ uint64_t dec_msg_len;
+ size_t max_enc_buf_len, left, dec_val_len;
+
+ left = total - *offset;
+ max_enc_buf_len = left < sizeof enc_buf ? left : sizeof enc_buf;
+ if (!tvb_memcpy(tvb, enc_buf, *offset, max_enc_buf_len))
+ return;
+
+ p = enc_buf;
+ dec_msg_len = intdecode(&p, max_enc_buf_len);
+ if (!p)
+ return;
+
+ dec_val_len = p - enc_buf;
+ proto_tree_add_uint64_format_value(tree, hf_happp_msg_len,
+ tvb, *offset, dec_val_len, dec_msg_len,
+ "%" PRIu64, dec_msg_len);
+ *offset += dec_val_len;
+
+ switch (msg_class_byte) {
+ case PEER_MSG_CLASS_STICKTABLE:
+ dissect_happp_stk_msg(tvb, pinfo, tree, msg_type_byte, *offset, total);
+ break;
+ }
+
+ *offset += dec_msg_len;
+}
+
+/* Code to actually dissect the packets */
+static int
+dissect_happp_pdu(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
+{
+ /* Set up structures needed to add the protocol subtree and manage it */
+ proto_item *item;
+ proto_tree *happp_tree;
+ /* Other misc. local variables. */
+ guint total, offset;
+ int first_message, first_class, curr_class, prev_class;
+ guint8 first_byte;
+ size_t sizeof_class_def_tab;
+
+ offset = 0;
+ first_message = first_class = 1;
+ total = tvb_reported_length(tvb);
+
+ /* create display subtree for the protocol */
+ item = proto_tree_add_item(tree, proto_happp, tvb, offset, -1, ENC_NA);
+ happp_tree = proto_item_add_subtree(item, ett_happp);
+
+ /* Set the protocol column value */
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, "happp");
+
+ first_byte = (gchar)tvb_get_guint8(tvb, offset);
+ if (first_byte != PEER_MSG_CLASS_CONTROL &&
+ first_byte != PEER_MSG_CLASS_ERROR &&
+ first_byte != PEER_MSG_CLASS_STICKTABLE &&
+ first_byte != PEER_MSG_CLASS_RESERVED)
+ return dissect_happp_handshake_pdu(tvb, pinfo, happp_tree);
+
+ /* Reset class_def_tab message class counters */
+ sizeof_class_def_tab = sizeof class_def_tab / sizeof *class_def_tab;
+ init_class_def_tab(class_def_tab, sizeof_class_def_tab);
+
+ prev_class = curr_class = -1;
+ col_set_str(pinfo->cinfo, COL_INFO, "[");
+ while (offset < total) {
+ guint8 msg_class_byte, msg_type_byte;
+ const char *(*msg_type_str_func)(guint8 c);
+ struct class_def_t *class_def;
+
+ if (first_message) {
+ msg_class_byte = first_byte;
+ }
+ else {
+ msg_class_byte = tvb_get_guint8(tvb, offset);
+ }
+ curr_class = control_class_index_from_byte(msg_class_byte);
+ if (curr_class == -1)
+ return -1;
+
+ if (first_message) {
+ prev_class = curr_class;
+ first_message = 0;
+ }
+
+ class_def = &class_def_tab[curr_class];
+ class_def->count++;
+ msg_type_str_func = class_def->msg_type_str_func;
+
+ /* Insert a line separator */
+ proto_tree_add_item(happp_tree, hf_happp_fake, tvb,
+ offset, 0,
+ ENC_ASCII | ENC_NA);
+ proto_tree_add_uint_format_value(happp_tree, hf_happp_msg_class,
+ tvb, offset++, 1, msg_class_byte,
+ "%u (%s)", msg_class_byte,
+ class_str_from_byte(msg_class_byte));
+ msg_type_byte = tvb_get_guint8(tvb, offset);
+
+ /* First byte: message class */
+ switch (msg_class_byte) {
+ case PEER_MSG_CLASS_CONTROL:
+ case PEER_MSG_CLASS_ERROR:
+ case PEER_MSG_CLASS_STICKTABLE:
+ /* Second byte: message type in the class */
+ proto_tree_add_uint_format_value(happp_tree, hf_happp_msg_type,
+ tvb, offset++, 1, msg_type_byte,
+ "%u (%s)", msg_type_byte,
+ msg_type_str_func(msg_type_byte));
+ break;
+ case PEER_MSG_CLASS_RESERVED:
+ col_append_str(pinfo->cinfo, COL_INFO, "NON IMPLEMENTED");
+ break;
+ }
+ if (msg_class_byte >= PEER_MSG_CLASS_STICKTABLE)
+ dissect_happp_msg(tvb, pinfo, happp_tree,
+ msg_class_byte, msg_type_byte, &offset, total);
+
+ /* Sequentially add counting information to info column about
+ * number of messages found by class in an HAPPP PDU.
+ * For instance if an HAPPP PDU contains this sequence of messages:
+ * 1 Control message - 2 Stick Table messages - 3 Control messages
+ * column information displays: [Ctl=1 Stkt=2 Ctl=3].
+ */
+ if (curr_class != prev_class) {
+ col_info_append_class(pinfo, prev_class, &first_class);
+ col_info_append_class(pinfo, curr_class, &first_class);
+ prev_class = curr_class;
+ }
+ else if (offset >= total) {
+ /* Last message */
+ col_info_append_class(pinfo, curr_class, &first_class);
+ }
+ }
+ col_append_str(pinfo->cinfo, COL_INFO, "]");
+
+ return tvb_captured_length(tvb);
+}
+
+static guint
+get_happp_msg_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
+{
+ guint ret, len, left;
+ gint next_offset, line_len;
+ guint8 first_byte;
+ uint64_t dec_len;
+ int saved_offset;
+
+ /* 0 means there is not enough data to get length. */
+ ret = 0;
+
+ len = tvb_reported_length(tvb);
+ left = len - offset;
+ if (left < HAPPP_MSG_MIN_LEN)
+ goto out;
+
+ saved_offset = offset;
+ first_byte = (gchar)tvb_get_guint8(tvb, offset);
+ if (first_byte == PEER_MSG_CLASS_CONTROL ||
+ first_byte == PEER_MSG_CLASS_ERROR ||
+ first_byte == PEER_MSG_CLASS_RESERVED) {
+ ret = HAPPP_MSG_MIN_LEN;
+ } else if (first_byte == PEER_MSG_CLASS_STICKTABLE) {
+ int soff;
+
+ left -= HAPPP_MSG_MIN_LEN;
+ offset += HAPPP_MSG_MIN_LEN;
+ soff = offset;
+ if (intdecode_from_tvbuff(tvb, &dec_len, &offset, len) < 0)
+ goto out;
+
+ left -= offset - soff;
+ if (left < dec_len)
+ goto out;
+
+ ret = dec_len + offset - saved_offset;
+ } else {
+ /* hello message: add line lengths to compute this message length. */
+ for (;;) {
+ line_len = tvb_find_line_end(tvb, offset, -1, &next_offset, TRUE);
+ if (line_len == -1)
+ break;
+
+ ret += line_len + 1;
+ offset += line_len + 1;
+ }
+ }
+
+ out:
+ return ret;
+ }
+
+static int
+dissect_happp_tcp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
+{
+ tcp_dissect_pdus(tvb, pinfo, tree, TRUE,
+ HAPPP_MSG_MIN_LEN, get_happp_msg_len, dissect_happp_pdu, data);
+
+ return tvb_captured_length(tvb);
+}
+
+/* Register the protocol with Wireshark.
+ *
+ * This format is required because a script is used to build the C function that
+ * calls all the protocol registration.
+ */
+void
+proto_register_happp(void)
+{
+ /* Setup list of header fields See Section 1.5 of README.dissector for
+ * details. */
+ static hf_register_info hf[] = {
+ {
+ /* This one is used as separator between HAPPP messages */
+ &hf_happp_fake,
+ {
+ ":-----------------------------------------------", "happp.fake",
+ FT_STRING, STR_ASCII, NULL, 0, "FAKE", HFILL
+ }
+ },
+ {
+ &hf_happp_version,
+ {
+ "version", "happp.version",
+ FT_STRING, STR_ASCII, NULL, 0, "version", HFILL
+ }
+ },
+ {
+ &hf_happp_remotepeerid,
+ {
+ "remotepeerid", "happp.remotepeerid",
+ FT_STRING, STR_ASCII, NULL, 0, "remote peer id", HFILL
+ }
+ },
+ {
+ &hf_happp_localpeerid,
+ {
+ "localpeerid", "happp.localpeerid",
+ FT_STRING, STR_ASCII, NULL, 0, "local peer id", HFILL
+ }
+ },
+ {
+ &hf_happp_processpid,
+ {
+ "processpid", "happp.processpid",
+ FT_STRING, STR_ASCII, NULL, 0, "process pid", HFILL
+ }
+ },
+ {
+ &hf_happp_relativepid,
+ {
+ "relativepid", "happp.relativepid",
+ FT_STRING, STR_ASCII, NULL, 0, "relative pid", HFILL
+ }
+ },
+ {
+ &hf_happp_status,
+ {
+ "status", "happp.status",
+ FT_STRING, STR_ASCII, NULL, 0, "status message", HFILL
+ }
+ },
+ {
+ &hf_happp_msg,
+ {
+ "message", "happp.msg",
+ FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_msg_class,
+ {
+ "message class", "happp.msg.class",
+ FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_msg_type,
+ {
+ "message type", "happp.msg.type",
+ FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_msg_len,
+ {
+ "message length", "happp.msg.len",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_id,
+ {
+ " ID", "happp.msg.stkt.def.id",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_name_len,
+ {
+ " name length", "happp.msg.stkt.def.name.length",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_name_value,
+ {
+ " name", "happp.msg.stkt.def.name.value",
+ FT_STRING, STR_ASCII, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_key_type,
+ {
+ " key type", "happp.msg.stkt.def.key.type",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_key_len,
+ {
+ " key length", "happp.msg.stkt.def.key.len",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_def_data_types,
+ {
+ " data types", "happp.msg.stkt.def.data_types",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_update_id,
+ {
+ " update ID", "happp.msg.stkt.updt.update_id",
+ FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_expire,
+ {
+ " expiration", "happp.msg.stkt.updt.expiration",
+ FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_key_len,
+ {
+ " key length", "happp.msg.stkt.updt.key.len",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_key_str_value,
+ {
+ " key value", "happp.msg.stkt.updt.key.str.value",
+ FT_STRING, STR_ASCII, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_key_int_value,
+ {
+ " key value", "happp.msg.stkt.updt.key.int.value",
+ FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_key_ipv4_value,
+ {
+ " key IPv4 value", "happp.msg.stkt.updt.key.ipv4.value",
+ FT_IPv4, BASE_NONE, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_key_bytes_value,
+ {
+ " key value", "happp.msg.stkt.updt.key.bytes.value",
+ FT_BYTES, 0, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_server_id,
+ {
+ " server_id", "happp.msg.stkt.updt.data.server_id",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_gpt0,
+ {
+ " gpt0", "happp.msg.stkt.updt.data.gpt0",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_gpc0,
+ {
+ " gpc0", "happp.msg.stkt.updt.data.gpc0",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_gpc0_rate_curr_tick,
+ {
+ " gpc0 curr. tick",
+ "happp.msg.stkt.updt.data.gpc0_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_gpc0_rate_curr_ctr,
+ {
+ " gpc0 curr. ctr.",
+ "happp.msg.stkt.updt.data.gpc0_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_gpc0_rate_prev_ctr,
+ {
+ " gpc0 prev. ctr.",
+ "happp.msg.stkt.updt.data.gpc0_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_conn_cnt,
+ {
+ " conn_cnt",
+ "happp.msg.stkt.updt.data.conn_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_conn_rate_curr_tick,
+ {
+ " conn_rate curr. tick",
+ "happp.msg.stkt.updt.data.conn_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_conn_rate_curr_ctr,
+ {
+ " conn_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.conn_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_conn_rate_prev_ctr,
+ {
+ " conn_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.conn_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_conn_cur,
+ {
+ " conn_curr curr. tick",
+ "happp.msg.stkt.updt.data.conn_cur",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_sess_cnt,
+ {
+ " sess_cnt", "happp.msg.stkt.updt.data.sess_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_sess_rate_curr_tick,
+ {
+ " sess_rate curr. tick",
+ "happp.msg.stkt.updt.data.sess_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_sess_rate_curr_ctr,
+ {
+ " sess_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.sess_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_sess_rate_prev_ctr,
+ {
+ " sess_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.sess_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_req_cnt,
+ {
+ " http_req_cnt",
+ "happp.msg.stkt.updt.data.http_req_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_req_rate_curr_tick,
+ {
+ " http_req_rate curr. tick",
+ "happp.msg.stkt.updt.data.http_req_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_req_rate_curr_ctr,
+ {
+ " http_req_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.http_req_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_req_rate_prev_ctr,
+ {
+ " http_req_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.http_req_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_err_cnt,
+ {
+ " http_err_cnt",
+ "happp.msg.stkt.updt.data.http_err_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_err_rate_curr_tick,
+ {
+ " http_err_rate curr. tick",
+ "happp.msg.stkt.updt.data.http_err_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_err_rate_curr_ctr,
+ {
+ " http_err_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.http_err_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_http_err_rate_prev_ctr,
+ {
+ " http_err_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.http_err_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_in_cnt,
+ {
+ " bytes_in_cnt",
+ "happp.msg.stkt.updt.data.bytes_in_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_in_rate_curr_tick,
+ {
+ " bytes_in_rate curr. tick",
+ "happp.msg.stkt.updt.data.bytes_in_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_in_rate_curr_ctr,
+ {
+ " bytes_in_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.bytes_in_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_in_rate_prev_ctr,
+ {
+ " bytes_in_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.bytes_in_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_out_cnt,
+ {
+ " bytes_out_cnt",
+ "happp.msg.stkt.updt.data.bytes_out_cnt",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_out_rate_curr_tick,
+ {
+ " bytes_out_rate curr. tick",
+ "happp.msg.stkt.updt.data.bytes_out_rate.curr_tick",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_out_rate_curr_ctr,
+ {
+ " bytes_out_rate curr. ctr.",
+ "happp.msg.stkt.updt.data.bytes_out_rate.curr_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_data_bytes_out_rate_prev_ctr,
+ {
+ " bytes_out_rate prev. ctr.",
+ "happp.msg.stkt.updt.data.bytes_out_rate.prev_ctr",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_ack_table_id,
+ {
+ " remote table Id",
+ "happp.msg.stkt.updt.ack.table_id",
+ FT_UINT64, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ {
+ &hf_happp_stkt_updt_ack_update_id,
+ {
+ " update Id", "happp.msg.stkt.updt.ack.update_id",
+ FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL
+ }
+ },
+ };
+
+ /* Setup protocol subtree array */
+ static gint *ett[] = {
+ &ett_happp,
+ &ett_happp_msg
+ };
+
+ /* Register the protocol name and description */
+ proto_happp = proto_register_protocol("HAProxy Peers Protocol", "HAPPP", "happp");
+
+ /* Required function calls to register the header fields and subtrees */
+ proto_register_field_array(proto_happp, hf, array_length(hf));
+ proto_register_subtree_array(ett, array_length(ett));
+}
+
+static gboolean
+dissect_happp_heur_tcp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
+{
+ size_t proto_strlen;
+ conversation_t *conversation;
+
+ proto_strlen = strlen(HAPPP_PROTOCOL);
+
+ if (tvb_captured_length(tvb) < 2)
+ return FALSE;
+
+ if (tvb_get_guint8(tvb, 0) == PEER_MSG_CLASS_STICKTABLE &&
+ tvb_get_guint8(tvb, 1) >= PEER_MSG_STKT_UPDATE &&
+ tvb_get_guint8(tvb, 1) <= PEER_MSG_STKT_ACK)
+ goto found;
+
+ if (tvb_captured_length(tvb) < proto_strlen + 1)
+ return FALSE;
+
+ /* Check that we received a line beginning with HAPPP_PROTOCOL
+ * followed by a space character.
+ */
+ if (tvb_strneql(tvb, 0, HAPPP_PROTOCOL, proto_strlen) ||
+ tvb_get_guint8(tvb, proto_strlen) != ' ')
+ return FALSE;
+
+ found:
+ conversation = find_or_create_conversation(pinfo);
+ if (!conversation)
+ return FALSE;
+
+ conversation_set_dissector(conversation, happp_tcp_handle);
+ dissect_happp_tcp(tvb, pinfo, tree, data);
+
+ return TRUE;
+}
+
+/* Simpler form of proto_reg_handoff_happp which can be used if there are
+ * no prefs-dependent registration function calls. */
+void
+proto_reg_handoff_happp(void)
+{
+ /* Use create_dissector_handle() to indicate that dissect_happp_tcp()
+ * returns the number of bytes it dissected (or 0 if it thinks the packet
+ * does not belong to HAProxy Peers Protocol).
+ */
+ happp_tcp_handle = create_dissector_handle(dissect_happp_tcp, proto_happp);
+ heur_dissector_add("tcp", dissect_happp_heur_tcp, "HAPPP over TCP", "happp_tcp",
+ proto_happp, HEURISTIC_ENABLE);
+}
+
+
+void
+plugin_register(void)
+{
+ static proto_plugin plug;
+
+ plug.register_protoinfo = proto_register_happp;
+ plug.register_handoff = proto_reg_handoff_happp;
+ proto_register_plugin(&plug);
+}
diff --git a/admin/wireshark-dissectors/peers/wireshark.happp.dissector.patch b/admin/wireshark-dissectors/peers/wireshark.happp.dissector.patch
new file mode 100644
index 0000000..c06134d
--- /dev/null
+++ b/admin/wireshark-dissectors/peers/wireshark.happp.dissector.patch
@@ -0,0 +1,24 @@
+diff --git a/epan/dissectors/CMakeLists.txt b/epan/dissectors/CMakeLists.txt
+index 38e2149..5f8cd49 100644
+--- a/epan/dissectors/CMakeLists.txt
++++ b/epan/dissectors/CMakeLists.txt
+@@ -665,6 +665,7 @@ set(DISSECTOR_SRC
+ packet-h263.c
+ packet-h263p.c
+ packet-h264.c
++ packet-happp.c
+ packet-hartip.c
+ packet-hazelcast.c
+ packet-hci_h1.c
+diff --git a/epan/dissectors/Makefile.am b/epan/dissectors/Makefile.am
+index 70edc66..bc46235 100644
+--- a/epan/dissectors/Makefile.am
++++ b/epan/dissectors/Makefile.am
+@@ -694,6 +694,7 @@ DISSECTOR_SRC = \
+ packet-h263.c \
+ packet-h263p.c \
+ packet-h264.c \
++ packet-happp.c \
+ packet-hartip.c \
+ packet-hazelcast.c \
+ packet-hci_h1.c \
diff --git a/dev/base64/base64rev-gen.c b/dev/base64/base64rev-gen.c
new file mode 100644
index 0000000..faffc87
--- /dev/null
+++ b/dev/base64/base64rev-gen.c
@@ -0,0 +1,70 @@
+/*
+ * base64rev generator
+ *
+ * Copyright 2009-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdio.h>
+
+const char base64tab[65]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+char base64rev[128];
+
+#define base '#' /* arbitrary chosen base value */
+#define B64MAX 64
+#define B64PADV B64MAX
+
+int main() {
+ char *p, c;
+ int i, min = 255, max = 0;
+
+ for (i = 0; i < sizeof(base64rev); i++)
+ base64rev[i] = base;
+
+ for (i = 0; i < B64MAX; i++) {
+ c = base64tab[i];
+
+ if (min > c)
+ min = c;
+
+ if (max < c)
+ max = c;
+ }
+
+ for (i = 0; i < B64MAX; i++) {
+ c = base64tab[i];
+
+ if (base+i+1 > 127) {
+ printf("Wrong base value @%d\n", i);
+ return 1;
+ }
+
+ base64rev[c - min] = base+i+1;
+ }
+
+ base64rev['=' - min] = base + B64PADV;
+
+ base64rev[max - min + 1] = '\0';
+
+ printf("#define B64BASE '%c'\n", base);
+ printf("#define B64CMIN '%c'\n", min);
+ printf("#define B64CMAX '%c'\n", max);
+ printf("#define B64PADV %u\n", B64PADV);
+
+ p = base64rev;
+ printf("const char base64rev[]=\"");
+ for (p = base64rev; *p; p++) {
+ if (*p == '\\')
+ printf("\\%c", *p);
+ else
+ printf("%c", *p);
+ }
+ printf("\"\n");
+
+ return 0;
+}
diff --git a/dev/coccinelle/bug_on.cocci b/dev/coccinelle/bug_on.cocci
new file mode 100644
index 0000000..3837879
--- /dev/null
+++ b/dev/coccinelle/bug_on.cocci
@@ -0,0 +1,7 @@
+@@
+expression E;
+@@
+
+- if (E)
+- ABORT_NOW();
++ BUG_ON(E);
diff --git a/dev/coccinelle/cs_endp_flags.cocci b/dev/coccinelle/cs_endp_flags.cocci
new file mode 100644
index 0000000..639d321
--- /dev/null
+++ b/dev/coccinelle/cs_endp_flags.cocci
@@ -0,0 +1,76 @@
+@@
+struct conn_stream *cs;
+expression e;
+@@
+(
+- (cs->endp->flags & (e))
++ sc_ep_test(cs, e)
+|
+- (cs->endp->flags & e)
++ sc_ep_test(cs, e)
+|
+- cs->endp->flags & (e)
++ sc_ep_test(cs, e)
+|
+- cs->endp->flags & e
++ sc_ep_test(cs, e)
+)
+
+@@
+struct conn_stream *cs;
+expression e;
+@@
+(
+- cs->endp->flags |= (e)
++ sc_ep_set(cs, e)
+|
+- cs->endp->flags |= e
++ sc_ep_set(cs, e)
+)
+
+@@
+struct conn_stream *cs;
+expression e;
+@@
+(
+- cs->endp->flags &= ~(e)
++ sc_ep_clr(cs, e)
+|
+- cs->endp->flags &= (e)
++ sc_ep_clr(cs, ~e)
+|
+- cs->endp->flags &= ~e
++ sc_ep_clr(cs, e)
+|
+- cs->endp->flags &= e
++ sc_ep_clr(cs, ~e)
+)
+
+@@
+struct conn_stream *cs;
+@@
+- cs->endp->flags = 0
++ sc_ep_zero(cs)
+
+@@
+struct conn_stream *cs;
+expression e;
+@@
+(
+- cs->endp->flags = (e)
++ sc_ep_setall(cs, e)
+|
+- cs->endp->flags = e
++ sc_ep_setall(cs, e)
+)
+
+@@
+struct conn_stream *cs;
+@@
+(
+- (cs->endp->flags)
++ sc_ep_get(cs)
+|
+- cs->endp->flags
++ sc_ep_get(cs)
+)
diff --git a/dev/coccinelle/endp_flags.cocci b/dev/coccinelle/endp_flags.cocci
new file mode 100644
index 0000000..fceda27
--- /dev/null
+++ b/dev/coccinelle/endp_flags.cocci
@@ -0,0 +1,76 @@
+@@
+struct cs_endpoint *endp;
+expression e;
+@@
+(
+- (endp->flags & (e))
++ se_fl_test(endp, e)
+|
+- (endp->flags & e)
++ se_fl_test(endp, e)
+|
+- endp->flags & (e)
++ se_fl_test(endp, e)
+|
+- endp->flags & e
++ se_fl_test(endp, e)
+)
+
+@@
+struct cs_endpoint *endp;
+expression e;
+@@
+(
+- endp->flags |= (e)
++ se_fl_set(endp, e)
+|
+- endp->flags |= e
++ se_fl_set(endp, e)
+)
+
+@@
+struct cs_endpoint *endp;
+expression e;
+@@
+(
+- endp->flags &= ~(e)
++ se_fl_clr(endp, e)
+|
+- endp->flags &= (e)
++ se_fl_clr(endp, ~e)
+|
+- endp->flags &= ~e
++ se_fl_clr(endp, e)
+|
+- endp->flags &= e
++ se_fl_clr(endp, ~e)
+)
+
+@@
+struct cs_endpoint *endp;
+@@
+- endp->flags = 0
++ se_fl_zero(endp)
+
+@@
+struct cs_endpoint *endp;
+expression e;
+@@
+(
+- endp->flags = (e)
++ se_fl_setall(endp, e)
+|
+- endp->flags = e
++ se_fl_setall(endp, e)
+)
+
+@@
+struct cs_endpoint *endp;
+@@
+(
+- (endp->flags)
++ se_fl_get(endp)
+|
+- endp->flags
++ se_fl_get(endp)
+)
diff --git a/dev/coccinelle/ha_free.cocci b/dev/coccinelle/ha_free.cocci
new file mode 100644
index 0000000..0019039
--- /dev/null
+++ b/dev/coccinelle/ha_free.cocci
@@ -0,0 +1,6 @@
+@ rule @
+expression E;
+@@
+- free(E);
+- E = NULL;
++ ha_free(&E);
diff --git a/dev/coccinelle/ist.cocci b/dev/coccinelle/ist.cocci
new file mode 100644
index 0000000..acde626
--- /dev/null
+++ b/dev/coccinelle/ist.cocci
@@ -0,0 +1,86 @@
+@@
+struct ist i;
+expression p, l;
+@@
+
+(
+- i.ptr = p;
+- i.len = strlen(i.ptr);
++ i = ist(p);
+|
+- i.ptr = p;
+- i.len = l;
++ i = ist2(p, l);
+)
+
+@@
+@@
+
+- ist2(NULL, 0)
++ IST_NULL
+
+@@
+struct ist i;
+expression e;
+@@
+
+- i.ptr += e;
+- i.len -= e;
++ i = istadv(i, e);
+
+@@
+struct ist i;
+@@
+
+- i = istadv(i, 1);
++ i = istnext(i);
+
+@@
+struct ist i;
+@@
+
+- i.ptr++;
+- i.len--;
++ i = istnext(i);
+
+@@
+struct ist i;
+@@
+
+- (\(i.ptr\|istptr(i)\) + \(i.len\|istlen(i)\))
++ istend(i)
+
+@@
+struct ist i;
+expression e;
+@@
+
+- if (\(i.len\|istlen(i)\) > e) { i.len = e; }
++ i = isttrim(i, e);
+
+@@
+struct ist i;
+struct buffer *b;
+@@
+
+- chunk_memcat(b, \(i.ptr\|istptr(i)\) , \(i.len\|istlen(i)\));
++ chunk_istcat(b, i);
+
+@@
+struct ist i;
+@@
+
+- i.ptr != NULL
++ isttest(i)
+
+@@
+char *s;
+@@
+
+(
+- ist2(s, strlen(s))
++ ist(s)
+|
+- ist2(strdup(s), strlen(s))
++ ist(strdup(s))
+)
diff --git a/dev/coccinelle/realloc_leak.cocci b/dev/coccinelle/realloc_leak.cocci
new file mode 100644
index 0000000..c201b80
--- /dev/null
+++ b/dev/coccinelle/realloc_leak.cocci
@@ -0,0 +1,6 @@
+@@
+expression E;
+expression F;
+@@
+
+* E = realloc(E, F);
diff --git a/dev/coccinelle/strcmp.cocci b/dev/coccinelle/strcmp.cocci
new file mode 100644
index 0000000..f6064bf
--- /dev/null
+++ b/dev/coccinelle/strcmp.cocci
@@ -0,0 +1,309 @@
+@@
+statement S;
+expression E;
+expression F;
+@@
+
+ if (
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) != 0
+ )
+(
+ S
+|
+ { ... }
+)
+
+@@
+statement S;
+expression E;
+expression F;
+@@
+
+ if (
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+ )
+(
+ S
+|
+ { ... }
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+G &&
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) != 0
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+G ||
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) != 0
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) != 0
+&& G
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) != 0
+|| G
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+G &&
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+G ||
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+&& G
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+|| G
+)
+
+@@
+expression E;
+expression F;
+expression G;
+@@
+
+(
+- !
+(
+dns_hostname_cmp
+|
+eb_memcmp
+|
+memcmp
+|
+strcasecmp
+|
+strcmp
+|
+strncasecmp
+|
+strncmp
+)
+- (E, F)
++ (E, F) == 0
+)
diff --git a/dev/coccinelle/xalloc_cast.cocci b/dev/coccinelle/xalloc_cast.cocci
new file mode 100644
index 0000000..75baa00
--- /dev/null
+++ b/dev/coccinelle/xalloc_cast.cocci
@@ -0,0 +1,11 @@
+@@
+type T;
+@@
+
+- (T*)
+(
+malloc
+|
+calloc
+)
+ (...)
diff --git a/dev/coccinelle/xalloc_size.cocci b/dev/coccinelle/xalloc_size.cocci
new file mode 100644
index 0000000..80808e3
--- /dev/null
+++ b/dev/coccinelle/xalloc_size.cocci
@@ -0,0 +1,41 @@
+@@
+type T;
+expression E;
+expression t;
+@@
+
+(
+ t = calloc(E, sizeof(*t))
+|
+- t = calloc(E, sizeof(T))
++ t = calloc(E, sizeof(*t))
+)
+
+@@
+type T;
+T *x;
+@@
+
+ x = malloc(
+- sizeof(T)
++ sizeof(*x)
+ )
+
+@@
+type T;
+T *x;
+@@
+
+ x = calloc(1,
+- sizeof(T)
++ sizeof(*x)
+ )
+
+@@
+@@
+
+ calloc(
++ 1,
+ ...
+- ,1
+ )
diff --git a/dev/flags/README b/dev/flags/README
new file mode 100644
index 0000000..f3730c7
--- /dev/null
+++ b/dev/flags/README
@@ -0,0 +1,12 @@
+This needs to be built from the top makefile, for example :
+
+ make dev/flags/flags
+
+Then the executable is usable either one value at a time from the
+command line, or with values coming from stdin with "-" passed
+alone instead of the value.
+
+It is possible to restrict the decoding to certain fields only by
+specifying one of "ana", "chn", "conn", "sc", "si", "sierr", "strm",
+"task", or "txn" before the value.
+
diff --git a/dev/flags/flags.c b/dev/flags/flags.c
new file mode 100644
index 0000000..65af237
--- /dev/null
+++ b/dev/flags/flags.c
@@ -0,0 +1,157 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+/* make the include files below expose their flags */
+#define HA_EXPOSE_FLAGS
+
+#include <haproxy/channel-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/fd-t.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/htx-t.h>
+#include <haproxy/mux_fcgi-t.h>
+#include <haproxy/mux_h2-t.h>
+#include <haproxy/mux_h1-t.h>
+#include <haproxy/stconn-t.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task-t.h>
+
+// 1 bit per flag, no hole permitted here
+#define SHOW_AS_ANA 0x00000001
+#define SHOW_AS_CHN 0x00000002
+#define SHOW_AS_CONN 0x00000004
+#define SHOW_AS_SC 0x00000008
+#define SHOW_AS_SET 0x00000010
+#define SHOW_AS_STRM 0x00000020
+#define SHOW_AS_TASK 0x00000040
+#define SHOW_AS_TXN 0x00000080
+#define SHOW_AS_SD 0x00000100
+#define SHOW_AS_HSL 0x00000200
+#define SHOW_AS_HTX 0x00000400
+#define SHOW_AS_HMSG 0x00000800
+#define SHOW_AS_FD 0x00001000
+#define SHOW_AS_H2C 0x00002000
+#define SHOW_AS_H2S 0x00004000
+#define SHOW_AS_H1C 0x00008000
+#define SHOW_AS_H1S 0x00010000
+#define SHOW_AS_FCONN 0x00020000
+#define SHOW_AS_FSTRM 0x00040000
+
+// command line names, must be in exact same order as the SHOW_AS_* flags above
+// so that show_as_words[i] matches flag 1U<<i.
+const char *show_as_words[] = { "ana", "chn", "conn", "sc", "stet", "strm", "task", "txn", "sd", "hsl", "htx", "hmsg", "fd", "h2c", "h2s", "h1c", "h1s", "fconn", "fstrm"};
+
+/* will be sufficient for even the largest flag names */
+static char buf[4096];
+static size_t bsz = sizeof(buf);
+
+unsigned int get_show_as(const char *word)
+{
+ int w = 0;
+
+ while (1) {
+ if (w == sizeof(show_as_words) / sizeof(*show_as_words))
+ return 0;
+ if (strcmp(word, show_as_words[w]) == 0)
+ return 1U << w;
+ w++;
+ }
+}
+
+void usage_exit(const char *name)
+{
+ int word, nbword;
+
+ fprintf(stderr, "Usage: %s [", name);
+
+ nbword = sizeof(show_as_words) / sizeof(*show_as_words);
+ for (word = 0; word < nbword; word++)
+ fprintf(stderr, "%s%s", word ? "|" : "", show_as_words[word]);
+ fprintf(stderr, "]* { [+-][0x]value* | - }\n");
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int flags;
+ unsigned int show_as = 0;
+ unsigned int f;
+ const char *name = argv[0];
+ char line[20];
+ char *value;
+ int multi = 0;
+ int use_stdin = 0;
+ char *err;
+
+ while (argc > 0) {
+ argv++; argc--;
+ if (argc < 1)
+ usage_exit(name);
+
+ f = get_show_as(argv[0]);
+ if (!f)
+ break;
+ show_as |= f;
+ }
+
+ if (!show_as)
+ show_as = ~0U;
+
+ if (argc > 1)
+ multi = 1;
+
+ if (strcmp(argv[0], "-") == 0)
+ use_stdin = 1;
+
+ while (argc > 0) {
+ if (use_stdin) {
+ value = fgets(line, sizeof(line), stdin);
+ if (!value)
+ break;
+
+ /* skip common leading delimiters that slip from copy-paste */
+ while (*value == ' ' || *value == '\t' || *value == ':' || *value == '=')
+ value++;
+
+ /* stop at the end of the number and trim any C suffix like "UL" */
+ err = value;
+ while (*err == '-' || *err == '+' ||
+ (isalnum((unsigned char)*err) && toupper((unsigned char)*err) != 'U' && toupper((unsigned char)*err) != 'L'))
+ err++;
+ *err = 0;
+ } else {
+ value = argv[0];
+ argv++; argc--;
+ }
+
+ flags = strtoul(value, &err, 0);
+ if (!*value || *err) {
+ fprintf(stderr, "Unparsable value: <%s>\n", value);
+ usage_exit(name);
+ }
+
+ if (multi || use_stdin)
+ printf("### 0x%08x:\n", flags);
+
+ if (show_as & SHOW_AS_ANA) printf("chn->ana = %s\n", (chn_show_analysers(buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_CHN) printf("chn->flags = %s\n", (chn_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_CONN) printf("conn->flags = %s\n", (conn_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_SC) printf("sc->flags = %s\n", (sc_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_SD) printf("sd->flags = %s\n", (se_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_SET) printf("strm->et = %s\n", (strm_et_show_flags(buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_STRM) printf("strm->flags = %s\n", (strm_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_TASK) printf("task->state = %s\n", (task_show_state (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_TXN) printf("txn->flags = %s\n", (txn_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_HSL) printf("sl->flags = %s\n", (hsl_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_HTX) printf("htx->flags = %s\n", (htx_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_HMSG) printf("hmsg->flags = %s\n", (hmsg_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_FD) printf("fd->flags = %s\n", (fd_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_H2C) printf("h2c->flags = %s\n", (h2c_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_H2S) printf("h2s->flags = %s\n", (h2s_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_H1C) printf("h1c->flags = %s\n", (h1c_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_H1S) printf("h1s->flags = %s\n", (h1s_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_FCONN) printf("fconn->flags = %s\n",(fconn_show_flags (buf, bsz, " | ", flags), buf));
+ if (show_as & SHOW_AS_FSTRM) printf("fstrm->flags = %s\n",(fstrm_show_flags (buf, bsz, " | ", flags), buf));
+ }
+ return 0;
+}
diff --git a/dev/flags/show-fd-to-flags.sh b/dev/flags/show-fd-to-flags.sh
new file mode 100755
index 0000000..29757c3
--- /dev/null
+++ b/dev/flags/show-fd-to-flags.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+awk '{print $12}' | grep cflg= | sort | uniq -c | sort -nr | while read a b; do c=${b##*=}; d=$(${0%/*}/flags conn $c);d=${d##*= }; printf "%6d %s %s\n" $a "$b" "$d";done
diff --git a/dev/flags/show-sess-to-flags.sh b/dev/flags/show-sess-to-flags.sh
new file mode 100755
index 0000000..79003a4
--- /dev/null
+++ b/dev/flags/show-sess-to-flags.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+# This script is used to resolve various flags that appear on "show sess all".
+# All identified ones will be appended at the end, with a short name and their
+# value, followed by either the value resolved by "flags" when it's found, or
+# by the copy-pastable command to use to resolve them. The path to FLAGS is
+# searched in this order: 1) $FLAGS, 2) in the path, 3) dev/flags/flags, 4)
+# in the same directory as the script.
+#
+# This script is horrendous, but it's not a reason for making it even more
+# disgusting. The big regex flag mapping mess at the end is readable on a
+# large screen and it's easier to spot mistakes using this aligned format,
+# so please preserve this as much as possible and avoid multi-line formats.
+#
+# The append_* functions provide different variants that are still commented
+# out. It's mostly a matter of taste, they're equivalent.
+#
+# Usage: socat /path/to/socket - <<< "show sess all" | ./$0 > output
+#
+# options:
+# --color=never, --no-color: never colorize output
+# --color=always: always colorize output (default: only on terminal)
+
+# look for "flags in path then in dev/flags/flags then next to the script"
+FLAGS="${FLAGS:-$(command -v flags)}"
+if [ -z "$FLAGS" ]; then
+ if [ -e dev/flags/flags ]; then
+ FLAGS=dev/flags/flags;
+ elif [ -e "${0%/*}/flags" ]; then
+ FLAGS="${0%/*}/flags"
+ else
+		# OK, still not found, so let's write a copy-pastable command
+ FLAGS="echo ./flags"
+ fi
+fi
+
+HTTP_METH=( "OPTIONS" "GET" "HEAD" "POST" "PUT" "DELETE" "TRACE" "CONNECT" "OTHER" )
+out=( )
+decode=( )
+
+# returns str $2 and $3 concatenated with enough spaces in between so that the
+# total size doesn't exceed $1 chars, but always inserts at least one space.
+justify() {
+	local pad=" "
+	local str
+
+	# grow the padding one space at a time until the concatenation no
+	# longer fits in $1 chars. NOTE(review): the string finally echoed is
+	# the first one that exceeded $1, so the result may be up to $1+1
+	# chars long — confirm this is the intended alignment width.
+	while str="${2}${pad}${3}" && [ ${#str} -le $1 ]; do
+		pad="${pad} "
+	done
+	echo -n "$str"
+}
+
+# remove spaces at the beginning and end in "$1"
# remove spaces at the beginning and end in "$1"
trim() {
	local s="$1"

	# strip one leading space per iteration, then one trailing space per
	# iteration; only plain spaces are trimmed (tabs are left alone, as
	# in the original contract)
	while [ "${s# }" != "$s" ]; do
		s="${s# }"
	done
	while [ "${s% }" != "$s" ]; do
		s="${s% }"
	done
	echo -n "$s"
}
+
+# pass $1=ctx name, $2=argname, $3=value, append the decoded line to decode[]
+append_flag() {
+	# normalize $3 into a 0x-prefixed hex string before resolving it
+	set -- "$1" "$2" "$(printf "%#x" $3)"
+	#decode[${#decode[@]}]="$1=$3 [ $(set -- $($FLAGS $2 $3 | cut -f2- -d=); echo $*) ]"
+	#decode[${#decode[@]}]="$(printf "%-14s %10s %s" $1 $3 "$(set -- $($FLAGS $2 $3 | cut -f2- -d=); echo $*)")"
+	#decode[${#decode[@]}]="$(justify 22 "$1" "$3") $(set -- $($FLAGS $2 $3 | cut -f2- -d=); echo $*)"
+	# active variant: justify the name/value pair, then append the names
+	# resolved by $FLAGS with the '|' separators removed
+	decode[${#decode[@]}]="$(justify 22 "$1" "$3") $(set -- $($FLAGS $2 $3 | cut -f2- -d= | tr -d '|'); echo "$*")"
+	#decode[${#decode[@]}]="$(justify 22 "$1" "$3") $(set -- $($FLAGS $2 $(printf "%#x" $3) | cut -f2- -d= | tr -d '|'); echo "$*")"
+	#decode[${#decode[@]}]="$(justify 22 "$1" "$3") $(trim "$($FLAGS $2 $3 | cut -f2- -d= | tr -d '|')")"
+	#decode[${#decode[@]}]="$(justify 22 "$1" "$3") $(trim "$($FLAGS $2 $3 | cut -f2- -d= | tr -d ' ')")"
+}
+
+# pass $1=ctx name, $2=value, $3=decoded value
+append_str() {
+	#decode[${#decode[@]}]="$1=$2 [ $3 ]"
+	#decode[${#decode[@]}]="$(printf "%-14s %10s %s" $1 $2 $3)"
+	# active variant; $3 is intentionally unquoted so that word splitting
+	# collapses internal whitespace runs before trim() strips the edges
+	decode[${#decode[@]}]="$(justify 22 "$1" "$2") $(trim $3)"
+}
+
+# dump and reset the buffers
+dump_and_reset() {
+	local line
+
+	# first pass: emit the raw captured lines, optionally colorized
+	line=0
+	while [ $line -lt ${#out[@]} ]; do
+		if [ -n "$COLOR" ]; then
+			# highlight name=value for values made of upper case letters
+			echo "${out[$line]}" | \
+				sed -e 's,\(^0x.*\),\x1b[1;37m\1\x1b[0m,g' \
+				    -e 's,\([^ ,=]*\)=\([A-Z][^:, ]*\),\x1b[1;36m\1\x1b[0m=\x1b[1;33m\2\x1b[0m,g'
+
+		else
+			echo "${out[$line]}"
+		fi
+		((line++))
+	done
+
+	[ ${#decode[@]} -eq 0 ] || echo " -----------------------------------"
+
+	# second pass: emit the decoded flag lines accumulated by append_*()
+	line=0
+	while [ $line -lt ${#decode[@]} ]; do
+		echo "  ${decode[$line]}"
+		((line++, total++))
+	done
+
+	[ ${#decode[@]} -eq 0 ] || echo " -----------------------------------"
+
+	# reset both buffers for the next stream
+	decode=( )
+	out=( )
+}
+
+### main entry point
+
+if [ -t 1 ]; then
+	# terminal on stdout, enable color by default
+	COLOR=1
+else
+	COLOR=
+fi
+
+# optional color override from the command line
+if [ "$1" == "--no-color" -o "$1" == "--color=never" ]; then
+	shift
+	COLOR=
+elif [ "$1" == "--color=always" ]; then
+	shift
+	COLOR=1
+fi
+
+# read the "show sess all" output one line at a time; <ctx> tracks which
+# section of the dump we're in so that the generic regexes below are applied
+# to the proper flag namespace. A line starting with "0x" begins a new stream.
+ctx=strm
+while read -r; do
+	[ "$REPLY" != "EOF" ] || break # for debugging
+
+	if [[ "$REPLY" =~ ^[[:blank:]]*task= ]]; then
+		ctx=task;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*txn= ]]; then
+		ctx=txn;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*scf= ]]; then
+		ctx=scf;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*co0= ]]; then
+		ctx=cof;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*app0= ]]; then
+		ctx=appf;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*req= ]]; then
+		ctx=req;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*scb= ]]; then
+		ctx=scb;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*co1= ]]; then
+		ctx=cob;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*app1= ]]; then
+		ctx=appb;
+	elif [[ "$REPLY" =~ ^[[:blank:]]*res= ]]; then
+		ctx=res;
+	elif [[ "$REPLY" =~ ^0x ]]; then
+		# here we dump what we have and we reset
+		dump_and_reset
+		ctx=strm;
+	fi
+
+	# extract the flag fields relevant to the current section; each match
+	# is resolved and queued into decode[] by append_flag()/append_str()
+	if [ $ctx = strm ]; then
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag strm.flg   strm "${BASH_REMATCH[1]}"
+	elif [ $ctx = task ]; then
+		! [[ "$REPLY" =~ \(state=([0-9a-fx]*) ]]            || append_flag task.state task "${BASH_REMATCH[1]}"
+	elif [ $ctx = txn ]; then
+		! [[ "$REPLY" =~ [[:blank:]]meth=([^[:blank:]]*) ]] || append_str txn.meth "${BASH_REMATCH[1]}" "${HTTP_METH[$((${BASH_REMATCH[1]}))]}"
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag txn.flg     txn "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]req\.f=([0-9a-fx]*) ]]  || append_flag txn.req.flg hmsg "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]rsp\.f=([0-9a-fx]*) ]]  || append_flag txn.rsp.flg hmsg "${BASH_REMATCH[1]}"
+	elif [ $ctx = scf ]; then
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag f.sc.flg     sc "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]endp=[[:alnum:]]*,[[:alnum:]]*,([0-9a-fx]*) ]] || append_flag f.sc.sd.flg sd "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1s.*\.sd\.flg=([0-9a-fx]*) ]] || append_flag f.h1s.sd.flg sd  "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1s\.flg=([0-9a-fx]*) ]]       || append_flag f.h1s.flg    h1s "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1c\.flg=([0-9a-fx]*) ]]       || append_flag f.h1c.flg    h1c "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ ^[[:blank:]]*\.sc=.*\.flg=.*\.app=.*\.sd=[^=]*\.flg=([0-9a-fx]*) ]] || append_flag f.h2s.sd.flg sd "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h2s.*\.flg=([0-9a-fx]*) ]]     || append_flag f.h2s.flg    h2s "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]]     || append_flag f.h2c.flg    h2c "${BASH_REMATCH[1]}"
+	elif [ $ctx = cof ]; then
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag f.co.flg    conn "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag f.co.fd.st  fd 0x"${BASH_REMATCH[1]#0x}"
+	elif [ $ctx = req ]; then
+		! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]]     || append_flag req.flg     chn "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]]      || append_flag req.ana     ana "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]htx.*flags=([0-9a-fx]*) ]] || append_flag req.htx.flg htx "${BASH_REMATCH[1]}"
+	elif [ $ctx = scb ]; then
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag b.sc.flg     sc "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]endp=[[:alnum:]]*,[[:alnum:]]*,([0-9a-fx]*) ]] || append_flag b.sc.sd.flg sd "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1s.*\.sd\.flg=([0-9a-fx]*) ]] || append_flag b.h1s.sd.flg sd  "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1s\.flg=([0-9a-fx]*) ]]       || append_flag b.h1s.flg    h1s "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h1c\.flg=([0-9a-fx]*) ]]       || append_flag b.h1c.flg    h1c "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ ^[[:blank:]]*\.sc=.*\.flg=.*\.app=.*\.sd=[^=]*\.flg=([0-9a-fx]*) ]] || append_flag b.h2s.sd.flg sd "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h2s.*\.flg=([0-9a-fx]*) ]]     || append_flag b.h2s.flg    h2s "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]h2c.*\.flg=([0-9a-fx]*) ]]     || append_flag b.h2c.flg    h2c "${BASH_REMATCH[1]}"
+	elif [ $ctx = cob ]; then
+		! [[ "$REPLY" =~ [[:blank:]]flags=([0-9a-fx]*) ]]   || append_flag b.co.flg    conn "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]fd.state=([0-9a-fx]*) ]] || append_flag b.co.fd.st  fd "${BASH_REMATCH[1]}"
+	elif [ $ctx = res ]; then
+		! [[ "$REPLY" =~ [[:blank:]]\(f=([0-9a-fx]*) ]]     || append_flag res.flg     chn "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]an=([0-9a-fx]*) ]]      || append_flag res.ana     ana "${BASH_REMATCH[1]}"
+		! [[ "$REPLY" =~ [[:blank:]]htx.*flags=([0-9a-fx]*) ]] || append_flag res.htx.flg htx "${BASH_REMATCH[1]}"
+	fi
+
+	# keep the raw line for the first dump pass
+	out[${#out[@]}]="$REPLY"
+done
+
+# dump the last stream
+dump_and_reset
diff --git a/dev/h2/mkhdr.sh b/dev/h2/mkhdr.sh
new file mode 100755
index 0000000..4d129fa
--- /dev/null
+++ b/dev/h2/mkhdr.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# Usage: mkhdr -l <len> -t <type> -f <flags> -sid <sid> > hdr.bin
+# All fields are optional. 0 assumed when absent.
+
+USAGE=\
+"Usage: %s [-l <len> ] [-t <type>] [-f <flags>] [-i <sid>] [ -d <data> ] > hdr.bin
+ Numbers are decimal or 0xhex. Not set=0. If <data> is passed, it points
+ to a file that is read and chunked into frames of <len> bytes.
+
+Supported symbolic types (case insensitive prefix match):
+ DATA (0x00) PUSH_PROMISE (0x05)
+ HEADERS (0x01) PING (0x06)
+ PRIORITY (0x02) GOAWAY (0x07)
+ RST_STREAM (0x03) WINDOW_UPDATE (0x08)
+ SETTINGS (0x04) CONTINUATION (0x09)
+
+Supported symbolic flags (case insensitive prefix match):
+ ES (0x01) PAD (0x08)
+ EH (0x04) PRIO (0x20)
+
+"
+
+LEN=
+TYPE=
+FLAGS=
+ID=
+
+# print an optional error message to stderr and exit with failure
+die() {
+	[ "$#" -eq 0 ] || echo "$*" >&2
+	exit 1
+}
+
+# print an optional message to stdout and exit with success
+quit() {
+	[ "$#" -eq 0 ] || echo "$*"
+	exit 0
+}
+
+# print usage with $1 as the cmd name
+usage() {
+	# $USAGE contains printf directives, hence printf with $1 as cmd name
+	printf "$USAGE" "$1";
+}
+
# Send frame made of $1 $2 $3 $4 to stdout.
# Usage: mkframe <len> <type> <flags> <id>
# <type> and <flags> may be symbolic (case insensitive prefix match, see
# $USAGE) or numeric (decimal or 0xhex). Emits the 9-byte H2 frame header.
mkframe() {
	local L="${1:-0}"
	local T="${2:-0}"
	local F="${3:-0}"
	local I="${4:-0}"
	local t f

	# resolve a symbolic type; the first match in this order wins
	for t in DATA:0x00 HEADERS:0x01 RST_STREAM:0x03 SETTINGS:0x04 PING:0x06 \
	         GOAWAY:0x07 WINDOW_UPDATE:0x08 CONTINUATION:0x09 PRIORITY:0x02 \
	         PUSH_PROMISE:0x05; do
		if [ -z "${t##${T^^*}*}" ]; then
			T="${t##*:}"
			break
		fi
	done

	if [ -n "${T##[0-9]*}" ]; then
		echo "Unknown type '$T'" >&2
		# BUG FIX: ${0##*} always expands empty; use ${0##*/} (basename)
		usage "${0##*/}"
		die
	fi

	# resolve a symbolic flag; the first match in this order wins
	for f in ES:0x01 EH:0x04 PAD:0x08 PRIO:0x20; do
		if [ -z "${f##${F^^*}*}" ]; then
			F="${f##*:}"
			break
		fi
	done

	if [ -n "${F##[0-9]*}" ]; then
		# BUG FIX: this branch reported "Unknown type '$T'"
		echo "Unknown flag '$F'" >&2
		usage "${0##*/}"
		die
	fi

	# force numeric evaluation of all fields
	L=$(( L )); T=$(( T )); F=$(( F )); I=$(( I ))

	# 24-bit big-endian length
	L0=$(( (L >> 16) & 255 )); L0=$(printf "%02x" $L0)
	L1=$(( (L >> 8) & 255 ));  L1=$(printf "%02x" $L1)
	L2=$(( (L >> 0) & 255 ));  L2=$(printf "%02x" $L2)

	# 8-bit type and flags
	T0=$(( (T >> 0) & 255 )); T0=$(printf "%02x" $T0)
	F0=$(( (F >> 0) & 255 )); F0=$(printf "%02x" $F0)

	# 31-bit big-endian stream ID (the reserved top bit is masked off)
	I0=$(( (I >> 24) & 127 )); I0=$(printf "%02x" $I0)
	I1=$(( (I >> 16) & 255 )); I1=$(printf "%02x" $I1)
	I2=$(( (I >> 8) & 255 ));  I2=$(printf "%02x" $I2)
	I3=$(( (I >> 0) & 255 ));  I3=$(printf "%02x" $I3)

	printf "\x$L0\x$L1\x$L2\x$T0\x$F0\x$I0\x$I1\x$I2\x$I3"
}
+
## main

# at least one option is required, otherwise only print the usage.
# BUG FIX below (applied everywhere): ${0##*} always expands to an empty
# string ('*' matches the whole word); ${0##*/} yields the basename.
if [ $# -le 1 ]; then
	usage "${0##*/}"
	die
fi

# parse the options; each of them takes a value except -h/--help
while [ -n "$1" -a -z "${1##-*}" ]; do
	case "$1" in
		-l)        LEN="$2"   ; shift 2 ;;
		-t)        TYPE="$2"  ; shift 2 ;;
		-f)        FLAGS="$2" ; shift 2 ;;
		-i)        ID="$2"    ; shift 2 ;;
		-d)        DATA="$2"  ; shift 2 ;;
		-h|--help) usage "${0##*/}"; quit;;
		*)         usage "${0##*/}"; die ;;
	esac
done

# no trailing non-option argument is expected
if [ $# -gt 0 ]; then
	usage "${0##*/}"
	die
fi

# default values for LEN and ID, which must be numeric
LEN=${LEN:-0};
if [ -n "${LEN##[0-9]*}" ]; then
	echo "Unparsable length '$LEN'" >&2
	usage "${0##*/}"
	die
fi

ID=${ID:-0};
if [ -n "${ID##[0-9]*}" ]; then
	echo "Unparsable stream ID '$ID'" >&2
	usage "${0##*/}"
	die
fi

if [ -z "$DATA" ]; then
	# no payload: emit a single 9-byte frame header
	mkframe "$LEN" "$TYPE" "$FLAGS" "$ID"
else
	# read file $DATA in <LEN> chunks and send it in multiple frames
	# advertising their respective lengths.
	[ $LEN -gt 0 ] || LEN=16384

	while read -rN "$LEN" payload || [ ${#payload} -gt 0 ]; do
		mkframe "${#payload}" "$TYPE" "$FLAGS" "$ID"
		echo -n "$payload"
	done < "$DATA"
fi

exit 0
diff --git a/dev/haring/README b/dev/haring/README
new file mode 100644
index 0000000..5205cf2
--- /dev/null
+++ b/dev/haring/README
@@ -0,0 +1,9 @@
+This needs to be built from the top makefile, for example :
+
+ make dev/haring/haring
+
+If HAProxy is built with special options such -DDEBUG_THREAD or with
+multi-threading support enabled (which changes the ring's header size),
+it can be worth reusing the same build options for haring, usually they
+will remain compatible, and will simplify the handling of different file
+layouts, at the expense of dragging more dependencies into the executable.
diff --git a/dev/haring/haring.c b/dev/haring/haring.c
new file mode 100644
index 0000000..ee7e1aa
--- /dev/null
+++ b/dev/haring/haring.c
@@ -0,0 +1,266 @@
+/*
+ * post-mortem ring reader for haproxy
+ *
+ * Copyright (C) 2022 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/* we do not implement BUG_ON() */
+#undef DEBUG_STRICT
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/ring.h>
+
+int force = 0; // force access to a different layout
+int lfremap = 0; // remap LF in traces
+int repair = 0; // repair file
+
+
+/* display the message and exit with the code */
/* emit the optional printf-style message <format> on stderr, then terminate
 * the process with exit code <code>. <format> may be NULL for a silent exit.
 */
__attribute__((noreturn)) void die(int code, const char *format, ...)
{
	if (format) {
		va_list args;

		va_start(args, format);
		vfprintf(stderr, format, args);
		va_end(args);
	}
	exit(code);
}
+
+/* display the usage message and exit with the code */
+__attribute__((noreturn)) void usage(int code, const char *arg0)
+{
+	/* delegate to die() so that the help text goes to stderr and the
+	 * process exits with <code>; <arg0> is the program name shown in
+	 * the usage line
+	 */
+	die(code,
+	    "Usage: %s [options]* <file>\n"
+	    "\n"
+	    "options :\n"
+	    "  -f : force accessing a non-matching layout for 'ring struct'\n"
+	    "  -l : replace LF in contents with CR VT\n"
+	    "  -r : \"repair\" corrupted file (actively search for message boundaries)\n"
+	    "\n"
+	    "", arg0);
+}
+
+/* This function dumps all events from the ring whose pointer is in <p0> into
+ * the appctx's output buffer, and takes from <o0> the seek offset into the
+ * buffer's history (0 for oldest known event). It looks at <i0> for boolean
+ * options: bit0 means it must wait for new data or any key to be pressed. Bit1
+ * means it must seek directly to the end to wait for new contents. It returns
+ * 0 if the output buffer or events are missing is full and it needs to be
+ * called again, otherwise non-zero. It is meant to be used with
+ * cli_release_show_ring() to clean up.
+ */
+int dump_ring(struct ring *ring, size_t ofs, int flags)
+{
+ struct buffer buf;
+ uint64_t msg_len = 0;
+ size_t len, cnt;
+ const char *blk1 = NULL, *blk2 = NULL, *p;
+ size_t len1 = 0, len2 = 0, bl;
+
+ /* Explanation: the storage area in the writing process starts after
+ * the end of the structure. Since the whole area is mmapped(), we know
+ * it starts at 0 mod 4096, hence the buf->area pointer's 12 LSB point
+ * to the relative offset of the storage area. As there will always be
+ * users using the wrong version of the tool with a dump, we need to
+ * run a few checks first. After that we'll create our own buffer
+ * descriptor matching that area.
+ */
+ if ((((long)ring->buf.area) & 4095) != sizeof(*ring)) {
+ if (!force) {
+ fprintf(stderr, "FATAL: header in file is %ld bytes long vs %ld expected!\n",
+ (((long)ring->buf.area) & 4095),
+ (long)sizeof(*ring));
+ exit(1);
+ }
+ else {
+ fprintf(stderr, "WARNING: header in file is %ld bytes long vs %ld expected!\n",
+ (((long)ring->buf.area) & 4095),
+ (long)sizeof(*ring));
+ }
+ /* maybe we could emit a warning at least ? */
+ }
+
+ /* Now make our own buffer pointing to that area */
+ buf = b_make(((void *)ring + (((long)ring->buf.area) & 4095)),
+ ring->buf.size, ring->buf.head, ring->buf.data);
+
+ /* explanation for the initialization below: it would be better to do
+ * this in the parsing function but this would occasionally result in
+ * dropped events because we'd take a reference on the oldest message
+ * and keep it while being scheduled. Thus instead let's take it the
+ * first time we enter here so that we have a chance to pass many
+ * existing messages before grabbing a reference to a location. This
+ * value cannot be produced after initialization.
+ */
+ if (unlikely(ofs == ~0)) {
+ ofs = 0;
+
+ /* going to the end means looking at tail-1 */
+ ofs = (flags & RING_WF_SEEK_NEW) ? buf.data - 1 : 0;
+
+ //HA_ATOMIC_INC(b_peek(&buf, ofs));
+ }
+
+ while (1) {
+ //HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
+
+ if (ofs >= buf.size) {
+ fprintf(stderr, "FATAL error at %d\n", __LINE__);
+ return 1;
+ }
+ //HA_ATOMIC_DEC(b_peek(&buf, ofs));
+
+ /* in this loop, ofs always points to the counter byte that precedes
+ * the message so that we can take our reference there if we have to
+ * stop before the end.
+ */
+ while (ofs + 1 < b_data(&buf)) {
+ if (unlikely(repair && *b_peek(&buf, ofs))) {
+ /* in repair mode we consider that we could have landed
+ * in the middle of a message so we skip all bytes till
+ * the next zero.
+ */
+ ofs++;
+ continue;
+ }
+ cnt = 1;
+ len = b_peek_varint(&buf, ofs + cnt, &msg_len);
+ if (!len)
+ break;
+ cnt += len;
+
+ if (msg_len + ofs + cnt + 1 > buf.data) {
+ fprintf(stderr, "FATAL error at %d\n", __LINE__);
+ return 1;
+ }
+
+ len = b_getblk_nc(&buf, &blk1, &len1, &blk2, &len2, ofs + cnt, msg_len);
+ if (!lfremap) {
+ if (len > 0 && len1)
+ fwrite(blk1, len1, 1, stdout);
+ if (len > 1 && len2)
+ fwrite(blk2, len2, 1, stdout);
+ } else {
+ while (len > 0) {
+ for (; len1; p++) {
+ p = memchr(blk1, '\n', len1);
+ if (!p || p > blk1) {
+ bl = p ? p - blk1 : len1;
+ fwrite(blk1, bl, 1, stdout);
+ blk1 += bl;
+ len1 -= bl;
+ }
+
+ if (p) {
+ putchar('\r');
+ putchar('\v');
+ blk1++;
+ len1--;
+ }
+ }
+ len--;
+ blk1 = blk2;
+ len1 = len2;
+ }
+ }
+
+ putchar('\n');
+
+ ofs += cnt + msg_len;
+ }
+
+ //HA_ATOMIC_INC(b_peek(&buf, ofs));
+ //HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
+
+ if (!(flags & RING_WF_WAIT_MODE))
+ break;
+
+ /* pause 10ms before checking for new stuff */
+ usleep(10000);
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct ring *ring;
+ struct stat statbuf;
+ const char *arg0;
+ int fd;
+
+ arg0 = argv[0];
+ while (argc > 1 && argv[1][0] == '-') {
+ argc--; argv++;
+ if (strcmp(argv[0], "-f") == 0)
+ force = 1;
+ else if (strcmp(argv[0], "-l") == 0)
+ lfremap = 1;
+ else if (strcmp(argv[0], "-r") == 0)
+ repair = 1;
+ else if (strcmp(argv[0], "--") == 0)
+ break;
+ else
+ usage(1, arg0);
+ }
+
+ if (argc < 2)
+ usage(1, arg0);
+
+ fd = open(argv[1], O_RDONLY);
+ if (fd < 0) {
+ perror("open()");
+ return 1;
+ }
+
+ if (fstat(fd, &statbuf) < 0) {
+ perror("fstat()");
+ return 1;
+ }
+
+ ring = mmap(NULL, statbuf.st_size, PROT_READ, MAP_SHARED, fd, 0);
+ close(fd);
+
+ if (ring == MAP_FAILED) {
+ perror("mmap()");
+ return 1;
+ }
+
+ return dump_ring(ring, ~0, 0);
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/dev/hpack/README b/dev/hpack/README
new file mode 100644
index 0000000..d7258b5
--- /dev/null
+++ b/dev/hpack/README
@@ -0,0 +1,4 @@
+This needs to be built from the top makefile, for example :
+
+ make dev/hpack/{decode,gen-enc,gen-rht}
+
diff --git a/dev/hpack/decode.c b/dev/hpack/decode.c
new file mode 100644
index 0000000..13c95c7
--- /dev/null
+++ b/dev/hpack/decode.c
@@ -0,0 +1,215 @@
+/*
+ * HPACK stream decoder. Takes a series of hex codes on stdin using one line
+ * per HEADERS frame. Spaces, tabs, CR, '-' and ',' are silently skipped.
+ * e.g. :
+ * echo 82864188f439ce75c875fa5784 | dev/hpack/decode
+ *
+ * The DHT size may optionally be changed in argv[1].
+ *
+ * Build like this :
+ * gcc -I../../include -O0 -g -fno-strict-aliasing -fwrapv \
+ * -o decode decode.c
+ */
+
+#define HPACK_STANDALONE
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <haproxy/chunk.h>
+#include <haproxy/hpack-dec.h>
+
+#define MAX_RQ_SIZE 65536
+#define MAX_HDR_NUM 1000
+
+char hex[MAX_RQ_SIZE*3+3]; // enough for "[ XX]* <CR> <LF> \0"
+uint8_t buf[MAX_RQ_SIZE];
+
+char trash_buf[MAX_RQ_SIZE];
+char tmp_buf[MAX_RQ_SIZE];
+
+THREAD_LOCAL struct buffer trash = { .area = trash_buf, .data = 0, .size = sizeof(trash_buf) };
+struct buffer tmp = { .area = tmp_buf, .data = 0, .size = sizeof(tmp_buf) };
+
/* displays a <len> long memory block at <buf>, assuming first byte of <buf>
 * has address <baseaddr>. String <pfx> may be placed as a prefix in front of
 * each line. It may be NULL if unused. The output is emitted to file <out>.
 * BUG FIX: a negative <len> is now ignored; previously it was added to the
 * unsigned loop bound and wrapped to a huge value, looping almost forever
 * and reading out of bounds.
 */
void debug_hexdump(FILE *out, const char *pfx, const char *buf,
                   unsigned int baseaddr, int len)
{
	unsigned int i;
	int b, j;

	if (len < 0)
		return;

	/* each 16-byte output line may start before <buf> when <baseaddr> is
	 * not 16-aligned; <b> is the offset of the current line's first byte
	 * relative to <buf> and may be negative on the first line
	 */
	for (i = 0; i < (len + (baseaddr & 15)); i += 16) {
		b = i - (baseaddr & 15);
		fprintf(out, "%s%08x: ", pfx ? pfx : "", i + (baseaddr & ~15));
		/* left half: 8 hex bytes, blank-filled outside the block */
		for (j = 0; j < 8; j++) {
			if (b + j >= 0 && b + j < len)
				fprintf(out, "%02x ", (unsigned char)buf[b + j]);
			else
				fprintf(out, "   ");
		}

		/* middle separator: '-' when the 9th byte is in the block */
		if (b + j >= 0 && b + j < len)
			fputc('-', out);
		else
			fputc(' ', out);

		/* right half: 8 more hex bytes */
		for (j = 8; j < 16; j++) {
			if (b + j >= 0 && b + j < len)
				fprintf(out, " %02x", (unsigned char)buf[b + j]);
			else
				fprintf(out, "   ");
		}

		/* ASCII dump: printable characters as-is, '.' otherwise */
		fprintf(out, "   ");
		for (j = 0; j < 16; j++) {
			if (b + j >= 0 && b + j < len) {
				if (isprint((unsigned char)buf[b + j]))
					fputc((unsigned char)buf[b + j], out);
				else
					fputc('.', out);
			}
			else
				fputc(' ', out);
		}
		fputc('\n', out);
	}
}
+
+/* enable DEBUG_HPACK to show each individual hpack code */
+#define DEBUG_HPACK
+#include "../src/hpack-huff.c"
+#include "../src/hpack-tbl.c"
+#include "../src/hpack-dec.c"
+
+/* display the message and exit with the code */
+__attribute__((noreturn)) void die(int code, const char *format, ...)
+{
+	va_list args;
+
+	/* <format> may be NULL for a silent exit; the message, if any, is
+	 * emitted on stderr before terminating with <code>
+	 */
+	if (format) {
+		va_start(args, format);
+		vfprintf(stderr, format, args);
+		va_end(args);
+	}
+	exit(code);
+}
+
+/* reads <hex> and stops at the first LF, '#' or \0. Converts from hex to
+ * binary, ignoring spaces, tabs, CR, "-" and ','. The output is sent into
+ * <bin> for no more than <size> bytes. The number of bytes placed there is
+ * returned, or a negative value in case of parsing error.
+ */
/* reads <hex> and stops at the first LF, '#' or \0. Converts from hex to
 * binary, ignoring spaces, tabs, CR, "-" and ','. The output is sent into
 * <bin> for no more than <size> bytes. The number of bytes placed there is
 * returned, or a negative value in case of parsing error: -1 on an invalid
 * character, -2 when <bin> overflows, -3 when a nibble is left pending.
 */
int hex2bin(const char *hex, uint8_t *bin, int size)
{
	int pending = -1; /* high nibble waiting for its partner, -1 if none */
	int len = 0;

	while (*hex) {
		int c = *hex++;
		int v;

		if (c == '\n' || c == '#')
			break;

		if (c == ' ' || c == '\t' || c == '\r' ||
		    c == '-' || c == ',')
			continue;

		if (c >= '0' && c <= '9')
			v = c - '0';
		else if (c >= 'a' && c <= 'f')
			v = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			v = c - 'A' + 10;
		else
			return -1;

		if (pending < 0) {
			pending = v;
			continue;
		}

		if (len >= size)
			return -2;

		bin[len++] = (uint8_t)((pending << 4) | v);
		pending = -1;
	}

	/* an odd number of nibbles is a parsing error */
	return (pending >= 0) ? -3 : len;
}
+
+int main(int argc, char **argv)
+{
+	struct hpack_dht *dht;
+	struct http_hdr list[MAX_HDR_NUM];
+	struct pool_head pool;
+	int outlen;
+	int dht_size = 4096;
+	int len, idx;
+	int line;
+
+	/* first arg: dht size */
+	if (argc > 1) {
+		dht_size = atoi(argv[1]);
+		argv++; argc--;
+	}
+
+	/* hpack_dht_alloc() sizes the table from the hpack pool entry */
+	pool.size = dht_size;
+	pool_head_hpack_tbl = &pool;
+	dht = hpack_dht_alloc();
+	if (!dht) {
+		die(1, "cannot initialize dht\n");
+		return 1;
+	}
+
+	/* one input line per HEADERS frame; hex2bin() ignores separators
+	 * and stops at '#' comments, empty/invalid lines are skipped
+	 */
+	for (line = 1; fgets(hex, sizeof(hex), stdin); line++) {
+		len = hex2bin(hex, buf, sizeof(buf));
+		if (len <= 0)
+			continue;
+		printf("###### line %d : frame len=%d #######\n", line, len);
+		debug_hexdump(stdout, " ", (const char *)buf, 0, len);
+
+		outlen = hpack_decode_frame(dht, buf, len, list,
+		                            sizeof(list)/sizeof(list[0]), &tmp);
+		if (outlen <= 0) {
+			printf(" HPACK decoding failed: %d\n", outlen);
+			continue;
+		}
+
+		printf("<<< Found %d headers :\n", outlen);
+		/* NOTE(review): only outlen-1 entries are printed; the last
+		 * list entry appears to be an end marker — confirm against
+		 * hpack_decode_frame()
+		 */
+		for (idx = 0; idx < outlen - 1; idx++) {
+			//printf("  \e[1;34m%s\e[0m: ",
+			//       list[idx].n.ptr ? istpad(trash.str, list[idx].n).ptr : h2_phdr_to_str(list[idx].n.len));
+
+			//printf("\e[1;35m%s\e[0m\n", istpad(trash.str, list[idx].v).ptr);
+
+			printf("  %s: ", list[idx].n.ptr ?
+			       istpad(trash.area, list[idx].n).ptr :
+			       h2_phdr_to_str(list[idx].n.len));
+
+			printf("%s [n=(%p,%d) v=(%p,%d)]\n",
+			       istpad(trash.area, list[idx].v).ptr,
+			       list[idx].n.ptr, (int)list[idx].n.len, list[idx].v.ptr, (int)list[idx].v.len);
+		}
+		puts(">>>");
+#ifdef DEBUG_HPACK
+		printf("<<=== DHT dump [ptr=%p]:\n", dht);
+		hpack_dht_dump(stdout, dht);
+		puts("===>>");
+#endif
+	}
+	return 0;
+}
diff --git a/dev/hpack/gen-enc.c b/dev/hpack/gen-enc.c
new file mode 100644
index 0000000..3fc5ef9
--- /dev/null
+++ b/dev/hpack/gen-enc.c
@@ -0,0 +1,205 @@
+/*
+ * HPACK encoding table generator. It produces a stream of
+ * <len><idx><name> and a table pointing to the first <len> of each series.
+ * The end of the stream is marked by <len>=0. In parallel, a length-indexed
+ * table is built to access the first entry of each length.
+ *
+ * Build like this :
+ * gcc -I../../include -o gen-enc gen-enc.c
+ */
+#define HPACK_STANDALONE
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <import/ist.h>
+#include <haproxy/hpack-tbl-t.h>
+#include "../../src/hpack-tbl.c"
+
+struct idxhdr {
+ const char *ptr;
+ int len;
+ int idx;
+};
+
+struct idxhdr idxhdr[HPACK_SHT_SIZE];
+static int positions[32];
+static char known_hdr[1024];
+
+/* preferred ordering of headers of similar size. Those not mentioned will be
+ * less prioritized.
+ */
+const struct {
+ const char *name;
+ const int rank;
+} ranks[] = {
+ { .name = "age", .rank = 1 },
+ { .name = "via", .rank = 2 },
+
+ { .name = "date", .rank = 1 },
+ { .name = "host", .rank = 2 },
+
+ { .name = "accept", .rank = 1 },
+ { .name = "server", .rank = 2 },
+ { .name = "cookie", .rank = 3 },
+
+ { .name = "referer", .rank = 1 },
+ { .name = "expires", .rank = 2 },
+
+ { .name = "location", .rank = 1 },
+
+ { .name = "user-agent", .rank = 1 },
+ { .name = "set-cookie", .rank = 2 },
+
+ { .name = "content-type", .rank = 1 },
+
+ { .name = "cache-control", .rank = 1 },
+ { .name = "last-modified", .rank = 2 },
+ { .name = "accept-ranges", .rank = 3 },
+ { .name = "if-none-match", .rank = 4 },
+
+ { .name = "content-length", .rank = 1 },
+
+ { .name = "accept-encoding", .rank = 1 },
+ { .name = "accept-language", .rank = 2 },
+
+ { .name = "content-encoding", .rank = 1 },
+
+ { .name = "transfer-encoding", .rank = 1 },
+ { .name = "if-modified-since", .rank = 2 },
+
+ { .name = "content-disposition", .rank = 1 },
+};
+
+/* returns the rank of header <name> or 255 if not found */
+int get_hdr_rank(const char *name)
+{
+ int i;
+
+ for (i = 0; i < sizeof(ranks) / sizeof(ranks[0]); i++) {
+ if (strcmp(ranks[i].name, name) == 0)
+ return ranks[i].rank;
+ }
+ return 255;
+}
+
+/* sorts first on the length, second on the name, and third on the idx, so that
+ * headers which appear with multiple occurrences are always met first.
+ */
+int cmp_idx(const void *l, const void *r)
+{
+	const struct idxhdr *a = l, *b = r;
+	int ranka, rankb;
+	int ret;
+
+	/* primary key: ascending name length */
+	if (a->len < b->len)
+		return -1;
+	else if (a->len > b->len)
+		return 1;
+
+	/* secondary key: the preference rank from ranks[] (255 if absent) */
+	ranka = get_hdr_rank(a->ptr);
+	rankb = get_hdr_rank(b->ptr);
+
+	if (ranka < rankb)
+		return -1;
+	else if (ranka > rankb)
+		return 1;
+
+	/* same rank, check for duplicates and use index */
+	ret = strcmp(a->ptr, b->ptr);
+	if (ret != 0)
+		return ret;
+
+	/* final key: ascending static-table index, so that duplicates are
+	 * met with the smallest index first
+	 */
+	if (a->idx < b->idx)
+		return -1;
+	else if (a->idx > b->idx)
+		return 1;
+	else
+		return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int pos;
+ int prev;
+ int len;
+ int i;
+
+ for (len = 0; len < 32; len++)
+ positions[len] = -1;
+
+ for (i = 0; i < HPACK_SHT_SIZE; i++) {
+ idxhdr[i].ptr = hpack_sht[i].n.ptr;
+ idxhdr[i].len = hpack_sht[i].n.len;
+ idxhdr[i].idx = i;
+ }
+
+ /* sorts all header names by length first, then by name, and finally by
+ * idx so that we meet smaller headers first, that within a length they
+ * appear in frequency order, and that multiple occurrences appear with
+ * the smallest index first.
+ */
+ qsort(&idxhdr[1], HPACK_SHT_SIZE - 1, sizeof(idxhdr[0]), cmp_idx);
+
+ pos = 0;
+ prev = -1;
+ for (i = 1; i < HPACK_SHT_SIZE; i++) {
+ len = idxhdr[i].len;
+ if (len > 31) {
+ //printf("skipping %s (len=%d)\n", idxhdr[i].ptr, idxhdr[i].len);
+ continue;
+ }
+
+ /* first occurrence of this length? */
+ if (positions[len] == -1)
+ positions[len] = pos;
+ else if (prev >= 0 &&
+ memcmp(&known_hdr[prev] + 2, idxhdr[i].ptr, len) == 0) {
+ /* duplicate header field */
+ continue;
+ }
+
+ /* store <len> <idx> <name> in the output array */
+
+ if (pos + 1 + len + 2 >= sizeof(known_hdr))
+ abort();
+
+ prev = pos;
+ known_hdr[pos++] = len;
+ known_hdr[pos++] = idxhdr[i].idx;
+ memcpy(&known_hdr[pos], idxhdr[i].ptr, len);
+ pos += len;
+ //printf("%d %d %s\n", len, idxhdr[i].idx, idxhdr[i].ptr);
+ }
+
+ if (pos + 1 >= sizeof(known_hdr))
+ abort();
+ known_hdr[pos++] = 0; // size zero ends the stream
+
+ printf("const char hpack_enc_stream[%d] = {\n", pos);
+ for (i = 0; i < pos; i++) {
+ if ((i & 7) == 0)
+ printf("\t /* % 4d: */", i);
+
+ printf(" 0x%02x,", known_hdr[i]);
+
+ if ((i & 7) == 7 || (i == pos - 1))
+ putchar('\n');
+ }
+ printf("};\n\n");
+
+ printf("const signed short hpack_pos_len[32] = {\n");
+ for (i = 0; i < 32; i++) {
+ if ((i & 7) == 0)
+ printf("\t /* % 4d: */", i);
+
+ printf(" % 4d,", positions[i]);
+
+ if ((i & 7) == 7 || (i == pos - 1))
+ putchar('\n');
+ }
+ printf("};\n\n");
+ return 0;
+}
diff --git a/dev/hpack/gen-rht.c b/dev/hpack/gen-rht.c
new file mode 100644
index 0000000..4260ffb
--- /dev/null
+++ b/dev/hpack/gen-rht.c
@@ -0,0 +1,369 @@
+/* Reverse Huffman table generator for HPACK decoder - 2017-05-19 Willy Tarreau
+ *
+ * rht_bit31_24[256] is indexed on bits 31..24 when < 0xfe
+ * rht_bit24_17[256] is indexed on bits 24..17 when 31..24 >= 0xfe
+ * rht_bit15_11_fe[32] is indexed on bits 15..11 when 24..17 == 0xfe
+ * rht_bit15_8[256] is indexed on bits 15..8 when 24..17 == 0xff
+ * rht_bit11_4[256] is indexed on bits 11..4 when 15..8 == 0xff
+ * when 11..4 == 0xff, 3..2 provide the following mapping :
+ * 00 => 0x0a, 01 => 0x0d, 10 => 0x16, 11 => EOS
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* from RFC7541 Appendix B */
+static const struct huff {
+ uint32_t c; /* code point */
+ int b; /* bits */
+} ht[257] = {
+ [0] = { .c = 0x00001ff8, .b = 13 },
+ [1] = { .c = 0x007fffd8, .b = 23 },
+ [2] = { .c = 0x0fffffe2, .b = 28 },
+ [3] = { .c = 0x0fffffe3, .b = 28 },
+ [4] = { .c = 0x0fffffe4, .b = 28 },
+ [5] = { .c = 0x0fffffe5, .b = 28 },
+ [6] = { .c = 0x0fffffe6, .b = 28 },
+ [7] = { .c = 0x0fffffe7, .b = 28 },
+ [8] = { .c = 0x0fffffe8, .b = 28 },
+ [9] = { .c = 0x00ffffea, .b = 24 },
+ [10] = { .c = 0x3ffffffc, .b = 30 },
+ [11] = { .c = 0x0fffffe9, .b = 28 },
+ [12] = { .c = 0x0fffffea, .b = 28 },
+ [13] = { .c = 0x3ffffffd, .b = 30 },
+ [14] = { .c = 0x0fffffeb, .b = 28 },
+ [15] = { .c = 0x0fffffec, .b = 28 },
+ [16] = { .c = 0x0fffffed, .b = 28 },
+ [17] = { .c = 0x0fffffee, .b = 28 },
+ [18] = { .c = 0x0fffffef, .b = 28 },
+ [19] = { .c = 0x0ffffff0, .b = 28 },
+ [20] = { .c = 0x0ffffff1, .b = 28 },
+ [21] = { .c = 0x0ffffff2, .b = 28 },
+ [22] = { .c = 0x3ffffffe, .b = 30 },
+ [23] = { .c = 0x0ffffff3, .b = 28 },
+ [24] = { .c = 0x0ffffff4, .b = 28 },
+ [25] = { .c = 0x0ffffff5, .b = 28 },
+ [26] = { .c = 0x0ffffff6, .b = 28 },
+ [27] = { .c = 0x0ffffff7, .b = 28 },
+ [28] = { .c = 0x0ffffff8, .b = 28 },
+ [29] = { .c = 0x0ffffff9, .b = 28 },
+ [30] = { .c = 0x0ffffffa, .b = 28 },
+ [31] = { .c = 0x0ffffffb, .b = 28 },
+ [32] = { .c = 0x00000014, .b = 6 },
+ [33] = { .c = 0x000003f8, .b = 10 },
+ [34] = { .c = 0x000003f9, .b = 10 },
+ [35] = { .c = 0x00000ffa, .b = 12 },
+ [36] = { .c = 0x00001ff9, .b = 13 },
+ [37] = { .c = 0x00000015, .b = 6 },
+ [38] = { .c = 0x000000f8, .b = 8 },
+ [39] = { .c = 0x000007fa, .b = 11 },
+ [40] = { .c = 0x000003fa, .b = 10 },
+ [41] = { .c = 0x000003fb, .b = 10 },
+ [42] = { .c = 0x000000f9, .b = 8 },
+ [43] = { .c = 0x000007fb, .b = 11 },
+ [44] = { .c = 0x000000fa, .b = 8 },
+ [45] = { .c = 0x00000016, .b = 6 },
+ [46] = { .c = 0x00000017, .b = 6 },
+ [47] = { .c = 0x00000018, .b = 6 },
+ [48] = { .c = 0x00000000, .b = 5 },
+ [49] = { .c = 0x00000001, .b = 5 },
+ [50] = { .c = 0x00000002, .b = 5 },
+ [51] = { .c = 0x00000019, .b = 6 },
+ [52] = { .c = 0x0000001a, .b = 6 },
+ [53] = { .c = 0x0000001b, .b = 6 },
+ [54] = { .c = 0x0000001c, .b = 6 },
+ [55] = { .c = 0x0000001d, .b = 6 },
+ [56] = { .c = 0x0000001e, .b = 6 },
+ [57] = { .c = 0x0000001f, .b = 6 },
+ [58] = { .c = 0x0000005c, .b = 7 },
+ [59] = { .c = 0x000000fb, .b = 8 },
+ [60] = { .c = 0x00007ffc, .b = 15 },
+ [61] = { .c = 0x00000020, .b = 6 },
+ [62] = { .c = 0x00000ffb, .b = 12 },
+ [63] = { .c = 0x000003fc, .b = 10 },
+ [64] = { .c = 0x00001ffa, .b = 13 },
+ [65] = { .c = 0x00000021, .b = 6 },
+ [66] = { .c = 0x0000005d, .b = 7 },
+ [67] = { .c = 0x0000005e, .b = 7 },
+ [68] = { .c = 0x0000005f, .b = 7 },
+ [69] = { .c = 0x00000060, .b = 7 },
+ [70] = { .c = 0x00000061, .b = 7 },
+ [71] = { .c = 0x00000062, .b = 7 },
+ [72] = { .c = 0x00000063, .b = 7 },
+ [73] = { .c = 0x00000064, .b = 7 },
+ [74] = { .c = 0x00000065, .b = 7 },
+ [75] = { .c = 0x00000066, .b = 7 },
+ [76] = { .c = 0x00000067, .b = 7 },
+ [77] = { .c = 0x00000068, .b = 7 },
+ [78] = { .c = 0x00000069, .b = 7 },
+ [79] = { .c = 0x0000006a, .b = 7 },
+ [80] = { .c = 0x0000006b, .b = 7 },
+ [81] = { .c = 0x0000006c, .b = 7 },
+ [82] = { .c = 0x0000006d, .b = 7 },
+ [83] = { .c = 0x0000006e, .b = 7 },
+ [84] = { .c = 0x0000006f, .b = 7 },
+ [85] = { .c = 0x00000070, .b = 7 },
+ [86] = { .c = 0x00000071, .b = 7 },
+ [87] = { .c = 0x00000072, .b = 7 },
+ [88] = { .c = 0x000000fc, .b = 8 },
+ [89] = { .c = 0x00000073, .b = 7 },
+ [90] = { .c = 0x000000fd, .b = 8 },
+ [91] = { .c = 0x00001ffb, .b = 13 },
+ [92] = { .c = 0x0007fff0, .b = 19 },
+ [93] = { .c = 0x00001ffc, .b = 13 },
+ [94] = { .c = 0x00003ffc, .b = 14 },
+ [95] = { .c = 0x00000022, .b = 6 },
+ [96] = { .c = 0x00007ffd, .b = 15 },
+ [97] = { .c = 0x00000003, .b = 5 },
+ [98] = { .c = 0x00000023, .b = 6 },
+ [99] = { .c = 0x00000004, .b = 5 },
+ [100] = { .c = 0x00000024, .b = 6 },
+ [101] = { .c = 0x00000005, .b = 5 },
+ [102] = { .c = 0x00000025, .b = 6 },
+ [103] = { .c = 0x00000026, .b = 6 },
+ [104] = { .c = 0x00000027, .b = 6 },
+ [105] = { .c = 0x00000006, .b = 5 },
+ [106] = { .c = 0x00000074, .b = 7 },
+ [107] = { .c = 0x00000075, .b = 7 },
+ [108] = { .c = 0x00000028, .b = 6 },
+ [109] = { .c = 0x00000029, .b = 6 },
+ [110] = { .c = 0x0000002a, .b = 6 },
+ [111] = { .c = 0x00000007, .b = 5 },
+ [112] = { .c = 0x0000002b, .b = 6 },
+ [113] = { .c = 0x00000076, .b = 7 },
+ [114] = { .c = 0x0000002c, .b = 6 },
+ [115] = { .c = 0x00000008, .b = 5 },
+ [116] = { .c = 0x00000009, .b = 5 },
+ [117] = { .c = 0x0000002d, .b = 6 },
+ [118] = { .c = 0x00000077, .b = 7 },
+ [119] = { .c = 0x00000078, .b = 7 },
+ [120] = { .c = 0x00000079, .b = 7 },
+ [121] = { .c = 0x0000007a, .b = 7 },
+ [122] = { .c = 0x0000007b, .b = 7 },
+ [123] = { .c = 0x00007ffe, .b = 15 },
+ [124] = { .c = 0x000007fc, .b = 11 },
+ [125] = { .c = 0x00003ffd, .b = 14 },
+ [126] = { .c = 0x00001ffd, .b = 13 },
+ [127] = { .c = 0x0ffffffc, .b = 28 },
+ [128] = { .c = 0x000fffe6, .b = 20 },
+ [129] = { .c = 0x003fffd2, .b = 22 },
+ [130] = { .c = 0x000fffe7, .b = 20 },
+ [131] = { .c = 0x000fffe8, .b = 20 },
+ [132] = { .c = 0x003fffd3, .b = 22 },
+ [133] = { .c = 0x003fffd4, .b = 22 },
+ [134] = { .c = 0x003fffd5, .b = 22 },
+ [135] = { .c = 0x007fffd9, .b = 23 },
+ [136] = { .c = 0x003fffd6, .b = 22 },
+ [137] = { .c = 0x007fffda, .b = 23 },
+ [138] = { .c = 0x007fffdb, .b = 23 },
+ [139] = { .c = 0x007fffdc, .b = 23 },
+ [140] = { .c = 0x007fffdd, .b = 23 },
+ [141] = { .c = 0x007fffde, .b = 23 },
+ [142] = { .c = 0x00ffffeb, .b = 24 },
+ [143] = { .c = 0x007fffdf, .b = 23 },
+ [144] = { .c = 0x00ffffec, .b = 24 },
+ [145] = { .c = 0x00ffffed, .b = 24 },
+ [146] = { .c = 0x003fffd7, .b = 22 },
+ [147] = { .c = 0x007fffe0, .b = 23 },
+ [148] = { .c = 0x00ffffee, .b = 24 },
+ [149] = { .c = 0x007fffe1, .b = 23 },
+ [150] = { .c = 0x007fffe2, .b = 23 },
+ [151] = { .c = 0x007fffe3, .b = 23 },
+ [152] = { .c = 0x007fffe4, .b = 23 },
+ [153] = { .c = 0x001fffdc, .b = 21 },
+ [154] = { .c = 0x003fffd8, .b = 22 },
+ [155] = { .c = 0x007fffe5, .b = 23 },
+ [156] = { .c = 0x003fffd9, .b = 22 },
+ [157] = { .c = 0x007fffe6, .b = 23 },
+ [158] = { .c = 0x007fffe7, .b = 23 },
+ [159] = { .c = 0x00ffffef, .b = 24 },
+ [160] = { .c = 0x003fffda, .b = 22 },
+ [161] = { .c = 0x001fffdd, .b = 21 },
+ [162] = { .c = 0x000fffe9, .b = 20 },
+ [163] = { .c = 0x003fffdb, .b = 22 },
+ [164] = { .c = 0x003fffdc, .b = 22 },
+ [165] = { .c = 0x007fffe8, .b = 23 },
+ [166] = { .c = 0x007fffe9, .b = 23 },
+ [167] = { .c = 0x001fffde, .b = 21 },
+ [168] = { .c = 0x007fffea, .b = 23 },
+ [169] = { .c = 0x003fffdd, .b = 22 },
+ [170] = { .c = 0x003fffde, .b = 22 },
+ [171] = { .c = 0x00fffff0, .b = 24 },
+ [172] = { .c = 0x001fffdf, .b = 21 },
+ [173] = { .c = 0x003fffdf, .b = 22 },
+ [174] = { .c = 0x007fffeb, .b = 23 },
+ [175] = { .c = 0x007fffec, .b = 23 },
+ [176] = { .c = 0x001fffe0, .b = 21 },
+ [177] = { .c = 0x001fffe1, .b = 21 },
+ [178] = { .c = 0x003fffe0, .b = 22 },
+ [179] = { .c = 0x001fffe2, .b = 21 },
+ [180] = { .c = 0x007fffed, .b = 23 },
+ [181] = { .c = 0x003fffe1, .b = 22 },
+ [182] = { .c = 0x007fffee, .b = 23 },
+ [183] = { .c = 0x007fffef, .b = 23 },
+ [184] = { .c = 0x000fffea, .b = 20 },
+ [185] = { .c = 0x003fffe2, .b = 22 },
+ [186] = { .c = 0x003fffe3, .b = 22 },
+ [187] = { .c = 0x003fffe4, .b = 22 },
+ [188] = { .c = 0x007ffff0, .b = 23 },
+ [189] = { .c = 0x003fffe5, .b = 22 },
+ [190] = { .c = 0x003fffe6, .b = 22 },
+ [191] = { .c = 0x007ffff1, .b = 23 },
+ [192] = { .c = 0x03ffffe0, .b = 26 },
+ [193] = { .c = 0x03ffffe1, .b = 26 },
+ [194] = { .c = 0x000fffeb, .b = 20 },
+ [195] = { .c = 0x0007fff1, .b = 19 },
+ [196] = { .c = 0x003fffe7, .b = 22 },
+ [197] = { .c = 0x007ffff2, .b = 23 },
+ [198] = { .c = 0x003fffe8, .b = 22 },
+ [199] = { .c = 0x01ffffec, .b = 25 },
+ [200] = { .c = 0x03ffffe2, .b = 26 },
+ [201] = { .c = 0x03ffffe3, .b = 26 },
+ [202] = { .c = 0x03ffffe4, .b = 26 },
+ [203] = { .c = 0x07ffffde, .b = 27 },
+ [204] = { .c = 0x07ffffdf, .b = 27 },
+ [205] = { .c = 0x03ffffe5, .b = 26 },
+ [206] = { .c = 0x00fffff1, .b = 24 },
+ [207] = { .c = 0x01ffffed, .b = 25 },
+ [208] = { .c = 0x0007fff2, .b = 19 },
+ [209] = { .c = 0x001fffe3, .b = 21 },
+ [210] = { .c = 0x03ffffe6, .b = 26 },
+ [211] = { .c = 0x07ffffe0, .b = 27 },
+ [212] = { .c = 0x07ffffe1, .b = 27 },
+ [213] = { .c = 0x03ffffe7, .b = 26 },
+ [214] = { .c = 0x07ffffe2, .b = 27 },
+ [215] = { .c = 0x00fffff2, .b = 24 },
+ [216] = { .c = 0x001fffe4, .b = 21 },
+ [217] = { .c = 0x001fffe5, .b = 21 },
+ [218] = { .c = 0x03ffffe8, .b = 26 },
+ [219] = { .c = 0x03ffffe9, .b = 26 },
+ [220] = { .c = 0x0ffffffd, .b = 28 },
+ [221] = { .c = 0x07ffffe3, .b = 27 },
+ [222] = { .c = 0x07ffffe4, .b = 27 },
+ [223] = { .c = 0x07ffffe5, .b = 27 },
+ [224] = { .c = 0x000fffec, .b = 20 },
+ [225] = { .c = 0x00fffff3, .b = 24 },
+ [226] = { .c = 0x000fffed, .b = 20 },
+ [227] = { .c = 0x001fffe6, .b = 21 },
+ [228] = { .c = 0x003fffe9, .b = 22 },
+ [229] = { .c = 0x001fffe7, .b = 21 },
+ [230] = { .c = 0x001fffe8, .b = 21 },
+ [231] = { .c = 0x007ffff3, .b = 23 },
+ [232] = { .c = 0x003fffea, .b = 22 },
+ [233] = { .c = 0x003fffeb, .b = 22 },
+ [234] = { .c = 0x01ffffee, .b = 25 },
+ [235] = { .c = 0x01ffffef, .b = 25 },
+ [236] = { .c = 0x00fffff4, .b = 24 },
+ [237] = { .c = 0x00fffff5, .b = 24 },
+ [238] = { .c = 0x03ffffea, .b = 26 },
+ [239] = { .c = 0x007ffff4, .b = 23 },
+ [240] = { .c = 0x03ffffeb, .b = 26 },
+ [241] = { .c = 0x07ffffe6, .b = 27 },
+ [242] = { .c = 0x03ffffec, .b = 26 },
+ [243] = { .c = 0x03ffffed, .b = 26 },
+ [244] = { .c = 0x07ffffe7, .b = 27 },
+ [245] = { .c = 0x07ffffe8, .b = 27 },
+ [246] = { .c = 0x07ffffe9, .b = 27 },
+ [247] = { .c = 0x07ffffea, .b = 27 },
+ [248] = { .c = 0x07ffffeb, .b = 27 },
+ [249] = { .c = 0x0ffffffe, .b = 28 },
+ [250] = { .c = 0x07ffffec, .b = 27 },
+ [251] = { .c = 0x07ffffed, .b = 27 },
+ [252] = { .c = 0x07ffffee, .b = 27 },
+ [253] = { .c = 0x07ffffef, .b = 27 },
+ [254] = { .c = 0x07fffff0, .b = 27 },
+ [255] = { .c = 0x03ffffee, .b = 26 },
+ [256] = { .c = 0x3fffffff, .b = 30 }, /* EOS */
+};
+
+
+/* Generates on stdout the five reverse lookup tables described at the top of
+ * this file. The matching rule is the same for every table: each huffman
+ * code is left-aligned on 32 bits in <c>, then compared to the candidate
+ * index <j> shifted to the same bit position; only the bits covered both by
+ * the code's own length mask (-(1 << (32 - bits))) and by the table's bit
+ * window are compared. The first matching symbol is emitted and the scan
+ * moves on to the next index.
+ */
+int main(int argc, char **argv)
+{
+ uint32_t c, i, j;
+
+ /* fill first byte */
+ /* codes of up to 8 bits, indexed on bits 31..24 */
+ printf("struct rht rht_bit31_24[256] = {\n");
+ for (j = 0; j < 256; j++) {
+ for (i = 0; i < sizeof(ht)/sizeof(ht[0]); i++) {
+ if (ht[i].b > 8)
+ continue;
+ c = ht[i].c << (32 - ht[i].b);
+
+ if (((c ^ (j << 24)) & -(1 << (32 - ht[i].b)) & 0xff000000) == 0) {
+ printf("\t[0x%02x] = { .c = 0x%02x, .l = %d },\n", j, i, ht[i].b);
+ break;
+ }
+ }
+ }
+ printf("};\n\n");
+
+ /* codes of 9..16 bits, indexed on bits 24..17 */
+ printf("struct rht rht_bit24_17[256] = {\n");
+ for (j = 0; j < 256; j++) {
+ for (i = 0; i < sizeof(ht)/sizeof(ht[0]); i++) {
+ if (ht[i].b <= 8 || ht[i].b > 16)
+ continue;
+ c = ht[i].c << (32 - ht[i].b);
+
+ if (((c ^ (j << 17)) & -(1 << (32 - ht[i].b)) & 0x01fe0000) == 0) {
+ printf("\t[0x%02x] = { .c = 0x%02x, .l = %d },\n", j, i, ht[i].b);
+ break;
+ }
+ }
+ }
+ printf("};\n\n");
+
+ /* codes of 17..21 bits whose third byte is 0xfe, indexed on bits 15..11 */
+ printf("struct rht rht_bit15_11_fe[32] = {\n");
+ for (j = 0; j < 32; j++) {
+ for (i = 0; i < sizeof(ht)/sizeof(ht[0]); i++) {
+ if (ht[i].b <= 16 || ht[i].b > 21)
+ continue;
+ c = ht[i].c << (32 - ht[i].b);
+ if ((c & 0x00ff0000) != 0x00fe0000)
+ continue;
+
+ if (((c ^ (j << 11)) & -(1 << (32 - ht[i].b)) & 0x0000f800) == 0) {
+ printf("\t[0x%02x] = { .c = 0x%02x, .l = %d },\n", j, i, ht[i].b);
+ break;
+ }
+ }
+ }
+ printf("};\n\n");
+
+ /* codes of 17..24 bits whose third byte is 0xff, indexed on bits 15..8 */
+ printf("struct rht rht_bit15_8[256] = {\n");
+ for (j = 0; j < 256; j++) {
+ for (i = 0; i < sizeof(ht)/sizeof(ht[0]); i++) {
+ if (ht[i].b <= 16 || ht[i].b > 24)
+ continue;
+ c = ht[i].c << (32 - ht[i].b);
+ if ((c & 0x00ff0000) != 0x00ff0000)
+ continue;
+
+ if (((c ^ (j << 8)) & -(1 << (32 - ht[i].b)) & 0x0000ff00) == 0) {
+ printf("\t[0x%02x] = { .c = 0x%02x, .l = %d },\n", j, i, ht[i].b);
+ break;
+ }
+ }
+ }
+ printf("};\n\n");
+
+ /* codes of more than 24 bits, indexed on bits 11..4 */
+ printf("struct rht rht_bit11_4[256] = {\n");
+ /* fill fourth byte after 0xff 0xff 0xf6-0xff. Only 0xfffffffx are not distinguished */
+ for (j = 0; j < 256; j++) {
+ for (i = 0; i < sizeof(ht)/sizeof(ht[0]); i++) {
+ if (ht[i].b <= 24)
+ continue;
+ c = ht[i].c << (32 - ht[i].b);
+
+ if (((c ^ (j << 4)) & -(1 << (32 - ht[i].b)) & 0x00000ff0) == 0) {
+ //printf("\tj=%02x i=%02x c=%08x l=%d c/l=%08x j/l=%08x xor=%08x\n", j, i, c, ht[i].b, c & -(1 << (32 - ht[i].b)), ((j << 4) & -(1 << (32 - ht[i].b))), (c ^ (j << 4)) & -(1 << (32 - ht[i].b)));
+ printf("\t[0x%02x] = { .c = 0x%02x, .l = %d },\n", j, i, ht[i].b);
+ break;
+ }
+ }
+ }
+ printf("\t/* Note, when l==30, bits 3..2 give 00:0x0a, 01:0x0d, 10:0x16, 11:EOS */\n");
+ printf("};\n\n");
+ return 0;
+}
diff --git a/dev/plug_qdisc/README b/dev/plug_qdisc/README
new file mode 100644
index 0000000..ccc9bd0
--- /dev/null
+++ b/dev/plug_qdisc/README
@@ -0,0 +1,59 @@
+ ** Plug queueing disciplines **
+
+ The 'plug' qdisc type is not documented. It is even not supported
+ by traffic shaping tools like 'tc' from iproute2 package.
+
+ Such qdiscs have already been used by Yelp engineers but outside
+ of haproxy with libnl-utils tools (especially nl-qdisc-* tools)
+ to implement a workaround and make haproxy reloads work.
+
+ Indeed with such plug qdiscs coupled with iptables configurations
+ we are able to temporarily bufferize IP packets and to release them as
+ needed. So, they may be very useful to "synchronize" TCP sessions
+ or at higher level to put network applications in states approaching
+ the ones suspected to occur during bugs. Furthermore to be sure
+ to produce a correct bug fix, it may be useful to reproduce
+ as much as needed such painful bugs. This is where plug qdiscs
+ may be useful.
+
+ To have an idea about how to use plug qdisc on the command line I highly recommend to
+ read Willy Tarreau blog here:
+
+ https://www.haproxy.com/blog/truly-seamless-reloads-with-haproxy-no-more-hacks/
+
+ which refers to this other one from Yelp:
+
+ https://engineeringblog.yelp.com/2015/04/true-zero-downtime-haproxy-reloads.html
+
+ The code found in plug_qdisc.c file already helped in fixing a painful bug hard to
+ fix because it was hard to reproduce. Using the API it exports is quite easy:
+
+ - First your program must call plug_qdisc_attach() to create, if not already
+ created, a plug qdisc and use it (must be done during your application's own
+ already existing initializations).
+ Note that this function calls plug_qdisc_release_indefinite_buffer() so that to
+ release already buffered packets before you start your application,
+
+ - then call plug_qdisc_plug_buffer() to start buffering packets incoming to your
+ plug qdisc. So they won't be delivered to your application,
+
+ - then call plug_qdisc_release_indefinite_buffer() to stop buffering the packets
+ incoming to your plug qdisc and release those already buffered.
+ so that they can be delivered to your application.
+
+ This code is short and simple. But uses several libraries especially libnl-route module
+ part of libnl library. To compile haproxy and make it use the plug_qdisc.c code we had
+ to link it against several libnl3 library modules like that:
+
+ -lnl-genl-3 -lnl-route-3 -lnl-3 -lnl-cli-3
+
+
+ - Some references:
+ Libnl API documentation may be found here:
+ https://www.infradead.org/~tgr/libnl/doc/api/index.html
+
+ Kernel sources:
+ http://elixir.free-electrons.com/linux/latest/source/net/sched/sch_plug.c
+
+ Nice website about traffic shaping with queuing disciplines:
+ http://wiki.linuxwall.info/doku.php/en:ressources:dossiers:networking:traffic_control
diff --git a/dev/plug_qdisc/plug_qdisc.c b/dev/plug_qdisc/plug_qdisc.c
new file mode 100644
index 0000000..bc47f5d
--- /dev/null
+++ b/dev/plug_qdisc/plug_qdisc.c
@@ -0,0 +1,86 @@
+#include <inttypes.h>
+#include <netlink/cache.h>
+#include <netlink/cli/utils.h>
+#include <netlink/cli/tc.h>
+#include <netlink/cli/qdisc.h>
+#include <netlink/cli/link.h>
+#include <netlink/route/qdisc/plug.h>
+
+/*
+ * XXX Please, first note that this code is not safe. XXX
+ * It was developed fast so that to reproduce a bug.
+ * You will certainly have to adapt it to your application.
+ * But at least it gives an idea about how to programmatically use plug
+ * queueing disciplines.
+ */
+
+static struct nl_sock *nl_sock;
+static struct nl_cache *link_cache;
+static struct rtnl_qdisc *qdisc;
+static struct rtnl_tc *tc;
+
+/* Lazily allocate and connect the netlink objects used by the functions
+ * below: a NETLINK_ROUTE socket, the link cache and the qdisc object whose
+ * rtnl_tc handle is kept in <tc> for convenience.
+ * Always returns 0. NOTE(review): the returns of the nl_cli_* helpers are
+ * not checked here; they appear to abort the process themselves on
+ * allocation/connection failure -- to be confirmed against the libnl-cli
+ * documentation.
+ */
+static int qdisc_init(void)
+{
+ nl_sock = nl_cli_alloc_socket();
+ nl_cli_connect(nl_sock, NETLINK_ROUTE);
+ link_cache = nl_cli_link_alloc_cache(nl_sock);
+ qdisc = nl_cli_qdisc_alloc();
+ tc = (struct rtnl_tc *)qdisc;
+
+ return 0;
+}
+
+/* Stop buffering and release all buffered and incoming 'qdisc'
+ * queueing discipline traffic.
+ * Returns the rtnl_qdisc_add() status: 0 on success, a negative libnl
+ * error code otherwise.
+ */
+int plug_qdisc_release_indefinite_buffer(void)
+{
+ rtnl_qdisc_plug_release_indefinite(qdisc);
+ return rtnl_qdisc_add(nl_sock, qdisc, 0);
+}
+
+/* Start buffering incoming 'qdisc' queueing discipline traffic.
+ * Returns the rtnl_qdisc_add() status: 0 on success, a negative libnl
+ * error code otherwise.
+ */
+int plug_qdisc_plug_buffer(void)
+{
+ rtnl_qdisc_plug_buffer(qdisc);
+ return rtnl_qdisc_add(nl_sock, qdisc, 0);
+}
+
+/* Create a plug qdisc attached to 'device' network device with 'parent'
+ * as parent, with 'id' as ID and 'limit' as buffer size.
+ * This is equivalent to use nl-qdisc-add tool like that:
+ * $ nl-qdisc-add --dev=<device> --parent=<parent> --id=<id> plug --limit <limit>
+ * $ nl-qdisc-add --dev=<device> --parent=<parent> --id=<id> --update plug --release-indefinite
+ * Returns 0 on success, -1 on failure (netlink initialization error,
+ * unresolved device or parent, or qdisc creation refused by the kernel).
+ */
+int plug_qdisc_attach(char *device, char *parent, char *id, uint32_t limit)
+{
+ int ret;
+
+ /* lazily create the netlink socket and qdisc objects on first use */
+ if (!tc && qdisc_init() == -1)
+ return -1;
+
+ nl_cli_tc_parse_dev(tc, link_cache, device);
+ nl_cli_tc_parse_parent(tc, parent);
+ if (!rtnl_tc_get_ifindex(tc))
+ return -1;
+
+ if (!rtnl_tc_get_parent(tc))
+ return -1;
+ if (id)
+ nl_cli_tc_parse_handle(tc, id, 1);
+
+ rtnl_tc_set_kind(tc, "plug");
+ if (limit)
+ rtnl_qdisc_plug_set_limit(qdisc, limit);
+
+ ret = rtnl_qdisc_add(nl_sock, qdisc, NLM_F_CREATE);
+ if (ret < 0) {
+ /* fixed: message previously read "Could add attach qdisc" */
+ fprintf(stderr, "Could not attach plug qdisc: %s\n", nl_geterror(ret));
+ return -1;
+ }
+ /* Release any already buffered packets; best effort, status ignored. */
+ plug_qdisc_release_indefinite_buffer();
+
+ return 0;
+}
+
diff --git a/dev/poll/Makefile b/dev/poll/Makefile
new file mode 100644
index 0000000..0247099
--- /dev/null
+++ b/dev/poll/Makefile
@@ -0,0 +1,13 @@
+include ../../include/make/verbose.mk
+
+CC = cc
+OPTIMIZE = -O2 -g
+DEFINE =
+INCLUDE =
+OBJS = poll
+
+poll: poll.c
+ $(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -o $@ $^
+
+clean:
+ rm -f $(OBJS) *.[oas] *~
diff --git a/dev/poll/poll.c b/dev/poll/poll.c
new file mode 100644
index 0000000..022c039
--- /dev/null
+++ b/dev/poll/poll.c
@@ -0,0 +1,445 @@
+#define _GNU_SOURCE // for POLLRDHUP
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#ifdef __linux__
+#include <sys/epoll.h>
+#endif
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* for OSes which don't have it */
+#ifndef POLLRDHUP
+#define POLLRDHUP 0
+#endif
+
+#ifndef MSG_NOSIGNAL
+#define MSG_NOSIGNAL 0
+#endif
+#ifndef MSG_MORE
+#define MSG_MORE 0
+#endif
+
+int verbose = 0;
+int cmd = 0;
+int cmdstep = 0;
+int zero = 0;
+int one = 1;
+int lfd = -1;
+int cfd = -1;
+int sfd = -1;
+int connected = 0;
+int use_epoll = 0;
+struct sockaddr_in saddr, caddr;
+socklen_t salen, calen;
+
+/* returns a one-letter tag identifying which of the three global sockets
+ * <fd> is (l=listener, s=server side, c=client side), for trace output.
+ */
+static inline const char *side(int fd)
+{
+ if (fd == lfd)
+ return "l";
+ if (fd == sfd)
+ return "s";
+ if (fd == cfd)
+ return "c";
+ return "?";
+}
+
+/* prints the command line help to stdout */
+void usage(const char *arg0)
+{
+ printf("Usage: %s [ arg [<action>[,...]] ] ...\n"
+ "args:\n"
+ " -h display this help\n"
+ " -v verbose mode (shows ret values)\n"
+ " -e use epoll instead of poll\n"
+ " -c <actions> perform <action> on client side socket\n"
+ " -s <actions> perform <action> on server side socket\n"
+ " -l <actions> perform <action> on listening socket\n"
+ "\n"
+ "actions for -c/-s/-l (multiple may be delimited by commas) :\n"
+ " con connect to listener, implicit before first -c/-s\n"
+ " acc accept on listener, implicit before first -s\n"
+ " snd send a few bytes of data\n"
+ " mor send a few bytes of data with MSG_MORE\n"
+ " rcv receive a few bytes of data\n"
+ " drn drain: receive till zero\n"
+ " shr SHUT_RD : shutdown read side\n"
+ " shw SHUT_WR : shutdown write side\n"
+ " shb SHUT_RDWR : shutdown both sides\n"
+ " lin disable lingering on the socket\n"
+ " clo close the file descriptor\n"
+ " pol poll() for any event\n"
+ "\n", arg0);
+}
+
+/* prints the optional message <msg> to stderr and exits with status 1 */
+void die(const char *msg)
+{
+ if (msg)
+ fprintf(stderr, "%s\n", msg);
+ exit(1);
+}
+
+/* returns " (<errno text>)" when <ret> indicates a failure (<0), or an
+ * empty string otherwise. Not reentrant (static buffer).
+ */
+const char *get_errno(int ret)
+{
+ static char errmsg[100];
+
+ if (ret >= 0)
+ return "";
+
+ snprintf(errmsg, sizeof(errmsg), " (%s)", strerror(errno));
+ return errmsg;
+}
+
+/* accept a pending connection. Note that accept() is always performed on
+ * the listener lfd whatever <fd> is: <fd> only serves the trace output.
+ * The first accepted fd is recorded as the server-side socket sfd.
+ * In all traces below, __FUNCTION__ + 3 skips the "do_" prefix.
+ */
+void do_acc(int fd)
+{
+ int ret;
+
+ calen = sizeof(caddr);
+ ret = accept(lfd, (struct sockaddr*)&caddr, &calen);
+ if (sfd < 0)
+ sfd = ret;
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* connect the client socket cfd to the listener's address. Note that
+ * <connected> is set even if connect() failed, so the implicit connection
+ * is only ever attempted once.
+ */
+void do_con(int fd)
+{
+ int ret;
+
+ ret = connect(cfd, (const struct sockaddr*)&saddr, salen);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+ connected = 1;
+}
+
+/* send 3 bytes ("foo") on <fd> without blocking */
+void do_snd(int fd)
+{
+ int ret;
+
+ ret = send(fd, "foo", 3, MSG_NOSIGNAL|MSG_DONTWAIT);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* same as do_snd() but with MSG_MORE to hint that more data will follow */
+void do_mor(int fd)
+{
+ int ret;
+
+ ret = send(fd, "foo", 3, MSG_NOSIGNAL|MSG_DONTWAIT|MSG_MORE);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* receive up to 10 bytes from <fd> without blocking */
+void do_rcv(int fd)
+{
+ char buf[10];
+ int ret;
+
+ ret = recv(fd, buf, sizeof(buf), MSG_DONTWAIT);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* drain <fd>: blocking recv() loop until zero or error. <total> stays -1
+ * when nothing at all was received, letting the trace distinguish "nothing
+ * drained" from "zero bytes"; the errno shown comes from the last recv().
+ */
+void do_drn(int fd)
+{
+ char buf[16384];
+ int total = -1;
+ int ret;
+
+ while (1) {
+ ret = recv(fd, buf, sizeof(buf), 0);
+ if (ret <= 0)
+ break;
+ if (total < 0)
+ total = 0;
+ total += ret;
+ }
+
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, total, get_errno(ret));
+}
+
+/* shutdown(SHUT_RD) on <fd> */
+void do_shr(int fd)
+{
+ int ret;
+
+ ret = shutdown(fd, SHUT_RD);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* shutdown(SHUT_WR) on <fd> */
+void do_shw(int fd)
+{
+ int ret;
+
+ ret = shutdown(fd, SHUT_WR);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* shutdown(SHUT_RDWR) on <fd> */
+void do_shb(int fd)
+{
+ int ret;
+
+ ret = shutdown(fd, SHUT_RDWR);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* set a zero-timeout linger on <fd>, so that close() immediately discards
+ * pending data (typically resulting in a reset -- platform dependent).
+ */
+void do_lin(int fd)
+{
+ struct linger nolinger = { .l_onoff = 1, .l_linger = 0 };
+ int ret;
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_LINGER, &nolinger, sizeof(nolinger));
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* close <fd> */
+void do_clo(int fd)
+{
+ int ret;
+
+ ret = close(fd);
+ if (verbose)
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s\n", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret));
+}
+
+/* non-blocking (0 ms timeout) poll of <fd> for any event, reported in the
+ * trace with decoded flag names. With -e on Linux, an epoll instance
+ * (created once, kept in a static fd) is used instead: the "while" below
+ * acts as a breakable block so that an epoll_create() failure falls back
+ * to the plain poll() path.
+ */
+void do_pol(int fd)
+{
+ struct pollfd fds = { .fd = fd, .events = POLLIN|POLLOUT|POLLRDHUP, .revents=0 };
+ int flags, flag;
+ int ret;
+
+#ifdef __linux__
+ while (use_epoll) {
+ struct epoll_event evt;
+ static int epoll_fd = -1;
+
+ if (epoll_fd == -1)
+ epoll_fd = epoll_create(1024);
+ if (epoll_fd == -1)
+ break;
+ evt.events = EPOLLIN | EPOLLOUT | EPOLLRDHUP;
+ evt.data.fd = fd;
+ epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &evt);
+ ret = epoll_wait(epoll_fd, &evt, 1, 0);
+
+ if (verbose) {
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s ev=%#x ", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret), ret > 0 ? evt.events : 0);
+ if (ret > 0 && evt.events) {
+ putchar('(');
+
+ for (flags = evt.events; flags; flags ^= flag) {
+ flag = flags ^ (flags & (flags - 1)); // keep lowest bit only
+ switch (flag) {
+ case EPOLLIN: printf("IN"); break;
+ case EPOLLOUT: printf("OUT"); break;
+ case EPOLLPRI: printf("PRI"); break;
+ case EPOLLHUP: printf("HUP"); break;
+ case EPOLLERR: printf("ERR"); break;
+ case EPOLLRDHUP: printf("RDHUP"); break;
+ default: printf("???[%#x]", flag); break;
+ }
+ if (flags ^ flag)
+ putchar(' ');
+ }
+ putchar(')');
+ }
+ putchar('\n');
+ }
+
+ /* deregister so that the next call starts from a clean state */
+ evt.data.fd = fd;
+ epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, &evt);
+ return;
+ }
+#endif
+ ret = poll(&fds, 1, 0);
+ if (verbose) {
+ printf("cmd #%d stp #%d: %s(%s=%d): ret=%d%s ev=%#x ", cmd, cmdstep, __FUNCTION__ + 3, side(fd), fd, ret, get_errno(ret), ret > 0 ? fds.revents : 0);
+ if (ret > 0 && fds.revents) {
+ putchar('(');
+
+ for (flags = fds.revents; flags; flags ^= flag) {
+ flag = flags ^ (flags & (flags - 1)); // keep lowest bit only
+ switch (flag) {
+ case POLLIN: printf("IN"); break;
+ case POLLOUT: printf("OUT"); break;
+ case POLLPRI: printf("PRI"); break;
+ case POLLHUP: printf("HUP"); break;
+ case POLLERR: printf("ERR"); break;
+ case POLLNVAL: printf("NVAL"); break;
+#if POLLRDHUP
+ case POLLRDHUP: printf("RDHUP"); break;
+#endif
+ default: printf("???[%#x]", flag); break;
+ }
+ if (flags ^ flag)
+ putchar(' ');
+ }
+ putchar(')');
+ }
+ putchar('\n');
+ }
+}
+
+/* Sets up a listening socket on an ephemeral port and a client socket, then
+ * walks the command line: each -c/-s/-l option selects a socket (client,
+ * server side or listener) and applies the comma-delimited action list that
+ * follows it, in order.
+ */
+int main(int argc, char **argv)
+{
+ const char *arg0;
+ char *word, *next;
+ int fd;
+
+ /* listener */
+ /* bound to any address on port 0 (kernel-chosen); the effective address
+ * is read back below so that the client knows where to connect.
+ */
+ lfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (lfd < 0)
+ die("socket(l)");
+
+ setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+
+ memset(&saddr, 0, sizeof(saddr));
+ saddr.sin_family = AF_INET;
+ saddr.sin_port = htons(0);
+ salen = sizeof(saddr);
+
+ if (bind(lfd, (struct sockaddr *)&saddr, salen) < 0)
+ die("bind()");
+
+ if (listen(lfd, 1000) < 0)
+ die("listen()");
+
+ if (getsockname(lfd, (struct sockaddr *)&saddr, &salen) < 0)
+ die("getsockname()");
+
+
+ /* client */
+ cfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (cfd < 0)
+ die("socket(c)");
+
+ arg0 = argv[0];
+ if (argc < 2) {
+ usage(arg0);
+ exit(1);
+ }
+
+ write(1, "#### BEGIN ####\n", 16); // add a visible delimiter in the traces
+
+ while (argc > 1) {
+ argc--; argv++;
+ if (**argv != '-') {
+ usage(arg0);
+ exit(1);
+ }
+
+ /* fd stays -1 for options taking no action list (-h/-v/-e) */
+ fd = -1;
+ switch (argv[0][1]) {
+ case 'h' :
+ usage(arg0);
+ exit(0);
+ break;
+ case 'v' :
+ verbose++;
+ break;
+ case 'e' :
+ use_epoll = 1;
+ break;
+ case 'c' :
+ cmd++; cmdstep = 0;
+ if (!connected) {
+ do_con(cfd);
+ /* connection is pending in accept queue, accept() will either be
+ * explicit with "-l acc" below, or implicit on "-s <cmd>"
+ */
+ }
+ fd = cfd;
+ break;
+ case 's' :
+ cmd++; cmdstep = 0;
+ if (!connected)
+ do_con(cfd);
+ if (sfd < 0)
+ do_acc(lfd);
+ if (sfd < 0)
+ die("accept()");
+ fd = sfd;
+ break;
+ case 'l' :
+ cmd++; cmdstep = 0;
+ fd = lfd;
+ break;
+ default : usage(arg0); exit(1); break;
+ }
+
+ if (fd >= 0) { /* an action is required */
+ if (argc < 2) {
+ usage(arg0);
+ exit(1);
+ }
+
+ /* split the comma-delimited action list in place and run each one */
+ for (word = argv[1]; word && *word; word = next) {
+ next = strchr(word, ',');
+ if (next)
+ *(next++) = 0;
+ cmdstep++;
+ if (strcmp(word, "acc") == 0) {
+ do_acc(fd);
+ }
+ else if (strcmp(word, "con") == 0) {
+ do_con(fd);
+ }
+ else if (strcmp(word, "snd") == 0) {
+ do_snd(fd);
+ }
+ else if (strcmp(word, "mor") == 0) {
+ do_mor(fd);
+ }
+ else if (strcmp(word, "rcv") == 0) {
+ do_rcv(fd);
+ }
+ else if (strcmp(word, "drn") == 0) {
+ do_drn(fd);
+ }
+ else if (strcmp(word, "shb") == 0) {
+ do_shb(fd);
+ }
+ else if (strcmp(word, "shr") == 0) {
+ do_shr(fd);
+ }
+ else if (strcmp(word, "shw") == 0) {
+ do_shw(fd);
+ }
+ else if (strcmp(word, "lin") == 0) {
+ do_lin(fd);
+ }
+ else if (strcmp(word, "clo") == 0) {
+ do_clo(fd);
+ }
+ else if (strcmp(word, "pol") == 0) {
+ do_pol(fd);
+ }
+ else {
+ printf("Ignoring unknown action '%s' in step #%d of cmd #%d\n", word, cmdstep, cmd);
+ }
+ }
+ argc--; argv++;
+ }
+ }
+
+ write(1, "#### END ####\n", 14); // add a visible delimiter in the traces
+
+ if (!cmd) {
+ printf("No command was requested!\n");
+ usage(arg0);
+ exit(1);
+ }
+
+ return 0;
+}
diff --git a/dev/qpack/decode.c b/dev/qpack/decode.c
new file mode 100644
index 0000000..b2d233a
--- /dev/null
+++ b/dev/qpack/decode.c
@@ -0,0 +1,171 @@
+/*
+ * QPACK stream decoder. Decode a series of hex codes on stdin using one line
+ * per H3 HEADERS frame. Silently skip spaces, tabs, CR, '-' and ','.
+ *
+ * Compilation via Makefile
+ *
+ * Example run:
+ * echo 0000d1d7508b089d5c0b8170dc101a699fc15f5085ed6989397f | ./dev/qpack/decode
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define MAX_RQ_SIZE 65536
+#define MAX_HDR_NUM 1000
+
+#define QPACK_STANDALONE
+
+#define USE_OPENSSL
+#define USE_QUIC
+
+#include <haproxy/buf-t.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/qpack-dec.h>
+#include <haproxy/qpack-tbl.h>
+
+char line[MAX_RQ_SIZE * 3 + 3];
+uint8_t bin[MAX_RQ_SIZE];
+
+char tmp_buf[MAX_RQ_SIZE];
+struct buffer buf = { .area = tmp_buf, .data = 0, .size = sizeof(tmp_buf) };
+
+#define DEBUG_QPACK
+#include "../src/hpack-huff.c"
+#include "../src/qpack-dec.c"
+#include "../src/qpack-tbl.c"
+
+/* no-op stub: the BUG_ON/ABORT_NOW statements compiled into the included
+ * haproxy sources reference this symbol; the standalone decoder does not
+ * need a real backtrace.
+ */
+void ha_backtrace_to_stderr(void)
+{
+}
+
+/* taken from dev/hpack/decode.c
+ * Converts the hex string <hex> to its binary form into <bin>, which must be
+ * at least <size> bytes. Spaces, tabs, CR, '-' and ',' are silently skipped;
+ * parsing stops on '\n' or '#'. Returns the number of bytes produced, or
+ * -1 on an invalid character, -2 if <bin> is too small, or -3 if an odd
+ * number of hex digits was read.
+ */
+int hex2bin(const char *hex, uint8_t *bin, int size)
+{
+ int a, b, c;
+ uint8_t code;
+ int len = 0;
+
+ /* a and b hold the pending high and low nibbles, -1 meaning "empty" */
+ a = b = -1;
+
+ for (; *hex; hex++) {
+ c = *hex;
+ if (c == ' ' || c == '\t' || c == '\r' ||
+ c == '-' || c == ',')
+ continue;
+
+ if (c == '\n' || c == '#')
+ break;
+
+ if (c >= '0' && c <= '9')
+ c -= '0';
+ else if (c >= 'a' && c <= 'f')
+ c -= 'a' - 10;
+ else if (c >= 'A' && c <= 'F')
+ c -= 'A' - 10;
+ else
+ return -1;
+
+ if (a == -1)
+ a = c;
+ else
+ b = c;
+
+ if (b == -1)
+ continue;
+
+ /* one full byte collected */
+ code = (a << 4) | b;
+ a = b = -1;
+ if (len >= size)
+ return -2;
+
+ bin[len] = code;
+ len++;
+ }
+ /* a lone pending nibble means an odd number of digits */
+ if (a >= 0 || b >= 0)
+ return -3;
+ return len;
+}
+
+/* taken from src/tools.c
+ * Dumps <len> bytes from <buf> to <out> as 16-byte hexadecimal rows, each
+ * prefixed with <pfx> (may be NULL) and the address derived from <baseaddr>.
+ * Rows are aligned on 16-byte boundaries, so the first row may be padded.
+ * Left half and right half of each row are separated by a '-', and the
+ * printable ASCII rendering follows. NOTE(review): not referenced by main()
+ * here; presumably kept available as a debugging helper.
+ */
+void debug_hexdump(FILE *out, const char *pfx, const char *buf,
+ unsigned int baseaddr, int len)
+{
+ unsigned int i;
+ int b, j;
+
+ for (i = 0; i < (len + (baseaddr & 15)); i += 16) {
+ b = i - (baseaddr & 15);
+ fprintf(out, "%s%08x: ", pfx ? pfx : "", i + (baseaddr & ~15));
+ for (j = 0; j < 8; j++) {
+ if (b + j >= 0 && b + j < len)
+ fprintf(out, "%02x ", (unsigned char)buf[b + j]);
+ else
+ fprintf(out, " ");
+ }
+
+ if (b + j >= 0 && b + j < len)
+ fputc('-', out);
+ else
+ fputc(' ', out);
+
+ for (j = 8; j < 16; j++) {
+ if (b + j >= 0 && b + j < len)
+ fprintf(out, " %02x", (unsigned char)buf[b + j]);
+ else
+ fprintf(out, " ");
+ }
+
+ fprintf(out, " ");
+ for (j = 0; j < 16; j++) {
+ if (b + j >= 0 && b + j < len) {
+ if (isprint((unsigned char)buf[b + j]))
+ fputc((unsigned char)buf[b + j], out);
+ else
+ fputc('.', out);
+ }
+ else
+ fputc(' ', out);
+ }
+ fputc('\n', out);
+ }
+}
+
+/* Reads one hex-encoded H3 HEADERS frame per stdin line, decodes it with
+ * qpack_decode_fs() and prints the resulting header list on stderr. Stops
+ * on EOF or on an invalid hex line; a QPACK decoding error only skips the
+ * current line.
+ */
+int main(int argc, char **argv)
+{
+ struct http_hdr hdrs[MAX_HDR_NUM];
+ int len, outlen, hdr_idx;
+
+ do {
+ if (!fgets(line, sizeof(line), stdin))
+ break;
+
+ if ((len = hex2bin(line, bin, MAX_RQ_SIZE)) < 0)
+ break;
+
+ outlen = qpack_decode_fs(bin, len, &buf, hdrs,
+ sizeof(hdrs) / sizeof(hdrs[0]));
+ if (outlen < 0) {
+ fprintf(stderr, "QPACK decoding failed: %d\n", outlen);
+ continue;
+ }
+
+ /* the decoded list ends with an empty-name sentinel */
+ hdr_idx = 0;
+ fprintf(stderr, "<<< Found %d headers:\n", outlen);
+ while (1) {
+ if (isteq(hdrs[hdr_idx].n, ist("")))
+ break;
+
+ fprintf(stderr, "%.*s: %.*s\n",
+ (int)hdrs[hdr_idx].n.len, hdrs[hdr_idx].n.ptr,
+ (int)hdrs[hdr_idx].v.len, hdrs[hdr_idx].v.ptr);
+
+ ++hdr_idx;
+ }
+ } while (1);
+
+ return EXIT_SUCCESS;
+}
diff --git a/dev/sslkeylogger/sslkeylogger.lua b/dev/sslkeylogger/sslkeylogger.lua
new file mode 100644
index 0000000..e67bf77
--- /dev/null
+++ b/dev/sslkeylogger/sslkeylogger.lua
@@ -0,0 +1,47 @@
+--[[
+ This script can be used to decipher SSL traffic coming through haproxy. It
+ must first be loaded in the global section of haproxy configuration with
+ TLS keys logging activated :
+
+ tune.ssl.keylog on
+ lua-load sslkeylogger.lua
+
+ Then a http-request rule can be inserted for the desired frontend :
+ http-request lua.sslkeylog <path_to_keylog_file>
+
+ The generated keylog file can then be injected into wireshark to decipher a
+ network capture.
+]]
+
+-- Action entry point: append the TLS 1.3 traffic secrets of the current
+-- connection to <filename> in the NSS keylog format understood by wireshark.
+-- Secrets are written once per session, guarded by a session variable.
+local function sslkeylog(txn, filename)
+    -- keylog label -> sample fetch returning the corresponding secret
+    local fields = {
+        CLIENT_EARLY_TRAFFIC_SECRET = function() return txn.f:ssl_fc_client_early_traffic_secret() end,
+        CLIENT_HANDSHAKE_TRAFFIC_SECRET = function() return txn.f:ssl_fc_client_handshake_traffic_secret() end,
+        SERVER_HANDSHAKE_TRAFFIC_SECRET = function() return txn.f:ssl_fc_server_handshake_traffic_secret() end,
+        CLIENT_TRAFFIC_SECRET_0 = function() return txn.f:ssl_fc_client_traffic_secret_0() end,
+        SERVER_TRAFFIC_SECRET_0 = function() return txn.f:ssl_fc_server_traffic_secret_0() end,
+        EXPORTER_SECRET = function() return txn.f:ssl_fc_exporter_secret() end,
+        EARLY_EXPORTER_SECRET = function() return txn.f:ssl_fc_early_exporter_secret() end
+    }
+
+    local client_random = txn.c:hex(txn.f:ssl_fc_client_random())
+
+    -- ensure that a key is written only once by using a session variable
+    if not txn:get_var('sess.sslkeylogdone') then
+        local file, err = io.open(filename, 'a')
+        if file then
+            for fieldname, fetch in pairs(fields) do
+                -- invoke each fetch only once instead of twice (test +
+                -- write), and reuse the returned value
+                local secret = fetch()
+                if secret then
+                    file:write(string.format('%s %s %s\n', fieldname, client_random, secret))
+                end
+            end
+            file:close()
+        else
+            core.Warning("Cannot open SSL log file: " .. err .. ".")
+        end
+
+        txn:set_var('sess.sslkeylogdone', true)
+    end
+end
+
+core.register_action('sslkeylog', { 'http-req' }, sslkeylog, 1)
diff --git a/dev/tcploop/Makefile b/dev/tcploop/Makefile
new file mode 100644
index 0000000..6d0a0c2
--- /dev/null
+++ b/dev/tcploop/Makefile
@@ -0,0 +1,13 @@
+# Minimal standalone build for the tcploop debugging tool.
+# verbose.mk provides the quiet/verbose $(cmd_CC) compile wrapper.
+include ../../include/make/verbose.mk
+
+CC = gcc
+OPTIMIZE = -O2 -g
+DEFINE =
+INCLUDE =
+OBJS = tcploop
+
+tcploop: tcploop.c
+	$(cmd_CC) $(OPTIMIZE) $(DEFINE) $(INCLUDE) -o $@ $^
+
+# remove the binary and common editor/compiler leftovers
+clean:
+	rm -f $(OBJS) *.[oas] *~
diff --git a/dev/tcploop/tcploop.c b/dev/tcploop/tcploop.c
new file mode 100644
index 0000000..c091f10
--- /dev/null
+++ b/dev/tcploop/tcploop.c
@@ -0,0 +1,1055 @@
+/*
+ * TCP client and server for bug hunting
+ *
+ * Copyright (C) 2016 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define _GNU_SOURCE // for POLLRDHUP
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/ioctl.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+
+#ifdef __linux__
+#include <sys/epoll.h>
+#endif
+
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <netdb.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+/* for OSes which don't have it */
+#ifndef POLLRDHUP
+#define POLLRDHUP 0
+#endif
+
+#ifndef MSG_MORE
+#define MSG_MORE 0
+#endif
+
+/* variable-size error message; the flexible storage must be allocated by
+ * alloc_err_msg() (the trailing msg[0] array itself has no storage).
+ */
+struct err_msg {
+	int size;
+	int len;
+	char msg[0];
+};
+
+/* constants handy for setsockopt() */
+const int zero = 0;
+const int one = 1;
+const struct linger nolinger = { .l_onoff = 1, .l_linger = 0 };
+
+/* scratch buffer shared by all send/recv/echo actions */
+#define TRASH_SIZE 65536
+static char trash[TRASH_SIZE];
+
+volatile int nbproc = 0;          /* live children, updated from SIGCHLD */
+static struct timeval start_time; /* program start date for relative stamps */
+static int showtime;              /* 1=ms rel, 2=sec.usec rel, 3+=absolute */
+static int verbose;               /* -v: enables dolog() output */
+static int use_epoll;             /* -e: epoll instead of poll (Linux) */
+static int pid;                   /* current pid, printed in logs */
+static int sock_type = SOCK_STREAM;  /* -u switches to SOCK_DGRAM */
+static int sock_proto = IPPROTO_TCP; /* -u: UDP, -U: 0 (UNIX) */
+
+
+/* prints the optional printf-style message, if any, to stderr then exits
+ * the process with status <code>. Never returns.
+ */
+__attribute__((noreturn)) void die(int code, const char *format, ...)
+{
+	if (format) {
+		va_list ap;
+
+		va_start(ap, format);
+		vfprintf(stderr, format, ap);
+		va_end(ap);
+	}
+	exit(code);
+}
+
+/* display the usage message and exit with the code */
+/* (the text below is also the reference list of the single-letter actions
+ * interpreted by main(); keep the two in sync)
+ */
+__attribute__((noreturn)) void usage(int code, const char *arg0)
+{
+	die(code,
+	    "Usage : %s [options]* [<ip>:]port [<action>*]\n"
+	    "\n"
+	    "options :\n"
+	    "  -v           : verbose\n"
+	    "  -u           : use UDP instead of TCP (limited)\n"
+	    "  -U           : use UNIX instead of TCP (limited, addr must have one '/')\n"
+	    "  -t|-tt|-ttt  : show time (msec / relative / absolute)\n"
+	    "  -e           : use epoll instead of poll on Linux\n"
+	    "actions :\n"
+	    "  A[<count>]   : Accepts <count> incoming sockets and closes count-1\n"
+	    "                 Note: fd=accept(fd)\n"
+	    "  B[[ip]:port] : Bind a new socket to ip:port or default one if unspecified.\n"
+	    "                 Note: fd=socket,bind(fd)\n"
+	    "  C[[ip]:port] : Connects to ip:port or default ones if unspecified.\n"
+	    "                 Note: fd=socket,connect(fd)\n"
+	    "  D            : Disconnect (connect to AF_UNSPEC)\n"
+	    "  E[<size>]    : Echo this amount of bytes. 0=infinite. unset=any amount.\n"
+	    "  F            : FIN : shutdown(SHUT_WR)\n"
+	    "  G            : disable lingering\n"
+	    "  I            : wait for Input data to be present (POLLIN)\n"
+	    "  J            : Jump back to oldest post-fork/post-accept action\n"
+	    "  K            : kill the connection and go on with next operation\n"
+	    "  L[<backlog>] : Listens to ip:port and optionally sets backlog\n"
+	    "                 Note: fd=socket,bind(fd),listen(fd)\n"
+	    "  N<max>       : fork New process, limited to <max> concurrent (default 1)\n"
+	    "  O            : wait for Output queue to be empty (POLLOUT + TIOCOUTQ)\n"
+	    "  P[<time>]    : Pause for <time> ms (100 by default)\n"
+	    "  Q            : disable TCP Quick-ack\n"
+	    "  R[<size>]    : Read this amount of bytes. 0=infinite. unset=any amount.\n"
+	    "  S[<size>]    : Send this amount of bytes. 0=infinite. unset=any amount.\n"
+	    "  S:<string>   : Send this exact string. \\r, \\n, \\t, \\\\ supported.\n"
+	    "  T            : set TCP_NODELAY\n"
+	    "  W[<time>]    : Wait for any event on the socket, maximum <time> ms\n"
+	    "  X[i|o|e]* ** : execvp() next args passing socket as stdin/stdout/stderr.\n"
+	    "                 If i/o/e present, only stdin/out/err are mapped to socket.\n"
+	    "  r            : shutr : shutdown(SHUT_RD) (pauses a listener or ends recv)\n"
+	    "\n"
+	    "It's important to note that a single FD is used at once and that Accept\n"
+	    "replaces the listening FD with the accepted one. Thus always do it after\n"
+	    "a fork if other connections have to be accepted.\n"
+	    "\n"
+	    "After a fork, we loop back to the beginning and silently skip L/C if the\n"
+	    "main socket already exists.\n"
+	    "\n"
+	    "Example dummy HTTP request drain server :\n"
+	    "   tcploop 8001 L W N20 A R S10 [ F K ]\n"
+	    "\n"
+	    "Example large bandwidth HTTP request drain server :\n"
+	    "   tcploop 8001 L W N20 A R S0 [ F K ]\n"
+	    "\n"
+	    "Example TCP client with pauses at each step :\n"
+	    "   tcploop 8001 C T W P100 S10 O P100 R S10 O R G K\n"
+	    "\n"
+	    "Simple chargen server :\n"
+	    "   tcploop 8001 L A Xo cat /dev/zero\n"
+	    "\n"
+	    "Simple telnet server :\n"
+	    "   tcploop 8001 L W N A X /usr/sbin/in.telnetd\n"
+	    "", arg0);
+}
+
+/* Prints a log line to stderr when verbose mode is enabled. A timestamp is
+ * prepended depending on <showtime> (1: relative msec, 2: relative sec.usec,
+ * 3+: absolute sec.usec), then the current pid, then the printf-style
+ * message. Does nothing when <verbose> is zero.
+ */
+void dolog(const char *format, ...)
+{
+	struct timeval date, tv;
+	int delay;
+	va_list args;
+
+	if (!verbose)
+		return;
+
+	if (showtime) {
+		gettimeofday(&date, NULL);
+		switch (showtime) {
+		case 1: // [msec] relative
+			delay = (date.tv_sec - start_time.tv_sec) * 1000000 + date.tv_usec - start_time.tv_usec;
+			fprintf(stderr, "[%d] ", delay / 1000);
+			break;
+		case 2: // [sec.usec] relative
+			/* normalize the difference so usec stays positive and
+			 * negative deltas are clamped to zero.
+			 */
+			tv.tv_usec = date.tv_usec - start_time.tv_usec;
+			tv.tv_sec = date.tv_sec - start_time.tv_sec;
+			if ((signed)tv.tv_sec > 0) {
+				if ((signed)tv.tv_usec < 0) {
+					tv.tv_usec += 1000000;
+					tv.tv_sec--;
+				}
+			} else if (tv.tv_sec == 0) {
+				if ((signed)tv.tv_usec < 0)
+					tv.tv_usec = 0;
+			} else {
+				tv.tv_sec = 0;
+				tv.tv_usec = 0;
+			}
+			fprintf(stderr, "[%d.%06d] ", (int)tv.tv_sec, (int)tv.tv_usec);
+			break;
+		default: // [sec.usec] absolute
+			fprintf(stderr, "[%d.%06d] ", (int)date.tv_sec, (int)date.tv_usec);
+			break;
+		}
+	}
+
+	fprintf(stderr, "%5d ", pid);
+
+	va_start(args, format);
+	vfprintf(stderr, format, args);
+	va_end(args);
+}
+
+/* convert '\n', '\t', '\r', '\\' to their respective characters */
+/* Copies <in> into <out>, decoding the above backslash escapes, writing at
+ * most <size> bytes. Returns the number of bytes written. The output is NOT
+ * NUL-terminated. An unrecognized escape copies the backslash verbatim and
+ * leaves the following character to be processed on the next iteration.
+ */
+int unescape(char *out, int size, const char *in)
+{
+	int len;
+
+	for (len = 0; len < size && *in; in++, out++, len++) {
+		if (*in == '\\') {
+			/* emit the decoded char and skip the escape letter */
+			switch (in[1]) {
+			case 'n' : *out = '\n'; in++; continue;
+			case 't' : *out = '\t'; in++; continue;
+			case 'r' : *out = '\r'; in++; continue;
+			case '\\' : *out = '\\'; in++; continue;
+			default : break;
+			}
+		}
+		*out = *in;
+	}
+	return len;
+}
+
+/* allocates a struct err_msg providing <size> bytes of storage for the
+ * trailing message. Returns the initialized message or NULL on allocation
+ * failure.
+ */
+struct err_msg *alloc_err_msg(int size)
+{
+	struct err_msg *msg = malloc(sizeof(*msg) + size);
+
+	if (msg) {
+		msg->len = 0;
+		msg->size = size;
+	}
+	return msg;
+}
+
+/* SIGCHLD handler: reaps every terminated child and atomically decrements
+ * the shared <nbproc> counter so tcp_fork() may spawn replacements.
+ */
+void sig_handler(int sig)
+{
+	if (sig == SIGCHLD) {
+		while (waitpid(-1, NULL, WNOHANG) > 0)
+			__sync_sub_and_fetch(&nbproc, 1);
+	}
+}
+
+/* converts str in the form [[<ipv4>|<ipv6>|<hostname>]:]port to struct sockaddr_storage.
+ * Returns < 0 with err set in case of error. A string containing a '/' is
+ * taken as a UNIX socket path. Without any colon the string is a bare port
+ * on INADDR_ANY. The address part may be '*' or empty (INADDR_ANY), a
+ * literal IPv4/IPv6 address, or a host name to resolve.
+ */
+int addr_to_ss(const char *str, struct sockaddr_storage *ss, struct err_msg *err)
+{
+	char *port_str;
+	int port;
+
+	memset(ss, 0, sizeof(*ss));
+
+	/* if there's a slash it's a unix socket */
+	if (strchr(str, '/')) {
+		struct sockaddr_un *un = (struct sockaddr_un *)ss;
+
+		un->sun_family = AF_UNIX;
+		/* strncpy() does not terminate on truncation; terminate on
+		 * the last byte ourselves. The previous code indexed
+		 * sun_path[sizeof(sun_path)], writing one byte past the end
+		 * of the array.
+		 */
+		strncpy(un->sun_path, str, sizeof(un->sun_path) - 1);
+		un->sun_path[sizeof(un->sun_path) - 1] = 0;
+		return 0;
+	}
+
+	/* look for the addr/port delimiter, it's the last colon. If there's no
+	 * colon, it's 0:<port>.
+	 */
+	if ((port_str = strrchr(str, ':')) == NULL) {
+		port = atoi(str);
+		if (port < 0 || port > 65535) {
+			err->len = snprintf(err->msg, err->size, "Missing/invalid port number: '%s'\n", str);
+			return -1;
+		}
+
+		ss->ss_family = AF_INET;
+		((struct sockaddr_in *)ss)->sin_port = htons(port);
+		((struct sockaddr_in *)ss)->sin_addr.s_addr = INADDR_ANY;
+		return 0;
+	}
+
+	*port_str++ = 0;
+
+	if (strrchr(str, ':') != NULL) {
+		/* IPv6 address contains ':' */
+		ss->ss_family = AF_INET6;
+		((struct sockaddr_in6 *)ss)->sin6_port = htons(atoi(port_str));
+
+		if (!inet_pton(ss->ss_family, str, &((struct sockaddr_in6 *)ss)->sin6_addr)) {
+			err->len = snprintf(err->msg, err->size, "Invalid server address: '%s'\n", str);
+			return -1;
+		}
+	}
+	else {
+		ss->ss_family = AF_INET;
+		((struct sockaddr_in *)ss)->sin_port = htons(atoi(port_str));
+
+		if (*str == '*' || *str == '\0') { /* INADDR_ANY */
+			((struct sockaddr_in *)ss)->sin_addr.s_addr = INADDR_ANY;
+			return 0;
+		}
+
+		if (!inet_pton(ss->ss_family, str, &((struct sockaddr_in *)ss)->sin_addr)) {
+			/* not a literal address: fall back to a name lookup */
+			struct hostent *he = gethostbyname(str);
+
+			if (he == NULL) {
+				err->len = snprintf(err->msg, err->size, "Invalid server name: '%s'\n", str);
+				return -1;
+			}
+			((struct sockaddr_in *)ss)->sin_addr = *(struct in_addr *) *(he->h_addr_list);
+		}
+	}
+
+	return 0;
+}
+
+/* waits up to <ms> milliseconds on fd <fd> for events <events> (POLLIN|POLLRDHUP|POLLOUT).
+ * returns poll's status.
+ */
+int wait_on_fd(int fd, int events, int ms)
+{
+	struct pollfd pollfd;
+	int ret;
+
+#ifdef __linux__
+	/* epoll mode (-e): the epoll fd is created once and cached across
+	 * calls; the watched fd is registered before the wait and removed
+	 * after. Falls through to poll() if epoll_create() fails.
+	 */
+	while (use_epoll) {
+		struct epoll_event evt;
+		static int epoll_fd = -1;
+
+		if (epoll_fd == -1)
+			epoll_fd = epoll_create(1024);
+		if (epoll_fd == -1)
+			break;
+		evt.events = ((events & POLLIN) ? EPOLLIN : 0) |
+			((events & POLLOUT) ? EPOLLOUT : 0) |
+			((events & POLLRDHUP) ? EPOLLRDHUP : 0);
+		evt.data.fd = fd;
+		epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &evt);
+
+		/* restart on signal interruption */
+		do {
+			ret = epoll_wait(epoll_fd, &evt, 1, ms);
+		} while (ret == -1 && errno == EINTR);
+
+		evt.data.fd = fd;
+		epoll_ctl(epoll_fd, EPOLL_CTL_DEL, fd, &evt);
+		return ret;
+	}
+#endif
+
+	/* default poll() path, also restarted on EINTR */
+	do {
+		pollfd.fd = fd;
+		pollfd.events = events;
+		ret = poll(&pollfd, 1, ms);
+	} while (ret == -1 && errno == EINTR);
+
+	return ret;
+}
+
+/* enables TCP_NODELAY (disables Nagle) on <sock>; <arg> is unused */
+int tcp_set_nodelay(int sock, const char *arg)
+{
+	return setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
+}
+
+/* arms an immediate-reset linger on <sock> so close() emits RST; <arg> is unused */
+int tcp_set_nolinger(int sock, const char *arg)
+{
+	return setsockopt(sock, SOL_SOCKET, SO_LINGER, (struct linger *) &nolinger, sizeof(struct linger));
+}
+
+/* disables TCP quick-ack on <sock> where supported; no-op elsewhere */
+int tcp_set_noquickack(int sock, const char *arg)
+{
+#ifdef TCP_QUICKACK
+	/* warning: do not use during connect if nothing is to be sent! */
+	return setsockopt(sock, IPPROTO_TCP, TCP_QUICKACK, &zero, sizeof(zero));
+#else
+	return 0;
+#endif
+}
+
+/* Creates a new socket of the globally configured type and protocol
+ * (TCP by default, UDP with -u, UNIX with -U), for either listening or
+ * connecting. Returns the fd, or -1 after printing the error.
+ */
+int tcp_socket(sa_family_t fam)
+{
+	int fd = socket(fam, sock_type, sock_proto);
+
+	if (fd == -1) {
+		perror("socket()");
+		return -1;
+	}
+	return fd;
+}
+
+/* Try to bind to local address <sa>. Return the fd or -1 in case of error.
+ * Supports being passed NULL for arg if none has to be passed.
+ */
+int tcp_bind(int sock, const struct sockaddr_storage *sa, const char *arg)
+{
+	struct sockaddr_storage conn_addr;
+
+	/* an optional "B<addr>" argument overrides the default address */
+	if (arg && arg[1]) {
+		/* the error message needs real storage: a stack
+		 * "struct err_msg" has a zero-sized msg[] and an
+		 * uninitialized size, so snprintf() into it was undefined
+		 * behaviour.
+		 */
+		struct err_msg *err = alloc_err_msg(256);
+
+		if (!err)
+			die(1, "Fatal: out of memory.\n");
+		if (addr_to_ss(arg + 1, &conn_addr, err) < 0)
+			die(1, "%s\n", err->msg);
+		free(err);
+		sa = &conn_addr;
+	}
+
+	/* create the socket if the caller didn't pass one */
+	if (sock < 0) {
+		sock = tcp_socket(sa->ss_family);
+		if (sock < 0)
+			return sock;
+	}
+
+	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
+		perror("setsockopt(SO_REUSEADDR)");
+		goto fail;
+	}
+
+#ifdef SO_REUSEPORT
+	if (setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (char *) &one, sizeof(one)) == -1) {
+		perror("setsockopt(SO_REUSEPORT)");
+		goto fail;
+	}
+#endif
+	/* NOTE(review): AF_UNIX addresses would need sizeof(sockaddr_un)
+	 * here; only the INET/INET6 sizes are handled.
+	 */
+	if (bind(sock, (struct sockaddr *)sa, sa->ss_family == AF_INET6 ?
+	         sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)) == -1) {
+		perror("bind");
+		goto fail;
+	}
+
+	return sock;
+ fail:
+	close(sock);
+	return -1;
+}
+
+/* Try to listen to address <sa>. Return the fd or -1 in case of error */
+/* <arg> is the "L[<backlog>]" action word; without a backlog, 1000 is used.
+ * If no socket exists yet, one is created and bound first.
+ */
+int tcp_listen(int sock, const struct sockaddr_storage *sa, const char *arg)
+{
+	int backlog;
+
+	if (sock < 0) {
+		sock = tcp_bind(sock, sa, NULL);
+		if (sock < 0)
+			return sock;
+	}
+
+	if (arg[1])
+		backlog = atoi(arg + 1);
+	else
+		backlog = 1000;
+
+	if (backlog < 0 || backlog > 65535) {
+		fprintf(stderr, "backlog must be between 0 and 65535 inclusive (was %d)\n", backlog);
+		goto fail;
+	}
+
+	if (listen(sock, backlog) == -1) {
+		perror("listen");
+		goto fail;
+	}
+
+	return sock;
+ fail:
+	close(sock);
+	return -1;
+}
+
+/* accepts a socket from listening socket <sock>, and returns it (or -1 in case of error) */
+/* <arg> is the "A[<count>]" action word: <count> connections are accepted
+ * and all but the last one are immediately closed. The returned fd is set
+ * non-blocking and replaces the listener in the caller.
+ */
+int tcp_accept(int sock, const char *arg)
+{
+	int count;
+	int newsock;
+
+	if (arg[1])
+		count = atoi(arg + 1);
+	else
+		count = 1;
+
+	if (count <= 0) {
+		fprintf(stderr, "accept count must be > 0 or unset (was %d)\n", count);
+		return -1;
+	}
+
+	do {
+		newsock = accept(sock, NULL, NULL);
+		if (newsock < 0) { // TODO: improve error handling
+			if (errno == EINTR || errno == EAGAIN || errno == ECONNABORTED)
+				continue;
+			perror("accept()");
+			break;
+		}
+
+		/* close all accepted sockets except the last one */
+		if (count > 1)
+			close(newsock);
+		count--;
+	} while (count > 0);
+
+	/* only touch the fd if accept() eventually succeeded; the previous
+	 * code called fcntl(-1, ...) after a hard accept() failure.
+	 */
+	if (newsock >= 0)
+		fcntl(newsock, F_SETFL, O_NONBLOCK);
+	return newsock;
+}
+
+/* Try to establish a new connection to <sa>. Return the fd or -1 in case of error */
+/* <arg> is the "C[<addr>]" action word; an explicit address overrides <sa>.
+ * The socket is made non-blocking, so EINPROGRESS is not an error.
+ */
+int tcp_connect(int sock, const struct sockaddr_storage *sa, const char *arg)
+{
+	struct sockaddr_storage conn_addr;
+	socklen_t addrlen;
+
+	if (arg[1]) {
+		/* use allocated storage for the error message: a stack
+		 * "struct err_msg" has a zero-sized msg[] and an
+		 * uninitialized size, so writing into it was undefined
+		 * behaviour.
+		 */
+		struct err_msg *err = alloc_err_msg(256);
+
+		if (!err)
+			die(1, "Fatal: out of memory.\n");
+		if (addr_to_ss(arg + 1, &conn_addr, err) < 0)
+			die(1, "%s\n", err->msg);
+		free(err);
+		sa = &conn_addr;
+	}
+
+	if (sock < 0) {
+		sock = tcp_socket(sa->ss_family);
+		if (sock < 0)
+			return sock;
+	}
+
+	if (fcntl(sock, F_SETFL, O_NONBLOCK) == -1)
+		goto fail;
+
+	if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1)
+		goto fail;
+
+	/* pass the exact address length for the family; the previous code
+	 * always used sizeof(struct sockaddr_in), which broke IPv6 (-6
+	 * style addresses) and UNIX (-U) destinations.
+	 */
+	switch (sa->ss_family) {
+	case AF_INET6:
+		addrlen = sizeof(struct sockaddr_in6);
+		break;
+	case AF_UNIX:
+		addrlen = sizeof(struct sockaddr_un);
+		break;
+	default:
+		addrlen = sizeof(struct sockaddr_in);
+		break;
+	}
+
+	if (connect(sock, (const struct sockaddr *)sa, addrlen) < 0) {
+		/* EINPROGRESS is the normal non-blocking completion path */
+		if (errno != EINPROGRESS)
+			goto fail;
+	}
+
+	return sock;
+ fail:
+	close(sock);
+	return -1;
+}
+
+/* Try to disconnect by connecting to AF_UNSPEC. Return >=0 on success, -1 in case of error */
+int tcp_disconnect(int sock)
+{
+	struct sockaddr unspec;
+
+	memset(&unspec, 0, sizeof(unspec));
+	unspec.sa_family = AF_UNSPEC;
+	return connect(sock, &unspec, sizeof(unspec));
+}
+
+/* receives N bytes from the socket and returns 0 (or -1 in case of a recv
+ * error, or -2 in case of an argument error). When no arg is passed, receives
+ * anything and stops. Otherwise reads the requested amount of data. 0 means
+ * read as much as possible.
+ */
+int tcp_recv(int sock, const char *arg)
+{
+	int count = -1; // stop at first read
+	int ret;
+	int max;
+
+	if (arg[1]) {
+		count = atoi(arg + 1);
+		if (count < 0) {
+			fprintf(stderr, "recv count must be >= 0 or unset (was %d)\n", count);
+			return -2;
+		}
+	}
+
+	while (1) {
+		/* read whatever remains, bounded by the trash buffer size */
+		max = (count > 0) ? count : INT_MAX;
+		if (max > sizeof(trash))
+			max = sizeof(trash);
+		/* MSG_TRUNC drains data without copying it where supported */
+		ret = recv(sock, trash, max, MSG_NOSIGNAL | MSG_TRUNC);
+		if (ret < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno != EAGAIN) {
+				dolog("recv %d\n", ret);
+				return -1;
+			}
+			/* nothing yet: wait up to 1s for input then retry */
+			while (!wait_on_fd(sock, POLLIN | POLLRDHUP, 1000));
+			continue;
+		}
+		dolog("recv %d\n", ret);
+		if (!ret)
+			break;
+
+		/* count==0 means infinite, negative means single read */
+		if (!count)
+			continue;
+		else if (count > 0)
+			count -= ret;
+
+		if (count <= 0)
+			break;
+	}
+
+	return 0;
+}
+
+/* Sends N bytes to the socket and returns 0 (or -1 in case of send error, -2
+ * in case of an argument error. If the byte count is not set, sends only one
+ * block. Sending zero means try to send forever. If the argument starts with
+ * ':' then whatever follows is interpreted as the payload to be sent as-is.
+ * Escaped characters '\r', '\n', '\t' and '\\' are detected and converted. In
+ * this case, blocks must be small so that send() doesn't fragment them, as
+ * they will be put into the trash and expected to be sent at once.
+ */
+int tcp_send(int sock, const char *arg)
+{
+	int count = -1; // stop after first block
+	int ret;
+
+	if (arg[1] == ':') {
+		/* exact payload after "S:", with escapes decoded into trash */
+		count = unescape(trash, sizeof(trash), arg + 2);
+	} else if (arg[1]) {
+		count = atoi(arg + 1);
+		if (count < 0) {
+			fprintf(stderr, "send count must be >= 0 or unset (was %d)\n", count);
+			return -2;
+		}
+	}
+
+	while (1) {
+		/* only set MSG_MORE when another block will follow this one.
+		 * The previous signed/unsigned comparison promoted the
+		 * default count==-1 to a huge unsigned value, flagging a
+		 * lone block with MSG_MORE and possibly delaying it.
+		 */
+		ret = send(sock, trash,
+		           (count > 0) && (count < (int)sizeof(trash)) ? count : (int)sizeof(trash),
+		           MSG_NOSIGNAL | ((count > (int)sizeof(trash)) ? MSG_MORE : 0));
+		if (ret < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno != EAGAIN) {
+				dolog("send %d\n", ret);
+				return -1;
+			}
+			/* output full: wait up to 1s for room then retry */
+			while (!wait_on_fd(sock, POLLOUT, 1000));
+			continue;
+		}
+		dolog("send %d\n", ret);
+		if (!count)
+			continue;
+		else if (count > 0)
+			count -= ret;
+
+		if (count <= 0)
+			break;
+	}
+
+	return 0;
+}
+
+/* echoes N bytes to the socket and returns 0 (or -1 in case of error). If not
+ * set, echoes only the first block. Zero means forward forever.
+ */
+int tcp_echo(int sock, const char *arg)
+{
+	int count = -1; // echo forever
+	int ret;
+	int rcvd;
+
+	if (arg[1]) {
+		count = atoi(arg + 1);
+		if (count < 0) {
+			/* NOTE(review): message says "send count" (copied from
+			 * tcp_send) though this is the echo action.
+			 */
+			fprintf(stderr, "send count must be >= 0 or unset (was %d)\n", count);
+			return -1;
+		}
+	}
+
+	/* two-state loop: <rcvd> == 0 means we need to read a new block,
+	 * otherwise <rcvd> bytes of trash remain to be sent back.
+	 */
+	rcvd = 0;
+	while (1) {
+		if (rcvd <= 0) {
+			/* no data pending */
+			rcvd = recv(sock, trash, (count > 0) && (count < sizeof(trash)) ? count : sizeof(trash), MSG_NOSIGNAL);
+			if (rcvd < 0) {
+				if (errno == EINTR)
+					continue;
+				if (errno != EAGAIN) {
+					dolog("recv %d\n", rcvd);
+					return -1;
+				}
+				while (!wait_on_fd(sock, POLLIN | POLLRDHUP, 1000));
+				continue;
+			}
+			dolog("recv %d\n", rcvd);
+			if (!rcvd)
+				break;
+		}
+		else {
+			/* some data still pending */
+			ret = send(sock, trash, rcvd, MSG_NOSIGNAL | ((count > rcvd) ? MSG_MORE : 0));
+			if (ret < 0) {
+				if (errno == EINTR)
+					continue;
+				if (errno != EAGAIN) {
+					dolog("send %d\n", ret);
+					return -1;
+				}
+				while (!wait_on_fd(sock, POLLOUT, 1000));
+				continue;
+			}
+			dolog("send %d\n", ret);
+			rcvd -= ret;
+			if (rcvd)
+				continue;
+
+			/* count==0 means echo forever, negative means one block */
+			if (!count)
+				continue;
+			else if (count > 0)
+				count -= ret;
+
+			if (count <= 0)
+				break;
+		}
+	}
+	return 0;
+}
+
+/* waits for an event on the socket, usually indicates an accept for a
+ * listening socket and a connect for an outgoing socket.
+ */
+/* <arg> is the "W[<time>]" action word; without a time, waits forever.
+ * Returns poll's error status (<0) or 0.
+ */
+int tcp_wait(int sock, const char *arg)
+{
+	int delay = -1; // wait forever
+	int ret;
+
+	if (arg[1]) {
+		delay = atoi(arg + 1);
+		if (delay < 0) {
+			fprintf(stderr, "wait time must be >= 0 or unset (was %d)\n", delay);
+			return -1;
+		}
+	}
+
+	/* FIXME: this doesn't take into account delivered signals */
+	ret = wait_on_fd(sock, POLLIN | POLLRDHUP | POLLOUT, delay);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* waits for the input data to be present */
+/* ("I" action) polls POLLIN|POLLRDHUP with a 1s timeout; <arg> unused */
+int tcp_wait_in(int sock, const char *arg)
+{
+	int ret;
+
+	ret = wait_on_fd(sock, POLLIN | POLLRDHUP, 1000);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* waits for the output queue to be empty */
+/* ("O" action) first waits for POLLOUT, then busy-polls the kernel's
+ * unsent byte count (TIOCOUTQ) until it reaches zero; <arg> unused.
+ */
+int tcp_wait_out(int sock, const char *arg)
+{
+	int ret;
+
+	ret = wait_on_fd(sock, POLLOUT, 1000);
+	if (ret < 0)
+		return ret;
+
+	/* Now wait for data to leave the socket */
+	do {
+		if (ioctl(sock, TIOCOUTQ, &ret) < 0)
+			return -1;
+	} while (ret > 0);
+	return 0;
+}
+
+/* delays processing for <time> milliseconds, 100 by default */
+/* ("P[<time>]" action); <sock> is unused, returns 0 or -1 on bad arg */
+int tcp_pause(int sock, const char *arg)
+{
+	int delay = 100;
+
+	if (arg[1]) {
+		delay = atoi(arg + 1);
+		if (delay < 0) {
+			fprintf(stderr, "wait time must be >= 0 or unset (was %d)\n", delay);
+			return -1;
+		}
+	}
+
+	usleep(delay * 1000);
+	return 0;
+}
+
+/* forks another process while respecting the limit imposed in argument (1 by
+ * default). Will wait for another process to exit before creating a new one.
+ * Returns the value of the fork() syscall, ie 0 for the child, non-zero for
+ * the parent, -1 for an error.
+ */
+int tcp_fork(int sock, const char *arg)
+{
+	int max = 1;
+	int ret;
+
+	if (arg[1]) {
+		max = atoi(arg + 1);
+		if (max <= 0) {
+			fprintf(stderr, "max process must be > 0 or unset (was %d)\n", max);
+			return -1;
+		}
+	}
+
+	/* <nbproc> is decremented by the SIGCHLD handler; the 1s poll()
+	 * bounds the wait if a signal slips in between check and sleep.
+	 */
+	while (nbproc >= max)
+		poll(NULL, 0, 1000);
+
+	ret = fork();
+	if (ret > 0)
+		__sync_add_and_fetch(&nbproc, 1);
+	return ret;
+}
+
+/* Parses the dash options, resolves the default [ip:]port from argv[1],
+ * then interprets each remaining argument as a single-letter action (see
+ * usage()). A single fd <sock> is carried from one action to the next;
+ * <loop_arg> marks where J/N jump back to.
+ */
+int main(int argc, char **argv)
+{
+	struct sockaddr_storage default_addr;
+	struct err_msg err;
+	const char *arg0;
+	int loop_arg;
+	int arg;
+	int ret;
+	int sock;
+	int errfd;
+
+	arg0 = argv[0];
+
+	/* option parsing stops at the first non-dash argument or "--" */
+	while (argc > 1 && argv[1][0] == '-') {
+		argc--; argv++;
+		if (strcmp(argv[0], "-t") == 0)
+			showtime++;
+		else if (strcmp(argv[0], "-tt") == 0)
+			showtime += 2;
+		else if (strcmp(argv[0], "-ttt") == 0)
+			showtime += 3;
+		else if (strcmp(argv[0], "-e") == 0)
+			use_epoll = 1;
+		else if (strcmp(argv[0], "-v") == 0)
+			verbose ++;
+		else if (strcmp(argv[0], "-u") == 0) {
+			sock_type = SOCK_DGRAM;
+			sock_proto = IPPROTO_UDP;
+		}
+		else if (strcmp(argv[0], "-U") == 0) {
+			sock_proto = 0;
+		}
+		else if (strcmp(argv[0], "--") == 0)
+			break;
+		else
+			usage(1, arg0);
+	}
+
+	if (argc < 2)
+		usage(1, arg0);
+
+	pid = getpid();
+	signal(SIGCHLD, sig_handler);
+
+	/* NOTE(review): <err> is a stack struct err_msg whose msg[0] array
+	 * has no storage and whose size is uninitialized; addr_to_ss()
+	 * snprintf()s into it on error — confirm and fix (alloc_err_msg()).
+	 */
+	if (addr_to_ss(argv[1], &default_addr, &err) < 0)
+		die(1, "%s\n", err.msg);
+
+	gettimeofday(&start_time, NULL);
+
+	sock = -1;
+	loop_arg = 2;
+	for (arg = loop_arg; arg < argc; arg++) {
+		switch (argv[arg][0]) {
+		case 'L':
+			sock = tcp_listen(sock, &default_addr, argv[arg]);
+			if (sock < 0)
+				die(1, "Fatal: tcp_listen() failed.\n");
+			break;
+
+		case 'B':
+			/* silently ignore existing connections */
+			/* NOTE(review): the log and error messages below say
+			 * "connect" although this is the bind action.
+			 */
+			sock = tcp_bind(sock, &default_addr, argv[arg]);
+			if (sock < 0)
+				die(1, "Fatal: tcp_connect() failed.\n");
+			dolog("connect\n");
+			break;
+
+		case 'C':
+			sock = tcp_connect(sock, &default_addr, argv[arg]);
+			if (sock < 0)
+				die(1, "Fatal: tcp_connect() failed.\n");
+			dolog("connect\n");
+			break;
+
+		case 'D':
+			/* silently ignore non-existing connections */
+			/* NOTE(review): error message says tcp_connect though
+			 * this is the disconnect action.
+			 */
+			if (sock >= 0 && tcp_disconnect(sock) < 0)
+				die(1, "Fatal: tcp_connect() failed.\n");
+			dolog("disconnect\n");
+			break;
+
+		case 'A':
+			if (sock < 0)
+				die(1, "Fatal: tcp_accept() on non-socket.\n");
+			sock = tcp_accept(sock, argv[arg]);
+			if (sock < 0)
+				die(1, "Fatal: tcp_accept() failed.\n");
+			dolog("accept\n");
+			loop_arg = arg + 1; // cannot loop before accept()
+			break;
+
+		case 'T':
+			if (sock < 0)
+				die(1, "Fatal: tcp_set_nodelay() on non-socket.\n");
+			if (tcp_set_nodelay(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_set_nodelay() failed.\n");
+			break;
+
+		case 'G':
+			if (sock < 0)
+				die(1, "Fatal: tcp_set_nolinger() on non-socket.\n");
+			if (tcp_set_nolinger(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_set_nolinger() failed.\n");
+			break;
+
+		case 'Q':
+			if (sock < 0)
+				die(1, "Fatal: tcp_set_noquickack() on non-socket.\n");
+			if (tcp_set_noquickack(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_set_noquickack() failed.\n");
+			break;
+
+		case 'R':
+			if (sock < 0)
+				die(1, "Fatal: tcp_recv() on non-socket.\n");
+			ret = tcp_recv(sock, argv[arg]);
+			if (ret < 0) {
+				if (ret == -1) // usually ECONNRESET, silently exit
+					die(0, NULL);
+				die(1, "Fatal: tcp_recv() failed.\n");
+			}
+			break;
+
+		case 'S':
+			if (sock < 0)
+				die(1, "Fatal: tcp_send() on non-socket.\n");
+			ret = tcp_send(sock, argv[arg]);
+			if (ret < 0) {
+				if (ret == -1) // usually a broken pipe, silently exit
+					die(0, NULL);
+				die(1, "Fatal: tcp_send() failed.\n");
+			}
+			break;
+
+		case 'E':
+			if (sock < 0)
+				die(1, "Fatal: tcp_echo() on non-socket.\n");
+			if (tcp_echo(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_echo() failed.\n");
+			break;
+
+		case 'P':
+			if (tcp_pause(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_pause() failed.\n");
+			break;
+
+		case 'W':
+			if (sock < 0)
+				die(1, "Fatal: tcp_wait() on non-socket.\n");
+			if (tcp_wait(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_wait() failed.\n");
+			dolog("ready_any\n");
+			break;
+
+		case 'I':
+			if (sock < 0)
+				die(1, "Fatal: tcp_wait_in() on non-socket.\n");
+			if (tcp_wait_in(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_wait_in() failed.\n");
+			dolog("ready_in\n");
+			break;
+
+		case 'O':
+			if (sock < 0)
+				die(1, "Fatal: tcp_wait_out() on non-socket.\n");
+			if (tcp_wait_out(sock, argv[arg]) < 0)
+				die(1, "Fatal: tcp_wait_out() failed.\n");
+			dolog("ready_out\n");
+			break;
+
+		case 'K':
+			if (sock < 0 || close(sock) < 0)
+				die(1, "Fatal: close() on non-socket.\n");
+			dolog("close\n");
+			sock = -1;
+			break;
+
+		case 'F':
+			/* ignore errors on shutdown() as they are common */
+			if (sock >= 0)
+				shutdown(sock, SHUT_WR);
+			dolog("shutdown(w)\n");
+			break;
+
+		case 'r':
+			/* ignore errors on shutdown() as they are common */
+			if (sock >= 0)
+				shutdown(sock, SHUT_RD);
+			dolog("shutdown(r)\n");
+			break;
+
+		case 'N':
+			ret = tcp_fork(sock, argv[arg]);
+			if (ret < 0)
+				die(1, "Fatal: fork() failed.\n");
+			if (ret > 0) {
+				/* loop back to first arg */
+				arg = loop_arg - 1;
+				continue;
+			}
+			/* OK we're in the child, let's continue */
+			pid = getpid();
+			loop_arg = arg + 1;
+			break;
+
+		case 'J': // jump back to oldest post-fork action
+			arg = loop_arg - 1;
+			continue;
+
+		case 'X': // execute command. Optionally supports redirecting only i/o/e
+			if (arg + 1 >= argc)
+				die(1, "Fatal: missing argument after %s\n", argv[arg]);
+
+			/* keep a close-on-exec copy of stderr so an execvp()
+			 * failure can still be reported; make the socket
+			 * blocking again before handing it to the command.
+			 */
+			errfd = dup(2);
+			fcntl(errfd, F_SETFD, fcntl(errfd, F_GETFD, FD_CLOEXEC) | FD_CLOEXEC);
+			fcntl(sock, F_SETFL, fcntl(sock, F_GETFL, O_NONBLOCK) & ~O_NONBLOCK);
+			if (!argv[arg][1] || strchr(argv[arg], 'i'))
+				dup2(sock, 0);
+			if (!argv[arg][1] || strchr(argv[arg], 'o'))
+				dup2(sock, 1);
+			if (!argv[arg][1] || strchr(argv[arg], 'e'))
+				dup2(sock, 2);
+			argv += arg + 1;
+			if (execvp(argv[0], argv) == -1) {
+				int e = errno;
+
+				dup2(errfd, 2); // restore original stderr
+				close(errfd);
+				die(1, "Fatal: execvp(%s) failed : %s\n", argv[0], strerror(e));
+			}
+			break;
+		default:
+			usage(1, arg0);
+		}
+	}
+	return 0;
+}
diff --git a/dev/trace/trace.awk b/dev/trace/trace.awk
new file mode 100755
index 0000000..7b3b131
--- /dev/null
+++ b/dev/trace/trace.awk
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+# trace.awk - Fast trace symbol resolver - w@1wt.eu - 2012/05/25
+#
+# Principle: this program reads pointers from a trace file and, when they are
+# not found in its cache, passes them over a pipe to addr2line, which is
+# forked as a coprocess, then stores the result in the cache.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+#
+# usage: $0 exec_file < trace.out
+#
+
+# refuse to run without the executable to resolve symbols against
+if [ $# -lt 1 ]; then
+	echo "Usage: ${0##*/} exec_file < trace.out"
+	echo "Example: ${0##*/} ./haproxy < trace.out"
+	echo "Example: HAPROXY_TRACE=/dev/stdout ./haproxy -f cfg | ${0##*/} ./haproxy"
+	exit 1
+fi
+
+# NOTE(review): -s only checks for a non-empty file, not an executable
+if [ ! -s "$1" ]; then
+	echo "$1 is not a valid executable file"
+	exit 1
+fi
+
+# NOTE(review): the "|&" coprocess syntax below requires gawk
+exec awk -v prog="$1" \
+'
+BEGIN {
+	# resolve the addr2line command: $ADDR2LINE overrides, "addr2line" by default
+	if (cmd == "")
+		cmd=ENVIRON["ADDR2LINE"];
+	if (cmd == "")
+		cmd="addr2line";
+
+	if (prog == "")
+		prog=ENVIRON["PROG"];
+
+	cmd=cmd " -f -s -e " prog;
+
+	# precompute per-depth indent prefixes for call (">"), return ("<")
+	# and comment (" ") lines
+	for (i = 1; i < 100; i++) {
+		indents[">",i] = indents[">",i-1] "->"
+		indents[">",i-1] = indents[">",i-1] "  "
+		indents["<",i] = indents["<",i-1] "  "
+		indents["<",i-1] = indents["<",i-1] "  "
+		indents[" ",i] = indents[" ",i-1] "##"
+		indents[" ",i-1] = indents[" ",i-1] "  "
+	}
+}
+
+# resolves pointer <ptr> through the addr2line coprocess, caching the
+# function name and source location in names[]/locs[]
+function getptr(ptr)
+{
+	loc=locs[ptr];
+	name=names[ptr];
+	if (loc == "" || name == "") {
+		print ptr |& cmd;
+		cmd |& getline name;
+		cmd |& getline loc;
+		names[ptr]=name
+		locs[ptr]=loc
+	}
+}
+
+{
+	# input format: <timestamp> <level> <caller> <dir> <callee> [<ret>|<args>...]
+	if ($3 == "#") { # this is a trace comment
+		printf "%s %s ", $1, indents[" ",$2]
+		$1=""; $2=""; $3=""
+		print substr($0,4)
+		next
+	}
+	getptr($3); caller_loc=loc; caller_name=name
+	getptr($5); callee_loc=loc; callee_name=name
+	printf "%s %s %s %s %s(%s) [%s:%s] %s [%s:%s]\n",
+		$1, indents[$4,$2], caller_name, $4, callee_name, $6, caller_loc, $3, $4, callee_loc, $5
+}
+'
diff --git a/dev/udp/udp-perturb.c b/dev/udp/udp-perturb.c
new file mode 100644
index 0000000..55d1773
--- /dev/null
+++ b/dev/udp/udp-perturb.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (C) 2010-2022 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <limits.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#define MAXCONN 1
+
+const int zero = 0;
+const int one = 1;
+
+struct conn {
+ struct sockaddr_storage cli_addr;
+ int fd_bck;
+};
+
+struct errmsg {
+ char *msg;
+ int size;
+ int len;
+};
+
+struct sockaddr_storage frt_addr; // listen address
+struct sockaddr_storage srv_addr; // server address
+
+#define MAXPKTSIZE 16384
+#define MAXREORDER 20
+char trash[MAXPKTSIZE];
+
+/* history buffer, to resend random packets */
+struct {
+ char buf[MAXPKTSIZE];
+ size_t len;
+} history[MAXREORDER];
+int history_idx = 0;
+unsigned int rand_rate = 0;
+unsigned int corr_rate = 0;
+unsigned int corr_span = 1;
+unsigned int corr_base = 0;
+
+struct conn conns[MAXCONN]; // sole connection for now
+int fd_frt;
+
+int nbfd = 0;
+int nbconn = MAXCONN;
+
+
+/* Print the printf-style <format> message on stderr and exit the process
+ * with status <code>. This function never returns.
+ */
+__attribute__((noreturn)) void die(int code, const char *format, ...)
+{
+	va_list args;
+
+	va_start(args, format);
+	vfprintf(stderr, format, args);
+	va_end(args);
+	exit(code);
+}
+
+/* Xorshift RNG */
+unsigned int prng_state = ~0U/3; // half bits set, but any seed will fit
+
+/* Return a pseudo-random number in [0..range-1], advancing the global
+ * <prng_state> with an xorshift32 step. Not cryptographic; only used to
+ * decide which packets to perturb.
+ */
+static inline unsigned int prng(unsigned int range)
+{
+	unsigned int x = prng_state;
+
+	x ^= x << 13;
+	x ^= x >> 17;
+	x ^= x << 5;
+	prng_state = x;
+	/* scale the 32-bit output down to the requested range by keeping the
+	 * high half of a 64-bit product.
+	 */
+	return ((unsigned long long)x * (range - 1) + x) >> 32;
+}
+
+/* Converts <str> in the form [<ipv4>|<ipv6>|<hostname>]:port to struct
+ * sockaddr_storage. Note that <str> is modified in place: the last colon is
+ * replaced by a NUL, and the address part is emptied when only a port was
+ * given. An empty or "*" address maps to INADDR_ANY (IPv4). Hostnames are
+ * resolved with gethostbyname(), i.e. IPv4 only. Returns 0 on success, or
+ * < 0 with a message stored into <err> in case of error.
+ */
+int addr_to_ss(char *str, struct sockaddr_storage *ss, struct errmsg *err)
+{
+	char *port_str;
+	int port;
+
+	/* look for the addr/port delimiter, it's the last colon. If no colon
+	 * is present, the whole string is taken as the port.
+	 */
+	if ((port_str = strrchr(str, ':')) == NULL)
+		port_str = str;
+	else
+		*port_str++ = 0;
+
+	port = atoi(port_str);
+	if (port <= 0 || port > 65535) {
+		err->len = snprintf(err->msg, err->size, "Missing/invalid port number: '%s'\n", port_str);
+		return -1;
+	}
+	*port_str = 0; // present an empty address if none was set
+
+	memset(ss, 0, sizeof(*ss));
+
+	if (strrchr(str, ':') != NULL) {
+		/* IPv6 address contains ':' */
+		ss->ss_family = AF_INET6;
+		((struct sockaddr_in6 *)ss)->sin6_port = htons(port);
+
+		if (!inet_pton(ss->ss_family, str, &((struct sockaddr_in6 *)ss)->sin6_addr)) {
+			err->len = snprintf(err->msg, err->size, "Invalid IPv6 server address: '%s'", str);
+			return -1;
+		}
+	}
+	else {
+		ss->ss_family = AF_INET;
+		((struct sockaddr_in *)ss)->sin_port = htons(port);
+
+		if (*str == '*' || *str == '\0') { /* INADDR_ANY */
+			((struct sockaddr_in *)ss)->sin_addr.s_addr = INADDR_ANY;
+			return 0;
+		}
+
+		if (!inet_pton(ss->ss_family, str, &((struct sockaddr_in *)ss)->sin_addr)) {
+			/* not a dotted-quad address, try a name resolution */
+			struct hostent *he = gethostbyname(str);
+
+			if (he == NULL) {
+				err->len = snprintf(err->msg, err->size, "Invalid IPv4 server name: '%s'", str);
+				return -1;
+			}
+			((struct sockaddr_in *)ss)->sin_addr = *(struct in_addr *) *(he->h_addr_list);
+		}
+	}
+	return 0;
+}
+
+/* Create a non-blocking UDP socket bound to <addr>, with SO_REUSEADDR and,
+ * where supported, SO_REUSEPORT set. Returns the front FD on success, or
+ * < 0 with a message stored into <err> in case of error.
+ */
+int create_udp_listener(struct sockaddr_storage *addr, struct errmsg *err)
+{
+	int fd;
+
+	if ((fd = socket(addr->ss_family, SOCK_DGRAM, 0)) == -1) {
+		err->len = snprintf(err->msg, err->size, "socket(): '%s'", strerror(errno));
+		goto fail;
+	}
+
+	if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
+		err->len = snprintf(err->msg, err->size, "fcntl(O_NONBLOCK): '%s'", strerror(errno));
+		goto fail;
+	}
+
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &one, sizeof(one)) == -1) {
+		err->len = snprintf(err->msg, err->size, "setsockopt(SO_REUSEADDR): '%s'", strerror(errno));
+		goto fail;
+	}
+
+#ifdef SO_REUSEPORT
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, (char *) &one, sizeof(one)) == -1) {
+		err->len = snprintf(err->msg, err->size, "setsockopt(SO_REUSEPORT): '%s'", strerror(errno));
+		goto fail;
+	}
+#endif
+	if (bind(fd, (struct sockaddr *)addr, addr->ss_family == AF_INET6 ?
+	         sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)) == -1) {
+		err->len = snprintf(err->msg, err->size, "bind(): '%s'", strerror(errno));
+		goto fail;
+	}
+
+	/* the socket is ready */
+	return fd;
+
+ fail:
+	/* fd is -1 when socket() itself failed, hence the guard */
+	if (fd > -1)
+		close(fd);
+	fd = -1;
+	return fd;
+}
+
+/* Recompute pollfds using <frt_fd> and scanning the <nbconn> connections of
+ * <conns>: slot 0 always holds the front FD, followed by one entry per
+ * established back connection. All entries poll for POLLIN only.
+ * Returns the number of FDs in the set.
+ */
+int update_pfd(struct pollfd *pfd, int frt_fd, struct conn *conns, int nbconn)
+{
+	int nbfd = 0;
+	int i;
+
+	pfd[nbfd].fd = frt_fd;
+	pfd[nbfd].events = POLLIN;
+	nbfd++;
+
+	for (i = 0; i < nbconn; i++) {
+		/* fd_bck < 0 marks an unused slot */
+		if (conns[i].fd_bck < 0)
+			continue;
+		pfd[nbfd].fd = conns[i].fd_bck;
+		pfd[nbfd].events = POLLIN;
+		nbfd++;
+	}
+	return nbfd;
+}
+
+/* Searches among the <nbconn> entries of <conns> for a connection using
+ * <fd> as its back connection. Returns the connection if found, otherwise
+ * NULL.
+ */
+struct conn *conn_bck_lookup(struct conn *conns, int nbconn, int fd)
+{
+	int i;
+
+	for (i = 0; i < nbconn; i++) {
+		/* skip unused slots (fd_bck < 0) */
+		if (conns[i].fd_bck < 0)
+			continue;
+		if (conns[i].fd_bck == fd)
+			return &conns[i];
+	}
+	return NULL;
+}
+
+/* Try to establish a "connection" to <ss>, i.e. a non-blocking connect()ed
+ * UDP socket. Return the fd or -1 in case of error.
+ */
+int add_connection(struct sockaddr_storage *ss)
+{
+	int fd;
+
+	fd = socket(ss->ss_family, SOCK_DGRAM, 0);
+	if (fd < 0)
+		goto fail;
+
+	if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1)
+		goto fail;
+
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1)
+		goto fail;
+
+	/* EINPROGRESS is not an error for a non-blocking connect() */
+	if (connect(fd, (struct sockaddr *)ss, ss->ss_family == AF_INET6 ?
+	            sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)) == -1) {
+		if (errno != EINPROGRESS)
+			goto fail;
+	}
+
+	return fd;
+ fail:
+	if (fd > -1)
+		close(fd);
+	return -1;
+}
+
+/* Corrupt <buf> buffer with <buflen> as length if required: with a
+ * probability of <corr_rate> percent, XOR one byte at a random position
+ * within [corr_base, corr_base+corr_span) with the low bits of the same
+ * random draw. Positions past the end of the buffer are silently ignored.
+ */
+static void pktbuf_apply_corruption(char *buf, size_t buflen)
+{
+	if (corr_rate > 0 && prng(100) < corr_rate) {
+		unsigned int rnd = prng(corr_span * 256); // pos and value
+		unsigned int pos = corr_base + (rnd >> 8);
+
+		if (pos < buflen)
+			buf[pos] ^= rnd;
+	}
+}
+
+/* Handle a read operation on the front FD <fd>. Will either reuse the
+ * existing connection if the source address is found, or will allocate a new
+ * one, possibly replacing the oldest one (a single connection is supported
+ * for now). When reordering is enabled (-r), the received packet may be
+ * replaced by a random one from the history buffer, or dropped; it may also
+ * be corrupted (-c) before being forwarded to the server. Returns <0 on
+ * error or the number of bytes transmitted (0 when nothing was forwarded).
+ */
+int handle_frt(int fd, struct pollfd *pfd, struct conn *conns, int nbconn)
+{
+	struct sockaddr_storage addr;
+	socklen_t addrlen;
+	struct conn *conn;
+	char *pktbuf = trash;
+	int ret;
+	int i;
+
+	if (rand_rate > 0) {
+		/* keep a copy of this packet: receive directly into the next
+		 * history slot instead of the trash buffer.
+		 */
+		history_idx++;
+		if (history_idx >= MAXREORDER)
+			history_idx = 0;
+		pktbuf = history[history_idx].buf;
+	}
+
+	addrlen = sizeof(addr);
+	ret = recvfrom(fd, pktbuf, MAXPKTSIZE, MSG_DONTWAIT | MSG_NOSIGNAL,
+	               (struct sockaddr *)&addr, &addrlen);
+
+	if (rand_rate > 0) {
+		history[history_idx].len = ret; // note: we may store -1/EAGAIN
+		if (prng(100) < rand_rate) {
+			/* return a random buffer or nothing */
+			int idx = prng(MAXREORDER + 1) - 1;
+			if (idx < 0) {
+				/* pretend we didn't receive anything */
+				return 0;
+			}
+			pktbuf = history[idx].buf;
+			ret = history[idx].len;
+			if (ret < 0)
+				errno = EAGAIN;
+		}
+	}
+
+	if (ret == 0)
+		return 0;
+
+	if (ret < 0)
+		return errno == EAGAIN ? 0 : -1;
+
+	pktbuf_apply_corruption(pktbuf, ret);
+
+	/* look up the connection by the client's source address */
+	conn = NULL;
+	for (i = 0; i < nbconn; i++) {
+		if (addr.ss_family != conns[i].cli_addr.ss_family)
+			continue;
+		if (memcmp(&conns[i].cli_addr, &addr,
+		           (addr.ss_family == AF_INET6) ?
+		           sizeof(struct sockaddr_in6) :
+		           sizeof(struct sockaddr_in)) != 0)
+			continue;
+		conn = &conns[i];
+		break;
+	}
+
+	if (!conn) {
+		/* address not found, create a new conn or replace the oldest
+		 * one. For now we support a single one.
+		 */
+		conn = &conns[0];
+
+		memcpy(&conn->cli_addr, &addr,
+		       (addr.ss_family == AF_INET6) ?
+		       sizeof(struct sockaddr_in6) :
+		       sizeof(struct sockaddr_in));
+
+		if (conn->fd_bck < 0) {
+			/* try to create a new connection */
+			conn->fd_bck = add_connection(&srv_addr);
+			nbfd = update_pfd(pfd, fd, conns, nbconn); // FIXME: MAXCONN instead ?
+		}
+	}
+
+	/* connection to the server may have failed; silently drop the packet */
+	if (conn->fd_bck < 0)
+		return 0;
+
+	ret = send(conn->fd_bck, pktbuf, ret, MSG_DONTWAIT | MSG_NOSIGNAL);
+	return ret;
+}
+
+/* Handle a read operation on a back-side FD <fd>: the datagram received from
+ * the server is forwarded to the client owning this connection through the
+ * front socket. As on the front side, the packet may be replaced by a random
+ * one from the history buffer (reordering/duplication/loss emulation) and/or
+ * corrupted first. Returns <0 on error or the number of bytes transmitted
+ * (0 when nothing was forwarded or the connection is unknown).
+ */
+int handle_bck(int fd, struct pollfd *pfd, struct conn *conns, int nbconn)
+{
+	struct sockaddr_storage addr;
+	socklen_t addrlen;
+	struct conn *conn;
+	char *pktbuf = trash;
+	int ret;
+
+	if (rand_rate > 0) {
+		/* keep a copy of this packet */
+		history_idx++;
+		if (history_idx >= MAXREORDER)
+			history_idx = 0;
+		pktbuf = history[history_idx].buf;
+	}
+
+	/* fix: <addrlen> must be set to the size of <addr> before calling
+	 * recvfrom(), which reads it on input; it was previously left
+	 * uninitialized here (unlike in handle_frt()), which could truncate
+	 * the source address or make the call fail with EINVAL depending on
+	 * the stack contents.
+	 */
+	addrlen = sizeof(addr);
+	ret = recvfrom(fd, pktbuf, MAXPKTSIZE, MSG_DONTWAIT | MSG_NOSIGNAL,
+	               (struct sockaddr *)&addr, &addrlen);
+
+	if (rand_rate > 0) {
+		history[history_idx].len = ret; // note: we may store -1/EAGAIN
+		if (prng(100) < rand_rate) {
+			/* return a random buffer or nothing */
+			int idx = prng(MAXREORDER + 1) - 1;
+			if (idx < 0) {
+				/* pretend we didn't receive anything */
+				return 0;
+			}
+			pktbuf = history[idx].buf;
+			ret = history[idx].len;
+			if (ret < 0)
+				errno = EAGAIN;
+		}
+	}
+
+	if (ret == 0)
+		return 0;
+
+	if (ret < 0)
+		return errno == EAGAIN ? 0 : -1;
+
+	pktbuf_apply_corruption(pktbuf, ret);
+
+	/* find the connection this server-side FD belongs to; drop the
+	 * packet if none is found.
+	 */
+	conn = conn_bck_lookup(conns, nbconn, fd);
+	if (!conn)
+		return 0;
+
+	ret = sendto(fd_frt, pktbuf, ret, MSG_DONTWAIT | MSG_NOSIGNAL,
+	             (struct sockaddr *)&conn->cli_addr,
+	             conn->cli_addr.ss_family == AF_INET6 ?
+	             sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in));
+	return ret;
+}
+
+/* Print the usage message for program named <name> (reduced to its basename)
+ * and exit with status <status> via die(). Never returns.
+ */
+void usage(int status, const char *name)
+{
+	if (strchr(name, '/'))
+		name = strrchr(name, '/') + 1;
+	die(status,
+	    "Usage: %s [-h] [options] [<laddr>:]<lport> [<saddr>:]<sport>\n"
+	    "Options:\n"
+	    "  -h           display this help\n"
+	    "  -r rate      reorder/duplicate/lose around <rate>%% of packets\n"
+	    "  -s seed      force initial random seed (currently %#x)\n"
+	    "  -c rate      corrupt around <rate>%% of packets\n"
+	    "  -o ofs       start offset of corrupted area (def: 0)\n"
+	    "  -w width     width of the corrupted area (def: 1)\n"
+	    "", name, prng_state);
+}
+
+/* Entry point: parse options and the two <listen> and <server> addresses,
+ * bind the front UDP socket, then loop forever forwarding packets between
+ * the client and the server while applying the configured perturbations.
+ */
+int main(int argc, char **argv)
+{
+	struct errmsg err;
+	struct pollfd *pfd;
+	int opt;
+	int i;
+
+	err.len = 0;
+	err.size = 100;
+	err.msg = malloc(err.size); /* NOTE(review): allocation not checked */
+
+	while ((opt = getopt(argc, argv, "hr:s:c:o:w:")) != -1) {
+		switch (opt) {
+		case 'r': // rand_rate%
+			rand_rate = atoi(optarg);
+			break;
+		case 's': // seed
+			prng_state = atol(optarg);
+			break;
+		case 'c': // corruption rate
+			corr_rate = atol(optarg);
+			break;
+		case 'o': // corruption offset
+			corr_base = atol(optarg);
+			break;
+		case 'w': // corruption width
+			corr_span = atol(optarg);
+			break;
+		default: // help, anything else
+			/* NOTE(review): invalid options also exit with
+			 * status 0 here; confirm this is intentional.
+			 */
+			usage(0, argv[0]);
+		}
+	}
+
+	/* two positional arguments are required: listen and server addresses */
+	if (argc - optind < 2)
+		usage(1, argv[0]);
+
+	if (addr_to_ss(argv[optind], &frt_addr, &err) < 0)
+		die(1, "parsing listen address: %s\n", err.msg);
+
+	if (addr_to_ss(argv[optind+1], &srv_addr, &err) < 0)
+		die(1, "parsing server address: %s\n", err.msg);
+
+	/* one slot for the front FD plus one per possible connection */
+	pfd = calloc(MAXCONN + 1, sizeof(struct pollfd));
+	if (!pfd)
+		die(1, "out of memory\n");
+
+	fd_frt = create_udp_listener(&frt_addr, &err);
+	if (fd_frt < 0)
+		die(1, "binding listener: %s\n", err.msg);
+
+
+	/* mark all connection slots as unused */
+	for (i = 0; i < MAXCONN; i++)
+		conns[i].fd_bck = -1;
+
+	nbfd = update_pfd(pfd, fd_frt, conns, MAXCONN);
+
+	while (1) {
+		/* listen for incoming packets */
+		int ret, i;
+
+		ret = poll(pfd, nbfd, 1000);
+		if (ret <= 0)
+			continue;
+
+		/* dispatch each ready FD; <ret> counts the remaining events */
+		for (i = 0; ret; i++) {
+			if (!pfd[i].revents)
+				continue;
+			ret--;
+
+			if (pfd[i].fd == fd_frt) {
+				handle_frt(pfd[i].fd, pfd, conns, nbconn);
+				continue;
+			}
+
+			handle_bck(pfd[i].fd, pfd, conns, nbconn);
+		}
+	}
+}
diff --git a/doc/51Degrees-device-detection.txt b/doc/51Degrees-device-detection.txt
new file mode 100644
index 0000000..2e31274
--- /dev/null
+++ b/doc/51Degrees-device-detection.txt
@@ -0,0 +1,174 @@
+51Degrees Device Detection
+--------------------------
+
+You can also include 51Degrees for inbuilt device detection enabling attributes
+such as screen size (physical & pixels), supported input methods, release date,
+hardware vendor and model, browser information, and device price among many
+others. Such information can be used to improve the user experience of a web
+site by tailoring the page content, layout and business processes to the
+precise characteristics of the device. Such customisations improve profit by
+making it easier for customers to get to the information or services they
+need. Attributes of the device making a web request can be added to HTTP
+headers as configurable parameters.
+
+In order to enable 51Degrees download the 51Degrees source code from the
+official git repository :
+
+ - either use the proven stable but frozen 3.2.10 version which
+ supports the Trie algorithm :
+
+ git clone https://github.com/51Degrees/Device-Detection.git -b v3.2.10
+
+ - use newer 3.2.12.12 version which continues to receive database
+ updates and supports a new Hash Trie algorithm, but which is not
+ compatible with older Trie databases :
+
+ git clone https://github.com/51Degrees/Device-Detection.git -b v3.2.12
+
+ - or use the latest 51Degrees version 4 with 51Degrees Hash algorithm,
+ not compatible with older databases :
+
+ git clone --recurse-submodules https://github.com/51Degrees/device-detection-cxx.git
+
+then run 'make' with USE_51DEGREES, optionally 51DEGREES_VER=4 (if using
+51Degrees version 4), and 51DEGREES_SRC set. Both 51DEGREES_INC and
+51DEGREES_LIB may additionally be used to force specific different paths for
+.o and .h, but will default to 51DEGREES_SRC. Make sure to replace
+'51D_REPO_PATH' with the path to the 51Degrees repository.
+
+51Degrees provide 4 different detection algorithms:
+
+ 1. Pattern - balances main memory usage and CPU.
+ 2. Trie - a very high performance detection solution which uses more main
+ memory than Pattern.
+ 3. Hash Trie - replaces Trie, 3x faster, 80% lower memory consumption and
+ tuning options.
+ 4. 51Degrees V4 Hash - only with 51Degrees Device Detection V4.
+
+To make with 51Degrees Pattern algorithm use the following command line.
+
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_SRC='51D_REPO_PATH'/src/pattern
+
+To use the 51Degrees Trie algorithm use the following command line.
+
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_SRC='51D_REPO_PATH'/src/trie
+
+To build with the 51Degrees Device Detection V4 use the following command line.
+
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_VER=4 51DEGREES_SRC='51D_REPO_PATH'/src
+
+A data file containing information about devices, browsers, operating systems
+and their associated signatures is then needed. 51Degrees provide a free
+database with Github repo for this purpose. These free data files are located
+in '51D_REPO_PATH'/data with the extensions .dat for Pattern data and .trie for
+Trie data. Free Hash Trie data file can be obtained by signing up for a licence
+key at https://51degrees.com/products/store/on-premise-device-detection.
+If using the 51degrees version 4, the free hash data file is located in
+'51D_REPO_PATH'/device-detection-data with the .hash extension.
+
+For HAProxy developers who need to verify that their changes didn't affect the
+51Degrees implementation, a dummy library is provided in the
+"addons/51degrees/dummy" directory. This does not function, but implements the
+API such that the 51Degrees module can be used (but not return any meaningful
+information). To test either Pattern or Hash Trie, or the 51Degrees version 4
+Hash algorithm, build with:
+
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/pattern
+or
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_SRC=addons/51degrees/dummy/trie
+or
+ $ make TARGET=<target> USE_51DEGREES=1 51DEGREES_VER=4 51DEGREES_SRC=addons/51degrees/dummy/v4hash
+
+respectively.
+
+The configuration file needs to set the following parameters:
+
+ global
+ 51degrees-data-file path to the Pattern, Trie or V4 Hash data file
+ 51degrees-property-name-list list of 51Degrees properties to detect
+ 51degrees-property-separator separator to use between values
+ 51degrees-cache-size LRU-based cache size (disabled by default)
+
+The following is an example of the settings for Pattern.
+
+ global
+ 51degrees-data-file '51D_REPO_PATH'/data/51Degrees-LiteV3.2.dat
+ 51degrees-property-name-list IsTablet DeviceType IsMobile
+ 51degrees-property-separator ,
+ 51degrees-cache-size 10000
+
+HAProxy needs a way to pass device information to the backend servers. This is
+done by using the 51d converter or fetch method, which intercepts the HTTP
+headers and creates some new headers. This is controlled in the frontend
+http-in section.
+
+The following is an example which adds two new HTTP headers prefixed X-51D-
+
+ frontend http-in
+ bind *:8081
+ default_backend servers
+ http-request set-header X-51D-DeviceTypeMobileTablet %[51d.all(DeviceType,IsMobile,IsTablet)]
+ http-request set-header X-51D-Tablet %[51d.all(IsTablet)]
+
+Here, two headers are created with 51Degrees data, X-51D-DeviceTypeMobileTablet
+and X-51D-Tablet. Any number of headers can be created this way and can be
+named anything. 51d.all( ) invokes the 51degrees fetch. It can be passed up to
+five property names of values to return. Values will be returned in the same
+order, separated by the 51degrees-property-separator configured earlier. If a
+property name can't be found the value 'NoData' is returned instead.
+
+In addition to the device properties three additional properties related to the
+validity of the result can be returned when used with the Pattern method. The
+following example shows how Method, Difference and Rank could be included as one
+new HTTP header X-51D-Stats.
+
+ frontend http-in
+ ...
+ http-request set-header X-51D-Stats %[51d.all(Method,Difference,Rank)]
+
+These values indicate how confident 51Degrees is in the result that was
+returned. More information is available on the 51Degrees web site at:
+
+ https://51degrees.com/support/documentation/pattern
+
+The above 51d.all fetch method uses all available HTTP headers for detection. A
+modest performance improvement can be obtained by only passing one HTTP header
+to the detection method with the 51d.single converter. The following example
+uses the User-Agent HTTP header only for detection.
+
+ frontend http-in
+ ...
+ http-request set-header X-51D-DeviceTypeMobileTablet %[req.fhdr(User-Agent),51d.single(DeviceType,IsMobile,IsTablet)]
+
+Any HTTP header could be used in place of User-Agent by changing the parameter
+provided to req.fhdr.
+
+When compiled to use the Trie detection method the trie format data file needs
+to be provided. Changing the extension of the data file from dat to trie will
+use the correct data.
+
+ global
+ 51degrees-data-file '51D_REPO_PATH'/data/51Degrees-LiteV3.2.trie
+
+When used with Trie the Method, Difference and Rank properties are not
+available.
+
+When using the 51Degrees V4 Hash algorithm, the hash format data file needs
+to be provided as in the following example.
+
+ global
+ 51degrees-data-file '51D_REPO_PATH'/device-detection-data/51Degrees-LiteV4.1.hash
+
+The free Lite data file contains information about screen size in pixels and
+whether the device is a mobile. A full list of available properties is located
+on the 51Degrees web site at:
+
+ https://51degrees.com/resources/property-dictionary
+
+Some properties are only available in the paid for Premium and Enterprise
+versions of 51Degrees. These data sets not only contain more properties but
+are updated weekly and daily and contain signatures for 100,000s of different
+device combinations. For more information see the data options comparison web
+page:
+
+ https://51degrees.com/compare-data-options
diff --git a/doc/DeviceAtlas-device-detection.txt b/doc/DeviceAtlas-device-detection.txt
new file mode 100644
index 0000000..b600918
--- /dev/null
+++ b/doc/DeviceAtlas-device-detection.txt
@@ -0,0 +1,82 @@
+DeviceAtlas Device Detection
+----------------------------
+
+In order to add DeviceAtlas Device Detection support, you would need to download
+the API source code from https://deviceatlas.com/deviceatlas-haproxy-module.
+The build supports the USE_PCRE and USE_PCRE2 options. Once extracted :
+
+ $ make TARGET=<target> USE_PCRE=1 (or USE_PCRE2=1) USE_DEVICEATLAS=1 DEVICEATLAS_SRC=<path to the API root folder>
+
+Optionally DEVICEATLAS_INC and DEVICEATLAS_LIB may be set to override the path
+to the include files and libraries respectively if they're not in the source
+directory. However, if the API had been installed beforehand, DEVICEATLAS_SRC
+can be omitted. Note that the DeviceAtlas C API version supported is the 2.4.0
+at minimum.
+
+For HAProxy developers who need to verify that their changes didn't accidentally
+break the DeviceAtlas code, it is possible to build a dummy library provided in
+the addons/deviceatlas/dummy directory and to use it as an alternative for the
+full library. This will not provide the full functionalities, it will just allow
+haproxy to start with a deviceatlas configuration, which generally is enough to
+validate API changes :
+
+ $ make TARGET=<target> USE_PCRE=1 USE_DEVICEATLAS=1 DEVICEATLAS_SRC=$PWD/addons/deviceatlas/dummy
+
+These are supported DeviceAtlas directives (see doc/configuration.txt) :
+ - deviceatlas-json-file <path to the DeviceAtlas JSON data file>.
+ - deviceatlas-log-level <number> (0 to 3, level of information returned by
+ the API, 0 by default).
+ - deviceatlas-property-separator <character> (character used to separate the
+ properties produced by the API, | by default).
+
+Sample configuration :
+
+ global
+ deviceatlas-json-file <path to json file>
+
+ ...
+ frontend
+ bind *:8881
+ default_backend servers
+
+There are two distinct methods available, one which leverages all HTTP headers
+and one which uses only a single HTTP header for the detection. The former
+method is highly recommended and more accurate. There are several possible use
+cases.
+
+# To transmit the DeviceAtlas data downstream to the target application
+
+All HTTP headers via the sample / fetch
+
+ http-request set-header X-DeviceAtlas-Data %[da-csv-fetch(primaryHardwareType,osName,osVersion,browserName,browserVersion,browserRenderingEngine)]
+
+Single HTTP header (e.g. User-Agent) via the converter
+
+ http-request set-header X-DeviceAtlas-Data %[req.fhdr(User-Agent),da-csv-conv(primaryHardwareType,osName,osVersion,browserName,browserVersion,browserRenderingEngine)]
+
+# Mobile content switching with ACL
+
+All HTTP headers
+
+ acl is_mobile da-csv-fetch(mobileDevice) 1
+
+Single HTTP header
+
+ acl device_type_tablet req.fhdr(User-Agent),da-csv-conv(primaryHardwareType) "Tablet"
+
+Optionally a JSON download scheduler is provided to allow a data file to be
+fetched automatically on a daily basis without restarting HAProxy :
+
+ $ cd addons/deviceatlas && make [DEVICEATLAS_SRC=<path to the API root folder>]
+
+Similarly, if the DeviceAtlas API is installed, DEVICEATLAS_SRC can be omitted.
+
+ $ ./dadwsch -u JSON data file URL e.g. "https://deviceatlas.com/getJSON?licencekey=<your licence key>&format=zip&data=my&index=web" \
+ [-p download directory path /tmp by default] \
+ [-d scheduled hour of download, hour when the service is launched by default]
+
+Note that it needs to be started before HAProxy.
+
+
+Please find more information about DeviceAtlas and the detection methods at
+https://deviceatlas.com/resources .
diff --git a/doc/SOCKS4.protocol.txt b/doc/SOCKS4.protocol.txt
new file mode 100644
index 0000000..06aee8a
--- /dev/null
+++ b/doc/SOCKS4.protocol.txt
@@ -0,0 +1 @@
+Please reference to "https://www.openssh.com/txt/socks4.protocol". \ No newline at end of file
diff --git a/doc/SPOE.txt b/doc/SPOE.txt
new file mode 100644
index 0000000..cc6d8dd
--- /dev/null
+++ b/doc/SPOE.txt
@@ -0,0 +1,1255 @@
+ -----------------------------------------------
+ Stream Processing Offload Engine (SPOE)
+ Version 1.2
+ ( Last update: 2020-06-13 )
+ -----------------------------------------------
+ Author : Christopher Faulet
+ Contact : cfaulet at haproxy dot com
+
+
+SUMMARY
+--------
+
+ 0. Terms
+ 1. Introduction
+ 2. SPOE configuration
+ 2.1. SPOE scope
+ 2.2. "spoe-agent" section
+ 2.3. "spoe-message" section
+ 2.4. "spoe-group" section
+ 2.5. Example
+ 3. SPOP specification
+ 3.1. Data types
+ 3.2. Frames
+ 3.2.1. Frame capabilities
+ 3.2.2. Frame types overview
+ 3.2.3. Workflow
+ 3.2.4. Frame: HAPROXY-HELLO
+ 3.2.5. Frame: AGENT-HELLO
+ 3.2.6. Frame: NOTIFY
+ 3.2.7. Frame: ACK
+ 3.2.8. Frame: HAPROXY-DISCONNECT
+ 3.2.9. Frame: AGENT-DISCONNECT
+ 3.3. Events & messages
+ 3.4. Actions
+ 3.5. Errors & timeouts
+ 4. Logging
+
+
+0. Terms
+---------
+
+* SPOE : Stream Processing Offload Engine.
+
+ A SPOE is a filter talking to servers managed by a SPOA to offload the
+ stream processing. An engine is attached to a proxy. A proxy can have
+ several engines. Each engine is linked to an agent and only one.
+
+* SPOA : Stream Processing Offload Agent.
+
+ A SPOA is a service that will receive info from a SPOE to offload the
+ stream processing. An agent manages several servers. It uses a backend to
+ reference all of them. By extension, these servers can also be called
+ agents.
+
+* SPOP : Stream Processing Offload Protocol, used by SPOEs to talk to SPOA
+ servers.
+
+ This protocol is used by engines to talk to agents. It is an in-house
+ binary protocol described in this documentation.
+
+
+1. Introduction
+----------------
+
+SPOE is a feature introduced in HAProxy 1.7. It makes possible the
+communication with external components to retrieve some info. The idea started
+with the problems caused by most ldap libs not working fine in event-driven
+systems (often at least the connect() is blocking). So, it is hard to properly
+implement Single Sign On solution (SSO) in HAProxy. The SPOE will ease this
+kind of processing, or we hope so.
+
+Now, the aim of SPOE is to allow any kind of offloading on the streams. First
+releases won't do a lot of things. As we will see, there are few handled events
+and even less actions supported. Actually, for now, the SPOE can offload the
+processing before "tcp-request content", "tcp-response content", "http-request"
+and "http-response" rules. And it only supports variables definition. But, in
+spite of these limited features, we can easily imagine to implement SSO
+solution, ip reputation or ip geolocation services.
+
+Some example implementations in various languages are linked to from the
+HAProxy Wiki page dedicated to this mechanism:
+
+ https://github.com/haproxy/wiki/wiki/SPOE:-Stream-Processing-Offloading-Engine
+
+2. SPOE configuration
+----------------------
+
+Because SPOE is implemented as a filter, to use it you must declare a "filter
+spoe" line in a proxy section (frontend/backend/listen) :
+
+ frontend my-front
+ ...
+ filter spoe [engine <name>] config <file>
+ ...
+
+The "config" parameter is mandatory. It specifies the SPOE configuration
+file. The engine name is optional. It can be set to declare the scope to use in
+the SPOE configuration. So it is possible to use the same SPOE configuration
+for several engines. If no name is provided, the SPOE configuration must not
+contain any scope directive.
+
+We use a separate configuration file on purpose. By commenting SPOE filter
+line, you completely disable the feature, including the parsing of sections
+reserved to SPOE. This is also a way to keep the HAProxy configuration clean.
+
+A SPOE configuration file must contain, at least, the SPOA configuration
+("spoe-agent" section) and SPOE messages/groups ("spoe-message" or "spoe-group"
+sections) attached to this agent.
+
+IMPORTANT : The configuration of a SPOE filter must be located in a dedicated
+file. But the backend used by a SPOA must be declared in HAProxy configuration
+file.
+
+2.1. SPOE scope
+-------------------------
+
+If you specify an engine name on the SPOE filter line, then you need to define
+scope in the SPOE configuration with the same name. You can have several SPOE
+scope in the same file. In each scope, you must define one and only one
+"spoe-agent" section to configure the SPOA linked to your SPOE and several
+"spoe-message" and "spoe-group" sections to describe, respectively, messages and
+group of messages sent to servers managed by your SPOA.
+
+A SPOE scope starts with this kind of line :
+
+ [<name>]
+
+where <name> is the same engine name specified on the SPOE filter line. The
+scope ends when the file ends or when another scope is found.
+
+ Example :
+ [my-first-engine]
+ spoe-agent my-agent
+ ...
+ spoe-message msg1
+ ...
+ spoe-message msg2
+ ...
+ spoe-group grp1
+ ...
+ spoe-group grp2
+ ...
+
+ [my-second-engine]
+ ...
+
+If no engine name is provided on the SPOE filter line, no SPOE scope must be
+found in the SPOE configuration file. All the file is considered to be in the
+same anonymous and implicit scope.
+
+The engine name must be unique for a proxy. If no engine name is provided on the
+SPOE filter line, the SPOE agent name is used by default.
+
+2.2. "spoe-agent" section
+--------------------------
+
+For each engine, you must define one and only one "spoe-agent" section. In this
+section, you will declare SPOE messages and the backend you will use. You will
+also set timeouts and options to customize your agent's behaviour.
+
+
+spoe-agent <name>
+ Create a new SPOA with the name <name>. It must have one and only one
+ "spoe-agent" definition by SPOE scope.
+
+ Arguments :
+ <name> is the name of the agent section.
+
+ following keywords are supported :
+ - groups
+ - log
+ - maxconnrate
+ - maxerrrate
+ - max-frame-size
+ - max-waiting-frames
+ - messages
+ - [no] option async
+ - [no] option dontlog-normal
+ - [no] option pipelining
+ - [no] option send-frag-payload
+ - option continue-on-error
+ - option force-set-var
+ - option set-on-error
+ - option set-process-time
+ - option set-total-time
+ - option var-prefix
+ - register-var-names
+ - timeout hello|idle|processing
+ - use-backend
+
+
+groups <grp-name> ...
+ Declare the list of SPOE groups that an agent will handle.
+
+ Arguments :
+ <grp-name> is the name of a SPOE group.
+
+ Groups declared here must be found in the same engine scope, else an error is
+ triggered during the configuration parsing. You can have many "groups" lines.
+
+ See also: "spoe-group" section.
+
+
+log global
+log <address> [len <length>] [format <format>] <facility> [<level> [<minlevel>]]
+no log
+ Enable per-instance logging of events and traffic.
+
+ Prefix :
+ no should be used when the logger list must be flushed.
+
+ See the HAProxy Configuration Manual for details about this option.
+
+maxconnrate <number>
+ Set the maximum number of connections per second to <number>. The SPOE will
+ stop to open new connections if the maximum is reached and will wait to
+ acquire an existing one. So it is important to set "timeout hello" to a
+ relatively small value.
+
+
+maxerrrate <number>
+ Set the maximum number of errors per second to <number>. The SPOE will stop
+ its processing if the maximum is reached.
+
+
+max-frame-size <number>
+ Set the maximum allowed size for frames exchanged between HAProxy and SPOA.
+ It must be in the range [256, tune.bufsize-4] (4 bytes are reserved for the
+ frame length). By default, it is set to (tune.bufsize-4).
+
+max-waiting-frames <number>
+ Set the maximum number of frames waiting for an acknowledgement on the same
+  connection. This value is only used when the pipelined or asynchronous
+  exchanges between HAProxy and SPOA are enabled. By default, it is set to 20.
+
+messages <msg-name> ...
+ Declare the list of SPOE messages that an agent will handle.
+
+ Arguments :
+ <msg-name> is the name of a SPOE message.
+
+ Messages declared here must be found in the same engine scope, else an error
+ is triggered during the configuration parsing. You can have many "messages"
+ lines.
+
+ See also: "spoe-message" section.
+
+
+option async
+no option async
+ Enable or disable the support of asynchronous exchanges between HAProxy and
+ SPOA. By default, this option is enabled.
+
+
+option continue-on-error
+ Do not stop the events processing when an error occurred on a stream.
+
+  By default, for a specific stream, when an abnormal/unexpected error occurs,
+  the SPOE is disabled for all the transaction. So if you have several events
+  configured, such an error on an event will disable all the following ones.
+  For TCP streams, this will disable the SPOE for the whole session. For HTTP
+  streams, this will disable it for the transaction (request and response).
+
+  When set, this option bypasses this behaviour and only the current event
+  will be ignored.
+
+
+option dontlog-normal
+no option dontlog-normal
+ Enable or disable logging of normal, successful processing.
+
+ Arguments : none
+
+ See also: "log" and section 4 about logging.
+
+
+option force-set-var
+  By default, the SPOE filter only registers already known variables (mainly
+  from parsing of the configuration), and process-wide variables (those of
+  scope "proc") cannot be created. If you want haproxy to trust the agent and
+  register all variables (ex: can be useful for LUA workload), activate this
+  option.
+
+ Caution : this option opens to a variety of attacks such as a rogue SPOA that
+ asks to register too many variables.
+
+
+option pipelining
+no option pipelining
+ Enable or disable the support of pipelined exchanges between HAProxy and
+ SPOA. By default, this option is enabled.
+
+
+option send-frag-payload
+no option send-frag-payload
+ Enable or disable the sending of fragmented payload to SPOA. By default, this
+ option is enabled.
+
+
+option set-on-error <var name>
+ Define the variable to set when an error occurred during an event processing.
+
+ Arguments :
+
+ <var name> is the variable name, without the scope. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+  This variable will only be set when an error occurred in the scope of the
+  transaction. As for all other variables defined by the SPOE, it will be
+  prefixed. So, if your variable name is "error" and your prefix is
+  "my_spoe_pfx", the variable will be "txn.my_spoe_pfx.error".
+
+  When set, the variable is an integer representing the error reason. For
+  values under 256, it represents an error coming from the engine. From 256
+  and above, it reports a SPOP error. In this case, to retrieve the right
+  SPOP status code, you must subtract 256 from this value. Here are possible
+  values:
+
+ * 1 a timeout occurred during the event processing.
+
+ * 2 an error was triggered during the resources allocation.
+
+ * 3 the frame payload exceeds the frame size and it cannot be
+ fragmented.
+
+ * 4 the fragmentation of a payload is aborted.
+
+ * 5 The frame processing has been interrupted by HAProxy.
+
+ * 255 an unknown error occurred during the event processing.
+
+ * 256+N a SPOP error occurred during the event processing (see section
+ "Errors & timeouts").
+
+ Note that if "option continue-on-error" is set, the variable is not
+ automatically removed between events processing.
+
+ See also: "option continue-on-error", "option var-prefix".
+
+
+option set-process-time <var name>
+ Define the variable to set to report the processing time of the last event or
+ group.
+
+ Arguments :
+
+ <var name> is the variable name, without the scope. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+  This variable will be set in the scope of the transaction. As for all other
+  variables defined by the SPOE, it will be prefixed. So, if your variable name
+  is "process_time" and your prefix is "my_spoe_pfx", the variable will be
+  "txn.my_spoe_pfx.process_time".
+
+ When set, the variable is an integer representing the delay to process the
+ event or the group, in milliseconds. From the stream point of view, it is the
+ latency added by the SPOE processing for the last handled event or group.
+
+  If several events or groups are processed for the same stream, this value
+  will be overwritten.
+
+ See also: "option set-total-time".
+
+
+option set-total-time <var name>
+ Define the variable to set to report the total processing time SPOE for a
+ stream.
+
+ Arguments :
+
+ <var name> is the variable name, without the scope. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+  This variable will be set in the scope of the transaction. As for all other
+  variables defined by the SPOE, it will be prefixed. So, if your variable name
+  is "total_time" and your prefix is "my_spoe_pfx", the variable will be
+  "txn.my_spoe_pfx.total_time".
+
+ When set, the variable is an integer representing the sum of processing times
+ for a stream, in milliseconds. From the stream point of view, it is the
+ latency added by the SPOE processing.
+
+ If several events or groups are processed for the same stream, this value
+ will be updated.
+
+ See also: "option set-process-time".
+
+
+option var-prefix <prefix>
+ Define the prefix used when variables are set by an agent.
+
+ Arguments :
+
+ <prefix> is the prefix used to limit the scope of variables set by an
+ agent.
+
+ To avoid conflict with other variables defined by HAProxy, all variables
+ names will be prefixed. By default, the "spoe-agent" name is used. This
+ option can be used to customize it.
+
+ The prefix will be added between the variable scope and its name, separated
+ by a '.'. It may only contain characters 'a-z', 'A-Z', '0-9', '.' and '_', as
+ for variables name. In HAProxy configuration, you need to use this prefix as
+  a part of the variables name. For example, if an agent defines the variable
+ "myvar" in the "txn" scope, with the prefix "my_spoe_pfx", then you should
+ use "txn.my_spoe_pfx.myvar" name in your HAProxy configuration.
+
+  By default, an agent will never set new variables at runtime: It can only
+  set new values for existing ones. If you want a different behaviour, see
+  force-set-var option and register-var-names directive.
+
+register-var-names <var name> ...
+ Register some variable names. By default, an agent will not be allowed to set
+ new variables at runtime. This rule can be totally relaxed by setting the
+  option "force-set-var". If you know all the variables you will need, this
+  directive is a good way to register them without letting an agent do
+  whatever it wants. This is only required if these variables are not
+  referenced anywhere in the HAProxy configuration or the SPOE one.
+
+ Arguments:
+ <var name> is a variable name without the scope. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+ The prefix will be automatically added during the registration. You can have
+ many "register-var-names" lines.
+
+ See also: "option force-set-var", "option var-prefix".
+
+timeout hello <timeout>
+ Set the maximum time to wait for an agent to receive the AGENT-HELLO frame.
+  It is applied on the stream that handles the connection with the agent.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+  This timeout is an applicative timeout. It differs from "timeout connect"
+ defined on backends.
+
+
+timeout idle <timeout>
+  Set the maximum time to wait for an agent to close an idle connection. It is
+  applied on the stream that handles the connection with the agent.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+
+timeout processing <timeout>
+  Set the maximum time to wait for a stream to process an event, i.e. to
+  acquire a stream to talk with an agent, to encode all messages, to send the
+  NOTIFY frame, to receive the corresponding acknowledgement and to process
+  all actions. It is applied on the stream that handles the client and the
+  server sessions.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+
+use-backend <backend>
+ Specify the backend to use. It must be defined.
+
+ Arguments :
+ <backend> is the name of a valid "backend" section.
+
+
+2.3. "spoe-message" section
+----------------------------
+
+To offload the stream processing, SPOE will send messages with specific
+information at a specific moment in the stream life and will wait for
+corresponding replies to know what to do.
+
+
+spoe-message <name>
+ Create a new SPOE message with the name <name>.
+
+ Arguments :
+ <name> is the name of the SPOE message.
+
+ Here you define a message that can be referenced in a "spoe-agent"
+ section. Following keywords are supported :
+ - acl
+ - args
+ - event
+
+ See also: "spoe-agent" section.
+
+
+acl <aclname> <criterion> [flags] [operator] <value> ...
+ Declare or complete an access list.
+
+ See section 7 about ACL usage in the HAProxy Configuration Manual.
+
+
+args [name=]<sample> ...
+ Define arguments passed into the SPOE message.
+
+ Arguments :
+ <sample> is a sample expression.
+
+ When the message is processed, if a sample expression is not available, it is
+ set to NULL. Arguments are processed in their declaration order and added in
+ the message in that order. It is possible to declare named arguments.
+
+ For example:
+ args frontend=fe_id src dst
+
+
+event <name> [ { if | unless } <condition> ]
+ Set the event that triggers sending of the message. It may optionally be
+ followed by an ACL-based condition, in which case it will only be evaluated
+ if the condition is true. A SPOE message can only be sent on one event. If
+ several events are defined, only the last one is considered.
+
+ ACL-based conditions are executed in the context of the stream that handle
+ the client and the server connections.
+
+ Arguments :
+ <name> is the event name.
+ <condition> is a standard ACL-based condition.
+
+ Supported events are:
+ - on-client-session
+ - on-server-session
+ - on-frontend-tcp-request
+ - on-backend-tcp-request
+ - on-tcp-response
+ - on-frontend-http-request
+ - on-backend-http-request
+ - on-http-response
+
+ See section "Events & Messages" for more details about supported events.
+ See section 7 about ACL usage in the HAProxy Configuration Manual.
+
+2.4. "spoe-group" section
+--------------------------
+
+This section can be used to declare a group of SPOE messages. Unlike messages
+referenced in a "spoe-agent" section, messages inside a group are not sent on a
+specific event. The sending must be triggered by TCP or HTTP rules, from the
+HAProxy configuration.
+
+
+spoe-group <name>
+ Create a new SPOE group with the name <name>.
+
+ Arguments :
+ <name> is the name of the SPOE group.
+
+ Here you define a group of SPOE messages that can be referenced in a
+ "spoe-agent" section. Following keywords are supported :
+ - messages
+
+ See also: "spoe-agent" and "spoe-message" sections.
+
+
+messages <msg-name> ...
+ Declare the list of SPOE messages belonging to the group.
+
+ Arguments :
+ <msg-name> is the name of a SPOE message.
+
+ Messages declared here must be found in the same engine scope, else an error
+ is triggered during the configuration parsing. Furthermore, a message belongs
+ at most to a group. You can have many "messages" lines.
+
+ See also: "spoe-message" section.
+
+
+2.5. Example
+-------------
+
+Here is a simple but complete example that sends the client IP address to an
+IP reputation service. This service can set the variable "ip_score" which is
+an integer between 0 and 100, indicating its reputation (100 means totally
+safe and 0 a blacklisted IP with no doubt).
+
+ ###
+ ### HAProxy configuration
+ frontend www
+ mode http
+ bind *:80
+
+ filter spoe engine ip-reputation config spoe-ip-reputation.conf
+
+ # Reject connection if the IP reputation is under 20
+ tcp-request content reject if { var(sess.iprep.ip_score) -m int lt 20 }
+
+ default_backend http-servers
+
+ backend http-servers
+ mode http
+ server http A.B.C.D:80
+
+ backend iprep-servers
+ mode tcp
+ balance roundrobin
+
+ timeout connect 5s # greater than hello timeout
+ timeout server 3m # greater than idle timeout
+
+ server iprep1 A1.B1.C1.D1:12345
+ server iprep2 A2.B2.C2.D2:12345
+
+ ####
+ ### spoe-ip-reputation.conf
+ [ip-reputation]
+
+ spoe-agent iprep-agent
+ messages get-ip-reputation
+
+ option var-prefix iprep
+
+ timeout hello 2s
+ timeout idle 2m
+ timeout processing 10ms
+
+ use-backend iprep-servers
+
+ spoe-message get-ip-reputation
+ args ip=src
+ event on-client-session if ! { src -f /etc/haproxy/whitelist.lst }
+
+
+3. SPOP specification
+----------------------
+
+3.1. Data types
+----------------
+
+Here is the bytewise representation of typed data:
+
+ TYPED-DATA : <TYPE:4 bits><FLAGS:4 bits><DATA>
+
+Supported types and their representation are:
+
+ TYPE | ID | DESCRIPTION
+ -----------------------------+-----+----------------------------------
+ NULL | 0 | NULL : <0>
+ Boolean | 1 | BOOL : <1+FLAG>
+ 32bits signed integer | 2 | INT32 : <2><VALUE:varint>
+ 32bits unsigned integer | 3 | UINT32 : <3><VALUE:varint>
+ 64bits signed integer | 4 | INT64 : <4><VALUE:varint>
+  64bits unsigned integer      |  5  |  UINT64 : <5><VALUE:varint>
+ IPV4 | 6 | IPV4 : <6><STRUCT IN_ADDR:4 bytes>
+ IPV6 | 7 | IPV6 : <7><STRUCT IN_ADDR6:16 bytes>
+ String | 8 | STRING : <8><LENGTH:varint><BYTES>
+ Binary | 9 | BINARY : <9><LENGTH:varint><BYTES>
+ 10 -> 15 unused/reserved | - | -
+ -----------------------------+-----+----------------------------------
+
+Variable-length integer (varint) are encoded using Peers encoding:
+
+
+ 0 <= X < 240 : 1 byte (7.875 bits) [ XXXX XXXX ]
+ 240 <= X < 2288 : 2 bytes (11 bits) [ 1111 XXXX ] [ 0XXX XXXX ]
+ 2288 <= X < 264432 : 3 bytes (18 bits) [ 1111 XXXX ] [ 1XXX XXXX ] [ 0XXX XXXX ]
+ 264432 <= X < 33818864 : 4 bytes (25 bits) [ 1111 XXXX ] [ 1XXX XXXX ]*2 [ 0XXX XXXX ]
+ 33818864 <= X < 4328786160 : 5 bytes (32 bits) [ 1111 XXXX ] [ 1XXX XXXX ]*3 [ 0XXX XXXX ]
+ ...
+
+For booleans, the value (true or false) is the first bit in the FLAGS
+bitfield. if this bit is set to 0, then the boolean is evaluated as false,
+otherwise, the boolean is evaluated as true.
+
+3.2. Frames
+------------
+
+Exchange between HAProxy and agents are made using FRAME packets. All frames
+must be prefixed with their size encoded on 4 bytes in network byte order:
+
+ <FRAME-LENGTH:4 bytes> <FRAME>
+
+A frame always starts with its type, on one byte, followed by metadata
+containing flags, on 4 bytes, and two variable-length integers representing
+the stream identifier and the frame identifier inside the stream:
+
+ FRAME : <FRAME-TYPE:1 byte> <METADATA> <FRAME-PAYLOAD>
+ METADATA : <FLAGS:4 bytes> <STREAM-ID:varint> <FRAME-ID:varint>
+
+Then comes the frame payload. Depending on the frame type, the payload can be
+of three types: a simple key/value list, a list of messages or a list of
+actions.
+
+ FRAME-PAYLOAD : <LIST-OF-MESSAGES> | <LIST-OF-ACTIONS> | <KV-LIST>
+
+ LIST-OF-MESSAGES : [ <MESSAGE-NAME> <NB-ARGS:1 byte> <KV-LIST> ... ]
+ MESSAGE-NAME : <STRING>
+
+ LIST-OF-ACTIONS : [ <ACTION-TYPE:1 byte> <NB-ARGS:1 byte> <ACTION-ARGS> ... ]
+ ACTION-ARGS : [ <TYPED-DATA>... ]
+
+ KV-LIST : [ <KV-NAME> <KV-VALUE> ... ]
+ KV-NAME : <STRING>
+ KV-VALUE : <TYPED-DATA>
+
+ FLAGS :
+
+ Flags are a 32 bits field. They are encoded on 4 bytes in network byte
+ order, where the bit 0 is the LSB.
+
+ 0 1 2-31
+ +---+---+----------+
+ | | A | |
+ | F | B | |
+ | I | O | RESERVED |
+ | N | R | |
+ | | T | |
+ +---+---+----------+
+
+ FIN: Indicates that this is the final payload fragment. The first fragment
+ may also be the final fragment.
+
+    ABORT: Indicates that the processing of the current frame must be
+           cancelled. This bit should be set on frames with a fragmented
+           payload. It can be ignored for frames with an unfragmented
+           payload. When it is set, the FIN bit must also be set.
+
+
+Frames cannot exceed a maximum size negotiated between HAProxy and agents
+during the HELLO handshake. Most of time, payload will be small enough to send
+it in one frame. But when supported by the peer, it will be possible to
+fragment huge payload on many frames. This ability is announced during the
+HELLO handshake and it can be asymmetric (supported by agents but not by
+HAProxy or the opposite). The following rules apply to fragmentation:
+
+  * An unfragmented payload consists of a single frame with the FIN bit set.
+
+  * A fragmented payload consists of several frames with the FIN bit clear and
+    terminated by a single frame with the FIN bit set. All these frames must
+    share the same STREAM-ID and FRAME-ID. The first frame must set the right
+    FRAME-TYPE (e.g., NOTIFY). The following frames must have an unset type (0).
+
+Beside the support of fragmented payload by a peer, some payload must not be
+fragmented. See below for details.
+
+IMPORTANT : The maximum size supported by peers for a frame must be greater
+than or equal to 256 bytes.
+
+3.2.1. Frame capabilities
+--------------------------
+
+Here are the list of official capabilities that HAProxy and agents can support:
+
+  * fragmentation: This is the ability for a peer to support fragmented
+                   payload in received frames. This is an asymmetrical
+                   capability, it only concerns the peer that announces
+                   it. It is the responsibility of the other peer to use it
+                   or not.
+
+  * pipelining: This is the ability for a peer to decouple NOTIFY and ACK
+                frames. This is a symmetrical capability. To be used, it must
+                be supported by HAProxy and agents. Unlike HTTP pipelining, the
+                ACK frames can be sent in any order, but always on the same TCP
+                connection used for the corresponding NOTIFY frame.
+
+  * async: This ability is similar to the pipelining, but here any TCP
+           connection established between HAProxy and the agent can be used to
+           send ACK frames. If an agent accepts connections from multiple
+           HAProxy, it can use the "engine-id" value to group TCP
+           connections. See details about HAPROXY-HELLO frame.
+
+Unsupported or unknown capabilities are silently ignored, when possible.
+
+NOTE: HAProxy does not support the fragmentation for now. This means it is not
+      able to handle fragmented frames. However, if an agent announces the
+      fragmentation support, HAProxy may choose to send fragmented frames.
+
+3.2.2. Frame types overview
+----------------------------
+
+Here are types of frame supported by SPOE. Frames sent by HAProxy come first,
+then frames sent by agents :
+
+ TYPE | ID | DESCRIPTION
+ -----------------------------+-----+-------------------------------------
+ UNSET | 0 | Used for all frames but the first when a
+ | | payload is fragmented.
+ -----------------------------+-----+-------------------------------------
+ HAPROXY-HELLO | 1 | Sent by HAProxy when it opens a
+ | | connection on an agent.
+ | |
+  HAPROXY-DISCONNECT           |  2  | Sent by HAProxy when it wants to close
+ | | the connection or in reply to an
+ | | AGENT-DISCONNECT frame
+ | |
+ NOTIFY | 3 | Sent by HAProxy to pass information
+ | | to an agent
+ -----------------------------+-----+-------------------------------------
+ AGENT-HELLO | 101 | Reply to a HAPROXY-HELLO frame, when
+ | | the connection is established
+ | |
+ AGENT-DISCONNECT | 102 | Sent by an agent just before closing
+ | | the connection
+ | |
+ ACK | 103 | Sent to acknowledge a NOTIFY frame
+ -----------------------------+-----+-------------------------------------
+
+Unknown frames may be silently skipped.
+
+3.2.3. Workflow
+----------------
+
+ * Successful HELLO handshake:
+
+ HAPROXY AGENT SRV
+ | HAPROXY-HELLO |
+ | (healthcheck: false) |
+ | --------------------------> |
+ | |
+ | AGENT-HELLO |
+ | <-------------------------- |
+ | |
+
+ * Successful HELLO healthcheck:
+
+ HAPROXY AGENT SRV
+ | HAPROXY-HELLO |
+ | (healthcheck: true) |
+ | --------------------------> |
+ | |
+ | AGENT-HELLO + close() |
+ | <-------------------------- |
+ | |
+
+
+ * Error encountered by agent during the HELLO handshake:
+
+ HAPROXY AGENT SRV
+ | HAPROXY-HELLO |
+ | --------------------------> |
+ | |
+ | DISCONNECT + close() |
+ | <-------------------------- |
+ | |
+
+ * Error encountered by HAProxy during the HELLO handshake:
+
+ HAPROXY AGENT SRV
+ | HAPROXY-HELLO |
+ | --------------------------> |
+ | |
+ | AGENT-HELLO |
+ | <-------------------------- |
+ | |
+ | DISCONNECT |
+ | --------------------------> |
+ | |
+ | DISCONNECT + close() |
+ | <-------------------------- |
+ | |
+
+ * Notify / Ack exchange (unfragmented payload):
+
+ HAPROXY AGENT SRV
+ | NOTIFY |
+ | --------------------------> |
+ | |
+ | ACK |
+ | <-------------------------- |
+ | |
+
+ * Notify / Ack exchange (fragmented payload):
+
+ HAPROXY AGENT SRV
+ | NOTIFY (frag 1) |
+ | --------------------------> |
+ | |
+ | UNSET (frag 2) |
+ | --------------------------> |
+ | ... |
+ | UNSET (frag N) |
+ | --------------------------> |
+ | |
+ | ACK |
+ | <-------------------------- |
+ | |
+
+ * Aborted fragmentation of a NOTIFY frame:
+
+ HAPROXY AGENT SRV
+ | ... |
+ | UNSET (frag X) |
+ | --------------------------> |
+ | |
+ | ACK/ABORT |
+ | <-------------------------- |
+ | |
+ | UNSET (frag X+1) |
+ | -----------X |
+ | |
+ | |
+
+ * Connection closed by haproxy:
+
+ HAPROXY AGENT SRV
+ | DISCONNECT |
+ | --------------------------> |
+ | |
+ | DISCONNECT + close() |
+ | <-------------------------- |
+ | |
+
+ * Connection closed by agent:
+
+ HAPROXY AGENT SRV
+ | DISCONNECT + close() |
+ | <-------------------------- |
+ | |
+
+3.2.4. Frame: HAPROXY-HELLO
+----------------------------
+
+This frame is the first one exchanged between HAProxy and an agent, when the
+connection is established. The payload of this frame is a KV-LIST. It cannot
+be fragmented. STREAM-ID and FRAME-ID must be set to 0.
+
+Following items are mandatory in the KV-LIST:
+
+ * "supported-versions" <STRING>
+
+    Last SPOP major versions supported by HAProxy. It is a comma-separated list
+    of versions, following the format "Major.Minor". Spaces must be ignored, if
+    any. When a major version is announced by HAProxy, it means it also
+    supports all previous minor versions.
+
+ Example: "2.0, 1.5" means HAProxy supports SPOP 2.0 and 1.0 to 1.5
+
+ * "max-frame-size" <UINT32>
+
+ This is the maximum size allowed for a frame. The HAPROXY-HELLO frame must
+ be lower or equal to this value.
+
+ * "capabilities" <STRING>
+
+    This is a comma-separated list of capabilities supported by HAProxy.
+    Spaces must be ignored, if any.
+
+Following optional items can be added in the KV-LIST:
+
+ * "healthcheck" <BOOLEAN>
+
+ If this item is set to TRUE, then the HAPROXY-HELLO frame is sent during a
+ SPOE health check. When set to FALSE, this item can be ignored.
+
+ * "engine-id" <STRING>
+
+    This is a unique string that identifies a SPOE engine.
+
+To finish the HELLO handshake, the agent must return an AGENT-HELLO frame with
+its supported SPOP version, the lower value between its maximum size allowed
+for a frame and the HAProxy one and capabilities it supports. If an error
+occurs or if an incompatibility is detected with the agent configuration, an
+AGENT-DISCONNECT frame must be returned.
+
+3.2.5. Frame: AGENT-HELLO
+--------------------------
+
+This frame is sent in reply to a HAPROXY-HELLO frame to finish a HELLO
+handshake. As for the HAPROXY-HELLO frame, STREAM-ID and FRAME-ID are also
+set to 0. The payload of this frame is a KV-LIST and it cannot be fragmented.
+
+Following items are mandatory in the KV-LIST:
+
+ * "version" <STRING>
+
+    This is the SPOP version the agent supports. It must follow the format
+    "Major.Minor" and it must be lower than or equal to one of the major
+    versions announced by HAProxy.
+
+ * "max-frame-size" <UINT32>
+
+ This is the maximum size allowed for a frame. It must be lower or equal to
+ the value in the HAPROXY-HELLO frame. This value will be used for all
+ subsequent frames.
+
+ * "capabilities" <STRING>
+
+    This is a comma-separated list of capabilities supported by the agent.
+    Spaces must be ignored, if any.
+
+At this time, if everything is ok for HAProxy (supported version and valid
+max-frame-size value), the HELLO handshake is successfully completed. Else,
+HAProxy sends a HAPROXY-DISCONNECT frame with the corresponding error.
+
+If "healthcheck" item was set to TRUE in the HAPROXY-HELLO frame, the agent can
+safely close the connection without DISCONNECT frame. In all cases, HAProxy
+will close the connection at the end of the health check.
+
+3.2.6. Frame: NOTIFY
+---------------------
+
+Information are sent to the agents inside NOTIFY frames. These frames are
+attached to a stream, so STREAM-ID and FRAME-ID must be set. The payload of
+NOTIFY frames is a LIST-OF-MESSAGES and, if supported by agents, it can be
+fragmented.
+
+NOTIFY frames must be acknowledged by agents sending an ACK frame, repeating
+the right STREAM-ID and FRAME-ID.
+
+3.2.7. Frame: ACK
+------------------
+
+ACK frames must be sent by agents to reply to NOTIFY frames. STREAM-ID and
+FRAME-ID found in a NOTIFY frame must be reused in the corresponding ACK
+frame. The payload of ACK frames is a LIST-OF-ACTIONS and, if supported by
+HAProxy, it can be fragmented.
+
+3.2.8. Frame: HAPROXY-DISCONNECT
+---------------------------------
+
+If an error occurs, at any time, from the HAProxy side, a HAPROXY-DISCONNECT
+frame is sent with information describing the error. HAProxy will wait for an
+AGENT-DISCONNECT frame in reply. All other frames will be ignored. The agent
+must then close the socket.
+
+The payload of this frame is a KV-LIST. It cannot be fragmented. STREAM-ID and
+FRAME-ID must be set to 0.
+
+Following items are mandatory in the KV-LIST:
+
+ * "status-code" <UINT32>
+
+ This is the code corresponding to the error.
+
+ * "message" <STRING>
+
+ This is a textual message describing the error.
+
+For more information about known errors, see section "Errors & timeouts"
+
+3.2.9. Frame: AGENT-DISCONNECT
+-------------------------------
+
+If an error occurs, at any time, from the agent side, an AGENT-DISCONNECT
+frame is sent, with information describing the error. Such a frame is also
+sent in reply to a HAPROXY-DISCONNECT. The agent must close the socket just
+after sending this frame.
+
+The payload of this frame is a KV-LIST. It cannot be fragmented. STREAM-ID and
+FRAME-ID must be set to 0.
+
+Following items are mandatory in the KV-LIST:
+
+ * "status-code" <UINT32>
+
+ This is the code corresponding to the error.
+
+ * "message" <STRING>
+
+ This is a textual message describing the error.
+
+For more information about known errors, see section "Errors & timeouts"
+
+3.3. Events & Messages
+-----------------------
+
+Information about streams are sent in NOTIFY frames. You can specify which kind
+of information to send by defining "spoe-message" sections in your SPOE
+configuration file. For each "spoe-message" there will be a message in a NOTIFY
+frame when the right event is triggered.
+
+A NOTIFY frame is sent for a specific event when there is at least one
+"spoe-message" attached to this event. All messages for an event will be added
+in the same NOTIFY frame.
+
+Here is the list of supported events:
+
+ * on-client-session is triggered when a new client session is created.
+ This event is only available for SPOE filters
+ declared in a frontend or a listen section.
+
+ * on-frontend-tcp-request is triggered just before the evaluation of
+ "tcp-request content" rules on the frontend side.
+ This event is only available for SPOE filters
+ declared in a frontend or a listen section.
+
+ * on-backend-tcp-request is triggered just before the evaluation of
+ "tcp-request content" rules on the backend side.
+ This event is skipped for SPOE filters declared
+ in a listen section.
+
+ * on-frontend-http-request is triggered just before the evaluation of
+ "http-request" rules on the frontend side. This
+ event is only available for SPOE filters declared
+ in a frontend or a listen section.
+
+ * on-backend-http-request is triggered just before the evaluation of
+ "http-request" rules on the backend side. This
+ event is skipped for SPOE filters declared in a
+ listen section.
+
+ * on-server-session is triggered when the session with the server is
+ established.
+
+ * on-tcp-response is triggered just before the evaluation of
+ "tcp-response content" rules.
+
+ * on-http-response is triggered just before the evaluation of
+ "http-response" rules.
+
+
+The stream processing will loop on these events, when triggered, waiting the
+agent reply.
+
+3.4. Actions
+-------------
+
+An agent must acknowledge each NOTIFY frame by sending the corresponding ACK
+frame. Actions can be added in these frames to dynamically take action on the
+processing of a stream.
+
+Here is the list of supported actions:
+
+ * set-var set the value for an existing variable. 3 arguments must be
+ attached to this action: the variable scope (proc, sess, txn,
+ req or res), the variable name (a string) and its value.
+
+ ACTION-SET-VAR : <SET-VAR:1 byte><NB-ARGS:1 byte><VAR-SCOPE:1 byte><VAR-NAME><VAR-VALUE>
+
+ SET-VAR : <1>
+ NB-ARGS : <3>
+ VAR-SCOPE : <PROCESS> | <SESSION> | <TRANSACTION> | <REQUEST> | <RESPONSE>
+ VAR-NAME : <STRING>
+ VAR-VALUE : <TYPED-DATA>
+
+ PROCESS : <0>
+ SESSION : <1>
+ TRANSACTION : <2>
+ REQUEST : <3>
+ RESPONSE : <4>
+
+ * unset-var unset the value for an existing variable. 2 arguments must be
+ attached to this action: the variable scope (proc, sess, txn,
+ req or res) and the variable name (a string).
+
+ ACTION-UNSET-VAR : <UNSET-VAR:1 byte><NB-ARGS:1 byte><VAR-SCOPE:1 byte><VAR-NAME>
+
+ UNSET-VAR : <2>
+ NB-ARGS : <2>
+ VAR-SCOPE : <PROCESS> | <SESSION> | <TRANSACTION> | <REQUEST> | <RESPONSE>
+ VAR-NAME : <STRING>
+
+ PROCESS : <0>
+ SESSION : <1>
+ TRANSACTION : <2>
+ REQUEST : <3>
+ RESPONSE : <4>
+
+
+NOTE: Name of the variables will be automatically prefixed by HAProxy to avoid
+ name clashes with other variables used in HAProxy. Moreover, unknown
+ variable will be silently ignored.
+
+3.5. Errors & timeouts
+----------------------
+
+Here is the list of all known errors:
+
+ STATUS CODE | DESCRIPTION
+ ----------------+--------------------------------------------------------
+ 0 | normal (no error occurred)
+ 1 | I/O error
+ 2 | A timeout occurred
+ 3 | frame is too big
+ 4 | invalid frame received
+ 5 | version value not found
+ 6 | max-frame-size value not found
+ 7 | capabilities value not found
+ 8 | unsupported version
+ 9 | max-frame-size too big or too small
+ 10 | payload fragmentation is not supported
+ 11 | invalid interlaced frames
+ 12 | frame-id not found (it does not match any referenced frame)
+ 13 | resource allocation error
+       99       | an unknown error occurred
+ ----------------+--------------------------------------------------------
+
+An agent can define its own errors using a not yet assigned status code.
+
+IMPORTANT NOTE: By default, for a specific stream, when an abnormal/unexpected
+                error occurs, the SPOE is disabled for all the transaction. So
+                if you have several events configured, such an error on an
+                event will disable all the following ones. For TCP streams,
+                this will disable the SPOE for the whole session. For HTTP
+                streams, this will disable it for the transaction (request and
+                response). See 'option continue-on-error' to bypass this
+                limitation.
+
+To avoid a stream waiting indefinitely, you must carefully choose the
+acknowledgement timeout. In most cases, it will be quite low. But it depends
+on the responsiveness of your service.
+
+You must also choose idle timeout carefully. Because connection with your
+service depends on the backend configuration used by the SPOA, it is important
+to use a lower value for idle timeout than the server timeout. Else the
+connection will be closed by HAProxy. The same is true for hello timeout. You
+should choose a lower value than the connect timeout.
+
+4. Logging
+-----------
+
+Activity of an SPOE is logged using HAProxy's logger. The messages are logged
+in the context of the streams that handle the client and the server
+connections. A message is emitted for each event or group handled by an
+SPOE. Depending on the status code, the log level will be different. In the
+normal case, when no error occurred, the message is logged with the level
+LOG_NOTICE. Otherwise, the message is logged with the level LOG_WARNING.
+
+The messages are logged using the agent's logger, if defined, and use the
+following format:
+
+ SPOE: [AGENT] <TYPE:NAME> sid=STREAM-ID st=STATUS-CODE reqT/qT/wT/resT/pT \
+ <idles>/<applets> <nb_sending>/<nb_waiting> <nb_error>/<nb_processed>
+
+ AGENT is the agent name
+    TYPE                is EVENT or GROUP
+ NAME is the event or the group name
+ STREAM-ID is an integer, the unique id of the stream
+ STATUS_CODE is the processing's status code
+ reqT/qT/wT/resT/pT are the following time events:
+
+ * reqT : the encoding time. It includes ACLs processing, if any. For
+ fragmented frames, it is the sum of all fragments.
+ * qT : the delay before the request gets out the sending queue. For
+ fragmented frames, it is the sum of all fragments.
+ * wT : the delay before the response is received. No fragmentation
+ supported here.
+ * resT : the delay to process the response. No fragmentation supported
+ here.
+ * pT : the delay to process the event or the group. From the stream
+ point of view, it is the latency added by the SPOE processing.
+ It is more or less the sum of values above.
+
+    <idles>             is the number of idle SPOE applets
+    <applets>           is the number of SPOE applets
+    <nb_sending>        is the number of streams waiting to send data
+    <nb_waiting>        is the number of streams waiting for an ack
+    <nb_error>          is the number of processing errors
+    <nb_processed>      is the number of events/groups processed
+
+
+For all these time events, -1 means the processing was interrupted before the
+end. So -1 for the queue time means the request was never dequeued. For
+fragmented frames it is harder to know when the interruption happened.
+
+/*
+ * Local variables:
+ * fill-column: 79
+ * End:
+ */
diff --git a/doc/WURFL-device-detection.txt b/doc/WURFL-device-detection.txt
new file mode 100644
index 0000000..4786e22
--- /dev/null
+++ b/doc/WURFL-device-detection.txt
@@ -0,0 +1,71 @@
+Scientiamobile WURFL Device Detection
+-------------------------------------
+
+You can also include WURFL for inbuilt device detection enabling attributes.
+
+WURFL is a high-performance and low-memory footprint mobile device detection
+software component that can quickly and accurately detect over 500 capabilities
+of visiting devices. It can differentiate between portable mobile devices, desktop devices,
+SmartTVs and any other types of devices on which a web browser can be installed.
+
+In order to add WURFL device detection support, you would need to download Scientiamobile
+InFuze C API and install it on your system. Refer to www.scientiamobile.com to obtain a valid
+InFuze license.
+Compile haproxy as shown :
+
+ $ make TARGET=<target> USE_WURFL=1
+
+Optionally WURFL_DEBUG=1 may be set to increase logs verbosity
+
+For HAProxy developers who need to verify that their changes didn't accidentally
+break the WURFL code, it is possible to build a dummy library provided in the
+addons/wurfl/dummy directory and to use it as an alternative for the full library.
+This will not provide the full functionalities, it will just allow haproxy to
+start with a wurfl configuration, which generally is enough to validate API
+changes :
+
+ $ make -C addons/wurfl/dummy
+ $ make TARGET=<target> USE_WURFL=1 WURFL_INC=$PWD/addons/wurfl/dummy WURFL_LIB=$PWD/addons/wurfl/dummy
+
+These are the supported WURFL directives (see doc/configuration.txt) :
+- wurfl-data-file <path to WURFL data file>
+- wurfl-information-list [<string>] (list of WURFL capabilities,
+ virtual capabilities, property names we plan to use in injected headers)
+- wurfl-information-list-separator <char> (character that will be
+ used to separate values in a response header, ',' by default).
+- wurfl-cache-size <string> (Sets the WURFL caching strategy)
+- wurfl-patch-file [<file path>] (Sets the paths to custom WURFL patch files)
+
+Sample configuration :
+
+ global
+ wurfl-data-file /usr/share/wurfl/wurfl.zip
+
+ wurfl-information-list wurfl_id model_name
+
+ #wurfl-information-list-separator |
+
+ ## single LRU cache
+ #wurfl-cache-size 100000
+ ## no cache
+ #wurfl-cache-size 0
+
+ #wurfl-patch-file <paths to custom patch files>
+
+ ...
+ frontend
+ bind *:8888
+ default_backend servers
+
+There are two distinct methods available to transmit the WURFL data downstream
+to the target application:
+
+All data listed in wurfl-information-list
+
+ http-request set-header X-WURFL-All %[wurfl-get-all()]
+
+A subset of data listed in wurfl-information-list
+
+ http-request set-header X-WURFL-Properties %[wurfl-get(wurfl_id,is_tablet)]
+
+Please find more information about WURFL and the detection methods at https://www.scientiamobile.com
diff --git a/doc/acl.fig b/doc/acl.fig
new file mode 100644
index 0000000..253a053
--- /dev/null
+++ b/doc/acl.fig
@@ -0,0 +1,229 @@
+#FIG 3.2 Produced by xfig version 3.2.5-alpha5
+Portrait
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+6 2430 1080 2700 2250
+1 2 0 1 0 11 52 -1 20 0.000 1 0.0000 2587 1687 113 563 2474 1687 2700 1687
+4 1 0 50 -1 16 8 1.5708 4 120 840 2610 1710 tcp-req inspect\001
+-6
+6 5805 1080 6255 2250
+1 2 0 1 0 29 52 -1 20 0.000 1 0.0000 6052 1687 203 563 5849 1687 6255 1687
+4 1 0 50 -1 16 8 1.5708 4 90 300 6030 1710 HTTP\001
+4 1 0 50 -1 16 8 1.5708 4 120 615 6165 1710 processing\001
+-6
+6 1575 3375 1800 4500
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 1575 3375 1800 3375 1800 4500 1575 4500 1575 3375
+4 1 0 50 -1 16 8 1.5708 4 120 735 1710 3960 http-resp out\001
+-6
+6 2025 3375 2250 4500
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 2025 3375 2250 3375 2250 4500 2025 4500 2025 3375
+4 1 0 50 -1 16 8 1.5708 4 120 735 2160 3960 http-resp out\001
+-6
+6 810 3600 1080 4230
+4 1 0 50 -1 16 8 1.5708 4 105 555 900 3915 Response\001
+4 1 0 50 -1 16 8 1.5708 4 105 450 1065 3915 to client\001
+-6
+6 720 1350 1035 2070
+4 1 0 50 -1 16 8 1.5708 4 120 540 855 1710 Requests \001
+4 1 0 50 -1 16 8 1.5708 4 105 645 1020 1710 from clients\001
+-6
+6 7695 1350 8010 1980
+4 1 0 50 -1 16 8 1.5708 4 120 510 7830 1665 Requests\001
+4 1 0 50 -1 16 8 1.5708 4 105 555 7995 1665 to servers\001
+-6
+6 7785 3600 8055 4230
+4 1 0 50 -1 16 8 1.5708 4 105 555 7875 3915 Response\001
+4 1 0 50 -1 16 8 1.5708 4 105 630 8055 3915 from server\001
+-6
+1 2 0 1 0 11 52 -1 20 0.000 1 0.0000 1687 1687 113 563 1574 1687 1800 1687
+1 2 0 1 0 11 52 -1 20 0.000 1 0.0000 7087 3937 113 563 6974 3937 7200 3937
+1 2 0 1 0 29 52 -1 20 0.000 1 0.0000 4072 3937 203 563 3869 3937 4275 3937
+1 2 0 1 0 29 52 -1 20 0.000 1 0.0000 2903 3937 203 563 2700 3937 3106 3937
+2 3 0 1 0 6 54 -1 20 0.000 0 0 -1 0 0 9
+ 1485 900 1485 2475 4140 2475 4140 1035 6390 1035 6390 2340
+ 6840 2340 6840 900 1485 900
+2 3 0 1 0 2 54 -1 20 0.000 0 0 -1 0 0 9
+ 4365 1035 4365 2475 7290 2475 7290 900 6840 900 6840 2340
+ 5715 2340 5715 1035 4365 1035
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 4950 1125 5175 1125 5175 2250 4950 2250 4950 1125
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 5400 1125 5625 1125 5625 2250 5400 2250 5400 1125
+2 2 0 1 0 11 52 -1 20 0.000 0 0 -1 0 0 5
+ 2025 1125 2250 1125 2250 2250 2025 2250 2025 1125
+2 2 0 1 0 11 52 -1 20 0.000 0 0 -1 0 0 5
+ 2925 1125 3150 1125 3150 2250 2925 2250 2925 1125
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1125 1710 1575 1710
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1125 1935 1575 1755
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1125 1485 1575 1665
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 3825 1125 4050 1125 4050 2250 3825 2250 3825 1125
+2 2 0 1 0 6 50 -1 20 0.000 0 0 -1 0 0 5
+ 1575 450 2025 450 2025 540 1575 540 1575 450
+2 2 0 1 0 2 50 -1 20 0.000 0 0 -1 0 0 5
+ 1575 675 2025 675 2025 765 1575 765 1575 675
+2 2 0 1 0 11 50 -1 20 0.000 0 0 -1 0 0 5
+ 3150 450 3600 450 3600 540 3150 540 3150 450
+2 2 0 1 0 29 50 -1 20 0.000 0 0 -1 0 0 5
+ 3150 675 3600 675 3600 765 3150 765 3150 675
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 6525 1125 6750 1125 6750 2250 6525 2250 6525 1125
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7200 1665 7650 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7200 1620 7650 1530
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7200 1710 7650 1800
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 6975 1125 7200 1125 7200 2250 6975 2250 6975 1125
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 3375 1125 3600 1125 3600 2250 3375 2250 3375 1125
+2 2 0 1 0 11 52 -1 20 0.000 0 0 -1 0 0 5
+ 4500 1125 4725 1125 4725 2250 4500 2250 4500 1125
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 1800 1665 2025 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 2250 1665 2475 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 2700 1665 2925 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 3150 1665 3375 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 3600 1665 3825 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 4725 1665 4950 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 5175 1665 5400 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 5625 1665 5850 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 6750 1665 6975 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 6255 1665 6525 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 4050 1665 4500 1665
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 4050 1620 4500 1530
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 4050 1710 4500 1800
+2 2 0 1 0 11 52 -1 20 0.000 0 0 -1 0 0 5
+ 6525 3375 6750 3375 6750 4500 6525 4500 6525 3375
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 6075 3375 6300 3375 6300 4500 6075 4500 6075 3375
+2 3 0 1 0 2 54 -1 20 0.000 0 0 -1 0 0 9
+ 7290 3150 7290 4725 5985 4725 5985 3285 2385 3285 2385 4590
+ 1935 4590 1935 3150 7290 3150
+2 3 0 1 0 6 54 -1 20 0.000 0 0 -1 0 0 9
+ 1935 3150 1485 3150 1485 4725 5985 4725 5985 3285 5085 3285
+ 5085 4590 1935 4590 1935 3150
+2 2 0 1 0 11 52 -1 20 0.000 0 0 -1 0 0 5
+ 5625 3375 5850 3375 5850 4500 5625 4500 5625 3375
+2 2 0 1 0 29 52 -1 20 0.000 0 0 -1 0 0 5
+ 5175 3375 5400 3375 5400 4500 5175 4500 5175 3375
+2 1 0 1 0 0 54 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7650 3915 7200 3915
+2 1 0 1 0 0 54 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1575 3915 1125 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 6975 3915 6750 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 6525 3915 6300 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 6075 3915 5850 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 5625 3915 5400 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 2025 3915 1800 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 5175 3915 4275 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 3870 3915 3105 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 30.00 60.00
+ 2700 3915 2250 3915
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 3
+ 1 1 1.00 30.00 60.00
+ 3465 2250 3465 2880 2970 3465
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 5040 2250 5040 2655 3600 2880 3015 3510
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 6075 2250 6075 2565 3645 2925 3060 3555
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 6615 2250 6615 2610 3690 2970 3060 3645
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 7065 2250 7065 2655 3735 3015 3060 3690
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 5265 3375 5265 2970 3825 3105 3105 3780
+2 1 0 1 0 0 50 -1 -1 0.000 0 0 -1 1 0 4
+ 1 1 1.00 30.00 60.00
+ 6165 3375 6165 2835 3780 3060 3105 3735
+4 1 0 50 -1 16 8 1.5708 4 120 630 2160 1710 tcp-request\001
+4 1 0 50 -1 16 8 1.5708 4 120 870 3060 1710 tcp-req content\001
+4 1 0 50 -1 16 8 1.5708 4 120 600 5085 1710 http-req in\001
+4 1 0 50 -1 16 8 1.5708 4 105 690 3960 1710 use-backend\001
+4 1 0 50 -1 16 8 1.5708 4 75 570 5535 1710 use-server\001
+4 1 0 50 -1 16 8 1.5708 4 120 360 1710 1710 accept\001
+4 0 0 50 -1 18 6 0.0000 4 90 435 2115 540 frontend\001
+4 0 0 50 -1 18 6 0.0000 4 90 405 2115 765 backend\001
+4 0 0 50 -1 18 6 0.0000 4 105 150 3735 540 tcp\001
+4 0 0 50 -1 18 6 0.0000 4 105 450 3735 765 http only\001
+4 2 0 50 -1 18 6 0.0000 4 90 435 4050 2430 frontend\001
+4 0 0 50 -1 18 6 0.0000 4 90 405 4455 2430 backend\001
+4 1 0 50 -1 16 8 1.5708 4 120 675 6660 1710 http-req out\001
+4 1 0 50 -1 16 8 1.5708 4 120 675 7110 1710 http-req out\001
+4 1 0 50 -1 16 8 1.5708 4 120 600 3510 1710 http-req in\001
+4 1 0 50 -1 16 8 1.5708 4 120 870 4635 1710 tcp-req content\001
+4 1 0 50 -1 16 8 1.5708 4 120 660 6210 3960 http-resp in\001
+4 1 0 50 -1 16 8 1.5708 4 120 930 6660 3960 tcp-resp content\001
+4 1 0 50 -1 16 8 1.5708 4 120 900 7110 3960 tcp-resp inspect\001
+4 1 0 50 -1 16 8 1.5708 4 120 930 5760 3960 tcp-resp content\001
+4 1 0 50 -1 16 8 1.5708 4 120 660 5310 3960 http-resp in\001
+4 0 0 50 -1 18 6 0.0000 4 90 405 6075 4680 backend\001
+4 1 0 50 -1 16 8 1.5708 4 90 300 4050 3960 HTTP\001
+4 1 0 50 -1 16 8 1.5708 4 120 615 4185 3960 processing\001
+4 1 0 50 -1 16 8 1.5708 4 90 300 2835 3915 Error\001
+4 1 0 50 -1 16 8 1.5708 4 120 615 2970 3915 processing\001
+4 2 0 50 -1 18 6 0.0000 4 90 435 5895 4680 frontend\001
diff --git a/doc/architecture.txt b/doc/architecture.txt
new file mode 100644
index 0000000..c37632f
--- /dev/null
+++ b/doc/architecture.txt
@@ -0,0 +1,1448 @@
+ -------------------
+ HAProxy
+ Architecture Guide
+ -------------------
+ version 1.1.34
+ willy tarreau
+ 2006/01/29
+
+
+This document provides real world examples with working configurations.
+Please note that except stated otherwise, global configuration parameters
+such as logging, chrooting, limits and time-outs are not described here.
+
+===================================================
+1. Simple HTTP load-balancing with cookie insertion
+===================================================
+
+A web application often saturates the front-end server with high CPU loads,
+due to the scripting language involved. It also relies on a back-end database
+which is not much loaded. User contexts are stored on the server itself, and
+not in the database, so that simply adding another server with simple IP/TCP
+load-balancing would not work.
+
+ +-------+
+ |clients| clients and/or reverse-proxy
+ +---+---+
+ |
+ -+-----+--------+----
+ | _|_db
+ +--+--+ (___)
+ | web | (___)
+ +-----+ (___)
+ 192.168.1.1 192.168.1.2
+
+
+Replacing the web server with a bigger SMP system would cost much more than
+adding low-cost pizza boxes. The solution is to buy N cheap boxes and install
+the application on them. Install haproxy on the old one which will spread the
+load across the new boxes.
+
+ 192.168.1.1 192.168.1.11-192.168.1.14 192.168.1.2
+ -------+-----------+-----+-----+-----+--------+----
+ | | | | | _|_db
+ +--+--+ +-+-+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | A | | B | | C | | D | (___)
+ +-----+ +---+ +---+ +---+ +---+ (___)
+ haproxy 4 cheap web servers
+
+
+Config on haproxy (LB1) :
+-------------------------
+
+ listen webfarm 192.168.1.1:80
+ mode http
+ balance roundrobin
+ cookie SERVERID insert indirect
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+
+Description :
+-------------
+ - LB1 will receive clients requests.
+ - if a request does not contain a cookie, it will be forwarded to a valid
+ server
+ - in return, a cookie "SERVERID" will be inserted in the response holding the
+ server name (eg: "A").
+ - when the client comes again with the cookie "SERVERID=A", LB1 will know that
+ it must be forwarded to server A. The cookie will be removed so that the
+ server does not see it.
+ - if server "webA" dies, the requests will be sent to another valid server
+ and a cookie will be reassigned.
+
+
+Flows :
+-------
+
+(client) (haproxy) (server A)
+ >-- GET /URI1 HTTP/1.0 ------------> |
+ ( no cookie, haproxy forwards in load-balancing mode. )
+ | >-- GET /URI1 HTTP/1.0 ---------->
+ | <-- HTTP/1.0 200 OK -------------<
+ ( the proxy now adds the server cookie in return )
+ <-- HTTP/1.0 200 OK ---------------< |
+ Set-Cookie: SERVERID=A |
+ >-- GET /URI2 HTTP/1.0 ------------> |
+ Cookie: SERVERID=A |
+ ( the proxy sees the cookie. it forwards to server A and deletes it )
+ | >-- GET /URI2 HTTP/1.0 ---------->
+ | <-- HTTP/1.0 200 OK -------------<
+ ( the proxy does not add the cookie in return because the client knows it )
+ <-- HTTP/1.0 200 OK ---------------< |
+ >-- GET /URI3 HTTP/1.0 ------------> |
+ Cookie: SERVERID=A |
+ ( ... )
+
+
+Limits :
+--------
+ - if clients use keep-alive (HTTP/1.1), only the first response will have
+ a cookie inserted, and only the first request of each session will be
+ analyzed. This does not cause trouble in insertion mode because the cookie
+ is put immediately in the first response, and the session is maintained to
+ the same server for all subsequent requests in the same session. However,
+ the cookie will not be removed from the requests forwarded to the servers,
+ so the server must not be sensitive to unknown cookies. If this causes
+ trouble, you can disable keep-alive by adding the following option :
+
+ option httpclose
+
+ - if for some reason the clients cannot learn more than one cookie (eg: the
+ clients are indeed some home-made applications or gateways), and the
+ application already produces a cookie, you can use the "prefix" mode (see
+ below).
+
+  - LB1 becomes a very sensitive server. If LB1 dies, nothing works anymore.
+ => you can back it up using keepalived (see below)
+
+ - if the application needs to log the original client's IP, use the
+ "forwardfor" option which will add an "X-Forwarded-For" header with the
+ original client's IP address. You must also use "httpclose" to ensure
+    that you will rewrite every request and not only the first one of each
+ session :
+
+ option httpclose
+ option forwardfor
+
+ - if the application needs to log the original destination IP, use the
+ "originalto" option which will add an "X-Original-To" header with the
+ original destination IP address. You must also use "httpclose" to ensure
+    that you will rewrite every request and not only the first one of each
+ session :
+
+ option httpclose
+ option originalto
+
+ The web server will have to be configured to use this header instead.
+ For example, on apache, you can use LogFormat for this :
+
+ LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b " combined
+ CustomLog /var/log/httpd/access_log combined
+
+Hints :
+-------
+Sometimes on the internet, you will find a few percent of the clients which
+disable cookies on their browser. Obviously they have troubles everywhere on
+the web, but you can still help them access your site by using the "source"
+balancing algorithm instead of the "roundrobin". It ensures that a given IP
+address always reaches the same server as long as the number of servers remains
+unchanged. Never use this behind a proxy or in a small network, because the
+distribution will be unfair. However, in large internal networks, and on the
+internet, it works quite well. Clients which have a dynamic address will not
+be affected as long as they accept the cookie, because the cookie always has
+precedence over load balancing :
+
+ listen webfarm 192.168.1.1:80
+ mode http
+ balance source
+ cookie SERVERID insert indirect
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+
+==================================================================
+2. HTTP load-balancing with cookie prefixing and high availability
+==================================================================
+
+Now you don't want to add more cookies, but rather use existing ones. The
+application already generates a "JSESSIONID" cookie which is enough to track
+sessions, so we'll prefix this cookie with the server name when we see it.
+Since the load-balancer becomes critical, it will be backed up with a second
+one in VRRP mode using keepalived under Linux.
+
+Download the latest version of keepalived from this site and install it
+on each load-balancer LB1 and LB2 :
+
+ http://www.keepalived.org/
+
+You then have a shared IP between the two load-balancers (we will still use the
+original IP). It is active only on one of them at any moment. To allow the
+proxy to bind to the shared IP on Linux 2.4, you must enable it in /proc :
+
+# echo 1 >/proc/sys/net/ipv4/ip_nonlocal_bind
+
+
+ shared IP=192.168.1.1
+ 192.168.1.3 192.168.1.4 192.168.1.11-192.168.1.14 192.168.1.2
+ -------+------------+-----------+-----+-----+-----+--------+----
+ | | | | | | _|_db
+ +--+--+ +--+--+ +-+-+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | LB2 | | A | | B | | C | | D | (___)
+ +-----+ +-----+ +---+ +---+ +---+ +---+ (___)
+ haproxy haproxy 4 cheap web servers
+ keepalived keepalived
+
+
+Config on both proxies (LB1 and LB2) :
+--------------------------------------
+
+ listen webfarm 192.168.1.1:80
+ mode http
+ balance roundrobin
+ cookie JSESSIONID prefix
+ option httpclose
+ option forwardfor
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+
+Notes: the proxy will modify EVERY cookie sent by the client and the server,
+so it is important that it can access to ALL cookies in ALL requests for
+each session. This implies that there is no keep-alive (HTTP/1.1), thus the
+"httpclose" option. Only if you know for sure that the client(s) will never
+use keep-alive (eg: Apache 1.3 in reverse-proxy mode), you can remove this
+option.
+
+
+Configuration for keepalived on LB1/LB2 :
+-----------------------------------------
+
+ vrrp_script chk_haproxy { # Requires keepalived-1.1.13
+ script "killall -0 haproxy" # cheaper than pidof
+ interval 2 # check every 2 seconds
+ weight 2 # add 2 points of prio if OK
+ }
+
+ vrrp_instance VI_1 {
+ interface eth0
+ state MASTER
+ virtual_router_id 51
+ priority 101 # 101 on master, 100 on backup
+ virtual_ipaddress {
+ 192.168.1.1
+ }
+ track_script {
+ chk_haproxy
+ }
+ }
+
+
+Description :
+-------------
+ - LB1 is VRRP master (keepalived), LB2 is backup. Both monitor the haproxy
+ process, and lower their prio if it fails, leading to a failover to the
+ other node.
+ - LB1 will receive clients requests on IP 192.168.1.1.
+ - both load-balancers send their checks from their native IP.
+ - if a request does not contain a cookie, it will be forwarded to a valid
+ server
+  - in return, if a JSESSIONID cookie is seen, the server name will be prefixed
+    into it, followed by a delimiter ('~')
+ - when the client comes again with the cookie "JSESSIONID=A~xxx", LB1 will
+ know that it must be forwarded to server A. The server name will then be
+ extracted from cookie before it is sent to the server.
+ - if server "webA" dies, the requests will be sent to another valid server
+ and a cookie will be reassigned.
+
+
+Flows :
+-------
+
+(client) (haproxy) (server A)
+ >-- GET /URI1 HTTP/1.0 ------------> |
+ ( no cookie, haproxy forwards in load-balancing mode. )
+ | >-- GET /URI1 HTTP/1.0 ---------->
+ | X-Forwarded-For: 10.1.2.3
+ | <-- HTTP/1.0 200 OK -------------<
+ ( no cookie, nothing changed )
+ <-- HTTP/1.0 200 OK ---------------< |
+ >-- GET /URI2 HTTP/1.0 ------------> |
+ ( no cookie, haproxy forwards in lb mode, possibly to another server. )
+ | >-- GET /URI2 HTTP/1.0 ---------->
+ | X-Forwarded-For: 10.1.2.3
+ | <-- HTTP/1.0 200 OK -------------<
+ | Set-Cookie: JSESSIONID=123
+ ( the cookie is identified, it will be prefixed with the server name )
+ <-- HTTP/1.0 200 OK ---------------< |
+ Set-Cookie: JSESSIONID=A~123 |
+ >-- GET /URI3 HTTP/1.0 ------------> |
+ Cookie: JSESSIONID=A~123 |
+ ( the proxy sees the cookie, removes the server name and forwards
+ to server A which sees the same cookie as it previously sent )
+ | >-- GET /URI3 HTTP/1.0 ---------->
+ | Cookie: JSESSIONID=123
+ | X-Forwarded-For: 10.1.2.3
+ | <-- HTTP/1.0 200 OK -------------<
+ ( no cookie, nothing changed )
+ <-- HTTP/1.0 200 OK ---------------< |
+ ( ... )
+
+Hints :
+-------
+Sometimes, there will be some powerful servers in the farm, and some smaller
+ones. In this situation, it may be desirable to tell haproxy to respect the
+difference in performance. Let's consider that WebA and WebB are two old
+P3-1.2 GHz while WebC and WebD are shiny new Opteron-2.6 GHz. If your
+application scales with CPU, you may assume a very rough 2.6/1.2 performance
+ratio between the servers. You can inform haproxy about this using the "weight"
+keyword, with values between 1 and 256. It will then spread the load the most
+smoothly possible respecting those ratios :
+
+ server webA 192.168.1.11:80 cookie A weight 12 check
+ server webB 192.168.1.12:80 cookie B weight 12 check
+ server webC 192.168.1.13:80 cookie C weight 26 check
+ server webD 192.168.1.14:80 cookie D weight 26 check
+
+
+========================================================
+2.1 Variations involving external layer 4 load-balancers
+========================================================
+
+Instead of using a VRRP-based active/backup solution for the proxies,
+they can also be load-balanced by a layer4 load-balancer (eg: Alteon)
+which will also check that the services run fine on both proxies :
+
+ | VIP=192.168.1.1
+ +----+----+
+ | Alteon |
+ +----+----+
+ |
+ 192.168.1.3 | 192.168.1.4 192.168.1.11-192.168.1.14 192.168.1.2
+ -------+-----+------+-----------+-----+-----+-----+--------+----
+ | | | | | | _|_db
+ +--+--+ +--+--+ +-+-+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | LB2 | | A | | B | | C | | D | (___)
+ +-----+ +-----+ +---+ +---+ +---+ +---+ (___)
+ haproxy haproxy 4 cheap web servers
+
+
+Config on both proxies (LB1 and LB2) :
+--------------------------------------
+
+ listen webfarm 0.0.0.0:80
+ mode http
+ balance roundrobin
+ cookie JSESSIONID prefix
+ option httpclose
+ option forwardfor
+ option httplog
+ option dontlognull
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+The "dontlognull" option is used to prevent the proxy from logging the health
+checks from the Alteon. If a session exchanges no data, then it will not be
+logged.
+
+Config on the Alteon :
+----------------------
+
+ /c/slb/real 11
+ ena
+ name "LB1"
+ rip 192.168.1.3
+ /c/slb/real 12
+ ena
+ name "LB2"
+ rip 192.168.1.4
+ /c/slb/group 10
+ name "LB1-2"
+ metric roundrobin
+ health tcp
+ add 11
+ add 12
+ /c/slb/virt 10
+ ena
+ vip 192.168.1.1
+ /c/slb/virt 10/service http
+ group 10
+
+
+Note: the health-check on the Alteon is set to "tcp" to prevent the proxy from
+forwarding the connections. It can also be set to "http", but for this the
+proxy must specify a "monitor-net" with the Alteons' addresses, so that the
+Alteon can really check that the proxies can talk HTTP but without forwarding
+the connections to the end servers. Check next section for an example on how to
+use monitor-net.
+
+
+============================================================
+2.2 Generic TCP relaying and external layer 4 load-balancers
+============================================================
+
+Sometimes it's useful to be able to relay generic TCP protocols (SMTP, TSE,
+VNC, etc...), for example to interconnect private networks. The problem comes
+when you use external load-balancers which need to send periodic health-checks
+to the proxies, because these health-checks get forwarded to the end servers.
+The solution is to specify a network which will be dedicated to monitoring
+systems and must not lead to a forwarding connection nor to any log, using the
+"monitor-net" keyword. Note: this feature expects a version of haproxy greater
+than or equal to 1.1.32 or 1.2.6.
+
+
+ | VIP=172.16.1.1 |
+ +----+----+ +----+----+
+ | Alteon1 | | Alteon2 |
+ +----+----+ +----+----+
+ 192.168.1.252 | GW=192.168.1.254 | 192.168.1.253
+ | |
+ ------+---+------------+--+-----------------> TSE farm : 192.168.1.10
+ 192.168.1.1 | | 192.168.1.2
+ +--+--+ +--+--+
+ | LB1 | | LB2 |
+ +-----+ +-----+
+ haproxy haproxy
+
+
+Config on both proxies (LB1 and LB2) :
+--------------------------------------
+
+ listen tse-proxy
+ bind :3389,:1494,:5900 # TSE, ICA and VNC at once.
+ mode tcp
+ balance roundrobin
+ server tse-farm 192.168.1.10
+ monitor-net 192.168.1.252/31
+
+The "monitor-net" option instructs the proxies that any connection coming from
+192.168.1.252 or 192.168.1.253 will not be logged nor forwarded and will be
+closed immediately. The Alteon load-balancers will then see the proxies alive
+without disturbing the service.
+
+Config on the Alteon :
+----------------------
+
+ /c/l3/if 1
+ ena
+ addr 192.168.1.252
+ mask 255.255.255.0
+ /c/slb/real 11
+ ena
+ name "LB1"
+ rip 192.168.1.1
+ /c/slb/real 12
+ ena
+ name "LB2"
+ rip 192.168.1.2
+ /c/slb/group 10
+ name "LB1-2"
+ metric roundrobin
+ health tcp
+ add 11
+ add 12
+ /c/slb/virt 10
+ ena
+ vip 172.16.1.1
+ /c/slb/virt 10/service 1494
+ group 10
+ /c/slb/virt 10/service 3389
+ group 10
+ /c/slb/virt 10/service 5900
+ group 10
+
+
+Special handling of SSL :
+-------------------------
+Sometimes, you want to send health-checks to remote systems, even in TCP mode,
+in order to be able to failover to a backup server in case the first one is
+dead. Of course, you can simply enable TCP health-checks, but it sometimes
+happens that intermediate firewalls between the proxies and the remote servers
+acknowledge the TCP connection themselves, showing an always-up server. Since
+this is generally encountered on long-distance communications, which often
+involve SSL, an SSL health-check has been implemented to work around this issue.
+It sends SSL Hello messages to the remote server, which in turn replies with
+SSL Hello messages. Setting it up is very easy :
+
+ listen tcp-syslog-proxy
+ bind :1514 # listen to TCP syslog traffic on this port (SSL)
+ mode tcp
+ balance roundrobin
+ option ssl-hello-chk
+ server syslog-prod-site 192.168.1.10 check
+ server syslog-back-site 192.168.2.10 check backup
+
+
+=========================================================
+3. Simple HTTP/HTTPS load-balancing with cookie insertion
+=========================================================
+
+This is the same context as in example 1 above, but the web
+server uses HTTPS.
+
+ +-------+
+ |clients| clients
+ +---+---+
+ |
+ -+-----+--------+----
+ | _|_db
+ +--+--+ (___)
+ | SSL | (___)
+ | web | (___)
+ +-----+
+ 192.168.1.1 192.168.1.2
+
+
+Since haproxy does not handle SSL, this part will have to be extracted from the
+servers (freeing even more resources) and installed on the load-balancer
+itself. Install haproxy and apache+mod_ssl on the old box which will spread the
+load between the new boxes. Apache will work in SSL reverse-proxy-cache. If the
+application is correctly developed, it might even lower its load. However,
+since there now is a cache between the clients and haproxy, some security
+measures must be taken to ensure that inserted cookies will not be cached.
+
+
+ 192.168.1.1 192.168.1.11-192.168.1.14 192.168.1.2
+ -------+-----------+-----+-----+-----+--------+----
+ | | | | | _|_db
+ +--+--+ +-+-+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | A | | B | | C | | D | (___)
+ +-----+ +---+ +---+ +---+ +---+ (___)
+ apache 4 cheap web servers
+ mod_ssl
+ haproxy
+
+
+Config on haproxy (LB1) :
+-------------------------
+
+ listen 127.0.0.1:8000
+ mode http
+ balance roundrobin
+ cookie SERVERID insert indirect nocache
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+
+Description :
+-------------
+ - apache on LB1 will receive clients requests on port 443
+ - it forwards it to haproxy bound to 127.0.0.1:8000
+ - if a request does not contain a cookie, it will be forwarded to a valid
+ server
+ - in return, a cookie "SERVERID" will be inserted in the response holding the
+ server name (eg: "A"), and a "Cache-control: private" header will be added
+ so that the apache does not cache any page containing such cookie.
+ - when the client comes again with the cookie "SERVERID=A", LB1 will know that
+ it must be forwarded to server A. The cookie will be removed so that the
+ server does not see it.
+ - if server "webA" dies, the requests will be sent to another valid server
+ and a cookie will be reassigned.
+
+Notes :
+-------
+ - if the cookie works in "prefix" mode, there is no need to add the "nocache"
+ option because it is an application cookie which will be modified, and the
+ application flags will be preserved.
+ - if apache 1.3 is used as a front-end before haproxy, it always disables
+ HTTP keep-alive on the back-end, so there is no need for the "httpclose"
+ option on haproxy.
+ - configure apache to set the X-Forwarded-For header itself, and do not do
+ it on haproxy if you need the application to know about the client's IP.
+
+
+Flows :
+-------
+
+(apache) (haproxy) (server A)
+ >-- GET /URI1 HTTP/1.0 ------------> |
+ ( no cookie, haproxy forwards in load-balancing mode. )
+ | >-- GET /URI1 HTTP/1.0 ---------->
+ | <-- HTTP/1.0 200 OK -------------<
+ ( the proxy now adds the server cookie in return )
+ <-- HTTP/1.0 200 OK ---------------< |
+ Set-Cookie: SERVERID=A |
+ Cache-Control: private |
+ >-- GET /URI2 HTTP/1.0 ------------> |
+ Cookie: SERVERID=A |
+ ( the proxy sees the cookie. it forwards to server A and deletes it )
+ | >-- GET /URI2 HTTP/1.0 ---------->
+ | <-- HTTP/1.0 200 OK -------------<
+ ( the proxy does not add the cookie in return because the client knows it )
+ <-- HTTP/1.0 200 OK ---------------< |
+ >-- GET /URI3 HTTP/1.0 ------------> |
+ Cookie: SERVERID=A |
+ ( ... )
+
+
+
+========================================
+3.1. Alternate solution using Stunnel
+========================================
+
+When only SSL is required and cache is not needed, stunnel is a cheaper
+solution than Apache+mod_ssl. By default, stunnel does not process HTTP and
+does not add any X-Forwarded-For header, but there is a patch on the official
+haproxy site to provide this feature to recent stunnel versions.
+
+This time, stunnel will only process HTTPS and not HTTP. This means that
+haproxy will get all HTTP traffic, so haproxy will have to add the
+X-Forwarded-For header for HTTP traffic, but not for HTTPS traffic since
+stunnel will already have done it. We will use the "except" keyword to tell
+haproxy that connections from local host already have a valid header.
+
+
+ 192.168.1.1 192.168.1.11-192.168.1.14 192.168.1.2
+ -------+-----------+-----+-----+-----+--------+----
+ | | | | | _|_db
+ +--+--+ +-+-+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | A | | B | | C | | D | (___)
+ +-----+ +---+ +---+ +---+ +---+ (___)
+ stunnel 4 cheap web servers
+ haproxy
+
+
+Config on stunnel (LB1) :
+-------------------------
+
+ cert=/etc/stunnel/stunnel.pem
+ setuid=stunnel
+ setgid=proxy
+
+ socket=l:TCP_NODELAY=1
+ socket=r:TCP_NODELAY=1
+
+ [https]
+ accept=192.168.1.1:443
+ connect=192.168.1.1:80
+ xforwardedfor=yes
+
+
+Config on haproxy (LB1) :
+-------------------------
+
+ listen 192.168.1.1:80
+ mode http
+ balance roundrobin
+ option forwardfor except 192.168.1.1
+ cookie SERVERID insert indirect nocache
+ option httpchk HEAD /index.html HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check
+ server webB 192.168.1.12:80 cookie B check
+ server webC 192.168.1.13:80 cookie C check
+ server webD 192.168.1.14:80 cookie D check
+
+Description :
+-------------
+ - stunnel on LB1 will receive clients requests on port 443
+ - it forwards them to haproxy bound to port 80
+ - haproxy will receive HTTP client requests on port 80 and decrypted SSL
+ requests from Stunnel on the same port.
+ - stunnel will add the X-Forwarded-For header
+ - haproxy will add the X-Forwarded-For header for everyone except the local
+ address (stunnel).
+
+
+========================================
+4. Soft-stop for application maintenance
+========================================
+
+When an application is spread across several servers, the time to update all
+instances increases, so the application seems jerky for a longer period.
+
+HAProxy offers several solutions for this. Although it cannot be reconfigured
+without being stopped, nor does it offer any external command, there are other
+working solutions.
+
+
+=========================================
+4.1 Soft-stop using a file on the servers
+=========================================
+
+This trick is quite common and very simple: put a file on the server which will
+be checked by the proxy. When you want to stop the server, first remove this
+file. The proxy will see the server as failed, and will not send it any new
+session, only the old ones if the "persist" option is used. Wait a bit then
+stop the server when it does not receive anymore connections.
+
+
+ listen 192.168.1.1:80
+ mode http
+ balance roundrobin
+ cookie SERVERID insert indirect
+ option httpchk HEAD /running HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check inter 2000 rise 2 fall 2
+ server webB 192.168.1.12:80 cookie B check inter 2000 rise 2 fall 2
+ server webC 192.168.1.13:80 cookie C check inter 2000 rise 2 fall 2
+ server webD 192.168.1.14:80 cookie D check inter 2000 rise 2 fall 2
+ option persist
+ redispatch
+ contimeout 5000
+
+
+Description :
+-------------
+ - every 2 seconds, haproxy will try to access the file "/running" on the
+ servers, and declare the server as down after 2 attempts (4 seconds).
+ - only the servers which respond with a 200 or 3XX response will be used.
+ - if a request does not contain a cookie, it will be forwarded to a valid
+ server
+ - if a request contains a cookie for a failed server, haproxy will insist
+ on trying to reach the server anyway, to let the user finish what they were
+ doing. ("persist" option)
+ - if the server is totally stopped, the connection will fail and the proxy
+ will rebalance the client to another server ("redispatch")
+
+Usage on the web servers :
+--------------------------
+- to start the server :
+ # /etc/init.d/httpd start
+ # touch /home/httpd/www/running
+
+- to soft-stop the server
+ # rm -f /home/httpd/www/running
+
+- to completely stop the server :
+ # /etc/init.d/httpd stop
+
+Limits
+------
+If the server is totally powered down, the proxy will still try to reach it
+for those clients who still have a cookie referencing it, and the connection
+attempt will expire after 5 seconds ("contimeout"), and only after that, the
+client will be redispatched to another server. So this mode is only useful
+for software updates where the server will suddenly refuse the connection
+because the process is stopped. The problem is the same if the server suddenly
+crashes. All of its users will be noticeably disrupted.
+
+
+==================================
+4.2 Soft-stop using backup servers
+==================================
+
+A better solution which covers every situation is to use backup servers.
+Version 1.1.30 fixed a bug which prevented a backup server from sharing
+the same cookie as a standard server.
+
+
+ listen 192.168.1.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SERVERID insert indirect
+ option httpchk HEAD / HTTP/1.0
+ server webA 192.168.1.11:80 cookie A check port 81 inter 2000
+ server webB 192.168.1.12:80 cookie B check port 81 inter 2000
+ server webC 192.168.1.13:80 cookie C check port 81 inter 2000
+ server webD 192.168.1.14:80 cookie D check port 81 inter 2000
+
+ server bkpA 192.168.1.11:80 cookie A check port 80 inter 2000 backup
+ server bkpB 192.168.1.12:80 cookie B check port 80 inter 2000 backup
+ server bkpC 192.168.1.13:80 cookie C check port 80 inter 2000 backup
+ server bkpD 192.168.1.14:80 cookie D check port 80 inter 2000 backup
+
+Description
+-----------
+Four servers webA..D are checked on their port 81 every 2 seconds. The same
+servers named bkpA..D are checked on the port 80, and share the exact same
+cookies. Those servers will only be used when no other server is available
+for the same cookie.
+
+When the web servers are started, only the backup servers are seen as
+available. On the web servers, you need to redirect port 81 to local
+port 80, either with a local proxy (eg: a simple haproxy tcp instance),
+or with iptables (linux) or pf (openbsd). This is because we want the
+real web server to reply on this port, and not a fake one. Eg, with
+iptables :
+
+ # /etc/init.d/httpd start
+ # iptables -t nat -A PREROUTING -p tcp --dport 81 -j REDIRECT --to-port 80
+
+A few seconds later, the standard server is seen up and haproxy starts to send
+it new requests on its real port 80 (only new users with no cookie, of course).
+
+If a server completely crashes (even if it does not respond at the IP level),
+both the standard and backup servers will fail, so clients associated to this
+server will be redispatched to other live servers and will lose their sessions.
+
+Now if you want to enter a server into maintenance, simply stop it from
+responding on port 81 so that its standard instance will be seen as failed,
+but the backup will still work. Users will not notice anything since the
+service is still operational :
+
+ # iptables -t nat -D PREROUTING -p tcp --dport 81 -j REDIRECT --to-port 80
+
+The health checks on port 81 for this server will quickly fail, and the
+standard server will be seen as failed. No new session will be sent to this
+server, and existing clients with a valid cookie will still reach it because
+the backup server will still be up.
+
+Now wait as long as you want for the old users to stop using the service, and
+once you see that the server does not receive any traffic, simply stop it :
+
+ # /etc/init.d/httpd stop
+
+The associated backup server will in turn fail, and if any client still tries
+to access this particular server, they will be redispatched to any other valid
+server because of the "redispatch" option.
+
+This method has an advantage : you never touch the proxy when doing server
+maintenance. The people managing the servers can make them disappear smoothly.
+
+
+4.2.1 Variations for operating systems without any firewall software
+--------------------------------------------------------------------
+
+The downside is that you need a redirection solution on the server just for
+the health-checks. If the server OS does not support any firewall software,
+this redirection can also be handled by a simple haproxy in tcp mode :
+
+ global
+ daemon
+ quiet
+ pidfile /var/run/haproxy-checks.pid
+ listen 0.0.0.0:81
+ mode tcp
+ dispatch 127.0.0.1:80
+ contimeout 1000
+ clitimeout 10000
+ srvtimeout 10000
+
+To start the web service :
+
+ # /etc/init.d/httpd start
+ # haproxy -f /etc/haproxy/haproxy-checks.cfg
+
+To soft-stop the service :
+
+ # kill $(</var/run/haproxy-checks.pid)
+
+The port 81 will stop responding and the load-balancer will notice the failure.
+
+
+4.2.2 Centralizing the server management
+----------------------------------------
+
+If one finds it preferable to manage the servers from the load-balancer itself,
+the port redirector can be installed on the load-balancer itself. See the
+example with iptables below.
+
+Make the servers appear as operational :
+ # iptables -t nat -A OUTPUT -d 192.168.1.11 -p tcp --dport 81 -j DNAT --to-dest :80
+ # iptables -t nat -A OUTPUT -d 192.168.1.12 -p tcp --dport 81 -j DNAT --to-dest :80
+ # iptables -t nat -A OUTPUT -d 192.168.1.13 -p tcp --dport 81 -j DNAT --to-dest :80
+ # iptables -t nat -A OUTPUT -d 192.168.1.14 -p tcp --dport 81 -j DNAT --to-dest :80
+
+Soft stop one server :
+ # iptables -t nat -D OUTPUT -d 192.168.1.12 -p tcp --dport 81 -j DNAT --to-dest :80
+
+Another solution is to use the "COMAFILE" patch provided by Alexander Lazic,
+which is available for download here :
+
+ http://w.ods.org/tools/haproxy/contrib/
+
+
+4.2.3 Notes :
+-------------
+ - Never, ever, start a fake service on port 81 for the health-checks, because
+ a real web service failure will not be detected as long as the fake service
+ runs. You must really forward the check port to the real application.
+
+ - health-checks will be sent twice as often, once for each standard server,
+   and once for each backup server. All this will be multiplied by the
+ number of processes if you use multi-process mode. You will have to ensure
+ that all the checks sent to the server do not overload it.
+
+=======================
+4.3 Hot reconfiguration
+=======================
+
+There are two types of haproxy users :
+ - those who can never do anything in production out of maintenance periods ;
+ - those who can do anything at any time provided that the consequences are
+ limited.
+
+The first ones have no problem stopping the server to change configuration
+because they got some maintenance periods during which they can break anything.
+So they will even prefer doing a clean stop/start sequence to ensure everything
+will work fine upon next reload. Since those have represented the majority of
+haproxy uses, there has been little effort trying to improve this.
+
+However, the second category is a bit different. They like to be able to fix an
+error in a configuration file without anyone noticing. This can sometimes also
+be the case for the first category because humans are not failsafe.
+
+For this reason, a new hot reconfiguration mechanism has been introduced in
+version 1.1.34. Its usage is very simple and works even in chrooted
+environments with lowered privileges. The principle is very simple : upon
+reception of a SIGTTOU signal, the proxy will stop listening to all the ports.
+This will release the ports so that a new instance can be started. Existing
+connections will not be broken at all. If the new instance fails to start,
+then sending a SIGTTIN signal back to the original processes will restore
+the listening ports. This is possible without any special privileges because
+the sockets will not have been closed, so the bind() is still valid. Otherwise,
+if the new process starts successfully, then sending a SIGUSR1 signal to the
+old one ensures that it will exit as soon as its last session ends.
+
+A hot reconfiguration script would look like this :
+
+ # save previous state
+ mv /etc/haproxy/config /etc/haproxy/config.old
+ mv /var/run/haproxy.pid /var/run/haproxy.pid.old
+
+ mv /etc/haproxy/config.new /etc/haproxy/config
+ kill -TTOU $(cat /var/run/haproxy.pid.old)
+  if haproxy -p /var/run/haproxy.pid -f /etc/haproxy/config; then
+    echo "New instance successfully loaded, stopping previous one."
+    kill -USR1 $(cat /var/run/haproxy.pid.old)
+    rm -f /var/run/haproxy.pid.old
+    exit 0
+  else
+    echo "New instance failed to start, resuming previous one."
+    kill -TTIN $(cat /var/run/haproxy.pid.old)
+    rm -f /var/run/haproxy.pid
+    mv /var/run/haproxy.pid.old /var/run/haproxy.pid
+    mv /etc/haproxy/config /etc/haproxy/config.new
+    mv /etc/haproxy/config.old /etc/haproxy/config
+    exit 1
+  fi
+
+After this, you can still force old connections to end by sending
+a SIGTERM to the old process if it still exists :
+
+ kill $(cat /var/run/haproxy.pid.old)
+ rm -f /var/run/haproxy.pid.old
+
+Be careful with this as in multi-process mode, some pids might already
+have been reallocated to completely different processes.
+
+
+==================================================
+5. Multi-site load-balancing with local preference
+==================================================
+
+5.1 Description of the problem
+==============================
+
+Consider a world-wide company with sites on several continents. There are two
+production sites SITE1 and SITE2 which host identical applications. There are
+many offices around the world. For speed and communication cost reasons, each
+office uses the nearest site by default, but can switch to the backup site in
+the event of a site or application failure. There also are users on the
+production sites, which use their local sites by default, but can switch to the
+other site in case of a local application failure.
+
+The main constraints are :
+
+ - application persistence : although the application is the same on both
+ sites, there is no session synchronisation between the sites. A failure
+ of one server or one site can cause a user to switch to another server
+ or site, but when the server or site comes back, the user must not switch
+ again.
+
+ - communication costs : inter-site communication should be reduced to the
+ minimum. Specifically, in case of a local application failure, every
+ office should be able to switch to the other site without continuing to
+ use the default site.
+
+5.2 Solution
+============
+ - Each production site will have two haproxy load-balancers in front of its
+ application servers to balance the load across them and provide local HA.
+ We will call them "S1L1" and "S1L2" on site 1, and "S2L1" and "S2L2" on
+ site 2. These proxies will extend the application's JSESSIONID cookie to
+ put the server name as a prefix.
+
+ - Each production site will have one front-end haproxy director to provide
+ the service to local users and to remote offices. It will load-balance
+ across the two local load-balancers, and will use the other site's
+ load-balancers as backup servers. It will insert the local site identifier
+ in a SITE cookie for the local load-balancers, and the remote site
+ identifier for the remote load-balancers. These front-end directors will
+ be called "SD1" and "SD2" for "Site Director".
+
+ - Each office will have one haproxy near the border gateway which will direct
+    local users to their preferred site by default, or to the backup site in
+ the event of a previous failure. It will also analyze the SITE cookie, and
+ direct the users to the site referenced in the cookie. Thus, the preferred
+ site will be declared as a normal server, and the backup site will be
+ declared as a backup server only, which will only be used when the primary
+ site is unreachable, or when the primary site's director has forwarded
+ traffic to the second site. These proxies will be called "OP1".."OPXX"
+ for "Office Proxy #XX".
+
+
+5.3 Network diagram
+===================
+
+Note : offices 1 and 2 are on the same continent as site 1, while
+       office 3 is on the same continent as site 2. Each production
+ site can reach the second one either through the WAN or through
+ a dedicated link.
+
+
+ Office1 Office2 Office3
+ users users users
+192.168 # # # 192.168 # # # # # #
+.1.0/24 | | | .2.0/24 | | | 192.168.3.0/24 | | |
+ --+----+-+-+- --+----+-+-+- ---+----+-+-+-
+ | | .1 | | .1 | | .1
+ | +-+-+ | +-+-+ | +-+-+
+ | |OP1| | |OP2| | |OP3| ...
+ ,-:-. +---+ ,-:-. +---+ ,-:-. +---+
+ ( X ) ( X ) ( X )
+ `-:-' `-:-' ,---. `-:-'
+ --+---------------+------+----~~~( X )~~~~-------+---------+-
+ | `---' |
+ | |
+ +---+ ,-:-. +---+ ,-:-.
+ |SD1| ( X ) |SD2| ( X )
+ ( SITE 1 ) +-+-+ `-:-' ( SITE 2 ) +-+-+ `-:-'
+ |.1 | |.1 |
+ 10.1.1.0/24 | | ,---. 10.2.1.0/24 | |
+ -+-+-+-+-+-+-+-----+-+--( X )------+-+-+-+-+-+-+-----+-+--
+ | | | | | | | `---' | | | | | | |
+ ...# # # # # |.11 |.12 ...# # # # # |.11 |.12
+ Site 1 +-+--+ +-+--+ Site 2 +-+--+ +-+--+
+ Local |S1L1| |S1L2| Local |S2L1| |S2L2|
+ users +-+--+ +--+-+ users +-+--+ +--+-+
+ | | | |
+ 10.1.2.0/24 -+-+-+--+--++-- 10.2.2.0/24 -+-+-+--+--++--
+ |.1 |.4 |.1 |.4
+ +-+-+ +-+-+ +-+-+ +-+-+
+ |W11| ~~~ |W14| |W21| ~~~ |W24|
+ +---+ +---+ +---+ +---+
+ 4 application servers 4 application servers
+ on site 1 on site 2
+
+
+
+5.4 Description
+===============
+
+5.4.1 Local users
+-----------------
+ - Office 1 users connect to OP1 = 192.168.1.1
+ - Office 2 users connect to OP2 = 192.168.2.1
+ - Office 3 users connect to OP3 = 192.168.3.1
+ - Site 1 users connect to SD1 = 10.1.1.1
+ - Site 2 users connect to SD2 = 10.2.1.1
+
+5.4.2 Office proxies
+--------------------
+ - Office 1 connects to site 1 by default and uses site 2 as a backup.
+ - Office 2 connects to site 1 by default and uses site 2 as a backup.
+ - Office 3 connects to site 2 by default and uses site 1 as a backup.
+
+The offices check the local site's SD proxy every 30 seconds, and the
+remote one every 60 seconds.
+
+
+Configuration for Office Proxy OP1
+----------------------------------
+
+ listen 192.168.1.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SITE
+ option httpchk HEAD / HTTP/1.0
+ server SD1 10.1.1.1:80 cookie SITE1 check inter 30000
+ server SD2 10.2.1.1:80 cookie SITE2 check inter 60000 backup
+
+
+Configuration for Office Proxy OP2
+----------------------------------
+
+ listen 192.168.2.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SITE
+ option httpchk HEAD / HTTP/1.0
+ server SD1 10.1.1.1:80 cookie SITE1 check inter 30000
+ server SD2 10.2.1.1:80 cookie SITE2 check inter 60000 backup
+
+
+Configuration for Office Proxy OP3
+----------------------------------
+
+ listen 192.168.3.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SITE
+ option httpchk HEAD / HTTP/1.0
+ server SD2 10.2.1.1:80 cookie SITE2 check inter 30000
+ server SD1 10.1.1.1:80 cookie SITE1 check inter 60000 backup
+
+
+5.4.3 Site directors ( SD1 and SD2 )
+------------------------------------
+The site directors forward traffic to the local load-balancers, and set a
+cookie to identify the site. If no local load-balancer is available, or if
+the local application servers are all down, it will redirect traffic to the
+remote site, and report this in the SITE cookie. In order not to uselessly
+load each site's WAN link, each SD will check the other site at a lower
+rate. The site directors will also insert their client's address so that
+the application server knows which local user or remote site accesses it.
+
+The SITE cookie which is set by these directors will also be understood
+by the office proxies. This is important because if SD1 decides to forward
+traffic to site 2, it will write "SITE2" in the "SITE" cookie, and on next
+request, the office proxy will automatically and directly talk to SITE2 if
+it can reach it. If it cannot, it will still send the traffic to SITE1
+where SD1 will in turn try to reach SITE2.
+
+The load-balancers checks are performed on port 81. As we'll see further,
+the load-balancers provide a health monitoring port 81 which reroutes to
+port 80 but which allows them to tell the SD that they are going down soon
+and that the SD must not use them anymore.
+
+
+Configuration for SD1
+---------------------
+
+ listen 10.1.1.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SITE insert indirect
+ option httpchk HEAD / HTTP/1.0
+ option forwardfor
+ server S1L1 10.1.1.11:80 cookie SITE1 check port 81 inter 4000
+ server S1L2 10.1.1.12:80 cookie SITE1 check port 81 inter 4000
+ server S2L1 10.2.1.11:80 cookie SITE2 check port 81 inter 8000 backup
+ server S2L2 10.2.1.12:80 cookie SITE2 check port 81 inter 8000 backup
+
+Configuration for SD2
+---------------------
+
+ listen 10.2.1.1:80
+ mode http
+ balance roundrobin
+ redispatch
+ cookie SITE insert indirect
+ option httpchk HEAD / HTTP/1.0
+ option forwardfor
+ server S2L1 10.2.1.11:80 cookie SITE2 check port 81 inter 4000
+ server S2L2 10.2.1.12:80 cookie SITE2 check port 81 inter 4000
+ server S1L1 10.1.1.11:80 cookie SITE1 check port 81 inter 8000 backup
+ server S1L2 10.1.1.12:80 cookie SITE1 check port 81 inter 8000 backup
+
+
+5.4.4 Local load-balancers S1L1, S1L2, S2L1, S2L2
+-------------------------------------------------
+Please first note that because SD1 and SD2 use the same cookie for both
+servers on a same site, the second load-balancer of each site will only
+receive load-balanced requests, but as soon as the SITE cookie will be
+set, only the first LB will receive the requests because it will be the
+first one to match the cookie.
+
+The load-balancers will spread the load across 4 local web servers, and
+use the JSESSIONID provided by the application to provide server persistence
+using the new 'prefix' method. Soft-stop will also be implemented as described
+in section 4 above. Moreover, these proxies will provide their own maintenance
+soft-stop. Port 80 will be used for application traffic, while port 81 will
+only be used for health-checks and locally rerouted to port 80. A grace time
+will be specified to service on port 80, but not on port 81. This way, a soft
+kill (kill -USR1) on the proxy will only kill the health-check forwarder so
+that the site director knows it must not use this load-balancer anymore. But
+the service will still work for 20 seconds and as long as there are established
+sessions.
+
+These proxies will also be the only ones to disable HTTP keep-alive in the
+chain, because it is enough to do it at one place, and it's necessary to do
+it with 'prefix' cookies.
+
+Configuration for S1L1/S1L2
+---------------------------
+
+ listen 10.1.1.11:80 # 10.1.1.12:80 for S1L2
+ grace 20000 # don't kill us until 20 seconds have elapsed
+ mode http
+ balance roundrobin
+ cookie JSESSIONID prefix
+ option httpclose
+ option forwardfor
+ option httpchk HEAD / HTTP/1.0
+ server W11 10.1.2.1:80 cookie W11 check port 81 inter 2000
+ server W12 10.1.2.2:80 cookie W12 check port 81 inter 2000
+ server W13 10.1.2.3:80 cookie W13 check port 81 inter 2000
+ server W14 10.1.2.4:80 cookie W14 check port 81 inter 2000
+
+ server B11 10.1.2.1:80 cookie W11 check port 80 inter 4000 backup
+ server B12 10.1.2.2:80 cookie W12 check port 80 inter 4000 backup
+ server B13 10.1.2.3:80 cookie W13 check port 80 inter 4000 backup
+ server B14 10.1.2.4:80 cookie W14 check port 80 inter 4000 backup
+
+ listen 10.1.1.11:81 # 10.1.1.12:81 for S1L2
+ mode tcp
+ dispatch 10.1.1.11:80 # 10.1.1.12:80 for S1L2
+
+
+Configuration for S2L1/S2L2
+---------------------------
+
+ listen 10.2.1.11:80 # 10.2.1.12:80 for S2L2
+ grace 20000 # don't kill us until 20 seconds have elapsed
+ mode http
+ balance roundrobin
+ cookie JSESSIONID prefix
+ option httpclose
+ option forwardfor
+ option httpchk HEAD / HTTP/1.0
+ server W21 10.2.2.1:80 cookie W21 check port 81 inter 2000
+ server W22 10.2.2.2:80 cookie W22 check port 81 inter 2000
+ server W23 10.2.2.3:80 cookie W23 check port 81 inter 2000
+ server W24 10.2.2.4:80 cookie W24 check port 81 inter 2000
+
+ server B21 10.2.2.1:80 cookie W21 check port 80 inter 4000 backup
+ server B22 10.2.2.2:80 cookie W22 check port 80 inter 4000 backup
+ server B23 10.2.2.3:80 cookie W23 check port 80 inter 4000 backup
+ server B24 10.2.2.4:80 cookie W24 check port 80 inter 4000 backup
+
+ listen 10.2.1.11:81 # 10.2.1.12:81 for S2L2
+ mode tcp
+ dispatch 10.2.1.11:80 # 10.2.1.12:80 for S2L2
+
+
+5.5 Comments
+------------
+Since each site director sets a cookie identifying the site, remote office
+users will have their office proxies direct them to the right site and stick
+to this site as long as the user still uses the application and the site is
+available. Users on production sites will be directed to the right site by the
+site directors depending on the SITE cookie.
+
+If the WAN link dies on a production site, the remote office users will not
+see their site anymore, so they will redirect the traffic to the second site.
+If there are dedicated inter-site links as on the diagram above, the second
+SD will see the cookie and still be able to reach the original site. For
+example :
+
+Office 1 user sends the following to OP1 :
+ GET / HTTP/1.0
+ Cookie: SITE=SITE1; JSESSIONID=W14~123;
+
+OP1 cannot reach site 1 because its external router is dead. So the SD1 server
+is seen as dead, and OP1 will then forward the request to SD2 on site 2,
+regardless of the SITE cookie.
+
+SD2 on site 2 receives a SITE cookie containing "SITE1". Fortunately, it
+can reach Site 1's load balancers S1L1 and S1L2. So it forwards the request
+to S1L1 (the first one with the same cookie).
+
+S1L1 (on site 1) finds "W14" in the JSESSIONID cookie, so it can forward the
+request to the right server, and the user session will continue to work. Once
+the Site 1's WAN link comes back, OP1 will see SD1 again, and will not route
+through SITE 2 anymore.
+
+However, when a new user on Office 1 connects to the application during a
+site 1 failure, it does not contain any cookie. Since OP1 does not see SD1
+because of the network failure, it will direct the request to SD2 on site 2,
+which will by default direct the traffic to the local load-balancers, S2L1 and
+S2L2. So only initial users will load the inter-site link, not the new ones.
+
+
+===================
+6. Source balancing
+===================
+
+Sometimes it may prove useful to access servers from a pool of IP addresses
+instead of only one or two. Some equipment (NAT firewalls, load-balancers)
+are sensitive to the source address, and often need many sources to distribute
+the load evenly amongst their internal hash buckets.
+
+To do this, you simply have to use several times the same server with a
+different source. Example :
+
+ listen 0.0.0.0:80
+ mode tcp
+ balance roundrobin
+ server from1to1 10.1.1.1:80 source 10.1.2.1
+ server from2to1 10.1.1.1:80 source 10.1.2.2
+ server from3to1 10.1.1.1:80 source 10.1.2.3
+ server from4to1 10.1.1.1:80 source 10.1.2.4
+ server from5to1 10.1.1.1:80 source 10.1.2.5
+ server from6to1 10.1.1.1:80 source 10.1.2.6
+ server from7to1 10.1.1.1:80 source 10.1.2.7
+ server from8to1 10.1.1.1:80 source 10.1.2.8
+
+
+=============================================
+7. Managing high loads on application servers
+=============================================
+
+One of the roles often expected from a load balancer is to mitigate the load on
+the servers during traffic peaks. More and more often, we see heavy frameworks
+used to deliver flexible and evolving web designs, at the cost of high loads
+on the servers, or very low concurrency. Sometimes, response times are also
+rather high. People developing web sites relying on such frameworks very often
+look for a load balancer which is able to distribute the load in the most
+evenly fashion and which will be nice with the servers.
+
+There is a powerful feature in haproxy which achieves exactly this : request
+queueing associated with concurrent connections limit.
+
+Let's say you have an application server which supports at most 20 concurrent
+requests. You have 3 servers, so you can accept up to 60 concurrent HTTP
+connections, which often means 30 concurrent users in case of keep-alive (2
+persistent connections per user).
+
+Even if you disable keep-alive, if the server takes a long time to respond,
+you still have a high risk of multiple users clicking at the same time and
+having their requests unserved because of server saturation. To work around
+the problem, you increase the concurrent connection limit on the servers,
+but their performance stalls under higher loads.
+
+The solution is to limit the number of connections between the clients and the
+servers. You set haproxy to limit the number of connections on a per-server
+basis, and you let all the users you want connect to it. It will then fill all
+the servers up to the configured connection limit, and will put the remaining
+connections in a queue, waiting for a connection to be released on a server.
+
+This ensures five essential principles :
+
+ - all clients can be served whatever their number without crashing the
+    servers, the only impact is that the response time can be delayed.
+
+ - the servers can be used at full throttle without the risk of stalling,
+ and fine tuning can lead to optimal performance.
+
+ - response times can be reduced by making the servers work below the
+ congestion point, effectively leading to shorter response times even
+ under moderate loads.
+
+ - no domino effect when a server goes down or starts up. Requests will be
+ queued more or less, always respecting servers limits.
+
+ - it's easy to achieve high performance even on memory-limited hardware.
+ Indeed, heavy frameworks often consume huge amounts of RAM and not always
+ all the CPU available. In case of wrong sizing, reducing the number of
+ concurrent connections will protect against memory shortages while still
+ ensuring optimal CPU usage.
+
+
+Example :
+---------
+
+HAProxy is installed in front of an application server farm. It will limit
+the concurrent connections to 4 per server (one thread per CPU), thus ensuring
+very fast response times.
+
+
+ 192.168.1.1 192.168.1.11-192.168.1.13 192.168.1.2
+ -------+-------------+-----+-----+------------+----
+ | | | | _|_db
+ +--+--+ +-+-+ +-+-+ +-+-+ (___)
+ | LB1 | | A | | B | | C | (___)
+ +-----+ +---+ +---+ +---+ (___)
+ haproxy 3 application servers
+ with heavy frameworks
+
+
+Config on haproxy (LB1) :
+-------------------------
+
+ listen appfarm 192.168.1.1:80
+ mode http
+ maxconn 10000
+ option httpclose
+ option forwardfor
+ balance roundrobin
+ cookie SERVERID insert indirect
+ option httpchk HEAD /index.html HTTP/1.0
+ server railsA 192.168.1.11:80 cookie A maxconn 4 check
+ server railsB 192.168.1.12:80 cookie B maxconn 4 check
+ server railsC 192.168.1.13:80 cookie C maxconn 4 check
+ contimeout 60000
+
+
+Description :
+-------------
+The proxy listens on IP 192.168.1.1, port 80, and expects HTTP requests. It
+can accept up to 10000 concurrent connections on this socket. It follows the
+roundrobin algorithm to assign servers to connections as long as servers are
+not saturated.
+
+It allows up to 4 concurrent connections per server, and will queue the
+requests above this value. The "contimeout" parameter is used to set the
+maximum time a connection may take to establish on a server, but here it
+is also used to set the maximum time a connection may stay unserved in the
+queue (1 minute here).
+
+If the servers can each process 4 requests in 10 ms on average, then at 3000
+connections, response times will be delayed by at most :
+
+ 3000 / 3 servers / 4 conns * 10 ms = 2.5 seconds
+
+Which is not that dramatic considering the huge number of users for such a low
+number of servers.
+
+When connection queues fill up and application servers are starving, response
+times will grow and users might abort by clicking on the "Stop" button. It is
+very undesirable to send aborted requests to servers, because they will eat
+CPU cycles for nothing.
+
+An option has been added to handle this specific case : "option abortonclose".
+By specifying it, you tell haproxy that if an input channel is closed on the
+client side AND the request is still waiting in the queue, then it is highly
+likely that the user has stopped, so we remove the request from the queue
+before it will get served.
+
+
+Managing unfair response times
+------------------------------
+
+Sometimes, the application server will be very slow for some requests (eg:
+login page) and faster for other requests. This may cause excessive queueing
+of expectedly fast requests when all threads on the server are blocked on a
+request to the database. Then the only solution is to increase the number of
+concurrent connections, so that the server can handle a large average number
+of slow connections with threads left to handle faster connections.
+
+But as we have seen, increasing the number of connections on the servers can
+be detrimental to performance (eg: Apache processes fighting for the accept()
+lock). To improve this situation, the "minconn" parameter has been introduced.
+When it is set, the maximum connection concurrency on the server will be bound
+by this value, and the limit will increase with the number of clients waiting
+in queue, till the clients connected to haproxy reach the proxy's maxconn, in
+which case the connections per server will reach the server's maxconn. It means
+that during low-to-medium loads, the minconn will be applied, and during surges
+the maxconn will be applied. It ensures both optimal response times under
+normal loads, and availability under very high loads.
+
+Example :
+---------
+
+ listen appfarm 192.168.1.1:80
+ mode http
+ maxconn 10000
+ option httpclose
+ option abortonclose
+ option forwardfor
+ balance roundrobin
+ # The servers will get 4 concurrent connections under low
+ # loads, and 12 when there will be 10000 clients.
+ server railsA 192.168.1.11:80 minconn 4 maxconn 12 check
+ server railsB 192.168.1.12:80 minconn 4 maxconn 12 check
+ server railsC 192.168.1.13:80 minconn 4 maxconn 12 check
+ contimeout 60000
+
+
diff --git a/doc/coding-style.txt b/doc/coding-style.txt
new file mode 100644
index 0000000..02a55f5
--- /dev/null
+++ b/doc/coding-style.txt
@@ -0,0 +1,1566 @@
+2020/07/07 - HAProxy coding style - Willy Tarreau <w@1wt.eu>
+------------------------------------------------------------
+
+A number of contributors are often embarrassed with coding style issues, they
+don't always know if they're doing it right, especially since the coding style
+has evolved over the years. What is explained here is not necessarily what is
+applied in the code, but new code should as much as possible conform to this
+style. Coding style fixes happen when code is replaced. It is useless to send
+patches to fix coding style only, they will be rejected, unless they belong to
+a patch series which needs these fixes prior to get code changes. Also, please
+avoid fixing coding style in the same patches as functional changes, they make
+code review harder.
+
+A good way to quickly validate your patch before submitting it is to pass it
+through the Linux kernel's checkpatch.pl utility which can be downloaded here :
+
+ http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/scripts/checkpatch.pl
+
+Running it with the following options relaxes its checks to accommodate to the
+extra degree of freedom that is tolerated in HAProxy's coding style compared to
+the stricter style used in the kernel :
+
+ checkpatch.pl -q --max-line-length=160 --no-tree --no-signoff \
+ --ignore=LEADING_SPACE,CODE_INDENT,DEEP_INDENTATION \
+ --ignore=ELSE_AFTER_BRACE < patch
+
+You can take its output as hints instead of strict rules, but in general its
+output will be accurate and it may even spot some real bugs.
+
+When modifying a file, you must accept the terms of the license of this file
+which is recalled at the top of the file, or is explained in the LICENSE file,
+or if not stated, defaults to LGPL version 2.1 or later for files in the
+'include' directory, and GPL version 2 or later for all other files.
+
+When adding a new file, you must add a copyright banner at the top of the
+file with your real name, e-mail address and a reminder of the license.
+Contributions under incompatible licenses or too restrictive licenses might
+get rejected. If in doubt, please apply the principle above for existing files.
+
+All code examples below will intentionally be prefixed with " | " to mark
+where the code aligns with the first column, and tabs in this document will be
+represented as a series of 8 spaces so that it displays the same everywhere.
+
+
+1) Indentation and alignment
+----------------------------
+
+1.1) Indentation
+----------------
+
+Indentation and alignment are two completely different things that people often
+get wrong. Indentation is used to mark a sub-level in the code. A sub-level
+means that a block is executed in the context of another block (eg: a function
+or a condition) :
+
+ | main(int argc, char **argv)
+ | {
+ | int i;
+ |
+ | if (argc < 2)
+ | exit(1);
+ | }
+
+In the example above, the code belongs to the main() function and the exit()
+call belongs to the if statement. Indentation is made with tabs (\t, ASCII 9),
+which allows any developer to configure their preferred editor to use their
+own tab size and to still get the text properly indented. Exactly one tab is
+used per sub-level. Tabs may only appear at the beginning of a line or after
+another tab. It is illegal to put a tab after some text, as it mangles displays
+in a different manner for different users (particularly when used to align
+comments or values after a #define). If you're tempted to put a tab after some
+text, then you're doing it wrong and you need alignment instead (see below).
+
+Note that there are places where the code was not properly indented in the
+past. In order to view it correctly, you may have to set your tab size to 8
+characters.
+
+
+1.2) Alignment
+--------------
+
+Alignment is used to continue a line in a way that makes things easier to group
+together. By definition, alignment is character-based, so it uses spaces. Tabs
+would not work because for one tab there would not be as many characters on all
+displays. For instance, the arguments in a function declaration may be broken
+into multiple lines using alignment spaces :
+
+ | int http_header_match2(const char *hdr, const char *end,
+ | const char *name, int len)
+ | {
+ | ...
+ | }
+
+In this example, the "const char *name" part is aligned with the first
+character of the group it belongs to (list of function arguments). Placing it
+here makes it obvious that it's one of the function's arguments. Multiple lines
+are easy to handle this way. This is very common with long conditions too :
+
+ | if ((len < eol - sol) &&
+ | (sol[len] == ':') &&
+ | (strncasecmp(sol, name, len) == 0)) {
+ | ctx->del = len;
+ | }
+
+If we take again the example above marking tabs with "[-Tabs-]" and spaces
+with "#", we get this :
+
+ | [-Tabs-]if ((len < eol - sol) &&
+ | [-Tabs-]####(sol[len] == ':') &&
+ | [-Tabs-]####(strncasecmp(sol, name, len) == 0)) {
+ | [-Tabs-][-Tabs-]ctx->del = len;
+ | [-Tabs-]}
+
+It is worth noting that some editors tend to confuse indentations and alignment.
+Emacs is notoriously known for this brokenness, and is responsible for almost
+all of the alignment mess. The reason is that Emacs only counts spaces, tries
+to fill as many as possible with tabs and completes with spaces. Once you know
+it, you just have to be careful, as alignment is not used much, so generally it
+is just a matter of replacing the last tab with 8 spaces when this happens.
+
+Indentation should be used everywhere there is a block or an opening brace. It
+is not possible to have two consecutive closing braces on the same column, it
+means that the innermost was not indented.
+
+Right :
+
+ | main(int argc, char **argv)
+ | {
+ | if (argc > 1) {
+ | printf("Hello\n");
+ | }
+ | exit(0);
+ | }
+
+Wrong :
+
+ | main(int argc, char **argv)
+ | {
+ | if (argc > 1) {
+ | printf("Hello\n");
+ | }
+ | exit(0);
+ | }
+
+A special case applies to switch/case statements. Due to my editor's settings,
+I've been used to align "case" with "switch" and to find it somewhat logical
+since each of the "case" statements opens a sublevel belonging to the "switch"
+statement. But indenting "case" after "switch" is accepted too. However in any
+case, whatever follows the "case" statement must be indented, whether or not it
+contains braces :
+
+ | switch (*arg) {
+ | case 'A': {
+ | int i;
+ | for (i = 0; i < 10; i++)
+ | printf("Please stop pressing 'A'!\n");
+ | break;
+ | }
+ | case 'B':
+ | printf("You pressed 'B'\n");
+ | break;
+ | case 'C':
+ | case 'D':
+ | printf("You pressed 'C' or 'D'\n");
+ | break;
+ | default:
+ | printf("I don't know what you pressed\n");
+ | }
+
+
+2) Braces
+---------
+
+Braces are used to delimit multiple-instruction blocks. In general it is
+preferred to avoid braces around single-instruction blocks as it reduces the
+number of lines :
+
+Right :
+
+ | if (argc >= 2)
+ | exit(0);
+
+Wrong :
+
+ | if (argc >= 2) {
+ | exit(0);
+ | }
+
+But it is not that strict, it really depends on the context. It happens from
+time to time that single-instruction blocks are enclosed within braces because
+it makes the code more symmetrical, or more readable. Example :
+
+ | if (argc < 2) {
+ | printf("Missing argument\n");
+ | exit(1);
+ | } else {
+ | exit(0);
+ | }
+
+Braces are always needed to declare a function. A function's opening brace must
+be placed at the beginning of the next line :
+
+Right :
+
+ | int main(int argc, char **argv)
+ | {
+ | exit(0);
+ | }
+
+Wrong :
+
+ | int main(int argc, char **argv) {
+ | exit(0);
+ | }
+
+Note that a large portion of the code still does not conform to this rule, as
+it took years to get all authors to adapt to this more common standard which
+is now preferred, as it avoids visual confusion when function declarations are
+broken on multiple lines :
+
+Right :
+
+ | int foo(const char *hdr, const char *end,
+ | const char *name, const char *err,
+ | int len)
+ | {
+ | int i;
+
+Wrong :
+
+ | int foo(const char *hdr, const char *end,
+ | const char *name, const char *err,
+ | int len) {
+ | int i;
+
+Braces should always be used where there might be an ambiguity with the code
+later. The most common example is the stacked "if" statement where an "else"
+may be added later at the wrong place breaking the code, but it also happens
+with comments or long arguments in function calls. In general, if a block is
+more than one line long, it should use braces.
+
+Dangerous code waiting for a victim :
+
+ | if (argc < 2)
+ | /* ret must not be negative here */
+ | if (ret < 0)
+ | return -1;
+
+Wrong change :
+
+ | if (argc < 2)
+ | /* ret must not be negative here */
+ | if (ret < 0)
+ | return -1;
+ | else
+ | return 0;
+
+It will do this instead of what your eye seems to tell you :
+
+ | if (argc < 2)
+ | /* ret must not be negative here */
+ | if (ret < 0)
+ | return -1;
+ | else
+ | return 0;
+
+Right :
+
+ | if (argc < 2) {
+ | /* ret must not be negative here */
+ | if (ret < 0)
+ | return -1;
+ | }
+ | else
+ | return 0;
+
+Similarly dangerous example :
+
+ | if (ret < 0)
+ | /* ret must not be negative here */
+ | complain();
+ | init();
+
+Wrong change to silence the annoying message :
+
+ | if (ret < 0)
+ | /* ret must not be negative here */
+ | //complain();
+ | init();
+
+... which in fact means :
+
+ | if (ret < 0)
+ | init();
+
+
+3) Breaking lines
+-----------------
+
+There is no strict rule for line breaking. Some files try to stick to the 80
+column limit, but given that various people use various tab sizes, it does not
+make much sense. Also, code is sometimes easier to read with less lines, as it
+represents less surface on the screen (since each new line adds its tabs and
+spaces). The rule is to stick to the average line length of other lines. If you
+are working in a file which fits in 80 columns, try to keep this goal in mind.
+If you're in a function with 120-chars lines, there is no reason to add many
+short lines, so you can make longer lines.
+
+In general, opening a new block should lead to a new line. Similarly, multiple
+instructions should be avoided on the same line. But some constructs make it
+more readable when those are perfectly aligned :
+
+A copy-paste bug in the following construct will be easier to spot :
+
+ | if (omult % idiv == 0) { omult /= idiv; idiv = 1; }
+ | if (idiv % omult == 0) { idiv /= omult; omult = 1; }
+ | if (imult % odiv == 0) { imult /= odiv; odiv = 1; }
+ | if (odiv % imult == 0) { odiv /= imult; imult = 1; }
+
+than in this one :
+
+ | if (omult % idiv == 0) {
+ | omult /= idiv;
+ | idiv = 1;
+ | }
+ | if (idiv % omult == 0) {
+ | idiv /= omult;
+ | omult = 1;
+ | }
+ | if (imult % odiv == 0) {
+ | imult /= odiv;
+ | odiv = 1;
+ | }
+ | if (odiv % imult == 0) {
+ | odiv /= imult;
+ | imult = 1;
+ | }
+
+What is important is not to mix styles. For instance there is nothing wrong
+with having many one-line "case" statements as long as most of them are this
+short like below :
+
+ | switch (*arg) {
+ | case 'A': ret = 1; break;
+ | case 'B': ret = 2; break;
+ | case 'C': ret = 4; break;
+ | case 'D': ret = 8; break;
+ | default : ret = 0; break;
+ | }
+
+Otherwise, prefer to have the "case" statement on its own line as in the
+example in section 1.2 about alignment. In any case, avoid to stack multiple
+control statements on the same line, so that it will never be needed to
+add two tab levels at once :
+
+Right :
+
+ | switch (*arg) {
+ | case 'A':
+ | if (ret < 0)
+ | ret = 1;
+ | break;
+ | default : ret = 0; break;
+ | }
+
+Wrong :
+
+ | switch (*arg) {
+ | case 'A': if (ret < 0)
+ | ret = 1;
+ | break;
+ | default : ret = 0; break;
+ | }
+
+Right :
+
+ | if (argc < 2)
+ | if (ret < 0)
+ | return -1;
+
+or Right :
+
+ | if (argc < 2)
+ | if (ret < 0) return -1;
+
+but Wrong :
+
+ | if (argc < 2) if (ret < 0) return -1;
+
+
+When complex conditions or expressions are broken into multiple lines, please
+do ensure that alignment is perfectly appropriate, and group all main operators
+on the same side (which you're free to choose as long as it does not change for
+every block). Putting binary operators on the right side is preferred as it does
+not interfere with alignment, but various people have their preferences.
+
+Right :
+
+ | if ((txn->flags & TX_NOT_FIRST) &&
+ | ((req->flags & BF_FULL) ||
+ | req->r < req->lr ||
+ | req->r > req->data + req->size - global.tune.maxrewrite)) {
+ | return 0;
+ | }
+
+Right :
+
+ | if ((txn->flags & TX_NOT_FIRST)
+ | && ((req->flags & BF_FULL)
+ | || req->r < req->lr
+ | || req->r > req->data + req->size - global.tune.maxrewrite)) {
+ | return 0;
+ | }
+
+Wrong :
+
+ | if ((txn->flags & TX_NOT_FIRST) &&
+ | ((req->flags & BF_FULL) ||
+ | req->r < req->lr
+ | || req->r > req->data + req->size - global.tune.maxrewrite)) {
+ | return 0;
+ | }
+
+If it makes the result more readable, parenthesis may even be closed on their
+own line in order to align with the opening one. Note that this should normally
+not be needed because such code would be too complex to be dug into.
+
+The "else" statement may either be merged with the closing "if" brace or lie on
+its own line. The latter is preferred but it adds one extra line to each control
+block which is annoying in short ones. However, if the "else" is followed by an
+"if", then it should really be on its own line and the rest of the if/else
+blocks must follow the same style.
+
+Right :
+
+ | if (a < b) {
+ | return a;
+ | }
+ | else {
+ | return b;
+ | }
+
+Right :
+
+ | if (a < b) {
+ | return a;
+ | } else {
+ | return b;
+ | }
+
+Right :
+
+ | if (a < b) {
+ | return a;
+ | }
+ | else if (a != b) {
+ | return b;
+ | }
+ | else {
+ | return 0;
+ | }
+
+Wrong :
+
+ | if (a < b) {
+ | return a;
+ | } else if (a != b) {
+ | return b;
+ | } else {
+ | return 0;
+ | }
+
+Wrong :
+
+ | if (a < b) {
+ | return a;
+ | }
+ | else if (a != b) {
+ | return b;
+ | } else {
+ | return 0;
+ | }
+
+
+4) Spacing
+----------
+
+Correctly spacing code is very important. When you have to spot a bug at 3am,
+you need it to be clear. When you expect other people to review your code, you
+want it to be clear and don't want them to get nervous when trying to find what
+you did.
+
+Always place spaces around all binary or ternary operators, commas, as well as
+after semi-colons and opening braces if the line continues :
+
+Right :
+
+ | int ret = 0;
+ | /* if (x >> 4) { x >>= 4; ret += 4; } */
+ | ret += (x >> 4) ? (x >>= 4, 4) : 0;
+ | val = ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
+
+Wrong :
+
+ | int ret=0;
+ | /* if (x>>4) {x>>=4;ret+=4;} */
+ | ret+=(x>>4)?(x>>=4,4):0;
+ | val=ret+((0xFFFFAA50U>>(x<<1))&3)+1;
+
+Never place spaces after unary operators (&, *, -, !, ~, ++, --) nor cast, as
+they might be confused with their binary counterpart, nor before commas or
+semicolons :
+
+Right :
+
+ | bit = !!(~len++ ^ -(unsigned char)*x);
+
+Wrong :
+
+ | bit = ! ! (~len++ ^ - (unsigned char) * x) ;
+
+Note that "sizeof" is a unary operator which is sometimes considered as a
+language keyword, but in no case is it a function. It does not require
+parenthesis so it is sometimes followed by spaces and sometimes not when
+there are no parenthesis. Most people do not really care as long as what
+is written is unambiguous.
+
+Braces opening a block must be preceded by one space unless the brace is
+placed on the first column :
+
+Right :
+
+ | if (argc < 2) {
+ | }
+
+Wrong :
+
+ | if (argc < 2){
+ | }
+
+Do not add unneeded spaces inside parenthesis, they just make the code less
+readable.
+
+Right :
+
+ | if (x < 4 && (!y || !z))
+ | break;
+
+Wrong :
+
+ | if ( x < 4 && ( !y || !z ) )
+ | break;
+
+Language keywords must all be followed by a space. This is true for control
+statements (do, for, while, if, else, return, switch, case), and for types
+(int, char, unsigned). As an exception, the last type in a cast does not take
+a space before the closing parenthesis. The "default" statement in a "switch"
+construct is generally just followed by the colon. However the colon after a
+"case" or "default" statement must be followed by a space.
+
+Right :
+
+ | if (nbargs < 2) {
+ | printf("Missing arg at %c\n", *(char *)ptr);
+ | for (i = 0; i < 10; i++) beep();
+ | return 0;
+ | }
+ | switch (*arg) {
+
+Wrong :
+
+ | if(nbargs < 2){
+ | printf("Missing arg at %c\n", *(char*)ptr);
+ | for(i = 0; i < 10; i++)beep();
+ | return 0;
+ | }
+ | switch(*arg) {
+
+Function calls are different, the opening parenthesis is always coupled to the
+function name without any space. But spaces are still needed after commas :
+
+Right :
+
+ | if (!init(argc, argv))
+ | exit(1);
+
+Wrong :
+
+ | if (!init (argc,argv))
+ | exit(1);
+
+
+5) Excess or lack of parenthesis
+--------------------------------
+
+Sometimes there are too many parenthesis in some formulas, sometimes there are
+too few. There are a few rules of thumb for this. The first one is to respect
+the compiler's advice. If it emits a warning and asks for more parenthesis to
+avoid confusion, follow the advice at least to shut the warning. For instance,
+the code below is quite ambiguous due to its alignment :
+
+ | if (var1 < 2 || var2 < 2 &&
+ | var3 != var4) {
+ | /* fail */
+ | return -3;
+ | }
+
+Note that this code does :
+
+ | if (var1 < 2 || (var2 < 2 && var3 != var4)) {
+ | /* fail */
+ | return -3;
+ | }
+
+But maybe the author meant :
+
+ | if ((var1 < 2 || var2 < 2) && var3 != var4) {
+ | /* fail */
+ | return -3;
+ | }
+
+A second rule to put parenthesis is that people don't always know operators
+precedence too well. Most often they have no issue with operators of the same
+category (eg: booleans, integers, bit manipulation, assignment) but once these
+operators are mixed, it causes them all sort of issues. In this case, it is
+wise to use parenthesis to avoid errors. One common error concerns the bit
+shift operators because they're used to replace multiplies and divides but
+don't have the same precedence :
+
+The expression :
+
+ | x = y * 16 + 5;
+
+becomes :
+
+ | x = y << 4 + 5;
+
+which is wrong because it is equivalent to :
+
+ | x = y << (4 + 5);
+
+while the following was desired instead :
+
+ | x = (y << 4) + 5;
+
+It is generally fine to write boolean expressions based on comparisons without
+any parenthesis. But on top of that, integer expressions and assignments should
+then be protected. For instance, there is an error in the expression below
+which should be safely rewritten :
+
+Wrong :
+
+ | if (var1 > 2 && var1 < 10 ||
+ | var1 > 2 + 256 && var2 < 10 + 256 ||
+ | var1 > 2 + 1 << 16 && var2 < 10 + 2 << 16)
+ | return 1;
+
+Right (may remove a few parenthesis depending on taste) :
+
+ | if ((var1 > 2 && var1 < 10) ||
+ | (var1 > (2 + 256) && var2 < (10 + 256)) ||
+ | (var1 > (2 + (1 << 16)) && var2 < (10 + (1 << 16))))
+ | return 1;
+
+The "return" statement is not a function, so it takes no argument. It is a
+control statement which is followed by the expression to be returned. It does
+not need to be followed by parenthesis :
+
+Wrong :
+
+ | int ret0()
+ | {
+ | return(0);
+ | }
+
+Right :
+
+ | int ret0()
+ | {
+ | return 0;
+ | }
+
+Parentheses are also found in type casts. Type casting should be avoided as
+much as possible, especially when it concerns pointer types. Casting a pointer
+disables the compiler's type checking and is the best way to get caught doing
+wrong things with data not the size you expect. If you need to manipulate
+multiple data types, you can use a union instead. If the union is really not
+convenient and casts are easier, then try to isolate them as much as possible,
+for instance when initializing function arguments or in another function. Not
+proceeding this way causes huge risks of not using the proper pointer without
+any notification, which is especially true during copy-pastes.
+
+Wrong :
+
+ | void *check_private_data(void *arg1, void *arg2)
+ | {
+ | char *area;
+ |
+ | if (*(int *)arg1 > 1000)
+ | return NULL;
+ |         if (memcmp(*(const char *)arg2, "send(", 5) != 0)
+ | return NULL;
+ | area = malloc(*(int *)arg1);
+ | if (!area)
+ | return NULL;
+ | memcpy(area, *(const char *)arg2 + 5, *(int *)arg1);
+ | return area;
+ | }
+
+Right :
+
+ | void *check_private_data(void *arg1, void *arg2)
+ | {
+ | char *area;
+ | int len = *(int *)arg1;
+ | const char *msg = arg2;
+ |
+ | if (len > 1000)
+ | return NULL;
+ | if (memcmp(msg, "send(", 5) != 0)
+ | return NULL;
+ | area = malloc(len);
+ | if (!area)
+ | return NULL;
+ | memcpy(area, msg + 5, len);
+ | return area;
+ | }
+
+
+6) Ambiguous comparisons with zero or NULL
+------------------------------------------
+
+In C, '0' has no type, or it has the type of the variable it is assigned to.
+Comparing a variable or a return value with zero means comparing with the
+representation of zero for this variable's type. For a boolean, zero is false.
+For a pointer, zero is NULL. Very often, to make things shorter, it is fine to
+use the '!' unary operator to compare with zero, as it is shorter and easier to
+remember or understand than a plain '0'. Since the '!' operator is read "not",
+helps read code faster when what follows it makes sense as a boolean, and it is
+often much more appropriate than a comparison with zero which makes an equal
+sign appear at an undesirable place. For instance :
+
+ | if (!isdigit(*c) && !isspace(*c))
+ | break;
+
+is easier to understand than :
+
+ | if (isdigit(*c) == 0 && isspace(*c) == 0)
+ | break;
+
+For a char this "not" operator can be remembered as "no remaining char", and the
+absence of comparison to zero implies existence of the tested entity, hence the
+simple strcpy() implementation below which automatically stops once the last
+zero is copied :
+
+ | void my_strcpy(char *d, const char *s)
+ | {
+ | while ((*d++ = *s++));
+ | }
+
+Note the double parenthesis in order to avoid the compiler telling us it looks
+like an equality test.
+
+For a string or more generally any pointer, this test may be understood as an
+existence test or a validity test, as the only pointer which will fail to
+validate equality is the NULL pointer :
+
+ | area = malloc(1000);
+ | if (!area)
+ | return -1;
+
+However sometimes it can fool the reader. For instance, strcmp() precisely is
+one of such functions whose return value can make one think the opposite due to
+its name which may be understood as "if strings compare...". Thus it is strongly
+recommended to perform an explicit comparison with zero in such a case, and it
+makes sense considering that the comparison's operator is the same that is
+wanted to compare the strings (note that current config parser lacks a lot in
+this regard) :
+
+ strcmp(a, b) == 0 <=> a == b
+ strcmp(a, b) != 0 <=> a != b
+ strcmp(a, b) < 0 <=> a < b
+ strcmp(a, b) > 0 <=> a > b
+
+Avoid this :
+
+ | if (strcmp(arg, "test"))
+ | printf("this is not a test\n");
+ |
+ | if (!strcmp(arg, "test"))
+ | printf("this is a test\n");
+
+Prefer this :
+
+ | if (strcmp(arg, "test") != 0)
+ | printf("this is not a test\n");
+ |
+ | if (strcmp(arg, "test") == 0)
+ | printf("this is a test\n");
+
+
+7) System call returns
+----------------------
+
+This is not directly a matter of coding style but more of bad habits. It is
+important to check for the correct value upon return of syscalls. The proper
+return code indicating an error is described in its man page. There is no
+reason to consider wider ranges than what is indicated. For instance, it is
+common to see such a thing :
+
+ | if ((fd = open(file, O_RDONLY)) < 0)
+ | return -1;
+
+This is wrong. The man page says that -1 is returned if an error occurred. It
+does not suggest that any other negative value will be an error. It is possible
+that a few such issues have been left in existing code. They are bugs for which
+fixes are accepted, even though they're currently harmless since open() is not
+known for returning negative values at the moment.
+
+
+8) Declaring new types, names and values
+----------------------------------------
+
+Please refrain from using "typedef" to declare new types, they only obfuscate
+the code. The reader never knows whether he's manipulating a scalar type or a
+struct. For instance it is not obvious why the following code fails to build :
+
+ | int delay_expired(timer_t exp, timer_us_t now)
+ | {
+ | return now >= exp;
+ | }
+
+With the types declared in another file this way :
+
+ | typedef unsigned int timer_t;
+ | typedef struct timeval timer_us_t;
+
+This cannot work because we're comparing a scalar with a struct, which does
+not make sense. Without a typedef, the function would have been written this
+way without any ambiguity and would not have failed :
+
+ | int delay_expired(unsigned int exp, struct timeval *now)
+ | {
+ |         return now->tv_sec >= exp;
+ | }
+
+Declaring special values may be done using enums. Enums are a way to define
+structured integer values which are related to each other. They are perfectly
+suited for state machines. While the first element is always assigned the zero
+value, not everybody knows that, especially people working with multiple
+languages all the day. For this reason it is recommended to explicitly force
+the first value even if it's zero. The last element should be followed by a
+comma if it is planned that new elements might later be added, this will make
+later patches shorter. Conversely, if the last element is placed in order to
+get the number of possible values, it must not be followed by a comma and must
+be preceded by a comment :
+
+ | enum {
+ | first = 0,
+ | second,
+ | third,
+ | fourth,
+ | };
+
+
+ | enum {
+ | first = 0,
+ | second,
+ | third,
+ | fourth,
+ | /* nbvalues must always be placed last */
+ | nbvalues
+ | };
+
+Structure names should be short enough not to mangle function declarations,
+and explicit enough to avoid confusion (which is the most important thing).
+
+Wrong :
+
+ | struct request_args { /* arguments on the query string */
+ | char *name;
+ | char *value;
+ | struct misc_args *next;
+ | };
+
+Right :
+
+ | struct qs_args { /* arguments on the query string */
+ | char *name;
+ | char *value;
+ | struct qs_args *next;
+ | };
+
+
+When declaring new functions or structures, please do not use CamelCase, which
+is a style where upper and lower case are mixed in a single word. It causes a
+lot of confusion when words are composed from acronyms, because it's hard to
+stick to a rule. For instance, a function designed to generate an ISN (initial
+sequence number) for a TCP/IP connection could be called :
+
+ - generateTcpipIsn()
+ - generateTcpIpIsn()
+ - generateTcpIpISN()
+ - generateTCPIPISN()
+ etc...
+
+None is right, none is wrong, these are just preferences which might change
+along the code. Instead, please use an underscore to separate words. Lowercase
+is preferred for the words, but if acronyms are upcased it's not dramatic. The
+real advantage of this method is that it creates unambiguous levels even for
+short names.
+
+Valid examples :
+
+ - generate_tcpip_isn()
+ - generate_tcp_ip_isn()
+ - generate_TCPIP_ISN()
+ - generate_TCP_IP_ISN()
+
+Another example is easy to understand when 3 arguments are involved in naming
+the function :
+
+Wrong (naming conflict) :
+
+ | /* returns A + B * C */
+ | int mulABC(int a, int b, int c)
+ | {
+ | return a + b * c;
+ | }
+ |
+ | /* returns (A + B) * C */
+ | int mulABC(int a, int b, int c)
+ | {
+ | return (a + b) * c;
+ | }
+
+Right (unambiguous naming) :
+
+ | /* returns A + B * C */
+ | int mul_a_bc(int a, int b, int c)
+ | {
+ | return a + b * c;
+ | }
+ |
+ | /* returns (A + B) * C */
+ | int mul_ab_c(int a, int b, int c)
+ | {
+ | return (a + b) * c;
+ | }
+
+Whenever you manipulate pointers, try to declare them as "const", as it will
+save you from many accidental misuses and will only cause warnings to be
+emitted when there is a real risk. In the examples below, it is possible to
+call my_strcpy() with a const string only in the first declaration. Note that
+people who ignore "const" are often the ones who cast a lot and who complain
+about segfaults when using strtok() !
+
+Right :
+
+ | void my_strcpy(char *d, const char *s)
+ | {
+ | while ((*d++ = *s++));
+ | }
+ |
+ | void say_hello(char *dest)
+ | {
+ | my_strcpy(dest, "hello\n");
+ | }
+
+Wrong :
+
+ | void my_strcpy(char *d, char *s)
+ | {
+ | while ((*d++ = *s++));
+ | }
+ |
+ | void say_hello(char *dest)
+ | {
+ | my_strcpy(dest, "hello\n");
+ | }
+
+
+9) Getting macros right
+-----------------------
+
+It is very common for macros to do the wrong thing when used in a way their
+author did not have in mind. For this reason, macros must always be named with
+uppercase letters only. This is the only way to catch the developer's eye when
+using them, so that they double-check whether they are taking a risk or not. First,
+macros must never ever be terminated by a semi-colon, or they will close the
+wrong block once in a while. For instance, the following will cause a build
+error before the "else" due to the double semi-colon :
+
+Wrong :
+
+ | #define WARN printf("warning\n");
+ | ...
+ | if (a < 0)
+ | WARN;
+ | else
+ | a--;
+
+Right :
+
+ | #define WARN printf("warning\n")
+
+If multiple instructions are needed, then use a do { } while (0) block, which
+is the only construct which respects *exactly* the semantics of a single
+instruction :
+
+ | #define WARN do { printf("warning\n"); log("warning\n"); } while (0)
+ | ...
+ |
+ | if (a < 0)
+ | WARN;
+ | else
+ | a--;
+
+Second, do not put unprotected control statements in macros, they will
+definitely cause bugs :
+
+Wrong :
+
+ | #define WARN if (verbose) printf("warning\n")
+ | ...
+ | if (a < 0)
+ | WARN;
+ | else
+ | a--;
+
+Which is equivalent to the undesired form below :
+
+ | if (a < 0)
+ | if (verbose)
+ | printf("warning\n");
+ | else
+ | a--;
+
+Right way to do it :
+
+ | #define WARN do { if (verbose) printf("warning\n"); } while (0)
+ | ...
+ | if (a < 0)
+ | WARN;
+ | else
+ | a--;
+
+Which is equivalent to :
+
+ | if (a < 0)
+ | do { if (verbose) printf("warning\n"); } while (0);
+ | else
+ | a--;
+
+Macro parameters must always be surrounded by parenthesis, and must never be
+duplicated in the same macro unless explicitly stated. Also, macros must not be
+defined with operators without surrounding parenthesis. The MIN/MAX macros are
+a pretty common example of multiple misuses, but this happens as early as when
+using bit masks. Most often, in case of any doubt, try to use inline functions
+instead.
+
+Wrong :
+
+ | #define MIN(a, b) a < b ? a : b
+ |
+ | /* returns 2 * min(a,b) + 1 */
+ | int double_min_p1(int a, int b)
+ | {
+ | return 2 * MIN(a, b) + 1;
+ | }
+
+What this will do :
+
+ | int double_min_p1(int a, int b)
+ | {
+ | return 2 * a < b ? a : b + 1;
+ | }
+
+Which is equivalent to :
+
+ | int double_min_p1(int a, int b)
+ | {
+ | return (2 * a) < b ? a : (b + 1);
+ | }
+
+The first thing to fix is to surround the macro definition with parenthesis to
+avoid this mistake :
+
+ | #define MIN(a, b) (a < b ? a : b)
+
+But this is still not enough, as can be seen in this example :
+
+ | /* compares either a or b with c */
+ | int min_ab_c(int a, int b, int c)
+ | {
+ | return MIN(a ? a : b, c);
+ | }
+
+Which is equivalent to :
+
+ | int min_ab_c(int a, int b, int c)
+ | {
+ | return (a ? a : b < c ? a ? a : b : c);
+ | }
+
+Which in turn means a totally different thing due to precedence :
+
+ | int min_ab_c(int a, int b, int c)
+ | {
+ | return (a ? a : ((b < c) ? (a ? a : b) : c));
+ | }
+
+This can be fixed by surrounding *each* argument in the macro with parenthesis:
+
+ | #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+But this is still not enough, as can be seen in this example :
+
+ | int min_ap1_b(int a, int b)
+ | {
+ | return MIN(++a, b);
+ | }
+
+Which is equivalent to :
+
+ | int min_ap1_b(int a, int b)
+ | {
+ | return ((++a) < (b) ? (++a) : (b));
+ | }
+
+Again, this is wrong because "a" is incremented twice if below b. The only way
+to fix this is to use a compound statement and to assign each argument exactly
+once to a local variable of the same type :
+
+ | #define MIN(a, b) ({ typeof(a) __a = (a); typeof(b) __b = (b); \
+ | ((__a) < (__b) ? (__a) : (__b)); \
+ | })
+
+At this point, using static inline functions is much cleaner if a single type
+is to be used :
+
+ | static inline int min(int a, int b)
+ | {
+ | return a < b ? a : b;
+ | }
+
+
+10) Includes
+------------
+
+Includes are as much as possible listed in alphabetically ordered groups :
+ - the includes more or less system-specific (sys/*, netinet/*, ...)
+ - the libc-standard includes (those without any path component)
+ - includes from the local "import" subdirectory
+ - includes from the local "haproxy" subdirectory
+
+Each section is just visually delimited from the other ones using an empty
+line. The two first ones above may be merged into a single section depending on
+developer's preference. Please do not copy-paste include statements from other
+files. Having too many includes significantly increases build time and makes it
+hard to find which ones are needed later. Just include what you need and if
+possible in alphabetical order so that when something is missing, it becomes
+obvious where to look for it and where to add it.
+
+All files should include <haproxy/api.h> because this is where build options
+are prepared.
+
+HAProxy header files are split in two, those exporting the types only (named
+with a trailing "-t") and those exporting variables, functions and inline
+functions. Types, structures, enums and #defines must go into the types files
+which are the only ones that may be included by other type files. Function
+prototypes and inlined functions must go into the main files. This split is
+because of inlined functions which cross-reference types from other files,
+which cause a chicken-and-egg problem if the functions and types are declared
+at the same place.
+
+Include files must be protected against multiple inclusion using the common
+#ifndef/#define/#endif trick with a tag derived from the include file and its
+location.
+
+
+11) Comments
+------------
+
+Comments are preferably of the standard 'C' form using /* */. The C++ form "//"
+are tolerated for very short comments (eg: a word or two) but should be avoided
+as much as possible. Multi-line comments are made with each intermediate line
+starting with a star aligned with the first one, as in this example :
+
+ | /*
+ | * This is a multi-line
+ | * comment.
+ | */
+
+If multiple code lines need a short comment, try to align them so that you can
+have multi-line sentences. This is rarely needed, only for really complex
+constructs.
+
+Do not tell what you're doing in comments, but explain why you're doing it if
+it seems not to be obvious. Also *do* indicate at the top of function what they
+accept and what they don't accept. For instance, strcpy() only accepts output
+buffers at least as large as the input buffer, and does not support any NULL
+pointer. There is nothing wrong with that if the caller knows it.
+
+Wrong use of comments :
+
+ | int flsnz8(unsigned int x)
+ | {
+ | int ret = 0; /* initialize ret */
+ | if (x >> 4) { x >>= 4; ret += 4; } /* add 4 to ret if needed */
+ | return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1; /* add ??? */
+ | }
+ | ...
+ | bit = ~len + (skip << 3) + 9; /* update bit */
+
+Right use of comments :
+
+ | /* This function returns the position of the highest bit set in the lowest
+ | * byte of <x>, between 0 and 7. It only works if <x> is non-null. It uses
+ | * a 32-bit value as a lookup table to return one of 4 values for the
+ | * highest 16 possible 4-bit values.
+ | */
+ | int flsnz8(unsigned int x)
+ | {
+ | int ret = 0;
+ | if (x >> 4) { x >>= 4; ret += 4; }
+ | return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
+ | }
+ | ...
+ | bit = ~len + (skip << 3) + 9; /* (skip << 3) + (8 - len), saves 1 cycle */
+
+
+12) Use of assembly
+-------------------
+
+There are many projects where use of assembly code is not welcome. There is no
+problem with use of assembly in haproxy, provided that :
+
+ a) an alternate C-form is provided for architectures not covered
+ b) the code is small enough and well commented enough to be maintained
+
+It is important to take care of various incompatibilities between compiler
+versions, for instance regarding output and clobbered registers. There are
+a number of documentations on the subject on the net. Anyway if you are
+fiddling with assembly, you probably know that already.
+
+Example :
+ | /* gcc does not know when it can safely divide 64 bits by 32 bits. Use this
+ | * function when you know for sure that the result fits in 32 bits, because
+ | * it is optimal on x86 and on 64bit processors.
+ | */
+ | static inline unsigned int div64_32(unsigned long long o1, unsigned int o2)
+ | {
+ | unsigned int result;
+ | #ifdef __i386__
+ | asm("divl %2"
+ | : "=a" (result)
+ | : "A"(o1), "rm"(o2));
+ | #else
+ | result = o1 / o2;
+ | #endif
+ | return result;
+ | }
+
+
+13) Pointers
+------------
+
+A lot could be said about pointers, there's enough to fill entire books. Misuse
+of pointers is one of the primary reasons for bugs in haproxy, and this rate
+has significantly increased with the use of threads. Moreover, bogus pointers
+cause the hardest to analyse bugs, because usually they result in modifications
+to reassigned areas or accesses to unmapped areas, and in each case, bugs that
+strike very far away from where they were located. Some bugs have already taken
+up to 3 weeks of full time analysis, which has a severe impact on the project's
+ability to make forward progress on important features. For this reason, code
+that doesn't look robust enough or that doesn't follow some of the rules below
+will be rejected, and may even be reverted after being merged if the trouble is
+detected late!
+
+
+13.1) No test before freeing
+----------------------------
+
+All platforms where haproxy is supported have a well-defined and documented
+behavior for free(NULL), which is to do nothing at all. In other words, free()
+does test for the pointer's nullity. As such, there is no point in testing
+if a pointer is NULL or not before calling free(). And further, you must not
+do it, because it adds some confusion to the reader during debugging sessions,
+making one think that the code's authors weren't very sure about what they
+were doing. This will not cause a bug but will result in your code to get
+rejected.
+
+Wrong call to free :
+
+ | static inline int blah_free(struct blah *blah)
+ | {
+ | if (blah->str1)
+ | free(blah->str1);
+ | if (blah->str2)
+ | free(blah->str2);
+ | free(blah);
+ | }
+
+Correct call to free :
+
+ | static inline int blah_free(struct blah *blah)
+ | {
+ | free(blah->str1);
+ | free(blah->str2);
+ | free(blah);
+ | }
+
+
+13.2) No dangling pointers
+--------------------------
+
+Pointers are very commonly used as booleans: if they're not NULL, then the
+area they point to is valid and may be used. This is convenient for many things
+and is even emphasized with threads where they can atomically be swapped with
+another value (even NULL), and as such provide guaranteed atomic resource
+allocation and sharing.
+
+The problem with this is when someone forgets to delete a pointer when an area
+is no longer valid, because this may result in the pointer being accessed later
+and pointing to a wrong location, one that was reallocated for something else
+and causing all sort of nastiness like crashes or memory corruption. Moreover,
+thanks to the memory pools, it is extremely likely that a just released pointer
+will be reassigned to a similar object with comparable values (flags etc) at
+the same positions, making tests apparently succeed for a while. Some such bugs
+have gone undetected for several years.
+
+The rule is pretty simple:
+
+ +-----------------------------------------------------------------+
+ | NO REACHABLE POINTER MAY EVER POINT TO AN UNREACHABLE LOCATION. |
+ +-----------------------------------------------------------------+
+
+By "reachable pointer", here we mean a pointer that is accessible from a
+reachable structure or a global variable. This means that any pointer found
+anywhere in any structure in the code may always be dereferenced. This can
+seem obvious but this is not always enforced.
+
+This means that when freeing an area, the pointer that was used to find that
+area must be overwritten with NULL, and all other such pointers must as well
+if any. It is one case where one can find more convenient to write the NULL
+on the same line as the call to free() to make things easier to check. Be
+careful about any potential "if" when doing this.
+
+Wrong use of free :
+
+ | static inline int blah_recycle(struct blah *blah)
+ | {
+ | free(blah->str1);
+ | free(blah->str2);
+ | }
+
+Correct use of free :
+
+ | static inline int blah_recycle(struct blah *blah)
+ | {
+ | free(blah->str1); blah->str1 = NULL;
+ | free(blah->str2); blah->str2 = NULL;
+ | }
+
+Sometimes the code doesn't permit this to be done. It is not a matter of code
+but a matter of architecture. Example:
+
+Initialization:
+
+ | static struct foo *foo_init()
+ | {
+ | struct foo *foo;
+ | struct bar *bar;
+ |
+ | foo = pool_alloc(foo_head);
+ | bar = pool_alloc(bar_head);
+ | if (!foo || !bar)
+ | goto fail;
+ | foo->bar = bar;
+ | ...
+ | }
+
+Scheduled task 1:
+
+ | static inline int foo_timeout(struct foo *foo)
+ | {
+ | free(foo->bar);
+ | free(foo);
+ | }
+
+Scheduled task 2:
+
+ | static inline int bar_timeout(struct bar *bar)
+ | {
+ | free(bar);
+ | }
+
+Here it's obvious that if "bar" times out, it will be freed but its pointer in
+"foo" will remain here, and if foo times out just after, it will lead to a
+double free. Or worse, if another instance allocates a pointer and receives bar
+again, when foo times out, it will release the old bar pointer which now points
+to a new object, and the code using that new object will crash much later, or
+even worse, will share the same area as yet another instance having inherited
+that pointer again.
+
+Here this simply means that the data model is wrong. If bar may be freed alone,
+it MUST have a pointer to foo so that bar->foo->bar is set to NULL to let foo
+finish its life peacefully. This also means that the code dealing with foo must
+be written in a way to support bar's leaving.
+
+
+13.3) Don't abuse pointers as booleans
+--------------------------------------
+
+Given the common use of a pointer to know if the area it points to is valid,
+there is a big incentive in using such pointers as booleans to describe
+something a bit higher level, like "is the user authenticated". This must not
+be done. The reason stems from the points above. Initially this perfectly
+matches and the code is simple. Then later some extra options need to be added,
+and more pointers are needed, all allocated together. At this point they all
+start to become their own booleans, supposedly always equivalent, but if that
+were true, they would be a single area with a single pointer. And things start
+to fall apart with some code areas relying on one pointer for the condition and
+other ones relying on other pointers. Pointers may be substituted with "flags"
+or "present in list" etc here. And from this point, things quickly degrade with
+pointers needing to remain set even if pointing to wrong areas, just for the
+sake of not being NULL and not breaking some assumptions. At this point the
+bugs are already there and the code is not trustable anymore.
+
+The only way to avoid this is to strictly respect this rule: pointers do not
+represent a functionality but a storage area. Of course it is very frequent to
+consider that if an optional string is not set, a feature is not enabled. This
+can be fine to some extents. But as soon as any slightest condition is added
+anywhere into the mux, the code relying on the pointer must be replaced with
+something else so that the pointer may live its own life and be released (and
+reset) earlier if needed.
+
+
+13.4) Mixing const and non-const
+--------------------------------
+
+Something often encountered, especially when assembling error messages, is
+functions that collect strings, assemble them into larger messages and free
+everything. The problem here is that if strings are defined as variables, there
+will rightfully be build warnings when reporting string constants such as bare
+keywords or messages, and if strings are defined as constants, it is not
+possible to free them. The temptation is sometimes huge to force some free()
+calls on casted strings. Do not do that! It will inevitably lead to someone
+getting caught passing a constant string that will make the process crash (if
+lucky). Document the expectations, indicate that all arguments must be freeable
+and that the caller must be capable of strdup(), and make your function support
+NULLs and document it (so that callers can deal with a failing strdup() on
+allocation error).
+
+One valid alternative is to use a secondary channel to indicate whether the
+message may be freed or not. A flag in a complex structure can be used for this
+purpose, for example. If you are certain that your strings are aligned to a
+certain number of bytes, it can be possible to instrument the code to use the
+lowest bit to indicate the need to free (e.g. by always adding one to every
+const string). But such a solution will require good enough instrumentation so
+that it doesn't constitute a new set of traps.
+
+
+13.5) No pointer casts
+----------------------
+
+Except in rare occasions caused by legacy APIs (e.g. sockaddr) or special cases
+which explicitly require a form of aliasing, there is no valid reason for
+casting pointers, and usually this is used to hide other problems that will
+strike later. The only suitable type of cast is the cast from the generic void*
+used to store a context for example. But in C, there is no need to cast to nor
+from void*, so this is not required. However those coming from C++ tend to be
+used to this practice, and others argue that it makes the intent more visible.
+
+As a corollary, do not abuse void*. Placing void* everywhere to avoid casting
+is a bad practice as well. The use of void* is only for generic functions or
+structures which do not have a limited set of types supported. When only a few
+types are supported, generally their type can be passed using a side channel,
+and the void* can be turned into a union that makes the code more readable and
+more verifiable.
+
+An alternative in haproxy is to use a pointer to an obj_type enum. Usually it
+is placed at the beginning of a structure. It works like a void* except that
+the type is read directly from the object. This is convenient when a small set
+of remote objects may be attached to another one because a single of them will
+match a non-null pointer (e.g. a connection or an applet).
+
+Example:
+
+ | static inline int blah_free(struct blah *blah)
+ | {
+ | /* only one of them (at most) will not be null */
+ | pool_free(pool_head_connection, objt_conn(blah->target));
+ | pool_free(pool_head_appctx, objt_appctx(blah->target));
+ | pool_free(pool_head_stream, objt_stream(blah->target));
+ | blah->target = NULL;
+ | }
+
+
+13.6) Extreme caution when using non-canonical pointers
+-------------------------------------------------------
+
+It can be particularly convenient to embed some logic in the unused bits or
+code points of a pointer. Indeed, when it is known by design that a given
+pointer will always follow a certain alignment, a few lower bits will always
+remain zero, and as such may be used as optional flags. For example, the ebtree
+code uses the lowest bit to differentiate left/right attachments to the parent
+and node/leaf in branches. It is also known that values very close to NULL will
+never represent a valid pointer, and the thread-safe MT_LIST code uses this to
+lock visited pointers.
+
+There are a few rules to respect in order to do this:
+ - the deviations from the canonical pointers must be exhaustively documented
+ where the pointer type is defined, and the whole control logic with its
+ implications and possible and impossible cases must be enumerated as well ;
+
+ - make sure that the operations will work on every supported platform, which
+ includes 32-bit platforms where structures may be aligned on as little as
+ 32-bit. 32-bit alignment leaves only two LSB available. When doing so, make
+ sure the target structures are not labelled with the "packed" attribute, or
+ that they're always perfectly aligned. All platforms where haproxy runs
+ have their NULL pointer mapped at address zero, and use page sizes at least
+   4096 bytes large, leaving all values from 1 to 4095 unused. Anything
+ outside of this is unsafe. In particular, never use negative numbers to
+ represent a supposedly invalid address. On 32-bits platforms it will often
+ correspond to a system address or a special page. Always try a variety of
+ platforms when doing such a thing.
+
+ - the code must not use such pointers as booleans anymore even if it is known
+ that "it works" because that keeps a doubt open for the reviewer. Only the
+ canonical pointer may be tested. There can be a rare exception which is if
+ this is on a critical path where severe performance degradation may result
+ from this. In this case, *each* of the checks must be duly documented and
+ the equivalent BUG_ON() instances must be placed to prove the claim.
+
+ - some inline functions (or macros) must be used to turn the pointers to/from
+ their canonical form so that the regular code doesn't have to see the
+ operations, and so that the representation may be easily adjusted in the
+ future. A few comments indicating to a human how to turn a pointer back and
+ forth from inside a debugger will be appreciated, as macros often end up
+ not being trivially readable nor directly usable.
+
+ - do not use int types to cast the pointers, this will only work on 32-bit
+ platforms. While "long" is usually fine, it is not recommended anymore due
+ to the Windows platform being LLP64 and having it set to 32 bits. And
+ "long long" isn't good either for always being 64 bits. More suitable types
+ are ptrdiff_t or size_t. Note that while those were not available everywhere
+   in the early days of haproxy, size_t is now heavily used and known to work
+ everywhere. And do not perform the operations on the pointers, only on the
+ integer types (and cast back again). Some compilers such as gcc are
+ extremely picky about this and will often emit wrong code when they see
+   equality conditions they believe are impossible and decide to optimize them
+ away.
+
+
+13.7) Pointers in unions
+------------------------
+
+Before placing multiple aliasing pointers inside a same union, there MUST be a
+SINGLE well-defined way to figure them out from each other. It may be thanks to
+a side-channel information (as done in the samples with a defined type), it may
+be based on in-area information (as done using obj_types), or any other trusted
+solution. In any case, if pointers are mixed with any other type (integer or
+float) in a union, there must be a very simple way to distinguish them, and not
+a platform-dependent nor compiler-dependent one.
diff --git a/doc/configuration.txt b/doc/configuration.txt
new file mode 100644
index 0000000..a1f15fc
--- /dev/null
+++ b/doc/configuration.txt
@@ -0,0 +1,26732 @@
+ ----------------------
+ HAProxy
+ Configuration Manual
+ ----------------------
+ version 2.9
+ 2024/02/15
+
+
+This document covers the configuration language as implemented in the version
+specified above. It does not provide any hints, examples, or advice. For such
+documentation, please refer to the Reference Manual or the Architecture Manual.
+The summary below is meant to help you find sections by name and navigate
+through the document.
+
+Note to documentation contributors :
+ This document is formatted with 80 columns per line, with even number of
+ spaces for indentation and without tabs. Please follow these rules strictly
+ so that it remains easily printable everywhere. If a line needs to be
+ printed verbatim and does not fit, please end each line with a backslash
+ ('\') and continue on next line, indented by two characters. It is also
+ sometimes useful to prefix all output lines (logs, console outputs) with 3
+ closing angle brackets ('>>>') in order to emphasize the difference between
+ inputs and outputs when they may be ambiguous. If you add sections,
+ please update the summary below for easier searching.
+
+
+Summary
+-------
+
+1. Quick reminder about HTTP
+1.1. The HTTP transaction model
+1.2. Terminology
+1.3. HTTP request
+1.3.1. The request line
+1.3.2. The request headers
+1.4. HTTP response
+1.4.1. The response line
+1.4.2. The response headers
+
+2. Configuring HAProxy
+2.1. Configuration file format
+2.2. Quoting and escaping
+2.3. Environment variables
+2.4. Conditional blocks
+2.5. Time format
+2.6. Size format
+2.7. Examples
+
+3. Global parameters
+3.1. Process management and security
+3.2. Performance tuning
+3.3. Debugging
+3.4. Userlists
+3.5. Peers
+3.6. Mailers
+3.7. Programs
+3.8. HTTP-errors
+3.9. Rings
+3.10. Log forwarding
+3.11. HTTPClient tuning
+
+4. Proxies
+4.1. Proxy keywords matrix
+4.2. Alphabetically sorted keywords reference
+4.3. Actions keywords matrix
+4.4. Alphabetically sorted actions reference
+
+5. Bind and server options
+5.1. Bind options
+5.2. Server and default-server options
+5.3. Server DNS resolution
+5.3.1. Global overview
+5.3.2. The resolvers section
+
+6. Cache
+6.1. Limitation
+6.2. Setup
+6.2.1. Cache section
+6.2.2. Proxy section
+
+7. Using ACLs and fetching samples
+7.1. ACL basics
+7.1.1. Matching booleans
+7.1.2. Matching integers
+7.1.3. Matching strings
+7.1.4. Matching regular expressions (regexes)
+7.1.5. Matching arbitrary data blocks
+7.1.6. Matching IPv4 and IPv6 addresses
+7.2. Using ACLs to form conditions
+7.3. Fetching samples
+7.3.1. Converters
+7.3.2. Fetching samples from internal states
+7.3.3. Fetching samples at Layer 4
+7.3.4. Fetching samples at Layer 5
+7.3.5. Fetching samples from buffer contents (Layer 6)
+7.3.6. Fetching HTTP samples (Layer 7)
+7.3.7. Fetching samples for developers
+7.4. Pre-defined ACLs
+
+8. Logging
+8.1. Log levels
+8.2. Log formats
+8.2.1. Default log format
+8.2.2. TCP log format
+8.2.3. HTTP log format
+8.2.4. HTTPS log format
+8.2.5. Error log format
+8.2.6. Custom log format
+8.3. Advanced logging options
+8.3.1. Disabling logging of external tests
+8.3.2. Logging before waiting for the stream to terminate
+8.3.3. Raising log level upon errors
+8.3.4. Disabling logging of successful connections
+8.4. Timing events
+8.5. Stream state at disconnection
+8.6. Non-printable characters
+8.7. Capturing HTTP cookies
+8.8. Capturing HTTP headers
+8.9. Examples of logs
+
+9. Supported filters
+9.1. Trace
+9.2. HTTP compression
+9.3. Stream Processing Offload Engine (SPOE)
+9.4. Cache
+9.5. fcgi-app
+9.6. OpenTracing
+9.7. Bandwidth limitation
+
+10. FastCGI applications
+10.1. Setup
+10.1.1. Fcgi-app section
+10.1.2. Proxy section
+10.1.3. Example
+10.2. Default parameters
+10.3. Limitations
+
+11. Address formats
+11.1. Address family prefixes
+11.2. Socket type prefixes
+11.3. Protocol prefixes
+
+
+1. Quick reminder about HTTP
+----------------------------
+
+When HAProxy is running in HTTP mode, both the request and the response are
+fully analyzed and indexed, thus it becomes possible to build matching criteria
+on almost anything found in the contents.
+
+However, it is important to understand how HTTP requests and responses are
+formed, and how HAProxy decomposes them. It will then become easier to write
+correct rules and to debug existing configurations.
+
+First, HTTP is standardized by a series of RFC that HAProxy follows as closely
+as possible:
+ - RFC 9110: HTTP Semantics (explains the meaning of protocol elements)
+ - RFC 9111: HTTP Caching (explains the rules to follow for an HTTP cache)
+ - RFC 9112: HTTP/1.1 (representation, interoperability rules, security)
+ - RFC 9113: HTTP/2 (representation, interoperability rules, security)
+ - RFC 9114: HTTP/3 (representation, interoperability rules, security)
+
+In addition to these, RFC 8999 to 9002 specify the QUIC transport layer used by
+the HTTP/3 protocol.
+
+
+1.1. The HTTP transaction model
+-------------------------------
+
+The HTTP protocol is transaction-driven. This means that each request will lead
+to one and only one response. Originally, with version 1.0 of the protocol,
+there was a single request per connection: a TCP connection is established from
+the client to the server, a request is sent by the client over the connection,
+the server responds, and the connection is closed. A new request then involves
+a new connection :
+
+ [CON1] [REQ1] ... [RESP1] [CLO1] [CON2] [REQ2] ... [RESP2] [CLO2] ...
+
+In this mode, often called the "HTTP close" mode, there are as many connection
+establishments as there are HTTP transactions. Since the connection is closed
+by the server after the response, the client does not need to know the content
+length, it considers that the response is complete when the connection closes.
+This also means that if some responses are truncated due to network errors, the
+client could mistakenly think a response was complete, and this used to cause
+truncated images to be rendered on screen sometimes.
+
+Due to the transactional nature of the protocol, it was possible to improve it
+to avoid closing a connection between two subsequent transactions. In this mode
+however, it is mandatory that the server indicates the content length for each
+response so that the client does not wait indefinitely. For this, a special
+header is used: "Content-length". This mode is called the "keep-alive" mode,
+and arrived with HTTP/1.1 (some HTTP/1.0 agents support it), and connections
+that are reused between requests are called "persistent connections":
+
+ [CON] [REQ1] ... [RESP1] [REQ2] ... [RESP2] [CLO] ...
+
+Its advantages are a reduced latency between transactions, less processing
+power required on the server side, and the ability to detect a truncated
+response. It is generally faster than the close mode, but not always because
+some clients often limit their concurrent connections to a smaller value, and
+this compensates less for poor network connectivity. Also, some servers have to
+keep the connection alive for a long time waiting for a possible new request
+and may experience a high memory usage due to the high number of connections,
+and closing too fast may break some requests that arrived at the moment the
+connection was closed.
+
+In this mode, the response size needs to be known upfront, which is not always
+possible with dynamically generated or compressed contents. For this reason
+another mode was implemented, the "chunked mode", where instead of announcing
+the size of the whole response at once, the sender only advertises the size of
+next "chunk" of response it already has in a buffer, and can terminate at any
+moment with a zero-sized chunk. In this mode, the Content-Length header is not
+used.
+
+Another improvement in the communications is the pipelining mode. It still uses
+keep-alive, but the client does not wait for the first response to send the
+second request. This is useful for fetching large number of images composing a
+page :
+
+ [CON] [REQ1] [REQ2] ... [RESP1] [RESP2] [CLO] ...
+
+This can obviously have a tremendous benefit on performance because the network
+latency is eliminated between subsequent requests. Many HTTP agents do not
+correctly support pipelining since there is no way to associate a response with
+the corresponding request in HTTP. For this reason, it is mandatory for the
+server to reply in the exact same order as the requests were received. In
+practice, after several attempts by various clients to deploy it, it has been
+totally abandoned for its lack of reliability on certain servers. But it is
+mandatory for servers to support it.
+
+The next improvement is the multiplexed mode, as implemented in HTTP/2 and
+HTTP/3. In this mode, multiple transactions (i.e. request-response pairs) are
+transmitted in parallel over a single connection, and they all progress at
+their own speed, independent from each other. With multiplexed protocols, a new
+notion of "stream" was introduced, to represent these parallel communications
+happening over the same connection. Each stream is generally assigned a unique
+identifier for a given connection, that is used by both endpoints to know where
+to deliver the data. It is fairly common for clients to start many (up to 100,
+sometimes more) streams in parallel over a same connection, and let the server
+sort them out and respond in any order depending on what response is available.
+The main benefit of the multiplexed mode is that it significantly reduces the
+number of round trips, and speeds up page loading time over high latency
+networks. It is sometimes visible on sites using many images, where all images
+appear to load in parallel.
+
+These protocols have also improved their efficiency by adopting some mechanisms
+to compress header fields in order to reduce the number of bytes on the wire,
+so that without the appropriate tools, they are not realistically manipulable
+by hand nor readable to the naked eye like HTTP/1 was. For this reason, various
+examples of HTTP messages continue to be represented in literature (including
+this document) using the HTTP/1 syntax even for newer versions of the protocol.
+
+HTTP/2 suffers from some design limitations, such as packet losses affecting
+all streams at once, and if a client takes too much time to retrieve an object
+(e.g. needs to store it on disk), it may slow down its retrieval and make it
+impossible during this time to access the data that is pending behind it. This
+is called "head of line blocking" or "HoL blocking" or sometimes just "HoL".
+
+HTTP/3 is implemented over QUIC, itself implemented over UDP. QUIC solves the
+head of line blocking at the transport level by means of independently handled
+streams. Indeed, when experiencing loss, an impacted stream does not affect the
+other streams, and all of them can be accessed in parallel.
+
+By default HAProxy operates in keep-alive mode with regards to persistent
+connections: for each connection it processes each request and response, and
+leaves the connection idle on both sides between the end of a response and the
+start of a new request. When it receives HTTP/2 connections from a client, it
+processes all the requests in parallel and leaves the connection idling,
+waiting for new requests, just as if it was a keep-alive HTTP connection.
+
+HAProxy essentially supports 3 connection modes :
+ - keep alive : all requests and responses are processed, and the client
+ facing and server facing connections are kept alive for new
+ requests. This is the default and suits the modern web and
+ modern protocols (HTTP/2 and HTTP/3).
+
+ - server close : the server-facing connection is closed after the response.
+
+ - close : the connection is actively closed after end of response on
+ both sides.
+
+In addition to this, by default, the server-facing connection is reusable by
+any request from any client, as mandated by the HTTP protocol specification, so
+any information pertaining to a specific client has to be passed along with
+each request if needed (e.g. client's source address etc). When HTTP/2 is used
+with a server, by default HAProxy will dedicate this connection to the same
+client to avoid the risk of head of line blocking between clients.
+
+
+1.2. Terminology
+----------------
+
+Inside HAProxy, the terminology has evolved a bit over the ages to follow the
+evolutions of the HTTP protocol and its usages. While originally there was no
+significant difference between a connection, a session, a stream or a
+transaction, these ones clarified over time to match closely what exists in the
+modern versions of the HTTP protocol, though some terms remain visible in the
+configuration or the command line interface for the purpose of historical
+compatibility.
+
+Here are some definitions that apply to the current version of HAProxy:
+
+ - connection: a connection is a single, bidirectional communication channel
+ between a remote agent (client or server) and haproxy, at the lowest level
+ possible. Usually it corresponds to a TCP socket established between a pair
+ of IP and ports. On the client-facing side, connections are the very first
+ entities that are instantiated when a client connects to haproxy, and rules
+ applying at the connection level are the earliest ones that apply.
+
+ - session: a session adds some context information associated with a
+ connection. This includes any information specific to the transport layer
+ (e.g. TLS keys etc), or variables. This term has long been used inside
+ HAProxy to denote end-to-end HTTP/1.0 communications between two ends, and
+ as such it remains visible in the name of certain CLI commands or
+ statistics, despite representing streams nowadays, but the help messages
+ and descriptions try to make this unambiguous. It is still valid when it
+ comes to network-level terminology (e.g. TCP sessions inside the operating
+ systems, or TCP sessions across a firewall), or for non-HTTP user-level
+ applications (e.g. a telnet session or an SSH session). It must not be
+ confused with "application sessions" that are used to store a full user
+ context in a cookie and require to be sent to the same server.
+
+ - stream: a stream exactly corresponds to an end-to-end bidirectional
+ communication at the application level, where analysis and transformations
+ may be applied. In HTTP, it contains a single request and its associated
+ response, and is instantiated by the arrival of the request and is finished
+ with the end of delivery of the response. In this context there is a 1:1
+ relation between such a stream and the stream of a multiplexed protocol. In
+ TCP communications there is a single stream per connection.
+
+ - transaction: a transaction is only a pair of a request and the associated
+ response. The term was used in conjunction with sessions before the streams
+ but nowadays there is a 1:1 relation between a transaction and a stream. It
+ is essentially visible in the variables' scope "txn" which is valid during
+ the whole transaction, hence the stream.
+
+ - request: it designates the traffic flowing from the client to the server.
+ It is mainly used for HTTP to indicate where operations are performed. This
+ term also exists for TCP operations to indicate where data are processed.
+ Requests often appear in counters as a unit of traffic or activity. They do
+ not always imply a response (e.g. due to errors), but since there is no
+ spontaneous responses without requests, requests remain a relevant metric
+ of the overall activity. In TCP there are as many requests as connections.
+
+ - response: this designates the traffic flowing from the server to the
+ client, or sometimes from HAProxy to the client, when HAProxy produces the
+ response itself (e.g. an HTTP redirect).
+
+ - service: this generally indicates some internal processing in HAProxy that
+ does not require a server, such as the stats page, the cache, or some Lua
+ code to implement a small application. A service usually reads a request,
+ performs some operations and produces a response.
+
+
+1.3. HTTP request
+-----------------
+
+First, let's consider this HTTP request :
+
+ Line Contents
+ number
+ 1 GET /serv/login.php?lang=en&profile=2 HTTP/1.1
+ 2 Host: www.mydomain.com
+ 3 User-agent: my small browser
+ 4 Accept: image/jpeg, image/gif
+ 5 Accept: image/png
+
+
+1.3.1. The Request line
+-----------------------
+
+Line 1 is the "request line". It is always composed of 3 fields :
+
+ - a METHOD : GET
+ - a URI : /serv/login.php?lang=en&profile=2
+ - a version tag : HTTP/1.1
+
+All of them are delimited by what the standard calls LWS (linear white spaces),
+which are commonly spaces, but can also be tabs or line feeds/carriage returns
+followed by spaces/tabs. The method itself cannot contain any colon (':') and
+is limited to alphabetic letters. All those various combinations make it
+desirable that HAProxy performs the splitting itself rather than leaving it to
+the user to write a complex or inaccurate regular expression.
+
+The URI itself can have several forms :
+
+ - A "relative URI" :
+
+ /serv/login.php?lang=en&profile=2
+
+ It is a complete URL without the host part. This is generally what is
+ received by servers, reverse proxies and transparent proxies.
+
+ - An "absolute URI", also called a "URL" :
+
+ http://192.168.0.12:8080/serv/login.php?lang=en&profile=2
+
+ It is composed of a "scheme" (the protocol name followed by '://'), a host
+ name or address, optionally a colon (':') followed by a port number, then
+ a relative URI beginning at the first slash ('/') after the address part.
+ This is generally what proxies receive, but a server supporting HTTP/1.1
+ must accept this form too.
+
+ - a star ('*') : this form is only accepted in association with the OPTIONS
+ method and is not relayable. It is used to inquire about a next hop's
+ capabilities.
+
+ - an address:port combination : 192.168.0.12:80
+ This is used with the CONNECT method, which is used to establish TCP
+ tunnels through HTTP proxies, generally for HTTPS, but sometimes for
+ other protocols too.
+
+In a relative URI, two sub-parts are identified. The part before the question
+mark is called the "path". It is typically the relative path to static objects
+on the server. The part after the question mark is called the "query string".
+It is mostly used with GET requests sent to dynamic scripts and is very
+specific to the language, framework or application in use.
+
+HTTP/2 and HTTP/3 do not convey a version information with the request, so the
+version is assumed to be the same as the one of the underlying protocol (i.e.
+"HTTP/2" or "HTTP/3"). In addition, these protocols do not send a request line as one part,
+but split it into individual fields called "pseudo-headers", whose name start
+with a colon, and which are conveniently reassembled by HAProxy into an
+equivalent request line. For this reason, request lines found in logs may
+slightly differ between HTTP/1.x and HTTP/2 or HTTP/3.
+
+
+1.3.2. The request headers
+--------------------------
+
+The headers start at the second line. They are composed of a name at the
+beginning of the line, immediately followed by a colon (':'). Traditionally,
+an LWS is added after the colon but that's not required. Then come the values.
+Multiple identical headers may be folded into one single line, delimiting the
+values with commas, provided that their order is respected. This is commonly
+encountered in the "Cookie:" field. A header may span over multiple lines if
+the subsequent lines begin with an LWS. In the example in 1.3, lines 4 and 5
+define a total of 3 values for the "Accept:" header. Finally, all LWS at the
+beginning or at the end of a header are ignored and are not part of the value,
+as per the specification.
+
+Contrary to a common misconception, header names are not case-sensitive, and
+their values are not either if they refer to other header names (such as the
+"Connection:" header). In HTTP/2 and HTTP/3, header names are always sent in
+lower case, as can be seen when running in debug mode. Internally, all header
+names are normalized to lower case so that HTTP/1.x and HTTP/2 or HTTP/3 use
+the exact same representation, and they are sent as-is on the other side. This
+explains why an HTTP/1.x request typed with camel case is delivered in lower
+case.
+
+The end of the headers is indicated by the first empty line. People often say
+that it's a double line feed, which is not exact, even if a double line feed
+is one valid form of empty line.
+
+Fortunately, HAProxy takes care of all these complex combinations when indexing
+headers, checking values and counting them, so there is no reason to worry
+about the way they could be written, but it is important not to accuse an
+application of being buggy if it does unusual, valid things.
+
+Important note:
+ As suggested by RFC7231, HAProxy normalizes headers by replacing line breaks
+ in the middle of headers by LWS in order to join multi-line headers. This
+ is necessary for proper analysis and helps less capable HTTP parsers to work
+ correctly and not to be fooled by such complex constructs.
+
+
+1.4. HTTP response
+------------------
+
+An HTTP response looks very much like an HTTP request. Both are called HTTP
+messages. Let's consider this HTTP response :
+
+ Line Contents
+ number
+ 1 HTTP/1.1 200 OK
+ 2 Content-length: 350
+ 3 Content-Type: text/html
+
+As a special case, HTTP supports so called "Informational responses" as status
+codes 1xx. These messages are special in that they don't convey any part of the
+response, they're just used as sort of a signaling message to ask a client to
+continue to post its request for instance. In the case of a status 100 response
+the requested information will be carried by the next non-100 response message
+following the informational one. This implies that multiple responses may be
+sent to a single request, and that this only works when keep-alive is enabled
+(1xx messages appeared in HTTP/1.1). HAProxy handles these messages and is able
+to correctly forward and skip them, and only process the next non-100 response.
+As such, these messages are neither logged nor transformed, unless explicitly
+stated otherwise. Status 101 messages indicate that the protocol is changing
+over the same connection and that HAProxy must switch to tunnel mode, just as
+if a CONNECT had occurred. Then the Upgrade header would contain additional
+information about the type of protocol the connection is switching to.
+
+
+1.4.1. The response line
+------------------------
+
+Line 1 is the "response line". It is always composed of 3 fields :
+
+ - a version tag : HTTP/1.1
+ - a status code : 200
+ - a reason : OK
+
+The status code is always 3-digit. The first digit indicates a general status :
+ - 1xx = informational message to be skipped (e.g. 100, 101)
+ - 2xx = OK, content is following (e.g. 200, 206)
+ - 3xx = OK, no content following (e.g. 302, 304)
+ - 4xx = error caused by the client (e.g. 401, 403, 404)
+ - 5xx = error caused by the server (e.g. 500, 502, 503)
+
+Status codes greater than 599 must not be emitted in communications, though
+certain agents may produce them in logs to report their internal statuses.
+Please refer to RFC9110 for the detailed meaning of all such codes. HTTP/2 and
+above do not have a version tag and use the ":status" pseudo-header to report
+the status code.
+
+The "reason" field is just a hint, but is not parsed by clients. Anything can
+be found there, but it's a common practice to respect the well-established
+messages. It can be composed of one or multiple words, such as "OK", "Found",
+or "Authentication Required". It does not exist in HTTP/2 and above and is
+not emitted there. When a response from HTTP/2 or above is transmitted to an
+HTTP/1 client, HAProxy will produce such a common reason field that matches
+the status code.
+
+HAProxy may emit the following status codes by itself :
+
+ Code When / reason
+ 200 access to stats page, and when replying to monitoring requests
+ 301 when performing a redirection, depending on the configured code
+ 302 when performing a redirection, depending on the configured code
+ 303 when performing a redirection, depending on the configured code
+ 307 when performing a redirection, depending on the configured code
+ 308 when performing a redirection, depending on the configured code
+ 400 for an invalid or too large request
+ 401 when an authentication is required to perform the action (when
+ accessing the stats page)
+ 403 when a request is forbidden by a "http-request deny" rule
+ 404 when the requested resource could not be found
+ 408 when the request timeout strikes before the request is complete
+ 410 when the requested resource is no longer available and will not
+ be available again
+ 500 when HAProxy encounters an unrecoverable internal error, such as a
+ memory allocation failure, which should never happen
+ 501 when HAProxy is unable to satisfy a client request because of an
+ unsupported feature
+ 502 when the server returns an empty, invalid or incomplete response, or
+ when an "http-response deny" rule blocks the response.
+ 503 when no server was available to handle the request, or in response to
+ monitoring requests which match the "monitor fail" condition
+ 504 when the response timeout strikes before the server responds
+
+The error 4xx and 5xx codes above may be customized (see "errorloc" in section
+4.2). Other status codes can be emitted on purpose by specific actions (see the
+"deny", "return" and "redirect" actions in section 4.3 for example).
+
+
+1.4.2. The response headers
+---------------------------
+
+Response headers work exactly like request headers, and as such, HAProxy uses
+the same parsing function for both. Please refer to paragraph 1.3.2 for more
+details.
+
+
+2. Configuring HAProxy
+----------------------
+
+2.1. Configuration file format
+------------------------------
+
+HAProxy's configuration process involves 3 major sources of parameters :
+
+ - the arguments from the command-line, which always take precedence
+ - the configuration file(s), whose format is described here
+ - the running process's environment, in case some environment variables are
+ explicitly referenced
+
+The configuration file follows a fairly simple hierarchical format which obeys
+a few basic rules:
+
+ 1. a configuration file is an ordered sequence of statements
+
+ 2. a statement is a single non-empty line before any unprotected "#" (hash)
+
+ 3. a line is a series of tokens or "words" delimited by unprotected spaces or
+ tab characters
+
+ 4. the first word or sequence of words of a line is one of the keywords or
+ keyword sequences listed in this document
+
+ 5. all other words are all arguments of the first one, some being well-known
+ keywords listed in this document, others being values, references to other
+ parts of the configuration, or expressions
+
+ 6. certain keywords delimit a section inside which only a subset of keywords
+ are supported
+
+ 7. a section ends at the end of a file or on a special keyword starting a new
+ section
+
+This is all that is needed to know to write a simple but reliable configuration
+generator, but this is not enough to reliably parse any configuration nor to
+figure how to deal with certain corner cases.
+
+First, there are a few consequences of the rules above. Rule 6 and 7 imply that
+the keywords used to define a new section are valid everywhere and cannot have
+a different meaning in a specific section. These keywords are always a single
+word (as opposed to a sequence of words), and traditionally the section that
+follows them is designated using the same name. For example when speaking about
+the "global section", it designates the section of configuration that follows
+the "global" keyword. This usage is used a lot in error messages to help locate
+the parts that need to be addressed.
+
+A number of sections create an internal object or configuration space, which
+requires to be distinguished from other ones. In this case they will take an
+extra word which will set the name of this particular section. For some of them
+the section name is mandatory. For example "frontend foo" will create a new
+section of type "frontend" named "foo". Usually a name is specific to its
+section and two sections of different types may use the same name, but this is
+not recommended as it tends to complexify configuration management.
+
+A direct consequence of rule 7 is that when multiple files are read at once,
+each of them must start with a new section, and the end of each file will end
+a section. A file cannot contain sub-sections nor end an existing section and
+start a new one.
+
+Rule 1 mentioned that ordering matters. Indeed, some keywords create directives
+that can be repeated multiple times to create ordered sequences of rules to be
+applied in a certain order. For example "tcp-request" can be used to alternate
+"accept" and "reject" rules on varying criteria. As such, a configuration file
+processor must always preserve a section's ordering when editing a file. The
+ordering of sections usually does not matter except for the global section
+which must be placed before other sections, but it may be repeated if needed.
+In addition, some automatic identifiers may automatically be assigned to some
+of the created objects (e.g. proxies), and by reordering sections, their
+identifiers will change. These ones appear in the statistics for example. As
+such, the configuration below will assign "foo" ID number 1 and "bar" ID number
+2, which will be swapped if the two sections are reversed:
+
+ listen foo
+ bind :80
+
+ listen bar
+ bind :81
+
+Another important point is that according to rules 2 and 3 above, empty lines,
+spaces, tabs, and comments following an unprotected "#" character are not part
+of the configuration as they are just used as delimiters. This implies that the
+following configurations are strictly equivalent:
+
+ global#this is the global section
+ daemon#daemonize
+ frontend foo
+ mode http # or tcp
+
+and:
+
+ global
+ daemon
+
+ # this is the public web frontend
+ frontend foo
+ mode http
+
+The common practice is to align to the left only the keyword that initiates a
+new section, and indent (i.e. prepend a tab character or a few spaces) all
+other keywords so that it's instantly visible that they belong to the same
+section (as done in the second example above). Placing comments before a new
+section helps the reader decide if it's the desired one. Leaving a blank line
+at the end of a section also visually helps spotting the end when editing it.
+
+Tabs are very convenient for indent but they do not copy-paste well. If spaces
+are used instead, it is recommended to avoid placing too many (2 to 4) so that
+editing in field doesn't become a burden with limited editors that do not
+support automatic indent.
+
+In the early days it used to be common to see arguments split at fixed tab
+positions because most keywords would not take more than two arguments. With
+modern versions featuring complex expressions this practice does not stand
+anymore, and is not recommended.
+
+
+2.2. Quoting and escaping
+-------------------------
+
+In modern configurations, some arguments require the use of some characters
+that were previously considered as pure delimiters. In order to make this
+possible, HAProxy supports character escaping by prepending a backslash ('\')
+in front of the character to be escaped, weak quoting within double quotes
+('"') and strong quoting within single quotes ("'").
+
+This is pretty similar to what is done in a number of programming languages and
+very close to what is commonly encountered in Bourne shell. The principle is
+the following: while the configuration parser cuts the lines into words, it
+also takes care of quotes and backslashes to decide whether a character is a
+delimiter or is the raw representation of this character within the current
+word. The escape character is then removed, the quotes are removed, and the
+remaining word is used as-is as a keyword or argument for example.
+
+If a backslash is needed in a word, it must either be escaped using itself
+(i.e. double backslash) or be strongly quoted.
+
+Escaping outside quotes is achieved by preceding a special character by a
+backslash ('\'):
+
+ \ to mark a space and differentiate it from a delimiter
+ \# to mark a hash and differentiate it from a comment
+ \\ to use a backslash
+ \' to use a single quote and differentiate it from strong quoting
+ \" to use a double quote and differentiate it from weak quoting
+
+In addition, a few non-printable characters may be emitted using their usual
+C-language representation:
+
+ \n to insert a line feed (LF, character \x0a or ASCII 10 decimal)
+ \r to insert a carriage return (CR, character \x0d or ASCII 13 decimal)
+ \t to insert a tab (character \x09 or ASCII 9 decimal)
+ \xNN to insert character having ASCII code hex NN (e.g \x0a for LF).
+
+Weak quoting is achieved by surrounding double quotes ("") around the character
+or sequence of characters to protect. Weak quoting prevents the interpretation
+of:
+
+ space or tab as a word separator
+ ' single quote as a strong quoting delimiter
+ # hash as a comment start
+
+Weak quoting permits the interpretation of environment variables (which are not
+evaluated outside of quotes) by preceding them with a dollar sign ('$'). If a
+dollar character is needed inside double quotes, it must be escaped using a
+backslash.
+
+Strong quoting is achieved by surrounding single quotes ('') around the
+character or sequence of characters to protect. Inside single quotes, nothing
+is interpreted, it's the efficient way to quote regular expressions.
+
+As a result, here is the matrix indicating how special characters can be
+entered in different contexts (unprintable characters are replaced with their
+name within angle brackets). Note that some characters that may only be
+represented escaped have no possible representation inside single quotes,
+hence their absence there:
+
+ Character | Unquoted | Weakly quoted | Strongly quoted
+ -----------+---------------+-----------------------------+-----------------
+ <TAB> | \<TAB>, \x09 | "<TAB>", "\<TAB>", "\x09" | '<TAB>'
+ -----------+---------------+-----------------------------+-----------------
+ <LF> | \n, \x0a | "\n", "\x0a" |
+ -----------+---------------+-----------------------------+-----------------
+ <CR> | \r, \x0d | "\r", "\x0d" |
+ -----------+---------------+-----------------------------+-----------------
+ <SPC> | \<SPC>, \x20 | "<SPC>", "\<SPC>", "\x20" | '<SPC>'
+ -----------+---------------+-----------------------------+-----------------
+ " | \", \x22 | "\"", "\x22" | '"'
+ -----------+---------------+-----------------------------+-----------------
+ # | \#, \x23 | "#", "\#", "\x23" | '#'
+ -----------+---------------+-----------------------------+-----------------
+ $ | $, \$, \x24 | "\$", "\x24" | '$'
+ -----------+---------------+-----------------------------+-----------------
+ ' | \', \x27 | "'", "\'", "\x27" |
+ -----------+---------------+-----------------------------+-----------------
+ \ | \\, \x5c | "\\", "\x5c" | '\'
+ -----------+---------------+-----------------------------+-----------------
+
+ Example:
+ # those are all strictly equivalent:
+ log-format %{+Q}o\ %t\ %s\ %{-Q}r
+ log-format "%{+Q}o %t %s %{-Q}r"
+ log-format '%{+Q}o %t %s %{-Q}r'
+ log-format "%{+Q}o %t"' %s %{-Q}r'
+ log-format "%{+Q}o %t"' %s'\ %{-Q}r
+
+There is one particular case where a second level of quoting or escaping may be
+necessary. Some keywords take arguments within parenthesis, sometimes delimited
+by commas. These arguments are commonly integers or predefined words, but when
+they are arbitrary strings, it may be required to perform a separate level of
+escaping to disambiguate the characters that belong to the argument from the
+characters that are used to delimit the arguments themselves. A pretty common
+case is the "regsub" converter. It takes a regular expression in argument, and
+if a closing parenthesis is needed inside, this one will require to have its
+own quotes.
+
+The keyword argument parser is exactly the same as the top-level one regarding
+quotes, except that the \#, \$, and \xNN escapes are not processed. But what is
+not always obvious is that the delimiters used inside must first be escaped or
+quoted so that they are not resolved at the top level.
+
+Let's take this example making use of the "regsub" converter which takes 3
+arguments, one regular expression, one replacement string and one set of flags:
+
+ # replace all occurrences of "foo" with "blah" in the path:
+ http-request set-path %[path,regsub(foo,blah,g)]
+
+Here no special quoting was necessary. But if now we want to replace either
+"foo" or "bar" with "blah", we'll need the regular expression "(foo|bar)". We
+cannot write:
+
+ http-request set-path %[path,regsub((foo|bar),blah,g)]
+
+because we would like the string to cut like this:
+
+ http-request set-path %[path,regsub((foo|bar),blah,g)]
+ |---------|----|-|
+ arg1 _/ / /
+ arg2 __________/ /
+ arg3 ______________/
+
+but actually what is passed is a string between the opening and closing
+parenthesis then garbage:
+
+ http-request set-path %[path,regsub((foo|bar),blah,g)]
+ |--------|--------|
+ arg1=(foo|bar _/ /
+ trailing garbage _________/
+
+The obvious solution here seems to be that the closing parenthesis needs to be
+quoted, but alone this will not work, because as mentioned above, quotes are
+processed by the top-level parser which will resolve them before processing
+this word:
+
+ http-request set-path %[path,regsub("(foo|bar)",blah,g)]
+ ------------ -------- ----------------------------------
+ word1 word2 word3=%[path,regsub((foo|bar),blah,g)]
+
+So we didn't change anything for the argument parser at the second level which
+still sees a truncated regular expression as the only argument, and garbage at
+the end of the string. By escaping the quotes they will be passed unmodified to
+the second level:
+
+ http-request set-path %[path,regsub(\"(foo|bar)\",blah,g)]
+ ------------ -------- ------------------------------------
+ word1 word2 word3=%[path,regsub("(foo|bar)",blah,g)]
+ |---------||----|-|
+ arg1=(foo|bar) _/ / /
+ arg2=blah ___________/ /
+ arg3=g _______________/
+
+Another approach consists in using single quotes outside the whole string and
+double quotes inside (so that the double quotes are not stripped again):
+
+ http-request set-path '%[path,regsub("(foo|bar)",blah,g)]'
+ ------------ -------- ----------------------------------
+ word1 word2 word3=%[path,regsub("(foo|bar)",blah,g)]
+ |---------||----|-|
+ arg1=(foo|bar) _/ / /
+ arg2 ___________/ /
+ arg3 _______________/
+
+When using regular expressions, it can happen that the dollar ('$') character
+appears in the expression or that a backslash ('\') is used in the replacement
+string. In this case these ones will also be processed inside the double quotes
+thus single quotes are preferred (or double escaping). Example:
+
+ http-request set-path '%[path,regsub("^/(here)(/|$)","my/\1",g)]'
+ ------------ -------- -----------------------------------------
+ word1 word2 word3=%[path,regsub("^/(here)(/|$)","my/\1",g)]
+ |-------------| |-----||-|
+ arg1=(here)(/|$) _/ / /
+ arg2=my/\1 ________________/ /
+ arg3 ______________________/
+
+Remember that backslashes are not escape characters within single quotes and
+that the whole word above is already protected against them using the single
+quotes. Conversely, if double quotes had been used around the whole expression,
+then the dollar character and the backslashes would have been resolved at top
+level, breaking the argument contents at the second level.
+
+Unfortunately, since single quotes can't be escaped inside of strong quoting,
+if you need to include single quotes in your argument, you will need to escape
+or quote them twice. There are a few ways to do this:
+
+ http-request set-var(txn.foo) str("\\'foo\\'")
+ http-request set-var(txn.foo) str(\"\'foo\'\")
+ http-request set-var(txn.foo) str(\\\'foo\\\')
+
+When in doubt, simply do not use quotes anywhere, and start to place single or
+double quotes around arguments that require a comma or a closing parenthesis,
+and think about escaping these quotes using a backslash if the string contains
+a dollar or a backslash. Again, this is pretty similar to what is used under
+a Bourne shell when double-escaping a command passed to "eval". For API writers
+the best is probably to place escaped quotes around each and every argument,
+regardless of their contents. Users will probably find that using single quotes
+around the whole expression and double quotes around each argument provides
+more readable configurations.
+
+
+2.3. Environment variables
+--------------------------
+
+HAProxy's configuration supports environment variables. Those variables are
+interpreted only within double quotes. Variables are expanded during the
+configuration parsing. Variable names must be preceded by a dollar ("$") and
+optionally enclosed with braces ("{}") similarly to what is done in Bourne
+shell. Variable names can contain alphanumerical characters or the character
+underscore ("_") but should not start with a digit. If the variable contains a
+list of several values separated by spaces, it can be expanded as individual
+arguments by enclosing the variable with braces and appending the suffix '[*]'
+before the closing brace. It is also possible to specify a default value to
+use when the variable is not set, by appending that value after a dash '-'
+next to the variable name. Note that the default value only replaces non
+existing variables, not empty ones.
+
+ Example:
+
+ bind "fd@${FD_APP1}"
+
+ log "${LOCAL_SYSLOG-127.0.0.1}:514" local0 notice # send to local server
+
+ user "$HAPROXY_USER"
+
+Some variables are defined by HAProxy, they can be used in the configuration
+file, or could be inherited by a program (See 3.7. Programs):
+
+* HAPROXY_LOCALPEER: defined at the startup of the process which contains the
+ name of the local peer. (See "-L" in the management guide.)
+
+* HAPROXY_CFGFILES: list of the configuration files loaded by HAProxy,
+ separated by semicolons. Can be useful in the case you specified a
+ directory.
+
+* HAPROXY_HTTP_LOG_FMT: contains the value of the default HTTP log format as
+ defined in section 8.2.3 "HTTP log format". It can be used to override the
+ default log format without having to copy the whole original definition.
+
+ Example:
+ # Add the rule that gave the final verdict to the log
+ log-format "${HAPROXY_HTTP_LOG_FMT} lr=last_rule_file:last_rule_line"
+
+* HAPROXY_HTTPS_LOG_FMT: similar to HAPROXY_HTTP_LOG_FMT but for HTTPS log
+ format as defined in section 8.2.4 "HTTPS log format".
+
+* HAPROXY_TCP_LOG_FMT: similar to HAPROXY_HTTP_LOG_FMT but for TCP log format
+ as defined in section 8.2.2 "TCP log format".
+
+* HAPROXY_MWORKER: In master-worker mode, this variable is set to 1.
+
+* HAPROXY_CLI: configured listeners addresses of the stats socket for every
+ processes, separated by semicolons.
+
+* HAPROXY_MASTER_CLI: In master-worker mode, listeners addresses of the master
+ CLI, separated by semicolons.
+
+* HAPROXY_STARTUP_VERSION: contains the version used to start, in master-worker
+ mode this is the version which was used to start the master, even after
+ updating the binary and reloading.
+
+* HAPROXY_BRANCH: contains the HAProxy branch version (such as "2.8"). It does
+ not contain the full version number. It can be useful in case of migration
+ if resources (such as maps or certificates) are in a path containing the
+ branch number.
+
+In addition, some pseudo-variables are internally resolved and may be used as
+regular variables. Pseudo-variables always start with a dot ('.'), and are the
+only ones where the dot is permitted. The current list of pseudo-variables is:
+
+* .FILE: the name of the configuration file currently being parsed.
+
+* .LINE: the line number of the configuration file currently being parsed,
+ starting at one.
+
+* .SECTION: the name of the section currently being parsed, or its type if the
+ section doesn't have a name (e.g. "global"), or an empty string before the
+ first section.
+
+These variables are resolved at the location where they are parsed. For example
+if a ".LINE" variable is used in a "log-format" directive located in a defaults
+section, its line number will be resolved before parsing and compiling the
+"log-format" directive, so this same line number will be reused by subsequent
+proxies.
+
+This way it is possible to emit information to help locate a rule in variables,
+logs, error statuses, health checks, header values, or even to use line numbers
+to name some config objects like servers for example.
+
+See also "external-check command" for other variables.
+
+
+2.4. Conditional blocks
+-----------------------
+
+It may sometimes be convenient to be able to conditionally enable or disable
+some arbitrary parts of the configuration, for example to enable/disable SSL or
+ciphers, enable or disable some pre-production listeners without modifying the
+configuration, or adjust the configuration's syntax to support two distinct
+versions of HAProxy during a migration. HAProxy brings a set of nestable
+preprocessor-like directives which allow to integrate or ignore some blocks of
+text. These directives must be placed on their own line and they act on the
+lines that follow them. Two of them support an expression, the other ones only
+switch to an alternate block or end a current level. The 4 following directives
+are defined to form conditional blocks:
+
+ - .if <condition>
+ - .elif <condition>
+ - .else
+ - .endif
+
+The ".if" directive nests a new level, ".elif" stays at the same level, ".else"
+as well, and ".endif" closes a level. Each ".if" must be terminated by a
+matching ".endif". The ".elif" may only be placed after ".if" or ".elif", and
+there is no limit to the number of ".elif" that may be chained. There may be
+only one ".else" per ".if" and it must always be after the ".if" or the last
+".elif" of a block.
+
+Comments may be placed on the same line if needed after a '#', they will be
+ignored. The directives are tokenized like other configuration directives, and
+as such it is possible to use environment variables in conditions.
+
+Conditions can also be evaluated on startup with the -cc parameter.
+See "3. Starting HAProxy" in the management doc.
+
+The conditions are either an empty string (which then returns false), or an
+expression made of any combination of:
+
+ - the integer zero ('0'), always returns "false"
+  - a non-null integer (e.g. '1'), always returns "true".
+ - a predicate optionally followed by argument(s) in parenthesis.
+ - a condition placed between a pair of parenthesis '(' and ')'
+ - an exclamation mark ('!') preceding any of the non-empty elements above,
+ and which will negate its status.
+ - expressions combined with a logical AND ('&&'), which will be evaluated
+ from left to right until one returns false
+ - expressions combined with a logical OR ('||'), which will be evaluated
+ from right to left until one returns true
+
+Note that like in other languages, the AND operator has precedence over the OR
+operator, so that "A && B || C && D" evaluates as "(A && B) || (C && D)".
+
+The list of currently supported predicates is the following:
+
+ - defined(<name>) : returns true if an environment variable <name>
+ exists, regardless of its contents
+
+ - feature(<name>) : returns true if feature <name> is listed as present
+ in the features list reported by "haproxy -vv"
+ (which means a <name> appears after a '+')
+
+ - streq(<str1>,<str2>) : returns true only if the two strings are equal
+ - strneq(<str1>,<str2>) : returns true only if the two strings differ
+ - strstr(<str1>,<str2>) : returns true only if the second string is found in the first one
+
+ - version_atleast(<ver>): returns true if the current haproxy version is
+ at least as recent as <ver> otherwise false. The
+ version syntax is the same as shown by "haproxy -v"
+ and missing components are assumed as being zero.
+
+ - version_before(<ver>) : returns true if the current haproxy version is
+ strictly older than <ver> otherwise false. The
+ version syntax is the same as shown by "haproxy -v"
+ and missing components are assumed as being zero.
+
+ - enabled(<opt>) : returns true if the option <opt> is enabled at
+ run-time. Only a subset of options are supported:
+ POLL, EPOLL, KQUEUE, EVPORTS, SPLICE,
+ GETADDRINFO, REUSEPORT, FAST-FORWARD,
+ SERVER-SSL-VERIFY-NONE
+
+Example:
+
+ .if defined(HAPROXY_MWORKER)
+ listen mwcli_px
+ bind :1111
+ ...
+ .endif
+
+ .if strneq("$SSL_ONLY",yes)
+ bind :80
+ .endif
+
+ .if streq("$WITH_SSL",yes)
+ .if feature(OPENSSL)
+ bind :443 ssl crt ...
+ .endif
+ .endif
+
+ .if feature(OPENSSL) && (streq("$WITH_SSL",yes) || streq("$SSL_ONLY",yes))
+ bind :443 ssl crt ...
+ .endif
+
+ .if version_atleast(2.4-dev19)
+ profiling.memory on
+ .endif
+
+ .if !feature(OPENSSL)
+ .alert "SSL support is mandatory"
+ .endif
+
+Four other directives are provided to report some status:
+
+ - .diag "message" : emit this message only when in diagnostic mode (-dD)
+ - .notice "message" : emit this message at level NOTICE
+ - .warning "message" : emit this message at level WARNING
+ - .alert "message" : emit this message at level ALERT
+
+Messages emitted at level WARNING may cause the process to fail to start if the
+"strict-mode" is enabled. Messages emitted at level ALERT will always cause a
+fatal error. These can be used to detect some inappropriate conditions and
+provide advice to the user.
+
+Example:
+
+ .if "${A}"
+ .if "${B}"
+ .notice "A=1, B=1"
+ .elif "${C}"
+ .notice "A=1, B=0, C=1"
+ .elif "${D}"
+ .warning "A=1, B=0, C=0, D=1"
+ .else
+ .alert "A=1, B=0, C=0, D=0"
+ .endif
+ .else
+ .notice "A=0"
+ .endif
+
+ .diag "WTA/2021-05-07: replace 'redirect' with 'return' after switch to 2.4"
+ http-request redirect location /goaway if ABUSE
+
+
+2.5. Time format
+----------------
+
+Some parameters involve values representing time, such as timeouts. These
+values are generally expressed in milliseconds (unless explicitly stated
+otherwise) but may be expressed in any other unit by suffixing the unit to the
+numeric value. It is important to consider this because it will not be repeated
+for every keyword. Supported units are :
+
+ - us : microseconds. 1 microsecond = 1/1000000 second
+ - ms : milliseconds. 1 millisecond = 1/1000 second. This is the default.
+ - s : seconds. 1s = 1000ms
+ - m : minutes. 1m = 60s = 60000ms
+ - h : hours. 1h = 60m = 3600s = 3600000ms
+ - d : days. 1d = 24h = 1440m = 86400s = 86400000ms
+
+
+2.6. Size format
+----------------
+
+Some parameters involve values representing size, such as bandwidth limits.
+These values are generally expressed in bytes (unless explicitly stated
+otherwise) but may be expressed in any other unit by suffixing the unit to the
+numeric value. It is important to consider this because it will not be repeated
+for every keyword. Supported units are case insensitive :
+
+ - k : kilobytes. 1 kilobyte = 1024 bytes
+ - m : megabytes. 1 megabyte = 1048576 bytes
+ - g : gigabytes. 1 gigabyte = 1073741824 bytes
+
+Both time and size formats require integers, decimal notation is not allowed.
+
+
+2.7. Examples
+-------------
+
+ # Simple configuration for an HTTP proxy listening on port 80 on all
+ # interfaces and forwarding requests to a single backend "servers" with a
+ # single server "server1" listening on 127.0.0.1:8000
+ global
+ daemon
+ maxconn 256
+
+ defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+ frontend http-in
+ bind *:80
+ default_backend servers
+
+ backend servers
+ server server1 127.0.0.1:8000 maxconn 32
+
+
+ # The same configuration defined with a single listen block. Shorter but
+ # less expressive, especially in HTTP mode.
+ global
+ daemon
+ maxconn 256
+
+ defaults
+ mode http
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+ listen http-in
+ bind *:80
+ server server1 127.0.0.1:8000 maxconn 32
+
+
+Assuming haproxy is in $PATH, test these configurations in a shell with:
+
+ $ sudo haproxy -f configuration.conf -c
+
+
+3. Global parameters
+--------------------
+
+Parameters in the "global" section are process-wide and often OS-specific. They
+are generally set once for all and do not need being changed once correct. Some
+of them have command-line equivalents.
+
+The following keywords are supported in the "global" section :
+
+ * Process management and security
+ - 51degrees-allow-unmatched
+ - 51degrees-cache-size
+ - 51degrees-data-file
+ - 51degrees-difference
+ - 51degrees-drift
+ - 51degrees-property-name-list
+ - 51degrees-property-separator
+ - 51degrees-use-performance-graph
+ - 51degrees-use-predictive-graph
+ - ca-base
+ - chroot
+ - cluster-secret
+ - cpu-map
+ - crt-base
+ - daemon
+ - default-path
+ - description
+ - deviceatlas-json-file
+ - deviceatlas-log-level
+ - deviceatlas-properties-cookie
+ - deviceatlas-separator
+ - expose-experimental-directives
+ - external-check
+ - fd-hard-limit
+ - gid
+ - grace
+ - group
+ - h1-accept-payload-with-any-method
+ - h1-case-adjust
+ - h1-case-adjust-file
+ - h2-workaround-bogus-websocket-clients
+ - hard-stop-after
+ - insecure-fork-wanted
+ - insecure-setuid-wanted
+ - issuers-chain-path
+ - localpeer
+ - log
+ - log-send-hostname
+ - log-tag
+ - lua-load
+ - lua-load-per-thread
+ - lua-prepend-path
+ - mworker-max-reloads
+ - nbthread
+ - node
+ - numa-cpu-mapping
+ - pidfile
+ - pp2-never-send-local
+ - presetenv
+ - prealloc-fd
+ - resetenv
+ - set-dumpable
+ - set-var
+ - setenv
+ - ssl-default-bind-ciphers
+ - ssl-default-bind-ciphersuites
+ - ssl-default-bind-client-sigalgs
+ - ssl-default-bind-curves
+ - ssl-default-bind-options
+ - ssl-default-bind-sigalgs
+ - ssl-default-server-ciphers
+ - ssl-default-server-ciphersuites
+ - ssl-default-server-client-sigalgs
+ - ssl-default-server-curves
+ - ssl-default-server-options
+ - ssl-default-server-sigalgs
+ - ssl-dh-param-file
+ - ssl-propquery
+ - ssl-provider
+ - ssl-provider-path
+ - ssl-server-verify
+ - ssl-skip-self-issued-ca
+ - stats
+ - strict-limits
+ - uid
+ - ulimit-n
+ - unix-bind
+ - unsetenv
+ - user
+ - wurfl-cache-size
+ - wurfl-data-file
+ - wurfl-information-list
+ - wurfl-information-list-separator
+
+ * Performance tuning
+ - busy-polling
+ - max-spread-checks
+ - maxcompcpuusage
+ - maxcomprate
+ - maxconn
+ - maxconnrate
+ - maxpipes
+ - maxsessrate
+ - maxsslconn
+ - maxsslrate
+ - maxzlibmem
+ - no-memory-trimming
+ - noepoll
+ - noevports
+ - nogetaddrinfo
+ - nokqueue
+ - nopoll
+ - noreuseport
+ - nosplice
+ - profiling.tasks
+ - server-state-base
+ - server-state-file
+ - spread-checks
+ - ssl-engine
+ - ssl-mode-async
+ - tune.buffers.limit
+ - tune.buffers.reserve
+ - tune.bufsize
+ - tune.comp.maxlevel
+ - tune.disable-fast-forward
+ - tune.disable-zero-copy-forwarding
+ - tune.events.max-events-at-once
+ - tune.fail-alloc
+ - tune.fd.edge-triggered
+ - tune.h1.zero-copy-fwd-recv
+ - tune.h1.zero-copy-fwd-send
+ - tune.h2.be.initial-window-size
+ - tune.h2.be.max-concurrent-streams
+ - tune.h2.fe.initial-window-size
+ - tune.h2.fe.max-concurrent-streams
+ - tune.h2.fe.max-total-streams
+ - tune.h2.header-table-size
+ - tune.h2.initial-window-size
+ - tune.h2.max-concurrent-streams
+ - tune.h2.max-frame-size
+ - tune.h2.zero-copy-fwd-send
+ - tune.http.cookielen
+ - tune.http.logurilen
+ - tune.http.maxhdr
+ - tune.idle-pool.shared
+ - tune.idletimer
+ - tune.lua.forced-yield
+ - tune.lua.maxmem
+ - tune.lua.service-timeout
+ - tune.lua.session-timeout
+ - tune.lua.task-timeout
+ - tune.lua.log.loggers
+ - tune.lua.log.stderr
+ - tune.max-checks-per-thread
+ - tune.maxaccept
+ - tune.maxpollevents
+ - tune.maxrewrite
+ - tune.memory.hot-size
+ - tune.pattern.cache-size
+ - tune.peers.max-updates-at-once
+ - tune.pipesize
+ - tune.pool-high-fd-ratio
+ - tune.pool-low-fd-ratio
+ - tune.pt.zero-copy-forwarding
+ - tune.quic.frontend.conn-tx-buffers.limit
+ - tune.quic.frontend.max-idle-timeout
+ - tune.quic.frontend.max-streams-bidi
+ - tune.quic.max-frame-loss
+ - tune.quic.retry-threshold
+ - tune.quic.socket-owner
+ - tune.quic.zero-copy-fwd-send
+ - tune.rcvbuf.backend
+ - tune.rcvbuf.client
+ - tune.rcvbuf.frontend
+ - tune.rcvbuf.server
+ - tune.recv_enough
+ - tune.runqueue-depth
+ - tune.sched.low-latency
+ - tune.sndbuf.backend
+ - tune.sndbuf.client
+ - tune.sndbuf.frontend
+ - tune.sndbuf.server
+ - tune.stick-counters
+ - tune.ssl.cachesize
+ - tune.ssl.capture-buffer-size
+ - tune.ssl.capture-cipherlist-size (deprecated)
+ - tune.ssl.default-dh-param
+ - tune.ssl.force-private-cache
+ - tune.ssl.hard-maxrecord
+ - tune.ssl.keylog
+ - tune.ssl.lifetime
+ - tune.ssl.maxrecord
+ - tune.ssl.ssl-ctx-cache-size
+ - tune.ssl.ocsp-update.maxdelay
+ - tune.ssl.ocsp-update.mindelay
+ - tune.vars.global-max-size
+ - tune.vars.proc-max-size
+ - tune.vars.reqres-max-size
+ - tune.vars.sess-max-size
+ - tune.vars.txn-max-size
+ - tune.zlib.memlevel
+ - tune.zlib.windowsize
+
+ * Debugging
+ - anonkey
+ - quiet
+ - zero-warning
+
+ * HTTPClient
+ - httpclient.resolvers.disabled
+ - httpclient.resolvers.id
+ - httpclient.resolvers.prefer
+ - httpclient.retries
+ - httpclient.ssl.ca-file
+ - httpclient.ssl.verify
+ - httpclient.timeout.connect
+
+3.1. Process management and security
+------------------------------------
+
+51degrees-data-file <file path>
+ The path of the 51Degrees data file to provide device detection services. The
+ file should be unzipped and accessible by HAProxy with relevant permissions.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES.
+
+51degrees-property-name-list [<string> ...]
+  A list of 51Degrees property names to be loaded from the dataset. A full list
+ of names is available on the 51Degrees website:
+ https://51degrees.com/resources/property-dictionary
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES.
+
+51degrees-property-separator <char>
+  A char that will be appended to every property value in a response header
+  containing 51Degrees results. If not set, it defaults to ','.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES.
+
+51degrees-cache-size <number>
+ Sets the size of the 51Degrees converter cache to <number> entries. This
+  is an LRU cache which remembers previous device detections and their results.
+ By default, this cache is disabled.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES.
+
+51degrees-use-performance-graph { on | off }
+ Enables ('on') or disables ('off') the use of the performance graph in
+ the detection process. The default value depends on 51Degrees library.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES and 51DEGREES_VER=4.
+
+51degrees-use-predictive-graph { on | off }
+ Enables ('on') or disables ('off') the use of the predictive graph in
+ the detection process. The default value depends on 51Degrees library.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES and 51DEGREES_VER=4.
+
+51degrees-drift <number>
+ Sets the drift value that a detection can allow.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES and 51DEGREES_VER=4.
+
+51degrees-difference <number>
+ Sets the difference value that a detection can allow.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES and 51DEGREES_VER=4.
+
+51degrees-allow-unmatched { on | off }
+ Enables ('on') or disables ('off') the use of unmatched nodes in the
+ detection process. The default value depends on 51Degrees library.
+
+ Please note that this option is only available when HAProxy has been
+ compiled with USE_51DEGREES and 51DEGREES_VER=4.
+
+ca-base <dir>
+ Assigns a default directory to fetch SSL CA certificates and CRLs from when a
+ relative path is used with "ca-file", "ca-verify-file" or "crl-file"
+ directives. Absolute locations specified in "ca-file", "ca-verify-file" and
+ "crl-file" prevail and ignore "ca-base".
+
+chroot <jail dir>
+ Changes current directory to <jail dir> and performs a chroot() there before
+ dropping privileges. This increases the security level in case an unknown
+ vulnerability would be exploited, since it would make it very hard for the
+ attacker to exploit the system. This only works when the process is started
+  with superuser privileges. It is important to ensure that <jail dir> is both
+  empty and non-writable to anyone.
+
+close-spread-time <time>
+ Define a time window during which idle connections and active connections
+ closing is spread in case of soft-stop. After a SIGUSR1 is received and the
+ grace period is over (if any), the idle connections will all be closed at
+ once if this option is not set, and active HTTP or HTTP2 connections will be
+ ended after the next request is received, either by appending a "Connection:
+ close" line to the HTTP response, or by sending a GOAWAY frame in case of
+ HTTP2. When this option is set, connection closing will be spread over this
+ set <time>.
+ If the close-spread-time is set to "infinite", active connection closing
+ during a soft-stop will be disabled. The "Connection: close" header will not
+ be added to HTTP responses (or GOAWAY for HTTP2) anymore and idle connections
+ will only be closed once their timeout is reached (based on the various
+ timeouts set in the configuration).
+
+ Arguments :
+ <time> is a time window (by default in milliseconds) during which
+ connection closing will be spread during a soft-stop operation, or
+ "infinite" if active connection closing should be disabled.
+
+ It is recommended to set this setting to a value lower than the one used in
+ the "hard-stop-after" option if this one is used, so that all connections
+ have a chance to gracefully close before the process stops.
+
+ See also: grace, hard-stop-after, idle-close-on-response
+
+cluster-secret <secret>
+ Define an ASCII string secret shared between several nodes belonging to the
+ same cluster. It could be used for different usages. It is at least used to
+ derive stateless reset tokens for all the QUIC connections instantiated by
+ this process. This is also the case to derive secrets used to encrypt Retry
+ tokens.
+
+ If this parameter is not set, a random value will be selected on process
+ startup. This allows to use features which rely on it, albeit with some
+ limitations.
+
+cpu-map [auto:]<thread-group>[/<thread-set>] <cpu-set>[,...] [...]
+ On some operating systems, it is possible to bind a thread group or a thread
+ to a specific CPU set. This means that the designated threads will never run
+ on other CPUs. The "cpu-map" directive specifies CPU sets for individual
+ threads or thread groups. The first argument is a thread group range,
+ optionally followed by a thread set. These ranges have the following format:
+
+ all | odd | even | number[-[number]]
+
+ <number> must be a number between 1 and 32 or 64, depending on the machine's
+ word size. Any group IDs above 'thread-groups' and any thread IDs above the
+ machine's word size are ignored. All thread numbers are relative to the group
+ they belong to. It is possible to specify a range with two such number
+ delimited by a dash ('-'). It also is possible to specify all threads at once
+ using "all", only odd numbers using "odd" or even numbers using "even", just
+ like with the "thread" bind directive. The second and forthcoming arguments
+ are CPU sets. Each CPU set is either a unique number starting at 0 for the
+ first CPU or a range with two such numbers delimited by a dash ('-'). These
+ CPU numbers and ranges may be repeated by delimiting them with commas or by
+ passing more ranges as new arguments on the same line. Outside of Linux and
+ BSD operating systems, there may be a limitation on the maximum CPU index to
+ either 31 or 63. Multiple "cpu-map" directives may be specified, but each
+ "cpu-map" directive will replace the previous ones when they overlap.
+
+ Ranges can be partially defined. The higher bound can be omitted. In such
+ case, it is replaced by the corresponding maximum value, 32 or 64 depending
+ on the machine's word size.
+
+ The prefix "auto:" can be added before the thread set to let HAProxy
+ automatically bind a set of threads to a CPU by incrementing threads and
+ CPU sets. To be valid, both sets must have the same size. No matter the
+ declaration order of the CPU sets, it will be bound from the lowest to the
+ highest bound. Having both a group and a thread range with the "auto:"
+ prefix is not supported. Only one range is supported, the other one must be
+ a fixed number.
+
+ Note that group ranges are supported for historical reasons. Nowadays, a lone
+ number designates a thread group and must be 1 if thread-groups are not used,
+ and specifying a thread range or number requires to prepend "1/" in front of
+ it if thread groups are not used. Finally, "1" is strictly equivalent to
+ "1/all" and designates all threads in the group.
+
+ Examples:
+ cpu-map 1/all 0-3 # bind all threads of the first group on the
+ # first 4 CPUs
+
+ cpu-map 1/1- 0- # will be replaced by "cpu-map 1/1-64 0-63"
+ # or "cpu-map 1/1-32 0-31" depending on the machine's
+ # word size.
+
+ # all these lines bind thread 1 to the cpu 0, the thread 2 to cpu 1
+ # and so on.
+ cpu-map auto:1/1-4 0-3
+ cpu-map auto:1/1-4 0-1 2-3
+ cpu-map auto:1/1-4 3 2 1 0
+ cpu-map auto:1/1-4 3,2,1,0
+
+ # bind each thread to exactly one CPU using all/odd/even keyword
+ cpu-map auto:1/all 0-63
+ cpu-map auto:1/even 0-31
+ cpu-map auto:1/odd 32-63
+
+ # invalid cpu-map because thread and CPU sets have different sizes.
+ cpu-map auto:1/1-4 0 # invalid
+ cpu-map auto:1/1 0-3 # invalid
+
+ # map 40 threads of those 4 groups to individual CPUs
+ cpu-map auto:1/1-10 0-9
+ cpu-map auto:2/1-10 10-19
+ cpu-map auto:3/1-10 20-29
+ cpu-map auto:4/1-10 30-39
+
+ # Map 80 threads to one physical socket and 80 others to another socket
+ # without forcing assignment. These are split into 4 groups since no
+ # group may have more than 64 threads.
+ cpu-map 1/1-40 0-39,80-119 # node0, siblings 0 & 1
+ cpu-map 2/1-40 0-39,80-119
+ cpu-map 3/1-40 40-79,120-159 # node1, siblings 0 & 1
+ cpu-map 4/1-40 40-79,120-159
+
+
+crt-base <dir>
+ Assigns a default directory to fetch SSL certificates from when a relative
+ path is used with "crtfile" or "crt" directives. Absolute locations specified
+ prevail and ignore "crt-base".
+
+daemon
+ Makes the process fork into background. This is the recommended mode of
+ operation. It is equivalent to the command line "-D" argument. It can be
+ disabled by the command line "-db" argument. This option is ignored in
+ systemd mode.
+
+default-path { current | config | parent | origin <path> }
+ By default HAProxy loads all files designated by a relative path from the
+ location the process is started in. In some circumstances it might be
+ desirable to force all relative paths to start from a different location
+ just as if the process was started from such locations. This is what this
+ directive is made for. Technically it will perform a temporary chdir() to
+ the designated location while processing each configuration file, and will
+ return to the original directory after processing each file. It takes an
+ argument indicating the policy to use when loading files whose path does
+ not start with a slash ('/'):
+ - "current" indicates that all relative files are to be loaded from the
+ directory the process is started in ; this is the default.
+
+ - "config" indicates that all relative files should be loaded from the
+ directory containing the configuration file. More specifically, if the
+ configuration file contains a slash ('/'), the longest part up to the
+ last slash is used as the directory to change to, otherwise the current
+ directory is used. This mode is convenient to bundle maps, errorfiles,
+ certificates and Lua scripts together as relocatable packages. When
+ multiple configuration files are loaded, the directory is updated for
+ each of them.
+
+ - "parent" indicates that all relative files should be loaded from the
+ parent of the directory containing the configuration file. More
+ specifically, if the configuration file contains a slash ('/'), ".."
+    is appended to the longest part up to the last slash, and the result is
+    used as the directory to change to, otherwise the directory is "..".
+    This mode is
+ convenient to bundle maps, errorfiles, certificates and Lua scripts
+ together as relocatable packages, but where each part is located in a
+ different subdirectory (e.g. "config/", "certs/", "maps/", ...).
+
+ - "origin" indicates that all relative files should be loaded from the
+ designated (mandatory) path. This may be used to ease management of
+ different HAProxy instances running in parallel on a system, where each
+ instance uses a different prefix but where the rest of the sections are
+ made easily relocatable.
+
+ Each "default-path" directive instantly replaces any previous one and will
+ possibly result in switching to a different directory. While this should
+ always result in the desired behavior, it is really not a good practice to
+ use multiple default-path directives, and if used, the policy ought to remain
+ consistent across all configuration files.
+
+ Warning: some configuration elements such as maps or certificates are
+ uniquely identified by their configured path. By using a relocatable layout,
+ it becomes possible for several of them to end up with the same unique name,
+ making it difficult to update them at run time, especially when multiple
+ configuration files are loaded from different directories. It is essential to
+ observe a strict collision-free file naming scheme before adopting relative
+ paths. A robust approach could consist in prefixing all files names with
+ their respective site name, or in doing so at the directory level.
+
+description <text>
+ Add a text that describes the instance.
+
+ Please note that it is required to escape certain characters (# for example)
+  and this text is inserted into an HTML page so you should avoid using
+ "<" and ">" characters.
+
+deviceatlas-json-file <path>
+ Sets the path of the DeviceAtlas JSON data file to be loaded by the API.
+ The path must be a valid JSON data file and accessible by HAProxy process.
+
+deviceatlas-log-level <value>
+ Sets the level of information returned by the API. This directive is
+ optional and set to 0 by default if not set.
+
+deviceatlas-properties-cookie <name>
+ Sets the client cookie's name used for the detection if the DeviceAtlas
+ Client-side component was used during the request. This directive is optional
+ and set to DAPROPS by default if not set.
+
+deviceatlas-separator <char>
+ Sets the character separator for the API properties results. This directive
+ is optional and set to | by default if not set.
+
+expose-experimental-directives
+ This statement must appear before using directives tagged as experimental or
+ the config file will be rejected.
+
+external-check [preserve-env]
+ Allows the use of an external agent to perform health checks. This is
+ disabled by default as a security precaution, and even when enabled, checks
+ may still fail unless "insecure-fork-wanted" is enabled as well. If the
+ program launched makes use of a setuid executable (it should really not),
+ you may also need to set "insecure-setuid-wanted" in the global section.
+ By default, the checks start with a clean environment which only contains
+ variables defined in the "external-check" command in the backend section. It
+ may sometimes be desirable to preserve the environment though, for example
+ when complex scripts retrieve their extra paths or information there. This
+ can be done by appending the "preserve-env" keyword. In this case however it
+ is strongly advised not to run a setuid nor as a privileged user, as this
+ exposes the check program to potential attacks. See "option external-check",
+ and "insecure-fork-wanted", and "insecure-setuid-wanted" for extra details.
+
+fd-hard-limit <number>
+ Sets an upper bound to the maximum number of file descriptors that the
+ process will use, regardless of system limits. While "ulimit-n" and "maxconn"
+ may be used to enforce a value, when they are not set, the process will be
+ limited to the hard limit of the RLIMIT_NOFILE setting as reported by
+ "ulimit -n -H". But some modern operating systems are now allowing extremely
+ large values here (in the order of 1 billion), which will consume way too
+ much RAM for regular usage. The fd-hard-limit setting is provided to enforce
+ a possibly lower bound to this limit. This means that it will always respect
+ the system-imposed limits when they are below <number> but the specified
+ value will be used if system-imposed limits are higher. In the example below,
+ no other setting is specified and the maxconn value will automatically adapt
+ to the lower of "fd-hard-limit" and the system-imposed limit:
+
+ global
+ # use as many FDs as possible but no more than 50000
+ fd-hard-limit 50000
+
+ See also: ulimit-n, maxconn
+
+gid <number>
+ Changes the process's group ID to <number>. It is recommended that the group
+ ID is dedicated to HAProxy or to a small set of similar daemons. HAProxy must
+ be started with a user belonging to this group, or with superuser privileges.
+ Note that if HAProxy is started from a user having supplementary groups, it
+ will only be able to drop these groups if started with superuser privileges.
+ See also "group" and "uid".
+
+grace <time>
+ Defines a delay between SIGUSR1 and real soft-stop.
+
+ Arguments :
+ <time> is an extra delay (by default in milliseconds) after receipt of the
+ SIGUSR1 signal that will be waited for before proceeding with the
+ soft-stop operation.
+
+ This is used for compatibility with legacy environments where the haproxy
+ process needs to be stopped but some external components need to detect the
+ status before listeners are unbound. The principle is that the internal
+ "stopping" variable (which is reported by the "stopping" sample fetch
+ function) will be turned to true, but listeners will continue to accept
+ connections undisturbed, until the delay expires, after what the regular
+ soft-stop will proceed. This must not be used with processes that are
+ reloaded, or this will prevent the old process from unbinding, and may
+ prevent the new one from starting, or simply cause trouble.
+
+ Example:
+
+ global
+ grace 10s
+
+ # Returns 200 OK until stopping is set via SIGUSR1
+ frontend ext-check
+ bind :9999
+ monitor-uri /ext-check
+ monitor fail if { stopping }
+
+ Please note that a more flexible and durable approach would instead consist
+ for an orchestration system in setting a global variable from the CLI, use
+ that variable to respond to external checks, then after a delay send the
+ SIGUSR1 signal.
+
+ Example:
+
+ # Returns 200 OK until proc.stopping is set to non-zero. May be done
+ # from HTTP using set-var(proc.stopping) or from the CLI using:
+ # > set var proc.stopping int(1)
+ frontend ext-check
+ bind :9999
+ monitor-uri /ext-check
+ monitor fail if { var(proc.stopping) -m int gt 0 }
+
+ See also: hard-stop-after, monitor
+
+group <group name>
+ Similar to "gid" but uses the GID of group name <group name> from /etc/group.
+ See also "gid" and "user".
+
+h1-accept-payload-with-any-method
+ Does not reject HTTP/1.0 GET/HEAD/DELETE requests with a payload.
+
+  While it is explicitly allowed in HTTP/1.1, HTTP/1.0 is not clear on this
+ point and some old servers don't expect any payload and never look for body
+ length (via Content-Length or Transfer-Encoding headers). It means that some
+ intermediaries may properly handle the payload for HTTP/1.0 GET/HEAD/DELETE
+ requests, while some others may totally ignore it. That may lead to security
+ issues because a request smuggling attack is possible. Thus, by default,
+ HAProxy rejects HTTP/1.0 GET/HEAD/DELETE requests with a payload.
+
+ However, it may be an issue with some old clients. In this case, this global
+ option may be set.
+
+h1-case-adjust <from> <to>
+ Defines the case adjustment to apply, when enabled, to the header name
+ <from>, to change it to <to> before sending it to HTTP/1 clients or
+ servers. <from> must be in lower case, and <from> and <to> must not differ
+ except for their case. It may be repeated if several header names need to be
+ adjusted. Duplicate entries are not allowed. If a lot of header names have to
+ be adjusted, it might be more convenient to use "h1-case-adjust-file".
+ Please note that no transformation will be applied unless "option
+ h1-case-adjust-bogus-client" or "option h1-case-adjust-bogus-server" is
+ specified in a proxy.
+
+ There is no standard case for header names because, as stated in RFC7230,
+ they are case-insensitive. So applications must handle them in a case-
+ insensitive manner. But some bogus applications violate the standards and
+ erroneously rely on the cases most commonly used by browsers. This problem
+ becomes critical with HTTP/2 because all header names must be exchanged in
+ lower case, and HAProxy follows the same convention. All header names are
+ sent in lower case to clients and servers, regardless of the HTTP version.
+
+ Applications which fail to properly process requests or responses may require
+ to temporarily use such workarounds to adjust header names sent to them for
+ the time it takes the application to be fixed. Please note that an
+ application which requires such workarounds might be vulnerable to content
+ smuggling attacks and must absolutely be fixed.
+
+ Example:
+ global
+ h1-case-adjust content-length Content-Length
+
+ See "h1-case-adjust-file", "option h1-case-adjust-bogus-client" and
+ "option h1-case-adjust-bogus-server".
+
+h1-case-adjust-file <hdrs-file>
+ Defines a file containing a list of key/value pairs used to adjust the case
+ of some header names before sending them to HTTP/1 clients or servers. The
+ file <hdrs-file> must contain 2 header names per line. The first one must be
+ in lower case and both must not differ except for their case. Lines which
+ start with '#' are ignored, just like empty lines. Leading and trailing tabs
+ and spaces are stripped. Duplicate entries are not allowed. Please note that
+ no transformation will be applied unless "option h1-case-adjust-bogus-client"
+ or "option h1-case-adjust-bogus-server" is specified in a proxy.
+
+ If this directive is repeated, only the last one will be processed. It is an
+ alternative to the directive "h1-case-adjust" if a lot of header names need
+ to be adjusted. Please read the risks associated with using this.
+
+ See "h1-case-adjust", "option h1-case-adjust-bogus-client" and
+ "option h1-case-adjust-bogus-server".
+
+h2-workaround-bogus-websocket-clients
+ This disables the announcement of the support for h2 websockets to clients.
+ This can be used to overcome clients which have issues when implementing the
+ relatively fresh RFC8441, such as Firefox 88. To allow clients to
+ automatically downgrade to http/1.1 for the websocket tunnel, specify h2
+ support on the bind line using "alpn" without an explicit "proto" keyword. If
+ this statement was previously activated, this can be disabled by prefixing
+ the keyword with "no".
+
+hard-stop-after <time>
+ Defines the maximum time allowed to perform a clean soft-stop.
+
+ Arguments :
+ <time> is the maximum time (by default in milliseconds) for which the
+ instance will remain alive when a soft-stop is received via the
+ SIGUSR1 signal.
+
+ This may be used to ensure that the instance will quit even if connections
+ remain opened during a soft-stop (for example with long timeouts for a proxy
+ in tcp mode). It applies both in TCP and HTTP mode.
+
+ Example:
+ global
+ hard-stop-after 30s
+
+ See also: grace
+
+insecure-fork-wanted
+ By default HAProxy tries hard to prevent any thread and process creation
+ after it starts. Doing so is particularly important when using Lua files of
+ uncertain origin, and when experimenting with development versions which may
+ still contain bugs whose exploitability is uncertain. And generally speaking
+ it's good hygiene to make sure that no unexpected background activity can be
+ triggered by traffic. But this prevents external checks from working, and may
+ break some very specific Lua scripts which actively rely on the ability to
+ fork. This option is there to disable this protection. Note that it is a bad
+ idea to disable it, as a vulnerability in a library or within HAProxy itself
+ will be easier to exploit once disabled. In addition, forking from Lua or
+ anywhere else is not reliable as the forked process may randomly embed a lock
+ set by another thread and never manage to finish an operation. As such it is
+ highly recommended that this option is never used and that any workload
+ requiring such a fork be reconsidered and moved to a safer solution (such as
+ agents instead of external checks). This option supports the "no" prefix to
+ disable it.
+
+insecure-setuid-wanted
+ HAProxy doesn't need to call executables at run time (except when using
+ external checks which are strongly recommended against), and is even expected
+ to isolate itself into an empty chroot. As such, there basically is no valid
+ reason to allow a setuid executable to be called without the user being fully
+ aware of the risks. In a situation where HAProxy would need to call external
+ checks and/or disable chroot, exploiting a vulnerability in a library or in
+ HAProxy itself could lead to the execution of an external program. On Linux
+ it is possible to lock the process so that any setuid bit present on such an
+ executable is ignored. This significantly reduces the risk of privilege
+ escalation in such a situation. This is what HAProxy does by default. In case
+ this causes a problem to an external check (for example one which would need
+ the "ping" command), then it is possible to disable this protection by
+ explicitly adding this directive in the global section. If enabled, it is
+ possible to turn it back off by prefixing it with the "no" keyword.
+
+issuers-chain-path <dir>
+ Assigns a directory to load certificate chain for issuer completion. All
+ files must be in PEM format. For certificates loaded with "crt" or "crt-list",
+ if certificate chain is not included in PEM (also commonly known as
+ intermediate certificate), HAProxy will complete chain if the issuer of the
+ certificate corresponds to the first certificate of the chain loaded with
+ "issuers-chain-path".
+ A "crt" file with PrivateKey+Certificate+IntermediateCA2+IntermediateCA1
+ could be replaced with PrivateKey+Certificate. HAProxy will complete the
+ chain if a file with IntermediateCA2+IntermediateCA1 is present in
+ "issuers-chain-path" directory. All other certificates with the same issuer
+ will share the chain in memory.
+
+limited-quic
+ This setting must be used to explicitly enable the QUIC listener bindings when
+ haproxy is compiled against a TLS/SSL stack without QUIC support, typically
+ OpenSSL. It has no effect when haproxy is compiled against a TLS/SSL stack
+ with QUIC support, quictls for instance. Note that QUIC 0-RTT is not supported
+ when this setting is set.
+
+localpeer <name>
+ Sets the local instance's peer name. It will be ignored if the "-L"
+ command line argument is specified or if used after "peers" section
+ definitions. In such cases, a warning message will be emitted during
+ the configuration parsing.
+
+ This option will also set the HAPROXY_LOCALPEER environment variable.
+ See also "-L" in the management guide and "peers" section below.
+
+log <target> [len <length>] [format <format>] [sample <ranges>:<sample_size>]
+ <facility> [max level [min level]]
+ Adds a global syslog server. Several global servers can be defined. They
+ will receive logs for starts and exits, as well as all logs from proxies
+ configured with "log global". See "log" option for proxies for more details.
+
+log-send-hostname [<string>]
+ Sets the hostname field in the syslog header. If optional "string" parameter
+ is set the header is set to the string contents, otherwise uses the hostname
+ of the system. Generally used if one is not relaying logs through an
+ intermediate syslog server or for simply customizing the hostname printed in
+ the logs.
+
+log-tag <string>
+ Sets the tag field in the syslog header to this string. It defaults to the
+ program name as launched from the command line, which usually is "haproxy".
+ Sometimes it can be useful to differentiate between multiple processes
+ running on the same host. See also the per-proxy "log-tag" directive.
+
+lua-load <file> [ <arg1> [ <arg2> [ ... ] ] ]
+ This global directive loads and executes a Lua file in the shared context
+ that is visible to all threads. Any variable set in such a context is visible
+ from any thread. This is the easiest and recommended way to load Lua programs
+ but it will not scale well if a lot of Lua calls are performed, as only one
+ thread may be running on the global state at a time. A program loaded this
+ way will always see 0 in the "core.thread" variable. This directive can be
+ used multiple times.
+
+ args are available in the lua file using the code below in the body of the
+ file. Do not forget that Lua arrays start at index 1. A "local" variable
+ declared in a file is available in the entire file and not available on
+ other files.
+
+ local args = table.pack(...)
+
+lua-load-per-thread <file> [ <arg1> [ <arg2> [ ... ] ] ]
+ This global directive loads and executes a Lua file into each started thread.
+ Any global variable has a thread-local visibility so that each thread could
+ see a different value. As such it is strongly recommended not to use global
+ variables in programs loaded this way. An independent copy is loaded and
+ initialized for each thread, everything is done sequentially and in the
+ thread's numeric order from 1 to nbthread. If some operations need to be
+ performed only once, the program should check the "core.thread" variable to
+ figure what thread is being initialized. Programs loaded this way will run
+ concurrently on all threads and will be highly scalable. This is the
+ recommended way to load simple functions that register sample-fetches,
+ converters, actions or services once it is certain the program doesn't depend
+ on global variables. For the sake of simplicity, the directive is available
+ even if only one thread is used and even if threads are disabled (in which
+ case it will be equivalent to lua-load). This directive can be used multiple
+ times.
+
+ See lua-load for usage of args.
+
+lua-prepend-path <string> [<type>]
+ Prepends the given string followed by a semicolon to Lua's package.<type>
+ variable.
+ <type> must either be "path" or "cpath". If <type> is not given it defaults
+ to "path".
+
+ Lua's paths are semicolon delimited lists of patterns that specify how the
+ `require` function attempts to find the source file of a library. Question
+ marks (?) within a pattern will be replaced by module name. The path is
+ evaluated left to right. This implies that paths that are prepended later
+ will be checked earlier.
+
+ As an example by specifying the following path:
+
+ lua-prepend-path /usr/share/haproxy-lua/?/init.lua
+ lua-prepend-path /usr/share/haproxy-lua/?.lua
+
+ When `require "example"` is being called Lua will first attempt to load the
+ /usr/share/haproxy-lua/example.lua script, if that does not exist the
+ /usr/share/haproxy-lua/example/init.lua will be attempted and the default
+ paths if that does not exist either.
+
+ See https://www.lua.org/pil/8.1.html for the details within the Lua
+ documentation.
+
+master-worker [no-exit-on-failure]
+ Master-worker mode. It is equivalent to the command line "-W" argument.
+ This mode will launch a "master" which will monitor the "workers". Using
+ this mode, you can reload HAProxy directly by sending a SIGUSR2 signal to
+ the master. The master-worker mode is compatible either with the foreground
+ or daemon mode.
+
+ By default, if a worker exits with a bad return code, in the case of a
+ segfault for example, all workers will be killed, and the master will leave.
+ It is convenient to combine this behavior with Restart=on-failure in a
+ systemd unit file in order to relaunch the whole process. If you don't want
+ this behavior, you must use the keyword "no-exit-on-failure".
+
+ See also "-W" in the management guide.
+
+mworker-max-reloads <number>
+ In master-worker mode, this option limits the number of times a worker can
+ survive a reload. If the worker did not leave after a reload, once its
+ number of reloads is greater than this number, the worker will receive a
+ SIGTERM. This option helps to keep under control the number of workers.
+ See also "show proc" in the Management Guide.
+
+nbthread <number>
+ This setting is only available when support for threads was built in. It
+ makes HAProxy run on <number> threads. "nbthread" also works when HAProxy is
+ started in foreground. On some platforms supporting CPU affinity, the default
+ "nbthread" value is automatically set to the number of CPUs the process is
+ bound to upon startup. This means that the thread count can easily be
+ adjusted from the calling process using commands like "taskset" or "cpuset".
+ Otherwise, this value defaults to 1. The default value is reported in the
+ output of "haproxy -vv".
+
+no-quic
+ Disable QUIC transport protocol. All the QUIC listeners will still be created.
+ But they will not bind their addresses. Hence, no QUIC traffic will be
+ processed by haproxy. See also "quic_enabled" sample fetch.
+
+numa-cpu-mapping
+ If running on a NUMA-aware platform, HAProxy inspects on startup the CPU
+ topology of the machine. If a multi-socket machine is detected, the affinity
+ is automatically calculated to run on the CPUs of a single node. This is done
+ in order to not suffer from the performance penalties caused by the
+ inter-socket bus latency. However, if the applied binding is non optimal on a
+ particular architecture, it can be disabled with the statement 'no
+ numa-cpu-mapping'. This automatic binding is also not applied if a nbthread
+ statement is present in the configuration, or the affinity of the process is
+ already specified, for example via the 'cpu-map' directive or the taskset
+ utility.
+
+pidfile <pidfile>
+ Writes PIDs of all daemons into file <pidfile> when daemon mode or writes PID
+ of master process into file <pidfile> when master-worker mode. This option is
+ equivalent to the "-p" command line argument. The file must be accessible to
+ the user starting the process. See also "daemon" and "master-worker".
+
+pp2-never-send-local
+ A bug in the PROXY protocol v2 implementation was present in HAProxy up to
+ version 2.1, causing it to emit a PROXY command instead of a LOCAL command
+ for health checks. This is particularly minor but confuses some servers'
+ logs. Sadly, the bug was discovered very late and revealed that some servers
+ which possibly only tested their PROXY protocol implementation against
+ HAProxy fail to properly handle the LOCAL command, and permanently remain in
+ the "down" state when HAProxy checks them. When this happens, it is possible
+ to enable this global option to revert to the older (bogus) behavior for the
+ time it takes to contact the affected components' vendors and get them fixed.
+ This option is disabled by default and acts on all servers having the
+ "send-proxy-v2" statement.
+
+presetenv <name> <value>
+ Sets environment variable <name> to value <value>. If the variable exists, it
+ is NOT overwritten. The changes immediately take effect so that the next line
+ in the configuration file sees the new value. See also "setenv", "resetenv",
+ and "unsetenv".
+
+prealloc-fd
+ Performs a one-time open of the maximum file descriptor which results in a
+ pre-allocation of the kernel's data structures. This prevents short pauses
+ when nbthread>1 and HAProxy opens a file descriptor which requires the kernel
+ to expand its data structures.
+
+resetenv [<name> ...]
+ Removes all environment variables except the ones specified in argument. It
+ allows to use a clean controlled environment before setting new values with
+ setenv or unsetenv. Please note that some internal functions may make use of
+ some environment variables, such as time manipulation functions, but also
+ OpenSSL or even external checks. This must be used with extreme care and only
+ after complete validation. The changes immediately take effect so that the
+ next line in the configuration file sees the new environment. See also
+ "setenv", "presetenv", and "unsetenv".
+
+server-state-base <directory>
+ Specifies the directory prefix to be prepended in front of all servers state
+ file names which do not start with a '/'. See also "server-state-file",
+ "load-server-state-from-file" and "server-state-file-name".
+
+server-state-file <file>
+ Specifies the path to the file containing state of servers. If the path starts
+ with a slash ('/'), it is considered absolute, otherwise it is considered
+ relative to the directory specified using "server-state-base" (if set) or to
+ the current directory. Before reloading HAProxy, it is possible to save the
+ servers' current state using the stats command "show servers state". The
+ output of this command must be written in the file pointed by <file>. When
+ starting up, before handling traffic, HAProxy will read, load and apply state
+ for each server found in the file and available in its current running
+ configuration. See also "server-state-base" and "show servers state",
+ "load-server-state-from-file" and "server-state-file-name"
+
+set-dumpable
+ This option is better left disabled by default and enabled only upon a
+ developer's request. If it has been enabled, it may still be forcibly
+ disabled by prefixing it with the "no" keyword. It has no impact on
+ performance nor stability but will try hard to re-enable core dumps that were
+ possibly disabled by file size limitations (ulimit -f), core size limitations
+ (ulimit -c), or "dumpability" of a process after changing its UID/GID (such
+ as /proc/sys/fs/suid_dumpable on Linux). Core dumps might still be limited by
+ the current directory's permissions (check what directory the file is started
+ from), the chroot directory's permission (it may be needed to temporarily
+ disable the chroot directive or to move it to a dedicated writable location),
+ or any other system-specific constraint. For example, some Linux flavours are
+ notorious for replacing the default core file with a path to an executable
+ not even installed on the system (check /proc/sys/kernel/core_pattern). Often,
+ simply writing "core", "core.%p" or "/var/log/core/core.%p" addresses the
+ issue. When trying to enable this option waiting for a rare issue to
+ re-appear, it's often a good idea to first try to obtain such a dump by
+ issuing, for example, "kill -11" to the "haproxy" process and verify that it
+ leaves a core where expected when dying.
+
+set-var <var-name> <expr>
+ Sets the process-wide variable '<var-name>' to the result of the evaluation
+ of the sample expression <expr>. The variable '<var-name>' may only be a
+ process-wide variable (using the 'proc.' prefix). It works exactly like the
+ 'set-var' action in TCP or HTTP rules except that the expression is evaluated
+ at configuration parsing time and that the variable is instantly set. The
+ sample fetch functions and converters permitted in the expression are only
+ those using internal data, typically 'int(value)' or 'str(value)'. It is
+ possible to reference previously allocated variables as well. These variables
+ will then be readable (and modifiable) from the regular rule sets.
+
+ Example:
+ global
+ set-var proc.current_state str(primary)
+ set-var proc.prio int(100)
+ set-var proc.threshold int(200),sub(proc.prio)
+
+set-var-fmt <var-name> <fmt>
+ Sets the process-wide variable '<var-name>' to the string resulting from the
+ evaluation of the log-format <fmt>. The variable '<var-name>' may only be a
+ process-wide variable (using the 'proc.' prefix). It works exactly like the
+ 'set-var-fmt' action in TCP or HTTP rules except that the expression is
+ evaluated at configuration parsing time and that the variable is instantly
+ set. The sample fetch functions and converters permitted in the expression
+ are only those using internal data, typically 'int(value)' or 'str(value)'.
+ It is possible to reference previously allocated variables as well. These
+ variables will then be readable (and modifiable) from the regular rule sets.
+ Please see section 8.2.4 for details on the log-format syntax.
+
+ Example:
+ global
+ set-var-fmt proc.current_state "primary"
+ set-var-fmt proc.bootid "%pid|%t"
+
+setcap <name>[,<name>...]
+ Sets a list of capabilities that must be preserved when starting with uid 0
+ and switching to a non-zero uid. By default all permissions are lost by the
+ uid switch, but some are often needed when trying to connect to a server from
+ a foreign address during transparent proxying, or when binding to a port
+ below 1024, e.g. when using "tune.quic.socket-owner connection", resulting in
+ setups running entirely under uid 0. Setting capabilities generally is a
+ safer alternative, as only the required capabilities will be preserved. The
+ feature is OS-specific and only enabled on Linux when USE_LINUX_CAP=1 is set
+ at build time. The list of supported capabilities also depends on the OS and
+ is enumerated by the error message displayed when an invalid capability name
+ or an empty one is passed. Multiple capabilities may be passed, delimited by
+ commas. Among those commonly used, "cap_net_raw" allows to transparently bind
+ to a foreign address, and "cap_net_bind_service" allows to bind to a
+ privileged port and may be used by QUIC.
+
+setenv <name> <value>
+ Sets environment variable <name> to value <value>. If the variable exists, it
+ is overwritten. The changes immediately take effect so that the next line in
+ the configuration file sees the new value. See also "presetenv", "resetenv",
+ and "unsetenv".
+
+ssl-default-bind-ciphers <ciphers>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default string describing the list of cipher algorithms ("cipher suite")
+ that are negotiated during the SSL/TLS handshake up to TLSv1.2 for all
+ "bind" lines which do not explicitly define theirs. The format of the string
+ is defined in "man 1 ciphers" from OpenSSL man pages. For background
+ information and recommendations see e.g.
+ (https://wiki.mozilla.org/Security/Server_Side_TLS) and
+ (https://mozilla.github.io/server-side-tls/ssl-config-generator/). For TLSv1.3
+ cipher configuration, please check the "ssl-default-bind-ciphersuites" keyword.
+ Please check the "bind" keyword for more information.
+
+ssl-default-bind-ciphersuites <ciphersuites>
+ This setting is only available when support for OpenSSL was built in and
+ OpenSSL 1.1.1 or later was used to build HAProxy. It sets the default string
+ describing the list of cipher algorithms ("cipher suite") that are negotiated
+ during the TLSv1.3 handshake for all "bind" lines which do not explicitly define
+ theirs. The format of the string is defined in
+ "man 1 ciphers" from OpenSSL man pages under the section "ciphersuites". For
+ cipher configuration for TLSv1.2 and earlier, please check the
+ "ssl-default-bind-ciphers" keyword. Please check the "bind" keyword for more
+ information.
+
+ssl-default-bind-client-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default string describing the list of signature algorithms related to
+ client authentication for all "bind" lines which do not explicitly define
+ theirs. The format of the string is a colon-delimited list of signature
+ algorithms. Each signature algorithm can use one of two forms: TLS1.3 signature
+ scheme names ("rsa_pss_rsae_sha256") or the public key algorithm + digest form
+ ("ECDSA+SHA256"). A list can contain both forms. For more information on the
+ format, see SSL_CTX_set1_client_sigalgs(3). A list of signature algorithms is
+ also available in RFC8446 section 4.2.3 and in OpenSSL in the ssl/t1_lib.c
+ file. This setting is not applicable to TLSv1.1 and earlier versions of the
+ protocol as the signature algorithms aren't separately negotiated in these
+ versions. It is not recommended to change this setting unless compatibility
+ with a middlebox is required.
+
+ssl-default-bind-curves <curves>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default string describing the list of elliptic curves algorithms ("curve
+ suite") that are negotiated during the SSL/TLS handshake with ECDHE. The format
+ of the string is a colon-delimited list of curve names.
+ Please check the "bind" keyword for more information.
+
+ssl-default-bind-options [<option>]...
+ This setting is only available when support for OpenSSL was built in. It sets
+ default ssl-options to force on all "bind" lines. Please check the "bind"
+ keyword to see available options.
+
+ Example:
+ global
+ ssl-default-bind-options ssl-min-ver TLSv1.0 no-tls-tickets
+
+ssl-default-bind-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It
+ sets the default string describing the list of signature algorithms that
+ are negotiated during the TLSv1.2 and TLSv1.3 handshake for all "bind" lines
+ which do not explicitly define theirs. The format of the string is a
+ colon-delimited list of signature algorithms. Each signature algorithm can
+ use one of two forms: TLS1.3 signature scheme names ("rsa_pss_rsae_sha256")
+ or the public key algorithm + digest form ("ECDSA+SHA256"). A list
+ can contain both forms. For more information on the format,
+ see SSL_CTX_set1_sigalgs(3). A list of signature algorithms is also
+ available in RFC8446 section 4.2.3 and in OpenSSL in the ssl/t1_lib.c file.
+ This setting is not applicable to TLSv1.1 and earlier versions of the
+ protocol as the signature algorithms aren't separately negotiated in these
+ versions. It is not recommended to change this setting unless compatibility
+ with a middlebox is required.
+
+ssl-default-server-ciphers <ciphers>
+ This setting is only available when support for OpenSSL was built in. It
+ sets the default string describing the list of cipher algorithms that are
+ negotiated during the SSL/TLS handshake up to TLSv1.2 with the server,
+ for all "server" lines which do not explicitly define theirs. The format of
+ the string is defined in "man 1 ciphers" from OpenSSL man pages. For background
+ information and recommendations see e.g.
+ (https://wiki.mozilla.org/Security/Server_Side_TLS) and
+ (https://mozilla.github.io/server-side-tls/ssl-config-generator/).
+ For TLSv1.3 cipher configuration, please check the
+ "ssl-default-server-ciphersuites" keyword. Please check the "server" keyword
+ for more information.
+
+ssl-default-server-ciphersuites <ciphersuites>
+ This setting is only available when support for OpenSSL was built in and
+ OpenSSL 1.1.1 or later was used to build HAProxy. It sets the default
+ string describing the list of cipher algorithms that are negotiated during
+ the TLSv1.3 handshake with the server, for all "server" lines which do not
+ explicitly define theirs. The format of the string is defined in
+ "man 1 ciphers" from OpenSSL man pages under the section "ciphersuites". For
+ cipher configuration for TLSv1.2 and earlier, please check the
+ "ssl-default-server-ciphers" keyword. Please check the "server" keyword for
+ more information.
+
+ssl-default-server-client-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default string describing the list of signature algorithms related to
+ client authentication for all "server" lines which do not explicitly define
+ theirs. The format of the string is a colon-delimited list of signature
+ algorithms. Each signature algorithm can use one of two forms: TLS1.3 signature
+ scheme names ("rsa_pss_rsae_sha256") or the public key algorithm + digest form
+ ("ECDSA+SHA256"). A list can contain both forms. For more information on the
+ format, see SSL_CTX_set1_client_sigalgs(3). A list of signature algorithms is
+ also available in RFC8446 section 4.2.3 and in OpenSSL in the ssl/t1_lib.c
+ file. This setting is not applicable to TLSv1.1 and earlier versions of the
+ protocol as the signature algorithms aren't separately negotiated in these
+ versions. It is not recommended to change this setting unless compatibility
+ with a middlebox is required.
+
+ssl-default-server-curves <curves>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default string describing the list of elliptic curves algorithms ("curve
+ suite") that are negotiated during the SSL/TLS handshake with ECDHE. The format
+ of the string is a colon-delimited list of curve names.
+ Please check the "server" keyword for more information.
+
+ssl-default-server-options [<option>]...
+ This setting is only available when support for OpenSSL was built in. It sets
+ default ssl-options to force on all "server" lines. Please check the "server"
+ keyword to see available options.
+
+ssl-default-server-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It
+ sets the default string describing the list of signature algorithms that
+ are negotiated during the TLSv1.2 and TLSv1.3 handshake for all "server" lines
+ which do not explicitly define theirs. The format of the string is a
+ colon-delimited list of signature algorithms. Each signature algorithm can
+ use one of two forms: TLS1.3 signature scheme names ("rsa_pss_rsae_sha256")
+ or the public key algorithm + digest form ("ECDSA+SHA256"). A list
+ can contain both forms. For more information on the format,
+ see SSL_CTX_set1_sigalgs(3). A list of signature algorithms is also
+ available in RFC8446 section 4.2.3 and in OpenSSL in the ssl/t1_lib.c file.
+ This setting is not applicable to TLSv1.1 and earlier versions of the
+ protocol as the signature algorithms aren't separately negotiated in these
+ versions. It is not recommended to change this setting unless compatibility
+ with a middlebox is required.
+
+ssl-dh-param-file <file>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the default DH parameters that are used during the SSL/TLS handshake when
+ ephemeral Diffie-Hellman (DHE) key exchange is used, for all "bind" lines
+ which do not explicitly define theirs. It will be overridden by custom DH
+ parameters found in a bind certificate file if any. If custom DH parameters
+ are not specified either by using ssl-dh-param-file or by setting them
+ directly in the certificate file, DHE ciphers will not be used, unless
+ tune.ssl.default-dh-param is set. In this latter case, pre-defined DH
+ parameters of the specified size will be used. Custom parameters are known to
+ be more secure and therefore their use is recommended.
+ Custom DH parameters may be generated by using the OpenSSL command
+ "openssl dhparam <size>", where size should be at least 2048, as 1024-bit DH
+ parameters should not be considered secure anymore.
+
+ssl-propquery <query>
+ This setting is only available when support for OpenSSL was built in and when
+ OpenSSL's version is at least 3.0. It allows to define a default property
+ string used when fetching algorithms in providers. It behaves the same way as
+ the openssl propquery option and it follows the same syntax (described in
+ https://www.openssl.org/docs/man3.0/man7/property.html). For instance, if you
+ have two providers loaded, the foo one and the default one, the propquery
+ "?provider=foo" allows to pick the algorithm implementations provided by the
+ foo provider by default, and to fallback on the default provider's one if it
+ was not found.
+
+ssl-provider <name>
+ This setting is only available when support for OpenSSL was built in and when
+ OpenSSL's version is at least 3.0. It allows to load a provider during init.
+ If loading is successful, any capabilities provided by the loaded provider
+ might be used by HAProxy. Multiple 'ssl-provider' options can be specified in
+ a configuration file. The providers will be loaded in their order of
+ appearance.
+
+ Please note that loading a provider explicitly prevents OpenSSL from loading
+ the 'default' provider automatically. OpenSSL also allows to define the
+ providers that should be loaded directly in its configuration file
+ (openssl.cnf for instance) so it is not necessary to use this 'ssl-provider'
+ option to load providers. The "show ssl providers" CLI command can be used to
+ show all the providers that were successfully loaded.
+
+ The default search path of OpenSSL provider can be found in the output of the
+ "openssl version -a" command. If the provider is in another directory, you
+ can set the OPENSSL_MODULES environment variable, which takes the directory
+ where your provider can be found.
+
+ See also "ssl-propquery" and "ssl-provider-path".
+
+ssl-provider-path <path>
+ This setting is only available when support for OpenSSL was built in and when
+ OpenSSL's version is at least 3.0. It allows to specify the search path that
+ is to be used by OpenSSL for looking for providers. It behaves the same way
+ as the OPENSSL_MODULES environment variable. It will be used for any
+ following 'ssl-provider' option or until a new 'ssl-provider-path' is
+ defined.
+ See also "ssl-provider".
+
+ssl-load-extra-del-ext
+ This setting allows to configure the way HAProxy does the lookup for the
+ extra SSL files. By default HAProxy adds a new extension to the filename.
+ (ex: with "foobar.crt" load "foobar.crt.key"). With this option enabled,
+ HAProxy removes the extension before adding the new one (ex: with
+ "foobar.crt" load "foobar.key").
+
+ Your crt file must have a ".crt" extension for this option to work.
+
+ This option is not compatible with bundle extensions (.ecdsa, .rsa, .dsa)
+ and won't try to remove them.
+
+ This option is disabled by default. See also "ssl-load-extra-files".
+
+ssl-load-extra-files <none|all|bundle|sctl|ocsp|issuer|key>*
+ This setting alters the way HAProxy will look for unspecified files during
+ the loading of the SSL certificates. This option applies to certificates
+ associated to "bind" lines as well as "server" lines but some of the extra
+ files will not have any functional impact for "server" line certificates.
+
+ By default, HAProxy discovers automatically a lot of files not specified in
+ the configuration, and you may want to disable this behavior if you want to
+ optimize the startup time.
+
+ "none": Only load the files specified in the configuration. Don't try to load
+ a certificate bundle if the file does not exist. In the case of a directory,
+ it won't try to bundle the certificates if they have the same basename.
+
+ "all": This is the default behavior, it will try to load everything,
+ bundles, sctl, ocsp, issuer, key.
+
+ "bundle": When a file specified in the configuration does not exist, HAProxy
+ will try to load a "cert bundle". Certificate bundles are only managed on the
+ frontend side and will not work for backend certificates.
+
+ Starting from HAProxy 2.3, the bundles are not loaded in the same OpenSSL
+ certificate store, instead it will load each certificate in a separate
+ store which is equivalent to declaring multiple "crt". OpenSSL 1.1.1 is
+ required to achieve this. Which means that bundles are now used only for
+ backward compatibility and are not mandatory anymore to do a hybrid RSA/ECC
+ bind configuration.
+
+ To associate these PEM files into a "cert bundle" that is recognized by
+ HAProxy, they must be named in the following way: All PEM files that are to
+ be bundled must have the same base name, with a suffix indicating the key
+ type. Currently, three suffixes are supported: rsa, dsa and ecdsa. For
+ example, if www.example.com has two PEM files, an RSA file and an ECDSA
+ file, they must be named: "example.pem.rsa" and "example.pem.ecdsa". The
+ first part of the filename is arbitrary; only the suffix matters. To load
+ this bundle into HAProxy, specify the base name only:
+
+ Example : bind :8443 ssl crt example.pem
+
+ Note that the suffix is not given to HAProxy; this tells HAProxy to look for
+ a cert bundle.
+
+ HAProxy will load all PEM files in the bundle as if they were configured
+ separately in several "crt".
+
+ The bundle loading does not have an impact anymore on the directory loading
+ since files are loaded separately.
+
+ On the CLI, bundles are seen as separate files, and the bundle extension is
+ required to commit them.
+
+ OCSP files (.ocsp), issuer files (.issuer), Certificate Transparency (.sctl)
+ as well as private keys (.key) are supported with multi-cert bundling.
+
+ "sctl": Try to load "<basename>.sctl" for each crt keyword. If provided for
+ a backend certificate, it will be loaded but will not have any functional
+ impact.
+
+ "ocsp": Try to load "<basename>.ocsp" for each crt keyword. If provided for
+ a backend certificate, it will be loaded but will not have any functional
+ impact.
+
+ "issuer": Try to load "<basename>.issuer" if the issuer of the OCSP file is
+ not provided in the PEM file. If provided for a backend certificate, it will
+ be loaded but will not have any functional impact.
+
+ "key": If the private key was not provided by the PEM file, try to load a
+ file "<basename>.key" containing a private key.
+
+ The default behavior is "all".
+
+ Example:
+ ssl-load-extra-files bundle sctl
+ ssl-load-extra-files sctl ocsp issuer
+ ssl-load-extra-files none
+
+ See also: "crt", section 5.1 about bind options and section 5.2 about server
+ options.
+
+ssl-server-verify [none|required]
+ The default behavior for SSL verify on servers side. If specified to 'none',
+ servers certificates are not verified. The default is 'required' except if
+ forced using cmdline option '-dV'.
+
+ssl-skip-self-issued-ca
+ A self-issued CA, aka an x509 root CA, is the anchor for chain validation:
+ since it is useless for a server to send it, the client must already have it.
+ A standard configuration does not need to include such a CA in the PEM file.
+ This option allows you to keep such a
+ CA in PEM file without sending it to the client. Use case is to provide
+ issuer for ocsp without the need for '.issuer' file and be able to share it
+ with 'issuers-chain-path'. This concerns all certificates without intermediate
+ certificates. It's useless for BoringSSL, .issuer is ignored because ocsp
+ bits does not need it. Requires at least OpenSSL 1.0.2.
+
+stats maxconn <connections>
+ By default, the stats socket is limited to 10 concurrent connections. It is
+ possible to change this value with "stats maxconn".
+
+stats socket [<address:port>|<path>] [param*]
+ Binds a UNIX socket to <path> or a TCPv4/v6 address to <address:port>.
+ Connections to this socket will return various statistics outputs and even
+ allow some commands to be issued to change some runtime settings. Please
+ consult section 9.3 "Unix Socket commands" of Management Guide for more
+ details.
+
+ All parameters supported by "bind" lines are supported, for instance to
+ restrict access to some users or their access rights. Please consult
+ section 5.1 for more information.
+
+stats timeout <timeout, in milliseconds>
+ The default timeout on the stats socket is set to 10 seconds. It is possible
+ to change this value with "stats timeout". The value must be passed in
+ milliseconds, or be suffixed by a time unit among { us, ms, s, m, h, d }.
+
+strict-limits
+ Makes process fail at startup when a setrlimit fails. HAProxy tries to set the
+ best setrlimit according to what has been calculated. If it fails, it will
+ emit a warning. This option is here to guarantee an explicit failure of
+ HAProxy when those limits fail. It is enabled by default. It may still be
+ forcibly disabled by prefixing it with the "no" keyword.
+
+thread-group <group> [<thread-range>...]
+ This setting is only available when support for threads was built in. It
+ enumerates the list of threads that will compose thread group <group>.
+ Thread numbers and group numbers start at 1. Thread ranges are defined either
+ using a single thread number at once, or by specifying the lower and upper
+ bounds delimited by a dash '-' (e.g. "1-16"). Unassigned threads will be
+ automatically assigned to unassigned thread groups, and thread groups
+ defined with this directive will never receive more threads than those
+ defined. Defining the same group multiple times overrides previous
+ definitions with the new one. See also "nbthread" and "thread-groups".
+
+thread-groups <number>
+ This setting is only available when support for threads was built in. It
+ makes HAProxy split its threads into <number> independent groups. At the
+ moment, the default value is 1. Thread groups make it possible to reduce
+ sharing between threads to limit contention, at the expense of some extra
+ configuration efforts. It is also the only way to use more than 64 threads
+ since up to 64 threads per group may be configured. The maximum number of
+ groups is configured at compile time and defaults to 16. See also "nbthread".
+
+trace <args...>
+ This command configures one "trace" subsystem statement. Each of them can be
+ found in the management manual, and follow the exact same syntax. Only one
+ statement per line is permitted (i.e. if some long trace configurations using
+ semi-colons are to be imported, they must be placed one per line). Any output
+ that the "trace" command would produce will be emitted during the parsing
+ step of the section. Most of the time these will be errors and warnings, but
+ certain incomplete commands might list permissible choices. This command is
+ not meant for regular use, it will generally only be suggested by developers
+ along complex debugging sessions. For this reason it is internally marked as
+ experimental, meaning that "expose-experimental-directives" must appear on a
+ line before any "trace" statement. Note that these directives are parsed on
+ the fly, so referencing a ring buffer that is only declared further will not
+ work. For such use cases it is suggested to place another "global" section
+ with only the "trace" statements after the declaration of that ring. It is
+ important to keep in mind that depending on the trace level and details,
+ enabling traces can severely degrade the global performance. Please refer to
+ the management manual for the statements syntax.
+
+uid <number>
+ Changes the process's user ID to <number>. It is recommended that the user ID
+ is dedicated to HAProxy or to a small set of similar daemons. HAProxy must
+ be started with superuser privileges in order to be able to switch to another
+ one. See also "gid" and "user".
+
+ulimit-n <number>
+ Sets the maximum number of per-process file-descriptors to <number>. By
+ default, it is automatically computed, so it is recommended not to use this
+ option. If the intent is only to limit the number of file descriptors, better
+ use "fd-hard-limit" instead.
+
+ Note that the dynamic servers are not taken into account in this automatic
+ resource calculation. If using a large number of them, it may be needed to
+ manually specify this value.
+
+ See also: fd-hard-limit, maxconn
+
+unix-bind [ prefix <prefix> ] [ mode <mode> ] [ user <user> ] [ uid <uid> ]
+ [ group <group> ] [ gid <gid> ]
+
+ Fixes common settings to UNIX listening sockets declared in "bind" statements.
+ This is mainly used to simplify declaration of those UNIX sockets and reduce
+ the risk of errors, since those settings are most commonly required but are
+ also process-specific. The <prefix> setting can be used to force all socket
+ path to be relative to that directory. This might be needed to access another
+ component's chroot. Note that those paths are resolved before HAProxy chroots
+ itself, so they are absolute. The <mode>, <user>, <uid>, <group> and <gid>
+ all have the same meaning as their homonyms used by the "bind" statement. If
+ both are specified, the "bind" statement has priority, meaning that the
+ "unix-bind" settings may be seen as process-wide default settings.
+
+unsetenv [<name> ...]
+ Removes environment variables specified in arguments. This can be useful to
+ hide some sensitive information that is occasionally inherited from the
+ user's environment during some operations. Variables which did not exist are
+ silently ignored so that after the operation, it is certain that none of
+ these variables remain. The changes immediately take effect so that the next
+ line in the configuration file will not see these variables. See also
+ "setenv", "presetenv", and "resetenv".
+
+user <user name>
+ Similar to "uid" but uses the UID of user name <user name> from /etc/passwd.
+ See also "uid" and "group".
+
+node <name>
+ Only letters, digits, hyphen and underscore are allowed, like in DNS names.
+
+ This statement is useful in HA configurations where two or more processes or
+ servers share the same IP address. By setting a different node-name on all
+ nodes, it becomes easy to immediately spot what server is handling the
+ traffic.
+
+wurfl-cache-size <size>
+ Sets the WURFL Useragent cache size. For faster lookups, already processed user
+ agents are kept in a LRU cache :
+ - "0" : no cache is used.
+ - <size> : size of lru cache in elements.
+
+ Please note that this option is only available when HAProxy has been compiled
+ with USE_WURFL=1.
+
+wurfl-data-file <file path>
+ The path of the WURFL data file to provide device detection services. The
+ file should be accessible by HAProxy with relevant permissions.
+
+ Please note that this option is only available when HAProxy has been compiled
+ with USE_WURFL=1.
+
+wurfl-information-list [<capability>]*
+ A space-delimited list of WURFL capabilities, virtual capabilities, property
+ names we plan to use in injected headers. A full list of capability and
+ virtual capability names is available on the Scientiamobile website :
+
+ https://www.scientiamobile.com/wurflCapability
+
+ Valid WURFL properties are:
+ - wurfl_id Contains the device ID of the matched device.
+
+ - wurfl_root_id Contains the device root ID of the matched
+ device.
+
+ - wurfl_isdevroot Tells if the matched device is a root device.
+ Possible values are "TRUE" or "FALSE".
+
+ - wurfl_useragent The original useragent coming with this
+ particular web request.
+
+ - wurfl_api_version Contains a string representing the currently
+ used Libwurfl API version.
+
+ - wurfl_info A string containing information on the parsed
+ wurfl.xml and its full path.
+
+ - wurfl_last_load_time Contains the UNIX timestamp of the last time
+ WURFL has been loaded successfully.
+
+ - wurfl_normalized_useragent The normalized useragent.
+
+ Please note that this option is only available when HAProxy has been compiled
+ with USE_WURFL=1.
+
+wurfl-information-list-separator <char>
+ A char that will be used to separate values in a response header containing
+ WURFL results. If not set, a comma (',') will be used by default.
+
+ Please note that this option is only available when HAProxy has been compiled
+ with USE_WURFL=1.
+
+wurfl-patch-file [<file path>]
+ A list of WURFL patch file paths. Note that patches are loaded during startup
+ thus before the chroot.
+
+ Please note that this option is only available when HAProxy has been compiled
+ with USE_WURFL=1.
+
+3.2. Performance tuning
+-----------------------
+
+busy-polling
+ In some situations, especially when dealing with low latency on processors
+ supporting a variable frequency or when running inside virtual machines, each
+ time the process waits for an I/O using the poller, the processor goes back
+ to sleep or is offered to another VM for a long time, and it causes
+ excessively high latencies. This option provides a solution preventing the
+ processor from sleeping by always using a null timeout on the pollers. This
+ results in a significant latency reduction (30 to 100 microseconds observed)
+ at the expense of a risk to overheat the processor. It may even be used with
+ threads, in which case improperly bound threads may heavily conflict,
+ resulting in a worse performance and high values for the CPU stolen fields
+ in "show info" output, indicating which threads are misconfigured. It is
+ important not to let the process run on the same processor as the network
+ interrupts when this option is used. It is also better to avoid using it on
+ multiple CPU threads sharing the same core. This option is disabled by
+ default. If it has been enabled, it may still be forcibly disabled by
+ prefixing it with the "no" keyword. It is ignored by the "select" and
+ "poll" pollers.
+
+ This option is automatically disabled on old processes in the context of
+ seamless reload; it avoids too much cpu conflicts when multiple processes
+ stay around for some time waiting for the end of their current connections.
+
+max-spread-checks <delay in milliseconds>
+ By default, HAProxy tries to spread the start of health checks across the
+ smallest health check interval of all the servers in a farm. The principle is
+ to avoid hammering services running on the same server. But when using large
+ check intervals (10 seconds or more), the last servers in the farm take some
+ time before starting to be tested, which can be a problem. This parameter is
+ used to enforce an upper bound on delay between the first and the last check,
+ even if the servers' check intervals are larger. When servers run with
+ shorter intervals, their intervals will be respected though.
+
+maxcompcpuusage <number>
+ Sets the maximum CPU usage HAProxy can reach before stopping the compression
+ for new requests or decreasing the compression level of current requests.
+ It works like 'maxcomprate' but measures CPU usage instead of incoming data
+ bandwidth. The value is expressed in percent of the CPU used by HAProxy. A
+ value of 100 disables the limit. The default value is 100. Setting a lower
+ value will prevent the compression work from slowing the whole process down
+ and from introducing high latencies.
+
+maxcomprate <number>
+ Sets the maximum per-process input compression rate to <number> kilobytes
+ per second. For each stream, if the maximum is reached, the compression
+ level will be decreased during the stream. If the maximum is reached at the
+ beginning of a stream, the stream will not compress at all. If the maximum
+ is not reached, the compression level will be increased up to
+ tune.comp.maxlevel. A value of zero means there is no limit, this is the
+ default value.
+
+maxconn <number>
+ Sets the maximum per-process number of concurrent connections to <number>. It
+ is equivalent to the command-line argument "-n". Proxies will stop accepting
+ connections when this limit is reached. The "ulimit-n" parameter is
+ automatically adjusted according to this value. See also "ulimit-n". Note:
+ the "select" poller cannot reliably use more than 1024 file descriptors on
+ some platforms. If your platform only supports select and reports "select
+ FAILED" on startup, you need to reduce maxconn until it works (slightly
+ below 500 in general). If this value is not set, it will automatically be
+ calculated based on the current file descriptors limit reported by the
+ "ulimit -n" command, possibly reduced to a lower value if a memory limit
+ is enforced, based on the buffer size, memory allocated to compression, SSL
+ cache size, and use or not of SSL and the associated maxsslconn (which can
+ also be automatic). In any case, the fd-hard-limit applies if set.
+
+ See also: fd-hard-limit, ulimit-n
+
+maxconnrate <number>
+ Sets the maximum per-process number of connections per second to <number>.
+ Proxies will stop accepting connections when this limit is reached. It can be
+ used to limit the global capacity regardless of each frontend capacity. It is
+ important to note that this can only be used as a service protection measure,
+ as there will not necessarily be a fair share between frontends when the
+ limit is reached, so it's a good idea to also limit each frontend to some
+ value close to its expected share. Also, lowering tune.maxaccept can improve
+ fairness.
+
+maxpipes <number>
+ Sets the maximum per-process number of pipes to <number>. Currently, pipes
+ are only used by kernel-based tcp splicing. Since a pipe contains two file
+ descriptors, the "ulimit-n" value will be increased accordingly. The default
+ value is maxconn/4, which seems to be more than enough for most heavy usages.
+ The splice code dynamically allocates and releases pipes, and can fall back
+ to standard copy, so setting this value too low may only impact performance.
+
+maxsessrate <number>
+ Sets the maximum per-process number of sessions per second to <number>.
+ Proxies will stop accepting connections when this limit is reached. It can be
+ used to limit the global capacity regardless of each frontend capacity. It is
+ important to note that this can only be used as a service protection measure,
+ as there will not necessarily be a fair share between frontends when the
+ limit is reached, so it's a good idea to also limit each frontend to some
+ value close to its expected share. Also, lowering tune.maxaccept can improve
+ fairness.
+
+maxsslconn <number>
+ Sets the maximum per-process number of concurrent SSL connections to
+ <number>. By default there is no SSL-specific limit, which means that the
+ global maxconn setting will apply to all connections. Setting this limit
+ avoids having openssl use too much memory and crash when malloc returns NULL
+ (since it unfortunately does not reliably check for such conditions). Note
+ that the limit applies both to incoming and outgoing connections, so one
+ connection which is deciphered then ciphered accounts for 2 SSL connections.
+ If this value is not set, but a memory limit is enforced, this value will be
+ automatically computed based on the memory limit, maxconn, the buffer size,
+ memory allocated to compression, SSL cache size, and use of SSL in either
+ frontends, backends or both. If neither maxconn nor maxsslconn are specified
+ when there is a memory limit, HAProxy will automatically adjust these values
+ so that 100% of the connections can be made over SSL with no risk, and will
+ consider the sides where it is enabled (frontend, backend, both).
+
+maxsslrate <number>
+ Sets the maximum per-process number of SSL sessions per second to <number>.
+ SSL listeners will stop accepting connections when this limit is reached. It
+ can be used to limit the global SSL CPU usage regardless of each frontend
+ capacity. It is important to note that this can only be used as a service
+ protection measure, as there will not necessarily be a fair share between
+ frontends when the limit is reached, so it's a good idea to also limit each
+ frontend to some value close to its expected share. It is also important to
+ note that the sessions are accounted before they enter the SSL stack and not
+ after, which also protects the stack against bad handshakes. Also, lowering
+ tune.maxaccept can improve fairness.
+
+maxzlibmem <number>
+ Sets the maximum amount of RAM in megabytes per process usable by the zlib.
+ When the maximum amount is reached, future streams will not compress as long
+ as RAM is unavailable. When set to 0, there is no limit.
+ The default value is 0. The value is available in bytes on the UNIX socket
+ with "show info" on the line "MaxZlibMemUsage", the memory used by zlib is
+ "ZlibMemUsage" in bytes.
+
+no-memory-trimming
+ Disables memory trimming ("malloc_trim") at a few moments where attempts are
+ made to reclaim lots of memory (on memory shortage or on reload). Trimming
+ memory forces the system's allocator to scan all unused areas and to release
+ them. This is generally seen as a nice action to leave more available memory to
+ a new process while the old one is unlikely to make significant use of it.
+ But some systems dealing with tens to hundreds of thousands of concurrent
+ connections may experience a lot of memory fragmentation, that may render
+ this release operation extremely long. During this time, no more traffic
+ passes through the process, new connections are not accepted anymore, some
+ health checks may even fail, and the watchdog may even trigger and kill the
+ unresponsive process, leaving a huge core dump. If this ever happens, then it
+ is suggested to use this option to disable trimming and stop trying to be
+ nice with the new process. Note that advanced memory allocators usually do
+ not suffer from such a problem.
+
+noepoll
+ Disables the use of the "epoll" event polling system on Linux. It is
+ equivalent to the command-line argument "-de". The next polling system
+ used will generally be "poll". See also "nopoll".
+
+noevports
+ Disables the use of the event ports event polling system on SunOS systems
+ derived from Solaris 10 and later. It is equivalent to the command-line
+ argument "-dv". The next polling system used will generally be "poll". See
+ also "nopoll".
+
+nogetaddrinfo
+ Disables the use of getaddrinfo(3) for name resolving. It is equivalent to
+ the command line argument "-dG". Deprecated gethostbyname(3) will be used.
+
+nokqueue
+ Disables the use of the "kqueue" event polling system on BSD. It is
+ equivalent to the command-line argument "-dk". The next polling system
+ used will generally be "poll". See also "nopoll".
+
+nopoll
+ Disables the use of the "poll" event polling system. It is equivalent to the
+ command-line argument "-dp". The next polling system used will be "select".
+ It should never be needed to disable "poll" since it's available on all
+ platforms supported by HAProxy. See also "nokqueue", "noepoll" and
+ "noevports".
+
+noreuseport
+ Disables the use of SO_REUSEPORT - see socket(7). It is equivalent to the
+ command line argument "-dR".
+
+nosplice
+ Disables the use of kernel tcp splicing between sockets on Linux. It is
+ equivalent to the command line argument "-dS". Data will then be copied
+ using conventional and more portable recv/send calls. Kernel tcp splicing is
+ limited to some very recent instances of kernel 2.6. Most versions between
+ 2.6.25 and 2.6.28 are buggy and will forward corrupted data, so they must not
+ be used. This option makes it easier to globally disable kernel splicing in
+ case of doubt. See also "option splice-auto", "option splice-request" and
+ "option splice-response".
+
+profiling.memory { on | off }
+ Enables ('on') or disables ('off') per-function memory profiling. This will
+ keep usage statistics of malloc/calloc/realloc/free calls anywhere in the
+ process (including libraries) which will be reported on the CLI using the
+ "show profiling" command. This is essentially meant to be used when an
+ abnormal memory usage is observed that cannot be explained by the pools and
+ other info are required. The performance hit will typically be around 1%,
+ maybe a bit more on highly threaded machines, so it is normally suitable for
+ use in production. The same may be achieved at run time on the CLI using the
+ "set profiling memory" command, please consult the management manual.
+
+profiling.tasks { auto | on | off }
+ Enables ('on') or disables ('off') per-task CPU profiling. When set to 'auto'
+ the profiling automatically turns on a thread when it starts to suffer from
+ an average latency of 1000 microseconds or higher as reported in the
+ "avg_loop_us" activity field, and automatically turns off when the latency
+ returns below 990 microseconds (this value is an average over the last 1024
+ loops so it does not vary quickly and tends to significantly smooth short
+ spikes). It may also spontaneously trigger from time to time on overloaded
+ systems, containers, or virtual machines, or when the system swaps (which
+ must absolutely never happen on a load balancer).
+
+ CPU profiling per task can be very convenient to report where the time is
+ spent and which requests have what effect on which other request. Enabling
+ it will typically affect the overall performance by less than 1%, thus it
+ is recommended to leave it to the default 'auto' value so that it only
+ operates when a problem is identified. This feature requires a system
+ supporting the clock_gettime(2) syscall with clock identifiers
+ CLOCK_MONOTONIC and CLOCK_THREAD_CPUTIME_ID, otherwise the reported time will
+ be zero. This option may be changed at run time using "set profiling" on the
+ CLI.
+
+spread-checks <0..50, in percent>
+ Sometimes it is desirable to avoid sending agent and health checks to
+ servers at exact intervals, for instance when many logical servers are
+ located on the same physical server. With the help of this parameter, it
+ becomes possible to add some randomness in the check interval between 0
+ and +/- 50%. A value between 2 and 5 seems to show good results. The
+ default value remains at 0.
+
+ssl-engine <name> [algo <comma-separated list of algorithms>]
+ Sets the OpenSSL engine to <name>. List of valid values for <name> may be
+ obtained using the command "openssl engine". This statement may be used
+ multiple times, it will simply enable multiple crypto engines. Referencing an
+ unsupported engine will prevent HAProxy from starting. Note that many engines
+ will lead to lower HTTPS performance than pure software with recent
+ processors. The optional command "algo" sets the default algorithms an ENGINE
+ will supply using the OPENSSL function ENGINE_set_default_string(). A value
+ of "ALL" uses the engine for all cryptographic operations. If no list of
+ algo is specified then the value of "ALL" is used. A comma-separated list
+ of different algorithms may be specified, including: RSA, DSA, DH, EC, RAND,
+ CIPHERS, DIGESTS, PKEY, PKEY_CRYPTO, PKEY_ASN1. This is the same format that
+ openssl configuration file uses:
+ https://www.openssl.org/docs/man1.0.2/apps/config.html
+
+ HAProxy Version 2.6 disabled the support for engines in the default build.
+ This option is only available when HAProxy has been built with support for
+ it. In case the ssl-engine is required, HAProxy can be rebuilt with the
+ USE_ENGINE=1 flag.
+
+ssl-mode-async
+ Adds SSL_MODE_ASYNC mode to the SSL context. This enables asynchronous TLS
+ I/O operations if asynchronous capable SSL engines are used. The current
+ implementation supports a maximum of 32 engines. The Openssl ASYNC API
+ doesn't support moving read/write buffers and is not compliant with
+ HAProxy's buffer management. So the asynchronous mode is disabled on
+ read/write operations (it is only enabled during initial and renegotiation
+ handshakes).
+
+tune.buffers.limit <number>
+ Sets a hard limit on the number of buffers which may be allocated per process.
+ The default value is zero which means unlimited. The minimum non-zero value
+ will always be greater than "tune.buffers.reserve" and should ideally always
+ be about twice as large. Forcing this value can be particularly useful to
+ limit the amount of memory a process may take, while retaining a sane
+ behavior. When this limit is reached, streams which need a buffer wait for
+ another one to be released by another stream. Since buffers are dynamically
+ allocated and released, the waiting time is very short and not perceptible
+ provided that limits remain reasonable. In fact sometimes reducing the limit
+ may even increase performance by increasing the CPU cache's efficiency. Tests
+ have shown good results on average HTTP traffic with a limit to 1/10 of the
+ expected global maxconn setting, which also significantly reduces memory
+ usage. The memory savings come from the fact that a number of connections
+ will not allocate 2*tune.bufsize. It is best not to touch this value unless
+ advised to do so by an HAProxy core developer.
+
+tune.buffers.reserve <number>
+ Sets the number of buffers which are pre-allocated and reserved for use only
+ during memory shortage conditions resulting in failed memory allocations. The
+ minimum value is 2 and is also the default. There is no reason a user would
+ want to change this value, it's mostly aimed at HAProxy core developers.
+
+tune.bufsize <number>
+ Sets the buffer size to this size (in bytes). Lower values allow more
+ streams to coexist in the same amount of RAM, and higher values allow some
+ applications with very large cookies to work. The default value is 16384 and
+ can be changed at build time. It is strongly recommended not to change this
+ from the default value, as very low values will break some services such as
+ statistics, and values larger than default size will increase memory usage,
+ possibly causing the system to run out of memory. At least the global maxconn
+ parameter should be decreased by the same factor as this one is increased. In
+ addition, use of HTTP/2 mandates that this value must be 16384 or more. If an
+ HTTP request is larger than (tune.bufsize - tune.maxrewrite), HAProxy will
+ return HTTP 400 (Bad Request) error. Similarly if an HTTP response is larger
+ than this size, HAProxy will return HTTP 502 (Bad Gateway). Note that the
+ value set using this parameter will automatically be rounded up to the next
+ multiple of 8 on 32-bit machines and 16 on 64-bit machines.
+
+tune.comp.maxlevel <number>
+ Sets the maximum compression level. The compression level affects CPU
+ usage during compression. Each stream using compression initializes the
+ compression algorithm with this value. The default value is 1.
+
+tune.disable-fast-forward [ EXPERIMENTAL ]
+ Disables the data fast-forwarding. It is a mechanism to optimize the data
+ forwarding by passing data directly from a side to the other one without
+ waking the stream up. Thanks to this directive, it is possible to disable
+ this optimization. Note it also disables any kernel tcp splicing but also the
+ zero-copy forwarding. This command is not meant for regular use, it will
+ generally only be suggested by developers along complex debugging
+ sessions. For this reason it is internally marked as experimental, meaning
+ that "expose-experimental-directives" must appear on a line before this
+ directive.
+
+tune.disable-zero-copy-forwarding
+ Globally disables the zero-copy forwarding of data. It is a mechanism to
+ optimize the data fast-forwarding by avoiding to use the channel's buffer.
+ Thanks to this directive, it is possible to disable this optimization. Note
+ it also disables any kernel tcp splicing.
+
+ See also: tune.pt.zero-copy-forwarding,
+ tune.h1.zero-copy-fwd-recv, tune.h1.zero-copy-fwd-send,
+ tune.h2.zero-copy-fwd-send, tune.quic.zero-copy-fwd-send
+
+tune.events.max-events-at-once <number>
+ Sets the number of events that may be processed at once by an asynchronous
+ task handler (from event_hdl API). <number> should be included between 1
+ and 10000. Large number could cause thread contention as a result of the
+ task doing heavy work without interruption, and on the other hand, small
+ number could result in the task being constantly rescheduled because it
+ cannot consume enough events per run and is not able to catch up with the
+ event producer. The default value may be forced at build time, otherwise
+ defaults to 100.
+
+tune.fail-alloc
+ If compiled with DEBUG_FAIL_ALLOC or started with "-dMfail", gives the
+ percentage of chances an allocation attempt fails. Must be between 0 (no
+ failure) and 100 (no success). This is useful to debug and make sure memory
+ failures are handled gracefully. When not set, the ratio is 0. However the
+ command-line "-dMfail" option automatically sets it to 1% failure rate so that
+ it is not necessary to change the configuration for testing.
+
+tune.fd.edge-triggered { on | off } [ EXPERIMENTAL ]
+ Enables ('on') or disables ('off') the edge-triggered polling mode for FDs
+ that support it. This is currently only supported with epoll. It may noticeably
+ reduce the number of epoll_ctl() calls and slightly improve performance in
+ certain scenarios. This is still experimental, it may result in frozen
+ connections if bugs are still present, and is disabled by default.
+
+tune.h1.zero-copy-fwd-recv { on | off }
+ Enables ('on') or disables ('off') the zero-copy receives of data for the H1
+ multiplexer. It is enabled by default.
+
+ See also: tune.disable-zero-copy-forwarding, tune.h1.zero-copy-fwd-send
+
+tune.h1.zero-copy-fwd-send { on | off }
+ Enables ('on') or disables ('off') the zero-copy sends of data for the H1
+ multiplexer. It is enabled by default.
+
+ See also: tune.disable-zero-copy-forwarding, tune.h1.zero-copy-fwd-recv
+
+tune.h2.be.initial-window-size <number>
+ Sets the HTTP/2 initial window size for outgoing connections, which is the
+ number of bytes the server can respond before waiting for an acknowledgment
+ from HAProxy. This setting only affects payload contents, not headers. When
+ not set, the common default value set by tune.h2.initial-window-size applies.
+ It can make sense to slightly increase this value to allow faster downloads
+ or to reduce CPU usage on the servers, at the expense of creating unfairness
+ between clients. It doesn't affect resource usage.
+
+ See also: tune.h2.initial-window-size.
+
+tune.h2.be.max-concurrent-streams <number>
+ Sets the HTTP/2 maximum number of concurrent streams per outgoing connection
+ (i.e. the number of outstanding requests on a single connection to a server).
+ When not set, the default set by tune.h2.max-concurrent-streams applies. A
+ smaller value than the default 100 may improve a site's responsiveness at the
+ expense of maintaining more established connections to the servers. When the
+ "http-reuse" setting is set to "always", it is recommended to reduce this
+ value so as not to mix too many different clients over the same connection,
+ because if a client is slower than others, a mechanism known as "head of
+ line blocking" tends to cause cascade effect on download speed for all
+ clients sharing a connection (keep tune.h2.be.initial-window-size low in this
+ case). It is highly recommended not to increase this value; some might find
+ it optimal to run at low values (1..5 typically).
+
+tune.h2.fe.initial-window-size <number>
+ Sets the HTTP/2 initial window size for incoming connections, which is the
+ number of bytes the client can upload before waiting for an acknowledgment
+ from HAProxy. This setting only affects payload contents (i.e. the body of
+ POST requests), not headers. When not set, the common default value set by
+ tune.h2.initial-window-size applies. It can make sense to increase this value
+ to allow faster uploads. The default value of 65536 allows up to 5 Mbps of
+ bandwidth per client over a 100 ms ping time, and 500 Mbps for 1 ms ping
+ time. It doesn't affect resource usage. Using too large values may cause
+ clients to experience a lack of responsiveness if pages are accessed in
+ parallel to large uploads.
+
+ See also: tune.h2.initial-window-size.
+
+tune.h2.fe.max-concurrent-streams <number>
+ Sets the HTTP/2 maximum number of concurrent streams per incoming connection
+ (i.e. the number of outstanding requests on a single connection from a
+ client). When not set, the default set by tune.h2.max-concurrent-streams
+ applies. A larger value than the default 100 may sometimes slightly improve
+ the page load time for complex sites with lots of small objects over high
+ latency networks but can also result in using more memory by allowing a
+ client to allocate more resources at once. The default value of 100 is
+ generally good and it is recommended not to change this value.
+
+tune.h2.fe.max-total-streams <number>
+ Sets the HTTP/2 maximum number of total streams processed per incoming
+ connection. Once this limit is reached, HAProxy will send a graceful GOAWAY
+ frame informing the client that it will close the connection after all
+ pending streams have been closed. In practice, clients tend to close as fast
+ as possible when receiving this, and to establish a new connection for next
+ requests. Doing this is sometimes useful and desired in situations where
+ clients stay connected for a very long time and cause some imbalance inside a
+ farm. For example, in some highly dynamic environments, it is possible that
+ new load balancers are instantiated on the fly to adapt to a load increase,
+ and that once the load goes down they should be stopped without breaking
+ established connections. By setting a limit here, the connections will have
+ a limited lifetime and will be frequently renewed, with some possibly being
+ established to other nodes, so that existing resources are quickly released.
+
+ It's important to understand that there is an implicit relation between this
+ limit and "tune.h2.fe.max-concurrent-streams" above. Indeed, HAProxy will
+ always accept to process any possibly pending streams that might be in flight
+ between the client and the frontend, so the advertised limit will always
+ automatically be raised by the value configured in max-concurrent-streams,
+ and this value will serve as a hard limit above which a violation by a non-
+ compliant client will result in the connection being closed. Thus when
+ counting the number of requests per connection from the logs, any number
+ between max-total-streams and (max-total-streams + max-concurrent-streams)
+ may be observed depending on how fast streams are created by the client.
+
+ The default value is zero, which enforces no limit beyond those implied by
+ the protocol (2^30 ~= 1.07 billion). Values around 1000 may already cause
+ frequent connection renewal without causing any perceptible latency to most
+ clients. Setting it too low may result in an increase of CPU usage due to
+ frequent TLS reconnections, in addition to increased page load time. Please
+ note that some load testing tools do not support reconnections and may report
+ errors with this setting; as such it may be needed to disable it when running
+ performance benchmarks. See also "tune.h2.fe.max-concurrent-streams".
+
+tune.h2.header-table-size <number>
+ Sets the HTTP/2 dynamic header table size. It defaults to 4096 bytes and
+ cannot be larger than 65536 bytes. A larger value may help certain clients
+ send more compact requests, depending on their capabilities. This amount of
+ memory is consumed for each HTTP/2 connection. It is recommended not to
+ change it.
+
+tune.h2.initial-window-size <number>
+ Sets the default value for the HTTP/2 initial window size, on both incoming
+ and outgoing connections. This value is used for incoming connections when
+ tune.h2.fe.initial-window-size is not set, and by outgoing connections when
+ tune.h2.be.initial-window-size is not set. The default value is 65536, which
+ for uploads roughly allows up to 5 Mbps of bandwidth per client over a
+ network showing a 100 ms ping time, or 500 Mbps over a 1-ms local network.
+ Given that changing the default value will both increase upload speeds and
+ cause more unfairness between clients on downloads, it is recommended to
+ instead use the side-specific settings tune.h2.fe.initial-window-size and
+ tune.h2.be.initial-window-size.
+
+tune.h2.max-concurrent-streams <number>
+ Sets the default HTTP/2 maximum number of concurrent streams per connection
+ (i.e. the number of outstanding requests on a single connection). This value
+ is used for incoming connections when tune.h2.fe.max-concurrent-streams is
+ not set, and for outgoing connections when tune.h2.be.max-concurrent-streams
+ is not set. The default value is 100. The impact varies depending on the side
+ so please see the two settings above for more details. It is recommended not
+ to use this setting and to switch to the per-side ones instead. A value of
+ zero disables the limit so a single client may create as many streams as
+ allocatable by HAProxy. It is highly recommended not to change this value.
+
+tune.h2.max-frame-size <number>
+ Sets the HTTP/2 maximum frame size that HAProxy announces it is willing to
+ receive to its peers. The default value is the largest between 16384 and the
+ buffer size (tune.bufsize). In any case, HAProxy will not announce support
+ for frame sizes larger than buffers. The main purpose of this setting is to
+ allow to limit the maximum frame size setting when using large buffers. Too
+ large frame sizes might have performance impact or cause some peers to
+ misbehave. It is highly recommended not to change this value.
+
+tune.h2.zero-copy-fwd-send { on | off }
+ Enables ('on') or disables ('off') the zero-copy sends of data for the H2
+ multiplexer. It is enabled by default.
+
+ See also: tune.disable-zero-copy-forwarding
+
+tune.http.cookielen <number>
+ Sets the maximum length of captured cookies. This is the maximum value that
+ the "capture cookie xxx len yyy" will be allowed to take, and any upper value
+ will automatically be truncated to this one. It is important not to set too
+ high a value because all cookie captures still allocate this size whatever
+ their configured value (they share a same pool). This value is per request
+ per response, so the memory allocated is twice this value per connection.
+ When not specified, the limit is set to 63 characters. It is recommended not
+ to change this value.
+
+tune.http.logurilen <number>
+ Sets the maximum length of request URI in logs. This prevents truncating long
+ request URIs with valuable query strings in log lines. This is not related
+ to syslog limits. If you increase this limit, you may also increase the
+ 'log ... len yyy' parameter. Your syslog daemon may also need specific
+ configuration directives too.
+ The default value is 1024.
+
+tune.http.maxhdr <number>
+ Sets the maximum number of headers in a request. When a request comes with a
+ number of headers greater than this value (including the first line), it is
+ rejected with a "400 Bad Request" status code. Similarly, too large responses
+ are blocked with "502 Bad Gateway". The default value is 101, which is enough
+ for all usages, considering that the widely deployed Apache server uses the
+ same limit. It can be useful to push this limit further to temporarily allow
+ a buggy application to work by the time it gets fixed. The accepted range is
+ 1..32767. Keep in mind that each new header consumes 32bits of memory for
+ each stream, so don't push this limit too high.
+
+tune.idle-pool.shared { on | off }
+ Enables ('on') or disables ('off') sharing of idle connection pools between
+ threads for a same server. The default is to share them between threads in
+ order to minimize the number of persistent connections to a server, and to
+ optimize the connection reuse rate. But to help with debugging or when
+ suspecting a bug in HAProxy around connection reuse, it can be convenient to
+ forcefully disable this idle pool sharing between multiple threads, and force
+ this option to "off". The default is on. It is strongly recommended not to
+ disable this option without setting a conservative value on "pool-low-conn"
+ for all servers relying on connection reuse to achieve a high performance
+ level, otherwise connections might be closed very often as the thread count
+ increases.
+
+tune.idletimer <timeout>
+ Sets the duration after which HAProxy will consider that an empty buffer is
+ probably associated with an idle stream. This is used to optimally adjust
+ some packet sizes while forwarding large and small data alternatively. The
+ decision to use splice() or to send large buffers in SSL is modulated by this
+ parameter. The value is in milliseconds between 0 and 65535. A value of zero
+ means that HAProxy will not try to detect idle streams. The default is 1000,
+ which seems to correctly detect end user pauses (e.g. read a page before
+ clicking). There should be no reason for changing this value. Please check
+ tune.ssl.maxrecord below.
+
+tune.listener.default-shards { by-process | by-thread | by-group }
+ Normally, all "bind" lines will create a single shard, that is, a single
+ socket that all threads of the process will listen to. With many threads,
+ this is not very efficient, and may even induce some important overhead in
+ the kernel for updating the polling state or even distributing events to the
+ various threads. Modern operating systems support balancing of incoming
+ connections, a mechanism that will consist in permitting multiple sockets to
+ be bound to the same address and port, and to evenly distribute all incoming
+ connections to these sockets so that each thread only sees the connections
+ that are waiting in the socket it is bound to. This significantly reduces
+ kernel-side overhead and increases performance in the incoming connection
+ path. This is usually enabled in HAProxy using the "shards" setting on "bind"
+ lines, which defaults to 1, meaning that each listener will be unique in the
+ process. On systems with many processors, it may be more convenient to change
+ the default setting to "by-thread" in order to always create one listening
+ socket per thread, or "by-group" in order to always create one listening
+ socket per thread group. Be careful about the file descriptor usage with
+ "by-thread" as each listener will need as many sockets as there are threads.
+ Also some operating systems (e.g. FreeBSD) are limited to no more than 256
+ sockets on a same address. Note that "by-group" will remain equivalent to
+ "by-process" for default configurations involving a single thread group, and
+ will fall back to sharing the same socket on systems that do not support this
+ mechanism. The default is "by-group" with a fallback to "by-process" for
+ systems or socket families that do not support multiple bindings.
+
+tune.listener.multi-queue { on | fair | off }
+ Enables ('on' / 'fair') or disables ('off') the listener's multi-queue accept
+ which spreads the incoming traffic to all threads a "bind" line is allowed to
+ run on instead of taking them for itself. This provides a smoother traffic
+ distribution and scales much better, especially in environments where threads
+ may be unevenly loaded due to external activity (network interrupts colliding
+ with one thread for example). The default mode, "on", optimizes the choice of
+ a thread by picking in a sample the one with the less connections. It is
+ often the best choice when connections are long-lived as it manages to keep
+ all threads busy. A second mode, "fair", instead cycles through all threads
+ regardless of their instant load level. It can be better suited for short-
+ lived connections, or on machines with very large numbers of threads where
+ the probability to find the least loaded thread with the first mode is low.
+ Finally it is possible to forcefully disable the redistribution mechanism
+ using "off" for troubleshooting, or for situations where connections are
+ short-lived and it is estimated that the operating system already provides a
+ good enough distribution. The default is "on".
+
+tune.lua.forced-yield <number>
+ This directive forces the Lua engine to execute a yield each <number> of
+ instructions executed. This permits interrupting a long script and allows the
+ HAProxy scheduler to process other tasks like accepting connections or
+ forwarding traffic. The default value is 10000 instructions. If HAProxy often
+ executes some Lua code but more responsiveness is required, this value can be
+ lowered. If the Lua code is quite long and its result is absolutely required
+ to process the data, the <number> can be increased.
+
+tune.lua.maxmem <number>
+ Sets the maximum amount of RAM in megabytes per process usable by Lua. By
+ default it is zero which means unlimited. It is important to set a limit to
+ ensure that a bug in a script will not result in the system running out of
+ memory.
+
+tune.lua.session-timeout <timeout>
+ This is the execution timeout for the Lua sessions. This is useful for
+ preventing infinite loops or spending too much time in Lua. This timeout
+ counts only the pure Lua runtime. If the Lua does a sleep, the sleep is
+ not taken in account. The default timeout is 4s.
+
+tune.lua.burst-timeout <timeout>
+ The "burst" execution timeout applies to any Lua handler. If the handler
+ fails to finish or yield before timeout is reached, it will be aborted to
+ prevent thread contention, to prevent traffic from not being served for too
+ long, and ultimately to prevent the process from crashing because of the
+ watchdog kicking in. Unlike other lua timeouts which are yield-cumulative,
+ burst-timeout will ensure that the time spent in a single lua execution
+ window does not exceed the configured timeout.
+
+ Yielding here means that the lua execution is effectively interrupted
+ either through an explicit call to lua-yielding function such as
+ core.(m)sleep() or core.yield(), or following an automatic forced-yield
+ (see tune.lua.forced-yield) and that it will be resumed later when the
+ related task is set for rescheduling. Not all lua handlers may yield: we have
+ to make a distinction between yieldable handlers and unyieldable handlers.
+
+ For yieldable handlers (tasks, actions..), reaching the timeout means
+ "tune.lua.forced-yield" might be too high for the system, reducing it
+ could improve the situation, but it could also be a good idea to check if
+ adding manual yields at some key points within the lua function helps or not.
+ It may also indicate that the handler is spending too much time in a specific
+ lua library function that cannot be interrupted.
+
+ For unyieldable handlers (lua converters, sample fetches), it could simply
+ indicate that the handler is doing too much computation, which could result
+ from an improper design given that such handlers, which often block the
+ request execution flow, are expected to terminate quickly to allow the
+ request processing to go through. A common resolution approach here would be
+ to try to better optimize the lua function for speed since decreasing
+ "tune.lua.forced-yield" won't help.
+
+ This timeout only counts the pure Lua runtime. If the Lua does a core.sleep,
+ the sleeping time is not taken in account. The default timeout is 1000ms.
+
+ Note: if a lua GC cycle is initiated from the handler (either explicitly
+ requested or automatically triggered by lua after some time), the GC cycle
+ time will also be accounted for.
+
+ Indeed, there is no way to deduce the GC cycle time, so this could lead to
+ some false positives on saturated systems (where GC is having hard time to
+ catch up and consumes most of the available execution runtime). If it were
+ to be the case, here are some resolution leads:
+
+ - checking if the script could be optimized to reduce lua memory footprint
+ - fine-tuning lua GC parameters and / or requesting manual GC cycles
+ (see: https://www.lua.org/manual/5.4/manual.html#pdf-collectgarbage)
+ - increasing tune.lua.burst-timeout
+
+ Setting value to 0 completely disables this protection.
+
+tune.lua.service-timeout <timeout>
+ This is the execution timeout for the Lua services. This is useful for
+ preventing infinite loops or spending too much time in Lua. This timeout
+ counts only the pure Lua runtime. If the Lua does a sleep, the sleep is
+ not taken in account. The default timeout is 4s.
+
+tune.lua.task-timeout <timeout>
+ Purpose is the same as "tune.lua.session-timeout", but this timeout is
+ dedicated to the tasks. By default, this timeout isn't set because a task may
+ remain alive during the lifetime of HAProxy. For example, a task used to
+ check servers.
+
+tune.lua.log.loggers { on | off }
+ Enables ('on') or disables ('off') logging the output of LUA scripts via the
+ loggers applicable to the current proxy, if any.
+
+ Defaults to 'on'.
+
+tune.lua.log.stderr { on | auto | off }
+ Enables ('on') or disables ('off') logging the output of LUA scripts via
+ stderr.
+ When set to 'auto', logging via stderr is conditionally 'on' if any of:
+
+ - tune.lua.log.loggers is set to 'off'
+ - the script is executed in a non-proxy context with no global logger
+ - the script is executed in a proxy context with no logger attached
+
+ Please note that, when enabled, this logging is in addition to the logging
+ configured via tune.lua.log.loggers.
+
+ Defaults to 'auto'.
+
+tune.max-checks-per-thread <number>
+ Sets the number of active checks per thread above which a thread will
+ actively try to search a less loaded thread to run the health check, or
+ queue it until the number of active checks running on it diminishes. The
+ default value is zero, meaning no such limit is set. It may be needed in
+ certain environments running an extremely large number of expensive checks
+ with many threads when the load appears unequal and may make health checks
+ to randomly time out on startup, typically when using OpenSSL 3.0 which is
+ about 20 times more CPU-intensive on health checks than older ones. This will
+ have for result to try to level the health check work across all threads. The
+ vast majority of configurations do not need to touch this parameter. Please
+ note that too low values may significantly slow down the health checking if
+ checks are slow to execute.
+
+tune.maxaccept <number>
+ Sets the maximum number of consecutive connections a process may accept in a
+ row before switching to other work. In single process mode, higher numbers
+ used to give better performance at high connection rates, though this is not
+ the case anymore with the multi-queue. This value applies individually to
+ each listener, so that the number of processes a listener is bound to is
+ taken into account. This value defaults to 4 which showed best results. If a
+ significantly higher value was inherited from an ancient config, it might be
+ worth removing it as it will both increase performance and lower response
+ time. In multi-process mode, it is divided by twice the number of processes
+ the listener is bound to. Setting this value to -1 completely disables the
+ limitation. It should normally not be needed to tweak this value.
+
+tune.maxpollevents <number>
+ Sets the maximum amount of events that can be processed at once in a call to
+ the polling system. The default value is adapted to the operating system. It
+ has been noticed that reducing it below 200 tends to slightly decrease
+ latency at the expense of network bandwidth, and increasing it above 200
+ tends to trade latency for slightly increased bandwidth.
+
+tune.maxrewrite <number>
+ Sets the reserved buffer space to this size in bytes. The reserved space is
+ used for header rewriting or appending. The first reads on sockets will never
+ fill more than bufsize-maxrewrite. Historically it has defaulted to half of
+ bufsize, though that does not make much sense since there are rarely large
+ numbers of headers to add. Setting it too high prevents processing of large
+ requests or responses. Setting it too low prevents addition of new headers
+ to already large requests or to POST requests. It is generally wise to set it
+ to about 1024. It is automatically readjusted to half of bufsize if it is
+ larger than that. This means you don't have to worry about it when changing
+ bufsize.
+
+tune.memory.hot-size <number>
+ Sets the per-thread amount of memory that will be kept hot in the local cache
+ and will never be recoverable by other threads. Access to this memory is very
+ fast (lockless), and having enough is critical to maintain a good performance
+ level under extreme thread contention. The value is expressed in bytes, and
+ the default value is configured at build time via CONFIG_HAP_POOL_CACHE_SIZE
+ which defaults to 524288 (512 kB). A larger value may increase performance in
+ some usage scenarios, especially when performance profiles show that memory
+ allocation is stressed a lot. Experience shows that a good value sits between
+ once to twice the per CPU core L2 cache size. Too large values will have a
+ negative impact on performance by making inefficient use of the L3 caches in
+ the CPUs, and will consume larger amounts of memory. It is recommended not to
+ change this value, or to proceed in small increments. In order to completely
+ disable the per-thread CPU caches, using a very small value could work, but
+ it is better to use "-dMno-cache" on the command-line.
+
+tune.pattern.cache-size <number>
+ Sets the size of the pattern lookup cache to <number> entries. This is an LRU
+ cache which reminds previous lookups and their results. It is used by ACLs
+ and maps on slow pattern lookups, namely the ones using the "sub", "reg",
+ "dir", "dom", "end", "bin" match methods as well as the case-insensitive
+ strings. It applies to pattern expressions which means that it will be able
+ to memorize the result of a lookup among all the patterns specified on a
+ configuration line (including all those loaded from files). It automatically
+ invalidates entries which are updated using HTTP actions or on the CLI. The
+ default cache size is set to 10000 entries, which limits its footprint to
+ about 5 MB per process/thread on 32-bit systems and 8 MB per process/thread
+ on 64-bit systems, as caches are thread/process local. There is a very low
+ risk of collision in this cache, which is in the order of the size of the
+ cache divided by 2^64. Typically, at 10000 requests per second with the
+ default cache size of 10000 entries, there's 1% chance that a brute force
+ attack could cause a single collision after 60 years, or 0.1% after 6 years.
+ This is considered much lower than the risk of a memory corruption caused by
+ aging components. If this is not acceptable, the cache can be disabled by
+ setting this parameter to 0.
+
+tune.peers.max-updates-at-once <number>
+ Sets the maximum number of stick-table updates that haproxy will try to
+ process at once when sending messages. Retrieving the data for these updates
+ requires some locking operations which can be CPU intensive on highly
+ threaded machines if unbound, and may also increase the traffic latency
+ during the initial batched transfer between an older and a newer process.
+ Conversely low values may also incur higher CPU overhead, and take longer
+ to complete. The default value is 200 and it is suggested not to change it.
+
+tune.pipesize <number>
+ Sets the kernel pipe buffer size to this size (in bytes). By default, pipes
+ are the default size for the system. But sometimes when using TCP splicing,
+ it can improve performance to increase pipe sizes, especially if it is
+ suspected that pipes are not filled and that many calls to splice() are
+ performed. This has an impact on the kernel's memory footprint, so this must
+ not be changed if impacts are not understood.
+
+tune.pool-high-fd-ratio <number>
+ This setting sets the max number of file descriptors (in percentage) used by
+ HAProxy globally against the maximum number of file descriptors HAProxy can
+ use before we start killing idle connections when we can't reuse a connection
+ and we have to create a new one. The default is 25 (one quarter of the file
+ descriptors will mean that roughly half of the maximum front connections can
+ keep an idle connection behind, anything beyond this probably doesn't make
+ much sense in the general case when targeting connection reuse).
+
+tune.pool-low-fd-ratio <number>
+ This setting sets the max number of file descriptors (in percentage) used by
+ HAProxy globally against the maximum number of file descriptors HAProxy can
+ use before we stop putting connection into the idle pool for reuse. The
+ default is 20.
+
+tune.pt.zero-copy-forwarding { on | off }
+ Enables ('on') or disables ('off') the zero-copy forwarding of data for the
+ pass-through multiplexer. To be used, the kernel splicing must also be
+ configured. It is enabled by default.
+
+ See also: tune.disable-zero-copy-forwarding, option splice-auto,
+ option splice-request and option splice-response
+
+tune.quic.frontend.conn-tx-buffers.limit <number>
+ This settings defines the maximum number of buffers allocated for a QUIC
+ connection on data emission. By default, it is set to 30. QUIC buffers are
+ drained on ACK reception. This setting has a direct impact on the throughput
+ and memory consumption and can be adjusted according to an estimated
+ round-trip time. Each buffer is of size tune.bufsize.
+
+tune.quic.frontend.max-idle-timeout <timeout>
+ Sets the QUIC max_idle_timeout transport parameters in milliseconds for
+ frontends which determines the period of time after which a connection silently
+ closes if it has remained inactive during an effective period of time deduced
+ from the two max_idle_timeout values announced by the two endpoints:
+ - the minimum of the two values if both are not null,
+ - the maximum if only one of them is not null,
+ - if both values are null, this feature is disabled.
+
+ The default value is 30000.
+
+tune.quic.frontend.max-streams-bidi <number>
+ Sets the QUIC initial_max_streams_bidi transport parameter for frontends.
+ This is the initial maximum number of bidirectional streams the remote peer
+ will be authorized to open. This determines the number of concurrent client
+ requests.
+
+ The default value is 100.
+
+tune.quic.max-frame-loss <number>
+ Sets the limit for which a single QUIC frame can be marked as lost. If
+ exceeded, the connection is considered as failing and is closed immediately.
+
+ The default value is 10.
+
+tune.quic.reorder-ratio <0..100, in percent>
+ The ratio applied to the packet reordering threshold calculated. It may
+ trigger a high packet loss detection when too small.
+
+ The default value is 50.
+
+tune.quic.retry-threshold <number>
+ Dynamically enables the Retry feature for all the configured QUIC listeners
+ as soon as this number of half open connections is reached. A half open
+ connection is a connection whose handshake has not already successfully
+ completed or failed. To be functional this setting needs a cluster secret to
+ be set, if not it will be silently ignored (see "cluster-secret" setting).
+ This setting will be also silently ignored if the use of QUIC Retry was
+ forced (see "quic-force-retry").
+
+ The default value is 100.
+
+ See https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.2 for more
+ information about QUIC retry.
+
+tune.quic.socket-owner { connection | listener }
+ Specifies globally how QUIC connections will use socket for receive/send
+ operations. Connections can share listener socket or each connection can
+ allocate its own socket.
+
+ When default "connection" value is set, a dedicated socket will be allocated
+ by every QUIC connection. This option is the preferred one to achieve the
+ best performance with a large QUIC traffic. This is also the only way to
+ ensure soft-stop is conducted properly without data loss for QUIC connections
+ and cases of transient errors during sendto() operation are handled
+ efficiently. However, this relies on some advanced features from the UDP
+ network stack. If your platform is deemed not compatible, haproxy will
+ automatically switch to "listener" mode on startup. Please note that QUIC
+ listeners running on privileged ports may require to run as uid 0, or some
+ OS-specific tuning to permit the target uid to bind such ports, such as
+ system capabilities. See also the "setcap" global directive.
+
+ The "listener" value indicates that QUIC transfers will occur on the shared
+ listener socket. This option can be a good compromise for small traffic as it
+ allows to reduce FD consumption. However, performance won't be optimal due to
+ a higher CPU usage if listeners are shared across a lot of threads or a
+ large number of QUIC connections can be used simultaneously.
+
+ This setting is applied in conjunction with each "quic-socket" bind options.
+ If "connection" mode is used on global tuning, it will be activated for each
+ listener, unless its bind option is set to "listener". However, if "listener"
+ is used globally, it will be forced on every listener instance, regardless of
+ their individual configuration.
+
+tune.quic.zero-copy-fwd-send { on | off }
+ Enables ('on') or disables ('off') the zero-copy sends of data for the QUIC
+ multiplexer. It is disabled by default.
+
+ See also: tune.disable-zero-copy-forwarding
+
+tune.rcvbuf.backend <number>
+tune.rcvbuf.frontend <number>
+ Forces the kernel socket receive buffer size on non-connected sockets to this
+ size. This can be used for QUIC in listener mode and log-forward on the
+ frontend.
+ The default system buffers might sometimes be too small for sockets receiving
+ lots of aggregated traffic, causing some losses and possibly retransmits (in
+ case of QUIC), possibly slowing down connection establishment under heavy
+ traffic. The value is expressed in bytes, applied to each socket. In listener
+ mode, sockets are shared between all connections, and the total number of
+ sockets depends on the "shards" value of the "bind" line. There's no good
+ value, a good one corresponds to an expected size per connection multiplied
+ by the expected number of connections. The kernel may trim large values. See
+ also "tune.rcvbuf.client" and "tune.rcvbuf.server" for their connected socket
+ counterparts, as well as "tune.sndbuf.backend" and "tune.sndbuf.frontend"
+ for the send setting.
+
+tune.rcvbuf.client <number>
+tune.rcvbuf.server <number>
+ Forces the kernel socket receive buffer size on the client or the server side
+ to the specified value in bytes. This value applies to all TCP/HTTP frontends
+ and backends. It should normally never be set, and the default size (0) lets
+ the kernel auto-tune this value depending on the amount of available memory.
+ However it can sometimes help to set it to very low values (e.g. 4096) in
+ order to save kernel memory by preventing it from buffering too large amounts
+ of received data. Lower values will significantly increase CPU usage though.
+
+tune.recv_enough <number>
+ HAProxy uses some hints to detect that a short read indicates the end of the
+ socket buffers. One of them is that a read returns more than <recv_enough>
+ bytes, which defaults to 10136 (7 segments of 1448 each). This default value
+ may be changed by this setting to better deal with workloads involving lots
+ of short messages such as telnet or SSH sessions.
+
+tune.runqueue-depth <number>
+ Sets the maximum number of tasks that can be processed at once when running
+ tasks. The default value depends on the number of threads but sits between 35
+ and 280, which tend to show the highest request rates and lowest latencies.
+ Increasing it may incur latency when dealing with I/Os, making it too small
+ can incur extra overhead. Higher thread counts benefit from lower values.
+ When experimenting with much larger values, it may be useful to also enable
+ tune.sched.low-latency and possibly tune.fd.edge-triggered to limit the
+ maximum latency to the lowest possible.
+
+tune.sched.low-latency { on | off }
+ Enables ('on') or disables ('off') the low-latency task scheduler. By default
+ HAProxy processes tasks from several classes one class at a time as this is
+ the most efficient. But when running with large values of tune.runqueue-depth
+ this can have a measurable effect on request or connection latency. When this
+ low-latency setting is enabled, tasks of lower priority classes will always
+ be executed before other ones if they exist. This will permit to lower the
+ maximum latency experienced by new requests or connections in the middle of
+ massive traffic, at the expense of a higher impact on this large traffic.
+ For regular usage it is better to leave this off. The default value is off.
+
+tune.sndbuf.backend <number>
+tune.sndbuf.frontend <number>
+ Forces the kernel socket send buffer size on non-connected sockets to this
+ size.
+ This can be used for UNIX socket and UDP logging on the backend side, and for
+ QUIC in listener mode on the frontend. The default system buffers might
+ sometimes be too small for sockets shared between many connections (or log
+ senders), causing some losses and possibly retransmits, slowing down new
+ connection establishment under high traffic. The value is expressed in bytes,
+ applied to each socket. In listener mode, sockets are shared between all
+ connections, and the total number of sockets depends on the "shards" value of
+ the "bind" line. There's no good value, a good one corresponds to an expected
+ size per connection multiplied by the expected number of connections. The
+ kernel may trim large values. See also "tune.sndbuf.client" and
+ "tune.sndbuf.server" for their connected socket counter parts, as well as
+ "tune.rcvbuf.backend" and "tune.rcvbuf.frontend" for the receive setting.
+
+tune.sndbuf.client <number>
+tune.sndbuf.server <number>
+ Forces the kernel socket send buffer size on the client or the server side to
+ the specified value in bytes. This value applies to all TCP/HTTP frontends
+ and backends. It should normally never be set, and the default size (0) lets
+ the kernel auto-tune this value depending on the amount of available memory.
+ However it can sometimes help to set it to very low values (e.g. 4096) in
+ order to save kernel memory by preventing it from buffering too large amounts
+ of received data. Lower values will significantly increase CPU usage though.
+ Another use case is to prevent write timeouts with extremely slow clients due
+ to the kernel waiting for a large part of the buffer to be read before
+ notifying HAProxy again.
+
+tune.ssl.cachesize <number>
+ Sets the size of the global SSL session cache, in a number of blocks. A block
+ is large enough to contain an encoded session without peer certificate. An
+ encoded session with peer certificate is stored in multiple blocks depending
+ on the size of the peer certificate. A block uses approximately 200 bytes of
+ memory (based on `sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE`
+ calculation used for `shctx_init` function). The default value may be forced
+ at build time, otherwise defaults to 20000. When the cache is full, the most
+ idle entries are purged and reassigned. Higher values reduce the occurrence
+ of such a purge, hence the number of CPU-intensive SSL handshakes by ensuring
+ that all users keep their session as long as possible. All entries are
+ pre-allocated upon startup. Setting this value to 0 disables the SSL session
+ cache.
+
+tune.ssl.capture-buffer-size <number>
+tune.ssl.capture-cipherlist-size <number> (deprecated)
+ Sets the maximum size of the buffer used for capturing client hello cipher
+ list, extensions list, elliptic curves list and elliptic curve point
+ formats. If the value is 0 (default value) the capture is disabled,
+ otherwise a buffer is allocated for each SSL/TLS connection.
+
+tune.ssl.default-dh-param <number>
+ Sets the maximum size of the Diffie-Hellman parameters used for generating
+ the ephemeral/temporary Diffie-Hellman key in case of DHE key exchange. The
+ final size will try to match the size of the server's RSA (or DSA) key (e.g,
+ a 2048 bits temporary DH key for a 2048 bits RSA key), but will not exceed
+ this maximum value. Only 1024 or higher values are allowed. Higher values
+ will increase the CPU load, and values greater than 1024 bits are not
+ supported by Java 7 and earlier clients. This value is not used if static
+ Diffie-Hellman parameters are supplied either directly in the certificate
+ file or by using the ssl-dh-param-file parameter.
+ If there is neither a default-dh-param nor a ssl-dh-param-file defined, and
+ if the server's PEM file of a given frontend does not specify its own DH
+ parameters, then DHE ciphers will be unavailable for this frontend.
+
+tune.ssl.force-private-cache
+ This option disables SSL session cache sharing between all processes. It
+ should normally not be used since it will force many renegotiations due to
+ clients hitting a random process. But it may be required on some operating
+ systems where none of the SSL cache synchronization method may be used. In
+ this case, adding a first layer of hash-based load balancing before the SSL
+ layer might limit the impact of the lack of session sharing.
+
+tune.ssl.hard-maxrecord <number>
+ Sets the maximum amount of bytes passed to SSL_write() at any time. Default
+ value 0 means there is no limit. In contrast to tune.ssl.maxrecord this
+ settings will not be adjusted dynamically. Smaller records may decrease
+ throughput, but may be required when dealing with low-footprint clients.
+
+tune.ssl.keylog { on | off }
+ This option activates the logging of the TLS keys. It should be used with
+ care as it will consume more memory per SSL session and could decrease
+ performances. This is disabled by default.
+
+ These sample fetches should be used to generate the SSLKEYLOGFILE that is
+ required to decipher traffic with wireshark.
+
+ https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
+
+ The SSLKEYLOG is a series of lines which are formatted this way:
+
+ <Label> <space> <ClientRandom> <space> <Secret>
+
+ The ClientRandom is provided by the %[ssl_fc_client_random,hex] sample
+ fetch, the Secret and the Label can be found in the table below. You need
+ to generate a SSLKEYLOGFILE with all the labels in this array.
+
+ The following sample fetches are hexadecimal strings and do not need to be
+ converted.
+
+ SSLKEYLOGFILE Label | Sample fetches for the Secrets
+ --------------------------------|-----------------------------------------
+ CLIENT_EARLY_TRAFFIC_SECRET | %[ssl_fc_client_early_traffic_secret]
+ CLIENT_HANDSHAKE_TRAFFIC_SECRET | %[ssl_fc_client_handshake_traffic_secret]
+ SERVER_HANDSHAKE_TRAFFIC_SECRET | %[ssl_fc_server_handshake_traffic_secret]
+ CLIENT_TRAFFIC_SECRET_0 | %[ssl_fc_client_traffic_secret_0]
+ SERVER_TRAFFIC_SECRET_0 | %[ssl_fc_server_traffic_secret_0]
+ EXPORTER_SECRET | %[ssl_fc_exporter_secret]
+ EARLY_EXPORTER_SECRET | %[ssl_fc_early_exporter_secret]
+
+ This is only available with OpenSSL 1.1.1, and useful with TLS1.3 session.
+
+ If you want to generate the content of a SSLKEYLOGFILE with TLS < 1.3, you
+ only need this line:
+
+ "CLIENT_RANDOM %[ssl_fc_client_random,hex] %[ssl_fc_session_key,hex]"
+
+tune.ssl.lifetime <timeout>
+ Sets how long a cached SSL session may remain valid. This time is expressed
+ in seconds and defaults to 300 (5 min). It is important to understand that it
+ does not guarantee that sessions will last that long, because if the cache is
+ full, the longest idle sessions will be purged despite their configured
+ lifetime. The real usefulness of this setting is to prevent sessions from
+ being used for too long.
+
+tune.ssl.maxrecord <number>
+ Sets the maximum amount of bytes passed to SSL_write() at the beginning of
+ the data transfer. Default value 0 means there is no limit. Over SSL/TLS,
+ the client can decipher the data only once it has received a full record.
+ With large records, it means that clients might have to download up to 16kB
+ of data before starting to process them. Limiting the value can improve page
+ load times on browsers located over high latency or low bandwidth networks.
+ It is suggested to find optimal values which fit into 1 or 2 TCP segments
+ (generally 1448 bytes over Ethernet with TCP timestamps enabled, or 1460 when
+ timestamps are disabled), keeping in mind that SSL/TLS add some overhead.
+ Typical values of 1419 and 2859 gave good results during tests. Use
+ "strace -e trace=write" to find the best value. HAProxy will automatically
+ switch to this setting after an idle stream has been detected (see
+ tune.idletimer above). See also tune.ssl.hard-maxrecord.
+
+tune.ssl.ssl-ctx-cache-size <number>
+ Sets the size of the cache used to store generated certificates to <number>
+ entries. This is a LRU cache. Because generating a SSL certificate
+ dynamically is expensive, they are cached. The default cache size is set to
+ 1000 entries.
+
+tune.ssl.ocsp-update.maxdelay <number>
+ Sets the maximum interval between two automatic updates of the same OCSP
+ response. This time is expressed in seconds and defaults to 3600 (1 hour). It
+ must be set to a higher value than "tune.ssl.ocsp-update.mindelay". See
+ option "ocsp-update" for more information about the auto update mechanism.
+
+tune.ssl.ocsp-update.mindelay <number>
+ Sets the minimum interval between two automatic updates of the same OCSP
+ response. This time is expressed in seconds and defaults to 300 (5 minutes).
+ It is particularly useful for OCSP response that do not have explicit
+ expiration times. It must be set to a lower value than
+ "tune.ssl.ocsp-update.maxdelay". See option "ocsp-update" for more
+ information about the auto update mechanism.
+
+tune.stick-counters <number>
+ Sets the number of stick-counters that may be tracked at the same time by a
+ connection or a request via "track-sc*" actions in "tcp-request" or
+ "http-request" rules. The default value is set at build time by the macro
+ MAX_SESS_STK_CTR, and defaults to 3. With this setting it is possible to
+ change the value and ignore the one passed at build time. Increasing this
+ value may be needed when porting complex configurations to haproxy, but users
+ are warned against the costs: each entry takes 16 bytes per connection and
+ 16 bytes per request, all of which need to be allocated and zeroed for all
+ requests even when not used. As such a value of 10 will inflate the memory
+ consumption per request by 320 bytes and will cause this memory to be erased
+ for each request, which does have measurable CPU impacts. Conversely, when
+ no "track-sc" rules are used, the value may be lowered (0 being valid to
+ entirely disable stick-counters).
+
+tune.vars.global-max-size <size>
+tune.vars.proc-max-size <size>
+tune.vars.reqres-max-size <size>
+tune.vars.sess-max-size <size>
+tune.vars.txn-max-size <size>
+ These five tunes help to manage the maximum amount of memory used by the
+ variables system. "global" limits the overall amount of memory available for
+ all scopes. "proc" limits the memory for the process scope, "sess" limits the
+ memory for the session scope, "txn" for the transaction scope, and "reqres"
+ limits the memory for each request or response processing.
+ Memory accounting is hierarchical, meaning more coarse grained limits include
+ the finer grained ones: "proc" includes "sess", "sess" includes "txn", and
+ "txn" includes "reqres".
+
+ For example, when "tune.vars.sess-max-size" is limited to 100,
+ "tune.vars.txn-max-size" and "tune.vars.reqres-max-size" cannot exceed
+ 100 either. If we create a variable "txn.var" that contains 100 bytes,
+ all available space is consumed.
+ Notice that exceeding the limits at runtime will not result in an error
+ message, but values might be cut off or corrupted. So make sure to accurately
+ plan for the amount of space needed to store all your variables.
+
+tune.zlib.memlevel <number>
+ Sets the memLevel parameter in zlib initialization for each stream. It
+ defines how much memory should be allocated for the internal compression
+ state. A value of 1 uses minimum memory but is slow and reduces compression
+ ratio, a value of 9 uses maximum memory for optimal speed. Can be a value
+ between 1 and 9. The default value is 8.
+
+tune.zlib.windowsize <number>
+ Sets the window size (the size of the history buffer) as a parameter of the
+ zlib initialization for each stream. Larger values of this parameter result
+ in better compression at the expense of memory usage. Can be a value between
+ 8 and 15. The default value is 15.
+
+3.3. Debugging
+--------------
+
+anonkey <key>
+ This sets the global anonymizing key to <key>, which must be a 32-bit number
+ between 0 and 4294967295. This is the key that will be used by default by CLI
+ commands when anonymized mode is enabled. This key may also be set at runtime
+ from the CLI command "set anon global-key". See also command line argument
+ "-dC" in the management manual.
+
+quick-exit
+ This speeds up the old process exit upon reload by skipping the releasing of
+ memory objects and listeners, since all of these are reclaimed by the
+ operating system at the process' death. The gains are only marginal (in the
+ order of a few hundred milliseconds for huge configurations at most). The
+ main target usage in fact is when a bug is spotted in the deinit() code, as
+ this allows to bypass it. It is better not to use this unless instructed to
+ do so by developers.
+
+quiet
+ Do not display any message during startup. It is equivalent to the command-
+ line argument "-q".
+
+zero-warning
+ When this option is set, HAProxy will refuse to start if any warning was
+ emitted while processing the configuration. It is highly recommended to set
+ this option on configurations that are not changed often, as it helps detect
+ subtle mistakes and keep the configuration clean and forward-compatible. Note
+ that "haproxy -c" will also report errors in such a case. This option is
+ equivalent to command line argument "-dW".
+
+
+3.4. Userlists
+--------------
+It is possible to control access to frontend/backend/listen sections or to
+http stats by allowing only authenticated and authorized users. To do this,
+it is required to create at least one userlist and to define users.
+
+userlist <listname>
+ Creates new userlist with name <listname>. Many independent userlists can be
+ used to store authentication & authorization data for independent customers.
+
+group <groupname> [users <user>,<user>,(...)]
+ Adds group <groupname> to the current userlist. It is also possible to
+ attach users to this group by using a comma separated list of names
+ proceeded by "users" keyword.
+
+user <username> [password|insecure-password <password>]
+ [groups <group>,<group>,(...)]
+ Adds user <username> to the current userlist. Both secure (encrypted) and
+ insecure (unencrypted) passwords can be used. Encrypted passwords are
+ evaluated using the crypt(3) function, so depending on the system's
+ capabilities, different algorithms are supported. For example, modern Glibc
+ based Linux systems support MD5, SHA-256, SHA-512, and, of course, the
+ classic DES-based method of encrypting passwords.
+
+ Attention: Be aware that using encrypted passwords might cause significantly
+ increased CPU usage, depending on the number of requests, and the algorithm
+ used. For any of the hashed variants, the password for each request must
+ be processed through the chosen algorithm, before it can be compared to the
+ value specified in the config file. Most current algorithms are deliberately
+ designed to be expensive to compute to achieve resistance against brute
+ force attacks. They do not simply salt/hash the clear text password once,
+ but thousands of times. This can quickly become a major factor in HAProxy's
+ overall CPU consumption!
+
+ Example:
+ userlist L1
+ group G1 users tiger,scott
+ group G2 users xdb,scott
+
+ user tiger password $6$k6y3o.eP$JlKBx9za9667qe4(...)xHSwRv6J.C0/D7cV91
+ user scott insecure-password elgato
+ user xdb insecure-password hello
+
+ userlist L2
+ group G1
+ group G2
+
+ user tiger password $6$k6y3o.eP$JlKBx(...)xHSwRv6J.C0/D7cV91 groups G1
+ user scott insecure-password elgato groups G1,G2
+ user xdb insecure-password hello groups G2
+
+ Please note that both lists are functionally identical.
+
+
+3.5. Peers
+----------
+It is possible to propagate entries of any data-types in stick-tables between
+several HAProxy instances over TCP connections in a multi-master fashion. Each
+instance pushes its local updates and insertions to remote peers. The pushed
+values overwrite remote ones without aggregation. As an exception, the data
+type "conn_cur" is never learned from peers, as it is supposed to reflect local
+values. Earlier versions used to synchronize it and to cause negative values in
+active-active setups, and always-growing values upon reloads or active-passive
+switches because the local value would reflect more connections than locally
+present. This information, however, is pushed so that monitoring systems can
+watch it.
+
+Interrupted exchanges are automatically detected and recovered from the last
+known point. In addition, during a soft restart, the old process connects to
+the new one using such a TCP connection to push all its entries before the new
+process tries to connect to other peers. That ensures very fast replication
+during a reload, it typically takes a fraction of a second even for large
+tables.
+
+Note that Server IDs are used to identify servers remotely, so it is important
+that configurations look similar or at least that the same IDs are forced on
+each server on all participants.
+
+peers <peersect>
+ Creates a new peer list with name <peersect>. It is an independent section,
+ which is referenced by one or more stick-tables.
+
+bind [<address>]:port [param*]
+bind /<path> [param*]
+ Defines the binding parameters of the local peer of this "peers" section.
+ Such lines are not supported with "peer" line in the same "peers" section.
+
+disabled
+ Disables a peers section. It disables both listening and any synchronization
+ related to this section. This is provided to disable synchronization of stick
+ tables without having to comment out all "peers" references.
+
+default-bind [param*]
+ Defines the binding parameters for the local peer, excepted its address.
+
+default-server [param*]
+ Change default options for a server in a "peers" section.
+
+ Arguments:
+ <param*> is a list of parameters for this server. The "default-server"
+ keyword accepts an important number of options and has a complete
+ section dedicated to it. In a peers section, the transport
+ parameters of a "default-server" line are supported. Please refer
+ to section 5 for more details, and the "server" keyword below in
+ this section for some of the restrictions.
+
+ See also: "server" and section 5 about server options
+
+enabled
+ This re-enables a peers section which was previously disabled via the
+ "disabled" keyword.
+
+log <target> [len <length>] [format <format>] [sample <ranges>:<sample_size>]
+ <facility> [<level> [<minlevel>]]
+ "peers" sections support the same "log" keyword as for the proxies to
+ log information about the "peers" listener. See "log" option for proxies for
+ more details.
+
+peer <peername> [<address>]:port [param*]
+peer <peername> /<path> [param*]
+ Defines a peer inside a peers section.
+ If <peername> is set to the local peer name (by default hostname, or forced
+ using "-L" command line option or "localpeer" global configuration setting),
+ HAProxy will listen for incoming remote peer connection on the provided
+ address. Otherwise, the address defines where to connect to in order to join
+ the remote peer, and <peername> is used at the protocol level to identify and
+ validate the remote peer on the server side.
+
+ During a soft restart, local peer address is used by the old instance to
+ connect to the new one and initiate a complete replication (teaching process).
+
+ It is strongly recommended to have the exact same peers declaration on all
+ peers and to only rely on the "-L" command line argument or the "localpeer"
+ global configuration setting to change the local peer name. This makes it
+ easier to maintain coherent configuration files across all peers.
+
+ You may want to reference some environment variables in the address
+ parameter, see section 2.3 about environment variables.
+
+ Note: "peer" keyword may transparently be replaced by "server" keyword (see
+ "server" keyword explanation below).
+
+server <peername> [<address>:<port>] [param*]
+server <peername> [/<path>] [param*]
+ As previously mentioned, "peer" keyword may be replaced by "server" keyword
+ with a support for all "server" parameters found in 5.2 paragraph that are
+ related to transport settings. If the underlying peer is local, the address
+ parameter must not be present; it must be provided on a "bind" line (see
+ "bind" keyword of this "peers" section).
+
+ A number of "server" parameters are irrelevant for "peers" sections. Peers by
+ nature do not support dynamic host name resolution nor health checks, hence
+ parameters like "init_addr", "resolvers", "check", "agent-check", or "track"
+ are not supported. Similarly, there is no load balancing nor stickiness, thus
+ parameters such as "weight" or "cookie" have no effect.
+
+ Example:
+ # The old way.
+ peers mypeers
+ peer haproxy1 192.168.0.1:1024
+ peer haproxy2 192.168.0.2:1024
+ peer haproxy3 10.2.0.1:1024
+
+ backend mybackend
+ mode tcp
+ balance roundrobin
+ stick-table type ip size 20k peers mypeers
+ stick on src
+
+ server srv1 192.168.0.30:80
+ server srv2 192.168.0.31:80
+
+ Example:
+ peers mypeers
+ bind 192.168.0.1:1024 ssl crt mycerts/pem
+ default-server ssl verify none
+ server haproxy1 #local peer
+ server haproxy2 192.168.0.2:1024
+ server haproxy3 10.2.0.1:1024
+
+shards <shards>
+
+ In some configurations, one would like to distribute the stick-table contents
+ to some peers in place of sending all the stick-table contents to each peer
+ declared in the "peers" section. In such cases, "shards" specifies the
+ number of peers involved in this stick-table contents distribution.
+ See also "shard" server parameter.
+
+table <tablename> type {ip | integer | string [len <length>] | binary [len <length>]}
+ size <size> [expire <expire>] [write-to <wtable>] [nopurge] [store <data_type>]*
+
+ Configure a stickiness table for the current section. This line is parsed
+ exactly the same way as the "stick-table" keyword in others section, except
+ for the "peers" argument which is not required here and with an additional
+ mandatory first parameter to designate the stick-table. Contrary to others
+ sections, there may be several "table" lines in "peers" sections (see also
+ "stick-table" keyword).
+
+ Also be aware of the fact that "peers" sections have their own stick-table
+ namespaces to avoid collisions between stick-table names identical in
+ different "peers" section. This is internally handled prepending the "peers"
+ sections names to the name of the stick-tables followed by a '/' character.
+ If somewhere else in the configuration file you have to refer to such
+ stick-tables declared in "peers" sections you must use the prefixed version
+ of the stick-table name as follows:
+
+ peers mypeers
+ peer A ...
+ peer B ...
+ table t1 ...
+
+ frontend fe1
+ tcp-request content track-sc0 src table mypeers/t1
+
+ This is also this prefixed version of the stick-table names which must be
+ used to refer to stick-tables through the CLI.
+
+ About "peers" protocol, as only "peers" belonging to the same section may
+ communicate with each other, there is no need to do such a distinction.
+ Several "peers" sections may declare stick-tables with the same name.
+ It is this shorter version of the stick-table name which is sent over the network.
+ There is only a '/' character as prefix to avoid stick-table name collisions between
+ stick-tables declared as backends and stick-table declared in "peers" sections
+ as follows in this weird but supported configuration:
+
+ peers mypeers
+ peer A ...
+ peer B ...
+ table t1 type string size 10m store gpc0
+
+ backend t1
+ stick-table type string size 10m store gpc0 peers mypeers
+
+ Here "t1" table declared in "mypeers" section has "mypeers/t1" as global name.
+ "t1" table declared as a backend as "t1" as global name. But at peer protocol
+ level the former table is named "/t1", the latter is again named "t1".
+
+3.6. Mailers
+------------
+It is possible to send email alerts when the state of servers changes.
+If configured, email alerts are sent to each mailer that is configured
+in a mailers section. Email is sent to mailers using SMTP.
+
+mailers <mailersect>
+ Creates a new mailer list with the name <mailersect>. It is an
+ independent section which is referenced by one or more proxies.
+
+mailer <mailername> <ip>:<port>
+ Defines a mailer inside a mailers section.
+
+ Example:
+ mailers mymailers
+ mailer smtp1 192.168.0.1:587
+ mailer smtp2 192.168.0.2:587
+
+ backend mybackend
+ mode tcp
+ balance roundrobin
+
+ email-alert mailers mymailers
+ email-alert from test1@horms.org
+ email-alert to test2@horms.org
+
+ server srv1 192.168.0.30:80
+ server srv2 192.168.0.31:80
+
+timeout mail <time>
+ Defines the time available for a mail/connection to be made and sent to
+ the mail-server. If not defined the default value is 10 seconds. To allow
+ for at least two SYN-ACK packets to be sent during initial TCP handshake it
+ is advised to keep this value above 4 seconds.
+
+ Example:
+ mailers mymailers
+ timeout mail 20s
+ mailer smtp1 192.168.0.1:587
+
+3.7. Programs
+-------------
+In master-worker mode, it is possible to launch external binaries with the
+master, these processes are called programs. These programs are launched and
+managed the same way as the workers.
+
+During a reload of HAProxy, those processes are dealing with the same
+sequence as a worker:
+
+ - the master is re-executed
+ - the master sends a SIGUSR1 signal to the program
+ - if "option start-on-reload" is not disabled, the master launches a new
+ instance of the program
+
+During a stop, or restart, a SIGTERM is sent to the programs.
+
+program <name>
+ This is a new program section; this section will create an instance <name>
+ which is visible in "show proc" on the master CLI. (See "9.4. Master CLI" in
+ the management guide).
+
+command <command> [arguments*]
+ Define the command to start with optional arguments. The command is looked
+ up in the current PATH if it does not include an absolute path. This is a
+ mandatory option of the program section. Arguments containing spaces must
+ be enclosed in quotes or double quotes or be prefixed by a backslash.
+
+user <user name>
+ Changes the executed command user ID to the <user name> from /etc/passwd.
+ See also "group".
+
+group <group name>
+ Changes the executed command group ID to the <group name> from /etc/group.
+ See also "user".
+
+option start-on-reload
+no option start-on-reload
+ Start (or not) a new instance of the program upon a reload of the master.
+ The default is to start a new instance. This option may only be used in a
+ program section.
+
+
+3.8. HTTP-errors
+----------------
+
+It is possible to globally declare several groups of HTTP errors, to be
+imported afterwards in any proxy section. Same group may be referenced at
+several places and can be fully or partially imported.
+
+http-errors <name>
+ Create a new http-errors group with the name <name>. It is an independent
+ section that may be referenced by one or more proxies using its name.
+
+errorfile <code> <file>
+ Associate a file contents to an HTTP error code
+
+ Arguments :
+ <code> is the HTTP status code. Currently, HAProxy is capable of
+ generating codes 200, 400, 401, 403, 404, 405, 407, 408, 410,
+ 425, 429, 500, 501, 502, 503, and 504.
+
+ <file> designates a file containing the full HTTP response. It is
+ recommended to follow the common practice of appending ".http" to
+ the filename so that people do not confuse the response with HTML
+ error pages, and to use absolute paths, since files are read
+ before any chroot is performed.
+
+ Please refer to the "errorfile" keyword in section 4 for details.
+
+ Example:
+ http-errors website-1
+ errorfile 400 /etc/haproxy/errorfiles/site1/400.http
+ errorfile 404 /etc/haproxy/errorfiles/site1/404.http
+ errorfile 408 /dev/null # work around Chrome pre-connect bug
+
+ http-errors website-2
+ errorfile 400 /etc/haproxy/errorfiles/site2/400.http
+ errorfile 404 /etc/haproxy/errorfiles/site2/404.http
+ errorfile 408 /dev/null # work around Chrome pre-connect bug
+
+3.9. Rings
+----------
+
+It is possible to globally declare ring-buffers, to be used as target for log
+servers or traces.
+
+ring <ringname>
+ Creates a new ring-buffer with name <ringname>.
+
+backing-file <path>
+ This replaces the regular memory allocation by a RAM-mapped file to store the
+ ring. This can be useful for collecting traces or logs for post-mortem
+ analysis, without having to attach a slow client to the CLI. Newer contents
+ will automatically replace older ones so that the latest contents are always
+ available. The contents written to the ring will be visible in that file once
+ the process stops (most often they will even be seen very soon after but
+ there is no such guarantee since writes are not synchronous).
+
+ When this option is used, the total storage area is reduced by the size of
+ the "struct ring" that starts at the beginning of the area, and that is
+ required to recover the area's contents. The file will be created with the
+ starting user's ownership, with mode 0600 and will be of the size configured
+ by the "size" directive. When the directive is parsed (thus even during
+ config checks), any existing non-empty file will first be renamed with the
+ extra suffix ".bak", and any previously existing file with suffix ".bak" will
+ be removed. This ensures that instant reload or restart of the process will
+ not wipe precious debugging information, and will leave time for an admin to
+ spot this new ".bak" file and to archive it if needed. As such, after a crash
+ the file designated by <path> will contain the freshest information, and if
+ the service is restarted, the "<path>.bak" file will have it instead. This
+ means that the total storage capacity required will be double of the ring
+ size. Failures to rotate the file are silently ignored, so placing the file
+ into a directory without write permissions will be sufficient to avoid the
+ backup file if not desired.
+
+ WARNING: there are stability and security implications in using this feature.
+ First, backing the ring to a slow device (e.g. physical hard drive) may cause
+ perceptible slowdowns during accesses, and possibly even panics if too many
+ threads compete for accesses. Second, an external process modifying the area
+ could cause the haproxy process to crash or to overwrite some of its own
+ memory with traces. Third, if the file system fills up before the ring,
+ writes to the ring may cause the process to crash.
+
+ The information present in this ring is structured and is NOT directly
+ readable using a text editor (even though most of it looks barely readable).
+ The output of this file is only intended for developers.
+
+description <text>
+ The description is an optional description string of the ring. It will
+ appear on CLI. By default, <name> is reused to fill this field.
+
+format <format>
+ Format used to store events into the ring buffer.
+
+ Arguments:
+ <format> is the log format used when generating syslog messages. It may be
+ one of the following :
+
+ iso A message containing only the ISO date, followed by the text.
+ The PID, process name and system name are omitted. This is
+ designed to be used with a local log server.
+
+ local Analog to rfc3164 syslog message format except that hostname
+ field is stripped. This is the default.
+ Note: option "log-send-hostname" switches the default to
+ rfc3164.
+
+ raw A message containing only the text. The level, PID, date, time,
+ process name and system name are omitted. This is designed to be
+ used in containers or during development, where the severity
+ only depends on the file descriptor used (stdout/stderr). This
+ is the default.
+
+ rfc3164 The RFC3164 syslog message format.
+ (https://tools.ietf.org/html/rfc3164)
+
+ rfc5424 The RFC5424 syslog message format.
+ (https://tools.ietf.org/html/rfc5424)
+
+ short A message containing only a level between angle brackets such as
+ '<3>', followed by the text. The PID, date, time, process name
+ and system name are omitted. This is designed to be used with a
+ local log server. This format is compatible with what the systemd
+ logger consumes.
+
+ priority A message containing only a level plus syslog facility between angle
+ brackets such as '<63>', followed by the text. The PID, date, time,
+ process name and system name are omitted. This is designed to be used
+ with a local log server.
+
+ timed A message containing only a level between angle brackets such as
+ '<3>', followed by ISO date and by the text. The PID, process
+ name and system name are omitted. This is designed to be
+ used with a local log server.
+
+maxlen <length>
+ The maximum length of an event message stored into the ring,
+ including formatted header. If an event message is longer than
+ <length>, it will be truncated to this length.
+
+server <name> <address> [param*]
+ Used to configure a syslog tcp server to forward messages from ring buffer.
+ This supports all "server" parameters found in 5.2 paragraph. Some of
+ these parameters are irrelevant for "ring" sections. Important point: there
+ is little reason to add more than one server to a ring, because all servers
+ will receive the exact same copy of the ring contents, and as such the ring
+ will progress at the speed of the slowest server. If one server does not
+ respond, it will prevent old messages from being purged and may block new
+ messages from being inserted into the ring. The proper way to send messages
+ to multiple servers is to use one distinct ring per log server, not to
+ attach multiple servers to the same ring. Note that specific server directive
+ "log-proto" is used to set the protocol used to send messages.
+
+size <size>
+ This is the optional size in bytes for the ring-buffer. Default value is
+ set to BUFSIZE.
+
+timeout connect <timeout>
+ Set the maximum time to wait for a connection attempt to a server to succeed.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+timeout server <timeout>
+ Set the maximum time for pending data staying into output buffer.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ Example:
+ global
+ log ring@myring local7
+
+ ring myring
+ description "My local buffer"
+ format rfc3164
+ maxlen 1200
+ size 32764
+ timeout connect 5s
+ timeout server 10s
+ server mysyslogsrv 127.0.0.1:6514 log-proto octet-count
+
+3.10. Log forwarding
+--------------------
+
+It is possible to declare one or multiple log forwarding sections,
+HAProxy will forward all received log messages to a list of log servers.
+
+log-forward <name>
+ Creates a new log forwarder proxy identified as <name>.
+
+backlog <conns>
+ Give hints to the system about the approximate listen backlog desired size
+ on connections accept.
+
+bind <addr> [param*]
+ Used to configure a stream log listener to receive messages to forward.
+ This supports the "bind" parameters found in 5.1 paragraph including
+ those about ssl but some statements such as "alpn" may be irrelevant for
+ syslog protocol over TCP.
+ Those listeners support both "Octet Counting" and "Non-Transparent-Framing"
+ modes as defined in rfc-6587.
+
+dgram-bind <addr> [param*]
+ Used to configure a datagram log listener to receive messages to forward.
+ Addresses must be in IPv4 or IPv6 form, followed by a port. This supports
+ some of the "bind" parameters found in 5.1 paragraph among which
+ "interface", "namespace" or "transparent", the other ones being
+ silently ignored as irrelevant for UDP/syslog case.
+
+log global
+log <target> [len <length>] [format <format>] [sample <ranges>:<sample_size>]
+ <facility> [<level> [<minlevel>]]
+ Used to configure target log servers. See more details on proxies
+ documentation.
+ If no format specified, HAProxy tries to keep the incoming log format.
+ Configured facility is ignored, except if incoming message does not
+ present a facility but one is mandatory on the outgoing format.
+ If there is no timestamp available in the input format, but the field
+ exists in output format, HAProxy will use the local date.
+
+ Example:
+ global
+ log stderr format iso local7
+
+ ring myring
+ description "My local buffer"
+ format rfc5424
+ maxlen 1200
+ size 32764
+ timeout connect 5s
+ timeout server 10s
+ # syslog tcp server
+ server mysyslogsrv 127.0.0.1:514 log-proto octet-count
+
+ log-forward sylog-loadb
+ dgram-bind 127.0.0.1:1514
+ bind 127.0.0.1:1514
+ # all messages on stderr
+ log global
+ # all messages on local tcp syslog server
+ log ring@myring local0
+ # load balance messages on 4 udp syslog servers
+ log 127.0.0.1:10001 sample 1:4 local0
+ log 127.0.0.1:10002 sample 2:4 local0
+ log 127.0.0.1:10003 sample 3:4 local0
+ log 127.0.0.1:10004 sample 4:4 local0
+
+maxconn <conns>
+ Fix the maximum number of concurrent connections on a log forwarder.
+ 10 is the default.
+
+timeout client <timeout>
+ Set the maximum inactivity time on the client side.
+
+3.11. HTTPClient tuning
+-----------------------
+
+HTTPClient is an internal HTTP library, it can be used by various subsystems,
+for example in LUA scripts. HTTPClient is not used in the data path, in other
+words it has nothing to do with HTTP traffic passing through HAProxy.
+
+httpclient.resolvers.disabled <on|off>
+ Disable the DNS resolution of the httpclient. Prevent the creation of the
+ "default" resolvers section.
+
+ Default value is off.
+
+httpclient.resolvers.id <resolvers id>
+ This option defines the resolvers section with which the httpclient will try
+ to resolve.
+
+ Default option is the "default" resolvers ID. By default, if this option is
+ not used, it will simply disable the resolving if the section is not found.
+
+ However, when this option is explicitly enabled it will trigger a
+ configuration error if it fails to load.
+
+httpclient.resolvers.prefer <ipv4|ipv6>
+ This option allows to choose which family of IP you want when resolving,
+ which is convenient when IPv6 is not available on your network. Default
+ option is "ipv6".
+
+httpclient.retries <number>
+ This option allows to configure the number of retry attempts of the
+ httpclient when a request fails. This does the same as the "retries" keyword
+ in a backend.
+
+ Default value is 3.
+
+httpclient.ssl.ca-file <cafile>
+ This option defines the ca-file which should be used to verify the server
+ certificate. It takes the same parameters as the "ca-file" option on the
+ server line.
+
+ By default and when this option is not used, the value is
+ "@system-ca" which tries to load the CA of the system. If it fails the SSL
+ will be disabled for the httpclient.
+
+ However, when this option is explicitly enabled it will trigger a
+ configuration error if it fails.
+
+httpclient.ssl.verify [none|required]
+ Works the same way as the verify option on server lines. If set to 'none',
+ server certificates are not verified. Default option is "required".
+
+ By default and when this option is not used, the value is
+ "required". If it fails the SSL will be disabled for the httpclient.
+
+ However, when this option is explicitly enabled it will trigger a
+ configuration error if it fails.
+
+httpclient.timeout.connect <timeout>
+ Set the maximum time to wait for a connection attempt by default for the
+ httpclient.
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The default value is 5000ms.
+
+4. Proxies
+----------
+
+Proxy configuration can be located in a set of sections :
+ - defaults [<name>] [ from <defaults_name> ]
+ - frontend <name> [ from <defaults_name> ]
+ - backend <name> [ from <defaults_name> ]
+ - listen <name> [ from <defaults_name> ]
+
+A "frontend" section describes a set of listening sockets accepting client
+connections.
+
+A "backend" section describes a set of servers to which the proxy will connect
+to forward incoming connections.
+
+A "listen" section defines a complete proxy with its frontend and backend
+parts combined in one section. It is generally useful for TCP-only traffic.
+
+A "defaults" section resets all settings to the documented ones and presets new
+ones for use by subsequent sections. All of "frontend", "backend" and "listen"
+sections always take their initial settings from a defaults section, by default
+the latest one that appears before the newly created section. It is possible to
+explicitly designate a specific "defaults" section to load the initial settings
+from by indicating its name on the section line after the optional keyword
+"from". While "defaults" section do not impose a name, this use is encouraged
+for better readability. It is also the only way to designate a specific section
+to use instead of the default previous one. Since "defaults" section names are
+optional, by default a very permissive check is applied on their name and these
+are even permitted to overlap. However if a "defaults" section is referenced by
+any other section, its name must comply with the syntax imposed on all proxy
+names, and this name must be unique among the defaults sections. Please note
+that regardless of what is currently permitted, it is recommended to avoid
+duplicate section names in general and to respect the same syntax as for proxy
+names. This rule might be enforced in a future version. In addition, a warning
+is emitted if a defaults section is explicitly used by a proxy while it is also
+implicitly used by another one because it is the last one defined. It is highly
+encouraged to not mix both usages by always using explicit references or by
+adding a last common defaults section reserved for all implicit uses.
+
+Note that it is even possible for a defaults section to take its initial
+settings from another one, and as such, inherit settings across multiple levels
+of defaults sections. This can be convenient to establish certain configuration
+profiles to carry groups of default settings (e.g. TCP vs HTTP or short vs long
+timeouts) but can quickly become confusing to follow.
+
+All proxy names must be formed from upper and lower case letters, digits,
+'-' (dash), '_' (underscore) , '.' (dot) and ':' (colon). Proxy names are
+case-sensitive, which means that "www" and "WWW" are two different proxies.
+
+Historically, all proxy names could overlap, it just caused troubles in the
+logs. Since the introduction of content switching, it is mandatory that two
+proxies with overlapping capabilities (frontend/backend) have different names.
+However, it is still permitted that a frontend and a backend share the same
+name, as this configuration seems to be commonly encountered.
+
+Right now, two major proxy modes are supported : "tcp", also known as layer 4,
+and "http", also known as layer 7. In layer 4 mode, HAProxy simply forwards
+bidirectional traffic between two sides. In layer 7 mode, HAProxy analyzes the
+protocol, and can interact with it by allowing, blocking, switching, adding,
+modifying, or removing arbitrary contents in requests or responses, based on
+arbitrary criteria.
+
+In HTTP mode, the processing applied to requests and responses flowing over
+a connection depends on the combination of the frontend's HTTP options and
+the backend's. HAProxy supports 3 connection modes :
+
+ - KAL : keep alive ("option http-keep-alive") which is the default mode : all
+ requests and responses are processed, and connections remain open but idle
+ between responses and new requests.
+
+ - SCL: server close ("option http-server-close") : the server-facing
+ connection is closed after the end of the response is received, but the
+ client-facing connection remains open.
+
+ - CLO: close ("option httpclose"): the connection is closed after the end of
+ the response and "Connection: close" appended in both directions.
+
+The effective mode that will be applied to a connection passing through a
+frontend and a backend can be determined by both proxy modes according to the
+following matrix, but in short, the modes are symmetric, keep-alive is the
+weakest option and close is the strongest.
+
+ Backend mode
+
+ | KAL | SCL | CLO
+ ----+-----+-----+----
+ KAL | KAL | SCL | CLO
+ ----+-----+-----+----
+ mode SCL | SCL | SCL | CLO
+ ----+-----+-----+----
+ CLO | CLO | CLO | CLO
+
+It is possible to chain a TCP frontend to an HTTP backend. It is pointless if
+only HTTP traffic is handled. But it may be used to handle several protocols
+within the same frontend. In this case, the client's connection is first handled
+as a raw tcp connection before being upgraded to HTTP. Before the upgrade, the
+content processings are performed on raw data. Once upgraded, data is parsed
+and stored using an internal representation called HTX and it is no longer
+possible to rely on raw representation. There is no way to go back.
+
+There are two kinds of upgrades, in-place upgrades and destructive upgrades. The
+first one involves a TCP to HTTP/1 upgrade. In HTTP/1, the request
+processings are serialized, thus the applicative stream can be preserved. The
+second one involves a TCP to HTTP/2 upgrade. Because it is a multiplexed
+protocol, the applicative stream cannot be associated to any HTTP/2 stream and
+is destroyed. New applicative streams are then created when HAProxy receives
+new HTTP/2 streams at the lower level, in the H2 multiplexer. It is important
+to understand this difference because that drastically changes the way to
+process data. When an HTTP/1 upgrade is performed, the content processings
+already performed on raw data are neither lost nor reexecuted while for an
+HTTP/2 upgrade, applicative streams are distinct and all frontend rules are
+evaluated systematically on each one. And as said, the first stream, the TCP
+one, is destroyed, but only after the frontend rules were evaluated.
+
+There is another important point to understand when HTTP processings are
+performed from a TCP proxy. While HAProxy is able to parse HTTP/1 in-fly from
+tcp-request content rules, it is not possible for HTTP/2. Only the HTTP/2
+preface can be parsed. This is a huge limitation regarding the HTTP content
+analysis in TCP. Concretely it is only possible to know if received data are
+HTTP. For instance, it is not possible to choose a backend based on the Host
+header value while it is trivial in HTTP/1. Fortunately, there is a solution to
+mitigate this drawback.
+
+There are two ways to perform an HTTP upgrade. The first one, the historical
+method, is to select an HTTP backend. The upgrade happens when the backend is
+set. Thus, for in-place upgrades, only the backend configuration is considered
+in the HTTP data processing. For destructive upgrades, the applicative stream
+is destroyed, thus its processing is stopped. With this method, possibilities
+to choose a backend with an HTTP/2 connection are really limited, as mentioned
+above, and a bit useless because the stream is destroyed. The second method is
+to upgrade during the tcp-request content rules evaluation, thanks to the
+"switch-mode http" action. In this case, the upgrade is performed in the
+frontend context and it is possible to define HTTP directives in this
+frontend. For in-place upgrades, it offers all the power of the HTTP analysis
+as soon as possible. It is not that far from an HTTP frontend. For destructive
+upgrades, it does not change anything except it is useless to choose a backend
+on limited information. It is of course the recommended method. Thus, testing
+the request protocol from the tcp-request content rules to perform an HTTP
+upgrade is enough. All the remaining HTTP manipulation may be moved to the
+frontend http-request ruleset. But keep in mind that tcp-request content rules
+remain evaluated on each stream; that cannot be changed.
+
+4.1. Proxy keywords matrix
+--------------------------
+
+The following list of keywords is supported. Most of them may only be used in a
+limited set of section types. Some of them are marked as "deprecated" because
+they are inherited from an old syntax which may be confusing or functionally
+limited, and there are new recommended keywords to replace them. Keywords
+marked with "(*)" can be optionally inverted using the "no" prefix, e.g. "no
+option contstats". This makes sense when the option has been enabled by default
+and must be disabled for a specific instance. Such options may also be prefixed
+with "default" in order to restore default settings regardless of what has been
+specified in a previous "defaults" section. Keywords supported in defaults
+sections marked with "(!)" are only supported in named defaults sections, not
+anonymous ones.
+
+
+ keyword defaults frontend listen backend
+------------------------------------+----------+----------+---------+---------
+acl X (!) X X X
+backlog X X X -
+balance X - X X
+bind - X X -
+capture cookie - X X -
+capture request header - X X -
+capture response header - X X -
+clitcpka-cnt X X X -
+clitcpka-idle X X X -
+clitcpka-intvl X X X -
+compression X X X X
+cookie X - X X
+declare capture - X X -
+default-server X - X X
+default_backend X X X -
+description - X X X
+disabled X X X X
+dispatch - - X X
+email-alert from X X X X
+email-alert level X X X X
+email-alert mailers X X X X
+email-alert myhostname X X X X
+email-alert to X X X X
+enabled X X X X
+errorfile X X X X
+errorfiles X X X X
+errorloc X X X X
+errorloc302 X X X X
+-- keyword -------------------------- defaults - frontend - listen -- backend -
+errorloc303 X X X X
+error-log-format X X X -
+force-persist - - X X
+filter - X X X
+fullconn X - X X
+hash-type X - X X
+http-after-response X (!) X X X
+http-check comment X - X X
+http-check connect X - X X
+http-check disable-on-404 X - X X
+http-check expect X - X X
+http-check send X - X X
+http-check send-state X - X X
+http-check set-var X - X X
+http-check unset-var X - X X
+http-error X X X X
+http-request X (!) X X X
+http-response X (!) X X X
+http-reuse X - X X
+http-send-name-header X - X X
+id - X X X
+ignore-persist - - X X
+load-server-state-from-file X - X X
+log (*) X X X X
+log-format X X X -
+log-format-sd X X X -
+log-tag X X X X
+max-keep-alive-queue X - X X
+max-session-srv-conns X X X -
+maxconn X X X -
+mode X X X X
+monitor fail - X X -
+monitor-uri X X X -
+option abortonclose (*) X - X X
+option accept-invalid-http-request (*) X X X -
+option accept-invalid-http-response (*) X - X X
+option allbackups (*) X - X X
+option checkcache (*) X - X X
+option clitcpka (*) X X X -
+option contstats (*) X X X -
+option disable-h2-upgrade (*) X X X -
+option dontlog-normal (*) X X X -
+option dontlognull (*) X X X -
+-- keyword -------------------------- defaults - frontend - listen -- backend -
+option forwardfor X X X X
+option forwarded (*) X - X X
+option h1-case-adjust-bogus-client (*) X X X -
+option h1-case-adjust-bogus-server (*) X - X X
+option http-buffer-request (*) X X X X
+option http-ignore-probes (*) X X X -
+option http-keep-alive (*) X X X X
+option http-no-delay (*) X X X X
+option http-pretend-keepalive (*) X - X X
+option http-restrict-req-hdr-names X X X X
+option http-server-close (*) X X X X
+option http-use-proxy-header (*) X X X -
+option httpchk X - X X
+option httpclose (*) X X X X
+option httplog X X X -
+option httpslog X X X -
+option independent-streams (*) X X X X
+option ldap-check X - X X
+option external-check X - X X
+option log-health-checks (*) X - X X
+option log-separate-errors (*) X X X -
+option logasap (*) X X X -
+option mysql-check X - X X
+option nolinger (*) X X X X
+option originalto X X X X
+option persist (*) X - X X
+option pgsql-check X - X X
+option prefer-last-server (*) X - X X
+option redispatch (*) X - X X
+option redis-check X - X X
+option smtpchk X - X X
+option socket-stats (*) X X X -
+option splice-auto (*) X X X X
+option splice-request (*) X X X X
+option splice-response (*) X X X X
+option spop-check X - X X
+option srvtcpka (*) X - X X
+option ssl-hello-chk X - X X
+-- keyword -------------------------- defaults - frontend - listen -- backend -
+option tcp-check X - X X
+option tcp-smart-accept (*) X X X -
+option tcp-smart-connect (*) X - X X
+option tcpka X X X X
+option tcplog X X X X
+option transparent (*) X - X X
+option idle-close-on-response (*) X X X -
+external-check command X - X X
+external-check path X - X X
+persist rdp-cookie X - X X
+rate-limit sessions X X X -
+redirect - X X X
+-- keyword -------------------------- defaults - frontend - listen -- backend -
+retries X - X X
+retry-on X - X X
+server - - X X
+server-state-file-name X - X X
+server-template - - X X
+source X - X X
+srvtcpka-cnt X - X X
+srvtcpka-idle X - X X
+srvtcpka-intvl X - X X
+stats admin - X X X
+stats auth X X X X
+stats enable X X X X
+stats hide-version X X X X
+stats http-request - X X X
+stats realm X X X X
+stats refresh X X X X
+stats scope X X X X
+stats show-desc X X X X
+stats show-legends X X X X
+stats show-node X X X X
+stats uri X X X X
+-- keyword -------------------------- defaults - frontend - listen -- backend -
+stick match - - X X
+stick on - - X X
+stick store-request - - X X
+stick store-response - - X X
+stick-table - X X X
+tcp-check comment X - X X
+tcp-check connect X - X X
+tcp-check expect X - X X
+tcp-check send X - X X
+tcp-check send-lf X - X X
+tcp-check send-binary X - X X
+tcp-check send-binary-lf X - X X
+tcp-check set-var X - X X
+tcp-check unset-var X - X X
+tcp-request connection X (!) X X -
+tcp-request content X (!) X X X
+tcp-request inspect-delay X (!) X X X
+tcp-request session X (!) X X -
+tcp-response content X (!) - X X
+tcp-response inspect-delay X (!) - X X
+timeout check X - X X
+timeout client X X X -
+timeout client-fin X X X -
+timeout client-hs X X X -
+timeout connect X - X X
+timeout http-keep-alive X X X X
+timeout http-request X X X X
+timeout queue X - X X
+timeout server X - X X
+timeout server-fin X - X X
+timeout tarpit X X X X
+timeout tunnel X - X X
+transparent (deprecated) X - X X
+unique-id-format X X X -
+unique-id-header X X X -
+use_backend - X X -
+use-fcgi-app - - X X
+use-server - - X X
+------------------------------------+----------+----------+---------+---------
+ keyword defaults frontend listen backend
+
+
+4.2. Alphabetically sorted keywords reference
+---------------------------------------------
+
+This section provides a description of each keyword and its usage.
+
+
+acl <aclname> <criterion> [flags] [operator] <value> ...
+ Declare or complete an access list.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. ACLs defined in a defaults section are not visible from other sections
+ using it.
+
+ Example:
+ acl invalid_src src 0.0.0.0/7 224.0.0.0/3
+ acl invalid_src src_port 0:1023
+ acl local_dst hdr(host) -i localhost
+
+ See section 7 about ACL usage.
+
+
+backlog <conns>
+ Give hints to the system about the approximate listen backlog desired size
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <conns> is the number of pending connections. Depending on the operating
+ system, it may represent the number of already acknowledged
+ connections, of non-acknowledged ones, or both.
+
+ This option is only meaningful for stream listeners, including QUIC ones. Its
+ behavior however is not identical with QUIC instances.
+
+ For all listeners but QUIC, in order to protect against SYN flood attacks,
+ one solution is to increase the system's SYN backlog size. Depending on the
+ system, sometimes it is just tunable via a system parameter, sometimes it is
+ not adjustable at all, and sometimes the system relies on hints given by the
+ application at the time of the listen() syscall. By default, HAProxy passes
+ the frontend's maxconn value to the listen() syscall. On systems which can
+ make use of this value, it can sometimes be useful to be able to specify a
+ different value, hence this backlog parameter.
+
+ On Linux 2.4, the parameter is ignored by the system. On Linux 2.6, it is
+ used as a hint and the system accepts up to the smallest greater power of
+ two, and never more than some limits (usually 32768).
+
+  For QUIC listeners, backlog sets a shared limit for both the maximum count
+  of active handshakes and connections waiting to be accepted. The handshake
+  phase relies primarily on the network latency with the remote peer, whereas
+  the second phase depends solely on haproxy load. When either one of these
+  limits is reached, haproxy starts to drop reception of INITIAL packets,
+  preventing any new connection allocation, until the connection excess starts
+  to decrease. This situation may cause browsers to silently downgrade the
+  HTTP version and switch to TCP.
+
+ See also : "maxconn" and the target operating system's tuning guide.
+
+
+balance <algorithm> [ <arguments> ]
+balance url_param <param> [check_post]
+ Define the load balancing algorithm to be used in a backend.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <algorithm> is the algorithm used to select a server when doing load
+ balancing. This only applies when no persistence information
+ is available, or when a connection is redispatched to another
+ server. <algorithm> may be one of the following :
+
+ roundrobin Each server is used in turns, according to their weights.
+ This is the smoothest and fairest algorithm when the server's
+ processing time remains equally distributed. This algorithm
+ is dynamic, which means that server weights may be adjusted
+ on the fly for slow starts for instance. It is limited by
+ design to 4095 active servers per backend. Note that in some
+ large farms, when a server becomes up after having been down
+ for a very short time, it may sometimes take a few hundreds
+ requests for it to be re-integrated into the farm and start
+ receiving traffic. This is normal, though very rare. It is
+ indicated here in case you would have the chance to observe
+ it, so that you don't worry. Note: weights are ignored for
+ backends in LOG mode.
+
+ static-rr Each server is used in turns, according to their weights.
+                  This algorithm is similar to roundrobin except that it is
+ static, which means that changing a server's weight on the
+ fly will have no effect. On the other hand, it has no design
+ limitation on the number of servers, and when a server goes
+ up, it is always immediately reintroduced into the farm, once
+ the full map is recomputed. It also uses slightly less CPU to
+ run (around -1%). This algorithm is not usable in LOG mode.
+
+ leastconn The server with the lowest number of connections receives the
+ connection. Round-robin is performed within groups of servers
+ of the same load to ensure that all servers will be used. Use
+ of this algorithm is recommended where very long sessions are
+ expected, such as LDAP, SQL, TSE, etc... but is not very well
+ suited for protocols using short sessions such as HTTP. This
+ algorithm is dynamic, which means that server weights may be
+ adjusted on the fly for slow starts for instance. It will
+ also consider the number of queued connections in addition to
+ the established ones in order to minimize queuing. This
+ algorithm is not usable in LOG mode.
+
+ first The first server with available connection slots receives the
+ connection. The servers are chosen from the lowest numeric
+ identifier to the highest (see server parameter "id"), which
+ defaults to the server's position in the farm. Once a server
+ reaches its maxconn value, the next server is used. It does
+ not make sense to use this algorithm without setting maxconn.
+ The purpose of this algorithm is to always use the smallest
+ number of servers so that extra servers can be powered off
+ during non-intensive hours. This algorithm ignores the server
+ weight, and brings more benefit to long session such as RDP
+ or IMAP than HTTP, though it can be useful there too. In
+ order to use this algorithm efficiently, it is recommended
+ that a cloud controller regularly checks server usage to turn
+ them off when unused, and regularly checks backend queue to
+ turn new servers on when the queue inflates. Alternatively,
+ using "http-check send-state" may inform servers on the load.
+
+ hash Takes a regular sample expression in argument. The expression
+ is evaluated for each request and hashed according to the
+ configured hash-type. The result of the hash is divided by
+ the total weight of the running servers to designate which
+ server will receive the request. This can be used in place of
+ "source", "uri", "hdr()", "url_param()", "rdp-cookie" to make
+ use of a converter, refine the evaluation, or be used to
+ extract data from local variables for example. When the data
+ is not available, round robin will apply. This algorithm is
+ static by default, which means that changing a server's
+ weight on the fly will have no effect, but this can be
+ changed using "hash-type". This algorithm is not usable for
+ backends in LOG mode, please use "log-hash" instead.
+
+ source The source IP address is hashed and divided by the total
+ weight of the running servers to designate which server will
+ receive the request. This ensures that the same client IP
+ address will always reach the same server as long as no
+ server goes down or up. If the hash result changes due to the
+ number of running servers changing, many clients will be
+ directed to a different server. This algorithm is generally
+ used in TCP mode where no cookie may be inserted. It may also
+ be used on the Internet to provide a best-effort stickiness
+ to clients which refuse session cookies. This algorithm is
+ static by default, which means that changing a server's
+ weight on the fly will have no effect, but this can be
+ changed using "hash-type". See also the "hash" option above.
+ This algorithm is not usable for backends in LOG mode.
+
+ uri This algorithm hashes either the left part of the URI (before
+ the question mark) or the whole URI (if the "whole" parameter
+ is present) and divides the hash value by the total weight of
+ the running servers. The result designates which server will
+ receive the request. This ensures that the same URI will
+ always be directed to the same server as long as no server
+ goes up or down. This is used with proxy caches and
+ anti-virus proxies in order to maximize the cache hit rate.
+ Note that this algorithm may only be used in an HTTP backend.
+ This algorithm is static by default, which means that
+ changing a server's weight on the fly will have no effect,
+ but this can be changed using "hash-type".
+
+ This algorithm supports two optional parameters "len" and
+ "depth", both followed by a positive integer number. These
+ options may be helpful when it is needed to balance servers
+ based on the beginning of the URI only. The "len" parameter
+ indicates that the algorithm should only consider that many
+ characters at the beginning of the URI to compute the hash.
+ Note that having "len" set to 1 rarely makes sense since most
+ URIs start with a leading "/".
+
+ The "depth" parameter indicates the maximum directory depth
+ to be used to compute the hash. One level is counted for each
+ slash in the request. If both parameters are specified, the
+ evaluation stops when either is reached.
+
+ A "path-only" parameter indicates that the hashing key starts
+ at the first '/' of the path. This can be used to ignore the
+ authority part of absolute URIs, and to make sure that HTTP/1
+ and HTTP/2 URIs will provide the same hash. See also the
+ "hash" option above.
+
+ url_param The URL parameter specified in argument will be looked up in
+ the query string of each HTTP GET request.
+
+ If the modifier "check_post" is used, then an HTTP POST
+ request entity will be searched for the parameter argument,
+ when it is not found in a query string after a question mark
+ ('?') in the URL. The message body will only start to be
+ analyzed once either the advertised amount of data has been
+ received or the request buffer is full. In the unlikely event
+ that chunked encoding is used, only the first chunk is
+                  scanned. Parameter values separated by a chunk boundary may
+                  be randomly balanced, if at all. This keyword used to support
+ an optional <max_wait> parameter which is now ignored.
+
+ If the parameter is found followed by an equal sign ('=') and
+ a value, then the value is hashed and divided by the total
+ weight of the running servers. The result designates which
+ server will receive the request.
+
+ This is used to track user identifiers in requests and ensure
+ that a same user ID will always be sent to the same server as
+ long as no server goes up or down. If no value is found or if
+ the parameter is not found, then a round robin algorithm is
+ applied. Note that this algorithm may only be used in an HTTP
+ backend. This algorithm is static by default, which means
+ that changing a server's weight on the fly will have no
+ effect, but this can be changed using "hash-type". See also
+ the "hash" option above.
+
+ hdr(<name>) The HTTP header <name> will be looked up in each HTTP
+ request. Just as with the equivalent ACL 'hdr()' function,
+ the header name in parenthesis is not case sensitive. If the
+ header is absent or if it does not contain any value, the
+ roundrobin algorithm is applied instead.
+
+ An optional 'use_domain_only' parameter is available, for
+ reducing the hash algorithm to the main domain part with some
+ specific headers such as 'Host'. For instance, in the Host
+ value "haproxy.1wt.eu", only "1wt" will be considered.
+
+ This algorithm is static by default, which means that
+ changing a server's weight on the fly will have no effect,
+ but this can be changed using "hash-type". See also the
+ "hash" option above.
+
+ random
+ random(<draws>)
+ A random number will be used as the key for the consistent
+ hashing function. This means that the servers' weights are
+ respected, dynamic weight changes immediately take effect, as
+ well as new server additions. Random load balancing can be
+ useful with large farms or when servers are frequently added
+ or removed as it may avoid the hammering effect that could
+ result from roundrobin or leastconn in this situation. The
+ hash-balance-factor directive can be used to further improve
+ fairness of the load balancing, especially in situations
+ where servers show highly variable response times. When an
+ argument <draws> is present, it must be an integer value one
+ or greater, indicating the number of draws before selecting
+ the least loaded of these servers. It was indeed demonstrated
+ that picking the least loaded of two servers is enough to
+ significantly improve the fairness of the algorithm, by
+ always avoiding to pick the most loaded server within a farm
+ and getting rid of any bias that could be induced by the
+ unfair distribution of the consistent list. Higher values N
+ will take away N-1 of the highest loaded servers at the
+ expense of performance. With very high values, the algorithm
+ will converge towards the leastconn's result but much slower.
+ The default value is 2, which generally shows very good
+ distribution and performance. This algorithm is also known as
+ the Power of Two Random Choices and is described here :
+ http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
+
+ For backends in LOG mode, the number of draws is ignored and
+ a single random is picked since there is no notion of server
+ load. Random log balancing can be useful with large farms or
+ when servers are frequently added or removed from the pool of
+ available servers as it may avoid the hammering effect that
+ could result from roundrobin in this situation.
+
+ rdp-cookie
+ rdp-cookie(<name>)
+ The RDP cookie <name> (or "mstshash" if omitted) will be
+ looked up and hashed for each incoming TCP request. Just as
+ with the equivalent ACL 'req.rdp_cookie()' function, the name
+ is not case-sensitive. This mechanism is useful as a degraded
+ persistence mode, as it makes it possible to always send the
+ same user (or the same session ID) to the same server. If the
+ cookie is not found, the normal roundrobin algorithm is
+ used instead.
+
+ Note that for this to work, the frontend must ensure that an
+ RDP cookie is already present in the request buffer. For this
+ you must use 'tcp-request content accept' rule combined with
+ a 'req.rdp_cookie_cnt' ACL.
+
+ This algorithm is static by default, which means that
+ changing a server's weight on the fly will have no effect,
+ but this can be changed using "hash-type". See also the
+ "hash" option above.
+
+ log-hash Takes a comma-delimited list of converters in argument. These
+ converters are applied in sequence to the input log message,
+ and the result will be cast as a string then hashed according
+ to the configured hash-type. The resulting hash will be used
+ to select the destination server among the ones declared in
+ the log backend. The goal of this algorithm is to be able to
+ extract a key within the final log message using string
+ converters and then be able to stick to the same server thanks
+ to the hash. Only "map-based" hashes are supported for now.
+ This algorithm is only usable for backends in LOG mode, for
+ others, please use "hash" instead.
+
+ sticky Tries to stick to the same server as much as possible. The
+ first server in the list of available servers receives all
+ the log messages. When the server goes DOWN, the next server
+ in the list takes its place. When a previously DOWN server
+ goes back UP it is added at the end of the list so that the
+ sticky server doesn't change until it becomes DOWN. This
+ algorithm is only usable for backends in LOG mode.
+
+ <arguments> is an optional list of arguments which may be needed by some
+ algorithms. Right now, only "url_param", "uri" and "log-hash"
+ support an optional argument.
+
+ The load balancing algorithm of a backend is set to roundrobin when no other
+ algorithm, mode nor option have been set. The algorithm may only be set once
+ for each backend. In backends in LOG mode, server "weight" is always ignored.
+
+ With authentication schemes that require the same connection like NTLM, URI
+ based algorithms must not be used, as they would cause subsequent requests
+ to be routed to different backend servers, breaking the invalid assumptions
+ NTLM relies on.
+
+ TCP/HTTP Examples :
+ balance roundrobin
+ balance url_param userid
+ balance url_param session_id check_post 64
+ balance hdr(User-Agent)
+ balance hdr(host)
+ balance hdr(Host) use_domain_only
+ balance hash req.cookie(clientid)
+ balance hash var(req.client_id)
+ balance hash req.hdr_ip(x-forwarded-for,-1),ipmask(24)
+
+ LOG backend examples:
+ global
+ log backend@mylog-rrb local0 # send all logs to mylog-rrb backend
+ log backend@mylog-hash local0 # send all logs to mylog-hash backend
+
+ backend mylog-rrb
+ mode log
+ balance roundrobin
+
+ server s1 udp@127.0.0.1:514 # will receive 50% of log messages
+ server s2 udp@127.0.0.1:514
+
+ backend mylog-hash
+ mode log
+
+ # extract "METHOD URL PROTO" at the end of the log message,
+ # and let haproxy hash it so that log messages generated from
+ # similar requests get sent to the same syslog server:
+ balance log-hash 'field(-2,\")'
+
+ # server list here
+ server s1 127.0.0.1:514
+ #...
+
+ Note: the following caveats and limitations on using the "check_post"
+ extension with "url_param" must be considered :
+
+ - all POST requests are eligible for consideration, because there is no way
+ to determine if the parameters will be found in the body or entity which
+ may contain binary data. Therefore another method may be required to
+ restrict consideration of POST requests that have no URL parameters in
+ the body. (see acl http_end)
+
+ - using a <max_wait> value larger than the request buffer size does not
+ make sense and is useless. The buffer size is set at build time, and
+ defaults to 16 kB.
+
+ - Content-Encoding is not supported, the parameter search will probably
+ fail; and load balancing will fall back to Round Robin.
+
+ - Expect: 100-continue is not supported, load balancing will fall back to
+ Round Robin.
+
+ - Transfer-Encoding (RFC7230 3.3.1) is only supported in the first chunk.
+ If the entire parameter value is not present in the first chunk, the
+ selection of server is undefined (actually, defined by how little
+ actually appeared in the first chunk).
+
+ - This feature does not support generation of a 100, 411 or 501 response.
+
+ - In some cases, requesting "check_post" MAY attempt to scan the entire
+ contents of a message body. Scanning normally terminates when linear
+ white space or control characters are found, indicating the end of what
+ might be a URL parameter list. This is probably not a concern with SGML
+ type message bodies.
+
+ See also : "dispatch", "cookie", "transparent", "hash-type".
+
+
+bind [<address>]:<port_range> [, ...] [param*]
+bind /<path> [, ...] [param*]
+ Define one or several listening addresses and/or ports in a frontend.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ <address> is optional and can be a host name, an IPv4 address, an IPv6
+ address, or '*'. It designates the address the frontend will
+ listen on. If unset, all IPv4 addresses of the system will be
+ listened on. The same will apply for '*' or the system's
+ special address "0.0.0.0". The IPv6 equivalent is '::'. Note
+ that if you bind a frontend to multiple UDP addresses you have
+ no guarantee about the address which will be used to respond.
+ This is why "0.0.0.0" addresses and lists of comma-separated
+ IP addresses have been forbidden to bind QUIC addresses.
+ Optionally, an address family prefix may be used before the
+ address to force the family regardless of the address format,
+ which can be useful to specify a path to a unix socket with
+ no slash ('/'). Currently supported prefixes are :
+ - 'ipv4@' -> address is always IPv4
+ - 'ipv6@' -> address is always IPv6
+ - 'udp@' -> address is resolved as IPv4 or IPv6 and
+ protocol UDP is used. Currently those listeners are
+ supported only in log-forward sections.
+ - 'udp4@' -> address is always IPv4 and protocol UDP
+ is used. Currently those listeners are supported
+ only in log-forward sections.
+ - 'udp6@' -> address is always IPv6 and protocol UDP
+ is used. Currently those listeners are supported
+ only in log-forward sections.
+ - 'unix@' -> address is a path to a local unix socket
+ - 'abns@' -> address is in abstract namespace (Linux only).
+ - 'fd@<n>' -> use file descriptor <n> inherited from the
+ parent. The fd must be bound and may or may not already
+ be listening.
+ - 'sockpair@<n>'-> like fd@ but you must use the fd of a
+ connected unix socket or of a socketpair. The bind waits
+ to receive a FD over the unix socket and uses it as if it
+ was the FD of an accept(). Should be used carefully.
+ - 'quic4@' -> address is resolved as IPv4 and protocol UDP
+ is used. Note that to achieve the best performance with a
+ large traffic you should keep "tune.quic.socket-owner" on
+ connection. Else QUIC connections will be multiplexed
+ over the listener socket. Another alternative would be to
+ duplicate QUIC listener instances over several threads,
+ for example using "shards" keyword to at least reduce
+ thread contention.
+ - 'quic6@' -> address is resolved as IPv6 and protocol UDP
+ is used. The performance note for QUIC over IPv4 applies
+ as well.
+ - 'rhttp@' [ EXPERIMENTAL ] -> used for reverse HTTP.
+ Address must be a server with the format
+ '<backend>/<server>'. The server will be used to
+ instantiate connections to a remote address. The listener
+ will try to maintain "nbconn" connections. This is an
+ experimental features which requires
+ "expose-experimental-directives" on a line before this
+ bind.
+
+ You may want to reference some environment variables in the
+ address parameter, see section 2.3 about environment
+ variables.
+
+ <port_range> is either a unique TCP port, or a port range for which the
+ proxy will accept connections for the IP address specified
+ above. The port is mandatory for TCP listeners. Note that in
+ the case of an IPv6 address, the port is always the number
+ after the last colon (':'). A range can either be :
+ - a numerical port (ex: '80')
+ - a dash-delimited ports range explicitly stating the lower
+ and upper bounds (ex: '2000-2100') which are included in
+ the range.
+
+ Particular care must be taken against port ranges, because
+ every <address:port> couple consumes one socket (= a file
+ descriptor), so it's easy to consume lots of descriptors
+ with a simple range, and to run out of sockets. Also, each
+ <address:port> couple must be used only once among all
+                  instances running on a same system. Please note that binding
+                  to ports lower than 1024 generally requires particular
+ privileges to start the program, which are independent of
+ the 'uid' parameter.
+
+ <path> is a UNIX socket path beginning with a slash ('/'). This is
+ alternative to the TCP listening port. HAProxy will then
+ receive UNIX connections on the socket located at this place.
+ The path must begin with a slash and by default is absolute.
+ It can be relative to the prefix defined by "unix-bind" in
+ the global section. Note that the total length of the prefix
+ followed by the socket path cannot exceed some system limits
+ for UNIX sockets, which commonly are set to 107 characters.
+
+ <param*> is a list of parameters common to all sockets declared on the
+ same line. These numerous parameters depend on OS and build
+ options and have a complete section dedicated to them. Please
+                  refer to section 5 for more details.
+
+ It is possible to specify a list of address:port combinations delimited by
+ commas. The frontend will then listen on all of these addresses. There is no
+ fixed limit to the number of addresses and ports which can be listened on in
+ a frontend, as well as there is no limit to the number of "bind" statements
+ in a frontend.
+
+ Example :
+ listen http_proxy
+ bind :80,:443
+ bind 10.0.0.1:10080,10.0.0.1:10443
+ bind /var/run/ssl-frontend.sock user root mode 600 accept-proxy
+
+ listen http_https_proxy
+ bind :80
+ bind :443 ssl crt /etc/haproxy/site.pem
+
+ listen http_https_proxy_explicit
+ bind ipv6@:80
+ bind ipv4@public_ssl:443 ssl crt /etc/haproxy/site.pem
+ bind unix@ssl-frontend.sock user root mode 600 accept-proxy
+
+ listen external_bind_app1
+ bind "fd@${FD_APP1}"
+
+ listen h3_quic_proxy
+ bind quic4@10.0.0.1:8888 ssl crt /etc/mycrt
+
+  Note: regarding Linux's abstract namespace sockets, HAProxy uses the whole
+        sun_path length for the address length. Some other programs
+ such as socat use the string length only by default. Pass the option
+ ",unix-tightsocklen=0" to any abstract socket definition in socat to
+ make it compatible with HAProxy's.
+
+ See also : "source", "option forwardfor", "unix-bind" and the PROXY protocol
+ documentation, and section 5 about bind options.
+
+
+capture cookie <name> len <length>
+ Capture and log a cookie in the request and in the response.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ <name> is the beginning of the name of the cookie to capture. In order
+ to match the exact name, simply suffix the name with an equal
+ sign ('='). The full name will appear in the logs, which is
+ useful with application servers which adjust both the cookie name
+ and value (e.g. ASPSESSIONXXX).
+
+ <length> is the maximum number of characters to report in the logs, which
+ include the cookie name, the equal sign and the value, all in the
+ standard "name=value" form. The string will be truncated on the
+ right if it exceeds <length>.
+
+ Only the first cookie is captured. Both the "cookie" request headers and the
+ "set-cookie" response headers are monitored. This is particularly useful to
+ check for application bugs causing session crossing or stealing between
+ users, because generally the user's cookies can only change on a login page.
+
+ When the cookie was not presented by the client, the associated log column
+ will report "-". When a request does not cause a cookie to be assigned by the
+ server, a "-" is reported in the response column.
+
+ The capture is performed in the frontend only because it is necessary that
+ the log format does not change for a given frontend depending on the
+ backends. This may change in the future. Note that there can be only one
+ "capture cookie" statement in a frontend. The maximum capture length is set
+ by the global "tune.http.cookielen" setting and defaults to 63 characters. It
+ is not possible to specify a capture in a "defaults" section.
+
+ Example:
+ capture cookie ASPSESSION len 32
+
+ See also : "capture request header", "capture response header" as well as
+ section 8 about logging.
+
+
+capture request header <name> len <length>
+ Capture and log the last occurrence of the specified request header.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ <name> is the name of the header to capture. The header names are not
+ case-sensitive, but it is a common practice to write them as they
+ appear in the requests, with the first letter of each word in
+ upper case. The header name will not appear in the logs, only the
+ value is reported, but the position in the logs is respected.
+
+ <length> is the maximum number of characters to extract from the value and
+ report in the logs. The string will be truncated on the right if
+ it exceeds <length>.
+
+ The complete value of the last occurrence of the header is captured. The
+ value will be added to the logs between braces ('{}'). If multiple headers
+ are captured, they will be delimited by a vertical bar ('|') and will appear
+ in the same order they were declared in the configuration. Non-existent
+ headers will be logged just as an empty string. Common uses for request
+ header captures include the "Host" field in virtual hosting environments, the
+ "Content-length" when uploads are supported, "User-agent" to quickly
+ differentiate between real users and robots, and "X-Forwarded-For" in proxied
+ environments to find where the request came from.
+
+ Note that when capturing headers such as "User-agent", some spaces may be
+ logged, making the log analysis more difficult. Thus be careful about what
+ you log if you know your log parser is not smart enough to rely on the
+ braces.
+
+ There is no limit to the number of captured request headers nor to their
+ length, though it is wise to keep them low to limit memory usage per stream.
+ In order to keep log format consistent for a same frontend, header captures
+ can only be declared in a frontend. It is not possible to specify a capture
+ in a "defaults" section.
+
+ Example:
+ capture request header Host len 15
+ capture request header X-Forwarded-For len 15
+ capture request header Referer len 15
+
+ See also : "capture cookie", "capture response header" as well as section 8
+ about logging.
+
+
+capture response header <name> len <length>
+ Capture and log the last occurrence of the specified response header.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ <name> is the name of the header to capture. The header names are not
+ case-sensitive, but it is a common practice to write them as they
+ appear in the response, with the first letter of each word in
+ upper case. The header name will not appear in the logs, only the
+ value is reported, but the position in the logs is respected.
+
+ <length> is the maximum number of characters to extract from the value and
+ report in the logs. The string will be truncated on the right if
+ it exceeds <length>.
+
+ The complete value of the last occurrence of the header is captured. The
+ result will be added to the logs between braces ('{}') after the captured
+ request headers. If multiple headers are captured, they will be delimited by
+ a vertical bar ('|') and will appear in the same order they were declared in
+ the configuration. Non-existent headers will be logged just as an empty
+ string. Common uses for response header captures include the "Content-length"
+ header which indicates how many bytes are expected to be returned, the
+ "Location" header to track redirections.
+
+ There is no limit to the number of captured response headers nor to their
+ length, though it is wise to keep them low to limit memory usage per stream.
+ In order to keep log format consistent for a same frontend, header captures
+ can only be declared in a frontend. It is not possible to specify a capture
+ in a "defaults" section.
+
+ Example:
+ capture response header Content-length len 9
+ capture response header Location len 15
+
+ See also : "capture cookie", "capture request header" as well as section 8
+ about logging.
+
+
+clitcpka-cnt <count>
+ Sets the maximum number of keepalive probes TCP should send before dropping
+ the connection on the client side.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <count> is the maximum number of keepalive probes.
+
+ This keyword corresponds to the socket option TCP_KEEPCNT. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_probes) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option clitcpka", "clitcpka-idle", "clitcpka-intvl".
+
+
+clitcpka-idle <timeout>
+  Sets the time the connection needs to remain idle before TCP starts sending
+  keepalive probes, if sending of TCP keepalive packets is enabled on the
+  client side.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <timeout> is the time the connection needs to remain idle before TCP starts
+ sending keepalive probes. It is specified in seconds by default,
+ but can be in any other unit if the number is suffixed by the
+ unit, as explained at the top of this document.
+
+ This keyword corresponds to the socket option TCP_KEEPIDLE. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_time) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option clitcpka", "clitcpka-cnt", "clitcpka-intvl".
+
+
+clitcpka-intvl <timeout>
+ Sets the time between individual keepalive probes on the client side.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <timeout> is the time between individual keepalive probes. It is specified
+ in seconds by default, but can be in any other unit if the number
+ is suffixed by the unit, as explained at the top of this
+ document.
+
+ This keyword corresponds to the socket option TCP_KEEPINTVL. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_intvl) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option clitcpka", "clitcpka-cnt", "clitcpka-idle".
+
+
+compression algo <algorithm> ...
+compression algo-req <algorithm>
+compression algo-res <algorithm>
+compression type <mime type> ...
+ Enable HTTP compression.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ algo is followed by the list of supported compression algorithms for
+ responses (legacy keyword)
+ algo-req is followed by compression algorithm for request (only one is
+ provided).
+ algo-res is followed by the list of supported compression algorithms for
+ responses.
+ type is followed by the list of MIME types that will be compressed for
+ responses (legacy keyword).
+ type-req is followed by the list of MIME types that will be compressed for
+ requests.
+ type-res is followed by the list of MIME types that will be compressed for
+ responses.
+
+ The currently supported algorithms are :
+ identity this is mostly for debugging, and it was useful for developing
+ the compression feature. Identity does not apply any change on
+ data.
+
+ gzip applies gzip compression. This setting is only available when
+ support for zlib or libslz was built in.
+
+ deflate same as "gzip", but with deflate algorithm and zlib format.
+ Note that this algorithm has ambiguous support on many
+ browsers and no support at all from recent ones. It is
+ strongly recommended not to use it for anything else than
+ experimentation. This setting is only available when support
+ for zlib or libslz was built in.
+
+ raw-deflate same as "deflate" without the zlib wrapper, and used as an
+ alternative when the browser wants "deflate". All major
+ browsers understand it and despite violating the standards,
+ it is known to work better than "deflate", at least on MSIE
+ and some versions of Safari. Do not use it in conjunction
+ with "deflate", use either one or the other since both react
+ to the same Accept-Encoding token. This setting is only
+ available when support for zlib or libslz was built in.
+
+ Compression will be activated depending on the Accept-Encoding request
+ header. With identity, it does not take care of that header.
+ If backend servers support HTTP compression, these directives
+ will be no-ops: HAProxy will see the compressed response and will not
+ compress again. If backend servers do not support HTTP compression and
+ there is Accept-Encoding header in request, HAProxy will compress the
+ matching response.
+
+ Compression is disabled when:
+ * the request does not advertise a supported compression algorithm in the
+ "Accept-Encoding" header
+ * the response message is not HTTP/1.1 or above
+ * HTTP status code is not one of 200, 201, 202, or 203
+ * response contains neither a "Content-Length" header nor a
+ "Transfer-Encoding" whose last value is "chunked"
+ * response contains a "Content-Type" header whose first value starts with
+ "multipart"
+ * the response contains the "no-transform" value in the "Cache-control"
+ header
+ * User-Agent matches "Mozilla/4" unless it is MSIE 6 with XP SP2, or MSIE 7
+ and later
+ * The response contains a "Content-Encoding" header, indicating that the
+ response is already compressed (see compression offload)
+ * The response contains an invalid "ETag" header or multiple ETag headers
+
+ Note: The compression does not emit the Warning header.
+
+ Examples :
+ compression algo gzip
+ compression type text/html text/plain
+
+ See also : "compression offload", "compression direction"
+
+compression offload
+ Makes HAProxy work as a compression offloader only.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ The "offload" setting makes HAProxy remove the Accept-Encoding header to
+ prevent backend servers from compressing responses. It is strongly
+ recommended not to do this because this means that all the compression work
+ will be done on the single point where HAProxy is located. However in some
+ deployment scenarios, HAProxy may be installed in front of a buggy gateway
+ with broken HTTP compression implementation which can't be turned off.
+ In that case HAProxy can be used to prevent that gateway from emitting
+ invalid payloads. In this case, simply removing the header in the
+ configuration does not work because it applies before the header is parsed,
+ so that prevents HAProxy from compressing. The "offload" setting should
+ then be used for such scenarios.
+
+ If this setting is used in a defaults section, a warning is emitted and the
+ option is ignored.
+
+ See also : "compression type", "compression algo", "compression direction"
+
+compression direction <direction>
+ Makes HAProxy able to compress both requests and responses.
+ Valid values are "request", to compress only requests, "response", to
+ compress only responses, or "both", when you want to compress both.
+ The default value is "response".
+
+ May be used in the following contexts: http
+
+ See also : "compression type", "compression algo", "compression offload"
+
+cookie <name> [ rewrite | insert | prefix ] [ indirect ] [ nocache ]
+ [ postonly ] [ preserve ] [ httponly ] [ secure ]
+ [ domain <domain> ]* [ maxidle <idle> ] [ maxlife <life> ]
+ [ dynamic ] [ attr <value> ]*
+ Enable cookie-based persistence in a backend.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <name> is the name of the cookie which will be monitored, modified or
+ inserted in order to bring persistence. This cookie is sent to
+ the client via a "Set-Cookie" header in the response, and is
+ brought back by the client in a "Cookie" header in all requests.
+ Special care should be taken to choose a name which does not
+ conflict with any likely application cookie. Also, if the same
+ backends are subject to be used by the same clients (e.g.
+ HTTP/HTTPS), care should be taken to use different cookie names
+ between all backends if persistence between them is not desired.
+
+ rewrite This keyword indicates that the cookie will be provided by the
+ server and that HAProxy will have to modify its value to set the
+ server's identifier in it. This mode is handy when the management
+ of complex combinations of "Set-cookie" and "Cache-control"
+ headers is left to the application. The application can then
+ decide whether or not it is appropriate to emit a persistence
+ cookie. Since all responses should be monitored, this mode
+ doesn't work in HTTP tunnel mode. Unless the application
+ behavior is very complex and/or broken, it is advised not to
+ start with this mode for new deployments. This keyword is
+ incompatible with "insert" and "prefix".
+
+ insert This keyword indicates that the persistence cookie will have to
+ be inserted by HAProxy in server responses if the client did not
+ already have a cookie that would have permitted it to access this
+ server. When used without the "preserve" option, if the server
+ emits a cookie with the same name, it will be removed before
+ processing. For this reason, this mode can be used to upgrade
+ existing configurations running in the "rewrite" mode. The cookie
+ will only be a session cookie and will not be stored on the
+ client's disk. By default, unless the "indirect" option is added,
+ the server will see the cookies emitted by the client. Due to
+ caching effects, it is generally wise to add the "nocache" or
+ "postonly" keywords (see below). The "insert" keyword is not
+ compatible with "rewrite" and "prefix".
+
+ prefix This keyword indicates that instead of relying on a dedicated
+ cookie for the persistence, an existing one will be completed.
+ This may be needed in some specific environments where the client
+ does not support more than one single cookie and the application
+ already needs it. In this case, whenever the server sets a cookie
+ named <name>, it will be prefixed with the server's identifier
+ and a delimiter. The prefix will be removed from all client
+ requests so that the server still finds the cookie it emitted.
+ Since all requests and responses are subject to being modified,
+ this mode doesn't work with tunnel mode. The "prefix" keyword is
+ not compatible with "rewrite" and "insert". Note: it is highly
+ recommended not to use "indirect" with "prefix", otherwise server
+ cookie updates would not be sent to clients.
+
+ indirect When this option is specified, no cookie will be emitted to a
+ client which already has a valid one for the server which has
+ processed the request. If the server sets such a cookie itself,
+ it will be removed, unless the "preserve" option is also set. In
+ "insert" mode, this will additionally remove cookies from the
+ requests transmitted to the server, making the persistence
+ mechanism totally transparent from an application point of view.
+ Note: it is highly recommended not to use "indirect" with
+ "prefix", otherwise server cookie updates would not be sent to
+ clients.
+
+ nocache This option is recommended in conjunction with the insert mode
+ when there is a cache between the client and HAProxy, as it
+ ensures that a cacheable response will be tagged non-cacheable if
+ a cookie needs to be inserted. This is important because if all
+ persistence cookies are added on a cacheable home page for
+ instance, then all customers will then fetch the page from an
+ outer cache and will all share the same persistence cookie,
+ leading to one server receiving much more traffic than others.
+ See also the "insert" and "postonly" options.
+
+ postonly This option ensures that cookie insertion will only be performed
+ on responses to POST requests. It is an alternative to the
+ "nocache" option, because POST responses are not cacheable, so
+ this ensures that the persistence cookie will never get cached.
+ Since most sites do not need any sort of persistence before the
+ first POST which generally is a login request, this is a very
+ efficient method to optimize caching without risking to find a
+ persistence cookie in the cache.
+ See also the "insert" and "nocache" options.
+
+ preserve This option may only be used with "insert" and/or "indirect". It
+ allows the server to emit the persistence cookie itself. In this
+ case, if a cookie is found in the response, HAProxy will leave it
+ untouched. This is useful in order to end persistence after a
+ logout request for instance. For this, the server just has to
+ emit a cookie with an invalid value (e.g. empty) or with a date in
+ the past. By combining this mechanism with the "disable-on-404"
+ check option, it is possible to perform a completely graceful
+ shutdown because users will definitely leave the server after
+ they logout.
+
+ httponly This option tells HAProxy to add an "HttpOnly" cookie attribute
+ when a cookie is inserted. This attribute is used so that a
+ user agent doesn't share the cookie with non-HTTP components.
+ Please check RFC6265 for more information on this attribute.
+
+ secure This option tells HAProxy to add a "Secure" cookie attribute when
+ a cookie is inserted. This attribute is used so that a user agent
+ never emits this cookie over non-secure channels, which means
+ that a cookie learned with this flag will be presented only over
+ SSL/TLS connections. Please check RFC6265 for more information on
+ this attribute.
+
+ domain This option allows to specify the domain at which a cookie is
+ inserted. It requires exactly one parameter: a valid domain
+ name. If the domain begins with a dot, the browser is allowed to
+ use it for any host ending with that name. It is also possible to
+ specify several domain names by invoking this option multiple
+ times. Some browsers might have small limits on the number of
+ domains, so be careful when doing that. For the record, sending
+ 10 domains to MSIE 6 or Firefox 2 works as expected.
+
+ maxidle This option allows inserted cookies to be ignored after some idle
+ time. It only works with insert-mode cookies. When a cookie is
+ sent to the client, the date this cookie was emitted is sent too.
+ Upon further presentations of this cookie, if the date is older
+ than the delay indicated by the parameter (in seconds), it will
+ be ignored. Otherwise, it will be refreshed if needed when the
+ response is sent to the client. This is particularly useful to
+ prevent users who never close their browsers from remaining for
+ too long on the same server (e.g. after a farm size change). When
+ this option is set and a cookie has no date, it is always
+ accepted, but gets refreshed in the response. This maintains the
+ ability for admins to access their sites. Cookies that have a
+ date in the future further than 24 hours are ignored. Doing so
+ lets admins fix timezone issues without risking kicking users off
+ the site.
+
+ maxlife This option allows inserted cookies to be ignored after some life
+ time, whether they're in use or not. It only works with insert
+ mode cookies. When a cookie is first sent to the client, the date
+ this cookie was emitted is sent too. Upon further presentations
+ of this cookie, if the date is older than the delay indicated by
+ the parameter (in seconds), it will be ignored. If the cookie in
+ the request has no date, it is accepted and a date will be set.
+ Cookies that have a date in the future further than 24 hours are
+ ignored. Doing so lets admins fix timezone issues without risking
+ kicking users off the site. Contrary to maxidle, this value is
+ not refreshed, only the first visit date counts. Both maxidle and
+ maxlife may be used at the same time. This is particularly useful to
+ prevent users who never close their browsers from remaining for
+ too long on the same server (e.g. after a farm size change). This
+ is stronger than the maxidle method in that it forces a
+ redispatch after some absolute delay.
+
+ dynamic Activate dynamic cookies. When used, a session cookie is
+ dynamically created for each server, based on the IP and port
+ of the server, and a secret key, specified in the
+ "dynamic-cookie-key" backend directive.
+ The cookie will be regenerated each time the IP address changes,
+ and is only generated for IPv4/IPv6.
+
+ attr This option tells HAProxy to add an extra attribute when a
+ cookie is inserted. The attribute value can contain any
+ characters except control ones or ";". This option may be
+ repeated.
+
+ There can be only one persistence cookie per HTTP backend, and it can be
+ declared in a defaults section. The value of the cookie will be the value
+ indicated after the "cookie" keyword in a "server" statement. If no cookie
+ is declared for a given server, the cookie is not set.
+
+ Examples :
+ cookie JSESSIONID prefix
+ cookie SRV insert indirect nocache
+ cookie SRV insert postonly indirect
+ cookie SRV insert indirect nocache maxidle 30m maxlife 8h
+
+ See also : "balance source", "capture cookie", "server" and "ignore-persist".
+
+
+declare capture [ request | response ] len <length>
+ Declares a capture slot.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments:
+ <length> is the length allowed for the capture.
+
+ This declaration is only available in the frontend or listen section, but the
+ reserved slot can be used in the backends. The "request" keyword allocates a
+ capture slot for use in the request, and "response" allocates a capture slot
+ for use in the response.
+
+ See also: "capture-req", "capture-res" (sample converters),
+ "capture.req.hdr", "capture.res.hdr" (sample fetches),
+ "http-request capture" and "http-response capture".
+
+
+default-server [param*]
+ Change default options for a server in a backend
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments:
+ <param*> is a list of parameters for this server. The "default-server"
+ keyword accepts an important number of options and has a complete
+ section dedicated to it. Please refer to section 5 for more
+ details.
+
+ Example :
+ default-server inter 1000 weight 13
+
+ See also: "server" and section 5 about server options
+
+
+default_backend <backend>
+ Specify the backend to use when no "use_backend" rule has been matched.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <backend> is the name of the backend to use.
+
+ When doing content-switching between frontend and backends using the
+ "use_backend" keyword, it is often useful to indicate which backend will be
+ used when no rule has matched. It generally is the dynamic backend which
+ will catch all undetermined requests.
+
+ Example :
+
+ use_backend dynamic if url_dyn
+ use_backend static if url_css url_img extension_img
+ default_backend dynamic
+
+ See also : "use_backend"
+
+
+description <string>
+ Describe a listen, frontend or backend.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ Arguments : string
+
+ Allows to add a sentence to describe the related object in the HAProxy HTML
+ stats page. The description will be printed on the right of the object name
+ it describes.
+ No need to backslash spaces in the <string> arguments.
+
+
+disabled
+ Disable a proxy, frontend or backend.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ The "disabled" keyword is used to disable an instance, mainly in order to
+ liberate a listening port or to temporarily disable a service. The instance
+ will still be created and its configuration will be checked, but it will be
+ created in the "stopped" state and will appear as such in the statistics. It
+ will not receive any traffic nor will it send any health-checks or logs. It
+ is possible to disable many instances at once by adding the "disabled"
+ keyword in a "defaults" section.
+
+ See also : "enabled"
+
+
+dispatch <address>:<port>
+ Set a default server address
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+
+ <address> is the IPv4 address of the default server. Alternatively, a
+ resolvable hostname is supported, but this name will be resolved
+ during start-up.
+
+ <port> is a mandatory port specification. All connections will be sent
+ to this port, and it is not permitted to use port offsets as is
+ possible with normal servers.
+
+ The "dispatch" keyword designates a default server for use when no other
+ server can take the connection. In the past it was used to forward non
+ persistent connections to an auxiliary load balancer. Due to its simple
+ syntax, it has also been used for simple TCP relays. It is recommended not to
+ use it for more clarity, and to use the "server" directive instead.
+
+ See also : "server"
+
+
+dynamic-cookie-key <string>
+ Set the dynamic cookie secret key for a backend.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : The secret key to be used.
+
+ When dynamic cookies are enabled (see the "dynamic" directive for cookie),
+ a dynamic cookie is created for each server (unless one is explicitly
+ specified on the "server" line), using a hash of the IP address of the
+ server, the TCP port, and the secret key.
+ That way, we can ensure session persistence across multiple load-balancers,
+ even if servers are dynamically added or removed.
+
+enabled
+ Enable a proxy, frontend or backend.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ The "enabled" keyword is used to explicitly enable an instance, when the
+ defaults has been set to "disabled". This is very rarely used.
+
+ See also : "disabled"
+
+
+errorfile <code> <file>
+ Return the contents of a file instead of errors generated by HAProxy
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <code> is the HTTP status code. Currently, HAProxy is capable of
+ generating codes 200, 400, 401, 403, 404, 405, 407, 408, 410,
+ 413, 425, 429, 500, 501, 502, 503, and 504.
+
+ <file> designates a file containing the full HTTP response. It is
+ recommended to follow the common practice of appending ".http" to
+ the filename so that people do not confuse the response with HTML
+ error pages, and to use absolute paths, since files are read
+ before any chroot is performed.
+
+ It is important to understand that this keyword is not meant to rewrite
+ errors returned by the server, but errors detected and returned by HAProxy.
+ This is why the list of supported errors is limited to a small set.
+
+ Code 200 is emitted in response to requests matching a "monitor-uri" rule.
+
+ The files are parsed when HAProxy starts and must be valid according to the
+ HTTP specification. They should not exceed the configured buffer size
+ (BUFSIZE), which generally is 16 kB, otherwise an internal error will be
+ returned. It is also wise not to put any reference to local contents
+ (e.g. images) in order to avoid loops between the client and HAProxy when all
+ servers are down, causing an error to be returned instead of an
+ image. Finally, The response cannot exceed (tune.bufsize - tune.maxrewrite)
+ so that "http-after-response" rules still have room to operate (see
+ "tune.maxrewrite").
+
+ The files are read at the same time as the configuration and kept in memory.
+ For this reason, the errors continue to be returned even when the process is
+ chrooted, and no file change is considered while the process is running. A
+ simple method for developing those files consists in associating them to the
+ 403 status code and interrogating a blocked URL.
+
+ See also : "http-error", "errorloc", "errorloc302", "errorloc303"
+
+ Example :
+ errorfile 400 /etc/haproxy/errorfiles/400badreq.http
+ errorfile 408 /dev/null # work around Chrome pre-connect bug
+ errorfile 403 /etc/haproxy/errorfiles/403forbid.http
+ errorfile 503 /etc/haproxy/errorfiles/503sorry.http
+
+
+errorfiles <name> [<code> ...]
+ Import, fully or partially, the error files defined in the <name> http-errors
+ section.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <name> is the name of an existing http-errors section.
+
+ <code> is an HTTP status code. Several status codes may be listed.
+ Currently, HAProxy is capable of generating codes 200, 400, 401,
+ 403, 404, 405, 407, 408, 410, 413, 425, 429, 500, 501, 502, 503,
+ and 504.
+
+ Errors defined in the http-errors section with the name <name> are imported
+ in the current proxy. If no status code is specified, all error files of the
+ http-errors section are imported. Otherwise, only error files associated to
+ the listed status code are imported. Those error files override the already
+ defined custom errors for the proxy. And they may be overridden by following
+ ones. Functionally, it is exactly the same as declaring all error files by
+ hand using "errorfile" directives.
+
+ See also : "http-error", "errorfile", "errorloc", "errorloc302" ,
+ "errorloc303" and section 3.8 about http-errors.
+
+ Example :
+ errorfiles generic
+ errorfiles site-1 403 404
+
+
+errorloc <code> <url>
+errorloc302 <code> <url>
+ Return an HTTP redirection to a URL instead of errors generated by HAProxy
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <code> is the HTTP status code. Currently, HAProxy is capable of
+ generating codes 200, 400, 401, 403, 404, 405, 407, 408, 410,
+ 413, 425, 429, 500, 501, 502, 503, and 504.
+
+ <url> it is the exact contents of the "Location" header. It may contain
+ either a relative URI to an error page hosted on the same site,
+ or an absolute URI designating an error page on another site.
+ Special care should be given to relative URIs to avoid redirect
+ loops if the URI itself may generate the same error (e.g. 500).
+
+ It is important to understand that this keyword is not meant to rewrite
+ errors returned by the server, but errors detected and returned by HAProxy.
+ This is why the list of supported errors is limited to a small set.
+
+ Code 200 is emitted in response to requests matching a "monitor-uri" rule.
+
+ Note that both keywords return the HTTP 302 status code, which tells the
+ client to fetch the designated URL using the same HTTP method. This can be
+ quite problematic in case of non-GET methods such as POST, because the URL
+ sent to the client might not be allowed for something other than GET. To
+ work around this problem, please use "errorloc303" which sends the HTTP 303
+ status code, indicating to the client that the URL must be fetched with a GET
+ request.
+
+ See also : "http-error", "errorfile", "errorloc303"
+
+
+errorloc303 <code> <url>
+ Return an HTTP redirection to a URL instead of errors generated by HAProxy
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <code> is the HTTP status code. Currently, HAProxy is capable of
+ generating codes 200, 400, 401, 403, 404, 405, 407, 408, 410,
+ 413, 425, 429, 500, 501, 502, 503, and 504.
+
+ <url> it is the exact contents of the "Location" header. It may contain
+ either a relative URI to an error page hosted on the same site,
+ or an absolute URI designating an error page on another site.
+ Special care should be given to relative URIs to avoid redirect
+ loops if the URI itself may generate the same error (e.g. 500).
+
+ It is important to understand that this keyword is not meant to rewrite
+ errors returned by the server, but errors detected and returned by HAProxy.
+ This is why the list of supported errors is limited to a small set.
+
+ Code 200 is emitted in response to requests matching a "monitor-uri" rule.
+
+ Note that this keyword returns the HTTP 303 status code, which tells the
+ client to fetch the designated URL using the same HTTP GET method. This
+ solves the usual problems associated with "errorloc" and the 302 code. It is
+ possible that some very old browsers designed before HTTP/1.1 do not support
+ it, but no such problem has been reported till now.
+
+ See also : "http-error", "errorfile", "errorloc", "errorloc302"
+
+
+email-alert from <emailaddr>
+ Declare the from email address to be used in both the envelope and header
+ of email alerts. This is the address that email alerts are sent from.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+
+ <emailaddr> is the from email address to use when sending email alerts
+
+ Also requires "email-alert mailers" and "email-alert to" to be set
+ and if so sending email alerts is enabled for the proxy.
+
+ See also : "email-alert level", "email-alert mailers",
+ "email-alert myhostname", "email-alert to", section 3.6 about
+ mailers.
+
+
+email-alert level <level>
+ Declare the maximum log level of messages for which email alerts will be
+ sent. This acts as a filter on the sending of email alerts.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+
+ <level> One of the 8 syslog levels:
+ emerg alert crit err warning notice info debug
+ The above syslog levels are ordered from lowest to highest.
+
+ By default level is alert
+
+ Also requires "email-alert from", "email-alert mailers" and
+ "email-alert to" to be set and if so sending email alerts is enabled
+ for the proxy.
+
+ Alerts are sent when :
+
+ * An un-paused server is marked as down and <level> is alert or lower
+ * A paused server is marked as down and <level> is notice or lower
+ * A server is marked as up or enters the drain state and <level>
+ is notice or lower
+ * "option log-health-checks" is enabled, <level> is info or lower,
+ and a health check status update occurs
+
+ See also : "email-alert from", "email-alert mailers",
+ "email-alert myhostname", "email-alert to",
+ section 3.6 about mailers.
+
+
+email-alert mailers <mailersect>
+ Declare the mailers to be used when sending email alerts
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+
+ <mailersect> is the name of the mailers section to send email alerts.
+
+ Also requires "email-alert from" and "email-alert to" to be set
+ and if so sending email alerts is enabled for the proxy.
+
+ See also : "email-alert from", "email-alert level", "email-alert myhostname",
+ "email-alert to", section 3.6 about mailers.
+
+
+email-alert myhostname <hostname>
+ Declare the hostname to be used when communicating with mailers.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+
+ <hostname> is the hostname to use when communicating with mailers
+
+ By default the systems hostname is used.
+
+ Also requires "email-alert from", "email-alert mailers" and
+ "email-alert to" to be set and if so sending email alerts is enabled
+ for the proxy.
+
+ See also : "email-alert from", "email-alert level", "email-alert mailers",
+ "email-alert to", section 3.6 about mailers.
+
+
+email-alert to <emailaddr>
+ Declare both the recipient address in the envelope and to address in the
+ header of email alerts. This is the address that email alerts are sent to.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+
+ <emailaddr> is the to email address to use when sending email alerts
+
+ Also requires "email-alert mailers" and "email-alert from" to be set
+ and if so sending email alerts is enabled for the proxy.
+
+ See also : "email-alert from", "email-alert level", "email-alert mailers",
+ "email-alert myhostname", section 3.6 about mailers.
+
+
+error-log-format <string>
+ Specifies the log format string to use in case of connection error on the frontend side.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ This directive specifies the log format string that will be used for logs
+ containing information related to errors, timeouts, retries redispatches or
+ HTTP status code 5xx. This format will in short be used for every log line
+ that would be concerned by the "log-separate-errors" option, including
+ connection errors described in section 8.2.5.
+
+ If the directive is used in a defaults section, all subsequent frontends will
+ use the same log format. Please see section 8.2.4 which covers the log format
+ string in depth.
+
+ "error-log-format" directive overrides previous "error-log-format"
+ directives.
+
+
+force-persist { if | unless } <condition>
+ Declare a condition to force persistence on down servers
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ By default, requests are not dispatched to down servers. It is possible to
+ force this using "option persist", but it is unconditional and redispatches
+ to a valid server if "option redispatch" is set. That leaves with very little
+ possibilities to force some requests to reach a server which is artificially
+ marked down for maintenance operations.
+
+ The "force-persist" statement allows one to declare various ACL-based
+ conditions which, when met, will cause a request to ignore the down status of
+ a server and still try to connect to it. That makes it possible to start a
+ server, still replying an error to the health checks, and run a specially
+ configured browser to test the service. Among the handy methods, one could
+ use a specific source IP address, or a specific cookie. The cookie also has
+ the advantage that it can easily be added/removed on the browser from a test
+ page. Once the service is validated, it is then possible to open the service
+ to the world by returning a valid response to health checks.
+
+ The forced persistence is enabled when an "if" condition is met, or unless an
+ "unless" condition is met. The final redispatch is always disabled when this
+ is used.
+
+ See also : "option redispatch", "ignore-persist", "persist",
+ and section 7 about ACL usage.
+
+
+filter <name> [param*]
+ Add the filter <name> in the filter list attached to the proxy.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ Arguments :
+ <name> is the name of the filter. Officially supported filters are
+ referenced in section 9.
+
+ <param*> is a list of parameters accepted by the filter <name>. The
+ parsing of these parameters are the responsibility of the
+ filter. Please refer to the documentation of the corresponding
+ filter (section 9) for all details on the supported parameters.
+
+ Multiple occurrences of the filter line can be used for the same proxy. The
+ same filter can be referenced many times if needed.
+
+ Example:
+ listen
+ bind *:80
+
+ filter trace name BEFORE-HTTP-COMP
+ filter compression
+ filter trace name AFTER-HTTP-COMP
+
+ compression algo gzip
+ compression offload
+
+ server srv1 192.168.0.1:80
+
+ See also : section 9.
+
+
+fullconn <conns>
+ Specify at what backend load the servers will reach their maxconn
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <conns> is the number of connections on the backend which will make the
+ servers use the maximal number of connections.
+
+ When a server has a "maxconn" parameter specified, it means that its number
+ of concurrent connections will never go higher. Additionally, if it has a
+ "minconn" parameter, it indicates a dynamic limit following the backend's
+ load. The server will then always accept at least <minconn> connections,
+ never more than <maxconn>, and the limit will be on the ramp between both
+ values when the backend has less than <conns> concurrent connections. This
+ makes it possible to limit the load on the servers during normal loads, but
+ push it further for important loads without overloading the servers during
+ exceptional loads.
+
+ Since it's hard to get this value right, HAProxy automatically sets it to
+ 10% of the sum of the maxconns of all frontends that may branch to this
+ backend (based on "use_backend" and "default_backend" rules). That way it's
+ safe to leave it unset. However, "use_backend" involving dynamic names are
+ not counted since there is no way to know if they could match or not.
+
+ Example :
+ # The servers will accept between 100 and 1000 concurrent connections each
+ # and the maximum of 1000 will be reached when the backend reaches 10000
+ # connections.
+ backend dynamic
+ fullconn 10000
+ server srv1 dyn1:80 minconn 100 maxconn 1000
+ server srv2 dyn2:80 minconn 100 maxconn 1000
+
+ See also : "maxconn", "server"
+
+
+hash-balance-factor <factor>
+ Specify the balancing factor for bounded-load consistent hashing
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | no | yes
+
+ Arguments :
+ <factor> is the control for the maximum number of concurrent requests to
+ send to a server, expressed as a percentage of the average number
+ of concurrent requests across all of the active servers.
+
+ Specifying a "hash-balance-factor" for a server with "hash-type consistent"
+ enables an algorithm that prevents any one server from getting too many
+ requests at once, even if some hash buckets receive many more requests than
+ others. Setting <factor> to 0 (the default) disables the feature. Otherwise,
+ <factor> is a percentage greater than 100. For example, if <factor> is 150,
+ then no server will be allowed to have a load more than 1.5 times the average.
+ If server weights are used, they will be respected.
+
+ If the first-choice server is disqualified, the algorithm will choose another
+ server based on the request hash, until a server with additional capacity is
+ found. A higher <factor> allows more imbalance between the servers, while a
+ lower <factor> means that more servers will be checked on average, affecting
+ performance. Reasonable values are from 125 to 200.
+
+ This setting is also used by "balance random" which internally relies on the
+ consistent hashing mechanism.
+
+ See also : "balance" and "hash-type".
+
+
+hash-type <method> <function> <modifier>
+ Specify a method to use for mapping hashes to servers
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <method> is the method used to select a server from the hash computed by
+ the <function> :
+
+ map-based the hash table is a static array containing all alive servers.
+ The hashes will be very smooth, will consider weights, but
+ will be static in that weight changes while a server is up
+ will be ignored. This means that there will be no slow start.
+ Also, since a server is selected by its position in the array,
+ most mappings are changed when the server count changes. This
+ means that when a server goes up or down, or when a server is
+ added to a farm, most connections will be redistributed to
+ different servers. This can be inconvenient with caches for
+ instance.
+
+ consistent the hash table is a tree filled with many occurrences of each
+ server. The hash key is looked up in the tree and the closest
+ server is chosen. This hash is dynamic, it supports changing
+ weights while the servers are up, so it is compatible with the
+ slow start feature. It has the advantage that when a server
+ goes up or down, only its associations are moved. When a
+ server is added to the farm, only a small part of the mappings
+ are redistributed, making it an ideal method for caches.
+ However, due to its principle, the distribution will never be
+ very smooth and it may sometimes be necessary to adjust a
+ server's weight or its ID to get a more balanced distribution.
+ In order to get the same distribution on multiple load
+ balancers, it is important that all servers have the exact
+ same IDs. Note: consistent hash uses sdbm and avalanche if no
+ hash function is specified.
+
+ <function> is the hash function to be used :
+
+ sdbm this function was created initially for sdbm (a public-domain
+ reimplementation of ndbm) database library. It was found to do
+ well in scrambling bits, causing better distribution of the keys
+ and fewer splits. It also happens to be a good general hashing
+ function with good distribution, unless the total server weight
+ is a multiple of 64, in which case applying the avalanche
+ modifier may help.
+
+ djb2 this function was first proposed by Dan Bernstein many years ago
+ on comp.lang.c. Studies have shown that for certain workload this
+ function provides a better distribution than sdbm. It generally
+ works well with text-based inputs though it can perform extremely
+ poorly with numeric-only input or when the total server weight is
+ a multiple of 33, unless the avalanche modifier is also used.
+
+ wt6 this function was designed for HAProxy while testing other
+ functions in the past. It is not as smooth as the other ones, but
+ is much less sensitive to the input data set or to the number of
+ servers. It can make sense as an alternative to sdbm+avalanche or
+ djb2+avalanche for consistent hashing or when hashing on numeric
+ data such as a source IP address or a visitor identifier in a URL
+ parameter.
+
+ crc32 this is the most common CRC32 implementation as used in Ethernet,
+ gzip, PNG, etc. It is slower than the other ones but may provide
+ a better distribution or less predictable results especially when
+ used on strings.
+
+ none don't hash the key, the key will be used as a hash, this can be
+ useful to manually hash the key using a converter for that purpose
+ and let haproxy use the result directly.
+
+ <modifier> indicates an optional method applied after hashing the key :
+
+ avalanche This directive indicates that the result from the hash
+ function above should not be used in its raw form but that
+ a 4-byte full avalanche hash must be applied first. The
+ purpose of this step is to mix the resulting bits from the
+ previous hash in order to avoid any undesired effect when
+ the input contains some limited values or when the number of
+ servers is a multiple of one of the hash's components (64
+ for SDBM, 33 for DJB2). Enabling avalanche tends to make the
+ result less predictable, but it's also not as smooth as when
+ using the original function. Some testing might be needed
+ with some workloads. This hash is one of the many proposed
+ by Bob Jenkins.
+
+ The default hash type is "map-based" and is recommended for most usages. The
+ default function is "sdbm", the selection of a function should be based on
+ the range of the values being hashed.
+
+ See also : "balance", "hash-balance-factor", "server"
+
+
+http-after-response <action> <options...> [ { if | unless } <condition> ]
+ Access control for all Layer 7 responses (server, applet/service and internal
+ ones).
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ The http-after-response statement defines a set of rules which apply to layer
+ 7 processing. The rules are evaluated in their declaration order when they
+ are met in a frontend, listen or backend section. Since these rules apply on
+ responses, the backend rules are applied first, followed by the frontend's
+ rules. Any rule may optionally be followed by an ACL-based condition, in
+ which case it will only be evaluated if the condition evaluates true.
+
+ Unlike http-response rules, these ones are applied on all responses, the
+ server ones but also to all responses generated by HAProxy. These rules are
+ evaluated at the end of the responses analysis, before the data forwarding
+ phase.
+
+ The condition is evaluated just before the action is executed, and the action
+ is performed exactly once. As such, there is no problem if an action changes
+ an element which is checked as part of the condition. This also means that
+ multiple actions may rely on the same condition so that the first action that
+ changes the condition's evaluation is sufficient to implicitly disable the
+ remaining actions. This is used for example when trying to assign a value to
+ a variable from various sources when it's empty. There is no limit to the
+ number of "http-after-response" statements per instance.
+
+ The first keyword after "http-after-response" in the syntax is the rule's
+ action, optionally followed by a varying number of arguments for the action.
+ The supported actions and their respective syntaxes are enumerated in section
+ 4.3 "Actions" (look for actions which tick "HTTP Aft").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Note: Errors emitted in early stage of the request parsing are handled by the
+ multiplexer at a lower level, before any http analysis. Thus no
+ http-after-response ruleset is evaluated on these errors.
+
+ Example:
+ http-after-response set-header Strict-Transport-Security "max-age=31536000"
+ http-after-response set-header Cache-Control "no-store,no-cache,private"
+ http-after-response set-header Pragma "no-cache"
+
+
+http-check comment <string>
+ Defines a comment for the following the http-check rule, reported in logs if
+ it fails.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <string> is the comment message to add in logs if the following http-check
+ rule fails.
+
+ It only works for connect, send and expect rules. It is useful to make
+ user-friendly error reporting.
+
+ See also : "option httpchk", "http-check connect", "http-check send" and
+ "http-check expect".
+
+
+http-check connect [default] [port <expr>] [addr <ip>] [send-proxy]
+ [via-socks4] [ssl] [sni <sni>] [alpn <alpn>] [linger]
+ [proto <name>] [comment <msg>]
+ Opens a new connection to perform an HTTP health check
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ default Use default options of the server line to do the health
+ checks. The server options are used only if not redefined.
+
+ port <expr> if not set, check port or server port is used.
+ It tells HAProxy where to open the connection to.
+ <expr> must be a valid TCP port source integer, from 1 to
+ 65535, or a sample-fetch expression.
+
+ addr <ip> defines the IP address to do the health check.
+
+ send-proxy send a PROXY protocol string
+
+ via-socks4 enables outgoing health checks using upstream socks4 proxy.
+
+ ssl opens a ciphered connection
+
+ sni <sni> specifies the SNI to use to do health checks over SSL.
+
+ alpn <alpn> defines which protocols to advertise with ALPN. The protocol
+ list consists in a comma-delimited list of protocol names,
+ for instance: "h2,http/1.1". If it is not set, the server ALPN
+ is used.
+
+ proto <name> forces the multiplexer's protocol to use for this connection.
+ It must be an HTTP mux protocol and it must be usable on the
+ backend side. The list of available protocols is reported in
+ haproxy -vv.
+
+ linger cleanly close the connection instead of using a single RST.
+
+ Just like tcp-check health checks, it is possible to configure the connection
+ to use to perform HTTP health check. This directive should also be used to
+ describe a scenario involving several request/response exchanges, possibly on
+ different ports or with different servers.
+
+ When no TCP port is configured on the server line and no server port
+ directive is used, then the first step of the http-check sequence must be to
+ specify the port with a "http-check connect".
+
+ In an http-check ruleset a 'connect' is required, it is also mandatory to start
+ the ruleset with a 'connect' rule. The purpose is to ensure admins know what
+ they are doing.
+
+ When a connect must start the ruleset, it may still be preceded by set-var,
+ unset-var or comment rules.
+
+ Examples :
+ # check HTTP and HTTPs services on a server.
+ # first open port 80 thanks to server line port directive, then
+ # tcp-check opens port 443, ciphered and run a request on it:
+ option httpchk
+
+ http-check connect
+ http-check send meth GET uri / ver HTTP/1.1 hdr host haproxy.1wt.eu
+ http-check expect status 200-399
+ http-check connect port 443 ssl sni haproxy.1wt.eu
+ http-check send meth GET uri / ver HTTP/1.1 hdr host haproxy.1wt.eu
+ http-check expect status 200-399
+
+ server www 10.0.0.1 check port 80
+
+ See also : "option httpchk", "http-check send", "http-check expect"
+
+
+http-check disable-on-404
+ Enable a maintenance mode upon HTTP/404 response to health-checks
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When this option is set, a server which returns an HTTP code 404 will be
+ excluded from further load-balancing, but will still receive persistent
+ connections. This provides a very convenient method for Web administrators
+ to perform a graceful shutdown of their servers. It is also important to note
+ that a server which is detected as failed while it was in this mode will not
+ generate an alert, just a notice. If the server responds 2xx or 3xx again, it
+ will immediately be reinserted into the farm. The status on the stats page
+ reports "NOLB" for a server in this mode. It is important to note that this
+ option only works in conjunction with the "httpchk" option. If this option
+ is used with "http-check expect", then it has precedence over it so that 404
+ responses will still be considered as soft-stop. Note also that a stopped
+ server will stay stopped even if it replies 404s. This option is only
+ evaluated for running servers.
+
+ See also : "option httpchk" and "http-check expect".
+
+
+http-check expect [min-recv <int>] [comment <msg>]
+ [ok-status <st>] [error-status <st>] [tout-status <st>]
+ [on-success <fmt>] [on-error <fmt>] [status-code <expr>]
+ [!] <match> <pattern>
+ Make HTTP health checks consider response contents or specific status codes
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ min-recv is optional and can define the minimum amount of data required to
+ evaluate the current expect rule. If the number of received bytes
+ is under this limit, the check will wait for more data. This
+ option can be used to resolve some ambiguous matching rules or to
+ avoid executing costly regex matches on content known to be still
+ incomplete. If an exact string is used, the minimum between the
+ string length and this parameter is used. This parameter is
+ ignored if it is set to -1. If the expect rule does not match,
+ the check will wait for more data. If set to 0, the evaluation
+ result is always conclusive.
+
+ ok-status <st> is optional and can be used to set the check status if
+ the expect rule is successfully evaluated and if it is
+ the last rule in the tcp-check ruleset. "L7OK", "L7OKC",
+ "L6OK" and "L4OK" are supported :
+ - L7OK : check passed on layer 7
+ - L7OKC : check conditionally passed on layer 7, set
+ server to NOLB state.
+ - L6OK : check passed on layer 6
+ - L4OK : check passed on layer 4
+ By default "L7OK" is used.
+
+ error-status <st> is optional and can be used to set the check status if
+ an error occurred during the expect rule evaluation.
+ "L7OKC", "L7RSP", "L7STS", "L6RSP" and "L4CON" are
+ supported :
+ - L7OKC : check conditionally passed on layer 7, set
+ server to NOLB state.
+ - L7RSP : layer 7 invalid response - protocol error
+ - L7STS : layer 7 response error, for example HTTP 5xx
+ - L6RSP : layer 6 invalid response - protocol error
+ - L4CON : layer 1-4 connection problem
+ By default "L7RSP" is used.
+
+ tout-status <st> is optional and can be used to set the check status if
+ a timeout occurred during the expect rule evaluation.
+ "L7TOUT", "L6TOUT", and "L4TOUT" are supported :
+ - L7TOUT : layer 7 (HTTP/SMTP) timeout
+ - L6TOUT : layer 6 (SSL) timeout
+ - L4TOUT : layer 1-4 timeout
+ By default "L7TOUT" is used.
+
+ on-success <fmt> is optional and can be used to customize the
+ informational message reported in logs if the expect
+ rule is successfully evaluated and if it is the last rule
+ in the tcp-check ruleset. <fmt> is a log-format string.
+
+ on-error <fmt> is optional and can be used to customize the
+ informational message reported in logs if an error
+ occurred during the expect rule evaluation. <fmt> is a
+ log-format string.
+
+ <match> is a keyword indicating how to look for a specific pattern in the
+ response. The keyword may be one of "status", "rstatus", "hdr",
+ "fhdr", "string", or "rstring". The keyword may be preceded by an
+ exclamation mark ("!") to negate the match. Spaces are allowed
+ between the exclamation mark and the keyword. See below for more
+ details on the supported keywords.
+
+ <pattern> is the pattern to look for. It may be a string, a regular
+ expression or a more complex pattern with several arguments. If
+ the string pattern contains spaces, they must be escaped with the
+ usual backslash ('\').
+
+ By default, "option httpchk" considers that response statuses 2xx and 3xx
+ are valid, and that others are invalid. When "http-check expect" is used,
+ it defines what is considered valid or invalid. Only one "http-check"
+ statement is supported in a backend. If a server fails to respond or times
+ out, the check obviously fails. The available matches are :
+
+ status <codes> : test the status codes found parsing <codes> string. It
+ must be a comma-separated list of status codes or range
+ codes. A health check response will be considered as
+ valid if the response's status code matches any status
+ code or is inside any range of the list. If the "status"
+ keyword is prefixed with "!", then the response will be
+ considered invalid if the status code matches.
+
+ rstatus <regex> : test a regular expression for the HTTP status code.
+ A health check response will be considered valid if the
+ response's status code matches the expression. If the
+ "rstatus" keyword is prefixed with "!", then the response
+ will be considered invalid if the status code matches.
+ This is mostly used to check for multiple codes.
+
+ hdr { name | name-lf } [ -m <meth> ] <name>
+ [ { value | value-lf } [ -m <meth> ] <value> :
+ test the specified header pattern on the HTTP response
+ headers. The name pattern is mandatory but the value
+ pattern is optional. If not specified, only the header
+ presence is verified. <meth> is the matching method,
+ applied on the header name or the header value. Supported
+ matching methods are "str" (exact match), "beg" (prefix
+ match), "end" (suffix match), "sub" (substring match) or
+ "reg" (regex match). If not specified, exact matching
+ method is used. If the "name-lf" parameter is used,
+ <name> is evaluated as a log-format string. If "value-lf"
+ parameter is used, <value> is evaluated as a log-format
+ string. These parameters cannot be used with the regex
+ matching method. Finally, the header value is considered
+ as comma-separated list. Note that matchings are case
+ insensitive on the header names.
+
+ fhdr { name | name-lf } [ -m <meth> ] <name>
+ [ { value | value-lf } [ -m <meth> ] <value> :
+ test the specified full header pattern on the HTTP
+ response headers. It does exactly the same than "hdr"
+ keyword, except the full header value is tested, commas
+ are not considered as delimiters.
+
+ string <string> : test the exact string match in the HTTP response body.
+ A health check response will be considered valid if the
+ response's body contains this exact string. If the
+ "string" keyword is prefixed with "!", then the response
+ will be considered invalid if the body contains this
+ string. This can be used to look for a mandatory word at
+ the end of a dynamic page, or to detect a failure when a
+ specific error appears on the check page (e.g. a stack
+ trace).
+
+ rstring <regex> : test a regular expression on the HTTP response body.
+ A health check response will be considered valid if the
+ response's body matches this expression. If the "rstring"
+ keyword is prefixed with "!", then the response will be
+ considered invalid if the body matches the expression.
+ This can be used to look for a mandatory word at the end
+ of a dynamic page, or to detect a failure when a specific
+ error appears on the check page (e.g. a stack trace).
+
+ string-lf <fmt> : test a log-format string match in the HTTP response body.
+ A health check response will be considered valid if the
+ response's body contains the string resulting of the
+ evaluation of <fmt>, which follows the log-format rules.
+ If prefixed with "!", then the response will be
+ considered invalid if the body contains the string.
+
+ It is important to note that the responses will be limited to a certain size
+ defined by the global "tune.bufsize" option, which defaults to 16384 bytes.
+ Thus, too large responses may not contain the mandatory pattern when using
+ "string" or "rstring". If a large response is absolutely required, it is
+ possible to change the default max size by setting the global variable.
+ However, it is worth keeping in mind that parsing very large responses can
+ waste some CPU cycles, especially when regular expressions are used, and that
+ it is always better to focus the checks on smaller resources.
+
+ In an http-check ruleset, the last expect rule may be implicit. If no expect
+ rule is specified after the last "http-check send", an implicit expect rule
+ is defined to match on 2xx or 3xx status codes. It means this rule is also
+ defined if there is no "http-check" rule at all, when only "option httpchk"
+ is set.
+
+ Last, if "http-check expect" is combined with "http-check disable-on-404",
+ then this last one has precedence when the server responds with 404.
+
+ Examples :
+ # only accept status 200 as valid
+ http-check expect status 200,201,300-310
+
+ # be sure a sessid cookie is set
+ http-check expect hdr name "set-cookie" value -m beg "sessid="
+
+ # consider SQL errors as errors
+ http-check expect ! string SQL\ Error
+
+ # consider status 5xx only as errors
+ http-check expect ! rstatus ^5
+
+ # check that we have a correct hexadecimal tag before /html
+ http-check expect rstring <!--tag:[0-9a-f]*--></html>
+
+ See also : "option httpchk", "http-check connect", "http-check disable-on-404"
+ and "http-check send".
+
+
+http-check send [meth <method>] [{ uri <uri> | uri-lf <fmt> }>] [ver <version>]
+ [hdr <name> <fmt>]* [{ body <string> | body-lf <fmt> }]
+ [comment <msg>]
+ Add a possible list of headers and/or a body to the request sent during HTTP
+ health checks.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ meth <method> is the optional HTTP method used with the requests. When not
+ set, the "OPTIONS" method is used, as it generally requires
+ low server processing and is easy to filter out from the
+ logs. Any method may be used, though it is not recommended
+ to invent non-standard ones.
+
+ uri <uri> is optional and set the URI referenced in the HTTP requests
+ to the string <uri>. It defaults to "/" which is accessible
+ by default on almost any server, but may be changed to any
+ other URI. Query strings are permitted.
+
+ uri-lf <fmt> is optional and set the URI referenced in the HTTP requests
+ using the log-format string <fmt>. It defaults to "/" which
+ is accessible by default on almost any server, but may be
+ changed to any other URI. Query strings are permitted.
+
+ ver <version> is the optional HTTP version string. It defaults to
+ "HTTP/1.0" but some servers might behave incorrectly in HTTP
+ 1.0, so turning it to HTTP/1.1 may sometimes help. Note that
+ the Host field is mandatory in HTTP/1.1, use "hdr" argument
+ to add it.
+
+ hdr <name> <fmt> adds the HTTP header field whose name is specified in
+ <name> and whose value is defined by <fmt>, which follows
+ to the log-format rules.
+
+ body <string> add the body defined by <string> to the request sent during
+ HTTP health checks. If defined, the "Content-Length" header
+ is thus automatically added to the request.
+
+ body-lf <fmt> add the body defined by the log-format string <fmt> to the
+ request sent during HTTP health checks. If defined, the
+ "Content-Length" header is thus automatically added to the
+ request.
+
+ In addition to the request line defined by the "option httpchk" directive,
+ this one is the valid way to add some headers and optionally a body to the
+ request sent during HTTP health checks. If a body is defined, the associated
+ "Content-Length" header is automatically added. Thus, this header or
+ "Transfer-encoding" header should not be present in the request provided by
+ "http-check send". If so, it will be ignored. The old trick consisting in
+ adding headers after the version string on the "option httpchk" line is now
+ deprecated.
+
+ Also "http-check send" doesn't support HTTP keep-alive. Keep in mind that it
+ will automatically append a "Connection: close" header, unless a Connection
+ header has already been configured via a hdr entry.
+
+ Note that the Host header and the request authority, when both defined, are
+ automatically synchronized. It means when the HTTP request is sent, when a
+ Host is inserted in the request, the request authority is accordingly
+ updated. Thus, don't be surprised if the Host header value overwrites the
+ configured request authority.
+
+ Note also for now, no Host header is automatically added in HTTP/1.1 or above
+ requests. You should add it explicitly.
+
+ See also : "option httpchk", "http-check send-state" and "http-check expect".
+
+
+http-check send-state
+ Enable emission of a state header with HTTP health checks
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When this option is set, HAProxy will systematically send a special header
+ "X-Haproxy-Server-State" with a list of parameters indicating to each server
+ how they are seen by HAProxy. This can be used for instance when a server is
+ manipulated without access to HAProxy and the operator needs to know whether
+ HAProxy still sees it up or not, or if the server is the last one in a farm.
+
+ The header is composed of fields delimited by semi-colons, the first of which
+ is a word ("UP", "DOWN", "NOLB"), possibly followed by a number of valid
+ checks on the total number before transition, just as appears in the stats
+ interface. Next headers are in the form "<variable>=<value>", indicating in
+ no specific order some values available in the stats interface :
+ - a variable "address", containing the address of the backend server.
+ This corresponds to the <address> field in the server declaration. For
+ unix domain sockets, it will read "unix".
+
+ - a variable "port", containing the port of the backend server. This
+ corresponds to the <port> field in the server declaration. For unix
+ domain sockets, it will read "unix".
+
+ - a variable "name", containing the name of the backend followed by a slash
+ ("/") then the name of the server. This can be used when a server is
+ checked in multiple backends.
+
+ - a variable "node" containing the name of the HAProxy node, as set in the
+ global "node" variable, otherwise the system's hostname if unspecified.
+
+ - a variable "weight" indicating the weight of the server, a slash ("/")
+ and the total weight of the farm (just counting usable servers). This
+ helps to know if other servers are available to handle the load when this
+ one fails.
+
+ - a variable "scur" indicating the current number of concurrent connections
+ on the server, followed by a slash ("/") then the total number of
+ connections on all servers of the same backend.
+
+ - a variable "qcur" indicating the current number of requests in the
+ server's queue.
+
+ Example of a header received by the application server :
+ >>> X-Haproxy-Server-State: UP 2/3; name=bck/srv2; node=lb1; weight=1/2; \
+ scur=13/22; qcur=0
+
+ See also : "option httpchk", "http-check disable-on-404" and
+ "http-check send".
+
+
+http-check set-var(<var-name>[,<cond>...]) <expr>
+http-check set-var-fmt(<var-name>[,<cond>...]) <fmt>
+ This operation sets the content of a variable. The variable is declared inline.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <var-name> The name of the variable starts with an indication about its
+ scope. The scopes allowed for http-check are:
+ "proc" : the variable is shared with the whole process.
+ "sess" : the variable is shared with the tcp-check session.
+ "check": the variable is declared for the lifetime of the tcp-check.
+ This prefix is followed by a name. The separator is a '.'.
+ The name may only contain characters 'a-z', 'A-Z', '0-9', '.',
+ and '-'.
+
+ <cond> A set of conditions that must all be true for the variable to
+ actually be set (such as "ifnotempty", "ifgt" ...). See the
+ set-var converter's description for a full list of possible
+ conditions.
+
+ <expr> Is a sample-fetch expression potentially followed by converters.
+
+ <fmt> This is the value expressed using log-format rules (see Custom
+ Log Format in section 8.2.4).
+
+ Examples :
+ http-check set-var(check.port) int(1234)
+ http-check set-var-fmt(check.port) "name=%H"
+
+
+http-check unset-var(<var-name>)
+ Free a reference to a variable within its scope.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <var-name> The name of the variable starts with an indication about its
+ scope. The scopes allowed for http-check are:
+ "proc" : the variable is shared with the whole process.
+ "sess" : the variable is shared with the tcp-check session.
+ "check": the variable is declared for the lifetime of the tcp-check.
+ This prefix is followed by a name. The separator is a '.'.
+ The name may only contain characters 'a-z', 'A-Z', '0-9', '.',
+ and '-'.
+
+ Examples :
+ http-check unset-var(check.port)
+
+
+http-error status <code> [content-type <type>]
+ [ { default-errorfiles | errorfile <file> | errorfiles <name> |
+ file <file> | lf-file <file> | string <str> | lf-string <fmt> } ]
+ [ hdr <name> <fmt> ]*
+ Defines a custom error message to use instead of errors generated by HAProxy.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ status <code> is the HTTP status code. It must be specified.
+ Currently, HAProxy is capable of generating codes
+ 200, 400, 401, 403, 404, 405, 407, 408, 410, 413, 425,
+ 429, 500, 501, 502, 503, and 504.
+
+ content-type <type> is the response content type, for instance
+ "text/plain". This parameter is ignored and should be
+ omitted when an errorfile is configured or when the
+ payload is empty. Otherwise, it must be defined.
+
+ default-errorfiles Reset the previously defined error message for current
+ proxy for the status <code>. If used on a backend, the
+ frontend error message is used, if defined. If used on
+ a frontend, the default error message is used.
+
+ errorfile <file> designates a file containing the full HTTP response.
+ It is recommended to follow the common practice of
+ appending ".http" to the filename so that people do
+ not confuse the response with HTML error pages, and to
+ use absolute paths, since files are read before any
+ chroot is performed.
+
+ errorfiles <name> designates the http-errors section to use to import
+ the error message with the status code <code>. If no
+ such message is found, the proxy's error messages are
+ considered.
+
+ file <file> specifies the file to use as response payload. If the
+ file is not empty, its content-type must be set as
+ argument to "content-type", otherwise, any
+ "content-type" argument is ignored. <file> is
+ considered as a raw string.
+
+ string <str> specifies the raw string to use as response payload.
+ The content-type must always be set as argument to
+ "content-type".
+
+ lf-file <file> specifies the file to use as response payload. If the
+ file is not empty, its content-type must be set as
+ argument to "content-type", otherwise, any
+ "content-type" argument is ignored. <file> is
+ evaluated as a log-format string.
+
+ lf-string <str> specifies the log-format string to use as response
+ payload. The content-type must always be set as
+ argument to "content-type".
+
+ hdr <name> <fmt> adds to the response the HTTP header field whose name
+ is specified in <name> and whose value is defined by
+ <fmt>, which follows the log-format rules.
+ This parameter is ignored if an errorfile is used.
+
+ This directive may be used instead of "errorfile", to define a custom error
+ message. As "errorfile" directive, it is used for errors detected and
+ returned by HAProxy. If an errorfile is defined, it is parsed when HAProxy
+ starts and must be valid according to the HTTP standards. The generated
+ response must not exceed the configured buffer size (BUFSIZE), otherwise an
+ internal error will be returned. Finally, if you consider to use some
+ http-after-response rules to rewrite these errors, the reserved buffer space
+ should be available (see "tune.maxrewrite").
+
+ The files are read at the same time as the configuration and kept in memory.
+ For this reason, the errors continue to be returned even when the process is
+ chrooted, and no file change is considered while the process is running.
+
+ Note: 400/408/500 errors emitted in early stage of the request parsing are
+ handled by the multiplexer at a lower level. No custom formatting is
+ supported at this level. Thus only static error messages, defined with
+ "errorfile" directive, are supported. However, this limitation only
+ exists during the request headers parsing or between two transactions.
+
+ See also : "errorfile", "errorfiles", "errorloc", "errorloc302",
+ "errorloc303" and section 3.8 about http-errors.
+
+
+http-request <action> [options...] [ { if | unless } <condition> ]
+ Access control for Layer 7 requests
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ The http-request statement defines a set of rules which apply to layer 7
+ processing. The rules are evaluated in their declaration order when they are
+ met in a frontend, listen or backend section. Any rule may optionally be
+ followed by an ACL-based condition, in which case it will only be evaluated
+ if the condition evaluates to true.
+
+ The condition is evaluated just before the action is executed, and the action
+ is performed exactly once. As such, there is no problem if an action changes
+ an element which is checked as part of the condition. This also means that
+ multiple actions may rely on the same condition so that the first action that
+ changes the condition's evaluation is sufficient to implicitly disable the
+ remaining actions. This is used for example when trying to assign a value to
+ a variable from various sources when it's empty. There is no limit to the
+ number of "http-request" statements per instance.
+
+ The first keyword after "http-request" in the syntax is the rule's action,
+ optionally followed by a varying number of arguments for the action. The
+ supported actions and their respective syntaxes are enumerated in section 4.3
+ "Actions" (look for actions which tick "HTTP Req").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Example:
+ acl nagios src 192.168.129.3
+ acl local_net src 192.168.0.0/16
+ acl auth_ok http_auth(L1)
+
+ http-request allow if nagios
+ http-request allow if local_net auth_ok
+ http-request auth realm Gimme if local_net auth_ok
+ http-request deny
+
+ Example:
+ acl key req.hdr(X-Add-Acl-Key) -m found
+ acl add path /addacl
+ acl del path /delacl
+
+ acl myhost hdr(Host) -f myhost.lst
+
+ http-request add-acl(myhost.lst) %[req.hdr(X-Add-Acl-Key)] if key add
+ http-request del-acl(myhost.lst) %[req.hdr(X-Add-Acl-Key)] if key del
+
+ Example:
+ acl value req.hdr(X-Value) -m found
+ acl setmap path /setmap
+ acl delmap path /delmap
+
+ use_backend bk_appli if { hdr(Host),map_str(map.lst) -m found }
+
+ http-request set-map(map.lst) %[src] %[req.hdr(X-Value)] if setmap value
+ http-request del-map(map.lst) %[src] if delmap
+
+ See also : "stats http-request", section 3.4 about userlists and section 7
+ about ACL usage.
+
+http-response <action> <options...> [ { if | unless } <condition> ]
+ Access control for Layer 7 responses
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ The http-response statement defines a set of rules which apply to layer 7
+ processing. The rules are evaluated in their declaration order when they are
+ met in a frontend, listen or backend section. Since these rules apply on
+ responses, the backend rules are applied first, followed by the frontend's
+ rules. Any rule may optionally be followed by an ACL-based condition, in
+ which case it will only be evaluated if the condition evaluates to true.
+
+ The condition is evaluated just before the action is executed, and the action
+ is performed exactly once. As such, there is no problem if an action changes
+ an element which is checked as part of the condition. This also means that
+ multiple actions may rely on the same condition so that the first action that
+ changes the condition's evaluation is sufficient to implicitly disable the
+ remaining actions. This is used for example when trying to assign a value to
+ a variable from various sources when it's empty. There is no limit to the
+ number of "http-response" statements per instance.
+
+ The first keyword after "http-response" in the syntax is the rule's action,
+ optionally followed by a varying number of arguments for the action. The
+ supported actions and their respective syntaxes are enumerated in section 4.3
+ "Actions" (look for actions which tick "HTTP Res").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Example:
+ acl key_acl res.hdr(X-Acl-Key) -m found
+
+ acl myhost hdr(Host) -f myhost.lst
+
+ http-response add-acl(myhost.lst) %[res.hdr(X-Acl-Key)] if key_acl
+ http-response del-acl(myhost.lst) %[res.hdr(X-Acl-Key)] if key_acl
+
+ Example:
+ acl value res.hdr(X-Value) -m found
+
+ use_backend bk_appli if { hdr(Host),map_str(map.lst) -m found }
+
+ http-response set-map(map.lst) %[src] %[res.hdr(X-Value)] if value
+ http-response del-map(map.lst) %[src] if ! value
+
+ See also : "http-request", section 3.4 about userlists and section 7 about
+ ACL usage.
+
+http-reuse { never | safe | aggressive | always }
+ Declare how idle HTTP connections may be shared between requests
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ By default, a connection established between HAProxy and the backend server
+ which is considered safe for reuse is moved back to the server's idle
+ connections pool so that any other request can make use of it. This is the
+ "safe" strategy below.
+
+ The argument indicates the desired connection reuse strategy :
+
+ - "never" : idle connections are never shared between sessions. This mode
+ may be enforced to cancel a different strategy inherited from
+ a defaults section or for troubleshooting. For example, if an
+ old bogus application considers that multiple requests over
+ the same connection come from the same client and it is not
+ possible to fix the application, it may be desirable to
+ disable connection sharing in a single backend. An example of
+ such an application could be an old HAProxy using cookie
+ insertion in tunnel mode and not checking any request past the
+ first one.
+
+ - "safe" : this is the default and the recommended strategy. The first
+ request of a session is always sent over its own connection,
+ and only subsequent requests may be dispatched over other
+ existing connections. This ensures that in case the server
+ closes the connection when the request is being sent, the
+ browser can decide to silently retry it. Since it is exactly
+ equivalent to regular keep-alive, there should be no side
+ effects. There is also a special handling for the connections
+ using protocols subject to Head-of-line blocking (backend with
+ h2 or fcgi). In this case, when at least one stream is
+ processed, the used connection is reserved to handle streams
+ of the same session. When no more streams are processed, the
+ connection is released and can be reused.
+
+ - "aggressive" : this mode may be useful in webservices environments where
+ all servers are not necessarily known and where it would be
+ appreciable to deliver most first requests over existing
+ connections. In this case, first requests are only delivered
+ over existing connections that have been reused at least once,
+ proving that the server correctly supports connection reuse.
+ It should only be used when it's sure that the client can
+ retry a failed request once in a while and where the benefit
+ of aggressive connection reuse significantly outweighs the
+ downsides of rare connection failures.
+
+ - "always" : this mode is only recommended when the path to the server is
+ known for never breaking existing connections quickly after
+ releasing them. It allows the first request of a session to be
+ sent to an existing connection. This can provide a significant
+ performance increase over the "safe" strategy when the backend
+ is a cache farm, since such components tend to show a
+ consistent behavior and will benefit from the connection
+ sharing. It is recommended that the "http-keep-alive" timeout
+ remains low in this mode so that no dead connections remain
+ usable. In most cases, this will lead to the same performance
+ gains as "aggressive" but with more risks. It should only be
+ used when it improves the situation over "aggressive".
+
+ When http connection sharing is enabled, great care is taken to respect the
+ connection properties and compatibility. Indeed, some properties are specific
+ and it is not possible to reuse them blindly. Those are the SSL SNI, source
+ and destination address and proxy protocol block. A connection is reused only
+ if it shares the same set of properties with the request.
+
+ Also note that connections with certain bogus authentication schemes (relying
+ on the connection) like NTLM are marked private and never shared.
+
+ A connection pool is involved and configurable with "pool-max-conn".
+
+ Note: connection reuse improves the accuracy of the "server maxconn" setting,
+ because almost no new connection will be established while idle connections
+ remain available. This is particularly true with the "always" strategy.
+
+ The rules to decide to keep an idle connection opened or to close it after
+ processing are also governed by the "tune.pool-low-fd-ratio" (default: 20%)
+ and "tune.pool-high-fd-ratio" (default: 25%). These correspond to the
+ percentage of total file descriptors spent in idle connections above which
+ haproxy will respectively refrain from keeping a connection opened after a
+ response, and actively kill idle connections. Some setups using a very high
+ ratio of idle connections, either because of too low a global "maxconn", or
+ due to a lot of HTTP/2 or HTTP/3 traffic on the frontend (few connections)
+ but HTTP/1 connections on the backend, may observe a lower reuse rate because
+ too few connections are kept open. It may be desirable in this case to adjust
+ such thresholds or simply to increase the global "maxconn" value.
+
+ Similarly, when thread groups are explicitly enabled, it is important to
+ understand that idle connections are only usable between threads from a same
+ group. As such it may happen that unfair load between groups leads to more
+ idle connections being needed, causing a lower reuse rate. The same solution
+ may then be applied (increase global "maxconn" or increase pool ratios).
+
+ See also : "option http-keep-alive", "server maxconn", "thread-groups",
+ "tune.pool-high-fd-ratio", "tune.pool-low-fd-ratio"
+
+
+http-send-name-header [<header>]
+ Add the server name to a request. Use the header string given by <header>
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <header> The header string to use to send the server name
+
+ The "http-send-name-header" statement causes the header field named <header>
+ to be set to the name of the target server at the moment the request is about
+ to be sent on the wire. Any existing occurrences of this header are removed.
+ Upon retries and redispatches, the header field is updated to always reflect
+ the server being attempted to connect to. Given that this header is modified
+ very late in the connection setup, it may have unexpected effects on already
+ modified headers. For example using it with transport-level headers such as
+ connection, content-length, transfer-encoding and so on will likely result in
+ invalid requests being sent to the server. Additionally it has been reported
+ that this directive is currently being used as a way to overwrite the Host
+ header field in outgoing requests; while this trick has been known to work
+ as a side effect of the feature for some time, it is not officially supported
+ and might possibly not work anymore in a future version depending on the
+ technical difficulties this feature induces. A long-term solution instead
+ consists in fixing the application which required this trick so that it binds
+ to the correct host name.
+
+ See also : "server"
+
+id <value>
+ Set a persistent ID to a proxy.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ Arguments : none
+
+ Set a persistent ID for the proxy. This ID must be unique and positive.
+ An unused ID will automatically be assigned if unset. The first assigned
+ value will be 1. This ID is currently only returned in statistics.
+
+
+ignore-persist { if | unless } <condition>
+ Declare a condition to ignore persistence
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ By default, when cookie persistence is enabled, every request containing
+ the cookie is unconditionally persistent (assuming the target server is up
+ and running).
+
+ The "ignore-persist" statement allows one to declare various ACL-based
+ conditions which, when met, will cause a request to ignore persistence.
+ This is sometimes useful to load balance requests for static files, which
+ often don't require persistence. This can also be used to fully disable
+ persistence for a specific User-Agent (for example, some web crawler bots).
+
+ The persistence is ignored when an "if" condition is met, or unless an
+ "unless" condition is met.
+
+ Example:
+ acl url_static path_beg /static /images /img /css
+ acl url_static path_end .gif .png .jpg .css .js
+ ignore-persist if url_static
+
+ See also : "force-persist", "cookie", and section 7 about ACL usage.
+
+load-server-state-from-file { global | local | none }
+ Allow seamless reload of HAProxy
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ This directive points HAProxy to a file where server state from previous
+ running process has been saved. That way, when starting up, before handling
+ traffic, the new process can apply old states to servers exactly as if no
+ reload occurred. The purpose of the "load-server-state-from-file" directive is
+ to tell HAProxy which file to use. For now, the arguments only allow to either
+ prevent loading state or load states from a file containing all backends and
+ servers.
+ The state file can be generated by running the command "show servers state"
+ over the stats socket and redirect output.
+
+ The format of the file is versioned and is very specific. To understand it,
+ please read the documentation of the "show servers state" command (chapter
+ 9.3 of Management Guide).
+
+ Arguments:
+ global load the content of the file pointed by the global directive
+ named "server-state-file".
+
+ local load the content of the file pointed by the directive
+ "server-state-file-name" if set. If not set, then the backend
+ name is used as a file name.
+
+ none don't load any stat for this backend
+
+ Notes:
+ - server's IP address is preserved across reloads by default, but the
+ order can be changed thanks to the server's "init-addr" setting. This
+ means that an IP address change performed on the CLI at run time will
+ be preserved, and that any change to the local resolver (e.g. /etc/hosts)
+ will possibly not have any effect if the state file is in use.
+
+ - server's weight is applied from previous running process unless it has
+ changed between previous and new configuration files.
+
+ Example: Minimal configuration
+
+ global
+ stats socket /tmp/socket
+ server-state-file /tmp/server_state
+
+ defaults
+ load-server-state-from-file global
+
+ backend bk
+ server s1 127.0.0.1:22 check weight 11
+ server s2 127.0.0.1:22 check weight 12
+
+
+ Then one can run :
+
+ socat /tmp/socket - <<< "show servers state" > /tmp/server_state
+
+ Content of the file /tmp/server_state would be like this:
+
+ 1
+ # <field names skipped for the doc example>
+ 1 bk 1 s1 127.0.0.1 2 0 11 11 4 6 3 4 6 0 0
+ 1 bk 2 s2 127.0.0.1 2 0 12 12 4 6 3 4 6 0 0
+
+ Example: Minimal configuration
+
+ global
+ stats socket /tmp/socket
+ server-state-base /etc/haproxy/states
+
+ defaults
+ load-server-state-from-file local
+
+ backend bk
+ server s1 127.0.0.1:22 check weight 11
+ server s2 127.0.0.1:22 check weight 12
+
+
+ Then one can run :
+
+ socat /tmp/socket - <<< "show servers state bk" > /etc/haproxy/states/bk
+
+ Content of the file /etc/haproxy/states/bk would be like this:
+
+ 1
+ # <field names skipped for the doc example>
+ 1 bk 1 s1 127.0.0.1 2 0 11 11 4 6 3 4 6 0 0
+ 1 bk 2 s2 127.0.0.1 2 0 12 12 4 6 3 4 6 0 0
+
+ See also: "server-state-file", "server-state-file-name", and
+ "show servers state"
+
+
+log global
+log <target> [len <length>] [format <format>] [sample <ranges>:<sample_size>]
+ <facility> [<level> [<minlevel>]]
+no log
+ Enable per-instance logging of events and traffic.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Prefix :
+ no should be used when the logger list must be flushed. For example,
+ if you don't want to inherit from the default logger list. This
+ prefix does not allow arguments.
+
+ Arguments :
+ global should be used when the instance's logging parameters are the
+ same as the global ones. This is the most common usage. "global"
+ replaces <target>, <facility> and <level> with those of the log
+ entries found in the "global" section. Only one "log global"
+ statement may be used per instance, and this form takes no other
+ parameter.
+
+ <target> indicates where to send the logs. It takes the same format as
+ for the "global" section's logs, and can be one of :
+
+ - An IPv4 address optionally followed by a colon (':') and a UDP
+ port. If no port is specified, 514 is used by default (the
+ standard syslog port).
+
+ - An IPv6 address followed by a colon (':') and optionally a UDP
+ port. If no port is specified, 514 is used by default (the
+ standard syslog port).
+
+ - A filesystem path to a UNIX domain socket, keeping in mind
+ considerations for chroot (be sure the path is accessible
+ inside the chroot) and uid/gid (be sure the path is
+ appropriately writable).
+
+ - A file descriptor number in the form "fd@<number>", which may
+ point to a pipe, terminal, or socket. In this case unbuffered
+ logs are used and one writev() call per log is performed. This
+ is a bit expensive but acceptable for most workloads. Messages
+ sent this way will not be truncated but may be dropped, in
+ which case the DroppedLogs counter will be incremented. The
+ writev() call is atomic even on pipes for messages up to
+ PIPE_BUF size, which POSIX recommends to be at least 512 and
+ which is 4096 bytes on most modern operating systems. Any
+ larger message may be interleaved with messages from other
+ processes. Exceptionally for debugging purposes the file
+ descriptor may also be directed to a file, but doing so will
+ significantly slow HAProxy down as non-blocking calls will be
+ ignored. Also there will be no way to purge nor rotate this
+ file without restarting the process. Note that the configured
+ syslog format is preserved, so the output is suitable for use
+ with a TCP syslog server. See also the "short" and "raw"
+ formats below.
+
+ - "stdout" / "stderr", which are respectively aliases for "fd@1"
+ and "fd@2", see above.
+
+ - A ring buffer in the form "ring@<name>", which will correspond
+ to an in-memory ring buffer accessible over the CLI using the
+ "show events" command, which will also list existing rings and
+ their sizes. Such buffers are lost on reload or restart but
+ when used as a complement this can help troubleshooting by
+ having the logs instantly available.
+
+ - A log backend in the form "backend@<name>", which will send
+ log messages to the corresponding log backend responsible for
+ sending the message to the proper server according to the
+ backend's lb settings. A log backend is a backend section with
+ "mode log" set (see "mode" for more information).
+
+ - An explicit stream address prefix such as "tcp@","tcp6@",
+ "tcp4@" or "uxst@" will allocate an implicit ring buffer with
+ a stream forward server targeting the given address.
+
+ You may want to reference some environment variables in the
+ address parameter, see section 2.3 about environment variables.
+
+ <length> is an optional maximum line length. Log lines larger than this
+ value will be truncated before being sent. The reason is that
+ syslog servers act differently on log line length. All servers
+ support the default value of 1024, but some servers simply drop
+ larger lines while others do log them. If a server supports long
+ lines, it may make sense to set this value here in order to avoid
+ truncating long lines. Similarly, if a server drops long lines,
+ it is preferable to truncate them before sending them. Accepted
+ values are 80 to 65535 inclusive. The default value of 1024 is
+ generally fine for all standard usages. Some specific cases of
+ long captures or JSON-formatted logs may require larger values.
+ You may also need to increase "tune.http.logurilen" if your
+ request URIs are truncated.
+
+ <ranges> A list of comma-separated ranges to identify the logs to sample.
+ This is used to balance the load of the logs to send to the log
+ server. The limits of the ranges cannot be zero. They are numbered
+ from 1. The size or period (in number of logs) of the sample must
+ be set with <sample_size> parameter.
+
+ <sample_size>
+ The size of the sample in number of logs to consider when balancing
+ their logging loads. It is used to balance the load of the logs to
+ send to the syslog server. This size must be greater or equal to the
+ maximum of the high limits of the ranges.
+ (see also <ranges> parameter).
+
+ <format> is the log format used when generating syslog messages. It may be
+ one of the following :
+
+ local Analogous to the rfc3164 syslog message format except that
+ the hostname field is stripped. This is the default.
+ Note: option "log-send-hostname" switches the default to
+ rfc3164.
+
+ rfc3164 The RFC3164 syslog message format.
+ (https://tools.ietf.org/html/rfc3164)
+
+ rfc5424 The RFC5424 syslog message format.
+ (https://tools.ietf.org/html/rfc5424)
+
+ priority A message containing only a level plus syslog facility between
+ angle brackets such as '<63>', followed by the text. The PID,
+ date, time, process name and system name are omitted. This is
+ designed to be used with a local log server.
+
+ short A message containing only a level between angle brackets such as
+ '<3>', followed by the text. The PID, date, time, process name
+ and system name are omitted. This is designed to be used with a
+ local log server. This format is compatible with what the
+ systemd logger consumes.
+
+ timed A message containing only a level between angle brackets such as
+ '<3>', followed by ISO date and by the text. The PID, process
+ name and system name are omitted. This is designed to be
+ used with a local log server.
+
+ iso A message containing only the ISO date, followed by the text.
+ The PID, process name and system name are omitted. This is
+ designed to be used with a local log server.
+
+ raw A message containing only the text. The level, PID, date, time,
+ process name and system name are omitted. This is designed to
+ be used in containers or during development, where the severity
+ only depends on the file descriptor used (stdout/stderr).
+
+ <facility> must be one of the 24 standard syslog facilities :
+
+ kern user mail daemon auth syslog lpr news
+ uucp cron auth2 ftp ntp audit alert cron2
+ local0 local1 local2 local3 local4 local5 local6 local7
+
+ Note that the facility is ignored for the "short" and "raw"
+ formats, but still required as a positional field. It is
+ recommended to use "daemon" in this case to make it clear that
+ it's only supposed to be used locally.
+
+ <level> is optional and can be specified to filter outgoing messages. By
+ default, all messages are sent. If a level is specified, only
+ messages with a severity at least as important as this level
+ will be sent. An optional minimum level can be specified. If it
+ is set, logs emitted with a more severe level than this one will
+ be capped to this level. This is used to avoid sending "emerg"
+ messages on all terminals on some default syslog configurations.
+ Eight levels are known :
+
+ emerg alert crit err warning notice info debug
+
+ It is important to keep in mind that it is the frontend which decides what to
+ log from a connection, and that in case of content switching, the log entries
+ from the backend will be ignored. Connections are logged at level "info".
+
+ However, backend log declarations define how and where server status changes
+ will be logged. Level "notice" will be used to indicate a server going up,
+ "warning" will be used for termination signals and definitive service
+ termination, and "alert" will be used when a server goes down.
+
+ Note : According to RFC3164, messages are truncated to 1024 bytes before
+ being emitted.
+
+ Example :
+ log global
+ log stdout format short daemon # send log to systemd
+ log stdout format raw daemon # send everything to stdout
+ log stderr format raw daemon notice # send important events to stderr
+ log 127.0.0.1:514 local0 notice # only send important events
+ log tcp@127.0.0.1:514 local0 notice notice # same but limit output
+ # level and send in tcp
+ log "${LOCAL_SYSLOG}:514" local0 notice # send to local server
+
+log-format <string>
+ Specifies the log format string to use for traffic logs
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ This directive specifies the log format string that will be used for all logs
+ resulting from traffic passing through the frontend using this line. If the
+ directive is used in a defaults section, all subsequent frontends will use
+ the same log format. Please see section 8.2.4 which covers the log format
+ string in depth.
+ A specific log-format used only in case of connection error can also be
+ defined, see the "error-log-format" option.
+
+ "log-format" directive overrides previous "option tcplog", "log-format",
+ "option httplog" and "option httpslog" directives.
+
+log-format-sd <string>
+ Specifies the RFC5424 structured-data log format string
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ This directive specifies the RFC5424 structured-data log format string that
+ will be used for all logs resulting from traffic passing through the frontend
+ using this line. If the directive is used in a defaults section, all
+ subsequent frontends will use the same log format. Please see section 8.2.4
+ which covers the log format string in depth.
+
+ See https://tools.ietf.org/html/rfc5424#section-6.3 for more information
+ about the RFC5424 structured-data part.
+
+ Note : This log format string will be used only for loggers that have set
+ log format to "rfc5424".
+
+ Example :
+ log-format-sd [exampleSDID@1234\ bytes=\"%B\"\ status=\"%ST\"]
+
+
+log-tag <string>
+ Specifies the log tag to use for all outgoing logs
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Sets the tag field in the syslog header to this string. It defaults to the
+ log-tag set in the global section, otherwise the program name as launched
+ from the command line, which usually is "HAProxy". Sometimes it can be useful
+ to differentiate between multiple processes running on the same host, or to
+ differentiate customer instances running in the same process. In the backend,
+ logs about servers up/down will use this tag. As a hint, it can be convenient
+ to set a log-tag related to a hosted customer in a defaults section then put
+ all the frontends and backends for that customer, then start another customer
+ in a new defaults section. See also the global "log-tag" directive.
+
+max-keep-alive-queue <value>
+ Set the maximum server queue size for maintaining keep-alive connections
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ HTTP keep-alive tries to reuse the same server connection whenever possible,
+ but sometimes it can be counter-productive, for example if a server has a lot
+ of connections while other ones are idle. This is especially true for static
+ servers.
+
+ The purpose of this setting is to set a threshold on the number of queued
+ connections at which HAProxy stops trying to reuse the same server and prefers
+ to find another one. The default value, -1, means there is no limit. A value
+ of zero means that keep-alive requests will never be queued. For very close
+ servers which can be reached with a low latency and which are not sensitive to
+ breaking keep-alive, a low value is recommended (e.g. local static server can
+ use a value of 10 or less). For remote servers suffering from a high latency,
+ higher values might be needed to cover for the latency and/or the cost of
+ picking a different server.
+
+ Note that this has no impact on responses which are maintained to the same
+ server consecutively to a 401 response. They will still go to the same server
+ even if they have to be queued.
+
+ See also : "option http-server-close", "option prefer-last-server", server
+ "maxconn" and cookie persistence.
+
+max-session-srv-conns <nb>
+ Set the maximum number of outgoing connections we can keep idling for a given
+ client session. The default is 5 (it precisely equals MAX_SRV_LIST which is
+ defined at build time).
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+maxconn <conns>
+ Fix the maximum number of concurrent connections on a frontend
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <conns> is the maximum number of concurrent connections the frontend will
+ accept to serve. Excess connections will be queued by the system
+ in the socket's listen queue and will be served once a connection
+ closes.
+
+ If the system supports it, it can be useful on big sites to raise this limit
+ very high so that HAProxy manages connection queues, instead of leaving the
+ clients with unanswered connection attempts. This value should not exceed the
+ global maxconn. Also, keep in mind that a connection contains two buffers
+ of tune.bufsize (16kB by default) each, as well as some other data resulting
+ in about 33 kB of RAM being consumed per established connection. That means
+ that a medium system equipped with 1GB of RAM can withstand around
+ 20000-25000 concurrent connections if properly tuned.
+
+ Also, when <conns> is set to large values, it is possible that the servers
+ are not sized to accept such loads, and for this reason it is generally wise
+ to assign them some reasonable connection limits.
+
+ When this value is set to zero, which is the default, the global "maxconn"
+ value is used.
+
+ See also : "server", global section's "maxconn", "fullconn"
+
+
+mode { tcp|http|log }
+ Set the running mode or protocol of the instance
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+ Arguments :
+ tcp The instance will work in pure TCP mode. A full-duplex connection
+ will be established between clients and servers, and no layer 7
+ examination will be performed. This is the default mode. It
+ should be used for SSL, SSH, SMTP, ...
+
+ http The instance will work in HTTP mode. The client request will be
+ analyzed in depth before connecting to any server. Any request
+ which is not RFC-compliant will be rejected. Layer 7 filtering,
+ processing and switching will be possible. This is the mode which
+ brings HAProxy most of its value.
+
+ log When used in a backend section, it will turn the backend into a
+ log backend. Such backend can be used as a log destination for
+ any "log" directive by using the "backend@<name>" syntax. Log
+ messages will be distributed to the servers from the backend
+ according to the lb settings which can be configured using the
+ "balance" keyword. Log backends support UDP servers by prefixing
+ the server's address with the "udp@" prefix. Common backend and
+ server features are supported, but not TCP or HTTP specific ones.
+
+ When doing content switching, it is mandatory that the frontend and the
+ backend are in the same mode (generally HTTP), otherwise the configuration
+ will be refused.
+
+ Example :
+ defaults http_instances
+ mode http
+
+
+monitor fail { if | unless } <condition>
+ Add a condition to report a failure to a monitor HTTP request.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ if <cond> the monitor request will fail if the condition is satisfied,
+ and will succeed otherwise. The condition should describe a
+ combined test which must induce a failure if all conditions
+ are met, for instance a low number of servers both in a
+ backend and its backup.
+
+ unless <cond> the monitor request will succeed only if the condition is
+ satisfied, and will fail otherwise. Such a condition may be
+ based on a test on the presence of a minimum number of active
+ servers in a list of backends.
+
+ This statement adds a condition which can force the response to a monitor
+ request to report a failure. By default, when an external component queries
+ the URI dedicated to monitoring, a 200 response is returned. When one of the
+ conditions above is met, HAProxy will return 503 instead of 200. This is
+ very useful to report a site failure to an external component which may base
+ routing advertisements between multiple sites on the availability reported by
+ HAProxy. In this case, one would rely on an ACL involving the "nbsrv"
+ criterion. Note that "monitor fail" only works in HTTP mode. Both status
+ messages may be tweaked using "errorfile" or "errorloc" if needed.
+
+ Example:
+ frontend www
+ mode http
+ acl site_dead nbsrv(dynamic) lt 2
+ acl site_dead nbsrv(static) lt 2
+ monitor-uri /site_alive
+ monitor fail if site_dead
+
+ See also : "monitor-uri", "errorfile", "errorloc"
+
+
+monitor-uri <uri>
+ Intercept a URI used by external components' monitor requests
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <uri> is the exact URI which we want to intercept to return HAProxy's
+ health status instead of forwarding the request.
+
+ When an HTTP request referencing <uri> will be received on a frontend,
+ HAProxy will not forward it nor log it, but instead will return either
+ "HTTP/1.0 200 OK" or "HTTP/1.0 503 Service unavailable", depending on failure
+ conditions defined with "monitor fail". This is normally enough for any
+ front-end HTTP probe to detect that the service is UP and running without
+ forwarding the request to a backend server. Note that the HTTP method, the
+ version and all headers are ignored, but the request must at least be valid
+ at the HTTP level. This keyword may only be used with an HTTP-mode frontend.
+
+ Monitor requests are processed very early, just after the request is parsed
+ and even before any "http-request". The only rulesets applied before are the
+ tcp-request ones. They cannot be logged either, and it is the intended
+ purpose. Only one URI may be configured for monitoring; when multiple
+ "monitor-uri" statements are present, the last one will define the URI to
+ be used. They are only used to report HAProxy's health to an upper component,
+ nothing more. However, it is possible to add any number of conditions using
+ "monitor fail" and ACLs so that the result can be adjusted to whatever check
+ can be imagined (most often the number of available servers in a backend).
+
+ Note: if <uri> starts with a slash ('/'), the matching is performed against the
+ request's path instead of the request's uri. It is a workaround to let
+ the HTTP/2 requests match the monitor-uri. Indeed, in HTTP/2, clients
+ are encouraged to send absolute URIs only.
+
+ Example :
+ # Use /haproxy_test to report HAProxy's status
+ frontend www
+ mode http
+ monitor-uri /haproxy_test
+
+ See also : "monitor fail"
+
+
+option abortonclose
+no option abortonclose
+ Enable or disable early dropping of aborted requests pending in queues.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ In presence of very high loads, the servers will take some time to respond.
+ The per-instance connection queue will inflate, and the response time will
+ increase in proportion to the size of the queue times the average per-stream
+ response time. When clients will wait for more than a few seconds, they will
+ often hit the "STOP" button on their browser, leaving a useless request in
+ the queue, and slowing down other users, and the servers as well, because the
+ request will eventually be served, then aborted at the first error
+ encountered while delivering the response.
+
+ As there is no way to distinguish between a full STOP and a simple output
+ close on the client side, HTTP agents should be conservative and consider
+ that the client might only have closed its output channel while waiting for
+ the response. However, this introduces risks of congestion when lots of users
+ do the same, and is completely useless nowadays because probably no client at
+ all will close the stream while waiting for the response. Some HTTP agents
+ support this behavior (Squid, Apache, HAProxy), and others do not (TUX, most
+ hardware-based load balancers). So the probability for a closed input channel
+ to represent a user hitting the "STOP" button is close to 100%, and the risk
+ of being the single component to break rare but valid traffic is extremely
+ low, which adds to the temptation to be able to abort a stream early while
+ still not served and not pollute the servers.
+
+ In HAProxy, the user can choose the desired behavior using the option
+ "abortonclose". By default (without the option) the behavior is HTTP
+ compliant and aborted requests will be served. But when the option is
+ specified, a stream with an incoming channel closed will be aborted while
+ it is still possible, either pending in the queue for a connection slot, or
+ during the connection establishment if the server has not yet acknowledged
+ the connection request. This considerably reduces the queue size and the load
+ on saturated servers when users are tempted to click on STOP, which in turn
+ reduces the response time for other users.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "timeout queue" and server's "maxconn" and "maxqueue" parameters
+
+
+option accept-invalid-http-request
+no option accept-invalid-http-request
+ Enable or disable relaxing of HTTP request parsing
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, HAProxy complies with RFC7230 in terms of message parsing. This
+ means that invalid characters in header names are not permitted and cause an
+ error to be returned to the client. This is the desired behavior as such
+ forbidden characters are essentially used to build attacks exploiting server
+ weaknesses, and bypass security filtering. Sometimes, a buggy browser or
+ server will emit invalid header names for whatever reason (configuration,
+ implementation) and the issue will not be immediately fixed. In such a case,
+ it is possible to relax HAProxy's header name parser to accept any character
+ even if that does not make sense, by specifying this option. Similarly, the
+ list of characters allowed to appear in a URI is well defined by RFC3986, and
+ chars 0-31, 32 (space), 34 ('"'), 60 ('<'), 62 ('>'), 92 ('\'), 94 ('^'), 96
+ ('`'), 123 ('{'), 124 ('|'), 125 ('}'), 127 (delete) and anything above are
+ not allowed at all. HAProxy always blocks a number of them (0..32, 127). The
+ remaining ones are blocked by default unless this option is enabled. This
+ option also relaxes the test on the HTTP version, it allows HTTP/0.9 requests
+ to pass through (no version specified), as well as different protocol names
+ (e.g. RTSP), and multiple digits for both the major and the minor version.
+ Finally, this option also allows incoming URLs to contain fragment references
+ ('#' after the path).
+
+ This option should never be enabled by default as it hides application bugs
+ and open security breaches. It should only be deployed after a problem has
+ been confirmed.
+
+ When this option is enabled, erroneous header names will still be accepted in
+ requests, but the complete request will be captured in order to permit later
+ analysis using the "show errors" request on the UNIX stats socket. Similarly,
+ requests containing invalid chars in the URI part will be logged. Doing this
+ also helps confirming that the issue has been solved.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option accept-invalid-http-response" and "show errors" on the
+ stats socket.
+
+
+option accept-invalid-http-response
+no option accept-invalid-http-response
+ Enable or disable relaxing of HTTP response parsing
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ By default, HAProxy complies with RFC7230 in terms of message parsing. This
+ means that invalid characters in header names are not permitted and cause an
+ error to be returned to the client. This is the desired behavior as such
+ forbidden characters are essentially used to build attacks exploiting server
+ weaknesses, and bypass security filtering. Sometimes, a buggy browser or
+ server will emit invalid header names for whatever reason (configuration,
+ implementation) and the issue will not be immediately fixed. In such a case,
+ it is possible to relax HAProxy's header name parser to accept any character
+ even if that does not make sense, by specifying this option. This option also
+ relaxes the test on the HTTP version format, it allows multiple digits for
+ both the major and the minor version.
+
+ This option should never be enabled by default as it hides application bugs
+ and open security breaches. It should only be deployed after a problem has
+ been confirmed.
+
+ When this option is enabled, erroneous header names will still be accepted in
+ responses, but the complete response will be captured in order to permit
+ later analysis using the "show errors" request on the UNIX stats socket.
+ Doing this also helps confirming that the issue has been solved.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option accept-invalid-http-request" and "show errors" on the
+ stats socket.
+
+
+option allbackups
+no option allbackups
+ Use either all backup servers at a time or only the first one
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ By default, the first operational backup server gets all traffic when normal
+ servers are all down. Sometimes, it may be preferred to use multiple backups
+ at once, because one will not be enough. When "option allbackups" is enabled,
+ the load balancing will be performed among all backup servers when all normal
+ ones are unavailable. The same load balancing algorithm will be used and the
+ servers' weights will be respected. Thus, there will not be any priority
+ order between the backup servers anymore.
+
+ This option is mostly used with static server farms dedicated to return a
+ "sorry" page when an application is completely offline.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+
+option checkcache
+no option checkcache
+ Analyze all server responses and block responses with cacheable cookies
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ Some high-level frameworks set application cookies everywhere and do not
+ always let enough control to the developer to manage how the responses should
+ be cached. When a session cookie is returned on a cacheable object, there is a
+ high risk of session crossing or stealing between users traversing the same
+ caches. In some situations, it is better to block the response than to let
+ some sensitive session information go in the wild.
+
+ The option "checkcache" enables deep inspection of all server responses for
+ strict compliance with HTTP specification in terms of cacheability. It
+ carefully checks "Cache-control", "Pragma" and "Set-cookie" headers in server
+ response to check if there's a risk of caching a cookie on a client-side
+ proxy. When this option is enabled, the only responses which can be delivered
+ to the client are :
+ - all those without "Set-Cookie" header;
+ - all those with a return code other than 200, 203, 204, 206, 300, 301,
+ 404, 405, 410, 414, 501, provided that the server has not set a
+ "Cache-control: public" header field;
+ - all those that result from a request using a method other than GET, HEAD,
+ OPTIONS, TRACE, provided that the server has not set a 'Cache-Control:
+ public' header field;
+ - those with a 'Pragma: no-cache' header
+ - those with a 'Cache-control: private' header
+ - those with a 'Cache-control: no-store' header
+ - those with a 'Cache-control: max-age=0' header
+ - those with a 'Cache-control: s-maxage=0' header
+ - those with a 'Cache-control: no-cache' header
+ - those with a 'Cache-control: no-cache="set-cookie"' header
+ - those with a 'Cache-control: no-cache="set-cookie,' header
+ (allowing other fields after set-cookie)
+
+ If a response doesn't respect these requirements, then it will be blocked
+ just as if it was from an "http-response deny" rule, with an "HTTP 502 bad
+ gateway". The session state shows "PH--" meaning that the proxy blocked the
+ response during headers processing. Additionally, an alert will be sent in
+ the logs so that admins are informed that there's something to be fixed.
+
+ Due to the high impact on the application, the application should be tested
+ in depth with the option enabled before going to production. It is also a
+ good practice to always activate it during tests, even if it is not used in
+ production, as it will report potentially dangerous application behaviors.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+
+option clitcpka
+no option clitcpka
+ Enable or disable the sending of TCP keepalive packets on the client side
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ When there is a firewall or any session-aware component between a client and
+ a server, and when the protocol involves very long sessions with long idle
+ periods (e.g. remote desktops), there is a risk that one of the intermediate
+ components decides to expire a session which has remained idle for too long.
+
+ Enabling socket-level TCP keep-alives makes the system regularly send packets
+ to the other end of the connection, leaving it active. The delay between
+ keep-alive probes is controlled by the system only and depends both on the
+ operating system and its tuning parameters.
+
+ It is important to understand that keep-alive packets are neither emitted nor
+ received at the application level. It is only the network stacks which see
+ them. For this reason, even if one side of the proxy already uses keep-alives
+ to maintain its connection alive, those keep-alive packets will not be
+ forwarded to the other side of the proxy.
+
+ Please note that this has nothing to do with HTTP keep-alive.
+
+ Using option "clitcpka" enables the emission of TCP keep-alive probes on the
+ client side of a connection, which should help when session expirations are
+ noticed between HAProxy and a client.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option srvtcpka", "option tcpka"
+
+
+option contstats
+ Enable continuous traffic statistics updates
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, counters used for statistics calculation are incremented
+ only when a stream finishes. It works quite well when serving small
+ objects, but with big ones (for example large images or archives) or
+ with A/V streaming, a graph generated from HAProxy counters looks like
+ a hedgehog. With this option enabled counters get incremented frequently
+ along the stream, typically every 5 seconds, which is often enough to
+ produce clean graphs. Recounting touches a hotpath directly so it is
+ not enabled by default, as it can cause a lot of wakeups for very large
+ session counts and cause a small performance drop.
+
+option disable-h2-upgrade
+no option disable-h2-upgrade
+ Enable or disable the implicit HTTP/2 upgrade from an HTTP/1.x client
+ connection.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, HAProxy is able to implicitly upgrade an HTTP/1.x client
+ connection to an HTTP/2 connection if the first request it receives from a
+ given HTTP connection matches the HTTP/2 connection preface (i.e. the string
+ "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"). This way, it is possible to support
+ HTTP/1.x and HTTP/2 clients on non-SSL connections. This option must be
+ used to disable the implicit upgrade. Note this implicit upgrade is only
+ supported for HTTP proxies, thus this option too. Note also it is possible to
+ force the HTTP/2 on clear connections by specifying "proto h2" on the bind
+ line. Finally, this option is applied on all bind lines. To disable implicit
+ HTTP/2 upgrades for a specific bind line, it is possible to use "proto h1".
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+option dontlog-normal
+no option dontlog-normal
+ Enable or disable logging of normal, successful connections
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ There are large sites dealing with several thousand connections per second
+ and for which logging is a major pain. Some of them are even forced to turn
+ logs off and cannot debug production issues. Setting this option ensures that
+ normal connections, those which experience no error, no timeout, no retry nor
+ redispatch, will not be logged. This leaves disk space for anomalies. In HTTP
+ mode, the response status code is checked and return codes 5xx will still be
+ logged.
+
+ It is strongly discouraged to use this option as most of the time, the key to
+ complex issues is in the normal logs which will not be logged here. If you
+ need to separate logs, see the "log-separate-errors" option instead.
+
+ See also : "log", "dontlognull", "log-separate-errors" and section 8 about
+ logging.
+
+
+option dontlognull
+no option dontlognull
+ Enable or disable logging of null connections
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ In certain environments, there are components which will regularly connect to
+ various systems to ensure that they are still alive. It can be the case from
+ another load balancer as well as from monitoring systems. By default, even a
+ simple port probe or scan will produce a log. If those connections pollute
+ the logs too much, it is possible to enable option "dontlognull" to indicate
+ that a connection on which no data has been transferred will not be logged,
+ which typically corresponds to those probes. Note that errors will still be
+ returned to the client and accounted for in the stats. If this is not what is
+ desired, option http-ignore-probes can be used instead.
+
+ It is generally recommended not to use this option in uncontrolled
+ environments (e.g. internet), otherwise scans and other malicious activities
+ would not be logged.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "log", "http-ignore-probes", "monitor-uri", and
+ section 8 about logging.
+
+option forwarded [ proto ]
+ [ host | host-expr <host_expr> ]
+ [ by | by-expr <by_expr> ] [ by_port | by_port-expr <by_port_expr>]
+ [ for | for-expr <for_expr> ] [ for_port | for_port-expr <for_port_expr>]
+no option forwarded
+ Enable insertion of the rfc 7239 forwarded header in requests sent to servers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <host_expr> optional argument to specify a custom sample expression
+ whose result will be used as 'host' parameter value
+
+ <by_expr> optional argument to specify a custom sample expression
+ whose result will be used as 'by' parameter nodename value
+
+ <for_expr> optional argument to specify a custom sample expression
+ whose result will be used as 'for' parameter nodename value
+
+ <by_port_expr> optional argument to specify a custom sample expression
+ whose result will be used as 'by' parameter nodeport value
+
+ <for_port_expr> optional argument to specify a custom sample expression
+ whose result will be used as 'for' parameter nodeport value
+
+
+ Since HAProxy works in reverse-proxy mode, servers are losing some request
+ context (request origin: client ip address, protocol used...)
+
+ A common way to address this limitation is to use the well known
+ x-forwarded-for and x-forwarded-* friends to expose some of this context to the
+ underlying servers/applications.
+ While this used to work and is widely deployed, it is not officially supported
+ by the IETF and can be the root of some interoperability as well as security
+ issues.
+
+ To solve this, a new HTTP extension has been described by the IETF:
+ forwarded header (RFC7239).
+ More information here: https://www.rfc-editor.org/rfc/rfc7239.html
+
+ The use of this single header allows conveying numerous details
+ within the same header, and most importantly, fixes the proxy chaining
+ issue. (the rfc allows for multiple chained proxies to append their own
+ values to an already existing header).
+
+ This option may be specified in defaults, listen or backend section, but it
+ will be ignored for frontend sections.
+
+ Setting option forwarded without arguments results in using default implicit
+ behavior.
+ Default behavior enables proto parameter and injects original client ip.
+
+ The equivalent explicit/manual configuration would be:
+ option forwarded proto for
+
+ The keyword 'by' is used to enable 'by' parameter ("nodename") in
+ forwarded header. It allows embedding request proxy information.
+ 'by' value will be set to proxy ip (destination address)
+ If not available (ie: UNIX listener), 'by' will be set to
+ "unknown".
+
+ The keyword 'by-expr' is used to enable 'by' parameter ("nodename") in
+ forwarded header. It allows embedding request proxy information.
+ 'by' value will be set to the result of the sample expression
+ <by_expr>, if valid, otherwise it will be set to "unknown".
+
+ The keyword 'for' is used to enable 'for' parameter ("nodename") in
+ forwarded header. It allows to embed request client information.
+ 'for' value will be set to client ip (source address)
+ If not available (ie: UNIX listener), 'for' will be set to
+ "unknown".
+
+ The keyword 'for-expr' is used to enable 'for' parameter ("nodename") in
+ forwarded header. It allows to embed request client information.
+ 'for' value will be set to the result of the sample expression
+ <for_expr>, if valid, otherwise it will be set to "unknown".
+
+ The keyword 'by_port' is used to provide "nodeport" info to
+ 'by' parameter. 'by_port' requires 'by' or 'by-expr' to be set or
+ it will be ignored.
+ "nodeport" will be set to proxy (destination) port if available,
+ otherwise it will be ignored.
+
+ The keyword 'by_port-expr' is used to provide "nodeport" info to
+ 'by' parameter. 'by_port-expr' requires 'by' or 'by-expr' to be set or
+ it will be ignored.
+ "nodeport" will be set to the result of the sample expression
+ <by_port_expr>, if valid, otherwise it will be ignored.
+
+ The keyword 'for_port' is used to provide "nodeport" info to
+ 'for' parameter. 'for_port' requires 'for' or 'for-expr' to be set or
+ it will be ignored.
+ "nodeport" will be set to client (source) port if available,
+ otherwise it will be ignored.
+
+ The keyword 'for_port-expr' is used to provide "nodeport" info to
+ 'for' parameter. 'for_port-expr' requires 'for' or 'for-expr' to be set or
+ it will be ignored.
+ "nodeport" will be set to the result of the sample expression
+ <for_port_expr>, if valid, otherwise it will be ignored.
+
+ Examples :
+ # Those servers want the ip address and protocol of the client request
+ # Resulting header would look like this:
+ # forwarded: proto=http;for=127.0.0.1
+ backend www_default
+ mode http
+ option forwarded
+ #equivalent to: option forwarded proto for
+
+ # Those servers want the requested host and hashed client ip address
+ # as well as client source port (you should use seed for xxh32 if ensuring
+ # ip privacy is a concern)
+ # Resulting header would look like this:
+ # forwarded: host="haproxy.org";for="_000000007F2F367E:60138"
+ backend www_host
+ mode http
+ option forwarded host for-expr src,xxh32,hex for_port
+
+ # Those servers want custom data in host, for and by parameters
+ # Resulting header would look like this:
+ # forwarded: host="host.com";by=_haproxy;for="[::1]:10"
+ backend www_custom
+ mode http
+ option forwarded host-expr str(host.com) by-expr str(_haproxy) for for_port-expr int(10)
+
+ # Those servers want random 'for' obfuscated identifiers for request
+ # tracing purposes while protecting sensitive IP information
+ # Resulting header would look like this:
+ # forwarded: for=_000000002B1F4D63
+ backend www_for_hide
+ mode http
+ option forwarded for-expr rand,hex
+
+ See also : "option forwardfor", "option originalto"
+
+option forwardfor [ except <network> ] [ header <name> ] [ if-none ]
+ Enable insertion of the X-Forwarded-For header to requests sent to servers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <network> is an optional argument used to disable this option for sources
+ matching <network>
+ <name> an optional argument to specify a different "X-Forwarded-For"
+ header name.
+
+ Since HAProxy works in reverse-proxy mode, the servers see its IP address as
+ their client address. This is sometimes annoying when the client's IP address
+ is expected in server logs. To solve this problem, the well-known HTTP header
+ "X-Forwarded-For" may be added by HAProxy to all requests sent to the server.
+ This header contains a value representing the client's IP address. Since this
+ header is always appended at the end of the existing header list, the server
+ must be configured to always use the last occurrence of this header only. See
+ the server's manual to find how to enable use of this standard header. Note
+ that only the last occurrence of the header must be used, since it is really
+ possible that the client has already brought one.
+
+ The keyword "header" may be used to supply a different header name to replace
+ the default "X-Forwarded-For". This can be useful where you might already
+ have a "X-Forwarded-For" header from a different application (e.g. stunnel),
+ and you need preserve it. Also if your backend server doesn't use the
+ "X-Forwarded-For" header and requires different one (e.g. Zeus Web Servers
+ require "X-Cluster-Client-IP").
+
+ Sometimes, a same HAProxy instance may be shared between a direct client
+ access and a reverse-proxy access (for instance when an SSL reverse-proxy is
+ used to decrypt HTTPS traffic). It is possible to disable the addition of the
+ header for a known source address or network by adding the "except" keyword
+ followed by the network address. In this case, any source IP matching the
+ network will not cause an addition of this header. Most common uses are with
+ private networks or 127.0.0.1. IPv4 and IPv6 are both supported.
+
+ Alternatively, the keyword "if-none" states that the header will only be
+ added if it is not present. This should only be used in perfectly trusted
+ environment, as this might cause a security issue if headers reaching HAProxy
+ are under the control of the end-user.
+
+ This option may be specified either in the frontend or in the backend. If at
+ least one of them uses it, the header will be added. Note that the backend's
+ setting of the header subargument takes precedence over the frontend's if
+ both are defined. In the case of the "if-none" argument, if at least one of
+ the frontend or the backend does not specify it, it wants the addition to be
+ mandatory, so it wins.
+
+ Example :
+ # Public HTTP address also used by stunnel on the same machine
+ frontend www
+ mode http
+ option forwardfor except 127.0.0.1 # stunnel already adds the header
+
+ # Those servers want the IP Address in X-Client
+ backend www
+ mode http
+ option forwardfor header X-Client
+
+ See also : "option httpclose", "option http-server-close",
+ "option http-keep-alive"
+
+
+option h1-case-adjust-bogus-client
+no option h1-case-adjust-bogus-client
+ Enable or disable the case adjustment of HTTP/1 headers sent to bogus clients
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ There is no standard case for header names because, as stated in RFC7230,
+ they are case-insensitive. So applications must handle them in a case-
+ insensitive manner. But some bogus applications violate the standards and
+ erroneously rely on the cases most commonly used by browsers. This problem
+ becomes critical with HTTP/2 because all header names must be exchanged in
+ lower case, and HAProxy follows the same convention. All header names are
+ sent in lower case to clients and servers, regardless of the HTTP version.
+
+ When HAProxy receives an HTTP/1 response, its header names are converted to
+ lower case and manipulated and sent this way to the clients. If a client is
+ known to violate the HTTP standards and to fail to process a response coming
+ from HAProxy, it is possible to transform the lower case header names to a
+ different format when the response is formatted and sent to the client, by
+ enabling this option and specifying the list of headers to be reformatted
+ using the global directives "h1-case-adjust" or "h1-case-adjust-file". This
+ must only be a temporary workaround for the time it takes the client to be
+ fixed, because clients which require such workarounds might be vulnerable to
+ content smuggling attacks and must absolutely be fixed.
+
+ Please note that this option will not affect standards-compliant clients.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also: "option h1-case-adjust-bogus-server", "h1-case-adjust",
+ "h1-case-adjust-file".
+
+
+option h1-case-adjust-bogus-server
+no option h1-case-adjust-bogus-server
+ Enable or disable the case adjustment of HTTP/1 headers sent to bogus servers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ There is no standard case for header names because, as stated in RFC7230,
+ they are case-insensitive. So applications must handle them in a case-
+ insensitive manner. But some bogus applications violate the standards and
+ erroneously rely on the cases most commonly used by browsers. This problem
+ becomes critical with HTTP/2 because all header names must be exchanged in
+ lower case, and HAProxy follows the same convention. All header names are
+ sent in lower case to clients and servers, regardless of the HTTP version.
+
+ When HAProxy receives an HTTP/1 request, its header names are converted to
+ lower case and manipulated and sent this way to the servers. If a server is
+ known to violate the HTTP standards and to fail to process a request coming
+ from HAProxy, it is possible to transform the lower case header names to a
+ different format when the request is formatted and sent to the server, by
+ enabling this option and specifying the list of headers to be reformatted
+ using the global directives "h1-case-adjust" or "h1-case-adjust-file". This
+ must only be a temporary workaround for the time it takes the server to be
+ fixed, because servers which require such workarounds might be vulnerable to
+ content smuggling attacks and must absolutely be fixed.
+
+ Please note that this option will not affect standards-compliant servers.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also: "option h1-case-adjust-bogus-client", "h1-case-adjust",
+ "h1-case-adjust-file".
+
+
+option http-buffer-request
+no option http-buffer-request
+ Enable or disable waiting for whole HTTP request body before proceeding
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ It is sometimes desirable to wait for the body of an HTTP request before
+ taking a decision. This is what is being done by "balance url_param" for
+ example. The first use case is to buffer requests from slow clients before
+ connecting to the server. Another use case consists in taking the routing
+ decision based on the request body's contents. This option placed in a
+ frontend or backend forces the HTTP processing to wait until either the whole
+ body is received or the request buffer is full. It can have undesired side
+ effects with some applications abusing HTTP by expecting unbuffered
+ transmissions between the frontend and the backend, so this should definitely
+ not be used by default.
+
+ See also : "option http-no-delay", "timeout http-request",
+ "http-request wait-for-body"
+
+
+option http-ignore-probes
+no option http-ignore-probes
+ Enable or disable logging of null connections and request timeouts
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ Recently some browsers started to implement a "pre-connect" feature
+ consisting in speculatively connecting to some recently visited web sites
+ just in case the user would like to visit them. This results in many
+ connections being established to web sites, which end up in 408 Request
+ Timeout if the timeout strikes first, or 400 Bad Request when the browser
+ decides to close them first. These ones pollute the log and feed the error
+ counters. There was already "option dontlognull" but it's insufficient in
+ this case. Instead, this option does the following things :
+ - prevent any 400/408 message from being sent to the client if nothing
+ was received over a connection before it was closed;
+ - prevent any log from being emitted in this situation;
+ - prevent any error counter from being incremented
+
+ That way the empty connection is silently ignored. Note that it is better
+ not to use this unless it is clear that it is needed, because it will hide
+ real problems. The most common reason for not receiving a request and seeing
+ a 408 is due to an MTU inconsistency between the client and an intermediary
+ element such as a VPN, which blocks too large packets. These issues are
+ generally seen with POST requests as well as GET with large cookies. The logs
+ are often the only way to detect them.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "log", "dontlognull", "errorfile", and section 8 about logging.
+
+
+option http-keep-alive
+no option http-keep-alive
+ Enable or disable HTTP keep-alive from client to server for HTTP/1.x
+ connections
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ By default HAProxy operates in keep-alive mode with regards to persistent
+ HTTP/1.x connections: for each connection it processes each request and
+ response, and leaves the connection idle on both sides. This mode may be
+ changed by several options such as "option http-server-close" or "option
+ httpclose". This option allows to set back the keep-alive mode, which can be
+ useful when another mode was used in a defaults section.
+
+ Setting "option http-keep-alive" enables HTTP keep-alive mode on the client-
+ and server- sides. This provides the lowest latency on the client side (slow
+ network) and the fastest session reuse on the server side at the expense
+ of maintaining idle connections to the servers. In general, it is possible
+ with this option to achieve approximately twice the request rate that the
+ "http-server-close" option achieves on small objects. There are mainly two
+ situations where this option may be useful :
+
+ - when the server is non-HTTP compliant and authenticates the connection
+ instead of requests (e.g. NTLM authentication)
+
+ - when the cost of establishing the connection to the server is significant
+ compared to the cost of retrieving the associated object from the server.
+
+ This last case can happen when the server is a fast static server or cache.
+
+ At the moment, logs will not indicate whether requests came from the same
+ session or not. The accept date reported in the logs corresponds to the end
+ of the previous request, and the request time corresponds to the time spent
+ waiting for a new request. The keep-alive request time is still bound to the
+ timeout defined by "timeout http-keep-alive" or "timeout http-request" if
+ not set.
+
+ This option disables and replaces any previous "option httpclose" or "option
+ http-server-close".
+
+ See also : "option httpclose", "option http-server-close",
+ "option prefer-last-server" and "option http-pretend-keepalive".
+
+
+option http-no-delay
+no option http-no-delay
+ Instruct the system to favor low interactive delays over performance in HTTP
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ In HTTP, each payload is unidirectional and has no notion of interactivity.
+ Any agent is expected to queue data somewhat for a reasonably low delay.
+ There are some very rare server-to-server applications that abuse the HTTP
+ protocol and expect the payload phase to be highly interactive, with many
+ interleaved data chunks in both directions within a single request. This is
+ absolutely not supported by the HTTP specification and will not work across
+ most proxies or servers. When such applications attempt to do this through
+ HAProxy, it works but they will experience high delays due to the network
+ optimizations which favor performance by instructing the system to wait for
+ enough data to be available in order to only send full packets. Typical
+ delays are around 200 ms per round trip. Note that this only happens with
+ abnormal uses. Normal uses such as CONNECT requests or WebSockets are not
+ affected.
+
+ When "option http-no-delay" is present in either the frontend or the backend
+ used by a connection, all such optimizations will be disabled in order to
+ make the exchanges as fast as possible. Of course this offers no guarantee on
+ the functionality, as it may break at any other place. But if it works via
+ HAProxy, it will work as fast as possible. This option should never be used
+ by default, and should never be used at all unless such a buggy application
+ is discovered. The impact of using this option is an increase of bandwidth
+ usage and CPU usage, which may significantly lower performance in high
+ latency environments.
+
+ See also : "option http-buffer-request"
+
+
+option http-pretend-keepalive
+no option http-pretend-keepalive
+ Define whether HAProxy will announce keepalive for HTTP/1.x connection to the
+ server or not
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When running with "option http-server-close" or "option httpclose", HAProxy
+ adds a "Connection: close" header to the HTTP/1.x request forwarded to the
+ server. Unfortunately, when some servers see this header, they automatically
+ refrain from using the chunked encoding for responses of unknown length,
+ while this is totally unrelated. The effect is that a client or a cache could
+ receive an incomplete response without being aware of it, and consider the
+ response complete.
+
+ By setting "option http-pretend-keepalive", HAProxy will make the server
+ believe it will keep the connection alive. The server will then not fall back
+ to the abnormal undesired behavior described above. When HAProxy gets the
+ whole response, it
+ will close the connection with the server just as it would do with the
+ "option httpclose". That way the client gets a normal response and the
+ connection is correctly closed on the server side.
+
+ It is recommended not to enable this option by default, because most servers
+ will more efficiently close the connection themselves after the last packet,
+ and release its buffers slightly earlier. Also, the added packet on the
+ network could slightly reduce the overall peak performance. However it is
+ worth noting that when this option is enabled, HAProxy will have slightly
+ less work to do. So if HAProxy is the bottleneck on the whole architecture,
+ enabling this option might save a few CPU cycles.
+
+ This option may be set in backend and listen sections. Using it in a frontend
+ section will be ignored and a warning will be reported during startup. It is
+ a backend related option, so there is no real reason to set it on a
+ frontend.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option httpclose", "option http-server-close", and
+ "option http-keep-alive"
+
+option http-restrict-req-hdr-names { preserve | delete | reject }
+ Set HAProxy policy about HTTP request header names containing characters
+ outside the "[a-zA-Z0-9-]" charset
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ preserve disable the filtering. It is the default mode for HTTP proxies
+ with no FastCGI application configured.
+
+ delete remove request headers with a name containing a character
+ outside the "[a-zA-Z0-9-]" charset. It is the default mode for
+ HTTP backends with a configured FastCGI application.
+
+ reject reject the request with a 403-Forbidden response if it contains a
+ header name with a character outside the "[a-zA-Z0-9-]" charset.
+
+ This option may be used to restrict the request header names to alphanumeric
+ and hyphen characters ([A-Za-z0-9-]). This may be mandatory to interoperate
+ with non-HTTP compliant servers that fail to handle some characters in header
+ names. It may also be mandatory for FastCGI applications because all
+ non-alphanumeric characters in header names are replaced by an underscore
+ ('_'). Thus, it is easily possible to mix up header names and bypass some
+ rules. For instance, "X-Forwarded-For" and "X_Forwarded-For" headers are both
+ converted to "HTTP_X_FORWARDED_FOR" in FastCGI.
+
+ Note this option is evaluated per proxy and after the http-request rules
+ evaluation.
+
+option http-server-close
+no option http-server-close
+ Enable or disable HTTP/1.x connection closing on the server side
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ By default HAProxy operates in keep-alive mode with regards to persistent
+ HTTP/1.x connections: for each connection it processes each request and
+ response, and leaves the connection idle on both sides. This mode may be
+ changed by several options such as "option http-server-close" or "option
+ httpclose". Setting "option http-server-close" enables HTTP connection-close
+ mode on the server side while keeping the ability to support HTTP keep-alive
+ and pipelining on the client side. This provides the lowest latency on the
+ client side (slow network) and the fastest session reuse on the server side
+ to save server resources, similarly to "option httpclose". It also permits
+ non-keepalive capable servers to be served in keep-alive mode to the clients
+ if they conform to the requirements of RFC7230. Please note that some servers
+ do not always conform to those requirements when they see "Connection: close"
+ in the request. The effect will be that keep-alive will never be used. A
+ workaround consists in enabling "option http-pretend-keepalive".
+
+ At the moment, logs will not indicate whether requests came from the same
+ session or not. The accept date reported in the logs corresponds to the end
+ of the previous request, and the request time corresponds to the time spent
+ waiting for a new request. The keep-alive request time is still bound to the
+ timeout defined by "timeout http-keep-alive" or "timeout http-request" if
+ not set.
+
+ This option may be set both in a frontend and in a backend. It is enabled if
+ at least one of the frontend or backend holding a connection has it enabled.
+ It disables and replaces any previous "option httpclose" or "option
+ http-keep-alive". Please check section 4 ("Proxies") to see how this option
+ combines with others when frontend and backend options differ.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option httpclose", "option http-pretend-keepalive" and
+ "option http-keep-alive".
+
+option http-use-proxy-header
+no option http-use-proxy-header
+ Make use of non-standard Proxy-Connection header instead of Connection
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ While RFC7230 explicitly states that HTTP/1.1 agents must use the
+ Connection header to indicate their wish of persistent or non-persistent
+ connections, both browsers and proxies ignore this header for proxied
+ connections and make use of the undocumented, non-standard Proxy-Connection
+ header instead. The issue begins when trying to put a load balancer between
+ browsers and such proxies, because there will be a difference between what
+ HAProxy understands and what the client and the proxy agree on.
+
+ By setting this option in a frontend, HAProxy can automatically switch to use
+ that non-standard header if it sees proxied requests. A proxied request is
+ defined here as one where the URI begins with neither a '/' nor a '*'. This
+ is incompatible with the HTTP tunnel mode. Note that this option can only be
+ specified in a frontend and will affect the request along its whole life.
+
+ Also, when this option is set, a request which requires authentication will
+ automatically switch to use proxy authentication headers if it is itself a
+ proxied request. That makes it possible to check or enforce authentication in
+ front of an existing proxy.
+
+ This option should normally never be used, except in front of a proxy.
+
+ See also : "option httpclose", and "option http-server-close".
+
+option httpchk
+option httpchk <uri>
+option httpchk <method> <uri>
+option httpchk <method> <uri> <version>
+ Enables HTTP protocol to check on the servers health
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <method> is the optional HTTP method used with the requests. When not set,
+ the "OPTIONS" method is used, as it generally requires low server
+ processing and is easy to filter out from the logs. Any method
+ may be used, though it is not recommended to invent non-standard
+ ones.
+
+ <uri> is the URI referenced in the HTTP requests. It defaults to " / "
+ which is accessible by default on almost any server, but may be
+ changed to any other URI. Query strings are permitted.
+
+ <version> is the optional HTTP version string. It defaults to "HTTP/1.0"
+ but some servers might behave incorrectly in HTTP 1.0, so turning
+ it to HTTP/1.1 may sometimes help. Note that the Host field is
+ mandatory in HTTP/1.1, use "http-check send" directive to add it.
+
+ By default, server health checks only consist in trying to establish a TCP
+ connection. When "option httpchk" is specified, a complete HTTP request is
+ sent once the TCP connection is established, and responses 2xx and 3xx are
+ considered valid, while all other ones indicate a server failure, including
+ the lack of any response.
+
+ Combined with "http-check" directives, it is possible to customize the
+ request sent during the HTTP health checks or the matching rules on the
+ response. It is also possible to configure a send/expect sequence, just like
+ with the directive "tcp-check" for TCP health checks.
+
+ The server configuration is used by default to open connections to perform
+ HTTP health checks. But it is also possible to overwrite server parameters
+ using "http-check connect" rules.
+
+ "httpchk" option does not necessarily require an HTTP backend, it also works
+ with plain TCP backends. This is particularly useful to check simple scripts
+ bound to some dedicated ports using the inetd daemon. However, it will always
+ internally rely on an HTX multiplexer. Thus, it means the request
+ formatting and the response parsing will be strict.
+
+ Examples :
+ # Relay HTTPS traffic to Apache instance and check service availability
+ # using HTTP request "OPTIONS * HTTP/1.1" on port 80.
+ backend https_relay
+ mode tcp
+ option httpchk OPTIONS * HTTP/1.1
+ http-check send hdr Host www
+ server apache1 192.168.1.1:443 check port 80
+
+ See also : "option ssl-hello-chk", "option smtpchk", "option mysql-check",
+ "option pgsql-check", "http-check" and the "check", "port" and
+ "inter" server options.
+
+
+option httpclose
+no option httpclose
+ Enable or disable HTTP/1.x connection closing
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ By default HAProxy operates in keep-alive mode with regards to persistent
+ HTTP/1.x connections: for each connection it processes each request and
+ response, and leaves the connection idle on both sides. This mode may be
+ changed by several options such as "option http-server-close" or "option
+ httpclose".
+
+ If "option httpclose" is set, HAProxy will close the client or the server
+ connection, depending where the option is set. The frontend is considered for
+ client connections while the backend is considered for server ones. If the
+ option is set on a listener, it is applied both on client and server
+ connections. It will check if a "Connection: close" header is already set in
+ each direction, and will add one if missing.
+
+ This option may also be combined with "option http-pretend-keepalive", which
+ will disable sending of the "Connection: close" request header, but will
+ still cause the connection to be closed once the whole response is received.
+
+ It disables and replaces any previous "option http-server-close" or "option
+ http-keep-alive".
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option http-server-close".
+
+
+option httplog [ clf ]
+ Enable logging of HTTP request, stream state and timers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ clf if the "clf" argument is added, then the output format will be
+ the CLF format instead of HAProxy's default HTTP format. You can
+ use this when you need to feed HAProxy's logs through a specific
+ log analyzer which only supports the CLF format and which is not
+ extensible.
+
+ By default, the log output format is very poor, as it only contains the
+ source and destination addresses, and the instance name. By specifying
+ "option httplog", each log line turns into a much richer format including,
+ but not limited to, the HTTP request, the connection timers, the stream
+ status, the connections numbers, the captured headers and cookies, the
+ frontend, backend and server name, and of course the source address and
+ ports.
+
+ Specifying only "option httplog" will automatically clear the 'clf' mode
+ if it was set by default.
+
+ "option httplog" overrides any previous "log-format" directive.
+
+ See also : section 8 about logging.
+
+option httpslog
+ Enable logging of HTTPS request, stream state and timers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ By default, the log output format is very poor, as it only contains the
+ source and destination addresses, and the instance name. By specifying
+ "option httpslog", each log line turns into a much richer format including,
+ but not limited to, the HTTP request, the connection timers, the stream
+ status, the connections numbers, the captured headers and cookies, the
+ frontend, backend and server name, the SSL certificate verification and SSL
+ handshake statuses, and of course the source address and ports.
+
+ "option httpslog" overrides any previous "log-format" directive.
+
+ See also : section 8 about logging.
+
+
+option independent-streams
+no option independent-streams
+ Enable or disable independent timeout processing for both directions
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ By default, when data is sent over a socket, both the write timeout and the
+ read timeout for that socket are refreshed, because we consider that there is
+ activity on that socket, and we have no other means of guessing if we should
+ receive data or not.
+
+ While this default behavior is desirable for almost all applications, there
+ exists a situation where it is desirable to disable it, and only refresh the
+ read timeout if there are incoming data. This happens on streams with large
+ timeouts and low amounts of exchanged data such as telnet sessions. If the
+ server suddenly disappears, the output data accumulates in the system's
+ socket buffers, both timeouts are correctly refreshed, and there is no way
+ to know the server does not receive them, so we don't timeout. However, when
+ the underlying protocol always echoes sent data, it would be enough by itself
+ to detect the issue using the read timeout. Note that this problem does not
+ happen with more verbose protocols because data won't accumulate long in the
+ socket buffers.
+
+ When this option is set on the frontend, it will disable read timeout updates
+ on data sent to the client. There probably is little use of this case. When
+ the option is set on the backend, it will disable read timeout updates on
+ data sent to the server. Doing so will typically break large HTTP posts from
+ slow lines, so use it with caution.
+
+ See also : "timeout client", "timeout server" and "timeout tunnel"
+
+
+option ldap-check
+ Use LDAPv3 health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ It is possible to test that the server correctly talks LDAPv3 instead of just
+ testing that it accepts the TCP connection. When this option is set, an
+ LDAPv3 anonymous simple bind message is sent to the server, and the response
+ is analyzed to find an LDAPv3 bind response message.
+
+ The server is considered valid only when the LDAP response contains success
+ resultCode (http://tools.ietf.org/html/rfc4511#section-4.1.9).
+
+ Logging of bind requests is server dependent; see your server's documentation
+ on how to configure it.
+
+ Example :
+ option ldap-check
+
+ See also : "option httpchk"
+
+
+option external-check
+ Use external processes for server health checks
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ It is possible to test the health of a server using an external command.
+ This is achieved by running the executable set using "external-check
+ command".
+
+ Requires the "external-check" global to be set.
+
+ See also : "external-check", "external-check command", "external-check path"
+
+
+option idle-close-on-response
+no option idle-close-on-response
+ Avoid closing idle frontend connections if a soft stop is in progress
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, idle connections will be closed during a soft stop. In some
+ environments, a client talking to the proxy may have prepared some idle
+ connections in order to send requests later. If there is no proper retry on
+ write errors, this can result in errors while haproxy is reloading. Even
+ though a proper implementation should retry on connection/write errors, this
+ option was introduced to support backwards compatibility with haproxy prior
+ to version 2.4. Indeed before v2.4, haproxy used to wait for a last request
+ and response to add a "connection: close" header before closing, thus
+ notifying the client that the connection would not be reusable.
+
+ In a real life example, this behavior was seen in AWS using the ALB in front
+ of a haproxy. The end result was ALB sending 502 during haproxy reloads.
+
+ Users are warned that using this option may increase the number of old
+ processes if connections remain idle for too long. Adjusting the client
+ timeouts and/or the "hard-stop-after" parameter accordingly might be
+ needed in case of frequent reloads.
+
+ See also: "timeout client", "timeout client-fin", "timeout http-request",
+ "hard-stop-after"
+
+
+option log-health-checks
+no option log-health-checks
+ Enable or disable logging of health checks status updates
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ By default, failed health checks are logged if the server is UP and successful
+ health checks are logged if the server is DOWN, so the amount of additional
+ information is limited.
+
+ When this option is enabled, any change of the health check status or to
+ the server's health will be logged, so that it becomes possible to know
+ that a server was failing occasional checks before crashing, or exactly when
+ it failed to respond a valid HTTP status, then when the port started to
+ reject connections, then when the server stopped responding at all.
+
+ Note that status changes not caused by health checks (e.g. enable/disable on
+ the CLI) are intentionally not logged by this option.
+
+ See also: "option httpchk", "option ldap-check", "option mysql-check",
+ "option pgsql-check", "option redis-check", "option smtpchk",
+ "option tcp-check", "log" and section 8 about logging.
+
+
+option log-separate-errors
+no option log-separate-errors
+ Change log level for non-completely successful connections
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ Sometimes looking for errors in logs is not easy. This option makes HAProxy
+ raise the level of logs containing potentially interesting information such
+ as errors, timeouts, retries, redispatches, or HTTP status codes 5xx. The
+ level changes from "info" to "err". This makes it possible to log them
+ separately to a different file with most syslog daemons. Be careful not to
+ remove them from the original file, otherwise you would lose ordering which
+ provides very important information.
+
+ Using this option, large sites dealing with several thousand connections per
+ second may log normal traffic to a rotating buffer and only archive smaller
+ error logs.
+
+ See also : "log", "dontlognull", "dontlog-normal" and section 8 about
+ logging.
+
+
+option logasap
+no option logasap
+ Enable or disable early logging.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, logs are emitted when all the log format variables and sample
+ fetches used in the definition of the log-format string return a value, or
+ when the stream is terminated. This allows the built in log-format strings
+ to account for the transfer time, or the number of bytes in log messages.
+
+ When handling long lived connections such as large file transfers or RDP,
+ it may take a while for the request or connection to appear in the logs.
+ Using "option logasap", the log message is created as soon as the server
+ connection is established in mode tcp, or as soon as the server sends the
+ complete headers in mode http. Missing information in the logs will be the
+ total number of bytes which will only indicate the amount of data transferred
+ before the message was created and the total time which will not take the
+ remainder of the connection life or transfer time into account. For the case
+ of HTTP, it is good practice to capture the Content-Length response header
+ so that the logs at least indicate how many bytes are expected to be
+ transferred.
+
+ Examples :
+ listen http_proxy 0.0.0.0:80
+ mode http
+ option httplog
+ option logasap
+ log 192.168.2.200 local3
+
+ >>> Feb 6 12:14:14 localhost \
+ haproxy[14389]: 10.0.1.2:33317 [06/Feb/2009:12:14:14.655] http-in \
+ static/srv1 9/10/7/14/+30 200 +243 - - ---- 3/1/1/1/0 1/0 \
+ "GET /image.iso HTTP/1.0"
+
+ See also : "option httplog", "capture response header", and section 8 about
+ logging.
+
+
+option mysql-check [ user <username> [ { post-41 | pre-41 } ] ]
+ Use MySQL health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <username> This is the username which will be used when connecting to MySQL
+ server.
+ post-41 Send post v4.1 client compatible checks (the default)
+ pre-41 Send pre v4.1 client compatible checks
+
+ If you specify a username, the check consists of sending two MySQL packets,
+ one Client Authentication packet, and one QUIT packet, to correctly close
+ MySQL session. We then parse the MySQL Handshake Initialization packet and/or
+ Error packet. It is a basic but useful test which does not produce error nor
+ aborted connect on the server. However, it requires an unlocked authorised
+ user without a password. To create a basic limited user in MySQL with
+ optional resource limits:
+
+ CREATE USER '<username>'@'<ip_of_haproxy|network_of_haproxy/netmask>'
+ /*!50701 WITH MAX_QUERIES_PER_HOUR 1 MAX_UPDATES_PER_HOUR 0 */
+ /*M!100201 MAX_STATEMENT_TIME 0.0001 */;
+
+ If you don't specify a username (it is deprecated and not recommended), the
+ check only consists in parsing the Mysql Handshake Initialization packet or
+ Error packet, we don't send anything in this mode. It was reported that it
+ can generate lockout if check is too frequent and/or if there is not enough
+ traffic. In fact, you need in this case to check MySQL "max_connect_errors"
+ value as if a connection is established successfully within fewer than MySQL
+ "max_connect_errors" attempts after a previous connection was interrupted,
+ the error count for the host is cleared to zero. If HAProxy's server gets
+ blocked, the "FLUSH HOSTS" statement is the only way to unblock it.
+
+ Remember that this does not check database presence nor database consistency.
+ To do this, you can use an external check with xinetd for example.
+
+ The check requires MySQL >=3.22, for older version, please use TCP check.
+
+ Most often, an incoming MySQL server needs to see the client's IP address for
+ various purposes, including IP privilege matching and connection logging.
+ When possible, it is often wise to masquerade the client's IP address when
+ connecting to the server using the "usesrc" argument of the "source" keyword,
+ which requires the transparent proxy feature to be compiled in, and the MySQL
+ server to route the client via the machine hosting HAProxy.
+
+ See also: "option httpchk"
+
+
+option nolinger
+no option nolinger
+ Enable or disable immediate session resource cleaning after close
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ When clients or servers abort connections in a dirty way (e.g. they are
+ physically disconnected), the session timeouts triggers and the session is
+ closed. But it will remain in FIN_WAIT1 state for some time in the system,
+ using some resources and possibly limiting the ability to establish newer
+ connections.
+
+ When this happens, it is possible to activate "option nolinger" which forces
+ the system to immediately remove any socket's pending data on close. Thus,
+ a TCP RST is emitted, any pending data are truncated, and the session is
+ instantly purged from the system's tables. The generally visible effect for
+ a client is that responses are truncated if the close happens with a last
+ block of data (e.g. on a redirect or error response). On the server side,
+ it may help release the source ports immediately when forwarding a client
+ aborts in tunnels. In both cases, TCP resets are emitted and given that
+ the session is instantly destroyed, there will be no retransmit. On a lossy
+ network this can increase problems, especially when there is a firewall on
+ the lossy side, because the firewall might see and process the reset (hence
+ purge its session) and block any further traffic for this session, including
+ retransmits from the other side. So if the other side doesn't receive it,
+ it will never receive any RST again, and the firewall might log many blocked
+ packets.
+
+ For all these reasons, it is strongly recommended NOT to use this option,
+ unless absolutely needed as a last resort. In most situations, using the
+ "client-fin" or "server-fin" timeouts achieves similar results with a more
+ reliable behavior. On Linux it's also possible to use the "tcp-ut" bind or
+ server setting.
+
+ This option may be used both on frontends and backends, depending on the side
+ where it is required. Use it on the frontend for clients, and on the backend
+ for servers. While this option is technically supported in "defaults"
+ sections, it must really not be used there as it risks to accidentally
+ propagate to sections that must not use it and to cause problems there.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also: "timeout client-fin", "timeout server-fin", "tcp-ut" bind or server
+ keywords.
+
+option originalto [ except <network> ] [ header <name> ]
+ Enable insertion of the X-Original-To header to requests sent to servers
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <network> is an optional argument used to disable this option for sources
+ matching <network>
+ <name> an optional argument to specify a different "X-Original-To"
+ header name.
+
+ Since HAProxy can work in transparent mode, every request from a client can
+ be redirected to the proxy and HAProxy itself can proxy every request to a
+ complex SQUID environment and the destination host from SO_ORIGINAL_DST will
+ be lost. This is annoying when you want access rules based on destination ip
+ addresses. To solve this problem, a new HTTP header "X-Original-To" may be
+ added by HAProxy to all requests sent to the server. This header contains a
+ value representing the original destination IP address. The server must be
+ configured to always use the last occurrence of this header only. Note that
+ only the last occurrence of the header must be used, since it is really
+ possible that the client has already brought one.
+
+ The keyword "header" may be used to supply a different header name to replace
+ the default "X-Original-To". This can be useful where you might already
+ have an "X-Original-To" header from a different application, and you need
+ to preserve it. Also if your backend server doesn't use the "X-Original-To"
+ header and requires a different one.
+
+ Sometimes, a same HAProxy instance may be shared between a direct client
+ access and a reverse-proxy access (for instance when an SSL reverse-proxy is
+ used to decrypt HTTPS traffic). It is possible to disable the addition of the
+ header for a known destination address or network by adding the "except"
+ keyword followed by the network address. In this case, any destination IP
+ matching the network will not cause an addition of this header. Most common
+ uses are with private networks or 127.0.0.1. IPv4 and IPv6 are both
+ supported.
+
+ This option may be specified either in the frontend or in the backend. If at
+ least one of them uses it, the header will be added. Note that the backend's
+ setting of the header subargument takes precedence over the frontend's if
+ both are defined.
+
+ Examples :
+ # Original Destination address
+ frontend www
+ mode http
+ option originalto except 127.0.0.1
+
+ # Those servers want the IP Address in X-Client-Dst
+ backend www
+ mode http
+ option originalto header X-Client-Dst
+
+ See also : "option httpclose", "option http-server-close".
+
+
+option persist
+no option persist
+ Enable or disable forced persistence on down servers
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When an HTTP request reaches a backend with a cookie which references a dead
+ server, by default it is redispatched to another server. It is possible to
+ force the request to be sent to the dead server first using "option persist"
+ if absolutely needed. A common use case is when servers are under extreme
+ load and spend their time flapping. In this case, the users would still be
+ directed to the server they opened the session on, in the hope they would be
+ correctly served. It is recommended to use "option redispatch" in conjunction
+ with this option so that in the event it would not be possible to connect to
+ the server at all (server definitely dead), the client would finally be
+ redirected to another valid server.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option redispatch", "retries", "force-persist"
+
+
+option pgsql-check user <username>
+ Use PostgreSQL health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <username> This is the username which will be used when connecting to
+ PostgreSQL server.
+
+ The check sends a PostgreSQL StartupMessage and waits for either
+ Authentication request or ErrorResponse message. It is a basic but useful
+ test which does not produce error nor aborted connect on the server.
+ This check is identical to the "mysql-check".
+
+ See also: "option httpchk"
+
+
+option prefer-last-server
+no option prefer-last-server
+ Allow multiple load balanced requests to remain on the same server
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When the load balancing algorithm in use is not deterministic, and a previous
+ request was sent to a server to which HAProxy still holds a connection, it is
+ sometimes desirable that subsequent requests on a same session go to the same
+ server as much as possible. Note that this is different from persistence, as
+ we only indicate a preference which HAProxy tries to apply without any form
+ of warranty. The real use is for keep-alive connections sent to servers. When
+ this option is used, HAProxy will try to reuse the same connection that is
+ attached to the server instead of rebalancing to another server, causing a
+ close of the connection. This can make sense for static file servers. It does
+ not make much sense to use this in combination with hashing algorithms. Note,
+ HAProxy already automatically tries to stick to a server which sends a 401 or
+ to a proxy which sends a 407 (authentication required), when the load
+ balancing algorithm is not deterministic. This is mandatory for use with the
+ broken NTLM authentication challenge, and significantly helps in
+ troubleshooting some faulty applications. Option prefer-last-server might be
+ desirable in these environments as well, to avoid redistributing the traffic
+ after every other response.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also: "option http-keep-alive"
+
+
+option redispatch
+option redispatch <interval>
+no option redispatch
+ Enable or disable session redistribution in case of connection failure
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <interval> The optional integer value that controls how often redispatches
+ occur when retrying connections. Positive value P indicates a
+ redispatch is desired on every Pth retry, and negative value
+ N indicates a redispatch is desired on the Nth retry prior to the
+ last retry. For example, the default of -1 preserves the
+ historical behavior of redispatching on the last retry, a
+ positive value of 1 would indicate a redispatch on every retry,
+ and a positive value of 3 would indicate a redispatch on every
+ third retry. You can disable redispatches with a value of 0.
+
+
+ In HTTP mode, if a server designated by a cookie is down, clients may
+ definitely stick to it because they cannot flush the cookie, so they will not
+ be able to access the service anymore.
+
+ Specifying "option redispatch" will allow the proxy to break cookie or
+ consistent hash based persistence and redistribute them to a working server.
+
+ Active servers are selected from a subset of the list of available
+ servers. Active servers that are not down or in maintenance (i.e., whose
+ health is not checked or that have been checked as "up"), are selected in the
+ following order:
+
+ 1. Any active, non-backup server, if any, or,
+
+ 2. If the "allbackups" option is not set, the first backup server in the
+ list, or
+
+ 3. If the "allbackups" option is set, any backup server.
+
+ When a retry occurs, HAProxy tries to select another server than the last
+ one. The new server is selected from the current list of servers.
+
+ Sometimes, if the list is updated between retries (e.g., if numerous retries
+ occur and last longer than the time needed to check that a server is down,
+ remove it from the list and fall back on the list of backup servers),
+ connections may be redirected to a backup server, though.
+
+ It also allows to retry connections to another server in case of multiple
+ connection failures. Of course, it requires having "retries" set to a nonzero
+ value.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "retries", "force-persist"
+
+
+option redis-check
+ Use redis health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ It is possible to test that the server correctly talks REDIS protocol instead
+ of just testing that it accepts the TCP connection. When this option is set,
+ a PING redis command is sent to the server, and the response is analyzed to
+ find the "+PONG" response message.
+
+ Example :
+ option redis-check
+
+ See also : "option httpchk", "option tcp-check", "tcp-check expect"
+
+
+option smtpchk
+option smtpchk <hello> <domain>
+ Use SMTP health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <hello> is an optional argument. It is the "hello" command to use. It can
+ be either "HELO" (for SMTP) or "EHLO" (for ESMTP). All other
+ values will be turned into the default command ("HELO").
+
+ <domain> is the domain name to present to the server. It may only be
+ specified (and is mandatory) if the hello command has been
+ specified. By default, "localhost" is used.
+
+ When "option smtpchk" is set, the health checks will consist in TCP
+ connections followed by an SMTP command. By default, this command is
+ "HELO localhost". The server's return code is analyzed and only return codes
+ starting with a "2" will be considered as valid. All other responses,
+ including a lack of response will constitute an error and will indicate a
+ dead server.
+
+ This test is meant to be used with SMTP servers or relays. Depending on the
+ request, it is possible that some servers do not log each connection attempt,
+ so you may want to experiment to improve the behavior. Using telnet on port
+ 25 is often easier than adjusting the configuration.
+
+ Most often, an incoming SMTP server needs to see the client's IP address for
+ various purposes, including spam filtering, anti-spoofing and logging. When
+ possible, it is often wise to masquerade the client's IP address when
+ connecting to the server using the "usesrc" argument of the "source" keyword,
+ which requires the transparent proxy feature to be compiled in.
+
+ Example :
+ option smtpchk HELO mydomain.org
+
+ See also : "option httpchk", "source"
+
+
+option socket-stats
+no option socket-stats
+
+ Enable or disable collecting & providing separate statistics for each socket.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+
+option splice-auto
+no option splice-auto
+ Enable or disable automatic kernel acceleration on sockets in both directions
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ When this option is enabled either on a frontend or on a backend, HAProxy
+ will automatically evaluate the opportunity to use kernel tcp splicing to
+ forward data between the client and the server, in either direction. HAProxy
+ uses heuristics to estimate if kernel splicing might improve performance or
+ not. Both directions are handled independently. Note that the heuristics used
+ are not very aggressive in order to limit excessive use of splicing. This
+ option requires splicing to be enabled at compile time, and may be globally
+ disabled with the global option "nosplice". Since splice uses pipes, using it
+ requires that there are enough spare pipes.
+
+ Important note: kernel-based TCP splicing is a Linux-specific feature which
+ first appeared in kernel 2.6.25. It offers kernel-based acceleration to
+ transfer data between sockets without copying these data to user-space, thus
+ providing noticeable performance gains and CPU cycles savings. Since many
+ early implementations are buggy, corrupt data and/or are inefficient, this
+ feature is not enabled by default, and it should be used with extreme care.
+ While it is not possible to detect the correctness of an implementation,
+ 2.6.29 is the first version offering a properly working implementation. In
+ case of doubt, splicing may be globally disabled using the global "nosplice"
+ keyword.
+
+ Example :
+ option splice-auto
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option splice-request", "option splice-response", and global
+ options "nosplice" and "maxpipes"
+
+
+option splice-request
+no option splice-request
+ Enable or disable automatic kernel acceleration on sockets for requests
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ When this option is enabled either on a frontend or on a backend, HAProxy
+ will use kernel tcp splicing whenever possible to forward data going from
+ the client to the server. It might still use the recv/send scheme if there
+ are no spare pipes left. This option requires splicing to be enabled at
+ compile time, and may be globally disabled with the global option "nosplice".
+ Since splice uses pipes, using it requires that there are enough spare pipes.
+
+ Important note: see "option splice-auto" for usage limitations.
+
+ Example :
+ option splice-request
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option splice-auto", "option splice-response", and global options
+ "nosplice" and "maxpipes"
+
+
+option splice-response
+no option splice-response
+ Enable or disable automatic kernel acceleration on sockets for responses
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ When this option is enabled either on a frontend or on a backend, HAProxy
+ will use kernel tcp splicing whenever possible to forward data going from
+ the server to the client. It might still use the recv/send scheme if there
+ are no spare pipes left. This option requires splicing to be enabled at
+ compile time, and may be globally disabled with the global option "nosplice".
+ Since splice uses pipes, using it requires that there are enough spare pipes.
+
+ Important note: see "option splice-auto" for usage limitations.
+
+ Example :
+ option splice-response
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option splice-auto", "option splice-request", and global options
+ "nosplice" and "maxpipes"
+
+
+option spop-check
+ Use SPOP health checks for server testing
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ It is possible to test that the server correctly talks SPOP protocol instead
+ of just testing that it accepts the TCP connection. When this option is set,
+ a HELLO handshake is performed between HAProxy and the server, and the
+ response is analyzed to check no error is reported.
+
+ Example :
+ option spop-check
+
+ See also : "option httpchk"
+
+
+option srvtcpka
+no option srvtcpka
+ Enable or disable the sending of TCP keepalive packets on the server side
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When there is a firewall or any session-aware component between a client and
+ a server, and when the protocol involves very long sessions with long idle
+ periods (e.g. remote desktops), there is a risk that one of the intermediate
+ components decides to expire a session which has remained idle for too long.
+
+ Enabling socket-level TCP keep-alives makes the system regularly send packets
+ to the other end of the connection, leaving it active. The delay between
+ keep-alive probes is controlled by the system only and depends both on the
+ operating system and its tuning parameters.
+
+ It is important to understand that keep-alive packets are neither emitted nor
+ received at the application level. It is only the network stack which sees
+ them. For this reason, even if one side of the proxy already uses keep-alives
+ to maintain its connection alive, those keep-alive packets will not be
+ forwarded to the other side of the proxy.
+
+ Please note that this has nothing to do with HTTP keep-alive.
+
+ Using option "srvtcpka" enables the emission of TCP keep-alive probes on the
+ server side of a connection, which should help when session expirations are
+ noticed between HAProxy and a server.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option clitcpka", "option tcpka"
+
+
+option ssl-hello-chk
+ Use SSLv3 client hello health checks for server testing
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ When some SSL-based protocols are relayed in TCP mode through HAProxy, it is
+ possible to test that the server correctly talks SSL instead of just testing
+ that it accepts the TCP connection. When "option ssl-hello-chk" is set, pure
+ SSLv3 client hello messages are sent once the connection is established to
+ the server, and the response is analyzed to find an SSL server hello message.
+ The server is considered valid only when the response contains this server
+ hello message.
+
+ All servers tested so far correctly reply to SSLv3 client hello messages,
+ and most servers tested do not even log the requests containing only hello
+ messages, which is appreciable.
+
+ Note that this check works even when SSL support was not built into HAProxy
+ because it forges the SSL message. When SSL support is available, it is best
+ to use native SSL health checks instead of this one.
+
+ See also: "option httpchk", "check-ssl"
+
+
+option tcp-check
+ Perform health checks using tcp-check send/expect sequences
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ This health check method is intended to be combined with "tcp-check" command
+ lists in order to support send/expect types of health check sequences.
+
+ TCP checks currently support 4 modes of operations :
+ - no "tcp-check" directive : the health check only consists in a connection
+ attempt, which remains the default mode.
+
+ - "tcp-check send" or "tcp-check send-binary" only is mentioned : this is
+ used to send a string along with a connection opening. With some
+ protocols, it helps sending a "QUIT" message for example that prevents
+ the server from logging a connection error for each health check. The
+ check result will still be based on the ability to open the connection
+ only.
+
+ - "tcp-check expect" only is mentioned : this is used to test a banner.
+ The connection is opened and HAProxy waits for the server to present some
+ contents which must validate some rules. The check result will be based
+ on the matching between the contents and the rules. This is suited for
+ POP, IMAP, SMTP, FTP, SSH, TELNET.
+
+ - both "tcp-check send" and "tcp-check expect" are mentioned : this is
+ used to test a hello-type protocol. HAProxy sends a message, the server
+ responds and its response is analyzed. The check result will be based on
+ the matching between the response contents and the rules. This is often
+ suited for protocols which require a binding or a request/response model.
+ LDAP, MySQL, Redis and SSL are examples of such protocols, though they
+ already all have their dedicated checks with a deeper understanding of
+ the respective protocols.
+ In this mode, many questions may be sent and many answers may be
+ analyzed.
+
+ A fifth mode can be used to insert comments in different steps of the script.
+
+ For each tcp-check rule you create, you can add a "comment" directive,
+ followed by a string. This string will be reported in the log and stderr in
+ debug mode. It is useful to make user-friendly error reporting. The
+ "comment" is of course optional.
+
+ During the execution of a health check, a variable scope is made available to
+ store data samples, using the "tcp-check set-var" operation. Freeing those
+ variables is possible using "tcp-check unset-var".
+
+
+ Examples :
+ # perform a POP check (analyze only server's banner)
+ option tcp-check
+ tcp-check expect string +OK\ POP3\ ready comment POP\ protocol
+
+ # perform an IMAP check (analyze only server's banner)
+ option tcp-check
+ tcp-check expect string *\ OK\ IMAP4\ ready comment IMAP\ protocol
+
+ # look for the redis master server after ensuring it speaks well
+ # redis protocol, then it exits properly.
+ # (send a command then analyze the response 3 times)
+ option tcp-check
+ tcp-check comment PING\ phase
+ tcp-check send PING\r\n
+ tcp-check expect string +PONG
+ tcp-check comment role\ check
+ tcp-check send info\ replication\r\n
+ tcp-check expect string role:master
+ tcp-check comment QUIT\ phase
+ tcp-check send QUIT\r\n
+ tcp-check expect string +OK
+
+ # forge a HTTP request, then analyze the response
+ # (send many headers before analyzing)
+ option tcp-check
+ tcp-check comment forge\ and\ send\ HTTP\ request
+ tcp-check send HEAD\ /\ HTTP/1.1\r\n
+ tcp-check send Host:\ www.mydomain.com\r\n
+ tcp-check send User-Agent:\ HAProxy\ tcpcheck\r\n
+ tcp-check send \r\n
+ tcp-check expect rstring HTTP/1\..\ (2..|3..) comment check\ HTTP\ response
+
+
+ See also : "tcp-check connect", "tcp-check expect" and "tcp-check send".
+
+
+option tcp-smart-accept
+no option tcp-smart-accept
+ Enable or disable the saving of one ACK packet during the accept sequence
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ When an HTTP connection request comes in, the system acknowledges it on
+ behalf of HAProxy, then the client immediately sends its request, and the
+ system acknowledges it too while it is notifying HAProxy about the new
+ connection. HAProxy then reads the request and responds. This means that we
+ have one TCP ACK sent by the system for nothing, because the request could
+ very well be acknowledged by HAProxy when it sends its response.
+
+ For this reason, in HTTP mode, HAProxy automatically asks the system to avoid
+ sending this useless ACK on platforms which support it (currently at least
+ Linux). It must not cause any problem, because the system will send it anyway
+ after 40 ms if the response takes more time than expected to come.
+
+ During complex network debugging sessions, it may be desirable to disable
+ this optimization because delayed ACKs can make troubleshooting more complex
+ when trying to identify where packets are delayed. It is then possible to
+ fall back to normal behavior by specifying "no option tcp-smart-accept".
+
+ It is also possible to force it for non-HTTP proxies by simply specifying
+ "option tcp-smart-accept". For instance, it can make sense with some services
+ such as SMTP where the server speaks first.
+
+ It is recommended to avoid forcing this option in a defaults section. In case
+ of doubt, consider setting it back to automatic values by prepending the
+ "default" keyword before it, or disabling it using the "no" keyword.
+
+ See also : "option tcp-smart-connect"
+
+
+option tcp-smart-connect
+no option tcp-smart-connect
+ Enable or disable the saving of one ACK packet during the connect sequence
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ On certain systems (at least Linux), HAProxy can ask the kernel not to
+ immediately send an empty ACK upon a connection request, but to directly
+ send the buffer request instead. This saves one packet on the network and
+ thus boosts performance. It can also be useful for some servers, because they
+ immediately get the request along with the incoming connection.
+
+ This feature is enabled when "option tcp-smart-connect" is set in a backend.
+ It is not enabled by default because it makes network troubleshooting more
+ complex.
+
+ It only makes sense to enable it with protocols where the client speaks first
+ such as HTTP. In other situations, if there is no data to send in place of
+ the ACK, a normal ACK is sent.
+
+ If this option has been enabled in a "defaults" section, it can be disabled
+ in a specific instance by prepending the "no" keyword before it.
+
+ See also : "option tcp-smart-accept"
+
+
+option tcpka
+ Enable or disable the sending of TCP keepalive packets on both sides
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ When there is a firewall or any session-aware component between a client and
+ a server, and when the protocol involves very long sessions with long idle
+ periods (e.g. remote desktops), there is a risk that one of the intermediate
+ components decides to expire a session which has remained idle for too long.
+
+ Enabling socket-level TCP keep-alives makes the system regularly send packets
+ to the other end of the connection, leaving it active. The delay between
+ keep-alive probes is controlled by the system only and depends both on the
+ operating system and its tuning parameters.
+
+ It is important to understand that keep-alive packets are neither emitted nor
+ received at the application level. It is only the network stacks which see
+ them. For this reason, even if one side of the proxy already uses keep-alives
+ to maintain its connection alive, those keep-alive packets will not be
+ forwarded to the other side of the proxy.
+
+ Please note that this has nothing to do with HTTP keep-alive.
+
+ Using option "tcpka" enables the emission of TCP keep-alive probes on both
+ the client and server sides of a connection. Note that this is meaningful
+ only in "defaults" or "listen" sections. If this option is used in a
+ frontend, only the client side will get keep-alives, and if this option is
+ used in a backend, only the server side will get keep-alives. For this
+ reason, it is strongly recommended to explicitly use "option clitcpka" and
+ "option srvtcpka" when the configuration is split between frontends and
+ backends.
+
+ See also : "option clitcpka", "option srvtcpka"
+
+
+option tcplog
+ Enable advanced logging of TCP connections with stream state and timers
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments : none
+
+ By default, the log output format is very poor, as it only contains the
+ source and destination addresses, and the instance name. By specifying
+ "option tcplog", each log line turns into a much richer format including, but
+ not limited to, the connection timers, the stream status, the connections
+ numbers, the frontend, backend and server name, and of course the source
+ address and ports. This option is useful for pure TCP proxies in order to
+ find which of the client or server disconnects or times out. For normal HTTP
+ proxies, it's better to use "option httplog" which is even more complete.
+
+ "option tcplog" overrides any previous "log-format" directive.
+
+ See also : "option httplog", and section 8 about logging.
+
+
+option transparent
+no option transparent
+ Enable client-side transparent proxying
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ This option was introduced in order to provide layer 7 persistence to layer 3
+ load balancers. The idea is to use the OS's ability to redirect an incoming
+ connection for a remote address to a local process (here HAProxy), and let
+ this process know what address was initially requested. When this option is
+ used, sessions without cookies will be forwarded to the original destination
+ IP address of the incoming request (which should match that of another
+ equipment), while requests with cookies will still be forwarded to the
+ appropriate server.
+
+ Note that contrary to a common belief, this option does NOT make HAProxy
+ present the client's IP to the server when establishing the connection.
+
+ See also: the "usesrc" argument of the "source" keyword, and the
+ "transparent" option of the "bind" keyword.
+
+
+external-check command <command>
+ Executable to run when performing an external-check
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <command> is the external command to run
+
+ The arguments passed to the command are:
+
+ <proxy_address> <proxy_port> <server_address> <server_port>
+
+ The <proxy_address> and <proxy_port> are derived from the first listener
+ that is either IPv4, IPv6 or a UNIX socket. In the case of a UNIX socket
+ listener the proxy_address will be the path of the socket and the
+ <proxy_port> will be the string "NOT_USED". In a backend section, it's not
+ possible to determine a listener, and both <proxy_address> and <proxy_port>
+ will have the string value "NOT_USED".
+
+ Some values are also provided through environment variables.
+
+ Environment variables :
+ HAPROXY_PROXY_ADDR The first bind address if available (or empty if not
+ applicable, for example in a "backend" section).
+
+ HAPROXY_PROXY_ID The backend id.
+
+ HAPROXY_PROXY_NAME The backend name.
+
+ HAPROXY_PROXY_PORT The first bind port if available (or empty if not
+ applicable, for example in a "backend" section or
+ for a UNIX socket).
+
+ HAPROXY_SERVER_ADDR The server address.
+
+ HAPROXY_SERVER_CURCONN The current number of connections on the server.
+
+ HAPROXY_SERVER_ID The server id.
+
+ HAPROXY_SERVER_MAXCONN The server max connections.
+
+ HAPROXY_SERVER_NAME The server name.
+
+ HAPROXY_SERVER_PORT The server port if available (or empty for a UNIX
+ socket).
+
+ HAPROXY_SERVER_SSL "0" when SSL is not used, "1" when it is used
+
+ HAPROXY_SERVER_PROTO The protocol used by this server, which can be one
+ of "cli" (the haproxy CLI), "syslog" (syslog TCP
+ server), "peers" (peers TCP server), "h1" (HTTP/1.x
+ server), "h2" (HTTP/2 server), or "tcp" (any other
+ TCP server).
+
+ PATH The PATH environment variable used when executing
+ the command may be set using "external-check path".
+
+ See also "2.3. Environment variables" for other variables.
+
+ If the command executes and exits with a zero status, then the check is
+ considered to have passed, otherwise the check is considered to have
+ failed.
+
+ Example :
+ external-check command /bin/true
+
+ See also : "external-check", "option external-check", "external-check path"
+
+
+external-check path <path>
+ The value of the PATH environment variable used when running an external-check
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <path> is the path used when executing external command to run
+
+ The default path is "".
+
+ Example :
+ external-check path "/usr/bin:/bin"
+
+ See also : "external-check", "option external-check",
+ "external-check command"
+
+
+persist rdp-cookie
+persist rdp-cookie(<name>)
+ Enable RDP cookie-based persistence
+
+ May be used in the following contexts: tcp
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <name> is the optional name of the RDP cookie to check. If omitted, the
+ default cookie name "msts" will be used. There currently is no
+ valid reason to change this name.
+
+ This statement enables persistence based on an RDP cookie. The RDP cookie
+ contains all information required to find the server in the list of known
+ servers. So when this option is set in the backend, the request is analyzed
+ and if an RDP cookie is found, it is decoded. If it matches a known server
+ which is still UP (or if "option persist" is set), then the connection is
+ forwarded to this server.
+
+ Note that this only makes sense in a TCP backend, but for this to work, the
+ frontend must have waited long enough to ensure that an RDP cookie is present
+ in the request buffer. This is the same requirement as with the "rdp-cookie"
+ load-balancing method. Thus it is highly recommended to put all statements in
+ a single "listen" section.
+
+ Also, it is important to understand that the terminal server will emit this
+ RDP cookie only if it is configured for "token redirection mode", which means
+ that the "IP address redirection" option is disabled.
+
+ Example :
+ listen tse-farm
+ bind :3389
+ # wait up to 5s for an RDP cookie in the request
+ tcp-request inspect-delay 5s
+ tcp-request content accept if RDP_COOKIE
+ # apply RDP cookie persistence
+ persist rdp-cookie
+ # if server is unknown, let's balance on the same cookie.
+ # alternatively, "balance leastconn" may be useful too.
+ balance rdp-cookie
+ server srv1 1.1.1.1:3389
+ server srv2 1.1.1.2:3389
+
+ See also : "balance rdp-cookie", "tcp-request" and the "req.rdp_cookie" ACL.
+
+
+rate-limit sessions <rate>
+ Set a limit on the number of new sessions accepted per second on a frontend
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <rate> The <rate> parameter is an integer designating the maximum number
+ of new sessions per second to accept on the frontend.
+
+ When the frontend reaches the specified number of new sessions per second, it
+ stops accepting new connections until the rate drops below the limit again.
+ During this time, the pending sessions will be kept in the socket's backlog
+ (in system buffers) and HAProxy will not even be aware that sessions are
+ pending. When applying a very low limit on a highly loaded service, it may make
+ sense to increase the socket's backlog using the "backlog" keyword.
+
+ This feature is particularly efficient at blocking connection-based attacks
+ or service abuse on fragile servers. Since the session rate is measured every
+ millisecond, it is extremely accurate. Also, the limit applies immediately,
+ no delay is needed at all to detect the threshold.
+
+ Example : limit the connection rate on SMTP to 10 per second max
+ listen smtp
+ mode tcp
+ bind :25
+ rate-limit sessions 10
+ server smtp1 127.0.0.1:1025
+
+ Note : when the maximum rate is reached, the frontend's status is not changed
+ but its sockets appear as "WAITING" in the statistics if the
+ "socket-stats" option is enabled.
+
+ See also : the "backlog" keyword and the "fe_sess_rate" ACL criterion.
+
+
+redirect location <loc> [code <code>] <option> [{if | unless} <condition>]
+redirect prefix <pfx> [code <code>] <option> [{if | unless} <condition>]
+redirect scheme <sch> [code <code>] <option> [{if | unless} <condition>]
+ Return an HTTP redirection if/unless a condition is matched
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ If/unless the condition is matched, the HTTP request will lead to a redirect
+ response. If no condition is specified, the redirect applies unconditionally.
+
+ Arguments :
+ <loc> With "redirect location", the exact value in <loc> is placed into
+ the HTTP "Location" header. When used in an "http-request" rule,
+ <loc> value follows the log-format rules and can include some
+ dynamic values (see Custom Log Format in section 8.2.4).
+
+ <pfx> With "redirect prefix", the "Location" header is built from the
+ concatenation of <pfx> and the complete URI path, including the
+ query string, unless the "drop-query" option is specified (see
+ below). As a special case, if <pfx> equals exactly "/", then
+ nothing is inserted before the original URI. It allows one to
+ redirect to the same URL (for instance, to insert a cookie). When
+ used in an "http-request" rule, <pfx> value follows the log-format
+ rules and can include some dynamic values (see Custom Log Format
+ in section 8.2.4).
+
+ <sch> With "redirect scheme", then the "Location" header is built by
+ concatenating <sch> with "://" then the first occurrence of the
+ "Host" header, and then the URI path, including the query string
+ unless the "drop-query" option is specified (see below). If no
+ path is found or if the path is "*", then "/" is used instead. If
+ no "Host" header is found, then an empty host component will be
+ returned, which most recent browsers interpret as redirecting to
+ the same host. This directive is mostly used to redirect HTTP to
+ HTTPS. When used in an "http-request" rule, <sch> value follows
+ the log-format rules and can include some dynamic values (see
+ Custom Log Format in section 8.2.4).
+
+ <code> The code is optional. It indicates which type of HTTP redirection
+ is desired. Only codes 301, 302, 303, 307 and 308 are supported,
+ with 302 used by default if no code is specified. 301 means
+ "Moved permanently", and a browser may cache the Location. 302
+ means "Moved temporarily" and means that the browser should not
+ cache the redirection. 303 is equivalent to 302 except that the
+ browser will fetch the location with a GET method. 307 is just
+ like 302 but makes it clear that the same method must be reused.
+ Likewise, 308 replaces 301 if the same method must be used.
+
+ <option> There are several options which can be specified to adjust the
+ expected behavior of a redirection :
+
+ - "drop-query"
+ When this keyword is used in a prefix-based redirection, then the
+ location will be set without any possible query-string, which is useful
+ for directing users to a non-secure page for instance. It has no effect
+ with a location-type redirect.
+
+ - "append-slash"
+ This keyword may be used in conjunction with "drop-query" to redirect
+ users who use a URL not ending with a '/' to the same one with the '/'.
+ It can be useful to ensure that search engines will only see one URL.
+ For this, a return code 301 is preferred.
+
+ - "ignore-empty"
+ This keyword only has effect when a location is produced using a log
+ format expression (i.e. when used in http-request or http-response).
+ It indicates that if the result of the expression is empty, the rule
+ should silently be skipped. The main use is to allow mass-redirects
+ of known paths using a simple map.
+
+ - "set-cookie NAME[=value]"
+ A "Set-Cookie" header will be added with NAME (and optionally "=value")
+ to the response. This is sometimes used to indicate that a user has
+ been seen, for instance to protect against some types of DoS. No other
+ cookie option is added, so the cookie will be a session cookie. Note
+ that for a browser, a sole cookie name without an equal sign is
+ different from a cookie with an equal sign.
+
+ - "clear-cookie NAME[=]"
+ A "Set-Cookie" header will be added with NAME (and optionally "="), but
+ with the "Max-Age" attribute set to zero. This will tell the browser to
+ delete this cookie. It is useful for instance on logout pages. It is
+ important to note that clearing the cookie "NAME" will not remove a
+ cookie set with "NAME=value". You have to clear the cookie "NAME=" for
+ that, because the browser makes the difference.
+
+ Example: move the login URL only to HTTPS.
+ acl clear dst_port 80
+ acl secure dst_port 8080
+ acl login_page url_beg /login
+ acl logout url_beg /logout
+ acl uid_given url_reg /login?userid=[^&]+
+ acl cookie_set hdr_sub(cookie) SEEN=1
+
+ redirect prefix https://mysite.com set-cookie SEEN=1 if !cookie_set
+ redirect prefix https://mysite.com if login_page !secure
+ redirect prefix http://mysite.com drop-query if login_page !uid_given
+ redirect location http://mysite.com/ if !login_page secure
+ redirect location / clear-cookie USERID= if logout
+
+ Example: send redirects for request for articles without a '/'.
+ acl missing_slash path_reg ^/article/[^/]*$
+ redirect code 301 prefix / drop-query append-slash if missing_slash
+
+ Example: redirect all HTTP traffic to HTTPS when SSL is handled by HAProxy.
+ redirect scheme https if !{ ssl_fc }
+
+ Example: append 'www.' prefix in front of all hosts not having it
+ http-request redirect code 301 location \
+ http://www.%[hdr(host)]%[capture.req.uri] \
+ unless { hdr_beg(host) -i www }
+
+ Example: permanently redirect only old URLs to new ones
+ http-request redirect code 301 location \
+ %[path,map_str(old-blog-articles.map)] ignore-empty
+
+ See section 7 about ACL usage.
+
+
+retries <value>
+ Set the number of retries to perform on a server after a failure
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <value> is the number of times a request or connection attempt should be
+ retried on a server after a failure.
+
+ By default, retries apply only to new connection attempts. However, when
+ the "retry-on" directive is used, other conditions might trigger a retry
+ (e.g. empty response, undesired status code), and each of them will count
+ one attempt, and when the total number of attempts reaches the value here, an
+ error will be returned.
+
+ In order to avoid immediate reconnections to a server which is restarting,
+ a turn-around timer of min("timeout connect", one second) is applied before
+ a retry occurs on the same server.
+
+ When "option redispatch" is set, some retries may be performed on another
+ server even if a cookie references a different server. By default this will
+ only be the last retry unless an argument is passed to "option redispatch".
+
+ See also : "option redispatch"
+
+
+retry-on [space-delimited list of keywords]
+ Specify when to attempt to automatically retry a failed request.
+ This setting is only valid when "mode" is set to http and is silently ignored
+ otherwise.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <keywords> is a space-delimited list of keywords or HTTP status codes, each
+ representing a type of failure event on which an attempt to
+ retry the request is desired. Please read the notes at the
+ bottom before changing this setting. The following keywords are
+ supported :
+
+ none never retry
+
+ conn-failure retry when the connection or the SSL handshake failed
+ and the request could not be sent. This is the default.
+
+ empty-response retry when the server connection was closed after part
+ of the request was sent, and nothing was received from
+ the server. This type of failure may be caused by the
+ request timeout on the server side, poor network
+ condition, or a server crash or restart while
+ processing the request.
+
+ junk-response retry when the server returned something not looking
+ like a complete HTTP response. This includes partial
+ responses headers as well as non-HTTP contents. It
+ usually is a bad idea to retry on such events, which
+ may be caused by a configuration issue (wrong server port)
+ or by the request being harmful to the server (buffer
+ overflow attack for example).
+
+ response-timeout the server timeout struck while waiting for the server
+ to respond to the request. This may be caused by poor
+ network condition, the reuse of an idle connection
+ which has expired on the path, or by the request being
+ extremely expensive to process. It generally is a bad
+ idea to retry on such events on servers dealing with
+ heavy database processing (full scans, etc) as it may
+ amplify denial of service attacks.
+
+ 0rtt-rejected retry requests which were sent over early data and were
+ rejected by the server. These requests are generally
+ considered to be safe to retry.
+
+ <status> any HTTP status code among "401" (Unauthorized), "403"
+ (Forbidden), "404" (Not Found), "408" (Request Timeout),
+ "425" (Too Early), "500" (Server Error), "501" (Not
+ Implemented), "502" (Bad Gateway), "503" (Service
+ Unavailable), "504" (Gateway Timeout).
+
+ all-retryable-errors
+ retry the request for any error that is considered
+ retryable. This currently activates "conn-failure",
+ "empty-response", "junk-response", "response-timeout",
+ "0rtt-rejected", "500", "502", "503", and "504".
+
+ Using this directive replaces any previous settings with the new ones; it is
+ not cumulative.
+
+ Please note that using anything other than "none" and "conn-failure" requires
+ to allocate a buffer and copy the whole request into it, so it has memory and
+ performance impacts. Requests not fitting in a single buffer will never be
+ retried (see the global tune.bufsize setting).
+
+ You have to make sure the application has a replay protection mechanism built
+ in such as a unique transaction IDs passed in requests, or that replaying the
+ same request has no consequence, or it is very dangerous to use any retry-on
+ value beside "conn-failure" and "none". Static file servers and caches are
+ generally considered safe against any type of retry. Using a status code can
+ be useful to quickly leave a server showing an abnormal behavior (out of
+ memory, file system issues, etc), but in this case it may be a good idea to
+ immediately redispatch the connection to another server (please see "option
+ redispatch" for this). Last, it is important to understand that most causes
+ of failures are the requests themselves and that retrying a request causing a
+ server to misbehave will often make the situation even worse for this server,
+ or for the whole service in case of redispatch.
+
+ Unless you know exactly how the application deals with replayed requests, you
+ should not use this directive.
+
+ The default is "conn-failure".
+
+ Example:
+ retry-on 503 504
+
+ See also: "retries", "option redispatch", "tune.bufsize"
+
+server <name> <address>[:[port]] [param*]
+ Declare a server in a backend
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <name> is the internal name assigned to this server. This name will
+ appear in logs and alerts. If "http-send-name-header" is
+ set, it will be added to the request header sent to the server.
+
+ <address> is the IPv4 or IPv6 address of the server. Alternatively, a
+ resolvable hostname is supported, but this name will be resolved
+ during start-up. Address "0.0.0.0" or "*" has a special meaning.
+ It indicates that the connection will be forwarded to the same IP
+ address as the one from the client connection. This is useful in
+ transparent proxy architectures where the client's connection is
+ intercepted and HAProxy must forward to the original destination
+ address. This is more or less what the "transparent" keyword does
+ except that with a server it's possible to limit concurrency and
+ to report statistics. Optionally, an address family prefix may be
+ used before the address to force the family regardless of the
+ address format, which can be useful to specify a path to a unix
+ socket with no slash ('/'). Currently supported prefixes are :
+ - 'ipv4@' -> address is always IPv4
+ - 'ipv6@' -> address is always IPv6
+ - 'unix@' -> address is a path to a local unix socket
+ - 'abns@' -> address is in abstract namespace (Linux only)
+ - 'sockpair@' -> address is the FD of a connected unix
+ socket or of a socketpair. During a connection, the
+ backend creates a pair of connected sockets, and passes
+ one of them over the FD. The bind part will use the
+ received socket as the client FD. Should be used
+ carefully.
+ - 'rhttp@' [ EXPERIMENTAL ] -> custom address family for a
+ passive server in HTTP reverse context. This is an
+ experimental feature which requires
+ "expose-experimental-directives" on a line before this
+ server.
+ You may want to reference some environment variables in the
+ address parameter, see section 2.3 about environment
+ variables. The "init-addr" setting can be used to modify the way
+ IP addresses should be resolved upon startup.
+
+ <port> is an optional port specification. If set, all connections will
+ be sent to this port. If unset, the same port the client
+ connected to will be used. The port may also be prefixed by a "+"
+ or a "-". In this case, the server's port will be determined by
+ adding this value to the client's port.
+
+ <param*> is a list of parameters for this server. The "server" keywords
+ accepts an important number of options and has a complete section
+ dedicated to it. Please refer to section 5 for more details.
+
+ Examples :
+ server first 10.1.1.1:1080 cookie first check inter 1000
+ server second 10.1.1.2:1080 cookie second check inter 1000
+ server transp ipv4@
+ server backup "${SRV_BACKUP}:1080" backup
+ server www1_dc1 "${LAN_DC1}.101:80"
+ server www1_dc2 "${LAN_DC2}.101:80"
+
+ Note: regarding Linux's abstract namespace sockets, HAProxy uses the whole
+ sun_path length for the address length. Some other programs
+ such as socat use the string length only by default. Pass the option
+ ",unix-tightsocklen=0" to any abstract socket definition in socat to
+ make it compatible with HAProxy's.
+
+ See also: "default-server", "http-send-name-header" and section 5 about
+ server options
+
+server-state-file-name [ { use-backend-name | <file> } ]
+ Set the server state file to read, load and apply to servers available in
+ this backend.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ It only applies when the directive "load-server-state-from-file" is set to
+ "local". When <file> is not provided, if "use-backend-name" is used or if
+ this directive is not set, then backend name is used. If <file> starts with a
+ slash '/', then it is considered as an absolute path. Otherwise, <file> is
+ concatenated to the global directive "server-state-base".
+
+ Example: the minimal configuration below would make HAProxy look for the
+ state server file '/etc/haproxy/states/bk':
+
+ global
+ server-state-file-base /etc/haproxy/states
+
+ backend bk
+ load-server-state-from-file
+
+ See also: "server-state-base", "load-server-state-from-file", and
+ "show servers state"
+
+server-template <prefix> <num | range> <fqdn>[:<port>] [params*]
+ Set a template to initialize servers with shared parameters.
+ The names of these servers are built from <prefix> and <num | range> parameters.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments:
+ <prefix> A prefix for the server names to be built.
+
+ <num | range>
+ If <num> is provided, this template initializes <num> servers
+ with 1 up to <num> as server name suffixes. A range of numbers
+ <num_low>-<num_high> may also be used to use <num_low> up to
+ <num_high> as server name suffixes.
+
+ <fqdn> A FQDN for all the servers this template initializes.
+
+ <port> Same meaning as "server" <port> argument (see "server" keyword).
+
+ <params*>
+ Remaining server parameters among all those supported by "server"
+ keyword.
+
+ Examples:
+ # Initializes 3 servers with srv1, srv2 and srv3 as names,
+ # google.com as FQDN, and health-check enabled.
+ server-template srv 1-3 google.com:80 check
+
+ # or
+ server-template srv 3 google.com:80 check
+
+ # would be equivalent to:
+ server srv1 google.com:80 check
+ server srv2 google.com:80 check
+ server srv3 google.com:80 check
+
+
+
+source <addr>[:<port>] [usesrc { <addr2>[:<port2>] | client | clientip } ]
+source <addr>[:<port>] [usesrc { <addr2>[:<port2>] | hdr_ip(<hdr>[,<occ>]) } ]
+source <addr>[:<port>] [interface <name>]
+ Set the source address for outgoing connections
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <addr> is the IPv4 address HAProxy will bind to before connecting to a
+ server. This address is also used as a source for health checks.
+
+ The default value of 0.0.0.0 means that the system will select
+ the most appropriate address to reach its destination. Optionally
+ an address family prefix may be used before the address to force
+ the family regardless of the address format, which can be useful
+ to specify a path to a unix socket with no slash ('/'). Currently
+ supported prefixes are :
+ - 'ipv4@' -> address is always IPv4
+ - 'ipv6@' -> address is always IPv6
+ - 'unix@' -> address is a path to a local unix socket
+ - 'abns@' -> address is in abstract namespace (Linux only)
+ You may want to reference some environment variables in the
+ address parameter, see section 2.3 about environment variables.
+
+ <port> is an optional port. It is normally not needed but may be useful
+ in some very specific contexts. The default value of zero means
+ the system will select a free port. Note that port ranges are not
+ supported in the backend. If you want to force port ranges, you
+ have to specify them on each "server" line.
+
+ <addr2> is the IP address to present to the server when connections are
+ forwarded in full transparent proxy mode. This is currently only
+ supported on some patched Linux kernels. When this address is
+ specified, clients connecting to the server will be presented
+ with this address, while health checks will still use the address
+ <addr>.
+
+ <port2> is the optional port to present to the server when connections
+ are forwarded in full transparent proxy mode (see <addr2> above).
+ The default value of zero means the system will select a free
+ port.
+
+ <hdr> is the name of a HTTP header in which to fetch the IP to bind to.
+ This is the name of a comma-separated header list which can
+ contain multiple IP addresses. By default, the last occurrence is
+ used. This is designed to work with the X-Forwarded-For header
+ and to automatically bind to the client's IP address as seen
+ by previous proxy, typically Stunnel. In order to use another
+ occurrence from the last one, please see the <occ> parameter
+ below. When the header (or occurrence) is not found, no binding
+ is performed so that the proxy's default IP address is used. Also
+ keep in mind that the header name is case insensitive, as for any
+ HTTP header.
+
+ <occ> is the occurrence number of a value to be used in a multi-value
+ header. This is to be used in conjunction with "hdr_ip(<hdr>)",
+ in order to specify which occurrence to use for the source IP
+ address. Positive values indicate a position from the first
+ occurrence, 1 being the first one. Negative values indicate
+ positions relative to the last one, -1 being the last one. This
+ is helpful for situations where an X-Forwarded-For header is set
+ at the entry point of an infrastructure and must be used several
+ proxy layers away. When this value is not specified, -1 is
+ assumed. Passing a zero here disables the feature.
+
+ <name> is an optional interface name to which to bind to for outgoing
+ traffic. On systems supporting this features (currently, only
+ Linux), this allows one to bind all traffic to the server to
+ this interface even if it is not the one the system would select
+ based on routing tables. This should be used with extreme care.
+ Note that using this option requires root privileges.
+
+ The "source" keyword is useful in complex environments where a specific
+ address only is allowed to connect to the servers. It may be needed when a
+ private address must be used through a public gateway for instance, and it is
+ known that the system cannot determine the adequate source address by itself.
+
+ An extension which is available on certain patched Linux kernels may be used
+ through the "usesrc" optional keyword. It makes it possible to connect to the
+ servers with an IP address which does not belong to the system itself. This
+ is called "full transparent proxy mode". For this to work, the destination
+ servers have to route their traffic back to this address through the machine
+ running HAProxy, and IP forwarding must generally be enabled on this machine.
+
+ In this "full transparent proxy" mode, it is possible to force a specific IP
+ address to be presented to the servers. This is not much used in fact. A more
+ common use is to tell HAProxy to present the client's IP address. For this,
+ there are two methods :
+
+ - present the client's IP and port addresses. This is the most transparent
+ mode, but it can cause problems when IP connection tracking is enabled on
+ the machine, because a same connection may be seen twice with different
+ states. However, this solution presents the huge advantage of not
+ limiting the system to the 64k outgoing address+port couples, because all
+ of the client ranges may be used.
+
+ - present only the client's IP address and select a spare port. This
+ solution is still quite elegant but slightly less transparent (downstream
+ firewalls logs will not match upstream's). It also presents the downside
+ of limiting the number of concurrent connections to the usual 64k ports.
+ However, since the upstream and downstream ports are different, local IP
+ connection tracking on the machine will not be upset by the reuse of the
+ same session.
+
+ This option sets the default source for all servers in the backend. It may
+ also be specified in a "defaults" section. Finer source address specification
+ is possible at the server level using the "source" server option. Refer to
+ section 5 for more information.
+
+ In order to work, "usesrc" requires root privileges, or on supported systems,
+ the "cap_net_raw" capability. See also the "setcap" global directive.
+
+ Examples :
+ backend private
+ # Connect to the servers using our 192.168.1.200 source address
+ source 192.168.1.200
+
+ backend transparent_ssl1
+ # Connect to the SSL farm from the client's source address
+ source 192.168.1.200 usesrc clientip
+
+ backend transparent_ssl2
+ # Connect to the SSL farm from the client's source address and port
+ # not recommended if IP conntrack is present on the local machine.
+ source 192.168.1.200 usesrc client
+
+ backend transparent_ssl3
+ # Connect to the SSL farm from the client's source address. It
+ # is more conntrack-friendly.
+ source 192.168.1.200 usesrc clientip
+
+ backend transparent_smtp
+ # Connect to the SMTP farm from the client's source address/port
+ # with Tproxy version 4.
+ source 0.0.0.0 usesrc clientip
+
+ backend transparent_http
+ # Connect to the servers using the client's IP as seen by previous
+ # proxy.
+ source 0.0.0.0 usesrc hdr_ip(x-forwarded-for,-1)
+
+ See also : the "source" server option in section 5, the Tproxy patches for
+ the Linux kernel on www.balabit.com, the "bind" keyword.
+
+
+srvtcpka-cnt <count>
+ Sets the maximum number of keepalive probes TCP should send before dropping
+ the connection on the server side.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <count> is the maximum number of keepalive probes.
+
+ This keyword corresponds to the socket option TCP_KEEPCNT. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_probes) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option srvtcpka", "srvtcpka-idle", "srvtcpka-intvl".
+
+
+srvtcpka-idle <timeout>
+  Sets the time the connection needs to remain idle before TCP starts sending
+  keepalive probes, if the sending of TCP keepalive packets is enabled on the
+  server side.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the time the connection needs to remain idle before TCP starts
+ sending keepalive probes. It is specified in seconds by default,
+ but can be in any other unit if the number is suffixed by the
+ unit, as explained at the top of this document.
+
+ This keyword corresponds to the socket option TCP_KEEPIDLE. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_time) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option srvtcpka", "srvtcpka-cnt", "srvtcpka-intvl".
+
+
+srvtcpka-intvl <timeout>
+ Sets the time between individual keepalive probes on the server side.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the time between individual keepalive probes. It is specified
+ in seconds by default, but can be in any other unit if the number
+ is suffixed by the unit, as explained at the top of this
+ document.
+
+ This keyword corresponds to the socket option TCP_KEEPINTVL. If this keyword
+ is not specified, system-wide TCP parameter (tcp_keepalive_intvl) is used.
+ The availability of this setting depends on the operating system. It is
+ known to work on Linux.
+
+ See also : "option srvtcpka", "srvtcpka-cnt", "srvtcpka-idle".
+
+
+stats admin { if | unless } <cond>
+ Enable statistics admin level if/unless a condition is matched
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ This statement enables the statistics admin level if/unless a condition is
+ matched.
+
+ The admin level allows to enable/disable servers from the web interface. By
+ default, statistics page is read-only for security reasons.
+
+ Currently, the POST request is limited to the buffer size minus the reserved
+ buffer space, which means that if the list of servers is too long, the
+  request won't be processed. It is recommended to alter a few servers at a
+ time.
+
+ Example :
+ # statistics admin level only for localhost
+ backend stats_localhost
+ stats enable
+ stats admin if LOCALHOST
+
+ Example :
+ # statistics admin level always enabled because of the authentication
+ backend stats_auth
+ stats enable
+ stats auth admin:AdMiN123
+ stats admin if TRUE
+
+ Example :
+ # statistics admin level depends on the authenticated user
+ userlist stats-auth
+ group admin users admin
+ user admin insecure-password AdMiN123
+ group readonly users haproxy
+ user haproxy insecure-password haproxy
+
+ backend stats_auth
+ stats enable
+ acl AUTH http_auth(stats-auth)
+ acl AUTH_ADMIN http_auth_group(stats-auth) admin
+ stats http-request auth unless AUTH
+ stats admin if AUTH_ADMIN
+
+ See also : "stats enable", "stats auth", "stats http-request", section 3.4
+ about userlists and section 7 about ACL usage.
+
+
+stats auth <user>:<passwd>
+ Enable statistics with authentication and grant access to an account
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <user> is a user name to grant access to
+
+ <passwd> is the cleartext password associated to this user
+
+ This statement enables statistics with default settings, and restricts access
+ to declared users only. It may be repeated as many times as necessary to
+ allow as many users as desired. When a user tries to access the statistics
+  without a valid account, a "401 Unauthorized" response will be returned so
+  that the browser asks the user to provide a valid user and password. The
+  realm which will be returned to the browser is configurable using
+  "stats realm".
+
+ Since the authentication method is HTTP Basic Authentication, the passwords
+ circulate in cleartext on the network. Thus, it was decided that the
+ configuration file would also use cleartext passwords to remind the users
+ that those ones should not be sensitive and not shared with any other account.
+
+ It is also possible to reduce the scope of the proxies which appear in the
+ report using "stats scope".
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats enable", "stats realm", "stats scope", "stats uri"
+
+
+stats enable
+ Enable statistics reporting with default settings
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ This statement enables statistics reporting with default settings defined
+ at build time. Unless stated otherwise, these settings are used :
+ - stats uri : /haproxy?stats
+ - stats realm : "HAProxy Statistics"
+ - stats auth : no authentication
+ - stats scope : no restriction
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats realm", "stats uri"
+
+
+stats hide-version
+ Enable statistics and hide HAProxy version reporting
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ By default, the stats page reports some useful status information along with
+ the statistics. Among them is HAProxy's version. However, it is generally
+ considered dangerous to report precise version to anyone, as it can help them
+ target known weaknesses with specific attacks. The "stats hide-version"
+ statement removes the version from the statistics report. This is recommended
+ for public sites or any site with a weak login/password.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats enable", "stats realm", "stats uri"
+
+
+stats http-request { allow | deny | auth [realm <realm>] }
+ [ { if | unless } <condition> ]
+ Access control for statistics
+
+ May be used in the following contexts: http
+
+ May be used in sections: defaults | frontend | listen | backend
+ no | no | yes | yes
+
+  As with "http-request", this set of options allows fine-grained control of
+  access to statistics. Each option may be followed by if/unless and acl.
+ First option with matched condition (or option without condition) is final.
+ For "deny" a 403 error will be returned, for "allow" normal processing is
+ performed, for "auth" a 401/407 error code is returned so the client
+ should be asked to enter a username and password.
+
+ There is no fixed limit to the number of http-request statements per
+ instance.
+
+ See also : "http-request", section 3.4 about userlists and section 7
+ about ACL usage.
+
+
+stats realm <realm>
+ Enable statistics and set authentication realm
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <realm> is the name of the HTTP Basic Authentication realm reported to
+ the browser. The browser uses it to display it in the pop-up
+ inviting the user to enter a valid username and password.
+
+ The realm is read as a single word, so any spaces in it should be escaped
+ using a backslash ('\').
+
+ This statement is useful only in conjunction with "stats auth" since it is
+ only related to authentication.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats enable", "stats uri"
+
+
+stats refresh <delay>
+ Enable statistics with automatic refresh
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <delay> is the suggested refresh delay, specified in seconds, which will
+ be returned to the browser consulting the report page. While the
+ browser is free to apply any delay, it will generally respect it
+            and refresh the page at this interval. The refresh interval may
+ be specified in any other non-default time unit, by suffixing the
+ unit after the value, as explained at the top of this document.
+
+ This statement is useful on monitoring displays with a permanent page
+ reporting the load balancer's activity. When set, the HTML report page will
+ include a link "refresh"/"stop refresh" so that the user can select whether
+ they want automatic refresh of the page or not.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats enable", "stats realm", "stats uri"
+
+
+stats scope { <name> | "." }
+ Enable statistics and limit access scope
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <name> is the name of a listen, frontend or backend section to be
+ reported. The special name "." (a single dot) designates the
+ section in which the statement appears.
+
+ When this statement is specified, only the sections enumerated with this
+ statement will appear in the report. All other ones will be hidden. This
+ statement may appear as many times as needed if multiple sections need to be
+ reported. Please note that the name checking is performed as simple string
+  comparisons, and that it is never checked that a given section name really
+ exists.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats enable", "stats realm", "stats uri"
+
+
+stats show-desc [ <desc> ]
+ Enable reporting of a description on the statistics page.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ <desc> is an optional description to be reported. If unspecified, the
+ description from global section is automatically used instead.
+
+ This statement is useful for users that offer shared services to their
+ customers, where node or description should be different for each customer.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters. By default description is not shown.
+
+ Example :
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats show-desc Master node for Europe, Asia, Africa
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also: "show-node", "stats enable", "stats uri" and "description" in
+ global section.
+
+
+stats show-legends
+ Enable reporting additional information on the statistics page
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ Enable reporting additional information on the statistics page :
+ - cap: capabilities (proxy)
+ - mode: one of tcp, http or health (proxy)
+ - id: SNMP ID (proxy, socket, server)
+ - IP (socket, server)
+ - cookie (backend, server)
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters. Default behavior is not to show this information.
+
+ See also: "stats enable", "stats uri".
+
+
+stats show-modules
+ Enable display of extra statistics module on the statistics page
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments : none
+
+ New columns are added at the end of the line containing the extra statistics
+ values as a tooltip.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters. Default behavior is not to show this information.
+
+ See also: "stats enable", "stats uri".
+
+
+stats show-node [ <name> ]
+ Enable reporting of a host name on the statistics page.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments:
+ <name> is an optional name to be reported. If unspecified, the
+ node name from global section is automatically used instead.
+
+ This statement is useful for users that offer shared services to their
+ customers, where node or description might be different on a stats page
+ provided for each customer. Default behavior is not to show host name.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example:
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats show-node Europe-1
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also: "show-desc", "stats enable", "stats uri", and "node" in global
+ section.
+
+
+stats uri <prefix>
+ Enable statistics and define the URI prefix to access them
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <prefix> is the prefix of any URI which will be redirected to stats. This
+ prefix may contain a question mark ('?') to indicate part of a
+ query string.
+
+ The statistics URI is intercepted on the relayed traffic, so it appears as a
+ page within the normal application. It is strongly advised to ensure that the
+ selected URI will never appear in the application, otherwise it will never be
+ possible to reach it in the application.
+
+ The default URI compiled in HAProxy is "/haproxy?stats", but this may be
+ changed at build time, so it's better to always explicitly specify it here.
+ It is generally a good idea to include a question mark in the URI so that
+ intermediate proxies refrain from caching the results. Also, since any string
+ beginning with the prefix will be accepted as a stats request, the question
+ mark helps ensuring that no valid URI will begin with the same words.
+
+ It is sometimes very convenient to use "/" as the URI prefix, and put that
+ statement in a "listen" instance of its own. That makes it easy to dedicate
+ an address or a port to statistics only.
+
+ Though this statement alone is enough to enable statistics reporting, it is
+ recommended to set all other settings in order to avoid relying on default
+ unobvious parameters.
+
+ Example :
+ # public access (limited to this backend only)
+ backend public_www
+ server srv1 192.168.0.1:80
+ stats enable
+ stats hide-version
+ stats scope .
+ stats uri /admin?stats
+ stats realm HAProxy\ Statistics
+ stats auth admin1:AdMiN123
+ stats auth admin2:AdMiN321
+
+ # internal monitoring access (unlimited)
+ backend private_monitoring
+ stats enable
+ stats uri /admin?stats
+ stats refresh 5s
+
+ See also : "stats auth", "stats enable", "stats realm"
+
+
+stick match <pattern> [table <table>] [{if | unless} <cond>]
+ Define a request pattern matching condition to stick a user to a server
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <pattern> is a sample expression rule as described in section 7.3. It
+ describes what elements of the incoming request or connection
+ will be analyzed in the hope to find a matching entry in a
+ stickiness table. This rule is mandatory.
+
+ <table> is an optional stickiness table name. If unspecified, the same
+ backend's table is used. A stickiness table is declared using
+ the "stick-table" statement.
+
+ <cond> is an optional matching condition. It makes it possible to match
+ on a certain criterion only when other conditions are met (or
+ not met). For instance, it could be used to match on a source IP
+ address except when a request passes through a known proxy, in
+ which case we'd match on a header containing that IP address.
+
+ Some protocols or applications require complex stickiness rules and cannot
+ always simply rely on cookies nor hashing. The "stick match" statement
+ describes a rule to extract the stickiness criterion from an incoming request
+ or connection. See section 7 for a complete list of possible patterns and
+ transformation rules.
+
+ The table has to be declared using the "stick-table" statement. It must be of
+ a type compatible with the pattern. By default it is the one which is present
+ in the same backend. It is possible to share a table with other backends by
+ referencing it using the "table" keyword. If another table is referenced,
+ the server's ID inside the backends are used. By default, all server IDs
+ start at 1 in each backend, so the server ordering is enough. But in case of
+ doubt, it is highly recommended to force server IDs using their "id" setting.
+
+ It is possible to restrict the conditions where a "stick match" statement
+ will apply, using "if" or "unless" followed by a condition. See section 7 for
+ ACL based conditions.
+
+ There is no limit on the number of "stick match" statements. The first that
+ applies and matches will cause the request to be directed to the same server
+ as was used for the request which created the entry. That way, multiple
+ matches can be used as fallbacks.
+
+ The stick rules are checked after the persistence cookies, so they will not
+ affect stickiness if a cookie has already been used to select a server. That
+ way, it becomes very easy to insert cookies and match on IP addresses in
+ order to maintain stickiness between HTTP and HTTPS.
+
+ Example :
+ # forward SMTP users to the same server they just used for POP in the
+ # last 30 minutes
+ backend pop
+ mode tcp
+ balance roundrobin
+ stick store-request src
+ stick-table type ip size 200k expire 30m
+ server s1 192.168.1.1:110
+ server s2 192.168.1.1:110
+
+ backend smtp
+ mode tcp
+ balance roundrobin
+ stick match src table pop
+ server s1 192.168.1.1:25
+ server s2 192.168.1.1:25
+
+ See also : "stick-table", "stick on", and section 7 about ACLs and samples
+ fetching.
+
+
+stick on <pattern> [table <table>] [{if | unless} <condition>]
+ Define a request pattern to associate a user to a server
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Note : This form is exactly equivalent to "stick match" followed by
+ "stick store-request", all with the same arguments. Please refer
+ to both keywords for details. It is only provided as a convenience
+ for writing more maintainable configurations.
+
+ Examples :
+ # The following form ...
+ stick on src table pop if !localhost
+
+ # ...is strictly equivalent to this one :
+ stick match src table pop if !localhost
+ stick store-request src table pop if !localhost
+
+
+ # Use cookie persistence for HTTP, and stick on source address for HTTPS as
+ # well as HTTP without cookie. Share the same table between both accesses.
+ backend http
+ mode http
+ balance roundrobin
+ stick on src table https
+ cookie SRV insert indirect nocache
+ server s1 192.168.1.1:80 cookie s1
+ server s2 192.168.1.1:80 cookie s2
+
+ backend https
+ mode tcp
+ balance roundrobin
+ stick-table type ip size 200k expire 30m
+ stick on src
+ server s1 192.168.1.1:443
+ server s2 192.168.1.1:443
+
+ See also : "stick match", "stick store-request".
+
+
+stick store-request <pattern> [table <table>] [{if | unless} <condition>]
+ Define a request pattern used to create an entry in a stickiness table
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <pattern> is a sample expression rule as described in section 7.3. It
+ describes what elements of the incoming request or connection
+ will be analyzed, extracted and stored in the table once a
+ server is selected.
+
+ <table> is an optional stickiness table name. If unspecified, the same
+ backend's table is used. A stickiness table is declared using
+ the "stick-table" statement.
+
+ <cond> is an optional storage condition. It makes it possible to store
+ certain criteria only when some conditions are met (or not met).
+ For instance, it could be used to store the source IP address
+ except when the request passes through a known proxy, in which
+ case we'd store a converted form of a header containing that IP
+ address.
+
+ Some protocols or applications require complex stickiness rules and cannot
+ always simply rely on cookies nor hashing. The "stick store-request" statement
+ describes a rule to decide what to extract from the request and when to do
+ it, in order to store it into a stickiness table for further requests to
+ match it using the "stick match" statement. Obviously the extracted part must
+ make sense and have a chance to be matched in a further request. Storing a
+ client's IP address for instance often makes sense. Storing an ID found in a
+ URL parameter also makes sense. Storing a source port will almost never make
+ any sense because it will be randomly matched. See section 7 for a complete
+ list of possible patterns and transformation rules.
+
+ The table has to be declared using the "stick-table" statement. It must be of
+ a type compatible with the pattern. By default it is the one which is present
+ in the same backend. It is possible to share a table with other backends by
+ referencing it using the "table" keyword. If another table is referenced,
+ the server's ID inside the backends are used. By default, all server IDs
+ start at 1 in each backend, so the server ordering is enough. But in case of
+ doubt, it is highly recommended to force server IDs using their "id" setting.
+
+ It is possible to restrict the conditions where a "stick store-request"
+ statement will apply, using "if" or "unless" followed by a condition. This
+ condition will be evaluated while parsing the request, so any criteria can be
+ used. See section 7 for ACL based conditions.
+
+ There is no limit on the number of "stick store-request" statements, but
+ there is a limit of 8 simultaneous stores per request or response. This
+ makes it possible to store up to 8 criteria, all extracted from either the
+ request or the response, regardless of the number of rules. Only the 8 first
+ ones which match will be kept. Using this, it is possible to feed multiple
+ tables at once in the hope to increase the chance to recognize a user on
+ another protocol or access method. Using multiple store-request rules with
+ the same table is possible and may be used to find the best criterion to rely
+ on, by arranging the rules by decreasing preference order. Only the first
+ extracted criterion for a given table will be stored. All subsequent store-
+ request rules referencing the same table will be skipped and their ACLs will
+ not be evaluated.
+
+ The "store-request" rules are evaluated once the server connection has been
+ established, so that the table will contain the real server that processed
+ the request.
+
+ Example :
+ # forward SMTP users to the same server they just used for POP in the
+ # last 30 minutes
+ backend pop
+ mode tcp
+ balance roundrobin
+ stick store-request src
+ stick-table type ip size 200k expire 30m
+ server s1 192.168.1.1:110
+ server s2 192.168.1.1:110
+
+ backend smtp
+ mode tcp
+ balance roundrobin
+ stick match src table pop
+ server s1 192.168.1.1:25
+ server s2 192.168.1.1:25
+
+ See also : "stick-table", "stick on", and section 7 about ACLs and sample
+ fetching.
+
+
+stick-table type {ip | integer | string [len <length>] | binary [len <length>]}
+ size <size> [expire <expire>] [nopurge] [peers <peersect>] [srvkey <srvkey>]
+ [write-to <wtable>] [store <data_type>]*
+ Configure the stickiness table for the current section
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | yes
+
+ Arguments :
+ ip a table declared with "type ip" will only store IPv4 addresses.
+ This form is very compact (about 50 bytes per entry) and allows
+ very fast entry lookup and stores with almost no overhead. This
+ is mainly used to store client source IP addresses.
+
+ ipv6 a table declared with "type ipv6" will only store IPv6 addresses.
+ This form is very compact (about 60 bytes per entry) and allows
+ very fast entry lookup and stores with almost no overhead. This
+ is mainly used to store client source IP addresses.
+
+ integer a table declared with "type integer" will store 32bit integers
+ which can represent a client identifier found in a request for
+ instance.
+
+ string a table declared with "type string" will store substrings of up
+ to <len> characters. If the string provided by the pattern
+ extractor is larger than <len>, it will be truncated before
+ being stored. During matching, at most <len> characters will be
+ compared between the string in the table and the extracted
+ pattern. When not specified, the string is automatically limited
+ to 32 characters.
+
+ binary a table declared with "type binary" will store binary blocks
+ of <len> bytes. If the block provided by the pattern
+ extractor is larger than <len>, it will be truncated before
+ being stored. If the block provided by the sample expression
+ is shorter than <len>, it will be padded by 0. When not
+ specified, the block is automatically limited to 32 bytes.
+
+ <length> is the maximum number of characters that will be stored in a
+ "string" type table (See type "string" above). Or the number
+ of bytes of the block in "binary" type table. Be careful when
+ changing this parameter as memory usage will proportionally
+ increase.
+
+ <size> is the maximum number of entries that can fit in the table. This
+ value directly impacts memory usage. Count approximately
+ 50 bytes per entry, plus the size of a string if any. The size
+ supports suffixes "k", "m", "g" for 2^10, 2^20 and 2^30 factors.
+
+ [nopurge] indicates that we refuse to purge older entries when the table
+ is full. When not specified and the table is full when HAProxy
+ wants to store an entry in it, it will flush a few of the oldest
+ entries in order to release some space for the new ones. This is
+ most often the desired behavior. In some specific cases, it may
+ be desirable to refuse new entries instead of purging the older
+ ones. That may be the case when the amount of data to store is
+ far above the hardware limits and we prefer not to offer access
+ to new clients than to reject the ones already connected. When
+ using this parameter, be sure to properly set the "expire"
+ parameter (see below).
+
+ <peersect> is the name of the peers section to use for replication. Entries
+ which associate keys to server IDs are kept synchronized with
+ the remote peers declared in this section. All entries are also
+ automatically learned from the local peer (old process) during a
+ soft restart.
+
+ <wtable> is the name of the stick table where peers updates will be
+ written to in addition to the source table. <wtable> must be of
+ the same type as the table being defined and must have the same
+ key length, and source table cannot be used as a target table
+ itself. Every time an entry update will be received on the source
+ table through a peer, haproxy will try to refresh related
+ <wtable> entry. If the entry doesn't exist yet, it will be
+ created, else its values will be updated as well as its timer.
+ Note that only types that are not involved in arithmetic ops such
+ as server_id, server_key and gpt will be written to <wtable> to
+ prevent processed values from a remote table from interfering with
+ arithmetic operations performed on the local target table.
+ (ie: prevent shared cumulative counter from growing indefinitely)
+ One common use of this option is to be able to use sticking rules
+ (for server persistence) in a peers cluster setup, because
+ matching keys will be learned from remote tables.
+
+ <expire> defines the maximum duration of an entry in the table since it
+ was last created, refreshed using 'track-sc' or matched using
+ 'stick match' or 'stick on' rule. The expiration delay is
+ defined using the standard time format, similarly as the various
+ timeouts. The maximum duration is slightly above 24 days. See
+ section 2.5 for more information. If this delay is not specified,
+ the session won't automatically expire, but older entries will
+ be removed once full. Be sure not to use the "nopurge" parameter
+ if no expiration delay is specified.
+ Note: 'table_*' converters perform lookups but won't update nor
+ touch the expire timer since they don't require 'track-sc'.
+
+ <srvkey> specifies how each server is identified for the purposes of the
+ stick table. The valid values are "name" and "addr". If "name" is
+ given, then the server is identified by its <name> argument,
+ which may be generated by a template. If "addr" is given, then
+ the server is identified
+ by its current network address, including the port. "addr" is
+ especially useful if you are using service discovery to generate
+ the addresses for servers with peered stick-tables and want
+ to consistently use the same host across peers for a stickiness
+ token.
+
+ <data_type> is used to store additional information in the stick-table. This
+ may be used by ACLs in order to control various criteria related
+ to the activity of the client matching the stick-table. For each
+ item specified here, the size of each entry will be inflated so
+ that the additional data can fit. Several data types may be
+ stored with an entry. Multiple data types may be specified after
+ the "store" keyword, as a comma-separated list. Alternatively,
+ it is possible to repeat the "store" keyword followed by one or
+ several data types. Except for the "server_id" type which is
+ automatically detected and enabled, all data types must be
+ explicitly declared to be stored. If an ACL references a data
+ type which is not stored, the ACL will simply not match. Some
+ data types require an argument which must be passed just after
+ the type between parenthesis. See below for the supported data
+ types and their arguments.
+
+ The data types that can be stored with an entry are the following :
+ - server_id : this is an integer which holds the numeric ID of the server a
+ request was assigned to. It is used by the "stick match", "stick store",
+ and "stick on" rules. It is automatically enabled when referenced.
+
+ - gpc(<nb>) : General Purpose Counters Array of <nb> elements. This is an
+ array of positive 32-bit integers which may be used to count anything.
+ Most of the time they will be used as incremental counters on some
+ entries, for instance to note that a limit is reached and trigger some
+ actions. This array is limited to a maximum of 100 elements:
+ gpc0 to gpc99, to ensure that the build of a peer update
+ message can fit into the buffer. Users should take in consideration
+ that a large amount of counters will increase the data size and the
+ traffic load using peers protocol since all data/counters are pushed
+ each time any of them is updated.
+ This data_type will exclude the usage of the legacy data_types 'gpc0'
+ and 'gpc1' on the same table. Using the 'gpc' array data_type, all 'gpc0'
+ and 'gpc1' related fetches and actions will apply to the two first
+ elements of this array.
+
+ - gpc_rate(<nb>,<period>) : Array of increment rates of General Purpose
+ Counters over a period. Those elements are positive 32-bit integers which
+ may be used for anything. Just like <gpc>, they count events, but instead
+ of keeping a cumulative number, they maintain the rate at which the
+ counter is incremented. Most of the time it will be used to measure the
+ frequency of occurrence of certain events (e.g. requests to a specific
+ URL). This array is limited to a maximum of 100 elements: gpc(100)
+ allowing the storage of gpc0 to gpc99, to ensure that the build of a peer
+ update message can fit into the buffer.
+ The array cannot contain less than 1 element: use gpc(1) if you want to
+ store only the counter gpc0.
+ Users should take in consideration that a large amount of
+ counters will increase the data size and the traffic load using peers
+ protocol since all data/counters are pushed each time any of them is
+ updated.
+ This data_type will exclude the usage of the legacy data_types
+ 'gpc0_rate' and 'gpc1_rate' on the same table. Using the 'gpc_rate'
+ array data_type, all 'gpc0' and 'gpc1' related fetches and actions
+ will apply to the two first elements of this array.
+
+ - gpc0 : first General Purpose Counter. It is a positive 32-bit integer
+ which may be used for anything. Most of the time it will be used
+ to put a special tag on some entries, for instance to note that a
+ specific behavior was detected and must be known for future matches.
+
+ - gpc0_rate(<period>) : increment rate of the first General Purpose Counter
+ over a period. It is a positive 32-bit integer which may be used
+ for anything. Just like <gpc0>, it counts events, but instead of keeping
+ a cumulative number, it maintains the rate at which the counter is
+ incremented. Most of the time it will be used to measure the frequency of
+ occurrence of certain events (e.g. requests to a specific URL).
+
+ - gpc1 : second General Purpose Counter. It is a positive 32-bit integer
+ which may be used for anything. Most of the time it will be used
+ to put a special tag on some entries, for instance to note that a
+ specific behavior was detected and must be known for future matches.
+
+ - gpc1_rate(<period>) : increment rate of the second General Purpose Counter
+ over a period. It is a positive 32-bit integer which may be used
+ for anything. Just like <gpc1>, it counts events, but instead of keeping
+ a cumulative number, it maintains the rate at which the counter is
+ incremented. Most of the time it will be used to measure the frequency of
+ occurrence of certain events (e.g. requests to a specific URL).
+
+ - gpt(<nb>) : General Purpose Tags Array of <nb> elements. This is an array
+ of positive 32-bit integers which may be used for anything.
+ Most of the time they will be used to put a special tags on some entries,
+ for instance to note that a specific behavior was detected and must be
+ known for future matches. This array is limited to a maximum of 100
+ elements: gpt(100) allowing the storage of gpt0 to gpt99, to ensure that
+ the build of a peer update message can fit into the buffer.
+ The array cannot contain less than 1 element: use gpt(1) if you want to
+ store only the tag gpt0.
+ Users should take in consideration that a large amount of counters will
+ increase the data size and the traffic load using peers protocol since
+ all data/counters are pushed each time any of them is updated.
+ This data_type will exclude the usage of the legacy data_type 'gpt0'
+ on the same table. Using the 'gpt' array data_type, all 'gpt0' related
+ fetches and actions will apply to the first element of this array.
+
+ - gpt0 : first General Purpose Tag. It is a positive 32-bit integer
+ which may be used for anything. Most of the time it will be used
+ to put a special tag on some entries, for instance to note that a
+ specific behavior was detected and must be known for future matches.
+
+ - conn_cnt : Connection Count. It is a positive 32-bit integer which counts
+ the absolute number of connections received from clients which matched
+ this entry. It does not mean the connections were accepted, just that
+ they were received.
+
+ - conn_cur : Current Connections. It is a positive 32-bit integer which
+ stores the concurrent connection counts for the entry. It is incremented
+ once an incoming connection matches the entry, and decremented once the
+ connection leaves. That way it is possible to know at any time the exact
+ number of concurrent connections for an entry.
+
+ - conn_rate(<period>) : frequency counter (takes 12 bytes). It takes an
+ integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ incoming connection rate over that period, in connections per period. The
+ result is an integer which can be matched using ACLs.
+
+ - sess_cnt : Session Count. It is a positive 32-bit integer which counts
+ the absolute number of sessions received from clients which matched this
+ entry. A session is a connection that was accepted by the layer 4 rules.
+
+ - sess_rate(<period>) : frequency counter (takes 12 bytes). It takes an
+ integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ incoming session rate over that period, in sessions per period. The
+ result is an integer which can be matched using ACLs.
+
+ - http_req_cnt : HTTP request Count. It is a positive 32-bit integer which
+ counts the absolute number of HTTP requests received from clients which
+ matched this entry. It does not matter whether they are valid requests or
+ not. Note that this is different from sessions when keep-alive is used on
+ the client side.
+
+ - http_req_rate(<period>) : frequency counter (takes 12 bytes). It takes an
+ integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ HTTP request rate over that period, in requests per period. The result is
+ an integer which can be matched using ACLs. It does not matter whether
+ they are valid requests or not. Note that this is different from sessions
+ when keep-alive is used on the client side.
+
+ - http_err_cnt : HTTP Error Count. It is a positive 32-bit integer which
+ counts the absolute number of HTTP requests errors induced by clients
+ which matched this entry. Errors are counted on invalid and truncated
+ requests, as well as on denied or tarpitted requests, and on failed
+ authentications. If the server responds with 4xx, then the request is
+ also counted as an error since it's an error triggered by the client
+ (e.g. vulnerability scan).
+
+ - http_err_rate(<period>) : frequency counter (takes 12 bytes). It takes an
+ integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ HTTP request error rate over that period, in requests per period (see
+ http_err_cnt above for what is accounted as an error). The result is an
+ integer which can be matched using ACLs.
+
+ - http_fail_cnt : HTTP Failure Count. It is a positive 32-bit integer which
+ counts the absolute number of HTTP response failures induced by servers
+ which matched this entry. Errors are counted on invalid and truncated
+ responses, as well as any 5xx response other than 501 or 505. It aims at
+ being used combined with path or URI to detect service failures.
+
+ - http_fail_rate(<period>) : frequency counter (takes 12 bytes). It takes
+ an integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ HTTP response failure rate over that period, in requests per period (see
+ http_fail_cnt above for what is accounted as a failure). The result is an
+ integer which can be matched using ACLs.
+
+ - bytes_in_cnt : client to server byte count. It is a positive 64-bit
+ integer which counts the cumulative number of bytes received from clients
+ which matched this entry. Headers are included in the count. This may be
+ used to limit abuse of upload features on photo or video servers.
+
+ - bytes_in_rate(<period>) : frequency counter (takes 12 bytes). It takes an
+ integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ incoming bytes rate over that period, in bytes per period. It may be used
+ to detect users which upload too much and too fast. Warning: with large
+ uploads, it is possible that the amount of uploaded data will be counted
+ once upon termination, thus causing spikes in the average transfer speed
+ instead of having a smooth one. This may partially be smoothed with
+ "option contstats" though this is not perfect yet. Use of bytes_in_cnt
+ is recommended for better fairness.
+
+ - bytes_out_cnt : server to client byte count. It is a positive 64-bit
+ integer which counts the cumulative number of bytes sent to clients which
+ matched this entry. Headers are included in the count. This may be used
+ to limit abuse of bots sucking the whole site.
+
+ - bytes_out_rate(<period>) : frequency counter (takes 12 bytes). It takes
+ an integer parameter <period> which indicates in milliseconds the length
+ of the period over which the average is measured. It reports the average
+ outgoing bytes rate over that period, in bytes per period. It may be used
+ to detect users which download too much and too fast. Warning: with large
+ transfers, it is possible that the amount of transferred data will be
+ counted once upon termination, thus causing spikes in the average
+ transfer speed instead of having a smooth one. This may partially be
+ smoothed with "option contstats" though this is not perfect yet. Use of
+ bytes_out_cnt is recommended for better fairness.
+
+ There is only one stick-table per proxy. At the moment of writing this doc,
+ it does not seem useful to have multiple tables per proxy. If this happens
+ to be required, simply create a dummy backend with a stick-table in it and
+ reference it.
+
+ It is important to understand that stickiness based on learning information
+ has some limitations, including the fact that all learned associations are
+ lost upon restart unless peers are properly configured to transfer such
+ information upon restart (recommended). In general it can be good as a
+ complement but not always as an exclusive stickiness.
+
+ Last, memory requirements may be important when storing many data types.
+ Indeed, storing all indicators above at once in each entry requires 116 bytes
+ per entry, or 116 MB for a 1-million entries table. This is definitely not
+ something that can be ignored.
+
+ Example:
+ # Keep track of counters of up to 1 million IP addresses over 5 minutes
+ # and store a general purpose counter and the average connection rate
+ # computed over a sliding window of 30 seconds.
+ stick-table type ip size 1m expire 5m store gpc0,conn_rate(30s)
+
+ See also : "stick match", "stick on", "stick store-request", section 2.5
+ about time format and section 7 about ACLs.
+
+
+stick store-response <pattern> [table <table>] [{if | unless} <condition>]
+ Define a response pattern used to create an entry in a stickiness table
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <pattern> is a sample expression rule as described in section 7.3. It
+ describes what elements of the response or connection will
+ be analyzed, extracted and stored in the table once a
+ server is selected.
+
+ <table> is an optional stickiness table name. If unspecified, the same
+ backend's table is used. A stickiness table is declared using
+ the "stick-table" statement.
+
+ <cond> is an optional storage condition. It makes it possible to store
+ certain criteria only when some conditions are met (or not met).
+ For instance, it could be used to store the SSL session ID only
+ when the response is a SSL server hello.
+
+ Some protocols or applications require complex stickiness rules and cannot
+ always simply rely on cookies nor hashing. The "stick store-response"
+ statement describes a rule to decide what to extract from the response and
+ when to do it, in order to store it into a stickiness table for further
+ requests to match it using the "stick match" statement. Obviously the
+ extracted part must make sense and have a chance to be matched in a further
+ request. Storing an ID found in a header of a response makes sense.
+ See section 7 for a complete list of possible patterns and transformation
+ rules.
+
+ The table has to be declared using the "stick-table" statement. It must be of
+ a type compatible with the pattern. By default it is the one which is present
+ in the same backend. It is possible to share a table with other backends by
+ referencing it using the "table" keyword. If another table is referenced,
+ the server's ID inside the backends are used. By default, all server IDs
+ start at 1 in each backend, so the server ordering is enough. But in case of
+ doubt, it is highly recommended to force server IDs using their "id" setting.
+
+ It is possible to restrict the conditions where a "stick store-response"
+ statement will apply, using "if" or "unless" followed by a condition. This
+ condition will be evaluated while parsing the response, so any criteria can
+ be used. See section 7 for ACL based conditions.
+
+ There is no limit on the number of "stick store-response" statements, but
+ there is a limit of 8 simultaneous stores per request or response. This
+ makes it possible to store up to 8 criteria, all extracted from either the
+ request or the response, regardless of the number of rules. Only the 8 first
+ ones which match will be kept. Using this, it is possible to feed multiple
+ tables at once in the hope to increase the chance to recognize a user on
+ another protocol or access method. Using multiple store-response rules with
+ the same table is possible and may be used to find the best criterion to rely
+ on, by arranging the rules by decreasing preference order. Only the first
+ extracted criterion for a given table will be stored. All subsequent store-
+ response rules referencing the same table will be skipped and their ACLs will
+ not be evaluated. However, even if a store-request rule references a table, a
+ store-response rule may also use the same table. This means that each table
+ may learn exactly one element from the request and one element from the
+ response at once.
+
+ The table will contain the real server that processed the request.
+
+ Example :
+ # Learn SSL session ID from both request and response and create affinity.
+ backend https
+ mode tcp
+ balance roundrobin
+ # maximum SSL session ID length is 32 bytes.
+ stick-table type binary len 32 size 30k expire 30m
+
+ acl clienthello req.ssl_hello_type 1
+ acl serverhello res.ssl_hello_type 2
+
+ # use tcp content accepts to detect ssl client and server hello.
+ tcp-request inspect-delay 5s
+ tcp-request content accept if clienthello
+
+ # no timeout on response inspect delay by default.
+ tcp-response content accept if serverhello
+
+ # SSL session ID (SSLID) may be present on a client or server hello.
+ # Its length is coded on 1 byte at offset 43 and its value starts
+ # at offset 44.
+
+ # Match and learn on request if client hello.
+ stick on req.payload_lv(43,1) if clienthello
+
+ # Learn on response if server hello.
+ stick store-response resp.payload_lv(43,1) if serverhello
+
+ server s1 192.168.1.1:443
+ server s2 192.168.1.1:443
+
+ See also : "stick-table", "stick on", and section 7 about ACLs and pattern
+ extraction.
+
+
+tcp-check comment <string>
+ Defines a comment for the following tcp-check rule, reported in logs if
+ it fails.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <string> is the comment message to add in logs if the following tcp-check
+ rule fails.
+
+ It only works for connect, send and expect rules. It is useful to make
+ user-friendly error reporting.
+
+ See also : "option tcp-check", "tcp-check connect", "tcp-check send" and
+ "tcp-check expect".
+
+
+tcp-check connect [default] [port <expr>] [addr <ip>] [send-proxy] [via-socks4]
+ [ssl] [sni <sni>] [alpn <alpn>] [linger]
+ [proto <name>] [comment <msg>]
+ Opens a new connection
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ default Use default options of the server line to do the health
+ checks. The server options are used only if not redefined.
+
+ port <expr> if not set, check port or server port is used.
+ It tells HAProxy where to open the connection to.
+ <port> must be a valid TCP port integer, from 1 to
+ 65535, or a sample-fetch expression.
+
+ addr <ip> defines the IP address to do the health check.
+
+ send-proxy send a PROXY protocol string
+
+ via-socks4 enables outgoing health checks using upstream socks4 proxy.
+
+ ssl opens a ciphered connection
+
+ sni <sni> specifies the SNI to use to do health checks over SSL.
+
+ alpn <alpn> defines which protocols to advertise with ALPN. The protocol
+ list consists in a comma-delimited list of protocol names,
+ for instance: "http/1.1,http/1.0" (without quotes).
+ If it is not set, the server ALPN is used.
+
+ proto <name> forces the multiplexer's protocol to use for this connection.
+ It must be a TCP mux protocol and it must be usable on the
+ backend side. The list of available protocols is reported in
+ haproxy -vv.
+
+ linger cleanly close the connection instead of using a single RST.
+
+ When an application lies on more than a single TCP port or when HAProxy
+ load-balance many services in a single backend, it makes sense to probe all
+ the services individually before considering a server as operational.
+
+ When no TCP port is configured on the server line, nor set via a server
+ "port" directive, then the 'tcp-check connect port <port>' must be the
+ first step of the sequence.
+
+ In a tcp-check ruleset a 'connect' is required, and it is also mandatory to
+ start the ruleset with a 'connect' rule. The purpose is to ensure admins
+ know what they do.
+
+ When a connect must start the ruleset, it may still be preceded by set-var,
+ unset-var or comment rules.
+
+ Examples :
+ # check HTTP and HTTPs services on a server.
+ # first open port 80 thanks to server line port directive, then
+ # tcp-check opens port 443, ciphered and run a request on it:
+ option tcp-check
+ tcp-check connect
+ tcp-check send GET\ /\ HTTP/1.0\r\n
+ tcp-check send Host:\ haproxy.1wt.eu\r\n
+ tcp-check send \r\n
+ tcp-check expect rstring (2..|3..)
+ tcp-check connect port 443 ssl
+ tcp-check send GET\ /\ HTTP/1.0\r\n
+ tcp-check send Host:\ haproxy.1wt.eu\r\n
+ tcp-check send \r\n
+ tcp-check expect rstring (2..|3..)
+ server www 10.0.0.1 check port 80
+
+ # check both POP and IMAP from a single server:
+ option tcp-check
+ tcp-check connect port 110 linger
+ tcp-check expect string +OK\ POP3\ ready
+ tcp-check connect port 143
+ tcp-check expect string *\ OK\ IMAP4\ ready
+ server mail 10.0.0.1 check
+
+ See also : "option tcp-check", "tcp-check send", "tcp-check expect"
+
+
+tcp-check expect [min-recv <int>] [comment <msg>]
+ [ok-status <st>] [error-status <st>] [tout-status <st>]
+ [on-success <fmt>] [on-error <fmt>] [status-code <expr>]
+ [!] <match> <pattern>
+ Specify data to be collected and analyzed during a generic health check
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ min-recv is optional and can define the minimum amount of data required to
+ evaluate the current expect rule. If the number of received bytes
+ is under this limit, the check will wait for more data. This
+ option can be used to resolve some ambiguous matching rules or to
+ avoid executing costly regex matches on content known to be still
+ incomplete. If an exact string (string or binary) is used, the
+ minimum between the string length and this parameter is used.
+ This parameter is ignored if it is set to -1. If the expect rule
+ does not match, the check will wait for more data. If set to 0,
+ the evaluation result is always conclusive.
+
+ <match> is a keyword indicating how to look for a specific pattern in the
+ response. The keyword may be one of "string", "rstring", "binary" or
+ "rbinary".
+ The keyword may be preceded by an exclamation mark ("!") to negate
+ the match. Spaces are allowed between the exclamation mark and the
+ keyword. See below for more details on the supported keywords.
+
+ ok-status <st> is optional and can be used to set the check status if
+ the expect rule is successfully evaluated and if it is
+ the last rule in the tcp-check ruleset. "L7OK", "L7OKC",
+ "L6OK" and "L4OK" are supported :
+ - L7OK : check passed on layer 7
+ - L7OKC : check conditionally passed on layer 7, set
+ server to NOLB state.
+ - L6OK : check passed on layer 6
+ - L4OK : check passed on layer 4
+ By default "L7OK" is used.
+
+ error-status <st> is optional and can be used to set the check status if
+ an error occurred during the expect rule evaluation.
+ "L7OKC", "L7RSP", "L7STS", "L6RSP" and "L4CON" are
+ supported :
+ - L7OKC : check conditionally passed on layer 7, set
+ server to NOLB state.
+ - L7RSP : layer 7 invalid response - protocol error
+ - L7STS : layer 7 response error, for example HTTP 5xx
+ - L6RSP : layer 6 invalid response - protocol error
+ - L4CON : layer 1-4 connection problem
+ By default "L7RSP" is used.
+
+ tout-status <st> is optional and can be used to set the check status if
+ a timeout occurred during the expect rule evaluation.
+ "L7TOUT", "L6TOUT", and "L4TOUT" are supported :
+ - L7TOUT : layer 7 (HTTP/SMTP) timeout
+ - L6TOUT : layer 6 (SSL) timeout
+ - L4TOUT : layer 1-4 timeout
+ By default "L7TOUT" is used.
+
+ on-success <fmt> is optional and can be used to customize the
+ informational message reported in logs if the expect
+ rule is successfully evaluated and if it is the last rule
+ in the tcp-check ruleset. <fmt> is a log-format string.
+
+ on-error <fmt> is optional and can be used to customize the
+ informational message reported in logs if an error
+ occurred during the expect rule evaluation. <fmt> is a
+ log-format string.
+
+ status-code <expr> is optional and can be used to set the check status code
+ reported in logs, on success or on error. <expr> is a
+ standard HAProxy expression formed by a sample-fetch
+ followed by some converters.
+
+ <pattern> is the pattern to look for. It may be a string or a regular
+ expression. If the pattern contains spaces, they must be escaped
+ with the usual backslash ('\').
+ If the match is set to binary, then the pattern must be passed as
+             an even number of hexadecimal digits. Each sequence of
+ two digits will represent a byte. The hexadecimal digits may be
+ used upper or lower case.
+
+ The available matches are intentionally similar to their http-check cousins :
+
+ string <string> : test the exact string matches in the response buffer.
+ A health check response will be considered valid if the
+ response's buffer contains this exact string. If the
+ "string" keyword is prefixed with "!", then the response
+ will be considered invalid if the body contains this
+ string. This can be used to look for a mandatory pattern
+ in a protocol response, or to detect a failure when a
+ specific error appears in a protocol banner.
+
+ rstring <regex> : test a regular expression on the response buffer.
+ A health check response will be considered valid if the
+ response's buffer matches this expression. If the
+ "rstring" keyword is prefixed with "!", then the response
+ will be considered invalid if the body matches the
+ expression.
+
+ string-lf <fmt> : test a log-format string match in the response's buffer.
+ A health check response will be considered valid if the
+ response's buffer contains the string resulting of the
+ evaluation of <fmt>, which follows the log-format rules.
+ If prefixed with "!", then the response will be
+ considered invalid if the buffer contains the string.
+
+ binary <hexstring> : test the exact string in its hexadecimal form matches
+ in the response buffer. A health check response will
+ be considered valid if the response's buffer contains
+ this exact hexadecimal string.
+ Purpose is to match data on binary protocols.
+
+ rbinary <regex> : test a regular expression on the response buffer, like
+ "rstring". However, the response buffer is transformed
+ into its hexadecimal form, including NUL-bytes. This
+ allows using all regex engines to match any binary
+ content. The hexadecimal transformation takes twice the
+ size of the original response. As such, the expected
+ pattern should work on at-most half the response buffer
+ size.
+
+ binary-lf <hexfmt> : test a log-format string in its hexadecimal form
+ match in the response's buffer. A health check response
+ will be considered valid if the response's buffer
+ contains the hexadecimal string resulting of the
+ evaluation of <fmt>, which follows the log-format
+ rules. If prefixed with "!", then the response will be
+ considered invalid if the buffer contains the
+ hexadecimal string. The hexadecimal string is converted
+ in a binary string before matching the response's
+ buffer.
+
+ It is important to note that the responses will be limited to a certain size
+ defined by the global "tune.bufsize" option, which defaults to 16384 bytes.
+ Thus, too large responses may not contain the mandatory pattern when using
+ "string", "rstring" or binary. If a large response is absolutely required, it
+ is possible to change the default max size by setting the global variable.
+ However, it is worth keeping in mind that parsing very large responses can
+ waste some CPU cycles, especially when regular expressions are used, and that
+ it is always better to focus the checks on smaller resources. Also, in its
+ current state, the check will not find any string nor regex past a null
+ character in the response. Similarly it is not possible to request matching
+ the null character.
+
+ Examples :
+ # perform a POP check
+ option tcp-check
+ tcp-check expect string +OK\ POP3\ ready
+
+ # perform an IMAP check
+ option tcp-check
+ tcp-check expect string *\ OK\ IMAP4\ ready
+
+ # look for the redis master server
+ option tcp-check
+ tcp-check send PING\r\n
+ tcp-check expect string +PONG
+ tcp-check send info\ replication\r\n
+ tcp-check expect string role:master
+ tcp-check send QUIT\r\n
+ tcp-check expect string +OK
+
+
+ See also : "option tcp-check", "tcp-check connect", "tcp-check send",
+ "tcp-check send-binary", "http-check expect", tune.bufsize
+
+
+tcp-check send <data> [comment <msg>]
+tcp-check send-lf <fmt> [comment <msg>]
+ Specify a string or a log-format string to be sent as a question during a
+ generic health check
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+ <data> is the string that will be sent during a generic health
+ check session.
+
+ <fmt> is the log-format string that will be sent, once evaluated,
+ during a generic health check session.
+
+ Examples :
+ # look for the redis master server
+ option tcp-check
+ tcp-check send info\ replication\r\n
+ tcp-check expect string role:master
+
+ See also : "option tcp-check", "tcp-check connect", "tcp-check expect",
+ "tcp-check send-binary", tune.bufsize
+
+
+tcp-check send-binary <hexstring> [comment <msg>]
+tcp-check send-binary-lf <hexfmt> [comment <msg>]
+  Specify a hex digits string or a hex digits log-format string to be sent as
+  a binary question during a raw tcp health check
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ comment <msg> defines a message to report if the rule evaluation fails.
+
+    <hexstring> is the hexadecimal string that will be sent, once converted
+ to binary, during a generic health check session.
+
+    <hexfmt>    is the hexadecimal log-format string that will be sent, once
+ evaluated and converted to binary, during a generic health
+ check session.
+
+ Examples :
+ # redis check in binary
+ option tcp-check
+ tcp-check send-binary 50494e470d0a # PING\r\n
+ tcp-check expect binary 2b504F4e47 # +PONG
+
+
+ See also : "option tcp-check", "tcp-check connect", "tcp-check expect",
+ "tcp-check send", tune.bufsize
+
+
+tcp-check set-var(<var-name>[,<cond>...]) <expr>
+tcp-check set-var-fmt(<var-name>[,<cond>...]) <fmt>
+ This operation sets the content of a variable. The variable is declared inline.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <var-name> The name of the variable starts with an indication about its
+ scope. The scopes allowed for tcp-check are:
+ "proc" : the variable is shared with the whole process.
+ "sess" : the variable is shared with the tcp-check session.
+ "check": the variable is declared for the lifetime of the tcp-check.
+ This prefix is followed by a name. The separator is a '.'.
+ The name may only contain characters 'a-z', 'A-Z', '0-9', '.',
+ and '-'.
+
+ <cond> A set of conditions that must all be true for the variable to
+ actually be set (such as "ifnotempty", "ifgt" ...). See the
+ set-var converter's description for a full list of possible
+ conditions.
+
+ <expr> Is a sample-fetch expression potentially followed by converters.
+
+ <fmt> This is the value expressed using log-format rules (see Custom
+ Log Format in section 8.2.4).
+
+ Examples :
+ tcp-check set-var(check.port) int(1234)
+ tcp-check set-var-fmt(check.name) "%H"
+
+
+tcp-check unset-var(<var-name>)
+ Free a reference to a variable within its scope.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <var-name> The name of the variable starts with an indication about its
+ scope. The scopes allowed for tcp-check are:
+ "proc" : the variable is shared with the whole process.
+ "sess" : the variable is shared with the tcp-check session.
+ "check": the variable is declared for the lifetime of the tcp-check.
+ This prefix is followed by a name. The separator is a '.'.
+ The name may only contain characters 'a-z', 'A-Z', '0-9', '.',
+ and '-'.
+
+ Examples :
+ tcp-check unset-var(check.port)
+
+
+tcp-request connection <action> <options...> [ { if | unless } <condition> ]
+ Perform an action on an incoming connection depending on a layer 4 condition
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | yes | yes | no
+
+ Arguments :
+ <action> defines the action to perform if the condition applies. See
+ below.
+
+ <condition> is a standard layer4-only ACL-based condition (see section 7).
+
+ Immediately after acceptance of a new incoming connection, it is possible to
+ evaluate some conditions to decide whether this connection must be accepted
+ or dropped or have its counters tracked. Those conditions cannot make use of
+ any data contents because the connection has not been read from yet, and the
+ buffers are not yet allocated. This is used to selectively and very quickly
+ accept or drop connections from various sources with a very low overhead. If
+ some contents need to be inspected in order to take the decision, the
+ "tcp-request content" statements must be used instead.
+
+ The "tcp-request connection" rules are evaluated in their exact declaration
+ order. If no rule matches or if there is no rule, the default action is to
+ accept the incoming connection. There is no specific limit to the number of
+ rules which may be inserted. Any rule may optionally be followed by an
+ ACL-based condition, in which case it will only be evaluated if the condition
+ evaluates to true.
+
+ The condition is evaluated just before the action is executed, and the action
+ is performed exactly once. As such, there is no problem if an action changes
+ an element which is checked as part of the condition. This also means that
+ multiple actions may rely on the same condition so that the first action that
+ changes the condition's evaluation is sufficient to implicitly disable the
+ remaining actions. This is used for example when trying to assign a value to
+ a variable from various sources when it's empty.
+
+ The first keyword after "tcp-request connection" in the syntax is the rule's
+ action, optionally followed by a varying number of arguments for the action.
+ The supported actions and their respective syntaxes are enumerated in
+ section 4.3 "Actions" (look for actions which tick "TCP RqCon").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Note that the "if/unless" condition is optional. If no condition is set on
+ the action, it is simply performed unconditionally. That can be useful for
+ "track-sc*" actions as well as for changing the default action to a reject.
+
+ Example: accept all connections from white-listed hosts, reject too fast
+ connection without counting them, and track accepted connections.
+ This results in connection rate being capped from abusive sources.
+
+ tcp-request connection accept if { src -f /etc/haproxy/whitelist.lst }
+ tcp-request connection reject if { src_conn_rate gt 10 }
+ tcp-request connection track-sc0 src
+
+ Example: accept all connections from white-listed hosts, count all other
+ connections and reject too fast ones. This results in abusive ones
+ being blocked as long as they don't slow down.
+
+ tcp-request connection accept if { src -f /etc/haproxy/whitelist.lst }
+ tcp-request connection track-sc0 src
+ tcp-request connection reject if { sc0_conn_rate gt 10 }
+
+ Example: enable the PROXY protocol for traffic coming from all known proxies.
+
+ tcp-request connection expect-proxy layer4 if { src -f proxies.lst }
+
+ See section 7 about ACL usage.
+
+ See also : "tcp-request session", "tcp-request content", "stick-table"
+
+tcp-request content <action> [{if | unless} <condition>]
+ Perform an action on a new session depending on a layer 4-7 condition
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ Arguments :
+ <action> defines the action to perform if the condition applies. See
+ below.
+
+ <condition> is a standard layer 4-7 ACL-based condition (see section 7).
+
+ A request's contents can be analyzed at an early stage of request processing
+ called "TCP content inspection". During this stage, ACL-based rules are
+ evaluated every time the request contents are updated, until either an
+ "accept", a "reject" or a "switch-mode" rule matches, or the TCP request
+ inspection delay expires with no matching rule.
+
+ The first difference between these rules and "tcp-request connection" rules
+ is that "tcp-request content" rules can make use of contents to take a
+ decision. Most often, these decisions will consider a protocol recognition or
+ validity. The second difference is that content-based rules can be used in
+ both frontends and backends. In case of HTTP keep-alive with the client, all
+ tcp-request content rules are evaluated again, so HAProxy keeps a record of
+ what sticky counters were assigned by a "tcp-request connection" versus a
+ "tcp-request content" rule, and flushes all the content-related ones after
+ processing an HTTP request, so that they may be evaluated again by the rules
+ being evaluated again for the next request. This is of particular importance
+ when the rule tracks some L7 information or when it is conditioned by an
+ L7-based ACL, since tracking may change between requests.
+
+ Content-based rules are evaluated in their exact declaration order. If no
+ rule matches or if there is no rule, the default action is to accept the
+ contents. There is no specific limit to the number of rules which may be
+ inserted.
+
+ While there is nothing mandatory about it, it is recommended to use the
+ track-sc0 in "tcp-request connection" rules, track-sc1 for "tcp-request
+ content" rules in the frontend, and track-sc2 for "tcp-request content"
+ rules in the backend, because that makes the configuration more readable
+ and easier to troubleshoot, but this is just a guideline and all counters
+ may be used everywhere.
+
+ The first keyword after "tcp-request content" in the syntax is the rule's
+ action, optionally followed by a varying number of arguments for the action.
+ The supported actions and their respective syntaxes are enumerated in
+ section 4.3 "Actions" (look for actions which tick "TCP RqCnt").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Note that the "if/unless" condition is optional. If no condition is set on
+ the action, it is simply performed unconditionally. That can be useful for
+ "track-sc*" actions as well as for changing the default action to a reject.
+
+ Note also that it is recommended to use a "tcp-request session" rule to track
+ information that does *not* depend on Layer 7 contents, especially for HTTP
+  frontends. Some HTTP processing is performed at the session level and may
+ lead to an early rejection of the requests. Thus, the tracking at the content
+ level may be disturbed in such case. A warning is emitted during startup to
+ prevent, as far as possible, such unreliable usage.
+
+ It is perfectly possible to match layer 7 contents with "tcp-request content"
+ rules from a TCP proxy, since HTTP-specific ACL matches are able to
+ preliminarily parse the contents of a buffer before extracting the required
+ data. If the buffered contents do not parse as a valid HTTP message, then the
+ ACL does not match. The parser which is involved there is exactly the same
+ as for all other HTTP processing, so there is no risk of parsing something
+ differently. In an HTTP frontend or an HTTP backend, it is guaranteed that
+ HTTP contents will always be immediately present when the rule is evaluated
+ first because the HTTP parsing is performed in the early stages of the
+ connection processing, at the session level. But for such proxies, using
+ "http-request" rules is much more natural and recommended.
+
+ Tracking layer7 information is also possible provided that the information
+ are present when the rule is processed. The rule processing engine is able to
+ wait until the inspect delay expires when the data to be tracked is not yet
+ available.
+
+ Example:
+ tcp-request content use-service lua.deny if { src -f /etc/haproxy/blacklist.lst }
+
+ Example:
+ tcp-request content set-var(sess.my_var) src
+ tcp-request content set-var-fmt(sess.from) %[src]:%[src_port]
+ tcp-request content unset-var(sess.my_var2)
+
+ Example:
+ # Accept HTTP requests containing a Host header saying "example.com"
+ # and reject everything else. (Only works for HTTP/1 connections)
+ acl is_host_com hdr(Host) -i example.com
+ tcp-request inspect-delay 30s
+ tcp-request content accept if is_host_com
+ tcp-request content reject
+
+ # Accept HTTP requests containing a Host header saying "example.com"
+ # and reject everything else. (works for HTTP/1 and HTTP/2 connections)
+ acl is_host_com hdr(Host) -i example.com
+ tcp-request inspect-delay 5s
+ tcp-request switch-mode http if HTTP
+ tcp-request reject # non-HTTP traffic is implicit here
+ ...
+ http-request reject unless is_host_com
+
+ Example:
+ # reject SMTP connection if client speaks first
+ tcp-request inspect-delay 30s
+ acl content_present req.len gt 0
+ tcp-request content reject if content_present
+
+ # Forward HTTPS connection only if client speaks
+ tcp-request inspect-delay 30s
+ acl content_present req.len gt 0
+ tcp-request content accept if content_present
+ tcp-request content reject
+
+ Example:
+ # Track the last IP(stick-table type string) from X-Forwarded-For
+ tcp-request inspect-delay 10s
+ tcp-request content track-sc0 hdr(x-forwarded-for,-1)
+ # Or track the last IP(stick-table type ip|ipv6) from X-Forwarded-For
+ tcp-request content track-sc0 req.hdr_ip(x-forwarded-for,-1)
+
+ Example:
+ # track request counts per "base" (concatenation of Host+URL)
+ tcp-request inspect-delay 10s
+ tcp-request content track-sc0 base table req-rate
+
+ Example: track per-frontend and per-backend counters, block abusers at the
+ frontend when the backend detects abuse(and marks gpc0).
+
+ frontend http
+ # Use General Purpose Counter 0 in SC0 as a global abuse counter
+ # protecting all our sites
+ stick-table type ip size 1m expire 5m store gpc0
+ tcp-request connection track-sc0 src
+ tcp-request connection reject if { sc0_get_gpc0 gt 0 }
+ ...
+ use_backend http_dynamic if { path_end .php }
+
+ backend http_dynamic
+ # if a source makes too fast requests to this dynamic site (tracked
+ # by SC1), block it globally in the frontend.
+ stick-table type ip size 1m expire 5m store http_req_rate(10s)
+ acl click_too_fast sc1_http_req_rate gt 10
+ acl mark_as_abuser sc0_inc_gpc0(http) gt 0
+ tcp-request content track-sc1 src
+ tcp-request content reject if click_too_fast mark_as_abuser
+
+ See section 7 about ACL usage.
+
+ See also : "tcp-request connection", "tcp-request session",
+ "tcp-request inspect-delay", and "http-request".
+
+tcp-request inspect-delay <timeout>
+ Set the maximum allowed time to wait for data during content inspection
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | yes | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ People using HAProxy primarily as a TCP relay are often worried about the
+ risk of passing any type of protocol to a server without any analysis. In
+ order to be able to analyze the request contents, we must first withhold
+ the data then analyze them. This statement simply enables withholding of
+ data for at most the specified amount of time.
+
+ TCP content inspection applies very early when a connection reaches a
+ frontend, then very early when the connection is forwarded to a backend. This
+ means that a connection may experience a first delay in the frontend and a
+ second delay in the backend if both have tcp-request rules.
+
+ Note that when performing content inspection, HAProxy will evaluate the whole
+ rules for every new chunk which gets in, taking into account the fact that
+ those data are partial. If no rule matches before the aforementioned delay,
+ a last check is performed upon expiration, this time considering that the
+ contents are definitive. If no delay is set, HAProxy will not wait at all
+ and will immediately apply a verdict based on the available information.
+ Obviously this is unlikely to be very useful and might even be racy, so such
+ setups are not recommended.
+
+  Note the inspection delay is shortened if a connection error or shutdown is
+ experienced or if the request buffer appears as full.
+
+ As soon as a rule matches, the request is released and continues as usual. If
+ the timeout is reached and no rule matches, the default policy will be to let
+ it pass through unaffected.
+
+ For most protocols, it is enough to set it to a few seconds, as most clients
+ send the full request immediately upon connection. Add 3 or more seconds to
+ cover TCP retransmits but that's all. For some protocols, it may make sense
+ to use large values, for instance to ensure that the client never talks
+ before the server (e.g. SMTP), or to wait for a client to talk before passing
+ data to the server (e.g. SSL). Note that the client timeout must cover at
+ least the inspection delay, otherwise it will expire first. If the client
+ closes the connection or if the buffer is full, the delay immediately expires
+ since the contents will not be able to change anymore.
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Proxies inherit this value from their defaults section.
+
+ See also : "tcp-request content accept", "tcp-request content reject",
+ "timeout client".
+
+
+tcp-request session <action> [{if | unless} <condition>]
+ Perform an action on a validated session depending on a layer 5 condition
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | yes | yes | no
+
+ Arguments :
+ <action> defines the action to perform if the condition applies. See
+ below.
+
+ <condition> is a standard layer5-only ACL-based condition (see section 7).
+
+ Once a session is validated, (i.e. after all handshakes have been completed),
+ it is possible to evaluate some conditions to decide whether this session
+ must be accepted or dropped or have its counters tracked. Those conditions
+ cannot make use of any data contents because no buffers are allocated yet and
+ the processing cannot wait at this stage. The main use case is to copy some
+ early information into variables (since variables are accessible in the
+ session), or to keep track of some information collected after the handshake,
+ such as SSL-level elements (SNI, ciphers, client cert's CN) or information
+ from the PROXY protocol header (e.g. track a source forwarded this way). The
+ extracted information can thus be copied to a variable or tracked using
+ "track-sc" rules. Of course it is also possible to decide to accept/reject as
+ with other rulesets. Most operations performed here could also be performed
+ in "tcp-request content" rules, except that in HTTP these rules are evaluated
+ for each new request, and that might not always be acceptable. For example a
+ rule might increment a counter on each evaluation. It would also be possible
+ that a country is resolved by geolocation from the source IP address,
+ assigned to a session-wide variable, then the source address rewritten from
+ an HTTP header for all requests. If some contents need to be inspected in
+ order to take the decision, the "tcp-request content" statements must be used
+ instead.
+
+ The "tcp-request session" rules are evaluated in their exact declaration
+ order. If no rule matches or if there is no rule, the default action is to
+ accept the incoming session. There is no specific limit to the number of
+ rules which may be inserted.
+
+ The first keyword after "tcp-request session" in the syntax is the rule's
+ action, optionally followed by a varying number of arguments for the action.
+ The supported actions and their respective syntaxes are enumerated in
+ section 4.3 "Actions" (look for actions which tick "TCP RqSes").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Note that the "if/unless" condition is optional. If no condition is set on
+ the action, it is simply performed unconditionally. That can be useful for
+ "track-sc*" actions as well as for changing the default action to a reject.
+
+ Example: track the original source address by default, or the one advertised
+ in the PROXY protocol header for connection coming from the local
+ proxies. The first connection-level rule enables receipt of the
+ PROXY protocol for these ones, the second rule tracks whatever
+ address we decide to keep after optional decoding.
+
+ tcp-request connection expect-proxy layer4 if { src -f proxies.lst }
+ tcp-request session track-sc0 src
+
+ Example: accept all sessions from white-listed hosts, reject too fast
+ sessions without counting them, and track accepted sessions.
+ This results in session rate being capped from abusive sources.
+
+ tcp-request session accept if { src -f /etc/haproxy/whitelist.lst }
+ tcp-request session reject if { src_sess_rate gt 10 }
+ tcp-request session track-sc0 src
+
+ Example: accept all sessions from white-listed hosts, count all other
+ sessions and reject too fast ones. This results in abusive ones
+ being blocked as long as they don't slow down.
+
+ tcp-request session accept if { src -f /etc/haproxy/whitelist.lst }
+ tcp-request session track-sc0 src
+ tcp-request session reject if { sc0_sess_rate gt 10 }
+
+ See section 7 about ACL usage.
+
+ See also : "tcp-request connection", "tcp-request content", "stick-table"
+
+tcp-response content <action> [{if | unless} <condition>]
+ Perform an action on a session response depending on a layer 4-7 condition
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | no | yes | yes
+
+ Arguments :
+ <action> defines the action to perform if the condition applies. See
+ below.
+
+ <condition> is a standard layer 4-7 ACL-based condition (see section 7).
+
+ Response contents can be analyzed at an early stage of response processing
+ called "TCP content inspection". During this stage, ACL-based rules are
+ evaluated every time the response contents are updated, until either a final
+ rule matches, or a TCP response inspection delay is set and expires with no
+ matching rule.
+
+ Most often, these decisions will consider a protocol recognition or validity.
+
+ Content-based rules are evaluated in their exact declaration order. If no
+ rule matches or if there is no rule, the default action is to accept the
+ contents. There is no specific limit to the number of rules which may be
+ inserted.
+
+ The first keyword after "tcp-response content" in the syntax is the rule's
+ action, optionally followed by a varying number of arguments for the action.
+ The supported actions and their respective syntaxes are enumerated in
+ section 4.3 "Actions" (look for actions which tick "TCP RsCnt").
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Rules defined in the defaults section are evaluated before ones in the
+ associated proxy section. To avoid ambiguities, in this case the same
+ defaults section cannot be used by proxies with the frontend capability and
+ by proxies with the backend capability. It means a listen section cannot use
+ a defaults section defining such rules.
+
+ Note that the "if/unless" condition is optional. If no condition is set on
+ the action, it is simply performed unconditionally. That can be useful for
+ changing the default action to a reject.
+
+ Several types of actions are supported :
+
+ It is perfectly possible to match layer 7 contents with "tcp-response
+ content" rules, but then it is important to ensure that a full response has
+ been buffered, otherwise no contents will match. In order to achieve this,
+ the best solution involves detecting the HTTP protocol during the inspection
+ period.
+
+ See section 7 about ACL usage.
+
+ See also : "tcp-request content", "tcp-response inspect-delay"
+
+tcp-response inspect-delay <timeout>
+ Set the maximum allowed time to wait for a response during content inspection
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes(!) | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ This directive is only available from named defaults sections, not anonymous
+ ones. Proxies inherit this value from their defaults section.
+
+ See also : "tcp-response content", "tcp-request inspect-delay".
+
+
+timeout check <timeout>
+ Set additional check timeout, but only after a connection has been already
+ established.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections: defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments:
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ If set, HAProxy uses min("timeout connect", "inter") as a connect timeout
+ for check and "timeout check" as an additional read timeout. The "min" is
+ used so that people running with *very* long "timeout connect" (e.g. those
+ who needed this due to the queue or tarpit) do not slow down their checks.
+ (Please also note that there is no valid reason to have such long connect
+ timeouts, because "timeout queue" and "timeout tarpit" can always be used to
+ avoid that).
+
+ If "timeout check" is not set HAProxy uses "inter" for complete check
+ timeout (connect + read) exactly like all <1.3.15 versions.
+
+ In most cases check request is much simpler and faster to handle than normal
+ requests and people may want to kick out laggy servers so this timeout should
+ be smaller than "timeout server".
+
+ This parameter is specific to backends, but can be specified once for all in
+ "defaults" sections. This is in fact one of the easiest solutions not to
+ forget about it.
+
+ See also: "timeout connect", "timeout queue", "timeout server",
+ "timeout tarpit".
+
+
+timeout client <timeout>
+ Set the maximum inactivity time on the client side.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The inactivity timeout applies when the client is expected to acknowledge or
+ send data. In HTTP mode, this timeout is particularly important to consider
+ during the first phase, when the client sends the request, and during the
+ response while it is reading data sent by the server. That said, for the
+ first phase, it is preferable to set the "timeout http-request" to better
+ protect HAProxy from Slowloris like attacks. The value is specified in
+ milliseconds by default, but can be in any other unit if the number is
+ suffixed by the unit, as specified at the top of this document. In TCP mode
+ (and to a lesser extent, in HTTP mode), it is highly recommended that the
+ client timeout remains equal to the server timeout in order to avoid complex
+ situations to debug. It is a good practice to cover one or several TCP packet
+ losses by specifying timeouts that are slightly above multiples of 3 seconds
+ (e.g. 4 or 5 seconds). If some long-lived streams are mixed with short-lived
+ streams (e.g. WebSocket and HTTP), it's worth considering "timeout tunnel",
+ which overrides "timeout client" and "timeout server" for tunnels, as well as
+ "timeout client-fin" for half-closed connections.
+
+ This parameter is specific to frontends, but can be specified once for all in
+ "defaults" sections. This is in fact one of the easiest solutions not to
+ forget about it. An unspecified timeout results in an infinite timeout, which
+ is not recommended. Such a usage is accepted and works but reports a warning
+ during startup because it may result in accumulation of expired sessions in
+ the system if the system's timeouts are not configured either.
+
+ See also : "timeout server", "timeout tunnel", "timeout http-request".
+
+
+timeout client-fin <timeout>
+ Set the inactivity timeout on the client side for half-closed connections.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The inactivity timeout applies when the client is expected to acknowledge or
+ send data while one direction is already shut down. This timeout is different
+ from "timeout client" in that it only applies to connections which are closed
+ in one direction. This is particularly useful to avoid keeping connections in
+ FIN_WAIT state for too long when clients do not disconnect cleanly. This
+ problem is particularly common with long connections such as RDP or WebSocket.
+ Note that this timeout can override "timeout tunnel" when a connection shuts
+ down in one direction. It is applied to idle HTTP/2 connections once a GOAWAY
+ frame was sent, often indicating an expectation that the connection quickly
+ ends.
+
+ This parameter is specific to frontends, but can be specified once for all in
+ "defaults" sections. By default it is not set, so half-closed connections
+ will use the other timeouts (timeout.client or timeout.tunnel).
+
+ See also : "timeout client", "timeout server-fin", and "timeout tunnel".
+
+
+timeout client-hs <timeout>
+ Set the maximum time to wait for a client TLS handshake to complete. This is
+ usable both for TCP and QUIC connections.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ If this handshake timeout is not set, this is the client timeout which is used
+ in place.
+
+
+timeout connect <timeout>
+ Set the maximum time to wait for a connection attempt to a server to succeed.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ If the server is located on the same LAN as HAProxy, the connection should be
+ immediate (less than a few milliseconds). Anyway, it is a good practice to
+ cover one or several TCP packet losses by specifying timeouts that are
+ slightly above multiples of 3 seconds (e.g. 4 or 5 seconds). By default, the
+ connect timeout also presets both queue and tarpit timeouts to the same value
+ if these have not been specified.
+
+ This parameter is specific to backends, but can be specified once for all in
+ "defaults" sections. This is in fact one of the easiest solutions not to
+ forget about it. An unspecified timeout results in an infinite timeout, which
+ is not recommended. Such a usage is accepted and works but reports a warning
+ during startup because it may result in accumulation of failed sessions in
+ the system if the system's timeouts are not configured either.
+
+ See also: "timeout check", "timeout queue", "timeout server", "timeout tarpit".
+
+
+timeout http-keep-alive <timeout>
+ Set the maximum allowed time to wait for a new HTTP request to appear
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ By default, the time to wait for a new request in case of keep-alive is set
+ by "timeout http-request". However this is not always convenient because some
+ people want very short keep-alive timeouts in order to release connections
+ faster, and others prefer to have larger ones but still have short timeouts
+ once the request has started to present itself.
+
+ The "http-keep-alive" timeout covers these needs. It will define how long to
+ wait for a new HTTP request to start coming after a response was sent. Once
+ the first byte of request has been seen, the "http-request" timeout is used
+ to wait for the complete request to come. Note that empty lines prior to a
+ new request do not refresh the timeout and are not counted as a new request.
+
+ There is also another difference between the two timeouts : when a connection
+ expires during timeout http-keep-alive, no error is returned, the connection
+ just closes. If the connection expires in "http-request" while waiting for a
+ request to complete, a HTTP 408 error is returned.
+
+ In general it is optimal to set this value to a few tens to hundreds of
+ milliseconds, to allow users to fetch all objects of a page at once but
+ without waiting for further clicks. Also, if set to a very small value (e.g.
+ 1 millisecond) it will probably only accept pipelined requests but not the
+ non-pipelined ones. It may be a nice trade-off for very large sites running
+ with tens to hundreds of thousands of clients.
+
+ If this parameter is not set, the "http-request" timeout applies, and if both
+ are not set, "timeout client" still applies at the lower level. It should be
+ set in the frontend to take effect, unless the frontend is in TCP mode, in
+ which case the HTTP backend's timeout will be used.
+
+ See also : "timeout http-request", "timeout client".
+
+
+timeout http-request <timeout>
+ Set the maximum allowed time to wait for a complete HTTP request
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ In order to offer DoS protection, it may be required to lower the maximum
+ accepted time to receive a complete HTTP request without affecting the client
+ timeout. This helps protecting against established connections on which
+ nothing is sent. The client timeout cannot offer a good protection against
+ this abuse because it is an inactivity timeout, which means that if the
+ attacker sends one character every now and then, the timeout will not
+ trigger. With the HTTP request timeout, no matter what speed the client
+ types, the request will be aborted if it does not complete in time. When the
+ timeout expires, an HTTP 408 response is sent to the client to inform it
+ about the problem, and the connection is closed. The logs will report
+ termination codes "cR". Some recent browsers are having problems with this
+ standard, well-documented behavior, so it might be needed to hide the 408
+ code using "option http-ignore-probes" or "errorfile 408 /dev/null". See
+ more details in the explanations of the "cR" termination code in section 8.5.
+
+ By default, this timeout only applies to the header part of the request,
+ and not to any data. As soon as the empty line is received, this timeout is
+ not used anymore. When combined with "option http-buffer-request", this
+ timeout also applies to the body of the request.
+ It is used again on keep-alive connections to wait for a second
+ request if "timeout http-keep-alive" is not set.
+
+ Generally it is enough to set it to a few seconds, as most clients send the
+ full request immediately upon connection. Add 3 or more seconds to cover TCP
+ retransmits but that's all. Setting it to very low values (e.g. 50 ms) will
+ generally work on local networks as long as there are no packet losses. This
+ will prevent people from sending bare HTTP requests using telnet.
+
+ If this parameter is not set, the client timeout still applies between each
+ chunk of the incoming request. It should be set in the frontend to take
+ effect, unless the frontend is in TCP mode, in which case the HTTP backend's
+ timeout will be used.
+
+ See also : "errorfile", "http-ignore-probes", "timeout http-keep-alive", and
+ "timeout client", "option http-buffer-request".
+
+
+timeout queue <timeout>
+ Set the maximum time to wait in the queue for a connection slot to be free
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ When a server's maxconn is reached, connections are left pending in a queue
+ which may be server-specific or global to the backend. In order not to wait
+ indefinitely, a timeout is applied to requests pending in the queue. If the
+ timeout is reached, it is considered that the request will almost never be
+ served, so it is dropped and a 503 error is returned to the client.
+
+ The "timeout queue" statement allows to fix the maximum time for a request to
+ be left pending in a queue. If unspecified, the same value as the backend's
+ connection timeout ("timeout connect") is used, for backwards compatibility
+ with older versions with no "timeout queue" parameter.
+
+ See also : "timeout connect".
+
+
+timeout server <timeout>
+ Set the maximum inactivity time on the server side.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The inactivity timeout applies when the server is expected to acknowledge or
+ send data. In HTTP mode, this timeout is particularly important to consider
+ during the first phase of the server's response, when it has to send the
+ headers, as it directly represents the server's processing time for the
+ request. To find out what value to put there, it's often good to start with
+ what would be considered as unacceptable response times, then check the logs
+ to observe the response time distribution, and adjust the value accordingly.
+
+ The value is specified in milliseconds by default, but can be in any other
+ unit if the number is suffixed by the unit, as specified at the top of this
+ document. In TCP mode (and to a lesser extent, in HTTP mode), it is highly
+ recommended that the client timeout remains equal to the server timeout in
+ order to avoid complex situations to debug. Whatever the expected server
+ response times, it is a good practice to cover at least one or several TCP
+ packet losses by specifying timeouts that are slightly above multiples of 3
+ seconds (e.g. 4 or 5 seconds minimum). If some long-lived streams are mixed
+ with short-lived streams (e.g. WebSocket and HTTP), it's worth considering
+ "timeout tunnel", which overrides "timeout client" and "timeout server" for
+ tunnels.
+
+ This parameter is specific to backends, but can be specified once for all in
+ "defaults" sections. This is in fact one of the easiest solutions not to
+ forget about it. An unspecified timeout results in an infinite timeout, which
+ is not recommended. Such a usage is accepted and works but reports a warning
+ during startup because it may result in accumulation of expired sessions in
+ the system if the system's timeouts are not configured either.
+
+ See also : "timeout client" and "timeout tunnel".
+
+
+timeout server-fin <timeout>
+ Set the inactivity timeout on the server side for half-closed connections.
+
+ May be used in the following contexts: tcp, http, log
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The inactivity timeout applies when the server is expected to acknowledge or
+ send data while one direction is already shut down. This timeout is different
+ from "timeout server" in that it only applies to connections which are closed
+ in one direction. This is particularly useful to avoid keeping connections in
+ FIN_WAIT state for too long when a remote server does not disconnect cleanly.
+ This problem is particularly common with long connections such as RDP or
+ WebSocket.
+ Note that this timeout can override "timeout tunnel" when a connection shuts
+ down in one direction. This setting was provided for completeness, but in most
+ situations, it should not be needed.
+
+ This parameter is specific to backends, but can be specified once for all in
+ "defaults" sections. By default it is not set, so half-closed connections
+ will use the other timeouts (timeout.server or timeout.tunnel).
+
+ See also : "timeout client-fin", "timeout server", and "timeout tunnel".
+
+
+timeout tarpit <timeout>
+ Set the duration for which tarpitted connections will be maintained
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | yes
+
+ Arguments :
+ <timeout> is the tarpit duration specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ When a connection is tarpitted using "http-request tarpit", it is maintained
+ open with no activity for a certain amount of time, then closed. "timeout
+ tarpit" defines how long it will be maintained open.
+
+ The value is specified in milliseconds by default, but can be in any other
+ unit if the number is suffixed by the unit, as specified at the top of this
+ document. If unspecified, the same value as the backend's connection timeout
+ ("timeout connect") is used, for backwards compatibility with older versions
+ with no "timeout tarpit" parameter.
+
+ See also : "timeout connect".
+
+
+timeout tunnel <timeout>
+ Set the maximum inactivity time on the client and server side for tunnels.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments :
+ <timeout> is the timeout value specified in milliseconds by default, but
+ can be in any other unit if the number is suffixed by the unit,
+ as explained at the top of this document.
+
+ The tunnel timeout applies when a bidirectional connection is established
+ between a client and a server, and the connection remains inactive in both
+ directions. This timeout supersedes both the client and server timeouts once
+ the connection becomes a tunnel. In TCP, this timeout is used as soon as no
+ analyzer remains attached to either connection (e.g. tcp content rules are
+ accepted). In HTTP, this timeout is used when a connection is upgraded (e.g.
+ when switching to the WebSocket protocol, or forwarding a CONNECT request
+ to a proxy), or after the first response when no keepalive/close option is
+ specified.
+
+ Since this timeout is usually used in conjunction with long-lived connections,
+ it usually is a good idea to also set "timeout client-fin" to handle the
+ situation where a client suddenly disappears from the net and does not
+ acknowledge a close, or sends a shutdown and does not acknowledge pending
+ data anymore. This can happen in lossy networks where firewalls are present,
+ and is detected by the presence of large amounts of sessions in a FIN_WAIT
+ state.
+
+ The value is specified in milliseconds by default, but can be in any other
+ unit if the number is suffixed by the unit, as specified at the top of this
+ document. Whatever the expected normal idle time, it is a good practice to
+ cover at least one or several TCP packet losses by specifying timeouts that
+ are slightly above multiples of 3 seconds (e.g. 4 or 5 seconds minimum).
+
+ This parameter is specific to backends, but can be specified once for all in
+ "defaults" sections. This is in fact one of the easiest solutions not to
+ forget about it.
+
+ Example :
+ defaults http
+ option http-server-close
+ timeout connect 5s
+ timeout client 30s
+ timeout client-fin 30s
+ timeout server 30s
+ timeout tunnel 1h # timeout to use with WebSocket and CONNECT
+
+ See also : "timeout client", "timeout client-fin", "timeout server".
+
+
+transparent (deprecated)
+ Enable client-side transparent proxying
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | no | yes | yes
+
+ Arguments : none
+
+ This keyword was introduced in order to provide layer 7 persistence to layer
+ 3 load balancers. The idea is to use the OS's ability to redirect an incoming
+ connection for a remote address to a local process (here HAProxy), and let
+ this process know what address was initially requested. When this option is
+ used, sessions without cookies will be forwarded to the original destination
+ IP address of the incoming request (which should match that of another
+ equipment), while requests with cookies will still be forwarded to the
+ appropriate server.
+
+ The "transparent" keyword is deprecated, use "option transparent" instead.
+
+ Note that contrary to a common belief, this option does NOT make HAProxy
+ present the client's IP to the server when establishing the connection.
+
+ See also: "option transparent"
+
+unique-id-format <string>
+ Generate a unique ID for each request.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <string> is a log-format string.
+
+ This keyword creates an ID for each request using the custom log format. A
+ unique ID is useful to trace a request passing through many components of
+ a complex infrastructure. The newly created ID may also be logged using the
+ %ID tag in the log-format string.
+
+ The format should be composed from elements that are guaranteed to be
+ unique when combined together. For instance, if multiple HAProxy instances
+ are involved, it might be important to include the node name. It is often
+ needed to log the incoming connection's source and destination addresses
+ and ports. Note that since multiple requests may be performed over the same
+ connection, including a request counter may help differentiate them.
+ Similarly, a timestamp may protect against a rollover of the counter.
+ Logging the process ID will avoid collisions after a service restart.
+
+ It is recommended to use hexadecimal notation for many fields since it
+ makes them more compact and saves space in logs.
+
+ Example:
+
+ unique-id-format %{+X}o\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid
+
+ will generate:
+
+ 7F000001:8296_7F00001E:1F90_4F7B0A69_0003:790A
+
+ See also: "unique-id-header"
+
+unique-id-header <name>
+ Add a unique ID header in the HTTP request.
+
+ May be used in the following contexts: http
+
+ May be used in sections : defaults | frontend | listen | backend
+ yes | yes | yes | no
+
+ Arguments :
+ <name> is the name of the header.
+
+ Add a unique-id header in the HTTP request sent to the server, using the
+ unique-id-format. It can't work if the unique-id-format doesn't exist.
+
+ Example:
+
+ unique-id-format %{+X}o\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid
+ unique-id-header X-Unique-ID
+
+ will generate:
+
+ X-Unique-ID: 7F000001:8296_7F00001E:1F90_4F7B0A69_0003:790A
+
+ See also: "unique-id-format"
+
+use_backend <backend> [{if | unless} <condition>]
+ Switch to a specific backend if/unless an ACL-based condition is matched.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | yes | yes | no
+
+ Arguments :
+ <backend> is the name of a valid backend or "listen" section, or a
+ "log-format" string resolving to a backend name.
+
+ <condition> is a condition composed of ACLs, as described in section 7. If
+ it is omitted, the rule is unconditionally applied.
+
+ When doing content-switching, connections arrive on a frontend and are then
+ dispatched to various backends depending on a number of conditions. The
+ relation between the conditions and the backends is described with the
+ "use_backend" keyword. While it is normally used with HTTP processing, it can
+ also be used in pure TCP, either without content using stateless ACLs (e.g.
+ source address validation) or combined with a "tcp-request" rule to wait for
+ some payload.
+
+ There may be as many "use_backend" rules as desired. All of these rules are
+ evaluated in their declaration order, and the first one which matches will
+ assign the backend.
+
+ In the first form, the backend will be used if the condition is met. In the
+ second form, the backend will be used if the condition is not met. If no
+ condition is valid, the backend defined with "default_backend" will be used.
+ If no default backend is defined, either the servers in the same section are
+ used (in case of a "listen" section) or, in case of a frontend, no server is
+ used and a 503 service unavailable response is returned.
+
+ Note that it is possible to switch from a TCP frontend to an HTTP backend. In
+ this case, either the frontend has already checked that the protocol is HTTP,
+ and backend processing will immediately follow, or the backend will wait for
+ a complete HTTP request to get in. This feature is useful when a frontend
+ must decode several protocols on a unique port, one of them being HTTP.
+
+ When <backend> is a simple name, it is resolved at configuration time, and an
+ error is reported if the specified backend does not exist. If <backend> is
+ a log-format string instead, no check may be done at configuration time, so
+ the backend name is resolved dynamically at run time. If the resulting
+ backend name does not correspond to any valid backend, no other rule is
+ evaluated, and the default_backend directive is applied instead. Note that
+ when using dynamic backend names, it is highly recommended to use a prefix
+ that no other backend uses in order to ensure that an unauthorized backend
+ cannot be forced from the request.
+
+ It is worth mentioning that "use_backend" rules with an explicit name are
+ used to detect the association between frontends and backends to compute the
+ backend's "fullconn" setting. This cannot be done for dynamic names.
+
+ See also: "default_backend", "tcp-request", "fullconn", "log-format", and
+ section 7 about ACLs.
+
+use-fcgi-app <name>
+ Defines the FastCGI application to use for the backend.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <name> is the name of the FastCGI application to use.
+
+ See section 10.1 about FastCGI application setup for details.
+
+use-server <server> if <condition>
+use-server <server> unless <condition>
+ Only use a specific server if/unless an ACL-based condition is matched.
+
+ May be used in the following contexts: tcp, http
+
+ May be used in sections : defaults | frontend | listen | backend
+ no | no | yes | yes
+
+ Arguments :
+ <server> is the name of a valid server in the same backend section
+ or a "log-format" string resolving to a server name.
+
+ <condition> is a condition composed of ACLs, as described in section 7.
+
+ By default, connections which arrive to a backend are load-balanced across
+ the available servers according to the configured algorithm, unless a
+ persistence mechanism such as a cookie is used and found in the request.
+
+ Sometimes it is desirable to forward a particular request to a specific
+ server without having to declare a dedicated backend for this server. This
+ can be achieved using the "use-server" rules. These rules are evaluated after
+ the "redirect" rules and before evaluating cookies, and they have precedence
+ on them. There may be as many "use-server" rules as desired. All of these
+ rules are evaluated in their declaration order, and the first one which
+ matches will assign the server.
+
+ If a rule designates a server which is down, and "option persist" is not used
+ and no force-persist rule was validated, it is ignored and evaluation goes on
+ with the next rules until one matches.
+
+ In the first form, the server will be used if the condition is met. In the
+ second form, the server will be used if the condition is not met. If no
+ condition is valid, the processing continues and the server will be assigned
+ according to other persistence mechanisms.
+
+ Note that even if a rule is matched, cookie processing is still performed but
+ does not assign the server. This allows prefixed cookies to have their prefix
+ stripped.
+
+ The "use-server" statement works both in HTTP and TCP mode. This makes it
+ suitable for use with content-based inspection. For instance, a server could
+ be selected in a farm according to the TLS SNI field when using protocols with
+ implicit TLS (also see "req.ssl_sni"). And if these servers have their weight
+ set to zero, they will not be used for other traffic.
+
+ Example :
+ # intercept incoming TLS requests based on the SNI field
+ use-server www if { req.ssl_sni -i www.example.com }
+ server www 192.168.0.1:443 weight 0
+ use-server mail if { req.ssl_sni -i mail.example.com }
+ server mail 192.168.0.1:465 weight 0
+ use-server imap if { req.ssl_sni -i imap.example.com }
+ server imap 192.168.0.1:993 weight 0
+ # all the rest is forwarded to this server
+ server default 192.168.0.2:443 check
+
+ When <server> is a simple name, it is checked against existing servers in the
+ configuration and an error is reported if the specified server does not exist.
+ If it is a log-format, no check is performed when parsing the configuration,
+ and if we can't resolve a valid server name at runtime but the use-server rule
+ was conditioned by an ACL returning true, no other use-server rule is applied
+ and we fall back to load balancing.
+
+ See also: "use_backend", section 5 about server and section 7 about ACLs.
+
+
+4.3. Actions keywords matrix
+----------------------------
+
+Several rule sets are evaluated at various stages of the request or response
+processing, and for each rule found in these rule sets, an action may be
+executed if the optional condition is met.
+
+A large number of actions are provided by default, they can modify contents,
+accept/block processing, change internal states etc. And it is possible to
+define new actions in Lua (in which case their names will always be prefixed
+with "lua.").
+
+While historically some actions did only exist in specific rule sets, nowadays
+many actions are usable with many rule sets. The listing in this section will
+indicate for each supported action where it may be used, by ticking the
+corresponding abbreviated entry names among the following rule sets:
+
+ - TCP RqCon: the action is valid for "tcp-request connection" rules
+ - TCP RqSes: the action is valid for "tcp-request session" rules
+ - TCP RqCnt: the action is valid for "tcp-request content" rules
+ - TCP RsCnt: the action is valid for "tcp-response content" rules
+ - HTTP Req: the action is valid for "http-request" rules
+ - HTTP Res: the action is valid for "http-response" rules
+ - HTTP Aft: the action is valid for "http-after-response" rules
+
+The same abbreviations are used in the reference section 4.4 below.
+
+
+ keyword TCP: RqCon RqSes RqCnt RsCnt HTTP: Req Res Aft
+----------------------+-----------+-----+-----+------+----------+---+----
+accept X X X X - - -
+add-acl - - - - X X -
+add-header - - - - X X X
+allow - - - - X X X
+attach-srv - X - - - - -
+auth - - - - X - -
+cache-store - - - - - X -
+cache-use - - - - X - -
+capture - - X - X X X
+close - - - X - - -
+del-acl - - - - X X -
+del-header - - - - X X X
+del-map - - - - X X X
+deny - - - - X X -
+disable-l7-retry - - - - X - -
+do-resolve - - X - X - -
+early-hint - - - - X - -
+expect-netscaler-cip X - - - - - -
+expect-proxy layer4 X - - - - - -
+normalize-uri - - - - X - -
+redirect - - - - X X -
+reject X X X X X - -
+replace-header - - - - X X X
+replace-path - - - - X - -
+replace-pathq - - - - X - -
+replace-uri - - - - X - -
+replace-value - - - - X X X
+return - - - - X X -
+sc-add-gpc X X X X X X X
+--keyword---------------TCP--RqCon-RqSes-RqCnt-RsCnt---HTTP--Req-Res-Aft-
+sc-inc-gpc X X X X X X X
+sc-inc-gpc0 X X X X X X X
+sc-inc-gpc1 X X X X X X X
+sc-set-gpt X X X X X X X
+sc-set-gpt0 X X X X X X X
+send-spoe-group - - X X X X -
+set-bandwidth-limit - - X X X X -
+set-dst X X X - X - -
+set-dst-port X X X - X - -
+set-header - - - - X X X
+set-log-level - - X X X X X
+set-map - - - - X X X
+set-mark X X X X X X -
+set-method - - - - X - -
+set-nice - - X X X X -
+set-path - - - - X - -
+set-pathq - - - - X - -
+set-priority-class - - X - X - -
+set-priority-offset - - X - X - -
+--keyword---------------TCP--RqCon-RqSes-RqCnt-RsCnt---HTTP--Req-Res-Aft-
+set-query - - - - X - -
+set-src X X X - X - -
+set-src-port X X X - X - -
+set-status - - - - - X X
+set-timeout - - - - X X -
+set-tos X X X X X X -
+set-uri - - - - X - -
+set-var X X X X X X X
+set-var-fmt X X X X X X X
+silent-drop X X X X X X -
+strict-mode - - - - X X X
+switch-mode - - X - - - -
+tarpit - - - - X - -
+track-sc1 X X X - X X -
+track-sc2 X X X - X X -
+unset-var X X X X X X X
+use-service - - X - X - -
+wait-for-body - - - - X X -
+wait-for-handshake - - - - X - -
+--keyword---------------TCP--RqCon-RqSes-RqCnt-RsCnt---HTTP--Req-Res-Aft-
+
+
+4.4. Alphabetically sorted actions reference
+--------------------------------------------
+
+This section provides a detailed description of each action and its usage,
+using the same ruleset terminology marking as described in section 4.3 above.
+
+
+accept
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt | HTTP Req| Res| Aft
+ X | X | X | X | - | - | -
+
+ This stops the evaluation of the rules and lets the request or response pass
+ the check. This action is final, i.e. no further rules from the same rule set
+ are evaluated for the current section. There is no difference between this
+ and the "allow" action except that for historical compatibility, "accept" is
+ used for TCP rules and "allow" for HTTP rules. See also the "allow" action
+ below.
+
+
+add-acl(<file-name>) <key fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This is used to add a new entry into an ACL. The ACL must be loaded from a
+ file (even a dummy empty file). The file name of the ACL to be updated is
+ passed between parentheses. It takes one argument: <key fmt>, which follows
+ log-format rules, to collect content of the new entry. It performs a lookup
+ in the ACL before insertion, to avoid duplicated (or more) values.
+ It is the equivalent of the "add acl" command from the stats socket, but can
+ be triggered by an HTTP request.
+
+
+add-header <name> <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This appends an HTTP header field whose name is specified in <name> and
+ whose value is defined by <fmt> which follows the log-format rules (see
+ Custom Log Format in section 8.2.4). This is particularly useful to pass
+ connection-specific information to the server (e.g. the client's SSL
+ certificate), or to combine several headers into one. This rule is not
+ final, so it is possible to add other similar rules. Note that header
+ addition is performed immediately, so one rule might reuse the resulting
+ header from a previous rule.
+
+
+allow
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This stops the evaluation of the rules and lets the request pass the check.
+ This action is final, i.e. no further rules from the same rule set are
+ evaluated for the current section. There is no difference between this and
+ the "accept" action except that for historical compatibility, "accept" is
+ used for TCP rules and "allow" for HTTP rules. See also the "accept" action
+ above.
+
+
+attach-srv <srv> [name <expr>] [ EXPERIMENTAL ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | X | - | - | - | - | -
+
+ This is used to intercept the connection after proper HTTP/2 establishment.
+ The connection is reversed to the backend side and inserted into the idle
+ pool of server <srv>. This may only be used with servers having an 'rhttp@'
+ address.
+
+ An extra parameter <expr> can be specified. Its value is interpreted as a
+ sample expression to name the connection inside the server idle pool. When
+ routing an outgoing request through this server, this name will be matched
+ against the 'sni' parameter of the server line. Otherwise, the connection
+ will have no name and will only match requests without SNI.
+
+  This rule is only valid for a frontend in HTTP mode. Also all listeners must
+  not require a protocol different from HTTP/2.
+
+ Reverse HTTP is currently still in active development. Configuration
+ mechanism may change in the future. For this reason it is internally marked
+ as experimental, meaning that "expose-experimental-directives" must appear on
+ a line before this directive.
+
+auth [realm <realm>]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This stops the evaluation of the rules and immediately responds with an
+ HTTP 401 or 407 error code to invite the user to present a valid user name
+ and password. No further "http-request" rules are evaluated. An optional
+ "realm" parameter is supported, it sets the authentication realm that is
+ returned with the response (typically the application's name).
+
+ The corresponding proxy's error message is used. It may be customized using
+ an "errorfile" or an "http-error" directive. For 401 responses, all
+ occurrences of the WWW-Authenticate header are removed and replaced by a new
+ one with a basic authentication challenge for realm "<realm>". For 407
+ responses, the same is done on the Proxy-Authenticate header. If the error
+ message must not be altered, consider to use "http-request return" rule
+ instead.
+
+ Example:
+ acl auth_ok http_auth_group(L1) G1
+ http-request auth unless auth_ok
+
+
+cache-store <name>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | - | X | -
+
+  Store an http-response within the cache. The storage of the response headers
+  is done at this step, which means you can use other http-response actions
+  to modify headers before or after the storage of the response. This action
+  is responsible for the setup of the cache storage filter.
+
+ See section 6.2 about cache setup.
+
+
+cache-use <name>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+  Try to deliver a cached object from the cache <name>. This directive is also
+  mandatory to store the response in the cache as it calculates the cache hash.
+  If you want to use a condition for both storage and delivery, it is a good
+  idea to put it after this one.
+
+ See section 6.2 about cache setup.
+
+
+capture <sample> [ len <length> | id <id> ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | X | X | X
+
+ This captures sample expression <sample> from the request or response buffer,
+ and converts it to a string of at most <len> characters. The resulting string
+  is stored into the next "capture" slot (either request or response), so it
+ will possibly appear next to some captured HTTP headers. It will then
+ automatically appear in the logs, and it will be possible to extract it using
+ sample fetch methods to feed it into headers or anything. The length should
+ be limited given that this size will be allocated for each capture during the
+ whole stream life. Note that the length is only usable with "http-request"
+ rules. Please check section 7.3 (Fetching samples), "capture request header"
+ and "capture response header" for more information.
+
+ If the keyword "id" is used instead of "len", the action tries to store the
+ captured string in a previously declared capture slot. This is useful to run
+ captures in backends. The slot id can be declared by a previous directive
+ "http-request capture" or with the "declare capture" keyword.
+
+  When using this action in a backend, please double check that the relevant
+  frontend(s) have the required capture slots, otherwise this rule will be
+  ignored at run time. This can't be detected at configuration parsing time due
+ to HAProxy's ability to dynamically resolve backend name at runtime.
+
+
+close
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | X | - | - | -
+
+ This is used to immediately close the connection with the server. No further
+ "tcp-response content" rules are evaluated. The main purpose of this action
+ is to force a connection to be finished between a client and a server after
+ an exchange when the application protocol expects some long time outs to
+ elapse first. The goal is to eliminate idle connections which take
+ significant resources on servers with certain protocols.
+
+
+del-acl(<file-name>) <key fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This is used to delete an entry from an ACL. The ACL must be loaded from a
+ file (even a dummy empty file). The file name of the ACL to be updated is
+ passed between parentheses. It takes one argument: <key fmt>, which follows
+ log-format rules, to collect content of the entry to delete.
+ It is the equivalent of the "del acl" command from the stats socket, but can
+ be triggered by an HTTP request or response.
+
+
+del-header <name> [ -m <meth> ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This removes all HTTP header fields whose name is specified in <name>. <meth>
+ is the matching method, applied on the header name. Supported matching methods
+ are "str" (exact match), "beg" (prefix match), "end" (suffix match), "sub"
+ (substring match) and "reg" (regex match). If not specified, exact matching
+ method is used.
+
+
+del-map(<file-name>) <key fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This is used to delete an entry from a MAP. The MAP must be loaded from a
+ file (even a dummy empty file). The file name of the MAP to be updated is
+ passed between parentheses. It takes one argument: <key fmt>, which follows
+ log-format rules, to collect content of the entry to delete.
+  It is the equivalent of the "del map" command from the stats socket, but can
+  be triggered by an HTTP request or response.
+
+
+deny [ { status | deny_status } <code> ] [ content-type <type> ]
+ [ { default-errorfiles | errorfile <file> | errorfiles <name> |
+ file <file> | lf-file <file> | string <str> | lf-string <fmt> } ]
+ [ hdr <name> <fmt> ]*
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This stops the evaluation of the rules and immediately rejects the request or
+ response. By default an HTTP 403 error is returned for requests, and 502 for
+ responses, but the returned response may be customized using same syntax as
+ for the "return" action. Thus, see "return" below for details. For
+ compatibility purposes, when no argument is defined, or only "deny_status",
+ the argument "default-errorfiles" is implied. It means "deny [deny_status
+ <status>]" is an alias of "deny [status <status>] default-errorfiles". This
+ action is final, i.e. no further rules from the same rule set are evaluated
+ for the current section. See also the "return" action for the advanced
+ syntax.
+
+
+disable-l7-retry
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This disables any attempt to retry the request if it fails for any other
+ reason than a connection failure. This can be useful for example to make
+ sure POST requests aren't retried on failure.
+
+
+do-resolve(<var>,<resolvers>,[ipv4,ipv6]) <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | X | - | -
+
+ This action performs a DNS resolution of the output of <expr> and stores
+ the result in the variable <var>. It uses the DNS resolvers section
+ pointed by <resolvers>.
+ It is possible to choose a resolution preference using the optional
+ arguments 'ipv4' or 'ipv6'.
+ When performing the DNS resolution, the client side connection is on
+ pause waiting till the end of the resolution.
+ If an IP address can be found, it is stored into <var>. If any kind of
+ error occurs, then <var> is not set.
+  One can use this action to discover a server IP address at run time, based
+  on information found in the request (e.g. a Host header).
+ If this action is used to find the server's IP address (using the
+ "set-dst" action), then the server IP address in the backend must be set
+  to 0.0.0.0. The do-resolve action takes a host-only parameter, any port must
+  be removed from the string.
+
+ Example:
+ resolvers mydns
+ nameserver local 127.0.0.53:53
+ nameserver google 8.8.8.8:53
+ timeout retry 1s
+ hold valid 10s
+ hold nx 3s
+ hold other 3s
+ hold obsolete 0s
+ accepted_payload_size 8192
+
+ frontend fe
+ bind 10.42.0.1:80
+ http-request do-resolve(txn.myip,mydns,ipv4) hdr(Host),host_only
+ http-request capture var(txn.myip) len 40
+
+      # return 503 when the variable is not set,
+      # which means a DNS resolution error occurred
+ use_backend b_503 unless { var(txn.myip) -m found }
+
+ default_backend be
+
+ backend b_503
+ # dummy backend used to return 503.
+ # one can use the errorfile directive to send a nice
+ # 503 error page to end users
+
+ backend be
+ # rule to prevent HAProxy from reconnecting to services
+ # on the local network (forged DNS name used to scan the network)
+ http-request deny if { var(txn.myip) -m ip 127.0.0.0/8 10.0.0.0/8 }
+ http-request set-dst var(txn.myip)
+ server clear 0.0.0.0:0
+
+  NOTE: Don't forget to set the "protection" rules to ensure HAProxy won't
+  be used to scan the network or, worse, won't loop over itself...
+
+
+early-hint <name> <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This is used to build an HTTP 103 Early Hints response prior to any other one.
+ This appends an HTTP header field to this response whose name is specified in
+ <name> and whose value is defined by <fmt> which follows the log-format rules
+ (see Custom Log Format in section 8.2.4). This is particularly useful to pass
+ to the client some Link headers to preload resources required to render the
+ HTML documents.
+
+ See RFC 8297 for more information.
+
+
+expect-netscaler-cip layer4
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | - | - | - | - | - | -
+
+ This configures the client-facing connection to receive a NetScaler Client IP
+ insertion protocol header before any byte is read from the socket. This is
+ equivalent to having the "accept-netscaler-cip" keyword on the "bind" line,
+ except that using the TCP rule allows the PROXY protocol to be accepted only
+ for certain IP address ranges using an ACL. This is convenient when multiple
+ layers of load balancers are passed through by traffic coming from public
+ hosts.
+
+
+expect-proxy layer4
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | - | - | - | - | - | -
+
+ This configures the client-facing connection to receive a PROXY protocol
+ header before any byte is read from the socket. This is equivalent to having
+ the "accept-proxy" keyword on the "bind" line, except that using the TCP rule
+ allows the PROXY protocol to be accepted only for certain IP address ranges
+ using an ACL. This is convenient when multiple layers of load balancers are
+ passed through by traffic coming from public hosts.
+
+
+normalize-uri <normalizer>
+normalize-uri fragment-encode
+normalize-uri fragment-strip
+normalize-uri path-merge-slashes
+normalize-uri path-strip-dot
+normalize-uri path-strip-dotdot [ full ]
+normalize-uri percent-decode-unreserved [ strict ]
+normalize-uri percent-to-uppercase [ strict ]
+normalize-uri query-sort-by-name
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ Performs normalization of the request's URI.
+
+ URI normalization in HAProxy 2.4 is currently available as an experimental
+ technical preview. As such, it requires the global directive
+ 'expose-experimental-directives' first to be able to invoke it. You should be
+ prepared that the behavior of normalizers might change to fix possible
+ issues, possibly breaking proper request processing in your infrastructure.
+
+ Each normalizer handles a single type of normalization to allow for a
+ fine-grained selection of the level of normalization that is appropriate for
+ the supported backend.
+
+ As an example the "path-strip-dotdot" normalizer might be useful for a static
+ fileserver that directly maps the requested URI to the path within the local
+ filesystem. However it might break routing of an API that expects a specific
+ number of segments in the path.
+
+ It is important to note that some normalizers might result in unsafe
+ transformations for broken URIs. It might also be possible that a combination
+ of normalizers that are safe by themselves results in unsafe transformations
+ when improperly combined.
+
+ As an example the "percent-decode-unreserved" normalizer might result in
+ unexpected results when a broken URI includes bare percent characters. One
+ such a broken URI is "/%%36%36" which would be decoded to "/%66" which in
+ turn is equivalent to "/f". By specifying the "strict" option requests to
+ such a broken URI would safely be rejected.
+
+ The following normalizers are available:
+
+ - fragment-encode: Encodes "#" as "%23".
+
+ The "fragment-strip" normalizer should be preferred, unless it is known
+ that broken clients do not correctly encode '#' within the path component.
+
+ Example:
+ - /#foo -> /%23foo
+
+ - fragment-strip: Removes the URI's "fragment" component.
+
+      According to RFC 3986#3.5 the "fragment" component of a URI should not
+      be sent, but handled by the User Agent after retrieving a resource.
+
+ This normalizer should be applied first to ensure that the fragment is
+ not interpreted as part of the request's path component.
+
+ Example:
+ - /#foo -> /
+
+ - path-strip-dot: Removes "/./" segments within the "path" component
+ (RFC 3986#6.2.2.3).
+
+ Segments including percent encoded dots ("%2E") will not be detected. Use
+ the "percent-decode-unreserved" normalizer first if this is undesired.
+
+ Example:
+ - /. -> /
+ - /./bar/ -> /bar/
+ - /a/./a -> /a/a
+ - /.well-known/ -> /.well-known/ (no change)
+
+ - path-strip-dotdot: Normalizes "/../" segments within the "path" component
+ (RFC 3986#6.2.2.3).
+
+ This merges segments that attempt to access the parent directory with
+ their preceding segment.
+
+ Empty segments do not receive special treatment. Use the "merge-slashes"
+ normalizer first if this is undesired.
+
+ Segments including percent encoded dots ("%2E") will not be detected. Use
+ the "percent-decode-unreserved" normalizer first if this is undesired.
+
+ Example:
+ - /foo/../ -> /
+ - /foo/../bar/ -> /bar/
+ - /foo/bar/../ -> /foo/
+ - /../bar/ -> /../bar/
+ - /bar/../../ -> /../
+ - /foo//../ -> /foo/
+ - /foo/%2E%2E/ -> /foo/%2E%2E/
+
+ If the "full" option is specified then "../" at the beginning will be
+ removed as well:
+
+ Example:
+ - /../bar/ -> /bar/
+ - /bar/../../ -> /
+
+ - path-merge-slashes: Merges adjacent slashes within the "path" component
+ into a single slash.
+
+ Example:
+ - // -> /
+ - /foo//bar -> /foo/bar
+
+ - percent-decode-unreserved: Decodes unreserved percent encoded characters to
+ their representation as a regular character (RFC 3986#6.2.2.2).
+
+ The set of unreserved characters includes all letters, all digits, "-",
+ ".", "_", and "~".
+
+ Example:
+ - /%61dmin -> /admin
+ - /foo%3Fbar=baz -> /foo%3Fbar=baz (no change)
+ - /%%36%36 -> /%66 (unsafe)
+ - /%ZZ -> /%ZZ
+
+ If the "strict" option is specified then invalid sequences will result
+ in a HTTP 400 Bad Request being returned.
+
+ Example:
+ - /%%36%36 -> HTTP 400
+ - /%ZZ -> HTTP 400
+
+ - percent-to-uppercase: Uppercases letters within percent-encoded sequences
+ (RFC 3986#6.2.2.1).
+
+ Example:
+ - /%6f -> /%6F
+ - /%zz -> /%zz
+
+ If the "strict" option is specified then invalid sequences will result
+ in a HTTP 400 Bad Request being returned.
+
+ Example:
+ - /%zz -> HTTP 400
+
+ - query-sort-by-name: Sorts the query string parameters by parameter name.
+ Parameters are assumed to be delimited by '&'. Shorter names sort before
+ longer names and identical parameter names maintain their relative order.
+
+ Example:
+ - /?c=3&a=1&b=2 -> /?a=1&b=2&c=3
+ - /?aaa=3&a=1&aa=2 -> /?a=1&aa=2&aaa=3
+ - /?a=3&b=4&a=1&b=5&a=2 -> /?a=3&a=1&a=2&b=4&b=5
+
+
+redirect <rule>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This performs an HTTP redirection based on a redirect rule. This is exactly
+ the same as the "redirect" statement except that it inserts a redirect rule
+ which is processed in the middle of other "http-request" or "http-response"
+ rules and that these rules use the "log-format" strings. For responses, only
+ the "location" type of redirect is permitted. In addition, when a redirect is
+ performed during a response, the transfer from the server to HAProxy is
+ interrupted so that no payload can be forwarded to the client. This may cause
+ some connections to be closed on HTTP/1. This action is final, i.e. no
+ further rules from the same rule set are evaluated for the current section.
+ See the "redirect" keyword for the rule's syntax.
+
+
+reject
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | - | -
+
+ This stops the evaluation of the rules and immediately closes the connection
+ without sending any response. For HTTP rules, it acts similarly to the
+ "tcp-request content reject" rules. It can be useful to force an immediate
+ connection closure on HTTP/2 connections.
+
+ In "tcp-request connection" rules, rejected connections do not even become a
+ session, which is why they are accounted separately for in the stats, as
+ "denied connections". They are not considered for the session rate-limit and
+ are not logged either. The reason is that these rules should only be used to
+ filter extremely high connection rates such as the ones encountered during a
+ massive DDoS attack. Under these extreme conditions, the simple action of
+ logging each event would make the system collapse and would considerably
+ lower the filtering capacity. If logging is absolutely desired, then
+ "tcp-request content" rules should be used instead, as "tcp-request session"
+ rules will not log either.
+
+ When used in "tcp-response content" rules, the server connection will be
+ closed and the response aborted. This is generally used to prevent sensitive
+ information from leaking, typically after inspecting contents in conjunction
+ with the "wait-for-body" action.
+
+
+replace-header <name> <match-regex> <replace-fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This matches the value of all occurrences of header field <name> against
+ <match-regex>. Matching is performed case-sensitively. Matching values are
+ completely replaced by <replace-fmt>. Format characters are allowed in
+ <replace-fmt> and work like <fmt> arguments in "http-request add-header".
+ Standard back-references using the backslash ('\') followed by a number are
+ supported.
+
+ This action acts on whole header lines, regardless of the number of values
+ they may contain. Thus it is well-suited to process headers naturally
+ containing commas in their value, such as If-Modified-Since or Set-Cookie.
+ Headers that contain a comma-separated list of values, such as Accept, or
+ Cache-Control should be processed using the "replace-value" action instead.
+ See also the "replace-value" action.
+
+ Example:
+ http-request replace-header Cookie foo=([^;]*);(.*) foo=\1;ip=%bi;\2
+
+ # applied to:
+ Cookie: foo=foobar; expires=Tue, 14-Jun-2016 01:40:45 GMT;
+
+ # outputs:
+ Cookie: foo=foobar;ip=192.168.1.20; expires=Tue, 14-Jun-2016 01:40:45 GMT;
+
+ # assuming the backend IP is 192.168.1.20
+
+ http-request replace-header User-Agent curl foo
+
+ # applied to:
+ User-Agent: curl/7.47.0
+
+ # outputs:
+ User-Agent: foo
+
+ Example:
+ http-response replace-header Set-Cookie (C=[^;]*);(.*) \1;ip=%bi;\2
+
+ # applied to:
+ Set-Cookie: C=1; expires=Tue, 14-Jun-2016 01:40:45 GMT
+
+ # outputs:
+ Set-Cookie: C=1;ip=192.168.1.20; expires=Tue, 14-Jun-2016 01:40:45 GMT
+
+ # assuming the backend IP is 192.168.1.20.
+
+
+replace-path <match-regex> <replace-fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This works like "replace-header" except that it works on the request's path
+ component instead of a header. The path component starts at the first '/'
+ after an optional scheme+authority and ends before the question mark. Thus,
+ the replacement does not modify the scheme, the authority and the
+ query-string.
+
+ It is worth noting that regular expressions may be more expensive to evaluate
+ than certain ACLs, so rare replacements may benefit from a condition to avoid
+ performing the evaluation at all if it does not match.
+
+ Example:
+ # prefix /foo : turn /bar?q=1 into /foo/bar?q=1 :
+ http-request replace-path (.*) /foo\1
+
+ # strip /foo : turn /foo/bar?q=1 into /bar?q=1
+ http-request replace-path /foo/(.*) /\1
+ # or more efficient if only some requests match :
+ http-request replace-path /foo/(.*) /\1 if { url_beg /foo/ }
+
+
+replace-pathq <match-regex> <replace-fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This does the same as "http-request replace-path" except that the path
+ contains the query-string if any is present. Thus, the path and the
+ query-string are replaced.
+
+ Example:
+ # suffix /foo : turn /bar?q=1 into /bar/foo?q=1 :
+ http-request replace-pathq ([^?]*)(\?(.*))? \1/foo\2
+
+
+replace-uri <match-regex> <replace-fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This works like "replace-header" except that it works on the request's URI part
+ instead of a header. The URI part may contain an optional scheme, authority or
+ query string. These are considered to be part of the value that is matched
+ against.
+
+ It is worth noting that regular expressions may be more expensive to evaluate
+ than certain ACLs, so rare replacements may benefit from a condition to avoid
+ performing the evaluation at all if it does not match.
+
+ IMPORTANT NOTE: historically in HTTP/1.x, the vast majority of requests sent
+ by browsers use the "origin form", which differs from the "absolute form" in
+ that they do not contain a scheme nor authority in the URI portion. Mostly
+ only requests sent to proxies, those forged by hand and some emitted by
+ certain applications use the absolute form. As such, "replace-uri" usually
+ works fine most of the time in HTTP/1.x with rules starting with a "/". But
+ with HTTP/2, clients are encouraged to send absolute URIs only, which look
+ like the ones HTTP/1 clients use to talk to proxies. Such partial replace-uri
+ rules may then fail in HTTP/2 when they work in HTTP/1. Either the rules need
+ to be adapted to optionally match a scheme and authority, or replace-path
+ should be used.
+
+ Example:
+ # rewrite all "http" absolute requests to "https":
+ http-request replace-uri ^http://(.*) https://\1
+
+ # prefix /foo : turn /bar?q=1 into /foo/bar?q=1 :
+ http-request replace-uri ([^/:]*://[^/]*)?(.*) \1/foo\2
+
+
+replace-value <name> <match-regex> <replace-fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This works like "replace-header" except that it matches the regex against
+ every comma-delimited value of the header field <name> instead of the
+ entire header. This is suited for all headers which are allowed to carry
+ more than one value. An example could be the Accept request header, or
+ Cache-Control for requests or responses.
+
+ Example:
+ http-request replace-value X-Forwarded-For ^192\.168\.(.*)$ 172.16.\1
+
+ # applied to:
+ X-Forwarded-For: 192.168.10.1, 192.168.13.24, 10.0.0.37
+
+ # outputs:
+ X-Forwarded-For: 172.16.10.1, 172.16.13.24, 10.0.0.37
+
+ Example:
+ http-after-response replace-value Cache-control ^public$ private
+
+ # applied to:
+ Cache-Control: max-age=3600, public
+
+ # outputs:
+ Cache-Control: max-age=3600, private
+
+
+return [ status <code> ] [ content-type <type> ]
+ [ { default-errorfiles | errorfile <file> | errorfiles <name> |
+ file <file> | lf-file <file> | string <str> | lf-string <fmt> } ]
+ [ hdr <name> <fmt> ]*
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This stops the evaluation of the rules and immediately returns a response. The
+ default status code used for the response is 200. It can be optionally
+ specified as an arguments to "status". The response content-type may also be
+ specified as an argument to "content-type". Finally the response itself may
+ be defined. It can be a full HTTP response specifying the errorfile to use,
+ or the response payload specifying the file or the string to use. These rules
+ are followed to create the response :
+
+ * If neither the errorfile nor the payload to use is defined, a dummy
+ response is returned. Only the "status" argument is considered. It can be
+ any code in the range [200, 599]. The "content-type" argument, if any, is
+ ignored.
+
+ * If "default-errorfiles" argument is set, the proxy's errorfiles are
+ considered. If the "status" argument is defined, it must be one of the
+ status codes handled by HAProxy (200, 400, 403, 404, 405, 408, 410, 413,
+ 425, 429, 500, 501, 502, 503, and 504). The "content-type" argument, if
+ any, is ignored.
+
+ * If a specific errorfile is defined, with an "errorfile" argument, the
+ corresponding file, containing a full HTTP response, is returned. Only the
+ "status" argument is considered. It must be one of the status codes handled
+ by HAProxy (200, 400, 403, 404, 405, 408, 410, 413, 425, 429, 500, 501,
+ 502, 503, and 504). The "content-type" argument, if any, is ignored.
+
+ * If an http-errors section is defined, with an "errorfiles" argument, the
+ corresponding file in the specified http-errors section, containing a full
+ HTTP response, is returned. Only the "status" argument is considered. It
+ must be one of the status codes handled by HAProxy (200, 400, 403, 404, 405,
+ 408, 410, 413, 425, 429, 500, 501, 502, 503, and 504). The "content-type"
+ argument, if any, is ignored.
+
+ * If a "file" or a "lf-file" argument is specified, the file's content is
+ used as the response payload. If the file is not empty, its content-type
+ must be set as argument to "content-type". Otherwise, any "content-type"
+ argument is ignored. With a "lf-file" argument, the file's content is
+ evaluated as a log-format string. With a "file" argument, it is considered
+ as a raw content.
+
+ * If a "string" or "lf-string" argument is specified, the defined string is
+ used as the response payload. The content-type must always be set as
+ argument to "content-type". With a "lf-string" argument, the string is
+ evaluated as a log-format string. With a "string" argument, it is
+ considered as a raw string.
+
+ When the response is not based on an errorfile, it is possible to append HTTP
+ header fields to the response using "hdr" arguments. Otherwise, all "hdr"
+ arguments are ignored. For each one, the header name is specified in <name>
+ and its value is defined by <fmt> which follows the log-format rules.
+
+ Note that the generated response must be smaller than a buffer. And to avoid
+ any warning, when an errorfile or a raw file is loaded, the buffer space
+ reserved for the headers rewriting should also be free.
+
+ This action is final, i.e. no further rules from the same rule set are
+ evaluated for the current section.
+
+ Example:
+ http-request return errorfile /etc/haproxy/errorfiles/200.http \
+ if { path /ping }
+
+ http-request return content-type image/x-icon file /var/www/favicon.ico \
+ if { path /favicon.ico }
+
+ http-request return status 403 content-type text/plain \
+ lf-string "Access denied. IP %[src] is blacklisted." \
+ if { src -f /etc/haproxy/blacklist.lst }
+
+
+sc-add-gpc(<idx>,<sc-id>) { <int> | <expr> }
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This action increments the General Purpose Counter at the index <idx> of the
+ array associated to the sticky counter designated by <sc-id> by the value of
+ either integer <int> or the integer evaluation of expression <expr>. Integers
+ and expressions are limited to unsigned 32-bit values. If an error occurs,
+ this action silently fails and the actions evaluation continues. <idx> is an
+ integer between 0 and 99 and <sc-id> is an integer between 0 and 2. It also
+ silently fails if there is no GPC stored at this index. The entry in the
+ table is refreshed even if the value is zero. The 'gpc_rate' is automatically
+ adjusted to reflect the average growth rate of the gpc value.
+
+ This action applies only to the 'gpc' and 'gpc_rate' array data_types (and
+ not to the legacy 'gpc0', 'gpc1', 'gpc0_rate' nor 'gpc1_rate' data_types).
+ There is no equivalent function for legacy data types, but if the value is
+ always 1, please see 'sc-inc-gpc()', 'sc-inc-gpc0()' and 'sc-inc-gpc1()'.
+ There is no way to decrement the value either, but it is possible to store
+ exact values in a General Purpose Tag using 'sc-set-gpt()' instead.
+
+ The main use of this action is to count scores or total volumes (e.g.
+ estimated danger per source IP reported by the server or a WAF, total
+ uploaded bytes, etc).
+
+
+sc-inc-gpc(<idx>,<sc-id>)
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This action increments the General Purpose Counter at the index <idx> of the
+ array associated to the sticky counter designated by <sc-id>. If an error
+ occurs, this action silently fails and the actions evaluation continues.
+ <idx> is an integer between 0 and 99 and <sc-id> is an integer between 0 and
+ 2. It also silently fails if there is no GPC stored at this index. This
+ action applies only to the 'gpc' and 'gpc_rate' array data_types (and not to
+ the legacy 'gpc0', 'gpc1', 'gpc0_rate' nor 'gpc1_rate' data_types).
+
+
+sc-inc-gpc0(<sc-id>)
+sc-inc-gpc1(<sc-id>)
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This action increments the GPC0 or GPC1 counter according to the sticky
+ counter designated by <sc-id>. If an error occurs, this action silently fails
+ and the actions evaluation continues.
+
+
+sc-set-gpt(<idx>,<sc-id>) { <int> | <expr> }
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This action sets the 32-bit unsigned GPT at the index <idx> of the array
+ associated to the sticky counter designated by <sc-id> at the value of
+ <int>/<expr>. The expected result is a boolean.
+
+ If an error occurs, this action silently fails and the actions evaluation
+ continues. <idx> is an integer between 0 and 99 and <sc-id> is an integer
+ between 0 and 2. It also silently fails if there is no GPT stored
+ at this index.
+
+ This action applies only to the 'gpt' array data_type (and not to the
+ legacy 'gpt0' data-type).
+
+
+sc-set-gpt0(<sc-id>) { <int> | <expr> }
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This action sets the 32-bit unsigned GPT0 tag according to the sticky counter
+ designated by <sc-id> and the value of <int>/<expr>. The expected result is a
+ boolean. If an error occurs, this action silently fails and the actions
+ evaluation continues. This action is an alias for "sc-set-gpt(0,<sc-id>)".
+ See also the "sc-set-gpt" action.
+
+
+send-spoe-group <engine-name> <group-name>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | X | X | X | -
+
+ This action is used to trigger sending of a group of SPOE messages. To do so,
+ the SPOE engine used to send messages must be defined, as well as the SPOE
+ group to send. Of course, the SPOE engine must refer to an existing SPOE
+ filter. If no engine name is provided on the SPOE filter line, the SPOE
+ agent name must be used.
+
+ Arguments:
+ <engine-name> The SPOE engine name.
+
+ <group-name> The SPOE group name as specified in the engine
+ configuration.
+
+
+set-bandwidth-limit <name> [limit {<expr> | <size>}] [period {<expr> | <time>}]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | X | X | X | -
+
+ This action is used to enable the bandwidth limitation filter <name>, either
+ on the upload or download direction depending on the filter type. Custom
+ limit and period may be defined, if and only if <name> references a
+ per-stream bandwidth limitation filter. When a set-bandwidth-limit rule is
+ executed, it first resets all settings of the filter to their defaults prior
+ to enabling it. As a consequence, if several "set-bandwidth-limit" actions
+ are executed for the same filter, only the last one is considered. Several
+ bandwidth limitation filters can be enabled on the same stream.
+
+ Note that this action cannot be used in a defaults section because bandwidth
+ limitation filters cannot be defined in defaults sections. In addition, only
+ the HTTP payload transfer is limited. The HTTP headers are not considered.
+
+ Arguments:
+ <expr> Is a standard HAProxy expression formed by a sample-fetch followed
+ by some converters. The result is converted to an integer. It is
+ interpreted as a size in bytes for the "limit" parameter and as a
+ duration in milliseconds for the "period" parameter.
+
+ <size> Is a number. It follows the HAProxy size format and is expressed in
+ bytes.
+
+ <time> Is a number. It follows the HAProxy time format and is expressed in
+ milliseconds.
+
+ Example:
+ http-request set-bandwidth-limit global-limit
+ http-request set-bandwidth-limit my-limit limit 1m period 10s
+
+ See section 9.7 about bandwidth limitation filter setup.
+
+
+set-dst <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | - | X | - | -
+
+ This is used to set the destination IP address to the value of specified
+ expression. Useful when a proxy in front of HAProxy rewrites destination IP,
+ but provides the correct IP in a HTTP header; or you want to mask the IP for
+ privacy. If you want to connect to the new address/port, use '0.0.0.0:0' as a
+ server address in the backend.
+
+ Arguments:
+ <expr> Is a standard HAProxy expression formed by a sample-fetch followed
+ by some converters.
+
+ Example:
+ http-request set-dst hdr(x-dst)
+ http-request set-dst dst,ipmask(24)
+
+ When possible, set-dst preserves the original destination port as long as the
+ address family allows it, otherwise the destination port is set to 0.
+
+
+set-dst-port <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | - | X | - | -
+
+ This is used to set the destination port address to the value of specified
+ expression. If you want to connect to the new address/port, use '0.0.0.0:0'
+ as a server address in the backend.
+
+ Arguments:
+ <expr> Is a standard HAProxy expression formed by a sample-fetch
+ followed by some converters.
+
+ Example:
+ http-request set-dst-port hdr(x-port)
+ http-request set-dst-port int(4000)
+
+ When possible, set-dst-port preserves the original destination address as
+ long as the address family supports a port, otherwise it forces the
+ destination address to IPv4 "0.0.0.0" before rewriting the port.
+
+
+set-header <name> <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This does the same as the "add-header" action except that the header is first
+ removed if it existed. This is useful when passing security information to
+ the server, where the header must not be manipulated by external users, or to
+ force certain response headers such as "Server" to hide external information.
+ Note that the new value is computed before the removal so it is possible to
+ concatenate a value to an existing header.
+
+ Example:
+ http-request set-header X-Haproxy-Current-Date %T
+ http-request set-header X-SSL %[ssl_fc]
+ http-request set-header X-SSL-Session_ID %[ssl_fc_session_id,hex]
+ http-request set-header X-SSL-Client-Verify %[ssl_c_verify]
+ http-request set-header X-SSL-Client-DN %{+Q}[ssl_c_s_dn]
+ http-request set-header X-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)]
+ http-request set-header X-SSL-Issuer %{+Q}[ssl_c_i_dn]
+ http-request set-header X-SSL-Client-NotBefore %{+Q}[ssl_c_notbefore]
+ http-request set-header X-SSL-Client-NotAfter %{+Q}[ssl_c_notafter]
+
+
+set-log-level <level>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | X | X | X | X
+
+ This is used to change the log level of the current request when a certain
+ condition is met. Valid levels are the 8 syslog levels (see the "log"
+ keyword) plus the special level "silent" which disables logging for this
+ request. This rule is not final so the last matching rule wins. This rule
+ can be useful to disable health checks coming from another equipment.
+
+
+set-map(<file-name>) <key fmt> <value fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This is used to add a new entry into a map. The map must be loaded from a
+ file (even a dummy empty file). The file name of the map to be updated is
+ passed between parentheses. It takes 2 arguments: <key fmt>, which follows
+ log-format rules, used to collect map key, and <value fmt>, which follows
+ log-format rules, used to collect content for the new entry.
+ It performs a lookup in the map before insertion, to avoid duplicated (or
+ more) values. It is the equivalent of the "set map" command from the
+ stats socket, but can be triggered by an HTTP request.
+
+
+set-mark <mark>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | -
+
+ This is used to set the Netfilter/IPFW MARK on all packets sent to the client
+ to the value passed in <mark> on platforms which support it. This value is an
+ unsigned 32 bit value which can be matched by netfilter/ipfw and by the
+ routing table or monitoring the packets through DTrace. It can be expressed
+ both in decimal or hexadecimal format (prefixed by "0x").
+ This can be useful to force certain packets to take a different route (for
+ example a cheaper network path for bulk downloads). This works on Linux
+ kernels 2.6.32 and above and requires admin privileges, as well as on FreeBSD
+ and OpenBSD.
+
+
+set-method <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This rewrites the request method with the result of the evaluation of format
+ string <fmt>. There should be very few valid reasons for having to do so as
+ this is more likely to break something than to fix it.
+
+
+set-nice <nice>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | X | X | X | -
+
+ This sets the "nice" factor of the current request/response being processed.
+ It only has effect against the other requests being processed at the same
+ time. The default value is 0, unless altered by the "nice" setting on the
+ "bind" line. The accepted range is -1024..1024. The higher the value, the
+ nicer the request will be. Lower values will make the request more important
+ than other ones. This can be useful to improve the speed of some requests, or
+ lower the priority of non-important requests. Using this setting without
+ prior experimentation can cause some major slowdown.
+
+
+set-path <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This rewrites the request path with the result of the evaluation of format
+ string <fmt>. The query string, if any, is left intact. If a scheme and
+ authority is found before the path, they are left intact as well. If the
+ request doesn't have a path ("*"), this one is replaced with the format.
+ This can be used to prepend a directory component in front of a path for
+ example. See also "http-request set-query" and "http-request set-uri".
+
+ Example :
+ # prepend the host name before the path
+ http-request set-path /%[hdr(host)]%[path]
+
+
+set-pathq <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This does the same as "http-request set-path" except that the query-string is
+ also rewritten. It may be used to remove the query-string, including the
+ question mark (it is not possible using "http-request set-query").
+
+
+set-priority-class <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | X | - | -
+
+ This is used to set the queue priority class of the current request.
+ The value must be a sample expression which converts to an integer in the
+ range -2047..2047. Results outside this range will be truncated.
+ The priority class determines the order in which queued requests are
+ processed. Lower values have higher priority.
+
+
+set-priority-offset <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | X | - | -
+
+ This is used to set the queue priority timestamp offset of the current
+ request. The value must be a sample expression which converts to an integer
+ in the range -524287..524287. Results outside this range will be truncated.
+ When a request is queued, it is ordered first by the priority class, then by
+ the current timestamp adjusted by the given offset in milliseconds. Lower
+ values have higher priority.
+ Note that the resulting timestamp is only tracked with enough precision
+ for 524,287ms (8m44s287ms). If the request is queued long enough to where the
+ adjusted timestamp exceeds this value, it will be misidentified as highest
+ priority. Thus it is important to set "timeout queue" to a value, where when
+ combined with the offset, does not exceed this limit.
+
+
+set-query <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This rewrites the request's query string which appears after the first
+ question mark ("?") with the result of the evaluation of format string <fmt>.
+ The part prior to the question mark is left intact. If the request doesn't
+ contain a question mark and the new value is not empty, then one is added at
+ the end of the URI, followed by the new value. If a question mark was
+ present, it will never be removed even if the value is empty. This can be
+ used to add or remove parameters from the query string.
+
+ See also "http-request set-path" and "http-request set-uri".
+
+ Example:
+ # replace "%3D" with "=" in the query string
+ http-request set-query %[query,regsub(%3D,=,g)]
+
+
+set-src <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | - | X | - | -
+
+ This is used to set the source IP address to the value of specified
+ expression. Useful when a proxy in front of HAProxy rewrites source IP, but
+ provides the correct IP in a HTTP header; or you want to mask source IP for
+ privacy. All subsequent calls to "src" fetch will return this value
+ (see example).
+
+ Arguments :
+ <expr> Is a standard HAProxy expression formed by a sample-fetch followed
+ by some converters.
+
+ See also "option forwardfor".
+
+ Example:
+ http-request set-src hdr(x-forwarded-for)
+ http-request set-src src,ipmask(24)
+
+ # After the masking this will track connections
+ # based on the IP address with the last byte zeroed out.
+ http-request track-sc0 src
+
+ When possible, set-src preserves the original source port as long as the
+ address family allows it, otherwise the source port is set to 0.
+
+
+set-src-port <expr>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | - | X | - | -
+
+ This is used to set the source port address to the value of specified
+ expression.
+
+ Arguments:
+ <expr> Is a standard HAProxy expression formed by a sample-fetch followed
+ by some converters.
+
+ Example:
+ http-request set-src-port hdr(x-port)
+ http-request set-src-port int(4000)
+
+ When possible, set-src-port preserves the original source address as long as
+ the address family supports a port, otherwise it forces the source address to
+ IPv4 "0.0.0.0" before rewriting the port.
+
+
+set-status <status> [reason <str>]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | - | X | X
+
+ This replaces the response status code with <status> which must be an integer
+ between 100 and 999. Optionally, a custom reason text can be provided defined
+ by <str>, or the default reason for the specified code will be used as a
+ fallback. Note that the reason string only exists in HTTP/1.x and is ignored
+ by other versions of the protocol.
+
+ Example:
+ # return "431 Request Header Fields Too Large"
+ http-response set-status 431
+ # return "503 Slow Down", custom reason
+ http-response set-status 503 reason "Slow Down"
+
+
+set-timeout { client | server | tunnel } { <timeout> | <expr> }
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This action overrides the specified "client", "server" or "tunnel" timeout
+ for the current stream only. The timeout can be specified in milliseconds or
+ with any other unit if the number is suffixed by the unit as explained at the
+ top of this document. It is also possible to write an expression which must
+ return a number interpreted as a timeout in milliseconds.
+
+ Note that the server/tunnel timeouts are only relevant on the backend side
+ and thus this rule is only available for the proxies with backend
+ capabilities. Likewise, client timeout is only relevant for frontend side.
+ Also the timeout value must be non-null to obtain the expected results.
+
+ Example:
+ http-request set-timeout tunnel 5s
+ http-request set-timeout server req.hdr(host),map_int(host.lst)
+
+ Example:
+ http-response set-timeout tunnel 5s
+ http-response set-timeout server res.hdr(X-Refresh-Seconds),mul(1000)
+
+
+set-tos <tos>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | -
+
+ This is used to set the TOS or DSCP field value of packets sent to the client
+ to the value passed in <tos> on platforms which support this. This value
+ represents the whole 8 bits of the IP TOS field, and can be expressed both in
+ decimal or hexadecimal format (prefixed by "0x"). Note that only the 6 higher
+ bits are used in DSCP or TOS, and the two lower bits are always 0. This can
+ be used to adjust some routing behavior on border routers based on some
+ information from the request.
+
+ See RFC 2474, 2597, 3260 and 4594 for more information.
+
+
+set-uri <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This rewrites the request URI with the result of the evaluation of format
+ string <fmt>. The scheme, authority, path and query string are all replaced
+ at once. This can be used to rewrite hosts in front of proxies, or to perform
+ complex modifications to the URI such as moving parts between the path and
+ the query string. If an absolute URI is set, it will be sent as is to
+ HTTP/1.1 servers. If it is not the desired behavior, the host, the path
+ and/or the query string should be set separately.
+ See also "http-request set-path" and "http-request set-query".
+
+
+set-var(<var-name>[,<cond>...]) <expr>
+set-var-fmt(<var-name>[,<cond>...]) <fmt>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This is used to set the contents of a variable. The variable is declared
+ inline.
+
+ Arguments:
+ <var-name> The name of the variable starts with an indication about its
+ scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction
+ (request and response)
+ "req" : the variable is shared only during request
+ processing
+ "res" : the variable is shared only during response
+ processing
+ This prefix is followed by a name. The separator is a '.'.
+ The name may only contain characters 'a-z', 'A-Z', '0-9'
+ and '_'.
+
+ <cond> A set of conditions that must all be true for the variable to
+ actually be set (such as "ifnotempty", "ifgt" ...). See the
+ set-var converter's description for a full list of possible
+ conditions.
+
+ <expr> Is a standard HAProxy expression formed by a sample-fetch
+ followed by some converters.
+
+ <fmt> This is the value expressed using log-format rules (see Custom
+ Log Format in section 8.2.4).
+
+ All scopes are usable for HTTP rules, but scopes "proc" and "sess" are the
+ only usable ones in rule sets which do not have access to contents such as
+ "tcp-request connection" and "tcp-request session".
+
+ Example:
+ http-request set-var(req.my_var) req.fhdr(user-agent),lower
+ http-request set-var-fmt(txn.from) %[src]:%[src_port]
+
+
+silent-drop [ rst-ttl <ttl> ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | -
+
+ This stops the evaluation of the rules and makes the client-facing connection
+ suddenly disappear using a system-dependent way that tries to prevent the
+ client from being notified. When called without the rst-ttl argument,
+ we try to prevent sending any FIN or RST packet back to the client by
+ using TCP_REPAIR. If this fails (mainly because of missing privileges),
+ we fall back to sending a RST packet with a TTL of 1.
+
+ The effect is that the client still sees an established connection while
+ there is none on HAProxy, saving resources. However, stateful equipment
+ placed between the HAProxy and the client (firewalls, proxies,
+ load balancers) will also keep the established connection in their
+ session tables.
+
+ The optional rst-ttl changes this behaviour: TCP_REPAIR is not used, and an
+ RST packet with a configurable TTL is sent. When set to a reasonable value,
+ the RST packet travels through the local infrastructure, deleting the
+ connection in firewalls and other systems, but disappears before reaching
+ the client. Future packets from the client will then be dropped already by
+ front equipments. These local RSTs protect local resources, but not the
+ client's. This must not be used unless the consequences of doing this are
+ fully understood.
+
+
+strict-mode { on | off }
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | X
+
+ This enables or disables the strict rewriting mode for following rules. It
+ does not affect rules declared before it and it is only applicable on rules
+ performing a rewrite on the requests. When the strict mode is enabled, any
+ rewrite failure triggers an internal error. Otherwise, such errors are
+ silently ignored. The purpose of the strict rewriting mode is to make some
+ rewrites optional while others must be performed to continue the request
+ processing.
+
+ By default, the strict rewriting mode is enabled. Its value is also reset
+ when a ruleset evaluation ends. So, for instance, if you change the mode on
+ the frontend, the default mode is restored when HAProxy starts the backend
+ rules evaluation.
+
+
+switch-mode http [ proto <name> ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | - | - | -
+
+ This action is used to perform a connection upgrade. Only HTTP upgrades are
+ supported for now. The protocol may optionally be specified. This action is
+ only available for a proxy with the frontend capability. The connection
+ upgrade is immediately performed, following "tcp-request content" rules are
+ not evaluated. This upgrade method should be preferred to the implicit one
+ consisting of relying on the backend mode. When used, it is possible to set HTTP
+ directives in a frontend without any warning. These directives will be
+ conditionally evaluated if the HTTP upgrade is performed. However, an HTTP
+ backend must still be selected. It remains unsupported to route an HTTP
+ connection (upgraded or not) to a TCP server.
+
+ See section 4 about Proxies for more details on HTTP upgrades.
+
+
+tarpit [ { status | deny_status } <code>] [content-type <type>]
+ [ { default-errorfiles | errorfile <file> | errorfiles <name> |
+ file <file> | lf-file <file> | string <str> | lf-string <fmt> } ]
+ [ hdr <name> <fmt> ]*
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This stops the evaluation of the rules and immediately blocks the request
+ without responding for a delay specified by "timeout tarpit" or
+ "timeout connect" if the former is not set. After that delay, if the client
+ is still connected, a response is returned so that the client does not
+ suspect it has been tarpitted. Logs will report the flags "PT". The goal of
+ the tarpit rule is to slow down robots during an attack when they're limited
+ on the number of concurrent requests. It can be very efficient against very
+ dumb robots, and will significantly reduce the load on firewalls compared to
+ a "deny" rule. But when facing "correctly" developed robots, it can make
+ things worse by forcing HAProxy and the front firewall to support insane
+ number of concurrent connections. By default an HTTP error 500 is returned.
+ But the response may be customized using the same syntax as
+ "http-request return" rules. Thus, see "http-request return" for details.
+
+ For compatibility purpose, when no argument is defined, or only "deny_status",
+ the argument "default-errorfiles" is implied. It means
+ "http-request tarpit [deny_status <status>]" is an alias of
+ "http-request tarpit [status <status>] default-errorfiles".
+ No further "http-request" rules are evaluated.
+ See also "http-request return" and "http-request silent-drop".
+
+
+track-sc0 <key> [table <table>]
+track-sc1 <key> [table <table>]
+track-sc2 <key> [table <table>]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | - | X | X | -
+
+ This enables tracking of sticky counters from current request. These rules do
+ not stop evaluation and do not change default action. The number of counters
+ that may be simultaneously tracked by the same connection is set by the
+ global "tune.stick-counters" setting, which defaults to MAX_SESS_STKCTR if
+ set at build time (it is reported in haproxy -vv) and which defaults to 3,
+ so the track-sc number is between 0 and (tune.stick-counters-1). The first
+ "track-sc0" rule executed enables tracking of the counters of the specified
+ table as the first set. The first "track-sc1" rule executed enables tracking
+ of the counters of the specified table as the second set. The first
+ "track-sc2" rule executed enables tracking of the counters of the specified
+ table as the third set. It is a recommended practice to use the first set of
+ counters for the per-frontend counters and the second set for the per-backend
+ ones. But this is just a guideline, all may be used everywhere.
+
+ Arguments :
+
+ <key> is mandatory, and is a sample expression rule as described in
+ section 7.3. It describes what elements of the incoming connection,
+ request or response will be analyzed, extracted, combined, and used
+ to select which table entry to update the counters.
+
+ <table> is an optional table to be used instead of the default one, which
+ is the stick-table declared in the current proxy. All the counters
+ for the matches and updates for the key will then be performed in
+ that table until the session ends.
+
+ Once a "track-sc*" rule is executed, the key is looked up in the table and if
+ it is not found, an entry is allocated for it. Then a pointer to that entry
+ is kept during all the session's life, and this entry's counters are updated
+ as often as possible, every time the session's counters are updated, and also
+ systematically when the session ends. Counters are only updated for events
+ that happen after the tracking has been started. As an exception, connection
+ counters and request counters are systematically updated so that they reflect
+ useful information.
+
+ If the entry tracks concurrent connection counters, one connection is counted
+ for as long as the entry is tracked, and the entry will not expire during
+ that time. Tracking counters also provides a performance advantage over just
+ checking the keys, because only one table lookup is performed for all ACL
+ checks that make use of it.
+
+
+unset-var(<var-name>)
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ X | X | X | X | X | X | X
+
+ This is used to unset a variable. See the "set-var" action for details about
+ <var-name>.
+
+ Example:
+ http-request unset-var(req.my_var)
+
+
+use-service <service-name>
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | X | - | X | - | -
+
+ This action executes the configured TCP or HTTP service to reply to the
+ request, depending on the rule set it's used in. The rule is final, i.e.
+ no further rules are evaluated in the same rule set.
+
+ A service may choose to reply by sending any valid response or it may
+ immediately close the connection without sending any response. For HTTP
+ services, a valid response requires a valid HTTP response. Outside native
+ services, for instance the Prometheus exporter for HTTP services, it is
+ possible to write custom TCP and HTTP services in Lua.
+
+ Arguments :
+ <service-name> is mandatory. It is the service to call
+
+ Example:
+ http-request use-service prometheus-exporter if { path /metrics }
+
+
+wait-for-body time <time> [ at-least <bytes> ]
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | X | -
+
+ This will delay the processing of the request or response until one of the
+ following conditions occurs:
+ - The full request body is received, in which case processing proceeds
+ normally.
+ - <bytes> bytes have been received, when the "at-least" argument is given and
+ <bytes> is non-zero, in which case processing proceeds normally.
+ - The request buffer is full, in which case processing proceeds normally. The
+ size of this buffer is determined by the "tune.bufsize" option.
+ - The request has been waiting for more than <time> milliseconds. In this
+ case HAProxy will respond with a 408 "Request Timeout" error to the client
+ and stop processing the request. Note that if any of the other conditions
+ happens first, this timeout will not occur even if the full body has
+ not yet been received.
+
+ This action may be used as a replacement for "option http-buffer-request".
+
+ Arguments :
+
+ <time> is mandatory. It is the maximum time to wait for the body. It
+ follows the HAProxy time format and is expressed in milliseconds.
+
+ <bytes> is optional. It is the minimum payload size to receive to stop to
+ wait. It follows the HAProxy size format and is expressed in
+ bytes. A value of 0 (the default) means no limit.
+
+ Example:
+ http-request wait-for-body time 1s at-least 1k if METH_POST
+
+ See also : "option http-buffer-request"
+
+
+wait-for-handshake
+ Usable in: TCP RqCon| RqSes| RqCnt| RsCnt| HTTP Req| Res| Aft
+ - | - | - | - | X | - | -
+
+ This will delay the processing of the request until the SSL handshake
+ has happened. This is mostly useful to delay processing early data until we're
+ sure they are valid.
+
+
+5. Bind and server options
+--------------------------
+
+The "bind", "server" and "default-server" keywords support a number of settings
+depending on some build options and on the system HAProxy was built on. These
+settings generally each consist in one word sometimes followed by a value,
+written on the same line as the "bind" or "server" line. All these options are
+described in this section.
+
+
+5.1. Bind options
+-----------------
+
+The "bind" keyword supports a certain number of settings which are all passed
+ as arguments on the same line. The order in which those arguments appear is of
+ no importance, provided that they appear after the bind address. All of these
+ parameters are optional. Some of them consist in a single word (booleans),
+while other ones expect a value after them. In this case, the value must be
+provided immediately after the setting name.
+
+The currently supported settings are the following ones.
+
+accept-netscaler-cip <magic number>
+ Enforces the use of the NetScaler Client IP insertion protocol over any
+ connection accepted by any of the TCP sockets declared on the same line. The
+ NetScaler Client IP insertion protocol dictates the layer 3/4 addresses of
+ the incoming connection to be used everywhere an address is used, with the
+ only exception of "tcp-request connection" rules which will only see the
+ real connection address. Logs will reflect the addresses indicated in the
+ protocol, unless it is violated, in which case the real address will still
+ be used. This keyword combined with support from external components can be
+ used as an efficient and reliable alternative to the X-Forwarded-For
+ mechanism which is not always reliable and not even always usable. See also
+ "tcp-request connection expect-netscaler-cip" for a finer-grained setting of
+ which client is allowed to use the protocol.
+
+accept-proxy
+ Enforces the use of the PROXY protocol over any connection accepted by any of
+ the sockets declared on the same line. Versions 1 and 2 of the PROXY protocol
+ are supported and correctly detected. The PROXY protocol dictates the layer
+ 3/4 addresses of the incoming connection to be used everywhere an address is
+ used, with the only exception of "tcp-request connection" rules which will
+ only see the real connection address. Logs will reflect the addresses
+ indicated in the protocol, unless it is violated, in which case the real
+ address will still be used. This keyword combined with support from external
+ components can be used as an efficient and reliable alternative to the
+ X-Forwarded-For mechanism which is not always reliable and not even always
+ usable. See also "tcp-request connection expect-proxy" for a finer-grained
+ setting of which client is allowed to use the protocol.
+
+allow-0rtt
+ Allow receiving early data when using TLSv1.3. This is disabled by default,
+ due to security considerations. Because it is vulnerable to replay attacks,
+ you should only allow it for requests that are safe to replay, i.e. requests
+ that are idempotent. You can use the "wait-for-handshake" action for any
+ request that wouldn't be safe with early data.
+
+alpn <protocols>
+ This enables the TLS ALPN extension and advertises the specified protocol
+ list as supported on top of ALPN. The protocol list consists in a comma-
+ delimited list of protocol names, for instance: "http/1.1,http/1.0" (without
+ quotes). This requires that the SSL library is built with support for TLS
+ extensions enabled (check with haproxy -vv). The ALPN extension replaces the
+ initial NPN extension. At the protocol layer, ALPN is required to enable
+ HTTP/2 on an HTTPS frontend and HTTP/3 on a QUIC frontend. However, when such
+ frontends have none of "npn", "alpn" and "no-alpn" set, a default value of
+ "h2,http/1.1" will be used for a regular HTTPS frontend, and "h3" for a QUIC
+ frontend. Versions of OpenSSL prior to 1.0.2 didn't support ALPN and only
+ supported the now obsolete NPN extension. At the time of writing this, most
+ browsers still support both ALPN and NPN for HTTP/2 so a fallback to NPN may
+ still work for a while. But ALPN must be used whenever possible. Protocols
+ not advertised are not negotiated. For example it is possible to only accept
+ HTTP/2 connections with this:
+
+ bind :443 ssl crt pub.pem alpn h2 # explicitly disable HTTP/1.1
+
+ QUIC supports only h3 and hq-interop as ALPN. h3 is for HTTP/3 and hq-interop
+ is used for http/0.9 and QUIC interop runner (see https://interop.seemann.io).
+ Each "alpn" statement will replace a previous one. In order to remove them,
+ use "no-alpn".
+
+ Note that some old browsers such as Firefox 88 used to experience issues with
+ WebSocket over H2, and in case such a setup is encountered, it may be needed
+ to either explicitly disable HTTP/2 in the "alpn" string by forcing it to
+ "http/1.1" or "no-alpn", or to enable "h2-workaround-bogus-websocket-clients"
+ globally.
+
+backlog <backlog>
+ Sets the socket's backlog to this value. If unspecified or 0, the frontend's
+ backlog is used instead, which generally defaults to the maxconn value.
+
+curves <curves>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of elliptic curves algorithms ("curve suite")
+ that are negotiated during the SSL/TLS handshake with ECDHE. The format of the
+ string is a colon-delimited list of curve names.
+ Example: "X25519:P-256" (without quotes)
+ When "curves" is set, "ecdhe" parameter is ignored.
+
+ecdhe <named curve>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the named curve (RFC 4492) used to generate ECDH ephemeral keys. By default,
+ used named curve is prime256v1.
+
+ca-file <cafile>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file from which to load CA certificates used to verify
+ client's certificate. It is possible to load a directory containing multiple
+ CAs, in this case HAProxy will try to load every ".pem", ".crt", ".cer", and
+ ".crl" available in the directory; files starting with a dot are ignored.
+
+ Warning: The "@system-ca" parameter could be used in place of the cafile
+ in order to use the trusted CAs of your system, like it's done with the server
+ directive. But you mustn't use it unless you know what you are doing.
+ Configuring it this way basically means that the bind will accept any client
+ certificate generated from one of the CA present on your system, which is
+ extremely insecure.
+
+ca-ignore-err [all|<errorID>,...]
+ This setting is only available when support for OpenSSL was built in.
+ Sets a comma separated list of errorIDs to ignore during verify at depth > 0.
+ It could be a numerical ID, or the constant name (X509_V_ERR) which is
+ available in the OpenSSL documentation:
+ https://www.openssl.org/docs/manmaster/man3/X509_STORE_CTX_get_error.html#ERROR-CODES
+ It is recommended to use the constant name as the numerical value can change
+ in new version of OpenSSL.
+ If set to 'all', all errors are ignored. SSL handshake is not aborted if an
+ error is ignored.
+
+ca-sign-file <cafile>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file containing both the CA certificate and the CA private
+ key used to create and sign server's certificates. This is a mandatory
+ setting when the dynamic generation of certificates is enabled. See
+ 'generate-certificates' for details.
+
+ca-sign-pass <passphrase>
+ This setting is only available when support for OpenSSL was built in. It is
+ the CA private key passphrase. This setting is optional and used only when
+ the dynamic generation of certificates is enabled. See
+ 'generate-certificates' for details.
+
+ca-verify-file <cafile>
+ This setting designates a PEM file from which to load CA certificates used to
+ verify client's certificate. It designates CA certificates which must not be
+ included in CA names sent in server hello message. Typically, "ca-file" must
+ be defined with intermediate certificates, and "ca-verify-file" with
+ certificates ending the chain, like the root CA.
+
+ciphers <ciphers>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of cipher algorithms ("cipher suite") that are
+ negotiated during the SSL/TLS handshake up to TLSv1.2. The format of the
+ string is defined in "man 1 ciphers" from OpenSSL man pages. For background
+ information and recommendations see e.g.
+ (https://wiki.mozilla.org/Security/Server_Side_TLS) and
+ (https://mozilla.github.io/server-side-tls/ssl-config-generator/). For TLSv1.3
+ cipher configuration, please check the "ciphersuites" keyword.
+
+ciphersuites <ciphersuites>
+ This setting is only available when support for OpenSSL was built in and
+ OpenSSL 1.1.1 or later was used to build HAProxy. It sets the string describing
+ the list of cipher algorithms ("cipher suite") that are negotiated during the
+ TLSv1.3 handshake. The format of the string is defined in "man 1 ciphers" from
+ OpenSSL man pages under the "ciphersuites" section. For cipher configuration
+ for TLSv1.2 and earlier, please check the "ciphers" keyword.
+
+client-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of signature algorithms related to client
+ authentication that are negotiated. The format of the string is defined in
+ "man 3 SSL_CTX_set1_client_sigalgs" from the OpenSSL man pages. It is not
+ recommended to use this setting if no specific usecase was identified.
+
+crl-file <crlfile>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file from which to load certificate revocation list used
+ to verify client's certificate. You need to provide a certificate revocation
+ list for every certificate of your certificate authority chain.
+
+crt <cert>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file containing both the required certificates and any
+ associated private keys. This file can be built by concatenating multiple
+ PEM files into one (e.g. cat cert.pem key.pem > combined.pem). If your CA
+ requires an intermediate certificate, this can also be concatenated into this
+ file. Intermediate certificate can also be shared in a directory via
+ "issuers-chain-path" directive.
+
+ If the file does not contain a private key, HAProxy will try to load
+ the key at the same path suffixed by a ".key".
+
+ If the OpenSSL used supports Diffie-Hellman, parameters present in this file
+ are loaded.
+
+ If a directory name is used instead of a PEM file, then all files found in
+ that directory will be loaded in alphabetic order unless their name ends
+ with '.key', '.issuer', '.ocsp' or '.sctl' (reserved extensions). Files
+ starting with a dot are also ignored. This directive may be specified multiple
+ times in order to load certificates from multiple files or directories. The
+ certificates will be presented to clients who provide a valid TLS Server Name
+ Indication field matching one of their CN or alt subjects. Wildcards are
+ supported, where a wildcard character '*' is used instead of the first
+ hostname component (e.g. *.example.org matches www.example.org but not
+ www.sub.example.org). If an empty directory is used, HAProxy will not start
+ unless the "strict-sni" keyword is used.
+
+ If no SNI is provided by the client or if the SSL library does not support
+ TLS extensions, or if the client provides an SNI hostname which does not
+ match any certificate, then the first loaded certificate will be presented.
+ This means that when loading certificates from a directory, it is highly
+ recommended to load the default one first as a file or to ensure that it will
+ always be the first one in the directory.
+
+ Note that the same cert may be loaded multiple times without side effects.
+
+ Some CAs (such as GoDaddy) offer a drop down list of server types that do not
+ include HAProxy when obtaining a certificate. If this happens be sure to
+ choose a web server that the CA believes requires an intermediate CA (for
+ GoDaddy, selecting Apache Tomcat will get the correct bundle, but many
+ others, e.g. nginx, result in a wrong bundle that will not work for some
+ clients).
+
+ For each PEM file, HAProxy checks for the presence of file at the same path
+ suffixed by ".ocsp". If such file is found, support for the TLS Certificate
+ Status Request extension (also known as "OCSP stapling") is automatically
+ enabled. The content of this file is optional. If not empty, it must contain
+ a valid OCSP Response in DER format. In order to be valid an OCSP Response
+ must comply with the following rules: it has to indicate a good status,
+ it has to be a single response for the certificate of the PEM file, and it
+ has to be valid at the moment of addition. If these rules are not respected
+ the OCSP Response is ignored and a warning is emitted. In order to identify
+ which certificate an OCSP Response applies to, the issuer's certificate is
+ necessary. If the issuer's certificate is not found in the PEM file, it will
+ be loaded from a file at the same path as the PEM file suffixed by ".issuer"
+ if it exists otherwise it will fail with an error.
+
+ For each PEM file, HAProxy also checks for the presence of file at the same
+ path suffixed by ".sctl". If such file is found, support for Certificate
+ Transparency (RFC6962) TLS extension is enabled. The file must contain a
+ valid Signed Certificate Timestamp List, as described in RFC. File is parsed
+ to check basic syntax, but no signatures are verified.
+
+ There are cases where it is desirable to support multiple key types, e.g. RSA
+ and ECDSA in the cipher suites offered to the clients. This allows clients
+ that support EC certificates to be able to use EC ciphers, while
+ simultaneously supporting older, RSA only clients.
+
+ To achieve this, OpenSSL 1.1.1 is required, you can configure this behavior
+ by providing one crt entry per certificate type, or by configuring a "cert
+ bundle" like it was required before HAProxy 1.8. See "ssl-load-extra-files".
+
+crt-ignore-err <errors>
+ This setting is only available when support for OpenSSL was built in. Sets a
+ comma separated list of errorIDs to ignore during verify at depth == 0.
+ It could be a numerical ID, or the constant name (X509_V_ERR) which is
+ available in the OpenSSL documentation:
+ https://www.openssl.org/docs/manmaster/man3/X509_STORE_CTX_get_error.html#ERROR-CODES
+ It is recommended to use the constant name as the numerical value can change
+ in new version of OpenSSL.
+ If set to 'all', all errors are ignored. SSL handshake is not aborted if an
+ error is ignored.
+
+crt-list <file>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a list of PEM file with an optional ssl configuration and a SNI
+ filter per certificate, with the following format for each line :
+
+ <crtfile> [\[<sslbindconf> ...\]] [[!]<snifilter> ...]
+
+ sslbindconf supports "allow-0rtt", "alpn", "ca-file", "ca-verify-file",
+ "ciphers", "ciphersuites", "crl-file", "curves", "ecdhe", "no-ca-names",
+ "npn", "verify" configuration. With BoringSSL and Openssl >= 1.1.1
+ "ssl-min-ver" and "ssl-max-ver" are also supported. It overrides the
+ configuration set in bind line for the certificate.
+
+ Wildcards are supported in the SNI filter. Negative filters are also supported,
+ useful in combination with a wildcard filter to exclude a particular SNI, or
+ after the first certificate to exclude a pattern from its CN or Subject Alt
+ Name (SAN). The certificates will be presented to clients who provide a valid
+ TLS Server Name Indication field matching one of the SNI filters. If no SNI
+ filter is specified, the CN and SAN are used. This directive may be specified
+ multiple times. See the "crt" option for more information. The default
+ certificate is still needed to meet OpenSSL expectations. If it is not used,
+ the 'strict-sni' option may be used.
+
+ Multi-cert bundling (see "ssl-load-extra-files") is supported with crt-list,
+ as long as only the base name is given in the crt-list. SNI filter will do
+ the same work on all bundled certificates.
+
+ Empty lines as well as lines beginning with a hash ('#') will be ignored.
+
+ The first declared certificate of a bind line is used as the default
+ certificate, either from crt or crt-list option, which HAProxy should use in
+ the TLS handshake if no other certificate matches. This certificate will also
+ be used if the provided SNI matches its CN or SAN, even if a matching SNI
+ filter is found on any crt-list. The SNI filter !* can be used after the first
+ declared certificate to not include its CN and SAN in the SNI tree, so it will
+ never match except if no other certificate matches. This way the first
+ declared certificate acts as a fallback.
+
+ When no ALPN is set, the "bind" line's default one is used. If a "bind" line
+ has no "no-alpn", "alpn" nor "npn" set, a default value will be used
+ depending on the protocol (see "alpn" above). However if the "bind" line has
+ a different default, or explicitly disables ALPN using "no-alpn", it is
+ possible to force a specific value for a certificate.
+
+ crt-list file example:
+ cert1.pem !*
+ # comment
+ cert2.pem [alpn h2,http/1.1]
+ certW.pem *.domain.tld !secure.domain.tld
+ certS.pem [curves X25519:P-256 ciphers ECDHE-ECDSA-AES256-GCM-SHA384] secure.domain.tld
+
+defer-accept
+ Is an optional keyword which is supported only on certain Linux kernels. It
+ states that a connection will only be accepted once some data arrive on it,
+ or at worst after the first retransmit. This should be used only on protocols
+ for which the client talks first (e.g. HTTP). It can slightly improve
+ performance by ensuring that most of the request is already available when
+ the connection is accepted. On the other hand, it will not be able to detect
+ connections which don't talk. It is important to note that this option is
+ broken in all kernels up to 2.6.31, as the connection is never accepted until
+ the client talks. This can cause issues with front firewalls which would see
+ an established connection while the proxy will only see it in SYN_RECV. This
+ option is only supported on TCPv4/TCPv6 sockets and ignored by other ones.
+
+expose-fd listeners
+ This option is only usable with the stats socket. It gives your stats socket
+ the capability to pass listeners FD to another HAProxy process.
+ In master-worker mode, this is not required anymore, the listeners will be
+ passed using the internal socketpairs between the master and the workers.
+ See also "-x" in the management guide.
+
+force-sslv3
+ This option enforces use of SSLv3 only on SSL connections instantiated from
+ this listener. SSLv3 is generally less expensive than the TLS counterparts
+ for high connection rates. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv10
+ This option enforces use of TLSv1.0 only on SSL connections instantiated from
+ this listener. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv11
+ This option enforces use of TLSv1.1 only on SSL connections instantiated from
+ this listener. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv12
+ This option enforces use of TLSv1.2 only on SSL connections instantiated from
+ this listener. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv13
+ This option enforces use of TLSv1.3 only on SSL connections instantiated from
+ this listener. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+generate-certificates
+ This setting is only available when support for OpenSSL was built in. It
+ enables the dynamic SSL certificates generation. A CA certificate and its
+ private key are necessary (see 'ca-sign-file'). When HAProxy is configured as
+ a transparent forward proxy, SSL requests generate errors because of a common
+ name mismatch on the certificate presented to the client. With this option
+ enabled, HAProxy will try to forge a certificate using the SNI hostname
+ indicated by the client. This is done only if no certificate matches the SNI
+ hostname (see 'crt-list'). If an error occurs, the default certificate is
+ used, else the 'strict-sni' option is set.
+ It can also be used when HAProxy is configured as a reverse proxy to ease the
+ deployment of an architecture with many backends.
+
+ Creating a SSL certificate is an expensive operation, so a LRU cache is used
+ to store forged certificates (see 'tune.ssl.ssl-ctx-cache-size'). It
+ increases the HAProxy's memory footprint to reduce latency when the same
+ certificate is used many times.
+
+gid <gid>
+ Sets the group of the UNIX sockets to the designated system gid. It can also
+ be set by default in the global section's "unix-bind" statement. Note that
+ some platforms simply ignore this. This setting is equivalent to the "group"
+ setting except that the group ID is used instead of its name. This setting is
+ ignored by non UNIX sockets.
+
+group <group>
+ Sets the group of the UNIX sockets to the designated system group. It can
+ also be set by default in the global section's "unix-bind" statement. Note
+ that some platforms simply ignore this. This setting is equivalent to the
+ "gid" setting except that the group name is used instead of its gid. This
+ setting is ignored by non UNIX sockets.
+
+id <id>
+ Fixes the socket ID. By default, socket IDs are automatically assigned, but
+ sometimes it is more convenient to fix them to ease monitoring. This value
+ must be strictly positive and unique within the listener/frontend. This
+ option can only be used when defining only a single socket.
+
+interface <interface>
+ Restricts the socket to a specific interface. When specified, only packets
+ received from that particular interface are processed by the socket. This is
+ currently only supported on Linux. The interface must be a primary system
+ interface, not an aliased interface. It is also possible to bind multiple
+ frontends to the same address if they are bound to different interfaces. Note
+ that binding to a network interface requires root privileges. This parameter
+ is only compatible with TCPv4/TCPv6 sockets. When specified, return traffic
+ uses the same interface as inbound traffic, and its associated routing table,
+ even if there are explicit routes through different interfaces configured.
+ This can prove useful to address asymmetric routing issues when the same
+ client IP addresses need to be able to reach frontends hosted on different
+ interfaces.
+
+level <level>
+ This setting is used with the stats sockets only to restrict the nature of
+ the commands that can be issued on the socket. It is ignored by other
+ sockets. <level> can be one of :
+ - "user" is the least privileged level; only non-sensitive stats can be
+ read, and no change is allowed. It would make sense on systems where it
+ is not easy to restrict access to the socket.
+ - "operator" is the default level and fits most common uses. All data can
+ be read, and only non-sensitive changes are permitted (e.g. clear max
+ counters).
+ - "admin" should be used with care, as everything is permitted (e.g. clear
+ all counters).
+
+severity-output <format>
+ This setting is used with the stats sockets only to configure severity
+ level output prepended to informational feedback messages. Severity
+ level of messages can range between 0 and 7, conforming to syslog
+ rfc5424. Valid and successful socket commands requesting data
+ (i.e. "show map", "get acl foo" etc.) will never have a severity level
+ prepended. It is ignored by other sockets. <format> can be one of :
+ - "none" (default) no severity level is prepended to feedback messages.
+ - "number" severity level is prepended as a number.
+ - "string" severity level is prepended as a string following the
+ rfc5424 convention.
+
+maxconn <maxconn>
+ Limits the sockets to this number of concurrent connections. Extraneous
+ connections will remain in the system's backlog until a connection is
+ released. If unspecified, the limit will be the same as the frontend's
+ maxconn. Note that in case of port ranges or multiple addresses, the same
+ value will be applied to each socket. This setting enables different
+ limitations on expensive sockets, for instance SSL entries which may easily
+ eat all memory.
+
+mode <mode>
+ Sets the octal mode used to define access permissions on the UNIX socket. It
+ can also be set by default in the global section's "unix-bind" statement.
+ Note that some platforms simply ignore this. This setting is ignored by non
+ UNIX sockets.
+
+mss <maxseg>
+ Sets the TCP Maximum Segment Size (MSS) value to be advertised on incoming
+ connections. This can be used to force a lower MSS for certain specific
+ ports, for instance for connections passing through a VPN. Note that this
+ relies on a kernel feature which is theoretically supported under Linux but
+ was buggy in all versions prior to 2.6.28. It may or may not work on other
+ operating systems. It may also not change the advertised value but change the
+ effective size of outgoing segments. The commonly advertised value for TCPv4
+ over Ethernet networks is 1460 = 1500(MTU) - 40(IP+TCP). If this value is
+ positive, it will be used as the advertised MSS. If it is negative, it will
+ indicate by how much to reduce the incoming connection's advertised MSS for
+ outgoing segments. This parameter is only compatible with TCP v4/v6 sockets.
+
+name <name>
+ Sets an optional name for these sockets, which will be reported on the stats
+ page.
+
+namespace <name>
+ On Linux, it is possible to specify which network namespace a socket will
+ belong to. This directive makes it possible to explicitly bind a listener to
+ a namespace different from the default one. Please refer to your operating
+ system's documentation to find more details about network namespaces.
+
+nbconn <nbconn> [ EXPERIMENTAL ]
+ This setting is only valid for listener instances which use reverse HTTP.
+ This will define the count of connections which will be mounted in parallel.
+ If not specified, a default value of 1 is used.
+
+ Reverse HTTP is currently still in active development. Configuration
+ mechanism may change in the future. For this reason it is internally marked
+ as experimental, meaning that "expose-experimental-directives" must appear on
+ a line before this directive.
+
+nice <nice>
+ Sets the 'niceness' of connections initiated from the socket. Value must be
+ in the range -1024..1024 inclusive, and defaults to zero. Positive values
+ mean that such connections are more friendly to others and easily offer
+ their place in the scheduler. On the opposite, negative values mean that
+ connections want to run with a higher priority than others. The difference
+ only happens under high loads when the system is close to saturation.
+ Negative values are appropriate for low-latency or administration services,
+ and high values are generally recommended for CPU intensive tasks such as SSL
+ processing or bulk transfers which are less sensitive to latency. For example,
+ it may make sense to use a positive value for an SMTP socket and a negative
+ one for an RDP socket.
+
+no-alpn
+ Disables ALPN processing (technically speaking this sets the ALPN string to
+ an empty string that will not be advertised). It permits to cancel a previous
+ occurrence of an "alpn" setting and to disable application protocol
+ negotiation. It may also be used to prevent a listener from negotiating ALPN
+ with a client on an HTTPS or QUIC listener; by default, HTTPS listeners will
+ advertise "h2,http/1.1" and QUIC listeners will advertise "h3". See also
+ "alpn" above. Note that when using "crt-list", a certificate may override the
+ "alpn" setting and re-enable its processing.
+
+no-ca-names
+ This setting is only available when support for OpenSSL was built in. It
+ prevents sending CA names in the server hello message when "ca-file" is used.
+ Use "ca-verify-file" instead of "ca-file" with "no-ca-names".
+
+no-sslv3
+ This setting is only available when support for OpenSSL was built in. It
+ disables support for SSLv3 on any sockets instantiated from the listener when
+ SSL is supported. Note that SSLv2 is forced disabled in the code and cannot
+ be enabled using any configuration option. This option is also available on
+ global statement "ssl-default-bind-options". Use "ssl-min-ver" and
+ "ssl-max-ver" instead.
+
+no-tls-tickets
+ This setting is only available when support for OpenSSL was built in. It
+ disables the stateless session resumption (RFC 5077 TLS Ticket
+ extension) and force to use stateful session resumption. Stateless
+ session resumption is more expensive in CPU usage. This option is also
+ available on global statement "ssl-default-bind-options".
+ The TLS ticket mechanism is only used up to TLS 1.2.
+ Forward Secrecy is compromised with TLS tickets, unless ticket keys
+ are periodically rotated (via reload or by using "tls-ticket-keys").
+
+no-tlsv10
+ This setting is only available when support for OpenSSL was built in. It
+ disables support for TLSv1.0 on any sockets instantiated from the listener
+ when SSL is supported. Note that SSLv2 is forced disabled in the code and
+ cannot be enabled using any configuration option. This option is also
+ available on global statement "ssl-default-bind-options". Use "ssl-min-ver"
+ and "ssl-max-ver" instead.
+
+no-tlsv11
+ This setting is only available when support for OpenSSL was built in. It
+ disables support for TLSv1.1 on any sockets instantiated from the listener
+ when SSL is supported. Note that SSLv2 is forced disabled in the code and
+ cannot be enabled using any configuration option. This option is also
+ available on global statement "ssl-default-bind-options". Use "ssl-min-ver"
+ and "ssl-max-ver" instead.
+
+no-tlsv12
+ This setting is only available when support for OpenSSL was built in. It
+ disables support for TLSv1.2 on any sockets instantiated from the listener
+ when SSL is supported. Note that SSLv2 is forced disabled in the code and
+ cannot be enabled using any configuration option. This option is also
+ available on global statement "ssl-default-bind-options". Use "ssl-min-ver"
+ and "ssl-max-ver" instead.
+
+no-tlsv13
+ This setting is only available when support for OpenSSL was built in. It
+ disables support for TLSv1.3 on any sockets instantiated from the listener
+ when SSL is supported. Note that SSLv2 is forced disabled in the code and
+ cannot be enabled using any configuration option. This option is also
+ available on global statement "ssl-default-bind-options". Use "ssl-min-ver"
+ and "ssl-max-ver" instead.
+
+npn <protocols>
+ This enables the NPN TLS extension and advertises the specified protocol list
+ as supported on top of NPN. The protocol list consists in a comma-delimited
+ list of protocol names, for instance: "http/1.1,http/1.0" (without quotes).
+ This requires that the SSL library is built with support for TLS extensions
+ enabled (check with haproxy -vv). Note that the NPN extension has been
+ replaced with the ALPN extension (see the "alpn" keyword), though this one is
+ only available starting with OpenSSL 1.0.2. If HTTP/2 is desired on an older
+ version of OpenSSL, NPN might still be used as most clients still support it
+ at the time of writing this. It is possible to enable both NPN and ALPN
+ though it probably doesn't make any sense out of testing.
+
+ocsp-update [ off | on ] (crt-list only)
+ Enable automatic OCSP response update when set to 'on', disable it otherwise.
+ Its value defaults to 'off'.
+ Please note that for now, this option can only be used in a crt-list line, it
+ cannot be used directly on a bind line. It lies in this "Bind options"
+ section because it is still a frontend option. This limitation was set so
+ that the option applies to only one certificate at a time.
+ If a given certificate is used in multiple crt-lists with different values of
+ the 'ocsp-update' set, an error will be raised. Here is an example
+ configuration enabling it:
+
+ haproxy.cfg:
+ frontend fe
+ bind :443 ssl crt-list haproxy.list
+
+ haproxy.list:
+ server_cert.pem [ocsp-update on] foo.bar
+
+ When the option is set to 'on', we will try to get an ocsp response whenever
+ an ocsp uri is found in the frontend's certificate. The only limitation of
+ this mode is that the certificate's issuer will have to be known in order for
+ the OCSP certid to be built.
+ Each OCSP response will be updated at least once an hour, and even more
+ frequently if a given OCSP response has an expire date earlier than this one
+ hour limit. A minimum update interval of 5 minutes will still exist in order
+ to avoid updating too often responses that have a really short expire time or
+ even no 'Next Update' at all. Because of this hard limit, please note that
+ when auto update is set to 'on' or 'auto', any OCSP response loaded during
+ init will not be updated until at least 5 minutes, even if its expire time
+ ends before now+5m. This should not be too much of a hassle since an OCSP
+ response must be valid when it gets loaded during init (its expire time must
+ be in the future) so it is unlikely that this response expires in such a
+ short time after init.
+ On the other hand, if a certificate has an OCSP uri specified and no OCSP
+ response, setting this option to 'on' for the given certificate will ensure
+ that the OCSP response gets fetched automatically right after init.
+ The default minimum and maximum delays (5 minutes and 1 hour respectively)
+ can be configured by the "tune.ssl.ocsp-update.maxdelay" and
+ "tune.ssl.ocsp-update.mindelay" global options.
+
+ Whenever an OCSP response is updated by the auto update task or following a
+ call to the "update ssl ocsp-response" CLI command, a dedicated log line is
+ emitted. It follows a dedicated log-format that contains the following header
+ "%ci:%cp [%tr] %ft" and is followed by specific OCSP-related information:
+ - the path of the corresponding frontend certificate
+ - a numerical update status
+ - a textual update status
+ - the number of update failures for the given response
+ - the number of update successes for the given response
+ See "show ssl ocsp-updates" CLI command for a full list of error codes and
+ error messages. This line is emitted regardless of the success or failure of
+ the concerned OCSP response update.
+ The OCSP request/response is sent and received through an http_client
+ instance that has the dontlog-normal option set and that uses the regular
+ HTTP log format in case of error (unreachable OCSP responder for instance).
+ If such an error occurs, another log line that contains HTTP-related
+ information will then be emitted alongside the "regular" OCSP one (which will
+ likely have "HTTP error" as text status). But if a purely HTTP error happens
+ (unreachable OCSP responder for instance), an extra log line that follows the
+ regular HTTP log-format will be emitted.
+ Here are two examples of such log lines, with a successful OCSP update log
+ line first and then an example of an HTTP error with the two different lines
+ (lines were split and the URL was shortened for readability):
+ <134>Mar 6 11:16:53 haproxy[14872]: -:- [06/Mar/2023:11:16:52.808] \
+ <OCSP-UPDATE> /path_to_cert/foo.pem 1 "Update successful" 0 1
+
+ <134>Mar 6 11:18:55 haproxy[14872]: -:- [06/Mar/2023:11:18:54.207] \
+ <OCSP-UPDATE> /path_to_cert/bar.pem 2 "HTTP error" 1 0
+ <134>Mar 6 11:18:55 haproxy[14872]: -:- [06/Mar/2023:11:18:52.200] \
+ <OCSP-UPDATE> -/- 2/0/-1/-1/3009 503 217 - - SC-- 0/0/0/0/3 0/0 {} \
+ "GET http://127.0.0.1:12345/MEMwQT HTTP/1.1"
+
+ Troubleshooting:
+ A common error that can happen with let's encrypt certificates is if the DNS
+ resolution provides an IPv6 address and your system does not have a valid
+ outgoing IPv6 route. In such a case, you can either create the appropriate
+ route or set the "httpclient.resolvers.prefer ipv4" option in the global
+ section.
+ In case of "OCSP response check failure" error, you might want to check that
+ the issuer certificate that you provided is valid.
+
+prefer-client-ciphers
+ Use the client's preference when selecting the cipher suite, by default
+ the server's preference is enforced. This option is also available on
+ global statement "ssl-default-bind-options".
+ Note that with OpenSSL >= 1.1.1 ChaCha20-Poly1305 is reprioritized anyway
+ (without setting this option), if a ChaCha20-Poly1305 cipher is at the top of
+ the client cipher list.
+
+proto <name>
+ Forces the multiplexer's protocol to use for the incoming connections. It
+ must be compatible with the mode of the frontend (TCP or HTTP). It must also
+ be usable on the frontend side. The list of available protocols is reported
+ in haproxy -vv. The protocols properties are reported : the mode (TCP/HTTP),
+ the side (FE/BE), the mux name and its flags.
+
+ Some protocols are subject to the head-of-line blocking on server side
+ (flag=HOL_RISK). Finally some protocols don't support upgrades (flag=NO_UPG).
+ The HTX compatibility is also reported (flag=HTX).
+
+ Here are the protocols that may be used as argument to a "proto" directive on
+ a bind line :
+
+ h2 : mode=HTTP side=FE|BE mux=H2 flags=HTX|HOL_RISK|NO_UPG
+ h1 : mode=HTTP side=FE|BE mux=H1 flags=HTX|NO_UPG
+ none : mode=TCP side=FE|BE mux=PASS flags=NO_UPG
+
+ Idea behind this option is to bypass the selection of the best multiplexer's
+ protocol for all connections instantiated from this listening socket. For
+ instance, it is possible to force the http/2 on clear TCP by specifying "proto
+ h2" on the bind line.
+
+quic-cc-algo { cubic | newreno }
+quic-cc-algo { cubic | newreno }(max_window)
+ This is a QUIC specific setting to select the congestion control algorithm
+ for any connection attempts to the configured QUIC listeners. They are similar
+ to those used by TCP. An optional value in bytes may be used to specify the
+ maximum window size. It must be greater than 1k and smaller than 4g.
+
+ Default value: cubic
+ Default window value: tune.quic.frontend.conn-tx-buffers.limit * tune.bufsize
+
+ Example:
+ # newreno congestion control algorithm
+ quic-cc-algo newreno
+ # cubic congestion control algorithm with one megabyte as window
+ quic-cc-algo cubic(1m)
+
+quic-force-retry
+ This is a QUIC specific setting which forces the use of the QUIC Retry feature
+ for all the connection attempts to the configured QUIC listeners. It consists
+ in verifying the peers are able to receive packets at the transport address
+ they used to initiate a new connection, sending them a Retry packet which
+ contains a token. This token must be sent back to the Retry packet sender,
+ this latter being the only one to be able to validate the token. Note that QUIC
+ Retry will always be used even if a Retry threshold was set (see
+ "tune.quic.retry-threshold" setting).
+
+ This setting requires the cluster secret to be set or else an error will be
+ reported on startup (see "cluster-secret").
+
+ See https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.2 for more
+ information about QUIC retry.
+
+quic-socket [ connection | listener ]
+ This QUIC specific setting allows to define the socket allocation mode for
+ the specific listeners. See "tune.quic.socket-owner" for a full description
+ of its usage.
+
+shards <number> | by-thread | by-group
+ In multi-threaded mode, on operating systems supporting multiple listeners on
+ the same IP:port, this will automatically create this number of multiple
+ identical listeners for the same line, all bound to a fair share of the number
+ of the threads attached to this listener. This can sometimes be useful when
+ using very large thread counts where the in-kernel locking on a single socket
+ starts to cause a significant overhead. In this case the incoming traffic is
+ distributed over multiple sockets and the contention is reduced. Note that
+ doing this can easily increase the CPU usage by making more threads work a
+ little bit.
+
+ If the number of shards is higher than the number of available threads, it
+ will automatically be trimmed to the number of threads (i.e. one shard per
+ thread). The special "by-thread" value also creates as many shards as there
+ are threads on the "bind" line. Since the system will evenly distribute the
+ incoming traffic between all these shards, it is important that this number
+ is an integral divisor of the number of threads. Alternately, the other
+ special value "by-group" will create one shard per thread group. This can
+ be useful when dealing with many threads and not wanting to create too many
+ sockets. The load distribution will be a bit less optimal but the contention
+ (especially in the system) will still be lower than with a single socket.
+
+ On operating systems that do not support multiple sockets bound to the same
+ address, "by-thread" and "by-group" will automatically fall back to a single
+ shard. For "by-group" this is done without any warning since it doesn't
+ change anything for a single group, and will result in sockets being
+ duplicated for each group anyway. However, for "by-thread", a diagnostic
+ warning will be emitted if this happens since the resulting number of
+ listeners will not be the expected one.
+
+sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of signature algorithms that are negotiated
+ during the TLSv1.2 and TLSv1.3 handshake. The format of the string is defined
+ in "man 3 SSL_CTX_set1_sigalgs" from the OpenSSL man pages. It is not
+ recommended to use this setting unless compatibility with a middlebox is
+ required.
+
+ssl
+ This setting is only available when support for OpenSSL was built in. It
+ enables SSL deciphering on connections instantiated from this listener. A
+ certificate is necessary (see "crt" above). All contents in the buffers will
+ appear in clear text, so that ACLs and HTTP processing will only have access
+ to deciphered contents. SSLv3 is disabled per default, use "ssl-min-ver SSLv3"
+ to enable it.
+
+ssl-max-ver [ SSLv3 | TLSv1.0 | TLSv1.1 | TLSv1.2 | TLSv1.3 ]
+ This option enforces use of <version> or lower on SSL connections instantiated
+ from this listener. Using this setting without "ssl-min-ver" can be
+ ambiguous because the default ssl-min-ver value could change in future HAProxy
+ versions. This option is also available on global statement
+ "ssl-default-bind-options". See also "ssl-min-ver".
+
+ssl-min-ver [ SSLv3 | TLSv1.0 | TLSv1.1 | TLSv1.2 | TLSv1.3 ]
+ This option enforces use of <version> or upper on SSL connections
+ instantiated from this listener. The default value is "TLSv1.2". This option
+ is also available on global statement "ssl-default-bind-options".
+ See also "ssl-max-ver".
+
+strict-sni
+ This setting is only available when support for OpenSSL was built in. The
+ SSL/TLS negotiation is allowed only if the client provided an SNI which
+ matches a certificate. The default certificate is not used. This option also
+ allows to start without any certificate on a bind line, so an empty directory
+ could be used and filled later from the stats socket.
+ See the "crt" option for more information. See "add ssl crt-list" command in
+ the management guide.
+
+tcp-ut <delay>
+ Sets the TCP User Timeout for all incoming connections instantiated from this
+ listening socket. This option is available on Linux since version 2.6.37. It
+ allows HAProxy to configure a timeout for sockets which contain data not
+ receiving an acknowledgment for the configured delay. This is especially
+ useful on long-lived connections experiencing long idle periods such as
+ remote terminals or database connection pools, where the client and server
+ timeouts must remain high to allow a long period of idle, but where it is
+ important to detect that the client has disappeared in order to release all
+ resources associated with its connection (and the server's session). The
+ argument is a delay expressed in milliseconds by default. This only works
+ for regular TCP connections, and is ignored for other protocols.
+
+tfo
+ Is an optional keyword which is supported only on Linux kernels >= 3.7. It
+ enables TCP Fast Open on the listening socket, which means that clients which
+ support this feature will be able to send a request and receive a response
+ during the 3-way handshake starting from second connection, thus saving one
+ round-trip after the first connection. This only makes sense with protocols
+ that use high connection rates and where each round trip matters. This can
+ possibly cause issues with many firewalls which do not accept data on SYN
+ packets, so this option should only be enabled once well tested. This option
+ is only supported on TCPv4/TCPv6 sockets and ignored by other ones. You may
+ need to build HAProxy with USE_TFO=1 if your libc doesn't define
+ TCP_FASTOPEN.
+
+thread [<thread-group>/]<thread-set>[,...]
+ This restricts the list of threads on which this listener is allowed to run.
+ It does not enforce any of them but eliminates those which do not match. It
+ limits the threads allowed to process incoming connections for this listener.
+
+ There are two numbering schemes. By default, thread numbers are absolute in
+ the process, comprised between 1 and the value specified in global.nbthread.
+ It is also possible to designate a thread number using its relative number
+ inside its thread group, by specifying the thread group number first, then a
+ slash ('/') and the relative thread number(s). In this case thread numbers
+ also start at 1 and end at 32 or 64 depending on the platform. When absolute
+ thread numbers are specified, they will be automatically translated to
+ relative numbers once thread groups are known. Usually, absolute numbers are
+ preferred for simple configurations, and relative ones are preferred for
+ complex configurations where CPU arrangement matters for performance.
+
+ After the optional thread group number, the "thread-set" specification must
+ use the following format:
+
+ "all" | "odd" | "even" | [number][-[number]]
+
+ As their names imply, "all" validates all threads within the set (either all
+ of the group's when a group is specified, or all of the process' threads),
+ "odd" validates all odd-numbered threads (every other thread starting at 1)
+ either for the process or the group, and "even" validates all even-numbered
+ threads (every other thread starting at 2). If instead thread number ranges
+ are used, then all threads included in the range from the first to the last
+ thread number are validated. The numbers are either relative to the group
+ or absolute depending on the presence of a thread group number. If the first
+ thread number is omitted, "1" is used, representing either the first thread
+ of the group or the first thread of the process. If the last thread number is
+ omitted, either the last thread number of the group (32 or 64) is used, or
+ the last thread number of the process (global.nbthread).
+
+ These ranges may be repeated and delimited by a comma, so that non-contiguous
+ thread sets can be specified, and the group, if present, must be specified
+ again for each new range. Note that it is not permitted to mix group-relative
+ and absolute specifications because the whole "bind" line must use either
+ an absolute notation or a relative one, as those not set will be resolved at
+ the end of the parsing.
+
+ It is important to know that each listener described by a "bind" line creates
+ at least one socket represented by at least one file descriptor. Since file
+ descriptors cannot span multiple thread groups, if a "bind" line specifies a
+ thread range that covers more than one group, several file descriptors will
+ automatically be created so that there is at least one per group. Technically
+ speaking they all refer to the same socket in the kernel, but they will get a
+ distinct identifier in haproxy and will even have a dedicated stats entry if
+ "option socket-stats" is used.
+
+ The main purpose is to have multiple bind lines sharing the same IP:port but
+ not the same thread in a listener, so that the system can distribute the
+ incoming connections into multiple queues, bypassing haproxy's internal queue
+ load balancing. Currently Linux 3.9 and above is known for supporting this.
+ See also the "shards" keyword above that automates duplication of "bind"
+ lines and their assignment to multiple groups of threads.
+
+ This keyword is compatible with reverse HTTP binds. However, it is forbidden
+ to specify a thread set which spans across several thread groups for such a
+ listener as this may cause "nbconn" to not work as intended.
+
+tls-ticket-keys <keyfile>
+ Sets the TLS ticket keys file to load the keys from. The keys need to be 48
+ or 80 bytes long, depending if aes128 or aes256 is used, encoded with base64
+ with one line per key (ex. openssl rand 80 | openssl base64 -A | xargs echo).
+ The first key determines the key length used for next keys: you can't mix
+ aes128 and aes256 keys. Number of keys is specified by the TLS_TICKETS_NO
+ build option (default 3) and at least as many keys need to be present in
+ the file. Last TLS_TICKETS_NO keys will be used for decryption and the
+ penultimate one for encryption. This enables easy key rotation by just
+ appending new key to the file and reloading the process. Keys must be
+ periodically rotated (ex. every 12h) or Perfect Forward Secrecy is
+ compromised. It is also a good idea to keep the keys off any permanent
+ storage such as hard drives (hint: use tmpfs and don't swap those files).
+ Lifetime hint can be changed using tune.ssl.timeout.
+
+transparent
+ Is an optional keyword which is supported only on certain Linux kernels. It
+ indicates that the addresses will be bound even if they do not belong to the
+ local machine, and that packets targeting any of these addresses will be
+ intercepted just as if the addresses were locally configured. This normally
+ requires that IP forwarding is enabled. Caution! do not use this with the
+ default address '*', as it would redirect any traffic for the specified port.
+ This keyword is available only when HAProxy is built with USE_LINUX_TPROXY=1.
+ This parameter is only compatible with TCPv4 and TCPv6 sockets, depending on
+ kernel version. Some distribution kernels include backports of the feature,
+ so check for support with your vendor.
+
+v4v6
+ Is an optional keyword which is supported only on most recent systems
+ including Linux kernels >= 2.4.21. It is used to bind a socket to both IPv4
+ and IPv6 when it uses the default address. Doing so is sometimes necessary
+ on systems which bind to IPv6 only by default. It has no effect on non-IPv6
+ sockets, and is overridden by the "v6only" option.
+
+v6only
+ Is an optional keyword which is supported only on most recent systems
+ including Linux kernels >= 2.4.21. It is used to bind a socket to IPv6 only
+ when it uses the default address. Doing so is sometimes preferred to doing it
+ system-wide as it is per-listener. It has no effect on non-IPv6 sockets and
+ has precedence over the "v4v6" option.
+
+uid <uid>
+ Sets the owner of the UNIX sockets to the designated system uid. It can also
+ be set by default in the global section's "unix-bind" statement. Note that
+ some platforms simply ignore this. This setting is equivalent to the "user"
+ setting except that the user numeric ID is used instead of its name. This
+ setting is ignored by non UNIX sockets.
+
+user <user>
+ Sets the owner of the UNIX sockets to the designated system user. It can also
+ be set by default in the global section's "unix-bind" statement. Note that
+ some platforms simply ignore this. This setting is equivalent to the "uid"
+ setting except that the user name is used instead of its uid. This setting is
+ ignored by non UNIX sockets.
+
+verify [none|optional|required]
+ This setting is only available when support for OpenSSL was built in. If set
+ to 'none', client certificate is not requested. This is the default. In other
+ cases, a client certificate is requested. If the client does not provide a
+ certificate after the request and if 'verify' is set to 'required', then the
+ handshake is aborted, while it would have succeeded if set to 'optional'. The
+ certificate provided by the client is always verified using CAs from
+ 'ca-file' and optional CRLs from 'crl-file'. On verify failure the handshake
+ is aborted, regardless of the 'verify' option, unless the error code exactly
+ matches one of those listed with 'ca-ignore-err' or 'crt-ignore-err'.
+
+5.2. Server and default-server options
+------------------------------------
+
+The "server" and "default-server" keywords support a certain number of settings
+which are all passed as arguments on the server line. The order in which those
+arguments appear does not count, and they are all optional. Some of those
+settings are single words (booleans) while others expect one or several values
+after them. In this case, the values must immediately follow the setting name.
+Except default-server, all those settings must be specified after the server's
+address if they are used:
+
+ server <name> <address>[:port] [settings ...]
+ default-server [settings ...]
+
+Note that all these settings are supported both by "server" and "default-server"
+keywords, except "id" which is only supported by "server".
+
+The currently supported settings are the following ones.
+
+addr <ipv4|ipv6>
+ Using the "addr" parameter, it becomes possible to use a different IP address
+ to send health-checks or to probe the agent-check. On some servers, it may be
+ desirable to dedicate an IP address to a specific component able to perform
+ complex tests which are more suitable to health-checks than the application.
+ This parameter is ignored if the "check" parameter is not set. See also the
+ "port" parameter.
+
+agent-check
+ Enable an auxiliary agent check which is run independently of a regular
+ health check. An agent health check is performed by making a TCP connection
+ to the port set by the "agent-port" parameter and reading an ASCII string
+ terminated by the first '\r' or '\n' met. The string is made of a series of
+ words delimited by spaces, tabs or commas in any order, each consisting of :
+
+ - An ASCII representation of a positive integer percentage, e.g. "75%".
+ Values in this format will set the weight proportional to the initial
+ weight of a server as configured when HAProxy starts. Note that a zero
+ weight is reported on the stats page as "DRAIN" since it has the same
+ effect on the server (it's removed from the LB farm).
+
+ - The string "maxconn:" followed by an integer (no space between). Values
+ in this format will set the maxconn of a server. The maximum number of
+ connections advertised needs to be multiplied by the number of load
+ balancers and different backends that use this health check to get the
+ total number of connections the server might receive. Example: maxconn:30
+
+ - The word "ready". This will turn the server's administrative state to the
+ READY mode, thus canceling any DRAIN or MAINT state
+
+ - The word "drain". This will turn the server's administrative state to the
+ DRAIN mode, thus it will not accept any new connections other than those
+ that are accepted via persistence.
+
+ - The word "maint". This will turn the server's administrative state to the
+ MAINT mode, thus it will not accept any new connections at all, and health
+ checks will be stopped.
+
+ - The words "down", "fail", or "stopped", optionally followed by a
+ description string after a sharp ('#'). All of these mark the server's
+ operating state as DOWN, but since the word itself is reported on the stats
+ page, the difference allows an administrator to know if the situation was
+ expected or not : the service may intentionally be stopped, may appear up
+ but fail some validity tests, or may be seen as down (e.g. missing process,
+ or port not responding).
+
+ - The word "up" sets back the server's operating state as UP if health checks
+ also report that the service is accessible.
+
+ Parameters which are not advertised by the agent are not changed. For
+ example, an agent might be designed to monitor CPU usage and only report a
+ relative weight and never interact with the operating status. Similarly, an
+ agent could be designed as an end-user interface with 3 radio buttons
+ allowing an administrator to change only the administrative state. However,
+ it is important to consider that only the agent may revert its own actions,
+ so if a server is set to DRAIN mode or to DOWN state using the agent, the
+ agent must implement the other equivalent actions to bring the service into
+ operations again.
+
+ Failure to connect to the agent is not considered an error as connectivity
+ is tested by the regular health check which is enabled by the "check"
+ parameter. Warning though, it is not a good idea to stop an agent after it
+ reports "down", since only an agent reporting "up" will be able to turn the
+ server up again. Note that the CLI on the Unix stats socket is also able to
+ force an agent's result in order to work around a bogus agent if needed.
+
+ Requires the "agent-port" parameter to be set. See also the "agent-inter"
+ and "no-agent-check" parameters.
+
+agent-send <string>
+ If this option is specified, HAProxy will send the given string (verbatim)
+ to the agent server upon connection. You could, for example, encode
+ the backend name into this string, which would enable your agent to send
+ different responses based on the backend. Make sure to include a '\n' if
+ you want to terminate your request with a newline.
+
+agent-inter <delay>
+ The "agent-inter" parameter sets the interval between two agent checks
+ to <delay> milliseconds. If left unspecified, the delay defaults to 2000 ms.
+
+ Just as with every other time-based parameter, it may be entered in any
+ other explicit unit among { us, ms, s, m, h, d }. The "agent-inter"
+ parameter also serves as a timeout for agent checks when "timeout check" is
+ not set. In order to reduce "resonance" effects when multiple servers are
+ hosted on the same hardware, the agent and health checks of all servers
+ are started with a small time offset between them. It is also possible to
+ add some random noise in the agent and health checks interval using the
+ global "spread-checks" keyword. This makes sense for instance when a lot
+ of backends use the same servers.
+
+ See also the "agent-check" and "agent-port" parameters.
+
+agent-addr <addr>
+ The "agent-addr" parameter sets address for agent check.
+
+ You can offload agent-check to another target, so you can make single place
+ managing status and weights of servers defined in HAProxy in case you can't
+ make self-aware and self-managing services. You can specify both IP or
+ hostname, it will be resolved.
+
+agent-port <port>
+ The "agent-port" parameter sets the TCP port used for agent checks.
+
+ See also the "agent-check" and "agent-inter" parameters.
+
+allow-0rtt
+ Allow sending early data to the server when using TLS 1.3.
+ Note that early data will be sent only if the client used early data, or
+ if the backend uses "retry-on" with the "0rtt-rejected" keyword.
+
+alpn <protocols>
+ This enables the TLS ALPN extension and advertises the specified protocol
+ list as supported on top of ALPN. The protocol list consists in a comma-
+ delimited list of protocol names, for instance: "http/1.1,http/1.0" (without
+ quotes). This requires that the SSL library is built with support for TLS
+ extensions enabled (check with haproxy -vv). The ALPN extension replaces the
+ initial NPN extension. ALPN is required to connect to HTTP/2 servers.
+ Versions of OpenSSL prior to 1.0.2 didn't support ALPN and only supported the
+ now obsolete NPN extension.
+ If both HTTP/2 and HTTP/1.1 are expected to be supported, both versions can
+ be advertised, in order of preference, like below :
+
+ server 127.0.0.1:443 ssl crt pub.pem alpn h2,http/1.1
+
+ See also "ws" to use an alternative ALPN for websocket streams.
+
+backup
+ When "backup" is present on a server line, the server is only used in load
+ balancing when all other non-backup servers are unavailable. Requests coming
+ with a persistence cookie referencing the server will always be served
+ though. By default, only the first operational backup server is used, unless
+ the "allbackups" option is set in the backend. See also the "no-backup" and
+ "allbackups" options.
+
+ca-file <cafile>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file from which to load CA certificates used to verify
+ server's certificate. It is possible to load a directory containing multiple
+ CAs, in this case HAProxy will try to load every ".pem", ".crt", ".cer", and
+ ".crl" available in the directory, files starting with a dot are ignored.
+
+ In order to use the trusted CAs of your system, the "@system-ca" parameter
+ could be used in place of the cafile. The location of this directory could be
+ overwritten by setting the SSL_CERT_DIR environment variable.
+
+check
+ This option enables health checks on a server:
+ - when not set, no health checking is performed, and the server is always
+ considered available.
+ - when set and no other check method is configured, the server is considered
+ available when a connection can be established at the highest configured
+ transport layer. This means TCP by default, or SSL/TLS when "ssl" or
+ "check-ssl" are set, both possibly combined with connection prefixes such
+ as a PROXY protocol header when "send-proxy" or "check-send-proxy" are
+ set. This behavior is slightly different for dynamic servers, read the
+ following paragraphs for more details.
+ - when set and an application-level health check is defined, the
+ application-level exchanges are performed on top of the configured
+ transport layer and the server is considered available if all of the
+ exchanges succeed.
+
+ By default, health checks are performed on the same address and port as
+ configured on the server, using the same encapsulation parameters (SSL/TLS,
+ proxy-protocol header, etc... ). It is possible to change the destination
+ address using "addr" and the port using "port". When done, it is assumed the
+ server isn't checked on the service port, and configured encapsulation
+ parameters are not reused. One must explicitly set "check-send-proxy" to send
+ connection headers, "check-ssl" to use SSL/TLS.
+
+ Note that the implicit configuration of ssl and PROXY protocol is not
+ performed for dynamic servers. In this case, it is required to explicitly
+ use "check-ssl" and "check-send-proxy" when wanted, even if the check port is
+ not overridden.
+
+ When "sni" or "alpn" are set on the server line, their value is not used for
+ health checks and one must use "check-sni" or "check-alpn".
+
+ The default source address for health check traffic is the same as the one
+ defined in the backend. It can be changed with the "source" keyword.
+
+ The interval between checks can be set using the "inter" keyword, and the
+ "rise" and "fall" keywords can be used to define how many successful or
+ failed health checks are required to flag a server available or not
+ available.
+
+ Optional application-level health checks can be configured with "option
+ httpchk", "option mysql-check", "option smtpchk", "option pgsql-check",
+ "option ldap-check", or "option redis-check".
+
+ Example:
+ # simple tcp check
+ backend foo
+ server s1 192.168.0.1:80 check
+ # this does a tcp connect + tls handshake
+ backend foo
+ server s1 192.168.0.1:443 ssl check
+ # simple tcp check is enough for check success
+ backend foo
+ option tcp-check
+ tcp-check connect
+ server s1 192.168.0.1:443 ssl check
+
+check-send-proxy
+ This option forces emission of a PROXY protocol line with outgoing health
+ checks, regardless of whether the server uses send-proxy or not for the
+ normal traffic. By default, the PROXY protocol is enabled for health checks
+ if it is already enabled for normal traffic and if no "port" nor "addr"
+ directive is present. However, if such a directive is present, the
+ "check-send-proxy" option needs to be used to force the use of the
+ protocol. See also the "send-proxy" option for more information.
+
+check-alpn <protocols>
+ Defines which protocols to advertise with ALPN. The protocol list consists of
+ a comma-delimited list of protocol names, for instance: "http/1.1,http/1.0"
+ (without quotes). If it is not set, the server ALPN is used.
+
+check-proto <name>
+ Forces the multiplexer's protocol to use for the server's health-check
+ connections. It must be compatible with the health-check type (TCP or
+ HTTP). It must also be usable on the backend side. The list of available
+ protocols is reported in haproxy -vv. The protocols properties are
+ reported : the mode (TCP/HTTP), the side (FE/BE), the mux name and its flags.
+
+ Some protocols are subject to the head-of-line blocking on server side
+ (flag=HOL_RISK). Finally some protocols don't support upgrades (flag=NO_UPG).
+ The HTX compatibility is also reported (flag=HTX).
+
+ Here are the protocols that may be used as argument to a "check-proto"
+ directive on a server line:
+
+ h2 : mode=HTTP side=FE|BE mux=H2 flags=HTX|HOL_RISK|NO_UPG
+ fcgi : mode=HTTP side=BE mux=FCGI flags=HTX|HOL_RISK|NO_UPG
+ h1 : mode=HTTP side=FE|BE mux=H1 flags=HTX|NO_UPG
+ none : mode=TCP side=FE|BE mux=PASS flags=NO_UPG
+
+ Idea behind this option is to bypass the selection of the best multiplexer's
+ protocol for health-check connections established to this server.
+ If not defined, the server one will be used, if set.
+
+check-sni <sni>
+ This option allows you to specify the SNI to be used when doing health checks
+ over SSL. It is only possible to use a string to set <sni>. If you want to
+ set a SNI for proxied traffic, see "sni".
+
+check-ssl
+ This option forces encryption of all health checks over SSL, regardless of
+ whether the server uses SSL or not for the normal traffic. This is generally
+ used when an explicit "port" or "addr" directive is specified and SSL health
+ checks are not inherited. It is important to understand that this option
+ inserts an SSL transport layer below the checks, so that a simple TCP connect
+ check becomes an SSL connect, which replaces the old ssl-hello-chk. The most
+ common use is to send HTTPS checks by combining "httpchk" with SSL checks.
+ All SSL settings are common to health checks and traffic (e.g. ciphers).
+ See the "ssl" option for more information and "no-check-ssl" to disable
+ this option.
+
+check-via-socks4
+ This option enables outgoing health checks using upstream socks4 proxy. By
+ default, the health checks won't go through socks tunnel even if it was enabled
+ for normal traffic.
+
+ciphers <ciphers>
+ This setting is only available when support for OpenSSL was built in. This
+ option sets the string describing the list of cipher algorithms that is
+ negotiated during the SSL/TLS handshake with the server. The format of the
+ string is defined in "man 1 ciphers" from OpenSSL man pages. For background
+ information and recommendations see e.g.
+ (https://wiki.mozilla.org/Security/Server_Side_TLS) and
+ (https://mozilla.github.io/server-side-tls/ssl-config-generator/). For TLSv1.3
+ cipher configuration, please check the "ciphersuites" keyword.
+
+ciphersuites <ciphersuites>
+ This setting is only available when support for OpenSSL was built in and
+ OpenSSL 1.1.1 or later was used to build HAProxy. This option sets the string
+ describing the list of cipher algorithms that is negotiated during the TLS
+ 1.3 handshake with the server. The format of the string is defined in
+ "man 1 ciphers" from OpenSSL man pages under the "ciphersuites" section.
+ For cipher configuration for TLSv1.2 and earlier, please check the "ciphers"
+ keyword.
+
+client-sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of signature algorithms related to client
+ authentication that are negotiated. The format of the string is defined in
+ "man 3 SSL_CTX_set1_client_sigalgs" from the OpenSSL man pages. It is not
+ recommended to use this setting if no specific usecase was identified.
+
+cookie <value>
+ The "cookie" parameter sets the cookie value assigned to the server to
+ <value>. This value will be checked in incoming requests, and the first
+ operational server possessing the same value will be selected. In return, in
+ cookie insertion or rewrite modes, this value will be assigned to the cookie
+ sent to the client. There is nothing wrong in having several servers sharing
+ the same cookie value, and it is in fact somewhat common between normal and
+ backup servers. See also the "cookie" keyword in backend section.
+
+crl-file <crlfile>
+ This setting is only available when support for OpenSSL was built in. It
+ designates a PEM file from which to load certificate revocation list used
+ to verify server's certificate.
+
+crt <cert>
+ This setting is only available when support for OpenSSL was built in.
+ It designates a PEM file from which to load both a certificate and the
+ associated private key. This file can be built by concatenating both PEM
+ files into one. This certificate will be sent if the server send a client
+ certificate request.
+
+ If the file does not contain a private key, HAProxy will try to load the key
+ at the same path suffixed by a ".key" (provided the "ssl-load-extra-files"
+ option is set accordingly).
+
+curves <curves>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of elliptic curves algorithms ("curve suite")
+ that are negotiated during the SSL/TLS handshake with ECDHE. The format of the
+ string is a colon-delimited list of curve names.
+ Example: "X25519:P-256" (without quote)
+
+disabled
+ The "disabled" keyword starts the server in the "disabled" state. That means
+ that it is marked down in maintenance mode, and no connection other than the
+ ones allowed by persist mode will reach it. It is very well suited to setup
+ new servers, because normal traffic will never reach them, while it is still
+ possible to test the service by making use of the force-persist mechanism.
+ See also "enabled" setting.
+
+enabled
+ This option may be used as 'server' setting to reset any 'disabled'
+ setting which would have been inherited from 'default-server' directive as
+ default value.
+ It may also be used as 'default-server' setting to reset any previous
+ 'default-server' 'disabled' setting.
+
+error-limit <count>
+ If health observing is enabled, the "error-limit" parameter specifies the
+ number of consecutive errors that triggers event selected by the "on-error"
+ option. By default it is set to 10 consecutive errors.
+
+ See also the "check", "error-limit" and "on-error".
+
+fall <count>
+ The "fall" parameter states that a server will be considered as dead after
+ <count> consecutive unsuccessful health checks. This value defaults to 3 if
+ unspecified. See also the "check", "inter" and "rise" parameters.
+
+force-sslv3
+ This option enforces use of SSLv3 only when SSL is used to communicate with
+ the server. SSLv3 is generally less expensive than the TLS counterparts for
+ high connection rates. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv10
+ This option enforces use of TLSv1.0 only when SSL is used to communicate with
+ the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv11
+ This option enforces use of TLSv1.1 only when SSL is used to communicate with
+ the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv12
+ This option enforces use of TLSv1.2 only when SSL is used to communicate with
+ the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+force-tlsv13
+ This option enforces use of TLSv1.3 only when SSL is used to communicate with
+ the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver" and "ssl-max-ver".
+
+id <value>
+ Set a persistent ID for the server. This ID must be positive and unique for
+ the proxy. An unused ID will automatically be assigned if unset. The first
+ assigned value will be 1. This ID is currently only returned in statistics.
+
+init-addr {last | libc | none | <ip>},[...]*
+ Indicate in what order the server's address should be resolved upon startup
+ if it uses an FQDN. Attempts are made to resolve the address by applying in
+ turn each of the methods mentioned in the comma-delimited list. The first
+ method which succeeds is used. If the end of the list is reached without
+ finding a working method, an error is thrown. Method "last" suggests to pick
+ the address which appears in the state file (see "server-state-file"). Method
+ "libc" uses the libc's internal resolver (gethostbyname() or getaddrinfo()
+ depending on the operating system and build options). Method "none"
+ specifically indicates that the server should start without any valid IP
+ address in a down state. It can be useful to ignore some DNS issues upon
+ startup, waiting for the situation to get fixed later. Finally, an IP address
+ (IPv4 or IPv6) may be provided. It can be the currently known address of the
+ server (e.g. filled by a configuration generator), or the address of a dummy
+ server used to catch old sessions and present them with a decent error
+ message for example. When the "first" load balancing algorithm is used, this
+ IP address could point to a fake server used to trigger the creation of new
+ instances on the fly. This option defaults to "last,libc" indicating that the
+ previous address found in the state file (if any) is used first, otherwise
+ the libc's resolver is used. This ensures continued compatibility with the
+ historic behavior.
+
+ Example:
+ defaults
+ # never fail on address resolution
+ default-server init-addr last,libc,none
+
+inter <delay>
+fastinter <delay>
+downinter <delay>
+ The "inter" parameter sets the interval between two consecutive health checks
+ to <delay> milliseconds. If left unspecified, the delay defaults to 2000 ms.
+ It is also possible to use "fastinter" and "downinter" to optimize delays
+ between checks depending on the server state :
+
+ Server state | Interval used
+ ----------------------------------------+----------------------------------
+ UP 100% (non-transitional) | "inter"
+ ----------------------------------------+----------------------------------
+ Transitionally UP (going down "fall"), | "fastinter" if set,
+ Transitionally DOWN (going up "rise"), | "inter" otherwise.
+ or yet unchecked. |
+ ----------------------------------------+----------------------------------
+ DOWN 100% (non-transitional) | "downinter" if set,
+ | "inter" otherwise.
+ ----------------------------------------+----------------------------------
+
+ Just as with every other time-based parameter, they can be entered in any
+ other explicit unit among { us, ms, s, m, h, d }. The "inter" parameter also
+ serves as a timeout for health checks sent to servers if "timeout check" is
+ not set. In order to reduce "resonance" effects when multiple servers are
+ hosted on the same hardware, the agent and health checks of all servers
+ are started with a small time offset between them. It is also possible to
+ add some random noise in the agent and health checks interval using the
+ global "spread-checks" keyword. This makes sense for instance when a lot
+ of backends use the same servers. The global "tune.max-checks-per-thread"
+ setting, if defined to a non-null value, will limit the number of concurrent
+ checks being performed at once on any given thread. In order to achieve this,
+ haproxy will put in a queue the checks that were about to start on a thread
+ that has reached this limit, until another check finishes. This will have for
+ effect to extend the effective check interval. In such a case, reducing the
+ "inter" setting will have a very limited effect as it will not be able to
+ reduce the time spent in the queue.
+
+log-bufsize <bufsize>
+ The "log-bufsize" specifies the ring bufsize to use for the implicit ring
+ that will be associated to the log server in a log backend. When not
+ specified, this defaults to BUFSIZE. Use of a greater value will increase
+ memory usage but can help to prevent the loss of log messages with slow
+ servers since the buffer will be able to hold more pending messages.
+ This keyword may only be used in log backend sections (with "mode log").
+
+log-proto <logproto>
+ The "log-proto" specifies the protocol used to forward event messages to
+ a server configured in a log or ring section. Possible values are "legacy"
+ and "octet-count" corresponding respectively to "Non-transparent-framing"
+ and "Octet counting" in rfc6587. "legacy" is the default.
+
+maxconn <maxconn>
+ The "maxconn" parameter specifies the maximal number of concurrent
+ connections that will be sent to this server. If the number of incoming
+ concurrent connections goes higher than this value, they will be queued,
+ waiting for a slot to be released. This parameter is very important as it can
+ save fragile servers from going down under extreme loads. If a "minconn"
+ parameter is specified, the limit becomes dynamic. The default value is "0"
+ which means unlimited. See also the "minconn" and "maxqueue" parameters, and
+ the backend's "fullconn" keyword.
+
+ In HTTP mode this parameter limits the number of concurrent requests instead
+ of the number of connections. Multiple requests might be multiplexed over a
+ single TCP connection to the server. As an example if you specify a maxconn
+ of 50 you might see between 1 and 50 actual server connections, but no more
+ than 50 concurrent requests.
+
+maxqueue <maxqueue>
+ The "maxqueue" parameter specifies the maximal number of connections which
+ will wait in the queue for this server. If this limit is reached, next
+ requests will be redispatched to other servers instead of indefinitely
+ waiting to be served. This will break persistence but may allow people to
+ quickly re-log in when the server they try to connect to is dying. Some load
+ balancing algorithms such as leastconn take this into account and accept to
+ add requests into a server's queue up to this value if it is explicitly set
+ to a value greater than zero, which often allows to better smooth the load
+ when dealing with single-digit maxconn values. The default value is "0" which
+ means the queue is unlimited. See also the "maxconn" and "minconn" parameters
+ and "balance leastconn".
+
+max-reuse <count>
+ The "max-reuse" argument indicates the HTTP connection processors that they
+ should not reuse a server connection more than this number of times to send
+ new requests. Permitted values are -1 (the default), which disables this
+ limit, or any positive value. Value zero will effectively disable keep-alive.
+ This is only used to work around certain server bugs which cause them to leak
+ resources over time. The argument is not necessarily respected by the lower
+ layers as there might be technical limitations making it impossible to
+ enforce. At least HTTP/2 connections to servers will respect it.
+
+minconn <minconn>
+ When the "minconn" parameter is set, the maxconn limit becomes a dynamic
+ limit following the backend's load. The server will always accept at least
+ <minconn> connections, never more than <maxconn>, and the limit will be on
+ the ramp between both values when the backend has less than <fullconn>
+ concurrent connections. This makes it possible to limit the load on the
+ server during normal loads, but push it further for important loads without
+ overloading the server during exceptional loads. See also the "maxconn"
+ and "maxqueue" parameters, as well as the "fullconn" backend keyword.
+
+namespace <name>
+ On Linux, it is possible to specify which network namespace a socket will
+ belong to. This directive makes it possible to explicitly bind a server to
+ a namespace different from the default one. Please refer to your operating
+ system's documentation to find more details about network namespaces.
+
+no-agent-check
+ This option may be used as "server" setting to reset any "agent-check"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "agent-check" setting.
+
+no-backup
+ This option may be used as "server" setting to reset any "backup"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "backup" setting.
+
+no-check
+ This option may be used as "server" setting to reset any "check"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "check" setting.
+
+no-check-ssl
+ This option may be used as "server" setting to reset any "check-ssl"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "check-ssl" setting.
+
+no-send-proxy
+ This option may be used as "server" setting to reset any "send-proxy"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "send-proxy" setting.
+
+no-send-proxy-v2
+ This option may be used as "server" setting to reset any "send-proxy-v2"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "send-proxy-v2" setting.
+
+no-send-proxy-v2-ssl
+ This option may be used as "server" setting to reset any "send-proxy-v2-ssl"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "send-proxy-v2-ssl" setting.
+
+no-send-proxy-v2-ssl-cn
+ This option may be used as "server" setting to reset any "send-proxy-v2-ssl-cn"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "send-proxy-v2-ssl-cn" setting.
+
+no-ssl
+ This option may be used as "server" setting to reset any "ssl"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "ssl" setting.
+
+ Note that using `default-server ssl` setting and `no-ssl` on server will
+ however init SSL connection, so it can be later be enabled through the
+ runtime API: see `set server` commands in management doc.
+
+no-ssl-reuse
+ This option disables SSL session reuse when SSL is used to communicate with
+ the server. It will force the server to perform a full handshake for every
+ new connection. It's probably only useful for benchmarking, troubleshooting,
+ and for paranoid users.
+
+no-sslv3
+ This option disables support for SSLv3 when SSL is used to communicate with
+ the server. Note that SSLv2 is disabled in the code and cannot be enabled
+ using any configuration option. Use "ssl-min-ver" and "ssl-max-ver" instead.
+
+ Supported in default-server: No
+
+no-tls-tickets
+ This setting is only available when support for OpenSSL was built in. It
+ disables the stateless session resumption (RFC 5077 TLS Ticket
+ extension) and force to use stateful session resumption. Stateless
+ session resumption is more expensive in CPU usage for servers. This option
+ is also available on global statement "ssl-default-server-options".
+ The TLS ticket mechanism is only used up to TLS 1.2.
+ Forward Secrecy is compromised with TLS tickets, unless ticket keys
+ are periodically rotated (via reload or by using "tls-ticket-keys").
+ See also "tls-tickets".
+
+no-tlsv10
+ This option disables support for TLSv1.0 when SSL is used to communicate with
+ the server. Note that SSLv2 is disabled in the code and cannot be enabled
+ using any configuration option. TLSv1 is more expensive than SSLv3 so it
+ often makes sense to disable it when communicating with local servers. This
+ option is also available on global statement "ssl-default-server-options".
+ Use "ssl-min-ver" and "ssl-max-ver" instead.
+
+ Supported in default-server: No
+
+no-tlsv11
+ This option disables support for TLSv1.1 when SSL is used to communicate with
+ the server. Note that SSLv2 is disabled in the code and cannot be enabled
+ using any configuration option. TLSv1 is more expensive than SSLv3 so it
+ often makes sense to disable it when communicating with local servers. This
+ option is also available on global statement "ssl-default-server-options".
+ Use "ssl-min-ver" and "ssl-max-ver" instead.
+
+ Supported in default-server: No
+
+no-tlsv12
+ This option disables support for TLSv1.2 when SSL is used to communicate with
+ the server. Note that SSLv2 is disabled in the code and cannot be enabled
+ using any configuration option. TLSv1 is more expensive than SSLv3 so it
+ often makes sense to disable it when communicating with local servers. This
+ option is also available on global statement "ssl-default-server-options".
+ Use "ssl-min-ver" and "ssl-max-ver" instead.
+
+ Supported in default-server: No
+
+no-tlsv13
+ This option disables support for TLSv1.3 when SSL is used to communicate with
+ the server. Note that SSLv2 is disabled in the code and cannot be enabled
+ using any configuration option. TLSv1 is more expensive than SSLv3 so it
+ often makes sense to disable it when communicating with local servers. This
+ option is also available on global statement "ssl-default-server-options".
+ Use "ssl-min-ver" and "ssl-max-ver" instead.
+
+ Supported in default-server: No
+
+no-verifyhost
+ This option may be used as "server" setting to reset any "verifyhost"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "verifyhost" setting.
+
+no-tfo
+ This option may be used as "server" setting to reset any "tfo"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "tfo" setting.
+
+non-stick
+ Never add connections allocated to this server to a stick-table.
+ This may be used in conjunction with backup to ensure that
+ stick-table persistence is disabled for backup servers.
+
+npn <protocols>
+ This enables the NPN TLS extension and advertises the specified protocol list
+ as supported on top of NPN. The protocol list consists of a comma-delimited
+ list of protocol names, for instance: "http/1.1,http/1.0" (without quotes).
+ This requires that the SSL library is built with support for TLS extensions
+ enabled (check with haproxy -vv). Note that the NPN extension has been
+ replaced with the ALPN extension (see the "alpn" keyword), though this one is
+ only available starting with OpenSSL 1.0.2.
+
+observe <mode>
+ This option enables health adjusting based on observing communication with
+ the server. By default this functionality is disabled and enabling it also
+ requires to enable health checks. There are two supported modes: "layer4" and
+ "layer7". In layer4 mode, only successful/unsuccessful tcp connections are
+ significant. In layer7, which is only allowed for http proxies, responses
+ received from server are verified, like valid/wrong http code, unparsable
+ headers, a timeout, etc. Valid status codes include 100 to 499, 501 and 505.
+
+ See also the "check", "on-error" and "error-limit".
+
+on-error <mode>
+ Select what should happen when enough consecutive errors are detected.
+ Currently, four modes are available:
+ - fastinter: force fastinter
+ - fail-check: simulate a failed check, also forces fastinter (default)
+ - sudden-death: simulate a pre-fatal failed health check, one more failed
+ check will mark a server down, forces fastinter
+ - mark-down: mark the server immediately down and force fastinter
+
+ See also the "check", "observe" and "error-limit".
+
+on-marked-down <action>
+ Modify what occurs when a server is marked down.
+ Currently one action is available:
+ - shutdown-sessions: Shutdown peer streams. When this setting is enabled,
+ all connections to the server are immediately terminated when the server
+ goes down. It might be used if the health check detects more complex cases
+ than a simple connection status, and long timeouts would cause the service
+ to remain unresponsive for too long a time. For instance, a health check
+ might detect that a database is stuck and that there's no chance to reuse
+ existing connections anymore. Connections killed this way are logged with
+ a 'D' termination code (for "Down").
+
+ Actions are disabled by default
+
+on-marked-up <action>
+ Modify what occurs when a server is marked up.
+ Currently one action is available:
+ - shutdown-backup-sessions: Shutdown streams on all backup servers. This is
+ done only if the server is not in backup state and if it is not disabled
+ (it must have an effective weight > 0). This can be used sometimes to force
+ an active server to take all the traffic back after recovery when dealing
+ with long sessions (e.g. LDAP, SQL, ...). Doing this can cause more trouble
+ than it tries to solve (e.g. incomplete transactions), so use this feature
+ with extreme care. Streams killed because a server comes up are logged
+ with a 'U' termination code (for "Up").
+
+ Actions are disabled by default
+
+pool-low-conn <max>
+ Set a low threshold on the number of idling connections for a server, below
+ which a thread will not try to steal a connection from another thread. This
+ can be useful to improve CPU usage patterns in scenarios involving many very
+ fast servers, in order to ensure all threads will keep a few idle connections
+ all the time instead of letting them accumulate over one thread and migrating
+ them from thread to thread. Typical values of twice the number of threads
+ seem to show very good performance already with sub-millisecond response
+ times. The default is zero, indicating that any idle connection can be used
+ at any time. It is the recommended setting for normal use. This only applies
+ to connections that can be shared according to the same principles as those
+ applying to "http-reuse". In case connection sharing between threads would
+ be disabled via "tune.idle-pool.shared", it can become very important to use
+ this setting to make sure each thread always has a few connections, or the
+ connection reuse rate will decrease as thread count increases.
+
+pool-max-conn <max>
+ Set the maximum number of idling connections for a server. -1 means unlimited
+ connections, 0 means no idle connections. The default is -1. When idle
+ connections are enabled, orphaned idle connections which do not belong to any
+ client session anymore are moved to a dedicated pool so that they remain
+ usable by future clients. This only applies to connections that can be shared
+ according to the same principles as those applying to "http-reuse".
+
+pool-purge-delay <delay>
+ Sets the delay to start purging idle connections. Each <delay> interval, half
+ of the idle connections are closed. 0 means we don't keep any idle connection.
+ The default is 5s.
+
+port <port>
+ Using the "port" parameter, it becomes possible to use a different port to
+ send health-checks or to probe the agent-check. On some servers, it may be
+ desirable to dedicate a port to a specific component able to perform complex
+ tests which are more suitable to health-checks than the application. It is
+ common to run a simple script in inetd for instance. This parameter is
+ ignored if the "check" parameter is not set. See also the "addr" parameter.
+
+proto <name>
+ Forces the multiplexer's protocol to use for the outgoing connections to this
+ server. It must be compatible with the mode of the backend (TCP or HTTP). It
+ must also be usable on the backend side. The list of available protocols is
+ reported in haproxy -vv. The protocol properties are reported: the mode
+ (TCP/HTTP), the side (FE/BE), the mux name and its flags.
+
+ Some protocols are subject to the head-of-line blocking on server side
+ (flag=HOL_RISK). Finally some protocols don't support upgrades (flag=NO_UPG).
+ The HTX compatibility is also reported (flag=HTX).
+
+ Here are the protocols that may be used as argument to a "proto" directive on
+ a server line :
+
+ h2 : mode=HTTP side=FE|BE mux=H2 flags=HTX|HOL_RISK|NO_UPG
+ fcgi : mode=HTTP side=BE mux=FCGI flags=HTX|HOL_RISK|NO_UPG
+ h1 : mode=HTTP side=FE|BE mux=H1 flags=HTX|NO_UPG
+ none : mode=TCP side=FE|BE mux=PASS flags=NO_UPG
+
+ The idea behind this option is to bypass the selection of the best
+ multiplexer's protocol for all connections established to this server.
+
+ See also "ws" to use an alternative protocol for websocket streams.
+
+redir <prefix>
+ The "redir" parameter enables the redirection mode for all GET and HEAD
+ requests addressing this server. This means that instead of having HAProxy
+ forward the request to the server, it will send an "HTTP 302" response with
+ the "Location" header composed of this prefix immediately followed by the
+ requested URI beginning at the leading '/' of the path component. That means
+ that no trailing slash should be used after <prefix>. All invalid requests
+ will be rejected, and all non-GET or HEAD requests will be normally served by
+ the server. Note that since the response is completely forged, no header
+ mangling nor cookie insertion is possible in the response. However, cookies in
+ requests are still analyzed, making this solution completely usable to direct
+ users to a remote location in case of local disaster. Main use consists in
+ increasing bandwidth for static servers by having the clients directly
+ connect to them. Note: never use a relative location here, it would cause a
+ loop between the client and HAProxy!
+
+ Example : server srv1 192.168.1.1:80 redir http://image1.mydomain.com check
+
+rise <count>
+ The "rise" parameter states that a server will be considered as operational
+ after <count> consecutive successful health checks. This value defaults to 2
+ if unspecified. See also the "check", "inter" and "fall" parameters.
+
+resolve-opts <option>,<option>,...
+ Comma separated list of options to apply to DNS resolution linked to this
+ server.
+
+ Available options:
+
+ * allow-dup-ip
+ By default, HAProxy prevents IP address duplication in a backend when DNS
+ resolution at runtime is in operation.
+ That said, for some cases, it makes sense that two servers (in the same
+ backend, being resolved by the same FQDN) have the same IP address.
+ For such case, simply enable this option.
+ This is the opposite of prevent-dup-ip.
+
+ * ignore-weight
+ Ignore any weight that is set within an SRV record. This is useful when
+ you would like to control the weights using an alternate method, such as
+ using an "agent-check" or through the runtime api.
+
+ * prevent-dup-ip
+ Ensure HAProxy's default behavior is enforced on a server: prevent re-using
+ an IP address already set to a server in the same backend and sharing the
+ same fqdn.
+ This is the opposite of allow-dup-ip.
+
+ Example:
+ backend b_myapp
+ default-server init-addr none resolvers dns
+ server s1 myapp.example.com:80 check resolve-opts allow-dup-ip
+ server s2 myapp.example.com:81 check resolve-opts allow-dup-ip
+
+ With the option allow-dup-ip set:
+ * if the nameserver returns a single IP address, then both servers will use
+ it
+ * If the nameserver returns 2 IP addresses, then each server will pick up a
+ different address
+
+ Default value: not set
+
+resolve-prefer <family>
+ When DNS resolution is enabled for a server and multiple IP addresses from
+ different families are returned, HAProxy will prefer using an IP address
+ from the family mentioned in the "resolve-prefer" parameter.
+ Available families: "ipv4" and "ipv6"
+
+ Default value: ipv6
+
+ Example:
+
+ server s1 app1.domain.com:80 resolvers mydns resolve-prefer ipv6
+
+resolve-net <network>[,<network[,...]]
+ This option prioritizes the choice of an ip address matching a network. This is
+ useful with clouds to prefer a local ip. In some cases, a cloud high
+ availability service can be announced with many ip addresses on many
+ different datacenters. The latency between datacenters is not negligible, so
+ this option makes it possible to prefer a local datacenter. If no address
+ matches the configured network, another address is selected.
+
+ Example:
+
+ server s1 app1.domain.com:80 resolvers mydns resolve-net 10.0.0.0/8
+
+resolvers <id>
+ Points to an existing "resolvers" section to resolve current server's
+ hostname.
+
+ Example:
+
+ server s1 app1.domain.com:80 check resolvers mydns
+
+ See also section 5.3
+
+send-proxy
+ The "send-proxy" parameter enforces use of the PROXY protocol over any
+ connection established to this server. The PROXY protocol informs the other
+ end about the layer 3/4 addresses of the incoming connection, so that it can
+ know the client's address or the public address it accessed to, whatever the
+ upper layer protocol. For connections accepted by an "accept-proxy" or
+ "accept-netscaler-cip" listener, the advertised address will be used. Only
+ TCPv4 and TCPv6 address families are supported. Other families such as
+ Unix sockets, will report an UNKNOWN family. Servers using this option can
+ fully be chained to another instance of HAProxy listening with an
+ "accept-proxy" setting. This setting must not be used if the server isn't
+ aware of the protocol. When health checks are sent to the server, the PROXY
+ protocol is automatically used when this option is set, unless there is an
+ explicit "port" or "addr" directive, in which case an explicit
+ "check-send-proxy" directive would also be needed to use the PROXY protocol.
+ See also the "no-send-proxy" option of this section and "accept-proxy" and
+ "accept-netscaler-cip" option of the "bind" keyword.
+
+send-proxy-v2
+ The "send-proxy-v2" parameter enforces use of the PROXY protocol version 2
+ over any connection established to this server. The PROXY protocol informs
+ the other end about the layer 3/4 addresses of the incoming connection, so
+ that it can know the client's address or the public address it accessed to,
+ whatever the upper layer protocol. It also sends ALPN information if an ALPN
+ has been negotiated. This setting must not be used if the server isn't aware
+ of this version of the protocol. See also the "no-send-proxy-v2" option of
+ this section and the "send-proxy" option of the "bind" keyword.
+
+set-proxy-v2-tlv-fmt(<id>) <fmt>
+ The "set-proxy-v2-tlv-fmt" parameter is used to send arbitrary PROXY protocol
+ version 2 TLVs. For the type (<id>) range of the defined TLV type please refer
+ to section 2.2.8. of the proxy protocol specification. However, the value can
+ be chosen freely as long as it does not exceed the maximum length of 65,535
+ bytes. It can also be used for forwarding TLVs by using the fetch "fc_pp_tlv"
+ to retrieve a received TLV from the frontend. It may be used as a server or
+ a default-server option. It must be used in combination with send-proxy-v2
+ such that PPv2 TLVs are actually sent out.
+
+ Example:
+ server srv1 192.168.1.1:80 send-proxy-v2 set-proxy-v2-tlv-fmt(0x20) %[fc_pp_tlv(0x20)]
+
+ In this case, we fetch the TLV with the type 0x20 as a string and set as the value
+ of a newly created TLV that also has the type 0x20.
+
+proxy-v2-options <option>[,<option>]*
+ The "proxy-v2-options" parameter add options to send in PROXY protocol
+ version 2 when "send-proxy-v2" is used. Options available are:
+
+ - ssl : See also "send-proxy-v2-ssl".
+ - cert-cn : See also "send-proxy-v2-ssl-cn".
+ - ssl-cipher: Name of the used cipher.
+ - cert-sig : Signature algorithm of the used certificate.
+ - cert-key : Key algorithm of the used certificate
+ - authority : Host name value passed by the client (only SNI from a TLS
+ connection is supported).
+ - crc32c : Checksum of the PROXYv2 header.
+ - unique-id : Send a unique ID generated using the frontend's
+ "unique-id-format" within the PROXYv2 header.
+ This unique-id is primarily meant for "mode tcp". It can
+ lead to unexpected results in "mode http", because the
+ generated unique ID is also used for the first HTTP request
+ within a Keep-Alive connection.
+
+send-proxy-v2-ssl
+ The "send-proxy-v2-ssl" parameter enforces use of the PROXY protocol version
+ 2 over any connection established to this server. The PROXY protocol informs
+ the other end about the layer 3/4 addresses of the incoming connection, so
+ that it can know the client's address or the public address it accessed to,
+ whatever the upper layer protocol. In addition, the SSL information extension
+ of the PROXY protocol is added to the PROXY protocol header. This setting
+ must not be used if the server isn't aware of this version of the protocol.
+ See also the "no-send-proxy-v2-ssl" option of this section and the
+ "send-proxy-v2" option of the "bind" keyword.
+
+send-proxy-v2-ssl-cn
+ The "send-proxy-v2-ssl-cn" parameter enforces use of the PROXY protocol version
+ 2 over any connection established to this server. The PROXY protocol informs
+ the other end about the layer 3/4 addresses of the incoming connection, so
+ that it can know the client's address or the public address it accessed to,
+ whatever the upper layer protocol. In addition, the SSL information extension
+ of the PROXY protocol, along with the Common Name from the subject of
+ the client certificate (if any), is added to the PROXY protocol header. This
+ setting must not be used if the server isn't aware of this version of the
+ protocol. See also the "no-send-proxy-v2-ssl-cn" option of this section and
+ the "send-proxy-v2" option of the "bind" keyword.
+
+shard <shard>
+ This parameter is used only in the context of stick-tables synchronisation
+ with peers protocol. The "shard" parameter identifies the peers which will
+ receive all the stick-table updates for keys with this shard as distribution
+ hash. The accepted values are 0 up to "shards" parameter value specified in
+ the "peers" section. 0 value is the default value meaning that the peer will
+ receive all the key updates. Greater values than "shards" will be ignored.
+ This is also the case for any value provided to the local peer.
+
+ Example :
+
+ peers mypeers
+ shards 3
+ peer A 127.0.0.1:40001 # local peer without shard value (0 internally)
+ peer B 127.0.0.1:40002 shard 1
+ peer C 127.0.0.1:40003 shard 2
+ peer D 127.0.0.1:40004 shard 3
+
+sigalgs <sigalgs>
+ This setting is only available when support for OpenSSL was built in. It sets
+ the string describing the list of signature algorithms that are negotiated
+ during the TLSv1.2 and TLSv1.3 handshake. The format of the string is defined
+ in "man 3 SSL_CTX_set1_sigalgs" from the OpenSSL man pages. It is not
+ recommended to use this setting unless compatibility with a middlebox is
+ required.
+
+slowstart <start_time_in_ms>
+ The "slowstart" parameter for a server accepts a value in milliseconds which
+ indicates after how long a server which has just come back up will run at
+ full speed. Just as with every other time-based parameter, it can be entered
+ in any other explicit unit among { us, ms, s, m, h, d }. The speed grows
+ linearly from 0 to 100% during this time. The limitation applies to two
+ parameters :
+
+ - maxconn: the number of connections accepted by the server will grow from 1
+ to 100% of the usual dynamic limit defined by (minconn,maxconn,fullconn).
+
+ - weight: when the backend uses a dynamic weighted algorithm, the weight
+ grows linearly from 1 to 100%. In this case, the weight is updated at every
+ health-check. For this reason, it is important that the "inter" parameter
+ is smaller than the "slowstart", in order to maximize the number of steps.
+
+ The slowstart never applies when HAProxy starts, otherwise it would cause
+ trouble to running servers. It only applies when a server has been previously
+ seen as failed.
+
+sni <expression>
+ The "sni" parameter evaluates the sample fetch expression, converts it to a
+ string and uses the result as the host name sent in the SNI TLS extension to
+ the server. A typical use case is to send the SNI received from the client in
+ a bridged TCP/SSL scenario, using the "ssl_fc_sni" sample fetch for the
+ expression. THIS MUST NOT BE USED FOR HTTPS, where req.hdr(host) should be
+ used instead, since SNI in HTTPS must always match the Host field and clients
+ are allowed to use different host names over the same connection). If
+ "verify required" is set (which is the recommended setting), the resulting
+ name will also be matched against the server certificate's names. See the
+ "verify" directive for more details. If you want to set a SNI for health
+ checks, see the "check-sni" directive for more details.
+
+source <addr>[:<pl>[-<ph>]] [usesrc { <addr2>[:<port2>] | client | clientip } ]
+source <addr>[:<port>] [usesrc { <addr2>[:<port2>] | hdr_ip(<hdr>[,<occ>]) } ]
+source <addr>[:<pl>[-<ph>]] [interface <name>] ...
+ The "source" parameter sets the source address which will be used when
+ connecting to the server. It follows the exact same parameters and principle
+ as the backend "source" keyword, except that it only applies to the server
+ referencing it. Please consult the "source" keyword for details.
+
+ Additionally, the "source" statement on a server line allows one to specify a
+ source port range by indicating the lower and higher bounds delimited by a
+ dash ('-'). Some operating systems might require a valid IP address when a
+ source port range is specified. It is permitted to have the same IP/range for
+ several servers. Doing so makes it possible to bypass the maximum of 64k
+ total concurrent connections. The limit will then reach 64k connections per
+ server.
+
+ Since Linux 4.2/libc 2.23 IP_BIND_ADDRESS_NO_PORT is set for connections
+ specifying the source address without port(s).
+
+ssl
+ This option enables SSL ciphering on outgoing connections to the server. It
+ is critical to verify server certificates using "verify" when using SSL to
+ connect to servers, otherwise the communication is prone to trivial
+ man-in-the-middle attacks rendering SSL useless. When this option is used, health
+ checks are automatically sent in SSL too unless there is a "port" or an
+ "addr" directive indicating the check should be sent to a different location.
+ See the "no-ssl" to disable "ssl" option and "check-ssl" option to force
+ SSL health checks.
+
+ssl-max-ver [ SSLv3 | TLSv1.0 | TLSv1.1 | TLSv1.2 | TLSv1.3 ]
+ This option enforces use of <version> or lower when SSL is used to communicate
+ with the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-min-ver".
+
+ssl-min-ver [ SSLv3 | TLSv1.0 | TLSv1.1 | TLSv1.2 | TLSv1.3 ]
+ This option enforces use of <version> or upper when SSL is used to communicate
+ with the server. This option is also available on global statement
+ "ssl-default-server-options". See also "ssl-max-ver".
+
+ssl-reuse
+ This option may be used as "server" setting to reset any "no-ssl-reuse"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "no-ssl-reuse" setting.
+
+stick
+ This option may be used as "server" setting to reset any "non-stick"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "non-stick" setting.
+
+socks4 <addr>:<port>
+ This option enables upstream socks4 tunnel for outgoing connections to the
+ server. Using this option won't force the health check to go via socks4 by
+ default. You will have to use the keyword "check-via-socks4" to enable it.
+
+tcp-ut <delay>
+ Sets the TCP User Timeout for all outgoing connections to this server. This
+ option is available on Linux since version 2.6.37. It allows HAProxy to
+ configure a timeout for sockets which contain data not receiving an
+ acknowledgment for the configured delay. This is especially useful on
+ long-lived connections experiencing long idle periods such as remote
+ terminals or database connection pools, where the client and server timeouts
+ must remain high to allow a long period of idle, but where it is important to
+ detect that the server has disappeared in order to release all resources
+ associated with its connection (and the client's session). One typical use
+ case is also to force dead server connections to die when health checks are
+ too slow or during a soft reload since health checks are then disabled. The
+ argument is a delay expressed in milliseconds by default. This only works for
+ regular TCP connections, and is ignored for other protocols.
+
+tfo
+ This option enables using TCP fast open when connecting to servers, on
+ systems that support it (currently only the Linux kernel >= 4.11).
+ See the "tfo" bind option for more information about TCP fast open.
+ Please note that when using tfo, you should also use the "conn-failure",
+ "empty-response" and "response-timeout" keywords for "retry-on", or HAProxy
+ won't be able to retry the connection on failure. See also "no-tfo".
+
+track [<backend>/]<server>
+ This option enables ability to set the current state of the server by tracking
+ another one. It is possible to track a server which itself tracks another
+ server, provided that at the end of the chain, a server has health checks
+ enabled. If <backend> is omitted the current one is used. If disable-on-404 is
+ used, it has to be enabled on both proxies.
+
+tls-tickets
+ This option may be used as "server" setting to reset any "no-tls-tickets"
+ setting which would have been inherited from "default-server" directive as
+ default value.
+ The TLS ticket mechanism is only used up to TLS 1.2.
+ Forward Secrecy is compromised with TLS tickets, unless ticket keys
+ are periodically rotated (via reload or by using "tls-ticket-keys").
+ It may also be used as "default-server" setting to reset any previous
+ "default-server" "no-tls-tickets" setting.
+
+verify [none|required]
+ This setting is only available when support for OpenSSL was built in. If set
+ to 'none', server certificate is not verified. In the other case, the
+ certificate provided by the server is verified using CAs from 'ca-file' and
+ optional CRLs from 'crl-file' after having checked that the names provided in
+ the certificate's subject and subjectAlternateNames attributes match either
+ the name passed using the "sni" directive, or if not provided, the static
+ host name passed using the "verifyhost" directive. When no name is found, the
+ certificate's names are ignored. For this reason, without SNI it's important
+ to use "verifyhost". On verification failure the handshake is aborted. It is
+ critically important to verify server certificates when using SSL to connect
+ to servers, otherwise the communication is prone to trivial man-in-the-middle
+ attacks rendering SSL totally useless. Unless "ssl_server_verify" appears in
+ the global section, "verify" is set to "required" by default.
+
+verifyhost <hostname>
+ This setting is only available when support for OpenSSL was built in, and
+ only takes effect if 'verify required' is also specified. This directive sets
+ a default static hostname to check the server's certificate against when no
+ SNI was used to connect to the server. If SNI is not used, this is the only
+ way to enable hostname verification. This static hostname, when set, will
+ also be used for health checks (which cannot provide an SNI value). If none
+ of the hostnames in the certificate match the specified hostname, the
+ handshake is aborted. The hostnames in the server-provided certificate may
+ include wildcards. See also "verify", "sni" and "no-verifyhost" options.
+
+weight <weight>
+ The "weight" parameter is used to adjust the server's weight relative to
+ other servers. All servers will receive a load proportional to their weight
+ relative to the sum of all weights, so the higher the weight, the higher the
+ load. The default weight is 1, and the maximal value is 256. A value of 0
+ means the server will not participate in load-balancing but will still accept
+ persistent connections. If this parameter is used to distribute the load
+ according to server's capacity, it is recommended to start with values which
+ can both grow and shrink, for instance between 10 and 100 to leave enough
+ room above and below for later adjustments.
+
+ws { auto | h1 | h2 }
+ This option allows to configure the protocol used when relaying websocket
+ streams. This is most notably useful when using an HTTP/2 backend without the
+ support for H2 websockets through the RFC8441.
+
+ The default mode is "auto". This will reuse the same protocol as the main
+ one. The only difference is when using ALPN. In this case, it can try to
+ downgrade the ALPN to "http/1.1" only for websocket streams if the configured
+ server ALPN contains it.
+
+ The value "h1" is used to force HTTP/1.1 for websockets streams, through ALPN
+ if SSL ALPN is activated for the server. Similarly, "h2" can be used to
+ force HTTP/2.0 websockets. Use this value with care : the server must support
+ RFC8441 or an error will be reported by haproxy when relaying websockets.
+
+ Note that NPN is not taken into account as its usage has been deprecated in
+ favor of the ALPN extension.
+
+ See also "alpn" and "proto".
+
+
+5.3. Server IP address resolution using DNS
+-------------------------------------------
+
+HAProxy allows using a host name on the server line to retrieve its IP address
+using name servers. By default, HAProxy resolves the name when parsing the
+configuration file, at startup and cache the result for the process's life.
+This is not sufficient in some cases, such as in Amazon where a server's IP
+can change after a reboot or an ELB Virtual IP can change based on current
+workload.
+This chapter describes how HAProxy can be configured to process server's name
+resolution at run time.
+Whether run time server name resolution has been enabled or not, HAProxy will
+carry on doing the first resolution when parsing the configuration.
+
+
+5.3.1. Global overview
+----------------------
+
+As we've seen in introduction, name resolution in HAProxy occurs at two
+different steps of the process life:
+
+ 1. when starting up, HAProxy parses the server line definition and matches a
+ host name. It uses libc functions to get the host name resolved. This
+ resolution relies on /etc/resolv.conf file.
+
+ 2. at run time, HAProxy performs periodically name resolutions for servers
+ requiring DNS resolutions.
+
+A few other events can trigger a name resolution at run time:
+ - when a server's health check ends up in a connection timeout: this may be
+ because the server has a new IP address. So we need to trigger a name
+ resolution to know this new IP.
+
+When using resolvers, the server name can either be a hostname, or a SRV label.
+HAProxy considers anything that starts with an underscore as a SRV label. If a
+SRV label is specified, then the corresponding SRV records will be retrieved
+from the DNS server, and the provided hostnames will be used. The SRV label
+will be checked periodically, and if any servers are added or removed, HAProxy
+will automatically do the same.
+
+A few things important to notice:
+ - all the name servers are queried at the same time. HAProxy will process the
+ first valid response.
+
+ - a resolution is considered as invalid (NX, timeout, refused), when all the
+ servers return an error.
+
+
+5.3.2. The resolvers section
+----------------------------
+
+This section is dedicated to host information related to name resolution in
+HAProxy. There can be as many resolvers sections as needed. Each section can
+contain many name servers.
+
+At startup, HAProxy tries to generate a resolvers section named "default", if
+no section was named this way in the configuration. This section is used by
+default by the httpclient and uses the parse-resolv-conf keyword. If HAProxy
+failed to generate automatically this section, no error or warning are emitted.
+
+When multiple name servers are configured in a resolvers section, then HAProxy
+uses the first valid response. In case of invalid responses, only the last one
+is treated. Purpose is to give the chance to a slow server to deliver a valid
+answer after a fast faulty or outdated server.
+
+When each server returns a different error type, then only the last error is
+used by HAProxy. The following processing is applied on this error:
+
+ 1. HAProxy retries the same DNS query with a new query type. The A queries are
+ switched to AAAA or the opposite. SRV queries are not concerned here. Timeout
+ errors are also excluded.
+
+ 2. When the fallback on the query type was done (or not applicable), HAProxy
+ retries the original DNS query, with the preferred query type.
+
+ 3. HAProxy retries previous steps <resolve_retries> times. If no valid
+ response is received after that, it stops the DNS resolution and reports
+ the error.
+
+For example, with 2 name servers configured in a resolvers section, the
+following scenarios are possible:
+
+ - First response is valid and is applied directly, second response is
+ ignored
+
+ - First response is invalid and second one is valid, then second response is
+ applied
+
+ - First response is a NX domain and second one a truncated response, then
+ HAProxy retries the query with a new type
+
+ - First response is a NX domain and second one is a timeout, then HAProxy
+ retries the query with a new type
+
+ - Query timed out for both name servers, then HAProxy retries it with the
+ same query type
+
+As a DNS server may not answer all the IPs in one DNS request, HAProxy keeps
+a cache of previous answers, an answer will be considered obsolete after
+<hold obsolete> seconds without the IP returned.
+
+
+resolvers <resolvers id>
+ Creates a new name server list labeled <resolvers id>
+
+A resolvers section accepts the following parameters:
+
+accepted_payload_size <nb>
+ Defines the maximum payload size accepted by HAProxy and announced to all the
+ name servers configured in this resolvers section.
+ <nb> is in bytes. If not set, HAProxy announces 512. (minimal value defined
+ by RFC 6891)
+
+ Note: the maximum allowed value is 65535. Recommended value for UDP is
+ 4096 and it is not recommended to exceed 8192 except if you are sure
+ that your system and network can handle this (over 65507 makes no sense
+ since it is the maximum UDP payload size). If you are using only TCP
+ nameservers to handle huge DNS responses, you should put this value
+ to the max: 65535.
+
+nameserver <name> <address>[:port] [param*]
+ Used to configure a nameserver. <name> of the nameserver should be unique.
+ By default the <address> is considered of type datagram. This means if an
+ IPv4 or IPv6 is configured without special address prefixes (paragraph 11.)
+ the UDP protocol will be used. If a stream protocol address prefix is used,
+ the nameserver will be considered as a stream server (TCP for instance) and
+ "server" parameters found in 5.2 paragraph which are relevant for DNS
+ resolving will be considered. Note: currently, in TCP mode, 4 queries are
+ pipelined on the same connections. A batch of idle connections are removed
+ every 5 seconds. "maxconn" can be configured to limit the amount of those
+ concurrent connections and TLS should also be usable if the server supports it.
+
+parse-resolv-conf
+ Adds all nameservers found in /etc/resolv.conf to this resolvers nameservers
+ list. Ordered as if each nameserver in /etc/resolv.conf was individually
+ placed in the resolvers section in place of this directive.
+
+hold <status> <period>
+ Upon receiving the DNS response <status>, determines whether a server's state
+ should change from UP to DOWN. To make that determination, it checks whether
+ any valid status has been received during the past <period> in order to
+ counteract the just received invalid status.
+
+ <status> : last name resolution status.
+ nx After receiving an NXDOMAIN status, check for any valid
+ status during the concluding period.
+
+ refused After receiving a REFUSED status, check for any valid
+ status during the concluding period.
+
+ timeout After the "timeout retry" has struck, check for any
+ valid status during the concluding period.
+
+ other After receiving any other invalid status, check for any
+ valid status during the concluding period.
+
+ valid Applies only to "http-request do-resolve" and
+ "tcp-request content do-resolve" actions. It defines the
+ period for which the server will maintain a valid response
+ before triggering another resolution. It does not affect
+ dynamic resolution of servers.
+
+ obsolete Defines how long to wait before removing obsolete DNS
+ records after an updated answer record is received. It
+ applies to SRV records.
+
+ <period> : Amount of time into the past during which a valid response must
+ have been received. It follows the HAProxy time format and is in
+ milliseconds by default.
+
+ For a server that relies on dynamic DNS resolution to determine its IP
+ address, receiving an invalid DNS response, such as NXDOMAIN, will lead to
+ changing the server's state from UP to DOWN. The hold directives define how
+ far into the past to look for a valid response. If a valid response has been
+ received within <period>, the just received invalid status will be ignored.
+
+ Unless a valid response has been received during the concluding period, the
+ server will be marked as DOWN. For example, if "hold nx 30s" is set and the
+ last received DNS response was NXDOMAIN, the server will be marked DOWN
+ unless a valid response has been received during the last 30 seconds.
+
+ A server in the DOWN state will be marked UP immediately upon receiving a
+ valid status from the DNS server.
+
+ A separate behavior exists for "hold valid" and "hold obsolete".
+
+resolve_retries <nb>
+ Defines the number <nb> of queries to send to resolve a server name before
+ giving up.
+ Default value: 3
+
+ A retry occurs on name server timeout or when the full sequence of DNS query
+ type failover is over and we need to start up from the default ANY query
+ type.
+
+timeout <event> <time>
+ Defines timeouts related to name resolution
+ <event> : the event on which the <time> timeout period applies to.
+ events available are:
+ - resolve : default time to trigger name resolutions when no
+ other time applied.
+ Default value: 1s
+ - retry : time between two DNS queries, when no valid response
+ have been received.
+ Default value: 1s
+ <time> : time related to the event. It follows the HAProxy time format.
+ <time> is expressed in milliseconds.
+
+ Example:
+
+ resolvers mydns
+ nameserver dns1 10.0.0.1:53
+ nameserver dns2 10.0.0.2:53
+ nameserver dns3 tcp@10.0.0.3:53
+ parse-resolv-conf
+ resolve_retries 3
+ timeout resolve 1s
+ timeout retry 1s
+ hold other 30s
+ hold refused 30s
+ hold nx 30s
+ hold timeout 30s
+ hold valid 10s
+ hold obsolete 30s
+
+
+6. Cache
+---------
+
+HAProxy provides a cache, which was designed to perform cache on small objects
+(favicon, css...). This is a minimalist low-maintenance cache which runs in
+RAM.
+
+The cache is based on a memory area shared between all threads, and split in 1kB
+blocks.
+
+If an object is not used anymore, it can be deleted to store a new object
+independently of its expiration date. The oldest objects are deleted first
+when we try to allocate a new one.
+
+The cache uses a hash of the host header and the URI as the key.
+
+It's possible to view the status of a cache using the Unix socket command
+"show cache"; consult section 9.3 "Unix Socket commands" of the Management
+Guide for more details.
+
+When an object is delivered from the cache, the server name in the log is
+replaced by "<CACHE>".
+
+
+6.1. Limitation
+----------------
+
+The cache won't store and won't deliver objects in these cases:
+
+- If the response is not a 200
+- If the response contains a Vary header and either the process-vary option is
+ disabled, or a currently unmanaged header is specified in the Vary value (only
+ accept-encoding, referer and origin are managed for now)
+- If the Content-Length + the headers size is greater than "max-object-size"
+- If the response is not cacheable
+- If the response does not have an explicit expiration time (s-maxage or max-age
+ Cache-Control directives or Expires header) or a validator (ETag or Last-Modified
+ headers)
+- If the process-vary option is enabled and there are already max-secondary-entries
+ entries with the same primary key as the current response
+- If the process-vary option is enabled and the response has an unknown encoding (not
+ mentioned in https://www.iana.org/assignments/http-parameters/http-parameters.xhtml)
+ while varying on the accept-encoding client header
+
+- If the request is not a GET
+- If the HTTP version of the request is smaller than 1.1
+- If the request contains an Authorization header
+
+
+6.2. Setup
+-----------
+
+To setup a cache, you must define a cache section and use it in a proxy with
+the corresponding http-request and http-response actions.
+
+
+6.2.1. Cache section
+---------------------
+
+cache <name>
+ Declare a cache section, allocate a shared cache memory named <name>; the
+ size of the cache is mandatory.
+
+total-max-size <megabytes>
+ Define the size in RAM of the cache in megabytes. This size is split in
+ blocks of 1kB which are used by the cache entries. Its maximum value is 4095.
+
+max-object-size <bytes>
+ Define the maximum size of the objects to be cached. Must not be greater than
+ half of "total-max-size". If not set, it defaults to a 256th of the cache
+ size. All objects with sizes larger than "max-object-size" will not be cached.
+
+max-age <seconds>
+ Define the maximum expiration duration. The expiration is set as the lowest
+ value between the s-maxage or max-age (in this order) directive in the
+ Cache-Control response header and this value. The default value is 60
+ seconds, which means that you can't cache an object more than 60 seconds by
+ default.
+
+process-vary <on/off>
+ Enable or disable the processing of the Vary header. When disabled, a response
+ containing such a header will never be cached. When enabled, we need to calculate
+ a preliminary hash for a subset of request headers on all the incoming requests
+ (which might come with a cpu cost) which will be used to build a secondary
+ key for a given request (see RFC 7234#4.1). The secondary key is built out of
+ the contents of the 'accept-encoding', 'referer' and 'origin' headers for
+ now. The default value is off (disabled).
+
+max-secondary-entries <number>
+ Define the maximum number of simultaneous secondary entries with the same primary
+ key in the cache. This needs the vary support to be enabled. Its default value is 10
+ and should be passed a strictly positive integer.
+
+
+6.2.2. Proxy section
+---------------------
+
+The proxy section making use of the cache will need to involve the "cache-use"
+action in the "http-request" rule set in order to look up the requested object
+from the cache, and the "cache-store" action in the "http-response" rule set in
+order to store or update the retrieved object into the cache. Each of these
+actions may optionally involve conditions. For example, one could decide to
+skip the "cache-use" action for a certain sub-directory that is known not to
+be cacheable, or to skip the "cache-store" action for certain content-types
+that are known to be worthless. Please note that the cache indexing key is
+computed during the "cache-use" action, so if this action is skipped, no
+attempt to update the cache will be made on the response path anyway.
+
+Example:
+
+ backend bck1
+ mode http
+
+ http-request cache-use foobar
+ http-response cache-store foobar
+ server srv1 127.0.0.1:80
+
+ cache foobar
+ total-max-size 4
+ max-age 240
+
+
+7. Using ACLs and fetching samples
+----------------------------------
+
+HAProxy is capable of extracting data from request or response streams, from
+client or server information, from tables, environmental information etc...
+The action of extracting such data is called fetching a sample. Once retrieved,
+these samples may be used for various purposes such as a key to a stick-table,
+but most common usages consist in matching them against predefined constant
+data called patterns.
+
+
+7.1. ACL basics
+---------------
+
+The use of Access Control Lists (ACL) provides a flexible solution to perform
+content switching and generally to take decisions based on content extracted
+from the request, the response or any environmental status. The principle is
+simple :
+
+ - extract a data sample from a stream, table or the environment
+ - optionally apply some format conversion to the extracted sample
+ - apply one or multiple pattern matching methods on this sample
+ - perform actions only when a pattern matches the sample
+
+The actions generally consist in blocking a request, selecting a backend, or
+adding a header.
+
+In order to define a test, the "acl" keyword is used. The syntax is :
+
+ acl <aclname> <criterion> [flags] [operator] [<value>] ...
+
+This creates a new ACL <aclname> or completes an existing one with new tests.
+Those tests apply to the portion of request/response specified in <criterion>
+and may be adjusted with optional flags [flags]. Some criteria also support
+an operator which may be specified before the set of values. Optionally some
+conversion operators may be applied to the sample, and they will be specified
+as a comma-delimited list of keywords just after the first keyword. The values
+are of the type supported by the criterion, and are separated by spaces.
+
+ACL names must be formed from upper and lower case letters, digits, '-' (dash),
+'_' (underscore), '.' (dot) and ':' (colon). ACL names are case-sensitive,
+which means that "my_acl" and "My_Acl" are two different ACLs.
+
+There is no enforced limit to the number of ACLs. The unused ones do not affect
+performance, they just consume a small amount of memory.
+
+The criterion generally is the name of a sample fetch method, or one of its ACL
+specific declinations. The default test method is implied by the output type of
+this sample fetch method. The ACL declinations can describe alternate matching
+methods of a same sample fetch method. The sample fetch methods are the only
+ones supporting a conversion.
+
+Sample fetch methods return data which can be of the following types :
+ - boolean
+ - integer (signed or unsigned)
+ - IPv4 or IPv6 address
+ - string
+ - data block
+
+Converters transform any of these data into any of these. For example, some
+converters might convert a string to a lower-case string while other ones
+would turn a string to an IPv4 address, or apply a netmask to an IP address.
+The resulting sample is of the type of the last converter applied to the list,
+which defaults to the type of the sample fetch method.
+
+Each sample or converter returns data of a specific type, specified with its
+keyword in this documentation. When an ACL is declared using a standard sample
+fetch method, certain types automatically involve a default matching method,
+which is summarized in the table below :
+
+ +---------------------+-----------------+
+ | Sample or converter | Default |
+ | output type | matching method |
+ +---------------------+-----------------+
+ | boolean | bool |
+ +---------------------+-----------------+
+ | integer | int |
+ +---------------------+-----------------+
+ | ip | ip |
+ +---------------------+-----------------+
+ | string | str |
+ +---------------------+-----------------+
+ | binary | none, use "-m" |
+ +---------------------+-----------------+
+
+Note that in order to match binary samples, it is mandatory to specify a
+matching method, see below.
+
+The ACL engine can match these types against patterns of the following types :
+ - boolean
+ - integer or integer range
+ - IP address / network
+ - string (exact, substring, suffix, prefix, subdir, domain)
+ - regular expression
+ - hex block
+
+The following ACL flags are currently supported :
+
+ -i : ignore case during matching of all subsequent patterns.
+ -f : load patterns from a file.
+ -m : use a specific pattern matching method
+ -n : forbid the DNS resolutions
+ -M : load the file pointed by -f like a map file.
+ -u : force the unique id of the ACL
+ -- : force end of flags. Useful when a string looks like one of the flags.
+
+The "-f" flag is followed by the name of a file from which all lines will be
+read as individual values. It is even possible to pass multiple "-f" arguments
+if the patterns are to be loaded from multiple files. Empty lines as well as
+lines beginning with a sharp ('#') will be ignored. All leading spaces and tabs
+will be stripped. If it is absolutely necessary to insert a valid pattern
+beginning with a sharp, just prefix it with a space so that it is not taken for
+a comment. Depending on the data type and match method, HAProxy may load the
+lines into a binary tree, allowing very fast lookups. This is true for IPv4 and
+exact string matching. In this case, duplicates will automatically be removed.
+
+The "-M" flag allows an ACL to use a map file. If this flag is set, the file is
+parsed as a two-column file. The first column contains the patterns used by the
+ACL, and the second column contains the samples. The sample can be used later
+by a map. This can be useful in some rare cases where an ACL would just be used
+to check for the existence of a pattern in a map before a mapping is applied.
+
+The "-u" flag forces the unique id of the ACL. This unique id is used with the
+socket interface to identify ACL and dynamically change its values. Note that a
+file is always identified by its name even if an id is set.
+
+Also, note that the "-i" flag applies to subsequent entries and not to entries
+loaded from files preceding it. For instance :
+
+ acl valid-ua hdr(user-agent) -f exact-ua.lst -i -f generic-ua.lst test
+
+In this example, each line of "exact-ua.lst" will be exactly matched against
+the "user-agent" header of the request. Then each line of "generic-ua" will be
+case-insensitively matched. Then the word "test" will be insensitively matched
+as well.
+
+The "-m" flag is used to select a specific pattern matching method on the input
+sample. All ACL-specific criteria imply a pattern matching method and generally
+do not need this flag. However, this flag is useful with generic sample fetch
+methods to describe how they're going to be matched against the patterns. This
+is required for sample fetches which return data type for which there is no
+obvious matching method (e.g. string or binary). When "-m" is specified and
+followed by a pattern matching method name, this method is used instead of the
+default one for the criterion. This makes it possible to match contents in ways
+that were not initially planned, or with sample fetch methods which return a
+string. The matching method also affects the way the patterns are parsed.
+
+The "-n" flag forbids DNS resolution. It is used when loading IP files. By
+default, if the parser cannot parse an IP address, it considers that the parsed
+string may be a domain name and tries a DNS resolution. The "-n" flag disables
+this resolution. It is useful for detecting malformed IP lists. Note that if
+the DNS server is not reachable, the HAProxy configuration parsing may last
+many minutes waiting for the timeout. During this time no error messages are
+displayed. The "-n" flag disables this behavior. Note also that at runtime,
+this function is disabled for dynamic ACL modifications.
+
+There are some restrictions however. Not all methods can be used with all
+sample fetch methods. Also, if "-m" is used in conjunction with "-f", it must
+be placed first. The pattern matching method must be one of the following :
+
+ - "found" : only check if the requested sample could be found in the stream,
+ but do not compare it against any pattern. It is recommended not
+ to pass any pattern to avoid confusion. This matching method is
+ particularly useful to detect presence of certain contents such
+ as headers, cookies, etc... even if they are empty and without
+ comparing them to anything nor counting them.
+
+ - "bool" : check the value as a boolean. It can only be applied to fetches
+ which return a boolean or integer value, and takes no pattern.
+ Value zero or false does not match, all other values do match.
+
+ - "int" : match the value as an integer. It can be used with integer and
+ boolean samples. Boolean false is integer 0, true is integer 1.
+
+ - "ip" : match the value as an IPv4 or IPv6 address. It is compatible
+ with IP address samples only, so it is implied and never needed.
+
+ - "bin" : match the contents against a hexadecimal string representing a
+ binary sequence. This may be used with binary or string samples.
+
+ - "len" : match the sample's length as an integer. This may be used with
+ binary or string samples.
+
+ - "str" : exact match : match the contents against a string. This may be
+ used with binary or string samples.
+
+ - "sub" : substring match : check that the contents contain at least one of
+ the provided string patterns. This may be used with binary or
+ string samples.
+
+ - "reg" : regex match : match the contents against a list of regular
+ expressions. This may be used with binary or string samples.
+
+ - "beg" : prefix match : check that the contents begin like the provided
+ string patterns. This may be used with binary or string samples.
+
+ - "end" : suffix match : check that the contents end like the provided
+ string patterns. This may be used with binary or string samples.
+
+ - "dir" : subdir match : check that a slash-delimited portion of the
+ contents exactly matches one of the provided string patterns.
+ This may be used with binary or string samples.
+
+ - "dom" : domain match : check that a dot-delimited portion of the contents
+ exactly matches one of the provided string patterns. This may be
+ used with binary or string samples.
+
+For example, to quickly detect the presence of cookie "JSESSIONID" in an HTTP
+request, it is possible to do :
+
+ acl jsess_present req.cook(JSESSIONID) -m found
+
+In order to apply a regular expression on the 500 first bytes of data in the
+buffer, one would use the following acl :
+
+ acl script_tag req.payload(0,500) -m reg -i <script>
+
+On systems where the regex library is much slower when using "-i", it is
+possible to convert the sample to lowercase before matching, like this :
+
+ acl script_tag req.payload(0,500),lower -m reg <script>
+
+All ACL-specific criteria imply a default matching method. Most often, these
+criteria are composed by concatenating the name of the original sample fetch
+method and the matching method. For example, "hdr_beg" applies the "beg" match
+to samples retrieved using the "hdr" fetch method. This matching method is only
+usable when the keyword is used alone, without any converter. In case any such
+converter were to be applied after such an ACL keyword, the default matching
+method from the ACL keyword is simply ignored since what will matter for the
+matching is the output type of the last converter. Since all ACL-specific
+criteria rely on a sample fetch method, it is always possible instead to use
+the original sample fetch method and the explicit matching method using "-m".
+
+If an alternate match is specified using "-m" on an ACL-specific criterion,
+the matching method is simply applied to the underlying sample fetch method.
+For example, all ACLs below are exact equivalent :
+
+ acl short_form hdr_beg(host) www.
+ acl alternate1 hdr_beg(host) -m beg www.
+ acl alternate2 hdr_dom(host) -m beg www.
+ acl alternate3 hdr(host) -m beg www.
+
+
+The table below summarizes the compatibility matrix between sample or converter
+types and the pattern types to fetch against. It indicates for each compatible
+combination the name of the matching method to be used, surrounded with angle
+brackets ">" and "<" when the method is the default one and will work by
+default without "-m".
+
+ +-------------------------------------------------+
+ | Input sample type |
+ +----------------------+---------+---------+---------+---------+---------+
+ | pattern type | boolean | integer | ip | string | binary |
+ +----------------------+---------+---------+---------+---------+---------+
+ | none (presence only) | found | found | found | found | found |
+ +----------------------+---------+---------+---------+---------+---------+
+ | none (boolean value) |> bool <| bool | | bool | |
+ +----------------------+---------+---------+---------+---------+---------+
+ | integer (value) | int |> int <| int | int | |
+ +----------------------+---------+---------+---------+---------+---------+
+ | integer (length) | len | len | len | len | len |
+ +----------------------+---------+---------+---------+---------+---------+
+ | IP address | | |> ip <| ip | ip |
+ +----------------------+---------+---------+---------+---------+---------+
+ | exact string | str | str | str |> str <| str |
+ +----------------------+---------+---------+---------+---------+---------+
+ | prefix | beg | beg | beg | beg | beg |
+ +----------------------+---------+---------+---------+---------+---------+
+ | suffix | end | end | end | end | end |
+ +----------------------+---------+---------+---------+---------+---------+
+ | substring | sub | sub | sub | sub | sub |
+ +----------------------+---------+---------+---------+---------+---------+
+ | subdir | dir | dir | dir | dir | dir |
+ +----------------------+---------+---------+---------+---------+---------+
+ | domain | dom | dom | dom | dom | dom |
+ +----------------------+---------+---------+---------+---------+---------+
+ | regex | reg | reg | reg | reg | reg |
+ +----------------------+---------+---------+---------+---------+---------+
+ | hex block | | | | bin | bin |
+ +----------------------+---------+---------+---------+---------+---------+
+
+
+7.1.1. Matching booleans
+------------------------
+
+In order to match a boolean, no value is needed and all values are ignored.
+Boolean matching is used by default for all fetch methods of type "boolean".
+When boolean matching is used, the fetched value is returned as-is, which means
+that a boolean "true" will always match and a boolean "false" will never match.
+
+Boolean matching may also be enforced using "-m bool" on fetch methods which
+return an integer value. Then, integer value 0 is converted to the boolean
+"false" and all other values are converted to "true".
+
+
+7.1.2. Matching integers
+------------------------
+
+Integer matching applies by default to integer fetch methods. It can also be
+enforced on boolean fetches using "-m int". In this case, "false" is converted
+to the integer 0, and "true" is converted to the integer 1.
+
+Integer matching also supports integer ranges and operators. Note that integer
+matching only applies to positive values. A range is a value expressed with a
+lower and an upper bound separated with a colon, both of which may be omitted.
+
+For instance, "1024:65535" is a valid range to represent a range of
+unprivileged ports, and "1024:" would also work. "0:1023" is a valid
+representation of privileged ports, and ":1023" would also work.
+
+As a special case, some ACL functions support decimal numbers which are in fact
+two integers separated by a dot. This is used with some version checks for
+instance. All integer properties apply to those decimal numbers, including
+ranges and operators.
+
+For an easier usage, comparison operators are also supported. Note that using
+operators with ranges does not make much sense and is strongly discouraged.
+Similarly, it does not make much sense to perform order comparisons with a set
+of values.
+
+Available operators for integer matching are :
+
+ eq : true if the tested value equals at least one value
+ ge : true if the tested value is greater than or equal to at least one value
+ gt : true if the tested value is greater than at least one value
+ le : true if the tested value is less than or equal to at least one value
+ lt : true if the tested value is less than at least one value
+
+For instance, the following ACL matches any negative Content-Length header :
+
+ acl negative-length req.hdr_val(content-length) lt 0
+
+This one matches SSL versions between 3.0 and 3.1 (inclusive) :
+
+ acl sslv3 req.ssl_ver 3:3.1
+
+
+7.1.3. Matching strings
+-----------------------
+
+String matching applies to string or binary fetch methods, and exists in 6
+different forms :
+
+ - exact match (-m str) : the extracted string must exactly match the
+ patterns;
+
+ - substring match (-m sub) : the patterns are looked up inside the
+ extracted string, and the ACL matches if any of them is found inside;
+
+ - prefix match (-m beg) : the patterns are compared with the beginning of
+ the extracted string, and the ACL matches if any of them matches.
+
+ - suffix match (-m end) : the patterns are compared with the end of the
+ extracted string, and the ACL matches if any of them matches.
+
+ - subdir match (-m dir) : the patterns are looked up anywhere inside the
+ extracted string, delimited with slashes ("/"), the beginning or the end
+ of the string. The ACL matches if any of them matches. As such, the string
+ "/images/png/logo/32x32.png", would match "/images", "/images/png",
+ "images/png", "/png/logo", "logo/32x32.png" or "32x32.png" but not "png"
+ nor "32x32".
+
+ - domain match (-m dom) : the patterns are looked up anywhere inside the
+ extracted string, delimited with dots ("."), colons (":"), slashes ("/"),
+ question marks ("?"), the beginning or the end of the string. This is made
+ to be used with URLs. Leading and trailing delimiters in the pattern are
+ ignored. The ACL matches if any of them matches. As such, in the example
+ string "http://www1.dc-eu.example.com:80/blah", the patterns "http",
+ "www1", ".www1", "dc-eu", "example", "com", "80", "dc-eu.example",
+ "blah", ":www1:", "dc-eu.example:80" would match, but not "eu" nor "dc".
+ Using it to match domain suffixes for filtering or routing is generally
+ not a good idea, as the routing could easily be fooled by prepending the
+ matching prefix in front of another domain for example.
+
+String matching applies to verbatim strings as they are passed, with the
+exception of the backslash ("\") which makes it possible to escape some
+characters such as the space. If the "-i" flag is passed before the first
+string, then the matching will be performed ignoring the case. In order
+to match the string "-i", either set it second, or pass the "--" flag
+before the first string. Same applies of course to match the string "--".
+
+Do not use string matches for binary fetches which might contain null bytes
+(0x00), as the comparison stops at the occurrence of the first null byte.
+Instead, convert the binary fetch to a hex string with the hex converter first.
+
+Example:
+ # matches if the string <tag> is present in the binary sample
+ acl tag_found req.payload(0,0),hex -m sub 3C7461673E
+
+
+7.1.4. Matching regular expressions (regexes)
+---------------------------------------------
+
+Just like with string matching, regex matching applies to verbatim strings as
+they are passed, with the exception of the backslash ("\") which makes it
+possible to escape some characters such as the space. If the "-i" flag is
+passed before the first regex, then the matching will be performed ignoring
+the case. In order to match the string "-i", either set it second, or pass
+the "--" flag before the first string. Same principle applies of course to
+match the string "--".
+
+
+7.1.5. Matching arbitrary data blocks
+-------------------------------------
+
+It is possible to match some extracted samples against a binary block which may
+not safely be represented as a string. For this, the patterns must be passed as
+a series of hexadecimal digits in an even number, when the match method is set
+to binary. Each sequence of two digits will represent a byte. The hexadecimal
+digits may be used upper or lower case.
+
+Example :
+ # match "Hello\n" in the input stream (\x48 \x65 \x6c \x6c \x6f \x0a)
+ acl hello req.payload(0,6) -m bin 48656c6c6f0a
+
+
+7.1.6. Matching IPv4 and IPv6 addresses
+---------------------------------------
+
+IPv4 addresses values can be specified either as plain addresses or with a
+netmask appended, in which case the IPv4 address matches whenever it is
+within the network. Plain addresses may also be replaced with a resolvable
+host name, but this practice is generally discouraged as it makes it more
+difficult to read and debug configurations. If hostnames are used, you should
+at least ensure that they are present in /etc/hosts so that the configuration
+does not depend on any random DNS match at the moment the configuration is
+parsed.
+
+The dotted IPv4 address notation is supported in both regular as well as the
+abbreviated form with all-0-octets omitted:
+
+ +------------------+------------------+------------------+
+ | Example 1 | Example 2 | Example 3 |
+ +------------------+------------------+------------------+
+ | 192.168.0.1 | 10.0.0.12 | 127.0.0.1 |
+ | 192.168.1 | 10.12 | 127.1 |
+ | 192.168.0.1/22 | 10.0.0.12/8 | 127.0.0.1/8 |
+ | 192.168.1/22 | 10.12/8 | 127.1/8 |
+ +------------------+------------------+------------------+
+
+Notice that this is different from RFC 4632 CIDR address notation in which
+192.168.42/24 would be equivalent to 192.168.42.0/24.
+
+IPv6 may be entered in their usual form, with or without a netmask appended.
+Only bit counts are accepted for IPv6 netmasks. In order to avoid any risk of
+trouble with randomly resolved IP addresses, host names are never allowed in
+IPv6 patterns.
+
+HAProxy is also able to match IPv4 addresses with IPv6 addresses in the
+following situations :
+ - tested address is IPv4, pattern address is IPv4, the match applies
+ in IPv4 using the supplied mask if any.
+ - tested address is IPv6, pattern address is IPv6, the match applies
+ in IPv6 using the supplied mask if any.
+ - tested address is IPv6, pattern address is IPv4, the match applies in IPv4
+ using the pattern's mask if the IPv6 address matches with 2002:IPV4::,
+ ::IPV4 or ::ffff:IPV4, otherwise it fails.
+ - tested address is IPv4, pattern address is IPv6, the IPv4 address is first
+ converted to IPv6 by prefixing ::ffff: in front of it, then the match is
+ applied in IPv6 using the supplied IPv6 mask.
+
+
+7.2. Using ACLs to form conditions
+----------------------------------
+
+Some actions are only performed upon a valid condition. A condition is a
+combination of ACLs with operators. 3 operators are supported :
+
+ - AND (implicit)
+ - OR (explicit with the "or" keyword or the "||" operator)
+ - Negation with the exclamation mark ("!")
+
+A condition is formed as a disjunctive form:
+
+ [!]acl1 [!]acl2 ... [!]acln { or [!]acl1 [!]acl2 ... [!]acln } ...
+
+Such conditions are generally used after an "if" or "unless" statement,
+indicating when the condition will trigger the action.
+
+For instance, to block HTTP requests to the "*" URL with methods other than
+"OPTIONS", as well as POST requests without content-length, and GET or HEAD
+requests with a content-length greater than 0, and finally every request which
+is not either GET/HEAD/POST/OPTIONS !
+
+ acl missing_cl req.hdr_cnt(Content-length) eq 0
+ http-request deny if HTTP_URL_STAR !METH_OPTIONS || METH_POST missing_cl
+ http-request deny if METH_GET HTTP_CONTENT
+ http-request deny unless METH_GET or METH_POST or METH_OPTIONS
+
+To select a different backend for requests to static contents on the "www" site
+and to every request on the "img", "video", "download" and "ftp" hosts :
+
+ acl url_static path_beg /static /images /img /css
+ acl url_static path_end .gif .png .jpg .css .js
+ acl host_www hdr_beg(host) -i www
+ acl host_static hdr_beg(host) -i img. video. download. ftp.
+
+ # now use backend "static" for all static-only hosts, and for static URLs
+ # of host "www". Use backend "www" for the rest.
+ use_backend static if host_static or host_www url_static
+ use_backend www if host_www
+
+It is also possible to form rules using "anonymous ACLs". Those are unnamed ACL
+expressions that are built on the fly without needing to be declared. They must
+be enclosed between braces, with a space before and after each brace (because
+the braces must be seen as independent words). Example :
+
+ The following rule :
+
+ acl missing_cl req.hdr_cnt(Content-length) eq 0
+ http-request deny if METH_POST missing_cl
+
+ Can also be written that way :
+
+ http-request deny if METH_POST { req.hdr_cnt(Content-length) eq 0 }
+
+It is generally not recommended to use this construct because it's a lot easier
+to leave errors in the configuration when written that way. However, for very
+simple rules matching only one source IP address for instance, it can make more
+sense to use them than to declare ACLs with random names. Another example of
+good use is the following :
+
+ With named ACLs :
+
+ acl site_dead nbsrv(dynamic) lt 2
+ acl site_dead nbsrv(static) lt 2
+ monitor fail if site_dead
+
+ With anonymous ACLs :
+
+ monitor fail if { nbsrv(dynamic) lt 2 } || { nbsrv(static) lt 2 }
+
+See section 4.2 for detailed help on the "http-request deny" and "use_backend"
+keywords.
+
+
+7.3. Fetching samples
+---------------------
+
+Historically, sample fetch methods were only used to retrieve data to match
+against patterns using ACLs. With the arrival of stick-tables, a new class of
+sample fetch methods was created, most often sharing the same syntax as their
+ACL counterpart. These sample fetch methods are also known as "fetches". As
+of now, ACLs and fetches have converged. All ACL fetch methods have been made
+available as fetch methods, and ACLs may use any sample fetch method as well.
+
+This section details all available sample fetch methods and their output type.
+Some sample fetch methods have deprecated aliases that are used to maintain
+compatibility with existing configurations. They are then explicitly marked as
+deprecated and should not be used in new setups.
+
+The ACL derivatives are also indicated when available, with their respective
+matching methods. These ones all have a well defined default pattern matching
+method, so it is never necessary (though allowed) to pass the "-m" option to
+indicate how the sample will be matched using ACLs.
+
+As indicated in the sample type versus matching compatibility matrix above,
+when using a generic sample fetch method in an ACL, the "-m" option is
+mandatory unless the sample type is one of boolean, integer, IPv4 or IPv6. When
+the same keyword exists as an ACL keyword and as a standard fetch method, the
+ACL engine will automatically pick the ACL-only one by default.
+
+Some of these keywords support one or multiple mandatory arguments, and one or
+multiple optional arguments. These arguments are strongly typed and are checked
+when the configuration is parsed so that there is no risk of running with an
+incorrect argument (e.g. an unresolved backend name). Fetch function arguments
+are passed between parenthesis and are delimited by commas. When an argument
+is optional, it will be indicated below between square brackets ('[ ]'). When
+all arguments are optional, the parenthesis may be omitted.
+
+Thus, the syntax of a standard sample fetch method is one of the following :
+ - name
+ - name(arg1)
+ - name(arg1,arg2)
+
+
+7.3.1. Converters
+-----------------
+
+Sample fetch methods may be combined with transformations to be applied on top
+of the fetched sample (also called "converters"). These combinations form what
+is called "sample expressions" and the result is a "sample". Initially this
+was only supported by "stick on" and "stick store-request" directives but this
+has now been extended to all places where samples may be used (ACLs, log-format,
+unique-id-format, add-header, ...).
+
+These transformations are enumerated as a series of specific keywords after the
+sample fetch method. These keywords may equally be appended immediately after
+the fetch keyword's argument, delimited by a comma. These keywords can also
+support some arguments (e.g. a netmask) which must be passed in parenthesis.
+
+A certain category of converters are bitwise and arithmetic operators which
+support performing basic operations on integers. Some bitwise operations are
+supported (and, or, xor, cpl) and some arithmetic operations are supported
+(add, sub, mul, div, mod, neg). Some comparators are provided (odd, even, not,
+bool) which make it possible to report a match without having to write an ACL.
+
+The following keywords are supported:
+
+ keyword input type output type
+------------------------------------------------+-------------+----------------
+51d.single(prop[,prop*]) string string
+add(value) integer integer
+add_item(delim,[var][,suff])                     string        string
+aes_gcm_dec(bits,nonce,key,aead_tag) binary binary
+and(value) integer integer
+b64dec string binary
+base64 binary string
+be2dec(separator,chunk_size,[truncate]) binary string
+be2hex([separator],[chunk_size],[truncate]) binary string
+bool integer boolean
+bytes(offset[,length]) binary binary
+capture-req(id) string string
+capture-res(id) string string
+concat([start],[var],[end]) string string
+cpl integer integer
+crc32([avalanche]) binary integer
+crc32c([avalanche]) binary integer
+cut_crlf string string
+da-csv-conv(prop[,prop*]) string string
+debug([prefix][,destination]) any same
+-- keyword -------------------------------------+- input type + output type -
+digest(algorithm) binary binary
+div(value) integer integer
+djb2([avalanche]) binary integer
+even integer boolean
+field(index,delimiters[,count]) string string
+fix_is_valid binary boolean
+fix_tag_value(tag) binary binary
+hex binary string
+hex2i binary integer
+hmac(algorithm,key) binary binary
+host_only string string
+htonl integer integer
+http_date([offset],[unit]) integer string
+iif(true,false) boolean string
+in_table(table) string boolean
+ipmask(mask4,[mask6]) address address
+json([input-code]) string string
+json_query(json_path,[output_type]) string _outtype_
+jwt_header_query([json_path],[output_type]) string string
+jwt_payload_query([json_path],[output_type]) string string
+-- keyword -------------------------------------+- input type + output type -
+jwt_verify(alg,key) string integer
+language(value[,default]) string string
+length string integer
+lower string string
+ltime(format[,offset]) integer string
+ltrim(chars) string string
+map(map_name[,default_value]) string string
+map_match(map_name[,default_value]) _match_ string
+map_match_output(map_name[,default_value]) _match_ _output_
+mod(value) integer integer
+mqtt_field_value(pkt_type,fieldname_or_prop_ID) binary binary
+mqtt_is_valid binary boolean
+ms_ltime(format[,offset]) integer string
+ms_utime(format[,offset]) integer string
+mul(value) integer integer
+nbsrv string integer
+neg integer integer
+not integer boolean
+odd integer boolean
+or(value) integer integer
+-- keyword -------------------------------------+- input type + output type -
+param(name,[delim]) string string
+port_only string integer
+protobuf(field_number,[field_type]) binary binary
+regsub(regex,subst[,flags]) string string
+rfc7239_field(field) string string
+rfc7239_is_valid string boolean
+rfc7239_n2nn string address / str
+rfc7239_n2np string integer / str
+rtrim(chars) string string
+sdbm([avalanche]) binary integer
+secure_memcmp(var) string boolean
+set-var(var[,cond...]) any same
+sha1 binary binary
+sha2([bits]) binary binary
+srv_queue string integer
+strcmp(var) string boolean
+sub(value) integer integer
+table_bytes_in_rate(table) string integer
+table_bytes_out_rate(table) string integer
+table_conn_cnt(table) string integer
+-- keyword -------------------------------------+- input type + output type -
+table_conn_cur(table) string integer
+table_conn_rate(table) string integer
+table_expire(table[,default_value]) string integer
+table_gpc(idx,table) string integer
+table_gpc0(table) string integer
+table_gpc0_rate(table) string integer
+table_gpc1(table) string integer
+table_gpc1_rate(table) string integer
+table_gpc_rate(idx,table) string integer
+table_gpt(idx,table) string integer
+table_gpt0(table) string integer
+table_http_err_cnt(table) string integer
+table_http_err_rate(table) string integer
+table_http_fail_cnt(table) string integer
+table_http_fail_rate(table) string integer
+table_http_req_cnt(table) string integer
+table_http_req_rate(table) string integer
+table_idle(table[,default_value]) string integer
+table_kbytes_in(table) string integer
+-- keyword -------------------------------------+- input type + output type -
+table_kbytes_out(table) string integer
+table_server_id(table) string integer
+table_sess_cnt(table) string integer
+table_sess_rate(table) string integer
+table_trackers(table) string integer
+ub64dec string string
+ub64enc string string
+ungrpc(field_number,[field_type]) binary binary / int
+unset-var(var) any same
+upper string string
+url_dec([in_form]) string string
+url_enc([enc_type]) string string
+us_ltime(format[,offset]) integer string
+us_utime(format[,offset]) integer string
+utime(format[,offset]) integer string
+word(index,delimiters[,count]) string string
+wt6([avalanche]) binary integer
+x509_v_err_str integer string
+xor(value) integer integer
+-- keyword -------------------------------------+- input type + output type -
+xxh3([seed]) binary integer
+xxh32([seed]) binary integer
+xxh64([seed]) binary integer
+
+The detailed list of converter keywords follows:
+
+51d.single(<prop>[,<prop>*])
+ Returns values for the properties requested as a string, where values are
+ separated by the delimiter specified with "51degrees-property-separator".
+ The device is identified using the User-Agent header passed to the
+ converter. The function can be passed up to five property names, and if a
+ property name can't be found, the value "NoData" is returned.
+
+ Example :
+ # Here the header "X-51D-DeviceTypeMobileTablet" is added to the request,
+ # containing values for the three properties requested by using the
+ # User-Agent passed to the converter.
+ frontend http-in
+ bind *:8081
+ default_backend servers
+ http-request set-header X-51D-DeviceTypeMobileTablet \
+ %[req.fhdr(User-Agent),51d.single(DeviceType,IsMobile,IsTablet)]
+
+add(<value>)
+ Adds <value> to the input value of type signed integer, and returns the
+ result as a signed integer. <value> can be a numeric value or a variable
+ name. The name of the variable starts with an indication about its scope. The
+ scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+add_item(<delim>,[<var>][,<suff>])
+ Concatenates a minimum of 2 and up to 3 fields after the current sample which
+ is then turned into a string. The first one, <delim>, is a constant string,
+ that will be appended immediately after the existing sample if an existing
+ sample is not empty and either the <var> or the <suff> is not empty. The
+ second one, <var>, is a variable name. The variable will be looked up, its
+ contents converted to a string, and it will be appended immediately after
+ the <delim> part. If the variable is not found, nothing is appended. It is
+ optional and may optionally be followed by a constant string <suff>, however
+ if <var> is omitted, then <suff> is mandatory. This converter is similar to
+ the concat converter and can be used to build new variables made of a
+ succession of other variables but the main difference is that it does the
+  checks if adding a delimiter makes sense, which wouldn't be the case if the
+ current sample is empty. That situation would require 2 separate rules using
+ concat converter where the first rule would have to check if the current
+ sample string is empty before adding a delimiter. If commas or closing
+ parenthesis are needed as delimiters, they must be protected by quotes or
+ backslashes, themselves protected so that they are not stripped by the first
+ level parser (please see section 2.2 for quoting and escaping). See examples
+ below.
+
+ Example:
+ http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score1,"(site1)") if src,in_table(site1)'
+ http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score2,"(site2)") if src,in_table(site2)'
+ http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score3,"(site3)") if src,in_table(site3)'
+ http-request set-header x-tagged %[var(req.tagged)]
+
+ http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score1),add_item(",",req.score2)'
+ http-request set-var(req.tagged) 'var(req.tagged),add_item(",",,(site1))' if src,in_table(site1)
+
+aes_gcm_dec(<bits>,<nonce>,<key>,<aead_tag>)
+ Decrypts the raw byte input using the AES128-GCM, AES192-GCM or
+ AES256-GCM algorithm, depending on the <bits> parameter. All other parameters
+ need to be base64 encoded and the returned result is in raw byte format.
+ If the <aead_tag> validation fails, the converter doesn't return any data.
+ The <nonce>, <key> and <aead_tag> can either be strings or variables. This
+ converter requires at least OpenSSL 1.0.1.
+
+ Example:
+ http-response set-header X-Decrypted-Text %[var(txn.enc),\
+ aes_gcm_dec(128,txn.nonce,Zm9vb2Zvb29mb29wZm9vbw==,txn.aead_tag)]
+
+and(<value>)
+ Performs a bitwise "AND" between <value> and the input value of type signed
+  integer, and returns the result as a signed integer. <value> can be a
+ numeric value or a variable name. The name of the variable starts with an
+ indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+b64dec
+ Converts (decodes) a base64 encoded input string to its binary
+ representation. It performs the inverse operation of base64().
+ For base64url("URL and Filename Safe Alphabet" (RFC 4648)) variant
+ see "ub64dec".
+
+base64
+ Converts a binary input sample to a base64 string. It is used to log or
+ transfer binary content in a way that can be reliably transferred (e.g.
+ an SSL ID can be copied in a header). For base64url("URL and Filename
+ Safe Alphabet" (RFC 4648)) variant see "ub64enc".
+
+be2dec(<separator>,<chunk_size>,[<truncate>])
+ Converts big-endian binary input sample to a string containing an unsigned
+ integer number per <chunk_size> input bytes. <separator> is put every
+ <chunk_size> binary input bytes if specified. <truncate> flag indicates
+whether binary input is truncated at <chunk_size> boundaries. <chunk_size>
+ maximum value is limited by the size of long long int (8 bytes).
+
+ Example:
+ bin(01020304050607),be2dec(:,2) # 258:772:1286:7
+ bin(01020304050607),be2dec(-,2,1) # 258-772-1286
+ bin(01020304050607),be2dec(,2,1) # 2587721286
+ bin(7f000001),be2dec(.,1) # 127.0.0.1
+
+be2hex([<separator>],[<chunk_size>],[<truncate>])
+ Converts big-endian binary input sample to a hex string containing two hex
+ digits per input byte. It is used to log or transfer hex dumps of some
+ binary input data in a way that can be reliably transferred (e.g. an SSL ID
+ can be copied in a header). <separator> is put every <chunk_size> binary
+input bytes if specified. <truncate> flag indicates whether binary input is
+ truncated at <chunk_size> boundaries.
+
+ Example:
+ bin(01020304050607),be2hex # 01020304050607
+ bin(01020304050607),be2hex(:,2) # 0102:0304:0506:07
+ bin(01020304050607),be2hex(--,2,1) # 0102--0304--0506
+ bin(0102030405060708),be2hex(,3,1) # 010203040506
+
+bool
+ Returns a boolean TRUE if the input value of type signed integer is
+ non-null, otherwise returns FALSE. Used in conjunction with and(), it can be
+ used to report true/false for bit testing on input values (e.g. verify the
+ presence of a flag).
+
+bytes(<offset>[,<length>])
+ Extracts some bytes from an input binary sample. The result is a binary
+ sample starting at an offset (in bytes) of the original sample and
+ optionally truncated at the given length. <offset> and <length> can be numeric
+ values or variable names. The converter returns an empty sample if either
+ <offset> or <length> is invalid. Invalid <offset> means a negative value or a
+ value >= length of the input sample. Invalid <length> means a negative value
+ or, in some cases, a value bigger than the length of the input sample.
+
+ Example:
+ http-request set-var(txn.input) req.hdr(input) # let's say input is "012345"
+
+ http-response set-header bytes_0 "%[var(txn.input),bytes(0)]" # outputs "012345"
+ http-response set-header bytes_1_3 "%[var(txn.input),bytes(1,3)]" # outputs "123"
+
+ http-response set-var(txn.var_start) int(1)
+ http-response set-var(txn.var_length) int(3)
+ http-response set-header bytes_var1_var3 "%[var(txn.input),bytes(txn.var_start,txn.var_length)]" # outputs "123"
+
+capture-req(<id>)
+ Capture the string entry in the request slot <id> and returns the entry as
+ is. If the slot doesn't exist, the capture fails silently.
+
+ See also: "declare capture", "http-request capture",
+ "http-response capture", "capture.req.hdr" and
+ "capture.res.hdr" (sample fetches).
+
+capture-res(<id>)
+ Capture the string entry in the response slot <id> and returns the entry as
+ is. If the slot doesn't exist, the capture fails silently.
+
+ See also: "declare capture", "http-request capture",
+ "http-response capture", "capture.req.hdr" and
+ "capture.res.hdr" (sample fetches).
+
+concat([<start>],[<var>],[<end>])
+ Concatenates up to 3 fields after the current sample which is then turned to
+ a string. The first one, <start>, is a constant string, that will be appended
+ immediately after the existing sample. It may be omitted if not used. The
+ second one, <var>, is a variable name. The variable will be looked up, its
+ contents converted to a string, and it will be appended immediately after the
+ <first> part. If the variable is not found, nothing is appended. It may be
+ omitted as well. The third field, <end> is a constant string that will be
+ appended after the variable. It may also be omitted. Together, these elements
+ allow to concatenate variables with delimiters to an existing set of
+ variables. This can be used to build new variables made of a succession of
+ other variables, such as colon-delimited values. If commas or closing
+ parenthesis are needed as delimiters, they must be protected by quotes or
+ backslashes, themselves protected so that they are not stripped by the first
+ level parser. This is often used to build composite variables from other
+ ones, but sometimes using a format string with multiple fields may be more
+ convenient. See examples below.
+
+ Example:
+    tcp-request session set-var(sess.ip) src
+    tcp-request session set-var(sess.port) src_port
+    tcp-request session set-var(sess.dn) ssl_c_s_dn
+ tcp-request session set-var(txn.sig) str(),concat(<ip=,sess.ip,>),concat(<dn=,sess.dn,>)
+ tcp-request session set-var(txn.ipport) "str(),concat('addr=(',sess.ip),concat(',',sess.port,')')"
+ tcp-request session set-var-fmt(txn.ipport) "addr=(%[sess.ip],%[sess.port])" ## does the same
+ http-request set-header x-hap-sig %[var(txn.sig)]
+
+cpl
+ Takes the input value of type signed integer, applies a ones-complement
+  (flips all bits) and returns the result as a signed integer.
+
+crc32([<avalanche>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the CRC32
+ hash function. Optionally, it is possible to apply a full avalanche hash
+ function to the output if the optional <avalanche> argument equals 1. This
+ converter uses the same functions as used by the various hash-based load
+ balancing algorithms, so it will provide exactly the same results. It is
+ provided for compatibility with other software which want a CRC32 to be
+ computed on some input keys, so it follows the most common implementation as
+ found in Ethernet, Gzip, PNG, etc... It is slower than the other algorithms
+ but may provide a better or at least less predictable distribution. It must
+ not be used for security purposes as a 32-bit hash is trivial to break. See
+ also "djb2", "sdbm", "wt6", "crc32c" and the "hash-type" directive.
+
+crc32c([<avalanche>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the CRC32C
+ hash function. Optionally, it is possible to apply a full avalanche hash
+ function to the output if the optional <avalanche> argument equals 1. This
+ converter uses the same functions as described in RFC4960, Appendix B [8].
+ It is provided for compatibility with other software which want a CRC32C to be
+ computed on some input keys. It is slower than the other algorithms and it must
+ not be used for security purposes as a 32-bit hash is trivial to break. See
+ also "djb2", "sdbm", "wt6", "crc32" and the "hash-type" directive.
+
+cut_crlf
+ Cuts the string representation of the input sample on the first carriage
+ return ('\r') or newline ('\n') character found. Only the string length is
+ updated.
+
+da-csv-conv(<prop>[,<prop>*])
+ Asks the DeviceAtlas converter to identify the User Agent string passed on
+ input, and to emit a string made of the concatenation of the properties
+ enumerated in argument, delimited by the separator defined by the global
+ keyword "deviceatlas-property-separator", or by default the pipe character
+ ('|'). There's a limit of 12 different properties imposed by the HAProxy
+ configuration language.
+
+ Example:
+ frontend www
+ bind *:8881
+ default_backend servers
+      http-request set-header X-DeviceAtlas-Data %[req.fhdr(User-Agent),da-csv-conv(primaryHardwareType,osName,osVersion,browserName,browserVersion,browserRenderingEngine)]
+
+debug([<prefix>][,<destination>])
+ This converter is used as debug tool. It takes a capture of the input sample
+ and sends it to event sink <destination>, which may designate a ring buffer
+ such as "buf0", as well as "stdout", or "stderr". Available sinks may be
+ checked at run time by issuing "show events" on the CLI. When not specified,
+ the output will be "buf0", which may be consulted via the CLI's "show events"
+ command. An optional prefix <prefix> may be passed to help distinguish
+ outputs from multiple expressions. It will then appear before the colon in
+ the output message. The input sample is passed as-is on the output, so that
+ it is safe to insert the debug converter anywhere in a chain, even with non-
+ printable sample types.
+
+ Example:
+ tcp-request connection track-sc0 src,debug(track-sc)
+
+digest(<algorithm>)
+ Converts a binary input sample to a message digest. The result is a binary
+ sample. The <algorithm> must be an OpenSSL message digest name (e.g. sha256).
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+div(<value>)
+ Divides the input value of type signed integer by <value>, and returns the
+  result as a signed integer. If <value> is null, the largest signed
+ integer is returned (typically 2^63-1). <value> can be a numeric value or a
+ variable name. The name of the variable starts with an indication about its
+ scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+djb2([<avalanche>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the DJB2
+ hash function. Optionally, it is possible to apply a full avalanche hash
+ function to the output if the optional <avalanche> argument equals 1. This
+ converter uses the same functions as used by the various hash-based load
+ balancing algorithms, so it will provide exactly the same results. It is
+ mostly intended for debugging, but can be used as a stick-table entry to
+ collect rough statistics. It must not be used for security purposes as a
+ 32-bit hash is trivial to break. See also "crc32", "sdbm", "wt6", "crc32c",
+ and the "hash-type" directive.
+
+even
+ Returns a boolean TRUE if the input value of type signed integer is even
+ otherwise returns FALSE. It is functionally equivalent to "not,and(1),bool".
+
+field(<index>,<delimiters>[,<count>])
+ Extracts the substring at the given index counting from the beginning
+ (positive index) or from the end (negative index) considering given delimiters
+ from an input string. Indexes start at 1 or -1 and delimiters are a string
+ formatted list of chars. Optionally you can specify <count> of fields to
+ extract (default: 1). Value of 0 indicates extraction of all remaining
+ fields.
+
+ Example :
+ str(f1_f2_f3__f5),field(4,_) # <empty>
+ str(f1_f2_f3__f5),field(5,_) # f5
+ str(f1_f2_f3__f5),field(2,_,0) # f2_f3__f5
+ str(f1_f2_f3__f5),field(2,_,2) # f2_f3
+ str(f1_f2_f3__f5),field(-2,_,3) # f2_f3_
+ str(f1_f2_f3__f5),field(-3,_,0) # f1_f2_f3
+
+fix_is_valid
+ Parses a binary payload and performs sanity checks regarding FIX (Financial
+ Information eXchange):
+
+  - checks that all tag IDs and values are not empty and that the tag IDs are
+    numeric
+ - checks the BeginString tag is the first tag with a valid FIX version
+ - checks the BodyLength tag is the second one with the right body length
+ - checks the MsgType tag is the third tag.
+ - checks that last tag in the message is the CheckSum tag with a valid
+ checksum
+
+ Due to current HAProxy design, only the first message sent by the client and
+ the server can be parsed.
+
+ This converter returns a boolean, true if the payload contains a valid FIX
+ message, false if not.
+
+ See also the fix_tag_value converter.
+
+ Example:
+ tcp-request inspect-delay 10s
+ tcp-request content reject unless { req.payload(0,0),fix_is_valid }
+
+fix_tag_value(<tag>)
+ Parses a FIX (Financial Information eXchange) message and extracts the value
+ from the tag <tag>. <tag> can be a string or an integer pointing to the
+ desired tag. Any integer value is accepted, but only the following strings
+ are translated into their integer equivalent: BeginString, BodyLength,
+ MsgType, SenderCompID, TargetCompID, CheckSum. More tag names can be easily
+ added.
+
+ Due to current HAProxy design, only the first message sent by the client and
+ the server can be parsed. No message validation is performed by this
+ converter. It is highly recommended to validate the message first using
+ fix_is_valid converter.
+
+ See also the fix_is_valid converter.
+
+ Example:
+ tcp-request inspect-delay 10s
+ tcp-request content reject unless { req.payload(0,0),fix_is_valid }
+ # MsgType tag ID is 35, so both lines below will return the same content
+ tcp-request content set-var(txn.foo) req.payload(0,0),fix_tag_value(35)
+ tcp-request content set-var(txn.bar) req.payload(0,0),fix_tag_value(MsgType)
+
+hex
+ Converts a binary input sample to a hex string containing two hex digits per
+ input byte. It is used to log or transfer hex dumps of some binary input data
+ in a way that can be reliably transferred (e.g. an SSL ID can be copied in a
+ header).
+
+hex2i
+ Converts a hex string containing two hex digits per input byte to an
+ integer. If the input value cannot be converted, then zero is returned.
+
+hmac(<algorithm>,<key>)
+ Converts a binary input sample to a message authentication code with the given
+ key. The result is a binary sample. The <algorithm> must be one of the
+ registered OpenSSL message digest names (e.g. sha256). The <key> parameter must
+ be base64 encoded and can either be a string or a variable.
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+host_only
+ Converts a string which contains a Host header value and removes its port.
+ The input must respect the format of the host header value
+ (rfc9110#section-7.2). It will support that kind of input: hostname,
+ hostname:80, 127.0.0.1, 127.0.0.1:80, [::1], [::1]:80.
+
+ This converter also sets the string in lowercase.
+
+ See also: "port_only" converter which will return the port.
+
+htonl
+ Converts the input integer value to its 32-bit binary representation in the
+  network byte order. Because sample fetches produce signed 64-bit integers,
+  when this converter is used, the input integer value is first cast to an
+  unsigned 32-bit integer.
+
+http_date([<offset>],[<unit>])
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in a format suitable for use in HTTP header fields. If
+ an offset value is specified, then it is added to the date before the
+ conversion is operated. This is particularly useful to emit Date header fields,
+ Expires values in responses when combined with a positive offset, or
+ Last-Modified values when the offset is negative.
+ If a unit value is specified, then consider the timestamp as either
+ "s" for seconds (default behavior), "ms" for milliseconds, or "us" for
+ microseconds since epoch. Offset is assumed to have the same unit as
+ input timestamp.
+
+iif(<true>,<false>)
+ Returns the <true> string if the input value is true. Returns the <false>
+ string otherwise.
+
+ Example:
+ http-request set-header x-forwarded-proto %[ssl_fc,iif(https,http)]
+
+in_table(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, a boolean false
+ is returned. Otherwise a boolean true is returned. This can be used to verify
+ the presence of a certain key in a table tracking some elements (e.g. whether
+ or not a source IP address or an Authorization header was already seen).
+
+ipmask(<mask4>,[<mask6>])
+ Apply a mask to an IP address, and use the result for lookups and storage.
+ This can be used to make all hosts within a certain mask to share the same
+ table entries and as such use the same server. The mask4 can be passed in
+ dotted form (e.g. 255.255.255.0) or in CIDR form (e.g. 24). The mask6 can
+ be passed in quadruplet form (e.g. ffff:ffff::) or in CIDR form (e.g. 64).
+  If no mask6 is given, IPv6 addresses will fail to convert for backwards
+ compatibility reasons.
+
+json([<input-code>])
+ Escapes the input string and produces an ASCII output string ready to use as a
+ JSON string. The converter tries to decode the input string according to the
+ <input-code> parameter. It can be "ascii", "utf8", "utf8s", "utf8p" or
+ "utf8ps". The "ascii" decoder never fails. The "utf8" decoder detects 3 types
+ of errors:
+ - bad UTF-8 sequence (lone continuation byte, bad number of continuation
+ bytes, ...)
+ - invalid range (the decoded value is within a UTF-8 prohibited range),
+ - code overlong (the value is encoded with more bytes than necessary).
+
+ The UTF-8 JSON encoding can produce a "too long value" error when the UTF-8
+ character is greater than 0xffff because the JSON string escape specification
+ only authorizes 4 hex digits for the value encoding. The UTF-8 decoder exists
+ in 4 variants designated by a combination of two suffix letters : "p" for
+ "permissive" and "s" for "silently ignore". The behaviors of the decoders
+ are :
+ - "ascii" : never fails;
+ - "utf8" : fails on any detected errors;
+ - "utf8s" : never fails, but removes characters corresponding to errors;
+ - "utf8p" : accepts and fixes the overlong errors, but fails on any other
+ error;
+ - "utf8ps" : never fails, accepts and fixes the overlong errors, but removes
+ characters corresponding to the other errors.
+
+ This converter is particularly useful for building properly escaped JSON for
+ logging to servers which consume JSON-formatted traffic logs.
+
+ Example:
+ capture request header Host len 15
+ capture request header user-agent len 150
+ log-format '{"ip":"%[src]","user-agent":"%[capture.req.hdr(1),json(utf8s)]"}'
+
+ Input request from client 127.0.0.1:
+ GET / HTTP/1.0
+ User-Agent: Very "Ugly" UA 1/2
+
+ Output log:
+ {"ip":"127.0.0.1","user-agent":"Very \"Ugly\" UA 1\/2"}
+
+json_query(<json_path>,[<output_type>])
+ The json_query converter supports the JSON types string, boolean, number
+ and array. Floating point numbers will be returned as a string. By
+ specifying the output_type 'int' the value will be converted to an
+  Integer. Arrays will be returned as a string, starting and ending with
+  square brackets. The content is a CSV. Depending on the data type, the
+ array values might be quoted. If the array values are complex types,
+ the string contains the complete json representation of each value
+ separated by a comma. Example result for a roles query to a JWT:
+
+ ["manage-account","manage-account-links","view-profile"]
+
+ If conversion is not possible the json_query converter fails.
+
+ <json_path> must be a valid JSON Path string as defined in
+ https://datatracker.ietf.org/doc/draft-ietf-jsonpath-base/
+
+ Example:
+    # get an integer value from the request body
+ # "{"integer":4}" => 5
+ http-request set-var(txn.pay_int) req.body,json_query('$.integer','int'),add(1)
+
+ # get a key with '.' in the name
+ # {"my.key":"myvalue"} => myvalue
+ http-request set-var(txn.pay_mykey) req.body,json_query('$.my\\.key')
+
+ # {"boolean-false":false} => 0
+ http-request set-var(txn.pay_boolean_false) req.body,json_query('$.boolean-false')
+
+ # get the value of the key 'iss' from a JWT Bearer token
+ http-request set-var(txn.token_payload) req.hdr(Authorization),word(2,.),ub64dec,json_query('$.iss')
+
+jwt_header_query([<json_path>],[<output_type>])
+ When given a JSON Web Token (JWT) in input, either returns the decoded header
+ part of the token (the first base64-url encoded part of the JWT) if no
+ parameter is given, or performs a json_query on the decoded header part of
+ the token. See "json_query" converter for details about the accepted
+ json_path and output_type parameters.
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+jwt_payload_query([<json_path>],[<output_type>])
+ When given a JSON Web Token (JWT) in input, either returns the decoded
+ payload part of the token (the second base64-url encoded part of the JWT) if
+ no parameter is given, or performs a json_query on the decoded payload part
+ of the token. See "json_query" converter for details about the accepted
+ json_path and output_type parameters.
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+jwt_verify(<alg>,<key>)
+ Performs a signature verification for the JSON Web Token (JWT) given in input
+ by using the <alg> algorithm and the <key> parameter, which should either
+ hold a secret or a path to a public certificate. Returns 1 in case of
+ verification success, 0 in case of verification error and a strictly negative
+ value for any other error. Because of all those non-null error return values,
+ the result of this converter should never be converted to a boolean. See
+ below for a full list of the possible return values.
+
+ For now, only JWS tokens using the Compact Serialization format can be
+ processed (three dot-separated base64-url encoded strings). All the
+ algorithms mentioned in section 3.1 of RFC7518 are managed (HS, ES, RS and PS
+ with the 256, 384 or 512 key sizes, as well as the special "none" case).
+
+ If the used algorithm is of the HMAC family, <key> should be the secret used
+ in the HMAC signature calculation. Otherwise, <key> should be the path to the
+ public certificate that can be used to validate the token's signature. All
+ the certificates that might be used to verify JWTs must be known during init
+ in order to be added into a dedicated certificate cache so that no disk
+ access is required during runtime. For this reason, any used certificate must
+ be mentioned explicitly at least once in a jwt_verify call. Passing an
+ intermediate variable as second parameter is then not advised.
+
+ This converter only verifies the signature of the token and does not perform
+ a full JWT validation as specified in section 7.2 of RFC7519. We do not
+ ensure that the header and payload contents are fully valid JSON once
+ decoded for instance, and no checks are performed regarding their respective
+ contents.
+
+ The possible return values are the following :
+
+ +----+----------------------------------------------------------------------+
+ | ID | message |
+ +----+----------------------------------------------------------------------+
+ | 0 | "Verification failure" |
+ | 1 | "Verification success" |
+ | -1 | "Unknown algorithm (not mentioned in RFC7518)" |
+ | -2 | "Unmanaged algorithm" |
+ | -3 | "Invalid token" |
+ | -4 | "Out of memory" |
+ | -5 | "Unknown certificate" |
+ +----+----------------------------------------------------------------------+
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+ Example:
+ # Get a JWT from the authorization header, extract the "alg" field of its
+ # JOSE header and use a public certificate to verify a signature
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+ http-request deny unless { var(txn.jwt_alg) -m str "RS256" }
+ http-request deny unless { var(txn.bearer),jwt_verify(txn.jwt_alg,"/path/to/crt.pem") 1 }
+
+language(<value>[,<default>])
+ Returns the value with the highest q-factor from a list as extracted from the
+ "accept-language" header using "req.fhdr". Values with no q-factor have a
+ q-factor of 1. Values with a q-factor of 0 are dropped. Only values which
+ belong to the list of semi-colon delimited <values> will be considered. The
+ argument <value> syntax is "lang[;lang[;lang[;...]]]". If no value matches the
+ given list and a default value is provided, it is returned. Note that language
+ names may have a variant after a dash ('-'). If this variant is present in the
+ list, it will be matched, but if it is not, only the base language is checked.
+ The match is case-sensitive, and the output string is always one of those
+ provided in arguments. The ordering of arguments is meaningless, only the
+ ordering of the values in the request counts, as the first value among
+ multiple sharing the same q-factor is used.
+
+ Example :
+
+ # this configuration switches to the backend matching a
+ # given language based on the request :
+
+ acl es req.fhdr(accept-language),language(es;fr;en) -m str es
+ acl fr req.fhdr(accept-language),language(es;fr;en) -m str fr
+ acl en req.fhdr(accept-language),language(es;fr;en) -m str en
+ use_backend spanish if es
+ use_backend french if fr
+ use_backend english if en
+ default_backend choose_your_language
+
+length
+ Get the length of the string. This can only be placed after a string
+ sample fetch function or after a transformation keyword returning a string
+ type. The result is of type integer.
+
+lower
+ Convert a string sample to lower case. This can only be placed after a string
+ sample fetch function or after a transformation keyword returning a string
+ type. The result is of type string.
+
+ltime(<format>[,<offset>])
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in local time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in seconds may be applied to the input date
+ (positive or negative). See the strftime() man page for the format supported
+ by your operating system. See also the utime converter.
+
+ Example :
+
+ # Emit two columns, one with the local time and another with ip:port
+ # e.g. 20140710162350 127.0.0.1:57325
+ log-format %[date,ltime(%Y%m%d%H%M%S)]\ %ci:%cp
+
+ltrim(<chars>)
+ Skips any characters from <chars> from the beginning of the string
+ representation of the input sample.
+
+map(<map_file>[,<default_value>])
+map_<match_type>(<map_file>[,<default_value>])
+map_<match_type>_<output_type>(<map_file>[,<default_value>])
+ Search the input value from <map_file> using the <match_type> matching method,
+ and return the associated value converted to the type <output_type>. If the
+ input value cannot be found in the <map_file>, the converter returns the
+ <default_value>. If the <default_value> is not set, the converter fails and
+ acts as if no input value could be fetched. If the <match_type> is not set, it
+ defaults to "str". Likewise, if the <output_type> is not set, it defaults to
+ "str". For convenience, the "map" keyword is an alias for "map_str" and maps a
+ string to another string.
+
+ It is important to avoid overlapping between the keys : IP addresses and
+ strings are stored in trees, so the first of the finest match will be used.
+ Other keys are stored in lists, so the first matching occurrence will be used.
+
+ The following array contains the list of all map functions available sorted by
+ input type, match type and output type.
+
+ input type | match method | output type str | output type int | output type ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | str | map_str | map_str_int | map_str_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str        | beg          | map_beg         | map_beg_int     | map_beg_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | sub | map_sub | map_sub_int | map_sub_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | dir | map_dir | map_dir_int | map_dir_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | dom | map_dom | map_dom_int | map_dom_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | end | map_end | map_end_int | map_end_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | reg | map_reg | map_reg_int | map_reg_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ str | reg | map_regm | map_reg_int | map_reg_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ int | int | map_int | map_int_int | map_int_ip
+ -----------+--------------+-----------------+-----------------+---------------
+ ip | ip | map_ip | map_ip_int | map_ip_ip
+ -----------+--------------+-----------------+-----------------+---------------
+
+ The special map called "map_regm" expects matching zones in the regular
+ expression and modifies the output, replacing back references (like "\1")
+ with the corresponding match text.
+
+ The file contains one key + value per line. Lines which start with '#' are
+ ignored, just like empty lines. Leading tabs and spaces are stripped. The key
+ is then the first "word" (series of non-space/tabs characters), and the value
+ is what follows this series of space/tab till the end of the line excluding
+ trailing spaces/tabs.
+
+ Example :
+
+ # this is a comment and is ignored
+ 2.22.246.0/23 United Kingdom \n
+ <-><-----------><--><------------><---->
+ | | | | `- trailing spaces ignored
+ | | | `---------- value
+ | | `-------------------- middle spaces ignored
+ | `---------------------------- key
+ `------------------------------------ leading spaces ignored
+
+mod(<value>)
+ Divides the input value of type signed integer by <value>, and returns the
+ remainder as a signed integer. If <value> is null, then zero is returned.
+ <value> can be a numeric value or a variable name. The name of the variable
+ starts with an indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+mqtt_field_value(<packettype>,<fieldname_or_property_ID>)
+ Returns value of <fieldname> found in input MQTT payload of type
+ <packettype>.
+ <packettype> can be either a string (case insensitive matching) or a numeric
+ value corresponding to the type of packet we're supposed to extract data
+ from.
+ Supported string and integers can be found here:
+ https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718021
+ https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901022
+
+ <fieldname> depends on <packettype> and can be any of the following below.
+ (note that <fieldname> matching is case insensitive).
+ <property id> can only be found in MQTT v5.0 streams. check this table:
+ https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901029
+
+ - CONNECT (or 1): flags, protocol_name, protocol_version, client_identifier,
+ will_topic, will_payload, username, password, keepalive
+ OR any property ID as a numeric value (for MQTT v5.0
+ packets only):
+ 17: Session Expiry Interval
+ 33: Receive Maximum
+ 39: Maximum Packet Size
+ 34: Topic Alias Maximum
+ 25: Request Response Information
+ 23: Request Problem Information
+ 21: Authentication Method
+ 22: Authentication Data
+ 18: Will Delay Interval
+ 1: Payload Format Indicator
+ 2: Message Expiry Interval
+ 3: Content Type
+ 8: Response Topic
+ 9: Correlation Data
+ Not supported yet:
+ 38: User Property
+
+ - CONNACK (or 2): flags, protocol_version, reason_code
+ OR any property ID as a numeric value (for MQTT v5.0
+ packets only):
+ 17: Session Expiry Interval
+ 33: Receive Maximum
+ 36: Maximum QoS
+ 37: Retain Available
+ 39: Maximum Packet Size
+ 18: Assigned Client Identifier
+ 34: Topic Alias Maximum
+ 31: Reason String
+ 40: Wildcard Subscription Available
+ 41: Subscription Identifiers Available
+ 42: Shared Subscription Available
+ 19: Server Keep Alive
+ 26: Response Information
+ 28: Server Reference
+ 21: Authentication Method
+ 22: Authentication Data
+ Not supported yet:
+ 38: User Property
+
+ Due to current HAProxy design, only the first message sent by the client and
+ the server can be parsed. Thus this converter can extract data only from
+ CONNECT and CONNACK packet types. CONNECT is the first message sent by the
+ client and CONNACK is the first response sent by the server.
+
+ Example:
+
+ acl data_in_buffer req.len ge 4
+ tcp-request content set-var(txn.username) \
+ req.payload(0,0),mqtt_field_value(connect,protocol_name) \
+ if data_in_buffer
+ # do the same as above
+ tcp-request content set-var(txn.username) \
+ req.payload(0,0),mqtt_field_value(1,protocol_name) \
+ if data_in_buffer
+
+mqtt_is_valid
+ Checks that the binary input is a valid MQTT packet. It returns a boolean.
+
+ Due to current HAProxy design, only the first message sent by the client and
+ the server can be parsed. Thus this converter can extract data only from
+ CONNECT and CONNACK packet types. CONNECT is the first message sent by the
+ client and CONNACK is the first response sent by the server.
+
+ Only MQTT 3.1, 3.1.1 and 5.0 are supported.
+
+ Example:
+
+ acl data_in_buffer req.len ge 4
+ tcp-request content reject unless { req.payload(0,0),mqtt_is_valid }
+
+ms_ltime(<format>[,<offset>])
+ This works like "ltime" but takes an input in milliseconds. It also supports
+ the %N conversion specifier inspired by date(1).
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in local time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in milliseconds may be applied to the input date
+ (positive or negative). See the strftime() man page for the format supported
+ by your operating system.
+
+ The %N conversion specifier allows you to output the nanoseconds part of the
+ date, precision is limited since the input is milliseconds.
+ (000000000..999000000). %N can take a width argument between % and N. It is
+ useful to display milliseconds (%3N) or microseconds (%6N). The default and
+ maximum width is 9 (%N = %9N).
+
+ See also the utime converter for UTC as well as "ltime" and "us_ltime"
+ converters.
+
+ Example :
+
+ # Emit 3 columns, the local time, the timezone and another with ip:port
+ # e.g. 2023/07/24/11:53:02.196 +0200 127.0.0.1:41530
+ log-format %[accept_date(ms),ms_ltime("%Y/%m/%d/%H:%M:%S.%3N %z")]\ %ci:%cp
+
+ms_utime(<format>[,<offset>])
+ This works like "utime" but takes an input in milliseconds. It also supports
+ the %N conversion specifier inspired by date(1).
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in UTC time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in milliseconds may be applied to the input date
+ (positive or negative). See the strftime() man page for the format supported
+ by your operating system.
+
+ The %N conversion specifier allows you to output the nanoseconds part of the
+ date, precision is limited since the input is milliseconds.
+ (000000000..999000000). %N can take a width argument between % and N. It is
+ useful to display milliseconds (%3N) or microseconds (%6N). The default and
+ maximum width is 9 (%N = %9N).
+
+ See also the ltime converter for local as well as "utime" and "us_utime"
+ converters.
+
+ Example :
+
+ # Emit 3 columns, the UTC time, the timezone and another with ip:port
+ # e.g. 2023/07/24/09:53:02.196 +0000 127.0.0.1:41530
+ log-format %[accept_date(ms),ms_utime("%Y/%m/%d/%H:%M:%S.%3N %z")]\ %ci:%cp
+
+mul(<value>)
+ Multiplies the input value of type signed integer by <value>, and returns
+ the product as a signed integer. In case of overflow, the largest possible
+ value for the sign is returned so that the operation doesn't wrap around.
+ <value> can be a numeric value or a variable name. The name of the variable
+ starts with an indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+nbsrv
+ Takes an input value of type string, interprets it as a backend name and
+ returns the number of usable servers in that backend. Can be used in places
+ where we want to look up a backend from a dynamic name, like a result of a
+ map lookup.
+
+neg
+ Takes the input value of type signed integer, computes the opposite value,
+ and returns the result as a signed integer. 0 is identity. This operator
+ is provided for reversed subtracts : in order to subtract the input from a
+ constant, simply perform a "neg,add(value)".
+
+not
+ Returns a boolean FALSE if the input value of type signed integer is
+ non-null, otherwise returns TRUE. Used in conjunction with and(), it can be
+ used to report true/false for bit testing on input values (e.g. verify the
+ absence of a flag).
+
+odd
+ Returns a boolean TRUE if the input value of type signed integer is odd
+ otherwise returns FALSE. It is functionally equivalent to "and(1),bool".
+
+or(<value>)
+ Performs a bitwise "OR" between <value> and the input value of type signed
+ integer, and returns the result as a signed integer. <value> can be a
+ numeric value or a variable name. The name of the variable starts with an
+ indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and response)
+ "req" : the variable is shared only during request processing
+ "res" : the variable is shared only during response processing
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+param(<name>,[<delim>])
+ This extracts the first occurrence of the parameter <name> in the input string
+ where parameters are delimited by <delim>, which defaults to "&", and the name
+ and value of the parameter are separated by a "=". If there is no "=" and
+ value before the end of the parameter segment, it is treated as equivalent to
+ a value of an empty string.
+
+ This can be useful for extracting parameters from a query string, or possibly
+ a x-www-form-urlencoded body. In particular, `query,param(<name>)` can be used
+ as an alternative to `urlp(<name>)` which only uses "&" as a delimiter,
+ whereas "urlp" also uses "?" and ";".
+
+ Note that this converter doesn't do anything special with url encoded
+ characters. If you want to decode the value, you can use the url_dec converter
+ on the output. If the name of the parameter in the input might contain encoded
+ characters, you'll probably want to normalize the input before calling
+ "param". This can be done using "http-request normalize-uri", in particular
+ the percent-decode-unreserved and percent-to-uppercase options.
+
+ Example :
+ str(a=b&c=d&a=r),param(a) # b
+ str(a&b=c),param(a) # ""
+ str(a=&b&c=a),param(b) # ""
+ str(a=1;b=2;c=4),param(b,;) # 2
+ query,param(redirect_uri),url_dec()
+
+port_only
+ Converts a string which contains a Host header value into an integer by
+ returning its port.
+ The input must respect the format of the host header value
+ (rfc9110#section-7.2). It will support that kind of input: hostname,
+ hostname:80, 127.0.0.1, 127.0.0.1:80, [::1], [::1]:80.
+
+ If no port was provided in the input, it will return 0.
+
+ See also: "host_only" converter which will return the host.
+
+protobuf(<field_number>,[<field_type>])
+ This extracts the protocol buffers message field in raw mode of an input binary
+ sample representation of a protocol buffer message with <field_number> as field
+ number (dotted notation) if <field_type> is not present, or as an integer sample
+ if this field is present (see also "ungrpc" below).
+ The list of the authorized types is the following one: "int32", "int64", "uint32",
+ "uint64", "sint32", "sint64", "bool", "enum" for the "varint" wire type 0
+ "fixed64", "sfixed64", "double" for the 64bit wire type 1, "fixed32", "sfixed32",
+ "float" for the wire type 5. Note that "string" is considered as a length-delimited
+ type, so it does not require any <field_type> argument to be extracted.
+ More information may be found here about the protocol buffers message field types:
+ https://developers.google.com/protocol-buffers/docs/encoding
+
+regsub(<regex>,<subst>[,<flags>])
+ Applies a regex-based substitution to the input string. It does the same
+ operation as the well-known "sed" utility with "s/<regex>/<subst>/". By
+ default it will replace in the input string the first occurrence of the
+ largest part matching the regular expression <regex> with the substitution
+ string <subst>. It is possible to replace all occurrences instead by adding
+ the flag "g" in the third argument <flags>. It is also possible to make the
+ regex case insensitive by adding the flag "i" in <flags>. Since <flags> is a
+ string, it is made up from the concatenation of all desired flags. Thus if
+ both "i" and "g" are desired, using "gi" or "ig" will have the same effect.
+ The first use of this converter is to replace certain characters or sequence
+ of characters with other ones.
+
+ It is highly recommended to enclose the regex part using protected quotes to
+ improve clarity and never have a closing parenthesis from the regex mixed up
+ with the parenthesis from the function. Just like in Bourne shell, the first
+ level of quotes is processed when delimiting word groups on the line, a
+ second level is usable for argument. It is recommended to use single quotes
+ outside since these ones do not try to resolve backslashes nor dollar signs.
+
+ Examples:
+
+ # de-duplicate "/" in header "x-path".
+ # input: x-path: /////a///b/c/xzxyz/
+ # output: x-path: /a/b/c/xzxyz/
+ http-request set-header x-path "%[hdr(x-path),regsub('/+','/','g')]"
+
+ # copy query string to x-query and drop all leading '?', ';' and '&'
+ http-request set-header x-query "%[query,regsub([?;&]*,'')]"
+
+ # capture groups and backreferences
+ # both lines do the same.
+ http-request redirect location %[url,'regsub("(foo|bar)([0-9]+)?","\2\1",i)']
+ http-request redirect location %[url,regsub(\"(foo|bar)([0-9]+)?\",\"\2\1\",i)]
+
+rfc7239_field(<field>)
+ Extracts a single field/parameter from RFC 7239 compliant header value input.
+
+ Supported fields are:
+ - proto: either 'http' or 'https'
+ - host: http compliant host
+ - for: RFC7239 node
+ - by: RFC7239 node
+
+ More info here:
+ https://www.rfc-editor.org/rfc/rfc7239.html#section-6
+
+ Example:
+ # extract host field from forwarded header and store it in req.fhost var
+ http-request set-var(req.fhost) req.hdr(forwarded),rfc7239_field(host)
+ #input: "proto=https;host=\"haproxy.org:80\""
+ # output: "haproxy.org:80"
+
+ # extract for field from forwarded header and store it in req.ffor var
+ http-request set-var(req.ffor) req.hdr(forwarded),rfc7239_field(for)
+ #input: "proto=https;host=\"haproxy.org:80\";for=\"127.0.0.1:9999\""
+ # output: "127.0.0.1:9999"
+
+rfc7239_is_valid
+ Returns true if input header is RFC 7239 compliant header value and false
+ otherwise.
+
+ Example:
+ acl valid req.hdr(forwarded),rfc7239_is_valid
+ #input: "for=127.0.0.1;proto=http"
+ # output: TRUE
+ #input: "proto=custom"
+ # output: FALSE
+
+rfc7239_n2nn
+ Converts RFC7239 node (provided by 'for' or 'by' 7239 header fields)
+ into its corresponding nodename final form:
+ - ipv4 address
+ - ipv6 address
+ - 'unknown'
+ - '_obfs' identifier
+
+ Example:
+ # extract 'for' field from forwarded header, extract nodename from
+ # resulting node identifier and store the result in req.fnn
+ http-request set-var(req.fnn) req.hdr(forwarded),rfc7239_field(for),rfc7239_n2nn
+ #input: "127.0.0.1:9999"
+ # output: 127.0.0.1 (ipv4)
+ #input: "[ab:cd:ff:ff:ff:ff:ff:ff]:9998"
+ # output: ab:cd:ff:ff:ff:ff:ff:ff (ipv6)
+ #input: "_name:_port"
+ # output: "_name" (string)
+
+rfc7239_n2np
+ Converts RFC7239 node (provided by 'for' or 'by' 7239 header fields)
+ into its corresponding nodeport final form:
+ - unsigned integer
+ - '_obfs' identifier
+
+ Example:
+ # extract 'by' field from forwarded header, extract node port from
+ # resulting node identifier and store the result in req.fnp
+ http-request set-var(req.fnp) req.hdr(forwarded),rfc7239_field(by),rfc7239_n2np
+ #input: "127.0.0.1:9999"
+ # output: 9999 (integer)
+ #input: "[ab:cd:ff:ff:ff:ff:ff:ff]:9998"
+ # output: 9998 (integer)
+ #input: "_name:_port"
+ # output: "_port" (string)
+
+rtrim(<chars>)
+ Skips any characters from <chars> from the end of the string representation
+ of the input sample.
+
+sdbm([<avalanche>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the SDBM
+ hash function. Optionally, it is possible to apply a full avalanche hash
+ function to the output if the optional <avalanche> argument equals 1. This
+ converter uses the same functions as used by the various hash-based load
+ balancing algorithms, so it will provide exactly the same results. It is
+ mostly intended for debugging, but can be used as a stick-table entry to
+ collect rough statistics. It must not be used for security purposes as a
+ 32-bit hash is trivial to break. See also "crc32", "djb2", "wt6", "crc32c",
+ and the "hash-type" directive.
+
+secure_memcmp(<var>)
+ Compares the contents of <var> with the input value. Both values are treated
+ as a binary string. Returns a boolean indicating whether both binary strings
+ match.
+
+ If both binary strings have the same length then the comparison will be
+ performed in constant time.
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+ Example :
+
+ http-request set-var(txn.token) hdr(token)
+ # Check whether the token sent by the client matches the secret token
+ # value, without leaking the contents using a timing attack.
+ acl token_given str(my_secret_token),secure_memcmp(txn.token)
+
+set-var(<var>[,<cond>...])
+ Sets a variable with the input content and returns the content on the output
+ as-is if all of the specified conditions are true (see below for a list of
+ possible conditions). The variable keeps the value and the associated input
+ type. The name of the variable starts with an indication about its scope. The
+ scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and
+ response),
+ "req" : the variable is shared only during request processing,
+ "res" : the variable is shared only during response processing.
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+ You can pass at most four conditions to the converter among the following
+ possible conditions :
+ - "ifexists"/"ifnotexists":
+ Checks if the variable already existed before the current set-var call.
+ A variable is usually created through a successful set-var call.
+ Note that variables of scope "proc" are created during configuration
+ parsing so the "ifexists" condition will always be true for them.
+ - "ifempty"/"ifnotempty":
+ Checks if the input is empty or not.
+ Scalar types are never empty so the ifempty condition will be false for
+ them regardless of the input's contents (integers, booleans, IPs ...).
+ - "ifset"/"ifnotset":
+ Checks if the variable was previously set or not, or if unset-var was
+ called on the variable.
+ A variable that does not exist yet is considered as not set. A "proc"
+ variable can exist while not being set since they are created during
+ configuration parsing.
+ - "ifgt"/"iflt":
+ Checks if the content of the variable is "greater than" or "less than"
+ the input. This check can only be performed if both the input and
+ the variable are of type integer. Otherwise, the check is considered as
+ true by default.
+
+sha1
+ Converts a binary input sample to a SHA-1 digest. The result is a binary
+ sample with length of 20 bytes.
+
+sha2([<bits>])
+ Converts a binary input sample to a digest in the SHA-2 family. The result
+ is a binary sample with length of <bits>/8 bytes.
+
+ Valid values for <bits> are 224, 256, 384, 512, each corresponding to
+ SHA-<bits>. The default value is 256.
+
+ Please note that this converter is only available when HAProxy has been
+ compiled with USE_OPENSSL.
+
+srv_queue
+ Takes an input value of type string, either a server name or <backend>/<server>
+ format and returns the number of queued streams on that server. Can be used
+ in places where we want to look up queued streams from a dynamic name, like a
+ cookie value (e.g. req.cook(SRVID),srv_queue) and then make a decision to break
+ persistence or direct a request elsewhere.
+
+strcmp(<var>)
+ Compares the contents of <var> with the input value of type string. Returns
+ the result as a signed integer compatible with strcmp(3): 0 if both strings
+ are identical. A value less than 0 if the left string is lexicographically
+ smaller than the right string or if the left string is shorter. A value greater
+ than 0 otherwise (right string greater than left string or the right string is
+ shorter).
+
+ See also the secure_memcmp converter if you need to compare two binary
+ strings in constant time.
+
+ Example :
+
+ http-request set-var(txn.host) hdr(host)
+ # Check whether the client is attempting domain fronting.
+ acl ssl_sni_http_host_match ssl_fc_sni,strcmp(txn.host) eq 0
+
+
+sub(<value>)
+ Subtracts <value> from the input value of type signed integer, and returns
+ the result as a signed integer. Note: in order to subtract the input from
+ a constant, simply perform a "neg,add(value)". <value> can be a numeric value
+ or a variable name. The name of the variable starts with an indication about
+ its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and
+ response),
+ "req" : the variable is shared only during request processing,
+ "res" : the variable is shared only during response processing.
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+table_bytes_in_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average client-to-server
+ bytes rate associated with the input sample in the designated table, measured
+ in amount of bytes over the period configured in the table. See also the
+ sc_bytes_in_rate sample fetch keyword.
+
+
+table_bytes_out_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average server-to-client
+ bytes rate associated with the input sample in the designated table, measured
+ in amount of bytes over the period configured in the table. See also the
+ sc_bytes_out_rate sample fetch keyword.
+
+table_conn_cnt(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of incoming
+ connections associated with the input sample in the designated table. See
+ also the sc_conn_cnt sample fetch keyword.
+
+table_conn_cur(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the current amount of concurrent
+ tracked connections associated with the input sample in the designated table.
+ See also the sc_conn_cur sample fetch keyword.
+
+table_conn_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average incoming connection
+ rate associated with the input sample in the designated table. See also the
+ sc_conn_rate sample fetch keyword.
+
+table_expire(<table>[,<default_value>])
+ Uses the input sample to perform a look up in the specified table. If the key
+ is not found in the table, the converter fails except if <default_value> is
+ set: this makes the converter succeed and return <default_value>. If the key
+ is found the converter returns the key expiration delay associated with the
+ input sample in the designated table.
+ See also the table_idle converter.
+
+table_gpc(<idx>,<table>)
+ Uses the string representation of the input sample to perform a lookup in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the current value of the
+ General Purpose Counter at the index <idx> of the array associated
+ to the input sample in the designated <table>. <idx> is an integer
+ between 0 and 99.
+ If there is no GPC stored at this index, it also returns the boolean value 0.
+ This applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types).
+ See also the sc_get_gpc sample fetch keyword.
+
+table_gpc0(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the current value of the first
+ general purpose counter associated with the input sample in the designated
+ table. See also the sc_get_gpc0 sample fetch keyword.
+
+table_gpc0_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the frequency at which the gpc0
+ counter was incremented over the configured period in the table, associated
+ with the input sample in the designated table. See also the sc_get_gpc0_rate
+ sample fetch keyword.
+
+table_gpc1(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the current value of the second
+ general purpose counter associated with the input sample in the designated
+ table. See also the sc_get_gpc1 sample fetch keyword.
+
+table_gpc1_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the frequency at which the gpc1
+ counter was incremented over the configured period in the table, associated
+ with the input sample in the designated table. See also the sc_get_gpc1_rate
+ sample fetch keyword.
+
+table_gpc_rate(<idx>,<table>)
+ Uses the string representation of the input sample to perform a lookup in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the frequency at which the
+ General Purpose Counter at index <idx> of the array (associated to the input
+ sample in the designated stick-table <table>) was incremented over the
+ configured period. <idx> is an integer between 0 and 99.
+ If there is no gpc_rate stored at this index, it also returns the boolean
+ value 0.
+ This applies only to the 'gpc_rate' array data_type (and not to the
+ legacy 'gpc0_rate' nor 'gpc1_rate' data_types).
+ See also the sc_gpc_rate sample fetch keyword.
+
+table_gpt(<idx>,<table>)
+ Uses the string representation of the input sample to perform a lookup in
+ the specified table. If the key is not found in the table, boolean value zero
+ is returned. Otherwise the converter returns the current value of the general
+ purpose tag at the index <idx> of the array associated to the input sample
+ in the designated <table>. <idx> is an integer between 0 and 99.
+ If there is no GPT stored at this index, it also returns the boolean value 0.
+ This applies only to the 'gpt' array data_type (and not to the legacy 'gpt0'
+ data_type).
+ See also the sc_get_gpt sample fetch keyword.
+
+table_gpt0(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, boolean value zero
+ is returned. Otherwise the converter returns the current value of the first
+ general purpose tag associated with the input sample in the designated table.
+ See also the sc_get_gpt0 sample fetch keyword.
+
+table_http_err_cnt(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of HTTP
+ errors associated with the input sample in the designated table. See also the
+ sc_http_err_cnt sample fetch keyword.
+
+table_http_err_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average rate of HTTP errors
+ associated with the input sample in the designated table, measured in amount
+ of errors over the period configured in the table. See also the
+ sc_http_err_rate sample fetch keyword.
+
+table_http_fail_cnt(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of HTTP
+ failures associated with the input sample in the designated table. See also
+ the sc_http_fail_cnt sample fetch keyword.
+
+table_http_fail_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average rate of HTTP
+ failures associated with the input sample in the designated table, measured
+ in amount of failures over the period configured in the table. See also the
+ sc_http_fail_rate sample fetch keyword.
+
+table_http_req_cnt(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of HTTP
+ requests associated with the input sample in the designated table. See also
+ the sc_http_req_cnt sample fetch keyword.
+
+table_http_req_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average rate of HTTP
+ requests associated with the input sample in the designated table, measured
+ in amount of requests over the period configured in the table. See also the
+ sc_http_req_rate sample fetch keyword.
+
+table_idle(<table>[,<default_value>])
+ Uses the input sample to perform a look up in the specified table. If the key
+ is not found in the table, the converter fails except if <default_value> is
+ set: this makes the converter succeed and return <default_value>. If the key
+ is found the converter returns the time the key entry associated with the
+ input sample in the designated table remained idle since the last time it was
+ updated.
+ See also the table_expire converter.
+
+table_kbytes_in(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of client-
+ to-server data associated with the input sample in the designated table,
+ measured in kilobytes. The test is currently performed on 32-bit integers,
+ which limits values to 4 terabytes. See also the sc_kbytes_in sample fetch
+ keyword.
+
+table_kbytes_out(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of server-
+ to-client data associated with the input sample in the designated table,
+ measured in kilobytes. The test is currently performed on 32-bit integers,
+ which limits values to 4 terabytes. See also the sc_kbytes_out sample fetch
+ keyword.
+
+table_server_id(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the server ID associated with
+ the input sample in the designated table. A server ID is associated to a
+ sample by a "stick" rule when a connection to a server succeeds. A server ID
+ zero means that no server is associated with this key.
+
+table_sess_cnt(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the cumulative number of incoming
+ sessions associated with the input sample in the designated table. Note that
+ a session here refers to an incoming connection being accepted by the
+ "tcp-request connection" rulesets. See also the sc_sess_cnt sample fetch
+ keyword.
+
+table_sess_rate(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the average incoming session
+ rate associated with the input sample in the designated table. Note that a
+ session here refers to an incoming connection being accepted by the
+ "tcp-request connection" rulesets. See also the sc_sess_rate sample fetch
+ keyword.
+
+table_trackers(<table>)
+ Uses the string representation of the input sample to perform a look up in
+ the specified table. If the key is not found in the table, integer value zero
+ is returned. Otherwise the converter returns the current amount of concurrent
+ connections tracking the same key as the input sample in the designated
+ table. It differs from table_conn_cur in that it does not rely on any stored
+ information but on the table's reference count (the "use" value which is
+ returned by "show table" on the CLI). This may sometimes be more suited for
+ layer7 tracking. It can be used to tell a server how many concurrent
+ connections there are from a given address for example. See also the
+ sc_trackers sample fetch keyword.
+
+ub64dec
+ This converter is the base64url variant of b64dec converter. base64url
+ encoding is the "URL and Filename Safe Alphabet" variant of base64 encoding.
+ It is also the encoding used in JWT (JSON Web Token) standard.
+
+ Example:
+ # Decoding a JWT payload:
+ http-request set-var(txn.token_payload) req.hdr(Authorization),word(2,.),ub64dec
+
+ub64enc
+ This converter is the base64url variant of base64 converter.
+
+ungrpc(<field_number>,[<field_type>])
+ This extracts the protocol buffers message field in raw mode of an input binary
+ sample representation of a gRPC message with <field_number> as field number
+ (dotted notation) if <field_type> is not present, or as an integer sample if this
+ field is present.
+ The list of the authorized types is the following one: "int32", "int64", "uint32",
+ "uint64", "sint32", "sint64", "bool", "enum" for the "varint" wire type 0
+ "fixed64", "sfixed64", "double" for the 64bit wire type 1, "fixed32", "sfixed32",
+ "float" for the wire type 5. Note that "string" is considered as a length-delimited
+ type, so it does not require any <field_type> argument to be extracted.
+ More information may be found here about the protocol buffers message field types:
+ https://developers.google.com/protocol-buffers/docs/encoding
+
+ Example:
+ // with such a protocol buffer .proto file content adapted from
+ // https://github.com/grpc/grpc/blob/master/examples/protos/route_guide.proto
+
+ message Point {
+ int32 latitude = 1;
+ int32 longitude = 2;
+ }
+
+ message PPoint {
+ Point point = 59;
+ }
+
+ message Rectangle {
+ // One corner of the rectangle.
+ PPoint lo = 48;
+ // The other corner of the rectangle.
+ PPoint hi = 49;
+ }
+
+ let's say a body request is made of a "Rectangle" object value (two PPoint
+ protocol buffers messages), the four protocol buffers fields could be
+ extracted with these "ungrpc" directives:
+
+ req.body,ungrpc(48.59.1,int32) # "latitude" of "lo" first PPoint
+ req.body,ungrpc(48.59.2,int32) # "longitude" of "lo" first PPoint
+ req.body,ungrpc(49.59.1,int32) # "latitude" of "hi" second PPoint
+ req.body,ungrpc(49.59.2,int32) # "longitude" of "hi" second PPoint
+
+ We could also extract the intermediary 48.59 field as a binary sample as follows:
+
+ req.body,ungrpc(48.59)
+
+ As a gRPC message is always made of a gRPC header followed by protocol buffers
+ messages, in the previous example the "latitude" of "lo" first PPoint
+ could be extracted with these equivalent directives:
+
+ req.body,ungrpc(48.59),protobuf(1,int32)
+ req.body,ungrpc(48),protobuf(59.1,int32)
+ req.body,ungrpc(48),protobuf(59),protobuf(1,int32)
+
+ Note that the first converter must be "ungrpc", the remaining ones must be
+ "protobuf" and only the last one may or may not have a second argument to
+ interpret the previous binary sample.
+
+
+unset-var(<var>)
+ Unsets a variable if the input content is defined. The name of the variable
+ starts with an indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and
+ response),
+ "req" : the variable is shared only during request processing,
+ "res" : the variable is shared only during response processing.
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+upper
+ Convert a string sample to upper case. This can only be placed after a string
+ sample fetch function or after a transformation keyword returning a string
+ type. The result is of type string.
+
+url_dec([<in_form>])
+ Takes an url-encoded string provided as input and returns the decoded version
+ as output. The input and the output are of type string. If the <in_form>
+ argument is set to a non-zero integer value, the input string is assumed to
+ be part of a form or query string and the '+' character will be turned into a
+ space (' '). Otherwise this will only happen after a question mark indicating
+ a query string ('?').
+
+url_enc([<enc_type>])
+ Takes a string provided as input and returns the encoded version as output.
+ The input and the output are of type string. By default the type of encoding
+ is meant for `query` type. There is no other type supported for now but the
+ optional argument is here for future changes.
+
+us_ltime(<format>[,<offset>])
+ This works like "ltime" but takes an input in microseconds. It also supports
+ the %N conversion specifier inspired by date(1).
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in local time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in microseconds may be applied to the input
+ date (positive or negative). See the strftime() man page for the format
+ supported by your operating system.
+
+ The %N conversion specifier allows you to output the nanoseconds part of the
+ date, precision is limited since the input is microseconds.
+ (000000000..999999000). %N can take a width argument between % and N. It is
+ useful to display milliseconds (%3N) or microseconds (%6N). The default and
+ maximum width is 9 (%N = %9N).
+
+ See also the "utime" converter for UTC as well as "ltime" and "ms_ltime"
+ converters.
+
+ Example :
+
+ # Emit 3 columns, the local time, the timezone and another with ip:port
+ # e.g. 2023/07/24/09:53:02.196234 +0000 127.0.0.1:41530
+ log-format %[accept_date(us),us_ltime("%Y/%m/%d/%H:%M:%S.%6N %z")]\ %ci:%cp
+
+us_utime(<format>[,<offset>])
+ This works like "utime" but takes an input in microseconds. It also supports
+ the %N conversion specifier inspired by date(1).
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in UTC time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in microseconds may be applied to the input
+ date (positive or negative). See the strftime() man page for the format
+ supported by your operating system.
+
+ The %N conversion specifier allows you to output the nanoseconds part of the
+ date, precision is limited since the input is microseconds.
+ (000000000..999999000). %N can take a width argument between % and N. It is
+ useful to display milliseconds (%3N) or microseconds (%6N). The default and
+ maximum width is 9 (%N = %9N).
+
+ See also the "ltime" converter for local as well as "utime" and "ms_utime"
+ converters.
+
+ Example :
+
+ # Emit 3 columns, the UTC time, the timezone and another with ip:port
+ # e.g. 2023/07/24/09:53:02.196234 +0000 127.0.0.1:41530
+ log-format %[accept_date(us),us_utime("%Y/%m/%d/%H:%M:%S.%6N %z")]\ %ci:%cp
+
+utime(<format>[,<offset>])
+ Converts an integer supposed to contain a date since epoch to a string
+ representing this date in UTC time using a format defined by the <format>
+ string using strftime(3). The purpose is to allow any date format to be used
+ in logs. An optional <offset> in seconds may be applied to the input date
+ (positive or negative). See the strftime() man page for the format supported
+ by your operating system. See also the "ltime" converter as well as "ms_utime"
+ and "us_utime".
+
+ Example :
+
+ # Emit two columns, one with the UTC time and another with ip:port
+ # e.g. 20140710162350 127.0.0.1:57325
+ log-format %[date,utime(%Y%m%d%H%M%S)]\ %ci:%cp
+
+word(<index>,<delimiters>[,<count>])
+ Extracts the nth word counting from the beginning (positive index) or from
+ the end (negative index) considering given delimiters from an input string.
+ Indexes start at 1 or -1 and delimiters are a string formatted list of chars.
+ Empty words are skipped. This means that delimiters at the start or end of
+ the input string are ignored and consecutive delimiters within the input
+ string are considered to be a single delimiter.
+ Optionally you can specify <count> of words to extract (default: 1).
+ Value of 0 indicates extraction of all remaining words.
+
+ Example :
+ str(f1_f2_f3__f5),word(4,_) # f5
+ str(f1_f2_f3__f5),word(5,_) # <not found>
+ str(f1_f2_f3__f5),word(2,_,0) # f2_f3__f5
+ str(f1_f2_f3__f5),word(3,_,2) # f3__f5
+ str(f1_f2_f3__f5),word(-2,_,3) # f1_f2_f3
+ str(f1_f2_f3__f5),word(-3,_,0) # f1_f2
+ str(/f1/f2/f3/f4),word(1,/) # f1
+ str(/f1////f2/f3/f4),word(1,/) # f2
+
+wt6([<avalanche>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the WT6
+ hash function. Optionally, it is possible to apply a full avalanche hash
+ function to the output if the optional <avalanche> argument equals 1. This
+ converter uses the same functions as used by the various hash-based load
+ balancing algorithms, so it will provide exactly the same results. It is
+ mostly intended for debugging, but can be used as a stick-table entry to
+ collect rough statistics. It must not be used for security purposes as a
+ 32-bit hash is trivial to break. See also "crc32", "djb2", "sdbm", "crc32c",
+ and the "hash-type" directive.
+
+
+x509_v_err_str
+ Convert a numerical value to its corresponding X509_V_ERR constant name. It
+ is useful in ACL in order to have a configuration which works with multiple
+ version of OpenSSL since some codes might change when changing version.
+
+ When the corresponding constant name was not found, outputs the numerical
+ value as a string.
+
+ The list of constant provided by OpenSSL can be found at
+ https://www.openssl.org/docs/manmaster/man3/X509_STORE_CTX_get_error.html#ERROR-CODES
+ Be careful to read the page for the right version of OpenSSL.
+
+ Example:
+
+ bind :443 ssl crt common.pem ca-file ca-auth.crt verify optional crt-ignore-err X509_V_ERR_CERT_REVOKED,X509_V_ERR_CERT_HAS_EXPIRED
+
+ acl cert_expired ssl_c_verify,x509_v_err_str -m str X509_V_ERR_CERT_HAS_EXPIRED
+ acl cert_revoked ssl_c_verify,x509_v_err_str -m str X509_V_ERR_CERT_REVOKED
+ acl cert_ok ssl_c_verify,x509_v_err_str -m str X509_V_OK
+
+ http-response add-header X-SSL Ok if cert_ok
+ http-response add-header X-SSL Expired if cert_expired
+ http-response add-header X-SSL Revoked if cert_revoked
+
+ http-response add-header X-SSL-verify %[ssl_c_verify,x509_v_err_str]
+
+xor(<value>)
+ Performs a bitwise "XOR" (exclusive OR) between <value> and the input value
+ of type signed integer, and returns the result as a signed integer.
+ <value> can be a numeric value or a variable name. The name of the variable
+ starts with an indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and
+ response),
+ "req" : the variable is shared only during request processing,
+ "res" : the variable is shared only during response processing.
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+xxh3([<seed>])
+ Hashes a binary input sample into a signed 64-bit quantity using the XXH3
+ 64-bit variant of the XXhash hash function. This hash supports a seed which
+ defaults to zero but a different value may be passed as the <seed> argument.
+ This hash is known to be very good and very fast so it can be used to hash
+ URLs and/or URL parameters for use as stick-table keys to collect statistics
+ with a low collision rate, though care must be taken as the algorithm is not
+ considered as cryptographically secure.
+
+xxh32([<seed>])
+ Hashes a binary input sample into an unsigned 32-bit quantity using the 32-bit
+ variant of the XXHash hash function. This hash supports a seed which defaults
+ to zero but a different value may be passed as the <seed> argument. This hash
+ is known to be very good and very fast so it can be used to hash URLs and/or
+ URL parameters for use as stick-table keys to collect statistics with a low
+ collision rate, though care must be taken as the algorithm is not considered
+ as cryptographically secure.
+
+xxh64([<seed>])
+ Hashes a binary input sample into a signed 64-bit quantity using the 64-bit
+ variant of the XXHash hash function. This hash supports a seed which defaults
+ to zero but a different value may be passed as the <seed> argument. This hash
+ is known to be very good and very fast so it can be used to hash URLs and/or
+ URL parameters for use as stick-table keys to collect statistics with a low
+ collision rate, though care must be taken as the algorithm is not considered
+ as cryptographically secure.
+
+
+7.3.2. Fetching samples from internal states
+--------------------------------------------
+
+A first set of sample fetch methods applies to internal information which does
+not even relate to any client information. These ones are sometimes used with
+"monitor fail" directives to report an internal status to external watchers.
+The sample fetch methods described in this section are usable anywhere.
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+-------------------------------------------------+-------------
+act_conn integer
+acl([!]<name>[,...]) boolean
+always_false boolean
+always_true boolean
+avg_queue([<backend>]) integer
+be_conn([<backend>]) integer
+be_conn_free([<backend>]) integer
+be_sess_rate([<backend>]) integer
+bin(<hex>) bin
+bool(<bool>) bool
+connslots([<backend>]) integer
+cpu_calls integer
+cpu_ns_avg integer
+cpu_ns_tot integer
+date([<offset>],[<unit>]) integer
+date_us integer
+env(<name>) string
+fe_conn([<frontend>]) integer
+fe_req_rate([<frontend>]) integer
+fe_sess_rate([<frontend>]) integer
+hostname string
+int(<integer>) signed
+ipv4(<ipv4>) ipv4
+ipv6(<ipv6>) ipv6
+last_rule_file string
+last_rule_line integer
+lat_ns_avg integer
+lat_ns_tot integer
+meth(<method>) method
+nbsrv([<backend>]) integer
+prio_class integer
+prio_offset integer
+pid integer
+proc integer
+queue([<backend>]) integer
+quic_enabled boolean
+rand([<range>]) integer
+srv_conn([<backend>/]<server>) integer
+srv_conn_free([<backend>/]<server>) integer
+srv_is_up([<backend>/]<server>) boolean
+srv_queue([<backend>/]<server>) integer
+srv_sess_rate([<backend>/]<server>) integer
+srv_iweight([<backend>/]<server>) integer
+srv_uweight([<backend>/]<server>) integer
+srv_weight([<backend>/]<server>) integer
+stopping boolean
+str(<string>) string
+table_avl([<table>]) integer
+table_cnt([<table>]) integer
+thread integer
+txn.id32 integer
+txn.conn_retries integer
+txn.sess_term_state string
+uuid([<version>]) string
+var(<var-name>[,<default>]) undefined
+-------------------------------------------------+-------------
+
+Detailed list:
+
+act_conn : integer
+ Returns the total number of active concurrent connections on the process.
+
+acl([!]<name>[,...]) : boolean
+ Returns true if the evaluation of all the named ACL(s) is true, otherwise
+ returns false. Up to 12 ACLs may be provided, each delimited by comma. Each
+ named ACL may be prefixed with a "!" to invert the result. If any evaluation
+ produces an error then the sample also returns an error.
+ Note that HAProxy does not perform any validation checks on the referenced
+ ACLs, such as whether an ACL which uses a http request sample is used in
+ response context. This behavior may be changed in the future.
+
+always_false : boolean
+ Always returns the boolean "false" value. It may be used with ACLs as a
+ temporary replacement for another one when adjusting configurations.
+
+always_true : boolean
+ Always returns the boolean "true" value. It may be used with ACLs as a
+ temporary replacement for another one when adjusting configurations.
+
+avg_queue([<backend>]) : integer
+ Returns the total number of queued connections of the designated backend
+ divided by the number of active servers. The current backend is used if no
+ backend is specified. This is very similar to "queue" except that the size of
+ the farm is considered, in order to give a more accurate measurement of the
+ time it may take for a new connection to be processed. The main usage is with
+ ACL to return a sorry page to new users when it becomes certain they will get
+ a degraded service, or to pass to the backend servers in a header so that
+ they decide to work in degraded mode or to disable some functions to speed up
+ the processing a bit. Note that in the event there would not be any active
+ server anymore, twice the number of queued connections would be considered as
+ the measured value. This is a fair estimate, as we expect one server to get
+ back soon anyway, but we still prefer to send new traffic to another backend
+ if in better shape. See also the "queue", "be_conn", and "be_sess_rate"
+ sample fetches.
+
+be_conn([<backend>]) : integer
+ Applies to the number of currently established connections on the backend,
+ possibly including the connection being evaluated. If no backend name is
+ specified, the current one is used. But it is also possible to check another
+ backend. It can be used to use a specific farm when the nominal one is full.
+ See also the "fe_conn", "queue", "be_conn_free", and "be_sess_rate" criteria.
+
+be_conn_free([<backend>]) : integer
+ Returns an integer value corresponding to the number of available connections
+ across available servers in the backend. Queue slots are not included. Backup
+ servers are also not included, unless all other servers are down. If no
+ backend name is specified, the current one is used. But it is also possible
+ to check another backend. It can be used to use a specific farm when the
+ nominal one is full. See also the "be_conn", "connslots", and "srv_conn_free"
+ criteria.
+
+ OTHER CAVEATS AND NOTES: if any of the server maxconn, or maxqueue is 0
+ (meaning unlimited), then this fetch clearly does not make sense, in which
+ case the value returned will be -1.
+
+be_sess_rate([<backend>]) : integer
+ Returns an integer value corresponding to the sessions creation rate on the
+ backend, in number of new sessions per second. This is used with ACLs to
+ switch to an alternate backend when an expensive or fragile one reaches too
+ high a session rate, or to limit abuse of service (e.g. prevent sucking of an
+ online dictionary). It can also be useful to add this element to logs using a
+ log-format directive.
+
+ Example :
+ # Redirect to an error page if the dictionary is requested too often
+ backend dynamic
+ mode http
+ acl being_scanned be_sess_rate gt 100
+ redirect location /denied.html if being_scanned
+
+bin(<hex>) : bin
+ Returns a binary chain. The input is the hexadecimal representation
+ of the string.
+
+bool(<bool>) : bool
+ Returns a boolean value. <bool> can be 'true', 'false', '1' or '0'.
+ 'false' and '0' are the same. 'true' and '1' are the same.
+
+connslots([<backend>]) : integer
+ Returns an integer value corresponding to the number of connection slots
+ still available in the backend, by totaling the maximum amount of
+ connections on all servers and the maximum queue size. This is probably only
+ used with ACLs.
+
+ The basic idea here is to be able to measure the number of connection "slots"
+ still available (connection + queue), so that anything beyond that (intended
+ usage; see "use_backend" keyword) can be redirected to a different backend.
+
+ 'connslots' = number of available server connection slots, + number of
+ available server queue slots.
+
+ Note that while "fe_conn" may be used, "connslots" comes in especially
+ useful when you have a case of traffic going to one single ip, splitting into
+ multiple backends (perhaps using ACLs to do name-based load balancing) and
+ you want to be able to differentiate between different backends, and their
+ available "connslots". Also, whereas "nbsrv" only measures servers that are
+ actually *down*, this fetch is more fine-grained and looks into the number of
+ available connection slots as well. See also "queue" and "avg_queue".
+
+ OTHER CAVEATS AND NOTES: at this point in time, the code does not take care
+ of dynamic connections. Also, if any of the server maxconn, or maxqueue is 0,
+ then this fetch clearly does not make sense, in which case the value returned
+ will be -1.
+
+cpu_calls : integer
+ Returns the number of calls to the task processing the stream or current
+ request since it was allocated. This number is reset for each new request on
+ the same connections in case of HTTP keep-alive. This value should usually be
+ low and stable (around 2 calls for a typically simple request) but may become
+ high if some processing (compression, caching or analysis) is performed. This
+ is purely for performance monitoring purposes.
+
+cpu_ns_avg : integer
+ Returns the average number of nanoseconds spent in each call to the task
+ processing the stream or current request. This number is reset for each new
+ request on the same connections in case of HTTP keep-alive. This value
+ indicates the overall cost of processing the request or the connection for
+ each call. There is no good nor bad value but the time spent in a call
+ automatically causes latency for other processing (see lat_ns_avg below),
+ and may affect other connection's apparent response time. Certain operations
+ like compression, complex regex matching or heavy Lua operations may directly
+ affect this value, and having it in the logs will make it easier to spot the
+ faulty processing that needs to be fixed to recover decent performance.
+ Note: this value is exactly cpu_ns_tot divided by cpu_calls.
+
+cpu_ns_tot : integer
+ Returns the total number of nanoseconds spent in each call to the task
+ processing the stream or current request. This number is reset for each new
+ request on the same connections in case of HTTP keep-alive. This value
+ indicates the overall cost of processing the request or the connection for
+ each call. There is no good nor bad value but the time spent in a call
+ automatically causes latency for other processing (see lat_ns_avg below),
+ induces CPU costs on the machine, and may affect other connection's apparent
+ response time. Certain operations like compression, complex regex matching or
+ heavy Lua operations may directly affect this value, and having it in the
+ logs will make it easier to spot the faulty processing that needs to be fixed
+ to recover decent performance. The value may be artificially high due to a
+ high cpu_calls count, for example when processing many HTTP chunks, and for
+ this reason it is often preferred to log cpu_ns_avg instead.
+
+date([<offset>],[<unit>]) : integer
+ Returns the current date as the epoch (number of seconds since 01/01/1970).
+
+ If an offset value is specified, then it is added to the current date before
+ returning the value. This is particularly useful to compute relative dates,
+ as both positive and negative offsets are allowed.
+ It is useful combined with the http_date converter.
+
+ <unit> is facultative, and can be set to "s" for seconds (default behavior),
+ "ms" for milliseconds or "us" for microseconds.
+ If unit is set, return value is an integer reflecting either seconds,
+ milliseconds or microseconds since epoch, plus offset.
+ It is useful when a time resolution of less than a second is needed.
+
+ Example :
+
+ # set an expires header to now+1 hour in every response
+ http-response set-header Expires %[date(3600),http_date]
+
+ # set an expires header to now+1 hour in every response, with
+ # millisecond granularity
+ http-response set-header Expires %[date(3600000,ms),http_date(0,ms)]
+
+date_us : integer
+ Return the microseconds part of the date (the "second" part is returned by
+ date sample). This sample is coherent with the date sample as it comes
+ from the same timeval structure.
+
+env(<name>) : string
+ Returns a string containing the value of environment variable <name>. As a
+ reminder, environment variables are per-process and are sampled when the
+ process starts. This can be useful to pass some information to a next hop
+ server, or with ACLs to take specific action when the process is started a
+ certain way.
+
+ Examples :
+ # Pass the Via header to next hop with the local hostname in it
+ http-request add-header Via 1.1\ %[env(HOSTNAME)]
+
+ # reject cookie-less requests when the STOP environment variable is set
+ http-request deny if !{ req.cook(SESSIONID) -m found } { env(STOP) -m found }
+
+fe_conn([<frontend>]) : integer
+ Returns the number of currently established connections on the frontend,
+ possibly including the connection being evaluated. If no frontend name is
+ specified, the current one is used. But it is also possible to check another
+ frontend. It can be used to return a sorry page before hard-blocking, or to
+ use a specific backend to drain new requests when the farm is considered
+ full. This is mostly used with ACLs but can also be used to pass some
+ statistics to servers in HTTP headers. See also the "dst_conn", "be_conn",
+ "fe_sess_rate" fetches.
+
+fe_req_rate([<frontend>]) : integer
+ Returns an integer value corresponding to the number of HTTP requests per
+ second sent to a frontend. This number can differ from "fe_sess_rate" in
+ situations where client-side keep-alive is enabled.
+
+fe_sess_rate([<frontend>]) : integer
+ Returns an integer value corresponding to the sessions creation rate on the
+ frontend, in number of new sessions per second. This is used with ACLs to
+ limit the incoming session rate to an acceptable range in order to prevent
+ abuse of service at the earliest moment, for example when combined with other
+ layer 4 ACLs in order to force the clients to wait a bit for the rate to go
+ down below the limit. It can also be useful to add this element to logs using
+ a log-format directive. See also the "rate-limit sessions" directive for use
+ in frontends.
+
+ Example :
+ # This frontend limits incoming mails to 10/s with a max of 100
+ # concurrent connections. We accept any connection below 10/s, and
+ # force excess clients to wait for 100 ms. Since clients are limited to
+ # 100 max, there cannot be more than 10 incoming mails per second.
+ frontend mail
+ bind :25
+ mode tcp
+ maxconn 100
+ acl too_fast fe_sess_rate ge 10
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if ! too_fast
+ tcp-request content accept if WAIT_END
+
+hostname : string
+ Returns the system hostname.
+
+int(<integer>) : signed integer
+ Returns a signed integer.
+
+ipv4(<ipv4>) : ipv4
+ Returns an ipv4.
+
+ipv6(<ipv6>) : ipv6
+ Returns an ipv6.
+
+last_rule_file : string
+ This returns the name of the configuration file containing the last final
+ rule that was matched during stream analysis. A final rule is one that
+ terminates the evaluation of the rule set (like an "accept", "deny" or
+ "redirect"). This works for TCP request and response rules acting on the
+ "content" rulesets, and on HTTP rules from "http-request", "http-response"
+ and "http-after-response" rule sets. The legacy "redirect" rulesets are not
+ supported (such information is not stored there), and neither "tcp-request
+ connection" nor "tcp-request session" rulesets are supported because the
+ information is stored at the stream level and streams do not exist during
+ these rules. The main purpose of this function is to be able to report in
+ logs where was the rule that gave the final verdict, in order to help
+ figure why a request was denied for example. See also "last_rule_line".
+
+last_rule_line : integer
+ This returns the line number in the configuration file where is located the
+ last final rule that was matched during stream analysis. A final rule is one
+ that terminates the evaluation of the rule set (like an "accept", "deny" or
+ "redirect"). This works for TCP request and response rules acting on the
+ "content" rulesets, and on HTTP rules from "http-request", "http-response"
+ and "http-after-response" rule sets. The legacy "redirect" rulesets are not
+ supported (such information is not stored there), and neither "tcp-request
+ connection" nor "tcp-request session" rulesets are supported because the
+ information is stored at the stream level and streams do not exist during
+ these rules. The main purpose of this function is to be able to report in
+ logs where was the rule that gave the final verdict, in order to help
+ figure why a request was denied for example. See also "last_rule_file".
+
+lat_ns_avg : integer
+ Returns the average number of nanoseconds spent between the moment the task
+ handling the stream is woken up and the moment it is effectively called. This
+ number is reset for each new request on the same connections in case of HTTP
+ keep-alive. This value indicates the overall latency inflicted to the current
+ request by all other requests being processed in parallel, and is a direct
+ indicator of perceived performance due to noisy neighbours. In order to keep
+ the value low, it is possible to reduce the scheduler's run queue depth using
+ "tune.runqueue-depth", to reduce the number of concurrent events processed at
+ once using "tune.maxpollevents", to decrease the stream's nice value using
+ the "nice" option on the "bind" lines or in the frontend, to enable low
+ latency scheduling using "tune.sched.low-latency", or to look for other heavy
+ requests in logs (those exhibiting large values of "cpu_ns_avg"), whose
+ processing needs to be adjusted or fixed. Compression of large buffers could
+ be a culprit, like heavy regex or long lists of regex. Note: this value is
+ exactly lat_ns_tot divided by cpu_calls.
+
+lat_ns_tot : integer
+ Returns the total number of nanoseconds spent between the moment the task
+ handling the stream is woken up and the moment it is effectively called. This
+ number is reset for each new request on the same connections in case of HTTP
+ keep-alive. This value indicates the overall latency inflicted to the current
+ request by all other requests being processed in parallel, and is a direct
+ indicator of perceived performance due to noisy neighbours. In order to keep
+ the value low, it is possible to reduce the scheduler's run queue depth using
+ "tune.runqueue-depth", to reduce the number of concurrent events processed at
+ once using "tune.maxpollevents", to decrease the stream's nice value using
+ the "nice" option on the "bind" lines or in the frontend, to enable low
+ latency scheduling using "tune.sched.low-latency", or to look for other heavy
+ requests in logs (those exhibiting large values of "cpu_ns_avg"), whose
+ processing needs to be adjusted or fixed. Compression of large buffers could
+ be a culprit, like heavy regex or long lists of regex. Note: while it
+ may intuitively seem that the total latency adds to a transfer time, it is
+ almost never true because while a task waits for the CPU, network buffers
+ continue to fill up and the next call will process more at once. The value
+ may be artificially high due to a high cpu_calls count, for example when
+ processing many HTTP chunks, and for this reason it is often preferred to log
+ lat_ns_avg instead, which is a more relevant performance indicator.
+
+meth(<method>) : method
+ Returns a method.
+
+nbsrv([<backend>]) : integer
+ Returns an integer value corresponding to the number of usable servers of
+ either the current backend or the named backend. This is mostly used with
+ ACLs but can also be useful when added to logs. This is normally used to
+ switch to an alternate backend when the number of servers is too low to
+ handle some load. It is useful to report a failure when combined with
+ "monitor fail".
+
+prio_class : integer
+ Returns the priority class of the current stream for http mode or connection
+ for tcp mode. The value will be that set by the last call to "http-request
+ set-priority-class" or "tcp-request content set-priority-class".
+
+prio_offset : integer
+ Returns the priority offset of the current stream for http mode or
+ connection for tcp mode. The value will be that set by the last call to
+ "http-request set-priority-offset" or "tcp-request content
+ set-priority-offset".
+
+pid : integer
+ Return the PID of the current process. In most cases this is the PID of the
+ worker process.
+
+proc : integer
+ Always returns value 1 (historically it would return the calling process
+ number).
+
+queue([<backend>]) : integer
+ Returns the total number of queued connections of the designated backend,
+ including all the connections in server queues. If no backend name is
+ specified, the current one is used, but it is also possible to check another
+ one. This is useful with ACLs or to pass statistics to backend servers. This
+ can be used to take actions when queuing goes above a known level, generally
+ indicating a surge of traffic or a massive slowdown on the servers. One
+ possible action could be to reject new users but still accept old ones. See
+ also the "avg_queue", "be_conn", and "be_sess_rate" fetches.
+
+quic_enabled : boolean
+ Return true when the support for QUIC transport protocol was compiled and
+ if this protocol was not disabled by "no-quic" global option. See also "no-quic"
+ global option.
+
+rand([<range>]) : integer
+ Returns a random integer value within a range of <range> possible values,
+ starting at zero. If the range is not specified, it defaults to 2^32, which
+ gives numbers between 0 and 4294967295. It can be useful to pass some values
+ needed to take some routing decisions for example, or just for debugging
+ purposes. This random must not be used for security purposes.
+
+srv_conn([<backend>/]<server>) : integer
+ Returns an integer value corresponding to the number of currently established
+ connections on the designated server, possibly including the connection being
+ evaluated. If <backend> is omitted, then the server is looked up in the
+ current backend. It can be used to use a specific farm when one server is
+ full, or to inform the server about our view of the number of active
+ connections with it. See also the "fe_conn", "be_conn", "queue", and
+ "srv_conn_free" fetch methods.
+
+srv_conn_free([<backend>/]<server>) : integer
+ Returns an integer value corresponding to the number of available connections
+ on the designated server, possibly including the connection being evaluated.
+ The value does not include queue slots. If <backend> is omitted, then the
+ server is looked up in the current backend. It can be used to use a specific
+ farm when one server is full, or to inform the server about our view of the
+ number of active connections with it. See also the "be_conn_free" and
+ "srv_conn" fetch methods.
+
+ OTHER CAVEATS AND NOTES: If the server maxconn is 0, then this fetch clearly
+ does not make sense, in which case the value returned will be -1.
+
+srv_is_up([<backend>/]<server>) : boolean
+ Returns true when the designated server is UP, and false when it is either
+ DOWN or in maintenance mode. If <backend> is omitted, then the server is
+ looked up in the current backend. It is mainly used to take action based on
+ an external status reported via a health check (e.g. a geographical site's
+ availability). Another possible use which is more of a hack consists in
+ using dummy servers as boolean variables that can be enabled or disabled from
+ the CLI, so that rules depending on those ACLs can be tweaked in realtime.
+
+srv_queue([<backend>/]<server>) : integer
+ Returns an integer value corresponding to the number of connections currently
+ pending in the designated server's queue. If <backend> is omitted, then the
+ server is looked up in the current backend. It can sometimes be used together
+ with the "use-server" directive to force to use a known faster server when it
+ is not much loaded. See also the "srv_conn", "avg_queue" and "queue" sample
+ fetch methods.
+
+srv_sess_rate([<backend>/]<server>) : integer
+ Returns an integer corresponding to the sessions creation rate on the
+ designated server, in number of new sessions per second. If <backend> is
+ omitted, then the server is looked up in the current backend. This is mostly
+ used with ACLs but can make sense with logs too. This is used to switch to an
+ alternate backend when an expensive or fragile one reaches too high a session
+ rate, or to limit abuse of service (e.g. prevent latent requests from
+ overloading servers).
+
+ Example :
+ # Redirect to a separate backend
+ acl srv1_full srv_sess_rate(be1/srv1) gt 50
+ acl srv2_full srv_sess_rate(be1/srv2) gt 50
+ use_backend be2 if srv1_full or srv2_full
+
+srv_iweight([<backend>/]<server>) : integer
+ Returns an integer corresponding to the server's initial weight. If <backend>
+ is omitted, then the server is looked up in the current backend. See also
+ "srv_weight" and "srv_uweight".
+
+srv_uweight([<backend>/]<server>) : integer
+ Returns an integer corresponding to the user visible server's weight. If
+ <backend> is omitted, then the server is looked up in the current
+ backend. See also "srv_weight" and "srv_iweight".
+
+srv_weight([<backend>/]<server>) : integer
+ Returns an integer corresponding to the current (or effective) server's
+ weight. If <backend> is omitted, then the server is looked up in the current
+ backend. See also "srv_iweight" and "srv_uweight".
+
+stopping : boolean
+ Returns TRUE if the process calling the function is currently stopping. This
+ can be useful for logging, or for relaxing certain checks or helping close
+ certain connections upon graceful shutdown.
+
+str(<string>) : string
+ Returns a string.
+
+table_avl([<table>]) : integer
+ Returns the total number of available entries in the current proxy's
+ stick-table or in the designated stick-table. See also table_cnt.
+
+table_cnt([<table>]) : integer
+ Returns the total number of entries currently in use in the current proxy's
+ stick-table or in the designated stick-table. See also src_conn_cnt and
+ table_avl for other entry counting methods.
+
+thread : integer
+ Returns an integer value corresponding to the position of the thread calling
+ the function, between 0 and (global.nbthread-1). This is useful for logging
+ and debugging purposes.
+
+txn.id32 : integer
+ Returns the internal transaction ID. It is a 32bits integer. So, in absolute,
+ its value is not unique, transaction IDs may wrap. The wrapping period
+ depends on the request rate. In practice, it should not be an issue. For a
+ true unique ID, see "unique-id-format" directive.
+
+txn.conn_retries : integer
+ Returns the number of connection retries experienced by this stream when
+ trying to connect to the server. This value is subject to change while the
+ connection is not fully established. For HTTP connections, the value may be
+ affected by L7 retries.
+
+txn.sess_term_state : string
+ Returns the TCP or HTTP stream termination state, as reported in the log. It
+ is a 2-character string: the final stream state followed by the event which
+ caused it to terminate. See section 8.5 about stream state at disconnection
+ for the list of possible events. The current value at time the sample fetch
+ is evaluated is returned. It is subject to change. Except used with ACLs in
+ "http-after-response" rule sets or in log messages, it will always be "--".
+
+ Example:
+ # Return a 429-Too-Many-Requests if stream timed out in queue
+ http-after-response set-status 429 if { txn.sess_term_state "sQ" }
+
+uuid([<version>]) : string
+ Returns a UUID following the RFC4122 standard. If the version is not
+ specified, a UUID version 4 (fully random) is returned.
+ Currently, only version 4 is supported.
+
+var(<var-name>[,<default>]) : undefined
+ Returns a variable with the stored type. If the variable is not set, the
+ sample fetch fails, unless a default value is provided, in which case it will
+ return it as a string. Empty strings are permitted. The name of the variable
+ starts with an indication about its scope. The scopes allowed are:
+ "proc" : the variable is shared with the whole process
+ "sess" : the variable is shared with the whole session
+ "txn" : the variable is shared with the transaction (request and
+ response),
+ "req" : the variable is shared only during request processing,
+ "res" : the variable is shared only during response processing.
+ This prefix is followed by a name. The separator is a '.'. The name may only
+ contain characters 'a-z', 'A-Z', '0-9', '.' and '_'.
+
+7.3.3. Fetching samples at Layer 4
+----------------------------------
+
+The layer 4 usually describes just the transport layer which in HAProxy is
+closest to the connection, where no content is yet made available. The fetch
+methods described here are usable as low as the "tcp-request connection" rule
+sets unless they require some future information. Those generally include
+TCP/IP addresses and ports, as well as elements from stick-tables related to
+the incoming connection. For retrieving a value from a sticky counters, the
+counter number can be explicitly set as 0, 1, or 2 using the pre-defined
+"sc0_", "sc1_", or "sc2_" prefix. These three pre-defined prefixes can only be
+used if the global "tune.stick-counters" value does not exceed 3, otherwise the
+counter number can be specified as the first integer argument when using the
+"sc_" prefix starting from "sc_0" to "sc_N" where N is (tune.stick-counters-1).
+An optional table may be specified with the "sc*" form, in which case the
+currently tracked key will be looked up into this alternate table instead of
+the table currently being tracked.
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+-------------------------------------------------+-------------
+accept_date([<unit>]) integer
+bc.timer.connect integer
+bc_dst ip
+bc_dst_port integer
+bc_err integer
+bc_err_str string
+bc_http_major integer
+bc_src ip
+bc_src_port integer
+be_id integer
+be_name string
+bc_rtt(<unit>) integer
+bc_rttvar(<unit>) integer
+be_server_timeout integer
+be_tunnel_timeout integer
+bytes_in integer
+bytes_out integer
+cur_server_timeout integer
+cur_tunnel_timeout integer
+cur_client_timeout integer
+dst ip
+dst_conn integer
+dst_is_local boolean
+dst_port integer
+fc.timer.handshake integer
+fc.timer.total integer
+fc_dst ip
+fc_dst_is_local boolean
+fc_dst_port integer
+fc_err integer
+fc_err_str string
+fc_fackets integer
+fc_http_major integer
+fc_lost integer
+fc_pp_authority string
+fc_pp_unique_id string
+fc_pp_tlv(<id>) string
+fc_rcvd_proxy boolean
+fc_reordering integer
+fc_retrans integer
+fc_rtt(<unit>) integer
+fc_rttvar(<unit>) integer
+fc_sacked integer
+fc_src ip
+fc_src_is_local boolean
+fc_src_port integer
+fc_unacked integer
+fe_defbe string
+fe_id integer
+fe_name string
+fe_client_timeout integer
+res.timer.data integer
+sc_bytes_in_rate(<ctr>[,<table>]) integer
+sc0_bytes_in_rate([<table>]) integer
+sc1_bytes_in_rate([<table>]) integer
+sc2_bytes_in_rate([<table>]) integer
+sc_bytes_out_rate(<ctr>[,<table>]) integer
+sc0_bytes_out_rate([<table>]) integer
+sc1_bytes_out_rate([<table>]) integer
+sc2_bytes_out_rate([<table>]) integer
+sc_clr_gpc(<idx>,<ctr>[,<table>]) integer
+sc_clr_gpc0(<ctr>[,<table>]) integer
+sc0_clr_gpc0([<table>]) integer
+sc1_clr_gpc0([<table>]) integer
+sc2_clr_gpc0([<table>]) integer
+sc_clr_gpc1(<ctr>[,<table>]) integer
+sc0_clr_gpc1([<table>]) integer
+sc1_clr_gpc1([<table>]) integer
+sc2_clr_gpc1([<table>]) integer
+sc_conn_cnt(<ctr>[,<table>]) integer
+sc0_conn_cnt([<table>]) integer
+sc1_conn_cnt([<table>]) integer
+sc2_conn_cnt([<table>]) integer
+sc_conn_cur(<ctr>[,<table>]) integer
+sc0_conn_cur([<table>]) integer
+sc1_conn_cur([<table>]) integer
+sc2_conn_cur([<table>]) integer
+sc_conn_rate(<ctr>[,<table>]) integer
+sc0_conn_rate([<table>]) integer
+sc1_conn_rate([<table>]) integer
+sc2_conn_rate([<table>]) integer
+sc_get_gpc(<idx>,<ctr>[,<table>]) integer
+sc_get_gpc0(<ctr>[,<table>]) integer
+sc0_get_gpc0([<table>]) integer
+sc1_get_gpc0([<table>]) integer
+sc2_get_gpc0([<table>]) integer
+sc_get_gpc1(<ctr>[,<table>]) integer
+sc0_get_gpc1([<table>]) integer
+sc1_get_gpc1([<table>]) integer
+sc2_get_gpc1([<table>]) integer
+sc_get_gpt(<idx>,<ctr>[,<table>]) integer
+sc_get_gpt0(<ctr>[,<table>]) integer
+sc0_get_gpt0([<table>]) integer
+sc1_get_gpt0([<table>]) integer
+sc2_get_gpt0([<table>]) integer
+sc_gpc_rate(<idx>,<ctr>[,<table>]) integer
+sc_gpc0_rate(<ctr>[,<table>]) integer
+sc0_gpc0_rate([<table>]) integer
+sc1_gpc0_rate([<table>]) integer
+sc2_gpc0_rate([<table>]) integer
+sc_gpc1_rate(<ctr>[,<table>]) integer
+sc0_gpc1_rate([<table>]) integer
+sc1_gpc1_rate([<table>]) integer
+sc2_gpc1_rate([<table>]) integer
+sc_http_err_cnt(<ctr>[,<table>]) integer
+sc0_http_err_cnt([<table>]) integer
+sc1_http_err_cnt([<table>]) integer
+sc2_http_err_cnt([<table>]) integer
+sc_http_err_rate(<ctr>[,<table>]) integer
+sc0_http_err_rate([<table>]) integer
+sc1_http_err_rate([<table>]) integer
+sc2_http_err_rate([<table>]) integer
+sc_http_fail_cnt(<ctr>[,<table>]) integer
+sc0_http_fail_cnt([<table>]) integer
+sc1_http_fail_cnt([<table>]) integer
+sc2_http_fail_cnt([<table>]) integer
+sc_http_fail_rate(<ctr>[,<table>]) integer
+sc0_http_fail_rate([<table>]) integer
+sc1_http_fail_rate([<table>]) integer
+sc2_http_fail_rate([<table>]) integer
+sc_http_req_cnt(<ctr>[,<table>]) integer
+sc0_http_req_cnt([<table>]) integer
+sc1_http_req_cnt([<table>]) integer
+sc2_http_req_cnt([<table>]) integer
+sc_http_req_rate(<ctr>[,<table>]) integer
+sc0_http_req_rate([<table>]) integer
+sc1_http_req_rate([<table>]) integer
+sc2_http_req_rate([<table>]) integer
+sc_inc_gpc(<idx>,<ctr>[,<table>]) integer
+sc_inc_gpc0(<ctr>[,<table>]) integer
+sc0_inc_gpc0([<table>]) integer
+sc1_inc_gpc0([<table>]) integer
+sc2_inc_gpc0([<table>]) integer
+sc_inc_gpc1(<ctr>[,<table>]) integer
+sc0_inc_gpc1([<table>]) integer
+sc1_inc_gpc1([<table>]) integer
+sc2_inc_gpc1([<table>]) integer
+sc_kbytes_in(<ctr>[,<table>]) integer
+sc0_kbytes_in([<table>]) integer
+sc1_kbytes_in([<table>]) integer
+sc2_kbytes_in([<table>]) integer
+sc_kbytes_out(<ctr>[,<table>]) integer
+sc0_kbytes_out([<table>]) integer
+sc1_kbytes_out([<table>]) integer
+sc2_kbytes_out([<table>]) integer
+sc_sess_cnt(<ctr>[,<table>]) integer
+sc0_sess_cnt([<table>]) integer
+sc1_sess_cnt([<table>]) integer
+sc2_sess_cnt([<table>]) integer
+sc_sess_rate(<ctr>[,<table>]) integer
+sc0_sess_rate([<table>]) integer
+sc1_sess_rate([<table>]) integer
+sc2_sess_rate([<table>]) integer
+sc_tracked(<ctr>[,<table>]) boolean
+sc0_tracked([<table>]) boolean
+sc1_tracked([<table>]) boolean
+sc2_tracked([<table>]) boolean
+sc_trackers(<ctr>[,<table>]) integer
+sc0_trackers([<table>]) integer
+sc1_trackers([<table>]) integer
+sc2_trackers([<table>]) integer
+so_id integer
+so_name string
+src ip
+src_bytes_in_rate([<table>]) integer
+src_bytes_out_rate([<table>]) integer
+src_clr_gpc(<idx>,[<table>]) integer
+src_clr_gpc0([<table>]) integer
+src_clr_gpc1([<table>]) integer
+src_conn_cnt([<table>]) integer
+src_conn_cur([<table>]) integer
+src_conn_rate([<table>]) integer
+src_get_gpc(<idx>,[<table>]) integer
+src_get_gpc0([<table>]) integer
+src_get_gpc1([<table>]) integer
+src_get_gpt(<idx>[,<table>]) integer
+src_get_gpt0([<table>]) integer
+src_gpc_rate(<idx>[,<table>]) integer
+src_gpc0_rate([<table>]) integer
+src_gpc1_rate([<table>]) integer
+src_http_err_cnt([<table>]) integer
+src_http_err_rate([<table>]) integer
+src_http_fail_cnt([<table>]) integer
+src_http_fail_rate([<table>]) integer
+src_http_req_cnt([<table>]) integer
+src_http_req_rate([<table>]) integer
+src_inc_gpc(<idx>,[<table>]) integer
+src_inc_gpc0([<table>]) integer
+src_inc_gpc1([<table>]) integer
+src_is_local boolean
+src_kbytes_in([<table>]) integer
+src_kbytes_out([<table>]) integer
+src_port integer
+src_sess_cnt([<table>]) integer
+src_sess_rate([<table>]) integer
+src_updt_conn_cnt([<table>]) integer
+srv_id integer
+srv_name string
+txn.conn_retries integer
+-------------------------------------------------+-------------
+
+Detailed list:
+
+accept_date([<unit>]) : integer
+ This is the exact date when the connection was received by HAProxy
+ (which might be very slightly different from the date observed on the
+ network if there was some queuing in the system's backlog). This is usually
+ the same date which may appear in any upstream firewall's log. When used in
+ HTTP mode, the accept_date field will be reset to the first moment the
+ connection is ready to receive a new request (end of previous response for
+ HTTP/1, immediately after previous request for HTTP/2).
+
+ Returns a value in number of seconds since epoch.
+
+ <unit> is facultative, and can be set to "s" for seconds (default behavior),
+ "ms" for milliseconds or "us" for microseconds.
+ If unit is set, return value is an integer reflecting either seconds,
+ milliseconds or microseconds since epoch.
+ It is useful when a time resolution of less than a second is needed.
+
+bc.timer.connect : integer
+ Total time to establish the TCP connection to the server. This is the
+ equivalent of %Tc in the log-format. This is reported in milliseconds (ms).
+ For more information see Section 8.4 "Timing events"
+
+bc_dst : ip
+ This is the destination ip address of the connection on the server side,
+ which is the server address HAProxy connected to. It is of type IP and works
+ on both IPv4 and IPv6 tables. On IPv6 tables, IPv4 address is mapped to its
+ IPv6 equivalent, according to RFC 4291.
+
+bc_dst_port : integer
+ Returns an integer value corresponding to the destination TCP port of the
+ connection on the server side, which is the port HAProxy connected to.
+
+bc_err : integer
+ Returns the ID of the error that might have occurred on the current backend
+ connection. See the "fc_err_str" fetch for a full list of error codes
+ and their corresponding error message.
+
+bc_err_str : string
+ Returns an error message describing what problem happened on the current
+ backend connection, resulting in a connection failure. See the
+ "fc_err_str" fetch for a full list of error codes and their
+ corresponding error message.
+
+bc_http_major : integer
+ Returns the backend connection's HTTP major version encoding, which may be 1
+ for HTTP/0.9 to HTTP/1.1 or 2 for HTTP/2. Note, this is based on the on-wire
+ encoding and not the version present in the request header.
+
+bc_src : ip
+ This is the source ip address of the connection on the server side, which is
+ the server address HAProxy connected from. It is of type IP and works on both
+ IPv4 and IPv6 tables. On IPv6 tables, IPv4 addresses are mapped to their IPv6
+ equivalent, according to RFC 4291.
+
+bc_src_port : integer
+ Returns an integer value corresponding to the TCP source port of the
+ connection on the server side, which is the port HAProxy connected from.
+
+be_id : integer
+ Returns an integer containing the current backend's id. It can be used in
+ frontends with responses to check which backend processed the request. If
+ used in a frontend and no backend was used, it returns the current
+ frontend's id. It can also be used in a tcp-check or an http-check ruleset.
+
+be_name : string
+ Returns a string containing the current backend's name. It can be used in
+ frontends with responses to check which backend processed the request. If
+ used in a frontend and no backend was used, it returns the current
+ frontend's name. It can also be used in a tcp-check or an http-check
+ ruleset.
+
+bc_rtt(<unit>) : integer
+ Returns the Round Trip Time (RTT) measured by the kernel for the backend
+ connection. <unit> is facultative, by default the unit is milliseconds. <unit>
+ can be set to "ms" for milliseconds or "us" for microseconds. If the server
+ connection is not established, if the connection is not TCP or if the
+ operating system does not support TCP_INFO, for example Linux kernels before
+ 2.4, the sample fetch fails.
+
+bc_rttvar(<unit>) : integer
+ Returns the Round Trip Time (RTT) variance measured by the kernel for the
+ backend connection. <unit> is facultative, by default the unit is milliseconds.
+ <unit> can be set to "ms" for milliseconds or "us" for microseconds. If the
+ server connection is not established, if the connection is not TCP or if the
+ operating system does not support TCP_INFO, for example Linux kernels before
+ 2.4, the sample fetch fails.
+
+be_server_timeout : integer
+ Returns the configuration value in millisecond for the server timeout of the
+ current backend. This timeout can be overwritten by a "set-timeout" rule. See
+ also the "cur_server_timeout".
+
+be_tunnel_timeout : integer
+ Returns the configuration value in millisecond for the tunnel timeout of the
+ current backend. This timeout can be overwritten by a "set-timeout" rule. See
+ also the "cur_tunnel_timeout".
+
+bytes_in : integer
+ This returns the number of bytes uploaded from the client to the server.
+
+bytes_out : integer
+ This is the number of bytes transmitted from the server to the client.
+
+cur_server_timeout : integer
+ Returns the currently applied server timeout in millisecond for the stream.
+ In the default case, this will be equal to be_server_timeout unless a
+ "set-timeout" rule has been applied. See also "be_server_timeout".
+
+cur_tunnel_timeout : integer
+ Returns the currently applied tunnel timeout in millisecond for the stream.
+ In the default case, this will be equal to be_tunnel_timeout unless a
+ "set-timeout" rule has been applied. See also "be_tunnel_timeout".
+
+cur_client_timeout : integer
+ Returns the currently applied client timeout in millisecond for the stream.
+ In the default case, this will be equal to fe_client_timeout unless a
+ "set-timeout" rule has been applied. See also "fe_client_timeout".
+
+dst : ip
+ This is the destination IP address of the connection on the client side,
+ which is the address the client connected to. Any tcp/http rules may alter
+ this address. It can be useful when running in transparent mode. It is of
+ type IP and works on both IPv4 and IPv6 tables. On IPv6 tables, IPv4 address
+ is mapped to its IPv6 equivalent, according to RFC 4291. When the incoming
+ connection passed through address translation or redirection involving
+ connection tracking, the original destination address before the redirection
+ will be reported. On Linux systems, the source and destination may seldom
+ appear reversed if the nf_conntrack_tcp_loose sysctl is set, because a late
+ response may reopen a timed out connection and switch what is believed to be
+ the source and the destination.
+
+dst_conn : integer
+ Returns an integer value corresponding to the number of currently established
+ connections on the same socket including the one being evaluated. It is
+ normally used with ACLs but can as well be used to pass the information to
+ servers in an HTTP header or in logs. It can be used to either return a sorry
+ page before hard-blocking, or to use a specific backend to drain new requests
+ when the socket is considered saturated. This offers the ability to assign
+ different limits to different listening ports or addresses. See also the
+ "fe_conn" and "be_conn" fetches.
+
+dst_is_local : boolean
+ Returns true if the destination address of the incoming connection is local
+ to the system, or false if the address doesn't exist on the system, meaning
+ that it was intercepted in transparent mode. It can be useful to apply
+ certain rules by default to forwarded traffic and other rules to the traffic
+ targeting the real address of the machine. For example the stats page could
+ be delivered only on this address, or SSH access could be locally redirected.
+ Please note that the check involves a few system calls, so it's better to do
+ it only once per connection.
+
+dst_port : integer
+ Returns an integer value corresponding to the destination TCP port of the
+ connection on the client side, which is the port the client connected to.
+ Any tcp/http rules may alter this address. This might be used when running in
+ transparent mode, when assigning dynamic ports to some clients for a whole
+ application session, to stick all users to a same server, or to pass the
+ destination port information to a server using an HTTP header.
+
+fc.timer.handshake : integer
+ Total time to accept tcp connection and execute handshakes for low level
+ protocols. Currently, these protocols are proxy-protocol and SSL. This is the
+ equivalent of %Th in the log-format. This is reported in milliseconds (ms).
+ For more information see Section 8.4 "Timing events"
+
+fc.timer.total : integer
+ Total stream duration time, between the moment the proxy accepted it and the
+ moment both ends were closed. This is the equivalent of %Tt in the log-format.
+ This is reported in milliseconds (ms). For more information see Section 8.4
+ "Timing events"
+
+fc_dst : ip
+ This is the original destination IP address of the connection on the client
+ side. Only "tcp-request connection" rules may alter this address. See "dst"
+ for details.
+
+fc_dst_is_local : boolean
+ Returns true if the original destination address of the incoming connection
+ is local to the system, or false if the address doesn't exist on the
+ system. See "dst_is_local" for details.
+
+fc_dst_port : integer
+ Returns an integer value corresponding to the original destination TCP port
+ of the connection on the client side. Only "tcp-request connection" rules may
+ alter this address. See "dst_port" for details.
+
+fc_err : integer
+ Returns the ID of the error that might have occurred on the current
+ connection. Any strictly positive value of this fetch indicates that the
+ connection did not succeed and would result in an error log being output (as
+ described in section 8.2.6). See the "fc_err_str" fetch for a full list of
+ error codes and their corresponding error message.
+
+fc_err_str : string
+ Returns an error message describing what problem happened on the current
+ connection, resulting in a connection failure. This string corresponds to the
+ "message" part of the error log format (see section 8.2.6). See below for a
+ full list of error codes and their corresponding error messages :
+
+ +----+---------------------------------------------------------------------------+
+ | ID | message |
+ +----+---------------------------------------------------------------------------+
+ | 0 | "Success" |
+ | 1 | "Reached configured maxconn value" |
+ | 2 | "Too many sockets on the process" |
+ | 3 | "Too many sockets on the system" |
+ | 4 | "Out of system buffers" |
+ | 5 | "Protocol or address family not supported" |
+ | 6 | "General socket error" |
+ | 7 | "Source port range exhausted" |
+ | 8 | "Can't bind to source address" |
+ | 9 | "Out of local source ports on the system" |
+ | 10 | "Local source address already in use" |
+ | 11 | "Connection closed while waiting for PROXY protocol header" |
+ | 12 | "Connection error while waiting for PROXY protocol header" |
+ | 13 | "Timeout while waiting for PROXY protocol header" |
+ | 14 | "Truncated PROXY protocol header received" |
+ | 15 | "Received something which does not look like a PROXY protocol header" |
+ | 16 | "Received an invalid PROXY protocol header" |
+ | 17 | "Received an unhandled protocol in the PROXY protocol header" |
+ | 18 | "Connection closed while waiting for NetScaler Client IP header" |
+ | 19 | "Connection error while waiting for NetScaler Client IP header" |
+ | 20 | "Timeout while waiting for a NetScaler Client IP header" |
+ | 21 | "Truncated NetScaler Client IP header received" |
+ | 22 | "Received an invalid NetScaler Client IP magic number" |
+ | 23 | "Received an unhandled protocol in the NetScaler Client IP header" |
+ | 24 | "Connection closed during SSL handshake" |
+ | 25 | "Connection error during SSL handshake" |
+ | 26 | "Timeout during SSL handshake" |
+ | 27 | "Too many SSL connections" |
+ | 28 | "Out of memory when initializing an SSL connection" |
+ | 29 | "Rejected a client-initiated SSL renegotiation attempt" |
+ | 30 | "SSL client CA chain cannot be verified" |
+ | 31 | "SSL client certificate not trusted" |
+ | 32 | "Server presented an SSL certificate different from the configured one" |
+ | 33 | "Server presented an SSL certificate different from the expected one" |
+ | 34 | "SSL handshake failure" |
+ | 35 | "SSL handshake failure after heartbeat" |
+ | 36 | "Stopped a TLSv1 heartbeat attack (CVE-2014-0160)" |
+ | 37 | "Attempt to use SSL on an unknown target (internal error)" |
+ | 38 | "Server refused early data" |
+ | 39 | "SOCKS4 Proxy write error during handshake" |
+ | 40 | "SOCKS4 Proxy read error during handshake" |
+ | 41 | "SOCKS4 Proxy deny the request" |
+ | 42 | "SOCKS4 Proxy handshake aborted by server" |
+ | 43 | "SSL fatal error" |
+ +----+---------------------------------------------------------------------------+
+
+fc_fackets : integer
+ Returns the fack counter measured by the kernel for the client
+ connection. If the server connection is not established, if the connection is
+ not TCP or if the operating system does not support TCP_INFO, for example
+ Linux kernels before 2.4, the sample fetch fails.
+
+fc_http_major : integer
+ Reports the front connection's HTTP major version encoding, which may be 1
+ for HTTP/0.9 to HTTP/1.1 or 2 for HTTP/2. Note, this is based on the on-wire
+ encoding and not on the version present in the request header.
+
+fc_lost : integer
+ Returns the lost counter measured by the kernel for the client
+ connection. If the server connection is not established, if the connection is
+ not TCP or if the operating system does not support TCP_INFO, for example
+ Linux kernels before 2.4, the sample fetch fails.
+
+fc_pp_authority : string
+ Returns the first authority TLV sent by the client in the PROXY protocol
+ header, if any.
+
+fc_pp_unique_id : string
+ Returns the first unique ID TLV sent by the client in the PROXY protocol
+ header, if any.
+
+fc_pp_tlv(<id>) : string
+ Returns the TLV value for the given TLV ID. The ID must either be a numeric
+ value between 0 and 255 or one of the following supported symbolic names
+ that correspond to the TLV constant suffixes in the PPv2 spec:
+ "ALPN": PP2_TYPE_ALPN, "AUTHORITY": PP2_TYPE_AUTHORITY,
+ "CRC32": PP2_TYPE_CRC32C, "NETNS": PP2_TYPE_NETNS, "NOOP": PP2_TYPE_NOOP,
+ "SSL": PP2_TYPE_SSL, "SSL_CIPHER": PP2_SUBTYPE_SSL_CIPHER,
+ "SSL_CN": PP2_SUBTYPE_SSL_CN, "SSL_KEY_ALG": PP2_SUBTYPE_SSL_KEY_ALG,
+ "SSL_SIG_ALG": PP2_SUBTYPE_SSL_SIG_ALG,
+ "SSL_VERSION": PP2_SUBTYPE_SSL_VERSION, "UNIQUE_ID": PP2_TYPE_UNIQUE_ID.
+
+ The received value must be smaller or equal to 1024 bytes. This is done to
+ prevent potential DoS attacks. Values smaller or equal to 256 bytes will be
+ able to be memory pooled. Therefore, try to restrict the length of sent
+ values to 256 bytes for optimal performance.
+
+ Note that unlike fc_pp_authority and fc_pp_unique_id, fc_pp_tlv is able to
+ iterate over all occurrences of a requested TLV in case there are duplicate
+ TLV IDs. The order of iteration matches the position in the PROXY protocol
+ header. However, relying on duplicates should mostly be avoided as TLVs are
+ typically assumed to be unique. Generally, finding duplicated TLV IDs
+ indicates an error on the sender side of the PROXY protocol header.
+
+fc_rcvd_proxy : boolean
+ Returns true if the client initiated the connection with a PROXY protocol
+ header.
+
+fc_reordering : integer
+ Returns the reordering counter measured by the kernel for the client
+ connection. If the server connection is not established, if the connection is
+ not TCP or if the operating system does not support TCP_INFO, for example
+ Linux kernels before 2.4, the sample fetch fails.
+
+fc_retrans : integer
+ Returns the retransmits counter measured by the kernel for the client
+ connection. If the server connection is not established, if the connection is
+ not TCP or if the operating system does not support TCP_INFO, for example
+ Linux kernels before 2.4, the sample fetch fails.
+
+fc_rtt(<unit>) : integer
+ Returns the Round Trip Time (RTT) measured by the kernel for the client
+ connection. <unit> is facultative, by default the unit is milliseconds. <unit>
+ can be set to "ms" for milliseconds or "us" for microseconds. If the server
+ connection is not established, if the connection is not TCP or if the
+ operating system does not support TCP_INFO, for example Linux kernels before
+ 2.4, the sample fetch fails.
+
+fc_rttvar(<unit>) : integer
+ Returns the Round Trip Time (RTT) variance measured by the kernel for the
+ client connection. <unit> is facultative, by default the unit is milliseconds.
+ <unit> can be set to "ms" for milliseconds or "us" for microseconds. If the
+ server connection is not established, if the connection is not TCP or if the
+ operating system does not support TCP_INFO, for example Linux kernels before
+ 2.4, the sample fetch fails.
+
+fc_sacked : integer
+ Returns the sacked counter measured by the kernel for the client connection.
+ If the server connection is not established, if the connection is not TCP or
+ if the operating system does not support TCP_INFO, for example Linux kernels
+ before 2.4, the sample fetch fails.
+
+fc_src : ip
+ This is the original source IP address of the connection on the client side.
+ Only "tcp-request connection" rules may alter this address. See "src" for
+ details.
+
+fc_src_is_local : boolean
+ Returns true if the source address of incoming connection is local to the
+ system, or false if the address doesn't exist on the system. See
+ "src_is_local" for details.
+
+fc_src_port : integer
+ Returns an integer value corresponding to the TCP source port of the
+ connection on the client side. Only "tcp-request connection" rules may alter
+ this address. See "src_port" for details.
+
+fc_unacked : integer
+ Returns the unacked counter measured by the kernel for the client connection.
+ If the server connection is not established, if the connection is not TCP or
+ if the operating system does not support TCP_INFO, for example Linux kernels
+ before 2.4, the sample fetch fails.
+
+fe_defbe : string
+ Returns a string containing the frontend's default backend name. It can be
+ used in frontends to check which backend will handle requests by default.
+
+fe_id : integer
+ Returns an integer containing the current frontend's id. It can be used in
+ backends to check from which frontend it was called, or to stick all users
+ coming via a same frontend to the same server.
+
+fe_name : string
+ Returns a string containing the current frontend's name. It can be used in
+ backends to check from which frontend it was called, or to stick all users
+ coming via a same frontend to the same server.
+
+fe_client_timeout : integer
+ Returns the configuration value in millisecond for the client timeout of the
+ current frontend. This timeout can be overwritten by a "set-timeout" rule.
+
+res.timer.data : integer
+ This is the total transfer time of the response payload till the last byte
+ sent to the client. In HTTP it starts after the last response header (after
+ Tr). This is the equivalent of %Td in the log-format and is reported in
+ milliseconds (ms). For more information see Section 8.4 "Timing events"
+
+sc_bytes_in_rate(<ctr>[,<table>]) : integer
+sc0_bytes_in_rate([<table>]) : integer
+sc1_bytes_in_rate([<table>]) : integer
+sc2_bytes_in_rate([<table>]) : integer
+ Returns the average client-to-server bytes rate from the currently tracked
+ counters, measured in amount of bytes over the period configured in the
+ table. See also src_bytes_in_rate.
+
+sc_bytes_out_rate(<ctr>[,<table>]) : integer
+sc0_bytes_out_rate([<table>]) : integer
+sc1_bytes_out_rate([<table>]) : integer
+sc2_bytes_out_rate([<table>]) : integer
+ Returns the average server-to-client bytes rate from the currently tracked
+ counters, measured in amount of bytes over the period configured in the
+ table. See also src_bytes_out_rate.
+
+sc_clr_gpc(<idx>,<ctr>[,<table>]) : integer
+ Clears the General Purpose Counter at the index <idx> of the array
+ associated to the designated tracked counter of ID <ctr> from current
+ proxy's stick table or from the designated stick-table <table>, and
+ returns its previous value. <idx> is an integer between 0 and 99 and
+ <ctr> an integer between 0 and 2.
+ Before the first invocation, the stored value is zero, so first invocation
+ will always return zero.
+ This fetch applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types).
+
+sc_clr_gpc0(<ctr>[,<table>]) : integer
+sc0_clr_gpc0([<table>]) : integer
+sc1_clr_gpc0([<table>]) : integer
+sc2_clr_gpc0([<table>]) : integer
+ Clears the first General Purpose Counter associated to the currently tracked
+ counters, and returns its previous value. Before the first invocation, the
+ stored value is zero, so first invocation will always return zero. This is
+ typically used as a second ACL in an expression in order to mark a connection
+ when a first ACL was verified :
+
+ Example:
+ # block if 5 consecutive requests continue to come faster than 10 sess
+ # per second, and reset the counter as soon as the traffic slows down.
+ acl abuse sc0_http_req_rate gt 10
+ acl kill sc0_inc_gpc0 gt 5
+ acl save sc0_clr_gpc0 ge 0
+ tcp-request connection accept if !abuse save
+ tcp-request connection reject if abuse kill
+
+sc_clr_gpc1(<ctr>[,<table>]) : integer
+sc0_clr_gpc1([<table>]) : integer
+sc1_clr_gpc1([<table>]) : integer
+sc2_clr_gpc1([<table>]) : integer
+ Clears the second General Purpose Counter associated to the currently tracked
+ counters, and returns its previous value. Before the first invocation, the
+ stored value is zero, so first invocation will always return zero. This is
+ typically used as a second ACL in an expression in order to mark a connection
+ when a first ACL was verified.
+
+sc_conn_cnt(<ctr>[,<table>]) : integer
+sc0_conn_cnt([<table>]) : integer
+sc1_conn_cnt([<table>]) : integer
+sc2_conn_cnt([<table>]) : integer
+ Returns the cumulative number of incoming connections from currently tracked
+ counters. See also src_conn_cnt.
+
+sc_conn_cur(<ctr>[,<table>]) : integer
+sc0_conn_cur([<table>]) : integer
+sc1_conn_cur([<table>]) : integer
+sc2_conn_cur([<table>]) : integer
+ Returns the current amount of concurrent connections tracking the same
+ tracked counters. This number is automatically incremented when tracking
+ begins and decremented when tracking stops. See also src_conn_cur.
+
+sc_conn_rate(<ctr>[,<table>]) : integer
+sc0_conn_rate([<table>]) : integer
+sc1_conn_rate([<table>]) : integer
+sc2_conn_rate([<table>]) : integer
+ Returns the average connection rate from the currently tracked counters,
+ measured in amount of connections over the period configured in the table.
+ See also src_conn_rate.
+
+sc_get_gpc(<idx>,<ctr>[,<table>]) : integer
+ Returns the value of the General Purpose Counter at the index <idx>
+ in the GPC array and associated to the currently tracked counter of
+ ID <ctr> from the current proxy's stick-table or from the designated
+ stick-table <table>. <idx> is an integer between 0 and 99 and
+ <ctr> an integer between 0 and 2. If there is no GPC stored at this
+ index, zero is returned.
+ This fetch applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types). See also src_get_gpc and sc_inc_gpc.
+
+sc_get_gpc0(<ctr>[,<table>]) : integer
+sc0_get_gpc0([<table>]) : integer
+sc1_get_gpc0([<table>]) : integer
+sc2_get_gpc0([<table>]) : integer
+ Returns the value of the first General Purpose Counter associated to the
+ currently tracked counters. See also src_get_gpc0 and sc/sc0/sc1/sc2_inc_gpc0.
+
+sc_get_gpc1(<ctr>[,<table>]) : integer
+sc0_get_gpc1([<table>]) : integer
+sc1_get_gpc1([<table>]) : integer
+sc2_get_gpc1([<table>]) : integer
+ Returns the value of the second General Purpose Counter associated to the
+ currently tracked counters. See also src_get_gpc1 and sc/sc0/sc1/sc2_inc_gpc1.
+
+sc_get_gpt(<idx>,<ctr>[,<table>]) : integer
+ Returns the value of the first General Purpose Tag at the index <idx> of
+ the array associated to the tracked counter of ID <ctr> and from the
+ current proxy's stick-table or the designated stick-table <table>. <idx>
+ is an integer between 0 and 99 and <ctr> an integer between 0 and 2.
+ If there is no GPT stored at this index, zero is returned.
+ This fetch applies only to the 'gpt' array data_type (and not on
+ the legacy 'gpt0' data-type). See also src_get_gpt.
+
+sc_get_gpt0(<ctr>[,<table>]) : integer
+sc0_get_gpt0([<table>]) : integer
+sc1_get_gpt0([<table>]) : integer
+sc2_get_gpt0([<table>]) : integer
+ Returns the value of the first General Purpose Tag associated to the
+ currently tracked counters. See also src_get_gpt0.
+
+sc_gpc_rate(<idx>,<ctr>[,<table>]) : integer
+ Returns the average increment rate of the General Purpose Counter at the
+ index <idx> of the array associated to the tracked counter of ID <ctr> from
+ the current proxy's table or from the designated stick-table <table>.
+ It reports the frequency which the gpc counter was incremented over the
+ configured period. <idx> is an integer between 0 and 99 and <ctr> an integer
+ between 0 and 2.
+ Note that the 'gpc_rate' counter array must be stored in the stick-table
+ for a value to be returned, as 'gpc' only holds the event count.
+ This fetch applies only to the 'gpc_rate' array data_type (and not to
+ the legacy 'gpc0_rate' nor 'gpc1_rate' data_types).
+ See also src_gpc_rate, sc_get_gpc, and sc_inc_gpc.
+
+sc_gpc0_rate(<ctr>[,<table>]) : integer
+sc0_gpc0_rate([<table>]) : integer
+sc1_gpc0_rate([<table>]) : integer
+sc2_gpc0_rate([<table>]) : integer
+ Returns the average increment rate of the first General Purpose Counter
+ associated to the currently tracked counters. It reports the frequency
+ which the gpc0 counter was incremented over the configured period. See also
+ src_gpc0_rate, sc/sc0/sc1/sc2_get_gpc0, and sc/sc0/sc1/sc2_inc_gpc0. Note
+ that the "gpc0_rate" counter must be stored in the stick-table for a value to
+ be returned, as "gpc0" only holds the event count.
+
+sc_gpc1_rate(<ctr>[,<table>]) : integer
+sc0_gpc1_rate([<table>]) : integer
+sc1_gpc1_rate([<table>]) : integer
+sc2_gpc1_rate([<table>]) : integer
+ Returns the average increment rate of the second General Purpose Counter
+ associated to the currently tracked counters. It reports the frequency
+ which the gpc1 counter was incremented over the configured period. See also
+ src_gpc1_rate, sc/sc0/sc1/sc2_get_gpc1, and sc/sc0/sc1/sc2_inc_gpc1. Note
+ that the "gpc1_rate" counter must be stored in the stick-table for a value to
+ be returned, as "gpc1" only holds the event count.
+
+sc_http_err_cnt(<ctr>[,<table>]) : integer
+sc0_http_err_cnt([<table>]) : integer
+sc1_http_err_cnt([<table>]) : integer
+sc2_http_err_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP errors from the currently tracked
+ counters. This includes both request errors and 4xx error responses.
+ See also src_http_err_cnt.
+
+sc_http_err_rate(<ctr>[,<table>]) : integer
+sc0_http_err_rate([<table>]) : integer
+sc1_http_err_rate([<table>]) : integer
+sc2_http_err_rate([<table>]) : integer
+ Returns the average rate of HTTP errors from the currently tracked counters,
+ measured in amount of errors over the period configured in the table. This
+ includes both request errors and 4xx error responses. See also
+ src_http_err_rate.
+
+sc_http_fail_cnt(<ctr>[,<table>]) : integer
+sc0_http_fail_cnt([<table>]) : integer
+sc1_http_fail_cnt([<table>]) : integer
+sc2_http_fail_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP response failures from the currently
+ tracked counters. This includes both response errors and 5xx status codes
+ other than 501 and 505. See also src_http_fail_cnt.
+
+sc_http_fail_rate(<ctr>[,<table>]) : integer
+sc0_http_fail_rate([<table>]) : integer
+sc1_http_fail_rate([<table>]) : integer
+sc2_http_fail_rate([<table>]) : integer
+ Returns the average rate of HTTP response failures from the currently tracked
+ counters, measured in amount of failures over the period configured in the
+ table. This includes both response errors and 5xx status codes other than
+ 501 and 505. See also src_http_fail_rate.
+
+sc_http_req_cnt(<ctr>[,<table>]) : integer
+sc0_http_req_cnt([<table>]) : integer
+sc1_http_req_cnt([<table>]) : integer
+sc2_http_req_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP requests from the currently tracked
+ counters. This includes every started request, valid or not. See also
+ src_http_req_cnt.
+
+sc_http_req_rate(<ctr>[,<table>]) : integer
+sc0_http_req_rate([<table>]) : integer
+sc1_http_req_rate([<table>]) : integer
+sc2_http_req_rate([<table>]) : integer
+ Returns the average rate of HTTP requests from the currently tracked
+ counters, measured in amount of requests over the period configured in
+ the table. This includes every started request, valid or not. See also
+ src_http_req_rate.
+
+sc_inc_gpc(<idx>,<ctr>[,<table>]) : integer
+ Increments the General Purpose Counter at the index <idx> of the array
+ associated to the designated tracked counter of ID <ctr> from current
+ proxy's stick table or from the designated stick-table <table>, and
+ returns its new value. <idx> is an integer between 0 and 99 and
+ <ctr> an integer between 0 and 2.
+ Before the first invocation, the stored value is zero, so first invocation
+ will increase it to 1 and will return 1.
+ This fetch applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types).
+
+sc_inc_gpc0(<ctr>[,<table>]) : integer
+sc0_inc_gpc0([<table>]) : integer
+sc1_inc_gpc0([<table>]) : integer
+sc2_inc_gpc0([<table>]) : integer
+ Increments the first General Purpose Counter associated to the currently
+ tracked counters, and returns its new value. Before the first invocation,
+ the stored value is zero, so first invocation will increase it to 1 and will
+ return 1. This is typically used as a second ACL in an expression in order
+ to mark a connection when a first ACL was verified :
+
+ Example:
+ acl abuse sc0_http_req_rate gt 10
+ acl kill sc0_inc_gpc0 gt 0
+ tcp-request connection reject if abuse kill
+
+sc_inc_gpc1(<ctr>[,<table>]) : integer
+sc0_inc_gpc1([<table>]) : integer
+sc1_inc_gpc1([<table>]) : integer
+sc2_inc_gpc1([<table>]) : integer
+ Increments the second General Purpose Counter associated to the currently
+ tracked counters, and returns its new value. Before the first invocation,
+ the stored value is zero, so first invocation will increase it to 1 and will
+ return 1. This is typically used as a second ACL in an expression in order
+ to mark a connection when a first ACL was verified.
+
+sc_kbytes_in(<ctr>[,<table>]) : integer
+sc0_kbytes_in([<table>]) : integer
+sc1_kbytes_in([<table>]) : integer
+sc2_kbytes_in([<table>]) : integer
+ Returns the total amount of client-to-server data from the currently tracked
+ counters, measured in kilobytes. The test is currently performed on 32-bit
+ integers, which limits values to 4 terabytes. See also src_kbytes_in.
+
+sc_kbytes_out(<ctr>[,<table>]) : integer
+sc0_kbytes_out([<table>]) : integer
+sc1_kbytes_out([<table>]) : integer
+sc2_kbytes_out([<table>]) : integer
+ Returns the total amount of server-to-client data from the currently tracked
+ counters, measured in kilobytes. The test is currently performed on 32-bit
+ integers, which limits values to 4 terabytes. See also src_kbytes_out.
+
+sc_sess_cnt(<ctr>[,<table>]) : integer
+sc0_sess_cnt([<table>]) : integer
+sc1_sess_cnt([<table>]) : integer
+sc2_sess_cnt([<table>]) : integer
+ Returns the cumulative number of incoming connections that were transformed
+ into sessions, which means that they were accepted by a "tcp-request
+ connection" rule, from the currently tracked counters. A backend may count
+ more sessions than connections because each connection could result in many
+ backend sessions if some HTTP keep-alive is performed over the connection
+ with the client. See also src_sess_cnt.
+
+sc_sess_rate(<ctr>[,<table>]) : integer
+sc0_sess_rate([<table>]) : integer
+sc1_sess_rate([<table>]) : integer
+sc2_sess_rate([<table>]) : integer
+ Returns the average session rate from the currently tracked counters,
+ measured in amount of sessions over the period configured in the table. A
+ session is a connection that got past the early "tcp-request connection"
+ rules. A backend may count more sessions than connections because each
+ connection could result in many backend sessions if some HTTP keep-alive is
+ performed over the connection with the client. See also src_sess_rate.
+
+sc_tracked(<ctr>[,<table>]) : boolean
+sc0_tracked([<table>]) : boolean
+sc1_tracked([<table>]) : boolean
+sc2_tracked([<table>]) : boolean
+ Returns true if the designated session counter is currently being tracked by
+ the current session. This can be useful when deciding whether or not we want
+ to set some values in a header passed to the server.
+
+sc_trackers(<ctr>[,<table>]) : integer
+sc0_trackers([<table>]) : integer
+sc1_trackers([<table>]) : integer
+sc2_trackers([<table>]) : integer
+ Returns the current amount of concurrent connections tracking the same
+ tracked counters. This number is automatically incremented when tracking
+ begins and decremented when tracking stops. It differs from sc0_conn_cur in
+ that it does not rely on any stored information but on the table's reference
+ count (the "use" value which is returned by "show table" on the CLI). This
+ may sometimes be more suited for layer7 tracking. It can be used to tell a
+ server how many concurrent connections there are from a given address for
+ example.
+
+so_id : integer
+ Returns an integer containing the current listening socket's id. It is useful
+ in frontends involving many "bind" lines, or to stick all users coming via a
+ same socket to the same server.
+
+so_name : string
+ Returns a string containing the current listening socket's name, as defined
+ with name on a "bind" line. It can serve the same purposes as so_id but with
+ strings instead of integers.
+
+src : ip
+ This is the source IP address of the client of the session. Any tcp/http
+ rules may alter this address. It is of type IP and works on both IPv4 and
+ IPv6 tables. On IPv6 tables, IPv4 addresses are mapped to their IPv6
+ equivalent, according to RFC 4291. Note that it is the TCP-level source
+ address which is used, and not the address of a client behind a
+ proxy. However if the "accept-proxy" or "accept-netscaler-cip" bind directive
+ is used, it can be the address of a client behind another PROXY-protocol
+ compatible component for all rule sets except "tcp-request connection" which
+ sees the real address. When the incoming connection passed through address
+ translation or redirection involving connection tracking, the original
+ destination address before the redirection will be reported. On Linux
+ systems, the source and destination may seldom appear reversed if the
+ nf_conntrack_tcp_loose sysctl is set, because a late response may reopen a
+ timed out connection and switch what is believed to be the source and the
+ destination.
+
+ Example:
+ # add an HTTP header in requests with the originating address' country
+ http-request set-header X-Country %[src,map_ip(geoip.lst)]
+
+src_bytes_in_rate([<table>]) : integer
+ Returns the average bytes rate from the incoming connection's source address
+ in the current proxy's stick-table or in the designated stick-table, measured
+ in amount of bytes over the period configured in the table. If the address is
+ not found, zero is returned. See also sc/sc0/sc1/sc2_bytes_in_rate.
+
+src_bytes_out_rate([<table>]) : integer
+ Returns the average bytes rate to the incoming connection's source address in
+ the current proxy's stick-table or in the designated stick-table, measured in
+ amount of bytes over the period configured in the table. If the address is
+ not found, zero is returned. See also sc/sc0/sc1/sc2_bytes_out_rate.
+
+src_clr_gpc(<idx>,[<table>]) : integer
+ Clears the General Purpose Counter at the index <idx> of the array
+ associated to the incoming connection's source address in the current proxy's
+ stick-table or in the designated stick-table <table>, and returns its
+ previous value. <idx> is an integer between 0 and 99.
+ If the address is not found, an entry is created and 0 is returned.
+ This fetch applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types).
+ See also sc_clr_gpc.
+
+src_clr_gpc0([<table>]) : integer
+ Clears the first General Purpose Counter associated to the incoming
+ connection's source address in the current proxy's stick-table or in the
+ designated stick-table, and returns its previous value. If the address is not
+ found, an entry is created and 0 is returned. This is typically used as a
+ second ACL in an expression in order to mark a connection when a first ACL
+ was verified :
+
+ Example:
+ # block if 5 consecutive requests continue to come faster than 10 sess
+ # per second, and reset the counter as soon as the traffic slows down.
+ acl abuse src_http_req_rate gt 10
+ acl kill src_inc_gpc0 gt 5
+ acl save src_clr_gpc0 ge 0
+ tcp-request connection accept if !abuse save
+ tcp-request connection reject if abuse kill
+
+src_clr_gpc1([<table>]) : integer
+ Clears the second General Purpose Counter associated to the incoming
+ connection's source address in the current proxy's stick-table or in the
+ designated stick-table, and returns its previous value. If the address is not
+ found, an entry is created and 0 is returned. This is typically used as a
+ second ACL in an expression in order to mark a connection when a first ACL
+ was verified.
+
+src_conn_cnt([<table>]) : integer
+ Returns the cumulative number of connections initiated from the current
+ incoming connection's source address in the current proxy's stick-table or in
+ the designated stick-table. If the address is not found, zero is returned.
+ See also sc/sc0/sc1/sc2_conn_cnt.
+
+src_conn_cur([<table>]) : integer
+ Returns the current amount of concurrent connections initiated from the
+ current incoming connection's source address in the current proxy's
+ stick-table or in the designated stick-table. If the address is not found,
+ zero is returned. See also sc/sc0/sc1/sc2_conn_cur.
+
+src_conn_rate([<table>]) : integer
+ Returns the average connection rate from the incoming connection's source
+ address in the current proxy's stick-table or in the designated stick-table,
+ measured in amount of connections over the period configured in the table. If
+ the address is not found, zero is returned. See also sc/sc0/sc1/sc2_conn_rate.
+
+src_get_gpc(<idx>,[<table>]) : integer
+ Returns the value of the General Purpose Counter at the index <idx> of the
+ array associated to the incoming connection's source address in the
+ current proxy's stick-table or in the designated stick-table <table>. <idx>
+ is an integer between 0 and 99.
+ If the address is not found or there is no gpc stored at this index, zero
+ is returned.
+ This fetch applies only to the 'gpc' array data_type (and not on the legacy
+ 'gpc0' nor 'gpc1' data_types).
+ See also sc_get_gpc and src_inc_gpc.
+
+src_get_gpc0([<table>]) : integer
+ Returns the value of the first General Purpose Counter associated to the
+ incoming connection's source address in the current proxy's stick-table or in
+ the designated stick-table. If the address is not found, zero is returned.
+ See also sc/sc0/sc1/sc2_get_gpc0 and src_inc_gpc0.
+
+src_get_gpc1([<table>]) : integer
+ Returns the value of the second General Purpose Counter associated to the
+ incoming connection's source address in the current proxy's stick-table or in
+ the designated stick-table. If the address is not found, zero is returned.
+ See also sc/sc0/sc1/sc2_get_gpc1 and src_inc_gpc1.
+
+src_get_gpt(<idx>[,<table>]) : integer
+ Returns the value of the General Purpose Tag at the index <idx> of
+ the array associated to the incoming connection's source address in the
+ current proxy's stick-table or in the designated stick-table <table>.
+ <idx> is an integer between 0 and 99.
+ If the address is not found or the GPT is not stored, zero is returned.
+ See also the sc_get_gpt sample fetch keyword.
+
+src_get_gpt0([<table>]) : integer
+ Returns the value of the first General Purpose Tag associated to the
+ incoming connection's source address in the current proxy's stick-table or in
+ the designated stick-table. If the address is not found, zero is returned.
+ See also sc/sc0/sc1/sc2_get_gpt0.
+
+src_gpc_rate(<idx>[,<table>]) : integer
+ Returns the average increment rate of the General Purpose Counter at the
+ index <idx> of the array associated to the incoming connection's
+ source address in the current proxy's stick-table or in the designated
+ stick-table <table>. It reports the frequency at which the gpc counter was
+ incremented over the configured period. <idx> is an integer between 0 and 99.
+ Note that the 'gpc_rate' counter must be stored in the stick-table for a
+ value to be returned, as 'gpc' only holds the event count.
+ This fetch applies only to the 'gpc_rate' array data_type (and not to
+ the legacy 'gpc0_rate' nor 'gpc1_rate' data_types).
+ See also sc_gpc_rate, src_get_gpc, and sc_inc_gpc.
+
+src_gpc0_rate([<table>]) : integer
+ Returns the average increment rate of the first General Purpose Counter
+ associated to the incoming connection's source address in the current proxy's
+ stick-table or in the designated stick-table. It reports the frequency
+ at which the gpc0 counter was incremented over the configured period. See also
+ sc/sc0/sc1/sc2_gpc0_rate, src_get_gpc0, and sc/sc0/sc1/sc2_inc_gpc0. Note
+ that the "gpc0_rate" counter must be stored in the stick-table for a value to
+ be returned, as "gpc0" only holds the event count.
+
+src_gpc1_rate([<table>]) : integer
+ Returns the average increment rate of the second General Purpose Counter
+ associated to the incoming connection's source address in the current proxy's
+ stick-table or in the designated stick-table. It reports the frequency
+ at which the gpc1 counter was incremented over the configured period. See also
+ sc/sc0/sc1/sc2_gpc1_rate, src_get_gpc1, and sc/sc0/sc1/sc2_inc_gpc1. Note
+ that the "gpc1_rate" counter must be stored in the stick-table for a value to
+ be returned, as "gpc1" only holds the event count.
+
+src_http_err_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP errors from the incoming connection's
+ source address in the current proxy's stick-table or in the designated
+ stick-table. This includes both request errors and 4xx error responses.
+ See also sc/sc0/sc1/sc2_http_err_cnt. If the address is not found, zero is
+ returned.
+
+src_http_err_rate([<table>]) : integer
+ Returns the average rate of HTTP errors from the incoming connection's source
+ address in the current proxy's stick-table or in the designated stick-table,
+ measured in amount of errors over the period configured in the table. This
+ includes both request errors and 4xx error responses. If the address is
+ not found, zero is returned. See also sc/sc0/sc1/sc2_http_err_rate.
+
+src_http_fail_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP response failures triggered by the
+ incoming connection's source address in the current proxy's stick-table or in
+ the designated stick-table. This includes both response errors and 5xx
+ status codes other than 501 and 505. See also sc/sc0/sc1/sc2_http_fail_cnt.
+ If the address is not found, zero is returned.
+
+src_http_fail_rate([<table>]) : integer
+ Returns the average rate of HTTP response failures triggered by the incoming
+ connection's source address in the current proxy's stick-table or in the
+ designated stick-table, measured in amount of failures over the period
+ configured in the table. This includes both response errors and 5xx
+ status codes other than 501 and 505. If the address is not found, zero is
+ returned. See also sc/sc0/sc1/sc2_http_fail_rate.
+
+src_http_req_cnt([<table>]) : integer
+ Returns the cumulative number of HTTP requests from the incoming connection's
+ source address in the current proxy's stick-table or in the designated stick-
+ table. This includes every started request, valid or not. If the address is
+ not found, zero is returned. See also sc/sc0/sc1/sc2_http_req_cnt.
+
+src_http_req_rate([<table>]) : integer
+ Returns the average rate of HTTP requests from the incoming connection's
+ source address in the current proxy's stick-table or in the designated stick-
+ table, measured in amount of requests over the period configured in the
+ table. This includes every started request, valid or not. If the address is
+ not found, zero is returned. See also sc/sc0/sc1/sc2_http_req_rate.
+
+src_inc_gpc(<idx>,[<table>]) : integer
+ Increments the General Purpose Counter at index <idx> of the array
+ associated to the incoming connection's source address in the current proxy's
+ stick-table or in the designated stick-table <table>, and returns its new
+ value. <idx> is an integer between 0 and 99.
+ If the address is not found, an entry is created and 1 is returned.
+ This fetch applies only to the 'gpc' array data_type (and not to the legacy
+ 'gpc0' nor 'gpc1' data_types).
+ See also sc_inc_gpc.
+
+src_inc_gpc0([<table>]) : integer
+ Increments the first General Purpose Counter associated to the incoming
+ connection's source address in the current proxy's stick-table or in the
+ designated stick-table, and returns its new value. If the address is not
+ found, an entry is created and 1 is returned. See also sc0/sc1/sc2_inc_gpc0.
+ This is typically used as a second ACL in an expression in order to mark a
+ connection when a first ACL was verified :
+
+ Example:
+ acl abuse src_http_req_rate gt 10
+ acl kill src_inc_gpc0 gt 0
+ tcp-request connection reject if abuse kill
+
+src_inc_gpc1([<table>]) : integer
+ Increments the second General Purpose Counter associated to the incoming
+ connection's source address in the current proxy's stick-table or in the
+ designated stick-table, and returns its new value. If the address is not
+ found, an entry is created and 1 is returned. See also sc0/sc1/sc2_inc_gpc1.
+ This is typically used as a second ACL in an expression in order to mark a
+ connection when a first ACL was verified.
+
+src_is_local : boolean
+ Returns true if the source address of the incoming connection is local to the
+ system, or false if the address doesn't exist on the system, meaning that it
+ comes from a remote machine. Note that UNIX addresses are considered local.
+ It can be useful to apply certain access restrictions based on where the
+ client comes from (e.g. require auth or https for remote machines). Please
+ note that the check involves a few system calls, so it's better to do it only
+ once per connection.
+
+src_kbytes_in([<table>]) : integer
+ Returns the total amount of data received from the incoming connection's
+ source address in the current proxy's stick-table or in the designated
+ stick-table, measured in kilobytes. If the address is not found, zero is
+ returned. The test is currently performed on 32-bit integers, which limits
+ values to 4 terabytes. See also sc/sc0/sc1/sc2_kbytes_in.
+
+src_kbytes_out([<table>]) : integer
+ Returns the total amount of data sent to the incoming connection's source
+ address in the current proxy's stick-table or in the designated stick-table,
+ measured in kilobytes. If the address is not found, zero is returned. The
+ test is currently performed on 32-bit integers, which limits values to 4
+ terabytes. See also sc/sc0/sc1/sc2_kbytes_out.
+
+src_port : integer
+ Returns an integer value corresponding to the TCP source port of the
+ connection on the client side, which is the port the client connected
+ from. Any tcp/http rules may alter this address. Usage of this function is
+ very limited as modern protocols do not care much about source ports
+ nowadays.
+
+src_sess_cnt([<table>]) : integer
+ Returns the cumulative number of connections initiated from the incoming
+ connection's source IPv4 address in the current proxy's stick-table or in the
+ designated stick-table, that were transformed into sessions, which means that
+ they were accepted by "tcp-request" rules. If the address is not found, zero
+ is returned. See also sc/sc0/sc1/sc2_sess_cnt.
+
+src_sess_rate([<table>]) : integer
+ Returns the average session rate from the incoming connection's source
+ address in the current proxy's stick-table or in the designated stick-table,
+ measured in amount of sessions over the period configured in the table. A
+ session is a connection that went past the early "tcp-request" rules. If the
+ address is not found, zero is returned. See also sc/sc0/sc1/sc2_sess_rate.
+
+src_updt_conn_cnt([<table>]) : integer
+ Creates or updates the entry associated to the incoming connection's source
+ address in the current proxy's stick-table or in the designated stick-table.
+ This table must be configured to store the "conn_cnt" data type, otherwise
+ the match will be ignored. The current count is incremented by one, and the
+ expiration timer refreshed. The updated count is returned, so this match
+ can't return zero. This was used to reject service abusers based on their
+ source address. Note: it is recommended to use the more complete "track-sc*"
+ actions in "tcp-request" rules instead.
+
+ Example :
+ # This frontend limits incoming SSH connections to 3 per 10 second for
+ # each source address, and rejects excess connections until a 10 second
+ # silence is observed. At most 20 addresses are tracked.
+ listen ssh
+ bind :22
+ mode tcp
+ maxconn 100
+ stick-table type ip size 20 expire 10s store conn_cnt
+ tcp-request content reject if { src_updt_conn_cnt gt 3 }
+ server local 127.0.0.1:22
+
+srv_id : integer
+ Returns an integer containing the server's id when processing the response.
+ While it's almost only used with ACLs, it may be used for logging or
+ debugging. It can also be used in a tcp-check or an http-check ruleset.
+
+srv_name : string
+ Returns a string containing the server's name when processing the response.
+ While it's almost only used with ACLs, it may be used for logging or
+ debugging. It can also be used in a tcp-check or an http-check ruleset.
+
+txn.conn_retries : integer
+ Returns the number of connection retries experienced by this stream when
+ trying to connect to the server. This value is subject to change while the
+ connection is not fully established. For HTTP connections, the value may be
+ affected by L7 retries.
+
+7.3.4. Fetching samples at Layer 5
+----------------------------------
+
+The layer 5 usually describes just the session layer which in HAProxy is
+closest to the session once all the connection handshakes are finished, but
+when no content is yet made available. The fetch methods described here are
+usable as low as the "tcp-request content" rule sets unless they require some
+future information. Those generally include the results of SSL negotiations.
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+-------------------------------------------------+-------------
+51d.all(<prop>[,<prop>*]) string
+ssl_bc boolean
+ssl_bc_alg_keysize integer
+ssl_bc_alpn string
+ssl_bc_cipher string
+ssl_bc_client_random binary
+ssl_bc_curve string
+ssl_bc_err integer
+ssl_bc_err_str string
+ssl_bc_is_resumed boolean
+ssl_bc_npn string
+ssl_bc_protocol string
+ssl_bc_unique_id binary
+ssl_bc_server_random binary
+ssl_bc_session_id binary
+ssl_bc_session_key binary
+ssl_bc_use_keysize integer
+ssl_c_ca_err integer
+ssl_c_ca_err_depth integer
+ssl_c_chain_der binary
+ssl_c_der binary
+ssl_c_err integer
+ssl_c_i_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_c_key_alg string
+ssl_c_notafter string
+ssl_c_notbefore string
+ssl_c_r_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_c_s_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_c_serial binary
+ssl_c_sha1 binary
+ssl_c_sig_alg string
+ssl_c_used boolean
+ssl_c_verify integer
+ssl_c_version integer
+ssl_f_der binary
+ssl_f_i_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_f_key_alg string
+ssl_f_notafter string
+ssl_f_notbefore string
+ssl_f_s_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_f_serial binary
+ssl_f_sha1 binary
+ssl_f_sig_alg string
+ssl_f_version integer
+ssl_fc boolean
+ssl_fc_alg_keysize integer
+ssl_fc_alpn string
+ssl_fc_cipher string
+ssl_fc_cipherlist_bin([<filter_option>]) binary
+ssl_fc_cipherlist_hex([<filter_option>]) string
+ssl_fc_cipherlist_str([<filter_option>]) string
+ssl_fc_cipherlist_xxh integer
+ssl_fc_curve string
+ssl_fc_ecformats_bin binary
+ssl_fc_eclist_bin([<filter_option>]) binary
+ssl_fc_extlist_bin([<filter_option>]) binary
+ssl_fc_client_random binary
+ssl_fc_client_early_traffic_secret string
+ssl_fc_client_handshake_traffic_secret string
+ssl_fc_client_traffic_secret_0 string
+ssl_fc_exporter_secret string
+ssl_fc_early_exporter_secret string
+ssl_fc_err integer
+ssl_fc_err_str string
+ssl_fc_has_crt boolean
+ssl_fc_has_early boolean
+ssl_fc_has_sni boolean
+ssl_fc_is_resumed boolean
+ssl_fc_npn string
+ssl_fc_protocol string
+ssl_fc_protocol_hello_id integer
+ssl_fc_unique_id binary
+ssl_fc_server_handshake_traffic_secret string
+ssl_fc_server_traffic_secret_0 string
+ssl_fc_server_random binary
+ssl_fc_session_id binary
+ssl_fc_session_key binary
+ssl_fc_sni string
+ssl_fc_use_keysize integer
+ssl_s_der binary
+ssl_s_chain_der binary
+ssl_s_key_alg string
+ssl_s_notafter string
+ssl_s_notbefore string
+ssl_s_i_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_s_s_dn([<entry>[,<occ>[,<format>]]]) string
+ssl_s_serial binary
+ssl_s_sha1 binary
+ssl_s_sig_alg string
+ssl_s_version integer
+txn.timer.user integer
+-------------------------------------------------+-------------
+
+Detailed list:
+
+51d.all(<prop>[,<prop>*]) : string
+ Returns values for the properties requested as a string, where values are
+ separated by the delimiter specified with "51degrees-property-separator".
+ The device is identified using all the important HTTP headers from the
+ request. The function can be passed up to five property names, and if a
+ property name can't be found, the value "NoData" is returned.
+
+ Example :
+ # Here the header "X-51D-DeviceTypeMobileTablet" is added to the request
+ # containing the three properties requested using all relevant headers from
+ # the request.
+ frontend http-in
+ bind *:8081
+ default_backend servers
+ http-request set-header X-51D-DeviceTypeMobileTablet \
+ %[51d.all(DeviceType,IsMobile,IsTablet)]
+
+ssl_bc : boolean
+ Returns true when the back connection was made via an SSL/TLS transport
+ layer and is locally deciphered. This means the outgoing connection was made
+ to a server with the "ssl" option. It can be used in a tcp-check or an
+ http-check ruleset.
+
+ssl_bc_alg_keysize : integer
+ Returns the symmetric cipher key size supported in bits when the outgoing
+ connection was made over an SSL/TLS transport layer. It can be used in a
+ tcp-check or an http-check ruleset.
+
+ssl_bc_alpn : string
+ This extracts the Application Layer Protocol Negotiation field from an
+ outgoing connection made via a TLS transport layer.
+ The result is a string containing the protocol name negotiated with the
+ server. The SSL library must have been built with support for TLS
+ extensions enabled (check haproxy -vv). Note that the TLS ALPN extension is
+ not advertised unless the "alpn" keyword on the "server" line specifies a
+ protocol list. Also, nothing forces the server to pick a protocol from this
+ list, any other one may be requested. The TLS ALPN extension is meant to
+ replace the TLS NPN extension. See also "ssl_bc_npn". It can be used in a
+ tcp-check or an http-check ruleset.
+
+ssl_bc_cipher : string
+ Returns the name of the used cipher when the outgoing connection was made
+ over an SSL/TLS transport layer. It can be used in a tcp-check or an
+ http-check ruleset.
+
+ssl_bc_client_random : binary
+ Returns the client random of the back connection when the incoming connection
+ was made over an SSL/TLS transport layer. It is useful to decrypt traffic
+ sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or BoringSSL.
+ It can be used in a tcp-check or an http-check ruleset.
+
+ssl_bc_curve : string
+ Returns the name of the curve used in the key agreement when the outgoing
+ connection was made over an SSL/TLS transport layer. This requires
+ OpenSSL >= 3.0.0.
+
+ssl_bc_err : integer
+ When the outgoing connection was made over an SSL/TLS transport layer,
+ returns the ID of the last error of the first error stack raised on the
+ backend side. It can raise handshake errors as well as other read or write
+ errors occurring during the connection's lifetime. In order to get a text
+ description of this error code, you can either use the "ssl_bc_err_str"
+ sample fetch or use the "openssl errstr" command (which takes an error code
+ in hexadecimal representation as parameter). Please refer to your SSL
+ library's documentation to find the exhaustive list of error codes.
+
+ssl_bc_err_str : string
+ When the outgoing connection was made over an SSL/TLS transport layer,
+ returns a string representation of the last error of the first error stack
+ that was raised on the connection from the backend's perspective. See also
+ "ssl_fc_err".
+
+ssl_bc_is_resumed : boolean
+ Returns true when the back connection was made over an SSL/TLS transport
+ layer and the newly created SSL session was resumed using a cached
+ session or a TLS ticket. It can be used in a tcp-check or an http-check
+ ruleset.
+
+ssl_bc_npn : string
+ This extracts the Next Protocol Negotiation field from an outgoing connection
+ made via a TLS transport layer. The result is a string containing the
+ protocol name negotiated with the server. The SSL library must have been
+ built with support for TLS extensions enabled (check haproxy -vv). Note that
+ the TLS NPN extension is not advertised unless the "npn" keyword on the
+ "server" line specifies a protocol list. Also, nothing forces the server to
+ pick a protocol from this list, any other one may be used. Please note that
+ the TLS NPN extension was replaced with ALPN. It can be used in a tcp-check
+ or an http-check ruleset.
+
+ssl_bc_protocol : string
+ Returns the name of the used protocol when the outgoing connection was made
+ over an SSL/TLS transport layer. It can be used in a tcp-check or an
+ http-check ruleset.
+
+ssl_bc_unique_id : binary
+ When the outgoing connection was made over an SSL/TLS transport layer,
+ returns the TLS unique ID as defined in RFC5929 section 3. The unique id
+ can be encoded to base64 using the converter: "ssl_bc_unique_id,base64". It
+ can be used in a tcp-check or an http-check ruleset.
+
+ssl_bc_server_random : binary
+ Returns the server random of the back connection when the incoming connection
+ was made over an SSL/TLS transport layer. It is useful to decrypt traffic
+ sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or BoringSSL.
+ It can be used in a tcp-check or an http-check ruleset.
+
+ssl_bc_session_id : binary
+ Returns the SSL ID of the back connection when the outgoing connection was
+ made over an SSL/TLS transport layer. It is useful to log if we want to know
+ if session was reused or not. It can be used in a tcp-check or an http-check
+ ruleset.
+
+ssl_bc_session_key : binary
+ Returns the SSL session master key of the back connection when the outgoing
+ connection was made over an SSL/TLS transport layer. It is useful to decrypt
+ traffic sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or
+ BoringSSL. It can be used in a tcp-check or an http-check ruleset.
+
+ssl_bc_use_keysize : integer
+ Returns the symmetric cipher key size used in bits when the outgoing
+ connection was made over an SSL/TLS transport layer. It can be used in a
+ tcp-check or an http-check ruleset.
+
+ssl_c_ca_err : integer
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the ID of the first error detected during verification of the client
+ certificate at depth > 0, or 0 if no error was encountered during this
+ verification process. Please refer to your SSL library's documentation to
+ find the exhaustive list of error codes.
+
+ssl_c_ca_err_depth : integer
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the depth in the CA chain of the first error detected during the
+ verification of the client certificate. If no error is encountered, 0 is
+ returned.
+
+ssl_c_chain_der : binary
+ Returns the DER formatted chain certificate presented by the client when the
+ incoming connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form. One
+ can parse the result with any lib accepting ASN.1 DER data. It currently
+ does not support resumed sessions.
+
+ssl_c_der : binary
+ Returns the DER formatted certificate presented by the client when the
+ incoming connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_c_err : integer
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the ID of the first error detected during verification at depth 0, or
+ 0 if no error was encountered during this verification process. Please refer
+ to your SSL library's documentation to find the exhaustive list of error
+ codes.
+
+ssl_c_i_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the issuer of the certificate
+ presented by the client when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_c_i_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_c_i_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_c_i_dn(,0,rfc2253)
+
+ssl_c_key_alg : string
+ Returns the name of the algorithm used to generate the key of the certificate
+ presented by the client when the incoming connection was made over an SSL/TLS
+ transport layer.
+
+ssl_c_notafter : string
+ Returns the end date presented by the client as a formatted string
+ YYMMDDhhmmss[Z] when the incoming connection was made over an SSL/TLS
+ transport layer.
+
+ssl_c_notbefore : string
+ Returns the start date presented by the client as a formatted string
+ YYMMDDhhmmss[Z] when the incoming connection was made over an SSL/TLS
+ transport layer.
+
+ssl_c_r_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the incoming connection was made over an SSL/TLS transport layer, and is
+ successfully validated with the configured ca-file, returns the full
+ distinguished name of the root CA of the certificate presented by the client
+ when no <entry> is specified, or the value of the first given entry found from
+ the beginning of the DN. If a positive/negative occurrence number is specified
+ as the optional second argument, it returns the value of the nth given entry
+ value from the beginning/end of the DN. For instance, "ssl_c_r_dn(OU,2)"
+ retrieves the second organization unit, and "ssl_c_r_dn(CN)" retrieves the
+ common name. The
+ <format> parameter allows you to receive the DN suitable for consumption by
+ different protocols. Currently supported is rfc2253 for LDAP v3. If you'd like
+ to modify the format only you can specify an empty string and zero for the
+ first two parameters. Example: ssl_c_r_dn(,0,rfc2253)
+
+ssl_c_s_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the subject of the certificate
+ presented by the client when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_c_s_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_c_s_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_c_s_dn(,0,rfc2253)
+
+ssl_c_serial : binary
+ Returns the serial of the certificate presented by the client when the
+ incoming connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_c_sha1 : binary
+ Returns the SHA-1 fingerprint of the certificate presented by the client when
+ the incoming connection was made over an SSL/TLS transport layer. This can be
+ used to stick a client to a server, or to pass this information to a server.
+ Note that the output is binary, so if you want to pass that signature to the
+ server, you need to encode it in hex or base64, such as in the example below:
+
+ Example:
+ http-request set-header X-SSL-Client-SHA1 %[ssl_c_sha1,hex]
+
+ssl_c_sig_alg : string
+ Returns the name of the algorithm used to sign the certificate presented by
+ the client when the incoming connection was made over an SSL/TLS transport
+ layer.
+
+ssl_c_used : boolean
+ Returns true if current SSL session uses a client certificate even if current
+ connection uses SSL session resumption. See also "ssl_fc_has_crt".
+
+ssl_c_verify : integer
+ Returns the verify result error ID when the incoming connection was made over
+ an SSL/TLS transport layer, otherwise zero if no error is encountered. Please
+ refer to your SSL library's documentation for an exhaustive list of error
+ codes.
+
+ssl_c_version : integer
+ Returns the version of the certificate presented by the client when the
+ incoming connection was made over an SSL/TLS transport layer.
+
+ssl_f_der : binary
+ Returns the DER formatted certificate presented by the frontend when the
+ incoming connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_f_i_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the issuer of the certificate
+ presented by the frontend when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_f_i_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_f_i_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_f_i_dn(,0,rfc2253)
+
+ssl_f_key_alg : string
+ Returns the name of the algorithm used to generate the key of the certificate
+ presented by the frontend when the incoming connection was made over an
+ SSL/TLS transport layer.
+
+ssl_f_notafter : string
+ Returns the end date presented by the frontend as a formatted string
+ YYMMDDhhmmss[Z] when the incoming connection was made over an SSL/TLS
+ transport layer.
+
+ssl_f_notbefore : string
+ Returns the start date presented by the frontend as a formatted string
+ YYMMDDhhmmss[Z] when the incoming connection was made over an SSL/TLS
+ transport layer.
+
+ssl_f_s_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the subject of the certificate
+ presented by the frontend when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_f_s_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_f_s_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_f_s_dn(,0,rfc2253)
+
+ssl_f_serial : binary
+ Returns the serial of the certificate presented by the frontend when the
+ incoming connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_f_sha1 : binary
+ Returns the SHA-1 fingerprint of the certificate presented by the frontend
+ when the incoming connection was made over an SSL/TLS transport layer. This
+ can be used to know which certificate was chosen using SNI.
+
+ssl_f_sig_alg : string
+ Returns the name of the algorithm used to sign the certificate presented by
+ the frontend when the incoming connection was made over an SSL/TLS transport
+ layer.
+
+ssl_f_version : integer
+ Returns the version of the certificate presented by the frontend when the
+ incoming connection was made over an SSL/TLS transport layer.
+
+ssl_fc : boolean
+ Returns true when the front connection was made via an SSL/TLS transport
+ layer and is locally deciphered. This means it has matched a socket declared
+ with a "bind" line having the "ssl" option.
+
+ Example :
+ # This passes "X-Proto: https" to servers when client connects over SSL
+ listen http-https
+ bind :80
+ bind :443 ssl crt /etc/haproxy.pem
+ http-request add-header X-Proto https if { ssl_fc }
+
+ssl_fc_alg_keysize : integer
+ Returns the symmetric cipher key size supported in bits when the incoming
+ connection was made over an SSL/TLS transport layer.
+
+ssl_fc_alpn : string
+ This extracts the Application Layer Protocol Negotiation field from an
+ incoming connection made via a TLS transport layer and locally deciphered by
+ HAProxy. The result is a string containing the protocol name advertised by
+ the client. The SSL library must have been built with support for TLS
+ extensions enabled (check haproxy -vv). Note that the TLS ALPN extension is
+ not advertised unless the "alpn" keyword on the "bind" line specifies a
+ protocol list. Also, nothing forces the client to pick a protocol from this
+ list, any other one may be requested. The TLS ALPN extension is meant to
+ replace the TLS NPN extension. See also "ssl_fc_npn".
+
+ssl_fc_cipher : string
+ Returns the name of the used cipher when the incoming connection was made
+ over an SSL/TLS transport layer.
+
+ssl_fc_cipherlist_bin([<filter_option>]) : binary
+ Returns the binary form of the client hello cipher list. The maximum
+ returned value length is limited by the shared capture buffer size
+ controlled by "tune.ssl.capture-buffer-size" setting. Setting
+ <filter_option> allows to filter returned data. Accepted values:
+ 0 : return the full list of ciphers (default)
+ 1 : exclude GREASE (RFC8701) values from the output
+
+ Example:
+ http-request set-header X-SSL-JA3 %[ssl_fc_protocol_hello_id],\
+ %[ssl_fc_cipherlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_extlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_eclist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_ecformats_bin,be2dec(-,1)]
+ acl is_malware req.fhdr(x-ssl-ja3),digest(md5),hex \
+ -f /path/to/file/with/malware-ja3.lst
+ http-request set-header X-Malware True if is_malware
+ http-request set-header X-Malware False if !is_malware
+
+ssl_fc_cipherlist_hex([<filter_option>]) : string
+ Returns the binary form of the client hello cipher list encoded as
+ hexadecimal. The maximum returned value length is limited by the shared
+ capture buffer size controlled by "tune.ssl.capture-buffer-size" setting.
+ Setting <filter_option> allows to filter returned data. Accepted values:
+ 0 : return the full list of ciphers (default)
+ 1 : exclude GREASE (RFC8701) values from the output
+
+ssl_fc_cipherlist_str([<filter_option>]) : string
+ Returns the decoded text form of the client hello cipher list. The maximum
+ returned value length is limited by the shared capture buffer size
+ controlled by "tune.ssl.capture-buffer-size" setting. Setting
+ <filter_option> allows to filter returned data. Accepted values:
+ 0 : return the full list of ciphers (default)
+ 1 : exclude GREASE (RFC8701) values from the output
+ Note that this sample-fetch is only available with OpenSSL >= 1.0.2. If the
+ function is not enabled, this sample-fetch returns the hash like
+ "ssl_fc_cipherlist_xxh".
+
+ssl_fc_cipherlist_xxh : integer
+ Returns an xxh64 hash of the cipher list. This hash can only be returned if
+ "tune.ssl.capture-buffer-size" is set greater than 0; however, the hash takes
+ into account all the data of the cipher list.
+
+ssl_fc_curve : string
+ Returns the name of the curve used in the key agreement when the incoming
+ connection was made over an SSL/TLS transport layer. This requires
+ OpenSSL >= 3.0.0.
+
+ssl_fc_ecformats_bin : binary
+ Return the binary form of the client hello supported elliptic curve point
+ formats. The maximum returned value length is limited by the shared capture
+ buffer size controlled by "tune.ssl.capture-buffer-size" setting.
+
+ Example:
+ http-request set-header X-SSL-JA3 %[ssl_fc_protocol_hello_id],\
+ %[ssl_fc_cipherlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_extlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_eclist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_ecformats_bin,be2dec(-,1)]
+ acl is_malware req.fhdr(x-ssl-ja3),digest(md5),hex \
+ -f /path/to/file/with/malware-ja3.lst
+ http-request set-header X-Malware True if is_malware
+ http-request set-header X-Malware False if !is_malware
+
+ssl_fc_eclist_bin([<filter_option>]) : binary
+ Returns the binary form of the client hello supported elliptic curves. The
+ maximum returned value length is limited by the shared capture buffer size
+ controlled by "tune.ssl.capture-buffer-size" setting. Setting
+ <filter_option> allows to filter returned data. Accepted values:
+ 0 : return the full list of supported elliptic curves (default)
+ 1 : exclude GREASE (RFC8701) values from the output
+
+ Example:
+ http-request set-header X-SSL-JA3 %[ssl_fc_protocol_hello_id],\
+ %[ssl_fc_cipherlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_extlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_eclist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_ecformats_bin,be2dec(-,1)]
+ acl is_malware req.fhdr(x-ssl-ja3),digest(md5),hex \
+ -f /path/to/file/with/malware-ja3.lst
+ http-request set-header X-Malware True if is_malware
+ http-request set-header X-Malware False if !is_malware
+
+ssl_fc_extlist_bin([<filter_option>]) : binary
+ Returns the binary form of the client hello extension list. The maximum
+ returned value length is limited by the shared capture buffer size
+ controlled by "tune.ssl.capture-buffer-size" setting. Setting
+ <filter_option> allows to filter returned data. Accepted values:
+ 0 : return the full list of extensions (default)
+ 1 : exclude GREASE (RFC8701) values from the output
+
+ Example:
+ http-request set-header X-SSL-JA3 %[ssl_fc_protocol_hello_id],\
+ %[ssl_fc_cipherlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_extlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_eclist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_ecformats_bin,be2dec(-,1)]
+ acl is_malware req.fhdr(x-ssl-ja3),digest(md5),hex \
+ -f /path/to/file/with/malware-ja3.lst
+ http-request set-header X-Malware True if is_malware
+ http-request set-header X-Malware False if !is_malware
+
+ssl_fc_client_random : binary
+ Returns the client random of the front connection when the incoming connection
+ was made over an SSL/TLS transport layer. It is useful to decrypt traffic
+ sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or BoringSSL.
+
+ssl_fc_client_early_traffic_secret : string
+ Return the CLIENT_EARLY_TRAFFIC_SECRET as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_client_handshake_traffic_secret : string
+ Return the CLIENT_HANDSHAKE_TRAFFIC_SECRET as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_client_traffic_secret_0 : string
+ Return the CLIENT_TRAFFIC_SECRET_0 as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_exporter_secret : string
+ Return the EXPORTER_SECRET as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_early_exporter_secret : string
+ Return the EARLY_EXPORTER_SECRET as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_err : integer
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the ID of the last error of the first error stack raised on the
+ frontend side, or 0 if no error was encountered. It can be used to identify
+ handshake related errors other than verify ones (such as cipher mismatch), as
+ well as other read or write errors occurring during the connection's
+ lifetime. Any error happening during the client's certificate verification
+ process will not be raised through this fetch but via the existing
+ "ssl_c_err", "ssl_c_ca_err" and "ssl_c_ca_err_depth" fetches. In order to get
+ a text description of this error code, you can either use the
+ "ssl_fc_err_str" sample fetch or use the "openssl errstr" command (which
+ takes an error code in hexadecimal representation as parameter). Please refer
+ to your SSL library's documentation to find the exhaustive list of error
+ codes.
+
+ssl_fc_err_str : string
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns a string representation of the last error of the first error stack
+ that was raised on the frontend side. Any error happening during the client's
+ certificate verification process will not be raised through this fetch. See
+ also "ssl_fc_err".
+
+ssl_fc_has_crt : boolean
+ Returns true if a client certificate is present in an incoming connection over
+ SSL/TLS transport layer. Useful if 'verify' statement is set to 'optional'.
+ Note: on SSL session resumption with Session ID or TLS ticket, client
+ certificate is not present in the current connection but may be retrieved
+ from the cache or the ticket. So prefer "ssl_c_used" if you want to check if
+ current SSL session uses a client certificate.
+
+ssl_fc_has_early : boolean
+ Returns true if early data were sent, and the handshake didn't happen yet. As
+ it has security implications, it is useful to be able to refuse those, or
+ wait until the handshake happened.
+
+ssl_fc_has_sni : boolean
+ This checks for the presence of a Server Name Indication TLS extension (SNI)
+ in an incoming connection made over an SSL/TLS transport layer. Returns
+ true when the incoming connection presents a TLS SNI field. This requires
+ that the SSL library is built with support for TLS extensions enabled (check
+ haproxy -vv).
+
+ssl_fc_is_resumed : boolean
+ Returns true if the SSL/TLS session has been resumed through the use of
+ SSL session cache or TLS tickets on an incoming connection over an SSL/TLS
+ transport layer.
+
+ssl_fc_npn : string
+ This extracts the Next Protocol Negotiation field from an incoming connection
+ made via a TLS transport layer and locally deciphered by HAProxy. The result
+ is a string containing the protocol name advertised by the client. The SSL
+ library must have been built with support for TLS extensions enabled (check
+ haproxy -vv). Note that the TLS NPN extension is not advertised unless the
+ "npn" keyword on the "bind" line specifies a protocol list. Also, nothing
+ forces the client to pick a protocol from this list, any other one may be
+ requested. Please note that the TLS NPN extension was replaced with ALPN.
+
+ssl_fc_protocol : string
+ Returns the name of the used protocol when the incoming connection was made
+ over an SSL/TLS transport layer.
+
+ssl_fc_protocol_hello_id : integer
+ The version of the TLS protocol by which the client wishes to communicate
+ during the session as indicated in the client hello message. This value can
+ only be returned if "tune.ssl.capture-buffer-size" is set greater than 0.
+
+ Example:
+ http-request set-header X-SSL-JA3 %[ssl_fc_protocol_hello_id],\
+ %[ssl_fc_cipherlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_extlist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_eclist_bin(1),be2dec(-,2)],\
+ %[ssl_fc_ecformats_bin,be2dec(-,1)]
+ acl is_malware req.fhdr(x-ssl-ja3),digest(md5),hex \
+ -f /path/to/file/with/malware-ja3.lst
+ http-request set-header X-Malware True if is_malware
+ http-request set-header X-Malware False if !is_malware
+
+ssl_fc_unique_id : binary
+ When the incoming connection was made over an SSL/TLS transport layer,
+ returns the TLS unique ID as defined in RFC5929 section 3. The unique id
+ can be encoded to base64 using the converter: "ssl_fc_unique_id,base64".
+
+ssl_fc_server_handshake_traffic_secret : string
+ Return the SERVER_HANDSHAKE_TRAFFIC_SECRET as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_server_traffic_secret_0 : string
+ Return the SERVER_TRAFFIC_SECRET_0 as an hexadecimal string for the
+ front connection when the incoming connection was made over a TLS 1.3
+ transport layer.
+ Require OpenSSL >= 1.1.1. This is one of the keys dumped by the OpenSSL
+ keylog callback to generate the SSLKEYLOGFILE. The SSL Key logging must be
+ activated with "tune.ssl.keylog on" in the global section. See also
+ "tune.ssl.keylog"
+
+ssl_fc_server_random : binary
+ Returns the server random of the front connection when the incoming connection
+ was made over an SSL/TLS transport layer. It is useful to decrypt traffic
+ sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or BoringSSL.
+
+ssl_fc_session_id : binary
+ Returns the SSL ID of the front connection when the incoming connection was
+ made over an SSL/TLS transport layer. It is useful to stick a given client to
+ a server. It is important to note that some browsers refresh their session ID
+ every few minutes.
+
+ssl_fc_session_key : binary
+ Returns the SSL session master key of the front connection when the incoming
+ connection was made over an SSL/TLS transport layer. It is useful to decrypt
+ traffic sent using ephemeral ciphers. This requires OpenSSL >= 1.1.0, or
+ BoringSSL.
+
+
+ssl_fc_sni : string
+ This extracts the Server Name Indication TLS extension (SNI) field from an
+ incoming connection made via an SSL/TLS transport layer and locally
+ deciphered by HAProxy. The result (when present) typically is a string
+ matching the HTTPS host name (253 chars or less). The SSL library must have
+ been built with support for TLS extensions enabled (check haproxy -vv).
+
+ This fetch is different from "req.ssl_sni" above in that it applies to the
+ connection being deciphered by HAProxy and not to SSL contents being blindly
+ forwarded. See also "ssl_fc_sni_end" and "ssl_fc_sni_reg" below. This
+ requires that the SSL library is built with support for TLS extensions
+ enabled (check haproxy -vv).
+
+ CAUTION! Except under very specific conditions, it is normally not correct to
+ use this field as a substitute for the HTTP "Host" header field. For example,
+ when forwarding an HTTPS connection to a server, the SNI field must be set
+ from the HTTP Host header field using "req.hdr(host)" and not from the front
+ SNI value. The reason is that SNI is solely used to select the certificate
+ the server side will present, and that clients are then allowed to send
+ requests with different Host values as long as they match the names in the
+ certificate. As such, "ssl_fc_sni" should normally not be used as an argument
+ to the "sni" server keyword, unless the backend works in TCP mode.
+
+ ACL derivatives :
+ ssl_fc_sni_end : suffix match
+ ssl_fc_sni_reg : regex match
+
+ssl_fc_use_keysize : integer
+ Returns the symmetric cipher key size used in bits when the incoming
+ connection was made over an SSL/TLS transport layer.
+
+ssl_s_der : binary
+ Returns the DER formatted certificate presented by the server when the
+ outgoing connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_s_chain_der : binary
+ Returns the DER formatted chain certificate presented by the server when the
+ outgoing connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form. One
+ can parse the result with any lib accepting ASN.1 DER data. It currently
+ does not support resumed sessions.
+
+ssl_s_key_alg : string
+ Returns the name of the algorithm used to generate the key of the certificate
+ presented by the server when the outgoing connection was made over an
+ SSL/TLS transport layer.
+
+ssl_s_notafter : string
+ Returns the end date presented by the server as a formatted string
+ YYMMDDhhmmss[Z] when the outgoing connection was made over an SSL/TLS
+ transport layer.
+
+ssl_s_notbefore : string
+ Returns the start date presented by the server as a formatted string
+ YYMMDDhhmmss[Z] when the outgoing connection was made over an SSL/TLS
+ transport layer.
+
+ssl_s_i_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the outgoing connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the issuer of the certificate
+ presented by the server when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_s_i_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_s_i_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_s_i_dn(,0,rfc2253)
+
+ssl_s_s_dn([<entry>[,<occ>[,<format>]]]) : string
+ When the outgoing connection was made over an SSL/TLS transport layer,
+ returns the full distinguished name of the subject of the certificate
+ presented by the server when no <entry> is specified, or the value of the
+ first given entry found from the beginning of the DN. If a positive/negative
+ occurrence number is specified as the optional second argument, it returns
+ the value of the nth given entry value from the beginning/end of the DN.
+ For instance, "ssl_s_s_dn(OU,2)" retrieves the second organization unit, and
+ "ssl_s_s_dn(CN)" retrieves the common name.
+ The <format> parameter allows you to receive the DN suitable for
+ consumption by different protocols. Currently supported is rfc2253 for
+ LDAP v3.
+ If you'd like to modify the format only you can specify an empty string
+ and zero for the first two parameters. Example: ssl_s_s_dn(,0,rfc2253)
+
+ssl_s_serial : binary
+ Returns the serial of the certificate presented by the server when the
+ outgoing connection was made over an SSL/TLS transport layer. When used for
+ an ACL, the value(s) to match against can be passed in hexadecimal form.
+
+ssl_s_sha1 : binary
+ Returns the SHA-1 fingerprint of the certificate presented by the server
+ when the outgoing connection was made over an SSL/TLS transport layer. This
+ can be used to know which certificate was chosen using SNI.
+
+ssl_s_sig_alg : string
+ Returns the name of the algorithm used to sign the certificate presented by
+ the server when the outgoing connection was made over an SSL/TLS transport
+ layer.
+
+ssl_s_version : integer
+ Returns the version of the certificate presented by the server when the
+ outgoing connection was made over an SSL/TLS transport layer.
+
+txn.timer.user : integer
+ Total estimated time as seen from client, between the moment the proxy
+ accepted it and the moment both ends were closed, without idle time.
+ This is the equivalent of %Tu in the log-format and is reported in
+ milliseconds (ms). For more details see Section 8.4 "Timing events"
+
+7.3.5. Fetching samples from buffer contents (Layer 6)
+------------------------------------------------------
+
+Fetching samples from buffer contents is a bit different from the previous
+sample fetches above because the sampled data are ephemeral. These data can
+only be used when they're available and will be lost when they're forwarded.
+For this reason, samples fetched from buffer contents during a request cannot
+be used in a response for example. Even while the data are being fetched, they
+can change. Sometimes it is necessary to set some delays or combine multiple
+sample fetch methods to ensure that the expected data are complete and usable,
+for example through TCP request content inspection. Please see the "tcp-request
+content" keyword for more detailed information on the subject.
+
+Warning : Following sample fetches are ignored if used from HTTP proxies. They
+ only deal with raw contents found in the buffers. On their side,
+ HTTP proxies use structured content. Thus raw representation of
+ these data are meaningless. A warning is emitted if an ACL relies on
+ one of the following sample fetches. But it is not possible to detect
+ all invalid usage (for instance inside a log-format string or a
+ sample expression). So be careful.
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+----------------------------------------------------+-------------
+bs.id integer
+distcc_body(<token>[,<occ>]) binary
+distcc_param(<token>[,<occ>]) integer
+fs.id integer
+payload(<offset>,<length>) binary
+payload_lv(<offset1>,<length>[,<offset2>]) binary
+req.len integer
+req_len integer
+req.payload(<offset>,<length>) binary
+req.payload_lv(<offset1>,<length>[,<offset2>]) binary
+req.proto_http boolean
+req_proto_http boolean
+req.rdp_cookie([<name>]) string
+rdp_cookie([<name>]) string
+req.rdp_cookie_cnt([name]) integer
+rdp_cookie_cnt([name]) integer
+req.ssl_alpn string
+req.ssl_ec_ext boolean
+req.ssl_hello_type integer
+req_ssl_hello_type integer
+req.ssl_sni string
+req_ssl_sni string
+req.ssl_st_ext integer
+req.ssl_ver integer
+req_ssl_ver integer
+res.len integer
+res.payload(<offset>,<length>) binary
+res.payload_lv(<offset1>,<length>[,<offset2>]) binary
+res.ssl_hello_type integer
+rep_ssl_hello_type integer
+wait_end boolean
+----------------------------------------------------+-------------
+
+Detailed list:
+
+bs.id : integer
+ Returns the multiplexer's stream ID on the server side. It is the
+ multiplexer's responsibility to return the appropriate information.
+
+distcc_body(<token>[,<occ>]) : binary
+ Parses a distcc message and returns the body associated to occurrence #<occ>
+ of the token <token>. Occurrences start at 1, and when unspecified, any may
+ match though in practice only the first one is checked for now. This can be
+ used to extract file names or arguments in files built using distcc through
+ HAProxy. Please refer to distcc's protocol documentation for the complete
+ list of supported tokens.
+
+distcc_param(<token>[,<occ>]) : integer
+ Parses a distcc message and returns the parameter associated to occurrence
+ #<occ> of the token <token>. Occurrences start at 1, and when unspecified,
+ any may match though in practice only the first one is checked for now. This
+ can be used to extract certain information such as the protocol version, the
+ file size or the argument in files built using distcc through HAProxy.
+ Another use case consists in waiting for the start of the preprocessed file
+ contents before connecting to the server to avoid keeping idle connections.
+ Please refer to distcc's protocol documentation for the complete list of
+ supported tokens.
+
+ Example :
+ # wait up to 20s for the pre-processed file to be uploaded
+ tcp-request inspect-delay 20s
+ tcp-request content accept if { distcc_param(DOTI) -m found }
+ # send large files to the big farm
+ use_backend big_farm if { distcc_param(DOTI) gt 1000000 }
+
+fs.id : integer
+ Returns the multiplexer's stream ID on the client side. It is the
+ multiplexer's responsibility to return the appropriate information. For
+ instance, on a raw TCP, 0 is always returned because there is no stream.
+
+payload(<offset>,<length>) : binary (deprecated)
+ This is an alias for "req.payload" when used in the context of a request (e.g.
+ "stick on", "stick match"), and for "res.payload" when used in the context of
+ a response such as in "stick store response".
+
+payload_lv(<offset1>,<length>[,<offset2>]) : binary (deprecated)
+ This is an alias for "req.payload_lv" when used in the context of a request
+ (e.g. "stick on", "stick match"), and for "res.payload_lv" when used in the
+ context of a response such as in "stick store response".
+
+req.len : integer
+req_len : integer (deprecated)
+ Returns an integer value corresponding to the number of bytes present in the
+ request buffer. This is mostly used in ACL. It is important to understand
+ that this test does not return false as long as the buffer is changing. This
+ means that a check with equality to zero will almost always immediately match
+ at the beginning of the session, while a test for more data will wait for
+ that data to come in and return false only when HAProxy is certain that no
+ more data will come in. This test was designed to be used with TCP request
+ content inspection.
+
+req.payload(<offset>,<length>) : binary
+ This extracts a binary block of <length> bytes and starting at byte <offset>
+ in the request buffer. As a special case, if the <length> argument is zero,
+ the whole buffer from <offset> to the end is extracted. This can be used
+ with ACLs in order to check for the presence of some content in a buffer at
+ any location.
+
+ ACL derivatives :
+ req.payload(<offset>,<length>) : hex binary match
+
+req.payload_lv(<offset1>,<length>[,<offset2>]) : binary
+ This extracts a binary block whose size is specified at <offset1> for <length>
+ bytes, and which starts at <offset2> if specified or just after the length in
+ the request buffer. The <offset2> parameter also supports relative offsets if
+ prepended with a '+' or '-' sign.
+
+ ACL derivatives :
+ req.payload_lv(<offset1>,<length>[,<offset2>]) : hex binary match
+
+ Example : please consult the example from the "stick store-response" keyword.
+
+req.proto_http : boolean
+req_proto_http : boolean (deprecated)
+ Returns true when data in the request buffer look like HTTP and correctly
+ parses as such. It is the same parser as the common HTTP request parser which
+ is used so there should be no surprises. The test does not match until the
+ request is complete, failed or timed out. This test may be used to report the
+ protocol in TCP logs, but the biggest use is to block TCP request analysis
+ until a complete HTTP request is present in the buffer, for example to track
+ a header.
+
+ Example:
+ # track request counts per "base" (concatenation of Host+URL)
+ tcp-request inspect-delay 10s
+ tcp-request content reject if !HTTP
+ tcp-request content track-sc0 base table req-rate
+
+req.rdp_cookie([<name>]) : string
+rdp_cookie([<name>]) : string (deprecated)
+ When the request buffer looks like the RDP protocol, extracts the RDP cookie
+ <name>, or any cookie if unspecified. The parser only checks for the first
+ cookie, as illustrated in the RDP protocol specification. The cookie name is
+ case insensitive. Generally the "MSTS" cookie name will be used, as it can
+ contain the user name of the client connecting to the server if properly
+ configured on the client. The "MSTSHASH" cookie is often used as well for
+ session stickiness to servers.
+
+ This differs from "balance rdp-cookie" in that any balancing algorithm may be
+ used and thus the distribution of clients to backend servers is not linked to
+ a hash of the RDP cookie. It is envisaged that using a balancing algorithm
+ such as "balance roundrobin" or "balance leastconn" will lead to a more even
+ distribution of clients to backend servers than the hash used by "balance
+ rdp-cookie".
+
+ ACL derivatives :
+ req.rdp_cookie([<name>]) : exact string match
+
+ Example :
+ listen tse-farm
+ bind 0.0.0.0:3389
+ # wait up to 5s for an RDP cookie in the request
+ tcp-request inspect-delay 5s
+ tcp-request content accept if RDP_COOKIE
+ # apply RDP cookie persistence
+ persist rdp-cookie
+ # Persist based on the mstshash cookie
+ # This only makes sense if
+ # balance rdp-cookie is not used
+ stick-table type string size 204800
+ stick on req.rdp_cookie(mstshash)
+ server srv1 1.1.1.1:3389
+ server srv1 1.1.1.2:3389
+
+ See also : "balance rdp-cookie", "persist rdp-cookie", "tcp-request" and the
+ "req.rdp_cookie" ACL.
+
+req.rdp_cookie_cnt([name]) : integer
+rdp_cookie_cnt([name]) : integer (deprecated)
+ Tries to parse the request buffer as RDP protocol, then returns an integer
+ corresponding to the number of RDP cookies found. If an optional cookie name
+ is passed, only cookies matching this name are considered. This is mostly
+ used in ACL.
+
+ ACL derivatives :
+ req.rdp_cookie_cnt([<name>]) : integer match
+
+req.ssl_alpn : string
+ Returns a string containing the values of the Application-Layer Protocol
+ Negotiation (ALPN) TLS extension (RFC7301), sent by the client within the SSL
+ ClientHello message. Note that this only applies to raw contents found in the
+ request buffer and not to the contents deciphered via an SSL data layer, so
+ this will not work with "bind" lines having the "ssl" option. This is useful
+ in ACL to make a routing decision based upon the ALPN preferences of a TLS
+ client, like in the example below. See also "ssl_fc_alpn".
+
+ Examples :
+ # Wait for a client hello for at most 5 seconds
+ tcp-request inspect-delay 5s
+ tcp-request content accept if { req.ssl_hello_type 1 }
+ use_backend bk_acme if { req.ssl_alpn acme-tls/1 }
+ default_backend bk_default
+
+req.ssl_ec_ext : boolean
+ Returns a boolean identifying if client sent the Supported Elliptic Curves
+ Extension as defined in RFC4492, section 5.1. within the SSL ClientHello
+ message. This can be used to present ECC compatible clients with EC
+ certificate and to use RSA for all others, on the same IP address. Note that
+ this only applies to raw contents found in the request buffer and not to
+ contents deciphered via an SSL data layer, so this will not work with "bind"
+ lines having the "ssl" option.
+
+req.ssl_hello_type : integer
+req_ssl_hello_type : integer (deprecated)
+ Returns an integer value containing the type of the SSL hello message found
+  in the request buffer if the buffer contains data that parses as a complete
+ SSL (v3 or superior) client hello message. Note that this only applies to raw
+ contents found in the request buffer and not to contents deciphered via an
+ SSL data layer, so this will not work with "bind" lines having the "ssl"
+ option. This is mostly used in ACL to detect presence of an SSL hello message
+ that is supposed to contain an SSL session ID usable for stickiness.
+
+req.ssl_sni : string
+req_ssl_sni : string (deprecated)
+ Returns a string containing the value of the Server Name TLS extension sent
+ by a client in a TLS stream passing through the request buffer if the buffer
+  contains data that parses as a complete SSL (v3 or superior) client hello
+ message. Note that this only applies to raw contents found in the request
+ buffer and not to contents deciphered via an SSL data layer, so this will not
+ work with "bind" lines having the "ssl" option. This will only work for actual
+ implicit TLS based protocols like HTTPS (443), IMAPS (993), SMTPS (465),
+ however it will not work for explicit TLS based protocols, like SMTP (25/587)
+ or IMAP (143). SNI normally contains the name of the host the client tries to
+ connect to (for recent browsers). SNI is useful for allowing or denying access
+ to certain hosts when SSL/TLS is used by the client. This test was designed to
+ be used with TCP request content inspection. If content switching is needed,
+ it is recommended to first wait for a complete client hello (type 1), like in
+ the example below. See also "ssl_fc_sni".
+
+ ACL derivatives :
+ req.ssl_sni : exact string match
+
+ Examples :
+ # Wait for a client hello for at most 5 seconds
+ tcp-request inspect-delay 5s
+ tcp-request content accept if { req.ssl_hello_type 1 }
+ use_backend bk_allow if { req.ssl_sni -f allowed_sites }
+ default_backend bk_sorry_page
+
+req.ssl_st_ext : integer
+ Returns 0 if the client didn't send a SessionTicket TLS Extension (RFC5077)
+ Returns 1 if the client sent SessionTicket TLS Extension
+ Returns 2 if the client also sent non-zero length TLS SessionTicket
+ Note that this only applies to raw contents found in the request buffer and
+ not to contents deciphered via an SSL data layer, so this will not work with
+  "bind" lines having the "ssl" option. This can for example be used to detect
+  whether the client sent a SessionTicket or not and stick it accordingly: if
+  no SessionTicket is sent, stick on the SessionID, or don't stick at all, as
+  there is no server-side state when SessionTickets are in use.
+
+req.ssl_ver : integer
+req_ssl_ver : integer (deprecated)
+ Returns an integer value containing the version of the SSL/TLS protocol of a
+ stream present in the request buffer. Both SSLv2 hello messages and SSLv3
+ messages are supported. TLSv1 is announced as SSL version 3.1. The value is
+ composed of the major version multiplied by 65536, added to the minor
+ version. Note that this only applies to raw contents found in the request
+ buffer and not to contents deciphered via an SSL data layer, so this will not
+ work with "bind" lines having the "ssl" option. The ACL version of the test
+ matches against a decimal notation in the form MAJOR.MINOR (e.g. 3.1). This
+ fetch is mostly used in ACL.
+
+ ACL derivatives :
+ req.ssl_ver : decimal match
+
+res.len : integer
+ Returns an integer value corresponding to the number of bytes present in the
+ response buffer. This is mostly used in ACL. It is important to understand
+ that this test does not return false as long as the buffer is changing. This
+ means that a check with equality to zero will almost always immediately match
+ at the beginning of the stream, while a test for more data will wait for
+ that data to come in and return false only when HAProxy is certain that no
+ more data will come in. This test was designed to be used with TCP response
+ content inspection. But it may also be used in tcp-check based expect rules.
+
+res.payload(<offset>,<length>) : binary
+ This extracts a binary block of <length> bytes and starting at byte <offset>
+ in the response buffer. As a special case, if the <length> argument is zero,
+ the whole buffer from <offset> to the end is extracted. This can be used
+ with ACLs in order to check for the presence of some content in a buffer at
+ any location. It may also be used in tcp-check based expect rules.
+
+res.payload_lv(<offset1>,<length>[,<offset2>]) : binary
+ This extracts a binary block whose size is specified at <offset1> for <length>
+ bytes, and which starts at <offset2> if specified or just after the length in
+ the response buffer. The <offset2> parameter also supports relative offsets
+ if prepended with a '+' or '-' sign. It may also be used in tcp-check based
+ expect rules.
+
+ Example : please consult the example from the "stick store-response" keyword.
+
+res.ssl_hello_type : integer
+rep_ssl_hello_type : integer (deprecated)
+ Returns an integer value containing the type of the SSL hello message found
+ in the response buffer if the buffer contains data that parses as a complete
+ SSL (v3 or superior) hello message. Note that this only applies to raw
+ contents found in the response buffer and not to contents deciphered via an
+ SSL data layer, so this will not work with "server" lines having the "ssl"
+ option. This is mostly used in ACL to detect presence of an SSL hello message
+ that is supposed to contain an SSL session ID usable for stickiness.
+
+wait_end : boolean
+ This fetch either returns true when the inspection period is over, or does
+ not fetch. It is only used in ACLs, in conjunction with content analysis to
+ avoid returning a wrong verdict early. It may also be used to delay some
+ actions, such as a delayed reject for some special addresses. Since it either
+ stops the rules evaluation or immediately returns true, it is recommended to
+ use this acl as the last one in a rule. Please note that the default ACL
+ "WAIT_END" is always usable without prior declaration. This test was designed
+ to be used with TCP request content inspection.
+
+ Examples :
+ # delay every incoming request by 2 seconds
+ tcp-request inspect-delay 2s
+ tcp-request content accept if WAIT_END
+
+ # don't immediately tell bad guys they are rejected
+ tcp-request inspect-delay 10s
+ acl goodguys src 10.0.0.0/24
+ acl badguys src 10.0.1.0/24
+ tcp-request content accept if goodguys
+ tcp-request content reject if badguys WAIT_END
+ tcp-request content reject
+
+
+7.3.6. Fetching HTTP samples (Layer 7)
+--------------------------------------
+
+It is possible to fetch samples from HTTP contents, requests and responses.
+This application layer is also called layer 7. It is only possible to fetch the
+data in this section when a full HTTP request or response has been parsed from
+its respective request or response buffer. This is always the case with all
+HTTP specific rules and for sections running with "mode http". When using TCP
+content inspection, it may be necessary to support an inspection delay in order
+to let the request or response come in first. These fetches may require a bit
+more CPU resources than the layer 4 ones, but not much since the request and
+response are indexed.
+
+Note : Regarding HTTP processing from the tcp-request content rules, everything
+ will work as expected from an HTTP proxy. However, from a TCP proxy,
+ without an HTTP upgrade, it will only work for HTTP/1 content. For
+ HTTP/2 content, only the preface is visible. Thus, it is only possible
+ to rely to "req.proto_http", "req.ver" and eventually "method" sample
+ fetches. All other L7 sample fetches will fail. After an HTTP upgrade,
+ they will work in the same manner than from an HTTP proxy.
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+-------------------------------------------------+-------------
+base string
+base32 integer
+base32+src binary
+baseq string
+capture.req.hdr(<idx>) string
+capture.req.method string
+capture.req.uri string
+capture.req.ver string
+capture.res.hdr(<idx>) string
+capture.res.ver string
+req.body binary
+req.body_param([<name>[,i]]) string
+req.body_len integer
+req.body_size integer
+req.cook([<name>]) string
+cook([<name>]) string
+req.cook_cnt([<name>]) integer
+cook_cnt([<name>]) integer
+req.cook_val([<name>]) integer
+cook_val([<name>]) integer
+req.cook_names([<delim>]) string
+cookie([<name>]) string
+hdr([<name>[,<occ>]]) string
+request_date([<unit>]) integer
+req.fhdr(<name>[,<occ>]) string
+req.fhdr_cnt([<name>]) integer
+req.hdr([<name>[,<occ>]]) string
+req.hdr_cnt([<name>]) integer
+hdr_cnt([<header>]) integer
+req.hdr_ip([<name>[,<occ>]]) ip
+hdr_ip([<name>[,<occ>]]) ip
+req.hdr_val([<name>[,<occ>]]) integer
+hdr_val([<name>[,<occ>]]) integer
+req.hdrs string
+req.hdrs_bin binary
+req.timer.hdr integer
+req.timer.idle integer
+req.timer.queue integer
+req.timer.tq integer
+res.timer.hdr integer
+http_auth(<userlist>) boolean
+http_auth_bearer([<header>]) string
+http_auth_group(<userlist>) string
+http_auth_pass string
+http_auth_type string
+http_auth_user string
+http_first_req boolean
+method integer
+path string
+pathq string
+query string
+req.hdr_names([<delim>]) string
+req.ver string
+req_ver string
+res.body binary
+res.body_len integer
+res.body_size integer
+res.cache_hit boolean
+res.cache_name string
+res.comp boolean
+res.comp_algo string
+res.cook([<name>]) string
+scook([<name>]) string
+res.cook_cnt([<name>]) integer
+scook_cnt([<name>]) integer
+res.cook_val([<name>]) integer
+scook_val([<name>]) integer
+res.cook_names([<delim>]) string
+res.fhdr([<name>[,<occ>]]) string
+res.fhdr_cnt([<name>]) integer
+res.hdr([<name>[,<occ>]]) string
+shdr([<name>[,<occ>]]) string
+res.hdr_cnt([<name>]) integer
+shdr_cnt([<name>]) integer
+res.hdr_ip([<name>[,<occ>]]) ip
+shdr_ip([<name>[,<occ>]]) ip
+res.hdr_names([<delim>]) string
+res.hdr_val([<name>[,<occ>]]) integer
+shdr_val([<name>[,<occ>]]) integer
+res.hdrs string
+res.hdrs_bin binary
+res.ver string
+resp_ver string
+server_status integer
+set-cookie([<name>]) string
+status integer
+txn.status integer
+txn.timer.total integer
+unique-id string
+url string
+url_ip ip
+url_port integer
+urlp([<name>[,<delim>[,i]]]) string
+url_param([<name>[,<delim>[,i]]]) string
+urlp_val([<name>[,<delim>[,i]]]) integer
+url32 integer
+url32+src binary
+-------------------------------------------------+-------------
+
+Detailed list:
+
+base : string
+ This returns the concatenation of the first Host header and the path part of
+ the request, which starts at the first slash and ends before the question
+ mark. It can be useful in virtual hosted environments to detect URL abuses as
+ well as to improve shared caches efficiency. Using this with a limited size
+ stick table also allows one to collect statistics about most commonly
+ requested objects by host/path. With ACLs it can allow simple content
+ switching rules involving the host and the path at the same time, such as
+ "www.example.com/favicon.ico". See also "path" and "uri".
+
+ ACL derivatives :
+ base : exact string match
+ base_beg : prefix match
+ base_dir : subdir match
+ base_dom : domain match
+ base_end : suffix match
+ base_len : length match
+ base_reg : regex match
+ base_sub : substring match
+
+base32 : integer
+ This returns a 32-bit hash of the value returned by the "base" fetch method
+ above. This is useful to track per-URL activity on high traffic sites without
+ having to store all URLs. Instead a shorter hash is stored, saving a lot of
+ memory. The output type is an unsigned integer. The hash function used is
+ SDBM with full avalanche on the output. Technically, base32 is exactly equal
+ to "base,sdbm(1)".
+
+base32+src : binary
+ This returns the concatenation of the base32 fetch above and the src fetch
+ below. The resulting type is of type binary, with a size of 8 or 20 bytes
+ depending on the source address family. This can be used to track per-IP,
+ per-URL counters.
+
+baseq : string
+ This returns the concatenation of the first Host header and the path part of
+ the request with the query-string, which starts at the first slash. Using this
+ instead of "base" allows one to properly identify the target resource, for
+ statistics or caching use cases. See also "path", "pathq" and "base".
+
+capture.req.hdr(<idx>) : string
+ This extracts the content of the header captured by the "capture request
+ header", idx is the position of the capture keyword in the configuration.
+ The first entry is an index of 0. See also: "capture request header".
+
+capture.req.method : string
+  This extracts the METHOD of an HTTP request. Unlike "method", it can be used
+  in both request and response because it's allocated.
+
+capture.req.uri : string
+ This extracts the request's URI, which starts at the first slash and ends
+ before the first space in the request (without the host part). Unlike "path"
+ and "url", it can be used in both request and response because it's
+ allocated.
+
+capture.req.ver : string
+ This extracts the request's HTTP version and returns either "HTTP/1.0" or
+  "HTTP/1.1". Unlike "req.ver", it can be used in requests, responses, and
+  logs because it relies on a persistent flag.
+
+capture.res.hdr(<idx>) : string
+ This extracts the content of the header captured by the "capture response
+ header", idx is the position of the capture keyword in the configuration.
+ The first entry is an index of 0.
+ See also: "capture response header"
+
+capture.res.ver : string
+ This extracts the response's HTTP version and returns either "HTTP/1.0" or
+ "HTTP/1.1". Unlike "res.ver", it can be used in logs because it relies on a
+ persistent flag.
+
+req.body : binary
+ This returns the HTTP request's available body as a block of data. It is
+ recommended to use "option http-buffer-request" to be sure to wait, as much
+ as possible, for the request's body.
+
+req.body_param([<name>[,i]]) : string
+ This fetch assumes that the body of the POST request is url-encoded. The user
+ can check if the "content-type" contains the value
+ "application/x-www-form-urlencoded". This extracts the first occurrence of the
+ parameter <name> in the body, which ends before '&'. The parameter name is
+ case-sensitive, unless "i" is added as a second argument. If no name is
+ given, any parameter will match, and the first one will be returned. The
+ result is a string corresponding to the value of the parameter <name> as
+ presented in the request body (no URL decoding is performed). Note that the
+ ACL version of this fetch iterates over multiple parameters and will
+ iteratively report all parameters values if no name is given.
+
+req.body_len : integer
+ This returns the length of the HTTP request's available body in bytes. It may
+ be lower than the advertised length if the body is larger than the buffer. It
+ is recommended to use "option http-buffer-request" to be sure to wait, as
+ much as possible, for the request's body.
+
+req.body_size : integer
+ This returns the advertised length of the HTTP request's body in bytes. It
+ will represent the advertised Content-Length header, or the size of the
+ available data in case of chunked encoding.
+
+req.cook([<name>]) : string
+cook([<name>]) : string (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Cookie"
+ header line from the request, and returns its value as string. If no name is
+ specified, the first cookie value is returned. When used with ACLs, all
+ matching cookies are evaluated. Spaces around the name and the value are
+ ignored as requested by the Cookie header specification (RFC6265). The cookie
+ name is case-sensitive. Empty cookies are valid, so an empty cookie may very
+ well return an empty value if it is present. Use the "found" match to detect
+ presence. Use the res.cook() variant for response cookies sent by the server.
+
+ ACL derivatives :
+ req.cook([<name>]) : exact string match
+ req.cook_beg([<name>]) : prefix match
+ req.cook_dir([<name>]) : subdir match
+ req.cook_dom([<name>]) : domain match
+ req.cook_end([<name>]) : suffix match
+ req.cook_len([<name>]) : length match
+ req.cook_reg([<name>]) : regex match
+ req.cook_sub([<name>]) : substring match
+
+req.cook_cnt([<name>]) : integer
+cook_cnt([<name>]) : integer (deprecated)
+ Returns an integer value representing the number of occurrences of the cookie
+ <name> in the request, or all cookies if <name> is not specified.
+
+req.cook_val([<name>]) : integer
+cook_val([<name>]) : integer (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Cookie"
+ header line from the request, and converts its value to an integer which is
+ returned. If no name is specified, the first cookie value is returned. When
+ used in ACLs, all matching names are iterated over until a value matches.
+
+req.cook_names([<delim>]) : string
+ This builds a string made from the concatenation of all cookie names as they
+ appear in the request (Cookie header) when the rule is evaluated. The default
+ delimiter is the comma (',') but it may be overridden as an optional argument
+ <delim>. In this case, only the first character of <delim> is considered.
+
+cookie([<name>]) : string (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Cookie"
+ header line from the request, or a "Set-Cookie" header from the response, and
+ returns its value as a string. A typical use is to get multiple clients
+ sharing a same profile use the same server. This can be similar to what
+ "appsession" did with the "request-learn" statement, but with support for
+ multi-peer synchronization and state keeping across restarts. If no name is
+ specified, the first cookie value is returned. This fetch should not be used
+ anymore and should be replaced by req.cook() or res.cook() instead as it
+ ambiguously uses the direction based on the context where it is used.
+
+hdr([<name>[,<occ>]]) : string
+ This is equivalent to req.hdr() when used on requests, and to res.hdr() when
+ used on responses. Please refer to these respective fetches for more details.
+ In case of doubt about the fetch direction, please use the explicit ones.
+ Note that contrary to the hdr() sample fetch method, the hdr_* ACL keywords
+ unambiguously apply to the request headers.
+
+request_date([<unit>]) : integer
+ This is the exact date when the first byte of the HTTP request was received
+ by HAProxy (log-format tag %tr). This is computed from accept_date +
+ handshake time (%Th) + idle time (%Ti).
+
+ Returns a value in number of seconds since epoch.
+
+ <unit> is facultative, and can be set to "s" for seconds (default behavior),
+ "ms" for milliseconds or "us" for microseconds.
+ If unit is set, return value is an integer reflecting either seconds,
+ milliseconds or microseconds since epoch.
+ It is useful when a time resolution of less than a second is needed.
+
+req.fhdr(<name>[,<occ>]) : string
+ This returns the full value of the last occurrence of header <name> in an
+ HTTP request. It differs from req.hdr() in that any commas present in the
+ value are returned and are not used as delimiters. This is sometimes useful
+ with headers such as User-Agent.
+
+ When used from an ACL, all occurrences are iterated over until a match is
+ found.
+
+ Optionally, a specific occurrence might be specified as a position number.
+ Positive values indicate a position from the first occurrence, with 1 being
+ the first one. Negative values indicate positions relative to the last one,
+ with -1 being the last one.
+
+req.fhdr_cnt([<name>]) : integer
+ Returns an integer value representing the number of occurrences of request
+ header field name <name>, or the total number of header fields if <name> is
+  not specified. Like req.fhdr() it differs from req.hdr_cnt() by not splitting
+ headers at commas.
+
+req.hdr([<name>[,<occ>]]) : string
+ This returns the last comma-separated value of the header <name> in an HTTP
+ request. The fetch considers any comma as a delimiter for distinct values.
+ This is useful if you need to process headers that are defined to be a list
+ of values, such as Accept, or X-Forwarded-For. If full-line headers are
+ desired instead, use req.fhdr(). Please carefully check RFC 7231 to know how
+ certain headers are supposed to be parsed. Also, some of them are case
+ insensitive (e.g. Connection).
+
+ When used from an ACL, all occurrences are iterated over until a match is
+ found.
+
+ Optionally, a specific occurrence might be specified as a position number.
+ Positive values indicate a position from the first occurrence, with 1 being
+ the first one. Negative values indicate positions relative to the last one,
+ with -1 being the last one.
+
+ A typical use is with the X-Forwarded-For header once converted to IP,
+ associated with an IP stick-table.
+
+ ACL derivatives :
+ hdr([<name>[,<occ>]]) : exact string match
+ hdr_beg([<name>[,<occ>]]) : prefix match
+ hdr_dir([<name>[,<occ>]]) : subdir match
+ hdr_dom([<name>[,<occ>]]) : domain match
+ hdr_end([<name>[,<occ>]]) : suffix match
+ hdr_len([<name>[,<occ>]]) : length match
+ hdr_reg([<name>[,<occ>]]) : regex match
+ hdr_sub([<name>[,<occ>]]) : substring match
+
+req.hdr_cnt([<name>]) : integer
+hdr_cnt([<header>]) : integer (deprecated)
+ Returns an integer value representing the number of occurrences of request
+ header field name <name>, or the total number of header field values if
+ <name> is not specified. Like req.hdr() it counts each comma separated
+ part of the header's value. If counting of full-line headers is desired,
+ then req.fhdr_cnt() should be used instead.
+
+ With ACLs, it can be used to detect presence, absence or abuse of a specific
+ header, as well as to block request smuggling attacks by rejecting requests
+ which contain more than one of certain headers.
+
+ Refer to req.hdr() for more information on header matching.
+
+req.hdr_ip([<name>[,<occ>]]) : ip
+hdr_ip([<name>[,<occ>]]) : ip (deprecated)
+ This extracts the last occurrence of header <name> in an HTTP request,
+ converts it to an IPv4 or IPv6 address and returns this address. When used
+ with ACLs, all occurrences are checked, and if <name> is omitted, every value
+ of every header is checked. The parser strictly adheres to the format
+ described in RFC7239, with the extension that IPv4 addresses may optionally
+ be followed by a colon (':') and a valid decimal port number (0 to 65535),
+ which will be silently dropped. All other forms will not match and will
+ cause the address to be ignored.
+
+ The <occ> parameter is processed as with req.hdr().
+
+ A typical use is with the X-Forwarded-For and X-Client-IP headers.
+
+req.hdr_val([<name>[,<occ>]]) : integer
+hdr_val([<name>[,<occ>]]) : integer (deprecated)
+ This extracts the last occurrence of header <name> in an HTTP request, and
+ converts it to an integer value. When used with ACLs, all occurrences are
+ checked, and if <name> is omitted, every value of every header is checked.
+
+ The <occ> parameter is processed as with req.hdr().
+
+ A typical use is with the X-Forwarded-For header.
+
+req.hdrs : string
+ Returns the current request headers as string including the last empty line
+ separating headers from the request body. The last empty line can be used to
+ detect a truncated header block. This sample fetch is useful for some SPOE
+ headers analyzers and for advanced logging.
+
+req.hdrs_bin : binary
+ Returns the current request headers contained in preparsed binary form. This
+ is useful for offloading some processing with SPOE. Each string is described
+ by a length followed by the number of bytes indicated in the length. The
+ length is represented using the variable integer encoding detailed in the
+ SPOE documentation. The end of the list is marked by a couple of empty header
+ names and values (length of 0 for both).
+
+ *(<str:header-name><str:header-value>)<empty string><empty string>
+
+ int: refer to the SPOE documentation for the encoding
+ str: <int:length><bytes>
+
+req.timer.hdr : integer
+ Total time to get the client request (HTTP mode only). It's the time elapsed
+ between the first bytes received and the moment the proxy received the empty
+ line marking the end of the HTTP headers. This is reported in milliseconds
+ (ms) and is equivalent to %TR in log-format. See section 8.4 "Timing events"
+ for more details.
+
+req.timer.idle : integer
+ This is the idle time before the HTTP request (HTTP mode only). This timer
+ counts between the end of the handshakes and the first byte of the HTTP
+ request. This is reported in milliseconds and is equivalent to %Ti in
+ log-format. See section 8.4 "Timing events" for more details.
+
+req.timer.queue : integer
+ Total time spent in the queues waiting for a connection slot.
+ This is reported in milliseconds and is equivalent to %Tw in
+ log-format. See section 8.4 "Timing events" for more details.
+
+req.timer.tq : integer
+ total time to get the client request from the accept date or since the
+ emission of the last byte of the previous response.
+ This is reported in milliseconds and is equivalent to %Tq in
+ log-format. See section 8.4 "Timing events" for more details.
+
+res.timer.hdr : integer
+ It's the time elapsed between the moment the TCP connection was established
+ to the server and the moment the server sent its complete response headers.
+ This is reported in milliseconds and is equivalent to %Tr in log-format. See
+ section 8.4 "Timing events" for more details.
+
+
+http_auth(<userlist>) : boolean
+ Returns a boolean indicating whether the authentication data received from
+ the client match a username & password stored in the specified userlist. This
+ fetch function is not really useful outside of ACLs. Currently only http
+ basic auth is supported.
+
+http_auth_bearer([<header>]) : string
+ Returns the client-provided token found in the authorization data when the
+ Bearer scheme is used (to send JSON Web Tokens for instance). No check is
+ performed on the data sent by the client.
+ If a specific <header> is supplied, it will parse this header instead of the
+ Authorization one.
+
+http_auth_group(<userlist>) : string
+ Returns a string corresponding to the user name found in the authentication
+ data received from the client if both the user name and password are valid
+ according to the specified userlist. The main purpose is to use it in ACLs
+ where it is then checked whether the user belongs to any group within a list.
+ This fetch function is not really useful outside of ACLs. Currently only http
+ basic auth is supported.
+
+ ACL derivatives :
+ http_auth_group(<userlist>) : group ...
+ Returns true when the user extracted from the request and whose password is
+ valid according to the specified userlist belongs to at least one of the
+ groups.
+
+http_auth_pass : string
+ Returns the user's password found in the authentication data received from
+  the client, as supplied in the Authorization header. No checks are
+ performed by this sample fetch. Only Basic authentication is supported.
+
+http_auth_type : string
+ Returns the authentication method found in the authentication data received from
+  the client, as supplied in the Authorization header. No checks are
+ performed by this sample fetch. Only Basic authentication is supported.
+
+http_auth_user : string
+ Returns the user name found in the authentication data received from the
+  client, as supplied in the Authorization header. No checks are performed by
+ this sample fetch. Only Basic authentication is supported.
+
+http_first_req : boolean
+ Returns true when the request being processed is the first one of the
+ connection. This can be used to add or remove headers that may be missing
+ from some requests when a request is not the first one, or to help grouping
+ requests in the logs.
+
+method : integer + string
+ Returns an integer value corresponding to the method in the HTTP request. For
+ example, "GET" equals 1 (check sources to establish the matching). Value 9
+ means "other method" and may be converted to a string extracted from the
+ stream. This should not be used directly as a sample, this is only meant to
+ be used from ACLs, which transparently convert methods from patterns to these
+ integer + string values. Some predefined ACL already check for most common
+ methods.
+
+ ACL derivatives :
+ method : case insensitive method match
+
+ Example :
+ # only accept GET and HEAD requests
+ acl valid_method method GET HEAD
+ http-request deny if ! valid_method
+
+path : string
+ This extracts the request's URL path, which starts at the first slash and
+ ends before the question mark (without the host part). A typical use is with
+ prefetch-capable caches, and with portals which need to aggregate multiple
+ information from databases and keep them in caches. Note that with outgoing
+ caches, it would be wiser to use "url" instead. With ACLs, it's typically
+ used to match exact file names (e.g. "/login.php"), or directory parts using
+ the derivative forms. See also the "url" and "base" fetch methods. Please
+ note that any fragment reference in the URI ('#' after the path) is strictly
+ forbidden by the HTTP standard and will be rejected. However, if the frontend
+ receiving the request has "option accept-invalid-http-request", then this
+ fragment part will be accepted and will also appear in the path.
+
+ ACL derivatives :
+ path : exact string match
+ path_beg : prefix match
+ path_dir : subdir match
+ path_dom : domain match
+ path_end : suffix match
+ path_len : length match
+ path_reg : regex match
+ path_sub : substring match
+
+pathq : string
+ This extracts the request's URL path with the query-string, which starts at
+ the first slash. This sample fetch is pretty handy to always retrieve a
+ relative URI, excluding the scheme and the authority part, if any. Indeed,
+ while it is the common representation for an HTTP/1.1 request target, in
+ HTTP/2, an absolute URI is often used. This sample fetch will return the same
+ result in both cases. Please note that any fragment reference in the URI ('#'
+ after the path) is strictly forbidden by the HTTP standard and will be
+ rejected. However, if the frontend receiving the request has "option
+ accept-invalid-http-request", then this fragment part will be accepted and
+ will also appear in the path.
+
+query : string
+ This extracts the request's query string, which starts after the first
+ question mark. If no question mark is present, this fetch returns nothing. If
+ a question mark is present but nothing follows, it returns an empty string.
+ This means it's possible to easily know whether a query string is present
+ using the "found" matching method. This fetch is the complement of "path"
+ which stops before the question mark.
+
+req.hdr_names([<delim>]) : string
+ This builds a string made from the concatenation of all header names as they
+ appear in the request when the rule is evaluated. The default delimiter is
+ the comma (',') but it may be overridden as an optional argument <delim>. In
+ this case, only the first character of <delim> is considered.
+
+req.ver : string
+req_ver : string (deprecated)
+ Returns the version string from the HTTP request, for example "1.1". This can
+ be useful for ACL. For logs use the "%HV" log variable. Some predefined ACL
+ already check for versions 1.0 and 1.1.
+
+ Common values are "1.0", "1.1", "2.0" or "3.0".
+
+ In the case of http/2 and http/3, the value is not extracted from the HTTP
+ version in the request line but is determined by the negotiated protocol
+ version.
+
+ ACL derivatives :
+ req.ver : exact string match
+
+res.body : binary
+ This returns the HTTP response's available body as a block of data. Unlike
+ the request side, there is no directive to wait for the response's body. This
+ sample fetch is really useful (and usable) in the health-check context.
+
+ It may be used in tcp-check based expect rules.
+
+res.body_len : integer
+ This returns the length of the HTTP response available body in bytes. Unlike
+ the request side, there is no directive to wait for the response's body. This
+ sample fetch is really useful (and usable) in the health-check context.
+
+ It may be used in tcp-check based expect rules.
+
+res.body_size : integer
+ This returns the advertised length of the HTTP response body in bytes. It
+ will represent the advertised Content-Length header, or the size of the
+ available data in case of chunked encoding. Unlike the request side, there is
+ no directive to wait for the response body. This sample fetch is really
+ useful (and usable) in the health-check context.
+
+ It may be used in tcp-check based expect rules.
+
+res.cache_hit : boolean
+ Returns the boolean "true" value if the response has been built out of an
+ HTTP cache entry, otherwise returns boolean "false".
+
+res.cache_name : string
+ Returns a string containing the name of the HTTP cache that was used to
+ build the HTTP response if res.cache_hit is true, otherwise returns an
+ empty string.
+
+res.comp : boolean
+ Returns the boolean "true" value if the response has been compressed by
+ HAProxy, otherwise returns boolean "false". This may be used to add
+ information in the logs.
+
+res.comp_algo : string
+ Returns a string containing the name of the algorithm used if the response
+ was compressed by HAProxy, for example : "deflate". This may be used to add
+ some information in the logs.
+
+res.cook([<name>]) : string
+scook([<name>]) : string (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Set-Cookie"
+ header line from the response, and returns its value as string. If no name is
+ specified, the first cookie value is returned.
+
+ It may be used in tcp-check based expect rules.
+
+ ACL derivatives :
+      res.scook([<name>]) : exact string match
+
+res.cook_cnt([<name>]) : integer
+scook_cnt([<name>]) : integer (deprecated)
+ Returns an integer value representing the number of occurrences of the cookie
+ <name> in the response, or all cookies if <name> is not specified. This is
+ mostly useful when combined with ACLs to detect suspicious responses.
+
+ It may be used in tcp-check based expect rules.
+
+res.cook_val([<name>]) : integer
+scook_val([<name>]) : integer (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Set-Cookie"
+ header line from the response, and converts its value to an integer which is
+ returned. If no name is specified, the first cookie value is returned.
+
+ It may be used in tcp-check based expect rules.
+
+res.cook_names([<delim>]) : string
+ This builds a string made from the concatenation of all cookie names as they
+ appear in the response (Set-Cookie headers) when the rule is evaluated. The
+ default delimiter is the comma (',') but it may be overridden as an optional
+ argument <delim>. In this case, only the first character of <delim> is
+ considered.
+
+ It may be used in tcp-check based expect rules.
+
+res.fhdr([<name>[,<occ>]]) : string
+ This fetch works like the req.fhdr() fetch with the difference that it acts
+ on the headers within an HTTP response.
+
+ Like req.fhdr() the res.fhdr() fetch returns full values. If the header is
+ defined to be a list you should use res.hdr().
+
+ This fetch is sometimes useful with headers such as Date or Expires.
+
+ It may be used in tcp-check based expect rules.
+
+res.fhdr_cnt([<name>]) : integer
+ This fetch works like the req.fhdr_cnt() fetch with the difference that it
+ acts on the headers within an HTTP response.
+
+ Like req.fhdr_cnt() the res.fhdr_cnt() fetch acts on full values. If the
+ header is defined to be a list you should use res.hdr_cnt().
+
+ It may be used in tcp-check based expect rules.
+
+res.hdr([<name>[,<occ>]]) : string
+shdr([<name>[,<occ>]]) : string (deprecated)
+ This fetch works like the req.hdr() fetch with the difference that it acts
+ on the headers within an HTTP response.
+
+ Like req.hdr() the res.hdr() fetch considers the comma to be a delimiter. If
+ this is not desired res.fhdr() should be used.
+
+ It may be used in tcp-check based expect rules.
+
+ ACL derivatives :
+ res.hdr([<name>[,<occ>]]) : exact string match
+ res.hdr_beg([<name>[,<occ>]]) : prefix match
+ res.hdr_dir([<name>[,<occ>]]) : subdir match
+ res.hdr_dom([<name>[,<occ>]]) : domain match
+ res.hdr_end([<name>[,<occ>]]) : suffix match
+ res.hdr_len([<name>[,<occ>]]) : length match
+ res.hdr_reg([<name>[,<occ>]]) : regex match
+ res.hdr_sub([<name>[,<occ>]]) : substring match
+
+res.hdr_cnt([<name>]) : integer
+shdr_cnt([<name>]) : integer (deprecated)
+ This fetch works like the req.hdr_cnt() fetch with the difference that it
+ acts on the headers within an HTTP response.
+
+ Like req.hdr_cnt() the res.hdr_cnt() fetch considers the comma to be a
+ delimiter. If this is not desired res.fhdr_cnt() should be used.
+
+ It may be used in tcp-check based expect rules.
+
+res.hdr_ip([<name>[,<occ>]]) : ip
+shdr_ip([<name>[,<occ>]]) : ip (deprecated)
+ This fetch works like the req.hdr_ip() fetch with the difference that it
+ acts on the headers within an HTTP response.
+
+ This can be useful to learn some data into a stick table.
+
+ It may be used in tcp-check based expect rules.
+
+res.hdr_names([<delim>]) : string
+ This builds a string made from the concatenation of all header names as they
+ appear in the response when the rule is evaluated. The default delimiter is
+ the comma (',') but it may be overridden as an optional argument <delim>. In
+ this case, only the first character of <delim> is considered.
+
+ It may be used in tcp-check based expect rules.
+
+res.hdr_val([<name>[,<occ>]]) : integer
+shdr_val([<name>[,<occ>]]) : integer (deprecated)
+ This fetch works like the req.hdr_val() fetch with the difference that it
+ acts on the headers within an HTTP response.
+
+ This can be useful to learn some data into a stick table.
+
+ It may be used in tcp-check based expect rules.
+
+res.hdrs : string
+ Returns the current response headers as string including the last empty line
+ separating headers from the request body. The last empty line can be used to
+ detect a truncated header block. This sample fetch is useful for some SPOE
+ headers analyzers and for advanced logging.
+
+ It may also be used in tcp-check based expect rules.
+
+res.hdrs_bin : binary
+ Returns the current response headers contained in preparsed binary form. This
+ is useful for offloading some processing with SPOE. It may be used in
+ tcp-check based expect rules. Each string is described by a length followed
+ by the number of bytes indicated in the length. The length is represented
+ using the variable integer encoding detailed in the SPOE documentation. The
+ end of the list is marked by a couple of empty header names and values
+ (length of 0 for both).
+
+ *(<str:header-name><str:header-value>)<empty string><empty string>
+
+ int: refer to the SPOE documentation for the encoding
+ str: <int:length><bytes>
+
+res.ver : string
+resp_ver : string (deprecated)
+ Returns the version string from the HTTP response, for example "1.1". This
+ can be useful for logs, but is mostly there for ACL.
+
+ It may be used in tcp-check based expect rules.
+
+ ACL derivatives :
+      res.ver : exact string match
+
+server_status : integer
+ Return an integer containing the HTTP status code as received from the
+ server. If no response was received from the server, the sample fetch fails.
+
+set-cookie([<name>]) : string (deprecated)
+ This extracts the last occurrence of the cookie name <name> on a "Set-Cookie"
+ header line from the response and uses the corresponding value to match. This
+ can be comparable to what "appsession" did with default options, but with
+ support for multi-peer synchronization and state keeping across restarts.
+
+ This fetch function is deprecated and has been superseded by the "res.cook"
+ fetch. This keyword will disappear soon.
+
+status : integer
+ Returns an integer containing the HTTP status code in the HTTP response, for
+ example, 302. It is mostly used within ACLs and integer ranges, for example,
+ to remove any Location header if the response is not a 3xx. It will be the
+ status code received by the client if it is not changed, via a 'set-status'
+ action for instance.
+
+ It may be used in tcp-check based expect rules.
+
+txn.status : integer
+ Return an integer containing the HTTP status code of the transaction, as
+ reported in the log.
+
+txn.timer.total : integer
+ Total active time for the HTTP request, between the moment the proxy received
+ the first byte of the request header and the emission of the last byte of the
+ response body. This is the equivalent of %Ta in the log-format and is
+ reported in milliseconds (ms). For more information see Section 8.4 "Timing
+ events"
+
+unique-id : string
+ Returns the unique-id attached to the request. The directive
+ "unique-id-format" must be set. If it is not set, the unique-id sample fetch
+ fails. Note that the unique-id is usually used with HTTP requests, however this
+ sample fetch can be used with other protocols. Obviously, if it is used with
+ other protocols than HTTP, the unique-id-format directive must not contain
+ HTTP parts. See: unique-id-format and unique-id-header
+
+url : string
+ This extracts the request's URL as presented in the request. A typical use is
+ with prefetch-capable caches, and with portals which need to aggregate
+ multiple information from databases and keep them in caches. With ACLs, using
+ "path" is preferred over using "url", because clients may send a full URL as
+ is normally done with proxies. The only real use is to match "*" which does
+ not match in "path", and for which there is already a predefined ACL. See
+ also "path" and "base". Please note that any fragment reference in the URI
+ ('#' after the path) is strictly forbidden by the HTTP standard and will be
+ rejected. However, if the frontend receiving the request has "option
+ accept-invalid-http-request", then this fragment part will be accepted and
+ will also appear in the url.
+
+ ACL derivatives :
+ url : exact string match
+ url_beg : prefix match
+ url_dir : subdir match
+ url_dom : domain match
+ url_end : suffix match
+ url_len : length match
+ url_reg : regex match
+ url_sub : substring match
+
+url_ip : ip
+ This extracts the IP address from the request's URL when the host part is
+ presented as an IP address. Its use is very limited. For instance, a
+ monitoring system might use this field as an alternative for the source IP in
+ order to test what path a given source address would follow, or to force an
+ entry in a table for a given source address. It may be used in combination
+ with 'http-request set-dst' to emulate the older 'option http_proxy'.
+
+url_port : integer
+  This extracts the port part from the request's URL. Note that if the port is
+  not specified in the request, port 80 is assumed.
+
+urlp([<name>[,<delim>[,i]]]) : string
+url_param([<name>[,<delim>[,i]]]) : string
+ This extracts the first occurrence of the parameter <name> in the query
+ string, which begins after either '?' or <delim>, and which ends before '&',
+  ';' or <delim>. The parameter name is case-sensitive, unless "i" is added as a
+ third argument. If no name is given, any parameter will match, and the first
+ one will be returned. The result is a string corresponding to the value of the
+ parameter <name> as presented in the request (no URL decoding is performed).
+ This can be used for session stickiness based on a client ID, to extract an
+ application cookie passed as a URL parameter, or in ACLs to apply some checks.
+  Note that the ACL version of this fetch iterates over multiple parameters and
+  will iteratively report all parameter values if no name is given.
+
+ ACL derivatives :
+ urlp(<name>[,<delim>]) : exact string match
+ urlp_beg(<name>[,<delim>]) : prefix match
+ urlp_dir(<name>[,<delim>]) : subdir match
+ urlp_dom(<name>[,<delim>]) : domain match
+ urlp_end(<name>[,<delim>]) : suffix match
+ urlp_len(<name>[,<delim>]) : length match
+ urlp_reg(<name>[,<delim>]) : regex match
+ urlp_sub(<name>[,<delim>]) : substring match
+
+
+ Example :
+ # match http://example.com/foo?PHPSESSIONID=some_id
+ stick on urlp(PHPSESSIONID)
+ # match http://example.com/foo;JSESSIONID=some_id
+ stick on urlp(JSESSIONID,;)
+
+urlp_val([<name>[,<delim>[,i]]]) : integer
+ See "urlp" above. This one extracts the URL parameter <name> in the request
+ and converts it to an integer value. This can be used for session stickiness
+ based on a user ID for example, or with ACLs to match a page number or price.
+
+url32 : integer
+ This returns a 32-bit hash of the value obtained by concatenating the first
+ Host header and the whole URL including parameters (not only the path part of
+ the request, as in the "base32" fetch above). This is useful to track per-URL
+ activity. A shorter hash is stored, saving a lot of memory. The output type
+ is an unsigned integer.
+
+url32+src : binary
+ This returns the concatenation of the "url32" fetch and the "src" fetch. The
+ resulting type is of type binary, with a size of 8 or 20 bytes depending on
+ the source address family. This can be used to track per-IP, per-URL counters.
+
+
+7.3.7. Fetching samples for developers
+---------------------------------------
+
+This set of sample fetch methods is reserved to developers and must never be
+used on a production environment, except on developer demand, for debugging
+purposes. Moreover, no special care will be taken on backwards compatibility.
+There is no warranty the following sample fetches will never change, be renamed
+or simply removed. So be really careful if you should use one of them. To avoid
+any ambiguity, these sample fetches are placed in the dedicated scope "internal",
+for instance "internal.strm.is_htx".
+
+Summary of sample fetch methods in this section and their respective types:
+
+ keyword output type
+-------------------------------------------------+-------------
+internal.htx.data integer
+internal.htx.free integer
+internal.htx.free_data integer
+internal.htx.has_eom boolean
+internal.htx.nbblks integer
+internal.htx.size integer
+internal.htx.used integer
+internal.htx_blk.size(<idx>) integer
+internal.htx_blk.type(<idx>) string
+internal.htx_blk.data(<idx>) binary
+internal.htx_blk.hdrname(<idx>) string
+internal.htx_blk.hdrval(<idx>) string
+internal.htx_blk.start_line(<idx>) string
+internal.strm.is_htx boolean
+-------------------------------------------------+-------------
+
+Detailed list:
+
+internal.htx.data : integer
+ Returns the size in bytes used by data in the HTX message associated to a
+ channel. The channel is chosen depending on the sample direction.
+
+internal.htx.free : integer
+ Returns the free space (size - used) in bytes in the HTX message associated
+ to a channel. The channel is chosen depending on the sample direction.
+
+internal.htx.free_data : integer
+ Returns the free space for the data in bytes in the HTX message associated to
+ a channel. The channel is chosen depending on the sample direction.
+
+internal.htx.has_eom : boolean
+ Returns true if the HTX message associated to a channel contains the
+ end-of-message flag (EOM). Otherwise, it returns false. The channel is chosen
+ depending on the sample direction.
+
+internal.htx.nbblks : integer
+ Returns the number of blocks present in the HTX message associated to a
+ channel. The channel is chosen depending on the sample direction.
+
+internal.htx.size : integer
+ Returns the total size in bytes of the HTX message associated to a
+ channel. The channel is chosen depending on the sample direction.
+
+internal.htx.used : integer
+ Returns the total size used in bytes (data + metadata) in the HTX message
+ associated to a channel. The channel is chosen depending on the sample
+ direction.
+
+internal.htx_blk.size(<idx>) : integer
+ Returns the size of the block at the position <idx> in the HTX message
+ associated to a channel or 0 if it does not exist. The channel is chosen
+  depending on the sample direction. <idx> may be any positive integer or one
+  of the special values :
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.htx_blk.type(<idx>) : string
+ Returns the type of the block at the position <idx> in the HTX message
+ associated to a channel or "HTX_BLK_UNUSED" if it does not exist. The channel
+ is chosen depending on the sample direction. <idx> may be any positive
+  integer or one of the special values :
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.htx_blk.data(<idx>) : binary
+ Returns the value of the DATA block at the position <idx> in the HTX message
+ associated to a channel or an empty string if it does not exist or if it is
+ not a DATA block. The channel is chosen depending on the sample direction.
+  <idx> may be any positive integer or one of the special values :
+
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.htx_blk.hdrname(<idx>) : string
+ Returns the header name of the HEADER block at the position <idx> in the HTX
+ message associated to a channel or an empty string if it does not exist or if
+  it is not a HEADER block. The channel is chosen depending on the sample
+  direction. <idx> may be any positive integer or one of the special values :
+
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.htx_blk.hdrval(<idx>) : string
+ Returns the header value of the HEADER block at the position <idx> in the HTX
+ message associated to a channel or an empty string if it does not exist or if
+  it is not a HEADER block. The channel is chosen depending on the sample
+  direction. <idx> may be any positive integer or one of the special values :
+
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.htx_blk.start_line(<idx>) : string
+ Returns the value of the REQ_SL or RES_SL block at the position <idx> in the
+ HTX message associated to a channel or an empty string if it does not exist
+ or if it is not a SL block. The channel is chosen depending on the sample
+  direction. <idx> may be any positive integer or one of the special values :
+
+ * head : The oldest inserted block
+ * tail : The newest inserted block
+ * first : The first block where to (re)start the analysis
+
+internal.strm.is_htx : boolean
+ Returns true if the current stream is an HTX stream. It means the data in the
+ channels buffers are stored using the internal HTX representation. Otherwise,
+ it returns false.
+
+
+7.4. Pre-defined ACLs
+---------------------
+
+Some predefined ACLs are hard-coded so that they do not have to be declared in
+every frontend which needs them. They all have their names in upper case in
+order to avoid confusion. Their equivalence is provided below.
+
+ACL name Equivalent to Usage
+---------------+----------------------------------+------------------------------------------------------
+FALSE always_false never match
+HTTP req.proto_http match if request protocol is valid HTTP
+HTTP_1.0 req.ver 1.0 match if HTTP request version is 1.0
+HTTP_1.1 req.ver 1.1 match if HTTP request version is 1.1
+HTTP_2.0 req.ver 2.0 match if HTTP request version is 2.0
+HTTP_3.0 req.ver 3.0 match if HTTP request version is 3.0
+HTTP_CONTENT req.hdr_val(content-length) gt 0 match an existing content-length in the HTTP request
+HTTP_URL_ABS url_reg ^[^/:]*:// match absolute URL with scheme
+HTTP_URL_SLASH url_beg / match URL beginning with "/"
+HTTP_URL_STAR url * match URL equal to "*"
+LOCALHOST src 127.0.0.1/8 ::1 match connection from local host
+METH_CONNECT method CONNECT match HTTP CONNECT method
+METH_DELETE method DELETE match HTTP DELETE method
+METH_GET method GET HEAD match HTTP GET or HEAD method
+METH_HEAD method HEAD match HTTP HEAD method
+METH_OPTIONS method OPTIONS match HTTP OPTIONS method
+METH_POST method POST match HTTP POST method
+METH_PUT method PUT match HTTP PUT method
+METH_TRACE method TRACE match HTTP TRACE method
+RDP_COOKIE req.rdp_cookie_cnt gt 0 match presence of an RDP cookie in the request buffer
+REQ_CONTENT req.len gt 0 match data in the request buffer
+TRUE always_true always match
+WAIT_END wait_end wait for end of content analysis
+---------------+----------------------------------+------------------------------------------------------
+
+
+8. Logging
+----------
+
+One of HAProxy's strong points certainly lies in its precise logs. It probably
+provides the finest level of information available for such a product, which is
+very important for troubleshooting complex environments. Standard information
+provided in logs include client ports, TCP/HTTP state timers, precise stream
+state at termination and precise termination cause, information about decisions
+to direct traffic to a server, and of course the ability to capture arbitrary
+headers.
+
+In order to improve administrators' reactivity, it offers a great transparency
+about encountered problems, both internal and external, and it is possible to
+send logs to different sources at the same time with different level filters :
+
+ - global process-level logs (system errors, start/stop, etc..)
+ - per-instance system and internal errors (lack of resource, bugs, ...)
+ - per-instance external troubles (servers up/down, max connections)
+ - per-instance activity (client connections), either at the establishment or
+ at the termination.
+ - per-request control of log-level, e.g.
+ http-request set-log-level silent if sensitive_request
+
+The ability to distribute different levels of logs to different log servers
+allows several production teams to interact and to fix their problems as soon
+as possible. For example, the system team might monitor system-wide errors,
+while the application team might be monitoring the up/down for their servers in
+real time, and the security team might analyze the activity logs with one hour
+delay.
+
+
+8.1. Log levels
+---------------
+
+TCP and HTTP connections can be logged with information such as the date, time,
+source IP address, destination address, connection duration, response times,
+HTTP request, HTTP return code, number of bytes transmitted, conditions
+in which the stream ended, and even exchanged cookie values, for example to
+track a particular user's problems. All messages may be sent to up to two
+syslog servers. Check the "log" keyword in section 4.2 for more information
+about log facilities.
+
+
+8.2. Log formats
+----------------
+
+HAProxy supports 5 log formats. Several fields are common between these formats
+and will be detailed in the following sections. A few of them may vary
+slightly with the configuration, due to indicators specific to certain
+options. The supported formats are as follows :
+
+ - the default format, which is very basic and very rarely used. It only
+ provides very basic information about the incoming connection at the moment
+ it is accepted : source IP:port, destination IP:port, and frontend-name.
+ This mode will eventually disappear so it will not be described to great
+ extents.
+
+ - the TCP format, which is more advanced. This format is enabled when "option
+ tcplog" is set on the frontend. HAProxy will then usually wait for the
+ connection to terminate before logging. This format provides much richer
+ information, such as timers, connection counts, queue size, etc... This
+ format is recommended for pure TCP proxies.
+
+ - the HTTP format, which is the most advanced for HTTP proxying. This format
+ is enabled when "option httplog" is set on the frontend. It provides the
+ same information as the TCP format with some HTTP-specific fields such as
+ the request, the status code, and captures of headers and cookies. This
+ format is recommended for HTTP proxies.
+
+ - the CLF HTTP format, which is equivalent to the HTTP format, but with the
+ fields arranged in the same order as the CLF format. In this mode, all
+ timers, captures, flags, etc... appear one per field after the end of the
+ common fields, in the same order they appear in the standard HTTP format.
+
+ - the custom log format, allows you to make your own log line.
+
+Next sections will go deeper into details for each of these formats. Format
+specification will be performed on a "field" basis. Unless stated otherwise, a
+field is a portion of text delimited by any number of spaces. Since syslog
+servers may insert fields at the beginning of a line, it is
+always assumed that the first field is the one containing the process name and
+identifier.
+
+Note : Since log lines may be quite long, the log examples in sections below
+ might be broken into multiple lines. The example log lines will be
+ prefixed with 3 closing angle brackets ('>>>') and each time a log is
+ broken into multiple lines, each non-final line will end with a
+ backslash ('\') and the next line will start indented by two characters.
+
+
+8.2.1. Default log format
+-------------------------
+
+This format is used when no specific option is set. The log is emitted as soon
+as the connection is accepted. One should note that this currently is the only
+format which logs the request's destination IP and ports.
+
+ Example :
+ listen www
+ mode http
+ log global
+ server srv1 127.0.0.1:8000
+
+ >>> Feb 6 12:12:09 localhost \
+ haproxy[14385]: Connect from 10.0.1.2:33312 to 10.0.3.31:8012 \
+ (www/HTTP)
+
+ Field Format Extract from the example above
+ 1 process_name '[' pid ']:' haproxy[14385]:
+ 2 'Connect from' Connect from
+ 3 source_ip ':' source_port 10.0.1.2:33312
+ 4 'to' to
+ 5 destination_ip ':' destination_port 10.0.3.31:8012
+ 6 '(' frontend_name '/' mode ')' (www/HTTP)
+
+Detailed fields description :
+ - "source_ip" is the IP address of the client which initiated the connection.
+ - "source_port" is the TCP port of the client which initiated the connection.
+ - "destination_ip" is the IP address the client connected to.
+ - "destination_port" is the TCP port the client connected to.
+ - "frontend_name" is the name of the frontend (or listener) which received
+ and processed the connection.
+  - "mode" is the mode the frontend is operating in (TCP or HTTP).
+
+In case of a UNIX socket, the source and destination addresses are marked as
+"unix:" and the ports reflect the internal ID of the socket which accepted the
+connection (the same ID as reported in the stats).
+
+It is advised not to use this deprecated format for newer installations as it
+will eventually disappear.
+
+
+8.2.2. TCP log format
+---------------------
+
+The TCP format is used when "option tcplog" is specified in the frontend, and
+is the recommended format for pure TCP proxies. It provides a lot of precious
+information for troubleshooting. Since this format includes timers and byte
+counts, the log is normally emitted at the end of the session. It can be
+emitted earlier if "option logasap" is specified, which makes sense in most
+environments with long sessions such as remote terminals. Sessions which match
+the "monitor" rules are never logged. It is also possible not to emit logs for
+sessions for which no data were exchanged between the client and the server, by
+specifying "option dontlognull" in the frontend. Successful connections will
+not be logged if "option dontlog-normal" is specified in the frontend.
+
+The TCP log format is internally declared as a custom log format based on the
+exact following string, which may also be used as a basis to extend the format
+if required. Additionally the HAPROXY_TCP_LOG_FMT variable can be used instead.
+Refer to section 8.2.6 "Custom log format" to see how to use this:
+
+ # strict equivalent of "option tcplog"
+ log-format "%ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts \
+ %ac/%fc/%bc/%sc/%rc %sq/%bq"
+ # or using the HAPROXY_TCP_LOG_FMT variable
+ log-format "${HAPROXY_TCP_LOG_FMT}"
+
+A few fields may slightly vary depending on some configuration options, those
+are marked with a star ('*') after the field name below.
+
+ Example :
+ frontend fnt
+ mode tcp
+ option tcplog
+ log global
+ default_backend bck
+
+ backend bck
+ server srv1 127.0.0.1:8000
+
+ >>> Feb 6 12:12:56 localhost \
+ haproxy[14387]: 10.0.1.2:33313 [06/Feb/2009:12:12:51.443] fnt \
+ bck/srv1 0/0/5007 212 -- 0/0/0/0/3 0/0
+
+ Field Format Extract from the example above
+ 1 process_name '[' pid ']:' haproxy[14387]:
+ 2 client_ip ':' client_port 10.0.1.2:33313
+ 3 '[' accept_date ']' [06/Feb/2009:12:12:51.443]
+ 4 frontend_name fnt
+ 5 backend_name '/' server_name bck/srv1
+ 6 Tw '/' Tc '/' Tt* 0/0/5007
+ 7 bytes_read* 212
+ 8 termination_state --
+ 9 actconn '/' feconn '/' beconn '/' srv_conn '/' retries* 0/0/0/0/3
+ 10 srv_queue '/' backend_queue 0/0
+
+Detailed fields description :
+ - "client_ip" is the IP address of the client which initiated the TCP
+ connection to HAProxy. If the connection was accepted on a UNIX socket
+ instead, the IP address would be replaced with the word "unix". Note that
+ when the connection is accepted on a socket configured with "accept-proxy"
+ and the PROXY protocol is correctly used, or with a "accept-netscaler-cip"
+ and the NetScaler Client IP insertion protocol is correctly used, then the
+ logs will reflect the forwarded connection's information.
+
+ - "client_port" is the TCP port of the client which initiated the connection.
+ If the connection was accepted on a UNIX socket instead, the port would be
+ replaced with the ID of the accepting socket, which is also reported in the
+ stats interface.
+
+ - "accept_date" is the exact date when the connection was received by HAProxy
+ (which might be very slightly different from the date observed on the
+ network if there was some queuing in the system's backlog). This is usually
+ the same date which may appear in any upstream firewall's log. When used in
+ HTTP mode, the accept_date field will be reset to the first moment the
+ connection is ready to receive a new request (end of previous response for
+ HTTP/1, immediately after previous request for HTTP/2).
+
+ - "frontend_name" is the name of the frontend (or listener) which received
+ and processed the connection.
+
+ - "backend_name" is the name of the backend (or listener) which was selected
+ to manage the connection to the server. This will be the same as the
+ frontend if no switching rule has been applied, which is common for TCP
+ applications.
+
+ - "server_name" is the name of the last server to which the connection was
+ sent, which might differ from the first one if there were connection errors
+ and a redispatch occurred. Note that this server belongs to the backend
+ which processed the request. If the connection was aborted before reaching
+ a server, "<NOSRV>" is indicated instead of a server name.
+
+ - "Tw" is the total time in milliseconds spent waiting in the various queues.
+ It can be "-1" if the connection was aborted before reaching the queue.
+ See "Timers" below for more details.
+
+ - "Tc" is the total time in milliseconds spent waiting for the connection to
+ establish to the final server, including retries. It can be "-1" if the
+ connection was aborted before a connection could be established. See
+ "Timers" below for more details.
+
+ - "Tt" is the total time in milliseconds elapsed between the accept and the
+ last close. It covers all possible processing. There is one exception, if
+ "option logasap" was specified, then the time counting stops at the moment
+ the log is emitted. In this case, a '+' sign is prepended before the value,
+ indicating that the final one will be larger. See "Timers" below for more
+ details.
+
+ - "bytes_read" is the total number of bytes transmitted from the server to
+ the client when the log is emitted. If "option logasap" is specified,
+ this value will be prefixed with a '+' sign indicating that the final one
+ may be larger. Please note that this value is a 64-bit counter, so log
+ analysis tools must be able to handle it without overflowing.
+
+ - "termination_state" is the condition the session was in when the session
+ ended. This indicates the session state, which side caused the end of
+ session to happen, and for what reason (timeout, error, ...). The normal
+ flags should be "--", indicating the session was closed by either end with
+ no data remaining in buffers. See below "Stream state at disconnection"
+ for more details.
+
+ - "actconn" is the total number of concurrent connections on the process when
+ the session was logged. It is useful to detect when some per-process system
+ limits have been reached. For instance, if actconn is close to 512 when
+ multiple connection errors occur, chances are high that the system limits
+ the process to use a maximum of 1024 file descriptors and that all of them
+ are used. See section 3 "Global parameters" to find how to tune the system.
+
+ - "feconn" is the total number of concurrent connections on the frontend when
+ the session was logged. It is useful to estimate the amount of resource
+ required to sustain high loads, and to detect when the frontend's "maxconn"
+ has been reached. Most often when this value increases by huge jumps, it is
+ because there is congestion on the backend servers, but sometimes it can be
+ caused by a denial of service attack.
+
+ - "beconn" is the total number of concurrent connections handled by the
+ backend when the session was logged. It includes the total number of
+ concurrent connections active on servers as well as the number of
+ connections pending in queues. It is useful to estimate the amount of
+ additional servers needed to support high loads for a given application.
+ Most often when this value increases by huge jumps, it is because there is
+ congestion on the backend servers, but sometimes it can be caused by a
+ denial of service attack.
+
+ - "srv_conn" is the total number of concurrent connections still active on
+ the server when the session was logged. It can never exceed the server's
+ configured "maxconn" parameter. If this value is very often close or equal
+ to the server's "maxconn", it means that traffic regulation is involved a
+ lot, meaning that either the server's maxconn value is too low, or that
+ there aren't enough servers to process the load with an optimal response
+ time. When only one of the server's "srv_conn" is high, it usually means
+ that this server has some trouble causing the connections to take longer to
+ be processed than on other servers.
+
+ - "retries" is the number of connection retries experienced by this session
+ when trying to connect to the server. It must normally be zero, unless a
+ server is being stopped at the same moment the connection was attempted.
+ Frequent retries generally indicate either a network problem between
+ HAProxy and the server, or a misconfigured system backlog on the server
+ preventing new connections from being queued. This field may optionally be
+ prefixed with a '+' sign, indicating that the session has experienced a
+ redispatch after the maximal retry count has been reached on the initial
+ server. In this case, the server name appearing in the log is the one the
+ connection was redispatched to, and not the first one, though both may
+ sometimes be the same in case of hashing for instance. So as a general rule
+ of thumb, when a '+' is present in front of the retry count, this count
+ should not be attributed to the logged server.
+
+ - "srv_queue" is the total number of requests which were processed before
+ this one in the server queue. It is zero when the request has not gone
+ through the server queue. It makes it possible to estimate the approximate
+ server's response time by dividing the time spent in queue by the number of
+ requests in the queue. It is worth noting that if a session experiences a
+ redispatch and passes through two server queues, their positions will be
+ cumulative. A request should not pass through both the server queue and the
+ backend queue unless a redispatch occurs.
+
+ - "backend_queue" is the total number of requests which were processed before
+ this one in the backend's global queue. It is zero when the request has not
+ gone through the global queue. It makes it possible to estimate the average
+ queue length, which easily translates into a number of missing servers when
+ divided by a server's "maxconn" parameter. It is worth noting that if a
+ session experiences a redispatch, it may pass twice in the backend's queue,
+ and then both positions will be cumulative. A request should not pass
+ through both the server queue and the backend queue unless a redispatch
+ occurs.
+
+
+8.2.3. HTTP log format
+----------------------
+
+The HTTP format is the most complete and the best suited for HTTP proxies. It
+is enabled when "option httplog" is specified in the frontend. It provides
+the same level of information as the TCP format with additional features which
+are specific to the HTTP protocol. Just like the TCP format, the log is usually
+emitted at the end of the stream, unless "option logasap" is specified, which
+generally only makes sense for download sites. A stream which matches the
+"monitor" rules will never be logged. It is also possible not to log streams
+for
+which no data were sent by the client by specifying "option dontlognull" in the
+frontend. Successful connections will not be logged if "option dontlog-normal"
+is specified in the frontend.
+
+The HTTP log format is internally declared as a custom log format based on the
+exact following string, which may also be used as a basis to extend the format
+if required. Additionally the HAPROXY_HTTP_LOG_FMT variable can be used
+instead. Refer to section 8.2.6 "Custom log format" to see how to use this:
+
+ # strict equivalent of "option httplog"
+ log-format "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC \
+ %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+
+And the CLF log format is internally declared as a custom log format based on
+this exact string:
+
+ # strict equivalent of "option httplog clf"
+ log-format "%{+Q}o %{-Q}ci - - [%trg] %r %ST %B \"\" \"\" %cp \
+ %ms %ft %b %s %TR %Tw %Tc %Tr %Ta %tsc %ac %fc \
+ %bc %sc %rc %sq %bq %CC %CS %hrl %hsl"
+ # or using the HAPROXY_HTTP_LOG_FMT variable
+ log-format "${HAPROXY_HTTP_LOG_FMT}"
+
+Most fields are shared with the TCP log, some being different. A few fields may
+slightly vary depending on some configuration options. Those ones are marked
+with a star ('*') after the field name below.
+
+ Example :
+ frontend http-in
+ mode http
+ option httplog
+ log global
+ default_backend bck
+
+ backend static
+ server srv1 127.0.0.1:8000
+
+ >>> Feb 6 12:14:14 localhost \
+ haproxy[14389]: 10.0.1.2:33317 [06/Feb/2009:12:14:14.655] http-in \
+ static/srv1 10/0/30/69/109 200 2750 - - ---- 1/1/1/1/0 0/0 {1wt.eu} \
+ {} "GET /index.html HTTP/1.1"
+
+ Field Format Extract from the example above
+ 1 process_name '[' pid ']:' haproxy[14389]:
+ 2 client_ip ':' client_port 10.0.1.2:33317
+ 3 '[' request_date ']' [06/Feb/2009:12:14:14.655]
+ 4 frontend_name http-in
+ 5 backend_name '/' server_name static/srv1
+ 6 TR '/' Tw '/' Tc '/' Tr '/' Ta* 10/0/30/69/109
+ 7 status_code 200
+ 8 bytes_read* 2750
+ 9 captured_request_cookie -
+ 10 captured_response_cookie -
+ 11 termination_state ----
+ 12 actconn '/' feconn '/' beconn '/' srv_conn '/' retries* 1/1/1/1/0
+ 13 srv_queue '/' backend_queue 0/0
+ 14 '{' captured_request_headers* '}' {1wt.eu}
+ 15 '{' captured_response_headers* '}' {}
+ 16 '"' http_request '"' "GET /index.html HTTP/1.1"
+
+Detailed fields description :
+ - "client_ip" is the IP address of the client which initiated the TCP
+ connection to HAProxy. If the connection was accepted on a UNIX socket
+ instead, the IP address would be replaced with the word "unix". Note that
+ when the connection is accepted on a socket configured with "accept-proxy"
+ and the PROXY protocol is correctly used, or with an "accept-netscaler-cip"
+ and the NetScaler Client IP insertion protocol is correctly used, then the
+ logs will reflect the forwarded connection's information.
+
+ - "client_port" is the TCP port of the client which initiated the connection.
+ If the connection was accepted on a UNIX socket instead, the port would be
+ replaced with the ID of the accepting socket, which is also reported in the
+ stats interface.
+
+ - "request_date" is the exact date when the first byte of the HTTP request
+ was received by HAProxy (log field %tr).
+
+ - "frontend_name" is the name of the frontend (or listener) which received
+ and processed the connection.
+
+ - "backend_name" is the name of the backend (or listener) which was selected
+ to manage the connection to the server. This will be the same as the
+ frontend if no switching rule has been applied.
+
+ - "server_name" is the name of the last server to which the connection was
+ sent, which might differ from the first one if there were connection errors
+ and a redispatch occurred. Note that this server belongs to the backend
+ which processed the request. If the request was aborted before reaching a
+ server, "<NOSRV>" is indicated instead of a server name. If the request was
+ intercepted by the stats subsystem, "<STATS>" is indicated instead.
+
+ - "TR" is the total time in milliseconds spent waiting for a full HTTP
+ request from the client (not counting body) after the first byte was
+ received. It can be "-1" if the connection was aborted before a complete
+ request could be received or a bad request was received. It should
+ always be very small because a request generally fits in one single packet.
+ Large times here generally indicate network issues between the client and
+ HAProxy or requests being typed by hand. See section 8.4 "Timing Events"
+ for more details.
+
+ - "Tw" is the total time in milliseconds spent waiting in the various queues.
+ It can be "-1" if the connection was aborted before reaching the queue.
+ See section 8.4 "Timing Events" for more details.
+
+ - "Tc" is the total time in milliseconds spent waiting for the connection to
+ establish to the final server, including retries. It can be "-1" if the
+ request was aborted before a connection could be established. See section
+ 8.4 "Timing Events" for more details.
+
+ - "Tr" is the total time in milliseconds spent waiting for the server to send
+ a full HTTP response, not counting data. It can be "-1" if the request was
+ aborted before a complete response could be received. It generally matches
+ the server's processing time for the request, though it may be altered by
+ the amount of data sent by the client to the server. Large times here on
+ "GET" requests generally indicate an overloaded server. See section 8.4
+ "Timing Events" for more details.
+
+ - "Ta" is the time the request remained active in HAProxy, which is the total
+ time in milliseconds elapsed between the first byte of the request was
+ received and the last byte of response was sent. It covers all possible
+ processing except the handshake (see Th) and idle time (see Ti). There is
+ one exception, if "option logasap" was specified, then the time counting
+ stops at the moment the log is emitted. In this case, a '+' sign is
+ prepended before the value, indicating that the final one will be larger.
+ See section 8.4 "Timing Events" for more details.
+
+ - "status_code" is the HTTP status code returned to the client. This status
+ is generally set by the server, but it might also be set by HAProxy when
+ the server cannot be reached or when its response is blocked by HAProxy.
+
+ - "bytes_read" is the total number of bytes transmitted to the client when
+ the log is emitted. This does include HTTP headers. If "option logasap" is
+ specified, this value will be prefixed with a '+' sign indicating that
+ the final one may be larger. Please note that this value is a 64-bit
+ counter, so log analysis tools must be able to handle it without
+ overflowing.
+
+ - "captured_request_cookie" is an optional "name=value" entry indicating that
+ the client had this cookie in the request. The cookie name and its maximum
+ length are defined by the "capture cookie" statement in the frontend
+ configuration. The field is a single dash ('-') when the option is not
+ set. Only one cookie may be captured, it is generally used to track session
+ ID exchanges between a client and a server to detect session crossing
+ between clients due to application bugs. For more details, please consult
+ the section "Capturing HTTP headers and cookies" below.
+
+ - "captured_response_cookie" is an optional "name=value" entry indicating
+ that the server has returned a cookie with its response. The cookie name
+ and its maximum length are defined by the "capture cookie" statement in the
+ frontend configuration. The field is a single dash ('-') when the option is
+ not set. Only one cookie may be captured, it is generally used to track
+ session ID exchanges between a client and a server to detect session
+ crossing between clients due to application bugs. For more details, please
+ consult the section "Capturing HTTP headers and cookies" below.
+
+ - "termination_state" is the condition the stream was in when the stream
+ ended. This indicates the stream state, which side caused the end of
+ stream to happen, for what reason (timeout, error, ...), just like in TCP
+ logs, and information about persistence operations on cookies in the last
+ two characters. The normal flags should begin with "--", indicating the
+ stream was closed by either end with no data remaining in buffers. See
+ below "Stream state at disconnection" for more details.
+
+ - "actconn" is the total number of concurrent connections on the process when
+ the stream was logged. It is useful to detect when some per-process system
+ limits have been reached. For instance, if actconn is close to 512 or 1024
+ when multiple connection errors occur, chances are high that the system
+ limits the process to use a maximum of 1024 file descriptors and that all
+ of them are used. See section 3 "Global parameters" to find how to tune the
+ system.
+
+ - "feconn" is the total number of concurrent connections on the frontend when
+ the stream was logged. It is useful to estimate the amount of resource
+ required to sustain high loads, and to detect when the frontend's "maxconn"
+ has been reached. Most often when this value increases by huge jumps, it is
+ because there is congestion on the backend servers, but sometimes it can be
+ caused by a denial of service attack.
+
+ - "beconn" is the total number of concurrent connections handled by the
+ backend when the stream was logged. It includes the total number of
+ concurrent connections active on servers as well as the number of
+ connections pending in queues. It is useful to estimate the amount of
+ additional servers needed to support high loads for a given application.
+ Most often when this value increases by huge jumps, it is because there is
+ congestion on the backend servers, but sometimes it can be caused by a
+ denial of service attack.
+
+ - "srv_conn" is the total number of concurrent connections still active on
+ the server when the stream was logged. It can never exceed the server's
+ configured "maxconn" parameter. If this value is very often close or equal
+ to the server's "maxconn", it means that traffic regulation is involved a
+ lot, meaning that either the server's maxconn value is too low, or that
+ there aren't enough servers to process the load with an optimal response
+ time. When only one of the server's "srv_conn" is high, it usually means
+ that this server has some trouble causing the requests to take longer to be
+ processed than on other servers.
+
+ - "retries" is the number of connection retries experienced by this stream
+ when trying to connect to the server. It must normally be zero, unless a
+ server is being stopped at the same moment the connection was attempted.
+ Frequent retries generally indicate either a network problem between
+ HAProxy and the server, or a misconfigured system backlog on the server
+ preventing new connections from being queued. This field may optionally be
+ prefixed with a '+' sign, indicating that the stream has experienced a
+ redispatch after the maximal retry count has been reached on the initial
+ server. In this case, the server name appearing in the log is the one the
+ connection was redispatched to, and not the first one, though both may
+ sometimes be the same in case of hashing for instance. So as a general rule
+ of thumb, when a '+' is present in front of the retry count, this count
+ should not be attributed to the logged server.
+
+ - "srv_queue" is the total number of requests which were processed before
+ this one in the server queue. It is zero when the request has not gone
+ through the server queue. It makes it possible to estimate the approximate
+ server's response time by dividing the time spent in queue by the number of
+ requests in the queue. It is worth noting that if a stream experiences a
+ redispatch and passes through two server queues, their positions will be
+ cumulative. A request should not pass through both the server queue and the
+ backend queue unless a redispatch occurs.
+
+ - "backend_queue" is the total number of requests which were processed before
+ this one in the backend's global queue. It is zero when the request has not
+ gone through the global queue. It makes it possible to estimate the average
+ queue length, which easily translates into a number of missing servers when
+ divided by a server's "maxconn" parameter. It is worth noting that if a
+ stream experiences a redispatch, it may pass twice in the backend's queue,
+ and then both positions will be cumulative. A request should not pass
+ through both the server queue and the backend queue unless a redispatch
+ occurs.
+
+ - "captured_request_headers" is a list of headers captured in the request due
+ to the presence of the "capture request header" statement in the frontend.
+ Multiple headers can be captured, they will be delimited by a vertical bar
+ ('|'). When no capture is enabled, the braces do not appear, causing a
+ shift of remaining fields. It is important to note that this field may
+ contain spaces, and that using it requires a smarter log parser than when
+ it's not used. Please consult the section "Capturing HTTP headers and
+ cookies" below for more details.
+
+ - "captured_response_headers" is a list of headers captured in the response
+ due to the presence of the "capture response header" statement in the
+ frontend. Multiple headers can be captured, they will be delimited by a
+ vertical bar ('|'). When no capture is enabled, the braces do not appear,
+ causing a shift of remaining fields. It is important to note that this
+ field may contain spaces, and that using it requires a smarter log parser
+ than when it's not used. Please consult the section "Capturing HTTP headers
+ and cookies" below for more details.
+
+ - "http_request" is the complete HTTP request line, including the method,
+ request and HTTP version string. Non-printable characters are encoded (see
+ below the section "Non-printable characters"). This is always the last
+ field, and it is always delimited by quotes and is the only one which can
+ contain quotes. If new fields are added to the log format, they will be
+ added before this field. This field might be truncated if the request is
+ huge and does not fit in the standard syslog buffer (1024 characters). This
+ is the reason why this field must always remain the last one.
+
+
+8.2.4. HTTPS log format
+-----------------------
+
+The HTTPS format is the best suited for HTTP over SSL connections. It is an
+extension of the HTTP format (see section 8.2.3) to which SSL related
+information are added. It is enabled when "option httpslog" is specified in the
+frontend. Just like the TCP and HTTP formats, the log is usually emitted at the
+end of the stream, unless "option logasap" is specified. A stream which
+matches the "monitor" rules will never be logged. It is also possible not to
+log streams for which no data were sent by the client by specifying "option
+dontlognull" in the frontend. Successful connections will not be logged if
+"option dontlog-normal" is specified in the frontend.
+
+The HTTPS log format is internally declared as a custom log format based on the
+exact following string, which may also be used as a basis to extend the format
+if required. Additionally the HAPROXY_HTTPS_LOG_FMT variable can be used
+instead. Refer to section 8.2.6 "Custom log format" to see how to use this:
+
+ # strict equivalent of "option httpslog"
+ log-format "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC \
+ %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r \
+ %[fc_err]/%[ssl_fc_err,hex]/%[ssl_c_err]/\
+ %[ssl_c_ca_err]/%[ssl_fc_is_resumed] %[ssl_fc_sni]/%sslv/%sslc"
+ # or using the HAPROXY_HTTPS_LOG_FMT variable
+ log-format "${HAPROXY_HTTPS_LOG_FMT}"
+
+This format is basically the HTTP one (see section 8.2.3) with new fields
+appended to it. The new fields (lines 17 and 18) will be detailed here. For the
+HTTP ones, refer to the HTTP section.
+
+ Example :
+ frontend https-in
+ mode http
+ option httpslog
+ log global
+ bind *:443 ssl crt mycerts/srv.pem ...
+ default_backend bck
+
+ backend static
+ server srv1 127.0.0.1:8000 ssl crt mycerts/clt.pem ...
+
+ >>> Feb 6 12:14:14 localhost \
+ haproxy[14389]: 10.0.1.2:33317 [06/Feb/2009:12:14:14.655] https-in \
+ static/srv1 10/0/30/69/109 200 2750 - - ---- 1/1/1/1/0 0/0 {1wt.eu} \
+ {} "GET /index.html HTTP/1.1" 0/0/0/0/0 \
+ 1wt.eu/TLSv1.3/TLS_AES_256_GCM_SHA384
+
+ Field Format Extract from the example above
+ 1 process_name '[' pid ']:' haproxy[14389]:
+ 2 client_ip ':' client_port 10.0.1.2:33317
+ 3 '[' request_date ']' [06/Feb/2009:12:14:14.655]
+ 4 frontend_name https-in
+ 5 backend_name '/' server_name static/srv1
+ 6 TR '/' Tw '/' Tc '/' Tr '/' Ta* 10/0/30/69/109
+ 7 status_code 200
+ 8 bytes_read* 2750
+ 9 captured_request_cookie -
+ 10 captured_response_cookie -
+ 11 termination_state ----
+ 12 actconn '/' feconn '/' beconn '/' srv_conn '/' retries* 1/1/1/1/0
+ 13 srv_queue '/' backend_queue 0/0
+ 14 '{' captured_request_headers* '}' {1wt.eu}
+ 15 '{' captured_response_headers* '}' {}
+ 16 '"' http_request '"' "GET /index.html HTTP/1.1"
+ 17 fc_err '/' ssl_fc_err '/' ssl_c_err
+ '/' ssl_c_ca_err '/' ssl_fc_is_resumed 0/0/0/0/0
+ 18 ssl_fc_sni '/' ssl_version
+ '/' ssl_ciphers 1wt.eu/TLSv1.3/TLS_AES_256_GCM_SHA384
+
+Detailed fields description :
+ - "fc_err" is the status of the connection on the frontend's side. It
+ corresponds to the "fc_err" sample fetch. See the "fc_err" and "fc_err_str"
+ sample fetch functions for more information.
+
+ - "ssl_fc_err" is the last error of the first SSL error stack that was
+ raised on the connection from the frontend's perspective. It might be used
+ to detect SSL handshake errors for instance. It will be 0 if everything
+ went well. See the "ssl_fc_err" sample fetch's description for more
+ information.
+
+ - "ssl_c_err" is the status of the client's certificate verification process.
+ The handshake might be successful while having a non-null verification
+ error code if it is an ignored one. See the "ssl_c_err" sample fetch and
+ the "crt-ignore-err" option.
+
+ - "ssl_c_ca_err" is the status of the client's certificate chain verification
+ process. The handshake might be successful while having a non-null
+ verification error code if it is an ignored one. See the "ssl_c_ca_err"
+ sample fetch and the "ca-ignore-err" option.
+
+ - "ssl_fc_is_resumed" is true if the incoming TLS session was resumed with
+ the stateful cache or a stateless ticket. Don't forget that a TLS session
+ can be shared by multiple requests.
+
+ - "ssl_fc_sni" is the SNI (Server Name Indication) presented by the client
+ to select the certificate to be used. It usually matches the host name for
+ the first request of a connection. An absence of this field may indicate
+ that the SNI was not sent by the client, and will lead haproxy to use the
+ default certificate, or to reject the connection in case of strict-sni.
+
+ - "ssl_version" is the SSL version of the frontend.
+
+ - "ssl_ciphers" is the SSL cipher used for the connection.
+
+
+8.2.5. Error log format
+-----------------------
+
+When an incoming connection fails due to an SSL handshake or an invalid PROXY
+protocol header, HAProxy will log the event using a shorter, fixed line format,
+unless a dedicated error log format is defined through an "error-log-format"
+line. By default, logs are emitted at the LOG_INFO level, unless the option
+"log-separate-errors" is set in the backend, in which case the LOG_ERR level
+will be used. Connections on which no data are exchanged (e.g. probes) are not
+logged if the "dontlognull" option is set.
+
+The default format looks like this :
+
+ >>> Dec 3 18:27:14 localhost \
+ haproxy[6103]: 127.0.0.1:56059 [03/Dec/2012:17:35:10.380] frt/f1: \
+ Connection error during SSL handshake
+
+ Field Format Extract from the example above
+ 1 process_name '[' pid ']:' haproxy[6103]:
+ 2 client_ip ':' client_port 127.0.0.1:56059
+ 3 '[' accept_date ']' [03/Dec/2012:17:35:10.380]
+ 4 frontend_name "/" bind_name ":" frt/f1:
+ 5 message Connection error during SSL handshake
+
+These fields just provide minimal information to help debugging connection
+failures.
+
+By using the "error-log-format" directive, the legacy log format described
+above will not be used anymore, and all error log lines will follow the
+defined format.
+
+An example of reasonably complete error-log-format follows, it will report the
+source address and port, the connection accept() date, the frontend name, the
+number of active connections on the process and on this frontend, haproxy's
+internal error identifier on the front connection, the hexadecimal OpenSSL
+error number (that can be copy-pasted to "openssl errstr" for full decoding),
+the client certificate extraction status (0 indicates no error), the client
+certificate validation status using the CA (0 indicates no error), a boolean
+indicating if the connection is new or was resumed, the optional server name
+indication (SNI) provided by the client, the SSL version name and the SSL
+ciphers used on the connection, if any. Note that backend connection errors
+are never reported here since in order for a backend connection to fail, it
+would have passed through a successful stream, hence will be available as
+regular traffic log (see option httplog or option httpslog).
+
+ # detailed frontend connection error log
+ error-log-format "%ci:%cp [%tr] %ft %ac/%fc %[fc_err]/\
+ %[ssl_fc_err,hex]/%[ssl_c_err]/%[ssl_c_ca_err]/%[ssl_fc_is_resumed] \
+ %[ssl_fc_sni]/%sslv/%sslc"
+
+
+8.2.6. Custom log format
+------------------------
+
+When the default log formats are not sufficient, it is possible to define new
+ones in very fine details. As creating a log-format from scratch is not always
+a trivial task, it is strongly recommended to first have a look at the existing
+formats ("option tcplog", "option httplog", "option httpslog"), pick the one
+looking the closest to the expectation, copy its "log-format" equivalent string
+and adjust it.
+
+HAProxy understands some log format variables. % precedes log format variables.
+Variables can take arguments using braces ('{}'), and multiple arguments are
+separated by commas within the braces. Flags may be added or removed by
+prefixing them with a '+' or '-' sign.
+
+Special variable "%o" may be used to propagate its flags to all other
+variables on the same format string. This is particularly handy with quoted
+("Q") and escaped ("E") string formats.
+
+If a variable is named between square brackets ('[' .. ']') then it is used
+as a sample expression rule (see section 7.3). This is useful to add some
+less common information such as the client's SSL certificate's DN, or to log
+the key that would be used to store an entry into a stick table.
+
+Note: spaces must be escaped. In configuration directives "log-format",
+"log-format-sd" and "unique-id-format", spaces are considered as
+delimiters and are merged. In order to emit a verbatim '%', it must be
+preceded by another '%' resulting in '%%'.
+
+Note: when using the RFC5424 syslog message format, the characters '"',
+'\' and ']' inside PARAM-VALUE should be escaped with '\' as prefix (see
+https://tools.ietf.org/html/rfc5424#section-6.3.3 for more details). In
+such cases, the use of the flag "E" should be considered.
+
+Flags are :
+ * Q: quote a string
+ * X: hexadecimal representation (IPs, Ports, %Ts, %rt, %pid)
+ * E: escape characters '"', '\' and ']' in a string with '\' as prefix
+ (intended purpose is for the RFC5424 structured-data log formats)
+
+ Example:
+
+ log-format %T\ %t\ Some\ Text
+ log-format %{+Q}o\ %t\ %s\ %{-Q}r
+
+ log-format-sd %{+Q,+E}o\ [exampleSDID@1234\ header=%[capture.req.hdr(0)]]
+
+Please refer to the table below for currently defined variables :
+
+ +---+------+------------------------------------------------------+---------+
+ | R | var | field name (8.2.2 and 8.2.3 for description) | type |
+ | | | sample fetch alternative | |
+ +===+======+======================================================+=========+
+ | | %o | special variable, apply flags on all next var | |
+ +---+------+------------------------------------------------------+---------+
+ | date formats |
+ +---+------+------------------------------------------------------+---------+
+ | | %T | Accept date UTC + timezone | |
+ | | | %[accept_date,utime("%d/%b/%Y:%H:%M:%S %z")] | date |
+ +---+------+------------------------------------------------------+---------+
+ | | %Tl | Accept date local + timezone | |
+ | | | %[accept_date,ltime("%d/%b/%Y:%H:%M:%S %z")] | date |
+ +---+------+------------------------------------------------------+---------+
+ | | %Ts | Accept date as a UNIX timestamp | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %t | Accept date local (with millisecond resolution) | |
+ | | | %[accept_date(ms),ms_ltime("%d/%b/%Y:%H:%M:%S.%3N")] | date |
+ +---+------+------------------------------------------------------+---------+
+ | | %ms | Accept date milliseconds | |
+ | | | %[accept_date(ms),ms_utime("%3N")] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %tr | Request date local (with millisecond resolution) | |
+ | | | %[request_date(ms),ms_ltime("%d/%b/%Y:%H:%M:%S.%3N")]| date |
+ +---+------+------------------------------------------------------+---------+
+ | H | %trg | Request date UTC + timezone | |
+ | | | %[request_date,utime("%d/%b/%Y:%H:%M:%S %z")] | date |
+ +---+------+------------------------------------------------------+---------+
+ | H | %trl | Request date local + timezone | |
+ | | | %[request_date,ltime("%d/%b/%Y:%H:%M:%S %z")] | date |
+ +---+------+------------------------------------------------------+---------+
+ | Timing events |
+ +---+------+------------------------------------------------------+---------+
+ | H | %Ta | Active time of the request (from TR to end) | |
+ | | | %[txn.timer.total] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Tc | Tc | |
+ | | | %[bc.timer.connect] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Td | Td = Tt - (Tq + Tw + Tc + Tr) | |
+ | | | %[res.timer.data] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Th | connection handshake time (SSL, PROXY proto) | |
+ | | | %[fc.timer.handshake] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %Ti | idle time before the HTTP request | |
+ | | | %[req.timer.idle] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %Tq | Th + Ti + TR | |
+ | | | %[req.timer.tq] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %TR | time to receive the full request from 1st byte | |
+ | | | %[req.timer.hdr] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %Tr | Tr (response time) | |
+ | | | %[res.timer.hdr] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Tt | Tt | |
+ | | | %[fc.timer.total] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Tu | Tu | |
+ | | | %[txn.timer.user] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %Tw | Tw | |
+ | | | %[req.timer.queue] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | Others |
+ +---+------+------------------------------------------------------+---------+
+ | | %B | bytes_read (from server to client) | numeric |
+ | | | %[bytes_out] | |
+ +---+------+------------------------------------------------------+---------+
+ | H | %CC | captured_request_cookie | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %CS | captured_response_cookie | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %H | hostname | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HM | HTTP method (ex: POST) | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HP | HTTP request URI without query string | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HPO | HTTP path only (without host nor query string) | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HQ | HTTP request URI query string (ex: ?bar=baz) | string |
+ | | | ?%[query] | |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HU | HTTP request URI (ex: /foo?bar=baz) | string |
+ +---+------+------------------------------------------------------+---------+
+ | H | %HV | HTTP version (ex: HTTP/1.0) | string |
+ | | | HTTP/%[req.ver] | |
+ +---+------+------------------------------------------------------+---------+
+ | | %ID | unique-id | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %ST | status_code | numeric |
+ | | | %[txn.status] | |
+ +---+------+------------------------------------------------------+---------+
+ | | %U | bytes_uploaded (from client to server) | numeric |
+ | | | %[bytes_in] | |
+ +---+------+------------------------------------------------------+---------+
+ | | %ac | actconn | |
+ | | | %[act_conn] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %b | backend_name | |
+ | | | %[be_name] | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %bc | beconn (backend concurrent connections) | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %bi | backend_source_ip (connecting address) | |
+ | | | %[bc_src] | IP |
+ +---+------+------------------------------------------------------+---------+
+ | | %bp | backend_source_port (connecting address) | |
+ | | | %[bc_src_port] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %bq | backend_queue | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %ci | client_ip (accepted address) | |
+ | | | %[src] | IP |
+ +---+------+------------------------------------------------------+---------+
+ | | %cp | client_port (accepted address) | |
+ | | | %[src_port] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %f | frontend_name | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %fc | feconn (frontend concurrent connections) | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %fi | frontend_ip (accepting address) | |
+ | | | %[dst] | IP |
+ +---+------+------------------------------------------------------+---------+
+ | | %fp | frontend_port (accepting address) | |
+ | | | %[dst_port] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %ft | frontend_name_transport ('~' suffix for SSL) | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %lc | frontend_log_counter | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %hr | captured_request_headers default style | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %hrl | captured_request_headers CLF style | string |
+ | | | | list |
+ +---+------+------------------------------------------------------+---------+
+ | | %hs | captured_response_headers default style | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %hsl | captured_response_headers CLF style | string |
+ | | | | list |
+ +---+------+------------------------------------------------------+---------+
+ | | %pid | PID | |
+ | | | %[pid] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | H | %r | http_request | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %rc | retries | numeric |
+ | | | %[txn.conn_retries] | |
+ +---+------+------------------------------------------------------+---------+
+ | | %rt | request_counter (HTTP req or TCP session) | numeric |
+ | | | %[txn.id32] | |
+ +---+------+------------------------------------------------------+---------+
+ | | %s | server_name | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %sc | srv_conn (server concurrent connections) | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %si | server_IP (target address) | |
+ | | | %[bc_dst] | IP |
+ +---+------+------------------------------------------------------+---------+
+ | | %sp | server_port (target address) | |
+ | | | %[bc_dst_port] | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | | %sq | srv_queue | numeric |
+ +---+------+------------------------------------------------------+---------+
+ | S | %sslc| ssl_ciphers (ex: AES-SHA) | |
+ | | | %[ssl_fc_cipher] | string |
+ +---+------+------------------------------------------------------+---------+
+ | S | %sslv| ssl_version (ex: TLSv1) | |
+ | | | %[ssl_fc_protocol] | string |
+ +---+------+------------------------------------------------------+---------+
+ | | %ts | termination_state | string |
+ | | | %[txn.sess_term_state] | |
+ +---+------+------------------------------------------------------+---------+
+ | H | %tsc | termination_state with cookie status | string |
+ +---+------+------------------------------------------------------+---------+
+
+ R = Restrictions : H = mode http only ; S = SSL only
+
+
+8.3. Advanced logging options
+-----------------------------
+
+Some advanced logging options are often looked for but are not easy to find out
+just by looking at the various options. Here is an entry point for the few
+options which can enable better logging. Please refer to the keywords reference
+for more information about their usage.
+
+
+8.3.1. Disabling logging of external tests
+------------------------------------------
+
+It is quite common to have some monitoring tools perform health checks on
+HAProxy. Sometimes it will be a layer 3 load-balancer such as LVS or any
+commercial load-balancer, and sometimes it will simply be a more complete
+monitoring system such as Nagios. When the tests are very frequent, users often
+ask how to disable logging for those checks. There are three possibilities :
+
+ - if connections come from everywhere and are just TCP probes, it is often
+ desired to simply disable logging of connections without data exchange, by
+ setting "option dontlognull" in the frontend. It also disables logging of
+ port scans, which may or may not be desired.
+
+ - it is possible to use the "http-request set-log-level silent" action using
+ a variety of conditions (source networks, paths, user-agents, etc).
+
+ - if the tests are performed on a known URI, use "monitor-uri" to declare
+ this URI as dedicated to monitoring. Any host sending this request will
+ only get the result of a health-check, and the request will not be logged.
+
+
+8.3.2. Logging before waiting for the stream to terminate
+----------------------------------------------------------
+
+The problem with logging at end of connection is that you have no clue about
+what is happening during very long streams, such as remote terminal sessions
+or large file downloads. This problem can be worked around by specifying
+"option logasap" in the frontend. HAProxy will then log as soon as possible,
+just before data transfer begins. This means that in case of TCP, it will still
+log the connection status to the server, and in case of HTTP, it will log just
+after processing the server headers. In this case, the number of bytes reported
+is the number of header bytes sent to the client. In order to avoid confusion
+with normal logs, the total time field and the number of bytes are prefixed
+with a '+' sign which means that real numbers are certainly larger.
+
+
+8.3.3. Raising log level upon errors
+------------------------------------
+
+Sometimes it is more convenient to separate normal traffic from errors logs,
+for instance in order to ease error monitoring from log files. When the option
+"log-separate-errors" is used, connections which experience errors, timeouts,
+retries, redispatches or HTTP status codes 5xx will see their syslog level
+raised from "info" to "err". This will help a syslog daemon store the log in
+a separate file. It is very important to keep the errors in the normal traffic
+file too, so that log ordering is not altered. You should also be careful if
+you already have configured your syslog daemon to store all logs higher than
+"notice" in an "admin" file, because the "err" level is higher than "notice".
+
+
+8.3.4. Disabling logging of successful connections
+--------------------------------------------------
+
+Although this may sound strange at first, some large sites have to deal with
+multiple thousands of logs per second and are experiencing difficulties keeping
+them intact for a long time or detecting errors within them. If the option
+"dontlog-normal" is set on the frontend, all normal connections will not be
+logged. In this regard, a normal connection is defined as one without any
+error, timeout, retry nor redispatch. In HTTP, the status code is checked too,
+and a response with a status 5xx is not considered normal and will be logged
+too. Of course, doing this is really discouraged as it will remove most of the
+useful information from the logs. Do this only if you have no other
+alternative.
+
+
+8.4. Timing events
+------------------
+
+Timers provide a great help in troubleshooting network problems. All values are
+reported in milliseconds (ms). These timers should be used in conjunction with
+the stream termination flags. In TCP mode with "option tcplog" set on the
+frontend, 3 control points are reported under the form "Tw/Tc/Tt", and in HTTP
+mode, 5 control points are reported under the form "TR/Tw/Tc/Tr/Ta". In
+addition, three other measures are provided, "Th", "Ti", and "Tq".
+
+Timings events in HTTP mode:
+
+ first request 2nd request
+ |<-------------------------------->|<-------------- ...
+ t tr t tr ...
+ ---|----|----|----|----|----|----|----|----|--
+ : Th Ti TR Tw Tc Tr Td : Ti ...
+ :<---- Tq ---->: :
+ :<-------------- Tt -------------->:
+ :<--------- Tu ------------>:
+ :<--------- Ta --------->:
+
+Timings events in TCP mode:
+
+ TCP session
+ |<----------------->|
+ t t
+ ---|----|----|----|----|---
+ | Th Tw Tc Td |
+ |<------ Tt ------->|
+
+ - Th: total time to accept tcp connection and execute handshakes for low level
+ protocols. Currently, these protocols are proxy-protocol and SSL. This may
+ only happen once during the whole connection's lifetime. A large time here
+ may indicate that the client only pre-established the connection without
+ speaking, that it is experiencing network issues preventing it from
+ completing a handshake in a reasonable time (e.g. MTU issues), or that an
+ SSL handshake was very expensive to compute. Please note that this time is
+ reported only before the first request, so it is safe to average it over
+ all requests to calculate the amortized value. The second and subsequent
+ requests will always report zero here.
+
+ This timer is named %Th as a log-format tag, and fc.timer.handshake as a
+ sample fetch.
+
+ - Ti: is the idle time before the HTTP request (HTTP mode only). This timer
+ counts between the end of the handshakes and the first byte of the HTTP
+ request. When dealing with a second request in keep-alive mode, it starts
+ to count after the end of the transmission of the previous response. When a
+ multiplexed protocol such as HTTP/2 is used, it starts to count immediately
+ after the previous request. Some browsers pre-establish connections to a
+ server in order to reduce the latency of a future request, and keep them
+ pending until they need it. This delay will be reported as the idle time. A
+ value of -1 indicates that nothing was received on the connection.
+
+ This timer is named %Ti as a log-format tag, and req.timer.idle as a
+ sample fetch.
+
+ - TR: total time to get the client request (HTTP mode only). It's the time
+ elapsed between the first bytes received and the moment the proxy received
+ the empty line marking the end of the HTTP headers. The value "-1"
+ indicates that the end of headers has never been seen. This happens when
+ the client closes prematurely or times out. This time is usually very short
+ since most requests fit in a single packet. A large time may indicate a
+ request typed by hand during a test.
+
+ This timer is named %TR as a log-format tag, and req.timer.hdr as a
+ sample fetch.
+
+ - Tq: total time to get the client request from the accept date or since the
+ emission of the last byte of the previous response (HTTP mode only). It's
+ exactly equal to Th + Ti + TR unless any of them is -1, in which case it
+ returns -1 as well. This timer used to be very useful before the arrival of
+ HTTP keep-alive and browsers' pre-connect feature. It's recommended to drop
+ it in favor of TR nowadays, as the idle time adds a lot of noise to the
+ reports.
+
+ This timer is named %Tq as a log-format tag, and req.timer.tq as a
+ sample fetch.
+
+ - Tw: total time spent in the queues waiting for a connection slot. It
+ accounts for backend queue as well as the server queues, and depends on the
+ queue size, and the time needed for the server to complete previous
+ requests. The value "-1" means that the request was killed before reaching
+ the queue, which is generally what happens with invalid or denied requests.
+
+ This timer is named %Tw as a log-format tag, and req.timer.queue as a
+ sample fetch.
+
+ - Tc: total time to establish the TCP connection to the server. It's the time
+ elapsed between the moment the proxy sent the connection request, and the
+ moment it was acknowledged by the server, or between the TCP SYN packet and
+ the matching SYN/ACK packet in return. The value "-1" means that the
+ connection was never established.
+
+ This timer is named %Tc as a log-format tag, and bc.timer.connect as a
+ sample fetch.
+
+ - Tr: server response time (HTTP mode only). It's the time elapsed between
+ the moment the TCP connection was established to the server and the moment
+ the server sent its complete response headers. It purely shows its request
+ processing time, without the network overhead due to the data transmission.
+ It is worth noting that when the client has data to send to the server, for
+ instance during a POST request, the time already runs, and this can distort
+ apparent response time. For this reason, it's generally wise not to trust
+ too much this field for POST requests initiated from clients behind an
+ untrusted network. A value of "-1" here means that the last response
+ header (empty line) was never seen, most likely because the server timeout
+ struck before the server managed to process the request.
+
+ This timer is named %Tr as a log-format tag, and res.timer.hdr as a
+ sample fetch.
+
+ - Td: this is the total transfer time of the response payload till the last
+ byte sent to the client. In HTTP it starts after the last response header
+ (after Tr).
+
+ The data sent are not guaranteed to be received by the client, they can be
+ stuck in either the kernel or the network.
+
+ This timer is named %Td as a log-format tag, and res.timer.data as a
+ sample fetch.
+
+ - Ta: total active time for the HTTP request, between the moment the proxy
+ received the first byte of the request header and the emission of the last
+ byte of the response body. The exception is when the "logasap" option is
+ specified. In this case, it only equals (TR+Tw+Tc+Tr), and is prefixed with
+ a '+' sign. From this field, we can deduce "Td", the data transmission time,
+ by subtracting other timers when valid :
+
+ Td = Ta - (TR + Tw + Tc + Tr)
+
+ Timers with "-1" values have to be excluded from this equation. Note that
+ "Ta" can never be negative.
+
+ This timer is named %Ta as a log-format tag, and txn.timer.total as a
+ sample fetch.
+
+ - Tt: total stream duration time, between the moment the proxy accepted it
+ and the moment both ends were closed. The exception is when the "logasap"
+ option is specified. In this case, it only equals (Th+Ti+TR+Tw+Tc+Tr), and
+ is prefixed with a '+' sign. From this field, we can deduce "Td", the data
+ transmission time, by subtracting other timers when valid :
+
+ Td = Tt - (Th + Ti + TR + Tw + Tc + Tr)
+
+ Timers with "-1" values have to be excluded from this equation. In TCP
+ mode, "Ti", "Tq" and "Tr" have to be excluded too. Note that "Tt" can never
+ be negative and that for HTTP, Tt is simply equal to (Th+Ti+Ta).
+
+ This timer is named %Tt as a log-format tag, and fc.timer.total as a
+ sample fetch.
+
+ - Tu: total estimated time as seen from client, between the moment the proxy
+ accepted it and the moment both ends were closed, without idle time.
+ This is useful to roughly measure end-to-end time as a user would see it,
+ without idle time pollution from keep-alive time between requests. This
+ timer is only an estimation of time seen by user as it assumes network
+ latency is the same in both directions. The exception is when the "logasap"
+ option is specified. In this case, it only equals (Th+TR+Tw+Tc+Tr), and is
+ prefixed with a '+' sign.
+
+ This timer is named %Tu as a log-format tag, and txn.timer.user as a
+ sample fetch.
+
+These timers provide precious indications on trouble causes. Since the TCP
+protocol defines retransmit delays of 3, 6, 12... seconds, we know for sure
+that timers close to multiples of 3s are nearly always related to lost packets
+due to network problems (wires, negotiation, congestion). Moreover, if "Ta" or
+"Tt" is close to a timeout value specified in the configuration, it often means
+that a stream has been aborted on timeout.
+
+Most common cases :
+
+ - If "Th" or "Ti" are close to 3000, a packet has probably been lost between
+ the client and the proxy. This is very rare on local networks but might
+ happen when clients are on far remote networks and send large requests. It
+ may happen that values larger than usual appear here without any network
+ cause. Sometimes, during an attack or just after a resource starvation has
+ ended, HAProxy may accept thousands of connections in a few milliseconds.
+ The time spent accepting these connections will inevitably slightly delay
+ processing of other connections, and it can happen that request times in the
+ order of a few tens of milliseconds are measured after a few thousands of
+ new connections have been accepted at once. Using one of the keep-alive
+ modes may display larger idle times since "Ti" measures the time spent
+ waiting for additional requests.
+
+ - If "Tc" is close to 3000, a packet has probably been lost between the
+ server and the proxy during the server connection phase. This value should
+ always be very low, such as 1 ms on local networks and less than a few tens
+ of ms on remote networks.
+
+ - If "Tr" is nearly always lower than 3000 except some rare values which seem
+ to be the average majored by 3000, there are probably some packets lost
+ between the proxy and the server.
+
+ - If "Ta" is large even for small byte counts, it generally is because
+ neither the client nor the server decides to close the connection while
+ HAProxy is running in tunnel mode and both have agreed on a keep-alive
+ connection mode. In order to solve this issue, it will be needed to specify
+ one of the HTTP options to manipulate keep-alive or close options on either
+ the frontend or the backend. Having the smallest possible 'Ta' or 'Tt' is
+ important when connection regulation is used with the "maxconn" option on
+ the servers, since no new connection will be sent to the server until
+ another one is released.
+
+Other noticeable HTTP log cases ('xx' means any value to be ignored) :
+
+ TR/Tw/Tc/Tr/+Ta The "option logasap" is present on the frontend and the log
+ was emitted before the data phase. All the timers are valid
+ except "Ta" which is shorter than reality.
+
+ -1/xx/xx/xx/Ta The client was not able to send a complete request in time
+ or it aborted too early. Check the stream termination flags
+ then "timeout http-request" and "timeout client" settings.
+
+ TR/-1/xx/xx/Ta It was not possible to process the request, maybe because
+ servers were out of order, because the request was invalid
+ or forbidden by ACL rules. Check the stream termination
+ flags.
+
+ TR/Tw/-1/xx/Ta The connection could not establish on the server. Either it
+ actively refused it or it timed out after Ta-(TR+Tw) ms.
+ Check the stream termination flags, then check the
+ "timeout connect" setting. Note that the tarpit action might
+ return similar-looking patterns, with "Tw" equal to the time
+ the client connection was maintained open.
+
+ TR/Tw/Tc/-1/Ta The server has accepted the connection but did not return
+ a complete response in time, or it closed its connection
+ unexpectedly after Ta-(TR+Tw+Tc) ms. Check the stream
+ termination flags, then check the "timeout server" setting.
+
+
+8.5. Stream state at disconnection
+-----------------------------------
+
+TCP and HTTP logs provide a stream termination indicator in the
+"termination_state" field, just before the number of active connections. It is
+2-characters long in TCP mode, and is extended to 4 characters in HTTP mode,
+each of which has a special meaning :
+
+ - On the first character, a code reporting the first event which caused the
+ stream to terminate :
+
+ C : the TCP session was unexpectedly aborted by the client.
+
+ S : the TCP session was unexpectedly aborted by the server, or the
+ server explicitly refused it.
+
+ P : the stream or session was prematurely aborted by the proxy, because
+ of a connection limit enforcement, because a DENY filter was
+ matched, because of a security check which detected and blocked a
+ dangerous error in server response which might have caused
+ information leak (e.g. cacheable cookie).
+
+ L : the stream was locally processed by HAProxy.
+
+ R : a resource on the proxy has been exhausted (memory, sockets, source
+ ports, ...). Usually, this appears during the connection phase, and
+ system logs should contain a copy of the precise error. If this
+ happens, it must be considered as a very serious anomaly which
+ should be fixed as soon as possible by any means.
+
+ I : an internal error was identified by the proxy during a self-check.
+ This should NEVER happen, and you are encouraged to report any log
+ containing this, because this would almost certainly be a bug. It
+ would be wise to preventively restart the process after such an
+ event too, in case it would be caused by memory corruption.
+
+ D : the stream was killed by HAProxy because the server was detected
+ as down and was configured to kill all connections when going down.
+
+ U : the stream was killed by HAProxy on this backup server because an
+ active server was detected as up and was configured to kill all
+ backup connections when going up.
+
+ K : the stream was actively killed by an admin operating on HAProxy.
+
+ c : the client-side timeout expired while waiting for the client to
+ send or receive data.
+
+ s : the server-side timeout expired while waiting for the server to
+ send or receive data.
+
+ - : normal stream completion, both the client and the server closed
+ with nothing left in the buffers.
+
+ - on the second character, the TCP or HTTP stream state when it was closed :
+
+ R : the proxy was waiting for a complete, valid REQUEST from the client
+ (HTTP mode only). Nothing was sent to any server.
+
+ Q : the proxy was waiting in the QUEUE for a connection slot. This can
+ only happen when servers have a 'maxconn' parameter set. It can
+ also happen in the global queue after a redispatch consecutive to
+ a failed attempt to connect to a dying server. If no redispatch is
+ reported, then no connection attempt was made to any server.
+
+ C : the proxy was waiting for the CONNECTION to establish on the
+ server. The server might at most have noticed a connection attempt.
+
+ H : the proxy was waiting for complete, valid response HEADERS from the
+ server (HTTP only).
+
+ D : the stream was in the DATA phase.
+
+ L : the proxy was still transmitting LAST data to the client while the
+ server had already finished. This one is very rare as it can only
+ happen when the client dies while receiving the last packets.
+
+ T : the request was tarpitted. It has been held open with the client
+ during the whole "timeout tarpit" duration or until the client
+ closed, both of which will be reported in the "Tw" timer.
+
+ - : normal stream completion after end of data transfer.
+
+ - the third character tells whether the persistence cookie was provided by
+ the client (only in HTTP mode) :
+
+ N : the client provided NO cookie. This is usually the case for new
+ visitors, so counting the number of occurrences of this flag in the
+ logs generally indicates a valid trend for the site frequentation.
+
+ I : the client provided an INVALID cookie matching no known server.
+ This might be caused by a recent configuration change, mixed
+ cookies between HTTP/HTTPS sites, persistence conditionally
+ ignored, or an attack.
+
+ D : the client provided a cookie designating a server which was DOWN,
+ so either "option persist" was used and the client was sent to
+ this server, or it was not set and the client was redispatched to
+ another server.
+
+ V : the client provided a VALID cookie, and was sent to the associated
+ server.
+
+ E : the client provided a valid cookie, but with a last date which was
+ older than what is allowed by the "maxidle" cookie parameter, so
+ the cookie is considered EXPIRED and is ignored. The request will be
+ redispatched just as if there was no cookie.
+
+ O : the client provided a valid cookie, but with a first date which was
+ older than what is allowed by the "maxlife" cookie parameter, so
+ the cookie is considered too OLD and is ignored. The request will be
+ redispatched just as if there was no cookie.
+
+ U : a cookie was present but was not used to select the server because
+ some other server selection mechanism was used instead (typically a
+ "use-server" rule).
+
+ - : does not apply (no cookie set in configuration).
+
+ - the last character reports what operations were performed on the persistence
+ cookie returned by the server (only in HTTP mode) :
+
+ N : NO cookie was provided by the server, and none was inserted either.
+
+ I : no cookie was provided by the server, and the proxy INSERTED one.
+ Note that in "cookie insert" mode, if the server provides a cookie,
+ it will still be overwritten and reported as "I" here.
+
+ U : the proxy UPDATED the last date in the cookie that was presented by
+ the client. This can only happen in insert mode with "maxidle". It
+ happens every time there is activity at a different date than the
+ date indicated in the cookie. If any other change happens, such as
+ a redispatch, then the cookie will be marked as inserted instead.
+
+ P : a cookie was PROVIDED by the server and transmitted as-is.
+
+ R : the cookie provided by the server was REWRITTEN by the proxy, which
+ happens in "cookie rewrite" or "cookie prefix" modes.
+
+ D : the cookie provided by the server was DELETED by the proxy.
+
+ - : does not apply (no cookie set in configuration).
+
+The combination of the two first flags gives a lot of information about what
+was happening when the stream or session terminated, and why it did terminate.
+It can be helpful to detect server saturation, network troubles, local system
+resource starvation, attacks, etc...
+
+The most common termination flags combinations are indicated below. They are
+alphabetically sorted, with the lowercase set just after the upper case for
+easier finding and understanding.
+
+ Flags Reason
+
+ -- Normal termination.
+
+ CC The client aborted before the connection could be established to the
+ server. This can happen when HAProxy tries to connect to a recently
+ dead (or unchecked) server, and the client aborts while HAProxy is
+ waiting for the server to respond or for "timeout connect" to expire.
+
+ CD The client unexpectedly aborted during data transfer. This can be
+ caused by a browser crash, by an intermediate equipment between the
+ client and HAProxy which decided to actively break the connection,
+ by network routing issues between the client and HAProxy, or by a
+ keep-alive stream between the server and the client terminated first
+ by the client.
+
+ cD The client did not send nor acknowledge any data for as long as the
+ "timeout client" delay. This is often caused by network failures on
+ the client side, or the client simply leaving the net uncleanly.
+
+ CH The client aborted while waiting for the server to start responding.
+ It might be the server taking too long to respond or the client
+ clicking the 'Stop' button too fast.
+
+ cH The "timeout client" stroke while waiting for client data during a
+ POST request. This is sometimes caused by too large TCP MSS values
+ for PPPoE networks which cannot transport full-sized packets. It can
+ also happen when client timeout is smaller than server timeout and
+ the server takes too long to respond.
+
+ CQ The client aborted while its stream was queued, waiting for a server
+ with enough empty slots to accept it. It might be that either all the
+ servers were saturated or that the assigned server was taking too
+ long a time to respond.
+
+ CR The client aborted before sending a full HTTP request. Most likely
+ the request was typed by hand using a telnet client, and aborted
+ too early. The HTTP status code is likely a 400 here. Sometimes this
+ might also be caused by an IDS killing the connection between HAProxy
+ and the client. "option http-ignore-probes" can be used to ignore
+ connections without any data transfer.
+
+ cR The "timeout http-request" stroke before the client sent a full HTTP
+ request. This is sometimes caused by too large TCP MSS values on the
+ client side for PPPoE networks which cannot transport full-sized
+ packets, or by clients sending requests by hand and not typing fast
+ enough, or forgetting to enter the empty line at the end of the
+ request. The HTTP status code is likely a 408 here. Note: recently,
+ some browsers started to implement a "pre-connect" feature consisting
+ in speculatively connecting to some recently visited web sites just
+ in case the user would like to visit them. This results in many
+ connections being established to web sites, which end up in 408
+ Request Timeout if the timeout strikes first, or 400 Bad Request when
+ the browser decides to close them first. These ones pollute the log
+ and feed the error counters. Some versions of some browsers have even
+ been reported to display the error code. It is possible to work
+ around the undesirable effects of this behavior by adding "option
+ http-ignore-probes" in the frontend, resulting in connections with
+ zero data transfer to be totally ignored. This will definitely hide
+ the errors of people experiencing connectivity issues though.
+
+ CT The client aborted while its stream was tarpitted. It is important to
+ check if this happens on valid requests, in order to be sure that no
+ wrong tarpit rules have been written. If a lot of them happen, it
+ might make sense to lower the "timeout tarpit" value to something
+ closer to the average reported "Tw" timer, in order not to consume
+ resources for just a few attackers.
+
+ LC The request was intercepted and locally handled by HAProxy. The
+ request was not sent to the server. It only happens with a redirect
+ because of a "redir" parameter on the server line.
+
+ LR The request was intercepted and locally handled by HAProxy. The
+ request was not sent to the server. Generally it means a redirect was
+ returned, an HTTP return statement was processed or the request was
+         handled by an applet (stats, cache, Prometheus exporter, lua applet...).
+
+ LH The response was intercepted and locally handled by HAProxy. Generally
+ it means a redirect was returned or an HTTP return statement was
+ processed.
+
+ SC The server or an equipment between it and HAProxy explicitly refused
+ the TCP connection (the proxy received a TCP RST or an ICMP message
+ in return). Under some circumstances, it can also be the network
+ stack telling the proxy that the server is unreachable (e.g. no route,
+ or no ARP response on local network). When this happens in HTTP mode,
+ the status code is likely a 502 or 503 here.
+
+ sC The "timeout connect" stroke before a connection to the server could
+ complete. When this happens in HTTP mode, the status code is likely a
+ 503 or 504 here.
+
+ SD The connection to the server died with an error during the data
+ transfer. This usually means that HAProxy has received an RST from
+ the server or an ICMP message from an intermediate equipment while
+ exchanging data with the server. This can be caused by a server crash
+ or by a network issue on an intermediate equipment.
+
+ sD The server did not send nor acknowledge any data for as long as the
+ "timeout server" setting during the data phase. This is often caused
+ by too short timeouts on L4 equipment before the server (firewalls,
+ load-balancers, ...), as well as keep-alive sessions maintained
+ between the client and the server expiring first on HAProxy.
+
+ SH The server aborted before sending its full HTTP response headers, or
+ it crashed while processing the request. Since a server aborting at
+ this moment is very rare, it would be wise to inspect its logs to
+ control whether it crashed and why. The logged request may indicate a
+ small set of faulty requests, demonstrating bugs in the application.
+ Sometimes this might also be caused by an IDS killing the connection
+ between HAProxy and the server.
+
+ sH The "timeout server" stroke before the server could return its
+ response headers. This is the most common anomaly, indicating too
+ long transactions, probably caused by server or database saturation.
+ The immediate workaround consists in increasing the "timeout server"
+ setting, but it is important to keep in mind that the user experience
+ will suffer from these long response times. The only long term
+ solution is to fix the application.
+
+ sQ The stream spent too much time in queue and has been expired. See
+ the "timeout queue" and "timeout connect" settings to find out how to
+ fix this if it happens too often. If it often happens massively in
+ short periods, it may indicate general problems on the affected
+ servers due to I/O or database congestion, or saturation caused by
+ external attacks.
+
+ PC The proxy refused to establish a connection to the server because the
+ process's socket limit has been reached while attempting to connect.
+ The global "maxconn" parameter may be increased in the configuration
+ so that it does not happen anymore. This status is very rare and
+ might happen when the global "ulimit-n" parameter is forced by hand.
+
+ PD The proxy blocked an incorrectly formatted chunked encoded message in
+ a request or a response, after the server has emitted its headers. In
+ most cases, this will indicate an invalid message from the server to
+ the client. HAProxy supports chunk sizes of up to 2GB - 1 (2147483647
+ bytes). Any larger size will be considered as an error.
+
+ PH The proxy blocked the server's response, because it was invalid,
+ incomplete, dangerous (cache control), or matched a security filter.
+ In any case, an HTTP 502 error is sent to the client. One possible
+ cause for this error is an invalid syntax in an HTTP header name
+ containing unauthorized characters. It is also possible but quite
+ rare, that the proxy blocked a chunked-encoding request from the
+ client due to an invalid syntax, before the server responded. In this
+ case, an HTTP 400 error is sent to the client and reported in the
+ logs. Finally, it may be due to an HTTP header rewrite failure on the
+ response. In this case, an HTTP 500 error is sent (see
+ "tune.maxrewrite" and "http-response strict-mode" for more
+         information).
+
+ PR The proxy blocked the client's HTTP request, either because of an
+ invalid HTTP syntax, in which case it returned an HTTP 400 error to
+ the client, or because a deny filter matched, in which case it
+ returned an HTTP 403 error. It may also be due to an HTTP header
+ rewrite failure on the request. In this case, an HTTP 500 error is
+ sent (see "tune.maxrewrite" and "http-request strict-mode" for more
+         information).
+
+ PT The proxy blocked the client's request and has tarpitted its
+ connection before returning it a 500 server error. Nothing was sent
+ to the server. The connection was maintained open for as long as
+ reported by the "Tw" timer field.
+
+ RC A local resource has been exhausted (memory, sockets, source ports)
+ preventing the connection to the server from establishing. The error
+ logs will tell precisely what was missing. This is very rare and can
+ only be solved by proper system tuning.
+
+The combination of the two last flags gives a lot of information about how
+persistence was handled by the client, the server and by HAProxy. This is very
+important to troubleshoot disconnections, when users complain they have to
+re-authenticate. The commonly encountered flags are :
+
+ -- Persistence cookie is not enabled.
+
+ NN No cookie was provided by the client, none was inserted in the
+ response. For instance, this can be in insert mode with "postonly"
+ set on a GET request.
+
+ II A cookie designating an invalid server was provided by the client,
+ a valid one was inserted in the response. This typically happens when
+ a "server" entry is removed from the configuration, since its cookie
+ value can be presented by a client when no other server knows it.
+
+ NI No cookie was provided by the client, one was inserted in the
+ response. This typically happens for first requests from every user
+ in "insert" mode, which makes it an easy way to count real users.
+
+ VN A cookie was provided by the client, none was inserted in the
+ response. This happens for most responses for which the client has
+ already got a cookie.
+
+ VU A cookie was provided by the client, with a last visit date which is
+ not completely up-to-date, so an updated cookie was provided in
+ response. This can also happen if there was no date at all, or if
+ there was a date but the "maxidle" parameter was not set, so that the
+ cookie can be switched to unlimited time.
+
+ EI A cookie was provided by the client, with a last visit date which is
+ too old for the "maxidle" parameter, so the cookie was ignored and a
+ new cookie was inserted in the response.
+
+ OI A cookie was provided by the client, with a first visit date which is
+ too old for the "maxlife" parameter, so the cookie was ignored and a
+ new cookie was inserted in the response.
+
+ DI The server designated by the cookie was down, a new server was
+ selected and a new cookie was emitted in the response.
+
+ VI The server designated by the cookie was not marked dead but could not
+ be reached. A redispatch happened and selected another one, which was
+ then advertised in the response.
+
+
+8.6. Non-printable characters
+-----------------------------
+
+In order not to cause trouble to log analysis tools or terminals during log
+consulting, non-printable characters are not sent as-is into log files, but are
+converted to the two-digits hexadecimal representation of their ASCII code,
+prefixed by the character '#'. The only characters that can be logged without
+being escaped are comprised between 32 and 126 (inclusive). Obviously, the
+escape character '#' itself is also encoded to avoid any ambiguity ("#23"). It
+is the same for the character '"' which becomes "#22", as well as '{', '|' and
+'}' when logging headers.
+
+Note that the space character (' ') is not encoded in headers, which can cause
+issues for tools relying on space count to locate fields. A typical header
+containing spaces is "User-Agent".
+
+Last, it has been observed that some syslog daemons such as syslog-ng escape
+the quote ('"') with a backslash ('\'). The reverse operation can safely be
+performed since no quote may appear anywhere else in the logs.
+
+
+8.7. Capturing HTTP cookies
+---------------------------
+
+Cookie capture simplifies the tracking of a complete user session. This can be
+achieved using the "capture cookie" statement in the frontend. Please refer to
+section 4.2 for more details. Only one cookie can be captured, and the same
+cookie will simultaneously be checked in the request ("Cookie:" header) and in
+the response ("Set-Cookie:" header). The respective values will be reported in
+the HTTP logs at the "captured_request_cookie" and "captured_response_cookie"
+locations (see section 8.2.3 about HTTP log format). When either cookie is
+not seen, a dash ('-') replaces the value. This way, it's easy to detect when a
+user switches to a new session for example, because the server will reassign it
+a new cookie. It is also possible to detect if a server unexpectedly sets a
+wrong cookie to a client, leading to session crossing.
+
+ Examples :
+ # capture the first cookie whose name starts with "ASPSESSION"
+ capture cookie ASPSESSION len 32
+
+ # capture the first cookie whose name is exactly "vgnvisitor"
+ capture cookie vgnvisitor= len 32
+
+
+8.8. Capturing HTTP headers
+---------------------------
+
+Header captures are useful to track unique request identifiers set by an upper
+proxy, virtual host names, user-agents, POST content-length, referrers, etc. In
+the response, one can search for information about the response length, how the
+server asked the cache to behave, or an object location during a redirection.
+
+Header captures are performed using the "capture request header" and "capture
+response header" statements in the frontend. Please consult their definition in
+section 4.2 for more details.
+
+It is possible to include both request headers and response headers at the same
+time. Non-existent headers are logged as empty strings, and if one header
+appears more than once, only its last occurrence will be logged. Request headers
+are grouped within braces '{' and '}' in the same order as they were declared,
+and delimited with a vertical bar '|' without any space. Response headers
+follow the same representation, but are displayed after a space following the
+request headers block. These blocks are displayed just before the HTTP request
+in the logs.
+
+As a special case, it is possible to specify an HTTP header capture in a TCP
+frontend. The purpose is to enable logging of headers which will be parsed in
+an HTTP backend if the request is then switched to this HTTP backend.
+
+ Example :
+ # This instance chains to the outgoing proxy
+ listen proxy-out
+ mode http
+ option httplog
+ option logasap
+ log global
+ server cache1 192.168.1.1:3128
+
+ # log the name of the virtual server
+ capture request header Host len 20
+
+ # log the amount of data uploaded during a POST
+ capture request header Content-Length len 10
+
+ # log the beginning of the referrer
+ capture request header Referer len 20
+
+ # server name (useful for outgoing proxies only)
+ capture response header Server len 20
+
+ # logging the content-length is useful with "option logasap"
+ capture response header Content-Length len 10
+
+ # log the expected cache behavior on the response
+ capture response header Cache-Control len 8
+
+ # the Via header will report the next proxy's name
+ capture response header Via len 20
+
+ # log the URL location during a redirection
+ capture response header Location len 20
+
+ >>> Aug 9 20:26:09 localhost \
+ haproxy[2022]: 127.0.0.1:34014 [09/Aug/2004:20:26:09] proxy-out \
+ proxy-out/cache1 0/0/0/162/+162 200 +350 - - ---- 0/0/0/0/0 0/0 \
+ {fr.adserver.yahoo.co||http://fr.f416.mail.} {|864|private||} \
+ "GET http://fr.adserver.yahoo.com/"
+
+ >>> Aug 9 20:30:46 localhost \
+ haproxy[2022]: 127.0.0.1:34020 [09/Aug/2004:20:30:46] proxy-out \
+ proxy-out/cache1 0/0/0/182/+182 200 +279 - - ---- 0/0/0/0/0 0/0 \
+ {w.ods.org||} {Formilux/0.1.8|3495|||} \
+ "GET http://trafic.1wt.eu/ HTTP/1.1"
+
+ >>> Aug 9 20:30:46 localhost \
+ haproxy[2022]: 127.0.0.1:34028 [09/Aug/2004:20:30:46] proxy-out \
+ proxy-out/cache1 0/0/2/126/+128 301 +223 - - ---- 0/0/0/0/0 0/0 \
+ {www.sytadin.equipement.gouv.fr||http://trafic.1wt.eu/} \
+ {Apache|230|||http://www.sytadin.} \
+ "GET http://www.sytadin.equipement.gouv.fr/ HTTP/1.1"
+
+
+8.9. Examples of logs
+---------------------
+
+These are real-world examples of logs accompanied with an explanation. Some of
+them have been made up by hand. The syslog part has been removed for better
+reading. Their sole purpose is to explain how to decipher them.
+
+ >>> haproxy[674]: 127.0.0.1:33318 [15/Oct/2003:08:31:57.130] px-http \
+ px-http/srv1 6559/0/7/147/6723 200 243 - - ---- 5/3/3/1/0 0/0 \
+ "HEAD / HTTP/1.0"
+
+ => long request (6.5s) entered by hand through 'telnet'. The server replied
+ in 147 ms, and the session ended normally ('----')
+
+ >>> haproxy[674]: 127.0.0.1:33319 [15/Oct/2003:08:31:57.149] px-http \
+ px-http/srv1 6559/1230/7/147/6870 200 243 - - ---- 324/239/239/99/0 \
+ 0/9 "HEAD / HTTP/1.0"
+
+ => Idem, but the request was queued in the global queue behind 9 other
+ requests, and waited there for 1230 ms.
+
+ >>> haproxy[674]: 127.0.0.1:33320 [15/Oct/2003:08:32:17.654] px-http \
+ px-http/srv1 9/0/7/14/+30 200 +243 - - ---- 3/3/3/1/0 0/0 \
+ "GET /image.iso HTTP/1.0"
+
+ => request for a long data transfer. The "logasap" option was specified, so
+ the log was produced just before transferring data. The server replied in
+ 14 ms, 243 bytes of headers were sent to the client, and total time from
+ accept to first data byte is 30 ms.
+
+ >>> haproxy[674]: 127.0.0.1:33320 [15/Oct/2003:08:32:17.925] px-http \
+ px-http/srv1 9/0/7/14/30 502 243 - - PH-- 3/2/2/0/0 0/0 \
+ "GET /cgi-bin/bug.cgi? HTTP/1.0"
+
+ => the proxy blocked a server response either because of an "http-response
+ deny" rule, or because the response was improperly formatted and not
+ HTTP-compliant, or because it blocked sensitive information which risked
+ being cached. In this case, the response is replaced with a "502 bad
+ gateway". The flags ("PH--") tell us that it was HAProxy who decided to
+ return the 502 and not the server.
+
+ >>> haproxy[18113]: 127.0.0.1:34548 [15/Oct/2003:15:18:55.798] px-http \
+ px-http/<NOSRV> -1/-1/-1/-1/8490 -1 0 - - CR-- 2/2/2/0/0 0/0 ""
+
+ => the client never completed its request and aborted itself ("C---") after
+ 8.5s, while the proxy was waiting for the request headers ("-R--").
+ Nothing was sent to any server.
+
+ >>> haproxy[18113]: 127.0.0.1:34549 [15/Oct/2003:15:19:06.103] px-http \
+ px-http/<NOSRV> -1/-1/-1/-1/50001 408 0 - - cR-- 2/2/2/0/0 0/0 ""
+
+ => The client never completed its request, which was aborted by the
+ time-out ("c---") after 50s, while the proxy was waiting for the request
+ headers ("-R--"). Nothing was sent to any server, but the proxy could
+ send a 408 return code to the client.
+
+ >>> haproxy[18989]: 127.0.0.1:34550 [15/Oct/2003:15:24:28.312] px-tcp \
+ px-tcp/srv1 0/0/5007 0 cD 0/0/0/0/0 0/0
+
+ => This log was produced with "option tcplog". The client timed out after
+ 5 seconds ("c----").
+
+ >>> haproxy[18989]: 10.0.0.1:34552 [15/Oct/2003:15:26:31.462] px-http \
+ px-http/srv1 3183/-1/-1/-1/11215 503 0 - - SC-- 205/202/202/115/3 \
+ 0/0 "HEAD / HTTP/1.0"
+
+ => The request took 3s to complete (probably a network problem), and the
+ connection to the server failed ('SC--') after 4 attempts of 2 seconds
+ (config says 'retries 3'), and no redispatch (otherwise we would have
+ seen "/+3"). Status code 503 was returned to the client. There were 115
+ connections on this server, 202 connections on this proxy, and 205 on
+ the global process. It is possible that the server refused the
+ connection because of too many already established.
+
+
+9. Supported filters
+--------------------
+
+Here are listed officially supported filters with the list of parameters they
+accept. Depending on compile options, some of these filters might be
+unavailable. The list of available filters is reported in haproxy -vv.
+
+See also : "filter"
+
+9.1. Trace
+----------
+
+filter trace [name <name>] [random-forwarding] [hexdump]
+
+ Arguments:
+ <name> is an arbitrary name that will be reported in
+ messages. If no name is provided, "TRACE" is used.
+
+ <quiet> inhibits trace messages.
+
+ <random-forwarding> enables the random forwarding of parsed data. By
+ default, this filter forwards all previously parsed
+ data. With this parameter, it only forwards a random
+ amount of the parsed data.
+
+ <hexdump> dumps all forwarded data to the server and the client.
+
+This filter can be used as a base to develop new filters. It defines all
+callbacks and print a message on the standard error stream (stderr) with useful
+information for all of them. It may be useful to debug the activity of other
+filters or, quite simply, HAProxy's activity.
+
+Using <random-parsing> and/or <random-forwarding> parameters is a good way to
+test the behavior of a filter that parses data exchanged between a client and
+a server by adding some latencies in the processing.
+
+
+9.2. HTTP compression
+---------------------
+
+filter compression
+
+The HTTP compression has been moved in a filter in HAProxy 1.7. "compression"
+keyword must still be used to enable and configure the HTTP compression. And
+when no other filter is used, it is enough. When used with the cache or the
+fcgi-app enabled, it is also enough. In this case, the compression is always
+done after the response is stored in the cache. But it is mandatory to
+explicitly use a filter line to enable the HTTP compression when at least one
+filter other than the cache or the fcgi-app is used for the same
+listener/frontend/backend. This is important to know the filters evaluation
+order.
+
+See also : "compression", section 9.4 about the cache filter and section 9.5
+ about the fcgi-app filter.
+
+
+9.3. Stream Processing Offload Engine (SPOE)
+--------------------------------------------
+
+filter spoe [engine <name>] config <file>
+
+ Arguments :
+
+ <name> is the engine name that will be used to find the right scope in
+ the configuration file. If not provided, all the file will be
+ parsed.
+
+ <file> is the path of the engine configuration file. This file can
+ contain configuration of several engines. In this case, each
+ part must be placed in its own scope.
+
+The Stream Processing Offload Engine (SPOE) is a filter communicating with
+external components. It allows the offload of some specific processing on the
+streams in tiered applications. These external components and information
+exchanged with them are configured in dedicated files, for the main part. It
+also requires dedicated backends, defined in HAProxy configuration.
+
+SPOE communicates with external components using an in-house binary protocol,
+the Stream Processing Offload Protocol (SPOP).
+
+For all information about the SPOE configuration and the SPOP specification, see
+"doc/SPOE.txt".
+
+9.4. Cache
+----------
+
+filter cache <name>
+
+ Arguments :
+
+    <name> is the name of the cache section this filter will use.
+
+The cache uses a filter to store cacheable responses. The HTTP rules
+"cache-store" and "cache-use" must be used to define how and when to use a
+cache. By default the corresponding filter is implicitly defined. And when no
+other filters than fcgi-app or compression are used, it is enough. In such
+case, the compression filter is always evaluated after the cache filter. But it
+is mandatory to explicitly use a filter line to use a cache when at least one
+filter other than the compression or the fcgi-app is used for the same
+listener/frontend/backend. This is important to know the filters evaluation
+order.
+
+See also : section 9.2 about the compression filter, section 9.5 about the
+ fcgi-app filter and section 6 about cache.
+
+
+9.5. Fcgi-app
+-------------
+
+filter fcgi-app <name>
+
+ Arguments :
+
+    <name> is the name of the fcgi-app section this filter will use.
+
+The FastCGI application uses a filter to evaluate all custom parameters on the
+request path, and to process the headers on the response path. The <name> must
+reference an existing fcgi-app section. The directive "use-fcgi-app" should be
+used to define the application to use. By default the corresponding filter is
+implicitly defined. And when no other filters than cache or compression are
+used, it is enough. But it is mandatory to explicitly use a filter line to a
+fcgi-app when at least one filter other than the compression or the cache is
+used for the same backend. This is important to know the filters evaluation
+order.
+
+See also: "use-fcgi-app", section 9.2 about the compression filter, section 9.4
+ about the cache filter and section 10 about FastCGI application.
+
+
+9.6. OpenTracing
+----------------
+
+The OpenTracing filter adds native support for using distributed tracing in
+HAProxy. This is enabled by sending an OpenTracing compliant request to one
+of the supported tracers such as Datadog, Jaeger, Lightstep and Zipkin tracers.
+Please note: tracers are not listed by any preference, but alphabetically.
+
+This feature is only enabled when HAProxy was built with USE_OT=1.
+
+The OpenTracing filter activation is done explicitly by specifying it in the
+HAProxy configuration. If this is not done, the OpenTracing filter in no way
+participates in the work of HAProxy.
+
+filter opentracing [id <id>] config <file>
+
+ Arguments :
+
+ <id> is the OpenTracing filter id that will be used to find the
+ right scope in the configuration file. If no filter id is
+ specified, 'ot-filter' is used as default. If scope is not
+ specified in the configuration file, it applies to all defined
+ OpenTracing filters.
+
+ <file> is the path of the OpenTracing configuration file. The same
+ file can contain configurations for multiple OpenTracing
+ filters simultaneously. In that case we do not need to define
+ scope so the same configuration applies to all filters or each
+ filter must have its own scope defined.
+
+More detailed documentation related to the operation, configuration and use
+of the filter can be found in the addons/ot directory.
+
+9.7. Bandwidth limitation
+--------------------------
+
+filter bwlim-in <name> default-limit <size> default-period <time> [min-size <sz>]
+filter bwlim-out <name> default-limit <size> default-period <time> [min-size <sz>]
+filter bwlim-in <name> limit <size> key <pattern> [table <table>] [min-size <sz>]
+filter bwlim-out <name> limit <size> key <pattern> [table <table>] [min-size <sz>]
+
+ Arguments :
+
+ <name> is the filter name that will be used by 'set-bandwidth-limit'
+ actions to reference a specific bandwidth limitation filter.
+
+ <size> is max number of bytes that can be forwarded over the period.
+ The value must be specified for per-stream and shared bandwidth
+ limitation filters. It follows the HAProxy size format and is
+ expressed in bytes.
+
+ <pattern> is a sample expression rule as described in section 7.3. It
+ describes what elements will be analyzed, extracted, combined,
+ and used to select which table entry to update the counters. It
+ must be specified for shared bandwidth limitation filters only.
+
+ <table> is an optional table to be used instead of the default one,
+ which is the stick-table declared in the current proxy. It can
+ be specified for shared bandwidth limitation filters only.
+
+ <time> is the default time period used to evaluate the bandwidth
+ limitation rate. It can be specified for per-stream bandwidth
+ limitation filters only. It follows the HAProxy time format and
+ is expressed in milliseconds.
+
+ <min-size> is the optional minimum number of bytes forwarded at a time by
+ a stream excluding the last packet that may be smaller. This
+ value can be specified for per-stream and shared bandwidth
+ limitation filters. It follows the HAProxy size format and is
+ expressed in bytes.
+
+Bandwidth limitation filters should be used to restrict the data forwarding
+speed at the stream level. By extension, such filters limit the network
+bandwidth consumed by a resource. Several bandwidth limitation filters can be
+used. For instance, it is possible to define a limit per source address to be
+sure a client will never consume all the network bandwidth, thereby penalizing
+other clients, and another one per stream to be able to fairly handle several
+connections for a given client.
+
+The definition order of these filters is important. If several bandwidth
+filters are enabled on a stream, the filtering will be applied in their
+definition order. It is also important to understand that the definition order
+of the other filters has an influence. For instance, depending on whether the
+HTTP compression filter is defined before or after a bandwidth limitation
+filter, the limit will be applied on the compressed payload or not. The same
+is true for the cache filter.
+
+There are two kinds of bandwidth limitation filters. The first one enforces a
+default limit and is applied per stream. The second one uses a stickiness table
+to enforce a limit equally divided between all streams sharing the same entry in
+the table.
+
+In addition, for a given filter, depending on the filter keyword used, the
+limitation can be applied on incoming data, received from the client and
+forwarded to a server, or on outgoing data, received from a server and sent to
+the client. To apply a limit on incoming data, "bwlim-in" keyword must be
+used. To apply it on outgoing data, "bwlim-out" keyword must be used. In both
+cases, the bandwidth limitation is applied on forwarded data, at the stream
+level.
+
+The bandwidth limitation is applied at the stream level and not at the
+connection level. For multiplexed protocols (H2, H3 and FastCGI), the streams
+of the same connection may have different limits.
+
+For a per-stream bandwidth limitation filter, default period and limit must be
+defined. As their names suggest, they are the default values used to setup the
+bandwidth limitation rate for a stream. However, for this kind of filter and
+only this one, it is possible to redefine these values using sample expressions
+when the filter is enabled with a TCP/HTTP "set-bandwidth-limit" action.
+
+For a shared bandwidth limitation filter, depending on whether it is applied on
+incoming or outgoing data, the stickiness table used must store the
+corresponding bytes rate information. "bytes_in_rate(<period>)" counter must be
+stored to limit incoming data and "bytes_out_rate(<period>)" counter must be
+used to limit outgoing data.
+
+Finally, it is possible to set the minimum number of bytes that a bandwidth
+limitation filter can forward at a time for a given stream. It should be used
+to avoid forwarding too small an amount of data, to reduce the CPU usage. It must
+carefully be defined. Too small, a value can increase the CPU usage. Too high,
+it can increase the latency. It is also highly linked to the defined bandwidth
+limit. If it is too close to the bandwidth limit, some pauses may be
+experienced to not exceed the limit because too many bytes will be consumed at
+a time. It is highly dependent on the filter configuration. A good idea is to
+start with something around 2 TCP MSS, typically 2896 bytes, and tune it after
+some experimentation.
+
+ Example:
+ frontend http
+ bind *:80
+ mode http
+
+ # If this filter is enabled, the stream will share the download limit
+ # of 10m/s with all other streams with the same source address.
+ filter bwlim-out limit-by-src key src table limit-by-src limit 10m
+
+ # If this filter is enabled, the stream will be limited to download at 1m/s,
+ # independently of all other streams.
+ filter bwlim-out limit-by-strm default-limit 1m default-period 1s
+
+ # Limit all streams to 1m/s (the default limit) and those accessing the
+ # internal API to 100k/s. Limit each source address to 10m/s. The shared
+ # limit is applied first. Both are limiting the download rate.
+ http-request set-bandwidth-limit limit-by-strm
+ http-request set-bandwidth-limit limit-by-strm limit 100k if { path_beg /internal }
+ http-request set-bandwidth-limit limit-by-src
+ ...
+
+ backend limit-by-src
+ # The stickiness table used by <limit-by-src> filter
+ stick-table type ip size 1m expire 3600s store bytes_out_rate(1s)
+
+See also : "tcp-request content set-bandwidth-limit",
+ "tcp-response content set-bandwidth-limit",
+ "http-request set-bandwidth-limit" and
+ "http-response set-bandwidth-limit".
+
+10. FastCGI applications
+-------------------------
+
+HAProxy is able to send HTTP requests to Responder FastCGI applications. This
+feature was added in HAProxy 2.1. To do so, servers must be configured to use
+the FastCGI protocol (using the keyword "proto fcgi" on the server line) and a
+FastCGI application must be configured and used by the backend managing these
+servers (using the keyword "use-fcgi-app" into the proxy section). Several
+FastCGI applications may be defined, but only one can be used at a time by a
+backend.
+
+HAProxy implements all features of the FastCGI specification for Responder
+applications. In particular, it is able to multiplex several requests on a
+single connection.
+
+10.1. Setup
+-----------
+
+10.1.1. Fcgi-app section
+--------------------------
+
+fcgi-app <name>
+ Declare a FastCGI application named <name>. To be valid, at least the
+ document root must be defined.
+
+acl <aclname> <criterion> [flags] [operator] <value> ...
+ Declare or complete an access list.
+
+ See "acl" keyword in section 4.2 and section 7 about ACL usage for
+ details. ACLs defined for a FastCGI application are private. They cannot be
+ used by any other application or by any proxy. In the same way, ACLs defined
+ in any other section are not usable by a FastCGI application. However,
+ Pre-defined ACLs are available.
+
+docroot <path>
+ Define the document root on the remote host. <path> will be used to build
+ the default value of FastCGI parameters SCRIPT_FILENAME and
+ PATH_TRANSLATED. It is a mandatory setting.
+
+index <script-name>
+ Define the script name that will be appended after an URI that ends with a
+ slash ("/") to set the default value of the FastCGI parameter SCRIPT_NAME. It
+ is an optional setting.
+
+ Example :
+ index index.php
+
+log-stderr global
+log-stderr <target> [len <length>] [format <format>]
+ [sample <ranges>:<sample_size>] <facility> [<level> [<minlevel>]]
+ Enable logging of STDERR messages reported by the FastCGI application.
+
+ See "log" keyword in section 4.2 for details. It is an optional setting. By
+ default STDERR messages are ignored.
+
+pass-header <name> [ { if | unless } <condition> ]
+ Specify the name of a request header which will be passed to the FastCGI
+ application. It may optionally be followed by an ACL-based condition, in
+ which case it will only be evaluated if the condition is true.
+
+ Most request headers are already available to the FastCGI application,
+ prefixed with "HTTP_". Thus, this directive is only required to pass headers
+ that are purposefully omitted. Currently, the headers "Authorization",
+ "Proxy-Authorization" and hop-by-hop headers are omitted.
+
+ Note that the headers "Content-type" and "Content-length" are never passed to
+ the FastCGI application because they are already converted into parameters.
+
+path-info <regex>
+ Define a regular expression to extract the script-name and the path-info from
+ the URL-decoded path. Thus, <regex> may have two captures: the first one to
+ capture the script name and the second one to capture the path-info. The
+ first one is mandatory, the second one is optional. This way, it is possible
+  to extract the script-name from the path ignoring the path-info. It is an
+  optional setting. If it is not defined, no matching is performed on the
+  path, and the FastCGI parameters PATH_INFO and PATH_TRANSLATED are not
+  filled.
+
+  For security reasons, when this regular expression is defined, the newline
+  and the null characters are forbidden from the path, once URL-decoded. The
+  reason for such a limitation is that otherwise the matching always fails
+  (due to a limitation on the way regular expressions are executed in
+  HAProxy). So if one of these two characters is found in the URL-decoded
+  path, an error is
+ returned to the client. The principle of least astonishment is applied here.
+
+ Example :
+ path-info ^(/.+\.php)(/.*)?$ # both script-name and path-info may be set
+ path-info ^(/.+\.php) # the path-info is ignored
+
+option get-values
+no option get-values
+  Enable or disable the retrieval of variables about connection management.
+
+ HAProxy is able to send the record FCGI_GET_VALUES on connection
+ establishment to retrieve the value for following variables:
+
+ * FCGI_MAX_REQS The maximum number of concurrent requests this
+ application will accept.
+
+ * FCGI_MPXS_CONNS "0" if this application does not multiplex connections,
+ "1" otherwise.
+
+  Some FastCGI applications do not support this feature. Some others close
+ the connection immediately after sending their response. So, by default, this
+ option is disabled.
+
+ Note that the maximum number of concurrent requests accepted by a FastCGI
+ application is a connection variable. It only limits the number of streams
+ per connection. If the global load must be limited on the application, the
+ server parameters "maxconn" and "pool-max-conn" must be set. In addition, if
+ an application does not support connection multiplexing, the maximum number
+ of concurrent requests is automatically set to 1.
+
+option keep-conn
+no option keep-conn
+ Instruct the FastCGI application to keep the connection open or not after
+ sending a response.
+
+ If disabled, the FastCGI application closes the connection after responding
+ to this request. By default, this option is enabled.
+
+option max-reqs <reqs>
+ Define the maximum number of concurrent requests this application will
+ accept.
+
+ This option may be overwritten if the variable FCGI_MAX_REQS is retrieved
+ during connection establishment. Furthermore, if the application does not
+ support connection multiplexing, this option will be ignored. By default set
+ to 1.
+
+option mpxs-conns
+no option mpxs-conns
+ Enable or disable the support of connection multiplexing.
+
+ This option may be overwritten if the variable FCGI_MPXS_CONNS is retrieved
+ during connection establishment. It is disabled by default.
+
+set-param <name> <fmt> [ { if | unless } <condition> ]
+ Set a FastCGI parameter that should be passed to this application. Its
+  value, defined by <fmt> must follow the log-format rules (see section 8.2.4
+ "Custom Log format"). It may optionally be followed by an ACL-based
+ condition, in which case it will only be evaluated if the condition is true.
+
+ With this directive, it is possible to overwrite the value of default FastCGI
+ parameters. If the value is evaluated to an empty string, the rule is
+ ignored. These directives are evaluated in their declaration order.
+
+ Example :
+ # PHP only, required if PHP was built with --enable-force-cgi-redirect
+ set-param REDIRECT_STATUS 200
+
+ set-param PHP_AUTH_DIGEST %[req.hdr(Authorization)]
+
+
+10.1.2. Proxy section
+---------------------
+
+use-fcgi-app <name>
+ Define the FastCGI application to use for the backend.
+
+ Arguments :
+ <name> is the name of the FastCGI application to use.
+
+ This keyword is only available for HTTP proxies with the backend capability
+ and with at least one FastCGI server. However, FastCGI servers can be mixed
+  with HTTP servers. But unless there is a good reason to do so, it is not
+ recommended (see section 10.3 about the limitations for details). Only one
+ application may be defined at a time per backend.
+
+ Note that, once a FastCGI application is referenced for a backend, depending
+ on the configuration some processing may be done even if the request is not
+ sent to a FastCGI server. Rules to set parameters or pass headers to an
+ application are evaluated.
+
+
+10.1.3. Example
+---------------
+
+ frontend front-http
+ mode http
+ bind *:80
+ bind *:
+
+ use_backend back-dynamic if { path_reg ^/.+\.php(/.*)?$ }
+ default_backend back-static
+
+ backend back-static
+ mode http
+ server www A.B.C.D:80
+
+ backend back-dynamic
+ mode http
+ use-fcgi-app php-fpm
+ server php-fpm A.B.C.D:9000 proto fcgi
+
+ fcgi-app php-fpm
+ log-stderr global
+ option keep-conn
+
+ docroot /var/www/my-app
+ index index.php
+ path-info ^(/.+\.php)(/.*)?$
+
+
+10.2. Default parameters
+------------------------
+
+A Responder FastCGI application has the same purpose as a CGI/1.1 program. In
+the CGI/1.1 specification (RFC3875), several variables must be passed to the
+script. So HAProxy set them and some others commonly used by FastCGI
+applications. All these variables may be overwritten, with caution though.
+
+ +-------------------+-----------------------------------------------------+
+ | AUTH_TYPE | Identifies the mechanism, if any, used by HAProxy |
+ | | to authenticate the user. Concretely, only the |
+ | | BASIC authentication mechanism is supported. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | CONTENT_LENGTH | Contains the size of the message-body attached to |
+ | | the request. It means only requests with a known |
+ | | size are considered as valid and sent to the |
+ | | application. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | CONTENT_TYPE | Contains the type of the message-body attached to |
+ | | the request. It may not be set. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | DOCUMENT_ROOT | Contains the document root on the remote host under |
+ | | which the script should be executed, as defined in |
+ | | the application's configuration. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | GATEWAY_INTERFACE | Contains the dialect of CGI being used by HAProxy |
+ | | to communicate with the FastCGI application. |
+ | | Concretely, it is set to "CGI/1.1". |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | PATH_INFO | Contains the portion of the URI path hierarchy |
+ | | following the part that identifies the script |
+ | | itself. To be set, the directive "path-info" must |
+ | | be defined. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | PATH_TRANSLATED | If PATH_INFO is set, it is its translated version. |
+ | | It is the concatenation of DOCUMENT_ROOT and |
+  |                   | PATH_INFO. If PATH_INFO is not set, this parameter  |
+  |                   | is not set either.                                  |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | QUERY_STRING | Contains the request's query string. It may not be |
+ | | set. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | REMOTE_ADDR | Contains the network address of the client sending |
+ | | the request. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | REMOTE_USER | Contains the user identification string supplied by |
+ | | client as part of user authentication. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | REQUEST_METHOD | Contains the method which should be used by the |
+ | | script to process the request. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | REQUEST_URI | Contains the request's URI. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+  | SCRIPT_FILENAME   | Contains the absolute pathname of the script. It is |
+ | | the concatenation of DOCUMENT_ROOT and SCRIPT_NAME. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | SCRIPT_NAME | Contains the name of the script. If the directive |
+ | | "path-info" is defined, it is the first part of the |
+ | | URI path hierarchy, ending with the script name. |
+ | | Otherwise, it is the entire URI path. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | SERVER_NAME | Contains the name of the server host to which the |
+ | | client request is directed. It is the value of the |
+ | | header "Host", if defined. Otherwise, the |
+ | | destination address of the connection on the client |
+ | | side. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | SERVER_PORT | Contains the destination TCP port of the connection |
+ | | on the client side, which is the port the client |
+ | | connected to. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | SERVER_PROTOCOL | Contains the request's protocol. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | SERVER_SOFTWARE | Contains the string "HAProxy" followed by the |
+ | | current HAProxy version. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+ | HTTPS | Set to a non-empty value ("on") if the script was |
+ | | queried through the HTTPS protocol. |
+ | | |
+ +-------------------+-----------------------------------------------------+
+
+
+10.3. Limitations
+------------------
+
+The current implementation has some limitations. The first one is about the
+way some request headers are hidden from the FastCGI applications. This
+happens during the headers analysis, on the backend side, before the
+connection establishment. At this stage, HAProxy knows the backend is using a
+FastCGI application but it doesn't know if the request will be routed to a
+FastCGI server or not. But to hide request headers, it simply removes them
+from the HTX message. So, if the request is finally routed to an HTTP server,
+it never sees these headers. For this reason, it is not recommended to mix
+FastCGI servers and HTTP servers under the same backend.
+
+Similarly, the rules "set-param" and "pass-header" are evaluated during the
+request headers analysis. So the evaluation is always performed, even if the
+request is finally forwarded to an HTTP server.
+
+About the rules "set-param", when a rule is applied, a pseudo header is added
+into the HTX message. So, in the same way as for HTTP header rewrites, it may
+fail if the buffer is full. The rules "set-param" will compete with
+"http-request" ones.
+
+Finally, all FastCGI params and HTTP headers are sent into a unique record
+FCGI_PARAM. Encoding of this record must be done in one pass, otherwise a
+processing error is returned. It means the record FCGI_PARAM, once encoded,
+must not exceed the size of a buffer. However, there is no reserve to respect
+here.
+
+
+11. Address formats
+-------------------
+
+Several statements such as "bind", "server", "nameserver" and "log" require
+an address.
+
+This address can be a host name, an IPv4 address, an IPv6 address, or '*'.
+The '*' is equal to the special address "0.0.0.0" and can be used, in the case
+of "bind" or "dgram-bind", to listen on all IPv4 addresses of the system. The
+IPv6 equivalent is '::'.
+
+Depending on the statement, a port or port range follows the IP address. This
+is mandatory on 'bind' statement, optional on 'server'.
+
+This address can also begin with a slash '/'. It is considered as the "unix"
+family, and '/' and the following characters must be present in the path.
+
+Default socket type or transport method "datagram" or "stream" depends on the
+configuration statement showing the address. Indeed, 'bind' and 'server' will
+use a "stream" socket type by default whereas 'log', 'nameserver' or
+'dgram-bind' will use a "datagram".
+
+Optionally, a prefix could be used to force the address family and/or the
+socket type and the transport method.
+
+
+11.1. Address family prefixes
+-----------------------------
+
+'abns@<name>' following <name> is an abstract namespace (Linux only).
+
+'fd@<n>' following address is a file descriptor <n> inherited from the
+ parent. The fd must be bound and may or may not already be
+ listening.
+
+'ip@<address>[:port1[-port2]]' following <address> is considered as an IPv4 or
+ IPv6 address depending on the syntax. Depending
+ on the statement using this address, a port or
+ a port range may or must be specified.
+
+'ipv4@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv4 address. Depending on the statement
+ using this address, a port or a port range
+ may or must be specified.
+
+'ipv6@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv6 address. Depending on the statement
+ using this address, a port or a port range
+ may or must be specified.
+
+'sockpair@<n>' following address is the file descriptor of a connected unix
+ socket or of a socketpair. During a connection, the initiator
+ creates a pair of connected sockets, and passes one of them
+ over the FD to the other end. The listener waits to receive
+ the FD from the unix socket and uses it as if it were the FD
+ of an accept(). Should be used carefully.
+
+'unix@<path>' following string is considered as a UNIX socket <path>. This
+              prefix is useful to declare a UNIX socket path which doesn't
+              start with a slash '/'.
+
+
+11.2. Socket type prefixes
+--------------------------
+
+Previous "Address family prefixes" can also be prefixed to force the socket
+type and the transport method. The default depends on the statement using
+this address but in some cases the user may force it to a different one.
+This is the case for "log" statement where the default is syslog over UDP
+but we could force to use syslog over TCP.
+
+Those prefixes were designed for internal purposes and users should instead
+use aliases of the next section "11.3 Protocol prefixes". However these can
+sometimes be convenient, for example in combination with inherited sockets
+known by their file descriptor number, in which case the address family is "fd"
+and the socket type must be declared.
+
+If users need one of those prefixes to perform what they expect because
+they can not configure the same using the protocol prefixes, they should
+report this to the maintainers.
+
+'stream+<family>@<address>' forces socket type and transport method
+ to "stream"
+
+'dgram+<family>@<address>' forces socket type and transport method
+ to "datagram".
+
+'quic+<family>@<address>' forces socket type to "datagram" and transport
+ method to "stream".
+
+
+
+11.3. Protocol prefixes
+-----------------------
+
+'quic4@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv4 address but socket type is forced to
+ "datagram" and the transport method is forced
+ to "stream". Depending on the statement using
+ this address, a UDP port or port range can or
+ must be specified. It is equivalent to
+ "quic+ipv4@".
+
+'quic6@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv6 address but socket type is forced to
+ "datagram" and the transport method is forced
+ to "stream". Depending on the statement using
+ this address, a UDP port or port range can or
+ must be specified. It is equivalent to
+ "quic+ipv6@".
+
+'tcp@<address>[:port1[-port2]]' following <address> is considered as an IPv4
+ or IPv6 address depending of the syntax but
+ socket type and transport method is forced to
+ "stream". Depending on the statement using
+ this address, a port or a port range can or
+ must be specified. It is considered as an alias
+ of 'stream+ip@'.
+
+'tcp4@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv4 address but socket type and transport
+ method is forced to "stream". Depending on the
+ statement using this address, a port or port
+ range can or must be specified.
+ It is considered as an alias of 'stream+ipv4@'.
+
+'tcp6@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv6 address but socket type and transport
+ method is forced to "stream". Depending on the
+ statement using this address, a port or port
+ range can or must be specified.
+                                 It is considered as an alias of 'stream+ipv6@'.
+
+'udp@<address>[:port1[-port2]]' following <address> is considered as an IPv4
+ or IPv6 address depending of the syntax but
+ socket type and transport method is forced to
+ "datagram". Depending on the statement using
+ this address, a port or a port range can or
+ must be specified. It is considered as an alias
+ of 'dgram+ip@'.
+
+'udp4@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv4 address but socket type and transport
+ method is forced to "datagram". Depending on
+ the statement using this address, a port or
+ port range can or must be specified.
+ It is considered as an alias of 'dgram+ipv4@'.
+
+'udp6@<address>[:port1[-port2]]' following <address> is always considered as
+ an IPv6 address but socket type and transport
+ method is forced to "datagram". Depending on
+ the statement using this address, a port or
+ port range can or must be specified.
+                                 It is considered as an alias of 'dgram+ipv6@'.
+
+'uxdg@<path>' following string is considered as a unix socket <path> but
+ transport method is forced to "datagram". It is considered as
+ an alias of 'dgram+unix@'.
+
+'uxst@<path>' following string is considered as a unix socket <path> but
+ transport method is forced to "stream". It is considered as
+ an alias of 'stream+unix@'.
+
+In future versions, other prefixes could be used to specify protocols like
+QUIC which proposes stream transport based on socket of type "datagram".
+
+/*
+ * Local variables:
+ * fill-column: 79
+ * End:
+ */
diff --git a/doc/cookie-options.txt b/doc/cookie-options.txt
new file mode 100644
index 0000000..b3badf3
--- /dev/null
+++ b/doc/cookie-options.txt
@@ -0,0 +1,25 @@
+2011/04/13 : List of possible cookie settings with associated behaviours.
+
+PSV="preserve", PFX="prefix", INS="insert", REW="rewrite", IND="indirect"
+0 = option not set
+1 = option is set
+* = option doesn't matter
+
+PSV PFX INS REW IND Behaviour
+ 0 0 0 0 0 passive mode
+ 0 0 0 0 1 passive + indirect : remove response if not needed
+ 0 0 0 1 0 always rewrite response
+ 0 0 1 0 0 always insert or replace response
+ 0 0 1 0 1 insert + indirect : remove req and also resp if not needed
+ * * 1 1 * [ forbidden ]
+ 0 1 0 0 0 prefix
+ 0 1 0 0 1 !! prefix on request, remove response cookie if not needed
+ * 1 * 1 * [ forbidden ]
+ * 1 1 * * [ forbidden ]
+ * * * 1 1 [ forbidden ]
+ 1 * 0 * 0 [ forbidden ]
+ 1 0 0 0 1 passive mode (alternate form)
+ 1 0 1 0 0 insert only, and preserve server response cookie if any
+ 1 0 1 0 1 conditional insert only for new requests
+ 1 1 0 0 1 prefix on requests only (passive prefix)
+
diff --git a/doc/design-thoughts/binding-possibilities.txt b/doc/design-thoughts/binding-possibilities.txt
new file mode 100644
index 0000000..3f5e432
--- /dev/null
+++ b/doc/design-thoughts/binding-possibilities.txt
@@ -0,0 +1,167 @@
+2013/10/10 - possibilities for setting source and destination addresses
+
+
+When establishing a connection to a remote device, this device is designated
+as a target, which designates an entity defined in the configuration. A same
+target appears only once in a configuration, and multiple targets may share
+the same settings if needed.
+
+The following types of targets are currently supported :
+
+ - listener : all connections with this type of target come from clients ;
+ - server : connections to such targets are for "server" lines ;
+ - peer : connections to such target address "peer" lines in "peers"
+ sections ;
+ - proxy : these targets are used by "dispatch", "option transparent"
+ or "option http_proxy" statements.
+
+A connection might not be reused between two different targets, even if all
+parameters seem similar. One of the reason is that some parameters are specific
+to the target and are not easy or not cheap to compare (eg: bind to interface,
+mss, ...).
+
+A number of source and destination addresses may be set for a given target.
+
+ - listener :
+ - the "from" address:port is set by accept()
+
+ - the "to" address:port is set if conn_get_to_addr() is called
+
+ - peer :
+ - the "from" address:port is not set
+
+ - the "to" address:port is static and dependent only on the peer
+
+ - server :
+ - the "from" address may be set alone when "source" is used with
+ a forced IP address, or when "usesrc clientip" is used.
+
+ - the "from" port may be set only combined with the address when
+ "source" is used with IP:port, IP:port-range or "usesrc client" is
+ used. Note that in this case, both the address and the port may be
+ 0, meaning that the kernel will pick the address or port and that
+ the final value might not match the one explicitly set (eg:
+ important for logging).
+
+ - the "from" address may be forced from a header which implies it
+ may change between two consecutive requests on the same connection.
+
+ - the "to" address and port are set together when connecting to a
+ regular server, or by copying the client's IP address when
+ "server 0.0.0.0" is used. Note that the destination port may be
+ an offset applied to the original destination port.
+
+ - proxy :
+ - the "from" address may be set alone when "source" is used with a
+ forced IP address or when "usesrc clientip" is used.
+
+ - the "from" port may be set only combined with the address when
+ "source" is used with IP:port or with "usesrc client". There is
+ no ip:port range for a proxy as of now. Same comment applies as
+ above when port and/or address are 0.
+
+ - the "from" address may be forced from a header which implies it
+ may change between two consecutive requests on the same connection.
+
+ - the "to" address and port are set together, either by configuration
+ when "dispatch" is used, or dynamically when "transparent" is used
+ (1:1 with client connection) or "option http_proxy" is used, where
+ each client request may lead to a different destination address.
+
+
+At the moment, there are some limits in what might happen between multiple
+concurrent requests to a same target.
+
+  - peers parameters do not change, so no problem.
+
+ - server parameters may change in this way :
+ - a connection may require a source bound to an IP address found in a
+ header, which will fall back to the "source" settings if the address
+ is not found in this header. This means that the source address may
+ switch between a dynamically forced IP address and another forced
+ IP and/or port range.
+
+ - if the element is not found (eg: header), the remaining "forced"
+ source address might very well be empty (unset), so the connection
+ reuse is acceptable when switching in that direction.
+
+ - it is not possible to switch between client and clientip or any of
+ these and hdr_ip() because they're exclusive.
+
+ - using a source address/port belonging to a port range is compatible
+ with connection reuse because there is a single range per target, so
+ switching from a range to another range means we remain in the same
+ range.
+
+ - destination address may currently not change since the only possible
+ case for dynamic destination address setting is the transparent mode,
+ reproducing the client's destination address.
+
+ - proxy parameters may change in this way :
+ - a connection may require a source bound to an IP address found in a
+ header, which will fall back to the "source" settings if the address
+ is not found in this header. This means that the source address may
+ switch between a dynamically forced IP address and another forced
+ IP and/or port range.
+
+ - if the element is not found (eg: header), the remaining "forced"
+ source address might very well be empty (unset), so the connection
+ reuse is acceptable when switching in that direction.
+
+ - it is not possible to switch between client and clientip or any of
+ these and hdr_ip() because they're exclusive.
+
+ - proxies do not support port ranges at the moment.
+
+ - destination address might change in the case where "option http_proxy"
+ is used.
+
+So, for each source element (IP, port), we want to know :
+ - if the element was assigned by static configuration (eg: ":80")
+ - if the element was assigned from a connection-specific value (eg: usesrc clientip)
+ - if the element was assigned from a configuration-specific range (eg: 1024-65535)
+ - if the element was assigned from a request-specific value (eg: hdr_ip(xff))
+ - if the element was not assigned at all
+
+For the destination, we want to know :
+ - if the element was assigned by static configuration (eg: ":80")
+ - if the element was assigned from a connection-specific value (eg: transparent)
+ - if the element was assigned from a request-specific value (eg: http_proxy)
+
+We don't need to store the information about the origin of the dynamic value
+since we have the value itself. So in practice we have :
+ - default value, unknown (not yet checked with getsockname/getpeername)
+ - default value, known (check done)
+ - forced value (known)
+ - forced range (known)
+
+We can't do that on an ip:port basis because the port may be fixed regardless
+of the address and conversely.
+
+So that means :
+
+ enum {
+ CO_ADDR_NONE = 0, /* not set, unknown value */
+ CO_ADDR_KNOWN = 1, /* not set, known value */
+ CO_ADDR_FIXED = 2, /* fixed value, known */
+ CO_ADDR_RANGE = 3, /* from assigned range, known */
+ } conn_addr_values;
+
+ unsigned int new_l3_src_status:2;
+ unsigned int new_l4_src_status:2;
+ unsigned int new_l3_dst_status:2;
+ unsigned int new_l4_dst_status:2;
+
+ unsigned int cur_l3_src_status:2;
+ unsigned int cur_l4_src_status:2;
+ unsigned int cur_l3_dsp_status:2;
+ unsigned int cur_l4_dst_status:2;
+
+ unsigned int new_family:2;
+ unsigned int cur_family:2;
+
+Note: this obsoletes CO_FL_ADDR_FROM_SET and CO_FL_ADDR_TO_SET. These flags
+must be changed to individual l3+l4 checks ORed between old and new values,
+or better, set to cur only which will inherit new.
+
+In the connection, these values may be merged in the same word as err_code.
diff --git a/doc/design-thoughts/connection-reuse.txt b/doc/design-thoughts/connection-reuse.txt
new file mode 100644
index 0000000..4eb22f6
--- /dev/null
+++ b/doc/design-thoughts/connection-reuse.txt
@@ -0,0 +1,224 @@
+2015/08/06 - server connection sharing
+
+Improvements on the connection sharing strategies
+-------------------------------------------------
+
+4 strategies are currently supported :
+ - never
+ - safe
+ - aggressive
+ - always
+
+The "aggressive" and "always" strategies take into account the fact that the
+connection has already been reused at least once or not. The principle is that
+second requests can be used to safely "validate" connection reuse on newly
+added connections, and that such validated connections may be used even by
+first requests from other sessions. A validated connection is a connection
+which has already been reused, hence proving that it definitely supports
+multiple requests. Such connections are easy to verify : after processing the
+response, if the txn already had the TX_NOT_FIRST flag, then it was not the
+first request over that connection, and it is validated as safe for reuse.
+Validated connections are put into a distinct list : server->safe_conns.
+
+Incoming requests with TX_NOT_FIRST first pick from the regular idle_conns
+list so that any new idle connection is validated as soon as possible.
+
+Incoming requests without TX_NOT_FIRST only pick from the safe_conns list for
+strategy "aggressive", guaranteeing that the server properly supports connection
+reuse, or first from the safe_conns list, then from the idle_conns list for
+strategy "always".
+
+Connections are always stacked into the list (LIFO) so that there are higher
+chances to convert recent connections and to use them. This will first optimize
+the likeliness that the connection works, and will avoid TCP metrics from being
+lost due to an idle state, and/or the congestion window to drop and the
+connection going to slow start mode.
+
+
+Handling connections in pools
+-----------------------------
+
+A per-server "pool-max" setting should be added to permit disposing unused idle
+connections not attached anymore to a session for use by future requests. The
+principle will be that attached connections are queued from the front of the
+list while the detached connections will be queued from the tail of the list.
+
+This way, most reused connections will be fairly recent and detached connections
+will most often be ignored. The number of detached idle connections in the lists
+should be accounted for (pool_used) and limited (pool_max).
+
+After some time, a part of these detached idle connections should be killed.
+For this, the list is walked from tail to head and connections without an owner
+may be evicted. It may be useful to have a per-server pool_min setting
+indicating how many idle connections should remain in the pool, ready for use
+by new requests. Conversely, a pool_low metric should be kept between eviction
+runs, to indicate the lowest amount of detached connections that were found in
+the pool.
+
+For eviction, the principle of a half-life is appealing. The principle is
+simple : over a period of time, half of the connections between pool_min and
+pool_low should be gone. Since pool_low indicates how many connections were
+remaining unused over a period, it makes sense to kill some of them.
+
+In order to avoid killing thousands of connections in one run, the purge
+interval should be split into smaller batches. Let's call N the ratio of the
+half-life interval and the effective interval.
+
+The algorithm consists in walking over them from the end every interval and
+killing ((pool_low - pool_min) + 2 * N - 1) / (2 * N). It ensures that half
+of the unused connections are killed over the half-life period, in N batches
+of population/2N entries at most.
+
+Unsafe connections should be evicted first. There should be very few of them
+since most of them are probed and become safe. Since detached connections are
+quickly recycled and attached to a new session, there should not be too many
+detached connections in the pool, and those present there may be killed really
+quickly.
+
+Another interesting point of pools is that when a pool-max is not null, then it
+makes sense to automatically enable pretend-keep-alive on non-private connections
+going to the server in order to be able to feed them back into the pool. With
+the "aggressive" or "always" strategies, it can allow clients making a single
+request over their connection to share persistent connections to the servers.
+
+
+
+2013/10/17 - server connection management and reuse
+
+Current state
+-------------
+
+At the moment, a connection entity is needed to carry any address
+information. This means in the following situations, we need a server
+connection :
+
+- server is elected and the server's destination address is set
+
+- transparent mode is elected and the destination address is set from
+ the incoming connection
+
+- proxy mode is enabled, and the destination's address is set during
+ the parsing of the HTTP request
+
+- connection to the server fails and must be retried on the same
+ server using the same parameters, especially the destination
+ address (SN_ADDR_SET not removed)
+
+
+On the accepting side, we have further requirements :
+
+- allocate a clean connection without a stream interface
+
+- incrementally set the accepted connection's parameters without
+ clearing it, and keep track of what is set (eg: getsockname).
+
+- initialize a stream interface in established mode
+
+- attach the accepted connection to a stream interface
+
+
+This means several things :
+
+- the connection has to be allocated on the fly the first time it is
+ needed to store the source or destination address ;
+
+- the connection has to be attached to the stream interface at this
+ moment ;
+
+- it must be possible to incrementally set some settings on the
+ connection's addresses regardless of the connection's current state
+
+- the connection must not be released across connection retries ;
+
+- it must be possible to clear a connection's parameters for a
+ redispatch without having to detach/attach the connection ;
+
+- we need to allocate a connection without an existing stream interface
+
+So on the accept() side, it looks like this :
+
+ fd = accept();
+ conn = new_conn();
+ get_some_addr_info(&conn->addr);
+ ...
+ si = new_si();
+ si_attach_conn(si, conn);
+ si_set_state(si, SI_ST_EST);
+ ...
+ get_more_addr_info(&conn->addr);
+
+On the connect() side, it looks like this :
+
+ si = new_si();
+ while (!properly_connected) {
+ if (!(conn = si->end)) {
+ conn = new_conn();
+ conn_clear(conn);
+ si_attach_conn(si, conn);
+ }
+ else {
+ if (connected) {
+ f = conn->flags & CO_FL_XPRT_TRACKED;
+ conn->flags &= ~CO_FL_XPRT_TRACKED;
+ conn_close(conn);
+ conn->flags |= f;
+ }
+ if (!correct_dest)
+ conn_clear(conn);
+ }
+ set_some_addr_info(&conn->addr);
+ si_set_state(si, SI_ST_CON);
+ ...
+ set_more_addr_info(&conn->addr);
+ conn->connect();
+ if (must_retry) {
+ close_conn(conn);
+ }
+ }
+
+Note: we need to be able to set the control and transport protocols.
+On outgoing connections, this is set once we know the destination address.
+On incoming connections, this is set the earliest possible (once we know
+the source address).
+
+The problem analysed below was solved on 2013/10/22
+
+| ==> the real requirement is to know whether a connection is still valid or not
+| before deciding to close it. CO_FL_CONNECTED could be enough, though it
+| will not indicate connections that are still waiting for a connect to occur.
+| This combined with CO_FL_WAIT_L4_CONN and CO_FL_WAIT_L6_CONN should be OK.
+|
+| Alternatively, conn->xprt could be used for this, but needs some careful checks
+| (it's used by conn_full_close at least).
+|
+| Right now, conn_xprt_close() checks conn->xprt and sets it to NULL.
+| conn_full_close() also checks conn->xprt and sets it to NULL, except
+| that the check on ctrl is performed within xprt. So conn_xprt_close()
+| followed by conn_full_close() will not close the file descriptor.
+| Note that conn_xprt_close() is never called, maybe we should kill it ?
+|
+| Note: at the moment, it's problematic to leave conn->xprt to NULL before doing
+| xprt_init() because we might end up with a pending file descriptor. Or at
+| least with some transport not de-initialized. We might thus need
+| conn_xprt_close() when conn_xprt_init() fails.
+|
+| The fd should be conditioned by ->ctrl only, and the transport layer by ->xprt.
+|
+| - conn_prepare_ctrl(conn, ctrl)
+| - conn_prepare_xprt(conn, xprt)
+| - conn_prepare_data(conn, data)
+|
+| Note: conn_xprt_init() needs conn->xprt so it's not a problem to set it early.
+|
+| One problem might be with conn_xprt_close() not being able to know if xprt_init()
+| was called or not. That's where it might make sense to only set ->xprt during init.
+| Except that it does not fly with outgoing connections (xprt_init is called after
+| connect()).
+|
+| => currently conn_xprt_close() is only used by ssl_sock.c and decides whether
+| to do something based on ->xprt_ctx which is set by ->init() from xprt_init().
+| So there is nothing to worry about. We just need to restore conn_xprt_close()
+| and rely on ->ctrl to close the fd instead of ->xprt.
+|
+| => we have the same issue with conn_ctrl_close() : when is the fd supposed to be
+| valid ? On outgoing connections, the control is set much before the fd...
diff --git a/doc/design-thoughts/http_load_time.url b/doc/design-thoughts/http_load_time.url
new file mode 100644
index 0000000..f178e46
--- /dev/null
+++ b/doc/design-thoughts/http_load_time.url
@@ -0,0 +1,5 @@
+Excellent paper about page load time for keepalive on/off, pipelining,
+multiple host names, etc...
+
+http://www.die.net/musings/page_load_time/
+
diff --git a/doc/design-thoughts/pool-debugging.txt b/doc/design-thoughts/pool-debugging.txt
new file mode 100644
index 0000000..106e41c
--- /dev/null
+++ b/doc/design-thoughts/pool-debugging.txt
@@ -0,0 +1,243 @@
+2022-02-22 - debugging options with pools
+
+Two goals:
+ - help developers spot bugs as early as possible
+
+ - make the process more reliable in field, by killing sick ones as soon as
+ possible instead of letting them corrupt data, cause trouble, or even be
+ exploited.
+
+An allocated object may exist in 5 forms:
+ - in use: currently referenced and used by haproxy, 100% of its size are
+ dedicated to the application which can do absolutely anything with it,
+ but it may never touch anything before nor after that area.
+
+ - in cache: the object is neither referenced nor used anymore, but it sits
+ in a thread's cache. The application may not touch it at all anymore, and
+ some parts of it could even be unmapped. Only the current thread may safely
+ reach it, though others might find/release it when under thread isolation.
+ The thread cache needs some LRU linking that may be stored anywhere, either
+ inside the area, or outside. The parts surrounding the <size> parts remain
+ invisible to the application layer, and can serve as a protection.
+
+ - in shared cache: the object is neither referenced nor used anymore, but it
+ may be reached by any thread. Some parts of it could be unmapped. Any
+ thread may pick it but only one may find it, hence once grabbed, it is
+ guaranteed no other one will find it. The shared cache needs to set up a
+ linked list and a single pointer needs to be stored anywhere, either inside
+ or outside the area. The parts surrounding the <size> parts remain
+ invisible to the application layer, and can serve as a protection.
+
+ - in the system's memory allocator: the object is not known anymore from
+ haproxy. It may be reassigned in parts or totally to other pools or other
+ subsystems (e.g. crypto library). Some or all of it may be unmapped. The
+ areas surrounding the <size> parts are also part of the object from the
+ library's point of view and may be delivered to other areas. Tampering
+ with these may cause any other part to malfunction in dirty ways.
+
+ - in the OS only: the memory allocator gave it back to the OS.
+
+The following options need to be configurable:
+ - detect improper initialization: this is done by poisoning objects before
+ delivering them to the application.
+
+ - help figure where an object was allocated when in use: a pointer to the
+ call place will help. Pointing to the last pool_free() as well for the
+ same reasons when dealing with a UAF.
+
+ - detection of wrong pointer/pool when in use: a pointer to the pool before
+ or after the area will definitely help.
+
+ - detection of overflows when in use: a canary at the end of the area
+ (closest possible to <size>) will definitely help. The pool above can do
+ that job. Ideally, we should fill some data at the end so that even
+ unaligned sizes can be checked (e.g. a buffer that gets a zero appended).
+ If we just align on 2 pointers, writing the same pointer twice at the end
+ may do the job, but we won't necessarily have our bytes. Thus a particular
+ end-of-string pattern would be useful (e.g. ff55aa01) to fill it.
+
+ - detection of double free when in cache: similar to detection of wrong
+ pointer/pool when in use: the pointer at the end may simply be changed so
+ that it cannot match the pool anymore. By using a pointer to the caller of
+ the previous free() operation, we have the guarantee to see different
+ pointers, and this pointer can be inspected to figure where the object was
+ previously freed. An extra check may even distinguish a perfect double-free
+ (same caller) from just a wrong free (pointer differs from pool).
+
+ - detection of late corruption when in cache: keeping a copy of the
+ checksum of the whole area upon free() will do the job, but requires one
+ extra storage area for the checksum. Filling the area with a pattern also
+ does the job and doesn't require extra storage, but it loses the contents
+ and can be a bit slower. Sometimes losing the contents can be a feature,
+ especially when trying to detect late reads. Probably that both need to
+ be implemented. Note that if contents are not strictly needed, storing a
+ checksum inside the area does the job.
+
+ - preserve total contents in cache for debugging: losing some precious
+ information can be a problem.
+
+ - pattern filling of the area helps detect use-after-free in read-only mode.
+
+ - allocate cold first helps with both cases above.
+
+Uncovered:
+ - overflow/underflow when in cache/shared/libc: it belongs to use-after-free
+ pattern and such an error during regular use ought to be caught while the
+ object was still in use.
+
+ - integrity when in libc: not under our control anymore, this is a libc
+ problem.
+
+Arbitrable:
+ - integrity when in shared cache: unlikely to happen only then if it could
+ have happened in the local cache. Shared cache not often used anymore, thus
+ probably not worth the effort
+
+ - protection against double-free when in shared cache/libc: might be done for
+ a cheap price, probably worth being able to quickly tell that such an
+ object left the local cache (e.g. the mark points to the caller, but could
+ possibly just be incremented, hence still point to the same code location+1
+ byte when released. Calls are 4 bytes min on RISC, 5 on x86 so we do have
+ some margin by having a caller's location be +0,+1,+2 or +3.
+
+ - underflow when in use: hasn't been really needed over time but may change.
+
+ - detection of late corruption when in shared cache: checksum or area filling
+ are possible, but is this as relevant as it used to be, considering the less
+ common use of the shared cache ?
+
+Design considerations:
+ - object allocation when in use must remain minimal
+
+ - when in cache, there are 2 lists which the compiler expects to be at least
+ aligned each (e.g. if/when we start to use DWCAS).
+
+ - the original "pool debugging" feature covers both pool tracking, double-
+ free detection, overflow detection and caller info at the cost of a single
+ pointer placed immediately after the area.
+
+ - preserving the contents might be done by placing the cache links and the
+ shared cache's list outside of the area (either before or after). Placing
+ it before has the merit that the allocated object preserves the 4-ptr
+ alignment. But when a larger alignment is desired this often does not work
+ anymore. Placing it after requires some dynamic adjustment depending on the
+ object's size. If any protection is installed, this protection must be
+ placed before the links so that the list doesn't get randomly corrupted and
+ corrupts adjacent elements. Note that if protection is desired, the extra
+ waste is probably less critical.
+
+ - a link to the last caller might have to be stored somewhere. Without
+ preservation the free() caller may be placed anywhere while the alloc()
+ caller may only be placed outside. With preservation, again the free()
+ caller may be placed either before the object or after the mark at the end.
+ There is no particular need that both share the same location though it may
+ help. Note that when debugging is enabled, the free() caller doesn't need
+ to be duplicated and can continue to serve as the double-free detection.
+ Thus maybe in the end we only need to store the caller to the last alloc()
+ but not the free() since if we want it it's available via the pool debug.
+
+ - use-after-free detection: contents may be erased on free() and checked on
+ alloc(), but they can also be checksummed on free() and rechecked on
+ alloc(). In the latter case we need to store a checksum somewhere. Note
+ that with pure checksum we don't know what part was modified, but seeing
+ previous contents can be useful.
+
+Possibilities:
+
+1) Linked lists inside the area:
+
+ V size alloc
+ ---+------------------------------+-----------------+--
+ in use |##############################| (Pool) (Tracer) |
+ ---+------------------------------+-----------------+--
+
+ ---+--+--+------------------------+-----------------+--
+ in cache |L1|L2|########################| (Caller) (Sum) |
+ ---+--+--+------------------------+-----------------+--
+or:
+ ---+--+--+------------------------+-----------------+--
+ in cache |L1|L2|###################(sum)| (Caller) |
+ ---+--+--+------------------------+-----------------+--
+
+ ---+-+----------------------------+-----------------+--
+ in global |N|XXXX########################| (Caller) |
+ ---+-+----------------------------+-----------------+--
+
+
+2) Linked lists before the area leave room for tracer and pool before
+ the area, but the canary must remain at the end, however the area will
+ be more difficult to keep aligned:
+
+ V head size alloc
+ ----+-+-+------------------------------+-----------------+--
+ in use |T|P|##############################| (canary) |
+ ----+-+-+------------------------------+-----------------+--
+
+ --+-----+------------------------------+-----------------+--
+ in cache |L1|L2|##############################| (Caller) (Sum) |
+ --+-----+------------------------------+-----------------+--
+
+ ------+-+------------------------------+-----------------+--
+ in global |N|##############################| (Caller) |
+ ------+-+------------------------------+-----------------+--
+
+
+3) Linked lists at the end of the area, might be shared with extra data
+ depending on the state:
+
+ V size alloc
+ ---+------------------------------+-----------------+--
+ in use |##############################| (Pool) (Tracer) |
+ ---+------------------------------+-----------------+--
+
+ ---+------------------------------+--+--+-----------+--
+ in cache |##############################|L1|L2| (Caller) (Sum)
+ ---+------------------------------+--+--+-----------+--
+
+ ---+------------------------------+-+---------------+--
+ in global |##############################|N| (Caller) |
+ ---+------------------------------+-+---------------+--
+
+This model requires a little bit of alignment at the end of the area, which is
+not incompatible with pattern filling and/or checksumming:
+ - preserving the area for post-mortem analysis means nothing may be placed
+ inside. In this case it could make sense to always store the last releaser.
+ - detecting late corruption may be done either with filling or checksumming,
+ but the simple fact of assuming a risk of corruption that needs to be
+ chased means we must not store the lists nor caller inside the area.
+
+Some models imply dedicating some place when in cache:
+ - preserving contents forces the lists to be prefixed or appended, which
+ leaves unused places when in use. Thus we could systematically place the
+ pool pointer and the caller in this case.
+
+ - if preserving contents is not desired, almost everything can be stored
+ inside when not in use. Then each situation's size should be calculated
+ so that the allocated size is known, and entries are filled from the
+ beginning while not in use, or after the size when in use.
+
+ - if poisoning is requested, late corruption might be detected but then we
+ don't want the list to be stored inside at the risk of being corrupted.
+
+Maybe just implement a few models:
+ - compact/optimal: put l1/l2 inside
+ - detect late corruption: fill/sum, put l1/l2 out
+ - preserve contents: put l1/l2 out
+ - corruption+preserve: do not fill, sum out
+ - poisoning: not needed on free if pattern filling is done.
+
+try2:
+ - poison on alloc to detect missing initialization: yes/no
+ (note: nothing to do if filling done)
+ - poison on free to detect use-after-free: yes/no
+ (note: nothing to do if filling done)
+ - check on alloc for corruption-after-free: yes/no
+ If content-preserving => sum, otherwise pattern filling; in
+ any case, move L1/L2 out.
+ - check for overflows: yes/no: use a canary after the area. The
+ canary can be the pointer to the pool.
+ - check for alloc caller: yes/no => always after the area
+ - content preservation: yes/no
+ (disables filling, moves lists out)
+ - improved caller tracking: used to detect double-free, may benefit
+ from content-preserving but not only.
diff --git a/doc/design-thoughts/thread-group.txt b/doc/design-thoughts/thread-group.txt
new file mode 100644
index 0000000..e845230
--- /dev/null
+++ b/doc/design-thoughts/thread-group.txt
@@ -0,0 +1,655 @@
+Thread groups
+#############
+
+2021-07-13 - first draft
+==========
+
+Objective
+---------
+- support multi-socket systems with limited cache-line bouncing between
+ physical CPUs and/or L3 caches
+
+- overcome the 64-thread limitation
+
+- Support a reasonable number of groups. I.e. if modern CPUs arrive with
+ core complexes made of 8 cores, with 8 CC per chip and 2 chips in a
+ system, it makes sense to support 16 groups.
+
+
+Non-objective
+-------------
+- no need to optimize to the last possible cycle. I.e. some algos like
+ leastconn will remain shared across all threads, servers will keep a
+ single queue, etc. Global information remains global.
+
+- no stubborn enforcement of FD sharing. Per-server idle connection lists
+ can become per-group; listeners can (and should probably) be per-group.
+ Other mechanisms (like SO_REUSEADDR) can already overcome this.
+
+- no need to go beyond 64 threads per group.
+
+
+Identified tasks
+================
+
+General
+-------
+Everywhere tid_bit is used we absolutely need to find a complement using
+either the current group or a specific one. Thread debugging will need to
+be extended as masks are extensively used.
+
+
+Scheduler
+---------
+The global run queue and global wait queue must become per-group. This
+means that a task may only be queued into one of them at a time. It
+sounds like tasks may only belong to a given group, but doing so would
+bring back the original issue that it's impossible to perform remote wake
+ups.
+
+We could probably ignore the group if we don't need to set the thread mask
+in the task. the task's thread_mask is never manipulated using atomics so
+it's safe to complement it with a group.
+
+The sleeping_thread_mask should become per-group. It is thus possible that a
+wakeup may only be performed on the assigned group, meaning that either
+a task is not assigned, in which case it can be self-assigned (like today),
+or the tg to be woken up will be retrieved from the task itself.
+
+Task creation currently takes a thread mask of either tid_bit, a specific
+mask, or MAX_THREADS_MASK. How to create a task able to run anywhere
+(checks, Lua, ...) ?
+
+Profiling -> completed
+---------
+There should be one task_profiling_mask per thread group. Enabling or
+disabling profiling should be made per group (possibly by iterating).
+-> not needed anymore, one flag per thread in each thread's context.
+
+Thread isolation
+----------------
+Thread isolation is difficult as we solely rely on atomic ops to figure
+who can complete. Such operation is rare, maybe we could have a global
+read_mostly flag containing a mask of the groups that require isolation.
+Then the threads_want_rdv_mask etc can become per-group. However setting
+and clearing the bits will become problematic as this will happen in two
+steps hence will require careful ordering.
+
+FD
+--
+tid_bit is used in a number of atomic ops on the running_mask. If we have
+one fdtab[] per group, the mask implies that it's within the group.
+Theoretically we should never face a situation where an FD is reported nor
+manipulated for a remote group.
+
+There will still be one poller per thread, except that this time all
+operations will be related to the current thread_group. No fd may appear
+in two thread_groups at once, but we can probably not prevent that (e.g.
+delayed close and reopen). Should we instead have a single shared fdtab[]
+(less memory usage also) ? Maybe adding the group in the fdtab entry would
+work, but when does a thread know it can leave it ? Currently this is
+solved by running_mask and by update_mask. Having two tables could help
+with this (each table sees the FD in a different group with a different
+mask) but this looks overkill.
+
+There's polled_mask[] which needs to be decided upon. Probably that it
+should be doubled as well. Note, polled_mask left fdtab[] for cacheline
+alignment reasons in commit cb92f5cae4.
+
+If we have one fdtab[] per group, what *really* prevents from using the
+same FD in multiple groups ? _fd_delete_orphan() and fd_update_events()
+need to check for no-thread usage before closing the FD. This could be
+a limiting factor. Enabling could require to wake every poller.
+
+Shouldn't we remerge fdinfo[] with fdtab[] (one pointer + one int/short,
+used only during creation and close) ?
+
+Other problem, if we have one fdtab[] per TG, disabling/enabling an FD
+(e.g. pause/resume on listener) can become a problem if it's not necessarily
+on the current TG. We'll then need a way to figure that one. It sounds like
+FDs from listeners and receivers are very specific and suffer from problems
+all other ones under high load do not suffer from. Maybe something specific
+ought to be done for them, if we can guarantee there is no risk of accidental
+reuse (e.g. locate the TG info in the receiver and have a "MT" bit in the
+FD's flags). The risk is always that a close() can result in instant pop-up
+of the same FD on any other thread of the same process.
+
+Observations: right now fdtab[].thread_mask more or less corresponds to a
+declaration of interest, it's very close to meaning "active per thread". It is
+in fact located in the FD while it ought to do nothing there, as it should be
+where the FD is used as it rules accesses to a shared resource that is not
+the FD but what uses it. Indeed, if neither polled_mask nor running_mask have
+a thread's bit, the FD is unknown to that thread and the element using it may
+only be reached from above and not from the FD. As such we ought to have a
+thread_mask on a listener and another one on connections. These ones will
+indicate who uses them. A takeover could then be simplified (atomically set
+exclusivity on the FD's running_mask, upon success, takeover the connection,
+clear the running mask). Probably that the change ought to be performed on
+the connection level first, not the FD level by the way. But running and
+polled are the two relevant elements, one indicates userland knowledge,
+the other one kernel knowledge. For listeners there's no exclusivity so it's
+a bit different but the rule remains the same that we don't have to know
+what threads are *interested* in the FD, only its holder.
+
+Not exact in fact, see FD notes below.
+
+activity
+--------
+There should be one activity array per thread group. The dump should
+simply scan them all since the cumuled values are not very important
+anyway.
+
+applets
+-------
+They use tid_bit only for the task. It looks like the appctx's thread_mask
+is never used (now removed). Furthermore, it looks like the argument is
+*always* tid_bit.
+
+CPU binding
+-----------
+This is going to be tough. It will be needed to detect that threads overlap
+and are not bound (i.e. all threads on same mask). In this case, if the number
+of threads is higher than the number of threads per physical socket, one must
+try hard to evenly spread them among physical sockets (e.g. one thread group
+per physical socket) and start as many threads as needed on each, bound to
+all threads/cores of each socket. If there is a single socket, the same job
+may be done based on L3 caches. Maybe it could always be done based on L3
+caches. The difficulty behind this is the number of sockets to be bound: it
+is not possible to bind several FDs per listener. Maybe with a new bind
+keyword we can imagine to automatically duplicate listeners ? In any case,
+the initially bound cpumap (via taskset) must always be respected, and
+everything should probably start from there.
+
+Frontend binding
+----------------
+We'll have to define a list of threads and thread-groups per frontend.
+Probably that having a group mask and a same thread-mask for each group
+would suffice.
+
+Threads should have two numbers:
+ - the per-process number (e.g. 1..256)
+ - the per-group number (1..64)
+
+The "bind-thread" lines ought to use the following syntax:
+ - bind 45 ## bind to process' thread 45
+ - bind 1/45 ## bind to group 1's thread 45
+ - bind all/45 ## bind to thread 45 in each group
+ - bind 1/all ## bind to all threads in group 1
+ - bind all ## bind to all threads
+ - bind all/all ## bind to all threads in all groups (=all)
+ - bind 1/65 ## rejected
+ - bind 65 ## OK if there are enough
+ - bind 35-45 ## depends. Rejected if it crosses a group boundary.
+
+The global directive "nbthread 28" means 28 total threads for the process. The
+number of groups will sub-divide this. E.g. 4 groups will very likely imply 7
+threads per group. At the beginning, the nbgroup should be manual since it
+implies config adjustments to bind lines.
+
+There should be a trivial way to map a global thread to a group and local ID
+and to do the opposite.
+
+
+Panic handler + watchdog
+------------------------
+Will probably depend on what's done for thread_isolate
+
+Per-thread arrays inside structures
+-----------------------------------
+- listeners have a thr_conn[] array, currently limited to MAX_THREADS. Should
+ we simply bump the limit ?
+- same for servers with idle connections.
+=> doesn't seem very practical.
+- another solution might be to point to dynamically allocated arrays of
+ arrays (e.g. nbthread * nbgroup) or a first level per group and a second
+ per thread.
+=> dynamic allocation based on the global number
+
+Other
+-----
+- what about dynamic thread start/stop (e.g. for containers/VMs) ?
+ E.g. if we decide to start $MANY threads in 4 groups, and only use
+ one, in the end it will not be possible to use less than one thread
+ per group, and at most 64 will be present in each group.
+
+
+FD Notes
+--------
+ - updt_fd_polling() uses thread_mask to figure where to send the update,
+ the local list or a shared list, and which bits to set in update_mask.
+ This could be changed so that it takes the update mask in argument. The
+ call from the poller's fork would just have to broadcast everywhere.
+
+ - pollers use it to figure whether they're concerned or not by the activity
+ update. This looks important as otherwise we could re-enable polling on
+ an FD that changed to another thread.
+
+ - thread_mask being a per-thread active mask looks more exact and is
+ precisely used this way by _update_fd(). In this case using it instead
+ of running_mask to gauge a change or temporarily lock it during a
+ removal could make sense.
+
+ - running should be conditioned by thread. Polled not (since deferred
+ or migrated). In this case testing thread_mask can be enough most of
+ the time, but this requires synchronization that will have to be
+    extended to tgid. But migration seems a different beast that we shouldn't
+ care about here: if first performed at the higher level it ought to
+ be safe.
+
+In practice the update_mask can be dropped to zero by the first fd_delete()
+as the only authority allowed to fd_delete() is *the* owner, and as soon as
+all running_mask are gone, the FD will be closed, hence removed from all
+pollers. This will be the only way to make sure that update_mask always
+refers to the current tgid.
+
+However, it may happen that a takeover within the same group causes a thread
+to read the update_mask late, while the FD is being wiped by another thread.
+That other thread may close it, causing another thread in another group to
+catch it, and change the tgid and start to update the update_mask. This means
+that it would be possible for a thread entering do_poll() to see the correct
+tgid, then the fd would be closed, reopened and reassigned to another tgid,
+and the thread would see its bit in the update_mask, being confused. Right
+now this should already happen when the update_mask is not cleared, except
+that upon wakeup a migration would be detected and that would be all.
+
+Thus we might need to set the running bit to prevent the FD from migrating
+before reading update_mask, which also implies closing on fd_clr_running() == 0 :-(
+
+Also even fd_update_events() leaves a risk of updating update_mask after
+clearing running, thus affecting the wrong one. Probably that update_mask
+should be updated before clearing running_mask there. Also, how about not
+creating an update on a close ? Not trivial if done before running, unless
+thread_mask==0.
+
+Note that one situation that is currently visible is that a thread closes a
+file descriptor that it's the last one to own and to have an update for. In
+fd_delete_orphan() it does call poller.clo() but this one is not sufficient
+as it doesn't drop the update_mask nor does it clear the polled_mask. The
+typical problem that arises is that the close() happens before processing
+the last update (e.g. a close() just after a partial read), thus it still
+has *at least* one bit set for the current thread in both update_mask and
+polled_mask, and it is present in the update_list. Not handling it would
+mean that the event is lost on update() from the concerned threads and
+that some resource might leak. Handling it means zeroing the update_mask
+and polled_mask, and deleting the update entry from the update_list, thus
+losing the update event. And as indicated above, if the FD switches twice
+between 2 groups, the finally called thread does not necessarily know that
+the FD isn't the same anymore, thus it's difficult to decide whether to
+delete it or not, because deleting the event might in fact mean deleting
+something that was just re-added for the same thread with the same FD but
+a different usage.
+
+Also it really seems unrealistic to scan a single shared update_list like
+this using write operations. There should likely be one per thread-group.
+But in this case there is no more choice than deleting the update event
+upon fd_delete_orphan(). This also means that poller->clo() must do the
+job for all of the group's threads at once. This would mean a synchronous
+removal before the close(), which doesn't seem ridiculously expensive. It
+just requires that any thread of a group may manipulate any other thread's
+status for an FD and a poller.
+
+Note about our currently supported pollers:
+
+ - epoll: our current code base relies on the modern version which
+ automatically removes closed FDs, so we don't have anything to do
+ when closing and we don't need the update.
+
+ - kqueue: according to https://www.freebsd.org/cgi/man.cgi?query=kqueue, just
+ like epoll, a close() implies a removal. Our poller doesn't perform
+ any bookkeeping either so it's OK to directly close.
+
+ - evports: https://docs.oracle.com/cd/E86824_01/html/E54766/port-dissociate-3c.html
+ says the same, i.e. close() implies a removal of all events. No local
+ processing nor bookkeeping either, we can close.
+
+ - poll: the fd_evts[] array is global, thus shared by all threads. As such,
+ a single removal is needed to flush it for all threads at once. The
+ operation is already performed like this.
+
+ - select: works exactly like poll() above, hence already handled.
+
+As a preliminary conclusion, it's safe to delete the event and reset
+update_mask just after calling poller->clo(). If extremely unlucky (changing
+thread mask due to takeover ?), the same FD may appear at the same time:
+ - in one or several thread-local fd_updt[] arrays. These ones are just work
+ queues, there's nothing to do to ignore them, just leave the holes with an
+ outdated FD which will be ignored once met. As a bonus, poller->clo() could
+ check if the last fd_updt[] points to this specific FD and decide to kill
+ it.
+
+ - in the global update_list. In this case, fd_rm_from_fd_list() already
+ performs an attachment check, so it's safe to always call it before closing
+ (since no one else may be in the process of changing anything).
+
+
+###########################################################
+
+Current state:
+
+
+Mux / takeover / fd_delete() code ||| poller code
+-------------------------------------------------|||---------------------------------------------------
+ \|/
+mux_takeover(): | fd_set_running():
+ if (fd_takeover()<0) | old = {running, thread};
+ return fail; | new = {tid_bit, tid_bit};
+ ... |
+fd_takeover(): | do {
+ atomic_or(running, tid_bit); | if (!(old.thread & tid_bit))
+ old = {running, thread}; | return -1;
+ new = {tid_bit, tid_bit}; | new = { running | tid_bit, old.thread }
+ if (owner != expected) { | } while (!dwcas({running, thread}, &old, &new));
+ atomic_and(running, ~tid_bit); |
+ return -1; // fail | fd_clr_running():
+ } | return atomic_and_fetch(running, ~tid_bit);
+ |
+ while (old == {tid_bit, !=0 }) | poll():
+ if (dwcas({running, thread}, &old, &new)) { | if (!owner)
+ atomic_and(running, ~tid_bit); | continue;
+ return 0; // success |
+ } | if (!(thread_mask & tid_bit)) {
+ } | epoll_ctl_del();
+ | continue;
+ atomic_and(running, ~tid_bit); | }
+ return -1; // fail |
+ | // via fd_update_events()
+fd_delete(): | if (fd_set_running() != -1) {
+ atomic_or(running, tid_bit); | iocb();
+ atomic_store(thread, 0); | if (fd_clr_running() == 0 && !thread_mask)
+   if (fd_clr_running(fd) == 0)                   |          fd_delete_orphan();
+ fd_delete_orphan(); | }
+
+
+The idle_conns_lock prevents the connection from being *picked* and released
+while someone else is reading it. What it does is guarantee that on idle
+connections, the caller of the IOCB will not dereference the task's context
+while the connection is still in the idle list, since it might be picked then
+freed at the same instant by another thread. As soon as the IOCB manages to
+get that lock, it removes the connection from the list so that it cannot be
+taken over anymore. Conversely, the mux's takeover() code runs under that
+lock so that if it frees the connection and task, this will appear atomic
+to the IOCB. The timeout task (which is another entry point for connection
+deletion) does the same. Thus, when coming from the low-level (I/O or timeout):
+ - task always exists, but ctx checked under lock validates; conn removal
+ from list prevents takeover().
+ - t->context is stable, except during changes under takeover lock. So
+ h2_timeout_task may well run on a different thread than h2_io_cb().
+
+Coming from the top:
+ - takeover() done under lock() clears task's ctx and possibly closes the FD
+ (unless some running remains present).
+
+Unlikely but currently possible situations:
+ - multiple pollers (up to N) may have an idle connection's FD being
+ polled, if the connection was passed from thread to thread. The first
+ event on the connection would wake all of them. Most of them would
+ see fdtab[].owner set (the late ones might miss it). All but one would
+ see that their bit is missing from fdtab[].thread_mask and give up.
+ However, just after this test, others might take over the connection,
+ so in practice if terribly unlucky, all but 1 could see their bit in
+ thread_mask just before it gets removed, all of them set their bit
+ in running_mask, and all of them call iocb() (sock_conn_iocb()).
+ Thus all of them dereference the connection and touch the subscriber
+ with no protection, then end up in conn_notify_mux() that will call
+ the mux's wake().
+
+ - multiple pollers (up to N-1) might still be in fd_update_events()
+ manipulating fdtab[].state. The cause is that the "locked" variable
+ is determined by atleast2(thread_mask) but that thread_mask is read
+ at a random instant (i.e. it may be stolen by another one during a
+ takeover) since we don't yet hold running to prevent this from being
+ done. Thus we can arrive here with thread_mask==something_else (1bit),
+ locked==0 and fdtab[].state assigned non-atomically.
+
+ - it looks like nothing prevents h2_release() from being called on a
+ thread (e.g. from the top or task timeout) while sock_conn_iocb()
+ dereferences the connection on another thread. Those killing the
+ connection don't yet consider the fact that it's an FD that others
+ might currently be waking up on.
+
+###################
+
+pb with counter:
+
+users count doesn't say who's using the FD and two users can do the same
+close in turn. The thread_mask should define who's responsible for closing
+the FD, and all those with a bit in it ought to do it.
+
+
+2021-08-25 - update with minimal locking on tgid value
+==========
+
+ - tgid + refcount at once using CAS
+ - idle_conns lock during updates
+ - update:
+ if tgid differs => close happened, thus drop update
+ otherwise normal stuff. Lock tgid until running if needed.
+ - poll report:
+ if tgid differs => closed
+ if thread differs => stop polling (migrated)
+ keep tgid lock until running
+ - test on thread_id:
+ if (xadd(&tgid,65536) != my_tgid) {
+ // was closed
+ sub(&tgid, 65536)
+ return -1
+ }
+ if !(thread_id & tidbit) => migrated/closed
+ set_running()
+ sub(tgid,65536)
+ - note: either fd_insert() or the final close() ought to set
+ polled and update to 0.
+
+2021-09-13 - tid / tgroups etc.
+==========
+
+ * tid currently is the thread's global ID. It's essentially used as an index
+ for arrays. It must be clearly stated that it works this way.
+
+ * tasklets use the global thread id, and __tasklet_wakeup_on() must use a
+ global ID as well. It's capital that tinfo[] provides instant access to
+ local/global bits/indexes/arrays
+
+ - tid_bit makes no sense process-wide, so it must be redefined to represent
+ the thread's tid within its group. The name is not much welcome though, but
+ there are 286 of it that are not going to be changed that fast.
+ => now we have ltid and ltid_bit in thread_info. thread-local tid_bit still
+ not changed though. If renamed we must make sure the older one vanishes.
+ Why not rename "ptid, ptid_bit" for the process-wide tid and "gtid,
+ gtid_bit" for the group-wide ones ? This removes the ambiguity on "tid"
+ which is half the time not the one we expect.
+
+ * just like "ti" is the thread_info, we need to have "tg" pointing to the
+ thread_group.
+
+ - other less commonly used elements should be retrieved from ti->xxx. E.g.
+ the thread's local ID.
+
+ - lock debugging must reproduce tgid
+
+ * task profiling must be made per-group (annoying), unless we want to add a
+ per-thread TH_FL_* flag and have the rare places where the bit is changed
+ iterate over all threads if needed. Sounds preferable overall.
+
+ * an offset might be placed in the tgroup so that even with 64 threads max
+ we could have completely separate tid_bits over several groups.
+ => base and count now
+
+2021-09-15 - bind + listen() + rx
+==========
+
+ - thread_mask (in bind_conf->rx_settings) should become an array of
+ MAX_TGROUP longs.
+ - when parsing "thread 123" or "thread 2/37", the proper bit is set,
+ assuming the array is either a contiguous bitfield or a tgroup array.
+ An option RX_O_THR_PER_GRP or RX_O_THR_PER_PROC is set depending on
+ how the thread num was parsed, so that we reject mixes.
+ - end of parsing: entries translated to the cleanest form (to be determined)
+ - binding: for each socket()/bind()/listen()... just perform one extra dup()
+ for each tgroup and store the multiple FDs into an FD array indexed on
+ MAX_TGROUP. => allows to use one FD per tgroup for the same socket, hence
+ to have multiple entries in all tgroup pollers without requiring the user
+ to duplicate the bind line.
+
+2021-09-15 - global thread masks
+==========
+
+Some global variables currently expect to know about thread IDs and it's
+uncertain what must be done with them:
+ - global_tasks_mask /* Mask of threads with tasks in the global runqueue */
+ => touched under the rq lock. Change it per-group ? What exact use is made ?
+
+ - sleeping_thread_mask /* Threads that are about to sleep in poll() */
+ => seems that it can be made per group
+
+ - all_threads_mask: a bit complicated, derived from nbthread and used with
+ masks and with my_ffsl() to wake threads up. Should probably be per-group
+ but we might miss something for global.
+
+ - stopping_thread_mask: used in combination with all_threads_mask, should
+ move per-group.
+
+ - threads_harmless_mask: indicates all threads that are currently harmless in
+ that they promise not to access a shared resource. Must be made per-group
+ but then we'll likely need a second stage to have the harmless groups mask.
+ threads_idle_mask, threads_sync_mask, threads_want_rdv_mask go with the one
+ above. Maybe the right approach will be to request harmless on a group mask
+ so that we can detect collisions and arbiter them like today, but on top of
+ this it becomes possible to request harmless only on the local group if
+ desired. The subtlety is that requesting harmless at the group level does
+ not mean it's achieved since the requester cannot vouch for the other ones
+ in the same group.
+
+In addition, some variables are related to the global runqueue:
+ __decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
+ struct eb_root rqueue; /* tree constituting the global run queue, accessed under rq_lock */
+ unsigned int grq_total; /* total number of entries in the global run queue, atomic */
+ static unsigned int global_rqueue_ticks; /* insertion count in the grq, use rq_lock */
+
+And others to the global wait queue:
+ struct eb_root timers; /* sorted timers tree, global, accessed under wq_lock */
+ __decl_aligned_rwlock(wq_lock); /* RW lock related to the wait queue */
+ struct eb_root timers; /* sorted timers tree, global, accessed under wq_lock */
+
+
+2022-06-14 - progress on task affinity
+==========
+
+The particularity of the current global run queue is to be usable for remote
+wakeups because it's protected by a lock. There is no need for a global run
+queue beyond this, and there could already be a locked queue per thread for
+remote wakeups, with a random selection at wakeup time. It's just that picking
+a pending task in a run queue among a number is convenient (though it
+introduces some excessive locking). A task will either be tied to a single
+group or will be allowed to run on any group. As such it's pretty clear that we
+don't need a global run queue. When a run-anywhere task expires, either it runs
+on the current group's runqueue with any thread, or a target thread is selected
+during the wakeup and it's directly assigned.
+
+A global wait queue seems important for scheduled repetitive tasks however. But
+maybe it's more a task for a cron-like job and there's no need for the task
+itself to wake up anywhere, because once the task wakes up, it must be tied to
+one (or a set of) thread(s). One difficulty if the task is temporarily assigned
+a thread group is that it's impossible to know where it's running when trying
+to perform a second wakeup or when trying to kill it. Maybe we'll need to have
+two tgid for a task (desired, effective). Or maybe we can restrict the ability
+of such a task to stay in wait queue in case of wakeup, though that sounds
+difficult. Other approaches would be to set the GID to the current one when
+waking up the task, and to have a flag (or sign on the GID) indicating that the
+task is still queued in the global timers queue. We already have TASK_SHARED_WQ
+so it seems that another similar flag such as TASK_WAKE_ANYWHERE could make
+sense. But when is TASK_SHARED_WQ really used, except for the "anywhere" case ?
+All calls to task_new() use either 1<<thr, tid_bit, all_threads_mask, or come
+from appctx_new which does exactly the same. The only real user of non-global,
+non-unique task_new() call is debug_parse_cli_sched() which purposely allows to
+use an arbitrary mask.
+
+ +----------------------------------------------------------------------------+
+ | => we don't need one WQ per group, only a global and N local ones, hence |
+ | the TASK_SHARED_WQ flag can continue to be used for this purpose. |
+ +----------------------------------------------------------------------------+
+
+Having TASK_SHARED_WQ should indicate that a task will always be queued to the
+shared queue and will always have a temporary gid and thread mask in the run
+queue.
+
+Going further, as we don't have any single case of a task bound to a small set
+of threads, we could decide to wake up only expired tasks for ourselves by
+looking them up using eb32sc and adopting them. Thus, there's no more need for
+a shared runqueue nor a global_runqueue_ticks counter, and we can simply have
+the ability to wake up a remote task. The task's thread_mask will then change
+so that it's only a thread ID, except when the task has TASK_SHARED_WQ, in
+which case it corresponds to the running thread. That's very close to what is
+already done with tasklets in fact.
+
+
+2021-09-29 - group designation and masks
+==========
+
+Neither FDs nor tasks will belong to incomplete subsets of threads spanning
+over multiple thread groups. In addition there may be a difference between
+configuration and operation (for FDs). This allows to fix the following rules:
+
+ group mask description
+ 0 0 bind_conf: groups & thread not set. bind to any/all
+ task: it would be nice to mean "run on the same as the caller".
+
+ 0 xxx bind_conf: thread set but not group: thread IDs are global
+ FD/task: group 0, mask xxx
+
+ G>0 0 bind_conf: only group is set: bind to all threads of group G
+ FD/task: mask 0 not permitted (= not owned). May be used to
+ mention "any thread of this group", though already covered by
+ G/xxx like today.
+
+ G>0 xxx bind_conf: Bind to these threads of this group
+ FD/task: group G, mask xxx
+
+It looks like keeping groups starting at zero internally complicates everything
+though. But forcing it to start at 1 might also require that we rescan all tasks
+to replace 0 with 1 upon startup. This would also allow group 0 to be special and
+be used as the default group for any new thread creation, so that group0.count
+would keep the number of unassigned threads. Let's try:
+
+ group mask description
+ 0 0 bind_conf: groups & thread not set. bind to any/all
+ task: "run on the same group & thread as the caller".
+
+ 0 xxx bind_conf: thread set but not group: thread IDs are global
+ FD/task: invalid. Or maybe for a task we could use this to
+ mean "run on current group, thread XXX", which would cover
+ the need for health checks (g/t 0/0 while sleeping, 0/xxx
+ while running) and have wake_expired_tasks() detect 0/0 and
+ wake them up to a random group.
+
+ G>0 0 bind_conf: only group is set: bind to all threads of group G
+ FD/task: mask 0 not permitted (= not owned). May be used to
+ mention "any thread of this group", though already covered by
+ G/xxx like today.
+
+ G>0 xxx bind_conf: Bind to these threads of this group
+ FD/task: group G, mask xxx
+
+With a single group declared in the config, group 0 would implicitly find the
+first one.
+
+
+The problem with the approach above is that a task queued in one group+thread's
+wait queue could very well receive a signal from another thread and/or group,
+and that there is no indication about where the task is queued, nor how to
+dequeue it. Thus it seems that it's up to the application itself to unbind/
+rebind a task. This contradicts the principle of leaving a task waiting in a
+wait queue and waking it anywhere.
+
+Another possibility might be to decide that a task having a defined group but
+a mask of zero is shared and will always be queued into its group's wait queue.
+However, upon expiry, the scheduler would notice the thread-mask 0 and would
+broadcast it to any group.
+
+Right now in the code we have:
+ - 18 calls of task_new(tid_bit)
+ - 17 calls of task_new_anywhere()
+ - 2 calls with a single bit
+
+Thus it looks like "task_new_anywhere()", "task_new_on()" and
+"task_new_here()" would be sufficient.
diff --git a/doc/gpl.txt b/doc/gpl.txt
new file mode 100644
index 0000000..f90922e
--- /dev/null
+++ b/doc/gpl.txt
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/doc/haproxy.1 b/doc/haproxy.1
new file mode 100644
index 0000000..4c2d786
--- /dev/null
+++ b/doc/haproxy.1
@@ -0,0 +1,227 @@
+.TH HAPROXY 1 "17 August 2007"
+
+.SH NAME
+
+HAProxy \- fast and reliable http reverse proxy and load balancer
+
+.SH SYNOPSIS
+
+haproxy \-f <configuration\ file|dir> [\-L\ <name>] [\-n\ maxconn] [\-N\ maxconn] [\-C\ <dir>] [\-v|\-vv] [\-d] [\-D] [\-W] [\-Ws] [\-q] [\-V] [\-c] [\-p\ <pidfile>] [\-dk] [\-ds] [\-de] [\-dp] [\-db] [\-dM[<byte>]] [\-m\ <megs>] [\-x <unix_socket>] [{\-sf|\-st}\ pidlist...]
+
+.SH DESCRIPTION
+
+HAProxy is a TCP/HTTP reverse proxy which is particularly suited for
+high availability environments. Indeed, it can:
+ \- route HTTP requests depending on statically assigned cookies ;
+ \- spread the load among several servers while assuring server
+ persistence through the use of HTTP cookies ;
+ \- switch to backup servers in the event a main one fails ;
+ \- accept connections to special ports dedicated to service
+ monitoring ;
+ \- stop accepting connections without breaking existing ones ;
+ \- add/modify/delete HTTP headers both ways ;
+ \- block requests matching a particular pattern ;
+ \- hold clients to the right application server depending on
+   application cookies ;
+ \- report detailed status as HTML pages to authenticated users from an
+ URI intercepted from the application.
+
+It needs very few resources. Its event-driven architecture allows it
+to easily handle thousands of simultaneous connections on hundreds of
+instances without risking the system's stability.
+
+.SH OPTIONS
+
+.TP
+\fB\-f <configuration file|dir>\fP
+Specify configuration file or directory path. If the argument is a directory
+the files (and only files) it contains are added in lexical order (using
+LC_COLLATE=C) ; only non hidden files with ".cfg" extension are added.
+
+.TP
+\fB\-L <name>\fP
+Set the local instance's peer name. Peers are defined in the \fBpeers\fP
+configuration section and used for syncing stick tables between different
+instances. If this option is not specified, the local hostname is used as peer
+name. This name is exported in the $HAPROXY_LOCALPEER environment variable and
+can be used in the configuration file.
+
+.TP
+\fB\-n <maxconn>\fP
+Set the high limit for the total number of simultaneous connections.
+
+.TP
+\fB\-N <maxconn>\fP
+Set the high limit for the per-listener number of simultaneous connections.
+
+.TP
+\fB\-C <dir>\fP
+Change directory to <\fIdir\fP> before loading any files.
+
+.TP
+\fB\-v\fP
+Display HAProxy's version.
+
+.TP
+\fB\-vv\fP
+Display HAProxy's version and all build options.
+
+.TP
+\fB\-d\fP
+Start in foreground with debugging mode enabled.
+When the proxy runs in this mode, it dumps every connections,
+disconnections, timestamps, and HTTP headers to stdout. This should
+NEVER be used in an init script since it will prevent the system from
+starting up.
+
+.TP
+\fB\-D\fP
+Start in daemon mode.
+
+.TP
+\fB\-W\fP
+Start in master-worker mode. Could be used either with foreground or daemon
+mode.
+
+.TP
+\fB\-Ws\fP
+Start in master-worker mode with systemd notify support. It tells systemd when
+the process is ready. This mode forces foreground.
+
+.TP
+\fB\-q\fP
+Disable messages on output.
+
+.TP
+\fB\-V\fP
+Displays messages on output even when \-q or 'quiet' are specified. Some
+information about pollers and config file are displayed during startup.
+
+.TP
+\fB\-c\fP
+Only checks config file and exits with code 0 if no error was found, or
+exits with code 1 if a syntax error was found.
+
+.TP
+\fB\-p <pidfile>\fP
+Ask the process to write down each of its children's pids to this file
+in daemon mode or ask the process to write down its master's pid to
+this file in master-worker mode.
+
+.TP
+\fB\-dk\fP
+Disable use of \fBkqueue\fP(2). \fBkqueue\fP(2) is available only on BSD systems.
+
+.TP
+\fB\-dv\fP
+Disable use of event ports. Event ports are available only on SunOS systems
+derived from Solaris 10 and later (including illumos systems).
+
+.TP
+\fB\-ds\fP
+Disable use of speculative \fBepoll\fP(7). \fBepoll\fP(7) is available only on
+Linux 2.6 and some custom Linux 2.4 systems.
+
+.TP
+\fB\-de\fP
+Disable use of \fBepoll\fP(7). \fBepoll\fP(7) is available only on Linux 2.6
+and some custom Linux 2.4 systems.
+
+.TP
+\fB\-dp\fP
+Disables use of \fBpoll\fP(2). \fBselect\fP(2) might be used instead.
+
+.TP
+\fB\-dS\fP
+Disables use of \fBsplice\fP(2), which is broken on older kernels.
+
+.TP
+\fB\-db\fP
+Disables background mode (stays in foreground, useful for debugging).
+For debugging, the '\-db' option is very useful as it temporarily
+disables daemon mode and multi-process mode. The service can then be
+stopped by simply pressing Ctrl-C, without having to edit the config nor
+run full debug.
+
+.TP
+\fB\-dM[<byte>]\fP
+Initializes all allocated memory areas with the given <\fIbyte\fP>. This makes
+it easier to detect bugs resulting from uninitialized memory accesses, at the
+expense of touching all allocated memory once. If <\fIbyte\fP> is not
+specified, it defaults to 0x50 (ASCII 'P').
+
+.TP
+\fB\-m <megs>\fP
+Enforce a memory usage limit to a maximum of <megs> megabytes.
+
+.TP
+\fB\-sf <pidlist>\fP
+Send FINISH signal to the pids in pidlist after startup. The processes
+which receive this signal will wait for all sessions to finish before
+exiting. This option must be specified last, followed by any number of
+PIDs. Technically speaking, \fBSIGTTOU\fP and \fBSIGUSR1\fP are sent.
+
+.TP
+\fB\-st <pidlist>\fP
+Send TERMINATE signal to the pids in pidlist after startup. The processes
+which receive this signal will terminate immediately, closing all active
+sessions. This option must be specified last, followed by any number of
+PIDs. Technically speaking, \fBSIGTTOU\fP and \fBSIGTERM\fP are sent.
+
+.TP
+\fB\-x <unix_socket>\fP
+Attempt to connect to the unix socket, and retrieve all the listening sockets
+from the old process. Those sockets will then be used if possible instead of
+binding new ones.
+
+.TP
+\fB\-S <bind>[,<bind options>...]\fP
+In master-worker mode, create a master CLI. This CLI will enable access to the
+CLI of every worker. Useful for debugging, it's a convenient way of accessing a
+leaving process.
+
+.SH LOGGING
+Since HAProxy can run inside a chroot, it cannot reliably access /dev/log.
+For this reason, it uses the UDP protocol to send its logs to the server,
+even if it is the local server. People who experience trouble receiving
+logs should ensure that their syslog daemon listens to the UDP socket.
+Several Linux distributions which ship with syslogd from the sysklogd
+package have UDP disabled by default. The \fB\-r\fP option must be passed
+to the daemon in order to enable UDP.
+
+.SH SIGNALS
+Some signals have a special meaning for the haproxy daemon. Generally, they are used between daemons and need not be used by the administrator.
+.TP
+\- \fBSIGUSR1\fP
+Tells the daemon to stop all proxies and exit once all sessions are closed. It is often referred to as the "soft-stop" signal.
+.TP
+\- \fBSIGUSR2\fP
+In master-worker mode, reloads the configuration and sends a soft-stop signal to old processes.
+.TP
+\- \fBSIGTTOU\fP
+Tells the daemon to stop listening to all sockets. Used internally by \fB\-sf\fP and \fB\-st\fP.
+.TP
+\- \fBSIGTTIN\fP
+Tells the daemon to restart listening to all sockets after a \fBSIGTTOU\fP. Used internally when there was a problem during hot reconfiguration.
+.TP
+\- \fBSIGINT\fP and \fBSIGTERM\fP
+Both signals can be used to quickly stop the daemon.
+.TP
+\- \fBSIGHUP\fP
+Dumps the status of all proxies and servers into the logs. Mostly used for trouble-shooting purposes.
+.TP
+\- \fBSIGQUIT\fP
+Dumps information about memory pools on stderr. Mostly used for debugging purposes.
+.TP
+\- \fBSIGPIPE\fP
+This signal is intercepted and ignored on systems without \fBMSG_NOSIGNAL\fP.
+
+.SH SEE ALSO
+
+Much more complete documentation can be found in configuration.txt. On Debian
+systems, you can find this file in /usr/share/doc/haproxy/configuration.txt.gz.
+
+.SH AUTHOR
+
+HAProxy was written by Willy Tarreau. This man page was written by Arnaud Cornet and Willy Tarreau.
+
diff --git a/doc/internals/acl.txt b/doc/internals/acl.txt
new file mode 100644
index 0000000..0379331
--- /dev/null
+++ b/doc/internals/acl.txt
@@ -0,0 +1,82 @@
+2011/12/16 - How ACLs work internally in haproxy - w@1wt.eu
+
+An ACL is declared by the keyword "acl" followed by a name, followed by a
+matching method, followed by one or multiple pattern values :
+
+ acl internal src 127.0.0.0/8 10.0.0.0/8 192.168.0.0/16
+
+In the statement above, "internal" is the ACL's name (acl->name), "src" is the
+ACL keyword defining the matching method (acl_expr->kw) and the IP addresses
+are patterns of type acl_pattern to match against the source address.
+
+The acl_pattern struct may define one single pattern, a range of values or a
+tree of values to match against. The type of the patterns is implied by the
+ACL keyword. For instance, the "src" keyword implies IPv4 patterns.
+
+The line above constitutes an ACL expression (acl_expr). ACL expressions are
+formed of a keyword, an optional argument for the keyword, and a list of
+patterns (in fact, both a list and a root tree).
+
+Dynamic values are extracted according to a fetch function defined by the ACL
+keyword. This fetch function fills or updates a struct acl_test with all the
+extracted information so that a match function can compare it against all the
+patterns. The fetch function is called iteratively by the ACL engine until it
+reports no more value. This makes sense for instance when checking IP addresses
+found in HTTP headers, which can appear multiple times. The acl_test is kept
+intact between calls and even holds a context so that the fetch function knows
+where to start from for subsequent calls. The match function may also use the
+context even though it was not designed for that purpose.
+
+An ACL is defined only by its name and can be a series of ACL expressions. The
+ACL is deemed true when any of its expressions is true. They are evaluated in
+the declared order and can involve multiple matching methods.
+
+So in summary :
+
+ - an ACL is a series of tests to perform on a stream, any of which is enough
+ to validate the result.
+
+ - each test is defined by an expression associating a keyword and a series of
+ patterns.
+
+ - a keyword implies several things at once :
+ - the type of the patterns and how to parse them
+ - the method to fetch the required information from the stream
+ - the method to match the fetched information against the patterns
+
+ - a fetch function fills an acl_test struct which is passed to the match
+ function defined by the keyword
+
+ - the match function tries to match the value in the acl_test against the
+ pattern list declared in the expression which involved its acl_keyword.
+
+
+ACLs are used by conditional processing rules. A rule generally uses an "if" or
+"unless" keyword followed by an ACL condition (acl_cond). This condition is a
+series of term suites which are ORed together. Each term suite is a series of
+terms which are ANDed together. Terms may be negated before being evaluated in
+a suite. A term simply is a pointer to an ACL.
+
+We could then represent a rule by the following BNF :
+
+ rule = if-cond
+ | unless-cond
+
+ if-cond (struct acl_cond with ->pol = ACL_COND_IF)
+ = "if" condition
+
+ unless-cond (struct acl_cond with ->pol = ACL_COND_UNLESS)
+ = "unless" condition
+
+ condition
+ = term-suite
+ | term-suite "||" condition
+ | term-suite "or" condition
+
+ term-suite (struct acl_term_suite)
+ = term
+ | term term-suite
+
+ term = acl
+ | "!" acl
+
diff --git a/doc/internals/api/appctx.txt b/doc/internals/api/appctx.txt
new file mode 100644
index 0000000..137ec7b
--- /dev/null
+++ b/doc/internals/api/appctx.txt
@@ -0,0 +1,142 @@
+Instantiation of applet contexts (appctx) in 2.6.
+
+
+1. Background
+
+Most applets are in fact simplified services that are called by the CLI when a
+registered keyword is matched. Some of them only have a ->parse() function
+which immediately returns with a final result, while others will return zero
+asking for the ->io_handler() one to be called till the end. For these ones, a
+context is generally needed between calls to know where to restart from.
+
+Other applets are completely autonomous applets with their init function and
+an I/O handler, and these ones also need a persistent context between calls to
+the I/O handler. These ones are typically instantiated by "use-service" or by
+other means.
+
+Originally a few integers were provided to keep a trivial state (st0, st1, st2)
+and these ones progressively proved insufficient, leading to a "ctx.cli" sub-
+context that was allowed to use extra fields of various types. Other applets
+preferred to use their own context definition.
+
+All this resulted in the appctx->ctx to contain a myriad of definitions of
+various service contexts, and in some services abusing other services'
+definitions by laziness, and others being extended to use their own definition
+after having run for a long time on the generic types, some of which were not
+noticed and mistakenly used the same storage locations by accident. A massive
+cleanup was needed.
+
+
+2. New approach in 2.6
+
+In 2.6, there's an "svcctx" pointer that's initialized to NULL before any
+instantiation of an applet or of a CLI keyword's function. Applets and keyword
+handlers are free to make it point wherever they want, and to find it unaltered
+between subsequent calls, including up to the ->release() call. The "st2" state
+that was totally abused with random enums is not used anymore and was marked as
+deprecated. It's still initialized to zero before the first call though.
+
+One special area, "svc.storage[]", is large enough to contain any of the
+contexts that used to be present under "appctx->ctx". The "svcctx" may be set
+to point to this area so that a small structure can be allocated for free and
+without requiring error checking. In order to make this easier, a specially
+purposed function is provided: "applet_reserve_svcctx()". This function will
+require the caller to indicate how large an area it needs, and will return a
+pointer to this area after checking that it fits. If it does not, haproxy will
+crash. This is purposely done so that it's known during development that if a
+small structure doesn't fit, a different approach is required.
+
+As such, for the vast majority of commands, the process is the following one:
+
+ struct foo_ctx {
+ int myfield1;
+ int myfield2;
+ char *myfield3;
+ };
+
+ int io_handler(struct appctx *appctx)
+ {
+ struct foo_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (!ctx->myfield1) {
+ /* first call */
+ ctx->myfield1++;
+ }
+ ...
+ }
+
+The pointer may be directly accessed from the I/O handler if it's known that it
+was already reserved by the init handler or parsing function. Otherwise it's
+guaranteed to be NULL so that it can also serve as a test for a first call:
+
+ int parse_handler(struct appctx *appctx)
+ {
+ struct foo_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ ctx->myfield1 = 12;
+ return 0;
+ }
+
+ int io_handler(struct appctx *appctx)
+ {
+ struct foo_ctx *ctx = appctx->svcctx;
+
+          for (; ctx->myfield1; ctx->myfield1--) {
+ do_something();
+ }
+ ...
+ }
+
+There is no need to free anything because that space is not allocated but just
+points to a reserved area.
+
+If it is too small (its size is APPLET_MAX_SVCCTX bytes), it is preferable to
+use it with dynamically allocated structures (pools, malloc, etc). For example:
+
+ int io_handler(struct appctx *appctx)
+ {
+ struct foo_ctx *ctx = appctx->svcctx;
+
+ if (!ctx) {
+ /* first call */
+ ctx = pool_alloc(pool_foo_ctx);
+ if (!ctx)
+ return 1;
+ }
+ ...
+ }
+
+ void io_release(struct appctx *appctx)
+ {
+ pool_free(pool_foo_ctx, appctx->svcctx);
+ }
+
+The CLI code itself uses this mechanism for the cli_print_*() functions. Since
+these functions are terminal (i.e. not meant to be used in the middle of an I/O
+handler as they share the same contextual space), they always reset the svcctx
+pointer to place it to the "cli_print_ctx" mapped in ->svc.storage.
+
+
+3. Transition for old code
+
+A lot of care was taken to make the transition as smooth as possible for
+out-of-tree code since that's an API change. A dummy "ctx.cli" struct still
+exists in the appctx struct, and it happens to map perfectly to the one set by
+cli_print_*, so that if some code uses a mix of both, it will still work.
+However, it will build with "deprecated" warnings allowing to spot the
+remaining places. It's a good exercise to rename "ctx.cli" in "appctx" and see
+if the code still compiles.
+
+Regarding the "st2" sub-state, it will disappear as well after 2.6, but is
+still provided and initialized so that code relying on it will still work even
+if it builds with deprecation warnings. The correct approach is to move this
+state into the newly defined applet's context, and to stop using the stats
+enums STAT_ST_* that often barely match the needs and result in code that is
+more complicated than desired (the STAT_ST_* enum values have also been marked
+as deprecated).
+
+The code dealing with "show fd", "show sess" and the peers applet show good
+examples of how to convert a registered keyword or an applet.
+
+All this transition code requires complex layouts that will be removed during
+2.7-dev so there is no other long-term option but to update the code (or better
+get it merged if it can be useful to other users).
diff --git a/doc/internals/api/buffer-api.txt b/doc/internals/api/buffer-api.txt
new file mode 100644
index 0000000..ac35300
--- /dev/null
+++ b/doc/internals/api/buffer-api.txt
@@ -0,0 +1,653 @@
+2018-07-13 - HAProxy Internal Buffer API
+
+
+1. Background
+
+HAProxy uses a "struct buffer" internally to store data received from external
+agents, as well as data to be sent to external agents. These buffers are also
+used during data transformation such as compression, header insertion or
+defragmentation, and are used to carry intermediary representations between the
+various internal layers. They support wrapping at the end, and they carry their
+own size information so that in theory it would be possible to use different
+buffer sizes in parallel even though this is not currently implemented.
+
+The format of this structure has evolved over time, to reach a point where it
+is convenient and versatile enough to have permitted to make several internal
+types converge into a single one (specifically the struct chunk disappeared).
+
+
+2. Representation as of 1.9-dev1
+
+The current buffer representation consists in a linear storage area of known
+size, with a head position indicating the oldest data, and a total data count
+expressed in bytes. The head position, data count and size are expressed as
+integers and are positive or null. By convention, the head position is strictly
+smaller than the buffer size and the data count is smaller than or equal to the
+size, so that wrapping can be resolved with a single subtract. A buffer not
+respecting these rules is said to be degenerate. Unless specified otherwise,
+the various API functions will adopt an undefined behaviour when passed such a
+degenerate buffer.
+
+ Buffer declaration :
+
+ struct buffer {
+ size_t size; // size of the storage area (wrapping point)
+ char *area; // start of the storage area
+ size_t data; // contents length after head
+ size_t head; // start offset of remaining data relative to area
+ };
+
+
+ Linear buffer representation :
+
+ area
+ |
+ V<--------------------------------------------------------->| size
+ +-----------+---------------------------------+-------------+
+ | |/////////////////////////////////| |
+ +-----------+---------------------------------+-------------+
+ |<--------->|<------------------------------->|
+ head data ^
+ |
+ tail
+
+
+ Wrapping buffer representation :
+
+ area
+ |
+ V<--------------------------------------------------------->| size
+ +---------------+------------------------+------------------+
+ |///////////////| |//////////////////|
+ +---------------+------------------------+------------------+
+ |<-------------------------------------->| head
+ |-------------->| ...data data...|<-----------------|
+ ^
+ |
+ tail
+
+
+3. Terminology
+
+Manipulating a buffer just based on a head and a wrapping data count is not
+very convenient, so we define a certain number of terms for important elements
+characterizing a buffer :
+
+ - origin : pointer to relative position 0 in the storage area. Undefined
+ when the buffer is not allocated.
+
+ - size : the allocated size of the storage area starting at the origin,
+ expressed in bytes. A buffer whose size is zero is said not to
+ be allocated, and its origin in this case is undefined.
+
+ - data : the amount of data the buffer contains, in bytes. It is always
+ lower than or equal to the buffer's size, hence it is always 0
+ for an unallocated buffer.
+
+ - emptiness : a buffer is said to be empty when it contains no data, hence
+ data == 0. It is possible for such buffers not to be allocated
+ and to have size == 0 as well.
+
+ - room : the available space in the buffer. This is its size minus data.
+
+ - head : position relative to origin where the oldest data byte is found
+ (it typically is what send() uses to pick outgoing data). The
+ head is strictly smaller than the size.
+
+ - tail : position relative to origin where the first spare byte is found
+ (it typically is what recv() uses to store incoming data). It
+ is always equal to the buffer's data added to its head modulo
+ the buffer's size.
+
+ - wrapping : the byte following the last one of the storage area loops back
+ to position 0. This is called wrapping. The wrapping point is
+ the first position relative to origin which doesn't belong to
+ the storage area. There is no wrapping when a buffer is not
+ allocated. Wrapping requires special care and means that the
+ regular string manipulation functions are not usable on most
+ buffers, unless it is known that no wrapping happens. Free
+ space may wrap as well if the buffer only contains data in the
+ middle.
+
+ - alignment : a buffer is said to be aligned if its data do not wrap. That
+ is, its head is strictly before the tail, or the buffer is
+ empty and the head is null. Aligning a buffer may be required
+ to use regular string manipulation functions which have no
+ support for wrapping.
+
+
+A buffer may be in three different states :
+ - unallocated : size == 0, area == 0 (b_is_null() is true)
+ - waiting : size == 0, area != 0
+ - allocated : size > 0, area > 0
+
+It is not permitted to have area == 0 with a non-null size. In addition, the
+waiting state may also be used to indicate a read-only buffer which does not
+wrap and which must not be freed (e.g. for use with error messages).
+
+The basic API only covers allocated buffers. Switching to/from the other states
+is covered by the management API since it requires specific allocation and free
+calls.
+
+
+4. Using buffers
+
+Buffers are defined in a few files :
+ - include/common/buf.h : structure definition, and manipulation functions
+ - include/common/buffer.h : resource management (alloc/free/wait lists)
+ - include/common/istbuf.h : advanced string manipulation
+
+
+4.1. Basic API
+
+The basic API is made of the functions which abstract accesses to the buffers
+and which help calculating their state, free space or used space.
+
+====================+==================+=======================================
+Function | Arguments/Return | Description
+--------------------+------------------+---------------------------------------
+b_is_null() | const buffer *buf| returns true if (and only if) the
+ | ret: int | buffer is not yet allocated and thus
+ | | points to a NULL area
+--------------------+------------------+---------------------------------------
+b_orig() | const buffer *buf| returns the pointer to the origin of
+ | ret: char * | the storage, which is the location of
+ | | byte at offset zero. This is mostly
+ | | used by functions which handle the
+ | | wrapping by themselves
+--------------------+------------------+---------------------------------------
+b_size() | const buffer *buf| returns the size of the buffer
+ | ret: size_t |
+--------------------+------------------+---------------------------------------
+b_wrap() | const buffer *buf| returns the pointer to the wrapping
+ | ret: char * | position of the buffer area, which is
+ | | by definition the first byte not part
+ | | of the buffer
+--------------------+------------------+---------------------------------------
+b_data() | const buffer *buf| returns the number of bytes present in
+ | ret: size_t | the buffer
+--------------------+------------------+---------------------------------------
+b_room() | const buffer *buf| returns the amount of room left in the
+ | ret: size_t | buffer
+--------------------+------------------+---------------------------------------
+b_full() | const buffer *buf| returns true if the buffer is full
+ | ret: int |
+--------------------+------------------+---------------------------------------
+__b_stop() | const buffer *buf| returns a pointer to the byte
+ | ret: char * | following the end of the buffer, which
+ | | may be out of the buffer if the buffer
+ | | ends on the last byte of the area. It
+ | | is the caller's responsibility to
+ | | either know that the buffer does not
+ | | wrap or to check that the result does
+ | | not wrap
+--------------------+------------------+---------------------------------------
+__b_stop_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the byte following the end
+ | | of the buffer, which may be out of the
+ | | buffer if the buffer ends on the last
+ | | byte of the area. It's the caller's
+ | | responsibility to either know that the
+ | | buffer does not wrap or to check that
+ | | the result does not wrap
+--------------------+------------------+---------------------------------------
+b_stop() | const buffer *buf| returns the pointer to the byte
+ | ret: char * | following the end of the buffer, which
+ | | may be out of the buffer if the buffer
+ | | ends on the last byte of the area
+--------------------+------------------+---------------------------------------
+b_stop_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the byte following the end
+ | | of the buffer, which may be out of the
+ | | buffer if the buffer ends on the last
+ | | byte of the area
+--------------------+------------------+---------------------------------------
+__b_peek() | const buffer *buf| returns a pointer to the data at
+ | size_t ofs | position <ofs> relative to the head of
+ | ret: char * | the buffer. Will typically point to
+ | | input data if called with the amount
+ | | of output data. It's the caller's
+ | | responsibility to either know that the
+ | | buffer does not wrap or to check that
+ | | the result does not wrap
+--------------------+------------------+---------------------------------------
+__b_peek_ofs() | const buffer *buf| returns an origin-relative offset
+ | size_t ofs | pointing to the data at position <ofs>
+ | ret: size_t | relative to the head of the
+ | | buffer. Will typically point to input
+ | | data if called with the amount of
+ | | output data. It's the caller's
+ | | responsibility to either know that the
+ | | buffer does not wrap or to check that
+ | | the result does not wrap
+--------------------+------------------+---------------------------------------
+b_peek() | const buffer *buf| returns a pointer to the data at
+ | size_t ofs | position <ofs> relative to the head of
+ | ret: char * | the buffer. Will typically point to
+ | | input data if called with the amount
+ | | of output data. If applying <ofs> to
+ | | the buffers' head results in a
+ | | position between <size> and 2*<size>-1
+ | | included, a wrapping compensation is
+ | | applied to the result
+--------------------+------------------+---------------------------------------
+b_peek_ofs() | const buffer *buf| returns an origin-relative offset
+ | size_t ofs | pointing to the data at position <ofs>
+ | ret: size_t | relative to the head of the
+ | | buffer. Will typically point to input
+ | | data if called with the amount of
+ | | output data. If applying <ofs> to the
+ | | buffers' head results in a position
+ | | between <size> and 2*<size>-1
+ | | included, a wrapping compensation is
+ | | applied to the result
+--------------------+------------------+---------------------------------------
+__b_head() | const buffer *buf| returns the pointer to the buffer's
+ | ret: char * | head, which is the location of the
+ | | next byte to be dequeued. The result
+ | | is undefined for unallocated buffers
+--------------------+------------------+---------------------------------------
+__b_head_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the buffer's head, which
+ | | is the location of the next byte to be
+ | | dequeued. The result is undefined for
+ | | unallocated buffers
+--------------------+------------------+---------------------------------------
+b_head() | const buffer *buf| returns the pointer to the buffer's
+ | ret: char * | head, which is the location of the
+ | | next byte to be dequeued. The result
+ | | is undefined for unallocated
+ | | buffers. If applying <ofs> to the
+ | | buffers' head results in a position
+ | | between <size> and 2*<size>-1
+ | | included, a wrapping compensation is
+ | | applied to the result
+--------------------+------------------+---------------------------------------
+b_head_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the buffer's head, which
+ | | is the location of the next byte to be
+ | | dequeued. The result is undefined for
+ | | unallocated buffers. If applying
+ | | <ofs> to the buffers' head results in
+ | | a position between <size> and
+ | | 2*<size>-1 included, a wrapping
+ | | compensation is applied to the result
+--------------------+------------------+---------------------------------------
+__b_tail() | const buffer *buf| returns the pointer to the tail of the
+ | ret: char * | buffer, which is the location of the
+ | | first byte where it is possible to
+ | | enqueue new data. The result is
+ | | undefined for unallocated buffers
+--------------------+------------------+---------------------------------------
+__b_tail_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the tail of the buffer,
+ | | which is the location of the first
+ | | byte where it is possible to enqueue
+ | | new data. The result is undefined for
+ | | unallocated buffers
+--------------------+------------------+---------------------------------------
+b_tail() | const buffer *buf| returns the pointer to the tail of the
+ | ret: char * | buffer, which is the location of the
+ | | first byte where it is possible to
+ | | enqueue new data. The result is
+ | | undefined for unallocated buffers
+--------------------+------------------+---------------------------------------
+b_tail_ofs() | const buffer *buf| returns an origin-relative offset
+ | ret: size_t | pointing to the tail of the buffer,
+ | | which is the location of the first
+ | | byte where it is possible to enqueue
+ | | new data. The result is undefined for
+ | | unallocated buffers
+--------------------+------------------+---------------------------------------
+b_next() | const buffer *buf| for an absolute pointer <p> pointing
+ | const char *p | to a valid location within buffer <b>,
+ | ret: char * | returns the absolute pointer to the
+ | | next byte, which usually is at (p + 1)
+ | | unless p reaches the wrapping point
+ | | and wrapping is needed
+--------------------+------------------+---------------------------------------
+b_next_ofs() | const buffer *buf| for an origin-relative offset <o>
+ | size_t o | pointing to a valid location within
+ | ret: size_t | buffer <b>, returns either the
+ | | relative offset pointing to the next
+ | | byte, which usually is at (o + 1)
+ | | unless o reaches the wrapping point
+ | | and wrapping is needed
+--------------------+------------------+---------------------------------------
+b_dist() | const buffer *buf| returns the distance between two
+ | const char *from | pointers, taking into account the
+ | const char *to | ability to wrap around the buffer's
+ | ret: size_t | end. The operation is not defined if
+ | | either of the pointers does not belong
+ | | to the buffer or if their distance is
+ | | greater than the buffer's size
+--------------------+------------------+---------------------------------------
+b_almost_full() | const buffer *buf| returns 1 if the buffer uses at least
+ | ret: int | 3/4 of its capacity, otherwise
+ | | zero. Buffers of size zero are
+ | | considered full
+--------------------+------------------+---------------------------------------
+b_space_wraps() | const buffer *buf| returns non-zero only if the buffer's
+ | ret: int | free space wraps, which means that the
+ | | buffer contains data that are not
+ | | touching at least one edge
+--------------------+------------------+---------------------------------------
+b_contig_data() | const buffer *buf| returns the amount of data that can
+ | size_t start | contiguously be read at once starting
+ | ret: size_t | from a relative offset <start> (which
+ | | allows to easily pre-compute blocks
+ | | for memcpy). The start point will
+ | | typically contain the amount of past
+ | | data already returned by a previous
+ | | call to this function
+--------------------+------------------+---------------------------------------
+b_contig_space() | const buffer *buf| returns the amount of bytes that can
+ | ret: size_t | be appended to the buffer at once
+--------------------+------------------+---------------------------------------
+b_getblk() | const buffer *buf| gets one full block of data at once
+ | char *blk | from a buffer, starting from offset
+ | size_t len | <offset> after the buffer's head, and
+ | size_t offset | limited to no more than <len> bytes.
+ | ret: size_t | The caller is responsible for ensuring
+ | | that neither <offset> nor <offset> +
+ | | <len> exceed the total number of bytes
+ | | available in the buffer. Return zero
+ | | if not enough data was available, in
+ | | which case blk is left undefined, or
+ | | the number of bytes read which is
+ | | equal to the requested size
+--------------------+------------------+---------------------------------------
+b_getblk_nc() | const buffer *buf| gets one or two blocks of data at once
+ | const char **blk1| from a buffer, starting from offset
+ | size_t *len1 | <ofs> after the beginning of its
+ | const char **blk2| output, and limited to no more than
+ | size_t *len2 | <max> bytes. The caller is responsible
+ | size_t ofs | for ensuring that neither <ofs> nor
+ | size_t max | <ofs>+<max> exceed the total number of
+ | ret: int | bytes available in the buffer. Returns
+ | | 0 if not enough data were available,
+ | | or the number of blocks filled (1 or
+ | | 2). <blk1> is always filled before
+ | | <blk2>. The unused blocks are left
+ | | undefined, and the buffer is left
+ | | unaffected. Unused buffers are left in
+ | | an undefined state
+--------------------+------------------+---------------------------------------
+b_reset() | buffer *buf | resets a buffer. The size is not
+ | ret: void | touched. In practice it resets the
+ | | head and the data length
+--------------------+------------------+---------------------------------------
+b_sub() | buffer *buf | decreases the buffer length by <count>
+ | size_t count | without touching the head position
+ | ret: void | (only the tail moves). this may mostly
+ | | be used to trim pending data before
+ | | reusing a buffer. The caller is
+ | | responsible for not removing more than
+ | | the available data
+--------------------+------------------+---------------------------------------
+b_add() | buffer *buf | increase the buffer length by <count>
+ | size_t count | without touching the head position
+ | ret: void | (only the tail moves). This is used
+ | | when adding data at the tail of a
+ | | buffer. The caller is responsible for
+ | | not adding more than the available
+ | | room
+--------------------+------------------+---------------------------------------
+b_set_data() | buffer *buf | sets the buffer's length, by adjusting
+ | size_t len | the buffer's tail only. The caller is
+ | ret: void | responsible for passing a valid length
+--------------------+------------------+---------------------------------------
+b_del() | buffer *buf | deletes <del> bytes at the head of
+ | size_t del | buffer <b> and updates the head. The
+ | ret: void | caller is responsible for not removing
+ | | more than the available data. This is
+ | | used after sending data from the
+ | | buffer
+--------------------+------------------+---------------------------------------
+b_realign_if_empty()| buffer *buf | realigns a buffer if it's empty, does
+ | ret: void | nothing otherwise. This is mostly used
+ | | after b_del() to make an empty
+ | | buffer's free space contiguous
+--------------------+------------------+---------------------------------------
+b_slow_realign() | buffer *buf | realigns a possibly wrapping buffer so
+ | size_t output | that the part remaining to be parsed
+ | ret: void | is contiguous and starts at the
+ | | beginning of the buffer and the
+ | | already parsed output part ends at the
+ | | end of the buffer. This provides the
+ | | best conditions since it allows the
+ | | largest inputs to be processed at once
+ | | and ensures that once the output data
+ | | leaves, the whole buffer is available
+ | | at once. The number of output bytes
+ | | supposedly present at the beginning of
+ | | the buffer and which need to be moved
+ | | to the end must be passed in <output>.
+ | | It will effectively make this offset
+ | | the new wrapping point. A temporary
+ | | swap area at least as large as b->size
+ | | must be provided in <swap>. It's up
+ | | to the caller to ensure <output> is no
+ | | larger than the difference between the
+ | | whole buffer's length and its input
+--------------------+------------------+---------------------------------------
+b_putchar() | buffer *buf | tries to append char <c> at the end of
+ | char c | buffer <b>. Supports wrapping. New
+ | ret: void | data are silently discarded if the
+ | | buffer is already full
+--------------------+------------------+---------------------------------------
+b_putblk() | buffer *buf | tries to append block <blk> at the end
+ | const char *blk | of buffer <b>. Supports wrapping. Data
+ | size_t len | are truncated if the buffer is too
+ | ret: size_t | short or if not enough space is
+ | | available. It returns the number of
+ | | bytes really copied
+--------------------+------------------+---------------------------------------
+b_move() | buffer *buf | moves block (src,len) left or right
+ | size_t src | by <shift> bytes, supporting wrapping
+ | size_t len | and overlapping.
+ | size_t shift |
+--------------------+------------------+---------------------------------------
+b_rep_blk() | buffer *buf | writes the block <blk> at position
+ | char *pos | <pos> which must be in buffer <b>, and
+ | char *end | moves the part between <end> and the
+ | const char *blk | buffer's tail just after the end of
+ | size_t len | the copy of <blk>. This effectively
+ | ret: int | replaces the part located between
+ | | <pos> and <end> with a copy of <blk>
+ | | of length <len>. The buffer's length
+ | | is automatically updated. This is used
+ | | to replace a block with another one
+ | | inside a buffer. The shift value
+ | | (positive or negative) is returned. If
+ | | there's no space left, the move is not
+ | | done. If <len> is null, the <blk>
+ | | pointer is allowed to be null, in
+ | | order to erase a block
+--------------------+------------------+---------------------------------------
+b_xfer() | buffer *src | transfers at most <count> bytes from
+ | buffer *dst | buffer <src> to buffer <dst> and
+ | size_t count | returns the number of bytes copied.
+ | ret: size_t | The bytes are removed from <src> and
+ | | added to <dst>. The caller guarantees
+ | | that <count> is <= b_room(dst)
+====================+==================+=======================================
+
+
+4.2. String API
+
+The string API aims at providing both convenient and efficient ways to read and
+write to/from buffers using indirect strings (ist). These strings and some
+associated functions are defined in ist.h.
+
+====================+==================+=======================================
+Function | Arguments/Return | Description
+--------------------+------------------+---------------------------------------
+b_isteq() | const buffer *b | b_isteq() : returns > 0 if the first
+ | size_t o | <n> characters of buffer <b> starting
+ | size_t n | at offset <o> relative to the buffer's
+ | const ist ist | head match <ist>. (empty strings do
+ | ret: int | match). It is designed to be used with
+ | | reasonably small strings (it matches a
+ | | single byte per loop iteration). It is
+ | | expected to be used with an offset to
+ | | skip old data. Return value number of
+ | | matching bytes if >0, not enough bytes
+ | | or empty string if 0, or non-matching
+ | | byte found if <0.
+--------------------+------------------+---------------------------------------
+b_isteat | struct buffer *b | b_isteat() : "eats" string <ist> from
+ | const ist ist | the head of buffer <b>. Wrapping data
+ | ret: ssize_t | is explicitly supported. It matches a
+ | | single byte per iteration so strings
+ | | should remain reasonably small.
+ | | Returns the number of bytes matched
+ | | and eaten if >0, not enough bytes or
+ | | matched empty string if 0, or non
+ | | matching byte found if <0.
+--------------------+------------------+---------------------------------------
+b_istput | struct buffer *b | b_istput() : injects string <ist> at
+ | const ist ist | the tail of output buffer <b> provided
+ | ret: ssize_t | that it fits. Wrapping is supported.
+ | | It's designed for small strings as it
+ | | only writes a single byte per
+ | | iteration. Returns the number of
+ | | characters copied (ist.len), 0 if it
+ | | temporarily does not fit, or -1 if it
+ | | will never fit. It will only modify
+ | | the buffer upon success. In all cases,
+ | | the contents are copied prior to
+ | | reporting an error, so that the
+ | | destination at least contains a valid
+ | | but truncated string.
+--------------------+------------------+---------------------------------------
+b_putist | struct buffer *b | b_putist() : tries to copy as much as
+ | const ist ist | possible of string <ist> into buffer
+ | ret: size_t | <b> and returns the number of bytes
+ | | copied (truncation is possible). It
+ | | uses b_putblk() and is suitable for
+ | | large blocks.
+====================+==================+=======================================
+
+
+4.3. Management API
+
+The management API makes a distinction between an empty buffer, which by
+definition is not allocated but is ready to be allocated at any time, and a
+buffer which failed an allocation and is waiting for an available area to be
+offered. The functions allow to register on a list to be notified about buffer
+availability, to notify others of a number of buffers just released, and to be
+notified of buffer availability. All allocations are made through the
+standard buffer pools.
+
+====================+==================+=======================================
+Function | Arguments/Return | Description
+--------------------+------------------+---------------------------------------
+buffer_almost_full | const buffer *buf| returns true if the buffer is not null
+ | ret: int | and at least 3/4 of the buffer's space
+ | | are used. A waiting buffer will match.
+--------------------+------------------+---------------------------------------
+b_alloc | buffer *buf | ensures that <buf> is allocated or
+ | ret: buffer * | allocates a buffer and assigns it to
+ | | *buf. If no memory is available, (1)
+ | | is assigned instead with a zero size.
+ | | The allocated buffer is returned, or
+ | | NULL in case no memory is available
+--------------------+------------------+---------------------------------------
+__b_free | buffer *buf | releases <buf> which must be allocated
+ | ret: void | and marks it empty
+--------------------+------------------+---------------------------------------
+b_free | buffer *buf | releases <buf> only if it is allocated
+ | ret: void | and marks it empty
+--------------------+------------------+---------------------------------------
+offer_buffers() | void *from | offer a buffer currently belonging to
+ | uint threshold | target <from> to whoever needs
+ | ret: void | one. Any pointer is valid for <from>,
+ | | including NULL. Its purpose is to
+ | | avoid passing a buffer to oneself in
+ | | case of failed allocations (e.g. need
+ | | two buffers, get one, fail, release it
+ | | and wake up self again). In case of
+ | | normal buffer release where it is
+ | | expected that the caller is not
+ | | waiting for a buffer, NULL is fine
+====================+==================+=======================================
+
+
+5. Porting code from older versions
+
+The previous buffer API introduced in 1.5-dev9 (May 2012) used to look like the
+following (with the struct renamed to old_buffer here to avoid confusion during
+quick lookups at the doc). It's worth noting that the "data" field used to be
+part of the struct but with a different type and meaning. It's important to be
+careful about potential code making use of &b->data as it will silently compile
+but fail.
+
+ Previous buffer declaration :
+
+ struct old_buffer {
+ char *p; /* buffer's start pointer, separates in and out data */
+ unsigned int size; /* buffer size in bytes */
+ unsigned int i; /* number of input bytes pending for analysis in the buffer */
+ unsigned int o; /* number of out bytes the sender can consume from this buffer */
+ char data[0]; /* <size> bytes */
+ };
+
+ Previous linear buffer representation :
+
+ data p
+ | |
+ V V
+ +-----------+--------------------+------------+-------------+
+ | |////////////////////|////////////| |
+ +-----------+--------------------+------------+-------------+
+ <---------------------------------------------------------> size
+ <------------------> <---------->
+ o i
+
+There is this correspondence between old and new fields (some will involve a
+knowledge of a channel when the output byte count is required) :
+
+ Old | New
+ --------+----------------------------------------------------
+ p | data + head + co_data(channel) // ci_head(channel)
+ size | size
+ i | data - co_data(channel) // ci_data(channel)
+ o | co_data(channel) // channel->output
+ data | area
+ --------+-----------------------------------------------------
+
+Then some common expressions can be mapped like this :
+
+ Old | New
+ -----------------------+---------------------------------------
+ b->data | b_orig(b)
+ &b->data | b_orig(b)
+ bi_ptr(b) | ci_head(channel)
+ bi_end(b) | b_tail(b)
+ bo_ptr(b) | b_head(b)
+ bo_end(b) | co_tail(channel)
+ bi_putblk(b,s,l) | b_putblk(b,s,l)
+ bo_getblk(b,s,l,o) | b_getblk(b,s,l,o)
+ bo_getblk_nc(b,s,l,o) | b_getblk_nc(b,s,l,o,0,co_data(channel))
+ b->i + b->o | b_data(b)
+ b->data + b->size | b_wrap(b)
+ b->i += len | b_add(b, len)
+ b->i -= len | b_sub(b, len)
+ b->i = len | b_set_data(b, co_data(channel) + len)
+ b->o += len | b_add(b, len); channel->output += len
+ b->o -= len | b_del(b, len); channel->output -= len
+ -----------------------+---------------------------------------
+
+The buffer modification functions are less straightforward and depend a lot on
+the context where they are used. It is strongly advised to figure in the list
+of functions above what is available based on what is attempted to be done in
+the existing code.
+
+Note that it is very likely that any out-of-tree code relying on buffers will
+not use both ->i and ->o but instead will use exclusively ->i on the side
+producing data and use exclusively ->o on the side consuming data (such as in a
+mux or in an applet). In both cases, it should be assumed that the other side
+is always zero and that either ->i or ->o is replaced with ->data, making the
+remaining code much simpler (no more code duplication based on the data
+direction).
diff --git a/doc/internals/api/event_hdl.txt b/doc/internals/api/event_hdl.txt
new file mode 100644
index 0000000..72eeff8
--- /dev/null
+++ b/doc/internals/api/event_hdl.txt
@@ -0,0 +1,1015 @@
+ -----------------------------------------
+ event_hdl Guide - version 2.8
+ ( Last update: 2022-11-14 )
+ ------------------------------------------
+
+ABSTRACT
+--------
+
+The event_hdl support is a new feature of HAProxy 2.7. It is a way to easily
+handle general events in a simple to maintain fashion, while keeping core code
+impact to the bare minimum.
+
+This document first describes how to use already supported events,
+then how to add support for your very own events.
+
+This feature is quite new for now. The API is not frozen and will be
+updated/modified/improved/extended as needed.
+
+SUMMARY
+-------
+
+ 1. event_hdl introduction
+ 2. How to handle existing events
+ 2.1 SYNC mode
+ 2.2 ASYNC mode
+ 2.2.1 normal version
+ 2.2.2 task version
+ 2.3 Advanced features
+ 2.3.1 sub_mgmt
+ 2.3.2 subscription external lookups
+ 2.3.3 subscription ptr
+ 2.3.4 private_free
+ 3. How to add support for new events
+ 3.1 Declaring a new event data structure
+ 3.2 Publishing an event
+ 4. Subscription lists
+ 5. misc/helper functions
+
+
+1. EVENT_HDL INTRODUCTION
+-----------------------
+
+EVENT_HDL provides two complementary APIs, both are implemented
+in src/event_hdl.c and include/haproxy/event_hdl(-t).h:
+
+One API targeting developers that want to register event
+handlers that will be notified when specific events occur in the process.
+(See section 2.)
+
+One API targeting developers that want to notify registered handlers about
+an event that is happening in the process.
+(See section 3.)
+
+2. HOW TO HANDLE EXISTING EVENTS
+---------------------
+
+To handle existing events, you must first decide which events you're
+interested in.
+
+event types are defined as follows:
+
+```
+ /* type for storing event subscription type */
+ typedef struct event_hdl_sub_type
+ {
+ /* up to 256 families, non cumulative, adjust if needed */
+ uint8_t family;
+ /* up to 16 sub types using bitmasks, adjust if needed */
+ uint16_t subtype;
+ } event_hdl_sub_type;
+```
+
+For an up to date list of already supported events,
+please refer to include/haproxy/event_hdl-t.h
+At the end of the file you will find existing event types.
+
+Each event family provides a unique data structure that will
+be provided to the event handler (registered to one or more
+event subtypes) when such events occur.
+
+An event handler can subscribe to a single event family type at a time, but
+within the family type it can subscribe to multiple event subtypes.
+
+ For example, let's consider the SERVER family type.
+
+ Let's assume it provides the event_hdl_cb_data_server data structure.
+
+ We can register a handler that will be notified for
+	every SERVER event type using:
+ EVENT_HDL_SUB_SERVER
+
+ This will include EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_SUB_SERVER_DEL [...]
+
+ But we can also subscribe to a specific subtype only,
+ for example server deletion:
+ EVENT_HDL_SUB_SERVER_DEL
+
+ You can even combine multiple SERVER subtypes using
+ event_hdl_sub_type_add function helper:
+ event_hdl_sub_type_add(EVENT_HDL_SUB_SERVER_DEL,
+ EVENT_HDL_SUB_SERVER_ADD)
+
+ (will refer to server deletion as well as server addition)
+
+Registering a handler comes into multiple flavors:
+
+ SYNC mode:
+ handler is called in a blocking manner directly from the
+ thread that publishes the event.
+ This mode should be used with precaution because it could
+ slow the caller or cause deadlocks if used improperly.
+
+ Sync mode is useful when you directly depend on data or
+ state consistency from the caller.
+
+ Sync mode gives you access to unsafe elements in the data structure
+ provided by the caller (again, see event_hdl-t.h for more details).
+ The data structure may provide lock hints in the unsafe section
+ so that you know which locks are already held within the
+ calling context, hopefully preventing you from relocking
+ an already locked element and preventing deadlocks.
+
+ ASYNC mode:
+ handler is called in a non-blocking manner
+ (in a dedicated tasklet),
+ thus, the caller (that published the event) is not affected
+ by the handler. (time wise and data wise)
+
+ This is the safest way to handle events,
+ but it also comes with a limitation:
+
+ unsafe elements in the data structure provided by
+ the caller SHOULD be used under NO circumstances.
+ Indeed, only safe elements are meant to be used
+ when handling the event in async mode.
+
+	ASYNC mode comes in 2 different versions:
+ normal:
+ handler is simply a function pointer
+ (same prototype as sync mode),
+ that is called asynchronously with relevant data
+ when the event is published. Only difference with
+ sync mode here is that 'unsafe' data provided
+ by the data structure may not be used.
+ task:
+ handler is a user defined task(let) that uses an event
+ queue to consume pending events.
+ This mode is interesting when you need to perform
+ advanced operations or you need to handle the event
+ in an already existing task context.
+ It is a bit more complicated to setup, but really
+ nothing to worry about, some examples will be
+ provided later in this document.
+
+event subscription is performed using the function:
+
+ event_hdl_subscribe(list, event, hdl);
+
+ The function returns 1 in case of success,
+ and 0 in case of failure (bad arguments, or memory error)
+
+ The function may BUG_ON if used improperly (invalid arguments)
+
+ <list> is either user specified list used to store the
+ new subscription, or NULL if you want to store the subscription
+ in the process global list.
+
+ <list> is also asked when publishing an event,
+ so specifying list could be useful, if, for example,
+ you only want to subscribe to a specific subscription list
+ (see this as a scope for example, NULL being full scope,
+ and specific list being limited scope)
+
+ We will use server events as an example:
+
+ You could register to events for ALL servers by using the
+ global list (NULL), or only to a specific server events
+ by using the subscription list dedicated to a single server.
+
+ <event> are the events (family.subtypes) you're subscribing to
+
+ <hdl> contains required handler options, it must be provided using
+ EVENT_HDL_(TASK_)(A)SYNC() and EVENT_HDL_ID_(TASK_)(A)SYNC()
+ helper macros.
+
+ See include/haproxy/event_hdl.h or below to know which macro
+ best suits your needs.
+
+	When registering a handler, you have the ability to provide a
+ unique ID (using EVENT_HDL_ID_ macro family) that could be used
+ later to perform lookups on the subscription.
+	ID is stored as a uint64_t hash that is expected to be computed using
+ general purpose event_hdl_id inline function provided by event_hdl.h.
+ Not providing an ID (using EVENT_HDL_ macro family)
+ results in the subscription being considered as anonymous.
+ As the name implies, anonymous subscriptions don't support lookups.
+
+2.1 SYNC MODE
+---------------------
+
+Example, you want to register a sync handler that will be called when
+a new server is added.
+
+Here is what the handler function will look like:
+```
+void my_sync_handler(const struct event_hdl_cb *cb, void *private)
+{
+ const struct event_hdl_cb_data_server *server = cb->e_data;
+
+ /* using EVENT_HDL_ASSERT_SYNC is a good practice to ensure
+ * that the function breaks if used in async mode
+ * (because we will access unsafe data in this function that
+ * is sync mode only)
+ */
+ EVENT_HDL_ASSERT_SYNC(cb);
+ printf("I've been called for '%s', private = %p\n",
+ event_hdl_sub_type_to_string(cb->e_type), private);
+ printf("server name is '%s'\n", server->safe.name);
+
+ /* here it is safe to use unsafe data */
+ printf("server ptr is '%p'\n", server->unsafe.ptr);
+
+ /* from here you have the possibility to manage the subscription
+ * cb->sub_mgmt->unsub(cb->sub_mgmt);
+ * // hdl will be removed from the subscription list
+ */
+}
+```
+
+Here is how you perform the subscription:
+
+anonymous subscription:
+```
+ int private = 10;
+
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_SYNC(my_sync_handler, &private, NULL));
+```
+
+identified subscription:
+```
+ int private = 10;
+ uint64_t id = event_hdl_id("test", "sync");
+
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_SYNC(id,
+ my_sync_handler,
+ &private,
+ NULL));
+
+```
+
+identified subscription where freeing private is required when subscription ends:
+(also works for anonymous)
+(more on this feature in 2.3.4)
+```
+ int *private = malloc(sizeof(*private));
+ uint64_t id = event_hdl_id("test", "sync_free");
+
+ BUG_ON(!private);
+ *private = 10;
+
+ /* passing free as 'private_free' function so that
+ * private can be freed when unregistering is performed
+ */
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_SYNC(id,
+ my_sync_handler,
+ private,
+ free));
+
+
+ /* ... */
+
+ // unregistering the identified hdl
+ if (event_hdl_lookup_unsubscribe(NULL, id)) {
+ printf("private will automatically be freed!\n");
+ }
+```
+
+2.2 ASYNC MODE
+---------------------
+
+As mentioned before, async mode comes in 2 flavors, normal and task.
+
+2.2.1 NORMAL VERSION
+---------------------
+
+Normal is meant to be really easy to use, and highly compatible with sync mode.
+
+(Handler can easily be converted or copy pasted from async to sync mode
+and vice versa)
+
+Quick warning about sync to async handler conversion:
+
+please always use EVENT_HDL_ASSERT_SYNC whenever you develop a
+sync handler that performs unsafe data access.
+
+This way, if the handler were to be converted or copy pasted as is to
+async mode without removing unsafe data accesses,
+the handler will forcefully fail to indicate an error so that you
+know something has to be fixed in your handler code.
+
+Back to our async handler, let's say you want to declare an
+async handler that will be called when a new server is added.
+
+Here is what the handler function will look like:
+```
+void my_async_handler(const struct event_hdl_cb *cb, void *private)
+{
+ const struct event_hdl_cb_data_server *server = cb->e_data;
+
+ printf("I've been called for '%s', private = %p\n",
+ event_hdl_sub_type_to_string(cb->e_type), private);
+ printf("server name is '%s'\n", server->safe.name);
+
+ /* here it is not safe to use unsafe data */
+
+ /* from here you have the possibility to manage the subscription
+ * cb->sub_mgmt->unsub(cb->sub_mgmt);
+ * // hdl will be removed from the subscription list
+ */
+}
+```
+
+Note that it is pretty similar to sync handler, except
+for unsafe data access.
+
+Here is how you declare the subscription:
+
+anonymous subscription:
+```
+ int private = 10;
+
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ASYNC(my_async_handler, &private, NULL));
+```
+
+identified subscription:
+```
+ int private = 10;
+ uint64_t id = event_hdl_id("test", "async");
+
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_ASYNC(id,
+ my_async_handler,
+ &private,
+ NULL));
+
+```
+
+identified subscription where freeing private is required when subscription ends:
+(also works for anonymous)
+```
+ int *private = malloc(sizeof(*private));
+ uint64_t id = event_hdl_id("test", "async_free");
+
+ BUG_ON(!private);
+ *private = 10;
+
+ /* passing free as 'private_free' function so that
+ * private can be freed when unregistering is performed
+ */
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_ASYNC(id,
+ my_async_handler,
+ private,
+ free));
+
+ /* ... */
+
+ // unregistering the identified hdl
+ if (event_hdl_lookup_unsubscribe(NULL, id)) {
+ printf("private will automatically be freed when "
+ "all pending events referencing private "
+ "are consumed!\n");
+ }
+```
+
+2.2.2 TASK VERSION
+---------------------
+
+task version requires a bit more setup, but it's pretty
+straightforward actually.
+
+
+First, you need to initialize an event queue that will be used
+by event_hdl facility to push you events according to your subscription:
+
+```
+ event_hdl_async_equeue my_q;
+
+ event_hdl_async_equeue_init(&my_q);
+```
+
+
+Then, you need to declare a task(let) (or reuse existing task(let))
+
+It is your responsibility to make sure that the task(let) still exists
+(is not freed) when calling the subscribe function
+(and that the task remains valid as long as the subscription is).
+
+When a subscription referencing your task is over
+(either ended because of list purge, external code or from the handler itself),
+you will receive the EVENT_HDL_SUB_END event.
+When you receive this event, you must free it as usual and you can safely
+assume that the related subscription won't be sending you any more events.
+
+Here is what your task will look like (involving a single event queue):
+
+```
+struct task *event_hdl_async_task_my(struct task *task,
+ void *ctx, unsigned int state)
+{
+ struct tasklet *tl = (struct tasklet *)task;
+ event_hdl_async_equeue *queue = ctx;
+ struct event_hdl_async_event *event;
+ struct event_hdl_cb_data_server *srv;
+ uint8_t done = 0;
+
+ while ((event = event_hdl_async_equeue_pop(queue)))
+ {
+ if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
+ done = 1;
+ event_hdl_async_free_event(event);
+ printf("no more events to come, "
+ "subscription is over\n");
+ break;
+ }
+
+ srv = event->data;
+
+ printf("task event %s, %d (name = %s)\n",
+ event_hdl_sub_type_to_string(event->type),
+ *((int *)event->private), srv->safe.name);
+ event_hdl_async_free_event(event);
+ }
+
+ if (done) {
+ /* our job is done, subscription is over:
+ * no more events to come
+ */
+ tasklet_free(tl);
+ return NULL;
+ }
+ return task;
+}
+
+```
+
+Here is how we would initialize the task event_hdl_async_task_my:
+```
+ struct tasklet *my_task;
+
+ my_task = tasklet_new();
+ BUG_ON(!my_task);
+ my_task->context = &my_q; // we declared my_q previously in this example
+ /* we declared event_hdl_async_task_my previously
+ * in this example
+ */
+ my_task->process = event_hdl_async_task_my;
+
+```
+
+Given our task and our previously initialized event queue, here is how
+to perform the subscription:
+```
+ int test_val = 11;
+ uint64_t id = event_hdl_id("test", "my_task");
+
+ /* anonymous variant */
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ASYNC_TASK(&my_q,
+ my_task,
+ &test_val,
+ NULL));
+ /* identified variant */
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_ASYNC_TASK(id,
+ &my_q,
+ my_task,
+ &test_val,
+ NULL));
+```
+
+Note: it is not recommended to perform multiple subscriptions
+ that share the same event queue or same task(let) (or both)
+
+ That is, having more than one subscription waking a task(let)
+ and/or feeding the same event queue.
+
+ No check is performed on this when registering, so the API
+ won't prevent you from doing it.
+
+ If you are going to do this anyway despite this warning:
+
+ In the case you need to stop the task prematurely
+ (if this is not going to happen please skip this paragraph):
+ You are responsible for acknowledging the end of every
+ active subscriptions that refer to your task or
+ your event queue(s).
+ And you really don't want a subscription associated with
+ your task or event queue to keep going when the task
+ is not active anymore because:
+ 1: there will be memory leak
+ (event queue might continue to receive new events)
+ 2: there is a 100% chance of process crash in case of event
+ because we will try to wake a task (your task)
+ that might already be freed. Thus UAF will occur.
+
+2.3 ADVANCED FEATURES
+-----------------------
+
+We've already covered some of these features in the previous examples.
+Here is a documented recap.
+
+
+2.3.1 SUB MGMT
+-----------------------
+
+From an event handler context, either sync or async mode:
+ You have the ability to directly manage the subscription
+ that provided the event.
+
+As of today, these actions are supported:
+ - Consulting the subscription.
+ - Modifying the subscription (resubscribing within same family)
+ - Unregistering the subscription (unsubscribing).
+
+To do this, consider the following structure:
+```
+ struct event_hdl_sub_mgmt
+ {
+ /* manage subscriptions from event
+ * this must not be used directly because
+ * locking might be required
+ */
+ struct event_hdl_sub *this;
+ /* safe functions than can be used from
+ * event context (sync and async mode)
+ */
+ struct event_hdl_sub_type (*getsub)(const struct event_hdl_sub_mgmt *);
+ int (*resub)(const struct event_hdl_sub_mgmt *, struct event_hdl_sub_type);
+ void (*unsub)(const struct event_hdl_sub_mgmt *);
+ };
+
+```
+A reference to this structure is provided in every handler mode.
+
+Sync mode and normal async mode (directly from the callback data pointer):
+```
+ const struct event_hdl_cb *cb;
+ // cb->sub_mgmt
+ // cb->sub_mgmt->getsub(cb->sub_mgmt);
+ // cb->sub_mgmt->unsub(cb->sub_mgmt);
+```
+
+task and notify async modes (from the event):
+```
+ struct event_hdl_async_event *event;
+ // event->sub_mgmt
+ // event->sub_mgmt.getsub(&event->sub_mgmt);
+ // event->sub_mgmt.unsub(&event->sub_mgmt);
+```
+
+2.3.2 SUBSCRIPTION EXTERNAL LOOKUPS
+-----------------------
+
+As you've seen in 2.3.1, managing the subscription directly
+from the handler is a possibility.
+
+But for identified subscriptions, you also have the ability to
+perform lookups and management operations on specific subscriptions
+within a list based on their ID, anywhere in the code.
+
+/!\ This feature is not available for anonymous subscriptions /!\
+
+Here are the actions already supported:
+
+ - unregistering a subscription (unsubscribing)
+ - updating a subscription (resubscribing within same family)
+ - getting a ptr/reference to the subscription
+
+Those functions are documented in event_hdl.h
+(search for EVENT_HDL_LOOKUP section).
+
+To select a specific subscription, you must provide
+the unique identifier (uint64_t hash) that was provided when subscribing.
+(using event_hdl_id(scope, name) function)
+
+Notes:
+ "id" is only unique within a given subscription list.
+
+ When using event_hdl_id to provide the id:
+ It is your responsibility to make sure that you "own"
+ the scope if you rely on name to be "free".
+
+ As ID computation is backed by xxhash hash API,
+ you should be aware that hash collisions could occur,
+ but are extremely rare and are thus considered safe
+ enough for this usage.
+ (see event_hdl.h for implementation details)
+
+ Please consider ptr based subscription management if
+ these limitations don't fit your requirements.
+
+Here are some examples:
+
+unsubscribing:
+```
+ /* registering "scope":"name" subscription */
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_SYNC(event_hdl_id("scope", "name"),
+ my_sync_handler,
+ NULL,
+ NULL));
+ /* unregistering "scope":"name" subscription */
+ event_hdl_lookup_unsubscribe(NULL, event_hdl_id("scope", "name"));
+```
+
+2.3.3 SUBSCRIPTION PTR
+-----------------------
+
+To manage existing subscriptions from external code,
+we already talked about identified subscriptions that
+allow lookups within list.
+
+But there is another way to accomplish this.
+
+When subscribing, you can use the event_hdl_subscribe_ptr() function
+variant (same arguments as event_hdl_subscribe()).
+
+What this function does, is instead of returning 1 in case of
+success and 0 in case of failure: it returns a valid subscription ptr
+for success and NULL for failure.
+
+Returned ptr is guaranteed to remain valid even if subscription
+is ended meanwhile because the ptr is internally guarded with a refcount.
+
+Thus, as long as you don't explicitly unregister the subscription with
+event_hdl_unsubscribe() or drop the reference using event_hdl_drop(),
+subscription ptr won't be freed.
+
+This ptr will allow you to use the following subscription
+management functions from external code:
+
+ - event_hdl_take() to increment subscription ptr refcount
+ (automatically incremented when using event_hdl_subscribe_ptr)
+ - event_hdl_drop() to decrement subscription ptr refcount
+ - event_hdl_resubscribe() to modify subscription subtype
+ - event_hdl_unsubscribe() to end the subscription
+ (refcount will be automatically decremented)
+
+Here is an example:
+```
+ struct event_hdl_sub *sub_ptr;
+
+ /* registering a subscription with subscribe_ptr */
+ sub_ptr = event_hdl_subscribe_ptr(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_SYNC(my_sync_handler,
+ NULL,
+ NULL));
+
+ /* ... */
+
+ /* unregistering the subscription */
+ event_hdl_unsubscribe(sub_ptr);
+```
+
+Regarding identified subscriptions that were registered using the non ptr
+subscribe function:
+
+You still have the ability to get a reference to the related subscription
+(if it still exists), by using event_hdl_lookup_take(list, id) function.
+event_hdl_lookup_take will return a subscription ptr in case of success
+and NULL in case of failure.
+Returned ptr reference is automatically incremented, so it is safe to use.
+
+Please don't forget to drop the reference
+when holding the ptr is no longer needed.
+
+Example:
+```
+ struct event_hdl_sub *sub_ptr = NULL;
+
+ /* registering subscription id "test":"ptr" with normal subscribe */
+ if (event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_ADD,
+ EVENT_HDL_ID_SYNC(event_hdl_id("test", "ptr"),
+ my_sync_handler,
+ NULL,
+ NULL))) {
+ /* fetch ref to subscription "test":"ptr" */
+ sub_ptr = event_hdl_lookup_take(NULL,
+ event_hdl_id("test", "ptr"));
+
+ /* unregister the subscription using lookup */
+ event_hdl_lookup_unsubscribe(NULL,
+ event_hdl_id("test", "ptr"));
+ }
+
+ /* ... */
+
+ /* unregistering the subscription with ptr
+ * will do nothing because subscription was
+ * already ended by lookup_unsubscribe, but
+ * here the catch is that sub_ptr is still
+ * valid so this won't crash the program
+ */
+ if (sub_ptr) {
+ event_hdl_unsubscribe(sub_ptr);
+ /* unsubscribe will also result in subscription
+ * reference drop, thus subscription will be freed here
+ * because sub_ptr was the last active reference.
+ * You must not use sub_ptr anymore past this point
+ * or UAF could occur
+ */
+ }
+
+```
+
+2.3.4 PRIVATE FREE
+-----------------------
+
+Upon handler subscription, you have the ability to provide
+a private data pointer that will be passed to the handler
+when subscribed events occur.
+
+Sometimes this private data pointer will rely on dynamically allocated memory.
+And in such cases, you have no way of knowing when
+freeing this pointer can be done safely.
+
+You could be tempted to think that freeing right after performing
+the unsubscription could be safe.
+But this is not the case, remember we could be dealing with async handlers
+that might still consume pending events even though unsubscription
+has been performed from external code.
+
+To deal with this, you may want to provide the private_free
+function pointer upon subscription.
+This way, private_free function will automatically be called
+(with private as argument) when private is no longer used.
+
+Example:
+First we declare our private free function:
+```
+void my_private_free(void *my_private_data) {
+ /* here we only call free,
+ * but you could do more sophisticated stuff
+ */
+ free(my_private_data);
+}
+```
+Then:
+```
+ char *my_private_data = strdup("this string needs to be freed");
+
+ BUG_ON(!my_private_data);
+
+ event_hdl_subscribe(NULL, EVENT_HDL_SUB_SERVER_DEL,
+ EVENT_HDL_ID_ASYNC(event_hdl_id("test", "private"),
+ my_async_handler,
+ my_private_data,
+ my_private_free));
+
+ /* freeing my_private_data is not required anymore,
+ * it will be automatically freed by our private free
+ * function when subscription ends
+ */
+
+ /* unregistering "test":"private" subscription */
+ event_hdl_lookup_unsubscribe(NULL, event_hdl_id("test", "private"));
+
+ /* my_private_free will be automatically summoned when my_private_data
+ * is not referenced anymore
+ */
+```
+
+3 HOW TO ADD SUPPORT FOR NEW EVENTS
+-----------------------
+
+Adding support for a new event is pretty straightforward.
+
+First, you need to declare a new event subtype in event_hdl-t.h file
+(bottom of the file).
+
+You might want to declare a whole new event family, in which case
+you declare both the new family and the associated subtypes (if any).
+
+```
+ #define EVENT_HDL_SUB_NEW_FAMILY EVENT_HDL_SUB_FAMILY(4)
+ #define EVENT_HDL_SUB_NEW_FAMILY_SUBTYPE_1 EVENT_HDL_SUB_TYPE(4,0)
+```
+
+Then, you need to update the event_hdl_sub_type_map map,
+defined in src/event_hdl.c file (top of the file)
+to add string to event type and event type to string conversion support.
+You just need to add the missing entries corresponding to
+the event family / subtypes you've defined.
+
+Please follow this procedure:
+ You only added a new subtype to existing family: go to section 3.2
+ You added a new family: go to section 3.1
+
+3.1 DECLARING A NEW EVENT DATA STRUCTURE
+-----------------------
+
+You have the ability to provide additional data for a given
+event family when such events occur.
+
+Note that it is not mandatory: you could simply declare a new event family
+that does not provide any data.
+If this is your case, you can skip this section and go to 3.2 section.
+
+Now, take a look at this event data structure template
+(also defined at the top of event_hdl-t.h file):
+```
+ /* event data struct are defined as followed */
+ struct event_hdl_cb_data_template {
+ struct {
+ /* safe data can be safely used from both
+ * sync and async functions
+ * data consistency is guaranteed
+ */
+ } safe;
+ struct {
+ /* unsafe data may only be used from sync functions:
+ * in async mode, data consistency cannot be guaranteed
+ * and unsafe data may already be stale, thus using
+ * it is highly discouraged because it
+ * could lead to undefined behavior
+ * (UAF, null dereference...)
+ */
+ } unsafe;
+ };
+```
+
+This structure template allows you to easily create a new event
+data structure that can be provided with your new event family.
+
+You should name it after 'struct event_hdl_cb_data_new_family' so that it is
+easy to guess the event family it relates to.
+
+Indeed, each event data structure is to be associated with an
+unique event family type.
+For each subtypes within a family type, the associated data structure
+should be provided when publishing the event.
+
+The event data struct declaration should not be performed
+directly under event_hdl-t.h file:
+
+ It should be done in the header files of the corresponding
+ facility that will publish/provide this event.
+
+ Example: struct event_hdl_cb_data_server, provided for the
+ EVENT_HDL_SUB_SERVER event family, is going to be declared in
+ include/haproxy/server-t.h file.
+
+ However, in event_hdl-t.h, where you declare event family/subtypes,
+ you should add comments or links to the file containing the relevant
+ data struct declaration. This way we make sure all events related
+ information is centralized in event_hdl-t.h while keeping it clean
+ and not depending on any additional includes (you are free to
+ depend on specific data types within your custom event data structure).
+
+Please make sure that EVENT_HDL_ASYNC_EVENT_DATA (defined in event_hdl-t.h)
+is greater than sizeof(event_hdl_cb_data_new_family).
+
+It is required for async handlers to properly consume event data.
+
+You are free to adjust EVENT_HDL_ASYNC_EVENT_DATA size if needed.
+
+If EVENT_HDL_ASYNC_EVENT_DATA is not big enough to store your new
+event family struct, a compilation assert triggered by EVENT_HDL_CB_DATA
+will occur. In addition to this, an extra runtime BUG_ON will make
+sure the condition is met when publishing the event.
+The goal here is to force haproxy to fail explicitly so you know that
+something must be done on your side.
+
+3.2 PUBLISHING AN EVENT
+-----------------------
+
+Publishing an event is really simple.
+It relies on the event_hdl_publish function.
+
+The function is defined as follows:
+```
+ int event_hdl_publish(event_hdl_sub_list *sub_list,
+ event_hdl_sub_type e_type,
+ const struct event_hdl_cb_data *data);
+```
+
+We will ignore sub_list argument for now.
+In the examples below, we will use sub_list = NULL.
+Go to section 4 for a full picture about this feature.
+
+<e_type>: the event type that should be published.
+ All subscriptions referring to this event within
+ a subscription list context will be notified about the event.
+<data>: data provided for the event family of <e_type>
+ If <e_type>.family does not provide additional data,
+ data should be set to NULL.
+ If <e_type>.family does provide additional data, data should be set
+ using EVENT_HDL_CB_DATA macro.
+ (see the example below)
+
+The function returns 1 in case of SUCCESS (handlers successfully notified)
+and 0 in case of FAILURE (no handlers notified, because of memory error).
+
+Event publishing can be performed from anywhere in the code.
+(this example does not compile)
+```
+ struct event_hdl_cb_data_new_family event_data;
+
+ /* first we need to prepare event data
+ * that will be provided to event handlers
+ */
+
+ /* safe data, available from both sync and async contexts */
+ event_data.safe.my_custom_data = x;
+
+ /* unsafe data, only available from sync contexts */
+ event_data.unsafe.my_unsafe_data = y;
+
+ /* once data is prepared, we can publish the event */
+ event_hdl_publish(NULL,
+ EVENT_HDL_SUB_NEW_FAMILY_SUBTYPE_1,
+ EVENT_HDL_CB_DATA(&event_data));
+
+ /* EVENT_HDL_SUB_NEW_FAMILY_SUBTYPE_1 event was
+ * successfully published in global subscription list
+ */
+```
+
+--------------------------------------------------------------------------------
+|You should know that there is currently a limitation about publish function: |
+|The function should not be used from critical places |
+|(where the calling frequency is high |
+|or where timing sensitivity is high). |
+| |
+|Because in current implementation, subscription list lookups are not |
+|optimized for such use cases.                                                |
+--------------------------------------------------------------------------------
+
+4 SUBSCRIPTION LISTS
+-----------------------
+
+As you may already know, EVENT_HDL API main functions rely on
+subscription lists.
+Providing NULL where subscription list argument is required
+allows you to use the implicit global subscription list.
+
+But you can also provide a specific subscription list, example:
+ subscription list associated with a single entity so that you only
+ subscribe to events of this single entity
+
+A subscription list is of type event_hdl_sub_list.
+It is defined in event_hdl-t.h
+
+To make use of this feature, you should know about these 2 functions:
+
+event_hdl_sub_list_init(list): use this fcn to initialize
+ a new subscription list.
+
+Example:
+```
+ event_hdl_sub_list my_custom_list;
+
+ event_hdl_sub_list_init(&my_custom_list);
+```
+
+event_hdl_sub_list_destroy(list): use this fcn to destroy
+ an existing subscription list.
+
+Example:
+```
+	event_hdl_sub_list_destroy(&my_custom_list);
+```
+
+ Using this function will cause all the existing subscriptions
+ within the provided sub_list to be properly unregistered
+ and deleted according to their types.
+
+Now we'll take another quick look at event_hdl_publish() function:
+
+Remember that the function is defined as follows:
+```
+ int event_hdl_publish(event_hdl_sub_list *sub_list,
+ event_hdl_sub_type e_type,
+ const struct event_hdl_cb_data *data);
+```
+
+In the previous examples, we used sub_list = NULL.
+
+if sub_list is NULL:
+	event will be published in the global list
+else
+ event will be published in user specified sub_list
+
+5 MISC/HELPER FUNCTIONS
+-----------------------
+
+Don't forget to take a look at MISC/HELPER FUNCTIONS in
+include/haproxy/event_hdl.h (end of the file) for a
+complete list of helper functions / macros.
+
+We've already used some, if not the vast majority
+in the examples shown in this document.
+
+This includes, to name a few:
+ - event types manipulation
+ - event types comparison
+ - lookup id computing
+ - subscriber list management (covered in section 4)
+ - sync/async handler helpers
diff --git a/doc/internals/api/filters.txt b/doc/internals/api/filters.txt
new file mode 100644
index 0000000..f1d2f34
--- /dev/null
+++ b/doc/internals/api/filters.txt
@@ -0,0 +1,1188 @@
+ -----------------------------------------
+ Filters Guide - version 2.9
+ ( Last update: 2021-02-24 )
+ ------------------------------------------
+ Author : Christopher Faulet
+ Contact : christopher dot faulet at capflam dot org
+
+
+ABSTRACT
+--------
+
+The filters support is a new feature of HAProxy 1.7. It is a way to extend
+HAProxy without touching its core code and, in certain extent, without knowing
+its internals. This feature will ease contributions, reducing impact of
+changes. Another advantage will be to simplify HAProxy by replacing some parts
+by filters. As we will see, and as an example, the HTTP compression is the first
+feature moved in a filter.
+
+This document describes how to write a filter and what to keep in mind to do
+so. It also talks about the known limits and the pitfalls to avoid.
+
+As said, filters are quite new for now. The API is not frozen and will be
+updated/modified/improved/extended as needed.
+
+
+
+SUMMARY
+-------
+
+ 1. Filters introduction
+ 2. How to use filters
+ 3. How to write a new filter
+ 3.1. API Overview
+ 3.2. Defining the filter name and its configuration
+ 3.3. Managing the filter lifecycle
+ 3.3.1. Dealing with threads
+ 3.4. Handling the streams activity
+ 3.5. Analyzing the channels activity
+ 3.6. Filtering the data exchanged
+ 4. FAQ
+
+
+
+1. FILTERS INTRODUCTION
+-----------------------
+
+First of all, to fully understand how filters work and how to create one, it is
+best to know, at least from a distance, what is a proxy (frontend/backend), a
+stream and a channel in HAProxy and how these entities are linked to each other.
+In doc/internals/api/layers.txt is a good overview of the different layers in
+HAProxy and in doc/internals/muxes.pdf is described the flow between the
+different muxes.
+
+Then, to support filters, many callbacks have been added to HAProxy at different
+places, mainly around channel analyzers. Their purpose is to allow filters to
+be involved in the data processing, from the stream creation/destruction to
+the data forwarding. Depending on what it should do, a filter can implement all
+or part of these callbacks. For now, existing callbacks are focused on
+streams. But future improvements could enlarge filters scope. For instance, it
+could be useful to handle events at the connection level.
+
+In HAProxy configuration file, a filter is declared in a proxy section, except
+default. So the configuration corresponding to a filter declaration is attached
+to a specific proxy, and will be shared by all its instances. It is opaque from
+the HAProxy point of view, this is the filter responsibility to manage it. Each
+filter declaration matches a unique configuration. Several declarations of
+the same filter in the same proxy will be handled as different filters by
+HAProxy.
+
+A filter instance is represented by a partially opaque context (or a state)
+attached to a stream and passed as arguments to callbacks. Through this context,
+filter instances are stateful. Depending on whether the filter is declared in a
+frontend or a backend section, its instances will be created, respectively, when
+a stream is created or when a backend is selected. Their behaviors will also be
+different. Only instances of filters declared in a frontend section will be
+aware of the creation and the destruction of the stream, and will take part in
+the channels analyzing before the backend is defined.
+
+It is important to remember the configuration of a filter is shared by all its
+instances, while the context of an instance is owned by a unique stream.
+
+Filters are designed to be chained. It is possible to declare several filters in
+the same proxy section. The declaration order is important because filters will
+be called one after the other respecting this order. Frontend and backend
+filters are also chained, frontend ones called first. Even if the filters
+processing is serialized, each filter will behave as if it was alone (unless it was
+developed to be aware of other filters). For all that, some constraints are
+imposed to filters, especially when data exchanged between the client and the
+server are processed. We will discuss again these constraints when we will tackle
+the subject of writing a filter.
+
+
+
+2. HOW TO USE FILTERS
+---------------------
+
+To use a filter, the parameter 'filter' should be used, followed by the filter
+name and, optionally, its configuration in the desired listen, frontend or
+backend section. For instance :
+
+ listen test
+ ...
+ filter trace name TST
+ ...
+
+
+See doc/configuration.txt for a formal definition of the parameter 'filter'.
+Note that additional parameters on the filter line must be parsed by the filter
+itself.
+
+The list of available filters is reported by 'haproxy -vv' :
+
+ $> haproxy -vv
+ HAProxy version 1.7-dev2-3a1d4a-33 2016/03/21
+ Copyright 2000-2016 Willy Tarreau <willy@haproxy.org>
+
+ [...]
+
+ Available filters :
+ [COMP] compression
+ [TRACE] trace
+
+
+Multiple filter lines can be used in a proxy section to chain filters. Filters
+will be called in the declaration order.
+
+Some filters can support implicit declarations in certain circumstances
+(without the filter line). This is not recommended for new features but are
+useful for existing ones moved in a filter, for backward compatibility
+reasons. Implicit declarations are supported when there is only one filter used
+on a proxy. When several filters are used, explicit declarations are mandatory.
+The HTTP compression filter is one of these filters. Alone, using 'compression'
+keywords is enough to use it. But when at least a second filter is used, a
+filter line must be added.
+
+ # filter line is optional
+ listen t1
+ bind *:80
+ compression algo gzip
+ compression offload
+ server srv x.x.x.x:80
+
+ # filter line is mandatory for the compression filter
+ listen t2
+ bind *:81
+ filter trace name T2
+ filter compression
+ compression algo gzip
+ compression offload
+ server srv x.x.x.x:80
+
+
+
+
+3. HOW TO WRITE A NEW FILTER
+----------------------------
+
+To write a filter, there are 2 header files to explore :
+
+ * include/haproxy/filters-t.h : This is the main header file, containing all
+ important structures to use. It represents the
+ filter API.
+
+ * include/haproxy/filters.h : This header file contains helper functions that
+ may be used. It also contains the internal API
+ used by HAProxy to handle filters.
+
+To ease the filters integration, it is better to follow some conventions :
+
+ * Use 'flt_' prefix to name the filter (e.g flt_http_comp or flt_trace).
+
+ * Keep everything related to the filter in a same file.
+
+The filter 'trace' can be used as a template to write new filter. It is a good
+start to see how filters really work.
+
+3.1 API OVERVIEW
+----------------
+
+Writing a filter can be summarized to write functions and attach them to the
+existing callbacks. Available callbacks are listed in the following structure :
+
+ struct flt_ops {
+ /*
+ * Callbacks to manage the filter lifecycle
+ */
+ int (*init) (struct proxy *p, struct flt_conf *fconf);
+ void (*deinit) (struct proxy *p, struct flt_conf *fconf);
+ int (*check) (struct proxy *p, struct flt_conf *fconf);
+ int (*init_per_thread) (struct proxy *p, struct flt_conf *fconf);
+ void (*deinit_per_thread)(struct proxy *p, struct flt_conf *fconf);
+
+ /*
+ * Stream callbacks
+ */
+ int (*attach) (struct stream *s, struct filter *f);
+ int (*stream_start) (struct stream *s, struct filter *f);
+ int (*stream_set_backend)(struct stream *s, struct filter *f, struct proxy *be);
+ void (*stream_stop) (struct stream *s, struct filter *f);
+ void (*detach) (struct stream *s, struct filter *f);
+ void (*check_timeouts) (struct stream *s, struct filter *f);
+
+ /*
+ * Channel callbacks
+ */
+ int (*channel_start_analyze)(struct stream *s, struct filter *f,
+ struct channel *chn);
+ int (*channel_pre_analyze) (struct stream *s, struct filter *f,
+ struct channel *chn,
+ unsigned int an_bit);
+ int (*channel_post_analyze) (struct stream *s, struct filter *f,
+ struct channel *chn,
+ unsigned int an_bit);
+ int (*channel_end_analyze) (struct stream *s, struct filter *f,
+ struct channel *chn);
+
+ /*
+ * HTTP callbacks
+ */
+ int (*http_headers) (struct stream *s, struct filter *f,
+ struct http_msg *msg);
+ int (*http_payload) (struct stream *s, struct filter *f,
+ struct http_msg *msg, unsigned int offset,
+ unsigned int len);
+ int (*http_end) (struct stream *s, struct filter *f,
+ struct http_msg *msg);
+
+ void (*http_reset) (struct stream *s, struct filter *f,
+ struct http_msg *msg);
+ void (*http_reply) (struct stream *s, struct filter *f,
+ short status,
+ const struct buffer *msg);
+
+ /*
+ * TCP callbacks
+ */
+ int (*tcp_payload) (struct stream *s, struct filter *f,
+ struct channel *chn, unsigned int offset,
+ unsigned int len);
+ };
+
+
+We will explain in following parts when these callbacks are called and what they
+should do.
+
+Filters are declared in proxy sections. So each proxy has an ordered list of
+filters, possibly empty if no filter is used. When the configuration of a proxy
+is parsed, each filter line represents an entry in this list. In the structure
+'proxy', the filters configurations are stored in the field 'filter_configs',
+each one of type 'struct flt_conf *' :
+
+ /*
+ * Structure representing the filter configuration, attached to a proxy and
+ * accessible from a filter when instantiated in a stream
+ */
+ struct flt_conf {
+ const char *id; /* The filter id */
+ struct flt_ops *ops; /* The filter callbacks */
+ void *conf; /* The filter configuration */
+ struct list list; /* Next filter for the same proxy */
+ unsigned int flags; /* FLT_CFG_FL_* */
+ };
+
+ * 'flt_conf.id' is an identifier, defined by the filter. It can be
+ NULL. HAProxy does not use this field. Filters can use it in log messages or
+ as a unique identifier to check multiple declarations. It is the filter
+ responsibility to free it, if necessary.
+
+ * 'flt_conf.conf' is opaque. It is the internal configuration of a filter,
+ generally allocated and filled by its parsing function (See § 3.2). It is
+ the filter responsibility to free it.
+
+ * 'flt_conf.ops' references the callbacks implemented by the filter. This
+ field must be set during the parsing phase (See § 3.2) and can be refined
+ during the initialization phase (See § 3.3). If it is dynamically allocated,
+ it is the filter responsibility to free it.
+
+ * 'flt_conf.flags' is a bitfield to specify the filter capabilities. For now,
+ only FLT_CFG_FL_HTX may be set when a filter is able to process HTX
+ streams. If not set, the filter is excluded from the HTTP filtering.
+
+
+The filter configuration is global and shared by all its instances. A filter
+instance is created in the context of a stream and attached to this stream. In
+the structure 'stream', the field 'strm_flt' is the state of all filter
+instances attached to a stream :
+
+ /*
+ * Structure representing the "global" state of filters attached to a
+ * stream.
+ */
+ struct strm_flt {
+ struct list filters; /* List of filters attached to a stream */
+ struct filter *current[2]; /* From which filter resume processing, for a specific channel.
+ * This is used for resumable callbacks only,
+ * If NULL, we start from the first filter.
+ * 0: request channel, 1: response channel */
+ unsigned short flags; /* STRM_FL_* */
+ unsigned char nb_req_data_filters; /* Number of data filters registered on the request channel */
+ unsigned char nb_rsp_data_filters; /* Number of data filters registered on the response channel */
+ unsigned long long offset[2]; /* global offset of input data already filtered for a specific channel
+ * 0: request channel, 1: response channel */
+ };
+
+
+Filter instances attached to a stream are stored in the field
+'strm_flt.filters', each instance is of type 'struct filter *' :
+
+ /*
+ * Structure representing a filter instance attached to a stream
+ *
+ * 2D-Array fields are used to store info per channel. The first index
+ * stands for the request channel, and the second one for the response
+ * channel. Especially, <next> and <fwd> are offsets representing amount of
+ * data that the filter are, respectively, parsed and forwarded on a
+ * channel. Filters can access these values using FLT_NXT and FLT_FWD
+ * macros.
+ */
+ struct filter {
+ struct flt_conf *config; /* the filter's configuration */
+ void *ctx; /* The filter context (opaque) */
+ unsigned short flags; /* FLT_FL_* */
+ unsigned long long offset[2]; /* Offset of input data already filtered for a specific channel
+ * 0: request channel, 1: response channel */
+ unsigned int pre_analyzers; /* bit field indicating analyzers to
+ * pre-process */
+ unsigned int post_analyzers; /* bit field indicating analyzers to
+ * post-process */
+ struct list list; /* Next filter for the same proxy/stream */
+ };
+
+ * 'filter.config' is the filter configuration previously described. All
+ instances of a filter share it.
+
+ * 'filter.ctx' is an opaque context. It is managed by the filter, so it is its
+ responsibility to free it.
+
+ * 'filter.pre_analyzers' and 'filter.post_analyzers' will be described later
+ (See § 3.5).
+
+ * 'filter.offset' will be described later (See § 3.6).
+
+
+3.2. DEFINING THE FILTER NAME AND ITS CONFIGURATION
+---------------------------------------------------
+
+During the filter development, the first thing to do is to add it in the
+supported filters. To do so, its name must be registered as a valid keyword on
+the filter line :
+
+ /* Declare the filter parser for "my_filter" keyword */
+ static struct flt_kw_list flt_kws = { "MY_FILTER_SCOPE", { }, {
+ { "my_filter", parse_my_filter_cfg, NULL /* private data */ },
+ { NULL, NULL, NULL },
+ }
+ };
+ INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
+
+
+Then the filter internal configuration must be defined. For instance :
+
+ struct my_filter_config {
+ struct proxy *proxy;
+ char *name;
+ /* ... */
+ };
+
+
+All callbacks implemented by the filter must then be declared. Here, a global
+variable is used :
+
+ struct flt_ops my_filter_ops = {
+ .init = my_filter_init,
+ .deinit = my_filter_deinit,
+ .check = my_filter_config_check,
+
+ /* ... */
+ };
+
+
+Finally, the function to parse the filter configuration must be written, here
+'parse_my_filter_cfg'. This function must parse all remaining keywords on the
+filter line :
+
+ /* Return -1 on error, else 0 */
+ static int
+ parse_my_filter_cfg(char **args, int *cur_arg, struct proxy *px,
+ struct flt_conf *flt_conf, char **err, void *private)
+ {
+ struct my_filter_config *my_conf;
+ int pos = *cur_arg;
+
+ /* Allocate the internal configuration used by the filter */
+ my_conf = calloc(1, sizeof(*my_conf));
+ if (!my_conf) {
+ memprintf(err, "%s : out of memory", args[*cur_arg]);
+ return -1;
+ }
+ my_conf->proxy = px;
+
+ /* ... */
+
+ /* Parse all keywords supported by the filter and fill the internal
+ * configuration */
+ pos++; /* Skip the filter name */
+ while (*args[pos]) {
+ if (!strcmp(args[pos], "name")) {
+ if (!*args[pos + 1]) {
+ memprintf(err, "'%s' : '%s' option without value",
+ args[*cur_arg], args[pos]);
+ goto error;
+ }
+ my_conf->name = strdup(args[pos + 1]);
+ if (!my_conf->name) {
+ memprintf(err, "%s : out of memory", args[*cur_arg]);
+ goto error;
+ }
+ pos += 2;
+ }
+
+ /* ... parse other keywords ... */
+ }
+ *cur_arg = pos;
+
+ /* Set callbacks supported by the filter */
+ flt_conf->ops = &my_filter_ops;
+
+ /* Last, save the internal configuration */
+ flt_conf->conf = my_conf;
+ return 0;
+
+ error:
+ if (my_conf->name)
+ free(my_conf->name);
+ free(my_conf);
+ return -1;
+ }
+
+
+WARNING : In this parsing function, 'flt_conf->ops' must be initialized. All
+ arguments of the filter line must also be parsed. This is mandatory.
+
+In the previous example, the filter line should be read as follows :
+
+ filter my_filter name MY_NAME ...
+
+
+Optionally, by implementing the 'flt_ops.check' callback, an extra step is added
+to check the internal configuration of the filter after the parsing phase, when
+the HAProxy configuration is fully defined. For instance :
+
+ /* Check configuration of a trace filter for a specified proxy.
+ * Return 1 on error, else 0. */
+ static int
+ my_filter_config_check(struct proxy *px, struct flt_conf *my_conf)
+ {
+ if (px->mode != PR_MODE_HTTP) {
+ Alert("The filter 'my_filter' cannot be used in non-HTTP mode.\n");
+ return 1;
+ }
+
+ /* ... */
+
+ return 0;
+ }
+
+
+
+3.3. MANAGING THE FILTER LIFECYCLE
+----------------------------------
+
+Once the configuration parsed and checked, filters are ready to be used. There
+are two main callbacks to manage the filter lifecycle :
+
+ * 'flt_ops.init' : It initializes the filter for a proxy. This callback may be
+ defined to finish the filter configuration.
+
+ * 'flt_ops.deinit' : It cleans up what the parsing function and the init
+ callback have done. This callback is useful to release
+ memory allocated for the filter configuration.
+
+Here is an example :
+
+ /* Initialize the filter. Returns -1 on error, else 0. */
+ static int
+ my_filter_init(struct proxy *px, struct flt_conf *fconf)
+ {
+ struct my_filter_config *my_conf = fconf->conf;
+
+ /* ... */
+
+ return 0;
+ }
+
+ /* Free resources allocated by the trace filter. */
+ static void
+ my_filter_deinit(struct proxy *px, struct flt_conf *fconf)
+ {
+ struct my_filter_config *my_conf = fconf->conf;
+
+ if (my_conf) {
+ free(my_conf->name);
+ /* ... */
+ free(my_conf);
+ }
+ fconf->conf = NULL;
+ }
+
+
+3.3.1 DEALING WITH THREADS
+--------------------------
+
+When HAProxy is compiled with the threads support and started with more than one
+thread (global.nbthread > 1), then it is possible to manage the filter per
+thread with following callbacks :
+
+ * 'flt_ops.init_per_thread': It initializes the filter for each thread. It
+ works the same way as 'flt_ops.init' but in the
+ context of a thread. This callback is called
+ after the thread creation.
+
+ * 'flt_ops.deinit_per_thread': It cleans up what the init_per_thread callback
+ have done. It is called in the context of a
+ thread, before exiting it.
+
+It is the filter responsibility to deal with concurrency. check, init and deinit
+callbacks are called on the main thread. All others are called on a "worker"
+thread (not always the same). It is also the filter responsibility to know if
+HAProxy is started with more than one thread. If it is started with one thread
+(or compiled without the threads support), these callbacks will be silently
+ignored (in this case, global.nbthread will be always equal to one).
+
+
+3.4. HANDLING THE STREAMS ACTIVITY
+-----------------------------------
+
+It may be interesting to handle streams activity. For now, there are three
+callbacks that should be defined to do so :
+
+ * 'flt_ops.stream_start' : It is called when a stream is started. This
+ callback can fail by returning a negative value. It
+ will be considered as a critical error by HAProxy
+ which disabled the listener for a short time.
+
+ * 'flt_ops.stream_set_backend' : It is called when a backend is set for a
+ stream. This callback will be called for all
+ filters attached to a stream (frontend and
+ backend). Note this callback is not called if
+ the frontend and the backend are the same.
+
+ * 'flt_ops.stream_stop' : It is called when a stream is stopped. This callback
+ always succeeds. Anyway, it is too late to return an
+ error.
+
+For instance :
+
+ /* Called when a stream is created. Returns -1 on error, else 0. */
+ static int
+ my_filter_stream_start(struct stream *s, struct filter *filter)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... */
+
+ return 0;
+ }
+
+ /* Called when a backend is set for a stream */
+ static int
+ my_filter_stream_set_backend(struct stream *s, struct filter *filter,
+ struct proxy *be)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... */
+
+ return 0;
+ }
+
+ /* Called when a stream is destroyed */
+ static void
+ my_filter_stream_stop(struct stream *s, struct filter *filter)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... */
+ }
+
+
+WARNING : Handling the streams creation and destruction is only possible for
+ filters defined on proxies with the frontend capability.
+
+In addition, it is possible to handle creation and destruction of filter
+instances using following callbacks:
+
+ * 'flt_ops.attach' : It is called after a filter instance creation, when it is
+ attached to a stream. This happens when the stream is
+ started for filters defined on the stream's frontend and
+ when the backend is set for filters declared on the
+ stream's backend. It is possible to ignore the filter, if
+ needed, by returning 0. This could be useful to have
+ conditional filtering.
+
+ * 'flt_ops.detach' : It is called when a filter instance is detached from a
+ stream, before its destruction. This happens when the
+ stream is stopped for filters defined on the stream's
+ frontend and when the analyze ends for filters defined on
+ the stream's backend.
+
+For instance :
+
+ /* Called when a filter instance is created and attached to a stream */
+ static int
+ my_filter_attach(struct stream *s, struct filter *filter)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ if (/* ... */)
+ return 0; /* Ignore the filter here */
+ return 1;
+ }
+
+ /* Called when a filter instance is detached from a stream, just before its
+ * destruction */
+ static void
+ my_filter_detach(struct stream *s, struct filter *filter)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... */
+ }
+
+Finally, it may be interesting to notify the filter when the stream is woken up
+because of an expired timer. This could let a chance to check some internal
+timeouts, if any. To do so the following callback must be used :
+
+ * 'flt_opt.check_timeouts' : It is called when a stream is woken up because of
+ an expired timer.
+
+For instance :
+
+ /* Called when a stream is woken up because of an expired timer */
+ static void
+ my_filter_check_timeouts(struct stream *s, struct filter *filter)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... */
+ }
+
+
+3.5. ANALYZING THE CHANNELS ACTIVITY
+------------------------------------
+
+The main purpose of filters is to take part in the channels analyzing. To do so,
+there is 2 callbacks, 'flt_ops.channel_pre_analyze' and
+'flt_ops.channel_post_analyze', called respectively before and after each
+analyzer attached to a channel, except analyzers responsible for the data
+forwarding (TCP or HTTP). Concretely, on the request channel, these callbacks
+could be called before following analyzers :
+
+ * tcp_inspect_request (AN_REQ_INSPECT_FE and AN_REQ_INSPECT_BE)
+ * http_wait_for_request (AN_REQ_WAIT_HTTP)
+ * http_wait_for_request_body (AN_REQ_HTTP_BODY)
+ * http_process_req_common (AN_REQ_HTTP_PROCESS_FE)
+ * process_switching_rules (AN_REQ_SWITCHING_RULES)
+ * http_process_req_common (AN_REQ_HTTP_PROCESS_BE)
+ * http_process_tarpit (AN_REQ_HTTP_TARPIT)
+ * process_server_rules (AN_REQ_SRV_RULES)
+ * http_process_request (AN_REQ_HTTP_INNER)
+ * tcp_persist_rdp_cookie (AN_REQ_PRST_RDP_COOKIE)
+ * process_sticking_rules (AN_REQ_STICKING_RULES)
+
+And on the response channel :
+
+ * tcp_inspect_response (AN_RES_INSPECT)
+ * http_wait_for_response (AN_RES_WAIT_HTTP)
+ * process_store_rules (AN_RES_STORE_RULES)
+ * http_process_res_common (AN_RES_HTTP_PROCESS_BE)
+
+Unlike the other callbacks previously seen before, 'flt_ops.channel_pre_analyze'
+can interrupt the stream processing. So a filter can decide to not execute the
+analyzer that follows and wait for the next iteration. If there is more than one
+filter, following ones are skipped. On the next iteration, the filtering resumes
+where it was stopped, i.e. on the filter that has previously stopped the
+processing. So it is possible for a filter to stop the stream processing on a
+specific analyzer for a while before continuing. Moreover, this callback can be
+called many times for the same analyzer, until it finishes its processing. For
+instance :
+
+ /* Called before a processing happens on a given channel.
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise. */
+ static int
+ my_filter_chn_pre_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn, unsigned an_bit)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ switch (an_bit) {
+ case AN_REQ_WAIT_HTTP:
+ if (/* wait that a condition is verified before continuing */)
+ return 0;
+ break;
+ /* ... */
+ }
+ return 1;
+ }
+
+ * 'an_bit' is the analyzer id. All analyzers are listed in
+ 'include/haproxy/channels-t.h'.
+
+ * 'chn' is the channel on which the analyzing is done. It is possible to
+ determine if it is the request or the response channel by testing if
+ CF_ISRESP flag is set :
+
+ │ ((chn->flags & CF_ISRESP) == CF_ISRESP)
+
+
+In previous example, the stream processing is blocked before receipt of the HTTP
+request until a condition is verified.
+
+'flt_ops.channel_post_analyze', for its part, is not resumable. It returns a
+negative value if an error occurs, any other value otherwise. It is called when
+a filterable analyzer finishes its processing, so once for the same analyzer.
+For instance :
+
+ /* Called after a processing happens on a given channel.
+ * Returns a negative value if an error occurs, any other
+ * value otherwise. */
+ static int
+ my_filter_chn_post_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn, unsigned an_bit)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+ struct http_msg *msg;
+
+ switch (an_bit) {
+ case AN_REQ_WAIT_HTTP:
+ if (/* A test on received headers before any other treatment */) {
+ msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
+ s->txn->status = 400;
+ msg->msg_state = HTTP_MSG_ERROR;
+ http_reply_and_close(s, s->txn->status, http_error_message(s));
+ return -1; /* This is an error ! */
+ }
+ break;
+ /* ... */
+ }
+ return 1;
+ }
+
+
+Pre and post analyzer callbacks of a filter are not automatically called. They
+must be registered explicitly on analyzers, updating the value of
+'filter.pre_analyzers' and 'filter.post_analyzers' bit fields. All analyzer bits
+are listed in 'include/types/channels.h'. Here is an example :
+
+ static int
+ my_filter_stream_start(struct stream *s, struct filter *filter)
+ {
+ /* ... */
+
+ /* Register the pre analyzer callback on all request and response
+ * analyzers */
+ filter->pre_analyzers |= (AN_REQ_ALL | AN_RES_ALL)
+
+ /* Register the post analyzer callback of only on AN_REQ_WAIT_HTTP and
+ * AN_RES_WAIT_HTTP analyzers */
+ filter->post_analyzers |= (AN_REQ_WAIT_HTTP | AN_RES_WAIT_HTTP)
+
+ /* ... */
+ return 0;
+ }
+
+
+To surround activity of a filter during the channel analyzing, two new analyzers
+have been added :
+
+ * 'flt_start_analyze' (AN_REQ/RES_FLT_START_FE/AN_REQ_RES_FLT_START_BE) : For
+ a specific filter, this analyzer is called before any call to the
+ 'channel_analyze' callback. From the filter point of view, it calls the
+ 'flt_ops.channel_start_analyze' callback.
+
+ * 'flt_end_analyze' (AN_REQ/RES_FLT_END) : For a specific filter, this
+ analyzer is called when all other analyzers have finished their
+ processing. From the filter point of view, it calls the
+ 'flt_ops.channel_end_analyze' callback.
+
+These analyzers are called only once per stream.
+
+'flt_ops.channel_start_analyze' and 'flt_ops.channel_end_analyze' callbacks can
+interrupt the stream processing, as 'flt_ops.channel_analyze'. Here is an
+example :
+
+ /* Called when analyze starts for a given channel
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise. */
+ static int
+ my_filter_chn_start_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... TODO ... */
+
+ return 1;
+ }
+
+ /* Called when analyze ends for a given channel
+ * Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise. */
+ static int
+ my_filter_chn_end_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* ... TODO ... */
+
+ return 1;
+ }
+
+
+Workflow on channels can be summarized as following :
+
+ FE: Called for filters defined on the stream's frontend
+ BE: Called for filters defined on the stream's backend
+
+ +------->---------+
+ | | |
+ +----------------------+ | +----------------------+
+ | flt_ops.attach (FE) | | | flt_ops.attach (BE) |
+ +----------------------+ | +----------------------+
+ | | |
+ V | V
+ +--------------------------+ | +------------------------------------+
+ | flt_ops.stream_start (FE)| | | flt_ops.stream_set_backend (FE+BE) |
+ +--------------------------+ | +------------------------------------+
+ | | |
+ ... | ...
+ | | |
+ | ^ |
+ | --+ | | --+
+ +------<----------+ | | +--------<--------+ |
+ | | | | | | |
+ V | | | V | |
++-------------------------------+ | | | +-------------------------------+ | |
+| flt_start_analyze (FE) +-+ | | | flt_start_analyze (BE) +-+ |
+|(flt_ops.channel_start_analyze)| | F | |(flt_ops.channel_start_analyze)| |
++---------------+---------------+ | R | +-------------------------------+ |
+ | | O | | |
+ +------<---------+ | N ^ +--------<-------+ | B
+ | | | T | | | | A
++---------------|------------+ | | E | +---------------|------------+ | | C
+|+--------------V-------------+ | | N | |+--------------V-------------+ | | K
+||+----------------------------+ | | D | ||+----------------------------+ | | E
+|||flt_ops.channel_pre_analyze | | | | |||flt_ops.channel_pre_analyze | | | N
+||| V | | | | ||| V | | | D
+||| analyzer (FE) +-+ | | ||| analyzer (FE+BE) +-+ |
++|| V | | | +|| V | |
+ +|flt_ops.channel_post_analyze| | | +|flt_ops.channel_post_analyze| |
+ +----------------------------+ | | +----------------------------+ |
+ | --+ | | |
+ +------------>------------+ ... |
+ | |
+ [ data filtering (see below) ] |
+ | |
+ ... |
+ | |
+ +--------<--------+ |
+ | | |
+ V | |
+ +-------------------------------+ | |
+ | flt_end_analyze (FE+BE) +-+ |
+ | (flt_ops.channel_end_analyze) | |
+ +---------------+---------------+ |
+ | --+
+ V
+ +----------------------+
+ | flt_ops.detach (BE) |
+ +----------------------+
+ |
+ V
+ +--------------------------+
+ | flt_ops.stream_stop (FE) |
+ +--------------------------+
+ |
+ V
+ +----------------------+
+ | flt_ops.detach (FE) |
+ +----------------------+
+ |
+ V
+
+By zooming on an analyzer box we have:
+
+ ...
+ |
+ V
+ |
+ +-----------<-----------+
+ | |
+ +-----------------+--------------------+ |
+ | | | |
+ | +--------<---------+ | |
+ | | | | |
+ | V | | |
+ | flt_ops.channel_pre_analyze ->-+ | ^
+ | | | |
+ | | | |
+ | V | |
+ | analyzer --------->-----+--+
+ | | |
+ | | |
+ | V |
+ | flt_ops.channel_post_analyze |
+ | | |
+ | | |
+ +-----------------+--------------------+
+ |
+ V
+ ...
+
+
+3.6. FILTERING THE DATA EXCHANGED
+-----------------------------------
+
+WARNING : To fully understand this part, it is important to be aware of how the
+ buffers work in HAProxy. For the HTTP part, it is also important to
+ understand how data are parsed and structured, and how the internal
+ representation, called HTX, works. See doc/internals/buffer-api.txt
+ and doc/internals/htx-api.txt for details.
+
+An extended feature of the filters is the data filtering. By default a filter
+does not look into data exchanged between the client and the server because it
+is expensive. Indeed, instead of forwarding data without any processing, each
+byte needs to be buffered.
+
+So, to enable the data filtering on a channel, at any time, in one of previous
+callbacks, 'register_data_filter' function must be called. And conversely, to
+disable it, 'unregister_data_filter' function must be called. For instance :
+
+ my_filter_http_headers(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+
+ /* 'chn' must be the request channel */
+ if (!(msg->chn->flags & CF_ISRESP)) {
+ struct htx *htx;
+ struct ist hdr;
+ struct http_hdr_ctx ctx;
+
+        htx = htxbuf(&msg->chn->buf);
+
+ /* Enable the data filtering for the request if 'X-Filter' header
+ * is set to 'true'. */
+        hdr = ist("X-Filter");
+ ctx.blk = NULL;
+ if (http_find_header(htx, hdr, &ctx, 0) &&
+ ctx.value.len >= 4 && memcmp(ctx.value.ptr, "true", 4) == 0)
+            register_data_filter(s, msg->chn, filter);
+ }
+
+ return 1;
+ }
+
+Here, the data filtering is enabled if the HTTP header 'X-Filter' is found and
+set to 'true'.
+
+If several filters are declared, the evaluation order remains the same,
+regardless of the order of the registrations to the data filtering. Data
+registrations must be performed before the data forwarding step. However, a
+filter may be unregistered from the data filtering at any time.
+
+Depending on the stream type, TCP or HTTP, the way to handle data filtering is
+different. HTTP data are structured while TCP data are raw. And there are more
+callbacks for HTTP streams to fully handle all steps of an HTTP transaction. But
+the main part is the same. The data filtering is performed in one callback,
+called in loop on input data starting at a specific offset for a given
+length. Data analyzed by a filter are considered as forwarded from its point of
+view. Because filters are chained, a filter never analyzes more data than its
+predecessors. Thus only data analyzed by the last filter are effectively
+forwarded. This means, at any time, any filter may choose to not analyze all
+available data (available from its point of view), blocking the data forwarding.
+
+Internally, filters own 2 offsets representing the number of bytes already
+analyzed in the available input data, one per channel. There is also an offset
+couple at the stream level, in the strm_flt object, representing the total
+number of bytes already forwarded. These offsets may be retrieved and updated
+using following macros :
+
+ * FLT_OFF(flt, chn)
+
+ * FLT_STRM_OFF(s, chn)
+
+where 'flt' is the 'struct filter' passed as argument in all callbacks, 's' the
+filtered stream and 'chn' is the considered channel. However, there is no reason
+for a filter to use these macros or take care of these offsets.
+
+
+3.6.1 FILTERING DATA ON TCP STREAMS
+-----------------------------------
+
+The TCP data filtering for TCP streams is the easy case, because HAProxy does not
+parse these data. Data are stored raw in the buffer. So there is only one
+callback to consider:
+
+ * 'flt_ops.tcp_payload' : This callback is called when input data are
+ available. If not defined, all available data will be considered as analyzed
+ and forwarded from the filter point of view.
+
+This callback is called only if the filter is registered to analyze TCP
+data. Here is an example :
+
+ /* Returns a negative value if an error occurs, else the number of
+ * consumed bytes. */
+ static int
+ my_filter_tcp_payload(struct stream *s, struct filter *filter,
+ struct channel *chn, unsigned int offset,
+ unsigned int len)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+ int ret = len;
+
+ /* Do not parse more than 'my_conf->max_parse' bytes at a time */
+ if (my_conf->max_parse != 0 && ret > my_conf->max_parse)
+ ret = my_conf->max_parse;
+
+ /* if available data are not completely parsed, wake up the stream to
+ * be sure to not freeze it. The best is probably to set a
+ * chn->analyse_exp timer */
+ if (ret != len)
+ task_wakeup(s->task, TASK_WOKEN_MSG);
+ return ret;
+ }
+
+But it is important to note that tunnelled data of an HTTP stream may also be
+filtered via this callback. Tunnelled data are data exchanged after an HTTP tunnel
+is established between the client and the server, via an HTTP CONNECT or via a
+protocol upgrade. In this case, the data are structured. Of course, to do so,
+the filter must be able to parse HTX data and must have the FLT_CFG_FL_HTX flag
+set. At any time, the IS_HTX_STRM() macro may be used on the stream to know if
+it is an HTX stream or a TCP stream.
+
+
+3.6.2 FILTERING DATA ON HTTP STREAMS
+------------------------------------
+
+The HTTP data filtering is a bit more complex because HAProxy data are
+structured and represented in an internal format, called HTX. So basically
+there is the HTTP counterpart to the previous callback :
+
+ * 'flt_ops.http_payload' : This callback is called when input data are
+ available. If not defined, all available data will be considered as analyzed
+ and forwarded for the filter.
+
+But the prototype for this callback is slightly different. Instead of having
+the channel as parameter, we have the HTTP message (struct http_msg). This
+callback is called only if the filter is registered to analyze HTTP data. Here is
+an example :
+
+ /* Returns a negative value if an error occurs, else the number of
+ * consumed bytes. */
+ static int
+ my_filter_http_payload(struct stream *s, struct filter *filter,
+ struct http_msg *msg, unsigned int offset,
+ unsigned int len)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_ret htxret = htx_find_offset(htx, offset);
+ struct htx_blk *blk;
+
+ blk = htxret.blk;
+ offset = htxret.ret;
+ for (; blk; blk = htx_get_next_blk(blk, htx)) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+ else if (type == HTX_BLK_DATA) {
+ /* filter data */
+ }
+ else
+ break;
+ }
+
+ return len;
+ }
+
+In addition, there are two others callbacks :
+
+ * 'flt_ops.http_headers' : This callback is called just before the HTTP body
+ forwarding and after any processing on the request/response HTTP
+ headers. When defined, this callback is always called for HTTP streams
+ (i.e. without needs of a registration on data filtering).
+ Here is an example :
+
+
+ /* Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise. */
+ static int
+ my_filter_http_headers(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+ {
+ struct my_filter_config *my_conf = FLT_CONF(filter);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_sl *sl = http_get_stline(htx);
+ int32_t pos;
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist n, v;
+
+ if (type == HTX_BLK_EOH)
+ break;
+ if (type != HTX_BLK_HDR)
+ continue;
+
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+ /* Do something on the header name/value */
+ }
+
+ return 1;
+ }
+
+ * 'flt_ops.http_end' : This callback is called when the whole HTTP message was
+ processed. It may interrupt the stream processing. So, it could be used to
+ synchronize the HTTP request with the HTTP response, for instance :
+
+ /* Returns a negative value if an error occurs, 0 if it needs to wait,
+ * any other value otherwise. */
+ static int
+ my_filter_http_end(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+ {
+ struct my_filter_ctx *my_ctx = filter->ctx;
+
+
+ if (!(msg->chn->flags & CF_ISRESP)) /* The request */
+ my_ctx->end_of_req = 1;
+ else /* The response */
+ my_ctx->end_of_rsp = 1;
+
+ /* Both the request and the response are finished */
+ if (my_ctx->end_of_req == 1 && my_ctx->end_of_rsp == 1)
+ return 1;
+
+ /* Wait */
+ return 0;
+ }
+
+Then, to finish, there are 2 informational callbacks :
+
+ * 'flt_ops.http_reset' : This callback is called when an HTTP message is
+ reset. This happens either when a 1xx informational response is received, or
+ if we're retrying to send the request to the server after it failed. It
+ could be useful to reset the filter context before receiving the true
+ response.
+ By checking s->txn->status, it is possible to know why this callback is
+ called. If it's a 1xx, we're called because of an informational
+ message. Otherwise, it is a L7 retry.
+
+ * 'flt_ops.http_reply' : This callback is called when, at any time, HAProxy
+ decides to stop the processing on a HTTP message and to send an internal
+ response to the client. This mainly happens when an error or a redirect
+ occurs.
+
+
+3.6.3 REWRITING DATA
+--------------------
+
+The last part, and the trickiest one about the data filtering, is about the data
+rewriting. For now, the filter API does not offer a lot of functions to handle
+it. There are only functions to notify HAProxy that the data size has changed to
+let it update internal state of filters. This is the developer responsibility to
+update data itself, i.e. the buffer offsets, using following function :
+
+ * 'flt_update_offsets()' : This function must be called when a filter alters
+   incoming data. It updates offsets of the stream and of all filters
+   preceding the calling one. Not calling this function when a filter changes
+   the size of incoming data leads to undefined behavior.
+
+A good example of filter changing the data size is the HTTP compression filter.
diff --git a/doc/internals/api/htx-api.txt b/doc/internals/api/htx-api.txt
new file mode 100644
index 0000000..62b3093
--- /dev/null
+++ b/doc/internals/api/htx-api.txt
@@ -0,0 +1,570 @@
+ -----------------------------------------------
+ HTX API
+ Version 1.1
+ ( Last update: 2021-02-24 )
+ -----------------------------------------------
+ Author : Christopher Faulet
+ Contact : cfaulet at haproxy dot com
+
+1. Background
+
+Historically, HAProxy stored HTTP messages in a raw fashion in buffers, keeping
+parsing information separately in a "struct http_msg" owned by the stream. It was
+optimized for data transfer, but not so much for rewrites. It was also HTTP/1
+centered. While it was the only HTTP version supported, it was not a
+problem. But with the rise of HTTP/2, it starts to be hard to still use this
+representation.
+
+At the first age of the HTTP/2 in HAProxy, H2 messages were converted into
+H1. This was terribly inefficient because it required two parsing passes, a
+first one in H2 and a second one in H1, with a conversion in the middle. And of
+course, the same was also true in the opposite direction. Outgoing H1 messages
+had to be converted back to H2 to be sent. Even worse, because of the H2->H1
+conversion, only client H2 connections were supported.
+
+So, to address all these problems, we decided to replace the old raw
+representation by a version-agnostic and self-structured internal HTTP
+representation, the HTX. As an additional benefit, with this new representation,
+the message parsing and its processing are now separated, making all the HTTP
+analysis simpler and cleaner. The parsing of HTTP messages is now handled by
+the multiplexers (h1 or h2).
+
+
+2. The HTX message
+
+The HTX is a structure containing useful information about an HTTP message
+followed by a contiguous array with some parts of the message. These parts are
+called blocks. A block is composed of metadata (htx_blk) and an associated
+payload. Blocks' metadata are stored starting from the end of the array while
+their payload are stored at the beginning. Blocks' metadata are often simply
+called blocks. It is a misuse of language that simplifies explanations.
+
+Internally, this structure is "hidden" in a buffer. This way, there are few
+changes into intermediate layers (stream-interface and channels). They still
+manipulate buffers. Only the multiplexer and the stream have to know how data
+are really stored. From the HTX perspective, a buffer is just a memory
+area. When an HTX message is stored in a buffer, this one appears as full.
+
+ * General view of an HTX message :
+
+
+ buffer->area
+ |
+ |<------------ buffer->size == buffer->data ----------------------|
+ | |
+ | |<------------- Blocks array (htx->size) ------------------>|
+ V | |
+ +-----+-----------------+-------------------------+---------------+
+ | HTX | PAYLOADS ==> | | <== HTX_BLKs |
+ +-----+-----------------+-------------------------+---------------+
+ | | | |
+ |<-payloads part->|<----- free space ------>|<-blocks part->|
+ (htx->data)
+
+
+The blocks part remains linear and sorted. It may be seen as an array with
+negative indexes. But, instead of using negative indexes, we use positive
+positions to identify a block. This position is then converted to an address
+relatively to the beginning of the blocks array.
+
+ tail head
+ | |
+ V V
+ .....--+----+-----------------------+------+------+
+ | Bn | ... | B1 | B0 |
+ .....--+----+-----------------------+------+------+
+ ^ ^ ^
+ Addr of the block Addr of the block Addr of the block
+ at the position N at the position 1 at the position 0
+
+
+In the HTX structure, 3 "special" positions are stored :
+
+ - tail : Position of the newest inserted block
+ - head : Position of the oldest inserted block
+ - first : Position of the first block to (re)start the analyse
+
+The blocks part never wrap. If we have no space to allocate a new block and if
+there is a hole at the beginning of the blocks part (so at the end of the blocks
+array), we move back all blocks.
+
+
+ tail head tail head
+ | | | |
+ V V V V
+ ...+--------------+---------+ blocks ...----------+--------------+
+ | X== HTX_BLKS | | defrag | <== HTX_BLKS |
+ ...+--------------+---------+ =====> ...----------+--------------+
+
+
+The payloads part is a raw space that may wrap. A block's payload must never be
+accessed directly. Instead a block must be selected to retrieve the address of
+its payload.
+
+
+ +------------------------( B0.addr )--------------------------+
+ | +-------------------( B1.addr )----------------------+ |
+ | | +-----------( B2.addr )----------------+ | |
+ V V V | | |
+ +-----+----+-------+----+--------+-------------+-------+----+----+----+
+ | HTX | P0 | P1 | P2 | ...==> | | <=... | B2 | B1 | B0 |
+ +-----+----+-------+----+--------+-------------+-------+----+----+----+
+
+
+Because the payloads part may wrap, there are 2 usable free spaces :
+
+ - The free space in front of the blocks part. This one is used if and only if
+ the other one was not used yet.
+
+ - The free space at the beginning of the message. Once this one is used, the
+ other one is never used again, until a message defragmentation.
+
+
+ * Linear payloads part :
+
+
+ head_addr end_addr tail_addr
+ | | |
+ V V V
+ +-----+--------------------+-------------+--------------------+-------...
+ | HTX | | PAYLOADS | | HTX_BLKs
+ +-----+--------------------+-------------+--------------------+-------...
+ |<-- free space 2 -->| |<-- free space 1 -->|
+ (used if the other is too small) (used in priority)
+
+
+ * Wrapping payloads part :
+
+
+ head_addr end_addr tail_addr
+ | | |
+ V V V
+ +-----+----+----------------+--------+----------------+-------+-------...
+ | HTX | | PAYLOADS part2 | | PAYLOADS part1 | | HTX_BLKs
+ +-----+----+----------------+--------+----------------+-------+-------...
+ |<-->| |<------>| |<----->|
+ unusable free space unusable
+ free space free space
+
+
+Finally, when the usable free space is not enough to store a new block, unusable
+parts may be reclaimed with a full defragmentation. The payloads part is then
+realigned at the beginning of the blocks array and the free space becomes
+continuous again.
+
+
+3. The HTX blocks
+
+An HTX block can be as well a start-line as a header, a body part or a
+trailer. For all these types of block, a payload is attached to the block. It
+can also be a marker, the end-of-headers or end-of-trailers. For these blocks,
+there is no payload but it counts for a byte. It is important to not skip it
+when data are forwarded.
+
+As already said, a block is composed of metadata and a payload. Metadata are
+stored in the blocks part and are composed of 2 fields :
+
+  - info : It is a 32-bit field containing the block's type on 4 bits followed
+ by the payload length. See below for details.
+
+ - addr : The payload's address, if any, relatively to the beginning the
+ array used to store part of the HTTP message itself.
+
+
+ * Block's info representation :
+
+ 0b 0000 0000 0000 0000 0000 0000 0000 0000
+ ---- ------------------------ ---------
+ type value (1 MB max) name length (header/trailer - 256B max)
+ ----------------------------------
+ data length (256 MB max)
+ (body, method, path, version, status, reason)
+
+
+Supported types are :
+
+ - 0000 (0) : The request start-line
+ - 0001 (1) : The response start-line
+ - 0010 (2) : A header block
+ - 0011 (3) : The end-of-headers marker
+ - 0100 (4) : A data block
+ - 0101 (5) : A trailer block
+ - 0110 (6) : The end-of-trailers marker
+ - 1111 (15) : An unused block
+
+Other types are unused for now and reserved for future extensions.
+
+An HTX message is typically composed of following blocks, in this order :
+
+ - a start-line
+ - zero or more header blocks
+ - an end-of-headers marker
+ - zero or more data blocks
+ - zero or more trailer blocks (optional)
+ - an end-of-trailers marker (optional but always set if there is at least
+ one trailer block)
+
+Only one HTTP request at a time can be stored in an HTX message. For HTTP
+response, it is more complicated. Only one "final" response can be stored in an
+HTX message. It is a response with status-code 101 or greater or equal to
+200. But it may be preceded by several 1xx informational responses. Such
+responses are part of the same HTX message.
+
+When the end of the message is reached a special flag is set on the message
+(HTX_FL_EOM). It means no more data are expected for this message, except
+tunneled data. But tunneled data will never be mixed with message data to avoid
+ambiguities. Thus once the flag marking the end of the message is set, it is
+easy to know the message ends. The end is reached if the HTX message is empty or
+on the tail HTX block in the HTX message. Once all blocks of the HTX message are
+consumed, tunneled data, if any, may be transferred.
+
+
+3.1. The start-line
+
+Every HTX message starts with a start-line. Its payload is a "struct htx_sl". In
+addition to the parts of the HTTP start-line, this structure contains some
+information about the represented HTTP message, mainly in the form of flags
+(HTX_SL_F_*). For instance, if an HTTP message contains the header
+"content-length", then the flag HTX_SL_F_CLEN is set.
+
+Each HTTP message has its own start-line. So an HTX request has one and only one
+start-line because it must contain only one HTTP request at a time. But an HTX
+response may have more than one start-line if the final HTTP response is
+preceded by some 1xx informational responses.
+
+In HTTP/2, there is no start-line. So the H2 multiplexer must create one when it
+converts an H2 message to HTX :
+
+ - For the request, it uses the pseudo headers ":method", ":path" or
+ ":authority" depending on the method and the hardcoded version "HTTP/2.0".
+
+ - For the response, it uses the hardcoded version "HTTP/2.0", the
+ pseudo-header ":status" and an empty reason.
+
+
+3.2. The headers and trailers
+
+HTX Headers and trailers are quite similar. Different types are used to simplify
+headers processing. But from the HTX point of view, there is no real difference,
+except their position in the HTX message. The header blocks always follow an HTX
+start-line while trailer blocks come after the data. If there is no data, they
+follow the end-of-headers marker.
+
+Headers and trailers are the only blocks containing a Key/Value payload. The
+corresponding end-of marker must always be placed after each group to mark, as
+its name suggests, the end.
+
+In HTTP/1, trailers are only present on chunked messages. But chunked messages
+do not always have trailers. In this case, the end-of-trailers block may or may
+not be present. Multiplexers must be able to handle both situations. In HTTP/2,
+trailers are only present if a HEADERS frame is sent after DATA frames.
+
+
+3.3. The data
+
+The payload body of an HTTP message is stored as DATA blocks in the HTX
+message. For HTTP/1 messages, it is the message body without the chunks
+formatting, if any. For HTTP/2, it is the payload of DATA frames.
+
+The DATA blocks are the only HTX blocks that may be partially processed (copied
+or removed). All other types of block must be entirely processed. This means
+DATA blocks can be resized.
+
+
+3.4. The end-of markers
+
+These blocks are used to delimit parts of an HTX message. It exists two
+markers :
+
+ - end-of-headers (EOH)
+ - end-of-trailers (EOT)
+
+EOH is always present in an HTX message. EOT is optional.
+
+
+4. The HTX API
+
+
+4.1. Get/set HTX message from/to the underlying buffer
+
+The first thing to do to process an HTX message is to get it from the underlying
+buffer. There are 2 functions to do so, the second one relying on the first :
+
+ - htxbuf() returns an HTX message from a buffer. It does not modify the
+   buffer. It only initializes the HTX message if the buffer is empty.
+
+ - htx_from_buf() uses htxbuf(). But it also updates the underlying buffer so
+ that it appears as full.
+
+Both functions return a "zero-sized" HTX message if the buffer is null. This
+way, the HTX message is always valid. The first function is the default function
+to use. The second one is only useful when some content will be added. For
+instance, it is used by the HTX analyzers when HAProxy generates a response. Thus,
+the buffer is in a right state.
+
+Once the processing done, if the HTX message has been modified, the underlying
+buffer must also be updated, unless htx_from_buf() was used _AND_ data was only
+added. For all other cases, the function htx_to_buf() must be called.
+
+Finally, the function htx_reset() may be called at any time to reset an HTX
+message. And the function buf_room_for_htx_data() may be called to know if a raw
+buffer is full from the HTX perspective. It is used during conversion from/to
+the HTX.
+
+
+4.2. Helpers to deal with free space in an HTX message
+
+Once with an HTX message, following functions may help to process it :
+
+ - htx_used_space() and htx_meta_space() return, respectively, the total
+ space used in an HTX message and the space used by block's metadata only.
+
+ - htx_free_space() and htx_free_data_space() return, respectively, the total
+ free space in an HTX message and the free space available for the payload
+ if a new HTX block is stored (so it is the total free space minus the size
+ of an HTX block).
+
+ - htx_is_empty() and htx_is_not_empty() are boolean functions to know if an
+ HTX message is empty or not.
+
+ - htx_get_max_blksz() returns the maximum size available for the payload,
+ not exceeding a maximum, metadata included.
+
+ - htx_almost_full() should be used to know if an HTX message uses at least
+ 3/4 of its capacity.
+
+
+4.3. HTX Blocks manipulations
+
+Once the available space in an HTX message is known, the next step is to add HTX
+blocks. First of all the function htx_nbblks() returns the number of blocks
+allocated in an HTX message. Then, there is an add function per block's type :
+
+ - htx_add_stline() adds a start-line. The type (request or response) and the
+ flags of the start-line must be provided, as well as its three parts
+ (method,uri,version or version,status-code,reason).
+
+ - htx_add_header() and htx_add_trailers() are similar. The name and the
+ value must be provided. The inserted HTX block is returned on success or
+ NULL if an error occurred.
+
+ - htx_add_endof() must be used to add any end-of marker. The block's type
+ (EOH or EOT) must be specified. The inserted HTX block is returned on
+ success or NULL if an error occurred.
+
+ - htx_add_all_headers() and htx_add_all_trailers() add, respectively, a list
+ of headers and a list of trailers, followed by the appropriate end-of
+ marker. On success, this marker is returned. Otherwise, NULL is
+ returned. Note there is no rollback on the HTX message when an error
+ occurred. Some headers or trailers may have been added. So it is the
+ caller responsibility to take care of that.
+
+ - htx_add_data() must be used to add a DATA block. Unlike previous
+ functions, this one returns the number of bytes copied or 0 if nothing was
+ copied. If possible, the data are appended to the tail block if it is a
+ DATA block. Only a part of the payload may be copied because this function
+ will try to limit the message defragmentation and the wrapping of blocks
+ as far as possible.
+
+ - htx_add_data_atonce() must be used if all data must be added or nothing.
+ It tries to insert all the payload, this function returns the inserted
+ block on success. Otherwise it returns NULL.
+
+When an HTX block is added, it is always the last one (the tail). But, if a
+block must be added at a specific place, it is not really handy. 2 functions may
+help (others could be added) :
+
+ - htx_add_last_data() adds a DATA block just after all other DATA blocks and
+ before any trailers and EOT marker. It relies on htx_add_data_atonce(), so
+ a defragmentation may be performed.
+
+ - htx_move_blk_before() moves a specific block just after another one. Both
+ blocks must already be in the HTX message and the block to move must
+ always be placed after the "pivot".
+
+Once added, there are three functions to update the block's payload :
+
+ - htx_replace_stline() updates a start-line. The HTX block must be passed as
+ argument. Only string parts of the start-line are updated by this
+ function. On success, it returns the new start-line. So it is pretty easy
+ to update its flags. NULL is returned if an error occurred.
+
+ - htx_replace_header() fully replaces a header (its name and its value) by a
+    new one. The HTX block must be passed as argument, as well as its new name
+ and its new value. The new header can be smaller or larger than the old
+    one. This function returns the new HTX block on success, or NULL if an
+ error occurred.
+
+ - htx_replace_blk_value() replaces a part of a block's payload or its
+ totality. It works for HEADERS, TRAILERS or DATA blocks. The HTX block
+ must be provided with the part to remove and the new one. The new part can
+ be smaller or larger than the old one. This function returns the new HTX
+    block on success, or NULL if an error occurred.
+
+ - htx_change_blk_value_len() changes the size of the value. It is the caller
+ responsibility to change the value itself, make sure there is enough space
+ and update allocated value. This function updates the HTX message
+ accordingly.
+
+ - htx_set_blk_value_len() changes the size of the value. It is the caller
+ responsibility to change the value itself, make sure there is enough space
+ and update allocated value. Unlike the function
+ htx_change_blk_value_len(), this one does not update the HTX message. So
+ it should be used with caution.
+
+ - htx_cut_data_blk() removes <n> bytes from the beginning of a DATA
+ block. The block's start address and its length are adjusted, and the
+ htx's total data count is updated. This is used to mark that part of some
+ data were transferred from a DATA block without removing this DATA
+ block. No sanity check is performed, the caller is responsible for doing
+ this exclusively on DATA blocks, and never removing more than the block's
+ size.
+
+ - htx_remove_blk() removes a block from an HTX message. It returns the
+ following block or NULL if it is the tail block.
+
+Finally, a block may be removed using the function htx_remove_blk(). This
+function returns the block following the one removed or NULL if it is the tail
+block.
+
+
+4.4. The HTX start-line
+
+Unlike other HTX blocks, the start-line is a bit special because its payload is
+a structure followed by its three parts :
+
+ +--------+-------+-------+-------+
+ | HTX_SL | PART1 | PART2 | PART3 |
+ +--------+-------+-------+-------+
+
+Some macros and functions may help to manipulate these parts :
+
+ - HTX_SL_P{N}_LEN() and HTX_SL_P{N}_PTR() are macros to get the length of a
+ part and a pointer on it. {N} should be 1, 2 or 3.
+
+ - HTX_SL_REQ_MLEN(), HTX_SL_REQ_ULEN(), HTX_SL_REQ_VLEN(),
+ HTX_SL_REQ_MPTR(), HTX_SL_REQ_UPTR() and HTX_SL_REQ_VPTR() are macros to
+ get info about a request start-line. These macros only wrap HTX_SL_P*
+ ones.
+
+ - HTX_SL_RES_VLEN(), HTX_SL_RES_CLEN(), HTX_SL_RES_RLEN(),
+ HTX_SL_RES_VPTR(), HTX_SL_RES_CPTR() and HTX_SL_RES_RPTR() are macros to
+ get info about a response start-line. These macros only wrap HTX_SL_P*
+ ones.
+
+  - htx_sl_p1(), htx_sl_p2() and htx_sl_p3() are functions to get the ist
+ corresponding to the right part of a start-line.
+
+ - htx_sl_req_meth(), htx_sl_req_uri() and htx_sl_req_vsn() get the ist
+ corresponding to the right part of a request start-line.
+
+ - htx_sl_res_vsn(), htx_sl_res_code() and htx_sl_res_reason() get the ist
+ corresponding to the right part of a response start-line.
+
+
+4.5. Iterate on the HTX message
+
+To iterate on an HTX message, the first thing to do is to get the HTX block to
+start the loop. There are three special blocks in an HTX message that may be
+good candidates to start a loop :
+
+ - the head block. It is the oldest inserted block. Multiplexers always start
+ to consume an HTX message from this block. The function htx_get_head()
+    returns its position and htx_get_head_blk() returns the block itself. In
+ addition, the function htx_get_head_type() returns its block's type.
+
+ - the tail block. It is the newest inserted block. The function
+ htx_get_tail() returns its position and htx_get_tail_blk() returns the
+    block itself. In addition, the function htx_get_tail_type() returns its
+ block's type.
+
+ - the first block. It is the block where to (re)start the analyse. It is
+ used as start point by HTX analyzers. The function htx_get_first() returns
+    its position and htx_get_first_blk() returns the block itself. In
+ addition, the function htx_get_first_type() returns its block's type.
+
+For all these functions, if the HTX message is empty, -1 is returned for the
+block's position, NULL instead of a block and HTX_BLK_UNUSED for its type.
+
+Then to iterate on blocks, forward or backward :
+
+ - htx_get_prev() and htx_get_next() return, respectively, the position of
+ the previous block or the next block, given a specific position. Or -1 if
+ an edge is reached.
+
+ - htx_get_prev_blk() and htx_get_next_blk() return, respectively, the
+ previous block or the next one, given a specific block. Or NULL if an edge
+ is reached.
+
+4.6. Access block content and info
+
+Following functions may be used to retrieve information about a specific HTX
+block :
+
+ - htx_get_blk_pos() returns the position of a block. It must be in the HTX
+ message.
+
+ - htx_get_blk_ptr() returns a pointer on the payload of a block.
+
+ - htx_get_blk_type() returns the type of a block.
+
+ - htx_get_blksz() returns the payload size of a block
+
+ - htx_get_blk_name() returns the name of a block, only if it is a header or
+ a trailer. Otherwise, it returns an empty string.
+
+ - htx_get_blk_value() returns the value of a block, depending on its
+ type. For header and trailer blocks, it is the value field. For markers
+ (EOH or EOT), an empty string is returned. For other blocks an ist
+ pointing on the block payload is returned.
+
+ - htx_is_unique_blk() may be used to know if a block is the only one
+ remaining inside an HTX message, excluding unused blocks. This function is
+ pretty useful to determine the end of a HTX message, in conjunction with
+ HTX_FL_EOM flag.
+
+4.7. Advanced functions
+
+Some more advanced functions may be used to do complex processing on the HTX
+message. These functions are used by HTX analyzers or by multiplexers.
+
+ - htx_truncate() removes all blocks after the one containing a specific
+ offset relatively to the head block of the HTX message. If the offset is
+ inside a DATA block, it is truncated. For all other blocks, the removal
+ starts to the next block.
+
+ - htx_drain() tries to remove a specific amount of bytes of payload. If the
+ tail block is a DATA block, it may be truncated if necessary. All other
+ block are removed at once or kept. This function returns a mixed value,
+ with the first block not removed, or NULL if everything was removed, and
+ the amount of data drained.
+
+ - htx_xfer_blks() transfers HTX blocks from an HTX message to another,
+ stopping on the first block of a specified type or when a specific amount
+ of bytes, including meta-data, was moved. If the tail block is a DATA
+ block, it may be partially moved. All other block are transferred at once
+ or kept. This function returns a mixed value, with the last block moved,
+ or NULL if nothing was moved, and the amount of data transferred. When
+ HEADERS or TRAILERS blocks must be transferred, this function transfers
+ all of them. Otherwise, if it is not possible, it triggers an error. It is
+ the caller responsibility to transfer all headers or trailers at once.
+
+  - htx_append_msg() appends an HTX message to another one. All the message is
+ copied or nothing. So, if an error occurred, a rollback is performed. This
+ function returns 1 on success and 0 on error.
+
+ - htx_reserve_max_data() Reserves the maximum possible size for an HTX data
+ block, by extending an existing one or by creating a new one. It returns a
+ compound result with the HTX block and the position where new data must be
+ inserted (0 for a new block). If an error occurs or if there is no space
+ left, NULL is returned instead of a pointer on an HTX block.
+
+ - htx_find_offset() looks for the HTX block containing a specific offset,
+ starting at the HTX message's head. The function returns the found HTX
+ block and the position inside this block where the offset is. If the
+ offset is outside of the HTX message, NULL is returned.
+
+ - htx_defrag() defragments an HTX message. It removes unused blocks and
+ unwraps the payloads part. A temporary buffer is used to do so. This
+ function never fails. A referenced block may be provided. If so, the
+ corresponding new block is returned. Otherwise, NULL is returned.
diff --git a/doc/internals/api/initcalls.txt b/doc/internals/api/initcalls.txt
new file mode 100644
index 0000000..a341edc
--- /dev/null
+++ b/doc/internals/api/initcalls.txt
@@ -0,0 +1,366 @@
+Initialization stages aka how to get your code initialized at the right moment
+
+
+1. Background
+
+Originally all subsystems were initialized via a dedicated function call
+from the huge main() function. Then some code started to become conditional
+or a bit more modular and the #ifdef placed there became a mess, resulting
+in init code being moved to function constructors in each subsystem's own
+file. Then pools of various things were introduced, starting to make the
+whole init sequence more complicated due to some forms of internal
+dependencies. Later epoll was introduced, requiring a post-fork callback,
+and finally threads arrived also requiring some post-thread init/deinit
+and allocation, marking the old architecture's last breath. Finally the
+whole thing resulted in lots of init code duplication and was simplified
+in 1.9 with the introduction of initcalls and initialization stages.
+
+
+2. New architecture
+
+The new architecture relies on two layers :
+ - the registration functions
+ - the INITCALL macros and initialization stages
+
+The first ones are mostly used to add a callback to a list. The second ones
+are used to specify when to call a function. Both are totally independent,
+however they are generally combined via another set consisting in the REGISTER
+macros which make some registration functions be called at some specific points
+during the init sequence.
+
+
+3. Registration functions
+
+Registration functions never fail. Or more precisely, if they fail it will only
+be on out-of-memory condition, and they will cause the process to immediately
+exit. As such they do not return any status and the caller doesn't have to care
+about their success.
+
+All available functions are described below in alphanumeric ordering. Please
+make sure to respect this ordering when adding new ones.
+
+- void hap_register_build_opts(const char *str, int must_free)
+
+ This appends the zero-terminated constant string <str> to the list of known
+ build options that will be reported on the output of "haproxy -vv". A line
+ feed character ('\n') will automatically be appended after the string when it
+ is displayed. The <must_free> argument must be zero, unless the string was
+ allocated by any malloc-compatible function such as malloc()/calloc()/
+ realloc()/strdup() or memprintf(), in which case it's better to pass a
+ non-null value so that the string is freed upon exit. Note that despite the
+ function's prototype taking a "const char *", the pointer will actually be
+ cast and freed. The const char* is here to leave more freedom to use consts
+ when making such options lists.
+
+- void hap_register_per_thread_alloc(int (*fct)())
+
+ This adds a call to function <fct> to the list of functions to be called when
+ threads are started, at the beginning of the polling loop. This is also valid
+ for the main thread and will be called even if threads are disabled, so that
+ it is guaranteed that this function will be called in any circumstance. Each
+ thread will first call all these functions exactly once when it starts. Calls
+ are serialized by the init_mutex, so that locking is not necessary in these
+ functions. There is no relation between the thread numbers and the callback
+ ordering. The function is expected to return non-zero on success, or zero on
+ failure. A failure will make the process emit a succinct error message and
+ immediately exit. See also hap_register_per_thread_free() for functions
+ called after these ones.
+
+- void hap_register_per_thread_deinit(void (*fct)());
+
+ This adds a call to function <fct> to the list of functions to be called when
+ threads are gracefully stopped, at the end of the polling loop. This is also
+ valid for the main thread and will be called even if threads are disabled, so
+ that it is guaranteed that this function will be called in any circumstance
+ if the process experiences a soft stop. Each thread will call this function
+ exactly once when it stops. However contrary to _alloc() and _init(), the
+ calls are made without any protection, thus if any shared resource is touched
+ by the function, the function is responsible for protecting it. The reason
+ behind this is that such resources are very likely to be still in use in one
+ other thread and that most of the time the functions will in fact only touch
+ a refcount or deinitialize their private resources. See also
+ hap_register_per_thread_free() for functions called after these ones.
+
+- void hap_register_per_thread_free(void (*fct)());
+
+ This adds a call to function <fct> to the list of functions to be called when
+ threads are gracefully stopped, at the end of the polling loop, after all calls
+ to _deinit() callbacks are done for this thread. This is also valid for the
+ main thread and will be called even if threads are disabled, so that it is
+ guaranteed that this function will be called in any circumstance if the
+ process experiences a soft stop. Each thread will call this function exactly
+ once when it stops. However contrary to _alloc() and _init(), the calls are
+ made without any protection, thus if any shared resource is touched by the
+ function, the function is responsible for protecting it. The reason behind
+ this is that such resources are very likely to be still in use in one other
+ thread and that most of the time the functions will in fact only touch a
+ refcount or deinitialize their private resources. See also
+ hap_register_per_thread_deinit() for functions called before these ones.
+
+- void hap_register_per_thread_init(int (*fct)())
+
+ This adds a call to function <fct> to the list of functions to be called when
+ threads are started, at the beginning of the polling loop, right after the
+ list of _alloc() functions. This is also valid for the main thread and will
+ be called even if threads are disabled, so that it is guaranteed that this
+ function will be called in any circumstance. Each thread will call this
+ function exactly once when it starts, and calls are serialized by the
+ init_mutex which is held over all _alloc() and _init() calls, so that locking
+ is not necessary in these functions. In other words for all threads but the
+ current one, the sequence of _alloc() and _init() calls will be atomic. There
+ is no relation between the thread numbers and the callback ordering. The
+ function is expected to return non-zero on success, or zero on failure. A
+ failure will make the process emit a succinct error message and immediately
+ exit. See also hap_register_per_thread_alloc() for functions called before
+ these ones.
+
+- void hap_register_pre_check(int (*fct)())
+
+ This adds a call to function <fct> to the list of functions to be called at
+ the step just before the configuration validity checks. This is useful when you
+ need to create things like it would have been done during the configuration
+ parsing and where the initialization should continue in the configuration
+ check.
+ It could be used for example to generate a proxy with multiple servers using
+ the configuration parser itself. At this step the final trash buffers are
+ allocated. Threads are not yet started so no protection is required. The
+ function is expected to return non-zero on success, or zero on failure. A
+ failure will make the process emit a succinct error message and immediately
+ exit.
+
+- void hap_register_post_check(int (*fct)())
+
+ This adds a call to function <fct> to the list of functions to be called at
+ the end of the configuration validity checks, just at the point where the
+ program either forks or exits depending whether it's called with "-c" or not.
+ Such calls are suited for memory allocation or internal table pre-computation
+ that would preferably not be done on the fly to avoid inducing extra time to
+ a pure configuration check. Threads are not yet started so no protection is
+ required. The function is expected to return non-zero on success, or zero on
+ failure. A failure will make the process emit a succinct error message and
+ immediately exit.
+
+- void hap_register_post_deinit(void (*fct)())
+
+ This adds a call to function <fct> to the list of functions to be called when
+ freeing the global sections at the end of deinit(), after everything is
+ stopped. The process is single-threaded at this point, thus these functions
+ are suitable for releasing configuration elements provided that no other
+ _deinit() function uses them, i.e. only close/release what is strictly
+ private to the subsystem. Since such functions are mostly only called during
+ soft stops (reloads) or failed startups, they tend to experience much less
+ test coverage than others despite being more exposed, and as such a lot of
+ care must be taken to test them especially when facing partial subsystem
+ initializations followed by errors.
+
+- void hap_register_post_proxy_check(int (*fct)(struct proxy *))
+
+ This adds a call to function <fct> to the list of functions to be called for
+ each proxy, after the calls to _post_server_check(). This can allow, for
+ example, to pre-configure default values for an option in a frontend based on
+ the "bind" lines or something in a backend based on the "server" lines. It's
+ worth being aware that such a function must be careful not to waste too much
+ time in order not to significantly slow down configurations with tens of
+ thousands of backends. The function is expected to return non-zero on
+ success, or zero on failure. A failure will make the process emit a succinct
+ error message and immediately exit.
+
+- void hap_register_post_server_check(int (*fct)(struct server *))
+
+ This adds a call to function <fct> to the list of functions to be called for
+ each server, after the call to check_config_validity(). This can allow, for
+ example, to preset a health state on a server or to allocate a protocol-
+ specific memory area. It's worth being aware that such a function must be
+ careful not to waste too much time in order not to significantly slow down
+ configurations with tens of thousands of servers. The function is expected
+ to return non-zero on success, or zero on failure. A failure will make the
+ process emit a succinct error message and immediately exit.
+
+- void hap_register_proxy_deinit(void (*fct)(struct proxy *))
+
+ This adds a call to function <fct> to the list of functions to be called when
+ freeing the resources during deinit(). These functions will be called as part
+ of the proxy's resource cleanup. Note that some of the proxy's fields will
+ already have been freed and others not, so such a function must not use any
+ information from the proxy that is subject to being released. In particular,
+ all servers have already been deleted. Since such functions are mostly only
+ called during soft stops (reloads) or failed startups, they tend to
+ experience much less test coverage than others despite being more exposed,
+ and as such a lot of care must be taken to test them especially when facing
+ partial subsystem initializations followed by errors. It's worth mentioning
+ that too slow functions could have a significant impact on the configuration
+ check or exit time especially on large configurations.
+
+- void hap_register_server_deinit(void (*fct)(struct server *))
+
+ This adds a call to function <fct> to the list of functions to be called when
+ freeing the resources during deinit(). These functions will be called as part
+ of the server's resource cleanup. Note that some of the server's fields will
+ already have been freed and others not, so such a function must not use any
+ information from the server that is subject to being released. Since such
+ functions are mostly only called during soft stops (reloads) or failed
+ startups, they tend to experience much less test coverage than others despite
+ being more exposed, and as such a lot of care must be taken to test them
+ especially when facing partial subsystem initializations followed by errors.
+ It's worth mentioning that too slow functions could have a significant impact
+ on the configuration check or exit time especially on large configurations.
+
+
+4. Initialization stages
+
+In order to offer some guarantees, the startup of the program is split into
+several stages. Some callbacks can be placed into each of these stages using
+an INITCALL macro, with 0 to 3 arguments, respectively called INITCALL0 to
+INITCALL3. These macros must be placed anywhere at the top level of a C file,
+preferably at the end so that the referenced symbols have already been met,
+but it may also be fine to place them right after the callbacks themselves.
+
+Such callbacks are referenced into small structures containing a pointer to the
+function and 3 arguments. NULL replaces unused arguments. The callbacks are
+cast to (void (*)(void *, void *, void *)) and the arguments to (void *).
+
+The first argument to the INITCALL macro is the initialization stage. The
+second one is the callback function, and others if any are the arguments.
+The init stage must be among the values of the "init_stage" enum, currently,
+and in this execution order:
+
+ - STG_PREPARE : used to preset variables, pre-initialize lookup tables and
+ pre-initialize list heads
+ - STG_LOCK : used to pre-initialize locks
+ - STG_REGISTER : used to register static lists such as keywords
+ - STG_ALLOC : used to allocate the required structures
+ - STG_POOL : used to create pools
+ - STG_INIT : used to initialize subsystems
+
+Each stage is guaranteed that previous stages have successfully completed. This
+means that an INITCALL placed at stage STG_INIT is guaranteed that all pools
+were already created and will be usable. Conversely, an INITCALL placed at
+stage STG_REGISTER must not rely on any field that requires preliminary
+allocation nor initialization. A callback cannot rely on other callbacks of the
+same stage, as the execution order within a stage is undefined and essentially
+depends on the linking order.
+
+The STG_REGISTER level is made for run-time linking of the various modules that
+compose the executable. Keywords, protocols and various other elements that are
+ locally known to each compilation unit will be appended into common lists at
+boot time. This is why this call is placed just before STG_ALLOC.
+
+Note that trash is needed in various functions. Trash is a pool and is
+allocated during STG_POOL, so it's not permitted to use it before STG_INIT,
+where it will only use the default size, and may be reallocated later with a
+different size.
+
+Example: register a very early call to init_log() with no argument, and another
+ call to cli_register_kw(&cli_kws) much later:
+
+ INITCALL0(STG_PREPARE, init_log);
+ INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+Technically speaking, each call to such a macro adds a distinct local symbol
+whose dynamic name involves the line number. These symbols are placed into a
+separate section and the beginning and end section pointers are provided by the
+linker. When too old a linker is used, a fallback is applied consisting in
+placing them into a linked list which is built by a constructor function for
+each initcall (this takes more room).
+
+Due to the symbols internally using the line number, it is very important not
+to place more than one INITCALL per line in the source file.
+
+It is also strongly recommended that functions and referenced arguments are
+static symbols local to the source file, unless they are global registration
+functions like in the example above with cli_register_kw(), where only the
+argument is a local keywords table.
+
+INITCALLs do not expect the callback function to return anything and as such
+do not perform any error check. As such, they are very similar to constructors
+offered by the compiler except that they are segmented in stages. It is thus
+the responsibility of the called functions to perform their own error checking
+and to exit in case of error. This may change in the future.
+
+
+5. REGISTER family of macros
+
+The association of INITCALLs and registration functions allows to perform some
+early dynamic registration of functions to be used anywhere, as well as values
+to be added to existing lists without having to manipulate list elements. For
+the sake of simplification, these combinations are available as a set of
+REGISTER macros which register calls to certain functions at the appropriate
+init stage. Such macros must be used at the top level in a file, just like
+INITCALL macros. The following macros are currently supported. Please keep them
+alphanumerically ordered:
+
+- REGISTER_BUILD_OPTS(str)
+
+ Adds the constant string <str> to the list of build options. This is done by
+ registering a call to hap_register_build_opts(str, 0) at stage STG_REGISTER.
+ The string will not be freed.
+
+- REGISTER_CONFIG_POSTPARSER(name, parser)
+
+ Adds a call to function <parser> at the end of the config parsing. The
+ function is called at the very end of check_config_validity() and may be used
+ to initialize a subsystem based on global settings for example. This is done
+ by registering a call to cfg_register_postparser(name, parser) at stage
+ STG_REGISTER.
+
+- REGISTER_CONFIG_SECTION(name, parse, post)
+
+ Registers a new config section name <name> which will be parsed by function
+ <parse> (if not null), and with an optional call to function <post> at the
+ end of the section. Function <parse> must be of type (int (*parse)(const char
+ *file, int linenum, char **args, int inv)), and returns 0 on success or an
+ error code among the ERR_* set on failure. The <post> callback takes no
+ argument and returns a similar error code. This is achieved by registering a
+ call to cfg_register_section() with the three arguments at stage
+ STG_REGISTER.
+
+- REGISTER_PER_THREAD_ALLOC(fct)
+
+ Registers a call to register_per_thread_alloc(fct) at stage STG_REGISTER.
+
+- REGISTER_PER_THREAD_DEINIT(fct)
+
+ Registers a call to register_per_thread_deinit(fct) at stage STG_REGISTER.
+
+- REGISTER_PER_THREAD_FREE(fct)
+
+ Registers a call to register_per_thread_free(fct) at stage STG_REGISTER.
+
+- REGISTER_PER_THREAD_INIT(fct)
+
+ Registers a call to register_per_thread_init(fct) at stage STG_REGISTER.
+
+- REGISTER_POOL(ptr, name, size)
+
+ Used internally to declare a new pool. This is made by calling function
+ create_pool_callback() with these arguments at stage STG_POOL. Do not use it
+ directly, use either DECLARE_POOL() or DECLARE_STATIC_POOL() instead.
+
+- REGISTER_PRE_CHECK(fct)
+
+ Registers a call to register_pre_check(fct) at stage STG_REGISTER.
+
+- REGISTER_POST_CHECK(fct)
+
+ Registers a call to register_post_check(fct) at stage STG_REGISTER.
+
+- REGISTER_POST_DEINIT(fct)
+
+ Registers a call to register_post_deinit(fct) at stage STG_REGISTER.
+
+- REGISTER_POST_PROXY_CHECK(fct)
+
+ Registers a call to register_post_proxy_check(fct) at stage STG_REGISTER.
+
+- REGISTER_POST_SERVER_CHECK(fct)
+
+ Registers a call to register_post_server_check(fct) at stage STG_REGISTER.
+
+- REGISTER_PROXY_DEINIT(fct)
+
+ Registers a call to register_proxy_deinit(fct) at stage STG_REGISTER.
+
+- REGISTER_SERVER_DEINIT(fct)
+
+ Registers a call to register_server_deinit(fct) at stage STG_REGISTER.
+
diff --git a/doc/internals/api/ist.txt b/doc/internals/api/ist.txt
new file mode 100644
index 0000000..0f118d6
--- /dev/null
+++ b/doc/internals/api/ist.txt
@@ -0,0 +1,167 @@
+2021-11-08 - Indirect Strings (IST) API
+
+
+1. Background
+-------------
+
+When parsing traffic, most of the standard C string functions are unusable
+since they rely on a trailing zero. In addition, for the rare ones that support
+a length, we have to constantly maintain both the pointer and the length. But
+then, it's easy to come up with complex lengths and offsets calculations all
+over the place, rendering the code hard to read and bugs hard to avoid or spot.
+
+IST provides a solution to this by defining a structure made of exactly two
+word size elements, that most C ABIs know how to handle as a register when
+used as a function argument or a function's return value. The functions are
+inlined to leave a maximum set of opportunities to the compiler or optimization
+and expression reduction, and as a result they are often inexpensive to use. It
+is important however to keep in mind that all of these are designed for minimal
+code size when dealing with short strings (i.e. parsing tokens in protocols),
+and they are not optimal for processing large blocks.
+
+
+2. API description
+------------------
+
+IST are defined like this:
+
+ struct ist {
+ char *ptr; // pointer to the string's first byte
+ size_t len; // number of valid bytes starting from ptr
+ };
+
+A string is not set if its ->ptr member is NULL. In this case .len is undefined
+and is recommended to be zero.
+
+Declaring a function returning an IST:
+
+ struct ist produce_ist(int ok)
+ {
+ return ok ? IST("OK") : IST("KO");
+ }
+
+Declaring a function consuming an IST:
+
+ void say_ist(struct ist i)
+ {
+ write(1, istptr(i), istlen(i));
+ }
+
+Chaining the two:
+
+ void say_ok(int ok)
+ {
+ say_ist(produce_ist(ok));
+ }
+
+Notes:
+ - the arguments are passed as value, not reference, so there's no need for
+ any "const" in their declaration (except to catch coding mistakes).
+ Pointers to ist may benefit from being marked "const" however.
+
+ - similarly for the return value, there's no point in marking it "const" as
+ this would protect the pointer and length, not the data.
+
+ - use ist0() to append a trailing zero to a variable string for use with
+ printf()'s "%s" format, or for use with functions that work on NUL-
+ terminated strings, but beware of not doing this with constants.
+
+ - the API provides a starting pointer and current length, but does not
+ provide an allocated size. It remains up to the caller to know how large
+ the allocated area is when adding data, though most functions make this
+ easy.
+
+The following macros and functions are defined. Those whose name starts with
+underscores require special care and must not be used without being certain
+they are properly used (typically subject to buffer overflows if misused). Note
+that most functions were added over time depending on instant needs, and some
+are very close to each other. Many useful functions are still missing and would
+deserve being added.
+
+Below, arguments "i1","i2" are all of type "ist". Arguments "s" are
+NUL-terminated strings of type "char*", and "cs" are of type "const char *".
+Arguments "c" are of type "char", and "n" are of type size_t.
+
+ IST(cs):ist make constant IST from a NUL-terminated const string
+ IST_NULL:ist return an unset IST = ist2(NULL,0)
+ __istappend(i1,c):ist append character <c> at the end of ist <i1>
+ ist(s):ist return an IST from a nul-terminated string
+ ist0(i1):char* write a \0 at the end of an IST, return the string
+ ist2(cs,l):ist return a variable IST from a const string and length
+ ist2bin(s,i1):ist copy IST into a buffer, return the result
+ ist2bin_lc(s,i1):ist like ist2bin() but turning to lower case
+ ist2bin_uc(s,i1):ist like ist2bin() but turning to upper case
+ ist2str(s,i1):ist copy IST into a buffer, add NUL and return the result
+ ist2str_lc(s,i1):ist like ist2str() but turning to lower case
+ ist2str_uc(s,i1):ist like ist2str() but turning to upper case
+ ist_find(i1,c):ist return first occurrence of char <c> in <i1>
+ ist_find_ctl(i1):char* return pointer to first CTL char in <i1> or NULL
+ ist_skip(i1,c):ist return first occurrence of char not <c> in <i1>
+ istadv(i1,n):ist advance the string by <n> characters
+ istalloc(n):ist return allocated string of zero initial length
+ istcat(d,s,n):ssize_t copy <s> after <d> for <n> chars max, return len or -1
+ istchr(i1,c):char* return pointer to first occurrence of <c> in <i1>
+ istclear(i1*):size_t return previous size and set size to zero
+ istcpy(d,s,n):ssize_t copy <s> over <d> for <n> chars max, return len or -1
+ istdiff(i1,i2):int return the ordinal difference, like strcmp()
+ istdup(i1):ist allocate new ist and copy original one into it
+ istend(i1):char* return pointer to first character after the IST
+ isteq(i1,i2):int return non-zero if strings are equal
+ isteqi(i1,i2):int like isteq() but case-insensitive
+ istfree(i1*) free of allocated <i1>/IST_NULL and set it to IST_NULL
+ istissame(i1,i2):int return true if pointers and lengths are equal
+ istist(i1,i2):ist return first occurrence of <i2> in <i1>
+ istlen(i1):size_t return the length of the IST (number of characters)
+ istmatch(i1,i2):int return non-zero if i1 starts like i2 (empty OK)
+ istmatchi(i1,i2):int like istmatch() but case insensitive
+ istneq(i1,i2,n):int like isteq() but limited to the first <n> chars
+ istnext(i1):ist return the IST advanced by one character
+ istnmatch(i1,i2,n):int like istmatch() but limited to the first <n> chars
+ istpad(s,i1):ist copy IST into a buffer, add a NUL, return the result
+ istptr(i1):char* return the starting pointer of the IST
+ istscat(d,s,n):ssize_t same as istcat() but always place a NUL at the end
+ istscpy(d,s,n):ssize_t same as istcpy() but always place a NUL at the end
+ istshift(i1*):char return the first character and advance the IST by one
+ istsplit(i1*,c):ist return part before <c>, make ist start from <c>
+ iststop(i1,c):ist truncate ist before first occurrence of <c>
+ isttest(i1):int return true if ist is not NULL, false otherwise
+ isttrim(i1,n):ist return ist trimmed to no more than <n> characters
+ istzero(i1,n):ist trim to <n> chars, trailing zero included.
+
+
+3. Quick index by typical C construct or function
+-------------------------------------------------
+
+Some common C constructs may be adjusted to use ist instead. The mapping is not
+ always one-to-one, but usually the computations on the length part tend to
+disappear in the refactoring, allowing to directly chain function calls. The
+entries below are hints to figure what function to look for in order to rewrite
+some common use cases.
+
+ char* IST equivalent
+
+ strchr() istchr(), ist_find(), iststop()
+ strstr() istist()
+ strcpy() istcpy()
+ strscpy() istscpy()
+ strlcpy() istscpy()
+ strcat() istcat()
+ strscat() istscat()
+ strlcat() istscat()
+ strcmp() istdiff()
+ strdup() istdup()
+ !strcmp() isteq()
+ !strncmp() istneq(), istmatch(), istnmatch()
+ !strcasecmp() isteqi()
+ !strncasecmp() istneqi(), istmatchi()
+ strtok() istsplit()
+ return NULL return IST_NULL
+ s = malloc() s = istalloc()
+ free(s); s = NULL istfree(&s)
+ p != NULL isttest(p)
+ c = *(p++) c = istshift(p)
+ *(p++) = c __istappend(p, c)
+ p += n istadv(p, n)
+ p + strlen(p) istend(p)
+ p[max] = 0 isttrim(p, max)
+ p[max+1] = 0 istzero(p, max)
diff --git a/doc/internals/api/layers.txt b/doc/internals/api/layers.txt
new file mode 100644
index 0000000..b5c35f4
--- /dev/null
+++ b/doc/internals/api/layers.txt
@@ -0,0 +1,190 @@
+2022-05-27 - Stream layers in HAProxy 2.6
+
+
+1. Background
+
+There are streams at plenty of levels in haproxy, essentially due to the
+introduction of multiplexed protocols which provide high-level streams on top
+of low-level streams, themselves either based on stream-oriented protocols or
+datagram-oriented protocols.
+
+The refactoring of the appctx and muxes that allowed to drop a lot of duplicate
+code between 2.5 and 2.6-dev6 raised another concern with some entities like
+"conn_stream" that were not specific to connections anymore, "endpoints" that
+became entities on their own, and "targets" whose life had been extended to
+last all along a connection.
+
+It was time to rename all such legacy entities introduced in 1.8 and which had
+turned particularly confusing over time as their roles evolved.
+
+
+2. Naming principles
+
+The global renaming of some entities between streams and connections was
+articulated around several principles:
+
+ - avoid the confusing use of "context" in shared places. For example, the
+ endpoint's connection is in "ctx" and nothing makes it obvious that the
+ endpoint's context is a connection, especially when an applet is there.
+
+ - reserve relative nouns for pointers and not for types. "endpoint", just
+ like "owner" or "peer" is relative, but when accessed from a different
+ layer it starts to make no sense at all, or to make one believe it's
+ something else, particularly with void*.
+
+ - avoid too generic terms that have multiple meanings, or words that are
+ synonyms in a same place (e.g. "peer" and "remote", or "endpoint" and
+ "target"). If two synonyms are needed to designate two distinct entities,
+ there's probably a problem elsewhere, or the problem is poorly defined.
+
+ - make it clearer that all that is manipulated is related to streams. This
+ is particularly important in sample fetch functions for example, which tend
+ to require low-level access and could be misled in trying to follow the
+ wrong chain when trying to get information about a connection.
+
+ - use easily spellable short names that abbreviate unambiguously when used
+ together in adjacent contexts
+
+
+3. Current state as of 2.6
+
+- when a name is required to designate the lower block that starts at the mux
+ stream or the appctx, it is spoken of as a "stream endpoint", and abbreviated
+ "se". It's okay because while "endpoint" itself is relative, "stream
+ endpoint" unequivocally designates one extremity of a stream. If a type is
+ needed for this in the future (e.g. via obj_type), then the type "stendp"
+ may be used. Before 2.6-dev6 there was no name for this, it was known as
+ conn_stream->ctx.
+
+- the 2.6-dev6 cs_endpoint which preserves the state of a mux stream or an
+ appctx and abstracts them in front of a conn_stream becomes a "stream
+ endpoint descriptor", of type "sedesc" and often abbreviated "sd", "sed"
+ or "ed". Its "target" pointer became "se" as per the rule above. Before
+ 2.6-dev6, these elements were mixed with others inside conn_stream. From
+ the appctx it's called "sedesc" (few occurrences hence long name OK).
+
+- the conn_stream which is always attached to either a stream or a health check
+ and that is used to reach a mux or an applet becomes a "stream connector" of
+ type "stconn", generally abbreviated "sc". Its "endp" pointer becomes
+ "sedesc" as per the rule above, and that one has a back pointer "sc". The
+ stream uses "scf" and "scb" as the respective front and back pointers to the
+ stconns. Prior to 2.6-dev6, these parts were split between conn_stream and
+ stream_interface.
+
+- the sedesc's "ctx" which is solely used to store the connection as of now, is
+ renamed "conn" to avoid any doubt in the context of applets or even muxes. In
+ the future the connection should be attached to the "se" instead and this
+ pointer should disappear (or be recycled for anything else).
+
+The new 2.6 model looks like this:
+
+ +------------------------+
+ | stream or health check |
+ +------------------------+
+ ^ \ scf, scb
+ / \
+ | |
+ \ /
+ app \ v
+ +----------+
+ | stconn |
+ +----------+
+ ^ \ sedesc
+ / \
+ . . . . | . . . | . . . . . split point (retries etc)
+ \ /
+ sc \ v
+ +----------+
+ flags <--| sedesc | : sedesc :
+ +----------+ ... +----------+
+ conn / ^ \ se ^ \
+ +------------+ / / \ | \
+ | connection |<--' | | ... OR ... | |
+ +------------+ \ / \ |
+ mux| ^ |ctx sd \ v : sedesc \ v
+ | | | +----------------------+ \ # +----------+ svcctx
+ | | | | mux stream or appctx | | # | appctx |--.
+ | | | +----------------------+ | # +----------+ |
+ | | | ^ | / private # : : |
+ v | | | v > to the # +----------+ |
+ mux_ops | | +----------------+ \ mux # | svcctx |<-'
+ | +---->| mux connection | ) # +----------+
+ +------ +----------------+ / #
+
+Stream descriptors may exist in the following modes:
+ - .conn = NULL, .se = NULL : backend, not connection attempt yet
+ - .conn = NULL, .se = <appctx> : frontend or backend, applet
+ - .conn = <conn>, .se = NULL : backend, connection in progress
+ - .conn = <conn>, .se = <muxs> : frontend or backend, connected
+
+Notes:
+ - for historical reasons (connect, forced protocol upgrades, etc), during a
+ connection setup or a rule-based protocol upgrade, the connection's "ctx"
+ may temporarily point to the stconn
+
+
+4. Invariants and cardinalities
+
+Usually a stream is created from an existing stconn from a mux or some applets,
+but may also be allocated first by other applets schedulers. After stream_new()
+a stream always has exactly one stconn per side (scf, scb), each of which has
+one ->sedesc. Each side is initialized with either one or no stream endpoint
+attached to the descriptor.
+
+Both applets and a mux stream always have a stream endpoint descriptor. AS SUCH
+IT IS NEVER NECESSARY TO TEST FOR THE EXISTENCE OF THE SEDESC FROM ANY SIDE, IT
+ALWAYS EXISTS. This explains why as much as possible it's preferable to use the
+sedesc to access flags and statuses from any side, rather than bouncing via the
+stconn.
+
+An applet's app layer is always a stream, which means that there are always
+channels accessible above, and there is always an opposite stream connector and
+a stream endpoint descriptor. As such, it's always safe for an applet to access
+the other side using sc_opposite().
+
+When an outgoing connection is in the process of being established, the backend
+side sedesc has its ->conn pointer pointing to the pending connection, and no
+->se. Once the connection is established and a mux is chosen, it's attached to
+the ->se. If an applet is used instead of a mux, the appctx is attached to the
+sedesc's ->se and ->conn remains NULL.
+
+If either side wants to detach from the other, it must allocate a new virgin
+sedesc to replace the existing one, and leave the existing one to the endpoint,
+since it continues to describe the stream endpoint. The stconn keeps its state
+(modulo the updates related to the disconnection). The previous sedesc points
+to a NULL stconn. For example, disconnecting from a backend mux will leave the
+entities like this:
+
+ +------------------------+
+ | stream or health check |
+ +------------------------+
+ ^ \ scf, scb
+ / \
+ | |
+ \ /
+ app \ v
+ +----------+
+ | stconn |
+ +----------+
+ ^ \ sedesc
+ / \
+ NULL | |
+ ^ \ /
+ sc | / sc \ v
+ +----------+ / +----------+
+ flags <--| sedesc1 | . . . . . | sedesc2 |--> flags
+ +----------+ / +----------+
+ conn / ^ \ se / conn / \ se
+ +------------+ / / \ | |
+ | connection |<--' | | v v
+ +------------+ \ / NULL NULL
+ mux| ^ |ctx sd \ v
+ | | | +----------------------+
+ | | | | mux stream or appctx |
+ | | | +----------------------+
+ | | | ^ |
+ v | | | v
+ mux_ops | | +----------------+
+ | +---->| mux connection |
+ +------ +----------------+
+
diff --git a/doc/internals/api/list.txt b/doc/internals/api/list.txt
new file mode 100644
index 0000000..d03cf03
--- /dev/null
+++ b/doc/internals/api/list.txt
@@ -0,0 +1,195 @@
+2021-11-09 - List API
+
+
+1. Background
+-------------
+
+HAProxy's lists are almost all doubly-linked and circular so that it is always
+possible to insert at the beginning, append at the end, scan them in any order
+and delete any element without having to scan to search the predecessor nor the
+successor.
+
+A list's head is just a regular list element, and an element always points to
+another list element. Such elements only have two pointers, the next and the
+previous elements. The object being pointed to is retrieved by subtracting the
+list element's offset in its structure from the list element's pointer. This
+way there is no need for any separate allocation for the list element, for a
+pointer to the object in the list, nor for a pointer to the list element from
+the object, as the list is embedded into the object.
+
+All basic operations are provided, as well as some iterators. Some iterators
+are safe for removal of the current element within the loop, others not. In any
+case a list cannot be freely modified while iterating over it (e.g. the current
+element's successor cannot be freed if it's saved as the restart point).
+
+Extreme care is taken nowadays in HAProxy to make sure that no dangling
+pointers are left in elements, so it is important to always initialize list
+heads and list elements, as well as elements that are removed from a list if
+they are not immediately freed, so that their deletion is idempotent. A rule of
+thumb is that a list pointer's validity never has to be checked, it is always
+valid to dereference it. A lot of complex bugs have been caused in the past by
+incorrect list manipulation, such as an element being deleted twice, resulting
+in damaging previously adjacent elements' neighbours. This usually has serious
+consequences at locations that are totally different from the one of the bug,
+and that are only detected much later, so it is required to be particularly
+strict on using lists safely.
+
+The lists are not thread-safe, but mt_lists may be used instead.
+
+
+2. API description
+------------------
+
+A list is defined like this, both for the list's head, and for any other
+element:
+
+ struct list {
+ struct list *n; /* next */
+ struct list *p; /* prev */
+ };
+
+An empty list points to itself for both pointers. I.e. a list's head is both
+its own successor and its own predecessor. This guarantees that insertions
+and deletions can be done without any check and that deletion is idempotent.
+For this reason and by convention, a detached element ought to be represented
+like an empty head.
+
+Lists are manipulated using a set of macros which are used to initialize, add,
+remove, or iterate over elements. Most of these macros are extremely simple and
+are not even protected against multiple evaluation, so it is fundamentally
+important that the expressions used in the arguments are idempotent and that
+the result does not depend on the evaluation order of the arguments.
+
+Macro Description
+
+ILH
+ Initialized List Head : this is a non-NULL, non-empty list element used
+ to prevent the compiler from moving an empty list head declaration to
+ BSS, typically when it appears in an array of keywords. Without this,
+ some older versions of gcc tend to trim all the array and cause
+ corruption.
+
+LIST_INIT(l)
+ Initialize the list as an empty list head
+
+LIST_HEAD_INIT(l)
+ Return a valid initialized empty list head pointing to this
+ element. Essentially used with assignments in declarations.
+
+LIST_INSERT(l, e)
+ Add an element at the beginning of a list and return it
+
+LIST_APPEND(l, e)
+ Add an element at the end of a list and return it
+
+LIST_SPLICE(n, o)
+ Add the contents of a list <o> at the beginning of another list <n>.
+ The old list head remains untouched.
+
+LIST_SPLICE_END_DETACHED(n, o)
+ Add the contents of a list whose first element is <o> and last one
+ is <o->p> at the end of another list <n>. The old list DOES NOT have
+ any head here.
+
+LIST_DELETE(e)
+ Remove an element from a list and return it. Safe to call on
+ initialized elements, but will not change the element itself so it is
+ not idempotent. Consider using LIST_DEL_INIT() instead unless called
+ immediately after a free().
+
+LIST_DEL_INIT(e)
+ Remove an element from a list, initialize it and return it so that a
+ subsequent LIST_DELETE() is safe. This is faster than performing a
+ LIST_DELETE() followed by a LIST_INIT() as pointers are not reloaded.
+
+LIST_ELEM(l, t, m)
+ Return a pointer of type <t> to a structure containing a list head
+ member called <m> at address <l>. Note that <l> can be the result of a
+ function or macro since it's used only once.
+
+LIST_ISEMPTY(l)
+ Check if the list head <l> is empty (=initialized) or not, and return
+ non-zero only if so.
+
+LIST_INLIST(e)
+ Check if the list element <e> was added to a list or not, thus return
+ true unless the element was initialized.
+
+LIST_INLIST_ATOMIC(e)
+ Atomically check if the list element's next pointer points to anything
+ different from itself, implying the element should be part of a
+ list. This usually is similar to LIST_INLIST() except that while that
+ one might be instrumented using debugging code to perform further
+ consistency checks, the macro below guarantees to always perform a
+ single atomic test and is safe to use with barriers.
+
+LIST_NEXT(l, t, m)
+ Return a pointer of type <t> to a structure following the element which
+ contains list head <l>, which is known as member <m> in struct <t>.
+
+LIST_PREV(l, t, m)
+ Return a pointer of type <t> to a structure preceding the element which
+ contains list head <l>, which is known as member <m> in struct <t>.
+ Note that this macro is first undefined as it happened to already exist
+ on some old OSes.
+
+list_for_each_entry(i, l, m)
+ Iterate local variable <i> through a list of items of type "typeof(*i)"
+ which are linked via a "struct list" member named <m>. A pointer to the
+ head of the list is passed in <l>. No temporary variable is needed.
+ Note that <i> must not be modified during the loop.
+
+list_for_each_entry_from(i, l, m)
+ Same as list_for_each_entry() but starting from current value of <i>
+ instead of the list's head.
+
+list_for_each_entry_from_rev(i, l, m)
+ Same as list_for_each_entry_rev() but starting from current value of <i>
+ instead of the list's head.
+
+list_for_each_entry_rev(i, l, m)
+ Iterate backwards local variable <i> through a list of items of type
+ "typeof(*i)" which are linked via a "struct list" member named <m>. A
+ pointer to the head of the list is passed in <l>. No temporary variable
+ is needed. Note that <i> must not be modified during the loop.
+
+list_for_each_entry_safe(i, b, l, m)
+ Iterate variable <i> through a list of items of type "typeof(*i)" which
+ are linked via a "struct list" member named <m>. A pointer to the head
+ of the list is passed in <l>. A temporary backup variable <b> of same
+ type as <i> is needed so that <i> may safely be deleted if needed. Note
+ that it is only permitted to delete <i> and no other element during
+ this operation!
+
+list_for_each_entry_safe_from(i, b, l, m)
+ Same as list_for_each_entry_safe() but starting from current value of
+ <i> instead of the list's head.
+
+list_for_each_entry_safe_from_rev(i, b, l, m)
+ Same as list_for_each_entry_safe_rev() but starting from current value
+ of <i> instead of the list's head.
+
+list_for_each_entry_safe_rev(i, b, l, m)
+ Iterate backwards local variable <i> through a list of items of type
+ "typeof(*i)" which are linked via a "struct list" member named <m>. A
+ pointer to the head of the list is passed in <l>. A temporary variable
+ <b> of same type as <i> is needed so that <i> may safely be deleted if
+ needed. Note that it is only permitted to delete <i> and no other
+ element during this operation!
+
+3. Notes
+--------
+
+- This API is quite old and some macros are missing. For example there's still
+ no list_first() so it's common to use LIST_ELEM(head->n, ...) instead. Some
+ older parts of the code also used to rely on list_for_each() followed by a
+ break to stop on the first element.
+
+- Some parts were recently renamed because LIST_ADD() used to do what
+ LIST_INSERT() currently does and was often mistaken with LIST_ADDQ() which is
+ what LIST_APPEND() now is. As such it is not totally impossible that some
+ places use a LIST_INSERT() where a LIST_APPEND() would be desired.
+
+- The structure must not be modified at all (even to add debug info). Some
+ parts of the code assume that its layout is exactly this one, particularly
+ the parts ensuring the casting between MT lists and lists.
diff --git a/doc/internals/api/pools.txt b/doc/internals/api/pools.txt
new file mode 100644
index 0000000..d84fb9d
--- /dev/null
+++ b/doc/internals/api/pools.txt
@@ -0,0 +1,585 @@
+2022-02-24 - Pools structure and API
+
+1. Background
+-------------
+
+Memory allocation is a complex problem covered by a massive amount of
+literature. Memory allocators found in field cover a broad spectrum of
+capabilities, performance, fragmentation, efficiency etc.
+
+The main difficulty of memory allocation comes from finding the optimal chunks
+for arbitrary sized requests, that will still preserve a low fragmentation
+level. Doing this well is often expensive in CPU usage and/or memory usage.
+
+In programs like HAProxy that deal with a large number of fixed size objects,
+there is no point having to endure all this risk of fragmentation, and the
+associated costs (sometimes up to several milliseconds with certain minimalist
+allocators) are simply not acceptable. A better approach consists in grouping
+frequently used objects by size, knowing that due to the high repetitiveness of
+operations, a freed object will immediately be needed for another operation.
+
+This grouping of objects by size is what is called a pool. Pools are created
+for certain frequently allocated objects, are usually merged together when they
+are of the same size (or almost the same size), and significantly reduce the
+number of calls to the memory allocator.
+
+With the arrival of threads, pools started to become a bottleneck so they now
+implement an optional thread-local lockless cache. Finally with the arrival of
+really efficient memory allocators in modern operating systems, the shared part
+has also become optional so that it doesn't consume memory if it does not bring
+any value.
+
+In 2.6-dev2, a number of debugging options that used to be configured at build
+time only changed to boot-time and can be modified using keywords passed after
+"-dM" on the command line, which sets or clears bits in the pool_debugging
+variable. The build-time options still affect the default settings however.
+Default values may be consulted using "haproxy -dMhelp".
+
+
+2. Principles
+-------------
+
+The pools architecture is selected at build time. The main options are:
+
+ - thread-local caches and process-wide shared pool enabled (1)
+
+ This is the default situation on most operating systems. Each thread has
+ its own local cache, and when depleted it refills from the process-wide
+ pool that avoids calling the standard allocator too often. It is possible
+ to force this mode at build time by setting CONFIG_HAP_GLOBAL_POOLS or at
+ boot time with "-dMglobal".
+
+ - thread-local caches only are enabled (2)
+
+ This is the situation on operating systems where a fast and modern memory
+ allocator is detected and when it is estimated that the process-wide shared
+ pool will not bring any benefit. This detection is automatic at build time,
+ but may also be forced at build time by setting CONFIG_HAP_NO_GLOBAL_POOLS
+ or at boot time with "-dMno-global".
+
+ - pass-through to the standard allocator (3)
+
+ This is used when one absolutely wants to disable pools and rely on regular
+ malloc() and free() calls, essentially in order to trace memory allocations
+ by call points, either internally via DEBUG_MEM_STATS, or externally via
+ tools such as Valgrind. This mode of operation may be forced at build time
+ by setting DEBUG_NO_POOLS or at boot time with "-dMno-cache".
+
+ - pass-through to an mmap-based allocator for debugging (4)
+
+ This is used only during deep debugging when trying to detect various
+ conditions such as use-after-free. In this case each allocated object's
+ size is rounded up to a multiple of a page size (4096 bytes) and an
+ integral number of pages is allocated for each object using mmap(),
+ surrounded by two inaccessible holes that aim to detect some out-of-bounds
+ accesses. Released objects are instantly freed using munmap() so that any
+ immediate subsequent access to the memory area crashes the process if the
+ area had not been reallocated yet. This mode can be enabled at build time
+ by setting DEBUG_UAF, or at run time by disabling pools and enabling UAF
+ with "-dMuaf". It tends to consume a lot of memory and not to scale at all
+ with concurrent calls, which tends to make the system stall. The watchdog
+ may even trigger on some slow allocations.
+
+There are no more provisions for running with a shared pool but no thread-local
+cache: the shared pool's main goal is to compensate for the expensive calls to
+the memory allocator. This gain may be huge on tiny systems using basic
+allocators, but the thread-local cache will already achieve this. And on larger
+threaded systems, the shared pool's benefit is visible when the underlying
+allocator scales poorly, but in this case the shared pool would suffer from
+the same limitations without its thread-local cache and wouldn't provide any
+benefit.
+
+Summary of the various operation modes:
+
+ (1) (2) (3) (4)
+
+ User User User User
+ | | | |
+ pool_alloc() V V | |
+ +---------+ +---------+ | |
+ | Thread | | Thread | | |
+ | Local | | Local | | |
+ | Cache | | Cache | | |
+ +---------+ +---------+ | |
+ | | | |
+ pool_refill*() V | | |
+ +---------+ | | |
+ | Shared | | | |
+ | Pool | | | |
+ +---------+ | | |
+ | | | |
+ malloc() V V V |
+ +---------+ +---------+ +---------+ |
+ | Library | | Library | | Library | |
+ +---------+ +---------+ +---------+ |
+ | | | |
+ mmap() V V V V
+ +---------+ +---------+ +---------+ +---------+
+ | OS | | OS | | OS | | OS |
+ +---------+ +---------+ +---------+ +---------+
+
+One extra build define, DEBUG_FAIL_ALLOC, is used to enforce random allocation
+failure in pool_alloc() by randomly returning NULL, to test that callers
+properly handle allocation failures. It may also be enabled at boot time using
+"-dMfail". In this case the desired average rate of allocation failures can be
+fixed by global setting "tune.fail-alloc" expressed in percent.
+
+The thread-local caches contain the freshest objects. Its total size amounts to
+the number of bytes set in global.tune.pool_cache_size and that may be adjusted
+by the "tune.memory.hot-size" global option, which itself defaults to build
+time setting CONFIG_HAP_POOL_CACHE_SIZE, which was 1MB before 2.6 and 512kB
+after. The aim is to keep hot objects that still fit in the CPU core's private
+L2 cache. Once these objects do not fit into the cache anymore, there's no
+benefit keeping them local to the thread, so they'd rather be returned to the
+shared pool or the main allocator so that any other thread may make use of
+them. Under extreme thread contention the cost of accessing shared structures
+in the global cache or in malloc() may still be important and it may prove
+useful to increase the thread-local cache size.
+
+
+3. Storage in thread-local caches
+---------------------------------
+
+This section describes how objects are linked in thread local caches. This is
+not meant to be a concern for users of the pools API but it can be useful when
+inspecting post-mortem dumps or when trying to figure certain size constraints.
+
+Objects are stored in the local cache using a doubly-linked list. This ensures
+that they can be visited by freshness order like a stack, while at the same
+time being able to access them from oldest to newest when it is needed to
+evict coldest ones first:
+
+ - releasing an object to the cache always puts it on the top.
+
+ - allocating an object from the cache always takes the topmost one, hence the
+ freshest one.
+
+ - scanning for older objects to evict starts from the bottom, where the
+ oldest ones are located
+
+To that end, each thread-local cache keeps a list head in the "list" member of
+its "pool_cache_head" descriptor, that links all objects cast to type
+"pool_cache_item" via their "by_pool" member.
+
+Note that the mechanism described above only works for a single pool. When
+trying to limit the total cache size to a certain value, all pools included,
+there is also a need to arrange all objects from all pools together in the
+local caches. For this, each thread_ctx maintains a list head of recently
+released objects, all pools included, in its member "pool_lru_head". All items
+in a thread-local cache are linked there via their "by_lru" member.
+
+This means that releasing an object using pool_free() consists in inserting
+it at the beginning of two lists:
+ - the local pool_cache_head's "list" list head
+ - the thread context's "pool_lru_head" list head
+
+Allocating an object consists in picking the first entry from the pool's "list"
+and deleting its "by_pool" and "by_lru" links.
+
+Evicting an object consists in scanning the thread context's "pool_lru_head"
+backwards and deleting the object's "by_pool" and "by_lru" links.
+
+Given that entries are both inserted and removed synchronously, we have the
+guarantee that the oldest object in the thread's LRU list is always the oldest
+object in its pool, and that the next element is the cache's list head. This is
+what allows the LRU eviction mechanism to figure what pool an object belongs to
+when releasing it.
+
+Note:
+ | Since a pool_cache_item has two list entries, on 64-bit systems it will be
+ | 32-bytes long. This is the smallest size that a pool may be, and any smaller
+ | size will automatically be rounded up to this size.
+
+When build option DEBUG_POOL_INTEGRITY is set, or the boot-time option
+"-dMintegrity" is passed on the command line, the area of the object between
+the two list elements and the end according to pool->size will be filled with
+pseudo-random words during pool_put_to_cache(), and these words will be
+compared between each other during pool_get_from_cache(), and the process will
+crash in case any bit differs, as this would indicate that the memory area was
+modified after the free. The pseudo-random pattern is in fact incremented by
+(~0)/3 upon each free so that roughly half of the bits change each time and we
+maximize the likelihood of detecting a single bit flip in either direction. In
+order to avoid an immediate reuse and maximize the time the object spends in
+the cache, when this option is set, objects are picked from the cache from the
+oldest one instead of the freshest one. This way even late memory corruptions
+have a chance to be detected.
+
+When build option DEBUG_MEMORY_POOLS is set, or the boot-time option "-dMtag"
+is passed on the executable's command line, pool objects are allocated with
+one extra pointer compared to the requested size, so that the bytes that follow
+the memory area point to the pool descriptor itself as long as the object is
+allocated via pool_alloc(). Upon releasing via pool_free(), the pointer is
+compared and the code will crash if it differs. This allows detecting both
+memory overflows and object released to the wrong pool (code bug resulting from
+a copy-paste error typically).
+
+Thus an object will look like this depending on whether it's in the cache or is
+currently in use:
+
+ in cache in use
+ +------------+ +------------+
+ <--+ by_pool.p | | N bytes |
+ | by_pool.n +--> | |
+ +------------+ |N=16 min on |
+ <--+ by_lru.p | | 32-bit, |
+ | by_lru.n +--> | 32 min on |
+ +------------+ | 64-bit |
+ : : : :
+ | N bytes | | |
+ +------------+ +------------+ \ optional, only if
+ : (unused) : : pool ptr : > DEBUG_MEMORY_POOLS
+ +------------+ +------------+ / is set at build time
+ or -dMtag at boot time
+
+Right now no provisions are made to return objects aligned on larger boundaries
+than those currently covered by malloc() (i.e. two pointers). This need appears
+from time to time and the layout above might evolve a little bit if needed.
+
+
+4. Storage in the process-wide shared pool
+------------------------------------------
+
+In order for the shared pool not to be a contention point in a multi-threaded
+environment, objects are allocated from or released to shared pools by clusters
+of a few objects at once. The maximum number of objects that may be moved to or
+from a shared pool at once is defined by CONFIG_HAP_POOL_CLUSTER_SIZE at build
+time, and currently defaults to 8.
+
+In order to remain scalable, the shared pool has to make some tradeoffs to
+limit the number of atomic operations and the duration of any locked operation.
+As such, it's composed of a single-linked list of clusters, themselves made of
+a single-linked list of objects.
+
+Clusters and objects are of the same type "pool_item" and are accessed from the
+pool's "free_list" member. This member points to the latest pool_item inserted
+into the pool by a release operation. And the pool_item's "next" member points
+to the next pool_item, which was the one present in the pool's free_list just
+before the pool_item was inserted, and the last pool_item in the list simply
+has a NULL "next" field.
+
+The pool_item's "down" pointer points down to the next objects part of the same
+cluster, that will be released or allocated at the same time as the first one.
+Each of these items also has a NULL "next" field, and are chained by their
+respective "down" pointers until the last one is detected by a NULL value.
+
+This results in the following layout:
+
+ pool pool_item pool_item pool_item
+ +-----------+ +------+ +------+ +------+
+ | free_list +--> | next +--> | next +--> | NULL |
+ +-----------+ +------+ +------+ +------+
+ | down | | NULL | | down |
+ +--+---+ +------+ +--+---+
+ | |
+ V V
+ +------+ +------+
+ | NULL | | NULL |
+ +------+ +------+
+ | down | | NULL |
+ +--+---+ +------+
+ |
+ V
+ +------+
+ | NULL |
+ +------+
+ | NULL |
+ +------+
+
+Allocating an entry is only a matter of performing two atomic operations on
+the free_list and reading the pool's "next" value:
+
+ - atomically mark the free_list as being updated by writing a "magic" pointer
+ - read the first pool_item's "next" field
+ - atomically replace the free_list with this value
+
+This results in a fast operation that instantly retrieves a cluster at once.
+Then outside of the critical section entries are walked over and inserted into
+the local cache one at a time. In order to keep the code simple and efficient,
+objects allocated from the shared pool are all placed into the local cache, and
+only then the first one is allocated from the cache. This operation is
+performed by the dedicated function pool_refill_local_from_shared() which is
+called from pool_get_from_cache() when the cache is empty. It means there is an
+overhead of two list insert/delete operations for the first object and that
+could be avoided at the expense of more complex code in the fast path, but this
+is negligible since it only concerns objects that need to be visited anyway.
+
+Freeing a group of objects consists in performing the operation the other way
+around:
+
+ - atomically mark the free_list as being updated by writing a "magic" pointer
+ - write the free_list value to the to-be-released item's "next" entry
+ - atomically replace the free_list with the pool_item's pointer
+
+The cluster will simply have to be prepared before being sent to the shared
+pool. The operation of releasing a cluster at once is performed by function
+pool_put_to_shared_cache() which is called from pool_evict_last_items() which
+itself is responsible for building the clusters.
+
+Due to the way objects are stored, it is important to try to group objects as
+much as possible when releasing them because this is what will condition their
+retrieval as groups as well. This is the reason why pool_evict_last_items()
+uses the LRU to find a first entry but tries to pick several items at once from
+a single cache. Tests have shown that CONFIG_HAP_POOL_CLUSTER_SIZE set to 8
+achieves up to 6-6.5 objects on average per operation, which effectively
+divides by as much the average time spent per object by each thread and pushes
+the contention point further.
+
+Also, grouping items in clusters is a property of the process-wide shared pool
+and not of the thread-local caches. This means that there is no grouped
+operation when not using the shared pool (mode "2" in the diagram above).
+
+
+5. API
+------
+
+The following functions are public and available for user code:
+
+struct pool_head *create_pool(char *name, uint size, uint flags)
+ Create a new pool named <name> for objects of size <size> bytes. Pool
+ names are truncated to their first 11 characters. Pools of very similar
+ size will usually be merged if both have set the flag MEM_F_SHARED in
+ <flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
+ "-dMno-merge" is passed on the executable's command line, the pools
+ also need to have the exact same name to be merged. In addition, unless
+ MEM_F_EXACT is set in <flags>, the object size will usually be rounded
+ up to the size of pointers (16 or 32 bytes). The name that will appear
+ in the pool upon merging is the name of the first created pool. The
+ returned pointer is the new (or reused) pool head, or NULL upon error.
+ Pools created this way must be destroyed using pool_destroy().
+
+void *pool_destroy(struct pool_head *pool)
+ Destroy pool <pool>, that is, all of its unused objects are freed and
+ the structure is freed as well if the pool didn't have any used objects
+ anymore. In this case NULL is returned. If some objects remain in use,
+ the pool is preserved and its pointer is returned. This ought to be
+ used essentially on exit or in rare situations where some internal
+ entities that hold pools have to be destroyed.
+
+void pool_destroy_all(void)
+ Destroy all pools, without checking which ones still have used entries.
+ This is only meant for use on exit.
+
+void *__pool_alloc(struct pool_head *pool, uint flags)
+ Allocate an entry from the pool <pool>. The allocator will first look
+ for an object in the thread-local cache if enabled, then in the shared
+ pool if enabled, then will fall back to the operating system's default
+ allocator. NULL is returned if the object couldn't be allocated (due to
+ configured limits or lack of memory). Object allocated this way have to
+ be released using pool_free(). Like with malloc(), by default the
+ contents of the returned object are undefined. If memory poisoning is
+ enabled, the object will be filled with the poisoning byte. If the
+ global "pool.fail-alloc" setting is non-zero and DEBUG_FAIL_ALLOC is
+ enabled, a random number generator will be called to randomly return a
+ NULL. The allocator's behavior may be adjusted using a few flags passed
+ in <flags>:
+ - POOL_F_NO_POISON : when set, disables memory poisoning (e.g. when
+ pointless and expensive, like for buffers)
+ - POOL_F_MUST_ZERO : when set, the memory area will be zeroed before
+ being returned, similar to what calloc() does
+ - POOL_F_NO_FAIL : when set, disables the random allocation failure,
+ e.g. for use during early init code or critical sections.
+
+void *pool_alloc(struct pool_head *pool)
+ This is an exact equivalent of __pool_alloc(pool, 0). It is the regular
+ way to allocate entries from a pool.
+
+void *pool_alloc_nocache(struct pool_head *pool)
+ Allocate an entry from the pool <pool>, bypassing the cache. If shared
+ pools are enabled, they will be consulted first. Otherwise the object
+ is allocated using the operating system's default allocator. This is
+ essentially used during early boot to pre-allocate a number of objects
+ for pools which require a minimum number of entries to exist.
+
+void *pool_zalloc(struct pool_head *pool)
+ This is an exact equivalent of __pool_alloc(pool, POOL_F_MUST_ZERO).
+
+void pool_free(struct pool_head *pool, void *ptr)
+ Free an entry allocated from one of the pool_alloc() functions above
+ from pool <pool>. The object will be placed into the thread-local cache
+ if enabled, or in the shared pool if enabled, or will be released using
+ the operating system's default allocator. When a local cache is
+ enabled, if the local cache size becomes larger than 75% of the maximum
+ size configured at build time, some objects will be evicted to the
+ shared pool. Such objects are taken first from the same pool, but if
+ the total size is really huge, other pools might be checked as well.
+ Some extra checks enabled at build time may enforce extra checks so
+ that the process will immediately crash if the object was not allocated
+ from this pool or experienced an overflow or some memory corruption.
+
+void pool_flush(struct pool_head *pool)
+ Free all unused objects from shared pool <pool>. Thread-local caches
+ are not affected. This is essentially used when running low on memory
+ or when stopping, in order to release a maximum amount of memory for
+ the new process.
+
+void pool_gc(struct pool_head *pool)
+ Free all unused objects from all pools, but respecting the minimum
+ number of spare objects required for each of them. Then, for operating
+ systems which support it, indicate the system that all unused memory
+ can be released. Thread-local caches are not affected. This operation
+ differs from pool_flush() in that it is run locklessly, under thread
+ isolation, and on all pools in a row. It is called by the SIGQUIT
+ signal handler and upon exit. Note that the obsolete argument <pool> is
+ not used and the convention is to pass NULL there.
+
+void dump_pools_to_trash(void)
+ Dump the current status of all pools into the trash buffer. This is
+ essentially used by the "show pools" CLI command or the SIGQUIT signal
+ handler to dump them on stderr. The total report size may not exceed
+ the size of the trash buffer. If it does, some entries will be missing.
+
+void dump_pools(void)
+ Dump the current status of all pools to stderr. This just calls
+ dump_pools_to_trash() and writes the trash to stderr.
+
+int pool_total_failures(void)
+ Report the total number of failed allocations. This is solely used to
+ report the "PoolFailed" metrics of the "show info" output. The total
+ is calculated on the fly by summing the number of failures in all pools
+ and is only meant to be used as an indicator rather than a precise
+ measure.
+
+ullong pool_total_allocated(void)
+ Report the total number of bytes allocated in all pools, for reporting
+ in the "PoolAlloc_MB" field of the "show info" output. The total is
+ calculated on the fly by summing the number of allocated bytes in all
+ pools and is only meant to be used as an indicator rather than a
+ precise measure.
+
+ullong pool_total_used(void)
+ Report the total number of bytes used in all pools, for reporting in
+ the "PoolUsed_MB" field of the "show info" output. The total is
+ calculated on the fly by summing the number of used bytes in all pools
+ and is only meant to be used as an indicator rather than a precise
+ measure. Note that objects present in caches are accounted as used.
+
+Some other functions exist and are only used by the pools code itself. While
+not strictly forbidden to use outside of this code, it is generally recommended
+to avoid touching them in order not to create undesired dependencies that will
+complicate maintenance.
+
+A few macros exist to ease the declaration of pools:
+
+DECLARE_POOL(ptr, name, size)
+ Placed at the top level of a file, this declares a global memory pool
+ as variable <ptr>, name <name> and size <size> bytes per element. This
+ is made via a call to REGISTER_POOL() and by assigning the resulting
+ pointer to variable <ptr>. <ptr> will be created of type "struct
+ pool_head *". If the pool needs to be visible outside of the function
+ (which is likely), it will also need to be declared somewhere as
+ "extern struct pool_head *<ptr>;". It is recommended to place such
+ declarations very early in the source file so that the variable is
+ already known to all subsequent functions which may use it.
+
+DECLARE_STATIC_POOL(ptr, name, size)
+ Placed at the top level of a file, this declares a static memory pool
+ as variable <ptr>, name <name> and size <size> bytes per element. This
+ is made via a call to REGISTER_POOL() and by assigning the resulting
+ pointer to local variable <ptr>. <ptr> will be created of type "static
+ struct pool_head *". It is recommended to place such declarations very
+ early in the source file so that the variable is already known to all
+ subsequent functions which may use it.
+
+
+6. Build options
+----------------
+
+A number of build-time defines allow to tune the pools behavior. All of them
+have to be enabled using "-Dxxx" or "-Dxxx=yyy" in the makefile's DEBUG
+variable.
+
+DEBUG_NO_POOLS
+ When this is set, pools are entirely disabled, and allocations are made
+ using malloc() instead. This is not recommended for production but may
+ be useful for tracing allocations. It corresponds to "-dMno-cache" at
+ boot time.
+
+DEBUG_MEMORY_POOLS
+ When this is set, an extra pointer is allocated at the end of each
+ object to reference the pool the object was allocated from and detect
+ buffer overflows. Then, pool_free() will provoke a crash in case it
+ detects an anomaly (pointer at the end not matching the pool). It
+ corresponds to "-dMtag" at boot time.
+
+DEBUG_FAIL_ALLOC
+ When enabled, a global setting "tune.fail-alloc" may be set to a non-
+ zero value representing a percentage of memory allocations that will be
+ made to fail in order to stress the calling code. It corresponds to
+ "-dMfail" at boot time.
+
+DEBUG_DONT_SHARE_POOLS
+	When enabled, pools of similar sizes are not merged unless they have the
+ exact same name. It corresponds to "-dMno-merge" at boot time.
+
+DEBUG_UAF
+ When enabled, pools are disabled and all allocations and releases pass
+ through mmap() and munmap(). The memory usage significantly inflates
+ and the performance degrades, but this allows to detect a lot of
+ use-after-free conditions by crashing the program at the first abnormal
+ access. This should not be used in production. It corresponds to
+ boot-time options "-dMuaf". Caching is disabled but may be re-enabled
+ using "-dMcache".
+
+DEBUG_POOL_INTEGRITY
+ When enabled, objects picked from the cache are checked for corruption
+ by comparing their contents against a pattern that was placed when they
+ were inserted into the cache. Objects are also allocated in the reverse
+ order, from the oldest one to the most recent, so as to maximize the
+ ability to detect such a corruption. The goal is to detect writes after
+ free (or possibly hardware memory corruptions). Contrary to DEBUG_UAF
+ this cannot detect reads after free, but may possibly detect later
+ corruptions and will not consume extra memory. The CPU usage will
+ increase a bit due to the cost of filling/checking the area and for the
+ preference for cold cache instead of hot cache, though not as much as
+ with DEBUG_UAF. This option is meant to be usable in production. It
+ corresponds to boot-time options "-dMcold-first,integrity".
+
+DEBUG_POOL_TRACING
+ When enabled, the callers of pool_alloc() and pool_free() will be
+ recorded into an extra memory area placed after the end of the object.
+ This may only be required by developers who want to get a few more
+ hints about code paths involved in some crashes, but will serve no
+ purpose outside of this. It remains compatible (and completes well)
+ DEBUG_POOL_INTEGRITY above. Such information become meaningless once
+ the objects leave the thread-local cache. It corresponds to boot-time
+ option "-dMcaller".
+
+DEBUG_MEM_STATS
+ When enabled, all malloc/calloc/realloc/strdup/free calls are accounted
+ for per call place (file+line number), and may be displayed or reset on
+ the CLI using "debug dev memstats". This is essentially used to detect
+ potential leaks or abnormal usages. When pools are enabled (default),
+ such calls are rare and the output will mostly contain calls induced by
+ libraries. When pools are disabled, about all calls to pool_alloc() and
+ pool_free() will also appear since they will be remapped to standard
+ functions.
+
+CONFIG_HAP_GLOBAL_POOLS
+ When enabled, process-wide shared pools will be forcefully enabled even
+ if not considered useful on the platform. The default is to let haproxy
+ decide based on the OS and C library. It corresponds to boot-time
+ option "-dMglobal".
+
+CONFIG_HAP_NO_GLOBAL_POOLS
+ When enabled, process-wide shared pools will be forcefully disabled
+ even if considered useful on the platform. The default is to let
+ haproxy decide based on the OS and C library. It corresponds to
+ boot-time option "-dMno-global".
+
+CONFIG_HAP_POOL_CACHE_SIZE
+ This allows one to define the default size of the per-thread cache, in
+ bytes. The default value is 512 kB (524288). Smaller values will use
+ less memory at the expense of a possibly higher CPU usage when using
+ many threads. Higher values will give diminishing returns on
+ performance while using much more memory. Usually there is no benefit
+ in using more than a per-core L2 cache size. It would be better not to
+ set this value lower than a few times the size of a buffer (bufsize,
+ defaults to 16 kB). In addition, keep in mind that this option may be
+ changed at runtime using "tune.memory.hot-size".
+
+CONFIG_HAP_POOL_CLUSTER_SIZE
+ This allows one to define the maximum number of objects that will be
+	grouped together in an allocation from the shared pool. Values 4 to 8
+ have experimentally shown good results with 16 threads. On systems with
+ more cores or loosely coupled caches exhibiting slow atomic operations,
+ it could possibly make sense to slightly increase this value.
diff --git a/doc/internals/api/scheduler.txt b/doc/internals/api/scheduler.txt
new file mode 100644
index 0000000..dd1ad5f
--- /dev/null
+++ b/doc/internals/api/scheduler.txt
@@ -0,0 +1,228 @@
+2021-11-17 - Scheduler API
+
+
+1. Background
+-------------
+
+The scheduler relies on two major parts:
+ - the wait queue or timers queue, which contains an ordered tree of the next
+ timers to expire
+
+ - the run queue, which contains tasks that were already woken up and are
+ waiting for a CPU slot to execute.
+
+There are two types of schedulable objects in HAProxy:
+ - tasks: they contain one timer and can be in the run queue without leaving
+ their place in the timers queue.
+
+ - tasklets: they do not have the timers part and are either sleeping or
+ running.
+
+Both the timers queue and run queue in fact exist both shared between all
+threads and per-thread. A task or tasklet may only be queued in a single of
+each at a time. The thread-local queues are not thread-safe while the shared
+ones are. This means that it is only permitted to manipulate an object which
+is in the local queue or in a shared queue, but then after locking it. As such
+tasks and tasklets are usually pinned to threads and do not move, or only in
+very specific ways not detailed here.
+
+In case of doubt, keep in mind that it's not permitted to manipulate another
+thread's private task or tasklet, and that any task held by another thread
+might vanish while it's being looked at.
+
+Internally a large part of the task and tasklet struct is shared between
+the two types, which reduces code duplication and eases the preservation
+of fairness in the run queue by interleaving all of them. As such, some
+fields or flags may not always be relevant to tasklets and may be ignored.
+
+
+Tasklets do not use a thread mask but use a thread ID instead, to which they
+are bound. If the thread ID is negative, the tasklet is not bound but may only
+be run on the calling thread.
+
+
+2. API
+------
+
+There are few functions exposed by the scheduler. A few more ones are in fact
+accessible but, if not documented here, they'd rather be avoided or used only
+when absolutely certain they're suitable, as some have delicate corner cases.
+In doubt, checking the sched.pdf diagram may help.
+
+int total_run_queues()
+ Return the approximate number of tasks in run queues. This is racy
+ and a bit inaccurate as it iterates over all queues, but it is
+ sufficient for stats reporting.
+
+int task_in_rq(t)
+ Return non-zero if the designated task is in the run queue (i.e. it was
+ already woken up).
+
+int task_in_wq(t)
+ Return non-zero if the designated task is in the timers queue (i.e. it
+ has a valid timeout and will eventually expire).
+
+int thread_has_tasks()
+ Return non-zero if the current thread has some work to be done in the
+ run queue. This is used to decide whether or not to sleep in poll().
+
+void task_wakeup(t, f)
+ Will make sure task <t> will wake up, that is, will execute at least
+ once after the start of the function is called. The task flags <f> will
+ be ORed on the task's state, among TASK_WOKEN_* flags exclusively. In
+ multi-threaded environments it is safe to wake up another thread's task
+ and even if the thread is sleeping it will be woken up. Users have to
+ keep in mind that a task running on another thread might very well
+ finish and go back to sleep before the function returns. It is
+ permitted to wake the current task up, in which case it will be
+ scheduled to run another time after it returns to the scheduler.
+
+struct task *task_unlink_wq(t)
+ Remove the task from the timers queue if it was in it, and return it.
+ It may only be done for the local thread, or for a shared thread that
+ might be in the shared queue. It must not be done for another thread's
+ task.
+
+void task_queue(t)
+ Place or update task <t> into the timers queue, where it may already
+ be, scheduling it for an expiration at date t->expire. If t->expire is
+ infinite, nothing is done, so it's safe to call this function without
+ prior checking the expiration date. It is only valid to call this
+ function for local tasks or for shared tasks who have the calling
+ thread in their thread mask.
+
+void task_set_thread(t, id)
+ Change task <t>'s thread ID to new value <id>. This may only be
+ performed by the task itself while running. This is only used to let a
+ task voluntarily migrate to another thread. Thread id -1 is used to
+ indicate "any thread". It's ignored and replaced by zero when threads
+ are disabled.
+
+void tasklet_wakeup(tl)
+ Make sure that tasklet <tl> will wake up, that is, will execute at
+ least once. The tasklet will run on its assigned thread, or on any
+ thread if its TID is negative.
+
+void tasklet_wakeup_on(tl, thr)
+ Make sure that tasklet <tl> will wake up on thread <thr>, that is, will
+ execute at least once. The designated thread may only differ from the
+ calling one if the tasklet is already configured to run on another
+ thread, and it is not permitted to self-assign a tasklet if its tid is
+	negative, as it may already be scheduled to run somewhere else. In case
+	of doubt, use tasklet_wakeup(), which will pick the tasklet's assigned
+	thread ID.
+
+struct tasklet *tasklet_new()
+ Allocate a new tasklet and set it to run by default on the calling
+ thread. The caller may change its tid to another one before using it.
+ The new tasklet is returned.
+
+struct task *task_new_anywhere()
+ Allocate a new task to run on any thread, and return the task, or NULL
+ in case of allocation issue. Note that such tasks will be marked as
+ shared and will go through the locked queues, thus their activity will
+ be heavier than for other ones. See also task_new_here().
+
+struct task *task_new_here()
+ Allocate a new task to run on the calling thread, and return the task,
+ or NULL in case of allocation issue.
+
+struct task *task_new_on(t)
+ Allocate a new task to run on thread <t>, and return the task, or NULL
+ in case of allocation issue.
+
+void task_destroy(t)
+ Destroy this task. The task will be unlinked from any timers queue,
+ and either immediately freed, or asynchronously killed if currently
+ running. This may only be done by one of the threads this task is
+ allowed to run on. Developers must not forget that the task's memory
+ area is not always immediately freed, and that certain misuses could
+ only have effect later down the chain (e.g. use-after-free).
+
+void tasklet_free()
+	Free this tasklet, which must not be running, so it may only be
+ called by the thread responsible for the tasklet, typically the
+ tasklet's process() function itself.
+
+void task_schedule(t, d)
+ Schedule task <t> to run no later than date <d>. If the task is already
+ running, or scheduled for an earlier instant, nothing is done. If the
+	task was not queued or was scheduled to run later, its timer entry
+ will be updated. This function assumes that it will never be called
+ with a timer in the past nor with TICK_ETERNITY. Only one of the
+ threads assigned to the task may call this function.
+
+The task's ->process() function receives the following arguments:
+
+ - struct task *t: a pointer to the task itself. It is always valid.
+
+ - void *ctx : a copy of the task's ->context pointer at the moment
+ the ->process() function was called by the scheduler. A
+ function must use this and not task->context, because
+ task->context might possibly be changed by another thread.
+                    For instance, the muxes' takeover() functions do this.
+
+ - uint state : a copy of the task's ->state field at the moment the
+ ->process() function was executed. A function must use
+ this and not task->state as the latter misses the wakeup
+ reasons and may constantly change during execution along
+ concurrent wakeups (threads or signals).
+
+The possible state flags to use during a call to task_wakeup() or seen by the
+task being called are the following; they're automatically cleaned from the
+state field before the call to ->process()
+
+ - TASK_WOKEN_INIT each creation of a task causes a first wakeup with this
+ flag set. Applications should not set it themselves.
+
+ - TASK_WOKEN_TIMER this indicates the task's expire date was reached in the
+ timers queue. Applications should not set it themselves.
+
+ - TASK_WOKEN_IO indicates the wake-up happened due to I/O activity. Now
+ that all low-level I/O processing happens on tasklets,
+ this notion of I/O is now application-defined (for
+ example stream-interfaces use it to notify the stream).
+
+ - TASK_WOKEN_SIGNAL indicates that a signal the task was subscribed to was
+ received. Applications should not set it themselves.
+
+ - TASK_WOKEN_MSG any application-defined wake-up reason, usually for
+                      inter-task communication (e.g. filters vs streams).
+
+ - TASK_WOKEN_RES a resource the task was waiting for was finally made
+ available, allowing the task to continue its work. This
+ is essentially used by buffers and queues. Applications
+ may carefully use it for their own purpose if they're
+ certain not to rely on existing ones.
+
+ - TASK_WOKEN_OTHER any other application-defined wake-up reason.
+
+
+In addition, a few persistent flags may be observed or manipulated by the
+application, both for tasks and tasklets:
+
+ - TASK_SELF_WAKING when set, indicates that this task was found waking
+ itself up, and its class will change to bulk processing.
+                      If this behavior is under control and only temporarily
+                      expected, and not expected to happen again, it may make
+ sense to reset this flag from the ->process() function
+ itself.
+
+ - TASK_HEAVY when set, indicates that this task does so heavy
+ processing that it will become mandatory to give back
+ control to I/Os otherwise big latencies might occur. It
+ may be set by an application that expects something
+ heavy to happen (tens to hundreds of microseconds), and
+                      reset once finished. An example user is the TLS stack
+ which sets it when an imminent crypto operation is
+ expected.
+
+ - TASK_F_USR1 This is the first application-defined persistent flag.
+ It is always zero unless the application changes it. An
+                      example use case is the I/O handler for backend
+ connections, to mention whether the connection is safe
+ to use or might have recently been migrated.
+
+Finally, when built with -DDEBUG_TASK, an extra sub-structure "debug" is added
+to both tasks and tasklets to note the code locations of the last two calls to
+task_wakeup() and tasklet_wakeup().
diff --git a/doc/internals/body-parsing.txt b/doc/internals/body-parsing.txt
new file mode 100644
index 0000000..be209af
--- /dev/null
+++ b/doc/internals/body-parsing.txt
@@ -0,0 +1,165 @@
+2014/04/16 - Pointer assignments during processing of the HTTP body
+
+In HAProxy, a struct http_msg is a descriptor for an HTTP message, which stores
+the state of an HTTP parser at any given instant, relative to a buffer which
+contains part of the message being inspected.
+
+Currently, an http_msg holds a few pointers and offsets to some important
+locations in a message depending on the state the parser is in. Some of these
+pointers and offsets may move when data are inserted into or removed from the
+buffer, others won't move.
+
+An important point is that the state of the parser only translates what the
+parser is reading, and not at all what is being done on the message (eg:
+forwarding).
+
+For an HTTP message <msg> and a buffer <buf>, we have the following elements
+to work with :
+
+
+Buffer :
+--------
+
+buf.size : the allocated size of the buffer. A message cannot be larger than
+ this size. In general, a message will even be smaller because the
+ size is almost always reduced by global.maxrewrite bytes.
+
+buf.data : memory area containing the part of the message being worked on. This
+ area is exactly <buf.size> bytes long. It should be seen as a sliding
+ window over the message, but in terms of implementation, it's closer
+ to a wrapping window. For ease of processing, new messages (requests
+ or responses) are aligned to the beginning of the buffer so that they
+ never wrap and common string processing functions can be used.
+
+buf.p : memory pointer (char *) to the beginning of the buffer as the parser
+ understands it. It commonly refers to the first character of an HTTP
+ request or response, but during forwarding, it can point to other
+ locations. This pointer always points to a location in <buf.data>.
+
+buf.i : number of bytes after <buf.p> that are available in the buffer. If
+ <buf.p + buf.i> exceeds <buf.data + buf.size>, then the pending data
+ wrap at the end of the buffer and continue at <buf.data>.
+
+buf.o : number of bytes already processed before <buf.p> that are pending
+ for departure. These bytes may leave at any instant once a connection
+ is established. These ones may wrap before <buf.data> to start before
+ <buf.data + buf.size>.
+
+It's common to call the part between buf.p and buf.p+buf.i the input buffer, and
+the part between buf.p-buf.o and buf.p the output buffer. This design permits
+efficient forwarding without copies. As a result, forwarding one byte from the
+input buffer to the output buffer only consists in :
+ - incrementing buf.p
+ - incrementing buf.o
+ - decrementing buf.i
+
+
+Message :
+---------
+Unless stated otherwise, all values are relative to <buf.p>, and are always
+comprised between 0 and <buf.i>. These values are relative offsets and they do
+not need to take wrapping into account, they are used as if the buffer was an
+infinite length sliding window. The buffer management functions handle the
+wrapping automatically.
+
+msg.next : points to the next byte to inspect. This offset is automatically
+ adjusted when inserting/removing some headers. In data states, it is
+ automatically adjusted to the number of bytes already inspected.
+
+msg.sov : start of value. First character of the header's value in the header
+ states, start of the body in the data states. Strictly positive
+ values indicate that headers were not forwarded yet (<buf.p> is
+ before the start of the body), and null or negative values are seen
+ after headers are forwarded (<buf.p> is at or past the start of the
+ body). The value stops changing when data start to leave the buffer
+ (in order to avoid integer overflows). So the maximum possible range
+ is -<buf.size> to +<buf.size>. This offset is automatically adjusted
+ when inserting or removing some headers. It is useful to rewind the
+ request buffer to the beginning of the body at any phase. The
+ response buffer does not really use it since it is immediately
+ forwarded to the client.
+
+msg.sol : start of line. Points to the beginning of the current header line
+ while parsing headers. It is cleared to zero in the BODY state,
+ and contains exactly the number of bytes comprising the preceding
+ chunk size in the DATA state (which can be zero), so that the sum of
+ msg.sov + msg.sol always points to the beginning of data for all
+ states starting with DATA. For chunked encoded messages, this sum
+ always corresponds to the beginning of the current chunk of data as
+ it appears in the buffer, or to be more precise, it corresponds to
+ the first of the remaining bytes of chunked data to be inspected. In
+ TRAILERS state, it contains the length of the last parsed part of
+ the trailer headers.
+
+msg.eoh : end of headers. Points to the CRLF (or LF) preceding the body and
+ marking the end of headers. It is where new headers are appended.
+ This offset is automatically adjusted when inserting/removing some
+ headers. It always contains the size of the headers excluding the
+ trailing CRLF even after headers have been forwarded.
+
+msg.eol : end of line. Points to the CRLF or LF of the current header line
+ being inspected during the various header states. In data states, it
+ holds the trailing CRLF length (1 or 2) so that msg.eoh + msg.eol
+ always equals the exact header length. It is not affected during data
+ states nor by forwarding.
+
+The beginning of the message headers can always be found this way even after
+headers or data have been forwarded, provided that everything is still present
+in the buffer :
+
+ headers = buf.p + msg->sov - msg->eoh - msg->eol
+
+
+Message length :
+----------------
+msg.chunk_len : amount of bytes of the current chunk or total message body
+ remaining to be inspected after msg.next. It is automatically
+ incremented when parsing a chunk size, and decremented as data
+ are forwarded.
+
+msg.body_len : total message body length, for logging. Equals Content-Length
+ when used, otherwise is the sum of all correctly parsed chunks.
+
+
+Message state :
+---------------
+msg.msg_state contains the current parser state, one of HTTP_MSG_*. The state
+indicates what byte is expected at msg->next.
+
+HTTP_MSG_BODY : all headers have been parsed, parsing of body has not
+ started yet.
+
+HTTP_MSG_100_SENT : parsing of body has started. If a 100-Continue was needed
+ it has already been sent.
+
+HTTP_MSG_DATA : some bytes are remaining for either the whole body when
+ the message size is determined by Content-Length, or for
+ the current chunk in chunked-encoded mode.
+
+HTTP_MSG_CHUNK_CRLF : msg->next points to the CRLF after the current data chunk.
+
+HTTP_MSG_TRAILERS : msg->next points to the beginning of a possibly empty
+ trailer line after the final empty chunk.
+
+HTTP_MSG_DONE : all the Content-Length data has been inspected, or the
+ final CRLF after trailers has been met.
+
+
+Message forwarding :
+--------------------
+Forwarding part of a message consists in advancing buf.p up to the point where
+it points to the byte following the last one to be forwarded. This can be done
+inline if enough bytes are present in the buffer, or in multiple steps if more
+buffers need to be forwarded (possibly including splicing). Thus by definition,
+after a block has been scheduled for being forwarded, msg->next and msg->sov
+must be reset.
+
+The communication channel between the producer and the consumer holds a counter
+of extra bytes remaining to be forwarded directly without consulting analysers,
+after buf.p. This counter is called to_forward. It commonly holds the advertised
+chunk length or content-length that does not fit in the buffer. For example, if
+2000 bytes are to be forwarded, and 10 bytes are present after buf.p as reported
+by buf.i, then both buf.o and buf.p will advance by 10, buf.i will be reset, and
+to_forward will be set to 1990 so that in total, 2000 bytes will be forwarded.
+At the end of the forwarding, buf.p will point to the first byte to be inspected
+after the 2000 forwarded bytes.
diff --git a/doc/internals/connect-status.txt b/doc/internals/connect-status.txt
new file mode 100644
index 0000000..70bbcc5
--- /dev/null
+++ b/doc/internals/connect-status.txt
@@ -0,0 +1,28 @@
+Normally, we should use getsockopt(fd, SOL_SOCKET, SO_ERROR) on a pending
+connect() to detect whether the connection correctly established or not.
+
+Unfortunately, getsockopt() does not report the status of a pending connection,
+which means that it returns 0 if the connection is still pending. This is to
+be expected because, as the name implies, it only returns errors.
+
+With the speculative I/O, a new problem was introduced : if we pretend the
+socket was indicated as ready and we go to the socket's write() function,
+a pending connection will then inevitably be identified as established.
+
+In fact, there are solutions to this issue :
+
+ - send() returns -EAGAIN if it cannot write, so that as long as there are
+ pending data in the buffer, we'll be informed about the status of the
+ connection
+
+ - connect() on an already pending connection will return -1 with errno set to
+ one of the following values :
+ - EALREADY : connection already in progress
+ - EISCONN : connection already established
+ - anything else will indicate an error.
+
+=> So instead of using getsockopt() on a pending connection with no data, we
+ will switch to connect(). This implies that the connection address must be
+ known within the socket's write() function.
+
+
diff --git a/doc/internals/connection-header.txt b/doc/internals/connection-header.txt
new file mode 100644
index 0000000..b74cea0
--- /dev/null
+++ b/doc/internals/connection-header.txt
@@ -0,0 +1,196 @@
+2010/01/16 - Connection header adjustments depending on the transaction mode.
+
+
+HTTP transactions supports 5 possible modes :
+
+ WANT_TUN : default, nothing changed
+ WANT_TUN + httpclose : headers set for close in both dirs
+ WANT_KAL : keep-alive desired in both dirs
+ WANT_SCL : want close with the server and KA with the client
+ WANT_CLO : want close on both sides.
+
+When only WANT_TUN is set, nothing is changed nor analysed, so for commodity
+below, we'll refer to WANT_TUN+httpclose as WANT_TUN.
+
+The mode is adjusted in 3 steps :
+ - configuration sets initial mode
+ - request headers set required request mode
+ - response headers set the final mode
+
+
+1) Adjusting the initial mode via the configuration
+
+ option httpclose => TUN
+ option http-keep-alive => KAL
+ option http-server-close => SCL
+ option forceclose => CLO
+
+Note that option httpclose combined with any other option is equivalent to
+forceclose.
+
+
+2) Adjusting the request mode once the request is parsed
+
+If we cannot determine the body length from the headers, we set the mode to CLO
+but later we'll switch to tunnel mode once forwarding the body. That way, all
+parties are informed of the correct mode.
+
+Depending on the request version and request Connection header, we may have to
+adjust the current transaction mode and to update the connection header.
+
+mode req_ver req_hdr new_mode hdr_change
+TUN 1.0 - TUN -
+TUN 1.0 ka TUN del_ka
+TUN 1.0 close TUN del_close
+TUN 1.0 both TUN del_ka, del_close
+
+TUN 1.1 - TUN add_close
+TUN 1.1 ka TUN del_ka, add_close
+TUN 1.1 close TUN -
+TUN 1.1 both TUN del_ka
+
+KAL 1.0 - CLO -
+KAL 1.0 ka KAL -
+KAL 1.0 close CLO del_close
+KAL 1.0 both CLO del_ka, del_close
+
+KAL 1.1 - KAL -
+KAL 1.1 ka KAL del_ka
+KAL 1.1 close CLO -
+KAL 1.1 both CLO del_ka
+
+SCL 1.0 - CLO -
+SCL 1.0 ka SCL del_ka
+SCL 1.0 close CLO del_close
+SCL 1.0 both CLO del_ka, del_close
+
+SCL 1.1 - SCL add_close
+SCL 1.1 ka SCL del_ka, add_close
+SCL 1.1 close CLO -
+SCL 1.1 both CLO del_ka
+
+CLO 1.0 - CLO -
+CLO 1.0 ka CLO del_ka
+CLO 1.0 close CLO del_close
+CLO 1.0 both CLO del_ka, del_close
+
+CLO 1.1 - CLO add_close
+CLO 1.1 ka CLO del_ka, add_close
+CLO 1.1 close CLO -
+CLO 1.1 both CLO del_ka
+
+=> Summary:
+ - KAL and SCL are only possible with the same requests :
+ - 1.0 + ka
+ - 1.1 + ka or nothing
+
+ - CLO is assumed for any non-TUN request which contains at least a close
+ header, as well as for any 1.0 request without a keep-alive header.
+
+ - del_ka is set whenever we want a CLO or SCL or TUN and req contains a KA,
+ or when the req is 1.1 and contains a KA.
+
+ - del_close is set whenever a 1.0 request contains a close.
+
+ - add_close is set whenever a 1.1 request must be switched to TUN, SCL, CLO
+ and did not have a close hdr.
+
+Note that the request processing is performed in two passes, one with the
+frontend's config and a second one with the backend's config. It is only
+possible to "raise" the mode between them, so during the second pass, we have
+no reason to re-add a header that we previously removed. As an exception, the
+TUN mode is converted to CLO once combined because in fact it's an httpclose
+option set on a TUN mode connection :
+
+ BE (2)
+ | TUN KAL SCL CLO
+ ----+----+----+----+----
+ TUN | TUN CLO CLO CLO
+ +
+ KAL | CLO KAL SCL CLO
+ FE +
+ (1) SCL | CLO SCL SCL CLO
+ +
+ CLO | CLO CLO CLO CLO
+
+
+3) Adjusting the final mode once the response is parsed
+
+This part becomes trickier. It is possible that the server responds with a
+version that the client does not necessarily understand. Obviously, 1.1 clients
+are assumed to understand 1.0 responses. The problematic case is a 1.0 client
+receiving a 1.1 response without any Connection header. Some 1.0 clients might
+know that in 1.1 this means "keep-alive" while others might ignore the version
+and assume a "close". Since we know the version on both sides, we may have to
+adjust some responses to remove any ambiguous case. That's the reason why the
+following table considers both the request and the response version. If the
+response length cannot be determined, we switch to CLO mode.
+
+mode res_ver res_hdr req_ver new_mode hdr_change
+TUN 1.0 - any TUN -
+TUN 1.0 ka any TUN del_ka
+TUN 1.0 close any TUN del_close
+TUN 1.0 both any TUN del_ka, del_close
+
+TUN 1.1 - any TUN add_close
+TUN 1.1 ka any TUN del_ka, add_close
+TUN 1.1 close any TUN -
+TUN 1.1 both any TUN del_ka
+
+KAL 1.0 - any SCL add_ka
+KAL 1.0 ka any KAL -
+KAL 1.0 close any SCL del_close, add_ka
+KAL 1.0 both any SCL del_close
+
+KAL 1.1 - 1.0 KAL add_ka
+KAL 1.1 - 1.1 KAL -
+KAL 1.1 ka 1.0 KAL -
+KAL 1.1 ka 1.1 KAL del_ka
+KAL 1.1 close 1.0 SCL del_close, add_ka
+KAL 1.1 close 1.1 SCL del_close
+KAL 1.1 both 1.0 SCL del_close
+KAL 1.1 both 1.1 SCL del_ka, del_close
+
+SCL 1.0 - any SCL add_ka
+SCL 1.0 ka any SCL -
+SCL 1.0 close any SCL del_close, add_ka
+SCL 1.0 both any SCL del_close
+
+SCL 1.1 - 1.0 SCL add_ka
+SCL 1.1 - 1.1 SCL -
+SCL 1.1 ka 1.0 SCL -
+SCL 1.1 ka 1.1 SCL del_ka
+SCL 1.1 close 1.0 SCL del_close, add_ka
+SCL 1.1 close 1.1 SCL del_close
+SCL 1.1 both 1.0 SCL del_close
+SCL 1.1 both 1.1 SCL del_ka, del_close
+
+CLO 1.0 - any CLO -
+CLO 1.0 ka any CLO del_ka
+CLO 1.0 close any CLO del_close
+CLO 1.0 both any CLO del_ka, del_close
+
+CLO 1.1 - any CLO add_close
+CLO 1.1 ka any CLO del_ka, add_close
+CLO 1.1 close any CLO -
+CLO 1.1 both any CLO del_ka
+
+=> in summary :
+ - the header operations do not depend on the initial mode, they only depend
+ on versions and current connection header(s).
+
+ - both CLO and TUN modes work similarly, they need to set a close mode on the
+ response. A 1.1 response will exclusively need the close header, while a 1.0
+ response will have it removed. Any keep-alive header is always removed when
+ found.
+
+ - a KAL request where the server wants to close turns into an SCL response so
+ that we release the server but still maintain the connection to the client.
+
+ - the KAL and SCL modes work the same way as we need to set keep-alive on the
+ response. So a 1.0 response will only have the keep-alive header with any
+ close header removed. A 1.1 response will have the keep-alive header added
+ for 1.0 requests and the close header removed for all requests.
+
+Note that the SCL and CLO modes will automatically cause the server connection
+to be closed at the end of the data transfer.
diff --git a/doc/internals/connection-scale.txt b/doc/internals/connection-scale.txt
new file mode 100644
index 0000000..7c3d902
--- /dev/null
+++ b/doc/internals/connection-scale.txt
@@ -0,0 +1,44 @@
+Problème des connexions simultanées avec un backend
+
+Pour chaque serveur, 3 cas possibles :
+
+ - pas de limite (par défaut)
+ - limite statique (maxconn)
+ - limite dynamique (maxconn/(ratio de px->conn), avec minconn)
+
+On a donc besoin d'une limite sur le proxy dans le cas de la limite
+dynamique, afin de fixer un seuil et un ratio. Ce qui compte, c'est
+le point après lequel on passe d'un régime linéaire à un régime
+saturé.
+
+On a donc 3 phases :
+
+ - régime minimal (0..srv->minconn)
+ - régime linéaire (srv->minconn..srv->maxconn)
+ - régime saturé (srv->maxconn..)
+
+Le minconn pourrait aussi ressortir du serveur ?
+En pratique, on veut :
+ - un max par serveur
+ - un seuil global auquel les serveurs appliquent le max
+ - un seuil minimal en-dessous duquel le nb de conn est
+ maintenu. Cette limite a un sens par serveur (jamais moins de X conns)
+ mais aussi en global (pas la peine de faire du dynamique en dessous de
+ X conns à répartir). La difficulté en global, c'est de savoir comment
+ on calcule le nombre min associé à chaque serveur, vu que c'est un ratio
+ défini à partir du max.
+
+Ca revient à peu près à la même chose que de faire 2 états :
+
+ - régime linéaire avec un offset (srv->minconn..srv->maxconn)
+ - régime saturé (srv->maxconn..)
+
+Sauf que dans ce cas, le min et le max sont bien par serveur, et le seuil est
+global et correspond à la limite de connexions au-delà de laquelle on veut
+tourner à plein régime sur l'ensemble des serveurs. On peut donc parler de
+passage en mode "full", "saturated", "optimal". On peut également parler de
+la fin de la partie "scalable", "dynamique".
+
+=> fullconn 1000 par exemple ?
+
+
diff --git a/doc/internals/fd-migration.txt b/doc/internals/fd-migration.txt
new file mode 100644
index 0000000..aaddad3
--- /dev/null
+++ b/doc/internals/fd-migration.txt
@@ -0,0 +1,138 @@
+2021-07-30 - File descriptor migration between threads
+
+An FD migration may happen on any idle connection that experiences a takeover()
+operation by another thread. In this case the acting thread becomes the owner
+of the connection (and FD) while previous one(s) need to forget about it.
+
+File descriptor migration between threads is a fairly complex operation because
+it is required to maintain a durable consistency between the pollers' states
+and haproxy's desired state. Indeed, very often the FD is registered within one
+thread's poller and that thread might be waiting in the system, so there is no
+way to synchronously update it. This is where thread_mask, polled_mask and per
+thread updates are used:
+
+ - a thread knows if it's allowed to manipulate an FD by looking at its bit in
+ the FD's thread_mask ;
+
+ - each thread knows if it was polling an FD by looking at its bit in the
+ polled_mask field ; a recent migration is usually indicated by a bit being
+ present in polled_mask and absent from thread_mask.
+
+ - other threads know whether it's safe to take over an FD by looking at the
+ running mask: if it contains any other thread's bit, then other threads are
+ using it and it's not safe to take it over.
+
+ - sleeping threads are notified about the need to update their polling via
+ local or global updates to the FD. Each thread has its own local update
+ list and its own bit in the update_mask to know whether there are pending
+ updates for it. This allows to reconverge polling with the desired state
+ at the last instant before polling.
+
+While the description above could be seen as "progressive" (it technically is)
+in that there is always a transition and convergence period in a migrated FD's
+life, functionally speaking it's perfectly atomic thanks to the running bit and
+to the per-thread idle connections lock: no takeover is permitted without
+holding the idle_conns lock, and takeover may only happen by atomically picking
+a connection from the list that is also protected by this lock. In practice, an
+FD is never taken over by itself, but always in the context of a connection,
+and by atomically removing a connection from an idle list, it is possible to
+guarantee that a connection will not be picked, hence that its FD will not be
+taken over.
+
+same thread as list!
+
+The possible entry points to a race to use a file descriptor are the following
+ones, with their respective sequences:
+
+ 1) takeover: requested by conn_backend_get() on behalf of connect_server()
+ - take the idle_conns_lock, protecting against a parallel access from the
+ I/O tasklet or timeout task
+ - pick the first connection from the list
+ - attempt an fd_takeover() on this connection's fd. Usually it works,
+ unless a late wakeup of the owning thread shows up in the FD's running
+ mask. The operation is performed in fd_takeover() using a DWCAS which
+ tries to switch both running and thread_mask to the caller's tid_bit. A
+ concurrent bit in running is enough to make it fail. This guarantees
+ another thread does not wakeup from I/O in the middle of the takeover.
+ In case of conflict, this FD is skipped and the attempt is tried again
+ with the next connection.
+ - resets the task/tasklet contexts to NULL, as a signal that they are not
+ allowed to run anymore. The tasks retrieve their execution context from
+ the scheduler in the arguments, but will check the tasks' context from
+ the structure under the lock to detect this possible change, and abort.
+ - at this point the takeover succeeded, the idle_conns_lock is released and
+ the connection and its FD are now owned by the caller
+
+ 2) poll report: happens on late rx, shutdown or error on idle conns
+ - fd_set_running() is called to atomically set the running_mask and check
+ that the caller's tid_bit is still present in the thread_mask. Upon
+ failure the caller arranges itself to stop reporting that FD (e.g. by
+ immediate removal or by an asynchronous update). Upon success, it's
+ guaranteed that any concurrent fd_takeover() will fail the DWCAS and that
+ another connection will need to be picked instead.
+ - FD's state is possibly updated
+ - the iocb is called if needed (almost always)
+ - if the iocb didn't kill the connection, release the bit from running_mask
+ making the connection possibly available to a subsequent fd_takeover().
+
+ 3) I/O tasklet, timeout task: timeout or subscribed wakeup
+ - start by taking the idle_conns_lock, ensuring no takeover() will pick the
+ same connection from this point.
+ - check the task/tasklet's context to verify that no recently completed
+ takeover() stole the connection. If it's NULL, the connection was lost,
+ the lock is released and the task/tasklet killed. Otherwise it is
+ guaranteed that no other thread may use that connection (current takeover
+ candidates are waiting on the lock, previous owners waking from poll()
+ lost their bit in the thread_mask and will not touch the FD).
+ - the connection is removed from the idle conns list. From this point on,
+ no other thread will even find it there nor even try fd_takeover() on it.
+ - the idle_conns_lock is now released, the connection is protected and its
+ FD is not reachable by other threads anymore.
+ - the task does what it has to do
+ - if the connection is still usable (i.e. not upon timeout), it's inserted
+ again into the idle conns list, meaning it may instantly be taken over
+ by a competing thread.
+
+ 4) wake() callback: happens on last user after xfers (may free() the conn)
+ - the connection is still owned by the caller, it's still subscribed to
+ polling but the connection is idle thus inactive. Errors or shutdowns
+ may be reported late, via sock_conn_iocb() and conn_notify_mux(), thus
+ the running bit is set (i.e. a concurrent fd_takeover() will fail).
+ - if the connection is in the list, the idle_conns_lock is grabbed, the
+ connection is removed from the list, and the lock is released.
+ - mux->wake() is called
+ - if the connection previously was in the list, it's reinserted under the
+ idle_conns_lock.
+
+
+With the DWCAS removal between running_mask & thread_mask:
+
+fd_takeover:
+ 1 if (!CAS(&running_mask, 0, tid_bit))
+ 2 return fail;
+ 3 atomic_store(&thread_mask, tid_bit);
+ 4 atomic_and(&running_mask, ~tid_bit);
+
+poller:
+ 1 do {
+ 2 /* read consistent running_mask & thread_mask */
+ 3 do {
+ 4 run = atomic_load(&running_mask);
+ 5 thr = atomic_load(&thread_mask);
+ 6 } while (run & ~thr);
+ 7
+ 8 if (!(thr & tid_bit)) {
+ 9 /* takeover has started */
+ 10 goto disable_fd;
+ 11 }
+ 12 } while (!CAS(&running_mask, run, run | tid_bit));
+
+fd_delete:
+ 1 atomic_or(&running_mask, tid_bit);
+ 2 atomic_store(&thread_mask, 0);
+ 3 atomic_and(&running_mask, ~tid_bit);
+
+The loop in poller:3-6 is used to make sure the thread_mask we read matches
+the last updated running_mask. If nobody can give up on fd_takeover(), it
+might even be possible to spin on thread_mask only. Late pollers will not
+set running anymore with this.
diff --git a/doc/internals/hashing.txt b/doc/internals/hashing.txt
new file mode 100644
index 0000000..260b6af
--- /dev/null
+++ b/doc/internals/hashing.txt
@@ -0,0 +1,83 @@
+2013/11/20 - How hashing works internally in haproxy - maddalab@gmail.com
+
+This document describes how HAProxy implements both map-based and consistent
+hashing as they existed prior to version 1.5, as well as the motivation and
+tests behind the additional options provided starting in version 2.4
+
+A note on hashing in general: hash functions strive to have little
+correlation between input and output. The heart of a hash function is its
+mixing step. The behavior of the mixing step largely determines whether the
+hash function is collision-resistant. Hash functions that are collision
+resistant are more likely to have an even distribution of load.
+
+The purpose of the mixing function is to spread the effect of each message
+bit throughout all the bits of the internal state. Ideally every bit in the
+hash state is affected by every bit in the message. And we want to do that
+as quickly as possible simply for the sake of program performance. A
+function is said to satisfy the strict avalanche criterion if, whenever a
+single input bit is complemented (toggled between 0 and 1), each of the
+output bits should change with a probability of one half for an arbitrary
+selection of the remaining input bits.
+
+To guard against a combination of hash function and input that results in
+high rate of collisions, haproxy implements an avalanche algorithm on the
+result of the hashing function. In all versions 1.4 and prior avalanche is
+always applied when using the consistent hashing directive. It is intended
+to provide quite a good distribution for little input variations. The result
+is quite suited to fit over a 32-bit space with enough variations so that
+a randomly picked number falls equally before any server position, which is
+ideal for consistently hashed backends, a common use case for caches.
+
+In all versions 1.4 and prior HAProxy implements the SDBM hashing function.
+However tests show that alternatives to SDBM have a better cache
+distribution on different hashing criteria. Through additional tests with
+alternatives for hash input and an option to trigger avalanche, we found
+that different algorithms perform better on different criteria. DJB2 performs
+well when hashing ascii text and is a good choice when hashing on host
+header. Other alternatives perform better on numbers and are a good choice
+when using source ip. The results also vary by use of the avalanche flag.
+
+The results of the testing can be found under the tests folder. Here is
+a summary of the discussion on the results on 1 input criteria and the
+methodology used to generate the results.
+
+A note on the setup: when validating the results independently, one
+would want to avoid backend server counts that may skew the results. As
+an example with DJB2 avoid 33 servers. Please see the implementations of
+the hashing function, which can be found in the links under references.
+
+The following was the set up used
+
+(a) hash-type consistent/map-based
+(b) avalanche on/off
+(c) balance hdr(host)
+(d) 3 criteria for inputs
+ - ~ 10K requests, including duplicates
+ - ~ 46K requests, unique requests from 1 MM requests were obtained
+ - ~ 250K requests, including duplicates
+(e) 17 servers in backend, all servers were assigned the same weight
+
+Result of the hashing were obtained across the server via monitoring log
+files for haproxy. Population standard deviation was used to evaluate the
+efficacy of the hashing algorithm. A lower standard deviation indicates
+a better distribution of load across the backends.
+
+On 10K requests, when using consistent hashing with avalanche on host
+headers, DJB2 significantly outperforms SDBM. Std dev on SDBM was 48.95
+and DJB2 was 26.29. This relationship is inverted with avalanche disabled,
+however DJB2 with avalanche enabled outperforms SDBM with avalanche
+disabled.
+
+On map-based hashing SDBM outperforms DJB2 irrespective of the avalanche
+option. SDBM without avalanche is marginally better than with avalanche.
+DJB2 performs significantly worse with avalanche enabled.
+
+Summary: The results of the testing indicate that there isn't a hashing
+algorithm that can be applied across all input criteria. It is necessary
+to support alternatives to SDBM, which is generally the best option, with
+algorithms that are better for different inputs. Avalanche is not always
+applicable and may result in less smooth distribution.
+
+References:
+Mixing Functions/Avalanche: https://papa.bretmulvey.com/post/124027987928/hash-functions
+Hash Functions: http://www.cse.yorku.ca/~oz/hash.html
diff --git a/doc/internals/list.fig b/doc/internals/list.fig
new file mode 100644
index 0000000..aeb1f1d
--- /dev/null
+++ b/doc/internals/list.fig
@@ -0,0 +1,599 @@
+#FIG 3.2 Produced by xfig version 2.4
+Landscape
+Center
+Metric
+A4
+119.50
+Single
+-2
+1200 2
+6 3960 3420 4320 4230
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 4230 3860 4005 3860
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 4005 3510 4230 3510 4230 4185 4005 4185 4005 3510
+4 1 0 50 0 14 10 0.0000 4 105 105 4120 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 4118 3735 N\001
+-6
+6 4185 5580 4545 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 4455 6020 4230 6020
+2 2 0 2 0 4 53 0 20 0.000 0 0 -1 0 0 5
+ 4230 5670 4455 5670 4455 6345 4230 6345 4230 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 4345 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 4343 5895 N\001
+-6
+6 4905 5445 5445 6525
+6 4905 5445 5445 6525
+6 5085 5580 5445 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 5355 6020 5130 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 5130 5670 5355 5670 5355 6345 5130 6345 5130 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 5245 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 5243 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 5355 5670 4905 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 4905 6345 5355 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 4905 5445 5355 5445 5355 6525 4905 6525 4905 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 5040 6075 L\001
+-6
+-6
+6 5805 5445 6345 6525
+6 5805 5445 6345 6525
+6 5985 5580 6345 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 6255 6020 6030 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 6030 5670 6255 5670 6255 6345 6030 6345 6030 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 6145 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 6143 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 6255 5670 5805 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 5805 6345 6255 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 5805 5445 6255 5445 6255 6525 5805 6525 5805 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 5940 6075 L\001
+-6
+-6
+6 6705 5445 7245 6525
+6 6705 5445 7245 6525
+6 6885 5580 7245 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7155 6020 6930 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 6930 5670 7155 5670 7155 6345 6930 6345 6930 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 7045 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 7043 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7155 5670 6705 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 6705 6345 7155 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 6705 5445 7155 5445 7155 6525 6705 6525 6705 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 6840 6075 L\001
+-6
+-6
+6 450 5580 810 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 720 6020 495 6020
+2 2 0 2 0 4 53 0 20 0.000 0 0 -1 0 0 5
+ 495 5670 720 5670 720 6345 495 6345 495 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 610 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 608 5895 N\001
+-6
+6 1170 5445 1710 6525
+6 1170 5445 1710 6525
+6 1350 5580 1710 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 1620 6020 1395 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 1395 5670 1620 5670 1620 6345 1395 6345 1395 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 1510 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 1508 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 1620 5670 1170 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 1170 6345 1620 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 1170 5445 1620 5445 1620 6525 1170 6525 1170 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 1305 6075 L\001
+-6
+-6
+6 2070 5445 2610 6525
+6 2070 5445 2610 6525
+6 2250 5580 2610 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2520 6020 2295 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 2295 5670 2520 5670 2520 6345 2295 6345 2295 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 2410 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 2408 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2520 5670 2070 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2070 6345 2520 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 2070 5445 2520 5445 2520 6525 2070 6525 2070 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 2205 6075 L\001
+-6
+-6
+6 2970 5445 3510 6525
+6 2970 5445 3510 6525
+6 3150 5580 3510 6390
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 3420 6020 3195 6020
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 3195 5670 3420 5670 3420 6345 3195 6345 3195 5670
+4 1 0 50 0 14 10 0.0000 4 105 105 3310 6222 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 3308 5895 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 3420 5670 2970 5670
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2970 6345 3420 6345
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 2970 5445 3420 5445 3420 6525 2970 6525 2970 5445
+4 1 0 50 0 14 12 0.0000 4 120 120 3105 6075 L\001
+-6
+-6
+6 720 3420 1080 4230
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 990 3860 765 3860
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 765 3510 990 3510 990 4185 765 4185 765 3510
+4 1 0 50 0 14 10 0.0000 4 105 105 880 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 878 3735 N\001
+-6
+6 2700 3420 3060 4230
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2970 3860 2745 3860
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 2745 3510 2970 3510 2970 4185 2745 4185 2745 3510
+4 1 0 50 0 14 10 0.0000 4 105 105 2860 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 2858 3735 N\001
+-6
+6 1620 3465 1935 4230
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 1890 3860 1665 3860
+2 2 0 2 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 1665 3510 1890 3510 1890 4185 1665 4185 1665 3510
+4 1 0 50 0 14 10 0.0000 4 105 105 1780 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 1778 3735 N\001
+-6
+6 10485 3330 11025 4410
+6 10665 3465 11025 4275
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 10935 3905 10710 3905
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 10710 3555 10935 3555 10935 4230 10710 4230 10710 3555
+4 1 0 50 0 14 10 0.0000 4 105 105 10825 4107 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 10823 3780 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 10935 3555 10485 3555
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 10485 4230 10935 4230
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 10485 3330 10935 3330 10935 4410 10485 4410 10485 3330
+4 1 0 50 0 14 12 0.0000 4 120 120 10620 3960 L\001
+-6
+6 7110 3105 7650 4185
+6 7110 3105 7650 4185
+6 7290 3240 7650 4050
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7560 3680 7335 3680
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 7335 3330 7560 3330 7560 4005 7335 4005 7335 3330
+4 1 0 50 0 14 10 0.0000 4 105 105 7450 3882 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 7448 3555 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7560 3330 7110 3330
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7110 4005 7560 4005
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 7110 3105 7560 3105 7560 4185 7110 4185 7110 3105
+4 1 0 50 0 14 12 0.0000 4 120 120 7245 3735 L\001
+-6
+-6
+6 8010 3105 8550 4185
+6 8010 3105 8550 4185
+6 8190 3240 8550 4050
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 8460 3680 8235 3680
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 8235 3330 8460 3330 8460 4005 8235 4005 8235 3330
+4 1 0 50 0 14 10 0.0000 4 105 105 8350 3882 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 8348 3555 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 8460 3330 8010 3330
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 8010 4005 8460 4005
+2 2 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 8010 3105 8460 3105 8460 4185 8010 4185 8010 3105
+4 1 0 50 0 14 12 0.0000 4 120 120 8145 3735 L\001
+-6
+-6
+6 9315 990 12195 2160
+6 9675 1080 10035 1890
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 9945 1520 9720 1520
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 9720 1170 9945 1170 9945 1845 9720 1845 9720 1170
+4 1 0 50 0 14 10 0.0000 4 105 105 9835 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 9833 1395 N\001
+-6
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 10935 1520 10710 1520
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 11925 1520 11700 1520
+2 2 0 2 0 7 52 0 20 0.000 0 0 -1 0 0 5
+ 10710 1170 10935 1170 10935 1845 10710 1845 10710 1170
+2 2 0 2 0 6 52 0 20 0.000 0 0 -1 0 0 5
+ 11700 1170 11925 1170 11925 1845 11700 1845 11700 1170
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9945 1350 10665 1350
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10935 1350 11655 1350
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 11925 1350 12105 1350 12195 1350 12195 990 9315 990 9315 1350
+ 9495 1350 9675 1350
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 9675 1710 9495 1710 9315 1710 9405 2160 12195 2160 12195 1710
+ 12105 1710 11925 1710
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 11655 1710 10935 1710
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10665 1710 9945 1710
+ 0.000 0.000
+4 1 0 50 0 14 10 0.0000 4 105 105 10825 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 10823 1395 N\001
+4 1 0 50 0 14 10 0.0000 4 105 105 11815 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 11813 1395 N\001
+-6
+6 6345 1080 6705 1890
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 6615 1520 6390 1520
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 6390 1170 6615 1170 6615 1845 6390 1845 6390 1170
+4 1 0 50 0 14 10 0.0000 4 105 105 6505 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 6503 1395 N\001
+-6
+6 7335 1080 7695 1890
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 7605 1520 7380 1520
+2 2 0 2 0 6 52 0 20 0.000 0 0 -1 0 0 5
+ 7380 1170 7605 1170 7605 1845 7380 1845 7380 1170
+4 1 0 50 0 14 10 0.0000 4 105 105 7495 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 7493 1395 N\001
+-6
+6 8325 1080 8685 1890
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 8595 1520 8370 1520
+2 2 0 2 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 8370 1170 8595 1170 8595 1845 8370 1845 8370 1170
+4 1 0 50 0 14 10 0.0000 4 105 105 8485 1722 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 8483 1395 N\001
+-6
+6 3870 1215 4185 1980
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 4140 1610 3915 1610
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 3915 1260 4140 1260 4140 1935 3915 1935 3915 1260
+4 1 0 50 0 14 10 0.0000 4 105 105 4030 1812 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 4028 1485 N\001
+-6
+6 4770 1215 5085 1980
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 5040 1610 4815 1610
+2 2 0 2 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 4815 1260 5040 1260 5040 1935 4815 1935 4815 1260
+4 1 0 50 0 14 10 0.0000 4 105 105 4930 1812 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 4928 1485 N\001
+-6
+6 2205 990 2925 2160
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 2655 1610 2430 1610
+2 2 0 2 0 2 53 0 20 0.000 0 0 -1 0 0 5
+ 2430 1260 2655 1260 2655 1935 2430 1935 2430 1260
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 6
+ 1 1 1.00 60.00 120.00
+ 2655 1440 2880 1440 2880 1035 2205 1035 2205 1440 2430 1440
+ 0.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 6
+ 1 1 1.00 60.00 120.00
+ 2655 1755 2880 1755 2880 2160 2205 2160 2205 1755 2430 1755
+ 0.000 1.000 1.000 1.000 1.000 0.000
+4 1 0 50 0 14 10 0.0000 4 105 105 2545 1812 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 2543 1485 N\001
+-6
+6 525 1350 1455 1830
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 540 1590 1440 1590
+2 2 0 2 0 7 50 0 -1 0.000 0 0 -1 0 0 5
+ 540 1365 1440 1365 1440 1815 540 1815 540 1365
+4 1 0 50 0 14 10 0.0000 4 105 735 990 1545 list *N\001
+4 1 0 50 0 14 10 0.0000 4 105 735 990 1770 list *P\001
+-6
+6 4815 3420 5175 4230
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 5085 3860 4860 3860
+2 2 0 2 0 7 53 0 20 0.000 0 0 -1 0 0 5
+ 4860 3510 5085 3510 5085 4185 4860 4185 4860 3510
+4 1 0 50 0 14 10 0.0000 4 105 105 4975 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 4973 3735 N\001
+-6
+6 5715 3285 6390 4410
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 0 0 2
+ 6165 3860 5940 3860
+2 2 0 2 0 6 53 0 20 0.000 0 0 -1 0 0 5
+ 5940 3510 6165 3510 6165 4185 5940 4185 5940 3510
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 6
+ 1 1 1.00 60.00 120.00
+ 6165 3690 6390 3690 6390 3285 5715 3285 5715 3690 5940 3690
+ 0.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 6
+ 1 1 1.00 60.00 120.00
+ 6165 4005 6390 4005 6390 4410 5715 4410 5715 4005 5940 4005
+ 0.000 1.000 1.000 1.000 1.000 0.000
+4 1 0 50 0 14 10 0.0000 4 105 105 6055 4062 P\001
+4 1 0 50 0 14 10 0.0000 4 105 105 6053 3735 N\001
+-6
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 4050 4725 7605 4725 7605 6840 4050 6840 4050 4725
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 315 4725 3870 4725 3870 6840 315 6840 315 4725
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 3150 4500 315 4500 315 2475 3150 2475 3150 4500
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 6660 2475 8910 2475 8910 4500 6660 4500 6660 2475
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10035 3375 10485 3330
+2 1 0 1 0 7 50 0 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10080 3735 10485 3555
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 9135 2475 12285 2475 12285 4500 9135 4500 9135 2475
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 9270 270 12285 270 12285 2250 9270 2250 9270 270
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 5760 270 9045 270 9045 2250 5760 2250 5760 270
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 3465 270 5535 270 5535 2250 3465 2250 3465 270
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 1845 270 3240 270 3240 2250 1845 2250 1845 270
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 315 270 1620 270 1620 2250 315 2250 315 270
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 3330 2475 6435 2475 6435 4500 3330 4500 3330 2475
+2 4 0 1 0 7 50 0 -1 0.000 0 0 7 0 0 5
+ 12285 6840 12285 4725 7785 4725 7785 6840 12285 6840
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4230 3690 4860 3690
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4860 4050 4230 4050
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 3960 4050 3780 4050 3600 4050 3600 4410 5580 4410 5580 4050
+ 5130 4050
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6261 5805 6711 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4461 5805 4911 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5358 5805 5808 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6705 6210 6255 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5805 6210 5355 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4905 6210 4455 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 4320 6345 4320 6525 4320 6750 7470 6750 7470 6480 7470 6210
+ 7155 6210
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 7155 5850 7335 5850 7470 5850 7470 5355 7470 5085 4590 5085
+ 4590 5355 4860 5625
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2526 5805 2976 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 726 5805 1176 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1623 5805 2073 5670
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2970 6210 2520 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2070 6210 1620 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1170 6210 720 6210
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 585 6345 585 6525 585 6750 3735 6750 3735 6480 3735 6210
+ 3420 6210
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 3420 5850 3600 5850 3735 5850 3735 5355 3735 5085 585 5085
+ 585 5265 585 5670
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 990 3690 1620 3690
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1620 4050 990 4050
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 1890 3690 2340 3690 2340 3240 360 3240 360 3690 540 3690
+ 720 3690
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 720 4050 540 4050 360 4050 360 4410 2340 4410 2340 4050
+ 1890 4050
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7560 3465 8010 3330
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7560 3915 8010 3375
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 8460 3465 8775 3465 8820 3060 8730 2745 6750 2745 6705 3330
+ 7110 3330
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 8460 3870 8820 3870 8820 4230 8640 4365 6930 4365 6750 4230
+ 6705 3510 7065 3375
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 6615 1350 7335 1350
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7605 1350 8325 1350
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 8595 1350 8775 1350 8865 1350 8865 990 5985 990 5985 1350
+ 6165 1350 6345 1350
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 8
+ 1 1 1.00 60.00 120.00
+ 6345 1710 6165 1710 5985 1710 6075 2160 8865 2160 8865 1710
+ 8775 1710 8595 1710
+ 0.000 1.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 8325 1710 7605 1710
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7335 1710 6615 1710
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4140 1440 4770 1440
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 4770 1800 4140 1800
+ 0.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 5040 1440 5490 1440 5490 990 3510 990 3510 1440 3690 1440
+ 3870 1440
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 3870 1800 3690 1800 3510 1800 3510 2160 5490 2160 5490 1800
+ 5040 1800
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 0 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 5130 3690 5580 3690 5580 3240 3600 3240 3600 3690 3780 3690
+ 3960 3690
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+4 1 0 50 0 14 10 0.0000 4 135 3780 5805 4950 Asymmetrical list starting at R(red)\001
+4 1 0 50 0 14 10 0.0000 4 135 4095 10215 4950 Symmetrical lists vs Asymmetrical lists\001
+4 1 0 50 0 12 10 0.0000 4 135 525 5130 5355 foo_0\001
+4 1 0 50 0 12 10 0.0000 4 135 525 6030 5355 foo_1\001
+4 1 0 50 0 12 10 0.0000 4 135 525 6930 5355 foo_2\001
+4 1 0 50 0 14 10 0.0000 4 135 3675 2070 4950 Symmetrical list starting at R(red)\001
+4 1 0 50 0 12 10 0.0000 4 135 525 3195 5355 foo_2\001
+4 1 0 50 0 12 10 0.0000 4 135 525 2295 5355 foo_1\001
+4 1 0 50 0 12 10 0.0000 4 135 525 1395 5355 foo_0\001
+4 1 0 50 0 12 10 0.0000 4 105 315 9855 3420 foo\001
+4 1 0 50 0 12 10 0.0000 4 105 105 9990 3825 E\001
+4 1 0 50 0 14 10 0.0000 4 135 1680 7785 2655 Linking elements\001
+4 1 0 50 0 12 10 0.0000 4 135 525 8235 3015 foo_1\001
+4 1 0 50 0 12 10 0.0000 4 135 525 7335 3015 foo_0\001
+4 1 0 50 0 14 10 0.0000 4 105 1470 2565 675 struct list *G\001
+4 1 0 50 0 14 10 0.0000 4 135 1470 2565 495 LIST_INIT(G):G\001
+4 1 0 50 0 14 10 0.0000 4 135 1890 4500 495 LIST_INSERT(G,W):W\001
+4 1 0 50 0 14 10 0.0000 4 135 3360 10665 2700 foo=LIST_ELEM(E, struct foo*, L)\001
+4 1 0 50 0 14 10 0.0000 4 135 1890 4500 675 LIST_APPEND(G,W):W\001
+4 1 0 50 0 14 10 0.0000 4 135 1890 7425 540 LIST_INSERT(G,Y):Y\001
+4 1 0 50 0 14 10 0.0000 4 135 1890 10755 540 LIST_APPEND(G,Y):Y\001
+4 1 0 50 0 14 10 0.0000 4 135 1680 1755 2790 LIST_DELETE(Y):Y\001
+4 1 0 50 0 14 10 0.0000 4 135 1890 4905 2745 LIST_DEL_INIT(Y):Y\001
+4 1 0 50 0 12 9 0.0000 4 120 2880 10665 2925 Returns a pointer to struct foo*\001
+4 1 0 50 0 12 9 0.0000 4 120 2790 10665 3105 containing header E as member L\001
+4 1 0 50 0 12 9 0.0000 4 120 2700 10755 810 adds Y at the queue (before G)\001
+4 1 0 50 0 12 9 0.0000 4 120 3060 7425 810 adds Y(yellow) just after G(green)\001
+4 1 0 50 0 12 9 0.0000 4 120 1170 4500 855 adds W(white)\001
+4 1 0 50 0 12 9 0.0000 4 105 1080 2565 900 Terminates G\001
+4 1 0 50 0 12 9 0.0000 4 90 540 990 855 N=next\001
+4 1 0 50 0 12 9 0.0000 4 105 540 990 1080 P=prev\001
+4 1 0 50 0 12 9 0.0000 4 120 2610 1755 3060 unlinks and returns Y(yellow)\001
+4 1 0 50 0 12 9 0.0000 4 120 2610 4905 3060 unlinks, inits, and returns Y\001
+4 0 0 50 0 12 8 0.0000 4 105 2175 7875 5265 - both are empty if R->P == R\001
+4 0 0 50 0 12 8 0.0000 4 90 2175 7875 5490 - last element has R->P == &L\001
+4 0 0 50 0 12 8 0.0000 4 105 3150 7875 5715 - FOREACH_ITEM(it, R, end, struct foo*, L)\001
+4 0 0 50 0 12 8 0.0000 4 105 3300 7875 5940 iterates <it> through foo{0,1,2} and stops\001
+4 0 0 50 0 12 8 0.0000 4 105 3900 7875 6165 - FOREACH_ITEM_SAFE(it, bck, R, end, struct foo*, L)\001
+4 0 0 50 0 12 8 0.0000 4 105 3750 7875 6390 does the same except that <bck> allows to delete\001
+4 0 0 50 0 12 8 0.0000 4 105 1950 7875 6570 any node, including <it>\001
+4 1 0 50 0 14 11 0.0000 4 135 1155 945 585 struct list\001
diff --git a/doc/internals/list.png b/doc/internals/list.png
new file mode 100644
index 0000000..ec41a6b
--- /dev/null
+++ b/doc/internals/list.png
Binary files differ
diff --git a/doc/internals/listener-states.fig b/doc/internals/listener-states.fig
new file mode 100644
index 0000000..863e7f5
--- /dev/null
+++ b/doc/internals/listener-states.fig
@@ -0,0 +1,150 @@
+#FIG 3.2 Produced by xfig version 2.3
+Portrait
+Center
+Metric
+A4
+300.00
+Single
+-2
+1200 2
+0 32 #ff60e0
+0 33 #ff8020
+0 34 #56c5ff
+0 35 #55d941
+0 36 #f8e010
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 900 450 495 225 900 450 1395 450
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 2700 450 495 225 2700 450 3195 450
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 4500 450 495 225 4500 450 4995 450
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 900 3465 495 225 900 3465 1395 3465
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 2700 2475 495 225 2700 2475 3195 2475
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 3645 1575 495 225 3645 1575 4140 1575
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 4500 2475 495 225 4500 2475 4995 2475
+1 1 0 3 0 7 51 -1 20 0.000 1 0.0000 2700 3471 495 225 2700 3471 3195 3471
+2 1 1 3 1 7 52 -1 -1 8.000 1 0 -1 0 0 2
+ 270 1980 5355 1350
+2 2 0 2 32 32 52 -1 20 0.000 1 0 -1 0 0 5
+ 2070 3060 3330 3060 3330 3870 2070 3870 2070 3060
+2 3 0 1 33 33 53 -1 20 0.000 1 0 -1 0 0 5
+ 2070 990 5130 990 5130 2880 2070 2880 2070 990
+2 2 0 2 35 35 52 -1 20 0.000 1 0 -1 0 0 5
+ 270 90 5130 90 5130 855 270 855 270 90
+2 2 0 2 36 36 52 -1 20 0.000 1 0 -1 0 0 5
+ 270 3060 1530 3060 1530 3870 270 3870 270 3060
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1395 450 2250 450
+ 0.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3195 450 4050 450
+ 0.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 4095 1665 4455 2025 4500 2250
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 3195 3510 3600 3465 4140 2655
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 4410 2250 4365 2070 4050 1710
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 945 3240 936 2142 2961 1917 3240 1710
+ 0.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 3195 1665 2835 1845 855 2115 855 3240
+ 0.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 5
+ 1 1 1.00 60.00 120.00
+ 990 3690 1035 3960 2880 4050 4365 3915 4410 2700
+ 0.000 1.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2700 2700 2700 3240
+ 0.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 4095 2610 3600 3375 3150 3420
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 5
+ 1 1 1.00 60.00 120.00
+ 4500 2700 4455 4005 2655 4140 945 4005 900 3690
+ 0.000 1.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 2205 2520 1395 2745 1260 2970 1125 3240
+ 0.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 3510 1800 3330 2025 3330 2835 2970 3285
+ 0.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 1170 3285 1305 3015 1485 2790 2250 2610
+ 0.000 1.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 2205 3420 1710 3420 1395 3465
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 1395 3510 1800 3510 2205 3465
+ 0.000 1.000 0.000
+3 0 0 3 0 7 51 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 2925 2295 3060 1980 3330 1755
+ 0.000 1.000 0.000
+3 0 0 3 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 4500 675 4455 990 3960 1395
+ 0.000 1.000 0.000
+4 1 0 50 -1 18 10 0.0000 4 120 375 900 450 NEW\001
+4 1 0 50 -1 18 10 0.0000 4 120 315 2700 450 INIT\001
+4 1 0 50 -1 18 10 0.0000 4 120 810 4500 450 ASSIGNED\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 900 630 0\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 2700 630 1\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 4500 630 2\001
+4 1 0 50 -1 16 7 0.0000 4 120 420 1755 405 create()\001
+4 2 0 50 -1 16 7 0.0000 4 120 660 1215 2160 enable() &&\001
+4 2 0 50 -1 16 7 0.0000 4 90 540 1080 2295 !maxconn\001
+4 2 1 51 -1 16 7 1.5708 4 105 600 5355 1485 transitions\001
+4 0 1 51 -1 16 7 1.5708 4 105 600 5355 1260 transitions\001
+4 2 1 51 -1 16 7 1.5708 4 105 795 5265 1485 multi-threaded\001
+4 0 1 51 -1 16 7 1.5708 4 120 870 5265 1260 single-threaded\001
+4 0 0 52 -1 17 7 0.0000 4 90 345 315 765 no FD\001
+4 0 0 52 -1 17 7 0.0000 4 135 315 315 3825 polled\001
+4 1 0 50 -1 18 10 0.0000 4 120 555 900 3465 READY\001
+4 0 0 50 -1 16 7 0.0000 4 120 255 1170 3825 full()\001
+4 2 0 50 -1 16 7 0.0000 4 90 540 2205 3375 !maxconn\001
+4 2 0 50 -1 16 7 0.0000 4 105 675 2295 3240 resume() &&\001
+4 0 0 50 -1 16 7 0.0000 4 105 405 1395 3645 pause()\001
+4 0 0 52 -1 17 7 0.0000 4 135 585 2115 3825 shut(sock)\001
+4 2 0 50 -1 16 7 0.0000 4 120 480 4320 2205 disable()\001
+4 2 0 50 -1 16 7 0.0000 4 105 405 4005 2655 pause()\001
+4 0 0 50 -1 16 7 0.0000 4 105 465 4545 2835 resume()\001
+4 2 0 50 -1 16 7 0.0000 4 120 480 2925 2160 disable()\001
+4 0 0 50 -1 16 7 0.0000 4 105 405 3465 1980 pause()\001
+4 0 0 50 -1 16 7 0.0000 4 120 660 4230 1710 enable() &&\001
+4 0 0 50 -1 16 7 0.0000 4 75 510 4320 1845 maxconn\001
+4 2 0 50 -1 16 7 0.0000 4 105 405 2655 2835 pause()\001
+4 0 0 50 -1 16 7 0.0000 4 105 675 3375 3555 resume() &&\001
+4 0 0 50 -1 16 7 0.0000 4 75 510 3375 3645 maxconn\001
+4 0 0 50 -1 16 7 0.0000 4 120 480 1080 2655 disable()\001
+4 2 0 50 -1 16 7 0.0000 4 105 465 2160 2475 resume()\001
+4 1 0 50 -1 16 7 0.0000 4 120 330 3555 405 .add()\001
+4 0 0 50 -1 16 7 0.0000 4 120 375 4545 810 .bind()\001
+4 0 0 52 -1 17 7 0.0000 4 135 1080 2115 1125 FD ready, not polled\001
+4 0 0 50 -1 16 7 0.0000 4 120 315 1305 3240 limit()\001
+4 1 0 50 -1 18 10 0.0000 4 120 630 2700 2475 LIMITED\001
+4 1 0 50 -1 18 10 0.0000 4 120 555 3645 1575 LISTEN\001
+4 1 0 50 -1 18 10 0.0000 4 120 375 4500 2475 FULL\001
+4 1 0 50 -1 18 10 0.0000 4 120 630 2700 3465 PAUSED\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 2700 3645 3\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 2700 2655 7\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 4500 2655 6\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 900 3645 5\001
+4 1 1 50 -1 16 10 0.0000 4 120 90 3645 1755 4\001
diff --git a/doc/internals/listener-states.png b/doc/internals/listener-states.png
new file mode 100644
index 0000000..8757a12
--- /dev/null
+++ b/doc/internals/listener-states.png
Binary files differ
diff --git a/doc/internals/lua_socket.fig b/doc/internals/lua_socket.fig
new file mode 100644
index 0000000..7da3294
--- /dev/null
+++ b/doc/internals/lua_socket.fig
@@ -0,0 +1,113 @@
+#FIG 3.2 Produced by xfig version 1.8
+Landscape
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+6 1125 2745 2565 3555
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 1125 2745 2565 2745 2565 3555 1125 3555 1125 2745
+4 0 0 50 -1 16 12 0.0000 4 180 1080 1215 3195 lua_State *T\001
+4 0 0 50 -1 18 12 0.0000 4 150 990 1215 2925 struct hlua\001
+4 0 0 50 -1 16 12 0.0000 4 195 1245 1215 3465 stop_list *stop\001
+-6
+6 7560 4365 10620 5265
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 7650 4635 10530 4635 10530 5175 7650 5175 7650 4635
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 7560 4365 10620 4365 10620 5265 7560 5265 7560 4365
+4 0 0 50 -1 18 12 0.0000 4 195 2565 7740 4815 struct stream_interface si[0]\001
+4 0 0 50 -1 16 12 0.0000 4 195 1725 7740 5085 enum obj_type *end\001
+4 0 0 50 -1 18 12 0.0000 4 150 1215 7650 4545 struct stream\001
+-6
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 225 4500 2745 4500
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 225 5040 2745 5040
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 225 4770 2745 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1935 5715 7740 6705
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2520 3420 3600 4095
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 225 4230 2745 4230 2745 7020 225 7020 225 4230
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 225 6300 2745 6300
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 225 6660 2745 6660
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 1035 2205 2655 2205 2655 3645 1035 3645 1035 2205
+2 1 1 4 4 7 500 -1 -1 4.000 0 0 -1 0 0 2
+ 4860 1935 4860 9225
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7695 6435 5760 4410
+2 2 0 1 0 7 50 -1 20 0.000 0 0 -1 0 0 5
+ 3600 3915 6075 3915 6075 4410 3600 4410 3600 3915
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9450 5040 9225 5670
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 4
+ 7740 6300 7695 6345 7695 6525 7740 6570
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 7560 5670 9765 5670 9765 7200 7560 7200 7560 5670
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 7650 5940 9675 5940 9675 7110 7650 7110 7650 5940
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 315 5310 2655 5310 2655 6165 315 6165 315 5310
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7830 6840 2565 5580
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 7740 6705 9540 6705 9540 6930 7740 6930 7740 6705
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 405 5580 2565 5580 2565 5805 405 5805 405 5580
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 5
+ 1 1 1.00 60.00 120.00
+ 1215 3105 765 3330 720 3555 765 3915 810 4230
+ 0.000 1.000 1.000 1.000 0.000
+3 0 1 1 13 7 50 -1 -1 1.000 0 1 0 3
+ 5 1 1.00 60.00 120.00
+ 675 7020 675 7785 900 8104
+ 0.000 1.000 0.000
+3 0 1 1 13 7 50 -1 -1 1.000 0 1 0 2
+ 5 1 1.00 60.00 120.00
+ 7740 7200 7740 8100
+ 0.000 0.000
+3 0 1 1 13 7 50 -1 -1 1.000 0 1 0 3
+ 5 1 1.00 60.00 120.00
+ 7605 7200 7605 8865 7740 9000
+ 0.000 1.000 0.000
+4 0 0 50 -1 18 12 0.0000 4 150 885 315 4410 stack Lua\001
+4 0 0 50 -1 16 12 0.0000 4 195 1140 315 4680 stack entry 0\001
+4 0 0 50 -1 16 12 0.0000 4 195 1140 315 4950 stack entry 1\001
+4 0 0 50 -1 16 12 0.0000 4 195 1140 315 5220 stack entry 2\001
+4 0 0 50 -1 18 12 0.0000 4 195 1695 405 5490 struct hlua_socket\001
+4 0 0 50 -1 16 12 0.0000 4 195 1140 315 6570 stack entry 3\001
+4 0 0 50 -1 16 12 0.0000 4 195 1140 315 6930 stack entry 4\001
+4 1 12 50 -1 12 9 5.6723 4 135 540 3150 3735 (list)\001
+4 0 0 50 -1 18 12 0.0000 4 150 1305 1125 2430 struct session\001
+4 0 0 50 -1 16 12 0.0000 4 150 1440 1125 2655 struct task *task\001
+4 0 0 50 -1 12 12 0.0000 4 165 1560 990 8100 hlua_tcp_gc()\001
+4 0 0 50 -1 16 12 0.0000 4 195 2430 990 8295 Called just before the object\001
+4 0 0 50 -1 16 12 0.0000 4 195 840 990 8535 garbaging\001
+4 1 12 50 -1 12 9 5.5327 4 135 540 6390 4905 (list)\001
+4 0 0 50 -1 18 12 0.0000 4 195 2205 3690 4095 struct hlua_socket_com\001
+4 0 0 50 -1 16 12 0.0000 4 150 1440 3690 4320 struct task *task\001
+4 0 0 50 -1 18 12 0.0000 4 195 1200 7650 5850 struct appctx\001
+4 0 0 50 -1 18 12 0.0000 4 150 1110 7740 6120 struct <lua>\001
+4 0 0 50 -1 16 12 0.0000 4 195 1620 7740 6615 struct hlua_tcp *wr\001
+4 0 0 50 -1 16 12 0.0000 4 195 1590 7740 6390 struct hlua_tcp *rd\001
+4 0 0 50 -1 12 12 0.0000 4 165 2160 7875 9000 hlua_tcp_release()\001
+4 0 0 50 -1 16 12 0.0000 4 195 3150 7875 9195 Called when the applet is destroyed.\001
+4 0 0 50 -1 12 12 0.0000 4 165 2400 7875 8100 update_tcp_handler()\001
+4 0 0 50 -1 16 12 0.0000 4 195 2640 7875 8295 Called on each change on the \001
+4 0 0 50 -1 16 12 0.0000 4 195 1830 7875 8535 tcp connection state.\001
+4 0 0 50 -1 16 12 0.0000 4 150 1350 495 5760 struct xref *xref\001
+4 0 0 50 -1 16 12 0.0000 4 150 1350 7830 6885 struct xref *xref\001
diff --git a/doc/internals/lua_socket.pdf b/doc/internals/lua_socket.pdf
new file mode 100644
index 0000000..e3b80ee
--- /dev/null
+++ b/doc/internals/lua_socket.pdf
Binary files differ
diff --git a/doc/internals/muxes.fig b/doc/internals/muxes.fig
new file mode 100644
index 0000000..babdd55
--- /dev/null
+++ b/doc/internals/muxes.fig
@@ -0,0 +1,401 @@
+#FIG 3.2 Produced by xfig version 3.2.8b
+Landscape
+Center
+Inches
+Letter
+100.00
+Single
+-1
+1200 2
+0 32 #bbf2e2
+0 33 #a7ceb3
+0 34 #dae8fc
+0 35 #458dba
+0 36 #ffe6cc
+0 37 #e9b000
+0 38 #1a1a1a
+0 39 #8e8e8e
+0 40 #ffc1e7
+6 4200 8700 4800 9825
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 4261 9751 4261 8751 4761 8751 4761 9751
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 4761 9751 4761 8751 4261 8751 4261 9751 4761 9751
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 4261 8850 4761 8850
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 4261 8925 4761 8925
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 4261 9000 4761 9000
+-6
+6 1425 3525 2025 4650
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 1486 4576 1486 3576 1986 3576 1986 4576
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 1986 4576 1986 3576 1486 3576 1486 4576 1986 4576
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 1486 3675 1986 3675
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 1486 3750 1986 3750
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 1486 3825 1986 3825
+-6
+6 3225 3525 3825 4650
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 3286 4576 3286 3576 3786 3576 3786 4576
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 3786 4576 3786 3576 3286 3576 3286 4576 3786 4576
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 3286 3675 3786 3675
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 3286 3750 3786 3750
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 3286 3825 3786 3825
+-6
+6 5025 3525 5625 4650
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 5086 4576 5086 3576 5586 3576 5586 4576
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 5586 4576 5586 3576 5086 3576 5086 4576 5586 4576
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5086 3675 5586 3675
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5086 3750 5586 3750
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5086 3825 5586 3825
+-6
+6 6900 3525 7500 4650
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 6961 4576 6961 3576 7461 3576 7461 4576
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 7461 4576 7461 3576 6961 3576 6961 4576 7461 4576
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 6961 3675 7461 3675
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 6961 3750 7461 3750
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 6961 3825 7461 3825
+-6
+6 11925 10725 13875 11475
+2 4 0 3 0 35 50 -1 20 0.000 1 0 7 0 0 5
+ 13800 11400 12000 11400 12000 10800 13800 10800 13800 11400
+4 1 0 49 -1 4 18 0.0000 4 285 1335 12900 11175 Transport\001
+-6
+6 6600 1200 10050 1800
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 6692 1261 9959 1261 9959 1761 6692 1761
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 6692 1761 9959 1761 9959 1261 6692 1261 6692 1761
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9750 1261 9750 1761
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9525 1261 9525 1761
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9300 1261 9300 1761
+4 1 0 46 -1 4 16 0.0000 4 210 1605 8025 1575 channel buf\001
+-6
+6 12375 8100 12900 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 12600 8161 12600 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 12425 8161 12825 8161 12825 8661 12425 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 12425 8661 12825 8661 12825 8161 12425 8161 12425 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 12675 8161 12675 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 12750 8161 12750 8661
+-6
+6 11700 8100 12225 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11925 8161 11925 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 11750 8161 12150 8161 12150 8661 11750 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 11750 8661 12150 8661 12150 8161 11750 8161 11750 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 12000 8161 12000 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 12075 8161 12075 8661
+-6
+6 11025 8100 11550 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11250 8161 11250 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 11075 8161 11475 8161 11475 8661 11075 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 11075 8661 11475 8661 11475 8161 11075 8161 11075 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11325 8161 11325 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11400 8161 11400 8661
+-6
+6 10350 8100 10875 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 10575 8161 10575 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 10400 8161 10800 8161 10800 8661 10400 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 10400 8661 10800 8661 10800 8161 10400 8161 10400 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 10650 8161 10650 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 10725 8161 10725 8661
+-6
+6 13050 8100 13575 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 13275 8161 13275 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 13100 8161 13500 8161 13500 8661 13100 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 13100 8661 13500 8661 13500 8161 13100 8161 13100 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 13350 8161 13350 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 13425 8161 13425 8661
+-6
+6 13725 8100 14250 8700
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 13950 8161 13950 8661
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 13775 8161 14175 8161 14175 8661 13775 8661
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 13775 8661 14175 8661 14175 8161 13775 8161 13775 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 14025 8161 14025 8661
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 14100 8161 14100 8661
+-6
+6 11100 11700 13050 12150
+1 1 0 4 20 40 49 -1 20 0.000 1 0.0000 11400 11925 225 150 11400 11925 11625 12075
+4 0 0 49 -1 4 12 0.0000 4 165 960 11850 12000 I/O tasklet\001
+-6
+6 11100 12300 11700 12600
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11357 12331 11357 12581
+2 1 0 4 35 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 11157 12331 11614 12331 11614 12581 11157 12581
+2 3 0 0 -1 34 49 -1 20 0.000 0 0 -1 0 0 5
+ 11157 12581 11614 12581 11614 12331 11157 12331 11157 12581
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11443 12331 11443 12581
+2 1 0 2 35 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 11529 12331 11529 12581
+-6
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 10725 5700 75 75 10725 5700 10800 5700
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 12750 5700 75 75 12750 5700 12825 5700
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 13875 5700 75 75 13875 5700 13950 5700
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 11700 5700 75 75 11700 5700 11775 5700
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 2925 6750 75 75 2925 6750 3000 6750
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 4950 6750 75 75 4950 6750 5025 6750
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 6075 6750 75 75 6075 6750 6150 6750
+1 3 0 3 0 0 49 -1 20 0.000 1 0.0000 3900 6750 75 75 3900 6750 3975 6750
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 9525 4140 583 250 9525 4140 10108 3890
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 11341 4140 583 250 11341 4140 11924 3890
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 13154 4140 583 250 13154 4140 13737 3890
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 15033 4140 583 250 15033 4140 15616 3890
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 7182 5173 583 250 7182 5173 7765 4923
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 3507 5173 583 250 3507 5173 4090 4923
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 1719 5173 583 250 1719 5173 2302 4923
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 5325 5175 583 250 5325 5175 5908 4925
+1 1 0 4 10 11 45 -1 20 0.000 1 0.0000 4488 8082 612 250 4488 8082 5100 8082
+1 1 0 4 10 11 49 -1 20 0.000 1 0.0000 12333 7025 417 250 12333 7025 12750 7025
+1 1 0 4 20 40 49 -1 20 0.000 1 0.0000 12392 9240 808 210 12392 9240 13200 9240
+1 1 0 4 20 40 49 -1 20 0.000 1 0.0000 3167 9240 808 210 3167 9240 3975 9240
+1 1 0 4 37 36 49 -1 20 0.000 1 0.0000 1800 11925 225 150 1800 11925 2025 12075
+1 1 0 4 10 11 45 -1 20 0.000 1 0.0000 6600 11925 225 150 6600 11925 6825 12075
+1 1 0 4 20 40 49 -1 20 0.000 1 0.0000 8400 600 900 210 8400 600 9300 600
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 2550 3300 2550 6150
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 4500 3300 4500 6150
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 6300 3300 6300 6150
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 600 8025 600 12225
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 600 3150 600 1800
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 600 1500 600 150
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 1 4
+ 1 1 1.00 90.00 180.00
+ 1 1 1.00 90.00 180.00
+ 3000 3300 3000 1425 3675 600 7500 600
+2 3 0 4 33 32 50 -1 20 0.000 0 0 -1 0 0 5
+ 900 3300 900 9900 8100 9900 8100 3300 900 3300
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 4
+ 1 1 1.00 90.00 180.00
+ 3525 3525 3525 2625 4500 1500 6750 1500
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 3
+ 1 1 1.00 90.00 180.00
+ 11295 4425 11295 4725 11700 5625
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 3
+ 1 1 1.00 90.00 180.00
+ 9495 4425 9495 4725 10695 5700
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 3
+ 1 1 1.00 90.00 180.00
+ 13163 4425 13163 4725 12788 5625
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 3
+ 1 1 1.00 90.00 180.00
+ 15013 4427 15013 4725 13888 5702
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 9525 3525 9525 3825
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 13125 3525 13125 3825
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 15000 3525 15000 3825
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 5
+ 1 1 1.00 90.00 180.00
+ 12300 7275 12300 7725 9975 7725 9975 8400 10425 8400
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 0 3
+ 1 1 1.00 90.00 180.00
+ 11775 5850 12300 6450 12300 6825
+2 1 1 3 0 7 49 -1 -1 8.000 1 0 -1 0 0 3
+ 11475 6150 13200 6150 13200 6825
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 3
+ 1 1 1.00 90.00 180.00
+ 3975 6900 4500 7650 4500 7875
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 3
+ 1 1 1.00 90.00 180.00
+ 3495 5475 3495 5775 3900 6675
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 3
+ 1 1 1.00 90.00 180.00
+ 1695 5475 1695 5775 2895 6750
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 3
+ 1 1 1.00 90.00 180.00
+ 7213 5477 7213 5775 6088 6752
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 1725 4875 1725 4575
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 3525 4875 3525 4575
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 5325 4875 5325 4575
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 7200 4875 7200 4575
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 2
+ 1 1 1.00 90.00 180.00
+ 4500 8325 4500 8721
+2 1 1 3 0 7 49 -1 -1 8.000 1 0 -1 0 0 3
+ 3225 7875 3225 7350 4725 7350
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 1 2
+ 1 1 1.00 90.00 180.00
+ 1 1 1.00 90.00 180.00
+ 3900 10800 3225 9450
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 4500 10800 4500 9750
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 1 1 3
+ 1 1 1.00 90.00 180.00
+ 1 1 1.00 90.00 180.00
+ 12375 10800 12375 9750 12375 9450
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 12225 3300 12225 5025
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 10425 3300 10425 5025
+2 1 1 1 0 7 49 -1 -1 4.000 1 0 -1 0 0 2
+ 14025 3300 14025 5025
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 4
+ 1 1 1.00 90.00 180.00
+ 9975 1500 10800 1500 11325 2100 11325 3825
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 1 4
+ 1 1 1.00 90.00 180.00
+ 1 1 1.00 90.00 180.00
+ 9300 600 11175 600 11775 1275 11775 3300
+2 3 0 4 33 32 50 -1 20 0.000 0 0 -1 0 0 5
+ 8700 3300 8700 9900 15900 9900 15900 3300 8700 3300
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 0 1 5
+ 1 1 1.00 90.00 180.00
+ 13200 10800 13200 10200 14625 9750 14625 8400 14175 8400
+2 1 0 3 0 7 49 -1 -1 0.000 1 0 -1 0 1 3
+ 1 1 1.00 90.00 180.00
+ 5325 5475 5325 5775 4950 6675
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 600 5400 600 3300
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 0 0 2
+ 600 7800 600 5700
+2 4 0 3 0 35 50 -1 20 0.000 1 0 7 0 0 5
+ 5400 11400 3600 11400 3600 10800 5400 10800 5400 11400
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 12150 8400 12450 8400
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 11475 8400 11775 8400
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 10800 8400 11100 8400
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 12825 8400 13125 8400
+2 1 0 3 0 7 49 -1 -1 8.000 1 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 13500 8400 13800 8400
+2 4 0 3 0 35 50 -1 20 0.000 1 0 7 0 0 5
+ 2100 12600 1575 12600 1575 12300 2100 12300 2100 12600
+2 4 0 3 33 32 50 -1 20 0.000 1 0 7 0 0 5
+ 6900 12600 6375 12600 6375 12300 6900 12300 6900 12600
+4 1 0 49 -1 4 14 1.5708 4 225 1335 450 825 application\001
+4 0 0 49 -1 4 12 1.5708 4 180 2595 2850 3225 mux->subscribe(SUB_RECV)\001
+4 1 0 46 -1 4 16 1.5708 4 210 645 3600 4200 rxbuf\001
+4 1 0 46 -1 4 16 1.5708 4 210 615 4575 9375 dbuf\001
+4 1 0 49 -1 4 16 0.0000 4 210 600 12300 7125 MUX\001
+4 1 0 44 -1 4 16 0.0000 4 210 945 4500 8175 DEMUX\001
+4 2 0 49 -1 4 12 0.0000 4 150 915 3600 8100 Stream ID\001
+4 0 0 49 -1 4 12 0.0000 4 150 915 12825 7125 Stream ID\001
+4 2 0 49 -1 4 12 0.0000 4 180 1635 3300 10125 tasklet_wakeup()\001
+4 2 0 49 -1 4 12 0.0000 4 180 1635 12150 10125 tasklet_wakeup()\001
+4 2 0 49 -1 4 12 0.0000 4 180 1470 11175 3150 mux->snd_buf()\001
+4 0 0 49 -1 4 12 0.0000 4 180 1425 3675 3225 mux->rcv_buf()\001
+4 0 0 49 -1 4 12 0.0000 4 180 1920 13425 10575 xprt->snd_buf(mbuf)\001
+4 0 0 49 -1 4 12 0.0000 4 180 1830 4725 10500 xprt->rcv_buf(dbuf)\001
+4 1 0 49 -1 4 12 0.0000 4 150 3105 8400 2100 HTX contents when mode==HTTP\001
+4 2 0 49 -1 4 12 0.0000 4 180 1635 7500 450 tasklet_wakeup()\001
+4 0 0 49 -1 4 12 0.0000 4 180 1635 9300 450 tasklet_wakeup()\001
+4 1 38 48 -1 4 12 0.0000 4 150 750 9534 4200 encode\001
+4 1 38 48 -1 4 12 0.0000 4 150 750 11325 4200 encode\001
+4 1 38 48 -1 4 12 0.0000 4 150 750 13134 4200 encode\001
+4 1 38 48 -1 4 12 0.0000 4 150 750 15009 4200 encode\001
+4 1 38 48 -1 4 12 0.0000 4 150 765 1725 5250 decode\001
+4 1 38 48 -1 4 12 0.0000 4 150 765 3525 5250 decode\001
+4 1 38 48 -1 4 12 0.0000 4 150 765 5325 5250 decode\001
+4 1 38 48 -1 4 12 0.0000 4 150 765 7200 5250 decode\001
+4 1 38 48 -1 4 12 0.0000 4 180 1035 12375 9300 mux_io_cb\001
+4 0 0 49 -1 4 12 1.5708 4 180 2580 12075 3225 mux->subscribe(SUB_SEND)\001
+4 1 0 49 -1 4 14 1.5708 4 180 1425 450 4500 mux streams\001
+4 1 0 49 -1 4 14 1.5708 4 135 1980 450 6750 mux=conn->mux\001
+4 1 0 49 -1 4 18 0.0000 4 285 1335 4500 11175 Transport\001
+4 1 0 46 -1 4 16 0.0000 4 210 690 14625 8175 mbuf\001
+4 1 38 48 -1 4 12 0.0000 4 180 1035 3159 9300 mux_io_cb\001
+4 0 0 49 -1 4 12 0.0000 4 195 2805 2250 12000 encoding/decoding function\001
+4 0 0 49 -1 4 12 0.0000 4 180 1365 2250 12525 transport layer\001
+4 0 0 49 -1 4 12 0.0000 4 180 2445 7050 12525 multiplexer (MUX/DEMUX)\001
+4 0 0 49 -1 4 12 0.0000 4 195 2655 7050 12000 general processing function\001
+4 0 0 49 -1 4 12 0.0000 4 180 2820 11775 12525 stream buffer (byte-level FIFO)\001
+4 2 0 49 -1 4 12 0.0000 4 180 2550 3675 10725 xprt->subscribe(SUB_RECV)\001
+4 2 0 49 -1 4 12 0.0000 4 180 2535 12225 10725 xprt->subscribe(SUB_SEND)\001
+4 1 0 49 -1 4 14 1.5708 4 180 780 450 2550 stconn\001
+4 1 0 49 -1 4 12 1.5708 4 195 2010 900 1125 (eg: checks, streams)\001
+4 1 0 49 -1 4 14 1.5708 4 180 3720 450 10125 connection = sc->sedesc->conn\001
+4 0 0 49 -1 4 12 0.0000 4 150 600 12225 225 Notes:\001
+4 0 0 49 -1 4 12 0.0000 4 180 2220 12975 675 snd_buf() will move the\001
+4 0 0 49 -1 4 12 0.0000 4 180 2310 12975 975 buffer (zero-copy) when\001
+4 0 0 49 -1 4 12 0.0000 4 180 2310 12975 1275 the destination is empty.\001
+4 0 0 49 -1 4 12 0.0000 4 180 2220 12825 1650 - the application is also\001
+4 0 0 49 -1 4 12 0.0000 4 180 2700 12975 2250 is sc->app with sc->app_ops\001
+4 0 0 49 -1 4 12 0.0000 4 180 2490 12825 2550 - transport layers (xprt) are\001
+4 0 0 49 -1 4 12 0.0000 4 180 2250 12975 2775 stackable. conn->xprt is\001
+4 0 0 49 -1 4 12 0.0000 4 180 1635 12975 3000 the topmost one.\001
+4 0 0 49 -1 4 12 0.0000 4 180 2400 12975 1950 called the app layer and\001
+4 0 0 49 -1 4 12 0.0000 4 180 1995 12825 375 - mux->rcv_buf() and\001
+4 1 38 48 -1 4 12 0.0000 4 180 1440 8409 657 sc_conn_io_cb\001
diff --git a/doc/internals/muxes.pdf b/doc/internals/muxes.pdf
new file mode 100644
index 0000000..54f8cc7
--- /dev/null
+++ b/doc/internals/muxes.pdf
Binary files differ
diff --git a/doc/internals/muxes.png b/doc/internals/muxes.png
new file mode 100644
index 0000000..a58f42f
--- /dev/null
+++ b/doc/internals/muxes.png
Binary files differ
diff --git a/doc/internals/muxes.svg b/doc/internals/muxes.svg
new file mode 100644
index 0000000..3feaa4d
--- /dev/null
+++ b/doc/internals/muxes.svg
@@ -0,0 +1,911 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2.8b -->
+<!-- CreationDate: 2022-05-27 11:37:43 -->
+<!-- Magnification: 1 -->
+<svg xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ width="942pt" height="755pt"
+ viewBox="254 60 15690 12573">
+<g fill="none">
+<!-- Line -->
+<rect x="12000" y="10800" width="1800" height="600" rx="105" fill="#458dba"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<polygon points=" 900,3300 900,9900 8100,9900 8100,3300" fill="#bbf2e2"
+ stroke="#a7ceb3" stroke-width="45px"/>
+<!-- Line -->
+<polygon points=" 8700,3300 8700,9900 15900,9900 15900,3300" fill="#bbf2e2"
+ stroke="#a7ceb3" stroke-width="45px"/>
+<!-- Line -->
+<rect x="3600" y="10800" width="1800" height="600" rx="105" fill="#458dba"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<rect x="1575" y="12300" width="525" height="300" rx="105" fill="#458dba"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<rect x="6375" y="12300" width="525" height="300" rx="105" fill="#bbf2e2"
+ stroke="#a7ceb3" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<polygon points=" 4761,9751 4761,8751 4261,8751 4261,9751" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 1986,4576 1986,3576 1486,3576 1486,4576" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 3786,4576 3786,3576 3286,3576 3286,4576" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 5586,4576 5586,3576 5086,3576 5086,4576" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 7461,4576 7461,3576 6961,3576 6961,4576" fill="#dae8fc"/>
+<!-- Text -->
+<text xml:space="preserve" x="12900" y="11175" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="216" text-anchor="middle">Transport</text>
+<!-- Line -->
+<polygon points=" 6692,1761 9959,1761 9959,1261 6692,1261" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 12425,8661 12825,8661 12825,8161 12425,8161" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 11750,8661 12150,8661 12150,8161 11750,8161" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 11075,8661 11475,8661 11475,8161 11075,8161" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 10400,8661 10800,8661 10800,8161 10400,8161" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 13100,8661 13500,8661 13500,8161 13100,8161" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 13775,8661 14175,8661 14175,8161 13775,8161" fill="#dae8fc"/>
+<!-- Ellipse -->
+<ellipse cx="11400" cy="11925" rx="225" ry="150" fill="#ffc1e7"
+ stroke="#d10000" stroke-width="45px"/>
+<!-- Text -->
+<text xml:space="preserve" x="11850" y="12000" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">I/O tasklet</text>
+<!-- Line -->
+<polygon points=" 11157,12581 11614,12581 11614,12331 11157,12331" fill="#dae8fc"/>
+<!-- Circle -->
+<circle cx="10725" cy="5700" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="12750" cy="5700" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="13875" cy="5700" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="11700" cy="5700" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="2925" cy="6750" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="4950" cy="6750" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="6075" cy="6750" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Circle -->
+<circle cx="3900" cy="6750" r="75" fill="#000000"
+ stroke="#000000" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse cx="9525" cy="4140" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="11341" cy="4140" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="13154" cy="4140" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="15033" cy="4140" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="7182" cy="5173" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="3507" cy="5173" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="1719" cy="5173" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="5325" cy="5175" rx="583" ry="250" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="12333" cy="7025" rx="417" ry="250" fill="#87cfff"
+ stroke="#0000d1" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="12392" cy="9240" rx="808" ry="210" fill="#ffc1e7"
+ stroke="#d10000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="3167" cy="9240" rx="808" ry="210" fill="#ffc1e7"
+ stroke="#d10000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="1800" cy="11925" rx="225" ry="150" fill="#ffe6cc"
+ stroke="#e9b000" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="8400" cy="600" rx="900" ry="210" fill="#ffc1e7"
+ stroke="#d10000" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 2550,3300 2550,6150"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<polyline points=" 4500,3300 4500,6150"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<polyline points=" 6300,3300 6300,6150"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp0">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 645,12029 555,12029 582,12243 618,12243z"/>
+</clipPath>
+</defs>
+<polyline points=" 600,8025 600,12225" clip-path="url(#cp0)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 600,12225 -->
+<polygon points=" 555,12029 600,12209 645,12029 555,12029"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp1">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 555,1996 645,1996 618,1782 582,1782z"/>
+</clipPath>
+</defs>
+<polyline points=" 600,3150 600,1800" clip-path="url(#cp1)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 600,1800 -->
+<polygon points=" 645,1996 600,1816 555,1996 645,1996"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp2">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 555,346 645,346 618,132 582,132z"/>
+</clipPath>
+</defs>
+<polyline points=" 600,1500 600,150" clip-path="url(#cp2)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 600,150 -->
+<polygon points=" 645,346 600,166 555,346 645,346"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp3">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 7304,555 7304,645 7518,618 7518,582z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 3000,3300 3000,1425 3675,600 7500,600" clip-path="url(#cp3)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7500,600 -->
+<polygon points=" 7304,645 7484,600 7304,555 7304,645"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Backward arrow to point 3000,3300 -->
+<polygon points=" 2955,3104 3000,3284 3045,3104 2955,3104"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp4">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 6554,1455 6554,1545 6768,1518 6768,1482z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 3525,3525 3525,2625 4500,1500 6750,1500" clip-path="url(#cp4)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 6750,1500 -->
+<polygon points=" 6554,1545 6734,1500 6554,1455 6554,1545"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp5">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11661,5428 11578,5465 11691,5649 11724,5634z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 11295,4425 11295,4725 11700,5625" clip-path="url(#cp5)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11700,5625 -->
+<polygon points=" 11578,5465 11693,5610 11661,5428 11578,5465"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp6">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 10571,5541 10514,5611 10698,5725 10720,5697z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 9495,4425 9495,4725 10695,5700" clip-path="url(#cp6)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10695,5700 -->
+<polygon points=" 10514,5611 10682,5690 10571,5541 10514,5611"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp7">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12905,5461 12822,5427 12764,5635 12798,5649z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 13163,4425 13163,4725 12788,5625" clip-path="url(#cp7)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 12788,5625 -->
+<polygon points=" 12822,5427 12794,5610 12905,5461 12822,5427"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp8">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 14066,5607 14007,5539 13863,5700 13886,5727z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 15013,4427 15013,4725 13888,5702" clip-path="url(#cp8)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 13888,5702 -->
+<polygon points=" 14007,5539 13900,5691 14066,5607 14007,5539"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp9">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 9555,3689 9495,3689 9507,3843 9543,3843z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 9525,3525 9525,3825" clip-path="url(#cp9)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 9525,3825 -->
+<polygon points=" 9495,3689 9525,3809 9555,3689 9495,3689"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp10">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 13155,3689 13095,3689 13107,3843 13143,3843z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 13125,3525 13125,3825" clip-path="url(#cp10)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 13125,3825 -->
+<polygon points=" 13095,3689 13125,3809 13155,3689 13095,3689"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp11">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 15030,3689 14970,3689 14982,3843 15018,3843z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 15000,3525 15000,3825" clip-path="url(#cp11)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 15000,3825 -->
+<polygon points=" 14970,3689 15000,3809 15030,3689 14970,3689"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp12">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 10229,8355 10229,8445 10443,8418 10443,8382z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 12300,7275 12300,7725 9975,7725 9975,8400 10425,8400" clip-path="url(#cp12)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10425,8400 -->
+<polygon points=" 10229,8445 10409,8400 10229,8355 10229,8445"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp13">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12345,6629 12255,6629 12282,6843 12318,6843z
+ M 3045,3104 2955,3104 2982,3318 3018,3318z"/>
+</clipPath>
+</defs>
+<polyline points=" 11775,5850 12300,6450 12300,6825" clip-path="url(#cp13)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 12300,6825 -->
+<polygon points=" 12255,6629 12300,6809 12345,6629 12255,6629"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 11475,6150 13200,6150 13200,6825"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round" stroke-dasharray="80 80"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp14">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12345,6629 12255,6629 12282,6843 12318,6843z
+ M 4051,7087 4124,7035 3979,6875 3950,6896z"/>
+</clipPath>
+</defs>
+<polyline points=" 3975,6900 4500,7650 4500,7875" clip-path="url(#cp14)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 3975,6900 -->
+<polygon points=" 4124,7035 3984,6913 4051,7087 4124,7035"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp15">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12345,6629 12255,6629 12282,6843 12318,6843z
+ M 3450,5671 3540,5671 3513,5457 3477,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 3495,5475 3495,5775 3900,6675" clip-path="url(#cp15)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 3495,5475 -->
+<polygon points=" 3540,5671 3495,5491 3450,5671 3540,5671"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp16">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12345,6629 12255,6629 12282,6843 12318,6843z
+ M 1650,5671 1740,5671 1713,5457 1677,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 1695,5475 1695,5775 2895,6750" clip-path="url(#cp16)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 1695,5475 -->
+<polygon points=" 1740,5671 1695,5491 1650,5671 1740,5671"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp17">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12345,6629 12255,6629 12282,6843 12318,6843z
+ M 7168,5673 7258,5673 7231,5459 7195,5459z"/>
+</clipPath>
+</defs>
+<polyline points=" 7213,5477 7213,5775 6088,6752" clip-path="url(#cp17)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 7213,5477 -->
+<polygon points=" 7258,5673 7213,5493 7168,5673 7258,5673"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp18">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 1695,4711 1755,4711 1743,4557 1707,4557z
+ M 7168,5673 7258,5673 7231,5459 7195,5459z"/>
+</clipPath>
+</defs>
+<polyline points=" 1725,4875 1725,4575" clip-path="url(#cp18)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 1725,4575 -->
+<polygon points=" 1755,4711 1725,4591 1695,4711 1755,4711"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp19">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 3495,4711 3555,4711 3543,4557 3507,4557z
+ M 7168,5673 7258,5673 7231,5459 7195,5459z"/>
+</clipPath>
+</defs>
+<polyline points=" 3525,4875 3525,4575" clip-path="url(#cp19)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 3525,4575 -->
+<polygon points=" 3555,4711 3525,4591 3495,4711 3555,4711"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp20">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 5295,4711 5355,4711 5343,4557 5307,4557z
+ M 7168,5673 7258,5673 7231,5459 7195,5459z"/>
+</clipPath>
+</defs>
+<polyline points=" 5325,4875 5325,4575" clip-path="url(#cp20)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 5325,4575 -->
+<polygon points=" 5355,4711 5325,4591 5295,4711 5355,4711"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp21">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 7170,4711 7230,4711 7218,4557 7182,4557z
+ M 7168,5673 7258,5673 7231,5459 7195,5459z"/>
+</clipPath>
+</defs>
+<polyline points=" 7200,4875 7200,4575" clip-path="url(#cp21)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,4575 -->
+<polygon points=" 7230,4711 7200,4591 7170,4711 7230,4711"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp22">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 7170,4711 7230,4711 7218,4557 7182,4557z
+ M 4455,8521 4545,8521 4518,8307 4482,8307z"/>
+</clipPath>
+</defs>
+<polyline points=" 4500,8325 4500,8721" clip-path="url(#cp22)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 4500,8325 -->
+<polygon points=" 4545,8521 4500,8341 4455,8521 4545,8521"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 3225,7875 3225,7350 4725,7350"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round" stroke-dasharray="80 80"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp23">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 3272,9646 3353,9605 3233,9426 3201,9442z
+ M 3853,10604 3772,10645 3892,10824 3924,10808z"/>
+</clipPath>
+</defs>
+<polyline points=" 3900,10800 3225,9450" clip-path="url(#cp23)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 3225,9450 -->
+<polygon points=" 3353,9605 3232,9464 3272,9646 3353,9605"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Backward arrow to point 3900,10800 -->
+<polygon points=" 3772,10645 3893,10786 3853,10604 3772,10645"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp24">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 4455,9946 4545,9946 4518,9732 4482,9732z
+ M 3853,10604 3772,10645 3892,10824 3924,10808z"/>
+</clipPath>
+</defs>
+<polyline points=" 4500,10800 4500,9750" clip-path="url(#cp24)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 4500,9750 -->
+<polygon points=" 4545,9946 4500,9766 4455,9946 4545,9946"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp25">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12330,9646 12420,9646 12393,9432 12357,9432z
+ M 12420,10604 12330,10604 12357,10818 12393,10818z"/>
+</clipPath>
+</defs>
+<polyline points=" 12375,10800 12375,9750 12375,9450" clip-path="url(#cp25)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 12375,9450 -->
+<polygon points=" 12420,9646 12375,9466 12330,9646 12420,9646"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Backward arrow to point 12375,10800 -->
+<polygon points=" 12330,10604 12375,10784 12420,10604 12330,10604"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 12225,3300 12225,5025"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<polyline points=" 10425,3300 10425,5025"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<polyline points=" 14025,3300 14025,5025"
+ stroke="#000000" stroke-width="8px" stroke-linejoin="round" stroke-dasharray="40 40"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp26">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11370,3629 11280,3629 11307,3843 11343,3843z
+ M 12420,10604 12330,10604 12357,10818 12393,10818z"/>
+</clipPath>
+</defs>
+<polyline points=" 9975,1500 10800,1500 11325,2100 11325,3825" clip-path="url(#cp26)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11325,3825 -->
+<polygon points=" 11280,3629 11325,3809 11370,3629 11280,3629"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp27">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11820,3104 11730,3104 11757,3318 11793,3318z
+ M 9496,645 9496,555 9282,582 9282,618z"/>
+</clipPath>
+</defs>
+<polyline points=" 9300,600 11175,600 11775,1275 11775,3300" clip-path="url(#cp27)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11775,3300 -->
+<polygon points=" 11730,3104 11775,3284 11820,3104 11730,3104"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Backward arrow to point 9300,600 -->
+<polygon points=" 9496,555 9316,600 9496,645 9496,555"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp28">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11820,3104 11730,3104 11757,3318 11793,3318z
+ M 13245,10604 13155,10604 13182,10818 13218,10818z"/>
+</clipPath>
+</defs>
+<polyline points=" 13200,10800 13200,10200 14625,9750 14625,8400 14175,8400" clip-path="url(#cp28)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 13200,10800 -->
+<polygon points=" 13155,10604 13200,10784 13245,10604 13155,10604"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp29">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11820,3104 11730,3104 11757,3318 11793,3318z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 5325,5475 5325,5775 4950,6675" clip-path="url(#cp29)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Backward arrow to point 5325,5475 -->
+<polygon points=" 5370,5671 5325,5491 5280,5671 5370,5671"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp30">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 555,3496 645,3496 618,3282 582,3282z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 600,5400 600,3300" clip-path="url(#cp30)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 600,3300 -->
+<polygon points=" 645,3496 600,3316 555,3496 645,3496"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 600,7800 600,5700"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp31">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12314,8370 12314,8430 12468,8418 12468,8382z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 12150,8400 12450,8400" clip-path="url(#cp31)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 12450,8400 -->
+<polygon points=" 12314,8430 12434,8400 12314,8370 12314,8430"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp32">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 11639,8370 11639,8430 11793,8418 11793,8382z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 11475,8400 11775,8400" clip-path="url(#cp32)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11775,8400 -->
+<polygon points=" 11639,8430 11759,8400 11639,8370 11639,8430"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp33">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 10964,8370 10964,8430 11118,8418 11118,8382z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 10800,8400 11100,8400" clip-path="url(#cp33)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11100,8400 -->
+<polygon points=" 10964,8430 11084,8400 10964,8370 10964,8430"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp34">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 12989,8370 12989,8430 13143,8418 13143,8382z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 12825,8400 13125,8400" clip-path="url(#cp34)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 13125,8400 -->
+<polygon points=" 12989,8430 13109,8400 12989,8370 12989,8430"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp35">
+ <path clip-rule="evenodd" d="M 254,60 H 15944 V 12633 H 254 z
+ M 13664,8370 13664,8430 13818,8418 13818,8382z
+ M 5280,5671 5370,5671 5343,5457 5307,5457z"/>
+</clipPath>
+</defs>
+<polyline points=" 13500,8400 13800,8400" clip-path="url(#cp35)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 13800,8400 -->
+<polygon points=" 13664,8430 13784,8400 13664,8370 13664,8430"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Text -->
+<g transform="translate(450,825) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="168" text-anchor="middle">application</text>
+</g><!-- Text -->
+<g transform="translate(2850,3225) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">mux-&gt;subscribe(SUB_RECV)</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="12300" y="7125" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">MUX</text>
+<!-- Text -->
+<text xml:space="preserve" x="3600" y="8100" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">Stream ID</text>
+<!-- Text -->
+<text xml:space="preserve" x="12825" y="7125" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">Stream ID</text>
+<!-- Text -->
+<text xml:space="preserve" x="3300" y="10125" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">tasklet_wakeup()</text>
+<!-- Text -->
+<text xml:space="preserve" x="12150" y="10125" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">tasklet_wakeup()</text>
+<!-- Text -->
+<text xml:space="preserve" x="11175" y="3150" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">mux-&gt;snd_buf()</text>
+<!-- Text -->
+<text xml:space="preserve" x="3675" y="3225" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">mux-&gt;rcv_buf()</text>
+<!-- Text -->
+<text xml:space="preserve" x="13425" y="10575" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">xprt-&gt;snd_buf(mbuf)</text>
+<!-- Text -->
+<text xml:space="preserve" x="4725" y="10500" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">xprt-&gt;rcv_buf(dbuf)</text>
+<!-- Text -->
+<text xml:space="preserve" x="8400" y="2100" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">HTX contents when mode==HTTP</text>
+<!-- Text -->
+<text xml:space="preserve" x="7500" y="450" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">tasklet_wakeup()</text>
+<!-- Text -->
+<text xml:space="preserve" x="9300" y="450" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">tasklet_wakeup()</text>
+<!-- Text -->
+<g transform="translate(12075,3225) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">mux-&gt;subscribe(SUB_SEND)</text>
+</g><!-- Text -->
+<g transform="translate(450,4500) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="168" text-anchor="middle">mux streams</text>
+</g><!-- Text -->
+<g transform="translate(450,6750) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="168" text-anchor="middle">mux=conn-&gt;mux</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="4500" y="11175" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="216" text-anchor="middle">Transport</text>
+<!-- Text -->
+<text xml:space="preserve" x="2250" y="12000" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">encoding/decoding function</text>
+<!-- Text -->
+<text xml:space="preserve" x="2250" y="12525" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">transport layer</text>
+<!-- Text -->
+<text xml:space="preserve" x="7050" y="12525" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">multiplexer (MUX/DEMUX)</text>
+<!-- Text -->
+<text xml:space="preserve" x="7050" y="12000" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">general processing function</text>
+<!-- Text -->
+<text xml:space="preserve" x="11775" y="12525" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">stream buffer (byte-level FIFO)</text>
+<!-- Text -->
+<text xml:space="preserve" x="3675" y="10725" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">xprt-&gt;subscribe(SUB_RECV)</text>
+<!-- Text -->
+<text xml:space="preserve" x="12225" y="10725" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="end">xprt-&gt;subscribe(SUB_SEND)</text>
+<!-- Text -->
+<g transform="translate(450,2550) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="168" text-anchor="middle">stconn</text>
+</g><!-- Text -->
+<g transform="translate(900,1125) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">(eg: checks, streams)</text>
+</g><!-- Text -->
+<g transform="translate(450,10125) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="168" text-anchor="middle">connection = sc-&gt;sedesc-&gt;conn</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="12225" y="225" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">Notes:</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="675" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">snd_buf() will move the</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="975" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">buffer (zero-copy) when</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="1275" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">the destination is empty.</text>
+<!-- Text -->
+<text xml:space="preserve" x="12825" y="1650" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">- the application is also</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="2250" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">is sc-&gt;app with sc-&gt;app_ops</text>
+<!-- Text -->
+<text xml:space="preserve" x="12825" y="2550" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">- transport layers (xprt) are</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="2775" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">stackable. conn-&gt;xprt is</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="3000" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">the topmost one.</text>
+<!-- Text -->
+<text xml:space="preserve" x="12975" y="1950" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">called the app layer and</text>
+<!-- Text -->
+<text xml:space="preserve" x="12825" y="375" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="start">- mux-&gt;rcv_buf() and</text>
+<!-- Line -->
+<polyline points=" 4261,9751 4261,8751 4761,8751 4761,9751"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 1486,4576 1486,3576 1986,3576 1986,4576"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 3286,4576 3286,3576 3786,3576 3786,4576"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 5086,4576 5086,3576 5586,3576 5586,4576"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 6961,4576 6961,3576 7461,3576 7461,4576"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 6692,1261 9959,1261 9959,1761 6692,1761"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 12425,8161 12825,8161 12825,8661 12425,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 11750,8161 12150,8161 12150,8661 11750,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 11075,8161 11475,8161 11475,8661 11075,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 10400,8161 10800,8161 10800,8661 10400,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 13100,8161 13500,8161 13500,8661 13100,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 13775,8161 14175,8161 14175,8661 13775,8661"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 11157,12331 11614,12331 11614,12581 11157,12581"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Text -->
+<text xml:space="preserve" x="9534" y="4200" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">encode</text>
+<!-- Text -->
+<text xml:space="preserve" x="11325" y="4200" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">encode</text>
+<!-- Text -->
+<text xml:space="preserve" x="13134" y="4200" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">encode</text>
+<!-- Text -->
+<text xml:space="preserve" x="15009" y="4200" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">encode</text>
+<!-- Text -->
+<text xml:space="preserve" x="1725" y="5250" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">decode</text>
+<!-- Text -->
+<text xml:space="preserve" x="3525" y="5250" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">decode</text>
+<!-- Text -->
+<text xml:space="preserve" x="5325" y="5250" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">decode</text>
+<!-- Text -->
+<text xml:space="preserve" x="7200" y="5250" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">decode</text>
+<!-- Text -->
+<text xml:space="preserve" x="12375" y="9300" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">mux_io_cb</text>
+<!-- Text -->
+<text xml:space="preserve" x="3159" y="9300" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">mux_io_cb</text>
+<!-- Text -->
+<text xml:space="preserve" x="8409" y="657" fill="#1a1a1a" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="144" text-anchor="middle">sc_conn_io_cb</text>
+<!-- Line -->
+<polyline points=" 4261,8850 4761,8850"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 4261,8925 4761,8925"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 4261,9000 4761,9000"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 1486,3675 1986,3675"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 1486,3750 1986,3750"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 1486,3825 1986,3825"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 3286,3675 3786,3675"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 3286,3750 3786,3750"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 3286,3825 3786,3825"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5086,3675 5586,3675"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5086,3750 5586,3750"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5086,3825 5586,3825"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 6961,3675 7461,3675"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 6961,3750 7461,3750"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 6961,3825 7461,3825"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9750,1261 9750,1761"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9525,1261 9525,1761"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9300,1261 9300,1761"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 12600,8161 12600,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 12675,8161 12675,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 12750,8161 12750,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11925,8161 11925,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 12000,8161 12000,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 12075,8161 12075,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11250,8161 11250,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11325,8161 11325,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11400,8161 11400,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 10575,8161 10575,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 10650,8161 10650,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 10725,8161 10725,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 13275,8161 13275,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 13350,8161 13350,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 13425,8161 13425,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 13950,8161 13950,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 14025,8161 14025,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 14100,8161 14100,8661"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11357,12331 11357,12581"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11443,12331 11443,12581"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 11529,12331 11529,12581"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Text -->
+<text xml:space="preserve" x="8025" y="1575" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">channel buf</text>
+<!-- Text -->
+<g transform="translate(3600,4200) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">rxbuf</text>
+</g><!-- Text -->
+<g transform="translate(4575,9375) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">dbuf</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="14625" y="8175" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">mbuf</text>
+<!-- Ellipse -->
+<ellipse cx="4488" cy="8082" rx="612" ry="250" fill="#87cfff"
+ stroke="#0000d1" stroke-width="45px"/>
+<!-- Ellipse -->
+<ellipse cx="6600" cy="11925" rx="225" ry="150" fill="#87cfff"
+ stroke="#0000d1" stroke-width="45px"/>
+<!-- Text -->
+<text xml:space="preserve" x="4500" y="8175" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">DEMUX</text>
+</g>
+</svg>
diff --git a/doc/internals/notes-layers.txt b/doc/internals/notes-layers.txt
new file mode 100644
index 0000000..541c125
--- /dev/null
+++ b/doc/internals/notes-layers.txt
@@ -0,0 +1,330 @@
+2018-02-21 - Layering in haproxy 1.9
+------------------------------------
+
+2 main zones :
+ - application : reads from conn_streams, writes to conn_streams, often uses
+ streams
+
+ - connection : receives data from the network, presented into buffers
+ available via conn_streams, sends data to the network
+
+
+The connection zone contains multiple layers which behave independently in each
+direction. The Rx direction is activated upon callbacks from the lower layers.
+The Tx direction is activated recursively from the upper layers. Between every
+two layers there may be a buffer, in each direction. When a buffer is full
+either in Tx or Rx direction, this direction is paused from the network layer
+and the location where the congestion is encountered. Upon end of congestion
+(cs_recv() from the upper layer, or sendto() at the lower layers), a
+tasklet_wakeup() is performed on the blocked layer so that suspended operations
+can be resumed. In this case, the Rx side restarts propagating data upwards
+from the lowest blocked level, while the Tx side restarts propagating data
+downwards from the highest blocked level. Proceeding like this ensures that
+information known to the producer may always be used to tailor the buffer sizes
+or decide of a strategy to best aggregate data. Additionally, each time a layer
+is crossed without transformation, it becomes possible to send without copying.
+
+The Rx side notifies the application of data readiness using a wakeup or a
+callback. The Tx side notifies the application of room availability once data
+have been moved resulting in the uppermost buffer having some free space.
+
+When crossing a mux downwards, it is possible that the sender is not allowed to
+access the buffer because it is not yet its turn. It is not a problem, the data
+remains in the conn_stream's buffer (or the stream one) and will be restarted
+once the mux is ready to consume these data.
+
+
+ cs_recv() -------. cs_send()
+ ^ +--------> |||||| -------------+ ^
+ | | -------' | | stream
+ --|----------|-------------------------------|-------|-------------------
+ | | V | connection
+ data .---. | | room
+ ready! |---| |---| available!
+ |---| |---|
+ |---| |---|
+ | | '---'
+ ^ +------------+-------+ |
+ | | ^ | /
+ / V | V /
+ / recvfrom() | sendto() |
+ -------------|----------------|--------------|---------------------------
+ | | poll! V kernel
+
+
+The cs_recv() function should act on pointers to buffer pointers, so that the
+callee may decide to pass its own buffer directly by simply swapping pointers.
+Similarly for cs_send() it is desirable to let the callee steal the buffer by
+swapping the pointers. This way it remains possible to implement zero-copy
+forwarding.
+
+Some operation flags will be needed on cs_recv() :
+ - RECV_ZERO_COPY : refuse to merge new data into the current buffer if it
+ will result in a data copy (ie the buffer is not empty), unless no more
+ than XXX bytes have to be copied (eg: copying 2 cache lines may be cheaper
+ than waiting and playing with pointers)
+
+ - RECV_AT_ONCE : only perform the operation if it will result in the source
+ buffer to become empty at the end of the operation so that no two buffers
+ remain allocated at the end. It will most of the time result in either a
+ small read or a zero-copy operation.
+
+ - RECV_PEEK : retrieve a copy of pending data without removing these data
+ from the source buffer. Maybe an alternate solution could consist in
+ finding the pointer to the source buffer and accessing these data directly,
+ except that it might be less interesting for the long term, thread-wise.
+
+ - RECV_MIN : receive minimum X bytes (or less with a shutdown), or fail.
+ This should help various protocol parsers which need to receive a complete
+ frame before proceeding.
+
+ - RECV_ENOUGH : no more data expected after this read if it's of the
+ requested size, thus no need to re-enable receiving on the lower layers.
+
+ - RECV_ONE_SHOT : perform a single read without re-enabling reading on the
+ lower layers, like we currently do when receiving an HTTP/1 request. Like
+ RECV_ENOUGH where any size is enough. Probably that the two could be merged
+ (eg: by having a MIN argument like RECV_MIN).
+
+
+Some operation flags will be needed on cs_send() :
+ - SEND_ZERO_COPY : refuse to merge the presented data with existing data and
+ prefer to wait for current data to leave and try again, unless the consumer
+ considers the amount of data acceptable for a copy.
+
+ - SEND_AT_ONCE : only perform the operation if it will result in the source
+ buffer to become empty at the end of the operation so that no two buffers
+ remain allocated at the end. It will most of the time result in either a
+ small write or a zero-copy operation.
+
+
+Both operations should return a composite status :
+ - number of bytes transferred
+ - status flags (shutr, shutw, reset, empty, full, ...)
+
+
+2018-07-23 - Update after merging rxbuf
+---------------------------------------
+
+It becomes visible that the mux will not always be welcome to decode incoming
+data because it will sometimes imply extra memory copies and/or usage for no
+benefit.
+
+Ideally, when a stream is instantiated based on incoming data, these
+incoming data should be passed and the upper layers called, but it should then
+be up to these upper layers to peek more data in certain circumstances. Typically
+if the pending connection data are larger than what is expected to be passed
+above, it means some data may cause head-of-line blocking (HOL) to other
+streams, and needs to be pushed up through the layers to let other streams
+continue to work. Similarly very large H2 data frames after header frames
+should probably not be passed as they may require copies that could be avoided
+if passed later. However if the decoded frame fits into the conn_stream's
+buffer, there is an opportunity to use a single buffer for the conn_stream
+and the channel. The H2 demux could set a blocking flag indicating it's waiting
+for the upper stream to take over demuxing. This flag would be purged once the
+upper stream would start reading, or when extra data come and change the
+conditions.
+
+Forcing structured headers and raw data to coexist within a single buffer is
+quite challenging for many code parts. For example it's perfectly possible to
+see a fragmented buffer containing series of headers, then a small data chunk
+that was received at the same time, then a few other headers added by request
+processing, then another data block received afterwards, then possibly yet
+another header added by option http-send-name-header, and yet another data
+block. This causes some pain for compression which still needs to know where
+compressed and uncompressed data start/stop. It also makes it very difficult
+to account the exact bytes to pass through the various layers.
+
+One solution consists in thinking about buffers using 3 representations :
+
+ - a structured message, which is used for the internal HTTP representation.
+ This message may only be atomically processed. It has no clear byte count,
+ it's a message.
+
+ - a raw stream, consisting in sequences of bytes. That's typically what
+ happens in data sequences or in tunnel.
+
+ - a pipe, which contains data to be forwarded, and that haproxy cannot have
+ access to.
+
+The processing efficiency decreases with the higher complexity above, but the
+capabilities increase. The structured message can contain anything including
+serialized data blocks to be processed or forwarded. The raw stream contains
+data blocks to be processed or forwarded. The pipe only contains data blocks
+to be forwarded. The latter ones are only an optimization of the former
+ones.
+
+Thus ideally a channel should have access to all such 3 storage areas at once,
+depending on the use case :
+ (1) a structured message,
+ (2) a raw stream,
+ (3) a pipe
+
+Right now a channel only has (2) and (3) but after the native HTTP rework, it
+will only have (1) and (3). Placing a raw stream exclusively in (1) comes with
+some performance drawbacks which are not easily recovered, and with some quite
+difficult management still involving the reserve to ensure that a data block
+doesn't prevent headers from being appended. But during header processing, the
+payload may be necessary so we cannot decide to drop this option.
+
+A long-term approach would consist in ensuring that a single channel may have
+access to all 3 representations at once, and to enumerate priority rules to
+define how they interact together. That's exactly what is currently being done
+with the pipe and the raw buffer right now. Doing so would also save the need
+for storing payload in the structured message and void the requirement for the
+reserve. But it would cost more memory to process POST data and server
+responses. Thus an intermediary step consists in keeping this model in mind but
+not implementing everything yet.
+
+Short term proposal : a channel has access to a buffer and a pipe. A non-empty
+buffer is either in structured message format OR raw stream format. Only the
+channel knows. However a structured buffer MAY contain raw data in a properly
+formatted way (using the envelope defined by the structured message format).
+
+By default, when a demux writes to a CS rxbuf, it will try to use the lowest
+possible level for what is being done (i.e. splice if possible, otherwise raw
+stream, otherwise structured message). If the buffer already contains a
+structured message, then this format is exclusive. From this point the MUX has
+two options : either encode the incoming data to match the structured message
+format, or refrain from receiving into the CS's rxbuf and wait until the upper
+layer requests those data.
+
+This opens a simplified option which could be suited even for the long term :
+ - cs_recv() will take one or two flags to indicate if a buffer already
+ contains a structured message or not ; the upper layer knows it.
+
+ - cs_recv() will take two flags to indicate what the upper layer is willing
+ to take :
+ - structured message only
+ - raw stream only
+ - any of them
+
+ From this point the mux can decide to either pass anything or refrain from
+ doing so.
+
+ - the demux stores the knowledge it has from the contents into some CS flags
+ to indicate whether or not some structured message are still available, and
+ whether or not some raw data are still available. Thus the caller knows
+ whether or not extra data are available.
+
+ - when the demux works on its own, it refrains from passing structured data
+ to a non-empty buffer, unless these data are causing trouble to other
+ streams (HOL).
+
+ - when a demux has to encapsulate raw data into a structured message, it will
+ always have to respect a configured reserve so that extra header processing
+ can be done on the structured message inside the buffer, regardless of the
+ supposed available room. In addition, the upper layer may indicate using an
+ extra recv() flag whether it wants the demux to defragment serialized data
+ (for example by moving trailing headers apart) or if it's not necessary.
+ This flag will be set by the stream interface if compression is required or
+ if the http-buffer-request option is set for example. Probably that using
+ to_forward==0 is a stronger indication that the reserve must be respected.
+
+ - cs_recv() and cs_send() when fed with a message, should not return byte
+ counts but message counts (i.e. 0 or 1). This implies that a single call to
+ either of these functions cannot mix raw data and structured messages at
+ the same time.
+
+At this point it looks like the conn_stream will have some encapsulation work
+to do for the payload if it needs to be encapsulated into a message. This
+further magnifies the importance of *not* decoding DATA frames into the CS's
+rxbuf until really needed.
+
+The CS will probably need to hold indication of what is available at the mux
+level, not only in the CS. Eg: we know that payload is still available.
+
+Using these elements, it should be possible to ensure that full header frames
+may be received without enforcing any reserve, that too large frames that do
+not fit will be detected because they return 0 message and indicate that such
+a message is still pending, and that data availability is correctly detected
+(later we may expect that the stream-interface allocates a larger or second
+buffer to place the payload).
+
+Regarding the ability for the channel to forward data, it looks like having a
+new function "cs_xfer(src_cs, dst_cs, count)" could be very productive in
+optimizing the forwarding to make use of splicing when available. It is not yet
+totally clear whether it will split into "cs_xfer_in(src_cs, pipe, count)"
+followed by "cs_xfer_out(dst_cs, pipe, count)" or anything different, and it
+still needs to be studied. The general idea seems to be that the receiver might
+have to call the sender directly once they agree on how to transfer data (pipe
+or buffer). If the transfer is incomplete, the cs_xfer() return value and/or
+flags will indicate the current situation (src empty, dst full, etc) so that
+the caller may register for notifications on the appropriate event and wait to
+be called again to continue.
+
+Short term implementation :
+ 1) add new CS flags to qualify what the buffer contains and what we expect
+ to read into it;
+
+ 2) set these flags to pretend we have a structured message when receiving
+ headers (after all, H1 is an atomic header as well) and see what it
+ implies for the code; for H1 it's unclear whether it makes sense to try
+ to set it without the H1 mux.
+
+ 3) use these flags to refrain from sending DATA frames after HEADERS frames
+ in H2.
+
+ 4) flush the flags at the stream interface layer when performing a cs_send().
+
+ 5) use the flags to enforce receipt of data only when necessary
+
+We should be able to end up with sequential receipt in H2 modelling what is
+needed for other protocols without interfering with the native H1 devs.
+
+
+2018-08-17 - Considerations after killing cs_recv()
+---------------------------------------------------
+
+With the ongoing reorganisation of the I/O layers, it's visible that cs_recv()
+will have to transfer data between the cs' rxbuf and the channel's buffer while
+not being aware of the data format. Moreover, in case there's no data there, it
+needs to recursively call the mux's rcv_buf() to trigger a decoding, while this
+function is sometimes replaced with cs_recv(). All this shows that cs_recv() is
+in fact needed while data are pushed upstream from the lower layers, and is not
+suitable for the "pull" mode. Thus it was decided to remove this function and
+put its code back into h2_rcv_buf(). The H1 mux's rcv_buf() already couldn't be
+replaced with cs_recv() since it is the only one knowing about the buffer's
+format.
+
+This opportunity simplified something : if the cs's rxbuf is only read by the
+mux's rcv_buf() method, then it doesn't need to be located into the CS and is
+well placed into the mux's representation of the stream. This has an important
+impact for H2 as it offers more freedom to the mux to allocate/free/reallocate
+this buffer, and it ensures the mux always has access to it.
+
+Furthermore, the conn_stream's txbuf experienced the same fate. Indeed, the H1
+mux has already uncovered the difficulty related to the channel shutting down
+on output, with data stuck into the CS's txbuf. Since the CS is tightly coupled
+to the stream and the stream can close immediately once its buffers are empty,
+it required a way to support orphaned CS with pending data in their txbuf. This
+is something that the H2 mux already has to deal with, by carefully leaving the
+data in the channel's buffer. But due to the snd_buf() call being top-down, it
+is always possible to push the stream's data via the mux's snd_buf() call
+without requiring a CS txbuf anymore. Thus the txbuf (when needed) is only
+implemented in the mux and attached to the mux's representation of the stream,
+and doing so allows to immediately release the channel once the data are safe
+in the mux's buffer.
+
+This is an important change which clarifies the roles and responsibilities of
+each layer in the chain : when receiving data from a mux, it's the mux's
+responsibility to make sure it can correctly decode the incoming data and to
+buffer the possible excess of data it cannot pass to the requester. This means
+that decoding an H2 frame, which is not retryable since it has an impact on the
+HPACK decompression context, and which cannot be reordered for the same reason,
+simply needs to be performed to the H2 stream's rxbuf which will then be passed
+to the stream when this one calls h2_rcv_buf(), even if it reads one byte at a
+time. Similarly when calling h2_snd_buf(), it's the mux's responsibility to
+read as much as it needs to be able to restart later, possibly by buffering
+some data into a local buffer. And it's only once all the output data has been
+consumed by snd_buf() that the stream is free to disappear.
+
+This model presents the nice benefit of being infinitely stackable and solving
+the last identified showstoppers to move towards a structured message internal
+representation, as it will give full power to the rcv_buf() and snd_buf() to
+process what they need.
+
+For now the conn_stream's flags indicating whether a shutdown has been seen in
+any direction or if an end of stream was seen will remain in the conn_stream,
+though it's likely that some of them will move to the mux's representation of
+the stream after structured messages are implemented.
diff --git a/doc/internals/notes-poll-connect.txt b/doc/internals/notes-poll-connect.txt
new file mode 100644
index 0000000..5cb0885
--- /dev/null
+++ b/doc/internals/notes-poll-connect.txt
@@ -0,0 +1,93 @@
+2022-11-17 - Tests involving poll() return states upon a pending connect().
+
+- connect() to a closed port returns OUT and HUP:
+
+ $ dev/poll/poll -v -l clo -c pol
+ #### BEGIN ####
+ cmd #1 stp #1: clo(l=3): ret=0
+ cmd #2 stp #0: con(c=4): ret=-1 (Connection refused)
+ cmd #2 stp #1: pol(c=4): ret=1 ev=0x14 (OUT HUP)
+ #### END ####
+
+=> with HUP we *know* the connection failed, since we never asked for a
+ SHUTW before connecting. It is indeed an error as can be seen with
+ connect() returning -1 ECONNREFUSED.
+
+- connect() to a port that does close(accept()) does return IN and RDHUP:
+
+ $ dev/poll/poll -v -s clo -c pol
+ #### BEGIN ####
+ cmd #1 stp #0: con(c=4): ret=0
+ cmd #1 stp #0: acc(l=3): ret=5
+ cmd #1 stp #1: clo(s=5): ret=0
+ cmd #2 stp #1: pol(c=4): ret=1 ev=0x2005 (IN OUT RDHUP)
+ #### END ####
+
+=> here there's no HUP, only RDHUP because the FIN is pending in the
+ socket buffers, waiting to be read.
+
+- for a HUP to happen after a connect() to a valid port, one would have to
+ perform a shutw() on the client, which is normally not the case, indicating
+ that HUP is reliable here:
+
+ $ dev/poll/poll -v -s clo -c shw,pol
+ #### BEGIN ####
+ cmd #1 stp #0: con(c=4): ret=0
+ cmd #1 stp #0: acc(l=3): ret=5
+ cmd #1 stp #1: clo(s=5): ret=0
+ cmd #2 stp #1: shw(c=4): ret=0
+ cmd #2 stp #2: pol(c=4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+ #### END ####
+
+- one case that may happen is when sending a request and immediately shutw()
+ (which leaves a TIME_WAIT so not recommended):
+
+ $ dev/poll/poll -v -c snd,shw -s clo -c pol,rcv,pol
+ #### BEGIN ####
+ cmd #1 stp #0: con(c=4): ret=0
+ cmd #1 stp #1: snd(c=4): ret=3
+ cmd #1 stp #2: shw(c=4): ret=0
+ cmd #2 stp #0: acc(l=3): ret=5
+ cmd #2 stp #1: clo(s=5): ret=0
+ cmd #3 stp #1: pol(c=4): ret=1 ev=0x201d (IN OUT ERR HUP RDHUP)
+ cmd #3 stp #2: rcv(c=4): ret=-1 (Connection reset by peer)
+ cmd #3 stp #3: pol(c=4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+ #### END ####
+
+=> here it's impossible to know from the client whether the server consumed the
+ data or not, which is normal since a close on the server causes an RST to be
+ emitted for the data in flight, hence the ERR here. It's also worth noting
+ that once POLL_ERR is consumed by recv() it disappears.
+
+- for the server, sending a shutw() before closing here delivers an ACK in time
+ that prevents the RST from being sent, thus connect() is not notified (but if
+ the server has too much to send, it will truncate and emit an RST):
+
+ $ dev/poll/poll -v -c snd,shw -s shw,clo -c pol,rcv,pol
+ #### BEGIN ####
+ cmd #1 stp #0: con(c=4): ret=0
+ cmd #1 stp #1: snd(c=4): ret=3
+ cmd #1 stp #2: shw(c=4): ret=0
+ cmd #2 stp #0: acc(l=3): ret=5
+ cmd #2 stp #1: shw(s=5): ret=0
+ cmd #2 stp #2: clo(s=5): ret=0
+ cmd #3 stp #1: pol(c=4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+ cmd #3 stp #2: rcv(c=4): ret=0
+ cmd #3 stp #3: pol(c=4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+ #### END ####
+
+- if the server sends a response, disables lingering and closes with RST, it is
+ possible to get HUP and ERR at the same time during the connect() phase, and
+ recv() can still receive the pending response:
+
+ $ dev/poll/poll -v -s snd,lin,clo -c pol,rcv,pol
+ #### BEGIN ####
+ cmd #1 stp #0: con(c=4): ret=0
+ cmd #1 stp #0: acc(l=3): ret=5
+ cmd #1 stp #1: snd(s=5): ret=3
+ cmd #1 stp #2: lin(s=5): ret=0
+ cmd #1 stp #3: clo(s=5): ret=0
+ cmd #2 stp #1: pol(c=4): ret=1 ev=0x201d (IN OUT ERR HUP RDHUP)
+ cmd #2 stp #2: rcv(c=4): ret=3
+ cmd #2 stp #3: pol(c=4): ret=1 ev=0x201d (IN OUT ERR HUP RDHUP)
+ #### END ####
diff --git a/doc/internals/notes-pollhup.txt b/doc/internals/notes-pollhup.txt
new file mode 100644
index 0000000..ced332b
--- /dev/null
+++ b/doc/internals/notes-pollhup.txt
@@ -0,0 +1,281 @@
+tcp mode 8001->8008
+
+
+Remote test:
+============
+
+willy@up1:~$ echo bar | ncat -lp8008
+willy@wtap:haproxy$ echo foo | ncat 127.1 8001
+
+17:09:53.663154 epoll_wait(3, [{EPOLLIN, {u32=5, u64=5}}], 200, 1000) = 1
+17:09:54.582146 accept4(5, {sa_family=AF_INET, sin_port=htons(33378), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 8
+17:09:54.582299 setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:09:54.582527 accept4(5, 0x7ffc4a8bf330, [128], SOCK_NONBLOCK) = -1 EAGAIN (Resource temporarily unavailable)
+17:09:54.582655 recvfrom(8, "foo\n", 15360, 0, NULL, NULL) = 4
+17:09:54.582727 recvfrom(8, "", 15356, 0, NULL, NULL) = 0
+17:09:54.582827 socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 9
+17:09:54.582878 setsockopt(9, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:09:54.582897 connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("10.0.3.82")}, 16) = -1 EINPROGRESS (Operation now in progress)
+17:09:54.582941 sendto(9, "foo\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = -1 EAGAIN (Resource temporarily unavailable)
+17:09:54.582968 epoll_ctl(3, EPOLL_CTL_ADD, 9, {EPOLLOUT, {u32=9, u64=9}}) = 0
+17:09:54.582997 epoll_wait(3, [{EPOLLOUT, {u32=9, u64=9}}], 200, 1000) = 1
+17:09:54.583686 connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("10.0.3.82")}, 16) = 0
+17:09:54.583706 sendto(9, "foo\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = 4
+17:09:54.583733 recvfrom(9, 0x19c2300, 15360, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+17:09:54.583755 shutdown(9, SHUT_WR) = 0
+17:09:54.583775 epoll_ctl(3, EPOLL_CTL_MOD, 9, {EPOLLIN|EPOLLRDHUP, {u32=9, u64=9}}) = 0
+17:09:54.583802 epoll_wait(3, [{EPOLLIN, {u32=9, u64=9}}], 200, 1000) = 1
+17:09:54.584672 recvfrom(9, "bar\n", 16384, 0, NULL, NULL) = 4
+17:09:54.584713 recvfrom(9, "", 16380, 0, NULL, NULL) = 0
+17:09:54.584743 sendto(8, "bar\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL|MSG_MORE, NULL, 0) = 4
+17:09:54.584819 epoll_wait(3, [], 200, 0) = 0
+17:09:54.584901 epoll_wait(3, [], 200, 1000) = 0
+
+
+Notes:
+ - we had data available to try the connect() (see first attempt), despite
+   this, during the retry we sent the connect() again!
+
+ - why do we wait before sending the shutw to the server if we already know
+ it's needed ? missing CF_SHUTW_NOW ? Missing request forwarding ? Missing
+ auto-close ?
+
+ - response didn't feature HUP nor RDHUP
+
+
+Local:
+
+17:15:43.010786 accept4(5, {sa_family=AF_INET, sin_port=htons(33506), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 8
+17:15:43.011013 setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:15:43.011181 accept4(5, 0x7ffcd9092cd0, [128], SOCK_NONBLOCK) = -1 EAGAIN (Resource temporarily unavailable)
+17:15:43.011231 recvfrom(8, "foo\n", 15360, 0, NULL, NULL) = 4
+17:15:43.011296 recvfrom(8, "", 15356, 0, NULL, NULL) = 0
+17:15:43.011318 socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 9
+17:15:43.011340 setsockopt(9, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:15:43.011353 connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 EINPROGRESS (Operation now in progress)
+17:15:43.011395 sendto(9, "foo\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = 4
+17:15:43.011425 shutdown(9, SHUT_WR) = 0
+17:15:43.011459 recvfrom(9, "bar\n", 16384, 0, NULL, NULL) = 4
+17:15:43.011491 recvfrom(9, "", 16380, 0, NULL, NULL) = 0
+17:15:43.011525 sendto(8, "bar\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL|MSG_MORE, NULL, 0) = 4
+17:15:43.011584 epoll_wait(3, [], 200, 0) = 0
+
+Notes:
+ - the shutdown() was properly done right after the sendto(), proving that
+ CF_SHUTW_NOW and auto-close were present. Maybe difference is sync vs async
+ send.
+
+
+Local with delay before closing client:
+
+17:18:17.155349 epoll_wait(3, [{EPOLLIN, {u32=5, u64=5}}], 200, 1000) = 1
+17:18:17.727327 accept4(5, {sa_family=AF_INET, sin_port=htons(33568), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 8
+17:18:17.727553 setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:18:17.727661 accept4(5, 0x7fff4eb9a0b0, [128], SOCK_NONBLOCK) = -1 EAGAIN (Resource temporarily unavailable)
+17:18:17.727798 recvfrom(8, 0xbda300, 15360, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+17:18:17.727830 socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 9
+17:18:17.727858 setsockopt(9, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:18:17.727877 connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 EINPROGRESS (Operation now in progress)
+17:18:17.727923 epoll_ctl(3, EPOLL_CTL_ADD, 8, {EPOLLIN|EPOLLRDHUP, {u32=8, u64=8}}) = 0
+17:18:17.727945 epoll_ctl(3, EPOLL_CTL_ADD, 9, {EPOLLOUT, {u32=9, u64=9}}) = 0
+17:18:17.727989 epoll_wait(3, [{EPOLLOUT, {u32=9, u64=9}}], 200, 1000) = 1
+17:18:17.728010 connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("127.0.0.1")}, 16) = 0
+17:18:17.728027 recvfrom(9, "bar\n", 15360, 0, NULL, NULL) = 4
+17:18:17.728055 recvfrom(9, 0xbd62f4, 15356, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+17:18:17.728073 sendto(8, "bar\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = 4
+17:18:17.728104 epoll_ctl(3, EPOLL_CTL_MOD, 9, {EPOLLIN|EPOLLRDHUP, {u32=9, u64=9}}) = 0
+17:18:17.728127 epoll_wait(3, [], 200, 1000) = 0
+17:18:18.729411 epoll_wait(3, [], 200, 1000) = 0
+17:18:19.730654 epoll_wait(3, [{EPOLLIN|EPOLLRDHUP, {u32=8, u64=8}}], 200, 1000) = 1
+17:18:20.299268 recvfrom(8, "", 16384, 0, NULL, NULL) = 0
+17:18:20.299336 epoll_ctl(3, EPOLL_CTL_DEL, 8, 0x7ff3a969f7d0) = 0
+17:18:20.299379 epoll_wait(3, [], 200, 0) = 0
+17:18:20.299401 shutdown(9, SHUT_WR) = 0
+17:18:20.299523 epoll_wait(3, [{EPOLLIN|EPOLLHUP|EPOLLRDHUP, {u32=9, u64=9}}], 200, 1000) = 1
+17:18:20.299678 recvfrom(9, "", 16384, 0, NULL, NULL) = 0
+17:18:20.299761 epoll_wait(3, [], 200, 0) = 0
+
+Notes: server sent the response in two parts ("bar" then EOF) just due to
+netcat's implementation. The second epoll_wait() caught it.
+
+Here we clearly see that :
+ - read0 alone returns EPOLLIN|EPOLLRDHUP
+ - read0 after shutw returns EPOLLIN|EPOLLRDHUP|EPOLLHUP
+ => difference indeed is "cannot write"
+
+
+Local with a delay before closing the server:
+
+17:30:32.527157 epoll_wait(3, [{EPOLLIN, {u32=5, u64=5}}], 200, 1000) = 1
+17:30:33.216827 accept4(5, {sa_family=AF_INET, sin_port=htons(33908), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 8
+17:30:33.216957 setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:30:33.216984 accept4(5, 0x7ffc1a1fb0c0, [128], SOCK_NONBLOCK) = -1 EAGAIN (Resource temporarily unavailable)
+17:30:33.217071 recvfrom(8, "GET / HTTP/1.0\r\n\r\n\n", 15360, 0, NULL, NULL) = 19
+17:30:33.217115 recvfrom(8, "", 15341, 0, NULL, NULL) = 0
+17:30:33.217135 socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 9
+17:30:33.217176 setsockopt(9, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+17:30:33.217190 connect(9, {sa_family=AF_INET, sin_port=htons(8000), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 EINPROGRESS (Operation now in progress)
+17:30:33.217233 sendto(9, "GET / HTTP/1.0\r\n\r\n\n", 19, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = 19
+17:30:33.217272 shutdown(9, SHUT_WR) = 0
+17:30:33.217318 recvfrom(9, 0x109b2f0, 16384, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+17:30:33.217332 epoll_ctl(3, EPOLL_CTL_ADD, 9, {EPOLLIN|EPOLLRDHUP, {u32=9, u64=9}}) = 0
+17:30:33.217355 epoll_wait(3, [{EPOLLIN|EPOLLHUP|EPOLLRDHUP, {u32=9, u64=9}}], 200, 1000) = 1
+17:30:33.217377 recvfrom(9, "HTTP/1.0 200\r\nContent-length: 0\r\nX-req: size=19, time=0 ms\r\nX-rsp: id=dummy, code=200, cache=1, size=0, time=0 ms (0 real)\r\n\r\n", 16384, 0, NULL, NULL) = 126
+17:30:33.217395 close(9) = 0
+17:30:33.217411 sendto(8, "HTTP/1.0 200\r\nContent-length: 0\r\nX-req: size=19, time=0 ms\r\nX-rsp: id=dummy, code=200, cache=1, size=0, time=0 ms (0 real)\r\n\r\n", 126, MSG_DONTWAIT|MSG_NOSIGNAL|MSG_MORE, NULL, 0) = 126
+17:30:33.217464 close(8) = 0
+17:30:33.217496 epoll_wait(3, [], 200, 0) = 0
+
+
+Notes:
+ - RDHUP is properly present while some data remain pending.
+ - HUP is present since RDHUP + shutw
+
+It could be concluded that HUP indicates RDHUP+shutw and in no way indicates
+the ability to avoid reading.
+
+Below HUP|ERR|OUT are reported on connection failures, thus WITHOUT read:
+
+accept4(5, {sa_family=AF_INET, sin_port=htons(39080), sin_addr=inet_addr("127.0.0.1")}, [128->16], SOCK_NONBLOCK) = 8
+setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+accept4(5, 0x7ffffba55730, [128], SOCK_NONBLOCK) = -1 EAGAIN (Resource temporarily unavailable)
+recvfrom(8, "foo\n", 15360, 0, NULL, NULL) = 4
+recvfrom(8, 0x7f634dcfeff4, 15356, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 9
+fcntl(9, F_SETFL, O_RDONLY|O_NONBLOCK) = 0
+setsockopt(9, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+connect(9, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("10.0.3.82")}, 16) = -1 EINPROGRESS (Operation now in progress)
+sendto(9, "foo\n", 4, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = -1 EAGAIN (Resource temporarily unavailable)
+epoll_ctl(3, EPOLL_CTL_ADD, 8, {EPOLLIN|EPOLLRDHUP, {u32=8, u64=8}}) = 0
+epoll_ctl(3, EPOLL_CTL_ADD, 9, {EPOLLOUT, {u32=9, u64=9}}) = 0
+epoll_wait(3, [{EPOLLOUT|EPOLLERR|EPOLLHUP, {u32=9, u64=9}}], 200, 1000) = 1
+getsockopt(9, SOL_SOCKET, SO_ERROR, [111], [4]) = 0
+recvfrom(9, "", 15360, 0, NULL, NULL) = 0
+close(9) = 0
+
+
+On a failed connect attempt immediately followed by a failed recv (all flags
+set), we can see this:
+
+socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 8
+fcntl(8, F_SETFL, O_RDONLY|O_NONBLOCK) = 0
+setsockopt(8, SOL_TCP, TCP_NODELAY, [1], 4) = 0
+connect(8, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("10.0.3.82")}, 16) = -1 EINPROGRESS (Operation now in progress)
+recvfrom(8, 0x1084a20, 16384, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+epoll_ctl(3, EPOLL_CTL_ADD, 8, {EPOLLIN|EPOLLOUT|EPOLLRDHUP, {u32=8, u64=8}}) = 0
+epoll_wait(3, [{EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP|EPOLLRDHUP, {u32=8, u64=8}}], 200, 1000) = 1
+connect(8, {sa_family=AF_INET, sin_port=htons(8008), sin_addr=inet_addr("10.0.3.82")}, 16) = -1 ECONNREFUSED (Connection refused)
+close(8) = 0
+
+=> all flags are reported in case of error.
+
+It's also interesting to note that POLLOUT is still reported after a shutw,
+and no send error is ever reported after shutw:
+
+ shutdown(4, SHUT_WR) = 0
+ poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}])
+
+and:
+ shutdown(4, SHUT_WR) = 0
+ sendto(5, "foo", 3, MSG_NOSIGNAL, NULL, 0) = 3
+ poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLIN|POLLOUT}])
+
+and:
+ shutdown(4, SHUT_WR) = 0
+ sendto(4, "bar", 3, MSG_NOSIGNAL, NULL, 0) = -1 EPIPE (Broken pipe)
+ poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}])
+
+
+POLLOUT is still reported after a SHUTWR:
+
+socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 3
+setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+bind(3, {sa_family=AF_INET, sin_port=htons(0), sin_addr=inet_addr("0.0.0.0")}, 16) = 0
+listen(3, 1000) = 0
+getsockname(3, {sa_family=AF_INET, sin_port=htons(34729), sin_addr=inet_addr("0.0.0.0")}, [16]) = 0
+socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 4
+connect(4, {sa_family=AF_INET, sin_port=htons(34729), sin_addr=inet_addr("0.0.0.0")}, 16) = 0
+accept(3, 0x7ffcd6a68300, [0->16]) = 5
+fstat(1, {st_mode=S_IFCHR|0620, st_rdev=makedev(136, 3), ...}) = 0
+brk(NULL) = 0xc4e000
+brk(0xc6f000) = 0xc6f000
+write(1, "\n", 1
+) = 1
+shutdown(4, SHUT_WR) = 0
+poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}])
+write(1, "ret=1 ev={fd:4 ev:4}\n", 21ret=1 ev={fd:4 ev:4}
+) = 21
+close(5) = 0
+close(4) = 0
+close(3) = 0
+
+Performing a write() on it reports a SIGPIPE:
+
+shutdown(4, SHUT_WR) = 0
+sendto(4, "bar", 3, MSG_NOSIGNAL, NULL, 0) = -1 EPIPE (Broken pipe)
+poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLOUT}])
+
+
+On SHUT_RD we see POLLIN|POLLOUT|POLLRDHUP (there's no data pending here) :
+shutdown(4, SHUT_RD) = 0
+poll([{fd=4, events=POLLIN|POLLOUT|POLLRDHUP}], 1, 0) = 1 ([{fd=4, revents=POLLIN|POLLOUT|POLLRDHUP}])
+
+
+What is observed in the end :
+ - POLLOUT is always reported for anything SHUT_WR even if it would cause a broken pipe, including listeners if they're also SHUT_RD
+ - POLLHUP is always reported for anything SHUT_WR + having a SHUT_RD pending with or without anything to read, including listeners
+ - POLLIN is always reported for anything to read or a pending zero
+ - POLLIN is NOT reported for SHUT_RD listeners, even with pending connections, only OUT+HUP are reported
+ - POLLIN and POLLRDHUP are always reported after a SHUTR
+ - POLLERR also enables IN,OUT,HUP,RDHUP
+
+
+
+
+
+
+
+Currently there's a bit of an issue with connect() being too impatient to read:
+
+16:26:06.818521 connect(9, {sa_family=AF_INET, sin_port=htons(8000), sin_addr=inet_addr("127.0.0.1")}, 16) = -1 EINPROGRESS (Operation now in progress)
+16:26:06.818558 recvfrom(9, 0x1db9400, 16320, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+16:26:06.818571 epoll_ctl(3, EPOLL_CTL_ADD, 9, {EPOLLIN|EPOLLOUT|EPOLLRDHUP, {u32=9, u64=9}}) = 0
+16:26:06.818588 epoll_wait(3, [{EPOLLOUT, {u32=9, u64=9}}], 200, 1000) = 1
+16:26:06.818603 connect(9, {sa_family=AF_INET, sin_port=htons(8000), sin_addr=inet_addr("127.0.0.1")}, 16) = 0
+16:26:06.818617 sendto(9, "GET /?s=10k HTTP/1.1\r\nhost: 127.0.0.1:4445\r\nuser-agent: curl/7.54.1\r\naccept: */*\r\n\r\n", 84, MSG_DONTWAIT|MSG_NOSIGNAL, NULL, 0) = 84
+16:26:06.818660 epoll_ctl(3, EPOLL_CTL_MOD, 9, {EPOLLIN|EPOLLRDHUP, {u32=9, u64=9}}) = 0
+16:26:06.818696 epoll_wait(3, [{EPOLLIN, {u32=9, u64=9}}], 200, 1000) = 1
+16:26:06.818711 recvfrom(9, "HTTP/1.1 200\r\nContent-length: 10240\r\nX-req: size=84, time=0 ms\r\nX-rsp: id=dummy, code=200, cache=1, size=10240, time=0 ms (0 real)\r\n\r\n89.123456789.12345678\n.123456789.123456789.123456789.123456789.123"..., 16320, 0, NULL, NULL) = 10374
+16:26:06.818735 recvfrom(9, 0x1dd75f6, 5946, 0, NULL, NULL) = -1 EAGAIN (Resource temporarily unavailable)
+16:26:06.818790 epoll_ctl(3, EPOLL_CTL_DEL, 9, 0x7ffa818fd7d0) = 0
+16:26:06.818804 epoll_wait(3, [], 200, 0) = 0
+
+
+
+
+This one shows that the error is not definitive, it disappears once it's
+been signaled, then only shut remains! Also it's a proof that an error
+may well be reported after a shutw, so the r/w error may not be merged
+with a shutw since it may appear after a deliberate shutw.
+
+$ ./contrib/debug/poll -v -c snd,shw -s pol,rcv,pol,rcv,pol,snd,lin,clo -c pol,rcv,pol,rcv,pol,rcv,pol
+#### BEGIN ####
+cmd #1 stp #1: do_snd(4): ret=3
+cmd #1 stp #2: do_shw(4): ret=0
+cmd #2 stp #0: do_acc(3): ret=5
+cmd #2 stp #1: do_pol(5): ret=1 ev=0x2005 (IN OUT RDHUP)
+cmd #2 stp #2: do_rcv(5): ret=3
+cmd #2 stp #3: do_pol(5): ret=1 ev=0x2005 (IN OUT RDHUP)
+cmd #2 stp #4: do_rcv(5): ret=0
+cmd #2 stp #5: do_pol(5): ret=1 ev=0x2005 (IN OUT RDHUP)
+cmd #2 stp #6: do_snd(5): ret=3
+cmd #2 stp #7: do_lin(5): ret=0
+cmd #2 stp #8: do_clo(5): ret=0
+cmd #3 stp #1: do_pol(4): ret=1 ev=0x201d (IN OUT ERR HUP RDHUP)
+cmd #3 stp #2: do_rcv(4): ret=3
+cmd #3 stp #3: do_pol(4): ret=1 ev=0x201d (IN OUT ERR HUP RDHUP)
+cmd #3 stp #4: do_rcv(4): ret=-1 (Connection reset by peer)
+cmd #3 stp #5: do_pol(4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+cmd #3 stp #6: do_rcv(4): ret=0
+cmd #3 stp #7: do_pol(4): ret=1 ev=0x2015 (IN OUT HUP RDHUP)
+#### END ####
diff --git a/doc/internals/notes-polling.txt b/doc/internals/notes-polling.txt
new file mode 100644
index 0000000..e7741a6
--- /dev/null
+++ b/doc/internals/notes-polling.txt
@@ -0,0 +1,192 @@
+2019-09-03
+
+u8 fd.state;
+u8 fd.ev;
+
+
+ev = one of :
+ #define FD_POLL_IN 0x01
+ #define FD_POLL_PRI 0x02
+ #define FD_POLL_OUT 0x04
+ #define FD_POLL_ERR 0x08
+ #define FD_POLL_HUP 0x10
+
+Could we instead have :
+
+ FD_WAIT_IN 0x01
+ FD_WAIT_OUT 0x02
+ FD_WAIT_PRI 0x04
+ FD_SEEN_HUP 0x08
+ FD_SEEN_ERR 0x10
+ FD_WAIT_CON 0x20 <<= shouldn't this be in the connection itself in fact ?
+
+=> not needed, covered by the state instead.
+
+What is missing though is :
+ - FD_DATA_PENDING -- overlaps with READY_R, OK if passed by pollers only
+ - FD_EOI_PENDING
+ - FD_ERR_PENDING
+ - FD_EOI
+ - FD_SHW
+ - FD_ERR
+
+fd_update_events() could do that :
+
+ if ((fd_data_pending|fd_eoi_pending|fd_err_pending) && !(fd_err|fd_eoi))
+ may_recv()
+
+ if (fd_send_ok && !(fd_err|fd_shw))
+ may_send()
+
+ if (fd_err)
+ wake()
+
+the poller could do that :
+ HUP+OUT => always indicates a failed connect(), it should not lack ERR. Is this err_pending ?
+
+ ERR HUP OUT IN
+ 0 0 0 0 => nothing
+ 0 0 0 1 => FD_DATA_PENDING
+ 0 0 1 0 => FD_SEND_OK
+ 0 0 1 1 => FD_DATA_PENDING|FD_SEND_OK
+ 0 1 0 0 => FD_EOI (|FD_SHW)
+ 0 1 0 1 => FD_DATA_PENDING|FD_EOI_PENDING (|FD_SHW)
+ 0 1 1 0 => FD_EOI |FD_ERR (|FD_SHW)
+ 0 1 1 1 => FD_EOI_PENDING (|FD_ERR_PENDING) |FD_DATA_PENDING (|FD_SHW)
+ 1 X 0 0 => FD_ERR | FD_EOI (|FD_SHW)
+ 1 X X 1 => FD_ERR_PENDING | FD_EOI_PENDING | FD_DATA_PENDING (|FD_SHW)
+ 1 X 1 0 => FD_ERR | FD_EOI (|FD_SHW)
+
+ OUT+HUP,OUT+HUP+ERR => FD_ERR
+
+This reorders to:
+
+ IN ERR HUP OUT
+ 0 0 0 0 => nothing
+ 0 0 0 1 => FD_SEND_OK
+ 0 0 1 0 => FD_EOI (|FD_SHW)
+
+ 0 X 1 1 => FD_ERR | FD_EOI (|FD_SHW)
+ 0 1 X 0 => FD_ERR | FD_EOI (|FD_SHW)
+ 0 1 X 1 => FD_ERR | FD_EOI (|FD_SHW)
+
+ 1 0 0 0 => FD_DATA_PENDING
+ 1 0 0 1 => FD_DATA_PENDING|FD_SEND_OK
+ 1 0 1 0 => FD_DATA_PENDING|FD_EOI_PENDING (|FD_SHW)
+ 1 0 1 1 => FD_EOI_PENDING (|FD_ERR_PENDING) |FD_DATA_PENDING (|FD_SHW)
+ 1 1 X X => FD_ERR_PENDING | FD_EOI_PENDING | FD_DATA_PENDING (|FD_SHW)
+
+Regarding "|SHW", it's normally useless since it will already have been done,
+except on connect() error where this indicates there's no need for SHW.
+
+FD_EOI and FD_SHW could be part of the state (FD_EV_SHUT_R, FD_EV_SHUT_W).
+Then all states having these bit and another one would be transient and need
+to resync. We could then have "fd_shut_recv" and "fd_shut_send" to turn these
+states.
+
+The FD's ev then only needs to update EOI_PENDING, ERR_PENDING, ERR, DATA_PENDING.
+With this said, these are not exactly polling states either, as err/eoi/shw are
+orthogonal to the other states and are required to update them so that the polling
+state really is DISABLED in the end. So we need more of an operational status for
+the FD containing EOI_PENDING, EOI, ERR_PENDING, ERR, SHW, CLO?. These could be
+classified in 3 categories: read:(OPEN, EOI_PENDING, EOI); write:(OPEN,SHW),
+ctrl:(OPEN,ERR_PENDING,ERR,CLO). That would be 2 bits for R, 1 for W, 2 for ctrl
+or total 5 vs 6 for individual ones, but would be harder to manipulate.
+
+Proposal:
+ - rename fdtab[].state to "polling_state"
+ - rename fdtab[].ev to "status"
+
+Note: POLLHUP is also reported if a listen() socket has gone in shutdown()
+TEMPORARILY! Thus we may not always consider this as a final error.
+
+
+Work hypothesis:
+
+SHUT RDY ACT
+ 0 0 0 => disabled
+ 0 0 1 => active
+ 0 1 0 => stopped
+ 0 1 1 => ready
+ 1 0 0 => final shut
+ 1 0 1 => shut pending without data
+ 1 1 0 => shut pending, stopped
+ 1 1 1 => shut pending
+
+PB: we can land into final shut if one thread disables the FD while another
+ one that was waiting on it reports it as shut. Theoretically it should be
+ implicitly ready though, since reported. But if no data is reported, it
+ will be reportedly shut only. And no event will be reported then. This
+ might still make sense since it's not active, thus we don't want events.
+ But it will not be enabled later either in this case so the shut really
+ risks not to be properly reported. The issue is that there's no difference
+ between a shut coming from the bottom and a shut coming from the top, and
+ we need an event to report activity here. Or we may consider that a poller
+ never leaves a final shut by itself (100) and always reports it as
+ shut+stop (thus ready) if it was not active. Alternately, if active is
+ disabled, shut should possibly be ignored, then a poller cannot report
+ shut. But shut+stopped seems the most suitable as it corresponds to
+ disabled->stopped transition.
+
+Now let's add ERR. ERR necessarily implies SHUT as there doesn't seem to be a
+valid case of ERR pending without shut pending.
+
+ERR SHUT RDY ACT
+ 0 0 0 0 => disabled
+ 0 0 0 1 => active
+ 0 0 1 0 => stopped
+ 0 0 1 1 => ready
+
+ 0 1 0 0 => final shut, no error
+ 0 1 0 1 => shut pending without data
+ 0 1 1 0 => shut pending, stopped
+ 0 1 1 1 => shut pending
+
+ 1 0 X X => invalid
+
+ 1 1 0 0 => final shut, error encountered
+ 1 1 0 1 => error pending without data
+ 1 1 1 0 => error pending after data, stopped
+ 1 1 1 1 => error pending
+
+So the algorithm for the poller is:
+ - if (shutdown_pending or error) reported and ACT==0,
+ report SHUT|RDY or SHUT|ERR|RDY
+
+For read handlers :
+ - if (!(flags & (RDY|ACT)))
+ return
+ - if (ready)
+ try_to_read
+ - if (err)
+ report error
+ - if (shut)
+ read0
+
+For write handlers:
+ - if (!(flags & (RDY|ACT)))
+ return
+ - if (err||shut)
+ report error
+ - if (ready)
+ try_to_write
+
+For listeners:
+ - if (!(flags & (RDY|ACT)))
+ return
+ - if (err||shut)
+ pause
+ - if (ready)
+ try_to_accept
+
+Kqueue reports events differently, it says EV_EOF() on READ or WRITE, that
+we currently map to FD_POLL_HUP and FD_POLL_ERR. Thus kqueue reports only
+POLLRDHUP and not POLLHUP, so for now a direct mapping of POLLHUP to
+FD_POLL_HUP does NOT imply write closed with kqueue while it does for others.
+
+Other approach, use the {RD,WR}_{ERR,SHUT,RDY} flags to build a composite
+status in each poller and pass this to fd_update_events(). We normally
+have enough to be precise, and this latter will rework the events.
+
+FIXME: Normally on KQUEUE we're supposed to look at kev[].fflags to get the error
+on EV_EOF() on read or write.
diff --git a/doc/internals/pattern.dia b/doc/internals/pattern.dia
new file mode 100644
index 0000000..3d13215
--- /dev/null
+++ b/doc/internals/pattern.dia
Binary files differ
diff --git a/doc/internals/pattern.pdf b/doc/internals/pattern.pdf
new file mode 100644
index 0000000..a8d8bc9
--- /dev/null
+++ b/doc/internals/pattern.pdf
Binary files differ
diff --git a/doc/internals/polling-states.fig b/doc/internals/polling-states.fig
new file mode 100644
index 0000000..3b2c782
--- /dev/null
+++ b/doc/internals/polling-states.fig
@@ -0,0 +1,59 @@
+#FIG 3.2 Produced by xfig version 2.3
+Portrait
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1125 1350 1125 1800
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1125 2250 1125 2700
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1125 3150 1125 3600
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1575 1800 1575 1350
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1575 3600 1575 3150
+2 4 0 1 0 7 51 -1 20 0.000 0 0 7 0 0 5
+ 1800 1350 900 1350 900 900 1800 900 1800 1350
+2 4 0 1 0 7 51 -1 20 0.000 0 0 7 0 0 5
+ 1800 2250 900 2250 900 1800 1800 1800 1800 2250
+2 4 0 1 0 7 51 -1 20 0.000 0 0 7 0 0 5
+ 1800 4050 900 4050 900 3600 1800 3600 1800 4050
+2 4 0 1 0 7 51 -1 20 0.000 0 0 7 0 0 5
+ 1800 3150 900 3150 900 2700 1800 2700 1800 3150
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1350 450 1350 900
+2 1 0 1 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 1 1 1.00 90.00 180.00
+ 1575 2700 1575 2250
+4 2 0 50 -1 16 8 0.0000 4 105 270 1080 1485 want\001
+4 2 0 50 -1 16 8 0.0000 4 120 255 1035 3285 stop\001
+4 0 0 50 -1 16 8 0.0000 4 105 270 1665 3510 want\001
+4 1 0 50 -1 16 10 0.0000 4 120 735 1350 1080 STOPPED\001
+4 1 0 50 -1 16 10 0.0000 4 120 795 1350 3780 DISABLED\001
+4 1 0 50 -1 16 10 0.0000 4 120 555 1350 2880 ACTIVE\001
+4 1 0 50 -1 16 10 0.0000 4 120 540 1350 1980 READY\001
+4 0 0 50 -1 16 8 0.0000 4 90 210 1665 2565 may\001
+4 2 0 50 -1 16 8 0.0000 4 105 240 1035 2430 cant\001
+4 1 0 50 -1 16 8 0.0000 4 120 240 1350 1260 R,!A\001
+4 1 0 50 -1 16 8 0.0000 4 120 210 1350 2160 R,A\001
+4 1 0 50 -1 16 8 0.0000 4 120 240 1350 3060 !R,A\001
+4 1 0 50 -1 16 8 0.0000 4 120 270 1350 3960 !R,!A\001
+4 0 0 50 -1 16 8 0.0000 4 120 255 1665 1710 stop\001
+4 0 0 50 -1 16 10 0.0000 4 150 855 2520 1125 R=ready flag\001
+4 0 0 50 -1 16 10 0.0000 4 150 885 2520 1290 A=active flag\001
+4 0 0 50 -1 16 10 0.0000 4 150 1365 2520 2475 fd_want sets A flag\001
+4 0 0 50 -1 16 10 0.0000 4 150 1440 2520 2640 fd_stop clears A flag\001
+4 0 0 50 -1 16 10 0.0000 4 150 1905 2520 3300 update() updates the poller.\001
+4 0 0 50 -1 16 10 0.0000 4 150 2190 2520 2970 fd_cant clears R flag (EAGAIN)\001
+4 0 0 50 -1 16 10 0.0000 4 150 2115 2520 3135 fd_rdy sets R flag (poll return)\001
diff --git a/doc/internals/sched.fig b/doc/internals/sched.fig
new file mode 100644
index 0000000..4134420
--- /dev/null
+++ b/doc/internals/sched.fig
@@ -0,0 +1,748 @@
+#FIG 3.2 Produced by xfig version 2.4
+Landscape
+Center
+Metric
+A4
+150.00
+Single
+-2
+1200 2
+0 32 #c5ebe1
+0 33 #86c8a2
+0 34 #ffebac
+0 35 #cbb366
+0 36 #c7b696
+0 37 #effbff
+0 38 #dfcba6
+0 39 #414141
+0 40 #aeaaae
+0 41 #595559
+0 42 #414141
+0 43 #868286
+0 44 #bec3be
+0 45 #868286
+0 46 #bec3be
+0 47 #dfe3df
+0 48 #8e8e8e
+0 49 #8e8e8e
+0 50 #414141
+0 51 #868286
+0 52 #bec3be
+0 53 #dfe3df
+0 54 #414141
+0 55 #868286
+0 56 #bec3be
+0 57 #dfe3df
+0 58 #868286
+0 59 #bec3be
+0 60 #dfe3df
+0 61 #c7b696
+0 62 #effbff
+0 63 #dfcba6
+0 64 #c7b696
+0 65 #effbff
+0 66 #dfcba6
+0 67 #aeaaae
+0 68 #595559
+0 69 #8e8e8e
+0 70 #414141
+0 71 #868286
+0 72 #bec3be
+0 73 #dfe3df
+0 74 #414141
+0 75 #868286
+0 76 #bec3be
+0 77 #dfe3df
+0 78 #868286
+0 79 #bec3be
+0 80 #dfe3df
+0 81 #414141
+0 82 #868286
+0 83 #bec3be
+0 84 #414141
+0 85 #bec3be
+0 86 #dfe3df
+0 87 #414141
+0 88 #868286
+0 89 #bec3be
+0 90 #8e8e8e
+0 91 #414141
+0 92 #868286
+0 93 #bec3be
+0 94 #dfe3df
+0 95 #414141
+0 96 #868286
+0 97 #bec3be
+0 98 #dfe3df
+0 99 #bebebe
+0 100 #515151
+0 101 #e7e3e7
+0 102 #000049
+0 103 #797979
+0 104 #303430
+0 105 #414541
+0 106 #414141
+0 107 #868286
+0 108 #bec3be
+0 109 #dfe3df
+0 110 #cfcfcf
+0 111 #cfcfcf
+0 112 #cfcfcf
+0 113 #cfcfcf
+0 114 #cfcfcf
+0 115 #cfcfcf
+0 116 #cfcfcf
+0 117 #cfcfcf
+0 118 #cfcfcf
+0 119 #cfcfcf
+0 120 #cfcfcf
+0 121 #cfcfcf
+0 122 #cfcfcf
+0 123 #cfcfcf
+0 124 #cfcfcf
+0 125 #cfcfcf
+0 126 #cfcfcf
+0 127 #cfcfcf
+0 128 #cfcfcf
+0 129 #cfcfcf
+0 130 #cfcfcf
+0 131 #cfcfcf
+0 132 #cfcfcf
+0 133 #cfcfcf
+0 134 #cfcfcf
+0 135 #cfcfcf
+0 136 #cfcfcf
+0 137 #cfcfcf
+0 138 #cfcfcf
+0 139 #cfcfcf
+0 140 #cfcfcf
+0 141 #cfcfcf
+0 142 #cfcfcf
+0 143 #cfcfcf
+0 144 #cfcfcf
+0 145 #cfcfcf
+0 146 #cfcfcf
+0 147 #cfcfcf
+0 148 #cfcfcf
+0 149 #cfcfcf
+0 150 #c7c3c7
+0 151 #868286
+0 152 #bec3be
+0 153 #dfe3df
+0 154 #8e8e8e
+0 155 #8e8e8e
+0 156 #494549
+0 157 #868686
+0 158 #c7c7c7
+0 159 #e7e7e7
+0 160 #f7f7f7
+0 161 #9e9e9e
+0 162 #717571
+0 163 #aeaaae
+0 164 #494549
+0 165 #aeaaae
+0 166 #595559
+0 167 #bec3be
+0 168 #dfe3df
+0 169 #494549
+0 170 #616561
+0 171 #494549
+0 172 #868286
+0 173 #bec3be
+0 174 #dfe3df
+0 175 #bec3be
+0 176 #dfe3df
+0 177 #c7b696
+0 178 #effbff
+0 179 #dfcba6
+0 180 #414141
+0 181 #868286
+0 182 #bec3be
+0 183 #dfe3df
+0 184 #8e8e8e
+0 185 #aeaaae
+0 186 #595559
+0 187 #414141
+0 188 #868286
+0 189 #bec3be
+0 190 #868286
+0 191 #bec3be
+0 192 #dfe3df
+0 193 #8e8e8e
+0 194 #8e8e8e
+0 195 #414141
+0 196 #868286
+0 197 #bec3be
+0 198 #dfe3df
+0 199 #414141
+0 200 #868286
+0 201 #bec3be
+0 202 #dfe3df
+0 203 #868286
+0 204 #bec3be
+0 205 #dfe3df
+0 206 #c7b696
+0 207 #effbff
+0 208 #dfcba6
+0 209 #c7b696
+0 210 #effbff
+0 211 #dfcba6
+0 212 #aeaaae
+0 213 #595559
+0 214 #8e8e8e
+0 215 #414141
+0 216 #868286
+0 217 #bec3be
+0 218 #dfe3df
+0 219 #414141
+0 220 #868286
+0 221 #bec3be
+0 222 #dfe3df
+0 223 #868286
+0 224 #bec3be
+0 225 #dfe3df
+0 226 #414141
+0 227 #868286
+0 228 #bec3be
+0 229 #414141
+0 230 #bec3be
+0 231 #dfe3df
+0 232 #414141
+0 233 #868286
+0 234 #bec3be
+0 235 #8e8e8e
+0 236 #414141
+0 237 #868286
+0 238 #bec3be
+0 239 #dfe3df
+0 240 #414141
+0 241 #868286
+0 242 #bec3be
+0 243 #dfe3df
+0 244 #414141
+0 245 #868286
+0 246 #bec3be
+0 247 #dfe3df
+0 248 #868286
+0 249 #bec3be
+0 250 #dfe3df
+0 251 #8e8e8e
+0 252 #8e8e8e
+0 253 #494549
+0 254 #aeaaae
+0 255 #494549
+0 256 #aeaaae
+0 257 #595559
+0 258 #bec3be
+0 259 #dfe3df
+0 260 #494549
+0 261 #616561
+0 262 #494549
+0 263 #868286
+0 264 #bec3be
+0 265 #dfe3df
+0 266 #bec3be
+0 267 #dfe3df
+0 268 #dfe3ef
+0 269 #96969e
+0 270 #d7dbd7
+0 271 #9ea2b6
+0 272 #9e0000
+0 273 #efefef
+0 274 #86aeff
+0 275 #7171ff
+0 276 #bbf2e2
+0 277 #a7ceb3
+0 278 #dae8fc
+0 279 #458dba
+0 280 #ffe6cc
+0 281 #e9b000
+0 282 #1a1a1a
+0 283 #ffc1e7
+0 284 #009ed7
+0 285 #006d9e
+0 286 #00719e
+0 287 #9e9a9e
+0 288 #000000
+0 289 #595959
+0 290 #006596
+0 291 #00a6d7
+0 292 #b6b6b6
+0 293 #8edbef
+0 294 #00699e
+0 295 #595d59
+0 296 #69d3e7
+0 297 #a6e3ef
+0 298 #9ec7d7
+0 299 #aeb2ae
+0 300 #00b6df
+0 301 #00aed7
+0 302 #797d79
+0 303 #00a2d7
+0 304 #303030
+0 305 #006996
+0 306 #086d9e
+0 307 #86b6cf
+0 308 #f7fbf7
+0 309 #9ec3d7
+0 310 #ffff96
+0 311 #ff600a
+5 1 0 2 0 7 50 -1 -1 0.000 0 0 1 0 11301.000 3060.000 11205 3825 10530 3060 11205 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 11289.000 3060.000 11385 3825 12060 3060 11385 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 11293.750 3060.000 10890 3105 11700 3060 10890 3015
+ 2 1 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 0 1 0 7611.000 3060.000 7515 3825 6840 3060 7515 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 7599.000 3060.000 7695 3825 8370 3060 7695 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 7603.750 3060.000 7200 3105 8010 3060 7200 3015
+ 2 1 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 0 1 0 4956.000 3060.000 4860 3825 4185 3060 4860 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 4944.000 3060.000 5040 3825 5715 3060 5040 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 4948.750 3060.000 4545 3105 5355 3060 4545 3015
+ 2 1 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 0 1 0 1266.000 3060.000 1170 3825 495 3060 1170 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 1254.000 3060.000 1350 3825 2025 3060 1350 2295
+ 0 0 1.00 60.00 120.00
+5 1 0 2 0 7 50 -1 -1 0.000 0 1 0 1 1258.750 3060.000 855 3105 1665 3060 855 3015
+ 2 1 1.00 60.00 120.00
+6 10606 2371 11985 3749
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11768 3060 11970 3060 11967 3119 11959 3177 11946 3234 11929 3291
+ 11907 3345 11879 3397 11704 3296 11723 3259 11738 3222 11751 3182
+ 11760 3142 11765 3101 11768 3060
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11704 3296 11879 3397 11848 3447 11812 3494 11772 3537 11729 3577
+ 11682 3613 11633 3644 11531 3469 11566 3447 11599 3422 11628 3393
+ 11657 3364 11682 3331 11704 3296
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11531 3469 11633 3644 11580 3672 11526 3694 11469 3711 11412 3724
+ 11354 3732 11295 3734 11295 3532 11336 3530 11377 3525 11417 3516
+ 11457 3503 11494 3488 11531 3469
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11295 3532 11295 3734 11236 3732 11178 3724 11121 3711 11064 3694
+ 11010 3672 10958 3644 11059 3469 11096 3488 11133 3503 11173 3516
+ 11213 3525 11254 3530 11295 3532
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11059 3469 10958 3644 10908 3613 10861 3577 10818 3537 10778 3494
+ 10742 3447 10711 3398 10886 3296 10908 3331 10933 3364 10962 3393
+ 10991 3422 11024 3447 11059 3469
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 10886 3296 10711 3398 10683 3345 10661 3291 10644 3234 10631 3177
+ 10623 3119 10621 3060 10823 3060 10825 3101 10830 3142 10839 3182
+ 10852 3222 10867 3259 10886 3296
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 10823 3060 10621 3060 10623 3001 10631 2943 10644 2886 10661 2829
+ 10683 2775 10711 2723 10886 2824 10867 2861 10852 2898 10839 2938
+ 10830 2978 10825 3019 10823 3060
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 10886 2824 10711 2723 10742 2673 10778 2626 10818 2583 10861 2543
+ 10908 2507 10958 2476 11059 2651 11024 2673 10991 2698 10962 2727
+ 10933 2756 10908 2789 10886 2824
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11059 2651 10958 2476 11010 2448 11064 2426 11121 2409 11178 2396
+ 11236 2388 11295 2386 11295 2588 11254 2590 11213 2595 11173 2604
+ 11133 2617 11096 2632 11059 2651
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11295 2588 11295 2386 11354 2388 11412 2396 11469 2409 11526 2426
+ 11580 2448 11632 2476 11531 2651 11494 2632 11457 2617 11417 2604
+ 11377 2595 11336 2590 11295 2588
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11531 2651 11632 2476 11682 2507 11729 2543 11772 2583 11812 2626
+ 11848 2673 11879 2723 11704 2824 11682 2789 11657 2756 11628 2727
+ 11599 2698 11566 2673 11531 2651
+2 3 0 2 0 31 50 -1 20 0.000 0 0 -1 0 0 15
+ 11704 2824 11879 2723 11907 2775 11929 2829 11946 2886 11959 2943
+ 11967 3001 11969 3060 11767 3060 11765 3019 11760 2978 11751 2938
+ 11738 2898 11723 2861 11704 2824
+-6
+6 4261 2371 5640 3749
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 5423 3060 5625 3060 5622 3119 5614 3177 5601 3234 5584 3291
+ 5562 3345 5534 3397 5359 3296 5378 3259 5393 3222 5406 3182
+ 5415 3142 5420 3101 5423 3060
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 5359 3296 5534 3397 5503 3447 5467 3494 5427 3537 5384 3577
+ 5337 3613 5288 3644 5186 3469 5221 3447 5254 3422 5283 3393
+ 5312 3364 5337 3331 5359 3296
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 5186 3469 5288 3644 5235 3672 5181 3694 5124 3711 5067 3724
+ 5009 3732 4950 3734 4950 3532 4991 3530 5032 3525 5072 3516
+ 5112 3503 5149 3488 5186 3469
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4950 3532 4950 3734 4891 3732 4833 3724 4776 3711 4719 3694
+ 4665 3672 4613 3644 4714 3469 4751 3488 4788 3503 4828 3516
+ 4868 3525 4909 3530 4950 3532
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4714 3469 4613 3644 4563 3613 4516 3577 4473 3537 4433 3494
+ 4397 3447 4366 3398 4541 3296 4563 3331 4588 3364 4617 3393
+ 4646 3422 4679 3447 4714 3469
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4541 3296 4366 3398 4338 3345 4316 3291 4299 3234 4286 3177
+ 4278 3119 4276 3060 4478 3060 4480 3101 4485 3142 4494 3182
+ 4507 3222 4522 3259 4541 3296
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4478 3060 4276 3060 4278 3001 4286 2943 4299 2886 4316 2829
+ 4338 2775 4366 2723 4541 2824 4522 2861 4507 2898 4494 2938
+ 4485 2978 4480 3019 4478 3060
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4541 2824 4366 2723 4397 2673 4433 2626 4473 2583 4516 2543
+ 4563 2507 4613 2476 4714 2651 4679 2673 4646 2698 4617 2727
+ 4588 2756 4563 2789 4541 2824
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4714 2651 4613 2476 4665 2448 4719 2426 4776 2409 4833 2396
+ 4891 2388 4950 2386 4950 2588 4909 2590 4868 2595 4828 2604
+ 4788 2617 4751 2632 4714 2651
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 4950 2588 4950 2386 5009 2388 5067 2396 5124 2409 5181 2426
+ 5235 2448 5287 2476 5186 2651 5149 2632 5112 2617 5072 2604
+ 5032 2595 4991 2590 4950 2588
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 5186 2651 5287 2476 5337 2507 5384 2543 5427 2583 5467 2626
+ 5503 2673 5534 2723 5359 2824 5337 2789 5312 2756 5283 2727
+ 5254 2698 5221 2673 5186 2651
+2 3 0 2 0 13 50 -1 20 0.000 0 0 -1 0 0 15
+ 5359 2824 5534 2723 5562 2775 5584 2829 5601 2886 5614 2943
+ 5622 3001 5624 3060 5422 3060 5420 3019 5415 2978 5406 2938
+ 5393 2898 5378 2861 5359 2824
+-6
+6 2250 4815 3960 5265
+1 1 0 3 8 11 52 -1 20 0.000 1 0.0000 3105 5049 810 171 3105 5049 3915 5049
+4 1 0 50 -1 6 10 0.0000 4 150 1125 3105 5130 Most Urgent\001
+-6
+6 6916 2371 8295 3749
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 8078 3060 8280 3060 8277 3119 8269 3177 8256 3234 8239 3291
+ 8217 3345 8189 3397 8014 3296 8033 3259 8048 3222 8061 3182
+ 8070 3142 8075 3101 8078 3060
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 8014 3296 8189 3397 8158 3447 8122 3494 8082 3537 8039 3577
+ 7992 3613 7943 3644 7841 3469 7876 3447 7909 3422 7938 3393
+ 7967 3364 7992 3331 8014 3296
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7841 3469 7943 3644 7890 3672 7836 3694 7779 3711 7722 3724
+ 7664 3732 7605 3734 7605 3532 7646 3530 7687 3525 7727 3516
+ 7767 3503 7804 3488 7841 3469
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7605 3532 7605 3734 7546 3732 7488 3724 7431 3711 7374 3694
+ 7320 3672 7268 3644 7369 3469 7406 3488 7443 3503 7483 3516
+ 7523 3525 7564 3530 7605 3532
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7369 3469 7268 3644 7218 3613 7171 3577 7128 3537 7088 3494
+ 7052 3447 7021 3398 7196 3296 7218 3331 7243 3364 7272 3393
+ 7301 3422 7334 3447 7369 3469
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7196 3296 7021 3398 6993 3345 6971 3291 6954 3234 6941 3177
+ 6933 3119 6931 3060 7133 3060 7135 3101 7140 3142 7149 3182
+ 7162 3222 7177 3259 7196 3296
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7133 3060 6931 3060 6933 3001 6941 2943 6954 2886 6971 2829
+ 6993 2775 7021 2723 7196 2824 7177 2861 7162 2898 7149 2938
+ 7140 2978 7135 3019 7133 3060
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7196 2824 7021 2723 7052 2673 7088 2626 7128 2583 7171 2543
+ 7218 2507 7268 2476 7369 2651 7334 2673 7301 2698 7272 2727
+ 7243 2756 7218 2789 7196 2824
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7369 2651 7268 2476 7320 2448 7374 2426 7431 2409 7488 2396
+ 7546 2388 7605 2386 7605 2588 7564 2590 7523 2595 7483 2604
+ 7443 2617 7406 2632 7369 2651
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7605 2588 7605 2386 7664 2388 7722 2396 7779 2409 7836 2426
+ 7890 2448 7942 2476 7841 2651 7804 2632 7767 2617 7727 2604
+ 7687 2595 7646 2590 7605 2588
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 7841 2651 7942 2476 7992 2507 8039 2543 8082 2583 8122 2626
+ 8158 2673 8189 2723 8014 2824 7992 2789 7967 2756 7938 2727
+ 7909 2698 7876 2673 7841 2651
+2 3 0 2 0 31 50 -1 43 0.000 0 0 -1 0 0 15
+ 8014 2824 8189 2723 8217 2775 8239 2829 8256 2886 8269 2943
+ 8277 3001 8279 3060 8077 3060 8075 3019 8070 2978 8061 2938
+ 8048 2898 8033 2861 8014 2824
+-6
+6 571 2371 1950 3749
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1733 3060 1935 3060 1932 3119 1924 3177 1911 3234 1894 3291
+ 1872 3345 1844 3397 1669 3296 1688 3259 1703 3222 1716 3182
+ 1725 3142 1730 3101 1733 3060
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1669 3296 1844 3397 1813 3447 1777 3494 1737 3537 1694 3577
+ 1647 3613 1598 3644 1496 3469 1531 3447 1564 3422 1593 3393
+ 1622 3364 1647 3331 1669 3296
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1496 3469 1598 3644 1545 3672 1491 3694 1434 3711 1377 3724
+ 1319 3732 1260 3734 1260 3532 1301 3530 1342 3525 1382 3516
+ 1422 3503 1459 3488 1496 3469
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1260 3532 1260 3734 1201 3732 1143 3724 1086 3711 1029 3694
+ 975 3672 923 3644 1024 3469 1061 3488 1098 3503 1138 3516
+ 1178 3525 1219 3530 1260 3532
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1024 3469 923 3644 873 3613 826 3577 783 3537 743 3494
+ 707 3447 676 3398 851 3296 873 3331 898 3364 927 3393
+ 956 3422 989 3447 1024 3469
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 851 3296 676 3398 648 3345 626 3291 609 3234 596 3177
+ 588 3119 586 3060 788 3060 790 3101 795 3142 804 3182
+ 817 3222 832 3259 851 3296
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 788 3060 586 3060 588 3001 596 2943 609 2886 626 2829
+ 648 2775 676 2723 851 2824 832 2861 817 2898 804 2938
+ 795 2978 790 3019 788 3060
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 851 2824 676 2723 707 2673 743 2626 783 2583 826 2543
+ 873 2507 923 2476 1024 2651 989 2673 956 2698 927 2727
+ 898 2756 873 2789 851 2824
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1024 2651 923 2476 975 2448 1029 2426 1086 2409 1143 2396
+ 1201 2388 1260 2386 1260 2588 1219 2590 1178 2595 1138 2604
+ 1098 2617 1061 2632 1024 2651
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1260 2588 1260 2386 1319 2388 1377 2396 1434 2409 1491 2426
+ 1545 2448 1597 2476 1496 2651 1459 2632 1422 2617 1382 2604
+ 1342 2595 1301 2590 1260 2588
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1496 2651 1597 2476 1647 2507 1694 2543 1737 2583 1777 2626
+ 1813 2673 1844 2723 1669 2824 1647 2789 1622 2756 1593 2727
+ 1564 2698 1531 2673 1496 2651
+2 3 0 2 0 13 50 -1 43 0.000 0 0 -1 0 0 15
+ 1669 2824 1844 2723 1872 2775 1894 2829 1911 2886 1924 2943
+ 1932 3001 1934 3060 1732 3060 1730 3019 1725 2978 1716 2938
+ 1703 2898 1688 2861 1669 2824
+-6
+6 1800 1845 2520 2385
+4 1 0 50 -1 6 10 0.0000 4 120 570 2160 1980 Global\001
+4 1 0 50 -1 6 10 0.0000 4 120 495 2160 2160 tasks\001
+4 1 0 50 -1 6 10 0.0000 4 150 705 2160 2340 (locked)\001
+-6
+6 3960 1935 4500 2250
+4 1 0 50 -1 6 10 0.0000 4 120 495 4230 2250 tasks\001
+4 1 0 50 -1 6 10 0.0000 4 120 465 4230 2070 Local\001
+-6
+6 8190 1845 8910 2385
+4 1 0 50 -1 6 10 0.0000 4 150 705 8550 2340 (locked)\001
+4 1 0 50 -1 6 10 0.0000 4 120 585 8550 2160 timers\001
+4 1 0 50 -1 6 10 0.0000 4 120 570 8550 1980 Global\001
+-6
+6 10215 1935 10845 2250
+4 1 0 50 -1 6 10 0.0000 4 120 585 10530 2250 timers\001
+4 1 0 50 -1 6 10 0.0000 4 120 465 10530 2070 Local\001
+-6
+6 2430 945 3735 1530
+1 1 0 3 20 29 52 -1 20 0.000 1 0.0000 3083 1180 607 170 3083 1180 3690 1350
+4 1 0 50 -1 6 10 0.0000 4 120 615 3105 1260 Local ?\001
+4 0 0 50 -1 6 9 0.0000 4 105 315 3375 1530 Yes\001
+4 2 0 50 -1 6 9 0.0000 4 105 225 2790 1530 No\001
+-6
+6 8775 945 10080 1530
+1 1 0 3 20 29 52 -1 20 0.000 1 0.0000 9428 1180 607 170 9428 1180 10035 1350
+4 1 0 50 -1 6 10 0.0000 4 120 615 9450 1260 Local ?\001
+4 0 0 50 -1 6 9 0.0000 4 105 315 9720 1530 Yes\001
+4 2 0 50 -1 6 9 0.0000 4 105 225 9135 1530 No\001
+-6
+6 7200 6345 9810 6885
+2 1 0 4 279 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 7234 6398 9776 6398 9776 6838 7234 6838
+2 3 0 0 -1 278 49 -1 20 0.000 0 0 -1 0 0 5
+ 7234 6838 9776 6838 9776 6398 7234 6398 7234 6838
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9613 6398 9613 6838
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9438 6398 9438 6838
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9264 6398 9264 6838
+4 1 0 46 -1 4 16 0.0000 4 210 1620 8460 6705 TL_URGENT\001
+-6
+6 4140 7830 4545 9045
+1 1 0 3 20 29 52 -1 20 0.000 1 1.5708 4330 8437 607 170 4330 8437 4500 7830
+4 1 0 50 -1 6 10 1.5708 4 120 585 4410 8415 Class?\001
+-6
+1 1 0 3 8 11 52 -1 20 0.000 1 0.0000 9450 5049 540 171 9450 5049 9990 5049
+1 1 0 3 20 29 52 -1 20 0.000 1 1.5708 2440 7672 607 170 2440 7672 2610 7065
+1 1 0 3 8 11 52 -1 20 0.000 1 1.5708 10755 7695 810 171 10755 7695 10755 6885
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 7605 3870 7605 4185 9270 4545 9270 4905
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 11301 3870 11301 4185 9636 4545 9636 4905
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 9630 1395 9626 1591 11291 1800 11295 2295
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 9270 1395 9270 1575 7605 1800 7605 2295
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 2 1 1.00 90.00 180.00
+ 9450 360 9450 1035
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 1260 3870 1260 4185 2925 4545 2925 4905
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 4956 3870 4956 4185 3291 4545 3291 4905
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 2 1 1.00 90.00 180.00
+ 3105 360 3105 1035
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 3285 1395 3285 1575 4950 1845 4950 2385
+2 1 0 3 22 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 9180 5535 9000 5805
+2 1 0 5 13 7 54 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 120.00 240.00
+ 3105 5220 3105 5850 3105 7200 7200 7200
+2 1 0 5 22 7 54 -1 -1 0.000 1 0 -1 1 0 5
+ 2 1 1.00 120.00 240.00
+ 9450 5220 9450 5670 6300 5670 6300 1215 3690 1170
+2 1 0 3 13 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 3195 5535 3015 5805
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 2925 1395 2925 1575 1260 1845 1260 2385
+2 2 0 3 35 34 100 -1 20 0.000 1 0 -1 0 0 5
+ 6570 720 12330 720 12330 5400 6570 5400 6570 720
+2 2 0 3 33 32 100 -1 20 0.000 1 0 -1 0 0 5
+ 270 720 6030 720 6030 5400 270 5400 270 720
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 2 1 1.00 90.00 180.00
+ 315 7650 2250 7650
+2 1 0 5 4 7 54 -1 -1 0.000 1 0 -1 1 0 2
+ 2 1 1.00 120.00 240.00
+ 10890 7695 12285 7695
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 4455 8775 4725 8910 7200 8910
+2 1 0 4 279 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 7234 7118 9776 7118 9776 7558 7234 7558
+2 3 0 0 -1 278 49 -1 20 0.000 0 0 -1 0 0 5
+ 7234 7558 9776 7558 9776 7118 7234 7118 7234 7558
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9613 7118 9613 7558
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9438 7118 9438 7558
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9264 7118 9264 7558
+2 3 0 0 -1 278 49 -1 20 0.000 0 0 -1 0 0 5
+ 7234 8278 9776 8278 9776 7838 7234 7838 7234 8278
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9613 7838 9613 8278
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9438 7838 9438 8278
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9264 7838 9264 8278
+2 1 0 4 279 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 7234 8558 9776 8558 9776 8998 7234 8998
+2 3 0 0 -1 278 49 -1 20 0.000 0 0 -1 0 0 5
+ 7234 8998 9776 8998 9776 8558 7234 8558 7234 8998
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9613 8558 9613 8998
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9438 8558 9438 8998
+2 1 0 2 279 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 9264 8558 9264 8998
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 2
+ 2 1 1.00 90.00 180.00
+ 6075 6480 7200 6480
+2 1 0 3 0 7 50 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 2610 7830 3195 8415 4140 8415
+2 1 0 4 45 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 4166 6398 6094 6398 6094 6838 4166 6838
+2 1 0 2 45 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5923 6398 5923 6838
+2 1 0 2 45 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5748 6398 5748 6838
+2 1 0 2 45 -1 47 -1 -1 0.000 0 0 -1 0 0 2
+ 5574 6398 5574 6838
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 2610 7515 2925 6660 3645 6660 4140 6660
+2 3 0 0 277 276 49 -1 43 0.000 0 0 -1 0 0 5
+ 4166 6838 6094 6838 6094 6398 4166 6398 4166 6838
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 9765 8775 10350 8775 10665 8280
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 9765 8055 10305 8055 10620 7875
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 9806 6605 10350 6615 10665 7155
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 3
+ 2 1 1.00 90.00 180.00
+ 9720 7335 10350 7335 10620 7560
+2 1 1 5 4 7 57 -1 -1 12.000 1 0 -1 1 0 2
+ 2 1 1.00 120.00 240.00
+ 9900 6165 9900 9450
+2 1 0 2 0 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 10080 7245 9990 7425
+2 1 0 2 0 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 10080 7965 9990 8145
+2 1 0 2 0 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 10080 8685 9990 8865
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 4
+ 2 1 1.00 90.00 180.00
+ 4500 8550 6255 8550 6705 8190 7200 8190
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 5
+ 2 1 1.00 90.00 180.00
+ 4500 8280 4725 8100 6435 8100 6750 7470 7200 7470
+2 1 0 3 0 7 54 -1 -1 0.000 1 0 -1 1 0 5
+ 2 1 1.00 90.00 180.00
+ 4455 8055 4635 7740 6390 7740 6750 6750 7200 6750
+2 1 0 4 279 -1 48 -1 -1 0.000 0 0 -1 0 0 4
+ 7234 7838 9776 7838 9776 8278 7234 8278
+2 1 0 2 0 7 54 -1 -1 0.000 1 0 -1 0 0 2
+ 10080 6525 9990 6705
+2 2 0 3 43 47 100 -1 20 0.000 1 0 -1 0 0 5
+ 1935 5985 11070 5985 11070 9585 1935 9585 1935 5985
+4 1 0 50 -1 4 9 1.5708 4 135 315 12240 3060 past\001
+4 1 0 50 -1 4 9 1.5708 4 120 465 10440 3060 future\001
+4 1 0 50 -1 4 9 1.5708 4 135 315 8550 3060 past\001
+4 1 0 50 -1 4 9 1.5708 4 120 465 6750 3060 future\001
+4 1 0 50 -1 6 10 0.0000 4 120 600 9450 5130 Oldest\001
+4 1 0 50 -1 4 9 1.5708 4 105 540 405 3060 newest\001
+4 1 0 50 -1 4 9 1.5708 4 120 450 2205 3060 oldest\001
+4 1 0 50 -1 4 9 1.5708 4 105 540 4095 3060 newest\001
+4 1 0 50 -1 4 9 1.5708 4 120 450 5895 3060 oldest\001
+4 0 0 50 -1 14 10 0.0000 4 135 1470 9135 5850 runqueue-depth\001
+4 0 0 50 -1 14 10 0.0000 4 135 1470 3195 5715 runqueue-depth\001
+4 1 0 50 -1 6 12 0.0000 4 165 1320 9450 3600 Time-based\001
+4 1 0 50 -1 6 12 0.0000 4 195 1395 9450 3780 Wait queues\001
+4 0 0 50 -1 6 12 0.0000 4 195 1050 9000 4005 - 1 global\001
+4 0 0 50 -1 6 12 0.0000 4 195 1605 9000 4185 - 1 per thread\001
+4 1 0 50 -1 6 12 0.0000 4 195 1650 3105 3600 Priority-based\001
+4 1 0 50 -1 6 12 0.0000 4 180 1365 3105 3780 Run queues\001
+4 0 0 50 -1 6 12 0.0000 4 195 1050 2655 4005 - 1 global\001
+4 0 0 50 -1 6 12 0.0000 4 195 1605 2655 4185 - 1 per thread\001
+4 0 0 50 -1 14 10 0.0000 4 135 1365 3240 585 task_wakeup()\001
+4 0 0 50 -1 14 10 0.0000 4 135 1575 9585 630 task_schedule()\001
+4 0 0 50 -1 14 10 0.0000 4 135 1260 9585 450 task_queue()\001
+4 0 0 50 -1 14 10 0.0000 4 135 1680 315 7560 tasklet_wakeup()\001
+4 2 0 50 -1 14 10 0.0000 4 135 1260 12285 7515 t->process()\001
+4 2 4 50 -1 6 12 0.0000 4 150 525 12285 7335 Run!\001
+4 1 0 46 -1 4 16 0.0000 4 210 1695 8460 7425 TL_NORMAL\001
+4 1 0 46 -1 4 16 0.0000 4 210 1200 8460 8145 TL_BULK\001
+4 1 0 46 -1 4 16 0.0000 4 210 1425 8460 8865 TL_HEAVY\001
+4 1 0 46 -1 4 16 0.0000 4 195 1095 4950 6705 SHARED\001
+4 0 0 50 -1 6 9 0.0000 4 105 345 10035 7515 37%\001
+4 0 0 50 -1 6 9 0.0000 4 105 210 10080 8955 =1\001
+4 1 0 50 -1 4 10 0.0000 4 150 2280 5085 6255 (accessed using atomic ops)\001
+4 0 0 50 -1 6 9 0.0000 4 105 345 10035 6795 50%\001
+4 0 0 50 -1 6 9 0.0000 4 105 345 10035 8235 13%\001
+4 2 0 50 -1 6 9 1.5708 4 105 315 2745 8100 Yes\001
+4 1 0 50 -1 6 10 1.5708 4 120 615 2520 7650 Local ?\001
+4 0 0 50 -1 6 9 1.5708 4 105 225 2700 7110 No\001
+4 0 0 50 -1 14 10 0.0000 4 135 1680 4725 8460 TASK_SELF_WAKING\001
+4 0 0 50 -1 14 10 0.0000 4 135 1050 4725 8820 TASK_HEAVY\001
+4 0 0 50 -1 4 10 0.0000 4 165 675 4725 8010 (default)\001
+4 0 0 50 -1 4 10 0.0000 4 150 1290 4725 7650 In I/O or signals\001
+4 1 0 50 -1 6 10 1.5708 4 150 1125 10815 7695 Most Urgent\001
+4 0 4 50 -1 6 10 0.0000 4 120 480 9990 6480 order\001
+4 0 4 50 -1 6 10 0.0000 4 120 420 9990 6300 Scan\001
+4 1 0 50 -1 6 12 0.0000 4 195 9075 6030 9450 5 class-based tasklet queues per thread (one accessible from remote threads)\001
diff --git a/doc/internals/sched.pdf b/doc/internals/sched.pdf
new file mode 100644
index 0000000..d1ce3de
--- /dev/null
+++ b/doc/internals/sched.pdf
Binary files differ
diff --git a/doc/internals/sched.png b/doc/internals/sched.png
new file mode 100644
index 0000000..65c97a1
--- /dev/null
+++ b/doc/internals/sched.png
Binary files differ
diff --git a/doc/internals/sched.svg b/doc/internals/sched.svg
new file mode 100644
index 0000000..0fa329a
--- /dev/null
+++ b/doc/internals/sched.svg
@@ -0,0 +1,1204 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2.7b -->
+<!-- CreationDate: 2021-02-26 17:49:00 -->
+<!-- Magnification: 1.57 -->
+<svg xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ width="1146pt" height="878pt"
+ viewBox="237 327 12126 9291">
+<g fill="none">
+<!-- Line -->
+<rect x="6570" y="720" width="5760" height="4680" fill="#ffebac"
+ stroke="#cbb366" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<rect x="270" y="720" width="5760" height="4680" fill="#c5ebe1"
+ stroke="#86c8a2" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<rect x="1935" y="5985" width="9135" height="3600" fill="#dfe3df"
+ stroke="#868286" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp0">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 9960,9130 9900,9190 9840,9130 9867,9483 9933,9483z"/>
+</clipPath>
+</defs>
+<polyline points=" 9900,6165 9900,9450" clip-path="url(#cp0)"
+ stroke="#ff0000" stroke-width="60px" stroke-linejoin="round" stroke-dasharray="120 120"/>
+<!-- Forward arrow to point 9900,9450 -->
+<polygon points=" 9840,9130 9900,9430 9960,9130 9900,9190 9840,9130"
+ stroke="#ff0000" stroke-width="8px" stroke-miterlimit="8" fill="#ff0000"/>
+<!-- Line -->
+<polyline points=" 9180,5535 9000,5805"
+ stroke="#b000b0" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp1">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6880,7140 6940,7200 6880,7260 7233,7233 7233,7167z"/>
+</clipPath>
+</defs>
+<polyline points=" 3105,5220 3105,5850 3105,7200 7200,7200" clip-path="url(#cp1)"
+ stroke="#00b000" stroke-width="60px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,7200 -->
+<polygon points=" 6880,7260 7180,7200 6880,7140 6940,7200 6880,7260"
+ stroke="#00b000" stroke-width="8px" stroke-miterlimit="8" fill="#00b000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp2">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 4009,1236 3950,1174 4011,1116 3658,1136 3656,1202z"/>
+</clipPath>
+</defs>
+<polyline points=" 9450,5220 9450,5670 6300,5670 6300,1215 3690,1170" clip-path="url(#cp2)"
+ stroke="#b000b0" stroke-width="60px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 3690,1170 -->
+<polygon points=" 4011,1116 3710,1170 4009,1236 3950,1174 4011,1116"
+ stroke="#b000b0" stroke-width="8px" stroke-miterlimit="8" fill="#b000b0"/>
+<!-- Line -->
+<polyline points=" 3195,5535 3015,5805"
+ stroke="#00b000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp3">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 11965,7635 12025,7695 11965,7755 12318,7728 12318,7662z"/>
+</clipPath>
+</defs>
+<polyline points=" 10890,7695 12285,7695" clip-path="url(#cp3)"
+ stroke="#ff0000" stroke-width="60px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 12285,7695 -->
+<polygon points=" 11965,7755 12265,7695 11965,7635 12025,7695 11965,7755"
+ stroke="#ff0000" stroke-width="8px" stroke-miterlimit="8" fill="#ff0000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp4">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6955,8865 7000,8910 6955,8955 7218,8928 7218,8892z"/>
+</clipPath>
+</defs>
+<polyline points=" 4455,8775 4725,8910 7200,8910" clip-path="url(#cp4)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,8910 -->
+<polygon points=" 6955,8955 7180,8910 6955,8865 7000,8910 6955,8955"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp5">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 3895,6615 3940,6660 3895,6705 4158,6678 4158,6642z"/>
+</clipPath>
+</defs>
+<polyline points=" 2610,7515 2925,6660 3645,6660 4140,6660" clip-path="url(#cp5)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 4140,6660 -->
+<polygon points=" 3895,6705 4120,6660 3895,6615 3940,6660 3895,6705"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp6">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 10496,8462 10558,8449 10572,8511 10690,8274 10659,8255z"/>
+</clipPath>
+</defs>
+<polyline points=" 9765,8775 10350,8775 10665,8280" clip-path="url(#cp6)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10665,8280 -->
+<polygon points=" 10572,8511 10654,8297 10496,8462 10558,8449 10572,8511"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp7">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 10385,7957 10446,7974 10430,8036 10645,7882 10627,7850z"/>
+</clipPath>
+</defs>
+<polyline points=" 9765,8055 10305,8055 10620,7875" clip-path="url(#cp7)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10620,7875 -->
+<polygon points=" 10430,8036 10603,7885 10385,7957 10446,7974 10430,8036"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp8">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 10580,6921 10564,6982 10503,6966 10659,7180 10690,7161z"/>
+</clipPath>
+</defs>
+<polyline points=" 9806,6605 10350,6615 10665,7155" clip-path="url(#cp8)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10665,7155 -->
+<polygon points=" 10503,6966 10655,7138 10580,6921 10564,6982 10503,6966"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp9">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 10461,7369 10466,7432 10403,7438 10622,7585 10645,7558z"/>
+</clipPath>
+</defs>
+<polyline points=" 9720,7335 10350,7335 10620,7560" clip-path="url(#cp9)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 10620,7560 -->
+<polygon points=" 10403,7438 10605,7547 10461,7369 10466,7432 10403,7438"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 10080,7245 9990,7425"
+ stroke="#000000" stroke-width="15px" stroke-linejoin="round"/>
+<!-- Line -->
+<polyline points=" 10080,7965 9990,8145"
+ stroke="#000000" stroke-width="15px" stroke-linejoin="round"/>
+<!-- Line -->
+<polyline points=" 10080,8685 9990,8865"
+ stroke="#000000" stroke-width="15px" stroke-linejoin="round"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp10">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6955,8145 7000,8190 6955,8235 7218,8208 7218,8172z"/>
+</clipPath>
+</defs>
+<polyline points=" 4500,8550 6255,8550 6705,8190 7200,8190" clip-path="url(#cp10)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,8190 -->
+<polygon points=" 6955,8235 7180,8190 6955,8145 7000,8190 6955,8235"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp11">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6955,7425 7000,7470 6955,7515 7218,7488 7218,7452z"/>
+</clipPath>
+</defs>
+<polyline points=" 4500,8280 4725,8100 6435,8100 6750,7470 7200,7470" clip-path="url(#cp11)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,7470 -->
+<polygon points=" 6955,7515 7180,7470 6955,7425 7000,7470 6955,7515"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp12">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6955,6705 7000,6750 6955,6795 7218,6768 7218,6732z"/>
+</clipPath>
+</defs>
+<polyline points=" 4455,8055 4635,7740 6390,7740 6750,6750 7200,6750" clip-path="url(#cp12)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,6750 -->
+<polygon points=" 6955,6795 7180,6750 6955,6705 7000,6750 6955,6795"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<polyline points=" 10080,6525 9990,6705"
+ stroke="#000000" stroke-width="15px" stroke-linejoin="round"/>
+<!-- Ellipse -->
+<ellipse cx="3105" cy="5049" rx="810" ry="171" fill="#87cfff"
+ stroke="#00008f" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse cx="3083" cy="1180" rx="607" ry="170" fill="#ffbfbf"
+ stroke="#d10000" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse cx="9428" cy="1180" rx="607" ry="170" fill="#ffbfbf"
+ stroke="#d10000" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse transform="translate(4330,8437) rotate(-90)" rx="607" ry="170" fill="#ffbfbf"
+ stroke="#d10000" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse cx="9450" cy="5049" rx="540" ry="171" fill="#87cfff"
+ stroke="#00008f" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse transform="translate(2440,7672) rotate(-90)" rx="607" ry="170" fill="#ffbfbf"
+ stroke="#d10000" stroke-width="30px"/>
+<!-- Ellipse -->
+<ellipse transform="translate(10755,7695) rotate(-90)" rx="810" ry="171" fill="#87cfff"
+ stroke="#00008f" stroke-width="30px"/>
+<!-- Line -->
+<polygon points=" 11768,3060 11970,3060 11967,3119 11959,3177 11946,3234 11929,3291 11907,3345
+ 11879,3397 11704,3296 11723,3259 11738,3222 11751,3182 11760,3142 11765,3101
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11704,3296 11879,3397 11848,3447 11812,3494 11772,3537 11729,3577 11682,3613
+ 11633,3644 11531,3469 11566,3447 11599,3422 11628,3393 11657,3364 11682,3331
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11531,3469 11633,3644 11580,3672 11526,3694 11469,3711 11412,3724 11354,3732
+ 11295,3734 11295,3532 11336,3530 11377,3525 11417,3516 11457,3503 11494,3488
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11295,3532 11295,3734 11236,3732 11178,3724 11121,3711 11064,3694 11010,3672
+ 10958,3644 11059,3469 11096,3488 11133,3503 11173,3516 11213,3525 11254,3530
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11059,3469 10958,3644 10908,3613 10861,3577 10818,3537 10778,3494 10742,3447
+ 10711,3398 10886,3296 10908,3331 10933,3364 10962,3393 10991,3422 11024,3447
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 10886,3296 10711,3398 10683,3345 10661,3291 10644,3234 10631,3177 10623,3119
+ 10621,3060 10823,3060 10825,3101 10830,3142 10839,3182 10852,3222 10867,3259
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 10823,3060 10621,3060 10623,3001 10631,2943 10644,2886 10661,2829 10683,2775
+ 10711,2723 10886,2824 10867,2861 10852,2898 10839,2938 10830,2978 10825,3019
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 10886,2824 10711,2723 10742,2673 10778,2626 10818,2583 10861,2543 10908,2507
+ 10958,2476 11059,2651 11024,2673 10991,2698 10962,2727 10933,2756 10908,2789
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11059,2651 10958,2476 11010,2448 11064,2426 11121,2409 11178,2396 11236,2388
+ 11295,2386 11295,2588 11254,2590 11213,2595 11173,2604 11133,2617 11096,2632
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11295,2588 11295,2386 11354,2388 11412,2396 11469,2409 11526,2426 11580,2448
+ 11632,2476 11531,2651 11494,2632 11457,2617 11417,2604 11377,2595 11336,2590
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11531,2651 11632,2476 11682,2507 11729,2543 11772,2583 11812,2626 11848,2673
+ 11879,2723 11704,2824 11682,2789 11657,2756 11628,2727 11599,2698 11566,2673
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 11704,2824 11879,2723 11907,2775 11929,2829 11946,2886 11959,2943 11967,3001
+ 11969,3060 11767,3060 11765,3019 11760,2978 11751,2938 11738,2898 11723,2861
+" fill="#ffd600"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 5423,3060 5625,3060 5622,3119 5614,3177 5601,3234 5584,3291 5562,3345 5534,3397
+ 5359,3296 5378,3259 5393,3222 5406,3182 5415,3142 5420,3101" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 5359,3296 5534,3397 5503,3447 5467,3494 5427,3537 5384,3577 5337,3613 5288,3644
+ 5186,3469 5221,3447 5254,3422 5283,3393 5312,3364 5337,3331" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 5186,3469 5288,3644 5235,3672 5181,3694 5124,3711 5067,3724 5009,3732 4950,3734
+ 4950,3532 4991,3530 5032,3525 5072,3516 5112,3503 5149,3488" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4950,3532 4950,3734 4891,3732 4833,3724 4776,3711 4719,3694 4665,3672 4613,3644
+ 4714,3469 4751,3488 4788,3503 4828,3516 4868,3525 4909,3530" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4714,3469 4613,3644 4563,3613 4516,3577 4473,3537 4433,3494 4397,3447 4366,3398
+ 4541,3296 4563,3331 4588,3364 4617,3393 4646,3422 4679,3447" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4541,3296 4366,3398 4338,3345 4316,3291 4299,3234 4286,3177 4278,3119 4276,3060
+ 4478,3060 4480,3101 4485,3142 4494,3182 4507,3222 4522,3259" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4478,3060 4276,3060 4278,3001 4286,2943 4299,2886 4316,2829 4338,2775 4366,2723
+ 4541,2824 4522,2861 4507,2898 4494,2938 4485,2978 4480,3019" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4541,2824 4366,2723 4397,2673 4433,2626 4473,2583 4516,2543 4563,2507 4613,2476
+ 4714,2651 4679,2673 4646,2698 4617,2727 4588,2756 4563,2789" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4714,2651 4613,2476 4665,2448 4719,2426 4776,2409 4833,2396 4891,2388 4950,2386
+ 4950,2588 4909,2590 4868,2595 4828,2604 4788,2617 4751,2632" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 4950,2588 4950,2386 5009,2388 5067,2396 5124,2409 5181,2426 5235,2448 5287,2476
+ 5186,2651 5149,2632 5112,2617 5072,2604 5032,2595 4991,2590" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 5186,2651 5287,2476 5337,2507 5384,2543 5427,2583 5467,2626 5503,2673 5534,2723
+ 5359,2824 5337,2789 5312,2756 5283,2727 5254,2698 5221,2673" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<polygon points=" 5359,2824 5534,2723 5562,2775 5584,2829 5601,2886 5614,2943 5622,3001 5624,3060
+ 5422,3060 5420,3019 5415,2978 5406,2938 5393,2898 5378,2861" fill="#00b000"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Text -->
+<text xml:space="preserve" x="3105" y="5130" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Most Urgent</text>
+<!-- Line -->
+<defs>
+<polygon points=" 8078,3060 8280,3060 8277,3119 8269,3177 8256,3234 8239,3291 8217,3345 8189,3397
+ 8014,3296 8033,3259 8048,3222 8061,3182 8070,3142 8075,3101" id="p0"/>
+<pattern id="tile0" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p0" fill="#ffd600"/>
+<use xlink:href="#p0" fill="url(#tile0)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 8014,3296 8189,3397 8158,3447 8122,3494 8082,3537 8039,3577 7992,3613 7943,3644
+ 7841,3469 7876,3447 7909,3422 7938,3393 7967,3364 7992,3331" id="p1"/>
+<pattern id="tile1" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p1" fill="#ffd600"/>
+<use xlink:href="#p1" fill="url(#tile1)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7841,3469 7943,3644 7890,3672 7836,3694 7779,3711 7722,3724 7664,3732 7605,3734
+ 7605,3532 7646,3530 7687,3525 7727,3516 7767,3503 7804,3488" id="p2"/>
+<pattern id="tile2" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p2" fill="#ffd600"/>
+<use xlink:href="#p2" fill="url(#tile2)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7605,3532 7605,3734 7546,3732 7488,3724 7431,3711 7374,3694 7320,3672 7268,3644
+ 7369,3469 7406,3488 7443,3503 7483,3516 7523,3525 7564,3530" id="p3"/>
+<pattern id="tile3" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p3" fill="#ffd600"/>
+<use xlink:href="#p3" fill="url(#tile3)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7369,3469 7268,3644 7218,3613 7171,3577 7128,3537 7088,3494 7052,3447 7021,3398
+ 7196,3296 7218,3331 7243,3364 7272,3393 7301,3422 7334,3447" id="p4"/>
+<pattern id="tile4" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p4" fill="#ffd600"/>
+<use xlink:href="#p4" fill="url(#tile4)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7196,3296 7021,3398 6993,3345 6971,3291 6954,3234 6941,3177 6933,3119 6931,3060
+ 7133,3060 7135,3101 7140,3142 7149,3182 7162,3222 7177,3259" id="p5"/>
+<pattern id="tile5" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p5" fill="#ffd600"/>
+<use xlink:href="#p5" fill="url(#tile5)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7133,3060 6931,3060 6933,3001 6941,2943 6954,2886 6971,2829 6993,2775 7021,2723
+ 7196,2824 7177,2861 7162,2898 7149,2938 7140,2978 7135,3019" id="p6"/>
+<pattern id="tile6" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p6" fill="#ffd600"/>
+<use xlink:href="#p6" fill="url(#tile6)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7196,2824 7021,2723 7052,2673 7088,2626 7128,2583 7171,2543 7218,2507 7268,2476
+ 7369,2651 7334,2673 7301,2698 7272,2727 7243,2756 7218,2789" id="p7"/>
+<pattern id="tile7" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p7" fill="#ffd600"/>
+<use xlink:href="#p7" fill="url(#tile7)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7369,2651 7268,2476 7320,2448 7374,2426 7431,2409 7488,2396 7546,2388 7605,2386
+ 7605,2588 7564,2590 7523,2595 7483,2604 7443,2617 7406,2632" id="p8"/>
+<pattern id="tile8" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p8" fill="#ffd600"/>
+<use xlink:href="#p8" fill="url(#tile8)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7605,2588 7605,2386 7664,2388 7722,2396 7779,2409 7836,2426 7890,2448 7942,2476
+ 7841,2651 7804,2632 7767,2617 7727,2604 7687,2595 7646,2590" id="p9"/>
+<pattern id="tile9" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p9" fill="#ffd600"/>
+<use xlink:href="#p9" fill="url(#tile9)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 7841,2651 7942,2476 7992,2507 8039,2543 8082,2583 8122,2626 8158,2673 8189,2723
+ 8014,2824 7992,2789 7967,2756 7938,2727 7909,2698 7876,2673" id="p10"/>
+<pattern id="tile10" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p10" fill="#ffd600"/>
+<use xlink:href="#p10" fill="url(#tile10)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 8014,2824 8189,2723 8217,2775 8239,2829 8256,2886 8269,2943 8277,3001 8279,3060
+ 8077,3060 8075,3019 8070,2978 8061,2938 8048,2898 8033,2861" id="p11"/>
+<pattern id="tile11" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p11" fill="#ffd600"/>
+<use xlink:href="#p11" fill="url(#tile11)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1733,3060 1935,3060 1932,3119 1924,3177 1911,3234 1894,3291 1872,3345 1844,3397
+ 1669,3296 1688,3259 1703,3222 1716,3182 1725,3142 1730,3101" id="p12"/>
+<pattern id="tile12" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p12" fill="#00b000"/>
+<use xlink:href="#p12" fill="url(#tile12)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1669,3296 1844,3397 1813,3447 1777,3494 1737,3537 1694,3577 1647,3613 1598,3644
+ 1496,3469 1531,3447 1564,3422 1593,3393 1622,3364 1647,3331" id="p13"/>
+<pattern id="tile13" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p13" fill="#00b000"/>
+<use xlink:href="#p13" fill="url(#tile13)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1496,3469 1598,3644 1545,3672 1491,3694 1434,3711 1377,3724 1319,3732 1260,3734
+ 1260,3532 1301,3530 1342,3525 1382,3516 1422,3503 1459,3488" id="p14"/>
+<pattern id="tile14" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p14" fill="#00b000"/>
+<use xlink:href="#p14" fill="url(#tile14)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1260,3532 1260,3734 1201,3732 1143,3724 1086,3711 1029,3694 975,3672 923,3644
+ 1024,3469 1061,3488 1098,3503 1138,3516 1178,3525 1219,3530" id="p15"/>
+<pattern id="tile15" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p15" fill="#00b000"/>
+<use xlink:href="#p15" fill="url(#tile15)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1024,3469 923,3644 873,3613 826,3577 783,3537 743,3494 707,3447 676,3398 851,3296
+ 873,3331 898,3364 927,3393 956,3422 989,3447" id="p16"/>
+<pattern id="tile16" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p16" fill="#00b000"/>
+<use xlink:href="#p16" fill="url(#tile16)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 851,3296 676,3398 648,3345 626,3291 609,3234 596,3177 588,3119 586,3060 788,3060
+ 790,3101 795,3142 804,3182 817,3222 832,3259" id="p17"/>
+<pattern id="tile17" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p17" fill="#00b000"/>
+<use xlink:href="#p17" fill="url(#tile17)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 788,3060 586,3060 588,3001 596,2943 609,2886 626,2829 648,2775 676,2723 851,2824
+ 832,2861 817,2898 804,2938 795,2978 790,3019" id="p18"/>
+<pattern id="tile18" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p18" fill="#00b000"/>
+<use xlink:href="#p18" fill="url(#tile18)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 851,2824 676,2723 707,2673 743,2626 783,2583 826,2543 873,2507 923,2476 1024,2651
+ 989,2673 956,2698 927,2727 898,2756 873,2789" id="p19"/>
+<pattern id="tile19" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p19" fill="#00b000"/>
+<use xlink:href="#p19" fill="url(#tile19)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1024,2651 923,2476 975,2448 1029,2426 1086,2409 1143,2396 1201,2388 1260,2386
+ 1260,2588 1219,2590 1178,2595 1138,2604 1098,2617 1061,2632" id="p20"/>
+<pattern id="tile20" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p20" fill="#00b000"/>
+<use xlink:href="#p20" fill="url(#tile20)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1260,2588 1260,2386 1319,2388 1377,2396 1434,2409 1491,2426 1545,2448 1597,2476
+ 1496,2651 1459,2632 1422,2617 1382,2604 1342,2595 1301,2590" id="p21"/>
+<pattern id="tile21" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p21" fill="#00b000"/>
+<use xlink:href="#p21" fill="url(#tile21)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1496,2651 1597,2476 1647,2507 1694,2543 1737,2583 1777,2626 1813,2673 1844,2723
+ 1669,2824 1647,2789 1622,2756 1593,2727 1564,2698 1531,2673" id="p22"/>
+<pattern id="tile22" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p22" fill="#00b000"/>
+<use xlink:href="#p22" fill="url(#tile22)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Line -->
+<defs>
+<polygon points=" 1669,2824 1844,2723 1872,2775 1894,2829 1911,2886 1924,2943 1932,3001 1934,3060
+ 1732,3060 1730,3019 1725,2978 1716,2938 1703,2898 1688,2861" id="p23"/>
+<pattern id="tile23" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#000000" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p23" fill="#00b000"/>
+<use xlink:href="#p23" fill="url(#tile23)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Text -->
+<text xml:space="preserve" x="2160" y="1980" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Global</text>
+<!-- Text -->
+<text xml:space="preserve" x="2160" y="2160" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">tasks</text>
+<!-- Text -->
+<text xml:space="preserve" x="2160" y="2340" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">(locked)</text>
+<!-- Text -->
+<text xml:space="preserve" x="4230" y="2250" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">tasks</text>
+<!-- Text -->
+<text xml:space="preserve" x="4230" y="2070" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Local</text>
+<!-- Text -->
+<text xml:space="preserve" x="8550" y="2340" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">(locked)</text>
+<!-- Text -->
+<text xml:space="preserve" x="8550" y="2160" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">timers</text>
+<!-- Text -->
+<text xml:space="preserve" x="8550" y="1980" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Global</text>
+<!-- Text -->
+<text xml:space="preserve" x="10530" y="2250" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">timers</text>
+<!-- Text -->
+<text xml:space="preserve" x="10530" y="2070" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Local</text>
+<!-- Text -->
+<text xml:space="preserve" x="3105" y="1260" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Local ?</text>
+<!-- Text -->
+<text xml:space="preserve" x="3375" y="1530" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">Yes</text>
+<!-- Text -->
+<text xml:space="preserve" x="2790" y="1530" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="end">No</text>
+<!-- Text -->
+<text xml:space="preserve" x="9450" y="1260" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Local ?</text>
+<!-- Text -->
+<text xml:space="preserve" x="9720" y="1530" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">Yes</text>
+<!-- Text -->
+<text xml:space="preserve" x="9135" y="1530" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="end">No</text>
+<!-- Text -->
+<g transform="translate(4410,8415) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Class?</text>
+</g><!-- Arc -->
+<defs>
+<clipPath id="cp13">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 11065,2294 11189,2298 11078,2353 11217,2303 11213,2283z"/>
+</clipPath>
+</defs>
+<path d="M 11205,3825 A 771 771 0 0 1 11205 2295" clip-path="url(#cp13)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Forward arrow to point 11205,2295 -->
+<polyline points=" 11065,2294 11189,2298 11078,2353"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp14">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 11065,2294 11189,2298 11078,2353 11217,2303 11213,2283z
+ M 11525,3826 11401,3822 11512,3767 11373,3817 11377,3837z"/>
+</clipPath>
+</defs>
+<path d="M 11385,3825 A 771 771 0 0 0 11385 2295" clip-path="url(#cp14)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 11385,3825 -->
+<polyline points=" 11525,3826 11401,3822 11512,3767"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp15">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 11065,2294 11189,2298 11078,2353 11217,2303 11213,2283z
+ M 10908,3277 10928,3240 10965,3260 10897,3092 10877,3098z"/>
+</clipPath>
+</defs>
+<path d="M 10890,3105 A 406 406 0 1 0 10890 3015" clip-path="url(#cp15)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 10890,3105 -->
+<polygon points=" 10965,3260 10895,3124 10908,3277 10928,3240 10965,3260"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp16">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 7375,2294 7499,2298 7388,2353 7527,2303 7523,2283z
+ M 10908,3277 10928,3240 10965,3260 10897,3092 10877,3098z"/>
+</clipPath>
+</defs>
+<path d="M 7515,3825 A 771 771 0 0 1 7515 2295" clip-path="url(#cp16)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Forward arrow to point 7515,2295 -->
+<polyline points=" 7375,2294 7499,2298 7388,2353"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp17">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 7375,2294 7499,2298 7388,2353 7527,2303 7523,2283z
+ M 7835,3826 7711,3822 7822,3767 7683,3817 7687,3837z"/>
+</clipPath>
+</defs>
+<path d="M 7695,3825 A 771 771 0 0 0 7695 2295" clip-path="url(#cp17)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 7695,3825 -->
+<polyline points=" 7835,3826 7711,3822 7822,3767"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp18">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 7375,2294 7499,2298 7388,2353 7527,2303 7523,2283z
+ M 7218,3277 7238,3240 7275,3260 7207,3092 7187,3098z"/>
+</clipPath>
+</defs>
+<path d="M 7200,3105 A 406 406 0 1 0 7200 3015" clip-path="url(#cp18)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 7200,3105 -->
+<polygon points=" 7275,3260 7205,3124 7218,3277 7238,3240 7275,3260"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp19">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 4720,2294 4844,2298 4733,2353 4872,2303 4868,2283z
+ M 7218,3277 7238,3240 7275,3260 7207,3092 7187,3098z"/>
+</clipPath>
+</defs>
+<path d="M 4860,3825 A 771 771 0 0 1 4860 2295" clip-path="url(#cp19)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Forward arrow to point 4860,2295 -->
+<polyline points=" 4720,2294 4844,2298 4733,2353"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp20">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 4720,2294 4844,2298 4733,2353 4872,2303 4868,2283z
+ M 5180,3826 5056,3822 5167,3767 5028,3817 5032,3837z"/>
+</clipPath>
+</defs>
+<path d="M 5040,3825 A 771 771 0 0 0 5040 2295" clip-path="url(#cp20)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 5040,3825 -->
+<polyline points=" 5180,3826 5056,3822 5167,3767"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp21">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 4720,2294 4844,2298 4733,2353 4872,2303 4868,2283z
+ M 4563,3277 4583,3240 4620,3260 4552,3092 4532,3098z"/>
+</clipPath>
+</defs>
+<path d="M 4545,3105 A 406 406 0 1 0 4545 3015" clip-path="url(#cp21)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 4545,3105 -->
+<polygon points=" 4620,3260 4550,3124 4563,3277 4583,3240 4620,3260"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp22">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 1030,2294 1154,2298 1043,2353 1182,2303 1178,2283z
+ M 4563,3277 4583,3240 4620,3260 4552,3092 4532,3098z"/>
+</clipPath>
+</defs>
+<path d="M 1170,3825 A 771 771 0 0 1 1170 2295" clip-path="url(#cp22)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Forward arrow to point 1170,2295 -->
+<polyline points=" 1030,2294 1154,2298 1043,2353"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp23">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 1030,2294 1154,2298 1043,2353 1182,2303 1178,2283z
+ M 1490,3826 1366,3822 1477,3767 1338,3817 1342,3837z"/>
+</clipPath>
+</defs>
+<path d="M 1350,3825 A 771 771 0 0 0 1350 2295" clip-path="url(#cp23)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 1350,3825 -->
+<polyline points=" 1490,3826 1366,3822 1477,3767"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8"/>
+<!-- Arc -->
+<defs>
+<clipPath id="cp24">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 1030,2294 1154,2298 1043,2353 1182,2303 1178,2283z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<path d="M 855,3105 A 406 406 0 1 0 855 3015" clip-path="url(#cp24)"
+ stroke="#000000" stroke-width="15px"/>
+<!-- Backward arrow to point 855,3105 -->
+<polygon points=" 930,3260 860,3124 873,3277 893,3240 930,3260"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp25">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 9315,4660 9270,4705 9225,4660 9252,4923 9288,4923z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 7605,3870 7605,4185 9270,4545 9270,4905" clip-path="url(#cp25)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 9270,4905 -->
+<polygon points=" 9225,4660 9270,4885 9315,4660 9270,4705 9225,4660"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp26">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 9681,4660 9636,4705 9591,4660 9618,4923 9654,4923z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 11301,3870 11301,4185 9636,4545 9636,4905" clip-path="url(#cp26)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 9636,4905 -->
+<polygon points=" 9591,4660 9636,4885 9681,4660 9636,4705 9591,4660"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp27">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 11338,2050 11293,2095 11248,2051 11277,2313 11313,2313z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 9630,1395 9626,1591 11291,1800 11295,2295" clip-path="url(#cp27)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 11295,2295 -->
+<polygon points=" 11248,2051 11295,2275 11338,2050 11293,2095 11248,2051"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp28">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 7650,2050 7605,2095 7560,2050 7587,2313 7623,2313z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 9270,1395 9270,1575 7605,1800 7605,2295" clip-path="url(#cp28)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7605,2295 -->
+<polygon points=" 7560,2050 7605,2275 7650,2050 7605,2095 7560,2050"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp29">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 9495,790 9450,835 9405,790 9432,1053 9468,1053z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 9450,360 9450,1035" clip-path="url(#cp29)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 9450,1035 -->
+<polygon points=" 9405,790 9450,1015 9495,790 9450,835 9405,790"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp30">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 2970,4660 2925,4705 2880,4660 2907,4923 2943,4923z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 1260,3870 1260,4185 2925,4545 2925,4905" clip-path="url(#cp30)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 2925,4905 -->
+<polygon points=" 2880,4660 2925,4885 2970,4660 2925,4705 2880,4660"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp31">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 3336,4660 3291,4705 3246,4660 3273,4923 3309,4923z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 4956,3870 4956,4185 3291,4545 3291,4905" clip-path="url(#cp31)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 3291,4905 -->
+<polygon points=" 3246,4660 3291,4885 3336,4660 3291,4705 3246,4660"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp32">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 3150,790 3105,835 3060,790 3087,1053 3123,1053z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 3105,360 3105,1035" clip-path="url(#cp32)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 3105,1035 -->
+<polygon points=" 3060,790 3105,1015 3150,790 3105,835 3060,790"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp33">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 4995,2140 4950,2185 4905,2140 4932,2403 4968,2403z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 3285,1395 3285,1575 4950,1845 4950,2385" clip-path="url(#cp33)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 4950,2385 -->
+<polygon points=" 4905,2140 4950,2365 4995,2140 4950,2185 4905,2140"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp34">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 1305,2140 1260,2185 1215,2140 1242,2403 1278,2403z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 2925,1395 2925,1575 1260,1845 1260,2385" clip-path="url(#cp34)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 1260,2385 -->
+<polygon points=" 1215,2140 1260,2365 1305,2140 1260,2185 1215,2140"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp35">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 2005,7605 2050,7650 2005,7695 2268,7668 2268,7632z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 315,7650 2250,7650" clip-path="url(#cp35)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 2250,7650 -->
+<polygon points=" 2005,7695 2230,7650 2005,7605 2050,7650 2005,7695"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp36">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 6955,6435 7000,6480 6955,6525 7218,6498 7218,6462z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 6075,6480 7200,6480" clip-path="url(#cp36)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 7200,6480 -->
+<polygon points=" 6955,6525 7180,6480 6955,6435 7000,6480 6955,6525"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Line -->
+<defs>
+<clipPath id="cp37">
+ <path clip-rule="evenodd" d="M 237,327 H 12363 V 9618 H 237 z
+ M 3895,8370 3940,8415 3895,8460 4158,8433 4158,8397z
+ M 873,3277 893,3240 930,3260 862,3092 842,3098z"/>
+</clipPath>
+</defs>
+<polyline points=" 2610,7830 3195,8415 4140,8415" clip-path="url(#cp37)"
+ stroke="#000000" stroke-width="30px" stroke-linejoin="round"/>
+<!-- Forward arrow to point 4140,8415 -->
+<polygon points=" 3895,8460 4120,8415 3895,8370 3940,8415 3895,8460"
+ stroke="#000000" stroke-width="8px" stroke-miterlimit="8" fill="#000000"/>
+<!-- Text -->
+<g transform="translate(12240,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">past</text>
+</g><!-- Text -->
+<g transform="translate(10440,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">future</text>
+</g><!-- Text -->
+<g transform="translate(8550,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">past</text>
+</g><!-- Text -->
+<g transform="translate(6750,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">future</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="9450" y="5130" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Oldest</text>
+<!-- Text -->
+<g transform="translate(405,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">newest</text>
+</g><!-- Text -->
+<g transform="translate(2205,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">oldest</text>
+</g><!-- Text -->
+<g transform="translate(4095,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">newest</text>
+</g><!-- Text -->
+<g transform="translate(5895,3060) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="108" text-anchor="middle">oldest</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="9135" y="5850" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">runqueue-depth</text>
+<!-- Text -->
+<text xml:space="preserve" x="3195" y="5715" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">runqueue-depth</text>
+<!-- Text -->
+<text xml:space="preserve" x="9450" y="3600" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="middle">Time-based</text>
+<!-- Text -->
+<text xml:space="preserve" x="9450" y="3780" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="middle">Wait queues</text>
+<!-- Text -->
+<text xml:space="preserve" x="9000" y="4005" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="start">- 1 global</text>
+<!-- Text -->
+<text xml:space="preserve" x="9000" y="4185" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="start">- 1 per thread</text>
+<!-- Text -->
+<text xml:space="preserve" x="3105" y="3600" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="middle">Priority-based</text>
+<!-- Text -->
+<text xml:space="preserve" x="3105" y="3780" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="middle">Run queues</text>
+<!-- Text -->
+<text xml:space="preserve" x="2655" y="4005" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="start">- 1 global</text>
+<!-- Text -->
+<text xml:space="preserve" x="2655" y="4185" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="start">- 1 per thread</text>
+<!-- Text -->
+<text xml:space="preserve" x="3240" y="585" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">task_wakeup()</text>
+<!-- Text -->
+<text xml:space="preserve" x="9585" y="630" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">task_schedule()</text>
+<!-- Text -->
+<text xml:space="preserve" x="9585" y="450" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">task_queue()</text>
+<!-- Text -->
+<text xml:space="preserve" x="315" y="7560" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">tasklet_wakeup()</text>
+<!-- Text -->
+<text xml:space="preserve" x="12285" y="7515" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="end">t-&gt;process()</text>
+<!-- Text -->
+<text xml:space="preserve" x="12285" y="7335" fill="#ff0000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="end">Run!</text>
+<!-- Text -->
+<text xml:space="preserve" x="10035" y="7515" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">37%</text>
+<!-- Text -->
+<text xml:space="preserve" x="10080" y="8955" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">=1</text>
+<!-- Text -->
+<text xml:space="preserve" x="5085" y="6255" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="120" text-anchor="middle">(accessed using atomic ops)</text>
+<!-- Text -->
+<text xml:space="preserve" x="10035" y="6795" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">50%</text>
+<!-- Text -->
+<text xml:space="preserve" x="10035" y="8235" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">13%</text>
+<!-- Text -->
+<g transform="translate(2745,8100) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="end">Yes</text>
+</g><!-- Text -->
+<g transform="translate(2520,7650) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Local ?</text>
+</g><!-- Text -->
+<g transform="translate(2700,7110) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="108" text-anchor="start">No</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="4725" y="8460" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">TASK_SELF_WAKING</text>
+<!-- Text -->
+<text xml:space="preserve" x="4725" y="8820" fill="#000000" font-family="Courier" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">TASK_HEAVY</text>
+<!-- Text -->
+<text xml:space="preserve" x="4725" y="8010" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="120" text-anchor="start">(default)</text>
+<!-- Text -->
+<text xml:space="preserve" x="4725" y="7650" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="120" text-anchor="start">In I/O or signals</text>
+<!-- Text -->
+<g transform="translate(10815,7695) rotate(-90)" >
+<text xml:space="preserve" x="0" y="0" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="middle">Most Urgent</text>
+</g><!-- Text -->
+<text xml:space="preserve" x="9990" y="6480" fill="#ff0000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">order</text>
+<!-- Text -->
+<text xml:space="preserve" x="9990" y="6300" fill="#ff0000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="120" text-anchor="start">Scan</text>
+<!-- Text -->
+<text xml:space="preserve" x="6030" y="9450" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="bold" font-size="144" text-anchor="middle">5 class-based tasklet queues per thread (one accessible from remote threads)</text>
+<!-- Line -->
+<polygon points=" 7234,6838 9776,6838 9776,6398 7234,6398" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 7234,7558 9776,7558 9776,7118 7234,7118" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 7234,8278 9776,8278 9776,7838 7234,7838" fill="#dae8fc"/>
+<!-- Line -->
+<polygon points=" 7234,8998 9776,8998 9776,8558 7234,8558" fill="#dae8fc"/>
+<!-- Line -->
+<defs>
+<polygon points=" 4166,6838 6094,6838 6094,6398 4166,6398" id="p24"/>
+<pattern id="tile24" patternUnits="userSpaceOnUse"
+ x="0" y="0" width="134" height="67">
+<g stroke-width="7.5" stroke="#a7ceb3" fill="none">
+<path d="M -7,30 73,70 M 61,-3 141,37 M -7,37 73,-3 M 61,70 141,30"/>
+</g>
+</pattern>
+</defs>
+<use xlink:href="#p24" fill="#bbf2e2"/>
+<use xlink:href="#p24" fill="url(#tile24)"/>
+<!-- Line -->
+<polyline points=" 7234,6398 9776,6398 9776,6838 7234,6838"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 7234,7118 9776,7118 9776,7558 7234,7558"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 7234,8558 9776,8558 9776,8998 7234,8998"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 4166,6398 6094,6398 6094,6838 4166,6838"
+ stroke="#868286" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 7234,7838 9776,7838 9776,8278 7234,8278"
+ stroke="#458dba" stroke-width="45px"/>
+<!-- Line -->
+<polyline points=" 9613,6398 9613,6838"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9438,6398 9438,6838"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9264,6398 9264,6838"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9613,7118 9613,7558"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9438,7118 9438,7558"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9264,7118 9264,7558"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9613,7838 9613,8278"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9438,7838 9438,8278"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9264,7838 9264,8278"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9613,8558 9613,8998"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9438,8558 9438,8998"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 9264,8558 9264,8998"
+ stroke="#458dba" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5923,6398 5923,6838"
+ stroke="#868286" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5748,6398 5748,6838"
+ stroke="#868286" stroke-width="15px"/>
+<!-- Line -->
+<polyline points=" 5574,6398 5574,6838"
+ stroke="#868286" stroke-width="15px"/>
+<!-- Text -->
+<text xml:space="preserve" x="8460" y="6705" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">TL_URGENT</text>
+<!-- Text -->
+<text xml:space="preserve" x="8460" y="7425" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">TL_NORMAL</text>
+<!-- Text -->
+<text xml:space="preserve" x="8460" y="8145" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">TL_BULK</text>
+<!-- Text -->
+<text xml:space="preserve" x="8460" y="8865" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">TL_HEAVY</text>
+<!-- Text -->
+<text xml:space="preserve" x="4950" y="6705" fill="#000000" font-family="AvantGarde" font-style="normal" font-weight="normal" font-size="192" text-anchor="middle">SHARED</text>
+</g>
+</svg>
diff --git a/doc/internals/ssl_cert.dia b/doc/internals/ssl_cert.dia
new file mode 100644
index 0000000..52496a1
--- /dev/null
+++ b/doc/internals/ssl_cert.dia
Binary files differ
diff --git a/doc/internals/stats-v2.txt b/doc/internals/stats-v2.txt
new file mode 100644
index 0000000..7d2ae76
--- /dev/null
+++ b/doc/internals/stats-v2.txt
@@ -0,0 +1,8 @@
+
+ Qcur Qmax Scur Smax Slim Scum Fin Fout Bin Bout Ereq Econ Ersp Sts Wght Act Bck EChk Down
+Frontend - - X maxX Y totX I O I O Q - - - - - - - -
+Server X maxX X maxX Y totX I O I O - C R S W A B E D
+Server X maxX X maxX Y totX I O I O - C R S W A B E D
+Server X maxX X maxX Y totX I O I O - C R S W A B E D
+Backend X maxX X maxX Y totX I O I O - C R S totW totA totB totE totD
+
diff --git a/doc/internals/stconn-close.txt b/doc/internals/stconn-close.txt
new file mode 100644
index 0000000..fe1ddca
--- /dev/null
+++ b/doc/internals/stconn-close.txt
@@ -0,0 +1,74 @@
+2023-05-23 - closing states on the stream endpoint descriptor
+
+This document deals with the current flags on the SE desc:
+
+ - SE_FL_ERR_PENDING: an error was met while sending, but some incoming data
+ might still be pending. This flag will be promoted to SE_FL_ERROR when the
+ SE_FL_EOI or SE_FL_EOS flags are set via the standard API (se_fl_set()).
+
+ - SE_FL_ERROR ("ERR"): an error was met, last data were received if any, and no
+ more progress will happen.
+
+ - SE_FL_EOI ("EOI"): the end of the input message was seen, without implying
+ an end of the connection nor the end of event reporting for this stream. For
+ example an end of HTTP request or response will set EOI, after which it's
+ still possible (in case of a request) to bring an abort or error. Said
+ differently, the expected end of the message was seen.
+
+ - SE_FL_EOS ("EOS"): the definitive end of the input data was detected. It may
+ result from an error, an abort, a connection shutdown, and no more receive
+ events will be reported.
+
+The different muxes (H1,H2,H3) can face slightly different situations due to
+the nature, properties, and limitations of their underlying protocols, and will
+set these 3 flags to best translate the lower layer's situation and report it
+to the upper layer:
+
+ +-----------+-----------------------------------------------------------------
+ |ERR EOS EOI| Description per mux
+ +-----------+-----------------------------------------------------------------
+ | 0 0 0 | all: transfer still in progress
+ +-----------+-----------------------------------------------------------------
+ | 0 0 1 | H1: end of message reached.
+ | | H2: "ES" flag seen on a frame.
+ | | H3: not set
+ +-----------+-----------------------------------------------------------------
+ | 0 1 0 | H1: not set (*1)
+ | | H2: not set (*2)
+ | | H3: RST received before FIN (client stops uploading)
+ +-----------+-----------------------------------------------------------------
+ | 0 1 1 | H1: end of message + read0, such as close response or aborted
+ | | request
+ | | H2: not set (*2)
+ | | H3: end of message reached (any subsequent RSTs are ignored)
+ +-----------+-----------------------------------------------------------------
+ | 1 0 0 | all: could be used to report a protocol error (ex: invalid chunk
+ | | encoding, forbidden response header seen from a server).
+ +-----------+-----------------------------------------------------------------
+ | 1 0 1 | all: could be used to report an internal error or a downstream
+ | | protocol error, such as a forbidden header in an HTX block
+ | | coming from the stream layer or the impossibility to encode
+ | | a message. Seems unused right now.
+ +-----------+-----------------------------------------------------------------
+ | 1 1 0 | H1: truncated client input data before response, or truncated
+ | | response from the server
+ | | H2: RST or read0 received before end of input message
+ | | H3: RST + STOP_SENDING before FIN
+ +-----------+-----------------------------------------------------------------
+ | 1 1 1 | H1: error faced while sending after end of input message
+ | | H2: RST or read0 received after end of input message
+ | | H3: STOP_SENDING received after a frame with FIN
+ +-----------+-----------------------------------------------------------------
+
+*1: EOS alone is currently not set by H1, however this situation could best
+ describe an H1 upload that was interrupted by the client while receiving
+ an early response, a reused persistent server connection that delivered a
+ read0 immediately after the request was sent, or a truncated server
+ response (or possibly one in close mode when no C-L was advertised). Right
+ now these situations are always accompanied with an ERR flag in addition to
+ the EOS one.
+
+*2: H2 doesn't set EOS without ERR because currently the only ways to close a
+ stream in H2 are by resetting the stream (which conveys an error) or
+ closing the connection (which renders it unusable in both directions and
+ prevents from sending as well).
diff --git a/doc/internals/stream-sock-states.fig b/doc/internals/stream-sock-states.fig
new file mode 100644
index 0000000..79131e5
--- /dev/null
+++ b/doc/internals/stream-sock-states.fig
@@ -0,0 +1,535 @@
+#FIG 3.2 Produced by xfig version 2.0
+Portrait
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+0 32 #8e8e8e
+6 2295 1260 2430 1395
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2363 1328 68 68 2430 1328 2295 1328
+4 1 0 50 -1 18 5 0.0000 4 60 60 2363 1361 1\001
+-6
+6 1845 2295 1980 2430
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1913 2363 68 68 1980 2363 1845 2363
+4 1 0 50 -1 18 5 0.0000 4 60 60 1913 2396 2\001
+-6
+6 2475 2340 2610 2475
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2543 2408 68 68 2610 2408 2475 2408
+4 1 0 50 -1 18 5 0.0000 4 60 60 2543 2441 9\001
+-6
+6 2835 2610 2970 2745
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2903 2678 68 68 2970 2678 2835 2678
+4 1 0 50 -1 18 5 0.0000 4 60 60 2903 2711 7\001
+-6
+6 3195 2025 3330 2160
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 3263 2093 68 68 3330 2093 3195 2093
+4 1 0 50 -1 18 5 0.0000 4 60 60 3263 2126 8\001
+-6
+6 2745 2160 2880 2295
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2813 2228 68 68 2880 2228 2745 2228
+4 1 0 50 -1 18 5 0.0000 4 60 60 2813 2261 6\001
+-6
+6 990 2700 1125 2835
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1058 2768 68 68 1125 2768 990 2768
+4 1 0 50 -1 18 5 0.0000 4 60 120 1058 2801 13\001
+-6
+6 1305 2970 1440 3105
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1373 3038 68 68 1440 3038 1305 3038
+4 1 0 50 -1 18 5 0.0000 4 60 120 1373 3071 12\001
+-6
+6 3105 1710 3240 1845
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 3173 1778 68 68 3240 1778 3105 1778
+4 1 0 50 -1 18 5 0.0000 4 60 120 3173 1811 15\001
+-6
+6 4275 1260 4410 1395
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 1328 68 68 4410 1328 4275 1328
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 1361 1\001
+-6
+6 4275 1440 4410 1575
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 1508 68 68 4410 1508 4275 1508
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 1541 2\001
+-6
+6 4275 1620 4410 1755
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 1688 68 68 4410 1688 4275 1688
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 1721 3\001
+-6
+6 4275 1800 4410 1935
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 1868 68 68 4410 1868 4275 1868
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 1901 4\001
+-6
+6 3240 2835 3375 2970
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 3308 2903 68 68 3375 2903 3240 2903
+4 1 0 50 -1 18 5 0.0000 4 60 120 3308 2936 16\001
+-6
+6 2835 3015 2970 3150
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2903 3083 68 68 2970 3083 2835 3083
+4 1 0 50 -1 18 5 0.0000 4 60 120 2903 3116 17\001
+-6
+6 2295 3195 2430 3330
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2363 3263 68 68 2430 3263 2295 3263
+4 1 0 50 -1 18 5 0.0000 4 60 60 2363 3296 3\001
+-6
+6 1440 4815 1620 4995
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1508 4883 68 68 1575 4883 1440 4883
+4 1 0 50 -1 18 5 0.0000 4 60 120 1508 4916 19\001
+-6
+6 1800 3960 1980 4140
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1868 4028 68 68 1935 4028 1800 4028
+4 1 0 50 -1 18 5 0.0000 4 60 120 1868 4061 18\001
+-6
+6 4275 1980 4410 2115
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 2048 68 68 4410 2048 4275 2048
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 2081 5\001
+-6
+6 4275 2340 4410 2475
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 2408 68 68 4410 2408 4275 2408
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 2441 6\001
+-6
+6 4275 2520 4410 2655
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 2588 68 68 4410 2588 4275 2588
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 2621 7\001
+-6
+6 4275 2700 4410 2835
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 2768 68 68 4410 2768 4275 2768
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 2801 8\001
+-6
+6 4275 2880 4410 3015
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 2948 68 68 4410 2948 4275 2948
+4 1 0 50 -1 18 5 0.0000 4 60 60 4343 2981 9\001
+-6
+6 4275 3060 4410 3195
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 3128 68 68 4410 3128 4275 3128
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 3161 10\001
+-6
+6 4275 3240 4410 3375
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 3308 68 68 4410 3308 4275 3308
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 3341 11\001
+-6
+6 4275 3420 4410 3555
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 3488 68 68 4410 3488 4275 3488
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 3521 12\001
+-6
+6 4275 3600 4410 3735
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 3668 68 68 4410 3668 4275 3668
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 3701 13\001
+-6
+6 4275 3960 4410 4095
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 4028 68 68 4410 4028 4275 4028
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 4061 15\001
+-6
+6 4275 4140 4410 4275
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 4208 68 68 4410 4208 4275 4208
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 4241 16\001
+-6
+6 4275 4320 4410 4455
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 4388 68 68 4410 4388 4275 4388
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 4421 17\001
+-6
+6 4275 3780 4455 3960
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 3848 68 68 4410 3848 4275 3848
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 3881 14\001
+-6
+6 4275 4590 4455 4770
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 4658 68 68 4410 4658 4275 4658
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 4691 18\001
+-6
+6 4275 4770 4455 4950
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 4838 68 68 4410 4838 4275 4838
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 4871 19\001
+-6
+6 4275 4950 4455 5130
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 5018 68 68 4410 5018 4275 5018
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 5051 20\001
+-6
+6 1170 3690 1350 3870
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1238 3758 68 68 1305 3758 1170 3758
+4 1 0 50 -1 18 5 0.0000 4 60 120 1238 3791 11\001
+-6
+6 1530 3555 1710 3735
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 1598 3623 68 68 1665 3623 1530 3623
+4 1 0 50 -1 18 5 0.0000 4 60 120 1598 3656 10\001
+-6
+6 720 4095 900 4275
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 788 4163 68 68 855 4163 720 4163
+4 1 0 50 -1 18 5 0.0000 4 60 120 788 4196 14\001
+-6
+6 855 3645 1035 3825
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 923 3713 68 68 990 3713 855 3713
+4 1 0 50 -1 18 5 0.0000 4 60 120 923 3746 21\001
+-6
+6 4275 5130 4455 5310
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 5198 68 68 4410 5198 4275 5198
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 5231 21\001
+-6
+6 2295 4140 2430 4275
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2363 4208 68 68 2430 4208 2295 4208
+4 1 0 50 -1 18 5 0.0000 4 60 60 2363 4241 4\001
+-6
+6 2475 3870 2655 4050
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2543 3938 68 68 2610 3938 2475 3938
+4 1 0 50 -1 18 5 0.0000 4 60 120 2543 3971 22\001
+-6
+6 4275 5310 4455 5490
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 4343 5378 68 68 4410 5378 4275 5378
+4 1 0 50 -1 18 5 0.0000 4 60 120 4343 5411 22\001
+-6
+6 2295 5625 2430 5760
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2363 5693 68 68 2430 5693 2295 5693
+4 1 0 50 -1 18 5 0.0000 4 60 60 2363 5726 5\001
+-6
+6 2295 6480 2475 6660
+1 4 0 1 0 7 50 -1 -1 0.000 1 0.0000 2363 6548 68 68 2430 6548 2295 6548
+4 1 0 50 -1 18 5 0.0000 4 60 120 2363 6581 20\001
+-6
+1 2 0 1 0 6 50 -1 20 0.000 1 0.0000 1350 4612 225 112 1125 4612 1575 4612
+1 2 0 1 0 6 50 -1 20 0.000 1 0.0000 2250 1912 225 112 2025 1912 2475 1912
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 1125 3487 225 112 900 3487 1350 3487
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 2250 3712 225 112 2025 3712 2475 3712
+1 2 0 1 0 6 50 -1 20 0.000 1 0.0000 2250 2812 225 112 2025 2812 2475 2812
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 3375 2362 225 112 3150 2362 3600 2362
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 2250 1012 225 112 2025 1012 2475 1012
+1 2 0 1 0 6 50 -1 20 0.000 1 0.0000 2250 6232 225 112 2025 6232 2475 6232
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 2250 5422 225 112 2025 5422 2475 5422
+1 2 0 1 0 7 50 -1 20 0.000 1 0.0000 2250 6997 225 112 2025 6997 2475 6997
+1 2 0 1 0 6 50 -1 20 0.000 1 0.0000 2250 4587 225 112 2025 4587 2475 4587
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 1125 2250 1800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8910 5805 4500 5805
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 6885 5900 6930 5990 6975 5810
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 6885 6570 6930 6660 6975 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5310 5589 5310 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5670 5589 5670 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6030 5589 6030 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6390 5589 6390 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6750 5589 6750 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7110 5589 7110 6921
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 4950 5589 4950 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8910 6705 4500 6705
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 5535 2250 6120
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 6345 2250 6885
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 4500 5580 8910 5580 8910 6930 4500 6930 4500 5580
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 4500 5580 8910 5580 8910 6930 4500 6930 4500 5580
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8910 6030 4500 6030
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8910 6255 4500 6255
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8910 6480 4500 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5310 5589 5310 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5670 5589 5670 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6030 5589 6030 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6390 5589 6390 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6750 5589 6750 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7110 5589 7110 6921
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 4950 5589 4950 6921
+2 2 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 5
+ 4500 5580 8910 5580 8910 6930 4500 6930 4500 5580
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 5805 4500 5805
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6030 4500 6030
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6255 4500 6255
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6480 4500 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5310 5589 5310 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5670 5589 5670 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6030 5589 6030 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6390 5589 6390 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6750 5589 6750 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7110 5589 7110 6921
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 4950 5589 4950 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6705 4500 6705
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 5805 4500 5805
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6030 4500 6030
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6255 4500 6255
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6480 4500 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5310 5589 5310 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 5670 5589 5670 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6030 5589 6030 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6390 5589 6390 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 6750 5589 6750 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7110 5589 7110 6921
+2 1 0 2 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 4950 5589 4950 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8865 6705 4500 6705
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 7605 5890 7650 5980 7695 5800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 7605 6570 7650 6660 7695 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7470 5589 7470 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 7965 5890 8010 5980 8055 5800
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 7965 6570 8010 6660 8055 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 7830 5589 7830 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 8325 6570 8370 6660 8415 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8190 5589 8190 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8550 5589 8550 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8190 5589 8190 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8550 5589 8550 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8190 5589 8190 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8550 5589 8550 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8190 5589 8190 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 8550 5589 8550 6921
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1
+ 4500 5805
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1
+ 4500 6030
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1
+ 4500 6255
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1
+ 4500 6480
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 1
+ 4500 6705
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 2250 2700 2475 2475 2475 2250 2250 2025
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 3375 2250 2925 2025 2475 1935
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 3375 2475 3375 2700 2475 2835
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 3420 2475 3420 4320 3150 5850 2475 6165
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 1125 3375 1125 2925 2025 2790
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 1125 3375 1125 2250 2025 1935
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 6
+ 1 1 1.00 60.00 120.00
+ 2475 1890 3825 1800 3825 2520 3825 4500 3150 6075 2475 6210
+ 0.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 2250 2025 2025 2250 2025 2475 2250 2700
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 3825 2250 4500
+ 0.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 2475 1980 2880 2115 3150 2340
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 2925 2250 3600
+ 0.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 2205 3825 2070 4140 1622 4221 1440 4500
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 1350 4725 1350 4950 1485 5760 2025 6165
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 7
+ 1 1 1.00 60.00 120.00
+ 1125 4590 720 4455 675 4050 675 3600 675 2250 1350 1800
+ 2025 1935
+ 0.000 1.000 1.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 3
+ 1 1 1.00 60.00 120.00
+ 1260 4500 1125 4320 1125 3600
+ 0.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 1350 4500 1440 3645 1575 3330 2070 2880
+ 0.000 1.000 1.000 0.000
+3 0 0 1 32 7 51 -1 -1 0.000 0 1 0 5
+ 1 1 1.00 60.00 120.00
+ 1035 3600 990 4365 990 5040 1395 5895 2025 6210
+ 0.000 1.000 1.000 1.000 0.000
+3 0 0 1 32 7 51 -1 -1 0.000 0 1 0 5
+ 1 1 1.00 60.00 120.00
+ 2340 3825 2385 4005 2925 4275 2655 4815 2295 5310
+ 0.000 1.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 4
+ 1 1 1.00 60.00 120.00
+ 2475 2835 3150 3375 3150 5625 2475 6120
+ 0.000 1.000 1.000 0.000
+3 0 0 1 0 7 50 -1 -1 0.000 0 1 0 2
+ 1 1 1.00 60.00 120.00
+ 2250 4725 2250 5310
+ 0.000 0.000
+4 0 0 50 -1 14 6 0.0000 4 105 2880 4500 1710 ASS-CON: ssui(): connect_server() == SN_ERR_NONE\001
+4 0 0 50 -1 14 6 0.0000 4 90 540 4500 1350 INI-REQ: \001
+4 0 0 50 -1 14 6 0.0000 4 120 3720 4500 1530 REQ-ASS: prepare_conn_request(): srv_redispatch_connect() == 0\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2475 2700 4\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 1620 4500 6\001
+4 0 0 50 -1 14 6 0.0000 4 120 3360 4500 1890 CON-EST: sess_update_st_con_tcp(): !timeout && !conn_err\001
+4 0 0 50 -1 14 6 0.0000 4 105 2460 4500 3510 TAR-ASS: ssui(): SI_FL_EXP && SN_ASSIGNED\001
+4 0 0 50 -1 14 6 0.0000 4 105 3420 4500 2970 ASS-REQ: connect_server: conn_retries == 0 && PR_O_REDISP\001
+4 0 0 50 -1 14 6 0.0000 4 120 2460 4500 2610 QUE-REQ: ssui(): !pend_pos && SN_ASSIGNED\001
+4 0 0 50 -1 14 6 0.0000 4 120 2520 4500 2790 QUE-REQ: ssui(): !pend_pos && !SN_ASSIGNED\001
+4 0 0 50 -1 14 6 0.0000 4 120 3300 4500 4230 QUE-CLO: ssui(): pend_pos && (SI_FL_EXP || req_aborted)\001
+4 0 0 50 -1 14 6 0.0000 4 105 2520 4500 3690 TAR-REQ: ssui(): SI_FL_EXP && !SN_ASSIGNED\001
+4 0 0 50 -1 14 6 0.0000 4 120 3960 4500 4545 ASS-CLO: PR_O_REDISP && SN_REDIRECTABLE && perform_http_redirect()\001
+4 0 0 50 -1 14 6 0.0000 4 120 4440 4500 2430 REQ-QUE: prepare_conn_request(): srv_redispatch_connect() != 0 (SI_ST_QUE)\001
+4 0 0 50 -1 14 6 0.0000 4 120 4200 4500 4050 REQ-CLO: prepare_conn_request(): srv_redispatch_connect() != 0 (error)\001
+4 0 0 50 -1 14 6 0.0000 4 105 4320 4500 4410 ASS-CLO: ssui(): connect_server() == SN_ERR_INTERNAL || conn_retries < 0\001
+4 0 0 50 -1 14 6 0.0000 4 120 3120 4500 4680 CON-CER: sess_update_st_con_tcp(): timeout/SI_FL_ERR\001
+4 0 0 50 -1 14 6 0.0000 4 120 3600 4500 4860 CER-CLO: sess_update_st_cer(): (ERR/EXP) && conn_retries < 0\001
+4 0 0 50 -1 14 6 0.0000 4 120 4200 4500 3870 CER-REQ: sess_update_st_cer(): timeout && !conn_retries && PR_O_REDISP\001
+4 0 0 50 -1 14 6 0.0000 4 120 3600 4500 3330 CER-TAR: sess_update_st_cer(): conn_err && conn_retries >= 0\001
+4 0 0 50 -1 14 6 0.0000 4 120 4620 4500 3150 CER-ASS: sess_update_st_cer(): timeout && (conn_retries >= 0 || !PR_O_REDISP)\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 1305 3375 3\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2430 3600 5\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 3555 2250 2\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2430 1800 1\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2430 900 0\001
+4 0 0 50 -1 14 6 0.0000 4 105 3000 4500 2070 EST-DIS: stream_sock_read/write/shutr/shutw: close\001
+4 0 0 50 -1 14 6 0.0000 4 120 1980 4500 2250 EST-DIS: process_session(): error\001
+4 0 0 50 -1 14 6 0.0000 4 120 2100 4500 5040 DIS-CLO: process_session(): cleanup\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 1350 4680 CER\001
+4 1 0 50 -1 14 10 0.0000 4 135 315 2250 1980 REQ\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 1125 3555 TAR\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 2880 ASS\001
+4 1 0 50 -1 14 10 0.0000 4 135 315 3375 2430 QUE\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 3780 CON\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 1080 INI\001
+4 0 0 50 -1 14 6 0.0000 4 120 2820 4500 5220 TAR-CLO: sess_update_stream_int(): client abort\001
+4 0 0 50 -1 14 6 0.0000 4 120 2820 4500 5400 CON-DIS: sess_update_st_con_tcp(): client abort\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5130 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5490 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5850 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 6210 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 6570 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 7290 5985 -\001
+4 1 0 50 -1 16 7 0.0000 4 105 120 4725 5985 fd\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 5130 5760 INI\001
+4 1 0 50 -1 16 7 0.0000 4 105 270 4725 5760 state\001
+4 1 0 50 -1 14 8 0.0000 4 120 225 5490 5760 REQ\001
+4 1 0 50 -1 14 8 0.0000 4 120 225 5850 5760 QUE\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 6210 5760 TAR\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 6570 5760 ASS\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 6930 5760 CON\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 7290 5760 CER\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5850 6210 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5130 6210 0\001
+4 1 0 50 -1 16 7 0.0000 4 90 270 4725 6210 ERR\001
+4 1 0 50 -1 16 7 0.0000 4 90 270 4725 6435 EXP\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5490 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6210 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6570 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6570 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5490 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5130 6435 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5850 6435 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6210 6435 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 7290 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6930 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 7290 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6930 6210 X\001
+4 1 0 50 -1 16 7 0.0000 4 75 240 4725 6660 sess\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5130 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5490 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 5850 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 6210 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 6570 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 7290 6660 -\001
+4 0 0 50 -1 16 6 0.0000 4 120 5970 675 7335 Note: states painted yellow above are transient ; process_session() will never leave a stream interface in any of those upon return.\001
+4 1 0 50 -1 16 7 0.0000 4 90 330 4725 6840 SHUT\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 7290 6840 -\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6930 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6570 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 6210 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5850 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5490 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 5130 6840 0\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 6300 DIS\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 5490 EST\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 7065 CLO\001
+4 1 0 50 -1 14 10 0.0000 4 105 315 2250 4635 RDY\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2430 4455 7\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2430 5310 8\001
+4 0 4 50 -1 14 10 0.0000 4 105 105 2385 6120 9\001
+4 0 4 50 -1 14 10 0.0000 4 105 210 2385 6840 10\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 7650 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 7650 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 7650 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 8010 5760 EST\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8010 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8010 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8010 6840 0\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 8370 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8370 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8370 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 8730 5760 CLO\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 8370 5760 DIS\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 8730 5985 -\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8730 6210 X\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8730 6435 X\001
+4 1 0 50 -1 14 8 0.0000 4 15 75 8730 6660 -\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8370 6840 1\001
+4 1 0 50 -1 14 8 0.0000 4 90 75 8730 6840 1\001
+4 1 0 50 -1 14 8 0.0000 4 90 225 7650 5760 RDY\001
diff --git a/doc/intro.txt b/doc/intro.txt
new file mode 100644
index 0000000..f4133a1
--- /dev/null
+++ b/doc/intro.txt
@@ -0,0 +1,1700 @@
+ -----------------------
+ HAProxy Starter Guide
+ -----------------------
+ version 2.9
+
+
+This document is an introduction to HAProxy for all those who don't know it, as
+well as for those who want to re-discover it when they know older versions. Its
+primary focus is to provide users with all the elements to decide if HAProxy is
+the product they're looking for or not. Advanced users may find here some parts
+of solutions to some ideas they had just because they were not aware of a given
+new feature. Some sizing information is also provided, the product's lifecycle
+is explained, and comparisons with partially overlapping products are provided.
+
+This document doesn't provide any configuration help or hints, but it explains
+where to find the relevant documents. The summary below is meant to help you
+search sections by name and navigate through the document.
+
+Note to documentation contributors :
+ This document is formatted with 80 columns per line, with even number of
+ spaces for indentation and without tabs. Please follow these rules strictly
+ so that it remains easily printable everywhere. If you add sections, please
+ update the summary below for easier searching.
+
+
+Summary
+-------
+
+1. Available documentation
+
+2. Quick introduction to load balancing and load balancers
+
+3. Introduction to HAProxy
+3.1. What HAProxy is and isn't
+3.2. How HAProxy works
+3.3. Basic features
+3.3.1. Proxying
+3.3.2. SSL
+3.3.3. Monitoring
+3.3.4. High availability
+3.3.5. Load balancing
+3.3.6. Stickiness
+3.3.7. Logging
+3.3.8. Statistics
+3.4. Standard features
+3.4.1. Sampling and converting information
+3.4.2. Maps
+3.4.3. ACLs and conditions
+3.4.4. Content switching
+3.4.5. Stick-tables
+3.4.6. Formatted strings
+3.4.7. HTTP rewriting and redirection
+3.4.8. Server protection
+3.5. Advanced features
+3.5.1. Management
+3.5.2. System-specific capabilities
+3.5.3. Scripting
+3.6. Sizing
+3.7. How to get HAProxy
+
+4. Companion products and alternatives
+4.1. Apache HTTP server
+4.2. NGINX
+4.3. Varnish
+4.4. Alternatives
+
+5. Contacts
+
+
+1. Available documentation
+--------------------------
+
+The complete HAProxy documentation is contained in the following documents.
+Please ensure to consult the relevant documentation to save time and to get the
+most accurate response to your needs. Also please refrain from sending questions
+to the mailing list whose answers are already present in these documents.
+
+ - intro.txt (this document) : it presents the basics of load balancing,
+ HAProxy as a product, what it does, what it doesn't do, some known traps to
+ avoid, some OS-specific limitations, how to get it, how it evolves, how to
+ ensure you're running with all known fixes, how to update it, complements
+ and alternatives.
+
+ - management.txt : it explains how to start haproxy, how to manage it at
+ runtime, how to manage it on multiple nodes, and how to proceed with
+ seamless upgrades.
+
+ - configuration.txt : the reference manual details all configuration keywords
+ and their options. It is used when a configuration change is needed.
+
+ - coding-style.txt : this is for developers who want to propose some code to
+ the project. It explains the style to adopt for the code. It is not very
+ strict and not all the code base completely respects it, but contributions
+ which diverge too much from it will be rejected.
+
+ - proxy-protocol.txt : this is the de-facto specification of the PROXY
+ protocol which is implemented by HAProxy and a number of third party
+ products.
+
+ - README : how to build HAProxy from sources
+
+
+2. Quick introduction to load balancing and load balancers
+----------------------------------------------------------
+
+Load balancing consists in aggregating multiple components in order to achieve
+a total processing capacity above each component's individual capacity, without
+any intervention from the end user and in a scalable way. This results in more
+operations being performed simultaneously by the time it takes a component to
+perform only one. A single operation however will still be performed on a single
+component at a time and will not get faster than without load balancing. It
+always requires at least as many operations as available components and an
+efficient load balancing mechanism to make use of all components and to fully
+benefit from the load balancing. A good example of this is the number of lanes
+on a highway which allows more cars to pass during the same time frame
+without increasing their individual speed.
+
+Examples of load balancing :
+
+ - Process scheduling in multi-processor systems
+ - Link load balancing (e.g. EtherChannel, Bonding)
+ - IP address load balancing (e.g. ECMP, DNS round-robin)
+ - Server load balancing (via load balancers)
+
+The mechanism or component which performs the load balancing operation is
+called a load balancer. In web environments these components are called a
+"network load balancer", and more commonly a "load balancer" given that this
+activity is by far the best known case of load balancing.
+
+A load balancer may act :
+
+ - at the link level : this is called link load balancing, and it consists in
+ choosing what network link to send a packet to;
+
+ - at the network level : this is called network load balancing, and it
+ consists in choosing what route a series of packets will follow;
+
+ - at the server level : this is called server load balancing and it consists
+ in deciding what server will process a connection or request.
+
+Two distinct technologies exist and address different needs, though with some
+overlapping. In each case it is important to keep in mind that load balancing
+consists in diverting the traffic from its natural flow and that doing so always
+requires a minimum of care to maintain the required level of consistency between
+all routing decisions.
+
+The first one acts at the packet level and processes packets more or less
+individually. There is a 1-to-1 relation between input and output packets, so
+it is possible to follow the traffic on both sides of the load balancer using a
+regular network sniffer. This technology can be very cheap and extremely fast.
+It is usually implemented in hardware (ASICs) allowing to reach line rate, such
+as switches doing ECMP. Usually stateless, it can also be stateful (tracking
+the session a packet belongs to, then called layer4-LB or L4), may support DSR
+(direct server return, without passing through the LB again) if the packets
+were not modified, but provides almost no content awareness. This technology is
+very well suited to network-level load balancing, though it is sometimes used
+for very basic server load balancing at high speed.
+
+The second one acts on session contents. It requires that the input streams are
+reassembled and processed as a whole. The contents may be modified, and the
+output stream is segmented into new packets. For this reason it is generally
+performed by proxies and they're often called layer 7 load balancers or L7.
+This implies that there are two distinct connections on each side, and that
+there is no relation between input and output packets sizes nor counts. Clients
+and servers are not required to use the same protocol (for example IPv4 vs
+IPv6, clear vs SSL). The operations are always stateful, and the return traffic
+must pass through the load balancer. The extra processing comes with a cost so
+it's not always possible to achieve line rate, especially with small packets.
+On the other hand, it offers wide possibilities and is generally achieved by
+pure software, even if embedded into hardware appliances. This technology is
+very well suited for server load balancing.
+
+Packet-based load balancers are generally deployed in cut-through mode, so they
+are installed on the normal path of the traffic and divert it according to the
+configuration. The return traffic doesn't necessarily pass through the load
+balancer. Some modifications may be applied to the network destination address
+in order to direct the traffic to the proper destination. In this case, it is
+mandatory that the return traffic passes through the load balancer. If the
+routing doesn't make this possible, the load balancer may also replace the
+packets' source address with its own in order to force the return traffic to
+pass through it.
+
+Proxy-based load balancers are deployed as a server with their own IP addresses
+and ports, without architecture changes. Sometimes this requires performing some
+adaptations to the applications so that clients are properly directed to the
+load balancer's IP address and not directly to the server's. Some load balancers
+may have to adjust some servers' responses to make this possible (e.g. the HTTP
+Location header field used in HTTP redirects). Some proxy-based load balancers
+may intercept traffic for an address they don't own, and spoof the client's
+address when connecting to the server. This allows them to be deployed as if
+they were a regular router or firewall, in a cut-through mode very similar to
+the packet based load balancers. This is particularly appreciated for products
+which combine both packet mode and proxy mode. In this case DSR is obviously
+still not possible and the return traffic still has to be routed back to the
+load balancer.
+
+A very scalable layered approach would consist in having a front router which
+receives traffic from multiple load balanced links, and uses ECMP to distribute
+this traffic to a first layer of multiple stateful packet-based load balancers
+(L4). These L4 load balancers in turn pass the traffic to an even larger number
+of proxy-based load balancers (L7), which have to parse the contents to decide
+what server will ultimately receive the traffic.
+
+The number of components and possible paths for the traffic increases the risk
+of failure; in very large environments, it is even normal to permanently have
+a few faulty components being fixed or replaced. Load balancing done without
+awareness of the whole stack's health significantly degrades availability. For
+this reason, any sane load balancer will verify that the components it intends
+to deliver the traffic to are still alive and reachable, and it will stop
+delivering traffic to faulty ones. This can be achieved using various methods.
+
+The most common one consists in periodically sending probes to ensure the
+component is still operational. These probes are called "health checks". They
+must be representative of the type of failure to address. For example a ping-
+based check will not detect that a web server has crashed and doesn't listen to
+a port anymore, while a connection to the port will verify this, and a more
+advanced request may even validate that the server still works and that the
+database it relies on is still accessible. Health checks often involve a few
+retries to cover for occasional measuring errors. The period between checks
+must be small enough to ensure the faulty component is not used for too long
+after an error occurs.
+
+Other methods consist in sampling the production traffic sent to a destination
+to observe if it is processed correctly or not, and to evict the components
+which return inappropriate responses. However this requires sacrificing a part
+of the production traffic and this is not always acceptable. A combination of
+these two mechanisms provides the best of both worlds, with both of them being
+used to detect a fault, and only health checks to detect the end of the fault.
+A last method involves centralized reporting : a central monitoring agent
+periodically updates all load balancers about all components' state. This gives
+a global view of the infrastructure to all components, though sometimes with
+less accuracy or responsiveness. It's best suited for environments with many
+load balancers and many servers.
+
+Layer 7 load balancers also face another challenge known as stickiness or
+persistence. The principle is that they generally have to direct multiple
+subsequent requests or connections from a same origin (such as an end user) to
+the same target. The best known example is the shopping cart on an online
+store. If each click leads to a new connection, the user must always be sent
+to the server which holds his shopping cart. Content-awareness makes it easier
+to spot some elements in the request to identify the server to deliver it to,
+but that's not always enough. For example if the source address is used as a
+key to pick a server, it can be decided that a hash-based algorithm will be
+used and that a given IP address will always be sent to the same server based
+on dividing the address by the number of available servers. But if one
+server fails, the result changes and all users are suddenly sent to a different
+server and lose their shopping cart. The solution against this issue consists
+in memorizing the chosen target so that each time the same visitor is seen,
+he's directed to the same server regardless of the number of available servers.
+The information may be stored in the load balancer's memory, in which case it
+may have to be replicated to other load balancers if it's not alone, or it may
+be stored in the client's memory using various methods provided that the client
+is able to present this information back with every request (cookie insertion,
+redirection to a sub-domain, etc). This mechanism provides the extra benefit of
+not having to rely on unstable or unevenly distributed information (such as the
+source IP address). This is in fact the strongest reason to adopt a layer 7
+load balancer instead of a layer 4 one.
+
+In order to extract information such as a cookie, a host header field, a URL
+or whatever, a load balancer may need to decrypt SSL/TLS traffic and even
+possibly to re-encrypt it when passing it to the server. This expensive task
+explains why in some high-traffic infrastructures, sometimes there may be a
+lot of load balancers.
+
+Since a layer 7 load balancer may perform a number of complex operations on the
+traffic (decrypt, parse, modify, match cookies, decide what server to send to,
+etc), it can definitely cause some trouble and will very commonly be accused of
+being responsible for a lot of trouble that it only revealed. Often it will be
+discovered that servers are unstable and periodically go up and down, or for
+web servers, that they deliver pages with some hard-coded links forcing the
+clients to connect directly to one specific server without passing via the load
+balancer, or that they take ages to respond under high load causing timeouts.
+That's why logging is an extremely important aspect of layer 7 load balancing.
+Once a trouble is reported, it is important to figure out whether the load
+balancer took a wrong decision and, if so, why, so that it doesn't recur.
+
+
+3. Introduction to HAProxy
+--------------------------
+
+HAProxy is written as "HAProxy" to designate the product, and as "haproxy" to
+designate the executable program, software package or a process. However, both
+are commonly used for both purposes, and are pronounced H-A-Proxy. Very early,
+"haproxy" used to stand for "high availability proxy" and the name was written
+in two separate words, though by now it means nothing else than "HAProxy".
+
+
+3.1. What HAProxy is and isn't
+------------------------------
+
+HAProxy is :
+
+ - a TCP proxy : it can accept a TCP connection from a listening socket,
+ connect to a server and attach these sockets together allowing traffic to
+ flow in both directions; IPv4, IPv6 and even UNIX sockets are supported on
+ either side, so this can provide an easy way to translate addresses between
+ different families.
+
+ - an HTTP reverse-proxy (called a "gateway" in HTTP terminology) : it presents
+ itself as a server, receives HTTP requests over connections accepted on a
+ listening TCP socket, and passes the requests from these connections to
+ servers using different connections. It may use any combination of HTTP/1.x
+ or HTTP/2 on any side and will even automatically detect the protocol
+ spoken on each side when ALPN is used over TLS.
+
+ - an SSL terminator / initiator / offloader : SSL/TLS may be used on the
+ connection coming from the client, on the connection going to the server,
+ or even on both connections. A lot of settings can be applied per name
+ (SNI), and may be updated at runtime without restarting. Such setups are
+ extremely scalable and deployments involving tens to hundreds of thousands
+ of certificates were reported.
+
+ - a TCP normalizer : since connections are locally terminated by the operating
+ system, there is no relation between both sides, so abnormal traffic such as
+ invalid packets, flag combinations, window advertisements, sequence numbers,
+ incomplete connections (SYN floods), or so will not be passed to the other
+ side. This protects fragile TCP stacks from protocol attacks, and also
+ allows to optimize the connection parameters with the client without having
+ to modify the servers' TCP stack settings.
+
+ - an HTTP normalizer : when configured to process HTTP traffic, only valid
+ complete requests are passed. This protects against a lot of protocol-based
+ attacks. Additionally, protocol deviations for which there is a tolerance
+ in the specification are fixed so that they don't cause problem on the
+ servers (e.g. multiple-line headers).
+
+ - an HTTP fixing tool : it can modify / fix / add / remove / rewrite the URL
+ or any request or response header. This helps fixing interoperability issues
+ in complex environments.
+
+ - a content-based switch : it can consider any element from the request to
+ decide what server to pass the request or connection to. Thus it is possible
+ to handle multiple protocols over a same port (e.g. HTTP, HTTPS, SSH).
+
+ - a server load balancer : it can load balance TCP connections and HTTP
+ requests. In TCP mode, load balancing decisions are taken for the whole
+ connection. In HTTP mode, decisions are taken per request.
+
+ - a traffic regulator : it can apply some rate limiting at various points,
+ protect the servers against overloading, adjust traffic priorities based on
+ the contents, and even pass such information to lower layers and outer
+ network components by marking packets.
+
+ - a protection against DDoS and service abuse : it can maintain a wide number
+ of statistics per IP address, URL, cookie, etc and detect when an abuse is
+ happening, then take action (slow down the offenders, block them, send them
+ to outdated contents, etc).
+
+ - an observation point for network troubleshooting : due to the precision of
+ the information reported in logs, it is often used to narrow down some
+ network-related issues.
+
+ - an HTTP compression offloader : it can compress responses which were not
+ compressed by the server, thus reducing the page load time for clients with
+ poor connectivity or using high-latency, mobile networks.
+
+ - a caching proxy : it may cache responses in RAM so that subsequent requests
+ for the same object avoid the cost of another network transfer from the
+ server as long as the object remains present and valid. It will however not
+ store objects to any persistent storage. Please note that this caching
+ feature is designed to be maintenance free and focuses solely on saving
+haproxy's precious resources and not on saving the server's resources. Caches
+ designed to optimize servers require much more tuning and flexibility. If
+ you instead need such an advanced cache, please use Varnish Cache, which
+ integrates perfectly with haproxy, especially when SSL/TLS is needed on any
+ side.
+
+ - a FastCGI gateway : FastCGI can be seen as a different representation of
+ HTTP, and as such, HAProxy can directly load-balance a farm comprising any
+ combination of FastCGI application servers without requiring to insert
+ another level of gateway between them. This results in resource savings and
+ a reduction of maintenance costs.
+
+HAProxy is not :
+
+ - an explicit HTTP proxy, i.e. the proxy that browsers use to reach the
+ internet. There are excellent open-source software dedicated for this task,
+ such as Squid. However HAProxy can be installed in front of such a proxy to
+ provide load balancing and high availability.
+
+ - a data scrubber : it will not modify the body of requests nor responses.
+
+ - a static web server : during startup, it isolates itself inside a chroot
+ jail and drops its privileges, so that it will not perform any single file-
+ system access once started. As such it cannot be turned into a static web
+ server (dynamic servers are supported through FastCGI however). There are
+ excellent open-source software for this such as Apache or Nginx, and
+ HAProxy can be easily installed in front of them to provide load balancing,
+ high availability and acceleration.
+
+ - a packet-based load balancer : it will not see IP packets nor UDP datagrams,
+ will not perform NAT or even less DSR. These are tasks for lower layers.
+ Some kernel-based components such as IPVS (Linux Virtual Server) already do
+ this pretty well and complement perfectly with HAProxy.
+
+
+3.2. How HAProxy works
+----------------------
+
+HAProxy is an event-driven, non-blocking engine combining a very fast I/O layer
+with a priority-based, multi-threaded scheduler. As it is designed with a data
+forwarding goal in mind, its architecture is optimized to move data as fast as
+possible with the least possible operations. It focuses on optimizing the CPU
+cache's efficiency by sticking connections to the same CPU as long as possible.
+As such it implements a layered model offering bypass mechanisms at each level
+ensuring data doesn't reach higher levels unless needed. Most of the processing
+is performed in the kernel, and HAProxy does its best to help the kernel do the
+work as fast as possible by giving some hints or by avoiding certain operations
+when it guesses they could be grouped later. As a result, typical figures show
+15% of the processing time spent in HAProxy versus 85% in the kernel in TCP or
+HTTP close mode, and about 30% for HAProxy versus 70% for the kernel in HTTP
+keep-alive mode.
+
+A single process can run many proxy instances; configurations as large as
+300000 distinct proxies in a single process were reported to run fine. A single
+core, single CPU setup is far more than enough for more than 99% users, and as
+such, users of containers and virtual machines are encouraged to use the
+absolute smallest images they can get to save on operational costs and simplify
+troubleshooting. However the machine HAProxy runs on must never ever swap, and
+its CPU must not be artificially throttled (sub-CPU allocation in hypervisors)
+nor be shared with compute-intensive processes which would induce a very high
+context-switch latency.
+
+Threading allows exploiting all available processing capacity by using one
+thread per CPU core. This is mostly useful for SSL or when data forwarding
+rates above 40 Gbps are needed. In such cases it is critically important to
+avoid communications between multiple physical CPUs, which can cause strong
+bottlenecks in the network stack and in HAProxy itself. While counter-intuitive
+to some, the first thing to do when facing some performance issues is often to
+reduce the number of CPUs HAProxy runs on.
+
+HAProxy only requires the haproxy executable and a configuration file to run.
+For logging it is highly recommended to have a properly configured syslog daemon
+and log rotations in place. Logs may also be sent to stdout/stderr, which can be
+useful inside containers. The configuration files are parsed before starting,
+then HAProxy tries to bind all listening sockets, and refuses to start if
+anything fails. Past this point it cannot fail anymore. This means that there
+are no runtime failures and that if it accepts to start, it will work until it
+is stopped.
+
+Once HAProxy is started, it does exactly 3 things :
+
+ - process incoming connections;
+
+ - periodically check the servers' status (known as health checks);
+
+ - exchange information with other haproxy nodes.
+
+Processing incoming connections is by far the most complex task as it depends
+on a lot of configuration possibilities, but it can be summarized as the 9 steps
+below :
+
+ - accept incoming connections from listening sockets that belong to a
+ configuration entity known as a "frontend", which references one or multiple
+ listening addresses;
+
+ - apply the frontend-specific processing rules to these connections that may
+ result in blocking them, modifying some headers, or intercepting them to
+ execute some internal applets such as the statistics page or the CLI;
+
+ - pass these incoming connections to another configuration entity representing
+ a server farm known as a "backend", which contains the list of servers and
+ the load balancing strategy for this server farm;
+
+ - apply the backend-specific processing rules to these connections;
+
+ - decide which server to forward the connection to according to the load
+ balancing strategy;
+
+ - apply the backend-specific processing rules to the response data;
+
+ - apply the frontend-specific processing rules to the response data;
+
+ - emit a log to report what happened in fine details;
+
+ - in HTTP, loop back to the second step to wait for a new request, otherwise
+ close the connection.
+
+Frontends and backends are sometimes considered as half-proxies, since they only
+look at one side of an end-to-end connection; the frontend only cares about the
+clients while the backend only cares about the servers. HAProxy also supports
+full proxies which are exactly the union of a frontend and a backend. When HTTP
+processing is desired, the configuration will generally be split into frontends
+and backends as they open a lot of possibilities since any frontend may pass a
+connection to any backend. With TCP-only proxies, using frontends and backends
+rarely provides a benefit and the configuration can be more readable with full
+proxies.
+
+
+3.3. Basic features
+-------------------
+
+This section will enumerate a number of features that HAProxy implements, some
+of which are generally expected from any modern load balancer, and some of
+which are a direct benefit of HAProxy's architecture. More advanced features
+will be detailed in the next section.
+
+
+3.3.1. Basic features : Proxying
+--------------------------------
+
+Proxying is the action of transferring data between a client and a server over
+two independent connections. The following basic features are supported by
+HAProxy regarding proxying and connection management :
+
+ - Provide the server with a clean connection to protect them against any
+ client-side defect or attack;
+
+ - Listen to multiple IP addresses and/or ports, even port ranges;
+
+ - Transparent accept : intercept traffic targeting any arbitrary IP address
+ that doesn't even belong to the local system;
+
+ - Server port doesn't need to be related to listening port, and may even be
+ translated by a fixed offset (useful with ranges);
+
+ - Transparent connect : spoof the client's (or any) IP address if needed
+ when connecting to the server;
+
+ - Provide a reliable return IP address to the servers in multi-site LBs;
+
+ - Offload the server thanks to buffers and possibly short-lived connections
+ to reduce their concurrent connection count and their memory footprint;
+
+ - Optimize TCP stacks (e.g. SACK), congestion control, and reduce RTT impacts;
+
+ - Support different protocol families on both sides (e.g. IPv4/IPv6/Unix);
+
+ - Timeout enforcement : HAProxy supports multiple levels of timeouts depending
+   on the stage the connection is at, so that a dead client or server, or an
+ attacker cannot be granted resources for too long;
+
+ - Protocol validation: HTTP, SSL, or payload are inspected and invalid
+ protocol elements are rejected, unless instructed to accept them anyway;
+
+ - Policy enforcement : ensure that only what is allowed may be forwarded;
+
+ - Both incoming and outgoing connections may be limited to certain network
+ namespaces (Linux only), making it easy to build a cross-container,
+ multi-tenant load balancer;
+
+ - PROXY protocol presents the client's IP address to the server even for
+ non-HTTP traffic. This is an HAProxy extension that was adopted by a number
+ of third-party products by now, at least these ones at the time of writing :
+ - client : haproxy, stud, stunnel, exaproxy, ELB, squid
+ - server : haproxy, stud, postfix, exim, nginx, squid, node.js, varnish
+
+
+3.3.2. Basic features : SSL
+---------------------------
+
+HAProxy's SSL stack is recognized as one of the most featureful according to
+Google's engineers (http://istlsfastyet.com/). The most commonly used features
+making it quite complete are :
+
+ - SNI-based multi-hosting with no limit on sites count and focus on
+ performance. At least one deployment is known for running 50000 domains
+ with their respective certificates;
+
+ - support for wildcard certificates reduces the need for many certificates ;
+
+ - certificate-based client authentication with configurable policies on
+   failure to present a valid certificate. This makes it possible to present a
+   different server farm used to regenerate the client certificate, for example;
+
+ - authentication of the backend server ensures the backend server is the real
+ one and not a man in the middle;
+
+ - authentication with the backend server lets the backend server know it's
+ really the expected haproxy node that is connecting to it;
+
+ - TLS NPN and ALPN extensions make it possible to reliably offload SPDY/HTTP2
+ connections and pass them in clear text to backend servers;
+
+ - OCSP stapling further reduces first page load time by delivering inline an
+ OCSP response when the client requests a Certificate Status Request;
+
+ - Dynamic record sizing provides both high performance and low latency, and
+ significantly reduces page load time by letting the browser start to fetch
+ new objects while packets are still in flight;
+
+ - permanent access to all relevant SSL/TLS layer information for logging,
+ access control, reporting etc. These elements can be embedded into HTTP
+ header or even as a PROXY protocol extension so that the offloaded server
+ gets all the information it would have had if it performed the SSL
+ termination itself.
+
+ - Detect, log and block certain known attacks even on vulnerable SSL libs,
+ such as the Heartbleed attack affecting certain versions of OpenSSL.
+
+ - support for stateless session resumption (RFC 5077 TLS Ticket extension).
+   TLS tickets can be updated from the CLI, which provides the means to implement
+ Perfect Forward Secrecy by frequently rotating the tickets.
+
+
+3.3.3. Basic features : Monitoring
+----------------------------------
+
+HAProxy focuses a lot on availability. As such it cares about servers state,
+and about reporting its own state to other network components :
+
+ - Servers' state is continuously monitored using per-server parameters. This
+ ensures the path to the server is operational for regular traffic;
+
+ - Health checks support two hysteresis thresholds for up and down transitions
+   in order to protect against state flapping;
+
+ - Checks can be sent to a different address/port/protocol : this makes it
+ easy to check a single service that is considered representative of multiple
+ ones, for example the HTTPS port for an HTTP+HTTPS server.
+
+ - Servers can track other servers and go down simultaneously : this ensures
+ that servers hosting multiple services can fail atomically and that no one
+ will be sent to a partially failed server;
+
+ - Agents may be deployed on the server to monitor load and health : a server
+ may be interested in reporting its load, operational status, administrative
+ status independently from what health checks can see. By running a simple
+ agent on the server, it's possible to consider the server's view of its own
+ health in addition to the health checks validating the whole path;
+
+ - Various check methods are available : TCP connect, HTTP request, SMTP hello,
+ SSL hello, LDAP, SQL, Redis, send/expect scripts, all with/without SSL;
+
+ - State change is notified in the logs and stats page with the failure reason
+ (e.g. the HTTP response received at the moment the failure was detected). An
+ e-mail can also be sent to a configurable address upon such a change ;
+
+ - Server state is also reported on the stats interface and can be used to take
+ routing decisions so that traffic may be sent to different farms depending
+ on their sizes and/or health (e.g. loss of an inter-DC link);
+
+ - HAProxy can use health check requests to pass information to the servers,
+ such as their names, weight, the number of other servers in the farm etc.
+ so that servers can adjust their response and decisions based on this
+ knowledge (e.g. postpone backups to keep more CPU available);
+
+ - Servers can use health checks to report more detailed state than just on/off
+ (e.g. I would like to stop, please stop sending new visitors);
+
+ - HAProxy itself can report its state to external components such as routers
+ or other load balancers, allowing to build very complete multi-path and
+ multi-layer infrastructures.
+
+
+3.3.4. Basic features : High availability
+-----------------------------------------
+
+Just like any serious load balancer, HAProxy cares a lot about availability to
+ensure the best global service continuity :
+
+ - Only valid servers are used ; the other ones are automatically evicted from
+ load balancing farms ; under certain conditions it is still possible to
+ force to use them though;
+
+ - Support for a graceful shutdown so that it is possible to take servers out
+ of a farm without affecting any connection;
+
+ - Backup servers are automatically used when active servers are down and
+ replace them so that sessions are not lost when possible. This also allows
+ to build multiple paths to reach the same server (e.g. multiple interfaces);
+
+ - Ability to return a global failed status for a farm when too many servers
+ are down. This, combined with the monitoring capabilities makes it possible
+ for an upstream component to choose a different LB node for a given service;
+
+ - Stateless design makes it easy to build clusters : by design, HAProxy does
+ its best to ensure the highest service continuity without having to store
+ information that could be lost in the event of a failure. This ensures that
+ a takeover is the most seamless possible;
+
+ - Integrates well with standard VRRP daemon keepalived : HAProxy easily tells
+ keepalived about its state and copes very well with floating virtual IP
+ addresses. Note: only use IP redundancy protocols (VRRP/CARP) over cluster-
+ based solutions (Heartbeat, ...) as they're the ones offering the fastest,
+ most seamless, and most reliable switchover.
+
+
+3.3.5. Basic features : Load balancing
+--------------------------------------
+
+HAProxy offers a fairly complete set of load balancing features, most of which
+are unfortunately not available in a number of other load balancing products :
+
+ - no less than 10 load balancing algorithms are supported, some of which apply
+ to input data to offer an infinite list of possibilities. The most common
+ ones are round-robin (for short connections, pick each server in turn),
+ leastconn (for long connections, pick the least recently used of the servers
+ with the lowest connection count), source (for SSL farms or terminal server
+ farms, the server directly depends on the client's source address), URI (for
+ HTTP caches, the server directly depends on the HTTP URI), hdr (the server
+ directly depends on the contents of a specific HTTP header field), first
+ (for short-lived virtual machines, all connections are packed on the
+ smallest possible subset of servers so that unused ones can be powered
+ down);
+
+ - all algorithms above support per-server weights so that it is possible to
+   accommodate different server generations in a farm, or direct a small
+ fraction of the traffic to specific servers (debug mode, running the next
+ version of the software, etc);
+
+ - dynamic weights are supported for round-robin, leastconn and consistent
+ hashing ; this allows server weights to be modified on the fly from the CLI
+ or even by an agent running on the server;
+
+ - slow-start is supported whenever a dynamic weight is supported; this allows
+ a server to progressively take the traffic. This is an important feature
+ for fragile application servers which require to compile classes at runtime
+ as well as cold caches which need to fill up before being run at full
+ throttle;
+
+ - hashing can apply to various elements such as client's source address, URL
+ components, query string element, header field values, POST parameter, RDP
+ cookie;
+
+ - consistent hashing protects server farms against massive redistribution when
+ adding or removing servers in a farm. That's very important in large cache
+ farms and it allows slow-start to be used to refill cold caches;
+
+ - a number of internal metrics such as the number of connections per server,
+ per backend, the amount of available connection slots in a backend etc makes
+ it possible to build very advanced load balancing strategies.
+
+
+3.3.6. Basic features : Stickiness
+----------------------------------
+
+Application load balancing would be useless without stickiness. HAProxy provides
+a fairly comprehensive set of possibilities to maintain a visitor on the same
+server even across various events such as server addition/removal, down/up
+cycles, and some methods are designed to be resistant to the distance between
+multiple load balancing nodes in that they don't require any replication :
+
+ - stickiness information can be individually matched and learned from
+ different places if desired. For example a JSESSIONID cookie may be matched
+ both in a cookie and in the URL. Up to 8 parallel sources can be learned at
+ the same time and each of them may point to a different stick-table;
+
+ - stickiness information can come from anything that can be seen within a
+ request or response, including source address, TCP payload offset and
+ length, HTTP query string elements, header field values, cookies, and so
+ on.
+
+ - stick-tables are replicated between all nodes in a multi-master fashion;
+
+ - commonly used elements such as SSL-ID or RDP cookies (for TSE farms) are
+ directly accessible to ease manipulation;
+
+ - all sticking rules may be dynamically conditioned by ACLs;
+
+ - it is possible to decide not to stick to certain servers, such as backup
+ servers, so that when the nominal server comes back, it automatically takes
+ the load back. This is often used in multi-path environments;
+
+ - in HTTP it is often preferred not to learn anything and instead manipulate
+ a cookie dedicated to stickiness. For this, it's possible to detect,
+ rewrite, insert or prefix such a cookie to let the client remember what
+ server was assigned;
+
+ - the server may decide to change or clean the stickiness cookie on logout,
+ so that leaving visitors are automatically unbound from the server;
+
+ - using ACL-based rules it is also possible to selectively ignore or enforce
+ stickiness regardless of the server's state; combined with advanced health
+ checks, that helps admins verify that the server they're installing is up
+ and running before presenting it to the whole world;
+
+ - an innovative mechanism to set a maximum idle time and duration on cookies
+ ensures that stickiness can be smoothly stopped on devices which are never
+ closed (smartphones, TVs, home appliances) without having to store them on
+ persistent storage;
+
+ - multiple server entries may share the same stickiness keys so that
+ stickiness is not lost in multi-path environments when one path goes down;
+
+ - soft-stop ensures that only users with stickiness information will continue
+ to reach the server they've been assigned to but no new users will go there.
+
+
+3.3.7. Basic features : Logging
+-------------------------------
+
+Logging is an extremely important feature for a load balancer, first because a
+load balancer is often wrongly accused of causing the problems it reveals, and
+second because it is placed at a critical point in an infrastructure where all
+normal and abnormal activity needs to be analyzed and correlated with other
+components.
+
+HAProxy provides very detailed logs, with millisecond accuracy and the exact
+connection accept time that can be searched in firewalls logs (e.g. for NAT
+correlation). By default, TCP and HTTP logs are quite detailed and contain
+everything needed for troubleshooting, such as source IP address and port,
+frontend, backend, server, timers (request receipt duration, queue duration,
+connection setup time, response headers time, data transfer time), global
+process state, connection counts, queue status, retries count, detailed
+stickiness actions and disconnect reasons, header captures with a safe output
+encoding. It is then possible to extend or replace this format to include any
+sampled data, variables, captures, resulting in very detailed information. For
+example it is possible to log the number of cumulative requests or number of
+different URLs visited by a client.
+
+The log level may be adjusted per request using standard ACLs, so it is possible
+to automatically silence some logs considered as pollution and instead raise
+warnings when some abnormal behavior happens for a small part of the traffic
+(e.g. too many URLs or HTTP errors for a source address). Administrative logs
+are also emitted with their own levels to inform about the loss or recovery of a
+server for example.
+
+Each frontend and backend may use multiple independent log outputs, which eases
+multi-tenancy. Logs are preferably sent over UDP, maybe JSON-encoded, and are
+truncated after a configurable line length in order to guarantee delivery. But
+it is also possible to send them to stdout/stderr or any file descriptor, as
+well as to a ring buffer that a client can subscribe to in order to retrieve
+them.
+
+
+3.3.8. Basic features : Statistics
+----------------------------------
+
+HAProxy provides a web-based statistics reporting interface with authentication,
+security levels and scopes. It is thus possible to provide each hosted customer
+with his own page showing only his own instances. This page can be located in a
+hidden URL part of the regular web site so that no new port needs to be opened.
+This page may also report the availability of other HAProxy nodes so that it is
+easy to spot if everything works as expected at a glance. The view is synthetic
+with a lot of details accessible (such as error causes, last access and last
+change duration, etc), which are also accessible as a CSV table that other tools
+may import to draw graphs. The page may self-refresh to be used as a monitoring
+page on a large display. In administration mode, the page also allows to change
+server state to ease maintenance operations.
+
+A Prometheus exporter is also provided so that the statistics can be consumed
+in a different format depending on the deployment.
+
+
+3.4. Standard features
+----------------------
+
+In this section, some features that are very commonly used in HAProxy but are
+not necessarily present on other load balancers are enumerated.
+
+
+3.4.1. Standard features : Sampling and converting information
+--------------------------------------------------------------
+
+HAProxy supports information sampling using a wide set of "sample fetch
+functions". The principle is to extract pieces of information known as samples,
+for immediate use. This is used for stickiness, to build conditions, to produce
+information in logs or to enrich HTTP headers.
+
+Samples can be fetched from various sources :
+
+ - constants : integers, strings, IP addresses, binary blocks;
+
+ - the process : date, environment variables, server/frontend/backend/process
+ state, byte/connection counts/rates, queue length, random generator, ...
+
+ - variables : per-session, per-request, per-response variables;
+
+ - the client connection : source and destination addresses and ports, and all
+ related statistics counters;
+
+ - the SSL client session : protocol, version, algorithm, cipher, key size,
+ session ID, all client and server certificate fields, certificate serial,
+ SNI, ALPN, NPN, client support for certain extensions;
+
+ - request and response buffers contents : arbitrary payload at offset/length,
+ data length, RDP cookie, decoding of SSL hello type, decoding of TLS SNI;
+
+ - HTTP (request and response) : method, URI, path, query string arguments,
+ status code, headers values, positional header value, cookies, captures,
+ authentication, body elements;
+
+A sample may then pass through a number of operators known as "converters" to
+experience some transformation. A converter consumes a sample and produces a
+new one, possibly of a completely different type. For example, a converter may
+be used to return only the integer length of the input string, or could turn a
+string to upper case. Any arbitrary number of converters may be applied in
+series to a sample before final use. Among all available sample converters, the
+following ones are the most commonly used :
+
+ - arithmetic and logic operators : they make it possible to perform advanced
+ computation on input data, such as computing ratios, percentages or simply
+ converting from one unit to another one;
+
+ - IP address masks are useful when some addresses need to be grouped by larger
+ networks;
+
+ - data representation : URL-decode, base64, hex, JSON strings, hashing;
+
+ - string conversion : extract substrings at fixed positions, fixed length,
+ extract specific fields around certain delimiters, extract certain words,
+ change case, apply regex-based substitution;
+
+ - date conversion : convert to HTTP date format, convert local to UTC and
+ conversely, add or remove offset;
+
+ - lookup an entry in a stick table to find statistics or assigned server;
+
+ - map-based key-to-value conversion from a file (mostly used for geolocation).
+
+
+3.4.2. Standard features : Maps
+-------------------------------
+
+Maps are a powerful type of converter consisting in loading a two-column file
+into memory at boot time, then looking up each input sample from the first
+column and either returning the corresponding pattern on the second column if
+the entry was found, or returning a default value. The output information also
+being a sample, it can in turn experience other transformations including other
+map lookups. Maps are most commonly used to translate the client's IP address
+to an AS number or country code since they support a longest match for network
+addresses but they can be used for various other purposes.
+
+Part of their strength comes from being updatable on the fly either from the CLI
+or from certain actions using other samples, making them capable of storing and
+retrieving information between subsequent accesses. Another strength comes from
+the binary tree based indexation which makes them extremely fast even when they
+contain hundreds of thousands of entries, making geolocation very cheap and easy
+to set up.
+
+
+3.4.3. Standard features : ACLs and conditions
+----------------------------------------------
+
+Most operations in HAProxy can be made conditional. Conditions are built by
+combining multiple ACLs using logic operators (AND, OR, NOT). Each ACL is a
+series of tests based on the following elements :
+
+ - a sample fetch method to retrieve the element to test ;
+
+ - an optional series of converters to transform the element ;
+
+ - a list of patterns to match against ;
+
+ - a matching method to indicate how to compare the patterns with the sample
+
+For example, the sample may be taken from the HTTP "Host" header, it could then
+be converted to lower case, then matched against a number of regex patterns
+using the regex matching method.
+
+Technically, ACLs are built on the same core as the maps, they share the exact
+same internal structure, pattern matching methods and performance. The only real
+difference is that instead of returning a sample, they only return "found"
+or "not found". In terms of usage, ACL patterns may be declared inline in the
+configuration file and do not require their own file. ACLs may be named for ease
+of use or to make configurations understandable. A named ACL may be declared
+multiple times and it will evaluate all definitions in turn until one matches.
+
+About 13 different pattern matching methods are provided, among which IP address
+mask, integer ranges, substrings, regex. They work like functions, and just like
+with any programming language, only what is needed is evaluated, so when a
+condition involving an OR is already true, next ones are not evaluated, and
+similarly when a condition involving an AND is already false, the rest of the
+condition is not evaluated.
+
+There is no practical limit to the number of declared ACLs, and a handful of
+commonly used ones are provided. However experience has shown that setups using
+a lot of named ACLs are quite hard to troubleshoot and that sometimes using
+anonymous ACLs inline is easier as it requires less references out of the scope
+being analyzed.
+
+
+3.4.4. Standard features : Content switching
+--------------------------------------------
+
+HAProxy implements a mechanism known as content-based switching. The principle
+is that a connection or request arrives on a frontend, then the information
+carried with this request or connection are processed, and at this point it is
+possible to write ACLs-based conditions making use of these information to
+decide what backend will process the request. Thus the traffic is directed to
+one backend or another based on the request's contents. The most common example
+consists in using the Host header and/or elements from the path (sub-directories
+or file-name extensions) to decide whether an HTTP request targets a static
+object or the application, and to route static objects traffic to a backend made
+of fast and light servers, and all the remaining traffic to a more complex
+application server, thus constituting a fine-grained virtual hosting solution.
+This is quite convenient to make multiple technologies coexist as a more global
+solution.
+
+Another use case of content-switching consists in using different load balancing
+algorithms depending on various criteria. A cache may use a URI hash while an
+application would use round-robin.
+
+Last but not least, it allows multiple customers to use a small share of a
+common resource by enforcing per-backend (thus per-customer) connection limits.
+
+Content switching rules scale very well, though their performance may depend on
+the number and complexity of the ACLs in use. But it is also possible to write
+dynamic content switching rules where a sample value directly turns into a
+backend name and without making use of ACLs at all. Such configurations have
+been reported to work fine at least with 300000 backends in production.
+
+
+3.4.5. Standard features : Stick-tables
+---------------------------------------
+
+Stick-tables are commonly used to store stickiness information, that is, to keep
+a reference to the server a certain visitor was directed to. The key is then the
+identifier associated with the visitor (its source address, the SSL ID of the
+connection, an HTTP or RDP cookie, the customer number extracted from the URL or
+from the payload, ...) and the stored value is then the server's identifier.
+
+Stick tables may use 3 different types of samples for their keys : integers,
+strings and addresses. Only one stick-table may be referenced in a proxy, and it
+is designated everywhere with the proxy name. Up to 8 keys may be tracked in
+parallel. The server identifier is committed during request or response
+processing once both the key and the server are known.
+
+Stick-table contents may be replicated in active-active mode with other HAProxy
+nodes known as "peers" as well as with the new process during a reload operation
+so that all load balancing nodes share the same information and take the same
+routing decision if client's requests are spread over multiple nodes.
+
+Since stick-tables are indexed on what allows to recognize a client, they are
+often also used to store extra information such as per-client statistics. The
+extra statistics take some extra space and need to be explicitly declared. The
+type of statistics that may be stored includes the input and output bandwidth,
+the number of concurrent connections, the connection rate and count over a
+period, the amount and frequency of errors, some specific tags and counters,
+etc. In order to support keeping such information without being forced to
+stick to a given server, a special "tracking" feature is implemented and allows
+to track up to 3 simultaneous keys from different tables at the same time
+regardless of stickiness rules. Each stored statistics may be searched, dumped
+and cleared from the CLI and adds to the live troubleshooting capabilities.
+
+While this mechanism can be used to prioritize a returning visitor or to adjust
+the delivered quality of service depending on good or bad behavior, it is
+mostly used to fight against service abuse and more generally DDoS as it allows
+to build complex models to detect certain bad behaviors at a high processing
+speed.
+
+
+3.4.6. Standard features : Formatted strings
+--------------------------------------------
+
+There are many places where HAProxy needs to manipulate character strings, such
+as logs, redirects, header additions, and so on. In order to provide the
+greatest flexibility, the notion of Formatted strings was introduced, initially
+for logging purposes, which explains why it's still called "log-format". These
+strings contain escape characters allowing to introduce various dynamic data
+including variables and sample fetch expressions into strings, and even to
+adjust the encoding while the result is being turned into a string (for example,
+adding quotes). This provides a powerful way to build header contents, to build
+response data or even response templates, or to customize log lines.
+Additionally, in order to remain simple to build most common strings, about 50
+special tags are provided as shortcuts for information commonly used in logs.
+
+
+3.4.7. Standard features : HTTP rewriting and redirection
+---------------------------------------------------------
+
+Installing a load balancer in front of an application that was never designed
+for this can be a challenging task without the proper tools. One of the most
+commonly requested operations in this case is to adjust requests and response
+headers to make the load balancer appear as the origin server and to fix hard
+coded information. This comes with changing the path in requests (which is
+strongly advised against), modifying Host header field, modifying the Location
+response header field for redirects, modifying the path and domain attribute
+for cookies, and so on. It also happens that a number of servers are somewhat
+verbose and tend to leak too much information in the response, making them more
+vulnerable to targeted attacks. While it's theoretically not the role of a load
+balancer to clean this up, in practice it's located at the best place in the
+infrastructure to guarantee that everything is cleaned up.
+
+Similarly, sometimes the load balancer will have to intercept some requests and
+respond with a redirect to a new target URL. While some people tend to confuse
+redirects and rewriting, these are two completely different concepts, since the
+rewriting makes the client and the server see different things (and disagree on
+the location of the page being visited) while redirects ask the client to visit
+the new URL so that it sees the same location as the server.
+
+In order to do this, HAProxy supports various possibilities for rewriting and
+redirects, among which :
+
+ - regex-based URL and header rewriting in requests and responses. Regex are
+ the most commonly used tool to modify header values since they're easy to
+ manipulate and well understood;
+
+ - headers may also be appended, deleted or replaced based on formatted strings
+ so that it is possible to pass information there (e.g. client side TLS
+ algorithm and cipher);
+
+ - HTTP redirects can use any 3xx code to a relative, absolute, or completely
+ dynamic (formatted string) URI;
+
+ - HTTP redirects also support some extra options such as setting or clearing
+ a specific cookie, dropping the query string, appending a slash if missing,
+ and so on;
+
+ - a powerful "return" directive allows to customize every part of a response
+ like status, headers, body using dynamic contents or even template files.
+
+ - all operations support ACL-based conditions;
+
+
+3.4.8. Standard features : Server protection
+--------------------------------------------
+
+HAProxy does a lot to maximize service availability, and for this it takes
+large efforts to protect servers against overloading and attacks. The first
+and most important point is that only complete and valid requests are forwarded
+to the servers. The initial reason is that HAProxy needs to find the protocol
+elements it needs to stay synchronized with the byte stream, and the second
+reason is that until the request is complete, there is no way to know if some
+elements will change its semantics. The direct benefit from this is that servers
+are not exposed to invalid or incomplete requests. This is a very effective
+protection against slowloris attacks, which have almost no impact on HAProxy.
+
+Another important point is that HAProxy contains buffers to store requests and
+responses, and that by only sending a request to a server when it's complete and
+by reading the whole response very quickly from the local network, the server
+side connection is used for a very short time and this preserves server
+resources as much as possible.
+
+A direct extension to this is that HAProxy can artificially limit the number of
+concurrent connections or outstanding requests to a server, which guarantees
+that the server will never be overloaded even if it continuously runs at 100% of
+its capacity during traffic spikes. All excess requests will simply be queued to
+be processed when one slot is released. In the end, this huge resource savings
+most often ensures so much better server response times that it ends up actually
+being faster than by overloading the server. Queued requests may be redispatched
+to other servers, or even aborted in queue when the client aborts, which also
+protects the servers against the "reload effect", where each click on "reload"
+by a visitor on a slow-loading page usually induces a new request and maintains
+the server in an overloaded state.
+
+The slow-start mechanism also protects restarting servers against high traffic
+levels while they're still finalizing their startup or compiling some classes.
+
+Regarding the protocol-level protection, it is possible to relax the HTTP parser
+to accept non standard-compliant but harmless requests or responses and even to
+fix them. This allows bogus applications to be accessible while a fix is being
+developed. In parallel, offending messages are completely captured with a
+detailed report that helps developers spot the issue in the application. The most
+dangerous protocol violations are properly detected and dealt with and fixed.
+For example malformed requests or responses with two Content-length headers are
+either fixed if the values are exactly the same, or rejected if they differ,
+since it becomes a security problem. Protocol inspection is not limited to HTTP,
+it is also available for other protocols like TLS or RDP.
+
+When a protocol violation or attack is detected, there are various options to
+respond to the user, such as returning the common "HTTP 400 bad request",
+closing the connection with a TCP reset, or faking an error after a long delay
+("tarpit") to confuse the attacker. All of these contribute to protecting the
+servers by discouraging the offending client from pursuing an attack that
+becomes very expensive to maintain.
+
+HAProxy also proposes some more advanced options to protect against accidental
+data leaks and session crossing. Not only it can log suspicious server responses
+but it will also log and optionally block a response which might affect a given
+visitor's confidentiality. One such example is a cacheable cookie appearing in a
+cacheable response and which may result in an intermediary cache delivering it
+to another visitor, causing an accidental session sharing.
+
+
+3.5. Advanced features
+----------------------
+
+3.5.1. Advanced features : Management
+-------------------------------------
+
+HAProxy is designed to remain extremely stable and safe to manage in a regular
+production environment. It is provided as a single executable file which doesn't
+require any installation process. Multiple versions can easily coexist, meaning
+that it's possible (and recommended) to upgrade instances progressively by
+order of importance instead of migrating all of them at once. Configuration
+files are easily versioned. Configuration checking is done off-line so it
+doesn't require to restart a service that will possibly fail. During
+configuration checks, a number of advanced mistakes may be detected (e.g. a rule
+hiding another one, or stickiness that will not work) and detailed warnings and
+configuration hints are proposed to fix them. Backwards configuration file
+compatibility goes very far away in time, with version 1.5 still fully
+supporting configurations for versions 1.1 written 13 years before, and 1.6
+only dropping support for almost unused, obsolete keywords that can be done
+differently. The configuration and software upgrade mechanism is smooth and non
+disruptive in that it allows old and new processes to coexist on the system,
+each handling its own connections. System status, build options, and library
+compatibility are reported on startup.
+
+Some advanced features allow an application administrator to smoothly stop a
+server, detect when there's no activity on it anymore, then take it off-line,
+stop it, upgrade it and ensure it doesn't take any traffic while being upgraded,
+then test it again through the normal path without opening it to the public, and
+all of this without touching HAProxy at all. This ensures that even complicated
+production operations may be done during opening hours with all technical
+resources available.
+
+The process tries to save resources as much as possible, uses memory pools to
+save on allocation time and limit memory fragmentation, releases payload buffers
+as soon as their contents are sent, and supports enforcing strong memory limits
+above which connections have to wait for a buffer to become available instead of
+allocating more memory. This system helps guarantee memory usage in certain
+strict environments.
+
+A command line interface (CLI) is available as a UNIX or TCP socket, to perform
+a number of operations and to retrieve troubleshooting information. Everything
+done on this socket doesn't require a configuration change, so it is mostly used
+for temporary changes. Using this interface it is possible to change a server's
+address, weight and status, to consult statistics and clear counters, dump and
+clear stickiness tables, possibly selectively by key criteria, dump and kill
+client-side and server-side connections, dump captured errors with a detailed
+analysis of the exact cause and location of the error, dump, add and remove
+entries from ACLs and maps, update TLS shared secrets, apply connection limits
+and rate limits on the fly to arbitrary frontends (useful in shared hosting
+environments), and disable a specific frontend to release a listening port
+(useful when daytime operations are forbidden and a fix is needed nonetheless).
+Updating certificates and their configuration on the fly is permitted, as well
+as enabling and consulting traces of every processing step of the traffic.
+
+For environments where SNMP is mandatory, at least two agents exist, one is
+provided with the HAProxy sources and relies on the Net-SNMP Perl module.
+Another one is provided with the commercial packages and doesn't require Perl.
+Both are roughly equivalent in terms of coverage.
+
+It is often recommended to install 4 utilities on the machine where HAProxy is
+deployed :
+
+ - socat (in order to connect to the CLI, though certain forks of netcat can
+ also do it to some extents);
+
+ - halog from the latest HAProxy version : this is the log analysis tool, it
+ parses native TCP and HTTP logs extremely fast (1 to 2 GB per second) and
+ extracts useful information and statistics such as requests per URL, per
+ source address, URLs sorted by response time or error rate, termination
+ codes etc. It was designed to be deployed on the production servers to
+ help troubleshoot live issues so it has to be there ready to be used;
+
+ - tcpdump : this is highly recommended to take the network traces needed to
+ troubleshoot an issue that was made visible in the logs. There is a moment
+ where application and haproxy's analysis will diverge and the network traces
+ are the only way to say who's right and who's wrong. It's also fairly common
+ to detect bugs in network stacks and hypervisors thanks to tcpdump;
+
+ - strace : it is tcpdump's companion. It will report what HAProxy really sees
+ and will help sort out the issues the operating system is responsible for
+ from the ones HAProxy is responsible for. Strace is often requested when a
+ bug in HAProxy is suspected;
+
+
+3.5.2. Advanced features : System-specific capabilities
+-------------------------------------------------------
+
+Depending on the operating system HAProxy is deployed on, certain extra features
+may be available or needed. While it is supported on a number of platforms,
+HAProxy is primarily developed on Linux, which explains why some features are
+only available on this platform.
+
+The transparent bind and connect features, the support for binding connections
+to a specific network interface, as well as the ability to bind multiple
+processes to the same IP address and ports are only available on Linux and BSD
+systems, though only Linux performs a kernel-side load balancing of the incoming
+requests between the available processes.
+
+On Linux, there are also a number of extra features and optimizations including
+support for network namespaces (also known as "containers") allowing HAProxy to
+be a gateway between all containers, the ability to set the MSS, Netfilter marks
+and IP TOS field on the client side connection, support for TCP FastOpen on the
+listening side, TCP user timeouts to let the kernel quickly kill connections
+when it detects the client has disappeared before the configured timeouts, TCP
+splicing to let the kernel forward data between the two sides of a connection
+thus avoiding multiple memory copies, the ability to enable the "defer-accept"
+bind option to only get notified of an incoming connection once data become
+available in the kernel buffers, and the ability to send the request with the
+ACK confirming a connect (sometimes called "piggy-back") which is enabled with
+the "tcp-smart-connect" option. On Linux, HAProxy also takes great care of
+manipulating the TCP delayed ACKs to save as many packets as possible on the
+network.
+
+Some systems have an unreliable clock which jumps back and forth in the past
+and in the future. This used to happen with some NUMA systems where multiple
+processors didn't see the exact same time of day, and recently it became more
+common in virtualized environments where the virtual clock has no relation with
+the real clock, resulting in huge time jumps (sometimes up to 30 seconds have
+been observed). This causes a lot of trouble with respect to timeout enforcement
+in general. Due to this flaw of these systems, HAProxy maintains its own
+monotonic clock which is based on the system's clock but where drift is measured
+and compensated for. This ensures that even with a very bad system clock, timers
+remain reasonably accurate and timeouts continue to work. Note that this problem
+affects all the software running on such systems and is not specific to HAProxy.
+The common effects are spurious timeouts or application freezes. Thus if this
+behavior is detected on a system, it must be fixed, regardless of the fact that
+HAProxy protects itself against it.
+
+On Linux, a new starting process may communicate with the previous one to reuse
+its listening file descriptors so that the listening sockets are never
+interrupted during the process's replacement.
+
+
+3.5.3. Advanced features : Scripting
+------------------------------------
+
+HAProxy can be built with support for the Lua embedded language, which opens a
+wide area of new possibilities related to complex manipulation of requests or
+responses, routing decisions, statistics processing and so on. Using Lua it is
+even possible to establish parallel connections to other servers to exchange
+information. This way it becomes possible (though complex) to develop an
+authentication system for example. Please refer to the documentation in the file
+"doc/lua-api/index.rst" for more information on how to use Lua.
+
+
+3.5.4. Advanced features: Tracing
+---------------------------------
+
+At any moment an administrator may connect over the CLI and enable tracing in
+various internal subsystems. Various levels of details are provided by default
+so that in practice anything between one line per request to 500 lines per
+request can be retrieved. Filters as well as an automatic capture on/off/pause
+mechanism are available so that it really is possible to wait for a certain
+event and watch it in detail. This is extremely convenient to diagnose protocol
+violations from faulty servers and clients, or denial of service attacks.
+
+
+3.6. Sizing
+-----------
+
+Typical CPU usage figures show 15% of the processing time spent in HAProxy
+versus 85% in the kernel in TCP or HTTP close mode, and about 30% for HAProxy
+versus 70% for the kernel in HTTP keep-alive mode. This means that the operating
+system and its tuning have a strong impact on the global performance.
+
+Usages vary a lot between users, some focus on bandwidth, other ones on request
+rate, others on connection concurrency, others on SSL performance. This section
+aims at providing a few elements to help with this task.
+
+It is important to keep in mind that every operation comes with a cost, so each
+individual operation adds its overhead on top of the other ones, which may be
+negligible in certain circumstances, and which may dominate in other cases.
+
+When processing the requests from a connection, we can say that :
+
+ - forwarding data costs less than parsing request or response headers;
+
+ - parsing request or response headers costs less than establishing then closing
+ a connection to a server;
+
+ - establishing and closing a connection costs less than a TLS resume operation;
+
+ - a TLS resume operation costs less than a full TLS handshake with a key
+ computation;
+
+ - an idle connection costs less CPU than a connection whose buffers hold data;
+
+ - a TLS context costs even more memory than a connection with data;
+
+So in practice, it is cheaper to process payload bytes than header bytes, thus
+it is easier to achieve high network bandwidth with large objects (few requests
+per volume unit) than with small objects (many requests per volume unit). This
+explains why maximum bandwidth is always measured with large objects, while
+request rate or connection rates are measured with small objects.
+
+Some operations scale well on multiple processes spread over multiple CPUs,
+and others don't scale as well. Network bandwidth doesn't scale very far because
+the CPU is rarely the bottleneck for large objects, it's mostly the network
+bandwidth and data buses to reach the network interfaces. The connection rate
+doesn't scale well over multiple processors due to a few locks in the system
+when dealing with the local ports table. The request rate over persistent
+connections scales very well as it doesn't involve much memory nor network
+bandwidth and doesn't require to access locked structures. TLS key computation
+scales very well as it's totally CPU-bound. TLS resume scales moderately well,
+but reaches its limits around 4 processes where the overhead of accessing the
+shared table offsets the small gains expected from more power.
+
+The performance numbers one can expect from a very well tuned system are in the
+following range. It is important to take them as orders of magnitude and to
+expect significant variations in any direction based on the processor, IRQ
+setting, memory type, network interface type, operating system tuning and so on.
+
+The following numbers were found on a Core i7 running at 3.7 GHz equipped with
+a dual-port 10 Gbps NICs running Linux kernel 3.10, HAProxy 1.6 and OpenSSL
+1.0.2. HAProxy was running as a single process on a single dedicated CPU core,
+and two extra cores were dedicated to network interrupts :
+
+ - 20 Gbps of maximum network bandwidth in clear text for objects 256 kB or
+ higher, 10 Gbps for 41kB or higher;
+
+ - 4.6 Gbps of TLS traffic using AES256-GCM cipher with large objects;
+
+ - 83000 TCP connections per second from client to server;
+
+ - 82000 HTTP connections per second from client to server;
+
+ - 97000 HTTP requests per second in server-close mode (keep-alive with the
+ client, close with the server);
+
+ - 243000 HTTP requests per second in end-to-end keep-alive mode;
+
+ - 300000 filtered TCP connections per second (anti-DDoS)
+
+ - 160000 HTTPS requests per second in keep-alive mode over persistent TLS
+ connections;
+
+ - 13100 HTTPS requests per second using TLS resumed connections;
+
+ - 1300 HTTPS connections per second using TLS connections renegotiated with
+ RSA2048;
+
+ - 20000 concurrent saturated connections per GB of RAM, including the memory
+ required for system buffers; it is possible to do better with careful tuning
+ but this result is easy to achieve.
+
+ - about 8000 concurrent TLS connections (client-side only) per GB of RAM,
+ including the memory required for system buffers;
+
+ - about 5000 concurrent end-to-end TLS connections (both sides) per GB of
+ RAM including the memory required for system buffers;
+
+A more recent benchmark featuring the multi-thread enabled HAProxy 2.4 on a
+64-core ARM Graviton2 processor in AWS reached 2 million HTTPS requests per
+second at sub-millisecond response time, and 100 Gbps of traffic:
+
+ https://www.haproxy.com/blog/haproxy-forwards-over-2-million-http-requests-per-second-on-a-single-aws-arm-instance/
+
+Thus a good rule of thumb to keep in mind is that the request rate is divided
+by 10 between TLS keep-alive and TLS resume, and between TLS resume and TLS
+renegotiation, while it's only divided by 3 between HTTP keep-alive and HTTP
+close. Another good rule of thumb is to remember that a high frequency core
+with AES instructions can do around 20 Gbps of AES-GCM per core.
+
+Another good rule of thumb is to consider that on the same server, HAProxy will
+be able to saturate :
+
+ - about 5-10 static file servers or caching proxies;
+
+ - about 100 anti-virus proxies;
+
+ - and about 100-1000 application servers depending on the technology in use.
+
+
+3.7. How to get HAProxy
+-----------------------
+
+HAProxy is an open source project covered by the GPLv2 license, meaning that
+everyone is allowed to redistribute it provided that access to the sources is
+also provided upon request, especially if any modifications were made.
+
+HAProxy evolves as a main development branch called "master" or "mainline", from
+which new branches are derived once the code is considered stable. A lot of web
+sites run some development branches in production on a voluntary basis, either
+to participate in the project or because they need a bleeding edge feature, and
+their feedback is highly valuable to fix bugs and judge the overall quality and
+stability of the version being developed.
+
+The new branches that are created when the code is stable enough constitute a
+stable version and are generally maintained for several years, so that there is
+no emergency to migrate to a newer branch even when you're not on the latest.
+Once a stable branch is issued, it may only receive bug fixes, and very rarely
+minor feature updates when that makes users' life easier. All fixes that go into
+a stable branch necessarily come from the master branch. This guarantees that no
+fix will be lost after an upgrade. For this reason, if you fix a bug, please
+make the patch against the master branch, not the stable branch. You may even
+discover it was already fixed. This process also ensures that regressions in a
+stable branch are extremely rare, so there is never any excuse for not upgrading
+to the latest version in your current branch.
+
+Branches are numbered with two digits delimited with a dot, such as "1.6".
+Since 1.9, branches with an odd second digit are mostly focused on sensitive
+technical updates and more aimed at advanced users because they are likely to
+trigger more bugs than the other ones. They are maintained for about a year
+only and must not be deployed where they cannot be rolled back in emergency. A
+complete version includes one or two sub-version numbers indicating the level of
+fix. For example, version 1.5.14 is the 14th fix release in branch 1.5 after
+version 1.5.0 was issued. It contains 126 fixes for individual bugs, 24 updates
+on the documentation, and 75 other backported patches, most of which were needed
+to fix the aforementioned 126 bugs. An existing feature may never be modified
+nor removed in a stable branch, in order to guarantee that upgrades within the
+same branch will always be harmless.
+
+HAProxy is available from multiple sources, at different release rhythms :
+
+ - The official community web site : http://www.haproxy.org/ : this site
+ provides the sources of the latest development release, all stable releases,
+ as well as nightly snapshots for each branch. The release cycle is not fast,
+ several months between stable releases, or between development snapshots.
+ Very old versions are still supported there. Everything is provided as
+ sources only, so whatever comes from there needs to be rebuilt and/or
+ repackaged;
+
+ - GitHub : https://github.com/haproxy/haproxy/ : this is the mirror for the
+ development branch only, which provides integration with the issue tracker,
+ continuous integration and code coverage tools. This is exclusively for
+ contributors;
+
+ - A number of operating systems such as Linux distributions and BSD ports.
+ These systems generally provide long-term maintained versions which do not
+ always contain all the fixes from the official ones, but which at least
+ contain the critical fixes. It often is a good option for most users who do
+ not seek advanced configurations and just want to keep updates easy;
+
+ - Commercial versions from http://www.haproxy.com/ : these are supported
+ professional packages built for various operating systems or provided as
+ appliances, based on the latest stable versions and including a number of
+ features backported from the next release for which there is a strong
+ demand. It is the best option for users seeking the latest features with
+ the reliability of a stable branch, the fastest response time to fix bugs,
+ or simply support contracts on top of an open source product;
+
+
+In order to ensure that the version you're using is the latest one in your
+branch, you need to proceed this way :
+
+ - verify which HAProxy executable you're running : some systems ship it by
+ default and administrators install their versions somewhere else on the
+ system, so it is important to verify in the startup scripts which one is
+ used;
+
+ - determine which source your HAProxy version comes from. For this, it's
+ generally sufficient to type "haproxy -v". A development version will
+ appear like this, with the "dev" word after the branch number :
+
+ HAProxy version 2.4-dev18-a5357c-137 2021/05/09 - https://haproxy.org/
+
+ A stable version will appear like this, as well as unmodified stable
+ versions provided by operating system vendors :
+
+ HAProxy version 1.5.14 2015/07/02
+
+ And a nightly snapshot of a stable version will appear like this with an
+ hexadecimal sequence after the version, and with the date of the snapshot
+ instead of the date of the release :
+
+ HAProxy version 1.5.14-e4766ba 2015/07/29
+
+ Any other format may indicate a system-specific package with its own
+ patch set. For example HAProxy Enterprise versions will appear with the
+ following format (<branch>-<latest commit>-<revision>) :
+
+ HAProxy version 1.5.0-994126-357 2015/07/02
+
+ Please note that historically versions prior to 2.4 used to report the
+ process name with a hyphen between "HA" and "Proxy", including those above
+ which were adjusted to show the correct format only, so better ignore this
+ word or use a relaxed match in scripts. Additionally, modern versions add
+ a URL linking to the project's home.
+
+ Finally, versions 2.1 and above will include a "Status" line indicating
+ whether the version is safe for production or not, and if so, till when, as
+ well as a link to the list of known bugs affecting this version.
+
+ - for system-specific packages, you have to check with your vendor's package
+ repository or update system to ensure that your system is still supported,
+ and that fixes are still provided for your branch. For community versions
+ coming from haproxy.org, just visit the site, verify the status of your
+ branch and compare the latest version with yours to see if you're on the
+ latest one. If not you can upgrade. If your branch is not maintained
+ anymore, you're definitely very late and will have to consider an upgrade
+ to a more recent branch (carefully read the README when doing so).
+
+HAProxy will have to be updated according to the source it came from. Usually it
+follows the system vendor's way of upgrading a package. If it was taken from
+sources, please read the README file in the sources directory after extracting
+the sources and follow the instructions for your operating system.
+
+
+4. Companion products and alternatives
+--------------------------------------
+
+HAProxy integrates fairly well with certain products listed below, which is why
+they are mentioned here even if not directly related to HAProxy.
+
+
+4.1. Apache HTTP server
+-----------------------
+
+Apache is the de-facto standard HTTP server. It's a very complete and modular
+project supporting both file serving and dynamic contents. It can serve as a
+frontend for some application servers. It can even proxy requests and cache
+responses. In all of these use cases, a front load balancer is commonly needed.
+Apache can work in various modes, some being heavier than others. Certain
+modules still require the heavier pre-forked model and will prevent Apache from
+scaling well with a high number of connections. In this case HAProxy can provide
+a tremendous help by enforcing the per-server connection limits to a safe value
+and will significantly speed up the server and preserve its resources that will
+be better used by the application.
+
+Apache can extract the client's address from the X-Forwarded-For header by using
+the "mod_rpaf" extension. HAProxy will automatically feed this header when
+"option forwardfor" is specified in its configuration. HAProxy may also offer a
+nice protection to Apache when exposed to the internet, where it will better
+resist a wide number of types of DoS attacks.
+
+
+4.2. NGINX
+----------
+
+NGINX is the second de-facto standard HTTP server. Just like Apache, it covers a
+wide range of features. NGINX is built on a similar model as HAProxy so it has
+no problem dealing with tens of thousands of concurrent connections. When used
+as a gateway to some applications (e.g. using the included PHP FPM) it can often
+be beneficial to set up some frontend connection limiting to reduce the load
+on the PHP application. HAProxy will clearly be useful there both as a regular
+load balancer and as the traffic regulator to speed up PHP by decongesting
+it. Also since both products use very little CPU thanks to their event-driven
+architecture, it's often easy to install both of them on the same system. NGINX
+implements HAProxy's PROXY protocol, thus it is easy for HAProxy to pass the
+client's connection information to NGINX so that the application gets all the
+relevant information. Some benchmarks have also shown that for large static
+file serving, implementing consistent hash on HAProxy in front of NGINX can be
+beneficial by optimizing the OS' cache hit ratio, which is basically multiplied
+by the number of server nodes.
+
+
+4.3. Varnish
+------------
+
+Varnish is a smart caching reverse-proxy, probably best described as a web
+application accelerator. Varnish doesn't implement SSL/TLS and wants to dedicate
+all of its CPU cycles to what it does best. Varnish also implements HAProxy's
+PROXY protocol so that HAProxy can very easily be deployed in front of Varnish
+as an SSL offloader as well as a load balancer and pass it all relevant client
+information. Also, Varnish naturally supports decompression from the cache when
+a server has provided a compressed object, but doesn't compress however. HAProxy
+can then be used to compress outgoing data when backend servers do not implement
+compression, though it's rarely a good idea to compress on the load balancer
+unless the traffic is low.
+
+When building large caching farms across multiple nodes, HAProxy can make use of
+consistent URL hashing to intelligently distribute the load to the caching nodes
+and avoid cache duplication, resulting in a total cache size which is the sum of
+all caching nodes. In addition, caching of very small dumb objects for a short
+duration on HAProxy can sometimes save network round trips and reduce the CPU
+load on both the HAProxy and the Varnish nodes. This is only possible if no
+processing is done on these objects on Varnish (this is often referred to as
+the notion of "favicon cache", by which a sizeable percentage of useless
+downstream requests can sometimes be avoided). However do not enable HAProxy
+caching for a long time (more than a few seconds) in front of any other cache,
+that would significantly complicate troubleshooting without providing really
+significant savings.
+
+
+4.4. Alternatives
+-----------------
+
+Linux Virtual Server (LVS or IPVS) is the layer 4 load balancer included within
+the Linux kernel. It works at the packet level and handles TCP and UDP. In most
+cases it's more a complement than an alternative since it doesn't have layer 7
+knowledge at all.
+
+Pound is another well-known load balancer. It's much simpler and has much less
+features than HAProxy but for many very basic setups both can be used. Its
+author has always focused on code auditability first and wants to maintain the
+set of features low. Its thread-based architecture scales less well with high
+connection counts, but it's a good product.
+
+Pen is a quite light load balancer. It supports SSL, maintains persistence using
+a fixed-size table of its clients' IP addresses. It supports a packet-oriented
+mode allowing it to support direct server return and UDP to some extents. It is
+meant for small loads (the persistence table only has 2048 entries).
+
+NGINX can do some load balancing to some extents, though it's clearly not its
+primary function. Production traffic is used to detect server failures, the
+load balancing algorithms are more limited, and the stickiness is very limited.
+But it can make sense in some simple deployment scenarios where it is already
+present. The good thing is that since it integrates very well with HAProxy,
+there's nothing wrong with adding HAProxy later when its limits have been
+reached.
+
+Varnish also does some load balancing of its backend servers and does support
+real health checks. It doesn't implement stickiness however, so just like with
+NGINX, as long as stickiness is not needed that can be enough to start with.
+And similarly, since HAProxy and Varnish integrate so well together, it's easy
+to add it later into the mix to complement the feature set.
+
+
+5. Contacts
+-----------
+
+If you want to contact the developers or any community member about anything,
+the best way to do it usually is via the mailing list by sending your message
+to haproxy@formilux.org. Please note that this list is public and its archives
+are public as well so you should avoid disclosing sensitive information. A
+thousand of users of various experience levels are present there and even the
+most complex questions usually find an optimal response relatively quickly.
+Suggestions are welcome too. For users having difficulties with e-mail, a
+Discourse platform is available at http://discourse.haproxy.org/ . However
+please keep in mind that there are less people reading questions there and that
+most are handled by a really tiny team. In any case, please be patient and
+respectful with those who devote their spare time helping others.
+
+If you believe you've found a bug but are not sure, it's best reported on the
+mailing list. If you're quite convinced you've found a bug, that your version
+is up-to-date in its branch, and you already have a GitHub account, feel free
+to go directly to https://github.com/haproxy/haproxy/ and file an issue with
+all possibly available details. Again, this is public so be careful not to post
+information you might later regret. Since the issue tracker presents itself as
+a very long thread, please avoid pasting very long dumps (a few hundreds lines
+or more) and attach them instead.
+
+If you've found what you're absolutely certain can be considered a critical
+security issue that would put many users in serious trouble if discussed in a
+public place, then you can send it with the reproducer to security@haproxy.org.
+A small team of trusted developers will receive it and will be able to propose
+a fix. We usually don't use embargoes and once a fix is available it gets
+merged. In some rare circumstances it can happen that a release is coordinated
+with software vendors. Please note that this process usually messes up
+everyone's work, and that rushed up releases can sometimes introduce new bugs,
+so it's best avoided unless strictly necessary; as such, there is often little
+consideration for reports that needlessly cause such extra burden, and the best
+way to see your work credited usually is to provide a working fix, which will
+appear in changelogs.
diff --git a/doc/lgpl.txt b/doc/lgpl.txt
new file mode 100644
index 0000000..5ab7695
--- /dev/null
+++ b/doc/lgpl.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/doc/linux-syn-cookies.txt b/doc/linux-syn-cookies.txt
new file mode 100644
index 0000000..ca13066
--- /dev/null
+++ b/doc/linux-syn-cookies.txt
@@ -0,0 +1,106 @@
+SYN cookie analysis on 3.10
+
+include/net/request_sock.h:
+
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+ return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+include/net/inet_connection_sock.h:
+
+static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
+{
+ return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
+}
+
+max_qlen_log is computed to equal log2(min(min(listen_backlog, somaxconn), sysctl_max_syn_backlog)),
+and this is done this way following this path :
+
+ socket.c:listen(fd, backlog) :
+
+ backlog = min(backlog, somaxconn)
+ => af_inet.c:inet_listen(sock, backlog)
+
+ => inet_connection_sock.c:inet_csk_listen_start(sk, backlog)
+
+ sk_max_ack_backlog = backlog
+ => request_sock.c:reqsk_queue_alloc(sk, backlog (=nr_table_entries))
+
+ nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
+ nr_table_entries = max_t(u32, nr_table_entries, 8);
+ nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
+ for (lopt->max_qlen_log = 3;
+ (1 << lopt->max_qlen_log) < nr_table_entries;
+ lopt->max_qlen_log++);
+
+
+tcp_ipv4.c:tcp_v4_conn_request()
+ - inet_csk_reqsk_queue_is_full() returns true when the listening socket's
+ qlen is larger than 1 << max_qlen_log, so basically qlen >= min(backlog,max_backlog)
+
+ - tcp_syn_flood_action() returns true when sysctl_tcp_syncookies is set. It
+ also emits a warning once per listening socket when activating the feature.
+
+ if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
+ if (!want_cookie)
+ goto drop;
+ }
+
+ => when the socket's current backlog is >= min(backlog,max_backlog),
+ either tcp_syn_cookies is set so we set want_cookie to 1, or we drop.
+
+
+ /* Accept backlog is full. If we have already queued enough
+ * of warm entries in syn queue, drop request. It is better than
+ * clogging syn queue with openreqs with exponentially increasing
+ * timeout.
+ */
+
+sock.h:sk_acceptq_is_full() = sk_ack_backlog > sk_max_ack_backlog
+ = sk_ack_backlog > min(somaxconn, listen_backlog)
+
+ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+ goto drop;
+ }
+
+====> the following algorithm is applied in the reverse order but with these
+ priorities :
+
+ 1) IF socket's accept queue >= min(somaxconn, listen_backlog) THEN drop
+
+ 2) IF socket's SYN backlog < min(somaxconn, listen_backlog, tcp_max_syn_backlog) THEN accept
+
+ 3) IF tcp_syn_cookies THEN send_syn_cookie
+
+ 4) otherwise drop
+
+====> the problem is the accept queue being filled, but it's supposed to be
+ filled only with validated client requests (step 1).
+
+
+
+ req = inet_reqsk_alloc(&tcp_request_sock_ops);
+ if (!req)
+ goto drop;
+
+ ...
+ if (!sysctl_tcp_syncookies &&
+ (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+ (sysctl_max_syn_backlog >> 2)) &&
+ !tcp_peer_is_proven(req, dst, false)) {
+ /* Without syncookies last quarter of
+ * backlog is filled with destinations,
+ * proven to be alive.
+ * It means that we continue to communicate
+ * to destinations, already remembered
+ * to the moment of synflood.
+ */
+ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+ &saddr, ntohs(tcp_hdr(skb)->source));
+ goto drop_and_release;
+ }
+
+
diff --git a/doc/lua-api/Makefile b/doc/lua-api/Makefile
new file mode 100644
index 0000000..b21857d
--- /dev/null
+++ b/doc/lua-api/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/haproxy-lua.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/haproxy-lua.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/haproxy-lua"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/haproxy-lua"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/lua-api/_static/channel.fig b/doc/lua-api/_static/channel.fig
new file mode 100644
index 0000000..8a6c0a1
--- /dev/null
+++ b/doc/lua-api/_static/channel.fig
@@ -0,0 +1,55 @@
+#FIG 3.2 Produced by xfig version 3.2.5b
+Landscape
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+1 1 0 1 0 7 50 -1 -1 0.000 1 0.0000 4500 1620 1260 585 4500 1620 5760 2205
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+ 1170 1350 1170 1890 2790 1890 2790 2070 3240 1620 2790 1170
+ 2790 1350 1170 1350
+2 3 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 8
+ 5760 1350 5760 1890 7380 1890 7380 2070 7830 1620 7380 1170
+ 7380 1350 5760 1350
+2 1 1 1 0 7 50 -1 -1 1.000 0 0 -1 1 0 2
+ 5 1 1.00 60.00 120.00
+ 6210 540 6210 1440
+2 1 1 1 0 7 50 -1 -1 1.000 0 0 -1 1 0 2
+ 5 1 1.00 60.00 120.00
+ 6210 2340 6210 1800
+2 1 1 1 0 7 50 -1 -1 1.000 0 0 -1 1 0 2
+ 5 1 1.00 60.00 120.00
+ 1350 2520 1350 1800
+2 1 1 1 0 7 50 -1 -1 1.000 0 0 -1 1 0 2
+ 5 1 1.00 60.00 120.00
+ 1350 360 1350 1440
+3 0 1 1 0 7 50 -1 -1 1.000 0 0 1 5
+ 5 1 1.00 60.00 120.00
+ 2970 1665 3105 1125 3330 900 3600 765 3915 720
+ 0.000 1.000 1.000 1.000 0.000
+3 0 1 1 0 7 50 -1 -1 1.000 0 0 1 5
+ 5 1 1.00 60.00 120.00
+ 6030 1665 5895 1125 5670 900 5400 765 5040 720
+ 0.000 1.000 1.000 1.000 0.000
+4 2 0 50 -1 16 12 0.0000 4 195 750 1080 1665 producer\001
+4 1 0 50 -1 16 12 0.0000 4 195 1785 4500 1575 HAProxy processing\001
+4 1 0 50 -1 16 12 0.0000 4 195 1260 4500 1815 (including Lua)\001
+4 0 0 50 -1 16 12 0.0000 4 105 855 7920 1665 consumer\001
+4 0 0 50 -1 12 12 0.0000 4 150 600 1440 2205 set()\001
+4 0 0 50 -1 12 12 0.0000 4 165 960 1440 2400 append()\001
+4 0 0 50 -1 16 12 0.0000 4 150 1260 1260 2700 write functions\001
+4 0 0 50 -1 16 12 0.0000 4 150 1230 1260 315 read functions\001
+4 0 0 50 -1 12 12 0.0000 4 165 600 1440 540 dup()\001
+4 0 0 50 -1 12 12 0.0000 4 165 600 1440 735 get()\001
+4 0 0 50 -1 12 12 0.0000 4 165 1200 1440 930 get_line()\001
+4 0 0 50 -1 12 12 0.0000 4 165 1440 1440 1125 get_in_len()\001
+4 1 0 50 -1 12 12 0.0000 4 150 1080 4500 765 forward()\001
+4 0 0 50 -1 16 12 0.0000 4 150 1260 6120 495 write functions\001
+4 0 0 50 -1 12 12 0.0000 4 150 720 6300 1110 send()\001
+4 0 0 50 -1 12 12 0.0000 4 165 1560 6255 2205 get_out_len()\001
+4 0 0 50 -1 16 12 0.0000 4 150 1230 6120 2520 read functions\001
+4 1 0 50 -1 16 12 0.0000 4 150 1650 4500 315 both side functions\001
+4 1 0 50 -1 12 12 0.0000 4 150 1080 4500 540 is_full()\001
diff --git a/doc/lua-api/_static/channel.png b/doc/lua-api/_static/channel.png
new file mode 100644
index 0000000..e12a26e
--- /dev/null
+++ b/doc/lua-api/_static/channel.png
Binary files differ
diff --git a/doc/lua-api/conf.py b/doc/lua-api/conf.py
new file mode 100644
index 0000000..fd7e0ee
--- /dev/null
+++ b/doc/lua-api/conf.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+#
+# haproxy-lua documentation build configuration file, created by
+# sphinx-quickstart on Tue Mar 10 11:15:09 2015.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'haproxy-lua'
+copyright = u'2015, Thierry FOURNIER'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '1.0'
+# The full version, including alpha/beta/rc tags.
+release = '1.0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'haproxy-luadoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'haproxy-lua.tex', u'haproxy-lua Documentation',
+ u'Thierry FOURNIER', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'haproxy-lua', u'haproxy-lua Documentation',
+ [u'Thierry FOURNIER'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'haproxy-lua', u'haproxy-lua Documentation',
+ u'Thierry FOURNIER', 'haproxy-lua', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
diff --git a/doc/lua-api/index.rst b/doc/lua-api/index.rst
new file mode 100644
index 0000000..e8df63e
--- /dev/null
+++ b/doc/lua-api/index.rst
@@ -0,0 +1,4491 @@
+.. toctree::
+ :maxdepth: 2
+
+
+How Lua runs in HAProxy
+=======================
+
+HAProxy Lua running contexts
+----------------------------
+
+The Lua code executed in HAProxy can be processed in 2 main modes. The first one
+is the **initialisation mode**, and the second is the **runtime mode**.
+
+* In the **initialisation mode**, we can perform DNS solves, but we cannot
+ perform socket I/O. In this initialisation mode, HAProxy is still blocked
+ during the execution of the Lua program.
+
+* In the **runtime mode**, we cannot perform DNS solves, but we can use sockets.
+ The execution of the Lua code is multiplexed with the requests processing, so
+ the Lua code seems to be run in blocking, but it is not the case.
+
+The Lua code is loaded in one or more files. These files contain main code and
+functions. Lua has 8 execution contexts.
+
+1. The Lua file **body context**. It is executed during the load of the Lua file
+ in the HAProxy `[global]` section with the directive `lua-load`. It is
+ executed in initialisation mode. This section is used for configuring Lua
+ bindings in HAProxy.
+
+2. The Lua **init context**. It is a Lua function executed just after the
+ HAProxy configuration parsing. The execution is in initialisation mode. In
+ this context the HAProxy environment is already initialized. It is useful to
+ check the configuration, or to initialize socket connections or tasks. These
+ functions are declared in the body context with the Lua function
+ `core.register_init()`. The prototype of the function is a simple function
+ without return value and without parameters, like this: `function fcn()`.
+
+3. The Lua **task context**. It is a Lua function executed after the start
+ of the HAProxy scheduler, and just after the declaration of the task with the
+ Lua function `core.register_task()`. This context can be concurrent with the
+ traffic processing. It is executed in runtime mode. The prototype of the
+ function is a simple function without return value and without parameters,
+ like this: `function fcn()`.
+
+4. The **action context**. It is a Lua function conditionally executed. These
+ actions are registered by the Lua directives "`core.register_action()`". The
+ prototype of the called Lua function is a function that doesn't return
+ anything and that takes an object of class TXN as input. `function fcn(txn)`.
+
+5. The **sample-fetch context**. This function takes a TXN object as entry
+ argument and returns a string. These types of function cannot execute any
+ blocking function. They are useful to aggregate some of original HAProxy
+ sample-fetches and return the result. The prototype of the function is
+ `function string fcn(txn)`. These functions can be registered with the Lua
+ function `core.register_fetches()`. Each declared sample-fetch is prefixed by
+ the string "lua.".
+
+ .. note::
+ It is possible that this function cannot find the required data in the
+ original HAProxy sample-fetches, in this case, it cannot return the
+ result. This case is not yet supported
+
+6. The **converter context**. It is a Lua function that takes a string as input
+ and returns another string as output. These types of function are stateless,
+ they cannot access any context. They don't execute any blocking function.
+ The call prototype is `function string fcn(string)`. This function can be
+ registered with the Lua function `core.register_converters()`. Each declared
+ converter is prefixed by the string "lua.".
+
+7. The **filter context**: It is a Lua object based on a class defining filter
+ callback functions. Lua filters are registered using
+ `core.register_filter()`. Each declared filter is prefixed by the string
+ "lua.".
+
+8. The **event context**: Inside a function that handles events subscribed
+ through `core.event_sub()` or `Server.event_sub()`.
+
+
+HAProxy Lua Hello world
+-----------------------
+
+HAProxy configuration file (`hello_world.conf`):
+
+::
+
+ global
+ lua-load hello_world.lua
+
+ listen proxy
+ bind 127.0.0.1:10001
+ tcp-request inspect-delay 1s
+ tcp-request content use-service lua.hello_world
+
+HAProxy Lua file (`hello_world.lua`):
+
+.. code-block:: lua
+
+ core.register_service("hello_world", "tcp", function(applet)
+ applet:send("hello world\n")
+ end)
+
+How to start HAProxy for testing this configuration:
+
+::
+
+ ./haproxy -f hello_world.conf
+
+On other terminal, you can test with telnet:
+
+::
+
+ #:~ telnet 127.0.0.1 10001
+ hello world
+
+Usage of load parameters
+------------------------
+
+HAProxy lua-load(-per-thread) directives allow a list of parameters after
+the lua file name. These parameters are accessible through an array of args
+using this code `local args = table.pack(...)` in the body of loaded file.
+
+Below, a new version of the hello world using load parameters
+
+HAProxy configuration file (`hello_world.conf`):
+
+::
+
+ global
+ lua-load hello_world.lua "this is not an hello world"
+
+ listen proxy
+ bind 127.0.0.1:10001
+ tcp-request inspect-delay 1s
+ tcp-request content use-service lua.hello_world
+
+HAProxy Lua file (`hello_world.lua`):
+
+.. code-block:: lua
+
+ local args = table.pack(...)
+
+ core.register_service("hello_world", "tcp", function(applet)
+ applet:send(args[1] .. "\n")
+ end)
+
+
+Core class
+==========
+
+.. js:class:: core
+
+ The "core" class contains all the HAProxy core functions. These functions are
+ useful for controlling the execution flow, registering hooks,
+ manipulating global maps or ACLs, ...
+
+ "core" class is basically provided with HAProxy. No `require` line is
+ required to use these functions.
+
+ The "core" class is static, it is not possible to create a new object of this
+ type.
+
+.. js:attribute:: core.emerg
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "emergency" (0).
+
+.. js:attribute:: core.alert
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "alert" (1).
+
+.. js:attribute:: core.crit
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "critical" (2).
+
+.. js:attribute:: core.err
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "error" (3).
+
+.. js:attribute:: core.warning
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "warning" (4).
+
+.. js:attribute:: core.notice
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "notice" (5).
+
+.. js:attribute:: core.info
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "info" (6).
+
+.. js:attribute:: core.debug
+
+ :returns: integer
+
+ This attribute is an integer, it contains the value of the loglevel
+ "debug" (7).
+
+.. js:attribute:: core.proxies
+
+ **context**: init, task, action, sample-fetch, converter
+
+ This attribute is a table of declared proxies (frontend and backends). Each
+ proxy gives access to its list of listeners and servers. The table is
+ indexed by proxy name, and each entry is of type :ref:`proxy_class`.
+
+ .. Warning::
+ if you declared a frontend and backend with the same name, only one of
+ them will be listed.
+
+ :see: :js:attr:`core.backends`
+ :see: :js:attr:`core.frontends`
+
+.. js:attribute:: core.backends
+
+ **context**: init, task, action, sample-fetch, converter
+
+ This attribute is a table of declared proxies with backend capability. Each
+ proxy gives access to its list of listeners and servers. The table is
+ indexed by the backend name, and each entry is of type :ref:`proxy_class`.
+
+ :see: :js:attr:`core.proxies`
+ :see: :js:attr:`core.frontends`
+
+.. js:attribute:: core.frontends
+
+ **context**: init, task, action, sample-fetch, converter
+
+ This attribute is a table of declared proxies with frontend capability. Each
+ proxy gives access to its list of listeners and servers. The table is
+ indexed by the frontend name, and each entry is of type :ref:`proxy_class`.
+
+ :see: :js:attr:`core.proxies`
+ :see: :js:attr:`core.backends`
+
+.. js:attribute:: core.thread
+
+ **context**: task, action, sample-fetch, converter, applet
+
+ This variable contains the executing thread number starting at 1. 0 is a
+ special case for the common lua context. So, if thread is 0, Lua scope is
+ shared by all threads, otherwise the scope is dedicated to a single thread.
+ A program which needs to execute some parts exactly once regardless of the
+ number of threads can check that core.thread is 0 or 1.
+
+.. js:function:: core.log(loglevel, msg)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ This function sends a log. The log is sent, according with the HAProxy
+ configuration file, to the loggers relevant to the current context and/or
+ to stderr if it is allowed.
+
+ The exact behaviour depends on tune.lua.log.loggers and tune.lua.log.stderr.
+
+ :param integer loglevel: Is the log level associated with the message. It is a
+ number between 0 and 7.
+ :param string msg: The log content.
+ :see: :js:attr:`core.emerg`, :js:attr:`core.alert`, :js:attr:`core.crit`,
+ :js:attr:`core.err`, :js:attr:`core.warning`, :js:attr:`core.notice`,
+ :js:attr:`core.info`, :js:attr:`core.debug` (log level definitions)
+ :see: :js:func:`core.Debug`
+ :see: :js:func:`core.Info`
+ :see: :js:func:`core.Warning`
+ :see: :js:func:`core.Alert`
+
+.. js:function:: core.Debug(msg)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param string msg: The log content.
+ :see: :js:func:`core.log`
+
+ Does the same job than:
+
+.. code-block:: lua
+
+ function Debug(msg)
+ core.log(core.debug, msg)
+ end
+..
+
+.. js:function:: core.Info(msg)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param string msg: The log content.
+ :see: :js:func:`core.log`
+
+.. code-block:: lua
+
+ function Info(msg)
+ core.log(core.info, msg)
+ end
+..
+
+.. js:function:: core.Warning(msg)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param string msg: The log content.
+ :see: :js:func:`core.log`
+
+.. code-block:: lua
+
+ function Warning(msg)
+ core.log(core.warning, msg)
+ end
+..
+
+.. js:function:: core.Alert(msg)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param string msg: The log content.
+ :see: :js:func:`core.log`
+
+.. code-block:: lua
+
+ function Alert(msg)
+ core.log(core.alert, msg)
+ end
+..
+
+.. js:function:: core.add_acl(filename, key)
+
+ **context**: init, task, action, sample-fetch, converter
+
+ Add the ACL *key* in the ACLs list referenced by the file *filename*.
+
+ :param string filename: the filename that reference the ACL entries.
+ :param string key: the key which will be added.
+
+.. js:function:: core.del_acl(filename, key)
+
+ **context**: init, task, action, sample-fetch, converter
+
+ Delete the ACL entry referenced by the key *key* in the list of ACLs
+ referenced by *filename*.
+
+ :param string filename: the filename that reference the ACL entries.
+ :param string key: the key which will be deleted.
+
+.. js:function:: core.del_map(filename, key)
+
+ **context**: init, task, action, sample-fetch, converter
+
+ Delete the map entry indexed with the specified key in the list of maps
+ referenced by his filename.
+
+ :param string filename: the filename that reference the map entries.
+ :param string key: the key which will be deleted.
+
+.. js:function:: core.get_info()
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ Returns HAProxy core information. We can find information like the uptime,
+ the pid, memory pool usage, tasks number, ...
+
+ This information is also returned by the management socket via the command
+ "show info". See the management socket documentation for more information
+ about the content of these variables.
+
+ :returns: an array of values.
+
+.. js:function:: core.get_var()
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ Returns the data stored in the variable <var>, converted to a Lua type.
+ This is limited to "proc." scoped variables.
+
+ :param string var: The variable name in "proc." scope according with the
+ HAProxy variable syntax.
+
+.. js:function:: core.now()
+
+ **context**: body, init, task, action
+
+ This function returns the current time. The time returned is fixed by the
+ HAProxy core and ensures that the time will be monotonic and that the system
+ call 'gettimeofday' will not be called either. The time is refreshed between
+ each Lua execution or resume, so two consecutive calls to the function "now"
+ will probably return the same result.
+
+ :returns: a table which contains two entries "sec" and "usec". "sec"
+ contains the current time in epoch format, and "usec" contains the
+ current microseconds.
+
+.. js:function:: core.http_date(date)
+
+ **context**: body, init, task, action
+
+ This function takes a string representing an http date, and returns an
+ integer containing the corresponding date in epoch format. A valid http
+ date must respect the IMF, RFC850 or ASCTIME format.
+
+ :param string date: a date http-date formatted
+ :returns: integer containing epoch date
+ :see: :js:func:`core.imf_date`.
+ :see: :js:func:`core.rfc850_date`.
+ :see: :js:func:`core.asctime_date`.
+ :see: https://tools.ietf.org/html/rfc7231#section-7.1.1.1
+
+.. js:function:: core.imf_date(date)
+
+ **context**: body, init, task, action
+
+ This function takes a string representing an IMF date, and returns an integer
+ containing the corresponding date in epoch format.
+
+ :param string date: a date IMF formatted
+ :returns: integer containing epoch date
+ :see: https://tools.ietf.org/html/rfc7231#section-7.1.1.1
+
+ The IMF format is like this:
+
+.. code-block:: text
+
+ Sun, 06 Nov 1994 08:49:37 GMT
+..
+
+.. js:function:: core.rfc850_date(date)
+
+ **context**: body, init, task, action
+
+ This function takes a string representing an RFC850 date, and returns an
+ integer containing the corresponding date in epoch format.
+
+ :param string date: a date RFC850 formatted
+ :returns: integer containing epoch date
+ :see: https://tools.ietf.org/html/rfc7231#section-7.1.1.1
+
+ The RFC850 format is like this:
+
+.. code-block:: text
+
+ Sunday, 06-Nov-94 08:49:37 GMT
+..
+
+.. js:function:: core.asctime_date(date)
+
+ **context**: body, init, task, action
+
+ This function takes a string representing an ASCTIME date, and returns an
+ integer containing the corresponding date in epoch format.
+
+ :param string date: a date ASCTIME formatted
+ :returns: integer containing epoch date
+ :see: https://tools.ietf.org/html/rfc7231#section-7.1.1.1
+
+ The ASCTIME format is like this:
+
+.. code-block:: text
+
+ Sun Nov 6 08:49:37 1994
+..
+
+.. js:function:: core.msleep(milliseconds)
+
+ **context**: body, init, task, action
+
+ The `core.msleep()` stops the Lua execution between specified milliseconds.
+
+ :param integer milliseconds: the required milliseconds.
+
+.. js:function:: core.register_action(name, actions, func [, nb_args])
+
+ **context**: body
+
+ Register a Lua function executed as action. All the registered action can be
+ used in HAProxy with the prefix "lua.". An action gets a TXN object class as
+ input.
+
+ :param string name: is the name of the action.
+ :param table actions: is a table of string describing the HAProxy actions
+ facilities where to expose the new action. Expected facilities are:
+ 'tcp-req', 'tcp-res', 'http-req', 'http-res', 'http-after-res'.
+ :param function func: is the Lua function called to work as an action.
+ :param integer nb_args: is the expected number of arguments for the action.
+ By default the value is 0.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(txn [, arg1 [, arg2]])
+..
+
+ * **txn** (:ref:`txn_class`): this is a TXN object used for manipulating the
+ current request or TCP stream.
+
+ * **argX**: this is argument provided through the HAProxy configuration file.
+
+ Here, an example of action registration. The action just sends a
+ 'Hello world' in the logs.
+
+.. code-block:: lua
+
+ core.register_action("hello-world", { "tcp-req", "http-req" }, function(txn)
+ txn:Info("Hello world")
+ end)
+..
+
+ This example code is used in HAProxy configuration like this:
+
+::
+
+ frontend tcp_frt
+ mode tcp
+ tcp-request content lua.hello-world
+
+ frontend http_frt
+ mode http
+ http-request lua.hello-world
+
+..
+
+ A second example using arguments
+
+.. code-block:: lua
+
+ function hello_world(txn, arg)
+ txn:Info("Hello world for " .. arg)
+ end
+ core.register_action("hello-world", { "tcp-req", "http-req" }, hello_world, 2)
+
+..
+
+ This example code is used in HAProxy configuration like this:
+
+::
+
+ frontend tcp_frt
+ mode tcp
+ tcp-request content lua.hello-world everybody
+
+..
+
+.. js:function:: core.register_converters(name, func)
+
+ **context**: body
+
+ Register a Lua function executed as converter. All the registered converters
+ can be used in HAProxy with the prefix "lua.". A converter gets a string as
+ input and returns a string as output. The registered function can take up to 9
+ values as parameter. All the values are strings.
+
+ :param string name: is the name of the converter.
+ :param function func: is the Lua function called to work as converter.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(str, [p1 [, p2 [, ... [, p5]]]])
+..
+
+ * **str** (*string*): this is the input value automatically converted in
+ string.
+ * **p1** .. **p5** (*string*): this is a list of string arguments declared in
+ the HAProxy configuration file. The number of arguments doesn't exceed 5.
+ The order and the nature of these is conventionally chosen by the
+ developer.
+
+.. js:function:: core.register_fetches(name, func)
+
+ **context**: body
+
+ Register a Lua function executed as sample fetch. All the registered sample
+ fetch can be used in HAProxy with the prefix "lua.". A Lua sample fetch
+ returns a string as output. The registered function can take up to 9 values as
+ parameter. All the values are strings.
+
+ :param string name: is the name of the sample fetch.
+ :param function func: is the Lua function called to work as sample fetch.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ string function(txn, [p1 [, p2 [, ... [, p5]]]])
+..
+
+ * **txn** (:ref:`txn_class`): this is the txn object associated with the
+ current request.
+ * **p1** .. **p5** (*string*): this is a list of string arguments declared in
+ the HAProxy configuration file. The number of arguments doesn't exceed 5.
+ The order and the nature of these is conventionally chosen by the
+ developer.
+ * **Returns**: A string containing some data, or nil if the value cannot be
+ returned now.
+
+ lua example code:
+
+.. code-block:: lua
+
+ core.register_fetches("hello", function(txn)
+ return "hello"
+ end)
+..
+
+ HAProxy example configuration:
+
+::
+
+ frontend example
+ http-request redirect location /%[lua.hello]
+
+.. js:function:: core.register_filter(name, Flt, func)
+
+ **context**: body
+
+ Register a Lua function used to declare a filter. All the registered filters
+ can be used in HAProxy with the prefix "lua.".
+
+ :param string name: is the name of the filter.
+ :param table Flt: is a Lua class containing the filter definition (id, flags,
+ callbacks).
+ :param function func: is the Lua function called to create the Lua filter.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(flt, args)
+..
+
+ * **flt** : Is a filter object based on the class provided in
+ :js:func:`core.register_filter()` function.
+
+ * **args**: Is a table of strings containing all arguments provided through
+ the HAProxy configuration file, on the filter line.
+
+ It must return the filter to use or nil to ignore it. Here, an example of
+ filter registration.
+
+.. code-block:: lua
+
+ core.register_filter("my-filter", MyFilter, function(flt, args)
+ flt.args = args -- Save arguments
+ return flt
+ end)
+..
+
+ This example code is used in HAProxy configuration like this:
+
+::
+
+ frontend http
+ mode http
+ filter lua.my-filter arg1 arg2 arg3
+
+..
+
+ :see: :js:class:`Filter`
+
+.. js:function:: core.register_service(name, mode, func)
+
+ **context**: body
+
+ Register a Lua function executed as a service. All the registered services
+ can be used in HAProxy with the prefix "lua.". A service gets an object class
+ as input according to the required mode.
+
+ :param string name: is the name of the service.
+ :param string mode: is string describing the required mode. Only 'tcp' or
+ 'http' are allowed.
+ :param function func: is the Lua function called to work as service.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(applet)
+..
+
+ * **applet** *applet* will be a :ref:`applettcp_class` or a
+ :ref:`applethttp_class`. It depends on the type of registered applet. An
+ applet registered with the 'http' value for the *mode* parameter will get a
+ :ref:`applethttp_class`. If the *mode* value is 'tcp', the applet will get
+ a :ref:`applettcp_class`.
+
+ .. warning::
+ Applets of type 'http' cannot be called from 'tcp-*' rulesets. Only the
+ 'http-*' rulesets are authorized, this means that it is not possible to
+ call a HTTP applet from a proxy in tcp mode. Applets of type 'tcp' can be
+ called from anywhere.
+
+ Here, an example of service registration. The service just sends a
+ 'Hello world' as an http response.
+
+.. code-block:: lua
+
+ core.register_service("hello-world", "http", function(applet)
+ local response = "Hello World !"
+ applet:set_status(200)
+ applet:add_header("content-length", string.len(response))
+ applet:add_header("content-type", "text/plain")
+ applet:start_response()
+ applet:send(response)
+ end)
+..
+
+ This example code is used in HAProxy configuration like this:
+
+::
+
+ frontend example
+ http-request use-service lua.hello-world
+
+.. js:function:: core.register_init(func)
+
+ **context**: body
+
+ Register a function executed after the configuration parsing. This is useful
+ to check any parameters.
+
+ :param function func: is the Lua function called to work as initializer.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function()
+..
+
+ It takes no input, and no output is expected.
+
+.. js:function:: core.register_task(func[, arg1[, arg2[, ...[, arg4]]]])
+
+ **context**: body, init, task, action, sample-fetch, converter, event
+
+ Register and start an independent task. The task is started when the HAProxy
+ main scheduler starts. For example this type of tasks can be executed to
+ perform complex health checks.
+
+ :param function func: is the Lua function called to work as an async task.
+
+ Up to 4 optional arguments (all types supported) may be passed to the
+ function. (They will be passed as-is to the task function)
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function([arg1[, arg2[, ...[, arg4]]]])
+..
+
+ It takes up to 4 optional arguments (provided when registering), and no
+ output is expected.
+
+ See also :js:func:`core.queue` to dynamically pass data between main context
+ and tasks or even between tasks.
+
+.. js:function:: core.register_cli([path], usage, func)
+
+ **context**: body
+
+ Register a custom cli that will be available from haproxy stats socket.
+
+ :param array path: is the sequence of words for which the cli executes the
+ Lua binding.
+ :param string usage: is the usage message displayed in the help.
+ :param function func: is the Lua function called to handle the CLI commands.
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(AppletTCP, [arg1, [arg2, [...]]])
+..
+
+ I/O are managed with the :ref:`applettcp_class` object. Args are given as
+ parameter. The args embed the registered path. If the path is declared like
+ this:
+
+.. code-block:: lua
+
+ core.register_cli({"show", "ssl", "stats"}, "Display SSL stats..", function(applet, arg1, arg2, arg3, arg4, arg5)
+ end)
+..
+
+ And we execute this in the prompt:
+
+.. code-block:: text
+
+ > prompt
+ > show ssl stats all
+..
+
+ Then, arg1, arg2 and arg3 will contain respectively "show", "ssl" and
+ "stats".
+ arg4 will contain "all". arg5 contains nil.
+
+.. js:function:: core.set_nice(nice)
+
+ **context**: task, action, sample-fetch, converter
+
+ Change the nice of the current task or current session.
+
+ :param integer nice: the nice value, it must be between -1024 and 1024.
+
+.. js:function:: core.set_map(filename, key, value)
+
+ **context**: init, task, action, sample-fetch, converter
+
+ Set the value *value* associated to the key *key* in the map referenced by
+ *filename*.
+
+ :param string filename: the Map reference
+ :param string key: the key to set or replace
+ :param string value: the associated value
+
+.. js:function:: core.sleep(int seconds)
+
+ **context**: body, init, task, action
+
+ The `core.sleep()` function stops the Lua execution for the specified
+ number of seconds.
+
+ :param integer seconds: the required seconds.
+
+.. js:function:: core.tcp()
+
+ **context**: init, task, action
+
+ This function returns a new object of a *socket* class.
+
+ :returns: A :ref:`socket_class` object.
+
+.. js:function:: core.httpclient()
+
+ **context**: init, task, action
+
+ This function returns a new object of a *httpclient* class.
+
+ :returns: A :ref:`httpclient_class` object.
+
+.. js:function:: core.concat()
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ This function returns a new concat object.
+
+ :returns: A :ref:`concat_class` object.
+
+.. js:function:: core.queue()
+
+ **context**: body, init, task, event, action, sample-fetch, converter
+
+ This function returns a new queue object.
+
+ :returns: A :ref:`queue_class` object.
+
+.. js:function:: core.done(data)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param any data: Return some data for the caller. It is useful with
+ sample-fetches and sample-converters.
+
+ Immediately stops the current Lua execution and returns to the caller which
+ may be a sample fetch, a converter or an action and returns the specified
+ value (ignored for actions and init). It is used when the LUA process finishes
+ its work and wants to give back the control to HAProxy without executing the
+ remaining code. It can be seen as a multi-level "return".
+
+.. js:function:: core.yield()
+
+ **context**: task, action, sample-fetch, converter
+
+ Give back the hand at the HAProxy scheduler. It is used when the LUA
+ processing consumes a lot of processing time.
+
+.. js:function:: core.parse_addr(address)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param string address: is a string describing an ipv4 or ipv6 address and
+ optionally its network length, like this: "127.0.0.1/8" or "aaaa::1234/32".
+ :returns: a userdata containing network or nil if an error occurs.
+
+ Parse ipv4 or ipv6 addresses and its facultative associated network.
+
+.. js:function:: core.match_addr(addr1, addr2)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ :param addr1: is an address created with "core.parse_addr".
+ :param addr2: is an address created with "core.parse_addr".
+ :returns: boolean, true if the network of the addresses match, else returns
+ false.
+
+ Match two networks. For example "127.0.0.1/32" matches "127.0.0.0/8". The
+ order of network is not important.
+
+.. js:function:: core.tokenize(str, separators [, noblank])
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ This function is useful for tokenizing an entry, or splitting some messages.
+ :param string str: The string which will be split.
+ :param string separators: A string containing a list of separators.
+ :param boolean noblank: Ignore empty entries.
+ :returns: an array of string.
+
+ For example:
+
+.. code-block:: lua
+
+ local array = core.tokenize("This function is useful, for tokenizing an entry.", "., ", true)
+ print_r(array)
+..
+
+ Returns this array:
+
+.. code-block:: text
+
+ (table) table: 0x21c01e0 [
+ 1: (string) "This"
+ 2: (string) "function"
+ 3: (string) "is"
+ 4: (string) "useful"
+ 5: (string) "for"
+ 6: (string) "tokenizing"
+ 7: (string) "an"
+ 8: (string) "entry"
+ ]
+..
+
+.. js:function:: core.event_sub(event_types, func)
+
+ **context**: body, init, task, action, sample-fetch, converter
+
+ Register a function that will be called on specific system events.
+
+ :param array event_types: array of string containing the event types you want
+ to subscribe to
+ :param function func: is the Lua function called when one of the subscribed
+ events occur.
+ :returns: A :ref:`event_sub_class` object.
+ :see: :js:func:`Server.event_sub()`.
+
+ List of available event types :
+
+ **SERVER** Family:
+
+ * **SERVER_ADD**: when a server is added
+ * **SERVER_DEL**: when a server is removed
+ * **SERVER_DOWN**: when a server state goes from UP to DOWN
+ * **SERVER_UP**: when a server state goes from DOWN to UP
+ * **SERVER_STATE**: when a server state changes
+ * **SERVER_ADMIN**: when a server administrative state changes
+ * **SERVER_CHECK**: when a server's check status change is reported.
+ Be careful when subscribing to this type since many events might be
+ generated.
+
+ .. Note::
+ Use **SERVER** in **event_types** to subscribe to all server events types
+ at once. Note that this should only be used for testing purposes since a
+ single event source could result in multiple events types being generated.
+ (e.g.: SERVER_STATE will always be generated for each SERVER_DOWN or
+ SERVER_UP)
+
+ The prototype of the Lua function used as argument is:
+
+.. code-block:: lua
+
+ function(event, event_data, sub, when)
+..
+
+ * **event** (*string*): the event type (one of the **event_types** specified
+ when subscribing)
+ * **event_data**: specific to each event family (For **SERVER** family,
+ a :ref:`server_event_class` object)
+ * **sub**: class to manage the subscription from within the event
+ (a :ref:`event_sub_class` object)
+ * **when**: timestamp corresponding to the date when the event was generated.
+ It is an integer representing the number of seconds elapsed since Epoch.
+ It may be provided as optional argument to `os.date()` lua function to
+ convert it to a string according to a given format string.
+
+ .. Warning::
+ The callback function will only be scheduled on the very same thread that
+ performed the subscription.
+
+ Moreover, each thread treats events sequentially. It means that if you
+ have, let's say SERVER_UP followed by a SERVER_DOWN in a short timelapse,
+ then the cb function will first be called with SERVER_UP, and once it's
+ done handling the event, the cb function will be called again with
+ SERVER_DOWN.
+
+ This is to ensure event consistency when it comes to logging / triggering
+ logic from lua.
+
+ Your lua cb function may yield if needed, but you are advised to process
+ the event as fast as possible to prevent the event queue from growing up,
+ depending on the event flow that is expected for the given subscription.
+
+ To prevent abuses, if the event queue for the current subscription goes
+ over a certain amount of unconsumed events, the subscription will pause
+ itself automatically for as long as it takes for your handler to catch up.
+ This would lead to events being missed, so an error will be reported in the
+ logs to warn you about that.
+ This is not something you want to let happen too often, it may indicate
+ that you subscribed to an event that is occurring too frequently or/and
+ that your callback function is too slow to keep up the pace and you should
+ review it.
+
+ If you want to do some parallel processing because your callback functions
+ are slow: you might want to create subtasks from lua using
+ :js:func:`core.register_task()` from within your callback function to
+ perform the heavy job in a dedicated task and allow remaining events to be
+ processed more quickly.
+
+.. js:function:: core.disable_legacy_mailers()
+
+ **LEGACY**
+
+ **context**: body, init
+
+ Disable the sending of email alerts through the legacy email sending
+ function when mailers are used in the configuration.
+
+ Use this when sending email alerts directly from lua.
+
+ :see: :js:func:`Proxy.get_mailers()`
+
+.. _proxy_class:
+
+Proxy class
+============
+
+.. js:class:: Proxy
+
+ This class provides a way for manipulating proxy and retrieving information
+ like statistics.
+
+.. js:attribute:: Proxy.name
+
+ Contain the name of the proxy.
+
+ .. warning::
+ This attribute is now deprecated and will eventually be removed.
+ Please use :js:func:`Proxy.get_name()` function instead.
+
+.. js:function:: Proxy.get_name()
+
+ Returns the name of the proxy.
+
+.. js:attribute:: Proxy.uuid
+
+ Contain the unique identifier of the proxy.
+
+ .. warning::
+ This attribute is now deprecated and will eventually be removed.
+ Please use :js:func:`Proxy.get_uuid()` function instead.
+
+.. js:function:: Proxy.get_uuid()
+
+ Returns the unique identifier of the proxy.
+
+.. js:attribute:: Proxy.servers
+
+ Contain a table with the attached servers. The table is indexed by server
+ name, and each server entry is an object of type :ref:`server_class`.
+
+.. js:attribute:: Proxy.stktable
+
+ Contains a stick table object of type :ref:`sticktable_class` attached to the
+ proxy.
+
+.. js:attribute:: Proxy.listeners
+
+ Contain a table with the attached listeners. The table is indexed by
+ listener name, and each listener entry is an object of type
+ :ref:`listener_class`.
+
+.. js:function:: Proxy.pause(px)
+
+ Pause the proxy. See the management socket documentation for more information.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+
+.. js:function:: Proxy.resume(px)
+
+ Resume the proxy. See the management socket documentation for more
+ information.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+
+.. js:function:: Proxy.stop(px)
+
+ Stop the proxy. See the management socket documentation for more information.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+
+.. js:function:: Proxy.shut_bcksess(px)
+
+ Kill the session attached to a backup server. See the management socket
+ documentation for more information.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+
+.. js:function:: Proxy.get_cap(px)
+
+ Returns a string describing the capabilities of the proxy.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: a string "frontend", "backend", "proxy" or "ruleset".
+
+.. js:function:: Proxy.get_mode(px)
+
+ Returns a string describing the mode of the current proxy.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: a string "tcp", "http", "syslog" or "unknown"
+
+.. js:function:: Proxy.get_srv_act(px)
+
+ Returns the number of current active servers for the current proxy that are
+ eligible for LB.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: an integer
+
+.. js:function:: Proxy.get_srv_bck(px)
+
+ Returns the number of backup servers for the current proxy that are
+ eligible for LB.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: an integer
+
+.. js:function:: Proxy.get_stats(px)
+
+ Returns a table containing the proxy statistics. The statistics returned are
+ not the same if the proxy is frontend or a backend.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: a key/value table containing stats
+
+.. js:function:: Proxy.get_mailers(px)
+
+ **LEGACY**
+
+ Returns a table containing mailers config for the current proxy or nil
+ if mailers are not available for the proxy.
+
+ :param class_proxy px: A :ref:`proxy_class` which indicates the manipulated
+ proxy.
+ :returns: a :ref:`proxy_mailers_class` containing proxy mailers config
+
+.. _proxy_mailers_class:
+
+ProxyMailers class
+==================
+
+**LEGACY**
+
+.. js:class:: ProxyMailers
+
+ This class provides mailers config for a given proxy.
+
+ If sending emails directly from lua, please consider
+ :js:func:`core.disable_legacy_mailers()` to disable the email sending from
+ haproxy. (Or email alerts will be sent twice...)
+
+.. js:attribute:: ProxyMailers.track_server_health
+
+ Boolean set to true if the option "log-health-checks" is configured on
+ the proxy, meaning that all server checks event should trigger email alerts.
+
+.. js:attribute:: ProxyMailers.log_level
+
+ An integer, the maximum log level that triggers email alerts. It is a number
+ between 0 and 7 as defined by option "email-alert level".
+
+.. js:attribute:: ProxyMailers.mailservers
+
+ An array containing the list of mail servers that should receive email alerts.
+ Each array entry is a name:desc pair where desc represents the full server
+ address (including port) as described in haproxy's configuration file.
+
+.. js:attribute:: ProxyMailers.mailservers_timeout
+
+ An integer representing the maximum time in milliseconds to wait for the
+ email to be sent. See "timeout mail" directive from "mailers" section in
+ haproxy configuration file.
+
+.. js:attribute:: ProxyMailers.smtp_hostname
+
+ A string containing the hostname to use for the SMTP transaction.
+ (option "email-alert myhostname")
+
+.. js:attribute:: ProxyMailers.smtp_from
+
+ A string containing the "MAIL FROM" address to use for the SMTP transaction.
+ (option "email-alert from")
+
+.. js:attribute:: ProxyMailers.smtp_to
+
+ A string containing the "RCPT TO" address to use for the SMTP transaction.
+ (option "email-alert to")
+
+.. _server_class:
+
+Server class
+============
+
+.. js:class:: Server
+
+ This class provides a way for manipulating servers and retrieving information.
+
+.. js:attribute:: Server.name
+
+ Contain the name of the server.
+
+ .. warning::
+ This attribute is now deprecated and will eventually be removed.
+ Please use :js:func:`Server.get_name()` function instead.
+
+.. js:function:: Server.get_name(sv)
+
+ Returns the name of the server.
+
+.. js:attribute:: Server.puid
+
+ Contain the proxy unique identifier of the server.
+
+ .. warning::
+ This attribute is now deprecated and will eventually be removed.
+ Please use :js:func:`Server.get_puid()` function instead.
+
+.. js:function:: Server.get_puid(sv)
+
+ Returns the proxy unique identifier of the server.
+
+.. js:function:: Server.get_rid(sv)
+
+ Returns the rid (revision ID) of the server.
+ It is an unsigned integer that is set upon server creation. Value is derived
+ from a global counter that starts at 0 and is incremented each time one or
+ multiple server deletions are followed by a server addition (meaning that
+ old name/id reuse could occur).
+
+ Combining server name/id with server rid yields a process-wide unique
+ identifier.
+
+.. js:function:: Server.is_draining(sv)
+
+ Return true if the server is currently draining sticky connections.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: a boolean
+
+.. js:function:: Server.is_backup(sv)
+
+ Return true if the server is a backup server
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: a boolean
+
+.. js:function:: Server.is_dynamic(sv)
+
+ Return true if the server was instantiated at runtime (e.g.: from the cli)
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: a boolean
+
+.. js:function:: Server.get_cur_sess(sv)
+
+ Return the number of currently active sessions on the server
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: an integer
+
+.. js:function:: Server.get_pend_conn(sv)
+
+ Return the number of pending connections to the server
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: an integer
+
+.. js:function:: Server.set_maxconn(sv, maxconn)
+
+ Dynamically change the maximum connections of the server. See the management
+ socket documentation for more information about the format of the string.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :param string maxconn: A string describing the server maximum connections.
+
+.. js:function:: Server.get_maxconn(sv)
+
+ This function returns an integer representing the server maximum connections.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: an integer.
+
+.. js:function:: Server.set_weight(sv, weight)
+
+ Dynamically change the weight of the server. See the management socket
+ documentation for more information about the format of the string.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :param string weight: A string describing the server weight.
+
+.. js:function:: Server.get_weight(sv)
+
+ This function returns an integer representing the server weight.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: an integer.
+
+.. js:function:: Server.set_addr(sv, addr[, port])
+
+ Dynamically change the address of the server. See the management socket
+ documentation for more information about the format of the string.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :param string addr: A string describing the server address.
+
+.. js:function:: Server.get_addr(sv)
+
+ Returns a string describing the address of the server.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: A string
+
+.. js:function:: Server.get_stats(sv)
+
+ Returns server statistics.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: a key/value table containing stats
+
+.. js:function:: Server.get_proxy(sv)
+
+ Returns the parent proxy to which the server belongs.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: a :ref:`proxy_class` or nil if not available
+
+.. js:function:: Server.shut_sess(sv)
+
+ Shutdown all the sessions attached to the server. See the management socket
+ documentation for more information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.set_drain(sv)
+
+ Drain sticky sessions. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.set_maint(sv)
+
+ Set maintenance mode. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.set_ready(sv)
+
+ Set normal mode. See the management socket documentation for more information
+ about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.check_enable(sv)
+
+ Enable health checks. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.check_disable(sv)
+
+ Disable health checks. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.check_force_up(sv)
+
+ Force health-check up. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.check_force_nolb(sv)
+
+ Force health-check nolb mode. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.check_force_down(sv)
+
+ Force health-check down. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.agent_enable(sv)
+
+ Enable agent check. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.agent_disable(sv)
+
+ Disable agent check. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.agent_force_up(sv)
+
+ Force agent check up. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.agent_force_down(sv)
+
+ Force agent check down. See the management socket documentation for more
+ information about this function.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+
+.. js:function:: Server.tracking(sv)
+
+ Check if the current server is tracking another server.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: A :ref:`server_class` which indicates the tracked server or nil if
+ the server doesn't track another one.
+
+.. js:function:: Server.get_trackers(sv)
+
+ Check if the current server is being tracked by other servers.
+
+ :param class_server sv: A :ref:`server_class` which indicates the manipulated
+ server.
+ :returns: An array of :ref:`server_class` which indicates the tracking
+ servers (might be empty)
+
+.. js:function:: Server.event_sub(sv, event_types, func)
+
+ Register a function that will be called on specific server events.
+ It works exactly like :js:func:`core.event_sub()` except that the subscription
+ will be performed within the server dedicated subscription list instead of the
+ global one.
+ (Your callback function will only be called for server events affecting sv)
+
+ See :js:func:`core.event_sub()` for function usage.
+
+ A key advantage to using :js:func:`Server.event_sub()` over
+ :js:func:`core.event_sub()` for servers is that :js:func:`Server.event_sub()`
+ allows you to be notified for servers events of a single server only.
+ It removes the needs for extra filtering in your callback function if you only
+ care about a single server, and also prevents useless wakeups.
+
+ For instance, if you want to be notified for UP/DOWN events on a given set of
+ servers, it is recommended to perform multiple per-server subscriptions since
+ it will be more efficient that doing a single global subscription that will
+ filter the received events.
+ Unless you really want to be notified for server events of ALL servers of
+ course, which could make sense given your setup but should be avoided if you
+ have a large number of servers as it will add a significant load on your
+ haproxy process in case of multiple servers state change in a short amount of
+ time.
+
+ .. Note::
+ You may also combine :js:func:`core.event_sub()` with
+ :js:func:`Server.event_sub()`.
+
+ Also, don't forget that you can use :js:func:`core.register_task()` from
+ your callback function if needed. (ie: parallel work)
+
+ Here is a working example combining :js:func:`core.event_sub()` with
+ :js:func:`Server.event_sub()` and :js:func:`core.register_task()`
+ (This only serves as a demo, this is not necessarily useful to do so)
+
+.. code-block:: lua
+
+ core.event_sub({"SERVER_ADD"}, function(event, data, sub)
+ -- in the global event handler
+ if data["reference"] ~= nil then
+ print("Tracking new server: ", data["name"])
+ data["reference"]:event_sub({"SERVER_UP", "SERVER_DOWN"}, function(event, data, sub)
+ -- in the per-server event handler
+ if data["reference"] ~= nil then
+ core.register_task(function(server)
+ -- subtask to perform some async work (e.g.: HTTP API calls, sending emails...)
+ print("ASYNC: SERVER ", server:get_name(), " is ", event == "SERVER_UP" and "UP" or "DOWN")
+ end, data["reference"])
+ end
+ end)
+ end
+ end)
+
+..
+
+ In this example, we will first track global server addition events.
+ For each newly added server ("add server" on the cli), we will register a
+ UP/DOWN server subscription.
+ Then, the callback function will schedule the event handling in an async
+ subtask which will receive the server reference as an argument.
+
+.. _listener_class:
+
+Listener class
+==============
+
+.. js:function:: Listener.get_stats(ls)
+
+ Returns listener statistics.
+
+ :param class_listener ls: A :ref:`listener_class` which indicates the
+ manipulated listener.
+ :returns: a key/value table containing stats
+
+.. _event_sub_class:
+
+EventSub class
+==============
+
+.. js:function:: EventSub.unsub()
+
+ End the subscription, the callback function will not be called again.
+
+.. _server_event_class:
+
+ServerEvent class
+=================
+
+.. js:class:: ServerEvent
+
+This class is provided with every **SERVER** events.
+
+See :js:func:`core.event_sub()` for more info.
+
+.. js:attribute:: ServerEvent.name
+
+ Contains the name of the server.
+
+.. js:attribute:: ServerEvent.puid
+
+ Contains the proxy-unique uid of the server
+
+.. js:attribute:: ServerEvent.rid
+
+ Contains the revision ID of the server
+
+.. js:attribute:: ServerEvent.proxy_name
+
+ Contains the name of the proxy to which the server belongs
+
+.. js:attribute:: ServerEvent.proxy_uuid
+
+ Contains the uuid of the proxy to which the server belongs
+
+.. js:attribute:: ServerEvent.reference
+
+ Reference to the live server (A :ref:`server_class`).
+
+ .. Warning::
+ Not available if the server was removed in the meantime.
+ (Will never be set for SERVER_DEL event since the server does not exist
+ anymore)
+
+.. js:attribute:: ServerEvent.state
+
+ A :ref:`server_event_state_class`
+
+ .. Note::
+ Only available for SERVER_STATE event
+
+.. js:attribute:: ServerEvent.admin
+
+ A :ref:`server_event_admin_class`
+
+ .. Note::
+ Only available for SERVER_ADMIN event
+
+.. js:attribute:: ServerEvent.check
+
+ A :ref:`server_event_checkres_class`
+
+ .. Note::
+ Only available for SERVER_CHECK event
+
+.. _server_event_checkres_class:
+
+ServerEventCheckRes class
+=========================
+
+.. js:class:: ServerEventCheckRes
+
+This class describes the result of a server's check.
+
+.. js:attribute:: ServerEventCheckRes.result
+
+ Effective check result.
+
+ Check result is a string and will be set to one of the following values:
+ - "FAILED": the check failed
+ - "PASSED": the check succeeded
+ - "CONDPASS": the check conditionally passed
+
+.. js:attribute:: ServerEventCheckRes.agent
+
+ Boolean set to true if the check is an agent check.
+ Else it is a health check.
+
+.. js:attribute:: ServerEventCheckRes.duration
+
+ Check's duration in milliseconds
+
+.. js:attribute:: ServerEventCheckRes.reason
+
+ Check's status. An array containing three fields:
+ - **short**: a string representing check status short name
+ - **desc**: a string representing check status description
+ - **code**: an integer, this extra information is provided for checks
+ that went through the data analysis stage (>= layer 5)
+
+.. js:attribute:: ServerEventCheckRes.health
+
+ An array containing values about check's health (integers):
+ - **cur**: current health counter:
+ - 0 to (**rise** - 1) = BAD
+ - **rise** to (**rise** + **fall** - 1) = GOOD
+ - **rise**: server will be considered as operational after **rise**
+ consecutive successful checks
+ - **fall**: server will be considered as dead after **fall** consecutive
+ unsuccessful checks
+
+.. _server_event_state_class:
+
+ServerEventState class
+======================
+
+.. js:class:: ServerEventState
+
+This class contains additional info related to **SERVER_STATE** event.
+
+.. js:attribute:: ServerEventState.admin
+
+ Boolean set to true if the server state change is due to an administrative
+ change. Else it is an operational change.
+
+.. js:attribute:: ServerEventState.check
+
+ A :ref:`server_event_checkres_class`, provided if the state change is
+ due to a server check (must be an operational change).
+
+.. js:attribute:: ServerEventState.cause
+
+ Printable state change cause. Might be empty.
+
+.. js:attribute:: ServerEventState.new_state
+
+ New server state due to operational or admin change.
+
+ It is a string that can be any of the following values:
+ - "STOPPED": The server is down
+ - "STOPPING": The server is up but soft-stopping
+ - "STARTING": The server is warming up
+ - "RUNNING": The server is fully up
+
+.. js:attribute:: ServerEventState.old_state
+
+ Previous server state prior to the operational or admin change.
+
+ Can be any value described in **new_state**, but they should differ.
+
+.. js:attribute:: ServerEventState.requeued
+
+ Number of connections that were requeued due to the server state change.
+
+ For a server going DOWN: it is the number of pending server connections
+ that are requeued to the backend (such connections will be redispatched
+ to any server that is suitable according to the configured load balancing
+ algorithm).
+
+ For a server going UP: it is the number of pending connections on the
+ backend that may be redispatched to the server according to the load
+ balancing algorithm that is in use.
+
+.. _server_event_admin_class:
+
+ServerEventAdmin class
+======================
+
+.. js:class:: ServerEventAdmin
+
+This class contains additional info related to **SERVER_ADMIN** event.
+
+.. js:attribute:: ServerEventAdmin.cause
+
+ Printable admin state change cause. Might be empty.
+
+.. js:attribute:: ServerEventAdmin.new_admin
+
+ New server admin state due to the admin change.
+
+ It is an array of strings containing a composition of the following values:
+ - "**MAINT**": server is in maintenance mode
+ - "FMAINT": server is in forced maintenance mode (MAINT is also set)
+ - "IMAINT": server is in inherited maintenance mode (MAINT is also set)
+ - "RMAINT": server is in resolve maintenance mode (MAINT is also set)
+ - "CMAINT": server is in config maintenance mode (MAINT is also set)
+ - "**DRAIN**": server is in drain mode
+ - "FDRAIN": server is in forced drain mode (DRAIN is also set)
+ - "IDRAIN": server is in inherited drain mode (DRAIN is also set)
+
+.. js:attribute:: ServerEventAdmin.old_admin
+
+ Previous server admin state prior to the admin change.
+
+ Values are presented as in **new_admin**, but they should differ.
+ (Comparing old and new helps to find out the change(s))
+
+.. js:attribute:: ServerEventAdmin.requeued
+
+ Same as :js:attr:`ServerEventState.requeued` but when the requeue is due to
+ the server administrative state change.
+
+.. _queue_class:
+
+Queue class
+===========
+
+.. js:class:: Queue
+
+ This class provides a generic FIFO storage mechanism that may be shared
+ between multiple lua contexts to easily pass data between them, as stock
+ Lua doesn't provide easy methods for passing data between multiple coroutines.
+
+ inter-task example:
+
+.. code-block:: lua
+
+ -- script wide shared queue
+ local queue = core.queue()
+
+ -- master task
+ core.register_task(function()
+ -- send the date every second
+ while true do
+ queue:push(os.date("%c", core.now().sec))
+ core.sleep(1)
+ end
+ end)
+
+ -- worker task
+ core.register_task(function()
+ while true do
+ -- print the date sent by master
+ print(queue:pop_wait())
+ end
+ end)
+..
+
+ Of course, queue may also be used as a local storage mechanism.
+
+ Use :js:func:`core.queue` to get a new Queue object.
+
+.. js:function:: Queue.size(queue)
+
+ This function returns the number of items within the Queue.
+
+ :param class_queue queue: A :ref:`queue_class` to the current queue
+
+.. js:function:: Queue.push(queue, item)
+
+ This function pushes the item (may be of any type) to the queue.
+ Pushed item cannot be nil or invalid, or an error will be thrown.
+
+ :param class_queue queue: A :ref:`queue_class` to the current queue
+ :returns: boolean true for success and false for error
+
+.. js:function:: Queue.pop(queue)
+
+ This function immediately tries to pop an item from the queue.
+ It returns nil if no item is available at the time of the call.
+
+ :param class_queue queue: A :ref:`queue_class` to the current queue
+ :returns: the item at the top of the stack (any type) or nil if no items
+
+.. js:function:: Queue.pop_wait(queue)
+
+ **context**: task
+
+ This is an alternative to pop() that may be used within task contexts.
+
+ The call waits for data if no item is currently available. This may be
+ useful when used in a while loop to prevent cpu waste.
+
+ Note that this requires yielding, thus it is only available within contexts
+ that support yielding (mainly task context).
+
+ :param class_queue queue: A :ref:`queue_class` to the current queue
+ :returns: the item at the top of the stack (any type) or nil in case of error
+
+.. _concat_class:
+
+Concat class
+============
+
+.. js:class:: Concat
+
+ This class provides a fast way for string concatenation. The naive approach
+ using native Lua concatenation like the code below is slow for several
+ reasons.
+
+.. code-block:: lua
+
+ str = "string1"
+ str = str .. ", string2"
+ str = str .. ", string3"
+..
+
+ For each concatenation, Lua:
+ - allocates memory for the result,
+ - concatenates the two strings, copying them into the new memory block,
+ - frees the old memory block containing the string which is no longer used.
+
+ This process involves many memory moves, allocations and frees. In addition, the
+ memory is not really freed, it is just marked as unused and waits for the
+ garbage collector.
+
+ The Concat class provides an alternative way to concatenate strings. It uses
+ the internal Lua mechanism (it does not allocate memory), but it doesn't copy
+ the data more than once.
+
+ On my computer, the following loops spend 0.2s for the Concat method and
+ 18.5s for the pure Lua implementation. So, the Concat class is about 100x
+ faster than the embedded solution.
+
+.. code-block:: lua
+
+ for j = 1, 100 do
+ c = core.concat()
+ for i = 1, 20000 do
+ c:add("#####")
+ end
+ end
+..
+
+.. code-block:: lua
+
+ for j = 1, 100 do
+ c = ""
+ for i = 1, 20000 do
+ c = c .. "#####"
+ end
+ end
+..
+
+.. js:function:: Concat.add(concat, string)
+
+ This function adds a string to the current concatenated string.
+
+ :param class_concat concat: A :ref:`concat_class` which contains the currently
+ built string.
+ :param string string: A new string to concatenate to the current built
+ string.
+
+.. js:function:: Concat.dump(concat)
+
+ This function returns the concatenated string.
+
+ :param class_concat concat: A :ref:`concat_class` which contains the currently
+ built string.
+ :returns: the concatenated string
+
+.. _fetches_class:
+
+Fetches class
+=============
+
+.. js:class:: Fetches
+
+ This class contains a lot of internal HAProxy sample fetches. See the
+ HAProxy "configuration.txt" documentation for more information.
+ (chapters 7.3.2 to 7.3.6)
+
+ .. warning::
+ some sample fetches are not available in some context. These limitations
+ are specified in this documentation when they're useful.
+
+ :see: :js:attr:`TXN.f`
+ :see: :js:attr:`TXN.sf`
+
+ Fetches are useful to:
+
+ * get system time,
+ * get environment variable,
+ * get random numbers,
+ * know backend status like the number of users in queue or the number of
+ connections established,
+ * get client information like ip source or destination,
+ * deal with stick tables,
+ * fetch established SSL information,
+ * fetch HTTP information like headers or method.
+
+.. code-block:: lua
+
+ function action(txn)
+ -- Get source IP
+ local clientip = txn.f:src()
+ end
+..
+
+.. _converters_class:
+
+Converters class
+================
+
+.. js:class:: Converters
+
+ This class contains a lot of internal HAProxy sample converters. See the
+ HAProxy documentation "configuration.txt" for more information about their
+ usage. See chapter 7.3.1.
+
+ :see: :js:attr:`TXN.c`
+ :see: :js:attr:`TXN.sc`
+
+ Converters provide stateful transformations. They are useful to:
+
+ * convert input to base64,
+ * apply hash on input string (djb2, crc32, sdbm, wt6),
+ * format date,
+ * json escape,
+ * extract preferred language comparing two lists,
+ * turn to lower or upper chars,
+ * deal with stick tables.
+
+.. _channel_class:
+
+Channel class
+=============
+
+.. js:class:: Channel
+
+ **context**: action, sample-fetch, convert, filter
+
+ HAProxy uses two buffers for the processing of the requests. The first one is
+ used with the request data (from the client to the server) and the second is
+ used for the response data (from the server to the client).
+
+ Each buffer contains two types of data. The first type is the incoming data
+ waiting for processing. The second type is the outgoing data already
+ processed. Usually, the incoming data is processed, then it is tagged as
+ outgoing data, and finally it is sent. The following functions provide tools
+ for manipulating these data in a buffer.
+
+ The following diagram shows where the channel class functions are applied.
+
+ .. image:: _static/channel.png
+
+ .. warning::
+ It is not possible to read from the response in request action, and it is
+ not possible to read from the request channel in response action.
+
+ .. warning::
+ It is forbidden to alter the Channels buffer from HTTP contexts. So only
+ :js:func:`Channel.input`, :js:func:`Channel.output`,
+ :js:func:`Channel.may_recv`, :js:func:`Channel.is_full` and
+ :js:func:`Channel.is_resp` can be called from a HTTP context.
+
+ All the functions provided by this class are available in the
+ **sample-fetches**, **actions** and **filters** contexts. For **filters**,
+ incoming data (offset and length) are relative to the filter. Some functions
+ may yield, but only for **actions**. Yield is not possible for
+ **sample-fetches**, **converters** and **filters**.
+
+.. js:function:: Channel.append(channel, string)
+
+ This function copies the string **string** at the end of incoming data of the
+ channel buffer. The function returns the copied length on success or -1 if
+ data cannot be copied.
+
+ Same as :js:func:`Channel.insert(channel, string, channel:input())`.
+
+ :param class_channel channel: The manipulated Channel.
+ :param string string: The data to copy at the end of incoming data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: Channel.data(channel [, offset [, length]])
+
+ This function returns **length** bytes of incoming data from the channel
+ buffer, starting at the offset **offset**. The data are not removed from the
+ buffer.
+
+ By default, if no length is provided, all incoming data found, starting at the
+ given offset, are returned. If **length** is set to -1, the function tries to
+ retrieve a maximum of data and, if called by an action, it yields if
+ necessary. It also waits for more data if the requested length exceeds the
+ available amount of incoming data. Not providing an offset is the same as
+ setting it to 0. A positive offset is relative to the beginning of incoming
+ data of the channel buffer while negative offset is relative to the end.
+
+ If there is no incoming data and the channel can't receive more data, a 'nil'
+ value is returned.
+
+ :param class_channel channel: The manipulated Channel.
+ :param integer offset: *optional* The offset in incoming data to start to get
+ data. 0 by default. May be negative to be relative to the end of incoming
+ data.
+ :param integer length: *optional* The expected length of data to retrieve. All
+ incoming data by default. May be set to -1 to get a maximum of data.
+ :returns: a string containing the data found or nil.
+
+.. js:function:: Channel.forward(channel, length)
+
+ This function forwards **length** bytes of data from the channel buffer. If
+ the requested length exceeds the available amount of incoming data, and if
+ called by an action, the function yields, waiting for more data to forward. It
+ returns the amount of data forwarded.
+
+ :param class_channel channel: The manipulated Channel.
+ :param integer int: The amount of data to forward.
+
+.. js:function:: Channel.input(channel)
+
+ This function returns the length of incoming data in the channel buffer. When
+ called by a filter, this value is relative to the filter.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: an integer containing the amount of available bytes.
+
+.. js:function:: Channel.insert(channel, string [, offset])
+
+ This function copies the string **string** at the offset **offset** in
+ incoming data of the channel buffer. The function returns the copied length on
+ success or -1 if data cannot be copied.
+
+ By default, if no offset is provided, the string is copied in front of
+ incoming data. A positive offset is relative to the beginning of incoming data
+ of the channel buffer while negative offset is relative to their end.
+
+ :param class_channel channel: The manipulated Channel.
+ :param string string: The data to copy into incoming data.
+ :param integer offset: *optional* The offset in incoming data where to copy
+ data. 0 by default. May be negative to be relative to the end of incoming
+ data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: Channel.is_full(channel)
+
+ This function returns true if the channel buffer is full.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a boolean
+
+.. js:function:: Channel.is_resp(channel)
+
+ This function returns true if the channel is the response one.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a boolean
+
+.. js:function:: Channel.line(channel [, offset [, length]])
+
+ This function parses **length** bytes of incoming data of the channel buffer,
+ starting at offset **offset**, and returns the first line found, including the
+ '\\n'. The data are not removed from the buffer. If no line is found, all
+ data are returned.
+
+ By default, if no length is provided, all incoming data, starting at the given
+ offset, are evaluated. If **length** is set to -1, the function tries to
+ retrieve a maximum of data and, if called by an action, yields if
+ necessary. It also waits for more data if the requested length exceeds the
+ available amount of incoming data. Not providing an offset is the same as
+ setting it to 0. A positive offset is relative to the beginning of incoming
+ data of the channel buffer while negative offset is relative to the end.
+
+ If there is no incoming data and the channel can't receive more data, a 'nil'
+ value is returned.
+
+ :param class_channel channel: The manipulated Channel.
+ :param integer offset: *optional* The offset in incoming data to start to
+ parse data. 0 by default. May be negative to be relative to the end of
+ incoming data.
+ :param integer length: *optional* The length of data to parse. All incoming
+ data by default. May be set to -1 to get a maximum of data.
+ :returns: a string containing the line found or nil.
+
+.. js:function:: Channel.may_recv(channel)
+
+ This function returns true if the channel may still receive data.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a boolean
+
+.. js:function:: Channel.output(channel)
+
+ This function returns the length of outgoing data of the channel buffer. When
+ called by a filter, this value is relative to the filter.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: an integer containing the amount of available bytes.
+
+.. js:function:: Channel.prepend(channel, string)
+
+ This function copies the string **string** in front of incoming data of the
+ channel buffer. The function returns the copied length on success or -1 if
+ data cannot be copied.
+
+ Same as :js:func:`Channel.insert(channel, string, 0)`.
+
+ :param class_channel channel: The manipulated Channel.
+ :param string string: The data to copy in front of incoming data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: Channel.remove(channel [, offset [, length]])
+
+ This function removes **length** bytes of incoming data of the channel buffer,
+ starting at offset **offset**. This function returns number of bytes removed
+ on success.
+
+ By default, if no length is provided, all incoming data, starting at the given
+ offset, are removed. Not providing an offset is the same as setting it
+ to 0. A positive offset is relative to the beginning of incoming data of the
+ channel buffer while negative offset is relative to the end.
+
+ :param class_channel channel: The manipulated Channel.
+ :param integer offset: *optional* The offset in incoming data where to start
+ to remove data. 0 by default. May be negative to be relative to the end of
+ incoming data.
+ :param integer length: *optional* The length of data to remove. All incoming
+ data by default.
+ :returns: an integer containing the amount of bytes removed.
+
+.. js:function:: Channel.send(channel, string)
+
+ This function requires immediate send of the string **string**. It means the
+ string is copied at the beginning of incoming data of the channel buffer and
+ immediately forwarded. Unless the connection is closed, and if called by an
+ action, this function yields to copy and forward all the string.
+
+ :param class_channel channel: The manipulated Channel.
+ :param string string: The data to send.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: Channel.set(channel, string [, offset [, length]])
+
+ This function replaces **length** bytes of incoming data of the channel
+ buffer, starting at offset **offset**, by the string **string**. The function
+ returns the copied length on success or -1 if data cannot be copied.
+
+ By default, if no length is provided, all incoming data, starting at the given
+ offset, are replaced. Not providing an offset is the same as setting it
+ to 0. A positive offset is relative to the beginning of incoming data of the
+ channel buffer while negative offset is relative to the end.
+
+ :param class_channel channel: The manipulated Channel.
+ :param string string: The data to copy into incoming data.
+ :param integer offset: *optional* The offset in incoming data where to start
+ the data replacement. 0 by default. May be negative to be relative to the
+ end of incoming data.
+ :param integer length: *optional* The length of data to replace. All incoming
+ data by default.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: Channel.dup(channel)
+
+ **DEPRECATED**
+
+ This function returns all incoming data found in the channel buffer. The data
+ are not removed from the buffer and can be reprocessed later.
+
+ If there is no incoming data and the channel can't receive more data, a 'nil'
+ value is returned.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a string containing all data found or nil.
+
+ .. warning::
+ This function is deprecated. :js:func:`Channel.data()` must be used
+ instead.
+
+.. js:function:: Channel.get(channel)
+
+ **DEPRECATED**
+
+ This function returns all incoming data found in the channel buffer and remove
+ them from the buffer.
+
+ If there is no incoming data and the channel can't receive more data, a 'nil'
+ value is returned.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a string containing all the data found or nil.
+
+ .. warning::
+ This function is deprecated. :js:func:`Channel.data()` must be used to
+ retrieve data followed by a call to :js:func:`Channel:remove()` to remove
+ data.
+
+ .. code-block:: lua
+
+ local data = chn:data()
+ chn:remove(0, data:len())
+
+ ..
+
+.. js:function:: Channel.getline(channel)
+
+ **DEPRECATED**
+
+ This function returns the first line found in incoming data of the channel
+ buffer, including the '\\n'. The returned data are removed from the buffer. If
+ no line is found, and if called by an action, this function yields to wait for
+ more data, except if the channel can't receive more data. In this case all
+ data are returned.
+
+ If there is no incoming data and the channel can't receive more data, a 'nil'
+ value is returned.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: a string containing the line found or nil.
+
+ .. warning::
+ This function is deprecated. :js:func:`Channel.line()` must be used to
+ retrieve a line followed by a call to :js:func:`Channel:remove()` to remove
+ data.
+
+ .. code-block:: lua
+
+ local line = chn:line(0, -1)
+ chn:remove(0, line:len())
+
+ ..
+
+.. js:function:: Channel.get_in_len(channel)
+
+ **DEPRECATED**
+
+ This function returns the length of the input part of the buffer. When called
+ by a filter, this value is relative to the filter.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: an integer containing the amount of available bytes.
+
+ .. warning::
+ This function is deprecated. :js:func:`Channel.input()` must be used
+ instead.
+
+.. js:function:: Channel.get_out_len(channel)
+
+ **DEPRECATED**
+
+ This function returns the length of the output part of the buffer. When called
+ by a filter, this value is relative to the filter.
+
+ :param class_channel channel: The manipulated Channel.
+ :returns: an integer containing the amount of available bytes.
+
+ .. warning::
+ This function is deprecated. :js:func:`Channel.output()` must be used
+ instead.
+
+.. _http_class:
+
+HTTP class
+==========
+
+.. js:class:: HTTP
+
+ This class contains all the HTTP manipulation functions.
+
+.. js:function:: HTTP.req_get_headers(http)
+
+ Returns a table containing all the request headers.
+
+ :param class_http http: The related http object.
+ :returns: table of headers.
+ :see: :js:func:`HTTP.res_get_headers`
+
+ This is the form of the returned table:
+
+.. code-block:: lua
+
+ HTTP:req_get_headers()['<header-name>'][<header-index>] = "<header-value>"
+
+ local hdr = HTTP:req_get_headers()
+ hdr["host"][0] = "www.test.com"
+ hdr["accept"][0] = "audio/basic q=1"
+ hdr["accept"][1] = "audio/*, q=0.2"
+ hdr["accept"][2] = "*/*, q=0.1"
+..
+
+.. js:function:: HTTP.res_get_headers(http)
+
+ Returns a table containing all the response headers.
+
+ :param class_http http: The related http object.
+ :returns: table of headers.
+ :see: :js:func:`HTTP.req_get_headers`
+
+ This is the form of the returned table:
+
+.. code-block:: lua
+
+ HTTP:res_get_headers()['<header-name>'][<header-index>] = "<header-value>"
+
+ local hdr = HTTP:res_get_headers()
+ hdr["host"][0] = "www.test.com"
+ hdr["accept"][0] = "audio/basic q=1"
+ hdr["accept"][1] = "audio/*, q=0.2"
+ hdr["accept"][2] = "*/*, q=0.1"
+..
+
+.. js:function:: HTTP.req_add_header(http, name, value)
+
+ Appends a HTTP header field in the request whose name is
+ specified in "name" and whose value is defined in "value".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string value: The header value.
+ :see: :js:func:`HTTP.res_add_header`
+
+.. js:function:: HTTP.res_add_header(http, name, value)
+
+ Appends a HTTP header field in the response whose name is
+ specified in "name" and whose value is defined in "value".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string value: The header value.
+ :see: :js:func:`HTTP.req_add_header`
+
+.. js:function:: HTTP.req_del_header(http, name)
+
+ Removes all HTTP header fields in the request whose name is
+ specified in "name".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :see: :js:func:`HTTP.res_del_header`
+
+.. js:function:: HTTP.res_del_header(http, name)
+
+ Removes all HTTP header fields in the response whose name is
+ specified in "name".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :see: :js:func:`HTTP.req_del_header`
+
+.. js:function:: HTTP.req_set_header(http, name, value)
+
+ This function replaces all occurrences of header "name", by only
+ one containing the "value".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string value: The header value.
+ :see: :js:func:`HTTP.res_set_header`
+
+ This function does the same work as the following code:
+
+.. code-block:: lua
+
+ function fcn(txn)
+ TXN.http:req_del_header("header")
+ TXN.http:req_add_header("header", "value")
+ end
+..
+
+.. js:function:: HTTP.res_set_header(http, name, value)
+
+ This function replaces all occurrences of header "name", by only
+ one containing the "value".
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string value: The header value.
+ :see: :js:func:`HTTP.req_rep_header()`
+
+.. js:function:: HTTP.req_rep_header(http, name, regex, replace)
+
+ Matches the regular expression in all occurrences of header field "name"
+ according to "regex", and replaces them with the "replace" argument. The
+ replacement value can contain back references like \1, \2, ... This
+ function works with the request.
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string regex: The match regular expression.
+ :param string replace: The replacement value.
+ :see: :js:func:`HTTP.res_rep_header()`
+
+.. js:function:: HTTP.res_rep_header(http, name, regex, replace)
+
+ Matches the regular expression in all occurrences of header field "name"
+ according to "regex", and replaces them with the "replace" argument. The
+ replacement value can contain back references like \1, \2, ... This
+ function works with the response.
+
+ :param class_http http: The related http object.
+ :param string name: The header name.
+ :param string regex: The match regular expression.
+ :param string replace: The replacement value.
+ :see: :js:func:`HTTP.req_rep_header()`
+
+.. js:function:: HTTP.req_set_method(http, method)
+
+ Rewrites the request method with the parameter "method".
+
+ :param class_http http: The related http object.
+ :param string method: The new method.
+
+.. js:function:: HTTP.req_set_path(http, path)
+
+ Rewrites the request path with the "path" parameter.
+
+ :param class_http http: The related http object.
+ :param string path: The new path.
+
+.. js:function:: HTTP.req_set_query(http, query)
+
+ Rewrites the request's query string which appears after the first question
+ mark ("?") with the parameter "query".
+
+ :param class_http http: The related http object.
+ :param string query: The new query.
+
+.. js:function:: HTTP.req_set_uri(http, uri)
+
+ Rewrites the request URI with the parameter "uri".
+
+ :param class_http http: The related http object.
+ :param string uri: The new uri.
+
+.. js:function:: HTTP.res_set_status(http, status [, reason])
+
+ Rewrites the response status code with the parameter "status".
+
+ If no custom reason is provided, it will be generated from the status.
+
+ :param class_http http: The related http object.
+ :param integer status: The new response status code.
+ :param string reason: The new response reason (optional).
+
+.. _httpclient_class:
+
+HTTPClient class
+================
+
+.. js:class:: HTTPClient
+
+ The httpclient class allows issue of outbound HTTP requests through a simple
+ API without the knowledge of HAProxy internals.
+
+.. js:function:: HTTPClient.get(httpclient, request)
+.. js:function:: HTTPClient.head(httpclient, request)
+.. js:function:: HTTPClient.put(httpclient, request)
+.. js:function:: HTTPClient.post(httpclient, request)
+.. js:function:: HTTPClient.delete(httpclient, request)
+
+ Send a HTTP request and wait for a response. GET, HEAD, PUT, POST and DELETE
+ methods can be used.
+ The HTTPClient will send asynchronously the data and is able to send and
+ receive more than HAProxy bufsize.
+
+ The HTTPClient interface is not able to decompress responses, it is not
+ recommended to send an Accept-Encoding in the request so the response is
+ received uncompressed.
+
+ :param class httpclient: Is the manipulated HTTPClient.
+ :param table request: Is a table containing the parameters of the request
+ that will be sent.
+ :param string request.url: Is a mandatory parameter for the request that
+ contains the URL.
+ :param string request.body: Is an optional parameter for the request that
+ contains the body to send.
+ :param table request.headers: Is an optional parameter for the request that
+ contains the headers to send.
+ :param string request.dst: Is an optional parameter for the destination in
+ haproxy address format.
+ :param integer request.timeout: Optional timeout parameter, set a
+ "timeout server" on the connections.
+ :returns: Lua table containing the response
+
+
+.. code-block:: lua
+
+ local httpclient = core.httpclient()
+ local response = httpclient:post{url="http://127.0.0.1", body=body, dst="unix@/var/run/http.sock"}
+
+..
+
+.. code-block:: lua
+
+ response = {
+ status = 400,
+ reason = "Bad request",
+ headers = {
+ ["content-type"] = { "text/html" },
+ ["cache-control"] = { "no-cache", "no-store" },
+ },
+ body = "<html><body><h1>invalid request<h1></body></html>",
+ }
+..
+
+
+.. _txn_class:
+
+TXN class
+=========
+
+.. js:class:: TXN
+
+ The txn class contains all the functions relative to the http or tcp
+ transaction (Note that a tcp stream is the same as a tcp transaction, but
+ a HTTP transaction is not the same as a tcp stream).
+
+ The usage of this class permits to retrieve data from the requests, alter it
+ and forward it.
+
+ All the functions provided by this class are available in the context
+ **sample-fetches**, **actions** and **filters**.
+
+.. js:attribute:: TXN.c
+
+ :returns: An :ref:`converters_class`.
+
+ This attribute contains a Converters class object.
+
+.. js:attribute:: TXN.sc
+
+ :returns: An :ref:`converters_class`.
+
+ This attribute contains a Converters class object. The functions of
+ this object always return a string.
+
+.. js:attribute:: TXN.f
+
+ :returns: An :ref:`fetches_class`.
+
+ This attribute contains a Fetches class object.
+
+.. js:attribute:: TXN.sf
+
+ :returns: An :ref:`fetches_class`.
+
+ This attribute contains a Fetches class object. The functions of
+ this object always return a string.
+
+.. js:attribute:: TXN.req
+
+ :returns: An :ref:`channel_class`.
+
+ This attribute contains a channel class object for the request buffer.
+
+.. js:attribute:: TXN.res
+
+ :returns: An :ref:`channel_class`.
+
+ This attribute contains a channel class object for the response buffer.
+
+.. js:attribute:: TXN.http
+
+ :returns: An :ref:`http_class`.
+
+ This attribute contains a HTTP class object. It is available only if the
+ proxy has the "mode http" enabled.
+
+.. js:attribute:: TXN.http_req
+
+ :returns: An :ref:`httpmessage_class`.
+
+ This attribute contains the request HTTPMessage class object. It is available
+ only if the proxy has the "mode http" enabled and only in the **filters**
+ context.
+
+.. js:attribute:: TXN.http_res
+
+ :returns: An :ref:`httpmessage_class`.
+
+ This attribute contains the response HTTPMessage class object. It is available
+ only if the proxy has the "mode http" enabled and only in the **filters**
+ context.
+
+.. js:function:: TXN.log(TXN, loglevel, msg)
+
+ This function sends a log. The log is sent, according with the HAProxy
+ configuration file, to the loggers relevant to the current context and/or
+ to stderr if it is allowed.
+
+ The exact behaviour depends on tune.lua.log.loggers and tune.lua.log.stderr.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param integer loglevel: Is the log level associated with the message. It is
+ a number between 0 and 7.
+ :param string msg: The log content.
+ :see: :js:attr:`core.emerg`, :js:attr:`core.alert`, :js:attr:`core.crit`,
+ :js:attr:`core.err`, :js:attr:`core.warning`, :js:attr:`core.notice`,
+ :js:attr:`core.info`, :js:attr:`core.debug` (log level definitions)
+ :see: :js:func:`TXN.deflog`
+ :see: :js:func:`TXN.Debug`
+ :see: :js:func:`TXN.Info`
+ :see: :js:func:`TXN.Warning`
+ :see: :js:func:`TXN.Alert`
+
+.. js:function:: TXN.deflog(TXN, msg)
+
+ Sends a log line with the default loglevel for the proxy associated with the
+ transaction.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string msg: The log content.
+ :see: :js:func:`TXN.log`
+
+.. js:function:: TXN.Debug(txn, msg)
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string msg: The log content.
+ :see: :js:func:`TXN.log`
+
+ Does the same job as:
+
+.. code-block:: lua
+
+ function Debug(txn, msg)
+ TXN.log(txn, core.debug, msg)
+ end
+..
+
+.. js:function:: TXN.Info(txn, msg)
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string msg: The log content.
+ :see: :js:func:`TXN.log`
+
+ Does the same job as:
+
+.. code-block:: lua
+
+ function Info(txn, msg)
+ TXN.log(txn, core.info, msg)
+ end
+..
+
+.. js:function:: TXN.Warning(txn, msg)
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string msg: The log content.
+ :see: :js:func:`TXN.log`
+
+ Does the same job as:
+
+.. code-block:: lua
+
+ function Warning(txn, msg)
+ TXN.log(txn, core.warning, msg)
+ end
+..
+
+.. js:function:: TXN.Alert(txn, msg)
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string msg: The log content.
+ :see: :js:func:`TXN.log`
+
+ Does the same job as:
+
+.. code-block:: lua
+
+ function Alert(txn, msg)
+ TXN.log(txn, core.alert, msg)
+ end
+..
+
+.. js:function:: TXN.get_priv(txn)
+
+ Return Lua data stored in the current transaction (with the `TXN.set_priv()`
+ function). If no data are stored, it returns a nil value.
+
+ :param class_txn txn: The class txn object containing the data.
+ :returns: the opaque data previously stored, or nil if nothing is
+ available.
+
+.. js:function:: TXN.set_priv(txn, data)
+
+ Store any data in the current HAProxy transaction. This action replaces the
+ old stored data.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param opaque data: The data which is stored in the transaction.
+
+.. js:function:: TXN.set_var(TXN, var, value[, ifexist])
+
+ Converts a Lua type into a HAProxy type and stores it in a variable <var>.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :param type value: The value associated to the variable. The type can be
+ string or integer.
+ :param boolean ifexist: If this parameter is set to true the variable will
+ only be set if it was defined elsewhere (i.e. used within the configuration).
+ For global variables (using the "proc" scope), they will only be updated and
+ never created. It is highly recommended to always set this to true.
+
+.. js:function:: TXN.unset_var(TXN, var)
+
+ Unset the variable <var>.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+
+.. js:function:: TXN.get_var(TXN, var)
+
+ Returns data stored in the variable <var> converted to a Lua type.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+
+.. js:function:: TXN.reply([reply])
+
+ Return a new reply object
+
+ :param table reply: A table containing info to initialize the reply fields.
+ :returns: A :ref:`reply_class` object.
+
+ The table used to initialize the reply object may contain the following entries:
+
+ * status : The reply status code. The code 200 is used by default.
+ * reason : The reply reason. The reason corresponding to the status code is
+ used by default.
+ * headers : A list of headers, indexed by header name. Empty by default. For
+ a given name, multiple values are possible, stored in an ordered list.
+ * body : The reply body, empty by default.
+
+.. code-block:: lua
+
+ local reply = txn:reply{
+ status = 400,
+ reason = "Bad request",
+ headers = {
+ ["content-type"] = { "text/html" },
+ ["cache-control"] = {"no-cache", "no-store" }
+ },
+ body = "<html><body><h1>invalid request<h1></body></html>"
+ }
+..
+ :see: :js:class:`Reply`
+
+.. js:function:: TXN.done(txn[, reply])
+
+ This function terminates processing of the transaction and the associated
+ session and optionally reply to the client for HTTP sessions.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param class_reply reply: The class reply object to return to the client.
+
+ This function can be used when a critical error is detected or to terminate
+ processing after some data have been returned to the client (eg: a redirect).
+ To do so, a reply may be provided. This object is optional and may contain a
+ status code, a reason, a header list and a body. All these fields are
+ optional. When not provided, the default values are used. By default, with an
+ empty reply object, an empty HTTP 200 response is returned to the client. If
+ no reply object is provided, the transaction is terminated without any
+ reply. If a reply object is provided, it must not exceed the buffer size once
+ converted into the internal HTTP representation. Because for now there is no
+ easy way to be sure it fits, it is probably better to keep it reasonably
+ small.
+
+ The reply object may be fully created in lua or the class Reply may be used to
+ create it.
+
+.. code-block:: lua
+
+ local reply = txn:reply()
+ reply:set_status(400, "Bad request")
+ reply:add_header("content-type", "text/html")
+ reply:add_header("cache-control", "no-cache")
+ reply:add_header("cache-control", "no-store")
+ reply:set_body("<html><body><h1>invalid request<h1></body></html>")
+ txn:done(reply)
+..
+
+.. code-block:: lua
+
+ txn:done{
+ status = 400,
+ reason = "Bad request",
+ headers = {
+ ["content-type"] = { "text/html" },
+ ["cache-control"] = { "no-cache", "no-store" },
+ },
+ body = "<html><body><h1>invalid request<h1></body></html>"
+ }
+..
+
+ .. warning::
+ It does not make sense to call this function from sample-fetches. In this
+ case the behavior is the same as core.done(): it finishes the Lua
+ execution. The transaction is really aborted only from an action registered
+ function.
+
+ :see: :js:func:`TXN.reply`, :js:class:`Reply`
+
+.. js:function:: TXN.set_loglevel(txn, loglevel)
+
+ Is used to change the log level of the current request. The "loglevel" must
+ be an integer between 0 and 7.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param integer loglevel: The required log level. This variable can be one of
+ :see: :js:attr:`core.emerg`, :js:attr:`core.alert`, :js:attr:`core.crit`,
+ :js:attr:`core.err`, :js:attr:`core.warning`, :js:attr:`core.notice`,
+ :js:attr:`core.info`, :js:attr:`core.debug` (log level definitions)
+
+.. js:function:: TXN.set_tos(txn, tos)
+
+ Is used to set the TOS or DSCP field value of packets sent to the client to
+ the value passed in "tos" on platforms which support this.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param integer tos: The new TOS or DSCP.
+
+.. js:function:: TXN.set_mark(txn, mark)
+
+ Is used to set the Netfilter MARK on all packets sent to the client to the
+ value passed in "mark" on platforms which support it.
+
+ :param class_txn txn: The class txn object containing the data.
+ :param integer mark: The mark value.
+
+.. js:function:: TXN.set_priority_class(txn, prio)
+
+ This function adjusts the priority class of the transaction. The value should
+ be within the range -2047..2047. Values outside this range will be
+ truncated.
+
+ See the HAProxy configuration.txt file keyword "http-request" action
+ "set-priority-class" for details.
+
+.. js:function:: TXN.set_priority_offset(txn, prio)
+
+ This function adjusts the priority offset of the transaction. The value
+ should be within the range -524287..524287. Values outside this range will be
+ truncated.
+
+ See the HAProxy configuration.txt file keyword "http-request" action
+ "set-priority-offset" for details.
+
+.. _reply_class:
+
+Reply class
+============
+
+.. js:class:: Reply
+
+ **context**: action
+
+ This class represents a HTTP response message. It provides some methods to
+ enrich it. Once converted into the internal HTTP representation, the response
+ message must not exceed the buffer size. Because for now there is no
+ easy way to be sure it fits, it is probably better to keep it reasonably
+ small.
+
+ See tune.bufsize in the configuration manual for details.
+
+.. code-block:: lua
+
+ local reply = txn:reply({status = 400}) -- default HTTP 400 reason-phrase used
+ reply:add_header("content-type", "text/html")
+ reply:add_header("cache-control", "no-cache")
+ reply:add_header("cache-control", "no-store")
+ reply:set_body("<html><body><h1>invalid request<h1></body></html>")
+..
+
+ :see: :js:func:`TXN.reply`
+
+.. js:attribute:: Reply.status
+
+ The reply status code. By default, the status code is set to 200.
+
+ :returns: integer
+
+.. js:attribute:: Reply.reason
+
+ The reason string describing the status code.
+
+ :returns: string
+
+.. js:attribute:: Reply.headers
+
+ A table indexing all reply headers by name. To each name is associated an
+ ordered list of values.
+
+ :returns: Lua table
+
+.. code-block:: lua
+
+ {
+ ["content-type"] = { "text/html" },
+ ["cache-control"] = {"no-cache", "no-store" },
+ x_header_name = { "value1", "value2", ... }
+ ...
+ }
+..
+
+.. js:attribute:: Reply.body
+
+ The reply payload.
+
+ :returns: string
+
+.. js:function:: Reply.set_status(REPLY, status[, reason])
+
+ Set the reply status code and optionally the reason-phrase. If the reason is
+ not provided, the default reason corresponding to the status code is used.
+
+ :param class_reply reply: The related Reply object.
+ :param integer status: The reply status code.
+ :param string reason: The reply status reason (optional).
+
+.. js:function:: Reply.add_header(REPLY, name, value)
+
+ Add a header to the reply object. If the header does not already exist, a new
+ entry is created with its name as index and a one-element list containing its
+ value as value. Otherwise, the header value is appended to the ordered list of
+ values associated to the header name.
+
+ :param class_reply reply: The related Reply object.
+ :param string name: The header field name.
+ :param string value: The header field value.
+
+.. js:function:: Reply.del_header(REPLY, name)
+
+ Remove all occurrences of a header name from the reply object.
+
+ :param class_reply reply: The related Reply object.
+ :param string name: The header field name.
+
+.. js:function:: Reply.set_body(REPLY, body)
+
+ Set the reply payload.
+
+ :param class_reply reply: The related Reply object.
+ :param string body: The reply payload.
+
+.. _socket_class:
+
+Socket class
+============
+
+.. js:class:: Socket
+
+ This class must be compatible with the Lua Socket class. Only the 'client'
+ functions are available. See the Lua Socket documentation:
+
+ `http://w3.impa.br/~diego/software/luasocket/tcp.html
+ <http://w3.impa.br/~diego/software/luasocket/tcp.html>`_
+
+.. js:function:: Socket.close(socket)
+
+ Closes a TCP object. The internal socket used by the object is closed and the
+ local address to which the object was bound is made available to other
+ applications. No further operations (except for further calls to the close
+ method) are allowed on a closed Socket.
+
+ :param class_socket socket: Is the manipulated Socket.
+
+ Note: It is important to close all used sockets once they are not needed,
+ since, in many systems, each socket uses a file descriptor, which are limited
+ system resources. Garbage-collected objects are automatically closed before
+ destruction, though.
+
+.. js:function:: Socket.connect(socket, address[, port])
+
+ Attempts to connect a socket object to a remote host.
+
+
+ In case of error, the method returns nil followed by a string describing the
+ error. In case of success, the method returns 1.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :param string address: can be an IP address or a host name. See below for more
+ information.
+ :param integer port: must be an integer number in the range [1..64K].
+ :returns: 1 or nil.
+
+ An address field extension permits to use the connect() function to connect to
+ other streams than TCP. The syntax containing a simple ipv4 or ipv6 address is
+ the basically expected format. This format requires the port.
+
+ Other formats accepted are a socket path like "/socket/path", it permits to
+ connect to a socket. Abstract namespaces are supported with the prefix
+ "abns@", and finally a file descriptor can be passed with the prefix "fd@".
+ The prefix "ipv4@", "ipv6@" and "unix@" are also supported. The port can be
+ passed in the string. The syntax "127.0.0.1:1234" is valid. In this case, the
+ parameter *port* must not be set.
+
+.. js:function:: Socket.connect_ssl(socket, address, port)
+
+ Same behavior as the function socket:connect, but uses SSL.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :returns: 1 or nil.
+
+.. js:function:: Socket.getpeername(socket)
+
+ Returns information about the remote side of a connected client object.
+
+ Returns a string with the IP address of the peer, followed by the port number
+ that peer is using for the connection. In case of error, the method returns
+ nil.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :returns: a string containing the server information.
+
+.. js:function:: Socket.getsockname(socket)
+
+ Returns the local address information associated to the object.
+
+ The method returns a string with local IP address and a number with the port.
+ In case of error, the method returns nil.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :returns: a string containing the client information.
+
+.. js:function:: Socket.receive(socket, [pattern [, prefix]])
+
+ Reads data from a client object, according to the specified read pattern.
+ Patterns follow the Lua file I/O format, and the difference in performance
+ between all patterns is negligible.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :param string|integer pattern: Describe what is required (see below).
+ :param string prefix: A string which will prefix the returned data.
+ :returns: a string containing the required data or nil.
+
+ Pattern can be any of the following:
+
+ * **`*a`**: reads from the socket until the connection is closed. No
+ end-of-line translation is performed;
+
+ * **`*l`**: reads a line of text from the Socket. The line is terminated by a
+ LF character (ASCII 10), optionally preceded by a CR character
+ (ASCII 13). The CR and LF characters are not included in the
+ returned line. In fact, all CR characters are ignored by the
+ pattern. This is the default pattern.
+
+ * **number**: causes the method to read a specified number of bytes from the
+ Socket. Prefix is an optional string to be concatenated to the
+ beginning of any received data before return.
+
+ * **empty**: If the pattern is left empty, the default option is `*l`.
+
+ If successful, the method returns the received pattern. In case of error, the
+ method returns nil followed by an error message which can be the string
+ 'closed' in case the connection was closed before the transmission was
+ completed or the string 'timeout' in case there was a timeout during the
+ operation. Also, after the error message, the function returns the partial
+ result of the transmission.
+
+ Important note: This function was changed severely. It used to support
+ multiple patterns (but I have never seen this feature used) and now it
+ doesn't anymore. Partial results used to be returned in the same way as
+ successful results. This last feature violated the idea that all functions
+ should return nil on error. Thus it was changed too.
+
+.. js:function:: Socket.send(socket, data [, start [, end ]])
+
+ Sends data through client object.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :param string data: The data that will be sent.
+ :param integer start: The start position in the buffer of the data which will
+ be sent.
+ :param integer end: The end position in the buffer of the data which will
+ be sent.
+ :returns: see below.
+
+ Data is the string to be sent. The optional arguments start and end work
+ exactly like the standard string.sub Lua function to allow the selection of
+ a substring to be sent.
+
+ If successful, the method returns the index of the last byte within [start,
+ end] that has been sent. Notice that, if start is 1 or absent, this is
+ effectively the total number of bytes sent. In case of error, the method
+ returns nil, followed by an error message, followed by the index of the last
+ byte within [start, end] that has been sent. You might want to try again from
+ the byte following that. The error message can be 'closed' in case the
+ connection was closed before the transmission was completed or the string
+ 'timeout' in case there was a timeout during the operation.
+
+ Note: Output is not buffered. For small strings, it is always better to
+ concatenate them in Lua (with the '..' operator) and send the result in one
+ call instead of calling the method several times.
+
+.. js:function:: Socket.setoption(socket, option [, value])
+
+ Just implemented for compatibility, this call does nothing.
+
+.. js:function:: Socket.settimeout(socket, value [, mode])
+
+ Changes the timeout values for the object. All I/O operations are blocking.
+ That is, any call to the methods send, receive, and accept will block
+ indefinitely, until the operation completes. The settimeout method defines a
+ limit on the amount of time the I/O methods can block. When a timeout time
+ has elapsed, the affected methods give up and fail with an error code.
+
+ The amount of time to wait is specified as the value parameter, in seconds.
+
+ The timeout modes are not implemented, the only settable timeout is the
+ inactivity time waiting to complete the internal buffer send or waiting to
+ receive data.
+
+ :param class_socket socket: Is the manipulated Socket.
+ :param float value: The timeout value. Use floating point to specify
+ milliseconds.
+
+.. _regex_class:
+
+Regex class
+===========
+
+.. js:class:: Regex
+
+ This class allows the usage of HAProxy regexes because classic lua doesn't
+ provide regexes. This class inherits the HAProxy compilation options, so the
+ regexes can be libc regex, pcre regex or pcre JIT regex.
+
+ The expression matching number is limited to 20 per regex. The only available
+ option is case sensitive.
+
+ Because regexes compilation is a heavy process, it is better to define all
+ your regexes in the **body context** and use it during the runtime.
+
+.. code-block:: lua
+
+ -- Create the regex
+ st, regex = Regex.new("needle (..) (...)", true);
+
+ -- Check compilation errors
+ if st == false then
+ print "error: " .. regex
+ end
+
+ -- Match the regexes
+ print(regex:exec("Looking for a needle in the haystack")) -- true
+ print(regex:exec("Lokking for a cat in the haystack")) -- false
+
+ -- Extract words
+ st, list = regex:match("Looking for a needle in the haystack")
+ print(st) -- true
+ print(list[1]) -- needle in the
+ print(list[2]) -- in
+ print(list[3]) -- the
+
+.. js:function:: Regex.new(regex, case_sensitive)
+
+ Create and compile a regex.
+
+ :param string regex: The regular expression according with the libc or pcre
+ standard
+ :param boolean case_sensitive: Match is case sensitive or not.
+ :returns: boolean status and :ref:`regex_class` or string containing fail
+ reason.
+
+.. js:function:: Regex.exec(regex, str)
+
+ Execute the regex.
+
+ :param class_regex regex: A :ref:`regex_class` object.
+ :param string str: The input string will be compared with the compiled regex.
+ :returns: a boolean status according with the match result.
+
+.. js:function:: Regex.match(regex, str)
+
+ Execute the regex and return matched expressions.
+
+ :param class_regex regex: A :ref:`regex_class` object.
+ :param string str: The input string will be compared with the compiled regex.
+ :returns: a boolean status according with the match result, and
+ a table containing all the string matched in order of declaration.
+
+.. _map_class:
+
+Map class
+=========
+
+.. js:class:: Map
+
+ This class permits to do some lookups in HAProxy maps. The declared maps can
+ be modified during the runtime through the HAProxy management socket.
+
+.. code-block:: lua
+
+ default = "usa"
+
+ -- Create and load map
+ geo = Map.new("geo.map", Map._ip);
+
+ -- Create new fetch that returns the user country
+ core.register_fetches("country", function(txn)
+ local src;
+ local loc;
+
+ src = txn.f:fhdr("x-forwarded-for");
+ if (src == nil) then
+ src = txn.f:src()
+ if (src == nil) then
+ return default;
+ end
+ end
+
+ -- Perform lookup
+ loc = geo:lookup(src);
+
+ if (loc == nil) then
+ return default;
+ end
+
+ return loc;
+ end);
+
+.. js:attribute:: Map._int
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.int` is also available for compatibility.
+
+.. js:attribute:: Map._ip
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.ip` is also available for compatibility.
+
+.. js:attribute:: Map._str
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.str` is also available for compatibility.
+
+.. js:attribute:: Map._beg
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.beg` is also available for compatibility.
+
+.. js:attribute:: Map._sub
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.sub` is also available for compatibility.
+
+.. js:attribute:: Map._dir
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.dir` is also available for compatibility.
+
+.. js:attribute:: Map._dom
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.dom` is also available for compatibility.
+
+.. js:attribute:: Map._end
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+.. js:attribute:: Map._reg
+
+ See the HAProxy configuration.txt file, chapter "Using ACLs and fetching
+ samples" and subchapter "ACL basics" to understand this pattern matching
+ method.
+
+ Note that :js:attr:`Map.reg` is also available for compatibility.
+
+
+.. js:function:: Map.new(file, method)
+
+ Creates and loads a map.
+
+ :param string file: Is the file containing the map.
+ :param integer method: Is the map pattern matching method. See the attributes
+ of the Map class.
+ :returns: a class Map object.
+ :see: The Map attributes: :js:attr:`Map._int`, :js:attr:`Map._ip`,
+ :js:attr:`Map._str`, :js:attr:`Map._beg`, :js:attr:`Map._sub`,
+ :js:attr:`Map._dir`, :js:attr:`Map._dom`, :js:attr:`Map._end` and
+ :js:attr:`Map._reg`.
+
+.. js:function:: Map.lookup(map, str)
+
+ Perform a lookup in a map.
+
+ :param class_map map: Is the class Map object.
+ :param string str: Is the string used as key.
+ :returns: a string containing the result or nil if no match.
+
+.. js:function:: Map.slookup(map, str)
+
+ Perform a lookup in a map.
+
+ :param class_map map: Is the class Map object.
+ :param string str: Is the string used as key.
+ :returns: a string containing the result or empty string if no match.
+
+.. _applethttp_class:
+
+AppletHTTP class
+================
+
+.. js:class:: AppletHTTP
+
+ This class is used with applets that require the 'http' mode. The http applet
+ can be registered with the *core.register_service()* function. They are used
+ for processing an http request like a server in back of HAProxy.
+
+ This is a hello world sample code:
+
+.. code-block:: lua
+
+ core.register_service("hello-world", "http", function(applet)
+ local response = "Hello World !"
+ applet:set_status(200)
+ applet:add_header("content-length", string.len(response))
+ applet:add_header("content-type", "text/plain")
+ applet:start_response()
+ applet:send(response)
+ end)
+
+.. js:attribute:: AppletHTTP.c
+
+ :returns: A :ref:`converters_class`
+
+ This attribute contains a Converters class object.
+
+.. js:attribute:: AppletHTTP.sc
+
+ :returns: A :ref:`converters_class`
+
+ This attribute contains a Converters class object. The
+ functions of this object always return a string.
+
+.. js:attribute:: AppletHTTP.f
+
+ :returns: A :ref:`fetches_class`
+
+ This attribute contains a Fetches class object. Note that the
+ applet execution place cannot access a valid HAProxy core HTTP
+ transaction, so some sample fetches related to the HTTP dependent
+ values (hdr, path, ...) are not available.
+
+.. js:attribute:: AppletHTTP.sf
+
+ :returns: A :ref:`fetches_class`
+
+ This attribute contains a Fetches class object. The functions of
+ this object always return a string. Note that the applet
+ execution place cannot access a valid HAProxy core HTTP
+ transaction, so some sample fetches related to the HTTP dependent
+ values (hdr, path, ...) are not available.
+
+.. js:attribute:: AppletHTTP.method
+
+ :returns: string
+
+ The attribute method returns a string containing the HTTP
+ method.
+
+.. js:attribute:: AppletHTTP.version
+
+ :returns: string
+
+ The attribute version, returns a string containing the HTTP
+ request version.
+
+.. js:attribute:: AppletHTTP.path
+
+ :returns: string
+
+ The attribute path returns a string containing the HTTP
+ request path.
+
+.. js:attribute:: AppletHTTP.qs
+
+ :returns: string
+
+ The attribute qs returns a string containing the HTTP
+ request query string.
+
+.. js:attribute:: AppletHTTP.length
+
+ :returns: integer
+
+ The attribute length returns an integer containing the HTTP
+ body length.
+
+.. js:attribute:: AppletHTTP.headers
+
+ :returns: table
+
+ The attribute headers returns a table containing the HTTP
+ headers. The header names are always in lower case. As the header name can be
+ encountered more than once in each request, the value is indexed with 0 as
+ first index value. The table has this form:
+
+.. code-block:: lua
+
+ AppletHTTP.headers['<header-name>'][<header-index>] = "<header-value>"
+
+ AppletHTTP.headers["host"][0] = "www.test.com"
+ AppletHTTP.headers["accept"][0] = "audio/basic q=1"
+ AppletHTTP.headers["accept"][1] = "audio/*, q=0.2"
+ AppletHTTP.headers["accept"][2] = "*/*, q=0.1"
+..
+
+.. js:function:: AppletHTTP.set_status(applet, code [, reason])
+
+ This function sets the HTTP status code for the response. The allowed codes
+ are from 100 to 599.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param integer code: the status code returned to the client.
+ :param string reason: the status reason returned to the client (optional).
+
+.. js:function:: AppletHTTP.add_header(applet, name, value)
+
+ This function adds a header in the response. Duplicated headers are not
+ collapsed. The special header *content-length* is used to determinate the
+ response length. If it does not exist, a *transfer-encoding: chunked* is set,
+ and all the write from the function *AppletHTTP:send()* become a chunk.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param string name: the header name
+ :param string value: the header value
+
+.. js:function:: AppletHTTP.start_response(applet)
+
+ This function indicates to the HTTP engine that it can process and send the
+ response headers. After this call we cannot add headers to the response; we
+ cannot use the *AppletHTTP:send()* function if the
+ *AppletHTTP:start_response()* is not called.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+
+.. js:function:: AppletHTTP.getline(applet)
+
+ This function returns a string containing one line from the http body. If the
+ data returned doesn't contain a final '\\n', it is assumed to be the last
+ available data before the end of stream.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :returns: a string. The string can be empty if we reach the end of the stream.
+
+.. js:function:: AppletHTTP.receive(applet, [size])
+
+ Reads data from the HTTP body, according to the specified read *size*. If the
+ *size* is missing, the function tries to read all the content of the stream
+ until the end. If the *size* is bigger than the http body, it returns the
+ amount of data available.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param integer size: the required read size.
+ :returns: always returns a string; the string can be empty if the connection
+ is closed.
+
+.. js:function:: AppletHTTP.send(applet, msg)
+
+ Send the message *msg* on the http request body.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param string msg: the message to send.
+
+.. js:function:: AppletHTTP.get_priv(applet)
+
+ Return Lua data stored in the current transaction. If no data are stored,
+ it returns a nil value.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :returns: the opaque data previously stored, or nil if nothing is
+ available.
+ :see: :js:func:`AppletHTTP.set_priv`
+
+.. js:function:: AppletHTTP.set_priv(applet, data)
+
+ Store any data in the current HAProxy transaction. This action replaces the
+ old stored data.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param opaque data: The data which is stored in the transaction.
+ :see: :js:func:`AppletHTTP.get_priv`
+
+.. js:function:: AppletHTTP.set_var(applet, var, value[, ifexist])
+
+ Converts a Lua type into a HAProxy type and stores it in the variable <var>.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :param type value: The value associated to the variable. The type can be
+ string or integer.
+ :param boolean ifexist: If this parameter is set to true the variable will
+ only be set if it was defined elsewhere (i.e. used within the configuration).
+ For global variables (using the "proc" scope), they will only be updated and
+ never created. It is highly recommended to always set this to true.
+
+ :see: :js:func:`AppletHTTP.unset_var`
+ :see: :js:func:`AppletHTTP.get_var`
+
+.. js:function:: AppletHTTP.unset_var(applet, var)
+
+ Unset the variable <var>.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :see: :js:func:`AppletHTTP.set_var`
+ :see: :js:func:`AppletHTTP.get_var`
+
+.. js:function:: AppletHTTP.get_var(applet, var)
+
+ Returns data stored in the variable <var> converted to a Lua type.
+
+ :param class_AppletHTTP applet: An :ref:`applethttp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :see: :js:func:`AppletHTTP.set_var`
+ :see: :js:func:`AppletHTTP.unset_var`
+
+.. _applettcp_class:
+
+AppletTCP class
+===============
+
+.. js:class:: AppletTCP
+
+ This class is used with applets that requires the 'tcp' mode. The tcp applet
+ can be registered with the *core.register_service()* function. They are used
+ for processing a tcp stream like a server in back of HAProxy.
+
+.. js:attribute:: AppletTCP.c
+
+ :returns: A :ref:`converters_class`
+
+ This attribute contains a Converters class object.
+
+.. js:attribute:: AppletTCP.sc
+
+ :returns: A :ref:`converters_class`
+
+ This attribute contains a Converters class object. The
+ functions of this object always return a string.
+
+.. js:attribute:: AppletTCP.f
+
+ :returns: A :ref:`fetches_class`
+
+ This attribute contains a Fetches class object.
+
+.. js:attribute:: AppletTCP.sf
+
+ :returns: A :ref:`fetches_class`
+
+ This attribute contains a Fetches class object.
+
+.. js:function:: AppletTCP.getline(applet)
+
+ This function returns a string containing one line from the stream. If the
+ data returned doesn't contain a final '\\n', it is assumed to be the last
+ available data before the end of stream.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :returns: a string. The string can be empty if we reach the end of the stream.
+
+.. js:function:: AppletTCP.receive(applet, [size])
+
+ Reads data from the TCP stream, according to the specified read *size*. If the
+ *size* is missing, the function tries to read all the content of the stream
+ until the end.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param integer size: the required read size.
+ :returns: always return a string, the string can be empty if the connection is
+ closed.
+
+.. js:function:: AppletTCP.send(applet, msg)
+
+ Send the message on the stream.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param string msg: the message to send.
+
+.. js:function:: AppletTCP.get_priv(applet)
+
+ Return Lua data stored in the current transaction. If no data are stored,
+ it returns a nil value.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :returns: the opaque data previously stored, or nil if nothing is
+ available.
+ :see: :js:func:`AppletTCP.set_priv`
+
+.. js:function:: AppletTCP.set_priv(applet, data)
+
+ Store any data in the current HAProxy transaction. This action replaces the
+ old stored data.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param opaque data: The data which is stored in the transaction.
+ :see: :js:func:`AppletTCP.get_priv`
+
+.. js:function:: AppletTCP.set_var(applet, var, value[, ifexist])
+
+ Converts a Lua type in a HAProxy type and stores it in a variable <var>.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :param type value: The value associated to the variable. The type can be
+ string or integer.
+ :param boolean ifexist: If this parameter is set to true the variable will
+ only be set if it was defined elsewhere (i.e. used within the configuration).
+ For global variables (using the "proc" scope), they will only be updated and
+ never created. It is highly recommended to always set this to true.
+
+ :see: :js:func:`AppletTCP.unset_var`
+ :see: :js:func:`AppletTCP.get_var`
+
+.. js:function:: AppletTCP.unset_var(applet, var)
+
+ Unsets the variable <var>.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :see: :js:func:`AppletTCP.get_var`
+ :see: :js:func:`AppletTCP.set_var`
+
+.. js:function:: AppletTCP.get_var(applet, var)
+
+ Returns data stored in the variable <var> converted to a Lua type.
+
+ :param class_AppletTCP applet: An :ref:`applettcp_class`
+ :param string var: The variable name according with the HAProxy variable
+ syntax.
+ :see: :js:func:`AppletTCP.unset_var`
+ :see: :js:func:`AppletTCP.set_var`
+
+.. _sticktable_class:
+
+StickTable class
+================
+
+.. js:class:: StickTable
+
+ **context**: task, action, sample-fetch
+
+ This class can be used to access the HAProxy stick tables from Lua.
+
+.. js:function:: StickTable.info()
+
+ Returns stick table attributes as a Lua table. See HAProxy documentation for
+ "stick-table" for canonical info, or check out example below.
+
+ :returns: Lua table
+
+ Assume our table has IPv4 key and gpc0 and conn_rate "columns":
+
+.. code-block:: lua
+
+ {
+ expire=<int>, # Value in ms
+ size=<int>, # Maximum table size
+ used=<int>, # Actual number of entries in table
+ data={ # Data columns, with types as key, and periods as values
+ (-1 if type is not rate counter)
+ conn_rate=<int>,
+ gpc0=-1
+ },
+ length=<int>, # max string length for string table keys, key length
+ # otherwise
+ nopurge=<boolean>, # purge oldest entries when table is full
+ type="ip" # can be "ip", "ipv6", "integer", "string", "binary"
+ }
+
+.. js:function:: StickTable.lookup(key)
+
+ Returns stick table entry for given <key>
+
+ :param string key: Stick table key (IP addresses and strings are supported)
+ :returns: Lua table
+
+.. js:function:: StickTable.dump([filter])
+
+ Returns all entries in stick table. An optional filter can be used
+ to extract entries with specific data values. Filter is a table with valid
+ comparison operators as keys followed by data type name and value pairs.
+ Check out the HAProxy docs for "show table" for more details. For the
+ reference, the supported operators are:
+
+ "eq", "ne", "le", "lt", "ge", "gt"
+
+ For large tables, execution of this function can take a long time (for
+ HAProxy standards). That's also true when filter is used, so take care and
+ measure the impact.
+
+ :param table filter: Stick table filter
+ :returns: Stick table entries (table)
+
+ See below for example filter, which contains 4 entries (or comparisons).
+ (Maximum number of filter entries is 4, defined in the source code)
+
+.. code-block:: lua
+
+ local filter = {
+ {"gpc0", "gt", 30}, {"gpc1", "gt", 20}, {"conn_rate", "le", 10}
+ }
+
+.. _action_class:
+
+Action class
+=============
+
+.. js:class:: Act
+
+ **context**: action
+
+ This class contains all return codes an action may return. It is the lua
+ equivalent to HAProxy "ACT_RET_*" code.
+
+.. code-block:: lua
+
+ core.register_action("deny", { "http-req" }, function (txn)
+ return act.DENY
+ end)
+..
+.. js:attribute:: act.CONTINUE
+
+ This attribute is an integer (0). It instructs HAProxy to continue the
+ current ruleset processing on the message. It is the default return code
+ for a lua action.
+
+ :returns: integer
+
+.. js:attribute:: act.STOP
+
+ This attribute is an integer (1). It instructs HAProxy to stop the current
+ ruleset processing on the message.
+
+.. js:attribute:: act.YIELD
+
+ This attribute is an integer (2). It instructs HAProxy to temporarily pause
+ the message processing. It will be resumed later on the same rule. The
+ corresponding lua script is re-executed for the start.
+
+.. js:attribute:: act.ERROR
+
+ This attribute is an integer (3). It triggers an internal error. The message
+ processing is stopped and the transaction is terminated. For HTTP streams, an
+ HTTP 500 error is returned to the client.
+
+ :returns: integer
+
+.. js:attribute:: act.DONE
+
+ This attribute is an integer (4). It instructs HAProxy to stop the message
+ processing.
+
+ :returns: integer
+
+.. js:attribute:: act.DENY
+
+ This attribute is an integer (5). It denies the current message. The message
+ processing is stopped and the transaction is terminated. For HTTP streams, an
+ HTTP 403 error is returned to the client if the deny is returned during the
+ request analysis. During the response analysis, a HTTP 502 error is returned
+ and the server response is discarded.
+
+ :returns: integer
+
+.. js:attribute:: act.ABORT
+
+ This attribute is an integer (6). It aborts the current message. The message
+ processing is stopped and the transaction is terminated. For HTTP streams,
+ HAProxy assumes a response was already sent to the client. From the Lua
+ actions point of view, when this code is used, the transaction is terminated
+ with no reply.
+
+ :returns: integer
+
+.. js:attribute:: act.INVALID
+
+ This attribute is an integer (7). It triggers an internal error. The message
+ processing is stopped and the transaction is terminated. For HTTP streams, an
+ HTTP 400 error is returned to the client if the error is returned during the
+ request analysis. During the response analysis, a HTTP 502 error is returned
+ and the server response is discarded.
+
+ :returns: integer
+
+.. js:function:: act:wake_time(milliseconds)
+
+ **context**: action
+
+ Set the script pause timeout to the specified time, defined in
+ milliseconds.
+
+ :param integer milliseconds: the required milliseconds.
+
+ This function may be used when a lua action returns `act.YIELD`, to force its
+ wake-up at most after the specified number of milliseconds.
+
+.. _filter_class:
+
+Filter class
+=============
+
+.. js:class:: filter
+
+ **context**: filter
+
+ This class contains return codes some filter callback functions may return. It
+ also contains configuration flags and some helper functions. To understand how
+ the filter API works, see `doc/internal/filters.txt` documentation.
+
+.. js:attribute:: filter.CONTINUE
+
+ This attribute is an integer (1). It may be returned by some filter callback
+ functions to instruct this filtering step is finished for this filter.
+
+.. js:attribute:: filter.WAIT
+
+ This attribute is an integer (0). It may be returned by some filter callback
+ functions to instruct the filtering must be paused, waiting for more data or
+ for an external event depending on this filter.
+
+.. js:attribute:: filter.ERROR
+
+ This attribute is an integer (-1). It may be returned by some filter callback
+ functions to trigger an error.
+
+.. js:attribute:: filter.FLT_CFG_FL_HTX
+
+ This attribute is a flag corresponding to the filter flag FLT_CFG_FL_HTX. When
+ it is set for a filter, it means the filter is able to filter HTTP streams.
+
+.. js:function:: filter.register_data_filter(chn)
+
+ **context**: filter
+
+ Enable the data filtering on the channel **chn** for the current filter. It
+ may be called at any time from any callback functions proceeding the data
+ analysis.
+
+ :param class_Channel chn: A :ref:`channel_class`.
+
+.. js:function:: filter.unregister_data_filter(chn)
+
+ **context**: filter
+
+ Disable the data filtering on the channel **chn** for the current filter. It
+ may be called at any time from any callback functions.
+
+ :param class_Channel chn: A :ref:`channel_class`.
+
+.. js:function:: filter.wake_time(milliseconds)
+
+ **context**: filter
+
+ Set the script pause timeout to the specified time, defined in
+ milliseconds.
+
+ :param integer milliseconds: the required milliseconds.
+
+ This function may be used from any lua filter callback function to force its
+ wake-up at most after the specified number of milliseconds. Especially, when
+ `filter.CONTINUE` is returned.
+
+
+A filter is declared using :js:func:`core.register_filter()` function. The
+provided class will be used to instantiate filters. It may define following
+attributes:
+
+* id: The filter identifier. It is a string that identifies the filter and is
+ optional.
+
+* flags: The filter flags. Only :js:attr:`filter.FLT_CFG_FL_HTX` may be set
+ for now.
+
+Such filter class must also define all required callback functions in the
+following list. Note that :js:func:`Filter.new()` must be defined otherwise the
+filter is ignored. Others are optional.
+
+* .. js:function:: FILTER.new()
+
+ Called to instantiate a new filter. This function must be defined.
+
+ :returns: a Lua object that will be used as filter instance for the current
+ stream.
+
+* .. js:function:: FILTER.start_analyze(flt, txn, chn)
+
+ Called when the analysis starts on the channel **chn**.
+
+* .. js:function:: FILTER.end_analyze(flt, txn, chn)
+
+ Called when the analysis ends on the channel **chn**.
+
+* .. js:function:: FILTER.http_headers(flt, txn, http_msg)
+
+ Called just before the HTTP payload analysis and after any processing on the
+ HTTP message **http_msg**. This callback functions is only called for HTTP
+ streams.
+
+* .. js:function:: FILTER.http_payload(flt, txn, http_msg)
+
+ Called during the HTTP payload analysis on the HTTP message **http_msg**. This
+ callback functions is only called for HTTP streams.
+
+* .. js:function:: FILTER.http_end(flt, txn, http_msg)
+
+ Called after the HTTP payload analysis on the HTTP message **http_msg**. This
+ callback functions is only called for HTTP streams.
+
+* .. js:function:: FILTER.tcp_payload(flt, txn, chn)
+
+ Called during the TCP payload analysis on the channel **chn**.
+
+Here is a full example:
+
+.. code-block:: lua
+
+ Trace = {}
+ Trace.id = "Lua trace filter"
+ Trace.flags = filter.FLT_CFG_FL_HTX;
+ Trace.__index = Trace
+
+ function Trace:new()
+ local trace = {}
+ setmetatable(trace, Trace)
+ trace.req_len = 0
+ trace.res_len = 0
+ return trace
+ end
+
+ function Trace:start_analyze(txn, chn)
+ if chn:is_resp() then
+ print("Start response analysis")
+ else
+ print("Start request analysis")
+ end
+ filter.register_data_filter(self, chn)
+ end
+
+ function Trace:end_analyze(txn, chn)
+ if chn:is_resp() then
+ print("End response analysis: "..self.res_len.." bytes filtered")
+ else
+ print("End request analysis: "..self.req_len.." bytes filtered")
+ end
+ end
+
+ function Trace:http_headers(txn, http_msg)
+ stline = http_msg:get_stline()
+ if http_msg.channel:is_resp() then
+ print("response:")
+ print(stline.version.." "..stline.code.." "..stline.reason)
+ else
+ print("request:")
+ print(stline.method.." "..stline.uri.." "..stline.version)
+ end
+
+ for n, hdrs in pairs(http_msg:get_headers()) do
+ for i,v in pairs(hdrs) do
+ print(n..": "..v)
+ end
+ end
+ return filter.CONTINUE
+ end
+
+ function Trace:http_payload(txn, http_msg)
+ body = http_msg:body(-20000)
+ if http_msg.channel:is_resp() then
+ self.res_len = self.res_len + body:len()
+ else
+ self.req_len = self.req_len + body:len()
+ end
+ end
+
+ core.register_filter("trace", Trace, function(trace, args)
+ return trace
+ end)
+
+..
+
+.. _httpmessage_class:
+
+HTTPMessage class
+===================
+
+.. js:class:: HTTPMessage
+
+ **context**: filter
+
+ This class contains all functions to manipulate a HTTP message. For now, this
+ class is only available from a filter context.
+
+.. js:function:: HTTPMessage.add_header(http_msg, name, value)
+
+ Appends a HTTP header field in the HTTP message **http_msg** whose name is
+ specified in **name** and whose value is defined in **value**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string name: The header name.
+ :param string value: The header value.
+
+.. js:function:: HTTPMessage.append(http_msg, string)
+
+ This function copies the string **string** at the end of incoming data of the
+ HTTP message **http_msg**. The function returns the copied length on success
+ or -1 if data cannot be copied.
+
+ Same that :js:func:`HTTPMessage.insert(http_msg, string, http_msg:input())`.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string string: The data to copy at the end of incoming data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: HTTPMessage.body(http_msg[, offset[, length]])
+
+ This function returns **length** bytes of incoming data from the HTTP message
+ **http_msg**, starting at the offset **offset**. The data are not removed from
+ the buffer.
+
+ By default, if no length is provided, all incoming data found, starting at the
+ given offset, are returned. If **length** is set to -1, the function tries to
+ retrieve a maximum of data. Because it is called in the filter context, it
+ never yield. Not providing an offset is the same as setting it to 0. A
+ positive offset is relative to the beginning of incoming data of the
+ http_message buffer while negative offset is relative to their end.
+
+ If there is no incoming data and the HTTP message can't receive more data,
+ a 'nil' value is returned.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param integer offset: *optional* The offset in incoming data to start to get
+ data. 0 by default. May be negative to be relative to the end of incoming
+ data.
+ :param integer length: *optional* The expected length of data to retrieve.
+ All incoming data by default. May be set to -1 to get a maximum of data.
+ :returns: a string containing the data found or nil.
+
+.. js:function:: HTTPMessage.eom(http_msg)
+
+ This function returns true if the end of message is reached for the HTTP
+ message **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: a boolean indicating whether the end of message was reached.
+
+.. js:function:: HTTPMessage.del_header(http_msg, name)
+
+ Removes all HTTP header fields in the HTTP message **http_msg** whose name is
+ specified in **name**.
+
+ :param class_httpmessage http_msg: The manipulated http message.
+ :param string name: The header name.
+
+.. js:function:: HTTPMessage.get_headers(http_msg)
+
+ Returns a table containing all the headers of the HTTP message **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated http message.
+ :returns: table of headers.
+
+ This is the form of the returned table:
+
+.. code-block:: lua
+
+ http_msg:get_headers()['<header-name>'][<header-index>] = "<header-value>"
+
+ local hdr = http_msg:get_headers()
+ hdr["host"][0] = "www.test.com"
+ hdr["accept"][0] = "audio/basic q=1"
+ hdr["accept"][1] = "audio/*, q=0.2"
+ hdr["accept"][2] = "*.*, q=0.1"
+..
+
+.. js:function:: HTTPMessage.get_stline(http_msg)
+
+ Returns a table containing the start-line of the HTTP message **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated http message.
+ :returns: the start-line.
+
+ This is the form of the returned table:
+
+.. code-block:: lua
+
+ -- for the request :
+ {"method" = string, "uri" = string, "version" = string}
+
+ -- for the response:
+ {"version" = string, "code" = string, "reason" = string}
+..
+
+.. js:function:: HTTPMessage.forward(http_msg, length)
+
+ This function forwards **length** bytes of data from the HTTP message
+ **http_msg**. Because it is called in the filter context, it never yields. Only
+ available incoming data may be forwarded, event if the requested length
+ exceeds the available amount of incoming data. It returns the amount of data
+ forwarded.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param integer length: The amount of data to forward.
+
+.. js:function:: HTTPMessage.input(http_msg)
+
+ This function returns the length of incoming data in the HTTP message
+ **http_msg** from the filter point of view.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: an integer containing the amount of available bytes.
+
+.. js:function:: HTTPMessage.insert(http_msg, string[, offset])
+
+ This function copies the string **string** at the offset **offset** in
+ incoming data of the HTTP message **http_msg**. The function returns the
+ copied length on success or -1 if data cannot be copied.
+
+ By default, if no offset is provided, the string is copied in front of
+ incoming data. A positive offset is relative to the beginning of incoming data
+ of the HTTP message while negative offset is relative to their end.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string string: The data to copy into incoming data.
+ :param integer offset: *optional* The offset in incoming data where to copy
+ data. 0 by default. May be negative to be relative to the end of incoming
+ data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: HTTPMessage.is_full(http_msg)
+
+ This function returns true if the HTTP message **http_msg** is full.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: a boolean
+
+.. js:function:: HTTPMessage.is_resp(http_msg)
+
+ This function returns true if the HTTP message **http_msg** is the response
+ one.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: a boolean
+
+.. js:function:: HTTPMessage.may_recv(http_msg)
+
+ This function returns true if the HTTP message **http_msg** may still receive
+ data.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: a boolean
+
+.. js:function:: HTTPMessage.output(http_msg)
+
+ This function returns the length of outgoing data of the HTTP message
+ **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :returns: an integer containing the amount of available bytes.
+
+.. js:function:: HTTPMessage.prepend(http_msg, string)
+
+ This function copies the string **string** in front of incoming data of the
+ HTTP message **http_msg**. The function returns the copied length on success
+ or -1 if data cannot be copied.
+
+ Same that :js:func:`HTTPMessage.insert(http_msg, string, 0)`.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string string: The data to copy in front of incoming data.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: HTTPMessage.remove(http_msg[, offset[, length]])
+
+ This function removes **length** bytes of incoming data of the HTTP message
+ **http_msg**, starting at offset **offset**. This function returns number of
+ bytes removed on success.
+
+ By default, if no length is provided, all incoming data, starting at the given
+ offset, are removed. Not providing an offset is the same that setting it
+ to 0. A positive offset is relative to the beginning of incoming data of the
+ HTTP message while negative offset is relative to the end.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param integer offset: *optional* The offset in incoming data where to start
+ to remove data. 0 by default. May be negative to be relative to the end of
+ incoming data.
+ :param integer length: *optional* The length of data to remove. All incoming
+ data by default.
+ :returns: an integer containing the amount of bytes removed.
+
+.. js:function:: HTTPMessage.rep_header(http_msg, name, regex, replace)
+
+ Matches the regular expression in all occurrences of header field **name**
+ according to regex **regex**, and replaces them with the string **replace**.
+ The replacement value can contain back references like \1, \2, ... This
+ function acts on whole header lines, regardless of the number of values they
+ may contain.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string name: The header name.
+ :param string regex: The match regular expression.
+ :param string replace: The replacement value.
+
+.. js:function:: HTTPMessage.rep_value(http_msg, name, regex, replace)
+
+ Matches the regular expression on every comma-delimited value of header field
+ **name** according to regex **regex**, and replaces them with the string
+ **replace**. The replacement value can contain back references like \1, \2,
+ ...
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string name: The header name.
+ :param string regex: The match regular expression.
+ :param string replace: The replacement value.
+
+.. js:function:: HTTPMessage.send(http_msg, string)
+
+ This function requires immediate send of the string **string**. It means the
+ string is copied at the beginning of incoming data of the HTTP message
+ **http_msg** and immediately forwarded. Because it is called in the filter
+ context, it never yields.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string string: The data to send.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: HTTPMessage.set(http_msg, string[, offset[, length]])
+
+ This function replaces **length** bytes of incoming data of the HTTP message
+ **http_msg**, starting at offset **offset**, by the string **string**. The
+ function returns the copied length on success or -1 if data cannot be copied.
+
+ By default, if no length is provided, all incoming data, starting at the given
+ offset, are replaced. Not providing an offset is the same as setting it
+ to 0. A positive offset is relative to the beginning of incoming data of the
+ HTTP message while negative offset is relative to the end.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string string: The data to copy into incoming data.
+ :param integer offset: *optional* The offset in incoming data where to start
+ the data replacement. 0 by default. May be negative to be relative to the
+ end of incoming data.
+ :param integer length: *optional* The length of data to replace. All incoming
+ data by default.
+ :returns: an integer containing the amount of bytes copied or -1.
+
+.. js:function:: HTTPMessage.set_eom(http_msg)
+
+ This function sets the end of message for the HTTP message **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+
+.. js:function:: HTTPMessage.set_header(http_msg, name, value)
+
+ This function replaces all occurrences of headers matching the name **name**
+ by a single one containing the value **value**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string name: The header name.
+ :param string value: The header value.
+
+ This function does the same work as the following code:
+
+.. code-block:: lua
+
+ http_msg:del_header("header")
+ http_msg:add_header("header", "value")
+..
+
+.. js:function:: HTTPMessage.set_method(http_msg, method)
+
+ Rewrites the request method with the string **method**. The HTTP message
+ **http_msg** must be the request.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string method: The new method.
+
+.. js:function:: HTTPMessage.set_path(http_msg, path)
+
+ Rewrites the request path with the string **path**. The HTTP message
+ **http_msg** must be the request.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string path: The new path.
+
+.. js:function:: HTTPMessage.set_query(http_msg, query)
+
+ Rewrites the request's query string which appears after the first question
+ mark ("?") with the string **query**. The HTTP message **http_msg** must be
+ the request.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string query: The new query.
+
+.. js:function:: HTTPMessage.set_status(http_msg, status[, reason])
+
+ Rewrites the response status code with the integer **status** and optionally
+ the reason **reason**. If no custom reason is provided, it will be generated
+ from the status. The HTTP message **http_msg** must be the response.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param integer status: The new response status code.
+ :param string reason: The new response reason (optional).
+
+.. js:function:: HTTPMessage.set_uri(http_msg, uri)
+
+ Rewrites the request URI with the string **uri**. The HTTP message
+ **http_msg** must be the request.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+ :param string uri: The new uri.
+
+.. js:function:: HTTPMessage.unset_eom(http_msg)
+
+ This function removes the end of message for the HTTP message **http_msg**.
+
+ :param class_httpmessage http_msg: The manipulated HTTP message.
+
+.. _CertCache_class:
+
+CertCache class
+================
+
+.. js:class:: CertCache
+
+ This class allows to update an SSL certificate file in the memory of the
+ current HAProxy process. It will do the same as "set ssl cert" + "commit ssl
+ cert" over the HAProxy CLI.
+
+.. js:function:: CertCache.set(certificate)
+
+ This function updates a certificate in memory.
+
+ :param table certificate: A table containing the fields to update.
+ :param string certificate.filename: The mandatory filename of the certificate
+ to update, it must already exist in memory.
+ :param string certificate.crt: A certificate in the PEM format. It can also
+ contain a private key.
+ :param string certificate.key: A private key in the PEM format.
+ :param string certificate.ocsp: An OCSP response in base64. (cf management.txt)
+ :param string certificate.issuer: The certificate of the OCSP issuer.
+ :param string certificate.sctl: An SCTL file.
+
+.. code-block:: lua
+
+ CertCache.set{filename="certs/localhost9994.pem.rsa", crt=crt}
+
+
+External Lua libraries
+======================
+
+A lot of useful lua libraries can be found here:
+
+* Lua toolbox has been superseded by
+ `https://luarocks.org/ <https://luarocks.org/>`_
+
+ The old lua toolbox source code is still available here
+ `https://github.com/catwell/lua-toolbox <https://github.com/catwell/lua-toolbox>`_ (DEPRECATED)
+
+Redis client library:
+
+* `https://github.com/nrk/redis-lua <https://github.com/nrk/redis-lua>`_
+
+This is an example about the usage of the Redis library within HAProxy.
+Note that each call to any function of this library can throw an error if
+the socket connection fails.
+
+.. code-block:: lua
+
+ -- load the redis library
+ local redis = require("redis");
+
+ function do_something(txn)
+
+ -- create and connect new tcp socket
+ local tcp = core.tcp();
+ tcp:settimeout(1);
+ tcp:connect("127.0.0.1", 6379);
+
+ -- use the redis library with this new socket
+ local client = redis.connect({socket=tcp});
+ client:ping();
+
+ end
+
+OpenSSL:
+
+* `http://mkottman.github.io/luacrypto/index.html
+ <http://mkottman.github.io/luacrypto/index.html>`_
+
+* `https://github.com/brunoos/luasec/wiki
+ <https://github.com/brunoos/luasec/wiki>`_
diff --git a/doc/lua.txt b/doc/lua.txt
new file mode 100644
index 0000000..5d41a30
--- /dev/null
+++ b/doc/lua.txt
@@ -0,0 +1,972 @@
+ Lua: Architecture and first steps
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ version 2.9
+
+ author: Thierry FOURNIER
+ contact: tfournier at arpalert dot org
+
+
+
+HAProxy is a powerful load balancer. It embeds many options and many
+configuration styles in order to give a solution to many load balancing
+problems. However, HAProxy is not universal and some special or specific
+problems do not have solution with the native software.
+
+This text is not a full explanation of the Lua syntax.
+
+This text is not a replacement of the HAProxy Lua API documentation. The API
+documentation can be found at the project root, in the documentation directory.
+The goal of this text is to discover how Lua is implemented in HAProxy and using
+it efficiently.
+
+However, this can be read by Lua beginners. Some examples are detailed.
+
+Why a scripting language in HAProxy
+===================================
+
+HAProxy 1.5 makes it possible to do many things using samples, but some people
+want to do more by combining results of sample fetches, programming conditions
+and loops, which is not possible. Sometimes people implement these
+functionalities in patches which have no meaning outside their network. These
+people must maintain these patches, or worse we must integrate them in the
+HAProxy mainstream.
+
+Their need is to have an embedded programming language in order to no longer
+modify the HAProxy source code, but to write their own control code. Lua is
+encountered very often in the software industry, and in some open source
+projects. It is easy to understand, efficient, light without external
+dependencies, and leaves the resource control to the implementation. Its design
+is close to the HAProxy philosophy which uses components for what they do
+perfectly.
+
+The HAProxy control block allows one to take a decision based on the comparison
+between samples and patterns. The samples are extracted using fetch functions
+easily extensible, and are used by actions which are also extensible. It seems
+natural to allow Lua to give samples, modify them, and to be an action target.
+So, Lua uses the same entities as the configuration language. This is the most
+natural and reliable way for the Lua integration. So, the Lua engine allows one
+to add new sample fetch functions, new converter functions and new actions.
+These new entities can access the existing samples fetches and converters
+allowing to extend them without rewriting them.
+
+The writing of the first Lua functions shows that implementing complex concepts
+like protocol analysers is easy and can be extended to full services. It appears
+that these services are not easy to implement with the HAProxy configuration
+model which is based on four steps: fetch, convert, compare and action. HAProxy
+is extended with a notion of services which are a formalisation of the existing
+services like stats, cli and peers. The service is an autonomous entity with a
+behaviour pattern close to that of an external client or server. The Lua engine
+inherits from this new service and offers new possibilities for writing
+services.
+
+This scripting language is useful for testing new features as proof of concept.
+Later, if there is general interest, the proof of concept could be integrated
+with C language in the HAProxy core.
+
+The HAProxy Lua integration also provides a simple way for distributing Lua
+packages. The final user needs only to install the Lua file, load it in HAProxy
+and follow the attached documentation.
+
+Design and technical things
+===========================
+
+Lua is integrated into the HAProxy event driven core. We want to preserve the
+fast processing of HAProxy. To ensure this, we implement some technical concepts
+between HAProxy and the Lua library.
+
+The following paragraph also describes the interactions between Lua and HAProxy
+from a technical point of view.
+
+Prerequisite
+------------
+
+Reading the following documentation links is required to understand the
+current paragraph:
+
+ HAProxy doc: http://docs.haproxy.org/
+ Lua API: http://www.lua.org/manual/5.3/
+ HAProxy API: http://www.arpalert.org/src/haproxy-lua-api/2.6/index.html
+ Lua guide: http://www.lua.org/pil/
+
+more about Lua choice
+---------------------
+
+Lua language is very simple to extend. It is easy to add new functions written
+in C in the core language. It is not required to embed very intrusive libraries,
+and we do not change compilation processes.
+
+The amount of memory consumed can be controlled, and the issues due to lack of
+memory are perfectly caught. The maximum amount of memory allowed for the Lua
+processes is configurable. If some memory is missing, the current Lua action
+fails, and the HAProxy processing flow continues.
+
+Lua provides a way for implementing event driven design. When the Lua code
+wants to do a blocking action, the action is started, it executes non blocking
+operations, and returns control to the HAProxy scheduler when it needs to wait
+for some external event.
+
+The Lua process can be interrupted after a number of instructions executed. The
+Lua execution will resume later. This is a useful way for controlling the
+execution time. This system also keeps HAProxy responsive. When the Lua
+execution is interrupted, HAProxy accepts some connections or transfers pending
+data. The Lua execution does not block the main HAProxy processing, except in
+some cases which we will see later.
+
+Lua function integration
+------------------------
+
+The Lua actions, sample fetches, converters and services are integrated in
+HAProxy with "register_*" functions. The register system is a choice for
+providing HAProxy Lua packages easily. The register system adds new sample
+fetches, converters, actions or services usable in the HAProxy configuration
+file.
+
+The register system is defined in the "core" functions collection. This
+collection is provided by HAProxy and is always available. Below, the list of
+these functions:
+
+ - core.register_action()
+ - core.register_converters()
+ - core.register_fetches()
+ - core.register_init()
+ - core.register_service()
+ - core.register_task()
+
+These functions are the execution entry points.
+
+An HTTP action must be used for manipulating HTTP request headers. This action
+cannot manipulate HTTP content. It is dangerous to use the channel
+manipulation object with an HTTP request in an HTTP action. The channel
+manipulation can transform a valid request into an invalid request. In this
+case, the action will never resume and the processing will be frozen. HAProxy
+discards the request after the reception timeout.
+
+Non blocking design
+-------------------
+
+HAProxy is an event driven software, so blocking system calls are absolutely
+forbidden. However, Lua allows one to do blocking actions. When an action
+blocks, HAProxy is waiting and does nothing, so basic functionalities like
+accepting connections or forwarding data are blocked until the end of the
+system call. In this case HAProxy will be less responsive.
+
+This is very insidious because when the developer tries to execute their Lua
+code with only one stream, HAProxy seems to run fine. When the code is used
+with a production stream, HAProxy encounters some slow processing, and it
+cannot hold the load.
+
+However, during the initialisation state, you can obviously use blocking
+functions. They are typically used for loading files.
+
+The list of prohibited standard Lua functions during the runtime contains all
+that do filesystem access:
+
+ - os.remove()
+ - os.rename()
+ - os.tmpname()
+ - package.*()
+ - io.*()
+ - file.*()
+
+Some other functions are prohibited:
+
+ - os.execute(), waits for the end of the required execution blocking HAProxy.
+
+ - os.exit(), is not really dangerous for the process, but it's not the good way
+ for exiting the HAProxy process.
+
+ - print(), writes data on stdout. In some cases these writes are blocking, the
+ best practice is reserving this call for debugging. We must prefer
+ to use core.log() or TXN.log() for sending messages.
+
+Some HAProxy functions have a blocking behaviour pattern in the Lua code, but
+they are compatible with the non blocking design. These functions are:
+
+ - All the socket class
+ - core.sleep()
+
+Responsive design
+-----------------
+
+HAProxy must process connection accepts, data forwarding and timeouts as soon
+as possible. One might first believe that a Lua script with a long execution
+time would impact the expected responsive behaviour.
+
+It is not the case, the Lua script execution is regularly interrupted, and
+HAProxy can process other things. These interruptions are expressed in number
+of Lua instructions. The number of instructions between two interruptions is
+configured with the following "tune" option:
+
+ tune.lua.forced-yield <nb>
+
+The default value is 10 000. For determining it, I ran a benchmark on my
+laptop. I executed a Lua loop for 10 seconds with different values for the
+"tune.lua.forced-yield" option, and I noted the results:
+
+ configured | Number of
+ instructions | loops executed
+ between two | in millions
+ forced yields |
+ ---------------+---------------
+ 10 | 160
+ 500 | 670
+ 1000 | 680
+ 5000 | 700
+ 7000 | 700
+ 8000 | 700
+ 9000 | 710 <- ceil
+ 10000 | 710
+ 100000 | 710
+ 1000000 | 710
+
+The result showed that from 9000 instructions between two interrupts, we
+reached a ceiling, so the default parameter is 10 000.
+
+When HAProxy interrupts the Lua processing, we have two states possible:
+
+ - Lua is resumable, and it returns control to the HAProxy scheduler,
+ - Lua is not resumable, and we just check the execution timeout.
+
+The second case occurs if it is required by the HAProxy core. This state is
+forced if the Lua is processed in a non resumable HAProxy part, like sample
+fetches or converters.
+
+It occurs also if the Lua is non resumable. For example, if some code is
+executed through the Lua pcall() function, the execution is not resumable. This
+is explained later.
+
+So, the Lua code must be fast and simple when it is executed as sample fetches
+and converters; it can be slow and complex when it is executed as actions and
+services.
+
+Execution time
+--------------
+
+The Lua execution time is measured and limited. Each group of functions has its
+own timeout configured. The time measured is the real Lua execution time, and
+not the difference between the end time and the start time. The groups are:
+
+ - main code and init are not submitted to the timeout,
+ - fetches, converters and action have a default timeout of 4s,
+ - task, by default does not have timeout,
+ - service have a default timeout of 4s.
+
+The corresponding tune options are:
+
+ - tune.lua.session-timeout (action, filter, cli)
+ - tune.lua.task-timeout (task)
+ - tune.lua.service-timeout (services)
+ - tune.lua.burst-timeout (max time between two lua yields)
+
+The task does not have a timeout because it runs in background along the
+HAProxy process life.
+
+For example, if a Lua script is executed during 1.1s and the script executes a
+sleep of 1 second, the effective measured running time is 0.1s.
+
+This timeout is useful for preventing infinite loops. During the runtime, it
+should be never triggered.
+
+The stack and the coprocess
+---------------------------
+
+The Lua execution is organized around a stack. Each Lua action, even out of the
+effective execution, affects the stack. HAProxy integration uses one main stack,
+which is common for all the process, and a secondary one used as coprocess.
+After the initialization, the main stack is no longer used by HAProxy, except
+for global storage. The second type of stack is used by all the Lua functions
+called from different Lua actions declared in HAProxy. The main stack permits
+to store coroutines pointers, and some global variables.
+
+Do you want to see an example of what Lua C development around a stack looks
+like? Some examples follow. This first one is a simple addition:
+
+ lua_pushnumber(L, 1)
+ lua_pushnumber(L, 2)
+ lua_arith(L, LUA_OPADD)
+
+It's easy, we push 1 on the stack, after, we push 2, and finally, we perform an
+addition. The two top entries of the stack are added, popped, and the result is
+pushed. It is a classic way with a stack.
+
+Now an example for constructing arrays and objects. It's a little bit more
+complicated. The difficulty consists in keeping in mind the state of the stack
+while we write the code. The goal is to create the entity described below.
+Note that the notation "*1" is a metatable reference. The metatable will be
+explained later.
+
+ name*1 = {
+ [0] = <userdata>,
+ }
+
+ *1 = {
+ "__index" = {
+ "method1" = <function>,
+ "method2" = <function>
+ }
+ "__gc" = <function>
+ }
+
+Let's go:
+
+ lua_newtable() // The "name" table
+ lua_newtable() // The metatable *1
+ lua_pushstring("__index")
+ lua_newtable() // The "__index" table
+ lua_pushstring("method1")
+ lua_pushfunction(function)
+ lua_settable(-3) // -3 is an index in the stack. insert method1
+ lua_pushstring("method2")
+ lua_pushfunction(function)
+ lua_settable(-3) // insert method2
+ lua_settable(-3) // insert "__index"
+ lua_pushstring("__gc")
+ lua_pushfunction(function)
+ lua_settable(-3) // insert "__gc"
+ lua_setmetatable(-1) // attach metatable to "name"
+ lua_pushnumber(0)
+ lua_pushuserdata(userdata)
+ lua_settable(-3)
+ lua_setglobal("name")
+
+So, coding for Lua in C, is not complex, but it needs some mental gymnastic.
+
+The object concept and the HAProxy format
+-----------------------------------------
+
+The object does not seem to be a native concept. A Lua object is a table. We
+can note that the table notation accepts three forms:
+
+ 1. mytable["entry"](mytable, "param")
+ 2. mytable.entry(mytable, "param")
+ 3. mytable:entry("param")
+
+These three notations have the same behaviour pattern: a function is executed
+with the table itself as first parameter and the string "param" as second
+parameter. The notation with [] is commonly used for storing data in a hash
+table, and the dotted notation is used for objects. The notation with ":"
+indicates that the first parameter is the element at the left of the symbol
+":".
+
+So, an object is a table and each entry of the table is a variable. A variable
+can be a function. These are the first concepts of the object notation in the
+Lua, but it is not the end.
+
+With objects, we usually expect classes and inheritance. This is the role of
+the metatable. A metatable is a table with predefined entries. These entries
+modify the default behaviour of the table. The simplest example is the
+"__index" entry. If this entry exists, it is called when a value is requested
+in the table. The behaviour is the following:
+
+ 1 - look in the table if the entry exists, and if it is the case, return it
+
+ 2 - look if a metatable exists, and if the "__index" entry exists
+
+ 3 - if "__index" is a function, execute it with the key as parameter, and
+ return the result of the function.
+
+ 4 - if "__index" is a table, look if the requested entry exists, and if it
+ exists, return it.
+
+ 5 - if it does not exist, return to step 2
+
+The behaviour of the point 5 represents the inheritance.
+
+In HAProxy all the provided objects are tables, the entry "[0]" contains private
+data, there are often userdata or lightuserdata. The metatable is registered in
+the global part of the main Lua stack, and it is called with the case sensitive
+class name. A great part of these class must not be used directly because it
+requires an initialisation using the HAProxy internal structs.
+
+The HAProxy objects use unified conventions. An Lua object is always a table.
+In most cases, an HAProxy Lua object needs some private data. These are always
+set in the index [0] of the array. The metatable entry "__tostring" returns the
+object name.
+
+The Lua developer can add entries to the HAProxy objects. They just need to
+work carefully and avoid modifying the index [0].
+
+Common HAProxy objects are:
+
+ - TXN : manipulates the transaction between the client and the server
+ - Channel : manipulates proxified data between the client and the server
+ - HTTP : manipulates HTTP between the client and the server
+ - Map : manipulates HAProxy maps.
+ - Fetches : access to all HAProxy sample fetches
+ - Converters : access to all HAProxy sample converters
+ - AppletTCP : process client request like a TCP server
+ - AppletHTTP : process client request like an HTTP server
+ - Socket : establish tcp connection to a server (ipv4/ipv6/socket/ssl/...)
+
+The garbage collector and the memory allocation
+-----------------------------------------------
+
+Lua doesn't really have a global memory limit, but HAProxy implements it. This
+permits to control the amount of memory dedicated to the Lua processes. It is
+specially useful with embedded environments.
+
+When the memory limit is reached, HAProxy refuses to give more memory to the Lua
+scripts. The current Lua execution is terminated with an error and HAProxy
+continues its processing.
+
+The max amount of memory is configured with the option:
+
+ tune.lua.maxmem
+
+As many other script languages, Lua uses a garbage collector for reusing its
+memory. The Lua developer can work without memory preoccupation. Usually, the
+garbage collector is controlled by the Lua core, but sometimes it will be useful
+to run when the user/developer requires. So the garbage collector can be called
+from C part or Lua part.
+
+Sometimes, objects using lightuserdata or userdata require to free some memory
+block or close a file descriptor not controlled by Lua. A dedicated garbage
+collection function is provided through the metatable. It is referenced with
+the special entry "__gc".
+
+Generally, in HAProxy, the garbage collector does this job without any
+intervention. However some objects use a great amount of memory, and we want to
+release as quickly as possible. The problem is that only the GC knows if the
+object is in use or not. The reason is simple variable containing objects can be
+shared between coroutines and the main thread, so an object can be used
+everywhere in HAProxy.
+
+The only one example is the HAProxy sockets. These are explained later, just for
+understanding the GC issues, a quick overview of the socket follows. The HAProxy
+socket uses an internal session and stream, the session uses resources like
+memory and file descriptor and in some cases keeps a socket open while it is no
+longer used by Lua.
+
+If the HAProxy socket is used, we force a garbage collector cycle after the
+end of each function using the HAProxy socket. The reason is simple: if the
+socket is no longer used, we want to close the connection quickly.
+
+A special flag is used in HAProxy indicating that a HAProxy socket is created.
+If this flag is set, a full GC cycle is started after each Lua action. This is
+not free, we lose about 10% of performance, but it is the only way for closing
+sockets quickly.
+
+The yield concept / longjmp issues
+----------------------------------
+
+The "yield" is an action which puts some Lua processing in pause and gives
+back the hand to the HAProxy core. This action is done when the Lua code needs
+to wait for data or other things. The most basic example is the sleep()
+function. In an event driven software the code must not execute blocking
+system calls, as a sleep would block the software for a long time. In HAProxy,
+a Lua sleep does a yield, and asks the scheduler to be woken up after the
+required sleep time. Meanwhile, the HAProxy scheduler does other things, like
+accepting new connections or forwarding data.
+
+A yield is also executed regularly, after a lot of Lua instructions processed.
+This yield permits to control the effective execution time, and also give back
+the hand to the HAProxy core. When HAProxy finishes to process the pending jobs,
+the Lua execution continues.
+
+This special "yield" uses the Lua "debug" functions. Lua provides a debug method
+called "lua_sethook()" which permits to interrupt the execution after some
+configured condition and call a function. This condition used in HAProxy is
+a number of instructions processed and when a function returns. The function
+called controls the effective execution time, and if it is possible to send a
+"yield".
+
+The yield system is based on a setjmp/longjmp couple. In brief, the setjmp()
+stores a stack state, and the longjmp() restores the stack to the state it had
+before the last Lua execution.
+
+Lua can immediately stop its execution if an error occurs. This system uses also
+the longjmp system. In HAProxy, we try to use this system only for unrecoverable
+errors. Maybe some trivial errors target an exception, but we try to remove it.
+
+It seems that Lua uses the longjmp system for having a behaviour like the java
+try / catch. We can use the function pcall() to execute some code. The function
+pcall() run a setjmp(). So, if any error occurs while the Lua code execution,
+the flow immediately returns from the pcall() with an error.
+
+The big issue of this behaviour is that we cannot do a yield. So if some Lua
+code executes a library using pcall() for catching errors, HAProxy must wait
+for the end of the execution without processing any accept or any stream. The
+cause is that the yield must jump to the root of the execution. The
+intermediate setjmp() prevents this behaviour.
+
+
+ HAProxy start Lua execution
+ + Lua puts a setjmp()
+ + Lua executes code
+ + Some code is executed in a pcall()
+ + pcall() puts a setjmp()
+ + Lua executes code
+ + A yield is require for a sleep function
+ it cannot be jumps to the Lua root execution.
+
+
+Another issue with the processing of strong errors is the manipulation of the
+Lua stack outside of a Lua processing. If one of the called functions raises a
+strong error, the default behaviour is an abort(). It is not acceptable when
+HAProxy is in runtime mode. The Lua documentation proposes to use another
+setjmp/longjmp to avoid the abort(). The goal is to put a setjmp() between
+manipulating the Lua stack and using an alternative "panic" function which
+jumps to the setjmp() in case of error.
+
+All of these behaviours are very dangerous for the stability, and the internal
+HAProxy code must be modified with many precautions.
+
+For preserving a good behaviour of HAProxy, the yield is mandatory.
+Unfortunately, some HAProxy parts are not adapted for resuming an execution
+after a yield. These parts are the sample fetches and the sample converters. So,
+the Lua code written in these parts of HAProxy must be quickly executed, and can
+not do actions which require yield like TCP connection or simple sleep.
+
+HAProxy socket object
+---------------------
+
+The HAProxy design is optimized for the data transfers between a client and a
+server, and for processing the many errors which can occur during these
+exchanges. HAProxy is not designed for having a third connection established
+to a third party server.
+
+The solution consists in putting the main stream in pause, waiting for the end
+of the exchanges with the third connection. This is completed by a signal
+between internal tasks. The following graph shows the HAProxy Lua socket:
+
+
+ +--------------------+
+ | Lua processing |
+ ------------------\ | creates socket | ------------------\
+ incoming request > | and puts the | Outgoing request >
+ ------------------/ | current processing | ------------------/
+    | in pause waiting |
+ | for TCP applet |
+ +-----------------+--+
+ ^ |
+ | |
+ | signal | read / write
+ | | data
+ | |
+ +-------------+---------+ v
+ | HAProxy internal +----------------+
+ | applet send signals | |
+ | when data is received | | -------------------\
+ | or some room is | Attached I/O | Client TCP stream >
+ | available | Buffers | -------------------/
+ +--------------------+--+ |
+ | |
+ +-------------------+
+
+
+A more detailed graph is available in the "doc/internals" directory.
+
+The HAProxy Lua socket uses a full HAProxy session / stream for establishing the
+connection. This mechanism provides all the facilities and HAProxy features,
+like the SSL stack, many socket type, and support for namespaces.
+Technically it supports the proxy protocol, but there are no way to enable it.
+
+How compiling HAProxy with Lua
+==============================
+
+HAProxy 1.6 requires Lua 5.3. Lua 5.3 offers some features which make easy the
+integration. Lua 5.3 is young, and some distros do not distribute it. Luckily,
+Lua is a great product because it does not require exotic dependencies, and its
+build process is really easy.
+
+The compilation process for linux is easy:
+
+ - download the source tarball
+ wget http://www.lua.org/ftp/lua-5.3.1.tar.gz
+
+ - untar it
+ tar xf lua-5.3.1.tar.gz
+
+ - enter the directory
+ cd lua-5.3.1
+
+ - build the library for linux
+ make linux
+
+ - install it:
+ sudo make INSTALL_TOP=/opt/lua-5.3.1 install
+
+HAProxy builds with your favourite options, plus the following options for
+embedding the Lua script language:
+
+ - download the source tarball
+ wget http://www.haproxy.org/download/1.6/src/haproxy-1.6.2.tar.gz
+
+ - untar it
+ tar xf haproxy-1.6.2.tar.gz
+
+ - enter the directory
+ cd haproxy-1.6.2
+
+ - build HAProxy:
+ make TARGET=linux-glibc \
+ USE_LUA=1 \
+ LUA_LIB=/opt/lua-5.3.1/lib \
+ LUA_INC=/opt/lua-5.3.1/include
+
+ - install it:
+ sudo make PREFIX=/opt/haproxy-1.6.2 install
+
+First steps with Lua
+====================
+
+Now, it's time to use Lua in HAProxy.
+
+Start point
+-----------
+
+The HAProxy global directive "lua-load <file>" allows one to load a Lua file.
+This is the entry point. This load happens during the configuration parsing,
+and the Lua file is immediately executed.
+
+All the register_*() functions must be called at this time because they are used
+just after the processing of the global section, in the frontend/backend/listen
+sections.
+
+The most simple "Hello world !" is the following line a loaded Lua file:
+
+ core.Alert("Hello World !");
+
+It displays a log during the HAProxy startup:
+
+ [alert] 285/083533 (14465) : Hello World !
+
+Note: By default, logs originating from a LUA script are sent to the loggers
+applicable to the current context, if any. If none are configured for use,
+logs are instead sent to stderr. See tune.lua.log.loggers and tune.lua.log.stderr
+for more information.
+
+Default path and libraries
+--------------------------
+
+Lua can embed some libraries. These libraries can be included from different
+paths. It seems that Lua doesn't like subdirectories. In the following example,
+I try to load a compiled library, so the first line is Lua code, the second line
+is an 'strace' extract proving that the library was opened. The next lines are
+the associated error.
+
+ require("luac/concat")
+
+ open("./luac/concat.so", O_RDONLY|O_CLOEXEC) = 4
+
+ [ALERT] (22806) : parsing [commonstats.conf:15] : lua runtime
+ error: error loading module 'luac/concat' from file './luac/concat.so':
+ ./luac/concat.so: undefined symbol: luaopen_luac/concat
+
+Lua tries to load the C symbol 'luaopen_luac/concat'. When Lua tries to open a
+library, it tries to execute the function associated to the symbol
+"luaopen_<libname>".
+
+The variable "<libname>" is defined using the content of the variable
+"package.cpath" and/or "package.path". The default definition of the
+"package.cpath" variable (on my computer) is:
+
+ /usr/local/lib/lua/5.3/?.so;/usr/local/lib/lua/5.3/loadall.so;./?.so
+
+The "<libname>" is the content which replaces the symbol "?". In the previous
+example, it's "luac/concat", and obviously the Lua core tries to load the
+function associated with the symbol "luaopen_luac/concat".
+
+My conclusion is that Lua doesn't support subdirectories. So, for loading
+libraries in a subdirectory, the variable must be filled with the name of this
+subdirectory. The extension .so must disappear, otherwise Lua tries to execute
+the function associated with the symbol "luaopen_concat.so". The following
+syntax is correct:
+
+ package.cpath = package.cpath .. ";./luac/?.so"
+ require("concat")
+
+First useful example
+--------------------
+
+ core.register_fetches("my-hash", function(txn, salt)
+ return txn.sc:sdbm(salt .. txn.sf:req_fhdr("host") .. txn.sf:path() .. txn.sf:src(), 1)
+ end)
+
+You will see that these 3 lines can generate a lot of explanations :)
+
+Core.register_fetches() is executed during the processing of the global section
+by the HAProxy configuration parser. A new sample fetch is declared with name
+"my-hash", this name is always prefixed by "lua.". So this new declared
+sample fetch will be used calling "lua.my-hash" in the HAProxy configuration
+file.
+
+The second parameter is an inline declared anonymous function. Note the closed
+parenthesis after the keyword "end" which ends the function. The first parameter
+of this anonymous function is "txn". It is an object of class TXN. It provides
+access functions. The second parameter is an arbitrary value provided by the
+HAProxy configuration file. This parameter is optional, the developer must
+check if it is present.
+
+The anonymous function registration is executed when the HAProxy backend or
+frontend configuration references the sample fetch "lua.my-hash".
+
+This example can be written with another style, like below:
+
+ function my_hash(txn, salt)
+ return txn.sc:sdbm(salt .. txn.sf:req_fhdr("host") .. txn.sf:path() .. txn.sf:src(), 1)
+ end
+
+ core.register_fetches("my-hash", my_hash)
+
+This second form is clearer, but the first one is compact.
+
+The operator ".." is a string concatenation. If one of the two operands is not a
+string, an error occurs and the execution is immediately stopped. This is
+important to keep in mind for the following things.
+
+Now I write the example on more than one line. It's an easier way for
+commenting the code:
+
+ 1. function my_hash(txn, salt)
+ 2. local str = ""
+ 3. str = str .. salt
+ 4. str = str .. txn.sf:req_fhdr("host")
+ 5. str = str .. txn.sf:path()
+ 6. str = str .. txn.sf:src()
+ 7. local result = txn.sc:sdbm(str, 1)
+ 8. return result
+ 9. end
+ 10.
+ 11. core.register_fetches("my-hash", my_hash)
+
+local
+~~~~~
+
+The first keyword is "local". This is a really important keyword. You must
+understand that the function "my_hash" will be called for each HAProxy request
+using the declared sample fetch. So, this function can be executed many times in
+parallel.
+
+By default, Lua uses global variables. So in this example, if the variable "str"
+is declared without the keyword "local", it will be shared by all the parallel
+executions of the function and obviously, the content of the requests will be
+shared.
+
+This warning is very important. I tried to write useful Lua code like a rewrite
+of the statistics page, and it is very hard thing to declare each variable as
+"local".
+
+I guess that this behaviour will be the cause of many troubles on the mailing
+list.
+
+str = str ..
+~~~~~~~~~~~~
+
+Now a parenthesis about the form "str = str ..". This form allows to do string
+concatenations. Remember that Lua uses a garbage collector, so what happens when
+we do "str = str .. 'another string'" ?
+
+ str = str .. "another string"
+ ^ ^ ^ ^
+ 1 2 3 4
+
+Lua executes first the concatenation operator (3), it allocates memory for the
+resulting string and fill this memory with the concatenation of the operands 2
+and 4. Next, it frees the variable 1, now the old content of 1 can be garbage
+collected. And finally, the new content of 1 is the concatenation.
+
+What is the problem? When we do this operation many times, we consume a lot
+of memory, and the string data is duplicated and moved many times. So, this
+practice is expensive in execution time and memory consumption.
+
+There are easy ways to prevent this behaviour. I guess that a C binding for
+concatenation with chunks will be available ASAP (it is already written). I did
+some benchmarks comparing the execution time of 1,000 runs of 1,000
+concatenations of 10 bytes, written in pure Lua and with a C library. The
+result is 10 times faster in C (1s in Lua, and 0.1s in C).
+
+txn
+~~~
+
+txn is an HAProxy object of class TXN. The documentation is available in the
+HAProxy Lua API reference. This class allows access to the native HAProxy
+sample fetches and converters. The object txn contains 2 members dedicated to
+the sample fetches and 2 members dedicated to the converters.
+
+The sample fetches members are "f" (as sample-Fetch) and "sf" (as String
+sample-Fetch). These two members contain exactly the same functions. All the
+HAProxy native sample fetches are available, obviously, the Lua registered sample
+fetches are not available. Unfortunately, HAProxy sample fetches names are not
+compatible with the Lua function names, and they are renamed. The renaming
+convention is simple: we replace all the '.', '+' and '-' by '_'. The '.' is the
+object member separator, and the "-" and "+" are math operators.
+
+Now that I'm writing this article, I know Lua better than when I wrote the
+sample-fetches wrapper. The original HAProxy sample-fetch names can be used
+via the alternative manner of calling an object member, so the sample-fetch
+"req.fhdr" (actually renamed "req_fhdr") can be used like this:
+
+ txn.f["req.fhdr"](txn.f, ...)
+
+However, I think that this form is not elegant.
+
+The "f" collection returns data with a type close to the original returned
+type. A string returns a Lua string, an integer returns a Lua integer and an IP
+address returns a Lua string. Sometimes the data is not or not yet available;
+in this case it returns the Lua nil value.
+
+The "sf" collection guarantees that a string will always be returned. If the
+data is not available, an empty string is returned. The main usage of these
+collections is to concatenate the returned sample-fetches without testing each
+function.
+
+The parameters of the sample-fetches are in accordance with the HAProxy
+documentation.
+
+The converters work in exactly the same manner as the sample fetches. The
+only difference is that the first parameter is the converter entry element.
+The "c" collection returns a precise result, and the "sc" collection always
+returns a string.
+
+The sample-fetches used in the example function are "txn.sf:req_fhdr()",
+"txn.sf:path()" and "txn.sf:src()". The converter is "txn.sc:sdbm()". The same
+function with the "f" collection of sample-fetches and the "c" collection of
+converters should be written like this:
+
+ 1. function my_hash(txn, salt)
+ 2. local str = ""
+ 3. str = str .. salt
+ 4. str = str .. tostring(txn.f:req_fhdr("host"))
+ 5. str = str .. tostring(txn.f:path())
+ 6. str = str .. tostring(txn.f:src())
+ 7. local result = tostring(txn.c:sdbm(str, 1))
+ 8. return result
+ 9. end
+ 10.
+ 11. core.register_fetches("my-hash", my_hash)
+
+tostring
+~~~~~~~~
+
+The function tostring ensures that its parameter is returned as a string. If the
+parameter is a table or a thread or anything that will not have any sense as a
+string, a form like the typename followed by a pointer is returned. For example:
+
+ t = {}
+ print(tostring(t))
+
+returns:
+
+ table: 0x15facc0
+
+For objects, if the special function __tostring() is registered in the attached
+metatable, it will be called with the table itself as first argument. The
+HAProxy object returns its own type.
+
+About the converters entry point
+--------------------------------
+
+In HAProxy, a converter is a stateless function that takes data as input and
+returns a transformation of this data as output. In Lua it is exactly the same
+behaviour.
+
+So, the registered Lua function doesn't have any special parameters, just a
+variable as input which contains the value to convert, and it must return data.
+
+The data required as input by the Lua converter is a string. So HAProxy will
+always provide a string as input. If the native sample fetch is not a string,
+it will be converted on a best-effort basis.
+
+The returned value can be of any type; it will be converted into a sample of
+the nearest HAProxy type. The conversion rules from Lua variables to HAProxy
+samples are:
+
+ Lua | HAProxy sample types
+ -----------+---------------------
+ "number" | "sint"
+ "boolean" | "bool"
+ "string" | "str"
+ "userdata" | "bool" (false)
+ "nil" | "bool" (false)
+ "table" | "bool" (false)
+ "function" | "bool" (false)
+ "thread" | "bool" (false)
+
+The function used for registering a converter is:
+
+ core.register_converters()
+
+The task entry point
+--------------------
+
+The function "core.register_task(fcn)" executes the function "fcn" once when
+the scheduler starts. This mechanism is used for executing background tasks.
+For example, you can use this functionality for periodically checking the
+health of another service, and giving the result to each proxy needing it.
+
+The task is started once; if you want periodic actions, you can use
+"core.sleep()" or "core.msleep()" to wait for the next runtime.
+
+Storing Lua variables between functions in the same session
+-----------------------------------------------------------
+
+All the functions registered as actions or sample fetches can share a Lua
+context. This context is a memory zone in the stack. Sample fetches and
+actions use the same stack, so both can access the context.
+
+The context is accessible via the functions get_priv and set_priv provided by
+an object of class TXN. The value given to set_priv replaces the currently
+stored value. This value can be a table, which is useful if a lot of data must
+be shared.
+
+If the value stored is a table, you can add or remove entries from the table
+without storing again the new table. Maybe an example will be clearer:
+
+ local t = {}
+ txn:set_priv(t)
+
+ t["entry1"] = "foo"
+ t["entry2"] = "bar"
+
+ -- this will display "foo"
+ print(txn:get_priv()["entry1"])
+
+HTTP actions
+============
+
+ ... coming soon ...
+
+Lua is fast, but my service requires more execution speed
+=========================================================
+
+We can write C modules for Lua. These modules can run with HAProxy as long as
+they are compliant with the HAProxy Lua version. A simple example is the
+"concat" module.
+
+It is very easy to write and compile a C Lua library, however, I don't see
+documentation about this process. So the current chapter is a quick howto.
+
+The entry point
+---------------
+
+The entry point is called "luaopen_<name>", where <name> is the name of the
+".so" file. A hello world looks like this:
+
+ #include <stdio.h>
+ #include <lua.h>
+ #include <lauxlib.h>
+
+ int luaopen_mymod(lua_State *L)
+ {
+ printf("Hello world\n");
+ return 0;
+ }
+
+The build
+---------
+
+The compilation of the source file requires the Lua "include" directory. The
+compilation and the link of the object file requires the -fPIC option. That's
+all.
+
+ cc -I/opt/lua/include -fPIC -shared -o mymod.so mymod.c
+
+Usage
+-----
+
+You can load this module with the following Lua syntax:
+
+ require("mymod")
+
+When you start HAProxy, this module just prints "Hello world" when it is
+loaded. Please remember that HAProxy doesn't allow blocking methods, so if you
+write a function doing filesystem access or synchronous network access, the
+whole HAProxy process will fail.
diff --git a/doc/management.txt b/doc/management.txt
new file mode 100644
index 0000000..b1789db
--- /dev/null
+++ b/doc/management.txt
@@ -0,0 +1,4521 @@
+ ------------------------
+ HAProxy Management Guide
+ ------------------------
+ version 2.9
+
+
+This document describes how to start, stop, manage, and troubleshoot HAProxy,
+as well as some known limitations and traps to avoid. It does not describe how
+to configure it (for this please read configuration.txt).
+
+Note to documentation contributors :
+ This document is formatted with 80 columns per line, with even number of
+ spaces for indentation and without tabs. Please follow these rules strictly
+ so that it remains easily printable everywhere. If you add sections, please
+ update the summary below for easier searching.
+
+
+Summary
+-------
+
+1. Prerequisites
+2. Quick reminder about HAProxy's architecture
+3. Starting HAProxy
+4. Stopping and restarting HAProxy
+5. File-descriptor limitations
+6. Memory management
+7. CPU usage
+8. Logging
+9. Statistics and monitoring
+9.1. CSV format
+9.2. Typed output format
+9.3. Unix Socket commands
+9.4. Master CLI
+9.4.1. Master CLI commands
+10. Tricks for easier configuration management
+11. Well-known traps to avoid
+12. Debugging and performance issues
+13. Security considerations
+
+
+1. Prerequisites
+----------------
+
+In this document it is assumed that the reader has sufficient administration
+skills on a UNIX-like operating system, uses the shell on a daily basis and is
+familiar with troubleshooting utilities such as strace and tcpdump.
+
+
+2. Quick reminder about HAProxy's architecture
+----------------------------------------------
+
+HAProxy is a multi-threaded, event-driven, non-blocking daemon. This means it
+uses event multiplexing to schedule all of its activities instead of relying on
+the system to schedule between multiple activities. Most of the time it runs as
+a single process, so the output of "ps aux" on a system will report only one
+"haproxy" process, unless a soft reload is in progress and an older process is
+finishing its job in parallel to the new one. It is thus always easy to trace
+its activity using the strace utility. In order to scale with the number of
+available processors, by default haproxy will start one worker thread per
+processor it is allowed to run on. Unless explicitly configured differently,
+the incoming traffic is spread over all these threads, all running the same
+event loop. A great care is taken to limit inter-thread dependencies to the
+strict minimum, so as to try to achieve near-linear scalability. This has some
+impacts such as the fact that a given connection is served by a single thread.
+Thus in order to use all available processing capacity, it is needed to have at
+least as many connections as there are threads, which is almost always granted.
+
+HAProxy is designed to isolate itself into a chroot jail during startup, where
+it cannot perform any file-system access at all. This is also true for the
+libraries it depends on (eg: libc, libssl, etc). The immediate effect is that
+a running process will not be able to reload a configuration file to apply
+changes, instead a new process will be started using the updated configuration
+file. Some other less obvious effects are that some timezone files or resolver
+files the libc might attempt to access at run time will not be found, though
+this should generally not happen as they're not needed after startup. A nice
+consequence of this principle is that the HAProxy process is totally stateless,
+and no cleanup is needed after it's killed, so any killing method that works
+will do the right thing.
+
+HAProxy doesn't write log files, but it relies on the standard syslog protocol
+to send logs to a remote server (which is often located on the same system).
+
+HAProxy uses its internal clock to enforce timeouts, that is derived from the
+system's time but where unexpected drift is corrected. This is done by limiting
+the time spent waiting in poll() for an event, and measuring the time it really
+took. In practice it never waits more than one second. This explains why, when
+running strace over a completely idle process, periodic calls to poll() (or any
+of its variants) surrounded by two gettimeofday() calls are noticed. They are
+normal, completely harmless and so cheap that the load they imply is totally
+undetectable at the system scale, so there's nothing abnormal there. Example :
+
+ 16:35:40.002320 gettimeofday({1442759740, 2605}, NULL) = 0
+ 16:35:40.002942 epoll_wait(0, {}, 200, 1000) = 0
+ 16:35:41.007542 gettimeofday({1442759741, 7641}, NULL) = 0
+ 16:35:41.007998 gettimeofday({1442759741, 8114}, NULL) = 0
+ 16:35:41.008391 epoll_wait(0, {}, 200, 1000) = 0
+ 16:35:42.011313 gettimeofday({1442759742, 11411}, NULL) = 0
+
+HAProxy is a TCP proxy, not a router. It deals with established connections that
+have been validated by the kernel, and not with packets of any form nor with
+sockets in other states (eg: no SYN_RECV nor TIME_WAIT), though their existence
+may prevent it from binding a port. It relies on the system to accept incoming
+connections and to initiate outgoing connections. An immediate effect of this is
+that there is no relation between packets observed on the two sides of a
+forwarded connection, which can be of different size, numbers and even family.
+Since a connection may only be accepted from a socket in LISTEN state, all the
+sockets it is listening to are necessarily visible using the "netstat" utility
+to show listening sockets. Example :
+
+ # netstat -ltnp
+ Active Internet connections (only servers)
+ Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
+ tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1629/sshd
+ tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 2847/haproxy
+ tcp 0 0 0.0.0.0:443 0.0.0.0:* LISTEN 2847/haproxy
+
+
+3. Starting HAProxy
+-------------------
+
+HAProxy is started by invoking the "haproxy" program with a number of arguments
+passed on the command line. The actual syntax is :
+
+ $ haproxy [<options>]*
+
+where [<options>]* is any number of options. An option always starts with '-'
+followed by one or more letters, and possibly followed by one or multiple extra
+arguments. Without any option, HAProxy displays the help page with a reminder
+about supported options. Available options may vary slightly based on the
+operating system. A fair number of these options overlap with an equivalent one
+in the "global" section. In this case, the command line always has precedence
+over the configuration file, so that the command line can be used to quickly
+enforce some settings without touching the configuration files. The current
+list of options is :
+
+ -- <cfgfile>* : all the arguments following "--" are paths to configuration
+ file/directory to be loaded and processed in the declaration order. It is
+ mostly useful when relying on the shell to load many files that are
+ numerically ordered. See also "-f". The difference between "--" and "-f" is
+ that one "-f" must be placed before each file name, while a single "--" is
+ needed before all file names. Both options can be used together, the
+ command line ordering still applies. When more than one file is specified,
+ each file must start on a section boundary, so the first keyword of each
+ file must be one of "global", "defaults", "peers", "listen", "frontend",
+ "backend", and so on. A file cannot contain just a server list for example.
+
+ -f <cfgfile|cfgdir> : adds <cfgfile> to the list of configuration files to be
+ loaded. If <cfgdir> is a directory, all the files (and only files) it
+ contains are added in lexical order (using LC_COLLATE=C) to the list of
+ configuration files to be loaded ; only files with ".cfg" extension are
+ added, only non hidden files (not prefixed with ".") are added.
+ Configuration files are loaded and processed in their declaration order.
+ This option may be specified multiple times to load multiple files. See
+ also "--". The difference between "--" and "-f" is that one "-f" must be
+ placed before each file name, while a single "--" is needed before all file
+ names. Both options can be used together, the command line ordering still
+ applies. When more than one file is specified, each file must start on a
+ section boundary, so the first keyword of each file must be one of
+ "global", "defaults", "peers", "listen", "frontend", "backend", and so on.
+ A file cannot contain just a server list for example.
+
+ -C <dir> : changes to directory <dir> before loading configuration
+ files. This is useful when using relative paths. Warning when using
+ wildcards after "--" which are in fact replaced by the shell before
+ starting haproxy.
+
+ -D : start as a daemon. The process detaches from the current terminal after
+ forking, and errors are not reported anymore in the terminal. It is
+ equivalent to the "daemon" keyword in the "global" section of the
+ configuration. It is recommended to always force it in any init script so
+ that a faulty configuration doesn't prevent the system from booting.
+
+ -L <name> : change the local peer name to <name>, which defaults to the local
+ hostname. This is used only with peers replication. You can use the
+ variable $HAPROXY_LOCALPEER in the configuration file to reference the
+ peer name.
+
+ -N <limit> : sets the default per-proxy maxconn to <limit> instead of the
+ builtin default value (usually 2000). Only useful for debugging.
+
+ -V : enable verbose mode (disables quiet mode). Reverts the effect of "-q" or
+ "quiet".
+
+ -W : master-worker mode. It is equivalent to the "master-worker" keyword in
+ the "global" section of the configuration. This mode will launch a "master"
+ which will monitor the "workers". Using this mode, you can reload HAProxy
+ directly by sending a SIGUSR2 signal to the master. The master-worker mode
+ is compatible either with the foreground or daemon mode. It is
+ recommended to use this mode with multiprocess and systemd.
+
+ -Ws : master-worker mode with support of `notify` type of systemd service.
+ This option is only available when HAProxy was built with `USE_SYSTEMD`
+ build option enabled.
+
+ -c : only performs a check of the configuration files and exits before trying
+ to bind. The exit status is zero if everything is OK, or non-zero if an
+ error is encountered. Presence of warnings will be reported if any.
+
+ -cc : evaluates a condition as used within a conditional block of the
+ configuration. The exit status is zero if the condition is true, 1 if the
+ condition is false or 2 if an error is encountered.
+
+ -d : enable debug mode. This disables daemon mode, forces the process to stay
+ in foreground and to show incoming and outgoing events. It must never be
+ used in an init script.
+
+ -dC[key] : dump the configuration file. It is performed after the lines are
+ tokenized, so comments are stripped and indenting is forced. If a non-zero
+ key is specified, lines are truncated before sensitive/confidential fields,
+ and identifiers and addresses are emitted hashed with this key using the
+ same algorithm as the one used by the anonymized mode on the CLI. This
+ means that the output may safely be shared with a developer who needs it
+ to figure what's happening in a dump that was anonymized using the same
+ key. Please also see the CLI's "set anon" command.
+
+ -dD : enable diagnostic mode. This mode will output extra warnings about
+ suspicious configuration statements. This will never prevent startup even in
+ "zero-warning" mode nor change the exit status code.
+
+ -dF : disable data fast-forward. It is a mechanism to optimize the data
+ forwarding by passing data directly from a side to the other one without
+ waking the stream up. Thanks to this directive, it is possible to disable
+ this optimization. Note it also disable any kernel tcp splicing. This
+ command is not meant for regular use, it will generally only be suggested by
+ developers along complex debugging sessions.
+
+ -dG : disable use of getaddrinfo() to resolve host names into addresses. It
+ can be used when suspecting that getaddrinfo() doesn't work as expected.
+ This option was made available because many bogus implementations of
+ getaddrinfo() exist on various systems and cause anomalies that are
+ difficult to troubleshoot.
+
+ -dK<class[,class]*> : dumps the list of registered keywords in each class.
+ The list of classes is available with "-dKhelp". All classes may be dumped
+ using "-dKall", otherwise a selection of those shown in the help can be
+ specified as a comma-delimited list. The output format will vary depending
+ on what class of keywords is being dumped (e.g. "cfg" will show the known
+ configuration keywords in a format resembling the config file format while
+ "smp" will show sample fetch functions prefixed with a compatibility matrix
+ with each rule set). These may rarely be used as-is by humans but can be of
+ great help for external tools that try to detect the appearance of new
+ keywords at certain places to automatically update some documentation,
+ syntax highlighting files, configuration parsers, API etc. The output
+ format may evolve a bit over time so it is really recommended to use this
+ output mostly to detect differences with previous archives. Note that not
+ all keywords are listed because many keywords have existed long before the
+ different keyword registration subsystems were created, and they do not
+ appear there. However since new keywords are only added via the modern
+ mechanisms, it's reasonably safe to assume that this output may be used to
+ detect language additions with a good accuracy. The keywords are only
+ dumped after the configuration is fully parsed, so that even dynamically
+ created keywords can be dumped. A good way to dump and exit is to run a
+ silent config check on an existing configuration:
+
+ ./haproxy -dKall -q -c -f foo.cfg
+
+ If no configuration file is available, using "-f /dev/null" will work as
+ well to dump all default keywords, but then the return status will not be
+ zero since there will be no listener, and will have to be ignored.
+
+ -dL : dumps the list of dynamic shared libraries that are loaded at the end
+ of the config processing. This will generally also include deep dependencies
+ such as anything loaded from Lua code for example, as well as the executable
+ itself. The list is printed in a format that ought to be easy enough to
+ sanitize to directly produce a tarball of all dependencies. Since it doesn't
+ stop the program's startup, it is recommended to only use it in combination
+ with "-c" and "-q" where only the list of loaded objects will be displayed
+ (or nothing in case of error). In addition, keep in mind that when providing
+ such a package to help with a core file analysis, most libraries are in fact
+ symbolic links that need to be dereferenced when creating the archive:
+
+ ./haproxy -W -q -c -dL -f foo.cfg | tar -T - -hzcf archive.tgz
+
+ When started in verbose mode (-V) the shared libraries' address ranges are
+ also enumerated, unless the quiet mode is in use (-q).
+
+ -dM[<byte>[,]][help|options,...] : forces memory poisoning, and/or changes
+    other memory debugging options. Memory poisoning means that each and every
+ memory region allocated with malloc() or pool_alloc() will be filled with
+ <byte> before being passed to the caller. When <byte> is not specified, it
+ defaults to 0x50 ('P'). While this slightly slows down operations, it is
+ useful to reliably trigger issues resulting from missing initializations in
+ the code that cause random crashes. Note that -dM0 has the effect of
+ turning any malloc() into a calloc(). In any case if a bug appears or
+ disappears when using this option it means there is a bug in haproxy, so
+ please report it. A number of other options are available either alone or
+ after a comma following the byte. The special option "help" will list the
+ currently supported options and their current value. Each debugging option
+ may be forced on or off. The most optimal options are usually chosen at
+ build time based on the operating system and do not need to be adjusted,
+ unless suggested by a developer. Supported debugging options include
+ (set/clear):
+ - fail / no-fail:
+ This enables randomly failing memory allocations, in conjunction with
+ the global "tune.fail-alloc" setting. This is used to detect missing
+ error checks in the code. Setting the option presets the ratio to 1%
+ failure rate.
+
+ - no-merge / merge:
+ By default, pools of very similar sizes are merged, resulting in more
+ efficiency, but this complicates the analysis of certain memory dumps.
+ This option allows to disable this mechanism, and may slightly increase
+ the memory usage.
+
+ - cold-first / hot-first:
+ In order to optimize the CPU cache hit ratio, by default the most
+ recently released objects ("hot") are recycled for new allocations.
+ But doing so also complicates analysis of memory dumps and may hide
+ use-after-free bugs. This option allows to instead pick the coldest
+ objects first, which may result in a slight increase of CPU usage.
+
+ - integrity / no-integrity:
+ When this option is enabled, memory integrity checks are enabled on
+ the allocated area to verify that it hasn't been modified since it was
+ last released. This works best with "no-merge", "cold-first" and "tag".
+ Enabling this option will slightly increase the CPU usage.
+
+ - no-global / global:
+ Depending on the operating system, a process-wide global memory cache
+ may be enabled if it is estimated that the standard allocator is too
+ slow or inefficient with threads. This option allows to forcefully
+ disable it or enable it. Disabling it may result in a CPU usage
+ increase with inefficient allocators. Enabling it may result in a
+ higher memory usage with efficient allocators.
+
+ - no-cache / cache:
+ Each thread uses a very fast local object cache for allocations, which
+ is always enabled by default. This option allows to disable it. Since
+ the global cache also passes via the local caches, this will
+ effectively result in disabling all caches and allocating directly from
+ the default allocator. This may result in a significant increase of CPU
+ usage, but may also result in small memory savings on tiny systems.
+
+ - caller / no-caller:
+ Enabling this option reserves some extra space in each allocated object
+ to store the address of the last caller that allocated or released it.
+ This helps developers go back in time when analysing memory dumps and
+ to guess how something unexpected happened.
+
+ - tag / no-tag:
+ Enabling this option reserves some extra space in each allocated object
+ to store a tag that allows to detect bugs such as double-free, freeing
+ an invalid object, and buffer overflows. It offers much stronger
+ reliability guarantees at the expense of 4 or 8 extra bytes per
+ allocation. It usually is the first step to detect memory corruption.
+
+ - poison / no-poison:
+ Enabling this option will fill allocated objects with a fixed pattern
+ that will make sure that some accidental values such as 0 will not be
+ present if a newly added field was mistakenly forgotten in an
+ initialization routine. Such bugs tend to rarely reproduce, especially
+ when pools are not merged. This is normally enabled by directly passing
+ the byte's value to -dM but using this option allows to disable/enable
+ use of a previously set value.
+
+ -dS : disable use of the splice() system call. It is equivalent to the
+ "global" section's "nosplice" keyword. This may be used when splice() is
+ suspected to behave improperly or to cause performance issues, or when
+ using strace to see the forwarded data (which do not appear when using
+ splice()).
+
+ -dV : disable SSL verify on the server side. It is equivalent to having
+ "ssl-server-verify none" in the "global" section. This is useful when
+ trying to reproduce production issues out of the production
+ environment. Never use this in an init script as it degrades SSL security
+ to the servers.
+
+ -dW : if set, haproxy will refuse to start if any warning was emitted while
+ processing the configuration. This helps detect subtle mistakes and keep the
+ configuration clean and portable across versions. It is recommended to set
+ this option in service scripts when configurations are managed by humans,
+ but it is recommended not to use it with generated configurations, which
+ tend to emit more warnings. It may be combined with "-c" to cause warnings
+ in checked configurations to fail. This is equivalent to global option
+ "zero-warning".
+
+ -db : disable background mode and multi-process mode. The process remains in
+ foreground. It is mainly used during development or during small tests, as
+ Ctrl-C is enough to stop the process. Never use it in an init script.
+
+ -de : disable the use of the "epoll" poller. It is equivalent to the "global"
+ section's keyword "noepoll". It is mostly useful when suspecting a bug
+ related to this poller. On systems supporting epoll, the fallback will
+ generally be the "poll" poller.
+
+ -dk : disable the use of the "kqueue" poller. It is equivalent to the
+ "global" section's keyword "nokqueue". It is mostly useful when suspecting
+ a bug related to this poller. On systems supporting kqueue, the fallback
+ will generally be the "poll" poller.
+
+ -dp : disable the use of the "poll" poller. It is equivalent to the "global"
+ section's keyword "nopoll". It is mostly useful when suspecting a bug
+ related to this poller. On systems supporting poll, the fallback will
+ generally be the "select" poller, which cannot be disabled and is limited
+ to 1024 file descriptors.
+
+ -dr : ignore server address resolution failures. It is very common when
+ validating a configuration out of production not to have access to the same
+ resolvers and to fail on server address resolution, making it difficult to
+ test a configuration. This option simply appends the "none" method to the
+ list of address resolution methods for all servers, ensuring that even if
+ the libc fails to resolve an address, the startup sequence is not
+ interrupted.
+
+ -dt [<trace_desc>,...] : activates traces on stderr. Without argument, this
+ enables all trace sources on error level. This can notably be useful to
+ detect protocol violations from clients or servers. An optional argument
+ can be used to specify a list of various trace configurations using ',' as
+ separator. Each element activates one or all trace sources. Additionally,
+ level and verbosity can be optionally specified on each element using ':' as
+ inner separator with trace name.
+
+ -m <limit> : limit the total allocatable memory to <limit> megabytes across
+ all processes. This may cause some connection refusals or some slowdowns
+ depending on the amount of memory needed for normal operations. This is
+ mostly used to force the processes to work in a constrained resource usage
+ scenario. It is important to note that the memory is not shared between
+ processes, so in a multi-process scenario, this value is first divided by
+ global.nbproc before forking.
+
+ -n <limit> : limits the per-process connection limit to <limit>. This is
+ equivalent to the global section's keyword "maxconn". It has precedence
+ over this keyword. This may be used to quickly force lower limits to avoid
+ a service outage on systems where resource limits are too low.
+
+ -p <file> : write all processes' pids into <file> during startup. This is
+ equivalent to the "global" section's keyword "pidfile". The file is opened
+ before entering the chroot jail, and after doing the chdir() implied by
+ "-C". Each pid appears on its own line.
+
+ -q : set "quiet" mode. This disables the output messages. It can be used in
+ combination with "-c" to just check if a configuration file is valid or not.
+
+ -S <bind>[,bind_options...]: in master-worker mode, bind a master CLI, which
+    allows access to every process, running or leaving ones.
+ For security reasons, it is recommended to bind the master CLI to a local
+ UNIX socket. The bind options are the same as the keyword "bind" in
+ the configuration file with words separated by commas instead of spaces.
+
+ Note that this socket can't be used to retrieve the listening sockets from
+ an old process during a seamless reload.
+
+ -sf <pid>* : send the "finish" signal (SIGUSR1) to older processes after boot
+ completion to ask them to finish what they are doing and to leave. <pid>
+ is a list of pids to signal (one per argument). The list ends on any
+ option starting with a "-". It is not a problem if the list of pids is
+ empty, so that it can be built on the fly based on the result of a command
+ like "pidof" or "pgrep".
+
+ -st <pid>* : send the "terminate" signal (SIGTERM) to older processes after
+ boot completion to terminate them immediately without finishing what they
+ were doing. <pid> is a list of pids to signal (one per argument). The list
+ ends on any option starting with a "-". It is not a problem if the list
+ of pids is empty, so that it can be built on the fly based on the result of
+ a command like "pidof" or "pgrep".
+
+ -v : report the version and build date.
+
+ -vv : display the version, build options, libraries versions and usable
+ pollers. This output is systematically requested when filing a bug report.
+
+ -x <unix_socket> : connect to the specified socket and try to retrieve any
+ listening sockets from the old process, and use them instead of trying to
+ bind new ones. This is useful to avoid missing any new connection when
+ reloading the configuration on Linux. The capability must be enabled on the
+ stats socket using "expose-fd listeners" in your configuration.
+ In master-worker mode, the master will use this option upon a reload with
+ the "sockpair@" syntax, which allows the master to connect directly to a
+ worker without using stats socket declared in the configuration.
+
+A safe way to start HAProxy from an init file consists in forcing the daemon
+mode, storing existing pids to a pid file and using this pid file to notify
+older processes to finish before leaving :
+
+ haproxy -f /etc/haproxy.cfg \
+ -D -p /var/run/haproxy.pid -sf $(cat /var/run/haproxy.pid)
+
+When the configuration is split into a few specific files (eg: tcp vs http),
+it is recommended to use the "-f" option :
+
+ haproxy -f /etc/haproxy/global.cfg -f /etc/haproxy/stats.cfg \
+ -f /etc/haproxy/default-tcp.cfg -f /etc/haproxy/tcp.cfg \
+ -f /etc/haproxy/default-http.cfg -f /etc/haproxy/http.cfg \
+ -D -p /var/run/haproxy.pid -sf $(cat /var/run/haproxy.pid)
+
+When an unknown number of files is expected, such as customer-specific files,
+it is recommended to assign them a name starting with a fixed-size sequence
+number and to use "--" to load them, possibly after loading some defaults :
+
+ haproxy -f /etc/haproxy/global.cfg -f /etc/haproxy/stats.cfg \
+ -f /etc/haproxy/default-tcp.cfg -f /etc/haproxy/tcp.cfg \
+ -f /etc/haproxy/default-http.cfg -f /etc/haproxy/http.cfg \
+ -D -p /var/run/haproxy.pid -sf $(cat /var/run/haproxy.pid) \
+ -f /etc/haproxy/default-customers.cfg -- /etc/haproxy/customers/*
+
+Sometimes a failure to start may happen for whatever reason. Then it is
+important to verify if the version of HAProxy you are invoking is the expected
+version and if it supports the features you are expecting (eg: SSL, PCRE,
+compression, Lua, etc). This can be verified using "haproxy -vv". Some
+important information such as certain build options, the target system and
+the versions of the libraries being used are reported there. It is also what
+you will systematically be asked for when posting a bug report :
+
+ $ haproxy -vv
+ HAProxy version 1.6-dev7-a088d3-4 2015/10/08
+ Copyright 2000-2015 Willy Tarreau <willy@haproxy.org>
+
+ Build options :
+ TARGET = linux2628
+ CPU = generic
+ CC = gcc
+ CFLAGS = -pg -O0 -g -fno-strict-aliasing -Wdeclaration-after-statement \
+ -DBUFSIZE=8030 -DMAXREWRITE=1030 -DSO_MARK=36 -DTCP_REPAIR=19
+ OPTIONS = USE_ZLIB=1 USE_DLMALLOC=1 USE_OPENSSL=1 USE_LUA=1 USE_PCRE=1
+
+ Default settings :
+ maxconn = 2000, bufsize = 8030, maxrewrite = 1030, maxpollevents = 200
+
+ Encrypted password support via crypt(3): yes
+ Built with zlib version : 1.2.6
+ Compression algorithms supported : identity("identity"), deflate("deflate"), \
+ raw-deflate("deflate"), gzip("gzip")
+ Built with OpenSSL version : OpenSSL 1.0.1o 12 Jun 2015
+ Running on OpenSSL version : OpenSSL 1.0.1o 12 Jun 2015
+ OpenSSL library supports TLS extensions : yes
+ OpenSSL library supports SNI : yes
+ OpenSSL library supports prefer-server-ciphers : yes
+ Built with PCRE version : 8.12 2011-01-15
+ PCRE library supports JIT : no (USE_PCRE_JIT not set)
+ Built with Lua version : Lua 5.3.1
+ Built with transparent proxy support using: IP_TRANSPARENT IP_FREEBIND
+
+ Available polling systems :
+ epoll : pref=300, test result OK
+ poll : pref=200, test result OK
+ select : pref=150, test result OK
+ Total: 3 (3 usable), will use epoll.
+
+The relevant information that many non-developer users can verify here are :
+ - the version : 1.6-dev7-a088d3-4 above means the code is currently at commit
+ ID "a088d3" which is the 4th one after official version "1.6-dev7".
+ Version 1.6-dev7 would show as "1.6-dev7-8c1ad7". What matters here is in
+ fact "1.6-dev7". This is the 7th development version of what will become
+ version 1.6 in the future. A development version not suitable for use in
+ production (unless you know exactly what you are doing). A stable version
+ will show as a 3-numbers version, such as "1.5.14-16f863", indicating the
+ 14th level of fix on top of version 1.5. This is a production-ready version.
+
+ - the release date : 2015/10/08. It is represented in the universal
+ year/month/day format. Here this means October 8th, 2015. Given that stable
+ releases are issued every few months (1-2 months at the beginning, sometimes
+ 6 months once the product becomes very stable), if you're seeing an old date
+ here, it means you're probably affected by a number of bugs or security
+ issues that have since been fixed and that it might be worth checking on the
+ official site.
+
+ - build options : they are relevant to people who build their packages
+ themselves, they can explain why things are not behaving as expected. For
+ example the development version above was built for Linux 2.6.28 or later,
+ targeting a generic CPU (no CPU-specific optimizations), and lacks any
+ code optimization (-O0) so it will perform poorly in terms of performance.
+
+ - libraries versions : zlib version is reported as found in the library
+ itself. In general zlib is considered a very stable product and upgrades
+ are almost never needed. OpenSSL reports two versions, the version used at
+ build time and the one being used, as found on the system. These ones may
+ differ by the last letter but never by the numbers. The build date is also
+ reported because most OpenSSL bugs are security issues and need to be taken
+ seriously, so this library absolutely needs to be kept up to date. Seeing a
+ 4-months old version here is highly suspicious and indeed an update was
+ missed. PCRE provides very fast regular expressions and is highly
+ recommended. Certain of its extensions such as JIT are not present in all
+ versions and still young so some people prefer not to build with them,
+ which is why the build status is reported as well. Regarding the Lua
+ scripting language, HAProxy expects version 5.3 which is very young since
+ it was released a little time before HAProxy 1.6. It is important to check
+ on the Lua web site if some fixes are proposed for this branch.
+
+ - Available polling systems will affect the process's scalability when
+ dealing with more than about one thousand of concurrent connections. These
+ ones are only available when the correct system was indicated in the TARGET
+ variable during the build. The "epoll" mechanism is highly recommended on
+ Linux, and the kqueue mechanism is highly recommended on BSD. Lacking them
+ will result in poll() or even select() being used, causing a high CPU usage
+ when dealing with a lot of connections.
+
+
+4. Stopping and restarting HAProxy
+----------------------------------
+
+HAProxy supports a graceful and a hard stop. The hard stop is simple, when the
+SIGTERM signal is sent to the haproxy process, it immediately quits and all
+established connections are closed. The graceful stop is triggered when the
+SIGUSR1 signal is sent to the haproxy process. It consists in only unbinding
+from listening ports, but continue to process existing connections until they
+close. Once the last connection is closed, the process leaves.
+
+The hard stop method is used for the "stop" or "restart" actions of the service
+management script. The graceful stop is used for the "reload" action which
+tries to seamlessly reload a new configuration in a new process.
+
+Both of these signals may be sent by the new haproxy process itself during a
+reload or restart, so that they are sent at the latest possible moment and only
+if absolutely required. This is what is performed by the "-st" (hard) and "-sf"
+(graceful) options respectively.
+
+In master-worker mode, it is not needed to start a new haproxy process in
+order to reload the configuration. The master process reacts to the SIGUSR2
+signal by reexecuting itself with the -sf parameter followed by the PIDs of
+the workers. The master will then parse the configuration file and fork new
+workers.
+
+To understand better how these signals are used, it is important to understand
+the whole restart mechanism.
+
+First, an existing haproxy process is running. The administrator uses a system
+specific command such as "/etc/init.d/haproxy reload" to indicate they want to
+take the new configuration file into effect. What happens then is the following.
+First, the service script (/etc/init.d/haproxy or equivalent) will verify that
+the configuration file parses correctly using "haproxy -c". After that it will
+try to start haproxy with this configuration file, using "-st" or "-sf".
+
+Then HAProxy tries to bind to all listening ports. If some fatal errors happen
+(eg: address not present on the system, permission denied), the process quits
+with an error. If a socket binding fails because a port is already in use, then
+the process will first send a SIGTTOU signal to all the pids specified in the
+"-st" or "-sf" pid list. This is what is called the "pause" signal. It instructs
+all existing haproxy processes to temporarily stop listening to their ports so
+that the new process can try to bind again. During this time, the old process
+continues to process existing connections. If the binding still fails (because
+for example a port is shared with another daemon), then the new process sends a
+SIGTTIN signal to the old processes to instruct them to resume operations just
+as if nothing happened. The old processes will then restart listening to the
+ports and continue to accept connections. Note that this mechanism is system
+dependent and some operating systems may not support it in multi-process mode.
+
+If the new process manages to bind correctly to all ports, then it sends either
+the SIGTERM (hard stop in case of "-st") or the SIGUSR1 (graceful stop in case
+of "-sf") to all processes to notify them that it is now in charge of operations
+and that the old processes will have to leave, either immediately or once they
+have finished their job.
+
+It is important to note that during this timeframe, there are two small windows
+of a few milliseconds each where it is possible that a few connection failures
+will be noticed during high loads. Typically observed failure rates are around
+1 failure during a reload operation every 10000 new connections per second,
+which means that a heavily loaded site running at 30000 new connections per
+ second may see about 3 failed connections upon every reload. The two situations
+where this happens are :
+
+ - if the new process fails to bind due to the presence of the old process,
+ it will first have to go through the SIGTTOU+SIGTTIN sequence, which
+ typically lasts about one millisecond for a few tens of frontends, and
+ during which some ports will not be bound to the old process and not yet
+ bound to the new one. HAProxy works around this on systems that support the
+ SO_REUSEPORT socket options, as it allows the new process to bind without
+ first asking the old one to unbind. Most BSD systems have been supporting
+ this almost forever. Linux has been supporting this in version 2.0 and
+ dropped it around 2.2, but some patches were floating around by then. It
+ was reintroduced in kernel 3.9, so if you are observing a connection
+ failure rate above the one mentioned above, please ensure that your kernel
+ is 3.9 or newer, or that relevant patches were backported to your kernel
+ (less likely).
+
+ - when the old processes close the listening ports, the kernel may not always
+ redistribute any pending connection that was remaining in the socket's
+ backlog. Under high loads, a SYN packet may happen just before the socket
+ is closed, and will lead to an RST packet being sent to the client. In some
+ critical environments where even one drop is not acceptable, these ones are
+ sometimes dealt with using firewall rules to block SYN packets during the
+ reload, forcing the client to retransmit. This is totally system-dependent,
+ as some systems might be able to visit other listening queues and avoid
+ this RST. A second case concerns the ACK from the client on a local socket
+ that was in SYN_RECV state just before the close. This ACK will lead to an
+ RST packet while the haproxy process is still not aware of it. This one is
+ harder to get rid of, though the firewall filtering rules mentioned above
+ will work well if applied one second or so before restarting the process.
+
+For the vast majority of users, such drops will never ever happen since they
+don't have enough load to trigger the race conditions. And for most high traffic
+users, the failure rate is still fairly within the noise margin provided that at
+least SO_REUSEPORT is properly supported on their systems.
+
+5. File-descriptor limitations
+------------------------------
+
+In order to ensure that all incoming connections will successfully be served,
+HAProxy computes at load time the total number of file descriptors that will be
+needed during the process's life. A regular Unix process is generally granted
+1024 file descriptors by default, and a privileged process can raise this limit
+itself. This is one reason for starting HAProxy as root and letting it adjust
+ the limit. The default limit of 1024 file descriptors roughly allows about 500
+concurrent connections to be processed. The computation is based on the global
+maxconn parameter which limits the total number of connections per process, the
+number of listeners, the number of servers which have a health check enabled,
+the agent checks, the peers, the loggers and possibly a few other technical
+requirements. A simple rough estimate of this number consists in simply
+doubling the maxconn value and adding a few tens to get the approximate number
+of file descriptors needed.
+
+Originally HAProxy did not know how to compute this value, and it was necessary
+to pass the value using the "ulimit-n" setting in the global section. This
+explains why even today a lot of configurations are seen with this setting
+present. Unfortunately it was often miscalculated resulting in connection
+ failures when approaching maxconn instead of throttling incoming connections
+while waiting for the needed resources. For this reason it is important to
+remove any vestigial "ulimit-n" setting that can remain from very old versions.
+
+Raising the number of file descriptors to accept even moderate loads is
+mandatory but comes with some OS-specific adjustments. First, the select()
+polling system is limited to 1024 file descriptors. In fact on Linux it used
+to be capable of handling more but since certain OS ship with excessively
+restrictive SELinux policies forbidding the use of select() with more than
+1024 file descriptors, HAProxy now refuses to start in this case in order to
+avoid any issue at run time. On all supported operating systems, poll() is
+available and will not suffer from this limitation. It is automatically picked
+ so there is nothing to do to get a working configuration. But poll() becomes
+very slow when the number of file descriptors increases. While HAProxy does its
+best to limit this performance impact (eg: via the use of the internal file
+descriptor cache and batched processing), a good rule of thumb is that using
+poll() with more than a thousand concurrent connections will use a lot of CPU.
+
+ For Linux systems based on kernels 2.6 and above, the epoll() system call will
+be used. It's a much more scalable mechanism relying on callbacks in the kernel
+that guarantee a constant wake up time regardless of the number of registered
+monitored file descriptors. It is automatically used where detected, provided
+that HAProxy had been built for one of the Linux flavors. Its presence and
+support can be verified using "haproxy -vv".
+
+For BSD systems which support it, kqueue() is available as an alternative. It
+is much faster than poll() and even slightly faster than epoll() thanks to its
+batched handling of changes. At least FreeBSD and OpenBSD support it. Just like
+with Linux's epoll(), its support and availability are reported in the output
+of "haproxy -vv".
+
+Having a good poller is one thing, but it is mandatory that the process can
+reach the limits. When HAProxy starts, it immediately sets the new process's
+file descriptor limits and verifies if it succeeds. In case of failure, it
+reports it before forking so that the administrator can see the problem. As
+ long as the process is started as root, there should be no reason for this
+setting to fail. However, it can fail if the process is started by an
+unprivileged user. If there is a compelling reason for *not* starting haproxy
+as root (eg: started by end users, or by a per-application account), then the
+file descriptor limit can be raised by the system administrator for this
+specific user. The effectiveness of the setting can be verified by issuing
+"ulimit -n" from the user's command line. It should reflect the new limit.
+
+Warning: when an unprivileged user's limits are changed in this user's account,
+it is fairly common that these values are only considered when the user logs in
+and not at all in some scripts run at system boot time nor in crontabs. This is
+totally dependent on the operating system, keep in mind to check "ulimit -n"
+before starting haproxy when running this way. The general advice is never to
+start haproxy as an unprivileged user for production purposes. Another good
+reason is that it prevents haproxy from enabling some security protections.
+
+Once it is certain that the system will allow the haproxy process to use the
+requested number of file descriptors, two new system-specific limits may be
+encountered. The first one is the system-wide file descriptor limit, which is
+the total number of file descriptors opened on the system, covering all
+processes. When this limit is reached, accept() or socket() will typically
+return ENFILE. The second one is the per-process hard limit on the number of
+file descriptors, it prevents setrlimit() from being set higher. Both are very
+dependent on the operating system. On Linux, the system limit is set at boot
+based on the amount of memory. It can be changed with the "fs.file-max" sysctl.
+And the per-process hard limit is set to 1048576 by default, but it can be
+changed using the "fs.nr_open" sysctl.
+
+File descriptor limitations may be observed on a running process when they are
+set too low. The strace utility will report that accept() and socket() return
+"-1 EMFILE" when the process's limits have been reached. In this case, simply
+raising the "ulimit-n" value (or removing it) will solve the problem. If these
+system calls return "-1 ENFILE" then it means that the kernel's limits have
+been reached and that something must be done on a system-wide parameter. These
+ troubles must absolutely be addressed, as they result in high CPU usage (when
+accept() fails) and failed connections that are generally visible to the user.
+One solution also consists in lowering the global maxconn value to enforce
+serialization, and possibly to disable HTTP keep-alive to force connections
+to be released and reused faster.
+
+
+6. Memory management
+--------------------
+
+HAProxy uses a simple and fast pool-based memory management. Since it relies on
+a small number of different object types, it's much more efficient to pick new
+objects from a pool which already contains objects of the appropriate size than
+to call malloc() for each different size. The pools are organized as a stack or
+LIFO, so that newly allocated objects are taken from recently released objects
+still hot in the CPU caches. Pools of similar sizes are merged together, in
+order to limit memory fragmentation.
+
+By default, since the focus is set on performance, each released object is put
+back into the pool it came from, and allocated objects are never freed since
+they are expected to be reused very soon.
+
+On the CLI, it is possible to check how memory is being used in pools thanks to
+the "show pools" command :
+
+ > show pools
+ Dumping pools usage. Use SIGQUIT to flush them.
+ - Pool cache_st (16 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9ccc40=03 [SHARED]
+ - Pool pipe (32 bytes) : 5 allocated (160 bytes), 5 used, 0 failures, 2 users, @0x9ccac0=00 [SHARED]
+ - Pool comp_state (48 bytes) : 3 allocated (144 bytes), 3 used, 0 failures, 5 users, @0x9cccc0=04 [SHARED]
+ - Pool filter (64 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 3 users, @0x9ccbc0=02 [SHARED]
+ - Pool vars (80 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 2 users, @0x9ccb40=01 [SHARED]
+ - Pool uniqueid (128 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 2 users, @0x9cd240=15 [SHARED]
+ - Pool task (144 bytes) : 55 allocated (7920 bytes), 55 used, 0 failures, 1 users, @0x9cd040=11 [SHARED]
+ - Pool session (160 bytes) : 1 allocated (160 bytes), 1 used, 0 failures, 1 users, @0x9cd140=13 [SHARED]
+ - Pool h2s (208 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 2 users, @0x9ccec0=08 [SHARED]
+ - Pool h2c (288 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9cce40=07 [SHARED]
+ - Pool spoe_ctx (304 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 2 users, @0x9ccf40=09 [SHARED]
+ - Pool connection (400 bytes) : 2 allocated (800 bytes), 2 used, 0 failures, 1 users, @0x9cd1c0=14 [SHARED]
+ - Pool hdr_idx (416 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9cd340=17 [SHARED]
+ - Pool dns_resolut (480 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9ccdc0=06 [SHARED]
+ - Pool dns_answer_ (576 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9ccd40=05 [SHARED]
+ - Pool stream (960 bytes) : 1 allocated (960 bytes), 1 used, 0 failures, 1 users, @0x9cd0c0=12 [SHARED]
+ - Pool requri (1024 bytes) : 0 allocated (0 bytes), 0 used, 0 failures, 1 users, @0x9cd2c0=16 [SHARED]
+ - Pool buffer (8030 bytes) : 3 allocated (24090 bytes), 2 used, 0 failures, 1 users, @0x9cd3c0=18 [SHARED]
+ - Pool trash (8062 bytes) : 1 allocated (8062 bytes), 1 used, 0 failures, 1 users, @0x9cd440=19
+ Total: 19 pools, 42296 bytes allocated, 34266 used.
+
+The pool name is only indicative, it's the name of the first object type using
+ this pool. The size in parentheses is the object size for objects in this pool.
+Object sizes are always rounded up to the closest multiple of 16 bytes. The
+number of objects currently allocated and the equivalent number of bytes is
+reported so that it is easy to know which pool is responsible for the highest
+memory usage. The number of objects currently in use is reported as well in the
+"used" field. The difference between "allocated" and "used" corresponds to the
+objects that have been freed and are available for immediate use. The address
+at the end of the line is the pool's address, and the following number is the
+pool index when it exists, or is reported as -1 if no index was assigned.
+
+It is possible to limit the amount of memory allocated per process using the
+"-m" command line option, followed by a number of megabytes. It covers all of
+the process's addressable space, so that includes memory used by some libraries
+as well as the stack, but it is a reliable limit when building a resource
+constrained system. It works the same way as "ulimit -v" on systems which have
+it, or "ulimit -d" for the other ones.
+
+If a memory allocation fails due to the memory limit being reached or because
+ the system doesn't have enough memory, then haproxy will first start to
+free all available objects from all pools before attempting to allocate memory
+again. This mechanism of releasing unused memory can be triggered by sending
+the signal SIGQUIT to the haproxy process. When doing so, the pools state prior
+to the flush will also be reported to stderr when the process runs in
+foreground.
+
+During a reload operation, the process switched to the graceful stop state also
+automatically performs some flushes after releasing any connection so that all
+possible memory is released to save it for the new process.
+
+
+7. CPU usage
+------------
+
+HAProxy normally spends most of its time in the system and a smaller part in
+userland. A finely tuned 3.5 GHz CPU can sustain a rate about 80000 end-to-end
+connection setups and closes per second at 100% CPU on a single core. When one
+core is saturated, typical figures are :
+ - 95% system, 5% user for long TCP connections or large HTTP objects
+ - 85% system and 15% user for short TCP connections or small HTTP objects in
+ close mode
+ - 70% system and 30% user for small HTTP objects in keep-alive mode
+
+The amount of rules processing and regular expressions will increase the user
+land part. The presence of firewall rules, connection tracking, complex routing
+tables in the system will instead increase the system part.
+
+On most systems, the CPU time observed during network transfers can be cut in 4
+parts :
+ - the interrupt part, which concerns all the processing performed upon I/O
+ receipt, before the target process is even known. Typically Rx packets are
+ accounted for in interrupt. On some systems such as Linux where interrupt
+ processing may be deferred to a dedicated thread, it can appear as softirq,
+ and the thread is called ksoftirqd/0 (for CPU 0). The CPU taking care of
+ this load is generally defined by the hardware settings, though in the case
+ of softirq it is often possible to remap the processing to another CPU.
+ This interrupt part will often be perceived as parasitic since it's not
+ associated with any process, but it actually is some processing being done
+ to prepare the work for the process.
+
+ - the system part, which concerns all the processing done using kernel code
+ called from userland. System calls are accounted as system for example. All
+ synchronously delivered Tx packets will be accounted for as system time. If
+ some packets have to be deferred due to queues filling up, they may then be
+ processed in interrupt context later (eg: upon receipt of an ACK opening a
+ TCP window).
+
+ - the user part, which exclusively runs application code in userland. HAProxy
+ runs exclusively in this part, though it makes heavy use of system calls.
+ Rules processing, regular expressions, compression, encryption all add to
+ the user portion of CPU consumption.
+
+ - the idle part, which is what the CPU does when there is nothing to do. For
+ example HAProxy waits for an incoming connection, or waits for some data to
+ leave, meaning the system is waiting for an ACK from the client to push
+ these data.
+
+In practice regarding HAProxy's activity, it is in general reasonably accurate
+(but totally inexact) to consider that interrupt/softirq are caused by Rx
+processing in kernel drivers, that user-land is caused by layer 7 processing
+in HAProxy, and that system time is caused by network processing on the Tx
+path.
+
+Since HAProxy runs around an event loop, it waits for new events using poll()
+(or any alternative) and processes all these events as fast as possible before
+going back to poll() waiting for new events. It measures the time spent waiting
+in poll() compared to the time spent doing processing events. The ratio of
+polling time vs total time is called the "idle" time, it's the amount of time
+spent waiting for something to happen. This ratio is reported in the stats page
+on the "idle" line, or "Idle_pct" on the CLI. When it's close to 100%, it means
+the load is extremely low. When it's close to 0%, it means that there is
+constantly some activity. While it cannot be very accurate on an overloaded
+system due to other processes possibly preempting the CPU from the haproxy
+process, it still provides a good estimate about how HAProxy considers it is
+working : if the load is low and the idle ratio is low as well, it may indicate
+that HAProxy has a lot of work to do, possibly due to very expensive rules that
+have to be processed. Conversely, if HAProxy indicates the idle is close to
+100% while things are slow, it means that it cannot do anything to speed things
+up because it is already waiting for incoming data to process. In the example
+below, haproxy is completely idle :
+
+ $ echo "show info" | socat - /var/run/haproxy.sock | grep ^Idle
+ Idle_pct: 100
+
+When the idle ratio starts to become very low, it is important to tune the
+system and place processes and interrupts correctly to save the most possible
+CPU resources for all tasks. If a firewall is present, it may be worth trying
+to disable it or to tune it to ensure it is not responsible for a large part
+of the performance limitation. It's worth noting that unloading a stateful
+firewall generally reduces both the amount of interrupt/softirq and of system
+usage since such firewalls act both on the Rx and the Tx paths. On Linux,
+unloading the nf_conntrack and ip_conntrack modules will show whether there is
+anything to gain. If so, then the module runs with default settings and you'll
+have to figure how to tune it for better performance. In general this consists
+in considerably increasing the hash table size. On FreeBSD, "pfctl -d" will
+disable the "pf" firewall and its stateful engine at the same time.
+
+If it is observed that a lot of time is spent in interrupt/softirq, it is
+important to ensure that they don't run on the same CPU. Most systems tend to
+pin the tasks on the CPU where they receive the network traffic because for
+certain workloads it improves things. But with heavily network-bound workloads
+it is the opposite as the haproxy process will have to fight against its kernel
+counterpart. Pinning haproxy to one CPU core and the interrupts to another one,
+all sharing the same L3 cache tends to sensibly increase network performance
+because in practice the amount of work for haproxy and the network stack are
+quite close, so they can almost fill an entire CPU each. On Linux this is done
+using taskset (for haproxy) or using cpu-map (from the haproxy config), and the
+interrupts are assigned under /proc/irq. Many network interfaces support
+multiple queues and multiple interrupts. In general it helps to spread them
+across a small number of CPU cores provided they all share the same L3 cache.
+Please always stop irq_balance which always does the worst possible thing on
+such workloads.
+
+For CPU-bound workloads consisting in a lot of SSL traffic or a lot of
+compression, it may be worth using multiple processes dedicated to certain
+tasks, though there is no universal rule here and experimentation will have to
+be performed.
+
+In order to increase the CPU capacity, it is possible to make HAProxy run as
+several processes, using the "nbproc" directive in the global section. There
+are some limitations though :
+ - health checks are run per process, so the target servers will get as many
+ checks as there are running processes ;
+ - maxconn values and queues are per-process so the correct value must be set
+ to avoid overloading the servers ;
+ - outgoing connections should avoid using port ranges to avoid conflicts
+ - stick-tables are per process and are not shared between processes ;
+ - each peers section may only run on a single process at a time ;
+ - the CLI operations will only act on a single process at a time.
+
+With this in mind, it appears that the easiest setup often consists in having
+one first layer running on multiple processes and in charge of the heavy
+processing, passing the traffic to a second layer running in a single process.
+This mechanism is suited to SSL and compression which are the two CPU-heavy
+features. Instances can easily be chained over UNIX sockets (which are cheaper
+than TCP sockets and which do not waste ports), and the proxy protocol which is
+useful to pass client information to the next stage. When doing so, it is
+generally a good idea to bind all the single-process tasks to process number 1
+and extra tasks to next processes, as this will make it easier to generate
+similar configurations for different machines.
+
+On Linux versions 3.9 and above, running HAProxy in multi-process mode is much
+more efficient when each process uses a distinct listening socket on the same
+IP:port ; this will make the kernel evenly distribute the load across all
+processes instead of waking them all up. Please check the "process" option of
+the "bind" keyword lines in the configuration manual for more information.
+
+
+8. Logging
+----------
+
+For logging, HAProxy always relies on a syslog server since it does not perform
+any file-system access. The standard way of using it is to send logs over UDP
+to the log server (by default on port 514). Very commonly this is configured to
+127.0.0.1 where the local syslog daemon is running, but it's also used over the
+network to log to a central server. The central server provides additional
+benefits especially in active-active scenarios where it is desirable to keep
+the logs merged in arrival order. HAProxy may also make use of a UNIX socket to
+send its logs to the local syslog daemon, but it is not recommended at all,
+because if the syslog server is restarted while haproxy runs, the socket will
+be replaced and new logs will be lost. Since HAProxy will be isolated inside a
+chroot jail, it will not have the ability to reconnect to the new socket. It
+has also been observed in field that the log buffers in use on UNIX sockets are
+very small and lead to lost messages even at very light loads. This can
+still be fine for testing, however.
+
+It is recommended to add the following directive to the "global" section to
+make HAProxy log to the local daemon using facility "local0" :
+
+ log 127.0.0.1:514 local0
+
+and then to add the following one to each "defaults" section or to each frontend
+and backend section :
+
+ log global
+
+This way, all logs will be centralized through the global definition of where
+the log server is.
+
+Some syslog daemons do not listen to UDP traffic by default, so depending on
+the daemon being used, the syntax to enable this will vary :
+
+ - on sysklogd, you need to pass argument "-r" on the daemon's command line
+ so that it listens to a UDP socket for "remote" logs ; note that there is
+ no way to limit it to address 127.0.0.1 so it will also receive logs from
+ remote systems ;
+
+ - on rsyslogd, the following lines must be added to the configuration file :
+
+ $ModLoad imudp
+ $UDPServerAddress *
+ $UDPServerRun 514
+
+ - on syslog-ng, a new source can be created the following way, it then needs
+ to be added as a valid source in one of the "log" directives :
+
+ source s_udp {
+ udp(ip(127.0.0.1) port(514));
+ };
+
+Please consult your syslog daemon's manual for more information. If no logs are
+seen in the system's log files, please consider the following tests :
+
+ - restart haproxy. Each frontend and backend logs one line indicating it's
+ starting. If these logs are received, it means logs are working.
+
+ - run "strace -tt -s100 -etrace=sendmsg -p <haproxy's pid>" and perform some
+ activity that you expect to be logged. You should see the log messages
+ being sent using sendmsg() there. If they don't appear, restart using
+ strace on top of haproxy. If you still see no logs, it definitely means
+ that something is wrong in your configuration.
+
+ - run tcpdump to watch for port 514, for example on the loopback interface if
+ the traffic is being sent locally : "tcpdump -As0 -ni lo port 514". If the
+ packets are seen there, it's the proof they're sent then the syslogd daemon
+   needs to be investigated.
+
+While traffic logs are sent from the frontends (where the incoming connections
+are accepted), backends also need to be able to send logs in order to report a
+server state change consecutive to a health check. Please consult HAProxy's
+configuration manual for more information regarding all possible log settings.
+
+It is convenient to choose a facility that is not used by other daemons. HAProxy
+examples often suggest "local0" for traffic logs and "local1" for admin logs
+because they're never seen in field. A single facility would be enough as well.
+Having separate logs is convenient for log analysis, but it's also important to
+remember that logs may sometimes convey confidential information, and as such
+they must not be mixed with other logs that may accidentally be handed out to
+unauthorized people.
+
+For in-field troubleshooting without impacting the server's capacity too much,
+it is recommended to make use of the "halog" utility provided with HAProxy.
+This is sort of a grep-like utility designed to process HAProxy log files at
+a very fast data rate. Typical figures range between 1 and 2 GB of logs per
+second. It is capable of extracting only certain logs (eg: search for some
+classes of HTTP status codes, connection termination status, search by response
+time ranges, look for errors only), count lines, limit the output to a number
+of lines, and perform some more advanced statistics such as sorting servers
+by response time or error counts, sorting URLs by time or count, sorting client
+addresses by access count, and so on. It is pretty convenient to quickly spot
+anomalies such as a bot looping on the site, and block them.
+
+
+9. Statistics and monitoring
+----------------------------
+
+It is possible to query HAProxy about its status. The most commonly used
+mechanism is the HTTP statistics page. This page also exposes an alternative
+CSV output format for monitoring tools. The same format is provided on the
+Unix socket.
+
+Statistics are grouped into categories labelled as domains, corresponding to the
+multiple components of HAProxy. There are two domains available: proxy and dns.
+If not specified, the proxy domain is selected. Note that only the proxy
+statistics are printed on the HTTP page.
+
+9.1. CSV format
+---------------
+
+The statistics may be consulted either from the unix socket or from the HTTP
+page. Both means provide a CSV format whose fields follow. The first line
+begins with a sharp ('#') and has one word per comma-delimited field which
+represents the title of the column. All other lines starting at the second one
+use a classical CSV format using a comma as the delimiter, and the double quote
+('"') as an optional text delimiter, but only if the enclosed text is ambiguous
+(if it contains a quote or a comma). The double-quote character ('"') in the
+text is doubled ('""'), which is the format that most tools recognize. Please
+do not insert any column before these ones in order not to break tools which
+use hard-coded column positions.
+
+For proxy statistics, after each field name, the types which may have a value
+for that field are specified in brackets. The types are L (Listeners), F
+(Frontends), B (Backends), and S (Servers). There is a fixed set of static
+fields that are always available in the same order. A column containing the
+character '-' delimits the end of the static fields, after which presence or
+order of the fields are not guaranteed.
+
+Here is the list of static fields using the proxy statistics domain:
+ 0. pxname [LFBS]: proxy name
+ 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend,
+ any name for server/listener)
+ 2. qcur [..BS]: current queued requests. For the backend this reports the
+ number queued without a server assigned.
+ 3. qmax [..BS]: max value of qcur
+ 4. scur [LFBS]: current sessions
+ 5. smax [LFBS]: max sessions
+ 6. slim [LFBS]: configured session limit
+ 7. stot [LFBS]: cumulative number of sessions
+ 8. bin [LFBS]: bytes in
+ 9. bout [LFBS]: bytes out
+ 10. dreq [LFB.]: requests denied because of security concerns.
+ - For tcp this is because of a matched tcp-request content rule.
+ - For http this is because of a matched http-request or tarpit rule.
+ 11. dresp [LFBS]: responses denied because of security concerns.
+ - For http this is because of a matched http-request rule, or
+ "option checkcache".
+ 12. ereq [LF..]: request errors. Some of the possible causes are:
+ - early termination from the client, before the request has been sent.
+ - read error from the client
+ - client timeout
+ - client closed connection
+ - various bad requests from the client.
+ - request was tarpitted.
+ 13. econ [..BS]: number of requests that encountered an error trying to
+ connect to a backend server. The backend stat is the sum of the stat
+ for all servers of that backend, plus any connection errors not
+ associated with a particular server (such as the backend having no
+ active servers).
+ 14. eresp [..BS]: response errors. srv_abrt will be counted here also.
+ Some other errors are:
+ - write error on the client socket (won't be counted for the server stat)
+ - failure applying filters to the response.
+ 15. wretr [..BS]: number of times a connection to a server was retried.
+ 16. wredis [..BS]: number of times a request was redispatched to another
+ server. The server value counts the number of times that server was
+ switched away from.
+ 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)/MAINT(resolution)...)
+ 18. weight [..BS]: total effective weight (backend), effective weight (server)
+ 19. act [..BS]: number of active servers (backend), server is active (server)
+ 20. bck [..BS]: number of backup servers (backend), server is backup (server)
+ 21. chkfail [...S]: number of failed checks. (Only counts checks failed when
+ the server is up.)
+ 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts
+ transitions to the whole backend being down, rather than the sum of the
+ counters for each server.
+ 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition
+ 24. downtime [..BS]: total downtime (in seconds). The value for the backend
+ is the downtime for the whole backend, not the sum of the server downtime.
+ 25. qlimit [...S]: configured maxqueue for the server, or nothing if the
+ value is 0 (default, meaning no limit)
+ 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...)
+ 27. iid [LFBS]: unique proxy id
+ 28. sid [L..S]: server id (unique inside a proxy)
+ 29. throttle [...S]: current throttle percentage for the server, when
+ slowstart is active, or no value if not in slowstart.
+ 30. lbtot [..BS]: total number of times a server was selected, either for new
+ sessions, or when re-dispatching. The server counter is the number
+ of times that server was selected.
+ 31. tracked [...S]: id of proxy/server if tracking is enabled.
+ 32. type [LFBS]: (0=frontend, 1=backend, 2=server, 3=socket/listener)
+ 33. rate [.FBS]: number of sessions per second over last elapsed second
+ 34. rate_lim [.F..]: configured limit on new sessions per second
+ 35. rate_max [.FBS]: max number of new sessions per second
+ 36. check_status [...S]: status of last health check, one of:
+ UNK -> unknown
+ INI -> initializing
+ SOCKERR -> socket error
+ L4OK -> check passed on layer 4, no upper layers testing enabled
+ L4TOUT -> layer 1-4 timeout
+ L4CON -> layer 1-4 connection problem, for example
+ "Connection refused" (tcp rst) or "No route to host" (icmp)
+ L6OK -> check passed on layer 6
+ L6TOUT -> layer 6 (SSL) timeout
+ L6RSP -> layer 6 invalid response - protocol error
+ L7OK -> check passed on layer 7
+ L7OKC -> check conditionally passed on layer 7, for example 404 with
+ disable-on-404
+ L7TOUT -> layer 7 (HTTP/SMTP) timeout
+ L7RSP -> layer 7 invalid response - protocol error
+ L7STS -> layer 7 response error, for example HTTP 5xx
+ Notice: If a check is currently running, the last known status will be
+ reported, prefixed with "* ". e. g. "* L7OK".
+ 37. check_code [...S]: layer5-7 code, if available
+ 38. check_duration [...S]: time in ms taken to finish last health check
+ 39. hrsp_1xx [.FBS]: http responses with 1xx code
+ 40. hrsp_2xx [.FBS]: http responses with 2xx code
+ 41. hrsp_3xx [.FBS]: http responses with 3xx code
+ 42. hrsp_4xx [.FBS]: http responses with 4xx code
+ 43. hrsp_5xx [.FBS]: http responses with 5xx code
+ 44. hrsp_other [.FBS]: http responses with other codes (protocol error)
+ 45. hanafail [...S]: failed health checks details
+ 46. req_rate [.F..]: HTTP requests per second over last elapsed second
+ 47. req_rate_max [.F..]: max number of HTTP requests per second observed
+ 48. req_tot [.FB.]: total number of HTTP requests received
+ 49. cli_abrt [..BS]: number of data transfers aborted by the client
+ 50. srv_abrt [..BS]: number of data transfers aborted by the server
+ (inc. in eresp)
+ 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor
+ 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor
+ 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor
+ (CPU/BW limit)
+ 54. comp_rsp [.FB.]: number of HTTP responses that were compressed
+ 55. lastsess [..BS]: number of seconds since last session assigned to
+ server/backend
+ 56. last_chk [...S]: last health check contents or textual error
+ 57. last_agt [...S]: last agent check contents or textual error
+ 58. qtime [..BS]: the average queue time in ms over the 1024 last requests
+ 59. ctime [..BS]: the average connect time in ms over the 1024 last requests
+ 60. rtime [..BS]: the average response time in ms over the 1024 last requests
+ (0 for TCP)
+ 61. ttime [..BS]: the average total session time in ms over the 1024 last
+ requests
+ 62. agent_status [...S]: status of last agent check, one of:
+ UNK -> unknown
+ INI -> initializing
+ SOCKERR -> socket error
+ L4OK -> check passed on layer 4, no upper layers testing enabled
+ L4TOUT -> layer 1-4 timeout
+ L4CON -> layer 1-4 connection problem, for example
+ "Connection refused" (tcp rst) or "No route to host" (icmp)
+ L7OK -> agent reported "up"
+ L7STS -> agent reported "fail", "stop", or "down"
+ 63. agent_code [...S]: numeric code reported by agent if any (unused for now)
+ 64. agent_duration [...S]: time in ms taken to finish last check
+ 65. check_desc [...S]: short human-readable description of check_status
+ 66. agent_desc [...S]: short human-readable description of agent_status
+ 67. check_rise [...S]: server's "rise" parameter used by checks
+ 68. check_fall [...S]: server's "fall" parameter used by checks
+ 69. check_health [...S]: server's health check value between 0 and rise+fall-1
+ 70. agent_rise [...S]: agent's "rise" parameter, normally 1
+ 71. agent_fall [...S]: agent's "fall" parameter, normally 1
+ 72. agent_health [...S]: agent's health parameter, between 0 and rise+fall-1
+ 73. addr [L..S]: address:port or "unix". IPv6 has brackets around the address.
+ 74: cookie [..BS]: server's cookie value or backend's cookie name
+ 75: mode [LFBS]: proxy mode (tcp, http, health, unknown)
+ 76: algo [..B.]: load balancing algorithm
+ 77: conn_rate [.F..]: number of connections over the last elapsed second
+ 78: conn_rate_max [.F..]: highest known conn_rate
+ 79: conn_tot [.F..]: cumulative number of connections
+ 80: intercepted [.FB.]: cum. number of intercepted requests (monitor, stats)
+ 81: dcon [LF..]: requests denied by "tcp-request connection" rules
+ 82: dses [LF..]: requests denied by "tcp-request session" rules
+ 83: wrew [LFBS]: cumulative number of failed header rewriting warnings
+ 84: connect [..BS]: cumulative number of connection establishment attempts
+ 85: reuse [..BS]: cumulative number of connection reuses
+ 86: cache_lookups [.FB.]: cumulative number of cache lookups
+ 87: cache_hits [.FB.]: cumulative number of cache hits
+ 88: srv_icur [...S]: current number of idle connections available for reuse
+ 89: src_ilim [...S]: limit on the number of available idle connections
+ 90. qtime_max [..BS]: the maximum observed queue time in ms
+ 91. ctime_max [..BS]: the maximum observed connect time in ms
+ 92. rtime_max [..BS]: the maximum observed response time in ms (0 for TCP)
+ 93. ttime_max [..BS]: the maximum observed total session time in ms
+ 94. eint [LFBS]: cumulative number of internal errors
+ 95. idle_conn_cur [...S]: current number of unsafe idle connections
+ 96. safe_conn_cur [...S]: current number of safe idle connections
+ 97. used_conn_cur [...S]: current number of connections in use
+ 98. need_conn_est [...S]: estimated needed number of connections
+ 99. uweight [..BS]: total user weight (backend), server user weight (server)
+
+For all other statistics domains, the presence or the order of the fields are
+not guaranteed. In this case, the header line should always be used to parse
+the CSV data.
+
+9.2. Typed output format
+------------------------
+
+Both "show info" and "show stat" support a mode where each output value comes
+with its type and sufficient information to know how the value is supposed to
+be aggregated between processes and how it evolves.
+
+In all cases, the output consists in having a single value per line with all
+the information split into fields delimited by colons (':').
+
+The first column designates the object or metric being dumped. Its format is
+specific to the command producing this output and will not be described in this
+section. Usually it will consist in a series of identifiers and field names.
+
+The second column contains 3 characters respectively indicating the origin, the
+nature and the scope of the value being reported. The first character (the
+origin) indicates where the value was extracted from. Possible characters are :
+
+ M The value is a metric. It is valid at one instant and may change depending
+ on its nature.
+
+ S The value is a status. It represents a discrete value which by definition
+ cannot be aggregated. It may be the status of a server ("UP" or "DOWN"),
+ the PID of the process, etc.
+
+ K The value is a sorting key. It represents an identifier which may be used
+ to group some values together because it is unique among its class. All
+ internal identifiers are keys. Some names can be listed as keys if they
+ are unique (eg: a frontend name is unique). In general keys come from the
+ configuration, even though some of them may automatically be assigned. For
+ most purposes keys may be considered as equivalent to configuration.
+
+ C The value comes from the configuration. Certain configuration values make
+ sense on the output, for example a concurrent connection limit or a cookie
+ name. By definition these values are the same in all processes started
+ from the same configuration file.
+
+ P The value comes from the product itself. There are very few such values,
+ most common use is to report the product name, version and release date.
+ These elements are also the same between all processes.
+
+The second character (the nature) indicates the nature of the information
+carried by the field in order to let an aggregator decide on what operation to
+use to aggregate multiple values. Possible characters are :
+
+ A The value represents an age since a last event. This is a bit different
+ from the duration in that an age is automatically computed based on the
+ current date. A typical example is how long ago did the last session
+ happen on a server. Ages are generally aggregated by taking the minimum
+ value and do not need to be stored.
+
+ a The value represents an already averaged value. The average response times
+ and server weights are of this nature. Averages can typically be averaged
+ between processes.
+
+ C The value represents a cumulative counter. Such measures perpetually
+ increase until they wrap around. Some monitoring protocols need to tell
+ the difference between a counter and a gauge to report a different type.
+ In general counters may simply be summed since they represent events or
+ volumes. Examples of metrics of this nature are connection counts or byte
+ counts.
+
+ D The value represents a duration for a status. There are a few usages of
+ this, most of them include the time taken by the last health check and
+ the time a server has spent down. Durations are generally not summed,
+ most of the time the maximum will be retained to compute an SLA.
+
+ G The value represents a gauge. It's a measure at one instant. The memory
+ usage or the current number of active connections are of this nature.
+ Metrics of this type are typically summed during aggregation.
+
+ L The value represents a limit (generally a configured one). By nature,
+ limits are harder to aggregate since they are specific to the point where
+ they were retrieved. In certain situations they may be summed or be kept
+ separate.
+
+ M The value represents a maximum. In general it will apply to a gauge and
+ keep the highest known value. An example of such a metric could be the
+ maximum amount of concurrent connections that was encountered in the
+ product's life time. To correctly aggregate maxima, you are supposed to
+ output a range going from the maximum of all maxima and the sum of all
+ of them. There is indeed no way to know if they were encountered
+ simultaneously or not.
+
+ m The value represents a minimum. In general it will apply to a gauge and
+ keep the lowest known value. An example of such a metric could be the
+ minimum amount of free memory pools that was encountered in the product's
+ life time. To correctly aggregate minima, you are supposed to output a
+ range going from the minimum of all minima and the sum of all of them.
+ There is indeed no way to know if they were encountered simultaneously
+ or not.
+
+ N The value represents a name, so it is a string. It is used to report
+ proxy names, server names and cookie names. Names have configuration or
+ keys as their origin and are supposed to be the same among all processes.
+
+ O The value represents a free text output. Outputs from various commands,
+ returns from health checks, node descriptions are of such nature.
+
+ R The value represents an event rate. It's a measure at one instant. It is
+ quite similar to a gauge except that the recipient knows that this measure
+ moves slowly and may decide not to keep all values. An example of such a
+ metric is the measured amount of connections per second. Metrics of this
+ type are typically summed during aggregation.
+
+ T The value represents a date or time. A field emitting the current date
+ would be of this type. The method to aggregate such information is left
+ as an implementation choice. For now no field uses this type.
+
+The third character (the scope) indicates what extent the value reflects. Some
+elements may be per process while others may be per configuration or per system.
+The distinction is important to know whether or not a single value should be
+kept during aggregation or if values have to be aggregated. The following
+characters are currently supported :
+
+ C The value is valid for a whole cluster of nodes, which is the set of nodes
+ communicating over the peers protocol. An example could be the amount of
+ entries present in a stick table that is replicated with other peers. At
+ the moment no metric uses this scope.
+
+ P The value is valid only for the process reporting it. Most metrics use
+ this scope.
+
+ S The value is valid for the whole service, which is the set of processes
+ started together from the same configuration file. All metrics originating
+ from the configuration use this scope. Some other metrics may use it as
+ well for some shared resources (eg: shared SSL cache statistics).
+
+ s The value is valid for the whole system, such as the system's hostname,
+ current date or resource usage. At the moment this scope is not used by
+ any metric.
+
+Consumers of this information will generally have enough of these 3 characters
+to determine how to accurately report aggregated information across multiple
+processes.
+
+After this column, the third column indicates the type of the field, among "s32"
+(signed 32-bit integer), "s64" (signed 64-bit integer), "u32" (unsigned 32-bit
+integer), "u64" (unsigned 64-bit integer), "str" (string). It is important to
+know the type before parsing the value in order to properly read it. For example
+a string containing only digits is still a string and not an integer (eg: an
+error code extracted by a check).
+
+Then the fourth column is the value itself, encoded according to its type.
+Strings are dumped as-is immediately after the colon without any leading space.
+If a string contains a colon, it will appear normally. This means that the
+output should not be exclusively split around colons or some check outputs
+or server addresses might be truncated.
+
+
+9.3. Unix Socket commands
+-------------------------
+
+The stats socket is not enabled by default. In order to enable it, it is
+necessary to add one line in the global section of the haproxy configuration.
+A second line is recommended to set a larger timeout, always appreciated when
+issuing commands by hand :
+
+ global
+ stats socket /var/run/haproxy.sock mode 600 level admin
+ stats timeout 2m
+
+It is also possible to add multiple instances of the stats socket by repeating
+the line, and make them listen to a TCP port instead of a UNIX socket. This is
+never done by default because this is dangerous, but can be handy in some
+situations :
+
+ global
+ stats socket /var/run/haproxy.sock mode 600 level admin
+ stats socket ipv4@192.168.0.1:9999 level admin
+ stats timeout 2m
+
+To access the socket, an external utility such as "socat" is required. Socat is
+a swiss-army knife to connect anything to anything. We use it to connect
+terminals to the socket, or a couple of stdin/stdout pipes to it for scripts.
+The two main syntaxes we'll use are the following :
+
+ # socat /var/run/haproxy.sock stdio
+ # socat /var/run/haproxy.sock readline
+
+The first one is used with scripts. It is possible to send the output of a
+script to haproxy, and pass haproxy's output to another script. That's useful
+for retrieving counters or attack traces for example.
+
+The second one is only useful for issuing commands by hand. It has the benefit
+that the terminal is handled by the readline library which supports line
+editing and history, which is very convenient when issuing repeated commands
+(eg: watch a counter).
+
+The socket supports two operation modes :
+ - interactive
+ - non-interactive
+
+The non-interactive mode is the default when socat connects to the socket. In
+this mode, a single line may be sent. It is processed as a whole, responses are
+sent back, and the connection closes after the end of the response. This is the
+mode that scripts and monitoring tools use. It is possible to send multiple
+commands in this mode, they need to be delimited by a semi-colon (';'). For
+example :
+
+ # echo "show info;show stat;show table" | socat /var/run/haproxy stdio
+
+If a command needs to use a semi-colon or a backslash (eg: in a value), it
+must be preceded by a backslash ('\').
+
+The interactive mode displays a prompt ('>') and waits for commands to be
+entered on the line, then processes them, and displays the prompt again to wait
+for a new command. This mode is entered via the "prompt" command which must be
+sent on the first line in non-interactive mode. The mode is a flip switch, if
+"prompt" is sent in interactive mode, it is disabled and the connection closes
+after processing the last command of the same line.
+
+For this reason, when debugging by hand, it's quite common to start with the
+"prompt" command :
+
+ # socat /var/run/haproxy readline
+ prompt
+ > show info
+ ...
+ >
+
+Optionally the process' uptime may be displayed in the prompt. In order to
+enable this, the "prompt timed" command will enable the prompt and toggle the
+displaying of the time. The uptime is displayed in format "d:hh:mm:ss" where
+"d" is the number of days, and "hh", "mm", "ss" are respectively the number
+of hours, minutes and seconds on two digits each:
+
+ # socat /var/run/haproxy readline
+ prompt timed
+
+ [23:03:34:39]> show version
+ 2.8-dev9-e5e622-18
+
+ [23:03:34:41]> quit
+
+When the timed prompt is set on the master CLI, the prompt will display the
+currently selected process' uptime, so this will work for the master, current
+worker or an older worker:
+
+ master> prompt timed
+ [0:00:00:50] master> show proc
+ (...)
+ [0:00:00:58] master> @!11955 <-- master, switch to current worker
+ [0:00:01:03] 11955> @!11942 <-- current worker, switch to older worker
+ [0:00:02:17] 11942> @ <-- older worker, switch back to master
+ [0:00:01:10] master>
+
+Since multiple commands may be issued at once, haproxy uses the empty line as a
+delimiter to mark an end of output for each command, and takes care of ensuring
+that no command can emit an empty line on output. A script can thus easily
+parse the output even when multiple commands were pipelined on a single line.
+
+Some commands may take an optional payload. To add one to a command, the first
+line needs to end with the "<<\n" pattern. The next lines will be treated as
+the payload and can contain as many lines as needed. To validate a command with
+a payload, it needs to end with an empty line.
+
+The payload pattern can be customized in order to change the way the payload
+ends. In order to end a payload with something else than an empty line, a
+customized pattern can be set between '<<' and '\n'. Only 7 characters can be
+used in addition to '<<', otherwise this won't be considered a payload.
+For example, to use a PEM file that contains empty lines and comments:
+
+ # echo -e "set ssl cert common.pem <<%EOF%\n$(cat common.pem)\n%EOF%\n" | \
+ socat /var/run/haproxy.stat -
+
+Limitations do exist: the length of the whole buffer passed to the CLI must
+not be greater than tune.bfsize and the pattern "<<" must not be glued to the
+last word of the line.
+
+When entering a payload while in interactive mode, the prompt will change from
+"> " to "+ ".
+
+It is important to understand that when multiple haproxy processes are started
+on the same sockets, any process may pick up the request and will output its
+own stats.
+
+The list of commands currently supported on the stats socket is provided below.
+If an unknown command is sent, haproxy displays the usage message which reminds
+all supported commands. Some commands support a more complex syntax, generally
+it will explain what part of the command is invalid when this happens.
+
+Some commands require a higher level of privilege to work. If you do not have
+enough privilege, you will get an error "Permission denied". Please check
+the "level" option of the "bind" keyword lines in the configuration manual
+for more information.
+
+abort ssl ca-file <cafile>
+ Abort and destroy a temporary CA file update transaction.
+
+ See also "set ssl ca-file" and "commit ssl ca-file".
+
+abort ssl cert <filename>
+ Abort and destroy a temporary SSL certificate update transaction.
+
+ See also "set ssl cert" and "commit ssl cert".
+
+abort ssl crl-file <crlfile>
+ Abort and destroy a temporary CRL file update transaction.
+
+ See also "set ssl crl-file" and "commit ssl crl-file".
+
+add acl [@<ver>] <acl> <pattern>
+ Add an entry into the acl <acl>. <acl> is the #<id> or the <file> returned by
+ "show acl". This command does not verify if the entry already exists. Entries
+ are added to the current version of the ACL, unless a specific version is
+ specified with "@<ver>". This version number must have previously been
+ allocated by "prepare acl", and it will be comprised between the versions
+ reported in "curr_ver" and "next_ver" on the output of "show acl". Entries
+ added with a specific version number will not match until a "commit acl"
+ operation is performed on them. They may however be consulted using the
+ "show acl @<ver>" command, and cleared using a "clear acl @<ver>" command.
+ This command cannot be used if the reference <acl> is a file also used with
+ a map. In this case, the "add map" command must be used instead.
+
+add map [@<ver>] <map> <key> <value>
+add map [@<ver>] <map> <payload>
+ Add an entry into the map <map> to associate the value <value> to the key
+ <key>. This command does not verify if the entry already exists. It is
+ mainly used to fill a map after a "clear" or "prepare" operation. Entries
+ are added to the current version of the map, unless a specific version is
+ specified with "@<ver>". This version number must have previously been
+ allocated by "prepare map", and it will be comprised between the versions
+ reported in "curr_ver" and "next_ver" on the output of "show map". Entries
+ added with a specific version number will not match until a "commit map"
+ operation is performed on them. They may however be consulted using the
+ "show map @<ver>" command, and cleared using a "clear map @<ver>" command.
+ If the designated map is also used as an ACL, the ACL will only match the
+ <key> part and will ignore the <value> part. Using the payload syntax it is
+ possible to add multiple key/value pairs by entering them on separate lines.
+ On each new line, the first word is the key and the rest of the line is
+ considered to be the value which can even contain spaces.
+
+ Example:
+
+ # socat /tmp/sock1 -
+ prompt
+
+ > add map #-1 <<
+ + key1 value1
+ + key2 value2 with spaces
+ + key3 value3 also with spaces
+ + key4 value4
+
+ >
+
+add server <backend>/<server> [args]*
+ Instantiate a new server attached to the backend <backend>.
+
+ The <server> name must not be already used in the backend. A special
+ restriction is put on the backend which must use a dynamic load-balancing
+ algorithm. A subset of keywords from the server config file statement can be
+ used to configure the server behavior. Also note that no settings will be
+ reused from a hypothetical 'default-server' statement in the same backend.
+
+ Currently a dynamic server is statically initialized with the "none"
+ init-addr method. This means that no resolution will be undertaken if a FQDN
+ is specified as an address, even if the server creation will be validated.
+
+ To support the reload operations, it is expected that the server created via
+ the CLI is also manually inserted in the relevant haproxy configuration file.
+ A dynamic server not present in the configuration won't be restored after a
+ reload operation.
+
+ A dynamic server may use the "track" keyword to follow the check status of
+ another server from the configuration. However, it is not possible to track
+ another dynamic server. This is to ensure that the tracking chain is kept
+ consistent even in the case of dynamic servers deletion.
+
+ Use the "check" keyword to enable health-check support. Note that the
+ health-check is disabled by default and must be enabled independently from
+ the server using the "enable health" command. For agent checks, use the
+ "agent-check" keyword and the "enable agent" command. Note that in this case
+ the server may be activated via the agent depending on the status reported,
+ without an explicit "enable server" command. This also means that extra care
+ is required when removing a dynamic server with agent check. The agent should
+ be first deactivated via "disable agent" to be able to put the server in the
+ required maintenance mode before removal.
+
+ It may be possible to reach the fd limit when using a large number of dynamic
+ servers. Please refer to the "ulimit-n" global keyword documentation in this
+ case.
+
+ Here is the list of the currently supported keywords :
+
+ - agent-addr
+ - agent-check
+ - agent-inter
+ - agent-port
+ - agent-send
+ - allow-0rtt
+ - alpn
+ - addr
+ - backup
+ - ca-file
+ - check
+ - check-alpn
+ - check-proto
+ - check-send-proxy
+ - check-sni
+ - check-ssl
+ - check-via-socks4
+ - ciphers
+ - ciphersuites
+ - crl-file
+ - crt
+ - disabled
+ - downinter
+ - enabled
+ - error-limit
+ - fall
+ - fastinter
+ - force-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
+ - id
+ - inter
+ - maxconn
+ - maxqueue
+ - minconn
+ - no-ssl-reuse
+ - no-sslv3/tlsv10/tlsv11/tlsv12/tlsv13
+ - no-tls-tickets
+ - npn
+ - observe
+ - on-error
+ - on-marked-down
+ - on-marked-up
+ - pool-low-conn
+ - pool-max-conn
+ - pool-purge-delay
+ - port
+ - proto
+ - proxy-v2-options
+ - rise
+ - send-proxy
+ - send-proxy-v2
+ - send-proxy-v2-ssl
+ - send-proxy-v2-ssl-cn
+ - slowstart
+ - sni
+ - source
+ - ssl
+ - ssl-max-ver
+ - ssl-min-ver
+ - tfo
+ - tls-tickets
+ - track
+ - usesrc
+ - verify
+ - verifyhost
+ - weight
+ - ws
+
+ Their syntax is similar to the server line from the configuration file,
+ please refer to their individual documentation for details.
+
+add ssl ca-file <cafile> <payload>
+ Add a new certificate to a ca-file. This command is useful when you reached
+ the buffer size limit on the CLI and want to add multiple certificates.
+ Instead of doing a "set" with all the certificates you are able to add each
+ certificate individually. A "set ssl ca-file" will reset the ca-file.
+
+ Example:
+ echo -e "set ssl ca-file cafile.pem <<\n$(cat rootCA.crt)\n" | \
+ socat /var/run/haproxy.stat -
+ echo -e "add ssl ca-file cafile.pem <<\n$(cat intermediate1.crt)\n" | \
+ socat /var/run/haproxy.stat -
+ echo -e "add ssl ca-file cafile.pem <<\n$(cat intermediate2.crt)\n" | \
+ socat /var/run/haproxy.stat -
+ echo "commit ssl ca-file cafile.pem" | socat /var/run/haproxy.stat -
+
+add ssl crt-list <crtlist> <certificate>
+add ssl crt-list <crtlist> <payload>
+ Add a certificate in a crt-list. It can also be used for directories since
+ directories are now loaded the same way as the crt-lists. This command allows
+ you to use a certificate name in parameter; to use SSL options or filters, a
+ crt-list line must be sent as a payload instead. Only one crt-list line is
+ supported in the payload. This command will load the certificate for every
+ bind lines using the crt-list. To push a new certificate to HAProxy the
+ commands "new ssl cert" and "set ssl cert" must be used.
+
+ Example:
+ $ echo "new ssl cert foobar.pem" | socat /tmp/sock1 -
+ $ echo -e "set ssl cert foobar.pem <<\n$(cat foobar.pem)\n" | socat
+ /tmp/sock1 -
+ $ echo "commit ssl cert foobar.pem" | socat /tmp/sock1 -
+ $ echo "add ssl crt-list certlist1 foobar.pem" | socat /tmp/sock1 -
+
+ $ echo -e 'add ssl crt-list certlist1 <<\nfoobar.pem [allow-0rtt] foo.bar.com
+ !test1.com\n' | socat /tmp/sock1 -
+
+clear counters
+ Clear the max values of the statistics counters in each proxy (frontend &
+ backend) and in each server. The accumulated counters are not affected. The
+ internal activity counters reported by "show activity" are also reset. This
+ can be used to get clean counters after an incident, without having to
+ restart nor to clear traffic counters. This command is restricted and can
+ only be issued on sockets configured for levels "operator" or "admin".
+
+clear counters all
+ Clear all statistics counters in each proxy (frontend & backend) and in each
+ server. This has the same effect as restarting. This command is restricted
+ and can only be issued on sockets configured for level "admin".
+
+clear acl [@<ver>] <acl>
+ Remove all entries from the acl <acl>. <acl> is the #<id> or the <file>
+ returned by "show acl". Note that if the reference <acl> is a file and is
+ shared with a map, this map will be also cleared. By default only the current
+ version of the ACL is cleared (the one being matched against). However it is
+ possible to specify another version using '@' followed by this version.
+
+clear map [@<ver>] <map>
+ Remove all entries from the map <map>. <map> is the #<id> or the <file>
+ returned by "show map". Note that if the reference <map> is a file and is
+ shared with an acl, this acl will be also cleared. By default only the current
+ version of the map is cleared (the one being matched against). However it is
+ possible to specify another version using '@' followed by this version.
+
+clear table <table> [ data.<type> <operator> <value> ] | [ key <key> ]
+ Remove entries from the stick-table <table>.
+
+ This is typically used to unblock some users complaining they have been
+ abusively denied access to a service, but this can also be used to clear some
+ stickiness entries matching a server that is going to be replaced (see "show
+ table" below for details). Note that sometimes, removal of an entry will be
+ refused because it is currently tracked by a session. Retrying a few seconds
+ later after the session ends is usual enough.
+
+ In the case where no options arguments are given all entries will be removed.
+
+ When the "data." form is used entries matching a filter applied using the
+ stored data (see "stick-table" in section 4.2) are removed. A stored data
+ type must be specified in <type>, and this data type must be stored in the
+ table otherwise an error is reported. The data is compared according to
+ <operator> with the 64-bit integer <value>. Operators are the same as with
+ the ACLs :
+
+ - eq : match entries whose data is equal to this value
+ - ne : match entries whose data is not equal to this value
+ - le : match entries whose data is less than or equal to this value
+ - ge : match entries whose data is greater than or equal to this value
+ - lt : match entries whose data is less than this value
+ - gt : match entries whose data is greater than this value
+
+ When the key form is used the entry <key> is removed. The key must be of the
+ same type as the table, which currently is limited to IPv4, IPv6, integer and
+ string.
+
+ Example :
+ $ echo "show table http_proxy" | socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:2
+ >>> 0x80e6a4c: key=127.0.0.1 use=0 exp=3594729 gpc0=0 conn_rate(30000)=1 \
+ bytes_out_rate(60000)=187
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+
+ $ echo "clear table http_proxy key 127.0.0.1" | socat stdio /tmp/sock1
+
+ $ echo "show table http_proxy" | socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:1
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+ $ echo "clear table http_proxy data.gpc0 eq 1" | socat stdio /tmp/sock1
+ $ echo "show table http_proxy" | socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:1
+
+commit acl @<ver> <acl>
+ Commit all changes made to version <ver> of ACL <acl>, and deletes all past
+ versions. <acl> is the #<id> or the <file> returned by "show acl". The
+ version number must be between "curr_ver"+1 and "next_ver" as reported in
+ "show acl". The contents to be committed to the ACL can be consulted with
+ "show acl @<ver> <acl>" if desired. The specified version number has normally
+ been created with the "prepare acl" command. The replacement is atomic. It
+ consists in atomically updating the current version to the specified version,
+ which will instantly cause all entries in other versions to become invisible,
+ and all entries in the new version to become visible. It is also possible to
+ use this command to perform an atomic removal of all visible entries of an
+ ACL by calling "prepare acl" first then committing without adding any
+ entries. This command cannot be used if the reference <acl> is a file also
+ used as a map. In this case, the "commit map" command must be used instead.
+
+commit map @<ver> <map>
+ Commit all changes made to version <ver> of map <map>, and deletes all past
+ versions. <map> is the #<id> or the <file> returned by "show map". The
+ version number must be between "curr_ver"+1 and "next_ver" as reported in
+ "show map". The contents to be committed to the map can be consulted with
+ "show map @<ver> <map>" if desired. The specified version number has normally
+ been created with the "prepare map" command. The replacement is atomic. It
+ consists in atomically updating the current version to the specified version,
+ which will instantly cause all entries in other versions to become invisible,
+ and all entries in the new version to become visible. It is also possible to
+ use this command to perform an atomic removal of all visible entries of an
+ map by calling "prepare map" first then committing without adding any
+ entries.
+
+commit ssl ca-file <cafile>
+ Commit a temporary SSL CA file update transaction.
+
+ In the case of an existing CA file (in a "Used" state in "show ssl ca-file"),
+ the new CA file tree entry is inserted in the CA file tree and every instance
+ that used the CA file entry is rebuilt, along with the SSL contexts it needs.
+ All the contexts previously used by the rebuilt instances are removed.
+ Upon success, the previous CA file entry is removed from the tree.
+ Upon failure, nothing is removed or deleted, and all the original SSL
+ contexts are kept and used.
+ Once the temporary transaction is committed, it is destroyed.
+
+ In the case of a new CA file (after a "new ssl ca-file" and in a "Unused"
+ state in "show ssl ca-file"), the CA file will be inserted in the CA file
+ tree but it won't be used anywhere in HAProxy. To use it and generate SSL
+ contexts that use it, you will need to add it to a crt-list with "add ssl
+ crt-list".
+
+ See also "new ssl ca-file", "set ssl ca-file", "add ssl ca-file",
+ "abort ssl ca-file" and "add ssl crt-list".
+
+commit ssl cert <filename>
+ Commit a temporary SSL certificate update transaction.
+
+ In the case of an existing certificate (in a "Used" state in "show ssl
+ cert"), generate every SSL context and SNI it needs, insert them, and
+ remove the previous ones. Replace in memory the previous SSL certificates
+ everywhere the <filename> was used in the configuration. Upon failure it
+ doesn't remove or insert anything. Once the temporary transaction is
+ committed, it is destroyed.
+
+ In the case of a new certificate (after a "new ssl cert" and in a "Unused"
+ state in "show ssl cert"), the certificate will be committed in a certificate
+ storage, but it won't be used anywhere in haproxy. To use it and generate
+ its SNIs you will need to add it to a crt-list or a directory with "add ssl
+ crt-list".
+
+ See also "new ssl cert", "set ssl cert", "abort ssl cert" and
+ "add ssl crt-list".
+
+commit ssl crl-file <crlfile>
+ Commit a temporary SSL CRL file update transaction.
+
+ In the case of an existing CRL file (in a "Used" state in "show ssl
+ crl-file"), the new CRL file entry is inserted in the CA file tree (which
+ holds both the CA files and the CRL files) and every instance that used the
+ CRL file entry is rebuilt, along with the SSL contexts it needs.
+ All the contexts previously used by the rebuilt instances are removed.
+ Upon success, the previous CRL file entry is removed from the tree.
+ Upon failure, nothing is removed or deleted, and all the original SSL
+ contexts are kept and used.
+ Once the temporary transaction is committed, it is destroyed.
+
+ In the case of a new CRL file (after a "new ssl crl-file" and in a "Unused"
+ state in "show ssl crl-file"), the CRL file will be inserted in the CRL file
+ tree but it won't be used anywhere in HAProxy. To use it and generate SSL
+ contexts that use it, you will need to add it to a crt-list with "add ssl
+ crt-list".
+
+ See also "new ssl crl-file", "set ssl crl-file", "abort ssl crl-file" and
+ "add ssl crt-list".
+
+debug dev <command> [args]*
+ Call a developer-specific command. Only supported on a CLI connection running
+ in expert mode (see "expert-mode on"). Such commands are extremely dangerous
+ and not forgiving, any misuse may result in a crash of the process. They are
+ intended for experts only, and must really not be used unless told to do so.
+ Some of them are only available when haproxy is built with DEBUG_DEV defined
+ because they may have security implications. All of these commands require
+ admin privileges, and are purposely not documented to avoid encouraging their
+ use by people who are not at ease with the source code.
+
+del acl <acl> [<key>|#<ref>]
+ Delete all the acl entries from the acl <acl> corresponding to the key <key>.
+ <acl> is the #<id> or the <file> returned by "show acl". If the <ref> is used,
+ this command deletes only the listed reference. The reference can be found by
+ listing the content of the acl. Note that if the reference <acl> is a file and
+ is shared with a map, the entry will be also deleted in the map.
+
+del map <map> [<key>|#<ref>]
+ Delete all the map entries from the map <map> corresponding to the key <key>.
+ <map> is the #<id> or the <file> returned by "show map". If the <ref> is used,
+ this command deletes only the listed reference. The reference can be found by
+ listing the content of the map. Note that if the reference <map> is a file and
+ is shared with an acl, the entry will be also deleted in the acl.
+
+del ssl ca-file <cafile>
+ Delete a CA file tree entry from HAProxy. The CA file must be unused and
+ removed from any crt-list. "show ssl ca-file" displays the status of the CA
+ files. The deletion doesn't work with a certificate referenced directly with
+ the "ca-file" or "ca-verify-file" directives in the configuration.
+
+del ssl cert <certfile>
+ Delete a certificate store from HAProxy. The certificate must be unused and
+ removed from any crt-list or directory. "show ssl cert" displays the status
+ of the certificate. The deletion doesn't work with a certificate referenced
+ directly with the "crt" directive in the configuration.
+
+del ssl crl-file <crlfile>
+ Delete a CRL file tree entry from HAProxy. The CRL file must be unused and
+ removed from any crt-list. "show ssl crl-file" displays the status of the CRL
+ files. The deletion doesn't work with a certificate referenced directly with
+ the "crl-file" directive in the configuration.
+
+del ssl crt-list <filename> <certfile[:line]>
+ Delete an entry in a crt-list. This will delete every SNIs used for this
+ entry in the frontends. If a certificate is used several time in a crt-list,
+ you will need to provide which line you want to delete. To display the line
+ numbers, use "show ssl crt-list -n <crtlist>".
+
+del server <backend>/<server>
+ Remove a server attached to the backend <backend>. All servers are eligible,
+ except servers which are referenced by other configuration elements. The
+ server must be put in maintenance mode prior to its deletion. The operation
+ is cancelled if the server still has active or idle connections or its
+ connection queue is not empty.
+
+disable agent <backend>/<server>
+ Mark the auxiliary agent check as temporarily stopped.
+
+ In the case where an agent check is being run as an auxiliary check, due
+ to the agent-check parameter of a server directive, new checks are only
+ initialized when the agent is in the enabled state. Thus, disable agent
+ will prevent any new agent checks from being initiated until the agent is
+ re-enabled using enable agent.
+
+ When an agent is disabled the processing of an auxiliary agent check that
+ was initiated while the agent was set as enabled is as follows: All
+ results that would alter the weight, specifically "drain" or a weight
+ returned by the agent, are ignored. The processing of agent check is
+ otherwise unchanged.
+
+ The motivation for this feature is to allow the weight changing effects
+ of the agent checks to be paused to allow the weight of a server to be
+ configured using set weight without being overridden by the agent.
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+disable dynamic-cookie backend <backend>
+ Disable the generation of dynamic cookies for the backend <backend>
+
+disable frontend <frontend>
+ Mark the frontend as temporarily stopped. This corresponds to the mode which
+ is used during a soft restart : the frontend releases the port but can be
+ enabled again if needed. This should be used with care as some non-Linux OSes
+ are unable to enable it back. This is intended to be used in environments
+ where stopping a proxy is not even imaginable but a misconfigured proxy must
+ be fixed. That way it's possible to release the port and bind it into another
+ process to restore operations. The frontend will appear with status "STOP"
+ on the stats page.
+
+ The frontend may be specified either by its name or by its numeric ID,
+ prefixed with a sharp ('#').
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+disable health <backend>/<server>
+ Mark the primary health check as temporarily stopped. This will disable
+ sending of health checks, and the last health check result will be ignored.
+ The server will be in unchecked state and considered UP unless an auxiliary
+ agent check forces it down.
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+disable server <backend>/<server>
+ Mark the server DOWN for maintenance. In this mode, no more checks will be
+ performed on the server until it leaves maintenance.
+ If the server is tracked by other servers, those servers will be set to DOWN
+ during the maintenance.
+
+ In the statistics page, a server DOWN for maintenance will appear with a
+ "MAINT" status, its tracking servers with the "MAINT(via)" one.
+
+ Both the backend and the server may be specified either by their name or by
+ their numeric ID, prefixed with a sharp ('#').
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+enable agent <backend>/<server>
+ Resume auxiliary agent check that was temporarily stopped.
+
+ See "disable agent" for details of the effect of temporarily starting
+ and stopping an auxiliary agent.
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+enable dynamic-cookie backend <backend>
+ Enable the generation of dynamic cookies for the backend <backend>.
+ A secret key must also be provided.
+
+enable frontend <frontend>
+ Resume a frontend which was temporarily stopped. It is possible that some of
+ the listening ports won't be able to bind anymore (eg: if another process
+ took them since the 'disable frontend' operation). If this happens, an error
+ is displayed. Some operating systems might not be able to resume a frontend
+ which was disabled.
+
+ The frontend may be specified either by its name or by its numeric ID,
+ prefixed with a sharp ('#').
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+enable health <backend>/<server>
+ Resume a primary health check that was temporarily stopped. This will enable
+ sending of health checks again. Please see "disable health" for details.
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+enable server <backend>/<server>
+ If the server was previously marked as DOWN for maintenance, this marks the
+ server UP and checks are re-enabled.
+
+ Both the backend and the server may be specified either by their name or by
+ their numeric ID, prefixed with a sharp ('#').
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+experimental-mode [on|off]
+ Without options, this indicates whether the experimental mode is enabled or
+ disabled on the current connection. When passed "on", it turns the
+ experimental mode on for the current CLI connection only. With "off" it turns
+ it off.
+
+ The experimental mode is used to access to extra features still in
+ development. These features are currently not stable and should be used with
+ care. They may be subject to breaking changes across versions.
+
+ When used from the master CLI, this command shouldn't be prefixed, as it will
+ set the mode for any worker when connecting to its CLI.
+
+ Example:
+ echo "@1; experimental-mode on; <experimental_cmd>..." | socat /var/run/haproxy.master -
+ echo "experimental-mode on; @1 <experimental_cmd>..." | socat /var/run/haproxy.master -
+
+expert-mode [on|off]
+ This command is similar to experimental-mode but is used to toggle the
+ expert mode.
+
+ The expert mode enables displaying of expert commands that can be extremely
+ dangerous for the process and which may occasionally help developers collect
+ important information about complex bugs. Any misuse of these features will
+ likely lead to a process crash. Do not use this option without being invited
+ to do so. Note that this command is purposely not listed in the help message.
+ This command is only accessible in admin level. Changing to another level
+ automatically resets the expert mode.
+
+ When used from the master CLI, this command shouldn't be prefixed, as it will
+ set the mode for any worker when connecting to its CLI.
+
+ Example:
+ echo "@1; expert-mode on; debug dev exit 1" | socat /var/run/haproxy.master -
+ echo "expert-mode on; @1 debug dev exit 1" | socat /var/run/haproxy.master -
+
+get map <map> <value>
+get acl <acl> <value>
+ Lookup the value <value> in the map <map> or in the ACL <acl>. <map> or <acl>
+ are the #<id> or the <file> returned by "show map" or "show acl". This command
+ returns all the matching patterns associated with this map. This is useful for
+ debugging maps and ACLs. The output format is composed of one line per
+ matching type. Each line is composed of space-delimited series of words.
+
+ The first two words are:
+
+ <match method>: The match method applied. It can be "found", "bool",
+ "int", "ip", "bin", "len", "str", "beg", "sub", "dir",
+ "dom", "end" or "reg".
+
+ <match result>: The result. Can be "match" or "no-match".
+
+ The following words are returned only if the pattern matches an entry.
+
+ <index type>: "tree" or "list". The internal lookup algorithm.
+
+ <case>: "case-insensitive" or "case-sensitive". The
+ interpretation of the case.
+
+ <entry matched>: match="<entry>". Return the matched pattern. It is
+ useful with regular expressions.
+
+ The last two words are used to show the returned value and its type. With the
+ "acl" case, the pattern doesn't exist.
+
+ return=nothing: No return because there are no "map".
+ return="<value>": The value returned in the string format.
+ return=cannot-display: The value cannot be converted as string.
+
+ type="<type>": The type of the returned sample.
+
+get var <name>
+ Show the existence, type and contents of the process-wide variable 'name'.
+ Only process-wide variables are readable, so the name must begin with
+ 'proc.' otherwise no variable will be found. This command requires levels
+ "operator" or "admin".
+
+get weight <backend>/<server>
+ Report the current weight and the initial weight of server <server> in
+ backend <backend> or an error if either doesn't exist. The initial weight is
+ the one that appears in the configuration file. Both are normally equal
+ unless the current weight has been changed. Both the backend and the server
+ may be specified either by their name or by their numeric ID, prefixed with a
+ sharp ('#').
+
+help [<command>]
+ Print the list of known keywords and their basic usage, or commands matching
+ the requested one. The same help screen is also displayed for unknown
+ commands.
+
+httpclient <method> <URI>
+ Launch an HTTP client request and print the response on the CLI. Only
+ supported on a CLI connection running in expert mode (see "expert-mode on").
+ It's only meant for debugging. The httpclient is able to resolve a server
+ name in the URL using the "default" resolvers section, which is populated
+ with the DNS servers of your /etc/resolv.conf by default. However it won't be
+ able to resolve an host from /etc/hosts if you don't use a local dns daemon
+ which can resolve those.
+
+new ssl ca-file <cafile>
+ Create a new empty CA file tree entry to be filled with a set of CA
+ certificates and added to a crt-list. This command should be used in
+ combination with "set ssl ca-file", "add ssl ca-file" and "add ssl crt-list".
+
+new ssl cert <filename>
+ Create a new empty SSL certificate store to be filled with a certificate and
+ added to a directory or a crt-list. This command should be used in
+ combination with "set ssl cert" and "add ssl crt-list".
+
+new ssl crl-file <crlfile>
+ Create a new empty CRL file tree entry to be filled with a set of CRLs
+ and added to a crt-list. This command should be used in combination with "set
+ ssl crl-file" and "add ssl crt-list".
+
+prepare acl <acl>
+ Allocate a new version number in ACL <acl> for atomic replacement. <acl> is
+ the #<id> or the <file> returned by "show acl". The new version number is
+ shown in response after "New version created:". This number will then be
+ usable to prepare additions of new entries into the ACL which will then
+ atomically replace the current ones once committed. It is reported as
+ "next_ver" in "show acl". There is no impact of allocating new versions, as
+ unused versions will automatically be removed once a more recent version is
+ committed. Version numbers are unsigned 32-bit values which wrap at the end,
+ so care must be taken when comparing them in an external program. This
+ command cannot be used if the reference <acl> is a file also used as a map.
+ In this case, the "prepare map" command must be used instead.
+
+prepare map <map>
+ Allocate a new version number in map <map> for atomic replacement. <map> is
+ the #<id> or the <file> returned by "show map". The new version number is
+ shown in response after "New version created:". This number will then be
+ usable to prepare additions of new entries into the map which will then
+ atomically replace the current ones once committed. It is reported as
+ "next_ver" in "show map". There is no impact of allocating new versions, as
+ unused versions will automatically be removed once a more recent version is
+ committed. Version numbers are unsigned 32-bit values which wrap at the end,
+ so care must be taken when comparing them in an external program.
+
+prompt
+ Toggle the prompt at the beginning of the line and enter or leave interactive
+ mode. In interactive mode, the connection is not closed after a command
+ completes. Instead, the prompt will appear again, indicating the user that
+ the interpreter is waiting for a new command. The prompt consists in a right
+ angle bracket followed by a space "> ". This mode is particularly convenient
+ when one wants to periodically check information such as stats or errors.
+ It is also a good idea to enter interactive mode before issuing a "help"
+ command.
+
+quit
+ Close the connection when in interactive mode.
+
+set anon [on|off] [<key>]
+ This command enables or disables the "anonymized mode" for the current CLI
+ session, which replaces certain fields considered sensitive or confidential
+ in command outputs with hashes that preserve sufficient consistency between
+ elements to help developers identify relations between elements when trying
+ to spot bugs, but a low enough bit count (24) to make them non-reversible due
+ to the high number of possible matches. When turned on, if no key is
+ specified, the global key will be used (either specified in the configuration
+ file by "anonkey" or set via the CLI command "set anon global-key"). If no such
+ key was set, a random one will be generated. Otherwise it's possible to
+ specify the 32-bit key to be used for the current session, for example, to
+ reuse the key that was used in a previous dump to help compare outputs.
+ Developers will never need this key and it's recommended never to share it as
+ it could allow to confirm/infirm some guesses about what certain hashes could
+ be hiding.
+
+set dynamic-cookie-key backend <backend> <value>
+ Modify the secret key used to generate the dynamic persistent cookies.
+ This will break the existing sessions.
+
+set anon global-key <key>
+ This sets the global anonymizing key to <key>, which must be a 32-bit
+ integer between 0 and 4294967295 (0 disables the global key). This command
+ requires admin privilege.
+
+set map <map> [<key>|#<ref>] <value>
+ Modify the value corresponding to each key <key> in a map <map>. <map> is the
+ #<id> or <file> returned by "show map". If the <ref> is used in place of
+ <key>, only the entry pointed by <ref> is changed. The new value is <value>.
+
+set maxconn frontend <frontend> <value>
+ Dynamically change the specified frontend's maxconn setting. Any positive
+ value is allowed including zero, but setting values larger than the global
+ maxconn does not make much sense. If the limit is increased and connections
+ were pending, they will immediately be accepted. If it is lowered to a value
+ below the current number of connections, acceptance of new connections will be
+ delayed until the threshold is reached. The frontend might be specified by
+ either its name or its numeric ID prefixed with a sharp ('#').
+
+set maxconn server <backend/server> <value>
+ Dynamically change the specified server's maxconn setting. Any positive
+ value is allowed including zero, but setting values larger than the global
+ maxconn does not make much sense.
+
+set maxconn global <maxconn>
+ Dynamically change the global maxconn setting within the range defined by the
+ initial global maxconn setting. If it is increased and connections were
+ pending, they will immediately be accepted. If it is lowered to a value below
+ the current number of connections, acceptance of new connections will be
+ delayed until the threshold is reached. A value of zero restores the initial
+ setting.
+
+set profiling { tasks | memory } { auto | on | off }
+ Enables or disables CPU or memory profiling for the indicated subsystem. This
+ is equivalent to setting or clearing the "profiling" settings in the "global"
+ section of the configuration file. Please also see "show profiling". Note
+ that manually setting the tasks profiling to "on" automatically resets the
+ scheduler statistics, thus allows to check activity over a given interval.
+ The memory profiling is limited to certain operating systems (known to work
+ on the linux-glibc target), and requires USE_MEMORY_PROFILING to be set at
+ compile time.
+
+set rate-limit connections global <value>
+ Change the process-wide connection rate limit, which is set by the global
+ 'maxconnrate' setting. A value of zero disables the limitation. This limit
+ applies to all frontends and the change has an immediate effect. The value
+ is passed in number of connections per second.
+
+set rate-limit http-compression global <value>
+ Change the maximum input compression rate, which is set by the global
+ 'maxcomprate' setting. A value of zero disables the limitation. The value is
+ passed in number of kilobytes per second. The value is available in the "show
+ info" on the line "CompressBpsRateLim" in bytes.
+
+set rate-limit sessions global <value>
+ Change the process-wide session rate limit, which is set by the global
+ 'maxsessrate' setting. A value of zero disables the limitation. This limit
+ applies to all frontends and the change has an immediate effect. The value
+ is passed in number of sessions per second.
+
+set rate-limit ssl-sessions global <value>
+ Change the process-wide SSL session rate limit, which is set by the global
+ 'maxsslrate' setting. A value of zero disables the limitation. This limit
+ applies to all frontends and the change has an immediate effect. The value
+ is passed in number of sessions per second sent to the SSL stack. It applies
+ before the handshake in order to protect the stack against handshake abuses.
+
+set server <backend>/<server> addr <ip4 or ip6 address> [port <port>]
+ Replace the current IP address of a server by the one provided.
+ Optionally, the port can be changed using the 'port' parameter.
+ Note that changing the port also supports switching from/to port mapping
+ (notation with +X or -Y), only if a port is configured for the health check.
+
+set server <backend>/<server> agent [ up | down ]
+ Force a server's agent to a new state. This can be useful to immediately
+ switch a server's state regardless of some slow agent checks for example.
+ Note that the change is propagated to tracking servers if any.
+
+set server <backend>/<server> agent-addr <addr> [port <port>]
+ Change addr for servers agent checks. Allows to migrate agent-checks to
+ another address at runtime. You can specify both IP and hostname, it will be
+ resolved.
+ Optionally, change the agent's port.
+
+set server <backend>/<server> agent-port <port>
+ Change the port used for agent checks.
+
+set server <backend>/<server> agent-send <value>
+ Change agent string sent to agent check target. Allows to update string while
+ changing server address to keep those two matching.
+
+set server <backend>/<server> health [ up | stopping | down ]
+ Force a server's health to a new state. This can be useful to immediately
+ switch a server's state regardless of some slow health checks for example.
+ Note that the change is propagated to tracking servers if any.
+
+set server <backend>/<server> check-addr <ip4 | ip6> [port <port>]
+ Change the IP address used for server health checks.
+ Optionally, change the port used for server health checks.
+
+set server <backend>/<server> check-port <port>
+ Change the port used for health checking to <port>
+
+set server <backend>/<server> state [ ready | drain | maint ]
+ Force a server's administrative state to a new state. This can be useful to
+ disable load balancing and/or any traffic to a server. Setting the state to
+ "ready" puts the server in normal mode, and the command is the equivalent of
+ the "enable server" command. Setting the state to "maint" disables any traffic
+ to the server as well as any health checks. This is the equivalent of the
+ "disable server" command. Setting the mode to "drain" only removes the server
+ from load balancing but still allows it to be checked and to accept new
+ persistent connections. Changes are propagated to tracking servers if any.
+
+set server <backend>/<server> weight <weight>[%]
+ Change a server's weight to the value passed in argument. This is the exact
+ equivalent of the "set weight" command below.
+
+set server <backend>/<server> fqdn <FQDN>
+ Change a server's FQDN to the value passed in argument. This requires the
+ internal run-time DNS resolver to be configured and enabled for this server.
+
+set server <backend>/<server> ssl [ on | off ] (deprecated)
+ This option configures SSL ciphering on outgoing connections to the server.
+ When switched off, all traffic becomes plain text; health check path is not
+ changed.
+
+ This command is deprecated, create a new server dynamically with or without
+ SSL instead, using the "add server" command.
+
+set severity-output [ none | number | string ]
+ Change the severity output format of the stats socket connected to for the
+ duration of the current session.
+
+set ssl ca-file <cafile> <payload>
+ This command is part of a transaction system, the "commit ssl ca-file" and
+ "abort ssl ca-file" commands could be required.
+ If there is no on-going transaction, it will create a CA file tree entry into
+ which the certificates contained in the payload will be stored. The CA file
+ entry will not be stored in the CA file tree and will only be kept in a
+ temporary transaction. If a transaction with the same filename already exists,
+ the previous CA file entry will be deleted and replaced by the new one.
+ Once the modifications are done, you have to commit the transaction through
+ a "commit ssl ca-file" call. If you want to add multiple certificates
+ separately, you can use the "add ssl ca-file" command.
+
+ Example:
+ echo -e "set ssl ca-file cafile.pem <<\n$(cat rootCA.crt)\n" | \
+ socat /var/run/haproxy.stat -
+ echo "commit ssl ca-file cafile.pem" | socat /var/run/haproxy.stat -
+
+set ssl cert <filename> <payload>
+ This command is part of a transaction system, the "commit ssl cert" and
+ "abort ssl cert" commands could be required.
+ This whole transaction system works on any certificate displayed by the
+ "show ssl cert" command, so on any frontend or backend certificate.
+ If there is no on-going transaction, it will duplicate the certificate
+ <filename> in memory to a temporary transaction, then update this
+ transaction with the PEM file in the payload. If a transaction exists with
+ the same filename, it will update this transaction. It's also possible to
+ update the files linked to a certificate (.issuer, .sctl, .ocsp etc.)
+ Once the modifications are done, you have to "commit ssl cert" the
+ transaction.
+
+ Injection of files over the CLI must be done with caution since an empty line
+ is used to notify the end of the payload. It is recommended to inject a PEM
+ file which has been sanitized. A simple method would be to remove every empty
+ line and only leave what are in the PEM sections. It could be achieved with a
+ sed command.
+
+ Example:
+
+ # With some simple sanitizing
+ echo -e "set ssl cert localhost.pem <<\n$(sed -n '/^$/d;/-BEGIN/,/-END/p' 127.0.0.1.pem)\n" | \
+ socat /var/run/haproxy.stat -
+
+ # Complete example with commit
+ echo -e "set ssl cert localhost.pem <<\n$(cat 127.0.0.1.pem)\n" | \
+ socat /var/run/haproxy.stat -
+ echo -e \
+ "set ssl cert localhost.pem.issuer <<\n $(cat 127.0.0.1.pem.issuer)\n" | \
+ socat /var/run/haproxy.stat -
+ echo -e \
+ "set ssl cert localhost.pem.ocsp <<\n$(base64 -w 1000 127.0.0.1.pem.ocsp)\n" | \
+ socat /var/run/haproxy.stat -
+ echo "commit ssl cert localhost.pem" | socat /var/run/haproxy.stat -
+
+set ssl crl-file <crlfile> <payload>
+ This command is part of a transaction system, the "commit ssl crl-file" and
+ "abort ssl crl-file" commands could be required.
+ If there is no on-going transaction, it will create a CRL file tree entry into
+ which the Revocation Lists contained in the payload will be stored. The CRL
+ file entry will not be stored in the CRL file tree and will only be kept in a
+ temporary transaction. If a transaction with the same filename already exists,
+ the previous CRL file entry will be deleted and replaced by the new one.
+ Once the modifications are done, you have to commit the transaction through
+ a "commit ssl crl-file" call.
+
+ Example:
+ echo -e "set ssl crl-file crlfile.pem <<\n$(cat rootCRL.pem)\n" | \
+ socat /var/run/haproxy.stat -
+ echo "commit ssl crl-file crlfile.pem" | socat /var/run/haproxy.stat -
+
+set ssl ocsp-response <response | payload>
+ This command is used to update an OCSP Response for a certificate (see "crt"
+ on "bind" lines). Same controls are performed as during the initial loading of
+ the response. The <response> must be passed as a base64 encoded string of the
+ DER encoded response from the OCSP server. This command is not supported with
+ BoringSSL.
+
+ Example:
+ openssl ocsp -issuer issuer.pem -cert server.pem \
+ -host ocsp.issuer.com:80 -respout resp.der
+ echo "set ssl ocsp-response $(base64 -w 10000 resp.der)" | \
+ socat stdio /var/run/haproxy.stat
+
+ using the payload syntax:
+ echo -e "set ssl ocsp-response <<\n$(base64 resp.der)\n" | \
+ socat stdio /var/run/haproxy.stat
+
+set ssl tls-key <id> <tlskey>
+ Set the next TLS key for the <id> listener to <tlskey>. This key becomes the
+ ultimate key, while the penultimate one is used for encryption (others just
+ decrypt). The oldest TLS key present is overwritten. <id> is either a numeric
+ #<id> or <file> returned by "show tls-keys". <tlskey> is a base64 encoded 48
+ or 80 byte TLS ticket key (ex. openssl rand 80 | openssl base64 -A).
+
+set table <table> key <key> [data.<data_type> <value>]*
+ Create or update a stick-table entry in the table. If the key is not present,
+ an entry is inserted. See stick-table in section 4.2 to find all possible
+ values for <data_type>. The most likely use consists in dynamically entering
+ entries for source IP addresses, with a flag in gpc0 to dynamically block an
+ IP address or affect its quality of service. It is possible to pass multiple
+ data_types in a single call.
+
+set timeout cli <delay>
+ Change the CLI interface timeout for current connection. This can be useful
+ during long debugging sessions where the user needs to constantly inspect
+ some indicators without being disconnected. The delay is passed in seconds.
+
+set var <name> <expression>
+set var <name> expr <expression>
+set var <name> fmt <format>
+ Allows to set or overwrite the process-wide variable 'name' with the result
+ of expression <expression> or format string <format>. Only process-wide
+ variables may be used, so the name must begin with 'proc.' otherwise no
+ variable will be set. The <expression> and <format> may only involve
+ "internal" sample fetch keywords and converters even though the most likely
+ useful ones will be str('something'), int(), simple strings or references to
+ other variables. Note that the command line parser doesn't know about quotes,
+ so any space in the expression must be preceded by a backslash. This command
+ requires levels "operator" or "admin". This command is only supported on a
+ CLI connection running in experimental mode (see "experimental-mode on").
+
+set weight <backend>/<server> <weight>[%]
+ Change a server's weight to the value passed in argument. If the value ends
+ with the '%' sign, then the new weight will be relative to the initially
+ configured weight. Absolute weights are permitted between 0 and 256.
+ Relative weights must be positive, with the resulting absolute weight
+ capped at 256. Servers which are part of a farm running a static
+ load-balancing algorithm have stricter limitations because the weight
+ cannot change once set. Thus for these servers, the only accepted values
+ are 0 and 100% (or 0 and the initial weight). Changes take effect
+ immediately, though certain LB algorithms require a certain amount of
+ requests to consider changes. A typical usage of this command is to
+ disable a server during an update by setting its weight to zero, then to
+ enable it again after the update by setting it back to 100%. This command
+ is restricted and can only be issued on sockets configured for level
+ "admin". Both the backend and the server may be specified either by their
+ name or by their numeric ID, prefixed with a sharp ('#').
+
+show acl [[@<ver>] <acl>]
+ Dump info about acl converters. Without argument, the list of all available
+ acls is returned. If an <acl> is specified, its contents are dumped. <acl> is
+ the #<id> or <file>. By default the current version of the ACL is shown (the
+ version currently being matched against and reported as 'curr_ver' in the ACL
+ list). It is possible to instead dump other versions by prepending '@<ver>'
+ before the ACL's identifier. The version works as a filter and non-existing
+ versions will simply report no result. The dump format is the same as for the
+ maps even for the sample values. The data returned are not a list of
+ available ACL, but are the list of all patterns composing any ACL. Many of
+ these patterns can be shared with maps. The 'entry_cnt' value represents the
+ count of all the ACL entries, not just the active ones, which means that it
+ also includes entries currently being added.
+
+show anon
+ Display the current state of the anonymized mode (enabled or disabled) and
+ the current session's key.
+
+show backend
+ Dump the list of backends available in the running process
+
+show cli level
+ Display the CLI level of the current CLI session. The result could be
+ 'admin', 'operator' or 'user'. See also the 'operator' and 'user' commands.
+
+ Example :
+
+ $ socat /tmp/sock1 readline
+ prompt
+ > operator
+ > show cli level
+ operator
+ > user
+ > show cli level
+ user
+ > operator
+ Permission denied
+
+operator
+ Decrease the CLI level of the current CLI session to operator. It can't be
+ increased. It also drops expert and experimental mode. See also "show cli
+ level".
+
+user
+ Decrease the CLI level of the current CLI session to user. It can't be
+ increased. It also drops expert and experimental mode. See also "show cli
+ level".
+
+show activity [-1 | 0 | thread_num]
+ Reports some counters about internal events that will help developers and
+ more generally people who know haproxy well enough to narrow down the causes
+ of reports of abnormal behaviours. A typical example would be a properly
+ running process never sleeping and eating 100% of the CPU. The output fields
+ will be made of one line per metric, and per-thread counters on the same
+ line. These counters are 32-bit and will wrap during the process's life, which
+ is not a problem since calls to this command will typically be performed
+ twice. The fields are purposely not documented so that their exact meaning is
+ verified in the code where the counters are fed. These values are also reset
+ by the "clear counters" command. On multi-threaded deployments, the first
+ column will indicate the total (or average depending on the nature of the
+ metric) for all threads, and the list of all threads' values will be
+ represented between square brackets in the thread order. Optionally the
+ thread number to be dumped may be specified in argument. The special value
+ "0" will report the aggregated value (first column), and "-1", which is the
+ default, will display all the columns. Note that just like in single-threaded
+ mode, there will be no brackets when a single column is requested.
+
+show cli sockets
+ List CLI sockets. The output format is composed of 3 fields separated by
+ spaces. The first field is the socket address, it can be a unix socket, a
+ ipv4 address:port couple or a ipv6 one. Sockets of other types won't be
+ dumped. The second field describes the level of the socket: 'admin', 'user'
+ or 'operator'. The last field lists the processes on which the socket is
+ bound, separated by commas; they can be numbers or 'all'.
+
+ Example :
+
+ $ echo 'show cli sockets' | socat stdio /tmp/sock1
+ # socket lvl processes
+ /tmp/sock1 admin all
+ 127.0.0.1:9999 user 2,3,4
+ 127.0.0.2:9969 user 2
+ [::1]:9999 operator 2
+
+show cache
+ List the configured caches and the objects stored in each cache tree.
+
+ $ echo 'show cache' | socat stdio /tmp/sock1
+ 0x7f6ac6c5b03a: foobar (shctx:0x7f6ac6c5b000, available blocks:3918)
+ 1 2 3 4
+
+ 1. pointer to the cache structure
+ 2. cache name
+ 3. pointer to the mmap area (shctx)
+ 4. number of blocks available for reuse in the shctx
+
+ 0x7f6ac6c5b4cc hash:286881868 vary:0x0011223344556677 size:39114 (39 blocks), refcount:9, expire:237
+ 1 2 3 4 5 6 7
+
+ 1. pointer to the cache entry
+ 2. first 32 bits of the hash
+ 3. secondary hash of the entry in case of vary
+ 4. size of the object in bytes
+ 5. number of blocks used for the object
+ 6. number of transactions using the entry
+ 7. expiration time, can be negative if already expired
+
+show dev
+ This command is meant to centralize some information that HAProxy developers
+ might need to better understand the causes of a given problem. It generally
+ does not provide useful information for the user, but this information allows
+ developers to eliminate certain hypotheses. The format is roughly a series of
+ sections containing indented lines with one element per line, such as the OS
+ type and version, the CPU type or the boot-time FD limits for example. Some
+ fields will be omitted to avoid repetition or output pollution when they do
+ not add value (e.g. unlimited values). More fields may appear in the future,
+ and some may change. This output is not meant for being parsed by scripts, and
+ should not be considered with a high degree of reliability, it's essentially
+ aimed at saving time for those who can read it.
+
+ Technically speaking, such information is taken as-is out of an internal
+ structure that stores them together at boot time so that they can also be
+ found in a core file after a crash. As such, it may happen that developers
+ ask for an early output on a well behaving process to compare with what is
+ found in a core dump, or to compare between several reloads (e.g. some limits
+ might change). If anonymizing is enabled, any possibly sensitive value will
+ be anonymized as well (e.g. the node name).
+
+ Example of output:
+ $ socat stdio /tmp/sock1 <<< "show dev"
+ Platform info
+ machine vendor: To be filled by O.E.M
+ machine family: Altra
+ cpu model: Impl 0x41 Arch 8 Part 0xd0c r3p1
+ virtual machine: no
+ container: no
+ OS name: Linux
+ OS release: 6.2.0-36-generic
+ OS version: #37~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Mon Oct 9 18:01:07 UTC 2
+ OS architecture: aarch64
+ node name: 489aaf
+ Process info
+ pid: 1735846
+ boot uid: 509
+ boot gid: 1002
+ fd limit (soft): 1024
+ fd limit (hard): 1048576
+
+show env [<name>]
+ Dump one or all environment variables known by the process. Without any
+ argument, all variables are dumped. With an argument, only the specified
+ variable is dumped if it exists. Otherwise "Variable not found" is emitted.
+ Variables are dumped in the same format as they are stored or returned by the
+ "env" utility, that is, "<name>=<value>". This can be handy when debugging
+ certain configuration files making heavy use of environment variables to
+ ensure that they contain the expected values. This command is restricted and
+ can only be issued on sockets configured for levels "operator" or "admin".
+
+show errors [<iid>|<proxy>] [request|response]
+ Dump last known request and response errors collected by frontends and
+ backends. If <iid> is specified, the dump is limited to errors concerning
+ either the frontend or backend whose ID is <iid>. Proxy ID "-1" will cause
+ all instances to be dumped. If a proxy name is specified instead, its ID
+ will be used as the filter. If "request" or "response" is added after the
+ proxy name or ID, only request or response errors will be dumped. This
+ command is restricted and can only be issued on sockets configured for
+ levels "operator" or "admin".
+
+ The errors which may be collected are the last request and response errors
+ caused by protocol violations, often due to invalid characters in header
+ names. The report precisely indicates what exact character violated the
+ protocol. Other important information such as the exact date the error was
+ detected, frontend and backend names, the server name (when known), the
+ internal transaction ID and the source address which has initiated the
+ session are reported too.
+
+ All characters are returned, and non-printable characters are encoded. The
+ most common ones (\t = 9, \n = 10, \r = 13 and \e = 27) are encoded as one
+ letter following a backslash. The backslash itself is encoded as '\\' to
+ avoid confusion. Other non-printable characters are encoded '\xNN' where
+ NN is the two-digits hexadecimal representation of the character's ASCII
+ code.
+
+ Lines are prefixed with the position of their first character, starting at 0
+ for the beginning of the buffer. At most one input line is printed per line,
+ and large lines will be broken into multiple consecutive output lines so that
+ the output never goes beyond 79 characters wide. It is easy to detect if a
+ line was broken, because it will not end with '\n' and the next line's offset
+ will be followed by a '+' sign, indicating it is a continuation of previous
+ line.
+
+ Example :
+ $ echo "show errors -1 response" | socat stdio /tmp/sock1
+ >>> [04/Mar/2009:15:46:56.081] backend http-in (#2) : invalid response
+ src 127.0.0.1, session #54, frontend fe-eth0 (#1), server s2 (#1)
+ response length 213 bytes, error at position 23:
+
+ 00000 HTTP/1.0 200 OK\r\n
+ 00017 header/bizarre:blah\r\n
+ 00038 Location: blah\r\n
+ 00054 Long-line: this is a very long line which should b
+ 00104+ e broken into multiple lines on the output buffer,
+ 00154+ otherwise it would be too large to print in a ter
+ 00204+ minal\r\n
+ 00211 \r\n
+
+ In the example above, we see that the backend "http-in" which has internal
+ ID 2 has blocked an invalid response from its server s2 which has internal
+ ID 1. The request was on transaction 54 (called "session" here) initiated
+ by source 127.0.0.1 and received by frontend fe-eth0 whose ID is 1. The
+ total response length was 213 bytes when the error was detected, and the
+ error was at byte 23. This is the slash ('/') in header name
+ "header/bizarre", which is not a valid HTTP character for a header name.
+
+show events [<sink>] [-w] [-n]
+ With no option, this lists all known event sinks and their types. With an
+ option, it will dump all available events in the designated sink if it is of
+ type buffer. If option "-w" is passed after the sink name, then once the end
+ of the buffer is reached, the command will wait for new events and display
+ them. It is possible to stop the operation by entering any input (which will
+ be discarded) or by closing the session. Finally, option "-n" is used to
+ directly seek to the end of the buffer, which is often convenient when
+ combined with "-w" to only report new events. For convenience, "-wn" or "-nw"
+ may be used to enable both options at once.
+
+show fd [-!plcfbsd]* [<fd>]
+ Dump the list of either all open file descriptors or just the one number <fd>
+ if specified. A set of flags may optionally be passed to restrict the dump
+ only to certain FD types or to omit certain FD types. When '-' or '!' are
+ encountered, the selection is inverted for the following characters in the
+ same argument. The inversion is reset before each argument word delimited by
+ white spaces. Selectable FD types include 'p' for pipes, 'l' for listeners,
+ 'c' for connections (any type), 'f' for frontend connections, 'b' for backend
+ connections (any type), 's' for connections to servers, 'd' for connections
+ to the "dispatch" address or the backend's transparent address. With this,
+ 'b' is a shortcut for 'sd' and 'c' for 'fb' or 'fsd'. 'c!f' is equivalent to
+ 'b' ("any connections except frontend connections" are indeed backend
+ connections). This is only aimed at developers who need to observe internal
+ states in order to debug complex issues such as abnormal CPU usages. One fd
+ is reported per line, and for each of them, its state in the poller using
+ upper case letters for enabled flags and lower case for disabled flags, using
+ "P" for "polled", "R" for "ready", "A" for "active", the events status using
+ "H" for "hangup", "E" for "error", "O" for "output", "P" for "priority" and
+ "I" for "input", a few other flags like "N" for "new" (just added into the fd
+ cache), "U" for "updated" (received an update in the fd cache), "L" for
+ "linger_risk", "C" for "cloned", then the cached entry position, the pointer
+ to the internal owner, the pointer to the I/O callback and its name when
+ known. When the owner is a connection, the connection flags, and the target
+ are reported (frontend, proxy or server). When the owner is a listener, the
+ listener's state and its frontend are reported. There is no point in using
+ this command without a good knowledge of the internals. It's worth noting
+ that the output format may evolve over time so this output must not be parsed
+ by tools designed to be durable. Some internal structure states may look
+ suspicious to the function listing them, in this case the output line will be
+ suffixed with an exclamation mark ('!'). This may help find a starting point
+ when trying to diagnose an incident.
+
+show info [typed|json] [desc] [float]
+ Dump info about haproxy status on current process. If "typed" is passed as an
+ optional argument, field numbers, names and types are emitted as well so that
+ external monitoring products can easily retrieve, possibly aggregate, then
+ report information found in fields they don't know. Each field is dumped on
+ its own line. If "json" is passed as an optional argument then
+ information provided by "typed" output is provided in JSON format as a
+ list of JSON objects. By default, the format contains only two columns
+ delimited by a colon (':'). The left one is the field name and the right
+ one is the value. It is very important to note that in typed output
+ format, the dump for a single object is contiguous so that there is no
+ need for a consumer to store everything at once. If "float" is passed as an
+ optional argument, some fields usually emitted as integers may switch to
+ floats for higher accuracy. It is purposely unspecified which ones are
+ concerned as this might evolve over time. Using this option implies that the
+ consumer is able to process floats. The output format used is sprintf("%f").
+
+ When using the typed output format, each line is made of 4 columns delimited
+ by colons (':'). The first column is a dot-delimited series of 3 elements. The
+ first element is the numeric position of the field in the list (starting at
+ zero). This position shall not change over time, but holes are to be expected,
+ depending on build options or if some fields are deleted in the future. The
+ second element is the field name as it appears in the default "show info"
+ output. The third element is the relative process number starting at 1.
+
+ The rest of the line starting after the first colon follows the "typed output
+ format" described in the section above. In short, the second column (after the
+ first ':') indicates the origin, nature and scope of the variable. The third
+ column indicates the type of the field, among "s32", "s64", "u32", "u64" and
+ "str". Then the fourth column is the value itself, which the consumer knows
+ how to parse thanks to column 3 and how to process thanks to column 2.
+
+ Thus the overall line format in typed mode is :
+
+ <field_pos>.<field_name>.<process_num>:<tags>:<type>:<value>
+
+ When "desc" is appended to the command, one extra colon followed by a quoted
+ string is appended with a description for the metric. At the time of writing,
+ this is only supported for the "typed" and default output formats.
+
+ Example :
+
+ > show info
+ Name: HAProxy
+ Version: 1.7-dev1-de52ea-146
+ Release_date: 2016/03/11
+ Nbproc: 1
+ Process_num: 1
+ Pid: 28105
+ Uptime: 0d 0h00m04s
+ Uptime_sec: 4
+ Memmax_MB: 0
+ PoolAlloc_MB: 0
+ PoolUsed_MB: 0
+ PoolFailed: 0
+ (...)
+
+ > show info typed
+ 0.Name.1:POS:str:HAProxy
+ 1.Version.1:POS:str:1.7-dev1-de52ea-146
+ 2.Release_date.1:POS:str:2016/03/11
+ 3.Nbproc.1:CGS:u32:1
+ 4.Process_num.1:KGP:u32:1
+ 5.Pid.1:SGP:u32:28105
+ 6.Uptime.1:MDP:str:0d 0h00m08s
+ 7.Uptime_sec.1:MDP:u32:8
+ 8.Memmax_MB.1:CLP:u32:0
+ 9.PoolAlloc_MB.1:MGP:u32:0
+ 10.PoolUsed_MB.1:MGP:u32:0
+ 11.PoolFailed.1:MCP:u32:0
+ (...)
+
+ In the typed format, the presence of the process ID at the end of the
+ first column makes it very easy to visually aggregate outputs from
+ multiple processes.
+ Example :
+
+ $ ( echo show info typed | socat /var/run/haproxy.sock1 ; \
+ echo show info typed | socat /var/run/haproxy.sock2 ) | \
+ sort -t . -k 1,1n -k 2,2 -k 3,3n
+ 0.Name.1:POS:str:HAProxy
+ 0.Name.2:POS:str:HAProxy
+ 1.Version.1:POS:str:1.7-dev1-868ab3-148
+ 1.Version.2:POS:str:1.7-dev1-868ab3-148
+ 2.Release_date.1:POS:str:2016/03/11
+ 2.Release_date.2:POS:str:2016/03/11
+ 3.Nbproc.1:CGS:u32:2
+ 3.Nbproc.2:CGS:u32:2
+ 4.Process_num.1:KGP:u32:1
+ 4.Process_num.2:KGP:u32:2
+ 5.Pid.1:SGP:u32:30120
+ 5.Pid.2:SGP:u32:30121
+ 6.Uptime.1:MDP:str:0d 0h01m28s
+ 6.Uptime.2:MDP:str:0d 0h01m28s
+ (...)
+
+ The format of JSON output is described in a schema which may be output
+ using "show schema json".
+
+ The JSON output contains no extra whitespace in order to reduce the
+ volume of output. For human consumption passing the output through a
+ pretty printer may be helpful. Example :
+
+ $ echo "show info json" | socat /var/run/haproxy.sock stdio | \
+ python -m json.tool
+
+show libs
+ Dump the list of loaded shared dynamic libraries and object files, on systems
+ that support it. When available, for each shared object the range of virtual
+ addresses will be indicated, the size and the path to the object. This can be
+ used for example to try to estimate what library provides a function that
+ appears in a dump. Note that on many systems, addresses will change upon each
+ restart (address space randomization), so that this list would need to be
+ retrieved upon startup if it is expected to be used to analyse a core file.
+ This command may only be issued on sockets configured for levels "operator"
+ or "admin". Note that the output format may vary between operating systems,
+ architectures and even haproxy versions, and ought not to be relied on in
+ scripts.
+
+show map [[@<ver>] <map>]
+ Dump info about map converters. Without argument, the list of all available
+ maps is returned. If a <map> is specified, its contents are dumped. <map> is
+ the #<id> or <file>. By default the current version of the map is shown (the
+ version currently being matched against and reported as 'curr_ver' in the map
+ list). It is possible to instead dump other versions by prepending '@<ver>'
+ before the map's identifier. The version works as a filter and non-existing
+ versions will simply report no result. The 'entry_cnt' value represents the
+ count of all the map entries, not just the active ones, which means that it
+ also includes entries currently being added.
+
+ In the output, the first column is a unique entry identifier, which is usable
+ as a reference for operations "del map" and "set map". The second column is
+ the pattern and the third column is the sample if available. The data returned
+ are not directly a list of available maps, but are the list of all patterns
+ composing any map. Many of these patterns can be shared with ACL.
+
+show peers [dict|-] [<peers section>]
+ Dump info about the peers configured in "peers" sections. Without argument,
+ the list of the peers belonging to all the "peers" sections are listed. If
+ <peers section> is specified, only the information about the peers belonging
+ to this "peers" section are dumped. When "dict" is specified before the peers
+ section name, the entire Tx/Rx dictionary caches will also be dumped (very
+ large). Passing "-" may be required to dump a peers section called "dict".
+
+ Here are two examples of outputs where hostA, hostB and hostC peers belong to
+ "sharedlb" peers sections. Only hostA and hostB are connected. Only hostA has
+ sent data to hostB.
+
+ $ echo "show peers" | socat - /tmp/hostA
+ 0x55deb0224320: [15/Apr/2019:11:28:01] id=sharedlb state=0 flags=0x3 \
+ resync_timeout=<PAST> task_calls=45122
+ 0x55deb022b540: id=hostC(remote) addr=127.0.0.12:10002 status=CONN \
+ reconnect=4s confirm=0
+ flags=0x0
+ 0x55deb022a440: id=hostA(local) addr=127.0.0.10:10000 status=NONE \
+ reconnect=<NEVER> confirm=0
+ flags=0x0
+ 0x55deb0227d70: id=hostB(remote) addr=127.0.0.11:10001 status=ESTA
+ reconnect=2s confirm=0
+ flags=0x20000200 appctx:0x55deb028fba0 st0=7 st1=0 task_calls=14456 \
+ state=EST
+ xprt=RAW src=127.0.0.1:37257 addr=127.0.0.10:10000
+ remote_table:0x55deb0224a10 id=stkt local_id=1 remote_id=1
+ last_local_table:0x55deb0224a10 id=stkt local_id=1 remote_id=1
+ shared tables:
+ 0x55deb0224a10 local_id=1 remote_id=1 flags=0x0 remote_data=0x65
+ last_acked=0 last_pushed=3 last_get=0 teaching_origin=0 update=3
+ table:0x55deb022d6a0 id=stkt update=3 localupdate=3 \
+ commitupdate=3 syncing=0
+
+ $ echo "show peers" | socat - /tmp/hostB
+ 0x55871b5ab320: [15/Apr/2019:11:28:03] id=sharedlb state=0 flags=0x3 \
+ resync_timeout=<PAST> task_calls=3
+ 0x55871b5b2540: id=hostC(remote) addr=127.0.0.12:10002 status=CONN \
+ reconnect=3s confirm=0
+ flags=0x0
+ 0x55871b5b1440: id=hostB(local) addr=127.0.0.11:10001 status=NONE \
+ reconnect=<NEVER> confirm=0
+ flags=0x0
+ 0x55871b5aed70: id=hostA(remote) addr=127.0.0.10:10000 status=ESTA \
+ reconnect=2s confirm=0
+ flags=0x20000200 appctx:0x7fa46800ee00 st0=7 st1=0 task_calls=62356 \
+ state=EST
+ remote_table:0x55871b5ab960 id=stkt local_id=1 remote_id=1
+ last_local_table:0x55871b5ab960 id=stkt local_id=1 remote_id=1
+ shared tables:
+ 0x55871b5ab960 local_id=1 remote_id=1 flags=0x0 remote_data=0x65
+ last_acked=3 last_pushed=0 last_get=3 teaching_origin=0 update=0
+ table:0x55871b5b46a0 id=stkt update=1 localupdate=0 \
+ commitupdate=0 syncing=0
+
+show pools [byname|bysize|byusage] [match <pfx>] [<nb>]
+ Dump the status of internal memory pools. This is useful to track memory
+ usage when suspecting a memory leak for example. It does exactly the same
+ as the SIGQUIT when running in foreground except that it does not flush the
+ pools. The output is not sorted by default. If "byname" is specified, it is
+ sorted by pool name; if "bysize" is specified, it is sorted by item size in
+ reverse order; if "byusage" is specified, it is sorted by total usage in
+ reverse order, and only used entries are shown. It is also possible to limit
+ the output to the <nb> first entries (e.g. when sorting by usage). Finally,
+ if "match" followed by a prefix is specified, then only pools whose name
+ starts with this prefix will be shown. The reported total only concerns pools
+ matching the filtering criteria. Example:
+
+ $ socat - /tmp/haproxy.sock <<< "show pools match quic byusage"
+ Dumping pools usage. Use SIGQUIT to flush them.
+ - Pool quic_conn_r (65560 bytes) : 1337 allocated (87653720 bytes), ...
+ - Pool quic_crypto (1048 bytes) : 6685 allocated (7005880 bytes), ...
+ - Pool quic_conn (4056 bytes) : 1337 allocated (5422872 bytes), ...
+ - Pool quic_rxbuf (262168 bytes) : 8 allocated (2097344 bytes), ...
+ - Pool quic_conne (184 bytes) : 9359 allocated (1722056 bytes), ...
+ - Pool quic_frame (184 bytes) : 7938 allocated (1460592 bytes), ...
+ - Pool quic_tx_pac (152 bytes) : 6454 allocated (981008 bytes), ...
+ - Pool quic_tls_ke (56 bytes) : 12033 allocated (673848 bytes), ...
+ - Pool quic_rx_pac (408 bytes) : 1596 allocated (651168 bytes), ...
+ - Pool quic_tls_se (88 bytes) : 6685 allocated (588280 bytes), ...
+ - Pool quic_cstrea (88 bytes) : 4011 allocated (352968 bytes), ...
+ - Pool quic_tls_iv (24 bytes) : 12033 allocated (288792 bytes), ...
+ - Pool quic_dgram (344 bytes) : 732 allocated (251808 bytes), ...
+ - Pool quic_arng (56 bytes) : 4011 allocated (224616 bytes), ...
+ - Pool quic_conn_c (152 bytes) : 1337 allocated (203224 bytes), ...
+ Total: 15 pools, 109578176 bytes allocated, 109578176 used ...
+
+show profiling [{all | status | tasks | memory}] [byaddr|bytime|aggr|<max_lines>]*
+ Dumps the current profiling settings, one per line, as well as the command
+ needed to change them. When tasks profiling is enabled, some per-function
+ statistics collected by the scheduler will also be emitted, with a summary
+ covering the number of calls, total/avg CPU time and total/avg latency. When
+ memory profiling is enabled, some information such as the number of
+ allocations/releases and their sizes will be reported. It is possible to
+ limit the dump to only the profiling status, the tasks, or the memory
+ profiling by specifying the respective keywords; by default all profiling
+ information are dumped. It is also possible to limit the number of lines
+ of output of each category by specifying a numeric limit. It is possible to
+ request that the output is sorted by address or by total execution time
+ instead of usage, e.g. to ease comparisons between subsequent calls or to
+ check what needs to be optimized, and to aggregate task activity by called
+ function instead of seeing the details. Please note that profiling is
+ essentially aimed at developers since it gives hints about where CPU cycles
+ or memory are wasted in the code. There is nothing useful to monitor there.
+
+show resolvers [<resolvers section id>]
+ Dump statistics for the given resolvers section, or all resolvers sections
+ if no section is supplied.
+
+ For each name server, the following counters are reported:
+ sent: number of DNS requests sent to this server
+ valid: number of DNS valid responses received from this server
+ update: number of DNS responses used to update the server's IP address
+ cname: number of CNAME responses
+ cname_error: CNAME errors encountered with this server
+ any_err: number of empty responses (i.e. server does not support ANY type)
+ nx: non existent domain response received from this server
+ timeout: how many times this server did not answer in time
+ refused: number of requests refused by this server
+ other: any other DNS errors
+ invalid: invalid DNS response (from a protocol point of view)
+ too_big: too big response
+ outdated: number of responses that arrived too late (after another name server)
+
+show quic [oneline|full] [all]
+ Dump information on all active QUIC frontend connections. This command is
+ restricted and can only be issued on sockets configured for levels "operator"
+ or "admin". An optional format can be specified as first argument to control
+ the verbosity. Currently supported values are "oneline" which is the default
+ if format is unspecified or "full". By default, connections on closing or
+ draining state are not displayed. Use the extra argument "all" to include
+ them in the output.
+
+show servers conn [<backend>]
+ Dump the current and idle connections state of the servers belonging to the
+ designated backend (or all backends if none specified). A backend name or
+ identifier may be used.
+
+ The output consists in a header line showing the fields titles, then one
+ server per line with for each, the backend name and ID, server name and ID,
+ the address, port and a series of values. The number of fields varies
+ depending on thread count.
+
+ Given the threaded nature of idle connections, it's important to understand
+ that some values may change once read, and that as such, consistency within a
+ line isn't granted. This output is mostly provided as a debugging tool and is
+ not relevant to be routinely monitored nor graphed.
+
+show servers state [<backend>]
+ Dump the state of the servers found in the running configuration. A backend
+ name or identifier may be provided to limit the output to this backend only.
+
+ The dump has the following format:
+ - first line contains the format version (1 in this specification);
+ - second line contains the column headers, prefixed by a sharp ('#');
+ - third line and next ones contain data;
+ - each line starting by a sharp ('#') is considered as a comment.
+
+ Since multiple versions of the output may co-exist, below is the list of
+ fields and their order per file format version :
+ 1:
+ be_id: Backend unique id.
+ be_name: Backend label.
+ srv_id: Server unique id (in the backend).
+ srv_name: Server label.
+ srv_addr: Server IP address.
+ srv_op_state: Server operational state (UP/DOWN/...).
+ 0 = SRV_ST_STOPPED
+ The server is down.
+ 1 = SRV_ST_STARTING
+ The server is warming up (up but
+ throttled).
+ 2 = SRV_ST_RUNNING
+ The server is fully up.
+ 3 = SRV_ST_STOPPING
+ The server is up but soft-stopping
+ (eg: 404).
+ srv_admin_state: Server administrative state (MAINT/DRAIN/...).
+ The state is actually a mask of values :
+ 0x01 = SRV_ADMF_FMAINT
+ The server was explicitly forced into
+ maintenance.
+ 0x02 = SRV_ADMF_IMAINT
+ The server has inherited the maintenance
+ status from a tracked server.
+ 0x04 = SRV_ADMF_CMAINT
+ The server is in maintenance because of
+ the configuration.
+ 0x08 = SRV_ADMF_FDRAIN
+ The server was explicitly forced into
+ drain state.
+ 0x10 = SRV_ADMF_IDRAIN
+ The server has inherited the drain status
+ from a tracked server.
+ 0x20 = SRV_ADMF_RMAINT
+ The server is in maintenance because of an
+ IP address resolution failure.
+ 0x40 = SRV_ADMF_HMAINT
+ The server FQDN was set from stats socket.
+
+ srv_uweight: User visible server's weight.
+ srv_iweight: Server's initial weight.
+ srv_time_since_last_change: Time since last operational change.
+ srv_check_status: Last health check status.
+ srv_check_result: Last check result (FAILED/PASSED/...).
+ 0 = CHK_RES_UNKNOWN
+ Initialized to this by default.
+ 1 = CHK_RES_NEUTRAL
+ Valid check but no status information.
+ 2 = CHK_RES_FAILED
+ Check failed.
+ 3 = CHK_RES_PASSED
+ Check succeeded and server is fully up
+ again.
+ 4 = CHK_RES_CONDPASS
+ Check reports the server doesn't want new
+ sessions.
+ srv_check_health: Checks rise / fall current counter.
+ srv_check_state: State of the check (ENABLED/PAUSED/...).
+ The state is actually a mask of values :
+ 0x01 = CHK_ST_INPROGRESS
+ A check is currently running.
+ 0x02 = CHK_ST_CONFIGURED
+ This check is configured and may be
+ enabled.
+ 0x04 = CHK_ST_ENABLED
+ This check is currently administratively
+ enabled.
+ 0x08 = CHK_ST_PAUSED
+ Checks are paused because of maintenance
+ (health only).
+ srv_agent_state: State of the agent check (ENABLED/PAUSED/...).
+ This state uses the same mask values as
+ "srv_check_state", adding this specific one :
+ 0x10 = CHK_ST_AGENT
+ Check is an agent check (otherwise it's a
+ health check).
+ bk_f_forced_id: Flag to know if the backend ID is forced by
+ configuration.
+ srv_f_forced_id: Flag to know if the server's ID is forced by
+ configuration.
+ srv_fqdn: Server FQDN.
+ srv_port: Server port.
+ srvrecord: DNS SRV record associated to this SRV.
+ srv_use_ssl: use ssl for server connections.
+ srv_check_port: Server health check port.
+ srv_check_addr: Server health check address.
+ srv_agent_addr: Server health agent address.
+ srv_agent_port: Server health agent port.
+
+show sess
+ Dump all known active streams (formerly called "sessions"). Avoid doing this
+ on slow connections as this can be huge. This command is restricted and can
+ only be issued on sockets configured for levels "operator" or "admin". Note
+ that on machines with quickly recycled connections, it is possible that this
+ output reports less entries than really exist because it will dump all
+ existing streams up to the last one that was created before the command was
+ entered; those which die in the mean time will not appear.
+
+show sess <id> | older <age> | susp | all
+ Display a lot of internal information about the matching streams. In the
+ first form, only the stream matching the specified stream identifier will
+ be shown. This identifier is the first field at the beginning of the lines in
+ the dumps of "show sess" (it corresponds to the stream pointer). In the
+ second form, only streams older than <age> (in seconds by default) will be
+ shown. Passing "susp" instead will only report entries that are considered as
+ suspicious by the developers based on criteria that may evolve in time or vary along
+ versions. If "all" is used instead, then all streams will be dumped. Dumping
+ many streams can produce a huge output, take a lot of time and be CPU
+ intensive, so it's always better to only dump the minimum needed. Those
+ information are useless to most users but may be used by haproxy developers
+ to troubleshoot a complex bug. The output format is intentionally not
+ documented so that it can freely evolve depending on demands. This output
+ is meant to be interpreted while checking function strm_dump_to_buffer() in
+ src/stream.c to figure the exact meaning of certain fields.
+
+show stat [domain <dns|proxy>] [{<iid>|<proxy>} <type> <sid>] [typed|json] \
+ [desc] [up|no-maint]
+ Dump statistics. The domain is used to select which statistics to print; dns
+ and proxy are available for now. By default, the CSV format is used; you can
+ activate the extended typed output format described in the section above if
+ "typed" is passed after the other arguments; or in JSON if "json" is passed
+ after the other arguments. By passing <id>, <type> and <sid>, it is possible
+ to dump only selected items :
+ - <iid> is a proxy ID, -1 to dump everything. Alternatively, a proxy name
+ <proxy> may be specified. In this case, this proxy's ID will be used as
+ the ID selector.
+ - <type> selects the type of dumpable objects : 1 for frontends, 2 for
+ backends, 4 for servers, -1 for everything. These values can be ORed,
+ for example:
+ 1 + 2 = 3 -> frontend + backend.
+ 1 + 2 + 4 = 7 -> frontend + backend + server.
+ - <sid> is a server ID, -1 to dump everything from the selected proxy.
+
+ Example :
+ $ echo "show info;show stat" | socat stdio unix-connect:/tmp/sock1
+ >>> Name: HAProxy
+ Version: 1.4-dev2-49
+ Release_date: 2009/09/23
+ Nbproc: 1
+ Process_num: 1
+ (...)
+
+ # pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq, (...)
+ stats,FRONTEND,,,0,0,1000,0,0,0,0,0,0,,,,,OPEN,,,,,,,,,1,1,0, (...)
+ stats,BACKEND,0,0,0,0,1000,0,0,0,0,0,,0,0,0,0,UP,0,0,0,,0,250,(...)
+ (...)
+ www1,BACKEND,0,0,0,0,1000,0,0,0,0,0,,0,0,0,0,UP,1,1,0,,0,250, (...)
+
+ $
+
+ In this example, two commands have been issued at once. That way it's easy to
+ find which process the stats apply to in multi-process mode. This is not
+ needed in the typed output format as the process number is reported on each
+ line. Notice the empty line after the information output which marks the end
+ of the first block. A similar empty line appears at the end of the second
+ block (stats) so that the reader knows the output has not been truncated.
+
+ When "typed" is specified, the output format is more suitable to monitoring
+ tools because it provides numeric positions and indicates the type of each
+ output field. Each value stands on its own line with process number, element
+ number, nature, origin and scope. This same format is available via the HTTP
+ stats by passing ";typed" after the URI. It is very important to note that in
+ typed output format, the dump for a single object is contiguous so that there
+ is no need for a consumer to store everything at once.
+
+ The "up" modifier will result in listing only servers which are reportedly up or
+ not checked. Those down, unresolved, or in maintenance will not be listed.
+ This is analogous to the ";up" option on the HTTP stats. Similarly, the
+ "no-maint" modifier will act like the ";no-maint" HTTP modifier and will
+ result in disabled servers not to be listed. The difference is that those
+ which are enabled but down will not be evicted.
+
+ When using the typed output format, each line is made of 4 columns delimited
+ by colons (':'). The first column is a dot-delimited series of 6 elements. The
+ first element is a letter indicating the type of the object being described.
+ At the moment the following object types are known : 'F' for a frontend, 'B'
+ for a backend, 'L' for a listener, and 'S' for a server.
+ The second element is a positive integer representing the unique identifier of
+ the proxy the object belongs to. It is equivalent to the "iid" column of the
+ CSV output and matches the value in front of the optional "id" directive found
+ in the frontend or backend section. The third element is a positive integer
+ containing the unique object identifier inside the proxy, and corresponds to
+ the "sid" column of the CSV output. ID 0 is reported when dumping a frontend
+ or a backend. For a listener or a server, this corresponds to their respective
+ ID inside the proxy. The fourth element is the numeric position of the field
+ in the list (starting at zero). This position shall not change over time, but
+ holes are to be expected, depending on build options or if some fields are
+ deleted in the future. The fifth element is the field name as it appears in
+ the CSV output. The sixth element is a positive integer and is the relative
+ process number starting at 1.
+
+ The rest of the line starting after the first colon follows the "typed output
+ format" described in the section above. In short, the second column (after the
+ first ':') indicates the origin, nature and scope of the variable. The third
+ column indicates the field type, among "s32", "s64", "u32", "u64", "flt" and
+ "str". Then the fourth column is the value itself, which the consumer knows
+ how to parse thanks to column 3 and how to process thanks to column 2.
+
+ When "desc" is appended to the command, one extra colon followed by a quoted
+ string is appended with a description for the metric. At the time of writing,
+ this is only supported for the "typed" output format.
+
+ Thus the overall line format in typed mode is :
+
+ <obj>.<px_id>.<id>.<fpos>.<fname>.<process_num>:<tags>:<type>:<value>
+
+ Here's an example of typed output format :
+
+ $ echo "show stat typed" | socat stdio unix-connect:/tmp/sock1
+ F.2.0.0.pxname.1:MGP:str:private-frontend
+ F.2.0.1.svname.1:MGP:str:FRONTEND
+ F.2.0.8.bin.1:MGP:u64:0
+ F.2.0.9.bout.1:MGP:u64:0
+ F.2.0.40.hrsp_2xx.1:MGP:u64:0
+ L.2.1.0.pxname.1:MGP:str:private-frontend
+ L.2.1.1.svname.1:MGP:str:sock-1
+ L.2.1.17.status.1:MGP:str:OPEN
+ L.2.1.73.addr.1:MGP:str:0.0.0.0:8001
+ S.3.13.60.rtime.1:MCP:u32:0
+ S.3.13.61.ttime.1:MCP:u32:0
+ S.3.13.62.agent_status.1:MGP:str:L4TOUT
+ S.3.13.64.agent_duration.1:MGP:u64:2001
+ S.3.13.65.check_desc.1:MCP:str:Layer4 timeout
+ S.3.13.66.agent_desc.1:MCP:str:Layer4 timeout
+ S.3.13.67.check_rise.1:MCP:u32:2
+ S.3.13.68.check_fall.1:MCP:u32:3
+ S.3.13.69.check_health.1:SGP:u32:0
+ S.3.13.70.agent_rise.1:MaP:u32:1
+ S.3.13.71.agent_fall.1:SGP:u32:1
+ S.3.13.72.agent_health.1:SGP:u32:1
+ S.3.13.73.addr.1:MCP:str:1.255.255.255:8888
+ S.3.13.75.mode.1:MAP:str:http
+ B.3.0.0.pxname.1:MGP:str:private-backend
+ B.3.0.1.svname.1:MGP:str:BACKEND
+ B.3.0.2.qcur.1:MGP:u32:0
+ B.3.0.3.qmax.1:MGP:u32:0
+ B.3.0.4.scur.1:MGP:u32:0
+ B.3.0.5.smax.1:MGP:u32:0
+ B.3.0.6.slim.1:MGP:u32:1000
+ B.3.0.55.lastsess.1:MMP:s32:-1
+ (...)
+
+ In the typed format, the presence of the process ID at the end of the
+ first column makes it very easy to visually aggregate outputs from
+ multiple processes, as shown in the example below where each line appears
+ for each process :
+
+ $ ( echo show stat typed | socat /var/run/haproxy.sock1 - ; \
+ echo show stat typed | socat /var/run/haproxy.sock2 - ) | \
+ sort -t . -k 1,1 -k 2,2n -k 3,3n -k 4,4n -k 5,5 -k 6,6n
+ B.3.0.0.pxname.1:MGP:str:private-backend
+ B.3.0.0.pxname.2:MGP:str:private-backend
+ B.3.0.1.svname.1:MGP:str:BACKEND
+ B.3.0.1.svname.2:MGP:str:BACKEND
+ B.3.0.2.qcur.1:MGP:u32:0
+ B.3.0.2.qcur.2:MGP:u32:0
+ B.3.0.3.qmax.1:MGP:u32:0
+ B.3.0.3.qmax.2:MGP:u32:0
+ B.3.0.4.scur.1:MGP:u32:0
+ B.3.0.4.scur.2:MGP:u32:0
+ B.3.0.5.smax.1:MGP:u32:0
+ B.3.0.5.smax.2:MGP:u32:0
+ B.3.0.6.slim.1:MGP:u32:1000
+ B.3.0.6.slim.2:MGP:u32:1000
+ (...)
+
+ The format of JSON output is described in a schema which may be output
+ using "show schema json".
+
+ The JSON output contains no extra whitespace in order to reduce the
+ volume of output. For human consumption passing the output through a
+ pretty printer may be helpful. Example :
+
+ $ echo "show stat json" | socat /var/run/haproxy.sock stdio | \
+ python -m json.tool
+
+show ssl ca-file [<cafile>[:<index>]]
+ Display the list of CA files loaded into the process and their respective
+ certificate counts. The certificates are not used by any frontend or backend
+ until their status is "Used".
+ A "@system-ca" entry can appear in the list, it is loaded by the httpclient
+ by default. It contains the list of trusted CA of your system returned by
+ OpenSSL.
+ If a filename is prefixed by an asterisk, it is a transaction which
+ is not committed yet. If a <cafile> is specified without <index>, it will show
+ the status of the CA file ("Used"/"Unused") followed by details about all the
+ certificates contained in the CA file. The details displayed for every
+ certificate are the same as the ones displayed by a "show ssl cert" command.
+ If a <cafile> is specified followed by an <index>, it will only display the
+ details of the certificate having the specified index. Indexes start from 1.
+ If the index is invalid (too big for instance), nothing will be displayed.
+ This command can be useful to check if a CA file was properly updated.
+ You can also display the details of an ongoing transaction by prefixing the
+ filename by an asterisk.
+
+ Example :
+
+ $ echo "show ssl ca-file" | socat /var/run/haproxy.master -
+ # transaction
+ *cafile.crt - 2 certificate(s)
+ # filename
+ cafile.crt - 1 certificate(s)
+
+ $ echo "show ssl ca-file cafile.crt" | socat /var/run/haproxy.master -
+ Filename: /home/tricot/work/haproxy/reg-tests/ssl/set_cafile_ca2.crt
+ Status: Used
+
+ Certificate #1:
+ Serial: 11A4D2200DC84376E7D233CAFF39DF44BF8D1211
+ notBefore: Apr 1 07:40:53 2021 GMT
+ notAfter: Aug 17 07:40:53 2048 GMT
+ Subject Alternative Name:
+ Algorithm: RSA4096
+ SHA1 FingerPrint: A111EF0FEFCDE11D47FE3F33ADCA8435EBEA4864
+ Subject: /C=FR/ST=Some-State/O=HAProxy Technologies/CN=HAProxy Technologies CA
+ Issuer: /C=FR/ST=Some-State/O=HAProxy Technologies/CN=HAProxy Technologies CA
+
+ $ echo "show ssl ca-file *cafile.crt:2" | socat /var/run/haproxy.master -
+ Filename: */home/tricot/work/haproxy/reg-tests/ssl/set_cafile_ca2.crt
+ Status: Unused
+
+ Certificate #2:
+ Serial: 587A1CE5ED855040A0C82BF255FF300ADB7C8136
+ [...]
+
+show ssl cert [<filename>]
+ Display the list of certificates loaded into the process. They are not used
+ by any frontend or backend until their status is "Used".
+ If a filename is prefixed by an asterisk, it is a transaction which is not
+ committed yet. If a filename is specified, it will show details about the
+ certificate. This command can be useful to check if a certificate was well
+ updated. You can also display details on a transaction by prefixing the
+ filename by an asterisk.
+ This command can also be used to display the details of a certificate's OCSP
+ response by suffixing the filename with a ".ocsp" extension. It works for
+ committed certificates as well as for ongoing transactions. On a committed
+ certificate, this command is equivalent to calling "show ssl ocsp-response"
+ with the certificate's corresponding OCSP response ID.
+
+ Example :
+
+ $ echo "@1 show ssl cert" | socat /var/run/haproxy.master -
+ # transaction
+ *test.local.pem
+ # filename
+ test.local.pem
+
+ $ echo "@1 show ssl cert test.local.pem" | socat /var/run/haproxy.master -
+ Filename: test.local.pem
+ Status: Used
+ Serial: 03ECC19BA54B25E85ABA46EE561B9A10D26F
+ notBefore: Sep 13 21:20:24 2019 GMT
+ notAfter: Dec 12 21:20:24 2019 GMT
+ Issuer: /C=US/O=Let's Encrypt/CN=Let's Encrypt Authority X3
+ Subject: /CN=test.local
+ Subject Alternative Name: DNS:test.local, DNS:imap.test.local
+ Algorithm: RSA2048
+ SHA1 FingerPrint: 417A11CAE25F607B24F638B4A8AEE51D1E211477
+
+ $ echo "@1 show ssl cert *test.local.pem" | socat /var/run/haproxy.master -
+ Filename: *test.local.pem
+ Status: Unused
+ [...]
+
+show ssl crl-file [<crlfile>[:<index>]]
+ Display the list of CRL files loaded into the process. They are not used
+ by any frontend or backend until their status is "Used".
+ If a filename is prefixed by an asterisk, it is a transaction which is not
+ committed yet. If a <crlfile> is specified without <index>, it will show the
+ status of the CRL file ("Used"/"Unused") followed by details about all the
+ Revocation Lists contained in the CRL file. The details displayed for every
+ list are based on the output of "openssl crl -text -noout -in <file>".
+ If a <crlfile> is specified followed by an <index>, it will only display the
+ details of the list having the specified index. Indexes start from 1.
+ If the index is invalid (too big for instance), nothing will be displayed.
+ This command can be useful to check if a CRL file was properly updated.
+ You can also display the details of an ongoing transaction by prefixing the
+ filename by an asterisk.
+
+ Example :
+
+ $ echo "show ssl crl-file" | socat /var/run/haproxy.master -
+ # transaction
+ *crlfile.pem
+ # filename
+ crlfile.pem
+
+ $ echo "show ssl crl-file crlfile.pem" | socat /var/run/haproxy.master -
+ Filename: /home/tricot/work/haproxy/reg-tests/ssl/crlfile.pem
+ Status: Used
+
+ Certificate Revocation List #1:
+ Version 1
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /C=FR/O=HAProxy Technologies/CN=Intermediate CA2
+ Last Update: Apr 23 14:45:39 2021 GMT
+ Next Update: Sep 8 14:45:39 2048 GMT
+ Revoked Certificates:
+ Serial Number: 1008
+ Revocation Date: Apr 23 14:45:36 2021 GMT
+
+ Certificate Revocation List #2:
+ Version 1
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /C=FR/O=HAProxy Technologies/CN=Root CA
+ Last Update: Apr 23 14:30:44 2021 GMT
+ Next Update: Sep 8 14:30:44 2048 GMT
+ No Revoked Certificates.
+
+show ssl crt-list [-n] [<filename>]
+ Display the list of crt-list and directories used in the HAProxy
+ configuration. If a filename is specified, dump the content of a crt-list or
+ a directory. Once dumped the output can be used as a crt-list file.
+ The '-n' option can be used to display the line number, which is useful when
+ combined with the 'del ssl crt-list' option when an entry is duplicated. The
+ output with the '-n' option is not compatible with the crt-list format and
+ not loadable by haproxy.
+
+ Example:
+ echo "show ssl crt-list -n localhost.crt-list" | socat /tmp/sock1 -
+ # localhost.crt-list
+ common.pem:1 !not.test1.com *.test1.com !localhost
+ common.pem:2
+ ecdsa.pem:3 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3] localhost !www.test1.com
+ ecdsa.pem:4 [verify none allow-0rtt ssl-min-ver TLSv1.0 ssl-max-ver TLSv1.3]
+
+show ssl ocsp-response [[text|base64] <id|path>]
+ Display the IDs of the OCSP tree entries corresponding to all the OCSP
+ responses used in HAProxy, as well as the corresponding frontend
+ certificate's path, the issuer's name and key hash and the serial number of
+ the certificate for which the OCSP response was built.
+ If a valid <id> or the <path> of a valid frontend certificate is provided,
+ display the contents of the corresponding OCSP response. When an <id> is
+ provided, it is possible to define the format in which the data is dumped.
+ The 'text' option is the default one and it allows to display detailed
+ information about the OCSP response the same way as in an "openssl ocsp
+ -respin <ocsp-response> -text" call. The 'base64' format allows to dump the
+ contents of an OCSP response in base64.
+
+ Example :
+
+ $ echo "show ssl ocsp-response" | socat /var/run/haproxy.master -
+ # Certificate IDs
+ Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100a
+ Certificate path : /path_to_cert/foo.pem
+ Certificate ID:
+ Issuer Name Hash: 8A83E0060FAFF709CA7E9B95522A2E81635FDA0A
+ Issuer Key Hash: F652B0E435D5EA923851508F0ADBE92D85DE007A
+ Serial Number: 100A
+
+ $ echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100a" | socat /var/run/haproxy.master -
+ OCSP Response Data:
+ OCSP Response Status: successful (0x0)
+ Response Type: Basic OCSP Response
+ Version: 1 (0x0)
+ Responder Id: C = FR, O = HAProxy Technologies, CN = ocsp.haproxy.com
+ Produced At: May 27 15:43:38 2021 GMT
+ Responses:
+ Certificate ID:
+ Hash Algorithm: sha1
+ Issuer Name Hash: 8A83E0060FAFF709CA7E9B95522A2E81635FDA0A
+ Issuer Key Hash: F652B0E435D5EA923851508F0ADBE92D85DE007A
+ Serial Number: 100A
+ Cert Status: good
+ This Update: May 27 15:43:38 2021 GMT
+ Next Update: Oct 12 15:43:38 2048 GMT
+ [...]
+
+ $ echo "show ssl ocsp-response base64 /path_to_cert/foo.pem" | socat /var/run/haproxy.sock -
+ MIIB8woBAKCCAewwggHoBgkrBgEFBQcwAQEEggHZMIIB1TCBvqE[...]
+
+show ssl ocsp-updates
+ Display information about the entries concerned by the OCSP update mechanism.
+ The command will output one line per OCSP response and will contain the
+ expected update time of the response as well as the time of the last
+ successful update and counters of successful and failed updates. It will also
+ give the status of the last update (successful or not) in numerical form as
+ well as text form. See below for a full list of possible errors. The lines
+ will be sorted by ascending 'Next Update' time. The lines will also contain a
+ path to the first frontend certificate that uses the OCSP response.
+ See "show ssl ocsp-response" command and "ocsp-update" option for more
+ information on the OCSP auto update.
+
+ The update error codes and error strings can be the following:
+
+ +----+-------------------------------------+
+ | ID | message |
+ +----+-------------------------------------+
+ | 0 | "Unknown" |
+ | 1 | "Update successful" |
+ | 2 | "HTTP error" |
+ | 3 | "Missing \"ocsp-response\" header" |
+ | 4 | "OCSP response check failure" |
+ | 5 | "Error during insertion" |
+ +----+-------------------------------------+
+
+ Example :
+ $ echo "show ssl ocsp-updates" | socat /tmp/haproxy.sock -
+ OCSP Certid | Path | Next Update | Last Update | Successes | Failures | Last Update Status | Last Update Status (str)
+ 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015 | /path_to_cert/cert.pem | 30/Jan/2023:00:08:09 +0000 | - | 0 | 1 | 2 | HTTP error
+ 304b300906052b0e03021a0500041448dac9a0fb2bd32d4ff0de68d2f567b735f9b3c40414142eb317b75856cbae500940e61faf9d8b14c2c6021203e16a7aa01542f291237b454a627fdea9c1 | /path_to_cert/other_cert.pem | 30/Jan/2023:01:07:09 +0000 | 30/Jan/2023:00:07:09 +0000 | 1 | 0 | 1 | Update successful
+
+show ssl providers
+ Display the names of the providers loaded by OpenSSL during init. Provider
+ loading can indeed be configured via the OpenSSL configuration file and this
+ option allows to check that the right providers were loaded. This command is
+ only available with OpenSSL v3.
+
+ Example :
+ $ echo "show ssl providers" | socat /var/run/haproxy.master -
+ Loaded providers :
+ - fips
+ - base
+
+show startup-logs
+ Dump all messages emitted during the startup of the current haproxy process,
+ each startup-logs buffer is unique to its haproxy worker.
+
+ This keyword also exists on the master CLI, which shows the latest startup or
+ reload tentative.
+
+show table
+ Dump general information on all known stick-tables. Their name is returned
+ (the name of the proxy which holds them), their type (currently zero, always
+ IP), their size in maximum possible number of entries, and the number of
+ entries currently in use.
+
+ Example :
+ $ echo "show table" | socat stdio /tmp/sock1
+ >>> # table: front_pub, type: ip, size:204800, used:171454
+ >>> # table: back_rdp, type: ip, size:204800, used:0
+
+show table <name> [ data.<type> <operator> <value> [data.<type> ...]] | [ key <key> ]
+ Dump contents of stick-table <name>. In this mode, a first line of generic
+ information about the table is reported as with "show table", then all
+ entries are dumped. Since this can be quite heavy, it is possible to specify
+ a filter in order to specify what entries to display.
+
+ When the "data." form is used the filter applies to the stored data (see
+ "stick-table" in section 4.2). A stored data type must be specified
+ in <type>, and this data type must be stored in the table otherwise an
+ error is reported. The data is compared according to <operator> with the
+ 64-bit integer <value>. Operators are the same as with the ACLs :
+
+ - eq : match entries whose data is equal to this value
+ - ne : match entries whose data is not equal to this value
+ - le : match entries whose data is less than or equal to this value
+ - ge : match entries whose data is greater than or equal to this value
+ - lt : match entries whose data is less than this value
+ - gt : match entries whose data is greater than this value
+
+ In this form, you can use multiple data filter entries, up to a maximum
+ defined during build time (4 by default).
+
+ When the key form is used the entry <key> is shown. The key must be of the
+ same type as the table, which currently is limited to IPv4, IPv6, integer,
+ and string.
+
+ Example :
+ $ echo "show table http_proxy" | socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:2
+ >>> 0x80e6a4c: key=127.0.0.1 use=0 exp=3594729 gpc0=0 conn_rate(30000)=1 \
+ bytes_out_rate(60000)=187
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+
+ $ echo "show table http_proxy data.gpc0 gt 0" | socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:2
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+
+ $ echo "show table http_proxy data.conn_rate gt 5" | \
+ socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:2
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+
+ $ echo "show table http_proxy key 127.0.0.2" | \
+ socat stdio /tmp/sock1
+ >>> # table: http_proxy, type: ip, size:204800, used:2
+ >>> 0x80e6a80: key=127.0.0.2 use=0 exp=3594740 gpc0=1 conn_rate(30000)=10 \
+ bytes_out_rate(60000)=191
+
+ When the data criterion applies to a dynamic value dependent on time such as
+ a bytes rate, the value is dynamically computed during the evaluation of the
+ entry in order to decide whether it has to be dumped or not. This means that
+ such a filter could match for some time then not match anymore because as
+ time goes, the average event rate drops.
+
+ It is possible to use this to extract lists of IP addresses abusing the
+ service, in order to monitor them or even blacklist them in a firewall.
+ Example :
+ $ echo "show table http_proxy data.gpc0 gt 0" \
+ | socat stdio /tmp/sock1 \
+ | fgrep 'key=' | cut -d' ' -f2 | cut -d= -f2 > abusers-ip.txt
+ ( or | awk '/key/{ print a[split($2,a,"=")]; }' )
+
+ When the stick-table is synchronized to a peers section supporting sharding,
+ the shard number will be displayed for each key (otherwise '0' is reported).
+ This allows to know which peers will receive this key.
+ Example:
+ $ echo "show table http_proxy" | socat stdio /tmp/sock1 | fgrep shard=
+ 0x7f23b0c822a8: key=10.0.0.2 use=0 exp=296398 shard=9 gpc0=0
+ 0x7f23a063f948: key=10.0.0.6 use=0 exp=296075 shard=12 gpc0=0
+ 0x7f23b03920b8: key=10.0.0.8 use=0 exp=296766 shard=1 gpc0=0
+ 0x7f23a43c09e8: key=10.0.0.12 use=0 exp=295368 shard=8 gpc0=0
+
+show tasks
+ Dumps the number of tasks currently in the run queue, with the number of
+ occurrences for each function, and their average latency when it's known
+ (for pure tasks with task profiling enabled). The dump is a snapshot of the
+ instant it's done, and there may be variations depending on what tasks are
+ left in the queue at the moment it happens, especially in mono-thread mode
+ as there's less chance that I/Os can refill the queue (unless the queue is
+ full). This command takes exclusive access to the process and can cause
+ minor but measurable latencies when issued on a highly loaded process, so
+ it must not be abused by monitoring bots.
+
+show threads
+ Dumps some internal states and structures for each thread, that may be useful
+ to help developers understand a problem. The output tries to be readable by
+ showing one block per thread. When haproxy is built with USE_THREAD_DUMP=1,
+ an advanced dump mechanism involving thread signals is used so that each
+ thread can dump its own state in turn. Without this option, the thread
+ processing the command shows all its details but the other ones are less
+ detailed. A star ('*') is displayed in front of the thread handling the
+ command. A right angle bracket ('>') may also be displayed in front of
+ threads which didn't make any progress since last invocation of this command,
+ indicating a bug in the code which must absolutely be reported. When this
+ happens between two threads it usually indicates a deadlock. If a thread is
+ alone, it's a different bug like a corrupted list. In all cases the process
+ is not fully functional anymore and needs to be restarted.
+
+ The output format is purposely not documented so that it can easily evolve as
+ new needs are identified, without having to maintain any form of backwards
+ compatibility, and just like with "show activity", the values are meaningless
+ without the code at hand.
+
+show tls-keys [id|*]
+ Dump all loaded TLS ticket keys references. The TLS ticket key reference ID
+ and the file from which the keys have been loaded is shown. Both of those
+ can be used to update the TLS keys using "set ssl tls-key". If an ID is
+ specified as parameter, it will dump the tickets, using * it will dump every
+ key from every reference.
+
+show schema json
+ Dump the schema used for the output of "show info json" and "show stat json".
+
+ The output contains no extra whitespace in order to reduce the volume of
+ output.
+ For human consumption passing the output through a pretty printer may be
+ helpful. Example :
+
+ $ echo "show schema json" | socat /var/run/haproxy.sock stdio | \
+ python -m json.tool
+
+ The schema follows "JSON Schema" (json-schema.org) and accordingly
+ verifiers may be used to verify the output of "show info json" and "show
+ stat json" against the schema.
+
+show trace [<source>]
+ Show the current trace status. For each source a line is displayed with a
+ single-character status indicating if the trace is stopped, waiting, or
+ running. The output sink used by the trace is indicated (or "none" if none
+ was set), as well as the number of dropped events in this sink, followed by a
+ brief description of the source. If a source name is specified, a detailed
+ list of all events supported by the source is displayed, along with their
+ status for each action (report, start, pause, stop), indicated by a "+" if
+ they are enabled, or a
+ a start without being reported and conversely.
+
+show version
+ Show the version of the current HAProxy process. This is available from
+ master and workers CLI.
+ Example:
+
+ $ echo "show version" | socat /var/run/haproxy.sock stdio
+ 2.4.9
+
+ $ echo "show version" | socat /var/run/haproxy-master.sock stdio
+ 2.5.0
+
+shutdown frontend <frontend>
+ Completely delete the specified frontend. All the ports it was bound to will
+ be released. It will not be possible to enable the frontend anymore after
+ this operation. This is intended to be used in environments where stopping a
+ proxy is not even imaginable but a misconfigured proxy must be fixed. That
+ way it's possible to release the port and bind it into another process to
+ restore operations. The frontend will not appear at all on the stats page
+ once it is terminated.
+
+ The frontend may be specified either by its name or by its numeric ID,
+ prefixed with a sharp ('#').
+
+ This command is restricted and can only be issued on sockets configured for
+ level "admin".
+
+shutdown session <id>
+ Immediately terminate the stream matching the specified stream identifier.
+ This identifier is the first field at the beginning of the lines in the dumps
+ of "show sess" (it corresponds to the stream pointer). This can be used to
+ terminate a long-running stream without waiting for a timeout or when an
+ endless transfer is ongoing. Such terminated streams are reported with a 'K'
+ flag in the logs.
+
+shutdown sessions server <backend>/<server>
+ Immediately terminate all the streams attached to the specified server. This
+ can be used to terminate long-running streams after a server is put into
+ maintenance mode, for instance. Such terminated streams are reported with a
+ 'K' flag in the logs.
+
+trace
+ The "trace" command alone lists the trace sources, their current status, and
+ their brief descriptions. It is only meant as a menu to enter next levels,
+ see other "trace" commands below.
+
+trace 0
+ Immediately stops all traces. This is made to be used as a quick solution
+ to terminate a debugging session or as an emergency action to be used in case
+ complex traces were enabled on multiple sources and impact the service.
+
+trace <source> event [ [+|-|!]<name> ]
+ Without argument, this will list all the events supported by the designated
+ source. They are prefixed with a "-" if they are not enabled, or a "+" if
+ they are enabled. It is important to note that a single trace may be labelled
+ with multiple events, and as long as any of the enabled events matches one of
+ the events labelled on the trace, the event will be passed to the trace
+ subsystem. For example, receiving an HTTP/2 frame of type HEADERS may trigger
+ a frame event and a stream event since the frame creates a new stream. If
+ either the frame event or the stream event are enabled for this source, the
+ frame will be passed to the trace framework.
+
+ With an argument, it is possible to toggle the state of each event and
+ individually enable or disable them. Two special keywords are supported,
+ "none", which matches no event, and is used to disable all events at once,
+ and "any" which matches all events, and is used to enable all events at
+ once. Other events are specific to the event source. It is possible to
+ enable one event by specifying its name, optionally prefixed with '+' for
+ better readability. It is possible to disable one event by specifying its
+ name prefixed by a '-' or a '!'.
+
+ One way to completely disable a trace source is to pass "event none", and
+ this source will instantly be totally ignored.
+
+trace <source> level [<level>]
+ Without argument, this will list all trace levels for this source, and the
+ current one will be indicated by a star ('*') prepended in front of it. With
+ an argument, this will change the trace level to the specified level. Detail
+ levels are a form of filters that are applied before reporting the events.
+ These filters are used to selectively include or exclude events depending on
+ their level of importance. For example a developer might need to know
+ precisely where in the code an HTTP header was considered invalid while the
+ end user may not even care about this header's validity at all. There are
+ currently 5 distinct levels for a trace :
+
+ user this will report information that are suitable for use by a
+ regular haproxy user who wants to observe his traffic.
+ Typically some HTTP requests and responses will be reported
+ without much detail. Most sources will set this as the
+ default level to ease operations.
+
+ proto in addition to what is reported at the "user" level, it also
+ displays protocol-level updates. This can for example be the
+ frame types or HTTP headers after decoding.
+
+ state in addition to what is reported at the "proto" level, it
+ will also display state transitions (or failed transitions)
+ which happen in parsers, so this will show attempts to
+ perform an operation while the "proto" level only shows
+ the final operation.
+
+ data in addition to what is reported at the "state" level, it
+ will also include data transfers between the various layers.
+
+ developer it reports everything available, which can include advanced
+ information such as "breaking out of this loop" that are
+ only relevant to a developer trying to understand a bug that
+ only happens once in a while in field. Function names are
+ only reported at this level.
+
+ It is highly recommended to always use the "user" level only and switch to
+ other levels only if instructed to do so by a developer. Also it is a good
+ idea to first configure the events before switching to higher levels, as it
+ may save from dumping many lines if no filter is applied.
+
+trace <source> lock [criterion]
+ Without argument, this will list all the criteria supported by this source
+ for lock-on processing, and display the current choice by a star ('*') in
+ front of it. Lock-on means that the source will focus on the first matching
+ event and only stick to the criterion which triggered this event, and ignore
+ all other ones until the trace stops. This allows for example to take a trace
+ on a single connection or on a single stream. The following criteria are
+ supported by some traces, though not necessarily all, since some of them
+ might not be available to the source :
+
+ backend lock on the backend that started the trace
+ connection lock on the connection that started the trace
+ frontend lock on the frontend that started the trace
+ listener lock on the listener that started the trace
+ nothing do not lock on anything
+ server lock on the server that started the trace
+ session lock on the session that started the trace
+ thread lock on the thread that started the trace
+
+ In addition to this, each source may provide up to 4 specific criteria such
+ as internal states or connection IDs. For example in HTTP/2 it is possible
+ to lock on the H2 stream and ignore other streams once a trace starts.
+
+ When a criterion is passed in argument, this one is used instead of the
+ other ones and any existing tracking is immediately terminated so that it can
+ restart with the new criterion. The special keyword "nothing" is supported by
+ all sources to permanently disable tracking.
+
+trace <source> { pause | start | stop } [ [+|-|!]event]
+ Without argument, this will list the events enabled to automatically pause,
+ start, or stop a trace for this source. These events are specific to each
+ trace source. With an argument, this will either enable the event for the
+ specified action (if optionally prefixed by a '+') or disable it (if
+ prefixed by a '-' or '!'). The special keyword "now" is not an event and
+ requests to take the action immediately. The keywords "none" and "any" are
+ supported just like in "trace event".
+
+ The 3 supported actions are respectively "pause", "start" and "stop". The
+ "pause" action enumerates events which will cause a running trace to stop and
+ wait for a new start event to restart it. The "start" action enumerates the
+ events which switch the trace into the waiting mode until one of the start
+ events appears. And the "stop" action enumerates the events which definitely
+ stop the trace until it is manually enabled again. In practice it makes sense
+ to manually start a trace using "start now" without caring about events, and
+ to stop it using "stop now". In order to capture more subtle event sequences,
+ setting "start" to a normal event (like receiving an HTTP request) and "stop"
+ to a very rare event like emitting a certain error, will ensure that the last
+ captured events will match the desired criteria. And the pause event is
+ useful to detect the end of a sequence, disable the lock-on and wait for
+ another opportunity to take a capture. In this case it can make sense to
+ enable lock-on to spot only one specific criterion (e.g. a stream), and have
+ "start" set to anything that starts this criterion (e.g. all events which
+ create a stream), "stop" set to the expected anomaly, and "pause" to anything
+ that ends that criterion (e.g. any end of stream event). In this case the
+ trace log will contain complete sequences of perfectly clean series affecting
+ a single object, until the last sequence containing everything from the
+ beginning to the anomaly.
+
+trace <source> sink [<sink>]
+ Without argument, this will list all event sinks available for this source,
+ and the currently configured one will have a star ('*') prepended in front
+ of it. Sink "none" is always available and means that all events are simply
+ dropped, though their processing is not ignored (e.g. lock-on does occur).
+ Other sinks are available depending on configuration and build options, but
+ typically "stdout" and "stderr" will be usable in debug mode, and in-memory
+ ring buffers should be available as well. When a name is specified, the sink
+ instantly changes for the specified source. Events are not changed during a
+ sink change. In the worst case some may be lost if an invalid sink is used
+ (or "none"), but operations do continue to a different destination.
+
+trace <source> verbosity [<level>]
+ Without argument, this will list all verbosity levels for this source, and the
+ current one will be indicated by a star ('*') prepended in front of it. With
+ an argument, this will change the verbosity level to the specified one.
+
+ Verbosity levels indicate how far the trace decoder should go to provide
+ detailed information. It depends on the trace source, since some sources will
+ not even provide a specific decoder. Level "quiet" is always available and
+ disables any decoding. It can be useful when trying to figure what's
+ happening before trying to understand the details, since it will have a very
+ low impact on performance and trace size. When no verbosity levels are
+ declared by a source, level "default" is available and will cause a decoder
+ to be called when specified in the traces. It is an opportunistic decoding.
+ When the source declares some verbosity levels, these ones are listed with
+ a description of what they correspond to. In this case the trace decoder
+ provided by the source will be as accurate as possible based on the
+ information available at the trace point. The first level above "quiet" is
+ set by default.
+
+update ssl ocsp-response <certfile>
+ Create an OCSP request for the specified <certfile> and send it to the OCSP
+ responder whose URI should be specified in the "Authority Information Access"
+ section of the certificate. Only the first URI is taken into account. The
+ OCSP response that we should receive in return is then checked and inserted
+ in the local OCSP response tree. This command will only work for certificates
+ that already had a stored OCSP response, either because it was provided
+ during init or if it was previously set through the "set ssl cert" or "set
+ ssl ocsp-response" commands.
+ If the received OCSP response is valid and was properly inserted into the
+ local tree, its contents will be displayed on the standard output. The format
+ is the same as the one described in "show ssl ocsp-response".
+
+
+9.4. Master CLI
+---------------
+
+The master CLI is a socket bound to the master process in master-worker mode.
+This CLI gives access to the unix socket commands in every running or leaving
+processes and allows a basic supervision of those processes.
+
+The master CLI is configurable only from the haproxy program arguments with
+the -S option. This option also takes bind options separated by commas.
+
+Example:
+
+ # haproxy -W -S 127.0.0.1:1234 -f test1.cfg
+ # haproxy -Ws -S /tmp/master-socket,uid,1000,gid,1000,mode,600 -f test1.cfg
+ # haproxy -W -S /tmp/master-socket,level,user -f test1.cfg
+
+
+9.4.1. Master CLI commands
+--------------------------
+
+@<[!]pid>
+ The master CLI uses a special prefix notation to access the multiple
+ processes. This notation is easily identifiable as it begins by a @.
+
+ A @ prefix can be followed by a relative process number or by an exclamation
+ point and a PID. (e.g. @1 or @!1271). A @ alone can be used to specify the
+ master. Leaving processes are only accessible by their PID, as relative
+ process numbers are only usable with the current processes.
+
+ Examples:
+
+ $ socat /var/run/haproxy-master.sock readline
+ prompt
+ master> @1 show info; @2 show info
+ [...]
+ Process_num: 1
+ Pid: 1271
+ [...]
+ Process_num: 2
+ Pid: 1272
+ [...]
+ master>
+
+ $ echo '@!1271 show info; @!1272 show info' | socat /var/run/haproxy-master.sock -
+ [...]
+
+ A prefix can also be used as a command, which will send every subsequent
+ command to the specified process.
+
+ Examples:
+
+ $ socat /var/run/haproxy-master.sock readline
+ prompt
+ master> @1
+ 1271> show info
+ [...]
+ 1271> show stat
+ [...]
+ 1271> @
+ master>
+
+ $ echo '@1; show info; show stat; @2; show info; show stat' | socat /var/run/haproxy-master.sock -
+ [...]
+
+expert-mode [on|off]
+ This command activates the "expert-mode" for every worker accessed from the
+ master CLI. Combined with "mcli-debug-mode" it also activates the command on
+ the master. Display the flag "e" in the master CLI prompt.
+
+ See also "expert-mode" in Section 9.3 and "mcli-debug-mode" in 9.4.1.
+
+experimental-mode [on|off]
+ This command activates the "experimental-mode" for every worker accessed from
+ the master CLI. Combined with "mcli-debug-mode" it also activates the command on
+ the master. Display the flag "x" in the master CLI prompt.
+
+ See also "experimental-mode" in Section 9.3 and "mcli-debug-mode" in 9.4.1.
+
+hard-reload
+ This command does the same as the "reload" command over the master CLI with
+ the exception that it does a hard-stop (-st) instead of a soft-stop (-sf) of
+ the previous process. This means the previous process does not wait to
+ achieve anything before exiting, so all connections will be closed.
+
+ See also the "reload" command.
+
+mcli-debug-mode [on|off]
+ This keyword allows a special mode in the master CLI which enables every
+ keywords that were meant for a worker CLI on the master CLI, allowing to debug
+ the master process. Once activated, you list the new available keywords with
+ "help". Combined with "experimental-mode" or "expert-mode" it enables even
+ more keywords. Display the flag "d" in the master CLI prompt.
+
+prompt
+ When the prompt is enabled (via the "prompt" command), the context the CLI is
+ working on is displayed in the prompt. The master is identified by the "master"
+ string, and other processes are identified with their PID. In case the last
+ reload failed, the master prompt will be changed to "master[ReloadFailed]>" so
+ that it becomes visible that the process is still running on the previous
+ configuration and that the new configuration is not operational.
+
+ The prompt of the master CLI is able to display several flags which are the
+ enable modes. "d" for mcli-debug-mode, "e" for expert-mode, "x" for
+ experimental-mode.
+
+ Example:
+ $ socat /var/run/haproxy-master.sock -
+ prompt
+ master> expert-mode on
+ master(e)> experimental-mode on
+ master(xe)> mcli-debug-mode on
+ master(xed)> @1
+ 95191(xed)>
+
+reload
+ You can also reload the HAProxy master process with the "reload" command which
+ does the same as a `kill -USR2` on the master process, provided that the user
+ has at least "operator" or "admin" privileges.
+
+ This command allows you to perform a synchronous reload, the command will
+ return a reload status, once the reload was performed. Be careful with the
+ timeout if a tool is used to parse it, it is only returned once the
+ configuration is parsed and the new worker is forked. The "socat" command uses
+ a timeout of 0.5s by default so it will quit before showing the message if
+ the reload is too long. "ncat" does not have a timeout by default.
+ When compiled with USE_SHM_OPEN=1, the reload command is also able to dump
+ the startup-logs of the master.
+
+ Example:
+
+ $ echo "reload" | socat -t300 /var/run/haproxy-master.sock stdin
+ Success=1
+ --
+ [NOTICE] (482713) : haproxy version is 2.7-dev7-4827fb-69
+ [NOTICE] (482713) : path to executable is ./haproxy
+ [WARNING] (482713) : config : 'http-request' rules ignored for proxy 'frt1' as they require HTTP mode.
+ [NOTICE] (482713) : New worker (482720) forked
+ [NOTICE] (482713) : Loading success.
+
+ $ echo "reload" | socat -t300 /var/run/haproxy-master.sock stdin
+ Success=0
+ --
+ [NOTICE] (482886) : haproxy version is 2.7-dev7-4827fb-69
+ [NOTICE] (482886) : path to executable is ./haproxy
+ [ALERT] (482886) : config : parsing [test3.cfg:1]: unknown keyword 'Aglobal' out of section.
+ [ALERT] (482886) : config : Fatal errors found in configuration.
+ [WARNING] (482886) : Loading failure!
+
+ $
+
+ The reload command is the last one executed on the master CLI; every other
+ command after it is ignored. Once the reload command returns its status, it
+ will close the connection to the CLI.
+
+ Note that a reload will close all connections to the master CLI.
+ See also the "hard-reload" command.
+
+show proc
+ The master CLI introduces a 'show proc' command to supervise the
+ processes.
+
+ Example:
+
+ $ echo 'show proc' | socat /var/run/haproxy-master.sock -
+ #<PID> <type> <reloads> <uptime> <version>
+ 1162 master 5 [failed: 0] 0d00h02m07s 2.5-dev13
+ # workers
+ 1271 worker 1 0d00h00m00s 2.5-dev13
+ # old workers
+ 1233 worker 3 0d00h00m43s 2.0-dev3-6019f6-289
+ # programs
+ 1244 foo 0 0d00h00m00s -
+ 1255 bar 0 0d00h00m00s -
+
+ In this example, the master has been reloaded 5 times but one of the old
+ workers is still running and survived 3 reloads. You could access the CLI of
+ this worker to understand what's going on.
+
+show startup-logs
+ HAProxy needs to be compiled with USE_SHM_OPEN=1 to be used correctly on the
+ master CLI or all messages won't be visible.
+
+ Like its counterpart on the stats socket, this command is able to show the
+ startup messages of HAProxy. However it does not dump the startup messages
+ of the current worker, but the startup messages of the latest startup or
+ reload, which means it is able to dump the parsing messages of a failed
+ reload.
+
+ Those messages are also dumped with the "reload" command.
+
+10. Tricks for easier configuration management
+----------------------------------------------
+
+It is very common that two HAProxy nodes constituting a cluster share exactly
+the same configuration modulo a few addresses. Instead of having to maintain a
+duplicate configuration for each node, which will inevitably diverge, it is
+possible to include environment variables in the configuration. Thus multiple
+configuration may share the exact same file with only a few different system
+wide environment variables. This started in version 1.5 where only addresses
+were allowed to include environment variables, and 1.6 goes further by
+supporting environment variables everywhere. The syntax is the same as in the
+UNIX shell, a variable starts with a dollar sign ('$'), followed by an opening
+curly brace ('{'), then the variable name followed by the closing brace ('}').
+Except for addresses, environment variables are only interpreted in arguments
+surrounded with double quotes (this was necessary not to break existing setups
+using regular expressions involving the dollar symbol).
+
+Environment variables also make it convenient to write configurations which are
+expected to work on various sites where only the address changes. It can also
+permit to remove passwords from some configs. Example below where the file
+"site1.env" is sourced by the init script upon startup :
+
+ $ cat site1.env
+ LISTEN=192.168.1.1
+ CACHE_PFX=192.168.11
+ SERVER_PFX=192.168.22
+ LOGGER=192.168.33.1
+ STATSLP=admin:pa$$w0rd
+ ABUSERS=/etc/haproxy/abuse.lst
+ TIMEOUT=10s
+
+ $ cat haproxy.cfg
+ global
+ log "${LOGGER}:514" local0
+
+ defaults
+ mode http
+ timeout client "${TIMEOUT}"
+ timeout server "${TIMEOUT}"
+ timeout connect 5s
+
+ frontend public
+ bind "${LISTEN}:80"
+ http-request reject if { src -f "${ABUSERS}" }
+ stats uri /stats
+ stats auth "${STATSLP}"
+ use_backend cache if { path_end .jpg .css .ico }
+ default_backend server
+
+ backend cache
+ server cache1 "${CACHE_PFX}.1:18080" check
+ server cache2 "${CACHE_PFX}.2:18080" check
+
+ backend server
+ server cache1 "${SERVER_PFX}.1:8080" check
+ server cache2 "${SERVER_PFX}.2:8080" check
+
+
+11. Well-known traps to avoid
+-----------------------------
+
+Once in a while, someone reports that after a system reboot, the haproxy
+service wasn't started, and that once they start it by hand it works. Most
+often, these people are running a clustered IP address mechanism such as
+keepalived, to assign the service IP address to the master node only, and while
+it used to work when they used to bind haproxy to address 0.0.0.0, it stopped
+working after they bound it to the virtual IP address. What happens here is
+that when the service starts, the virtual IP address is not yet owned by the
+local node, so when HAProxy wants to bind to it, the system rejects this
+because it is not a local IP address. The fix doesn't consist in delaying the
+haproxy service startup (since it wouldn't stand a restart), but instead to
+properly configure the system to allow binding to non-local addresses. This is
+easily done on Linux by setting the net.ipv4.ip_nonlocal_bind sysctl to 1. This
+is also needed in order to transparently intercept the IP traffic that passes
+through HAProxy for a specific target address.
+
+Multi-process configurations involving source port ranges may apparently seem
+to work but they will cause some random failures under high loads because more
+than one process may try to use the same source port to connect to the same
+server, which is not possible. The system will report an error and a retry will
+happen, picking another port. A high value in the "retries" parameter may hide
+the effect to a certain extent but this also comes with increased CPU usage and
+processing time. Logs will also report a certain number of retries. For this
+reason, port ranges should be avoided in multi-process configurations.
+
+Since HAProxy uses SO_REUSEPORT and supports having multiple independent
+processes bound to the same IP:port, during troubleshooting it can happen that
+an old process was not stopped before a new one was started. This provides
+absurd test results which tend to indicate that any change to the configuration
+is ignored. The reason is that in fact, even if the new process is restarted
+with a new configuration, the old one also gets some incoming connections and
+processes them, returning unexpected results. When in doubt, just stop the new
+process and try again. If it still works, it very likely means that an old
+process remains alive and has to be stopped. Linux's "netstat -lntp" is of good
+help here.
+
+When adding entries to an ACL from the command line (eg: when blacklisting a
+source address), it is important to keep in mind that these entries are not
+synchronized to the file and that if someone reloads the configuration, these
+updates will be lost. While this is often the desired effect (for blacklisting)
+it may not necessarily match expectations when the change was made as a fix for
+a problem. See the "add acl" action of the CLI interface.
+
+
+12. Debugging and performance issues
+------------------------------------
+
+When HAProxy is started with the "-d" option, it will stay in the foreground
+and will print one line per event, such as an incoming connection, the end of a
+connection, and for each request or response header line seen. This debug
+output is emitted before the contents are processed, so it doesn't reflect the
+local modifications. The main use is to show the request and response without
+having to run a network sniffer. The output is less readable when multiple
+connections are handled in parallel, though the "debug2ansi" and "debug2html"
+scripts found in the examples/ directory definitely help here by coloring the
+output.
+
+If a request or response is rejected because HAProxy finds it is malformed, the
+best thing to do is to connect to the CLI and issue "show errors", which will
+report the last captured faulty request and response for each frontend and
+backend, with all the necessary information to indicate precisely the first
+character of the input stream that was rejected. This is sometimes needed to
+prove to customers or to developers that a bug is present in their code. In
+this case it is often possible to relax the checks (but still keep the
+captures) using "option accept-invalid-http-request" or its equivalent for
+responses coming from the server "option accept-invalid-http-response". Please
+see the configuration manual for more details.
+
+Example :
+
+ > show errors
+ Total events captured on [13/Oct/2015:13:43:47.169] : 1
+
+ [13/Oct/2015:13:43:40.918] frontend HAProxyLocalStats (#2): invalid request
+ backend <NONE> (#-1), server <NONE> (#-1), event #0
+ src 127.0.0.1:51981, session #0, session flags 0x00000080
+ HTTP msg state 26, msg flags 0x00000000, tx flags 0x00000000
+ HTTP chunk len 0 bytes, HTTP body len 0 bytes
+ buffer flags 0x00808002, out 0 bytes, total 31 bytes
+ pending 31 bytes, wrapping at 8040, error at position 13:
+
+ 00000 GET /invalid request HTTP/1.1\r\n
+
+
+The output of "show info" on the CLI provides a lot of useful information
+regarding the maximum connection rate ever reached, maximum SSL key rate ever
+reached, and in general all information which can help to explain temporary
+issues regarding CPU or memory usage. Example :
+
+ > show info
+ Name: HAProxy
+ Version: 1.6-dev7-e32d18-17
+ Release_date: 2015/10/12
+ Nbproc: 1
+ Process_num: 1
+ Pid: 7949
+ Uptime: 0d 0h02m39s
+ Uptime_sec: 159
+ Memmax_MB: 0
+ Ulimit-n: 120032
+ Maxsock: 120032
+ Maxconn: 60000
+ Hard_maxconn: 60000
+ CurrConns: 0
+ CumConns: 3
+ CumReq: 3
+ MaxSslConns: 0
+ CurrSslConns: 0
+ CumSslConns: 0
+ Maxpipes: 0
+ PipesUsed: 0
+ PipesFree: 0
+ ConnRate: 0
+ ConnRateLimit: 0
+ MaxConnRate: 1
+ SessRate: 0
+ SessRateLimit: 0
+ MaxSessRate: 1
+ SslRate: 0
+ SslRateLimit: 0
+ MaxSslRate: 0
+ SslFrontendKeyRate: 0
+ SslFrontendMaxKeyRate: 0
+ SslFrontendSessionReuse_pct: 0
+ SslBackendKeyRate: 0
+ SslBackendMaxKeyRate: 0
+ SslCacheLookups: 0
+ SslCacheMisses: 0
+ CompressBpsIn: 0
+ CompressBpsOut: 0
+ CompressBpsRateLim: 0
+ ZlibMemUsage: 0
+ MaxZlibMemUsage: 0
+ Tasks: 5
+ Run_queue: 1
+ Idle_pct: 100
+ node: wtap
+ description:
+
+When an issue seems to randomly appear on a new version of HAProxy (eg: every
+second request is aborted, occasional crash, etc), it is worth trying to enable
+memory poisoning so that each call to malloc() is immediately followed by the
+filling of the memory area with a configurable byte. By default this byte is
+0x50 (ASCII for 'P'), but any other byte can be used, including zero (which
+will have the same effect as a calloc() and which may make issues disappear).
+Memory poisoning is enabled on the command line using the "-dM" option. It
+slightly hurts performance and is not recommended for use in production. If
+an issue happens all the time with it or never happens when poisoning uses
+byte zero, it clearly means you've found a bug and you definitely need to
+report it. Otherwise if there's no clear change, the problem is not related.
+
+When debugging some latency issues, it is important to use both strace and
+tcpdump on the local machine, and another tcpdump on the remote system. The
+reason for this is that there are delays everywhere in the processing chain and
+it is important to know which one is causing latency to know where to act. In
+practice, the local tcpdump will indicate when the input data come in. Strace
+will indicate when haproxy receives these data (using recv/recvfrom). Warning,
+openssl uses read()/write() syscalls instead of recv()/send(). Strace will also
+show when haproxy sends the data, and tcpdump will show when the system sends
+these data to the interface. Then the external tcpdump will show when the data
+sent are really received (since the local one only shows when the packets are
+queued). The benefit of sniffing on the local system is that strace and tcpdump
+will use the same reference clock. Strace should be used with "-tts200" to get
+complete timestamps and report large enough chunks of data to read them.
+Tcpdump should be used with "-nvvttSs0" to report full packets, real sequence
+numbers and complete timestamps.
+
+In practice, received data are almost always immediately received by haproxy
+(unless the machine has a saturated CPU or these data are invalid and not
+delivered). If these data are received but not sent, it generally is because
+the output buffer is saturated (ie: recipient doesn't consume the data fast
+enough). This can be confirmed by seeing that the polling doesn't notify of
+the ability to write on the output file descriptor for some time (it's often
+easier to spot in the strace output when the data finally leave and then roll
+back to see when the write event was notified). It generally matches an ACK
+received from the recipient, and detected by tcpdump. Once the data are sent,
+they may spend some time in the system doing nothing. Here again, the TCP
+congestion window may be limited and not allow these data to leave, waiting for
+an ACK to open the window. If the traffic is idle and the data take 40 ms or
+200 ms to leave, it's a different issue (which is not an issue), it's the fact
+that the Nagle algorithm prevents empty packets from leaving immediately, in
+hope that they will be merged with subsequent data. HAProxy automatically
+disables Nagle in pure TCP mode and in tunnels. However it definitely remains
+enabled when forwarding an HTTP body (and this contributes to the performance
+improvement there by reducing the number of packets). Some HTTP non-compliant
+applications may be sensitive to the latency when delivering incomplete HTTP
+response messages. In this case you will have to enable "option http-no-delay"
+to disable Nagle in order to work around their design, keeping in mind that any
+other proxy in the chain may similarly be impacted. If tcpdump reports that data
+leave immediately but the other end doesn't see them quickly, it can mean there
+is a congested WAN link, a congested LAN with flow control enabled and
+preventing the data from leaving, or more commonly that HAProxy is in fact
+running in a virtual machine and that for whatever reason the hypervisor has
+decided that the data didn't need to be sent immediately. In virtualized
+environments, latency issues are almost always caused by the virtualization
+layer, so in order to save time, it's worth first comparing tcpdump in the VM
+and on the external components. Any difference has to be credited to the
+hypervisor and its accompanying drivers.
+
+When some TCP SACK segments are seen in tcpdump traces (using -vv), it always
+means that the side sending them has got the proof of a lost packet. While not
+seeing them doesn't mean there are no losses, seeing them definitely means the
+network is lossy. Losses are normal on a network, but at a rate where SACKs are
+not noticeable at the naked eye. If they appear a lot in the traces, it is
+worth investigating exactly what happens and where the packets are lost. HTTP
+doesn't cope well with TCP losses, which introduce huge latencies.
+
+The "netstat -i" command will report statistics per interface. An interface
+where the Rx-Ovr counter grows indicates that the system doesn't have enough
+resources to receive all incoming packets and that they're lost before being
+processed by the network driver. Rx-Drp indicates that some received packets
+were lost in the network stack because the application doesn't process them
+fast enough. This can happen during some attacks as well. Tx-Drp means that
+the output queues were full and packets had to be dropped. When using TCP it
+should be very rare, but will possibly indicate a saturated outgoing link.
+
+
+13. Security considerations
+---------------------------
+
+HAProxy is designed to run with very limited privileges. The standard way to
+use it is to isolate it into a chroot jail and to drop its privileges to a
+non-root user without any permissions inside this jail so that if any future
+vulnerability were to be discovered, its compromise would not affect the rest
+of the system.
+
+In order to perform a chroot, it first needs to be started as a root user. It is
+pointless to build hand-made chroots to start the process there, these ones are
+painful to build, are never properly maintained and always contain way more
+bugs than the main file-system. And in case of compromise, the intruder can use
+the purposely built file-system. Unfortunately many administrators confuse
+"start as root" and "run as root", resulting in the uid change to be done prior
+to starting haproxy, and reducing the effective security restrictions.
+
+HAProxy will need to be started as root in order to :
+ - adjust the file descriptor limits
+ - bind to privileged port numbers
+ - bind to a specific network interface
+ - transparently listen to a foreign address
+ - isolate itself inside the chroot jail
+ - drop to another non-privileged UID
+
+HAProxy may require to be run as root in order to :
+ - bind to an interface for outgoing connections
+ - bind to privileged source ports for outgoing connections
+ - transparently bind to a foreign address for outgoing connections
+
+Most users will never need the "run as root" case. But the "start as root"
+covers most usages.
+
+A safe configuration will have :
+
+ - a chroot statement pointing to an empty location without any access
+ permissions. This can be prepared this way on the UNIX command line :
+
+ # mkdir /var/empty && chmod 0 /var/empty || echo "Failed"
+
+ and referenced like this in the HAProxy configuration's global section :
+
+ chroot /var/empty
+
+ - both a uid/user and gid/group statements in the global section :
+
+ user haproxy
+ group haproxy
+
+ - a stats socket whose mode, uid and gid are set to match the user and/or
+ group allowed to access the CLI so that nobody may access it :
+
+ stats socket /var/run/haproxy.stat uid hatop gid hatop mode 600
+
diff --git a/doc/netscaler-client-ip-insertion-protocol.txt b/doc/netscaler-client-ip-insertion-protocol.txt
new file mode 100644
index 0000000..dc64327
--- /dev/null
+++ b/doc/netscaler-client-ip-insertion-protocol.txt
@@ -0,0 +1,55 @@
+When NetScaler application switch is used as L3+ switch, information
+regarding the original IP and TCP headers are lost as a new TCP
+connection is created between the NetScaler and the backend server.
+
+NetScaler provides a feature to insert in the TCP data the original data
+that can then be consumed by the backend server.
+
+Specifications and documentations from NetScaler:
+ https://support.citrix.com/article/CTX205670
+ https://www.citrix.com/blogs/2016/04/25/how-to-enable-client-ip-in-tcpip-option-of-netscaler/
+
+When CIP is enabled on the NetScaler, then a TCP packet is inserted just after
+the TCP handshake. Two versions of the CIP extension exist.
+
+Legacy (NetScaler < 10.5)
+
+ - CIP magic number : 4 bytes
+ Both sender and receiver have to agree on a magic number so that
+ they both handle the incoming data as a NetScaler Client IP insertion
+ packet.
+
+ - Header length : 4 bytes
+ Defines the length of the remaining data.
+
+ - IP header : >= 20 bytes if IPv4, 40 bytes if IPv6
+ Contains the header of the last IP packet sent by the client during TCP
+ handshake.
+
+ - TCP header : >= 20 bytes
+ Contains the header of the last TCP packet sent by the client during TCP
+ handshake.
+
+Standard (NetScaler >= 10.5)
+
+ - CIP magic number : 4 bytes
+ Both sender and receiver have to agree on a magic number so that
+ they both handle the incoming data as a NetScaler Client IP insertion
+ packet.
+
+ - CIP length : 4 bytes
+ Defines the total length of the CIP header.
+
+ - CIP type: 2 bytes
+ Always set to 1.
+
+ - Header length : 2 bytes
+ Defines the length of the remaining data.
+
+ - IP header : >= 20 bytes if IPv4, 40 bytes if IPv6
+ Contains the header of the last IP packet sent by the client during TCP
+ handshake.
+
+ - TCP header : >= 20 bytes
+ Contains the header of the last TCP packet sent by the client during TCP
+ handshake.
diff --git a/doc/network-namespaces.txt b/doc/network-namespaces.txt
new file mode 100644
index 0000000..9448f43
--- /dev/null
+++ b/doc/network-namespaces.txt
@@ -0,0 +1,106 @@
+Linux network namespace support for HAProxy
+===========================================
+
+HAProxy supports proxying between Linux network namespaces. This
+feature can be used, for example, in a multi-tenant networking
+environment to proxy between different networks. HAProxy can also act
+as a front-end proxy for non namespace-aware services.
+
+The proxy protocol has been extended to support transferring the
+namespace information, so the originating namespace information can be
+kept. This is useful when chaining multiple proxies and services.
+
+To enable Linux namespace support, compile HAProxy with the `USE_NS=1`
+make option.
+
+
+## Setting up namespaces on Linux
+
+To create network namespaces, use the 'ip netns' command. See the
+manual page ip-netns(8) for details.
+
+Make sure that the file descriptors representing the network namespace
+are located under `/var/run/netns`.
+
+For example, you can create a network namespace and assign one of the
+networking interfaces to the new namespace:
+
+```
+$ ip netns add netns1
+$ ip link set eth7 netns netns1
+```
+
+
+## Listing namespaces in the configuration file
+
+HAProxy uses namespaces explicitly listed in its configuration file.
+If you are not using namespace information received through the proxy
+protocol, this usually means that you must specify namespaces for
+listeners and servers in the configuration file with the 'namespace'
+keyword.
+
+However, if you're using the namespace information received through
+the proxy protocol to determine the namespace of servers (see
+'namespace *' below), you have to explicitly list all allowed
+namespaces in the namespace_list section of your configuration file:
+
+```
+namespace_list
+ namespace netns1
+ namespace netns2
+```
+
+
+## Namespace information flow
+
+The haproxy process always runs in the namespace it was started on.
+This is the default namespace.
+
+The bind addresses of listeners can have their namespace specified in
+the configuration file. Unless specified, sockets associated with
+listener bind addresses are created in the default namespace. For
+example, this creates a listener in the netns2 namespace:
+
+```
+frontend f_example
+ bind 192.168.1.1:80 namespace netns2
+ default_backend http
+```
+
+Each client connection is associated with its source namespace. By
+default, this is the namespace of the bind socket it arrived on, but
+can be overridden by information received through the proxy protocol.
+Proxy protocol v2 supports transferring namespace information, so if
+it is enabled for the listener, it can override the associated
+namespace of the connection.
+
+Servers can have their namespaces specified in the configuration file
+with the 'namespace' keyword:
+
+```
+backend b_example
+ server s1 192.168.1.100:80 namespace netns2
+```
+
+If no namespace is set for a server, it is assumed that it is in the
+default namespace. When specified, outbound sockets to the server are
+created in the network namespace configured. To create the outbound
+(server) connection in the namespace associated with the client, use
+the '*' namespace. This is especially useful when using the
+destination address and namespace received from the proxy protocol.
+
+```
+frontend f_example
+ bind 192.168.1.1:9990 accept-proxy
+ default_backend b_example
+
+backend b_example
+ mode tcp
+ source 0.0.0.0 usesrc clientip
+ server snodes * namespace *
+```
+
+If HAProxy is configured to send proxy protocol v2 headers to the
+server, the outgoing header will always contain the namespace
+associated with the client connection, not the namespace configured
+for the server.
diff --git a/doc/peers-v2.0.txt b/doc/peers-v2.0.txt
new file mode 100644
index 0000000..711c949
--- /dev/null
+++ b/doc/peers-v2.0.txt
@@ -0,0 +1,294 @@
+ HAProxy's peers v2.0 protocol 08/18/2016
+
+Author: Emeric Brun ebrun@haproxy.com
+
+
+I) Encoded Integer and Bitfield.
+
+
+ 0 <= X < 240 : 1 byte (7.875 bits) [ XXXX XXXX ]
+ 240 <= X < 2288 : 2 bytes (11 bits) [ 1111 XXXX ] [ 0XXX XXXX ]
+ 2288 <= X < 264432 : 3 bytes (18 bits) [ 1111 XXXX ] [ 1XXX XXXX ] [ 0XXX XXXX ]
+ 264432 <= X < 33818864 : 4 bytes (25 bits) [ 1111 XXXX ] [ 1XXX XXXX ]*2 [ 0XXX XXXX ]
+ 33818864 <= X < 4328786160 : 5 bytes (32 bits) [ 1111 XXXX ] [ 1XXX XXXX ]*3 [ 0XXX XXXX ]
+
+
+
+
+II) Handshake
+
+Each peer tries to connect to the others, and each peer is listening
+for a connection from other peers.
+
+
+Client Server
+ Hello Message
+ ------------------------>
+
+ Status Message
+ <------------------------
+
+1) Hello Message
+
+Hello message is composed of 3 lines:
+
+<protocol> <version>
+<remotepeerid>
+<localpeerid> <processpid> <relativepid>
+
+protocol: current value is "HAProxyS"
+version: current value is "2.0"
+remotepeerid: is the name of the target peer as defined in the configuration peers section.
+localpeerid: is the name of the local peer as defined on cmdline or using hostname.
+processpid: is the system process id of the local process.
+relativepid: is the haproxy's relative pid (0 if nbproc == 1)
+
+2) Status Message
+
+Status message is a code followed by a LF.
+
+200: Handshake succeeded
+300: Try again later
+501: Protocol error
+502: Bad version
+503: Local peer name mismatch
+504: Remote peer name mismatch
+
+
+IV) Messages
+
+Messages:
+
+0 - - - - - - - 8 - - - - - - - 16
+ Message Class| Message Type
+
+if Message Type >= 128
+
+0 - - - - - - - 8 - - - - - - - 16 .....
+ Message Class| Message Type | encoded data length | data
+
+Message Classes:
+0: control
+1: error
+10: related to stick table updates
+255: reserved
+
+
+1) Control Messages Class
+
+Available message Types for class control:
+0: resync request
+1: resync finished
+2: resync partial
+3: resync confirm
+
+
+a) Resync Request Message
+
+This message is used to request a full resync from a peer
+
+b) Resync Finished Message
+
+This message is used to signal remote peer that locally known updates have been pushed, and local peer was considered up to date.
+
+c) Resync Partial Message
+
+This message is used to signal remote peer that locally known updates have been pushed, but the local peer is not considered up to date.
+
+d) Resync Confirm Message
+
+This message is an ack for Resync Partial or Finished Messages.
+
+This allows the remote peer to go back to the "on the fly" update process.
+
+
+2) Error Messages Class
+
+Available message Types for this class are:
+0: protocol error
+1: size limit reached
+
+a) Protocol Message
+
+To signal that a protocol error occurred. Connection will be shutdown just after sending this message.
+
+b) Size Limit Error Message
+
+To signal that a message is outsized and can not be correctly handled. Connection will be broken.
+
+
+
+3) Stick Table Updates Messages Class
+
+Available message Types for this class are:
+0: Entry update
+1: Incremental entry update
+2: table definition
+3: table switch
+4: updates ack message.
+
+
+a) Update Message
+
+0 - - - - - - - 8 - - - - - - - 16 .....
+ Message class | Message Type | encoded data length | data
+
+
+data is composed like this
+
+0 - - - - - - - 32 .............................
+Local Update ID | Key value | data values ....
+
+Update ID is a 32 bits identifier of the local update.
+
+Key value format depends on the table key type:
+
+- for keytype string
+
+0 .................................
+encoded string length | string value
+
+- for keytype integer
+0 - - - - - - - - - - 32
+encoded integer value |
+
+- for other key type
+
+The value length is announced in table definition message
+0 ....................
+value
+
+
+b) Incremental Update Message
+
+Same format as the update message except that the Update ID is not present; the receiver should
+consider that the update ID is an increment of 1 over the previously considered update message (partial or not)
+
+
+c) Table Definition Message
+
+This message is used by the receiver to identify the stick table concerned by next update messages and
+to know which data is pushed in these updates.
+
+0 - - - - - - - 8 - - - - - - - 16 .....
+ Message class | Message Type | encoded data length | data
+
+
+data is composed like this
+
+0 ...................................................................
+Encoded Sender Table Id | Encoded Local Table Name Length | Table Name | Encoded Table Type | Encoded Table Keylen | Encoded Table Data Types Bitfield
+
+
+Encoded Sender Table ID presents the numerical ID assigned to that table by the sender.
+It will be used by "Updates Acknowledgement Messages" and "Table Switch Messages".
+
+Encoded Local Table Name Length presents the length to read the table name.
+
+"Table Name" is the shared identifier of the table (name of the current table in the configuration)
+It permits the receiver to identify the concerned table. The receiver should keep in memory the mapping
+between the "Sender Table ID" and the table, to identify it directly in case of "Table Switch Message".
+
+Table Type presents the numeric type of key used to store stick table entries:
+integer
+ 2: signed integer
+ 4: IPv4 address
+ 5: IPv6 address
+ 6: string
+ 7: binary
+
+Table Keylen presents the key length or max length in case of strings or binary (padded with 0).
+
+Data Types Bitfield presents the types of data linearly pushed in next updates message (they will be linearly pushed in the update message)
+Known types are
+bit
+ 0: server id
+ 1: gpt0
+ 2: gpc0
+ 3: gpc0 rate
+ 4: connections counter
+ 5: connection rate
+ 6: number of current connections
+ 7: sessions counter
+ 8: session rate
+ 9: http requests counter
+ 10: http requests rate
+ 11: errors counter
+ 12: errors rate
+ 13: bytes in counter
+ 14: bytes in rate
+ 15: bytes out counter
+ 16: bytes out rate
+ 17: gpc1
+ 18: gpc1 rate
+ 19: server key
+ 20: http fail counter
+ 21: http fail rate
+ 22: gpt array
+ 23: gpc array
+ 24: gpc rate array
+
+d) Table Switch Message
+
+After a Table Definition Message, this message can be used by the receiver to identify the stick table concerned by next update messages.
+
+0 - - - - - - - 8 - - - - - - - 16 .....
+ Message class | Message Type | encoded data length | data
+
+
+data is composed like this
+
+
+0 .....................
+encoded Sender Table Id
+
+e) Update Ack Message
+
+0 - - - - - - - 8 - - - - - - - 16 .....
+ Message class | Message Type | encoded data length | data
+
+data is composed like this
+
+0 ....................... - - - - - - - - 32
+Encoded Remote Table Id | Update Id
+
+
+Remote Table Id is the numeric identifier of the table on the remote side.
+Update Id is the id of the last update locally committed.
+
+If a re-connection occurred, the sender knows it will have to restart the push of updates from this point.
+
+III) Initial full resync process.
+
+
+a) Resync from local old process
+
+An old soft-stopped process will close all established sessions with remote peers and will try to connect to a new
+local process to push all known updates, ending with a Resync Finished Message or a Resync Partial Message (if it does not consider itself as fully updated).
+
+A new process will wait for an incoming connection from a local process during 5 seconds. It will learn the updates from this
+process until it receives a Resync Finished Message or a Resync Partial Message. If it receives a Resync Finished Message it will consider itself
+as fully updated and stop asking for resync. If it receives a Resync Partial Message it will wait once again for 5 seconds for another incoming connection from a local process.
+Same thing if the session was broken before receiving any "Resync Partial Message" or "Resync Finished Message".
+
+If one of these 5 seconds timeouts expires, the process will try to request a resync from a remote connected peer (see b). The process will wait up to 5 seconds
+if no available remote peer is found.
+
+If the timeout expires, the process will consider itself as fully updated
+
+b) Resync from remote peers
+
+The process will randomly choose a remote connected peer and ask for a full resync using a Resync Request Message. The process will wait up to 5 seconds
+if no available remote peer is found.
+
+The chosen remote peer will push all its known data, ending with a Resync Finished Message or a Resync Partial Message (if it does not consider itself as fully updated).
+
+If it receives a Resync Finished Message it will consider itself as fully updated and stop asking for resync.
+
+If it receives a Resync Partial Message, the current peer will be flagged to no longer be requested and any other connected peer will be randomly chosen for a resync request (5s).
+
+If the session is broken before receiving any of these messages any other connected peer will be randomly chosen for a resync request (5s).
+
+If the timeout expires, the process will consider itself as fully updated
+
+
diff --git a/doc/peers.txt b/doc/peers.txt
new file mode 100644
index 0000000..7ce2fcb
--- /dev/null
+++ b/doc/peers.txt
@@ -0,0 +1,491 @@
+ +--------------------+
+ | Peers protocol 2.1 |
+ +--------------------+
+
+
+ Peers protocol has been implemented over TCP. Its aim is to transmit
+ stick-table entries information between several haproxy processes.
+
+ This protocol is symmetrical. This means that at any time, each peer
+ may connect to other peers they have been configured for, to send
+ their last stick-table updates. There is no role of client or server in this
+ protocol. As peers may connect to each others at the same time, the protocol
+ ensures that only one peer session may stay opened between a couple of peers
+ before they start sending their stick-table information, possibly in both
+ directions (or not).
+
+
+ Handshake
+ +++++++++
+
+ Just after having connected to another one, a peer must identify itself
+ and identify the remote peer, sending a "hello" message. The remote peer
+ replies with a "status" message.
+
+ A "hello" message is made of three lines terminated by a line feed character
+ as follows:
+
+ <protocol identifier> <version>\n
+ <remote peer identifier>\n
+ <local peer identifier> <process ID> <relative process ID>\n
+
+ protocol identifier : HAProxyS
+ version : 2.1
+ remote peer identifier: the peer name this "hello" message is sent to.
+ local peer identifier : the name of the peer which sends this "hello" message.
+ process ID : the ID of the process handling this peer session.
+ relative process ID : the haproxy's relative process ID (0 if nbproc == 1).
+
+ The "status" message is made of a unique line terminated by a line feed
+ character as follows:
+
+ <status code>\n
+
+ with these values as status code (a three-digit number):
+
+ +-------------+---------------------------------+
+ | status code | signification |
+ +-------------+---------------------------------+
+ | 200 | Handshake succeeded |
+ +-------------+---------------------------------+
+ | 300 | Try again later |
+ +-------------+---------------------------------+
+ | 501 | Protocol error |
+ +-------------+---------------------------------+
+ | 502 | Bad version |
+ +-------------+---------------------------------+
+ | 503 | Local peer identifier mismatch |
+ +-------------+---------------------------------+
+ | 504 | Remote peer identifier mismatch |
+ +-------------+---------------------------------+
+
+ As the protocol is symmetrical, some peers may connect to each other at the
+ same time. For efficiency reasons, the protocol ensures there may be only
+ one TCP session opened after the handshake succeeded and before transmitting
+ any stick-table data information. In fact, for each couple of peers, this is
+ the last connected peer which wins. Each time a peer A receives a "hello"
+ message from a peer B, peer A checks if it already managed to open a peer
+ session with peer B, so with a successful handshake. If it is the case,
+ peer A closes its peer session. So, this is the peer session opened by B
+ which stays opened.
+
+
+ Peer A Peer B
+ hello
+ ---------------------->
+ status 200
+ <----------------------
+ hello
+ <++++++++++++++++++++++
+ TCP/FIN-ACK
+ ---------------------->
+ TCP/FIN-ACK
+ <----------------------
+ status 200
+ ++++++++++++++++++++++>
+ data
+ <++++++++++++++++++++++
+ data
+ ++++++++++++++++++++++>
+ data
+ ++++++++++++++++++++++>
+ data
+ <++++++++++++++++++++++
+ .
+ .
+ .
+
+ As it is still possible that a couple of peers decide to close both their
+ peer sessions at the same time, the protocol ensures peers will not reconnect
+ at the same time, adding a random delay (50 up to 2050 ms) before any
+ reconnection.
+
+
+ Encoding
+ ++++++++
+
+ As some TCP data may be corrupted, for integrity reason, some data fields
+ are encoded at peer session level.
+
+ The following algorithms explain how to encode/decode the data.
+
+ encode:
+ input : val (64bits integer)
+ output: bitf (variable-length bitfield)
+
+ if val has no bit set above bit 4 (or if val is less than 0xf0)
+ set the next byte of bitf to the value of val
+ return bitf
+
+ set the next byte of bitf to the value of val OR'ed with 0xf0
+ subtract 0xf0 from val
+ right shift val by 4
+
+ while val bit 7 is set (or if val is greater or equal to 0x80):
+ set the next byte of bitf to the value of the byte made of the last
+ 7 bits of val OR'ed with 0x80
+ subtract 0x80 from val
+ right shift val by 7
+
+ set the next byte of bitf to the value of val
+ return bitf
+
+ decode:
+ input : bitf (variable-length bitfield)
+ output: val (64bits integer)
+
+ set val to the value of the first byte of bitf
+ if bit 4 up to 7 of val are not set
+ return val
+
+ set loop to 0
+ do
+ add to val the value of the next byte of bitf left shifted by (4 + 7*loop)
+ set loop to (loop + 1)
+ while the bit 7 of the next byte of bitf is set
+ return val
+
+ Example:
+
+ let's say that we must encode 0x1234.
+
+ "set the next byte of bitf to the value of val OR'ed with 0xf0"
+ => bitf[0] = (0x1234 | 0xf0) & 0xff = 0xf4
+
+ "subtract 0xf0 from val"
+ => val = 0x1144
+
+ right shift val by 4
+ => val = 0x114
+
+ "set the next byte of bitf to the value of the byte made of the last
+ 7 bits of val OR'ed with 0x80"
+ => bitf[1] = (0x114 | 0x80) & 0xff = 0x94
+
+ "subtract 0x80 from val"
+ => val= 0x94
+
+ "right shift val by 7"
+ => val = 0x1
+
+ => bitf[2] = 0x1
+
+ So, the encoded value of 0x1234 is 0xf49401.
+
+ To decode this value:
+
+ "set val to the value of the first byte of bitf"
+ => val = 0xf4
+
+ "add to val the value of the next byte of bitf left shifted by 4"
+ => val = 0xf4 + (0x94 << 4) = 0xf4 + 0x940 = 0xa34
+
+ "add to val the value of the next byte of bitf left shifted by (4 + 7)"
+ => val = 0xa34 + (0x01 << 11) = 0xa34 + 0x800 = 0x1234
+
+
+ Messages
+ ++++++++
+
+ *** General ***
+
+ After the handshake has successfully completed, peers are authorized to send
+ some messages to each others, possibly in both directions.
+
+ All the messages are made at least of a two bytes length header.
+
+ The first byte of this header identifies the class of the message. The next
+ byte identifies the type of message in the class.
+
+ Some of these messages are variable-length. Others have a fixed size.
+ Variable-length messages are identified by the value of the message type
+ byte. For such messages, it is greater than or equal to 128.
+
+ All variable-length message headers must be followed by the encoded length
+ of the remaining bytes (so the encoded length of the message minus 2 bytes
+ for the header and minus the length of the encoded length).
+
+ There exist four classes of messages:
+
+ +------------+---------------------+--------------+
+ | class byte | signification | message size |
+ +------------+---------------------+--------------+
+ | 0 | control | fixed (2) |
+ +------------+---------------------+--------------|
+ | 1 | error | fixed (2) |
+ +------------+---------------------+--------------|
+ | 10 | stick-table updates | variable |
+ +------------+---------------------+--------------|
+ | 255 | reserved | |
+ +------------+---------------------+--------------+
+
+ At the time of this writing, only control and error messages have a fixed
+ size of two bytes (header only). The stick-table updates messages are all
+ variable-length (their message type bytes are greater than 128).
+
+
+ *** Control message class ***
+
+ At this time of writing, control messages are fixed-length messages used
+ only to control the synchronizations between local and/or remote processes
+ and to emit heartbeat messages.
+
+ There exist five types of such control messages:
+
+ +------------+--------------------------------------------------------+
+ | type byte | signification |
+ +------------+--------------------------------------------------------+
+ | 0 | synchronisation request: ask a remote peer for a full |
+ | | synchronization |
+ +------------+--------------------------------------------------------+
+ | 1 | synchronization finished: signal a remote peer that |
+ | | local updates have been pushed and local is considered |
+ | | up to date. |
+ +------------+--------------------------------------------------------+
+ | 2 | synchronization partial: signal a remote peer that |
+ | | local updates have been pushed and local is not |
+ | | considered up to date. |
+ +------------+--------------------------------------------------------+
+ | 3 | synchronization confirmed: acknowledge a finished or |
+ | | partial synchronization message. |
+ +------------+--------------------------------------------------------+
+ | 4 | Heartbeat message. |
+ +------------+--------------------------------------------------------+
+
+ About heartbeat messages: a peer sends heartbeat messages to peers it is
+ connected to after periods of 3s of inactivity (i.e. when there is no
+ stick-table to synchronize for 3s). After a successful peer protocol
+ handshake between two peers, if one of them does not send any other peer
+ protocol messages (i.e. no heartbeat and no stick-table update messages)
+ during a 5s period, it is considered as no more alive by its remote peer
+ which closes the session and then tries to reconnect to the peer which
+ has just disappeared.
+
+ *** Error message class ***
+
+ There exist two types of such error messages:
+
+ +-----------+------------------+
+ | type byte | signification |
+ +-----------+------------------+
+ | 0 | protocol error |
+ +-----------+------------------+
+ | 1 | size limit error |
+ +-----------+------------------+
+
+
+ *** Stick-table update message class ***
+
+ This class is the more important one because it is in relation with the
+ stick-table entries handling between peers which is at the core of peers
+ protocol.
+
+ All the messages of this class are variable-length. Their type bytes are
+ all greater than or equal to 128.
+
+ There exist five types of such stick-table update messages:
+
+ +-----------+--------------------------------+
+ | type byte | signification |
+ +-----------+--------------------------------+
+ | 128 | Entry update |
+ +-----------+--------------------------------+
+ | 129 | Incremental entry update |
+ +-----------+--------------------------------+
+ | 130 | Stick-table definition |
+ +-----------+--------------------------------+
+ | 131 | Stick-table switch (unused) |
+ +-----------+--------------------------------+
+ | 133 | Update message acknowledgement |
+ +-----------+--------------------------------+
+
+ Note that entry update messages may be multiplexed. This means that different
+ entry update messages for different stick-tables may be sent over the same
+ peer session.
+
+ To do so, each time entry update messages have to be sent, they must be preceded
+ by a stick-table definition message. This remains true for incremental entry
+ update messages.
+
+ As its name indicates, "Update message acknowledgement" messages are used to
+ acknowledge the entry update messages.
+
+ In this following paragraph, we give some information about the format of
+ each stick-table update messages. This very simple following legend will
+ contribute in understanding it. The unit used is the octet.
+
+ XX
+ +-----------+
+ | foo | Unique fixed sized "foo" field, made of XX octets.
+ +-----------+
+
+ +===========+
+ | foo | Variable-length "foo" field.
+ +===========+
+
+ +xxxxxxxxxxx+
+ | foo | Encoded variable-length "foo" field.
+ +xxxxxxxxxxx+
+
+ +###########+
+ | foo | hereunder described "foo" field.
+ +###########+
+
+
+ With this legend, all the stick-table update messages have such a header:
+
+ 1 1
+ +--------------------+------------------------+xxxxxxxxxxxxxxxx+
+ | Message Class (10) | Message type (128-133) | Message length |
+ +--------------------+------------------------+xxxxxxxxxxxxxxxx+
+
+ Note that to help in making communicate different versions of peers protocol,
+ such stick-table update messages may be extended adding non mandatory
+ fields at the end of such messages, announcing a total message length
+ which is greater than the message length of the previous versions of
+ peers protocol. After having parsed such messages, the remaining ones
+ will be skipped to parse the next message.
+
+ - Definition message format:
+
+ Before sending entry update messages, a peer must announce the configuration
+ of the stick-table in relation with these messages thanks to a
+ "Stick-table definition" message with such a following format:
+
+ +xxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxx+==================+
+ | Stick-table ID | Stick-table name length | Stick-table name |
+ +xxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxx+==================+
+
+ +xxxxxxxxxxxx+xxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxx+
+ | Key type | Key length | Data types bitfield | Expiry |
+ +xxxxxxxxxxxx+xxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxx+
+
+ +xxxxxxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+
+ | Frequency counter #1 | Frequency counter #1 period |
+ +xxxxxxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+
+
+ +xxxxxxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+
+ | Frequency counter #2 | Frequency counter #2 period |
+ +xxxxxxxxxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+
+ .
+ .
+ .
+
+ Note that "Stick-table ID" field is an encoded integer which is used to
+ identify the stick-table without using its name (or "Stick-table name"
+ field). It is local to the process handling the stick-table. So we can have
+ two peers attached to processes which generate stick-table updates for
+ the same stick-table (same name) but with different stick-table IDs.
+
+ Also note that the list of "Frequency counter #X" and their associated
+ periods fields exists only if their underlying types are already defined
+ in "Data types bitfield" field.
+
+ "Expiry" field and the remaining ones are not used by all the existing
+ version of haproxy peers. But they are MANDATORY, so that to make a
+ stick-table aggregator peer be able to autoconfigure itself.
+
+
+ - Entry update message format:
+ 4
+ +-----------------+###########+############+
+ | Local update ID | Key | Data |
+ +-----------------+###########+############+
+
+ with "Key" described as follows:
+
+ +xxxxxxxxxxx+=======+
+ | length | value | if key type is (non null terminated) "string",
+ +xxxxxxxxxxx+=======+
+
+ 4
+ +-------+
+ | value | if key type is "integer",
+ +-------+
+
+ +=======+
+ | value | for other key types: the size is announced in
+ +=======+ the previous stick-table definition message.
+
+ "Data" field is basically a list of encoded values for each type announced
+ by the "Data types bitfield" field of the previous "Stick-table definition"
+ message:
+
+ +xxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxx+ +xxxxxxxxxxxxxxxxxxxx+
+ | Data type #1 value | Data type #2 value | .... | Data type #n value |
+ +xxxxxxxxxxxxxxxxxxxx+xxxxxxxxxxxxxxxxxxxx+ +xxxxxxxxxxxxxxxxxxxx+
+
+
+ Most of these fields are internally stored as uint32_t (see STD_T_SINT,
+ STD_T_UINT, STD_T_ULL C enumerations) or structures made of several uint32_t
+ (see STD_T_FRQP C enumeration). The remaining one STD_T_DICT is internally
+ used to store entries of LRU caches for others literal dictionary entries
+ (couples of IDs associated to strings). It is used to transmit these cache
+ entries as follows:
+
+ +xxxxxxxxxxx+xxxx+xxxxxxxxxxxxxxx+========+
+ | length | ID | string length | string |
+ +xxxxxxxxxxx+xxxx+xxxxxxxxxxxxxxx+========+
+
+ "length" is the length in bytes of the remaining data after this "length" field.
+ "string length" is the length of "string" field which follows.
+
+ Here the cache is used so that not to have to send again and again an already
+ sent string. Indeed, the second time we have to send the same dictionary entry,
+ if still cached, a peer sends only its ID:
+
+ +xxxxxxxxxxx+xxxx+
+ | length | ID |
+ +xxxxxxxxxxx+xxxx+
+
+ - Update message acknowledgement format:
+
+ These messages are responses to "Entry update" messages only.
+
+ Its format is very basic for efficiency reasons:
+
+ 4
+ +xxxxxxxxxxxxxxxx+-----------+
+ | Stick-table ID | Update ID |
+ +xxxxxxxxxxxxxxxx+-----------+
+
+
+ Note that the "Stick-table ID" field value is in relation with the one which
+ has been previously announced by a "Stick-table definition" message.
+
+ The following schema may help in understanding how to handle a stream of
+ stick-table update messages. The handshake step is not represented.
+ Stick-table IDs are preceded by a '#' character.
+
+
+ Peer A Peer B
+
+ stkt def. #1
+ ---------------------->
+ updates (1-5)
+ ---------------------->
+ stkt def. #3
+ ---------------------->
+ updates (1000-1005)
+ ---------------------->
+
+ stkt def. #2
+ <----------------------
+ updates (10-15)
+ <----------------------
+ ack 5 for #1
+ <----------------------
+ ack 1005 for #3
+ <----------------------
+ stkt def. #4
+ <----------------------
+ updates (100-105)
+ <----------------------
+
+ ack 10 for #2
+ ---------------------->
+ ack 105 for #4
+ ---------------------->
+ (from here, on both sides, all stick-table updates
+ are considered as received)
+
diff --git a/doc/proxy-protocol.txt b/doc/proxy-protocol.txt
new file mode 100644
index 0000000..fac0331
--- /dev/null
+++ b/doc/proxy-protocol.txt
@@ -0,0 +1,1051 @@
+2020/03/05 Willy Tarreau
+ HAProxy Technologies
+ The PROXY protocol
+ Versions 1 & 2
+
+Abstract
+
+ The PROXY protocol provides a convenient way to safely transport connection
+ information such as a client's address across multiple layers of NAT or TCP
+ proxies. It is designed to require little changes to existing components and
+ to limit the performance impact caused by the processing of the transported
+ information.
+
+
+Revision history
+
+ 2010/10/29 - first version
+ 2011/03/20 - update: implementation and security considerations
+ 2012/06/21 - add support for binary format
+ 2012/11/19 - final review and fixes
+ 2014/05/18 - modify and extend PROXY protocol version 2
+ 2014/06/11 - fix example code to consider ver+cmd merge
+ 2014/06/14 - fix v2 header check in example code, and update Forwarded spec
+ 2014/07/12 - update list of implementations (add Squid)
+ 2015/05/02 - update list of implementations and format of the TLV add-ons
+ 2017/03/10 - added the checksum, noop and more SSL-related TLV types,
+ reserved TLV type ranges, added TLV documentation, clarified
+ string encoding. With contributions from Andriy Palamarchuk
+ (Amazon.com).
+ 2020/03/05 - added the unique ID TLV type (Tim Düsterhus)
+
+
+1. Background
+
+Relaying TCP connections through proxies generally involves a loss of the
+original TCP connection parameters such as source and destination addresses,
+ports, and so on. Some protocols make it a little bit easier to transfer such
+information. For SMTP, Postfix authors have proposed the XCLIENT protocol [1]
+which received broad adoption and is particularly suited to mail exchanges.
+For HTTP, there is the "Forwarded" extension [2], which aims at replacing the
+omnipresent "X-Forwarded-For" header which carries information about the
+original source address, and the less common X-Original-To which carries
+information about the destination address.
+
+However, both mechanisms require a knowledge of the underlying protocol to be
+implemented in intermediaries.
+
+Then comes a new class of products which we'll call "dumb proxies", not because
+they don't do anything, but because they're processing protocol-agnostic data.
+Both Stunnel[3] and Stud[4] are examples of such "dumb proxies". They talk raw
+TCP on one side, and raw SSL on the other one, and do that reliably, without
+any knowledge of what protocol is transported on top of the connection. HAProxy
+running in pure TCP mode obviously falls into that category as well.
+
+The problem with such a proxy when it is combined with another one such as
+haproxy, is to adapt it to talk the higher level protocol. A patch is available
+for Stunnel to make it capable of inserting an X-Forwarded-For header in the
+first HTTP request of each incoming connection. HAProxy is able not to add
+another one when the connection comes from Stunnel, so that it's possible to
+hide it from the servers.
+
+The typical architecture becomes the following one :
+
+
+ +--------+ HTTP :80 +----------+
+ | client | --------------------------------> | |
+ | | | haproxy, |
+ +--------+ +---------+ | 1 or 2 |
+ / / HTTPS | stunnel | HTTP :81 | listening|
+ <________/ ---------> | (server | ---------> | ports |
+ | mode) | | |
+ +---------+ +----------+
+
+
+The problem appears when haproxy runs with keep-alive on the side towards the
+client. The Stunnel patch will only add the X-Forwarded-For header to the first
+request of each connection and all subsequent requests will not have it. One
+solution could be to improve the patch to make it support keep-alive and parse
+all forwarded data, whether they're announced with a Content-Length or with a
+Transfer-Encoding, taking care of special methods such as HEAD which announce
+data without transferring them, etc... In fact, it would require implementing a
+full HTTP stack in Stunnel. It would then become a lot more complex, a lot less
+reliable and would not anymore be the "dumb proxy" that fits every purposes.
+
+In practice, we don't need to add a header for each request because we'll emit
+the exact same information every time : the information related to the client
+side connection. We could then cache that information in haproxy and use it for
+every other request. But that becomes dangerous and is still limited to HTTP
+only.
+
+Another approach consists in prepending each connection with a header reporting
+the characteristics of the other side's connection. This method is simpler to
+implement, does not require any protocol-specific knowledge on either side, and
+completely fits the purpose since what is desired precisely is to know the
+other side's connection endpoints. It is easy to perform for the sender (just
+send a short header once the connection is established) and to parse for the
+receiver (simply perform one read() on the incoming connection to fill in
+addresses after an accept). The protocol used to carry connection information
+across proxies was thus called the PROXY protocol.
+
+
+2. The PROXY protocol header
+
+This document uses a few terms that are worth explaining here :
+ - "connection initiator" is the party requesting a new connection
+ - "connection target" is the party accepting a connection request
+ - "client" is the party for which a connection was requested
+ - "server" is the party to which the client desired to connect
+ - "proxy" is the party intercepting and relaying the connection
+ from the client to the server.
+ - "sender" is the party sending data over a connection.
+ - "receiver" is the party receiving data from the sender.
+ - "header" or "PROXY protocol header" is the block of connection information
+ the connection initiator prepends at the beginning of a connection, which
+ makes it the sender from the protocol point of view.
+
+The PROXY protocol's goal is to fill the server's internal structures with the
+information collected by the proxy that the server would have been able to get
+by itself if the client was connecting directly to the server instead of via a
+proxy. The information carried by the protocol are the ones the server would
+get using getsockname() and getpeername() :
+ - address family (AF_INET for IPv4, AF_INET6 for IPv6, AF_UNIX)
+ - socket protocol (SOCK_STREAM for TCP, SOCK_DGRAM for UDP)
+ - layer 3 source and destination addresses
+ - layer 4 source and destination ports if any
+
+Unlike the XCLIENT protocol, the PROXY protocol was designed with limited
+extensibility in order to help the receiver parse it very fast. Version 1 was
+focused on keeping it human-readable for better debugging possibilities, which
+is always desirable for early adoption when few implementations exist. Version
+2 adds support for a binary encoding of the header which is much more efficient
+to produce and to parse, especially when dealing with IPv6 addresses that are
+expensive to emit in ASCII form and to parse.
+
+In both cases, the protocol simply consists in an easily parsable header placed
+by the connection initiator at the beginning of each connection. The protocol
+is intentionally stateless in that it does not expect the sender to wait for
+the receiver before sending the header, nor the receiver to send anything back.
+
+This specification supports two header formats, a human-readable format which
+is the only format supported in version 1 of the protocol, and a binary format
+which is only supported in version 2. Both formats were designed to ensure that
+the header cannot be confused with common higher level protocols such as HTTP,
+SSL/TLS, FTP or SMTP, and that both formats are easily distinguishable one from
+each other for the receiver.
+
+Version 1 senders MAY only produce the human-readable header format. Version 2
+senders MAY only produce the binary header format. Version 1 receivers MUST at
+least implement the human-readable header format. Version 2 receivers MUST at
+least implement the binary header format, and it is recommended that they also
+implement the human-readable header format for better interoperability and ease
+of upgrade when facing version 1 senders.
+
+Both formats are designed to fit in the smallest TCP segment that any TCP/IP
+host is required to support (576 - 40 = 536 bytes). This ensures that the whole
+header will always be delivered at once when the socket buffers are still empty
+at the beginning of a connection. The sender must always ensure that the header
+is sent at once, so that the transport layer maintains atomicity along the path
+to the receiver. The receiver may be tolerant to partial headers or may simply
+drop the connection when receiving a partial header. Recommendation is to be
+tolerant, but implementation constraints may not always easily permit this. It
+is important to note that nothing forces any intermediary to forward the whole
+header at once, because TCP is a streaming protocol which may be processed one
+byte at a time if desired, causing the header to be fragmented when reaching
+the receiver. But due to the places where such a protocol is used, the above
+simplification generally is acceptable because the risk of crossing such a
+device handling one byte at a time is close to zero.
+
+The receiver MUST NOT start processing the connection before it receives a
+complete and valid PROXY protocol header. This is particularly important for
+protocols where the receiver is expected to speak first (eg: SMTP, FTP or SSH).
+The receiver may apply a short timeout and decide to abort the connection if
+the protocol header is not seen within a few seconds (at least 3 seconds to
+cover a TCP retransmit).
+
+The receiver MUST be configured to only receive the protocol described in this
+specification and MUST not try to guess whether the protocol header is present
+or not. This means that the protocol explicitly prevents port sharing between
+public and private access. Otherwise it would open a major security breach by
+allowing untrusted parties to spoof their connection addresses. The receiver
+SHOULD ensure proper access filtering so that only trusted proxies are allowed
+to use this protocol.
+
+Some proxies are smart enough to understand transported protocols and to reuse
+idle server connections for multiple messages. This typically happens in HTTP
+where requests from multiple clients may be sent over the same connection. Such
+proxies MUST NOT implement this protocol on multiplexed connections because the
+receiver would use the address advertised in the PROXY header as the address of
+all forwarded requests' senders. In fact, such proxies are not dumb proxies,
+and since they do have a complete understanding of the transported protocol,
+they MUST use the facilities provided by this protocol to present the client's
+address.
+
+
+2.1. Human-readable header format (Version 1)
+
+This is the format specified in version 1 of the protocol. It consists in one
+line of US-ASCII text matching exactly the following block, sent immediately
+and at once upon the connection establishment and prepended before any data
+flowing from the sender to the receiver :
+
+ - a string identifying the protocol : "PROXY" ( \x50 \x52 \x4F \x58 \x59 )
+ Seeing this string indicates that this is version 1 of the protocol.
+
+ - exactly one space : " " ( \x20 )
+
+ - a string indicating the proxied INET protocol and family. As of version 1,
+ only "TCP4" ( \x54 \x43 \x50 \x34 ) for TCP over IPv4, and "TCP6"
+ ( \x54 \x43 \x50 \x36 ) for TCP over IPv6 are allowed. Other, unsupported,
+ or unknown protocols must be reported with the name "UNKNOWN" ( \x55 \x4E
+ \x4B \x4E \x4F \x57 \x4E ). For "UNKNOWN", the rest of the line before the
+ CRLF may be omitted by the sender, and the receiver must ignore anything
+ presented before the CRLF is found. Note that an earlier version of this
+ specification suggested to use this when sending health checks, but this
+ causes issues with servers that reject the "UNKNOWN" keyword. Thus it is
+ now recommended not to send "UNKNOWN" when the connection is expected to
+ be accepted, but only when it is not possible to correctly fill the PROXY
+ line.
+
+ - exactly one space : " " ( \x20 )
+
+ - the layer 3 source address in its canonical format. IPv4 addresses must be
+ indicated as a series of exactly 4 integers in the range [0..255] inclusive
+ written in decimal representation separated by exactly one dot between each
+ other. Heading zeroes are not permitted in front of numbers in order to
+ avoid any possible confusion with octal numbers. IPv6 addresses must be
+ indicated as series of sets of 4 hexadecimal digits (upper or lower case)
+ delimited by colons between each other, with the acceptance of one double
+ colon sequence to replace the largest acceptable range of consecutive
+ zeroes. The total number of decoded bits must exactly be 128. The
+ advertised protocol family dictates what format to use.
+
+ - exactly one space : " " ( \x20 )
+
+ - the layer 3 destination address in its canonical format. It is the same
+ format as the layer 3 source address and matches the same family.
+
+ - exactly one space : " " ( \x20 )
+
+ - the TCP source port represented as a decimal integer in the range
+ [0..65535] inclusive. Heading zeroes are not permitted in front of numbers
+ in order to avoid any possible confusion with octal numbers.
+
+ - exactly one space : " " ( \x20 )
+
+ - the TCP destination port represented as a decimal integer in the range
+ [0..65535] inclusive. Heading zeroes are not permitted in front of numbers
+ in order to avoid any possible confusion with octal numbers.
+
+ - the CRLF sequence ( \x0D \x0A )
+
+
+The maximum line lengths the receiver must support including the CRLF are :
+ - TCP/IPv4 :
+ "PROXY TCP4 255.255.255.255 255.255.255.255 65535 65535\r\n"
+ => 5 + 1 + 4 + 1 + 15 + 1 + 15 + 1 + 5 + 1 + 5 + 2 = 56 chars
+
+ - TCP/IPv6 :
+ "PROXY TCP6 ffff:f...f:ffff ffff:f...f:ffff 65535 65535\r\n"
+ => 5 + 1 + 4 + 1 + 39 + 1 + 39 + 1 + 5 + 1 + 5 + 2 = 104 chars
+
+ - unknown connection (short form) :
+ "PROXY UNKNOWN\r\n"
+ => 5 + 1 + 7 + 2 = 15 chars
+
+ - worst case (optional fields set to 0xff) :
+ "PROXY UNKNOWN ffff:f...f:ffff ffff:f...f:ffff 65535 65535\r\n"
+ => 5 + 1 + 7 + 1 + 39 + 1 + 39 + 1 + 5 + 1 + 5 + 2 = 107 chars
+
+So a 108-byte buffer is always enough to store all the line and a trailing zero
+for string processing.
+
+The receiver must wait for the CRLF sequence before starting to decode the
+addresses in order to ensure they are complete and properly parsed. If the CRLF
+sequence is not found in the first 107 characters, the receiver should declare
+the line invalid. A receiver may reject an incomplete line which does not
+contain the CRLF sequence in the first atomic read operation. The receiver must
+not tolerate a single CR or LF character to end the line when a complete CRLF
+sequence is expected.
+
+Any sequence which does not exactly match the protocol must be discarded and
+cause the receiver to abort the connection. It is recommended to abort the
+connection as soon as possible so that the sender gets a chance to notice the
+anomaly and log it.
+
+If the announced transport protocol is "UNKNOWN", then the receiver knows that
+the sender speaks the correct PROXY protocol with the appropriate version, and
+SHOULD accept the connection and use the real connection's parameters as if
+there were no PROXY protocol header on the wire. However, senders SHOULD not
+use the "UNKNOWN" protocol when they are the initiators of outgoing connections
+because some receivers may reject them. When a load balancing proxy has to send
+health checks to a server, it SHOULD build a valid PROXY line which it will
+fill with a getsockname()/getpeername() pair indicating the addresses used. It
+is important to understand that doing so is not appropriate when some source
+address translation is performed between the sender and the receiver.
+
+An example of such a line before an HTTP request would look like this (CR
+marked as "\r" and LF marked as "\n") :
+
+ PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
+ GET / HTTP/1.1\r\n
+ Host: 192.168.0.11\r\n
+ \r\n
+
+For the sender, the header line is easy to put into the output buffers once the
+connection is established. Note that since the line is always shorter than an
+MSS, the sender is guaranteed to always be able to emit it at once and should
+not even bother handling partial sends. For the receiver, once the header is
+parsed, it is easy to skip it from the input buffers. Please consult section 9
+for implementation suggestions.
+
+
+2.2. Binary header format (version 2)
+
+Producing human-readable IPv6 addresses and parsing them is very inefficient,
+due to the multiple possible representation formats and the handling of compact
+address format. It was also not possible to specify address families outside
+IPv4/IPv6 nor non-TCP protocols. Another drawback of the human-readable format
+is the fact that implementations need to parse all characters to find the
+trailing CRLF, which makes it harder to read only the exact bytes count. Last,
+the UNKNOWN address type has not always been accepted by servers as a valid
+protocol because of its imprecise meaning.
+
+Version 2 of the protocol thus introduces a new binary format which remains
+distinguishable from version 1 and from other commonly used protocols. It was
+specially designed in order to be incompatible with a wide range of protocols
+and to be rejected by a number of common implementations of these protocols
+when unexpectedly presented (please see section 7). Also for better processing
+efficiency, IPv4 and IPv6 addresses are respectively aligned on 4 and 16 bytes
+boundaries.
+
+The binary header format starts with a constant 12 bytes block containing the
+protocol signature :
+
+ \x0D \x0A \x0D \x0A \x00 \x0D \x0A \x51 \x55 \x49 \x54 \x0A
+
+Note that this block contains a null byte at the 5th position, so it must not
+be handled as a null-terminated string.
+
+The next byte (the 13th one) is the protocol version and command.
+
+The highest four bits contain the version. As of this specification, it must
+always be sent as \x2 and the receiver must only accept this value.
+
+The lowest four bits represent the command :
+ - \x0 : LOCAL : the connection was established on purpose by the proxy
+ without being relayed. The connection endpoints are the sender and the
+ receiver. Such connections exist when the proxy sends health-checks to the
+ server. The receiver must accept this connection as valid and must use the
+ real connection endpoints and discard the protocol block including the
+ family which is ignored.
+
+ - \x1 : PROXY : the connection was established on behalf of another node,
+ and reflects the original connection endpoints. The receiver must then use
+ the information provided in the protocol block to get the original address.
+
+ - other values are unassigned and must not be emitted by senders. Receivers
+ must drop connections presenting unexpected values here.
+
+The 14th byte contains the transport protocol and address family. The highest 4
+bits contain the address family, the lowest 4 bits contain the protocol.
+
+The address family maps to the original socket family without necessarily
+matching the values internally used by the system. It may be one of :
+
+ - 0x0 : AF_UNSPEC : the connection is forwarded for an unknown, unspecified
+ or unsupported protocol. The sender should use this family when sending
+ LOCAL commands or when dealing with unsupported protocol families. The
+ receiver is free to accept the connection anyway and use the real endpoint
+ addresses or to reject it. The receiver should ignore address information.
+
+ - 0x1 : AF_INET : the forwarded connection uses the AF_INET address family
+ (IPv4). The addresses are exactly 4 bytes each in network byte order,
+ followed by transport protocol information (typically ports).
+
+ - 0x2 : AF_INET6 : the forwarded connection uses the AF_INET6 address family
+ (IPv6). The addresses are exactly 16 bytes each in network byte order,
+ followed by transport protocol information (typically ports).
+
+ - 0x3 : AF_UNIX : the forwarded connection uses the AF_UNIX address family
+ (UNIX). The addresses are exactly 108 bytes each.
+
+ - other values are unspecified and must not be emitted in version 2 of this
+ protocol and must be rejected as invalid by receivers.
+
+The transport protocol is specified in the lowest 4 bits of the 14th byte :
+
+ - 0x0 : UNSPEC : the connection is forwarded for an unknown, unspecified
+ or unsupported protocol. The sender should use this family when sending
+ LOCAL commands or when dealing with unsupported protocol families. The
+ receiver is free to accept the connection anyway and use the real endpoint
+ addresses or to reject it. The receiver should ignore address information.
+
+ - 0x1 : STREAM : the forwarded connection uses a SOCK_STREAM protocol (eg:
+ TCP or UNIX_STREAM). When used with AF_INET/AF_INET6 (TCP), the addresses
+ are followed by the source and destination ports represented on 2 bytes
+ each in network byte order.
+
+ - 0x2 : DGRAM : the forwarded connection uses a SOCK_DGRAM protocol (eg:
+ UDP or UNIX_DGRAM). When used with AF_INET/AF_INET6 (UDP), the addresses
+ are followed by the source and destination ports represented on 2 bytes
+ each in network byte order.
+
+ - other values are unspecified and must not be emitted in version 2 of this
+ protocol and must be rejected as invalid by receivers.
+
+In practice, the following protocol bytes are expected :
+
+ - \x00 : UNSPEC : the connection is forwarded for an unknown, unspecified
+ or unsupported protocol. The sender should use this family when sending
+ LOCAL commands or when dealing with unsupported protocol families. When
+ used with a LOCAL command, the receiver must accept the connection and
+ ignore any address information. For other commands, the receiver is free
+ to accept the connection anyway and use the real endpoint addresses or to
+ reject the connection. The receiver should ignore address information.
+
+ - \x11 : TCP over IPv4 : the forwarded connection uses TCP over the AF_INET
+ protocol family. Address length is 2*4 + 2*2 = 12 bytes.
+
+ - \x12 : UDP over IPv4 : the forwarded connection uses UDP over the AF_INET
+ protocol family. Address length is 2*4 + 2*2 = 12 bytes.
+
+ - \x21 : TCP over IPv6 : the forwarded connection uses TCP over the AF_INET6
+ protocol family. Address length is 2*16 + 2*2 = 36 bytes.
+
+ - \x22 : UDP over IPv6 : the forwarded connection uses UDP over the AF_INET6
+ protocol family. Address length is 2*16 + 2*2 = 36 bytes.
+
+ - \x31 : UNIX stream : the forwarded connection uses SOCK_STREAM over the
+ AF_UNIX protocol family. Address length is 2*108 = 216 bytes.
+
+ - \x32 : UNIX datagram : the forwarded connection uses SOCK_DGRAM over the
+ AF_UNIX protocol family. Address length is 2*108 = 216 bytes.
+
+
+Only the UNSPEC protocol byte (\x00) is mandatory to implement on the receiver.
+A receiver is not required to implement other ones, provided that it
+automatically falls back to the UNSPEC mode for the valid combinations above
+that it does not support.
+
+The 15th and 16th bytes are the address length in bytes in network byte order.
+It is used so that the receiver knows how many address bytes to skip even when
+it does not implement the presented protocol. Thus the length of the protocol
+header in bytes is always exactly 16 + this value. When a sender presents a
+LOCAL connection, it should not present any address so it sets this field to
+zero. Receivers MUST always consider this field to skip the appropriate number
+of bytes and must not assume zero is presented for LOCAL connections. When a
+receiver accepts an incoming connection showing an UNSPEC address family or
+protocol, it may or may not decide to log the address information if present.
+
+So the 16-byte version 2 header can be described this way :
+
+ struct proxy_hdr_v2 {
+ uint8_t sig[12]; /* hex 0D 0A 0D 0A 00 0D 0A 51 55 49 54 0A */
+ uint8_t ver_cmd; /* protocol version and command */
+ uint8_t fam; /* protocol family and address */
+ uint16_t len; /* number of following bytes part of the header */
+ };
+
+Starting from the 17th byte, addresses are presented in network byte order.
+The address order is always the same :
+ - source layer 3 address in network byte order
+ - destination layer 3 address in network byte order
+ - source layer 4 address if any, in network byte order (port)
+ - destination layer 4 address if any, in network byte order (port)
+
+The address block may directly be sent from or received into the following
+union which makes it easy to cast from/to the relevant socket native structs
+depending on the address type :
+
+ union proxy_addr {
+ struct { /* for TCP/UDP over IPv4, len = 12 */
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ipv4_addr;
+ struct { /* for TCP/UDP over IPv6, len = 36 */
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ipv6_addr;
+ struct { /* for AF_UNIX sockets, len = 216 */
+ uint8_t src_addr[108];
+ uint8_t dst_addr[108];
+ } unix_addr;
+ };
+
+The sender must ensure that all the protocol header is sent at once. This block
+is always smaller than an MSS, so there is no reason for it to be segmented at
+the beginning of the connection. The receiver should also process the header
+at once. The receiver must not start to parse an address before the whole
+address block is received. The receiver must also reject incoming connections
+containing partial protocol headers.
+
+A receiver may be configured to support both version 1 and version 2 of the
+protocol. Identifying the protocol version is easy :
+
+ - if the incoming byte count is 16 or above and the first 13 bytes match
+ the protocol signature block followed by the protocol version 2 :
+
+ \x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A\x20
+
+ - otherwise, if the incoming byte count is 8 or above, and the first 5
+ characters match the US-ASCII representation of "PROXY" then the protocol
+ must be parsed as version 1 :
+
+ \x50\x52\x4F\x58\x59
+
+ - otherwise the protocol is not covered by this specification and the
+ connection must be dropped.
+
+If the length specified in the PROXY protocol header indicates that additional
+bytes are part of the header beyond the address information, a receiver may
+choose to skip over and ignore those bytes, or attempt to interpret those
+bytes.
+
+The information in those bytes will be arranged in Type-Length-Value (TLV
+vectors) in the following format. The first byte is the Type of the vector.
+The second two bytes represent the length in bytes of the value (not including
+the Type and Length bytes), and following the length field is the number of
+bytes specified by the length.
+
+ struct pp2_tlv {
+ uint8_t type;
+ uint8_t length_hi;
+ uint8_t length_lo;
+ uint8_t value[0];
+ };
+
+A receiver may choose to skip over and ignore the TLVs it is not interested in
+or it does not understand. Senders can generate the TLVs only for
+the information they choose to publish.
+
+The following types have already been registered for the <type> field :
+
+ #define PP2_TYPE_ALPN 0x01
+ #define PP2_TYPE_AUTHORITY 0x02
+ #define PP2_TYPE_CRC32C 0x03
+ #define PP2_TYPE_NOOP 0x04
+ #define PP2_TYPE_UNIQUE_ID 0x05
+ #define PP2_TYPE_SSL 0x20
+ #define PP2_SUBTYPE_SSL_VERSION 0x21
+ #define PP2_SUBTYPE_SSL_CN 0x22
+ #define PP2_SUBTYPE_SSL_CIPHER 0x23
+ #define PP2_SUBTYPE_SSL_SIG_ALG 0x24
+ #define PP2_SUBTYPE_SSL_KEY_ALG 0x25
+ #define PP2_TYPE_NETNS 0x30
+
+
+2.2.1 PP2_TYPE_ALPN
+
+Application-Layer Protocol Negotiation (ALPN). It is a byte sequence defining
+the upper layer protocol in use over the connection. The most common use case
+will be to pass the exact copy of the ALPN extension of the Transport Layer
+Security (TLS) protocol as defined by RFC7301 [9].
+
+
+2.2.2 PP2_TYPE_AUTHORITY
+
+Contains the host name value passed by the client, as an UTF8-encoded string.
+In case of TLS being used on the client connection, this is the exact copy of
+the "server_name" extension as defined by RFC3546 [10], section 3.1, often
+referred to as "SNI". There are probably other situations where an authority
+can be mentioned on a connection without TLS being involved at all.
+
+
+2.2.3. PP2_TYPE_CRC32C
+
+The value of the type PP2_TYPE_CRC32C is a 32-bit number storing the CRC32c
+checksum of the PROXY protocol header.
+
+When the checksum is supported by the sender after constructing the header
+the sender MUST:
+
+ - initialize the checksum field to '0's.
+
+ - calculate the CRC32c checksum of the PROXY header as described in RFC4960,
+ Appendix B [8].
+
+ - put the resultant value into the checksum field, and leave the rest of
+ the bits unchanged.
+
+If the checksum is provided as part of the PROXY header and the checksum
+functionality is supported by the receiver, the receiver MUST:
+
+ - store the received CRC32c checksum value aside.
+
+ - replace the 32 bits of the checksum field in the received PROXY header with
+ all '0's and calculate a CRC32c checksum value of the whole PROXY header.
+
+ - verify that the calculated CRC32c checksum is the same as the received
+ CRC32c checksum. If it is not, the receiver MUST treat the TCP connection
+ providing the header as invalid.
+
+The default procedure for handling an invalid TCP connection is to abort it.
+
+
+2.2.4. PP2_TYPE_NOOP
+
+The TLV of this type should be ignored when parsed. The value is zero or more
+bytes. Can be used for data padding or alignment. Note that it can be used
+to align only by 3 or more bytes because a TLV can not be smaller than that.
+
+
+2.2.5. PP2_TYPE_UNIQUE_ID
+
+The value of the type PP2_TYPE_UNIQUE_ID is an opaque byte sequence of up to
+128 bytes generated by the upstream proxy that uniquely identifies the
+connection.
+
+The unique ID can be used to easily correlate connections across multiple
+layers of proxies, without needing to look up IP addresses and port numbers.
+
+
+2.2.6. The PP2_TYPE_SSL type and subtypes
+
+For the type PP2_TYPE_SSL, the value is itself defined like this :
+
+ struct pp2_tlv_ssl {
+ uint8_t client;
+ uint32_t verify;
+ struct pp2_tlv sub_tlv[0];
+ };
+
+The <verify> field will be zero if the client presented a certificate
+and it was successfully verified, and non-zero otherwise.
+
+The <client> field is made of a bit field from the following values,
+indicating which element is present :
+
+ #define PP2_CLIENT_SSL 0x01
+ #define PP2_CLIENT_CERT_CONN 0x02
+ #define PP2_CLIENT_CERT_SESS 0x04
+
+Note, that each of these elements may lead to extra data being appended to
+this TLV using a second level of TLV encapsulation. It is thus possible to
+find multiple TLV values after this field. The total length of the pp2_tlv_ssl
+TLV will reflect this.
+
+The PP2_CLIENT_SSL flag indicates that the client connected over SSL/TLS. When
+this field is present, the US-ASCII string representation of the TLS version is
+appended at the end of the field in the TLV format using the type
+PP2_SUBTYPE_SSL_VERSION.
+
+PP2_CLIENT_CERT_CONN indicates that the client provided a certificate over the
+current connection. PP2_CLIENT_CERT_SESS indicates that the client provided a
+certificate at least once over the TLS session this connection belongs to.
+
+The second level TLV PP2_SUBTYPE_SSL_CIPHER provides the US-ASCII string name
+of the used cipher, for example "ECDHE-RSA-AES128-GCM-SHA256".
+
+The second level TLV PP2_SUBTYPE_SSL_SIG_ALG provides the US-ASCII string name
+of the algorithm used to sign the certificate presented by the frontend when
+the incoming connection was made over an SSL/TLS transport layer, for example
+"SHA256".
+
+The second level TLV PP2_SUBTYPE_SSL_KEY_ALG provides the US-ASCII string name
+of the algorithm used to generate the key of the certificate presented by the
+frontend when the incoming connection was made over an SSL/TLS transport layer,
+for example "RSA2048".
+
+In all cases, the string representation (in UTF8) of the Common Name field
+(OID: 2.5.4.3) of the client certificate's Distinguished Name, is appended
+using the TLV format and the type PP2_SUBTYPE_SSL_CN. E.g. "example.com".
+
+
+2.2.7. The PP2_TYPE_NETNS type
+
+The type PP2_TYPE_NETNS defines the value as the US-ASCII string representation
+of the namespace's name.
+
+
+2.2.8. Reserved type ranges
+
+The following range of 16 type values is reserved for application-specific
+data and will never be used by the PROXY Protocol. If you need more values
+consider extending the range with a type field in your TLVs.
+
+ #define PP2_TYPE_MIN_CUSTOM 0xE0
+ #define PP2_TYPE_MAX_CUSTOM 0xEF
+
+This range of 8 values is reserved for temporary experimental use by
+application developers and protocol designers. The values from the range will
+never be used by the PROXY protocol and should not be used by production
+functionality.
+
+ #define PP2_TYPE_MIN_EXPERIMENT 0xF0
+ #define PP2_TYPE_MAX_EXPERIMENT 0xF7
+
+The following range of 8 values is reserved for future use, potentially to
+extend the protocol with multibyte type values.
+
+ #define PP2_TYPE_MIN_FUTURE 0xF8
+ #define PP2_TYPE_MAX_FUTURE 0xFF
+
+
+3. Implementations
+
+HAProxy 1.5 implements version 1 of the PROXY protocol on both sides :
+ - the listening sockets accept the protocol when the "accept-proxy" setting
+ is passed to the "bind" keyword. Connections accepted on such listeners
+ will behave just as if the source really was the one advertised in the
+ protocol. This is true for logging, ACLs, content filtering, transparent
+ proxying, etc...
+
+ - the protocol may be used to connect to servers if the "send-proxy" setting
+ is present on the "server" line. It is enabled on a per-server basis, so it
+ is possible to have it enabled for remote servers only and still have local
+ ones behave differently. If the incoming connection was accepted with the
+ "accept-proxy", then the relayed information is the one advertised in this
+ connection's PROXY line.
+
+ - HAProxy 1.5 also implements version 2 of the PROXY protocol as a sender. In
+ addition, a TLV with limited, optional, SSL information has been added.
+
+Stunnel added support for version 1 of the protocol for outgoing connections in
+version 4.45.
+
+Stud added support for version 1 of the protocol for outgoing connections on
+2011/06/29.
+
+Postfix added support for version 1 of the protocol for incoming connections
+in smtpd and postscreen in version 2.10.
+
+A patch is available for Stud[5] to implement version 1 of the protocol on
+incoming connections.
+
+Support for versions 1 and 2 of the protocol was added to Varnish 4.1 [6].
+
+Exim added support for version 1 and version 2 of the protocol for incoming
+connections on 2014/05/13, and will be released as part of version 4.83.
+
+Squid added support for versions 1 and 2 of the protocol in version 3.5 [7].
+
+Jetty 9.3.0 supports protocol version 1.
+
+lighttpd added support for versions 1 and 2 of the protocol for incoming
+connections in version 1.4.46 [11].
+
+The protocol is simple enough that it is expected that other implementations
+will appear, especially in environments such as SMTP, IMAP, FTP, RDP where the
+client's address is an important piece of information for the server and some
+intermediaries. In fact, several proprietary deployments have already done so
+on FTP and SMTP servers.
+
+Proxy developers are encouraged to implement this protocol, because it will
+make their products much more transparent in complex infrastructures, and will
+get rid of a number of issues related to logging and access control.
+
+
+4. Architectural benefits
+4.1. Multiple layers
+
+Using the PROXY protocol instead of transparent proxy provides several benefits
+in multiple-layer infrastructures. The first immediate benefit is that it
+becomes possible to chain multiple layers of proxies and always present the
+original IP address. For instance, let's consider the following 2-layer proxy
+architecture :
+
+ Internet
+ ,---. | client to PX1:
+ ( X ) | native protocol
+ `---' |
+ | V
+ +--+--+ +-----+
+ | FW1 |------| PX1 |
+ +--+--+ +-----+ | PX1 to PX2: PROXY + native
+ | V
+ +--+--+ +-----+
+ | FW2 |------| PX2 |
+ +--+--+ +-----+ | PX2 to SRV: PROXY + native
+ | V
+ +--+--+
+ | SRV |
+ +-----+
+
+Firewall FW1 receives traffic from internet-based clients and forwards it to
+reverse-proxy PX1. PX1 adds a PROXY header then forwards to PX2 via FW2. PX2
+is configured to read the PROXY header and to emit it on output. It then joins
+the origin server SRV and presents the original client's address there. Since
+all TCP connections endpoints are real machines and are not spoofed, there is
+no issue for the return traffic to pass via the firewalls and reverse proxies.
+Using transparent proxy, this would be quite difficult because the firewalls
+would have to deal with the client's address coming from the proxies in the DMZ
+and would have to correctly route the return traffic there instead of using the
+default route.
+
+
+4.2. IPv4 and IPv6 integration
+
+The protocol also eases IPv4 and IPv6 integration : if only the first layer
+(FW1 and PX1) is IPv6-capable, it is still possible to present the original
+client's IPv6 address to the target server even though the whole chain is only
+connected via IPv4.
+
+
+4.3. Multiple return paths
+
+When transparent proxy is used, it is not possible to run multiple proxies
+because the return traffic would follow the default route instead of finding
+the proper proxy. Some tricks are sometimes possible using multiple server
+addresses and policy routing but these are very limited.
+
+Using the PROXY protocol, this problem disappears as the servers don't need
+to route to the client, just to the proxy that forwarded the connection. So
+it is perfectly possible to run a proxy farm in front of a very large server
+farm and have it working effortlessly, even when dealing with multiple sites.
+
+This is particularly important in Cloud-like environments where there is little
+choice of binding to random addresses and where the lower processing power per
+node generally requires multiple front nodes.
+
+The example below illustrates the following case : virtualized infrastructures
+are deployed in 3 datacenters (DC1..DC3). Each DC uses its own VIP which is
+handled by the hosting provider's layer 3 load balancer. This load balancer
+routes the traffic to a farm of layer 7 SSL/cache offloaders which load balance
+among their local servers. The VIPs are advertised by geolocalised DNS so that
+clients generally stick to a given DC. Since clients are not guaranteed to
+stick to one DC, the L7 load balancing proxies have to know the other DCs'
+servers that may be reached via the hosting provider's LAN or via the internet.
+The L7 proxies use the PROXY protocol to join the servers behind them, so that
+even inter-DC traffic can forward the original client's address and the return
+path is unambiguous. This would not be possible using transparent proxy because
+most often the L7 proxies would not be able to spoof an address, and this would
+never work between datacenters.
+
+ Internet
+
+ DC1 DC2 DC3
+ ,---. ,---. ,---.
+ ( X ) ( X ) ( X )
+ `---' `---' `---'
+ | +-------+ | +-------+ | +-------+
+ +----| L3 LB | +----| L3 LB | +----| L3 LB |
+ | +-------+ | +-------+ | +-------+
+ ------+------- ~ ~ ~ ------+------- ~ ~ ~ ------+-------
+ ||||| |||| ||||| |||| ||||| ||||
+ 50 SRV 4 PX 50 SRV 4 PX 50 SRV 4 PX
+
+
+5. Security considerations
+
+Version 1 of the protocol header (the human-readable format) was designed so as
+to be distinguishable from HTTP. It will not parse as a valid HTTP request and
+an HTTP request will not parse as a valid proxy request. Version 2 adds a
+non-parsable binary signature to make many products fail on this block. The
+signature was designed to cause immediate failure on HTTP, SSL/TLS, SMTP, FTP,
+and POP. It also causes aborts on LDAP and RDP servers (see section 6). That
+makes it easier to enforce its use under certain connections and at the same
+time, it ensures that improperly configured servers are quickly detected.
+
+Implementers should be very careful about not trying to automatically detect
+whether they have to decode the header or not, but rather they must only rely
+on a configuration parameter. Indeed, if the opportunity is left to a normal
+client to use the protocol, it will be able to hide its activities or make them
+appear as coming from somewhere else. However, accepting the header only from a
+number of known sources should be safe.
+
+
+6. Validation
+
+The version 2 protocol signature has been sent to a wide variety of protocols
+and implementations including old ones. The following protocol and products
+have been tested to ensure the best possible behavior when the signature was
+presented, even with minimal implementations :
+
+ - HTTP :
+ - Apache 1.3.33 : connection abort => pass/optimal
+ - Nginx 0.7.69 : 400 Bad Request + abort => pass/optimal
+ - lighttpd 1.4.20 : 400 Bad Request + abort => pass/optimal
+ - thttpd 2.20c : 400 Bad Request + abort => pass/optimal
+ - mini-httpd-1.19 : 400 Bad Request + abort => pass/optimal
+ - haproxy 1.4.21 : 400 Bad Request + abort => pass/optimal
+ - Squid 3 : 400 Bad Request + abort => pass/optimal
+ - SSL :
+ - stud 0.3.47 : connection abort => pass/optimal
+ - stunnel 4.45 : connection abort => pass/optimal
+ - nginx 0.7.69 : 400 Bad Request + abort => pass/optimal
+ - FTP :
+ - Pure-ftpd 1.0.20 : 3*500 then 221 Goodbye => pass/optimal
+ - vsftpd 2.0.1 : 3*530 then 221 Goodbye => pass/optimal
+ - SMTP :
+ - postfix 2.3 : 3*500 + 221 Bye => pass/optimal
+ - exim 4.69 : 554 + connection abort => pass/optimal
+ - POP :
+ - dovecot 1.0.10 : 3*ERR + Logout => pass/optimal
+ - IMAP :
+ - dovecot 1.0.10 : 5*ERR + hang => pass/non-optimal
+ - LDAP :
+ - openldap 2.3 : abort => pass/optimal
+ - SSH :
+ - openssh 3.9p1 : abort => pass/optimal
+ - RDP :
+ - Windows XP SP3 : abort => pass/optimal
+
+This means that most protocols and implementations will not be confused by an
+incoming connection exhibiting the protocol signature, which avoids issues when
+facing misconfigurations.
+
+
+7. Future developments
+
+It is possible that the protocol may slightly evolve to present other
+information such as the incoming network interface, or the origin addresses in
+case of network address translation happening before the first proxy, but this
+is not identified as a requirement right now. Some deep thinking has been spent
+on this and it appears that trying to add more information opens a Pandora's
+box of information ranging from MAC addresses to SSL client certificates, which
+would make the protocol much more complex. So at this point it is not planned.
+Suggestions on improvements are welcome.
+
+
+8. Contacts and links
+
+Please use w@1wt.eu to send any comments to the author.
+
+The following links were referenced in the document.
+
+[1] http://www.postfix.org/XCLIENT_README.html
+[2] http://tools.ietf.org/html/rfc7239
+[3] http://www.stunnel.org/
+[4] https://github.com/bumptech/stud
+[5] https://github.com/bumptech/stud/pull/81
+[6] https://www.varnish-cache.org/docs/trunk/phk/ssl_again.html
+[7] http://wiki.squid-cache.org/Squid-3.5
+[8] https://tools.ietf.org/html/rfc4960#appendix-B
+[9] https://tools.ietf.org/rfc/rfc7301.txt
+[10] https://www.ietf.org/rfc/rfc3546.txt
+[11] https://redmine.lighttpd.net/issues/2804
+
+9. Sample code
+
+The code below is an example of how a receiver may deal with both versions of
+the protocol header for TCP over IPv4 or IPv6. The function is supposed to be
+called upon a read event. Addresses may be directly copied into their final
+memory location since they're transported in network byte order. The sending
+side is even simpler and can easily be deduced from this sample code.
+
+ struct sockaddr_storage from; /* already filled by accept() */
+ struct sockaddr_storage to; /* already filled by getsockname() */
+ const char v2sig[12] = "\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A";
+
+ /* returns 0 if needs to poll, <0 upon error or >0 if it did the job */
+ int read_evt(int fd)
+ {
+ union {
+ struct {
+ char line[108];
+ } v1;
+ struct {
+ uint8_t sig[12];
+ uint8_t ver_cmd;
+ uint8_t fam;
+ uint16_t len;
+ union {
+ struct { /* for TCP/UDP over IPv4, len = 12 */
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ip4;
+ struct { /* for TCP/UDP over IPv6, len = 36 */
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ip6;
+ struct { /* for AF_UNIX sockets, len = 216 */
+ uint8_t src_addr[108];
+ uint8_t dst_addr[108];
+ } unx;
+ } addr;
+ } v2;
+ } hdr;
+
+ int size, ret;
+
+ do {
+ ret = recv(fd, &hdr, sizeof(hdr), MSG_PEEK);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1)
+ return (errno == EAGAIN) ? 0 : -1;
+
+ if (ret >= 16 && memcmp(&hdr.v2, v2sig, 12) == 0 &&
+ (hdr.v2.ver_cmd & 0xF0) == 0x20) {
+ size = 16 + ntohs(hdr.v2.len);
+ if (ret < size)
+ return -1; /* truncated or too large header */
+
+ switch (hdr.v2.ver_cmd & 0xF) {
+ case 0x01: /* PROXY command */
+ switch (hdr.v2.fam) {
+ case 0x11: /* TCPv4 */
+ ((struct sockaddr_in *)&from)->sin_family = AF_INET;
+ ((struct sockaddr_in *)&from)->sin_addr.s_addr =
+ hdr.v2.addr.ip4.src_addr;
+ ((struct sockaddr_in *)&from)->sin_port =
+ hdr.v2.addr.ip4.src_port;
+ ((struct sockaddr_in *)&to)->sin_family = AF_INET;
+ ((struct sockaddr_in *)&to)->sin_addr.s_addr =
+ hdr.v2.addr.ip4.dst_addr;
+ ((struct sockaddr_in *)&to)->sin_port =
+ hdr.v2.addr.ip4.dst_port;
+ goto done;
+ case 0x21: /* TCPv6 */
+ ((struct sockaddr_in6 *)&from)->sin6_family = AF_INET6;
+ memcpy(&((struct sockaddr_in6 *)&from)->sin6_addr,
+ hdr.v2.addr.ip6.src_addr, 16);
+ ((struct sockaddr_in6 *)&from)->sin6_port =
+ hdr.v2.addr.ip6.src_port;
+ ((struct sockaddr_in6 *)&to)->sin6_family = AF_INET6;
+ memcpy(&((struct sockaddr_in6 *)&to)->sin6_addr,
+ hdr.v2.addr.ip6.dst_addr, 16);
+ ((struct sockaddr_in6 *)&to)->sin6_port =
+ hdr.v2.addr.ip6.dst_port;
+ goto done;
+ }
+ /* unsupported protocol, keep local connection address */
+ break;
+ case 0x00: /* LOCAL command */
+ /* keep local connection address for LOCAL */
+ break;
+ default:
+ return -1; /* not a supported command */
+ }
+ }
+ else if (ret >= 8 && memcmp(hdr.v1.line, "PROXY", 5) == 0) {
+ char *end = memchr(hdr.v1.line, '\r', ret - 1);
+ if (!end || end[1] != '\n')
+ return -1; /* partial or invalid header */
+ *end = '\0'; /* terminate the string to ease parsing */
+ size = end + 2 - hdr.v1.line; /* skip header + CRLF */
+ /* parse the V1 header using favorite address parsers like inet_pton.
+ * return -1 upon error, or simply fall through to accept.
+ */
+ }
+ else {
+ /* Wrong protocol */
+ return -1;
+ }
+
+ done:
+ /* we need to consume the appropriate amount of data from the socket */
+ do {
+ ret = recv(fd, &hdr, size, 0);
+ } while (ret == -1 && errno == EINTR);
+ return (ret >= 0) ? 1 : -1;
+ }
diff --git a/doc/queuing.fig b/doc/queuing.fig
new file mode 100644
index 0000000..8d57504
--- /dev/null
+++ b/doc/queuing.fig
@@ -0,0 +1,192 @@
+#FIG 3.2
+Portrait
+Center
+Metric
+A4
+100.00
+Single
+-2
+1200 2
+6 900 4770 1575 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 900 4770 1125 4995 1125 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 1575 4770 1350 4995 1350 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1170 4995 1170 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1215 4995 1215 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1260 4995 1260 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1305 4995 1305 5220
+2 3 0 1 7 7 52 -1 20 0.000 2 0 -1 0 0 7
+ 900 4770 1125 4995 1125 5220 1350 5220 1350 4995 1575 4770
+ 900 4770
+-6
+6 2250 4770 2925 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 2250 4770 2475 4995 2475 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 3
+ 2925 4770 2700 4995 2700 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2520 4995 2520 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2565 4995 2565 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2610 4995 2610 5220
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2655 4995 2655 5220
+2 3 0 1 7 7 52 -1 20 0.000 2 0 -1 0 0 7
+ 2250 4770 2475 4995 2475 5220 2700 5220 2700 4995 2925 4770
+ 2250 4770
+-6
+6 1710 3420 2115 3870
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1710 3780 2115 3780
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1710 3825 2115 3825
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1710 3735 2115 3735
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1710 3690 2115 3690
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1710 3645 2115 3645
+2 1 0 1 0 6 51 -1 20 0.000 0 0 -1 0 0 4
+ 1710 3420 1710 3870 2115 3870 2115 3420
+-6
+1 2 0 1 0 7 51 -1 20 0.000 1 0.0000 1935 2182 450 113 1485 2182 2385 2182
+1 2 0 1 0 7 51 -1 20 0.000 0 0.0000 2790 3082 450 113 2340 3082 3240 3082
+1 2 0 1 0 7 51 -1 20 0.000 1 0.0000 1935 1367 450 113 1485 1367 2385 1367
+1 2 0 1 0 7 51 -1 20 0.000 1 0.0000 1035 3082 450 113 585 3082 1485 3082
+2 1 2 1 0 2 53 -1 -1 3.000 0 0 -1 0 0 2
+ 2745 3870 3015 3870
+2 1 2 1 0 2 53 -1 -1 3.000 0 0 -1 0 0 2
+ 2745 4320 3015 4320
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 60.00
+ 2970 5085 2745 5085
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 60.00
+ 2205 5085 2430 5085
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 60.00
+ 1620 5085 1395 5085
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 1 1 1.00 60.00 60.00
+ 855 5085 1080 5085
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1890 3870 1440 4320 1440 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1935 3870 2385 4320 2385 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 2610 4320 2610 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 2835 3195 2835 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 2745 3195 2610 3330 2610 3870
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 1935 2295 1935 3420
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1080 3195 1215 3330 1215 3870
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1890 2295 1035 2745 1035 2970
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1980 2295 2790 2745 2790 2970
+2 1 0 1 0 2 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 1935 1485 1935 2070
+2 1 1 1 0 2 50 -1 -1 4.000 0 0 -1 1 0 5
+ 0 0 1.00 60.00 60.00
+ 810 5220 450 5220 450 2160 1080 2160 1485 2160
+2 1 1 1 0 2 50 -1 -1 4.000 0 0 -1 1 0 5
+ 0 0 1.00 60.00 60.00
+ 3060 5220 3375 5220 3375 2160 2655 2160 2385 2160
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 1215 4320 1215 4770
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 990 3195 990 4770
+2 1 0 1 0 2 50 -1 -1 0.000 0 0 -1 1 0 2
+ 0 0 1.00 60.00 60.00
+ 1935 855 1935 1260
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 1620 1440 900 2025 900 2970
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 1 0 3
+ 0 0 1.00 60.00 60.00
+ 2205 1440 2925 2025 2925 2970
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4230 1350 4230
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4275 1350 4275
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4185 1350 4185
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4140 1350 4140
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4095 1350 4095
+2 1 0 1 0 3 51 -1 20 0.000 0 0 -1 0 0 4
+ 1125 3870 1125 4320 1350 4320 1350 3870
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2700 4230 2475 4230
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2700 4275 2475 4275
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2700 4185 2475 4185
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 2700 4140 2475 4140
+2 1 0 1 0 3 51 -1 20 0.000 0 0 -1 0 0 4
+ 2700 3870 2700 4320 2475 4320 2475 3870
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4050 1350 4050
+2 1 0 1 0 7 50 -1 -1 0.000 0 0 -1 0 0 2
+ 1125 4005 1350 4005
+2 1 0 1 0 2 53 -1 -1 0.000 0 0 -1 1 1 2
+ 0 0 1.00 60.00 60.00
+ 0 0 1.00 60.00 60.00
+ 900 3870 900 4320
+2 1 2 1 0 2 53 -1 -1 3.000 0 0 -1 0 0 2
+ 855 3870 1125 3870
+2 1 2 1 0 2 53 -1 -1 3.000 0 0 -1 0 0 2
+ 855 4320 1125 4320
+2 1 0 1 0 2 53 -1 -1 0.000 0 0 -1 1 1 2
+ 0 0 1.00 60.00 60.00
+ 0 0 1.00 60.00 60.00
+ 2970 3870 2970 4320
+4 0 0 53 -1 16 7 0.0000 4 75 195 1260 3510 Yes\001
+4 2 0 53 -1 16 7 0.0000 4 75 135 945 3510 No\001
+4 2 0 53 -1 16 7 0.0000 4 75 195 2565 3510 Yes\001
+4 0 0 53 -1 16 7 0.0000 4 75 135 2880 3510 No\001
+4 1 0 50 -1 16 6 0.0000 4 75 210 1935 4140 global\001
+4 1 0 50 -1 16 6 0.0000 4 60 225 1935 4230 queue\001
+4 1 0 50 -1 16 8 1.5708 4 120 1005 405 4680 Redispatch on error\001
+4 1 0 53 -1 14 6 1.5708 4 60 480 2205 3645 maxqueue\001
+4 1 0 50 -1 18 8 0.0000 4 90 165 1935 2205 LB\001
+4 1 0 53 -1 16 7 1.5708 4 90 870 2070 2880 server, all are full.\001
+4 1 0 50 -1 18 8 0.0000 4 90 360 1935 1395 cookie\001
+4 1 0 50 -1 16 10 0.0000 4 135 1200 1935 765 Incoming request\001
+4 1 0 53 -1 16 7 1.5708 4 75 480 1890 1755 no cookie\001
+4 1 0 53 -1 16 7 1.5708 4 75 600 1890 2880 no available\001
+4 0 0 53 -1 16 7 5.6200 4 75 735 2340 1530 SRV2 selected\001
+4 1 0 50 -1 16 10 0.0000 4 105 405 1260 5445 SRV1\001
+4 1 0 50 -1 16 10 0.0000 4 105 405 2610 5445 SRV2\001
+4 2 0 53 -1 16 7 0.4712 4 75 735 1665 2385 SRV1 selected\001
+4 0 0 53 -1 16 7 5.8119 4 75 735 2205 2385 SRV2 selected\001
+4 2 0 53 -1 16 7 0.6632 4 75 735 1485 1530 SRV1 selected\001
+4 0 0 53 -1 14 6 0.0000 4 45 420 2880 5040 maxconn\001
+4 1 0 50 -1 16 8 1.5708 4 120 1005 3510 4680 Redispatch on error\001
+4 1 0 50 -1 18 8 0.0000 4 90 615 2790 3105 SRV2 full ?\001
+4 1 0 50 -1 18 8 0.0000 4 90 615 1035 3105 SRV1 full ?\001
+4 1 0 53 -1 14 6 1.5708 4 60 480 855 4095 maxqueue\001
+4 1 0 53 -1 14 6 1.5708 4 60 480 3105 4095 maxqueue\001
diff --git a/doc/regression-testing.txt b/doc/regression-testing.txt
new file mode 100644
index 0000000..5b070a4
--- /dev/null
+++ b/doc/regression-testing.txt
@@ -0,0 +1,706 @@
+ +---------------------------------------+
+ | HAProxy regression testing with vtest |
+ +---------------------------------------+
+
+
+The information found in this file is a short starting guide to help you
+write VTC (Varnish Test Case) scripts (or VTC files) for haproxy regression testing.
+Such VTC files are currently used to test the Varnish cache application developed by
+Poul-Henning Kamp. A very big thank you to him for having helped us to add
+our haproxy C modules to the vtest tool. Note that vtest was formerly developed for
+varnish cache regression testing and was named varnishtest. vtest is a haproxy-specific
+version of the varnishtest program which reuses the non varnish-cache-specific code.
+
+A lot of general information about how to write VTC files may be found in 'man/vtc.7'
+manual of varnish cache sources directory or directly on the web here:
+
+ https://varnish-cache.org/docs/trunk/reference/vtc.html
+
+It is *highly* recommended to read this manual before asking to haproxy ML. This
+documentation only deals with the vtest support for haproxy.
+
+
+vtest installation
+------------------------
+
+To use vtest you will have to download and compile the recent vtest
+sources found at https://github.com/vtest/VTest.
+
+To compile vtest:
+
+ $ cd VTest
+ $ make vtest
+
+Note that varnishtest may be also compiled but not without the varnish cache
+sources already compiled:
+
+ $ VARNISH_SRC=<...> make varnishtest
+
+After having compiled these sources, the vtest executable location is at the
+root of the vtest sources directory.
+
+
+vtest execution
+---------------------
+
+vtest is able to search for the haproxy executable file it is supposed to
+launch thanks to the PATH environment variable. To force the executable to be used by
+vtest, the HAPROXY_PROGRAM environment variable for vtest may be
+typically set as follows:
+
+ $ HAPROXY_PROGRAM=~/srcs/haproxy/haproxy vtest ...
+
+vtest program comes with interesting options. The most interesting are:
+
+  -t Timeout in seconds to abort the test if some launched program
+     never terminates.
+  -v By default, vtest does not dump the outputs of the processes it launched
+     when the test passes. With this option the outputs are dumped even
+     when the test passes.
+ -L to always keep the temporary VTC directories.
+ -l to keep the temporary VTC directories only when the test fails.
+
+About haproxy, when launched by vtest, -d option is enabled by default.
+
+
+How to write VTC files
+----------------------
+
+A VTC file must start with a "varnishtest" or "vtest" command line followed by a
+descriptive line enclosed by double quotes. This is not specific to the VTC files
+for haproxy.
+
+The VTC files for haproxy must also contain a "feature ignore_unknown_macro" line
+if any macro is used for haproxy in this file. This is due to the fact that
+vtest parser code for haproxy commands generates macros the vtest
+parser code for varnish cache has no knowledge of. This line prevents vtest from
+failing in such cases. As a "cli" macro is automatically generated, this
+"feature ignore_unknown_macro" is mandatory for each VTC file for haproxy.
+
+To make vtest capable of testing haproxy, two new VTC commands have been
+implemented: "haproxy" and "syslog". "haproxy" is used to start haproxy process.
+"syslog" is used to start syslog servers (at this time, only used by haproxy).
+
+As haproxy cannot work without a configuration file, a VTC file for haproxy must
+embed the configuration files contents for the haproxy instances it declares.
+This may be done using the following intuitive syntax construction: -conf {...}.
+Here -conf is an argument of "haproxy" VTC command to declare the configuration
+file of the haproxy instances it also declares (see "Basic HAProxy test" VTC file
+below).
+
+As for varnish VTC files, the parser of VTC files for haproxy automatically
+generates macros for the declared frontends to be reused by the clients later
+in the script, so after having written the "haproxy" command sections.
+The syntax "fd@${my_frontend_fd_name}" must be used to bind the frontend
+listeners to localhost address and random ports (see "Environment variables"
+section of haproxy documentation). This is mandatory.
+
+Each time the haproxy command parser finds a "fd@${xyz}" string in a 'ABC'
+"haproxy" command section, it generates three macros: 'ABC_xyz_addr', 'ABC_xyz_port'
+and 'ABC_xyz_sock', with 'ABC_xyz_sock' being resolved as 'ABC_xyz_addr
+ABC_xyz_port' typically used by clients -connect parameter.
+
+Each haproxy instance works in its own temporary working directory located
+at '/tmp/vtc.<vtest PID>.XXXXXXXX/<haproxy_instance_name>' (with XXXXXXXX
+a random 8-digit hexadecimal integer). It is in this temporary directory that
+the configuration file is temporarily written.
+
+A 'stats.sock' UNIX socket is also created in this directory. There is no need
+to declare such stats sockets in the -conf {...} section. The name of the parent
+directory of the haproxy instances working directories is stored in 'tmpdir'. In
+fact this is the working directory of the current vtest process.
+
+There also exists a 'testdir' macro which is the parent directory of the VTC file.
+It may be useful to use other files located in the same directory as the current
+VTC file.
+
+
+
+VTC file examples
+-----------------
+
+The following first VTC file is a real regression test case file for a bug which has
+been fixed by 84c844e commit. We declare a basic configuration for a 'h1' haproxy
+instance.
+
+ varnishtest "SPOE bug: missing configuration file"
+
+ #commit 84c844eb12b250aa86f2aadaff77c42dfc3cb619
+ #Author: Christopher Faulet <cfaulet@haproxy.com>
+ #Date: Fri Mar 23 14:37:14 2018 +0100
+
+ # BUG/MINOR: spoe: Initialize variables used during conf parsing before any check
+
+ # Some initializations must be done at the beginning of parse_spoe_flt to avoid
+ # segmentation fault when first errors are caught, when the "filter spoe" line is
+ # parsed.
+
+ haproxy h1 -conf-BAD {} {
+ defaults
+ timeout connect 5000ms
+ timeout client 50000ms
+ timeout server 50000ms
+
+ frontend my-front
+ filter spoe
+ }
+
+
+The -conf-BAD haproxy command argument is used. Its role is to launch haproxy
+with the -c option (configuration file checking) and check that 'h1' calls
+exit(3) with 1 as its exit status. Here is the output when running this VTC file:
+
+
+ **** top 0.0 extmacro def pwd=/home/fred/src/haproxy
+ **** top 0.0 extmacro def localhost=127.0.0.1
+ **** top 0.0 extmacro def bad_backend=127.0.0.1 39564
+ **** top 0.0 extmacro def bad_ip=192.0.2.255
+ **** top 0.0 macro def testdir=//home/fred/src/varnish-cache-haproxy
+ **** top 0.0 macro def tmpdir=/tmp/vtc.6377.64329194
+ * top 0.0 TEST /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc starting
+ ** top 0.0 === varnishtest "SPOE bug: missing configuration file"
+ * top 0.0 TEST SPOE bug: missing configuration file
+ ** top 0.0 === haproxy h1 -conf-BAD {} {
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf|\tstats socket /tmp/vtc.6377.64329194/h1/stats.sock level admin mode 600
+ **** h1 0.0 conf|
+ **** h1 0.0 conf|\tdefaults
+ **** h1 0.0 conf| timeout connect 5000ms
+ **** h1 0.0 conf| timeout client 50000ms
+ **** h1 0.0 conf| timeout server 50000ms
+ **** h1 0.0 conf|
+ **** h1 0.0 conf|\tfrontend my-front
+ **** h1 0.0 conf|\t\tfilter spoe
+ **** h1 0.0 conf|
+ ** h1 0.0 haproxy_start
+ **** h1 0.0 opt_worker 0 opt_daemon 0 opt_check_mode 1
+ **** h1 0.0 argv|exec /home/fred/src/haproxy/haproxy -c -f /tmp/vtc.6377.64329194/h1/cfg
+ **** h1 0.0 XXX 5 @277
+ *** h1 0.0 PID: 6395
+ **** h1 0.0 macro def h1_pid=6395
+ **** h1 0.0 macro def h1_name=/tmp/vtc.6377.64329194/h1
+ ** h1 0.0 Wait
+ ** h1 0.0 Stop HAProxy pid=6395
+ **** h1 0.0 STDOUT poll 0x10
+ ** h1 0.0 WAIT4 pid=6395 status=0x008b (user 0.000000 sys 0.000000)
+ * h1 0.0 Expected exit: 0x1 signal: 0 core: 0
+ ---- h1 0.0 Bad exit status: 0x008b exit 0x0 signal 11 core 128
+ * top 0.0 RESETTING after /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc
+ ** h1 0.0 Reset and free h1 haproxy 6395
+ ** h1 0.0 Wait
+ ---- h1 0.0 Assert error in haproxy_wait(), vtc_haproxy.c line 326: Condition(*(&h->fds[1]) >= 0) not true.
+
+ * top 0.0 failure during reset
+ # top TEST /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc FAILED (0.008) exit=2
+
+
+'h1' exited with (128 + 11) status and a core file was produced in
+/tmp/vtc.6377.64329194/h1 directory.
+With the patch provided by 84c844e commit, varnishtest makes this VTC file pass
+as expected (verbose mode execution):
+
+ **** top 0.0 extmacro def pwd=/home/fred/src/haproxy
+ **** top 0.0 extmacro def localhost=127.0.0.1
+ **** top 0.0 extmacro def bad_backend=127.0.0.1 42264
+ **** top 0.0 extmacro def bad_ip=192.0.2.255
+ **** top 0.0 macro def testdir=//home/fred/src/varnish-cache-haproxy
+ **** top 0.0 macro def tmpdir=/tmp/vtc.25540.59b6ec5d
+ * top 0.0 TEST /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc starting
+ ** top 0.0 === varnishtest "SPOE bug: missing configuration file"
+ * top 0.0 TEST SPOE bug: missing configuration file
+ ** top 0.0 === haproxy h1 -conf-BAD {} {
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf|\tstats socket /tmp/vtc.25540.59b6ec5d/h1/stats.sock level admin mode 600
+ **** h1 0.0 conf|
+ **** h1 0.0 conf|\tdefaults
+ **** h1 0.0 conf| timeout connect 5000ms
+ **** h1 0.0 conf| timeout client 50000ms
+ **** h1 0.0 conf| timeout server 50000ms
+ **** h1 0.0 conf|
+ **** h1 0.0 conf|\tfrontend my-front
+ **** h1 0.0 conf|\t\tfilter spoe
+ **** h1 0.0 conf|
+ ** h1 0.0 haproxy_start
+ **** h1 0.0 opt_worker 0 opt_daemon 0 opt_check_mode 1
+ **** h1 0.0 argv|exec /home/fred/src/haproxy/haproxy -c -f /tmp/vtc.25540.59b6ec5d/h1/cfg
+ **** h1 0.0 XXX 5 @277
+ *** h1 0.0 PID: 25558
+ **** h1 0.0 macro def h1_pid=25558
+ **** h1 0.0 macro def h1_name=/tmp/vtc.25540.59b6ec5d/h1
+ ** h1 0.0 Wait
+ ** h1 0.0 Stop HAProxy pid=25558
+ *** h1 0.0 debug|[ALERT] (25558) : parsing [/tmp/vtc.25540.59b6ec5d/h1/cfg:10] : 'filter' : ''spoe' : missing config file'
+ *** h1 0.0 debug|[ALERT] (25558) : Error(s) found in configuration file : /tmp/vtc.25540.59b6ec5d/h1/cfg
+ *** h1 0.0 debug|[ALERT] (25558) : Fatal errors found in configuration.
+ **** h1 0.0 STDOUT poll 0x10
+ ** h1 0.0 WAIT4 pid=25558 status=0x0100 (user 0.000000 sys 0.000000)
+ ** h1 0.0 Found expected ''
+ * top 0.0 RESETTING after /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc
+ ** h1 0.0 Reset and free h1 haproxy -1
+ * top 0.0 TEST /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc completed
+ # top TEST /home/fred/src/varnish-cache-haproxy/spoe_bug.vtc passed (0.004)
+
+
+The following VTC file does almost nothing except running a shell to list
+the contents of 'tmpdir' directory after having launched a haproxy instance
+and 's1' HTTP server. This shell also prints the content of 'cfg' 'h1' configuration
+file.
+
+ varnishtest "List the contents of 'tmpdir'"
+ feature ignore_unknown_macro
+
+ server s1 {
+ } -start
+
+ haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect 5s
+ timeout server 30s
+ timeout client 30s
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend http1
+ use_backend be1
+ bind "fd@${my_frontend_fd}"
+ } -start
+
+ shell {
+ echo "${tmpdir} working directory content:"
+ ls -lR ${tmpdir}
+ cat ${tmpdir}/h1/cfg
+ }
+
+We give only the output of the shell to illustrate this example:
+
+ .
+ .
+ .
+ ** top 0.0 === shell {
+ **** top 0.0 shell_cmd|exec 2>&1 ;
+ **** top 0.0 shell_cmd| echo "tmpdir: /tmp/vtc.32092.479d521e"
+ **** top 0.0 shell_cmd| ls -lR /tmp/vtc.32092.479d521e
+ **** top 0.0 shell_cmd| cat /tmp/vtc.32092.479d521e/h1/cfg
+ .
+ .
+ .
+ **** top 0.0 shell_out|/tmp/vtc.3808.448cbfe0 working directory content:
+ **** top 0.0 shell_out|/tmp/vtc.32092.479d521e:
+ **** top 0.0 shell_out|total 8
+ **** top 0.0 shell_out|drwxr-xr-x 2 users 4096 Jun 7 11:09 h1
+ **** top 0.0 shell_out|-rw-r--r-- 1 me users 84 Jun 7 11:09 INFO
+ **** top 0.0 shell_out|
+ **** top 0.0 shell_out|/tmp/vtc.32092.479d521e/h1:
+ **** top 0.0 shell_out|total 4
+ **** top 0.0 shell_out|-rw-r----- 1 fred users 339 Jun 7 11:09 cfg
+ **** top 0.0 shell_out|srw------- 1 fred users 0 Jun 7 11:09 stats.sock
+ **** top 0.0 shell_out| global
+ **** top 0.0 shell_out|\tstats socket /tmp/vtc.32092.479d521e/h1/stats.sock level admin mode 600
+ **** top 0.0 shell_out|
+ **** top 0.0 shell_out| defaults
+ **** top 0.0 shell_out| mode http
+ **** top 0.0 shell_out| timeout connect 5s
+ **** top 0.0 shell_out| timeout server 30s
+ **** top 0.0 shell_out| timeout client 30s
+ **** top 0.0 shell_out|
+ **** top 0.0 shell_out| backend be1
+ **** top 0.0 shell_out| server srv1 127.0.0.1:36984
+ **** top 0.0 shell_out|
+ **** top 0.0 shell_out| frontend http1
+ **** top 0.0 shell_out| use_backend be1
+ **** top 0.0 shell_out| bind "fd@${my_frontend_fd}"
+ **** top 0.0 shell_status = 0x0000
+
+
+The following example illustrates how to run a basic HTTP transaction between 'c1'
+client and 's1' server with 'http1' as haproxy frontend. This frontend listens
+on a TCP socket with 'my_frontend_fd' as file descriptor.
+
+ # Mandatory line
+ varnishtest "Basic HAProxy test"
+
+ # As some macros for haproxy are used in this file, this line is mandatory.
+ feature ignore_unknown_macro
+
+ server s1 {
+ rxreq
+ txresp -body "s1 >>> Hello world!"
+ } -start
+
+ haproxy h1 -conf {
+ # Configuration file of 'h1' haproxy instance.
+ defaults
+ mode http
+ timeout connect 5s
+ timeout server 30s
+ timeout client 30s
+
+ backend be1
+ # declare 'srv1' server to point to 's1' server instance declare above.
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend http1
+ use_backend be1
+ bind "fd@${my_frontend_fd}"
+ } -start
+
+ client c1 -connect ${h1_my_frontend_fd_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "s1 >>> Hello world!"
+ } -run
+
+
+It is possible to shorten the previous VTC file haproxy command section as follows:
+
+ haproxy h1 -conf {
+ # Configuration file of 'h1' haproxy instance.
+ defaults
+ mode http
+ timeout connect 5s
+ timeout server 30s
+ timeout client 30s
+ }
+
+In this latter example, "backend" and "frontend" sections are automatically
+generated depending on the declarations of server instances.
+
+
+Another interesting real regression test case is the following: we declare one
+server 's1', a syslog server 'Slg_1' and a basic haproxy configuration for 'h1'
+haproxy instance. Here we want to check that the syslog messages are correctly
+formatted thanks to "expect" "syslog" command (see syslog Slg_1 {...} command)
+below.
+
+ varnishtest "Wrong ip/port logging"
+ feature ignore_unknown_macro
+
+ #commit d02286d6c866e5c0a7eb6fbb127fa57f3becaf16
+ #Author: Willy Tarreau <w@1wt.eu>
+ #Date: Fri Jun 23 11:23:43 2017 +0200
+ #
+ # BUG/MINOR: log: pin the front connection when front ip/ports are logged
+ #
+ # Mathias Weiersmueller reported an interesting issue with logs which Lukas
+ # diagnosed as dating back from commit 9b061e332 (1.5-dev9). When front
+ # connection information (ip, port) are logged in TCP mode and the log is
+ # emitted at the end of the connection (eg: because %B or any log tag
+ # requiring LW_BYTES is set), the log is emitted after the connection is
+ # closed, so the address and ports cannot be retrieved anymore.
+ #
+ # It could be argued that we'd make a special case of these to immediately
+ # retrieve the source and destination addresses from the connection, but it
+ # seems cleaner to simply pin the front connection, marking it "tracked" by
+ # adding the LW_XPRT flag to mention that we'll need some of these elements
+ # at the last moment. Only LW_FRTIP and LW_CLIP are affected. Note that after
+ # this change, LW_FRTIP could simply be removed as it's not used anywhere.
+
+ # Note that the problem doesn't happen when using %[src] or %[dst] since
+ # all sample expressions set LW_XPRT.
+
+
+ server s1 {
+ rxreq
+ txresp
+ } -start
+
+ syslog Slg_1 -level notice {
+ recv
+ recv
+ recv info
+ expect ~ \"dip\":\"${h1_fe_1_addr}\",\"dport\":\"${h1_fe_1_port}.*\"ts\":\"cD\",\"
+ } -start
+
+ haproxy h1 -conf {
+ global
+ log ${Slg_1_addr}:${Slg_1_port} local0
+
+ defaults
+ log global
+ timeout connect 3000
+ timeout client 5
+ timeout server 10000
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ mode tcp
+ log-format {\"dip\":\"%fi\",\"dport\":\"%fp\",\"c_ip\":\"%ci\",\"c_port\":\"%cp\",\"fe_name\":\"%ft\",\"be_name\":\"%b\",\"s_name\":\"%s\",\"ts\":\"%ts\",\"bytes_read\":\"%B\"}
+ default_backend be_app
+
+ backend be_app
+ server app1 ${s1_addr}:${s1_port} check
+ } -start
+
+ client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/"
+ delay 0.02
+ } -run
+
+ syslog Slg_1 -wait
+
+
+Here is the output produced by varnishtest with the latter VTC file:
+
+ **** top 0.0 extmacro def pwd=/home/fred/src/haproxy
+ **** top 0.0 extmacro def localhost=127.0.0.1
+ **** top 0.0 extmacro def bad_backend=127.0.0.1 40386
+ **** top 0.0 extmacro def bad_ip=192.0.2.255
+ **** top 0.0 macro def testdir=//home/fred/src/varnish-cache-haproxy
+ **** top 0.0 macro def tmpdir=/tmp/vtc.15752.560ca66b
+ * top 0.0 TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc starting
+ ** top 0.0 === varnishtest "HAPEE bug 2788"
+ * top 0.0 TEST HAPEE bug 2788
+ ** top 0.0 === feature ignore_unknown_macro
+ ** top 0.0 === server s1 {
+ ** s1 0.0 Starting server
+ **** s1 0.0 macro def s1_addr=127.0.0.1
+ **** s1 0.0 macro def s1_port=35564
+ **** s1 0.0 macro def s1_sock=127.0.0.1 35564
+ * s1 0.0 Listen on 127.0.0.1 35564
+ ** top 0.0 === syslog Slg_1 -level notice {
+ ** Slg_1 0.0 Starting syslog server
+ ** s1 0.0 Started on 127.0.0.1 35564
+ **** Slg_1 0.0 macro def Slg_1_addr=127.0.0.1
+ **** Slg_1 0.0 macro def Slg_1_port=33012
+ **** Slg_1 0.0 macro def Slg_1_sock=127.0.0.1 33012
+ * Slg_1 0.0 Bound on 127.0.0.1 33012
+ ** top 0.0 === haproxy h1 -conf {
+ ** Slg_1 0.0 Started on 127.0.0.1 33012 (level: 5)
+ ** Slg_1 0.0 === recv
+ **** h1 0.0 macro def h1_fe_1_sock=::1 51782
+ **** h1 0.0 macro def h1_fe_1_addr=::1
+ **** h1 0.0 macro def h1_fe_1_port=51782
+ **** h1 0.0 setenv(fe_1, 7)
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf|\tstats socket /tmp/vtc.15752.560ca66b/h1/stats.sock level admin mode 600
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf| log 127.0.0.1:33012 local0
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| defaults
+ **** h1 0.0 conf| log global
+ **** h1 0.0 conf| timeout connect 3000
+ **** h1 0.0 conf| timeout client 5
+ **** h1 0.0 conf| timeout server 10000
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| frontend fe1
+ **** h1 0.0 conf| bind "fd@${fe_1}"
+ **** h1 0.0 conf| mode tcp
+ **** h1 0.0 conf| log-format {\"dip\":\"%fi\",\"dport\":\"%fp\",\"c_ip\":\"%ci\",\"c_port\":\"%cp\",\"fe_name\":\"%ft\",\"be_name\":\"%b\",\"s_name\":\"%s\",\"ts\":\"%ts\",\"bytes_read\":\"%B\"}
+ **** h1 0.0 conf| default_backend be_app
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| backend be_app
+ **** h1 0.0 conf| server app1 127.0.0.1:35564 check
+ ** h1 0.0 haproxy_start
+ **** h1 0.0 opt_worker 0 opt_daemon 0 opt_check_mode 0
+ **** h1 0.0 argv|exec /home/fred/src/haproxy/haproxy -d -f /tmp/vtc.15752.560ca66b/h1/cfg
+ **** h1 0.0 XXX 9 @277
+ *** h1 0.0 PID: 15787
+ **** h1 0.0 macro def h1_pid=15787
+ **** h1 0.0 macro def h1_name=/tmp/vtc.15752.560ca66b/h1
+ ** top 0.0 === client c1 -connect ${h1_fe_1_sock} {
+ ** c1 0.0 Starting client
+ ** c1 0.0 Waiting for client
+ *** c1 0.0 Connect to ::1 51782
+ *** c1 0.0 connected fd 8 from ::1 46962 to ::1 51782
+ ** c1 0.0 === txreq -url "/"
+ **** c1 0.0 txreq|GET / HTTP/1.1\r
+ **** c1 0.0 txreq|Host: 127.0.0.1\r
+ **** c1 0.0 txreq|\r
+ ** c1 0.0 === delay 0.02
+ *** c1 0.0 delaying 0.02 second(s)
+ *** h1 0.0 debug|Note: setting global.maxconn to 2000.
+ *** h1 0.0 debug|Available polling systems :
+ *** h1 0.0 debug| epoll :
+ *** h1 0.0 debug|pref=300,
+ *** h1 0.0 debug| test result OK
+ *** h1 0.0 debug| poll : pref=200, test result OK
+ *** h1 0.0 debug| select :
+ *** h1 0.0 debug|pref=150, test result FAILED
+ *** h1 0.0 debug|Total: 3 (2 usable), will use epoll.
+ *** h1 0.0 debug|
+ *** h1 0.0 debug|Available filters :
+ *** h1 0.0 debug|\t[SPOE] spoe
+ *** h1 0.0 debug|\t[COMP] compression
+ *** h1 0.0 debug|\t[TRACE] trace
+ **** Slg_1 0.0 syslog|<133>Jun 7 14:12:51 haproxy[15787]: Proxy fe1 started.
+ ** Slg_1 0.0 === recv
+ **** Slg_1 0.0 syslog|<133>Jun 7 14:12:51 haproxy[15787]: Proxy be_app started.
+ ** Slg_1 0.0 === recv info
+ *** h1 0.0 debug|00000000:fe1.accept(0007)=000a from [::1:46962]
+ *** s1 0.0 accepted fd 6 127.0.0.1 56770
+ ** s1 0.0 === rxreq
+ **** s1 0.0 rxhdr|GET / HTTP/1.1\r
+ **** s1 0.0 rxhdr|Host: 127.0.0.1\r
+ **** s1 0.0 rxhdr|\r
+ **** s1 0.0 rxhdrlen = 35
+ **** s1 0.0 http[ 0] |GET
+ **** s1 0.0 http[ 1] |/
+ **** s1 0.0 http[ 2] |HTTP/1.1
+ **** s1 0.0 http[ 3] |Host: 127.0.0.1
+ **** s1 0.0 bodylen = 0
+ ** s1 0.0 === txresp
+ **** s1 0.0 txresp|HTTP/1.1 200 OK\r
+ **** s1 0.0 txresp|Content-Length: 0\r
+ **** s1 0.0 txresp|\r
+ *** s1 0.0 shutting fd 6
+ ** s1 0.0 Ending
+ *** h1 0.0 debug|00000000:be_app.srvcls[000a:000c]
+ *** h1 0.0 debug|00000000:be_app.clicls[000a:000c]
+ *** h1 0.0 debug|00000000:be_app.closed[000a:000c]
+ **** Slg_1 0.0 syslog|<134>Jun 7 14:12:51 haproxy[15787]: {"dip":"","dport":"0","c_ip":"::1","c_port":"46962","fe_name":"fe1","be_name":"be_app","s_name":"app1","ts":"cD","bytes_read":"38"}
+ ** Slg_1 0.0 === expect ~ \"dip\":\"${h1_fe_1_addr}\",\"dport\":\"${h1_fe_1_p...
+ ---- Slg_1 0.0 EXPECT FAILED ~ "\"dip\":\"::1\",\"dport\":\"51782.*\"ts\":\"cD\",\""
+ *** c1 0.0 closing fd 8
+ ** c1 0.0 Ending
+ * top 0.0 RESETTING after /home/fred/src/varnish-cache-haproxy/d02286d.vtc
+ ** h1 0.0 Reset and free h1 haproxy 15787
+ ** h1 0.0 Wait
+ ** h1 0.0 Stop HAProxy pid=15787
+ **** h1 0.0 Kill(2)=0: Success
+ **** h1 0.0 STDOUT poll 0x10
+ ** h1 0.1 WAIT4 pid=15787 status=0x0002 (user 0.000000 sys 0.004000)
+ ** s1 0.1 Waiting for server (4/-1)
+ ** Slg_1 0.1 Waiting for syslog server (5)
+ * top 0.1 TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc FAILED
+ # top TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc FAILED (0.131) exit=2
+
+This test does not pass without the bug fix of d02286d commit. Indeed the third syslog
+message received by 'Slg_1' syslog server does not match the regular expression
+of the "syslog" "expect" command:
+
+ expect ~ \"dip\":\"${h1_fe_1_addr}\",\"dport\":\"${h1_fe_1_port}.*\"ts\":\"cD\",\"
+
+(the IP address and port are missing), contrary to what happens with the correct bug fix:
+
+ **** top 0.0 extmacro def pwd=/home/fred/src/haproxy
+ **** top 0.0 extmacro def localhost=127.0.0.1
+ **** top 0.0 extmacro def bad_backend=127.0.0.1 37284
+ **** top 0.0 extmacro def bad_ip=192.0.2.255
+ **** top 0.0 macro def testdir=//home/fred/src/varnish-cache-haproxy
+ **** top 0.0 macro def tmpdir=/tmp/vtc.12696.186b28b0
+ * top 0.0 TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc starting
+ ** top 0.0 === varnishtest "HAPEE bug 2788"
+ * top 0.0 TEST HAPEE bug 2788
+ ** top 0.0 === feature ignore_unknown_macro
+ ** top 0.0 === server s1 {
+ ** s1 0.0 Starting server
+ **** s1 0.0 macro def s1_addr=127.0.0.1
+ **** s1 0.0 macro def s1_port=53384
+ **** s1 0.0 macro def s1_sock=127.0.0.1 53384
+ * s1 0.0 Listen on 127.0.0.1 53384
+ ** top 0.0 === syslog Slg_1 -level notice {
+ ** Slg_1 0.0 Starting syslog server
+ **** Slg_1 0.0 macro def Slg_1_addr=127.0.0.1
+ ** s1 0.0 Started on 127.0.0.1 53384
+ **** Slg_1 0.0 macro def Slg_1_port=36195
+ **** Slg_1 0.0 macro def Slg_1_sock=127.0.0.1 36195
+ * Slg_1 0.0 Bound on 127.0.0.1 36195
+ ** top 0.0 === haproxy h1 -conf {
+ ** Slg_1 0.0 Started on 127.0.0.1 36195 (level: 5)
+ ** Slg_1 0.0 === recv
+ **** h1 0.0 macro def h1_fe_1_sock=::1 39264
+ **** h1 0.0 macro def h1_fe_1_addr=::1
+ **** h1 0.0 macro def h1_fe_1_port=39264
+ **** h1 0.0 setenv(fe_1, 7)
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf|\tstats socket /tmp/vtc.12696.186b28b0/h1/stats.sock level admin mode 600
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| global
+ **** h1 0.0 conf| log 127.0.0.1:36195 local0
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| defaults
+ **** h1 0.0 conf| log global
+ **** h1 0.0 conf| timeout connect 3000
+ **** h1 0.0 conf| timeout client 5
+ **** h1 0.0 conf| timeout server 10000
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| frontend fe1
+ **** h1 0.0 conf| bind "fd@${fe_1}"
+ **** h1 0.0 conf| mode tcp
+ **** h1 0.0 conf| log-format {\"dip\":\"%fi\",\"dport\":\"%fp\",\"c_ip\":\"%ci\",\"c_port\":\"%cp\",\"fe_name\":\"%ft\",\"be_name\":\"%b\",\"s_name\":\"%s\",\"ts\":\"%ts\",\"bytes_read\":\"%B\"}
+ **** h1 0.0 conf| default_backend be_app
+ **** h1 0.0 conf|
+ **** h1 0.0 conf| backend be_app
+ **** h1 0.0 conf| server app1 127.0.0.1:53384 check
+ ** h1 0.0 haproxy_start
+ **** h1 0.0 opt_worker 0 opt_daemon 0 opt_check_mode 0
+ **** h1 0.0 argv|exec /home/fred/src/haproxy/haproxy -d -f /tmp/vtc.12696.186b28b0/h1/cfg
+ **** h1 0.0 XXX 9 @277
+ *** h1 0.0 PID: 12728
+ **** h1 0.0 macro def h1_pid=12728
+ **** h1 0.0 macro def h1_name=/tmp/vtc.12696.186b28b0/h1
+ ** top 0.0 === client c1 -connect ${h1_fe_1_sock} {
+ ** c1 0.0 Starting client
+ ** c1 0.0 Waiting for client
+ *** c1 0.0 Connect to ::1 39264
+ *** c1 0.0 connected fd 8 from ::1 41245 to ::1 39264
+ ** c1 0.0 === txreq -url "/"
+ **** c1 0.0 txreq|GET / HTTP/1.1\r
+ **** c1 0.0 txreq|Host: 127.0.0.1\r
+ **** c1 0.0 txreq|\r
+ ** c1 0.0 === delay 0.02
+ *** c1 0.0 delaying 0.02 second(s)
+ *** h1 0.0 debug|Note: setting global.maxconn to 2000.
+ *** h1 0.0 debug|Available polling systems :
+ *** h1 0.0 debug| epoll : pref=300,
+ *** h1 0.0 debug| test result OK
+ *** h1 0.0 debug| poll : pref=200, test result OK
+ *** h1 0.0 debug| select : pref=150, test result FAILED
+ *** h1 0.0 debug|Total: 3 (2 usable), will use epoll.
+ *** h1 0.0 debug|
+ *** h1 0.0 debug|Available filters :
+ *** h1 0.0 debug|\t[SPOE] spoe
+ *** h1 0.0 debug|\t[COMP] compression
+ *** h1 0.0 debug|\t[TRACE] trace
+ *** h1 0.0 debug|Using epoll() as the polling mechanism.
+ **** Slg_1 0.0 syslog|<133>Jun 7 14:10:18 haproxy[12728]: Proxy fe1 started.
+ ** Slg_1 0.0 === recv
+ **** Slg_1 0.0 syslog|<133>Jun 7 14:10:18 haproxy[12728]: Proxy be_app started.
+ ** Slg_1 0.0 === recv info
+ *** h1 0.0 debug|00000000:fe1.accept(0007)=000c from [::1:41245] ALPN=<none>
+ *** s1 0.0 accepted fd 6 127.0.0.1 49946
+ ** s1 0.0 === rxreq
+ **** s1 0.0 rxhdr|GET / HTTP/1.1\r
+ **** s1 0.0 rxhdr|Host: 127.0.0.1\r
+ **** s1 0.0 rxhdr|\r
+ **** s1 0.0 rxhdrlen = 35
+ **** s1 0.0 http[ 0] |GET
+ **** s1 0.0 http[ 1] |/
+ **** s1 0.0 http[ 2] |HTTP/1.1
+ **** s1 0.0 http[ 3] |Host: 127.0.0.1
+ **** s1 0.0 bodylen = 0
+ ** s1 0.0 === txresp
+ **** s1 0.0 txresp|HTTP/1.1 200 OK\r
+ **** s1 0.0 txresp|Content-Length: 0\r
+ **** s1 0.0 txresp|\r
+ *** s1 0.0 shutting fd 6
+ ** s1 0.0 Ending
+ *** h1 0.0 debug|00000000:be_app.srvcls[000c:adfd]
+ *** h1 0.0 debug|00000000:be_app.clicls[000c:adfd]
+ *** h1 0.0 debug|00000000:be_app.closed[000c:adfd]
+ **** Slg_1 0.0 syslog|<134>Jun 7 14:10:18 haproxy[12728]: {"dip":"::1","dport":"39264","c_ip":"::1","c_port":"41245","fe_name":"fe1","be_name":"be_app","s_name":"app1","ts":"cD","bytes_read":"38"}
+ ** Slg_1 0.0 === expect ~ \"dip\":\"${h1_fe_1_addr}\",\"dport\":\"${h1_fe_1_p...
+ **** Slg_1 0.0 EXPECT MATCH ~ "\"dip\":\"::1\",\"dport\":\"39264.*\"ts\":\"cD\",\""
+ *** Slg_1 0.0 shutting fd 5
+ ** Slg_1 0.0 Ending
+ *** c1 0.0 closing fd 8
+ ** c1 0.0 Ending
+ ** top 0.0 === syslog Slg_1 -wait
+ ** Slg_1 0.0 Waiting for syslog server (-1)
+ * top 0.0 RESETTING after /home/fred/src/varnish-cache-haproxy/d02286d.vtc
+ ** h1 0.0 Reset and free h1 haproxy 12728
+ ** h1 0.0 Wait
+ ** h1 0.0 Stop HAProxy pid=12728
+ **** h1 0.0 Kill(2)=0: Success
+ **** h1 0.0 STDOUT poll 0x10
+ ** h1 0.1 WAIT4 pid=12728 status=0x0002 (user 0.000000 sys 0.004000)
+ ** s1 0.1 Waiting for server (4/-1)
+ * top 0.1 TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc completed
+ # top TEST /home/fred/src/varnish-cache-haproxy/d02286d.vtc passed (0.128)
+
+In this latter execution the third syslog message is correct:
+
+ **** Slg_1 0.0 syslog|<134>Jun 7 14:10:18 haproxy[12728]: {"dip":"::1","dport":"39264","c_ip":"::1","c_port":"41245","fe_name":"fe1","be_name":"be_app","s_name":"app1","ts":"cD","bytes_read":"38"}
diff --git a/doc/seamless_reload.txt b/doc/seamless_reload.txt
new file mode 100644
index 0000000..94df1bd
--- /dev/null
+++ b/doc/seamless_reload.txt
@@ -0,0 +1,62 @@
+Reloading HAProxy without impacting server states
+=================================================
+
+Of course, to fully understand the information below, please consult
+doc/configuration.txt to understand how each HAProxy directive works.
+
+In the main lines, we update HAProxy's configuration to tell it where to
+retrieve the last known trustable servers state.
+Then, before reloading HAProxy, we simply dump servers state from running
+process into the locations we pointed into the configuration.
+And voilà :)
+
+
+Using one file for all backends
+-------------------------------
+
+HAProxy configuration
+*********************
+
+ global
+ [...]
+ stats socket /var/run/haproxy/socket
+ server-state-file global
+ server-state-base /var/state/haproxy/
+
+ defaults
+ [...]
+ load-server-state-from-file global
+
+HAProxy init script
+*******************
+
+Run the following command BEFORE reloading:
+
+ socat /var/run/haproxy/socket - <<< "show servers state" > /var/state/haproxy/global
+
+
+Using one state file per backend
+--------------------------------
+
+HAProxy configuration
+*********************
+
+ global
+ [...]
+ stats socket /var/run/haproxy/socket
+ server-state-base /var/state/haproxy/
+
+ defaults
+ [...]
+ load-server-state-from-file local
+
+HAProxy init script
+*******************
+
+Run the following command BEFORE reloading:
+
+ for b in $(socat /var/run/haproxy/socket - <<< "show backend" | fgrep -v '#')
+ do
+ socat /var/run/haproxy/socket - <<< "show servers state $b" > /var/state/haproxy/$b
+ done
+
diff --git a/examples/basic-config-edge.cfg b/examples/basic-config-edge.cfg
new file mode 100644
index 0000000..8ee6bda
--- /dev/null
+++ b/examples/basic-config-edge.cfg
@@ -0,0 +1,131 @@
+# This configuration creates a classical reverse-proxy and load balancer for
+# public services. It presents ports 80 and 443 (with 80 redirecting to 443),
+# enables caching up to one hour, and load-balances the service on a farm of
+# 4 servers on private IP addresses which are checked using HTTP checks and
+# by maintaining stickiness via session cookies. It offloads TLS processing
+# and enables HTTP compression. It uses HAProxy 2.4.
+
+# The global section deals with process-wide settings (security, resource usage)
+global
+ # all file names are relative to the directory containing this config
+ # file by default
+ default-path config
+
+ # refuse to start if any warning is emitted at boot (keep configs clean)
+ zero-warning
+
+ # Security hardening: isolate and drop privileges
+ chroot /var/empty
+ user haproxy
+ group haproxy
+
+ # daemonize
+ daemon
+ pidfile /var/run/haproxy-svc1.pid
+
+ # do not keep old processes longer than that after a reload
+ hard-stop-after 5m
+
+ # The command-line-interface (CLI) used by the admin, by provisioning
+ # tools, and to transfer sockets during reloads
+ stats socket /var/run/haproxy-svc1.sock level admin mode 600 user haproxy expose-fd listeners
+ stats timeout 1h
+
+ # send logs to stderr for logging via the service manager
+ log stderr local0 info
+
+ # intermediate security for SSL, from https://ssl-config.mozilla.org/
+ ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
+
+# default settings common to all HTTP proxies below
+defaults http
+ mode http
+ option httplog
+ log global
+ timeout client 1m
+ timeout server 1m
+ timeout connect 10s
+ timeout http-keep-alive 2m
+ timeout queue 15s
+ timeout tunnel 4h # for websocket
+
+# provide a stats page on port 8181
+frontend stats
+ bind :8181
+ # provide advanced stats (ssl, h2, ...)
+ stats uri /
+ stats show-modules
+ # some users may want to protect the access to their stats and/or to
+ # enable admin mode on the page from local networks
+ # stats auth admin:mystats
+ # stats admin if { src 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 127.0.0.0/8 }
+
+# First incoming public service. Supports HTTP/1.x, HTTP/2, and HTTP/3 over
+# QUIC when built in, uses HSTS, redirects clear to TLS. Uses a dedicated host
+# name for the stats page.
+frontend pub1
+ bind :80 name clear
+ bind :443 name secure ssl crt pub1.pem
+ option socket-stats # provide per-bind line stats
+
+.if feature(QUIC)
+ # indicate QUIC support for 25 hours
+ bind quic4@:443 name quic ssl crt pub1.pem allow-0rtt
+ http-response add-header alt-svc 'h3=":443"; ma=90000'
+.endif
+
+ # set HSTS for one year after all responses
+ http-after-response set-header Strict-Transport-Security "max-age=31536000"
+ http-request redirect scheme https code 301 if !{ ssl_fc }
+
+ # silently ignore connect probes and pre-connect without request
+ option http-ignore-probes
+
+ # pass client's IP address to the server and prevent against attempts
+ # to inject bad contents
+ http-request del-header x-forwarded-for
+ option forwardfor
+
+ # enable HTTP compression of text contents
+ compression algo deflate gzip
+ compression type text/ application/javascript application/xhtml+xml image/x-icon
+
+ # enable HTTP caching of any cacheable content
+ http-request cache-use cache
+ http-response cache-store cache
+
+ default_backend app1
+
+# The cache instance used by the frontend (200MB, 10MB max object, 1 hour max)
+# May be consulted using "show cache" on the CLI socket
+cache cache
+ total-max-size 200 # RAM cache size in megabytes
+ max-object-size 10485760 # max cacheable object size in bytes
+ max-age 3600 # max cache duration in seconds
+ process-vary on # handle the Vary header (otherwise don't cache)
+
+# First application
+backend app1
+ # Algorithm:
+ # - roundrobin is usually better for short requests,
+ # - leastconn is better for mixed slow ones, and long transfers,
+ # - random is generally good when using multiple load balancers
+ balance random
+
+ # abort if the client clicks on stop.
+ option abortonclose
+
+ # insert a session cookie for user stickiness
+ cookie app1 insert indirect nocache
+
+ # check the servers' health using HTTP requests
+ option httpchk
+ http-check send meth GET uri / ver HTTP/1.1 hdr host svc1.example.com
+
+ # do not overload the servers (100 concurrent conns max each)
+ server srv1 192.0.2.1:80 cookie s1 maxconn 100 check inter 1s
+ server srv2 192.0.2.2:80 cookie s2 maxconn 100 check inter 1s
+ server srv3 192.0.2.3:80 cookie s3 maxconn 100 check inter 1s
+ server srv4 192.0.2.4:80 cookie s4 maxconn 100 check inter 1s
diff --git a/examples/content-sw-sample.cfg b/examples/content-sw-sample.cfg
new file mode 100644
index 0000000..e54f976
--- /dev/null
+++ b/examples/content-sw-sample.cfg
@@ -0,0 +1,65 @@
+#
+# This is a sample configuration. It illustrates how to separate static objects
+# traffic from dynamic traffic, and how to dynamically regulate the server load.
+#
+# It listens on 192.168.1.10:80, and directs all requests for Host 'img' or
+# URIs starting with /img or /css to a dedicated group of servers. URIs
+# starting with /admin/stats deliver the stats page.
+#
+
+global
+ maxconn 10000
+ stats socket /var/run/haproxy.stat mode 600 level admin
+ log 127.0.0.1 local0
+ uid 200
+ gid 200
+ chroot /var/empty
+ daemon
+
+# The public 'www' address in the DMZ
+frontend public
+ bind 192.168.1.10:80 name clear
+ #bind 192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ monitor-uri /monitoruri
+ maxconn 8000
+ timeout client 30s
+
+ stats uri /admin/stats
+ use_backend static if { hdr_beg(host) -i img }
+ use_backend static if { path_beg /img /css }
+ default_backend dynamic
+
+# The static backend for 'Host: img', /img and /css.
+backend static
+ mode http
+ balance roundrobin
+ option prefer-last-server
+ retries 2
+ option redispatch
+ timeout connect 5s
+ timeout server 5s
+ option httpchk HEAD /favicon.ico
+ server statsrv1 192.168.1.8:80 check inter 1000
+ server statsrv2 192.168.1.9:80 check inter 1000
+
+# the application servers go here
+backend dynamic
+ mode http
+ balance roundrobin
+ retries 2
+ option redispatch
+ timeout connect 5s
+ timeout server 30s
+ timeout queue 30s
+ option httpchk HEAD /login.php
+ cookie DYNSRV insert indirect nocache
+ fullconn 4000 # the servers will be used at full load above this number of connections
+ server dynsrv1 192.168.1.1:80 minconn 50 maxconn 500 cookie s1 check inter 1000
+ server dynsrv2 192.168.1.2:80 minconn 50 maxconn 500 cookie s2 check inter 1000
+ server dynsrv3 192.168.1.3:80 minconn 50 maxconn 500 cookie s3 check inter 1000
+ server dynsrv4 192.168.1.4:80 minconn 50 maxconn 500 cookie s4 check inter 1000
+
diff --git a/examples/errorfiles/400.http b/examples/errorfiles/400.http
new file mode 100644
index 0000000..e223e38
--- /dev/null
+++ b/examples/errorfiles/400.http
@@ -0,0 +1,9 @@
+HTTP/1.0 400 Bad request
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>400 Bad request</h1>
+Your browser sent an invalid request.
+</body></html>
+
diff --git a/examples/errorfiles/403.http b/examples/errorfiles/403.http
new file mode 100644
index 0000000..a67e807
--- /dev/null
+++ b/examples/errorfiles/403.http
@@ -0,0 +1,9 @@
+HTTP/1.0 403 Forbidden
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>403 Forbidden</h1>
+Request forbidden by administrative rules.
+</body></html>
+
diff --git a/examples/errorfiles/408.http b/examples/errorfiles/408.http
new file mode 100644
index 0000000..aafb130
--- /dev/null
+++ b/examples/errorfiles/408.http
@@ -0,0 +1,9 @@
+HTTP/1.0 408 Request Time-out
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>408 Request Time-out</h1>
+Your browser didn't send a complete request in time.
+</body></html>
+
diff --git a/examples/errorfiles/500.http b/examples/errorfiles/500.http
new file mode 100644
index 0000000..9c3be96
--- /dev/null
+++ b/examples/errorfiles/500.http
@@ -0,0 +1,9 @@
+HTTP/1.0 500 Internal Server Error
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>500 Internal Server Error</h1>
+An internal server error occurred.
+</body></html>
+
diff --git a/examples/errorfiles/502.http b/examples/errorfiles/502.http
new file mode 100644
index 0000000..94b35d4
--- /dev/null
+++ b/examples/errorfiles/502.http
@@ -0,0 +1,9 @@
+HTTP/1.0 502 Bad Gateway
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>502 Bad Gateway</h1>
+The server returned an invalid or incomplete response.
+</body></html>
+
diff --git a/examples/errorfiles/503.http b/examples/errorfiles/503.http
new file mode 100644
index 0000000..48fde58
--- /dev/null
+++ b/examples/errorfiles/503.http
@@ -0,0 +1,9 @@
+HTTP/1.0 503 Service Unavailable
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>503 Service Unavailable</h1>
+No server is available to handle this request.
+</body></html>
+
diff --git a/examples/errorfiles/504.http b/examples/errorfiles/504.http
new file mode 100644
index 0000000..f925184
--- /dev/null
+++ b/examples/errorfiles/504.http
@@ -0,0 +1,9 @@
+HTTP/1.0 504 Gateway Time-out
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html><body><h1>504 Gateway Time-out</h1>
+The server didn't respond in time.
+</body></html>
+
diff --git a/examples/errorfiles/README b/examples/errorfiles/README
new file mode 100644
index 0000000..a882632
--- /dev/null
+++ b/examples/errorfiles/README
@@ -0,0 +1,9 @@
+These files are default error files that can be customized
+if necessary. They are complete HTTP responses, so that
+everything is possible, including using redirects or setting
+special headers.
+
+They can be used with the 'errorfile' keyword like this :
+
+ errorfile 503 /etc/haproxy/errors/503.http
+
diff --git a/examples/haproxy.init b/examples/haproxy.init
new file mode 100644
index 0000000..cc120d8
--- /dev/null
+++ b/examples/haproxy.init
@@ -0,0 +1,137 @@
+#!/bin/sh
+#
+# chkconfig: - 85 15
+# description: HAProxy is a TCP/HTTP reverse proxy which is particularly suited \
+# for high availability environments.
+# processname: haproxy
+# config: /etc/haproxy/haproxy.cfg
+# pidfile: /var/run/haproxy.pid
+
+# Script Author: Simon Matter <simon.matter@invoca.ch>
+# Version: 2004060600
+
+# Source function library.
+if [ -f /etc/init.d/functions ]; then
+ . /etc/init.d/functions
+elif [ -f /etc/rc.d/init.d/functions ] ; then
+ . /etc/rc.d/init.d/functions
+else
+ exit 0
+fi
+
+# Source networking configuration.
+. /etc/sysconfig/network
+
+# Check that networking is up.
+[ ${NETWORKING} = "no" ] && exit 0
+
+# This is our service name
+BASENAME=`basename $0`
+if [ -L $0 ]; then
+ BASENAME=`find $0 -name $BASENAME -printf %l`
+ BASENAME=`basename $BASENAME`
+fi
+
+BIN=/usr/sbin/$BASENAME
+
+CFG=/etc/$BASENAME/$BASENAME.cfg
+[ -f $CFG ] || exit 1
+
+PIDFILE=/var/run/$BASENAME.pid
+LOCKFILE=/var/lock/subsys/$BASENAME
+
+RETVAL=0
+
+start() {
+ quiet_check
+ if [ $? -ne 0 ]; then
+ echo "Errors found in configuration file, check it with '$BASENAME check'."
+ return 1
+ fi
+
+ echo -n "Starting $BASENAME: "
+ daemon $BIN -D -f $CFG -p $PIDFILE
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch $LOCKFILE
+ return $RETVAL
+}
+
+stop() {
+ echo -n "Shutting down $BASENAME: "
+ killproc $BASENAME -USR1
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f $LOCKFILE
+ [ $RETVAL -eq 0 ] && rm -f $PIDFILE
+ return $RETVAL
+}
+
+restart() {
+ quiet_check
+ if [ $? -ne 0 ]; then
+ echo "Errors found in configuration file, check it with '$BASENAME check'."
+ return 1
+ fi
+ stop
+ start
+}
+
+reload() {
+ if ! [ -s $PIDFILE ]; then
+ return 0
+ fi
+
+ quiet_check
+ if [ $? -ne 0 ]; then
+ echo "Errors found in configuration file, check it with '$BASENAME check'."
+ return 1
+ fi
+ $BIN -D -f $CFG -p $PIDFILE -sf $(cat $PIDFILE)
+}
+
+check() {
+ $BIN -c -q -V -f $CFG
+}
+
+quiet_check() {
+ $BIN -c -q -f $CFG
+}
+
+rhstatus() {
+ status $BASENAME
+}
+
+condrestart() {
+ [ -e $LOCKFILE ] && restart || :
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ restart
+ ;;
+ reload)
+ reload
+ ;;
+ condrestart)
+ condrestart
+ ;;
+ status)
+ rhstatus
+ ;;
+ check)
+ check
+ ;;
+ *)
+ echo $"Usage: $BASENAME {start|stop|restart|reload|condrestart|status|check}"
+ exit 1
+esac
+
+exit $?
diff --git a/examples/lua/README b/examples/lua/README
new file mode 100644
index 0000000..620d832
--- /dev/null
+++ b/examples/lua/README
@@ -0,0 +1,7 @@
+These files are example lua scripts that can be customized
+if necessary.
+
+They can be loaded with the 'lua-load' keyword like this:
+
+ lua-load /path/to/lua_script.lua
+
diff --git a/examples/lua/event_handler.lua b/examples/lua/event_handler.lua
new file mode 100644
index 0000000..41ed712
--- /dev/null
+++ b/examples/lua/event_handler.lua
@@ -0,0 +1,28 @@
+-- haproxy event-handling from Lua
+--
+-- This file serves as a demo to show you the various events that
+-- can be handled directly from custom lua functions.
+-- Events captured from lua will be printed directly to STDOUT
+-- It may not be exhaustive, please refer to the lua documentation
+-- in doc/lua-api/index.rst for up-to-date content and further explanations
+
+-- subscribe to every SERVER family events, this is the equivalent of doing:
+-- core.event_sub({"SERVER_ADD", "SERVER_DEL", "SERVER_UP", "SERVER_DOWN"}, ...)
+core.event_sub({"SERVER"}, function(event, data)
+ -- This function will be called when:
+ -- - new server is added from the CLI (SERVER_ADD)
+ -- - existing server is removed from the CLI (SERVER_DEL)
+ -- - existing server state changes from UP to DOWN (SERVER_DOWN)
+ -- - existing server state changes from DOWN to UP (SERVER_UP)
+ -- If the server still exists at the time the function is called, data["reference"]
+ -- contains a valid reference to the lua server object related to the event
+ --
+ sv_status = data["reference"] ~= nil and data["reference"]:get_stats().status or "DELETED"
+ print("[DEBUG - FROM LUA]", "EventType." .. event .. ": " ..
+ "server " .. data["proxy_name"] .. "/" .. data["name"] .. " " ..
+ "is " .. sv_status)
+end)
+-- Please note that you may also use Server.event_sub() method to subscribe to events
+-- relative to a specific server only. See the lua documentation for more information.
+
+-- New event families will be added over time...
diff --git a/examples/lua/mailers.lua b/examples/lua/mailers.lua
new file mode 100644
index 0000000..47a8356
--- /dev/null
+++ b/examples/lua/mailers.lua
@@ -0,0 +1,426 @@
+-- haproxy mailers implementation in lua
+--
+-- Provides a pure lua alternative to tcpcheck mailers.
+--
+-- To be loaded using "lua-load" from haproxy configuration to handle
+-- email-alerts directly from lua and disable legacy tcpcheck implementation.
+
+local SYSLOG_LEVEL = {
+ ["EMERG"] = 0,
+ ["ALERT"] = 1,
+ ["CRIT"] = 2,
+ ["ERROR"] = 3,
+ ["WARN"] = 4,
+ ["NOTICE"] = 5,
+ ["INFO"] = 6,
+ ["DEBUG"] = 7
+}
+
+local mailqueue = core.queue()
+
+-- smtp : send SMTP message
+--
+-- Copyright 2018 Thierry Fournier
+--
+-- This function is compliant with HAProxy cosockets
+-- EHLO was replaced with HELO for better compatibility with
+-- basic mail server implementations
+--
+-- <server> should contain the full server address (including port) in the
+-- same format used in haproxy config file. It will be passed as it is to
+-- tcp::connect() without explicit port argument. See Socket.connect()
+-- manual for more information.
+--
+-- The function will abort after <timeout> ms
+function smtp_send_email(server, timeout, domain, from, to, data)
+ local ret
+ local reason
+ local tcp = core.tcp()
+ local smtp_wait_code = function(tcp, code)
+ local ret
+ -- Read headers until we reach a 2.. code.
+ while true do
+ -- read line
+ ret = tcp:receive("*l")
+ if ret == nil then
+ return false, "Connection unexpectedly closed"
+ end
+ -- expected code
+ if string.match(ret, code) ~= nil then
+ return true, nil
+ end
+ -- other code
+ if string.match(ret, '^%d%d%d ') ~= nil then
+ return false, ret
+ end
+ -- other informational message, wait.
+ end
+ end
+
+ if timeout ~= nil and timeout > 0 then
+ tcp:settimeout(timeout / 1000)
+ end
+
+ if tcp:connect(server) == nil then
+ return false, "Can't connect to \""..server.."\""
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^220 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send("HELO " .. domain .. "\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^250 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send("MAIL FROM: <" .. from .. ">\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^250 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send("RCPT TO: <" .. to .. ">\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^250 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send("DATA\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^354 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send(data .. "\r\n.\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^250 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ if tcp:send("QUIT\r\n") == nil then
+ tcp:close()
+ return false, "Connection unexpectedly closed"
+ end
+
+ ret, reason = smtp_wait_code(tcp, '^221 ')
+ if ret == false then
+ tcp:close()
+ return false, reason
+ end
+
+ tcp:close()
+ return true, nil
+end
+
+local function send_email_alert(srv, level, message, when)
+ local mailers = srv:get_proxy():get_mailers()
+
+ if mailers == nil then
+ return -- nothing to do
+ end
+
+ if level > mailers.log_level then
+ return
+ end
+
+ -- email sending is performed asynchronously thanks to mailqueue
+ local job = {}
+
+ job.mailconf = mailers
+ job.when = when
+ job.msg = message
+
+ -- enqueue email job
+ mailqueue:push(job)
+
+end
+
+local function srv_get_check_details(check)
+ local c = core.concat()
+
+ c:add(", ")
+ c:add(string.format("reason: %s", check.reason.desc))
+ if check.reason.code ~= nil
+ then
+ c:add(string.format(", code: %d", check.reason.code))
+ end
+ if check.duration >= 0
+ then
+ c:add(string.format(", check duration: %dms", check.duration))
+ end
+
+ return c:dump()
+end
+
+local function srv_get_status_details(srv, requeued)
+ local c = core.concat()
+
+ c:add(string.format("%d active and %d backup servers left.",
+ srv:get_proxy():get_srv_act(),
+ srv:get_proxy():get_srv_bck()))
+ c:add(" ")
+ c:add(string.format("%d sessions active, %d requeued, %d remaining in queue",
+ srv:get_cur_sess(),
+ requeued,
+ srv:get_pend_conn()))
+ return c:dump()
+end
+
+local function srv_state_handler(event, data, when)
+ local server = data.reference
+ local state = data.state
+ local c = core.concat()
+ local log_level = SYSLOG_LEVEL["ALERT"]
+ local message
+
+ if server == nil then
+ -- server already removed, can't do much
+ return
+ end
+
+ if state.admin then
+ -- don't report if it is related to an administrative change and not
+ -- directly due to an operational change
+ return
+ end
+
+ -- we don't send an alert if the server was previously stopping
+ if state.old_state == "STOPPING" or server:is_draining() then
+ log_level = SYSLOG_LEVEL["NOTICE"]
+ end
+
+ -- prepare the message
+ c:add(string.format("Server %s/%s is %s",
+ server:get_proxy():get_name(),
+ server:get_name(),
+ state.new_state == "RUNNING" and "UP" or "DOWN"))
+
+ if server:tracking()
+ then
+ -- server is tracking another server, it means that the operational
+ -- state change is inherited
+ c:add(string.format(" via %s/%s",
+ server:tracking():get_proxy():get_name(),
+ server:tracking():get_name()))
+ end
+
+ if state.check ~= nil
+ then
+ c:add(srv_get_check_details(state.check))
+ else
+ c:add(state.cause)
+ end
+
+ c:add(". ")
+ c:add(srv_get_status_details(server, state.requeued))
+ send_email_alert(server, log_level, c:dump(), when)
+end
+
+local function srv_admin_handler(event, data, when)
+ local server = data.reference
+ local admin = data.admin
+ local c = core.concat()
+
+ if server == nil then
+ -- server already removed, can't do much
+ return
+ end
+
+ -- only send an email when server is entering drain state and not under maint
+ if not (not admin.old_admin["DRAIN"] and
+ admin.new_admin["DRAIN"] and
+ not admin.new_admin["MAINT"]) then
+ return
+ end
+
+ -- prepare the message
+ c:add(string.format("Server %s/%s enters drain state",
+ server:get_proxy():get_name(),
+ server:get_name()))
+
+ if server:tracking() and admin.new_admin["IDRAIN"]
+ then
+ -- server is tracking another server and IDRAIN is set, it means
+ -- that the admin state change is inherited
+ c:add(string.format(" via %s/%s",
+ server:tracking():get_proxy():get_name(),
+ server:tracking():get_name()))
+ end
+
+ c:add(". ")
+ c:add(srv_get_status_details(server, admin.requeued))
+ send_email_alert(server, SYSLOG_LEVEL["NOTICE"], c:dump(), when)
+end
+
+local function srv_check_handler(event, data, when)
+ local server = data.reference
+ local check = data.check
+ local c = core.concat()
+
+ if server == nil then
+ -- server already removed, can't do much
+ return
+ end
+
+ -- we will always send an email, prepare the message
+ c:add(string.format("%s check for %sserver %s/%s ",
+ check.agent and "Agent" or "Health",
+ server:is_backup() and "backup " or "",
+ server:get_proxy():get_name(),
+ server:get_name()))
+
+ if check.result == "CONDPASS" then
+ c:add("conditionally succeeded")
+ elseif check.result == "PASSED" then
+ c:add("succeeded")
+ else
+ c:add("failed")
+ end
+
+ c:add(srv_get_check_details(check))
+
+ c:add(", status: ")
+ if check.health.cur >= check.health.rise then
+ -- good
+ c:add(string.format("%d/%d %s",
+ check.health.cur - check.health.rise + 1,
+ check.health.fall,
+ server:get_weight() and "UP" or "DRAIN"))
+ else
+ -- bad
+ c:add(string.format("%d/%d DOWN",
+ check.health.cur,
+ check.health.rise))
+ end
+
+ send_email_alert(server, SYSLOG_LEVEL["INFO"], c:dump(), when)
+end
+
+-- single function for multiple event types since all events come
+-- from the same subscription to reduce memory footprint
+local function srv_event_dispatch(event, data, mgmt, when)
+ if event == "SERVER_STATE" then srv_state_handler(event, data, when) end
+ if event == "SERVER_ADMIN" then srv_admin_handler(event, data, when) end
+ if event == "SERVER_CHECK" then srv_check_handler(event, data, when) end
+end
+
+local function mailers_track_server_events(srv)
+ local mailer_conf = srv:get_proxy():get_mailers()
+
+ -- don't track server events if the parent proxy did not enable email alerts
+ if mailer_conf == nil
+ then return
+ end
+
+ -- email alerts are enabled, track server state and admin changes
+ local subscriptions = {"SERVER_STATE", "SERVER_ADMIN"}
+
+ if mailer_conf.track_server_health
+ then
+ -- track server check events as well (this event source is expensive)
+ table.insert(subscriptions, "SERVER_CHECK")
+ end
+
+ -- perform the event subscription from the server
+ srv:event_sub(subscriptions, srv_event_dispatch)
+end
+
+local function srv_event_add(event, data)
+ -- do nothing if the server was already removed
+ if data.reference == nil
+ then return
+ end
+
+ -- server still exists, check if it can be tracked for email alerts
+ mailers_track_server_events(data.reference)
+end
+
+
+-- disable legacy email-alerts since email-alerts will be sent from lua directly
+core.disable_legacy_mailers()
+
+-- event subscriptions are purposely performed in an init function to prevent
+-- email alerts from being generated too early (when process is starting up)
+core.register_init(function()
+
+ -- do nothing if not on primary thread
+ -- this prevents emails from being sent multiple times when
+ -- lua-load-per-thread is used to load the script since the task
+ -- will be started on each haproxy thread
+ if core.thread > 1 then core.done() end
+
+ -- subscribe to SERVER_ADD to be notified when new servers are added
+ core.event_sub({"SERVER_ADD"}, srv_event_add)
+
+ -- loop through existing backends to detect existing servers
+ for backend_name, backend in pairs(core.backends) do
+ for srv_name, srv in pairs(backend.servers) do
+ mailers_track_server_events(srv)
+ end
+ end
+
+end)
+
+-- mail queue
+core.register_task(function()
+ while true
+ do
+ local job = mailqueue:pop_wait()
+
+ if job ~= nil then
+ local date = os.date("%a, %d %b %Y %T %z (%Z)", job.when)
+ local c = core.concat()
+
+ -- prepare email body
+ c:add(string.format("From: %s\r\n", job.mailconf.smtp_from))
+ c:add(string.format("To: %s\r\n", job.mailconf.smtp_to))
+ c:add(string.format("Date: %s\r\n", date))
+ c:add(string.format("Subject: [HAProxy Alert] %s\r\n", job.msg))
+ c:add("\r\n")
+ c:add(string.format("%s\r\n", job.msg))
+
+ -- send email to all mailservers
+ for name, mailsrv in pairs(job.mailconf.mailservers) do
+ -- finally, send email to server
+ local ret, reason = smtp_send_email(mailsrv,
+ job.mailconf.mailservers_timeout,
+ job.mailconf.smtp_hostname,
+ job.mailconf.smtp_from,
+ job.mailconf.smtp_to,
+ c:dump())
+ if ret == false then
+ core.Warning("Can't send email alert to ".. name .. ": " .. reason)
+ end
+ end
+ end
+ end
+end)
diff --git a/examples/option-http_proxy.cfg b/examples/option-http_proxy.cfg
new file mode 100644
index 0000000..8b28f67
--- /dev/null
+++ b/examples/option-http_proxy.cfg
@@ -0,0 +1,54 @@
+#
+# demo config for Proxy mode
+#
+
+global
+ maxconn 20000
+ ulimit-n 16384
+ log 127.0.0.1 local0
+ uid 200
+ gid 200
+ chroot /var/empty
+ daemon
+
+frontend test-proxy
+ bind 192.168.200.10:8080
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ maxconn 8000
+ timeout client 30s
+
+ # layer3: Valid users
+ acl allow_host src 192.168.200.150/32
+ http-request deny if !allow_host
+
+ # layer7: prevent private network relaying
+ acl forbidden_dst url_ip 192.168.0.0/24
+ acl forbidden_dst url_ip 172.16.0.0/12
+ acl forbidden_dst url_ip 10.0.0.0/8
+ http-request deny if forbidden_dst
+
+ default_backend test-proxy-srv
+
+
+backend test-proxy-srv
+ mode http
+ timeout connect 5s
+ timeout server 5s
+ retries 2
+
+ # layer7: Only GET method is valid
+ acl valid_method method GET
+ http-request deny if !valid_method
+
+ # take IP address from URL's authority
+ # and drop scheme+authority from URI
+ http-request set-dst url_ip
+ http-request set-dst-port url_port
+ http-request set-uri %[pathq]
+ server next-hop 0.0.0.0
+
+ # layer7: protect bad reply
+ http-response deny if { res.hdr(content-type) audio/mp3 }
diff --git a/examples/quick-test.cfg b/examples/quick-test.cfg
new file mode 100644
index 0000000..f27eeff
--- /dev/null
+++ b/examples/quick-test.cfg
@@ -0,0 +1,29 @@
+# Basic config mapping a listening IP:port to another host's IP:port with
+# support for HTTP/1 and 2.
+
+global
+ strict-limits # refuse to start if insufficient FDs/memory
+ # add some process-wide tuning here if required
+
+ # A stats socket may be added to check live metrics if the load generators
+ # do not report them.
+ # stats socket /tmp/haproxy.sock level admin
+ # stats timeout 1h
+
+defaults
+ mode http
+ balance random # power-of-two-choices
+ timeout client 60s
+ timeout server 60s
+ timeout connect 1s
+
+listen p
+ # this is the address and port we'll listen to, the ones to aim the
+ # load generators at
+ bind :8000
+
+ # create a certificate and uncomment this for SSL
+ # bind :8443 ssl crt my-cert.pem alpn h2,http/1.1
+
+ # Put the server's IP address and port below
+ server s1 172.31.32.33:8000
diff --git a/examples/socks4.cfg b/examples/socks4.cfg
new file mode 100644
index 0000000..2cbd417
--- /dev/null
+++ b/examples/socks4.cfg
@@ -0,0 +1,55 @@
+global
+ log /dev/log local0
+ log /dev/log local1 notice
+ stats timeout 30s
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+
+listen SMTP-20025
+ bind 0.0.0.0:20025
+ mode tcp
+ option tcplog
+ maxconn 2000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ option tcp-check
+ server SMTPS1 192.0.2.1:25 check inter 30000 fastinter 1000
+ server SMTPS2_Via_SocksProxy1 192.0.2.2:25 socks4 127.0.0.1:1080 check-via-socks4 check inter 30000 fastinter 1000 backup
+
+listen SSL-20080
+ bind 0.0.0.0:20080
+ mode tcp
+ option tcplog
+ maxconn 2000
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ option tcp-check
+ server HTTPS1_Via_SocksProxy1 192.0.2.1:443 ssl verify none socks4 127.0.0.1:1080 check inter 30000 fastinter 1000
+ server HTTPS2 192.0.2.2:443 ssl verify none check inter 30000 fastinter 1000 backup
+
+# HAProxy web ui
+listen stats
+ bind 0.0.0.0:20936
+ mode http
+ log global
+
+ maxconn 10
+ timeout client 100s
+ timeout server 100s
+ timeout connect 100s
+ timeout queue 100s
+
+ stats enable
+ stats uri /haproxy?stats
+ stats realm HAProxy\ Statistics
+ stats admin if TRUE
+ stats show-node
diff --git a/examples/transparent_proxy.cfg b/examples/transparent_proxy.cfg
new file mode 100644
index 0000000..a8cf6d9
--- /dev/null
+++ b/examples/transparent_proxy.cfg
@@ -0,0 +1,55 @@
+#
+# This is an example of how to configure HAProxy to be used as a 'full transparent proxy' for a single backend server.
+#
+# Note that to actually make this work extra firewall/nat rules are required.
+# Also HAProxy needs to be compiled with support for this, in HAProxy1.5-dev19 you can check if this is the case with "haproxy -vv".
+#
+
+global
+defaults
+ timeout client 30s
+ timeout server 30s
+ timeout connect 30s
+
+frontend MyFrontend
+ bind 192.168.1.22:80
+ default_backend TransparentBack_http
+
+backend TransparentBack_http
+ mode http
+ source 0.0.0.0 usesrc client
+ server MyWebServer 192.168.0.40:80
+
+#
+# To create the nat rules perform the following:
+#
+# ### (FreeBSD 8) ###
+# --- Step 1 ---
+# ipfw is needed to get 'reply traffic' back to the HAProxy process, this can be achieved by configuring a rule like this:
+# fwd localhost tcp from 192.168.0.40 80 to any in recv em0
+#
+# The following would be even better but this did not seem to work on the pfSense2.1 distribution of FreeBSD 8.3:
+# fwd 127.0.0.1:80 tcp from any 80 to any in recv ${outside_iface} uid ${proxy_uid}
+#
+# If only 'pf' is currently used some additional steps are needed to load and configure ipfw:
+# You need to configure this to always run on startup:
+#
+# /sbin/kldload ipfw
+# /sbin/sysctl net.inet.ip.pfil.inbound="pf" net.inet6.ip6.pfil.inbound="pf" net.inet.ip.pfil.outbound="pf" net.inet6.ip6.pfil.outbound="pf"
+# /sbin/sysctl net.link.ether.ipfw=1
+# ipfw add 10 fwd localhost tcp from 192.168.0.40 80 to any in recv em0
+#
+# the above does the following:
+# - load the ipfw kernel module
+# - set pf as the outer firewall to keep control of routing packets for example to route them to a non-default gateway
+# - enable ipfw
+# - set a rule that catches reply traffic on em0 coming from the webserver
+#
+# --- Step 2 ---
+# To also make the client connection transparent it's possible to redirect incoming requests to HAProxy with a pf rule:
+# rdr on em1 proto tcp from any to 192.168.0.40 port 80 -> 192.168.1.22
+# here em1 is the interface that faces the clients, and traffic that is originally sent straight to the webserver is redirected to HAProxy
+#
+# ### (FreeBSD 9) (OpenBSD 4.4) ###
+# pf supports "divert-reply" which is probably better suited for the job above than ipfw..
+#
diff --git a/examples/wurfl-example.cfg b/examples/wurfl-example.cfg
new file mode 100644
index 0000000..52df68e
--- /dev/null
+++ b/examples/wurfl-example.cfg
@@ -0,0 +1,41 @@
+#
+# This is an example of how to configure HAProxy to be used with WURFL Device Detection module.
+#
+# HAProxy needs to be compiled with support for this. See README section 1.3
+#
+
+global
+
+ # The WURFL data file
+ wurfl-data-file /usr/share/wurfl/wurfl.zip
+
+ # WURFL patches definition (as much as needed, patches will be applied in the same order as specified in this conf file)
+ #wurfl-patch-file /path/to/patch1.xml;
+
+ #wurfl-cache-size 100000
+ ## no cache
+ #wurfl-cache-size 0
+
+ wurfl-information-list-separator |
+
+ # list of WURFL capabilities, virtual capabilities, property names planned to be used in injected headers
+ wurfl-information-list wurfl_id model_name
+
+defaults
+ mode http
+ timeout connect 30s
+ timeout client 30s
+ timeout server 30s
+
+frontend TheFrontend
+ bind 192.168.1.22:80
+ default_backend TheBackend
+
+ # inject a header called X-Wurfl-All with all the WURFL information listed in wurfl-information-list
+ http-request set-header X-Wurfl-All %[wurfl-get-all()]
+
+ # inject a header called X-WURFL-PROPERTIES with the "wurfl_id" information (should be listed in wurfl-information-list)
+ #http-request set-header X-WURFL-PROPERTIES %[wurfl-get(wurfl_id)]
+
+backend TheBackend
+ server TheWebServer 192.168.0.40:80
diff --git a/include/haproxy/acl-t.h b/include/haproxy/acl-t.h
new file mode 100644
index 0000000..34b7e40
--- /dev/null
+++ b/include/haproxy/acl-t.h
@@ -0,0 +1,160 @@
+/*
+ * include/haproxy/acl-t.h
+ * This file provides structures and types for ACLs.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACL_T_H
+#define _HAPROXY_ACL_T_H
+
+#include <haproxy/pattern-t.h>
+#include <haproxy/sample-t.h>
+
+/* ACL test result.
+ *
+ * We're using a 3-state matching system :
+ * - PASS : at least one pattern already matches
+ * - MISS : some data is missing to decide if some rules may finally match.
+ * - FAIL : no pattern may ever match
+ *
+ * We assign values 0, 1 and 3 to FAIL, MISS and PASS respectively, so that we
+ * can make use of standard arithmetic for the truth tables below :
+ *
+ * x | !x x&y | F(0) | M(1) | P(3) x|y | F(0) | M(1) | P(3)
+ * ------+----- -----+------+------+----- -----+------+------+-----
+ * F(0) | P(3) F(0)| F(0) | F(0) | F(0) F(0)| F(0) | M(1) | P(3)
+ * M(1) | M(1) M(1)| F(0) | M(1) | M(1) M(1)| M(1) | M(1) | P(3)
+ * P(3) | F(0) P(3)| F(0) | M(1) | P(3) P(3)| P(3) | P(3) | P(3)
+ *
+ * neg(x) = (3 >> x) and(x,y) = (x & y) or(x,y) = (x | y)
+ *
+ * For efficiency, the ACL return flags are directly mapped from the pattern
+ * match flags. See include/pattern.h for existing values.
+ */
+enum acl_test_res {
+ ACL_TEST_FAIL = 0, /* test failed */
+ ACL_TEST_MISS = 1, /* test may pass with more info */
+ ACL_TEST_PASS = 3, /* test passed */
+};
+
+/* Condition polarity. It makes it easier for any option to choose between
+ * IF/UNLESS if it can store that information within the condition itself.
+ * Those should be interpreted as "IF/UNLESS result == PASS".
+ */
+enum acl_cond_pol {
+ ACL_COND_NONE, /* no polarity set yet */
+ ACL_COND_IF, /* positive condition (after 'if') */
+ ACL_COND_UNLESS, /* negative condition (after 'unless') */
+};
+
+/*
+ * ACL keyword: Associates keywords with parsers, methods to retrieve the value and testers.
+ */
+/*
+ * NOTE:
+ * The 'parse' function is called to parse words in the configuration. It must
+ * return the number of valid words read. 0 = error. The 'opaque' argument may
+ * be used by functions which need to maintain a context between consecutive
+ * values. It is initialized to zero before the first call, and passed along
+ * successive calls.
+ */
+
+struct acl_expr;
+struct acl_keyword {
+ const char *kw;
+ char *fetch_kw;
+ int match_type; /* Contain PAT_MATCH_* */
+ int (*parse)(const char *text, struct pattern *pattern, int flags, char **err);
+ int (*index)(struct pattern_expr *expr, struct pattern *pattern, char **err);
+ void (*delete)(struct pat_ref *, struct pat_ref_elt *);
+ void (*prune)(struct pattern_expr *expr);
+ struct pattern *(*match)(struct sample *smp, struct pattern_expr *expr, int fill);
+ /* must be after the config params */
+ struct sample_fetch *smp; /* the sample fetch we depend on */
+};
+
+/*
+ * A keyword list. It is a NULL-terminated array of keywords. It embeds a
+ * struct list in order to be linked to other lists, allowing it to easily
+ * be declared where it is needed, and linked without duplicating data nor
+ * allocating memory.
+ */
+struct acl_kw_list {
+ struct list list;
+ struct acl_keyword kw[VAR_ARRAY];
+};
+
+/*
+ * Description of an ACL expression.
+ * The expression is part of a list. It contains pointers to the keyword, the
+ * sample fetch descriptor which defaults to the keyword's, and the associated
+ * pattern matching. The structure is organized so that the hot parts are
+ * grouped together in order to optimize caching.
+ */
+struct acl_expr {
+ struct sample_expr *smp; /* the sample expression we depend on */
+ struct pattern_head pat; /* the pattern matching expression */
+ struct list list; /* chaining */
+ const char *kw; /* points to the ACL kw's name or fetch's name (must not free) */
+};
+
+/* The acl will be linked to from the proxy where it is declared */
+struct acl {
+ struct list list; /* chaining */
+ char *name; /* acl name */
+ struct list expr; /* list of acl_exprs */
+ unsigned int use; /* or'ed bit mask of all acl_expr's SMP_USE_* */
+ unsigned int val; /* or'ed bit mask of all acl_expr's SMP_VAL_* */
+};
+
+/* the condition will be linked to from an action in a proxy */
+struct acl_term {
+ struct list list; /* chaining */
+ struct acl *acl; /* acl pointed to by this term */
+ int neg; /* 1 if the ACL result must be negated */
+};
+
+struct acl_term_suite {
+ struct list list; /* chaining of term suites */
+ struct list terms; /* list of acl_terms */
+};
+
+struct acl_cond {
+ struct list list; /* Some specific tests may use multiple conditions */
+ struct list suites; /* list of acl_term_suites */
+ enum acl_cond_pol pol; /* polarity: ACL_COND_IF / ACL_COND_UNLESS */
+ unsigned int use; /* or'ed bit mask of all suites's SMP_USE_* */
+ unsigned int val; /* or'ed bit mask of all suites's SMP_VAL_* */
+ const char *file; /* config file where the condition is declared */
+ int line; /* line in the config file where the condition is declared */
+};
+
+struct acl_sample {
+ struct acl_cond cond;
+ struct acl_term_suite suite;
+ struct acl_term terms[];
+};
+
+#endif /* _HAPROXY_ACL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/acl.h b/include/haproxy/acl.h
new file mode 100644
index 0000000..38b1739
--- /dev/null
+++ b/include/haproxy/acl.h
@@ -0,0 +1,157 @@
+/*
+ * include/haproxy/acl.h
+ * This file provides interface definitions for ACL manipulation.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACL_H
+#define _HAPROXY_ACL_H
+
+#include <haproxy/acl-t.h>
+#include <haproxy/api.h>
+#include <haproxy/arg-t.h>
+
+struct stream;
+
+/*
+ * FIXME: we need destructor functions too !
+ */
+
+/* Negate an acl result. This turns (ACL_MATCH_FAIL, ACL_MATCH_MISS,
+ * ACL_MATCH_PASS) into (ACL_MATCH_PASS, ACL_MATCH_MISS, ACL_MATCH_FAIL).
+ */
+static inline enum acl_test_res acl_neg(enum acl_test_res res)
+{
+ return (3 >> res);
+}
+
+/* Convert an acl result to a boolean. Only ACL_MATCH_PASS returns 1. */
+static inline int acl_pass(enum acl_test_res res)
+{
+ return (res >> 1);
+}
+
+/* Return a pointer to the ACL <name> within the list starting at <head>, or
+ * NULL if not found.
+ */
+struct acl *find_acl_by_name(const char *name, struct list *head);
+
+/* Return a pointer to the ACL keyword <kw> within the list starting at <head>,
+ * or NULL if not found. Note that if <kw> contains an opening parenthesis,
+ * only the left part of it is checked.
+ */
+struct acl_keyword *find_acl_kw(const char *kw);
+
+/* Parse an ACL expression starting at <args>[0], and return it.
+ * Right now, the only accepted syntax is :
+ * <subject> [<value>...]
+ */
+struct acl_expr *parse_acl_expr(const char **args, char **err, struct arg_list *al, const char *file, int line);
+
+/* Purge everything in the acl <acl>, then return <acl>. */
+struct acl *prune_acl(struct acl *acl);
+
+/* Parse an ACL with the name starting at <args>[0], and with a list of already
+ * known ACLs in <acl>. If the ACL was not in the list, it will be added.
+ * A pointer to that ACL is returned.
+ *
+ * args syntax: <aclname> <acl_expr>
+ */
+struct acl *parse_acl(const char **args, struct list *known_acl, char **err, struct arg_list *al, const char *file, int line);
+
+/* Parse an ACL condition starting at <args>[0], relying on a list of already
+ * known ACLs passed in <known_acl>. The new condition is returned (or NULL in
+ * case of low memory). Supports multiple conditions separated by "or".
+ */
+struct acl_cond *parse_acl_cond(const char **args, struct list *known_acl,
+ enum acl_cond_pol pol, char **err, struct arg_list *al,
+ const char *file, int line);
+
+/* Builds an ACL condition starting at the if/unless keyword. The complete
+ * condition is returned. NULL is returned in case of error or if the first
+ * word is neither "if" nor "unless". It automatically sets the file name and
+ * the line number in the condition for better error reporting, and sets the
+ * HTTP initialization requirements in the proxy. If <err> is not NULL, it will
+ * be set to an error message upon errors, that the caller will have to free.
+ */
+struct acl_cond *build_acl_cond(const char *file, int line, struct list *known_acl,
+ struct proxy *px, const char **args, char **err);
+
+/* Execute condition <cond> and return either ACL_TEST_FAIL, ACL_TEST_MISS or
+ * ACL_TEST_PASS depending on the test results. ACL_TEST_MISS may only be
+ * returned if <opt> does not contain SMP_OPT_FINAL, indicating that incomplete
+ * data is being examined. The function automatically sets SMP_OPT_ITERATE. This
+ * function only computes the condition, it does not apply the polarity required
+ * by IF/UNLESS, it's up to the caller to do this.
+ */
+enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt);
+
+/* Returns a pointer to the first ACL conflicting with usage at place <where>
+ * which is one of the SMP_VAL_* bits indicating a check place, or NULL if
+ * no conflict is found. Only full conflicts are detected (ACL is not usable).
+ * Use the next function to check for useless keywords.
+ */
+const struct acl *acl_cond_conflicts(const struct acl_cond *cond, unsigned int where);
+
+/* Returns a pointer to the first ACL and its first keyword to conflict with
+ * usage at place <where> which is one of the SMP_VAL_* bits indicating a check
+ * place. Returns true if a conflict is found, with <acl> and <kw> set (if non
+ * null), or false if no conflict is found. The first useless keyword is
+ * returned.
+ */
+int acl_cond_kw_conflicts(const struct acl_cond *cond, unsigned int where, struct acl const **acl, char const **kw);
+
+/*
+ * Find targets for userlist and groups in acl. Function returns the number
+ * of errors or OK if everything is fine.
+ */
+int acl_find_targets(struct proxy *p);
+
+/* Return a pointer to the ACL <name> within the list starting at <head>, or
+ * NULL if not found.
+ */
+struct acl *find_acl_by_name(const char *name, struct list *head);
+
+/*
+ * Registers the ACL keyword list <kwl> as a list of valid keywords for next
+ * parsing sessions.
+ */
+void acl_register_keywords(struct acl_kw_list *kwl);
+
+/*
+ * Unregisters the ACL keyword list <kwl> from the list of valid keywords.
+ */
+void acl_unregister_keywords(struct acl_kw_list *kwl);
+
+/* initializes ACLs by resolving the sample fetch names they rely upon.
+ * Returns 0 on success, otherwise an error.
+ */
+int init_acl(void);
+
+void acl_dump_kwd(void);
+
+void free_acl_cond(struct acl_cond *cond);
+
+#endif /* _HAPROXY_ACL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/action-t.h b/include/haproxy/action-t.h
new file mode 100644
index 0000000..f77bdce
--- /dev/null
+++ b/include/haproxy/action-t.h
@@ -0,0 +1,217 @@
+/*
+ * include/haproxy/action-t.h
+ * This file contains actions definitions.
+ *
+ * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACTION_T_H
+#define _HAPROXY_ACTION_T_H
+
+#include <haproxy/applet-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/vars-t.h>
+
+struct session;
+struct stream;
+struct proxy;
+
+enum act_from {
+ ACT_F_TCP_REQ_CON, /* tcp-request connection */
+ ACT_F_TCP_REQ_SES, /* tcp-request session */
+ ACT_F_TCP_REQ_CNT, /* tcp-request content */
+ ACT_F_TCP_RES_CNT, /* tcp-response content */
+ ACT_F_HTTP_REQ, /* http-request */
+ ACT_F_HTTP_RES, /* http-response */
+ ACT_F_TCP_CHK, /* tcp-check. */
+ ACT_F_CFG_PARSER, /* config parser */
+ ACT_F_CLI_PARSER, /* command line parser */
+};
+
+enum act_return {
+ ACT_RET_CONT, /* continue processing. */
+ ACT_RET_STOP, /* stop processing. */
+ ACT_RET_YIELD, /* call me again. */
+ ACT_RET_ERR, /* internal processing error. */
+ ACT_RET_DONE, /* processing done, stop processing */
+ ACT_RET_DENY, /* deny, must be handled by the caller */
+ ACT_RET_ABRT, /* abort, handled by the action itself. */
+ ACT_RET_INV, /* invalid request/response */
+};
+
+enum act_parse_ret {
+ ACT_RET_PRS_OK, /* continue processing. */
+ ACT_RET_PRS_ERR, /* abort processing. */
+};
+
+/* Option flags passed to custom actions */
+enum act_opt {
+ ACT_OPT_NONE = 0x00000000, /* no flag */
+ ACT_OPT_FINAL = 0x00000001, /* last call, cannot yield */
+ ACT_OPT_FIRST = 0x00000002, /* first call for this action */
+};
+
+/* Flags used to describe the action. */
+enum act_flag {
+ ACT_FLAG_FINAL = 1 << 0, /* the action stops the rules evaluation when executed */
+};
+
+
+/* known actions to be used without any action function pointer. This enum is
+ * typically used in a switch case, if and only if .action_ptr is undefined. So
+ * if an action function is defined for one of the following action types, the
+ * function has priority over the switch.
+ */
+enum act_name {
+ ACT_CUSTOM = 0,
+
+ /* common action */
+ ACT_ACTION_ALLOW,
+ ACT_ACTION_DENY,
+
+ /* common http actions. */
+ ACT_HTTP_REDIR,
+
+ /* http request actions. */
+ ACT_HTTP_REQ_TARPIT,
+
+ /* tcp actions */
+ ACT_TCP_EXPECT_PX,
+ ACT_TCP_EXPECT_CIP,
+ ACT_TCP_CLOSE, /* close at the sender's */
+};
+
+/* Timeout name valid for a set-timeout rule */
+enum act_timeout_name {
+ ACT_TIMEOUT_SERVER,
+ ACT_TIMEOUT_TUNNEL,
+ ACT_TIMEOUT_CLIENT,
+};
+
+enum act_normalize_uri {
+ ACT_NORMALIZE_URI_PATH_MERGE_SLASHES,
+ ACT_NORMALIZE_URI_PATH_STRIP_DOT,
+ ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT,
+ ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT_FULL,
+ ACT_NORMALIZE_URI_QUERY_SORT_BY_NAME,
+ ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE,
+ ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE_STRICT,
+ ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED,
+ ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED_STRICT,
+ ACT_NORMALIZE_URI_FRAGMENT_STRIP,
+ ACT_NORMALIZE_URI_FRAGMENT_ENCODE,
+};
+
+/* NOTE: if <.action_ptr> is defined, the referenced function will always be
+ * called regardless the action type. */
+struct act_rule {
+ struct list list;
+ struct acl_cond *cond; /* acl condition to meet */
+ unsigned int action; /* ACT_* or any meaningful value if action_ptr is defined */
+ unsigned int flags; /* ACT_FLAG_* */
+ enum act_from from; /* ACT_F_* */
+ enum act_return (*action_ptr)(struct act_rule *rule, struct proxy *px, /* ptr to custom action */
+ struct session *sess, struct stream *s, int opts);
+ int (*check_ptr)(struct act_rule *rule, struct proxy *px, char **err); /* ptr to check function */
+ void (*release_ptr)(struct act_rule *rule); /* ptr to release function */
+ const struct action_kw *kw;
+ struct applet applet; /* used for the applet registration. */
+ union {
+ struct {
+ struct sample_expr *expr;
+ char *varname;
+ char *resolvers_id;
+ struct resolvers *resolvers;
+ struct resolv_options *opts;
+ } resolv; /* resolving */
+ struct {
+ int i; /* integer param (status, nice, loglevel, ..) */
+ struct ist str; /* string param (reason, header name, ...) */
+ struct list fmt; /* log-format compatible expression */
+ struct my_regex *re; /* used by replace-header/value/uri/path */
+ } http; /* args used by some HTTP rules */
+ struct http_reply *http_reply; /* HTTP response to be used by return/deny/tarpit rules */
+ struct redirect_rule *redir; /* redirect rule or "http-request redirect" */
+ struct {
+ char *ref; /* MAP or ACL file name to update */
+ struct list key; /* pattern to retrieve MAP or ACL key */
+ struct list value; /* pattern to retrieve MAP value */
+ } map;
+ struct sample_expr *expr;
+ struct {
+ struct sample_expr *expr; /* expression used as the key */
+ struct cap_hdr *hdr; /* the capture storage */
+ } cap;
+ struct {
+ struct sample_expr *expr;
+ int idx;
+ } capid;
+ struct {
+ int value; /* plain timeout value in ms if no expr is used */
+ enum act_timeout_name type; /* timeout type */
+ struct sample_expr *expr; /* timeout value as an expression */
+ } timeout;
+ struct hlua_rule *hlua_rule;
+ struct {
+ struct list fmt; /* log-format compatible expression */
+ struct sample_expr *expr;
+ uint64_t name_hash;
+ enum vars_scope scope;
+ uint conditions; /* Bitfield of the conditions passed to this set-var call */
+ } vars;
+ struct {
+ int sc;
+ unsigned int idx;
+ long long int value;
+ struct sample_expr *expr;
+ } gpc;
+ struct {
+ int sc;
+ unsigned int idx;
+ long long int value;
+ struct sample_expr *expr;
+ } gpt;
+ struct track_ctr_prm trk_ctr;
+ struct {
+ char *srvname; /* server name from config parsing. */
+ struct server *srv; /* target server to attach the connection */
+ struct sample_expr *name; /* used to differentiate idle connections */
+ } attach_srv; /* 'attach-srv' rule */
+ struct {
+ void *p[4];
+ } act; /* generic pointers to be used by custom actions */
+ } arg; /* arguments used by some actions */
+ struct {
+ char *file; /* file name where the rule appears (or NULL) */
+ int line; /* line number where the rule appears */
+ } conf;
+};
+
+struct action_kw {
+ const char *kw;
+ enum act_parse_ret (*parse)(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err);
+ int flags;
+ void *private;
+};
+
+struct action_kw_list {
+ struct list list;
+ struct action_kw kw[VAR_ARRAY];
+};
+
+#endif /* _HAPROXY_ACTION_T_H */
diff --git a/include/haproxy/action.h b/include/haproxy/action.h
new file mode 100644
index 0000000..dba1408
--- /dev/null
+++ b/include/haproxy/action.h
@@ -0,0 +1,124 @@
+/*
+ * include/haproxy/action.h
+ * This file contains actions prototypes.
+ *
+ * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACTION_H
+#define _HAPROXY_ACTION_H
+
+#include <stdio.h>
+#include <haproxy/action-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/list.h>
+#include <haproxy/sample.h>
+
+struct resolv_requester;
+struct dns_counters;
+
+int act_resolution_cb(struct resolv_requester *requester, struct dns_counters *counters);
+int act_resolution_error_cb(struct resolv_requester *requester, int error_code);
+const char *action_suggest(const char *word, const struct list *keywords, const char **extra);
+void free_act_rule(struct act_rule *rule);
+
+static inline struct action_kw *action_lookup(struct list *keywords, const char *kw)
+{
+ struct action_kw_list *kw_list;
+ struct action_kw *best = NULL;
+ int len, bestlen = 0;
+ int i;
+
+ if (LIST_ISEMPTY(keywords))
+ return NULL;
+
+ list_for_each_entry(kw_list, keywords, list) {
+ for (i = 0; kw_list->kw[i].kw != NULL; i++) {
+ if ((kw_list->kw[i].flags & KWF_MATCH_PREFIX) &&
+ (len = strlen(kw_list->kw[i].kw)) > bestlen &&
+ strncmp(kw, kw_list->kw[i].kw, len) == 0) {
+ if (len > bestlen) {
+ bestlen = len;
+ best = &kw_list->kw[i];
+ }
+ }
+ if (strcmp(kw, kw_list->kw[i].kw) == 0)
+ return &kw_list->kw[i];
+ }
+ }
+ return best;
+}
+
+static inline void action_build_list(struct list *keywords,
+ struct buffer *chk)
+{
+ struct action_kw_list *kw_list;
+ int i;
+ char *p;
+ char *end;
+ int l;
+
+ p = chk->area;
+ end = p + chk->size - 1;
+ list_for_each_entry(kw_list, keywords, list) {
+ for (i = 0; kw_list->kw[i].kw != NULL; i++) {
+ l = snprintf(p, end - p, "'%s%s', ", kw_list->kw[i].kw, (kw_list->kw[i].flags & KWF_MATCH_PREFIX) ? "(*)" : "");
+ if (l > end - p)
+ continue;
+ p += l;
+ }
+ }
+ if (p > chk->area)
+ *(p-2) = '\0';
+ else
+ *p = '\0';
+}
+
+/* Check an action ruleset validity. It returns the number of error encountered
+ * and err_code is updated if a warning is emitted.
+ */
+int check_action_rules(struct list *rules, struct proxy *px, int *err_code);
+
+/* Find and check the target table used by an action track-sc*. This
+ * function should be called during the configuration validity check.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+int check_trk_action(struct act_rule *rule, struct proxy *px, char **err);
+
+/* check a capture rule. This function should be called during the configuration
+ * validity check.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+int check_capture(struct act_rule *rule, struct proxy *px, char **err);
+
+int cfg_parse_rule_set_timeout(const char **args, int idx, struct act_rule *rule,
+ struct proxy *px, char **err);
+
+static inline void release_timeout_action(struct act_rule *rule)
+{
+ release_sample_expr(rule->arg.timeout.expr);
+}
+
+struct act_rule *new_act_rule(enum act_from from, const char *file, int linenum);
+void free_act_rules(struct list *rules);
+void dump_act_rules(const struct list *rules, const char *pfx);
+
+#endif /* _HAPROXY_ACTION_H */
diff --git a/include/haproxy/activity-t.h b/include/haproxy/activity-t.h
new file mode 100644
index 0000000..9faeecd
--- /dev/null
+++ b/include/haproxy/activity-t.h
@@ -0,0 +1,144 @@
+/*
+ * include/haproxy/activity-t.h
+ * This file contains structure declarations for activity measurements.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACTIVITY_T_H
+#define _HAPROXY_ACTIVITY_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/freq_ctr-t.h>
+
+/* bit fields for the "profiling" global variable */
+#define HA_PROF_TASKS_OFF 0x00000000 /* per-task CPU profiling forced disabled */
+#define HA_PROF_TASKS_AOFF 0x00000001 /* per-task CPU profiling off (automatic) */
+#define HA_PROF_TASKS_AON 0x00000002 /* per-task CPU profiling on (automatic) */
+#define HA_PROF_TASKS_ON 0x00000003 /* per-task CPU profiling forced enabled */
+#define HA_PROF_TASKS_MASK 0x00000003 /* per-task CPU profiling mask */
+
+#define HA_PROF_MEMORY 0x00000004 /* memory profiling */
+
+
+#ifdef USE_MEMORY_PROFILING
+/* Elements used by memory profiling. This determines the number of buckets to
+ * store stats.
+ */
+#define MEMPROF_HASH_BITS 10
+#define MEMPROF_HASH_BUCKETS (1U << MEMPROF_HASH_BITS)
+
+enum memprof_method {
+ MEMPROF_METH_UNKNOWN = 0,
+ MEMPROF_METH_MALLOC,
+ MEMPROF_METH_CALLOC,
+ MEMPROF_METH_REALLOC,
+ MEMPROF_METH_FREE,
+ MEMPROF_METH_P_ALLOC, // pool_alloc()
+ MEMPROF_METH_P_FREE, // pool_free()
+ MEMPROF_METH_METHODS /* count, must be last */
+};
+
+/* stats:
+ * - malloc increases alloc
+ * - free increases free (if non null)
+ * - realloc increases either depending on the size change.
+ * when the real size is known (malloc_usable_size()), it's used in free_tot
+ * and alloc_tot, otherwise the requested size is reported in alloc_tot and
+ * zero in free_tot.
+ */
+struct memprof_stats {
+ const void *caller;
+ enum memprof_method method;
+ /* 4-7 bytes hole here */
+ unsigned long long alloc_calls;
+ unsigned long long free_calls;
+ unsigned long long alloc_tot;
+ unsigned long long free_tot;
+ void *info; // for pools, ptr to the pool
+ void *pad; // pad to 64
+};
+#endif
+
+/* per-thread activity reports. It's important that it's aligned on cache lines
+ * because some elements will be updated very often. Most counters are OK on
+ * 32-bit since this will be used during debugging sessions for troubleshooting
+ * in iterative mode.
+ */
+struct activity {
+ unsigned int loops; // complete loops in run_poll_loop()
+ unsigned int wake_tasks; // active tasks prevented poll() from sleeping
+ unsigned int wake_signal; // pending signal prevented poll() from sleeping
+ unsigned int poll_io; // number of times poll() reported I/O events
+ unsigned int poll_exp; // number of times poll() sees an expired timeout (includes wake_*)
+ unsigned int poll_drop_fd; // poller dropped a dead FD from the update list
+ unsigned int poll_skip_fd; // poller skipped another thread's FD
+ unsigned int conn_dead; // conn_fd_handler woke up on an FD indicating a dead connection
+ unsigned int stream_calls; // calls to process_stream()
+ unsigned int ctxsw; // total number of context switches
+ unsigned int tasksw; // total number of task switches
+ unsigned int empty_rq; // calls to process_runnable_tasks() with nothing for the thread
+ unsigned int long_rq; // process_runnable_tasks() left with tasks in the run queue
+ unsigned int cpust_total; // sum of half-ms stolen per thread
+ unsigned int fd_takeover; // number of times this thread stole another one's FD
+ unsigned int check_adopted;// number of times a check was migrated to this thread
+ ALWAYS_ALIGN(64);
+
+ struct freq_ctr cpust_1s; // avg amount of half-ms stolen over last second
+ struct freq_ctr cpust_15s; // avg amount of half-ms stolen over last 15s
+ unsigned int avg_loop_us; // average run time per loop over last 1024 runs
+ unsigned int accepted; // accepted incoming connections
+ unsigned int accq_pushed; // accept queue connections pushed
+ unsigned int accq_full; // accept queue connection not pushed because full
+ unsigned int pool_fail; // failed a pool allocation
+ unsigned int buf_wait; // waited on a buffer allocation
+ unsigned int check_started;// number of times a check was started on this thread
+#if defined(DEBUG_DEV)
+ /* keep these ones at the end */
+ unsigned int ctr0; // general purpose debug counter
+ unsigned int ctr1; // general purpose debug counter
+ unsigned int ctr2; // general purpose debug counter
+#endif
+ char __pad[0]; // unused except to check remaining room
+ char __end[0] __attribute__((aligned(64))); // align size to 64.
+};
+
+/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
+#define SCHED_ACT_HASH_BITS 8
+#define SCHED_ACT_HASH_BUCKETS (1U << SCHED_ACT_HASH_BITS)
+
+/* global profiling stats from the scheduler: each entry corresponds to a
+ * task or tasklet ->process function pointer, with a number of calls and
+ * a total time. Each entry is unique, except entry 0 which is for colliding
+ * hashes (i.e. others). All of these must be accessed atomically.
+ */
+struct sched_activity {
+ const void *func;
+ const struct ha_caller *caller;
+ uint64_t calls;
+ uint64_t cpu_time;
+ uint64_t lat_time;
+};
+
+#endif /* _HAPROXY_ACTIVITY_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/activity.h b/include/haproxy/activity.h
new file mode 100644
index 0000000..dbc8ec3
--- /dev/null
+++ b/include/haproxy/activity.h
@@ -0,0 +1,47 @@
+/*
+ * include/haproxy/activity.h
+ * This file contains macros and inline functions for activity measurements.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ACTIVITY_H
+#define _HAPROXY_ACTIVITY_H
+
+#include <haproxy/activity-t.h>
+#include <haproxy/api.h>
+
+extern unsigned int profiling;
+extern struct activity activity[MAX_THREADS];
+extern struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS];
+
+void report_stolen_time(uint64_t stolen);
+void activity_count_runtime(uint32_t run_time);
+struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func, const void *caller);
+
+#ifdef USE_MEMORY_PROFILING
+struct memprof_stats *memprof_get_bin(const void *ra, enum memprof_method meth);
+#endif
+
+#endif /* _HAPROXY_ACTIVITY_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/api-t.h b/include/haproxy/api-t.h
new file mode 100644
index 0000000..edb33a8
--- /dev/null
+++ b/include/haproxy/api-t.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/api-t.h
+ * This provides definitions for all common types or type modifiers used
+ * everywhere in the code, and suitable for use in structure fields.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_TYPES_H
+#define _HAPROXY_TYPES_H
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <haproxy/compat.h>
+#include <haproxy/compiler.h>
+#include <haproxy/defaults.h>
+#include <haproxy/list-t.h>
+
+#endif /* _HAPROXY_TYPES_H */
diff --git a/include/haproxy/api.h b/include/haproxy/api.h
new file mode 100644
index 0000000..a0bb6a8
--- /dev/null
+++ b/include/haproxy/api.h
@@ -0,0 +1,38 @@
+/*
+ * include/haproxy/api.h
+ *
+ * Include wrapper that assembles all includes required by every haproxy file.
+ * Please do not add direct definitions into this file.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BASE_H
+#define _HAPROXY_BASE_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/atomic.h>
+#include <haproxy/bug.h>
+#include <haproxy/init.h>
+
+#endif
diff --git a/include/haproxy/applet-t.h b/include/haproxy/applet-t.h
new file mode 100644
index 0000000..bd96403
--- /dev/null
+++ b/include/haproxy/applet-t.h
@@ -0,0 +1,101 @@
+/*
+ * include/haproxy/applet-t.h
+ * This file describes the applet struct and associated constants.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_APPLET_T_H
+#define _HAPROXY_APPLET_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/dynbuf-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/xref-t.h>
+
+/* flags for appctx->state */
+#define APPLET_WANT_DIE 0x01 /* applet was running and requested to die */
+
+/* Room for per-command context (mostly CLI commands but not only) */
+#define APPLET_MAX_SVCCTX 88
+
+struct appctx;
+struct proxy;
+struct stconn;
+struct sedesc;
+struct session;
+
+/* Applet descriptor */
+struct applet {
+ enum obj_type obj_type; /* object type = OBJ_TYPE_APPLET */
+ /* 3 unused bytes here */
+ char *name; /* applet's name to report in logs */
+ int (*init)(struct appctx *); /* callback to init resources, may be NULL.
+ expect 0 if ok, -1 if an error occurs. */
+ void (*fct)(struct appctx *); /* internal I/O handler, may never be NULL */
+ void (*release)(struct appctx *); /* callback to release resources, may be NULL */
+ unsigned int timeout; /* execution timeout. */
+};
+
+/* Context of a running applet. */
+struct appctx {
+ enum obj_type obj_type; /* OBJ_TYPE_APPCTX */
+ /* 3 unused bytes here */
+ unsigned short state; /* Internal appctx state */
+ unsigned int st0; /* CLI state for stats, session state for peers */
+ unsigned int st1; /* prompt/payload (bitwise OR of APPCTX_CLI_ST1_*) for stats, session error for peers */
+ struct buffer *chunk; /* used to store unfinished commands */
+ struct applet *applet; /* applet this context refers to */
+ struct session *sess; /* session for frontend applets (NULL for backend applets) */
+ struct sedesc *sedesc; /* stream endpoint descriptor the applet is attached to */
+ struct act_rule *rule; /* rule associated with the applet. */
+ int (*io_handler)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK */
+ void (*io_release)(struct appctx *appctx); /* used within the cli_io_handler when st0 = CLI_ST_CALLBACK,
+ if the command is terminated or the session released */
+ int cli_severity_output; /* used within the cli_io_handler to format severity output of informational feedback */
+ int cli_level; /* the level of CLI which can be lowered dynamically */
+ char cli_payload_pat[8]; /* Payload pattern */
+ uint32_t cli_anon_key; /* the key to anonymise with the hash in cli */
+ struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
+ struct task *t; /* task associated to the applet */
+ struct freq_ctr call_rate; /* appctx call rate */
+ struct list wait_entry; /* entry in a list of waiters for an event (e.g. ring events) */
+
+ /* The pointer seen by application code is appctx->svcctx. In 2.7 the
+ * anonymous union and the "ctx" struct disappeared, and the struct
+ * "svc" became svc_storage, which is never accessed directly by
+ * application code. Look at "show fd" for an example.
+ */
+
+ /* here we have the service's context (CLI command, applet, etc) */
+ void *svcctx; /* pointer to a context used by the command, e.g. <storage> below */
+ struct {
+ void *shadow; /* shadow of svcctx above, do not use! */
+ char storage[APPLET_MAX_SVCCTX]; /* storage of svcctx above */
+ } svc; /* generic storage for most commands */
+};
+
+#endif /* _HAPROXY_APPLET_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/applet.h b/include/haproxy/applet.h
new file mode 100644
index 0000000..b04ffd9
--- /dev/null
+++ b/include/haproxy/applet.h
@@ -0,0 +1,270 @@
+/*
+ * include/haproxy/applet.h
+ * This file contains applet function prototypes
+ *
+ * Copyright (C) 2000-2015 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_APPLET_H
+#define _HAPROXY_APPLET_H
+
+#include <stdlib.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/channel.h>
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/session.h>
+#include <haproxy/stconn.h>
+#include <haproxy/task.h>
+
+extern unsigned int nb_applets;
+extern struct pool_head *pool_head_appctx;
+
+struct task *task_run_applet(struct task *t, void *context, unsigned int state);
+int appctx_buf_available(void *arg);
+void *applet_reserve_svcctx(struct appctx *appctx, size_t size);
+void applet_reset_svcctx(struct appctx *appctx);
+void appctx_shut(struct appctx *appctx);
+
+struct appctx *appctx_new_on(struct applet *applet, struct sedesc *sedesc, int thr);
+int appctx_finalize_startup(struct appctx *appctx, struct proxy *px, struct buffer *input);
+void appctx_free_on_early_error(struct appctx *appctx);
+void appctx_free(struct appctx *appctx);
+
+static inline struct appctx *appctx_new_here(struct applet *applet, struct sedesc *sedesc)
+{
+ return appctx_new_on(applet, sedesc, tid);
+}
+
+static inline struct appctx *appctx_new_anywhere(struct applet *applet, struct sedesc *sedesc)
+{
+ return appctx_new_on(applet, sedesc, -1);
+}
+
+/* Helper function to call .init applet callback function, if it exists. Returns 0
+ * on success and -1 on error.
+ */
+static inline int appctx_init(struct appctx *appctx)
+{
+ /* Set appctx affinity to the current thread. Because, after this call,
+ * the appctx will be fully initialized. The session and the stream will
+ * eventually be created. The affinity must be set now !
+ */
+ BUG_ON(appctx->t->tid != tid);
+ task_set_thread(appctx->t, tid);
+
+ if (appctx->applet->init)
+ return appctx->applet->init(appctx);
+ return 0;
+}
+
+/* Releases an appctx previously allocated by appctx_new(). */
+static inline void __appctx_free(struct appctx *appctx)
+{
+ task_destroy(appctx->t);
+ if (LIST_INLIST(&appctx->buffer_wait.list))
+ LIST_DEL_INIT(&appctx->buffer_wait.list);
+ if (appctx->sess)
+ session_free(appctx->sess);
+ BUG_ON(appctx->sedesc && !se_fl_test(appctx->sedesc, SE_FL_ORPHAN));
+ sedesc_free(appctx->sedesc);
+ pool_free(pool_head_appctx, appctx);
+ _HA_ATOMIC_DEC(&nb_applets);
+}
+
+/* wakes up an applet when conditions have changed. We're using a macro here in
+ * order to retrieve the caller's place.
+ */
+#define appctx_wakeup(ctx) \
+ _task_wakeup((ctx)->t, TASK_WOKEN_OTHER, MK_CALLER(WAKEUP_TYPE_APPCTX_WAKEUP, 0, 0))
+
+/* returns the stream connector the appctx is attached to, via the sedesc */
+static inline struct stconn *appctx_sc(const struct appctx *appctx)
+{
+ return appctx->sedesc->sc;
+}
+
+/* returns the stream the appctx is attached to. Note that a stream *must*
+ * be attached, as we use an unchecked dereference via __sc_strm().
+ */
+static inline struct stream *appctx_strm(const struct appctx *appctx)
+{
+ return __sc_strm(appctx->sedesc->sc);
+}
+
+/* The applet announces it has more data to deliver to the stream's input
+ * buffer.
+ */
+static inline void applet_have_more_data(struct appctx *appctx)
+{
+ se_fl_clr(appctx->sedesc, SE_FL_HAVE_NO_DATA);
+}
+
+/* The applet announces it doesn't have more data for the stream's input
+ * buffer.
+ */
+static inline void applet_have_no_more_data(struct appctx *appctx)
+{
+ se_fl_set(appctx->sedesc, SE_FL_HAVE_NO_DATA);
+}
+
+/* The applet indicates that it's ready to consume data from the stream's
+ * output buffer. Rely on the corresponding SE function
+ */
+static inline void applet_will_consume(struct appctx *appctx)
+{
+ se_will_consume(appctx->sedesc);
+}
+
+/* The applet indicates that it's not willing to consume data from the stream's
+ * output buffer. Rely on the corresponding SE function
+ */
+static inline void applet_wont_consume(struct appctx *appctx)
+{
+ se_wont_consume(appctx->sedesc);
+}
+
+/* The applet indicates that it's willing to consume data from the stream's
+ * output buffer, but that there's not enough, so it doesn't want to be woken
+ * up until more are presented. Rely on the corresponding SE function
+ */
+static inline void applet_need_more_data(struct appctx *appctx)
+{
+ se_need_more_data(appctx->sedesc);
+}
+
+/* The applet indicates that it does not expect data from the opposite endpoint.
+ * This way the stream knows it should not trigger read timeout on the other
+ * side.
+ */
+static inline void applet_expect_no_data(struct appctx *appctx)
+{
+ se_fl_set(appctx->sedesc, SE_FL_EXP_NO_DATA);
+}
+
+/* The applet indicates that it expects data from the opposite endpoint. This
+ * way the stream knows it may trigger read timeout on the other side.
+ */
+static inline void applet_expect_data(struct appctx *appctx)
+{
+ se_fl_clr(appctx->sedesc, SE_FL_EXP_NO_DATA);
+}
+
+/* writes chunk <chunk> into the input channel of the stream attached to this
+ * appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
+ * See ci_putchk() for the list of return codes.
+ */
+static inline int applet_putchk(struct appctx *appctx, struct buffer *chunk)
+{
+ struct sedesc *se = appctx->sedesc;
+ int ret;
+
+ ret = ci_putchk(sc_ic(se->sc), chunk);
+ if (ret < 0) {
+ /* XXX: Handle all errors as a lack of space because callers
+ * don't handle other cases for now. So applets must be
+ * careful to handle shutdown (-2) and invalid calls (-3) by
+ * themselves.
+ */
+ sc_need_room(se->sc, chunk->data);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* writes <len> chars from <blk> into the input channel of the stream attached
+ * to this appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full
+ * error. See ci_putblk() for the list of return codes.
+ */
+static inline int applet_putblk(struct appctx *appctx, const char *blk, int len)
+{
+ struct sedesc *se = appctx->sedesc;
+ int ret;
+
+ ret = ci_putblk(sc_ic(se->sc), blk, len);
+ if (ret < -1) {
+ /* XXX: Handle all errors as a lack of space because callers
+ * don't handle other cases for now. So applets must be
+ * careful to handle shutdown (-2) and invalid calls (-3) by
+ * themselves.
+ */
+ sc_need_room(se->sc, len);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* writes chars from <str> up to the trailing zero (excluded) into the input
+ * channel of the stream attached to this appctx's endpoint, and marks the
+ * SC_FL_NEED_ROOM on a channel full error. See ci_putstr() for the list of
+ * return codes.
+ */
+static inline int applet_putstr(struct appctx *appctx, const char *str)
+{
+ struct sedesc *se = appctx->sedesc;
+ int ret;
+
+ ret = ci_putstr(sc_ic(se->sc), str);
+ if (ret == -1) {
+ /* XXX: Handle all errors as a lack of space because callers
+ * don't handle other cases for now. So applets must be
+ * careful to handle shutdown (-2) and invalid calls (-3) by
+ * themselves.
+ */
+ sc_need_room(se->sc, strlen(str));
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* writes character <chr> into the input channel of the stream attached to this
+ * appctx's endpoint, and marks the SC_FL_NEED_ROOM on a channel full error.
+ * See ci_putchr() for the list of return codes.
+ */
+static inline int applet_putchr(struct appctx *appctx, char chr)
+{
+ struct sedesc *se = appctx->sedesc;
+ int ret;
+
+ ret = ci_putchr(sc_ic(se->sc), chr);
+ if (ret == -1) {
+ /* XXX: Handle all errors as a lack of space because callers
+ * don't handle other cases for now. So applets must be
+ * careful to handle shutdown (-2) and invalid calls (-3) by
+ * themselves.
+ */
+ sc_need_room(se->sc, 1);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+#endif /* _HAPROXY_APPLET_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/arg-t.h b/include/haproxy/arg-t.h
new file mode 100644
index 0000000..d90d326
--- /dev/null
+++ b/include/haproxy/arg-t.h
@@ -0,0 +1,152 @@
+/*
+ * include/haproxy/arg-t.h
+ * This file contains structure declarations for generic argument parsing.
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ARG_T_H
+#define _HAPROXY_ARG_T_H
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/protobuf-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/vars-t.h>
+
+/* encoding of each arg type : up to 31 types are supported */
+#define ARGT_BITS 5
+#define ARGT_NBTYPES (1 << ARGT_BITS)
+#define ARGT_MASK (ARGT_NBTYPES - 1)
+
+/* encoding of the arg count : up to 12 args are possible. 4 bits are left
+ * unused at the top.
+ */
+#define ARGM_MASK ((1 << ARGM_BITS) - 1)
+#define ARGM_BITS 4
+#define ARGM_NBARGS (sizeof(uint64_t) * 8 - ARGM_BITS) / ARGT_BITS
+
+enum {
+ ARGT_STOP = 0, /* end of the arg list */
+ ARGT_SINT, /* signed 64 bit integer. */
+ ARGT_STR, /* string */
+ ARGT_IPV4, /* an IPv4 address */
+ ARGT_MSK4, /* an IPv4 address mask (integer or dotted), stored as ARGT_IPV4 */
+ ARGT_IPV6, /* an IPv6 address */
+ ARGT_MSK6, /* an IPv6 address mask (integer or dotted), stored as ARGT_IPV6 */
+ ARGT_TIME, /* a delay in ms by default, stored as ARGT_UINT */
+ ARGT_SIZE, /* a size in bytes by default, stored as ARGT_UINT */
+ ARGT_FE, /* a pointer to a frontend only */
+ ARGT_BE, /* a pointer to a backend only */
+ ARGT_TAB, /* a pointer to a stick table */
+ ARGT_SRV, /* a pointer to a server */
+ ARGT_USR, /* a pointer to a user list */
+ ARGT_MAP, /* a pointer to a map descriptor */
+ ARGT_REG, /* a pointer to a regex */
+ ARGT_VAR, /* contains a variable description. */
+ ARGT_PBUF_FNUM, /* a protocol buffer field number */
+ ARGT_PTR, /* a pointer to opaque data */
+ /* please update arg_type_names[] in args.c if you add entries here */
+};
+
+/* context where arguments are used, in order to help error reporting */
+enum {
+ ARGC_ACL = 0, /* ACL */
+ ARGC_STK, /* sticking rule */
+ ARGC_TRK, /* tracking rule */
+ ARGC_LOG, /* log-format */
+ ARGC_LOGSD, /* log-format-sd */
+ ARGC_HRQ, /* http-request */
+ ARGC_HRS, /* http-response */
+ ARGC_UIF, /* unique-id-format */
+ ARGC_RDR, /* redirect */
+ ARGC_CAP, /* capture rule */
+ ARGC_SRV, /* server line */
+ ARGC_SPOE, /* spoe message args */
+ ARGC_UBK, /* use_backend message */
+ ARGC_USRV, /* use-server message */
+ ARGC_HERR, /* http-error */
+ ARGC_OT, /* opentracing scope args */
+ ARGC_OPT, /* option directive */
+ ARGC_TCO, /* tcp-request connection expression */
+ ARGC_TSE, /* tcp-request session expression */
+ ARGC_TRQ, /* tcp-request content expression */
+ ARGC_TRS, /* tcp-response content expression */
+ ARGC_TCK, /* tcp-check expression */
+ ARGC_CFG, /* configuration expression */
+ ARGC_CLI, /* CLI expression */
+};
+
+/* flags used when compiling and executing regex */
+#define ARGF_REG_ICASE 1
+#define ARGF_REG_GLOB 2
+
+/* some types that are externally defined */
+struct proxy;
+struct server;
+struct userlist;
+struct my_regex;
+
+union arg_data {
+ long long int sint;
+ struct buffer str;
+ struct in_addr ipv4;
+ struct in6_addr ipv6;
+ struct proxy *prx; /* used for fe, be, tables */
+ struct server *srv;
+ struct stktable *t;
+ struct userlist *usr;
+ struct map_descriptor *map;
+ struct my_regex *reg;
+ struct pbuf_fid fid;
+ struct var_desc var;
+ void *ptr;
+};
+
+struct arg {
+ unsigned char type; /* argument type, ARGT_* */
+ unsigned char unresolved; /* argument contains a string in <str> that must be resolved and freed */
+ unsigned char type_flags; /* type-specific extra flags (eg: case sensitivity for regex), ARGF_* */
+ union arg_data data; /* argument data */
+};
+
+/* arg lists are used to store information about arguments that could not be
+ * resolved when parsing the configuration. The head is an arg_list which
+ * serves as a template to create new entries. Nothing here is allocated,
+ * so plain copies are OK.
+ */
+struct arg_list {
+ struct list list; /* chaining with other arg_list, or list head */
+ struct arg *arg; /* pointer to the arg, NULL on list head */
+ int arg_pos; /* argument position */
+ int ctx; /* context where the arg is used (ARGC_*) */
+ const char *kw; /* keyword making use of these args */
+ const char *conv; /* conv keyword when in conv, otherwise NULL */
+ const char *file; /* file name where the args are referenced */
+ int line; /* line number where the args are referenced */
+};
+
+#endif /* _HAPROXY_ARG_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/arg.h b/include/haproxy/arg.h
new file mode 100644
index 0000000..5fe1888
--- /dev/null
+++ b/include/haproxy/arg.h
@@ -0,0 +1,94 @@
+/*
+ * include/haproxy/arg.h
+ * This file contains functions and macros declarations for generic argument parsing.
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ARG_H
+#define _HAPROXY_ARG_H
+
+#include <haproxy/arg-t.h>
+
+/* Some macros used to build some arg list. We can declare various argument
+ * combinations from 0 to 7 args using a single 32-bit integer. The first
+ * argument of these macros is always the mandatory number of arguments, and
+ * remaining ones are optional args. Note: ARGM() may also be used to return
+ * the number of mandatory arguments in a mask.
+ */
+#define ARGM(m) \
+ (uint64_t)(m & ARGM_MASK)
+
+#define ARG1(m, t1) \
+ (ARGM(m) + ((uint64_t)ARGT_##t1 << (ARGM_BITS)))
+
+#define ARG2(m, t1, t2) \
+ (ARG1(m, t1) + ((uint64_t)ARGT_##t2 << (ARGM_BITS + ARGT_BITS)))
+
+#define ARG3(m, t1, t2, t3) \
+ (ARG2(m, t1, t2) + ((uint64_t)ARGT_##t3 << (ARGM_BITS + ARGT_BITS * 2)))
+
+#define ARG4(m, t1, t2, t3, t4) \
+ (ARG3(m, t1, t2, t3) + ((uint64_t)ARGT_##t4 << (ARGM_BITS + ARGT_BITS * 3)))
+
+#define ARG5(m, t1, t2, t3, t4, t5) \
+ (ARG4(m, t1, t2, t3, t4) + ((uint64_t)ARGT_##t5 << (ARGM_BITS + ARGT_BITS * 4)))
+
+#define ARG6(m, t1, t2, t3, t4, t5, t6) \
+ (ARG5(m, t1, t2, t3, t4, t5) + ((uint64_t)ARGT_##t6 << (ARGM_BITS + ARGT_BITS * 5)))
+
+#define ARG7(m, t1, t2, t3, t4, t5, t6, t7) \
+ (ARG6(m, t1, t2, t3, t4, t5, t6) + ((uint64_t)ARGT_##t7 << (ARGM_BITS + ARGT_BITS * 6)))
+
+#define ARG8(m, t1, t2, t3, t4, t5, t6, t7, t8) \
+ (ARG7(m, t1, t2, t3, t4, t5, t6, t7) + ((uint64_t)ARGT_##t8 << (ARGM_BITS + ARGT_BITS * 7)))
+
+#define ARG9(m, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
+ (ARG8(m, t1, t2, t3, t4, t5, t6, t7, t8) + ((uint64_t)ARGT_##t9 << (ARGM_BITS + ARGT_BITS * 8)))
+
+#define ARG10(m, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) \
+ (ARG9(m, t1, t2, t3, t4, t5, t6, t7, t8, t9) + ((uint64_t)ARGT_##t10 << (ARGM_BITS + ARGT_BITS * 9)))
+
+#define ARG11(m, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
+ (ARG10(m, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) + ((uint64_t)ARGT_##t11 << (ARGM_BITS + ARGT_BITS * 10)))
+
+#define ARG12(m, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12) \
+ (ARG11(m, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) + ((uint64_t)ARGT_##t12 << (ARGM_BITS + ARGT_BITS * 11)))
+
+/* Mapping between argument number and literal description. */
+extern const char *arg_type_names[];
+
+/* This dummy arg list may be used by default when no arg is found, it helps
+ * parsers by removing pointer checks.
+ */
+extern struct arg empty_arg_list[ARGM_NBARGS];
+
+struct arg_list *arg_list_clone(const struct arg_list *orig);
+struct arg_list *arg_list_add(struct arg_list *orig, struct arg *arg, int pos);
+int make_arg_list(const char *in, int len, uint64_t mask, struct arg **argp,
+ char **err_msg, const char **end_ptr, int *err_arg,
+ struct arg_list *al);
+struct arg *free_args(struct arg *args);
+
+#endif /* _HAPROXY_ARG_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/atomic.h b/include/haproxy/atomic.h
new file mode 100644
index 0000000..d64e192
--- /dev/null
+++ b/include/haproxy/atomic.h
@@ -0,0 +1,897 @@
+/*
+ * include/haproxy/atomic.h
+ * Macros and inline functions for thread-safe atomic operations.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ATOMIC_H
+#define _HAPROXY_ATOMIC_H
+
+#include <haproxy/compiler.h>
+
+/* A few notes for the macros and functions here:
+ * - this file is painful to edit, most operations exist in 3 variants,
+ * no-thread, threads with gcc<4.7, threads with gcc>=4.7. Be careful when
+ * modifying it not to break any of them.
+ *
+ * - macros named HA_ATOMIC_* are for use in the general case, they contain the
+ * required memory barriers to guarantee sequential consistency
+ *
+ * - macros named _HA_ATOMIC_* are the same but without the memory barriers,
+ * so they may only be used if followed by other HA_ATOMIC_* or within a
+ * sequence of _HA_ATOMIC_* terminated by a store barrier, or when there is
+ * no data dependency (e.g. updating a counter). Not all of them are
+ * implemented, in which case fallbacks to the safe ones are provided. In
+ * case of doubt, don't use them and use the generic ones instead.
+ *
+ * - the __ha_atomic_* barriers are for use around _HA_ATOMIC_* operations.
+ * Some architectures make them useless and they will automatically be
+ * dropped in such a case. Don't use them outside of this use case.
+ *
+ * - in general, the more underscores you find in front of a function or macro
+ * name, the riskier it is to use. Barriers are among them because validating
+ * their usage is not trivial at all and it's often safer to fall back to
+ * more generic behaviors.
+ *
+ * There is also a compiler barrier (__ha_compiler_barrier) which is eliminated
+ * when threads are disabled. We currently don't have a permanent compiler
+ * barrier to prevent the compiler from reordering signal-sensitive code for
+ * example.
+ */
+
+
+#ifndef USE_THREAD
+
+/* Threads are DISABLED, atomic ops are also not used. Note that these MUST
+ * NOT be used for inter-process synchronization nor signal-safe variable
+ * manipulations which might occur without threads, as they are not atomic.
+ */
+
+#define HA_ATOMIC_LOAD(val) *(val)
+#define HA_ATOMIC_STORE(val, new) ({*(val) = new;})
+
+#define HA_ATOMIC_XCHG(val, new) \
+ ({ \
+ typeof(*(val)) __old_xchg = *(val); \
+ *(val) = new; \
+ __old_xchg; \
+ })
+
+#define HA_ATOMIC_AND(val, flags) do { *(val) &= (flags);} while (0)
+#define HA_ATOMIC_OR(val, flags) do { *(val) |= (flags);} while (0)
+#define HA_ATOMIC_ADD(val, i) do { *(val) += (i);} while (0)
+#define HA_ATOMIC_SUB(val, i) do { *(val) -= (i);} while (0)
+#define HA_ATOMIC_INC(val) do { *(val) += 1;} while (0)
+#define HA_ATOMIC_DEC(val) do { *(val) -= 1;} while (0)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) ({ *(val) &= (flags); })
+#define HA_ATOMIC_OR_FETCH(val, flags) ({ *(val) |= (flags); })
+#define HA_ATOMIC_ADD_FETCH(val, i) ({ *(val) += (i); })
+#define HA_ATOMIC_SUB_FETCH(val, i) ({ *(val) -= (i); })
+
+#define HA_ATOMIC_FETCH_AND(val, i) \
+ ({ \
+ typeof((val)) __p_val = (val); \
+ typeof(*(val)) __old_val = *__p_val; \
+ *__p_val &= (i); \
+ __old_val; \
+ })
+
+#define HA_ATOMIC_FETCH_OR(val, i) \
+ ({ \
+ typeof((val)) __p_val = (val); \
+ typeof(*(val)) __old_val = *__p_val; \
+ *__p_val |= (i); \
+ __old_val; \
+ })
+
+#define HA_ATOMIC_FETCH_ADD(val, i) \
+ ({ \
+ typeof((val)) __p_val = (val); \
+ typeof(*(val)) __old_val = *__p_val; \
+ *__p_val += (i); \
+ __old_val; \
+ })
+
+#define HA_ATOMIC_FETCH_SUB(val, i) \
+ ({ \
+ typeof((val)) __p_val = (val); \
+ typeof(*(val)) __old_val = *__p_val; \
+ *__p_val -= (i); \
+ __old_val; \
+ })
+
+#define HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ typeof((val)) __p_bts = (val); \
+ typeof(*__p_bts) __b_bts = (1UL << (bit)); \
+ typeof(*__p_bts) __t_bts = *__p_bts & __b_bts; \
+ if (!__t_bts) \
+ *__p_bts |= __b_bts; \
+ __t_bts; \
+ })
+
+#define HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ typeof((val)) __p_btr = (val); \
+ typeof(*__p_btr) __b_btr = (1UL << (bit)); \
+ typeof(*__p_btr) __t_btr = *__p_btr & __b_btr; \
+ if (__t_btr) \
+ *__p_btr &= ~__b_btr; \
+ __t_btr; \
+ })
+
+#define HA_ATOMIC_CAS(val, old, new) \
+ ({ \
+ typeof(val) _v = (val); \
+ typeof(old) _o = (old); \
+ (*_v == *_o) ? ((*_v = (new)), 1) : ((*_o = *_v), 0); \
+ })
+
+/* warning, n is a pointer to the double value for dwcas */
+#define HA_ATOMIC_DWCAS(val, o, n) \
+ ({ \
+ long *_v = (long*)(val); \
+ long *_o = (long*)(o); \
+ long *_n = (long*)(n); \
+ long _v0 = _v[0], _v1 = _v[1]; \
+ (_v0 == _o[0] && _v1 == _o[1]) ? \
+ (_v[0] = _n[0], _v[1] = _n[1], 1) : \
+ (_o[0] = _v0, _o[1] = _v1, 0); \
+ })
+
+#define HA_ATOMIC_UPDATE_MAX(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __new_max = (new); \
+ \
+ if (*__val < __new_max) \
+ *__val = __new_max; \
+ *__val; \
+ })
+
+#define HA_ATOMIC_UPDATE_MIN(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __new_min = (new); \
+ \
+ if (*__val > __new_min) \
+ *__val = __new_min; \
+ *__val; \
+ })
+
+/* various barriers */
+#define __ha_barrier_atomic_load() do { } while (0)
+#define __ha_barrier_atomic_store() do { } while (0)
+#define __ha_barrier_atomic_full() do { } while (0)
+#define __ha_barrier_load() do { } while (0)
+#define __ha_barrier_store() do { } while (0)
+#define __ha_barrier_full() do { } while (0)
+#define __ha_compiler_barrier() do { } while (0)
+#define __ha_cpu_relax() ({ 1; })
+
+#else /* !USE_THREAD */
+
+/* Threads are ENABLED, all atomic ops are made thread-safe. By extension they
+ * can also be used for inter-process synchronization but one must verify that
+ * the code still builds with threads disabled.
+ */
+
+#if defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7) && !defined(__clang__)
+/* gcc < 4.7 */
+
+#define HA_ATOMIC_LOAD(val) \
+ ({ \
+ typeof(*(val)) ret = \
+ ({ __sync_synchronize(); *(volatile typeof(val))val; }); \
+ __sync_synchronize(); \
+ ret; \
+ })
+
+#define HA_ATOMIC_STORE(val, new) \
+ ({ \
+ typeof((val)) __val_store = (val); \
+ typeof(*(val)) __old_store; \
+ typeof((new)) __new_store = (new); \
+ do { __old_store = *__val_store; \
+ } while (!__sync_bool_compare_and_swap(__val_store, __old_store, __new_store) && __ha_cpu_relax()); \
+ })
+
+#define HA_ATOMIC_XCHG(val, new) \
+ ({ \
+ typeof((val)) __val_xchg = (val); \
+ typeof(*(val)) __old_xchg; \
+ typeof((new)) __new_xchg = (new); \
+ do { __old_xchg = *__val_xchg; \
+ } while (!__sync_bool_compare_and_swap(__val_xchg, __old_xchg, __new_xchg) && __ha_cpu_relax()); \
+ __old_xchg; \
+ })
+
+#define HA_ATOMIC_AND(val, flags) do { __sync_and_and_fetch(val, flags); } while (0)
+#define HA_ATOMIC_OR(val, flags) do { __sync_or_and_fetch(val, flags); } while (0)
+#define HA_ATOMIC_ADD(val, i) do { __sync_add_and_fetch(val, i); } while (0)
+#define HA_ATOMIC_SUB(val, i) do { __sync_sub_and_fetch(val, i); } while (0)
+#define HA_ATOMIC_INC(val) do { __sync_add_and_fetch(val, 1); } while (0)
+#define HA_ATOMIC_DEC(val) do { __sync_sub_and_fetch(val, 1); } while (0)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __sync_and_and_fetch(val, flags)
+#define HA_ATOMIC_OR_FETCH(val, flags) __sync_or_and_fetch(val, flags)
+#define HA_ATOMIC_ADD_FETCH(val, i) __sync_add_and_fetch(val, i)
+#define HA_ATOMIC_SUB_FETCH(val, i) __sync_sub_and_fetch(val, i)
+
+#define HA_ATOMIC_FETCH_AND(val, flags) __sync_fetch_and_and(val, flags)
+#define HA_ATOMIC_FETCH_OR(val, flags) __sync_fetch_and_or(val, flags)
+#define HA_ATOMIC_FETCH_ADD(val, i) __sync_fetch_and_add(val, i)
+#define HA_ATOMIC_FETCH_SUB(val, i) __sync_fetch_and_sub(val, i)
+
+#define HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __sync_fetch_and_or((val), __b_bts) & __b_bts; \
+ })
+
+#define HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ typeof(*(val)) __b_btr = (1UL << (bit)); \
+ __sync_fetch_and_and((val), ~__b_btr) & __b_btr; \
+ })
+
+/* the CAS is a bit complicated. The older API doesn't support returning the
+ * value and the swap's result at the same time. So here we take what looks
+ * like the safest route, consisting in using the boolean version guaranteeing
+ * that the operation was performed or not, and we snoop a previous value. If
+ * the compare succeeds, we return. If it fails, we return the previous value,
+ * but only if it differs from the expected one. If it's the same it's a race
+ * thus we try again to avoid confusing a possibly sensitive caller.
+ */
+#define HA_ATOMIC_CAS(val, old, new) \
+ ({ \
+ typeof((val)) __val_cas = (val); \
+ typeof((old)) __oldp_cas = (old); \
+ typeof(*(old)) __oldv_cas; \
+ typeof((new)) __new_cas = (new); \
+ int __ret_cas; \
+ do { \
+ __oldv_cas = *__val_cas; \
+ __ret_cas = __sync_bool_compare_and_swap(__val_cas, *__oldp_cas, __new_cas); \
+ } while (!__ret_cas && *__oldp_cas == __oldv_cas && __ha_cpu_relax()); \
+ if (!__ret_cas) \
+ *__oldp_cas = __oldv_cas; \
+ __ret_cas; \
+ })
+
+/* warning, n is a pointer to the double value for dwcas */
+#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
+
+#define HA_ATOMIC_UPDATE_MAX(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __old_max = *__val; \
+ typeof(*(val)) __new_max = (new); \
+ \
+ while (__old_max < __new_max && \
+ !HA_ATOMIC_CAS(__val, &__old_max, __new_max) && __ha_cpu_relax()); \
+ *__val; \
+ })
+
+#define HA_ATOMIC_UPDATE_MIN(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __old_min = *__val; \
+ typeof(*(val)) __new_min = (new); \
+ \
+ while (__old_min > __new_min && \
+ !HA_ATOMIC_CAS(__val, &__old_min, __new_min) && __ha_cpu_relax()); \
+ *__val; \
+ })
+
+#else /* gcc */
+
+/* gcc >= 4.7 or clang */
+
+#define HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELEASE)
+#define HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_ACQUIRE)
+#define HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_ACQ_REL)
+
+#define HA_ATOMIC_AND(val, flags) do { __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST); } while (0)
+#define HA_ATOMIC_OR(val, flags) do { __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST); } while (0)
+#define HA_ATOMIC_ADD(val, i) do { __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST); } while (0)
+#define HA_ATOMIC_SUB(val, i) do { __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST); } while (0)
+#define HA_ATOMIC_INC(val) do { __atomic_add_fetch(val, 1, __ATOMIC_SEQ_CST); } while (0)
+#define HA_ATOMIC_DEC(val) do { __atomic_sub_fetch(val, 1, __ATOMIC_SEQ_CST); } while (0)
+
+#define HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_OR_FETCH(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_ADD_FETCH(val, i) __atomic_add_fetch(val, i, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_SUB_FETCH(val, i) __atomic_sub_fetch(val, i, __ATOMIC_SEQ_CST)
+
+#define HA_ATOMIC_FETCH_AND(val, flags) __atomic_fetch_and(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_FETCH_OR(val, flags) __atomic_fetch_or(val, flags, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_FETCH_ADD(val, i) __atomic_fetch_add(val, i, __ATOMIC_SEQ_CST)
+#define HA_ATOMIC_FETCH_SUB(val, i) __atomic_fetch_sub(val, i, __ATOMIC_SEQ_CST)
+
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && (defined(__i386__) || defined (__x86_64__))
+#define HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ unsigned char __ret; \
+ if (sizeof(long) == 8 && sizeof(*(val)) == 8) { \
+ asm volatile("lock btsq %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 4) { \
+ asm volatile("lock btsl %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 2) { \
+ asm volatile("lock btsw %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ } else { \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __ret = !!(__atomic_fetch_or((val), __b_bts, __ATOMIC_SEQ_CST) & __b_bts); \
+ } \
+ __ret; \
+ })
+
+#define HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ unsigned char __ret; \
+ if (sizeof(long) == 8 && sizeof(*(val)) == 8) { \
+ asm volatile("lock btrq %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 4) { \
+ asm volatile("lock btrl %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 2) { \
+ asm volatile("lock btrw %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ } else { \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __ret = !!(__atomic_fetch_and((val), ~__b_bts, __ATOMIC_SEQ_CST) & __b_bts); \
+ } \
+ __ret; \
+ })
+
+#else // not x86 or !__GCC_ASM_FLAG_OUTPUTS__
+
+#define HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __atomic_fetch_or((val), __b_bts, __ATOMIC_SEQ_CST) & __b_bts; \
+ })
+
+#define HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ typeof(*(val)) __b_btr = (1UL << (bit)); \
+ __atomic_fetch_and((val), ~__b_btr, __ATOMIC_SEQ_CST) & __b_btr; \
+ })
+
+#endif // x86 || __GCC_ASM_FLAG_OUTPUTS__
+
+#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+
+/* warning, n is a pointer to the double value for dwcas */
+#define HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
+
+#define HA_ATOMIC_UPDATE_MAX(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __old_max = *__val; \
+ typeof(*(val)) __new_max = (new); \
+ \
+ while (__old_max < __new_max && \
+ !HA_ATOMIC_CAS(__val, &__old_max, __new_max) && __ha_cpu_relax()); \
+ *__val; \
+ })
+
+#define HA_ATOMIC_UPDATE_MIN(val, new) \
+ ({ \
+ typeof(val) __val = (val); \
+ typeof(*(val)) __old_min = *__val; \
+ typeof(*(val)) __new_min = (new); \
+ \
+ while (__old_min > __new_min && \
+ !HA_ATOMIC_CAS(__val, &__old_min, __new_min) && __ha_cpu_relax()); \
+ *__val; \
+ })
+
+/* Modern compilers provide variants that don't generate any memory barrier.
+ * If you're unsure how to deal with barriers, just use the HA_ATOMIC_* version,
+ * that will always generate correct code.
+ * Usually it's fine to use those when updating data that have no dependency,
+ * ie updating a counter. Otherwise a barrier is required.
+ */
+
+#define _HA_ATOMIC_LOAD(val) __atomic_load_n(val, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_STORE(val, new) __atomic_store_n(val, new, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_XCHG(val, new) __atomic_exchange_n(val, new, __ATOMIC_RELAXED)
+
+#define _HA_ATOMIC_AND(val, flags) do { __atomic_and_fetch(val, flags, __ATOMIC_RELAXED); } while (0)
+#define _HA_ATOMIC_OR(val, flags) do { __atomic_or_fetch(val, flags, __ATOMIC_RELAXED); } while (0)
+#define _HA_ATOMIC_ADD(val, i) do { __atomic_add_fetch(val, i, __ATOMIC_RELAXED); } while (0)
+#define _HA_ATOMIC_SUB(val, i) do { __atomic_sub_fetch(val, i, __ATOMIC_RELAXED); } while (0)
+#define _HA_ATOMIC_INC(val) do { __atomic_add_fetch(val, 1, __ATOMIC_RELAXED); } while (0)
+#define _HA_ATOMIC_DEC(val) do { __atomic_sub_fetch(val, 1, __ATOMIC_RELAXED); } while (0)
+
+#define _HA_ATOMIC_AND_FETCH(val, flags) __atomic_and_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_OR_FETCH(val, flags) __atomic_or_fetch(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_ADD_FETCH(val, i) __atomic_add_fetch(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_SUB_FETCH(val, i) __atomic_sub_fetch(val, i, __ATOMIC_RELAXED)
+
+#define _HA_ATOMIC_FETCH_AND(val, flags) __atomic_fetch_and(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_FETCH_OR(val, flags) __atomic_fetch_or(val, flags, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_FETCH_ADD(val, i) __atomic_fetch_add(val, i, __ATOMIC_RELAXED)
+#define _HA_ATOMIC_FETCH_SUB(val, i) __atomic_fetch_sub(val, i, __ATOMIC_RELAXED)
+
+#if defined(__GCC_ASM_FLAG_OUTPUTS__) && (defined(__i386__) || defined (__x86_64__))
+#define _HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ unsigned char __ret; \
+ if (sizeof(long) == 8 && sizeof(*(val)) == 8) { \
+ asm volatile("lock btsq %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 4) { \
+ asm volatile("lock btsl %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 2) { \
+ asm volatile("lock btsw %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ } else { \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __ret = !!(__atomic_fetch_or((val), __b_bts, __ATOMIC_RELAXED) & __b_bts); \
+ } \
+ __ret; \
+ })
+
+#define _HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ unsigned char __ret; \
+ if (sizeof(long) == 8 && sizeof(*(val)) == 8) { \
+ asm volatile("lock btrq %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 4) { \
+ asm volatile("lock btrl %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ } else if (sizeof(*(val)) == 2) { \
+ asm volatile("lock btrw %2, %0\n" \
+ : "+m" (*(val)), "=@ccc"(__ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ } else { \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __ret = !!(__atomic_fetch_and((val), ~__b_bts, __ATOMIC_RELAXED) & __b_bts); \
+ } \
+ __ret; \
+ })
+
+#else // not x86 or !__GCC_ASM_FLAG_OUTPUTS__
+
+#define _HA_ATOMIC_BTS(val, bit) \
+ ({ \
+ typeof(*(val)) __b_bts = (1UL << (bit)); \
+ __atomic_fetch_or((val), __b_bts, __ATOMIC_RELAXED) & __b_bts; \
+ })
+
+#define _HA_ATOMIC_BTR(val, bit) \
+ ({ \
+ typeof(*(val)) __b_btr = (1UL << (bit)); \
+ __atomic_fetch_and((val), ~__b_btr, __ATOMIC_RELAXED) & __b_btr; \
+ })
+#endif
+
+#define _HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+/* warning, n is a pointer to the double value for dwcas */
+#define _HA_ATOMIC_DWCAS(val, o, n) __ha_cas_dw(val, o, n)
+
+#endif /* gcc >= 4.7 */
+
+/* Here come a few architecture-specific double-word CAS and barrier
+ * implementations.
+ */
+
+#ifdef __x86_64__
+
+static __inline void
+__ha_barrier_load(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_store(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_full(void)
+{
+ __asm __volatile("mfence" ::: "memory");
+}
+
+/* Use __ha_barrier_atomic* when you're trying to protect data that
+ * are modified using _HA_ATOMIC*
+ */
+static __inline void
+__ha_barrier_atomic_load(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_store(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_full(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+static __inline int
+__ha_cas_dw(void *target, void *compare, const void *set)
+{
+ char ret;
+
+ __asm __volatile("lock cmpxchg16b %0; setz %3"
+ : "+m" (*(void **)target),
+ "=a" (((void **)compare)[0]),
+ "=d" (((void **)compare)[1]),
+ "=q" (ret)
+ : "a" (((void **)compare)[0]),
+ "d" (((void **)compare)[1]),
+ "b" (((const void **)set)[0]),
+ "c" (((const void **)set)[1])
+ : "memory", "cc");
+ return (ret);
+}
+
+/* short-lived CPU relaxation */
+#define __ha_cpu_relax() ({ asm volatile("rep;nop\n"); 1; })
+
+#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
+
+static __inline void
+__ha_barrier_load(void)
+{
+ __asm __volatile("dmb" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_store(void)
+{
+ __asm __volatile("dsb" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_full(void)
+{
+ __asm __volatile("dmb" ::: "memory");
+}
+
+/* Use __ha_barrier_atomic* when you're trying to protect data that
+ * are modified using _HA_ATOMIC*
+ */
+static __inline void
+__ha_barrier_atomic_load(void)
+{
+ __asm __volatile("dmb" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_store(void)
+{
+ __asm __volatile("dsb" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_full(void)
+{
+ __asm __volatile("dmb" ::: "memory");
+}
+
+static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
+{
+ uint64_t previous;
+ int tmp;
+
+ __asm __volatile("1:"
+ "ldrexd %0, [%4];"
+ "cmp %Q0, %Q2;"
+ "ittt eq;"
+ "cmpeq %R0, %R2;"
+ "strexdeq %1, %3, [%4];"
+ "cmpeq %1, #1;"
+ "beq 1b;"
+ : "=&r" (previous), "=&r" (tmp)
+ : "r" (*(uint64_t *)compare), "r" (*(uint64_t *)set), "r" (target)
+ : "memory", "cc");
+ tmp = (previous == *(uint64_t *)compare);
+ *(uint64_t *)compare = previous;
+ return (tmp);
+}
+
+/* short-lived CPU relaxation */
+#define __ha_cpu_relax() ({ asm volatile(""); 1; })
+
+#elif defined (__aarch64__)
+
+static __inline void
+__ha_barrier_load(void)
+{
+ __asm __volatile("dmb ishld" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_store(void)
+{
+ __asm __volatile("dmb ishst" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_full(void)
+{
+ __asm __volatile("dmb ish" ::: "memory");
+}
+
+/* Use __ha_barrier_atomic* when you're trying to protect data that
+ * are modified using _HA_ATOMIC*
+ */
+static __inline void
+__ha_barrier_atomic_load(void)
+{
+ __asm __volatile("dmb ishld" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_store(void)
+{
+ __asm __volatile("dmb ishst" ::: "memory");
+}
+
+static __inline void
+__ha_barrier_atomic_full(void)
+{
+ __asm __volatile("dmb ish" ::: "memory");
+}
+
+/* short-lived CPU relaxation; this was shown to improve fairness on
+ * modern ARMv8 cores such as Neoverse N1.
+ */
+#define __ha_cpu_relax() ({ asm volatile("isb" ::: "memory"); 1; })
+
+#if defined(__ARM_FEATURE_ATOMICS) && !defined(__clang__) // ARMv8.1-A atomics
+
+/* returns 0 on failure, non-zero on success */
+static forceinline int __ha_cas_dw(void *target, void *compare, const void *set)
+{
+ /* There's no status set by the CASP instruction so we need to keep a
+ * copy of the original registers and compare them afterwards to detect
+ * if we could apply the change. In order to pass a pair, we simply map
+ * a register pair on a struct so that the compiler can emit register
+ * pairs that we can use thanks to the undocumented "%H" modifier
+ * mentioned on the link below:
+ * https://patchwork.ozlabs.org/project/gcc/patch/59368A74.2060908@foss.arm.com/
+ */
+ struct pair { uint64_t r[2]; };
+ register struct pair bck = *(struct pair *)compare;
+ register struct pair cmp asm("x0") = bck;
+ register struct pair new asm("x2") = *(const struct pair*)set;
+ int ret;
+
+ __asm__ __volatile__("casp %0, %H0, %2, %H2, [%1]\n"
+ : "+r" (cmp) // %0
+ : "r" (target), // %1
+ "r" (new) // %2
+ : "memory");
+
+ /* if the old value is still the same unchanged, we won, otherwise we
+ * store the refreshed old value.
+ */
+ ret = cmp.r[0] == bck.r[0] && cmp.r[1] == bck.r[1];
+ if (unlikely(!ret)) {
+ /* update the old value on failure. Note that in this case the
+ * caller will likely relax and jump backwards so we don't care
+ * about this cost provided that it doesn't enlarge the fast
+ * code path.
+ */
+ *(struct pair *)compare = cmp;
+ }
+ return ret;
+}
+
+#elif defined(__SIZEOF_INT128__) && defined(_ARM_FEATURE_ATOMICS) // 128-bit and ARMv8.1-A will work
+
+/* According to https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html
+ * we can use atomics on __int128. The availability of CAS is defined there:
+ * https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ * However these usually involve a function call which can be expensive for some
+ * cases, but gcc 10.2 and above can reroute the function call to either LL/SC for
+ * v8.0 or LSE for v8.1+, which allows to use a more scalable version on v8.1+ at
+ * the extra cost of a function call.
+ */
+
+/* returns 0 on failure, non-zero on success */
+static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
+{
+ return __atomic_compare_exchange_n((__int128*)target, (__int128*)compare, *(const __int128*)set,
+ 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+#else // neither ARMv8.1-A atomics nor 128-bit atomics
+
+/* returns 0 on failure, non-zero on success */
+static __inline int __ha_cas_dw(void *target, void *compare, void *set)
+{
+ void *value[2];
+ uint64_t tmp1, tmp2;
+
+ __asm__ __volatile__("1:"
+ "ldxp %0, %1, [%4]\n"
+ "mov %2, %0\n"
+ "mov %3, %1\n"
+ "eor %0, %0, %5\n"
+ "eor %1, %1, %6\n"
+ "orr %1, %0, %1\n"
+ "mov %w0, #0\n"
+ "cbnz %1, 2f\n"
+ "stxp %w0, %7, %8, [%4]\n"
+ "cbnz %w0, 1b\n"
+ "mov %w0, #1\n"
+ "2:"
+ : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), "=&r" (value[1])
+ : "r" (target), "r" (((void **)(compare))[0]), "r" (((void **)(compare))[1]), "r" (((void **)(set))[0]), "r" (((void **)(set))[1])
+ : "cc", "memory");
+
+ ((void **)(compare))[0] = value[0];
+ ((void **)(compare))[1] = value[1];
+ return (tmp1);
+}
+#endif // ARMv8.1-A atomics
+
+#else /* unknown / unhandled architecture, fall back to generic barriers */
+
+#define __ha_barrier_atomic_load __sync_synchronize
+#define __ha_barrier_atomic_store __sync_synchronize
+#define __ha_barrier_atomic_full __sync_synchronize
+#define __ha_barrier_load __sync_synchronize
+#define __ha_barrier_store __sync_synchronize
+#define __ha_barrier_full __sync_synchronize
+/* Note: there is no generic DWCAS */
+
+/* short-lived CPU relaxation */
+#define __ha_cpu_relax() ({ asm volatile(""); 1; })
+
+#endif /* end of arch-specific barrier/dwcas */
+
+static inline void __ha_compiler_barrier(void)
+{
+ __asm __volatile("" ::: "memory");
+}
+
+#endif /* USE_THREAD */
+
+
+/* fallbacks to remap all undefined _HA_ATOMIC_* on to their safe equivalent */
+#ifndef _HA_ATOMIC_BTR
+#define _HA_ATOMIC_BTR HA_ATOMIC_BTR
+#endif /* !_HA_ATOMIC_BTR */
+
+#ifndef _HA_ATOMIC_BTS
+#define _HA_ATOMIC_BTS HA_ATOMIC_BTS
+#endif /* !_HA_ATOMIC_BTS */
+
+#ifndef _HA_ATOMIC_CAS
+#define _HA_ATOMIC_CAS HA_ATOMIC_CAS
+#endif /* !_HA_ATOMIC_CAS */
+
+#ifndef _HA_ATOMIC_DWCAS
+#define _HA_ATOMIC_DWCAS HA_ATOMIC_DWCAS
+#endif /* !_HA_ATOMIC_DWCAS */
+
+#ifndef _HA_ATOMIC_ADD
+#define _HA_ATOMIC_ADD HA_ATOMIC_ADD
+#endif /* !_HA_ATOMIC_ADD */
+
+#ifndef _HA_ATOMIC_ADD_FETCH
+#define _HA_ATOMIC_ADD_FETCH HA_ATOMIC_ADD_FETCH
+#endif /* !_HA_ATOMIC_ADD_FETCH */
+
+#ifndef _HA_ATOMIC_FETCH_ADD
+#define _HA_ATOMIC_FETCH_ADD HA_ATOMIC_FETCH_ADD
+#endif /* !_HA_ATOMIC_FETCH_ADD */
+
+#ifndef _HA_ATOMIC_SUB
+#define _HA_ATOMIC_SUB HA_ATOMIC_SUB
+#endif /* !_HA_ATOMIC_SUB */
+
+#ifndef _HA_ATOMIC_SUB_FETCH
+#define _HA_ATOMIC_SUB_FETCH HA_ATOMIC_SUB_FETCH
+#endif /* !_HA_ATOMIC_SUB_FETCH */
+
+#ifndef _HA_ATOMIC_FETCH_SUB
+#define _HA_ATOMIC_FETCH_SUB HA_ATOMIC_FETCH_SUB
+#endif /* !_HA_ATOMIC_FETCH_SUB */
+
+#ifndef _HA_ATOMIC_INC
+#define _HA_ATOMIC_INC HA_ATOMIC_INC
+#endif /* !_HA_ATOMIC_INC */
+
+#ifndef _HA_ATOMIC_DEC
+#define _HA_ATOMIC_DEC HA_ATOMIC_DEC
+#endif /* !_HA_ATOMIC_DEC */
+
+#ifndef _HA_ATOMIC_AND
+#define _HA_ATOMIC_AND HA_ATOMIC_AND
+#endif /* !_HA_ATOMIC_AND */
+
+#ifndef _HA_ATOMIC_AND_FETCH
+#define _HA_ATOMIC_AND_FETCH HA_ATOMIC_AND_FETCH
+#endif /* !_HA_ATOMIC_AND_FETCH */
+
+#ifndef _HA_ATOMIC_FETCH_AND
+#define _HA_ATOMIC_FETCH_AND HA_ATOMIC_FETCH_AND
+#endif /* !_HA_ATOMIC_FETCH_AND */
+
+#ifndef _HA_ATOMIC_OR
+#define _HA_ATOMIC_OR HA_ATOMIC_OR
+#endif /* !_HA_ATOMIC_OR */
+
+#ifndef _HA_ATOMIC_OR_FETCH
+#define _HA_ATOMIC_OR_FETCH HA_ATOMIC_OR_FETCH
+#endif /* !_HA_ATOMIC_OR_FETCH */
+
+#ifndef _HA_ATOMIC_FETCH_OR
+#define _HA_ATOMIC_FETCH_OR HA_ATOMIC_FETCH_OR
+#endif /* !_HA_ATOMIC_FETCH_OR */
+
+#ifndef _HA_ATOMIC_XCHG
+#define _HA_ATOMIC_XCHG HA_ATOMIC_XCHG
+#endif /* !_HA_ATOMIC_XCHG */
+
+#ifndef _HA_ATOMIC_STORE
+#define _HA_ATOMIC_STORE HA_ATOMIC_STORE
+#endif /* !_HA_ATOMIC_STORE */
+
+#ifndef _HA_ATOMIC_LOAD
+#define _HA_ATOMIC_LOAD HA_ATOMIC_LOAD
+#endif /* !_HA_ATOMIC_LOAD */
+
+#endif /* _HAPROXY_ATOMIC_H */
diff --git a/include/haproxy/auth-t.h b/include/haproxy/auth-t.h
new file mode 100644
index 0000000..35a1ff6
--- /dev/null
+++ b/include/haproxy/auth-t.h
@@ -0,0 +1,57 @@
+/*
+ * include/haproxy/auth-t.h
+ * Types definitions for user authentication & authorization.
+ *
+ * Copyright 2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_AUTH_T_H
+#define _HAPROXY_AUTH_T_H
+
+#include <haproxy/api-t.h>
+
+#define AU_O_INSECURE 0x00000001 /* insecure, unencrypted password */
+
+struct auth_groups {
+ struct auth_groups *next;
+ char *name;
+ char *groupusers; /* Just used during the configuration parsing. */
+};
+
+struct auth_groups_list {
+ struct auth_groups_list *next;
+ struct auth_groups *group;
+};
+
+struct auth_users {
+ struct auth_users *next;
+ unsigned int flags;
+ char *user, *pass;
+ union {
+ char *groups_names; /* Just used during the configuration parsing. */
+ struct auth_groups_list *groups;
+ } u;
+};
+
+struct userlist {
+ struct userlist *next;
+ char *name;
+ struct auth_users *users;
+ struct auth_groups *groups;
+};
+
+#endif /* _HAPROXY_AUTH_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
+
diff --git a/include/haproxy/auth.h b/include/haproxy/auth.h
new file mode 100644
index 0000000..2fe2b35
--- /dev/null
+++ b/include/haproxy/auth.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/auth.h
+ * Functions for user authentication & authorization.
+ *
+ * Copyright 2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_AUTH_H
+#define _HAPROXY_AUTH_H
+
+#include <haproxy/api.h>
+#include <haproxy/auth-t.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/sample-t.h>
+
+extern struct userlist *userlist;
+
+struct userlist *auth_find_userlist(char *name);
+unsigned int auth_resolve_groups(struct userlist *l, char *groups);
+int userlist_postinit();
+void userlist_free(struct userlist *ul);
+struct pattern *pat_match_auth(struct sample *smp, struct pattern_expr *expr, int fill);
+int check_user(struct userlist *ul, const char *user, const char *pass);
+int check_group(struct userlist *ul, char *name);
+
+#endif /* _HAPROXY_AUTH_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
+
diff --git a/include/haproxy/backend-t.h b/include/haproxy/backend-t.h
new file mode 100644
index 0000000..02a2cc5
--- /dev/null
+++ b/include/haproxy/backend-t.h
@@ -0,0 +1,191 @@
+/*
+ * include/haproxy/backend-t.h
+ * This file assembles definitions for backends
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_BACKEND_T_H
+#define _HAPROXY_BACKEND_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/lb_chash-t.h>
+#include <haproxy/lb_fas-t.h>
+#include <haproxy/lb_fwlc-t.h>
+#include <haproxy/lb_fwrr-t.h>
+#include <haproxy/lb_map-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/thread-t.h>
+
+/* Parameters for lbprm.algo */
+
+/* Lower bits define the kind of load balancing method, which means the type of
+ * algorithm, and which criterion it is based on. For this reason, those bits
+ * also include information about dependencies, so that the config parser can
+ * detect incompatibilities.
+ */
+
+/* LB parameters are on the lower 8 bits. Depends on the LB kind. */
+
+/* BE_LB_HASH_* is used with BE_LB_KIND_HI */
+#define BE_LB_HASH_SRC 0x00000000 /* hash source IP */
+#define BE_LB_HASH_URI 0x00000001 /* hash HTTP URI */
+#define BE_LB_HASH_PRM 0x00000002 /* hash HTTP URL parameter */
+#define BE_LB_HASH_HDR 0x00000003 /* hash HTTP header value */
+#define BE_LB_HASH_RDP 0x00000004 /* hash RDP cookie value */
+#define BE_LB_HASH_SMP 0x00000005 /* hash a sample expression */
+
+/* BE_LB_RR_* is used with BE_LB_KIND_RR */
+#define BE_LB_RR_DYN 0x00000000 /* dynamic round robin (default) */
+#define BE_LB_RR_STATIC 0x00000001 /* static round robin */
+#define BE_LB_RR_RANDOM 0x00000002 /* random round robin */
+
+/* BE_LB_CB_* is used with BE_LB_KIND_CB */
+#define BE_LB_CB_LC 0x00000000 /* least-connections */
+#define BE_LB_CB_FAS 0x00000001 /* first available server (opposite of leastconn) */
+
+#define BE_LB_PARM 0x000000FF /* mask to get/clear the LB param */
+
+/* Required input(s) */
+#define BE_LB_NEED_NONE 0x00000000 /* no input needed */
+#define BE_LB_NEED_ADDR 0x00000100 /* only source address needed */
+#define BE_LB_NEED_DATA 0x00000200 /* some payload is needed */
+#define BE_LB_NEED_HTTP 0x00000400 /* an HTTP request is needed */
+#define BE_LB_NEED_LOG 0x00000800 /* LOG backend required */
+#define BE_LB_NEED 0x0000FF00 /* mask to get/clear dependencies */
+
+/* Algorithm */
+#define BE_LB_KIND_NONE 0x00000000 /* algorithm not set */
+#define BE_LB_KIND_RR 0x00010000 /* round-robin */
+#define BE_LB_KIND_CB 0x00020000 /* connection-based */
+#define BE_LB_KIND_HI 0x00030000 /* hash of input (see hash inputs above) */
+#define BE_LB_KIND 0x00070000 /* mask to get/clear LB algorithm */
+
+/* All known variants of load balancing algorithms. These can be cleared using
+ * the BE_LB_ALGO mask. For a check, using BE_LB_KIND is preferred.
+ */
+#define BE_LB_ALGO_NONE (BE_LB_KIND_NONE | BE_LB_NEED_NONE) /* not defined */
+#define BE_LB_ALGO_RR (BE_LB_KIND_RR | BE_LB_NEED_NONE) /* round robin */
+#define BE_LB_ALGO_RND (BE_LB_KIND_RR | BE_LB_NEED_NONE | BE_LB_RR_RANDOM) /* random value */
+#define BE_LB_ALGO_LC (BE_LB_KIND_CB | BE_LB_NEED_NONE | BE_LB_CB_LC) /* least connections */
+#define BE_LB_ALGO_FAS (BE_LB_KIND_CB | BE_LB_NEED_NONE | BE_LB_CB_FAS) /* first available server */
+#define BE_LB_ALGO_SRR (BE_LB_KIND_RR | BE_LB_NEED_NONE | BE_LB_RR_STATIC) /* static round robin */
+#define BE_LB_ALGO_SH (BE_LB_KIND_HI | BE_LB_NEED_ADDR | BE_LB_HASH_SRC) /* hash: source IP */
+#define BE_LB_ALGO_UH (BE_LB_KIND_HI | BE_LB_NEED_HTTP | BE_LB_HASH_URI) /* hash: HTTP URI */
+#define BE_LB_ALGO_PH (BE_LB_KIND_HI | BE_LB_NEED_HTTP | BE_LB_HASH_PRM) /* hash: HTTP URL parameter */
+#define BE_LB_ALGO_HH (BE_LB_KIND_HI | BE_LB_NEED_HTTP | BE_LB_HASH_HDR) /* hash: HTTP header value */
+#define BE_LB_ALGO_RCH (BE_LB_KIND_HI | BE_LB_NEED_DATA | BE_LB_HASH_RDP) /* hash: RDP cookie value */
+#define BE_LB_ALGO_SMP (BE_LB_KIND_HI | BE_LB_NEED_DATA | BE_LB_HASH_SMP) /* hash: sample expression */
+#define BE_LB_ALGO_LH (BE_LB_KIND_HI | BE_LB_NEED_LOG | BE_LB_HASH_SMP) /* log hash: sample expression */
+#define BE_LB_ALGO_LS (BE_LB_KIND_CB | BE_LB_NEED_LOG | BE_LB_CB_FAS) /* log sticky */
+#define BE_LB_ALGO (BE_LB_KIND | BE_LB_NEED | BE_LB_PARM ) /* mask to clear algo */
+
+/* Higher bits define how a given criterion is mapped to a server. In fact it
+ * designates the LB function by itself. The dynamic algorithms will also have
+ * the DYN bit set. These flags are automatically set at the end of the parsing.
+ */
+#define BE_LB_LKUP_NONE 0x00000000 /* not defined */
+#define BE_LB_LKUP_MAP 0x00100000 /* static map based lookup */
+#define BE_LB_LKUP_RRTREE 0x00200000 /* FWRR tree lookup */
+#define BE_LB_LKUP_LCTREE 0x00300000 /* FWLC tree lookup */
+#define BE_LB_LKUP_CHTREE 0x00400000 /* consistent hash */
+#define BE_LB_LKUP_FSTREE 0x00500000 /* FAS tree lookup */
+#define BE_LB_LKUP 0x00700000 /* mask to get just the LKUP value */
+
+/* additional properties */
+#define BE_LB_PROP_DYN 0x00800000 /* bit to indicate a dynamic algorithm */
+
+/* hash types */
+#define BE_LB_HASH_MAP 0x00000000 /* map-based hash (default) */
+#define BE_LB_HASH_CONS 0x01000000 /* consistent hash */
+#define BE_LB_HASH_TYPE 0x01000000 /* get/clear hash types */
+
+/* additional modifier on top of the hash function (only avalanche right now) */
+#define BE_LB_HMOD_AVAL 0x02000000 /* avalanche modifier */
+#define BE_LB_HASH_MOD 0x02000000 /* get/clear hash modifier */
+
+/* BE_LB_HFCN_* is the hash function, to be used with BE_LB_HASH_FUNC */
+#define BE_LB_HFCN_SDBM 0x00000000 /* sdbm hash */
+#define BE_LB_HFCN_DJB2 0x04000000 /* djb2 hash */
+#define BE_LB_HFCN_WT6 0x08000000 /* wt6 hash */
+#define BE_LB_HFCN_CRC32 0x0C000000 /* crc32 hash */
+#define BE_LB_HFCN_NONE 0x10000000 /* none - no hash */
+#define BE_LB_HASH_FUNC 0x1C000000 /* get/clear hash function */
+
+
+/* various constants */
+
+/* The scale factor between user weight and effective weight allows smooth
+ * weight modulation even with small weights (eg: 1). It should not be too high
+ * though because it limits the number of servers in FWRR mode in order to
+ * prevent any integer overflow. The max number of servers per backend is
+ * limited to about (2^32-1)/256^2/scale ~= 65535.9999/scale. A scale of 16
+ * looks like a good value, as it allows 4095 servers per backend while leaving
+ * modulation steps of about 6% for servers with the lowest weight (1).
+ */
+#define BE_WEIGHT_SCALE 16
+
+/* LB parameters for all algorithms */
+struct lbprm {
+ union { /* LB parameters depending on the algo type */
+ struct lb_map map;
+ struct lb_fwrr fwrr;
+ struct lb_fwlc fwlc;
+ struct lb_chash chash;
+ struct lb_fas fas;
+ struct {
+ struct server **srv; /* array containing in-use log servers */
+ struct list avail; /* servers available for lb are registered in this list */
+ uint32_t lastid; /* last relative id used */
+ } log; /* used in log-balancing context (PR_MODE_SYSLOG backend) */
+ };
+ uint32_t algo; /* load balancing algorithm and variants: BE_LB_* */
+ int tot_wact, tot_wbck; /* total effective weights of active and backup servers */
+ int tot_weight; /* total effective weight of servers participating to LB */
+ int tot_uweight; /* total user weight of servers participating to LB (for reporting) */
+ int tot_used; /* total number of servers used for LB */
+ int wmult; /* ratio between user weight and effective weight */
+ int wdiv; /* ratio between effective weight and user weight */
+ int hash_balance_factor; /* load balancing factor * 100, 0 if disabled */
+ struct sample_expr *expr; /* sample expression for "balance hash" */
+ char *arg_str; /* name of the URL parameter/header/cookie used for hashing */
+ int arg_len; /* strlen(arg_str), computed only once */
+ int arg_opt1; /* extra option 1 for the LB algo (algo-specific) */
+ int arg_opt2; /* extra option 2 for the LB algo (algo-specific) */
+ int arg_opt3; /* extra option 3 for the LB algo (algo-specific) */
+ __decl_thread(HA_RWLOCK_T lock);
+ struct server *fbck; /* first backup server when !PR_O_USE_ALL_BK, or NULL */
+
+ /* Call backs for some actions. Any of them may be NULL (thus should be ignored).
+ * Those marked "srvlock" will need to be called with the server lock held.
+ * The other ones might take it themselves if needed.
+ */
+ void (*update_server_eweight)(struct server *); /* to be called after eweight change // srvlock */
+ void (*set_server_status_up)(struct server *); /* to be called after status changes to UP // srvlock */
+ void (*set_server_status_down)(struct server *); /* to be called after status changes to DOWN // srvlock */
+ void (*server_take_conn)(struct server *); /* to be called when connection is assigned */
+ void (*server_drop_conn)(struct server *); /* to be called when connection is dropped */
+};
+
+#endif /* _HAPROXY_BACKEND_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/backend.h b/include/haproxy/backend.h
new file mode 100644
index 0000000..4ab9170
--- /dev/null
+++ b/include/haproxy/backend.h
@@ -0,0 +1,158 @@
+/*
+ * include/haproxy/backend.h
+ * Functions prototypes for the backend.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_BACKEND_H
+#define _HAPROXY_BACKEND_H
+
+#include <haproxy/api.h>
+#include <haproxy/backend-t.h>
+#include <haproxy/clock.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/time.h>
+
+int assign_server(struct stream *s);
+int assign_server_address(struct stream *s);
+int assign_server_and_queue(struct stream *s);
+int alloc_bind_address(struct sockaddr_storage **ss,
+ struct server *srv, struct proxy *be,
+ struct stream *s);
+int srv_redispatch_connect(struct stream *t);
+void back_try_conn_req(struct stream *s);
+void back_handle_st_req(struct stream *s);
+void back_handle_st_con(struct stream *s);
+void back_handle_st_rdy(struct stream *s);
+void back_handle_st_cer(struct stream *s);
+
+const char *backend_lb_algo_str(int algo);
+int backend_parse_balance(const char **args, char **err, struct proxy *curproxy);
+int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit);
+
+int be_downtime(struct proxy *px);
+void recount_servers(struct proxy *px);
+void update_backend_weight(struct proxy *px);
+int be_lastsession(const struct proxy *be);
+
+/* Returns number of usable servers in backend */
+static inline int be_usable_srv(struct proxy *be)
+{
+ if (be->flags & PR_FL_DISABLED)
+ return 0;
+ else if (be->srv_act)
+ return be->srv_act;
+ else if (be->lbprm.fbck)
+ return 1;
+ else
+ return be->srv_bck;
+}
+
+/* set the time of last session on the backend */
+static inline void be_set_sess_last(struct proxy *be)
+{
+ be->be_counters.last_sess = ns_to_sec(now_ns);
+}
+
+/* This function returns non-zero if the designated server will be
+ * usable for LB according to pending weight and state.
+ * Otherwise it returns 0.
+ */
+static inline int srv_willbe_usable(const struct server *srv)
+{
+ enum srv_state state = srv->next_state;
+
+ if (!srv->next_eweight)
+ return 0;
+ if (srv->next_admin & SRV_ADMF_MAINT)
+ return 0;
+ if (srv->next_admin & SRV_ADMF_DRAIN)
+ return 0;
+ switch (state) {
+ case SRV_ST_STARTING:
+ case SRV_ST_RUNNING:
+ return 1;
+ case SRV_ST_STOPPING:
+ case SRV_ST_STOPPED:
+ return 0;
+ }
+ return 0;
+}
+
+/* This function returns non-zero if the designated server was usable for LB
+ * according to its current weight and state. Otherwise it returns 0.
+ */
+static inline int srv_currently_usable(const struct server *srv)
+{
+ enum srv_state state = srv->cur_state;
+
+ if (!srv->cur_eweight)
+ return 0;
+ if (srv->cur_admin & SRV_ADMF_MAINT)
+ return 0;
+ if (srv->cur_admin & SRV_ADMF_DRAIN)
+ return 0;
+ switch (state) {
+ case SRV_ST_STARTING:
+ case SRV_ST_RUNNING:
+ return 1;
+ case SRV_ST_STOPPING:
+ case SRV_ST_STOPPED:
+ return 0;
+ }
+ return 0;
+}
+
+/* This function commits the next server state and weight onto the current
+ * ones in order to detect future changes. The server's lock is expected to
+ * be held when calling this function.
+ */
+static inline void srv_lb_commit_status(struct server *srv)
+{
+ srv->cur_state = srv->next_state;
+ srv->cur_admin = srv->next_admin;
+ srv->cur_eweight = srv->next_eweight;
+}
+
+/* This function returns true when a server has experienced a change since last
+ * commit on its state or weight, otherwise zero.
+ */
+static inline int srv_lb_status_changed(const struct server *srv)
+{
+ return (srv->next_state != srv->cur_state ||
+ srv->next_admin != srv->cur_admin ||
+ srv->next_eweight != srv->cur_eweight);
+}
+
+/* sends a log message when a backend goes down, and also sets last
+ * change date.
+ */
+void set_backend_down(struct proxy *be);
+
+unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len);
+
+#endif /* _HAPROXY_BACKEND_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/base64.h b/include/haproxy/base64.h
new file mode 100644
index 0000000..ace6063
--- /dev/null
+++ b/include/haproxy/base64.h
@@ -0,0 +1,28 @@
+/*
+ * include/haproxy/base64.h
+ * Ascii to Base64 conversion as described in RFC1421.
+ *
+ * Copyright 2006-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_BASE64_H
+#define _HAPROXY_BASE64_H
+
+#include <haproxy/api.h>
+
+int a2base64(char *in, int ilen, char *out, int olen);
+int a2base64url(const char *in, size_t ilen, char *out, size_t olen);
+int base64dec(const char *in, size_t ilen, char *out, size_t olen);
+int base64urldec(const char *in, size_t ilen, char *out, size_t olen);
+const char *s30tob64(int in, char *out);
+int b64tos30(const char *in);
+
+extern const char base64tab[];
+
+#endif /* _HAPROXY_BASE64_H */
diff --git a/include/haproxy/buf-t.h b/include/haproxy/buf-t.h
new file mode 100644
index 0000000..3c0f8b5
--- /dev/null
+++ b/include/haproxy/buf-t.h
@@ -0,0 +1,62 @@
+/*
+ * include/haproxy/buf-t.h
+ * Simple buffer handling - types definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BUF_T_H
+#define _HAPROXY_BUF_T_H
+
+#include <haproxy/api-t.h>
+
+/* Structure defining a buffer's head */
+struct buffer {
+ size_t size; /* buffer size in bytes */
+ char *area; /* points to <size> bytes */
+ size_t data; /* amount of data after head including wrapping */
+ size_t head; /* start offset of remaining data relative to area */
+};
+
+/* A buffer may be in 3 different states :
+ * - unallocated : size == 0, area == 0 (b_is_null() is true)
+ * - waiting : size == 0, area != 0 (b_is_null() is true)
+ * - allocated : size > 0, area > 0 (b_is_null() is false)
+ */
+
+/* initializers for certain buffer states. It is important that the NULL buffer
+ * remains the one with all fields initialized to zero so that a calloc() or a
+ * memset() on a struct automatically sets a NULL buffer.
+ */
+#define BUF_NULL ((struct buffer){ })
+#define BUF_WANTED ((struct buffer){ .area = (char *)1 })
+#define BUF_RING ((struct buffer){ .area = (char *)2 })
+
+#endif /* _HAPROXY_BUF_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/buf.h b/include/haproxy/buf.h
new file mode 100644
index 0000000..e98161e
--- /dev/null
+++ b/include/haproxy/buf.h
@@ -0,0 +1,1161 @@
+/*
+ * include/haproxy/buf.h
+ * Simple buffer handling - functions definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BUF_H
+#define _HAPROXY_BUF_H
+
+#include <sys/types.h>
+#include <string.h>
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+
+/***************************************************************************/
+/* Functions used to compute offsets and pointers. Most of them exist in */
+/* both wrapping-safe and unchecked ("__" prefix) variants. Some returning */
+/* a pointer are also provided with an "_ofs" suffix when they return an */
+/* offset relative to the storage area. */
+/***************************************************************************/
+
+/* b_is_null() : returns true if (and only if) the buffer is not yet allocated
+ * and thus has an empty size. Its pointer may then be anything, including NULL
+ * (unallocated) or an invalid pointer such as (char*)1 (allocation pending).
+ */
+static inline int b_is_null(const struct buffer *buf)
+{
+ return buf->size == 0;
+}
+
+/* b_orig() : returns the pointer to the origin of the storage, which is the
+ * location of byte at offset zero. This is mostly used by functions which
+ * handle the wrapping by themselves.
+ */
+static inline char *b_orig(const struct buffer *b)
+{
+ return b->area;
+}
+
+/* b_size() : returns the size of the buffer. */
+static inline size_t b_size(const struct buffer *b)
+{
+ return b->size;
+}
+
+/* b_wrap() : returns the pointer to the wrapping position of the buffer area,
+ * which is by definition the first byte not part of the buffer.
+ */
+static inline char *b_wrap(const struct buffer *b)
+{
+ return b->area + b->size;
+}
+
+/* b_data() : returns the number of bytes present in the buffer. */
+static inline size_t b_data(const struct buffer *b)
+{
+ return b->data;
+}
+
+/* b_room() : returns the amount of room left in the buffer */
+static inline size_t b_room(const struct buffer *b)
+{
+ BUG_ON_HOT(b->data > b->size);
+ return b->size - b_data(b);
+}
+
+/* b_full() : returns true if the buffer is full. */
+static inline size_t b_full(const struct buffer *b)
+{
+ return !b_room(b);
+}
+
+
+/* b_stop() : returns the pointer to the byte following the end of the buffer,
+ * which may be out of the buffer if the buffer ends on the last byte of the
+ * area.
+ */
+static inline size_t __b_stop_ofs(const struct buffer *b)
+{
+ return b->head + b->data;
+}
+
+static inline const char *__b_stop(const struct buffer *b)
+{
+ return b_orig(b) + __b_stop_ofs(b);
+}
+
+static inline size_t b_stop_ofs(const struct buffer *b)
+{
+ size_t stop = __b_stop_ofs(b);
+
+ if (stop > b->size)
+ stop -= b->size;
+ return stop;
+}
+
+static inline const char *b_stop(const struct buffer *b)
+{
+ return b_orig(b) + b_stop_ofs(b);
+}
+
+
+/* b_peek() : returns a pointer to the data at position <ofs> relative to the
+ * head of the buffer. Will typically point to input data if called with the
+ * amount of output data. The wrapped versions will only support wrapping once
+ * before the beginning or after the end.
+ */
+static inline size_t __b_peek_ofs(const struct buffer *b, size_t ofs)
+{
+ return b->head + ofs;
+}
+
+static inline char *__b_peek(const struct buffer *b, size_t ofs)
+{
+ return b_orig(b) + __b_peek_ofs(b, ofs);
+}
+
+static inline size_t b_peek_ofs(const struct buffer *b, size_t ofs)
+{
+ size_t ret = __b_peek_ofs(b, ofs);
+
+ if (ret >= b->size)
+ ret -= b->size;
+
+ return ret;
+}
+
+static inline char *b_peek(const struct buffer *b, size_t ofs)
+{
+ return b_orig(b) + b_peek_ofs(b, ofs);
+}
+
+
+/* b_head() : returns the pointer to the buffer's head, which is the location
+ * of the next byte to be dequeued. Note that for buffers of size zero, the
+ * returned pointer may be outside of the buffer or even invalid.
+ */
+static inline size_t __b_head_ofs(const struct buffer *b)
+{
+ return b->head;
+}
+
+static inline char *__b_head(const struct buffer *b)
+{
+ return b_orig(b) + __b_head_ofs(b);
+}
+
+static inline size_t b_head_ofs(const struct buffer *b)
+{
+ return __b_head_ofs(b);
+}
+
+static inline char *b_head(const struct buffer *b)
+{
+ return __b_head(b);
+}
+
+
+/* b_tail() : returns the pointer to the tail of the buffer, which is the
+ * location of the first byte where it is possible to enqueue new data. Note
+ * that for buffers of size zero, the returned pointer may be outside of the
+ * buffer or even invalid.
+ */
+static inline size_t __b_tail_ofs(const struct buffer *b)
+{
+ return __b_peek_ofs(b, b_data(b));
+}
+
+static inline char *__b_tail(const struct buffer *b)
+{
+ return __b_peek(b, b_data(b));
+}
+
+static inline size_t b_tail_ofs(const struct buffer *b)
+{
+ return b_peek_ofs(b, b_data(b));
+}
+
+static inline char *b_tail(const struct buffer *b)
+{
+ return b_peek(b, b_data(b));
+}
+
+
+/* b_next() : for an absolute pointer <p> or a relative offset <o> pointing to
+ * a valid location within buffer <b>, returns either the absolute pointer or
+ * the relative offset pointing to the next byte, which usually is at (p + 1)
+ * unless p reaches the wrapping point and wrapping is needed.
+ */
+static inline size_t b_next_ofs(const struct buffer *b, size_t o)
+{
+ o++;
+ BUG_ON_HOT(o > b->size);
+ if (o == b->size)
+ o = 0;
+ return o;
+}
+
+static inline char *b_next(const struct buffer *b, const char *p)
+{
+ p++;
+ BUG_ON_HOT(p > b_wrap(b));
+ if (p == b_wrap(b))
+ p = b_orig(b);
+ return (char *)p;
+}
+
+/* b_dist() : returns the distance between two pointers, taking into account
+ * the ability to wrap around the buffer's end. The operation is not defined if
+ * either of the pointers does not belong to the buffer or if their distance is
+ * greater than the buffer's size.
+ */
+static inline size_t b_dist(const struct buffer *b, const char *from, const char *to)
+{
+ ssize_t dist = to - from;
+
+ BUG_ON_HOT((dist > 0 && dist > b_size(b)) || (dist < 0 && -dist > b_size(b)));
+ dist += dist < 0 ? b_size(b) : 0;
+ return dist;
+}
+
+/* b_almost_full() : returns 1 if the buffer uses at least 3/4 of its capacity,
+ * otherwise zero. Buffers of size zero are considered full.
+ */
+static inline int b_almost_full(const struct buffer *b)
+{
+ BUG_ON_HOT(b->data > b->size);
+ return b_data(b) >= b_size(b) * 3 / 4;
+}
+
+/* b_space_wraps() : returns non-zero only if the buffer's free space wraps :
+ * [ |xxxx| ] => yes
+ * [xxxx| ] => no
+ * [ |xxxx] => no
+ * [xxxx| |xxxx] => no
+ * [xxxxxxxxxx|xxxxxxxxxxx] => no
+ *
+ * So the only case where the buffer does not wrap is when there's data either
+ * at the beginning or at the end of the buffer. Thus we have this :
+ * - if (head <= 0) ==> doesn't wrap
+ * - if (tail >= size) ==> doesn't wrap
+ * - otherwise wraps
+ */
+static inline int b_space_wraps(const struct buffer *b)
+{
+ BUG_ON_HOT(b->data > b->size);
+ if ((ssize_t)__b_head_ofs(b) <= 0)
+ return 0;
+ if (__b_tail_ofs(b) >= b_size(b))
+ return 0;
+ return 1;
+}
+
+/* b_contig_data() : returns the amount of data that can contiguously be read
+ * at once starting from a relative offset <start> (which allows to easily
+ * pre-compute blocks for memcpy). The start point will typically contain the
+ * amount of past data already returned by a previous call to this function.
+ */
+static inline size_t b_contig_data(const struct buffer *b, size_t start)
+{
+ size_t data = b_wrap(b) - b_peek(b, start);
+ size_t limit = b_data(b) - start;
+
+ if (data > limit)
+ data = limit;
+ return data;
+}
+
+/* b_contig_space() : returns the amount of bytes that can be appended to the
+ * buffer at once. We have 8 possible cases :
+ *
+ * [____________________] return size
+ * [______|_____________] return size - tail_ofs
+ * [XXXXXX|_____________] return size - tail_ofs
+ * [___|XXXXXX|_________] return size - tail_ofs
+ * [______________XXXXXX] return head_ofs
+ * [XXXX|___________|XXX] return head_ofs - tail_ofs
+ * [XXXXXXXXXX|XXXXXXXXX] return 0
+ * [XXXXXXXXXXXXXXXXXXXX] return 0
+ */
+static inline size_t b_contig_space(const struct buffer *b)
+{
+ size_t left, right;
+
+ BUG_ON_HOT(b->data > b->size);
+
+ right = b_head_ofs(b);
+ left = right + b_data(b);
+
+ left = b_size(b) - left;
+ if ((ssize_t)left <= 0)
+ left += right;
+ return left;
+}
+
+/* b_getblk() : gets one full block of data at once from a buffer, starting
+ * from offset <offset> after the buffer's head, and limited to no more than
+ * <len> bytes. The caller is responsible for ensuring that neither <offset>
+ * nor <offset>+<len> exceed the total number of bytes available in the buffer.
+ * Return values :
+ * >0 : number of bytes read, equal to requested size.
+ * =0 : not enough data available. <blk> is left undefined.
+ * The buffer is left unaffected.
+ */
+static inline size_t b_getblk(const struct buffer *buf, char *blk, size_t len, size_t offset)
+{
+ size_t firstblock;
+
+ BUG_ON(buf->data > buf->size);
+ BUG_ON(offset > buf->data);
+ BUG_ON(offset + len > buf->data);
+
+ if (len + offset > b_data(buf))
+ return 0;
+
+ firstblock = b_wrap(buf) - b_head(buf);
+ if (firstblock > offset) {
+ if (firstblock >= len + offset) {
+ memcpy(blk, b_head(buf) + offset, len);
+ return len;
+ }
+
+ memcpy(blk, b_head(buf) + offset, firstblock - offset);
+ memcpy(blk + firstblock - offset, b_orig(buf), len - firstblock + offset);
+ return len;
+ }
+
+ memcpy(blk, b_orig(buf) + offset - firstblock, len);
+ return len;
+}
+
+/* b_getblk_nc() : gets one or two blocks of data at once from a buffer,
+ * starting from offset <ofs> after the beginning of its output, and limited to
+ * no more than <max> bytes. The caller is responsible for ensuring that
+ * neither <ofs> nor <ofs>+<max> exceed the total number of bytes available in
+ * the buffer. Return values :
+ * >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
+ * =0 : not enough data available. <blk*> are left undefined.
+ * The buffer is left unaffected. Unused buffers are left in an undefined state.
+ */
+static inline size_t b_getblk_nc(const struct buffer *buf, const char **blk1, size_t *len1, const char **blk2, size_t *len2, size_t ofs, size_t max)
+{
+ size_t l1;
+
+ BUG_ON_HOT(buf->data > buf->size);
+ BUG_ON_HOT(ofs > buf->data);
+ BUG_ON_HOT(ofs + max > buf->data);
+
+ if (!max)
+ return 0;
+
+ *blk1 = b_peek(buf, ofs);
+ l1 = b_wrap(buf) - *blk1;
+ if (l1 < max) {
+ *len1 = l1;
+ *len2 = max - l1;
+ *blk2 = b_orig(buf);
+ return 2;
+ }
+ *len1 = max;
+ return 1;
+}
+
+
+/*********************************************/
+/* Functions used to modify the buffer state */
+/*********************************************/
+
+/* b_reset() : resets a buffer. The size is not touched. */
+static inline void b_reset(struct buffer *b)
+{
+ b->head = 0;
+ b->data = 0;
+}
+
+/* b_make() : make a buffer from all parameters */
+static inline struct buffer b_make(char *area, size_t size, size_t head, size_t data)
+{
+ struct buffer b;
+
+ b.area = area;
+ b.size = size;
+ b.head = head;
+ b.data = data;
+ return b;
+}
+
+/* b_sub() : decreases the buffer length by <count> */
+static inline void b_sub(struct buffer *b, size_t count)
+{
+ BUG_ON_HOT(b->data < count);
+ b->data -= count;
+}
+
+/* b_add() : increase the buffer length by <count> */
+static inline void b_add(struct buffer *b, size_t count)
+{
+ BUG_ON_HOT(b->data + count > b->size);
+ b->data += count;
+}
+
+/* b_set_data() : sets the buffer's length */
+static inline void b_set_data(struct buffer *b, size_t len)
+{
+ BUG_ON_HOT(len > b->size);
+ b->data = len;
+}
+
+/* b_del() : skips <del> bytes in a buffer <b>. Covers both the output and the
+ * input parts so it's up to the caller to know where it plays and that <del>
+ * is always smaller than the amount of data in the buffer.
+ */
+static inline void b_del(struct buffer *b, size_t del)
+{
+ BUG_ON_HOT(b->data < del);
+ b->data -= del;
+ b->head += del;
+ if (b->head >= b->size)
+ b->head -= b->size;
+}
+
+/* b_realign_if_empty() : realigns a buffer if it's empty */
+static inline void b_realign_if_empty(struct buffer *b)
+{
+ if (!b_data(b))
+ b->head = 0;
+}
+
+/* b_slow_realign() : this function realigns a possibly wrapping buffer so that
+ * the part remaining to be parsed is contiguous and starts at the beginning of
+ * the buffer and the already parsed output part ends at the end of the buffer.
+ * This provides the best conditions since it allows the largest inputs to be
+ * processed at once and ensures that once the output data leaves, the whole
+ * buffer is available at once. The number of output bytes supposedly present
+ * at the beginning of the buffer and which need to be moved to the end must be
+ * passed in <output>. A temporary swap area at least as large as b->size must
+ * be provided in <swap>. It's up to the caller to ensure <output> is no larger
+ * than the difference between the whole buffer's length and its input.
+ */
+static inline void b_slow_realign(struct buffer *b, char *swap, size_t output)
+{
+ size_t block1 = output;
+ size_t block2 = 0;
+
+ BUG_ON_HOT(b->data > b->size);
+
+ /* process output data in two steps to cover wrapping */
+ if (block1 > b_size(b) - b_head_ofs(b)) {
+ block2 = b_peek_ofs(b, block1);
+ block1 -= block2;
+ }
+ memcpy(swap + b_size(b) - output, b_head(b), block1);
+ memcpy(swap + b_size(b) - block2, b_orig(b), block2);
+
+ /* process input data in two steps to cover wrapping */
+ block1 = b_data(b) - output;
+ block2 = 0;
+
+ if (block1 > b_tail_ofs(b)) {
+ block2 = b_tail_ofs(b);
+ block1 = block1 - block2;
+ }
+ memcpy(swap, b_peek(b, output), block1);
+ memcpy(swap + block1, b_orig(b), block2);
+
+ /* reinject changes into the buffer */
+ memcpy(b_orig(b), swap, b_data(b) - output);
+ memcpy(b_wrap(b) - output, swap + b_size(b) - output, output);
+
+ b->head = (output ? b_size(b) - output : 0);
+}
+
+/* b_slow_realign_ofs() : this function realigns a possibly wrapping buffer
+ * setting its new head at <ofs>. Depending of the <ofs> value, the resulting
+ * buffer may also wrap. A temporary swap area at least as large as b->size must
+ * be provided in <swap>. It's up to the caller to ensure <ofs> is not larger
+ * than b->size.
+ */
+static inline void b_slow_realign_ofs(struct buffer *b, char *swap, size_t ofs)
+{
+ size_t block1 = b_data(b);
+ size_t block2 = 0;
+
+ BUG_ON_HOT(b->data > b->size);
+ BUG_ON_HOT(ofs > b->size);
+
+ if (__b_tail_ofs(b) >= b_size(b)) {
+ block2 = b_tail_ofs(b);
+ block1 -= block2;
+ }
+ memcpy(swap, b_head(b), block1);
+ memcpy(swap + block1, b_orig(b), block2);
+
+ block1 = b_data(b);
+ block2 = 0;
+ if (block1 > b_size(b) - ofs) {
+ block1 = b_size(b) - ofs;
+ block2 = b_data(b) - block1;
+ }
+ memcpy(b_orig(b) + ofs, swap, block1);
+ memcpy(b_orig(b), swap + block1, block2);
+
+ b->head = ofs;
+}
+
+
+/* b_putchr() : tries to append char <c> at the end of buffer <b>. Supports
+ * wrapping. Data are truncated if buffer is full.
+ */
+static inline void b_putchr(struct buffer *b, char c)
+{
+ if (b_full(b))
+ return;
+ *b_tail(b) = c;
+ b->data++;
+}
+
+/* __b_putblk() : tries to append <len> bytes from block <blk> to the end of
+ * buffer <b> without checking for free space (it's up to the caller to do it).
+ * Supports wrapping. It must not be called with len == 0.
+ */
+static inline void __b_putblk(struct buffer *b, const char *blk, size_t len)
+{
+ size_t half = b_contig_space(b);
+
+ BUG_ON(b_data(b) + len > b_size(b));
+
+ if (half > len)
+ half = len;
+
+ memcpy(b_tail(b), blk, half);
+
+ if (len > half)
+ memcpy(b_peek(b, b_data(b) + half), blk + half, len - half);
+ b->data += len;
+}
+
+/* b_putblk() : tries to append block <blk> at the end of buffer <b>. Supports
+ * wrapping. Data are truncated if buffer is too short. It returns the number
+ * of bytes copied.
+ */
+static inline size_t b_putblk(struct buffer *b, const char *blk, size_t len)
+{
+ if (len > b_room(b))
+ len = b_room(b);
+ if (len)
+ __b_putblk(b, blk, len);
+ return len;
+}
+
+/* b_xfer() : transfers at most <count> bytes from buffer <src> to buffer <dst>
+ * and returns the number of bytes copied. The bytes are removed from <src> and
+ * added to <dst>. The caller is responsible for ensuring that <count> is not
+ * larger than b_room(dst). Whenever possible (if the destination is empty and
+ * at least as much as the source was requested), the buffers are simply
+ * swapped instead of copied.
+ */
+static inline size_t b_xfer(struct buffer *dst, struct buffer *src, size_t count)
+{
+ size_t ret, block1, block2;
+
+ ret = 0;
+ if (!count)
+ goto leave;
+
+ ret = b_data(src);
+ if (!ret)
+ goto leave;
+
+ if (ret > count)
+ ret = count;
+ else if (!b_data(dst)) {
+ /* zero copy is possible by just swapping buffers */
+ struct buffer tmp = *dst;
+ *dst = *src;
+ *src = tmp;
+ goto leave;
+ }
+
+ block1 = b_contig_data(src, 0);
+ if (block1 > ret)
+ block1 = ret;
+ block2 = ret - block1;
+
+ if (block1)
+ __b_putblk(dst, b_head(src), block1);
+
+ if (block2)
+ __b_putblk(dst, b_peek(src, block1), block2);
+
+ b_del(src, ret);
+ leave:
+ return ret;
+}
+
+/* b_ncat() : Copy <count> bytes from <src> buffer to the end of <dst> buffer.
+ * The caller is responsible for ensuring that <count> is not larger than
+ * b_room(dst).
+ * Returns the number of bytes copied.
+ */
+static inline size_t b_ncat(struct buffer *dst, struct buffer *src, size_t count)
+{
+ size_t ret, block1, block2;
+
+ ret = 0;
+ if (!count)
+ goto leave;
+
+ ret = b_data(src);
+ if (!ret)
+ goto leave;
+
+ if (ret > count)
+ ret = count;
+ block1 = b_contig_data(src, 0);
+ if (block1 > ret)
+ block1 = ret;
+ block2 = ret - block1;
+
+ if (block1)
+ __b_putblk(dst, b_head(src), block1);
+
+ if (block2)
+ __b_putblk(dst, b_peek(src, block1), block2);
+
+ leave:
+ return ret;
+}
+
+/* b_force_xfer() : same as b_xfer() but without zero copy.
+ * The caller is responsible for ensuring that <count> is not
+ * larger than b_room(dst).
+ */
+static inline size_t b_force_xfer(struct buffer *dst, struct buffer *src, size_t count)
+{
+ size_t ret;
+
+ ret = b_ncat(dst, src, count);
+ b_del(src, ret);
+
+ return ret;
+}
+
+
+/* Moves <len> bytes from absolute position <src> of buffer <b> by <shift>
+ * bytes, while supporting wrapping of both the source and the destination.
+ * The position is relative to the buffer's origin and may overlap with the
+ * target position. The <shift>'s absolute value must be strictly lower than
+ * the buffer's size. The main purpose is to aggregate data block during
+ * parsing while removing unused delimiters. The buffer's length is not
+ * modified, and the caller must take care of size adjustments and holes by
+ * itself.
+ */
+static inline void b_move(const struct buffer *b, size_t src, size_t len, ssize_t shift)
+{
+ char *orig = b_orig(b);
+ size_t size = b_size(b);
+ size_t dst = src + size + shift;
+ size_t cnt;
+
+ BUG_ON(len > size);
+
+ if (dst >= size)
+ dst -= size;
+
+ if (shift < 0) {
+ BUG_ON(-shift >= size);
+ /* copy from left to right */
+ for (; (cnt = len); len -= cnt) {
+ if (cnt > size - src)
+ cnt = size - src;
+ if (cnt > size - dst)
+ cnt = size - dst;
+
+ memmove(orig + dst, orig + src, cnt);
+ dst += cnt;
+ src += cnt;
+ if (dst >= size)
+ dst -= size;
+ if (src >= size)
+ src -= size;
+ }
+ }
+ else if (shift > 0) {
+ BUG_ON(shift >= size);
+ /* copy from right to left */
+ for (; (cnt = len); len -= cnt) {
+ size_t src_end = src + len;
+ size_t dst_end = dst + len;
+
+ if (dst_end > size)
+ dst_end -= size;
+ if (src_end > size)
+ src_end -= size;
+
+ if (cnt > dst_end)
+ cnt = dst_end;
+ if (cnt > src_end)
+ cnt = src_end;
+
+ memmove(orig + dst_end - cnt, orig + src_end - cnt, cnt);
+ }
+ }
+}
+
+/* b_rep_blk() : writes the block <blk> at position <pos> which must be in
+ * buffer <b>, and moves the part between <end> and the buffer's tail just
+ * after the end of the copy of <blk>. This effectively replaces the part
+ * located between <pos> and <end> with a copy of <blk> of length <len>. The
+ * buffer's length is automatically updated. This is used to replace a block
+ * with another one inside a buffer. The shift value (positive or negative) is
+ * returned. If there's no space left, the move is not done. If <len> is null,
+ * the <blk> pointer is allowed to be null, in order to erase a block.
+ */
+static inline int b_rep_blk(struct buffer *b, char *pos, char *end, const char *blk, size_t len)
+{
+ int delta;
+
+ BUG_ON(pos < b->area || pos >= b->area + b->size);
+
+ delta = len - (end - pos);
+
+ if (__b_tail(b) + delta > b_wrap(b))
+ return 0; /* no space left */
+
+ if (b_data(b) &&
+ b_tail(b) + delta > b_head(b) &&
+ b_head(b) >= b_tail(b))
+ return 0; /* no space left before wrapping data */
+
+ /* first, protect the end of the buffer */
+ memmove(end + delta, end, b_tail(b) - end);
+
+ /* now, copy blk over pos */
+ if (len)
+ memcpy(pos, blk, len);
+
+ b_add(b, delta);
+ b_realign_if_empty(b);
+
+ return delta;
+}
+
+/* b_insert_blk(): inserts the block <blk> at the absolute offset <off> moving
+ * data between this offset and the buffer's tail just after the end of the copy
+ * of <blk>. The buffer's length is automatically updated. It supports
+ * wrapping. If there is not enough space to perform the copy, 0 is
+ * returned. Otherwise, the number of bytes copied is returned.
+ */
+static inline int b_insert_blk(struct buffer *b, size_t off, const char *blk, size_t len)
+{
+ size_t pos;
+
+ if (!len || len > b_room(b))
+ return 0; /* nothing to copy or not enough space left */
+
+ pos = b_peek_ofs(b, off);
+ if (pos == b_tail_ofs(b))
+ __b_putblk(b, blk, len);
+ else {
+ size_t delta = b_data(b) - off;
+
+ /* first, protect the end of the buffer */
+ b_move(b, pos, delta, len);
+
+ /* change the amount of data in the buffer during the copy */
+ b_sub(b, delta);
+ __b_putblk(b, blk, len);
+ b_add(b, delta);
+ }
+ return len;
+}
+
+/* __b_put_varint(): encode 64-bit value <v> as a varint into buffer <b>. The
+ * caller must have checked that the encoded value fits in the buffer so that
+ * there are no length checks. Wrapping is supported. You don't want to use
+ * this function but b_put_varint() instead.
+ */
+static inline void __b_put_varint(struct buffer *b, uint64_t v)
+{
+ size_t data = b->data;
+ size_t size = b_size(b);
+ char *wrap = b_wrap(b);
+ char *tail = b_tail(b);
+
+ BUG_ON_HOT(data >= size);
+
+ if (v >= 0xF0) {
+ /* more than one byte, first write the 4 least significant
+ * bits, then follow with 7 bits per byte.
+ */
+ *tail = v | 0xF0;
+ v = (v - 0xF0) >> 4;
+
+ while (1) {
+ if (++tail == wrap)
+ tail -= size;
+ data++;
+ if (v < 0x80)
+ break;
+ *tail = v | 0x80;
+ v = (v - 0x80) >> 7;
+ }
+ }
+
+ /* last byte */
+ *tail = v;
+ BUG_ON_HOT(data >= size);
+ data++;
+ b->data = data;
+}
+
+/* b_put_varint(): try to encode value <v> as a varint into buffer <b>. Returns
+ * the number of bytes written in case of success, or 0 if there is not enough
+ * room. Wrapping is supported. No partial writes will be performed.
+ */
+static inline int b_put_varint(struct buffer *b, uint64_t v)
+{
+ size_t data = b->data;
+ size_t size = b_size(b);
+ char *wrap = b_wrap(b);
+ char *tail = b_tail(b);
+
+ if (data != size && v >= 0xF0) {
+ BUG_ON_HOT(data > size);
+
+ /* more than one byte, first write the 4 least significant
+ * bits, then follow with 7 bits per byte.
+ */
+ *tail = v | 0xF0;
+ v = (v - 0xF0) >> 4;
+
+ while (1) {
+ if (++tail == wrap)
+ tail -= size;
+ data++;
+ if (data == size || v < 0x80)
+ break;
+ *tail = v | 0x80;
+ v = (v - 0x80) >> 7;
+ }
+ }
+
+ /* last byte */
+ if (data == size)
+ return 0;
+
+ *tail = v;
+ data++;
+
+ size = data - b->data;
+ b->data = data;
+ return size;
+}
+
+/* b_get_varint(): try to decode a varint from buffer <b> into value <vptr>.
+ * Returns the number of bytes read in case of success, or 0 if there were not
+ * enough bytes. Wrapping is supported. No partial reads will be performed.
+ */
+static inline int b_get_varint(struct buffer *b, uint64_t *vptr)
+{
+ const uint8_t *head = (const uint8_t *)b_head(b);
+ const uint8_t *wrap = (const uint8_t *)b_wrap(b);
+ size_t data = b->data;
+ size_t size = b_size(b);
+ uint64_t v = 0;
+ int bits = 0;
+
+ if (data != 0 && (*head >= 0xF0)) {
+ v = *head;
+ bits += 4;
+ while (1) {
+ if (++head == wrap)
+ head -= size;
+ data--;
+ if (!data || !(*head & 0x80))
+ break;
+ v += (uint64_t)*head << bits;
+ bits += 7;
+ }
+ }
+
+ /* last byte */
+ if (!data)
+ return 0;
+
+ v += (uint64_t)*head << bits;
+ *vptr = v;
+ data--;
+ size = b->data - data;
+ b_del(b, size);
+ return size;
+}
+
+/* b_peek_varint(): try to decode a varint from buffer <b> at offset <ofs>
+ * relative to head, into value <vptr>. Returns the number of bytes parsed in
+ * case of success, or 0 if there were not enough bytes, in which case the
+ * contents of <vptr> are not updated. Wrapping is supported. The buffer's head
+ * will NOT be updated. It is illegal to call this function with <ofs> greater
+ * than b->data.
+ */
+static inline int b_peek_varint(struct buffer *b, size_t ofs, uint64_t *vptr)
+{
+ const uint8_t *head = (const uint8_t *)b_peek(b, ofs);
+ const uint8_t *wrap = (const uint8_t *)b_wrap(b);
+ size_t data = b_data(b) - ofs;
+ size_t size = b_size(b);
+ uint64_t v = 0;
+ int bits = 0;
+
+ BUG_ON_HOT(ofs > b_data(b));
+
+ if (data != 0 && (*head >= 0xF0)) {
+ v = *head;
+ bits += 4;
+ while (1) {
+ if (++head == wrap)
+ head -= size;
+ data--;
+ if (!data || !(*head & 0x80))
+ break;
+ v += (uint64_t)*head << bits;
+ bits += 7;
+ }
+ }
+
+ /* last byte */
+ if (!data)
+ return 0;
+
+ v += (uint64_t)*head << bits;
+ *vptr = v;
+ data--;
+ size = b->data - ofs - data;
+ return size;
+}
+
+
+/*
+ * Buffer ring management.
+ *
+ * A buffer ring is a circular list of buffers, with a head buffer (the oldest,
+ * being read from) and a tail (the newest, being written to). Such a ring is
+ * declared as an array of buffers. The first element in the array is the root
+ * and is used differently. It stores the following elements :
+ * - size : number of allocated elements in the array, including the root
+ * - area : magic value BUF_RING (just to help debugging)
+ * - head : position of the head in the array (starts at one)
+ * - data : position of the tail in the array (starts at one).
+ *
+ * Note that contrary to a linear buffer, head and tail may be equal with room
+ * available, since the producer is expected to fill the tail. Also, the tail
+ * might pretty much be equal to BUF_WANTED if an allocation is pending, in
+ * which case it's illegal to try to allocate past this point (only one entry
+ * may be subscribed for allocation). It is illegal to allocate a buffer after
+ * an empty one, so that BUF_NULL is always the last buffer. It is also illegal
+ * to remove elements without freeing the buffers. Buffers between <tail> and
+ * <head> are in an undefined state, but <tail> and <head> are always valid.
+ * A ring may not contain fewer than 2 elements, since the root is mandatory,
+ * and at least one entry is required to always present a valid buffer.
+ *
+ * Given that buffers are 16- or 32- bytes long, it's convenient to set the
+ * size of the array to 2^N in order to keep (2^N)-1 elements, totalizing
+ * 2^N*16(or 32) bytes. For example on a 64-bit system, a ring of 31 usable
+ * buffers takes 1024 bytes.
+ */
+
+/* Initialization of a ring, the size argument contains the number of allocated
+ * elements, including the root. There must always be at least 2 elements, one
+ * for the root and one for storage.
+ */
+static inline void br_init(struct buffer *r, size_t size)
+{
+ BUG_ON(size < 2);
+
+ r->size = size;
+ r->area = BUF_RING.area;
+ r->head = r->data = 1;
+ r[1] = BUF_NULL;
+}
+
+/* Returns number of elements in the ring, root included */
+static inline unsigned int br_size(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r->size;
+}
+
+/* Returns true if no more buffers may be added */
+static inline unsigned int br_full(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r->data + 1 == r->head || r->data + 1 == r->head - 1 + r->size;
+}
+
+/* Returns the number of buffers present */
+static inline unsigned int br_count(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ if (r->data >= r->head)
+ return r->data - r->head + 1;
+ else
+ return r->data + r->size - r->head;
+}
+
+/* Returns true if a single buffer is assigned */
+static inline unsigned int br_single(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r->data == r->head;
+}
+
+/* Returns the index of the ring's head buffer */
+static inline unsigned int br_head_idx(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r->head;
+}
+
+/* Returns the index of the ring's tail buffer */
+static inline unsigned int br_tail_idx(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r->data;
+}
+
+/* Returns a pointer to the ring's head buffer */
+static inline struct buffer *br_head(struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r + br_head_idx(r);
+}
+
+/* Returns a pointer to the ring's tail buffer */
+static inline struct buffer *br_tail(struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return r + br_tail_idx(r);
+}
+
+/* Returns the amount of data of the ring's HEAD buffer */
+static inline unsigned int br_data(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ return b_data(r + br_head_idx(r));
+}
+
+/* Returns non-zero if the ring is non-full or its tail has some room */
+static inline unsigned int br_has_room(const struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ if (!br_full(r))
+ return 1;
+ return b_room(r + br_tail_idx(r));
+}
+
+/* Advances the ring's tail if it points to a non-empty buffer, and returns the
+ * buffer, or NULL if the ring is full or the tail buffer is already empty. A
+ * new buffer is initialized to BUF_NULL before being returned. This is to be
+ * used after failing to append data, in order to decide to retry or not.
+ */
+static inline struct buffer *br_tail_add(struct buffer *r)
+{
+ struct buffer *b;
+
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ b = br_tail(r);
+ if (!b_size(b))
+ return NULL;
+
+ if (br_full(r))
+ return NULL;
+
+ r->data++;
+ if (r->data >= r->size)
+ r->data = 1;
+
+ b = br_tail(r);
+ *b = BUF_NULL;
+ return b;
+}
+
+/* Extracts the ring's head buffer and returns it. The last buffer (tail) is
+ * never removed but it is returned. This guarantees that we stop on BUF_WANTED
+ * or BUF_EMPTY and that at the end a valid buffer remains present. This is
+ * used for pre-extraction during a free() loop for example. The caller is
+ * expected to detect the end (e.g. using bsize() since b_free() voids the
+ * buffer).
+ */
+static inline struct buffer *br_head_pick(struct buffer *r)
+{
+ struct buffer *b;
+
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ b = br_head(r);
+ if (r->head != r->data) {
+ r->head++;
+ if (r->head >= r->size)
+ r->head = 1;
+ }
+ return b;
+}
+
+/* Advances the ring's head and returns the next buffer, unless it's already
+ * the tail, in which case the tail itself is returned. This is used for post-
+ * parsing deletion. The caller is expected to detect the end (e.g. a parser
+ * will typically purge the head before proceeding).
+ */
+static inline struct buffer *br_del_head(struct buffer *r)
+{
+ BUG_ON_HOT(r->area != BUF_RING.area);
+
+ if (r->head != r->data) {
+ r->head++;
+ if (r->head >= r->size)
+ r->head = 1;
+ }
+ return br_head(r);
+}
+
+#endif /* _HAPROXY_BUF_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/bug.h b/include/haproxy/bug.h
new file mode 100644
index 0000000..1356acf
--- /dev/null
+++ b/include/haproxy/bug.h
@@ -0,0 +1,479 @@
+/*
+ * include/haproxy/bug.h
+ * Assertions and instant crash macros needed everywhere.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_BUG_H
+#define _HAPROXY_BUG_H
+
+#include <haproxy/atomic.h>
+#include <haproxy/compiler.h>
+
+/* quick debugging hack, should really be removed ASAP */
+#ifdef DEBUG_FULL
+#define DPRINTF(x...) fprintf(x)
+#else
+#define DPRINTF(x...)
+#endif
+
+#define DUMP_TRACE() do { extern void ha_backtrace_to_stderr(void); ha_backtrace_to_stderr(); } while (0)
+
+/* First, let's try to handle some arch-specific crashing methods. We prefer
+ * the macro to the function because when opening the core, the debugger will
+ * directly show the calling point (e.g. the BUG_ON() condition) based on the
+ * line number, while the function will create new line numbers. But the
+ * function is needed e.g. if some pragmas are needed.
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+#define ha_crash_now() do { \
+ /* ud2 opcode: 2 bytes, raises illegal instruction */ \
+ __asm__ volatile(".byte 0x0f,0x0b\n"); \
+ DO_NOT_FOLD(); \
+ my_unreachable(); \
+ } while (0)
+
+#elif defined(__aarch64__)
+#define ha_crash_now() do { \
+		/* udf #imm16: 4 bytes, raises illegal instruction */ \
+ __asm__ volatile(".byte 0x00,0x00,0x00,0x00\n"); \
+ DO_NOT_FOLD(); \
+ my_unreachable(); \
+ } while (0)
+
+#else // not x86
+
+/* generic implementation, causes a segfault */
+static inline __attribute((always_inline)) void ha_crash_now(void)
+{
+#if __GNUC_PREREQ__(5, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#if __GNUC_PREREQ__(6, 0)
+#pragma GCC diagnostic ignored "-Wnull-dereference"
+#endif
+#endif
+ *(volatile char *)1 = 0;
+#if __GNUC_PREREQ__(5, 0)
+#pragma GCC diagnostic pop
+#endif
+ DO_NOT_FOLD();
+ my_unreachable();
+}
+
+#endif // end of arch-specific ha_crash_now() definitions
+
+#ifdef DEBUG_USE_ABORT
+/* abort() is better recognized by code analysis tools */
+
+/* abort() is generally tagged noreturn, so there's no 100% safe way to prevent
+ * the compiler from doing a tail-merge here. Tests show that stopping folding
+ * just before calling abort() does work in practice at -O2, increasing the
+ * number of abort() calls in h3.o from 18 to 26, probably because there's no
+ * more savings to be made by replacing a call with a jump. However, as -Os it
+ * drops to 5 regardless of the build option. In order to help here, instead we
+ * wrap abort() into another function, with the line number stored into a local
+ * variable on the stack and we pretend to use it, so that unwinding the stack
+ * from abort() will reveal its value even if the call was folded.
+ */
+static __attribute__((noinline,noreturn,unused)) void abort_with_line(uint line)
+{
+ DISGUISE(&line);
+ abort();
+}
+
+#define ABORT_NOW() do { DUMP_TRACE(); abort_with_line(__LINE__); } while (0)
+#else
+/* More efficient than abort() because it does not mangle the
+ * stack and stops at the exact location we need.
+ */
+#define ABORT_NOW() do { DUMP_TRACE(); ha_crash_now(); } while (0)
+#endif
+
+/* This is the generic low-level macro dealing with conditional warnings and
+ * bugs. The caller decides whether to crash or not and what prefix and suffix
+ * to pass. The macro returns the boolean value of the condition as an int for
+ * the case where it wouldn't die. The <crash> flag is made of:
+ * - crash & 1: crash yes/no;
+ * - crash & 2: taint as bug instead of warn
+ */
+#define _BUG_ON(cond, file, line, crash, pfx, sfx) \
+ __BUG_ON(cond, file, line, crash, pfx, sfx)
+
+#define __BUG_ON(cond, file, line, crash, pfx, sfx) \
+ (void)(unlikely(cond) ? ({ \
+ complain(NULL, "\n" pfx "condition \"" #cond "\" matched at " file ":" #line "" sfx "\n", crash); \
+ if (crash & 1) \
+ ABORT_NOW(); \
+ else \
+ DUMP_TRACE(); \
+ 1; /* let's return the true condition */ \
+ }) : 0)
+
+/* This one is equivalent except that it only emits the message once by
+ * maintaining a static counter. This may be used with warnings to detect
+ * certain unexpected conditions in field. Later on, in cores it will be
+ * possible to verify these counters.
+ */
+#define _BUG_ON_ONCE(cond, file, line, crash, pfx, sfx) \
+ __BUG_ON_ONCE(cond, file, line, crash, pfx, sfx)
+
+#define __BUG_ON_ONCE(cond, file, line, crash, pfx, sfx) \
+ (void)(unlikely(cond) ? ({ \
+ static int __match_count_##line; \
+ complain(&__match_count_##line, "\n" pfx "condition \"" #cond "\" matched at " file ":" #line "" sfx "\n", crash); \
+ if (crash & 1) \
+ ABORT_NOW(); \
+ else \
+ DUMP_TRACE(); \
+ 1; /* let's return the true condition */ \
+ }) : 0)
+
+/* DEBUG_STRICT enables/disables runtime checks on condition <cond>
+ * DEBUG_STRICT_ACTION indicates the level of verification on the rules when
+ * <cond> is true:
+ *
+ * macro BUG_ON() WARN_ON() CHECK_IF()
+ * value 0 warn warn warn
+ * 1 CRASH warn warn
+ * 2 CRASH CRASH warn
+ * 3 CRASH CRASH CRASH
+ */
+
+/* The macros below are for general use */
+#if defined(DEBUG_STRICT)
+# if defined(DEBUG_STRICT_ACTION) && (DEBUG_STRICT_ACTION < 1)
+/* Lowest level: BUG_ON() warns, WARN_ON() warns, CHECK_IF() warns */
+# define BUG_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 2, "WARNING: bug ", " (not crashing but process is untrusted now, please report to developers)")
+# define WARN_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 0, "WARNING: warn ", " (please report to developers)")
+# define CHECK_IF(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 0, "WARNING: check ", " (please report to developers)")
+# elif !defined(DEBUG_STRICT_ACTION) || (DEBUG_STRICT_ACTION == 1)
+/* default level: BUG_ON() crashes, WARN_ON() warns, CHECK_IF() warns */
+# define BUG_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 3, "FATAL: bug ", "")
+# define WARN_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 0, "WARNING: warn ", " (please report to developers)")
+# define CHECK_IF(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 0, "WARNING: check ", " (please report to developers)")
+# elif defined(DEBUG_STRICT_ACTION) && (DEBUG_STRICT_ACTION == 2)
+/* Stricter level: BUG_ON() crashes, WARN_ON() crashes, CHECK_IF() warns */
+# define BUG_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 3, "FATAL: bug ", "")
+# define WARN_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 1, "FATAL: warn ", "")
+# define CHECK_IF(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 0, "WARNING: check ", " (please report to developers)")
+# elif defined(DEBUG_STRICT_ACTION) && (DEBUG_STRICT_ACTION >= 3)
+/* Developer/CI level: BUG_ON() crashes, WARN_ON() crashes, CHECK_IF() crashes */
+# define BUG_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 3, "FATAL: bug ", "")
+# define WARN_ON(cond) _BUG_ON (cond, __FILE__, __LINE__, 1, "FATAL: warn ", "")
+# define CHECK_IF(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 1, "FATAL: check ", "")
+# endif
+#else
+# define BUG_ON(cond) do { (void)sizeof(cond); } while (0)
+# define WARN_ON(cond) do { (void)sizeof(cond); } while (0)
+# define CHECK_IF(cond) do { (void)sizeof(cond); } while (0)
+#endif
+
+/* These macros are only for hot paths and remain disabled unless DEBUG_STRICT is 2 or above.
+ * Only developers/CI should use these levels as they may significantly impact performance by
+ * enabling checks in sensitive areas.
+ */
+#if defined(DEBUG_STRICT) && (DEBUG_STRICT > 1)
+# if defined(DEBUG_STRICT_ACTION) && (DEBUG_STRICT_ACTION < 1)
+/* Lowest level: BUG_ON() warns, CHECK_IF() warns */
+# define BUG_ON_HOT(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 2, "WARNING: bug ", " (not crashing but process is untrusted now, please report to developers)")
+# define CHECK_IF_HOT(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 0, "WARNING: check ", " (please report to developers)")
+# elif !defined(DEBUG_STRICT_ACTION) || (DEBUG_STRICT_ACTION < 3)
+/* default level: BUG_ON() crashes, CHECK_IF() warns */
+# define BUG_ON_HOT(cond) _BUG_ON (cond, __FILE__, __LINE__, 3, "FATAL: bug ", "")
+# define CHECK_IF_HOT(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 0, "WARNING: check ", " (please report to developers)")
+# elif defined(DEBUG_STRICT_ACTION) && (DEBUG_STRICT_ACTION >= 3)
+/* Developer/CI level: BUG_ON() crashes, CHECK_IF() crashes */
+# define BUG_ON_HOT(cond) _BUG_ON (cond, __FILE__, __LINE__, 3, "FATAL: bug ", "")
+# define CHECK_IF_HOT(cond) _BUG_ON_ONCE(cond, __FILE__, __LINE__, 1, "FATAL: check ", "")
+# endif
+#else
+# define BUG_ON_HOT(cond) do { (void)sizeof(cond); } while (0)
+# define CHECK_IF_HOT(cond) do { (void)sizeof(cond); } while (0)
+#endif
+
+
+/* When not optimizing, clang won't remove that code, so only compile it in when optimizing */
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
+#define HA_LINK_ERROR(what) \
+ do { \
+ /* provoke a build-time error */ \
+ extern volatile int what; \
+ what = 1; \
+ } while (0)
+#else
+#define HA_LINK_ERROR(what) \
+ do { \
+ } while (0)
+#endif /* __OPTIMIZE__ */
+
+/* more reliable free() that clears the pointer */
+#define ha_free(x) do { \
+ typeof(x) __x = (x); \
+ if (__builtin_constant_p((x)) || __builtin_constant_p(*(x))) { \
+ HA_LINK_ERROR(call_to_ha_free_attempts_to_free_a_constant); \
+ } \
+ free(*__x); \
+ *__x = NULL; \
+ } while (0)
+
+/* describes a call place in the code, for example for tracing memory
+ * allocations or task wakeups. These must be declared static const.
+ */
+struct ha_caller {
+ const char *func; // function name
+ const char *file; // file name
+ uint16_t line; // line number
+ uint8_t what; // description of the call, usage specific
+ uint8_t arg8; // optional argument, usage specific
+ uint32_t arg32; // optional argument, usage specific
+};
+
+#define MK_CALLER(_what, _arg8, _arg32) \
+ ({ static const struct ha_caller _ = { \
+ .func = __func__, .file = __FILE__, .line = __LINE__, \
+ .what = _what, .arg8 = _arg8, .arg32 = _arg32 }; \
+ &_; })
+
+/* handle 'tainted' status */
+enum tainted_flags {
+ TAINTED_CONFIG_EXP_KW_DECLARED = 0x00000001,
+ TAINTED_ACTION_EXP_EXECUTED = 0x00000002,
+ TAINTED_CLI_EXPERT_MODE = 0x00000004,
+ TAINTED_CLI_EXPERIMENTAL_MODE = 0x00000008,
+ TAINTED_WARN = 0x00000010, /* a WARN_ON triggered */
+ TAINTED_BUG = 0x00000020, /* a BUG_ON triggered */
+ TAINTED_SHARED_LIBS = 0x00000040, /* a shared library was loaded */
+ TAINTED_REDEFINITION = 0x00000080, /* symbol redefinition detected */
+ TAINTED_REPLACED_MEM_ALLOCATOR = 0x00000100, /* memory allocator was replaced using LD_PRELOAD */
+ TAINTED_PANIC = 0x00000200, /* a panic dump has started */
+ TAINTED_LUA_STUCK = 0x00000400, /* stuck in a Lua context */
+ TAINTED_LUA_STUCK_SHARED = 0x00000800, /* stuck in a shared Lua context */
+ TAINTED_MEM_TRIMMING_STUCK = 0x00001000, /* stuck while trimming memory */
+};
+
+/* this is a bit field made of TAINTED_*, and is declared in haproxy.c */
+extern unsigned int tainted;
+
+void complain(int *counter, const char *msg, int taint);
+
+static inline void mark_tainted(const enum tainted_flags flag)
+{
+ HA_ATOMIC_OR(&tainted, flag);
+}
+
+static inline unsigned int get_tainted()
+{
+ return HA_ATOMIC_LOAD(&tainted);
+}
+
+#if defined(DEBUG_MEM_STATS)
+#include <stdlib.h>
+#include <string.h>
+
+/* Memory allocation statistics are centralized into a global "mem_stats"
+ * section. This will not work with some linkers.
+ */
+enum {
+ MEM_STATS_TYPE_UNSET = 0,
+ MEM_STATS_TYPE_CALLOC,
+ MEM_STATS_TYPE_FREE,
+ MEM_STATS_TYPE_MALLOC,
+ MEM_STATS_TYPE_REALLOC,
+ MEM_STATS_TYPE_STRDUP,
+ MEM_STATS_TYPE_P_ALLOC,
+ MEM_STATS_TYPE_P_FREE,
+};
+
+struct mem_stats {
+ size_t calls;
+ size_t size;
+ struct ha_caller caller;
+ const void *extra; // extra info specific to this call (e.g. pool ptr)
+} __attribute__((aligned(sizeof(void*))));
+
+#undef calloc
+/* Instrumented calloc(): accounts the call and the requested size in this
+ * call site's "mem_stats" entry, then performs the real allocation. Note
+ * that only the stats accumulation computes <x> * <y> without an overflow
+ * check; the underlying calloc() still performs its own.
+ */
+#define calloc(x,y)  ({							\
+	size_t __x = (x); size_t __y = (y);				\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_CALLOC,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	_HA_ATOMIC_INC(&_.calls);					\
+	_HA_ATOMIC_ADD(&_.size, __x * __y);				\
+	calloc(__x,__y);						\
+})
+
+/* note: we can't redefine free() because we have a few variables and struct
+ * members called like this. This one may be used before a call to free(),
+ * and when known, the size should be indicated, otherwise pass zero. The
+ * pointer is used to know whether the call should be accounted for (null is
+ * ignored).
+ */
+#undef will_free
+/* see the comment above: accounts an upcoming free() of <y> bytes at <x> */
+#define will_free(x, y)  ({						\
+	void *__x = (x); size_t __y = (y);				\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_FREE,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	if (__x) {							\
+		_HA_ATOMIC_INC(&_.calls);				\
+		_HA_ATOMIC_ADD(&_.size, __y);				\
+	}								\
+})
+
+#undef ha_free
+/* Instrumented ha_free(): frees *<x> then sets it to NULL, accounting the
+ * call in this site's "mem_stats" entry. The freed size is unknown here so
+ * only the call count is recorded, and only for non-NULL pointers. An
+ * attempt to free a constant is turned into a link-time error.
+ */
+#define ha_free(x) ({							\
+	typeof(x) __x = (x);						\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_FREE,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	if (__builtin_constant_p((x)) || __builtin_constant_p(*(x))) {	\
+		HA_LINK_ERROR(call_to_ha_free_attempts_to_free_a_constant); \
+	}								\
+	if (*__x)							\
+		_HA_ATOMIC_INC(&_.calls);				\
+	free(*__x);							\
+	*__x = NULL;							\
+})
+
+#undef malloc
+/* Instrumented malloc(): accounts the call and the requested size in this
+ * call site's "mem_stats" entry before performing the real allocation.
+ */
+#define malloc(x)  ({							\
+	size_t __x = (x);						\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_MALLOC,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	_HA_ATOMIC_INC(&_.calls);					\
+	_HA_ATOMIC_ADD(&_.size, __x);					\
+	malloc(__x);							\
+})
+
+#undef realloc
+/* Instrumented realloc(): accounts the call and the full new size <y> in
+ * this call site's "mem_stats" entry. The previous size is unknown here,
+ * so cumulated totals over-estimate in-place growth.
+ */
+#define realloc(x,y)  ({						\
+	void *__x = (x); size_t __y = (y);				\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_REALLOC,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	_HA_ATOMIC_INC(&_.calls);					\
+	_HA_ATOMIC_ADD(&_.size, __y);					\
+	realloc(__x,__y);						\
+})
+
+#undef strdup
+/* Instrumented strdup(): accounts the call and the string length in this
+ * call site's "mem_stats" entry. Note that strlen() is accounted while the
+ * real allocation is one byte larger (trailing zero).
+ */
+#define strdup(x)  ({							\
+	const char *__x = (x); size_t __y = strlen(__x); 		\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {						\
+			.file = __FILE__, .line = __LINE__,		\
+			.what = MEM_STATS_TYPE_STRDUP,			\
+			.func = __func__,				\
+		},							\
+	};								\
+	HA_WEAK(__start_mem_stats);					\
+	HA_WEAK(__stop_mem_stats);					\
+	_HA_ATOMIC_INC(&_.calls);					\
+	_HA_ATOMIC_ADD(&_.size, __y);					\
+	strdup(__x);							\
+})
+#else // DEBUG_MEM_STATS
+
+#define will_free(x, y) do { } while (0)
+
+#endif /* DEBUG_MEM_STATS*/
+
+/* Add warnings to users of such functions. These will be reported at link time
+ * indicating what file name and line used them. The goal is to remind their
+ * users that these are extremely unsafe functions that never have a valid
+ * reason for being used.
+ */
+#undef strcat
+__attribute__warning("\n"
+" * WARNING! strcat() must never be used, because there is no convenient way\n"
+" * to use it that is safe. Use memcpy() instead!\n")
+extern char *strcat(char *__restrict dest, const char *__restrict src);
+
+#undef strcpy
+__attribute__warning("\n"
+" * WARNING! strcpy() must never be used, because there is no convenient way\n"
+" * to use it that is safe. Use memcpy() or strlcpy2() instead!\n")
+extern char *strcpy(char *__restrict dest, const char *__restrict src);
+
+#undef strncat
+__attribute__warning("\n"
+" * WARNING! strncat() must never be used, because there is no convenient way\n"
+" * to use it that is safe. Use memcpy() instead!\n")
+extern char *strncat(char *__restrict dest, const char *__restrict src, size_t n);
+
+#undef sprintf
+__attribute__warning("\n"
+" * WARNING! sprintf() must never be used, because there is no convenient way\n"
+" * to use it that is safe. Use snprintf() instead!\n")
+extern int sprintf(char *__restrict dest, const char *__restrict fmt, ...);
+
+#if defined(_VA_LIST_DEFINED) || defined(_VA_LIST_DECLARED) || defined(_VA_LIST)
+#undef vsprintf
+__attribute__warning("\n"
+" * WARNING! vsprintf() must never be used, because there is no convenient way\n"
+" * to use it that is safe. Use vsnprintf() instead!\n")
+extern int vsprintf(char *__restrict dest, const char *__restrict fmt, va_list ap);
+#endif
+
+#endif /* _HAPROXY_BUG_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/capture-t.h b/include/haproxy/capture-t.h
new file mode 100644
index 0000000..ebc7fe8
--- /dev/null
+++ b/include/haproxy/capture-t.h
@@ -0,0 +1,43 @@
+/*
+ * include/haproxy/capture-t.h
+ * This file defines types for captures.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CAPTURE_T_H
+#define _HAPROXY_CAPTURE_T_H
+
+#include <haproxy/pool-t.h>
+
+/* Descriptor of one capture slot. It describes either an HTTP header capture
+ * (<name> set) or a generic capture (<name> NULL), and owns a pool used to
+ * allocate the (len+1)-byte storage areas.
+ */
+struct cap_hdr {
+	struct cap_hdr *next;
+	char *name;				/* header name, case insensitive, NULL if not header */
+	int namelen;			/* length of the header name, to speed-up lookups, 0 if !name */
+	int len;				/* capture length, not including terminal zero */
+	int index;				/* index in the output array */
+	struct pool_head *pool;		/* pool of pre-allocated memory area of (len+1) bytes */
+};
+
+#endif /* _HAPROXY_CAPTURE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/capture.h b/include/haproxy/capture.h
new file mode 100644
index 0000000..ba0a6c0
--- /dev/null
+++ b/include/haproxy/capture.h
@@ -0,0 +1,37 @@
+/*
+ * include/haproxy/capture.h
+ * This file defines prototypes for captures.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CAPTURE_H
+#define _HAPROXY_CAPTURE_H
+
+#include <haproxy/capture-t.h>
+#include <haproxy/pool-t.h>
+
+extern struct pool_head *pool_head_capture;
+
+#endif /* _HAPROXY_CAPTURE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/cbuf-t.h b/include/haproxy/cbuf-t.h
new file mode 100644
index 0000000..27d3bf1
--- /dev/null
+++ b/include/haproxy/cbuf-t.h
@@ -0,0 +1,45 @@
+/*
+ * include/haproxy/cbuf-t.h
+ * This file contains definition for circular buffers.
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CBUF_T_H
+#define _HAPROXY_CBUF_T_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+#endif
+
+#include <haproxy/list-t.h>
+
+extern struct pool_head *pool_head_cbuf;
+
+/* Circular buffer descriptor. The buffer size must be a power of two, as the
+ * cb_* accessors wrap the indexes by masking with (sz - 1); one byte is kept
+ * unused so that a full buffer can be told apart from an empty one.
+ */
+struct cbuf {
+	/* buffer */
+	unsigned char *buf;
+	/* buffer size */
+	size_t sz;
+	/* Writer index */
+	size_t wr;
+	/* Reader index */
+	size_t rd;
+};
+
+#endif /* _HAPROXY_CBUF_T_H */
diff --git a/include/haproxy/cbuf.h b/include/haproxy/cbuf.h
new file mode 100644
index 0000000..b217a5c
--- /dev/null
+++ b/include/haproxy/cbuf.h
@@ -0,0 +1,136 @@
+/*
+ * include/haproxy/cbuf.h
+ * This file contains definitions and prototypes for circular buffers.
+ * Inspired from Linux circular buffers (include/linux/circ_buf.h).
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CBUF_H
+#define _HAPROXY_CBUF_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+#endif
+
+#include <haproxy/atomic.h>
+#include <haproxy/list.h>
+#include <haproxy/cbuf-t.h>
+
+struct cbuf *cbuf_new(unsigned char *buf, size_t sz);
+void cbuf_free(struct cbuf *cbuf);
+
+/* Amount of data between <rd> and <wr>. <size> must be a power of two. */
+#define CBUF_DATA(wr, rd, size) (((wr) - (rd)) & ((size) - 1))
+
+/* Return the writer position in <cbuf>.
+ * To be used only by the writer!
+ */
+static inline unsigned char *cb_wr(struct cbuf *cbuf)
+{
+	return cbuf->buf + cbuf->wr;
+}
+
+/* Reset the reader index.
+ * To be used by a reader!
+ */
+static inline void cb_rd_reset(struct cbuf *cbuf)
+{
+	cbuf->rd = 0;
+}
+
+/* Reset the writer index.
+ * To be used by a writer!
+ */
+static inline void cb_wr_reset(struct cbuf *cbuf)
+{
+	cbuf->wr = 0;
+}
+
+/* Increase <cbuf> circular buffer data by <count>, wrapping the writer
+ * index (the buffer size must be a power of two).
+ * To be used by a writer!
+ */
+static inline void cb_add(struct cbuf *cbuf, size_t count)
+{
+	cbuf->wr = (cbuf->wr + count) & (cbuf->sz - 1);
+}
+
+/* Return the reader position in <cbuf>.
+ * To be used only by the reader!
+ */
+static inline unsigned char *cb_rd(struct cbuf *cbuf)
+{
+	return cbuf->buf + cbuf->rd;
+}
+
+/* Skip <count> bytes in <cbuf> circular buffer, wrapping the reader index
+ * (the buffer size must be a power of two).
+ * To be used by a reader!
+ */
+static inline void cb_del(struct cbuf *cbuf, size_t count)
+{
+	cbuf->rd = (cbuf->rd + count) & (cbuf->sz - 1);
+}
+
+/* Return the amount of data left in <cbuf>. The reader index is loaded
+ * atomically since the reader may update it concurrently.
+ * To be used only by the writer!
+ */
+static inline size_t cb_data(struct cbuf *cbuf)
+{
+	size_t rd;
+
+	rd = HA_ATOMIC_LOAD(&cbuf->rd);
+	return CBUF_DATA(cbuf->wr, rd, cbuf->sz);
+}
+
+/* Return the amount of room left in <cbuf> minus 1 to distinguish
+ * the case where the buffer is full from the case where it is empty.
+ * To be used only by the writer!
+ */
+static inline size_t cb_room(struct cbuf *cbuf)
+{
+	size_t rd;
+
+	rd = HA_ATOMIC_LOAD(&cbuf->rd);
+	return CBUF_DATA(rd, cbuf->wr + 1, cbuf->sz);
+}
+
+/* Return the amount of contiguous data left in <cbuf>, i.e. at most up to
+ * the end of the underlying buffer.
+ * To be used only by the reader!
+ */
+static inline size_t cb_contig_data(struct cbuf *cbuf)
+{
+	size_t end, n;
+
+	end = cbuf->sz - cbuf->rd;
+	n = (HA_ATOMIC_LOAD(&cbuf->wr) + end) & (cbuf->sz - 1);
+	return n < end ? n : end;
+}
+
+/* Return the amount of contiguous space left in <cbuf>, i.e. at most up to
+ * the end of the underlying buffer.
+ * To be used only by the writer!
+ */
+static inline size_t cb_contig_space(struct cbuf *cbuf)
+{
+	size_t end, n;
+
+	end = cbuf->sz - 1 - cbuf->wr;
+	n = (HA_ATOMIC_LOAD(&cbuf->rd) + end) & (cbuf->sz - 1);
+	return n <= end ? n : end + 1;
+}
+
+#endif /* _HAPROXY_CBUF_H */
diff --git a/include/haproxy/cfgcond-t.h b/include/haproxy/cfgcond-t.h
new file mode 100644
index 0000000..00fc126
--- /dev/null
+++ b/include/haproxy/cfgcond-t.h
@@ -0,0 +1,105 @@
+/*
+ * include/haproxy/cfgcond-t.h
+ * Types for the configuration condition preprocessor
+ *
+ * Copyright (C) 2000-2021 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CFGCOND_T_H
+#define _HAPROXY_CFGCOND_T_H
+
+#include <haproxy/api-t.h>
+
+/* nested if/elif/else/endif block states */
+enum nested_cond_state {
+ NESTED_COND_IF_TAKE, // "if" with a true condition
+ NESTED_COND_IF_DROP, // "if" with a false condition
+ NESTED_COND_IF_SKIP, // "if" masked by an outer false condition
+
+ NESTED_COND_ELIF_TAKE, // "elif" with a true condition from a false one
+ NESTED_COND_ELIF_DROP, // "elif" with a false condition from a false one
+ NESTED_COND_ELIF_SKIP, // "elif" masked by an outer false condition or a previously taken if
+
+ NESTED_COND_ELSE_TAKE, // taken "else" after an if false condition
+ NESTED_COND_ELSE_DROP, // "else" masked by outer false condition or an if true condition
+};
+
+/* 100 levels of nested conditions should already be sufficient */
+#define MAXNESTEDCONDS 100
+
+/* supported conditional predicates for .if/.elif */
+enum cond_predicate {
+ CFG_PRED_NONE, // none
+ CFG_PRED_DEFINED, // "defined"
+ CFG_PRED_FEATURE, // "feature"
+ CFG_PRED_STREQ, // "streq"
+ CFG_PRED_STRNEQ, // "strneq"
+ CFG_PRED_STRSTR, // "strstr"
+ CFG_PRED_VERSION_ATLEAST, // "version_atleast"
+ CFG_PRED_VERSION_BEFORE, // "version_before"
+ CFG_PRED_OSSL_VERSION_ATLEAST, // "openssl_version_atleast"
+ CFG_PRED_OSSL_VERSION_BEFORE, // "openssl_version_before"
+ CFG_PRED_SSLLIB_NAME_STARTSWITH, // "ssllib_name_startswith"
+ CFG_PRED_ENABLED, // "enabled"
+};
+
+/* types for condition terms */
+enum cfg_cond_term_type {
+ CCTT_NONE = 0,
+ CCTT_FALSE,
+ CCTT_TRUE,
+ CCTT_PRED,
+ CCTT_PAREN, // '(' EXPR ')'
+};
+
+/* keyword for a condition predicate */
+struct cond_pred_kw {
+ const char *word; // NULL marks the end of the list
+ enum cond_predicate prd; // one of the CFG_PRED_* above
+ uint64_t arg_mask; // mask of supported arguments (strings only)
+};
+
+/* condition term. Depending on <type>, either <pred> (together with <args>)
+ * or <expr> is the active member of the union below; <neg> inverts the
+ * evaluated result.
+ */
+struct cfg_cond_term {
+	enum cfg_cond_term_type type; // CCTT_*
+	struct arg *args;             // arguments for predicates
+	int neg;                      // 0: direct result; 1: negate
+	union {
+		const struct cond_pred_kw *pred; // predicate (function)
+		struct cfg_cond_expr *expr;      // expression for CCTT_PAREN
+	};
+};
+
+/* condition sub-expression for an AND:
+ * expr_and = <term> '&&' <expr_and>
+ * | <term>
+ */
+struct cfg_cond_and {
+ struct cfg_cond_term *left;
+ struct cfg_cond_and *right; // may be NULL
+};
+
+/* condition expression:
+ * expr = <expr_and> '||' <expr>
+ * | <expr_and>
+ */
+struct cfg_cond_expr {
+ struct cfg_cond_and *left;
+ struct cfg_cond_expr *right; // may be NULL
+};
+
+#endif /* _HAPROXY_CFGCOND_T_H */
diff --git a/include/haproxy/cfgcond.h b/include/haproxy/cfgcond.h
new file mode 100644
index 0000000..3171f81
--- /dev/null
+++ b/include/haproxy/cfgcond.h
@@ -0,0 +1,43 @@
+/*
+ * include/haproxy/cfgcond.h
+ * Configuration condition preprocessor
+ *
+ * Copyright (C) 2000-2021 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CFGCOND_H
+#define _HAPROXY_CFGCOND_H
+
+#include <haproxy/api.h>
+#include <haproxy/cfgcond-t.h>
+
+const struct cond_pred_kw *cfg_lookup_cond_pred(const char *str);
+int cfg_parse_cond_term(const char **text, struct cfg_cond_term **term, char **err, const char **errptr, int maxdepth);
+int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err);
+void cfg_free_cond_term(struct cfg_cond_term *term);
+
+int cfg_parse_cond_and(const char **text, struct cfg_cond_and **expr, char **err, const char **errptr, int maxdepth);
+int cfg_eval_cond_and(struct cfg_cond_and *expr, char **err);
+void cfg_free_cond_and(struct cfg_cond_and *expr);
+
+int cfg_parse_cond_expr(const char **text, struct cfg_cond_expr **expr, char **err, const char **errptr, int maxdepth);
+int cfg_eval_cond_expr(struct cfg_cond_expr *expr, char **err);
+void cfg_free_cond_expr(struct cfg_cond_expr *expr);
+
+int cfg_eval_condition(char **args, char **err, const char **errptr);
+
+#endif
diff --git a/include/haproxy/cfgdiag.h b/include/haproxy/cfgdiag.h
new file mode 100644
index 0000000..6989109
--- /dev/null
+++ b/include/haproxy/cfgdiag.h
@@ -0,0 +1,11 @@
+#ifndef _HAPROXY_CFGDIAG_H
+#define _HAPROXY_CFGDIAG_H
+
+/* Placeholder to execute various diagnostic checks after the configuration file
+ * has been fully parsed. It will output a warning for each diagnostic found.
+ *
+ * Returns 0 if no diagnostic message has been found else 1.
+ */
+int cfg_run_diagnostics(void);
+
+#endif /* _HAPROXY_CFGDIAG_H */
diff --git a/include/haproxy/cfgparse.h b/include/haproxy/cfgparse.h
new file mode 100644
index 0000000..adcabb3
--- /dev/null
+++ b/include/haproxy/cfgparse.h
@@ -0,0 +1,149 @@
+/*
+ * include/haproxy/cfgparse.h
+ * Configuration parsing functions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CFGPARSE_H
+#define _HAPROXY_CFGPARSE_H
+
+#include <haproxy/api.h>
+
+struct hap_cpuset;
+struct proxy;
+struct bind_conf;
+struct acl_cond;
+
+/* configuration sections */
+#define CFG_NONE 0
+#define CFG_GLOBAL 1
+#define CFG_LISTEN 2
+#define CFG_USERLIST 3
+#define CFG_PEERS 4
+#define CFG_CRTLIST 5
+
+/* various keyword modifiers */
+enum kw_mod {
+ KWM_STD = 0, /* normal */
+ KWM_NO, /* "no" prefixed before the keyword */
+ KWM_DEF, /* "default" prefixed before the keyword */
+};
+
+/* flags ORed into the <flags> field of struct cfg_keyword */
+enum cfg_keyword_flags {
+	KWF_EXPERIMENTAL = 0x1, /* keyword is experimental -- presumably requires explicit opt-in; TODO confirm */
+	KWF_MATCH_PREFIX = 0x2, /* keyword is matched as a prefix of the config word -- TODO confirm */
+};
+
+struct cfg_keyword {
+ int section; /* section type for this keyword */
+ const char *kw; /* the keyword itself */
+ int (*parse)( /* 0=OK, <0=Alert, >0=Warning */
+ char **args, /* command line and arguments */
+ int section_type, /* current section CFG_{GLOBAL|LISTEN} */
+ struct proxy *curpx, /* current proxy (NULL in GLOBAL) */
+ const struct proxy *defpx, /* default proxy (NULL in GLOBAL) */
+ const char *file, /* config file name */
+ int line, /* config file line number */
+ char **err); /* error or warning message output pointer */
+ int flags;
+};
+
+/* A keyword list. It is a NULL-terminated array of keywords. It embeds a
+ * struct list in order to be linked to other lists, allowing it to easily
+ * be declared where it is needed, and linked without duplicating data nor
+ * allocating memory.
+ */
+struct cfg_kw_list {
+ struct list list;
+ struct cfg_keyword kw[VAR_ARRAY];
+};
+
+/* permit to store configuration section */
+struct cfg_section {
+	struct list list;
+	char *section_name;
+	int (*section_parser)(const char *, int, char **, int);
+	int (*post_section_parser)(); /* NOTE(review): K&R empty parens (unspecified args); callers pass none */
+};
+
+/* store post configuration parsing */
+
+struct cfg_postparser {
+	struct list list;
+	char *name;
+	int (*func)(); /* NOTE(review): K&R empty parens (unspecified args); callers pass none */
+};
+
+extern struct list postparsers;
+extern int cfg_maxpconn;
+extern int cfg_maxconn;
+extern char *cfg_scope;
+extern struct cfg_kw_list cfg_keywords;
+extern char *cursection;
+extern int non_global_section_parsed;
+
+extern struct proxy *curproxy;
+
+int cfg_parse_global(const char *file, int linenum, char **args, int inv);
+int cfg_parse_listen(const char *file, int linenum, char **args, int inv);
+int cfg_parse_track_sc_num(unsigned int *track_sc_num,
+ const char *arg, const char *end, char **err);
+int readcfgfile(const char *file);
+void cfg_register_keywords(struct cfg_kw_list *kwl);
+void cfg_unregister_keywords(struct cfg_kw_list *kwl);
+int check_config_validity(void);
+int str2listener(char *str, struct proxy *curproxy, struct bind_conf *bind_conf, const char *file, int line, char **err);
+int str2receiver(char *str, struct proxy *curproxy, struct bind_conf *bind_conf, const char *file, int line, char **err);
+int cfg_register_section(char *section_name,
+ int (*section_parser)(const char *, int, char **, int),
+ int (*post_section_parser)());
+int cfg_register_postparser(char *name, int (*func)());
+void cfg_unregister_sections(void);
+void cfg_backup_sections(struct list *backup_sections);
+void cfg_restore_sections(struct list *backup_sections);
+int warnif_misplaced_tcp_conn(struct proxy *proxy, const char *file, int line, const char *arg);
+int warnif_misplaced_tcp_sess(struct proxy *proxy, const char *file, int line, const char *arg);
+int warnif_misplaced_tcp_cont(struct proxy *proxy, const char *file, int line, const char *arg);
+int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
+int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
+int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
+int too_many_args(int maxarg, char **args, char **msg, int *err_code);
+int alertif_too_many_args_idx(int maxarg, int index, const char *file, int linenum, char **args, int *err_code);
+int alertif_too_many_args(int maxarg, const char *file, int linenum, char **args, int *err_code);
+int parse_process_number(const char *arg, unsigned long *proc, int max, int *autoinc, char **err);
+void free_email_alert(struct proxy *p);
+const char *cfg_find_best_match(const char *word, const struct list *list, int section, const char **extra);
+int warnifnotcap(struct proxy *proxy, int cap, const char *file, int line, const char *arg, const char *hint);
+int failifnotcap(struct proxy *proxy, int cap, const char *file, int line, const char *arg, const char *hint);
+void cfg_dump_registered_keywords();
+
+/* simplified way to define a section parser */
+#define REGISTER_CONFIG_SECTION(name, parse, post) \
+ INITCALL3(STG_REGISTER, cfg_register_section, (name), (parse), (post))
+
+#define REGISTER_CONFIG_POSTPARSER(name, parser) \
+ INITCALL2(STG_REGISTER, cfg_register_postparser, (name), (parser))
+
+#endif /* _HAPROXY_CFGPARSE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/channel-t.h b/include/haproxy/channel-t.h
new file mode 100644
index 0000000..6972edb
--- /dev/null
+++ b/include/haproxy/channel-t.h
@@ -0,0 +1,314 @@
+/*
+ * include/haproxy/channel-t.h
+ * Channel management definitions, macros and inline functions.
+ *
+ * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CHANNEL_T_H
+#define _HAPROXY_CHANNEL_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/show_flags-t.h>
+
+/* The CF_* macros designate Channel Flags, which may be ORed in the bit field
+ * member 'flags' in struct channel. Here we have several types of flags :
+ *
+ * - pure status flags, reported by the data layer, which must be cleared
+ * before doing further I/O :
+ * CF_*_EVENT, CF_*_PARTIAL
+ *
+ * - pure status flags, reported by stream connector layer, which must also
+ * be cleared before doing further I/O :
+ * CF_*_TIMEOUT
+ *
+ * - read-only indicators reported by lower data levels :
+ * CF_STREAMER, CF_STREAMER_FAST
+ *
+ * The flags have been arranged for readability, so that the read and write
+ * bits have the same position in a byte (read being the lower byte and write
+ * the second one). All flag names are relative to the channel. For instance,
+ * 'write' indicates the direction from the channel to the stream connector.
+ * Please also update the chn_show_flags() function below in case of changes.
+ */
+
+#define CF_READ_EVENT 0x00000001 /* a read event detected on producer side */
+/* unused: 0x00000002 */
+#define CF_READ_TIMEOUT 0x00000004 /* timeout while waiting for producer */
+/* unused 0x00000008 */
+
+/* unused: 0x00000010 - 0x00000080 */
+
+#define CF_WRITE_EVENT 0x00000100 /* a write event detected on consumer side */
+/* unused: 0x00000200 */
+#define CF_WRITE_TIMEOUT 0x00000400 /* timeout while waiting for consumer */
+/* unused 0x00000800 */
+
+#define CF_WAKE_WRITE 0x00001000 /* wake the task up when there's write activity */
+/* unused: 0x00002000 - 0x00004000 */
+#define CF_AUTO_CLOSE 0x00008000 /* producer can forward shutdown to other side */
+
+#define CF_STREAMER 0x00010000 /* the producer is identified as streaming data */
+#define CF_STREAMER_FAST 0x00020000 /* the consumer seems to eat the stream very fast */
+
+#define CF_WROTE_DATA 0x00040000 /* some data were sent from this buffer */
+/* unused 0x00080000 - 0x00400000 */
+#define CF_AUTO_CONNECT 0x00800000 /* consumer may attempt to establish a new connection */
+
+#define CF_DONT_READ 0x01000000 /* disable reading for now */
+/* unused 0x02000000 - 0x08000000 */
+
+#define CF_WAKE_ONCE 0x10000000 /* pretend there is activity on this channel (one-shot) */
+#define CF_FLT_ANALYZE 0x20000000 /* at least one filter is still analyzing this channel */
+/* unused 0x40000000 */
+#define CF_ISRESP 0x80000000 /* 0 = request channel, 1 = response channel */
+
+/* Masks which define input events for stream analysers */
+#define CF_MASK_ANALYSER (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WAKE_ONCE)
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *chn_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(CF_READ_EVENT, _(CF_READ_TIMEOUT,
+ _(CF_WRITE_EVENT,
+ _(CF_WRITE_TIMEOUT,
+ _(CF_WAKE_WRITE, _(CF_AUTO_CLOSE,
+ _(CF_STREAMER, _(CF_STREAMER_FAST, _(CF_WROTE_DATA,
+ _(CF_AUTO_CONNECT, _(CF_DONT_READ,
+ _(CF_WAKE_ONCE, _(CF_FLT_ANALYZE,
+ _(CF_ISRESP))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* Analysers (channel->analysers).
+ * Those bits indicate that there are some processing to do on the buffer
+ * contents. It will probably evolve into a linked list later. Those
+ * analysers could be compared to higher level processors.
+ * The field is blanked by channel_init() and only by analysers themselves
+ * afterwards.
+ * Please also update the chn_show_analysers() function below in case of changes.
+ */
+/* AN_REQ_FLT_START_FE: 0x00000001 */
+#define AN_REQ_INSPECT_FE 0x00000002 /* inspect request contents in the frontend */
+#define AN_REQ_WAIT_HTTP 0x00000004 /* wait for an HTTP request */
+#define AN_REQ_HTTP_BODY 0x00000008 /* wait for HTTP request body */
+#define AN_REQ_HTTP_PROCESS_FE 0x00000010 /* process the frontend's HTTP part */
+#define AN_REQ_SWITCHING_RULES 0x00000020 /* apply the switching rules */
+/* AN_REQ_FLT_START_BE: 0x00000040 */
+#define AN_REQ_INSPECT_BE 0x00000080 /* inspect request contents in the backend */
+#define AN_REQ_HTTP_PROCESS_BE 0x00000100 /* process the backend's HTTP part */
+#define AN_REQ_HTTP_TARPIT 0x00000200 /* wait for end of HTTP tarpit */
+#define AN_REQ_SRV_RULES 0x00000400 /* use-server rules */
+#define AN_REQ_HTTP_INNER 0x00000800 /* inner processing of HTTP request */
+#define AN_REQ_PRST_RDP_COOKIE 0x00001000 /* persistence on rdp cookie */
+#define AN_REQ_STICKING_RULES 0x00002000 /* table persistence matching */
+/* AN_REQ_FLT_HTTP_HDRS: 0x00004000 */
+#define AN_REQ_HTTP_XFER_BODY 0x00008000 /* forward request body */
+#define AN_REQ_WAIT_CLI 0x00010000
+/* AN_REQ_FLT_XFER_DATA: 0x00020000 */
+/* AN_REQ_FLT_END: 0x00040000 */
+#define AN_REQ_ALL 0x0001bfbe /* all of the request analysers */
+
+/* response analysers */
+/* AN_RES_FLT_START_FE: 0x00080000 */
+/* AN_RES_FLT_START_BE: 0x00100000 */
+#define AN_RES_INSPECT 0x00200000 /* content inspection */
+#define AN_RES_WAIT_HTTP 0x00400000 /* wait for HTTP response */
+#define AN_RES_STORE_RULES 0x00800000 /* table persistence matching */
+#define AN_RES_HTTP_PROCESS_BE 0x01000000 /* process backend's HTTP part */
+#define AN_RES_HTTP_PROCESS_FE 0x01000000 /* process frontend's HTTP part (same for now) */
+/* AN_RES_FLT_HTTP_HDRS: 0x02000000 */
+#define AN_RES_HTTP_XFER_BODY 0x04000000 /* forward response body */
+#define AN_RES_WAIT_CLI 0x08000000
+/* AN_RES_FLT_XFER_DATA: 0x10000000 */
+/* AN_RES_FLT_END: 0x20000000 */
+#define AN_RES_ALL 0x0de00000 /* all of the response analysers */
+
+/* filters interleaved with analysers, see above */
+#define AN_REQ_FLT_START_FE 0x00000001
+#define AN_REQ_FLT_START_BE 0x00000040
+#define AN_REQ_FLT_HTTP_HDRS 0x00004000
+#define AN_REQ_FLT_XFER_DATA 0x00020000
+#define AN_REQ_FLT_END 0x00040000
+
+#define AN_RES_FLT_START_FE 0x00080000
+#define AN_RES_FLT_START_BE 0x00100000
+#define AN_RES_FLT_HTTP_HDRS 0x02000000
+#define AN_RES_FLT_XFER_DATA 0x10000000
+#define AN_RES_FLT_END 0x20000000
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *chn_show_analysers(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* request flags */
+ _(AN_REQ_FLT_START_FE, _(AN_REQ_INSPECT_FE, _(AN_REQ_WAIT_HTTP,
+ _(AN_REQ_HTTP_BODY, _(AN_REQ_HTTP_PROCESS_FE, _(AN_REQ_SWITCHING_RULES,
+ _(AN_REQ_FLT_START_BE, _(AN_REQ_INSPECT_BE, _(AN_REQ_HTTP_PROCESS_BE,
+ _(AN_REQ_HTTP_TARPIT, _(AN_REQ_SRV_RULES, _(AN_REQ_HTTP_INNER,
+ _(AN_REQ_PRST_RDP_COOKIE, _(AN_REQ_STICKING_RULES,
+ _(AN_REQ_FLT_HTTP_HDRS, _(AN_REQ_HTTP_XFER_BODY, _(AN_REQ_WAIT_CLI,
+ _(AN_REQ_FLT_XFER_DATA, _(AN_REQ_FLT_END,
+ /* response flags */
+ _(AN_RES_FLT_START_FE, _(AN_RES_FLT_START_BE, _(AN_RES_INSPECT,
+ _(AN_RES_WAIT_HTTP, _(AN_RES_STORE_RULES, _(AN_RES_HTTP_PROCESS_FE,
+ _(AN_RES_HTTP_PROCESS_BE, _(AN_RES_FLT_HTTP_HDRS,
+ _(AN_RES_HTTP_XFER_BODY, _(AN_RES_WAIT_CLI, _(AN_RES_FLT_XFER_DATA,
+ _(AN_RES_FLT_END)))))))))))))))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* Magic value to forward infinite size (TCP, ...), used with ->to_forward */
+#define CHN_INFINITE_FORWARD MAX_RANGE(unsigned int)
+
+
+struct channel {
+ unsigned int flags; /* CF_* */
+ unsigned int analysers; /* bit field indicating what to do on the channel */
+ struct buffer buf; /* buffer attached to the channel, always present but may move */
+ size_t output; /* part of buffer which is to be forwarded */
+ unsigned int to_forward; /* number of bytes to forward after out without a wake-up */
+ unsigned short last_read; /* 16 lower bits of last read date (max pause=65s) */
+ unsigned char xfer_large; /* number of consecutive large xfers */
+ unsigned char xfer_small; /* number of consecutive small xfers */
+ unsigned long long total; /* total data read */
+ int analyse_exp; /* expiration date for current analysers (if set) */
+};
+
+
+/* Note about the channel structure
+ *
+ * A channel stores information needed to reliably transport data in a single
+ * direction. It stores status flags, timeouts, counters, subscribed analysers,
+ * pointers to a data producer and to a data consumer, and information about
+ * the amount of data which is allowed to flow directly from the producer to
+ * the consumer without waking up the analysers.
+ *
+ * A channel may buffer data into two locations :
+ * - a visible buffer (->buf)
+ * - an invisible buffer which right now consists in a pipe making use of
+ * kernel buffers that cannot be tampered with.
+ *
+ * Data stored into the first location may be analysed and altered by analysers
+ * while data stored in pipes is only aimed at being transported from one
+ * network socket to another one without being subject to memory copies. This
+ * buffer may only be used when both the socket layer and the data layer of the
+ * producer and the consumer support it, which typically is the case with Linux
+ * splicing over sockets, and when there are enough data to be transported
+ * without being analyzed (transport of TCP/HTTP payload or tunnelled data,
+ * which is indicated by ->to_forward).
+ *
+ * In order not to mix data streams, the producer may only feed the invisible
+ * data with data to forward, and only when the visible buffer is empty. The
+ * producer may not always be able to feed the invisible buffer due to platform
+ * limitations (lack of kernel support).
+ *
+ * Conversely, the consumer must always take data from the invisible data first
+ * before ever considering visible data. There is no limit to the size of data
+ * to consume from the invisible buffer, as platform-specific implementations
+ * will rarely leave enough control on this. So any byte fed into the invisible
+ * buffer is expected to reach the destination file descriptor, by any means.
+ * However, it's the consumer's responsibility to ensure that the invisible
+ * data has been entirely consumed before consuming visible data. This must be
+ * reflected by ->pipe->data. This is very important as this and only this can
+ * ensure strict ordering of data between buffers.
+ *
+ * The producer is responsible for decreasing ->to_forward. The ->to_forward
+ * parameter indicates how many bytes may be fed into either data buffer
+ * without waking the parent up. The special value CHN_INFINITE_FORWARD is
+ * never decreased nor increased.
+ *
+ * The buf->o parameter says how many bytes may be consumed from the visible
+ * buffer. This parameter is updated by any buffer_write() as well as any data
+ * forwarded through the visible buffer. Since the ->to_forward attribute
+ * applies to data after buf->p, an analyser will not see a buffer which has a
+ * non-null ->to_forward with buf->i > 0. A producer is responsible for raising
+ * buf->o by min(to_forward, buf->i) when it injects data into the buffer.
+ *
+ * The consumer is responsible for decreasing ->buf->o when it sends data
+ * from the visible buffer, and ->pipe->data when it sends data from the
+ * invisible buffer.
+ *
+ * A real-world example consists in part in an HTTP response waiting in a
+ * buffer to be forwarded. We know the header length (300) and the amount of
+ * data to forward (content-length=9000). The buffer already contains 1000
+ * bytes of data after the 300 bytes of headers. Thus the caller will set
+ * buf->o to 300 indicating that it explicitly wants to send those data, and
+ * set ->to_forward to 9000 (content-length). This value must be normalised
+ * immediately after updating ->to_forward : since there are already 1300 bytes
+ * in the buffer, 300 of which are already counted in buf->o, and that size
+ * is smaller than ->to_forward, we must update buf->o to 1300 to flush the
+ * whole buffer, and reduce ->to_forward to 8000. After that, the producer may
+ * try to feed the additional data through the invisible buffer using a
+ * platform-specific method such as splice().
+ *
+ * The ->to_forward entry is also used to detect whether we can fill the buffer
+ * or not. The idea is that we need to save some space for data manipulation
+ * (mainly header rewriting in HTTP) so we don't want to have a full buffer on
+ * input before processing a request or response. Thus, we ensure that there is
+ * always global.maxrewrite bytes of free space. Since we don't want to forward
+ * chunks without filling the buffer, we rely on ->to_forward. When ->to_forward
+ * is null, we may have some processing to do so we don't want to fill the
+ * buffer. When ->to_forward is non-null, we know we don't care for at least as
+ * many bytes. In the end, we know that each of the ->to_forward bytes will
+ * eventually leave the buffer. So as long as ->to_forward is larger than
+ * global.maxrewrite, we can fill the buffer. If ->to_forward is smaller than
+ * global.maxrewrite, then we don't want to fill the buffer with more than
+ * buf->size - global.maxrewrite + ->to_forward.
+ *
+ * A buffer may contain up to 5 areas :
+ * - the data waiting to be sent. These data are located between buf->p-o and
+ * buf->p ;
+ * - the data to process and possibly transform. These data start at
+ * buf->p and may be up to ->i bytes long.
+ * - the data to preserve. They start at ->p and stop at ->p+i. The limit
+ * between the two solely depends on the protocol being analysed.
+ * - the spare area : it is the remainder of the buffer, which can be used to
+ * store new incoming data. It starts at ->p+i and is up to ->size-i-o long.
+ * It may be limited by global.maxrewrite.
+ * - the reserved area : this is the area which must not be filled and is
+ * reserved for possible rewrites ; it is up to global.maxrewrite bytes
+ * long.
+ */
+
+#endif /* _HAPROXY_CHANNEL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/channel.h b/include/haproxy/channel.h
new file mode 100644
index 0000000..17dd75f
--- /dev/null
+++ b/include/haproxy/channel.h
@@ -0,0 +1,1021 @@
+/*
+ * include/haproxy/channel.h
+ * Channel management definitions, macros and inline functions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CHANNEL_H
+#define _HAPROXY_CHANNEL_H
+
+#include <haproxy/api.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/global.h>
+#include <haproxy/htx.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools-t.h>
+
+struct stconn;
+
+/* perform minimal initializations, report 0 in case of error, 1 if OK. */
+int init_channel();
+
+unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes);
+
+/* SI-to-channel functions working with buffers */
+int ci_putblk(struct channel *chn, const char *str, int len);
+int ci_putchr(struct channel *chn, char c);
+int ci_getline_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
+int ci_getblk_nc(const struct channel *chn, char **blk1, size_t *len1, char **blk2, size_t *len2);
+int ci_insert_line2(struct channel *c, int pos, const char *str, int len);
+int co_inject(struct channel *chn, const char *msg, int len);
+int co_getchar(const struct channel *chn, char *c);
+int co_getline(const struct channel *chn, char *str, int len);
+int co_getdelim(const struct channel *chn, char *str, int len, const char *delim, char escape);
+int co_getword(const struct channel *chn, char *str, int len, char sep);
+int co_getblk(const struct channel *chn, char *blk, int len, int offset);
+int co_getline_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
+int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2);
+
+
+/* returns a pointer to the stream the channel belongs to */
+static inline struct stream *chn_strm(const struct channel *chn)
+{
+ if (chn->flags & CF_ISRESP)
+ return LIST_ELEM(chn, struct stream *, res);
+ else
+ return LIST_ELEM(chn, struct stream *, req);
+}
+
+/* returns a pointer to the stream connector feeding the channel (producer) */
+static inline struct stconn *chn_prod(const struct channel *chn)
+{
+ if (chn->flags & CF_ISRESP)
+ return LIST_ELEM(chn, struct stream *, res)->scb;
+ else
+ return LIST_ELEM(chn, struct stream *, req)->scf;
+}
+
+/* returns a pointer to the stream connector consuming the channel (consumer) */
+static inline struct stconn *chn_cons(const struct channel *chn)
+{
+ if (chn->flags & CF_ISRESP)
+ return LIST_ELEM(chn, struct stream *, res)->scf;
+ else
+ return LIST_ELEM(chn, struct stream *, req)->scb;
+}
+
+/* c_orig() : returns the pointer to the channel buffer's origin */
+static inline char *c_orig(const struct channel *c)
+{
+ return b_orig(&c->buf);
+}
+
+/* c_size() : returns the size of the channel's buffer */
+static inline size_t c_size(const struct channel *c)
+{
+ return b_size(&c->buf);
+}
+
+/* c_wrap() : returns the pointer to the channel buffer's wrapping point */
+static inline char *c_wrap(const struct channel *c)
+{
+ return b_wrap(&c->buf);
+}
+
+/* c_data() : returns the amount of data in the channel's buffer */
+static inline size_t c_data(const struct channel *c)
+{
+ return b_data(&c->buf);
+}
+
+/* c_room() : returns the room left in the channel's buffer */
+static inline size_t c_room(const struct channel *c)
+{
+ return b_size(&c->buf) - b_data(&c->buf);
+}
+
+/* c_empty() : returns a boolean indicating if the channel's buffer is empty */
+static inline size_t c_empty(const struct channel *c)
+{
+ return !c_data(c);
+}
+
+/* c_full() : returns a boolean indicating if the channel's buffer is full */
+static inline size_t c_full(const struct channel *c)
+{
+ return !c_room(c);
+}
+
+/* co_data() : returns the amount of output data in the channel's buffer */
+static inline size_t co_data(const struct channel *c)
+{
+ CHECK_IF_HOT(c->output > c_data(c));
+ return c->output;
+}
+
+/* ci_data() : returns the amount of input data in the channel's buffer */
+static inline size_t ci_data(const struct channel *c)
+{
+ return c_data(c) - co_data(c);
+}
+
+/* ci_next() : for an absolute pointer <p> or a relative offset <o> pointing to
+ * a valid location within channel <c>'s buffer, returns either the absolute
+ * pointer or the relative offset pointing to the next byte, which usually is
+ * at (p + 1) unless p reaches the wrapping point and wrapping is needed.
+ */
+static inline size_t ci_next_ofs(const struct channel *c, size_t o)
+{
+ return b_next_ofs(&c->buf, o);
+}
+static inline char *ci_next(const struct channel *c, const char *p)
+{
+ return b_next(&c->buf, p);
+}
+
+
+/* c_ptr() : returns a pointer to an offset relative to the beginning of the
+ * input data in the buffer. If instead the offset is negative, a pointer to
+ * existing output data is returned. The function only takes care of wrapping,
+ * it's up to the caller to ensure the offset is always within byte count
+ * bounds.
+ */
+static inline char *c_ptr(const struct channel *c, ssize_t ofs)
+{
+ return b_peek(&c->buf, co_data(c) + ofs);
+}
+
+/* c_adv() : advances the channel's buffer by <adv> bytes, which means that the
+ * buffer's pointer advances, and that as many bytes from in are transferred
+ * from in to out. The caller is responsible for ensuring that adv is always
+ * smaller than or equal to b->i.
+ */
+static inline void c_adv(struct channel *c, size_t adv)
+{
+ c->output += adv;
+ BUG_ON_HOT(c->output > c_data(c));
+}
+
+/* c_rew() : rewinds the channel's buffer by <adv> bytes, which means that the
+ * buffer's pointer goes backwards, and that as many bytes from out are moved
+ * to in. The caller is responsible for ensuring that adv is always smaller
+ * than or equal to b->o.
+ */
+static inline void c_rew(struct channel *c, size_t adv)
+{
+ BUG_ON_HOT(c->output < adv);
+ c->output -= adv;
+}
+
+/* c_realign_if_empty() : realign the channel's buffer if it's empty */
+static inline void c_realign_if_empty(struct channel *chn)
+{
+ b_realign_if_empty(&chn->buf);
+}
+
+/* Sets the amount of output for the channel */
+static inline void co_set_data(struct channel *c, size_t output)
+{
+ BUG_ON_HOT(output > c_data(c));
+ c->output = output;
+}
+
+
+/* co_head() : returns a pointer to the beginning of output data in the buffer.
+ * The "__" variants don't support wrapping, "ofs" are relative to
+ * the buffer's origin.
+ */
+static inline size_t __co_head_ofs(const struct channel *c)
+{
+ return __b_peek_ofs(&c->buf, 0);
+}
+static inline char *__co_head(const struct channel *c)
+{
+ return __b_peek(&c->buf, 0);
+}
+static inline size_t co_head_ofs(const struct channel *c)
+{
+ return b_peek_ofs(&c->buf, 0);
+}
+static inline char *co_head(const struct channel *c)
+{
+ return b_peek(&c->buf, 0);
+}
+
+
+/* co_tail() : returns a pointer to the end of output data in the buffer.
+ * The "__" variants don't support wrapping, "ofs" are relative to
+ * the buffer's origin.
+ */
+static inline size_t __co_tail_ofs(const struct channel *c)
+{
+ return __b_peek_ofs(&c->buf, co_data(c));
+}
+static inline char *__co_tail(const struct channel *c)
+{
+ return __b_peek(&c->buf, co_data(c));
+}
+static inline size_t co_tail_ofs(const struct channel *c)
+{
+ return b_peek_ofs(&c->buf, co_data(c));
+}
+static inline char *co_tail(const struct channel *c)
+{
+ return b_peek(&c->buf, co_data(c));
+}
+
+
+/* ci_head() : returns a pointer to the beginning of input data in the buffer.
+ * The "__" variants don't support wrapping, "ofs" are relative to
+ * the buffer's origin.
+ */
+static inline size_t __ci_head_ofs(const struct channel *c)
+{
+ return __b_peek_ofs(&c->buf, co_data(c));
+}
+static inline char *__ci_head(const struct channel *c)
+{
+ return __b_peek(&c->buf, co_data(c));
+}
+static inline size_t ci_head_ofs(const struct channel *c)
+{
+ return b_peek_ofs(&c->buf, co_data(c));
+}
+static inline char *ci_head(const struct channel *c)
+{
+ return b_peek(&c->buf, co_data(c));
+}
+
+
+/* ci_tail() : returns a pointer to the end of input data in the buffer.
+ * The "__" variants don't support wrapping, "ofs" are relative to
+ * the buffer's origin.
+ */
+static inline size_t __ci_tail_ofs(const struct channel *c)
+{
+ return __b_peek_ofs(&c->buf, c_data(c));
+}
+static inline char *__ci_tail(const struct channel *c)
+{
+ return __b_peek(&c->buf, c_data(c));
+}
+static inline size_t ci_tail_ofs(const struct channel *c)
+{
+ return b_peek_ofs(&c->buf, c_data(c));
+}
+static inline char *ci_tail(const struct channel *c)
+{
+ return b_peek(&c->buf, c_data(c));
+}
+
+
+/* ci_stop() : returns the pointer to the byte following the end of input data
+ * in the channel buffer. It may be out of the buffer. It's used to
+ * compute lengths or stop pointers.
+ */
+static inline size_t __ci_stop_ofs(const struct channel *c)
+{
+ return __b_stop_ofs(&c->buf);
+}
+static inline const char *__ci_stop(const struct channel *c)
+{
+ return __b_stop(&c->buf);
+}
+static inline size_t ci_stop_ofs(const struct channel *c)
+{
+ return b_stop_ofs(&c->buf);
+}
+static inline const char *ci_stop(const struct channel *c)
+{
+ return b_stop(&c->buf);
+}
+
+
+/* Returns the amount of input data that can contiguously be read at once */
+static inline size_t ci_contig_data(const struct channel *c)
+{
+ return b_contig_data(&c->buf, co_data(c));
+}
+
+/* Initialize all fields in the channel. */
+static inline void channel_init(struct channel *chn)
+{
+ chn->buf = BUF_NULL;
+ chn->to_forward = 0;
+ chn->last_read = now_ms;
+ chn->xfer_small = chn->xfer_large = 0;
+ chn->total = 0;
+ chn->analysers = 0;
+ chn->flags = 0;
+ chn->output = 0;
+}
+
+/* Schedule up to <bytes> more bytes to be forwarded via the channel without
+ * notifying the owner task. Any data pending in the buffer are scheduled to be
+ * sent as well, in the limit of the number of bytes to forward. This must be
+ * the only method to use to schedule bytes to be forwarded. If the requested
+ * number is too large, it is automatically adjusted. The number of bytes taken
+ * into account is returned. Directly touching ->to_forward will cause lockups
+ * when buf->o goes down to zero if nobody is ready to push the remaining data.
+ */
+static inline unsigned long long channel_forward(struct channel *chn, unsigned long long bytes)
+{
+ /* hint: avoid comparisons on long long for the fast case, since if the
+	 * length does not fit in an unsigned int, it will never be forwarded at
+ * once anyway.
+ */
+ if (bytes <= ~0U) {
+ unsigned int bytes32 = bytes;
+
+ if (bytes32 <= ci_data(chn)) {
+ /* OK this amount of bytes might be forwarded at once */
+ c_adv(chn, bytes32);
+ return bytes;
+ }
+ }
+ return __channel_forward(chn, bytes);
+}
+
+/* Forwards any input data and marks the channel for permanent forwarding */
+static inline void channel_forward_forever(struct channel *chn)
+{
+ c_adv(chn, ci_data(chn));
+ chn->to_forward = CHN_INFINITE_FORWARD;
+}
+
+/* <len> bytes of input data were added into the channel <chn>. This function
+ * must be called to update the channel state. It also handles the fast
+ * forwarding. */
+static inline void channel_add_input(struct channel *chn, unsigned int len)
+{
+ if (chn->to_forward) {
+ unsigned long fwd = len;
+ if (chn->to_forward != CHN_INFINITE_FORWARD) {
+ if (fwd > chn->to_forward)
+ fwd = chn->to_forward;
+ chn->to_forward -= fwd;
+ }
+ c_adv(chn, fwd);
+ }
+ /* notify that some data was read */
+ chn->total += len;
+ chn->flags |= CF_READ_EVENT;
+}
+
+static inline unsigned long long channel_htx_forward(struct channel *chn, struct htx *htx, unsigned long long bytes)
+{
+ unsigned long long ret = 0;
+
+ if (htx->data) {
+ b_set_data(&chn->buf, htx->data);
+ ret = channel_forward(chn, bytes);
+ b_set_data(&chn->buf, b_size(&chn->buf));
+ }
+ return ret;
+}
+
+
+static inline void channel_htx_forward_forever(struct channel *chn, struct htx *htx)
+{
+ c_adv(chn, htx->data - co_data(chn));
+ chn->to_forward = CHN_INFINITE_FORWARD;
+}
+/*********************************************************************/
+/* These functions are used to compute various channel content sizes */
+/*********************************************************************/
+
+/* Returns non-zero if the channel is rewritable, which means that the buffer
+ * it is attached to has at least <maxrewrite> bytes immediately available.
+ * This is used to decide when a request or response may be parsed when some
+ * data from a previous exchange might still be present.
+ */
+static inline int channel_is_rewritable(const struct channel *chn)
+{
+ int rem = chn->buf.size;
+
+ rem -= b_data(&chn->buf);
+ rem -= global.tune.maxrewrite;
+ return rem >= 0;
+}
+
+/* Tells whether data are likely to leave the buffer. This is used to know when
+ * we can safely ignore the reserve since we know we cannot retry a connection.
+ * It returns zero if data are blocked, non-zero otherwise.
+ */
+static inline int channel_may_send(const struct channel *chn)
+{
+ return chn_cons(chn)->state == SC_ST_EST;
+}
+
+/* HTX version of channel_may_recv(). Returns non-zero if the channel can still
+ * receive data. */
+static inline int channel_htx_may_recv(const struct channel *chn, const struct htx *htx)
+{
+ uint32_t rem;
+
+ if (!htx->size)
+ return 1;
+
+ rem = htx_free_data_space(htx);
+ if (!rem)
+ return 0; /* htx already full */
+
+ if (rem > global.tune.maxrewrite)
+ return 1; /* reserve not yet reached */
+
+ if (!channel_may_send(chn))
+ return 0; /* don't touch reserve until we can send */
+
+ /* Now we know there's some room left in the reserve and we may
+ * forward. As long as i-to_fwd < size-maxrw, we may still
+ * receive. This is equivalent to i+maxrw-size < to_fwd,
+ * which is logical since i+maxrw-size is what overlaps with
+ * the reserve, and we want to ensure they're covered by scheduled
+ * forwards.
+ */
+ rem += co_data(chn);
+ if (rem > global.tune.maxrewrite)
+ return 1;
+
+ return (global.tune.maxrewrite - rem < chn->to_forward);
+}
+
+/* Returns non-zero if the channel can still receive data. This is used to
+ * decide when to stop reading into a buffer when we want to ensure that we
+ * leave the reserve untouched after all pending outgoing data are forwarded.
+ * The reserved space is taken into account if ->to_forward indicates that an
+ * end of transfer is close to happen. Note that both ->buf.o and ->to_forward
+ * are considered as available since they're supposed to leave the buffer. The
+ * test is optimized to avoid as many operations as possible for the fast case
+ * and to be used as an "if" condition. Just like channel_recv_limit(), we
+ * never allow to overwrite the reserve until the output stream connector is
+ * connected, otherwise we could spin on a POST with http-send-name-header.
+ */
+static inline int channel_may_recv(const struct channel *chn)
+{
+ int rem = chn->buf.size;
+
+ if (IS_HTX_STRM(chn_strm(chn)))
+ return channel_htx_may_recv(chn, htxbuf(&chn->buf));
+
+ if (b_is_null(&chn->buf))
+ return 1;
+
+ rem -= b_data(&chn->buf);
+ if (!rem)
+ return 0; /* buffer already full */
+
+ if (rem > global.tune.maxrewrite)
+ return 1; /* reserve not yet reached */
+
+ if (!channel_may_send(chn))
+ return 0; /* don't touch reserve until we can send */
+
+ /* Now we know there's some room left in the reserve and we may
+ * forward. As long as i-to_fwd < size-maxrw, we may still
+ * receive. This is equivalent to i+maxrw-size < to_fwd,
+ * which is logical since i+maxrw-size is what overlaps with
+ * the reserve, and we want to ensure they're covered by scheduled
+ * forwards.
+ */
+ rem = ci_data(chn) + global.tune.maxrewrite - chn->buf.size;
+ return rem < 0 || (unsigned int)rem < chn->to_forward;
+}
+
+/* Returns true if the channel's input is already closed */
+static inline int channel_input_closed(struct channel *chn)
+{
+ return ((chn_prod(chn)->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) != 0);
+}
+
+/* Returns true if the channel's output is already closed */
+static inline int channel_output_closed(struct channel *chn)
+{
+ return ((chn_cons(chn)->flags & SC_FL_SHUT_DONE) != 0);
+}
+
+/* Check channel timeouts, and set the corresponding flags. */
+static inline void channel_check_timeout(struct channel *chn)
+{
+ if (likely(!(chn->flags & CF_READ_EVENT)) && unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
+ chn->flags |= CF_READ_EVENT;
+}
+
+
+/* Erase any content from channel <buf> and adjusts flags accordingly. Note
+ * that any spliced data is not affected since we may not have any access to
+ * it.
+ */
+static inline void channel_erase(struct channel *chn)
+{
+ chn->to_forward = 0;
+ chn->output = 0;
+ b_reset(&chn->buf);
+}
+
+static inline void channel_htx_erase(struct channel *chn, struct htx *htx)
+{
+ htx_reset(htx);
+ channel_erase(chn);
+}
+
+
+/* marks the channel as "shutdown" ASAP in both directions */
+static inline void channel_abort(struct channel *chn)
+{
+ chn_prod(chn)->flags |= SC_FL_ABRT_WANTED;
+ chn_cons(chn)->flags |= SC_FL_SHUT_WANTED;
+ chn->flags |= CF_AUTO_CLOSE;
+ chn->flags &= ~CF_AUTO_CONNECT;
+}
+
+/* allow the consumer to try to establish a new connection. */
+static inline void channel_auto_connect(struct channel *chn)
+{
+ chn->flags |= CF_AUTO_CONNECT;
+}
+
+/* prevent the consumer from trying to establish a new connection, and also
+ * disable auto shutdown forwarding.
+ */
+static inline void channel_dont_connect(struct channel *chn)
+{
+ chn->flags &= ~(CF_AUTO_CONNECT|CF_AUTO_CLOSE);
+}
+
+/* allow the producer to forward shutdown requests */
+static inline void channel_auto_close(struct channel *chn)
+{
+ chn->flags |= CF_AUTO_CLOSE;
+}
+
+/* prevent the producer from forwarding shutdown requests */
+static inline void channel_dont_close(struct channel *chn)
+{
+ chn->flags &= ~CF_AUTO_CLOSE;
+}
+
+/* allow the producer to read / poll the input */
+static inline void channel_auto_read(struct channel *chn)
+{
+ chn->flags &= ~CF_DONT_READ;
+}
+
+/* prevent the producer from read / poll the input */
+static inline void channel_dont_read(struct channel *chn)
+{
+ chn->flags |= CF_DONT_READ;
+}
+
+
+/*************************************************/
+/* Buffer operations in the context of a channel */
+/*************************************************/
+
+
+/* Return the max number of bytes the buffer can contain so that once all the
+ * pending bytes are forwarded, the buffer still has global.tune.maxrewrite
+ * bytes free. The result sits between chn->size - maxrewrite and chn->size.
+ * It is important to mention that if buf->i is already larger than size-maxrw
+ * the condition above cannot be satisfied and the lowest size will be returned
+ * anyway. The principles are the following :
+ * 0) the empty buffer has a limit of zero
+ * 1) a non-connected buffer cannot touch the reserve
+ * 2) infinite forward can always fill the buffer since all data will leave
+ * 3) all output bytes are considered in transit since they're leaving
+ * 4) all input bytes covered by to_forward are considered in transit since
+ * they'll be converted to output bytes.
+ * 5) all input bytes not covered by to_forward are considered remaining
+ * 6) all bytes scheduled to be forwarded minus what is already in the input
+ * buffer will be in transit during future rounds.
+ * 7) 4+5+6 imply that the amount of input bytes (i) is irrelevant to the max
+ * usable length, only to_forward and output count. The difference is
+ * visible when to_forward > i.
+ * 8) the reserve may be covered up to the amount of bytes in transit since
+ * these bytes will only take temporary space.
+ *
+ * A typical buffer looks like this :
+ *
+ * <-------------- max_len ----------->
+ * <---- o ----><----- i -----> <--- 0..maxrewrite --->
+ * +------------+--------------+-------+----------------------+
+ * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
+ * +------------+--------+-----+-------+----------------------+
+ * <- fwd -> <-avail->
+ *
+ * Or when to_forward > i :
+ *
+ * <-------------- max_len ----------->
+ * <---- o ----><----- i -----> <--- 0..maxrewrite --->
+ * +------------+--------------+-------+----------------------+
+ * |////////////|\\\\\\\\\\\\\\|xxxxxxx| reserve |
+ * +------------+--------+-----+-------+----------------------+
+ * <-avail->
+ * <------------------ fwd ---------------->
+ *
+ * - the amount of buffer bytes in transit is : min(i, fwd) + o
+ * - some scheduled bytes may be in transit (up to fwd - i)
+ * - the reserve is max(0, maxrewrite - transit)
+ * - the maximum usable buffer length is size - reserve.
+ * - the available space is max_len - i - o
+ *
+ * So the formula to compute the buffer's maximum length to protect the reserve
+ * when reading new data is :
+ *
+ * max = size - maxrewrite + min(maxrewrite, transit)
+ * = size - max(maxrewrite - transit, 0)
+ *
+ * But WARNING! The conditions might change during the transfer and it could
+ * very well happen that a buffer would contain more bytes than max_len due to
+ * i+o already walking over the reserve (eg: after a header rewrite), including
+ * i or o alone hitting the limit. So it is critical to always consider that
+ * bounds may have already been crossed and that available space may be negative
+ * for example. Due to this it is perfectly possible for this function to return
+ * a value that is lower than current i+o.
+ */
+static inline int channel_recv_limit(const struct channel *chn)
+{
+ unsigned int transit;
+ int reserve;
+
+ /* return zero if empty */
+ reserve = chn->buf.size;
+ if (b_is_null(&chn->buf))
+ goto end;
+
+ /* return size - maxrewrite if we can't send */
+ reserve = global.tune.maxrewrite;
+ if (unlikely(!channel_may_send(chn)))
+ goto end;
+
+ /* We need to check what remains of the reserve after o and to_forward
+ * have been transmitted, but they can overflow together and they can
+ * cause an integer underflow in the comparison since both are unsigned
+ * while maxrewrite is signed.
+ * The code below has been verified for being a valid check for this :
+ * - if (o + to_forward) overflow => return size [ large enough ]
+ * - if o + to_forward >= maxrw => return size [ large enough ]
+ * - otherwise return size - (maxrw - (o + to_forward))
+ */
+ transit = co_data(chn) + chn->to_forward;
+ reserve -= transit;
+ if (transit < chn->to_forward || // addition overflow
+ transit >= (unsigned)global.tune.maxrewrite) // enough transit data
+ return chn->buf.size;
+ end:
+ return chn->buf.size - reserve;
+}
+
+/* HTX version of channel_recv_limit(). Return the max number of bytes the HTX
+ * buffer can contain so that once all the pending bytes are forwarded, the
+ * buffer still has global.tune.maxrewrite bytes free.
+ */
+static inline int channel_htx_recv_limit(const struct channel *chn, const struct htx *htx)
+{
+ unsigned int transit;
+ int reserve;
+
+	/* return zero if not allocated */
+ if (!htx->size)
+ return 0;
+
+	/* return htx->size - maxrewrite if we can't send */
+ reserve = global.tune.maxrewrite;
+ if (unlikely(!channel_may_send(chn)))
+ goto end;
+
+ /* We need to check what remains of the reserve after o and to_forward
+ * have been transmitted, but they can overflow together and they can
+ * cause an integer underflow in the comparison since both are unsigned
+ * while maxrewrite is signed.
+ * The code below has been verified for being a valid check for this :
+ * - if (o + to_forward) overflow => return htx->size [ large enough ]
+ * - if o + to_forward >= maxrw => return htx->size [ large enough ]
+ * - otherwise return htx->size - (maxrw - (o + to_forward))
+ */
+ transit = co_data(chn) + chn->to_forward;
+ reserve -= transit;
+ if (transit < chn->to_forward || // addition overflow
+ transit >= (unsigned)global.tune.maxrewrite) // enough transit data
+ return htx->size;
+ end:
+ return (htx->size - reserve);
+}
+
+/* HTX version of channel_full(). Instead of checking if INPUT data exceeds
+ * (size - reserve), this function checks if the free space for data in <htx>
+ * and the data scheduled for output are lower to the reserve. In such case, the
+ * channel is considered as full.
+ */
+static inline int channel_htx_full(const struct channel *c, const struct htx *htx,
+ unsigned int reserve)
+{
+ if (!htx->size)
+ return 0;
+ return (htx_free_data_space(htx) + co_data(c) <= reserve);
+}
+
+/* Returns non-zero if the channel's INPUT buffer's is considered full, which
+ * means that it holds at least as much INPUT data as (size - reserve). This
+ * also means that data that are scheduled for output are considered as potential
+ * free space, and that the reserved space is always considered as not usable.
+ * This information alone cannot be used as a general purpose free space indicator.
+ * However it accurately indicates that too many data were fed in the buffer
+ * for an analyzer for instance. See the channel_may_recv() function for a more
+ * generic function taking everything into account.
+ */
+static inline int channel_full(const struct channel *c, unsigned int reserve)
+{
+ if (b_is_null(&c->buf))
+ return 0;
+
+ if (IS_HTX_STRM(chn_strm(c)))
+ return channel_htx_full(c, htxbuf(&c->buf), reserve);
+
+ return (ci_data(c) + reserve >= c_size(c));
+}
+
+/* HTX version of channel_recv_max(). */
+static inline int channel_htx_recv_max(const struct channel *chn, const struct htx *htx)
+{
+ int ret;
+
+ ret = channel_htx_recv_limit(chn, htx) - htx_used_space(htx);
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+/* Returns the amount of space available at the input of the buffer, taking the
+ * reserved space into account if ->to_forward indicates that an end of transfer
+ * is close to happen. The test is optimized to avoid as many operations as
+ * possible for the fast case.
+ */
+static inline int channel_recv_max(const struct channel *chn)
+{
+ int ret;
+
+ if (IS_HTX_STRM(chn_strm(chn)))
+ return channel_htx_recv_max(chn, htxbuf(&chn->buf));
+
+ ret = channel_recv_limit(chn) - b_data(&chn->buf);
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+/* Returns the maximum absolute amount of data that can be copied in a channel,
+ * taking the reserved space into account but also the HTX overhead for HTX
+ * streams.
+ */
+static inline size_t channel_data_limit(const struct channel *chn)
+{
+ size_t max = (global.tune.bufsize - global.tune.maxrewrite);
+
+ if (IS_HTX_STRM(chn_strm(chn)))
+ max -= HTX_BUF_OVERHEAD;
+ return max;
+}
+
+/* Returns the amount of data in a channel, taking the HTX streams into
+ * account. For raw channels, it is equivalent to c_data. For HTX channels, we
+ * rely on the HTX api.
+ */
+static inline size_t channel_data(const struct channel *chn)
+{
+ return (IS_HTX_STRM(chn_strm(chn)) ? htx_used_space(htxbuf(&chn->buf)) : c_data(chn));
+}
+
+/* Returns the amount of input data in a channel, taking the HTX streams into
+ * account. This function relies on channel_data().
+ */
+static inline size_t channel_input_data(const struct channel *chn)
+{
+ return channel_data(chn) - co_data(chn);
+}
+
+/* Returns 1 if the channel is empty, taking the HTX streams into account */
+static inline size_t channel_empty(const struct channel *chn)
+{
+ return (IS_HTX_STRM(chn) ? htx_is_empty(htxbuf(&chn->buf)) : c_empty(chn));
+}
+
+
+/* Returns the amount of bytes that can be written over the input data at once,
+ * including reserved space which may be overwritten. This is used by Lua to
+ * insert data in the input side just before the other data using buffer_replace().
+ * The goal is to transfer these new data in the output buffer.
+ */
+static inline int ci_space_for_replace(const struct channel *chn)
+{
+ const struct buffer *buf = &chn->buf;
+ const char *end;
+
+ /* If the input side data overflows, we cannot insert data contiguously. */
+ if (b_head(buf) + b_data(buf) >= b_wrap(buf))
+ return 0;
+
+ /* Check the last byte used in the buffer, it may be a byte of the output
+	 * side if the buffer wraps, or it's the end of the buffer.
+ */
+ end = b_head(buf);
+ if (end <= ci_head(chn))
+ end = b_wrap(buf);
+
+ /* Compute the amount of bytes which can be written. */
+ return end - ci_tail(chn);
+}
+
+/* Allocates a buffer for channel <chn>. Returns 0 in case of failure, non-zero
+ * otherwise.
+ *
+ * If no buffer are available, the requester, represented by <wait> pointer,
+ * will be added in the list of objects waiting for an available buffer.
+ */
+static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
+{
+ if (b_alloc(&chn->buf) != NULL)
+ return 1;
+
+ if (!LIST_INLIST(&wait->list))
+ LIST_APPEND(&th_ctx->buffer_wq, &wait->list);
+
+ return 0;
+}
+
+/* Releases a possibly allocated buffer for channel <chn>. If it was not
+ * allocated, this function does nothing. Else the buffer is released and we try
+ * to wake up as many streams/applets as possible. */
+static inline void channel_release_buffer(struct channel *chn, struct buffer_wait *wait)
+{
+ if (c_size(chn) && c_empty(chn)) {
+ b_free(&chn->buf);
+ offer_buffers(wait->target, 1);
+ }
+}
+
+/* Truncate any unread data in the channel's buffer, and disable forwarding.
+ * Outgoing data are left intact. This is mainly to be used to send error
+ * messages after existing data.
+ */
+static inline void channel_truncate(struct channel *chn)
+{
+ if (!co_data(chn))
+ return channel_erase(chn);
+
+ chn->to_forward = 0;
+ if (!ci_data(chn))
+ return;
+
+ chn->buf.data = co_data(chn);
+}
+
+static inline void channel_htx_truncate(struct channel *chn, struct htx *htx)
+{
+ if (!co_data(chn))
+ return channel_htx_erase(chn, htx);
+
+ chn->to_forward = 0;
+ if (htx->data == co_data(chn))
+ return;
+ htx_truncate(htx, co_data(chn));
+}
+
+/* This function realigns a possibly wrapping channel buffer so that the input
+ * part is contiguous and starts at the beginning of the buffer and the output
+ * part ends at the end of the buffer. This provides the best conditions since
+ * it allows the largest inputs to be processed at once and ensures that once
+ * the output data leaves, the whole buffer is available at once.
+ */
+static inline void channel_slow_realign(struct channel *chn, char *swap)
+{
+ return b_slow_realign(&chn->buf, swap, co_data(chn));
+}
+
+
+/* Forward all headers of an HTX message, starting from the SL to the EOH. This
+ * function returns the position of the block after the EOH, if
+ * found. Otherwise, it returns -1.
+ */
+static inline int32_t channel_htx_fwd_headers(struct channel *chn, struct htx *htx)
+{
+ int32_t pos;
+ size_t data = 0;
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ data += htx_get_blksz(blk);
+ if (htx_get_blk_type(blk) == HTX_BLK_EOH) {
+ pos = htx_get_next(htx, pos);
+ break;
+ }
+ }
+ c_adv(chn, data);
+ return pos;
+}
+
+/*
+ * Advance the channel buffer's read pointer by <len> bytes. This is useful
+ * when data have been read directly from the buffer. It is illegal to call
+ * this function with <len> causing a wrapping at the end of the buffer. It's
+ * the caller's responsibility to ensure that <len> is never larger than
+ * chn->o.
+ */
+static inline void co_skip(struct channel *chn, int len)
+{
+ BUG_ON_HOT(len > chn->output);
+ b_del(&chn->buf, len);
+ chn->output -= len;
+ c_realign_if_empty(chn);
+}
+
+/* HTX version of co_skip(). This function skips at most <len> bytes from the
+ * output of the channel <chn>. Depending on how data are stored in <htx> less
+ * than <len> bytes can be skipped.
+ */
+static inline void co_htx_skip(struct channel *chn, struct htx *htx, int len)
+{
+ struct htx_ret htxret;
+
+ htxret = htx_drain(htx, len);
+ if (htxret.ret) {
+ BUG_ON_HOT(htxret.ret > chn->output);
+ chn->output -= htxret.ret;
+ }
+}
+
+/* Tries to copy chunk <chunk> into the channel's buffer after length controls.
+ * The chn->o and to_forward pointers are updated. If the channel's input is
+ * closed, -2 is returned. If the block is too large for this buffer, -3 is
+ * returned. If there is not enough room left in the buffer, -1 is returned.
+ * Otherwise the number of bytes copied is returned (0 being a valid number).
+ * Channel flag READ_PARTIAL is updated if some data can be transferred. The
+ * chunk's length is updated with the number of bytes sent.
+ */
+static inline int ci_putchk(struct channel *chn, struct buffer *chunk)
+{
+ int ret;
+
+ ret = ci_putblk(chn, chunk->area, chunk->data);
+ if (ret > 0)
+ chunk->data -= ret;
+ return ret;
+}
+
+/* Tries to copy string <str> at once into the channel's buffer after length
+ * controls. The chn->o and to_forward pointers are updated. If the channel's
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Channel flag READ_PARTIAL is updated if some data can be
+ * transferred.
+ */
+static inline int ci_putstr(struct channel *chn, const char *str)
+{
+ return ci_putblk(chn, str, strlen(str));
+}
+
+/*
+ * Return one char from the channel's buffer. If the buffer is empty and the
+ * channel is closed, return -2. If the buffer is just empty, return -1. The
+ * buffer's pointer is not advanced, it's up to the caller to call co_skip(buf,
+ * 1) when it has consumed the char. Also note that this function respects the
+ * chn->o limit.
+ */
+static inline int co_getchr(struct channel *chn)
+{
+ /* closed or empty + imminent close = -2; empty = -1 */
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUT_DONE) || !co_data(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ return -2;
+ return -1;
+ }
+ return *co_head(chn);
+}
+
+#endif /* _HAPROXY_CHANNEL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/check-t.h b/include/haproxy/check-t.h
new file mode 100644
index 0000000..eb080a9
--- /dev/null
+++ b/include/haproxy/check-t.h
@@ -0,0 +1,198 @@
+/*
+ * include/haproxy/check-t.h
+ * Health-checks definitions, enums, macros and bitfields.
+ *
+ * Copyright 2008-2009 Krzysztof Piotr Oledzki <ole@ans.pl>
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_CHECKS_T_H
+#define _HAPROXY_CHECKS_T_H
+
+#include <sys/time.h>
+
+#include <import/ebtree-t.h>
+#include <import/ist.h>
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/dynbuf-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/vars-t.h>
+
+/* Please note: this file tends to commonly be part of circular dependencies,
+ * so it is important to keep its includes list to the minimum possible (i.e.
+ * only types whose size needs to be known). Since there are no function
+ * prototypes nor pointers here, forward declarations are not really necessary.
+ * This file ought to be split into multiple parts, at least regular checks vs
+ * tcp-checks.
+ */
+
+/* enum used by check->result. Must remain in this order, as some code uses
+ * result >= CHK_RES_PASSED to declare success.
+ */
+enum chk_result {
+ CHK_RES_UNKNOWN = 0, /* initialized to this by default */
+ CHK_RES_NEUTRAL, /* valid check but no status information */
+ CHK_RES_FAILED, /* check failed */
+ CHK_RES_PASSED, /* check succeeded and server is fully up again */
+ CHK_RES_CONDPASS, /* check reports the server doesn't want new sessions */
+};
+
+/* flags used by check->state */
+#define CHK_ST_INPROGRESS 0x0001 /* a check is currently running */
+#define CHK_ST_CONFIGURED 0x0002 /* this check is configured and may be enabled */
+#define CHK_ST_ENABLED 0x0004 /* this check is currently administratively enabled */
+#define CHK_ST_PAUSED 0x0008 /* checks are paused because of maintenance (health only) */
+#define CHK_ST_AGENT 0x0010 /* check is an agent check (otherwise it's a health check) */
+#define CHK_ST_PORT_MISS        0x0020  /* check can't be sent because no port is configured to run it */
+#define CHK_ST_IN_ALLOC 0x0040 /* check blocked waiting for input buffer allocation */
+#define CHK_ST_OUT_ALLOC 0x0080 /* check blocked waiting for output buffer allocation */
+#define CHK_ST_CLOSE_CONN 0x0100 /* check is waiting that the connection gets closed */
+#define CHK_ST_PURGE 0x0200 /* check must be freed */
+#define CHK_ST_FASTINTER 0x0400 /* force fastinter check */
+#define CHK_ST_READY 0x0800 /* check ready to migrate or run, see below */
+#define CHK_ST_SLEEPING 0x1000 /* check was sleeping, i.e. not currently bound to a thread, see below */
+
+/* 4 possible states for CHK_ST_SLEEPING and CHK_ST_READY:
+ * SLP RDY State Description
+ * 0 0 QUEUED Check is in queue due to concurrency limit
+ * 0 1 RUNNING Check is bound to current thread and running
+ * 1 0 SLEEPING Check is sleeping, not bound to a thread
+ * 1 1 MIGRATING Check is migrating to another thread
+ */
+
+/* check status */
+enum healthcheck_status {
+ HCHK_STATUS_UNKNOWN = 0, /* Unknown */
+ HCHK_STATUS_INI, /* Initializing */
+ HCHK_STATUS_START, /* Check started - SPECIAL STATUS */
+
+ /* Below we have finished checks */
+ HCHK_STATUS_CHECKED, /* DUMMY STATUS */
+
+ HCHK_STATUS_HANA, /* Health analyze detected enough consecutive errors */
+
+ HCHK_STATUS_SOCKERR, /* Socket error */
+
+ HCHK_STATUS_L4OK, /* L4 check passed, for example tcp connect */
+ HCHK_STATUS_L4TOUT, /* L4 timeout */
+ HCHK_STATUS_L4CON, /* L4 connection problem, for example: */
+ /* "Connection refused" (tcp rst) or "No route to host" (icmp) */
+
+ HCHK_STATUS_L6OK, /* L6 check passed */
+ HCHK_STATUS_L6TOUT, /* L6 (SSL) timeout */
+ HCHK_STATUS_L6RSP, /* L6 invalid response - protocol error */
+
+ HCHK_STATUS_L7TOUT, /* L7 (HTTP/SMTP) timeout */
+ HCHK_STATUS_L7RSP, /* L7 invalid response - protocol error */
+
+ /* Below we have layer 5-7 data available */
+ HCHK_STATUS_L57DATA, /* DUMMY STATUS */
+ HCHK_STATUS_L7OKD, /* L7 check passed */
+ HCHK_STATUS_L7OKCD, /* L7 check conditionally passed */
+ HCHK_STATUS_L7STS, /* L7 response error, for example HTTP 5xx */
+
+ HCHK_STATUS_PROCERR, /* External process check failure */
+ HCHK_STATUS_PROCTOUT, /* External process check timeout */
+ HCHK_STATUS_PROCOK, /* External process check passed */
+
+ HCHK_STATUS_SIZE
+};
+
+/* health status for response tracking */
+enum {
+ HANA_STATUS_UNKNOWN = 0,
+
+ HANA_STATUS_L4_OK, /* L4 successful connection */
+ HANA_STATUS_L4_ERR, /* L4 unsuccessful connection */
+
+ HANA_STATUS_HTTP_OK, /* Correct http response */
+ HANA_STATUS_HTTP_STS, /* Wrong http response, for example HTTP 5xx */
+ HANA_STATUS_HTTP_HDRRSP, /* Invalid http response (headers) */
+ HANA_STATUS_HTTP_RSP, /* Invalid http response */
+
+ HANA_STATUS_HTTP_READ_ERROR, /* Read error */
+ HANA_STATUS_HTTP_READ_TIMEOUT, /* Read timeout */
+ HANA_STATUS_HTTP_BROKEN_PIPE, /* Unexpected close from server */
+
+ HANA_STATUS_SIZE
+};
+
+enum {
+ HANA_ONERR_UNKNOWN = 0,
+
+	HANA_ONERR_FASTINTER,		/* Force fastinter */
+ HANA_ONERR_FAILCHK, /* Simulate a failed check */
+ HANA_ONERR_SUDDTH, /* Enters sudden death - one more failed check will mark this server down */
+ HANA_ONERR_MARKDWN, /* Mark this server down, now! */
+};
+
+enum {
+ HANA_ONMARKEDDOWN_NONE = 0,
+ HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS, /* Shutdown peer sessions */
+};
+
+enum {
+ HANA_ONMARKEDUP_NONE = 0,
+ HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS, /* Shutdown peer sessions */
+};
+
+enum {
+ HANA_OBS_NONE = 0,
+
+ HANA_OBS_LAYER4, /* Observe L4 - for example tcp */
+ HANA_OBS_LAYER7, /* Observe L7 - for example http */
+
+ HANA_OBS_SIZE
+};
+
+struct tcpcheck_rule;
+struct tcpcheck_rules;
+
+struct check {
+ enum obj_type obj_type; /* object type == OBJ_TYPE_CHECK */
+ struct session *sess; /* Health check session. */
+ struct vars vars; /* Health check dynamic variables. */
+ struct xprt_ops *xprt; /* transport layer operations for health checks */
+ struct stconn *sc; /* stream connector used by health checks */
+ struct buffer bi, bo; /* input and output buffers to send/recv check */
+ struct buffer_wait buf_wait; /* Wait list for buffer allocation */
+ struct task *task; /* the task associated to the health check processing, NULL if disabled */
+ ullong start; /* last health check start time */
+ long duration; /* time in ms took to finish last health check */
+ short status, code; /* check result, check code */
+ unsigned short port; /* the port to use for the health checks */
+ char desc[HCHK_DESC_LEN]; /* health check description */
+ signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
+ int send_proxy; /* send a PROXY protocol header with checks */
+ struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
+ struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
+ int inter, fastinter, downinter; /* checks: time in milliseconds */
+ enum chk_result result; /* health-check result : CHK_RES_* */
+ int state; /* state of the check : CHK_ST_* */
+ int health; /* 0 to rise-1 = bad;
+ * rise to rise+fall-1 = good */
+ int rise, fall; /* time in iterations */
+ int type; /* Check type, one of PR_O2_*_CHK */
+ struct server *server; /* back-pointer to server */
+ struct proxy *proxy; /* proxy to be used */
+ char **argv; /* the arguments to use if running a process-based check */
+ char **envp; /* the environment to use if running a process-based check */
+ struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
+ struct sockaddr_storage addr; /* the address to check */
+ char *sni; /* Server name */
+ char *alpn_str; /* ALPN to use for checks */
+ int alpn_len; /* ALPN string length */
+ const struct mux_proto_list *mux_proto; /* the mux to use for all outgoing connections (specified by the "proto" keyword) */
+ struct list check_queue; /* entry in the check queue. Not empty = in queue. */
+ int via_socks4; /* check the connection via socks4 proxy */
+};
+
+#endif /* _HAPROXY_CHECKS_T_H */
diff --git a/include/haproxy/check.h b/include/haproxy/check.h
new file mode 100644
index 0000000..c90d3e7
--- /dev/null
+++ b/include/haproxy/check.h
@@ -0,0 +1,131 @@
+/*
+ * include/haproxy/check.h
+ * Functions prototypes for the checks.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CHECKS_H
+#define _HAPROXY_CHECKS_H
+
+#include <haproxy/check-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/trace-t.h>
+
+extern struct trace_source trace_check;
+
+/* Details about these events are defined in <src/check.c> */
+#define CHK_EV_TASK_WAKE (1ULL << 0)
+#define CHK_EV_HCHK_START (1ULL << 1)
+#define CHK_EV_HCHK_WAKE (1ULL << 2)
+#define CHK_EV_HCHK_RUN (1ULL << 3)
+#define CHK_EV_HCHK_END (1ULL << 4)
+#define CHK_EV_HCHK_SUCC (1ULL << 5)
+#define CHK_EV_HCHK_ERR (1ULL << 6)
+#define CHK_EV_HCHK (CHK_EV_HCHK_START|CHK_EV_HCHK_WAKE|CHK_EV_HCHK_RUN|\
+ CHK_EV_HCHK_END|CHK_EV_HCHK_SUCC|CHK_EV_HCHK_ERR)
+
+#define CHK_EV_TCPCHK_EVAL (1ULL << 7)
+#define CHK_EV_TCPCHK_ERR (1ULL << 8)
+#define CHK_EV_TCPCHK_CONN (1ULL << 9)
+#define CHK_EV_TCPCHK_SND (1ULL << 10)
+#define CHK_EV_TCPCHK_EXP (1ULL << 11)
+#define CHK_EV_TCPCHK_ACT (1ULL << 12)
+#define CHK_EV_TCPCHK (CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ERR|CHK_EV_TCPCHK_CONN|\
+ CHK_EV_TCPCHK_SND|CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ACT)
+
+#define CHK_EV_RX_DATA (1ULL << 13)
+#define CHK_EV_RX_BLK (1ULL << 14)
+#define CHK_EV_RX_ERR (1ULL << 15)
+#define CHK_EV_RX (CHK_EV_RX_DATA|CHK_EV_RX_BLK|CHK_EV_RX_ERR)
+
+#define CHK_EV_TX_DATA (1ULL << 16)
+#define CHK_EV_TX_BLK (1ULL << 17)
+#define CHK_EV_TX_ERR (1ULL << 18)
+#define CHK_EV_TX (CHK_EV_TX_DATA|CHK_EV_TX_BLK|CHK_EV_TX_ERR)
+
+extern struct data_cb check_conn_cb;
+extern struct proxy checks_fe;
+
+short get_check_status_result(short check_status);
+const char *get_check_status_description(short check_status);
+const char *get_check_status_info(short check_status);
+int httpchk_build_status_header(struct server *s, struct buffer *buf);
+void __health_adjust(struct server *s, short status);
+void check_append_info(struct buffer *msg, struct check *check);
+void set_server_check_status(struct check *check, short status, const char *desc);
+void chk_report_conn_err(struct check *check, int errno_bck, int expired);
+void check_notify_failure(struct check *check);
+void check_notify_stopping(struct check *check);
+void check_notify_success(struct check *check);
+struct task *process_chk(struct task *t, void *context, unsigned int state);
+
+struct task *srv_chk_io_cb(struct task *t, void *ctx, unsigned int state);
+
+int check_buf_available(void *target);
+struct buffer *check_get_buf(struct check *check, struct buffer *bptr);
+void check_release_buf(struct check *check, struct buffer *bptr);
+const char *init_check(struct check *check, int type);
+void free_check(struct check *check);
+void check_purge(struct check *check);
+int wake_srv_chk(struct stconn *sc);
+
+int init_srv_check(struct server *srv);
+int init_srv_agent_check(struct server *srv);
+int start_check_task(struct check *check, int mininter, int nbcheck, int srvpos);
+
+/* Declared here, but the definitions are in flt_spoe.c */
+int spoe_prepare_healthcheck_request(char **req, int *len);
+int spoe_handle_healthcheck_response(char *frame, size_t size, char *err, int errlen);
+
+int set_srv_agent_send(struct server *srv, const char *send);
+
+/* set agent addr and appropriate flag */
+static inline void set_srv_agent_addr(struct server *srv, struct sockaddr_storage *sk)
+{
+ srv->agent.addr = *sk;
+ srv->flags |= SRV_F_AGENTADDR;
+}
+
+/* set agent port and appropriate flag */
+static inline void set_srv_agent_port(struct server *srv, int port)
+{
+ srv->agent.port = port;
+ srv->flags |= SRV_F_AGENTPORT;
+}
+
+/* Use this one only. This inline version only ensures that we don't
+ * call the function when the observe mode is disabled.
+ */
+static inline void health_adjust(struct server *s, short status)
+{
+	/* return now if neither observing nor health checks are enabled */
+ if (!s->observe || !s->check.task)
+ return;
+
+ __health_adjust(s, status);
+}
+
+#endif /* _HAPROXY_CHECKS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/chunk.h b/include/haproxy/chunk.h
new file mode 100644
index 0000000..43c7270
--- /dev/null
+++ b/include/haproxy/chunk.h
@@ -0,0 +1,303 @@
+/*
+ * include/haproxy/chunk.h
+ * Chunk management definitions, macros and inline functions.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CHUNK_H
+#define _HAPROXY_CHUNK_H
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/pool.h>
+
+
+extern struct pool_head *pool_head_trash;
+
+/* function prototypes */
+
+int chunk_printf(struct buffer *chk, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+
+int chunk_appendf(struct buffer *chk, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+
+int chunk_htmlencode(struct buffer *dst, struct buffer *src);
+int chunk_asciiencode(struct buffer *dst, struct buffer *src, char qc);
+int chunk_strcmp(const struct buffer *chk, const char *str);
+int chunk_strcasecmp(const struct buffer *chk, const char *str);
+struct buffer *get_trash_chunk(void);
+int init_trash_buffers(int first);
+
+/* Empties chunk <chk> by resetting its data length to zero. The storage
+ * itself (area and size) is left untouched so the chunk can be reused.
+ */
+static inline void chunk_reset(struct buffer *chk)
+{
+ chk->data = 0;
+}
+
+/* Initializes chunk <chk> to use storage area <str> of <size> bytes,
+ * starting empty (both head and data set to zero).
+ */
+static inline void chunk_init(struct buffer *chk, char *str, size_t size)
+{
+ chk->area = str;
+ chk->head = 0;
+ chk->data = 0;
+ chk->size = size;
+}
+
+/* Initializes chunk <chk> to storage <str> of <size> bytes already holding
+ * <len> bytes of data. A zero <size> denotes a read-only chunk, in which
+ * case <len> is not checked against the size.
+ * Reports 0 in case of error (negative <len>, or <len> larger than a
+ * non-zero <size>), 1 if OK.
+ */
+static inline int chunk_initlen(struct buffer *chk, char *str, size_t size,
+ int len)
+{
+
+ if (len < 0 || (size && len > size))
+ return 0;
+
+ chk->area = str;
+ chk->head = 0;
+ chk->data = len;
+ chk->size = size;
+
+ return 1;
+}
+
+/* Initializes <chk> from NUL-terminated string <str>. This is only for
+ * temporary manipulation: size is set to zero, which marks the chunk
+ * read-only.
+ */
+static inline void chunk_initstr(struct buffer *chk, const char *str)
+{
+ chk->area = (char *)str;
+ chk->head = 0;
+ chk->data = strlen(str);
+ chk->size = 0; /* mark it read-only */
+}
+
+/*
+ * Allocate a trash chunk from the reentrant pool. The buffer starts at the
+ * end of the chunk. This chunk must be freed using free_trash_chunk(). This
+ * call may fail and the caller is responsible for checking that the returned
+ * pointer is not NULL.
+ */
+static forceinline struct buffer *alloc_trash_chunk(void)
+{
+ struct buffer *chunk;
+
+ chunk = pool_alloc(pool_head_trash);
+ if (chunk) {
+ /* the string area lives just past the struct buffer in the
+ * same pool object, and starts as an empty string.
+ */
+ char *buf = (char *)chunk + sizeof(struct buffer);
+ *buf = 0;
+ chunk_init(chunk, buf,
+ pool_head_trash->size - sizeof(struct buffer));
+ }
+ return chunk;
+}
+
+/*
+ * free a trash chunk allocated by alloc_trash_chunk(). NOP on NULL
+ * (relies on pool_free() accepting a NULL pointer).
+ */
+static forceinline void free_trash_chunk(struct buffer *chunk)
+{
+ pool_free(pool_head_trash, chunk);
+}
+
+/* copies chunk <src> into <chk>. Returns 0 in case of failure (i.e. when
+ * <src> does not fit into <chk>). No trailing zero is added.
+ */
+static inline int chunk_cpy(struct buffer *chk, const struct buffer *src)
+{
+ if (unlikely(src->data > chk->size))
+ return 0;
+
+ chk->data = src->data;
+ memcpy(chk->area, src->area, src->data);
+ return 1;
+}
+
+/* copies memory area <src> into <chk> for <len> bytes, replacing any
+ * previous content. Returns 0 in case of failure. No trailing zero is
+ * added.
+ */
+static inline int chunk_memcpy(struct buffer *chk, const char *src,
+ size_t len)
+{
+ if (unlikely(len > chk->size))
+ return 0;
+
+ chk->data = len;
+ memcpy(chk->area, src, len);
+
+ return 1;
+}
+
+/* appends memory area <src> after the current contents of <chk> for <len>
+ * bytes. Returns 0 in case of failure (not enough room). No trailing zero
+ * is added.
+ */
+static inline int chunk_memcat(struct buffer *chk, const char *src,
+ size_t len)
+{
+ if (unlikely(chk->data + len > chk->size))
+ return 0;
+
+ memcpy(chk->area + chk->data, src, len);
+ chk->data += len;
+ return 1;
+}
+
+/* appends ist <src> after <chk>. Returns 0 in case of failure. No trailing
+ * zero is added (see chunk_memcat()).
+ */
+static inline int chunk_istcat(struct buffer *chk, const struct ist src)
+{
+ return chunk_memcat(chk, istptr(src), istlen(src));
+}
+
+/* appends chunk <src> after <chk>. Returns 0 in case of failure. No
+ * trailing zero is added (see chunk_memcat()).
+ */
+static inline int chunk_cat(struct buffer *chk, const struct buffer *src)
+{
+ return chunk_memcat(chk, src->area, src->data);
+}
+
+/* copies str into <chk> followed by a trailing zero. Returns 0 in
+ * case of failure. The check uses 'len >= size' so that room remains
+ * for the trailing zero, which is copied along with the string
+ * (len + 1 bytes).
+ */
+static inline int chunk_strcpy(struct buffer *chk, const char *str)
+{
+ size_t len;
+
+ len = strlen(str);
+
+ if (unlikely(len >= chk->size))
+ return 0;
+
+ chk->data = len;
+ memcpy(chk->area, str, len + 1);
+
+ return 1;
+}
+
+/* copies at most <max> chars from str into <chk> followed by a trailing zero.
+ * Returns 0 in case of failure. Note that strlen() scans the whole input
+ * string, so <str> must be NUL-terminated even when longer than <max>.
+ */
+static inline int chunk_strncpy(struct buffer *chk, const char *str, size_t max)
+{
+ size_t len;
+
+ len = strlen(str);
+ if (len > max)
+ len = max;
+
+ if (unlikely(len >= chk->size))
+ return 0;
+
+ memcpy(chk->area, str, len);
+ chk->area[len] = 0;
+ chk->data = len;
+ return 1;
+}
+
+/* appends str after <chk> followed by a trailing zero. Returns 0 in
+ * case of failure. The trailing zero is copied (len + 1 bytes) but is
+ * not accounted for in chk->data, so further appends overwrite it.
+ */
+static inline int chunk_strcat(struct buffer *chk, const char *str)
+{
+ size_t len;
+
+ len = strlen(str);
+
+ if (unlikely(chk->data + len >= chk->size))
+ return 0;
+
+ memcpy(chk->area + chk->data, str, len + 1);
+ chk->data += len;
+ return 1;
+}
+
+/* Adds a trailing zero to the current chunk and returns the pointer to the
+ * following part. The purpose is to be able to use a chunk as a series of
+ * short independent strings with chunk_* functions, which do not need to be
+ * released. Returns NULL if no space is available to ensure that the new
+ * string will have its own trailing zero. For example :
+ * chunk_reset(&trash);
+ * pid = chunk_newstr(&trash);
+ * chunk_appendf(&trash, "%d", getpid());
+ * name = chunk_newstr(&trash);
+ * chunk_appendf(&trash, "%s", gethostname());
+ * printf("hostname=<%s>, pid=<%d>\n", name, pid);
+ */
+static inline char *chunk_newstr(struct buffer *chk)
+{
+ if (chk->data + 1 >= chk->size)
+ return NULL;
+
+ chk->area[chk->data++] = 0;
+ return chk->area + chk->data;
+}
+
+/* Detaches chunk <chk> from its storage without freeing it: the area
+ * pointer is cleared, data is set to -1 and size to 0. The chunk must be
+ * re-initialized before being used again.
+ */
+static inline void chunk_drop(struct buffer *chk)
+{
+ chk->area = NULL;
+ chk->data = -1;
+ chk->size = 0;
+}
+
+/* Frees the storage of chunk <chk> then drops the chunk (see chunk_drop()).
+ * Does nothing on a read-only chunk (size == 0), whose area was never
+ * allocated by us.
+ */
+static inline void chunk_destroy(struct buffer *chk)
+{
+ if (!chk->size)
+ return;
+
+ free(chk->area);
+ chunk_drop(chk);
+}
+
+/*
+ * frees the destination chunk if already allocated, allocates a new string,
+ * and copies the source into it. The new chunk will have extra room for a
+ * trailing zero unless the source chunk was actually full. The pointer to
+ * the destination string is returned, or NULL if the allocation fails or if
+ * any pointer is NULL.
+ */
+static inline char *chunk_dup(struct buffer *dst, const struct buffer *src)
+{
+ if (!dst || !src || !src->area)
+ return NULL;
+
+ /* a non-zero size means <dst> owns its previous area */
+ if (dst->size)
+ free(dst->area);
+ dst->head = src->head;
+ dst->data = src->data;
+ dst->size = src->data;
+ /* reserve room for a trailing zero when possible; also grow from a
+ * zero size (read-only source) so a 1-byte area gets allocated.
+ */
+ if (dst->size < src->size || !src->size)
+ dst->size++;
+
+ dst->area = malloc(dst->size);
+ if (!dst->area) {
+ /* leave <dst> in a consistent empty state on allocation failure */
+ dst->head = 0;
+ dst->data = 0;
+ dst->size = 0;
+ return NULL;
+ }
+
+ memcpy(dst->area, src->area, dst->data);
+ if (dst->data < dst->size)
+ dst->area[dst->data] = 0;
+
+ return dst->area;
+}
+
+#endif /* _HAPROXY_CHUNK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/cli-t.h b/include/haproxy/cli-t.h
new file mode 100644
index 0000000..c155df3
--- /dev/null
+++ b/include/haproxy/cli-t.h
@@ -0,0 +1,100 @@
+/*
+ * include/haproxy/cli-t.h
+ * This file provides structures and types for CLI.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CLI_T_H
+#define _HAPROXY_CLI_T_H
+
+#include <haproxy/applet-t.h>
+
+/* Access level for a stats socket (appctx->cli_level) */
+#define ACCESS_LVL_NONE 0x0000
+#define ACCESS_LVL_USER 0x0001
+#define ACCESS_LVL_OPER 0x0002
+#define ACCESS_LVL_ADMIN 0x0003
+#define ACCESS_LVL_MASK 0x0003
+
+#define ACCESS_FD_LISTENERS 0x0004 /* expose listeners FDs on stats socket */
+#define ACCESS_MASTER 0x0008 /* works with the master (and every other processes) */
+#define ACCESS_MASTER_ONLY 0x0010 /* only works with the master */
+#define ACCESS_EXPERT 0x0020 /* access to dangerous commands reserved to experts */
+#define ACCESS_EXPERIMENTAL 0x0040
+#define ACCESS_MCLI_DEBUG 0x0080 /* allow the master CLI to use any command without the flag ACCESS_MASTER */
+#define ACCESS_MCLI_SEVERITY_NB 0x0100 /* 'set severity-output number' on master CLI */
+#define ACCESS_MCLI_SEVERITY_STR 0x0200 /* 'set severity-output string' on master CLI */
+
+/* flags for appctx->st1 */
+#define APPCTX_CLI_ST1_PROMPT (1 << 0)
+#define APPCTX_CLI_ST1_PAYLOAD (1 << 1)
+#define APPCTX_CLI_ST1_NOLF (1 << 2)
+#define APPCTX_CLI_ST1_TIMED (1 << 3)
+
+#define CLI_PREFIX_KW_NB 5
+#define CLI_MAX_MATCHES 5
+#define CLI_MAX_HELP_ENTRIES 1024
+
+/* CLI states */
+enum {
+ CLI_ST_INIT = 0, /* initial state, must leave to zero ! */
+ CLI_ST_END, /* final state, let's close */
+ CLI_ST_GETREQ, /* wait for a request */
+ CLI_ST_OUTPUT, /* all states after this one are responses */
+ CLI_ST_PROMPT, /* display the prompt (first output, same code) */
+ CLI_ST_PRINT, /* display const message in cli->msg */
+ CLI_ST_PRINT_ERR, /* display const error in cli->msg */
+ CLI_ST_PRINT_DYN, /* display dynamic message in cli->err. After the display, free the pointer */
+ CLI_ST_PRINT_DYNERR, /* display dynamic error in cli->err. After the display, free the pointer */
+ CLI_ST_PRINT_UMSG, /* display usermsgs_ctx buffer. After the display, usermsgs_ctx is reset. */
+ CLI_ST_PRINT_UMSGERR, /* display usermsgs_ctx buffer as error. After the display, usermsgs_ctx is reset. */
+ CLI_ST_CALLBACK, /* custom callback pointer */
+};
+
+/* CLI severity output formats */
+enum {
+ CLI_SEVERITY_UNDEFINED = 0, /* undefined severity format */
+ CLI_SEVERITY_NONE, /* no severity information prepended */
+ CLI_SEVERITY_NUMBER, /* prepend informational cli messages with a severity as number */
+ CLI_SEVERITY_STRING, /* prepend informational cli messages with a severity as string */
+};
+
+/* CLI context for printing command responses. */
+struct cli_print_ctx {
+ const char *msg; /* pointer to a persistent message to be returned in CLI_ST_PRINT state */
+ char *err; /* pointer to a 'must free' message to be returned in CLI_ST_PRINT_DYN state */
+ int severity; /* severity of the message to be returned according to (syslog) rfc5424 */
+};
+
+struct cli_kw {
+ const char *str_kw[CLI_PREFIX_KW_NB]; /* keywords ended by NULL, limited to CLI_PREFIX_KW_NB
+ separated keywords combination */
+ const char *usage; /* usage message */
+ int (*parse)(char **args, char *payload, struct appctx *appctx, void *private);
+ int (*io_handler)(struct appctx *appctx);
+ void (*io_release)(struct appctx *appctx);
+ void *private;
+ int level; /* this is the level needed to show the keyword usage and to use it */
+};
+
+struct cli_kw_list {
+ struct list list;
+ struct cli_kw kw[VAR_ARRAY];
+};
+
+#endif /* _HAPROXY_CLI_T_H */
diff --git a/include/haproxy/cli.h b/include/haproxy/cli.h
new file mode 100644
index 0000000..32c6599
--- /dev/null
+++ b/include/haproxy/cli.h
@@ -0,0 +1,138 @@
+/*
+ * include/haproxy/cli.h
+ * This file contains definitions of some primitives dedicated to
+ * statistics output.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CLI_H
+#define _HAPROXY_CLI_H
+
+#include <haproxy/applet.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/cli-t.h>
+#include <haproxy/global.h>
+#include <haproxy/mworker-t.h>
+#include <haproxy/stream-t.h>
+
+
+void cli_register_kw(struct cli_kw_list *kw_list);
+struct cli_kw* cli_find_kw_exact(char **args);
+void cli_list_keywords(void);
+
+int cli_has_level(struct appctx *appctx, int level);
+
+int cli_parse_default(char **args, char *payload, struct appctx *appctx, void *private);
+
+/* mworker proxy functions */
+
+int mworker_cli_proxy_create(void);
+struct bind_conf *mworker_cli_proxy_new_listener(char *line);
+int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
+void mworker_cli_proxy_stop(void);
+
+extern struct bind_conf *mcli_reload_bind_conf;
+
+/* proxy mode cli functions */
+
+/* analyzers */
+int pcli_wait_for_request(struct stream *s, struct channel *req, int an_bit);
+int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit);
+
+/* updates the CLI's context to log <msg> at <severity> and returns 1. This is
+ * for use in CLI parsers to deal with quick response messages. Only the
+ * pointer is stored, so <msg> must be a persistent string (see the
+ * cli_print_ctx.msg description).
+ */
+static inline int cli_msg(struct appctx *appctx, int severity, const char *msg)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ ctx->severity = severity;
+ ctx->msg = msg;
+ appctx->st0 = CLI_ST_PRINT;
+ return 1;
+}
+
+/* updates the CLI's context to log error message <err> and returns 1. The
+ * message will be logged at level LOG_ERR. This is for use in CLI parsers to
+ * deal with quick response messages. Only the pointer is stored, so <err>
+ * must be a persistent string; it is not freed (contrast with cli_dynerr()).
+ */
+static inline int cli_err(struct appctx *appctx, const char *err)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ ctx->msg = err;
+ appctx->st0 = CLI_ST_PRINT_ERR;
+ return 1;
+}
+
+/* updates the CLI's context to log <msg> at <severity> and returns 1. The
+ * message must have been dynamically allocated and will be freed after the
+ * display (see the CLI_ST_PRINT_DYN state). This is for use in CLI parsers
+ * to deal with quick response messages.
+ */
+static inline int cli_dynmsg(struct appctx *appctx, int severity, char *msg)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ ctx->severity = severity;
+ ctx->err = msg;
+ appctx->st0 = CLI_ST_PRINT_DYN;
+ return 1;
+}
+
+/* updates the CLI's context to log error message <err> and returns 1. The
+ * message must have been dynamically allocated and will be freed after the
+ * display (see the CLI_ST_PRINT_DYNERR state). The message will be logged
+ * at level LOG_ERR. This is for use in CLI parsers to deal with quick
+ * response messages.
+ */
+static inline int cli_dynerr(struct appctx *appctx, char *err)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ ctx->err = err;
+ appctx->st0 = CLI_ST_PRINT_DYNERR;
+ return 1;
+}
+
+/* updates the CLI's context to log messages stored in thread-local
+ * usermsgs_ctx at <severity> level. usermsgs_ctx will be reset when done.
+ * This is for use in CLI parsers to deal with quick response messages.
+ *
+ * Always returns 1.
+ */
+static inline int cli_umsg(struct appctx *appctx, int severity)
+{
+ struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ /* only the severity needs to be stored; the text comes from usermsgs_ctx */
+ ctx->severity = severity;
+ appctx->st0 = CLI_ST_PRINT_UMSG;
+ return 1;
+}
+
+/* updates the CLI's context to log messages stored in thread-local
+ * usermsgs_ctx using error level. usermsgs_ctx will be reset when done.
+ * This is for use in CLI parsers to deal with quick response messages.
+ * No service context needs to be reserved here since nothing is stored.
+ *
+ * Always returns 1.
+ */
+static inline int cli_umsgerr(struct appctx *appctx)
+{
+ appctx->st0 = CLI_ST_PRINT_UMSGERR;
+ return 1;
+}
+
+#endif /* _HAPROXY_CLI_H */
diff --git a/include/haproxy/clock.h b/include/haproxy/clock.h
new file mode 100644
index 0000000..264363e
--- /dev/null
+++ b/include/haproxy/clock.h
@@ -0,0 +1,59 @@
+/*
+ * include/haproxy/clock.h
+ * Exported parts for time-keeping
+ *
+ * Copyright (C) 2000-2021 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CLOCK_H
+#define _HAPROXY_CLOCK_H
+
+#include <sys/time.h>
+#include <haproxy/api.h>
+
+extern struct timeval start_date; /* the process's start date in wall-clock time */
+extern struct timeval ready_date; /* date when the process was considered ready */
+extern ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
+extern volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
+
+extern THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
+extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
+
+uint64_t now_cpu_time_thread(int thr);
+uint64_t now_mono_time(void);
+uint64_t now_mono_time_fast(void);
+uint64_t now_cpu_time(void);
+uint64_t now_cpu_time_fast(void);
+void clock_set_local_source(void);
+void clock_update_local_date(int max_wait, int interrupted);
+void clock_update_global_date();
+void clock_init_process_date(void);
+void clock_init_thread_date(void);
+int clock_setup_signal_timer(void *timer, int sig, int val);
+char *timeofday_as_iso_us(int pad);
+uint clock_report_idle(void);
+void clock_leaving_poll(int timeout, int interrupted);
+void clock_entering_poll(void);
+void clock_adjust_now_offset(void);
+
+/* Convenience wrapper updating the thread-local date first, then the
+ * global one. <max_wait> and <interrupted> are only forwarded to the
+ * local update.
+ */
+static inline void clock_update_date(int max_wait, int interrupted)
+{
+ clock_update_local_date(max_wait, interrupted);
+ clock_update_global_date();
+}
+
+#endif
diff --git a/include/haproxy/compat.h b/include/haproxy/compat.h
new file mode 100644
index 0000000..aa4f952
--- /dev/null
+++ b/include/haproxy/compat.h
@@ -0,0 +1,313 @@
+/*
+ * include/haproxy/compat.h
+ * Operating system compatibility interface.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COMPAT_H
+#define _HAPROXY_COMPAT_H
+
+#include <limits.h>
+#include <unistd.h>
+/* This is needed on Linux for Netfilter includes */
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+
+
+/* These are a few short names for commonly used types whose size and sometimes
+ * signedness depends on the architecture. Be careful not to rely on a few
+ * common but wrong assumptions:
+ * - char is not always signed (ARM, AARCH64, PPC)
+ * - long is not always large enough for a pointer (Windows)
+ * These types are needed with the standard C API (string.h, printf, syscalls).
+ *
+ * When a fixed size is needed (protocol interoperability), better use the
+ * standard types provided by stdint.h:
+ * - size_t : unsigned int of default word size, large enough for any
+ * object in memory
+ * - ssize_t : signed int of default word size, used by some syscalls
+ * - uintptr_t : an unsigned int large enough to store any pointer
+ * - ptrdiff_t : a signed int large enough to hold a distance between 2 ptrs
+ * - int<size>_t : a signed int of <size> bits (8,16,32,64 work everywhere)
+ * - uint<size>_t : an unsigned int of <size> bits
+ */
+typedef signed char schar;
+typedef unsigned char uchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+typedef unsigned long long ullong;
+typedef long long llong;
+
+
+/* set any optional field in a struct to this type to save ifdefs. Its address
+ * will still be valid but it will not reserve any room nor require any
+ * initialization.
+ */
+typedef struct { } empty_t;
+
+// Redefine some limits that are not present everywhere
+#ifndef LLONG_MAX
+# define LLONG_MAX 9223372036854775807LL
+# define LLONG_MIN (-LLONG_MAX - 1LL)
+#endif
+
+#ifndef ULLONG_MAX
+# define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
+#endif
+
+#ifndef LONGBITS
+#define LONGBITS ((unsigned int)sizeof(long) * 8)
+#endif
+
+#ifndef BITS_PER_INT
+#define BITS_PER_INT (8*sizeof(int))
+#endif
+
+#ifndef __WORDSIZE
+# if defined(__SIZEOF_LONG__) && __SIZEOF_LONG__ == 4
+# define __WORDSIZE 32
+# elif defined(__SIZEOF_LONG__) && __SIZEOF_LONG__ == 8
+# define __WORDSIZE 64
+# else
+# error "Unknown machine word size (__WORDSIZE, __SIZEOF_LONG)"
+# endif
+#endif
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+/* this is for libc5 for example */
+#ifndef TCP_NODELAY
+#define TCP_NODELAY 1
+#endif
+
+#ifndef SHUT_RD
+#define SHUT_RD 0
+#endif
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+/* only Linux defines it */
+#ifndef MSG_NOSIGNAL
+#define MSG_NOSIGNAL 0
+#endif
+
+/* AIX does not define MSG_DONTWAIT. We'll define it to zero, and test it
+ * wherever appropriate.
+ */
+#ifndef MSG_DONTWAIT
+#define MSG_DONTWAIT 0
+#endif
+
+/* Only Linux defines MSG_MORE */
+#ifndef MSG_MORE
+#define MSG_MORE 0
+#endif
+
+/* On Linux 2.4 and above, MSG_TRUNC can be used on TCP sockets to drop any
+ * pending data. Let's rely on NETFILTER to detect if this is supported.
+ */
+#ifdef USE_NETFILTER
+#define MSG_TRUNC_CLEARS_INPUT
+#endif
+
+/* Maximum path length, OS-dependent */
+#ifndef MAXPATHLEN
+#define MAXPATHLEN 128
+#endif
+
+/* longest UNIX socket name */
+#ifndef UNIX_MAX_PATH
+#define UNIX_MAX_PATH 108
+#endif
+
+/* On Linux, allows pipes to be resized */
+#ifndef F_SETPIPE_SZ
+#define F_SETPIPE_SZ (1024 + 7)
+#endif
+
+#if defined(USE_TPROXY) && defined(USE_NETFILTER)
+#include <linux/types.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv4.h>
+#endif
+
+/* On Linux, IP_TRANSPARENT and/or IP_FREEBIND generally require a kernel patch */
+#if defined(USE_LINUX_TPROXY)
+#if !defined(IP_FREEBIND)
+#define IP_FREEBIND 15
+#endif /* !IP_FREEBIND */
+#if !defined(IP_TRANSPARENT)
+#define IP_TRANSPARENT 19
+#endif /* !IP_TRANSPARENT */
+#if !defined(IPV6_TRANSPARENT)
+#define IPV6_TRANSPARENT 75
+#endif /* !IPV6_TRANSPARENT */
+#endif /* USE_LINUX_TPROXY */
+
+#if defined(IP_FREEBIND) \
+ || defined(IP_BINDANY) \
+ || defined(IPV6_BINDANY) \
+ || defined(SO_BINDANY) \
+ || defined(IP_TRANSPARENT) \
+ || defined(IPV6_TRANSPARENT)
+#define CONFIG_HAP_TRANSPARENT
+#endif
+
+/* We'll try to enable SO_REUSEPORT on Linux 2.4 and 2.6 if not defined.
+ * There are two families of values depending on the architecture. Those
+ * are at least valid on Linux 2.4 and 2.6, reason why we'll rely on the
+ * USE_NETFILTER define.
+ */
+#if !defined(SO_REUSEPORT) && defined(USE_NETFILTER)
+#if defined(SO_REUSEADDR) && (SO_REUSEADDR == 2)
+#define SO_REUSEPORT 15
+#elif defined(SO_REUSEADDR) && (SO_REUSEADDR == 0x0004)
+#define SO_REUSEPORT 0x0200
+#endif /* SO_REUSEADDR */
+#endif /* SO_REUSEPORT */
+
+/* only Linux defines TCP_FASTOPEN */
+#ifdef USE_TFO
+#ifndef TCP_FASTOPEN
+#define TCP_FASTOPEN 23
+#endif
+
+#ifndef TCP_FASTOPEN_CONNECT
+#define TCP_FASTOPEN_CONNECT 30
+#endif
+#endif
+
+/* If IPv6 is supported, define IN6_IS_ADDR_V4MAPPED() if missing. */
+#if defined(IPV6_TCLASS) && !defined(IN6_IS_ADDR_V4MAPPED)
+#define IN6_IS_ADDR_V4MAPPED(a) \
+((((const uint32_t *) (a))[0] == 0) \
+&& (((const uint32_t *) (a))[1] == 0) \
+&& (((const uint32_t *) (a))[2] == htonl (0xffff)))
+#endif
+
+#if defined(__dietlibc__)
+#include <strings.h>
+#endif
+
+/* crypt_r() has been present in glibc since 2.2 and on FreeBSD since 12.0
+ * (12000002). No other OS makes any mention of it for now. Feel free to add
+ * valid known combinations below if needed to relax the crypt() lock when
+ * using threads.
+ */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2)) \
+ || (defined(__FreeBSD__) && __FreeBSD_version >= 1200002)
+#define HA_HAVE_CRYPT_R
+#endif
+
+/* some backtrace() implementations are broken or incomplete, in this case we
+ * can replace them. We must not do it all the time as some are more accurate
+ * than ours.
+ */
+#ifdef USE_BACKTRACE
+#if defined(__aarch64__)
+/* on aarch64 at least from gcc-4.7.4 to 7.4.1 we only get a single entry, which
+ * is pointless. Ours works though it misses the faulty function itself,
+ * probably due to an alternate stack for the signal handler which does not
+ * create a new frame hence doesn't store the caller's return address.
+ */
+#elif defined(__clang__) && defined(__x86_64__)
+/* this is on FreeBSD: backtraces produced by clang 4.0 to 8.0 don't go
+ * further than the signal handler.
+ */
+#else
+#define HA_HAVE_WORKING_BACKTRACE
+#endif
+#endif
+
+/* dl_iterate_phdr() is available in GLIBC 2.2.4 and up. Let's round up to 2.3.x */
+#if defined(USE_DL) && defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 3)
+#define HA_HAVE_DL_ITERATE_PHDR
+#define HA_HAVE_DUMP_LIBS
+#endif
+
+/* malloc_trim() can be very convenient to reclaim unused memory especially
+ * from huge pattern files. It's available (and really usable) in glibc 2.8 and
+ * above.
+ */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 8))
+#include <malloc.h>
+#define HA_HAVE_MALLOC_TRIM
+#endif
+
+/* glibc 2.26 includes a thread-local cache which makes it fast enough in threads */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 26))
+#include <malloc.h>
+#define HA_HAVE_FAST_MALLOC
+#endif
+
+/* glibc 2.33 provides mallinfo2() that overcomes mallinfo()'s type limitations */
+#if (defined(__GNU_LIBRARY__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 33))
+#include <malloc.h>
+#define HA_HAVE_MALLINFO2
+#endif
+
+/* FreeBSD also has malloc_usable_size() but it requires malloc_np.h */
+#if defined(USE_MEMORY_PROFILING) && defined(__FreeBSD__) && (__FreeBSD_version >= 700002)
+#include <malloc_np.h>
+#endif
+
+/* macOS has a call similar to malloc_usable_size */
+#if defined(__APPLE__)
+#include <malloc/malloc.h>
+#define malloc_usable_size malloc_size
+#define HA_HAVE_MALLOC_ZONE
+#define TCP_KEEPIDLE TCP_KEEPALIVE
+#define TCP_INFO TCP_CONNECTION_INFO
+#define tcp_info tcp_connection_info
+#endif
+
+/* Max number of file descriptors we send in one sendmsg(). Linux seems to be
+ * able to send 253 fds per sendmsg(), however musl is limited to 252, not sure
+ * about the other OSes.
+ */
+#define MAX_SEND_FD 252
+
+/* Some BSD kernels (e.g. FreeBSD) offer the FAST clock source as equivalent
+ * to the Linux COARSE clock source. Alias COARSE to FAST on such systems
+ * when COARSE is not already defined.
+ */
+#if !defined(CLOCK_MONOTONIC_COARSE) && defined(CLOCK_MONOTONIC_FAST)
+#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
+#endif
+
+#endif /* _HAPROXY_COMPAT_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/compiler.h b/include/haproxy/compiler.h
new file mode 100644
index 0000000..d8e8a72
--- /dev/null
+++ b/include/haproxy/compiler.h
@@ -0,0 +1,469 @@
+/*
+ * include/haproxy/compiler.h
+ * This files contains some compiler-specific settings.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COMPILER_H
+#define _HAPROXY_COMPILER_H
+
+/* leave a chance to the compiler to bring its own definitions first; this
+ * will cause cdefs.h to be included on systems which have it.
+ */
+#include <inttypes.h>
+
+#ifdef DEBUG_USE_ABORT
+#include <stdlib.h>
+#endif
+
+/*
+ * Gcc before 3.0 needs [0] to declare a variable-size array
+ */
+#ifndef VAR_ARRAY
+#if defined(__GNUC__) && (__GNUC__ < 3)
+#define VAR_ARRAY 0
+#else
+#define VAR_ARRAY
+#endif
+#endif
+
+/* This is used to test if a macro is defined and equals 1. The principle is
+ * that the macro is passed as a value and its value concatenated to the word
+ * "comma_for_one" to form a new macro name. The macro "comma_for_one1" equals
+ * one comma, which, once used in an argument, will shift all of them by one,
+ * so that we can use this to concatenate both a 1 and a 0 and always pick the
+ * second one.
+ */
+#define comma_for_one1 ,
+#define _____equals_1(x, y, ...) (y)
+#define ____equals_1(x, ...) _____equals_1(x, 0)
+#define ___equals_1(x) ____equals_1(comma_for_one ## x 1)
+#define __equals_1(x) ___equals_1(x)
+
+/* gcc 5 and clang 3 brought __has_attribute(), which is not well documented in
+ * the case of gcc, but is convenient since handled at the preprocessor level.
+ * In both cases it's possible to test for __has_attribute() using ifdef. When
+ * not defined we remap this to the __has_attribute_<name> macro so that we'll
+ * later be able to implement on a per-compiler basis those which are missing,
+ * by defining __has_attribute_<name> to 1.
+ */
+#ifndef __has_attribute
+#define __has_attribute(x) __equals_1(__has_attribute_ ## x)
+#endif
+
+/* The fallthrough attribute arrived with gcc 7, the same version that started
+ * to emit the fallthrough warnings and to parse the comments. Comments do not
+ * manage to stop the warning when preprocessing is split from compiling (e.g.
+ * when building under distcc). Better encourage the use of a __fallthrough
+ * statement instead. There are still limitations in that clang doesn't accept
+ * it after a label; this is the reason why we're always preceding it with an
+ * empty do-while.
+ */
+#if __has_attribute(fallthrough)
+# define __fallthrough do { } while (0); __attribute__((fallthrough))
+#else
+# define __fallthrough do { } while (0)
+#endif
+
+#if !defined(__GNUC__)
+/* Some versions of glibc irresponsibly redefine __attribute__() to empty for
+ * non-gcc compilers, and as such, silently break all constructors with
+ * other compilers. Let's make sure such incompatibilities are detected if any,
+ * or that the attribute is properly enforced.
+ */
+#undef __attribute__
+#define __attribute__(x) __attribute__(x)
+#endif
+
+/* attribute(warning) was added in gcc 4.3 */
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+# define __has_attribute_warning 1
+#endif
+
+/* __attribute__warning(x) does __attribute__((warning(x))) if supported by the
+ * compiler, otherwise __attribute__((deprecated)). Clang supports it since v14
+ * but is a bit capricious in that it refuses a redefinition with a warning
+ * attribute that wasn't there the first time. However it's OK with deprecated(x)
+ * so better use this one. See: https://github.com/llvm/llvm-project/issues/56519
+ */
+#if defined(__clang__)
+# define __attribute__warning(x) __attribute__((deprecated(x)))
+#elif __has_attribute(warning)
+# define __attribute__warning(x) __attribute__((warning(x)))
+#else
+# define __attribute__warning(x) __attribute__((deprecated))
+#endif
+
+/* By default, gcc does not inline large chunks of code, but we want it to
+ * respect our choices.
+ */
+#if !defined(forceinline)
+#if !defined(__GNUC__) || (__GNUC__ < 3)
+#define forceinline inline
+#else
+#define forceinline inline __attribute__((always_inline))
+#endif
+#endif
+
+#ifndef __maybe_unused
+/* silence the "unused" warnings without having to place painful #ifdefs.
+ * For use with variables or functions.
+ */
+#define __maybe_unused __attribute__((unused))
+#endif
+
+/* TCC doesn't support weak attribute, sections etc and needs the more portable
+ * obsolete linker model instead.
+ */
+#if defined(__TINYC__) && !defined(USE_OBSOLETE_LINKER)
+#define USE_OBSOLETE_LINKER 1
+#endif
+
+/* These macros are used to declare a section name for a variable.
+ * WARNING: keep section names short, as MacOS limits them to 16 characters.
+ * The _START and _STOP attributes have to be placed after the start and stop
+ * weak symbol declarations, and are only used by MacOS.
+ */
+#if !defined(USE_OBSOLETE_LINKER)
+
+#ifdef __APPLE__
+#define HA_SECTION(s) __attribute__((__section__("__DATA, " s)))
+#define HA_SECTION_START(s) __asm("section$start$__DATA$" s)
+#define HA_SECTION_STOP(s) __asm("section$end$__DATA$" s)
+#else
+#define HA_SECTION(s) __attribute__((__section__(s)))
+#define HA_SECTION_START(s)
+#define HA_SECTION_STOP(s)
+#endif
+
+#else // obsolete linker below, let's just not force any section
+
+#define HA_SECTION(s)
+#define HA_SECTION_START(s)
+#define HA_SECTION_STOP(s)
+
+#endif // USE_OBSOLETE_LINKER
+
+/* Declare a symbol as weak if possible, otherwise global. Since we don't want to
+ * error on multiple definitions, the symbol is declared weak. On MacOS ".weak"
+ * does not exist and we must continue to use ".globl" instead. Note that
+ * ".global" is to be avoided on other platforms as llvm complains about it
+ * being used for symbols declared as weak elsewhere in the code. It may or may
+ * not work depending on linkers and assemblers, this is only for advanced use
+ * anyway (and most likely it will only work with !USE_OBSOLETE_LINKER).
+ */
+#if defined(__APPLE__)
+# define __HA_WEAK(sym) __asm__(".globl " #sym)
+#else
+# define __HA_WEAK(sym) __asm__(".weak " #sym)
+#endif
+#define HA_WEAK(sym) __HA_WEAK(sym)
+
+/* declare a symbol as global */
+#define __HA_GLOBL(sym) __asm__(".globl " #sym)
+#define HA_GLOBL(sym) __HA_GLOBL(sym)
+
+/* use this attribute on a variable to move it to the read_mostly section */
+#if !defined(__read_mostly)
+#define __read_mostly HA_SECTION("read_mostly")
+#endif
+
+/* This allows gcc to know that some locations are never reached, for example
+ * after a longjmp() in the Lua code, hence that some errors caught by such
+ * methods cannot propagate further. This is important with gcc versions 6 and
+ * above which can more aggressively detect null dereferences. The builtin
+ * below was introduced in gcc 4.5, and before it we didn't care.
+ */
+#ifdef DEBUG_USE_ABORT
+#define my_unreachable() abort()
+#else
+#if defined(__GNUC__) && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+#define my_unreachable() __builtin_unreachable()
+#else
+#define my_unreachable() do { } while (1)
+#endif
+#endif
+
+/* This prevents the compiler from folding multiple identical code paths into a
+ * single one, by adding a dependency on the line number in the path. This may
+ * typically happen on function tails, or purposely placed abort() before an
+ * unreachable() statement, due to the compiler performing an Identical Code
+ * Folding optimization. This macro is aimed at helping with code tracing in
+ * crash dumps and may also be used for specific optimizations. One known case
+ * is gcc-4.7 and 4.8 which aggressively fold multiple ABORT_NOW() exit points
+ * and which causes wrong line numbers to be reported by the debugger (note
+ * that even newer compilers do this when using abort()). Please keep in mind
+ * that nothing prevents the compiler from folding the code after that point,
+ * but at least it will not fold the code before.
+ */
+#define DO_NOT_FOLD() do { asm volatile("" :: "i"(__LINE__)); } while (0)
+
+/* This macro may be used to block constant propagation that lets the compiler
+ * detect a possible NULL dereference on a variable resulting from an explicit
+ * assignment in an impossible check. Sometimes a function is called which does
+ * safety checks and returns NULL if safe conditions are not met. The place
+ * where it's called cannot hit this condition and dereferencing the pointer
+ * without first checking it will make the compiler emit a warning about a
+ * "potential null pointer dereference" which is hard to work around. This
+ * macro "washes" the pointer and prevents the compiler from emitting tests
+ * branching to undefined instructions. It may only be used when the developer
+ * is absolutely certain that the conditions are guaranteed and that the
+ * pointer passed in argument cannot be NULL by design.
+ */
+#define ALREADY_CHECKED(p) do { asm("" : "=rm"(p) : "0"(p)); } while (0)
+
+/* same as above but to be used to pass the input value to the output but
+ * without letting the compiler know about its initial properties.
+ */
+#define DISGUISE(v) ({ typeof(v) __v = (v); ALREADY_CHECKED(__v); __v; })
+
+/* Implements a static event counter where it's used. This is typically made to
+ * report some warnings only once, either during boot or at runtime. It only
+ * returns true on the very first call, and zero later. It's thread-safe and
+ * uses a single byte of memory per call place. It relies on the atomic xchg
+ * defined in atomic.h which is also part of the common API.
+ */
+#define ONLY_ONCE() ({ static char __cnt; !_HA_ATOMIC_XCHG(&__cnt, 1); })
+
+/* makes a string from a constant (number or macro), avoids the need for
+ * printf("%d") format just to dump a setting limit or value in an error
+ * message. We use two levels so that macros are resolved.
+ */
+#define _TOSTR(x) #x
+#define TOSTR(x) _TOSTR(x)
+
+/*
+ * Gcc >= 3 provides the ability for the program to give hints to the
+ * compiler about what branch of an if is most likely to be taken. This
+ * helps the compiler produce the most compact critical paths, which is
+ * generally better for the cache and to reduce the number of jumps.
+ */
+#if !defined(likely)
+#if !defined(__GNUC__) || (__GNUC__ < 3)
+#define __builtin_expect(x,y) (x)
+#define likely(x) (x)
+#define unlikely(x) (x)
+#else
+#define likely(x) (__builtin_expect((x) != 0, 1))
+#define unlikely(x) (__builtin_expect((x) != 0, 0))
+#endif
+#endif
+
+#ifndef __GNUC_PREREQ__
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#define __GNUC_PREREQ__(ma, mi) \
+ (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi))
+#else
+#define __GNUC_PREREQ__(ma, mi) 0
+#endif
+#endif
+
+#ifndef offsetof
+#if __GNUC_PREREQ__(4, 1)
+#define offsetof(type, field) __builtin_offsetof(type, field)
+#else
+#define offsetof(type, field) \
+ ((size_t)(uintptr_t)((const volatile void *)&((type *)0)->field))
+#endif
+#endif
+
+/* Linux-like "container_of". It returns a pointer to the structure of type
+ * <type> which has its member <name> stored at address <ptr>.
+ * NOTE(review): relies on the GCC extension permitting arithmetic on void *
+ * (counted in bytes), and on the classic null-pointer offsetof idiom.
+ */
+#ifndef container_of
+#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
+#endif
+
+/* returns a pointer to the structure of type <type> which has its member <name>
+ * stored at address <ptr>, unless <ptr> is 0, in which case 0 is returned.
+ * Same GCC void*-arithmetic and null-pointer offsetof notes as container_of()
+ * apply; <ptr> is evaluated only once via the local __p.
+ */
+#ifndef container_of_safe
+#define container_of_safe(ptr, type, name) \
+	({ void *__p = (ptr); \
+		__p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
+	})
+#endif
+
+
+/* From gcc 6 and above, enum values may have attributes */
+#if __GNUC_PREREQ__(6, 0)
+#define ENUM_ATTRIBUTE(x) __attribute__(x)
+#else
+#define ENUM_ATTRIBUTE(x)
+#endif
+
+/* Some architectures have a double-word CAS, sometimes even dual-8 bytes.
+ * Some architectures support unaligned accesses, others are fine with them
+ * but only for non-atomic operations. Also mention those supporting unaligned
+ * accesses and being little endian, and those where unaligned accesses are
+ * known to be fast (almost as fast as aligned ones).
+ */
+#if defined(__x86_64__)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_LE64
+#define HA_UNALIGNED_FAST
+#define HA_UNALIGNED_ATOMIC
+#define HA_HAVE_CAS_DW
+#define HA_CAS_IS_8B
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_ATOMIC
+#elif defined (__aarch64__) || defined(__ARM_ARCH_8A)
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_LE64
+#define HA_UNALIGNED_FAST
+#define HA_HAVE_CAS_DW
+#define HA_CAS_IS_8B
+#elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
+#define HA_UNALIGNED
+#define HA_UNALIGNED_LE
+#define HA_UNALIGNED_FAST
+#define HA_HAVE_CAS_DW
+#endif
+
+/*********************** IMPORTANT NOTE ABOUT ALIGNMENT **********************\
+ * Alignment works fine for variables. It also works on types and struct *
+ * members by propagating the alignment to the container struct itself, *
+ * but this requires that variables of the affected type are properly *
+ * aligned themselves. While regular variables will always abide, those *
+ * allocated using malloc() will not! Most platforms provide posix_memalign()*
+ * for this, but it's not available everywhere. As such one ought not to use *
+ * these alignment declarations inside structures that are dynamically *
+ * allocated. If the purpose is only to avoid false sharing of cache lines *
+ * for multi_threading, see THREAD_PAD() below. *
+\*****************************************************************************/
+
+/* sets alignment for current field or variable */
+#ifndef ALIGNED
+#define ALIGNED(x) __attribute__((aligned(x)))
+#endif
+
+/* sets alignment only on architectures preventing unaligned atomic accesses */
+#ifndef MAYBE_ALIGNED
+#ifndef HA_UNALIGNED
+#define MAYBE_ALIGNED(x) ALIGNED(x)
+#else
+#define MAYBE_ALIGNED(x)
+#endif
+#endif
+
+/* sets alignment only on architectures preventing unaligned atomic accesses */
+#ifndef ATOMIC_ALIGNED
+#ifndef HA_UNALIGNED_ATOMIC
+#define ATOMIC_ALIGNED(x) ALIGNED(x)
+#else
+#define ATOMIC_ALIGNED(x)
+#endif
+#endif
+
+/* sets alignment for current field or variable only when threads are enabled.
+ * Typically used to respect cache line alignment to avoid false sharing.
+ */
+#ifndef THREAD_ALIGNED
+#ifdef USE_THREAD
+#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
+#else
+#define THREAD_ALIGNED(x)
+#endif
+#endif
+
+/* add a mandatory alignment for next fields in a structure */
+#ifndef ALWAYS_ALIGN
+#define ALWAYS_ALIGN(x) union { } ALIGNED(x)
+#endif
+
+/* add an optional alignment for next fields in a structure, only for archs
+ * which do not support unaligned accesses.
+ */
+#ifndef MAYBE_ALIGN
+#ifndef HA_UNALIGNED
+#define MAYBE_ALIGN(x) union { } ALIGNED(x)
+#else
+#define MAYBE_ALIGN(x)
+#endif
+#endif
+
+/* add an optional alignment for next fields in a structure, only for archs
+ * which do not support unaligned accesses for atomic operations.
+ */
+#ifndef ATOMIC_ALIGN
+#ifndef HA_UNALIGNED_ATOMIC
+#define ATOMIC_ALIGN(x) union { } ALIGNED(x)
+#else
+#define ATOMIC_ALIGN(x)
+#endif
+#endif
+
+/* add an optional alignment for next fields in a structure, only when threads
+ * are enabled. Typically used to respect cache line alignment to avoid false
+ * sharing.
+ */
+#ifndef THREAD_ALIGN
+#ifdef USE_THREAD
+#define THREAD_ALIGN(x) union { } ALIGNED(x)
+#else
+#define THREAD_ALIGN(x)
+#endif
+#endif
+
+/* add optional padding of the specified size between fields in a structure,
+ * only when threads are enabled. This is used to avoid false sharing of cache
+ * lines for dynamically allocated structures which cannot guarantee alignment.
+ */
+#ifndef THREAD_PAD
+# ifdef USE_THREAD
+# define __THREAD_PAD(x,l) char __pad_##l[x]
+# define _THREAD_PAD(x,l) __THREAD_PAD(x, l)
+# define THREAD_PAD(x) _THREAD_PAD(x, __LINE__)
+# else
+# define THREAD_PAD(x)
+# endif
+#endif
+
+/* The THREAD_LOCAL type attribute defines thread-local storage and is defined
+ * to __thread when threads are enabled or empty when disabled.
+ */
+#ifdef USE_THREAD
+#define THREAD_LOCAL __thread
+#else
+#define THREAD_LOCAL
+#endif
+
+/* The __decl_thread() statement shows the argument when threads are enabled
+ * or hides it when disabled. The purpose is to condition the presence of some
+ * variables or struct members to the fact that threads are enabled, without
+ * having to enclose them inside a #ifdef USE_THREAD/#endif clause.
+ */
+#ifdef USE_THREAD
+#define __decl_thread(decl) decl
+#else
+#define __decl_thread(decl)
+#endif
+
+/* clang has a __has_feature() macro which reports true/false on a number of
+ * internally supported features. Let's make sure this macro is always defined
+ * and returns zero when not supported.
+ */
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#endif /* _HAPROXY_COMPILER_H */
diff --git a/include/haproxy/compression-t.h b/include/haproxy/compression-t.h
new file mode 100644
index 0000000..b8f118b
--- /dev/null
+++ b/include/haproxy/compression-t.h
@@ -0,0 +1,109 @@
+/*
+ * include/haproxy/compression-t.h
+ * This file defines everything related to compression.
+ *
+ * Copyright 2012 Exceliance, David Du Colombier <dducolombier@exceliance.fr>
+ *                               William Lallemand <wlallemand@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COMP_T_H
+#define _HAPROXY_COMP_T_H
+
+#if defined(USE_SLZ)
+#ifdef USE_ZLIB
+#error "Cannot build with both USE_SLZ and USE_ZLIB at the same time."
+#endif
+#include <import/slz.h>
+#elif defined(USE_ZLIB)
+#include <zlib.h>
+#endif
+
+#include <haproxy/buf-t.h>
+
+/* Direction index */
+
+#define COMP_DIR_REQ 0
+#define COMP_DIR_RES 1
+
+/* Compression flags */
+
+#define COMP_FL_OFFLOAD 0x00000001 /* Compression offload */
+#define COMP_FL_DIR_REQ 0x00000002 /* Compress requests */
+#define COMP_FL_DIR_RES 0x00000004 /* Compress responses */
+
+/* Compression configuration. Note the asymmetry: responses keep a list of
+ * candidate algorithms while requests use a single fixed one; eligible MIME
+ * types are kept per direction.
+ */
+struct comp {
+	struct comp_algo *algos_res; /* Algos available for response */
+	struct comp_algo *algo_req;  /* Algo to use for request */
+	struct comp_type *types_req; /* Types to be compressed for requests */
+	struct comp_type *types_res; /* Types to be compressed for responses */
+	unsigned int flags;          /* COMP_FL_* (offload, request/response direction) */
+};
+
+/* Compression context. Its contents depend on which library was compiled in;
+ * USE_SLZ and USE_ZLIB are mutually exclusive (enforced at the top of this
+ * file).
+ */
+struct comp_ctx {
+#if defined(USE_SLZ)
+	struct slz_stream strm;
+	const void *direct_ptr; /* NULL or pointer to beginning of data */
+	int direct_len;         /* length of direct_ptr if not NULL */
+	struct buffer queued;   /* if not NULL, data already queued */
+#elif defined(USE_ZLIB)
+	z_stream strm; /* zlib stream */
+	/* NOTE(review): the five pointers below look like pre-sized memory areas
+	 * handed to zlib's internal structures (deflate state, window, prev/head
+	 * hash tables, pending buffer) -- confirm against the zlib allocator
+	 * wrapper in the compression code.
+	 */
+	void *zlib_deflate_state;
+	void *zlib_window;
+	void *zlib_prev;
+	void *zlib_pending_buf;
+	void *zlib_head;
+#endif
+	int cur_lvl;            /* compression level, cf. comp_algo->init(..., level) */
+};
+
+/* Thanks to MSIE/IIS, the "deflate" name is ambiguous, as according to the RFC
+ * it's a zlib-wrapped deflate stream, but MSIE only understands a raw deflate
+ * stream. For this reason some people prefer to emit a raw deflate stream on
+ * "deflate" and we'll need two algos for the same name, they are distinguished
+ * with the config name.
+ */
+struct comp_algo {
+	char *cfg_name;  /* config name */
+	int cfg_name_len;
+
+	char *ua_name;   /* name for the user-agent */
+	int ua_name_len;
+
+	/* allocate and initialize a context for this algo at level <level> */
+	int (*init)(struct comp_ctx **comp_ctx, int level);
+	/* compress <in_len> bytes from <in_data> into <out> */
+	int (*add_data)(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
+	/* flush pending data into <out>; finish presumably marks end of stream
+	 * -- NOTE(review): confirm exact flush/finish semantics in the
+	 * per-library implementations.
+	 */
+	int (*flush)(struct comp_ctx *comp_ctx, struct buffer *out);
+	int (*finish)(struct comp_ctx *comp_ctx, struct buffer *out);
+	/* release the context and reset *comp_ctx */
+	int (*end)(struct comp_ctx **comp_ctx);
+	struct comp_algo *next; /* simply-linked list of registered algos */
+};
+
+/* One content type eligible for compression, in a simply-linked list */
+struct comp_type {
+	char *name;             /* type name (e.g. from config) */
+	int name_len;           /* cached strlen(name) */
+	struct comp_type *next;
+};
+
+
+#endif /* _HAPROXY_COMP_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
+
diff --git a/include/haproxy/compression.h b/include/haproxy/compression.h
new file mode 100644
index 0000000..851ea23
--- /dev/null
+++ b/include/haproxy/compression.h
@@ -0,0 +1,44 @@
+/*
+ * include/haproxy/compression.h
+ * This file defines function prototypes for compression.
+ *
+ * Copyright 2012 (C) Exceliance, David Du Colombier <dducolombier@exceliance.fr>
+ * William Lallemand <wlallemand@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COMP_H
+#define _HAPROXY_COMP_H
+
+#include <haproxy/compression-t.h>
+
+extern unsigned int compress_min_idle;
+
+int comp_append_type(struct comp_type **types, const char *type);
+int comp_append_algo(struct comp_algo **algos, const char *algo);
+
+#ifdef USE_ZLIB
+extern long zlib_used_memory;
+#endif /* USE_ZLIB */
+
+#endif /* _HAPROXY_COMP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/connection-t.h b/include/haproxy/connection-t.h
new file mode 100644
index 0000000..2619fd6
--- /dev/null
+++ b/include/haproxy/connection-t.h
@@ -0,0 +1,722 @@
+/*
+ * include/haproxy/connection-t.h
+ * This file describes the connection struct and associated constants.
+ *
+ * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CONNECTION_T_H
+#define _HAPROXY_CONNECTION_T_H
+
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+
+#include <import/ebtree-t.h>
+#include <import/ist.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/port_range-t.h>
+#include <haproxy/protocol-t.h>
+#include <haproxy/show_flags-t.h>
+#include <haproxy/thread-t.h>
+
+/* referenced below */
+struct connection;
+struct stconn;
+struct sedesc;
+struct cs_info;
+struct buffer;
+struct proxy;
+struct server;
+struct session;
+struct pipe;
+struct quic_conn;
+struct bind_conf;
+struct qcs;
+struct ssl_sock_ctx;
+
+/* Note: subscribing to these events is only valid after the caller has really
+ * attempted to perform the operation, and failed to proceed or complete.
+ */
+enum sub_event_type {
+ SUB_RETRY_RECV = 0x00000001, /* Schedule the tasklet when we can attempt to recv again */
+ SUB_RETRY_SEND = 0x00000002, /* Schedule the tasklet when we can attempt to send again */
+};
+
+/* For each direction, we have a CO_FL_XPRT_<DIR>_ENA flag, which
+ * indicates if read or write is desired in that direction for the respective
+ * layers. The current status corresponding to the current layer being used is
+ * remembered in the CO_FL_XPRT_<DIR>_ENA flag. The need to poll (ie receipt of
+ * EAGAIN) is remembered at the file descriptor level so that even when the
+ * activity is stopped and restarted, we still remember whether it was needed
+ * to poll before attempting the I/O.
+ *
+ * The FD state is updated according to CO_FL_XPRT_<DIR>_ENA in
+ * conn_cond_update_polling().
+ */
+
+/* flags for use in connection->flags. Please also update the conn_show_flags()
+ * function below in case of changes.
+ */
+enum {
+ CO_FL_NONE = 0x00000000, /* Just for initialization purposes */
+
+ /* Do not change these values without updating conn_*_poll_changes() ! */
+ CO_FL_SAFE_LIST = 0x00000001, /* 0 = not in any list, 1 = in safe_list */
+ CO_FL_IDLE_LIST = 0x00000002, /* 2 = in idle_list, 3 = invalid */
+ CO_FL_LIST_MASK = 0x00000003, /* Is the connection in any server-managed list ? */
+
+ CO_FL_REVERSED = 0x00000004, /* connection has been reversed to backend / reversed and accepted on frontend */
+	CO_FL_ACT_REVERSING = 0x00000008, /* connection has been reversed to frontend but not yet accepted */
+
+ /* unused : 0x00000010 */
+ /* unused : 0x00000020 */
+ /* unused : 0x00000040, 0x00000080 */
+
+ /* These flags indicate whether the Control and Transport layers are initialized */
+ CO_FL_CTRL_READY = 0x00000100, /* FD was registered, fd_delete() needed */
+ CO_FL_XPRT_READY = 0x00000200, /* xprt_start() done, xprt can be used */
+
+ CO_FL_WANT_DRAIN = 0x00000400, /* try to drain pending data when closing */
+
+ /* This flag is used by data layers to indicate they had to stop
+ * receiving data because a buffer was full. The connection handler
+ * clears it before first calling the I/O and data callbacks.
+ */
+ CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
+
+ /* These flags are used to report whether the from/to addresses are set or not */
+ /* unused: 0x00001000 */
+ /* unused: 0x00002000 */
+
+ CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
+ CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
+ CO_FL_SOCKS4_SEND = 0x00010000, /* handshaking with upstream SOCKS4 proxy, going to send the handshake */
+ CO_FL_SOCKS4_RECV = 0x00020000, /* handshaking with upstream SOCKS4 proxy, going to check if handshake succeed */
+
+ /* flags used to remember what shutdown have been performed/reported */
+ CO_FL_SOCK_RD_SH = 0x00040000, /* SOCK layer was notified about shutr/read0 */
+ CO_FL_SOCK_WR_SH = 0x00080000, /* SOCK layer asked for shutw */
+
+ /* flags used to report connection errors or other closing conditions */
+ CO_FL_ERROR = 0x00100000, /* a fatal error was reported */
+ CO_FL_NOTIFY_DONE = 0x001C0000, /* any xprt shut/error flags above needs to be reported */
+
+ CO_FL_FDLESS = 0x00200000, /* this connection doesn't use any FD (e.g. QUIC) */
+
+ /* flags used to report connection status updates */
+ CO_FL_WAIT_L4_CONN = 0x00400000, /* waiting for L4 to be connected */
+ CO_FL_WAIT_L6_CONN = 0x00800000, /* waiting for L6 to be connected (eg: SSL) */
+ CO_FL_WAIT_L4L6 = 0x00C00000, /* waiting for L4 and/or L6 to be connected */
+
+ /* All the flags below are used for connection handshakes. Any new
+ * handshake should be added after this point, and CO_FL_HANDSHAKE
+ * should be updated.
+ */
+ CO_FL_SEND_PROXY = 0x01000000, /* send a valid PROXY protocol header */
+ CO_FL_ACCEPT_PROXY = 0x02000000, /* receive a valid PROXY protocol header */
+ CO_FL_ACCEPT_CIP = 0x04000000, /* receive a valid NetScaler Client IP header */
+
+ /* below we have all handshake flags grouped into one */
+ CO_FL_HANDSHAKE = CO_FL_SEND_PROXY | CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP | CO_FL_SOCKS4_SEND | CO_FL_SOCKS4_RECV,
+ CO_FL_WAIT_XPRT = CO_FL_WAIT_L4_CONN | CO_FL_HANDSHAKE | CO_FL_WAIT_L6_CONN,
+
+ CO_FL_SSL_WAIT_HS = 0x08000000, /* wait for an SSL handshake to complete */
+
+ /* This connection may not be shared between clients */
+ CO_FL_PRIVATE = 0x10000000,
+
+ /* This flag is used to know that a PROXY protocol header was sent by the client */
+ CO_FL_RCVD_PROXY = 0x20000000,
+
+ /* The connection is unused by its owner */
+ CO_FL_SESS_IDLE = 0x40000000,
+
+ /* This last flag indicates that the transport layer is used (for instance
+ * by logs) and must not be cleared yet. The last call to conn_xprt_close()
+ * must be done after clearing this flag.
+ */
+ CO_FL_XPRT_TRACKED = 0x80000000,
+
+ /* below we have all SOCKS handshake flags grouped into one */
+ CO_FL_SOCKS4 = CO_FL_SOCKS4_SEND | CO_FL_SOCKS4_RECV,
+};
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ * Only single-bit flags are dumped; composite masks (CO_FL_LIST_MASK,
+ * CO_FL_HANDSHAKE, ...) are intentionally absent from the list. The _(0) and
+ * _(~0U) prologue/epilogue markers are interpreted by __APPEND_FLAG (see
+ * haproxy/show_flags-t.h).
+ */
+static forceinline char *conn_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+	/* prologue */
+	_(0);
+	/* flags */
+	_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
+	_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_XPRT_READY,
+	_(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS, _(CO_FL_EARLY_DATA,
+	_(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH, _(CO_FL_SOCK_WR_SH,
+	_(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN, _(CO_FL_WAIT_L6_CONN,
+	_(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP, _(CO_FL_SSL_WAIT_HS,
+	_(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE, _(CO_FL_XPRT_TRACKED
+	))))))))))))))))))))))))));
+	/* epilogue */
+	_(~0U);
+	return buf;
+#undef _
+}
+
+/* Possible connection error codes.
+ * Warning: Do not reorder the codes, they are fetchable through the
+ * "fc_err" sample fetch. If a new code is added, please add an error label
+ * in conn_err_code_str and in the "fc_err_str" sample fetch documentation.
+ */
+enum {
+ CO_ER_NONE, /* no error */
+
+ CO_ER_CONF_FDLIM, /* reached process's configured FD limitation */
+ CO_ER_PROC_FDLIM, /* reached process's FD limitation */
+ CO_ER_SYS_FDLIM, /* reached system's FD limitation */
+ CO_ER_SYS_MEMLIM, /* reached system buffers limitation */
+ CO_ER_NOPROTO, /* protocol not supported */
+ CO_ER_SOCK_ERR, /* other socket error */
+
+ CO_ER_PORT_RANGE, /* source port range exhausted */
+ CO_ER_CANT_BIND, /* can't bind to source address */
+ CO_ER_FREE_PORTS, /* no more free ports on the system */
+ CO_ER_ADDR_INUSE, /* local address already in use */
+
+ CO_ER_PRX_EMPTY, /* nothing received in PROXY protocol header */
+ CO_ER_PRX_ABORT, /* client abort during PROXY protocol header */
+ CO_ER_PRX_TIMEOUT, /* timeout while waiting for a PROXY header */
+ CO_ER_PRX_TRUNCATED, /* truncated PROXY protocol header */
+ CO_ER_PRX_NOT_HDR, /* not a PROXY protocol header */
+ CO_ER_PRX_BAD_HDR, /* bad PROXY protocol header */
+ CO_ER_PRX_BAD_PROTO, /* unsupported protocol in PROXY header */
+
+ CO_ER_CIP_EMPTY, /* nothing received in NetScaler Client IP header */
+ CO_ER_CIP_ABORT, /* client abort during NetScaler Client IP header */
+ CO_ER_CIP_TIMEOUT, /* timeout while waiting for a NetScaler Client IP header */
+ CO_ER_CIP_TRUNCATED, /* truncated NetScaler Client IP header */
+ CO_ER_CIP_BAD_MAGIC, /* bad magic number in NetScaler Client IP header */
+ CO_ER_CIP_BAD_PROTO, /* unsupported protocol in NetScaler Client IP header */
+
+ CO_ER_SSL_EMPTY, /* client closed during SSL handshake */
+ CO_ER_SSL_ABORT, /* client abort during SSL handshake */
+ CO_ER_SSL_TIMEOUT, /* timeout during SSL handshake */
+ CO_ER_SSL_TOO_MANY, /* too many SSL connections */
+ CO_ER_SSL_NO_MEM, /* no more memory to allocate an SSL connection */
+ CO_ER_SSL_RENEG, /* forbidden client renegotiation */
+ CO_ER_SSL_CA_FAIL, /* client cert verification failed in the CA chain */
+ CO_ER_SSL_CRT_FAIL, /* client cert verification failed on the certificate */
+ CO_ER_SSL_MISMATCH, /* Server presented an SSL certificate different from the configured one */
+ CO_ER_SSL_MISMATCH_SNI, /* Server presented an SSL certificate different from the expected one */
+ CO_ER_SSL_HANDSHAKE, /* SSL error during handshake */
+ CO_ER_SSL_HANDSHAKE_HB, /* SSL error during handshake with heartbeat present */
+ CO_ER_SSL_KILLED_HB, /* Stopped a TLSv1 heartbeat attack (CVE-2014-0160) */
+ CO_ER_SSL_NO_TARGET, /* unknown target (not client nor server) */
+ CO_ER_SSL_EARLY_FAILED, /* Server refused early data */
+
+ CO_ER_SOCKS4_SEND, /* SOCKS4 Proxy write error during handshake */
+ CO_ER_SOCKS4_RECV, /* SOCKS4 Proxy read error during handshake */
+ CO_ER_SOCKS4_DENY, /* SOCKS4 Proxy deny the request */
+ CO_ER_SOCKS4_ABORT, /* SOCKS4 Proxy handshake aborted by server */
+
+	CO_ERR_SSL_FATAL,        /* SSL fatal error during an SSL_read or SSL_write */
+
+ CO_ER_REVERSE, /* Error during reverse connect */
+};
+
+/* error return codes for accept_conn() */
+enum {
+ CO_AC_NONE = 0, /* no error, valid connection returned */
+ CO_AC_DONE, /* reached the end of the queue (typically EAGAIN) */
+ CO_AC_RETRY, /* late signal delivery or anything requiring the caller to try again */
+ CO_AC_YIELD, /* short-lived limitation that requires a short pause */
+ CO_AC_PAUSE, /* long-lived issue (resource/memory allocation error, paused FD) */
+ CO_AC_PERMERR, /* permanent, non-recoverable error (e.g. closed listener socket) */
+};
+
+/* source address settings for outgoing connections */
+enum {
+ /* Tproxy exclusive values from 0 to 7 */
+ CO_SRC_TPROXY_ADDR = 0x0001, /* bind to this non-local address when connecting */
+ CO_SRC_TPROXY_CIP = 0x0002, /* bind to the client's IP address when connecting */
+ CO_SRC_TPROXY_CLI = 0x0003, /* bind to the client's IP+port when connecting */
+ CO_SRC_TPROXY_DYN = 0x0004, /* bind to a dynamically computed non-local address */
+ CO_SRC_TPROXY_MASK = 0x0007, /* bind to a non-local address when connecting */
+
+ CO_SRC_BIND = 0x0008, /* bind to a specific source address when connecting */
+};
+
+/* flags that can be passed to xprt->rcv_buf() and mux->rcv_buf() */
+enum {
+ CO_RFL_BUF_WET = 0x0001, /* Buffer still has some output data present */
+ CO_RFL_BUF_FLUSH = 0x0002, /* Flush mux's buffers but don't read more data */
+ CO_RFL_READ_ONCE = 0x0004, /* don't loop even if the request/response is small */
+ CO_RFL_KEEP_RECV = 0x0008, /* Instruct the mux to still wait for read events */
+ CO_RFL_BUF_NOT_STUCK = 0x0010, /* Buffer is not stuck. Optims are possible during data copy */
+ CO_RFL_MAY_SPLICE = 0x0020, /* The producer can use the kernel splicing */
+};
+
+/* flags that can be passed to xprt->snd_buf() and mux->snd_buf() */
+enum {
+ CO_SFL_MSG_MORE = 0x0001, /* More data to come afterwards */
+ CO_SFL_STREAMER = 0x0002, /* Producer is continuously streaming data */
+};
+
+/* mux->shutr() modes */
+enum co_shr_mode {
+ CO_SHR_DRAIN = 0, /* read shutdown, drain any extra stuff */
+ CO_SHR_RESET = 1, /* read shutdown, reset any extra stuff */
+};
+
+/* mux->shutw() modes */
+enum co_shw_mode {
+ CO_SHW_NORMAL = 0, /* regular write shutdown */
+ CO_SHW_SILENT = 1, /* imminent close, don't notify peer */
+};
+
+/* known transport layers (for ease of lookup) */
+enum {
+ XPRT_RAW = 0,
+ XPRT_SSL = 1,
+ XPRT_HANDSHAKE = 2,
+ XPRT_QUIC = 3,
+ XPRT_ENTRIES /* must be last one */
+};
+
+/* MUX-specific flags */
+enum {
+ MX_FL_NONE = 0x00000000,
+ MX_FL_HTX = 0x00000001, /* set if it is an HTX multiplexer */
+	MX_FL_HOL_RISK   = 0x00000002, /* set if the protocol is subject to head-of-line blocking on server */
+ MX_FL_NO_UPG = 0x00000004, /* set if mux does not support any upgrade */
+ MX_FL_FRAMED = 0x00000008, /* mux working on top of a framed transport layer (QUIC) */
+ MX_FL_REVERSABLE = 0x00000010, /* mux supports connection reversal */
+};
+
+/* PROTO token registration */
+enum proto_proxy_mode {
+ PROTO_MODE_NONE = 0,
+ PROTO_MODE_TCP = 1 << 0, // must not be changed!
+ PROTO_MODE_HTTP = 1 << 1, // must not be changed!
+ PROTO_MODE_ANY = PROTO_MODE_TCP | PROTO_MODE_HTTP,
+};
+
+enum proto_proxy_side {
+ PROTO_SIDE_NONE = 0,
+ PROTO_SIDE_FE = 1, // same as PR_CAP_FE
+ PROTO_SIDE_BE = 2, // same as PR_CAP_BE
+ PROTO_SIDE_BOTH = PROTO_SIDE_FE | PROTO_SIDE_BE,
+};
+
+/* ctl command used by mux->ctl() */
+enum mux_ctl_type {
+	MUX_CTL_STATUS, /* Expects an int as output, sets it to a combination of MUX_CTL_STATUS flags */
+	MUX_CTL_EXIT_STATUS, /* Expects an int as output, sets the mux exit/error/HTTP status, if known or 0 */
+ MUX_CTL_REVERSE_CONN, /* Notify about an active reverse connection accepted. */
+ MUX_CTL_SUBS_RECV, /* Notify the mux it must wait for read events again */
+};
+
+/* sctl command used by mux->sctl() */
+enum mux_sctl_type {
+	MUX_SCTL_SID, /* Return the mux stream ID as output, as a signed 64-bit integer */
+};
+
+/* response for ctl MUX_STATUS */
+#define MUX_STATUS_READY (1 << 0)
+
+enum mux_exit_status {
+ MUX_ES_SUCCESS, /* Success */
+ MUX_ES_INVALID_ERR, /* invalid input */
+ MUX_ES_TOUT_ERR, /* timeout */
+ MUX_ES_NOTIMPL_ERR, /* not-implemented error */
+ MUX_ES_INTERNAL_ERR, /* internal error */
+ MUX_ES_UNKNOWN /* unknown status (must be the last) */
+};
+
+/* socks4 response length */
+#define SOCKS4_HS_RSP_LEN 8
+
+/* socks4 upstream proxy definitions */
+struct socks4_request {
+ uint8_t version; /* SOCKS version number, 1 byte, must be 0x04 for this version */
+ uint8_t command; /* 0x01 = establish a TCP/IP stream connection */
+ uint16_t port; /* port number, 2 bytes (in network byte order) */
+ uint32_t ip; /* IP address, 4 bytes (in network byte order) */
+ char user_id[8]; /* the user ID string, variable length, terminated with a null (0x00); Using "HAProxy\0" */
+};
+
+/* Describes a set of subscriptions. Multiple events may be registered at the
+ * same time. The callee should assume everything not pending for completion is
+ * implicitly possible. It's illegal to change the tasklet if events are still
+ * registered.
+ */
+struct wait_event {
+ struct tasklet *tasklet;
+ int events; /* set of enum sub_event_type above */
+};
+
+/* A connection handle is how we differentiate two connections on the lower
+ * layers. It usually is a file descriptor but can be a connection id. The
+ * CO_FL_FDLESS flag indicates which one is relevant.
+ */
+union conn_handle {
+ struct quic_conn *qc; /* Only present if this connection is a QUIC one (CO_FL_FDLESS=1) */
+ int fd; /* file descriptor, for regular sockets (CO_FL_FDLESS=0) */
+};
+
+/* xprt_ops describes transport-layer operations for a connection. They
+ * generally run over a socket-based control layer, but not always. Some
+ * of them are used for data transfer with the upper layer (rcv_*, snd_*)
+ * and the other ones are used to setup and release the transport layer.
+ */
+struct xprt_ops {
+ size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags); /* recv callback */
+ size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags); /* send callback */
+ int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
+ int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* send-to-pipe callback */
+ void (*shutr)(struct connection *conn, void *xprt_ctx, int); /* shutr function */
+ void (*shutw)(struct connection *conn, void *xprt_ctx, int); /* shutw function */
+ void (*close)(struct connection *conn, void *xprt_ctx); /* close the transport layer */
+ int (*init)(struct connection *conn, void **ctx); /* initialize the transport layer */
+ int (*start)(struct connection *conn, void *ctx); /* Start the transport layer, if needed */
+ int (*prepare_bind_conf)(struct bind_conf *conf); /* prepare a whole bind_conf */
+ void (*destroy_bind_conf)(struct bind_conf *conf); /* destroy a whole bind_conf */
+ int (*prepare_srv)(struct server *srv); /* prepare a server context */
+ void (*destroy_srv)(struct server *srv); /* destroy a server context */
+ int (*get_alpn)(const struct connection *conn, void *xprt_ctx, const char **str, int *len); /* get application layer name */
+ int (*takeover)(struct connection *conn, void *xprt_ctx, int orig_tid); /* Let the xprt know the fd have been taken over */
+ void (*set_idle)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection becomes idle. implies set_used. */
+ void (*set_used)(struct connection *conn, void *xprt_ctx); /* notify the xprt that the connection leaves idle. implies set_idle. */
+ char name[8]; /* transport layer name, zero-terminated */
+ int (*subscribe)(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es); /* Subscribe <es> to events, such as "being able to send" */
+ int (*unsubscribe)(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
+ int (*remove_xprt)(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx); /* Remove an xprt from the connection, used by temporary xprt such as the handshake one */
+ int (*add_xprt)(struct connection *conn, void *xprt_ctx, void *toadd_ctx, const struct xprt_ops *toadd_ops, void **oldxprt_ctx, const struct xprt_ops **oldxprt_ops); /* Add a new XPRT as the new xprt, and return the old one */
+ struct ssl_sock_ctx *(*get_ssl_sock_ctx)(struct connection *); /* retrieve the ssl_sock_ctx in use, or NULL if none */
+ int (*show_fd)(struct buffer *, const struct connection *, const void *ctx); /* append some data about xprt for "show fd"; returns non-zero if suspicious */
+};
+
+/* mux_ops describes the mux operations, which are to be performed at the
+ * connection level after data are exchanged with the transport layer in order
+ * to propagate them to streams. The <init> function will automatically be
+ * called once the mux is instantiated by the connection's owner at the end
+ * of a transport handshake, when it is about to transfer data and the data
+ * layer is not ready yet.
+ */
+struct mux_ops {
+ int (*init)(struct connection *conn, struct proxy *prx, struct session *sess, struct buffer *input); /* early initialization */
+ int (*wake)(struct connection *conn); /* mux-layer callback to report activity, mandatory */
+ size_t (*rcv_buf)(struct stconn *sc, struct buffer *buf, size_t count, int flags); /* Called from the upper layer to get data */
+ size_t (*snd_buf)(struct stconn *sc, struct buffer *buf, size_t count, int flags); /* Called from the upper layer to send data */
+ size_t (*nego_fastfwd)(struct stconn *sc, struct buffer *input, size_t count, unsigned int may_splice); /* Callback to fill the SD iobuf */
+ size_t (*done_fastfwd)(struct stconn *sc); /* Callback to terminate fast data forwarding */
+ int (*fastfwd)(struct stconn *sc, unsigned int count, unsigned int flags); /* Callback to init fast data forwarding */
+ int (*resume_fastfwd)(struct stconn *sc, unsigned int flags); /* Callback to resume fast data forwarding */
+ void (*shutr)(struct stconn *sc, enum co_shr_mode); /* shutr function */
+ void (*shutw)(struct stconn *sc, enum co_shw_mode); /* shutw function */
+
+ int (*attach)(struct connection *conn, struct sedesc *, struct session *sess); /* attach a stconn to an outgoing connection */
+ struct stconn *(*get_first_sc)(const struct connection *); /* retrieves any valid stconn from this connection */
+ void (*detach)(struct sedesc *); /* Detach an stconn from the stdesc from an outgoing connection, when the request is done */
+ int (*show_fd)(struct buffer *, struct connection *); /* append some data about connection into chunk for "show fd"; returns non-zero if suspicious */
+ int (*show_sd)(struct buffer *, struct sedesc *, const char *pfx); /* append some data about the mux stream into chunk for "show sess"; returns non-zero if suspicious */
+ int (*subscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Subscribe <es> to events, such as "being able to send" */
+ int (*unsubscribe)(struct stconn *sc, int event_type, struct wait_event *es); /* Unsubscribe <es> from events */
+ int (*sctl)(struct stconn *sc, enum mux_sctl_type mux_sctl, void *arg); /* Provides information about the mux stream */
+ int (*avail_streams)(struct connection *conn); /* Returns the number of streams still available for a connection */
+ int (*avail_streams_bidi)(struct connection *conn); /* Returns the number of bidirectional streams still available for a connection */
+ int (*avail_streams_uni)(struct connection *conn); /* Returns the number of unidirectional streams still available for a connection */
+ int (*used_streams)(struct connection *conn); /* Returns the number of streams in use on a connection. */
+ void (*destroy)(void *ctx); /* Let the mux know one of its users left, so it may have to disappear */
+ int (*ctl)(struct connection *conn, enum mux_ctl_type mux_ctl, void *arg); /* Provides information about the mux connection */
+ int (*takeover)(struct connection *conn, int orig_tid); /* Attempts to migrate the connection to the current thread */
+ unsigned int flags; /* some flags characterizing the mux's capabilities (MX_FL_*) */
+ char name[8]; /* mux layer name, zero-terminated */
+};
+
+/* list of frontend connections. Used to call mux wake operation on soft-stop
+ * to close idling connections.
+ */
+struct mux_stopping_data {
+ struct list list; /* list of registered frontend connections */
+ struct task *task; /* task woken up on soft-stop */
+};
+
+struct my_tcphdr {
+ uint16_t source;
+ uint16_t dest;
+};
+
+/* a connection source profile defines all the parameters needed to properly
+ * bind an outgoing connection for a server or proxy.
+ */
+struct conn_src {
+ unsigned int opts; /* CO_SRC_* */
+ int iface_len; /* bind interface name length */
+ char *iface_name; /* bind interface name or NULL */
+ struct port_range *sport_range; /* optional per-server TCP source ports */
+ struct sockaddr_storage source_addr; /* the address to which we want to bind for connect() */
+#if defined(CONFIG_HAP_TRANSPARENT)
+ struct sockaddr_storage tproxy_addr; /* non-local address we want to bind to for connect() */
+ char *bind_hdr_name; /* bind to this header name if defined */
+ int bind_hdr_len; /* length of the name of the header above */
+ int bind_hdr_occ; /* occurrence number of header above: >0 = from first, <0 = from end, 0=disabled */
+#endif
+};
+
+/* Hash header flag reflecting the input parameters present
+ * CAUTION! Always update CONN_HASH_PARAMS_TYPE_COUNT when adding a new entry.
+ */
+enum conn_hash_params_t {
+ CONN_HASH_PARAMS_TYPE_SNI = 0x1,
+ CONN_HASH_PARAMS_TYPE_DST_ADDR = 0x2,
+ CONN_HASH_PARAMS_TYPE_DST_PORT = 0x4,
+ CONN_HASH_PARAMS_TYPE_SRC_ADDR = 0x8,
+ CONN_HASH_PARAMS_TYPE_SRC_PORT = 0x10,
+ CONN_HASH_PARAMS_TYPE_PROXY = 0x20,
+};
+#define CONN_HASH_PARAMS_TYPE_COUNT 6
+
+#define CONN_HASH_PAYLOAD_LEN \
+ (((sizeof(((struct conn_hash_node *)0)->node.key)) * 8) - CONN_HASH_PARAMS_TYPE_COUNT)
+
+#define CONN_HASH_GET_PAYLOAD(hash) \
+ (((hash) << CONN_HASH_PARAMS_TYPE_COUNT) >> CONN_HASH_PARAMS_TYPE_COUNT)
+
+/* To avoid overflow, dynamically sized parameters must be pre-hashed. Their
+ * hashes will then be reused as input for the generation of the final
+ * connection hash.
+ */
+struct conn_hash_params {
+ uint64_t sni_prehash;
+ uint64_t proxy_prehash;
+ void *target;
+ struct sockaddr_storage *src_addr;
+ struct sockaddr_storage *dst_addr;
+};
+
+/*
+ * This structure describes a TLV entry consisting of its type
+ * and corresponding payload. This can be used to construct a list
+ * from which arbitrary TLV payloads can be fetched.
+ * It might be possible to embed the 'tlv struct' here in the future.
+ */
+struct conn_tlv_list {
+ struct list list;
+ unsigned short len; // 65535 should be more than enough!
+ unsigned char type;
+ char value[0];
+} __attribute__((packed));
+
+/* This structure describes a connection with its methods and data.
+ * A connection may be performed to proxy or server via a local or remote
+ * socket, and can also be made to an internal applet. It can support
+ * several transport schemes (raw, ssl, ...). It can support several
+ * connection control schemes, generally a protocol for socket-oriented
+ * connections, but other methods for applets.
+ */
+struct connection {
+ /* first cache line */
+ enum obj_type obj_type; /* differentiates connection from applet context */
+ unsigned char err_code; /* CO_ER_* */
+ signed short send_proxy_ofs; /* <0 = offset to (re)send from the end, >0 = send all (reused for SOCKS4) */
+ unsigned int flags; /* CO_FL_* */
+ const struct protocol *ctrl; /* operations at the socket layer */
+ const struct xprt_ops *xprt; /* operations at the transport layer */
+ const struct mux_ops *mux; /* mux layer operations. Must be set before xprt->init() */
+ void *xprt_ctx; /* general purpose pointer, initialized to NULL */
+ void *ctx; /* highest level context (usually the mux), initialized to NULL */
+ void *owner; /* pointer to the owner session, or NULL */
+ enum obj_type *target; /* the target to connect to (server, proxy, applet, ...) */
+
+ /* second cache line */
+ struct wait_event *subs; /* Task to wake when awaited events are ready */
+ union {
+ struct list idle_list; /* list element for idle connection in server idle list */
+ struct mt_list toremove_list; /* list element when idle connection is ready to be purged */
+ };
+ union {
+ struct list session_list; /* used by backend conns, list of attached connections to a session */
+ struct list stopping_list; /* used by frontend conns, attach point in mux stopping list */
+ };
+ union conn_handle handle; /* connection handle at the socket layer */
+ const struct netns_entry *proxy_netns;
+
+ /* third cache line and beyond */
+ void (*destroy_cb)(struct connection *conn); /* callback to notify of imminent death of the connection */
+ struct sockaddr_storage *src; /* source address (pool), when known, otherwise NULL */
+ struct sockaddr_storage *dst; /* destination address (pool), when known, otherwise NULL */
+ struct list tlv_list; /* list of TLVs received via PROXYv2 */
+
+ /* used to identify a backend connection for http-reuse,
+ * thus only present if conn.target is of type OBJ_TYPE_SERVER
+ */
+ struct conn_hash_node *hash_node;
+
+ /* Members used if connection must be reversed. */
+ struct {
+ enum obj_type *target; /* Listener for active reverse, server for passive. */
+ struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
+ } reverse;
+};
+
+/* node for backend connection in the idle trees for http-reuse
+ * A connection is identified by a hash generated from its specific parameters
+ */
+struct conn_hash_node {
+ struct eb64_node node; /* contains the hashing key */
+ struct connection *conn; /* connection owner of the node */
+};
+
+struct mux_proto_list {
+ const struct ist token; /* token name and length. Empty is catch-all */
+ enum proto_proxy_mode mode;
+ enum proto_proxy_side side;
+ const struct mux_ops *mux;
+ struct list list;
+};
+
+/* proxy protocol stuff below */
+
+/* proxy protocol v2 definitions */
+#define PP2_SIGNATURE "\x0D\x0A\x0D\x0A\x00\x0D\x0A\x51\x55\x49\x54\x0A"
+#define PP2_SIGNATURE_LEN 12
+#define PP2_HEADER_LEN 16
+
+/* ver_cmd byte */
+#define PP2_CMD_LOCAL 0x00
+#define PP2_CMD_PROXY 0x01
+#define PP2_CMD_MASK 0x0F
+
+#define PP2_VERSION 0x20
+#define PP2_VERSION_MASK 0xF0
+
+/* fam byte */
+#define PP2_TRANS_UNSPEC 0x00
+#define PP2_TRANS_STREAM 0x01
+#define PP2_TRANS_DGRAM 0x02
+#define PP2_TRANS_MASK 0x0F
+
+#define PP2_FAM_UNSPEC 0x00
+#define PP2_FAM_INET 0x10
+#define PP2_FAM_INET6 0x20
+#define PP2_FAM_UNIX 0x30
+#define PP2_FAM_MASK 0xF0
+
+#define PP2_ADDR_LEN_UNSPEC (0)
+#define PP2_ADDR_LEN_INET (4 + 4 + 2 + 2)
+#define PP2_ADDR_LEN_INET6 (16 + 16 + 2 + 2)
+#define PP2_ADDR_LEN_UNIX (108 + 108)
+
+#define PP2_HDR_LEN_UNSPEC (PP2_HEADER_LEN + PP2_ADDR_LEN_UNSPEC)
+#define PP2_HDR_LEN_INET (PP2_HEADER_LEN + PP2_ADDR_LEN_INET)
+#define PP2_HDR_LEN_INET6 (PP2_HEADER_LEN + PP2_ADDR_LEN_INET6)
+#define PP2_HDR_LEN_UNIX (PP2_HEADER_LEN + PP2_ADDR_LEN_UNIX)
+
+#define PP2_TYPE_ALPN 0x01
+#define PP2_TYPE_AUTHORITY 0x02
+#define PP2_TYPE_CRC32C 0x03
+#define PP2_TYPE_NOOP 0x04
+#define PP2_TYPE_UNIQUE_ID 0x05
+#define PP2_TYPE_SSL 0x20
+#define PP2_SUBTYPE_SSL_VERSION 0x21
+#define PP2_SUBTYPE_SSL_CN 0x22
+#define PP2_SUBTYPE_SSL_CIPHER 0x23
+#define PP2_SUBTYPE_SSL_SIG_ALG 0x24
+#define PP2_SUBTYPE_SSL_KEY_ALG 0x25
+#define PP2_TYPE_NETNS 0x30
+
+#define PP2_CLIENT_SSL 0x01
+#define PP2_CLIENT_CERT_CONN 0x02
+#define PP2_CLIENT_CERT_SESS 0x04
+
+#define PP2_CRC32C_LEN 4 /* Length of a CRC32C TLV value */
+
+#define TLV_HEADER_SIZE 3
+
+#define HA_PP2_AUTHORITY_MAX 255 /* Maximum length of an authority TLV */
+#define HA_PP2_TLV_VALUE_128 128 /* E.g., accommodate unique IDs (128 B) */
+#define HA_PP2_TLV_VALUE_256 256 /* E.g., accommodate authority TLVs (currently, <= 255 B) */
+#define HA_PP2_MAX_ALLOC 1024 /* Maximum TLV value for PPv2 to prevent DoS */
+
+struct proxy_hdr_v2 {
+ uint8_t sig[12]; /* hex 0D 0A 0D 0A 00 0D 0A 51 55 49 54 0A */
+ uint8_t ver_cmd; /* protocol version and command */
+ uint8_t fam; /* protocol family and transport */
+ uint16_t len; /* number of following bytes part of the header */
+ union {
+ struct { /* for TCP/UDP over IPv4, len = 12 */
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ip4;
+ struct { /* for TCP/UDP over IPv6, len = 36 */
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+ uint16_t src_port;
+ uint16_t dst_port;
+ } ip6;
+ struct { /* for AF_UNIX sockets, len = 216 */
+ uint8_t src_addr[108];
+ uint8_t dst_addr[108];
+ } unx;
+ } addr;
+};
+
+struct tlv {
+ uint8_t type;
+ uint8_t length_hi;
+ uint8_t length_lo;
+ uint8_t value[0]; // WT: don't use VAR_ARRAY here, it's an end of struct marker
+}__attribute__((packed));
+
+struct tlv_ssl {
+ struct tlv tlv;
+ uint8_t client;
+ uint32_t verify;
+ uint8_t sub_tlv[VAR_ARRAY];
+}__attribute__((packed));
+
+
+/* This structure is used to manage idle connections, their locking, and the
+ * list of such idle connections to be removed. It is per-thread and must be
+ * accessible from foreign threads.
+ */
+struct idle_conns {
+ struct mt_list toremove_conns;
+ struct task *cleanup_task;
+ __decl_thread(HA_SPINLOCK_T idle_conns_lock);
+} THREAD_ALIGNED(64);
+
+#endif /* _HAPROXY_CONNECTION_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/connection.h b/include/haproxy/connection.h
new file mode 100644
index 0000000..c7d9883
--- /dev/null
+++ b/include/haproxy/connection.h
@@ -0,0 +1,762 @@
+/*
+ * include/haproxy/connection.h
+ * This file contains connection function prototypes
+ *
+ * Copyright (C) 2000-2002 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_CONNECTION_H
+#define _HAPROXY_CONNECTION_H
+
+#include <import/ist.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/stconn-t.h>
+#include <haproxy/fd.h>
+#include <haproxy/list.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/server.h>
+#include <haproxy/session-t.h>
+#include <haproxy/task-t.h>
+
+extern struct pool_head *pool_head_connection;
+extern struct pool_head *pool_head_conn_hash_node;
+extern struct pool_head *pool_head_sockaddr;
+extern struct pool_head *pool_head_pp_tlv_128;
+extern struct pool_head *pool_head_pp_tlv_256;
+extern struct pool_head *pool_head_uniqueid;
+extern struct xprt_ops *registered_xprt[XPRT_ENTRIES];
+extern struct mux_proto_list mux_proto_list;
+extern struct mux_stopping_data mux_stopping_data[MAX_THREADS];
+
+#define IS_HTX_CONN(conn) ((conn)->mux && ((conn)->mux->flags & MX_FL_HTX))
+
+/* receive a PROXY protocol header over a connection */
+int conn_recv_proxy(struct connection *conn, int flag);
+int conn_send_proxy(struct connection *conn, unsigned int flag);
+int make_proxy_line(char *buf, int buf_len, struct server *srv, struct connection *remote, struct stream *strm);
+struct conn_tlv_list *conn_get_tlv(struct connection *conn, int type);
+
+int conn_append_debug_info(struct buffer *buf, const struct connection *conn, const char *pfx);
+
+int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es);
+int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es);
+
+/* receive a NetScaler Client IP insertion header over a connection */
+int conn_recv_netscaler_cip(struct connection *conn, int flag);
+
+/* raw send() directly on the socket */
+int conn_ctrl_send(struct connection *conn, const void *buf, int len, int flags);
+
+/* drains any pending bytes from the socket */
+int conn_ctrl_drain(struct connection *conn);
+
+/* socks4 proxy handshake */
+int conn_send_socks4_proxy_request(struct connection *conn);
+int conn_recv_socks4_proxy_response(struct connection *conn);
+
+/* If we delayed the mux creation because we were waiting for the handshake, do it now */
+int conn_create_mux(struct connection *conn);
+int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake);
+int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
+ struct ist mux_proto, int mode);
+int conn_install_mux_fe(struct connection *conn, void *ctx);
+int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess,
+ const struct mux_ops *force_mux_ops);
+int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess);
+
+void conn_delete_from_tree(struct connection *conn);
+
+void conn_init(struct connection *conn, void *target);
+struct connection *conn_new(void *target);
+void conn_free(struct connection *conn);
+struct conn_hash_node *conn_alloc_hash_node(struct connection *conn);
+struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len);
+void sockaddr_free(struct sockaddr_storage **sap);
+
+
+/* connection hash stuff */
+uint64_t conn_calculate_hash(const struct conn_hash_params *params);
+uint64_t conn_hash_prehash(char *buf, size_t size);
+void conn_hash_update(char *buf, size_t *idx,
+ const void *data, size_t size,
+ enum conn_hash_params_t *flags,
+ enum conn_hash_params_t type);
+uint64_t conn_hash_digest(char *buf, size_t bufsize,
+ enum conn_hash_params_t flags);
+
+int conn_reverse(struct connection *conn);
+
+const char *conn_err_code_str(struct connection *c);
+int xprt_add_hs(struct connection *conn);
+void register_mux_proto(struct mux_proto_list *list);
+
+extern struct idle_conns idle_conns[MAX_THREADS];
+
+/* returns true if the transport layer is ready */
+static inline int conn_xprt_ready(const struct connection *conn)
+{
+ return (conn->flags & CO_FL_XPRT_READY);
+}
+
+/* returns true if the control layer is ready */
+static inline int conn_ctrl_ready(const struct connection *conn)
+{
+ return (conn->flags & CO_FL_CTRL_READY);
+}
+
+/*
+ * Calls the start() function of the transport layer, if needed.
+ * Returns < 0 in case of error.
+ */
+
+static inline int conn_xprt_start(struct connection *conn)
+{
+ int ret = 0;
+
+ if (!conn_xprt_ready(conn) && conn->xprt && conn->xprt->start)
+ ret = conn->xprt->start(conn, conn->xprt_ctx);
+
+ if (ret >= 0)
+ conn->flags |= CO_FL_XPRT_READY;
+
+ return ret;
+}
+
+/* Calls the close() function of the transport layer if any and if not done
+ * yet, and clears the CO_FL_XPRT_READY flags
+ * However this is not done if the CO_FL_XPRT_TRACKED flag is set,
+ * which allows logs to take data from the transport layer very late if needed.
+ */
+static inline void conn_xprt_close(struct connection *conn)
+{
+ if (conn->xprt && !(conn->flags & CO_FL_XPRT_TRACKED)) {
+ if (conn->xprt->close)
+ conn->xprt->close(conn, conn->xprt_ctx);
+ conn->xprt_ctx = NULL;
+ conn->flags &= ~CO_FL_XPRT_READY;
+ conn->xprt = NULL;
+ }
+}
+
+/* Initializes the connection's control layer which essentially consists in
+ * registering the connection handle (e.g. file descriptor) for events and
+ * setting the CO_FL_CTRL_READY flag. The caller is responsible for ensuring
+ * that the control layer is already assigned to the connection prior to the
+ * call.
+ */
+static inline void conn_ctrl_init(struct connection *conn)
+{
+ if (!conn_ctrl_ready(conn)) {
+ conn->flags |= CO_FL_CTRL_READY;
+ if (conn->ctrl->ctrl_init)
+ conn->ctrl->ctrl_init(conn);
+ }
+}
+
+/* Deletes the connection's handle (e.g. FD) if the transport layer is already
+ * gone, and removes the CO_FL_CTRL_READY flag.
+ */
+static inline void conn_ctrl_close(struct connection *conn)
+{
+ if (!conn->xprt && (conn->flags & CO_FL_CTRL_READY)) {
+ if ((conn->flags & (CO_FL_WANT_DRAIN | CO_FL_SOCK_RD_SH)) == CO_FL_WANT_DRAIN)
+ conn_ctrl_drain(conn);
+ conn->flags &= ~CO_FL_CTRL_READY;
+ if (conn->ctrl->ctrl_close)
+ conn->ctrl->ctrl_close(conn);
+ }
+}
+
+/* If the connection still has a transport layer, then call its close() function
+ * if any, and delete the file descriptor if a control layer is set. This is
+ * used to close everything at once and atomically. However this is not done if
+ * the CO_FL_XPRT_TRACKED flag is set, which allows logs to take data from the
+ * transport layer very late if needed.
+ */
+static inline void conn_full_close(struct connection *conn)
+{
+ conn_xprt_close(conn);
+ conn_ctrl_close(conn);
+}
+
+/* stop tracking a connection, allowing conn_full_close() to always
+ * succeed.
+ */
+static inline void conn_stop_tracking(struct connection *conn)
+{
+ conn->flags &= ~CO_FL_XPRT_TRACKED;
+}
+
+/* returns the connection's FD if the connection exists, its control is ready,
+ * and the connection has an FD, otherwise -1.
+ */
+static inline int conn_fd(const struct connection *conn)
+{
+ if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
+ return -1;
+ return conn->handle.fd;
+}
+
+/* read shutdown, called from the rcv_buf/rcv_pipe handlers when
+ * detecting an end of connection. Marks the connection as shut for reads
+ * and, if the control layer is ready, clears the FD's lingering risk.
+ */
+static inline void conn_sock_read0(struct connection *c)
+{
+	c->flags |= CO_FL_SOCK_RD_SH;
+	if (conn_ctrl_ready(c)) {
+		/* we don't risk keeping ports unusable if we found the
+		 * zero from the other side.
+		 */
+		BUG_ON(c->flags & CO_FL_FDLESS);
+		/* atomically drop FD_LINGER_RISK since the peer already closed */
+		HA_ATOMIC_AND(&fdtab[c->handle.fd].state, ~FD_LINGER_RISK);
+	}
+}
+
+/* write shutdown, indication that the upper layer is not willing to send
+ * anything anymore and wants to close after pending data are sent. The
+ * <clean> argument will allow not to perform the socket layer shutdown if
+ * equal to 0.
+ */
+static inline void conn_sock_shutw(struct connection *c, int clean)
+{
+	c->flags |= CO_FL_SOCK_WR_SH;
+	if (conn_ctrl_ready(c)) {
+		/* don't perform a clean shutdown if we're going to reset or
+		 * if the shutr was already received.
+		 */
+		BUG_ON(c->flags & CO_FL_FDLESS);
+		/* SHUT_WR: stop only the sending side, reads remain possible */
+		if (!(c->flags & CO_FL_SOCK_RD_SH) && clean)
+			shutdown(c->handle.fd, SHUT_WR);
+	}
+}
+
+/* performs a clean data-layer (transport) write shutdown if supported */
+static inline void conn_xprt_shutw(struct connection *c)
+{
+	const struct xprt_ops *ops = c->xprt;
+
+	if (ops && ops->shutw)
+		ops->shutw(c, c->xprt_ctx, 1);
+}
+
+/* performs an unclean data-layer (transport) write shutdown if supported */
+static inline void conn_xprt_shutw_hard(struct connection *c)
+{
+	const struct xprt_ops *ops = c->xprt;
+
+	if (ops && ops->shutw)
+		ops->shutw(c, c->xprt_ctx, 0);
+}
+
+
+/* detect sock->data read0 transition: returns non-zero once the socket-level
+ * read shutdown has been observed on this connection.
+ */
+static inline int conn_xprt_read0_pending(struct connection *c)
+{
+	return !!(c->flags & CO_FL_SOCK_RD_SH);
+}
+
+/* prepares a connection to work with protocol <proto> and transport <xprt>.
+ * The transport layer is initialized as well, and the mux and its context are
+ * cleared. The target is not reinitialized and it is recommended that it is
+ * set prior to calling this function so that the function may make use of it
+ * in the future to refine the mux choice if needed.
+ * Returns the transport's init() result (negative on failure, in which case
+ * conn->xprt is reset to NULL), or 0 when the transport has no init function.
+ */
+static inline int conn_prepare(struct connection *conn, const struct protocol *proto, const struct xprt_ops *xprt)
+{
+	int ret = 0;
+
+	conn->ctrl = proto;
+	conn->xprt = xprt;
+	conn->mux = NULL;
+	conn->xprt_ctx = NULL;
+	conn->ctx = NULL;
+	if (xprt->init) {
+		ret = xprt->init(conn, &conn->xprt_ctx);
+		if (ret < 0)
+			conn->xprt = NULL;
+	}
+	return ret;
+}
+
+/* returns 0 if the connection is valid and is a frontend connection, otherwise
+ * returns 1 indicating it's a backend connection. An uninitialized connection
+ * also returns 1 to better handle the usage in the middle of initialization.
+ */
+static inline int conn_is_back(const struct connection *conn)
+{
+	/* only frontend connections target a listener */
+	return !objt_listener(conn->target);
+}
+
+/* sets <owner> as the connection's owner, and <cb> as its destroy callback
+ * (presumably invoked when the connection is released — confirm with callers).
+ * Either may be NULL.
+ */
+static inline void conn_set_owner(struct connection *conn, void *owner, void (*cb)(struct connection *))
+{
+	conn->owner = owner;
+	conn->destroy_cb = cb;
+}
+
+
+/* Mark the connection <conn> as private and remove it from the available connection list */
+static inline void conn_set_private(struct connection *conn)
+{
+	if (!(conn->flags & CO_FL_PRIVATE)) {
+		conn->flags |= CO_FL_PRIVATE;
+
+		/* only server-targeted (backend) connections sit in a server's
+		 * available list and need to be released from it.
+		 */
+		if (obj_type(conn->target) == OBJ_TYPE_SERVER)
+			srv_release_conn(__objt_server(conn->target), conn);
+	}
+}
+
+/* forcefully detaches any subscriber from the connection, clearing its
+ * pending event mask first.
+ */
+static inline void conn_force_unsubscribe(struct connection *conn)
+{
+	struct wait_event *subs = conn->subs;
+
+	if (subs) {
+		subs->events = 0;
+		conn->subs = NULL;
+	}
+}
+
+/* Returns the source address of the connection or NULL if not set
+ * (see conn_get_src() which populates it on demand).
+ */
+static inline const struct sockaddr_storage *conn_src(struct connection *conn)
+{
+	return conn->src;
+}
+
+/* Returns the destination address of the connection or NULL if not set
+ * (see conn_get_dst() which populates it on demand).
+ */
+static inline const struct sockaddr_storage *conn_dst(struct connection *conn)
+{
+	return conn->dst;
+}
+
+/* Retrieves the connection's original source address. Returns non-zero on
+ * success or zero on failure. The operation is only performed once and the
+ * address is stored in the connection for future use.
+ */
+static inline int conn_get_src(struct connection *conn)
+{
+	/* already resolved on a previous call */
+	if (conn->src)
+		return 1;
+
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	if (!sockaddr_alloc(&conn->src, NULL, 0))
+		goto fail;
+
+	/* some stream protocols may provide their own get_src/dst functions */
+	if (conn->ctrl->get_src &&
+	    conn->ctrl->get_src(conn, (struct sockaddr *)conn->src, sizeof(*conn->src)) != -1)
+		goto done;
+
+	if (conn->ctrl->proto_type != PROTO_TYPE_STREAM)
+		goto fail;
+
+	/* most other socket-based stream protocols will use their socket family's
+	 * functions; FD-less connections cannot be queried this way.
+	 */
+	if (conn->ctrl->fam->get_src && !(conn->flags & CO_FL_FDLESS) &&
+	    conn->ctrl->fam->get_src(conn->handle.fd, (struct sockaddr *)conn->src,
+	                             sizeof(*conn->src),
+	                             obj_type(conn->target) != OBJ_TYPE_LISTENER) != -1)
+		goto done;
+
+	/* no other means: release the allocated storage so a later call may retry */
+	fail:
+	sockaddr_free(&conn->src);
+	return 0;
+	done:
+	return 1;
+}
+
+/* Retrieves the connection's original destination address. Returns non-zero on
+ * success or zero on failure. The operation is only performed once and the
+ * address is stored in the connection for future use.
+ */
+static inline int conn_get_dst(struct connection *conn)
+{
+	/* already resolved on a previous call */
+	if (conn->dst)
+		return 1;
+
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	if (!sockaddr_alloc(&conn->dst, NULL, 0))
+		goto fail;
+
+	/* some stream protocols may provide their own get_src/dst functions */
+	if (conn->ctrl->get_dst &&
+	    conn->ctrl->get_dst(conn, (struct sockaddr *)conn->dst, sizeof(*conn->dst)) != -1)
+		goto done;
+
+	if (conn->ctrl->proto_type != PROTO_TYPE_STREAM)
+		goto fail;
+
+	/* most other socket-based stream protocols will use their socket family's
+	 * functions; FD-less connections cannot be queried this way.
+	 */
+	if (conn->ctrl->fam->get_dst && !(conn->flags & CO_FL_FDLESS) &&
+	    conn->ctrl->fam->get_dst(conn->handle.fd, (struct sockaddr *)conn->dst,
+	                             sizeof(*conn->dst),
+	                             obj_type(conn->target) != OBJ_TYPE_LISTENER) != -1)
+		goto done;
+
+	/* no other means: release the allocated storage so a later call may retry */
+	fail:
+	sockaddr_free(&conn->dst);
+	return 0;
+	done:
+	return 1;
+}
+
+/* Sets the TOS header in IPv4 and the traffic class header in IPv6 packets
+ * (as per RFC3260 #4 and BCP37 #4.2 and #5.2). The connection is tested and if
+ * it is null, not ready, FD-less, or has no known source address, nothing is
+ * done.
+ */
+static inline void conn_set_tos(const struct connection *conn, int tos)
+{
+	if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
+		return;
+
+	/* the address family is read below; conn->src may legitimately be NULL
+	 * if the source address was never retrieved (see conn_get_src()), so it
+	 * must not be dereferenced blindly.
+	 */
+	if (!conn->src)
+		return;
+
+#ifdef IP_TOS
+	if (conn->src->ss_family == AF_INET)
+		setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
+#endif
+#ifdef IPV6_TCLASS
+	if (conn->src->ss_family == AF_INET6) {
+		if (IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)conn->src)->sin6_addr))
+			/* v4-mapped addresses need IP_TOS */
+			setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
+		else
+			setsockopt(conn->handle.fd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos));
+	}
+#endif
+}
+
+/* Sets the netfilter mark on the connection's socket. The connection is tested
+ * and if it is null, nothing is done. Uses the platform's equivalent option:
+ * SO_MARK (Linux), SO_USER_COOKIE (FreeBSD) or SO_RTABLE (OpenBSD); a no-op
+ * when none is available.
+ */
+static inline void conn_set_mark(const struct connection *conn, int mark)
+{
+	if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
+		return;
+
+#if defined(SO_MARK)
+	setsockopt(conn->handle.fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
+#elif defined(SO_USER_COOKIE)
+	setsockopt(conn->handle.fd, SOL_SOCKET, SO_USER_COOKIE, &mark, sizeof(mark));
+#elif defined(SO_RTABLE)
+	setsockopt(conn->handle.fd, SOL_SOCKET, SO_RTABLE, &mark, sizeof(mark));
+#endif
+}
+
+/* Adjusts the TCP quick-ack feature on the connection's socket. The
+ * connection is tested and if it is null, nothing is done. A no-op on
+ * platforms without TCP_QUICKACK.
+ */
+static inline void conn_set_quickack(const struct connection *conn, int value)
+{
+	if (!conn || !conn_ctrl_ready(conn) || (conn->flags & CO_FL_FDLESS))
+		return;
+
+#ifdef TCP_QUICKACK
+	setsockopt(conn->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &value, sizeof(value));
+#endif
+}
+
+/* Sets callback <cb> and context <ctx> on wait-event <wl>'s tasklet, unless a
+ * callback was already installed (first registration wins). Returns <wl>.
+ */
+static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned int), void *ctx)
+{
+	if (!wl->tasklet->process) {
+		wl->tasklet->process = cb;
+		wl->tasklet->context = ctx;
+	}
+	return wl;
+}
+
+/* Installs the connection's mux layer for upper context <ctx>.
+ * Returns < 0 on error, in which case the mux and context are reset to NULL
+ * so the connection is left mux-less as before the call.
+ */
+static inline int conn_install_mux(struct connection *conn, const struct mux_ops *mux,
+                                   void *ctx, struct proxy *prx, struct session *sess)
+{
+	int ret;
+
+	conn->mux = mux;
+	conn->ctx = ctx;
+	/* the mux's init() may be absent, in which case installation
+	 * trivially succeeds.
+	 */
+	ret = mux->init ? mux->init(conn, prx, sess, &BUF_NULL) : 0;
+	if (ret < 0) {
+		conn->mux = NULL;
+		conn->ctx = NULL;
+	}
+	return ret;
+}
+
+/* Retrieves any valid stream connector from this connection, preferably the first
+ * valid one. The purpose is to be able to figure one other end of a private
+ * connection for purposes like source binding or proxy protocol header
+ * emission. In such cases, any stream connector is expected to be valid so the
+ * mux is encouraged to return the first one it finds. If the connection has
+ * no mux or the mux has no get_first_sc() method or the mux has no valid
+ * stream connector, NULL is returned. The output pointer is purposely marked
+ * const to discourage the caller from modifying anything there.
+ */
+static inline struct stconn *conn_get_first_sc(const struct connection *conn)
+{
+	/* calling without a connection or mux is a programming error */
+	BUG_ON(!conn || !conn->mux);
+
+	if (!conn->mux->get_first_sc)
+		return NULL;
+	return conn->mux->get_first_sc(conn);
+}
+
+int conn_update_alpn(struct connection *conn, const struct ist alpn, int force);
+
+/* returns the name of the connection's control layer, or "NONE" when the
+ * connection is not set or its control layer is not ready.
+ */
+static inline const char *conn_get_ctrl_name(const struct connection *conn)
+{
+	return (conn && conn_ctrl_ready(conn)) ? conn->ctrl->name : "NONE";
+}
+
+/* returns the name of the connection's transport layer, or "NONE" when the
+ * connection is not set or has no transport layer.
+ */
+static inline const char *conn_get_xprt_name(const struct connection *conn)
+{
+	return (conn && conn->xprt) ? conn->xprt->name : "NONE";
+}
+
+/* returns the name of the connection's mux, or "NONE" when the connection is
+ * not set or has no mux installed.
+ */
+static inline const char *conn_get_mux_name(const struct connection *conn)
+{
+	return (conn && conn->mux) ? conn->mux->name : "NONE";
+}
+
+/* registers pointer to transport layer <id> (XPRT_*). Out-of-range ids,
+ * including negative ones, are silently ignored to avoid an out-of-bounds
+ * write into the registration table.
+ */
+static inline void xprt_register(int id, struct xprt_ops *xprt)
+{
+	if (id < 0 || id >= XPRT_ENTRIES)
+		return;
+	registered_xprt[id] = xprt;
+}
+
+/* returns pointer to transport layer <id> (XPRT_*) or NULL if not registered.
+ * Out-of-range ids, including negative ones, return NULL instead of reading
+ * outside the registration table.
+ */
+static inline struct xprt_ops *xprt_get(int id)
+{
+	if (id < 0 || id >= XPRT_ENTRIES)
+		return NULL;
+	return registered_xprt[id];
+}
+
+/* notify the next xprt that the connection is about to become idle and that it
+ * may be stolen at any time after the function returns and that any tasklet in
+ * the chain must be careful before dereferencing its context.
+ * Note: the <xprt_ctx> parameter is currently unused; the context actually
+ * passed down is conn->xprt_ctx.
+ */
+static inline void xprt_set_idle(struct connection *conn, const struct xprt_ops *xprt, void *xprt_ctx)
+{
+	if (xprt->set_idle)
+		xprt->set_idle(conn, conn->xprt_ctx);
+}
+
+/* notify the next xprt that the connection is not idle anymore and that it may
+ * not be stolen before the next xprt_set_idle().
+ * Note: the <xprt_ctx> parameter is currently unused; the context actually
+ * passed down is conn->xprt_ctx.
+ */
+static inline void xprt_set_used(struct connection *conn, const struct xprt_ops *xprt, void *xprt_ctx)
+{
+	if (xprt->set_used)
+		xprt->set_used(conn, conn->xprt_ctx);
+}
+
+/* retrieves the negotiated ALPN token into <str>/<len> through the transport
+ * layer; returns 0 when the transport is not ready or has no get_alpn method,
+ * otherwise the transport's own return value.
+ */
+static inline int conn_get_alpn(const struct connection *conn, const char **str, int *len)
+{
+	if (conn_xprt_ready(conn) && conn->xprt->get_alpn)
+		return conn->xprt->get_alpn(conn, conn->xprt_ctx, str, len);
+	return 0;
+}
+
+/* unregisters proto mux list <list>; the element is re-initialized so it may
+ * safely be deleted (or registered) again later.
+ */
+static inline void unregister_mux_proto(struct mux_proto_list *list)
+{
+	LIST_DELETE(&list->list);
+	LIST_INIT(&list->list);
+}
+
+/* looks up the registered mux entry whose token matches <proto> exactly;
+ * returns NULL when no entry matches.
+ */
+static inline struct mux_proto_list *get_mux_proto(const struct ist proto)
+{
+	struct mux_proto_list *cur;
+
+	list_for_each_entry(cur, &mux_proto_list.list, list)
+		if (isteq(proto, cur->token))
+			return cur;
+
+	return NULL;
+}
+
+void list_mux_proto(FILE *out);
+/* returns the first mux entry in the list matching the exact same <mux_proto>
+ * and compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
+ * HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
+ * with exactly the same <proto_mode> or with an empty name. May return
+ * null if the code improperly registered the default mux to use as a fallback.
+ *
+ * <proto_mode> expects PROTO_MODE_* value only: PROXY_MODE_* values should
+ * never be used directly here (but you may use conn_pr_mode_to_proto_mode()
+ * to map proxy mode to corresponding proto mode before calling the function).
+ */
+static inline const struct mux_proto_list *conn_get_best_mux_entry(
+        const struct ist mux_proto,
+        int proto_side, int proto_mode)
+{
+	struct mux_proto_list *item;
+	struct mux_proto_list *fallback = NULL;
+
+	list_for_each_entry(item, &mux_proto_list.list, list) {
+		/* skip entries not matching the requested side or mode */
+		if (!(item->side & proto_side) || !(item->mode & proto_mode))
+			continue;
+		/* an exact token match wins immediately */
+		if (istlen(mux_proto) && isteq(mux_proto, item->token))
+			return item;
+		else if (!istlen(item->token)) {
+			/* unnamed (default) entry: remember it as fallback, but
+			 * prefer one whose mode matches <proto_mode> exactly
+			 * over one that merely includes it.
+			 */
+			if (!fallback || (item->mode == proto_mode && fallback->mode != proto_mode))
+				fallback = item;
+		}
+	}
+	return fallback;
+
+}
+
+/* returns the first mux in the list matching the exact same <mux_proto> and
+ * compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
+ * HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
+ * with exactly the same <proto_mode> or with an empty name. May return
+ * null if the code improperly registered the default mux to use as a fallback.
+ */
+static inline const struct mux_ops *conn_get_best_mux(struct connection *conn,
+                                                      const struct ist mux_proto,
+                                                      int proto_side, int proto_mode)
+{
+	const struct mux_proto_list *entry;
+
+	entry = conn_get_best_mux_entry(mux_proto, proto_side, proto_mode);
+	if (!entry)
+		return NULL;
+	return entry->mux;
+}
+
+/* returns a pointer to the proxy associated with this connection. For a front
+ * connection it returns a pointer to the frontend ; for a back connection, it
+ * returns a pointer to the backend. May return NULL if the target is neither
+ * a listener, a server nor a proxy.
+ */
+static inline struct proxy *conn_get_proxy(const struct connection *conn)
+{
+	struct listener *l;
+	struct server *s;
+
+	/* check if it's a frontend connection */
+	l = objt_listener(conn->target);
+	if (l)
+		return l->bind_conf->frontend;
+
+	/* check if it's a backend connection */
+	s = objt_server(conn->target);
+	if (s)
+		return s->proxy;
+
+	/* last resort: the target may directly be a proxy */
+	return objt_proxy(conn->target);
+}
+
+/* unconditionally retrieves the ssl_sock_ctx for this connection. Prefer using
+ * the standard form conn_get_ssl_sock_ctx() which checks the transport layer
+ * and the availability of the method. The caller is responsible for ensuring
+ * conn->xprt and conn->xprt->get_ssl_sock_ctx are valid.
+ */
+static inline struct ssl_sock_ctx *__conn_get_ssl_sock_ctx(struct connection *conn)
+{
+	return conn->xprt->get_ssl_sock_ctx(conn);
+}
+
+/* retrieves the ssl_sock_ctx for this connection otherwise NULL */
+static inline struct ssl_sock_ctx *conn_get_ssl_sock_ctx(struct connection *conn)
+{
+	if (conn && conn->xprt && conn->xprt->get_ssl_sock_ctx)
+		return conn->xprt->get_ssl_sock_ctx(conn);
+	return NULL;
+}
+
+/* boolean, returns true if connection is over SSL */
+static inline int conn_is_ssl(struct connection *conn)
+{
+	return conn_get_ssl_sock_ctx(conn) != NULL;
+}
+
+/* Returns true if connection must be reversed. */
+static inline int conn_is_reverse(const struct connection *conn)
+{
+	return conn->reverse.target != NULL;
+}
+
+/* Returns true if connection must be actively reversed or waiting to be accepted.
+ * For a backend connection this means a reverse target is set; for a frontend
+ * connection this means the active-reversing flag is set.
+ */
+static inline int conn_reverse_in_preconnect(const struct connection *conn)
+{
+	return conn_is_back(conn) ? !!(conn->reverse.target) :
+	                            !!(conn->flags & CO_FL_ACT_REVERSING);
+}
+
+/* Initialize <conn> as a reverse connection to <target>. A frontend connection
+ * must be reversed to a server, a backend connection to a listener.
+ */
+static inline void conn_set_reverse(struct connection *conn, enum obj_type *target)
+{
+	/* Ensure the correct target type is used depending on the connection side before reverse. */
+	BUG_ON((!conn_is_back(conn) && !objt_server(target)) ||
+	       (conn_is_back(conn) && !objt_listener(target)));
+
+	conn->reverse.target = target;
+}
+
+/* Returns the listener instance for connection used for active reverse.
+ * On the backend side it is the reverse target; on the frontend side it is the
+ * connection's own target. Both are assumed to be listeners (unchecked
+ * __objt_listener() casts) as guaranteed by conn_set_reverse().
+ */
+static inline struct listener *conn_active_reverse_listener(const struct connection *conn)
+{
+	return conn_is_back(conn) ? __objt_listener(conn->reverse.target) :
+	                            __objt_listener(conn->target);
+}
+
+/*
+ * Prepare TLV argument for redirecting fetches.
+ * Note that it is not possible to use an argument check function
+ * as that would require us to allow arguments for functions
+ * that do not need it. Alternatively, the sample logic could be
+ * adjusted to perform checks for no arguments and allocate
+ * in the check function. However, this does not seem worth the trouble.
+ */
+static inline void set_tlv_arg(int tlv_type, struct arg *tlv_arg)
+{
+	/* the TLV type id is carried as a plain signed-integer argument */
+	tlv_arg->type = ARGT_SINT;
+	tlv_arg->data.sint = tlv_type;
+}
+
+/*
+ * Map proxy mode (PR_MODE_*) to equivalent proto_proxy_mode (PROTO_MODE_*)
+ */
+static inline int conn_pr_mode_to_proto_mode(int proxy_mode)
+{
+	int mode;
+
+	/* for now we only support TCP and HTTP proto_modes, so we
+	 * consider that if it's not HTTP, then it's TCP
+	 */
+	/* (proxy_mode == PR_MODE_HTTP) is 0 or 1, so this yields 1<<0 or 1<<1;
+	 * assumes PROTO_MODE_TCP == 1 and PROTO_MODE_HTTP == 2 — confirm
+	 * against the PROTO_MODE_* definitions.
+	 */
+	mode = 1 << (proxy_mode == PR_MODE_HTTP);
+
+	return mode;
+}
+
+#endif /* _HAPROXY_CONNECTION_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/counters-t.h b/include/haproxy/counters-t.h
new file mode 100644
index 0000000..933c228
--- /dev/null
+++ b/include/haproxy/counters-t.h
@@ -0,0 +1,128 @@
+/*
+ * include/haproxy/counters-t.h
+ * This file contains structure declarations for statistics counters.
+ *
+ * Copyright 2008-2009 Krzysztof Piotr Oledzki <ole@ans.pl>
+ * Copyright 2011-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_COUNTERS_T_H
+#define _HAPROXY_COUNTERS_T_H
+
+/* counters used by listeners and frontends */
+struct fe_counters {
+	unsigned int conn_max;                  /* max # of active sessions */
+	long long cum_conn;                     /* cumulated number of received connections */
+	long long cum_sess;                     /* cumulated number of accepted connections */
+	long long cum_sess_ver[3];              /* cumulated number of h1/h2/h3 sessions */
+
+	unsigned int cps_max;                   /* maximum of new connections received per second */
+	unsigned int sps_max;                   /* maximum of new connections accepted per second (sessions) */
+
+	long long bytes_in;                     /* number of bytes transferred from the client to the server */
+	long long bytes_out;                    /* number of bytes transferred from the server to the client */
+
+	/* compression counters, index 0 for requests, 1 for responses */
+	long long comp_in[2];                   /* input bytes fed to the compressor */
+	long long comp_out[2];                  /* output bytes emitted by the compressor */
+	long long comp_byp[2];                  /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
+
+	long long denied_req;                   /* blocked requests because of security concerns */
+	long long denied_resp;                  /* blocked responses because of security concerns */
+	long long failed_req;                   /* failed requests (eg: invalid or timeout) */
+	long long denied_conn;                  /* denied connection requests (tcp-req-conn rules) */
+	long long denied_sess;                  /* denied session requests (tcp-req-sess rules) */
+	long long failed_rewrites;              /* failed rewrites (warning) */
+	long long internal_errors;              /* internal processing errors */
+
+	long long cli_aborts;                   /* aborted responses during DATA phase caused by the client */
+	long long srv_aborts;                   /* aborted responses during DATA phase caused by the server */
+	long long intercepted_req;              /* number of monitoring or stats requests intercepted by the frontend */
+
+	union {
+		struct {
+			long long cum_req[4];   /* cumulated number of processed other/h1/h2/h3 requests */
+			long long comp_rsp;     /* number of compressed responses */
+			unsigned int rps_max;   /* maximum of new HTTP requests per second observed */
+			long long rsp[6];       /* http response codes */
+			long long cache_lookups;/* cache lookups */
+			long long cache_hits;   /* cache hits */
+		} http;
+	} p;                                    /* protocol-specific stats */
+};
+
+/* counters used by servers and backends */
+struct be_counters {
+	unsigned int conn_max;                  /* max # of active sessions */
+	long long cum_conn;                     /* cumulated number of received connections */
+	long long cum_sess;                     /* cumulated number of accepted connections */
+	long long cum_lbconn;                   /* cumulated number of sessions processed by load balancing (BE only) */
+	unsigned long last_sess;                /* last session time */
+
+	unsigned int cps_max;                   /* maximum of new connections received per second */
+	unsigned int sps_max;                   /* maximum of new connections accepted per second (sessions) */
+	unsigned int nbpend_max;                /* max number of pending connections with no server assigned yet */
+	unsigned int cur_sess_max;              /* max number of currently active sessions */
+
+	long long bytes_in;                     /* number of bytes transferred from the client to the server */
+	long long bytes_out;                    /* number of bytes transferred from the server to the client */
+
+	/* compression counters, index 0 for requests, 1 for responses */
+	long long comp_in[2];                   /* input bytes fed to the compressor */
+	long long comp_out[2];                  /* output bytes emitted by the compressor */
+	long long comp_byp[2];                  /* input bytes that bypassed the compressor (cpu/ram/bw limitation) */
+
+	long long denied_req;                   /* blocked requests because of security concerns */
+	long long denied_resp;                  /* blocked responses because of security concerns */
+
+	long long connect;                      /* number of connection establishment attempts */
+	long long reuse;                        /* number of connection reuses */
+	long long failed_conns;                 /* failed connect() attempts (BE only) */
+	long long failed_resp;                  /* failed responses (BE only) */
+	long long cli_aborts;                   /* aborted responses during DATA phase caused by the client */
+	long long srv_aborts;                   /* aborted responses during DATA phase caused by the server */
+	long long retries;                      /* retried and redispatched connections (BE only) */
+	long long redispatches;                 /* retried and redispatched connections (BE only) */
+	long long failed_rewrites;              /* failed rewrites (warning) */
+	long long internal_errors;              /* internal processing errors */
+
+	long long failed_checks, failed_hana;   /* failed health checks and health analyses for servers */
+	long long down_trans;                   /* up->down transitions */
+
+	unsigned int q_time, c_time, d_time, t_time; /* sums of queue_time, conn_time, data_time, total_time */
+	unsigned int qtime_max, ctime_max, dtime_max, ttime_max; /* maximum of queue_time, conn_time, data_time, total_time observed */
+
+	union {
+		struct {
+			long long cum_req;      /* cumulated number of processed HTTP requests */
+			long long comp_rsp;     /* number of compressed responses */
+			unsigned int rps_max;   /* maximum of new HTTP requests per second observed */
+			long long rsp[6];       /* http response codes */
+			long long cache_lookups;/* cache lookups */
+			long long cache_hits;   /* cache hits */
+		} http;
+	} p;                                    /* protocol-specific stats */
+};
+
+#endif /* _HAPROXY_COUNTERS_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/cpuset-t.h b/include/haproxy/cpuset-t.h
new file mode 100644
index 0000000..d3ebb35
--- /dev/null
+++ b/include/haproxy/cpuset-t.h
@@ -0,0 +1,54 @@
+#ifndef _HAPROXY_CPUSET_T_H
+#define _HAPROXY_CPUSET_T_H
+
+#define _GNU_SOURCE
+#include <sched.h>
+
+#if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
+#include <sys/param.h>
+#ifdef __FreeBSD__
+#include <sys/_cpuset.h>
+#include <sys/cpuset.h>
+#include <sys/sysctl.h>
+#include <strings.h>
+#endif
+#endif
+
+#include <haproxy/api-t.h>
+
+#if defined(__linux__) || defined(__DragonFly__) || \
+ (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
+
+# define CPUSET_REPR cpu_set_t
+# define CPUSET_USE_CPUSET
+
+#elif defined(__FreeBSD__) || defined(__NetBSD__)
+
+# define CPUSET_REPR cpuset_t
+
+# if defined(__FreeBSD__) && __FreeBSD_version >= 1301000
+# define CPUSET_USE_CPUSET
+# else
+# define CPUSET_USE_FREEBSD_CPUSET
+# endif
+
+#elif defined(__APPLE__)
+
+# define CPUSET_REPR unsigned long
+# define CPUSET_USE_ULONG
+
+#else
+
+# error "No cpuset support implemented on this platform"
+
+#endif
+
+/* portable CPU set: wraps the platform-specific representation selected by
+ * the CPUSET_REPR definition above (cpu_set_t, cpuset_t or unsigned long).
+ */
+struct hap_cpuset {
+	CPUSET_REPR cpuset;
+};
+
+/* per-thread-group CPU binding map */
+struct cpu_map {
+	struct hap_cpuset thread[MAX_THREADS_PER_GROUP]; /* list of CPU masks for the 32/64 threads of this group */
+};
+
+#endif /* _HAPROXY_CPUSET_T_H */
diff --git a/include/haproxy/cpuset.h b/include/haproxy/cpuset.h
new file mode 100644
index 0000000..87c4ece
--- /dev/null
+++ b/include/haproxy/cpuset.h
@@ -0,0 +1,76 @@
+#ifndef _HAPROXY_CPUSET_H
+#define _HAPROXY_CPUSET_H
+
+#include <haproxy/cpuset-t.h>
+
+extern struct cpu_map *cpu_map;
+
+/* Unset all indexes in <set>.
+ */
+void ha_cpuset_zero(struct hap_cpuset *set);
+
+/* Set <cpu> index in <set> if not present.
+ * Returns 0 on success otherwise non-zero.
+ */
+int ha_cpuset_set(struct hap_cpuset *set, int cpu);
+
+/* Clear <cpu> index in <set> if present.
+ * Returns 0 on success otherwise non-zero.
+ */
+int ha_cpuset_clr(struct hap_cpuset *set, int cpu);
+
+/* Bitwise and equivalent operation between <src> and <dst> stored in <dst>.
+ */
+void ha_cpuset_and(struct hap_cpuset *dst, struct hap_cpuset *src);
+
+/* Bitwise OR equivalent operation between <src> and <dst> stored in <dst>.
+ */
+void ha_cpuset_or(struct hap_cpuset *dst, struct hap_cpuset *src);
+
+/* returns non-zero if CPU index <cpu> is set in <set>, otherwise 0. */
+int ha_cpuset_isset(const struct hap_cpuset *set, int cpu);
+
+/* Returns the count of set index in <set>.
+ */
+int ha_cpuset_count(const struct hap_cpuset *set);
+
+/* Returns the first index set plus one in <set> starting from the lowest.
+ * Returns 0 if no index set.
+ * Do not forget to subtract the result by one if using it for set/clr.
+ */
+int ha_cpuset_ffs(const struct hap_cpuset *set);
+
+/* Copy <src> set into <dst>.
+ */
+void ha_cpuset_assign(struct hap_cpuset *dst, struct hap_cpuset *src);
+
+/* Returns the biggest index plus one usable on the platform.
+ */
+int ha_cpuset_size(void);
+
+/* Detects CPUs that are bound to the current process. Returns the number of
+ * CPUs detected or 0 if the detection failed.
+ */
+int ha_cpuset_detect_bound(struct hap_cpuset *set);
+
+/* Parse cpu sets. Each CPU set is either a unique number between 0 and
+ * ha_cpuset_size() - 1 or a range with two such numbers delimited by a dash
+ * ('-'). Each CPU set can be a list of unique numbers or ranges separated by
+ * a comma. It is also possible to specify multiple cpu numbers or ranges in
+ * distinct arguments in <args>. On success, it returns 0, otherwise it returns
+ * 1 with an error message in <err>.
+ */
+int parse_cpu_set(const char **args, struct hap_cpuset *cpu_set, char **err);
+
+/* Parse a linux cpu map string representing to a numeric cpu mask map
+ * The cpu map string is a list of 4-byte hex strings separated by commas, with
+ * most-significant byte first, one bit per cpu number.
+ */
+void parse_cpumap(char *cpumap_str, struct hap_cpuset *cpu_set);
+
+/* Returns true if at least one cpu-map directive was configured, otherwise
+ * false.
+ */
+int cpu_map_configured(void);
+
+#endif /* _HAPROXY_CPUSET_H */
diff --git a/include/haproxy/debug.h b/include/haproxy/debug.h
new file mode 100644
index 0000000..b7a2e20
--- /dev/null
+++ b/include/haproxy/debug.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/debug.h
+ * This file contains some macros to help debugging.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DEBUG_H
+#define _HAPROXY_DEBUG_H
+
+struct task;
+struct buffer;
+extern unsigned int debug_commands_issued;
+void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx);
+void ha_thread_dump_one(int thr, int from_signal);
+void ha_thread_dump(struct buffer *buf, int thr);
+void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump);
+void ha_backtrace_to_stderr(void);
+void ha_panic(void);
+
+void post_mortem_add_component(const char *name, const char *version,
+ const char *toolchain, const char *toolchain_opts,
+ const char *build_settings, const char *path);
+
+#endif /* _HAPROXY_DEBUG_H */
diff --git a/include/haproxy/defaults.h b/include/haproxy/defaults.h
new file mode 100644
index 0000000..7430c61
--- /dev/null
+++ b/include/haproxy/defaults.h
@@ -0,0 +1,533 @@
+/*
+ * include/haproxy/defaults.h
+ * Miscellaneous default values.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DEFAULTS_H
+#define _HAPROXY_DEFAULTS_H
+
+/* MAX_THREADS defines the highest limit for the global nbthread value. It
+ * defaults to the number of bits in a long integer when threads are enabled
+ * but may be lowered to save resources on embedded systems.
+ */
+#ifndef USE_THREAD
+/* threads disabled, 1 thread max, 1 group max (note: group ids start at 1) */
+#define MAX_THREADS 1
+
+#define MAX_TGROUPS 1
+#define MAX_THREADS_PER_GROUP 1
+
+#else
+
+/* theoretical limit is 64, though we'd rather not push it too far for now
+ * as some structures might be enlarged to be indexed per group. Let's start
+ * with 16 groups max, allowing to experiment with dual-socket machines
+ * suffering from up to 8 loosely coupled L3 caches. It's a good start and
+ * doesn't engage us too far.
+ */
+#ifndef MAX_TGROUPS
+#define MAX_TGROUPS 16
+#endif
+
+#define MAX_THREADS_PER_GROUP __WORDSIZE
+
+/* threads enabled, max_threads defaults to long bits for 1 tgroup or 4 times
+ * long bits if more tgroups are enabled.
+ */
+#ifndef MAX_THREADS
+#define MAX_THREADS ((((MAX_TGROUPS) > 1) ? 4 : 1) * (MAX_THREADS_PER_GROUP))
+#endif
+
+#endif // USE_THREAD
+
+/*
+ * BUFSIZE defines the size of a read and write buffer. It is the maximum
+ * amount of bytes which can be stored by the proxy for each stream. However,
+ * when reading HTTP headers, the proxy needs some spare space to add or rewrite
+ * headers if needed. The size of this spare is defined with MAXREWRITE. So it
+ * is not possible to process headers longer than BUFSIZE-MAXREWRITE bytes. By
+ * default, BUFSIZE=16384 bytes and MAXREWRITE=min(1024,BUFSIZE/2), so the
+ * maximum length of headers accepted is 15360 bytes.
+ */
+#ifndef BUFSIZE
+#define BUFSIZE 16384
+#endif
+
+/* certain buffers may only be allocated for responses in order to avoid
+ * deadlocks caused by request queuing. 2 buffers is the absolute minimum
+ * acceptable to ensure that a request gaining access to a server can get
+ * a response buffer even if it doesn't completely flush the request buffer.
+ * The worst case is an applet making use of a request buffer that cannot
+ * completely be sent while the server starts to respond, and all unreserved
+ * buffers are allocated by request buffers from pending connections in the
+ * queue waiting for this one to flush. Both reserved buffers may
+ * thus be used at the same time.
+ */
+#ifndef RESERVED_BUFS
+#define RESERVED_BUFS 2
+#endif
+
+// reserved buffer space for header rewriting
+#ifndef MAXREWRITE
+#define MAXREWRITE 1024
+#endif
+
+#ifndef REQURI_LEN
+#define REQURI_LEN 1024
+#endif
+
+#ifndef CAPTURE_LEN
+#define CAPTURE_LEN 64
+#endif
+
+#ifndef MAX_SYSLOG_LEN
+#define MAX_SYSLOG_LEN 1024
+#endif
+
+/* 64kB to archive startup-logs seems way more than enough
+ * /!\ Careful when changing this size, it is used in a shm when exec() from
+ * mworker to wait mode.
+ */
+#ifndef STARTUP_LOG_SIZE
+#define STARTUP_LOG_SIZE 65536
+#endif
+
+// maximum line size when parsing config
+#ifndef LINESIZE
+#define LINESIZE 2048
+#endif
+
+// max # args on a configuration line
+#define MAX_LINE_ARGS 64
+
+// maximum line size when parsing crt-bind-list config
+#define CRT_LINESIZE 65536
+
+// max # args on crt-bind-list configuration line
+#define MAX_CRT_ARGS 2048
+
+// max # args on a command issued on the CLI ("stats socket")
+// This should cover at least 5 + twice the # of data_types
+#define MAX_CLI_ARGS 64
+
+// max recursion levels in config condition evaluations
+// (note that binary operators add one recursion level, and
+// that parenthesis may add two).
+#define MAX_CFG_RECURSION 1024
+
+// max # of matches per regexp
+#define MAX_MATCH 10
+
+// max # of headers in one HTTP request or response
+// By default, about 100 headers (+1 for the first line)
+#ifndef MAX_HTTP_HDR
+#define MAX_HTTP_HDR 101
+#endif
+
+// max # of headers in history when looking for header #-X
+#ifndef MAX_HDR_HISTORY
+#define MAX_HDR_HISTORY 10
+#endif
+
+// max length of a TRACE_PRINTF() output buffer (one less char for the message)
+#ifndef TRACE_MAX_MSG
+#define TRACE_MAX_MSG 1024
+#endif
+
+// max # of stick counters per session (at least 3 for sc0..sc2)
+#ifndef MAX_SESS_STKCTR
+#define MAX_SESS_STKCTR 3
+#endif
+
+// max # of extra stick-table data types that can be registered at runtime
+#ifndef STKTABLE_EXTRA_DATA_TYPES
+#define STKTABLE_EXTRA_DATA_TYPES 0
+#endif
+
+// max # of stick-table filter entries that can be used during dump
+#ifndef STKTABLE_FILTER_LEN
+#define STKTABLE_FILTER_LEN 4
+#endif
+
+// max # of loops we can perform around a read() which succeeds.
+// It's very frequent that the system returns a few TCP segments at a time.
+#ifndef MAX_READ_POLL_LOOPS
+#define MAX_READ_POLL_LOOPS 4
+#endif
+
+// minimum number of bytes read at once above which we don't try to read
+// more, in order not to risk facing an EAGAIN. Most often, if we read
+// at least 10 kB, we can consider that the system has tried to read a
+// full buffer and got multiple segments (>1 MSS for jumbo frames, >7 MSS
+// for normal frames) and did not bother truncating the last segment.
+#ifndef MIN_RECV_AT_ONCE_ENOUGH
+#define MIN_RECV_AT_ONCE_ENOUGH (7*1448)
+#endif
+
+// The minimum number of bytes to be forwarded that is worth trying to splice.
+// Below 4kB, it's not worth allocating pipes nor pretending to zero-copy.
+#ifndef MIN_SPLICE_FORWARD
+#define MIN_SPLICE_FORWARD 4096
+#endif
+
+// the max number of events returned in one call to poll/epoll. Too small a
+// value will cause lots of calls, and too high a value may cause high latency.
+#ifndef MAX_POLL_EVENTS
+#define MAX_POLL_EVENTS 200
+#endif
+
+/* eternity when expressed in timeval */
+#ifndef TV_ETERNITY
+#define TV_ETERNITY (~0UL)
+#endif
+
+/* eternity when expressed in ms */
+#ifndef TV_ETERNITY_MS
+#define TV_ETERNITY_MS (-1)
+#endif
+
+/* delay between boot and first time wrap, in seconds */
+#ifndef BOOT_TIME_WRAP_SEC
+#define BOOT_TIME_WRAP_SEC 20
+#endif
+/* we want to be able to detect time jumps. Fix the maximum wait time to a low
+ * value so that we know the time has changed if we wait longer.
+ */
+#ifndef MAX_DELAY_MS
+#define MAX_DELAY_MS 60000
+#endif
+
+// The maximum number of connections accepted at once by a thread for a single
+// listener. It used to default to 64 divided by the number of processes but
+// the tasklet-based model is much more scalable and benefits from smaller
+// values. Experimentation has shown that 4 gives the highest accept rate for
+// all thread values, and that 3 and 5 come very close, as shown below (HTTP/1
+// connections forwarded per second at multi-accept 4 and 64):
+//
+// ac\thr| 1 2 4 8 16
+// ------+------------------------------
+// 4| 80k 106k 168k 270k 336k
+// 64| 63k 89k 145k 230k 274k
+//
+#ifndef MAX_ACCEPT
+#define MAX_ACCEPT 4
+#endif
+
+// The base max number of tasks to run at once to be used when not set by
+// tune.runqueue-depth. It will automatically be divided by the square root
+// of the number of threads for better fairness. As such, 64 threads will
+// use 35 and a single thread will use 280.
+#ifndef RUNQUEUE_DEPTH
+#define RUNQUEUE_DEPTH 280
+#endif
+
+// cookie delimiter in "prefix" mode. This character is inserted between the
+// persistence cookie and the original value. The '~' is allowed by RFC6265,
+// and should not be too common in server names.
+#ifndef COOKIE_DELIM
+#define COOKIE_DELIM '~'
+#endif
+
+// this delimiter is used between a server's name and a last visit date in
+// cookies exchanged with the client.
+#ifndef COOKIE_DELIM_DATE
+#define COOKIE_DELIM_DATE '|'
+#endif
+
+// Max number of acl() sample fetch recursive evaluations, to avoid deep tree
+// loops.
+#ifndef ACL_MAX_RECURSE
+#define ACL_MAX_RECURSE 1000
+#endif
+
+#define CONN_RETRIES 3
+
+#define CHK_CONNTIME 2000
+#define DEF_CHKINTR 2000
+#define DEF_MAILALERTTIME 10000
+#define DEF_FALLTIME 3
+#define DEF_RISETIME 2
+#define DEF_AGENT_FALLTIME 1
+#define DEF_AGENT_RISETIME 1
+#define DEF_CHECK_PATH ""
+
+
+#define DEF_HANA_ONERR HANA_ONERR_FAILCHK
+#define DEF_HANA_ERRLIMIT 10
+
+// X-Forwarded-For header default
+#define DEF_XFORWARDFOR_HDR "X-Forwarded-For"
+
+// X-Original-To header default
+#define DEF_XORIGINALTO_HDR "X-Original-To"
+
+/* Max number of events that may be processed at once by
+ * an event_hdl API consumer to prevent thread contention.
+ */
+#ifndef EVENT_HDL_MAX_AT_ONCE
+#define EVENT_HDL_MAX_AT_ONCE 100
+#endif
+
+/* Default connections limit.
+ *
+ * A system limit can be enforced at build time in order to avoid using haproxy
+ * beyond reasonable system limits. For this, just define SYSTEM_MAXCONN to the
+ * absolute limit accepted by the system. If the configuration specifies a
+ * higher value, it will be capped to SYSTEM_MAXCONN and a warning will be
+ * emitted. The only way to override this limit will be to set it via the
+ * command-line '-n' argument. If SYSTEM_MAXCONN is not set, a minimum value
+ * of 100 will be used for DEFAULT_MAXCONN which almost guarantees that a
+ * process will correctly start in any situation.
+ */
+#ifdef SYSTEM_MAXCONN
+#undef DEFAULT_MAXCONN
+#define DEFAULT_MAXCONN SYSTEM_MAXCONN
+#elif !defined(DEFAULT_MAXCONN)
+#define DEFAULT_MAXCONN 100
+#endif
+
+/* Define a maxconn which will be used in the master process once it re-executes to
+ * the MODE_MWORKER_WAIT and won't change when SYSTEM_MAXCONN is set.
+ *
+ * 100 must be enough for the master since it only does communication between
+ * the master and the workers, and the master CLI.
+ */
+#ifndef MASTER_MAXCONN
+#define MASTER_MAXCONN 100
+#endif
+
+/* Minimum check interval for spread health checks. Servers with intervals
+ * greater than or equal to this value will have their checks spread apart
+ * and will be considered when searching the minimal interval.
+ * Others will be ignored for the minimal interval and will have their checks
+ * scheduled on a different basis.
+ */
+#ifndef SRV_CHK_INTER_THRES
+#define SRV_CHK_INTER_THRES 1000
+#endif
+
+/* Specifies the string used to report the version and release date on the
+ * statistics page. May be defined to the empty string ("") to permanently
+ * disable the feature.
+ */
+#ifndef STATS_VERSION_STRING
+#define STATS_VERSION_STRING " version " HAPROXY_VERSION ", released " HAPROXY_DATE
+#endif
+
+/* This is the default statistics URI */
+#ifdef CONFIG_STATS_DEFAULT_URI
+#define STATS_DEFAULT_URI CONFIG_STATS_DEFAULT_URI
+#else
+#define STATS_DEFAULT_URI "/haproxy?stats"
+#endif
+
+/* This is the default statistics realm */
+#ifdef CONFIG_STATS_DEFAULT_REALM
+#define STATS_DEFAULT_REALM CONFIG_STATS_DEFAULT_REALM
+#else
+#define STATS_DEFAULT_REALM "HAProxy Statistics"
+#endif
+
+/* Maximum signal queue size, and also number of different signals we can
+ * handle.
+ */
+#ifndef MAX_SIGNAL
+#define MAX_SIGNAL 256
+#endif
+
+/* Maximum host name length */
+#ifndef MAX_HOSTNAME_LEN
+#ifdef MAXHOSTNAMELEN
+#define MAX_HOSTNAME_LEN MAXHOSTNAMELEN
+#else
+#define MAX_HOSTNAME_LEN 64
+#endif // MAXHOSTNAMELEN
+#endif // MAX_HOSTNAME_LEN
+
+/* Maximum health check description length */
+#ifndef HCHK_DESC_LEN
+#define HCHK_DESC_LEN 128
+#endif
+
+/* ciphers used as defaults on connect */
+#ifndef CONNECT_DEFAULT_CIPHERS
+#define CONNECT_DEFAULT_CIPHERS NULL
+#endif
+
+/* ciphers used as defaults on TLS 1.3 connect */
+#ifndef CONNECT_DEFAULT_CIPHERSUITES
+#define CONNECT_DEFAULT_CIPHERSUITES NULL
+#endif
+
+/* ciphers used as defaults on listeners */
+#ifndef LISTEN_DEFAULT_CIPHERS
+#define LISTEN_DEFAULT_CIPHERS NULL
+#endif
+
+/* cipher suites used as defaults on TLS 1.3 listeners */
+#ifndef LISTEN_DEFAULT_CIPHERSUITES
+#define LISTEN_DEFAULT_CIPHERSUITES NULL
+#endif
+
+/* named curve used as defaults for ECDHE ciphers */
+#ifndef ECDHE_DEFAULT_CURVE
+#define ECDHE_DEFAULT_CURVE "prime256v1"
+#endif
+
+/* ssl cache size */
+#ifndef SSLCACHESIZE
+#define SSLCACHESIZE 20000
+#endif
+
+/* ssl max dh param size */
+#ifndef SSL_DEFAULT_DH_PARAM
+#define SSL_DEFAULT_DH_PARAM 0
+#endif
+
+/* max memory cost per SSL session */
+#ifndef SSL_SESSION_MAX_COST
+#define SSL_SESSION_MAX_COST (16*1024) // measured
+#endif
+
+/* max memory cost per SSL handshake (on top of session) */
+#ifndef SSL_HANDSHAKE_MAX_COST
+#define SSL_HANDSHAKE_MAX_COST (76*1024) // measured
+#endif
+
+#ifndef DEFAULT_SSL_CTX_CACHE
+#define DEFAULT_SSL_CTX_CACHE 1000
+#endif
+
+/* approximate stream size (for maxconn estimate) */
+#ifndef STREAM_MAX_COST
+#define STREAM_MAX_COST (sizeof(struct stream) + \
+ 2 * sizeof(struct channel) + \
+ 2 * sizeof(struct connection) + \
+ global.tune.requri_len + \
+ 2 * global.tune.cookie_len)
+#endif
+
+/* available memory estimate : count about 3% of overhead in various structures */
+#ifndef MEM_USABLE_RATIO
+#define MEM_USABLE_RATIO 0.97
+#endif
+
+/* if not 0, maximum allocatable memory per process in MB */
+#ifndef HAPROXY_MEMMAX
+#define HAPROXY_MEMMAX 0
+#endif
+
+/* For USE_ZLIB, DEFAULT_MAXZLIBMEM may be set to a hard-coded value that will
+ * preset a maxzlibmem value. Just leave it to zero for other configurations.
+ * Note that it's expressed in megabytes.
+ */
+#if !defined(DEFAULT_MAXZLIBMEM) || !defined(USE_ZLIB)
+#undef DEFAULT_MAXZLIBMEM
+#define DEFAULT_MAXZLIBMEM 0
+#endif
+
+/* On modern architectures with many threads, a fast memory allocator, and
+ * local pools, the global pools with their single list can be way slower than
+ * the standard allocator which already has its own per-thread arenas. In this
+ * case we disable global pools. The global pools may still be enforced
+ * using CONFIG_HAP_GLOBAL_POOLS though.
+ */
+#if defined(USE_THREAD) && defined(HA_HAVE_FAST_MALLOC) && !defined(CONFIG_HAP_GLOBAL_POOLS)
+#define CONFIG_HAP_NO_GLOBAL_POOLS
+#endif
+
+/* default per-thread pool cache size when enabled */
+#ifndef CONFIG_HAP_POOL_CACHE_SIZE
+#define CONFIG_HAP_POOL_CACHE_SIZE 524288
+#endif
+
+#ifndef CONFIG_HAP_POOL_CLUSTER_SIZE
+#define CONFIG_HAP_POOL_CLUSTER_SIZE 8
+#endif
+
+/* number of bits to encode the per-pool buckets for large setups */
+#ifndef CONFIG_HAP_POOL_BUCKETS_BITS
+# if defined(USE_THREAD) && MAX_THREADS >= 512
+# define CONFIG_HAP_POOL_BUCKETS_BITS 6
+# elif defined(USE_THREAD) && MAX_THREADS >= 128
+# define CONFIG_HAP_POOL_BUCKETS_BITS 5
+# elif defined(USE_THREAD) && MAX_THREADS >= 16
+# define CONFIG_HAP_POOL_BUCKETS_BITS 4
+# elif defined(USE_THREAD)
+# define CONFIG_HAP_POOL_BUCKETS_BITS 3
+# else
+# define CONFIG_HAP_POOL_BUCKETS_BITS 0
+# endif
+#endif
+
+#define CONFIG_HAP_POOL_BUCKETS (1UL << (CONFIG_HAP_POOL_BUCKETS_BITS))
+
+/* Number of samples used to compute the times reported in stats. A power of
+ * two is highly recommended, and this value multiplied by the largest response
+ * time must not overflow an unsigned int. See freq_ctr.h for more information.
+ * We consider that values are accurate to 95% with two batches of samples below,
+ * so in order to advertise accurate times across 1k samples, we effectively
+ * measure over 512.
+ */
+#ifndef TIME_STATS_SAMPLES
+#define TIME_STATS_SAMPLES 512
+#endif
+
+/* max ocsp cert id asn1 encoded length */
+#ifndef OCSP_MAX_CERTID_ASN1_LENGTH
+#define OCSP_MAX_CERTID_ASN1_LENGTH 128
+#endif
+
+#ifndef OCSP_MAX_RESPONSE_TIME_SKEW
+#define OCSP_MAX_RESPONSE_TIME_SKEW 300
+#endif
+
+/* Number of TLS tickets to check, used for rotation */
+#ifndef TLS_TICKETS_NO
+#define TLS_TICKETS_NO 3
+#endif
+
+/* pattern lookup default cache size, in number of entries :
+ * 10k entries at 10k req/s mean 1% risk of a collision after 60 years, that's
+ * already much less than the memory's reliability in most machines and more
+ * durable than most admin's life expectancy. A collision will result in a
+ * valid result to be returned for a different entry from the same list.
+ */
+#ifndef DEFAULT_PAT_LRU_SIZE
+#define DEFAULT_PAT_LRU_SIZE 10000
+#endif
+
+/* maximum number of pollers that may be registered */
+#ifndef MAX_POLLERS
+#define MAX_POLLERS 10
+#endif
+
+/* system sysfs directory */
+#define NUMA_DETECT_SYSTEM_SYSFS_PATH "/sys/devices/system"
+
+/* Number of cache trees */
+#ifndef CACHE_TREE_NUM
+# if defined(USE_THREAD)
+# define CACHE_TREE_NUM 8
+# else
+# define CACHE_TREE_NUM 1
+# endif
+#endif
+
+#endif /* _HAPROXY_DEFAULTS_H */
diff --git a/include/haproxy/dgram-t.h b/include/haproxy/dgram-t.h
new file mode 100644
index 0000000..4e4c2af
--- /dev/null
+++ b/include/haproxy/dgram-t.h
@@ -0,0 +1,53 @@
+/*
+ * include/haproxy/dgram-t.h
+ * This file provides structures and types for datagram processing
+ *
+ * Copyright (C) 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HAPROXY_DGRAM_T_H
+#define _HAPROXY_HAPROXY_DGRAM_T_H
+
+#include <arpa/inet.h>
+
+/*
+ * datagram related structure
+ */
+struct dgram_conn {
+ __decl_thread(HA_SPINLOCK_T lock);
+ const struct dgram_data_cb *data; /* data layer callbacks. Must be set before */
+ void *owner; /* pointer to upper layer's entity */
+ union { /* definitions which depend on connection type */
+ struct { /*** information used by socket-based dgram ***/
+ int fd; /* file descriptor */
+ } sock;
+ } t;
+ struct {
+ struct sockaddr_storage from; /* client address, or address to spoof when connecting to the server */
+ struct sockaddr_storage to; /* address reached by the client, or address to connect to */
+ } addr; /* addresses of the remote side, client for producer and server for consumer */
+};
+
+/*
+ * datagram callback structure
+ */
+struct dgram_data_cb {
+ void (*recv)(struct dgram_conn *dgram); /* recv callback */
+ void (*send)(struct dgram_conn *dgram); /* send callback */
+};
+
+#endif /* _HAPROXY_HAPROXY_DGRAM_T_H */
diff --git a/include/haproxy/dgram.h b/include/haproxy/dgram.h
new file mode 100644
index 0000000..92d00ab
--- /dev/null
+++ b/include/haproxy/dgram.h
@@ -0,0 +1,29 @@
+/*
+ * include/haproxy/dgram.h
+ * This file provides functions related to DGRAM processing.
+ *
+ * Copyright (C) 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_DGRAM_H
+#define _HAPROXY_PROTO_DGRAM_H
+
+#include <haproxy/dgram-t.h>
+
+void dgram_fd_handler(int);
+
+#endif // _HAPROXY_PROTO_DGRAM_H
diff --git a/include/haproxy/dict-t.h b/include/haproxy/dict-t.h
new file mode 100644
index 0000000..deaa88d
--- /dev/null
+++ b/include/haproxy/dict-t.h
@@ -0,0 +1,46 @@
+/*
+ * include/haproxy/dict-t.h
+ * Dictionaries - types definitions
+ *
+ * Copyright 2019 Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_DICT_T_H
+#define _HAPROXY_DICT_T_H
+
+#include <import/ebtree-t.h>
+#include <haproxy/api-t.h>
+#include <haproxy/thread-t.h>
+
+struct dict_entry {
+ struct ebpt_node value;
+ unsigned int refcount;
+ size_t len;
+};
+
+struct dict {
+ const char *name;
+ struct eb_root values;
+ __decl_thread(HA_RWLOCK_T rwlock);
+};
+
+#endif /* _HAPROXY_DICT_T_H */
diff --git a/include/haproxy/dict.h b/include/haproxy/dict.h
new file mode 100644
index 0000000..635c3f1
--- /dev/null
+++ b/include/haproxy/dict.h
@@ -0,0 +1,36 @@
+/*
+ * include/haproxy/dict.h
+ * Dictionaries - functions prototypes
+ *
+ * Copyright 2019 Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_DICT_H
+#define _HAPROXY_DICT_H
+
+#include <haproxy/dict-t.h>
+
+struct dict *new_dict(const char *name);
+struct dict_entry *dict_insert(struct dict *d, char *str);
+void dict_entry_unref(struct dict *d, struct dict_entry *de);
+
+#endif /* _HAPROXY_DICT_H */
diff --git a/include/haproxy/dns-t.h b/include/haproxy/dns-t.h
new file mode 100644
index 0000000..1c876e3
--- /dev/null
+++ b/include/haproxy/dns-t.h
@@ -0,0 +1,179 @@
+/*
+ * include/haproxy/dns-t.h
+ * This file provides structures and types for DNS.
+ *
+ * Copyright (C) 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DNS_T_H
+#define _HAPROXY_DNS_T_H
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/connection-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/dgram-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/ring-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread.h>
+
+/* DNS header size */
+#define DNS_HEADER_SIZE ((int)sizeof(struct dns_header))
+
+/* max pending requests per stream */
+#define DNS_STREAM_MAX_PIPELINED_REQ 4
+
+#define DNS_TCP_MSG_MAX_SIZE 65535
+#define DNS_TCP_MSG_RING_MAX_SIZE (1 + 1 + 3 + DNS_TCP_MSG_MAX_SIZE) // varint_bytes(DNS_TCP_MSG_MAX_SIZE) == 3
+
+/* DNS request or response header structure */
+struct dns_header {
+ uint16_t id;
+ uint16_t flags;
+ uint16_t qdcount;
+ uint16_t ancount;
+ uint16_t nscount;
+ uint16_t arcount;
+} __attribute__ ((packed));
+
+/* short structure to describe a DNS question */
+/* NOTE: big endian structure */
+struct dns_question {
+ unsigned short qtype; /* question type */
+ unsigned short qclass; /* query class */
+} __attribute__ ((packed));
+
+
+/* NOTE: big endian structure */
+struct dns_additional_record {
+ uint8_t name; /* domain name, must be 0 (RFC 6891) */
+ uint16_t type; /* record type DNS_RTYPE_OPT (41) */
+ uint16_t udp_payload_size; /* maximum size accepted for the response */
+ uint32_t extension; /* extended rcode and flags, not used for now */
+ uint16_t data_length; /* data length */
+/* as of today, we don't support yet edns options, that said I already put a
+ * placeholder here for this purpose. We may need to define a dns_option_record
+ * structure which itself should point to different type of data, based on the
+ * extension set (client subnet, tcp keepalive, etc...)*/
+} __attribute__ ((packed));
+
+/* Structure describing a name server used during name resolution.
+ * A name server belongs to a resolvers section.
+ */
+struct dns_stream_server {
+ struct server *srv;
+ struct ring *ring_req;
+ int max_slots;
+ int maxconn;
+ int idle_conns;
+ int cur_conns;
+ int max_active_conns;
+ size_t ofs_req; // ring buffer reader offset
+ size_t ofs_rsp; // ring buffer reader offset
+ struct task *task_req; /* req conn management */
+ struct task *task_rsp; /* rsp management */
+ struct task *task_idle; /* handle idle sess */
+ struct list free_sess;
+ struct list idle_sess;
+ struct list wait_sess;
+ __decl_thread(HA_SPINLOCK_T lock); // lock to protect current struct
+};
+
+struct dns_dgram_server {
+ struct dgram_conn conn; /* transport layer */
+ struct ring *ring_req;
+ size_t ofs_req; // ring buffer reader offset
+};
+
+struct dns_query {
+ struct eb32_node qid;
+ uint16_t original_qid;
+ int expire;
+ struct list list;
+};
+
+struct dns_session {
+ struct appctx *appctx; // appctx of current session
+ struct dns_stream_server *dss;
+ uint16_t tx_msg_offset;
+ int nb_queries;
+ int onfly_queries;
+ int query_counter;
+ struct list list;
+ struct list waiter;
+ struct list queries;
+ struct task *task_exp;
+ struct eb_root query_ids; /* tree to quickly lookup/retrieve query ids currently in use */
+ size_t ofs; // ring buffer reader offset
+ struct ring ring;
+ struct {
+ uint16_t len;
+ uint16_t offset;
+ char *area;
+ } rx_msg;
+ unsigned char *tx_ring_area;
+ int shutdown;
+};
+
+/* Structure describing a name server
+ */
+struct dns_nameserver {
+ char *id; /* nameserver unique identifier */
+ void *parent;
+ struct {
+ const char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ } conf; /* config information */
+
+ int (*process_responses)(struct dns_nameserver *ns); /* callback used to process responses */
+ struct dns_dgram_server *dgram; /* used for dgram dns */
+ struct dns_stream_server *stream; /* used for tcp dns */
+
+ EXTRA_COUNTERS(extra_counters);
+ struct dns_counters *counters;
+
+ struct list list; /* nameserver chained list */
+};
+
+/* mixed dns and resolver counters, we will have to split them */
+struct dns_counters {
+ char *id;
+ char *pid;
+ long long sent; /* - queries sent */
+ long long snd_error; /* - sending errors */
+ union {
+ struct {
+ long long valid; /* - valid response */
+ long long update; /* - valid response used to update server's IP */
+ long long cname; /* - CNAME response requiring new resolution */
+ long long cname_error; /* - error when resolving CNAMEs */
+ long long any_err; /* - void response (usually because ANY qtype) */
+ long long nx; /* - NX response */
+ long long timeout; /* - queries which reached timeout */
+ long long refused; /* - queries refused */
+ long long other; /* - other type of response */
+ long long invalid; /* - malformed DNS response */
+ long long too_big; /* - too big response */
+ long long outdated; /* - outdated response (server slower than the other ones) */
+ long long truncated; /* - truncated response */;
+ } resolver;
+	} app; /* application specific counters */
+};
+
+#endif /* _HAPROXY_DNS_T_H */
diff --git a/include/haproxy/dns.h b/include/haproxy/dns.h
new file mode 100644
index 0000000..84181c4
--- /dev/null
+++ b/include/haproxy/dns.h
@@ -0,0 +1,33 @@
+/*
+ * include/haproxy/dns.h
+ * This file provides functions related to DNS protocol
+ *
+ * Copyright (C) 2020 HAProxy Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DNS_H
+#define _HAPROXY_DNS_H
+
+#include <haproxy/dns-t.h>
+#include <haproxy/server-t.h>
+
+int dns_send_nameserver(struct dns_nameserver *ns, void *buf, size_t len);
+ssize_t dns_recv_nameserver(struct dns_nameserver *ns, void *data, size_t size);
+int dns_dgram_init(struct dns_nameserver *ns, struct sockaddr_storage *sk);
+int dns_stream_init(struct dns_nameserver *ns, struct server *s);
+
+#endif // _HAPROXY_DNS_H
diff --git a/include/haproxy/dynbuf-t.h b/include/haproxy/dynbuf-t.h
new file mode 100644
index 0000000..b5545ab
--- /dev/null
+++ b/include/haproxy/dynbuf-t.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/dynbuf-t.h
+ * Structure definitions for dynamic buffer management.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DYNBUF_T_H
+#define _HAPROXY_DYNBUF_T_H
+
+
+/* an element of the <buffer_wq> list. It represents an object that needs to
+ * acquire a buffer to continue its process. */
+struct buffer_wait {
+ void *target; /* The waiting object that should be woken up */
+ int (*wakeup_cb)(void *); /* The function used to wake up the <target>, passed as argument */
+ struct list list; /* Next element in the <buffer_wq> list */
+};
+
+#endif /* _HAPROXY_DYNBUF_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/dynbuf.h b/include/haproxy/dynbuf.h
new file mode 100644
index 0000000..a89800c
--- /dev/null
+++ b/include/haproxy/dynbuf.h
@@ -0,0 +1,131 @@
+/*
+ * include/haproxy/dynbuf.h
+ * Buffer management functions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_DYNBUF_H
+#define _HAPROXY_DYNBUF_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/dynbuf-t.h>
+#include <haproxy/pool.h>
+
+extern struct pool_head *pool_head_buffer;
+
+int init_buffer(void);
+void buffer_dump(FILE *o, struct buffer *b, int from, int to);
+
+/*****************************************************************/
+/* These functions are used to compute various buffer area sizes */
+/*****************************************************************/
+
+/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
+static inline int buffer_almost_full(const struct buffer *buf)
+{
+ if (b_is_null(buf))
+ return 0;
+
+ return b_almost_full(buf);
+}
+
+/**************************************************/
+/* Functions below are used for buffer allocation */
+/**************************************************/
+
+/* Ensures that <buf> is allocated, or allocates it. If no memory is available,
+ * ((char *)1) is assigned instead with a zero size. The allocated buffer is
+ * returned, or NULL in case no memory is available. Since buffers only contain
+ * user data, poisoning is always disabled as it brings no benefit and impacts
+ * performance. Due to the difficult buffer_wait management, they are not
+ * subject to forced allocation failures either.
+ */
+#define b_alloc(_buf) \
+({ \
+ char *_area; \
+ struct buffer *_retbuf = _buf; \
+ \
+ if (!_retbuf->size) { \
+ *_retbuf = BUF_WANTED; \
+ _area = pool_alloc_flag(pool_head_buffer, POOL_F_NO_POISON | POOL_F_NO_FAIL); \
+ if (unlikely(!_area)) { \
+ activity[tid].buf_wait++; \
+ _retbuf = NULL; \
+ } \
+ else { \
+ _retbuf->area = _area; \
+ _retbuf->size = pool_head_buffer->size; \
+ } \
+ } \
+ _retbuf; \
+ })
+
+/* Releases buffer <buf> (no check of emptiness). The buffer's head is marked
+ * empty.
+ */
+#define __b_free(_buf) \
+ do { \
+ char *area = (_buf)->area; \
+ \
+ /* let's first clear the area to save an occasional "show sess all" \
+ * glancing over our shoulder from getting a dangling pointer. \
+ */ \
+ *(_buf) = BUF_NULL; \
+ __ha_barrier_store(); \
+ pool_free(pool_head_buffer, area); \
+ } while (0) \
+
+/* Releases buffer <buf> if allocated, and marks it empty. */
+#define b_free(_buf) \
+ do { \
+ if ((_buf)->size) \
+ __b_free((_buf)); \
+ } while (0)
+
+/* Offer one or multiple buffers currently belonging to target <from> to whoever
+ * needs one. Any pointer is valid for <from>, including NULL. Its purpose is
+ * to avoid passing a buffer to oneself in case of failed allocations (e.g.
+ * need two buffers, get one, fail, release it and wake up self again). In case
+ * of normal buffer release where it is expected that the caller is not waiting
+ * for a buffer, NULL is fine. It will wake waiters on the current thread only.
+ */
+void __offer_buffers(void *from, unsigned int count);
+
+static inline void offer_buffers(void *from, unsigned int count)
+{
+ if (!LIST_ISEMPTY(&th_ctx->buffer_wq))
+ __offer_buffers(from, count);
+}
+
+
+#endif /* _HAPROXY_DYNBUF_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/errors.h b/include/haproxy/errors.h
new file mode 100644
index 0000000..c102fed
--- /dev/null
+++ b/include/haproxy/errors.h
@@ -0,0 +1,139 @@
+/*
+ * include/haproxy/errors.h
+ * Global error macros and constants
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_ERRORS_H
+#define _HAPROXY_ERRORS_H
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/obj_type-t.h>
+
+/* These flags may be used in various functions which are called from within
+ * loops (eg: to start all listeners from all proxies). They provide enough
+ * information to let the caller decide what to do. ERR_WARN and ERR_ALERT
+ * do not indicate any error, just that a message has been put in a shared
+ * buffer in order to be displayed by the caller.
+ */
+#define ERR_NONE 0x00 /* no error, no message returned */
+#define ERR_RETRYABLE 0x01 /* retryable error, may be cumulated */
+#define ERR_FATAL 0x02 /* fatal error, may be cumulated */
+#define ERR_ABORT 0x04 /* it's preferable to end any possible loop */
+#define ERR_WARN 0x08 /* a warning message has been returned */
+#define ERR_ALERT 0x10 /* an alert message has been returned */
+
+#define ERR_CODE (ERR_RETRYABLE|ERR_FATAL|ERR_ABORT) /* mask */
+
+extern struct ring *startup_logs;
+
+/* These codes may be used by config parsing functions which detect errors and
+ * which need to inform the upper layer about them. They are all prefixed with
+ * "PE_" for "Parse Error". These codes will probably be extended, and functions
+ * making use of them should be documented as such. Only code PE_NONE (zero) may
+ * indicate a valid condition, all other ones must be caught as errors, even if
+ * unknown by the caller. This must not be used to forward warnings.
+ */
+enum {
+ PE_NONE = 0, /* no error */
+ PE_ENUM_OOR, /* enum data out of allowed range */
+ PE_EXIST, /* trying to create something which already exists */
+ PE_ARG_MISSING, /* mandatory argument not provided */
+ PE_ARG_NOT_USED, /* argument provided cannot be used */
+ PE_ARG_INVC, /* invalid char in argument (pointer not provided) */
+ PE_ARG_INVC_PTR, /* invalid char in argument (pointer provided) */
+ PE_ARG_NOT_FOUND, /* argument references something not found */
+ PE_ARG_VALUE_OOR, /* argument value is out of range */
+};
+
+
+void usermsgs_clr(const char *prefix);
+int usermsgs_empty(void);
+const char *usermsgs_str(void);
+extern uint tot_warnings;
+
+/************ Error reporting functions ***********/
+
+struct usermsgs_ctx {
+ struct buffer str;
+
+ const char *prefix; /* prefix of every output */
+ const char *file; /* related filename for config parsing */
+ int line; /* related line number for config parsing */
+ enum obj_type *obj; /* related proxy, server, ... */
+};
+void set_usermsgs_ctx(const char *file, int line, enum obj_type *obj);
+void register_parsing_obj(enum obj_type *obj);
+void reset_usermsgs_ctx(void);
+
+/*
+ * Displays the message on stderr with the date and pid. Overrides the quiet
+ * mode during startup.
+ */
+void ha_alert(const char *fmt, ...)
+ __attribute__ ((format(printf, 1, 2)));
+
+/*
+ * Displays the message on stderr with the date and pid.
+ */
+void ha_warning(const char *fmt, ...)
+ __attribute__ ((format(printf, 1, 2)));
+
+/*
+ * These functions are reserved to output diagnostics on MODE_DIAG.
+ * Use the underscore variants only if MODE_DIAG has already been checked.
+ */
+void _ha_vdiag_warning(const char *fmt, va_list argp);
+void _ha_diag_warning(const char *fmt, ...);
+void ha_diag_warning(const char *fmt, ...)
+ __attribute__ ((format(printf, 1 ,2)));
+
+/* Check for both MODE_DIAG and <cond> before outputting a diagnostic warning */
+#define HA_DIAG_WARNING_COND(cond, fmt, ...) \
+ do { \
+ if ((global.mode & MODE_DIAG) && (cond)) \
+ _ha_diag_warning((fmt), ##__VA_ARGS__); \
+ } while (0)
+
+/*
+ * Displays the message on stderr with the date and pid.
+ */
+void ha_notice(const char *fmt, ...)
+ __attribute__ ((format(printf, 1, 2)));
+
+/*
+ * Displays the message on <out> only if quiet mode is not set.
+ */
+void qfprintf(FILE *out, const char *fmt, ...)
+ __attribute__ ((format(printf, 2, 3)));
+
+void startup_logs_init();
+struct ring *startup_logs_dup(struct ring *src);
+void startup_logs_free(struct ring *r);
+
+#endif /* _HAPROXY_ERRORS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/event_hdl-t.h b/include/haproxy/event_hdl-t.h
new file mode 100644
index 0000000..d499852
--- /dev/null
+++ b/include/haproxy/event_hdl-t.h
@@ -0,0 +1,295 @@
+/*
+ * include/haproxy/event_hdl-t.h
+ * event handlers management definitions
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_EVENT_HDL_T_H
+# define _HAPROXY_EVENT_HDL_T_H
+
+#include <stdint.h>
+#include <sys/time.h>
+
+#include <haproxy/api-t.h>
+
+/* event data struct are defined as followed */
+struct event_hdl_cb_data_template {
+ struct {
+ /* safe data can be safely used from both
+ * sync and async handlers
+ * data consistency is guaranteed
+ */
+ } safe;
+ struct {
+ /* unsafe data may only be used from sync handlers:
+ * in async mode, data consistency cannot be guaranteed
+ * and unsafe data may already be stale, thus using
+ * it is highly discouraged because it
+ * could lead to undefined behavior (UAF, null dereference...)
+ */
+ } unsafe;
+};
+
+/* event_hdl tunables */
+struct event_hdl_tune {
+ unsigned int max_events_at_once;
+};
+
+/* FIXME: adjust if needed! Should be large enough
+ * to support every struct event_hdl_cb_data_x types
+ * BUG_ON check in publish/async_mode and static assert
+ * in EVENT_HDL_CB_DATA will ensure this
+ */
+#define EVENT_HDL_ASYNC_EVENT_DATA (768)
+/* used internally to store a single copy of event data when dealing with
+ * async handlers.
+ * The same copy can be provided to multiple handlers to prevent memory waste:
+ * refcount is used to keep track of references so that
+ * data can be freed when not used anymore
+ */
+typedef void (*event_hdl_data_free)(const void *data);
+struct event_hdl_async_event_data
+{
+ /* internal storage */
+ char data[EVENT_HDL_ASYNC_EVENT_DATA];
+ /* user-provided free function if event data relies on
+ * dynamic members that require specific cleanup
+ */
+ event_hdl_data_free mfree;
+ uint32_t refcount;
+};
+
+/* type for storing event subscription type */
+struct event_hdl_sub_type
+{
+ /* up to 256 families, non cumulative, adjust if needed */
+ uint8_t family;
+ /* up to 16 sub types using bitmasks, adjust if needed */
+ uint16_t subtype;
+};
+
+struct event_hdl_sub_list_head {
+ struct mt_list head;
+ struct mt_list known; /* api uses this to track known subscription lists */
+};
+
+/* event_hdl_sub_list is an alias (please use this for portability) */
+typedef struct event_hdl_sub_list_head event_hdl_sub_list;
+
+struct event_hdl_async_equeue_head {
+ struct mt_list head;
+ uint32_t size; /* near realtime size, not fully synced with head (to be used as a hint) */
+};
+
+/* event_hdl_async_equeue is an alias to mt_list (please use this for portability) */
+typedef struct event_hdl_async_equeue_head event_hdl_async_equeue;
+
+/* subscription mgmt from event */
+struct event_hdl_sub_mgmt
+{
+ /* manage subscriptions from event
+ * this must not be used directly because locking might be required
+ */
+ struct event_hdl_sub *this;
+	/* safe functions that can be used from event context (sync and async mode) */
+ struct event_hdl_sub_type (*getsub)(const struct event_hdl_sub_mgmt *);
+ int (*resub)(const struct event_hdl_sub_mgmt *, struct event_hdl_sub_type);
+ void (*unsub)(const struct event_hdl_sub_mgmt *);
+};
+
+/* single event structure pushed into async event queue
+ * used by tasks async handlers
+ */
+struct event_hdl_async_event
+{
+ struct mt_list mt_list;
+ struct event_hdl_sub_type type;
+ /* data wrapper - should not be used directly */
+ struct event_hdl_async_event_data *_data;
+ /* for easy data access,
+ * points to _data->data if data is available
+ */
+ void *data;
+ void *private;
+ struct timeval when;
+ struct event_hdl_sub_mgmt sub_mgmt;
+};
+
+/* internal structure provided to function event_hdl_publish()
+ * It contains ptr to data relevant to the event
+ */
+struct event_hdl_cb_data {
+ /* internal use: ptr to struct event_hdl_cb_data_type */
+ void *_ptr;
+	/* internal use: holds actual data size */
+ size_t _size;
+ /* user specified freeing function for event_hdl_cb_data_type
+ * struct members
+ */
+ event_hdl_data_free _mfree;
+};
+
+/* struct provided to event_hdl_cb_* handlers
+ * contains data related to the event
+ * that triggered the handler
+ */
+struct event_hdl_cb
+{
+ /* event type */
+ struct event_hdl_sub_type e_type;
+ /* event data */
+ void *e_data;
+ /* manage the subscription responsible for handing the event to us */
+ const struct event_hdl_sub_mgmt *sub_mgmt;
+
+ /* may be used by sync event handler to ensure
+ * it runs in sync mode, and thus is eligible to access unsafe data.
+ * This could save the day when users are copy-pasting function
+ * logic from a sync handler to an async handler without
+ * taking appropriate precautions and unsafe accesses are performed.
+ * (See EVENT_HDL_ASSERT_SYNC macro API helper)
+ */
+ uint8_t _sync;
+};
+
+/* prototype for event_hdl_cb_sync function pointer */
+typedef void (*event_hdl_cb_sync)(const struct event_hdl_cb *cb, void *private);
+/* prototype for event_hdl_cb async function pointer */
+typedef void (*event_hdl_cb_async)(const struct event_hdl_cb *cb, void *private);
+/* prototype for event_hdl_private_free function pointer */
+typedef void (*event_hdl_private_free)(void *private);
+
+/* tasklet forward declaration */
+struct tasklet;
+/* enum for sync mode */
+enum event_hdl_async_mode
+{
+ EVENT_HDL_ASYNC_MODE_NORMAL = 1,
+ EVENT_HDL_ASYNC_MODE_ADVANCED = 2
+};
+
+/* event hdl, used when subscribing (and then associated with a subscription) */
+struct event_hdl {
+ /* optional unique id (hash) for lookup */
+ uint64_t id;
+ /* handler debug: origin (initial event subscription calling place) */
+ const char *dorigin;
+ /* handler requires async mode:
+ * EVENT_HDL_ASYNC_MODE_NORMAL = normal
+ * EVENT_HDL_ASYNC_MODE_ADVANCED = advanced, single task wakeup
+ */
+ uint8_t async;
+
+ union {
+ event_hdl_cb_sync sync_ptr; /* if !async */
+ event_hdl_cb_async async_ptr; /* only used if async==1 (normal) */
+ };
+
+ /* ptr to async task responsible for consuming events */
+ struct tasklet *async_task;
+ /* used by async tasks to consume pending events */
+ event_hdl_async_equeue *async_equeue;
+ /* function ptr automatically called by:
+ * async task when hdl is unregistered and private is no longer referenced
+ * sync context when unregistering is performed
+ */
+ event_hdl_private_free private_free;
+ /* it is not safe to assume that private will not
+ * be used anymore once hdl is unregistered:
+ * with async handlers, private could still be referenced
+ * in pending events to be consumed later by the task (by design).
+ * If freeing private is needed, you must provide async_private_free
+ * function pointer when registering.
+ * It will be called when private is no longer used
+ * after unregistering hdl to perform private cleanup.
+ * (please use this even in sync mode so that subscription
+ * can easily be turned into async mode later without breaking stuff)
+ */
+ void *private;
+};
+
+/* flags for event_hdl_sub struct (32 bits) */
+#define EHDL_SUB_F_PAUSED 0x0001 /* subscription will temporarily ignore events */
+
+/* list elem: subscription (handler subscribed to specific events)
+ */
+struct event_hdl_sub {
+ struct mt_list mt_list;
+ /* event type subscription */
+ struct event_hdl_sub_type sub;
+ uint32_t flags;
+ /* event handler */
+ struct event_hdl hdl;
+ /* used to guarantee that END event will be delivered
+ * (memory is allocated when registering, no memory failure can occur at runtime)
+ */
+ struct event_hdl_async_event *async_end;
+ /* > 0 : subscription is referenced, don't free yet
+ * use atomic OPS to write and read from it
+ */
+ uint32_t refcount;
+ /* TODO: atomic_call_counter for stats?! */
+};
+
+#define ESUB_INDEX(n) (1 << (n - 1))
+
+#define EVENT_HDL_SUB_TYPE(_family, _type) ((struct event_hdl_sub_type){ .family = _family, .subtype = ESUB_INDEX(_type) })
+#define EVENT_HDL_SUB_FAMILY(_family) ((struct event_hdl_sub_type){ .family = _family, .subtype = ~0 })
+
+#define EVENT_HDL_SUB_NONE ((struct event_hdl_sub_type){ .family = 0, .subtype = 0})
+/* for async tasks: subscription is ending */
+#define EVENT_HDL_SUB_END ((struct event_hdl_sub_type){ .family = 0, .subtype = 1})
+
+/* --------------------------------------- */
+
+/* user defined event types are listed here
+ * please reflect any change in these macros in the subtype map
+ * defined below that is used to perform string to event type and
+ * event type to string conversions
+ */
+
+/* TODO */
+
+/* SERVER FAMILY, provides event_hdl_cb_data_server struct
+ * (will be defined in haproxy/server-t.h)
+ */
+#define EVENT_HDL_SUB_SERVER EVENT_HDL_SUB_FAMILY(1)
+#define EVENT_HDL_SUB_SERVER_ADD EVENT_HDL_SUB_TYPE(1,1)
+#define EVENT_HDL_SUB_SERVER_DEL EVENT_HDL_SUB_TYPE(1,2)
+#define EVENT_HDL_SUB_SERVER_UP EVENT_HDL_SUB_TYPE(1,3)
+#define EVENT_HDL_SUB_SERVER_DOWN EVENT_HDL_SUB_TYPE(1,4)
+/* server state change */
+#define EVENT_HDL_SUB_SERVER_STATE EVENT_HDL_SUB_TYPE(1,5)
+/* server admin change */
+#define EVENT_HDL_SUB_SERVER_ADMIN EVENT_HDL_SUB_TYPE(1,6)
+/* server check-related (agent or health) event */
+#define EVENT_HDL_SUB_SERVER_CHECK EVENT_HDL_SUB_TYPE(1,7)
+/* server inet addr (addr:svc_port tuple) change event */
+#define EVENT_HDL_SUB_SERVER_INETADDR EVENT_HDL_SUB_TYPE(1,8)
+
+/* --------------------------------------- */
+
+/* Please reflect changes above in event_hdl_sub_type_map defined
+ * in event_hdl.c file
+ */
+struct event_hdl_sub_type_map {
+ const char *name;
+ struct event_hdl_sub_type type;
+};
+
+#endif /* _HAPROXY_EVENT_HDL_T_H */
diff --git a/include/haproxy/event_hdl.h b/include/haproxy/event_hdl.h
new file mode 100644
index 0000000..5a7ee66
--- /dev/null
+++ b/include/haproxy/event_hdl.h
@@ -0,0 +1,512 @@
+/*
+ * include/haproxy/event_hdl.h
+ * event handlers management
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_EVENT_HDL_H
+# define _HAPROXY_EVENT_HDL_H
+
+#include <haproxy/event_hdl-t.h>
+#include <haproxy/list.h>
+
+/* preprocessor trick to extract function calling place
+ * __FILE__:__LINE__
+ */
+#define _EVENT_HDL_CALLING_PLACE2(line) #line
+#define _EVENT_HDL_CALLING_PLACE1(line) _EVENT_HDL_CALLING_PLACE2(line)
+#define _EVENT_HDL_CALLING_PLACE __FILE__":"_EVENT_HDL_CALLING_PLACE1(__LINE__)
+
+/* ------ PUBLIC EVENT_HDL API ------ */
+
+/* You will find a lot of useful information/comments in this file, but if you're looking
+ * for a step by step documentation please check out 'doc/internals/api/event_hdl.txt'
+ */
+
+/* Note: API helper macros are used in this file to make event_hdl functions usage
+ * simpler, safer and more consistent between sync mode and async mode
+ */
+
+/* ======================================= EVENT_HDL_SYNC handlers =====================================
+ * must be used only with extreme precautions
+ * sync handlers are directly called under the function that published the event.
+ * Hence, all the processing done within such function will impact the caller.
+ *
+ * For this reason, you must be extremely careful when using sync mode, because trying to lock something
+ * that is already held by the caller, or depending on something external to the current thread will
+ * prevent the caller from running.
+ *
+ * Please consider using async handlers in this case, they are specifically made to solve this limitation.
+ *
+ * On the other hand, sync handlers are really useful when you directly depend on callers' provided data
+ * (example: pointer to data) or you need to perform something before the caller keeps going.
+ * A good example could be a cleanup function that will take care of freeing data, closing fds... related
+ * to event data before caller's flow keeps going (interrupting the process while dealing with the event).
+ */
+
+
+/* ===================================== EVENT_HDL_ASYNC handlers ======================================
+ * async handlers are run in independent tasks, so that the caller (that published the event) can safely
+ * return to its own processing.
+ *
+ * async handlers may access safe event data safely with guaranteed consistency.
+ */
+
+
+/* ================================ IDENTIFIED vs ANONYMOUS EVENT_HDL =================================
+ * When registering a sync or async event handler, you are free to provide a unique identifier (hash).
+ *
+ * id can be computed using event_hdl_id function.
+ *
+ * Not providing an id results in the subscription being considered as anonymous subscription.
+ * 0 is not a valid identifier (should be > 0)
+ *
+ * Identified subscription is guaranteed to be unique for a given subscription list,
+ * whereas anonymous subscriptions don't provide such guarantees.
+ *
+ * Identified subscriptions provide the ability to be later queried or unregistered from external code
+ * using dedicated id/hash for the lookups.
+ *
+ * On the other hand, anonymous subscriptions don't, the only other way to reference an anonymous subscription
+ * is to use a subscription pointer.
+ *
+ */
+
+/* general purpose hashing function when you want to compute
+ * an ID based on <scope> x <name>
+ * It is your responsibility to make sure <scope> is not used
+ * elsewhere in the code (or that you are fine with sharing
+ * the scope).
+ */
+uint64_t event_hdl_id(const char *scope, const char *name);
+
+/* ------ EVENT SUBSCRIPTIONS FUNCTIONS ------ */
+
+/* macro helper:
+ * sync version
+ *
+ * identified subscription
+ *
+ * <_id>: subscription id that could be used later
+ * to perform subscription lookup by id
+ * <func>: pointer to 'event_hdl_cb_sync' prototyped function
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> when unsubscription is performed
+ */
+#define EVENT_HDL_ID_SYNC(_id, func, _private, _private_free) \
+ (struct event_hdl){ .id = _id, \
+ .dorigin = _EVENT_HDL_CALLING_PLACE, \
+ .async = 0, \
+ .sync_ptr = func, \
+ .private = _private, \
+ .private_free = _private_free }
+
+/* macro helper:
+ * sync version
+ *
+ * anonymous subscription (no lookup by id)
+ *
+ * <func>: pointer to 'event_hdl_cb_sync' prototyped function
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> when unsubscription is performed
+ */
+#define EVENT_HDL_SYNC(func, _private, _private_free) \
+ EVENT_HDL_ID_SYNC(0, func, _private, _private_free)
+
+/* macro helper:
+ * async version
+ *
+ * identified subscription
+ *
+ * <_id>: subscription id that could be used later
+ * to perform subscription lookup by id
+ * <func>: pointer to 'event_hdl_cb_sync' prototyped function
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> after unsubscription is performed,
+ * when no more events can refer to <private>.
+ */
+#define EVENT_HDL_ID_ASYNC(_id, func, _private, _private_free) \
+ (struct event_hdl){ .id = _id, \
+ .dorigin = _EVENT_HDL_CALLING_PLACE, \
+ .async = EVENT_HDL_ASYNC_MODE_NORMAL, \
+ .async_ptr = func, \
+ .private = _private, \
+ .private_free = _private_free }
+
+/* macro helper:
+ * async version
+ *
+ * anonymous subscription (no lookup by id)
+ *
+ * <func>: pointer to 'event_hdl_cb_sync' prototyped function
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> after unsubscription is performed,
+ * when no more events can refer to <private>.
+ */
+#define EVENT_HDL_ASYNC(func, _private, _private_free) \
+ EVENT_HDL_ID_ASYNC(0, func, _private, _private_free)
+
+/* macro helper:
+ * async version
+ * same as EVENT_HDL_ID_ASYNC - advanced mode:
+ * you directly provide task and event_queue list.
+ *
+ * identified subscription
+ *
+ * <_id>: subscription id that could be used later
+ * to perform subscription lookup by id
+ * <equeue>: pointer to event_hdl_async_event queue where the pending
+ * events will be pushed. Cannot be NULL.
+ * <task>: pointer to task(let) responsible for consuming the events.
+ *           Cannot be NULL.
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> after unsubscription is performed,
+ * when no more events can refer to <private>.
+ */
+#define EVENT_HDL_ID_ASYNC_TASK(_id, equeue, task, _private, _private_free) \
+ (struct event_hdl){ .id = _id, \
+ .dorigin = _EVENT_HDL_CALLING_PLACE, \
+ .async = EVENT_HDL_ASYNC_MODE_ADVANCED, \
+ .async_task = (struct tasklet *)task, \
+ .async_equeue = equeue, \
+ .private = _private, \
+ .private_free = _private_free }
+
+/* macro helper:
+ * async version
+ * same as EVENT_HDL_ASYNC - advanced mode:
+ * you directly provide task and event_queue list.
+ *
+ * anonymous subscription (no lookup by id)
+ *
+ * <equeue>: pointer to event_hdl_async_event queue where the pending
+ * events will be pushed. Cannot be NULL.
+ * <task>: pointer to task(let) responsible for consuming the events
+ * Cannot be NULL.
+ * <_private>: pointer to private data that will be handled to <func>
+ * <_private_free>: pointer to 'event_hdl_private_free' prototyped function
+ * that will be called with <private> after unsubscription is performed,
+ * when no more events can refer to <private>.
+ */
+#define EVENT_HDL_ASYNC_TASK(equeue, task, _private, _private_free) \
+ EVENT_HDL_ID_ASYNC_TASK(0, equeue, task, _private, _private_free)
+
+/* register a new event subscription in <sub_list>
+ * that will handle <e_type> events
+ *
+ * This function requires you to use
+ * EVENT_HDL_(TASK_)(A)SYNC() EVENT_HDL_ID_(TASK_)(A)SYNC() (choose wisely)
+ * macro helpers to provide <hdl> argument
+ *
+ * If <sub_list> is not specified (equals NULL):
+ * global subscription list (process wide) will be used.
+ *
+ * For identified subscriptions (EVENT_HDL_ID_*), the function is safe against
+ * concurrent subscriptions attempts with the same ID: the ID will only be
+ * inserted once in the list and subsequent attempts will yield an error.
+ * However, trying to register the same ID multiple times is considered as
+ * an error (no specific error code is returned in this case) so the check should
+ * be performed by the caller if it is expected. (The caller must ensure that the ID
+ * is unique to prevent the error from being raised)
+ *
+ * Returns 1 in case of success, 0 in case of failure (invalid argument / memory error)
+ */
+int event_hdl_subscribe(event_hdl_sub_list *sub_list,
+ struct event_hdl_sub_type e_type, struct event_hdl hdl);
+
+/* same as event_hdl_subscribe, but
+ * returns the subscription ptr in case of success
+ * or NULL in case of failure
+ * subscription refcount is automatically incremented by 1
+ * so that ptr remains valid while you use it.
+ * You must call event_hdl_drop() when you no longer
+ * use it or event_hdl_unsubscribe() to unregister the
+ * subscription
+ */
+struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
+ struct event_hdl_sub_type e_type, struct event_hdl hdl);
+
+/* update subscription type:
+ * if new type family does not match current family, does nothing
+ * only subtype update is supported
+ * Returns 1 for SUCCESS and 0 for FAILURE (update not supported)
+ */
+int event_hdl_resubscribe(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type);
+
+/* unregister an existing subscription <sub>
+ * will automatically call event_hdl_drop()
+ */
+void event_hdl_unsubscribe(struct event_hdl_sub *sub);
+
+/* decrease subscription refcount by 1
+ * use this when you no longer use sub ptr
+ * provided by event_hdl_subscribe_ptr or
+ * to cancel previous event_hdl_take()
+ */
+void event_hdl_drop(struct event_hdl_sub *sub);
+
+/* increase subscription refcount by 1
+ * event_hdl_drop is needed when the ptr
+ * is no longer used
+ * (or event_hdl_unsubscribe to end the subscription)
+ */
+void event_hdl_take(struct event_hdl_sub *sub);
+
+/* ------ EVENT_HDL_LOOKUP: subscription lookup operations from external code ------ */
+
+/* use this function to unregister the subscription <lookup_id>
+ * within <sub_list> list.
+ * If <sub_list> is NULL, global subscription list will be used.
+ * Returns 1 for SUCCESS and 0 if not found
+ */
+int event_hdl_lookup_unsubscribe(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id);
+
+/* use this function to update subscription by <lookup_id> within <sub_list> list
+ * if new type family does not match current family, does nothing
+ * only subtype update is supported
+ * If <sub_list> is NULL, global subscription list will be used.
+ * Returns 1 for SUCCESS and 0 if not found or not supported
+ */
+int event_hdl_lookup_resubscribe(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id, struct event_hdl_sub_type type);
+
+/* use this function to get a new reference ptr to the subscription
+ * identified by <lookup_id>
+ * (or event_hdl_unsubscribe to end the subscription)
+ * If <sub_list> is NULL, global subscription list will be used.
+ * returns NULL if not found
+ * returned ptr should be released using event_hdl_drop when no longer used
+ */
+struct event_hdl_sub *event_hdl_lookup_take(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id);
+
+/* pause an existing subscription <sub>
+ * the subscription will no longer receive events (reversible)
+ * This can be reverted thanks to _resume() function
+ */
+void event_hdl_pause(struct event_hdl_sub *sub);
+
+/* resume an existing subscription <sub>
+ * that was previously paused using _pause() function
+ */
+void event_hdl_resume(struct event_hdl_sub *sub);
+
+/* Same as event_hdl_pause() for identified subscriptions:
+ * use this function to pause the subscription <lookup_id>
+ * within <sub_list> list.
+ * If <sub_list> is NULL, global subscription list will be used.
+ * Returns 1 for SUCCESS and 0 if not found
+ */
+int event_hdl_lookup_pause(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id);
+
+/* Same as event_hdl_resume() for identified subscriptions:
+ * use this function to resume the subscription <lookup_id>
+ * within <sub_list> list.
+ * If <sub_list> is NULL, global subscription list will be used.
+ * Returns 1 for SUCCESS and 0 if not found
+ */
+int event_hdl_lookup_resume(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id);
+
+/* ------ PUBLISHING FUNCTIONS ------ */
+
+/* this macro is provided as an internal helper to automatically populate
+ * data for fixed length structs as required by event_hdl publish function
+ */
+#define _EVENT_HDL_CB_DATA_ASSERT(size) \
+ ({ \
+ /* if this fails to compile \
+ * it means you need to fix \
+ * EVENT_HDL_ASYNC_EVENT_DATA \
+ * size in event_hdl-t.h \
+ */ \
+ __attribute__((unused)) \
+ char __static_assert[(size <= EVENT_HDL_ASYNC_EVENT_DATA) ? 1 : -1];\
+ (size); \
+ })
+#define _EVENT_HDL_CB_DATA(data,size,mfree) \
+ (&(struct event_hdl_cb_data){ ._ptr = data, \
+ ._size = size, \
+ ._mfree = mfree })
+
+/* Use this when 'safe' data is completely standalone */
+#define EVENT_HDL_CB_DATA(data) \
+ _EVENT_HDL_CB_DATA(data, \
+ _EVENT_HDL_CB_DATA_ASSERT(sizeof(*data)), \
+ NULL)
+/* Use this when 'safe' data points to dynamically allocated members
+ * that require freeing when the event is completely consumed
+ * (data in itself may be statically allocated as with
+ * EVENT_HDL_CB_DATA since the publish function will take
+ * care of copying it for async handlers)
+ *
+ * mfree function will be called with data as argument
+ * (or copy of data in async context) when the event is completely
+ * consumed (sync and async handlers included). This will give you
+ * enough context to perform the required cleanup steps.
+ *
+ * mfree should be prototyped like this:
+ * void (*mfree)(const void *data)
+ */
+#define EVENT_HDL_CB_DATA_DM(data, mfree) \
+ _EVENT_HDL_CB_DATA(data, \
+ _EVENT_HDL_CB_DATA_ASSERT(sizeof(*data)), \
+ mfree)
+
+/* event publishing function
+ * this function should be called from anywhere in the code to notify
+ * about an <e_type> and provide some relevant <data>
+ * that will be provided to subscriptions in <sub_list>
+ * that are subscribed to <e_type>.
+ * <data> should be provided using EVENT_HDL_CB_DATA helper macro
+ *
+ * Example:
+ * struct event_hdl_cb_data_server cb_data;
+ *
+ * /...
+ * cb_data initialization
+ * .../
+ *
+ * event_hdl_publish(NULL, EVENT_HDL_SUB_SERVER_UP, EVENT_HDL_CB_DATA(&cb_data));
+ */
+int event_hdl_publish(event_hdl_sub_list *sub_list,
+ struct event_hdl_sub_type e_type, const struct event_hdl_cb_data *data);
+
+/* ------ MISC/HELPER FUNCTIONS ------ */
+
+/* returns a statically allocated string that is
+ * the printable representation of <sub_type>
+ * or "N/A" if <sub_type> does not exist
+ */
+const char *event_hdl_sub_type_to_string(struct event_hdl_sub_type sub_type);
+
+/* returns the internal sub_type corresponding
+ * to the printable representation <name>
+ * or EVENT_HDL_SUB_NONE if no such event exists
+ * (see event_hdl-t.h for the complete list of supported types)
+ */
+struct event_hdl_sub_type event_hdl_string_to_sub_type(const char *name);
+
+/* Use this from sync hdl to ensure the function is executed
+ * in sync mode (and thus unsafe data is safe to use from this ctx)
+ * This macro is meant to prevent unsafe data access
+ * if code from sync function is copy pasted into
+ * async function (or if sync handler is changed
+ * to async handler without adapting the code)
+ * FIXME: do we BUG_ON, or simply warn and return from the function?
+ */
+#define EVENT_HDL_ASSERT_SYNC(cb) BUG_ON(!cb->_sync)
+
+/* check if a and b sub types are part of the same family
+ * Returns 1 if <a> and <b> belong to the same event family, 0 otherwise
+ */
+static inline int event_hdl_sub_family_equal(struct event_hdl_sub_type a, struct event_hdl_sub_type b)
+{
+	return (a.family == b.family);
+}
+
+/* compares 2 event_hdl_sub_type structs
+ * (both the family and the subtype bits must match)
+ * returns 1 if equal, 0 if not equal
+ */
+static inline int event_hdl_sub_type_equal(struct event_hdl_sub_type a, struct event_hdl_sub_type b)
+{
+	return (a.family == b.family && a.subtype == b.subtype);
+}
+
+/* performs subtraction between A and B event_hdl_sub_type:
+ * if <a> has no family yet, it first adopts <b>'s family.
+ * If the families differ, <a> is returned unchanged,
+ * otherwise <b>'s subtype bits are cleared from <a>.
+ */
+static inline struct event_hdl_sub_type event_hdl_sub_type_del(struct event_hdl_sub_type a, struct event_hdl_sub_type b)
+{
+	if (unlikely(!a.family))
+		a.family = b.family;
+	if (unlikely(a.family != b.family))
+		return a;
+	a.subtype &= ~b.subtype;
+
+	return a;
+}
+
+/* performs addition between A and B event_hdl_sub_type:
+ * if <a> has no family yet, it first adopts <b>'s family.
+ * If the families differ, <a> is returned unchanged,
+ * otherwise <b>'s subtype bits are OR-ed into <a>.
+ */
+static inline struct event_hdl_sub_type event_hdl_sub_type_add(struct event_hdl_sub_type a, struct event_hdl_sub_type b)
+{
+	if (unlikely(!a.family))
+		a.family = b.family;
+	if (unlikely(a.family != b.family))
+		return a;
+	a.subtype |= b.subtype;
+
+	return a;
+}
+
+/* use this function when you consumed an event in async handler
+ * (this will free the event so you must ensure that the event
+ * is already removed from the event queue and that you
+ * no longer make use of it)
+ */
+void event_hdl_async_free_event(struct event_hdl_async_event *e);
+
+/* use this for advanced async mode to initialize event queue:
+ * resets the queue's list head and its size counter to 0
+ */
+static inline void event_hdl_async_equeue_init(event_hdl_async_equeue *queue)
+{
+	MT_LIST_INIT(&queue->head);
+	queue->size = 0;
+}
+
+/* use this for advanced async mode to pop an event from event queue:
+ * returns the dequeued event, or NULL if the queue is empty.
+ * The size counter is atomically decremented on success.
+ */
+static inline struct event_hdl_async_event *event_hdl_async_equeue_pop(event_hdl_async_equeue *queue)
+{
+	struct event_hdl_async_event *event;
+
+	event = MT_LIST_POP(&queue->head, struct event_hdl_async_event *, mt_list);
+	if (event)
+		HA_ATOMIC_DEC(&queue->size);
+	return event;
+}
+
+/* use this for advanced async mode to check if the event queue is empty
+ * (returns non-zero if the queue is empty)
+ */
+static inline int event_hdl_async_equeue_isempty(event_hdl_async_equeue *queue)
+{
+	return MT_LIST_ISEMPTY(&queue->head);
+}
+
+/* use this for advanced async mode to get the current event queue size
+ * (atomic read of the size counter)
+ */
+static inline uint32_t event_hdl_async_equeue_size(event_hdl_async_equeue *queue)
+{
+	return HA_ATOMIC_LOAD(&queue->size);
+}
+
+/* use this to initialize <sub_list> event subscription list */
+void event_hdl_sub_list_init(event_hdl_sub_list *sub_list);
+
+/* use this function when you need to destroy <sub_list>
+ * event subscription list
+ * All subscriptions will be removed and properly freed according
+ * to their types
+ */
+void event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list);
+
+/* event_hdl tunables */
+extern struct event_hdl_tune event_hdl_tune;
+
+#endif /* _HAPROXY_EVENT_HDL_H */
diff --git a/include/haproxy/extcheck.h b/include/haproxy/extcheck.h
new file mode 100644
index 0000000..233d7c5
--- /dev/null
+++ b/include/haproxy/extcheck.h
@@ -0,0 +1,49 @@
+/*
+ * include/haproxy/extcheck.h
+ * Functions prototypes for the external checks.
+ *
+ * Copyright 2000-2009,2020 Willy Tarreau <w@1wt.eu>
+ * Copyright 2014 Horms Solutions Ltd, Simon Horman <horms@verge.net.au>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_EXTCHECK_H
+#define _HAPROXY_EXTCHECK_H
+
+#include <haproxy/check-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/task-t.h>
+
+struct task *process_chk_proc(struct task *t, void *context, unsigned int state);
+int prepare_external_check(struct check *check);
+int init_pid_list(void);
+
+int proxy_parse_extcheck(char **args, int section, struct proxy *curpx,
+ struct proxy *defpx, const char *file, int line,
+ char **errmsg);
+
+int proxy_parse_external_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+
+
+#endif /* _HAPROXY_EXTCHECK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fcgi-app-t.h b/include/haproxy/fcgi-app-t.h
new file mode 100644
index 0000000..fb6ab27
--- /dev/null
+++ b/include/haproxy/fcgi-app-t.h
@@ -0,0 +1,123 @@
+/*
+ * include/haproxy/fcgi-app-t.h
+ * This file defines everything related to FCGI applications.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_FCGI_T_H
+#define _HAPROXY_HTTP_FCGI_T_H
+
+#include <import/ebtree-t.h>
+#include <import/ist.h>
+
+#include <haproxy/acl-t.h>
+#include <haproxy/api-t.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/fcgi.h>
+#include <haproxy/filters-t.h>
+#include <haproxy/regex-t.h>
+
+#define FCGI_APP_FL_KEEP_CONN 0x00000001 /* Keep the connection alive */
+#define FCGI_APP_FL_GET_VALUES 0x00000002 /* Retrieve FCGI variables on connection establishment */
+#define FCGI_APP_FL_MPXS_CONNS 0x00000004 /* FCGI APP supports connection multiplexing */
+
+
+enum fcgi_rule_type {
+ FCGI_RULE_SET_PARAM = 0,
+ FCGI_RULE_UNSET_PARAM,
+ FCGI_RULE_PASS_HDR,
+ FCGI_RULE_HIDE_HDR,
+};
+
+/* Used during configuration parsing only and converted into fcgi_rule when
+ * filter is created.
+ */
+struct fcgi_rule_conf {
+ enum fcgi_rule_type type;
+ char *name;
+ char *value;
+ struct acl_cond *cond; /* acl condition to set/unset the param */
+ struct list list;
+};
+
+/* parameter rule evaluated during request analysis */
+struct fcgi_rule {
+ enum fcgi_rule_type type;
+ struct ist name; /* name of the parameter/header */
+ struct list value; /* log-format compatible expression, may be empty */
+ struct acl_cond *cond; /* acl condition to set the param */
+ struct list list;
+};
+
+/* parameter rule to set/unset a param at the end of the analysis */
+struct fcgi_param_rule {
+ struct ist name;
+	struct list *value; /* if empty, unset the parameter */
+ struct ebpt_node node;
+};
+
+/* header rule to pass/hide a header at the end of the analysis */
+struct fcgi_hdr_rule {
+ struct ist name;
+	int pass; /* 1 to pass the header, 0 otherwise */
+ struct ebpt_node node;
+};
+
+struct fcgi_app {
+ char *name; /* name to identify this set of params */
+ struct ist docroot; /* FCGI docroot */
+ struct ist index; /* filename to append to URI ending by a '/' */
+ struct my_regex *pathinfo_re; /* Regex to use to split scriptname and path-info */
+ unsigned int flags; /* FCGI_APP_FL_* */
+ struct list loggers; /* one per 'log' directive */
+ unsigned int maxreqs; /* maximum number of concurrent requests */
+
+ struct list acls; /* list of acls declared for this application */
+
+ struct {
+ char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ struct list rules; /* list of rules used during config parsing */
+ struct arg_list args; /* sample arg list that need to be resolved */
+ } conf; /* config information */
+ struct fcgi_app *next; /* used to chain fcgi-app */
+};
+
+/* FCGI config attached to backend proxies */
+struct fcgi_flt_conf {
+ char *name; /* fcgi-app name used during config parsing */
+ struct fcgi_app *app; /* configuration of the fcgi application */
+
+ struct list param_rules; /* list of set/unset rules */
+	struct list hdr_rules; /* list of pass/hide rules */
+};
+
+/* FCGI context attached to streams */
+struct fcgi_flt_ctx {
+ struct filter *filter;
+ struct fcgi_app *app;
+};
+
+#endif /* _HAPROXY_HTTP_FCGI_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fcgi-app.h b/include/haproxy/fcgi-app.h
new file mode 100644
index 0000000..99f0d58
--- /dev/null
+++ b/include/haproxy/fcgi-app.h
@@ -0,0 +1,42 @@
+/*
+ * include/haproxy/fcgi-app.h
+ * This file defines function prototypes for FCGI applications.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_FCGI_H
+#define _HAPROXY_HTTP_FCGI_H
+
+#include <haproxy/fcgi-app-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream-t.h>
+
+struct fcgi_app *fcgi_app_find_by_name(const char *name);
+struct fcgi_flt_conf *find_px_fcgi_conf(struct proxy *px);
+struct fcgi_flt_ctx *find_strm_fcgi_ctx(struct stream *s);
+struct fcgi_app *get_px_fcgi_app(struct proxy *px);
+struct fcgi_app *get_strm_fcgi_app(struct stream *s);
+
+#endif /* _HAPROXY_HTTP_FCGI_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fcgi.h b/include/haproxy/fcgi.h
new file mode 100644
index 0000000..e276d69
--- /dev/null
+++ b/include/haproxy/fcgi.h
@@ -0,0 +1,133 @@
+/*
+ * include/haproxy/fcgi.h
+ * This file contains FastCGI protocol definitions.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FCGI_H
+#define _HAPROXY_FCGI_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+
+/* FCGI protocol version */
+#define FCGI_VERSION 0x1
+
+/* flags for FCGI_BEGIN_REQUEST records */
+#define FCGI_KEEP_CONN 0x01
+
+/* FCGI record's type */
+enum fcgi_record_type {
+ FCGI_BEGIN_REQUEST = 1,
+ FCGI_ABORT_REQUEST = 2,
+ FCGI_END_REQUEST = 3,
+ FCGI_PARAMS = 4,
+ FCGI_STDIN = 5,
+ FCGI_STDOUT = 6,
+ FCGI_STDERR = 7,
+ FCGI_DATA = 8,
+ FCGI_GET_VALUES = 9,
+ FCGI_GET_VALUES_RESULT = 10,
+ FCGI_UNKNOWN_TYPE = 11,
+ FCGI_ENTRIES
+} __attribute__((packed));
+
+
+enum fcgi_role {
+ FCGI_RESPONDER = 1,
+ FCGI_AUTHORIZER = 2, /* Unsupported */
+ FCGI_FILTER = 3, /* Unsupported */
+} __attribute__((packed));
+
+/* Protocol status */
+enum fcgi_proto_status {
+ FCGI_PS_REQUEST_COMPLETE = 0,
+ FCGI_PS_CANT_MPX_CONN = 1,
+ FCGI_PS_OVERLOADED = 2,
+ FCGI_PS_UNKNOWN_ROLE = 3,
+ FCGI_PS_ENTRIES,
+} __attribute__((packed));
+
+struct fcgi_header {
+ uint8_t vsn;
+ uint8_t type;
+ uint16_t id;
+ uint16_t len;
+ uint8_t padding;
+ uint8_t rsv;
+};
+
+struct fcgi_param {
+ struct ist n;
+ struct ist v;
+};
+
+struct fcgi_begin_request {
+ enum fcgi_role role;
+ uint8_t flags;
+};
+
+struct fcgi_end_request {
+ uint32_t status;
+ uint8_t errcode;
+};
+
+struct fcgi_unknown_type {
+ uint8_t type;
+};
+
+
+/* returns a statically allocated printable name for the FCGI record
+ * type <type>, or "_UNKNOWN_" if <type> does not match any known type
+ */
+static inline const char *fcgi_rt_str(int type)
+{
+	switch (type) {
+		case FCGI_BEGIN_REQUEST : return "BEGIN_REQUEST";
+		case FCGI_ABORT_REQUEST : return "ABORT_REQUEST";
+		case FCGI_END_REQUEST : return "END_REQUEST";
+		case FCGI_PARAMS : return "PARAMS";
+		case FCGI_STDIN : return "STDIN";
+		case FCGI_STDOUT : return "STDOUT";
+		case FCGI_STDERR : return "STDERR";
+		case FCGI_DATA : return "DATA";
+		case FCGI_GET_VALUES : return "GET_VALUES";
+		case FCGI_GET_VALUES_RESULT : return "GET_VALUES_RESULT";
+		case FCGI_UNKNOWN_TYPE : return "UNKNOWN_TYPE";
+		default : return "_UNKNOWN_";
+	}
+}
+
+
+int fcgi_encode_record_hdr(struct buffer *out, const struct fcgi_header *h);
+size_t fcgi_decode_record_hdr(const struct buffer *in, size_t o, struct fcgi_header *h);
+
+int fcgi_encode_begin_request(struct buffer *out, const struct fcgi_begin_request *r);
+
+int fcgi_encode_param(struct buffer *out, const struct fcgi_param *p);
+size_t fcgi_decode_param(const struct buffer *in, size_t o, struct fcgi_param *p);
+size_t fcgi_aligned_decode_param(const struct buffer *in, size_t o, struct fcgi_param *p);
+
+size_t fcgi_decode_end_request(const struct buffer *in, size_t o, struct fcgi_end_request *r);
+
+#endif /* _HAPROXY_FCGI_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fd-t.h b/include/haproxy/fd-t.h
new file mode 100644
index 0000000..c5e94cb
--- /dev/null
+++ b/include/haproxy/fd-t.h
@@ -0,0 +1,251 @@
+/*
+ * include/haproxy/fd-t.h
+ * File descriptors states - check src/fd.c for explanations.
+ *
+ * Copyright (C) 2000-2014 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FD_T_H
+#define _HAPROXY_FD_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/port_range-t.h>
+#include <haproxy/show_flags-t.h>
+
+/* Direction for each FD event update */
+enum {
+ DIR_RD=0,
+ DIR_WR=1,
+};
+
+
+/* fdtab[].state is a composite state describing what is known about the FD.
+ * For now, the following information are stored in it:
+ * - event configuration and status for each direction (R,W) split into
+ * active, ready, shutdown categories (FD_EV_*). These are known by their
+ * bit values as well so that test-and-set bit operations are possible.
+ *
+ * - last known polling status (FD_POLL_*). For ease of troubleshooting,
+ * avoid visually mixing these ones with the other ones above. 3 of these
+ * flags are updated on each poll() report (FD_POLL_IN, FD_POLL_OUT,
+ * FD_POLL_PRI). FD_POLL_HUP and FD_POLL_ERR are "sticky" in that once they
+ * are reported, they will not be cleared until the FD is closed.
+ */
+
+/* bits positions for a few flags */
+#define FD_EV_ACTIVE_R_BIT 0
+#define FD_EV_READY_R_BIT 1
+#define FD_EV_SHUT_R_BIT 2
+/* unused: 3 */
+
+#define FD_EV_ACTIVE_W_BIT 4
+#define FD_EV_READY_W_BIT 5
+#define FD_EV_SHUT_W_BIT 6
+#define FD_EV_ERR_RW_BIT 7
+
+#define FD_POLL_IN_BIT 8
+#define FD_POLL_PRI_BIT 9
+#define FD_POLL_OUT_BIT 10
+#define FD_POLL_ERR_BIT 11
+#define FD_POLL_HUP_BIT 12
+
+/* info/config bits */
+#define FD_LINGER_RISK_BIT 16 /* must kill lingering before closing */
+#define FD_CLONED_BIT 17 /* cloned socket, requires EPOLL_CTL_DEL on close */
+#define FD_INITIALIZED_BIT 18 /* init phase was done (e.g. output pipe set non-blocking) */
+#define FD_ET_POSSIBLE_BIT 19 /* edge-triggered is possible on this FD */
+#define FD_EXPORTED_BIT 20 /* FD is exported and must not be closed */
+#define FD_EXCL_SYSCALL_BIT 21 /* a syscall claims exclusivity on this FD */
+#define FD_DISOWN_BIT 22 /* this fd will be closed by some external code */
+#define FD_MUST_CLOSE_BIT 23 /* this fd will be closed by some external code */
+
+
+/* and flag values */
+#define FD_EV_ACTIVE_R (1U << FD_EV_ACTIVE_R_BIT)
+#define FD_EV_ACTIVE_W (1U << FD_EV_ACTIVE_W_BIT)
+#define FD_EV_ACTIVE_RW (FD_EV_ACTIVE_R | FD_EV_ACTIVE_W)
+
+#define FD_EV_READY_R (1U << FD_EV_READY_R_BIT)
+#define FD_EV_READY_W (1U << FD_EV_READY_W_BIT)
+#define FD_EV_READY_RW (FD_EV_READY_R | FD_EV_READY_W)
+
+/* note that when FD_EV_SHUT is set, ACTIVE and READY are cleared */
+#define FD_EV_SHUT_R (1U << FD_EV_SHUT_R_BIT)
+#define FD_EV_SHUT_W (1U << FD_EV_SHUT_W_BIT)
+#define FD_EV_SHUT_RW (FD_EV_SHUT_R | FD_EV_SHUT_W)
+
+/* note that when FD_EV_ERR is set, SHUT is also set. Also, ERR is for both
+ * directions at once (write error, socket dead, etc).
+ */
+#define FD_EV_ERR_RW (1U << FD_EV_ERR_RW_BIT)
+
+/* mask covering all use cases above */
+#define FD_EV_ANY (FD_EV_ACTIVE_RW | FD_EV_READY_RW | FD_EV_SHUT_RW | FD_EV_ERR_RW)
+
+/* polling status */
+#define FD_POLL_IN (1U << FD_POLL_IN_BIT)
+#define FD_POLL_PRI (1U << FD_POLL_PRI_BIT)
+#define FD_POLL_OUT (1U << FD_POLL_OUT_BIT)
+#define FD_POLL_ERR (1U << FD_POLL_ERR_BIT)
+#define FD_POLL_HUP (1U << FD_POLL_HUP_BIT)
+#define FD_POLL_UPDT_MASK (FD_POLL_IN | FD_POLL_PRI | FD_POLL_OUT)
+#define FD_POLL_ANY_MASK (FD_POLL_IN | FD_POLL_PRI | FD_POLL_OUT | FD_POLL_ERR | FD_POLL_HUP)
+
+/* information/configuration flags */
+#define FD_LINGER_RISK (1U << FD_LINGER_RISK_BIT)
+#define FD_CLONED (1U << FD_CLONED_BIT)
+#define FD_INITIALIZED (1U << FD_INITIALIZED_BIT)
+#define FD_ET_POSSIBLE (1U << FD_ET_POSSIBLE_BIT)
+#define FD_EXPORTED (1U << FD_EXPORTED_BIT)
+#define FD_EXCL_SYSCALL (1U << FD_EXCL_SYSCALL_BIT)
+#define FD_DISOWN (1U << FD_DISOWN_BIT)
+#define FD_MUST_CLOSE (1U << FD_MUST_CLOSE_BIT)
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ * NOTE(review): FD_MUST_CLOSE is defined above but is not reported in the
+ * flag chain below -- confirm whether it should be appended to the list.
+ */
+static forceinline char *fd_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+	/* prologue */
+	_(0);
+	/* flags */
+	_(FD_EV_ACTIVE_R, _(FD_EV_ACTIVE_W, _(FD_EV_READY_R, _(FD_EV_READY_W,
+	_(FD_EV_SHUT_R, _(FD_EV_SHUT_W, _(FD_EV_ERR_RW, _(FD_POLL_IN,
+	_(FD_POLL_PRI, _(FD_POLL_OUT, _(FD_POLL_ERR, _(FD_POLL_HUP,
+	_(FD_LINGER_RISK, _(FD_CLONED, _(FD_INITIALIZED, _(FD_ET_POSSIBLE,
+	_(FD_EXPORTED, _(FD_EXCL_SYSCALL, _(FD_DISOWN)))))))))))))))))));
+	/* epilogue */
+	_(~0U);
+	return buf;
+#undef _
+}
+
+/* FD update status after fd_update_events() */
+enum {
+ FD_UPDT_DONE = 0, // update done, nothing else to be done
+ FD_UPDT_CLOSED, // FD was closed
+ FD_UPDT_MIGRATED, // FD was migrated, ignore it now
+};
+
+/* This is the value used to mark a file descriptor as dead. This value is
+ * negative, this is important so that tests on fd < 0 properly match. It
+ * also has the nice property of being highly negative but neither overflowing
+ * nor changing sign on 32-bit machines when multiplied by sizeof(fdtab).
+ * This ensures that any unexpected dereference of such an uninitialized
+ * file descriptor will lead to so large a dereference that it will crash
+ * the process at the exact location of the bug with a clean stack trace
+ * instead of causing silent manipulation of other FDs. And it's readable
+ * when found in a dump.
+ */
+#define DEAD_FD_MAGIC 0xFDDEADFD
+
+/* fdlist_entry: entry used by the fd cache.
+ * >= 0 means we're in the cache and gives the FD of the next in the cache,
+ * -1 means we're in the cache and the last element,
+ * -2 means the entry is locked,
+ * <= -3 means not in the cache, and next element is -4-fd
+ *
+ * It must remain 8-aligned so that aligned CAS operations may be done on both
+ * entries at once.
+ */
+struct fdlist_entry {
+ int next;
+ int prev;
+} ALIGNED(8);
+
+/* head of the fd cache, per-group */
+struct fdlist {
+ int first;
+ int last;
+} ALIGNED(64);
+
+/* info about one given fd. Note: only align on cache lines when using threads;
+ * 32-bit small archs can put everything in 32-bytes when threads are disabled.
+ * refc_tgid is an atomic 32-bit composite value made of 16 higher bits
+ * containing a refcount on tgid and the running_mask, and 16 lower bits
+ * containing a thread group ID and a lock bit on the 16th. The tgid may only
+ * be changed when refc is zero and running may only be checked/changed when
+ * refc is held and shows the reader is alone. An FD with tgid zero belongs to
+ * nobody.
+ */
+struct fdtab {
+ unsigned long running_mask; /* mask of thread IDs currently using the fd */
+ unsigned long thread_mask; /* mask of thread IDs authorized to process the fd */
+ unsigned long update_mask; /* mask of thread IDs having an update for fd */
+ struct fdlist_entry update; /* Entry in the global update list */
+ void (*iocb)(int fd); /* I/O handler */
+ void *owner; /* the connection or listener associated with this fd, NULL if closed */
+ unsigned int state; /* FD state for read and write directions (FD_EV_*) + FD_POLL_* */
+ unsigned int refc_tgid; /* refcounted tgid, updated atomically */
+#ifdef DEBUG_FD
+ unsigned int event_count; /* number of events reported */
+#endif
+} THREAD_ALIGNED(64);
+
+/* polled mask, one bit per thread and per direction for each FD */
+struct polled_mask {
+ unsigned long poll_recv;
+ unsigned long poll_send;
+};
+
+/* less often used information */
+struct fdinfo {
+ struct port_range *port_range; /* optional port range to bind to */
+ int local_port; /* optional local port */
+};
+
+/*
+ * Poller descriptors.
+ * - <name> is initialized by the poller's register() function, and should not
+ * be allocated, just linked to.
+ * - <pref> is initialized by the poller's register() function. It is set to 0
+ * by default, meaning the poller is disabled. init() should set it to 0 in
+ * case of failure. term() must set it to 0. A generic unoptimized select()
+ * poller should set it to 100.
+ * - <private> is initialized by the poller's init() function, and cleaned by
+ * the term() function.
+ * - clo() should be used to indicate to the poller that <fd> will be closed.
+ * - poll() calls the poller, expiring at <exp>, or immediately if <wake> is set
+ * - flags indicate what the poller supports (HAP_POLL_F_*)
+ */
+
+#define HAP_POLL_F_RDHUP 0x00000001 /* the poller notifies of HUP with reads */
+#define HAP_POLL_F_ERRHUP 0x00000002 /* the poller reports ERR and HUP */
+
+struct poller {
+ void *private; /* any private data for the poller */
+ void (*clo)(const int fd); /* mark <fd> as closed */
+ void (*poll)(struct poller *p, int exp, int wake); /* the poller itself */
+ int (*init)(struct poller *p); /* poller initialization */
+ void (*term)(struct poller *p); /* termination of this poller */
+ int (*test)(struct poller *p); /* pre-init check of the poller */
+ int (*fork)(struct poller *p); /* post-fork re-opening */
+ const char *name; /* poller name */
+ unsigned int flags; /* HAP_POLL_F_* */
+ int pref; /* try pollers with higher preference first */
+};
+
+#endif /* _HAPROXY_FD_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fd.h b/include/haproxy/fd.h
new file mode 100644
index 0000000..11212ff
--- /dev/null
+++ b/include/haproxy/fd.h
@@ -0,0 +1,542 @@
+/*
+ * include/haproxy/fd.h
+ * File descriptors states - exported variables and functions
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FD_H
+#define _HAPROXY_FD_H
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/atomic.h>
+#include <haproxy/fd-t.h>
+#include <haproxy/global.h>
+#include <haproxy/thread.h>
+
+/* public variables */
+
+extern struct poller cur_poller; /* the current poller */
+extern int nbpollers;
+extern struct poller pollers[MAX_POLLERS]; /* all registered pollers */
+extern struct fdtab *fdtab; /* array of all the file descriptors */
+extern struct fdinfo *fdinfo; /* less-often used infos for file descriptors */
+extern int totalconn; /* total # of terminated sessions */
+extern int actconn; /* # of active sessions */
+
+extern volatile struct fdlist update_list[MAX_TGROUPS];
+extern struct polled_mask *polled_mask;
+
+extern THREAD_LOCAL int *fd_updt; // FD updates list
+extern THREAD_LOCAL int fd_nbupdt; // number of updates in the list
+
+extern int poller_wr_pipe[MAX_THREADS];
+
+extern volatile int ha_used_fds; // Number of FDs we're currently using
+
+/* Deletes an FD from the fdsets.
+ * The file descriptor is also closed.
+ */
+void fd_delete(int fd);
+void _fd_delete_orphan(int fd);
+
+/* makes the new fd non-blocking and clears all other O_* flags;
+ * this is meant to be used on new FDs. Returns -1 on failure.
+ */
+int fd_set_nonblock(int fd);
+
+/* makes the fd close-on-exec; returns -1 on failure. */
+int fd_set_cloexec(int fd);
+
+/* Migrate a FD to a new thread <new_tid>. */
+void fd_migrate_on(int fd, uint new_tid);
+
+/*
+ * Take over a FD belonging to another thread.
+ * Returns 0 on success, and -1 on failure.
+ */
+int fd_takeover(int fd, void *expected_owner);
+
+ssize_t fd_write_frag_line(int fd, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg, int nl);
+
+/* close all FDs starting from <start> */
+void my_closefrom(int start);
+
+struct rlimit;
+int raise_rlim_nofile(struct rlimit *old_limit, struct rlimit *new_limit);
+
+int compute_poll_timeout(int next);
+void fd_leaving_poll(int wait_time, int status);
+
+/* disable the specified poller */
+void disable_poller(const char *poller_name);
+
+void poller_pipe_io_handler(int fd);
+
+/*
+ * Initialize the pollers till the best one is found.
+ * If none works, returns 0, otherwise 1.
+ * The pollers register themselves just before main() is called.
+ */
+int init_pollers(void);
+
+/*
+ * Deinitialize the pollers.
+ */
+void deinit_pollers(void);
+
+/*
+ * Some pollers may lose their connection after a fork(). It may be necessary
+ * to re-initialize some of them. Returns 0 in case of failure,
+ * otherwise 1. The fork() function may be NULL if unused. In case of error,
+ * the current poller is destroyed and the caller is responsible for trying
+ * another one by calling init_pollers() again.
+ */
+int fork_poller(void);
+
+/*
+ * Lists the known pollers on <out>.
+ * Should be performed only before initialization.
+ */
+int list_pollers(FILE *out);
+
+/*
+ * Runs the polling loop
+ */
+void run_poller();
+
+void fd_add_to_fd_list(volatile struct fdlist *list, int fd);
+void fd_rm_from_fd_list(volatile struct fdlist *list, int fd);
+void updt_fd_polling(const int fd);
+int fd_update_events(int fd, uint evts);
+void fd_reregister_all(int tgrp, ulong mask);
+
+/* Called from the poller to acknowledge we read an entry from the global
+ * update list, to remove our bit from the update_mask, and remove it from
+ * the list if we were the last one.
+ */
+static inline void done_update_polling(int fd)
+{
+	unsigned long update_mask;
+
+	/* drop our group-local thread bit; the returned mask tells whether
+	 * other still-enabled threads of the group must process this entry
+	 */
+	update_mask = _HA_ATOMIC_AND_FETCH(&fdtab[fd].update_mask, ~ti->ltid_bit);
+	while ((update_mask & _HA_ATOMIC_LOAD(&tg->threads_enabled)) == 0) {
+		/* If we were the last one that had to update that entry, remove it from the list */
+		fd_rm_from_fd_list(&update_list[tgid - 1], fd);
+		update_mask = _HA_ATOMIC_LOAD(&fdtab[fd].update_mask);
+		if ((update_mask & _HA_ATOMIC_LOAD(&tg->threads_enabled)) != 0) {
+			/* Maybe it's been re-updated in the meanwhile, and we
+			 * wrongly removed it from the list, if so, re-add it
+			 */
+			fd_add_to_fd_list(&update_list[tgid - 1], fd);
+			update_mask = _HA_ATOMIC_LOAD(&fdtab[fd].update_mask);
+			/* And then check again, just in case after all it
+			 * should be removed, even if it's very unlikely, given
+			 * the current thread wouldn't have been able to take
+			 * care of it yet */
+		} else
+			break;
+	}
+}
+
+/* Tells whether processing of recv events is currently enabled on FD <fd>.
+ * Returns the FD_EV_ACTIVE_R bit of the state, i.e. non-zero when active.
+ */
+static inline int fd_recv_active(const int fd)
+{
+	unsigned int state = fdtab[fd].state;
+
+	return state & FD_EV_ACTIVE_R;
+}
+
+/* Tells whether FD <fd> is currently marked ready for recv. Returns the
+ * FD_EV_READY_R bit of the state, i.e. non-zero when ready.
+ */
+static inline int fd_recv_ready(const int fd)
+{
+	unsigned int state = fdtab[fd].state;
+
+	return state & FD_EV_READY_R;
+}
+
+/* Tells whether processing of send events is currently enabled on FD <fd>.
+ * Returns the FD_EV_ACTIVE_W bit of the state, i.e. non-zero when active.
+ */
+static inline int fd_send_active(const int fd)
+{
+	unsigned int state = fdtab[fd].state;
+
+	return state & FD_EV_ACTIVE_W;
+}
+
+/* Tells whether FD <fd> is currently marked ready for send. Returns the
+ * FD_EV_READY_W bit of the state, i.e. non-zero when ready.
+ */
+static inline int fd_send_ready(const int fd)
+{
+	unsigned int state = fdtab[fd].state;
+
+	return state & FD_EV_READY_W;
+}
+
+/* Tells whether FD <fd> is active in at least one direction. Returns the
+ * FD_EV_ACTIVE_RW bits of the state, i.e. non-zero when active.
+ */
+static inline int fd_active(const int fd)
+{
+	unsigned int state = fdtab[fd].state;
+
+	return state & FD_EV_ACTIVE_RW;
+}
+
+/* Disable processing recv events on fd <fd> */
+static inline void fd_stop_recv(int fd)
+{
+	/* cheap racy pre-check first: skip the atomic bit-test-and-reset
+	 * when ACTIVE_R is already cleared
+	 */
+	if (!(fdtab[fd].state & FD_EV_ACTIVE_R) ||
+	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_ACTIVE_R_BIT))
+		return;
+}
+
+/* Disable processing send events on fd <fd> */
+static inline void fd_stop_send(int fd)
+{
+	/* cheap racy pre-check first: skip the atomic bit-test-and-reset
+	 * when ACTIVE_W is already cleared
+	 */
+	if (!(fdtab[fd].state & FD_EV_ACTIVE_W) ||
+	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_ACTIVE_W_BIT))
+		return;
+}
+
+/* Disable processing of events on fd <fd> for both directions. */
+static inline void fd_stop_both(int fd)
+{
+	uint old, new;
+
+	/* CAS loop clearing both ACTIVE bits at once; gives up early if
+	 * neither is set. On CAS failure <old> is refreshed and we retry.
+	 */
+	old = fdtab[fd].state;
+	do {
+		if (!(old & FD_EV_ACTIVE_RW))
+			return;
+		new = old & ~FD_EV_ACTIVE_RW;
+	} while (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)));
+}
+
+/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
+static inline void fd_cant_recv(const int fd)
+{
+	/* removing ready never changes polled status; the racy pre-check
+	 * only avoids the atomic op when the bit is already cleared
+	 */
+	if (!(fdtab[fd].state & FD_EV_READY_R) ||
+	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_READY_R_BIT))
+		return;
+}
+
+/* Report that FD <fd> may receive again without polling. */
+static inline void fd_may_recv(const int fd)
+{
+	/* marking ready never changes polled status; the racy pre-check
+	 * only avoids the atomic op when the bit is already set
+	 */
+	if ((fdtab[fd].state & FD_EV_READY_R) ||
+	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_R_BIT))
+		return;
+}
+
+/* Report that FD <fd> may receive again without polling but only if it's not
+ * active yet. This is in order to speculatively try to enable I/Os when it's
+ * highly likely that these will succeed, but without interfering with polling.
+ */
+static inline void fd_cond_recv(const int fd)
+{
+	/* racy check-then-set; see the comment above for why this is
+	 * acceptable (speculative enabling only)
+	 */
+	if ((fdtab[fd].state & (FD_EV_ACTIVE_R|FD_EV_READY_R)) == 0)
+		HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_R_BIT);
+}
+
+/* Report that FD <fd> may send again without polling but only if it's not
+ * active yet. This is in order to speculatively try to enable I/Os when it's
+ * highly likely that these will succeed, but without interfering with polling.
+ */
+static inline void fd_cond_send(const int fd)
+{
+	/* racy check-then-set; see the comment above for why this is
+	 * acceptable (speculative enabling only)
+	 */
+	if ((fdtab[fd].state & (FD_EV_ACTIVE_W|FD_EV_READY_W)) == 0)
+		HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_W_BIT);
+}
+
+/* Report that FD <fd> may receive and send without polling. Used at FD
+ * initialization.
+ */
+static inline void fd_may_both(const int fd)
+{
+	/* sets both READY bits (FD_EV_READY_RW) in a single atomic OR */
+	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_RW);
+}
+
+/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
+static inline void fd_cant_send(const int fd)
+{
+	/* removing ready never changes polled status; the racy pre-check
+	 * only avoids the atomic op when the bit is already cleared
+	 */
+	if (!(fdtab[fd].state & FD_EV_READY_W) ||
+	    !HA_ATOMIC_BTR(&fdtab[fd].state, FD_EV_READY_W_BIT))
+		return;
+}
+
+/* Report that FD <fd> may send again without polling (EAGAIN not detected). */
+static inline void fd_may_send(const int fd)
+{
+	/* marking ready never changes polled status; the racy pre-check
+	 * only avoids the atomic op when the bit is already set
+	 */
+	if ((fdtab[fd].state & FD_EV_READY_W) ||
+	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_READY_W_BIT))
+		return;
+}
+
+/* Prepare FD <fd> to try to receive */
+static inline void fd_want_recv(int fd)
+{
+	/* only the thread performing the 0->1 transition of ACTIVE_R
+	 * queues a polling update; others return early
+	 */
+	if ((fdtab[fd].state & FD_EV_ACTIVE_R) ||
+	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_ACTIVE_R_BIT))
+		return;
+	updt_fd_polling(fd);
+}
+
+/* Prepare FD <fd> to try to receive, and only create update if fd_updt exists
+ * (essentially for receivers during early boot).
+ */
+static inline void fd_want_recv_safe(int fd)
+{
+	if ((fdtab[fd].state & FD_EV_ACTIVE_R) ||
+	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_ACTIVE_R_BIT))
+		return;
+	/* fd_updt is thread-local and may not be allocated yet at boot */
+	if (fd_updt)
+		updt_fd_polling(fd);
+}
+
+/* Prepare FD <fd> to try to send */
+static inline void fd_want_send(int fd)
+{
+	/* only the thread performing the 0->1 transition of ACTIVE_W
+	 * queues a polling update; others return early
+	 */
+	if ((fdtab[fd].state & FD_EV_ACTIVE_W) ||
+	    HA_ATOMIC_BTS(&fdtab[fd].state, FD_EV_ACTIVE_W_BIT))
+		return;
+	updt_fd_polling(fd);
+}
+
+/* returns the tgid from an fd (masks the refcount) */
+static forceinline int fd_tgid(int fd)
+{
+	/* refc_tgid layout: refcount in bits 16 and above, tgid (with its
+	 * lock bit, see fd_lock_tgid()) in the low 16 bits
+	 */
+	return _HA_ATOMIC_LOAD(&fdtab[fd].refc_tgid) & 0xFFFF;
+}
+
+/* Release a tgid previously taken by fd_grab_tgid() */
+static forceinline void fd_drop_tgid(int fd)
+{
+	/* the refcount is stored in multiples of 0x10000 above the tgid */
+	HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
+}
+
+/* Unlock a tgid currently locked by fd_lock_tgid(). This will effectively
+ * allow threads from the FD's tgid to check the masks and manipulate the FD.
+ */
+static forceinline void fd_unlock_tgid(int fd)
+{
+	/* clears the lock bit (bit 15), preserving refcount and tgid */
+	HA_ATOMIC_AND(&fdtab[fd].refc_tgid, 0xffff7fffU);
+}
+
+/* Switch the FD's TGID to the new value with a refcount of 1 and the lock bit
+ * set. It doesn't care about the current TGID, except that it will wait for
+ * the FD not to be already switching and having its refcount cleared. After
+ * the function returns, the caller is free to manipulate the masks, and it
+ * must call fd_unlock_tgid() to drop the lock, allowing threads from the
+ * designated group to use the FD. Finally a call to fd_drop_tgid() will be
+ * needed to drop the reference.
+ */
+static inline void fd_lock_tgid(int fd, uint desired_tgid)
+{
+	uint old;
+
+	BUG_ON(!desired_tgid);
+
+	old = tgid; // assume we start from the caller's tgid
+	desired_tgid |= 0x18000; // refcount=1, lock bit=1.
+
+	while (1) {
+		/* on CAS failure <old> was refreshed with the current value;
+		 * strip the lock bit and refcount so that we keep waiting
+		 * for them to be released
+		 */
+		old &= 0x7fff; // expect no lock and refcount==0
+		if (_HA_ATOMIC_CAS(&fdtab[fd].refc_tgid, &old, desired_tgid))
+			break;
+		__ha_cpu_relax();
+	}
+}
+
+/* Grab a reference to the FD's TGID, and return the tgid. Note that a TGID of
+ * zero indicates the FD was closed, thus also fails (i.e. no need to drop it).
+ * On non-zero (success), the caller must release it using fd_drop_tgid().
+ */
+static inline uint fd_take_tgid(int fd)
+{
+	uint old;
+
+	/* optimistically take a reference, then roll it back if the FD
+	 * turned out to be closed (tgid == 0)
+	 */
+	old = _HA_ATOMIC_FETCH_ADD(&fdtab[fd].refc_tgid, 0x10000) & 0xffff;
+	if (likely(old))
+		return old;
+	HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
+	return 0;
+}
+
+/* Reset a tgid without affecting the refcount */
+static forceinline void fd_reset_tgid(int fd)
+{
+	/* clears the low 16 bits (tgid + lock bit), keeps the refcount */
+	HA_ATOMIC_AND(&fdtab[fd].refc_tgid, 0xffff0000U);
+}
+
+/* Try to grab a reference to the FD's TGID, but only if it matches the
+ * requested one (i.e. it succeeds with TGID refcnt held, or fails). Note that
+ * a TGID of zero indicates the FD was closed, thus also fails. It returns
+ * non-zero on success, in which case the caller must then release it using
+ * fd_drop_tgid(), or zero on failure. The function is optimized for use
+ * when it's likely that the tgid matches the desired one as it's by far
+ * the most common.
+ */
+static inline uint fd_grab_tgid(int fd, uint desired_tgid)
+{
+	uint old;
+
+	/* optimistically take a reference, then roll it back if the tgid
+	 * does not match the desired one (closed FD or migrated group)
+	 */
+	old = _HA_ATOMIC_FETCH_ADD(&fdtab[fd].refc_tgid, 0x10000) & 0xffff;
+	if (likely(old == desired_tgid))
+		return 1;
+	HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
+	return 0;
+}
+
+/* Set the FD's TGID to the new value with a refcount of 1, waiting for the
+ * current refcount to become 0, to cover the rare possibility that a late
+ * competing thread would be touching the tgid or the running mask in parallel.
+ * The caller must call fd_drop_tgid() once done.
+ */
+static inline void fd_claim_tgid(int fd, uint desired_tgid)
+{
+	uint old;
+
+	BUG_ON(!desired_tgid);
+
+	desired_tgid += 0x10000; // refcount=1
+	old = 0; // assume unused (most likely)
+	while (1) {
+		if (_HA_ATOMIC_CAS(&fdtab[fd].refc_tgid, &old, desired_tgid))
+			break;
+		__ha_cpu_relax();
+		/* CAS refreshed <old>: keep only the tgid bits and retry,
+		 * effectively waiting for refcount and lock bit to clear
+		 */
+		old &= 0x7fff; // keep only the tgid and drop the lock
+	}
+}
+
+/* atomically read the running mask if the tgid matches, or returns zero if it
+ * does not match. This is meant for use in code paths where the bit is expected
+ * to be present and will be sufficient to protect against a short-term group
+ * migration (e.g. tasks and return from iocb).
+ */
+static inline ulong fd_get_running(int fd, uint desired_tgid)
+{
+	ulong ret = 0;
+	uint old;
+
+	/* TODO: may also be checked using an atomic double-load from a DWCAS
+	 * on compatible architectures, which wouldn't require to modify nor
+	 * restore the original value.
+	 */
+	/* take a reference on the tgid while reading running_mask (same
+	 * principle as fd_grab_tgid()), and release it afterwards
+	 */
+	old = _HA_ATOMIC_ADD_FETCH(&fdtab[fd].refc_tgid, 0x10000);
+	if (likely((old & 0xffff) == desired_tgid))
+		ret = _HA_ATOMIC_LOAD(&fdtab[fd].running_mask);
+	_HA_ATOMIC_SUB(&fdtab[fd].refc_tgid, 0x10000);
+	return ret;
+}
+
+/* removes the current thread's ltid_bit from the fd's running mask and
+ * returns the value before the atomic operation, so that the caller can
+ * know if it was present.
+ */
+static inline long fd_clr_running(int fd)
+{
+	/* ltid_bit is the calling thread's bit within its thread group */
+	return _HA_ATOMIC_FETCH_AND(&fdtab[fd].running_mask, ~ti->ltid_bit);
+}
+
+/* Prepares <fd> for being polled on all permitted threads of this group ID
+ * (these will then be refined to only cover running ones).
+*/
+static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), int tgid, unsigned long thread_mask)
+{
+	extern void sock_conn_iocb(int);
+	int newstate;
+
+	/* conn_fd_handler should support edge-triggered FDs */
+	newstate = 0;
+	if ((global.tune.options & GTUNE_FD_ET) && iocb == sock_conn_iocb)
+		newstate |= FD_ET_POSSIBLE;
+
+	/* This must never happen and would definitely indicate a bug, in
+	 * addition to overwriting some unexpected memory areas.
+	 */
+	BUG_ON(fd < 0);
+	BUG_ON(fd >= global.maxsock);
+	BUG_ON(fdtab[fd].owner != NULL);
+	BUG_ON(fdtab[fd].state != 0);
+	BUG_ON(tgid < 1 || tgid > MAX_TGROUPS);
+
+	/* only threads still enabled in the target group may use this FD */
+	thread_mask &= tg->threads_enabled;
+	BUG_ON(thread_mask == 0);
+
+	/* hold the tgid (refcount=1) while filling the entry so that late
+	 * observers see a consistent tgid/masks pair (see fd_claim_tgid())
+	 */
+	fd_claim_tgid(fd, tgid);
+
+	BUG_ON(fdtab[fd].running_mask);
+
+	fdtab[fd].owner = owner;
+	fdtab[fd].iocb = iocb;
+	fdtab[fd].state = newstate;
+	fdtab[fd].thread_mask = thread_mask;
+	fd_drop_tgid(fd);
+
+#ifdef DEBUG_FD
+	fdtab[fd].event_count = 0;
+#endif
+
+	/* note: do not reset polled_mask here as it indicates which poller
+	 * still knows this FD from a possible previous round.
+	 */
+
+	/* the two directions are ready until proven otherwise */
+	fd_may_both(fd);
+	_HA_ATOMIC_INC(&ha_used_fds);
+}
+
+/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */
+/* Atomically sets the bit of <fd> in the uint-based event set <evts>
+ * (replacement for FD_SET).
+ */
+static inline void hap_fd_set(int fd, unsigned int *evts)
+{
+	const unsigned int bits = 8 * sizeof(*evts);
+
+	_HA_ATOMIC_OR(&evts[fd / bits], 1U << (fd & (bits - 1)));
+}
+
+/* Atomically clears the bit of <fd> in the uint-based event set <evts>
+ * (replacement for FD_CLR).
+ */
+static inline void hap_fd_clr(int fd, unsigned int *evts)
+{
+	const unsigned int bits = 8 * sizeof(*evts);
+
+	_HA_ATOMIC_AND(&evts[fd / bits], ~(1U << (fd & (bits - 1))));
+}
+
+/* Tests the bit of <fd> in the uint-based event set <evts> (replacement for
+ * FD_ISSET); returns non-zero if set.
+ */
+static inline unsigned int hap_fd_isset(int fd, unsigned int *evts)
+{
+	const unsigned int bits = 8 * sizeof(*evts);
+
+	return evts[fd / bits] & (1U << (fd & (bits - 1)));
+}
+
+/* send a wake-up event to this thread, only if it's asleep and not notified yet */
+static inline void wake_thread(int thr)
+{
+	struct thread_ctx *ctx = &ha_thread_ctx[thr];
+
+	/* unconditionally mark the thread as notified; only write to its
+	 * wake-up pipe if it was sleeping and nobody had notified it yet,
+	 * to avoid redundant pipe writes
+	 */
+	if ((_HA_ATOMIC_FETCH_OR(&ctx->flags, TH_FL_NOTIFIED) & (TH_FL_SLEEPING|TH_FL_NOTIFIED)) == TH_FL_SLEEPING) {
+		char c = 'c';
+		DISGUISE(write(poller_wr_pipe[thr], &c, 1));
+	}
+}
+
+
+#endif /* _HAPROXY_FD_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/filters-t.h b/include/haproxy/filters-t.h
new file mode 100644
index 0000000..c86ef6f
--- /dev/null
+++ b/include/haproxy/filters-t.h
@@ -0,0 +1,258 @@
+/*
+ * include/haproxy/filters-t.h
+ * This file defines everything related to stream filters.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _HAPROXY_FILTERS_T_H
+#define _HAPROXY_FILTERS_T_H
+
+#include <haproxy/api-t.h>
+
+/* Flags set on a filter config */
+#define FLT_CFG_FL_HTX 0x00000001 /* The filter can filter HTX streams */
+
+/* Flags set on a filter instance */
+#define FLT_FL_IS_BACKEND_FILTER 0x0001 /* The filter is a backend filter */
+#define FLT_FL_IS_REQ_DATA_FILTER 0x0002 /* The filter will parse data on the request channel */
+#define FLT_FL_IS_RSP_DATA_FILTER 0x0004 /* The filter will parse data on the response channel */
+
+/* Flags set on the stream, common to all filters attached to its stream */
+#define STRM_FLT_FL_HAS_FILTERS 0x0001 /* The stream has at least one filter */
+#define STRM_FLT_FL_HOLD_HTTP_HDRS 0x0002 /* At least one filter on the stream want to hold the message headers */
+
+
+struct http_msg;
+struct proxy;
+struct stream;
+struct channel;
+struct flt_conf;
+struct filter;
+
+/* Descriptor for a "filter" keyword. The ->parse() function returns 0 in case
+ * of success, or a combination of ERR_* flags if an error is encountered. The
+ * function pointer can be NULL if not implemented.
+ */
+struct flt_kw {
+	const char *kw; /* the keyword itself */
+	int (*parse)(char **args, int *cur_arg, struct proxy *px,
+		     struct flt_conf *fconf, char **err, void *private);
+	void *private; /* opaque pointer passed back to ->parse() */
+};
+
+/*
+ * A keyword list. It is a NULL-terminated array of keywords. It embeds a struct
+ * list in order to be linked to other lists, allowing it to easily be declared
+ * where it is needed, and linked without duplicating data nor allocating
+ * memory. It is also possible to indicate a scope for the keywords.
+ */
+struct flt_kw_list {
+	const char *scope; /* optional scope of the keywords */
+	struct list list; /* link to other registered keyword lists */
+	struct flt_kw kw[VAR_ARRAY]; /* NULL-terminated array of keywords */
+};
+
+/*
+ * Callbacks available on a filter:
+ *
+ * - init : Initializes the filter for a proxy. Returns a
+ * negative value if an error occurs.
+ * - deinit : Cleans up what the init function has done.
+ * - check : Check the filter config for a proxy. Returns the
+ * number of errors encountered.
+ * - init_per_thread : Initializes the filter for a proxy for a specific
+ * thread. Returns a negative value if an error
+ * occurs.
+ * - deinit_per_thread : Cleans up what the init_per_thread function has
+ * done.
+ *
+ *
+ * - attach : Called after a filter instance creation, when it is
+ * attached to a stream. This happens when the stream
+ * is started for filters defined on the stream's
+ * frontend and when the backend is set for filters
+ * declared on the stream's backend.
+ * Returns a negative value if an error occurs, 0 if
+ * the filter must be ignored for the stream, any other
+ * value otherwise.
+ * - stream_start : Called when a stream is started. This callback will
+ * only be called for filters defined on the stream's
+ * frontend.
+ * Returns a negative value if an error occurs, any
+ * other value otherwise.
+ * - stream_set_backend : Called when a backend is set for a stream. This
+ * callbacks will be called for all filters attached
+ * to a stream (frontend and backend).
+ * Returns a negative value if an error occurs, any
+ * other value otherwise.
+ * - stream_stop : Called when a stream is stopped. This callback will
+ * only be called for filters defined on the stream's
+ * frontend.
+ * - detach : Called when a filter instance is detached from a
+ * stream, before its destruction. This happens when
+ * the stream is stopped for filters defined on the
+ * stream's frontend and when the analyze ends for
+ * filters defined on the stream's backend.
+ * - check_timeouts : Called when a stream is woken up because of an
+ * expired timer.
+ *
+ *
+ * - channel_start_analyze: Called when a filter starts to analyze a channel.
+ * Returns a negative value if an error occurs, 0 if
+ * it needs to wait, any other value otherwise.
+ * - channel_pre_analyze : Called before each analyzer attached to a channel,
+ * expects analyzers responsible for data sending.
+ * Returns a negative value if an error occurs, 0 if
+ * it needs to wait, any other value otherwise.
+ * - channel_post_analyze: Called after each analyzer attached to a channel,
+ * expects analyzers responsible for data sending.
+ * Returns a negative value if an error occurs,
+ * any other value otherwise.
+ * - channel_end_analyze : Called when all other analyzers have finished their
+ * processing.
+ * Returns a negative value if an error occurs, 0 if
+ * it needs to wait, any other value otherwise.
+ *
+ *
+ * - http_headers : Called before the body parsing, after all HTTP
+ * headers was parsed and analyzed.
+ * Returns a negative value if an error occurs, 0 if
+ * it needs to wait, any other value otherwise.
+ * - http_payload : Called when some data can be consumed.
+ * Returns a negative value if an error occurs, else
+ * the number of forwarded bytes.
+ * - http_end : Called when all the request/response has been
+ * processed and all body data has been forwarded.
+ * Returns a negative value if an error occurs, 0 if
+ * it needs to wait for some reason, any other value
+ * otherwise.
+ * - http_reset : Called when the HTTP message is reset. This happens
+ * either when a 100-continue response is received, which
+ * can be detected if s->txn->status is 10X, or when
+ * we're attempting an L7 retry.
+ * Returns nothing.
+ * - http_reply : Called when, at any time, HAProxy decides to stop
+ * the HTTP message's processing and to send a message
+ * to the client (mainly, when an error or a redirect
+ * occur).
+ * Returns nothing.
+ *
+ *
+ * - tcp_payload : Called when some data can be consumed.
+ * Returns a negative value if an error occurs, else
+ * the number of forwarded bytes.
+ */
+struct flt_ops {
+	/*
+	 * Callbacks to manage the filter lifecycle; see the big comment
+	 * above for the exact contract of each callback
+	 */
+	int (*init) (struct proxy *p, struct flt_conf *fconf);
+	void (*deinit) (struct proxy *p, struct flt_conf *fconf);
+	int (*check) (struct proxy *p, struct flt_conf *fconf);
+	int (*init_per_thread) (struct proxy *p, struct flt_conf *fconf);
+	void (*deinit_per_thread)(struct proxy *p, struct flt_conf *fconf);
+	/*
+	 * Stream callbacks (attach/detach and stream lifecycle events)
+	 */
+	int (*attach) (struct stream *s, struct filter *f);
+	int (*stream_start) (struct stream *s, struct filter *f);
+	int (*stream_set_backend)(struct stream *s, struct filter *f, struct proxy *be);
+	void (*stream_stop) (struct stream *s, struct filter *f);
+	void (*detach) (struct stream *s, struct filter *f);
+	void (*check_timeouts) (struct stream *s, struct filter *f);
+	/*
+	 * Channel callbacks (per-channel analyzer hooks)
+	 */
+	int (*channel_start_analyze)(struct stream *s, struct filter *f, struct channel *chn);
+	int (*channel_pre_analyze) (struct stream *s, struct filter *f, struct channel *chn, unsigned int an_bit);
+	int (*channel_post_analyze) (struct stream *s, struct filter *f, struct channel *chn, unsigned int an_bit);
+	int (*channel_end_analyze) (struct stream *s, struct filter *f, struct channel *chn);
+
+	/*
+	 * HTTP callbacks
+	 */
+	int (*http_headers) (struct stream *s, struct filter *f, struct http_msg *msg);
+	int (*http_payload) (struct stream *s, struct filter *f, struct http_msg *msg,
+			     unsigned int offset, unsigned int len);
+	int (*http_end) (struct stream *s, struct filter *f, struct http_msg *msg);
+
+	void (*http_reset) (struct stream *s, struct filter *f, struct http_msg *msg);
+	void (*http_reply) (struct stream *s, struct filter *f, short status,
+			    const struct buffer *msg);
+
+	/*
+	 * TCP callbacks
+	 */
+	int (*tcp_payload) (struct stream *s, struct filter *f, struct channel *chn,
+			    unsigned int offset, unsigned int len);
+};
+
+/*
+ * Structure representing the filter configuration, attached to a proxy and
+ * accessible from a filter when instantiated in a stream
+ */
+struct flt_conf {
+	const char *id; /* The filter id */
+	struct flt_ops *ops; /* The filter callbacks (see struct flt_ops) */
+	void *conf; /* The filter configuration (opaque to the core) */
+	struct list list; /* Next filter for the same proxy */
+	unsigned int flags; /* FLT_CFG_FL_* */
+};
+
+/*
+ * Structure representing a filter instance attached to a stream
+ *
+ * 2D-Array fields are used to store info per channel. The first index stands
+ * for the request channel, and the second one for the response channel.
+ * Especially, <next> and <fwd> are offsets representing the amount of data
+ * that the filter has, respectively, parsed and forwarded on a channel.
+ * Filters can access these values using FLT_NXT and FLT_FWD macros.
+ */
+struct filter {
+	struct flt_conf *config; /* the filter's configuration */
+	void *ctx; /* The filter context (opaque) */
+	unsigned short flags; /* FLT_FL_* */
+	unsigned long long offset[2]; /* Offset of input data already filtered for a specific channel
+	 * (accessed via the FLT_OFF macro). 0: request channel, 1: response channel */
+	unsigned int pre_analyzers; /* bit field indicating analyzers to pre-process */
+	unsigned int post_analyzers; /* bit field indicating analyzers to post-process */
+	struct list list; /* Next filter for the same proxy/stream */
+};
+
+/*
+ * Structure representing the "global" state of filters attached to a stream.
+ */
+struct strm_flt {
+	struct list filters; /* List of filters attached to a stream */
+	struct filter *current[2]; /* From which filter resume processing, for a specific channel.
+	 * This is used for resumable callbacks only,
+	 * If NULL, we start from the first filter.
+	 * 0: request channel, 1: response channel */
+	unsigned short flags; /* STRM_FLT_FL_* */
+	unsigned char nb_req_data_filters; /* Number of data filters registered on the request channel */
+	unsigned char nb_rsp_data_filters; /* Number of data filters registered on the response channel */
+	unsigned long long offset[2]; /* Per-channel stream offsets, accessed via the
+	 * FLT_STRM_OFF macro. 0: request channel, 1: response channel */
+};
+
+#endif /* _HAPROXY_FILTERS_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/filters.h b/include/haproxy/filters.h
new file mode 100644
index 0000000..4a32c21
--- /dev/null
+++ b/include/haproxy/filters.h
@@ -0,0 +1,187 @@
+/*
+ * include/haproxy/filters.h
+ * This file defines function prototypes for stream filters management.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _HAPROXY_FILTERS_H
+#define _HAPROXY_FILTERS_H
+
+#include <haproxy/channel.h>
+#include <haproxy/filters-t.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream-t.h>
+
+extern const char *trace_flt_id;
+extern const char *http_comp_flt_id;
+extern const char *cache_store_flt_id;
+extern const char *spoe_filter_id;
+extern const char *fcgi_flt_id;
+
+#define FLT_ID(flt) (flt)->config->id
+#define FLT_CONF(flt) (flt)->config->conf
+#define FLT_OPS(flt) (flt)->config->ops
+
+/* Useful macros to access per-channel values. It can be safely used inside
+ * filters. */
+#define CHN_IDX(chn) (((chn)->flags & CF_ISRESP) == CF_ISRESP)
+#define FLT_STRM_OFF(s, chn) (strm_flt(s)->offset[CHN_IDX(chn)])
+#define FLT_OFF(flt, chn) ((flt)->offset[CHN_IDX(chn)])
+
+#define HAS_FILTERS(strm) ((strm)->strm_flt.flags & STRM_FLT_FL_HAS_FILTERS)
+
+#define HAS_REQ_DATA_FILTERS(strm) ((strm)->strm_flt.nb_req_data_filters != 0)
+#define HAS_RSP_DATA_FILTERS(strm) ((strm)->strm_flt.nb_rsp_data_filters != 0)
+#define HAS_DATA_FILTERS(strm, chn) (((chn)->flags & CF_ISRESP) ? HAS_RSP_DATA_FILTERS(strm) : HAS_REQ_DATA_FILTERS(strm))
+
+#define IS_REQ_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_REQ_DATA_FILTER)
+#define IS_RSP_DATA_FILTER(flt) ((flt)->flags & FLT_FL_IS_RSP_DATA_FILTER)
+#define IS_DATA_FILTER(flt, chn) (((chn)->flags & CF_ISRESP) ? IS_RSP_DATA_FILTER(flt) : IS_REQ_DATA_FILTER(flt))
+
+#define FLT_STRM_CB(strm, call) \
+ do { \
+ if (HAS_FILTERS(strm)) { call; } \
+ } while (0)
+
+#define FLT_STRM_DATA_CB_IMPL_1(strm, chn, call, default_ret) \
+ (HAS_DATA_FILTERS(strm, chn) ? call : default_ret)
+#define FLT_STRM_DATA_CB_IMPL_2(strm, chn, call, default_ret, on_error) \
+ ({ \
+ int _ret; \
+ if (HAS_DATA_FILTERS(strm, chn)) { \
+ _ret = call; \
+ if (_ret < 0) { on_error; } \
+ } \
+ else \
+ _ret = default_ret; \
+ _ret; \
+ })
+#define FLT_STRM_DATA_CB_IMPL_3(strm, chn, call, default_ret, on_error, on_wait) \
+ ({ \
+ int _ret; \
+ if (HAS_DATA_FILTERS(strm, chn)) { \
+ _ret = call; \
+ if (_ret < 0) { on_error; } \
+ if (!_ret) { on_wait; } \
+ } \
+ else \
+ _ret = default_ret; \
+ _ret; \
+ })
+
+#define FLT_STRM_DATA_CB_IMPL_X(strm, chn, call, A, B, C, DATA_CB_IMPL, ...) \
+ DATA_CB_IMPL
+
+#define FLT_STRM_DATA_CB(strm, chn, call, ...) \
+ FLT_STRM_DATA_CB_IMPL_X(strm, chn, call, ##__VA_ARGS__, \
+ FLT_STRM_DATA_CB_IMPL_3(strm, chn, call, ##__VA_ARGS__), \
+ FLT_STRM_DATA_CB_IMPL_2(strm, chn, call, ##__VA_ARGS__), \
+ FLT_STRM_DATA_CB_IMPL_1(strm, chn, call, ##__VA_ARGS__))
+
+void flt_deinit(struct proxy *p);
+int flt_check(struct proxy *p);
+
+int flt_stream_start(struct stream *s);
+void flt_stream_stop(struct stream *s);
+int flt_set_stream_backend(struct stream *s, struct proxy *be);
+int flt_stream_init(struct stream *s);
+void flt_stream_release(struct stream *s, int only_backend);
+void flt_stream_check_timeouts(struct stream *s);
+
+int flt_http_payload(struct stream *s, struct http_msg *msg, unsigned int len);
+int flt_http_end(struct stream *s, struct http_msg *msg);
+
+void flt_http_reset(struct stream *s, struct http_msg *msg);
+void flt_http_reply(struct stream *s, short status, const struct buffer *msg);
+
+int flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit);
+int flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit);
+int flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit);
+int flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit);
+int flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit);
+
+int flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit);
+
+void flt_register_keywords(struct flt_kw_list *kwl);
+struct flt_kw *flt_find_kw(const char *kw);
+void flt_dump_kws(char **out);
+void list_filters(FILE *out);
+
+/* Helper function that returns the "global" state of filters attached to a
+ * stream. The strm_flt is embedded in the stream, so this never returns
+ * NULL. */
+static inline struct strm_flt *
+strm_flt(struct stream *s)
+{
+	return &s->strm_flt;
+}
+
+/* Registers <filter> as a "data" filter on channel <chn> of stream <s>. The
+ * call is a no-op if the filter is already registered on this channel. The
+ * stream's per-channel data-filter counter is bumped accordingly.
+ */
+static inline void
+register_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
+{
+	if (IS_DATA_FILTER(filter, chn))
+		return;
+
+	if (chn->flags & CF_ISRESP) {
+		filter->flags |= FLT_FL_IS_RSP_DATA_FILTER;
+		strm_flt(s)->nb_rsp_data_filters++;
+	}
+	else {
+		filter->flags |= FLT_FL_IS_REQ_DATA_FILTER;
+		strm_flt(s)->nb_req_data_filters++;
+	}
+}
+
+/* Unregisters <filter> as a "data" filter on channel <chn> of stream <s>.
+ * The call is a no-op if the filter is not registered on this channel. The
+ * stream's per-channel data-filter counter is decremented accordingly.
+ */
+static inline void
+unregister_data_filter(struct stream *s, struct channel *chn, struct filter *filter)
+{
+	if (!IS_DATA_FILTER(filter, chn))
+		return;
+
+	if (chn->flags & CF_ISRESP) {
+		filter->flags &= ~FLT_FL_IS_RSP_DATA_FILTER;
+		strm_flt(s)->nb_rsp_data_filters--;
+	}
+	else {
+		filter->flags &= ~FLT_FL_IS_REQ_DATA_FILTER;
+		strm_flt(s)->nb_req_data_filters--;
+	}
+}
+
+/* This function must be called when a filter alters payload data. It updates
+ * the offsets of all previous filters. Do not call this function when a
+ * filter changes the size of payload data: doing so leads to undefined
+ * behavior.
+ *
+ * It is the filter's responsibility to update the data itself.
+ */
+static inline void
+flt_update_offsets(struct filter *filter, struct channel *chn, int len)
+{
+	struct stream *strm = chn_strm(chn);
+	struct filter *prev;
+
+	/* walk the stream's filter list, shifting the offset of every filter
+	 * placed before <filter>; stop as soon as <filter> itself is reached
+	 */
+	list_for_each_entry(prev, &strm_flt(strm)->filters, list) {
+		if (prev == filter)
+			break;
+		FLT_OFF(prev, chn) += len;
+	}
+}
+
+#endif /* _HAPROXY_FILTERS_H */
diff --git a/include/haproxy/fix-t.h b/include/haproxy/fix-t.h
new file mode 100644
index 0000000..4b4de55
--- /dev/null
+++ b/include/haproxy/fix-t.h
@@ -0,0 +1,70 @@
+/*
+ * include/haproxy/fix-t.h
+ * This file contains structure declarations for FIX protocol.
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FIX_T_H
+#define _HAPROXY_FIX_T_H
+
+#include <import/ist.h>
+
+/*
+ * FIX messages are composed by a list of Tag=Value separated by a 'delimiter'
+ */
+#define FIX_DELIMITER 0x01
+
+/*
+ * known FIX version strings
+ */
+#define FIX_4_0 (ist("FIX.4.0"))
+#define FIX_4_1 (ist("FIX.4.1"))
+#define FIX_4_2 (ist("FIX.4.2"))
+#define FIX_4_3 (ist("FIX.4.3"))
+#define FIX_4_4 (ist("FIX.4.4"))
+#define FIX_5_0 (ist("FIXT.1.1"))
+/* FIX_5_0SP1 and FIX_5_0SP2 have the same version string as FIX_5_0 */
+
+/*
+ * Supported FIX tag ID
+ */
+#define FIX_TAG_BeginString 8
+#define FIX_TAG_BodyLength 9
+#define FIX_TAG_CheckSum 10
+#define FIX_TAG_MsgType 35
+#define FIX_TAG_SenderCompID 49
+#define FIX_TAG_TargetCompID 56
+
+
+#define FIX_MSG_MINSIZE 26 /* Minimal length for a FIX Message */
+#define FIX_CHKSUM_SIZE 7 /* Length of the CheckSum tag (10=NNN<delim>) */
+/*
+ * return code when parsing / validating FIX messages
+ */
+#define FIX_INVALID_MESSAGE -1
+#define FIX_NEED_MORE_DATA 0
+#define FIX_VALID_MESSAGE 1
+
+#endif /* _HAPROXY_FIX_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/fix.h b/include/haproxy/fix.h
new file mode 100644
index 0000000..94aa815
--- /dev/null
+++ b/include/haproxy/fix.h
@@ -0,0 +1,97 @@
+/*
+ * include/haproxy/fix.h
+ * This file contains functions and macros declarations for FIX protocol decoding.
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FIX_H
+#define _HAPROXY_FIX_H
+
+#include <import/ist.h>
+
+#include <haproxy/fix-t.h>
+#include <haproxy/tools.h>
+
+unsigned int fix_check_id(const struct ist str, const struct ist version);
+int fix_validate_message(const struct ist msg);
+struct ist fix_tag_value(const struct ist msg, unsigned int tagid);
+
+/*
+ * Return the FIX version string (one of FIX_X_Y macros) corresponding to
+ * <str> or IST_NULL if not found.
+ */
+static inline struct ist fix_version(const struct ist str)
+{
+ /* 7 is the minimal size for the FIX version string */
+ if (istlen(str) < 7)
+ return IST_NULL;
+
+ if (isteq(FIX_4_0, str))
+ return FIX_4_0;
+ else if (isteq(FIX_4_1, str))
+ return FIX_4_1;
+ else if (isteq(FIX_4_2, str))
+ return FIX_4_2;
+ else if (isteq(FIX_4_3, str))
+ return FIX_4_3;
+ else if (isteq(FIX_4_4, str))
+ return FIX_4_4;
+ else if (isteq(FIX_5_0, str))
+ return FIX_5_0;
+
+ return IST_NULL;
+}
+
+/*
+ * Return the FIX tag ID corresponding to <tag> if one is found, or 0 if not.
+ *
+ * full list of tag ID available here, just in case we need to support
+ * more "string" equivalent in the future:
+ * https://www.onixs.biz/fix-dictionary/4.2/fields_by_tag.html
+ */
+static inline unsigned int fix_tagid(const struct ist tag)
+{
+ unsigned id = fix_check_id(tag, IST_NULL);
+
+ if (id)
+ return id;
+
+ else if (isteqi(tag, ist("MsgType")))
+ return FIX_TAG_MsgType;
+ else if (isteqi(tag, ist("CheckSum")))
+ return FIX_TAG_CheckSum;
+ else if (isteqi(tag, ist("BodyLength")))
+ return FIX_TAG_BodyLength;
+ else if (isteqi(tag, ist("TargetCompID")))
+ return FIX_TAG_TargetCompID;
+ else if (isteqi(tag, ist("BeginString")))
+ return FIX_TAG_BeginString;
+ else if (isteqi(tag, ist("SenderCompID")))
+ return FIX_TAG_SenderCompID;
+
+ return 0;
+}
+
+#endif /* _HAPROXY_FIX_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/flt_http_comp.h b/include/haproxy/flt_http_comp.h
new file mode 100644
index 0000000..56f984a
--- /dev/null
+++ b/include/haproxy/flt_http_comp.h
@@ -0,0 +1,28 @@
+/*
+ * include/haproxy/flt_http_comp.h
+ * This file defines function prototypes for the compression filter.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _HAPROXY_FLT_HTTP_COMP_H
+#define _HAPROXY_FLT_HTTP_COMP_H
+
+#include <haproxy/proxy-t.h>
+
+int check_implicit_http_comp_flt(struct proxy *proxy);
+
+#endif // _HAPROXY_FLT_HTTP_COMP_H
diff --git a/include/haproxy/freq_ctr-t.h b/include/haproxy/freq_ctr-t.h
new file mode 100644
index 0000000..d5f1a89
--- /dev/null
+++ b/include/haproxy/freq_ctr-t.h
@@ -0,0 +1,45 @@
+/*
+ * include/haproxy/freq_ctr-t.h
+ * This file contains structure declarations for frequency counters.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FREQ_CTR_T_H
+#define _HAPROXY_FREQ_CTR_T_H
+
+#include <haproxy/api-t.h>
+
+/* The generic freq_ctr counter counts a rate of events per period, where the
+ * period has to be known by the user. The period is measured in ticks and
+ * must be at least 2 ticks long. This form is slightly more CPU intensive for
+ * reads than the per-second form as it involves a divide.
+ */
+struct freq_ctr {
+ unsigned int curr_tick; /* start date of current period (wrapping ticks) */
+ unsigned int curr_ctr; /* cumulated value for current period */
+ unsigned int prev_ctr; /* value for last period */
+};
+
+#endif /* _HAPROXY_FREQ_CTR_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/freq_ctr.h b/include/haproxy/freq_ctr.h
new file mode 100644
index 0000000..f3f6903
--- /dev/null
+++ b/include/haproxy/freq_ctr.h
@@ -0,0 +1,402 @@
+/*
+ * include/haproxy/freq_ctr.h
+ * This file contains macros and inline functions for frequency counters.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FREQ_CTR_H
+#define _HAPROXY_FREQ_CTR_H
+
+#include <haproxy/api.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/intops.h>
+#include <haproxy/ticks.h>
+
+/* exported functions from freq_ctr.c */
+ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
+int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
+uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
+
+/* Update a frequency counter by <inc> incremental units. It is automatically
+ * rotated if the period is over. It is important that it correctly initializes
+ * a null area.
+ */
+static inline uint update_freq_ctr_period(struct freq_ctr *ctr, uint period, uint inc)
+{
+ uint curr_tick;
+
+ /* our local clock (now_ms) is most of the time strictly equal to
+ * global_now_ms, and during the edge of the millisecond, global_now_ms
+ * might have been pushed further by another thread. Given that
+ * accessing this shared variable is extremely expensive, we first try
+ * to use our local date, which will be good almost every time. And we
+ * only switch to the global clock when we're out of the period so as
+ * to never put a date in the past there.
+ */
+ curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ if (likely(now_ms - curr_tick < period))
+ return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
+
+ return update_freq_ctr_period_slow(ctr, period, inc);
+}
+
+/* Update a 1-sec frequency counter by <inc> incremental units. It is automatically
+ * rotated if the period is over. It is important that it correctly initializes
+ * a null area.
+ */
+static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int inc)
+{
+ return update_freq_ctr_period(ctr, MS_TO_TICKS(1000), inc);
+}
+
+/* Reads a frequency counter taking history into account for missing time in
+ * current period. The period has to be passed in number of ticks and must
+ * match the one used to feed the counter. The counter value is reported for
+ * current global date. The return value has the same precision as one input
+ * data sample, so low rates over the period will be inaccurate but still
+ * appropriate for max checking. One trick we use for low values is to specially
+ * handle the case where the rate is between 0 and 1 in order to avoid flapping
+ * while waiting for the next event.
+ *
+ * For immediate limit checking, it's recommended to use freq_ctr_period_remain()
+ * instead which does not have the flapping correction, so that even frequencies
+ * as low as one event/period are properly handled.
+ */
+static inline uint read_freq_ctr_period(const struct freq_ctr *ctr, uint period)
+{
+ ullong total = freq_ctr_total(ctr, period, -1);
+
+ return div64_32(total, period);
+}
+
+/* same as read_freq_ctr_period() above except that floats are used for the
+ * output so that low rates can be more precise.
+ */
+static inline double read_freq_ctr_period_flt(const struct freq_ctr *ctr, uint period)
+{
+ ullong total = freq_ctr_total(ctr, period, -1);
+
+ return (double)total / (double)period;
+}
+
+/* Read a 1-sec frequency counter taking history into account for missing time
+ * in current period.
+ */
+static inline unsigned int read_freq_ctr(const struct freq_ctr *ctr)
+{
+ return read_freq_ctr_period(ctr, MS_TO_TICKS(1000));
+}
+
+/* same as read_freq_ctr() above except that floats are used for the
+ * output so that low rates can be more precise.
+ */
+static inline double read_freq_ctr_flt(const struct freq_ctr *ctr)
+{
+ return read_freq_ctr_period_flt(ctr, MS_TO_TICKS(1000));
+}
+
+/* Returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> events per period, and taking into account that
+ * <pend> events are already known to be pending. Returns 0 if limit was reached.
+ */
+static inline uint freq_ctr_remain_period(const struct freq_ctr *ctr, uint period, uint freq, uint pend)
+{
+ ullong total = freq_ctr_total(ctr, period, pend);
+ uint avg = div64_32(total, period);
+
+ if (avg > freq)
+ avg = freq;
+ return freq - avg;
+}
+
+/* returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> and taking into account that <pend> events are
+ * already known to be pending. Returns 0 if limit was reached.
+ */
+static inline unsigned int freq_ctr_remain(const struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+ return freq_ctr_remain_period(ctr, MS_TO_TICKS(1000), freq, pend);
+}
+
+/* return the expected wait time in ms before the next event may occur,
+ * respecting frequency <freq>, and assuming there may already be some pending
+ * events. It returns zero if we can proceed immediately, otherwise the wait
+ * time, which will be rounded down 1ms for better accuracy, with a minimum
+ * of one ms.
+ */
+static inline uint next_event_delay_period(const struct freq_ctr *ctr, uint period, uint freq, uint pend)
+{
+ ullong total = freq_ctr_total(ctr, period, pend);
+ ullong limit = (ullong)freq * period;
+ uint wait;
+
+ if (total < limit)
+ return 0;
+
+ /* too many events already, let's count how long to wait before they're
+ * processed. For this we'll subtract from the number of pending events
+ * the ones programmed for the current period, to know how long to wait
+ * for the next period. Each event takes period/freq ticks.
+ */
+ total -= limit;
+ wait = div64_32(total, (freq ? freq : 1));
+ return MAX(wait, 1);
+}
+
+/* Returns the expected wait time in ms before the next event may occur,
+ * respecting frequency <freq> over 1 second, and assuming there may already be
+ * some pending events. It returns zero if we can proceed immediately, otherwise
+ * the wait time, which will be rounded down 1ms for better accuracy, with a
+ * minimum of one ms.
+ */
+static inline unsigned int next_event_delay(const struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+ return next_event_delay_period(ctr, MS_TO_TICKS(1000), freq, pend);
+}
+
+/* While the functions above report average event counts per period, we are
+ * also interested in average values per event. For this we use a different
+ * method. The principle is to rely on a long tail which sums the new value
+ * with a fraction of the previous value, resulting in a sliding window of
+ * infinite length depending on the precision we're interested in.
+ *
+ * The idea is that we always keep (N-1)/N of the sum and add the new sampled
+ * value. The sum over N values can be computed with a simple program for a
+ * constant value 1 at each iteration :
+ *
+ * N
+ * ,---
+ * \ N - 1 e - 1
+ * > ( --------- )^x ~= N * -----
+ * / N e
+ * '---
+ * x = 1
+ *
+ * Note: I'm not sure how to demonstrate this but at least this is easily
+ * verified with a simple program, the sum equals N * 0.632120 for any N
+ * moderately large (tens to hundreds).
+ *
+ * Inserting a constant sample value V here simply results in :
+ *
+ * sum = V * N * (e - 1) / e
+ *
+ * But we don't want to integrate over a small period, but infinitely. Let's
+ * cut the infinity in P periods of N values. Each period M is exactly the same
+ * as period M-1 with a factor of ((N-1)/N)^N applied. A test shows that given a
+ * large N :
+ *
+ * N - 1 1
+ * ( ------- )^N ~= ---
+ * N e
+ *
+ * Our sum is now a sum of each factor times :
+ *
+ * N*P P
+ * ,--- ,---
+ * \ N - 1 e - 1 \ 1
+ * > v ( --------- )^x ~= VN * ----- * > ---
+ * / N e / e^x
+ * '--- '---
+ * x = 1 x = 0
+ *
+ * For P "large enough", in tests we get this :
+ *
+ * P
+ * ,---
+ * \ 1 e
+ * > --- ~= -----
+ * / e^x e - 1
+ * '---
+ * x = 0
+ *
+ * This simplifies the sum above :
+ *
+ * N*P
+ * ,---
+ * \ N - 1
+ * > v ( --------- )^x = VN
+ * / N
+ * '---
+ * x = 1
+ *
+ * So basically by summing values and applying the last result an (N-1)/N factor
+ * we just get N times the values over the long term, so we can recover the
+ * constant value V by dividing by N. In order to limit the impact of integer
+ * overflows, we'll use this equivalence which saves us one multiply :
+ *
+ * N - 1 1 x0
+ * x1 = x0 * ------- = x0 * ( 1 - --- ) = x0 - ----
+ * N N N
+ *
+ * And given that x0 is discrete here we'll have to saturate the values before
+ * performing the divide, so the value insertion will become :
+ *
+ * x0 + N - 1
+ * x1 = x0 - ------------
+ * N
+ *
+ * A value added at the entry of the sliding window of N values will thus be
+ * reduced to 1/e or 36.7% after N terms have been added. After a second batch,
+ * it will only be 1/e^2, or 13.5%, and so on. So practically speaking, each
+ * old period of N values represents only a quickly fading ratio of the global
+ * sum :
+ *
+ * period ratio
+ * 1 36.7%
+ * 2 13.5%
+ * 3 4.98%
+ * 4 1.83%
+ * 5 0.67%
+ * 6 0.25%
+ * 7 0.09%
+ * 8 0.033%
+ * 9 0.012%
+ * 10 0.0045%
+ *
+ * So after 10N samples, the initial value has already faded out by a factor of
+ * 22026, which is quite fast. If the sliding window is 1024 samples wide, it
+ * means that a sample will only count for 1/22k of its initial value after 10k
+ * samples went after it, which results in half of the value it would represent
+ * using an arithmetic mean. The benefit of this method is that it's very cheap
+ * in terms of computations when N is a power of two. This is very well suited
+ * to record response times as large values will fade out faster than with an
+ * arithmetic mean and will depend on sample count and not time.
+ *
+ * Demonstrating all the above assumptions with maths instead of a program is
+ * left as an exercise for the reader.
+ */
+
+/* Adds sample value <v> to sliding window sum <sum> configured for <n> samples.
+ * The sample is returned. Better if <n> is a power of two. This function is
+ * thread-safe.
+ */
+static inline unsigned int swrate_add(unsigned int *sum, unsigned int n, unsigned int v)
+{
+ unsigned int new_sum, old_sum;
+
+ old_sum = *sum;
+ do {
+ new_sum = old_sum - (old_sum + n - 1) / n + v;
+ } while (!HA_ATOMIC_CAS(sum, &old_sum, new_sum) && __ha_cpu_relax());
+ return new_sum;
+}
+
+/* Adds sample value <v> to sliding window sum <sum> configured for <n> samples.
+ * The sample is returned. Better if <n> is a power of two. This function is
+ * thread-safe.
+ * This function should give better accuracy than swrate_add when number of
+ * samples collected is lower than nominal window size. In such circumstances
+ * <n> should be set to 0.
+ */
+static inline unsigned int swrate_add_dynamic(unsigned int *sum, unsigned int n, unsigned int v)
+{
+ unsigned int new_sum, old_sum;
+
+ old_sum = *sum;
+ do {
+ new_sum = old_sum - (n ? (old_sum + n - 1) / n : 0) + v;
+ } while (!HA_ATOMIC_CAS(sum, &old_sum, new_sum) && __ha_cpu_relax());
+ return new_sum;
+}
+
+/* Adds sample value <v> spanning <s> samples to sliding window sum <sum>
+ * configured for <n> samples, where <n> is supposed to be "much larger" than
+ * <s>. The sample is returned. Better if <n> is a power of two. Note that this
+ * is only an approximation. Indeed, as can be seen with two samples only over a
+ * 8-sample window, the original function would return :
+ * sum1 = sum - (sum + 7) / 8 + v
+ * sum2 = sum1 - (sum1 + 7) / 8 + v
+ * = (sum - (sum + 7) / 8 + v) - (sum - (sum + 7) / 8 + v + 7) / 8 + v
+ * ~= 7sum/8 - 7/8 + v - sum/8 + sum/64 - 7/64 - v/8 - 7/8 + v
+ * ~= (3sum/4 + sum/64) - (7/4 + 7/64) + 15v/8
+ *
+ * while the function below would return :
+ * sum = sum + 2*v - (sum + 8) * 2 / 8
+ * = 3sum/4 + 2v - 2
+ *
+ * this presents an error of ~ (sum/64 + 9/64 + v/8) = (sum+n+1)/(n^s) + v/n
+ *
+ * Thus the simplified function effectively replaces a part of the history with
+ * a linear sum instead of applying the exponential one. But as long as s/n is
+ * "small enough", the error fades away and remains small for both small and
+ * large values of n and s (typically < 0.2% measured). This function is
+ * thread-safe.
+ */
+static inline unsigned int swrate_add_scaled(unsigned int *sum, unsigned int n, unsigned int v, unsigned int s)
+{
+ unsigned int new_sum, old_sum;
+
+ old_sum = *sum;
+ do {
+ new_sum = old_sum + v * s - div64_32((unsigned long long)old_sum * s + n - 1, n);
+ } while (!HA_ATOMIC_CAS(sum, &old_sum, new_sum) && __ha_cpu_relax());
+ return new_sum;
+}
+
+/* opportunistic versions of the functions above: an attempt is made to update
+ * the value, but in case of contention, it's not retried. This is fine when
+ * rough estimates are needed and speed is preferred over accuracy.
+ */
+
+static inline uint swrate_add_opportunistic(uint *sum, uint n, uint v)
+{
+ uint new_sum, old_sum;
+
+ old_sum = *sum;
+ new_sum = old_sum - (old_sum + n - 1) / n + v;
+ HA_ATOMIC_CAS(sum, &old_sum, new_sum);
+ return new_sum;
+}
+
+static inline uint swrate_add_dynamic_opportunistic(uint *sum, uint n, uint v)
+{
+ uint new_sum, old_sum;
+
+ old_sum = *sum;
+ new_sum = old_sum - (n ? (old_sum + n - 1) / n : 0) + v;
+ HA_ATOMIC_CAS(sum, &old_sum, new_sum);
+ return new_sum;
+}
+
+static inline uint swrate_add_scaled_opportunistic(uint *sum, uint n, uint v, uint s)
+{
+ uint new_sum, old_sum;
+
+ old_sum = *sum;
+ new_sum = old_sum + v * s - div64_32((unsigned long long)old_sum * s + n - 1, n);
+ HA_ATOMIC_CAS(sum, &old_sum, new_sum);
+ return new_sum;
+}
+
+/* Returns the average sample value for the sum <sum> over a sliding window of
+ * <n> samples. Better if <n> is a power of two. It must be the same <n> as the
+ * one used above in all additions.
+ */
+static inline unsigned int swrate_avg(unsigned int sum, unsigned int n)
+{
+ return (sum + n - 1) / n;
+}
+
+#endif /* _HAPROXY_FREQ_CTR_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/frontend.h b/include/haproxy/frontend.h
new file mode 100644
index 0000000..8cd1a0a
--- /dev/null
+++ b/include/haproxy/frontend.h
@@ -0,0 +1,38 @@
+/*
+ * include/haproxy/frontend.h
+ * This file declares frontend-specific functions.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_FRONTEND_H
+#define _HAPROXY_FRONTEND_H
+
+#include <haproxy/stream-t.h>
+
+int frontend_accept(struct stream *s);
+
+int increment_actconn();
+
+#endif /* _HAPROXY_FRONTEND_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/global-t.h b/include/haproxy/global-t.h
new file mode 100644
index 0000000..9b3cd78
--- /dev/null
+++ b/include/haproxy/global-t.h
@@ -0,0 +1,251 @@
+/*
+ * include/haproxy/global-t.h
+ * Global types and macros. Please avoid adding more stuff here!
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_GLOBAL_T_H
+#define _HAPROXY_GLOBAL_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/freq_ctr-t.h>
+
+/* modes of operation (global.mode) */
+#define MODE_DEBUG 0x01
+#define MODE_DAEMON 0x02
+#define MODE_QUIET 0x04
+#define MODE_CHECK 0x08
+#define MODE_VERBOSE 0x10
+#define MODE_STARTING 0x20
+#define MODE_FOREGROUND 0x40
+#define MODE_MWORKER 0x80 /* Master Worker */
+#define MODE_MWORKER_WAIT 0x100 /* Master Worker wait mode */
+#define MODE_ZERO_WARNING 0x200 /* warnings cause a failure */
+#define MODE_DIAG 0x400 /* extra warnings */
+#define MODE_CHECK_CONDITION 0x800 /* -cc mode */
+#define MODE_STOPPING 0x1000 /* the process is in the deinit phase, the event loop is not running anymore. */
+#define MODE_DUMP_LIBS 0x2000 /* dump loaded libraries at the end of init phase */
+#define MODE_DUMP_KWD 0x4000 /* dump registered keywords (see kwd_dump for the list) */
+#define MODE_DUMP_CFG 0x8000 /* dump the configuration file */
+#define MODE_DUMP_NB_L 0x10000 /* dump line numbers when the configuration file is dumped */
+
+/* list of last checks to perform, depending on config options */
+#define LSTCHK_CAP_BIND 0x00000001 /* check that we can bind to any port */
+#define LSTCHK_NETADM 0x00000002 /* check that we have CAP_NET_ADMIN */
+
+/* Global tuning options */
+/* available polling mechanisms */
+#define GTUNE_USE_SELECT (1<<0)
+#define GTUNE_USE_POLL (1<<1)
+#define GTUNE_USE_EPOLL (1<<2)
+#define GTUNE_USE_KQUEUE (1<<3)
+/* platform-specific options */
+#define GTUNE_USE_SPLICE (1<<4)
+#define GTUNE_USE_GAI (1<<5)
+#define GTUNE_LIMITED_QUIC (1<<6)
+#define GTUNE_RESOLVE_DONTFAIL (1<<7)
+
+#define GTUNE_SOCKET_TRANSFER (1<<8)
+#define GTUNE_NOEXIT_ONFAILURE (1<<9)
+#define GTUNE_USE_SYSTEMD (1<<10)
+
+#define GTUNE_BUSY_POLLING (1<<11)
+/* (1<<12) unused */
+#define GTUNE_SET_DUMPABLE (1<<13)
+#define GTUNE_USE_EVPORTS (1<<14)
+#define GTUNE_STRICT_LIMITS (1<<15)
+#define GTUNE_INSECURE_FORK (1<<16)
+#define GTUNE_INSECURE_SETUID (1<<17)
+#define GTUNE_FD_ET (1<<18)
+#define GTUNE_SCHED_LOW_LATENCY (1<<19)
+#define GTUNE_IDLE_POOL_SHARED (1<<20)
+#define GTUNE_DISABLE_H2_WEBSOCKET (1<<21)
+#define GTUNE_DISABLE_ACTIVE_CLOSE (1<<22)
+#define GTUNE_QUICK_EXIT (1<<23)
+#define GTUNE_QUIC_SOCK_PER_CONN (1<<24)
+#define GTUNE_NO_QUIC (1<<25)
+#define GTUNE_USE_FAST_FWD (1<<26)
+#define GTUNE_LISTENER_MQ_FAIR (1<<27)
+#define GTUNE_LISTENER_MQ_OPT (1<<28)
+#define GTUNE_LISTENER_MQ_ANY (GTUNE_LISTENER_MQ_FAIR | GTUNE_LISTENER_MQ_OPT)
+
+#define NO_ZERO_COPY_FWD 0x0001 /* Globally disable zero-copy FF */
+#define NO_ZERO_COPY_FWD_PT 0x0002 /* disable zero-copy FF for PT (recv & send are disabled automatically) */
+#define NO_ZERO_COPY_FWD_H1_RCV 0x0004 /* disable zero-copy FF for H1 on receive */
+#define NO_ZERO_COPY_FWD_H1_SND 0x0008 /* disable zero-copy FF for H1 on send */
+#define NO_ZERO_COPY_FWD_H2_RCV 0x0010 /* disable zero-copy FF for H2 on receive */
+#define NO_ZERO_COPY_FWD_H2_SND 0x0020 /* disable zero-copy FF for H2 on send */
+#define NO_ZERO_COPY_FWD_QUIC_RCV 0x0040 /* disable zero-copy FF for QUIC on receive */
+#define NO_ZERO_COPY_FWD_QUIC_SND 0x0080 /* disable zero-copy FF for QUIC on send */
+#define NO_ZERO_COPY_FWD_FCGI_RCV 0x0100 /* disable zero-copy FF for FCGI on receive */
+#define NO_ZERO_COPY_FWD_FCGI_SND 0x0200 /* disable zero-copy FF for FCGI on send */
+
+
+extern int cluster_secret_isset; /* non zero means a cluster secret was initialized */
+
+/* SSL server verify mode */
+enum {
+ SSL_SERVER_VERIFY_NONE = 0,
+ SSL_SERVER_VERIFY_REQUIRED = 1,
+};
+
+/* bit values to go with "warned" above */
+#define WARN_ANY 0x00000001 /* any warning was emitted */
+#define WARN_FORCECLOSE_DEPRECATED 0x00000002
+#define WARN_EXEC_PATH 0x00000004 /* executable path already reported */
+
+/* put there the forward declarations needed for global.h */
+struct proxy;
+
+/* FIXME : this will have to be redefined correctly */
+struct global {
+ int uid;
+ int gid;
+ int external_check; /* 0=disabled, 1=enabled, 2=enabled with env */
+ int nbthread;
+ int mode;
+ unsigned int hard_stop_after; /* maximum time allowed to perform a soft-stop */
+ unsigned int grace_delay; /* grace delay between SIGUSR1 and soft-stop */
+ unsigned int close_spread_time; /* time window during which connection closing is spread */
+ unsigned int close_spread_end; /* end of close spread window */
+ int maxconn, hardmaxconn;
+ int maxsslconn;
+ int ssl_session_max_cost; /* how many bytes an SSL session may cost */
+ int ssl_handshake_max_cost; /* how many bytes an SSL handshake may use */
+ int ssl_used_frontend; /* non-zero if SSL is used in a frontend */
+ int ssl_used_backend; /* non-zero if SSL is used in a backend */
+ int ssl_used_async_engines; /* number of used async engines */
+ unsigned int ssl_server_verify; /* default verify mode on servers side */
+ int comp_rate_lim; /* HTTP compression rate limit */
+ int maxpipes; /* max # of pipes */
+ int maxsock; /* max # of sockets */
+ int rlimit_nofile; /* default ulimit-n value : 0=unset */
+ int rlimit_memmax_all; /* default all-process memory limit in megs ; 0=unset */
+ int rlimit_memmax; /* default per-process memory limit in megs ; 0=unset */
+ long maxzlibmem; /* max RAM for zlib in bytes */
+ int nbtgroups; /* number of thread groups (IDs start at 1) */
+ int spread_checks;
+ int max_spread_checks;
+ int max_syslog_len;
+ char *chroot;
+ char *pidfile;
+ char *node, *desc; /* node name & description */
+ int localpeer_cmdline; /* whether or not the commandline "-L" was set */
+ int fd_hard_limit; /* hard limit on ulimit-n : 0=unset */
+ struct buffer log_tag; /* name for syslog */
+ struct list loggers; /* one per 'log' directive */
+ char *log_send_hostname; /* set hostname in syslog header */
+ char *server_state_base; /* path to a directory where server state files can be found */
+ char *server_state_file; /* path to the file where server states are loaded from */
+ unsigned char cluster_secret[16]; /* 128 bits of an SHA1 digest of a secret defined as ASCII string */
+ struct {
+ int maxpollevents; /* max number of poll events at once */
+ int maxaccept; /* max number of consecutive accept() */
+ int options; /* various tuning options */
+ int runqueue_depth;/* max number of tasks to run at once */
+ int recv_enough; /* how many input bytes at once are "enough" */
+ int bufsize; /* buffer size in bytes, defaults to BUFSIZE */
+ int maxrewrite; /* buffer max rewrite size in bytes, defaults to MAXREWRITE */
+ int reserved_bufs; /* how many buffers can only be allocated for response */
+ int buf_limit; /* if not null, how many total buffers may only be allocated */
+ int client_sndbuf; /* set client sndbuf to this value if not null */
+ int client_rcvbuf; /* set client rcvbuf to this value if not null */
+ int server_sndbuf; /* set server sndbuf to this value if not null */
+ int server_rcvbuf; /* set server rcvbuf to this value if not null */
+ int frontend_sndbuf; /* set frontend dgram sndbuf to this value if not null */
+ int frontend_rcvbuf; /* set frontend dgram rcvbuf to this value if not null */
+ int backend_sndbuf; /* set backend dgram sndbuf to this value if not null */
+ int backend_rcvbuf; /* set backend dgram rcvbuf to this value if not null */
+ int pipesize; /* pipe size in bytes, system defaults if zero */
+ int max_http_hdr; /* max number of HTTP headers, use MAX_HTTP_HDR if zero */
+ int requri_len; /* max len of request URI, use REQURI_LEN if zero */
+ int cookie_len; /* max length of cookie captures */
+ int pattern_cache; /* max number of entries in the pattern cache. */
+ int sslcachesize; /* SSL cache size in session, defaults to 20000 */
+ int comp_maxlevel; /* max HTTP compression level */
+ int pool_low_ratio; /* max ratio of FDs used before we stop using new idle connections */
+ int pool_high_ratio; /* max ratio of FDs used before we start killing idle connections when creating new connections */
+ int pool_low_count; /* max number of opened fd before we stop using new idle connections */
+ int pool_high_count; /* max number of opened fd before we start killing idle connections when creating new connections */
+ size_t pool_cache_size; /* per-thread cache size per pool (defaults to CONFIG_HAP_POOL_CACHE_SIZE) */
+ unsigned short idle_timer; /* how long before an empty buffer is considered idle (ms) */
+ unsigned short no_zero_copy_fwd; /* Flags to disable zero-copy fast-forwarding (global & per-protocols) */
+ int nb_stk_ctr; /* number of stick counters, defaults to MAX_SESS_STKCTR */
+ int default_shards; /* default shards for listeners, or -1 (by-thread) or -2 (by-group) */
+ uint max_checks_per_thread; /* if >0, no more than this concurrent checks per thread */
+#ifdef USE_QUIC
+ unsigned int quic_backend_max_idle_timeout;
+ unsigned int quic_frontend_max_idle_timeout;
+ unsigned int quic_frontend_max_streams_bidi;
+ unsigned int quic_retry_threshold;
+ unsigned int quic_reorder_ratio;
+ unsigned int quic_streams_buf;
+ unsigned int quic_max_frame_loss;
+#endif /* USE_QUIC */
+ } tune;
+ struct {
+ char *prefix; /* path prefix of unix bind socket */
+ struct { /* UNIX socket permissions */
+ uid_t uid; /* -1 to leave unchanged */
+ gid_t gid; /* -1 to leave unchanged */
+ mode_t mode; /* 0 to leave unchanged */
+ } ux;
+ } unix_bind;
+ struct proxy *cli_fe; /* the frontend holding the stats settings */
+ int numa_cpu_mapping;
+ int prealloc_fd;
+ int cfg_curr_line; /* line number currently being parsed */
+ const char *cfg_curr_file; /* config file currently being parsed or NULL */
+ char *cfg_curr_section; /* config section name currently being parsed or NULL */
+
+ /* The info above is config stuff, it doesn't change during the process' life */
+ /* A number of the elements below are updated by all threads in real time and
+ * suffer high contention, so we need to put them in their own cache lines, if
+ * possible grouped by changes.
+ */
+ ALWAYS_ALIGN(64);
+ struct freq_ctr conn_per_sec;
+ struct freq_ctr sess_per_sec;
+ struct freq_ctr ssl_per_sec;
+ struct freq_ctr ssl_fe_keys_per_sec;
+ struct freq_ctr ssl_be_keys_per_sec;
+ struct freq_ctr comp_bps_in; /* bytes per second, before http compression */
+ struct freq_ctr comp_bps_out; /* bytes per second, after http compression */
+ uint sslconns, totalsslconns; /* active, total # of SSL conns */
+ int cps_lim, cps_max;
+ int sps_lim, sps_max;
+ int ssl_lim, ssl_max;
+ int ssl_fe_keys_max, ssl_be_keys_max;
+ unsigned int shctx_lookups, shctx_misses;
+ unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
+ int last_checks;
+ uint32_t anon_key;
+
+ /* leave this at the end to make sure we don't share this cache line by accident */
+ ALWAYS_ALIGN(64);
+};
+
+#endif /* _HAPROXY_GLOBAL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/global.h b/include/haproxy/global.h
new file mode 100644
index 0000000..2e7fa6b
--- /dev/null
+++ b/include/haproxy/global.h
@@ -0,0 +1,98 @@
+/*
+ * include/haproxy/global.h
+ * Exported global variables and functions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_GLOBAL_H
+#define _HAPROXY_GLOBAL_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/global-t.h>
+
+extern char *build_features;
+extern struct global global;
+extern int pid; /* current process id */
+extern int actconn; /* # of active sessions */
+extern int listeners;
+extern int jobs; /* # of active jobs (listeners, sessions, open devices) */
+extern int unstoppable_jobs; /* # of active jobs that can't be stopped during a soft stop */
+extern int active_peers; /* # of active peers (connection attempts and successes) */
+extern int connected_peers; /* # of really connected peers */
+extern int nb_oldpids; /* contains the number of old pids found */
+extern const int zero;
+extern const int one;
+extern const struct linger nolinger;
+extern int stopping; /* non zero means stopping in progress */
+extern int killed; /* >0 means a hard-stop is triggered, >1 means hard-stop immediately */
+extern char hostname[MAX_HOSTNAME_LEN];
+extern char *localpeer;
+extern unsigned int warned; /* bitfield of a few warnings to emit just once */
+extern struct list proc_list; /* list of process in mworker mode */
+extern int master; /* 1 if in master, 0 otherwise */
+extern unsigned int rlim_fd_cur_at_boot;
+extern unsigned int rlim_fd_max_at_boot;
+extern int atexit_flag;
+extern unsigned char boot_seed[20]; // per-boot random seed (160 bits initially)
+extern THREAD_LOCAL struct buffer trash;
+
+struct proxy;
+struct server;
+int main(int argc, char **argv);
+void deinit(void);
+__attribute__((noreturn)) void deinit_and_exit(int);
+void run_poll_loop(void);
+int tell_old_pids(int sig);
+int delete_oldpid(int pid);
+void hap_register_build_opts(const char *str, int must_free);
+void hap_register_feature(const char *name);
+int split_version(const char *version, unsigned int *value);
+int compare_current_version(const char *version);
+void display_version();
+
+void mworker_accept_wrapper(int fd);
+void mworker_reload(int hardreload);
+
+/* to be used with warned and WARN_* */
+static inline int already_warned(unsigned int warning)
+{
+ if (warned & warning)
+ return 1;
+ warned |= warning;
+ return 0;
+}
+
+extern unsigned int experimental_directives_allowed;
+
+struct cfg_keyword;
+int check_kw_experimental(struct cfg_keyword *kw, const char *file, int linenum,
+ char **errmsg);
+const char **hap_get_next_build_opt(const char **curr);
+
+/* simplified way to declare static build options in a file */
+#define REGISTER_BUILD_OPTS(str) \
+ INITCALL2(STG_REGISTER, hap_register_build_opts, (str), 0)
+
+#endif /* _HAPROXY_GLOBAL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/h1.h b/include/haproxy/h1.h
new file mode 100644
index 0000000..7152c6e
--- /dev/null
+++ b/include/haproxy/h1.h
@@ -0,0 +1,377 @@
+/*
+ * include/haproxy/h1.h
+ * This file contains HTTP/1 protocol definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_H1_H
+#define _HAPROXY_H1_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/http.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/intops.h>
+
+
+/* Possible states while parsing HTTP/1 messages (request|response) */
+enum h1m_state {
+ H1_MSG_RQBEFORE = 0, // request: leading LF, before start line
+ H1_MSG_RQBEFORE_CR = 1, // request: leading CRLF, before start line
+ /* these ones define a request start line */
+ H1_MSG_RQMETH = 2, // parsing the Method
+ H1_MSG_RQMETH_SP = 3, // space(s) after the Method
+ H1_MSG_RQURI = 4, // parsing the Request URI
+ H1_MSG_RQURI_SP = 5, // space(s) after the Request URI
+ H1_MSG_RQVER = 6, // parsing the Request Version
+ H1_MSG_RQLINE_END = 7, // end of request line (CR or LF)
+
+ H1_MSG_RPBEFORE = 8, // response: leading LF, before start line
+ H1_MSG_RPBEFORE_CR = 9, // response: leading CRLF, before start line
+
+ /* these ones define a response start line */
+ H1_MSG_RPVER = 10, // parsing the Response Version
+ H1_MSG_RPVER_SP = 11, // space(s) after the Response Version
+ H1_MSG_RPCODE = 12, // response code
+ H1_MSG_RPCODE_SP = 13, // space(s) after the response code
+ H1_MSG_RPREASON = 14, // response reason
+ H1_MSG_RPLINE_END = 15, // end of response line (CR or LF)
+
+ /* common header processing */
+ H1_MSG_HDR_FIRST = 16, // waiting for first header or last CRLF (no LWS possible)
+ H1_MSG_HDR_NAME = 17, // parsing header name
+ H1_MSG_HDR_COL = 18, // parsing header colon
+ H1_MSG_HDR_L1_SP = 19, // parsing header LWS (SP|HT) before value
+ H1_MSG_HDR_L1_LF = 20, // parsing header LWS (LF) before value
+ H1_MSG_HDR_L1_LWS = 21, // checking whether it's a new header or an LWS
+ H1_MSG_HDR_VAL = 22, // parsing header value
+ H1_MSG_HDR_L2_LF = 23, // parsing header LWS (LF) inside/after value
+ H1_MSG_HDR_L2_LWS = 24, // checking whether it's a new header or an LWS
+
+ H1_MSG_LAST_LF = 25, // parsing last LF, last state for headers
+
+ /* Body processing. */
+
+ H1_MSG_CHUNK_SIZE = 26, // parsing the chunk size (RFC7230 #4.1)
+ H1_MSG_DATA = 27, // skipping data chunk / content-length data
+ H1_MSG_CHUNK_CRLF = 28, // skipping CRLF after data chunk
+ H1_MSG_TRAILERS = 29, // trailers (post-data entity headers)
+ /* we enter this state when we've received the end of the current message */
+ H1_MSG_DONE = 30, // message end received, waiting for resync or close
+ H1_MSG_TUNNEL = 31, // tunneled data after DONE
+} __attribute__((packed));
+
+
+/* HTTP/1 message flags (32 bit), for use in h1m->flags only */
+#define H1_MF_NONE 0x00000000
+#define H1_MF_CLEN 0x00000001 // content-length present
+#define H1_MF_CHNK 0x00000002 // chunk present (as last encoding), exclusive with c-l
+#define H1_MF_RESP 0x00000004 // this message is the response message
+#define H1_MF_TOLOWER 0x00000008 // turn the header names to lower case
+#define H1_MF_VER_11 0x00000010 // message indicates version 1.1 or above
+#define H1_MF_CONN_CLO 0x00000020 // message contains "connection: close"
+#define H1_MF_CONN_KAL 0x00000040 // message contains "connection: keep-alive"
+#define H1_MF_CONN_UPG 0x00000080 // message contains "connection: upgrade"
+#define H1_MF_XFER_LEN 0x00000100 // message xfer size can be determined
+#define H1_MF_XFER_ENC 0x00000200 // transfer-encoding is present
+#define H1_MF_NO_PHDR 0x00000400 // don't add pseudo-headers in the header list
+#define H1_MF_HDRS_ONLY 0x00000800 // parse headers only
+#define H1_MF_CLEAN_CONN_HDR 0x00001000 // skip close/keep-alive values of connection headers during parsing
+#define H1_MF_METH_CONNECT 0x00002000 // Set for a response to a CONNECT request
+#define H1_MF_METH_HEAD 0x00004000 // Set for a response to a HEAD request
+#define H1_MF_UPG_WEBSOCKET 0x00008000 // Set for a Websocket upgrade handshake
+#define H1_MF_TE_CHUNKED 0x00010000 // T-E "chunked"
+#define H1_MF_TE_OTHER 0x00020000 // T-E other than supported ones found (only "chunked" is supported for now)
+
+/* Mask to use to reset H1M flags when we restart headers parsing.
+ *
+ * WARNING: Don't forget to update it if a new flag must be preserved when
+ * headers parsing is restarted.
+ */
+#define H1_MF_RESTART_MASK (H1_MF_RESP|H1_MF_TOLOWER|H1_MF_NO_PHDR|H1_MF_HDRS_ONLY| \
+ H1_MF_CLEAN_CONN_HDR|H1_MF_METH_CONNECT|H1_MF_METH_HEAD)
+
+/* Note: for a connection to be persistent, we need this for the request :
+ * - one of CLEN or CHNK
+ * - version 1.0 and KAL and not CLO
+ * - or version 1.1 and not CLO
+ * For the response it's the same except that UPG must not appear either.
+ * So in short, for a request it's (CLEN|CHNK) > 0 && !CLO && (VER_11 || KAL)
+ * and for a response it's (CLEN|CHNK) > 0 && !(CLO|UPG) && (VER_11 || KAL)
+ */
+
+
+/* basic HTTP/1 message state for use in parsers. The err_pos field is special,
+ * it is pre-set to a negative value (-1 or -2), and once non-negative it contains
+ * the relative position in the message of the first parse error. -2 is used to tell
+ * the parser that we want to block the invalid message. -1 is used to only perform
+ * a silent capture.
+ */
+struct h1m {
+ enum h1m_state state; // H1 message state (H1_MSG_*)
+ /* 24 bits available here */
+ uint32_t flags; // H1 message flags (H1_MF_*)
+ uint64_t curr_len; // content-length or last chunk length
+ uint64_t body_len; // total known size of the body length
+ uint32_t next; // next byte to parse, relative to buffer's head
+ int err_pos; // position in the byte stream of the first error (H1 or H2)
+ int err_state; // state where the first error was met (H1 or H2)
+};
+
+/* basic H1 start line, describes either the request or the response */
+union h1_sl { /* useful start line pointers, relative to ->sol */
+ struct {
+ struct ist m; /* METHOD */
+ struct ist u; /* URI */
+ struct ist v; /* VERSION */
+ enum http_meth_t meth; /* method */
+ } rq; /* request line : field, length */
+ struct {
+ struct ist v; /* VERSION */
+ struct ist c; /* CODE */
+ struct ist r; /* REASON */
+ uint16_t status; /* status code */
+ } st; /* status line : field, length */
+};
+
+int h1_headers_to_hdr_list(char *start, const char *stop,
+ struct http_hdr *hdr, unsigned int hdr_num,
+ struct h1m *h1m, union h1_sl *slp);
+int h1_measure_trailers(const struct buffer *buf, unsigned int ofs, unsigned int max);
+
+int h1_parse_cont_len_header(struct h1m *h1m, struct ist *value);
+int h1_parse_xfer_enc_header(struct h1m *h1m, struct ist value);
+void h1_parse_connection_header(struct h1m *h1m, struct ist *value);
+void h1_parse_upgrade_header(struct h1m *h1m, struct ist value);
+
+void h1_generate_random_ws_input_key(char key_out[25]);
+void h1_calculate_ws_output_key(const char *key, char *result);
+
+/* for debugging, reports the HTTP/1 message state name */
+static inline const char *h1m_state_str(enum h1m_state msg_state)
+{
+ switch (msg_state) {
+ case H1_MSG_RQBEFORE: return "MSG_RQBEFORE";
+ case H1_MSG_RQBEFORE_CR: return "MSG_RQBEFORE_CR";
+ case H1_MSG_RQMETH: return "MSG_RQMETH";
+ case H1_MSG_RQMETH_SP: return "MSG_RQMETH_SP";
+ case H1_MSG_RQURI: return "MSG_RQURI";
+ case H1_MSG_RQURI_SP: return "MSG_RQURI_SP";
+ case H1_MSG_RQVER: return "MSG_RQVER";
+ case H1_MSG_RQLINE_END: return "MSG_RQLINE_END";
+ case H1_MSG_RPBEFORE: return "MSG_RPBEFORE";
+ case H1_MSG_RPBEFORE_CR: return "MSG_RPBEFORE_CR";
+ case H1_MSG_RPVER: return "MSG_RPVER";
+ case H1_MSG_RPVER_SP: return "MSG_RPVER_SP";
+ case H1_MSG_RPCODE: return "MSG_RPCODE";
+ case H1_MSG_RPCODE_SP: return "MSG_RPCODE_SP";
+ case H1_MSG_RPREASON: return "MSG_RPREASON";
+ case H1_MSG_RPLINE_END: return "MSG_RPLINE_END";
+ case H1_MSG_HDR_FIRST: return "MSG_HDR_FIRST";
+ case H1_MSG_HDR_NAME: return "MSG_HDR_NAME";
+ case H1_MSG_HDR_COL: return "MSG_HDR_COL";
+ case H1_MSG_HDR_L1_SP: return "MSG_HDR_L1_SP";
+ case H1_MSG_HDR_L1_LF: return "MSG_HDR_L1_LF";
+ case H1_MSG_HDR_L1_LWS: return "MSG_HDR_L1_LWS";
+ case H1_MSG_HDR_VAL: return "MSG_HDR_VAL";
+ case H1_MSG_HDR_L2_LF: return "MSG_HDR_L2_LF";
+ case H1_MSG_HDR_L2_LWS: return "MSG_HDR_L2_LWS";
+ case H1_MSG_LAST_LF: return "MSG_LAST_LF";
+ case H1_MSG_CHUNK_SIZE: return "MSG_CHUNK_SIZE";
+ case H1_MSG_DATA: return "MSG_DATA";
+ case H1_MSG_CHUNK_CRLF: return "MSG_CHUNK_CRLF";
+ case H1_MSG_TRAILERS: return "MSG_TRAILERS";
+ case H1_MSG_DONE: return "MSG_DONE";
+ case H1_MSG_TUNNEL: return "MSG_TUNNEL";
+ default: return "MSG_??????";
+ }
+}
+
+/* This function may be called only in HTTP_MSG_CHUNK_CRLF. It reads the CRLF
+ * at the end of a chunk. The caller should adjust msg->next
+ * in order to include this part into the next forwarding phase. Note that the
+ * caller must ensure that head+start points to the first byte to parse. It
+ * returns the number of bytes parsed on success, so the caller can set msg_state
+ * to HTTP_MSG_CHUNK_SIZE. If not enough data are available, the function does not
+ * change anything and returns zero. Otherwise it returns a negative value
+ * indicating the error position relative to <stop>. Note: this function is
+ * designed to parse wrapped CRLF at the end of the buffer.
+ */
+static inline int h1_skip_chunk_crlf(const struct buffer *buf, int start, int stop)
+{
+ const char *ptr = b_peek(buf, start);
+ int bytes = 1;
+
+ if (stop <= start)
+ return 0;
+
+ if (unlikely(*ptr != '\r')) // negative position to stop
+ return ptr - __b_peek(buf, stop);
+
+ /* NB: we'll check data availability at the end. It's not a
+ * problem because whatever we match first will be checked
+ * against the correct length.
+ */
+ bytes++;
+ ptr++;
+ if (ptr >= b_wrap(buf))
+ ptr = b_orig(buf);
+
+ if (bytes > stop - start)
+ return 0;
+
+ if (*ptr != '\n') // negative position to stop
+ return ptr - __b_peek(buf, stop);
+
+ return bytes;
+}
+
+/* Parse the chunk size start at buf + start and stops before buf + stop. The
+ * positions are relative to the buffer's head.
+ * It returns the chunk size in <res> and the amount of bytes read this way :
+ * < 0 : error at this position relative to <stop>
+ * = 0 : not enough bytes to read a complete chunk size
+ * > 0 : number of bytes successfully read that the caller can skip
+ * On success, the caller should adjust its msg->next to point to the first
+ * byte of data after the chunk size, so that we know we can forward exactly
+ * msg->next bytes, and msg->sol to contain the exact number of bytes forming
+ * the chunk size. That way it is always possible to differentiate between the
+ * start of the body and the start of the data. Note: this function is designed
+ * to parse wrapped CRLF at the end of the buffer.
+ */
+static inline int h1_parse_chunk_size(const struct buffer *buf, int start, int stop, uint64_t *res)
+{
+ const char *ptr = b_peek(buf, start);
+ const char *ptr_old = ptr;
+ const char *end = b_wrap(buf);
+ uint64_t chunk = 0;
+
+ stop -= start; // bytes left
+ start = stop; // bytes to transfer
+
+ /* The chunk size is in the following form, though we are only
+ * interested in the size and CRLF :
+ * 1*HEXDIGIT *WSP *[ ';' extensions ] CRLF
+ */
+ while (1) {
+ int c;
+ if (!stop)
+ return 0;
+ c = hex2i(*ptr);
+ if (c < 0) /* not a hex digit anymore */
+ break;
+ if (unlikely(++ptr >= end))
+ ptr = b_orig(buf);
+ chunk = (chunk << 4) + c;
+ if (unlikely(chunk & 0xF0000000000000ULL)) {
+			/* Don't get more than 13 hexa-digits (2^52 - 1) to never feed possibly
+ * bogus values from languages that use floats for their integers
+ */
+ goto error;
+ }
+ stop--;
+ }
+
+ /* empty size not allowed */
+ if (unlikely(ptr == ptr_old))
+ goto error;
+
+ while (HTTP_IS_SPHT(*ptr)) {
+ if (++ptr >= end)
+ ptr = b_orig(buf);
+ if (--stop == 0)
+ return 0;
+ }
+
+ /* Up to there, we know that at least one byte is present at *ptr. Check
+ * for the end of chunk size.
+ */
+ while (1) {
+ if (likely(*ptr == '\r')) {
+ /* we now have a CR, it must be followed by a LF */
+ if (++ptr >= end)
+ ptr = b_orig(buf);
+ if (--stop == 0)
+ return 0;
+
+ if (*ptr != '\n')
+ goto error;
+ if (++ptr >= end)
+ ptr = b_orig(buf);
+ --stop;
+ /* done */
+ break;
+ }
+ else if (likely(*ptr == ';')) {
+ /* chunk extension, ends at next CRLF */
+ if (++ptr >= end)
+ ptr = b_orig(buf);
+ if (--stop == 0)
+ return 0;
+
+ while (!HTTP_IS_CRLF(*ptr)) {
+ if (++ptr >= end)
+ ptr = b_orig(buf);
+ if (--stop == 0)
+ return 0;
+ }
+ /* we have a CRLF now, loop above */
+ continue;
+ }
+ else
+ goto error;
+ }
+
+ /* OK we found our CRLF and now <ptr> points to the next byte, which may
+ * or may not be present. Let's return the number of bytes parsed.
+ */
+ *res = chunk;
+ return start - stop;
+ error:
+ *res = 0; // just to stop gcc's -Wuninitialized warning :-(
+ return -stop;
+}
+
+/* initializes an H1 message for a request */
+static inline struct h1m *h1m_init_req(struct h1m *h1m)
+{
+ h1m->state = H1_MSG_RQBEFORE;
+ h1m->next = 0;
+ h1m->flags = H1_MF_NONE;
+ h1m->curr_len = 0;
+ h1m->body_len = 0;
+ h1m->err_pos = -2;
+ h1m->err_state = 0;
+ return h1m;
+}
+
+/* initializes an H1 message for a response */
+static inline struct h1m *h1m_init_res(struct h1m *h1m)
+{
+ h1m->state = H1_MSG_RPBEFORE;
+ h1m->next = 0;
+ h1m->flags = H1_MF_RESP;
+ h1m->curr_len = 0;
+ h1m->body_len = 0;
+ h1m->err_pos = -2;
+ h1m->err_state = 0;
+ return h1m;
+}
+
+#endif /* _HAPROXY_H1_H */
diff --git a/include/haproxy/h1_htx.h b/include/haproxy/h1_htx.h
new file mode 100644
index 0000000..61b96e0
--- /dev/null
+++ b/include/haproxy/h1_htx.h
@@ -0,0 +1,76 @@
+/*
+ * include/haproxy/h1_htx.h
+ * This file defines function prototypes for H1 manipulation using the
+ * internal representation.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_H1_HTX_H
+#define _HAPROXY_H1_HTX_H
+
+#include <import/ist.h>
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/h1.h>
+#include <haproxy/htx.h>
+
+int h1_parse_msg_hdrs(struct h1m *h1m, union h1_sl *h1sl, struct htx *dsthtx,
+ struct buffer *srcbuf, size_t ofs, size_t max);
+size_t h1_parse_msg_data(struct h1m *h1m, struct htx **dsthtx,
+ struct buffer *srcbuf, size_t ofs, size_t max,
+ struct buffer *htxbuf);
+int h1_parse_msg_tlrs(struct h1m *h1m, struct htx *dsthtx,
+ struct buffer *srcbuf, size_t ofs, size_t max);
+
+/* Returns the URI of an HTX message in the most common format for a H1 peer. It
+ * is the path part of an absolute URI when the URI was normalized, otherwise
+ * it is the whole URI, as received. Concretely, it is only a special case for
+ * URIs received from H2 clients, to be able to send a relative path to the H1
+ * servers.
+ */
+static inline struct ist h1_get_uri(const struct htx_sl *sl)
+{
+ struct ist uri;
+
+ uri = htx_sl_req_uri(sl);
+ if (sl->flags & HTX_SL_F_NORMALIZED_URI) {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ uri = http_parse_path(&parser);
+ if (unlikely(!uri.len)) {
+ if (sl->info.req.meth == HTTP_METH_OPTIONS)
+ uri = ist("*");
+ else
+ uri = ist("/");
+ }
+ }
+ return uri;
+}
+
+int h1_format_htx_reqline(const struct htx_sl *sl, struct buffer *chk);
+int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk);
+int h1_format_htx_hdr(const struct ist n, const struct ist v, struct buffer *chk);
+int h1_format_htx_data(const struct ist data, struct buffer *chk, int chunked);
+
+#endif /* _HAPROXY_H1_HTX_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/h2.h b/include/haproxy/h2.h
new file mode 100644
index 0000000..4082b38
--- /dev/null
+++ b/include/haproxy/h2.h
@@ -0,0 +1,351 @@
+/*
+ * include/haproxy/h2.h
+ * This file contains types and macros used for the HTTP/2 protocol
+ *
+ * Copyright (C) 2000-2017 Willy Tarreau - w@1wt.eu
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_H2_H
+#define _HAPROXY_H2_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/htx-t.h>
+
+/* indexes of most important pseudo headers can be simplified to an almost
+ * linear array by dividing the index by 2 for all values from 1 to 9, and
+ * capping to 4 for values up to 14 ; thus it fits in a single 24-bit array
+ * shifted by 3 times the index value/2, or a 32-bit array shifted by 4x.
+ * Don't change these values, they are assumed by hpack_idx_to_phdr(). There
+ * is an entry for the Host header field which is not a pseudo-header but
+ * needs to be tracked as we should only use :authority if it's absent.
+ */
+enum {
+ H2_PHDR_IDX_NONE = 0,
+ H2_PHDR_IDX_AUTH = 1, /* :authority = 1 */
+ H2_PHDR_IDX_METH = 2, /* :method = 2..3 */
+ H2_PHDR_IDX_PATH = 3, /* :path = 4..5 */
+ H2_PHDR_IDX_SCHM = 4, /* :scheme = 6..7 */
+ H2_PHDR_IDX_STAT = 5, /* :status = 8..14 */
+ H2_PHDR_IDX_HOST = 6, /* Host, never returned, just a place-holder */
+ H2_PHDR_IDX_PROT = 7, /* :protocol from rfc 8441 Extended Connect */
+ H2_PHDR_NUM_ENTRIES /* must be last */
+};
+
+/* bit fields indicating the pseudo-headers found. It also covers the HOST
+ * header field as well as any non-pseudo-header field (NONE).
+ */
+enum {
+ H2_PHDR_FND_NONE = 1 << H2_PHDR_IDX_NONE, /* found a regular header */
+ H2_PHDR_FND_AUTH = 1 << H2_PHDR_IDX_AUTH,
+ H2_PHDR_FND_METH = 1 << H2_PHDR_IDX_METH,
+ H2_PHDR_FND_PATH = 1 << H2_PHDR_IDX_PATH,
+ H2_PHDR_FND_SCHM = 1 << H2_PHDR_IDX_SCHM,
+ H2_PHDR_FND_STAT = 1 << H2_PHDR_IDX_STAT,
+ H2_PHDR_FND_HOST = 1 << H2_PHDR_IDX_HOST,
+ H2_PHDR_FND_PROT = 1 << H2_PHDR_IDX_PROT,
+};
+
+/* frame types, from the standard */
+enum h2_ft {
+ H2_FT_DATA = 0x00, // RFC7540 #6.1
+ H2_FT_HEADERS = 0x01, // RFC7540 #6.2
+ H2_FT_PRIORITY = 0x02, // RFC7540 #6.3
+ H2_FT_RST_STREAM = 0x03, // RFC7540 #6.4
+ H2_FT_SETTINGS = 0x04, // RFC7540 #6.5
+ H2_FT_PUSH_PROMISE = 0x05, // RFC7540 #6.6
+ H2_FT_PING = 0x06, // RFC7540 #6.7
+ H2_FT_GOAWAY = 0x07, // RFC7540 #6.8
+ H2_FT_WINDOW_UPDATE = 0x08, // RFC7540 #6.9
+ H2_FT_CONTINUATION = 0x09, // RFC7540 #6.10
+ H2_FT_ENTRIES /* must be last */
+} __attribute__((packed));
+
+/* frame types, turned to bits or bit fields */
+enum {
+ /* one bit per frame type */
+ H2_FT_DATA_BIT = 1U << H2_FT_DATA,
+ H2_FT_HEADERS_BIT = 1U << H2_FT_HEADERS,
+ H2_FT_PRIORITY_BIT = 1U << H2_FT_PRIORITY,
+ H2_FT_RST_STREAM_BIT = 1U << H2_FT_RST_STREAM,
+ H2_FT_SETTINGS_BIT = 1U << H2_FT_SETTINGS,
+ H2_FT_PUSH_PROMISE_BIT = 1U << H2_FT_PUSH_PROMISE,
+ H2_FT_PING_BIT = 1U << H2_FT_PING,
+ H2_FT_GOAWAY_BIT = 1U << H2_FT_GOAWAY,
+ H2_FT_WINDOW_UPDATE_BIT = 1U << H2_FT_WINDOW_UPDATE,
+ H2_FT_CONTINUATION_BIT = 1U << H2_FT_CONTINUATION,
+ /* padded frames */
+ H2_FT_PADDED_MASK = H2_FT_DATA_BIT | H2_FT_HEADERS_BIT | H2_FT_PUSH_PROMISE_BIT,
+ /* flow controlled frames */
+ H2_FT_FC_MASK = H2_FT_DATA_BIT,
+ /* header frames */
+ H2_FT_HDR_MASK = H2_FT_HEADERS_BIT | H2_FT_PUSH_PROMISE_BIT | H2_FT_CONTINUATION_BIT,
+ /* frames allowed to arrive late on a stream */
+ H2_FT_LATE_MASK = H2_FT_WINDOW_UPDATE_BIT | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT,
+};
+
+
+/* flags defined for each frame type */
+
+// RFC7540 #6.1
+#define H2_F_DATA_END_STREAM 0x01
+#define H2_F_DATA_PADDED 0x08
+
+// RFC7540 #6.2
+#define H2_F_HEADERS_END_STREAM 0x01
+#define H2_F_HEADERS_END_HEADERS 0x04
+#define H2_F_HEADERS_PADDED 0x08
+#define H2_F_HEADERS_PRIORITY 0x20
+
+// RFC7540 #6.3 : PRIORITY defines no flags
+// RFC7540 #6.4 : RST_STREAM defines no flags
+
+// RFC7540 #6.5
+#define H2_F_SETTINGS_ACK 0x01
+
+// RFC7540 #6.6
+#define H2_F_PUSH_PROMISE_END_HEADERS 0x04
+#define H2_F_PUSH_PROMISE_PADDED 0x08
+
+// RFC7540 #6.7
+#define H2_F_PING_ACK 0x01
+
+// RFC7540 #6.8 : GOAWAY defines no flags
+// RFC7540 #6.9 : WINDOW_UPDATE defines no flags
+
+// PADDED is the exact same among DATA, HEADERS and PUSH_PROMISE (8)
+#define H2_F_PADDED 0x08
+
+/* HTTP/2 error codes - RFC7540 #7 */
+enum h2_err {
+ H2_ERR_NO_ERROR = 0x0,
+ H2_ERR_PROTOCOL_ERROR = 0x1,
+ H2_ERR_INTERNAL_ERROR = 0x2,
+ H2_ERR_FLOW_CONTROL_ERROR = 0x3,
+ H2_ERR_SETTINGS_TIMEOUT = 0x4,
+ H2_ERR_STREAM_CLOSED = 0x5,
+ H2_ERR_FRAME_SIZE_ERROR = 0x6,
+ H2_ERR_REFUSED_STREAM = 0x7,
+ H2_ERR_CANCEL = 0x8,
+ H2_ERR_COMPRESSION_ERROR = 0x9,
+ H2_ERR_CONNECT_ERROR = 0xa,
+ H2_ERR_ENHANCE_YOUR_CALM = 0xb,
+ H2_ERR_INADEQUATE_SECURITY = 0xc,
+ H2_ERR_HTTP_1_1_REQUIRED = 0xd,
+} __attribute__((packed));
+
+// RFC7540 #11.3 : Settings Registry
+#define H2_SETTINGS_HEADER_TABLE_SIZE 0x0001
+#define H2_SETTINGS_ENABLE_PUSH 0x0002
+#define H2_SETTINGS_MAX_CONCURRENT_STREAMS 0x0003
+#define H2_SETTINGS_INITIAL_WINDOW_SIZE 0x0004
+#define H2_SETTINGS_MAX_FRAME_SIZE 0x0005
+#define H2_SETTINGS_MAX_HEADER_LIST_SIZE 0x0006
+#define H2_SETTINGS_ENABLE_CONNECT_PROTOCOL 0x0008
+
+
+/* some protocol constants */
+
+// PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n
+#define H2_CONN_PREFACE \
+ "\x50\x52\x49\x20\x2a\x20\x48\x54" \
+ "\x54\x50\x2f\x32\x2e\x30\x0d\x0a" \
+ "\x0d\x0a\x53\x4d\x0d\x0a\x0d\x0a"
+
+
+/* some flags related to protocol parsing */
+#define H2_MSGF_BODY 0x0001 // a body is present
+#define H2_MSGF_BODY_CL 0x0002 // content-length is present
+#define H2_MSGF_BODY_TUNNEL 0x0004 // a tunnel is in use (CONNECT)
+#define H2_MSGF_RSP_1XX 0x0010 // a 1xx ( != 101) HEADERS frame was received
+#define H2_MSGF_BODYLESS_RSP 0x0020 // response message is known to have no body
+ // (response to HEAD request or 204/304 response)
+#define H2_MSGF_EXT_CONNECT 0x0040 // Extended CONNECT method from rfc 8441
+
+#define H2_MAX_STREAM_ID ((1U << 31) - 1)
+#define H2_MAX_FRAME_LEN ((1U << 24) - 1)
+#define H2_DIR_REQ 1
+#define H2_DIR_RES 2
+#define H2_DIR_BOTH 3
+
+/* constraints imposed by the protocol on each frame type, in terms of stream
+ * ID values, frame sizes, and direction so that most connection-level checks
+ * can be centralized regardless of the frame's acceptance.
+ */
+struct h2_frame_definition {
+ int32_t dir; /* 0=none, 1=request, 2=response, 3=both */
+ int32_t min_id; /* minimum allowed stream ID */
+ int32_t max_id; /* maximum allowed stream ID */
+ int32_t min_len; /* minimum frame length */
+ int32_t max_len; /* maximum frame length */
+};
+
+extern struct h2_frame_definition h2_frame_definition[H2_FT_ENTRIES];
+
+/* various protocol processing functions */
+
+int h2_parse_cont_len_header(unsigned int *msgf, struct ist *value, unsigned long long *body_len);
+int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, int relaxed);
+int h2_make_htx_response(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, char *upgrade_protocol);
+int h2_make_htx_trailers(struct http_hdr *list, struct htx *htx);
+
+/*
+ * Some helpful debugging functions.
+ */
+
+/* returns a bit corresponding to the frame type */
+static inline unsigned int h2_ft_bit(enum h2_ft ft)
+{
+ if (ft >= H2_FT_ENTRIES)
+ return 0;
+ return 1U << ft;
+}
+
+/* returns the frame type as a string */
+static inline const char *h2_ft_str(int type)
+{
+ switch (type) {
+ case H2_FT_DATA : return "DATA";
+ case H2_FT_HEADERS : return "HEADERS";
+ case H2_FT_PRIORITY : return "PRIORITY";
+ case H2_FT_RST_STREAM : return "RST_STREAM";
+ case H2_FT_SETTINGS : return "SETTINGS";
+ case H2_FT_PUSH_PROMISE : return "PUSH_PROMISE";
+ case H2_FT_PING : return "PING";
+ case H2_FT_GOAWAY : return "GOAWAY";
+ case H2_FT_WINDOW_UPDATE : return "WINDOW_UPDATE";
+ default : return "_UNKNOWN_";
+ }
+}
+
+/* returns the error code as a string */
+static inline const char *h2_err_str(enum h2_err err)
+{
+ switch (err) {
+ case H2_ERR_NO_ERROR : return "NO_ERROR";
+ case H2_ERR_PROTOCOL_ERROR : return "PROTOCOL_ERROR";
+ case H2_ERR_INTERNAL_ERROR : return "INTERNAL_ERROR";
+ case H2_ERR_FLOW_CONTROL_ERROR : return "FLOW_CONTROL_ERROR";
+ case H2_ERR_SETTINGS_TIMEOUT : return "SETTINGS_TIMEOUT";
+ case H2_ERR_STREAM_CLOSED : return "STREAM_CLOSED";
+ case H2_ERR_FRAME_SIZE_ERROR : return "FRAME_SIZE_ERROR";
+ case H2_ERR_REFUSED_STREAM : return "REFUSED_STREAM";
+ case H2_ERR_CANCEL : return "CANCEL";
+ case H2_ERR_COMPRESSION_ERROR : return "COMPRESSION_ERROR";
+ case H2_ERR_CONNECT_ERROR : return "CONNECT_ERROR";
+ case H2_ERR_ENHANCE_YOUR_CALM : return "ENHANCE_YOUR_CALM";
+ case H2_ERR_INADEQUATE_SECURITY : return "INADEQUATE_SECURITY";
+ case H2_ERR_HTTP_1_1_REQUIRED : return "HTTP_1_1_REQUIRED";
+ default : return "_UNKNOWN_";
+ }
+}
+
+/* Returns an error code if the frame is valid protocol-wise, otherwise 0. <ft>
+ * is the frame type (H2_FT_*), <dir> is the direction (1=req, 2=res), <id> is
+ * the stream ID from the frame header, <len> is the frame length from the
+ * header. The purpose is to be able to quickly return a PROTOCOL_ERROR or
+ * FRAME_SIZE_ERROR connection error even for situations where the frame will
+ * be ignored. <mfs> must be the max frame size currently in place for the
+ * protocol.
+ */
+static inline int h2_frame_check(enum h2_ft ft, int dir, int32_t id, int32_t len, int32_t mfs)
+{
+ struct h2_frame_definition *fd;
+
+ if (ft >= H2_FT_ENTRIES)
+ return H2_ERR_NO_ERROR; // ignore unhandled frame types
+
+ fd = &h2_frame_definition[ft];
+
+ if (!(dir & fd->dir))
+ return H2_ERR_PROTOCOL_ERROR;
+
+ if (id < fd->min_id || id > fd->max_id)
+ return H2_ERR_PROTOCOL_ERROR;
+
+ if (len < fd->min_len || len > fd->max_len)
+ return H2_ERR_FRAME_SIZE_ERROR;
+
+ if (len > mfs)
+ return H2_ERR_FRAME_SIZE_ERROR;
+
+ if (ft == H2_FT_SETTINGS && (len % 6) != 0)
+ return H2_ERR_FRAME_SIZE_ERROR; // RFC7540#6.5
+
+ return H2_ERR_NO_ERROR;
+}
+
/* Returns the H2_PHDR_IDX_* code matching pseudo-header name <str>, 0 if
 * <str> is not a pseudo-header (i.e. does not start with ':'), or -1 if it
 * starts with ':' but is not one of the six pseudo-headers handled here.
 * NOTE(review): str.ptr is dereferenced without checking str.len, so <str>
 * is assumed to point to at least one readable byte -- confirm callers.
 */
static inline int h2_str_to_phdr(const struct ist str)
{
	if (*str.ptr == ':') {
		if (isteq(str, ist(":path"))) return H2_PHDR_IDX_PATH;
		else if (isteq(str, ist(":method"))) return H2_PHDR_IDX_METH;
		else if (isteq(str, ist(":scheme"))) return H2_PHDR_IDX_SCHM;
		else if (isteq(str, ist(":status"))) return H2_PHDR_IDX_STAT;
		else if (isteq(str, ist(":authority"))) return H2_PHDR_IDX_AUTH;
		else if (isteq(str, ist(":protocol"))) return H2_PHDR_IDX_PROT;

		/* all other names starting with ':' */
		return -1;
	}

	/* not a pseudo header */
	return 0;
}
+
/* Returns the pseudo-header name <phdr> as an ist, or ":UNKNOWN" if unknown.
 * Note that all strings are zero-terminated constants, which allows
 * h2_phdr_to_str() to return the .ptr field directly. H2_PHDR_IDX_HOST maps
 * to the regular "Host" header name rather than to a pseudo-header.
 */
static inline struct ist h2_phdr_to_ist(int phdr)
{
	switch (phdr) {
	case H2_PHDR_IDX_NONE: return ist(":NONE");
	case H2_PHDR_IDX_AUTH: return ist(":authority");
	case H2_PHDR_IDX_METH: return ist(":method");
	case H2_PHDR_IDX_PATH: return ist(":path");
	case H2_PHDR_IDX_SCHM: return ist(":scheme");
	case H2_PHDR_IDX_STAT: return ist(":status");
	case H2_PHDR_IDX_HOST: return ist("Host");
	default: return ist(":UNKNOWN");
	}
}
+
+/* returns the pseudo-header name <num> as a string, or ":UNKNOWN" if unknown */
+static inline const char *h2_phdr_to_str(int phdr)
+{
+ return h2_phdr_to_ist(phdr).ptr;
+}
+
+#endif /* _HAPROXY_H2_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/h3.h b/include/haproxy/h3.h
new file mode 100644
index 0000000..1bedf43
--- /dev/null
+++ b/include/haproxy/h3.h
@@ -0,0 +1,118 @@
+/*
+ * include/haproxy/h3.h
+ * This file contains types for H3
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_H3_T_H
+#define _HAPROXY_H3_T_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/buf-t.h>
+#include <haproxy/mux_quic-t.h>
+
/* H3 unidirectional stream types
 * Emitted as the first byte on the stream to differentiate it.
 */
+#define H3_UNI_S_T_CTRL 0x00
+#define H3_UNI_S_T_PUSH 0x01
+#define H3_UNI_S_T_QPACK_ENC 0x02
+#define H3_UNI_S_T_QPACK_DEC 0x03
+/* Must be the last one */
+#define H3_UNI_S_T_MAX H3_UNI_S_T_QPACK_DEC
+
+/* Settings */
+#define H3_SETTINGS_RESERVED_0 0x00
+#define H3_SETTINGS_QPACK_MAX_TABLE_CAPACITY 0x01
+/* there is a hole here of reserved settings, matching the h2 settings */
+#define H3_SETTINGS_RESERVED_2 0x02
+#define H3_SETTINGS_RESERVED_3 0x03
+#define H3_SETTINGS_RESERVED_4 0x04
+#define H3_SETTINGS_RESERVED_5 0x05
+#define H3_SETTINGS_MAX_FIELD_SECTION_SIZE 0x06
+#define H3_SETTINGS_QPACK_BLOCKED_STREAMS 0x07
+
/* Errors. H3 error codes occupy the 0x1xx range, QPACK error codes the
 * 0x2xx range; these values are used on the wire, do not renumber them.
 */
enum h3_err {
	H3_NO_ERROR = 0x100,
	H3_GENERAL_PROTOCOL_ERROR = 0x101,
	H3_INTERNAL_ERROR = 0x102,
	H3_STREAM_CREATION_ERROR = 0x103,
	H3_CLOSED_CRITICAL_STREAM = 0x104,
	H3_FRAME_UNEXPECTED = 0x105,
	H3_FRAME_ERROR = 0x106,
	H3_EXCESSIVE_LOAD = 0x107,
	H3_ID_ERROR = 0x108,
	H3_SETTINGS_ERROR = 0x109,
	H3_MISSING_SETTINGS = 0x10a,
	H3_REQUEST_REJECTED = 0x10b,
	H3_REQUEST_CANCELLED = 0x10c,
	H3_REQUEST_INCOMPLETE = 0x10d,
	H3_MESSAGE_ERROR = 0x10e,
	H3_CONNECT_ERROR = 0x10f,
	H3_VERSION_FALLBACK = 0x110,

	QPACK_DECOMPRESSION_FAILED = 0x200,
	QPACK_ENCODER_STREAM_ERROR = 0x201,
	QPACK_DECODER_STREAM_ERROR = 0x202,
};
+
/* Frame types. The numbering gaps are identifiers not used by H3
 * (NOTE(review): presumably kept reserved to match the H2 frame-type
 * numbering, as done for the settings above -- confirm against RFC 9114).
 */
enum h3_ft {
	/* internal value used to mark demuxing as inactive */
	H3_FT_UNINIT = -1,

	H3_FT_DATA = 0x00,
	H3_FT_HEADERS = 0x01,
	/* hole */
	H3_FT_CANCEL_PUSH = 0x03,
	H3_FT_SETTINGS = 0x04,
	H3_FT_PUSH_PROMISE = 0x05,
	/* hole */
	H3_FT_GOAWAY = 0x07,
	/* hole */
	H3_FT_MAX_PUSH_ID = 0x0d,
};
+
+/* Stream types */
+enum h3s_t {
+ /* unidirectional streams */
+ H3S_T_CTRL,
+ H3S_T_PUSH,
+ H3S_T_QPACK_DEC,
+ H3S_T_QPACK_ENC,
+
+ /* bidirectional streams */
+ H3S_T_REQ,
+
+ H3S_T_UNKNOWN
+};
+
+/* State for request streams */
+enum h3s_st_req {
+ H3S_ST_REQ_BEFORE = 0, /* initial state */
+ H3S_ST_REQ_HEADERS, /* header section received */
+ H3S_ST_REQ_DATA, /* first DATA frame for content received */
+ H3S_ST_REQ_TRAILERS, /* trailer section received */
+};
+
+extern const struct qcc_app_ops h3_ops;
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_H3_T_H */
diff --git a/include/haproxy/h3_stats-t.h b/include/haproxy/h3_stats-t.h
new file mode 100644
index 0000000..3c00f6c
--- /dev/null
+++ b/include/haproxy/h3_stats-t.h
@@ -0,0 +1,12 @@
+#ifndef _HAPROXY_H3_STATS_T_H
+#define _HAPROXY_H3_STATS_T_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+extern struct stats_module h3_stats_module;
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_H3_STATS_T_H */
diff --git a/include/haproxy/h3_stats.h b/include/haproxy/h3_stats.h
new file mode 100644
index 0000000..ed7c5e7
--- /dev/null
+++ b/include/haproxy/h3_stats.h
@@ -0,0 +1,17 @@
+#ifndef _HAPROXY_H3_STATS_H
+#define _HAPROXY_H3_STATS_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/h3_stats-t.h>
+
+struct h3_counters;
+
+void h3_inc_err_cnt(void *ctx, int error_code);
+void h3_inc_frame_type_cnt(struct h3_counters *ctrs, int frm_type);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_H3_STATS_H */
diff --git a/include/haproxy/hash.h b/include/haproxy/hash.h
new file mode 100644
index 0000000..cb506c7
--- /dev/null
+++ b/include/haproxy/hash.h
@@ -0,0 +1,33 @@
+/*
+ * include/haproxy/hash.h
+ * Macros for different hashing function.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HASH_H_
+#define _HAPROXY_HASH_H_
+
+#include <inttypes.h>
+
+unsigned int hash_djb2(const void *input, int len);
+unsigned int hash_wt6(const void *input, int len);
+unsigned int hash_sdbm(const void *input, int len);
+unsigned int hash_crc32(const void *input, int len);
+uint32_t hash_crc32c(const void *input, int len);
+
+#endif /* _HAPROXY_HASH_H_ */
diff --git a/include/haproxy/hlua-t.h b/include/haproxy/hlua-t.h
new file mode 100644
index 0000000..2672ffd
--- /dev/null
+++ b/include/haproxy/hlua-t.h
@@ -0,0 +1,243 @@
+/*
+ * include/haproxy/hlua-t.h
+ * Lua core types definitions
+ *
+ * Copyright (C) 2015-2016 Thierry Fournier <tfournier@arpalert.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HLUA_T_H
+#define _HAPROXY_HLUA_T_H
+
+#ifdef USE_LUA
+
+#include <lua.h>
+#include <lauxlib.h>
+#include <stdint.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/proxy-t.h>
+#include <haproxy/regex-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/xref-t.h>
+#include <haproxy/event_hdl-t.h>
+
+#define CLASS_CORE "Core"
+#define CLASS_TXN "TXN"
+#define CLASS_FETCHES "Fetches"
+#define CLASS_CONVERTERS "Converters"
+#define CLASS_SOCKET "Socket"
+#define CLASS_CHANNEL "Channel"
+#define CLASS_HTTP "HTTP"
+#define CLASS_HTTP_MSG "HTTPMessage"
+#define CLASS_HTTPCLIENT "HTTPClient"
+#define CLASS_MAP "Map"
+#define CLASS_APPLET_TCP "AppletTCP"
+#define CLASS_APPLET_HTTP "AppletHTTP"
+#define CLASS_PROXY "Proxy"
+#define CLASS_SERVER "Server"
+#define CLASS_LISTENER "Listener"
+#define CLASS_EVENT_SUB "EventSub"
+#define CLASS_REGEX "Regex"
+#define CLASS_STKTABLE "StickTable"
+#define CLASS_CERTCACHE "CertCache"
+#define CLASS_PROXY_LIST "ProxyList"
+#define CLASS_SERVER_LIST "ServerList"
+
+struct stream;
+
+#define HLUA_RUN 0x00000001
+#define HLUA_CTRLYIELD 0x00000002
+#define HLUA_WAKERESWR 0x00000004
+#define HLUA_WAKEREQWR 0x00000008
+#define HLUA_EXIT 0x00000010
+#define HLUA_NOYIELD 0x00000020
+
+#define HLUA_F_AS_STRING 0x01
+#define HLUA_F_MAY_USE_HTTP 0x02
+
/* HLUA TXN flags */
#define HLUA_TXN_NOTERM 0x00000001
/* 0x00000002 .. 0x00000008 unused */

/* The execution context (enum), bit values from 0x00000010 to
 * 0x00000030. These flags are mutually exclusive. Only one must be set at a
 * time.
 */
#define HLUA_TXN_SMP_NONE 0x00000000 /* No specific execution context */
#define HLUA_TXN_SMP_CTX 0x00000010 /* Executed from a sample fetch context */
#define HLUA_TXN_ACT_CTX 0x00000020 /* Executed from an action context */
#define HLUA_TXN_FLT_CTX 0x00000030 /* Executed from a filter context */
#define HLUA_TXN_CTX_MASK 0x00000030 /* Mask to get the execution context */
+
+
+#define HLUA_CONCAT_BLOCSZ 2048
+
enum hlua_exec {
	HLUA_E_OK = 0,
	HLUA_E_AGAIN,  /* LUA yield, must resume the stack execution later, when
	                  the associated task is woken. */
	HLUA_E_ETMOUT, /* Execution timeout */
	HLUA_E_NOMEM,  /* Out of memory error */
	HLUA_E_YIELD,  /* LUA code tried to yield while this is not allowed */
	HLUA_E_ERRMSG, /* LUA stack execution failed with a string error message
	                  at the top of the stack. */
	HLUA_E_ERR,    /* LUA stack execution failed without error message. */
};
+
+struct hlua_timer {
+ uint32_t start; /* cpu time in ms when the timer was started */
+ uint32_t burst; /* execution time for the current call in ms */
+ uint32_t cumulative; /* cumulative execution time for the coroutine in ms */
+ uint32_t max; /* max (cumulative) execution time for the coroutine in ms */
+};
+
+struct hlua {
+ lua_State *T; /* The LUA stack. */
+ int state_id; /* contains the lua state id. 0 is common state, 1 to n are per-thread states.*/
+ int Tref; /* The reference of the stack in coroutine case.
+ -1 for the main lua stack. */
+ int Mref; /* The reference of the memory context in coroutine case.
+ -1 if the memory context is not used. */
+ int nargs; /* The number of arguments in the stack at the start of execution. */
+ unsigned int flags; /* The current execution flags. */
+ int wake_time; /* The lua wants to be waked at this time, or before. (ticks) */
+ struct hlua_timer timer; /* lua multipurpose timer */
+ struct task *task; /* The task associated with the lua stack execution.
+ We must wake this task to continue the task execution */
+ struct list com; /* The list head of the signals attached to this task. */
+ struct mt_list hc_list; /* list of httpclient associated to this lua task */
+ struct ebpt_node node;
+ int gc_count; /* number of items which need a GC */
+};
+
+/* This is a part of the list containing references to functions
+ * called at the initialisation time.
+ */
+struct hlua_init_function {
+ struct list l;
+ int function_ref;
+};
+
+/* This struct contains the lua data used to bind
+ * Lua function on HAProxy hook like sample-fetches
+ * or actions.
+ */
+struct hlua_function {
+ struct list l;
+ char *name;
+ int function_ref[MAX_THREADS + 1];
+ int nargs;
+};
+
+/* This struct is used with the structs:
+ * - http_req_rule
+ * - http_res_rule
+ * - tcp_rule
+ * It contains the lua execution configuration.
+ */
+struct hlua_rule {
+ struct hlua_function *fcn;
+ char **args;
+};
+
+/* This struct contains the pointer provided on the most
+ * of internal HAProxy calls during the processing of
+ * rules, converters and sample-fetches. This struct is
+ * associated with the lua object called "TXN".
+ */
+struct hlua_txn {
+ struct stream *s;
+ struct proxy *p;
+ int dir; /* SMP_OPT_DIR_{REQ,RES} */
+ int flags;
+};
+
+/* This struct contains the applet context. */
+struct hlua_appctx {
+ struct appctx *appctx;
+ luaL_Buffer b; /* buffer used to prepare strings. */
+ struct hlua_txn htxn;
+};
+
+/* This struct is used with sample fetches and sample converters. */
+struct hlua_smp {
+ struct stream *s;
+ struct proxy *p;
+ unsigned int flags; /* LUA_F_OPT_* */
+ int dir; /* SMP_OPT_DIR_{REQ,RES} */
+};
+
+/* This struct contains data used with sleep functions. */
+struct hlua_sleep {
+ struct task *task; /* task associated with sleep. */
+ struct list com; /* list of signal to wake at the end of sleep. */
+ unsigned int wakeup_ms; /* hour to wakeup. */
+};
+
+/* This struct is used to create coprocess doing TCP or
+ * SSL I/O. It uses a fake stream.
+ */
+struct hlua_socket {
+ struct xref xref; /* cross reference with the stream used for socket I/O. */
+ luaL_Buffer b; /* buffer used to prepare strings. */
+ unsigned long tid; /* Store the thread id which creates the socket. */
+};
+
+struct hlua_concat {
+ int size;
+ int len;
+};
+
+/* This struct is used to store the httpclient */
+struct hlua_httpclient {
+ struct httpclient *hc; /* ptr to the httpclient instance */
+ size_t sent; /* payload sent */
+ luaL_Buffer b; /* buffer used to prepare strings. */
+ struct mt_list by_hlua; /* linked in the current hlua task */
+};
+
+struct hlua_proxy_list {
+ char capabilities;
+};
+
+struct hlua_proxy_list_iterator_context {
+ struct proxy *next;
+ char capabilities;
+};
+
+struct hlua_server_list {
+ struct proxy *px;
+};
+
+struct hlua_server_list_iterator_context {
+ struct server *cur;
+ struct proxy *px;
+};
+
+#else /* USE_LUA */
+/************************ For use when Lua is disabled ********************/
+
+/* Empty struct for compilation compatibility */
+struct hlua { };
+struct hlua_socket { };
+struct hlua_rule { };
+
+#endif /* USE_LUA */
+
+#endif /* _HAPROXY_HLUA_T_H */
diff --git a/include/haproxy/hlua.h b/include/haproxy/hlua.h
new file mode 100644
index 0000000..3c67cce
--- /dev/null
+++ b/include/haproxy/hlua.h
@@ -0,0 +1,81 @@
+/*
+ * include/haproxy/hlua.h
+ * Lua core management functions
+ *
+ * Copyright (C) 2015-2016 Thierry Fournier <tfournier@arpalert.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HLUA_H
+#define _HAPROXY_HLUA_H
+
+#include <haproxy/hlua-t.h>
+
+#ifdef USE_LUA
+
+/* The following macros are used to set flags. */
+#define HLUA_SET_RUN(__hlua) do {(__hlua)->flags |= HLUA_RUN;} while(0)
+#define HLUA_CLR_RUN(__hlua) do {(__hlua)->flags &= ~HLUA_RUN;} while(0)
+#define HLUA_IS_RUNNING(__hlua) ((__hlua)->flags & HLUA_RUN)
+#define HLUA_SET_CTRLYIELD(__hlua) do {(__hlua)->flags |= HLUA_CTRLYIELD;} while(0)
+#define HLUA_CLR_CTRLYIELD(__hlua) do {(__hlua)->flags &= ~HLUA_CTRLYIELD;} while(0)
+#define HLUA_IS_CTRLYIELDING(__hlua) ((__hlua)->flags & HLUA_CTRLYIELD)
+#define HLUA_SET_WAKERESWR(__hlua) do {(__hlua)->flags |= HLUA_WAKERESWR;} while(0)
+#define HLUA_CLR_WAKERESWR(__hlua) do {(__hlua)->flags &= ~HLUA_WAKERESWR;} while(0)
+#define HLUA_IS_WAKERESWR(__hlua) ((__hlua)->flags & HLUA_WAKERESWR)
+#define HLUA_SET_WAKEREQWR(__hlua) do {(__hlua)->flags |= HLUA_WAKEREQWR;} while(0)
+#define HLUA_CLR_WAKEREQWR(__hlua) do {(__hlua)->flags &= ~HLUA_WAKEREQWR;} while(0)
+#define HLUA_IS_WAKEREQWR(__hlua) ((__hlua)->flags & HLUA_WAKEREQWR)
+#define HLUA_CLR_NOYIELD(__hlua) do {(__hlua)->flags &= ~HLUA_NOYIELD;} while(0)
+#define HLUA_SET_NOYIELD(__hlua) do {(__hlua)->flags |= HLUA_NOYIELD;} while(0)
+#define HLUA_CANT_YIELD(__hlua) ((__hlua)->flags & HLUA_NOYIELD)
+
+
+#define HLUA_INIT(__hlua) do { (__hlua)->T = 0; } while(0)
+
+/* Lua HAProxy integration functions. */
+const char *hlua_traceback(lua_State *L, const char* sep);
+void hlua_ctx_destroy(struct hlua *lua);
+void hlua_init();
+int hlua_post_init();
+void hlua_applet_tcp_fct(struct appctx *ctx);
+void hlua_applet_http_fct(struct appctx *ctx);
+int hlua_event_sub(lua_State *L, event_hdl_sub_list *sub_list);
+struct task *hlua_process_task(struct task *task, void *context, unsigned int state);
+const char *hlua_show_current_location(const char *pfx);
+int hlua_ref(lua_State *L);
+void hlua_pushref(lua_State *L, int ref);
+void hlua_unref(lua_State *L, int ref);
+struct hlua *hlua_gethlua(lua_State *L);
+void hlua_yieldk(lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k, int timeout, unsigned int flags);
+
+#else /* USE_LUA */
+
+/************************ For use when Lua is disabled ********************/
+
+#define HLUA_IS_RUNNING(__hlua) 0
+
+#define HLUA_INIT(__hlua)
+
+/* Empty function for compilation without Lua. */
+static inline void hlua_init() { }
+static inline int hlua_post_init() { return 1; }
+static inline void hlua_ctx_destroy(struct hlua *lua) { }
+static inline const char *hlua_show_current_location(const char *pfx) { return NULL; }
+
+#endif /* USE_LUA */
+
+#endif /* _HAPROXY_HLUA_H */
diff --git a/include/haproxy/hlua_fcn.h b/include/haproxy/hlua_fcn.h
new file mode 100644
index 0000000..ff9250a
--- /dev/null
+++ b/include/haproxy/hlua_fcn.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/hlua_fcn.h
+ * Lua user-level management functions
+ *
+ * Copyright (C) 2015-2016 Thierry Fournier <tfournier@arpalert.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HLUA_FCN_H
+#define _HAPROXY_HLUA_FCN_H
+
+#include <lua.h>
+#include <haproxy/hlua-t.h>
+
+int hlua_checkboolean(lua_State *L, int index);
+
+void hlua_class_const_int(lua_State *L, const char *name, int value);
+void hlua_class_const_str(lua_State *L, const char *name, const char *value);
+void hlua_class_function(lua_State *L, const char *name, int (*function)(lua_State *L));
+void *hlua_checkudata(lua_State *L, int ud, int class_ref);
+int hlua_register_metatable(struct lua_State *L, char *name);
+void hlua_fcn_reg_core_fcn(lua_State *L);
+int hlua_dump_object(lua_State *L);
+int hlua_fcn_new_proxy(lua_State *L, struct proxy *px);
+int hlua_fcn_new_server(lua_State *L, struct server *srv);
+int hlua_fcn_new_event_sub(lua_State *L, struct event_hdl_sub *sub);
+
+#endif /* _HAPROXY_HLUA_FCN_H */
diff --git a/include/haproxy/hpack-dec.h b/include/haproxy/hpack-dec.h
new file mode 100644
index 0000000..4fb1a36
--- /dev/null
+++ b/include/haproxy/hpack-dec.h
@@ -0,0 +1,39 @@
+/*
+ * HPACK decompressor (RFC7541)
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _COMMON_HPACK_DEC_H
+#define _COMMON_HPACK_DEC_H
+
+#include <haproxy/api.h>
+#include <haproxy/chunk.h>
+#include <haproxy/hpack-tbl.h>
+
+int hpack_decode_frame(struct hpack_dht *dht, const uint8_t *raw, uint32_t len,
+ struct http_hdr *list, int list_size,
+ struct buffer *tmp);
+
+#endif /* _COMMON_HPACK_DEC_H */
diff --git a/include/haproxy/hpack-enc.h b/include/haproxy/hpack-enc.h
new file mode 100644
index 0000000..7511c5d
--- /dev/null
+++ b/include/haproxy/hpack-enc.h
@@ -0,0 +1,261 @@
+/*
+ * HPACK compressor (RFC7541)
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _COMMON_HPACK_ENC_H
+#define _COMMON_HPACK_ENC_H
+
+#include <string.h>
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/http-t.h>
+
+int hpack_encode_header(struct buffer *out, const struct ist n,
+ const struct ist v);
+
/* Returns the number of bytes required to encode the HPACK string length
 * <len>: a single byte when the length fits in the 7-bit prefix (len < 127),
 * otherwise a 127 prefix followed by one continuation byte per 7 bits of the
 * remainder. At most 4 bytes are usable, covering lengths up to 2097278;
 * larger values return 0.
 */
static inline int hpack_len_to_bytes(size_t len)
{
	size_t rem;

	if (len < 127)
		return 1;

	/* the remainder after the 127 prefix, 7 bits per extra byte */
	rem = len - 127;
	if (rem < (1U << 7))
		return 2;
	if (rem < (1U << 14))
		return 3;
	if (rem < (1U << 21))
		return 4;
	return 0;
}
+
/* Writes the HPACK-encoded string length <len> into <out> starting at offset
 * <pos> and returns the new offset. The caller is responsible for checking
 * for available room using hpack_len_to_bytes() first.
 */
static inline int hpack_encode_len(char *out, int pos, int len)
{
	int rem = len - 127;

	if (rem < 0) {
		/* short form: the length fits in the 7-bit prefix */
		out[pos++] = len;
		return pos;
	}

	/* long form: 127 prefix, then 7 bits per byte starting with the
	 * least significant ones, the 8th bit set on all but the last byte.
	 */
	out[pos++] = 127;
	while (rem >= 128) {
		out[pos++] = rem | 128;
		rem >>= 7;
	}
	out[pos++] = rem;
	return pos;
}
+
/* Tries to encode header field index <idx> with short value <val> into the
 * aligned buffer <out>. Returns non-zero on success, 0 on failure (buffer
 * full). The caller is responsible for ensuring that the length of <val> is
 * strictly lower than 127 (so it fits the single length byte emitted below),
 * that <idx> is lower than 64 (static list only, so it fits the 6-bit index
 * field of the 0x40 encoding), and that the buffer is aligned (head==0).
 */
static inline int hpack_encode_short_idx(struct buffer *out, int idx, struct ist val)
{
	if (out->data + 2 + val.len > out->size)
		return 0;

	/* literal header field with incremental indexing: one byte
	 * "01iiiiii" carrying the indexed name, one byte for the value
	 * length (no continuation since val.len < 127), then the raw
	 * value bytes.
	 */
	out->area[out->data++] = idx | 0x40;
	out->area[out->data++] = val.len;
	ist2bin(&out->area[out->data], val);
	out->data += val.len;
	return 1;
}
+
/* Tries to encode header field index <idx> with long value <val> into the
 * aligned buffer <out>. Returns non-zero on success, 0 on failure (buffer
 * full, or value too long for the 4-byte length encoding). The caller is
 * responsible for ensuring <idx> is lower than 64 (static list only), and
 * that the buffer is aligned (head==0).
 */
static inline int hpack_encode_long_idx(struct buffer *out, int idx, struct ist val)
{
	int len = out->data;

	/* hpack_len_to_bytes() returns 0 when the length is not encodable */
	if (!hpack_len_to_bytes(val.len) ||
	    1 + len + hpack_len_to_bytes(val.len) + val.len > out->size)
		return 0;

	/* emit literal with indexing (7541#6.2.1) :
	 * [ 0 | 1 | Index (6+) ]
	 */
	out->area[len++] = idx | 0x40;
	len = hpack_encode_len(out->area, len, val.len);
	memcpy(out->area + len, val.ptr, val.len);
	len += val.len;

	out->data = len;
	return 1;
}
+
/* Tries to encode a :status pseudo-header with the integer status <status>
 * into the aligned buffer <out>. Returns non-zero on success, 0 on failure
 * (buffer full). The caller is responsible for ensuring that the status is
 * comprised between 100 and 999 inclusive and that the buffer is aligned. It's
 * inlined because it's easily optimizable by the compiler.
 */
static inline int hpack_encode_int_status(struct buffer *out, unsigned int status)
{
	int len = out->data;
	int size = out->size;
	unsigned char c = 0;

	/* try to emit a single byte code: one byte is reserved here and
	 * written at out->area[len - 1], either at <last> below or by the
	 * literal fallback path.
	 */
	len++;
	if (__builtin_expect(len > size, 0))
		goto fail;

	/* single-byte codes (0x80 | static table index) for the common
	 * status codes; c remains 0 when the status has no such entry.
	 */
	c = (status <= 304) ?
		(status <= 204) ?
			(status == 204) ? 0x89 :
			(status == 200) ? 0x88 :
			0: /* > 204 */
			(status == 304) ? 0x8b :
			(status == 206) ? 0x8a :
			0:
	    (status <= 404) ?
			(status == 404) ? 0x8d :
			(status == 400) ? 0x8c :
			0: /* > 404 */
			(status == 500) ? 0x8e :
			0;

	if (c)
		goto last;

	/* fall back to literal: 4 more bytes, i.e. 5 in total counting the
	 * one reserved above, filled at out->area[len - 5 .. len - 1].
	 */
	len += 4;
	if (__builtin_expect(len > size, 0))
		goto fail;

	/* basic encoding of the status code */
	out->area[len - 5] = 0x48; // indexed name -- name=":status" (idx 8)
	out->area[len - 4] = 0x03; // 3 bytes status
	out->area[len - 3] = '0' + status / 100;
	out->area[len - 2] = '0' + status / 10 % 10;
	c = '0' + status % 10;
 last:
	out->area[len - 1] = c;
	out->data = len;
	return 1;
 fail:
	return 0;
}
+
+/* Tries to encode a :status pseudo-header with the integer status <status>
+ * also represented by <str> into the aligned buffer <out>. Returns non-zero
+ * on success or 0 on failure (buffer full). The caller is responsible for
+ * ensuring that the status is between 100 and 999 inclusive, that <str>
+ * contains a valid representation of the numerical value, and that the
+ * buffer is aligned. This version is preferred when the caller already knows
+ * a string representation of the status because it avoids the computation in
+ * the uncompressed case. It's inlined because it's easily optimizable.
+ */
+static inline int hpack_encode_str_status(struct buffer *out, unsigned int status, struct ist str)
+{
+ /* don't try too hard, we already have the ASCII value for less common cases */
+ if (status == 200 || status == 304) {
+ if (out->data >= out->size)
+ return 0;
+ out->area[out->data] = (status == 304) ? 0x8b : 0x88; /* static idx 11 (304) / 8 (200) */
+ out->data++;
+ return 1;
+ }
+ return hpack_encode_short_idx(out, 8, str); // name=":status" (idx 8)
+}
+
+/* Tries to encode a :method pseudo-header with the method in <meth>, which
+ * also exists as a string in <str>, into the aligned buffer <out>. Returns
+ * non-zero on success or 0 on failure (buffer full). The caller is responsible
+ * for ensuring that the string matches <meth>, that it's smaller than 127
+ * bytes, and that the buffer is aligned. If <meth> is unknown then using
+ * HTTP_METH_OTHER will lead to the string being encoded as a literal. It's
+ * inlined because it's easily optimizable.
+ */
+static inline int hpack_encode_method(struct buffer *out, enum http_meth_t meth, struct ist str)
+{
+ /* when the buffer is full, the single-byte branches are skipped and the
+ * literal encoder below reports the failure itself.
+ */
+ if (out->data < out->size && meth == HTTP_METH_GET)
+ out->area[out->data++] = 0x82; // indexed field : idx[02]=(":method", "GET")
+ else if (out->data < out->size && meth == HTTP_METH_POST)
+ out->area[out->data++] = 0x83; // indexed field : idx[03]=(":method", "POST")
+ else
+ return hpack_encode_short_idx(out, 2, str); // name=":method" (idx 2)
+ return 1;
+}
+
+/* Tries to encode a :scheme pseudo-header with the scheme in <scheme>, into
+ * the aligned buffer <out>. Returns non-zero on success or 0 on failure
+ * (buffer full). Only "http" and "https" are recognized and handled as indexed
+ * values, others are turned into short literals. The caller is responsible for
+ * ensuring that the scheme is smaller than 127 bytes, and that the buffer is
+ * aligned. Normally the compiler will detect constant strings in the comparison
+ * if the code remains inlined.
+ */
+static inline int hpack_encode_scheme(struct buffer *out, struct ist scheme)
+{
+ /* when the buffer is full, the single-byte branches are skipped and the
+ * literal encoder below reports the failure itself.
+ */
+ if (out->data < out->size && isteq(scheme, ist("https")))
+ out->area[out->data++] = 0x87; // indexed field : idx[07]=(":scheme", "https")
+ else if (out->data < out->size && isteq(scheme, ist("http")))
+ out->area[out->data++] = 0x86; // indexed field : idx[06]=(":scheme", "http")
+ else
+ return hpack_encode_short_idx(out, 6, scheme); // name=":scheme" (idx 6)
+ return 1;
+}
+
+/* Tries to encode a :path pseudo-header with the path in <path>, into the
+ * aligned buffer <out>. Returns non-zero on success or 0 on failure (buffer
+ * full). The well-known values "/" and "/index.html" are recognized, and other
+ * ones are handled as literals. The caller is responsible for ensuring that
+ * the buffer is aligned. Normally the compiler will detect constant strings
+ * in the comparison if the code remains inlined.
+ */
+static inline int hpack_encode_path(struct buffer *out, struct ist path)
+{
+ if (out->data < out->size && isteq(path, ist("/")))
+ out->area[out->data++] = 0x84; // indexed field : idx[04]=(":path", "/")
+ else if (out->data < out->size && isteq(path, ist("/index.html")))
+ out->area[out->data++] = 0x85; // indexed field : idx[05]=(":path", "/index.html")
+ else if (path.len < 127)
+ return hpack_encode_short_idx(out, 4, path); // name=":path" (idx 4), 7-bit length form
+ else
+ return hpack_encode_long_idx(out, 4, path); // name=":path" (idx 4), multi-byte length form
+ return 1;
+}
+
+
+#endif /* _COMMON_HPACK_ENC_H */
diff --git a/include/haproxy/hpack-huff.h b/include/haproxy/hpack-huff.h
new file mode 100644
index 0000000..f939103
--- /dev/null
+++ b/include/haproxy/hpack-huff.h
@@ -0,0 +1,35 @@
+/*
+ * Huffman decoding and encoding for HPACK (RFC7541)
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_HPACK_HUFF_H
+#define _HAPROXY_HPACK_HUFF_H
+
+#include <inttypes.h>
+
+int huff_enc(const char *s, char *out);
+int huff_dec(const uint8_t *huff, int hlen, char *out, int olen);
+
+#endif /* _HAPROXY_HPACK_HUFF_H */
diff --git a/include/haproxy/hpack-tbl-t.h b/include/haproxy/hpack-tbl-t.h
new file mode 100644
index 0000000..4e5d536
--- /dev/null
+++ b/include/haproxy/hpack-tbl-t.h
@@ -0,0 +1,143 @@
+/*
+ * HPACK header table management (RFC7541) - type definitions
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_HPACK_TBL_T_H
+#define _HAPROXY_HPACK_TBL_T_H
+
+#include <inttypes.h>
+
+/* Dynamic Headers Table, usable for tables up to 4GB long and values of 64kB-1.
+ * The model can be improved by using offsets relative to the table entry's end
+ * or to the end of the area, or by moving the descriptors at the end of the
+ * table and the data at the beginning. This entry is 8 bytes long, which is 1/4
+ * of the bookkeeping planned by the HPACK spec. Thus it saves 24 bytes per
+ * header field, meaning that even with a single header, 24 extra bytes can be
+ * stored (ie one such descriptor). At 29.2 average bytes per header field as
+ * found in the hpack test case, that's slightly more than 1.5kB of space saved
+ * from a 4kB block, resulting in contiguous space almost always being
+ * available.
+ *
+ * Principle: the table is stored in a contiguous array containing both the
+ * descriptors and the contents. Descriptors are stored at the beginning of the
+ * array while contents are stored starting from the end. Most of the time there
+ * is enough room left in the table to insert a new header field, thanks to the
+ * savings on the descriptor size. Thus by inserting headers from the end it's
+ * possible to maximize the delay before a collision of DTEs and data. In order
+ * to always insert from the right, we need to keep a reference to the latest
+ * inserted element and look before it. The last inserted cell's address defines
+ * the lowest known address still in use, unless the area wraps in which case
+ * the available space lies between the end of the tail and the beginning of the
+ * head.
+ *
+ * In order to detect collisions between data blocks and DTEs, we also maintain
+ * an index to the lowest element facing the DTE table, called "front". This one
+ * is updated each time an element is inserted before it. Once the buffer wraps,
+ * this element doesn't have to be updated anymore until it is released, in
+ * which case the buffer doesn't wrap anymore and the front element becomes the
+ * head again.
+ *
+ * Various heuristics are possible concerning the opportunity to wrap the
+ * entries to limit the risk of collisions with the DTE, but experimentation
+ * shows that thanks to the important savings made on the descriptors, the
+ * likeliness of finding a large amount of free space at the end of the area is
+ * much higher than the risk of colliding, so in the end the most naive
+ * algorithms work pretty fine. Typical ratios of 1 collision per 2000 requests
+ * have been observed.
+ *
+ * The defragmentation should be rare ; a study on live data shows on average
+ * 29.2 bytes used per header field. This plus the 32 bytes overhead fix an
+ * average of 66.9 header fields per 4kB table. This brings a 1606 bytes saving
+ * using the current storage description, ensuring that oldest headers are
+ * linearly removed by the sender before fragmentation occurs. This means that
+ * for all smaller header fields there will not be any requirement to defragment
+ * the area and most of the time it will even be possible to copy the old values
+ * directly within the buffer after creating a new entry. On average within the
+ * available space there will be enough room to store 1606/(29.2+8)=43 extra
+ * header fields without switching to another place.
+ *
+ * The table header fits in the table itself, it only takes 16 bytes, so in the
+ * worst case (1 single header) it's possible to store 4096 - 16 - 8 = 4072
+ * data bytes, which is larger than the 4064 the protocol requires (4096 - 32).
+ */
+
+/*
+ * Gcc before 3.0 needs [0] to declare a variable-size array
+ */
+#ifndef VAR_ARRAY
+#if defined(__GNUC__) && (__GNUC__ < 3)
+#define VAR_ARRAY 0
+#else
+#define VAR_ARRAY
+#endif
+#endif
+
+/* One dynamic table entry descriptor */
+struct hpack_dte {
+ uint32_t addr; /* storage address, relative to the dte address */
+ uint16_t nlen; /* header name length */
+ uint16_t vlen; /* header value length */
+};
+
+/* Note: the table's head plus a struct hpack_dte must be smaller than or equal to 32
+ * bytes so that a single large header can always fit. Here that's 16 bytes for
+ * the header, plus 8 bytes per slot.
+ * Note that when <used> == 0, front, head, and wrap are undefined.
+ */
+struct hpack_dht {
+ uint32_t size; /* allocated table size in bytes */
+ uint32_t total; /* sum of nlen + vlen in bytes */
+ uint16_t front; /* slot number of the first node after the idx table */
+ uint16_t wrap; /* number of allocated slots, wraps here */
+ uint16_t head; /* last inserted slot number */
+ uint16_t used; /* number of slots in use */
+ struct hpack_dte dte[VAR_ARRAY]; /* dynamic table entries */
+};
+
+/* supported hpack encoding/decoding errors */
+enum {
+ HPACK_ERR_NONE = 0, /* no error */
+ HPACK_ERR_ALLOC_FAIL, /* memory allocation error */
+ HPACK_ERR_UNKNOWN_OPCODE, /* invalid first byte */
+ HPACK_ERR_TRUNCATED, /* truncated stream */
+ HPACK_ERR_HUFFMAN, /* huffman decoding error */
+ HPACK_ERR_INVALID_PHDR, /* invalid pseudo header field name */
+ HPACK_ERR_MISPLACED_PHDR, /* pseudo header field after a regular header field */
+ HPACK_ERR_DUPLICATE_PHDR, /* duplicate pseudo header field */
+ HPACK_ERR_DHT_INSERT_FAIL, /* failed to insert into DHT */
+ HPACK_ERR_TOO_LARGE, /* decoded request/response is too large */
+ HPACK_ERR_MISSING_METHOD, /* :method is missing */
+ HPACK_ERR_MISSING_SCHEME, /* :scheme is missing */
+ HPACK_ERR_MISSING_PATH, /* :path is missing */
+ HPACK_ERR_MISSING_AUTHORITY, /* :authority is missing with CONNECT */
+ HPACK_ERR_SCHEME_NOT_ALLOWED, /* :scheme not allowed with CONNECT */
+ HPACK_ERR_PATH_NOT_ALLOWED, /* :path not allowed with CONNECT */
+ HPACK_ERR_INVALID_ARGUMENT, /* an invalid argument was passed */
+};
+
+/* static header table as in RFC7541 Appendix A. [0] unused. */
+#define HPACK_SHT_SIZE 62
+
+#endif /* _HAPROXY_HPACK_TBL_T_H */
diff --git a/include/haproxy/hpack-tbl.h b/include/haproxy/hpack-tbl.h
new file mode 100644
index 0000000..02cf7db
--- /dev/null
+++ b/include/haproxy/hpack-tbl.h
@@ -0,0 +1,184 @@
+/*
+ * HPACK header table management (RFC7541) - prototypes
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_HPACK_TBL_H
+#define _HAPROXY_HPACK_TBL_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/hpack-tbl-t.h>
+#include <haproxy/http-hdr-t.h>
+
+/* when built outside of haproxy, HPACK_STANDALONE must be defined, and
+ * pool_head_hpack_tbl->size must be set to the DHT size.
+ */
+#ifndef HPACK_STANDALONE
+#include <haproxy/pool.h>
+#define hpack_alloc(pool) pool_alloc(pool)
+#define hpack_free(pool, ptr) pool_free(pool, ptr)
+#else
+#include <stdlib.h>
+#include <haproxy/pool-t.h>
+#define hpack_alloc(pool) malloc(pool->size)
+#define hpack_free(pool, ptr) free(ptr)
+#endif
+
+extern const struct http_hdr hpack_sht[HPACK_SHT_SIZE];
+extern struct pool_head *pool_head_hpack_tbl;
+
+int __hpack_dht_make_room(struct hpack_dht *dht, unsigned int needed);
+int hpack_dht_insert(struct hpack_dht *dht, struct ist name, struct ist value);
+
+#ifdef DEBUG_HPACK
+void hpack_dht_dump(FILE *out, const struct hpack_dht *dht);
+void hpack_dht_check_consistency(const struct hpack_dht *dht);
+#endif
+
+/* return a pointer to the entry designated by index <idx> (starting at 1) or
+ * NULL if this index is not there.
+ */
+static inline const struct hpack_dte *hpack_get_dte(const struct hpack_dht *dht, uint16_t idx)
+{
+ idx--; /* 0-based distance back from the most recent entry (head) */
+
+ if (idx >= dht->used)
+ return NULL;
+
+ /* the entries live in a circular array of <wrap> slots: step <idx>
+ * slots backwards from <head>, adding <wrap> if it would go negative.
+ */
+ if (idx <= dht->head)
+ idx = dht->head - idx;
+ else
+ idx = dht->head - idx + dht->wrap;
+
+ return &dht->dte[idx];
+}
+
+/* returns non-zero if <idx> is valid for table <dht>, i.e. if it designates
+ * either a static table entry or one of the <used> dynamic entries.
+ */
+static inline int hpack_valid_idx(const struct hpack_dht *dht, uint32_t idx)
+{
+ return idx < dht->used + HPACK_SHT_SIZE;
+}
+
+/* return a pointer to the header name for entry <dte>. */
+static inline struct ist hpack_get_name(const struct hpack_dht *dht, const struct hpack_dte *dte)
+{
+ struct ist ret = {
+ .ptr = (void *)dht + dte->addr,
+ .len = dte->nlen,
+ };
+ return ret;
+}
+
+/* return a pointer to the header value for entry <dte>. */
+static inline struct ist hpack_get_value(const struct hpack_dht *dht, const struct hpack_dte *dte)
+{
+ struct ist ret = {
+ .ptr = (void *)dht + dte->addr + dte->nlen,
+ .len = dte->vlen,
+ };
+ return ret;
+}
+
+/* takes an idx, returns the associated name */
+static inline struct ist hpack_idx_to_name(const struct hpack_dht *dht, uint32_t idx)
+{
+ const struct hpack_dte *dte;
+
+ /* low indexes map directly to the static header table */
+ if (idx < HPACK_SHT_SIZE)
+ return hpack_sht[idx].n;
+
+ /* dynamic entries are numbered from 1 past the static table */
+ dte = hpack_get_dte(dht, idx - HPACK_SHT_SIZE + 1);
+ if (!dte)
+ return ist("### ERR ###"); // error
+
+ return hpack_get_name(dht, dte);
+}
+
+/* takes an idx, returns the associated value */
+static inline struct ist hpack_idx_to_value(const struct hpack_dht *dht, uint32_t idx)
+{
+ const struct hpack_dte *dte;
+
+ /* low indexes map directly to the static header table */
+ if (idx < HPACK_SHT_SIZE)
+ return hpack_sht[idx].v;
+
+ /* dynamic entries are numbered from 1 past the static table */
+ dte = hpack_get_dte(dht, idx - HPACK_SHT_SIZE + 1);
+ if (!dte)
+ return ist("### ERR ###"); // error
+
+ return hpack_get_value(dht, dte);
+}
+
+/* returns the slot number of the oldest entry (tail). Must not be used on an
+ * empty table.
+ */
+static inline unsigned int hpack_dht_get_tail(const struct hpack_dht *dht)
+{
+ /* tail = head + 1 - used, modulo <wrap> : <wrap> is added first when
+ * the subtraction would go negative (i.e. the entries wrap around).
+ */
+ return ((dht->head + 1U < dht->used) ? dht->wrap : 0) + dht->head + 1U - dht->used;
+}
+
+/* Purges table dht until a header field of <needed> bytes fits according to
+ * the protocol (adding 32 bytes overhead). Returns non-zero on success, zero
+ * on failure (ie: table empty but still not sufficient).
+ */
+static inline int hpack_dht_make_room(struct hpack_dht *dht, unsigned int needed)
+{
+ /* fast path: each used entry accounts for 32 bytes of overhead on top
+ * of its name+value (RFC7541#4.1), so first check whether the new entry
+ * (needed + 32) already fits without evicting anything.
+ */
+ if (dht->used * 32 + dht->total + needed + 32 <= dht->size)
+ return 1;
+ else if (!dht->used)
+ return 0;
+
+ /* slow path: evict oldest entries until the new one fits */
+ return __hpack_dht_make_room(dht, needed);
+}
+
+/* initialize an already-allocated dynamic headers table of <size> bytes with
+ * no entry in it (the storage itself is not touched).
+ */
+static inline void hpack_dht_init(struct hpack_dht *dht, uint32_t size)
+{
+ dht->size = size;
+ dht->total = 0;
+ dht->used = 0; /* front/head/wrap are undefined while used == 0 */
+}
+
+/* allocate a dynamic headers table from the pool and return it initialized */
+static inline struct hpack_dht *hpack_dht_alloc()
+{
+ struct hpack_dht *dht;
+
+ if (unlikely(!pool_head_hpack_tbl))
+ return NULL; /* pool not created yet */
+
+ dht = hpack_alloc(pool_head_hpack_tbl);
+ if (dht)
+ hpack_dht_init(dht, pool_head_hpack_tbl->size); /* the pool's entry size is the DHT size */
+ return dht;
+}
+
+/* free a dynamic headers table */
+static inline void hpack_dht_free(struct hpack_dht *dht)
+{
+ hpack_free(pool_head_hpack_tbl, dht);
+}
+
+#endif /* _HAPROXY_HPACK_TBL_H */
diff --git a/include/haproxy/hq_interop.h b/include/haproxy/hq_interop.h
new file mode 100644
index 0000000..eb6ebf6
--- /dev/null
+++ b/include/haproxy/hq_interop.h
@@ -0,0 +1,6 @@
+#ifndef _HAPROXY_HQ_INTEROP_H_
+#define _HAPROXY_HQ_INTEROP_H_
+
+extern const struct qcc_app_ops hq_interop_ops;
+
+#endif /* _HAPROXY_HQ_INTEROP_H_ */
diff --git a/include/haproxy/http-hdr-t.h b/include/haproxy/http-hdr-t.h
new file mode 100644
index 0000000..3534f43
--- /dev/null
+++ b/include/haproxy/http-hdr-t.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/http-hdr-t.h
+ * HTTP header management (new model) - type definitions
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_HTTP_HDR_T_H
+#define _HAPROXY_HTTP_HDR_T_H
+
+#include <import/ist.h>
+
+/* a header field made of a name and a value. Such structure stores 4 longs so
+ * it takes 16 bytes on 32-bit systems and 32 bytes on 64-bit systems.
+ */
+struct http_hdr {
+ struct ist n; /* name */
+ struct ist v; /* value */
+};
+
+#endif /* _HAPROXY_HTTP_HDR_T_H */
diff --git a/include/haproxy/http-hdr.h b/include/haproxy/http-hdr.h
new file mode 100644
index 0000000..e9e253b
--- /dev/null
+++ b/include/haproxy/http-hdr.h
@@ -0,0 +1,60 @@
+/*
+ * include/haproxy/http-hdr.h
+ * HTTP header management (new model) - functions
+ *
+ * Copyright (C) 2014-2017 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_HTTP_HDR_H
+#define _HAPROXY_HTTP_HDR_H
+
+#include <import/ist.h>
+#include <haproxy/http-hdr-t.h>
+
+/* sets an http_hdr <hdr> to name <n> and value <v>. Useful to avoid casts in
+ * immediate assignments.
+ */
+static inline void http_set_hdr(struct http_hdr *hdr, const struct ist n, const struct ist v)
+{
+ hdr->n = n;
+ hdr->v = v;
+}
+
+/* removes all occurrences of header name <n> in list <hdr> and returns the new count. The
+ * list must be terminated by the empty header. Note that the returned count
+ * includes the terminating empty header itself.
+ */
+static inline int http_del_hdr(struct http_hdr *hdr, const struct ist n)
+{
+ int src = 0, dst = 0;
+
+ /* compact the list in place, keeping only the entries whose name does
+ * not case-insensitively match <n>. The terminating empty header never
+ * matches a non-empty <n> so it is always preserved.
+ */
+ do {
+ if (!isteqi(hdr[src].n, n)) {
+ if (src != dst)
+ hdr[dst] = hdr[src];
+ dst++;
+ }
+ } while (hdr[src++].n.len);
+
+ return dst;
+}
+#endif /* _HAPROXY_HTTP_HDR_H */
diff --git a/include/haproxy/http-t.h b/include/haproxy/http-t.h
new file mode 100644
index 0000000..3165082
--- /dev/null
+++ b/include/haproxy/http-t.h
@@ -0,0 +1,184 @@
+/*
+ * include/haproxy/http-t.h
+ *
+ * Version-agnostic and implementation-agnostic HTTP protocol definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_T_H
+#define _HAPROXY_HTTP_T_H
+
+#include <inttypes.h>
+#include <import/ist.h>
+#include <haproxy/buf-t.h>
+
+/*
+ * some macros mainly used when parsing header fields.
+ * from RFC7230:
+ * CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ * SEP = one of the 17 defined separators or SP or HT
+ * LWS = CR, LF, SP or HT
+ * SPHT = SP or HT. Use this macro and not a boolean expression for best speed.
+ * CRLF = CR or LF. Use this macro and not a boolean expression for best speed.
+ * token = any CHAR except CTL or SEP. Use this macro and not a boolean expression for best speed.
+ *
+ * added for ease of use:
+ * ver_token = 'H', 'P', 'T', '/', '.', and digits.
+ */
+#define HTTP_FLG_CTL 0x01
+#define HTTP_FLG_SEP 0x02
+#define HTTP_FLG_LWS 0x04
+#define HTTP_FLG_SPHT 0x08
+#define HTTP_FLG_CRLF 0x10
+#define HTTP_FLG_TOK 0x20
+#define HTTP_FLG_VER 0x40
+#define HTTP_FLG_DIG 0x80
+
+#define HTTP_IS_CTL(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_CTL)
+#define HTTP_IS_SEP(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_SEP)
+#define HTTP_IS_LWS(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_LWS)
+#define HTTP_IS_SPHT(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_SPHT)
+#define HTTP_IS_CRLF(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_CRLF)
+#define HTTP_IS_TOKEN(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_TOK)
+#define HTTP_IS_VER_TOKEN(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_VER)
+#define HTTP_IS_DIGIT(x) (http_char_classes[(uint8_t)(x)] & HTTP_FLG_DIG)
+
+/* Known HTTP methods */
+enum http_meth_t {
+ HTTP_METH_OPTIONS,
+ HTTP_METH_GET,
+ HTTP_METH_HEAD,
+ HTTP_METH_POST,
+ HTTP_METH_PUT,
+ HTTP_METH_DELETE,
+ HTTP_METH_TRACE,
+ HTTP_METH_CONNECT,
+ HTTP_METH_OTHER, /* Must be the last entry */
+} __attribute__((packed));
+
+/* Known HTTP authentication schemes */
+enum ht_auth_m {
+ HTTP_AUTH_WRONG = -1, /* missing or unknown */
+ HTTP_AUTH_UNKNOWN = 0,
+ HTTP_AUTH_BASIC,
+ HTTP_AUTH_DIGEST,
+ HTTP_AUTH_BEARER,
+} __attribute__((packed));
+
+/* All implemented HTTP status codes */
+enum {
+ HTTP_ERR_200 = 0,
+ HTTP_ERR_400,
+ HTTP_ERR_401,
+ HTTP_ERR_403,
+ HTTP_ERR_404,
+ HTTP_ERR_405,
+ HTTP_ERR_407,
+ HTTP_ERR_408,
+ HTTP_ERR_410,
+ HTTP_ERR_413,
+ HTTP_ERR_421,
+ HTTP_ERR_422,
+ HTTP_ERR_425,
+ HTTP_ERR_429,
+ HTTP_ERR_500,
+ HTTP_ERR_501,
+ HTTP_ERR_502,
+ HTTP_ERR_503,
+ HTTP_ERR_504,
+ HTTP_ERR_SIZE
+};
+
+/* Note: the strings below make use of chunks. Chunks may carry an allocated
+ * size in addition to the length. The size counts from the beginning (str)
+ * to the end. If the size is unknown, it MUST be zero, in which case the
+ * sample will automatically be duplicated when a change larger than <len> has
+ * to be performed. Thus it is safe to always set size to zero.
+ */
+struct http_meth {
+ enum http_meth_t meth;
+ struct buffer str;
+};
+
+struct http_auth_data {
+ enum ht_auth_m method; /* one of HTTP_AUTH_* */
+ /* 7 bytes unused here */
+ struct buffer method_data; /* points to the credential part of the 'Authorization:' header */
+ char *user, *pass; /* extracted username & password */
+};
+
+struct http_method_desc {
+ enum http_meth_t meth;
+ const struct ist text;
+};
+
+enum http_etag_type {
+ ETAG_INVALID = 0,
+ ETAG_STRONG,
+ ETAG_WEAK
+};
+
+/* Indicates what elements have been parsed in a HTTP URI. */
+enum http_uri_parser_state {
+ URI_PARSER_STATE_BEFORE = 0,
+ URI_PARSER_STATE_SCHEME_DONE,
+ URI_PARSER_STATE_AUTHORITY_DONE,
+ URI_PARSER_STATE_PATH_DONE,
+};
+
+/* HTTP URI format as described in rfc 7230 5.3.
+ * As the first character is used to identify the format, absolute-form and
+ * authority-form are not differentiated.
+ */
+enum http_uri_parser_format {
+ URI_PARSER_FORMAT_EMPTY,
+ URI_PARSER_FORMAT_ASTERISK,
+ URI_PARSER_FORMAT_ABSPATH,
+ URI_PARSER_FORMAT_ABSURI_OR_AUTHORITY,
+};
+
+/* Parser context for a HTTP URI. Must be initialized with http_uri_parser_init
+ * before its usage.
+ *
+ * The parser API is not idempotent. For an initialized parser instance, each
+ * URI element can be extracted only once using its related function :
+ * - http_parse_scheme
+ * - http_parse_authority
+ * - http_parse_path
+ *
+ * Also each element must be extracted in the order of its appearance in the
+ * URI according to the rfc 3986. However, it is possible to skip the parsing
+ * of elements which are of no interest.
+ *
+ * If the above rules are not respected, the parsing functions return an empty
+ * ist.
+ */
+struct http_uri_parser {
+ struct ist uri; /* HTTP URI for parsing */
+ enum http_uri_parser_state state; /* already parsed HTTP URI elements */
+ enum http_uri_parser_format format; /* rfc 7230 5.3 HTTP URI format */
+};
+
+#endif /* _HAPROXY_HTTP_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/http.h b/include/haproxy/http.h
new file mode 100644
index 0000000..2992640
--- /dev/null
+++ b/include/haproxy/http.h
@@ -0,0 +1,222 @@
+/*
+ * include/haproxy/http.h
+ *
+ * Functions for version-agnostic and implementation-agnostic HTTP protocol.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_H
+#define _HAPROXY_HTTP_H
+
+#include <string.h>
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/http-t.h>
+
+extern const int http_err_codes[HTTP_ERR_SIZE];
+extern const char *http_err_msgs[HTTP_ERR_SIZE];
+extern const struct ist http_known_methods[HTTP_METH_OTHER];
+extern const uint8_t http_char_classes[256];
+
+enum http_meth_t find_http_meth(const char *str, const int len);
+int http_get_status_idx(unsigned int status);
+const char *http_get_reason(unsigned int status);
+struct ist http_get_host_port(const struct ist host);
+int http_is_default_port(const struct ist schm, const struct ist port);
+int http_validate_scheme(const struct ist schm);
+struct ist http_parse_scheme(struct http_uri_parser *parser);
+struct ist http_parse_authority(struct http_uri_parser *parser, int no_userinfo);
+struct ist http_parse_path(struct http_uri_parser *parser);
+int http_parse_cont_len_header(struct ist *value, unsigned long long *body_len,
+ int not_first);
+int http_header_match2(const char *hdr, const char *end,
+ const char *name, int len);
+char *http_find_hdr_value_end(char *s, const char *e);
+char *http_find_cookie_value_end(char *s, const char *e);
+char *http_extract_cookie_value(char *hdr, const char *hdr_end,
+ char *cookie_name, size_t cookie_name_l,
+ int list, char **value, size_t *value_l);
+char *http_extract_next_cookie_name(char *hdr_beg, char *hdr_end, int is_req,
+ char **ptr, size_t *len);
+int http_parse_qvalue(const char *qvalue, const char **end);
+const char *http_find_url_param_pos(const char **chunks,
+ const char* url_param_name,
+ size_t url_param_name_l, char delim, char insensitive);
+int http_find_next_url_param(const char **chunks,
+ const char* url_param_name, size_t url_param_name_l,
+ const char **vstart, const char **vend, char delim, char insensitive);
+
+int http_parse_header(const struct ist hdr, struct ist *name, struct ist *value);
+int http_parse_stline(const struct ist line, struct ist *p1, struct ist *p2, struct ist *p3);
+int http_parse_status_val(const struct ist value, struct ist *status, struct ist *reason);
+
+int http_compare_etags(struct ist etag1, struct ist etag2);
+
+struct ist http_trim_leading_spht(struct ist value);
+struct ist http_trim_trailing_spht(struct ist value);
+
+/*
+ * Given a path string and its length, find the position of beginning of the
+ * query string. Returns NULL if no query string is found in the path.
+ *
+ * Example: if path = "/foo/bar/fubar?yo=mama;ye=daddy", and n = 22:
+ *
+ * http_find_param_list(path, n, '?') points to "yo=mama;ye=daddy" string.
+ */
+static inline char *http_find_param_list(char *path, size_t path_l, char delim)
+{
+ char *p;
+
+ p = memchr(path, delim, path_l);
+ return p ? p + 1 : NULL;
+}
+
+static inline int http_is_param_delimiter(char c, char delim)
+{
+ return c == '&' || c == ';' || c == delim;
+}
+
+/* Match language range with language tag. RFC2616 14.4:
+ *
+ * A language-range matches a language-tag if it exactly equals
+ * the tag, or if it exactly equals a prefix of the tag such
+ * that the first tag character following the prefix is "-".
+ *
+ * Return 1 if the strings match, else return 0.
+ */
+static inline int http_language_range_match(const char *range, int range_len,
+ const char *tag, int tag_len)
+{
+ const char *end = range + range_len;
+ const char *tend = tag + tag_len;
+
+ while (range < end) {
+ if (*range == '-' && tag == tend)
+ return 1;
+ if (*range != *tag || tag == tend)
+ return 0;
+ range++;
+ tag++;
+ }
+ /* Return true only if the last char of the tag is matched. */
+ return tag == tend;
+}
+
+static inline enum http_etag_type http_get_etag_type(const struct ist etag)
+{
+ /* An ETag must be at least 2 characters. */
+ if (etag.len < 2)
+ return ETAG_INVALID;
+
+ /* The last character must be a `"`. */
+ if (etag.ptr[etag.len - 1] != '"')
+ return ETAG_INVALID;
+
+ /* If the ETag starts with a `"` then it is a strong ETag. */
+ if (etag.ptr[0] == '"')
+ return ETAG_STRONG;
+
+ /* If the ETag starts with `W/"` then it is a weak ETag. */
+ if (istnmatch(etag, ist("W/\""), 3))
+ return ETAG_WEAK;
+
+ return ETAG_INVALID;
+}
+
+/* Initialize a HTTP URI parser to use it with http URI parsing functions. The
+ * URI format is detected according to its first character.
+ */
+static inline struct http_uri_parser http_uri_parser_init(const struct ist uri)
+{
+ struct http_uri_parser parser = {
+ .uri = uri,
+ .state = URI_PARSER_STATE_BEFORE,
+ };
+
+ /* RFC7230, par. 2.7 :
+ * Request-URI = "*" | absuri | abspath | authority
+ */
+
+ if (!istlen(parser.uri)) {
+ parser.format = URI_PARSER_FORMAT_EMPTY;
+ }
+ else {
+ /* detect the format according to the first URI character */
+ switch (*istptr(parser.uri)) {
+ case '*':
+ parser.format = URI_PARSER_FORMAT_ASTERISK;
+ break;
+
+ case '/':
+ parser.format = URI_PARSER_FORMAT_ABSPATH;
+ break;
+
+ default:
+ parser.format = URI_PARSER_FORMAT_ABSURI_OR_AUTHORITY;
+ break;
+ }
+ }
+
+ return parser;
+}
+
+/* Looks into <ist> for forbidden characters for header values (0x00, 0x0A,
+ * 0x0D), starting at pointer <start> which must be within <ist>. Returns
+ * non-zero if such a character is found, 0 otherwise. When run on unlikely
+ * header match, it's recommended to first check for the presence of control
+ * chars using ist_find_ctl().
+ */
+static inline int http_header_has_forbidden_char(const struct ist ist, const char *start)
+{
+ do {
+ if ((uint8_t)*start <= 0x0d &&
+ (1U << (uint8_t)*start) & ((1<<13) | (1<<10) | (1<<0)))
+ return 1;
+ start++;
+ } while (start < istend(ist));
+ return 0;
+}
+
+/* Looks into <ist> for forbidden characters for :path values (0x00..0x1F,
+ * 0x20, 0x23), starting at pointer <start> which must be within <ist>.
+ * Returns non-zero if such a character is found, 0 otherwise. When run on
+ * unlikely header match, it's recommended to first check for the presence
+ * of control chars using ist_find_ctl().
+ */
+static inline int http_path_has_forbidden_char(const struct ist ist, const char *start)
+{
+ do {
+ if ((uint8_t)*start <= 0x23) {
+ if ((uint8_t)*start < 0x20)
+ return 1;
+ if ((1U << ((uint8_t)*start & 0x1F)) & ((1<<3) | (1<<0)))
+ return 1;
+ }
+ start++;
+ } while (start < istend(ist));
+ return 0;
+}
+
+#endif /* _HAPROXY_HTTP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/http_ana-t.h b/include/haproxy/http_ana-t.h
new file mode 100644
index 0000000..5b7342f
--- /dev/null
+++ b/include/haproxy/http_ana-t.h
@@ -0,0 +1,264 @@
+/*
+ * include/haproxy/http_ana-t.h
+ * This file contains HTTP protocol definitions.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_HTTP_T_H
+#define _HAPROXY_PROTO_HTTP_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/http-t.h>
+
+/* These are the flags that are found in txn->flags */
+
+/* action flags.
+ * Please also update the txn_show_flags() function below in case of changes.
+ */
+/* Unused: 0x00000001..0x00000004 */
+#define TX_CONST_REPLY 0x00000008 /* The http reply must not be rewritten (don't eval after-response ruleset) */
+#define TX_CLTARPIT 0x00000010 /* the transaction is tarpitted (anti-dos) */
+
+/* transaction flags dedicated to cookies : bits values 0x20 to 0x80 (0-7 shift 5) */
+#define TX_CK_NONE 0x00000000 /* this transaction had no cookie */
+#define TX_CK_INVALID 0x00000020 /* this transaction had a cookie which matches no server */
+#define TX_CK_DOWN 0x00000040 /* this transaction had cookie matching a down server */
+#define TX_CK_VALID 0x00000060 /* this transaction had cookie matching a valid server */
+#define TX_CK_EXPIRED 0x00000080 /* this transaction had an expired cookie (idle for too long) */
+#define TX_CK_OLD 0x000000A0 /* this transaction had too old a cookie (offered too long ago) */
+#define TX_CK_UNUSED 0x000000C0 /* this transaction had a cookie but it was not used (eg: use-server was preferred) */
+#define TX_CK_MASK 0x000000E0 /* mask to get this transaction's cookie flags */
+#define TX_CK_SHIFT 5 /* bit shift */
+
+/* response cookie information, bits values 0x100 to 0x700 (0-7 shift 8) */
+#define TX_SCK_NONE 0x00000000 /* no cookie found in the response */
+#define TX_SCK_FOUND 0x00000100 /* a persistence cookie was found and forwarded */
+#define TX_SCK_DELETED 0x00000200 /* an existing persistence cookie was deleted */
+#define TX_SCK_INSERTED 0x00000300 /* a persistence cookie was inserted */
+#define TX_SCK_REPLACED 0x00000400 /* a persistence cookie was present and rewritten */
+#define TX_SCK_UPDATED 0x00000500 /* an expirable persistence cookie was updated */
+#define TX_SCK_MASK 0x00000700 /* mask to get the set-cookie field */
+#define TX_SCK_SHIFT 8 /* bit shift */
+
+#define TX_SCK_PRESENT 0x00000800 /* a cookie was found in the server's response */
+
+/* cacheability management, bits values 0x1000 to 0x3000 (0-3 shift 12) */
+#define TX_CACHEABLE 0x00001000 /* at least part of the response is cacheable */
+#define TX_CACHE_COOK 0x00002000 /* a cookie in the response is cacheable */
+#define TX_CACHE_IGNORE 0x00004000 /* do not retrieve object from cache */
+#define TX_CACHE_SHIFT 12 /* bit shift */
+
+#define TX_CON_WANT_TUN 0x00008000 /* Will be a tunnel (CONNECT or 101-Switching-Protocol) */
+
+#define TX_CACHE_HAS_SEC_KEY 0x00010000 /* secondary key building succeeded */
+
+#define TX_USE_PX_CONN 0x00020000 /* Use "Proxy-Connection" instead of "Connection" */
+
+/* used only for keep-alive purposes, to indicate we're on a second transaction */
+#define TX_NOT_FIRST 0x00040000 /* the transaction is not the first one */
+
+#define TX_L7_RETRY 0x00080000 /* The transaction may attempt L7 retries */
+#define TX_D_L7_RETRY 0x00100000 /* Disable L7 retries on this transaction, even if configured to do it */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG and __APPEND_ENUM macros. The new end of the buffer is
+ * returned.
+ */
+static forceinline char *txn_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+#define _e(m, e, ...) __APPEND_ENUM(buf, len, delim, flg, m, e, #e, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags & enums */
+ _(TX_SCK_PRESENT, _(TX_CACHEABLE, _(TX_CACHE_COOK, _(TX_CACHE_IGNORE,
+ _(TX_CON_WANT_TUN, _(TX_CACHE_HAS_SEC_KEY, _(TX_USE_PX_CONN,
+ _(TX_NOT_FIRST, _(TX_L7_RETRY, _(TX_D_L7_RETRY))))))))));
+
+ _e(TX_SCK_MASK, TX_SCK_FOUND, _e(TX_SCK_MASK, TX_SCK_DELETED,
+ _e(TX_SCK_MASK, TX_SCK_INSERTED, _e(TX_SCK_MASK, TX_SCK_REPLACED,
+ _e(TX_SCK_MASK, TX_SCK_UPDATED)))));
+
+ _e(TX_CK_MASK, TX_CK_INVALID, _e(TX_CK_MASK, TX_CK_DOWN,
+ _e(TX_CK_MASK, TX_CK_VALID, _e(TX_CK_MASK, TX_CK_EXPIRED,
+ _e(TX_CK_MASK, TX_CK_OLD, _e(TX_CK_MASK, TX_CK_UNUSED))))));
+
+ _(TX_CONST_REPLY, _(TX_CLTARPIT));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _e
+#undef _
+}
+
+
+/*
+ * HTTP message status flags (msg->flags).
+ * Please also update the hmsg_show_flags() function below in case of changes.
+ */
+#define HTTP_MSGF_CNT_LEN 0x00000001 /* content-length was found in the message */
+#define HTTP_MSGF_TE_CHNK 0x00000002 /* transfer-encoding: chunked was found */
+
+/* if this flags is not set in either direction, we may be forced to complete a
+ * connection as a half-way tunnel (eg if no content-length appears in a 1.1
+ * response, but the request is correctly sized)
+ */
+#define HTTP_MSGF_XFER_LEN 0x00000004 /* message xfer size can be determined */
+#define HTTP_MSGF_VER_11 0x00000008 /* the message is HTTP/1.1 or above */
+
+#define HTTP_MSGF_SOFT_RW 0x00000010 /* soft header rewrites, no error triggered */
+
+#define HTTP_MSGF_COMPRESSING 0x00000020 /* data compression is in progress */
+
+#define HTTP_MSGF_BODYLESS 0x00000040 /* The message has no body (content-length = 0) */
+#define HTTP_MSGF_CONN_UPG 0x00000080 /* The message contains "Connection: Upgrade" header */
+
+#define HTTP_MSGF_EXPECT_CHECKED 0x00000100 /* Expect header was already handled, if any */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *hmsg_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(HTTP_MSGF_CNT_LEN, _(HTTP_MSGF_TE_CHNK, _(HTTP_MSGF_XFER_LEN,
+ _(HTTP_MSGF_VER_11, _(HTTP_MSGF_SOFT_RW, _(HTTP_MSGF_COMPRESSING,
+ _(HTTP_MSGF_BODYLESS, _(HTTP_MSGF_CONN_UPG, _(HTTP_MSGF_EXPECT_CHECKED)))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+
+/* Maximum length of the cache secondary key (sum of all the possible parts of
+ * the secondary key). The actual keys might be smaller for some
+ * request/response pairs, because they depend on the responses' optional Vary
+ * header. The different sizes can be found in the vary_information object (see
+ * cache.c).*/
+#define HTTP_CACHE_SEC_KEY_LEN (sizeof(uint32_t)+sizeof(uint64_t)+sizeof(uint64_t))
+
+
+/* Redirect flags */
+enum {
+ REDIRECT_FLAG_NONE = 0,
+ REDIRECT_FLAG_DROP_QS = 1, /* drop query string */
+ REDIRECT_FLAG_APPEND_SLASH = 2, /* append a slash if missing at the end */
+ REDIRECT_FLAG_FROM_REQ = 4, /* redirect rule on the request path */
+ REDIRECT_FLAG_IGNORE_EMPTY = 8, /* silently ignore empty location expressions */
+};
+
+/* Redirect types (location, prefix, extended ) */
+enum {
+ REDIRECT_TYPE_NONE = 0, /* no redirection */
+ REDIRECT_TYPE_LOCATION, /* location redirect */
+ REDIRECT_TYPE_PREFIX, /* prefix redirect */
+ REDIRECT_TYPE_SCHEME, /* scheme redirect (eg: switch from http to https) */
+};
+
+/* Persist types (force-persist, ignore-persist) */
+enum {
+ PERSIST_TYPE_NONE = 0, /* no persistence */
+ PERSIST_TYPE_FORCE, /* force-persist */
+ PERSIST_TYPE_IGNORE, /* ignore-persist */
+};
+
+/* final results for http-request rules */
+enum rule_result {
+ HTTP_RULE_RES_CONT = 0, /* nothing special, continue rules evaluation */
+ HTTP_RULE_RES_YIELD, /* call me later because some data is missing. */
+ HTTP_RULE_RES_STOP, /* stopped processing on an accept */
+ HTTP_RULE_RES_DENY, /* deny (or tarpit if TX_CLTARPIT) */
+ HTTP_RULE_RES_ABRT, /* abort request, msg already sent (eg: auth) */
+ HTTP_RULE_RES_DONE, /* processing done, stop processing (eg: redirect) */
+ HTTP_RULE_RES_BADREQ, /* bad request */
+ HTTP_RULE_RES_ERROR, /* Internal error */
+};
+
+/* Legacy version of the HTTP/1 message state, used by the channels, should
+ * ultimately be removed.
+ */
+enum h1_state {
+ HTTP_MSG_RQBEFORE = 0, // request: leading LF, before start line
+ HTTP_MSG_RPBEFORE = 1, // response: leading LF, before start line
+
+ /* Body processing.
+ * The state HTTP_MSG_BODY is a delimiter to know if we're waiting for headers
+ * or body. All the sub-states below also indicate we're processing the body,
+ * with some additional information.
+ */
+ HTTP_MSG_BODY = 2, // parsing body at end of headers
+ HTTP_MSG_DATA = 3, // skipping data chunk / content-length data
+ /* we enter this state when we've received the end of the current message */
+ HTTP_MSG_ENDING = 4, // message end received, wait that the filters end too
+ HTTP_MSG_DONE = 5, // message end received, waiting for resync or close
+ HTTP_MSG_CLOSING = 6, // shutdown_w done, not all bytes sent yet
+ HTTP_MSG_CLOSED = 7, // shutdown_w done, all bytes sent
+ HTTP_MSG_TUNNEL = 8, // tunneled data after DONE
+} __attribute__((packed));
+
+
+/* This is the state of an HTTP message seen from the analyzers' point of view. It can be
+ * either a request message or a response message.
+ */
+struct http_msg {
+ enum h1_state msg_state; /* where we are in the current message parsing */
+ /* 3 bytes unused here */
+ unsigned int flags; /* flags describing the message (HTTP version, ...) */
+ struct channel *chn; /* pointer to the channel transporting the message */
+};
+
+
+/* This is an HTTP transaction. It contains both a request message and a
+ * response message (which can be empty).
+ */
+struct http_txn {
+ struct http_msg rsp; /* HTTP response message */
+ struct http_msg req; /* HTTP request message */
+ unsigned int flags; /* transaction flags */
+ enum http_meth_t meth; /* HTTP method */
+ /* 1 unused byte here */
+ short status; /* HTTP status sent to the client, negative if not set */
+ short server_status; /* HTTP status received from the server, negative if not received */
+ struct http_reply *http_reply; /* The HTTP reply to use as reply */
+ struct buffer l7_buffer; /* To store the data, in case we have to retry */
+ char cache_hash[20]; /* Store the cache hash */
+ char cache_secondary_hash[HTTP_CACHE_SEC_KEY_LEN]; /* Optional cache secondary key. */
+ char *uri; /* first line if log needed, NULL otherwise */
+ char *cli_cookie; /* cookie presented by the client, in capture mode */
+ char *srv_cookie; /* cookie presented by the server, in capture mode */
+ int cookie_first_date; /* if non-zero, first date the expirable cookie was set/seen */
+ int cookie_last_date; /* if non-zero, last date the expirable cookie was set/seen */
+
+ struct http_auth_data auth; /* HTTP auth data */
+};
+
+#endif /* _HAPROXY_PROTO_HTTP_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/http_ana.h b/include/haproxy/http_ana.h
new file mode 100644
index 0000000..2cc6516
--- /dev/null
+++ b/include/haproxy/http_ana.h
@@ -0,0 +1,91 @@
+/*
+ * include/haproxy/http_ana.h
+ * This file contains HTTP protocol definitions.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_HTTP_H
+#define _HAPROXY_PROTO_HTTP_H
+
+#include <haproxy/api.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/htx-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream-t.h>
+
+extern struct pool_head *pool_head_uniqueid;
+extern struct pool_head *pool_head_http_txn;
+
+int http_wait_for_request(struct stream *s, struct channel *req, int an_bit);
+int http_process_req_common(struct stream *s, struct channel *req, int an_bit, struct proxy *px);
+int http_process_request(struct stream *s, struct channel *req, int an_bit);
+int http_process_tarpit(struct stream *s, struct channel *req, int an_bit);
+int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit);
+int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit);
+int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, struct proxy *px);
+int http_request_forward_body(struct stream *s, struct channel *req, int an_bit);
+int http_response_forward_body(struct stream *s, struct channel *res, int an_bit);
+int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struct http_txn *txn);
+int http_eval_after_res_rules(struct stream *s);
+int http_replace_hdrs(struct stream* s, struct htx *htx, struct ist name, const char *str, struct my_regex *re, int full);
+int http_req_replace_stline(int action, const char *replace, int len,
+ struct proxy *px, struct stream *s);
+int http_res_set_status(unsigned int status, struct ist reason, struct stream *s);
+void http_check_request_for_cacheability(struct stream *s, struct channel *req);
+void http_check_response_for_cacheability(struct stream *s, struct channel *res);
+enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn, unsigned int time, unsigned int bytes);
+void http_perform_server_redirect(struct stream *s, struct stconn *sc);
+void http_server_error(struct stream *s, struct stconn *sc, int err, int finst, struct http_reply *msg);
+void http_reply_and_close(struct stream *s, short status, struct http_reply *msg);
+void http_return_srv_error(struct stream *s, struct stconn *sc);
+struct http_reply *http_error_message(struct stream *s);
+int http_reply_to_htx(struct stream *s, struct htx *htx, struct http_reply *reply);
+int http_reply_message(struct stream *s, struct http_reply *reply);
+int http_forward_proxy_resp(struct stream *s, int final);
+
+struct http_txn *http_create_txn(struct stream *s);
+void http_destroy_txn(struct stream *s);
+
+void http_set_term_flags(struct stream *s);
+
+/* for debugging, reports the HTTP/1 message state name (legacy version) */
+static inline const char *h1_msg_state_str(enum h1_state msg_state)
+{
+ switch (msg_state) {
+ case HTTP_MSG_RQBEFORE: return "MSG_RQBEFORE";
+ case HTTP_MSG_RPBEFORE: return "MSG_RPBEFORE";
+ case HTTP_MSG_BODY: return "MSG_BODY";
+ case HTTP_MSG_DATA: return "MSG_DATA";
+ case HTTP_MSG_ENDING: return "MSG_ENDING";
+ case HTTP_MSG_DONE: return "MSG_DONE";
+ case HTTP_MSG_CLOSING: return "MSG_CLOSING";
+ case HTTP_MSG_CLOSED: return "MSG_CLOSED";
+ case HTTP_MSG_TUNNEL: return "MSG_TUNNEL";
+ default: return "MSG_??????";
+ }
+}
+
+#endif /* _HAPROXY_PROTO_HTTP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/http_client-t.h b/include/haproxy/http_client-t.h
new file mode 100644
index 0000000..7ae0e61
--- /dev/null
+++ b/include/haproxy/http_client-t.h
@@ -0,0 +1,69 @@
+#ifndef _HAPROXY_HTTPCLIENT_T_H
+#define _HAPROXY_HTTPCLIENT_T_H
+
+#include <haproxy/http-t.h>
+
+struct httpclient {
+ struct {
+ struct ist url; /* URL of the request */
+ enum http_meth_t meth; /* method of the request */
+ struct buffer buf; /* output buffer, HTX */
+ } req;
+ struct {
+ struct ist vsn;
+ uint16_t status;
+ struct ist reason;
+ struct http_hdr *hdrs; /* headers */
+ struct buffer buf; /* input buffer, raw HTTP */
+ } res;
+ struct {
+ /* callbacks used to send the request, */
+ void (*req_payload)(struct httpclient *hc); /* send a payload */
+
+ /* callbacks used to receive the response, if not set, the IO
+ * handler will consume the data without doing anything */
+ void (*res_stline)(struct httpclient *hc); /* start line received */
+ void (*res_headers)(struct httpclient *hc); /* headers received */
+ void (*res_payload)(struct httpclient *hc); /* payload received */
+ void (*res_end)(struct httpclient *hc); /* end of the response */
+ } ops;
+ struct sockaddr_storage *dst; /* destination address */
+ struct appctx *appctx; /* HTTPclient appctx */
+ int timeout_server; /* server timeout in ms */
+ void *caller; /* ptr of the caller */
+ unsigned int flags; /* other flags */
+ struct proxy *px; /* proxy for special cases */
+ struct server *srv_raw; /* server for clear connections */
+#ifdef USE_OPENSSL
+ struct server *srv_ssl; /* server for SSL connections */
+#endif
+};
+
+/* Action (FA) to do */
+#define HTTPCLIENT_FA_STOP 0x00000001 /* stops the httpclient at the next IO handler call */
+#define HTTPCLIENT_FA_AUTOKILL 0x00000002 /* sets the applet to destroy the httpclient struct itself */
+
+/* status (FS) */
+#define HTTPCLIENT_FS_STARTED 0x00010000 /* the httpclient was started */
+#define HTTPCLIENT_FS_ENDED 0x00020000 /* the httpclient is stopped */
+
+/* States of the HTTP Client Appctx */
+enum {
+ HTTPCLIENT_S_REQ = 0,
+ HTTPCLIENT_S_REQ_BODY,
+ HTTPCLIENT_S_RES_STLINE,
+ HTTPCLIENT_S_RES_HDR,
+ HTTPCLIENT_S_RES_BODY,
+ HTTPCLIENT_S_RES_END,
+};
+
+#define HTTPCLIENT_USERAGENT "HAProxy"
+
+/* What kind of data we need to read */
+#define HC_F_RES_STLINE 0x01
+#define HC_F_RES_HDR 0x02
+#define HC_F_RES_BODY 0x04
+#define HC_F_RES_END 0x08
+
+
+#endif /* ! _HAPROXY_HTTPCLIENT_T_H */
diff --git a/include/haproxy/http_client.h b/include/haproxy/http_client.h
new file mode 100644
index 0000000..241ca24
--- /dev/null
+++ b/include/haproxy/http_client.h
@@ -0,0 +1,40 @@
+#ifndef _HAPROXY_HTTPCLIENT_H
+#define _HAPROXY_HTTPCLIENT_H
+
+#include <haproxy/http_client-t.h>
+
+void httpclient_destroy(struct httpclient *hc);
+void httpclient_stop_and_destroy(struct httpclient *hc);
+
+struct proxy *httpclient_create_proxy(const char *id);
+struct httpclient *httpclient_new(void *caller, enum http_meth_t meth, struct ist url);
+struct httpclient *httpclient_new_from_proxy(struct proxy *px, void *caller, enum http_meth_t meth, struct ist url);
+int httpclient_set_proxy(struct httpclient *hc, struct proxy *px);
+
+struct appctx *httpclient_start(struct httpclient *hc);
+int httpclient_set_dst(struct httpclient *hc, const char *dst);
+void httpclient_set_timeout(struct httpclient *hc, int timeout);
+int httpclient_res_xfer(struct httpclient *hc, struct buffer *dst);
+int httpclient_req_gen(struct httpclient *hc, const struct ist url, enum http_meth_t meth, const struct http_hdr *hdrs, const struct ist payload);
+int httpclient_req_xfer(struct httpclient *hc, struct ist src, int end);
+
+/* Return the amount of data available in the httpclient response buffer */
+static inline int httpclient_data(struct httpclient *hc)
+{
+ return b_data(&hc->res.buf);
+}
+
+/* Return 1 if the httpclient ended and won't receive any new data */
+static inline int httpclient_ended(struct httpclient *hc)
+{
+ return !!(hc->flags & HTTPCLIENT_FS_ENDED);
+}
+
+/* Return 1 if the httpclient started */
+static inline int httpclient_started(struct httpclient *hc)
+{
+
+ return !!(hc->flags & HTTPCLIENT_FS_STARTED);
+}
+
+#endif /* ! _HAPROXY_HTTPCLIENT_H */
diff --git a/include/haproxy/http_ext-t.h b/include/haproxy/http_ext-t.h
new file mode 100644
index 0000000..68eb047
--- /dev/null
+++ b/include/haproxy/http_ext-t.h
@@ -0,0 +1,149 @@
+/*
+ * include/haproxy/http_ext-t.h
+ * Version-agnostic and implementation-agnostic HTTP extensions definitions
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTPEXT_T_H
+#define _HAPROXY_HTTPEXT_T_H
+
+#include <arpa/inet.h>
+#include <import/ist.h>
+#include <haproxy/tools-t.h>
+
+enum forwarded_header_attribute_type {
+ FORWARDED_HEADER_UNK = 0,
+ FORWARDED_HEADER_OBFS = 1,
+ FORWARDED_HEADER_PORT = 2,
+ FORWARDED_HEADER_IP = 3,
+};
+
+struct forwarded_header_nodename {
+ union {
+ struct sockaddr_storage ip;
+ struct ist obfs;
+ };
+ enum forwarded_header_attribute_type type;
+};
+
+struct forwarded_header_nodeport {
+ union {
+ uint16_t port;
+ struct ist obfs;
+ };
+ enum forwarded_header_attribute_type type;
+};
+
+struct forwarded_header_node {
+ struct forwarded_header_nodename nodename;
+ struct forwarded_header_nodeport nodeport;
+ struct ist raw;
+};
+
+enum forwarded_header_proto {
+ FORWARDED_HEADER_HTTP = 1,
+ FORWARDED_HEADER_HTTPS = 2
+};
+
+struct forwarded_header_ctx {
+ struct forwarded_header_node nfor;
+ struct forwarded_header_node nby;
+ struct ist host;
+ enum forwarded_header_proto proto;
+};
+
+enum http_ext_7239_forby_mode {
+ HTTP_7239_FORBY_ORIG = 1,
+ HTTP_7239_FORBY_SMP = 2
+};
+struct http_ext_7239_forby {
+ /* nn = nodename, np = nodeport */
+ union {
+ char *nn_expr_s;
+ struct sample_expr *nn_expr;
+ };
+ union {
+ char *np_expr_s;
+ struct sample_expr *np_expr;
+ };
+ enum http_ext_7239_forby_mode nn_mode;
+ enum http_ext_7239_forby_mode np_mode;
+};
+
+enum http_ext_7239_host_mode {
+ HTTP_7239_HOST_ORIG = 1,
+ HTTP_7239_HOST_SMP = 2
+};
+struct http_ext_7239_host {
+ union {
+ char *expr_s;
+ struct sample_expr *expr;
+ };
+ enum http_ext_7239_host_mode mode;
+};
+
+struct http_ext_7239 {
+ /* forwarded header parameters options */
+ struct http_ext_7239_forby p_for;
+ struct http_ext_7239_forby p_by;
+ struct http_ext_7239_host p_host;
+ uint8_t p_proto;
+ /* config error hints, used only during configuration parsing */
+ char *c_file;
+ int c_line;
+ int c_mode; /* 0: parsed, 1: compiled */
+};
+
+enum forwarded_header_field {
+ FORWARDED_HEADER_FOR = 0x01,
+ FORWARDED_HEADER_BY = 0x02,
+ FORWARDED_HEADER_HOST = 0x04,
+ FORWARDED_HEADER_PROTO = 0x08,
+ FORWARDED_HEADER_ALL = FORWARDED_HEADER_FOR|FORWARDED_HEADER_BY|FORWARDED_HEADER_HOST|FORWARDED_HEADER_PROTO
+};
+
+enum http_ext_xff_mode {
+ HTTP_XFF_IFNONE = 0, /* set if not already set */
+ HTTP_XFF_ALWAYS = 1 /* always set x-forwarded-for */
+};
+struct http_ext_xff {
+ struct ist hdr_name; /* header to use - default: "x-forwarded-for" */
+ struct net_addr except_net; /* don't forward x-forward-for for this address. */
+ uint8_t mode;
+};
+
+struct http_ext_xot {
+ struct ist hdr_name; /* header to use - default: "x-original-to" */
+ struct net_addr except_net; /* don't forward x-original-to for this address. */
+};
+
+/* http_ext options */
+struct http_ext {
+ /* forwarded header (RFC 7239) */
+ struct http_ext_7239 *fwd;
+ /* x-forward-for:
+ * conditionally insert x-forwarded-for with client address
+ */
+ struct http_ext_xff *xff;
+ /* x-original-to:
+ * insert x-original-to with destination address
+ */
+ struct http_ext_xot *xot;
+};
+
+#endif /* !_HAPROXY_HTTPEXT_T_H */
diff --git a/include/haproxy/http_ext.h b/include/haproxy/http_ext.h
new file mode 100644
index 0000000..53764a2
--- /dev/null
+++ b/include/haproxy/http_ext.h
@@ -0,0 +1,58 @@
+/*
+ * include/haproxy/http_ext.h
+ * Functions for Version-agnostic and implementation-agnostic HTTP extensions
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTPEXT_H
+#define _HAPROXY_HTTPEXT_H
+
+#include <haproxy/http_ext-t.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream-t.h>
+
+int http_validate_7239_header(struct ist hdr, int required_steps, struct forwarded_header_ctx *ctx);
+
+int http_handle_7239_header(struct stream *s, struct channel *req);
+int http_handle_xff_header(struct stream *s, struct channel *req);
+int http_handle_xot_header(struct stream *s, struct channel *req);
+
+int proxy_http_parse_7239(char **args, int cur_arg, struct proxy *curproxy, const struct proxy *defpx, const char *file, int linenum);
+int proxy_http_compile_7239(struct proxy *curproxy);
+int proxy_http_parse_xff(char **args, int cur_arg, struct proxy *curproxy, const struct proxy *defpx, const char *file, int linenum);
+int proxy_http_parse_xot(char **args, int cur_arg, struct proxy *curproxy, const struct proxy *defpx, const char *file, int linenum);
+
+int http_ext_7239_prepare(struct proxy *cur);
+int http_ext_xff_prepare(struct proxy *cur);
+int http_ext_xot_prepare(struct proxy *cur);
+
+void http_ext_7239_dup(const struct proxy *def, struct proxy *cpy);
+void http_ext_xff_dup(const struct proxy *def, struct proxy *cpy);
+void http_ext_xot_dup(const struct proxy *def, struct proxy *cpy);
+
+void http_ext_7239_clean(struct proxy *cur);
+void http_ext_xff_clean(struct proxy *cur);
+void http_ext_xot_clean(struct proxy *cur);
+
+int http_ext_prepare(struct proxy *cur);
+void http_ext_dup(const struct proxy *def, struct proxy *cpy);
+void http_ext_clean(struct proxy *cur);
+void http_ext_softclean(struct proxy *cur);
+
+#endif /* !_HAPROXY_HTTPEXT_H */
diff --git a/include/haproxy/http_fetch.h b/include/haproxy/http_fetch.h
new file mode 100644
index 0000000..7997629
--- /dev/null
+++ b/include/haproxy/http_fetch.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/http_fetch.h
+ * This file contains the minimally required http sample fetch declarations.
+ *
+ * Copyright (C) 2000-2018 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_FETCH_H
+#define _HAPROXY_HTTP_FETCH_H
+
+#include <haproxy/api.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/check-t.h>
+#include <haproxy/sample-t.h>
+
+struct htx *smp_prefetch_htx(struct sample *smp, struct channel *chn, struct check *check, int vol);
+int val_hdr(struct arg *arg, char **err_msg);
+
+#endif /* _HAPROXY_HTTP_FETCH_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/http_htx-t.h b/include/haproxy/http_htx-t.h
new file mode 100644
index 0000000..8051925
--- /dev/null
+++ b/include/haproxy/http_htx-t.h
@@ -0,0 +1,95 @@
+/*
+ * include/haproxy/http_htx-t.h
+ * This file defines everything related to HTTP manipulation using the internal
+ * representation.
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_HTX_T_H
+#define _HAPROXY_HTTP_HTX_T_H
+
+#include <import/ebistree.h>
+#include <import/ist.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/http-t.h>
+#include <haproxy/htx-t.h>
+
+/* Context used to find/remove an HTTP header. */
+struct http_hdr_ctx {
+ struct htx_blk *blk;
+ struct ist value;
+ uint16_t lws_before;
+ uint16_t lws_after;
+};
+
+
+/* Structure used to build the header list of an HTTP reply */
+struct http_reply_hdr {
+ struct ist name; /* the header name */
+ struct list value; /* the log-format string value */
+ struct list list; /* header chained list */
+};
+
+#define HTTP_REPLY_EMPTY 0x00 /* the reply has no payload */
+#define HTTP_REPLY_ERRMSG 0x01 /* the reply is an error message (may be NULL) */
+#define HTTP_REPLY_ERRFILES 0x02 /* the reply references an http-errors section */
+#define HTTP_REPLY_RAW 0x03 /* the reply use a raw payload */
+#define HTTP_REPLY_LOGFMT 0x04 /* the reply use a log-format payload */
+#define HTTP_REPLY_INDIRECT 0x05 /* the reply references another http-reply (may be NULL) */
+
+/* Used by HAProxy to generate internal responses */
+struct http_reply {
+ unsigned char type; /* HTTP_REPLY_* */
+ int status; /* The response status code */
+ char *ctype; /* The response content-type, may be NULL */
+ struct list hdrs; /* A list of http_reply_hdr */
+ union {
+ struct list fmt; /* A log-format string (type = HTTP_REPLY_LOGFMT) */
+ struct buffer obj; /* A raw string (type = HTTP_REPLY_RAW) */
+ struct buffer *errmsg; /* The error message to use as response (type = HTTP_REPLY_ERRMSG).
+ * may be NULL, if so rely on the proxy error messages */
+ struct http_reply *reply; /* The HTTP reply to use as response (type = HTTP_REPLY_INDIRECT) */
+ char *http_errors; /* The http-errors section to use (type = HTTP_REPLY_ERRFILES).
+ * Should be resolved during post-check */
+ } body;
+ struct list list; /* next http_reply in the global list.
+ * Only used for replies defined in a proxy section */
+};
+
+/* A custom HTTP error message loaded from a raw file and converted to HTX. The
+ * node key is the file path.
+ */
+struct http_error_msg {
+ struct buffer msg;
+ struct ebpt_node node;
+};
+
+/* http-errors section and parameters. */
+struct http_errors {
+ char *id; /* unique identifier */
+ struct {
+ char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ } conf; /* config information */
+
+ struct http_reply *replies[HTTP_ERR_SIZE]; /* HTTP replies for known errors */
+ struct list list; /* http-errors list */
+};
+
+#endif /* _HAPROXY_HTTP_HTX_T_H */
diff --git a/include/haproxy/http_htx.h b/include/haproxy/http_htx.h
new file mode 100644
index 0000000..3d01a06
--- /dev/null
+++ b/include/haproxy/http_htx.h
@@ -0,0 +1,84 @@
+/*
+ * include/haproxy/http_htx.h
+ * This file defines function prototypes for HTTP manipulation using the
+ * internal representation.
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_HTX_H
+#define _HAPROXY_HTTP_HTX_H
+
+#include <import/ist.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/http_htx-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/regex-t.h>
+
+extern struct buffer http_err_chunks[HTTP_ERR_SIZE];
+extern struct http_reply http_err_replies[HTTP_ERR_SIZE];
+extern struct list http_errors_list;
+
+struct htx_sl *http_get_stline(const struct htx *htx);
+size_t http_get_hdrs_size(struct htx *htx);
+int http_find_header(const struct htx *htx, const struct ist name, struct http_hdr_ctx *ctx, int full);
+int http_find_str_header(const struct htx *htx, const struct ist name, struct http_hdr_ctx *ctx, int full);
+int http_find_pfx_header(const struct htx *htx, const struct ist prefix, struct http_hdr_ctx *ctx, int full);
+int http_find_sfx_header(const struct htx *htx, const struct ist suffix, struct http_hdr_ctx *ctx, int full);
+int http_find_sub_header(const struct htx *htx, const struct ist sub, struct http_hdr_ctx *ctx, int full);
+int http_match_header(const struct htx *htx, const struct my_regex *re, struct http_hdr_ctx *ctx, int full);
+int http_add_header(struct htx *htx, const struct ist n, const struct ist v);
+int http_replace_stline(struct htx *htx, const struct ist p1, const struct ist p2, const struct ist p3);
+int http_replace_req_meth(struct htx *htx, const struct ist meth);
+int http_replace_req_uri(struct htx *htx, const struct ist uri);
+int http_replace_req_path(struct htx *htx, const struct ist path, int with_qs);
+int http_replace_req_query(struct htx *htx, const struct ist query);
+int http_replace_res_status(struct htx *htx, const struct ist status, const struct ist reason);
+int http_replace_res_reason(struct htx *htx, const struct ist reason);
+int http_append_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data);
+int http_prepend_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data);
+int http_replace_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data);
+int http_replace_header(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist name, const struct ist value);
+int http_remove_header(struct htx *htx, struct http_hdr_ctx *ctx);
+int http_update_authority(struct htx *htx, struct htx_sl *sl, const struct ist host);
+int http_update_host(struct htx *htx, struct htx_sl *sl, const struct ist uri);
+
+unsigned int http_get_htx_hdr(const struct htx *htx, const struct ist hdr,
+ int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen);
+unsigned int http_get_htx_fhdr(const struct htx *htx, const struct ist hdr,
+ int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen);
+int http_str_to_htx(struct buffer *buf, struct ist raw, char **errmsg);
+
+void release_http_reply(struct http_reply *http_reply);
+int http_check_http_reply(struct http_reply *reply, struct proxy*px, char **errmsg);
+struct http_reply *http_parse_http_reply(const char **args, int *orig_arg, struct proxy *px,
+ int default_status, char **errmsg);
+
+int http_scheme_based_normalize(struct htx *htx);
+
+void http_cookie_register(struct http_hdr *list, int idx, int *first, int *last);
+int http_cookie_merge(struct htx *htx, struct http_hdr *list, int first);
+
+struct buffer *http_load_errorfile(const char *file, char **errmsg);
+struct buffer *http_load_errormsg(const char *key, const struct ist msg, char **errmsg);
+struct buffer *http_parse_errorfile(int status, const char *file, char **errmsg);
+struct buffer *http_parse_errorloc(int errloc, int status, const char *url, char **errmsg);
+int proxy_dup_default_conf_errors(struct proxy *curpx, const struct proxy *defpx, char **errmsg);
+void proxy_release_conf_errors(struct proxy *px);
+
+#endif /* _HAPROXY_HTTP_HTX_H */
diff --git a/include/haproxy/http_rules.h b/include/haproxy/http_rules.h
new file mode 100644
index 0000000..740b546
--- /dev/null
+++ b/include/haproxy/http_rules.h
@@ -0,0 +1,56 @@
+/*
+ * include/haproxy/http_rules.h
+ * This file contains "http" rules definitions
+ *
+ * Copyright (C) 2000-2018 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTTP_RULES_H
+#define _HAPROXY_HTTP_RULES_H
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/list.h>
+#include <haproxy/proxy-t.h>
+
+extern struct action_kw_list http_req_keywords;
+extern struct action_kw_list http_res_keywords;
+extern struct action_kw_list http_after_res_keywords;
+
+struct act_rule *parse_http_req_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
+struct act_rule *parse_http_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
+struct act_rule *parse_http_after_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy);
+void http_free_redirect_rule(struct redirect_rule *rdr);
+struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, struct proxy *curproxy,
+ const char **args, char **errmsg, int use_fmt, int dir);
+
+void http_req_keywords_register(struct action_kw_list *kw_list);
+void http_res_keywords_register(struct action_kw_list *kw_list);
+void http_after_res_keywords_register(struct action_kw_list *kw_list);
+
+struct action_kw *action_http_req_custom(const char *kw);
+struct action_kw *action_http_res_custom(const char *kw);
+struct action_kw *action_http_after_res_custom(const char *kw);
+
+#endif /* _HAPROXY_HTTP_RULES_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/htx-t.h b/include/haproxy/htx-t.h
new file mode 100644
index 0000000..2ea6bc8
--- /dev/null
+++ b/include/haproxy/htx-t.h
@@ -0,0 +1,277 @@
+/*
+ * include/haproxy/htx-t.h
+ * This file declares the types and constants used by the internal HTTP messages
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTX_T_H
+#define _HAPROXY_HTX_T_H
+
+#include <haproxy/api.h>
+#include <haproxy/http-t.h>
+#include <haproxy/show_flags-t.h>
+
+/*
+ * The internal representation of an HTTP message, called HTX, is a structure
+ * with useful information on the message followed by a contiguous array
+ * containing parts of the message, called blocks. A block is composed of
+ * metadata (htx_blk) and the associated payload. Blocks' metadata are stored
+ * starting from the end of the array while their payload are stored at the
+ * beginning. Blocks' metadata are often simply called blocks. It is a misuse of
+ * language that simplifies explanations.
+ *
+ *
+ * +-----+---------------+------------------------------+--------------+
+ * | HTX | PAYLOADS ==> | | <== HTX_BLKs |
+ * +-----+---------------+------------------------------+--------------+
+ * ^
+ * blocks[] (the beginning of the blocks array)
+ *
+ *
+ * The blocks part remains linear and sorted. You may think about it as an array
+ * with negative indexes. But, instead of using negative indexes, we use
+ * positive positions to identify a block. This position is then converted to an
+ * address relative to the beginning of the blocks array.
+ *
+ *
+ * .....--+------------------------------+-----+-----+
+ * | ... | BLK | BLK |
+ * .....--+------------------------------+-----+-----+
+ * ^ ^
+ * Addr of the block Addr of the block
+ * at the position 1 at the position 0
+ *
+ *
+ * The payloads part is a raw space that may wrap. You never access a block's
+ * payload directly. Instead you get a block to retrieve the address of its
+ * payload. When there is no space left between the blocks and payloads parts, the free
+ * space at the beginning, if any, is used.
+ *
+ *
+ * +----------- WRAPPING ------------------------+
+ * | |
+ * V |
+ * +-----+-------------+---------------+---------------++--------------+
+ * | HTX | PAYLOAD ==> | | PAYLOADS ==X || X== HTX_BLKs |
+ * +-----+-------------+---------------+---------------++--------------+
+ *
+ *
+ * The blocks part, on its side, never wrap. If we have no space to allocate a
+ * new block and if there is a hole at the beginning of the blocks part (so at
+ * the end of the blocks array), we move back all blocks.
+ *
+ *
+ * ...+--------------+----------+ blocks ...+----------+--------------+
+ * | X== HTX_BLKS | | defrag | | <== HTX_BLKS |
+ * ...+--------------+----------+ =====> ...+----------+--------------+
+ *
+ *
+ * At the end, if payload wrapping or blocks defragmentation is not enough, some
+ * free space may be recovered with a full defragmentation. This way, the holes in
+ * the middle are not reusable but count in the available free space. The only
+ * way to reuse this lost space is to fully defragment the HTX message.
+ *
+ * - * -
+ *
+ * An HTX block may be a header as well as a body part or a trailer. For all these
+ * types of block, a payload is attached to the block. It can also be a mark,
+ * like the end-of-headers or end-of-trailers. For these blocks, there is no
+ * payload but it counts for one byte. It is important not to skip it when data are
+ * forwarded. Metadata of an HTX block are composed of 2 fields :
+ *
+ * - .info : It is a 32-bit field containing the block's type on 4 bits
+ * followed by the payload length. See below for details.
+ *
+ * - .addr : The payload's address, if any, relative to the beginning of the
+ * array used to store the HTX message itself.
+ *
+ * htx_blk.info representation :
+ *
+ * 0b 0000 0000 0000 0000 0000 0000 0000 0000
+ * ---- ------------------------ ---------
+ * type value (1 MB max) name length (header/trailer)
+ * ----------------------------------
+ * data length (256 MB max)
+ * (body, method, path, version, status, reason)
+ *
+ * types :
+ * - 0000 = request start-line
+ * - 0001 = response start-line
+ * - 0010 = header
+ * - 0011 = pseudo-header or "special" header
+ * - 0100 = end-of-headers
+ * - 0101 = data
+ * - 0110 = trailer
+ * - 0111 = end-of-trailers
+ * ...
+ * - 1111 = unused
+ *
+ */
+
+/* HTX start-line flags.
+ * Please also update the se_show_flags() function below in case of changes.
+ */
+#define HTX_SL_F_NONE 0x00000000
+#define HTX_SL_F_IS_RESP 0x00000001 /* It is the response start-line (unset means the request one) */
+#define HTX_SL_F_XFER_LEN 0x00000002 /* The message xfer size can be determined */
+#define HTX_SL_F_XFER_ENC 0x00000004 /* The transfer-encoding header was found in message */
+#define HTX_SL_F_CLEN 0x00000008 /* The content-length header was found in message */
+#define HTX_SL_F_CHNK 0x00000010 /* The message payload is chunked */
+#define HTX_SL_F_VER_11 0x00000020 /* The message indicates version 1.1 or above */
+#define HTX_SL_F_BODYLESS 0x00000040 /* The message has no body (content-length = 0) */
+#define HTX_SL_F_HAS_SCHM 0x00000080 /* The scheme is explicitly specified */
+#define HTX_SL_F_SCHM_HTTP 0x00000100 /* The scheme HTTP should be used */
+#define HTX_SL_F_SCHM_HTTPS 0x00000200 /* The scheme HTTPS should be used */
+#define HTX_SL_F_HAS_AUTHORITY 0x00000400 /* The request authority is explicitly specified */
+#define HTX_SL_F_NORMALIZED_URI 0x00000800 /* The received URI is normalized (an implicit absolute-uri form) */
+#define HTX_SL_F_CONN_UPG 0x00001000 /* The message contains "connection: upgrade" header */
+#define HTX_SL_F_BODYLESS_RESP 0x00002000 /* The response to this message is bodyless (only for request) */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *hsl_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+
+ _(HTX_SL_F_IS_RESP, _(HTX_SL_F_XFER_LEN, _(HTX_SL_F_XFER_ENC,
+ _(HTX_SL_F_CLEN, _(HTX_SL_F_CHNK, _(HTX_SL_F_VER_11,
+ _(HTX_SL_F_BODYLESS, _(HTX_SL_F_HAS_SCHM, _(HTX_SL_F_SCHM_HTTP,
+ _(HTX_SL_F_SCHM_HTTPS, _(HTX_SL_F_HAS_AUTHORITY,
+ _(HTX_SL_F_NORMALIZED_URI, _(HTX_SL_F_CONN_UPG)))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* Overhead induced by HTX on buffers during transfers. In addition to the size
+ * of the HTX structure itself, and meta data for one block, another block is
+ * accounted to favored zero-copy xfer.
+ */
+#define HTX_BUF_OVERHEAD (sizeof(struct htx) + 2 * sizeof(struct htx_blk))
+
+/* HTX flags.
+ * Please also update the htx_show_flags() function below in case of changes.
+ */
+#define HTX_FL_NONE 0x00000000
+#define HTX_FL_PARSING_ERROR 0x00000001 /* Set when a parsing error occurred */
+#define HTX_FL_PROCESSING_ERROR 0x00000002 /* Set when a processing error occurred */
+#define HTX_FL_FRAGMENTED 0x00000004 /* Set when the HTX buffer is fragmented */
+#define HTX_FL_PROXY_RESP 0x00000008 /* Set when the response was generated by HAProxy */
+#define HTX_FL_EOM 0x00000010 /* Set when end-of-message is reached from the HTTP point of view
+ * (at worst, only the EOM block is missing)
+ */
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *htx_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(HTX_FL_PARSING_ERROR, _(HTX_FL_PROCESSING_ERROR,
+ _(HTX_FL_FRAGMENTED, _(HTX_FL_PROXY_RESP, _(HTX_FL_EOM)))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+
+/* HTX block's type (max 15). */
+enum htx_blk_type {
+ HTX_BLK_REQ_SL = 0, /* Request start-line */
+ HTX_BLK_RES_SL = 1, /* Response start-line */
+ HTX_BLK_HDR = 2, /* header name/value block */
+ HTX_BLK_EOH = 3, /* end-of-headers block */
+ HTX_BLK_DATA = 4, /* data block */
+ HTX_BLK_TLR = 5, /* trailer name/value block */
+ HTX_BLK_EOT = 6, /* end-of-trailers block */
+ /* 7 .. 14 unused */
+ HTX_BLK_UNUSED = 15, /* unused/removed block */
+};
+
+/* One HTX block descriptor */
+struct htx_blk {
+ uint32_t addr; /* relative storage address of the block's payload */
+ uint32_t info; /* information about the block (type, length) */
+};
+
+/* Composite return value used by some HTX functions */
+struct htx_ret {
+ int32_t ret; /* A numerical value */
+ struct htx_blk *blk; /* An HTX block */
+};
+
+/* HTX start-line */
+struct htx_sl {
+ unsigned int flags; /* HTX_SL_F_* */
+ union {
+ struct {
+ enum http_meth_t meth; /* method */
+ } req;
+ struct {
+ uint16_t status; /* status code */
+ } res;
+ } info;
+
+ /* XXX 2 bytes unused */
+
+ unsigned int len[3]; /* length of different parts of the start-line */
+ char l[VAR_ARRAY];
+};
+
+/* Internal representation of an HTTP message */
+struct htx {
+ uint32_t size; /* the array size, in bytes, used to store the HTTP message itself */
+ uint32_t data; /* the data size, in bytes. To know the total size used by all allocated
+ * blocks (blocks and their contents), you need to add the size used by blocks,
+ * i.e. [ used * sizeof(struct htx_blk *) ] */
+
+ int32_t tail; /* newest inserted block. -1 if the HTX message is empty */
+ int32_t head; /* oldest inserted block. -1 if the HTX message is empty */
+ int32_t first; /* position of the first block to (re)start the analyse. -1 if unset. */
+
+ uint32_t tail_addr; /* start address of the free space in front of the blocks table */
+ uint32_t head_addr; /* start address of the free space at the beginning */
+ uint32_t end_addr; /* end address of the free space at the beginning */
+
+ uint64_t extra; /* known bytes amount remaining to receive */
+ uint32_t flags; /* HTX_FL_* */
+
+ /* XXX 4 bytes unused */
+
+ /* Blocks representing the HTTP message itself */
+ char blocks[VAR_ARRAY] __attribute__((aligned(8)));
+};
+
+#endif /* _HAPROXY_HTX_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/htx.h b/include/haproxy/htx.h
new file mode 100644
index 0000000..c991c81
--- /dev/null
+++ b/include/haproxy/htx.h
@@ -0,0 +1,885 @@
+/*
+ * include/haproxy/htx.h
+ * This file defines everything related to the internal HTTP messages.
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_HTX_H
+#define _HAPROXY_HTX_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/http-t.h>
+#include <haproxy/htx-t.h>
+
+/* ->extra field value when the payload length is unknown (non-chunked message
+ * with no "Content-length" header)
+ */
+#define HTX_UNKOWN_PAYLOAD_LENGTH ULLONG_MAX
+
+extern struct htx htx_empty;
+
+struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t info);
+struct htx_blk *htx_add_blk(struct htx *htx, enum htx_blk_type type, uint32_t blksz);
+struct htx_blk *htx_remove_blk(struct htx *htx, struct htx_blk *blk);
+struct htx_ret htx_find_offset(struct htx *htx, uint32_t offset);
+void htx_truncate(struct htx *htx, uint32_t offset);
+struct htx_ret htx_drain(struct htx *htx, uint32_t max);
+
+struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
+ const struct ist old, const struct ist new);
+struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
+ enum htx_blk_type mark);
+
+struct htx_sl *htx_replace_stline(struct htx *htx, struct htx_blk *blk, const struct ist p1,
+ const struct ist p2, const struct ist p3);
+
+struct htx_blk *htx_replace_header(struct htx *htx, struct htx_blk *blk,
+ const struct ist name, const struct ist value);
+
+struct htx_ret htx_reserve_max_data(struct htx *htx);
+struct htx_blk *htx_add_data_atonce(struct htx *htx, struct ist data);
+size_t htx_add_data(struct htx *htx, const struct ist data);
+struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data);
+void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref);
+int htx_append_msg(struct htx *dst, const struct htx *src);
+
/* Functions and macros to get parts of the start-line or the length of these
 * parts. Request and response start-lines are both composed of 3 parts,
 * stored back to back in sl->l: P1/P2/P3 map to method/uri/version for a
 * request and to version/code/reason for a response.
 */
#define HTX_SL_LEN(sl) ((sl)->len[0] + (sl)->len[1] + (sl)->len[2])

#define HTX_SL_P1_LEN(sl) ((sl)->len[0])
#define HTX_SL_P2_LEN(sl) ((sl)->len[1])
#define HTX_SL_P3_LEN(sl) ((sl)->len[2])
#define HTX_SL_P1_PTR(sl) ((sl)->l)
#define HTX_SL_P2_PTR(sl) (HTX_SL_P1_PTR(sl) + HTX_SL_P1_LEN(sl))
#define HTX_SL_P3_PTR(sl) (HTX_SL_P2_PTR(sl) + HTX_SL_P2_LEN(sl))

/* Request start-line: method / uri / version */
#define HTX_SL_REQ_MLEN(sl) HTX_SL_P1_LEN(sl)
#define HTX_SL_REQ_ULEN(sl) HTX_SL_P2_LEN(sl)
#define HTX_SL_REQ_VLEN(sl) HTX_SL_P3_LEN(sl)
#define HTX_SL_REQ_MPTR(sl) HTX_SL_P1_PTR(sl)
#define HTX_SL_REQ_UPTR(sl) HTX_SL_P2_PTR(sl)
#define HTX_SL_REQ_VPTR(sl) HTX_SL_P3_PTR(sl)

/* Response start-line: version / code / reason */
#define HTX_SL_RES_VLEN(sl) HTX_SL_P1_LEN(sl)
#define HTX_SL_RES_CLEN(sl) HTX_SL_P2_LEN(sl)
#define HTX_SL_RES_RLEN(sl) HTX_SL_P3_LEN(sl)
#define HTX_SL_RES_VPTR(sl) HTX_SL_P1_PTR(sl)
#define HTX_SL_RES_CPTR(sl) HTX_SL_P2_PTR(sl)
#define HTX_SL_RES_RPTR(sl) HTX_SL_P3_PTR(sl)
+
+static inline struct ist htx_sl_p1(const struct htx_sl *sl)
+{
+ return ist2(HTX_SL_P1_PTR(sl), HTX_SL_P1_LEN(sl));
+}
+
+static inline struct ist htx_sl_p2(const struct htx_sl *sl)
+{
+ return ist2(HTX_SL_P2_PTR(sl), HTX_SL_P2_LEN(sl));
+}
+
+static inline struct ist htx_sl_p3(const struct htx_sl *sl)
+{
+ return ist2(HTX_SL_P3_PTR(sl), HTX_SL_P3_LEN(sl));
+}
+
+static inline struct ist htx_sl_req_meth(const struct htx_sl *sl)
+{
+ return htx_sl_p1(sl);
+}
+
+static inline struct ist htx_sl_req_uri(const struct htx_sl *sl)
+{
+ return htx_sl_p2(sl);
+}
+
+static inline struct ist htx_sl_req_vsn(const struct htx_sl *sl)
+{
+ return htx_sl_p3(sl);
+}
+
+
+static inline struct ist htx_sl_res_vsn(const struct htx_sl *sl)
+{
+ return htx_sl_p1(sl);
+}
+
+static inline struct ist htx_sl_res_code(const struct htx_sl *sl)
+{
+ return htx_sl_p2(sl);
+}
+
+static inline struct ist htx_sl_res_reason(const struct htx_sl *sl)
+{
+ return htx_sl_p3(sl);
+}
+
+/* Converts a position to the corresponding relative address */
+static inline uint32_t htx_pos_to_addr(const struct htx *htx, uint32_t pos)
+{
+ return htx->size - (pos + 1) * sizeof(struct htx_blk);
+}
+
+/* Returns the position of the block <blk>. It is the caller responsibility to
+ * be sure <blk> is part of <htx>. */
+static inline uint32_t htx_get_blk_pos(const struct htx *htx, const struct htx_blk *blk)
+{
+ return ((htx->blocks + htx->size - (char *)blk) / sizeof(struct htx_blk) - 1);
+}
+
+/* Returns the block at the position <pos>. It is the caller responsibility to
+ * be sure the block at the position <pos> exists. */
+static inline struct htx_blk *htx_get_blk(const struct htx *htx, uint32_t pos)
+{
+ return (struct htx_blk *)(htx->blocks + htx_pos_to_addr(htx, pos));
+}
+
+/* Returns the type of the block <blk> */
+static inline enum htx_blk_type htx_get_blk_type(const struct htx_blk *blk)
+{
+ return (blk->info >> 28);
+}
+
+/* Returns the size of the block <blk>, depending of its type */
+static inline uint32_t htx_get_blksz(const struct htx_blk *blk)
+{
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ switch (type) {
+ case HTX_BLK_HDR:
+ case HTX_BLK_TLR:
+ /* name.length + value.length */
+ return ((blk->info & 0xff) + ((blk->info >> 8) & 0xfffff));
+ default:
+ /* value.length */
+ return (blk->info & 0xfffffff);
+ }
+}
+
+/* Returns the position of the oldest entry (head). It returns a signed 32-bits
+ * integer, -1 means the HTX message is empty.
+ */
+static inline int32_t htx_get_head(const struct htx *htx)
+{
+ return htx->head;
+}
+
+/* Returns the oldest HTX block (head) if the HTX message is not
+ * empty. Otherwise it returns NULL.
+ */
+static inline struct htx_blk *htx_get_head_blk(const struct htx *htx)
+{
+ int32_t head = htx_get_head(htx);
+
+ return ((head == -1) ? NULL : htx_get_blk(htx, head));
+}
+
+/* same as above but unchecked, may only be used when certain that a block
+ * exists.
+ */
+static inline struct htx_blk *__htx_get_head_blk(const struct htx *htx)
+{
+ int32_t head = htx_get_head(htx);
+
+ return htx_get_blk(htx, head);
+}
+
+/* Returns the type of the oldest HTX block (head) if the HTX message is not
+ * empty. Otherwise it returns HTX_BLK_UNUSED.
+ */
+static inline enum htx_blk_type htx_get_head_type(const struct htx *htx)
+{
+ struct htx_blk *blk = htx_get_head_blk(htx);
+
+ return (blk ? htx_get_blk_type(blk) : HTX_BLK_UNUSED);
+}
+
+/* Returns the position of the newest entry (tail). It returns a signed 32-bits
+ * integer, -1 means the HTX message is empty.
+ */
+static inline int32_t htx_get_tail(const struct htx *htx)
+{
+ return htx->tail;
+}
+
+/* Returns the newest HTX block (tail) if the HTX message is not
+ * empty. Otherwise it returns NULL.
+ */
+static inline struct htx_blk *htx_get_tail_blk(const struct htx *htx)
+{
+ int32_t tail = htx_get_tail(htx);
+
+ return ((tail == -1) ? NULL : htx_get_blk(htx, tail));
+}
+
+/* Returns the type of the newest HTX block (tail) if the HTX message is not
+ * empty. Otherwise it returns HTX_BLK_UNUSED.
+ */
+static inline enum htx_blk_type htx_get_tail_type(const struct htx *htx)
+{
+ struct htx_blk *blk = htx_get_tail_blk(htx);
+
+ return (blk ? htx_get_blk_type(blk) : HTX_BLK_UNUSED);
+}
+
+/* Returns the position of the first block in the HTX message <htx>. -1 means
+ * the first block is unset or the HTS is empty.
+ */
+static inline int32_t htx_get_first(const struct htx *htx)
+{
+ return htx->first;
+}
+
+/* Returns the first HTX block in the HTX message <htx>. If unset or if <htx> is
+ * empty, NULL returned.
+ */
+static inline struct htx_blk *htx_get_first_blk(const struct htx *htx)
+{
+ int32_t pos;
+
+ pos = htx_get_first(htx);
+ return ((pos == -1) ? NULL : htx_get_blk(htx, pos));
+}
+
+/* Returns the type of the first block in the HTX message <htx>. If unset or if
+ * <htx> is empty, HTX_BLK_UNUSED is returned.
+ */
+static inline enum htx_blk_type htx_get_first_type(const struct htx *htx)
+{
+ struct htx_blk *blk = htx_get_first_blk(htx);
+
+ return (blk ? htx_get_blk_type(blk) : HTX_BLK_UNUSED);
+}
+
+/* Returns the position of block immediately before the one pointed by <pos>. If
+ * the message is empty or if <pos> is the position of the head, -1 returned.
+ */
+static inline int32_t htx_get_prev(const struct htx *htx, uint32_t pos)
+{
+ if (htx->head == -1 || pos == htx->head)
+ return -1;
+ return (pos - 1);
+}
+
+/* Returns the HTX block before <blk> in the HTX message <htx>. If <blk> is the
+ * head, NULL returned.
+ */
+static inline struct htx_blk *htx_get_prev_blk(const struct htx *htx,
+ const struct htx_blk *blk)
+{
+ int32_t pos;
+
+ pos = htx_get_prev(htx, htx_get_blk_pos(htx, blk));
+ return ((pos == -1) ? NULL : htx_get_blk(htx, pos));
+}
+
+/* Returns the position of block immediately after the one pointed by <pos>. If
+ * the message is empty or if <pos> is the position of the tail, -1 returned.
+ */
+static inline int32_t htx_get_next(const struct htx *htx, uint32_t pos)
+{
+ if (htx->tail == -1 || pos == htx->tail)
+ return -1;
+ return (pos + 1);
+
+}
+
+/* Returns the HTX block after <blk> in the HTX message <htx>. If <blk> is the
+ * tail, NULL returned.
+ */
+static inline struct htx_blk *htx_get_next_blk(const struct htx *htx,
+ const struct htx_blk *blk)
+{
+ int32_t pos;
+
+ pos = htx_get_next(htx, htx_get_blk_pos(htx, blk));
+ return ((pos == -1) ? NULL : htx_get_blk(htx, pos));
+}
+
+/* Returns 1 if <blk> is the block is the only one inside the HTX message <htx>,
+ * excluding all unused blocks. Otherwise, it returns 0. If 1 is returned, this
+ * means that there is only <blk> and eventually some unused ones in <htx>.
+ */
+static inline int htx_is_unique_blk(const struct htx *htx,
+ const struct htx_blk *blk)
+{
+ return (htx_get_blksz(blk) == htx->data);
+}
+
/* Changes the size of the value. It is the caller responsibility to change the
 * value itself, make sure there is enough space and update allocated
 * value. This function updates the HTX message accordingly (payload count and
 * free-space boundaries).
 */
static inline void htx_change_blk_value_len(struct htx *htx, struct htx_blk *blk, uint32_t newlen)
{
 enum htx_blk_type type = htx_get_blk_type(blk);
 uint32_t oldlen, sz;
 int32_t delta;

 /* capture the current block size before the info word is rewritten */
 sz = htx_get_blksz(blk);
 switch (type) {
 case HTX_BLK_HDR:
 case HTX_BLK_TLR:
 /* HDR/TLR layout: type[4] | value-len[20] | name-len[8] */
 oldlen = (blk->info >> 8) & 0xfffff;
 blk->info = (type << 28) + (newlen << 8) + (blk->info & 0xff);
 break;
 default:
 /* other types: type[4] | value-len[28] */
 oldlen = blk->info & 0xfffffff;
 blk->info = (type << 28) + newlen;
 break;
 }

 /* Update HTTP message: <delta> may be negative when shrinking. If the
 * block's payload ends exactly at a free-space boundary, move that
 * boundary by the same amount.
 */
 delta = (newlen - oldlen);
 htx->data += delta;
 if (blk->addr+sz == htx->tail_addr)
 htx->tail_addr += delta;
 else if (blk->addr+sz == htx->head_addr)
 htx->head_addr += delta;
}
+
+/* Changes the size of the value. It is the caller responsibility to change the
+ * value itself, make sure there is enough space and update allocated
+ * value. Unlike the function htx_change_blk_value_len(), this one does not
+ * update the HTX message. So it should be used with caution.
+ */
+static inline void htx_set_blk_value_len(struct htx_blk *blk, uint32_t vlen)
+{
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ switch (type) {
+ case HTX_BLK_HDR:
+ case HTX_BLK_TLR:
+ blk->info = (type << 28) + (vlen << 8) + (blk->info & 0xff);
+ break;
+ case HTX_BLK_REQ_SL:
+ case HTX_BLK_RES_SL:
+ case HTX_BLK_DATA:
+ blk->info = (type << 28) + vlen;
+ break;
+ default:
+ /* Unexpected case */
+ break;
+ }
+}
+
+/* Returns the data pointer of the block <blk> */
+static inline void *htx_get_blk_ptr(const struct htx *htx, const struct htx_blk *blk)
+{
+ return ((void *)htx->blocks + blk->addr);
+}
+
+/* Returns the name of the block <blk>, only if it is a header or a
+ * trailer. Otherwise it returns an empty string.
+ */
+static inline struct ist htx_get_blk_name(const struct htx *htx, const struct htx_blk *blk)
+{
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist ret;
+
+ switch (type) {
+ case HTX_BLK_HDR:
+ case HTX_BLK_TLR:
+ ret = ist2(htx_get_blk_ptr(htx, blk),
+ blk->info & 0xff);
+ break;
+
+ default:
+ return ist("");
+ }
+ return ret;
+}
+
+
+/* Returns the value of the block <blk>, depending on its type. If there is no
+ * value (for end-of blocks), an empty one is returned.
+ */
+static inline struct ist htx_get_blk_value(const struct htx *htx, const struct htx_blk *blk)
+{
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist ret;
+
+ switch (type) {
+ case HTX_BLK_HDR:
+ case HTX_BLK_TLR:
+ ret = ist2(htx_get_blk_ptr(htx, blk) + (blk->info & 0xff),
+ (blk->info >> 8) & 0xfffff);
+ break;
+
+ case HTX_BLK_REQ_SL:
+ case HTX_BLK_RES_SL:
+ case HTX_BLK_DATA:
+ ret = ist2(htx_get_blk_ptr(htx, blk),
+ blk->info & 0xfffffff);
+ break;
+
+ default:
+ return ist("");
+ }
+ return ret;
+}
+
/* Adds a new start-line of type HTX_BLK_REQ_SL or HTX_BLK_RES_SL, made of the
 * three parts <p1>, <p2> and <p3>, copied back to back after the htx_sl
 * header. It returns the start-line on success, otherwise it returns NULL. It
 * is the caller responsibility to set sl->info, if necessary.
 */
static inline struct htx_sl *htx_add_stline(struct htx *htx, enum htx_blk_type type, unsigned int flags,
 const struct ist p1, const struct ist p2, const struct ist p3)
{
 struct htx_blk *blk;
 struct htx_sl *sl;
 uint32_t size;

 /* only the two start-line types are accepted here */
 if (type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL)
 return NULL;

 size = sizeof(*sl) + p1.len + p2.len + p3.len;

 blk = htx_add_blk(htx, type, size);
 if (!blk)
 return NULL;
 blk->info += size;

 sl = htx_get_blk_ptr(htx, blk);
 sl->flags = flags;

 HTX_SL_P1_LEN(sl) = p1.len;
 HTX_SL_P2_LEN(sl) = p2.len;
 HTX_SL_P3_LEN(sl) = p3.len;

 memcpy(HTX_SL_P1_PTR(sl), p1.ptr, p1.len);
 memcpy(HTX_SL_P2_PTR(sl), p2.ptr, p2.len);
 memcpy(HTX_SL_P3_PTR(sl), p3.ptr, p3.len);

 return sl;
}
+
+/* Adds an HTX block of type HDR in <htx>. It returns the new block on
+ * success. Otherwise, it returns NULL. The header name is always lower cased.
+ */
+static inline struct htx_blk *htx_add_header(struct htx *htx, const struct ist name,
+ const struct ist value)
+{
+ struct htx_blk *blk;
+
+ if (name.len > 255 || value.len > 1048575)
+ return NULL;
+
+ blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
+ if (!blk)
+ return NULL;
+
+ blk->info += (value.len << 8) + name.len;
+ ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
+ memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
+ return blk;
+}
+
+/* Adds an HTX block of type TLR in <htx>. It returns the new block on
+ * success. Otherwise, it returns NULL. The trailer name is always lower cased.
+ */
+static inline struct htx_blk *htx_add_trailer(struct htx *htx, const struct ist name,
+ const struct ist value)
+{
+ struct htx_blk *blk;
+
+ if (name.len > 255 || value.len > 1048575)
+ return NULL;
+
+ blk = htx_add_blk(htx, HTX_BLK_TLR, name.len + value.len);
+ if (!blk)
+ return NULL;
+
+ blk->info += (value.len << 8) + name.len;
+ ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
+ memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
+ return blk;
+}
+
+/* Adds an HTX block of type EOH or EOT in <htx>. It returns the new block on
+ * success. Otherwise, it returns NULL.
+ */
+static inline struct htx_blk *htx_add_endof(struct htx *htx, enum htx_blk_type type)
+{
+ struct htx_blk *blk;
+
+ blk = htx_add_blk(htx, type, 1);
+ if (!blk)
+ return NULL;
+
+ blk->info += 1;
+ return blk;
+}
+
+/* Add all headers from the list <hdrs> into the HTX message <htx>, followed by
+ * the EOH. On success, it returns the last block inserted (the EOH), otherwise
+ * NULL is returned.
+ *
+ * Headers with a NULL value (.ptr == NULL) are ignored but not those with empty
+ * value (.len == 0 but .ptr != NULL)
+ */
+static inline struct htx_blk *htx_add_all_headers(struct htx *htx, const struct http_hdr *hdrs)
+{
+ int i;
+
+ for (i = 0; hdrs[i].n.len; i++) {
+ /* Don't check the value length because a header value may be empty */
+ if (isttest(hdrs[i].v) == 0)
+ continue;
+ if (!htx_add_header(htx, hdrs[i].n, hdrs[i].v))
+ return NULL;
+ }
+ return htx_add_endof(htx, HTX_BLK_EOH);
+}
+
+/* Add all trailers from the list <hdrs> into the HTX message <htx>, followed by
+ * the EOT. On success, it returns the last block inserted (the EOT), otherwise
+ * NULL is returned.
+ *
+ * Trailers with a NULL value (.ptr == NULL) are ignored but not those with
+ * empty value (.len == 0 but .ptr != NULL)
+ */
+static inline struct htx_blk *htx_add_all_trailers(struct htx *htx, const struct http_hdr *hdrs)
+{
+ int i;
+
+ for (i = 0; hdrs[i].n.len; i++) {
+ /* Don't check the value length because a header value may be empty */
+ if (isttest(hdrs[i].v) == 0)
+ continue;
+ if (!htx_add_trailer(htx, hdrs[i].n, hdrs[i].v))
+ return NULL;
+ }
+ return htx_add_endof(htx, HTX_BLK_EOT);
+}
+
/* Removes <n> bytes from the beginning of DATA block <blk>. The block's start
 * address and its length are adjusted, and the htx's total data count is
 * updated. This is used to mark that part of some data were transferred
 * from a DATA block without removing this DATA block. No sanity check is
 * performed, the caller is responsible for doing this exclusively on DATA
 * blocks, and never removing more than the block's size.
 */
static inline void htx_cut_data_blk(struct htx *htx, struct htx_blk *blk, uint32_t n)
{
 /* the end_addr comparison must happen BEFORE blk->addr is advanced:
 * if the block starts exactly at the end of the front free space,
 * that boundary moves forward with it */
 if (blk->addr == htx->end_addr)
 htx->end_addr += n;
 blk->addr += n;
 blk->info -= n; /* DATA blocks store the whole length in info's low 28 bits */
 htx->data -= n;
}
+
+/* Returns the space used by metadata in <htx>. */
+static inline uint32_t htx_meta_space(const struct htx *htx)
+{
+ if (htx->tail == -1)
+ return 0;
+
+ return ((htx->tail + 1 - htx->head) * sizeof(struct htx_blk));
+}
+
+/* Returns the space used (payload + metadata) in <htx> */
+static inline uint32_t htx_used_space(const struct htx *htx)
+{
+ return (htx->data + htx_meta_space(htx));
+}
+
+/* Returns the free space in <htx> */
+static inline uint32_t htx_free_space(const struct htx *htx)
+{
+ return (htx->size - htx_used_space(htx));
+}
+
+/* Returns the maximum size available to store some data in <htx> if a new block
+ * is reserved.
+ */
+static inline uint32_t htx_free_data_space(const struct htx *htx)
+{
+ uint32_t free = htx_free_space(htx);
+
+ if (free < sizeof(struct htx_blk))
+ return 0;
+ return (free - sizeof(struct htx_blk));
+}
+
+/* Returns non-zero only if the HTX message free space wraps */
+static inline int htx_space_wraps(const struct htx *htx)
+{
+ uint32_t headroom, tailroom;
+
+ headroom = (htx->end_addr - htx->head_addr);
+ tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
+
+ return (headroom && tailroom);
+}
+
+/* Returns the maximum size for a block, not exceeding <max> bytes. <max> may be
+ * set to -1 to have no limit.
+ */
+static inline uint32_t htx_get_max_blksz(const struct htx *htx, int32_t max)
+{
+ uint32_t free = htx_free_space(htx);
+
+ if (max != -1 && free > max)
+ free = max;
+ if (free < sizeof(struct htx_blk))
+ return 0;
+ return (free - sizeof(struct htx_blk));
+}
+
+/* Returns 1 if the message has less than 1/4 of its capacity free, otherwise 0 */
+static inline int htx_almost_full(const struct htx *htx)
+{
+ if (!htx->size || htx_free_space(htx) < htx->size / 4)
+ return 1;
+ return 0;
+}
+
+/* Resets an HTX message */
+static inline void htx_reset(struct htx *htx)
+{
+ htx->tail = htx->head = htx->first = -1;
+ htx->data = 0;
+ htx->tail_addr = htx->head_addr = htx->end_addr = 0;
+ htx->extra = 0;
+ htx->flags = HTX_FL_NONE;
+}
+
+/* Returns the available room for raw data in buffer <buf> once HTX overhead is
+ * taken into account (one HTX header and two blocks). The purpose is to figure
+ * the optimal fill length to avoid copies.
+ */
+static inline size_t buf_room_for_htx_data(const struct buffer *buf)
+{
+ size_t room;
+
+ room = b_room(buf);
+ if (room <= HTX_BUF_OVERHEAD)
+ room = 0;
+ else
+ room -= HTX_BUF_OVERHEAD;
+
+ return room;
+}
+
+
/* Returns an HTX message using the buffer <buf>. Unlike htx_from_buf(), this
 * function does not update the buffer. So if the HTX message is updated, the
 * caller must call htx_to_buf() to be sure to also update the underlying buffer
 * accordingly. Note that it always returns a valid pointer, either to an
 * initialized buffer or to the empty buffer. This function must always be
 * called with a buffer containing an HTX message (or an empty buffer).
 */
static inline struct htx *htxbuf(const struct buffer *buf)
{
 struct htx *htx;

 /* unallocated buffer: hand back the shared read-only empty message */
 if (b_is_null(buf))
 return &htx_empty;
 /* the HTX structure lives in place at the start of the buffer's area */
 htx = ((struct htx *)(buf->area));
 if (!b_data(buf)) {
 /* empty buffer: (re)initialize the HTX state in place, keeping
 * room for the struct htx header itself */
 htx->size = buf->size - sizeof(*htx);
 htx_reset(htx);
 }
 return htx;
}
+
+/* Returns an HTX message using the buffer <buf>. <buf> is updated to appear as
+ * full. It should be used when you want to add something into the HTX message,
+ * so the call to htx_to_buf() may be skipped. But, it is the caller
+ * responsibility to call htx_to_buf() to reset <buf> if it is relevant. The
+ * returned pointer is always valid. This function must always be called with a
+ * buffer containing an HTX message (or an empty buffer).
+ *
+ * The caller can call htxbuf() function to avoid any update of the buffer.
+ */
+static inline struct htx *htx_from_buf(struct buffer *buf)
+{
+ struct htx *htx = htxbuf(buf);
+
+ b_set_data(buf, b_size(buf));
+ return htx;
+}
+
+/* Update <buf> accordingly to the HTX message <htx> */
+static inline void htx_to_buf(struct htx *htx, struct buffer *buf)
+{
+ if ((htx->head == -1) &&
+ !(htx->flags & (HTX_FL_PARSING_ERROR|HTX_FL_PROCESSING_ERROR))) {
+ htx_reset(htx);
+ b_set_data(buf, 0);
+ }
+ else
+ b_set_data(buf, b_size(buf));
+}
+
+/* Returns 1 if the message is empty, otherwise it returns 0. Note that it is
+ * illegal to call this with htx == NULL.
+ */
+static inline int htx_is_empty(const struct htx *htx)
+{
+ return (htx->head == -1);
+}
+
+/* Returns 1 if the message is not empty, otherwise it returns 0. Note that it
+ * is illegal to call this with htx == NULL.
+ */
+static inline int htx_is_not_empty(const struct htx *htx)
+{
+ return (htx->head != -1);
+}
+
+/* Returns 1 if no more data are expected for the message <htx>. Otherwise it
+ * returns 0. Note that it is illegal to call this with htx == NULL. This
+ * function relies on the HTX_FL_EOM flags. It means tunneled data are not
+ * considered here.
+ */
+static inline int htx_expect_more(const struct htx *htx)
+{
+ return !(htx->flags & HTX_FL_EOM);
+}
+
+/* Set EOM flag in <htx>. This function is useful if the HTX message is empty.
+ * In this case, an EOT block is appended first to ensure the EOM will be
+ * forwarded as expected. This is a workaround as it is not possibly currently
+ * to push an empty HTX DATA block.
+ *
+ * Returns 1 on success else 0.
+ */
+static inline int htx_set_eom(struct htx *htx)
+{
+ if (htx_is_empty(htx)) {
+ if (!htx_add_endof(htx, HTX_BLK_EOT))
+ return 0;
+ }
+
+ htx->flags |= HTX_FL_EOM;
+ return 1;
+}
+
/* Copy an HTX message stored in the buffer <msg> to <htx>. We take care to
 * not overwrite existing data. All the message is copied or nothing. It returns
 * 1 on success and 0 on error.
 *
 * NOTE(review): the raw-copy path memcpy's <msg->size> bytes into <htx>,
 * which assumes the memory backing <htx> is at least <msg->size> bytes and
 * that the copied htx->size matches the destination allocation — presumably
 * guaranteed because both use the same buffer pool size; confirm at callers.
 */
static inline int htx_copy_msg(struct htx *htx, const struct buffer *msg)
{
 /* The destination HTX message is allocated and empty, we can do a raw copy */
 if (htx_is_empty(htx) && htx_free_space(htx)) {
 memcpy(htx, msg->area, msg->size);
 return 1;
 }

 /* Otherwise, we need to append the HTX message */
 return htx_append_msg(htx, htxbuf(msg));
}
+
+/* Remove all blocks except headers. Trailers will also be removed too. */
+static inline void htx_skip_msg_payload(struct htx *htx)
+{
+ struct htx_blk *blk = htx_get_first_blk(htx);
+
+ while (blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ blk = ((type > HTX_BLK_EOH)
+ ? htx_remove_blk(htx, blk)
+ : htx_get_next_blk(htx, blk));
+ }
+}
+
+/* Returns the number of used blocks in the HTX message <htx>. Note that it is
+ * illegal to call this function with htx == NULL. Note also blocks of type
+ * HTX_BLK_UNUSED are part of used blocks.
+ */
+static inline int htx_nbblks(const struct htx *htx)
+{
+ return ((htx->head != -1) ? (htx->tail + 1 - htx->head) : 0);
+}
+/* For debugging purpose */
+static inline const char *htx_blk_type_str(enum htx_blk_type type)
+{
+ switch (type) {
+ case HTX_BLK_REQ_SL: return "HTX_BLK_REQ_SL";
+ case HTX_BLK_RES_SL: return "HTX_BLK_RES_SL";
+ case HTX_BLK_HDR: return "HTX_BLK_HDR";
+ case HTX_BLK_EOH: return "HTX_BLK_EOH";
+ case HTX_BLK_DATA: return "HTX_BLK_DATA";
+ case HTX_BLK_TLR: return "HTX_BLK_TLR";
+ case HTX_BLK_EOT: return "HTX_BLK_EOT";
+ case HTX_BLK_UNUSED: return "HTX_BLK_UNUSED";
+ default: return "HTX_BLK_???";
+ };
+}
+
+/* For debugging purpose */
+static inline void htx_dump(struct buffer *chunk, const struct htx *htx, int full)
+{
+ int32_t pos;
+
+ chunk_appendf(chunk, " htx=%p(size=%u,data=%u,used=%u,wrap=%s,flags=0x%08x,extra=%llu,"
+ "first=%d,head=%d,tail=%d,tail_addr=%d,head_addr=%d,end_addr=%d)",
+ htx, htx->size, htx->data, htx_nbblks(htx), (!htx->head_addr) ? "NO" : "YES",
+ htx->flags, (unsigned long long)htx->extra, htx->first, htx->head, htx->tail,
+ htx->tail_addr, htx->head_addr, htx->end_addr);
+
+ if (!full || !htx_nbblks(htx))
+ return;
+ chunk_memcat(chunk, "\n", 1);
+
+ for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_sl *sl;
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+ struct ist n, v;
+
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+
+ if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL) {
+ sl = htx_get_blk_ptr(htx, blk);
+ chunk_appendf(chunk, "\t\t[%u] type=%-17s - size=%-6u - addr=%-6u\t%.*s %.*s %.*s\n",
+ pos, htx_blk_type_str(type), sz, blk->addr,
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+ }
+ else if (type == HTX_BLK_HDR || type == HTX_BLK_TLR)
+ chunk_appendf(chunk, "\t\t[%u] type=%-17s - size=%-6u - addr=%-6u\t%.*s: %.*s\n",
+ pos, htx_blk_type_str(type), sz, blk->addr,
+ (int)MIN(n.len, 32), n.ptr,
+ (int)MIN(v.len, 64), v.ptr);
+ else
+ chunk_appendf(chunk, "\t\t[%u] type=%-17s - size=%-6u - addr=%-6u%s\n",
+ pos, htx_blk_type_str(type), sz, blk->addr,
+ (!v.len ? "\t<empty>" : ""));
+ }
+}
+
+#endif /* _HAPROXY_HTX_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/init-t.h b/include/haproxy/init-t.h
new file mode 100644
index 0000000..110171b
--- /dev/null
+++ b/include/haproxy/init-t.h
@@ -0,0 +1,64 @@
+#ifndef _HAPROXY_INIT_T_H
+#define _HAPROXY_INIT_T_H
+
+#include <haproxy/list-t.h>
+
struct proxy;
struct server;

/* Each of the structs below is a list node wrapping one registered callback;
 * they are chained into the corresponding *_list in haproxy/init.h and run at
 * the matching stage of the process life cycle.
 * NOTE(review): "(*fct)()" declares an unprototyped function pointer (pre-C23
 * semantics); the registered callbacks take no arguments in practice —
 * confirm before building with -std=c23 where "()" means "(void)".
 */

/* check run before configuration parsing */
struct pre_check_fct {
 struct list list;
 int (*fct)();
};

/* check run after configuration parsing */
struct post_check_fct {
 struct list list;
 int (*fct)();
};

/* per-proxy check run after configuration parsing */
struct post_proxy_check_fct {
 struct list list;
 int (*fct)(struct proxy *);
};

/* per-server check run after configuration parsing */
struct post_server_check_fct {
 struct list list;
 int (*fct)(struct server *);
};

/* per-thread resource allocation hook */
struct per_thread_alloc_fct {
 struct list list;
 int (*fct)();
};

/* per-thread initialization hook */
struct per_thread_init_fct {
 struct list list;
 int (*fct)();
};

/* hook run at process deinitialization */
struct post_deinit_fct {
 struct list list;
 void (*fct)();
};

/* per-proxy cleanup hook */
struct proxy_deinit_fct {
 struct list list;
 void (*fct)(struct proxy *);
};

/* per-server cleanup hook */
struct server_deinit_fct {
 struct list list;
 void (*fct)(struct server *);
};

/* per-thread resource release hook */
struct per_thread_free_fct {
 struct list list;
 void (*fct)();
};

/* per-thread deinitialization hook */
struct per_thread_deinit_fct {
 struct list list;
 void (*fct)();
};
+
+#endif /* _HAPROXY_INIT_T_H */
diff --git a/include/haproxy/init.h b/include/haproxy/init.h
new file mode 100644
index 0000000..6e30475
--- /dev/null
+++ b/include/haproxy/init.h
@@ -0,0 +1,79 @@
+#ifndef _HAPROXY_INIT_H
+#define _HAPROXY_INIT_H
+
+#include <haproxy/init-t.h>
+#include <haproxy/initcall.h>
+
+struct proxy;
+struct server;
+
+extern struct list pre_check_list;
+extern struct list post_check_list;
+extern struct list post_proxy_check_list;
+extern struct list post_server_check_list;
+extern struct list per_thread_alloc_list;
+extern struct list per_thread_init_list;
+extern struct list post_deinit_list;
+extern struct list proxy_deinit_list;
+extern struct list server_deinit_list;
+extern struct list per_thread_free_list;
+extern struct list per_thread_deinit_list;
+
+void hap_register_pre_check(int (*fct)());
+void hap_register_post_check(int (*fct)());
+void hap_register_post_proxy_check(int (*fct)(struct proxy *));
+void hap_register_post_server_check(int (*fct)(struct server *));
+void hap_register_post_deinit(void (*fct)());
+void hap_register_proxy_deinit(void (*fct)(struct proxy *));
+void hap_register_server_deinit(void (*fct)(struct server *));
+
+void hap_register_per_thread_alloc(int (*fct)());
+void hap_register_per_thread_init(int (*fct)());
+void hap_register_per_thread_deinit(void (*fct)());
+void hap_register_per_thread_free(void (*fct)());
+
+/* simplified way to declare a pre-check callback in a file */
+#define REGISTER_PRE_CHECK(fct) \
+ INITCALL1(STG_REGISTER, hap_register_pre_check, (fct))
+
+/* simplified way to declare a post-check callback in a file */
+#define REGISTER_POST_CHECK(fct) \
+ INITCALL1(STG_REGISTER, hap_register_post_check, (fct))
+
+/* simplified way to declare a post-proxy-check callback in a file */
+#define REGISTER_POST_PROXY_CHECK(fct) \
+ INITCALL1(STG_REGISTER, hap_register_post_proxy_check, (fct))
+
+/* simplified way to declare a post-server-check callback in a file */
+#define REGISTER_POST_SERVER_CHECK(fct) \
+ INITCALL1(STG_REGISTER, hap_register_post_server_check, (fct))
+
+/* simplified way to declare a post-deinit callback in a file */
+#define REGISTER_POST_DEINIT(fct) \
+ INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))
+
+/* simplified way to declare a proxy-deinit callback in a file */
+#define REGISTER_PROXY_DEINIT(fct) \
+ INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))
+
+/* simplified way to declare a server-deinit callback in a file */
+#define REGISTER_SERVER_DEINIT(fct) \
+ INITCALL1(STG_REGISTER, hap_register_server_deinit, (fct))
+
+/* simplified way to declare a per-thread allocation callback in a file */
+#define REGISTER_PER_THREAD_ALLOC(fct) \
+ INITCALL1(STG_REGISTER, hap_register_per_thread_alloc, (fct))
+
+/* simplified way to declare a per-thread init callback in a file */
+#define REGISTER_PER_THREAD_INIT(fct) \
+ INITCALL1(STG_REGISTER, hap_register_per_thread_init, (fct))
+
+/* simplified way to declare a per-thread deinit callback in a file */
+#define REGISTER_PER_THREAD_DEINIT(fct) \
+ INITCALL1(STG_REGISTER, hap_register_per_thread_deinit, (fct))
+
+/* simplified way to declare a per-thread free callback in a file */
+#define REGISTER_PER_THREAD_FREE(fct) \
+ INITCALL1(STG_REGISTER, hap_register_per_thread_free, (fct))
+
+#endif /* _HAPROXY_INIT_H */
diff --git a/include/haproxy/initcall.h b/include/haproxy/initcall.h
new file mode 100644
index 0000000..dffec04
--- /dev/null
+++ b/include/haproxy/initcall.h
@@ -0,0 +1,257 @@
+/*
+ * include/haproxy/initcall.h
+ *
+ * Initcall management.
+ *
+ * Copyright (C) 2018-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_INITCALL_H
+#define _HAPROXY_INITCALL_H
+
+#include <haproxy/compiler.h>
+
+/* List of known init stages. If others are added, please declare their
+ * section at the end of the file below.
+ */
+
+/* The principle of the initcalls is to create optional sections in the target
+ * program which are made of arrays of structures containing a function pointer
+ * and 3 argument pointers. Then at boot time, these sections are scanned in a
+ * well defined order to call in turn each of these functions with their
+ * arguments. This allows to declare register callbacks in C files without
+ * having to export lots of things nor to cross-reference functions. There are
+ * several initialization stages defined so that certain guarantees are offered
+ * (for example list heads might or might not be initialized, pools might or
+ * might not have been created yet).
+ *
+ * On some very old platforms there is no convenient way to retrieve the start
+ * or stop pointer for these sections so there is no reliable way to enumerate
+ * the callbacks. When this is the case, as detected when USE_OBSOLETE_LINKER
+ * is set, instead of using sections we exclusively use constructors whose name
+ * is based on the current line number in the file to guarantee uniqueness.
+ * When called, these constructors then add their callback to their respective
+ * list. It works as well but slightly inflates the executable's size since
+ * code has to be emitted just to register each of these callbacks.
+ */
+
+/*
+ * Please keep those names short enough, they are used to generate section
+ * names, Mac OS X accepts section names up to 16 characters, and we prefix
+ * them with i_, so stage name can't be more than 14 characters.
+ */
+enum init_stage {
+ STG_PREPARE = 0, // preset variables, tables, list heads
+ STG_LOCK, // pre-initialize locks
+ STG_REGISTER, // register static lists (keywords etc)
+ STG_ALLOC, // allocate required structures
+ STG_POOL, // create pools
+ STG_INIT, // subsystems normal initialization
+ STG_SIZE // size of the stages array, must be last
+};
+
+/* This is the descriptor for an initcall */
+struct initcall {
+ void (*const fct)(void *arg1, void *arg2, void *arg3);
+ void *arg1;
+ void *arg2;
+ void *arg3;
+#if defined(USE_OBSOLETE_LINKER)
+ void *next;
+#endif
+};
+
+
+#if !defined(USE_OBSOLETE_LINKER)
+
+#define HA_INIT_SECTION(s) HA_SECTION("i_" # s)
+
+/* Declare a static variable in the init section dedicated to stage <stg>,
+ * with an element referencing function <function> and arguments <a1..a3>.
+ * <linenum> is needed to deduplicate entries created from a same file. The
+ * trick with (stg<STG_SIZE) consists in verifying that stg is a valid enum
+ * value from the initcall set, and to emit a warning or error if it is not.
+ * The function's type is cast so that it is technically possible to call a
+ * function taking other argument types, provided they are all the same size
+ * as a pointer (args are cast to (void*)). Do not use this macro directly,
+ * use INITCALL{0..3}() instead.
+ */
+#define __DECLARE_INITCALL(stg, linenum, function, a1, a2, a3) \
+ HA_GLOBL(__start_i_##stg ); \
+ HA_GLOBL(__stop_i_##stg ); \
+ static const struct initcall *__initcb_##linenum \
+ __attribute__((__used__)) HA_INIT_SECTION(stg) = \
+ (stg < STG_SIZE) ? &(const struct initcall) { \
+ .fct = (void (*)(void *,void *,void *))function, \
+ .arg1 = (void *)(a1), \
+ .arg2 = (void *)(a2), \
+ .arg3 = (void *)(a3), \
+ } : NULL
+
+
+#else // USE_OBSOLETE_LINKER
+
+/* Declare a static constructor function to register a static descriptor for
+ * stage <stg>, with an element referencing function <function> and arguments
+ * <a1..a3>. <linenum> is needed to deduplicate entries created from a same
+ * file. The trick with (stg<STG_SIZE) consists in verifying that stg is a
+ * valid enum value from the initcall set, and to emit a warning or error if
+ * it is not.
+ * The function's type is cast so that it is technically possible to call a
+ * function taking other argument types, provided they are all the same size
+ * as a pointer (args are cast to (void*)). Do not use this macro directly,
+ * use INITCALL{0..3}() instead.
+ */
+#define __DECLARE_INITCALL(stg, linenum, function, a1, a2, a3) \
+__attribute__((constructor)) static void __initcb_##linenum() \
+{ \
+ static struct initcall entry = { \
+ .fct = (void (*)(void *,void *,void *))function, \
+ .arg1 = (void *)(a1), \
+ .arg2 = (void *)(a2), \
+ .arg3 = (void *)(a3), \
+ }; \
+ if (stg < STG_SIZE) { \
+ entry.next = __initstg[stg]; \
+ __initstg[stg] = &entry; \
+ }; \
+}
+
+#endif // USE_OBSOLETE_LINKER
+
+/* This is used to resolve <linenum> to an integer before calling
+ * __DECLARE_INITCALL(). Do not use this macro directly, use INITCALL{0..3}()
+ * instead.
+ */
+#define _DECLARE_INITCALL(...) \
+ __DECLARE_INITCALL(__VA_ARGS__)
+
+/* This requires that function <function> is called without any argument
+ * during init stage <stage> which must be one of init_stage.
+ */
+#define INITCALL0(stage, function) \
+ _DECLARE_INITCALL(stage, __LINE__, function, 0, 0, 0)
+
+/* This requires that function <function> is called with pointer argument
+ * <arg1> during init stage <stage> which must be one of init_stage.
+ */
+#define INITCALL1(stage, function, arg1) \
+ _DECLARE_INITCALL(stage, __LINE__, function, arg1, 0, 0)
+
+/* This requires that function <function> is called with pointer arguments
+ * <arg1..2> during init stage <stage> which must be one of init_stage.
+ */
+#define INITCALL2(stage, function, arg1, arg2) \
+ _DECLARE_INITCALL(stage, __LINE__, function, arg1, arg2, 0)
+
+/* This requires that function <function> is called with pointer arguments
+ * <arg1..3> during init stage <stage> which must be one of init_stage.
+ */
+#define INITCALL3(stage, function, arg1, arg2, arg3) \
+ _DECLARE_INITCALL(stage, __LINE__, function, arg1, arg2, arg3)
+
+#if !defined(USE_OBSOLETE_LINKER)
+/* Iterate pointer p (of type initcall**) over all registered calls at
+ * stage <stg>.
+ */
+#define FOREACH_INITCALL(p,stg) \
+ for ((p) = &(__start_i_##stg); (p) < &(__stop_i_##stg); (p)++)
+
+#else // USE_OBSOLETE_LINKER
+
+#define FOREACH_INITCALL(p,stg) \
+ for ((p) = __initstg[stg]; (p); (p) = (p)->next)
+#endif // USE_OBSOLETE_LINKER
+
+
+#if !defined(USE_OBSOLETE_LINKER)
+/* Declare a section for stage <stg>. The start and stop pointers are set by
+ * the linker itself, which is why they're declared extern here. The weak
+ * attribute is used so that we declare them ourselves if the section is
+ * empty. The corresponding sections must contain exclusively pointers to
+ * make sure each location may safely be visited by incrementing a pointer.
+ */
+#define DECLARE_INIT_SECTION(stg) \
+ extern __attribute__((__weak__)) const struct initcall *__start_i_##stg HA_SECTION_START("i_" # stg); \
+ extern __attribute__((__weak__)) const struct initcall *__stop_i_##stg HA_SECTION_STOP("i_" # stg)
+
+/* Declare all initcall sections here */
+DECLARE_INIT_SECTION(STG_PREPARE);
+DECLARE_INIT_SECTION(STG_LOCK);
+DECLARE_INIT_SECTION(STG_REGISTER);
+DECLARE_INIT_SECTION(STG_ALLOC);
+DECLARE_INIT_SECTION(STG_POOL);
+DECLARE_INIT_SECTION(STG_INIT);
+
+// for use in the main haproxy.c file
+#define DECLARE_INIT_STAGES asm("")
+
+/* not needed anymore */
+#undef DECLARE_INIT_SECTION
+
+#else // USE_OBSOLETE_LINKER
+
+extern struct initcall *__initstg[STG_SIZE];
+
+// for use in the main haproxy.c file
+#define DECLARE_INIT_STAGES struct initcall *__initstg[STG_SIZE]
+
+#endif // USE_OBSOLETE_LINKER
+
+#if !defined(USE_OBSOLETE_LINKER)
+/* Run the initcalls for stage <stg>. The test on <stg> is only there to
+ * ensure it is a valid initcall stage.
+ */
+#define RUN_INITCALLS(stg) \
+ do { \
+ const struct initcall **ptr; \
+ if (stg >= STG_SIZE) \
+ break; \
+ FOREACH_INITCALL(ptr, stg) \
+ (*ptr)->fct((*ptr)->arg1, (*ptr)->arg2, (*ptr)->arg3); \
+ } while (0)
+
+#else // USE_OBSOLETE_LINKER
+
+/* Run the initcalls for stage <stg>. The test on <stg> is only there to
+ * ensure it is a valid initcall stage.
+ */
+#define RUN_INITCALLS(stg) \
+ do { \
+ const struct initcall *ptr; \
+ if (stg >= STG_SIZE) \
+ break; \
+ FOREACH_INITCALL(ptr, stg) \
+ (ptr)->fct((ptr)->arg1, (ptr)->arg2, (ptr)->arg3); \
+ } while (0)
+
+#endif // USE_OBSOLETE_LINKER
+
+#endif /* _HAPROXY_INITCALL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/intops.h b/include/haproxy/intops.h
new file mode 100644
index 0000000..34010cc
--- /dev/null
+++ b/include/haproxy/intops.h
@@ -0,0 +1,495 @@
+/*
+ * include/haproxy/intops.h
+ * Functions for integer operations.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef _HAPROXY_INTOPS_H
+#define _HAPROXY_INTOPS_H
+
+#include <haproxy/api.h>
+
+/* exported functions, mostly integer parsing */
+/* rounds <i> down to the closest value having max 2 digits */
+unsigned int round_2dig(unsigned int i);
+unsigned int full_hash(unsigned int a);
+int varint_bytes(uint64_t v);
+unsigned int read_uint(const char **s, const char *end);
+long long read_int64(const char **s, const char *end);
+unsigned long long read_uint64(const char **s, const char *end);
+unsigned int str2ui(const char *s);
+unsigned int str2uic(const char *s);
+unsigned int strl2ui(const char *s, int len);
+unsigned int strl2uic(const char *s, int len);
+int strl2ic(const char *s, int len);
+int strl2irc(const char *s, int len, int *ret);
+int strl2llrc(const char *s, int len, long long *ret);
+int strl2llrc_dotted(const char *text, int len, long long *ret);
+unsigned int mask_find_rank_bit(unsigned int r, unsigned long m);
+unsigned int mask_find_rank_bit_fast(unsigned int r, unsigned long m,
+ unsigned long a, unsigned long b,
+ unsigned long c, unsigned long d);
+void mask_prep_rank_map(unsigned long m,
+ unsigned long *a, unsigned long *b,
+ unsigned long *c, unsigned long *d);
+int one_among_mask(unsigned long v, int bit);
+
+
+/* Multiply the two 32-bit operands and shift the 64-bit result right 32 bits.
+ * This is used to compute fixed ratios by setting one of the operands to
+ * (2^32*ratio).
+ */
+static inline unsigned int mul32hi(unsigned int a, unsigned int b)
+{
	/* the extra "+ a" rounds the result up so that a ratio of 0xffffffff
	 * behaves as 1.0: (a*(2^32-1) + a) >> 32 == (a*2^32) >> 32 == a,
	 * instead of a-1 without it.
	 */
+ return ((unsigned long long)a * b + a) >> 32;
+}
+
+/* gcc does not know when it can safely divide 64 bits by 32 bits. Use this
+ * function when you know for sure that the result fits in 32 bits, because
+ * it is optimal on x86 and on 64bit processors.
+ */
+static inline unsigned int div64_32(unsigned long long o1, unsigned int o2)
+{
+ unsigned long long result;
+#ifdef __i386__
	/* 32-bit x86: one "divl" divides the 64-bit EDX:EAX pair by <o2>.
	 * NOTE(review): the CPU raises a divide error (#DE) when the quotient
	 * does not fit in 32 bits, hence the caller requirement stated above.
	 */
+ asm("divl %2"
+ : "=A" (result)
+ : "A"(o1), "rm"(o2));
+#else
	/* other targets: let the compiler emit a plain 64/32 division */
+ result = o1 / o2;
+#endif
+ return result;
+}
+
/* Rotate left a 64-bit integer <v> by <bits> bits, with <bits> in [0-63].
 * The complementary right shift uses (-bits & 63) so that its amount stays
 * within [0,63] even when <bits> is zero, which avoids the undefined
 * behaviour of shifting a 64-bit value by 64.
 */
static inline uint64_t rotl64(uint64_t v, uint8_t bits)
{
#if !defined(__ARM_ARCH_8A) && !defined(__x86_64__)
	/* presumably unneeded on targets whose rotate instructions mask the
	 * count themselves — NOTE(review): confirm for new targets.
	 */
	bits &= 63;
#endif
	return (v << bits) | (v >> (-bits & 63));
}
+
/* Rotate right a 64-bit integer <v> by <bits> bits, with <bits> in [0-63].
 * As in rotl64(), the complementary left shift uses (-bits & 63) to keep
 * the shift amount valid when <bits> is zero.
 */
static inline uint64_t rotr64(uint64_t v, uint8_t bits)
{
#if !defined(__ARM_ARCH_8A) && !defined(__x86_64__)
	/* presumably unneeded on targets whose rotate instructions mask the
	 * count themselves — NOTE(review): confirm for new targets.
	 */
	bits &= 63;
#endif
	return (v >> bits) | (v << (-bits & 63));
}
+
+/* Simple popcountl implementation. It returns the number of ones in a word.
+ * Described here : https://graphics.stanford.edu/~seander/bithacks.html
+ */
+static inline unsigned int my_popcountl(unsigned long a)
+{
	/* SWAR reduction: ~0UL/3 = 0x55.., ~0UL/15*3 = 0x33.., ~0UL/255*15 =
	 * 0x0f.., so each step sums bit counts over fields twice as wide,
	 * without any branch.
	 */
+ a = a - ((a >> 1) & ~0UL/3);
+ a = (a & ~0UL/15*3) + ((a >> 2) & ~0UL/15*3);
+ a = (a + (a >> 4)) & ~0UL/255*15;
	/* multiplying by ~0UL/255 (0x0101..01) accumulates all per-byte
	 * counts into the most significant byte, which is then shifted down.
	 */
+ return (unsigned long)(a * (~0UL/255)) >> (sizeof(unsigned long) - 1) * 8;
+}
+
/* Returns non-zero if <a> has at least 2 bits set, otherwise zero. The
 * returned value is <a> with its lowest set bit cleared, which is non-zero
 * exactly when another bit remains.
 */
static inline unsigned long atleast2(unsigned long a)
{
	unsigned long without_lowest_bit = a & (a - 1);

	return without_lowest_bit;
}
+
+/* Simple ffs implementation. It returns the position of the lowest bit set to
+ * one, starting at 1. It is illegal to call it with a==0 (undefined result).
+ */
+static inline unsigned int my_ffsl(unsigned long a)
+{
+ unsigned long cnt;
+
+#if defined(__x86_64__)
	/* "bsf" yields the 0-based index of the lowest set bit; +1 converts
	 * it to this function's 1-based convention.
	 */
+ __asm__("bsf %1,%0\n" : "=r" (cnt) : "rm" (a));
+ cnt++;
+#else
+
	/* generic path: binary search. Each test checks whether the low half
	 * of the current window is empty; if so, skip over it and add its
	 * width to the count.
	 */
+ cnt = 1;
+#if LONG_MAX > 0x7FFFFFFFL /* 64bits */
+ if (!(a & 0xFFFFFFFFUL)) {
+ a >>= 32;
+ cnt += 32;
+ }
+#endif
+ if (!(a & 0XFFFFU)) {
+ a >>= 16;
+ cnt += 16;
+ }
+ if (!(a & 0XFF)) {
+ a >>= 8;
+ cnt += 8;
+ }
+ if (!(a & 0xf)) {
+ a >>= 4;
+ cnt += 4;
+ }
+ if (!(a & 0x3)) {
+ a >>= 2;
+ cnt += 2;
+ }
+ if (!(a & 0x1)) {
+ cnt += 1;
+ }
+#endif /* x86_64 */
+
+ return cnt;
+}
+
+/* Simple fls implementation. It returns the position of the highest bit set to
+ * one, starting at 1. It is illegal to call it with a==0 (undefined result).
+ */
+static inline unsigned int my_flsl(unsigned long a)
+{
+ unsigned long cnt;
+
+#if defined(__x86_64__)
	/* "bsr" yields the 0-based index of the highest set bit; +1 converts
	 * it to this function's 1-based convention.
	 */
+ __asm__("bsr %1,%0\n" : "=r" (cnt) : "rm" (a));
+ cnt++;
+#else
+
	/* generic path: binary search, mirror of my_ffsl(). Each test checks
	 * whether the high half of the current window contains a bit; if so,
	 * focus on it and add its offset to the count.
	 */
+ cnt = 1;
+#if LONG_MAX > 0x7FFFFFFFUL /* 64bits */
+ if (a & 0xFFFFFFFF00000000UL) {
+ a >>= 32;
+ cnt += 32;
+ }
+#endif
+ if (a & 0XFFFF0000U) {
+ a >>= 16;
+ cnt += 16;
+ }
+ if (a & 0XFF00) {
+ a >>= 8;
+ cnt += 8;
+ }
+ if (a & 0xf0) {
+ a >>= 4;
+ cnt += 4;
+ }
+ if (a & 0xc) {
+ a >>= 2;
+ cnt += 2;
+ }
+ if (a & 0x2) {
+ cnt += 1;
+ }
+#endif /* x86_64 */
+
+ return cnt;
+}
+
/* Build a word with the <bits> lower bits set (reverse of my_popcountl).
 * A zero or negative count yields 0. The (2UL << (bits - 1)) form avoids
 * shifting by the full word width when <bits> equals the word size.
 */
static inline unsigned long nbits(int bits)
{
	if (bits <= 0)
		return 0;
	return (2UL << (bits - 1)) - 1;
}
+
+/* Turns 64-bit value <a> from host byte order to network byte order.
+ * The principle consists in letting the compiler detect we're playing
+ * with a union and simplify most or all operations. The asm-optimized
+ * htonl() version involving bswap (x86) / rev (arm) / other is a single
+ * operation on little endian, or a NOP on big-endian. In both cases,
+ * this lets the compiler "see" that we're rebuilding a 64-bit word from
+ * two 32-bit quantities that fit into a 32-bit register. In big endian,
+ * the whole code is optimized out. In little endian, with a decent compiler,
+ * a few bswap and 2 shifts are left, which is the minimum acceptable.
+ */
+static inline unsigned long long my_htonll(unsigned long long a)
+{
+#if defined(__x86_64__)
	/* single-instruction swap of all 8 bytes of the register */
+ __asm__ volatile("bswapq %0" : "=r"(a) : "0"(a));
+ return a;
+#else
	/* split into two 32-bit halves, swap each with htonl(), and emit
	 * them in reversed order.
	 */
+ union {
+ struct {
+ unsigned int w1;
+ unsigned int w2;
+ } by32;
+ unsigned long long by64;
+ } w = { .by64 = a };
+ return ((unsigned long long)htonl(w.by32.w1) << 32) | htonl(w.by32.w2);
+#endif
+}
+
+/* Turns 64-bit value <a> from network byte order to host byte order. */
+static inline unsigned long long my_ntohll(unsigned long long a)
+{
	/* byte swapping is an involution: applying the host->network swap a
	 * second time restores host order, so my_htonll() is reused as-is.
	 */
+ return my_htonll(a);
+}
+
/* sets bit <bit> into map <map>, which must be long-aligned */
static inline void ha_bit_set(unsigned long bit, long *map)
{
	unsigned long word = bit / (8 * sizeof(*map));
	unsigned long mask = 1UL << (bit & (8 * sizeof(*map) - 1));

	map[word] |= mask;
}
+
/* clears bit <bit> from map <map>, which must be long-aligned */
static inline void ha_bit_clr(unsigned long bit, long *map)
{
	unsigned long word = bit / (8 * sizeof(*map));
	unsigned long mask = 1UL << (bit & (8 * sizeof(*map) - 1));

	map[word] &= ~mask;
}
+
/* flips bit <bit> from map <map>, which must be long-aligned */
static inline void ha_bit_flip(unsigned long bit, long *map)
{
	unsigned long word = bit / (8 * sizeof(*map));
	unsigned long mask = 1UL << (bit & (8 * sizeof(*map) - 1));

	map[word] ^= mask;
}
+
/* returns 1 if bit <bit> from map <map> is set, otherwise 0 */
static inline int ha_bit_test(unsigned long bit, const long *map)
{
	unsigned long word = bit / (8 * sizeof(*map));
	unsigned long mask = 1UL << (bit & (8 * sizeof(*map) - 1));

	return (map[word] & mask) != 0;
}
+
+/* hash a 32-bit integer to another 32-bit integer. This code may be large when
+ * inlined, use full_hash() instead.
+ */
+static inline unsigned int __full_hash(unsigned int a)
+{
+ /* This function is one of Bob Jenkins' full avalanche hashing
+ * functions, which when provides quite a good distribution for little
+ * input variations. The result is quite suited to fit over a 32-bit
+ * space with enough variations so that a randomly picked number falls
+ * equally before any server position.
+ * Check http://burtleburtle.net/bob/hash/integer.html for more info.
+ */
+ a = (a+0x7ed55d16) + (a<<12);
+ a = (a^0xc761c23c) ^ (a>>19);
+ a = (a+0x165667b1) + (a<<5);
+ a = (a+0xd3a2646c) ^ (a<<9);
+ a = (a+0xfd7046c5) + (a<<3);
+ a = (a^0xb55a4f09) ^ (a>>16);
+
+ /* ensure values are better spread all around the tree by multiplying
+ * by a large prime close to 3/4 of the tree.
+ */
	/* 3221225473U == 0xC0000001 == 3*2^30 + 1, i.e. 3/4 of the 32-bit space */
+ return a * 3221225473U;
+}
+
/*
 * Return integer equivalent of character <c> for a hex digit (0-9, a-f, A-F),
 * otherwise -1. This keeps the original's compact subtract-and-compare
 * arithmetic (each unsigned-char comparison handles a digit class) but lays
 * the steps out as explicit branches with early returns.
 */
static inline int hex2i(int c)
{
	c -= '0';
	if ((unsigned char)c <= 9)
		return c;                       /* '0'..'9' */
	c -= 'A' - '0';
	if ((unsigned char)c > 5) {             /* not 'A'..'F' */
		c -= 'a' - 'A';
		if ((unsigned char)c > 5)       /* not 'a'..'f' either */
			c = -11;                /* -11 + 10 => -1 */
	}
	return c + 10;
}
+
/* This one is 6 times faster than strtoul() on athlon, but does
 * no check at all: every character up to the terminating NUL is treated
 * as a digit, using unsigned wrap-around arithmetic.
 */
static inline unsigned int __str2ui(const char *s)
{
	unsigned int result = 0;

	for (; *s; s++)
		result = result * 10 + (unsigned char)*s - '0';
	return result;
}
+
/* This one is 5 times faster than strtoul() on athlon with checks.
 * It returns the value of the number composed of all valid digits read,
 * stopping at the first non-digit character (the unsigned subtraction
 * turns any non-digit into a value above 9).
 */
static inline unsigned int __str2uic(const char *s)
{
	unsigned int result = 0;

	for (;;) {
		unsigned int digit = (*s++) - '0';

		if (digit > 9)
			break;
		result = result * 10 + digit;
	}
	return result;
}
+
/* This one is 28 times faster than strtoul() on athlon, but does
 * no check at all! Exactly <len> characters are consumed and treated as
 * digits, using unsigned wrap-around arithmetic.
 */
static inline unsigned int __strl2ui(const char *s, int len)
{
	unsigned int result = 0;

	while (len-- > 0)
		result = result * 10 + (unsigned char)*s++ - '0';
	return result;
}
+
/* This one is 7 times faster than strtoul() on athlon with checks.
 * It returns the value of the number composed of all valid digits read,
 * stopping after <len> characters or at the first non-digit, whichever
 * comes first.
 */
static inline unsigned int __strl2uic(const char *s, int len)
{
	unsigned int result = 0;

	while (len-- > 0) {
		unsigned int digit = (*s++) - '0';

		if (digit > 9)
			break;
		result = result * 10 + digit;
	}
	return result;
}
+
/* This function reads an unsigned integer from the string pointed to by <s>
 * and returns it. The <s> pointer is adjusted to point to the first unread
 * char (either the first non-digit or <end>). The function automatically
 * stops at <end>.
 */
static inline unsigned int __read_uint(const char **s, const char *end)
{
	const char *p = *s;
	unsigned int value = 0;

	for (; p < end; p++) {
		unsigned int digit = (unsigned int)(*p - '0');

		if (digit > 9)
			break;
		value = value * 10 + digit;
	}
	*s = p;
	return value;
}
+
/* Returns the number of bytes needed to encode <v> as a varint (see
 * encode_varint() for the format; the thresholds below are the largest
 * values encodable on each byte count). Be careful, use it only with
 * constants as it generates a large code (typ. ~180 bytes); use the
 * varint_bytes() function instead in case of doubt.
 *
 * The previous implementation used the GCC/Clang-specific "case x ... y"
 * switch-range extension; this comparison chain is strictly ISO C while
 * remaining fully resolvable at compile time for constant arguments.
 */
static inline int __varint_bytes(uint64_t v)
{
	if (v <= 0x00000000000000efULL) return 1;
	if (v <= 0x00000000000008efULL) return 2;
	if (v <= 0x00000000000408efULL) return 3;
	if (v <= 0x00000000020408efULL) return 4;
	if (v <= 0x00000001020408efULL) return 5;
	if (v <= 0x00000081020408efULL) return 6;
	if (v <= 0x00004081020408efULL) return 7;
	if (v <= 0x00204081020408efULL) return 8;
	if (v <= 0x10204081020408efULL) return 9;
	return 10;
}
+
/* Encode the integer <i> into a varint (variable-length integer) and copy
 * the encoded value into <*buf>. The encoding format is:
 *
 *        0 <= X < 240        : 1 byte  [ XXXX XXXX ]
 *      240 <= X < 2288       : 2 bytes [ 1111 XXXX ] [ 0XXX XXXX ]
 *     2288 <= X < 264432     : 3 bytes [ 1111 XXXX ] [ 1XXX XXXX ] [ 0XXX XXXX ]
 *     ... each extra continuation byte (high bit set) adds 7 bits.
 *
 * On success the number of written bytes is returned and <*buf> is moved
 * past the encoded value. Otherwise -1 is returned and <*buf> is left
 * untouched (though bytes may already have been stored up to <end>).
 */
static inline int encode_varint(uint64_t i, char **buf, char *end)
{
	unsigned char *out = (unsigned char *)*buf;
	unsigned char *stop = (unsigned char *)end;
	int written;

	if (out >= stop)
		return -1;

	if (i < 240) {
		/* short form: the value fits as-is in a single byte */
		*out++ = (unsigned char)i;
	} else {
		/* long form: low 4 bits go with the 1111 prefix, then 7 bits
		 * per continuation byte, offset-encoded.
		 */
		*out++ = (unsigned char)i | 240;
		i = (i - 240) >> 4;
		while (i >= 128) {
			if (out >= stop)
				return -1;
			*out++ = (unsigned char)i | 128;
			i = (i - 128) >> 7;
		}
		if (out >= stop)
			return -1;
		*out++ = (unsigned char)i;
	}

	written = (char *)out - *buf;
	*buf = (char *)out;
	return written;
}
+
/* Decode a varint from <*buf> and store the decoded value in <*i>. See
 * encode_varint() for details about the format. On success the number of
 * read bytes is returned and <*buf> is moved past the varint; otherwise
 * -1 is returned, <*buf> is untouched, and <*i> may hold a partial value.
 */
static inline int decode_varint(char **buf, char *end, uint64_t *i)
{
	unsigned char *in = (unsigned char *)*buf;
	unsigned char *stop = (unsigned char *)end;
	unsigned char byte;
	int shift;
	int consumed;

	if (in >= stop)
		return -1;

	*i = *in++;
	if (*i < 240) {
		/* short form: single-byte value */
		*buf = (char *)in;
		return 1;
	}

	/* long form: accumulate 7 bits per continuation byte, starting at
	 * shift 4 since the first byte only contributed its low 4 bits.
	 */
	shift = 4;
	do {
		if (in >= stop)
			return -1;
		byte = *in++;
		*i += (uint64_t)byte << shift;
		shift += 7;
	} while (byte >= 128);

	consumed = (char *)in - *buf;
	*buf = (char *)in;
	return consumed;
}
+
+#endif /* _HAPROXY_INTOPS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/istbuf.h b/include/haproxy/istbuf.h
new file mode 100644
index 0000000..392ec46
--- /dev/null
+++ b/include/haproxy/istbuf.h
@@ -0,0 +1,162 @@
+/*
+ * include/haproxy/istbuf.h
+ * Functions used to manipulate indirect strings with wrapping buffers.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HAPROXY_ISTBUF_H
+#define _HAPROXY_ISTBUF_H
+
+#include <sys/types.h>
+#include <import/ist.h>
+#include <haproxy/buf.h>
+
+
+/* b_isteq() : returns > 0 if the first <n> characters of buffer <b> starting
+ * at offset <o> relative to the buffer's head match <ist>. (empty strings do
+ * match). It is designed to be used with reasonably small strings (it matches
+ * a single byte per loop iteration). It is expected to be used with an offset
+ * to skip old data. For example :
+ * - "input" contents : b_isteq(b, old_cnt, new_cnt, ist);
+ * - "output" contents : b_isteq(b, 0, old_cnt, ist);
+ * Return value :
+ * >0 : the number of matching bytes
+ * =0 : not enough bytes (or matching of empty string)
+ * <0 : non-matching byte found
+ */
+static inline ssize_t b_isteq(const struct buffer *b, size_t o, size_t n, const struct ist ist)
+{
+ struct ist r = ist;
+ const char *p;
+ const char *end = b_wrap(b);
+
	/* fewer than ist.len bytes available: cannot possibly match */
+ if (n < r.len)
+ return 0;
+
+ p = b_peek(b, o);
+ while (r.len--) {
+ if (*p++ != *r.ptr++)
+ return -1;
	/* wrap back to the start of storage when the wrap point is hit */
+ if (unlikely(p == end))
+ p = b_orig(b);
+ }
+ return ist.len;
+}
+
+/* Same as b_isteq but case-insensitive */
+static inline ssize_t b_isteqi(const struct buffer *b, size_t o, size_t n, const struct ist ist)
+{
+ struct ist r = ist;
+ const char *p;
+ const char *end = b_wrap(b);
+
	/* fewer than ist.len bytes available: cannot possibly match */
+ if (n < r.len)
+ return 0;
+
+ p = b_peek(b, o);
+ while (r.len--) {
	/* exact-match fast path first, then compare through the ist_lc
	 * lowercase translation table for the case-insensitive match
	 */
+ if (*p != *r.ptr &&
+ ist_lc[(unsigned char)*p] != ist_lc[(unsigned char)*r.ptr])
+ return -1;
+ p++;
+ r.ptr++;
	/* wrap back to the start of storage when the wrap point is hit */
+ if (unlikely(p == end))
+ p = b_orig(b);
+ }
+ return ist.len;
+}
+
+/* b_isteat() : "eats" string <ist> from the head of buffer <b>. Wrapping data
+ * is explicitly supported. It matches a single byte per iteration so strings
+ * should remain reasonably small. Returns :
+ * > 0 : number of bytes matched and eaten
+ * = 0 : not enough bytes (or matching an empty string)
+ * < 0 : non-matching byte found
+ */
+static inline ssize_t b_isteat(struct buffer *b, const struct ist ist)
+{
+ ssize_t ret = b_isteq(b, 0, b_data(b), ist);
+
+ if (ret > 0)
+ b_del(b, ret);
+ return ret;
+}
+
+/* b_istput() : injects string <ist> at the tail of output buffer <b> provided
+ * that it fits. Wrapping is supported. It's designed for small strings as it
+ * only writes a single byte per iteration. Returns the number of characters
+ * copied (ist.len), 0 if it temporarily does not fit, or -1 if it will never
+ * fit. It will only modify the buffer upon success. In all cases, the contents
+ * are copied prior to reporting an error, so that the destination at least
+ * contains a valid but truncated string.
+ */
+static inline ssize_t b_istput(struct buffer *b, const struct ist ist)
+{
+ const char *end = b_wrap(b);
+ struct ist r = ist;
+ char *p;
+
	/* no room right now: report -1 only if the string could not fit
	 * even in a completely empty buffer
	 */
+ if (r.len > (size_t)b_room(b))
+ return r.len < b->size ? 0 : -1;
+
	/* reserve the space up-front, then fill it byte by byte, wrapping
	 * back to the start of storage when the wrap point is hit
	 */
+ p = b_tail(b);
+ b->data += r.len;
+ while (r.len--) {
+ *p++ = *r.ptr++;
+ if (unlikely(p == end))
+ p = b_orig(b);
+ }
+ return ist.len;
+}
+
+/* b_putist() : tries to copy as much as possible of string <ist> into buffer
+ * <b> and returns the number of bytes copied (truncation is possible). It uses
+ * b_putblk() and is suitable for large blocks.
+ */
+static inline size_t b_putist(struct buffer *b, const struct ist ist)
+{
+ return b_putblk(b, ist.ptr, ist.len);
+}
+
+/* builds and return a <struct buffer> based on <ist>
+ */
+static inline struct buffer ist2buf(const struct ist ist)
+{
+ struct buffer buf;
+
+ buf.area = ist.ptr;
+ buf.size = ist.len;
+ buf.data = ist.len;
+ buf.head = 0;
+ return buf;
+}
+
+#endif /* _HAPROXY_ISTBUF_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/jwt-t.h b/include/haproxy/jwt-t.h
new file mode 100644
index 0000000..e94607e
--- /dev/null
+++ b/include/haproxy/jwt-t.h
@@ -0,0 +1,86 @@
+/*
+ * include/haproxy/jwt-t.h
+ * Macros, variables and structures for JWT management.
+ *
+ * Copyright (C) 2021 HAProxy Technologies, Remi Tricot-Le Breton <rlebreton@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_JWT_T_H
+#define _HAPROXY_JWT_T_H
+
+#include <haproxy/openssl-compat.h>
+
+#ifdef USE_OPENSSL
+enum jwt_alg {
+ JWT_ALG_DEFAULT,
+ JWS_ALG_NONE,
+ JWS_ALG_HS256,
+ JWS_ALG_HS384,
+ JWS_ALG_HS512,
+ JWS_ALG_RS256,
+ JWS_ALG_RS384,
+ JWS_ALG_RS512,
+ JWS_ALG_ES256,
+ JWS_ALG_ES384,
+ JWS_ALG_ES512,
+ JWS_ALG_PS256,
+ JWS_ALG_PS384,
+ JWS_ALG_PS512,
+};
+
+struct jwt_item {
	/* one dot-delimited element of a token; NOTE(review): <start>
	 * presumably points into the original token buffer without NUL
	 * termination, <length> being its size — confirm with jwt_tokenize()
	 */
+ char *start;
+ size_t length;
+};
+
+struct jwt_ctx {
	/* parsing/verification context: the algorithm plus the three token
	 * elements (JOSE header, claims, signature) and the verification key
	 */
+ enum jwt_alg alg;
+ struct jwt_item jose;
+ struct jwt_item claims;
+ struct jwt_item signature;
+ char *key;
+ unsigned int key_length;
+};
+
+enum jwt_elt {
+ JWT_ELT_JOSE = 0,
+ JWT_ELT_CLAIMS,
+ JWT_ELT_SIG,
+ JWT_ELT_MAX
+};
+
+struct jwt_cert_tree_entry {
+ EVP_PKEY *pkey;
+ struct ebmb_node node;
+ char path[VAR_ARRAY];
+};
+
+enum jwt_vrfy_status {
+ JWT_VRFY_KO = 0,
+ JWT_VRFY_OK = 1,
+
+ JWT_VRFY_UNKNOWN_ALG = -1,
+ JWT_VRFY_UNMANAGED_ALG = -2,
+ JWT_VRFY_INVALID_TOKEN = -3,
+ JWT_VRFY_OUT_OF_MEMORY = -4,
+ JWT_VRFY_UNKNOWN_CERT = -5
+};
+
+#endif /* USE_OPENSSL */
+
+
+#endif /* _HAPROXY_JWT_T_H */
diff --git a/include/haproxy/jwt.h b/include/haproxy/jwt.h
new file mode 100644
index 0000000..a343ffa
--- /dev/null
+++ b/include/haproxy/jwt.h
@@ -0,0 +1,37 @@
+/*
+ * include/haproxy/jwt.h
+ * Functions for JSON Web Token (JWT) management.
+ *
+ * Copyright (C) 2021 HAProxy Technologies, Remi Tricot-Le Breton <rlebreton@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_JWT_H
+#define _HAPROXY_JWT_H
+
+#include <haproxy/jwt-t.h>
+#include <haproxy/buf-t.h>
+
+#ifdef USE_OPENSSL
+enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len);
+int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num);
+int jwt_tree_load_cert(char *path, int pathlen, char **err);
+
+enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
+ const struct buffer *key);
+#endif /* USE_OPENSSL */
+
+#endif /* _HAPROXY_JWT_H */
diff --git a/include/haproxy/lb_chash-t.h b/include/haproxy/lb_chash-t.h
new file mode 100644
index 0000000..c437981
--- /dev/null
+++ b/include/haproxy/lb_chash-t.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/lb_chash-t.h
+ * Types for Consistent Hash LB algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_CHASH_T_H
+#define _HAPROXY_LB_CHASH_T_H
+
+#include <import/ebtree-t.h>
+
+struct lb_chash {
+ struct eb_root act; /* weighted chash entries of active servers */
+ struct eb_root bck; /* weighted chash entries of backup servers */
+ struct eb32_node *last; /* last node found in case of round robin (or NULL) */
+};
+
+#endif /* _HAPROXY_LB_CHASH_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_chash.h b/include/haproxy/lb_chash.h
new file mode 100644
index 0000000..7950457
--- /dev/null
+++ b/include/haproxy/lb_chash.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/lb_chash.h
+ * Function declarations for Consistent Hash LB algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_CHASH_H
+#define _HAPROXY_LB_CHASH_H
+
+#include <haproxy/api.h>
+#include <haproxy/lb_chash-t.h>
+
+struct proxy;
+struct server;
+int chash_init_server_tree(struct proxy *p);
+struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid);
+struct server *chash_get_server_hash(struct proxy *p, unsigned int hash, const struct server *avoid);
+
+#endif /* _HAPROXY_LB_CHASH_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fas-t.h b/include/haproxy/lb_fas-t.h
new file mode 100644
index 0000000..cfb274c
--- /dev/null
+++ b/include/haproxy/lb_fas-t.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/lb_fas-t.h
+ * Types for First Available Server load balancing algorithm.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FAS_T_H
+#define _HAPROXY_LB_FAS_T_H
+
+#include <import/ebtree-t.h>
+
+struct lb_fas {
+ struct eb_root act; /* weighted least conns on the active servers */
+ struct eb_root bck; /* weighted least conns on the backup servers */
+};
+
+#endif /* _HAPROXY_LB_FAS_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fas.h b/include/haproxy/lb_fas.h
new file mode 100644
index 0000000..b12831c
--- /dev/null
+++ b/include/haproxy/lb_fas.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/lb_fas.h
+ * First Available Server load balancing algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FAS_H
+#define _HAPROXY_LB_FAS_H
+
+#include <haproxy/api.h>
+#include <haproxy/lb_fas-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+
+struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid);
+void fas_init_server_tree(struct proxy *p);
+
+#endif /* _HAPROXY_LB_FAS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fwlc-t.h b/include/haproxy/lb_fwlc-t.h
new file mode 100644
index 0000000..258a6ab
--- /dev/null
+++ b/include/haproxy/lb_fwlc-t.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/lb_fwlc-t.h
+ * Types for Fast Weighted Least Connection load balancing algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FWLC_T_H
+#define _HAPROXY_LB_FWLC_T_H
+
+#include <import/ebtree-t.h>
+
+struct lb_fwlc {
+ struct eb_root act; /* weighted least conns on the active servers */
+ struct eb_root bck; /* weighted least conns on the backup servers */
+};
+
+#endif /* _HAPROXY_LB_FWLC_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fwlc.h b/include/haproxy/lb_fwlc.h
new file mode 100644
index 0000000..a598af9
--- /dev/null
+++ b/include/haproxy/lb_fwlc.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/lb_fwlc.h
+ * Fast Weighted Least Connection load balancing algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FWLC_H
+#define _HAPROXY_LB_FWLC_H
+
+#include <haproxy/api.h>
+#include <haproxy/lb_fwlc-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+
+struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid);
+void fwlc_init_server_tree(struct proxy *p);
+
+#endif /* _HAPROXY_LB_FWLC_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fwrr-t.h b/include/haproxy/lb_fwrr-t.h
new file mode 100644
index 0000000..f7b746e
--- /dev/null
+++ b/include/haproxy/lb_fwrr-t.h
@@ -0,0 +1,50 @@
+/*
+ * include/haproxy/lb_fwrr-t.h
+ * Types for Fast Weighted Round Robin load balancing algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FWRR_T_H
+#define _HAPROXY_LB_FWRR_T_H
+
+#include <import/ebtree-t.h>
+
+/* This structure is used to apply fast weighted round robin on a server group */
+struct fwrr_group {
+ struct eb_root curr; /* tree for servers in "current" time range */
+ struct eb_root t0, t1; /* "init" and "next" servers */
+ struct eb_root *init; /* servers waiting to be placed */
+ struct eb_root *next; /* servers to be placed at next run */
+ int curr_pos; /* current position in the tree */
+ int curr_weight; /* total weight of the current time range */
+ int next_weight; /* total weight of the next time range */
+};
+
+struct lb_fwrr {
+ struct fwrr_group act; /* weighted round robin on the active servers */
+ struct fwrr_group bck; /* weighted round robin on the backup servers */
+};
+
+#endif /* _HAPROXY_LB_FWRR_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_fwrr.h b/include/haproxy/lb_fwrr.h
new file mode 100644
index 0000000..27b0a94
--- /dev/null
+++ b/include/haproxy/lb_fwrr.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/lb_fwrr.h
+ * Fast Weighted Round Robin load balancing algorithm.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_FWRR_H
+#define _HAPROXY_LB_FWRR_H
+
+#include <haproxy/api.h>
+#include <haproxy/lb_fwrr-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+
+void fwrr_init_server_groups(struct proxy *p);
+struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid);
+
+#endif /* _HAPROXY_LB_FWRR_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_map-t.h b/include/haproxy/lb_map-t.h
new file mode 100644
index 0000000..6d1dd1a
--- /dev/null
+++ b/include/haproxy/lb_map-t.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/lb_map-t.h
+ * Types for map-based load-balancing (RR and HASH)
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_MAP_T_H
+#define _HAPROXY_LB_MAP_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/server-t.h>
+
+struct lb_map {
+ struct server **srv; /* the server map used to apply weights */
+ int rr_idx; /* next server to be elected in round robin mode */
+};
+
+#endif /* _HAPROXY_LB_MAP_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/lb_map.h b/include/haproxy/lb_map.h
new file mode 100644
index 0000000..ca483b2
--- /dev/null
+++ b/include/haproxy/lb_map.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/lb_map.h
+ * Map-based load-balancing (RR and HASH)
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LB_MAP_H
+#define _HAPROXY_LB_MAP_H
+
+#include <haproxy/api.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+
+void recalc_server_map(struct proxy *px);
+void init_server_map(struct proxy *p);
+struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid);
+struct server *map_get_server_hash(struct proxy *px, unsigned int hash);
+
+#endif /* _HAPROXY_LB_MAP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/linuxcap.h b/include/haproxy/linuxcap.h
new file mode 100644
index 0000000..9c337a4
--- /dev/null
+++ b/include/haproxy/linuxcap.h
@@ -0,0 +1,7 @@
+#ifndef _HAPROXY_LINUXCAP_H
+#define _HAPROXY_LINUXCAP_H
+
+int prepare_caps_for_setuid(int from_uid, int to_uid);
+int finalize_caps_after_setuid(int from_uid, int to_uid);
+
+#endif /* _HAPROXY_LINUXCAP_H */
diff --git a/include/haproxy/list-t.h b/include/haproxy/list-t.h
new file mode 100644
index 0000000..dd8493e
--- /dev/null
+++ b/include/haproxy/list-t.h
@@ -0,0 +1,73 @@
+/*
+ * include/haproxy/list-t.h
+ * Circular list manipulation types definitions
+ *
+ * Copyright (C) 2002-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LIST_T_H
+#define _HAPROXY_LIST_T_H
+
+
+/* these are circular or bidirectional lists only. Each list pointer points to
+ * another list pointer in a structure, and not the structure itself. The
+ * pointer to the next element MUST be the first one so that the list is easily
+ * cast as a single linked list or pointer.
+ */
+struct list {
+ struct list *n; /* next */
+ struct list *p; /* prev */
+};
+
+/* This is similar to struct list, but we want to be sure the compiler will
+ * yell at you if you use macros for one when you're using the other. You have
+ * to explicitly cast if that's really what you want to do.
+ */
+struct mt_list {
+ struct mt_list *next;
+ struct mt_list *prev;
+};
+
+
+/* a back-ref is a pointer to a target list entry. It is used to detect when an
+ * element being deleted is currently being tracked by another user. The best
+ * example is a user dumping the session table. The table does not fit in the
+ * output buffer so we have to set a mark on a session and go on later. But if
+ * that marked session gets deleted, we don't want the user's pointer to go in
+ * the wild. So we can simply link this user's request to the list of this
+ * session's users, and put a pointer to the list element in ref, that will be
+ * used as the mark for next iteration.
+ */
+struct bref {
+ struct list users;
+ struct list *ref; /* pointer to the target's list entry */
+};
+
+/* a word list is a generic list with a pointer to a string in each element. */
+struct wordlist {
+ struct list list;
+ char *s;
+};
+
+/* this is the same as above with an additional pointer to a condition. */
+struct cond_wordlist {
+ struct list list;
+ void *cond;
+ char *s;
+};
+
+#endif /* _HAPROXY_LIST_T_H */
diff --git a/include/haproxy/list.h b/include/haproxy/list.h
new file mode 100644
index 0000000..368e6d7
--- /dev/null
+++ b/include/haproxy/list.h
@@ -0,0 +1,907 @@
+/*
+ * include/haproxy/list.h
+ * Circular list manipulation macros and functions.
+ *
+ * Copyright (C) 2002-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LIST_H
+#define _HAPROXY_LIST_H
+
+#include <haproxy/api.h>
+#include <haproxy/thread.h>
+
+/* First undefine some macros which happen to also be defined on OpenBSD,
+ * in sys/queue.h, used by sys/event.h
+ */
+#undef LIST_HEAD
+#undef LIST_INIT
+#undef LIST_NEXT
+
+/* ILH = Initialized List Head : used to prevent gcc from moving an empty
+ * list to BSS. Some older versions tend to trim all the array and cause
+ * corruption.
+ */
+#define ILH { .n = (struct list *)1, .p = (struct list *)2 }
+
+#define LIST_HEAD(a) ((void *)(&(a)))
+
+#define LIST_INIT(l) ((l)->n = (l)->p = (l))
+
+#define LIST_HEAD_INIT(l) { &l, &l }
+
+/* adds an element at the beginning of a list ; returns the element */
+#define LIST_INSERT(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); (el); })
+
+/* adds an element at the end of a list ; returns the element */
+#define LIST_APPEND(lh, el) ({ (el)->p = (lh)->p; (el)->p->n = (lh)->p = (el); (el)->n = (lh); (el); })
+
+/* adds the contents of a list <old> at the beginning of another list <new>. The old list head remains untouched. */
+#define LIST_SPLICE(new, old) do { \
+ if (!LIST_ISEMPTY(old)) { \
+ (old)->p->n = (new)->n; (old)->n->p = (new); \
+ (new)->n->p = (old)->p; (new)->n = (old)->n; \
+ } \
+ } while (0)
+
+/* adds the contents of a list whose first element is <old> and last one is
+ * <old->prev> at the end of another list <new>. The old list DOES NOT have
+ * any head here.
+ */
+#define LIST_SPLICE_END_DETACHED(new, old) do { \
+ typeof(new) __t; \
+ (new)->p->n = (old); \
+ (old)->p->n = (new); \
+ __t = (old)->p; \
+ (old)->p = (new)->p; \
+ (new)->p = __t; \
+ } while (0)
+
+/* removes an element from a list and returns it */
+#if defined(DEBUG_LIST)
+/* purposely corrupt the detached element to detect use-after-delete */
+#define LIST_DELETE(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; *(__ret) = (struct list)ILH; (__ret);})
+#else
+#define LIST_DELETE(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; (__ret); })
+#endif
+
+/* removes an element from a list, initializes it and returns it.
+ * This is faster than LIST_DELETE+LIST_INIT as we avoid reloading the pointers.
+ */
+#define LIST_DEL_INIT(el) ({ \
+ typeof(el) __ret = (el); \
+ typeof(__ret->n) __n = __ret->n; \
+ typeof(__ret->p) __p = __ret->p; \
+ __n->p = __p; __p->n = __n; \
+ __ret->n = __ret->p = __ret; \
+ __ret; \
+})
+
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define LIST_ISEMPTY(lh) ((lh)->n == (lh))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using LIST_DEL_INIT)
+ */
+#define LIST_INLIST(el) ((el)->n != (el))
+
+/* atomically checks if the list element's next pointer points to anything
+ * different from itself, implying the element should be part of a list. This
+ * usually is similar to LIST_INLIST() except that while that one might be
+ * instrumented using debugging code to perform further consistency checks,
+ * the macro below guarantees to always perform a single atomic test and is
+ * safe to use with barriers.
+ */
+#define LIST_INLIST_ATOMIC(el) ({ \
+ typeof(el) __ptr = (el); \
+ HA_ATOMIC_LOAD(&(__ptr)->n) != __ptr; \
+})
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: LIST_NEXT(args, struct node *, list)
+ */
+#define LIST_NEXT(lh, pt, el) (LIST_ELEM((lh)->n, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef LIST_PREV
+#define LIST_PREV(lh, pt, el) (LIST_ELEM((lh)->p, pt, el))
+
+/*
+ * Simpler FOREACH_ITEM macro inspired from Linux sources.
+ * Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct list" member named <member>. A pointer to the head of
+ * the list is passed in <list_head>. No temporary variable is needed. Note
+ * that <item> must not be modified during the loop.
+ * Example: list_for_each_entry(cur_acl, known_acl, list) { ... };
+ */
+#define list_for_each_entry(item, list_head, member) \
+ for (item = LIST_ELEM((list_head)->n, typeof(item), member); \
+ &item->member != (list_head); \
+ item = LIST_ELEM(item->member.n, typeof(item), member))
+
+/*
+ * Same as list_for_each_entry but starting from current point
+ * Iterates <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_from(item, list_head, member) \
+ for ( ; &item->member != (list_head); \
+ item = LIST_ELEM(item->member.n, typeof(item), member))
+
+/*
+ * Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
+ * Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct list" member named <member>. A pointer to the head of
+ * the list is passed in <list_head>. A temporary variable <back> of same type
+ * as <item> is needed so that <item> may safely be deleted if needed.
+ * Example: list_for_each_entry_safe(cur_acl, tmp, known_acl, list) { ... };
+ */
+#define list_for_each_entry_safe(item, back, list_head, member) \
+ for (item = LIST_ELEM((list_head)->n, typeof(item), member), \
+ back = LIST_ELEM(item->member.n, typeof(item), member); \
+ &item->member != (list_head); \
+ item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
+
+
+/*
+ * Same as list_for_each_entry_safe but starting from current point
+ * Iterates <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_safe_from(item, back, list_head, member) \
+ for (back = LIST_ELEM(item->member.n, typeof(item), member); \
+ &item->member != (list_head); \
+ item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
+
+/*
+ * Iterate backwards <item> through a list of items of type "typeof(*item)"
+ * which are linked via a "struct list" member named <member>. A pointer to
+ * the head of the list is passed in <list_head>. No temporary variable is
+ * needed. Note that <item> must not be modified during the loop.
+ * Example: list_for_each_entry_rev(cur_acl, known_acl, list) { ... };
+ */
+#define list_for_each_entry_rev(item, list_head, member) \
+ for (item = LIST_ELEM((list_head)->p, typeof(item), member); \
+ &item->member != (list_head); \
+ item = LIST_ELEM(item->member.p, typeof(item), member))
+
+/*
+ * Same as list_for_each_entry_rev but starting from current point
+ * Iterate backwards <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_from_rev(item, list_head, member) \
+ for ( ; &item->member != (list_head); \
+ item = LIST_ELEM(item->member.p, typeof(item), member))
+
+/*
+ * Iterate backwards <item> through a list of items of type "typeof(*item)"
+ * which are linked via a "struct list" member named <member>. A pointer to
+ * the head of the list is passed in <list_head>. A temporary variable <back>
+ * of same type as <item> is needed so that <item> may safely be deleted
+ * if needed.
+ * Example: list_for_each_entry_safe_rev(cur_acl, tmp, known_acl, list) { ... };
+ */
+#define list_for_each_entry_safe_rev(item, back, list_head, member) \
+ for (item = LIST_ELEM((list_head)->p, typeof(item), member), \
+ back = LIST_ELEM(item->member.p, typeof(item), member); \
+ &item->member != (list_head); \
+ item = back, back = LIST_ELEM(back->member.p, typeof(back), member))
+
+/*
+ * Same as list_for_each_entry_safe_rev but starting from current point
+ * Iterate backwards <item> through the list starting from <item>
+ * It's basically the same macro but without initializing item to the head of
+ * the list.
+ */
+#define list_for_each_entry_safe_from_rev(item, back, list_head, member) \
+ for (back = LIST_ELEM(item->member.p, typeof(item), member); \
+ &item->member != (list_head); \
+ item = back, back = LIST_ELEM(back->member.p, typeof(back), member))
+
+
+/*
+ * Locked version of list manipulation macros.
+ * It is OK to use those concurrently from multiple threads, as long as the
+ * list is only used with the locked variants.
+ */
+#define MT_LIST_BUSY ((struct mt_list *)1)
+
+/*
+ * Add an item at the beginning of a list.
+ * Returns 1 if we added the item, 0 otherwise (because it was already in a
+ * list).
+ */
+#define MT_LIST_TRY_INSERT(_lh, _el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n, *n2; \
+ struct mt_list *p, *p2; \
+ n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY); \
+ if (n2 != el) { /* element already linked */ \
+ if (n2 != MT_LIST_BUSY) \
+ el->next = n2; \
+ n->prev = p; \
+ __ha_barrier_store(); \
+ lh->next = n; \
+ __ha_barrier_store(); \
+ if (n2 == MT_LIST_BUSY) \
+ continue; \
+ break; \
+ } \
+ p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY); \
+ if (p2 != el) { \
+ if (p2 != MT_LIST_BUSY) \
+ el->prev = p2; \
+ n->prev = p; \
+ el->next = el; \
+ __ha_barrier_store(); \
+ lh->next = n; \
+ __ha_barrier_store(); \
+ if (p2 == MT_LIST_BUSY) \
+ continue; \
+ break; \
+ } \
+ (el)->next = n; \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ n->prev = (el); \
+ __ha_barrier_store(); \
+ p->next = (el); \
+ __ha_barrier_store(); \
+ _ret = 1; \
+ break; \
+ } \
+ (_ret); \
+ })
+
+/*
+ * Add an item at the end of a list.
+ * Returns 1 if we added the item, 0 otherwise (because it was already in a
+ * list).
+ */
+#define MT_LIST_TRY_APPEND(_lh, _el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n, *n2; \
+ struct mt_list *p, *p2; \
+ p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) \
+ continue; \
+ n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) { \
+ (lh)->prev = p; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY); \
+ if (p2 != el) { \
+ if (p2 != MT_LIST_BUSY) \
+ el->prev = p2; \
+ p->next = n; \
+ __ha_barrier_store(); \
+ lh->prev = p; \
+ __ha_barrier_store(); \
+ if (p2 == MT_LIST_BUSY) \
+ continue; \
+ break; \
+ } \
+ n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY); \
+ if (n2 != el) { /* element already linked */ \
+ if (n2 != MT_LIST_BUSY) \
+ el->next = n2; \
+ p->next = n; \
+ el->prev = el; \
+ __ha_barrier_store(); \
+ lh->prev = p; \
+ __ha_barrier_store(); \
+ if (n2 == MT_LIST_BUSY) \
+ continue; \
+ break; \
+ } \
+ (el)->next = n; \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ p->next = (el); \
+ __ha_barrier_store(); \
+ n->prev = (el); \
+ __ha_barrier_store(); \
+ _ret = 1; \
+ break; \
+ } \
+ (_ret); \
+ })
+
+/*
+ * Add an item at the beginning of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked.
+ */
+#define MT_LIST_INSERT(_lh, _el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n; \
+ struct mt_list *p; \
+ n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ (el)->next = n; \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ n->prev = (el); \
+ __ha_barrier_store(); \
+ p->next = (el); \
+ __ha_barrier_store(); \
+ _ret = 1; \
+ break; \
+ } \
+ (_ret); \
+ })
+
+/*
+ * Add an item at the end of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked
+ */
+#define MT_LIST_APPEND(_lh, _el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n; \
+ struct mt_list *p; \
+ p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) \
+ continue; \
+ n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) { \
+ (lh)->prev = p; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ (el)->next = n; \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ p->next = (el); \
+ __ha_barrier_store(); \
+ n->prev = (el); \
+ __ha_barrier_store(); \
+ _ret = 1; \
+ break; \
+ } \
+ (_ret); \
+ })
+
+/*
+ * Add an item at the end of a list.
+ * It is assumed the element can't already be in a list, so it isn't checked
+ * Item will be added in busy/locked state, so that it is already
+ * referenced in the list but no other thread can use it until we're ready.
+ *
+ * This returns a struct mt_list, that will be needed at unlock time.
+ * (using MT_LIST_UNLOCK_ELT)
+ */
+#define MT_LIST_APPEND_LOCKED(_lh, _el) \
+ ({ \
+ struct mt_list np; \
+ struct mt_list *lh = (_lh), *el = (_el); \
+ (el)->next = MT_LIST_BUSY; \
+ (el)->prev = MT_LIST_BUSY; \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n; \
+ struct mt_list *p; \
+ p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) \
+ continue; \
+ n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) { \
+ (lh)->prev = p; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ np.prev = p; \
+ np.next = n; \
+ break; \
+ } \
+ (np); \
+ })
+
+/*
+ * Detach a list from its head. A pointer to the first element is returned
+ * and the list is closed. If the list was empty, NULL is returned. This may
+ * exclusively be used with lists modified by MT_LIST_TRY_INSERT/MT_LIST_TRY_APPEND. This
+ * is incompatible with MT_LIST_DELETE run concurrently.
+ * If there's at least one element, the next of the last element will always
+ * be NULL.
+ */
+#define MT_LIST_BEHEAD(_lh) ({ \
+ struct mt_list *lh = (_lh); \
+ struct mt_list *_n; \
+ struct mt_list *_p; \
+ for (;;__ha_cpu_relax()) { \
+ _p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY); \
+ if (_p == MT_LIST_BUSY) \
+ continue; \
+ if (_p == (lh)) { \
+ (lh)->prev = _p; \
+ __ha_barrier_store(); \
+ _n = NULL; \
+ break; \
+ } \
+ _n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+ if (_n == MT_LIST_BUSY) { \
+ (lh)->prev = _p; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ if (_n == (lh)) { \
+ (lh)->next = _n; \
+ (lh)->prev = _p; \
+ __ha_barrier_store(); \
+ _n = NULL; \
+ break; \
+ } \
+ (lh)->next = (lh); \
+ (lh)->prev = (lh); \
+ __ha_barrier_store(); \
+ _n->prev = _p; \
+ __ha_barrier_store(); \
+ _p->next = NULL; \
+ __ha_barrier_store(); \
+ break; \
+ } \
+ (_n); \
+})
+
+
+/* Remove an item from a list.
+ * Returns 1 if we removed the item, 0 otherwise (because it was in no list).
+ */
+#define MT_LIST_DELETE(_el) \
+ ({ \
+ int _ret = 0; \
+ struct mt_list *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n, *n2; \
+ struct mt_list *p, *p2 = NULL; \
+ n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ if (p != (el)) { \
+ p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY); \
+ if (p2 == MT_LIST_BUSY) { \
+ (el)->prev = p; \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ if (n != (el)) { \
+ n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+ if (n2 == MT_LIST_BUSY) { \
+ if (p2 != NULL) \
+ p->next = p2; \
+ (el)->prev = p; \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ n->prev = p; \
+ p->next = n; \
+ if (p != (el) && n != (el)) \
+ _ret = 1; \
+ __ha_barrier_store(); \
+ (el)->prev = (el); \
+ (el)->next = (el); \
+ __ha_barrier_store(); \
+ break; \
+ } \
+ (_ret); \
+ })
+
+
+/* Remove the first element from the list, and return it */
+#define MT_LIST_POP(_lh, pt, el) \
+ ({ \
+ void *_ret; \
+ struct mt_list *lh = (_lh); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n, *n2; \
+ struct mt_list *p, *p2; \
+ n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ if (n == (lh)) { \
+ (lh)->next = lh; \
+ __ha_barrier_store(); \
+ _ret = NULL; \
+ break; \
+ } \
+ p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY); \
+ if (n2 == MT_LIST_BUSY) { \
+ n->prev = p; \
+ __ha_barrier_store(); \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY); \
+ if (p2 == MT_LIST_BUSY) { \
+ n->next = n2; \
+ n->prev = p; \
+ __ha_barrier_store(); \
+ (lh)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ (lh)->next = n2; \
+ (n2)->prev = (lh); \
+ __ha_barrier_store(); \
+ (n)->prev = (n); \
+ (n)->next = (n); \
+ __ha_barrier_store(); \
+ _ret = MT_LIST_ELEM(n, pt, el); \
+ break; \
+ } \
+ (_ret); \
+ })
+
+#define MT_LIST_HEAD(a) ((void *)(&(a)))
+
+#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))
+
+#define MT_LIST_HEAD_INIT(l) { &l, &l }
+/* returns a pointer of type <pt> to a structure containing a list head called
+ * <el> at address <lh>. Note that <lh> can be the result of a function or macro
+ * since it's used only once.
+ * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
+ */
+#define MT_LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))
+
+/* checks if the list head <lh> is empty or not */
+#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))
+
+/* returns a pointer of type <pt> to a structure following the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ * Example: MT_LIST_NEXT(args, struct node *, list)
+ */
+#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))
+
+
+/* returns a pointer of type <pt> to a structure preceding the element
+ * which contains list head <lh>, which is known as element <el> in
+ * struct pt.
+ */
+#undef MT_LIST_PREV
+#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))
+
+/* checks if the list element <el> was added to a list or not. This only
+ * works when detached elements are reinitialized (using LIST_DEL_INIT)
+ */
+#define MT_LIST_INLIST(el) ((el)->next != (el))
+
+/* Lock an element in the list, to be sure it won't be removed nor
+ * accessed by another thread while the lock is held.
+ * Locking behavior is inspired from MT_LIST_DELETE macro,
+ * thus this macro can safely be used concurrently with MT_LIST_DELETE.
+ * This returns a struct mt_list, that will be needed at unlock time.
+ * (using MT_LIST_UNLOCK_ELT)
+ */
+#define MT_LIST_LOCK_ELT(_el) \
+ ({ \
+ struct mt_list ret; \
+ struct mt_list *el = (_el); \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n, *n2; \
+ struct mt_list *p, *p2 = NULL; \
+ n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) { \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ if (p != (el)) { \
+ p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+ if (p2 == MT_LIST_BUSY) { \
+ (el)->prev = p; \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ if (n != (el)) { \
+ n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+ if (n2 == MT_LIST_BUSY) { \
+ if (p2 != NULL) \
+ p->next = p2; \
+ (el)->prev = p; \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ ret.next = n; \
+ ret.prev = p; \
+ break; \
+ } \
+ ret; \
+ })
+
+/* Unlock an element previously locked by MT_LIST_LOCK_ELT. "np" is the
+ * struct mt_list returned by MT_LIST_LOCK_ELT().
+ */
+#define MT_LIST_UNLOCK_ELT(_el, np) \
+ do { \
+ struct mt_list *n = (np).next, *p = (np).prev; \
+ struct mt_list *el = (_el); \
+ (el)->next = n; \
+ (el)->prev = p; \
+ if (n != (el)) \
+ n->prev = (el); \
+ if (p != (el)) \
+ p->next = (el); \
+ } while (0)
+
+/* Internal macros for the foreach macros */
+#define _MT_LIST_UNLOCK_NEXT(el, np) \
+ do { \
+ struct mt_list *n = (np); \
+ (el)->next = n; \
+ if (n != (el)) \
+ n->prev = (el); \
+ } while (0)
+
+/* Internal macros for the foreach macros */
+#define _MT_LIST_UNLOCK_PREV(el, np) \
+ do { \
+ struct mt_list *p = (np); \
+ (el)->prev = p; \
+ if (p != (el)) \
+ p->next = (el); \
+ } while (0)
+
+#define _MT_LIST_LOCK_NEXT(el) \
+ ({ \
+ struct mt_list *n = NULL; \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *n2; \
+ n = _HA_ATOMIC_XCHG(&((el)->next), MT_LIST_BUSY); \
+ if (n == MT_LIST_BUSY) \
+ continue; \
+ if (n != (el)) { \
+ n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
+ if (n2 == MT_LIST_BUSY) { \
+ (el)->next = n; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ break; \
+ } \
+ n; \
+ })
+
+#define _MT_LIST_LOCK_PREV(el) \
+ ({ \
+ struct mt_list *p = NULL; \
+ for (;;__ha_cpu_relax()) { \
+ struct mt_list *p2; \
+ p = _HA_ATOMIC_XCHG(&((el)->prev), MT_LIST_BUSY); \
+ if (p == MT_LIST_BUSY) \
+ continue; \
+ if (p != (el)) { \
+ p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
+ if (p2 == MT_LIST_BUSY) { \
+ (el)->prev = p; \
+ __ha_barrier_store(); \
+ continue; \
+ } \
+ } \
+ break; \
+ } \
+ p; \
+ })
+
+#define _MT_LIST_RELINK_DELETED(elt2) \
+ do { \
+ struct mt_list *n = elt2.next, *p = elt2.prev; \
+ ALREADY_CHECKED(p); \
+ n->prev = p; \
+ p->next = n; \
+ } while (0);
+
+/* Equivalent of MT_LIST_DELETE(), to be used when parsing the list with mt_list_for_each_entry_safe().
+ * It should be the element currently parsed (tmpelt1)
+ */
+#define MT_LIST_DELETE_SAFE(_el) \
+ do { \
+ struct mt_list *el = (_el); \
+ (el)->prev = (el); \
+ (el)->next = (el); \
+ (_el) = NULL; \
+ } while (0)
+
+/* Safe as MT_LIST_DELETE_SAFE, but it won't reinit the element */
+#define MT_LIST_DELETE_SAFE_NOINIT(_el) \
+ do { \
+ (_el) = NULL; \
+ } while (0)
+
+/* Iterates <item> through a list of items of type "typeof(*item)" which are
+ * linked via a "struct mt_list" member named <member>. A pointer to the head
+ * of the list is passed in <list_head>.
+ *
+ * <tmpelt> is a temporary struct mt_list *, and <tmpelt2> is a temporary
+ * struct mt_list, used internally, both are needed for MT_LIST_DELETE_SAFE.
+ *
+ * This macro is implemented using a nested loop. The inner loop will run for
+ * each element in the list, and the upper loop will run only once to do some
+ * cleanup when the end of the list is reached or user breaks from inner loop.
+ * It's safe to break from this macro as the cleanup will be performed anyway,
+ * but it is strictly forbidden to goto from the loop because skipping the
+ * cleanup will lead to undefined behavior.
+ *
+ * In order to remove the current element, please use MT_LIST_DELETE_SAFE.
+ *
+ * Example:
+ * mt_list_for_each_entry_safe(item, list_head, list_member, elt1, elt2) {
+ * ...
+ * }
+ */
+#define mt_list_for_each_entry_safe(item, list_head, member, tmpelt, tmpelt2) \
+ for ((tmpelt) = NULL; (tmpelt) != MT_LIST_BUSY; ({ \
+ /* post loop cleanup: \
+ * gets executed only once to perform cleanup \
+ * after child loop has finished \
+ */ \
+ if (tmpelt) { \
+ /* last elem still exists, unlocking it */ \
+ if (tmpelt2.prev) \
+ MT_LIST_UNLOCK_ELT(tmpelt, tmpelt2); \
+ else { \
+ /* special case: child loop did not run \
+ * so tmpelt2.prev == NULL \
+ * (empty list) \
+ */ \
+ _MT_LIST_UNLOCK_NEXT(tmpelt, tmpelt2.next); \
+ } \
+ } else { \
+ /* last elem was deleted by user, relink required: \
+ * prev->next = next \
+ * next->prev = prev \
+ */ \
+ _MT_LIST_RELINK_DELETED(tmpelt2); \
+ } \
+ /* break parent loop \
+ * (this loop runs exactly one time) \
+ */ \
+ (tmpelt) = MT_LIST_BUSY; \
+ })) \
+ for ((tmpelt) = (list_head), (tmpelt2).prev = NULL, (tmpelt2).next = _MT_LIST_LOCK_NEXT(tmpelt); ({ \
+ /* this gets executed before each user body loop */ \
+ (item) = MT_LIST_ELEM((tmpelt2.next), typeof(item), member); \
+ if (&item->member != (list_head)) { \
+ /* did not reach end of list \
+ * (back to list_head == end of list reached) \
+ */ \
+ if (tmpelt2.prev != &item->member) \
+ tmpelt2.next = _MT_LIST_LOCK_NEXT(&item->member); \
+ else { \
+ /* FIXME: is this even supposed to happen?? \
+ * I'm not understanding how \
+ * tmpelt2.prev could be equal to &item->member. \
+ * running 'test_list' multiple times with 8 \
+ * concurrent threads: this never gets reached \
+ */ \
+ tmpelt2.next = tmpelt; \
+ } \
+ if (tmpelt != NULL) { \
+ /* if tmpelt was not deleted by user */ \
+ if (tmpelt2.prev) { \
+ /* not executed on first run \
+ * (tmpelt2.prev == NULL on first run) \
+ */ \
+ _MT_LIST_UNLOCK_PREV(tmpelt, tmpelt2.prev); \
+ /* unlock_prev will implicitly relink: \
+ * elt->prev = prev \
+ * prev->next = elt \
+ */ \
+ } \
+ tmpelt2.prev = tmpelt; \
+ } \
+ (tmpelt) = &item->member; \
+ } \
+ /* else: end of list reached (loop stop cond) */ \
+ }), \
+ &item->member != (list_head);)
+
+static __inline struct list *mt_list_to_list(struct mt_list *list)
+{
+ union {
+ struct mt_list *mt_list;
+ struct list *list;
+ } mylist;
+
+ mylist.mt_list = list;
+ return mylist.list;
+}
+
+static __inline struct mt_list *list_to_mt_list(struct list *list)
+{
+ union {
+ struct mt_list *mt_list;
+ struct list *list;
+ } mylist;
+
+ mylist.list = list;
+ return mylist.mt_list;
+
+}
+
+#endif /* _HAPROXY_LIST_H */
diff --git a/include/haproxy/listener-t.h b/include/haproxy/listener-t.h
new file mode 100644
index 0000000..7f5e52a
--- /dev/null
+++ b/include/haproxy/listener-t.h
@@ -0,0 +1,317 @@
+/*
+ * include/haproxy/listener-t.h
+ * This file defines the structures needed to manage listeners.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LISTENER_T_H
+#define _HAPROXY_LISTENER_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/quic_cc-t.h>
+#include <haproxy/quic_sock-t.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/receiver-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/thread.h>
+
+/* Some pointer types reference below */
+struct task;
+struct protocol;
+struct xprt_ops;
+struct proxy;
+struct fe_counters;
+struct connection;
+
+/* listener state */
+enum li_state {
+ LI_NEW = 0, /* not initialized yet */
+ LI_INIT, /* all parameters filled in, but not assigned yet */
+ LI_ASSIGNED, /* assigned to the protocol, but not listening yet */
+ LI_PAUSED, /* listener was paused, it's bound but not listening */
+ LI_LISTEN, /* started, listening but not enabled */
+ LI_READY, /* started, listening and enabled */
+ LI_FULL, /* reached its connection limit */
+ LI_LIMITED, /* transient state: limits have been reached, listener is queued */
+} __attribute__((packed));
+
+/* Listener transitions
+ * calloc() set() add_listener() bind()
+ * -------> NEW ----> INIT ----------> ASSIGNED -----> LISTEN
+ * <------- <---- <---------- <-----
+ * free() bzero() del_listener() unbind()
+ *
+ * The file descriptor is valid only during these three states :
+ *
+ * disable()
+ * LISTEN <------------ READY
+ * A| ------------> |A
+ * || !max & enable() ||
+ * || ||
+ * || max ||
+ * || max & enable() V| !max
+ * |+---------------> FULL
+ * +-----------------
+ * disable()
+ *
+ * The LIMITED state may be used when a limit has been detected just before
+ * using a listener. In this case, the listener MUST be queued into the
+ * appropriate wait queue (either the proxy's or the global one). It may be
+ * set back to the READY state at any instant and for any reason, so one must
+ * not rely on this state.
+ */
+
+/* listener status for stats */
+enum li_status {
+ LI_STATUS_WAITING = 0,
+ LI_STATUS_OPEN,
+ LI_STATUS_FULL,
+
+ LI_STATE_COUNT /* must be last */
+};
+
+/* Note: if a bind_conf uses BC_O_UNLIMITED, it is highly recommended that it adds its own
+ * maxconn setting to the global.maxsock value so that its resources are reserved.
+ */
+
+/* flags used with bind_conf->options */
+#define BC_O_USE_SSL 0x00000001 /* SSL is being used on this bind_conf */
+#define BC_O_GENERATE_CERTS 0x00000002 /* 1 if generate-certificates option is set, else 0 */
+#define BC_O_QUIC_FORCE_RETRY 0x00000004 /* always send Retry on reception of Initial without token */
+#define BC_O_USE_SOCK_DGRAM 0x00000008 /* at least one datagram-type listener is used */
+#define BC_O_USE_SOCK_STREAM 0x00000010 /* at least one stream-type listener is used */
+#define BC_O_USE_XPRT_DGRAM 0x00000020 /* at least one dgram-only xprt listener is used */
+#define BC_O_USE_XPRT_STREAM 0x00000040 /* at least one stream-only xprt listener is used */
+#define BC_O_NOLINGER 0x00000080 /* disable lingering on these listeners */
+#define BC_O_NOQUICKACK 0x00000100 /* disable quick ack of immediate data (linux) */
+#define BC_O_DEF_ACCEPT 0x00000200 /* wait up to 1 second for data before accepting */
+#define BC_O_TCP_FO 0x00000400 /* enable TCP Fast Open (linux >= 3.7) */
+#define BC_O_ACC_PROXY 0x00000800 /* find the proxied address in the first request line */
+#define BC_O_ACC_CIP 0x00001000 /* find the proxied address in the NetScaler Client IP header */
+#define BC_O_UNLIMITED 0x00002000 /* listeners not subject to global limits (peers & stats socket) */
+#define BC_O_NOSTOP 0x00004000 /* keep the listeners active even after a soft stop */
+#define BC_O_REVERSE_HTTP 0x00008000 /* a reverse HTTP bind is used */
+#define BC_O_XPRT_MAXCONN 0x00010000 /* transport layer allocates its own resource prior to accept and is responsible to check maxconn limit */
+
+
+/* flags used with bind_conf->ssl_options */
+#ifdef USE_OPENSSL
+#define BC_SSL_O_NONE 0x0000
+#define BC_SSL_O_NO_TLS_TICKETS 0x0100 /* disable session resumption tickets */
+#define BC_SSL_O_PREF_CLIE_CIPH 0x0200 /* prefer client ciphers */
+#endif
+
+struct tls_version_filter {
+ uint16_t flags; /* ssl options */
+ uint8_t min; /* min TLS version */
+ uint8_t max; /* max TLS version */
+};
+
+/* ssl "bind" settings */
+struct ssl_bind_conf {
+#ifdef USE_OPENSSL
+ char *npn_str; /* NPN protocol string */
+ int npn_len; /* NPN protocol string length */
+ char *alpn_str; /* ALPN protocol string */
+ int alpn_len; /* ALPN protocol string length */
+ unsigned int verify:3; /* verify method (set of SSL_VERIFY_* flags) */
+ unsigned int no_ca_names:1;/* do not send ca names to clients (ca_file related) */
+ unsigned int early_data:1; /* early data allowed */
+ unsigned int ocsp_update:2;/* enable OCSP auto update */
+ char *ca_file; /* CAfile to use on verify and ca-names */
+ char *ca_verify_file; /* CAverify file to use on verify only */
+ char *crl_file; /* CRLfile to use on verify */
+ char *ciphers; /* cipher suite to use if non-null */
+ char *ciphersuites; /* TLS 1.3 cipher suite to use if non-null */
+ char *curves; /* curves suite to use for ECDHE */
+ char *ecdhe; /* named curve to use for ECDHE */
+ char *sigalgs; /* Signature algorithms */
+ char *client_sigalgs; /* Client Signature algorithms */
+ struct tls_version_filter ssl_methods_cfg; /* original ssl methods found in configuration */
+ struct tls_version_filter ssl_methods; /* actual ssl methods used at runtime */
+#endif
+};
+
+/*
+ * In OpenSSL 3.0.0, the biggest verify error code's value is 94 and on the
+ * latest 1.1.1 it already reaches 79 so we need to size the ca/crt-ignore-err
+ * arrays accordingly. If the max error code increases, the arrays might need to
+ * be resized.
+ */
+#define SSL_MAX_VFY_ERROR_CODE 94
+#define IGNERR_BF_SIZE ((SSL_MAX_VFY_ERROR_CODE >> 6) + 1)
+
+/* "bind" line settings */
+struct bind_conf {
+#ifdef USE_OPENSSL
+ struct ssl_bind_conf ssl_conf; /* ssl conf for ctx setting */
+ unsigned long long ca_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth > 0 */
+ unsigned long long crt_ignerr_bitfield[IGNERR_BF_SIZE]; /* ignored verify errors in handshake if depth == 0 */
+ void *initial_ctx; /* SSL context for initial negotiation */
+ void *default_ctx; /* SSL context of first/default certificate */
+ struct ckch_inst *default_inst;
+ struct ssl_bind_conf *default_ssl_conf; /* custom SSL conf of default_ctx */
+ int strict_sni; /* refuse negotiation if sni doesn't match a certificate */
+ int ssl_options; /* ssl options */
+ struct eb_root sni_ctx; /* sni_ctx tree of all known certs full-names sorted by name */
+ struct eb_root sni_w_ctx; /* sni_ctx tree of all known certs wildcards sorted by name */
+ struct tls_keys_ref *keys_ref; /* TLS ticket keys reference */
+
+ char *ca_sign_file; /* CAFile used to generate and sign server certificates */
+ char *ca_sign_pass; /* CAKey passphrase */
+
+ struct ckch_data *ca_sign_ckch; /* CA and possible certificate chain for ca generation */
+#endif
+#ifdef USE_QUIC
+ struct quic_transport_params quic_params; /* QUIC transport parameters. */
+ struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
+ size_t max_cwnd; /* QUIC maximum congestion control window size (kB) */
+ enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
+#endif
+ struct proxy *frontend; /* the frontend all these listeners belong to, or NULL */
+ const struct mux_proto_list *mux_proto; /* the mux to use for all incoming connections (specified by the "proto" keyword) */
+ struct xprt_ops *xprt; /* transport-layer operations for all listeners */
+ uint options; /* set of BC_O_* flags */
+ unsigned int analysers; /* bitmap of required protocol analysers */
+ int maxseg; /* for TCP, advertised MSS */
+ int tcp_ut; /* for TCP, user timeout */
+ int maxaccept; /* if set, max number of connections accepted at once (-1 when disabled) */
+ unsigned int backlog; /* if set, listen backlog */
+ int maxconn; /* maximum connections allowed on this listener */
+ int (*accept)(struct connection *conn); /* upper layer's accept() */
+ int level; /* stats access level (ACCESS_LVL_*) */
+ int severity_output; /* default severity output format in cli feedback messages */
+ short int nice; /* nice value to assign to the instantiated tasks */
+ /* 2-byte hole here */
+ struct list listeners; /* list of listeners using this bind config */
+ uint32_t ns_cip_magic; /* Expected NetScaler Client IP magic number */
+ struct list by_fe; /* next binding for the same frontend, or NULL */
+ char *arg; /* argument passed to "bind" for better error reporting */
+ char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ char *rhttp_srvname; /* name of server when using "rhttp@" address */
+ int rhttp_nbconn; /* count of connections to initiate in parallel */
+ __decl_thread(HA_RWLOCK_T sni_lock); /* lock the SNI trees during add/del operations */
+ struct thread_set thread_set; /* entire set of the allowed threads (0=no restriction) */
+ struct rx_settings settings; /* all the settings needed for the listening socket */
+};
+
+/* Fields of a listener allocated per thread */
+struct li_per_thread {
+ struct {
+ struct mt_list list; /* list element in the QUIC accept queue */
+ struct mt_list conns; /* list of QUIC connections from this listener ready to be accepted */
+ } quic_accept;
+
+ struct listener *li; /* back reference on the listener */
+};
+
+
+/* The listener will be directly referenced by the fdtab[] which holds its
+ * socket. The listener provides the protocol-specific accept() function to
+ * the fdtab.
+ */
+struct listener {
+ enum obj_type obj_type; /* object type = OBJ_TYPE_LISTENER */
+ enum li_state state; /* state: NEW, INIT, ASSIGNED, LISTEN, READY, FULL */
+ uint16_t flags; /* listener flags: LI_F_* */
+ int luid; /* listener universally unique ID, used for SNMP */
+ int nbconn; /* current number of connections on this listener */
+ unsigned long thr_idx; /* thread indexes for queue distribution (see listener_accept()) */
+ __decl_thread(HA_RWLOCK_T lock);
+
+ struct fe_counters *counters; /* statistics counters */
+ struct mt_list wait_queue; /* link element to make the listener wait for something (LI_LIMITED) */
+ char *name; /* listener's name */
+
+ unsigned int thr_conn[MAX_THREADS_PER_GROUP]; /* number of connections per thread for the group */
+
+ struct list by_fe; /* chaining in frontend's list of listeners */
+ struct list by_bind; /* chaining in bind_conf's list of listeners */
+ struct bind_conf *bind_conf; /* "bind" line settings, include SSL settings among other things */
+ struct receiver rx; /* network receiver parts */
+ struct {
+ struct eb32_node id; /* place in the tree of used IDs */
+ } conf; /* config information */
+
+ struct li_per_thread *per_thr; /* per-thread fields (one per thread in the group) */
+
+ EXTRA_COUNTERS(extra_counters);
+};
+
+/* listener flags (16 bits) */
+#define LI_F_FINALIZED 0x0001 /* listener made it to the READY||LIMITED||FULL state at least once, may be suspended/resumed safely */
+#define LI_F_SUSPENDED 0x0002 /* listener has been suspended using suspend_listener(), it is either in LI_PAUSED or LI_ASSIGNED state */
+
+/* Descriptor for a "bind" keyword. The ->parse() function returns 0 in case of
+ * success, or a combination of ERR_* flags if an error is encountered. The
+ * function pointer can be NULL if not implemented. The function also has an
+ * access to the current "bind" config line. The ->skip value tells the parser
+ * how many words have to be skipped after the keyword.
+ */
+struct bind_kw {
+ const char *kw;
+ int (*parse)(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err);
+ int skip; /* nb of args to skip */
+ int rhttp_ok; /* non-zero if kw is supported for reverse HTTP bind */
+};
+
+/* same as bind_kw but for crtlist keywords */
+struct ssl_crtlist_kw {
+ const char *kw;
+ int (*parse)(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err);
+ int skip; /* nb of args to skip */
+};
+
+/*
+ * A keyword list. It is a NULL-terminated array of keywords. It embeds a
+ * struct list in order to be linked to other lists, allowing it to easily
+ * be declared where it is needed, and linked without duplicating data nor
+ * allocating memory. It is also possible to indicate a scope for the keywords.
+ */
+struct bind_kw_list {
+ const char *scope;
+ struct list list;
+ struct bind_kw kw[VAR_ARRAY];
+};
+
+/* The per-thread accept queue ring, must be a power of two minus 1 */
+#define ACCEPT_QUEUE_SIZE ((1<<10) - 1)
+
+/* head and tail are both 16 bits so that idx can be accessed atomically */
+struct accept_queue_ring {
+ uint32_t idx; /* (head << 16) | tail */
+ struct tasklet *tasklet; /* tasklet of the thread owning this ring */
+ struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
+};
+
+
+#endif /* _HAPROXY_LISTENER_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/listener.h b/include/haproxy/listener.h
new file mode 100644
index 0000000..5b3dc18
--- /dev/null
+++ b/include/haproxy/listener.h
@@ -0,0 +1,246 @@
+/*
+ * include/haproxy/listener.h
+ * This file declares listener management primitives.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LISTENER_H
+#define _HAPROXY_LISTENER_H
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/listener-t.h>
+
+struct proxy;
+struct task;
+
+int li_init_per_thr(struct listener *li);
+
+/* adjust the listener's state and its proxy's listener counters if needed */
+void listener_set_state(struct listener *l, enum li_state st);
+
+/* This function tries to temporarily disable a listener, depending on the OS
+ * capabilities. Linux unbinds the listen socket after a SHUT_RD, and ignores
+ * SHUT_WR. Solaris refuses either shutdown(). OpenBSD ignores SHUT_RD but
+ * closes upon SHUT_WR and refuses to rebind. So a common validation path
+ * involves SHUT_WR && listen && SHUT_RD. In case of success, the FD's polling
+ * is disabled. It normally returns non-zero, unless an error is reported.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * suspend() may totally stop a listener if it doesn't support the PAUSED
+ * state, in which case state will be set to ASSIGNED.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
+ */
+int suspend_listener(struct listener *l, int lpx, int lli);
+
+/* This function tries to resume a temporarily disabled listener.
+ * The resulting state will either be LI_READY or LI_FULL. 0 is returned
+ * in case of failure to resume (eg: dead socket).
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
+ */
+int resume_listener(struct listener *l, int lpx, int lli);
+
+/* Same as resume_listener(), but will only work to resume from
+ * LI_FULL or LI_LIMITED states because we try to relax listeners that
+ * were temporarily restricted and not to resume inactive listeners that
+ * may have been paused or completely stopped in the meantime.
+ * Returns positive value for success and 0 for failure.
+ * It will need to operate under the proxy's lock and the listener's lock.
+ * The caller is responsible for indicating in lpx, lli whether the respective
+ * locks are already held (non-zero) or not (zero) so that the function picks
+ * the missing ones, in this order.
+ */
+int relax_listener(struct listener *l, int lpx, int lli);
+
+/*
+ * This function completely stops a listener. It will need to operate under the
+ * proxy's lock, the protocol's and the listener's lock. The caller is
+ * responsible for indicating in lpx, lpr, lli whether the respective locks are
+ * already held (non-zero) or not (zero) so that the function picks the missing
+ * ones, in this order.
+ */
+void stop_listener(struct listener *l, int lpx, int lpr, int lli);
+
+/* This function adds the specified listener's file descriptor to the polling
+ * lists if it is in the LI_LISTEN state. The listener enters LI_READY or
+ * LI_FULL state depending on its number of connections. In daemon mode, we
+ * also support binding only the relevant processes to their respective
+ * listeners. We don't do that in debug mode however.
+ */
+void enable_listener(struct listener *listener);
+
+/* Dequeues all listeners waiting for a resource in the global wait queue */
+void dequeue_all_listeners(void);
+
+/* Dequeues all listeners waiting for a resource in proxy <px>'s queue */
+void dequeue_proxy_listeners(struct proxy *px);
+
+/* This function closes the listening socket for the specified listener,
+ * provided that it's already in a listening state. The listener enters the
+ * LI_ASSIGNED state, except if the FD is not closed, in which case it may
+ * remain in LI_LISTEN. Depending on the process's status (master or worker),
+ * the listener's bind options and the receiver's origin, it may or may not
+ * close the receiver's FD. Must be called with the lock held.
+ */
+void do_unbind_listener(struct listener *listener);
+
+/* This function closes the listening socket for the specified listener,
+ * provided that it's already in a listening state. The listener enters the
+ * LI_ASSIGNED state, except if the FD is not closed, in which case it may
+ * remain in LI_LISTEN. This function is intended to be used as a generic
+ * function for standard protocols.
+ */
+void unbind_listener(struct listener *listener);
+
+/* creates one or multiple listeners for bind_conf <bc> on sockaddr <ss> on port
+ * range <portl> to <porth>, and possibly attached to fd <fd> (or -1 for auto
+ * allocation). The address family is taken from ss->ss_family, and the protocol
+ * passed in <proto> must be usable on this family. The number of jobs and
+ * listeners is automatically increased by the number of listeners created. It
+ * returns non-zero on success, zero on error with the error message set in <err>.
+ */
+int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
+ int portl, int porth, int fd, struct protocol *proto, char **err);
+struct shard_info *shard_info_attach(struct receiver *rx, struct shard_info *si);
+void shard_info_detach(struct receiver *rx);
+struct listener *clone_listener(struct listener *src);
+
+/* Delete a listener from its protocol's list of listeners. The listener's
+ * state is automatically updated from LI_ASSIGNED to LI_INIT. The protocol's
+ * number of listeners is updated. Note that the listener must have previously
+ * been unbound. This is the generic function to use to remove a listener.
+ */
+void delete_listener(struct listener *listener);
+void __delete_listener(struct listener *listener);
+
+/* This function is called on a read event from a listening socket, corresponding
+ * to an accept. It tries to accept as many connections as possible, and for each
+ * calls the listener's accept handler (generally the frontend's accept handler).
+ */
+void listener_accept(struct listener *l);
+
+/* Returns a suitable value for a listener's backlog. It uses the listener's,
+ * otherwise the frontend's backlog, otherwise the listener's maxconn,
+ * otherwise the frontend's maxconn, otherwise 1024.
+ */
+int listener_backlog(const struct listener *l);
+
+/* Notify the listener that a connection initiated from it was released. This
+ * is used to keep the connection count consistent and to possibly re-open
+ * listening when it was limited.
+ */
+void listener_release(struct listener *l);
+
+/* This function adds the specified <listener> to the protocol <proto>. It
+ * does nothing if the protocol was already added. The listener's state is
+ * automatically updated from LI_INIT to LI_ASSIGNED. The number of listeners
+ * for the protocol is updated. This must be called with the proto lock held.
+ */
+void default_add_listener(struct protocol *proto, struct listener *listener);
+
+/* default function used to unbind a listener. This is for use by standard
+ * protocols working on top of accepted sockets. The receiver's rx_unbind()
+ * will automatically be used after the listener is disabled if the socket is
+ * still bound. This must be used under the listener's lock.
+ */
+void default_unbind_listener(struct listener *listener);
+
+/* default function called to suspend a listener: it simply passes the call to
+ * the underlying receiver. This is fine for most socket-based protocols. This
+ * must be called under the listener's lock. It will return non-zero on success,
+ * 0 on failure. If no receiver-level suspend is provided, the operation is
+ * assumed to succeed.
+ */
+int default_suspend_listener(struct listener *l);
+
+/* Tries to resume a suspended listener, and returns non-zero on success or
+ * zero on failure. On certain errors, an alert or a warning might be displayed.
+ * It must be called with the listener's lock held. Depending on the listener's
+ * state and protocol, a listen() call might be used to resume operations, or a
+ * call to the receiver's resume() function might be used as well. This is
+ * suitable as a default function for TCP and UDP. This must be called with the
+ * listener's lock held.
+ */
+int default_resume_listener(struct listener *l);
+
+/* Applies the thread mask, shards etc to the bind_conf. It normally returns 0
+ * otherwise the number of errors. Upon error it may set error codes (ERR_*) in
+ * err_code. It is supposed to be called only once very late in the boot process
+ * after the bind_conf's thread_set is fixed. The function may emit warnings and
+ * alerts. Extra listeners may be created on the fly.
+ */
+int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code);
+
+/*
+ * Registers the bind keyword list <kwl> as a list of valid keywords for next
+ * parsing sessions.
+ */
+void bind_register_keywords(struct bind_kw_list *kwl);
+
+/* Return a pointer to the bind keyword <kw>, or NULL if not found. */
+struct bind_kw *bind_find_kw(const char *kw);
+
+/* Dumps all registered "bind" keywords to the <out> string pointer. */
+void bind_dump_kws(char **out);
+const char *bind_find_best_kw(const char *word);
+int bind_parse_args_list(struct bind_conf *bind_conf, char **args, int cur_arg,
+ const char *section, const char *file, int linenum);
+
+void bind_recount_thread_bits(struct bind_conf *conf);
+unsigned int bind_map_thread_id(const struct bind_conf *conf, unsigned int r);
+struct bind_conf *bind_conf_alloc(struct proxy *fe, const char *file,
+ int line, const char *arg, struct xprt_ops *xprt);
+const char *listener_state_str(const struct listener *l);
+struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
+struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
+
+extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
+
+extern const char* li_status_st[LI_STATE_COUNT];
+enum li_status get_li_status(struct listener *l);
+
+/* number of times an accepted connection resulted in maxconn being reached */
+extern ullong maxconn_reached;
+
+static inline uint accept_queue_ring_len(const struct accept_queue_ring *ring)
+{
+ uint idx, head, tail, len;
+
+ idx = _HA_ATOMIC_LOAD(&ring->idx); /* (head << 16) + tail */
+ head = idx >> 16;
+ tail = idx & 0xffff;
+ len = tail + ACCEPT_QUEUE_SIZE - head;
+ if (len >= ACCEPT_QUEUE_SIZE)
+ len -= ACCEPT_QUEUE_SIZE;
+ return len;
+}
+
+#endif /* _HAPROXY_LISTENER_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/log-t.h b/include/haproxy/log-t.h
new file mode 100644
index 0000000..a0a25ac
--- /dev/null
+++ b/include/haproxy/log-t.h
@@ -0,0 +1,277 @@
+/*
+ * include/haproxy/log-t.h
+ * This file contains definitions of log-related structures and macros.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LOG_T_H
+#define _HAPROXY_LOG_T_H
+
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/ring-t.h>
+#include <haproxy/thread-t.h>
+
+
+#define NB_LOG_FACILITIES 24
+#define NB_LOG_LEVELS 8
+#define NB_LOG_HDR_MAX_ELEMENTS 15
+#define SYSLOG_PORT 514
+#define UNIQUEID_LEN 128
+
+/* flags used in logformat_node->options */
+#define LOG_OPT_HEXA 0x00000001
+#define LOG_OPT_MANDATORY 0x00000002
+#define LOG_OPT_QUOTE 0x00000004
+#define LOG_OPT_REQ_CAP 0x00000008
+#define LOG_OPT_RES_CAP 0x00000010
+#define LOG_OPT_HTTP 0x00000020
+#define LOG_OPT_ESC 0x00000040
+#define LOG_OPT_MERGE_SPACES 0x00000080
+
+
+/* Fields that need to be extracted from the incoming connection or request for
+ * logging or for sending specific header information. They're set in px->to_log
+ * and appear as flags in session->logs.logwait, which are removed once the
+ * required information has been collected.
+ */
+#define LW_INIT 1 /* anything */
+#define LW_CLIP 2 /* CLient IP */
+#define LW_SVIP 4 /* SerVer IP */
+#define LW_SVID 8 /* server ID */
+#define LW_REQ 16 /* http REQuest */
+#define LW_RESP 32 /* http RESPonse */
+#define LW_BYTES 256 /* bytes read from server */
+#define LW_COOKIE 512 /* captured cookie */
+#define LW_REQHDR 1024 /* request header(s) */
+#define LW_RSPHDR 2048 /* response header(s) */
+#define LW_BCKIP 4096 /* backend IP */
+#define LW_FRTIP 8192 /* frontend IP */
+#define LW_XPRT 16384 /* transport layer information (eg: SSL) */
+
+#define LOG_LEGACYTIME_LEN 15
+#define LOG_ISOTIME_MINLEN 20
+#define LOG_ISOTIME_MAXLEN 32
+
+/* enum for log format */
+enum log_fmt {
+ LOG_FORMAT_UNSPEC = 0,
+ LOG_FORMAT_LOCAL,
+ LOG_FORMAT_RFC3164,
+ LOG_FORMAT_RFC5424,
+ LOG_FORMAT_PRIO,
+ LOG_FORMAT_SHORT,
+ LOG_FORMAT_TIMED,
+ LOG_FORMAT_ISO,
+ LOG_FORMAT_RAW,
+ LOG_FORMATS /* number of supported log formats, must always be last */
+};
+
+/* enum log header meta data */
+enum log_meta {
+ LOG_META_PRIO,
+ LOG_META_TIME,
+ LOG_META_HOST,
+ LOG_META_TAG,
+ LOG_META_PID,
+ LOG_META_MSGID,
+ LOG_META_STDATA,
+ LOG_META_FIELDS /* must always be the last */
+};
+
+/* log header data */
+struct log_header {
+ enum log_fmt format; /* how to format the header */
+ int level, facility; /* used by several formats */
+ struct ist *metadata; /* optional metadata - per-format */
+};
+
+#define LOG_HEADER_NONE (struct log_header){ \
+ .format = LOG_FORMAT_UNSPEC, \
+ .level = 0, \
+ .facility = 0, \
+ .metadata = NULL \
+ }
+
+/* log target types */
+enum log_tgt {
+ LOG_TARGET_DGRAM = 0, // datagram address (udp, unix socket)
+ LOG_TARGET_FD, // file descriptor
+ LOG_TARGET_BUFFER, // ring buffer
+ LOG_TARGET_BACKEND, // backend with SYSLOG mode
+};
+
+/* lists of fields that can be logged, for logformat_node->type */
+enum {
+
+ LOG_FMT_TEXT = 0, /* raw text */
+ LOG_FMT_EXPR, /* sample expression */
+ LOG_FMT_SEPARATOR, /* separator replaced by one space */
+
+ /* information fields */
+ LOG_FMT_GLOBAL,
+ LOG_FMT_CLIENTIP,
+ LOG_FMT_CLIENTPORT,
+ LOG_FMT_BACKENDIP,
+ LOG_FMT_BACKENDPORT,
+ LOG_FMT_FRONTENDIP,
+ LOG_FMT_FRONTENDPORT,
+ LOG_FMT_SERVERPORT,
+ LOG_FMT_SERVERIP,
+ LOG_FMT_COUNTER,
+ LOG_FMT_LOGCNT,
+ LOG_FMT_PID,
+ LOG_FMT_DATE,
+ LOG_FMT_DATEGMT,
+ LOG_FMT_DATELOCAL,
+ LOG_FMT_TS,
+ LOG_FMT_MS,
+ LOG_FMT_FRONTEND,
+ LOG_FMT_FRONTEND_XPRT,
+ LOG_FMT_BACKEND,
+ LOG_FMT_SERVER,
+ LOG_FMT_BYTES,
+ LOG_FMT_BYTES_UP,
+ LOG_FMT_Ta,
+ LOG_FMT_Th,
+ LOG_FMT_Ti,
+ LOG_FMT_TQ,
+ LOG_FMT_TW,
+ LOG_FMT_TC,
+ LOG_FMT_Tr,
+ LOG_FMT_tr,
+ LOG_FMT_trg,
+ LOG_FMT_trl,
+ LOG_FMT_TR,
+ LOG_FMT_TD,
+ LOG_FMT_TT,
+ LOG_FMT_TU,
+ LOG_FMT_STATUS,
+ LOG_FMT_CCLIENT,
+ LOG_FMT_CSERVER,
+ LOG_FMT_TERMSTATE,
+ LOG_FMT_TERMSTATE_CK,
+ LOG_FMT_ACTCONN,
+ LOG_FMT_FECONN,
+ LOG_FMT_BECONN,
+ LOG_FMT_SRVCONN,
+ LOG_FMT_RETRIES,
+ LOG_FMT_SRVQUEUE,
+ LOG_FMT_BCKQUEUE,
+ LOG_FMT_HDRREQUEST,
+ LOG_FMT_HDRRESPONS,
+ LOG_FMT_HDRREQUESTLIST,
+ LOG_FMT_HDRRESPONSLIST,
+ LOG_FMT_REQ,
+ LOG_FMT_HTTP_METHOD,
+ LOG_FMT_HTTP_URI,
+ LOG_FMT_HTTP_PATH,
+ LOG_FMT_HTTP_PATH_ONLY,
+ LOG_FMT_HTTP_QUERY,
+ LOG_FMT_HTTP_VERSION,
+ LOG_FMT_HOSTNAME,
+ LOG_FMT_UNIQUEID,
+ LOG_FMT_SSL_CIPHER,
+ LOG_FMT_SSL_VERSION,
+};
+
+/* enum for parse_logformat_string */
+enum {
+ LF_INIT = 0, // before first character
+ LF_TEXT, // normal text
+ LF_SEPARATOR, // a single separator
+ LF_VAR, // variable name, after '%' or '%{..}'
+ LF_STARTVAR, // % in text
+ LF_STARG, // after '%{' and before '}'
+ LF_EDARG, // '}' after '%{'
+ LF_STEXPR, // after '%[' or '%{..}[' and before ']'
+ LF_EDEXPR, // ']' after '%['
+ LF_END, // \0 found
+};
+
+
+struct logformat_node {
+ struct list list;
+ int type; // LOG_FMT_*
+ int options; // LOG_OPT_*
+ char *arg; // text for LOG_FMT_TEXT, arg for others
+ void *expr; // for use with LOG_FMT_EXPR
+};
+
+/* Range of indexes for log sampling. */
+struct smp_log_range {
+ unsigned int low; /* Low limit of the indexes of this range. */
+ unsigned int high; /* High limit of the indexes of this range. */
+ size_t sz; /* The size of this range, or number of indexes in
+ * this range.
+ */
+};
+
+/* Log sampling information. */
+struct smp_info {
+ struct smp_log_range *smp_rgs; /* Array of ranges for log sampling. */
+ size_t smp_rgs_sz; /* The size of <smp_rgs> array. */
+ size_t smp_sz; /* The total number of logs to be sampled. */
+ ullong curr_rg_idx; /* 63:32 = current range; 31:0 = current index */
+};
+
+enum log_target_flags {
+ LOG_TARGET_FL_NONE = 0x00,
+ LOG_TARGET_FL_RESOLVED = 0x01
+};
+
+struct log_target {
+ struct sockaddr_storage *addr;
+ union {
+ char *ring_name; /* type = BUFFER - preparsing */
+ struct sink *sink; /* type = BUFFER - postparsing */
+ char *be_name; /* type = BACKEND - preparsing */
+ struct proxy *be; /* type = BACKEND - postparsing */
+ char *resolv_name; /* generic - preparsing */
+ };
+ enum log_tgt type;
+ uint16_t flags;
+};
+
+struct logger {
+ struct list list;
+ struct log_target target;
+ struct smp_info lb;
+ enum log_fmt format;
+ int facility;
+ int level;
+ int minlvl;
+ int maxlen;
+ struct logger *ref;
+ struct {
+ char *file; /* file where the logger appears */
+ int line; /* line where the logger appears */
+ } conf;
+};
+
+#endif /* _HAPROXY_LOG_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/log.h b/include/haproxy/log.h
new file mode 100644
index 0000000..68b8207
--- /dev/null
+++ b/include/haproxy/log.h
@@ -0,0 +1,195 @@
+/*
+ * include/haproxy/log.h
+ * This file contains definitions of log-related functions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_LOG_H
+#define _HAPROXY_LOG_H
+
+#include <syslog.h>
+
+#include <haproxy/api.h>
+#include <haproxy/log-t.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream.h>
+
+extern struct pool_head *pool_head_requri;
+extern struct pool_head *pool_head_uniqueid;
+
+extern const char *log_levels[];
+extern char *log_format;
+extern char httpclient_log_format[];
+extern char default_tcp_log_format[];
+extern char default_http_log_format[];
+extern char clf_http_log_format[];
+extern char default_https_log_format[];
+
+extern char default_rfc5424_sd_log_format[];
+
+extern const char sess_term_cond[];
+extern const char sess_fin_state[];
+
+extern unsigned int dropped_logs;
+
+/* log forward proxy list */
+extern struct proxy *cfg_log_forward;
+
+extern THREAD_LOCAL char *logline;
+extern THREAD_LOCAL char *logline_rfc5424;
+
+/* global syslog message counter */
+extern int cum_log_messages;
+
+/* syslog UDP message handler */
+void syslog_fd_handler(int fd);
+
+/* Initialize/Deinitialize log buffers used for syslog messages */
+int init_log_buffers(void);
+void deinit_log_buffers(void);
+
+/* build a log line for the session and an optional stream */
+int sess_build_logline(struct session *sess, struct stream *s, char *dst, size_t maxsize, struct list *list_format);
+
+/*
+ * send a log for the stream when we have enough info about it.
+ * Will not log if the frontend has no log defined.
+ */
+void strm_log(struct stream *s);
+void sess_log(struct session *sess);
+
+/* send a applicative log with custom list of loggers */
+void app_log(struct list *loggers, struct buffer *tag, int level, const char *format, ...)
+ __attribute__ ((format(printf, 4, 5)));
+
+/*
+ * add to the logformat linked list
+ */
+int add_to_logformat_list(char *start, char *end, int type, struct list *list_format, char **err);
+
+/*
+ * Parse the log_format string and fill a linked list.
+ * Variable name are preceded by % and composed by characters [a-zA-Z0-9]* : %varname
+ * You can set arguments using { } : %{many arguments}varname
+ */
+int parse_logformat_string(const char *str, struct proxy *curproxy, struct list *list_format, int options, int cap, char **err);
+
+int postresolve_logger_list(struct list *loggers, const char *section, const char *section_name);
+
+struct logger *dup_logger(struct logger *def);
+void free_logger(struct logger *logger);
+void deinit_log_target(struct log_target *target);
+
+/* Parse "log" keyword and update the linked list. */
+int parse_logger(char **args, struct list *loggers, int do_del, const char *file, int linenum, char **err);
+
+/*
+ * This function adds a header to the message and sends the syslog message
+ * using a printf format string
+ */
+void send_log(struct proxy *p, int level, const char *format, ...)
+ __attribute__ ((format(printf, 3, 4)));
+
+/*
+ * This function sends a syslog message to all loggers of a proxy,
+ * or to global loggers if the proxy is NULL.
+ * It also tries not to waste too much time computing the message header.
+ * It doesn't care about errors nor does it report them.
+ */
+
+void __send_log(struct list *loggers, struct buffer *tag, int level, char *message, size_t size, char *sd, size_t sd_size);
+
+/*
+ * returns log format for <fmt> or LOG_FORMAT_UNSPEC if not found.
+ */
+enum log_fmt get_log_format(const char *fmt);
+
+/*
+ * returns log level for <lev> or -1 if not found.
+ */
+int get_log_level(const char *lev);
+
+/*
+ * returns log facility for <fac> or -1 if not found.
+ */
+int get_log_facility(const char *fac);
+
+/*
+ * Write a string in the log string
+ * Takes care of quote options
+ *
+ * Return the address of the \0 character, or NULL on error
+ */
+char *lf_text_len(char *dst, const char *src, size_t len, size_t size, const struct logformat_node *node);
+
+/*
+ * Write a IP address to the log string
+ * +X option write in hexadecimal notation, most significant byte on the left
+ */
+char *lf_ip(char *dst, const struct sockaddr *sockaddr, size_t size, const struct logformat_node *node);
+
+/*
+ * Write a port to the log
+ * +X option write in hexadecimal notation, most significant byte on the left
+ */
+char *lf_port(char *dst, const struct sockaddr *sockaddr, size_t size, const struct logformat_node *node);
+
+
+/*
+ * Function to handle log header building (exported for sinks)
+ */
+char *update_log_hdr_rfc5424(const time_t time, suseconds_t frac);
+char *update_log_hdr(const time_t time);
+char * get_format_pid_sep1(int format, size_t *len);
+char * get_format_pid_sep2(int format, size_t *len);
+
+/*
+ * Builds a log line for the stream (must be valid).
+ */
+static inline int build_logline(struct stream *s, char *dst, size_t maxsize, struct list *list_format)
+{
+ return sess_build_logline(strm_sess(s), s, dst, maxsize, list_format);
+}
+
+struct ist *build_log_header(struct log_header hdr, size_t *nbelem);
+
+/*
+ * lookup log forward proxy by name
+ * Returns NULL if no proxy found.
+ */
+static inline struct proxy *log_forward_by_name(const char *name)
+{
+ struct proxy *px = cfg_log_forward;
+
+ while (px) {
+ if (strcmp(px->id, name) == 0)
+ return px;
+ px = px->next;
+ }
+ return NULL;
+}
+
+#endif /* _HAPROXY_LOG_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/mailers-t.h b/include/haproxy/mailers-t.h
new file mode 100644
index 0000000..0fa3197
--- /dev/null
+++ b/include/haproxy/mailers-t.h
@@ -0,0 +1,83 @@
+/*
+ * include/haproxy/mailer-t.h
+ * This file defines everything related to mailer.
+ *
+ * Copyright 2015 Horms Solutions Ltd., Simon Horman <horms@verge.net.au>
+ *
+ * Based on include/haproxy/peers-t.h
+ *
+ * Copyright 2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MAILERS_T_H
+#define _HAPROXY_MAILERS_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <haproxy/check-t.h>
+#include <haproxy/tcpcheck-t.h>
+#include <haproxy/thread-t.h>
+
+struct mailer {
+ char *id;
+ struct mailers *mailers;
+ struct {
+ const char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ } conf; /* config information */
+ struct sockaddr_storage addr; /* SMTP server address */
+ struct protocol *proto; /* SMTP server address's protocol */
+ struct xprt_ops *xprt; /* SMTP server socket operations at transport layer */
+ void *sock_init_arg; /* socket operations' opaque init argument if needed */
+ struct mailer *next; /* next mailer in the list */
+};
+
+struct mailers {
+ char *id; /* mailers section name */
+ struct mailer *mailer_list; /* mailers in this mailers section */
+ struct {
+ const char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ } conf; /* config information */
+ struct mailers *next; /* next mailers section */
+ int count; /* total number of mailers in this mailers section */
+ int users; /* number of users of this mailers section */
+ struct { /* time to: */
+ int mail; /* try connecting to mailserver and sending a email */
+ } timeout;
+};
+
+struct email_alert {
+ struct list list;
+ struct tcpcheck_rules rules;
+ struct server *srv;
+};
+
+struct email_alertq {
+ struct list email_alerts;
+ struct check check; /* Email alerts are implemented using existing check
+ * code even though they are not checks. This structure
+ * is as a parameter to the check code.
+ * Each check corresponds to a mailer */
+ __decl_thread(HA_SPINLOCK_T lock);
+};
+
+#endif /* _HAPROXY_MAILERS_T_H */
+
diff --git a/include/haproxy/mailers.h b/include/haproxy/mailers.h
new file mode 100644
index 0000000..89aa1b0
--- /dev/null
+++ b/include/haproxy/mailers.h
@@ -0,0 +1,42 @@
+/*
+ * include/haproxy/mailer.h
+ * This file lists exported variables and functions for mailers.
+ *
+ * Copyright 2015 Horms Solutions Ltd., Simon Horman <horms@verge.net.au>
+ * Copyright 2020 Willy Tarreau <w@1wt.eu>
+ *
+ * Based on include/haproxy/peers-t.h
+ *
+ * Copyright 2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MAILERS_H
+#define _HAPROXY_MAILERS_H
+
+#include <haproxy/mailers-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+
+extern struct mailers *mailers;
+extern int send_email_disabled;
+
+int init_email_alert(struct mailers *mailers, struct proxy *p, char **err);
+void send_email_alert(struct server *s, int priority, const char *format, ...)
+ __attribute__ ((format(printf, 3, 4)));
+
+
+#endif /* _HAPROXY_MAILERS_H */
diff --git a/include/haproxy/map-t.h b/include/haproxy/map-t.h
new file mode 100644
index 0000000..d6085ee
--- /dev/null
+++ b/include/haproxy/map-t.h
@@ -0,0 +1,34 @@
+/*
+ * include/haproxy/map-t.h
+ * This file provides structures and types for MAPs.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MAP_T_H
+#define _HAPROXY_MAP_T_H
+
+#include <haproxy/pattern-t.h>
+#include <haproxy/sample-t.h>
+
+struct map_descriptor {
+ struct sample_conv *conv; /* original converter descriptor */
+ struct pattern_head pat; /* the pattern matching associated to the map */
+ int do_free; /* set if <pat> is the original pat and must be freed */
+};
+
+#endif /* _HAPROXY_MAP_T_H */
diff --git a/include/haproxy/map.h b/include/haproxy/map.h
new file mode 100644
index 0000000..3ec3418
--- /dev/null
+++ b/include/haproxy/map.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/map.h
+ * This file provides structures and types for pattern matching.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MAP_H
+#define _HAPROXY_MAP_H
+
+#include <haproxy/map-t.h>
+#include <haproxy/sample-t.h>
+
+/* maps output sample parser */
+int map_parse_ip(const char *text, struct sample_data *data);
+int map_parse_ip6(const char *text, struct sample_data *data);
+int map_parse_str(const char *text, struct sample_data *data);
+int map_parse_int(const char *text, struct sample_data *data);
+
+struct map_reference *map_get_reference(const char *reference);
+
+int sample_load_map(struct arg *arg, struct sample_conv *conv,
+ const char *file, int line, char **err);
+
+#endif /* _HAPROXY_MAP_H */
diff --git a/include/haproxy/mqtt-t.h b/include/haproxy/mqtt-t.h
new file mode 100644
index 0000000..51f55ea
--- /dev/null
+++ b/include/haproxy/mqtt-t.h
@@ -0,0 +1,310 @@
+/*
+ * include/haproxy/mqtt-t.h
+ * This file contains structure declarations for MQTT protocol.
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MQTT_T_H
+#define _HAPROXY_MQTT_T_H
+
+#include <import/ist.h>
+
+/* MQTT protocol version
+ * In MQTT 3.1.1, version is called "level"
+ */
+#define MQTT_VERSION_3_1 3
+#define MQTT_VERSION_3_1_1 4
+#define MQTT_VERSION_5_0 5
+
+/*
+ * return code when parsing / validating MQTT messages
+ */
+#define MQTT_INVALID_MESSAGE -1
+#define MQTT_NEED_MORE_DATA 0
+#define MQTT_VALID_MESSAGE 1
+
+
+/*
+ * MQTT Control Packet Type: MQTT_CPT_*
+ *
+ * Part of the fixed headers, encoded on the first packet byte :
+ *
+ * +-------+-----------+-----------+-----------+---------+----------+----------+---------+------------+
+ * | bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +-------+-----------+-----------+-----------+---------+----------+----------+---------+------------+
+ * | field | MQTT Control Packet Type | Flags specific to each Control Packet type |
+ * +-------+---------------------------------------------+--------------------------------------------+
+ *
+ * Don't forget to "left offset by 4 bits (<< 4)" the values below when matching against the fixed
+ * header collected in a MQTT packet.
+ *
+ * value 0x0 is reserved and forbidden
+ */
+enum {
+ MQTT_CPT_INVALID = 0,
+
+ MQTT_CPT_CONNECT,
+ MQTT_CPT_CONNACK,
+ MQTT_CPT_PUBLISH,
+ MQTT_CPT_PUBACK,
+ MQTT_CPT_PUBREC,
+ MQTT_CPT_PUBREL,
+ MQTT_CPT_PUBCOMP,
+ MQTT_CPT_SUBSCRIBE,
+ MQTT_CPT_SUBACK,
+ MQTT_CPT_UNSUBSCRIBE,
+ MQTT_CPT_UNSUBACK,
+ MQTT_CPT_PINGREQ,
+ MQTT_CPT_PINGRESP,
+ MQTT_CPT_DISCONNECT,
+ MQTT_CPT_AUTH,
+ MQTT_CPT_ENTRIES /* used to mark the end/size of our MQTT_CPT_* list */
+};
+
+/* MQTT CONNECT packet flags */
+#define MQTT_CONNECT_FL_RESERVED 0x01
+#define MQTT_CONNECT_FL_CLEAN_SESSION 0x02
+#define MQTT_CONNECT_FL_WILL 0x04
+#define MQTT_CONNECT_FL_WILL_QOS 0x18 /* covers 2 bits 00011000 */
+#define MQTT_CONNECT_FL_WILL_RETAIN 0x20
+#define MQTT_CONNECT_FL_PASSWORD 0x40
+#define MQTT_CONNECT_FL_USERNAME 0x80
+
+/* MQTT packet properties identifiers
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901029
+ */
+#define MQTT_PROP_PAYLOAD_FORMAT_INDICATOR 0x01
+#define MQTT_PROP_MESSAGE_EXPIRY_INTERVAL 0x02
+#define MQTT_PROP_CONTENT_TYPE 0x03
+#define MQTT_PROP_RESPONSE_TOPIC 0x08
+#define MQTT_PROP_CORRELATION_DATA 0x09
+#define MQTT_PROP_SESSION_EXPIRY_INTERVAL 0x11
+#define MQTT_PROP_ASSIGNED_CLIENT_IDENTIFIER 0x12
+#define MQTT_PROP_SERVER_KEEPALIVE 0x13
+#define MQTT_PROP_AUTHENTICATION_METHOD 0x15
+#define MQTT_PROP_AUTHENTICATION_DATA 0x16
+#define MQTT_PROP_REQUEST_PROBLEM_INFORMATION 0x17
+#define MQTT_PROP_WILL_DELAY_INTERVAL 0x18
+#define MQTT_PROP_REQUEST_RESPONSE_INFORMATION 0x19
+#define MQTT_PROP_RESPONSE_INFORMATION 0x1A
+#define MQTT_PROP_SERVER_REFERENCE 0x1C
+#define MQTT_PROP_RECEIVE_MAXIMUM 0x21
+#define MQTT_PROP_TOPIC_ALIAS_MAXIMUM 0x22
+#define MQTT_PROP_MAXIMUM_QOS 0x24
+#define MQTT_PROP_RETAIN_AVAILABLE 0x25
+#define MQTT_PROP_USER_PROPERTIES 0x26
+#define MQTT_PROP_MAXIMUM_PACKET_SIZE 0x27
+#define MQTT_PROP_WILDCARD_SUBSCRIPTION_AVAILABLE 0x28
+#define MQTT_PROP_SUBSCRIPTION_IDENTIFIERS_AVAILABLE 0x29
+#define MQTT_PROP_SHARED_SUBSRIPTION_AVAILABLE 0x2A
+#define MQTT_PROP_REASON_STRING 0x1F
+#define MQTT_PROP_LAST 0xFF
+
+/* MQTT minimal packet size */
+#define MQTT_MIN_PKT_SIZE 2
+#define MQTT_REMAINING_LENGHT_MAX_SIZE 4
+
+/* list of supported capturable Field Names and configuration file string */
+enum {
+ MQTT_FN_INVALID = 0,
+
+ MQTT_FN_FLAGS,
+ MQTT_FN_REASON_CODE,
+ MQTT_FN_PROTOCOL_NAME,
+ MQTT_FN_PROTOCOL_VERSION,
+ MQTT_FN_CLIENT_IDENTIFIER,
+ MQTT_FN_WILL_TOPIC,
+ MQTT_FN_WILL_PAYLOAD,
+ MQTT_FN_USERNAME,
+ MQTT_FN_PASSWORD,
+ MQTT_FN_KEEPALIVE,
+
+ /* MQTT 5.0 properties
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901029
+ */
+ MQTT_FN_PAYLOAD_FORMAT_INDICATOR,
+ MQTT_FN_MESSAGE_EXPIRY_INTERVAL,
+ MQTT_FN_CONTENT_TYPE,
+ MQTT_FN_RESPONSE_TOPIC,
+ MQTT_FN_CORRELATION_DATA,
+ MQTT_FN_SUBSCRIPTION_IDENTIFIER,
+ MQTT_FN_SESSION_EXPIRY_INTERVAL,
+ MQTT_FN_ASSIGNED_CLIENT_IDENTIFIER,
+ MQTT_FN_SERVER_KEEPALIVE,
+ MQTT_FN_AUTHENTICATION_METHOD,
+ MQTT_FN_AUTHENTICATION_DATA,
+ MQTT_FN_REQUEST_PROBLEM_INFORMATION,
+ MQTT_FN_DELAY_INTERVAL,
+ MQTT_FN_REQUEST_RESPONSE_INFORMATION,
+ MQTT_FN_RESPONSE_INFORMATION,
+ MQTT_FN_SERVER_REFERENCE,
+ MQTT_FN_REASON_STRING,
+ MQTT_FN_RECEIVE_MAXIMUM,
+ MQTT_FN_TOPIC_ALIAS_MAXIMUM,
+ MQTT_FN_TOPIC_ALIAS,
+ MQTT_FN_MAXIMUM_QOS,
+ MQTT_FN_RETAIN_AVAILABLE,
+ MQTT_FN_USER_PROPERTY,
+ MQTT_FN_MAXIMUM_PACKET_SIZE,
+ MQTT_FN_WILDCARD_SUBSCRIPTION_AVAILABLE,
+ MQTT_FN_SUBSCRIPTION_IDENTIFIERS_AVAILABLE,
+ MQTT_FN_SHARED_SUBSCRIPTION_AVAILABLE,
+
+ MQTT_FN_ENTRIES /* this one must always be the latest one */
+};
+
+/* MQTT field string bit, for easy match using bitmasks
+ * ATTENTION: "user-properties" are not supported for now
+ */
+enum {
+ MQTT_FN_BIT_FLAGS = (1ULL << MQTT_FN_FLAGS),
+ MQTT_FN_BIT_REASON_CODE = (1ULL << MQTT_FN_REASON_CODE),
+ MQTT_FN_BIT_PROTOCOL_NAME = (1ULL << MQTT_FN_PROTOCOL_NAME),
+ MQTT_FN_BIT_PROTOCOL_VERSION = (1ULL << MQTT_FN_PROTOCOL_VERSION),
+ MQTT_FN_BIT_CLIENT_IDENTIFIER = (1ULL << MQTT_FN_CLIENT_IDENTIFIER),
+ MQTT_FN_BIT_WILL_TOPIC = (1ULL << MQTT_FN_WILL_TOPIC),
+ MQTT_FN_BIT_WILL_PAYLOAD = (1ULL << MQTT_FN_WILL_PAYLOAD),
+ MQTT_FN_BIT_USERNAME = (1ULL << MQTT_FN_USERNAME),
+ MQTT_FN_BIT_PASSWORD = (1ULL << MQTT_FN_PASSWORD),
+ MQTT_FN_BIT_KEEPALIVE = (1ULL << MQTT_FN_KEEPALIVE),
+ MQTT_FN_BIT_PAYLOAD_FORMAT_INDICATOR = (1ULL << MQTT_FN_PAYLOAD_FORMAT_INDICATOR),
+ MQTT_FN_BIT_MESSAGE_EXPIRY_INTERVAL = (1ULL << MQTT_FN_MESSAGE_EXPIRY_INTERVAL),
+ MQTT_FN_BIT_CONTENT_TYPE = (1ULL << MQTT_FN_CONTENT_TYPE),
+ MQTT_FN_BIT_RESPONSE_TOPIC = (1ULL << MQTT_FN_RESPONSE_TOPIC),
+ MQTT_FN_BIT_CORRELATION_DATA = (1ULL << MQTT_FN_CORRELATION_DATA),
+ MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIER = (1ULL << MQTT_FN_SUBSCRIPTION_IDENTIFIER),
+ MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL = (1ULL << MQTT_FN_SESSION_EXPIRY_INTERVAL),
+ MQTT_FN_BIT_ASSIGNED_CLIENT_IDENTIFIER = (1ULL << MQTT_FN_ASSIGNED_CLIENT_IDENTIFIER),
+ MQTT_FN_BIT_SERVER_KEEPALIVE = (1ULL << MQTT_FN_SERVER_KEEPALIVE),
+ MQTT_FN_BIT_AUTHENTICATION_METHOD = (1ULL << MQTT_FN_AUTHENTICATION_METHOD),
+ MQTT_FN_BIT_AUTHENTICATION_DATA = (1ULL << MQTT_FN_AUTHENTICATION_DATA),
+ MQTT_FN_BIT_REQUEST_PROBLEM_INFORMATION = (1ULL << MQTT_FN_REQUEST_PROBLEM_INFORMATION),
+ MQTT_FN_BIT_DELAY_INTERVAL = (1ULL << MQTT_FN_DELAY_INTERVAL),
+ MQTT_FN_BIT_REQUEST_RESPONSE_INFORMATION = (1ULL << MQTT_FN_REQUEST_RESPONSE_INFORMATION),
+ MQTT_FN_BIT_RESPONSE_INFORMATION = (1ULL << MQTT_FN_RESPONSE_INFORMATION),
+ MQTT_FN_BIT_SERVER_REFERENCE = (1ULL << MQTT_FN_SERVER_REFERENCE),
+ MQTT_FN_BIT_REASON_STRING = (1ULL << MQTT_FN_REASON_STRING),
+ MQTT_FN_BIT_RECEIVE_MAXIMUM = (1ULL << MQTT_FN_RECEIVE_MAXIMUM),
+ MQTT_FN_BIT_TOPIC_ALIAS_MAXIMUM = (1ULL << MQTT_FN_TOPIC_ALIAS_MAXIMUM),
+ MQTT_FN_BIT_TOPIC_ALIAS = (1ULL << MQTT_FN_TOPIC_ALIAS),
+ MQTT_FN_BIT_MAXIMUM_QOS = (1ULL << MQTT_FN_MAXIMUM_QOS),
+ MQTT_FN_BIT_RETAIN_AVAILABLE = (1ULL << MQTT_FN_RETAIN_AVAILABLE),
+ MQTT_FN_BIT_USER_PROPERTY = (1ULL << MQTT_FN_USER_PROPERTY),
+ MQTT_FN_BIT_MAXIMUM_PACKET_SIZE = (1ULL << MQTT_FN_MAXIMUM_PACKET_SIZE),
+ MQTT_FN_BIT_WILDCARD_SUBSCRIPTION_AVAILABLE = (1ULL << MQTT_FN_WILDCARD_SUBSCRIPTION_AVAILABLE),
+ MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIERS_AVAILABLE= (1ULL << MQTT_FN_SUBSCRIPTION_IDENTIFIERS_AVAILABLE),
+ MQTT_FN_BIT_SHARED_SUBSCRIPTION_AVAILABLE = (1ULL << MQTT_FN_SHARED_SUBSCRIPTION_AVAILABLE),
+};
+
+/* structure to host fields for a MQTT CONNECT packet */
+#define MQTT_PROP_USER_PROPERTY_ENTRIES 5
+struct connect {
+ struct {
+ struct ist protocol_name;
+ uint8_t protocol_version;
+ uint8_t flags;
+ uint16_t keepalive;
+
+ struct {
+ uint32_t session_expiry_interval;
+ uint16_t receive_maximum;
+ uint32_t maximum_packet_size;
+ uint16_t topic_alias_maximum;
+ uint8_t request_response_information;
+ uint8_t request_problem_information;
+ struct {
+ struct ist name;
+ struct ist value;
+ } user_props[MQTT_PROP_USER_PROPERTY_ENTRIES];
+ struct ist authentication_method;
+ struct ist authentication_data;
+ } props;
+ } var_hdr;
+ struct {
+ struct ist client_identifier;
+ struct {
+ uint32_t delay_interval;
+ uint8_t payload_format_indicator;
+ uint32_t message_expiry_interval;
+ struct ist content_type;
+ struct ist response_topic;
+ struct ist correlation_data;
+ struct {
+ struct ist name;
+ struct ist value;
+ } user_props[MQTT_PROP_USER_PROPERTY_ENTRIES];
+ } will_props;
+ struct ist will_topic;
+ struct ist will_payload;
+ struct ist username;
+ struct ist password;
+ } payload;
+};
+
+/* structure to host fields for a MQTT CONNACK packet */
+struct connack {
+ struct {
+ uint8_t protocol_version;
+ uint8_t flags;
+ uint8_t reason_code;
+ struct {
+ uint32_t session_expiry_interval;
+ uint16_t receive_maximum;
+ uint8_t maximum_qos;
+ uint8_t retain_available;
+ uint32_t maximum_packet_size;
+ struct ist assigned_client_identifier;
+ uint16_t topic_alias_maximum;
+ struct ist reason_string;
+ struct {
+ struct ist name;
+ struct ist value;
+ } user_props[MQTT_PROP_USER_PROPERTY_ENTRIES];
+ uint8_t wildcard_subscription_available;
+ uint8_t subscription_identifiers_available;
+ uint8_t shared_subsription_available;
+ uint16_t server_keepalive;
+ struct ist response_information;
+ struct ist server_reference;
+ struct ist authentication_method;
+ struct ist authentication_data;
+ } props;
+ } var_hdr;
+};
+
+/* structure to host a MQTT packet */
+struct mqtt_pkt {
+ struct {
+ uint8_t type; /* MQTT_CPT_* */
+ uint8_t flags; /* MQTT_CPT_FL* */
+ uint32_t remaining_length;
+ } fixed_hdr;
+ union {
+ struct connect connect;
+ struct connack connack;
+ } data;
+};
+
+#endif /* _HAPROXY_MQTT_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/mqtt.h b/include/haproxy/mqtt.h
new file mode 100644
index 0000000..6720bb7
--- /dev/null
+++ b/include/haproxy/mqtt.h
@@ -0,0 +1,118 @@
+/*
+ * include/haproxy/mqtt.h
+ * This file contains structure declarations for MQTT protocol.
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MQTT_H
+#define _HAPROXY_MQTT_H
+
+#include <import/ist.h>
+
+#include <haproxy/mqtt-t.h>
+#include <haproxy/tools.h>
+
+/* expected flags for control packets */
+extern uint8_t mqtt_cpt_flags[MQTT_CPT_ENTRIES];
+
+/* MQTT field string names */
+extern const struct ist mqtt_fields_string[MQTT_FN_ENTRIES];
+
+/* list of supported capturable field names for each MQTT control packet type */
+extern const uint64_t mqtt_fields_per_packet[MQTT_CPT_ENTRIES];
+
+int mqtt_validate_message(const struct ist msg, struct mqtt_pkt *mpkt);
+struct ist mqtt_field_value(const struct ist msg, int type, int fieldname_id);
+
+/*
+ * Return a MQTT packet type ID based found in <str>.
+ * <str> can be a number or a string and returned value will always be the numeric value.
+ *
+ * If <str> can't be translated into an ID, then MQTT_CPT_INVALID (0) is returned.
+ */
+static inline int mqtt_typeid(struct ist str)
+{
+ int id;
+
+ id = strl2ui(str.ptr, istlen(str));
+ if ((id >= MQTT_CPT_CONNECT) && (id < MQTT_CPT_ENTRIES))
+ return id;
+
+ else if (isteqi(str, ist("CONNECT")) != 0)
+ return MQTT_CPT_CONNECT;
+ else if (isteqi(str, ist("CONNACK")) != 0)
+ return MQTT_CPT_CONNACK;
+ else if (isteqi(str, ist("PUBLISH")) != 0)
+ return MQTT_CPT_PUBLISH;
+ else if (isteqi(str, ist("PUBACK")) != 0)
+ return MQTT_CPT_PUBACK;
+ else if (isteqi(str, ist("PUBREC")) != 0)
+ return MQTT_CPT_PUBREC;
+ else if (isteqi(str, ist("PUBREL")) != 0)
+ return MQTT_CPT_PUBREL;
+ else if (isteqi(str, ist("PUBCOMP")) != 0)
+ return MQTT_CPT_PUBCOMP;
+ else if (isteqi(str, ist("SUBSCRIBE")) != 0)
+ return MQTT_CPT_SUBSCRIBE;
+ else if (isteqi(str, ist("SUBACK")) != 0)
+ return MQTT_CPT_SUBACK;
+ else if (isteqi(str, ist("UNSUBSCRIBE")) != 0)
+ return MQTT_CPT_UNSUBSCRIBE;
+ else if (isteqi(str, ist("UNSUBACK")) != 0)
+ return MQTT_CPT_UNSUBACK;
+ else if (isteqi(str, ist("PINGREQ")) != 0)
+ return MQTT_CPT_PINGREQ;
+ else if (isteqi(str, ist("PINGRESP")) != 0)
+ return MQTT_CPT_PINGRESP;
+ else if (isteqi(str, ist("DISCONNECT")) != 0)
+ return MQTT_CPT_DISCONNECT;
+ else if (isteqi(str, ist("AUTH")) != 0)
+ return MQTT_CPT_AUTH;
+
+ return MQTT_CPT_INVALID;
+}
+
+/*
+ * validate that <str> is a field that can be extracted from a <type> MQTT packet
+ *
+ * return the field name ID (MQTT_FN_*) if a match is found, MQTT_FN_INVALID (0) otherwise.
+ */
+static inline int mqtt_check_type_fieldname(int type, struct ist str)
+{
+ int i, id = MQTT_FN_INVALID;
+
+ for (i = 0; i < MQTT_FN_ENTRIES; i++) {
+ if (isteqi(str, mqtt_fields_string[i])) {
+ if (mqtt_fields_per_packet[type] & (1ULL << i))
+ id = i;
+ break;
+ }
+ }
+
+ return id;
+
+}
+
+#endif /* _HAPROXY_MQTT_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/mux_fcgi-t.h b/include/haproxy/mux_fcgi-t.h
new file mode 100644
index 0000000..27973db
--- /dev/null
+++ b/include/haproxy/mux_fcgi-t.h
@@ -0,0 +1,175 @@
+/*
+ * include/haproxy/mux_fcgi-t.h
+ * Definitions for basic FCGI mux internal types, constants and flags.
+ *
+ * Copyright 2022 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MUX_FCGI_T_H
+#define _HAPROXY_MUX_FCGI_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/show_flags-t.h>
+
+/**** FCGI connection flags (32 bit), in fcgi_conn->flags ****/
+#define FCGI_CF_NONE 0x00000000
+
+/* Flags indicating why writing to the mux is blocked */
+#define FCGI_CF_MUX_MALLOC 0x00000001 /* mux is blocked on lack connection's mux buffer */
+#define FCGI_CF_MUX_MFULL 0x00000002 /* mux is blocked on connection's mux buffer full */
+#define FCGI_CF_MUX_BLOCK_ANY   0x00000003   /* aggregate of the mux flags above */
+
+/* Flags indicating why writing to the demux is blocked.
+ * The first two ones directly affect the ability for the mux to receive data
+ * from the connection. The other ones affect the mux's ability to demux
+ * received data.
+ */
+#define FCGI_CF_DEM_DALLOC 0x00000004 /* demux blocked on lack of connection's demux buffer */
+#define FCGI_CF_DEM_DFULL 0x00000008 /* demux blocked on connection's demux buffer full */
+#define FCGI_CF_DEM_MROOM 0x00000010 /* demux blocked on lack of room in mux buffer */
+#define FCGI_CF_DEM_SALLOC 0x00000020 /* demux blocked on lack of stream's rx buffer */
+#define FCGI_CF_DEM_SFULL 0x00000040 /* demux blocked on stream request buffer full */
+#define FCGI_CF_DEM_TOOMANY 0x00000080 /* demux blocked waiting for some stream connectors to leave */
+#define FCGI_CF_DEM_BLOCK_ANY 0x000000F0 /* aggregate of the demux flags above except DALLOC/DFULL */
+
+/* Other flags */
+#define FCGI_CF_MPXS_CONNS 0x00000100 /* connection multiplexing is supported */
+#define FCGI_CF_ABRTS_SENT 0x00000200 /* a record ABORT was successfully sent to all active streams */
+#define FCGI_CF_ABRTS_FAILED 0x00000400 /* failed to abort processing of all streams */
+#define FCGI_CF_WAIT_FOR_HS 0x00000800 /* We did check that at least a stream was waiting for handshake */
+#define FCGI_CF_KEEP_CONN 0x00001000 /* HAProxy is responsible to close the connection */
+#define FCGI_CF_GET_VALUES 0x00002000 /* retrieve settings */
+
+#define FCGI_CF_EOS 0x00004000 /* End-of-stream seen on the H1 connection (read0 detected) */
+#define FCGI_CF_ERR_PENDING 0x00008000 /* A write error was detected (block sends but not reads) */
+#define FCGI_CF_ERROR           0x00010000  /* A read error was detected (handled as an abort) */
+
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *fconn_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(FCGI_CF_MUX_MALLOC, _(FCGI_CF_MUX_MFULL,
+ _(FCGI_CF_DEM_DALLOC, _(FCGI_CF_DEM_DFULL, _(FCGI_CF_DEM_MROOM,
+ _(FCGI_CF_DEM_SALLOC, _(FCGI_CF_DEM_SFULL, _(FCGI_CF_DEM_TOOMANY,
+ _(FCGI_CF_MPXS_CONNS, _(FCGI_CF_ABRTS_SENT, _(FCGI_CF_ABRTS_FAILED,
+ _(FCGI_CF_WAIT_FOR_HS, _(FCGI_CF_KEEP_CONN, _(FCGI_CF_GET_VALUES,
+ _(FCGI_CF_EOS, _(FCGI_CF_ERR_PENDING, _(FCGI_CF_ERROR)))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/**** FCGI stream flags (32 bit), in fcgi_strm->flags ****/
+#define FCGI_SF_NONE 0x00000000
+#define FCGI_SF_ES_RCVD         0x00000001 /* end-of-stream received (empty STDOUT or END_REQUEST record) */
+#define FCGI_SF_ES_SENT 0x00000002 /* end-of-stream sent (empty STDIN record) */
+#define FCGI_SF_EP_SENT 0x00000004 /* end-of-param sent (empty PARAMS record) */
+#define FCGI_SF_ABRT_SENT 0x00000008 /* abort sent (ABORT_REQUEST record) */
+
+/* Stream flags indicating the reason the stream is blocked */
+#define FCGI_SF_BLK_MBUSY 0x00000010 /* blocked waiting for mux access (transient) */
+#define FCGI_SF_BLK_MROOM 0x00000020 /* blocked waiting for room in the mux */
+#define FCGI_SF_BLK_ANY 0x00000030 /* any of the reasons above */
+
+#define FCGI_SF_BEGIN_SENT 0x00000100 /* a BEGIN_REQUEST record was sent for this stream */
+#define FCGI_SF_OUTGOING_DATA 0x00000200 /* set whenever we've seen outgoing data */
+#define FCGI_SF_NOTIFIED 0x00000400 /* a paused stream was notified to try to send again */
+
+#define FCGI_SF_WANT_SHUTR 0x00001000 /* a stream couldn't shutr() (mux full/busy) */
+#define FCGI_SF_WANT_SHUTW 0x00002000 /* a stream couldn't shutw() (mux full/busy) */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *fstrm_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(FCGI_SF_ES_RCVD, _(FCGI_SF_ES_SENT, _(FCGI_SF_EP_SENT, _(FCGI_SF_ABRT_SENT,
+ _(FCGI_SF_BLK_MBUSY, _(FCGI_SF_BLK_MROOM,
+ _(FCGI_SF_BEGIN_SENT, _(FCGI_SF_OUTGOING_DATA, _(FCGI_SF_NOTIFIED,
+ _(FCGI_SF_WANT_SHUTR, _(FCGI_SF_WANT_SHUTW)))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* FCGI connection state (fcgi_conn->state) */
+enum fcgi_conn_st {
+ FCGI_CS_INIT = 0, /* init done, waiting for sending GET_VALUES record */
+ FCGI_CS_SETTINGS, /* GET_VALUES sent, waiting for the GET_VALUES_RESULT record */
+ FCGI_CS_RECORD_H, /* GET_VALUES_RESULT received, waiting for a record header */
+ FCGI_CS_RECORD_D, /* Record header OK, waiting for a record data */
+ FCGI_CS_RECORD_P, /* Record processed, remains the padding */
+ FCGI_CS_CLOSED, /* abort requests if necessary and close the connection ASAP */
+ FCGI_CS_ENTRIES
+} __attribute__((packed));
+
+/* returns a fconn state as an abbreviated 3-letter string, or "???" if unknown */
+static inline const char *fconn_st_to_str(enum fcgi_conn_st st)
+{
+ switch (st) {
+ case FCGI_CS_INIT : return "INI";
+ case FCGI_CS_SETTINGS : return "STG";
+ case FCGI_CS_RECORD_H : return "RDH";
+ case FCGI_CS_RECORD_D : return "RDD";
+ case FCGI_CS_RECORD_P : return "RDP";
+ case FCGI_CS_CLOSED : return "CLO";
+ default : return "???";
+ }
+}
+
+/* FCGI stream state, in fcgi_strm->state */
+enum fcgi_strm_st {
+ FCGI_SS_IDLE = 0,
+ FCGI_SS_OPEN,
+ FCGI_SS_HREM, // half-closed(remote)
+ FCGI_SS_HLOC, // half-closed(local)
+ FCGI_SS_ERROR,
+ FCGI_SS_CLOSED,
+ FCGI_SS_ENTRIES
+} __attribute__((packed));
+
+
+/* returns a fstrm state as an abbreviated 3-letter string, or "???" if unknown */
+static inline const char *fstrm_st_to_str(enum fcgi_strm_st st)
+{
+ switch (st) {
+ case FCGI_SS_IDLE : return "IDL";
+ case FCGI_SS_OPEN : return "OPN";
+ case FCGI_SS_HREM : return "RCL";
+ case FCGI_SS_HLOC : return "HCL";
+ case FCGI_SS_ERROR : return "ERR";
+ case FCGI_SS_CLOSED : return "CLO";
+ default : return "???";
+ }
+}
+
+
+#endif /* _HAPROXY_MUX_FCGI_T_H */
diff --git a/include/haproxy/mux_h1-t.h b/include/haproxy/mux_h1-t.h
new file mode 100644
index 0000000..2f49a49
--- /dev/null
+++ b/include/haproxy/mux_h1-t.h
@@ -0,0 +1,160 @@
+/*
+ * include/haproxy/mux_h1-t.h
+ * Definitions for basic H1 mux internal types, constants and flags.
+ *
+ * Copyright 2022 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MUX_H1_T_H
+#define _HAPROXY_MUX_H1_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/show_flags-t.h>
+
+/**** Connection flags (32 bit), in h1c->flags ****/
+#define H1C_F_NONE 0x00000000
+
+/* Flags indicating why writing output data are blocked */
+#define H1C_F_OUT_ALLOC 0x00000001 /* mux is blocked on lack of output buffer */
+#define H1C_F_OUT_FULL 0x00000002 /* mux is blocked on output buffer full */
+/* 0x00000004 - 0x00000008 unused */
+
+/* Flags indicating why reading input data are blocked. */
+#define H1C_F_IN_ALLOC 0x00000010 /* mux is blocked on lack of input buffer */
+#define H1C_F_IN_FULL 0x00000020 /* mux is blocked on input buffer full */
+#define H1C_F_IN_SALLOC 0x00000040 /* mux is blocked on lack of stream's request buffer */
+/* 0x00000080 unused */
+
+#define H1C_F_EOS 0x00000100 /* End-of-stream seen on the H1 connection (read0 detected) */
+#define H1C_F_ERR_PENDING 0x00000200 /* A write error was detected (block sends but not reads) */
+#define H1C_F_ERROR          0x00000400 /* A read error was detected (handled as an abort) */
+#define H1C_F_SILENT_SHUT    0x00000800 /* when the H1C is closed, a silent (or dirty) shutdown must be performed */
+#define H1C_F_ABRT_PENDING 0x00001000 /* An error must be sent (previous attempt failed) and H1 connection must be closed ASAP */
+#define H1C_F_ABRTED         0x00002000 /* The H1 connection was aborted (NOTE(review): original comment duplicated H1C_F_ABRT_PENDING's) */
+#define H1C_F_WANT_FASTFWD 0x00004000 /* Don't read into a buffer because we want to fast forward data */
+#define H1C_F_WAIT_NEXT_REQ 0x00008000 /* waiting for the next request to start, use keep-alive timeout */
+#define H1C_F_UPG_H2C 0x00010000 /* set if an upgrade to h2 should be done */
+#define H1C_F_CO_MSG_MORE 0x00020000 /* set if CO_SFL_MSG_MORE must be set when calling xprt->snd_buf() */
+#define H1C_F_CO_STREAMER 0x00040000 /* set if CO_SFL_STREAMER must be set when calling xprt->snd_buf() */
+#define H1C_F_CANT_FASTFWD 0x00080000 /* Fast-forwarding is not supported (exclusive with WANT_FASTFWD) */
+
+/* 0x00100000 - 0x40000000 unused */
+#define H1C_F_IS_BACK 0x80000000 /* Set on outgoing connection */
+
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *h1c_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(H1C_F_OUT_ALLOC, _(H1C_F_OUT_FULL,
+ _(H1C_F_IN_ALLOC, _(H1C_F_IN_FULL, _(H1C_F_IN_SALLOC,
+ _(H1C_F_EOS, _(H1C_F_ERR_PENDING, _(H1C_F_ERROR,
+ _(H1C_F_SILENT_SHUT, _(H1C_F_ABRT_PENDING, _(H1C_F_ABRTED,
+ _(H1C_F_WANT_FASTFWD, _(H1C_F_WAIT_NEXT_REQ, _(H1C_F_UPG_H2C, _(H1C_F_CO_MSG_MORE,
+ _(H1C_F_CO_STREAMER, _(H1C_F_CANT_FASTFWD, _(H1C_F_IS_BACK))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+
+/**** H1 stream flags (32 bit), in h1s->flags ****/
+#define H1S_F_NONE 0x00000000
+
+#define H1S_F_RX_BLK 0x00100000 /* Don't process more input data, waiting sync with output side */
+#define H1S_F_TX_BLK 0x00200000 /* Don't process more output data, waiting sync with input side */
+#define H1S_F_RX_CONGESTED 0x00000004 /* Cannot process input data RX path is congested (waiting for more space in channel's buffer) */
+
+/* 0x00000008 unused */
+#define H1S_F_WANT_KAL 0x00000010
+#define H1S_F_WANT_TUN 0x00000020
+#define H1S_F_WANT_CLO 0x00000040
+#define H1S_F_WANT_MSK 0x00000070
+#define H1S_F_NOT_FIRST 0x00000080 /* The H1 stream is not the first one */
+#define H1S_F_BODYLESS_RESP 0x00000100 /* Bodyless response message */
+
+#define H1S_F_INTERNAL_ERROR 0x00000200 /* Set when an internal error occurred during the message parsing */
+#define H1S_F_NOT_IMPL_ERROR 0x00000400 /* Set when a feature is not implemented during the message parsing */
+#define H1S_F_PARSING_ERROR 0x00000800 /* Set when an error occurred during the message parsing */
+#define H1S_F_PROCESSING_ERROR 0x00001000 /* Set when an error occurred during the message xfer */
+#define H1S_F_ERROR_MASK 0x00003800 /* stream error mask */
+
+#define H1S_F_HAVE_SRV_NAME 0x00002000 /* Set during output process if the server name header was added to the request */
+#define H1S_F_HAVE_O_CONN 0x00004000 /* Set during output process to know connection mode was processed */
+#define H1S_F_HAVE_WS_KEY 0x00008000 /* Set during output process to know WS key was found or generated */
+#define H1S_F_HAVE_CLEN 0x00010000 /* Set during output process to know C*L header was found or generated */
+#define H1S_F_HAVE_CHNK 0x00020000 /* Set during output process to know "T-E; chunk" header was found or generated */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *h1s_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(H1S_F_RX_BLK, _(H1S_F_TX_BLK, _(H1S_F_RX_CONGESTED,
+ _(H1S_F_WANT_KAL, _(H1S_F_WANT_TUN, _(H1S_F_WANT_CLO,
+ _(H1S_F_NOT_FIRST, _(H1S_F_BODYLESS_RESP,
+ _(H1S_F_INTERNAL_ERROR, _(H1S_F_NOT_IMPL_ERROR, _(H1S_F_PARSING_ERROR, _(H1S_F_PROCESSING_ERROR,
+ _(H1S_F_HAVE_SRV_NAME, _(H1S_F_HAVE_O_CONN, _(H1S_F_HAVE_WS_KEY,
+ _(H1S_F_HAVE_CLEN, _(H1S_F_HAVE_CHNK)))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* H1 connection state, in h1c->state */
+enum h1_cs {
+	H1_CS_IDLE,         /* IDLE connection. A freshly open or a reusable connection (H1S is NULL) */
+ H1_CS_EMBRYONIC, /* Connection is waiting for the message headers (H1S is not NULL, not attached to a SC - Frontend connection only) */
+ H1_CS_UPGRADING, /* TCP>H1 upgrade in-progress (H1S is not NULL and attached to a SC - Frontend connection only) */
+ H1_CS_RUNNING, /* Connection fully established and the H1S is processing data (H1S is not NULL and attached to a SC) */
+ H1_CS_CLOSING, /* Send pending outgoing data and close the connection ASAP (H1S may be NULL) */
+ H1_CS_CLOSED, /* Connection must be closed now and H1C must be released (H1S is NULL) */
+ H1_CS_ENTRIES,
+} __attribute__((packed));
+
+
+/**** tiny state decoding functions for debug helpers ****/
+
+/* returns a h1c state as an abbreviated 3-letter string, or "???" if unknown */
+static inline const char *h1c_st_to_str(enum h1_cs st)
+{
+ switch (st) {
+ case H1_CS_IDLE: return "IDL";
+ case H1_CS_EMBRYONIC: return "EMB";
+ case H1_CS_UPGRADING: return "UPG";
+ case H1_CS_RUNNING: return "RUN";
+ case H1_CS_CLOSING: return "CLI";
+ case H1_CS_CLOSED: return "CLD";
+ default: return "???";
+ }
+}
+
+
+#endif /* _HAPROXY_MUX_H1_T_H */
diff --git a/include/haproxy/mux_h2-t.h b/include/haproxy/mux_h2-t.h
new file mode 100644
index 0000000..ccb40b2
--- /dev/null
+++ b/include/haproxy/mux_h2-t.h
@@ -0,0 +1,222 @@
+/*
+ * include/haproxy/mux_h2-t.h
+ * Definitions for basic H2 mux internal types, constants and flags.
+ *
+ * Copyright 2017-2022 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_MUX_H2_T_H
+#define _HAPROXY_MUX_H2_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/show_flags-t.h>
+
+/**** Connection flags (32 bit), in h2c->flags ****/
+
+#define H2_CF_NONE 0x00000000
+
+/* Flags indicating why writing to the mux is blocked. */
+#define H2_CF_MUX_MALLOC 0x00000001 // mux blocked on lack of connection's mux buffer
+#define H2_CF_MUX_MFULL 0x00000002 // mux blocked on connection's mux buffer full
+#define H2_CF_MUX_BLOCK_ANY 0x00000003 // aggregate of the mux flags above
+
+/* Flags indicating why writing to the demux is blocked.
+ * The first two ones directly affect the ability for the mux to receive data
+ * from the connection. The other ones affect the mux's ability to demux
+ * received data.
+ */
+#define H2_CF_DEM_DALLOC 0x00000004 // demux blocked on lack of connection's demux buffer
+#define H2_CF_DEM_DFULL 0x00000008 // demux blocked on connection's demux buffer full
+
+#define H2_CF_WAIT_INLIST 0x00000010 // there is at least one stream blocked by another stream in send_list/fctl_list
+#define H2_CF_DEM_MROOM 0x00000020 // demux blocked on lack of room in mux buffer
+#define H2_CF_DEM_SALLOC 0x00000040 // demux blocked on lack of stream's request buffer
+#define H2_CF_DEM_SFULL 0x00000080 // demux blocked on stream request buffer full
+#define H2_CF_DEM_TOOMANY 0x00000100 // demux blocked waiting for some stream connectors to leave
+#define H2_CF_DEM_BLOCK_ANY 0x000001E0 // aggregate of the demux flags above except DALLOC/DFULL
+ // (SHORT_READ is also excluded)
+
+#define H2_CF_DEM_SHORT_READ 0x00000200 // demux blocked on incomplete frame
+#define H2_CF_DEM_IN_PROGRESS 0x00000400 // demux in progress (dsi,dfl,dft are valid)
+
+/* other flags */
+#define H2_CF_MBUF_HAS_DATA 0x00000800 // some stream data (data, headers) still in mbuf
+#define H2_CF_GOAWAY_SENT 0x00001000 // a GOAWAY frame was successfully sent
+#define H2_CF_GOAWAY_FAILED 0x00002000 // a GOAWAY frame failed to be sent
+#define H2_CF_WAIT_FOR_HS 0x00004000 // We did check that at least a stream was waiting for handshake
+#define H2_CF_IS_BACK 0x00008000 // this is an outgoing connection
+#define H2_CF_WINDOW_OPENED 0x00010000 // demux increased window already advertised
+#define H2_CF_RCVD_SHUT 0x00020000 // a recv() attempt already failed on a shutdown
+#define H2_CF_END_REACHED 0x00040000 // pending data too short with RCVD_SHUT present
+
+#define H2_CF_RCVD_RFC8441 0x00100000 // settings from RFC8441 have been received indicating support for Extended CONNECT
+#define H2_CF_SHTS_UPDATED 0x00200000 // SETTINGS_HEADER_TABLE_SIZE updated
+#define H2_CF_DTSU_EMITTED 0x00400000 // HPACK Dynamic Table Size Update opcode emitted
+
+#define H2_CF_ERR_PENDING 0x00800000 // A write error was detected (block sends but not reads)
+#define H2_CF_ERROR 0x01000000 // A read error was detected (handled as an abort)
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *h2c_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(H2_CF_MUX_MALLOC, _(H2_CF_MUX_MFULL, _(H2_CF_DEM_DALLOC,
+ _(H2_CF_DEM_DFULL, _(H2_CF_WAIT_INLIST, _(H2_CF_DEM_MROOM,
+ _(H2_CF_DEM_SALLOC, _(H2_CF_DEM_SFULL, _(H2_CF_DEM_TOOMANY,
+ _(H2_CF_DEM_SHORT_READ, _(H2_CF_DEM_IN_PROGRESS, _(H2_CF_MBUF_HAS_DATA,
+ _(H2_CF_GOAWAY_SENT, _(H2_CF_GOAWAY_FAILED, _(H2_CF_WAIT_FOR_HS, _(H2_CF_IS_BACK,
+ _(H2_CF_WINDOW_OPENED, _(H2_CF_RCVD_SHUT, _(H2_CF_END_REACHED,
+ _(H2_CF_RCVD_RFC8441, _(H2_CF_SHTS_UPDATED, _(H2_CF_DTSU_EMITTED,
+ _(H2_CF_ERR_PENDING, _(H2_CF_ERROR))))))))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+
+/**** HTTP/2 stream flags (32 bit), in h2s->flags ****/
+
+#define H2_SF_NONE 0x00000000
+#define H2_SF_ES_RCVD 0x00000001
+#define H2_SF_ES_SENT 0x00000002
+
+#define H2_SF_RST_RCVD 0x00000004 // received RST_STREAM
+#define H2_SF_RST_SENT 0x00000008 // sent RST_STREAM
+
+/* stream flags indicating the reason the stream is blocked */
+#define H2_SF_BLK_MBUSY 0x00000010 // blocked waiting for mux access (transient)
+#define H2_SF_BLK_MROOM 0x00000020 // blocked waiting for room in the mux (must be in send list)
+#define H2_SF_BLK_MFCTL 0x00000040 // blocked due to mux fctl (must be in fctl list)
+#define H2_SF_BLK_SFCTL 0x00000080 // blocked due to stream fctl (must be in blocked list)
+#define H2_SF_BLK_ANY 0x000000F0 // any of the reasons above
+
+/* stream flags indicating how data is supposed to be sent */
+#define H2_SF_DATA_CLEN 0x00000100 // data sent using content-length
+#define H2_SF_BODYLESS_RESP 0x00000200 /* Bodyless response message */
+#define H2_SF_BODY_TUNNEL 0x00000400 // Attempt to establish a Tunnelled stream (the result depends on the status code)
+
+#define H2_SF_NOTIFIED 0x00000800 // a paused stream was notified to try to send again
+#define H2_SF_HEADERS_SENT 0x00001000 // a HEADERS frame was sent for this stream
+#define H2_SF_OUTGOING_DATA 0x00002000 // set whenever we've seen outgoing data
+
+#define H2_SF_HEADERS_RCVD 0x00004000 // a HEADERS frame was received for this stream
+
+#define H2_SF_WANT_SHUTR 0x00008000 // a stream couldn't shutr() (mux full/busy)
+#define H2_SF_WANT_SHUTW 0x00010000 // a stream couldn't shutw() (mux full/busy)
+
+#define H2_SF_EXT_CONNECT_SENT 0x00040000 // rfc 8441 an Extended CONNECT has been sent
+#define H2_SF_EXT_CONNECT_RCVD 0x00080000 // rfc 8441 an Extended CONNECT has been received and parsed
+
+#define H2_SF_TUNNEL_ABRT 0x00100000 // A tunnel attempt was aborted
+#define H2_SF_MORE_HTX_DATA 0x00200000 // more data expected from HTX
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *h2s_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(H2_SF_ES_RCVD, _(H2_SF_ES_SENT, _(H2_SF_RST_RCVD, _(H2_SF_RST_SENT,
+ _(H2_SF_BLK_MBUSY, _(H2_SF_BLK_MROOM, _(H2_SF_BLK_MFCTL,
+ _(H2_SF_BLK_SFCTL, _(H2_SF_DATA_CLEN, _(H2_SF_BODYLESS_RESP,
+ _(H2_SF_BODY_TUNNEL, _(H2_SF_NOTIFIED, _(H2_SF_HEADERS_SENT,
+ _(H2_SF_OUTGOING_DATA, _(H2_SF_HEADERS_RCVD, _(H2_SF_WANT_SHUTR,
+ _(H2_SF_WANT_SHUTW, _(H2_SF_EXT_CONNECT_SENT, _(H2_SF_EXT_CONNECT_RCVD,
+ _(H2_SF_TUNNEL_ABRT, _(H2_SF_MORE_HTX_DATA)))))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+
+/* H2 connection state, in h2c->st0 */
+enum h2_cs {
+ H2_CS_PREFACE, // init done, waiting for connection preface
+ H2_CS_SETTINGS1, // preface OK, waiting for first settings frame
+ H2_CS_FRAME_H, // first settings frame ok, waiting for frame header
+ H2_CS_FRAME_P, // frame header OK, waiting for frame payload
+ H2_CS_FRAME_A, // frame payload OK, trying to send ACK frame
+ H2_CS_FRAME_E, // frame payload OK, trying to send RST frame
+ H2_CS_ERROR, // send GOAWAY(errcode) and close the connection ASAP
+ H2_CS_ERROR2, // GOAWAY(errcode) sent, close the connection ASAP
+ H2_CS_ENTRIES // must be last
+} __attribute__((packed));
+
+/* H2 stream state, in h2s->st */
+enum h2_ss {
+ H2_SS_IDLE = 0, // idle
+ H2_SS_RLOC, // reserved(local)
+ H2_SS_RREM, // reserved(remote)
+ H2_SS_OPEN, // open
+ H2_SS_HREM, // half-closed(remote)
+ H2_SS_HLOC, // half-closed(local)
+ H2_SS_ERROR, // an error needs to be sent using RST_STREAM
+ H2_SS_CLOSED, // closed
+ H2_SS_ENTRIES // must be last
+} __attribute__((packed));
+
+
+/* 32 buffers: one for the ring's root, rest for the mbuf itself */
+#define H2C_MBUF_CNT 32
+
+/**** tiny state decoding functions for debug helpers ****/
+
+/* returns a h2c state as an abbreviated 3-letter string, or "???" if unknown */
+static inline const char *h2c_st_to_str(enum h2_cs st)
+{
+ switch (st) {
+ case H2_CS_PREFACE: return "PRF";
+ case H2_CS_SETTINGS1: return "STG";
+ case H2_CS_FRAME_H: return "FRH";
+ case H2_CS_FRAME_P: return "FRP";
+ case H2_CS_FRAME_A: return "FRA";
+ case H2_CS_FRAME_E: return "FRE";
+ case H2_CS_ERROR: return "ERR";
+ case H2_CS_ERROR2: return "ER2";
+ default: return "???";
+ }
+}
+
+/* returns a h2s state as an abbreviated 3-letter string, or "???" if unknown */
+static inline const char *h2s_st_to_str(enum h2_ss st)
+{
+ switch (st) {
+ case H2_SS_IDLE: return "IDL"; // idle
+ case H2_SS_RLOC: return "RSL"; // reserved local
+ case H2_SS_RREM: return "RSR"; // reserved remote
+ case H2_SS_OPEN: return "OPN"; // open
+ case H2_SS_HREM: return "HCR"; // half-closed remote
+ case H2_SS_HLOC: return "HCL"; // half-closed local
+ case H2_SS_ERROR : return "ERR"; // error
+ case H2_SS_CLOSED: return "CLO"; // closed
+ default: return "???";
+ }
+}
+
+#endif /* _HAPROXY_MUX_H2_T_H */
diff --git a/include/haproxy/mux_quic-t.h b/include/haproxy/mux_quic-t.h
new file mode 100644
index 0000000..abfc20a
--- /dev/null
+++ b/include/haproxy/mux_quic-t.h
@@ -0,0 +1,204 @@
+#ifndef _HAPROXY_MUX_QUIC_T_H
+#define _HAPROXY_MUX_QUIC_T_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/htx-t.h>
+#include <haproxy/list-t.h>
+#include <haproxy/ncbuf-t.h>
+#include <haproxy/quic_frame-t.h>
+#include <haproxy/quic_stream-t.h>
+#include <haproxy/stconn-t.h>
+
+/* Stream types */
+enum qcs_type {
+ QCS_CLT_BIDI,
+ QCS_SRV_BIDI,
+ QCS_CLT_UNI,
+ QCS_SRV_UNI,
+
+ /* Must be the last one */
+ QCS_MAX_TYPES
+};
+
+#define QC_CF_ERRL 0x00000001 /* fatal error detected locally, connection should be closed soon */
+#define QC_CF_ERRL_DONE 0x00000002 /* local error properly handled, connection can be released */
+#define QC_CF_BLK_MFCTL 0x00000004 /* sending blocked due to connection flow-control */
+#define QC_CF_CONN_FULL 0x00000008 /* no stream buffers available on connection */
+#define QC_CF_APP_SHUT 0x00000010 /* Application layer shutdown done. */
+#define QC_CF_ERR_CONN 0x00000020 /* fatal error reported by transport layer */
+
+struct qcc {
+ struct connection *conn;
+ uint64_t nb_sc; /* number of attached stream connectors */
+ uint64_t nb_hreq; /* number of in-progress http requests */
+ uint32_t flags; /* QC_CF_* */
+
+ /* flow-control fields set by us enforced on our side. */
+ struct {
+ struct list frms; /* prepared frames related to flow-control */
+
+ uint64_t ms_bidi_init; /* max initial sub-ID of bidi stream allowed for the peer */
+ uint64_t ms_bidi; /* max sub-ID of bidi stream allowed for the peer */
+ uint64_t cl_bidi_r; /* total count of closed remote bidi stream since last MAX_STREAMS emission */
+
+ uint64_t ms_uni; /* max sub-ID of uni stream allowed for the peer */
+
+ uint64_t msd_bidi_l; /* initial max-stream-data on local bidi streams */
+ uint64_t msd_bidi_r; /* initial max-stream-data on remote bidi streams */
+ uint64_t msd_uni_r; /* initial max-stream-data on remote uni streams */
+
+ uint64_t md; /* current max-data allowed for the peer */
+ uint64_t md_init; /* initial max-data */
+ uint64_t offsets_recv; /* sum of offsets received */
+ uint64_t offsets_consume; /* sum of offsets consumed */
+ } lfctl;
+
+ /* flow-control fields set by the peer which we must respect. */
+ struct {
+ uint64_t md; /* connection flow control limit updated on MAX_DATA frames reception */
+ uint64_t msd_bidi_l; /* initial max-stream-data from peer on local bidi streams */
+ uint64_t msd_bidi_r; /* initial max-stream-data from peer on remote bidi streams */
+ uint64_t msd_uni_l; /* initial max-stream-data from peer on local uni streams */
+ } rfctl;
+
+ struct {
+ uint64_t offsets; /* sum of all offsets prepared */
+ uint64_t sent_offsets; /* sum of all offset sent */
+ } tx;
+
+ uint64_t largest_bidi_r; /* largest remote bidi stream ID opened. */
+ uint64_t largest_uni_r; /* largest remote uni stream ID opened. */
+ uint64_t next_bidi_l; /* next stream ID to use for local bidi stream */
+ uint64_t next_uni_l; /* next stream ID to use for local uni stream */
+
+ struct eb_root streams_by_id; /* all active streams by their ID */
+
+ struct list send_retry_list; /* list of qcs eligible to send retry */
+ struct list send_list; /* list of qcs ready to send (STREAM, STOP_SENDING or RESET_STREAM emission) */
+
+ struct wait_event wait_event; /* To be used if we're waiting for I/Os */
+
+ struct proxy *proxy;
+
+ /* haproxy timeout management */
+ struct task *task;
+ struct list opening_list; /* list of not already attached streams (http-request timeout) */
+ int timeout;
+ int shut_timeout;
+ int idle_start; /* base time for http-keep-alive timeout */
+ struct quic_err err; /* code for locally detected error */
+
+ const struct qcc_app_ops *app_ops;
+ void *ctx; /* Application layer context */
+};
+
+#define QC_SF_NONE 0x00000000
+#define QC_SF_SIZE_KNOWN 0x00000001 /* last frame received for this stream */
+#define QC_SF_FIN_STREAM 0x00000002 /* FIN bit must be set for last frame of the stream */
+#define QC_SF_BLK_MROOM 0x00000004 /* app layer is blocked waiting for room in the qcs.tx.buf */
+#define QC_SF_DETACH 0x00000008 /* sc is detached but there is remaining data to send */
+#define QC_SF_BLK_SFCTL 0x00000010 /* stream blocked due to stream flow control limit */
+#define QC_SF_DEM_FULL 0x00000020 /* demux blocked on request channel buffer full */
+#define QC_SF_READ_ABORTED 0x00000040 /* Rx closed using STOP_SENDING*/
+#define QC_SF_TO_RESET 0x00000080 /* a RESET_STREAM must be sent */
+#define QC_SF_HREQ_RECV 0x00000100 /* a full HTTP request has been received */
+#define QC_SF_TO_STOP_SENDING 0x00000200 /* a STOP_SENDING must be sent */
+#define QC_SF_UNKNOWN_PL_LENGTH 0x00000400 /* HTX EOM may be missing from the stream layer */
+#define QC_SF_RECV_RESET 0x00000800 /* a RESET_STREAM was received */
+
+/* Maximum size of stream Rx buffer. */
+#define QC_S_RX_BUF_SZ (global.tune.bufsize - NCB_RESERVED_SZ)
+
+/* QUIC stream states
+ *
+ * On initialization a stream is put on idle state. It is opened as soon as
+ * data has been successfully sent or received on it.
+ *
+ * A bidirectional stream has two channels which can be closed separately. The
+ * local channel is closed when the STREAM frame with FIN or a RESET_STREAM has
+ * been emitted. The remote channel is closed as soon as all data from the peer
+ * has been received. The stream goes instantly to the close state once both
+ * channels are closed.
+ *
+ * A unidirectional stream has only one channel of communication. Thus, it does
+ * not use half closed states and transitions directly from open to close state.
+ */
+enum qcs_state {
+ QC_SS_IDLE = 0, /* initial state */
+ QC_SS_OPEN, /* opened */
+ QC_SS_HLOC, /* half-closed local */
+ QC_SS_HREM, /* half-closed remote */
+ QC_SS_CLO, /* closed */
+} __attribute__((packed));
+
+struct qcs {
+ struct qcc *qcc;
+ struct sedesc *sd;
+ uint32_t flags; /* QC_SF_* */
+ enum qcs_state st; /* QC_SS_* state */
+ void *ctx; /* app-ops context */
+
+ struct {
+ uint64_t offset; /* absolute current base offset of ncbuf */
+ uint64_t offset_max; /* maximum absolute offset received */
+ struct ncbuf ncbuf; /* receive buffer - can handle out-of-order offset frames */
+ struct buffer app_buf; /* receive buffer used by stconn layer */
+ uint64_t msd; /* current max-stream-data limit to enforce */
+ uint64_t msd_init; /* initial max-stream-data */
+ } rx;
+ struct {
+ uint64_t offset; /* last offset of data ready to be sent */
+ uint64_t sent_offset; /* last offset sent by transport layer */
+ struct buffer buf; /* transmit buffer before sending via xprt */
+ uint64_t msd; /* fctl bytes limit to respect on emission */
+ } tx;
+
+ struct eb64_node by_id;
+ uint64_t id;
+ struct qc_stream_desc *stream;
+
+ struct list el; /* element of qcc.send_retry_list */
+ struct list el_send; /* element of qcc.send_list */
+ struct list el_opening; /* element of qcc.opening_list */
+
+ struct wait_event wait_event;
+ struct wait_event *subs;
+
+ uint64_t err; /* error code to transmit via RESET_STREAM */
+
+ int start; /* base timestamp for http-request timeout */
+};
+
+/* Used as qcc_app_ops.close callback argument. */
+enum qcc_app_ops_close_side {
+ QCC_APP_OPS_CLOSE_SIDE_RD, /* Read channel closed (RESET_STREAM received). */
+ QCC_APP_OPS_CLOSE_SIDE_WR /* Write channel closed (STOP_SENDING received). */
+};
+
+/* QUIC application layer operations */
+struct qcc_app_ops {
+ int (*init)(struct qcc *qcc);
+ int (*attach)(struct qcs *qcs, void *conn_ctx);
+ ssize_t (*decode_qcs)(struct qcs *qcs, struct buffer *b, int fin);
+ size_t (*snd_buf)(struct qcs *qcs, struct buffer *buf, size_t count);
+ size_t (*nego_ff)(struct qcs *qcs, size_t count);
+ size_t (*done_ff)(struct qcs *qcs);
+ int (*close)(struct qcs *qcs, enum qcc_app_ops_close_side side);
+ void (*detach)(struct qcs *qcs);
+ int (*finalize)(void *ctx);
+ void (*shutdown)(void *ctx); /* Close a connection. */
+ void (*release)(void *ctx);
+ void (*inc_err_cnt)(void *ctx, int err_code);
+};
+
+#endif /* USE_QUIC */
+
+#endif /* _HAPROXY_MUX_QUIC_T_H */
diff --git a/include/haproxy/mux_quic.h b/include/haproxy/mux_quic.h
new file mode 100644
index 0000000..872c5ea
--- /dev/null
+++ b/include/haproxy/mux_quic.h
@@ -0,0 +1,116 @@
+#ifndef _HAPROXY_MUX_QUIC_H
+#define _HAPROXY_MUX_QUIC_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/list.h>
+#include <haproxy/mux_quic-t.h>
+#include <haproxy/stconn.h>
+
+void qcc_set_error(struct qcc *qcc, int err, int app);
+struct qcs *qcc_init_stream_local(struct qcc *qcc, int bidi);
+struct stconn *qcs_attach_sc(struct qcs *qcs, struct buffer *buf, char fin);
+int qcs_is_close_local(struct qcs *qcs);
+int qcs_is_close_remote(struct qcs *qcs);
+struct buffer *qcs_get_buf(struct qcs *qcs, struct buffer *bptr);
+
+int qcs_subscribe(struct qcs *qcs, int event_type, struct wait_event *es);
+void qcs_notify_recv(struct qcs *qcs);
+void qcs_notify_send(struct qcs *qcs);
+
+void qcc_emit_cc_app(struct qcc *qcc, int err, int immediate);
+void qcc_reset_stream(struct qcs *qcs, int err);
+void qcc_send_stream(struct qcs *qcs, int urg);
+void qcc_abort_stream_read(struct qcs *qcs);
+int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
+ char fin, char *data);
+int qcc_recv_max_data(struct qcc *qcc, uint64_t max);
+int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max);
+int qcc_recv_reset_stream(struct qcc *qcc, uint64_t id, uint64_t err, uint64_t final_size);
+int qcc_recv_stop_sending(struct qcc *qcc, uint64_t id, uint64_t err);
+void qcc_streams_sent_done(struct qcs *qcs, uint64_t data, uint64_t offset);
+
+/* Bit shift to get the stream sub ID for internal use which is obtained
+ * shifting the stream IDs by this value, knowing that the
+ * QCS_ID_TYPE_SHIFT less significant bits identify the stream ID
+ * types (client initiated bidirectional, server initiated bidirectional,
+ * client initiated unidirectional, server initiated unidirectional).
+ * Note that there is no reference to such stream sub IDs in the RFC.
+ */
+#define QCS_ID_TYPE_MASK 0x3
+#define QCS_ID_TYPE_SHIFT 2
+/* The less significant bit of a stream ID is set for a server initiated stream */
+#define QCS_ID_SRV_INTIATOR_BIT 0x1
+/* This bit is set for unidirectional streams */
+#define QCS_ID_DIR_BIT 0x2
+
+static inline enum qcs_type qcs_id_type(uint64_t id)
+{
+ return id & QCS_ID_TYPE_MASK;
+}
+
+/* Return true if stream has been opened locally. */
+static inline int quic_stream_is_local(struct qcc *qcc, uint64_t id)
+{
+ return conn_is_back(qcc->conn) == !(id & QCS_ID_SRV_INTIATOR_BIT);
+}
+
+/* Return true if stream is opened by peer. */
+static inline int quic_stream_is_remote(struct qcc *qcc, uint64_t id)
+{
+ return !quic_stream_is_local(qcc, id);
+}
+
+static inline int quic_stream_is_uni(uint64_t id)
+{
+ return id & QCS_ID_DIR_BIT;
+}
+
+static inline int quic_stream_is_bidi(uint64_t id)
+{
+ return !quic_stream_is_uni(id);
+}
+
+static inline char *qcs_st_to_str(enum qcs_state st)
+{
+ switch (st) {
+ case QC_SS_IDLE: return "IDL";
+ case QC_SS_OPEN: return "OPN";
+ case QC_SS_HLOC: return "HCL";
+ case QC_SS_HREM: return "HCR";
+ case QC_SS_CLO: return "CLO";
+ default: return "???";
+ }
+}
+
+int qcc_install_app_ops(struct qcc *qcc, const struct qcc_app_ops *app_ops);
+
+/* Register <qcs> stream for http-request timeout. If the stream is not yet
+ * attached in the configured delay, qcc timeout task will be triggered. This
+ * means the full header section was not received in time.
+ *
+ * This function should be called by the application protocol layer on request
+ * streams initialization.
+ */
+static inline void qcs_wait_http_req(struct qcs *qcs)
+{
+ struct qcc *qcc = qcs->qcc;
+
+ /* A stream cannot be registered several times. */
+ BUG_ON_HOT(tick_isset(qcs->start));
+ qcs->start = now_ms;
+
+ /* qcc.opening_list size is limited by flow-control so no custom
+ * restriction is needed here.
+ */
+ LIST_APPEND(&qcc->opening_list, &qcs->el_opening);
+}
+
+#endif /* USE_QUIC */
+
+#endif /* _HAPROXY_MUX_QUIC_H */
diff --git a/include/haproxy/mworker-t.h b/include/haproxy/mworker-t.h
new file mode 100644
index 0000000..3137ec0
--- /dev/null
+++ b/include/haproxy/mworker-t.h
@@ -0,0 +1,51 @@
+/*
+ * include/haproxy/mworker-t.h
+ * Master Worker type definitions.
+ *
+ * Copyright HAProxy Technologies 2019 - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_MWORKER_T_H_
+#define _HAPROXY_MWORKER_T_H_
+
+#include <haproxy/list.h>
+#include <haproxy/signal-t.h>
+
+/* options for mworker_proc */
+
+#define PROC_O_TYPE_MASTER 0x00000001
+#define PROC_O_TYPE_WORKER 0x00000002
+#define PROC_O_TYPE_PROG 0x00000004
+/* 0x00000008 unused */
+#define PROC_O_LEAVING 0x00000010 /* this process should be leaving */
+/* 0x00000020 to 0x00000080 unused */
+#define PROC_O_START_RELOAD 0x00000100 /* Start the process even if the master was re-executed */
+
+/*
+ * Structure used to describe the processes in master worker mode
+ */
+struct server;
+struct mworker_proc {
+ int pid;
+ int options;
+ char *id;
+ char **command;
+ char *path;
+ char *version;
+ int ipc_fd[2]; /* 0 is master side, 1 is worker side */
+ int reloads;
+ int failedreloads; /* number of failed reloads since the last successful one */
+ int timestamp;
+ struct server *srv; /* the server entry in the master proxy */
+ struct list list;
+ int uid;
+ int gid;
+};
+
+#endif /* _HAPROXY_MWORKER_T_H_ */
diff --git a/include/haproxy/mworker.h b/include/haproxy/mworker.h
new file mode 100644
index 0000000..c9dd840
--- /dev/null
+++ b/include/haproxy/mworker.h
@@ -0,0 +1,48 @@
+/*
+ * include/haproxy/mworker.h
+ * Master Worker function prototypes.
+ *
+ * Copyright HAProxy Technologies 2019 - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_MWORKER_H_
+#define _HAPROXY_MWORKER_H_
+
+#include <haproxy/mworker-t.h>
+#include <haproxy/signal-t.h>
+
+extern struct mworker_proc *proc_self;
+
+void mworker_proc_list_to_env(void);
+int mworker_env_to_proc_list(void);
+
+
+void mworker_block_signals(void);
+void mworker_unblock_signals(void);
+
+void mworker_broadcast_signal(struct sig_handler *sh);
+void mworker_catch_sighup(struct sig_handler *sh);
+void mworker_catch_sigterm(struct sig_handler *sh);
+void mworker_catch_sigchld(struct sig_handler *sh);
+
+void mworker_accept_wrapper(int fd);
+
+void mworker_cleanlisteners(void);
+
+int mworker_child_nb(void);
+
+int mworker_ext_launch_all(void);
+
+void mworker_kill_max_reloads(int sig);
+
+struct mworker_proc *mworker_proc_new();
+void mworker_free_child(struct mworker_proc *);
+void mworker_cleanup_proc();
+
+#endif /* _HAPROXY_MWORKER_H_ */
diff --git a/include/haproxy/namespace-t.h b/include/haproxy/namespace-t.h
new file mode 100644
index 0000000..fe46577
--- /dev/null
+++ b/include/haproxy/namespace-t.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/namespace-t.h
+ * Linux network namespaces types definitions
+ *
+ * Copyright (C) 2014 Tamas Kovacs, Sarkozi Laszlo, Krisztian Kovacs
+ * Copyright (C) 2015-2020 Willy Tarreau
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_NAMESPACE_T_H
+#define _HAPROXY_NAMESPACE_T_H
+
+#include <import/ebtree-t.h>
+#include <haproxy/api-t.h>
+
+/* the struct is just empty if namespaces are not supported */
+struct netns_entry
+{
+#ifdef USE_NS
+ struct ebpt_node node;
+ size_t name_len;
+ int fd;
+#endif
+};
+
+#endif /* _HAPROXY_NAMESPACE_T_H */
diff --git a/include/haproxy/namespace.h b/include/haproxy/namespace.h
new file mode 100644
index 0000000..2d6b6f8
--- /dev/null
+++ b/include/haproxy/namespace.h
@@ -0,0 +1,47 @@
+/*
+ * include/haproxy/namespace.h
+ * Linux network namespaces management
+ *
+ * Copyright (C) 2014 Tamas Kovacs, Sarkozi Laszlo, Krisztian Kovacs
+ * Copyright (C) 2015-2020 Willy Tarreau
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_NAMESPACE_H
+#define _HAPROXY_NAMESPACE_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <import/ebistree.h>
+#include <haproxy/namespace-t.h>
+
+#ifdef USE_NS
+
+int my_socketat(const struct netns_entry *ns, int domain, int type, int protocol);
+struct netns_entry* netns_store_insert(const char *ns_name);
+const struct netns_entry* netns_store_lookup(const char *ns_name, size_t ns_name_len);
+int netns_init(void);
+
+#else /* no namespace support */
+
+static inline int my_socketat(const struct netns_entry *ns, int domain, int type, int protocol)
+{
+ return socket(domain, type, protocol);
+}
+
+#endif /* USE_NS */
+
+#endif /* _HAPROXY_NAMESPACE_H */
diff --git a/include/haproxy/ncbuf-t.h b/include/haproxy/ncbuf-t.h
new file mode 100644
index 0000000..0dd958f
--- /dev/null
+++ b/include/haproxy/ncbuf-t.h
@@ -0,0 +1,104 @@
+#ifndef _HAPROXY_NCBUF_T_H
+#define _HAPROXY_NCBUF_T_H
+
+/* **** public documentation ****
+ *
+ * <ncbuf> stands for non-contiguous circular buffer. This type can be used to
+ * store data in a non-linear way with gaps between them. The buffer is
+ * circular and so data may wrap.
+ *
+ * The API of <ncbuf> is split in two parts. Please refer to the public API
+ * declared in this header file which should cover all the needs.
+ *
+ * To minimize the memory footprint, size of data and gaps are inserted in the
+ * gaps themselves. This way <ncbuf> does not need to maintain a separate list
+ * of data offsets in a dedicated structure. However, this puts some limitations
+ * on the buffer usage that the user needs to know.
+ *
+ * First, a space will always be reserved in the allocated buffer area to store
+ * the size of the first data block. Use ncb_size(buf) to retrieve the usable
+ * size of the allocated buffer excluding the reserved space.
+ *
+ * Second, addition and deletion operations are constrained and may be impossible if
+ * a minimal gap size between data is not respected. A caller must always
+ * inspect the return values of these functions. To limit these errors and
+ * improve the buffer performance, <ncbuf> should be reserved for use-cases
+ * where the number of formed gaps is kept minimal and evenly spread.
+ */
+
+/* **** internal documentation ****
+ *
+ * This section is useful to users who need to understand how ncbuf are
+ * implemented.
+ *
+ * Public and internal functions all shared a common abstraction of the buffer.
+ * The buffer content is represented as a list of blocks, alternating between
+ * DATA and GAP blocks. This simplifies the buffer examination loop and
+ * insertion/deletion. Note that this list of blocks is not stored in the
+ * buffer structure.
+ *
+ * The buffer is considered to always start with a DATA block. The size of this
+ * block is stored just before <head> which is the pointer for offset 0. This
+ * space will always be reserved for this usage. It can be accessed through
+ * ncb_int_head(buf). If the buffer has no data at head, the reserved space
+ * will simply contain the value 0, and will be followed by a gap.
+ *
+ * A gap always contains the size of the gap itself and the size of the next
+ * data block. Here is a small representation of a gap stored at offset <x>
+ * before a data block at offset <y>.
+ *
+ * x y
+ * ------------------------------------------------------------
+ * xxxxxx| GAP-SZ | DATA-SZ | | xxxxxxxxxxxxx...
+ * ------------------------------------------------------------
+ * | -------- GAP-SZ -------------- > | --- DATA-SZ --->
+ *
+ * This means that a gap must be at least big enough to store two sizes.
+ * However, there is an optimization when the last block of the buffer is a
+ * gap. In this case, there is no minimal size for this block. If the gap is
+ * too small, the two sizes won't be stored in it. This block is considered
+ * to be a reduced gap. The block API will detect such a gap if stored at an
+ * offset near the end of the buffer.
+ *
+ */
+
+#include <inttypes.h>
+
+/* ncb_sz_t is the basic type used in ncbuf to represent data and gap sizes.
+ * Use a bigger type to extend the maximum data size supported in the buffer.
+ * On the other hand, this also increases the minimal gap size which can
+ * cause more rejection for add/delete operations.
+ */
+typedef uint32_t ncb_sz_t;
+
+/* reserved size before head used to store first data block size */
+#define NCB_RESERVED_SZ (sizeof(ncb_sz_t))
+
+/* A gap contains its size and the size of the data following it. */
+#define NCB_GAP_MIN_SZ (sizeof(ncb_sz_t) * 2)
+#define NCB_GAP_SZ_OFF 0
+#define NCB_GAP_SZ_DATA_OFF (sizeof(ncb_sz_t))
+
+#define NCBUF_NULL ((struct ncbuf){ })
+
+struct ncbuf {
+ char *area;
+ ncb_sz_t size;
+ ncb_sz_t head;
+};
+
+enum ncb_ret {
+ NCB_RET_OK = 0, /* no error */
+
+ NCB_RET_GAP_SIZE, /* operation would create a too small gap */
+ NCB_RET_DATA_REJ, /* operation would overwrite data with different one */
+};
+
+/* Define how insert is conducted in regards with already stored data. */
+enum ncb_add_mode {
+ NCB_ADD_PRESERVE, /* keep the already stored data and only insert in gaps */
+ NCB_ADD_OVERWRT, /* overwrite old data with new ones */
+ NCB_ADD_COMPARE, /* compare before insert : if new data are different do not proceed */
+};
+
+#endif /* _HAPROXY_NCBUF_T_H */
diff --git a/include/haproxy/ncbuf.h b/include/haproxy/ncbuf.h
new file mode 100644
index 0000000..8972793
--- /dev/null
+++ b/include/haproxy/ncbuf.h
@@ -0,0 +1,54 @@
+#ifndef _HAPROXY_NCBUF_H
+#define _HAPROXY_NCBUF_H
+
+#include <haproxy/ncbuf-t.h>
+
+/* Returns non-zero if <buf> is the null buffer, i.e. it has no allocated
+ * storage (NCBUF_NULL zero-initializes the struct, leaving <size> at 0).
+ */
+static inline int ncb_is_null(const struct ncbuf *buf)
+{
+	return buf->size == 0;
+}
+
+void ncb_init(struct ncbuf *buf, ncb_sz_t head);
+struct ncbuf ncb_make(char *area, ncb_sz_t size, ncb_sz_t head);
+
+/* Returns start of allocated buffer area. */
+static inline char *ncb_orig(const struct ncbuf *buf)
+{
+ return buf->area;
+}
+
+/* Returns current head pointer into buffer area. */
+static inline char *ncb_head(const struct ncbuf *buf)
+{
+ return buf->area + buf->head;
+}
+
+/* Returns the first byte after the allocated buffer area. */
+static inline char *ncb_wrap(const struct ncbuf *buf)
+{
+ return buf->area + buf->size;
+}
+
+/* Returns the usable size of <buf> for data storage. This is the size of the
+ * allocated buffer without the reserved header space.
+ */
+static inline ncb_sz_t ncb_size(const struct ncbuf *buf)
+{
+ if (ncb_is_null(buf))
+ return 0;
+
+ return buf->size - NCB_RESERVED_SZ;
+}
+
+ncb_sz_t ncb_total_data(const struct ncbuf *buf);
+int ncb_is_empty(const struct ncbuf *buf);
+int ncb_is_full(const struct ncbuf *buf);
+int ncb_is_fragmented(const struct ncbuf *buf);
+
+ncb_sz_t ncb_data(const struct ncbuf *buf, ncb_sz_t offset);
+
+enum ncb_ret ncb_add(struct ncbuf *buf, ncb_sz_t off,
+ const char *data, ncb_sz_t len, enum ncb_add_mode mode);
+enum ncb_ret ncb_advance(struct ncbuf *buf, ncb_sz_t adv);
+
+#endif /* _HAPROXY_NCBUF_H */
diff --git a/include/haproxy/net_helper.h b/include/haproxy/net_helper.h
new file mode 100644
index 0000000..f019d30
--- /dev/null
+++ b/include/haproxy/net_helper.h
@@ -0,0 +1,387 @@
+/*
+ * include/haproxy/net_helper.h
+ * This file contains miscellaneous network helper functions.
+ *
+ * Copyright (C) 2017 Olivier Houchard
+ * Copyright (C) 2017-2020 Willy Tarreau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HAPROXY_NET_HELPER_H
+#define _HAPROXY_NET_HELPER_H
+
+#include <arpa/inet.h>
+#include <haproxy/api.h>
+#include <haproxy/intops.h>
+
+/* Functions to read/write various integers that may be unaligned */
+
+/* Read a uint16_t in native host order */
+static inline uint16_t read_u16(const void *p)
+{
+ const union { uint16_t u16; } __attribute__((packed))*u = p;
+ return u->u16;
+}
+
+/* Write a uint16_t in native host order */
+static inline void write_u16(void *p, const uint16_t u16)
+{
+ union { uint16_t u16; } __attribute__((packed))*u = p;
+ u->u16 = u16;
+}
+
+/* Read a uint32_t in native host order */
+static inline uint32_t read_u32(const void *p)
+{
+ const union { uint32_t u32; } __attribute__((packed))*u = p;
+ return u->u32;
+}
+
+/* Write a uint32_t in native host order */
+static inline void write_u32(void *p, const uint32_t u32)
+{
+ union { uint32_t u32; } __attribute__((packed))*u = p;
+ u->u32 = u32;
+}
+
+/* Read a uint64_t in native host order */
+static inline uint64_t read_u64(const void *p)
+{
+ const union { uint64_t u64; } __attribute__((packed))*u = p;
+ return u->u64;
+}
+
+/* Write a uint64_t in native host order */
+static inline void write_u64(void *p, const uint64_t u64)
+{
+ union { uint64_t u64; } __attribute__((packed))*u = p;
+ u->u64 = u64;
+}
+
+/* Read a void* in native host order */
+static inline void *read_ptr(const void *p)
+{
+ const union { void *ptr; } __attribute__((packed))*u = p;
+ return u->ptr;
+}
+
+/* Write a void* in native host order */
+static inline void write_ptr(void *p, const void *ptr)
+{
+ if (sizeof(ptr) == 4)
+ return write_u32(p, (uintptr_t)ptr);
+ else
+ return write_u64(p, (uintptr_t)ptr);
+}
+
+/* Read a possibly wrapping number of bytes <bytes> into destination <dst>. The
+ * first segment is composed of <s1> bytes at p1. The remaining byte(s), if any,
+ * are read from <p2>. <s1> may be zero and may also be larger than <bytes>. The
+ * caller is always responsible for providing enough bytes. Note: the function
+ * is purposely *not* marked inline to let the compiler decide what to do with
+ * it, because it's around 34 bytes long, placed on critical path but rarely
+ * called, and uses a lot of arguments if not inlined. The compiler will
+ * thus decide what's best to do with it depending on the context.
+ */
+static void readv_bytes(void *dst, const size_t bytes, const void *p1, size_t s1, const void *p2)
+{
+	size_t idx;
+
+	/* rebase <p2> so that, once <p1> is switched to it at idx == s1,
+	 * p1[idx] maps to byte (idx - s1) of the original <p2> segment.
+	 */
+	p2 -= s1;
+	for (idx = 0; idx < bytes; idx++) {
+		if (idx == s1)
+			p1 = p2;
+		((uint8_t *)dst)[idx] = ((const uint8_t *)p1)[idx];
+	}
+	/* this memory barrier is critical otherwise gcc may over-optimize this
+	 * code, completely removing it as well as any surrounding boundary
+	 * check (4.7.1..6.4.0)!
+	 */
+	__asm__ volatile("" ::: "memory");
+}
+
+/* Write a possibly wrapping number of bytes <bytes> from location <src>. The
+ * first segment is composed of <s1> bytes at p1. The remaining byte(s), if any,
+ * are written to <p2>. <s1> may be zero and may also be larger than <bytes>.
+ * The caller is always responsible for providing enough room. Note: the
+ * function is purposely *not* marked inline to let the compiler decide what to
+ * do with it, because it's around 34 bytes long, placed on critical path but
+ * rarely called, and uses a lot of arguments if not inlined. The compiler
+ * will thus decide what's best to do with it depending on the context.
+ */
+static void writev_bytes(const void *src, const size_t bytes, void *p1, size_t s1, void *p2)
+{
+	size_t idx;
+
+	/* rebase <p2> so that, once <p1> is switched to it at idx == s1,
+	 * p1[idx] maps to byte (idx - s1) of the original <p2> segment.
+	 */
+	p2 -= s1;
+	for (idx = 0; idx < bytes; idx++) {
+		if (idx == s1)
+			p1 = p2;
+		((uint8_t *)p1)[idx] = ((const uint8_t *)src)[idx];
+	}
+}
+
+/* Read a possibly wrapping uint16_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint16_t readv_u16(const void *p1, size_t s1, const void *p2)
+{
+	if (unlikely(s1 == 1)) {
+		/* value straddles both segments: one byte from each.
+		 * NOTE(review): <u16> is volatile, presumably to keep gcc
+		 * from over-optimizing the byte stores as described in
+		 * readv_bytes() above -- confirm before removing.
+		 */
+		volatile uint16_t u16;
+
+		((uint8_t *)&u16)[0] = *(uint8_t *)p1;
+		((uint8_t *)&u16)[1] = *(uint8_t *)p2;
+		return u16;
+	}
+	else {
+		const union { uint16_t u16; } __attribute__((packed)) *u;
+
+		/* the whole value lies within a single segment */
+		u = (s1 == 0) ? p2 : p1;
+		return u->u16;
+	}
+}
+
+/* Write a possibly wrapping uint16_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_u16(void *p1, size_t s1, void *p2, const uint16_t u16)
+{
+ union { uint16_t u16; } __attribute__((packed)) *u;
+
+ if (unlikely(s1 == 1)) {
+ *(uint8_t *)p1 = ((const uint8_t *)&u16)[0];
+ *(uint8_t *)p2 = ((const uint8_t *)&u16)[1];
+ }
+ else {
+ u = (s1 == 0) ? p2 : p1;
+ u->u16 = u16;
+ }
+}
+
+/* Read a possibly wrapping uint32_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint32_t readv_u32(const void *p1, size_t s1, const void *p2)
+{
+ uint32_t u32;
+
+ if (likely(s1 >= sizeof(u32)))
+ u32 = read_u32(p1);
+ else
+ readv_bytes(&u32, sizeof(u32), p1, s1, p2);
+ return u32;
+}
+
+/* Write a possibly wrapping uint32_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_u32(void *p1, size_t s1, void *p2, const uint32_t u32)
+{
+ if (likely(s1 >= sizeof(u32)))
+ write_u32(p1, u32);
+ else
+ writev_bytes(&u32, sizeof(u32), p1, s1, p2);
+}
+
+/* Read a possibly wrapping uint64_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint64_t readv_u64(const void *p1, size_t s1, const void *p2)
+{
+ uint64_t u64;
+
+ if (likely(s1 >= sizeof(u64)))
+ u64 = read_u64(p1);
+ else
+ readv_bytes(&u64, sizeof(u64), p1, s1, p2);
+ return u64;
+}
+
+/* Write a possibly wrapping uint64_t in native host order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_u64(void *p1, size_t s1, void *p2, const uint64_t u64)
+{
+ if (likely(s1 >= sizeof(u64)))
+ write_u64(p1, u64);
+ else
+ writev_bytes(&u64, sizeof(u64), p1, s1, p2);
+}
+
+/* Signed integer versions : return the same data but signed */
+
+/* Read an int16_t in native host order */
+static inline int16_t read_i16(const void *p)
+{
+ return read_u16(p);
+}
+
+/* Read an int32_t in native host order */
+static inline int32_t read_i32(const void *p)
+{
+ return read_u32(p);
+}
+
+/* Read an int64_t in native host order */
+static inline int64_t read_i64(const void *p)
+{
+ return read_u64(p);
+}
+
+/* Read a possibly wrapping int16_t in native host order */
+static inline int16_t readv_i16(const void *p1, size_t s1, const void *p2)
+{
+ return readv_u16(p1, s1, p2);
+}
+
+/* Read a possibly wrapping int32_t in native host order */
+static inline int32_t readv_i32(const void *p1, size_t s1, const void *p2)
+{
+ return readv_u32(p1, s1, p2);
+}
+
+/* Read a possibly wrapping int64_t in native host order */
+static inline int64_t readv_i64(const void *p1, size_t s1, const void *p2)
+{
+ return readv_u64(p1, s1, p2);
+}
+
+/* Read a uint16_t, and convert from network order to host order */
+static inline uint16_t read_n16(const void *p)
+{
+ return ntohs(read_u16(p));
+}
+
+/* Write a uint16_t after converting it from host order to network order */
+static inline void write_n16(void *p, const uint16_t u16)
+{
+ write_u16(p, htons(u16));
+}
+
+/* Read a uint32_t, and convert from network order to host order */
+static inline uint32_t read_n32(const void *p)
+{
+ return ntohl(read_u32(p));
+}
+
+/* Write a uint32_t after converting it from host order to network order */
+static inline void write_n32(void *p, const uint32_t u32)
+{
+ write_u32(p, htonl(u32));
+}
+
+/* Read a uint64_t, and convert from network order to host order */
+static inline uint64_t read_n64(const void *p)
+{
+ return my_ntohll(read_u64(p));
+}
+
+/* Write a uint64_t after converting it from host order to network order */
+static inline void write_n64(void *p, const uint64_t u64)
+{
+ write_u64(p, my_htonll(u64));
+}
+
+/* Read a possibly wrapping uint16_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint16_t readv_n16(const void *p1, size_t s1, const void *p2)
+{
+ if (unlikely(s1 < 2)) {
+ if (s1 == 0)
+ p1 = p2++;
+ }
+ else
+ p2 = p1 + 1;
+ return (*(uint8_t *)p1 << 8) + *(uint8_t *)p2;
+}
+
+/* Write a possibly wrapping uint16_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_n16(void *p1, size_t s1, void *p2, const uint16_t u16)
+{
+	/* <p1>/<p2> must not be const-qualified: both bytes are written
+	 * through them below. The previous prototype took const pointers and
+	 * cast constness away, unlike all other writev_* helpers here.
+	 */
+	if (unlikely(s1 < 2)) {
+		if (s1 == 0)
+			p1 = p2++; /* both bytes land in <p2> */
+	}
+	else
+		p2 = p1 + 1; /* both bytes land in <p1> */
+	*(uint8_t *)p1 = u16 >> 8;
+	*(uint8_t *)p2 = u16;
+}
+
+/* Read a possibly wrapping uint32_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint32_t readv_n32(const void *p1, size_t s1, const void *p2)
+{
+ return ntohl(readv_u32(p1, s1, p2));
+}
+
+/* Write a possibly wrapping uint32_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_n32(void *p1, size_t s1, void *p2, const uint32_t u32)
+{
+ writev_u32(p1, s1, p2, htonl(u32));
+}
+
+/* Read a possibly wrapping uint64_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough bytes.
+ */
+static inline uint64_t readv_n64(const void *p1, size_t s1, const void *p2)
+{
+ return my_ntohll(readv_u64(p1, s1, p2));
+}
+
+/* Write a possibly wrapping uint64_t in network order. The first segment is
+ * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
+ * <p2>. <s1> may be zero and may be larger than the type. The caller is always
+ * responsible for providing enough room.
+ */
+static inline void writev_n64(void *p1, size_t s1, void *p2, const uint64_t u64)
+{
+ writev_u64(p1, s1, p2, my_htonll(u64));
+}
+
+#endif /* _HAPROXY_NET_HELPER_H */
diff --git a/include/haproxy/obj_type-t.h b/include/haproxy/obj_type-t.h
new file mode 100644
index 0000000..517d230
--- /dev/null
+++ b/include/haproxy/obj_type-t.h
@@ -0,0 +1,56 @@
+/*
+ * include/haproxy/obj_type-t.h
+ * This file declares some object types for use in various structures.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_OBJ_TYPE_T_H
+#define _HAPROXY_OBJ_TYPE_T_H
+
+/* The principle is to be able to change the type of a pointer by pointing
+ * it directly to an object type. The object type indicates the format of the
+ * structure holding the type, and this is used to retrieve the pointer to the
+ * beginning of the structure. Doing so saves us from having to maintain both
+ * a pointer and a type for elements such as connections which can point to
+ * various types of objects.
+ */
+
+/* object types : these ones take the same space as a char */
+enum obj_type {
+ OBJ_TYPE_NONE = 0, /* pointer is NULL by definition */
+ OBJ_TYPE_LISTENER, /* object is a struct listener */
+ OBJ_TYPE_PROXY, /* object is a struct proxy */
+ OBJ_TYPE_SERVER, /* object is a struct server */
+ OBJ_TYPE_APPLET, /* object is a struct applet */
+ OBJ_TYPE_APPCTX, /* object is a struct appctx */
+ OBJ_TYPE_CONN, /* object is a struct connection */
+ OBJ_TYPE_SRVRQ, /* object is a struct dns_srvrq */
+ OBJ_TYPE_SC, /* object is a struct stconn */
+ OBJ_TYPE_STREAM, /* object is a struct stream */
+ OBJ_TYPE_CHECK, /* object is a struct check */
+ OBJ_TYPE_ENTRIES /* last one : number of entries */
+} __attribute__((packed)) ;
+
+#endif /* _HAPROXY_OBJ_TYPE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/obj_type.h b/include/haproxy/obj_type.h
new file mode 100644
index 0000000..1037460
--- /dev/null
+++ b/include/haproxy/obj_type.h
@@ -0,0 +1,213 @@
+/*
+ * include/haproxy/obj_type.h
+ * This file contains function prototypes to manipulate object types
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_OBJ_TYPE_H
+#define _HAPROXY_OBJ_TYPE_H
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/check-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stream-t.h>
+
+/* Returns the object type stored at <t>, or OBJ_TYPE_NONE if <t> is NULL or
+ * holds a value outside the known range.
+ */
+static inline enum obj_type obj_type(const enum obj_type *t)
+{
+	if (!t || *t >= OBJ_TYPE_ENTRIES)
+		return OBJ_TYPE_NONE;
+	return *t;
+}
+
+/* Returns a human-readable name for the object type of <t>, or "!INVAL!" for
+ * unrecognized values. Never returns NULL.
+ */
+static inline const char *obj_type_name(const enum obj_type *t)
+{
+	switch (obj_type(t)) {
+	case OBJ_TYPE_NONE:     return "NONE";
+	case OBJ_TYPE_LISTENER: return "LISTENER";
+	case OBJ_TYPE_PROXY:    return "PROXY";
+	case OBJ_TYPE_SERVER:   return "SERVER";
+	case OBJ_TYPE_APPLET:   return "APPLET";
+	case OBJ_TYPE_APPCTX:   return "APPCTX";
+	case OBJ_TYPE_CONN:     return "CONN";
+	case OBJ_TYPE_SRVRQ:    return "SRVRQ";
+	case OBJ_TYPE_SC:       return "SC";
+	case OBJ_TYPE_STREAM:   return "STREAM";
+	case OBJ_TYPE_CHECK:    return "CHECK";
+	default:                return "!INVAL!";
+	}
+}
+
+/* Note: for convenience, we provide two versions of each function :
+ * - __objt_<type> : converts the pointer without any control of its
+ * value nor type.
+ * - objt_<type> : same as above except that if the pointer is NULL
+ * or points to a non-matching type, NULL is returned instead.
+ */
+
+static inline struct listener *__objt_listener(enum obj_type *t)
+{
+ return container_of(t, struct listener, obj_type);
+}
+
+static inline struct listener *objt_listener(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_LISTENER)
+ return NULL;
+ return __objt_listener(t);
+}
+
+static inline struct proxy *__objt_proxy(enum obj_type *t)
+{
+ return container_of(t, struct proxy, obj_type);
+}
+
+static inline struct proxy *objt_proxy(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_PROXY)
+ return NULL;
+ return __objt_proxy(t);
+}
+
+static inline struct server *__objt_server(enum obj_type *t)
+{
+ return container_of(t, struct server, obj_type);
+}
+
+static inline struct server *objt_server(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_SERVER)
+ return NULL;
+ return __objt_server(t);
+}
+
+static inline struct applet *__objt_applet(enum obj_type *t)
+{
+ return container_of(t, struct applet, obj_type);
+}
+
+static inline struct applet *objt_applet(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_APPLET)
+ return NULL;
+ return __objt_applet(t);
+}
+
+static inline struct appctx *__objt_appctx(enum obj_type *t)
+{
+ return container_of(t, struct appctx, obj_type);
+}
+
+static inline struct appctx *objt_appctx(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_APPCTX)
+ return NULL;
+ return __objt_appctx(t);
+}
+
+static inline struct stconn *__objt_sc(enum obj_type *t)
+{
+ return (container_of(t, struct stconn, obj_type));
+}
+
+static inline struct stconn *objt_sc(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_SC)
+ return NULL;
+ return __objt_sc(t);
+}
+
+static inline struct connection *__objt_conn(enum obj_type *t)
+{
+ return container_of(t, struct connection, obj_type);
+}
+
+static inline struct connection *objt_conn(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_CONN)
+ return NULL;
+ return __objt_conn(t);
+}
+
+static inline struct resolv_srvrq *__objt_resolv_srvrq(enum obj_type *t)
+{
+ return container_of(t, struct resolv_srvrq, obj_type);
+}
+
+static inline struct resolv_srvrq *objt_resolv_srvrq(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_SRVRQ)
+ return NULL;
+ return __objt_resolv_srvrq(t);
+}
+
+static inline struct stream *__objt_stream(enum obj_type *t)
+{
+ return container_of(t, struct stream, obj_type);
+}
+
+static inline struct stream *objt_stream(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_STREAM)
+ return NULL;
+ return __objt_stream(t);
+}
+
+static inline struct check *__objt_check(enum obj_type *t)
+{
+ return container_of(t, struct check, obj_type);
+}
+
+static inline struct check *objt_check(enum obj_type *t)
+{
+ if (!t || *t != OBJ_TYPE_CHECK)
+ return NULL;
+ return __objt_check(t);
+}
+
+/* Returns the address of the structure enclosing the object type pointer <t>,
+ * NULL for OBJ_TYPE_NONE, or <t> itself for invalid types so that the exact
+ * faulty pointer remains visible to the caller.
+ */
+static inline void *obj_base_ptr(enum obj_type *t)
+{
+	switch (obj_type(t)) {
+	case OBJ_TYPE_NONE:     return NULL;
+	case OBJ_TYPE_LISTENER: return __objt_listener(t);
+	case OBJ_TYPE_PROXY:    return __objt_proxy(t);
+	case OBJ_TYPE_SERVER:   return __objt_server(t);
+	case OBJ_TYPE_APPLET:   return __objt_applet(t);
+	case OBJ_TYPE_APPCTX:   return __objt_appctx(t);
+	case OBJ_TYPE_CONN:     return __objt_conn(t);
+	case OBJ_TYPE_SRVRQ:    return __objt_resolv_srvrq(t);
+	case OBJ_TYPE_SC:       return __objt_sc(t);
+	case OBJ_TYPE_STREAM:   return __objt_stream(t);
+	case OBJ_TYPE_CHECK:    return __objt_check(t);
+	default:                return t; // exact pointer for invalid case
+	}
+}
+
+#endif /* _HAPROXY_OBJ_TYPE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/openssl-compat.h b/include/haproxy/openssl-compat.h
new file mode 100644
index 0000000..5639468
--- /dev/null
+++ b/include/haproxy/openssl-compat.h
@@ -0,0 +1,487 @@
+#ifndef _HAPROXY_OPENSSL_COMPAT_H
+#define _HAPROXY_OPENSSL_COMPAT_H
+#ifdef USE_OPENSSL
+
+#ifdef USE_OPENSSL_WOLFSSL
+#define TLSEXT_MAXLEN_host_name 255
+#include <wolfssl/options.h>
+#endif
+
+#ifdef USE_OPENSSL_AWSLC
+#include <openssl/base.h>
+#if !defined(OPENSSL_IS_AWSLC)
+#error "USE_OPENSSL_AWSLC is set but OPENSSL_IS_AWSLC is not defined, wrong header files detected"
+#endif
+#endif
+
+#include <openssl/bn.h>
+#include <openssl/crypto.h>
+#include <openssl/ssl.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/err.h>
+#include <openssl/rand.h>
+#include <openssl/hmac.h>
+#include <openssl/rsa.h>
+#if (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP)
+#include <openssl/ocsp.h>
+#endif
+#ifndef OPENSSL_NO_DH
+#include <openssl/dh.h>
+#endif
+#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
+#include <openssl/engine.h>
+#endif
+
+#ifdef SSL_MODE_ASYNC
+#include <openssl/async.h>
+#endif
+
+#if (OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+#include <openssl/core_names.h>
+#include <openssl/decoder.h>
+#include <openssl/param_build.h>
+#include <openssl/provider.h>
+#endif
+
+#ifdef USE_QUIC_OPENSSL_COMPAT
+#include <haproxy/quic_openssl_compat.h>
+#endif
+
+#if defined(LIBRESSL_VERSION_NUMBER)
+/* LibreSSL is a fork of OpenSSL 1.0.1g but pretends to be 2.0.0, thus
+ * systematically breaking when some code is written for a specific version
+ * of OpenSSL. Let's make it appear like what it really is and deal with
+ * extra features with ORs and not with AND NOT.
+ */
+#define HA_OPENSSL_VERSION_NUMBER 0x1000107fL
+#else /* this is for a real OpenSSL or a truly compatible derivative */
+#define HA_OPENSSL_VERSION_NUMBER OPENSSL_VERSION_NUMBER
+#endif
+
+#ifndef OPENSSL_VERSION
+#define OPENSSL_VERSION SSLEAY_VERSION
+#define OpenSSL_version(x) SSLeay_version(x)
+#define OpenSSL_version_num SSLeay
+#endif
+
+#if (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070100fL) || defined(OPENSSL_IS_BORINGSSL) || (!defined(LIBRESSL_VERSION_NUMBER) && (OPENSSL_VERSION_NUMBER >= 0x10100000L))
+#define HAVE_SSL_EXTRACT_RANDOM
+#endif
+
+#if ((OPENSSL_VERSION_NUMBER >= 0x10101000L) && !defined(OPENSSL_IS_BORINGSSL) && !defined(LIBRESSL_VERSION_NUMBER))
+#define HAVE_SSL_RAND_KEEP_RANDOM_DEVICES_OPEN
+#endif
+
+#if ((OPENSSL_VERSION_NUMBER >= 0x10101000L) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL)) || defined(USE_OPENSSL_WOLFSSL)
+#define HAVE_SSL_CTX_SET_CIPHERSUITES
+#define HAVE_ASN1_TIME_TO_TM
+#endif
+
+#if (defined(SSL_CLIENT_HELLO_CB) || defined(OPENSSL_IS_BORINGSSL))
+#define HAVE_SSL_CLIENT_HELLO_CB
+#endif
+
+#if ((OPENSSL_VERSION_NUMBER >= 0x1000200fL) && !defined(OPENSSL_NO_TLSEXT) && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL))
+#define HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
+#endif
+
+#if ((OPENSSL_VERSION_NUMBER >= 0x10002000L) && !defined(LIBRESSL_VERSION_NUMBER))
+#define HAVE_SSL_CTX_get0_privatekey
+#endif
+
+#if HA_OPENSSL_VERSION_NUMBER >= 0x1000104fL || defined(USE_OPENSSL_WOLFSSL) || defined(USE_OPENSSL_AWSLC)
+/* CRYPTO_memcmp() is present since openssl 1.0.1d */
+#define HAVE_CRYPTO_memcmp
+#endif
+
+#if (defined(SN_ct_cert_scts) && !defined(OPENSSL_NO_TLSEXT))
+#define HAVE_SSL_SCTL
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L) || defined(USE_OPENSSL_AWSLC) || (defined(USE_OPENSSL_WOLFSSL) && defined(HAVE_SECRET_CALLBACK))
+#define HAVE_SSL_KEYLOG
+#endif
+
+/* minimum OpenSSL 1.1.1 & libreSSL 3.3.6 */
+#if (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER >= 0x3030600L)) || (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L) || defined(USE_OPENSSL_WOLFSSL)
+#define HAVE_SSL_get0_verified_chain
+#endif
+
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+#define HAVE_OSSL_PARAM
+#define MAC_CTX EVP_MAC_CTX
+#define HASSL_DH EVP_PKEY
+#define HASSL_DH_free EVP_PKEY_free
+#define HASSL_DH_up_ref EVP_PKEY_up_ref
+
+#define HAVE_SSL_PROVIDERS
+
+#else /* HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL */
+#define MAC_CTX HMAC_CTX
+#define HASSL_DH DH
+#define HASSL_DH_free DH_free
+#define HASSL_DH_up_ref DH_up_ref
+#endif
+
+#if ((HA_OPENSSL_VERSION_NUMBER < 0x1000000fL) && !defined(X509_get_X509_PUBKEY))
+#define X509_get_X509_PUBKEY(x) ((x)->cert_info->key)
+#endif
+
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1000100fL)
+/*
+ * Functions introduced in OpenSSL 1.0.1
+ */
+static inline int SSL_SESSION_set1_id_context(SSL_SESSION *s, const unsigned char *sid_ctx, unsigned int sid_ctx_len)
+{
+ s->sid_ctx_length = sid_ctx_len;
+ memcpy(s->sid_ctx, sid_ctx, sid_ctx_len);
+ return 1;
+}
+#endif
+
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1000200fL) && (!defined(LIBRESSL_VERSION_NUMBER) || LIBRESSL_VERSION_NUMBER < 0x2070500fL)
+/* introduced in openssl 1.0.2 */
+
+static inline STACK_OF(X509) *X509_chain_up_ref(STACK_OF(X509) *chain)
+{
+ STACK_OF(X509) *ret;
+ int i;
+
+ if ((ret = sk_X509_dup(chain)) == NULL)
+ return NULL;
+ for (i = 0; i < sk_X509_num(ret); i++) {
+ X509 *x = sk_X509_value(ret, i);
+ CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509);
+ }
+ return ret;
+}
+
+#endif
+
+#ifdef OPENSSL_IS_BORINGSSL
+/*
+ * Functions missing in BoringSSL
+ */
+
+static inline X509_CRL *X509_OBJECT_get0_X509_CRL(const X509_OBJECT *a)
+{
+ if (a == NULL || a->type != X509_LU_CRL) {
+ return NULL;
+ }
+ return a->data.crl;
+}
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL) && (!defined(LIBRESSL_VERSION_NUMBER) || LIBRESSL_VERSION_NUMBER < 0x2070000fL)
+/*
+ * Functions introduced in OpenSSL 1.1.0 and in LibreSSL 2.7.0
+ */
+
+static inline STACK_OF(X509_OBJECT) *X509_STORE_get0_objects(X509_STORE *st)
+{
+ return st->objs;
+}
+
+static inline int X509_OBJECT_get_type(const X509_OBJECT *a)
+{
+ return a->type;
+}
+
+static inline X509 *X509_OBJECT_get0_X509(const X509_OBJECT *a)
+{
+ if (a == NULL || a->type != X509_LU_X509) {
+ return NULL;
+ }
+ return a->data.x509;
+}
+
+static inline X509_CRL *X509_OBJECT_get0_X509_CRL(const X509_OBJECT *a)
+{
+ if (a == NULL || a->type != X509_LU_CRL) {
+ return NULL;
+ }
+ return a->data.crl;
+}
+
+static inline int SSL_SESSION_set1_id(SSL_SESSION *s, const unsigned char *sid, unsigned int sid_len)
+{
+ s->session_id_length = sid_len;
+ memcpy(s->session_id, sid, sid_len);
+ return 1;
+}
+
+static inline X509_ALGOR *X509_get0_tbs_sigalg(const X509 *x)
+{
+ return x->cert_info->signature;
+}
+
+#if (!defined OPENSSL_NO_OCSP)
+static inline const OCSP_CERTID *OCSP_SINGLERESP_get0_id(const OCSP_SINGLERESP *single)
+{
+ return single->certId;
+}
+#endif
+
+#ifndef OPENSSL_NO_DH
+/* Compat shim for DH_set0_pqg() (OpenSSL >= 1.1.0). Implements only the bare
+ * necessities for HAProxy: stores <p> and <g> into <dh>; <q> is accepted for
+ * prototype compatibility but ignored and never stored. Always returns 1.
+ */
+static inline int DH_set0_pqg(DH *dh, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+	dh->p = p;
+	dh->g = g;
+	return 1;
+}
+#endif
+
+static inline const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x)
+{
+ return x->data;
+}
+
+static inline void X509_up_ref(X509 *x)
+{
+ CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509);
+}
+
+static inline void EVP_PKEY_up_ref(EVP_PKEY *pkey)
+{
+ CRYPTO_add(&pkey->references, 1, CRYPTO_LOCK_EVP_PKEY);
+}
+
+static inline void SSL_CTX_up_ref(SSL_CTX *ctx)
+{
+ CRYPTO_add(&ctx->references, 1, CRYPTO_LOCK_SSL_CTX);
+}
+
+static inline int X509_CRL_get_signature_nid(const X509_CRL *crl)
+{
+ return OBJ_obj2nid(crl->sig_alg->algorithm);
+}
+
+static inline const ASN1_TIME *X509_CRL_get0_lastUpdate(const X509_CRL *crl)
+{
+ return X509_CRL_get_lastUpdate(crl);
+}
+
+static inline const ASN1_TIME *X509_CRL_get0_nextUpdate(const X509_CRL *crl)
+{
+ return X509_CRL_get_nextUpdate(crl);
+}
+
+static inline const ASN1_INTEGER *X509_REVOKED_get0_serialNumber(const X509_REVOKED *x)
+{
+ return x->serialNumber;
+}
+
+static inline const ASN1_TIME *X509_REVOKED_get0_revocationDate(const X509_REVOKED *x)
+{
+ return x->revocationDate;
+}
+
+static inline X509 *X509_STORE_CTX_get0_cert(X509_STORE_CTX *ctx)
+{
+ return ctx->cert;
+}
+
+static inline int ECDSA_SIG_set0(ECDSA_SIG *sig, BIGNUM *r, BIGNUM *s)
+{
+ if (r == NULL || s == NULL)
+ return 0;
+ BN_clear_free(sig->r);
+ BN_clear_free(sig->s);
+
+ sig->r = r;
+ sig->s = s;
+ return 1;
+}
+
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+#if defined(SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB)
+#define SSL_CTX_set_tlsext_ticket_key_evp_cb SSL_CTX_set_tlsext_ticket_key_cb
+#endif
+
+/*
+ * Functions introduced in OpenSSL 3.0.0
+ */
+static inline unsigned long ERR_peek_error_func(const char **func)
+{
+ unsigned long ret = ERR_peek_error();
+ if (ret == 0)
+ return ret;
+
+ if (func)
+ *func = ERR_func_error_string(ret);
+
+ return ret;
+}
+
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL) || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER >= 0x2070200fL)
+#define __OPENSSL_110_CONST__ const
+#else
+#define __OPENSSL_110_CONST__
+#endif
+
+/* ERR_remove_state() was deprecated in 1.0.0 in favor of
+ * ERR_remove_thread_state(), which was in turn deprecated in
+ * 1.1.0 and does nothing anymore. Let's simply silently kill
+ * it.
+ */
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
+#undef ERR_remove_state
+#define ERR_remove_state(x)
+#endif
+
+
+/* RAND_pseudo_bytes() is deprecated in 1.1.0 in favor of RAND_bytes(). Note
+ * that the return codes differ, but it happens that the only use case (ticket
+ * key update) was already wrong, considering a non-cryptographic random as a
+ * failure.
+ */
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
+#undef RAND_pseudo_bytes
+#define RAND_pseudo_bytes(x,y) RAND_bytes(x,y)
+#endif
+
+
+/* Signature from RFC 5246, missing in openssl < 1.0.1 */
+#ifndef TLSEXT_signature_anonymous
+#define TLSEXT_signature_anonymous 0
+#define TLSEXT_signature_rsa 1
+#define TLSEXT_signature_dsa 2
+#define TLSEXT_signature_ecdsa 3
+#endif
+
+#if ((HA_OPENSSL_VERSION_NUMBER < 0x1010000fL) && (!defined(LIBRESSL_VERSION_NUMBER) || LIBRESSL_VERSION_NUMBER < 0x2070000fL)) ||\
+ defined(OPENSSL_IS_BORINGSSL)
+#define X509_getm_notBefore X509_get_notBefore
+#define X509_getm_notAfter X509_get_notAfter
+#endif
+
+#if !defined(EVP_CTRL_AEAD_SET_IVLEN)
+#define EVP_CTRL_AEAD_SET_IVLEN EVP_CTRL_GCM_SET_IVLEN
+#endif
+
+#if !defined(EVP_CTRL_AEAD_SET_TAG)
+#define EVP_CTRL_AEAD_SET_TAG EVP_CTRL_GCM_SET_TAG
+#endif
+
+/* Supported hash function for TLS tickets */
+#ifdef OPENSSL_NO_SHA256
+#define TLS_TICKET_HASH_FUNCT EVP_sha1
+#else
+#define TLS_TICKET_HASH_FUNCT EVP_sha256
+#endif /* OPENSSL_NO_SHA256 */
+
+#ifndef SSL_OP_CIPHER_SERVER_PREFERENCE /* needs OpenSSL >= 0.9.7 */
+#define SSL_OP_CIPHER_SERVER_PREFERENCE 0
+#endif
+
+#ifndef SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION /* needs OpenSSL >= 0.9.7 */
+#define SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION 0
+#define SSL_renegotiate_pending(arg) 0
+#endif
+
+#ifndef SSL_OP_SINGLE_ECDH_USE /* needs OpenSSL >= 0.9.8 */
+#define SSL_OP_SINGLE_ECDH_USE 0
+#endif
+
+#ifndef SSL_OP_NO_TICKET /* needs OpenSSL >= 0.9.8 */
+#define SSL_OP_NO_TICKET 0
+#endif
+
+#ifndef SSL_OP_NO_COMPRESSION /* needs OpenSSL >= 0.9.9 */
+#define SSL_OP_NO_COMPRESSION 0
+#endif
+
+#ifdef OPENSSL_NO_SSL3 /* SSLv3 support removed */
+#undef SSL_OP_NO_SSLv3
+#define SSL_OP_NO_SSLv3 0
+#endif
+
+#ifndef SSL_OP_NO_TLSv1_1 /* needs OpenSSL >= 1.0.1 */
+#define SSL_OP_NO_TLSv1_1 0
+#endif
+
+#ifndef SSL_OP_NO_TLSv1_2 /* needs OpenSSL >= 1.0.1 */
+#define SSL_OP_NO_TLSv1_2 0
+#endif
+
+#ifndef SSL_OP_NO_TLSv1_3 /* needs OpenSSL >= 1.1.1 */
+#define SSL_OP_NO_TLSv1_3 0
+#endif
+
+#ifndef SSL_OP_SINGLE_DH_USE /* needs OpenSSL >= 0.9.6 */
+#define SSL_OP_SINGLE_DH_USE 0
+#endif
+
+#ifndef SSL_OP_SINGLE_ECDH_USE /* needs OpenSSL >= 1.0.0 */
+#define SSL_OP_SINGLE_ECDH_USE 0
+#endif
+
+#ifndef SSL_MODE_RELEASE_BUFFERS /* needs OpenSSL >= 1.0.0 */
+#define SSL_MODE_RELEASE_BUFFERS 0
+#endif
+
+#ifndef SSL_MODE_SMALL_BUFFERS /* needs small_records.patch */
+#define SSL_MODE_SMALL_BUFFERS 0
+#endif
+
+#ifndef SSL_OP_PRIORITIZE_CHACHA /* needs OpenSSL >= 1.1.1 */
+#define SSL_OP_PRIORITIZE_CHACHA 0
+#endif
+
+#ifndef SSL_CTRL_GET_EXTRA_CHAIN_CERTS
+#define SSL_CTX_get_extra_chain_certs(ctx, chain) do { *(chain) = (ctx)->extra_certs; } while (0)
+#endif
+
+#if HA_OPENSSL_VERSION_NUMBER < 0x10100000L && (!defined(LIBRESSL_VERSION_NUMBER) || LIBRESSL_VERSION_NUMBER < 0x2070000fL)
+#define BIO_get_data(b) (b)->ptr
+#define BIO_set_data(b, v) do { (b)->ptr = (v); } while (0)
+#define BIO_set_init(b, v) do { (b)->init = (v); } while (0)
+
+#define BIO_meth_free(m) free(m)
+#define BIO_meth_new(type, name) calloc(1, sizeof(BIO_METHOD))
+#define BIO_meth_set_gets(m, f) do { (m)->bgets = (f); } while (0)
+#define BIO_meth_set_puts(m, f) do { (m)->bputs = (f); } while (0)
+#define BIO_meth_set_read(m, f) do { (m)->bread = (f); } while (0)
+#define BIO_meth_set_write(m, f) do { (m)->bwrite = (f); } while (0)
+#define BIO_meth_set_create(m, f) do { (m)->create = (f); } while (0)
+#define BIO_meth_set_ctrl(m, f) do { (m)->ctrl = (f); } while (0)
+#define BIO_meth_set_destroy(m, f) do { (m)->destroy = (f); } while (0)
+#endif
+
+#ifndef SSL_CTX_set_ecdh_auto
+#define SSL_CTX_set_ecdh_auto(dummy, onoff) ((onoff) != 0)
+#endif
+
+/* The EVP_MD_CTX_create() and EVP_MD_CTX_destroy() functions were renamed to
+ * EVP_MD_CTX_new() and EVP_MD_CTX_free() in OpenSSL 1.1.0, respectively.
+ */
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+#define EVP_MD_CTX_new EVP_MD_CTX_create
+#define EVP_MD_CTX_free EVP_MD_CTX_destroy
+#endif
+
+/* OpenSSL 1.0.2 and onwards define SSL_CTX_set1_curves_list which is both a
+ * function and a macro. OpenSSL 1.0.2 to 1.1.0 define SSL_CTRL_SET_CURVES_LIST
+ * as a macro, which disappeared from 1.1.1. BoringSSL only has that one and
+ * not the former macro but it does have the function. Let's keep the test on
+ * the macro matching the function name.
+ */
+#if !defined(SSL_CTX_set1_curves_list) && defined(SSL_CTRL_SET_CURVES_LIST)
+#define SSL_CTX_set1_curves_list SSL_CTX_set1_curves_list
+#endif
+
+#if !defined(SSL_CTX_set1_sigalgs_list) && defined(SSL_CTRL_SET_SIGALGS_LIST)
+#define SSL_CTX_set1_sigalgs_list SSL_CTX_set1_sigalgs_list
+#endif
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_OPENSSL_COMPAT_H */
diff --git a/include/haproxy/pattern-t.h b/include/haproxy/pattern-t.h
new file mode 100644
index 0000000..6c1ba24
--- /dev/null
+++ b/include/haproxy/pattern-t.h
@@ -0,0 +1,235 @@
+/*
+ * include/haproxy/pattern-t.h
+ * This file provides structures and types for ACLs.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PATTERN_T_H
+#define _HAPROXY_PATTERN_T_H
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/regex-t.h>
+#include <haproxy/sample_data-t.h>
+#include <haproxy/thread-t.h>
+
+
+/* Pattern matching function result.
+ *
+ * We're using a 3-state matching system to match samples against patterns in
+ * ACLs :
+ * - PASS : at least one pattern already matches
+ * - MISS : some data is missing to decide if some rules may finally match.
+ * - FAIL : no pattern may ever match
+ *
+ * We assign values 0, 1 and 3 to FAIL, MISS and PASS respectively, so that we
+ * can make use of standard arithmetic for the truth tables below :
+ *
+ * x | !x x&y | F(0) | M(1) | P(3) x|y | F(0) | M(1) | P(3)
+ * ------+----- -----+------+------+----- -----+------+------+-----
+ * F(0) | P(3) F(0)| F(0) | F(0) | F(0) F(0)| F(0) | M(1) | P(3)
+ * M(1) | M(1) M(1)| F(0) | M(1) | M(1) M(1)| M(1) | M(1) | P(3)
+ * P(3) | F(0) P(3)| F(0) | M(1) | P(3) P(3)| P(3) | P(3) | P(3)
+ *
+ * neg(x) = (3 >> x) and(x,y) = (x & y) or(x,y) = (x | y)
+ *
+ * For efficiency, the ACL return flags are directly mapped from the pattern
+ * match flags. A pattern can't return "MISS" since it's always presented an
+ * existing sample. So that leaves us with only two possible values :
+ * MATCH = 0
+ * NOMATCH = 3
+ */
+enum pat_match_res {
+ PAT_NOMATCH = 0, /* sample didn't match any pattern */
+ PAT_MATCH = 3, /* sample matched at least one pattern */
+};
+
+/* possible flags for patterns matching or parsing */
+enum {
+ PAT_MF_IGNORE_CASE = 1 << 0, /* ignore case */
+ PAT_MF_NO_DNS = 1 << 1, /* don't perform any DNS requests */
+};
+
+/* possible flags for patterns storage */
+enum {
+ PAT_SF_TREE = 1 << 0, /* some patterns are arranged in a tree */
+ PAT_SF_REGFREE = 1 << 1, /* run regex_free() on the pointer */
+};
+
+/* ACL match methods */
+enum {
+ PAT_MATCH_FOUND, /* just ensure that fetch found the sample */
+ PAT_MATCH_BOOL, /* match fetch's integer value as boolean */
+ PAT_MATCH_INT, /* unsigned integer (int) */
+ PAT_MATCH_IP, /* IPv4/IPv6 address (IP) */
+ PAT_MATCH_BIN, /* hex string (bin) */
+ PAT_MATCH_LEN, /* string length (str -> int) */
+ PAT_MATCH_STR, /* exact string match (str) */
+ PAT_MATCH_BEG, /* beginning of string (str) */
+ PAT_MATCH_SUB, /* substring (str) */
+ PAT_MATCH_DIR, /* directory-like sub-string (str) */
+ PAT_MATCH_DOM, /* domain-like sub-string (str) */
+ PAT_MATCH_END, /* end of string (str) */
+ PAT_MATCH_REG, /* regex (str -> reg) */
+ PAT_MATCH_REGM, /* regex (str -> reg) with match zones */
+ /* keep this one last */
+ PAT_MATCH_NUM
+};
+
+#define PAT_REF_MAP 0x1 /* Set if the reference is used by at least one map. */
+#define PAT_REF_ACL 0x2 /* Set if the reference is used by at least one acl. */
+#define PAT_REF_SMP 0x4 /* Flag used if the reference contains a sample. */
+
+/* This struct contains a list of reference strings for dynamically
+ * updatable patterns.
+ */
+struct pat_ref {
+ struct list list; /* Used to chain refs. */
+ char *reference; /* The reference name. */
+ char *display; /* String displayed to identify the pattern origin. */
+ struct list head; /* The head of the list of struct pat_ref_elt. */
+ struct eb_root ebmb_root; /* The tree where pattern reference elements are attached. */
+ struct list pat; /* The head of the list of struct pattern_expr. */
+ unsigned int flags; /* flags PAT_REF_*. */
+ unsigned int curr_gen; /* current generation number (anything below can be removed) */
+ unsigned int next_gen; /* next generation number (insertions use this one) */
+ int unique_id; /* Each pattern reference has a unique id. */
+ unsigned long long revision; /* updated for each update */
+ unsigned long long entry_cnt; /* the total number of entries */
+ THREAD_ALIGN(64);
+ __decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
+};
+
+/* This is a part of struct pat_ref. Each entry contains one pattern and one
+ * associated value as original string. All derivative forms (via exprs) are
+ * accessed from list_head or tree_head. Be careful, it's variable-sized!
+ */
+struct pat_ref_elt {
+ struct list list; /* Used to chain elements. */
+ struct list back_refs; /* list of users tracking this pat ref */
+ void *list_head; /* all &pattern_list->from_ref derived from this reference, ends with NULL */
+ void *tree_head; /* all &pattern_tree->from_ref derived from this reference, ends with NULL */
+ char *sample;
+ unsigned int gen_id; /* generation of pat_ref this was made for */
+ int line;
+ struct ebmb_node node; /* Node to attach this element to its <pat_ref> ebtree. */
+ const char pattern[0]; // const only to make sure nobody tries to free it.
+};
+
+/* This contains each tree-indexed entry. This struct makes it possible to
+ * associate a "sample" with a tree entry. It is used with maps.
+ */
+struct pattern_tree {
+ void *from_ref; // pattern_tree linked from pat_ref_elt, ends with NULL
+ struct sample_data *data;
+ struct pat_ref_elt *ref;
+ struct pattern_expr *expr;
+ struct ebmb_node node;
+};
+
+/* This describes one ACL pattern, which might be a single value or a tree of
+ * values. All patterns for a single ACL expression are linked together. Some
+ * of them might have a type (eg: IP). Right now, the types are shared with
+ * the samples, though it is possible that in the future this will change to
+ * accommodate other types (eg: meth, regex). Unsigned and constant types
+ * are preferred when there is a doubt.
+ */
+struct pattern {
+ int type; /* type of the ACL pattern (SMP_T_*) */
+ union {
+ int i; /* integer value */
+ struct {
+ signed long long min, max;
+ unsigned int min_set:1;
+ unsigned int max_set:1;
+ } range; /* integer range */
+ struct {
+ struct in_addr addr;
+ struct in_addr mask;
+ } ipv4; /* IPv4 address */
+ struct {
+ struct in6_addr addr;
+ unsigned char mask; /* number of bits */
+ } ipv6; /* IPv6 address/mask */
+ } val; /* direct value */
+ union {
+ void *ptr; /* any data */
+ char *str; /* any string */
+ struct my_regex *reg; /* a compiled regex */
+ } ptr; /* indirect values, allocated or NULL */
+ int len; /* data length when required */
+ int sflags; /* flags relative to the storage method. */
+ struct sample_data *data; /* used to store a pointer to sample value associated
+ with the match. It is used with maps */
+ struct pat_ref_elt *ref;
+};
+
+/* This struct is just used for chaining patterns */
+struct pattern_list {
+ void *from_ref; // pattern_tree linked from pat_ref_elt, ends with NULL
+ struct list list;
+ struct pattern pat;
+ struct pattern_expr *expr;
+};
+
+/* Description of a pattern expression.
+ * It contains pointers to the parse and match functions, and a list or tree of
+ * patterns to test against. The structure is organized so that the hot parts
+ * are grouped together in order to optimize caching.
+ */
+struct pattern_expr {
+ struct list list; /* Used for chaining pattern_expr in pat_ref. */
+ struct pat_ref *ref; /* The pattern reference if exists. */
+ struct pattern_head *pat_head; /* Point to the pattern_head that contain manipulation functions.
+ * Note that this link point on compatible head but not on the real
+ * head. You can use only the function, and you must not use the
+ * "head". Don't write "(struct pattern_expr *)any->pat_head->expr".
+ */
+ struct list patterns; /* list of acl_patterns */
+ struct eb_root pattern_tree; /* may be used for lookup in large datasets */
+ struct eb_root pattern_tree_2; /* may be used for different types */
+ int mflags; /* flags relative to the parsing or matching method. */
+ __decl_thread(HA_RWLOCK_T lock); /* lock used to protect patterns */
+};
+
+/* This is a list of expressions. A struct pattern_expr can be used by
+ * more than one "struct pattern_head". This intermediate struct
+ * permits more than one list.
+ */
+struct pattern_expr_list {
+ struct list list; /* Used for chaining pattern_expr in pattern_head. */
+ int do_free;
+ struct pattern_expr *expr; /* The used expr. */
+};
+
+
+/* This struct contains a list of pattern expr */
+struct sample;
+struct pattern_head {
+ int (*parse)(const char *text, struct pattern *pattern, int flags, char **err);
+ int (*parse_smp)(const char *text, struct sample_data *data);
+ int (*index)(struct pattern_expr *, struct pattern *, char **);
+ void (*prune)(struct pattern_expr *);
+ struct pattern *(*match)(struct sample *, struct pattern_expr *, int);
+ int expect_type; /* type of the expected sample (SMP_T_*) */
+
+ struct list head; /* This is a list of struct pattern_expr_list. */
+};
+
+#endif /* _HAPROXY_PATTERN_T_H */
diff --git a/include/haproxy/pattern.h b/include/haproxy/pattern.h
new file mode 100644
index 0000000..49e5ad2
--- /dev/null
+++ b/include/haproxy/pattern.h
@@ -0,0 +1,273 @@
+/*
+ * include/haproxy/pattern.h
+ * This file provides structures and types for pattern matching.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PATTERN_H
+#define _HAPROXY_PATTERN_H
+
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/sample-t.h>
+
+/* pattern management function arrays */
+extern const char *const pat_match_names[PAT_MATCH_NUM];
+extern int const pat_match_types[PAT_MATCH_NUM];
+
+extern int (*const pat_parse_fcts[PAT_MATCH_NUM])(const char *, struct pattern *, int, char **);
+extern int (*const pat_index_fcts[PAT_MATCH_NUM])(struct pattern_expr *, struct pattern *, char **);
+extern void (*const pat_prune_fcts[PAT_MATCH_NUM])(struct pattern_expr *);
+extern struct pattern *(*const pat_match_fcts[PAT_MATCH_NUM])(struct sample *, struct pattern_expr *, int);
+
+/* This is the root of the list of all available pattern_refs. */
+extern struct list pattern_reference;
+
+int pattern_finalize_config(void);
+
+/* return the PAT_MATCH_* index for match name "name", or < 0 if not found */
+static inline int pat_find_match_name(const char *name)
+{
+ int i;
+
+ for (i = 0; i < PAT_MATCH_NUM; i++)
+ if (strcmp(name, pat_match_names[i]) == 0)
+ return i;
+ return -1;
+}
+
+/* This function executes a pattern match on a sample. It applies pattern <expr>
+ * to sample <smp>. The function returns NULL if the sample doesn't match. It
+ * returns non-null if the sample matches. If <fill> is true and the sample
+ * matches, the function returns the matched pattern. In many cases, this
+ * pattern can be a static buffer.
+ */
+struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp, int fill);
+
+/*
+ *
+ * The following function gets "pattern", duplicates it and indexes it in "expr"
+ *
+ */
+int pat_idx_list_val(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_list_ptr(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_list_str(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_list_reg(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_list_regm(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_tree_ip(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_tree_str(struct pattern_expr *expr, struct pattern *pat, char **err);
+int pat_idx_tree_pfx(struct pattern_expr *expr, struct pattern *pat, char **err);
+
+/*
+ *
+ * The following function deletes all patterns related to reference pattern
+ * element <elt> in pattern reference <ref>.
+ *
+ */
+void pat_delete_gen(struct pat_ref *ref, struct pat_ref_elt *elt);
+
+/*
+ *
+ * The following functions clean all entries of a pattern expression and
+ * reset the tree and list root.
+ *
+ */
+void pat_prune_gen(struct pattern_expr *expr);
+
+/*
+ *
+ * The following functions are general purpose pattern matching functions.
+ *
+ */
+
+
+/* ignore the current line */
+int pat_parse_nothing(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse an integer. It is put both in min and max. */
+int pat_parse_int(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse a version. It is put both in min and max. */
+int pat_parse_dotted_ver(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse a range of integers delimited by either ':' or '-'. If only one
+ * integer is read, it is set as both min and max.
+ */
+int pat_parse_range(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse a string. It is allocated and duplicated. */
+int pat_parse_str(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse a hexa binary definition. It is allocated and duplicated. */
+int pat_parse_bin(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse a regex. It is allocated. */
+int pat_parse_reg(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* Parse an IP address and an optional mask in the form addr[/mask].
+ * The addr may either be an IPv4 or IPv6 address, or a hostname that resolves
+ * to a valid IPv4 address. The mask can be provided as a number of bits, or
+ * even as a dotted mask (but the latter only works for IPv4 addresses).
+ * Returns 1 if OK, otherwise 0.
+ */
+int pat_parse_ip(const char *text, struct pattern *pattern, int mflags, char **err);
+
+/* NB: For two strings to be identical, it is required that their lengths match */
+struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* NB: For two binary buffers to be identical, it is required that their lengths match */
+struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the length of the pattern in <test> is included between min and max */
+struct pattern *pat_match_len(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the integer in <test> is included between min and max */
+struct pattern *pat_match_int(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* always return false */
+struct pattern *pat_match_nothing(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the pattern matches the end of the tested string. */
+struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the pattern matches the beginning of the tested string. */
+struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the pattern is included inside the tested string. */
+struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the pattern is included inside the tested string, but enclosed
+ * between slashes or at the beginning or end of the string. Slashes at the
+ * beginning or end of the pattern are ignored.
+ */
+struct pattern *pat_match_dir(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Checks that the pattern is included inside the tested string, but enclosed
+ * between dots or at the beginning or end of the string. Dots at the beginning
+ * or end of the pattern are ignored.
+ */
+struct pattern *pat_match_dom(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Check that the input IP address (IPv4 or IPv6) in <smp> matches the IP/mask
+ * in pattern
+ */
+struct pattern *pat_match_ip(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/* Executes a regex. It temporarily changes the data to add a trailing zero,
+ * and restores the previous character when leaving.
+ */
+struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int fill);
+struct pattern *pat_match_regm(struct sample *smp, struct pattern_expr *expr, int fill);
+
+/*
+ * pattern_ref manipulation.
+ */
+struct pat_ref *pat_ref_lookup(const char *reference);
+struct pat_ref *pat_ref_lookupid(int unique_id);
+struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned int flags);
+struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int flags);
+struct pat_ref_elt *pat_ref_find_elt(struct pat_ref *ref, const char *key);
+struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, const char *pattern, const char *sample, int line);
+struct pat_ref_elt *pat_ref_load(struct pat_ref *ref, unsigned int gen, const char *pattern, const char *sample, int line, char **err);
+int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr, int patflags, char **err);
+int pat_ref_add(struct pat_ref *ref, const char *pattern, const char *sample, char **err);
+int pat_ref_set(struct pat_ref *ref, const char *pattern, const char *sample, char **err, struct pat_ref_elt *elt);
+int pat_ref_set_by_id(struct pat_ref *ref, struct pat_ref_elt *refelt, const char *value, char **err);
+int pat_ref_delete(struct pat_ref *ref, const char *key);
+void pat_ref_delete_by_ptr(struct pat_ref *ref, struct pat_ref_elt *elt);
+int pat_ref_delete_by_id(struct pat_ref *ref, struct pat_ref_elt *refelt);
+int pat_ref_prune(struct pat_ref *ref);
+int pat_ref_commit_elt(struct pat_ref *ref, struct pat_ref_elt *elt, char **err);
+int pat_ref_purge_range(struct pat_ref *ref, uint from, uint to, int budget);
+
+/* Create a new generation number for next pattern updates and returns it. This
+ * must be used to atomically insert new patterns that will atomically replace
+ * all current ones on commit. Generation numbers start at zero and are only
+ * incremented and wrap at 2^32. There must not be more than 2^31-1 called
+ * without a commit. The new reserved number is returned. Locking is not
+ * necessary.
+ */
+static inline unsigned int pat_ref_newgen(struct pat_ref *ref)
+{
+ return HA_ATOMIC_ADD_FETCH(&ref->next_gen, 1);
+}
+
+/* Give up a previously assigned generation number. By doing this the caller
+ * certifies that no element was inserted using this number, and that this
+ * number might safely be reused if none was assigned since. This is convenient
+ * to avoid wasting numbers in case an operation couldn't be started right
+ * after a call to pat_ref_newgen(), but it is absolutely not necessary. The
+ * main use case is to politely abandon an update attempt upon error just after
+ * having received a number (e.g. attempting to retrieve entries from the
+ * network, and failed to establish a connection). This is done atomically so
+ * no locking is necessary.
+ */
+static inline void pat_ref_giveup(struct pat_ref *ref, unsigned int gen)
+{
+ HA_ATOMIC_CAS(&ref->next_gen, &gen, gen - 1);
+}
+
+/* Commit the whole pattern reference by updating the generation number or
+ * failing in case someone else managed to do it meanwhile. While this could
+ * be done using a CAS, it must instead be called with the PATREF_LOCK held in
+ * order to guarantee the consistency of the generation number for all other
+ * functions that rely on it. It returns zero on success, non-zero on failure
+ * (technically speaking it returns the difference between the attempted
+ * generation and the effective one, so that it can be used for reporting).
+ */
+static inline int pat_ref_commit(struct pat_ref *ref, unsigned int gen)
+{
+ if ((int)(gen - ref->curr_gen) > 0)
+ ref->curr_gen = gen;
+ return gen - ref->curr_gen;
+}
+
+/* This function purges all elements from <ref> that are older than generation
+ * <oldest>. It will not purge more than <budget> entries at once, in order to
+ * remain responsive. If budget is negative, no limit is applied.
+ * The caller must already hold the PATREF_LOCK on <ref>. The function will
+ * take the PATEXP_LOCK on all expressions of the pattern as needed. It returns
+ * non-zero on completion, or zero if it had to stop before the end after
+ * <budget> was depleted.
+ */
+static inline int pat_ref_purge_older(struct pat_ref *ref, uint oldest, int budget)
+{
+ return pat_ref_purge_range(ref, oldest + 1, oldest - 1, budget);
+}
+
+
+/*
+ * pattern_head manipulation.
+ */
+void pattern_init_head(struct pattern_head *head);
+void pattern_prune(struct pattern_head *head);
+int pattern_read_from_file(struct pattern_head *head, unsigned int refflags, const char *filename, int patflags, int load_smp, char **err, const char *file, int line);
+
+/*
+ * pattern_expr manipulation.
+ */
+void pattern_init_expr(struct pattern_expr *expr);
+struct pattern_expr *pattern_lookup_expr(struct pattern_head *head, struct pat_ref *ref);
+struct pattern_expr *pattern_new_expr(struct pattern_head *head, struct pat_ref *ref,
+ int patflags, char **err, int *reuse);
+struct sample_data **pattern_find_smp(struct pattern_expr *expr, struct pat_ref_elt *elt);
+
+
+#endif
diff --git a/include/haproxy/payload.h b/include/haproxy/payload.h
new file mode 100644
index 0000000..f91817a
--- /dev/null
+++ b/include/haproxy/payload.h
@@ -0,0 +1,39 @@
+/*
+ * include/haproxy/payload.h
+ * Definitions for payload-based sample fetches and ACLs
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PAYLOAD_H
+#define _HAPROXY_PAYLOAD_H
+
+#include <haproxy/api.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/stream-t.h>
+
+int fetch_rdp_cookie_name(struct stream *s, struct sample *smp, const char *cname, int clen);
+int val_payload_lv(struct arg *arg, char **err_msg);
+
+#endif /* _HAPROXY_PAYLOAD_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/peers-t.h b/include/haproxy/peers-t.h
new file mode 100644
index 0000000..124fac3
--- /dev/null
+++ b/include/haproxy/peers-t.h
@@ -0,0 +1,160 @@
+/*
+ * include/haproxy/peers-t.h
+ * This file defines everything related to peers.
+ *
+ * Copyright 2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PEERS_T_H
+#define _HAPROXY_PEERS_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/dict-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/thread-t.h>
+
+
+struct shared_table {
+	struct stktable *table;       /* stick table to sync */
+	int local_id;                 /* table id used on the local side of the sync protocol, presumably */
+	int remote_id;                /* table id advertised by the remote peer, presumably */
+	int flags;                    /* shared-table flags -- TODO confirm flag set in peers.c */
+	uint64_t remote_data;         /* data types supported by the remote (bit field), presumably */
+	unsigned int remote_data_nbelem[STKTABLE_DATA_TYPES]; /* per-data-type element counts announced by the remote */
+	unsigned int last_acked;      /* last update acknowledged by the remote */
+	unsigned int last_pushed;     /* last update pushed to the remote */
+	unsigned int last_get;        /* last update fetched from the table */
+	unsigned int teaching_origin; /* update counter at the start of a teach process, presumably */
+	unsigned int update;          /* current local update counter */
+	struct shared_table *next;    /* next shared table in list */
+};
+
+struct peer {
+	int local;                    /* proxy state */
+	__decl_thread(HA_SPINLOCK_T lock); /* lock used to handle this peer section */
+	char *id;                     /* peer name, as declared in the configuration */
+	struct {
+		const char *file;         /* file where the section appears */
+		int line;                 /* line where the section appears */
+	} conf;                       /* config information */
+	time_t last_change;           /* date of last status change, presumably */
+	struct sockaddr_storage addr; /* peer address */
+	struct protocol *proto;       /* peer address protocol */
+	struct xprt_ops *xprt;        /* peer socket operations at transport layer */
+	void *sock_init_arg;          /* socket operations' opaque init argument if needed */
+	unsigned int flags;           /* peer session flags */
+	unsigned int statuscode;      /* current/last session status code */
+	unsigned int reconnect;       /* next connect timer */
+	unsigned int heartbeat;       /* next heartbeat timer */
+	unsigned int confirm;         /* confirm message counter */
+	unsigned int last_hdshk;      /* Date of the last handshake. */
+	uint32_t rx_hbt;              /* received heartbeats counter */
+	uint32_t tx_hbt;              /* transmitted heartbeats counter */
+	uint32_t no_hbt;              /* no received heartbeat counter */
+	uint32_t new_conn;            /* new connection after reconnection timeout expiration counter */
+	uint32_t proto_err;           /* protocol errors counter */
+	uint32_t coll;                /* connection collisions counter */
+	struct appctx *appctx;        /* the appctx running it */
+	struct shared_table *remote_table; /* table currently discussed with the remote, presumably */
+	struct shared_table *last_local_table; /* Last table that emits update messages during a teach process */
+	struct shared_table *stop_local_table; /* last evaluated table, used as restart point for the next teach process */
+	struct shared_table *tables;  /* list of shared tables synced with this peer (see shared_table.next) */
+	struct server *srv;           /* server used to reach this peer, presumably */
+	struct dcache *dcache;        /* dictionary cache */
+	struct peers *peers;          /* associated peer section */
+	struct peer *next;            /* next peer in the list */
+};
+
+
+struct peers {
+	char *id;                       /* peer section name */
+	struct task *sync_task;         /* main sync task */
+	struct sig_handler *sighandler; /* signal handler */
+	struct peer *remote;            /* remote peers list */
+	struct peer *local;             /* local peer list */
+	struct proxy *peers_fe;         /* peer frontend */
+	struct {
+		const char *file;       /* file where the section appears */
+		int line;               /* line where the section appears */
+	} conf;                         /* config information */
+	time_t last_change;             /* date of last section state change, presumably */
+	struct peers *next;             /* next peer section */
+	unsigned int flags;             /* current peers section resync state */
+	unsigned int resync_timeout;    /* resync timeout timer */
+	int count;                      /* total of peers */
+	int nb_shards;                  /* Number of peer shards */
+	int disabled;                   /* peers proxy disabled if >0 */
+	int applet_count[MAX_THREADS];  /* applet count per thread */
+};
+
+/* LRU cache for dictionaries (transmission side, see struct dcache) */
+struct dcache_tx {
+	/* The most recently used key, presumably -- TODO confirm LRU direction */
+	unsigned int lru_key;
+	/* Array of pointers to dictionary entries. */
+	struct ebpt_node *entries;
+	/* The previous lookup result (one-entry memoization). */
+	struct ebpt_node *prev_lookup;
+	/* ebtree storing the cached entries. */
+	struct eb_root cached_entries;
+};
+
+struct dcache_rx {
+	unsigned int id;        /* cache slot id, presumably -- matches dcache_tx_entry.id */
+	struct dict_entry *de;  /* dictionary entry cached at this slot */
+};
+
+struct dcache_tx_entry {
+	unsigned int id;        /* cache id sent in place of the full entry, presumably */
+	struct ebpt_node entry; /* node for dcache_tx's <cached_entries> ebtree */
+};
+
+/* dictionary cache, one per peer (see struct peer's <dcache> member) */
+struct dcache {
+	/* Cache used upon transmission */
+	struct dcache_tx *tx;
+	/* Cache used upon receipt */
+	struct dcache_rx *rx;
+	/* Maximum number of entries in this cache */
+	size_t max_entries;
+};
+
+struct peers_keyword {
+	const char *kw;        /* the keyword string as it appears in a "peers" section */
+	int (*parse)(          /* parsing function called for this keyword */
+		char **args,           /* tokenized config arguments */
+		struct peers *curpeer, /* peers section being parsed */
+		const char *file,      /* config file name, for error reporting */
+		int line,              /* config line number, for error reporting */
+		char **err);           /* output: error message -- ownership TODO confirm */
+	int flags;             /* keyword flags */
+};
+
+struct peers_kw_list {
+	struct list list;                   /* chaining into the global list (see peers_register_keywords()) */
+	struct peers_keyword kw[VAR_ARRAY]; /* variable-size keyword array; terminator convention TODO confirm */
+};
+
+#endif /* _HAPROXY_PEERS_T_H */
+
diff --git a/include/haproxy/peers.h b/include/haproxy/peers.h
new file mode 100644
index 0000000..e3c5fd3
--- /dev/null
+++ b/include/haproxy/peers.h
@@ -0,0 +1,69 @@
+/*
+ * include/haproxy/peers.h
+ * This file defines function prototypes for peers management.
+ *
+ * Copyright 2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PEERS_H
+#define _HAPROXY_PEERS_H
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/peers-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/stream-t.h>
+
+
+extern struct peers_kw_list peers_keywords;
+extern struct peers *cfg_peers;
+
+int peers_init_sync(struct peers *peers);
+int peers_alloc_dcache(struct peers *peers);
+int peers_register_table(struct peers *, struct stktable *table);
+void peers_setup_frontend(struct proxy *fe);
+void peers_register_keywords(struct peers_kw_list *pkwl);
+
+#if defined(USE_OPENSSL)
+static inline enum obj_type *peer_session_target(struct peer *p, struct stream *s)
+{
+	if (p->srv->use_ssl)
+		return &p->srv->obj_type; /* SSL enabled on the server: target the server itself */
+	else
+		return &s->be->obj_type;  /* no SSL: target the stream's backend */
+}
+
+static inline struct xprt_ops *peer_xprt(struct peer *p)
+{
+	return p->srv->use_ssl ? xprt_get(XPRT_SSL) : xprt_get(XPRT_RAW); /* transport matching the server's SSL setting */
+}
+#else
+static inline enum obj_type *peer_session_target(struct peer *p, struct stream *s)
+{
+	return &s->be->obj_type; /* no SSL support built in: always the backend */
+}
+
+static inline struct xprt_ops *peer_xprt(struct peer *p)
+{
+	return xprt_get(XPRT_RAW); /* raw transport only without SSL */
+}
+#endif
+
+#endif /* _HAPROXY_PEERS_H */
+
diff --git a/include/haproxy/pipe-t.h b/include/haproxy/pipe-t.h
new file mode 100644
index 0000000..1a1fcfd
--- /dev/null
+++ b/include/haproxy/pipe-t.h
@@ -0,0 +1,43 @@
+/*
+ * include/haproxy/pipe-t.h
+ * Pipe management - types definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PIPE_T_H
+#define _HAPROXY_PIPE_T_H
+
+/* A pipe is described by its read and write FDs, and the data remaining in it.
+ * The FDs are valid if there are data pending. The user is not allowed to
+ * change the FDs.
+ */
+struct pipe {
+	int data;	/* number of bytes present in the pipe */
+	int prod;	/* FD the producer must write to ; -1 if none */
+	int cons;	/* FD the consumer must read from ; -1 if none */
+	struct pipe *next;	/* next pipe in the free pool, presumably (see put_pipe()) */
+};
+
+#endif /* _HAPROXY_PIPE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/pipe.h b/include/haproxy/pipe.h
new file mode 100644
index 0000000..12bd8ea
--- /dev/null
+++ b/include/haproxy/pipe.h
@@ -0,0 +1,54 @@
+/*
+ * include/haproxy/pipe.h
+ * Pipe management - exported functions
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PIPE_H
+#define _HAPROXY_PIPE_H
+
+#include <haproxy/api.h>
+#include <haproxy/pipe-t.h>
+
+extern int pipes_used; /* # of pipes in use (2 fds each) */
+extern int pipes_free; /* # of pipes unused (2 fds each) */
+
+/* Returns a pre-allocated empty pipe, trying to allocate one if there isn't
+ * any left. NULL is returned if a pipe could not be allocated.
+ */
+struct pipe *get_pipe(void);
+
+/* destroy a pipe, possibly because an error was encountered on it. Its FDs
+ * will be closed and it will not be reinjected into the live pool.
+ */
+void kill_pipe(struct pipe *p);
+
+/* put back an unused pipe into the live pool. If it still has data in it, it is
+ * closed and not reinjected into the live pool. The caller is not allowed to
+ * use it once released.
+ */
+void put_pipe(struct pipe *p);
+
+#endif /* _HAPROXY_PIPE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/pool-os.h b/include/haproxy/pool-os.h
new file mode 100644
index 0000000..cf29c58
--- /dev/null
+++ b/include/haproxy/pool-os.h
@@ -0,0 +1,109 @@
+/*
+ * include/haproxy/pool-os.h
+ * OS-level interface for memory management
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_POOL_OS_H
+#define _HAPROXY_POOL_OS_H
+
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <haproxy/api.h>
+
+
+/************* normal allocator *************/
+
+/* Allocates an area of <size> bytes using malloc() and returns it, or NULL
+ * on allocation failure. Released with pool_free_area().
+ */
+static forceinline void *pool_alloc_area(size_t size)
+{
+	return malloc(size);
+}
+
+/* Frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size is specified; it
+ * is only passed to the will_free() hook below, free() ignores it.
+ */
+static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
+{
+	will_free(area, size); /* debug/accounting hook announcing the release, presumably */
+	free(area);
+}
+
+/************* use-after-free allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc(). However the allocation is rounded up to 4kB so that a
+ * full page is allocated. This ensures the object can be freed alone so that
+ * future dereferences are easily detected. The returned object is always
+ * 16-bytes aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
+ */
+static inline void *pool_alloc_area_uaf(size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0; /* pad so the area ends at the page end, 16-byte aligned */
+	void *ret;
+
+	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (ret != MAP_FAILED) {
+		/* let's dereference the page before returning so that the real
+		 * allocation in the system is performed without holding the lock.
+		 */
+		*(int *)ret = 0;
+		if (pad >= sizeof(void *))
+			*(void **)(ret + pad - sizeof(void *)) = ret + pad; /* canary: copy of returned address, checked on free */
+		ret += pad; /* NOTE: arithmetic on void* is a GCC extension */
+	} else {
+		ret = NULL; /* mmap() failed: report OOM like malloc() would */
+	}
+	return ret;
+}
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area_uaf(). The
+ * semantics are identical to free() except that the size must absolutely match
+ * the one passed to pool_alloc_area_uaf(). In case some padding is added, the
+ * area's start address is compared to the one at the end of the padding, and
+ * a segfault is triggered if they don't match, indicating an underflow.
+ */
+static inline void pool_free_area_uaf(void *area, size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0; /* must match the padding computed at allocation time */
+
+	/* This object will be released for real in order to detect a use after
+	 * free. We also force a write to the area to ensure we crash on double
+	 * free or free of a const area.
+	 */
+	*(uint32_t *)area = 0xDEADADD4;
+
+	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
+		ABORT_NOW(); /* canary mismatch: buffer underflow detected */
+
+	munmap(area - pad, (size + 4095) & -4096); /* unmap the page(s): any later access will fault */
+}
+
+#endif /* _HAPROXY_POOL_OS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
new file mode 100644
index 0000000..157e2ca
--- /dev/null
+++ b/include/haproxy/pool-t.h
@@ -0,0 +1,149 @@
+/*
+ * include/haproxy/pool-t.h
+ * Memory pools configuration and type definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_POOL_T_H
+#define _HAPROXY_POOL_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/list-t.h>
+
+#define MEM_F_SHARED 0x1
+#define MEM_F_EXACT 0x2
+
+/* A special pointer for the pool's free_list that indicates someone is
+ * currently manipulating it. Serves as a short-lived lock.
+ */
+#define POOL_BUSY ((void *)1)
+
+#define POOL_AVG_SAMPLES 1024
+
+/* possible flags for __pool_alloc() */
+#define POOL_F_NO_POISON 0x00000001 // do not poison the area
+#define POOL_F_MUST_ZERO 0x00000002 // zero the returned area
+#define POOL_F_NO_FAIL 0x00000004 // do not randomly fail
+
+/* pool debugging flags */
+#define POOL_DBG_FAIL_ALLOC 0x00000001 // randomly fail memory allocations
+#define POOL_DBG_DONT_MERGE 0x00000002 // do not merge same-size pools
+#define POOL_DBG_COLD_FIRST 0x00000004 // pick cold objects first
+#define POOL_DBG_INTEGRITY 0x00000008 // perform integrity checks on cache
+#define POOL_DBG_NO_GLOBAL 0x00000010 // disable global pools
+#define POOL_DBG_NO_CACHE 0x00000020 // disable thread-local pool caches
+#define POOL_DBG_CALLER 0x00000040 // trace last caller's location
+#define POOL_DBG_TAG 0x00000080 // place a tag at the end of the area
+#define POOL_DBG_POISON 0x00000100 // poison memory area on pool_alloc()
+#define POOL_DBG_UAF 0x00000200 // enable use-after-free protection
+
+
+/* This is the head of a thread-local cache */
+struct pool_cache_head {
+	struct list list;   /* head of the list of objects cached for this pool+thread */
+	unsigned int count; /* number of objects in this pool */
+	unsigned int tid;   /* thread id, for debugging only */
+	struct pool_head *pool; /* assigned pool, for debugging only */
+	ulong fill_pattern; /* pattern used to fill the area on free (integrity checks, presumably) */
+} THREAD_ALIGNED(64);
+
+/* This represents one item stored in the thread-local cache. <by_pool> links
+ * the object to the list of objects in the pool, and <by_lru> links the object
+ * to the local thread's list of hottest objects. This way it's possible to
+ * allocate a fresh object from the cache, or to release cold objects from any
+ * pool (no bookkeeping is needed since shared pools do not know how many
+ * objects they store).
+ */
+struct pool_cache_item {
+	struct list by_pool; /* link to objects cached in the same pool */
+	struct list by_lru;  /* link into the local thread's LRU list of cached objects */
+};
+
+/* This structure is used to represent an element in the pool's shared
+ * free_list. An item may carry a series of other items allocated or released
+ * as a same cluster. The storage then looks like this:
+ * +------+ +------+ +------+
+ * -->| next |-->| next |-->| NULL |
+ * +------+ +------+ +------+
+ * | NULL | | down | | down |
+ * +------+ +--|---+ +--|---+
+ * V V
+ * +------+ +------+
+ * | NULL | | NULL |
+ * +------+ +------+
+ * | down | | NULL |
+ * +--|---+ +------+
+ * V
+ * +------+
+ * | NULL |
+ * +------+
+ * | NULL |
+ * +------+
+ */
+struct pool_item {
+	struct pool_item *next; /* next cluster head in the shared free_list (see diagram above) */
+	struct pool_item *down; // link to other items of the same cluster
+};
+
+/* This describes a complete pool, with its status, usage statistics and the
+ * thread-local caches if any. Even if pools are disabled, these descriptors
+ * are valid and are used at least to get names and sizes. For small builds
+ * using neither threads nor pools, this structure might be reduced, and
+ * alignment could be removed.
+ */
+struct pool_head {
+	/* read-mostly part, purely configuration */
+	unsigned int limit;        /* hard limit on the number of chunks */
+	unsigned int minavail;     /* how many chunks are expected to be used */
+	unsigned int size;         /* chunk size */
+	unsigned int flags;        /* MEM_F_* */
+	unsigned int users;        /* number of pools sharing this zone */
+	unsigned int alloc_sz;     /* allocated size (includes hidden fields) */
+	struct list list;          /* list of all known pools */
+	void *base_addr;           /* allocation address, for free() */
+	char name[12];             /* name of the pool (at most 11 chars plus NUL) */
+
+	/* heavily read-write part */
+	THREAD_ALIGN(64);
+
+	/* these entries depend on the pointer value, they're used to reduce
+	 * the contention on fast-changing values. The alignment here is
+	 * important since the purpose is to lower the thread contention.
+	 * The free_list and used/allocated are not related, the array is
+	 * just meant to shard elements and there are no per-free_list stats.
+	 */
+	struct {
+		THREAD_ALIGN(64);
+		struct pool_item *free_list; /* list of free shared objects */
+		unsigned int allocated;    /* how many chunks have been allocated */
+		unsigned int used;         /* how many chunks are currently in use */
+		unsigned int needed_avg;/* floating indicator between used and allocated */
+		unsigned int failed;       /* failed allocations (indexed by hash of TID) */
+	} buckets[CONFIG_HAP_POOL_BUCKETS];
+
+	struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
+} __attribute__((aligned(64)));
+
+#endif /* _HAPROXY_POOL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
new file mode 100644
index 0000000..bf7cb8d
--- /dev/null
+++ b/include/haproxy/pool.h
@@ -0,0 +1,368 @@
+/*
+ * include/haproxy/pool.h
+ * Memory management definitions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_POOL_H
+#define _HAPROXY_POOL_H
+
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/list.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/thread.h>
+
+/* This registers a call to create_pool_callback(ptr, name, size) */
+#define REGISTER_POOL(ptr, name, size) \
+ INITCALL3(STG_POOL, create_pool_callback, (ptr), (name), (size))
+
+/* This macro declares a pool head <ptr> and registers its creation */
+#define DECLARE_POOL(ptr, name, size) \
+ struct pool_head *(ptr) __read_mostly = NULL; \
+ REGISTER_POOL(&ptr, name, size)
+
+/* This macro declares a static pool head <ptr> and registers its creation */
+#define DECLARE_STATIC_POOL(ptr, name, size) \
+ static struct pool_head *(ptr) __read_mostly; \
+ REGISTER_POOL(&ptr, name, size)
+
+/* By default, free objects are linked by a pointer stored at the beginning of
+ * the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
+ * inflated by the size of a pointer so that the link is placed at the end
+ * of the objects. Hence free objects in pools remain intact. In addition,
+ * this location is used to keep a pointer to the pool the object was
+ * allocated from, and verify it's freed into the appropriate one.
+ */
+# define POOL_EXTRA_MARK (sizeof(void *))
+# define POOL_DEBUG_SET_MARK(pool, item) \
+ do { \
+ typeof(pool) __p = (pool); \
+ typeof(item) __i = (item); \
+ if (likely(!(pool_debugging & POOL_DBG_TAG))) \
+ break; \
+ *(typeof(pool)*)(((char *)__i) + __p->size) = __p; \
+ } while (0)
+
+# define POOL_DEBUG_RESET_MARK(pool, item) \
+ do { \
+ typeof(pool) __p = (pool); \
+ typeof(item) __i = (item); \
+ if (likely(!(pool_debugging & POOL_DBG_TAG))) \
+ break; \
+ *(typeof(pool)*)(((char *)__i) + __p->size) = __builtin_return_address(0); \
+ } while (0)
+
+# define POOL_DEBUG_CHECK_MARK(pool, item, caller) \
+ do { \
+ typeof(pool) __p = (pool); \
+ typeof(item) __i = (item); \
+ if (likely(!(pool_debugging & POOL_DBG_TAG))) \
+ break; \
+ if (*(typeof(pool)*)(((char *)__i) + __p->size) != __p) { \
+ pool_inspect_item("tag mismatch on free()", pool, item, caller); \
+ ABORT_NOW(); \
+ } \
+ } while (0)
+
+/* It's possible to trace callers of pool_free() by placing their pointer
+ * after the end of the area and the optional mark above, which means the
+ * end of the allocated array.
+ */
+# define POOL_EXTRA_CALLER (sizeof(void *))
+# define POOL_DEBUG_TRACE_CALLER(pool, item, caller) \
+ do { \
+ typeof(pool) __p = (pool); \
+ typeof(item) __i = (item); \
+ typeof(caller) __c = (caller); \
+ if (likely(!(pool_debugging & POOL_DBG_CALLER))) \
+ break; \
+ *(typeof(caller)*)(((char *)__i) + __p->alloc_sz - sizeof(void*)) = __c; \
+ } while (0)
+
+/* poison each newly allocated area with this byte if >= 0 */
+extern int mem_poison_byte;
+
+/* trim() in progress */
+extern int pool_trim_in_progress;
+
+/* set of POOL_DBG_* flags */
+extern uint pool_debugging;
+
+int malloc_trim(size_t pad);
+void trim_all_pools(void);
+
+void *pool_get_from_os_noinc(struct pool_head *pool);
+void pool_put_to_os_nodec(struct pool_head *pool, void *ptr);
+void *pool_alloc_nocache(struct pool_head *pool, const void *caller);
+void pool_free_nocache(struct pool_head *pool, void *ptr);
+void dump_pools(void);
+int pool_parse_debugging(const char *str, char **err);
+int pool_total_failures(void);
+unsigned long long pool_total_allocated(void);
+unsigned long long pool_total_used(void);
+void pool_flush(struct pool_head *pool);
+void pool_gc(struct pool_head *pool_ctx);
+struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
+void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size);
+void *pool_destroy(struct pool_head *pool);
+void pool_destroy_all(void);
+void *__pool_alloc(struct pool_head *pool, unsigned int flags);
+void __pool_free(struct pool_head *pool, void *ptr);
+void pool_inspect_item(const char *msg, struct pool_head *pool, const void *item, const void *caller);
+
+
+/****************** Thread-local cache management ******************/
+
+extern THREAD_LOCAL size_t pool_cache_bytes; /* total cache size */
+extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */
+
+void pool_evict_from_local_cache(struct pool_head *pool, int full);
+void pool_evict_from_local_caches(void);
+void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller);
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_head *pool, struct pool_cache_item *item, const void *caller);
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item);
+
+/* sums and returns the per-bucket 'allocated' counters of <pool> */
+static inline uint pool_allocated(const struct pool_head *pool)
+{
+	uint total = 0;
+	int b;
+
+	for (b = 0; b < CONFIG_HAP_POOL_BUCKETS; b++)
+		total += HA_ATOMIC_LOAD(&pool->buckets[b].allocated);
+	return total;
+}
+
+/* sums and returns the per-bucket 'used' counters of <pool> */
+static inline uint pool_used(const struct pool_head *pool)
+{
+	uint total = 0;
+	int b;
+
+	for (b = 0; b < CONFIG_HAP_POOL_BUCKETS; b++)
+		total += HA_ATOMIC_LOAD(&pool->buckets[b].used);
+	return total;
+}
+
+/* Returns the raw sum of the per-bucket 'needed_avg' indicators. The result
+ * must be passed to swrate_avg() to get something usable.
+ */
+static inline uint pool_needed_avg(const struct pool_head *pool)
+{
+	int bucket;
+	uint ret;
+
+	for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+		ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].needed_avg);
+	return ret;
+}
+
+/* sums and returns the number of failed allocations of <pool> across all buckets */
+static inline uint pool_failed(const struct pool_head *pool)
+{
+	int bucket;
+	uint ret;
+
+	for (bucket = ret = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++)
+		ret += HA_ATOMIC_LOAD(&pool->buckets[bucket].failed);
+	return ret;
+}
+
+/* Returns the max number of entries that may be brought back to the pool
+ * before it's considered as full. Note that it is only usable for releasing
+ * objects, hence the function assumes that no more than ->used entries will
+ * be released in the worst case, and that this value is always lower than or
+ * equal to ->allocated. It's important to understand that under thread
+ * contention these values may not always be accurate but the principle is that
+ * any deviation remains contained. When global pools are disabled, this
+ * function always returns zero so that the caller knows it must free the
+ * object via other ways.
+ */
+static inline uint pool_releasable(const struct pool_head *pool)
+{
+	uint alloc, used;
+	uint needed_raw;
+
+	if (unlikely(pool_debugging & (POOL_DBG_NO_CACHE|POOL_DBG_NO_GLOBAL)))
+		return 0; /* no shared pool: caller must free via other means */
+
+	alloc = pool_allocated(pool);
+	used = pool_used(pool);
+	if (used > alloc)
+		alloc = used; /* torn reads under contention (see note above): clamp */
+
+	needed_raw = pool_needed_avg(pool);
+	if (alloc < swrate_avg(needed_raw + needed_raw / 4, POOL_AVG_SAMPLES))
+		return used; // less than needed is allocated, can release everything
+
+	if ((uint)(alloc - used) < pool->minavail)
+		return pool->minavail - (alloc - used); // less than minimum available
+
+	/* there are enough objects in this pool */
+	return 0;
+}
+
+/* These are generic cache-aware wrappers that allocate/free from/to the local
+ * cache first, then from the second level if it exists.
+ */
+
+/* Tries to retrieve an object from the local pool cache corresponding to pool
+ * <pool>. If none is available, tries to allocate from the shared cache if any
+ * and returns NULL if nothing is available. Must not be used when pools are
+ * disabled.
+ */
+static inline void *pool_get_from_cache(struct pool_head *pool, const void *caller)
+{
+	struct pool_cache_item *item;
+	struct pool_cache_head *ph;
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	ph = &pool->cache[tid]; /* this thread's cache for this pool */
+	if (unlikely(LIST_ISEMPTY(&ph->list))) {
+		if (!(pool_debugging & POOL_DBG_NO_GLOBAL))
+			pool_refill_local_from_shared(pool, ph);
+		if (LIST_ISEMPTY(&ph->list))
+			return NULL; /* nothing in local nor shared cache */
+	}
+
+	/* allocate hottest objects first */
+	item = LIST_NEXT(&ph->list, typeof(item), by_pool);
+
+	if (unlikely(pool_debugging & (POOL_DBG_COLD_FIRST|POOL_DBG_INTEGRITY))) {
+		/* allocate oldest objects first so as to keep them as long as possible
+		 * in the cache before being reused and maximizing the chance to detect
+		 * an overwrite.
+		 */
+		if (pool_debugging & POOL_DBG_COLD_FIRST)
+			item = LIST_PREV(&ph->list, typeof(item), by_pool);
+
+		if (pool_debugging & POOL_DBG_INTEGRITY)
+			pool_check_pattern(ph, pool, item, caller);
+	}
+
+	BUG_ON(&item->by_pool == &ph->list); /* cannot happen: emptiness was checked above */
+	LIST_DELETE(&item->by_pool);
+	LIST_DELETE(&item->by_lru);
+
+	/* keep track of where the element was allocated from */
+	POOL_DEBUG_SET_MARK(pool, item);
+	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
+
+	ph->count--; /* local cache accounting */
+	pool_cache_bytes -= pool->size;
+	pool_cache_count--;
+
+	return item;
+}
+
+
+/****************** Common high-level code ******************/
+
+#if !defined(DEBUG_MEM_STATS)
+
+/*
+ * Returns a pointer to an object from pool <pool> allocated using
+ * flags <flag> from the POOL_F_* set.
+ */
+#define pool_alloc_flag(pool, flag) __pool_alloc((pool), (flag))
+
+/*
+ * Returns a pointer to an object taken from the pool <pool> or
+ * dynamically allocated. Memory poisoning is performed if enabled.
+ */
+#define pool_alloc(pool) __pool_alloc((pool), 0)
+
+/*
+ * Returns a pointer to an object taken from the pool <pool> or
+ * dynamically allocated. The area is zeroed.
+ */
+#define pool_zalloc(pool) __pool_alloc((pool), POOL_F_MUST_ZERO)
+
+/*
+ * Puts a memory area back to the corresponding pool. Just like with the libc's
+ * free(), <ptr> may be NULL.
+ */
+#define pool_free(pool, ptr) \
+ do { \
+ typeof(ptr) __ptr = (ptr); \
+ if (likely((__ptr) != NULL)) \
+ __pool_free(pool, __ptr); \
+ } while (0)
+
+
+#else /* DEBUG_MEM_STATS is set below */
+
+#define pool_free(pool, ptr) ({ \
+ struct pool_head *__pool = (pool); \
+ typeof(ptr) __ptr = (ptr); \
+ static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+ .caller = { \
+ .file = __FILE__, .line = __LINE__, \
+ .what = MEM_STATS_TYPE_P_FREE, \
+ .func = __func__, \
+ }, \
+ }; \
+ _.extra = __pool; \
+ HA_WEAK(__start_mem_stats); \
+ HA_WEAK(__stop_mem_stats); \
+ if (__ptr) { \
+ _HA_ATOMIC_INC(&_.calls); \
+ _HA_ATOMIC_ADD(&_.size, __pool->size); \
+ __pool_free(__pool, __ptr); \
+ } \
+})
+
+#define pool_alloc_flag(pool, flag) ({ \
+ struct pool_head *__pool = (pool); \
+ uint __flag = (flag); \
+ size_t __x = __pool->size; \
+ static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+ .caller = { \
+ .file = __FILE__, .line = __LINE__, \
+ .what = MEM_STATS_TYPE_P_ALLOC, \
+ .func = __func__, \
+ }, \
+ }; \
+ _.extra = __pool; \
+ HA_WEAK(__start_mem_stats); \
+ HA_WEAK(__stop_mem_stats); \
+ _HA_ATOMIC_INC(&_.calls); \
+ _HA_ATOMIC_ADD(&_.size, __x); \
+ __pool_alloc(__pool, __flag); \
+})
+
+#define pool_alloc(pool) pool_alloc_flag(pool, 0)
+
+#define pool_zalloc(pool) pool_alloc_flag(pool, POOL_F_MUST_ZERO)
+
+#endif /* DEBUG_MEM_STATS */
+
+#endif /* _HAPROXY_POOL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/port_range-t.h b/include/haproxy/port_range-t.h
new file mode 100644
index 0000000..eea1132
--- /dev/null
+++ b/include/haproxy/port_range-t.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/port_range-t.h
+ * This file defines the port_range type
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PORT_RANGE_T_H
+#define _HAPROXY_PORT_RANGE_T_H
+
+#include <netinet/in.h>
+#include <haproxy/api-t.h>
+
+struct port_range {
+ int size, get, put_h, put_t; /* range size, and get/put positions */
+ uint16_t ports[VAR_ARRAY]; /* array of <size> ports, in host byte order */
+};
+
+#endif /* _HAPROXY_PORT_RANGE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/port_range.h b/include/haproxy/port_range.h
new file mode 100644
index 0000000..9e4379a
--- /dev/null
+++ b/include/haproxy/port_range.h
@@ -0,0 +1,105 @@
+/*
+ * include/haproxy/port_range.h
+ * This file defines everything needed to manage port ranges
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PORT_RANGE_H
+#define _HAPROXY_PORT_RANGE_H
+
+#include <stdlib.h>
+#include <haproxy/api.h>
+#include <haproxy/port_range-t.h>
+
+#define GET_NEXT_OFF(range, off) ((off) == (range)->size - 1 ? 0 : (off) + 1)
+
+/* return an available port from range <range>, or zero if none is left */
+static inline int port_range_alloc_port(struct port_range *range)
+{
+ int ret;
+ int get;
+ int put;
+
+ get = _HA_ATOMIC_LOAD(&range->get);
+ do {
+ /* barrier to make sure get is loaded before put */
+ __ha_barrier_atomic_load();
+ put = _HA_ATOMIC_LOAD(&range->put_t);
+ if (unlikely(put == get))
+ return 0;
+ ret = range->ports[get];
+ } while (!(_HA_ATOMIC_CAS(&range->get, &get, GET_NEXT_OFF(range, get))));
+ return ret;
+}
+
+/* release port <port> into port range <range>. Does nothing if <port> is zero
+ * or if <range> is null. The caller is responsible for marking the port
+ * unused by either setting the port to zero or the range to NULL.
+ */
+static inline void port_range_release_port(struct port_range *range, int port)
+{
+ int put;
+
+ if (!port || !range)
+ return;
+
+ put = range->put_h;
+ /* put_h is reserved for producers, so that they can each get a
+ * free slot, put_t is what is used by consumers to know if there's
+ * elements available or not
+ */
+	/* First reserve our slot; we know the ring buffer can't be full,
+	 * as we will only ever release ports we allocated before.
+	 */
+ while (!(_HA_ATOMIC_CAS(&range->put_h, &put, GET_NEXT_OFF(range, put))));
+ _HA_ATOMIC_STORE(&range->ports[put], port);
+ /* Barrier to make sure the new port is visible before we change put_t */
+ __ha_barrier_atomic_store();
+ /* Wait until all the threads that got a slot before us are done */
+ while ((volatile int)range->put_t != put)
+ __ha_compiler_barrier();
+	/* Let the world know we're done, and tell any potential consumer
+	 * they can use that port.
+	 */
+ _HA_ATOMIC_STORE(&range->put_t, GET_NEXT_OFF(range, put));
+}
+
+/* return a new initialized port range of N ports. The ports are not
+ * filled in, it's up to the caller to do it.
+ */
+static inline struct port_range *port_range_alloc_range(int n)
+{
+ struct port_range *ret;
+ ret = calloc(1, sizeof(struct port_range) +
+ (n + 1) * sizeof(((struct port_range *)0)->ports[0]));
+ if (!ret)
+ return NULL;
+ ret->size = n + 1;
+ /* Start at the first free element */
+ ret->put_h = ret->put_t = n;
+ return ret;
+}
+
+#endif /* _HAPROXY_PORT_RANGE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/proto_quic.h b/include/haproxy/proto_quic.h
new file mode 100644
index 0000000..a0e2b98
--- /dev/null
+++ b/include/haproxy/proto_quic.h
@@ -0,0 +1,35 @@
+/*
+ * AF_INET/AF_INET6 QUIC protocol layer definitions.
+ *
+ * Copyright 2020 Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_QUIC_H
+#define _HAPROXY_PROTO_QUIC_H
+
+extern struct protocol proto_quic4;
+extern struct protocol proto_quic6;
+
+struct quic_cid_tree {
+ struct eb_root root;
+ __decl_thread(HA_RWLOCK_T lock);
+};
+
+extern struct quic_dghdlr *quic_dghdlrs;
+extern struct quic_cid_tree *quic_cid_trees;
+
+#endif /* _HAPROXY_PROTO_QUIC_H */
diff --git a/include/haproxy/proto_rhttp-t.h b/include/haproxy/proto_rhttp-t.h
new file mode 100644
index 0000000..28e2ff9
--- /dev/null
+++ b/include/haproxy/proto_rhttp-t.h
@@ -0,0 +1,14 @@
+#ifndef _HAPROXY_PROTO_RHTTP_H_T
+#define _HAPROXY_PROTO_RHTTP_H_T
+
+/* State for reverse preconnect listener state machine.
+ * Used to limit log reporting only on state changes.
+ */
+enum li_preconn_state {
+ LI_PRECONN_ST_STOP, /* pre-connect task inactive */
+ LI_PRECONN_ST_INIT, /* pre-connect task bootstrapped */
+ LI_PRECONN_ST_ERR, /* last pre-connect attempt failed */
+ LI_PRECONN_ST_FULL, /* pre-connect maxconn reached */
+};
+
+#endif /* _HAPROXY_PROTO_RHTTP_H_T */
diff --git a/include/haproxy/proto_rhttp.h b/include/haproxy/proto_rhttp.h
new file mode 100644
index 0000000..421680f
--- /dev/null
+++ b/include/haproxy/proto_rhttp.h
@@ -0,0 +1,21 @@
+#ifndef _HAPROXY_PROTO_RHTTP_H
+#define _HAPROXY_PROTO_RHTTP_H
+
+#include <haproxy/connection-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/receiver-t.h>
+
+int rhttp_bind_receiver(struct receiver *rx, char **errmsg);
+
+int rhttp_bind_listener(struct listener *listener, char *errmsg, int errlen);
+void rhttp_enable_listener(struct listener *l);
+void rhttp_disable_listener(struct listener *l);
+struct connection *rhttp_accept_conn(struct listener *l, int *status);
+void rhttp_unbind_receiver(struct listener *l);
+int rhttp_set_affinity(struct connection *conn, int new_tid);
+
+int rhttp_accepting_conn(const struct receiver *rx);
+
+void rhttp_notify_preconn_err(struct listener *l);
+
+#endif /* _HAPROXY_PROTO_RHTTP_H */
diff --git a/include/haproxy/proto_sockpair.h b/include/haproxy/proto_sockpair.h
new file mode 100644
index 0000000..bb0256e
--- /dev/null
+++ b/include/haproxy/proto_sockpair.h
@@ -0,0 +1,32 @@
+/*
+ * Socket Pair protocol layer (sockpair)
+ *
+ * Copyright HAProxy Technologies - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_SOCKPAIR_H
+#define _HAPROXY_PROTO_SOCKPAIR_H
+
+extern struct proto_fam proto_fam_sockpair;
+extern struct protocol proto_sockpair;
+
+int recv_fd_uxst(int sock);
+int send_fd_uxst(int fd, int send_fd);
+int sockpair_bind_receiver(struct receiver *rx, char **errmsg);
+
+#endif /* _HAPROXY_PROTO_SOCKPAIR_H */
+
diff --git a/include/haproxy/proto_tcp.h b/include/haproxy/proto_tcp.h
new file mode 100644
index 0000000..8a3d9fd
--- /dev/null
+++ b/include/haproxy/proto_tcp.h
@@ -0,0 +1,45 @@
+/*
+ * include/haproxy/proto_tcp.h
+ * This file contains TCP socket protocol definitions.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTO_TCP_H
+#define _HAPROXY_PROTO_TCP_H
+
+#include <haproxy/api.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/sample-t.h>
+
+extern struct protocol proto_tcpv4;
+extern struct protocol proto_tcpv6;
+
+int tcp_bind_socket(int fd, int flags, struct sockaddr_storage *local, struct sockaddr_storage *remote);
+int tcp_connect_server(struct connection *conn, int flags);
+int tcp_is_foreign(int fd, sa_family_t family);
+
+#endif /* _HAPROXY_PROTO_TCP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/proto_udp.h b/include/haproxy/proto_udp.h
new file mode 100644
index 0000000..1c4da77
--- /dev/null
+++ b/include/haproxy/proto_udp.h
@@ -0,0 +1,41 @@
+/*
+ * include/haproxy/proto_udp.h
+ * This file contains UDP socket protocol definitions.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Partial merge by Emeric Brun <ebrun@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _PROTO_PROTO_UDP_H
+#define _PROTO_PROTO_UDP_H
+
+extern struct protocol proto_udp4;
+extern struct protocol proto_udp6;
+
+int udp_bind_socket(int fd, int flags, struct sockaddr_storage *local, struct sockaddr_storage *remote);
+int udp_suspend_receiver(struct receiver *rx);
+int udp_resume_receiver(struct receiver *rx);
+
+#endif /* _PROTO_PROTO_UDP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/proto_uxst.h b/include/haproxy/proto_uxst.h
new file mode 100644
index 0000000..77caf3d
--- /dev/null
+++ b/include/haproxy/proto_uxst.h
@@ -0,0 +1,34 @@
+/*
+ * include/haproxy/proto_uxst.h
+ * This file contains UNIX stream socket protocol definitions.
+ *
+ * Copyright (C) 2000-2013 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _PROTO_PROTO_UXST_H
+#define _PROTO_PROTO_UXST_H
+
+extern struct protocol proto_uxst;
+
+#endif /* _PROTO_PROTO_UXST_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/protobuf-t.h b/include/haproxy/protobuf-t.h
new file mode 100644
index 0000000..b1a14e7
--- /dev/null
+++ b/include/haproxy/protobuf-t.h
@@ -0,0 +1,87 @@
+/*
+ * include/haproxy/protobuf-t.h
+ * This file contains structure declarations for protocol buffers.
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTOBUF_T_H
+#define _HAPROXY_PROTOBUF_T_H
+
+#include <haproxy/api-t.h>
+
+enum protobuf_wire_type {
+ PBUF_TYPE_VARINT,
+ PBUF_TYPE_64BIT,
+ PBUF_TYPE_LENGTH_DELIMITED,
+ PBUF_TYPE_START_GROUP, /* Deprecated */
+ PBUF_TYPE_STOP_GROUP, /* Deprecated */
+ PBUF_TYPE_32BIT,
+};
+
+enum protobuf_type {
+ /* These enums are used to initialize calloc()'ed struct fields.
+ * Start them from 1 to avoid collisions with the default 0 value
+ * of such struct fields.
+ */
+ PBUF_T_BINARY = 1,
+
+ /* Do not reorder the following ones:
+ * PBUF_T_VARINT_*, PBUF_T_32BIT_* and PBUF_T_64BIT_*
+ */
+ PBUF_T_VARINT_INT32,
+ PBUF_T_VARINT_UINT32,
+ PBUF_T_VARINT_INT64,
+ PBUF_T_VARINT_UINT64,
+ PBUF_T_VARINT_BOOL,
+ PBUF_T_VARINT_ENUM,
+
+ /* These two following varints are first encoded with zigzag. */
+ PBUF_T_VARINT_SINT32,
+ PBUF_T_VARINT_SINT64,
+
+ /* Fixed size types from here. */
+ PBUF_T_32BIT_FIXED32,
+ PBUF_T_32BIT_SFIXED32,
+ PBUF_T_32BIT_FLOAT,
+
+ PBUF_T_64BIT_FIXED64,
+ PBUF_T_64BIT_SFIXED64,
+ PBUF_T_64BIT_DOUBLE,
+};
+
+
+struct pbuf_fid {
+ unsigned int *ids;
+ size_t sz;
+};
+
+struct sample;
+struct protobuf_parser_def {
+ int (*skip)(unsigned char **pos, size_t *left, size_t vlen);
+ int (*smp_store)(struct sample *, int type,
+ unsigned char *pos, size_t left, size_t vlen);
+};
+
+#endif /* _HAPROXY_PROTOBUF_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/protobuf.h b/include/haproxy/protobuf.h
new file mode 100644
index 0000000..009bd13
--- /dev/null
+++ b/include/haproxy/protobuf.h
@@ -0,0 +1,577 @@
+/*
+ * include/haproxy/protobuf.h
+ * This file contains functions and macros declarations for protocol buffers decoding.
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTOBUF_H
+#define _HAPROXY_PROTOBUF_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/protobuf-t.h>
+#include <haproxy/sample-t.h>
+
+#define PBUF_VARINT_DONT_STOP_BIT 7
+#define PBUF_VARINT_DONT_STOP_BITMASK (1 << PBUF_VARINT_DONT_STOP_BIT)
+#define PBUF_VARINT_DATA_BITMASK ~PBUF_VARINT_DONT_STOP_BITMASK
+
+/* .skip and .smp_store prototypes. */
+int protobuf_skip_varint(unsigned char **pos, size_t *len, size_t vlen);
+int protobuf_smp_store_varint(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen);
+int protobuf_skip_64bit(unsigned char **pos, size_t *len, size_t vlen);
+int protobuf_smp_store_64bit(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen);
+int protobuf_skip_vlen(unsigned char **pos, size_t *len, size_t vlen);
+int protobuf_smp_store_vlen(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen);
+int protobuf_skip_32bit(unsigned char **pos, size_t *len, size_t vlen);
+int protobuf_smp_store_32bit(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen);
+
+struct protobuf_parser_def protobuf_parser_defs [] = {
+ [PBUF_TYPE_VARINT ] = {
+ .skip = protobuf_skip_varint,
+ .smp_store = protobuf_smp_store_varint,
+ },
+ [PBUF_TYPE_64BIT ] = {
+ .skip = protobuf_skip_64bit,
+ .smp_store = protobuf_smp_store_64bit,
+ },
+ [PBUF_TYPE_LENGTH_DELIMITED] = {
+ .skip = protobuf_skip_vlen,
+ .smp_store = protobuf_smp_store_vlen,
+ },
+ [PBUF_TYPE_START_GROUP ] = {
+ /* XXX Deprecated XXX */
+ },
+ [PBUF_TYPE_STOP_GROUP ] = {
+ /* XXX Deprecated XXX */
+ },
+ [PBUF_TYPE_32BIT ] = {
+ .skip = protobuf_skip_32bit,
+ .smp_store = protobuf_smp_store_32bit,
+ },
+};
+
+/*
+ * Note that the field values with protocol buffers 32bit and 64bit fixed size as type
+ * are sent in little-endian byte order to the network.
+ */
+
+/* Convert a little-endian ordered 32bit integer to the byte order of the host. */
+static inline uint32_t pbuf_le32toh(uint32_t v)
+{
+ uint8_t *p = (uint8_t *)&v;
+ return (p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24));
+}
+
+/* Convert a little-endian ordered 64bit integer to the byte order of the host. */
+static inline uint64_t pbuf_le64toh(uint64_t v)
+{
+ return (uint64_t)(pbuf_le32toh(v >> 32)) << 32 | pbuf_le32toh(v);
+}
+
+/*
+ * Return a protobuf type enum from <s> string if succeeded, -1 if not.
+ */
+int protobuf_type(const char *s)
+{
+ /* varint types. */
+ if (strcmp(s, "int32") == 0)
+ return PBUF_T_VARINT_INT32;
+ else if (strcmp(s, "uint32") == 0)
+ return PBUF_T_VARINT_UINT32;
+ else if (strcmp(s, "sint32") == 0)
+ return PBUF_T_VARINT_SINT32;
+ else if (strcmp(s, "int64") == 0)
+ return PBUF_T_VARINT_INT64;
+ else if (strcmp(s, "uint64") == 0)
+ return PBUF_T_VARINT_UINT64;
+ else if (strcmp(s, "sint64") == 0)
+ return PBUF_T_VARINT_SINT64;
+ else if (strcmp(s, "bool") == 0)
+ return PBUF_T_VARINT_BOOL;
+ else if (strcmp(s, "enum") == 0)
+ return PBUF_T_VARINT_ENUM;
+
+ /* 32bit fixed size types. */
+ else if (strcmp(s, "fixed32") == 0)
+ return PBUF_T_32BIT_FIXED32;
+ else if (strcmp(s, "sfixed32") == 0)
+ return PBUF_T_32BIT_SFIXED32;
+ else if (strcmp(s, "float") == 0)
+ return PBUF_T_32BIT_FLOAT;
+
+ /* 64bit fixed size types. */
+ else if (strcmp(s, "fixed64") == 0)
+ return PBUF_T_64BIT_FIXED64;
+ else if (strcmp(s, "sfixed64") == 0)
+ return PBUF_T_64BIT_SFIXED64;
+ else if (strcmp(s, "double") == 0)
+ return PBUF_T_64BIT_DOUBLE;
+ else
+ return -1;
+}
+
+/*
+ * Decode a protocol buffers varint located in a buffer at <pos> address with
+ * <len> as length. The decoded value is stored at <val>.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static inline int
+protobuf_varint(uint64_t *val, unsigned char *pos, size_t len)
+{
+ unsigned int shift;
+
+ *val = 0;
+ shift = 0;
+
+ while (len > 0) {
+ int stop = !(*pos & PBUF_VARINT_DONT_STOP_BITMASK);
+
+ *val |= ((uint64_t)(*pos & PBUF_VARINT_DATA_BITMASK)) << shift;
+
+ ++pos;
+ --len;
+
+ if (stop)
+ break;
+ else if (!len)
+ return 0;
+
+ shift += 7;
+ /* The maximum length in bytes of a 64-bit encoded value is 10. */
+ if (shift > 63)
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Decode a protocol buffers varint located in a buffer at <pos> offset address with
+ * <len> as length address. Update <pos> and <len> consequently. Decrease <*len>
+ * by the number of decoded bytes. The decoded value is stored at <val>.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static inline int
+protobuf_decode_varint(uint64_t *val, unsigned char **pos, size_t *len)
+{
+ unsigned int shift;
+
+ *val = 0;
+ shift = 0;
+
+ while (*len > 0) {
+ int stop = !(**pos & PBUF_VARINT_DONT_STOP_BITMASK);
+
+ *val |= ((uint64_t)**pos & PBUF_VARINT_DATA_BITMASK) << shift;
+
+ ++*pos;
+ --*len;
+
+ if (stop)
+ break;
+ else if (!*len)
+ return 0;
+
+ shift += 7;
+ /* The maximum length in bytes of a 64-bit encoded value is 10. */
+ if (shift > 63)
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Skip a protocol buffer varint found at <pos> as position address with <len>
+ * as available length address. Update <*pos> to make it point to the next
+ * available byte. Decrease <*len> by the number of skipped bytes.
+ * Returns 1 if succeeded, 0 if not.
+ */
+int
+protobuf_skip_varint(unsigned char **pos, size_t *len, size_t vlen)
+{
+ unsigned int shift;
+
+ shift = 0;
+
+ while (*len > 0) {
+ int stop = !(**pos & PBUF_VARINT_DONT_STOP_BITMASK);
+
+ ++*pos;
+ --*len;
+
+ if (stop)
+ break;
+ else if (!*len)
+ return 0;
+
+ shift += 7;
+ /* The maximum length in bytes of a 64-bit encoded value is 10. */
+ if (shift > 63)
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * If succeeded, return the length in bytes of the protocol buffers varint
+ * found at <pos>, with <len> as the number of bytes available at <pos>.
+ * Note that <pos> and <len> are passed by value and are not updated by this
+ * function, contrary to the decoding helpers above.
+ * Return -1 if failed.
+ */
+static inline int
+protobuf_varint_getlen(unsigned char *pos, size_t len)
+{
+ unsigned char *spos;
+ unsigned int shift;
+
+ shift = 0;
+ spos = pos;
+
+ while (len > 0) {
+ int stop = !(*pos & PBUF_VARINT_DONT_STOP_BITMASK);
+
+ ++pos;
+ --len;
+
+ if (stop)
+ break;
+ else if (!len)
+ return -1;
+
+ shift += 7;
+ /* The maximum length in bytes of a 64-bit encoded value is 10. */
+ if (shift > 63)
+ return -1;
+ }
+
+ return pos - spos;
+}
+
+/*
+ * Store a varint field value in a sample from <pos> buffer
+ * with <len> available bytes after having decoded it if needed
+ * depending on <type> the expected protocol buffer type of the field.
+ * Return 1 if succeeded, 0 if not.
+ */
+int protobuf_smp_store_varint(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen)
+{
+ switch (type) {
+ case PBUF_T_BINARY:
+ {
+ int varint_len;
+
+ varint_len = protobuf_varint_getlen(pos, len);
+ if (varint_len == -1)
+ return 0;
+
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = (char *)pos;
+ smp->data.u.str.data = varint_len;
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+ }
+
+ case PBUF_T_VARINT_INT32 ... PBUF_T_VARINT_ENUM:
+ {
+ uint64_t varint;
+
+ if (!protobuf_varint(&varint, pos, len))
+ return 0;
+
+ smp->data.u.sint = varint;
+ smp->data.type = SMP_T_SINT;
+ break;
+ }
+
+ case PBUF_T_VARINT_SINT32 ... PBUF_T_VARINT_SINT64:
+ {
+ uint64_t varint;
+
+ if (!protobuf_varint(&varint, pos, len))
+ return 0;
+
+ /* zigzag decoding. */
+ smp->data.u.sint = (varint >> 1) ^ -(varint & 1);
+ smp->data.type = SMP_T_SINT;
+ break;
+ }
+
+ default:
+ return 0;
+
+ }
+
+ return 1;
+}
+
+/*
+ * Move forward <*pos> buffer by 8 bytes. Used to skip a 64bit field.
+ */
+int protobuf_skip_64bit(unsigned char **pos, size_t *len, size_t vlen)
+{
+ if (*len < sizeof(uint64_t))
+ return 0;
+
+ *pos += sizeof(uint64_t);
+ *len -= sizeof(uint64_t);
+
+ return 1;
+}
+
+/*
+ * Store a fixed size 64bit field value in a sample from <pos> buffer
+ * with <len> available bytes after having decoded it depending on <type>
+ * the expected protocol buffer type of the field.
+ * Return 1 if succeeded, 0 if not.
+ */
+int protobuf_smp_store_64bit(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen)
+{
+ if (len < sizeof(uint64_t))
+ return 0;
+
+ switch (type) {
+ case PBUF_T_BINARY:
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = (char *)pos;
+ smp->data.u.str.data = sizeof(uint64_t);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ case PBUF_T_64BIT_FIXED64:
+ case PBUF_T_64BIT_SFIXED64:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = pbuf_le64toh(*(uint64_t *)pos);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ case PBUF_T_64BIT_DOUBLE:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = pbuf_le64toh(*(double *)pos);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Move forward <*pos> buffer by <vlen> bytes. Used to skip a length-delimited
+ * field.
+ */
+int protobuf_skip_vlen(unsigned char **pos, size_t *len, size_t vlen)
+{
+ if (*len < vlen)
+ return 0;
+
+ *pos += vlen;
+ *len -= vlen;
+
+ return 1;
+}
+
+/*
+ * Store a <vlen>-bytes length-delimited field value in a sample from <pos>
+ * buffer with <len> available bytes.
+ * Return 1 if succeeded, 0 if not.
+ */
+int protobuf_smp_store_vlen(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen)
+{
+ if (len < vlen)
+ return 0;
+
+ if (type != PBUF_T_BINARY)
+ return 0;
+
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = (char *)pos;
+ smp->data.u.str.data = vlen;
+ smp->flags = SMP_F_VOL_TEST;
+
+ return 1;
+}
+
+/*
+ * Move forward <*pos> buffer by 4 bytes. Used to skip a 32bit field.
+ */
+int protobuf_skip_32bit(unsigned char **pos, size_t *len, size_t vlen)
+{
+ if (*len < sizeof(uint32_t))
+ return 0;
+
+ *pos += sizeof(uint32_t);
+ *len -= sizeof(uint32_t);
+
+ return 1;
+}
+
+/*
+ * Store a fixed size 32bit field value in a sample from <pos> buffer
+ * with <len> available bytes after having decoded it depending on <type>
+ * the expected protocol buffer type of the field.
+ * Return 1 if succeeded, 0 if not.
+ */
+int protobuf_smp_store_32bit(struct sample *smp, int type,
+ unsigned char *pos, size_t len, size_t vlen)
+{
+ if (len < sizeof(uint32_t))
+ return 0;
+
+ switch (type) {
+ case PBUF_T_BINARY:
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = (char *)pos;
+ smp->data.u.str.data = sizeof(uint32_t);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ case PBUF_T_32BIT_FIXED32:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = pbuf_le32toh(*(uint32_t *)pos);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ case PBUF_T_32BIT_SFIXED32:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = (int32_t)pbuf_le32toh(*(uint32_t *)pos);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ case PBUF_T_32BIT_FLOAT:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = pbuf_le32toh(*(float *)pos);
+ smp->flags = SMP_F_VOL_TEST;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Lookup for a protocol buffers field whose parameters are provided by <arg_p>
+ * first argument in the buffer with <pos> as address and <len> as length address.
+ * If found, store its value depending on the type of storage to use provided by <arg_p>
+ * second argument and return 1, 0 if not.
+ */
+static inline int protobuf_field_lookup(const struct arg *arg_p, struct sample *smp,
+ unsigned char **pos, size_t *len)
+{
+ unsigned int *fid;
+ size_t fid_sz;
+ int type;
+ uint64_t elen;
+ int field;
+
+ fid = arg_p[0].data.fid.ids;
+ fid_sz = arg_p[0].data.fid.sz;
+ type = arg_p[1].data.sint;
+
+ /* Length of the length-delimited messages if any. */
+ elen = 0;
+ field = 0;
+
+ while (field < fid_sz) {
+ int found;
+ uint64_t key, sleft;
+ struct protobuf_parser_def *pbuf_parser = NULL;
+ unsigned int wire_type, field_number;
+
+ if ((ssize_t)*len <= 0)
+ return 0;
+
+ /* Remaining bytes saving. */
+ sleft = *len;
+
+ /* Key decoding */
+ if (!protobuf_decode_varint(&key, pos, len))
+ return 0;
+
+ wire_type = key & 0x7;
+ field_number = key >> 3;
+ found = field_number == fid[field];
+
+ /* Skip the data if the current field does not match. */
+ switch (wire_type) {
+ case PBUF_TYPE_VARINT:
+ case PBUF_TYPE_32BIT:
+ case PBUF_TYPE_64BIT:
+ pbuf_parser = &protobuf_parser_defs[wire_type];
+ if (!found && !pbuf_parser->skip(pos, len, 0))
+ return 0;
+ break;
+
+ case PBUF_TYPE_LENGTH_DELIMITED:
+ /* Decode the length of this length-delimited field. */
+ if (!protobuf_decode_varint(&elen, pos, len) || elen > *len)
+ return 0;
+
+ /* The size of the current field is computed from here to skip
+			 * the bytes used to encode the previous length.
+ */
+ sleft = *len;
+ pbuf_parser = &protobuf_parser_defs[wire_type];
+ if (!found && !pbuf_parser->skip(pos, len, elen))
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ /* Store the data if found. Note that <pbuf_parser> is not NULL */
+ if (found && field == fid_sz - 1)
+ return pbuf_parser->smp_store(smp, type, *pos, *len, elen);
+
+ if ((ssize_t)(elen) > 0)
+ elen -= sleft - *len;
+
+ if (found) {
+ field++;
+ }
+ else if ((ssize_t)elen <= 0) {
+ field = 0;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* _HAPROXY_PROTOBUF_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/protocol-t.h b/include/haproxy/protocol-t.h
new file mode 100644
index 0000000..b85f29c
--- /dev/null
+++ b/include/haproxy/protocol-t.h
@@ -0,0 +1,148 @@
+/*
+ * include/haproxy/protocol-t.h
+ * This file defines the structures used by generic network protocols.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTOCOL_T_H
+#define _HAPROXY_PROTOCOL_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <import/ebtree-t.h>
+#include <haproxy/api-t.h>
+
+/* some pointer types referenced below */
+struct listener;
+struct receiver;
+struct connection;
+
+/*
+ * Custom network family for str2sa parsing. Should be ok to do this since
+ * sa_family_t is standardized as an unsigned integer
+ */
+#define AF_CUST_EXISTING_FD (AF_MAX + 1)
+#define AF_CUST_SOCKPAIR (AF_MAX + 2)
+#define AF_CUST_RHTTP_SRV (AF_MAX + 3)
+#define AF_CUST_MAX (AF_MAX + 4)
+
+/*
+ * Test in case AF_CUST_MAX overflows the sa_family_t (unsigned int)
+ */
+#if (AF_CUST_MAX < AF_MAX)
+# error "Can't build on the target system, AF_CUST_MAX overflow"
+#endif
+
+/* socket-level protocol types, used for protocol selection */
+enum proto_type {
+	PROTO_TYPE_STREAM,      /* streaming protocol (like TCP) */
+	PROTO_TYPE_DGRAM,       /* datagram protocol (like UDP) */
+	PROTO_NUM_TYPES         /* must be the last one: used as an array bound (see __protocol_by_family) */
+};
+
+/* max length of a protocol name, including trailing zero */
+#define PROTO_NAME_LEN 16
+
+/* flags for ->connect() */
+#define CONNECT_HAS_DATA 0x00000001 /* There's data available to be sent */
+#define CONNECT_DELACK_SMART_CONNECT 0x00000002 /* Use a delayed ACK if the backend has tcp-smart-connect */
+#define CONNECT_DELACK_ALWAYS 0x00000004 /* Use a delayed ACK */
+#define CONNECT_CAN_USE_TFO 0x00000008 /* We can use TFO for this connection */
+
+/* Flags for protocol->flags */
+#define PROTO_F_REUSEPORT_SUPPORTED 0x00000001 /* SO_REUSEPORT is supported */
+#define PROTO_F_REUSEPORT_TESTED 0x00000002 /* SO_REUSEPORT support was tested */
+
+/* protocol families define standard functions acting on a given address family
+ * for a socket implementation, such as AF_INET/PF_INET for example.
+ */
+struct proto_fam {
+	char name[PROTO_NAME_LEN];      /* family name, zero-terminated */
+	int sock_domain;                /* socket domain, as passed to socket() */
+	sa_family_t sock_family;        /* socket family, for sockaddr */
+	ushort l3_addrlen;              /* layer3 address length, used by hashes */
+	socklen_t sock_addrlen;         /* socket address length, used by bind() */
+	/* 4-bytes hole here */
+	int (*addrcmp)(const struct sockaddr_storage *, const struct sockaddr_storage *); /* compare two addresses, memcmp() convention (0 when equal) */
+	int (*bind)(struct receiver *rx, char **errmsg); /* bind a receiver; presumably sets *errmsg on failure — confirm with implementations */
+	int (*get_src)(int fd, struct sockaddr *, socklen_t, int dir); /* syscall used to retrieve connection's src addr */
+	int (*get_dst)(int fd, struct sockaddr *, socklen_t, int dir); /* syscall used to retrieve connection's dst addr */
+	void (*set_port)(struct sockaddr_storage *, int port); /* set the port on the address; NULL if not implemented */
+};
+
+/* This structure contains all information needed to easily handle a protocol.
+ * Its primary goal is to ease listeners maintenance. Specifically, the
+ * bind() primitive must be used before any fork(). rx_suspend()/rx_resume()
+ * return >0 on success, 0 if rx stopped, -1 on failure to proceed. rx_* may
+ * be null if the protocol doesn't provide direct access to the receiver.
+ */
+struct protocol {
+	char name[PROTO_NAME_LEN]; /* protocol name, zero-terminated */
+	struct proto_fam *fam; /* protocol family */
+	int xprt_type; /* transport layer type (PROTO_TYPE_STREAM/PROTO_TYPE_DGRAM) */
+	enum proto_type proto_type; /* protocol type at the socket layer (PROTO_TYPE_*) */
+	int sock_type; /* socket type, as passed to socket() */
+	int sock_prot; /* socket protocol, as passed to socket() */
+
+	/* functions acting on the listener */
+	void (*add)(struct protocol *p, struct listener *l); /* add a listener for this protocol */
+	int (*listen)(struct listener *l, char *errmsg, int errlen); /* start a listener */
+	void (*enable)(struct listener *l); /* enable receipt of new connections */
+	void (*disable)(struct listener *l); /* disable receipt of new connections */
+	void (*unbind)(struct listener *l); /* unbind the listener and possibly its receiver */
+	int (*suspend)(struct listener *l); /* try to suspend the listener */
+	int (*resume)(struct listener *l); /* try to resume a suspended listener */
+	struct connection *(*accept_conn)(struct listener *l, int *status); /* accept a new connection */
+
+	/* functions acting on connections */
+	void (*ctrl_init)(struct connection *); /* completes initialization of the connection */
+	void (*ctrl_close)(struct connection *); /* completes release of the connection */
+	int (*connect)(struct connection *, int flags); /* connect function if any, see below for flags values */
+	int (*drain)(struct connection *); /* drain pending data; 0=failed, >0=success */
+	int (*check_events)(struct connection *conn, int event_type); /* subscribe to socket events */
+	void (*ignore_events)(struct connection *conn, int event_type); /* unsubscribe from socket events */
+	int (*get_src)(struct connection *conn, struct sockaddr *, socklen_t); /* retrieve connection's source address; -1=fail */
+	int (*get_dst)(struct connection *conn, struct sockaddr *, socklen_t); /* retrieve connection's dest address; -1=fail */
+	int (*set_affinity)(struct connection *conn, int new_tid); /* rebind the connection to thread <new_tid>; return convention not visible here — confirm with implementations */
+
+	/* functions acting on the receiver */
+	int (*rx_suspend)(struct receiver *rx); /* temporarily suspend this receiver for a soft restart */
+	int (*rx_resume)(struct receiver *rx); /* try to resume a temporarily suspended receiver */
+	void (*rx_enable)(struct receiver *rx); /* enable receiving on the receiver */
+	void (*rx_disable)(struct receiver *rx); /* disable receiving on the receiver */
+	void (*rx_unbind)(struct receiver *rx); /* unbind the receiver, most often closing the FD */
+	int (*rx_listening)(const struct receiver *rx); /* is the receiver listening ? 0=no, >0=OK, <0=unrecoverable */
+
+	/* default I/O handler */
+	void (*default_iocb)(int fd); /* generic I/O handler (typically accept callback) */
+
+	uint flags; /* flags describing protocol support (PROTO_F_*) */
+	uint nb_receivers; /* number of receivers (under proto_lock) */
+	struct list receivers; /* list of receivers using this protocol (under proto_lock) */
+	struct list list; /* list of registered protocols (under proto_lock) */
+};
+
+#endif /* _HAPROXY_PROTOCOL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/protocol.h b/include/haproxy/protocol.h
new file mode 100644
index 0000000..828093d
--- /dev/null
+++ b/include/haproxy/protocol.h
@@ -0,0 +1,111 @@
+/*
+ * include/haproxy/protocol.h
+ * This file declares generic protocol management primitives.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROTOCOL_H
+#define _HAPROXY_PROTOCOL_H
+
+#include <sys/socket.h>
+#include <haproxy/protocol-t.h>
+#include <haproxy/thread.h>
+
+/* [AF][sock_dgram][ctrl_dgram] */
+extern struct protocol *__protocol_by_family[AF_CUST_MAX][PROTO_NUM_TYPES][2];
+__decl_thread(extern HA_SPINLOCK_T proto_lock);
+
+/* Registers the protocol <proto> */
+void protocol_register(struct protocol *proto);
+
+/* Unregisters the protocol <proto>. Note that all listeners must have
+ * previously been unbound.
+ */
+void protocol_unregister(struct protocol *proto);
+
+/* clears flag <flag> on all protocols. */
+void protocol_clrf_all(uint flag);
+
+/* sets flag <flag> on all protocols. */
+void protocol_setf_all(uint flag);
+
+/* Checks if protocol <proto> supports PROTO_F flag <flag>. Returns zero if not,
+ * non-zero if supported. It may return a cached value from a previous test,
+ * and may run live tests then update the proto's flags to cache a result. It's
+ * better to call it only if needed so that it doesn't result in modules being
+ * loaded in case of a live test.
+ */
+int protocol_supports_flag(struct protocol *proto, uint flag);
+
+/* binds all listeners of all registered protocols. Returns a composition
+ * of ERR_NONE, ERR_RETRYABLE, ERR_FATAL, ERR_ABORT.
+ */
+int protocol_bind_all(int verbose);
+
+/* unbinds all listeners of all registered protocols. They are also closed.
+ * This must be performed before calling exit() in order to get a chance to
+ * remove file-system based sockets and pipes.
+ * Returns a composition of ERR_NONE, ERR_RETRYABLE, ERR_FATAL.
+ */
+int protocol_unbind_all(void);
+
+/* stops all listeners of all registered protocols. This will normally catch
+ * every single listener, all protocols included. This is to be used during
+ * soft_stop() only. It does not return any error.
+ */
+void protocol_stop_now(void);
+
+/* pauses all listeners of all registered protocols. This is typically
+ * used on SIG_TTOU to release all listening sockets for the time needed to
+ * try to bind a new process. The listeners enter LI_PAUSED. It returns
+ * ERR_NONE, with ERR_FATAL on failure.
+ */
+int protocol_pause_all(void);
+
+/* resumes all listeners of all registered protocols. This is typically used on
+ * SIG_TTIN to re-enable listening sockets after a new process failed to bind.
+ * The listeners switch to LI_READY/LI_FULL. It returns ERR_NONE, with ERR_FATAL
+ * on failure.
+ */
+int protocol_resume_all(void);
+
+/* enables all listeners of all registered protocols. This is intended to be
+ * used after a fork() to enable reading on all file descriptors. Returns a
+ * composition of ERR_NONE, ERR_RETRYABLE, ERR_FATAL.
+ */
+int protocol_enable_all(void);
+
+/* returns the protocol associated to family <family> with proto_type among the
+ * supported protocol types, and ctrl_type of either SOCK_STREAM or SOCK_DGRAM
+ * depending on the requested values, or NULL if not found.
+ */
+static inline struct protocol *protocol_lookup(int family, enum proto_type proto_type, int ctrl_dgram)
+{
+	/* families outside the known range have no registered protocol */
+	if (family < 0 || family >= AF_CUST_MAX)
+		return NULL;
+	return __protocol_by_family[family][proto_type][ctrl_dgram ? 1 : 0];
+}
+
+#endif /* _HAPROXY_PROTOCOL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
new file mode 100644
index 0000000..2f7bf7b
--- /dev/null
+++ b/include/haproxy/proxy-t.h
@@ -0,0 +1,547 @@
+/*
+ * include/haproxy/proxy-t.h
+ * This file defines everything related to proxies.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROXY_T_H
+#define _HAPROXY_PROXY_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/backend-t.h>
+#include <haproxy/compression-t.h>
+#include <haproxy/counters-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/queue-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/tcpcheck-t.h>
+#include <haproxy/thread-t.h>
+#include <haproxy/tools-t.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/http_ext-t.h>
+
+/* values for proxy->mode */
+enum pr_mode {
+	PR_MODE_TCP = 0,        /* pure TCP (layer4) processing */
+	PR_MODE_HTTP,           /* HTTP (layer7) processing */
+	PR_MODE_CLI,            /* NOTE(review): CLI/SYSLOG/PEERS semantics inferred from names — confirm */
+	PR_MODE_SYSLOG,
+	PR_MODE_PEERS,
+	PR_MODES                /* number of modes; must remain last */
+} __attribute__((packed)); /* packed so the enum occupies the minimum size */
+
+/* source of a proxy's saved server state; presumably driven by the
+ * "load-server-state-from-file" directive — TODO confirm against the parser.
+ */
+enum PR_SRV_STATE_FILE {
+	PR_SRV_STATE_FILE_UNSPEC = 0, /* not specified in the configuration */
+	PR_SRV_STATE_FILE_NONE,       /* do not load any server state */
+	PR_SRV_STATE_FILE_GLOBAL,     /* use the process-wide (global) state file */
+	PR_SRV_STATE_FILE_LOCAL,      /* use a per-proxy (local) state file */
+};
+
+
+/* flag values for proxy->cap. This is a bitmask of capabilities supported by the proxy */
+#define PR_CAP_NONE 0x0000
+#define PR_CAP_FE 0x0001
+#define PR_CAP_BE 0x0002
+#define PR_CAP_LISTEN (PR_CAP_FE|PR_CAP_BE)
+#define PR_CAP_DEF 0x0004 /* defaults section */
+#define PR_CAP_INT 0x0008 /* internal proxy (used by lua engine) */
+#define PR_CAP_LB 0x0010 /* load-balancing capabilities, i.e. listen/frontend/backend proxies */
+#define PR_CAP_HTTPCLIENT 0x0020 /* proxy used for httpclient */
+
+/* bits for proxy->options */
+#define PR_O_REDISP 0x00000001 /* allow reconnection to dispatch in case of errors */
+#define PR_O_TRANSP 0x00000002 /* transparent mode : use original DEST as dispatch */
+
+/* HTTP server-side reuse */
+#define PR_O_REUSE_NEVR 0x00000000 /* never reuse a shared connection */
+#define PR_O_REUSE_SAFE 0x00000004 /* only reuse a shared connection when it's safe to do so */
+#define PR_O_REUSE_AGGR 0x00000008 /* aggressively reuse a shared connection */
+#define PR_O_REUSE_ALWS 0x0000000C /* always reuse a shared connection */
+#define PR_O_REUSE_MASK 0x0000000C /* mask to retrieve shared connection preferences */
+
+#define PR_O_IDLE_CLOSE_RESP 0x00000010 /* avoid closing idle connections during a soft stop */
+#define PR_O_PREF_LAST 0x00000020 /* prefer last server */
+#define PR_O_DISPATCH 0x00000040 /* use dispatch mode */
+#define PR_O_FORCED_ID 0x00000080 /* proxy's ID was forced in the configuration */
+/* unused: 0x00000100 */
+#define PR_O_IGNORE_PRB 0x00000200 /* ignore empty requests (aborts and timeouts) */
+#define PR_O_NULLNOLOG 0x00000400 /* a connect without request will not be logged */
+#define PR_O_WREQ_BODY 0x00000800 /* always wait for the HTTP request body */
+#define PR_O_HTTP_UPG 0x00001000 /* Contain a "switch-mode http" tcp-request rule */
+/* unused: 0x00002000 */
+#define PR_O_PERSIST 0x00004000 /* server persistence stays effective even when server is down */
+#define PR_O_LOGASAP 0x00008000 /* log as soon as possible, without waiting for the stream to complete */
+#define PR_O_ERR_LOGFMT 0x00010000 /* use log-format for connection error message */
+#define PR_O_CHK_CACHE 0x00020000 /* require examination of cacheability of the 'set-cookie' field */
+#define PR_O_TCP_CLI_KA 0x00040000 /* enable TCP keep-alive on client-side streams */
+#define PR_O_TCP_SRV_KA 0x00080000 /* enable TCP keep-alive on server-side streams */
+#define PR_O_USE_ALL_BK 0x00100000 /* load-balance between backup servers */
+/* unused: 0x00200000 */
+#define PR_O_TCP_NOLING 0x00400000 /* disable lingering on client and server connections */
+#define PR_O_ABRT_CLOSE 0x00800000 /* immediately abort request when client closes */
+
+#define PR_O_HTTP_KAL 0x00000000 /* HTTP keep-alive mode (http-keep-alive) */
+#define PR_O_HTTP_CLO 0x01000000 /* HTTP close mode (httpclose) */
+#define PR_O_HTTP_SCL 0x02000000 /* HTTP server close mode (http-server-close) */
+#define PR_O_HTTP_MODE 0x03000000 /* MASK to retrieve the HTTP mode */
+/* unused: 0x04000000 */
+
+#define PR_O_TCPCHK_SSL 0x08000000 /* at least one TCPCHECK connect rule requires SSL */
+#define PR_O_CONTSTATS 0x10000000 /* continuous counters */
+/* unused: 0x20000000 */
+#define PR_O_DISABLE404 0x40000000 /* Disable a server on a 404 response to a health-check */
+/* unused: 0x80000000 */
+
+/* bits for proxy->options2 */
+#define PR_O2_SPLIC_REQ 0x00000001 /* transfer requests using linux kernel's splice() */
+#define PR_O2_SPLIC_RTR 0x00000002 /* transfer responses using linux kernel's splice() */
+#define PR_O2_SPLIC_AUT 0x00000004 /* automatically use linux kernel's splice() */
+#define PR_O2_SPLIC_ANY (PR_O2_SPLIC_REQ|PR_O2_SPLIC_RTR|PR_O2_SPLIC_AUT)
+#define PR_O2_REQBUG_OK 0x00000008 /* let buggy requests pass through */
+#define PR_O2_RSPBUG_OK 0x00000010 /* let buggy responses pass through */
+#define PR_O2_NOLOGNORM 0x00000020 /* don't log normal traffic, only errors and retries */
+#define PR_O2_LOGERRORS 0x00000040 /* log errors and retries at level LOG_ERR */
+#define PR_O2_SMARTACC 0x00000080 /* don't immediately ACK request after accept */
+#define PR_O2_SMARTCON 0x00000100 /* don't immediately send empty ACK after connect */
+#define PR_O2_RDPC_PRST 0x00000200 /* Activate rdp cookie analyser */
+#define PR_O2_CLFLOG 0x00000400 /* log into clf format */
+#define PR_O2_LOGHCHKS 0x00000800 /* log health checks */
+#define PR_O2_INDEPSTR 0x00001000 /* independent streams, don't update rex on write */
+#define PR_O2_SOCKSTAT 0x00002000 /* collect & provide separate statistics for sockets */
+
+#define PR_O2_H1_ADJ_BUGCLI 0x00008000 /* adjust the case of h1 headers of the response for bogus clients */
+#define PR_O2_H1_ADJ_BUGSRV 0x00004000 /* adjust the case of h1 headers of the request for bogus servers */
+#define PR_O2_NO_H2_UPGRADE 0x00010000 /* disable the implicit H2 upgrades from H1 client connections */
+
+#define PR_O2_NODELAY 0x00020000 /* fully interactive mode, never delay outgoing data */
+#define PR_O2_USE_PXHDR 0x00040000 /* use Proxy-Connection for proxy requests */
+#define PR_O2_CHK_SNDST 0x00080000 /* send the state of each server along with HTTP health checks */
+
+#define PR_O2_SRC_ADDR 0x00100000 /* get the source ip and port for logs */
+
+#define PR_O2_FAKE_KA 0x00200000 /* pretend we do keep-alive with server even though we close */
+
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_BLK 0x00400000 /* reject request with header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_DEL 0x00800000 /* remove request header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP 0x01000000 /* preserve request header names containing chars outside of [0-9a-zA-Z-] charset */
+#define PR_O2_RSTRICT_REQ_HDR_NAMES_MASK 0x01c00000 /* mask for restrict-http-header-names option */
+/* unused : 0x02000000..0x08000000 ; the top nibble (0xF0000000) is reserved for the health check bits below */
+
+/* server health checks */
+#define PR_O2_CHK_NONE 0x00000000 /* no L7 health checks configured (TCP by default) */
+#define PR_O2_TCPCHK_CHK 0x90000000 /* use TCPCHK check for server health */
+#define PR_O2_EXT_CHK 0xA0000000 /* use external command for server health */
+/* unused: 0xB0000000 to 0xF000000, reserved for health checks */
+#define PR_O2_CHK_ANY 0xF0000000 /* Mask to cover any check */
+/* end of proxy->options2 */
+
+/* Cookie settings for pr->ck_opts */
+#define PR_CK_RW 0x00000001 /* rewrite all direct cookies with the right serverid */
+#define PR_CK_IND 0x00000002 /* keep only indirect cookies */
+#define PR_CK_INS 0x00000004 /* insert cookies when not accessing a server directly */
+#define PR_CK_PFX 0x00000008 /* rewrite all cookies by prefixing the right serverid */
+#define PR_CK_ANY (PR_CK_RW | PR_CK_IND | PR_CK_INS | PR_CK_PFX)
+#define PR_CK_NOC 0x00000010 /* add a 'Cache-control' header with the cookie */
+#define PR_CK_POST 0x00000020 /* don't insert cookies for requests other than a POST */
+#define PR_CK_PSV 0x00000040 /* cookie ... preserve */
+#define PR_CK_HTTPONLY 0x00000080 /* emit the "HttpOnly" attribute */
+#define PR_CK_SECURE 0x00000100 /* emit the "Secure" attribute */
+#define PR_CK_DYNAMIC 0x00000200 /* create dynamic cookies for each server */
+
+/* bits for sticking rules */
+#define STK_IS_MATCH 0x00000001 /* match on request fetch */
+#define STK_IS_STORE 0x00000002 /* store on request fetch */
+#define STK_ON_RSP 0x00000004 /* store on response fetch */
+
+/* diff bits for proxy_find_best_match */
+#define PR_FBM_MISMATCH_ID 0x01
+#define PR_FBM_MISMATCH_NAME 0x02
+#define PR_FBM_MISMATCH_PROXYTYPE 0x04
+
+/* Bits for the different retry causes */
+#define PR_RE_CONN_FAILED 0x00000001 /* Retry if we failed to connect */
+#define PR_RE_DISCONNECTED 0x00000002 /* Retry if we got disconnected with no answer */
+#define PR_RE_TIMEOUT 0x00000004 /* Retry if we got a server timeout before we got any data */
+#define PR_RE_401 0x00000008 /* Retry if we got a 401 */
+#define PR_RE_403 0x00000010 /* Retry if we got a 403 */
+#define PR_RE_404 0x00000020 /* Retry if we got a 404 */
+#define PR_RE_408 0x00000040 /* Retry if we got a 408 */
+#define PR_RE_425 0x00000080 /* Retry if we got a 425 */
+#define PR_RE_500 0x00000100 /* Retry if we got a 500 */
+#define PR_RE_501 0x00000200 /* Retry if we got a 501 */
+#define PR_RE_502 0x00000400 /* Retry if we got a 502 */
+#define PR_RE_503 0x00000800 /* Retry if we got a 503 */
+#define PR_RE_504 0x00001000 /* Retry if we got a 504 */
+#define PR_RE_STATUS_MASK (PR_RE_401 | PR_RE_403 | PR_RE_404 | \
+ PR_RE_408 | PR_RE_425 | PR_RE_500 | \
+ PR_RE_501 | PR_RE_502 | PR_RE_503 | \
+ PR_RE_504)
+/* 0x00002000, 0x00004000 and 0x00008000 unused,
+ * reserved for eventual future status codes
+ */
+#define PR_RE_EARLY_ERROR 0x00010000 /* Retry if we failed at sending early data */
+#define PR_RE_JUNK_REQUEST 0x00020000 /* We received an incomplete or garbage response */
+
+/* Proxy flags */
+#define PR_FL_DISABLED 0x01 /* The proxy was disabled in the configuration (not at runtime) */
+#define PR_FL_STOPPED 0x02 /* The proxy was stopped */
+#define PR_FL_READY 0x04 /* The proxy is ready to be used (initialized and configured) */
+#define PR_FL_EXPLICIT_REF 0x08 /* The default proxy is explicitly referenced by another proxy */
+#define PR_FL_IMPLICIT_REF 0x10 /* The default proxy is implicitly referenced by another proxy */
+#define PR_FL_PAUSED 0x20 /* The proxy was paused at run time (reversible) */
+
+struct stream;
+
+/* HTTP message/transaction state captured in an error snapshot (ctx.http) */
+struct http_snapshot {
+	unsigned int sid;       /* ID of the faulty stream */
+	unsigned int state;     /* message state before the error (when saved) */
+	unsigned int b_flags;   /* buffer flags */
+	unsigned int s_flags;   /* stream flags */
+
+	unsigned int t_flags;   /* transaction flags */
+	unsigned int m_flags;   /* message flags */
+	unsigned long long m_clen; /* chunk len for this message */
+	unsigned long long m_blen; /* body len for this message */
+};
+
+/* H1 parser state captured in an error snapshot (ctx.h1) */
+struct h1_snapshot {
+	unsigned int state;     /* H1 message state when the error occurred */
+	unsigned int c_flags;   /* H1 connection flags */
+	unsigned int s_flags;   /* H1 stream flags */
+	unsigned int m_flags;   /* H1 message flags */
+	unsigned long long m_clen; /* chunk len for this message */
+	unsigned long long m_blen; /* body len for this message */
+};
+
+/* protocol-specific part of an error snapshot; only one member is relevant
+ * for a given capture, depending on which layer detected the error.
+ */
+union error_snapshot_ctx {
+	struct http_snapshot http;
+	struct h1_snapshot h1;
+};
+
+/* Full capture of an invalid message event: common metadata, a
+ * protocol-specific context, then a copy of the offending buffer contents.
+ * The "@NN" comments track the expected byte offsets of following members.
+ */
+struct error_snapshot {
+	/**** common part ****/
+	struct timeval when;            /* date of this event, (tv_sec == 0) means "never" */
+	/* @16 */
+	void (*show)(struct buffer *, const struct error_snapshot *); /* dump function */
+	unsigned long long buf_ofs;     /* relative position of the buffer's input inside its container */
+	/* @32 */
+	unsigned int buf_out;           /* pending output bytes _before_ the buffer's input (0..buf->data-1) */
+	unsigned int buf_len;           /* original length of the last invalid request/response (0..buf->data-1-buf_out) */
+	unsigned int buf_err;           /* buffer-relative position where the error was detected (0..len-1) */
+	unsigned int buf_wrap;          /* buffer-relative position where the buffer is expected to wrap (1..buf_size) */
+	/* @48 */
+	struct proxy *oe;               /* other end = frontend or backend involved */
+	struct server *srv;             /* server associated with the error (or NULL) */
+	/* @64 */
+	unsigned int ev_id;             /* event number (counter incremented for each capture) */
+	/* @68: 4 bytes hole here */
+	struct sockaddr_storage src;    /* client's address */
+
+	/**** protocol-specific part ****/
+	union error_snapshot_ctx ctx;
+	char buf[VAR_ARRAY];            /* copy of the beginning of the message for bufsize bytes */
+};
+
+struct proxy {
+ enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
+ char flags; /* bit field PR_FL_* */
+ enum pr_mode mode; /* mode = PR_MODE_TCP, PR_MODE_HTTP, ... */
+ char cap; /* supported capabilities (PR_CAP_*) */
+ unsigned int maxconn; /* max # of active streams on the frontend */
+
+ int options; /* PR_O_REDISP, PR_O_TRANSP, ... */
+ int options2; /* PR_O2_* */
+ unsigned int ck_opts; /* PR_CK_* (cookie options) */
+ unsigned int fe_req_ana, be_req_ana; /* bitmap of common request protocol analysers for the frontend and backend */
+ unsigned int fe_rsp_ana, be_rsp_ana; /* bitmap of common response protocol analysers for the frontend and backend */
+ unsigned int http_needed; /* non-null if HTTP analyser may be used */
+ union {
+ struct proxy *be; /* default backend, or NULL if none set */
+ char *name; /* default backend name during config parse */
+ } defbe;
+ struct proxy *defpx; /* default proxy used to init this one (may be NULL) */
+ struct list acl; /* ACL declared on this proxy */
+ struct list http_req_rules; /* HTTP request rules: allow/deny/... */
+ struct list http_res_rules; /* HTTP response rules: allow/deny/... */
+ struct list http_after_res_rules; /* HTTP final response rules: set-header/del-header/... */
+ struct list redirect_rules; /* content redirecting rules (chained) */
+ struct list switching_rules; /* content switching rules (chained) */
+ struct list persist_rules; /* 'force-persist' and 'ignore-persist' rules (chained) */
+ struct list sticking_rules; /* content sticking rules (chained) */
+ struct list storersp_rules; /* content store response rules (chained) */
+ struct list server_rules; /* server switching rules (chained) */
+ struct { /* TCP request processing */
+ unsigned int inspect_delay; /* inspection delay */
+ struct list inspect_rules; /* inspection rules */
+ struct list l4_rules; /* layer4 rules */
+ struct list l5_rules; /* layer5 rules */
+ } tcp_req;
+	struct { /* TCP response processing */
+ unsigned int inspect_delay; /* inspection delay */
+ struct list inspect_rules; /* inspection rules */
+ } tcp_rep;
+ struct server *srv, defsrv; /* known servers; default server configuration */
+ struct lbprm lbprm; /* load-balancing parameters */
+ int srv_act, srv_bck; /* # of servers eligible for LB (UP|!checked) AND (enabled+weight!=0) */
+ int served; /* # of active sessions currently being served */
+ int cookie_len; /* strlen(cookie_name), computed only once */
+ char *cookie_domain; /* domain used to insert the cookie */
+ char *cookie_name; /* name of the cookie to look for */
+ char *cookie_attrs; /* list of attributes to add to the cookie */
+ char *dyncookie_key; /* Secret key used to generate dynamic persistent cookies */
+ unsigned int cookie_maxidle; /* max idle time for this cookie */
+ unsigned int cookie_maxlife; /* max life time for this cookie */
+ char *rdp_cookie_name; /* name of the RDP cookie to look for */
+ char *capture_name; /* beginning of the name of the cookie to capture */
+ int rdp_cookie_len; /* strlen(rdp_cookie_name), computed only once */
+ int capture_namelen; /* length of the cookie name to match */
+ struct uri_auth *uri_auth; /* if non-NULL, the (list of) per-URI authentications */
+ int capture_len; /* length of the string to be captured */
+ int max_out_conns; /* Max number of idling connections we keep for a session */
+ int max_ka_queue; /* 1+maximum requests in queue accepted for reusing a K-A conn (0=none) */
+ int clitcpka_cnt; /* The maximum number of keepalive probes TCP should send before dropping the connection. (client side) */
+ int clitcpka_idle; /* The time (in seconds) the connection needs to remain idle before TCP starts sending keepalive probes. (client side) */
+ int clitcpka_intvl; /* The time (in seconds) between individual keepalive probes. (client side) */
+ int srvtcpka_cnt; /* The maximum number of keepalive probes TCP should send before dropping the connection. (server side) */
+ int srvtcpka_idle; /* The time (in seconds) the connection needs to remain idle before TCP starts sending keepalive probes. (server side) */
+ int srvtcpka_intvl; /* The time (in seconds) between individual keepalive probes. (server side) */
+ struct ist monitor_uri; /* a special URI to which we respond with HTTP/200 OK */
+ struct list mon_fail_cond; /* list of conditions to fail monitoring requests (chained) */
+ struct { /* WARNING! check proxy_reset_timeouts() in proxy.h !!! */
+ int client; /* client I/O timeout (in ticks) */
+ int tarpit; /* tarpit timeout, defaults to connect if unspecified */
+ int queue; /* queue timeout, defaults to connect if unspecified */
+ int connect; /* connect timeout (in ticks) */
+ int server; /* server I/O timeout (in ticks) */
+ int client_hs; /* maximum time for client handshake completion */
+ int httpreq; /* maximum time for complete HTTP request */
+ int httpka; /* maximum time for a new HTTP request when using keep-alive */
+ int check; /* maximum time for complete check */
+ int tunnel; /* I/O timeout to use in tunnel mode (in ticks) */
+ int clientfin; /* timeout to apply to client half-closed connections */
+ int serverfin; /* timeout to apply to server half-closed connections */
+ } timeout;
+ __decl_thread(HA_RWLOCK_T lock); /* may be taken under the server's lock */
+
+ char *id, *desc; /* proxy id (name) and description */
+ struct queue queue; /* queued requests (pendconns) */
+ int totpend; /* total number of pending connections on this instance (for stats) */
+ unsigned int feconn, beconn; /* # of active frontend and backends streams */
+ struct freq_ctr fe_req_per_sec; /* HTTP requests per second on the frontend */
+ struct freq_ctr fe_conn_per_sec; /* received connections per second on the frontend */
+ struct freq_ctr fe_sess_per_sec; /* accepted sessions per second on the frontend (after tcp rules) */
+ struct freq_ctr be_sess_per_sec; /* sessions per second on the backend */
+ unsigned int fe_sps_lim; /* limit on new sessions per second on the frontend */
+ unsigned int fullconn; /* #conns on backend above which servers are used at full load */
+ unsigned int tot_fe_maxconn; /* #maxconn of frontends linked to that backend, it is used to compute fullconn */
+ struct ist server_id_hdr_name; /* the header to use to send the server id (name) */
+ int conn_retries; /* maximum number of connect retries */
+ unsigned int retry_type; /* Type of retry allowed */
+ int redispatch_after; /* number of retries before redispatch */
+ unsigned down_trans; /* up-down transitions */
+ unsigned down_time; /* total time the proxy was down */
+ time_t last_change; /* last time, when the state was changed */
+ int (*accept)(struct stream *s); /* application layer's accept() */
+ struct conn_src conn_src; /* connection source settings */
+ enum obj_type *default_target; /* default target to use for accepted streams or NULL */
+ struct proxy *next;
+ struct proxy *next_stkt_ref; /* Link to the list of proxies which refer to the same stick-table. */
+
+ struct list loggers; /* one per 'log' directive */
+ struct list logformat; /* log_format linked list */
+ struct list logformat_sd; /* log_format linked list for the RFC5424 structured-data part */
+ struct list logformat_error; /* log_format linked list used in case of connection error on the frontend */
+ struct buffer log_tag; /* override default syslog tag */
+ struct ist header_unique_id; /* unique-id header */
+ struct list format_unique_id; /* unique-id format */
+ int to_log; /* things to be logged (LW_*) */
+ int nb_req_cap, nb_rsp_cap; /* # of headers to be captured */
+ struct cap_hdr *req_cap; /* chained list of request headers to be captured */
+ struct cap_hdr *rsp_cap; /* chained list of response headers to be captured */
+ struct pool_head *req_cap_pool, /* pools of pre-allocated char ** used to build the streams */
+ *rsp_cap_pool;
+ struct be_counters be_counters; /* backend statistics counters */
+ struct fe_counters fe_counters; /* frontend statistics counters */
+
+ struct mt_list listener_queue; /* list of the temporarily limited listeners because of lack of a proxy resource */
+ struct stktable *table; /* table for storing sticking streams */
+
+ struct task *task; /* the associated task, mandatory to manage rate limiting, stopping and resource shortage, NULL if disabled */
+ struct tcpcheck_rules tcpcheck_rules; /* tcp-check send / expect rules */
+ char *check_command; /* Command to use for external agent checks */
+ char *check_path; /* PATH environment to use for external agent checks */
+ struct http_reply *replies[HTTP_ERR_SIZE]; /* HTTP replies for known errors */
+ unsigned int log_count; /* number of logs produced by the frontend */
+ int uuid; /* universally unique proxy ID, used for SNMP */
+ unsigned int backlog; /* force the frontend's listen backlog */
+ unsigned int li_all; /* total number of listeners attached to this proxy */
+ unsigned int li_paused; /* total number of listeners paused (LI_PAUSED) */
+ unsigned int li_bound; /* total number of listeners ready (LI_LISTEN) */
+ unsigned int li_ready; /* total number of listeners ready (>=LI_READY) */
+ unsigned int li_suspended; /* total number of listeners suspended (could be paused or unbound) */
+
+ /* warning: these structs are huge, keep them at the bottom */
+ struct sockaddr_storage dispatch_addr; /* the default address to connect to */
+ struct error_snapshot *invalid_req, *invalid_rep; /* captures of last errors */
+
+ /* used only during configuration parsing */
+ int no_options; /* PR_O_REDISP, PR_O_TRANSP, ... */
+ int no_options2; /* PR_O2_* */
+
+ struct {
+ char *file; /* file where the section appears */
+ struct eb32_node id; /* place in the tree of used IDs */
+ int line; /* line where the section appears */
+ struct eb_root used_listener_id;/* list of listener IDs in use */
+ struct eb_root used_server_id; /* list of server IDs in use */
+ struct eb_root used_server_name; /* list of server names in use */
+ struct list bind; /* list of bind settings */
+ struct list listeners; /* list of listeners belonging to this frontend */
+ struct list errors; /* list of all custom error files */
+ struct arg_list args; /* sample arg list that need to be resolved */
+ unsigned int refcount; /* refcount on this proxy (only used for default proxy for now) */
+ struct ebpt_node by_name; /* proxies are stored sorted by name here */
+ char *logformat_string; /* log format string */
+ char *lfs_file; /* file name where the logformat string appears (strdup) */
+ int lfs_line; /* file name where the logformat string appears */
+ int uif_line; /* file name where the unique-id-format string appears */
+ char *uif_file; /* file name where the unique-id-format string appears (strdup) */
+ char *uniqueid_format_string; /* unique-id format string */
+ char *logformat_sd_string; /* log format string for the RFC5424 structured-data part */
+ char *lfsd_file; /* file name where the structured-data logformat string for RFC5424 appears (strdup) */
+ int lfsd_line; /* file name where the structured-data logformat string for RFC5424 appears */
+ char *error_logformat_string;
+ char *elfs_file;
+ int elfs_line;
+ } conf; /* config information */
+ struct http_ext *http_ext; /* http ext options */
+ struct eb_root used_server_addr; /* list of server addresses in use */
+ void *parent; /* parent of the proxy when applicable */
+ struct comp *comp; /* http compression */
+
+ struct {
+ union {
+ struct mailers *m; /* Mailer to send email alerts via */
+ char *name;
+ } mailers;
+ char *from; /* Address to send email alerts from */
+ char *to; /* Address(es) to send email alerts to */
+ char *myhostname; /* Identity to use in HELO command sent to mailer */
+ int level; /* Maximum syslog level of messages to send
+ * email alerts for */
+ int set; /* True if email_alert settings are present */
+ struct email_alertq *queues; /* per-mailer alerts queues */
+ } email_alert;
+
+ int load_server_state_from_file; /* location of the file containing server state.
+ * flag PR_SRV_STATE_FILE_* */
+ char *server_state_file_name; /* used when load_server_state_from_file is set to
+ * PR_SRV_STATE_FILE_LOCAL. Give a specific file name for
+ * this backend. If not specified or void, then the backend
+ * name is used
+ */
+ struct list filter_configs; /* list of the filters that are declared on this proxy */
+
+ EXTRA_COUNTERS(extra_counters_fe);
+ EXTRA_COUNTERS(extra_counters_be);
+};
+
+/* "use_backend"-style switching rule: when <cond> matches, the stream is sent
+ * to the designated backend. If <dynamic> is set, the backend name is built at
+ * run time from the logformat expression in be.expr instead of a fixed proxy.
+ */
+struct switching_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+ int dynamic; /* this is a dynamic rule using the logformat expression */
+ union {
+ struct proxy *backend; /* target backend */
+ char *name; /* target backend name during config parsing */
+ struct list expr; /* logformat expression to use for dynamic rules */
+ } be;
+ char *file; /* config file the rule was declared in (for error reporting) */
+ int line; /* config line the rule was declared on */
+};
+
+/* "use-server" rule: when <cond> matches, the stream is directed to the
+ * designated server. If <dynamic> is set, the server name is built at run
+ * time from the logformat expression in <expr>.
+ */
+struct server_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+ int dynamic; /* non-zero for a dynamic rule using the logformat expression */
+ union {
+ struct server *ptr; /* target server */
+ char *name; /* target server name during config parsing */
+ } srv;
+ struct list expr; /* logformat expression to use for dynamic rules */
+ char *file; /* config file the rule was declared in (for error reporting) */
+ int line; /* config line the rule was declared on */
+};
+
+/* persistence rule (e.g. force-persist / ignore-persist) applied when <cond> matches */
+struct persist_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+ int type; /* persistence action type; presumably a PERSIST_TYPE_* value -- TODO confirm */
+};
+
+/* "stick on"/"stick match"/"stick store" rule: when <cond> matches, the key
+ * fetched by <expr> is looked up in / stored into the designated stick-table.
+ */
+struct sticking_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+ struct sample_expr *expr; /* fetch expr to fetch key */
+ int flags; /* STK_* */
+ union {
+ struct stktable *t; /* target table */
+ char *name; /* target table name during config parsing */
+ } table;
+};
+
+
+/* "redirect" rule: when <cond> matches, an HTTP redirect response is emitted.
+ * The target is either the static string <rdr_str> or built from the
+ * logformat list <rdr_fmt>.
+ */
+struct redirect_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+ int type; /* redirect type (location/prefix/scheme); presumably REDIRECT_TYPE_* -- TODO confirm */
+ int rdr_len; /* length of <rdr_str> */
+ char *rdr_str; /* static redirect target, NULL when <rdr_fmt> is used */
+ struct list rdr_fmt; /* logformat list building the redirect target at run time */
+ int code; /* HTTP status code to return (e.g. 301, 302) */
+ unsigned int flags; /* redirect option flags; presumably REDIRECT_FLAG_* -- TODO confirm */
+ int cookie_len; /* length of <cookie_str> */
+ char *cookie_str; /* optional cookie to set/clear along with the redirect */
+};
+
+/* some of the most common options which are also the easiest to handle.
+ * Each entry maps a configuration keyword to the option bit it toggles,
+ * together with the proxy capabilities/modes it is valid for.
+ */
+struct cfg_opt {
+ const char *name; /* option keyword as written in the configuration */
+ unsigned int val; /* option bit(s) set/cleared in the proxy's options field */
+ unsigned int cap; /* proxy capabilities required; presumably PR_CAP_* -- TODO confirm */
+ unsigned int checks; /* check-related applicability mask -- NOTE(review): confirm semantics */
+ unsigned int mode; /* proxy modes the option applies to; presumably PR_MODE_* -- TODO confirm */
+};
+
+#endif /* _HAPROXY_PROXY_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/proxy.h b/include/haproxy/proxy.h
new file mode 100644
index 0000000..efdfa21
--- /dev/null
+++ b/include/haproxy/proxy.h
@@ -0,0 +1,264 @@
+/*
+ * include/haproxy/proxy.h
+ * This file defines function prototypes for proxy management.
+ *
+ * Copyright (C) 2000-2011 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_PROXY_H
+#define _HAPROXY_PROXY_H
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/list.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/ticks.h>
+#include <haproxy/thread.h>
+
+extern struct proxy *proxies_list;
+extern struct eb_root used_proxy_id; /* list of proxy IDs in use */
+extern unsigned int error_snapshot_id; /* global ID assigned to each error then incremented */
+extern struct eb_root proxy_by_name; /* tree of proxies sorted by name */
+
+extern const struct cfg_opt cfg_opts[];
+extern const struct cfg_opt cfg_opts2[];
+
+struct task *manage_proxy(struct task *t, void *context, unsigned int state);
+void proxy_cond_pause(struct proxy *p);
+void proxy_cond_resume(struct proxy *p);
+void proxy_cond_disable(struct proxy *p);
+void soft_stop(void);
+int pause_proxy(struct proxy *p);
+int resume_proxy(struct proxy *p);
+void stop_proxy(struct proxy *p);
+int stream_set_backend(struct stream *s, struct proxy *be);
+
+void free_proxy(struct proxy *p);
+const char *proxy_cap_str(int cap);
+const char *proxy_mode_str(int mode);
+const char *proxy_find_best_option(const char *word, const char **extra);
+void proxy_store_name(struct proxy *px);
+struct proxy *proxy_find_by_id(int id, int cap, int table);
+struct proxy *proxy_find_by_name(const char *name, int cap, int table);
+struct proxy *proxy_find_best_match(int cap, const char *name, int id, int *diff);
+struct server *findserver(const struct proxy *px, const char *name);
+struct server *findserver_unique_id(const struct proxy *px, int puid, uint32_t rid);
+struct server *findserver_unique_name(const struct proxy *px, const char *name, uint32_t rid);
+int proxy_cfg_ensure_no_http(struct proxy *curproxy);
+int proxy_cfg_ensure_no_log(struct proxy *curproxy);
+void init_new_proxy(struct proxy *p);
+void proxy_preset_defaults(struct proxy *defproxy);
+void proxy_free_defaults(struct proxy *defproxy);
+void proxy_destroy_defaults(struct proxy *px);
+void proxy_destroy_all_unref_defaults(void);
+void proxy_ref_defaults(struct proxy *px, struct proxy *defpx);
+void proxy_unref_defaults(struct proxy *px);
+struct proxy *alloc_new_proxy(const char *name, unsigned int cap,
+ char **errmsg);
+struct proxy *parse_new_proxy(const char *name, unsigned int cap,
+ const char *file, int linenum,
+ const struct proxy *defproxy);
+void proxy_capture_error(struct proxy *proxy, int is_back,
+ struct proxy *other_end, enum obj_type *target,
+ const struct session *sess,
+ const struct buffer *buf, long buf_ofs,
+ unsigned int buf_out, unsigned int err_pos,
+ const union error_snapshot_ctx *ctx,
+ void (*show)(struct buffer *, const struct error_snapshot *));
+void proxy_adjust_all_maxconn(void);
+struct proxy *cli_find_frontend(struct appctx *appctx, const char *arg);
+struct proxy *cli_find_backend(struct appctx *appctx, const char *arg);
+int resolve_stick_rule(struct proxy *curproxy, struct sticking_rule *mrule);
+void free_stick_rules(struct list *rules);
+void free_server_rules(struct list *srules);
+
+/*
+ * This function returns a string containing the type of the proxy in a format
+ * suitable for error messages, from its capabilities. Peers sections are
+ * special-cased on their mode since capabilities alone cannot identify them.
+ */
+static inline const char *proxy_type_str(struct proxy *proxy)
+{
+ if (proxy->mode == PR_MODE_PEERS)
+ return "peers section";
+ return proxy_cap_str(proxy->cap);
+}
+
+/* Find the frontend having name <name>. The name may also start with a '#' to
+ * reference a numeric id. NULL is returned if not found. The lookup excludes
+ * stick-tables (third argument of proxy_find_by_name() is 0).
+ */
+static inline struct proxy *proxy_fe_by_name(const char *name)
+{
+ return proxy_find_by_name(name, PR_CAP_FE, 0);
+}
+
+/* Find the backend having name <name>. The name may also start with a '#' to
+ * reference a numeric id. NULL is returned if not found. The lookup excludes
+ * stick-tables (third argument of proxy_find_by_name() is 0).
+ */
+static inline struct proxy *proxy_be_by_name(const char *name)
+{
+ return proxy_find_by_name(name, PR_CAP_BE, 0);
+}
+
+/* this function initializes all timeouts for proxy p to TICK_ETERNITY
+ * (i.e. "no timeout"). NOTE(review): client_hs, httpka, clientfin and
+ * serverfin are not reset here and keep whatever value they had -- confirm
+ * this is intentional (they may inherit from other timeouts elsewhere).
+ */
+static inline void proxy_reset_timeouts(struct proxy *proxy)
+{
+ proxy->timeout.client = TICK_ETERNITY;
+ proxy->timeout.tarpit = TICK_ETERNITY;
+ proxy->timeout.queue = TICK_ETERNITY;
+ proxy->timeout.connect = TICK_ETERNITY;
+ proxy->timeout.server = TICK_ETERNITY;
+ proxy->timeout.httpreq = TICK_ETERNITY;
+ proxy->timeout.check = TICK_ETERNITY;
+ proxy->timeout.tunnel = TICK_ETERNITY;
+}
+
+/* increase the number of cumulated connections received on the designated
+ * frontend <fe>, and on listener <l> when non-NULL and it has counters.
+ * Also refreshes the per-second connection rate and its observed maximum.
+ */
+static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
+{
+ _HA_ATOMIC_INC(&fe->fe_counters.cum_conn);
+ if (l && l->counters)
+ _HA_ATOMIC_INC(&l->counters->cum_conn);
+ HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
+ update_freq_ctr(&fe->fe_conn_per_sec, 1));
+}
+
+/* increase the number of cumulated connections accepted by the designated
+ * frontend <fe>, and on listener <l> when non-NULL and it has counters.
+ * Also refreshes the per-second session rate and its observed maximum.
+ */
+static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
+{
+
+ _HA_ATOMIC_INC(&fe->fe_counters.cum_sess);
+ if (l && l->counters)
+ _HA_ATOMIC_INC(&l->counters->cum_sess);
+ HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
+ update_freq_ctr(&fe->fe_sess_per_sec, 1));
+}
+
+/* increase the number of cumulated HTTP sessions on the designated frontend.
+ * <http_ver> must be the HTTP version for such requests (1-based); 0 or an
+ * out-of-range version is silently ignored. The counter array is indexed by
+ * <http_ver> - 1.
+ */
+static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct proxy *fe,
+ unsigned int http_ver)
+{
+ if (http_ver == 0 ||
+ http_ver > sizeof(fe->fe_counters.cum_sess_ver) / sizeof(*fe->fe_counters.cum_sess_ver))
+ return;
+
+ _HA_ATOMIC_INC(&fe->fe_counters.cum_sess_ver[http_ver - 1]);
+ if (l && l->counters)
+ _HA_ATOMIC_INC(&l->counters->cum_sess_ver[http_ver - 1]);
+}
+
+/* increase the number of cumulated connections on the designated backend <be>
+ * and refresh the backend's per-second session rate and its observed maximum.
+ */
+static inline void proxy_inc_be_ctr(struct proxy *be)
+{
+ _HA_ATOMIC_INC(&be->be_counters.cum_conn);
+ HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
+ update_freq_ctr(&be->be_sess_per_sec, 1));
+}
+
+/* increase the number of cumulated requests on the designated frontend.
+ * <http_ver> must be the HTTP version for HTTP requests. 0 may be provided
+ * for other requests. Out-of-range versions are silently ignored. Also
+ * refreshes the per-second request rate and its observed maximum.
+ */
+static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
+ unsigned int http_ver)
+{
+ if (http_ver >= sizeof(fe->fe_counters.p.http.cum_req) / sizeof(*fe->fe_counters.p.http.cum_req))
+ return;
+
+ _HA_ATOMIC_INC(&fe->fe_counters.p.http.cum_req[http_ver]);
+ if (l && l->counters)
+ _HA_ATOMIC_INC(&l->counters->p.http.cum_req[http_ver]);
+ HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
+ update_freq_ctr(&fe->fe_req_per_sec, 1));
+}
+
+/* Returns non-zero if the proxy is configured to retry a request if we got
+ * that status, 0 otherwise. The short-circuit on PR_RE_STATUS_MASK avoids
+ * the switch entirely when no status-based retry is configured.
+ */
+static inline int l7_status_match(struct proxy *p, int status)
+{
+ /* Just return 0 if no retry was configured for any status */
+ if (!(p->retry_type & PR_RE_STATUS_MASK))
+ return 0;
+
+ switch (status) {
+ case 401:
+ return (p->retry_type & PR_RE_401);
+ case 403:
+ return (p->retry_type & PR_RE_403);
+ case 404:
+ return (p->retry_type & PR_RE_404);
+ case 408:
+ return (p->retry_type & PR_RE_408);
+ case 425:
+ return (p->retry_type & PR_RE_425);
+ case 500:
+ return (p->retry_type & PR_RE_500);
+ case 501:
+ return (p->retry_type & PR_RE_501);
+ case 502:
+ return (p->retry_type & PR_RE_502);
+ case 503:
+ return (p->retry_type & PR_RE_503);
+ case 504:
+ return (p->retry_type & PR_RE_504);
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Return 1 if <proxy> is in <list>, the list of proxies which refer to the
+ * same stick-table (linked through next_stkt_ref), 0 if not.
+ */
+static inline int in_proxies_list(struct proxy *list, struct proxy *proxy)
+{
+ struct proxy *p;
+
+ for (p = list; p; p = p->next_stkt_ref)
+ if (proxy == p)
+ return 1;
+
+ return 0;
+}
+
+/* Add <bytes> to the current thread's total bytes sent and adjust the send
+ * rate. Set <splice> if this was sent using splicing.
+ */
+static inline void increment_send_rate(uint64_t bytes, int splice)
+{
+ /* We count the total bytes sent, and the send rate for 32-byte blocks.
+ * The reason for the latter is that freq_ctr are limited to 4GB and
+ * that it's not enough per second. The +16 rounds to the nearest block.
+ */
+
+ if (splice)
+ _HA_ATOMIC_ADD(&th_ctx->spliced_out_bytes, bytes);
+ _HA_ATOMIC_ADD(&th_ctx->out_bytes, bytes);
+ update_freq_ctr(&th_ctx->out_32bps, (bytes + 16) / 32);
+}
+
+#endif /* _HAPROXY_PROXY_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/qmux_http.h b/include/haproxy/qmux_http.h
new file mode 100644
index 0000000..a7dbe7c
--- /dev/null
+++ b/include/haproxy/qmux_http.h
@@ -0,0 +1,17 @@
+#ifndef _HAPROXY_MUX_QUIC_HTTP_H
+#define _HAPROXY_MUX_QUIC_HTTP_H
+
+#ifdef USE_QUIC
+
+#include <haproxy/buf.h>
+#include <haproxy/mux_quic.h>
+
+size_t qcs_http_rcv_buf(struct qcs *qcs, struct buffer *buf, size_t count,
+ char *fin);
+size_t qcs_http_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count,
+ char *fin);
+size_t qcs_http_reset_buf(struct qcs *qcs, struct buffer *buf, size_t count);
+
+#endif /* USE_QUIC */
+
+#endif /* _HAPROXY_MUX_QUIC_HTTP_H */
diff --git a/include/haproxy/qmux_trace.h b/include/haproxy/qmux_trace.h
new file mode 100644
index 0000000..49759a3
--- /dev/null
+++ b/include/haproxy/qmux_trace.h
@@ -0,0 +1,73 @@
+#ifndef _HAPROXY_QMUX_TRACE_H
+#define _HAPROXY_QMUX_TRACE_H
+
+#ifdef USE_QUIC
+
+#include <haproxy/api-t.h>
+#include <haproxy/trace.h>
+
+extern struct trace_source trace_qmux;
+#define TRACE_SOURCE &trace_qmux
+
+/* QUIC mux trace events: each entry's mask must match the QMUX_EV_* define
+ * immediately above it (one bit per event). The list is terminated by an
+ * empty entry.
+ */
+static const struct trace_event qmux_trace_events[] = {
+#define QMUX_EV_QCC_NEW (1ULL << 0)
+ { .mask = QMUX_EV_QCC_NEW , .name = "qcc_new", .desc = "new QUIC connection" },
+#define QMUX_EV_QCC_RECV (1ULL << 1)
+ { .mask = QMUX_EV_QCC_RECV, .name = "qcc_recv", .desc = "Rx on QUIC connection" },
+#define QMUX_EV_QCC_SEND (1ULL << 2)
+ { .mask = QMUX_EV_QCC_SEND, .name = "qcc_send", .desc = "Tx on QUIC connection" },
+#define QMUX_EV_QCC_WAKE (1ULL << 3)
+ { .mask = QMUX_EV_QCC_WAKE, .name = "qcc_wake", .desc = "QUIC connection woken up" },
+#define QMUX_EV_QCC_END (1ULL << 4)
+ { .mask = QMUX_EV_QCC_END, .name = "qcc_end", .desc = "QUIC connection terminated" },
+#define QMUX_EV_QCC_NQCS (1ULL << 5)
+ { .mask = QMUX_EV_QCC_NQCS, .name = "qcc_no_qcs", .desc = "QUIC stream not found" },
+#define QMUX_EV_QCS_NEW (1ULL << 6)
+ { .mask = QMUX_EV_QCS_NEW, .name = "qcs_new", .desc = "new QUIC stream" },
+#define QMUX_EV_QCS_RECV (1ULL << 7)
+ { .mask = QMUX_EV_QCS_RECV, .name = "qcs_recv", .desc = "Rx on QUIC stream" },
+#define QMUX_EV_QCS_SEND (1ULL << 8)
+ { .mask = QMUX_EV_QCS_SEND, .name = "qcs_send", .desc = "Tx on QUIC stream" },
+#define QMUX_EV_QCS_END (1ULL << 9)
+ { .mask = QMUX_EV_QCS_END, .name = "qcs_end", .desc = "QUIC stream terminated" },
+#define QMUX_EV_STRM_RECV (1ULL << 10)
+ { .mask = QMUX_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
+#define QMUX_EV_STRM_SEND (1ULL << 11)
+ { .mask = QMUX_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
+#define QMUX_EV_STRM_WAKE (1ULL << 12)
+ { .mask = QMUX_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
+#define QMUX_EV_STRM_SHUT (1ULL << 13)
+ { .mask = QMUX_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
+#define QMUX_EV_STRM_END (1ULL << 14)
+ { .mask = QMUX_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
+#define QMUX_EV_SEND_FRM (1ULL << 15)
+ { .mask = QMUX_EV_SEND_FRM, .name = "send_frm", .desc = "sending QUIC frame" },
+/* special event dedicated to qcs_xfer_data */
+#define QMUX_EV_QCS_XFER_DATA (1ULL << 16)
+ { .mask = QMUX_EV_QCS_XFER_DATA, .name = "qcs_xfer_data", .desc = "qcs_xfer_data" },
+/* special event dedicated to qcs_build_stream_frm */
+#define QMUX_EV_QCS_BUILD_STRM (1ULL << 17)
+ { .mask = QMUX_EV_QCS_BUILD_STRM, .name = "qcs_build_stream_frm", .desc = "qcs_build_stream_frm" },
+#define QMUX_EV_PROTO_ERR (1ULL << 18)
+ { .mask = QMUX_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
+#define QMUX_EV_QCC_ERR (1ULL << 19)
+ { .mask = QMUX_EV_QCC_ERR, .name = "qcc_err", .desc = "connection on error" },
+ { }
+};
+
+/* custom arg for QMUX_EV_QCS_XFER_DATA */
+struct qcs_xfer_data_trace_arg {
+ size_t prep; /* amount of data prepared; presumably bytes -- TODO confirm against qcs_xfer_data() */
+ int xfer; /* amount actually transferred -- NOTE(review): confirm units/sign convention */
+};
+
+/* custom arg for QMUX_EV_QCS_BUILD_STRM */
+struct qcs_build_stream_trace_arg {
+ size_t len; /* payload length of the built STREAM frame -- TODO confirm against qcs_build_stream_frm() */
+ char fin; /* non-zero when the frame carries the FIN bit */
+ uint64_t offset; /* stream offset of the frame payload */
+};
+
+#endif /* USE_QUIC */
+
+#endif /* _HAPROXY_QMUX_TRACE_H */
diff --git a/include/haproxy/qpack-dec.h b/include/haproxy/qpack-dec.h
new file mode 100644
index 0000000..993f450
--- /dev/null
+++ b/include/haproxy/qpack-dec.h
@@ -0,0 +1,51 @@
+/*
+ * QPACK decompressor
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QPACK_DEC_H
+#define _HAPROXY_QPACK_DEC_H
+
+struct buffer;
+struct http_hdr;
+
+/* Internal QPACK processing errors.
+ * These are internal codes, not related to the error codes defined by the RFC.
+ */
+enum {
+ QPACK_ERR_NONE = 0, /* no error */
+ QPACK_ERR_RIC, /* cannot decode Required Insert Count prefix field */
+ QPACK_ERR_DB, /* cannot decode Delta Base prefix field */
+ QPACK_ERR_TRUNCATED, /* truncated stream */
+ QPACK_ERR_HUFFMAN, /* huffman decoding error */
+ QPACK_ERR_TOO_LARGE, /* decoded request/response is too large */
+};
+
+/* QPACK decoder state (RFC 9204 dynamic table tracking counters) */
+struct qpack_dec {
+ /* Insert count */
+ uint64_t ic;
+ /* Known received count */
+ uint64_t krc;
+};
+
+int qpack_decode_fs(const unsigned char *buf, uint64_t len, struct buffer *tmp,
+ struct http_hdr *list, int list_size);
+int qpack_decode_enc(struct buffer *buf, int fin, void *ctx);
+int qpack_decode_dec(struct buffer *buf, int fin, void *ctx);
+
+#endif /* _HAPROXY_QPACK_DEC_H */
diff --git a/include/haproxy/qpack-enc.h b/include/haproxy/qpack-enc.h
new file mode 100644
index 0000000..0126937
--- /dev/null
+++ b/include/haproxy/qpack-enc.h
@@ -0,0 +1,12 @@
+#ifndef QPACK_ENC_H_
+#define QPACK_ENC_H_
+
+#include <haproxy/istbuf.h>
+
+struct buffer;
+
+int qpack_encode_field_section_line(struct buffer *out);
+int qpack_encode_int_status(struct buffer *out, unsigned int status);
+int qpack_encode_header(struct buffer *out, const struct ist n, const struct ist v);
+
+#endif /* QPACK_ENC_H_ */
diff --git a/include/haproxy/qpack-t.h b/include/haproxy/qpack-t.h
new file mode 100644
index 0000000..0e1736a
--- /dev/null
+++ b/include/haproxy/qpack-t.h
@@ -0,0 +1,47 @@
+/*
+ * include/haproxy/qpack-t.h
+ * This file contains types for QPACK
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QPACK_T_H
+#define _HAPROXY_QPACK_T_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+/* Encoder */
+/* Instruction bitmask */
+#define QPACK_ENC_INST_BITMASK 0xf0
+/* Instructions */
+#define QPACK_ENC_INST_DUP 0x00 // Duplicate
+#define QPACK_ENC_INST_SDTC_BIT 0x20 // Set Dynamic Table Capacity
+#define QPACK_ENC_INST_IWLN_BIT 0x40 // Insert With Literal Name
+#define QPACK_ENC_INST_IWNR_BIT 0x80 // Insert With Name Reference
+
+/* Decoder */
+/* Instructions bitmask */
+#define QPACK_DEC_INST_BITMASK 0xf0
+/* Instructions */
+#define QPACK_DEC_INST_ICINC 0x00 // Insert Count Increment
+#define QPACK_DEC_INST_SCCL 0x40 // Stream Cancellation
+#define QPACK_DEC_INST_SACK 0x80 // Section Acknowledgment
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QPACK_T_H */
diff --git a/include/haproxy/qpack-tbl-t.h b/include/haproxy/qpack-tbl-t.h
new file mode 100644
index 0000000..c27c623
--- /dev/null
+++ b/include/haproxy/qpack-tbl-t.h
@@ -0,0 +1,65 @@
+/*
+ * QPACK header table management (draft-ietf-quic-qpack-20) - type definitions
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_QPACK_TBL_T_H
+#define _HAPROXY_QPACK_TBL_T_H
+
+/*
+ * Gcc before 3.0 needs [0] to declare a variable-size array
+ */
+#ifndef VAR_ARRAY
+#if defined(__GNUC__) && (__GNUC__ < 3)
+#define VAR_ARRAY 0
+#else
+#define VAR_ARRAY
+#endif
+#endif
+
+/* One dynamic table entry descriptor */
+struct qpack_dte {
+ uint32_t addr; /* storage address, relative to the beginning of the table (see qpack_get_name()) */
+ uint16_t nlen; /* header name length */
+ uint16_t vlen; /* header value length */
+};
+
+/* QPACK dynamic header table. Entry descriptors grow from the start while
+ * their name/value storage is addressed via dte[].addr.
+ * Note: the table's head plus a struct qpack_dte must be smaller than or equal to 32
+ * bytes so that a single large header can always fit. Here that's 16 bytes for
+ * the header, plus 8 bytes per slot.
+ * Note that when <used> == 0, front, head, and wrap are undefined.
+ */
+struct qpack_dht {
+ uint32_t size; /* allocated table size in bytes */
+ uint32_t total; /* sum of nlen + vlen in bytes */
+ uint16_t front; /* slot number of the first node after the idx table */
+ uint16_t wrap; /* number of allocated slots, wraps here */
+ uint16_t head; /* last inserted slot number */
+ uint16_t used; /* number of slots in use */
+ struct qpack_dte dte[VAR_ARRAY]; /* dynamic table entries */
+};
+
+/* static header table as in draft-ietf-quic-qpack-20 Appendix A. [0] unused. */
+#define QPACK_SHT_SIZE 99
+
+#endif /* _HAPROXY_QPACK_TBL_T_H */
diff --git a/include/haproxy/qpack-tbl.h b/include/haproxy/qpack-tbl.h
new file mode 100644
index 0000000..05f3ab4
--- /dev/null
+++ b/include/haproxy/qpack-tbl.h
@@ -0,0 +1,170 @@
+/*
+ * QPACK header table management - prototypes
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _HAPROXY_QPACK_TBL_H
+#define _HAPROXY_QPACK_TBL_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/qpack-tbl-t.h>
+#include <haproxy/http-hdr-t.h>
+
+/* when built outside of haproxy, QPACK_STANDALONE must be defined, and
+ * pool_head_qpack_tbl->size must be set to the DHT size.
+ */
+#ifndef QPACK_STANDALONE
+#include <haproxy/pool.h>
+#define qpack_alloc(pool) pool_alloc(pool)
+#define qpack_free(pool, ptr) pool_free(pool, ptr)
+#else
+#include <stdlib.h>
+#include <haproxy/pool-t.h>
+#define qpack_alloc(pool) malloc(pool->size)
+#define qpack_free(pool, ptr) free(ptr)
+#endif
+
+extern const struct http_hdr qpack_sht[QPACK_SHT_SIZE];
+extern struct pool_head *pool_head_qpack_tbl;
+
+int __qpack_dht_make_room(struct qpack_dht *dht, unsigned int needed);
+int qpack_dht_insert(struct qpack_dht *dht, struct ist name, struct ist value);
+
+#ifdef DEBUG_QPACK
+void qpack_dht_dump(FILE *out, const struct qpack_dht *dht);
+void qpack_dht_check_consistency(const struct qpack_dht *dht);
+#endif
+
+/* return a pointer to the entry designated by index <idx> (starting at 0) or
+ * NULL if this index is not there (i.e. <idx> >= <dht>->used).
+ */
+static inline const struct qpack_dte *qpack_get_dte(const struct qpack_dht *dht, uint16_t idx)
+{
+ if (idx >= dht->used)
+ return NULL;
+
+ return &dht->dte[idx];
+}
+
+/* returns non-zero if <idx> is a valid (in-use) entry index for table <dht> */
+static inline int qpack_valid_idx(const struct qpack_dht *dht, uint32_t idx)
+{
+ return idx < dht->used;
+}
+
+/* return an ist pointing to the header name for entry <dte>; the storage is
+ * located at <dte>->addr bytes from the beginning of the table.
+ */
+static inline struct ist qpack_get_name(const struct qpack_dht *dht, const struct qpack_dte *dte)
+{
+ struct ist ret = {
+ .ptr = (void *)dht + dte->addr,
+ .len = dte->nlen,
+ };
+ return ret;
+}
+
+/* return an ist pointing to the header value for entry <dte>; the value is
+ * stored immediately after the name (<dte>->addr + <dte>->nlen).
+ */
+static inline struct ist qpack_get_value(const struct qpack_dht *dht, const struct qpack_dte *dte)
+{
+ struct ist ret = {
+ .ptr = (void *)dht + dte->addr + dte->nlen,
+ .len = dte->vlen,
+ };
+ return ret;
+}
+
+/* takes an idx, returns the associated name, or the sentinel string
+ * "### ERR ###" when <idx> is out of range.
+ */
+static inline struct ist qpack_idx_to_name(const struct qpack_dht *dht, uint32_t idx)
+{
+ const struct qpack_dte *dte;
+
+ dte = qpack_get_dte(dht, idx);
+ if (!dte)
+ return ist("### ERR ###"); // error
+
+ return qpack_get_name(dht, dte);
+}
+
+/* takes an idx, returns the associated value, or the sentinel string
+ * "### ERR ###" when <idx> is out of range.
+ */
+static inline struct ist qpack_idx_to_value(const struct qpack_dht *dht, uint32_t idx)
+{
+ const struct qpack_dte *dte;
+
+ dte = qpack_get_dte(dht, idx);
+ if (!dte)
+ return ist("### ERR ###"); // error
+
+ return qpack_get_value(dht, dte);
+}
+
+/* returns the slot number of the oldest entry (tail). Must not be used on an
+ * empty table (<used> == 0, where head/wrap are undefined). The expression
+ * walks back <used>-1 slots from <head>, adding <wrap> when that walk would
+ * go below zero.
+ */
+static inline unsigned int qpack_dht_get_tail(const struct qpack_dht *dht)
+{
+ return ((dht->head + 1U < dht->used) ? dht->wrap : 0) + dht->head + 1U - dht->used;
+}
+
+/* Purges table dht until a header field of <needed> bytes fits according to
+ * the protocol (adding 32 bytes overhead). Returns non-zero on success, zero
+ * on failure (ie: table empty but still not sufficient). The fast path checks
+ * the accounted size (32 bytes per existing entry plus name+value totals)
+ * without purging anything.
+ */
+static inline int qpack_dht_make_room(struct qpack_dht *dht, unsigned int needed)
+{
+ if (dht->used * 32 + dht->total + needed + 32 <= dht->size)
+ return 1;
+ else if (!dht->used)
+ return 0;
+
+ return __qpack_dht_make_room(dht, needed);
+}
+
+/* initialize an already-allocated dynamic headers table <dht> to use <size>
+ * bytes. front/wrap/head are deliberately left untouched: they are undefined
+ * while <used> == 0 (see struct qpack_dht).
+ */
+static inline void qpack_dht_init(struct qpack_dht *dht, uint32_t size)
+{
+ dht->size = size;
+ dht->total = 0;
+ dht->used = 0;
+}
+
+/* allocate a dynamic headers table from the pool and return it initialized,
+ * or NULL when the pool is not yet created or allocation fails.
+ */
+static inline struct qpack_dht *qpack_dht_alloc()
+{
+ struct qpack_dht *dht;
+
+ if (unlikely(!pool_head_qpack_tbl))
+ return NULL;
+
+ dht = qpack_alloc(pool_head_qpack_tbl);
+ if (dht)
+ qpack_dht_init(dht, pool_head_qpack_tbl->size);
+ return dht;
+}
+
+/* free a dynamic headers table previously obtained via qpack_dht_alloc() */
+static inline void qpack_dht_free(struct qpack_dht *dht)
+{
+ qpack_free(pool_head_qpack_tbl, dht);
+}
+
+#endif /* _HAPROXY_QPACK_TBL_H */
diff --git a/include/haproxy/queue-t.h b/include/haproxy/queue-t.h
new file mode 100644
index 0000000..8f6a1ec
--- /dev/null
+++ b/include/haproxy/queue-t.h
@@ -0,0 +1,59 @@
+/*
+ * include/haproxy/queue-t.h
+ * This file defines variables and structures needed for queues.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUEUE_T_H
+#define _HAPROXY_QUEUE_T_H
+
+#include <import/ebtree-t.h>
+#include <haproxy/api-t.h>
+
+struct proxy;
+struct server;
+struct stream;
+struct queue;
+
+struct pendconn {
+ int strm_flags; /* stream flags */
+ unsigned int queue_idx; /* value of proxy/server queue_idx at time of enqueue */
+ struct stream *strm;
+ struct queue *queue; /* the queue the entry is queued into */
+ struct server *target; /* the server that was assigned, = srv except if srv==NULL */
+ struct eb32_node node;
+ __decl_thread(HA_SPINLOCK_T del_lock); /* use before removal, always under queue's lock */
+};
+
+struct queue {
+ struct eb_root head; /* queued pendconnds */
+ struct proxy *px; /* the proxy we're waiting for, never NULL in queue */
+ struct server *sv; /* the server we are waiting for, may be NULL if don't care */
+ __decl_thread(HA_SPINLOCK_T lock); /* for manipulations in the tree */
+ unsigned int idx; /* current queuing index */
+ unsigned int length; /* number of entries */
+};
+
+#endif /* _HAPROXY_QUEUE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/queue.h b/include/haproxy/queue.h
new file mode 100644
index 0000000..e77370c
--- /dev/null
+++ b/include/haproxy/queue.h
@@ -0,0 +1,134 @@
+/*
+ * include/haproxy/queue.h
+ * This file defines everything related to queues.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUEUE_H
+#define _HAPROXY_QUEUE_H
+
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/queue-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stream-t.h>
+
+extern struct pool_head *pool_head_pendconn;
+
+struct pendconn *pendconn_add(struct stream *strm);
+int pendconn_dequeue(struct stream *strm);
+void process_srv_queue(struct server *s);
+unsigned int srv_dynamic_maxconn(const struct server *s);
+int pendconn_redistribute(struct server *s);
+int pendconn_grab_from_px(struct server *s);
+void pendconn_unlink(struct pendconn *p);
+
+/* Removes the pendconn from the server/proxy queue. It supports being called
+ * with NULL for pendconn and with a pendconn not in the list. It is the
+ * function to be used by default when unsure. Do not call it with server
+ * or proxy locks held however. Warning: this is called from stream_free()
+ * which may run concurrently with pendconn_process_next_strm() which can be
+ * dequeuing the entry. The function must not return until the pendconn is
+ * guaranteed not to be known, which means that we must check its presence
+ * in the tree under the queue's lock so that pendconn_process_next_strm()
+ * finishes before we return in case it would have grabbed this pendconn. See
+ * github bugs #880 and #908, and the commit log for this fix for more details.
+ */
+static inline void pendconn_cond_unlink(struct pendconn *p)
+{
+ if (p)
+ pendconn_unlink(p);
+}
+
+/* Releases the pendconn associated to stream <s> if it has any, and decreases
+ * the pending count if needed. The connection might have been queued to a
+ * specific server as well as to the proxy. The stream also gets marked
+ * unqueued.
+ *
+ * This function must be called by the stream itself, so in the context of
+ * process_stream, without any lock held among the pendconn, the server's queue
+ * nor the proxy's queue.
+ */
+static inline void pendconn_free(struct stream *s)
+{
+ struct pendconn *p = s->pend_pos;
+
+ if (p) {
+ pendconn_cond_unlink(p);
+ s->pend_pos = NULL;
+ pool_free(pool_head_pendconn, p);
+ }
+}
+
+/* Returns 0 if all slots are full on a server, or 1 if there are slots available. */
+static inline int server_has_room(const struct server *s) {
+ return !s->maxconn || s->cur_sess < srv_dynamic_maxconn(s);
+}
+
+/* returns 0 if nothing has to be done for server <s> regarding queued connections,
+ * and non-zero otherwise. If the server is down, we only check its own queue. Suited
+ * for an if/else usage.
+ */
+static inline int may_dequeue_tasks(const struct server *s, const struct proxy *p) {
+ return (s && (s->queue.length || (p->queue.length && srv_currently_usable(s))) &&
+ (!s->maxconn || s->cur_sess < srv_dynamic_maxconn(s)));
+}
+
+static inline int queue_limit_class(int class)
+{
+ if (class < -0x7ff)
+ return -0x7ff;
+ if (class > 0x7ff)
+ return 0x7ff;
+ return class;
+}
+
+static inline int queue_limit_offset(int offset)
+{
+ if (offset < -0x7ffff)
+ return -0x7ffff;
+ if (offset > 0x7ffff)
+ return 0x7ffff;
+ return offset;
+}
+
+/* initialize the queue <queue> for proxy <px> and server <sv>. A server's
+ * queue always has both a valid proxy and a valid server. A proxy's queue only
+ * has a valid proxy and NULL for the server queue. This is how they're
+ * distinguished during operations.
+ */
+static inline void queue_init(struct queue *queue, struct proxy *px, struct server *sv)
+{
+ queue->head = EB_ROOT;
+ queue->length = 0;
+ queue->idx = 0;
+ queue->px = px;
+ queue->sv = sv;
+ HA_SPIN_INIT(&queue->lock);
+}
+
+#endif /* _HAPROXY_QUEUE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/quic_ack-t.h b/include/haproxy/quic_ack-t.h
new file mode 100644
index 0000000..95b77f1
--- /dev/null
+++ b/include/haproxy/quic_ack-t.h
@@ -0,0 +1,43 @@
+/*
+ * include/haproxy/quic_ack-t.h
+ * Definitions for QUIC acknowledgements internal types, constants and flags.
+ *
+ * Copyright (C) 2023
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _HAPROXY_QUIC_ACK_T_H
+#define _HAPROXY_QUIC_ACK_T_H
+
+/* The maximum number of ack ranges to be built in ACK frames */
+#define QUIC_MAX_ACK_RANGES 32
+
+/* Structure to maintain a set of ACK ranges to be used to build ACK frames. */
+struct quic_arngs {
+ /* ebtree of ACK ranges organized by their first value. */
+ struct eb_root root;
+	/* The number of ACK ranges in this tree */
+ size_t sz;
+	/* The number of bytes required to encode this list of ACK ranges. */
+ size_t enc_sz;
+};
+
+/* Structure to hold a range of ACKs sent in ACK frames. */
+struct quic_arng {
+ int64_t first;
+ int64_t last;
+};
+
+/* Structure to hold a range of ACKs to be stored as a node in a tree of
+ * ACK ranges.
+ */
+struct quic_arng_node {
+ struct eb64_node first;
+ uint64_t last;
+};
+
+#endif /* _HAPROXY_QUIC_ACK_T_H */
diff --git a/include/haproxy/quic_ack.h b/include/haproxy/quic_ack.h
new file mode 100644
index 0000000..540e2c0
--- /dev/null
+++ b/include/haproxy/quic_ack.h
@@ -0,0 +1,23 @@
+/*
+ * include/haproxy/quic_ack.h
+ * This file provides definitions for QUIC acknowledgements.
+ *
+ * Copyright (C) 2023
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _HAPROXY_QUIC_ACK_H
+#define _HAPROXY_QUIC_ACK_H
+
+void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs);
+int quic_update_ack_ranges_list(struct quic_conn *qc,
+ struct quic_arngs *arngs,
+ struct quic_arng *ar);
+void qc_treat_ack_of_ack(struct quic_conn *qc, struct quic_arngs *arngs,
+ int64_t largest_acked_pn);
+
+#endif /* _HAPROXY_QUIC_ACK_H */
diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h
new file mode 100644
index 0000000..888efca
--- /dev/null
+++ b/include/haproxy/quic_cc-t.h
@@ -0,0 +1,123 @@
+/*
+ * include/haproxy/quic_cc-t.h
+ * This file contains definitions for QUIC congestion control.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_CC_H
+#define _HAPROXY_QUIC_CC_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+#include <stddef.h> /* size_t */
+
+#include <haproxy/buf-t.h>
+#include <haproxy/quic_loss-t.h>
+
+#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
+
+extern struct quic_cc_algo quic_cc_algo_nr;
+extern struct quic_cc_algo quic_cc_algo_cubic;
+extern struct quic_cc_algo *default_quic_cc_algo;
+
+/* Fake algorithm with its fixed window */
+extern struct quic_cc_algo quic_cc_algo_nocc;
+
+extern unsigned long long last_ts;
+
+enum quic_cc_algo_state_type {
+ /* Slow start. */
+ QUIC_CC_ST_SS,
+ /* Congestion avoidance. */
+ QUIC_CC_ST_CA,
+ /* Recovery period. */
+ QUIC_CC_ST_RP,
+};
+
+enum quic_cc_event_type {
+ /* ACK receipt. */
+ QUIC_CC_EVT_ACK,
+ /* Packet loss. */
+ QUIC_CC_EVT_LOSS,
+ /* ECN-CE. */
+ QUIC_CC_EVT_ECN_CE,
+};
+
+struct quic_cc_event {
+ enum quic_cc_event_type type;
+ union {
+ struct ack {
+ uint64_t acked;
+ unsigned int time_sent;
+ } ack;
+ struct loss {
+ unsigned int time_sent;
+ } loss;
+ };
+};
+
+enum quic_cc_algo_type {
+ QUIC_CC_ALGO_TP_NEWRENO,
+ QUIC_CC_ALGO_TP_CUBIC,
+ QUIC_CC_ALGO_TP_NOCC,
+};
+
+struct quic_cc {
+	/* <qc> is there only for debugging purposes. */
+ struct quic_conn *qc;
+ struct quic_cc_algo *algo;
+ uint32_t priv[16];
+};
+
+struct quic_cc_path {
+ /* Control congestion. */
+ struct quic_cc cc;
+ /* Packet loss detection information. */
+ struct quic_loss loss;
+
+ /* MTU. */
+ size_t mtu;
+ /* Congestion window. */
+ uint64_t cwnd;
+ /* The current maximum congestion window value reached. */
+ uint64_t mcwnd;
+ /* The maximum congestion window value which can be reached. */
+ uint64_t max_cwnd;
+ /* Minimum congestion window. */
+ uint64_t min_cwnd;
+ /* Prepared data to be sent (in bytes). */
+ uint64_t prep_in_flight;
+ /* Outstanding data (in bytes). */
+ uint64_t in_flight;
+ /* Number of in flight ack-eliciting packets. */
+ uint64_t ifae_pkts;
+};
+
+struct quic_cc_algo {
+ enum quic_cc_algo_type type;
+ int (*init)(struct quic_cc *cc);
+ void (*event)(struct quic_cc *cc, struct quic_cc_event *ev);
+ void (*slow_start)(struct quic_cc *cc);
+ void (*state_trace)(struct buffer *buf, const struct quic_cc *cc);
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_CC_H */
diff --git a/include/haproxy/quic_cc.h b/include/haproxy/quic_cc.h
new file mode 100644
index 0000000..721feca
--- /dev/null
+++ b/include/haproxy/quic_cc.h
@@ -0,0 +1,112 @@
+/*
+ * include/haproxy/quic_cc.h
+ * This file contains prototypes for QUIC congestion control.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _PROTO_QUIC_CC_H
+#define _PROTO_QUIC_CC_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/quic_cc-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_loss.h>
+
+void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
+void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
+void quic_cc_state_trace(struct buffer *buf, const struct quic_cc *cc);
+
+static inline const char *quic_cc_state_str(enum quic_cc_algo_state_type state)
+{
+ switch (state) {
+ case QUIC_CC_ST_SS:
+ return "ss";
+ case QUIC_CC_ST_CA:
+ return "ca";
+ case QUIC_CC_ST_RP:
+ return "rp";
+ default:
+ return "unknown";
+ }
+}
+
+/* Append to <buf> a human readable string for <ev> congestion control event type. */
+static inline void quic_cc_event_trace(struct buffer *buf, const struct quic_cc_event *ev)
+{
+ chunk_appendf(buf, " event=");
+ switch (ev->type) {
+ case QUIC_CC_EVT_ACK:
+ chunk_appendf(buf, "ack acked=%llu time_sent:%dms",
+ (unsigned long long)ev->ack.acked, TICKS_TO_MS(tick_remain(ev->ack.time_sent, now_ms)));
+ break;
+ case QUIC_CC_EVT_LOSS:
+ chunk_appendf(buf, "loss time_sent=%dms", TICKS_TO_MS(tick_remain(ev->loss.time_sent, now_ms)));
+ break;
+ case QUIC_CC_EVT_ECN_CE:
+ chunk_appendf(buf, "ecn_ce");
+ break;
+ }
+}
+
+static inline void *quic_cc_priv(const struct quic_cc *cc)
+{
+ return (void *)cc->priv;
+}
+
+/* Initialize <path> QUIC network path depending on <ipv4> boolean:
+ * non-zero for an IPv4 path, zero for an IPv6 path.
+ */
+static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsigned long max_cwnd,
+ struct quic_cc_algo *algo, struct quic_conn *qc)
+{
+ unsigned int max_dgram_sz;
+
+ max_dgram_sz = ipv4 ? QUIC_INITIAL_IPV4_MTU : QUIC_INITIAL_IPV6_MTU;
+ quic_loss_init(&path->loss);
+ path->mtu = max_dgram_sz;
+ path->cwnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
+ path->mcwnd = path->cwnd;
+ path->max_cwnd = max_cwnd;
+ path->min_cwnd = max_dgram_sz << 1;
+ path->prep_in_flight = 0;
+ path->in_flight = 0;
+ path->ifae_pkts = 0;
+ quic_cc_init(&path->cc, algo, qc);
+}
+
+/* Return the remaining room available on <path> QUIC path for prepared data
+ * (before being sent). Almost the same as the QUIC path room, except that
+ * here the data which have been prepared are taken into account.
+ */
+static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
+{
+ if (path->prep_in_flight > path->cwnd)
+ return 0;
+
+ return path->cwnd - path->prep_in_flight;
+}
+
+
+#endif /* USE_QUIC */
+#endif /* _PROTO_QUIC_CC_H */
diff --git a/include/haproxy/quic_cid-t.h b/include/haproxy/quic_cid-t.h
new file mode 100644
index 0000000..ccce844
--- /dev/null
+++ b/include/haproxy/quic_cid-t.h
@@ -0,0 +1,38 @@
+#ifndef _HAPROXY_QUIC_CID_T_H
+#define _HAPROXY_QUIC_CID_T_H
+
+#include <import/ebtree-t.h>
+#include <haproxy/quic_tp-t.h>
+
+/* QUIC connection ID maximum length for version 1. */
+#define QUIC_CID_MAXLEN 20 /* bytes */
+
+/* QUIC connection id data.
+ *
+ * This struct is used by ebmb_node structs as last member of flexible arrays.
+ * So do not change the order of the member of quic_cid struct.
+ * <data> member must be the first one.
+ */
+struct quic_cid {
+ unsigned char data[QUIC_CID_MAXLEN];
+ unsigned char len; /* size of QUIC CID */
+};
+
+/* QUIC connection id attached to a QUIC connection.
+ *
+ * This structure is used to match received packets DCIDs with the
+ * corresponding QUIC connection.
+ */
+struct quic_connection_id {
+ struct eb64_node seq_num;
+ uint64_t retire_prior_to;
+ unsigned char stateless_reset_token[QUIC_STATELESS_RESET_TOKEN_LEN];
+
+ struct ebmb_node node; /* node for receiver tree, cid.data as key */
+ struct quic_cid cid; /* CID data */
+
+ struct quic_conn *qc; /* QUIC connection using this CID */
+ uint tid; /* Attached Thread ID for the connection. */
+};
+
+#endif /* _HAPROXY_QUIC_CID_T_H */
diff --git a/include/haproxy/quic_cid.h b/include/haproxy/quic_cid.h
new file mode 100644
index 0000000..482a020
--- /dev/null
+++ b/include/haproxy/quic_cid.h
@@ -0,0 +1,110 @@
+#ifndef _HAPROXY_QUIC_CID_H
+#define _HAPROXY_QUIC_CID_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/chunk.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_rx-t.h>
+#include <haproxy/proto_quic.h>
+
+struct quic_connection_id *new_quic_cid(struct eb_root *root,
+ struct quic_conn *qc,
+ const struct quic_cid *orig,
+ const struct sockaddr_storage *addr);
+int quic_get_cid_tid(const unsigned char *cid, size_t cid_len,
+ const struct sockaddr_storage *cli_addr,
+ unsigned char *pos, size_t len);
+struct quic_cid quic_derive_cid(const struct quic_cid *orig,
+ const struct sockaddr_storage *addr);
+struct quic_conn *retrieve_qc_conn_from_cid(struct quic_rx_packet *pkt,
+ struct sockaddr_storage *saddr,
+ int *new_tid);
+int qc_build_new_connection_id_frm(struct quic_conn *qc,
+ struct quic_connection_id *conn_id);
+
+/* Copy <src> QUIC CID to <dst>.
+ * This is the responsibility of the caller to check there is enough room in
+ * <dst> to copy <src>.
+ * Always succeeds.
+ */
+static inline void quic_cid_cpy(struct quic_cid *dst, const struct quic_cid *src)
+{
+ memcpy(dst->data, src->data, src->len);
+ dst->len = src->len;
+}
+
+/* Dump the QUIC connection ID value if present (non null length). Used only for
+ * debugging purposes.
+ * Always succeeds.
+ */
+static inline void quic_cid_dump(struct buffer *buf,
+ const struct quic_cid *cid)
+{
+ int i;
+
+ chunk_appendf(buf, "(%d", cid->len);
+ if (cid->len)
+ chunk_appendf(buf, ",");
+ for (i = 0; i < cid->len; i++)
+ chunk_appendf(buf, "%02x", cid->data[i]);
+ chunk_appendf(buf, ")");
+}
+
+/* Return tree index where <cid> is stored. */
+static inline uchar _quic_cid_tree_idx(const unsigned char *cid)
+{
+ return cid[0];
+}
+
+/* Return tree index where <cid> is stored. */
+static inline uchar quic_cid_tree_idx(const struct quic_cid *cid)
+{
+ return _quic_cid_tree_idx(cid->data);
+}
+
+/* Insert <conn_id> into global CID tree as a thread-safe operation. */
+static inline void quic_cid_insert(struct quic_connection_id *conn_id)
+{
+ const uchar idx = quic_cid_tree_idx(&conn_id->cid);
+ struct quic_cid_tree *tree = &quic_cid_trees[idx];
+
+ HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
+ ebmb_insert(&tree->root, &conn_id->node, conn_id->cid.len);
+ HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);
+}
+
+/* Remove <conn_id> from global CID tree as a thread-safe operation. */
+static inline void quic_cid_delete(struct quic_connection_id *conn_id)
+{
+ const uchar idx = quic_cid_tree_idx(&conn_id->cid);
+ struct quic_cid_tree __maybe_unused *tree = &quic_cid_trees[idx];
+
+ HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
+ ebmb_delete(&conn_id->node);
+ HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);
+}
+
+/* Copy <src> new connection ID information to <dst> NEW_CONNECTION_ID frame.
+ * Always succeeds.
+ */
+static inline void quic_connection_id_to_frm_cpy(struct quic_frame *dst,
+ struct quic_connection_id *src)
+{
+ struct qf_new_connection_id *ncid_frm = &dst->new_connection_id;
+
+ ncid_frm->seq_num = src->seq_num.key;
+ ncid_frm->retire_prior_to = src->retire_prior_to;
+ ncid_frm->cid.len = src->cid.len;
+ ncid_frm->cid.data = src->cid.data;
+ ncid_frm->stateless_reset_token = src->stateless_reset_token;
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_CID_H */
diff --git a/include/haproxy/quic_cli-t.h b/include/haproxy/quic_cli-t.h
new file mode 100644
index 0000000..6f95899
--- /dev/null
+++ b/include/haproxy/quic_cli-t.h
@@ -0,0 +1,18 @@
+/*
+ * include/haproxy/quic_cli-t.h
+ * Definitions for QUIC CLI internal types, constants and flags.
+ *
+ * Copyright (C) 2023
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _HAPROXY_QUIC_CLI_T_H
+#define _HAPROXY_QUIC_CLI_T_H
+
+extern unsigned int qc_epoch;
+
+#endif /* _HAPROXY_QUIC_CLI_T_H */
diff --git a/include/haproxy/quic_conn-t.h b/include/haproxy/quic_conn-t.h
new file mode 100644
index 0000000..8aec6f0
--- /dev/null
+++ b/include/haproxy/quic_conn-t.h
@@ -0,0 +1,446 @@
+/*
+ * include/haproxy/quic_conn-t.h
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_CONN_T_H
+#define _HAPROXY_QUIC_CONN_T_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <sys/socket.h>
+
+#include <haproxy/cbuf-t.h>
+#include <haproxy/list.h>
+
+#include <haproxy/openssl-compat.h>
+#include <haproxy/mux_quic-t.h>
+#include <haproxy/quic_cid-t.h>
+#include <haproxy/quic_cc-t.h>
+#include <haproxy/quic_loss-t.h>
+#include <haproxy/quic_openssl_compat-t.h>
+#include <haproxy/quic_stats-t.h>
+#include <haproxy/quic_tls-t.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/task.h>
+
+#include <import/ebtree-t.h>
+
+typedef unsigned long long ull;
+
+#define QUIC_PROTOCOL_VERSION_DRAFT_29 0xff00001d /* draft-29 */
+#define QUIC_PROTOCOL_VERSION_1 0x00000001 /* V1 */
+#define QUIC_PROTOCOL_VERSION_2 0x6b3343cf /* V2 */
+
+#define QUIC_INITIAL_IPV4_MTU 1252 /* (bytes) */
+#define QUIC_INITIAL_IPV6_MTU 1232
+
+/* The minimum length of Initial packets. */
+#define QUIC_INITIAL_PACKET_MINLEN 1200
+
+/* Lengths of the QUIC CIDs generated by the haproxy implementation. Current
+ * value is used to match 64 bits hash produced when deriving ODCID.
+ */
+#define QUIC_HAP_CID_LEN 8
+
+/* Common definitions for short and long QUIC packet headers. */
+/* QUIC original destination connection ID minimal length */
+#define QUIC_ODCID_MINLEN 8 /* bytes */
+/*
+ * All QUIC packets with long headers are made of at least (in bytes):
+ * flags(1), version(4), DCID length(1), DCID(0..20), SCID length(1), SCID(0..20)
+ */
+#define QUIC_LONG_PACKET_MINLEN 7
+/* DCID offset from beginning of a long packet */
+#define QUIC_LONG_PACKET_DCID_OFF (1 + sizeof(uint32_t))
+/*
+ * All QUIC packets with short headers are made of at least (in bytes):
+ * flags(1), DCID(0..20)
+ */
+#define QUIC_SHORT_PACKET_MINLEN 1
+/* DCID offset from beginning of a short packet */
+#define QUIC_SHORT_PACKET_DCID_OFF 1
+
+/* Byte 0 of QUIC packets. */
+#define QUIC_PACKET_LONG_HEADER_BIT 0x80 /* Long header format if set, short if not. */
+#define QUIC_PACKET_FIXED_BIT 0x40 /* Must always be set for all the headers. */
+
+/* Tokens formats */
+/* Format for Retry tokens sent by a QUIC server */
+#define QUIC_TOKEN_FMT_RETRY 0x9c
+/* Format for token sent for new connections after a Retry token was sent */
+#define QUIC_TOKEN_FMT_NEW 0xb7
+/* Retry token duration */
+#define QUIC_RETRY_DURATION_SEC 10
+/* Default Retry threshold */
+#define QUIC_DFLT_RETRY_THRESHOLD 100 /* in connection openings */
+/* Default ratio value applied to a dynamic Packet reorder threshold. */
+#define QUIC_DFLT_REORDER_RATIO 50 /* in percent */
+/* Default limit of loss detection on a single frame. If exceeded, connection is closed. */
+#define QUIC_DFLT_MAX_FRAME_LOSS 10
+
+/*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+
+ * |1|1|T|T|X|X|X|X|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Version (32) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | DCID Len (8) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Destination Connection ID (0..160) ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | SCID Len (8) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Source Connection ID (0..160) ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * Long Header Packet Format
+ */
+
+/* Two bits (T) for QUIC packet types. */
+#define QUIC_PACKET_TYPE_BITMASK 0x03
+#define QUIC_PACKET_TYPE_SHIFT 4
+
+enum quic_pkt_type {
+ QUIC_PACKET_TYPE_INITIAL,
+ QUIC_PACKET_TYPE_0RTT,
+ QUIC_PACKET_TYPE_HANDSHAKE,
+ QUIC_PACKET_TYPE_RETRY,
+ /*
+ * The following one is not defined by the RFC but we define it for our
+ * own convenience.
+ */
+ QUIC_PACKET_TYPE_SHORT,
+
+ /* unknown type */
+ QUIC_PACKET_TYPE_UNKNOWN
+};
+
+/* Packet number field length. */
+#define QUIC_PACKET_PNL_BITMASK 0x03
+#define QUIC_PACKET_PN_MAXLEN 4
+
+/*
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+
+ * |0|1|S|R|R|K|P|P|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Destination Connection ID (0..160) ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Packet Number (8/16/24/32) ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Protected Payload (*) ...
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * Short Header Packet Format
+ */
+
+/* Bit (S) of short header. */
+#define QUIC_PACKET_SPIN_BIT 0x20
+
+/* Reserved Bits (R): The next two bits of byte 0 are reserved.
+ * These bits are protected using header protection
+ * (see Section 5.4 of [QUIC-TLS]). The value included
+ * prior to protection MUST be set to 0. An endpoint MUST treat
+ * receipt of a packet that has a non-zero value for these bits,
+ * after removing both packet and header protection, as a connection
+ * error of type PROTOCOL_VIOLATION. Discarding such a packet after
+ * only removing header protection can expose the endpoint to attacks
+ * (see Section 9.3 of [QUIC-TLS]).
+ */
+#define QUIC_PACKET_RESERVED_BITS 0x18 /* (protected) */
+
+#define QUIC_PACKET_KEY_PHASE_BIT 0x04 /* (protected) */
+
+/* The maximum number of QUIC packets stored by the fd I/O handler by QUIC
+ * connection. Must be a power of two.
+ */
+#define QUIC_CONN_MAX_PACKET 64
+
+#define QUIC_STATELESS_RESET_PACKET_HEADER_LEN 5
+#define QUIC_STATELESS_RESET_PACKET_MINLEN (22 + QUIC_HAP_CID_LEN)
+
+/* Similar to kernel min()/max() definitions. */
+#define QUIC_MIN(a, b) ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ (void) (&_a == &_b); \
+ _a < _b ? _a : _b; })
+
+#define QUIC_MAX(a, b) ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ (void) (&_a == &_b); \
+ _a > _b ? _a : _b; })
+
+/* Size of the QUIC RX buffer for the connections */
+#define QUIC_CONN_RX_BUFSZ (1UL << 16)
+
+struct quic_version {
+ uint32_t num;
+ const unsigned char *initial_salt;
+ size_t initial_salt_len;
+ const unsigned char *key_label;
+ size_t key_label_len;
+ const unsigned char *iv_label;
+ size_t iv_label_len;
+ const unsigned char *hp_label;
+ size_t hp_label_len;
+ const unsigned char *ku_label;
+ size_t ku_label_len;
+ /* Retry tag */
+ const unsigned char *retry_tag_key;
+ const unsigned char *retry_tag_nonce;
+};
+
+extern const struct quic_version quic_versions[];
+extern const size_t quic_versions_nb;
+extern const struct quic_version *preferred_version;
+
+/* unused: 0x01 */
+/* Flag the packet number space as requiring an ACK frame to be sent. */
+#define QUIC_FL_PKTNS_ACK_REQUIRED (1UL << 1)
+/* Flag the packet number space as needing probing */
+#define QUIC_FL_PKTNS_PROBE_NEEDED (1UL << 2)
+/* Flag the packet number space as having received a packet with a new largest
+ * packet number, to be acknowledged.
+ */
+#define QUIC_FL_PKTNS_NEW_LARGEST_PN (1UL << 3)
+
+/* The maximum number of dgrams which may be sent upon PTO expirations. */
+#define QUIC_MAX_NB_PTO_DGRAMS 2
+
+/* The QUIC packet numbers are 62-bits integers */
+#define QUIC_MAX_PACKET_NUM ((1ULL << 62) - 1)
+
+/* The maximum number of bytes of CRYPTO data in flight during handshakes. */
+#define QUIC_CRYPTO_IN_FLIGHT_MAX 4096
+
+/* Status of the connection/mux layer. This defines how to handle app data.
+ *
+ * During a standard quic_conn lifetime it transitions like this :
+ * QC_MUX_NULL -> QC_MUX_READY -> QC_MUX_RELEASED
+ */
+enum qc_mux_state {
+ QC_MUX_NULL, /* not allocated, data should be buffered */
+ QC_MUX_READY, /* allocated, ready to handle data */
+ QC_MUX_RELEASED, /* released, data can be dropped */
+};
+
+/* Counters at QUIC connection level */
+struct quic_conn_cntrs {
+ long long dropped_pkt; /* total number of dropped packets */
+ long long dropped_pkt_bufoverrun;/* total number of dropped packets because of buffer overrun */
+ long long dropped_parsing; /* total number of dropped packets upon parsing errors */
+ long long socket_full; /* total number of EAGAIN errors on sendto() calls */
+ long long sendto_err; /* total number of errors on sendto() calls, EAGAIN excepted */
+ long long sendto_err_unknown; /* total number of errors on sendto() calls which are currently not supported */
+ long long sent_pkt; /* total number of sent packets */
+ long long lost_pkt; /* total number of lost packets */
+ long long conn_migration_done; /* total number of connection migration handled */
+ /* Streams related counters */
+ long long data_blocked; /* total number of times DATA_BLOCKED frame was received */
+ long long stream_data_blocked; /* total number of times STREAM_DATA_BLOCKED frame was received */
+ long long streams_blocked_bidi; /* total number of times STREAMS_BLOCKED_BIDI frame was received */
+ long long streams_blocked_uni; /* total number of times STREAMS_BLOCKED_UNI frame was received */
+};
+
+/* Flags at connection level */
+#define QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED (1U << 0)
+#define QUIC_FL_CONN_SPIN_BIT (1U << 1) /* Spin bit set by remote peer */
+#define QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS (1U << 2) /* HANDSHAKE_DONE must be sent */
+#define QUIC_FL_CONN_LISTENER (1U << 3)
+#define QUIC_FL_CONN_ACCEPT_REGISTERED (1U << 4)
+#define QUIC_FL_CONN_TX_MUX_CONTEXT (1U << 5) /* sending in progress from the MUX layer */
+#define QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ (1U << 6)
+#define QUIC_FL_CONN_RETRANS_NEEDED (1U << 7)
+#define QUIC_FL_CONN_RETRANS_OLD_DATA (1U << 8) /* retransmission in progress for probing with already sent data */
+#define QUIC_FL_CONN_TLS_ALERT (1U << 9)
+#define QUIC_FL_CONN_AFFINITY_CHANGED (1U << 10) /* qc_finalize_affinity_rebind() must be called to finalize affinity rebind */
+/* gap here */
+#define QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED (1U << 11) /* The half-open connection counter was decremented */
+#define QUIC_FL_CONN_HANDSHAKE_SPEED_UP (1U << 12) /* Handshake speeding up was done */
+#define QUIC_FL_CONN_ACK_TIMER_FIRED (1U << 13) /* idle timer triggered for acknowledgements */
+#define QUIC_FL_CONN_IO_TO_REQUEUE (1U << 14) /* IO handler must be requeued on new thread after connection migration */
+#define QUIC_FL_CONN_IPKTNS_DCD (1U << 15) /* Initial packet number space discarded */
+#define QUIC_FL_CONN_HPKTNS_DCD (1U << 16) /* Handshake packet number space discarded */
+#define QUIC_FL_CONN_PEER_VALIDATED_ADDR (1U << 17) /* Peer address is considered as validated for this connection. */
+#define QUIC_FL_CONN_TO_KILL (1U << 24) /* Unusable connection, to be killed */
+#define QUIC_FL_CONN_TX_TP_RECEIVED (1U << 25) /* Peer transport parameters have been received (used for the transmitting part) */
+#define QUIC_FL_CONN_FINALIZED (1U << 26) /* QUIC connection finalized (functional, ready to send/receive) */
+/* gap here */
+#define QUIC_FL_CONN_EXP_TIMER (1U << 28) /* timer has expired, quic-conn can be freed */
+#define QUIC_FL_CONN_CLOSING (1U << 29) /* closing state, entered on CONNECTION_CLOSE emission */
+#define QUIC_FL_CONN_DRAINING (1U << 30) /* draining state, entered on CONNECTION_CLOSE reception */
+#define QUIC_FL_CONN_IMMEDIATE_CLOSE (1U << 31) /* A CONNECTION_CLOSE must be sent */
+
+#define QUIC_CONN_COMMON \
+ struct { \
+ /* Connection owned socket FD. */ \
+ int fd; \
+ unsigned int flags; \
+ struct quic_err err; \
+ /* When in closing state, number of packet before sending CC */ \
+ unsigned int nb_pkt_for_cc; \
+ /* When in closing state, number of packet since receiving CC */ \
+ unsigned int nb_pkt_since_cc; \
+ struct wait_event wait_event; \
+ struct wait_event *subs; \
+ struct sockaddr_storage local_addr; \
+ struct sockaddr_storage peer_addr; \
+ struct { \
+ /* Number of bytes for prepared packets */ \
+ uint64_t prep; \
+ /* Number of sent bytes. */ \
+ uint64_t tx; \
+ /* Number of received bytes. */ \
+ uint64_t rx; \
+ } bytes; \
+ /* First DCID used by client on its Initial packet. */ \
+ struct quic_cid odcid; \
+ /* DCID of our endpoint - not updated when a new DCID is used */ \
+ struct quic_cid dcid; \
+ /* first SCID of our endpoint - not updated when a new SCID is used */ \
+ struct quic_cid scid; \
+ /* tree of quic_connection_id - used to match a received packet DCID \
+ * with a connection \
+ */ \
+ struct eb_root *cids; \
+ struct listener *li; /* only valid for frontend connections */ \
+ /* Idle timer task */ \
+ struct task *idle_timer_task; \
+ unsigned int idle_expire; \
+ /* QUIC connection level counters */ \
+ struct quic_conn_cntrs cntrs; \
+ struct connection *conn; \
+ }
+
+struct quic_conn {
+ QUIC_CONN_COMMON;
+ /* Used only to reach the tasklet for the I/O handler from this
+ * quic_conn object.
+ */
+ struct ssl_sock_ctx *xprt_ctx;
+ const struct quic_version *original_version;
+ const struct quic_version *negotiated_version;
+ /* Negotiated version Initial TLS context */
+ struct quic_tls_ctx *nictx;
+ /* QUIC transport parameters TLS extension */
+ int tps_tls_ext;
+ int state;
+ enum qc_mux_state mux_state; /* status of the connection/mux layer */
+#ifdef USE_QUIC_OPENSSL_COMPAT
+ unsigned char enc_params[QUIC_TP_MAX_ENCLEN]; /* encoded QUIC transport parameters */
+ size_t enc_params_len;
+#endif
+
+ uint64_t next_cid_seq_num;
+ /* Initial hash computed from first ID (derived from ODCID).
+ * it could be reused to derive extra CIDs from the same hash
+ */
+ uint64_t hash64;
+
+ /* Initial encryption level */
+ struct quic_enc_level *iel;
+ /* 0-RTT encryption level */
+ struct quic_enc_level *eel;
+ /* Handshake encryption level */
+ struct quic_enc_level *hel;
+ /* 1-RTT encryption level */
+ struct quic_enc_level *ael;
+ /* List of allocated QUIC TLS encryption level */
+ struct list qel_list;
+
+ struct quic_pktns *ipktns;
+ struct quic_pktns *hpktns;
+ struct quic_pktns *apktns;
+ /* List of packet number spaces attached to this connection */
+ struct list pktns_list;
+
+#ifdef USE_QUIC_OPENSSL_COMPAT
+ struct quic_openssl_compat openssl_compat;
+#endif
+
+ struct {
+ /* Transport parameters sent by the peer */
+ struct quic_transport_params params;
+ /* Send buffer used to write datagrams. */
+ struct buffer buf;
+		/* Send buffer used to send a "connection close" datagram. */
+ struct buffer cc_buf;
+ char *cc_buf_area;
+ /* Length of the "connection close" datagram. */
+ size_t cc_dgram_len;
+ } tx;
+ struct {
+ /* Transport parameters the peer will receive */
+ struct quic_transport_params params;
+ /* RX buffer */
+ struct buffer buf;
+ struct list pkt_list;
+ struct {
+ /* Number of open or closed streams */
+ uint64_t nb_streams;
+ } strms[QCS_MAX_TYPES];
+ } rx;
+ struct {
+ struct quic_tls_kp prv_rx;
+ struct quic_tls_kp nxt_rx;
+ struct quic_tls_kp nxt_tx;
+ } ku;
+ unsigned int max_ack_delay;
+ unsigned int max_idle_timeout;
+ struct quic_cc_path paths[1];
+ struct quic_cc_path *path;
+
+ struct mt_list accept_list; /* chaining element used for accept, only valid for frontend connections */
+
+ struct eb_root streams_by_id; /* qc_stream_desc tree */
+ int stream_buf_count; /* total count of allocated stream buffers for this connection */
+
+ /* MUX */
+ struct qcc *qcc;
+ struct task *timer_task;
+ unsigned int timer;
+ unsigned int ack_expire;
+ /* Handshake expiration date */
+ unsigned int hs_expire;
+
+ const struct qcc_app_ops *app_ops;
+ /* Proxy counters */
+ struct quic_counters *prx_counters;
+
+ struct list el_th_ctx; /* list elem in ha_thread_ctx */
+ struct list back_refs; /* list head of CLI context currently dumping this connection. */
+ unsigned int qc_epoch; /* delimiter for newer instances started after "show quic". */
+};
+
+/* QUIC connection in "connection close" state. */
+struct quic_conn_closed {
+ QUIC_CONN_COMMON;
+ char *cc_buf_area;
+ /* Length of the "connection close" datagram. */
+ size_t cc_dgram_len;
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_CONN_T_H */
diff --git a/include/haproxy/quic_conn.h b/include/haproxy/quic_conn.h
new file mode 100644
index 0000000..92caed4
--- /dev/null
+++ b/include/haproxy/quic_conn.h
@@ -0,0 +1,201 @@
+/*
+ * include/haproxy/quic_conn.h
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_CONN_H
+#define _HAPROXY_QUIC_CONN_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+
+#include <import/eb64tree.h>
+#include <import/ebmbtree.h>
+
+#include <haproxy/chunk.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/ncbuf.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ticks.h>
+
+#include <haproxy/listener.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/quic_cc.h>
+#include <haproxy/quic_cid.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_loss.h>
+#include <haproxy/quic_rx.h>
+#include <haproxy/mux_quic.h>
+
+#include <openssl/rand.h>
+
+extern struct pool_head *pool_head_quic_connection_id;
+
+int qc_conn_finalize(struct quic_conn *qc, int server);
+int ssl_quic_initial_ctx(struct bind_conf *bind_conf);
+struct quic_cstream *quic_cstream_new(struct quic_conn *qc);
+void quic_cstream_free(struct quic_cstream *cs);
+void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs);
+struct quic_cstream *quic_cstream_new(struct quic_conn *qc);
+struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state);
+
+struct quic_connection_id *new_quic_cid(struct eb_root *root,
+ struct quic_conn *qc,
+ const struct quic_cid *orig,
+ const struct sockaddr_storage *addr);
+void quic_conn_closed_err_count_inc(struct quic_conn *qc, struct quic_frame *frm);
+int qc_h3_request_reject(struct quic_conn *qc, uint64_t id);
+struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
+ struct quic_cid *dcid, struct quic_cid *scid,
+ const struct quic_cid *token_odcid,
+ struct quic_connection_id *conn_id,
+ struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *peer_addr,
+ int server, int token, void *owner);
+int quic_build_post_handshake_frames(struct quic_conn *qc);
+const struct quic_version *qc_supported_version(uint32_t version);
+int quic_peer_validated_addr(struct quic_conn *qc);
+void qc_set_timer(struct quic_conn *qc);
+void qc_detach_th_ctx_list(struct quic_conn *qc, int closing);
+void qc_idle_timer_do_rearm(struct quic_conn *qc, int arm_ack);
+void qc_idle_timer_rearm(struct quic_conn *qc, int read, int arm_ack);
+void qc_check_close_on_released_mux(struct quic_conn *qc);
+int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
+ const unsigned char *salt, size_t saltlen);
+
+static inline int qc_is_listener(struct quic_conn *qc)
+{
+ return qc->flags & QUIC_FL_CONN_LISTENER;
+}
+
+/* Free the CIDs attached to <conn> QUIC connection. */
+static inline void free_quic_conn_cids(struct quic_conn *conn)
+{
+ struct eb64_node *node;
+
+ if (!conn->cids)
+ return;
+
+ node = eb64_first(conn->cids);
+ while (node) {
+ struct quic_connection_id *conn_id;
+
+ conn_id = eb64_entry(node, struct quic_connection_id, seq_num);
+
+ /* remove the CID from the receiver tree */
+ quic_cid_delete(conn_id);
+
+ /* remove the CID from the quic_conn tree */
+ node = eb64_next(node);
+ eb64_delete(&conn_id->seq_num);
+ pool_free(pool_head_quic_connection_id, conn_id);
+ }
+}
+
+/* Move all the connection IDs from <conn> QUIC connection to <cc_conn> */
+static inline void quic_conn_mv_cids_to_cc_conn(struct quic_conn_closed *cc_conn,
+ struct quic_conn *conn)
+{
+ struct eb64_node *node;
+
+ node = eb64_first(conn->cids);
+ while (node) {
+ struct quic_connection_id *conn_id;
+
+ conn_id = eb64_entry(node, struct quic_connection_id, seq_num);
+ conn_id->qc = (struct quic_conn *)cc_conn;
+ node = eb64_next(node);
+ }
+
+}
+
+/* Allocate the underlying required memory for <ncbuf> non-contiguous buffer */
+static inline struct ncbuf *quic_get_ncbuf(struct ncbuf *ncbuf)
+{
+ struct buffer buf = BUF_NULL;
+
+ if (!ncb_is_null(ncbuf))
+ return ncbuf;
+
+ b_alloc(&buf);
+ BUG_ON(b_is_null(&buf));
+
+ *ncbuf = ncb_make(buf.area, buf.size, 0);
+ ncb_init(ncbuf, 0);
+
+ return ncbuf;
+}
+
+/* Release the underlying memory used by <ncbuf> non-contiguous buffer */
+static inline void quic_free_ncbuf(struct ncbuf *ncbuf)
+{
+ struct buffer buf;
+
+ if (ncb_is_null(ncbuf))
+ return;
+
+ buf = b_make(ncbuf->area, ncbuf->size, 0, 0);
+ b_free(&buf);
+ offer_buffers(NULL, 1);
+
+ *ncbuf = NCBUF_NULL;
+}
+
+void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm);
+void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err);
+void quic_set_tls_alert(struct quic_conn *qc, int alert);
+int quic_set_app_ops(struct quic_conn *qc, const unsigned char *alpn, size_t alpn_len);
+int qc_check_dcid(struct quic_conn *qc, unsigned char *dcid, size_t dcid_len);
+struct quic_cid quic_derive_cid(const struct quic_cid *orig,
+ const struct sockaddr_storage *addr);
+int quic_get_cid_tid(const unsigned char *cid, size_t cid_len,
+ const struct sockaddr_storage *cli_addr,
+ unsigned char *buf, size_t buf_len);
+int qc_send_mux(struct quic_conn *qc, struct list *frms);
+
+void qc_notify_err(struct quic_conn *qc);
+int qc_notify_send(struct quic_conn *qc);
+
+void qc_check_close_on_released_mux(struct quic_conn *qc);
+
+void quic_conn_release(struct quic_conn *qc);
+
+void qc_kill_conn(struct quic_conn *qc);
+
+int qc_parse_hd_form(struct quic_rx_packet *pkt,
+ unsigned char **buf, const unsigned char *end);
+
+int qc_set_tid_affinity(struct quic_conn *qc, uint new_tid, struct listener *new_li);
+void qc_finalize_affinity_rebind(struct quic_conn *qc);
+int qc_handle_conn_migration(struct quic_conn *qc,
+ const struct sockaddr_storage *peer_addr,
+ const struct sockaddr_storage *local_addr);
+
+/* Function pointer that can be used to compute a hash from first generated CID (derived from ODCID) */
+extern uint64_t (*quic_hash64_from_cid)(const unsigned char *cid, int size, const unsigned char *secret, size_t secretlen);
+/* Function pointer that can be used to derive a new CID from the previously computed hash */
+extern void (*quic_newcid_from_hash64)(unsigned char *cid, int size, uint64_t hash, const unsigned char *secret, size_t secretlen);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_CONN_H */
diff --git a/include/haproxy/quic_enc.h b/include/haproxy/quic_enc.h
new file mode 100644
index 0000000..4b85605
--- /dev/null
+++ b/include/haproxy/quic_enc.h
@@ -0,0 +1,275 @@
+/*
+ * include/haproxy/quic_enc.h
+ * This file contains QUIC varint encoding function prototypes
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_ENC_H
+#define _HAPROXY_QUIC_ENC_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+
+/* The maximum size of a variable-length QUIC integer encoded with 1 byte */
+#define QUIC_VARINT_1_BYTE_MAX ((1UL << 6) - 1)
+/* The maximum size of a variable-length QUIC integer encoded with 2 bytes */
+#define QUIC_VARINT_2_BYTE_MAX ((1UL << 14) - 1)
+/* The maximum size of a variable-length QUIC integer encoded with 4 bytes */
+#define QUIC_VARINT_4_BYTE_MAX ((1UL << 30) - 1)
+/* The maximum size of a variable-length QUIC integer encoded with 8 bytes */
+#define QUIC_VARINT_8_BYTE_MAX ((1ULL << 62) - 1)
+
+/* The maximum size of a variable-length QUIC integer */
+#define QUIC_VARINT_MAX_SIZE 8
+
+/* The two most significant bits of byte #0 from a QUIC packet give the base-2
+ * logarithm of the length of a variable-length encoded integer.
+ */
+#define QUIC_VARINT_BYTE_0_BITMASK 0x3f
+#define QUIC_VARINT_BYTE_0_SHIFT 6
+
+/* Returns the base-2 logarithm of the first powers of two, as used to encode
+ * QUIC variable-length integers.
+ * Returns -1 if <val> is out of the range of lengths supported by QUIC.
+ */
+static inline int quic_log2(unsigned int val)
+{
+ switch (val) {
+ case 8:
+ return 3;
+ case 4:
+ return 2;
+ case 2:
+ return 1;
+ case 1:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+/* Returns the size in bytes required to encode a 64bits integer if
+ * not out of range (< (1 << 62)), or 0 if out of range.
+ */
+static inline size_t quic_int_getsize(uint64_t val)
+{
+ switch (val) {
+ case 0 ... QUIC_VARINT_1_BYTE_MAX:
+ return 1;
+ case QUIC_VARINT_1_BYTE_MAX + 1 ... QUIC_VARINT_2_BYTE_MAX:
+ return 2;
+ case QUIC_VARINT_2_BYTE_MAX + 1 ... QUIC_VARINT_4_BYTE_MAX:
+ return 4;
+ case QUIC_VARINT_4_BYTE_MAX + 1 ... QUIC_VARINT_8_BYTE_MAX:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+/* Returns the maximum value of a QUIC variable-length integer with <sz> as size */
+static inline uint64_t quic_max_int(size_t sz)
+{
+ switch (sz) {
+ case 1:
+ return QUIC_VARINT_1_BYTE_MAX;
+ case 2:
+ return QUIC_VARINT_2_BYTE_MAX;
+ case 4:
+ return QUIC_VARINT_4_BYTE_MAX;
+ case 8:
+ return QUIC_VARINT_8_BYTE_MAX;
+ }
+
+ return -1;
+}
+
+/* Decode a QUIC variable-length integer from <buf> buffer into <val>.
+ * Note that the result is a 64-bit integer but with only the least significant
+ * 62 bits as relevant information. The 2 most significant remaining bits encode
+ * the length of the integer.
+ * Returns 1 if succeeded (there was enough data in <buf>), 0 if not.
+ */
+static inline int quic_dec_int(uint64_t *val,
+ const unsigned char **buf,
+ const unsigned char *end)
+{
+ size_t len;
+
+ if (*buf >= end)
+ return 0;
+
+ len = 1 << (**buf >> QUIC_VARINT_BYTE_0_SHIFT);
+ if (*buf + len > end)
+ return 0;
+
+ *val = *(*buf)++ & QUIC_VARINT_BYTE_0_BITMASK;
+ while (--len)
+ *val = (*val << 8) | *(*buf)++;
+
+ return 1;
+}
+
+/* Decode a QUIC variable-length integer from <b> buffer into <val> supporting wrapping.
+ * Note that the result is a 64-bit integer but with only the least significant
+ * 62 bits as relevant information. The 2 most significant bits encode
+ * the length of the integer.
+ * Note that this function updates <b> buffer when a variable-length integer
+ * has successfully been parsed.
+ * Returns 1 if succeeded (there was enough data in <b>), 0 if not.
+ * If <retlen> is not null, increment <*retlen> by the number of bytes consumed to decode
+ * the varint.
+ */
+static inline size_t b_quic_dec_int(uint64_t *val, struct buffer *b, size_t *retlen)
+{
+ const unsigned char *pos = (const unsigned char *)b_head(b);
+ const unsigned char *end = (const unsigned char *)b_wrap(b);
+ size_t size = b_size(b);
+ size_t data = b_data(b);
+ size_t save_len, len;
+
+ if (!data)
+ return 0;
+
+ save_len = len = 1 << (*pos >> QUIC_VARINT_BYTE_0_SHIFT);
+ if (data < len)
+ return 0;
+
+ *val = *pos & QUIC_VARINT_BYTE_0_BITMASK;
+ if (++pos == end)
+ pos -= size;
+ while (--len) {
+ *val = (*val << 8) | *pos;
+ if (++pos == end)
+ pos -= size;
+ }
+ if (retlen)
+ *retlen += save_len;
+ b_del(b, save_len);
+
+ return 1;
+}
+
+/* Encode a QUIC variable-length integer from <val> into <buf> buffer with <end> as first
+ * byte address after the end of this buffer.
+ * Returns 1 if succeeded (there was enough room in buf), 0 if not.
+ */
+static inline int quic_enc_int(unsigned char **buf, const unsigned char *end, uint64_t val)
+{
+ size_t len;
+ unsigned int shift;
+ unsigned char size_bits, *head;
+
+ len = quic_int_getsize(val);
+ if (!len || end - *buf < len)
+ return 0;
+
+ shift = (len - 1) * 8;
+ /* set the bits of byte#0 which gives the length of the encoded integer */
+ size_bits = quic_log2(len) << QUIC_VARINT_BYTE_0_SHIFT;
+ head = *buf;
+ while (len--) {
+ *(*buf)++ = val >> shift;
+ shift -= 8;
+ }
+ *head |= size_bits;
+
+ return 1;
+}
+
+/* Encode a QUIC variable-length integer <val> into <b> buffer. <width> can be
+ * set to specify the desired output width. By default use 0 for the minimal
+ * integer size. Other valid values are 1, 2, 4 or 8.
+ *
+ * Returns 1 on success else 0.
+ */
+static inline int b_quic_enc_int(struct buffer *b, uint64_t val, int width)
+{
+ char *pos;
+ int save_width, len;
+
+	/* width can only be 0, 1, 2, 4 or 8 */
+ BUG_ON(width && (width > 8 || atleast2(width)));
+
+ len = quic_int_getsize(val);
+ if (!len)
+ return 0;
+
+ /* Check that buffer room is sufficient and width big enough if set. */
+ if (b_room(b) < len || (width && width < len))
+ return 0;
+
+ if (!width)
+ width = len;
+ save_width = width;
+
+ pos = b_tail(b);
+ while (width--) {
+ /* Encode the shifted integer or 0 if width bigger than integer length. */
+ *pos++ = width >= len ? 0 : val >> (width * 8);
+
+ if (pos == b_wrap(b))
+ pos = b_orig(b);
+ }
+
+ /* set the bits of byte#0 which gives the length of the encoded integer */
+ *b_tail(b) |= quic_log2(save_width) << QUIC_VARINT_BYTE_0_SHIFT;
+ b_add(b, save_width);
+
+ return 1;
+}
+
+static inline size_t quic_incint_size_diff(uint64_t val)
+{
+ switch (val) {
+ case QUIC_VARINT_1_BYTE_MAX:
+ return 1;
+ case QUIC_VARINT_2_BYTE_MAX:
+ return 2;
+ case QUIC_VARINT_4_BYTE_MAX:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/* Return the difference between the encoded length of <val> and the encoded
+ * length of <val-1>.
+ */
+static inline size_t quic_decint_size_diff(uint64_t val)
+{
+ switch (val) {
+ case QUIC_VARINT_1_BYTE_MAX + 1:
+ return 1;
+ case QUIC_VARINT_2_BYTE_MAX + 1:
+ return 2;
+ case QUIC_VARINT_4_BYTE_MAX + 1:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_ENC_H */
diff --git a/include/haproxy/quic_frame-t.h b/include/haproxy/quic_frame-t.h
new file mode 100644
index 0000000..5e91f93
--- /dev/null
+++ b/include/haproxy/quic_frame-t.h
@@ -0,0 +1,309 @@
+/*
+ * include/haproxy/quic_frame-t.h
+ * This file contains QUIC frame definitions.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _TYPES_QUIC_FRAME_H
+#define _TYPES_QUIC_FRAME_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include <import/ebtree-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/list.h>
+#include <haproxy/quic_stream-t.h>
+
+extern struct pool_head *pool_head_quic_frame;
+extern struct pool_head *pool_head_qf_crypto;
+
+/* forward declarations from xprt-quic */
+struct quic_arngs;
+struct quic_enc_level;
+struct quic_tx_packet;
+
+/* QUIC frame types. */
+enum quic_frame_type {
+ QUIC_FT_PADDING = 0x00,
+ QUIC_FT_PING = 0x01,
+ QUIC_FT_ACK = 0x02,
+ QUIC_FT_ACK_ECN = 0x03,
+ QUIC_FT_RESET_STREAM = 0x04,
+ QUIC_FT_STOP_SENDING = 0x05,
+ QUIC_FT_CRYPTO = 0x06,
+ QUIC_FT_NEW_TOKEN = 0x07,
+
+ QUIC_FT_STREAM_8 = 0x08,
+ QUIC_FT_STREAM_9 = 0x09,
+ QUIC_FT_STREAM_A = 0x0a,
+ QUIC_FT_STREAM_B = 0x0b,
+ QUIC_FT_STREAM_C = 0x0c,
+ QUIC_FT_STREAM_D = 0x0d,
+ QUIC_FT_STREAM_E = 0x0e,
+ QUIC_FT_STREAM_F = 0x0f,
+
+ QUIC_FT_MAX_DATA = 0x10,
+ QUIC_FT_MAX_STREAM_DATA = 0x11,
+ QUIC_FT_MAX_STREAMS_BIDI = 0x12,
+ QUIC_FT_MAX_STREAMS_UNI = 0x13,
+ QUIC_FT_DATA_BLOCKED = 0x14,
+ QUIC_FT_STREAM_DATA_BLOCKED = 0x15,
+ QUIC_FT_STREAMS_BLOCKED_BIDI = 0x16,
+ QUIC_FT_STREAMS_BLOCKED_UNI = 0x17,
+ QUIC_FT_NEW_CONNECTION_ID = 0x18,
+ QUIC_FT_RETIRE_CONNECTION_ID = 0x19,
+ QUIC_FT_PATH_CHALLENGE = 0x1a,
+ QUIC_FT_PATH_RESPONSE = 0x1b,
+ QUIC_FT_CONNECTION_CLOSE = 0x1c,
+ QUIC_FT_CONNECTION_CLOSE_APP = 0x1d,
+ QUIC_FT_HANDSHAKE_DONE = 0x1e,
+ /* Do not insert enums after the following one. */
+ QUIC_FT_MAX
+};
+
+#define QUIC_FT_PKT_TYPE_I_BITMASK (1 << QUIC_PACKET_TYPE_INITIAL)
+#define QUIC_FT_PKT_TYPE_0_BITMASK (1 << QUIC_PACKET_TYPE_0RTT)
+#define QUIC_FT_PKT_TYPE_H_BITMASK (1 << QUIC_PACKET_TYPE_HANDSHAKE)
+#define QUIC_FT_PKT_TYPE_1_BITMASK (1 << QUIC_PACKET_TYPE_SHORT)
+
+#define QUIC_FT_PKT_TYPE_IH01_BITMASK \
+ (QUIC_FT_PKT_TYPE_I_BITMASK | QUIC_FT_PKT_TYPE_H_BITMASK | \
+ QUIC_FT_PKT_TYPE_0_BITMASK | QUIC_FT_PKT_TYPE_1_BITMASK)
+
+#define QUIC_FT_PKT_TYPE_IH_1_BITMASK \
+ (QUIC_FT_PKT_TYPE_I_BITMASK | QUIC_FT_PKT_TYPE_H_BITMASK | \
+ QUIC_FT_PKT_TYPE_1_BITMASK)
+
+#define QUIC_FT_PKT_TYPE___01_BITMASK \
+ (QUIC_FT_PKT_TYPE_0_BITMASK | QUIC_FT_PKT_TYPE_1_BITMASK)
+
+#define QUIC_FT_PKT_TYPE____1_BITMASK QUIC_FT_PKT_TYPE_1_BITMASK
+
+
+/* Flag a TX frame as acknowledged */
+#define QUIC_FL_TX_FRAME_ACKED 0x01
+
+#define QUIC_STREAM_FRAME_TYPE_FIN_BIT 0x01
+#define QUIC_STREAM_FRAME_TYPE_LEN_BIT 0x02
+#define QUIC_STREAM_FRAME_TYPE_OFF_BIT 0x04
+
+/* Servers have the stream initiator bit set. */
+#define QUIC_STREAM_FRAME_ID_INITIATOR_BIT 0x01
+/* Unidirectional streams have the direction bit set. */
+#define QUIC_STREAM_FRAME_ID_DIR_BIT 0x02
+
+#define QUIC_PATH_CHALLENGE_LEN 8
+/* Maximum phrase length in CONNECTION_CLOSE frame */
+#define QUIC_CC_REASON_PHRASE_MAXLEN 64
+
+struct qf_padding {
+ size_t len;
+};
+
+struct qf_ack {
+ uint64_t largest_ack;
+ uint64_t ack_delay;
+ uint64_t ack_range_num;
+ uint64_t first_ack_range;
+};
+
+/* Structure used when emitting ACK frames. */
+struct qf_tx_ack {
+ uint64_t ack_delay;
+ struct quic_arngs *arngs;
+};
+
+struct qf_reset_stream {
+ uint64_t id;
+ uint64_t app_error_code;
+ uint64_t final_size;
+};
+
+struct qf_stop_sending {
+ uint64_t id;
+ uint64_t app_error_code;
+};
+
+struct qf_crypto {
+ struct list list;
+ uint64_t offset;
+ uint64_t len;
+ const struct quic_enc_level *qel;
+ const unsigned char *data;
+};
+
+struct qf_new_token {
+ uint64_t len;
+ const unsigned char *data;
+};
+
+struct qf_stream {
+ uint64_t id;
+ struct qc_stream_desc *stream;
+
+ /* used only on TX when constructing frames.
+ * Data cleared when processing ACK related to this STREAM frame.
+ *
+ * A same buffer may be shared between several STREAM frames. The
+ * <data> field of each quic_stream serves to differentiate the payload
+ * of each of these.
+ */
+ struct buffer *buf;
+
+ struct eb64_node offset;
+ uint64_t len;
+
+ /* for TX pointer into <buf> field.
+ * for RX pointer into the packet buffer.
+ */
+ const unsigned char *data;
+
+ char dup; /* set for duplicated frame : this forces to check for the underlying qc_stream_buf instance before emitting it. */
+};
+
+struct qf_max_data {
+ uint64_t max_data;
+};
+
+struct qf_max_stream_data {
+ uint64_t id;
+ uint64_t max_stream_data;
+};
+
+struct qf_max_streams {
+ uint64_t max_streams;
+};
+
+struct qf_data_blocked {
+ uint64_t limit;
+};
+
+struct qf_stream_data_blocked {
+ uint64_t id;
+ uint64_t limit;
+};
+
+struct qf_streams_blocked {
+ uint64_t limit;
+};
+
+struct qf_new_connection_id {
+ uint64_t seq_num;
+ uint64_t retire_prior_to;
+ struct {
+ unsigned char len;
+ const unsigned char *data;
+ } cid;
+ const unsigned char *stateless_reset_token;
+};
+
+struct qf_retire_connection_id {
+ uint64_t seq_num;
+};
+
+struct qf_path_challenge {
+ unsigned char data[QUIC_PATH_CHALLENGE_LEN];
+};
+
+struct qf_path_challenge_response {
+ unsigned char data[QUIC_PATH_CHALLENGE_LEN];
+};
+
+struct qf_connection_close {
+ uint64_t error_code;
+ uint64_t frame_type;
+ uint64_t reason_phrase_len;
+ unsigned char reason_phrase[QUIC_CC_REASON_PHRASE_MAXLEN];
+};
+
+struct qf_connection_close_app {
+ uint64_t error_code;
+ uint64_t reason_phrase_len;
+ unsigned char reason_phrase[QUIC_CC_REASON_PHRASE_MAXLEN];
+};
+
+struct quic_frame {
+ struct list list; /* List elem from parent elem (typically a Tx packet instance, a PKTNS or a MUX element). */
+ struct quic_tx_packet *pkt; /* Last Tx packet used to send the frame. */
+ unsigned char type; /* QUIC frame type. */
+ union {
+ struct qf_padding padding;
+ struct qf_ack ack;
+ struct qf_tx_ack tx_ack;
+ struct qf_crypto crypto;
+ struct qf_reset_stream reset_stream;
+ struct qf_stop_sending stop_sending;
+ struct qf_new_token new_token;
+ struct qf_stream stream;
+ struct qf_max_data max_data;
+ struct qf_max_stream_data max_stream_data;
+ struct qf_max_streams max_streams_bidi;
+ struct qf_max_streams max_streams_uni;
+ struct qf_data_blocked data_blocked;
+ struct qf_stream_data_blocked stream_data_blocked;
+ struct qf_streams_blocked streams_blocked_bidi;
+ struct qf_streams_blocked streams_blocked_uni;
+ struct qf_new_connection_id new_connection_id;
+ struct qf_retire_connection_id retire_connection_id;
+ struct qf_path_challenge path_challenge;
+ struct qf_path_challenge_response path_challenge_response;
+ struct qf_connection_close connection_close;
+ struct qf_connection_close_app connection_close_app;
+ };
+ struct quic_frame *origin; /* Parent frame. Set if frame is a duplicate (used for retransmission). */
+ struct list reflist; /* List head containing duplicated children frames. */
+ struct list ref; /* List elem from parent frame reflist. Set if frame is a duplicate (used for retransmission). */
+ unsigned int flags; /* QUIC_FL_TX_FRAME_* */
+ unsigned int loss_count; /* Counter for each occurrence of this frame marked as lost. */
+};
+
+
+/* QUIC error codes */
+struct quic_err {
+ uint64_t code; /* error code */
+ int app; /* set for Application error code */
+};
+
+/* Transport level error codes. */
+#define QC_ERR_NO_ERROR 0x00
+#define QC_ERR_INTERNAL_ERROR 0x01
+#define QC_ERR_CONNECTION_REFUSED 0x02
+#define QC_ERR_FLOW_CONTROL_ERROR 0x03
+#define QC_ERR_STREAM_LIMIT_ERROR 0x04
+#define QC_ERR_STREAM_STATE_ERROR 0x05
+#define QC_ERR_FINAL_SIZE_ERROR 0x06
+#define QC_ERR_FRAME_ENCODING_ERROR 0x07
+#define QC_ERR_TRANSPORT_PARAMETER_ERROR 0x08
+#define QC_ERR_CONNECTION_ID_LIMIT_ERROR 0x09
+#define QC_ERR_PROTOCOL_VIOLATION 0x0a
+#define QC_ERR_INVALID_TOKEN 0x0b
+#define QC_ERR_APPLICATION_ERROR 0x0c
+#define QC_ERR_CRYPTO_BUFFER_EXCEEDED 0x0d
+#define QC_ERR_KEY_UPDATE_ERROR 0x0e
+#define QC_ERR_AEAD_LIMIT_REACHED 0x0f
+#define QC_ERR_NO_VIABLE_PATH 0x10
+/* 256 TLS reserved errors 0x100-0x1ff. */
+#define QC_ERR_CRYPTO_ERROR 0x100
+
+#endif /* USE_QUIC */
+#endif /* _TYPES_QUIC_FRAME_H */
diff --git a/include/haproxy/quic_frame.h b/include/haproxy/quic_frame.h
new file mode 100644
index 0000000..90d6b21
--- /dev/null
+++ b/include/haproxy/quic_frame.h
@@ -0,0 +1,281 @@
+/*
+ * include/haproxy/quic_frame.h
+ * This file contains prototypes for QUIC frames.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_FRAME_H
+#define _HAPROXY_QUIC_FRAME_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <import/eb64tree.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_frame-t.h>
+#include <haproxy/quic_rx-t.h>
+
+const char *quic_frame_type_string(enum quic_frame_type ft);
+
+int qc_build_frm(unsigned char **pos, const unsigned char *end,
+ struct quic_frame *frm, struct quic_tx_packet *pkt,
+ struct quic_conn *conn);
+
+int qc_parse_frm(struct quic_frame *frm, struct quic_rx_packet *pkt,
+ const unsigned char **pos, const unsigned char *end,
+ struct quic_conn *conn);
+
+void qc_release_frm(struct quic_conn *qc, struct quic_frame *frm);
+
+/* Return the length of <frm> frame if succeeded, -1 if not (unknown frames, or
+ * frames which must not be transmitted again once lost (PING, PADDING)).
+ */
+static inline size_t qc_frm_len(struct quic_frame *frm)
+{
+ size_t len = 0;
+
+ switch (frm->type) {
+ case QUIC_FT_ACK: {
+ struct qf_tx_ack *tx_ack = &frm->tx_ack;
+ struct eb64_node *ar, *prev_ar;
+ struct quic_arng_node *ar_node, *prev_ar_node;
+
+ ar = eb64_last(&tx_ack->arngs->root);
+ ar_node = eb64_entry(ar, struct quic_arng_node, first);
+ len += 1 + quic_int_getsize(ar_node->last);
+ len += quic_int_getsize(tx_ack->ack_delay);
+ len += quic_int_getsize(tx_ack->arngs->sz - 1);
+ len += quic_int_getsize(ar_node->last - ar_node->first.key);
+
+ while ((prev_ar = eb64_prev(ar))) {
+ prev_ar_node = eb64_entry(prev_ar, struct quic_arng_node, first);
+ len += quic_int_getsize(ar_node->first.key - prev_ar_node->last - 2);
+ len += quic_int_getsize(prev_ar_node->last - prev_ar_node->first.key);
+ ar = prev_ar;
+ ar_node = eb64_entry(ar, struct quic_arng_node, first);
+ }
+ break;
+ }
+ case QUIC_FT_RESET_STREAM: {
+ struct qf_reset_stream *f = &frm->reset_stream;
+ len += 1 + quic_int_getsize(f->id) +
+ quic_int_getsize(f->app_error_code) + quic_int_getsize(f->final_size);
+ break;
+ }
+ case QUIC_FT_STOP_SENDING: {
+ struct qf_stop_sending *f = &frm->stop_sending;
+ len += 1 + quic_int_getsize(f->id) + quic_int_getsize(f->app_error_code);
+ break;
+ }
+ case QUIC_FT_CRYPTO: {
+ struct qf_crypto *f = &frm->crypto;
+ len += 1 + quic_int_getsize(f->offset) + quic_int_getsize(f->len) + f->len;
+ break;
+ }
+ case QUIC_FT_NEW_TOKEN: {
+ struct qf_new_token *f = &frm->new_token;
+ len += 1 + quic_int_getsize(f->len) + f->len;
+ break;
+ }
+ case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F: {
+ struct qf_stream *f = &frm->stream;
+ len += 1 + quic_int_getsize(f->id) +
+ ((frm->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT) ? quic_int_getsize(f->offset.key) : 0) +
+ ((frm->type & QUIC_STREAM_FRAME_TYPE_LEN_BIT) ? quic_int_getsize(f->len) : 0) + f->len;
+ break;
+ }
+ case QUIC_FT_MAX_DATA: {
+ struct qf_max_data *f = &frm->max_data;
+ len += 1 + quic_int_getsize(f->max_data);
+ break;
+ }
+ case QUIC_FT_MAX_STREAM_DATA: {
+ struct qf_max_stream_data *f = &frm->max_stream_data;
+ len += 1 + quic_int_getsize(f->id) + quic_int_getsize(f->max_stream_data);
+ break;
+ }
+ case QUIC_FT_MAX_STREAMS_BIDI: {
+ struct qf_max_streams *f = &frm->max_streams_bidi;
+ len += 1 + quic_int_getsize(f->max_streams);
+ break;
+ }
+ case QUIC_FT_MAX_STREAMS_UNI: {
+ struct qf_max_streams *f = &frm->max_streams_uni;
+ len += 1 + quic_int_getsize(f->max_streams);
+ break;
+ }
+ case QUIC_FT_DATA_BLOCKED: {
+ struct qf_data_blocked *f = &frm->data_blocked;
+ len += 1 + quic_int_getsize(f->limit);
+ break;
+ }
+ case QUIC_FT_STREAM_DATA_BLOCKED: {
+ struct qf_stream_data_blocked *f = &frm->stream_data_blocked;
+ len += 1 + quic_int_getsize(f->id) + quic_int_getsize(f->limit);
+ break;
+ }
+ case QUIC_FT_STREAMS_BLOCKED_BIDI: {
+ struct qf_streams_blocked *f = &frm->streams_blocked_bidi;
+ len += 1 + quic_int_getsize(f->limit);
+ break;
+ }
+ case QUIC_FT_STREAMS_BLOCKED_UNI: {
+ struct qf_streams_blocked *f = &frm->streams_blocked_uni;
+ len += 1 + quic_int_getsize(f->limit);
+ break;
+ }
+ case QUIC_FT_NEW_CONNECTION_ID: {
+ struct qf_new_connection_id *f = &frm->new_connection_id;
+ len += 1 + quic_int_getsize(f->seq_num) + quic_int_getsize(f->retire_prior_to) +
+ quic_int_getsize(f->cid.len) + f->cid.len + QUIC_STATELESS_RESET_TOKEN_LEN;
+ break;
+ }
+ case QUIC_FT_RETIRE_CONNECTION_ID: {
+ struct qf_retire_connection_id *f = &frm->retire_connection_id;
+ len += 1 + quic_int_getsize(f->seq_num);
+ break;
+ }
+ case QUIC_FT_PATH_CHALLENGE: {
+ struct qf_path_challenge *f = &frm->path_challenge;
+ len += 1 + sizeof f->data;
+ break;
+ }
+ case QUIC_FT_PATH_RESPONSE: {
+ struct qf_path_challenge_response *f = &frm->path_challenge_response;
+ len += 1 + sizeof f->data;
+ break;
+ }
+ case QUIC_FT_CONNECTION_CLOSE: {
+ struct qf_connection_close *f = &frm->connection_close;
+ len += 1 + quic_int_getsize(f->error_code) + quic_int_getsize(f->frame_type) +
+ quic_int_getsize(f->reason_phrase_len) + f->reason_phrase_len;
+ break;
+ }
+ case QUIC_FT_CONNECTION_CLOSE_APP: {
+ struct qf_connection_close *f = &frm->connection_close;
+ len += 1 + quic_int_getsize(f->error_code) +
+ quic_int_getsize(f->reason_phrase_len) + f->reason_phrase_len;
+ break;
+ }
+ case QUIC_FT_HANDSHAKE_DONE: {
+ len += 1;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ return len;
+}
+
+static inline struct quic_err quic_err_transport(uint64_t code)
+{
+ return (struct quic_err){ .code = code, .app = 0 };
+}
+
+static inline struct quic_err quic_err_tls(uint64_t tls_alert)
+{
+ const uint64_t code = QC_ERR_CRYPTO_ERROR|tls_alert;
+ return (struct quic_err){ .code = code, .app = 0 };
+}
+
+static inline struct quic_err quic_err_app(uint64_t code)
+{
+ return (struct quic_err){ .code = code, .app = 1 };
+}
+
+/* Allocate a quic_frame with type <type>. Frame must be freed with
+ * qc_frm_free().
+ *
+ * Returns the allocated frame or NULL on failure.
+ */
+static inline struct quic_frame *qc_frm_alloc(int type)
+{
+ struct quic_frame *frm = NULL;
+
+ frm = pool_alloc(pool_head_quic_frame);
+ if (!frm)
+ return NULL;
+
+ frm->type = type;
+
+ LIST_INIT(&frm->list);
+ LIST_INIT(&frm->reflist);
+ LIST_INIT(&frm->ref);
+ frm->pkt = NULL;
+ frm->origin = NULL;
+ frm->flags = 0;
+ frm->loss_count = 0;
+
+ return frm;
+}
+
+/* Allocate a quic_frame by duplicating <origin> frame. This will create a new
+ * frame of the same type with the same content. Internal fields such as packet
+ * owner and flags are however reset for the newly allocated frame except
+ * for the loss counter. Frame must be freed with qc_frm_free().
+ *
+ * Returns the allocated frame or NULL on failure.
+ */
+static inline struct quic_frame *qc_frm_dup(struct quic_frame *origin)
+{
+ struct quic_frame *frm = NULL;
+
+ frm = pool_alloc(pool_head_quic_frame);
+ if (!frm)
+ return NULL;
+
+ *frm = *origin;
+
+ /* Reinit all internal members except loss_count. */
+ LIST_INIT(&frm->list);
+ LIST_INIT(&frm->reflist);
+ frm->pkt = NULL;
+ frm->flags = 0;
+
+ /* Attach <frm> to <origin>. */
+ LIST_APPEND(&origin->reflist, &frm->ref);
+ frm->origin = origin;
+
+ return frm;
+}
+
+void qc_frm_free(struct quic_conn *qc, struct quic_frame **frm);
+void qc_frm_unref(struct quic_frame *frm, struct quic_conn *qc);
+
+/* Move forward <strm> STREAM frame by <data> bytes. */
+static inline void qc_stream_frm_mv_fwd(struct quic_frame *frm, uint64_t data)
+{
+ struct qf_stream *strm_frm = &frm->stream;
+ struct buffer cf_buf;
+
+ /* Set offset bit if not already there. */
+ strm_frm->offset.key += data;
+ frm->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
+
+ strm_frm->len -= data;
+ cf_buf = b_make(b_orig(strm_frm->buf),
+ b_size(strm_frm->buf),
+ (char *)strm_frm->data - b_orig(strm_frm->buf), 0);
+ strm_frm->data = (unsigned char *)b_peek(&cf_buf, data);
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_FRAME_H */
diff --git a/include/haproxy/quic_loss-t.h b/include/haproxy/quic_loss-t.h
new file mode 100644
index 0000000..0f07ddc
--- /dev/null
+++ b/include/haproxy/quic_loss-t.h
@@ -0,0 +1,62 @@
+/*
+ * include/haproxy/quic_loss-t.h
+ * This file contains definitions for QUIC loss detection.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _TYPES_QUIC_LOSS_H
+#define _TYPES_QUIC_LOSS_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+
+/* Maximum reordering in packets. */
+#define QUIC_LOSS_PACKET_THRESHOLD 3
+#define QUIC_TIMER_GRANULARITY 1U /* 1ms */
+#define QUIC_LOSS_INITIAL_RTT 333U /* 333ms */
+
+/* QUIC loss time threshold expressed as an RTT multiplier
+ * (QUIC_LOSS_TIME_THRESHOLD_MULTIPLICAND / QUIC_LOSS_TIME_THRESHOLD_DIVISOR)
+ */
+#define QUIC_LOSS_TIME_THRESHOLD_MULTIPLICAND 9
+#define QUIC_LOSS_TIME_THRESHOLD_DIVISOR 8
+
+/* Note that the unit of all variables for QUIC loss detection
+ * is the tick.
+ */
+
+struct quic_loss {
+ /* The most recent RTT measurement (ms) */
+ unsigned int latest_rtt;
+ /* Smoothed RTT (ms) */
+ unsigned int srtt;
+ /* RTT variation (ms) */
+ unsigned int rtt_var;
+ /* Minimum RTT (ms) */
+ unsigned int rtt_min;
+ /* Number of NACKed sent PTO. */
+ unsigned int pto_count;
+ unsigned long nb_lost_pkt;
+ unsigned long nb_reordered_pkt;
+};
+
+#endif /* USE_QUIC */
+#endif /* _TYPES_QUIC_LOSS_H */
diff --git a/include/haproxy/quic_loss.h b/include/haproxy/quic_loss.h
new file mode 100644
index 0000000..fc713ca
--- /dev/null
+++ b/include/haproxy/quic_loss.h
@@ -0,0 +1,92 @@
+/*
+ * include/haproxy/quic_loss.h
+ * This file provides interface definition for QUIC loss detection.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _PROTO_QUIC_LOSS_H
+#define _PROTO_QUIC_LOSS_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/quic_loss-t.h>
+
+#include <haproxy/api.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_tls-t.h>
+
+static inline void quic_loss_init(struct quic_loss *ql)
+{
+ ql->latest_rtt = 0;
+ ql->srtt = QUIC_LOSS_INITIAL_RTT;
+ ql->rtt_var = QUIC_LOSS_INITIAL_RTT / 2;
+ ql->rtt_min = 0;
+ ql->pto_count = 0;
+ ql->nb_lost_pkt = 0;
+ ql->nb_reordered_pkt = 0;
+}
+
+/* Return 1 if a persistent congestion is observed for a list of
+ * lost packets sent during <period> period depending on <ql> loss information,
+ * <now_us> the current time and <max_ack_delay> the maximum ACK delay of the connection
+ * experiencing a packet loss. Return 0 on the contrary.
+ */
+static inline int quic_loss_persistent_congestion(struct quic_loss *ql,
+ unsigned int period,
+ unsigned int now_us,
+ unsigned int max_ack_delay)
+{
+ unsigned int congestion_period;
+
+ if (!period)
+ return 0;
+
+ congestion_period = ql->srtt +
+ QUIC_MAX(4 * ql->rtt_var, QUIC_TIMER_GRANULARITY) + max_ack_delay;
+ congestion_period *= QUIC_LOSS_PACKET_THRESHOLD;
+
+ return period >= congestion_period;
+}
+
+/* Return the PTO associated to <pktns> packet number space for <qc> connection */
+static inline unsigned int quic_pto(struct quic_conn *qc)
+{
+ struct quic_loss *ql = &qc->path->loss;
+
+ return ql->srtt + QUIC_MAX(4 * ql->rtt_var, QUIC_TIMER_GRANULARITY) +
+ (HA_ATOMIC_LOAD(&qc->state) >= QUIC_HS_ST_COMPLETE ? qc->max_ack_delay : 0);
+}
+
+void quic_loss_srtt_update(struct quic_loss *ql,
+ unsigned int rtt, unsigned int ack_delay,
+ struct quic_conn *qc);
+
+struct quic_pktns *quic_loss_pktns(struct quic_conn *qc);
+
+struct quic_pktns *quic_pto_pktns(struct quic_conn *qc,
+ int handshake_completed,
+ unsigned int *pto);
+
+void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
+ struct list *lost_pkts);
+int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
+ struct list *pkts, uint64_t now_us);
+#endif /* USE_QUIC */
+#endif /* _PROTO_QUIC_LOSS_H */
diff --git a/include/haproxy/quic_openssl_compat-t.h b/include/haproxy/quic_openssl_compat-t.h
new file mode 100644
index 0000000..2f2b92b
--- /dev/null
+++ b/include/haproxy/quic_openssl_compat-t.h
@@ -0,0 +1,64 @@
+#ifndef _HAPROXY_QUIC_OPENSSL_COMPAT_T_H_
+#define _HAPROXY_QUIC_OPENSSL_COMPAT_T_H_
+
+#ifdef USE_QUIC_OPENSSL_COMPAT
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#define QUIC_OPENSSL_COMPAT_TLS_SECRET_LEN 48
+#define QUIC_OPENSSL_COMPAT_TLS_IV_LEN 12
+
+/* Highly inspired from nginx QUIC TLS compatibility code */
+
+enum ssl_encryption_level_t {
+ ssl_encryption_initial = 0,
+ ssl_encryption_early_data,
+ ssl_encryption_handshake,
+ ssl_encryption_application
+};
+
+typedef struct ssl_quic_method_st {
+ int (*set_encryption_secrets)(SSL *ssl, enum ssl_encryption_level_t level,
+ const uint8_t *rsecret, const uint8_t *wsecret,
+ size_t secret_len);
+ int (*add_handshake_data)(SSL *ssl, enum ssl_encryption_level_t level,
+ const uint8_t *data, size_t len);
+ int (*flush_flight)(SSL *ssl);
+ int (*send_alert)(SSL *ssl, enum ssl_encryption_level_t level,
+ uint8_t alert);
+} SSL_QUIC_METHOD;
+
+struct quic_tls_md {
+ unsigned char data[QUIC_OPENSSL_COMPAT_TLS_SECRET_LEN];
+ size_t len;
+};
+
+struct quic_tls_iv {
+ unsigned char data[QUIC_OPENSSL_COMPAT_TLS_IV_LEN];
+ size_t len;
+};
+
+struct quic_tls_secret {
+ struct quic_tls_md secret;
+ struct quic_tls_md key;
+ struct quic_tls_iv iv;
+};
+
+struct quic_tls_compat_keys {
+ struct quic_tls_secret secret;
+ const EVP_CIPHER *cipher;
+};
+
+struct quic_openssl_compat {
+ BIO *rbio;
+ BIO *wbio;
+ const SSL_QUIC_METHOD *method;
+ enum ssl_encryption_level_t write_level;
+ enum ssl_encryption_level_t read_level;
+ uint64_t read_record;
+ struct quic_tls_compat_keys keys;
+};
+
+#endif /* USE_QUIC_OPENSSL_COMPAT */
+#endif /* _HAPROXY_QUIC_OPENSSL_COMPAT_T_H_ */
diff --git a/include/haproxy/quic_openssl_compat.h b/include/haproxy/quic_openssl_compat.h
new file mode 100644
index 0000000..837a28d
--- /dev/null
+++ b/include/haproxy/quic_openssl_compat.h
@@ -0,0 +1,33 @@
+#ifndef _HAPROXY_QUIC_OPENSSL_COMPAT_H_
+#define _HAPROXY_QUIC_OPENSSL_COMPAT_H_
+
+#ifdef USE_QUIC_OPENSSL_COMPAT
+
+/* Highly inspired from nginx QUIC TLS compatibility code */
+#include <haproxy/listener-t.h>
+#include <haproxy/quic_openssl_compat-t.h>
+
+#define QUIC_OPENSSL_COMPAT_SSL_TP_EXT 0x39
+
+/* Used by keylog */
+#define QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE "CLIENT_HANDSHAKE_TRAFFIC_SECRET"
+#define QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE "SERVER_HANDSHAKE_TRAFFIC_SECRET"
+#define QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION "CLIENT_TRAFFIC_SECRET_0"
+#define QUIC_OPENSSL_COMPAT_SERVER_APPLICATION "SERVER_TRAFFIC_SECRET_0"
+
+void quic_tls_compat_msg_callback(struct connection *conn,
+ int write_p, int version, int content_type,
+ const void *buf, size_t len, SSL *ssl);
+int quic_tls_compat_init(struct bind_conf *bind_conf, SSL_CTX *ctx);
+void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line);
+
+int SSL_set_quic_method(SSL *ssl, const SSL_QUIC_METHOD *quic_method);
+enum ssl_encryption_level_t SSL_quic_read_level(const SSL *ssl);
+enum ssl_encryption_level_t SSL_quic_write_level(const SSL *ssl);
+int SSL_set_quic_transport_params(SSL *ssl, const uint8_t *params, size_t params_len);
+int SSL_provide_quic_data(SSL *ssl, enum ssl_encryption_level_t level,
+ const uint8_t *data, size_t len);
+int SSL_process_quic_post_handshake(SSL *ssl);
+
+#endif /* USE_QUIC_OPENSSL_COMPAT */
+#endif /* _HAPROXY_QUIC_OPENSSL_COMPAT_H_ */
diff --git a/include/haproxy/quic_retransmit.h b/include/haproxy/quic_retransmit.h
new file mode 100644
index 0000000..403a53c
--- /dev/null
+++ b/include/haproxy/quic_retransmit.h
@@ -0,0 +1,20 @@
+#ifndef _HAPROXY_QUIC_RETRANSMIT_H
+#define _HAPROXY_QUIC_RETRANSMIT_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/list-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_tls-t.h>
+
+void qc_prep_fast_retrans(struct quic_conn *qc,
+ struct quic_pktns *pktns,
+ struct list *frms1, struct list *frms2);
+void qc_prep_hdshk_fast_retrans(struct quic_conn *qc,
+ struct list *ifrms, struct list *hfrms);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_RETRANSMIT_H */
diff --git a/include/haproxy/quic_retry.h b/include/haproxy/quic_retry.h
new file mode 100644
index 0000000..d31be02
--- /dev/null
+++ b/include/haproxy/quic_retry.h
@@ -0,0 +1,33 @@
+#ifndef _HAPROXY_QUIC_RETRY_H
+#define _HAPROXY_QUIC_RETRY_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+#include <sys/socket.h>
+
+#include <haproxy/quic_cid-t.h>
+#include <haproxy/quic_rx-t.h>
+#include <haproxy/quic_sock-t.h>
+
+struct listener;
+
+int quic_generate_retry_token(unsigned char *token, size_t len,
+ const uint32_t version,
+ const struct quic_cid *odcid,
+ const struct quic_cid *dcid,
+ struct sockaddr_storage *addr);
+int parse_retry_token(struct quic_conn *qc,
+ const unsigned char *token, const unsigned char *end,
+ struct quic_cid *odcid);
+int quic_retry_token_check(struct quic_rx_packet *pkt,
+ struct quic_dgram *dgram,
+ struct listener *l,
+ struct quic_conn *qc,
+ struct quic_cid *odcid);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_RETRY_H */
diff --git a/include/haproxy/quic_rx-t.h b/include/haproxy/quic_rx-t.h
new file mode 100644
index 0000000..9ef8e7a
--- /dev/null
+++ b/include/haproxy/quic_rx-t.h
@@ -0,0 +1,54 @@
+#ifndef _HAPROXY_RX_T_H
+#define _HAPROXY_RX_T_H
+
+extern struct pool_head *pool_head_quic_conn_rxbuf;
+extern struct pool_head *pool_head_quic_dgram;
+extern struct pool_head *pool_head_quic_rx_packet;
+
+/* Maximum number of ack-eliciting received packets since the last
+ * ACK frame was sent
+ */
+#define QUIC_MAX_RX_AEPKTS_SINCE_LAST_ACK 2
+#define QUIC_ACK_DELAY (QUIC_TP_DFLT_MAX_ACK_DELAY - 5)
+/* Flag a received packet as being an ack-eliciting packet. */
+#define QUIC_FL_RX_PACKET_ACK_ELICITING (1UL << 0)
+/* Packet is the first one in the containing datagram. */
+#define QUIC_FL_RX_PACKET_DGRAM_FIRST (1UL << 1)
+/* Spin bit set */
+#define QUIC_FL_RX_PACKET_SPIN_BIT (1UL << 2)
+
+struct quic_rx_packet {
+ struct list list;
+ struct list qc_rx_pkt_list;
+
+ /* QUIC version used in packet. */
+ const struct quic_version *version;
+
+ unsigned char type;
+ /* Initial destination connection ID. */
+ struct quic_cid dcid;
+ struct quic_cid scid;
+ /* Packet number offset : only valid for Initial/Handshake/0-RTT/1-RTT. */
+ size_t pn_offset;
+ /* Packet number */
+ int64_t pn;
+ /* Packet number length */
+ uint32_t pnl;
+ uint64_t token_len;
+ unsigned char *token;
+ /* Packet length */
+ uint64_t len;
+ /* Packet length before decryption */
+ uint64_t raw_len;
+ /* Additional authenticated data length */
+ size_t aad_len;
+ unsigned char *data;
+ struct eb64_node pn_node;
+ volatile unsigned int refcnt;
+ /* Source address of this packet. */
+ struct sockaddr_storage saddr;
+ unsigned int flags;
+ unsigned int time_received;
+};
+
+#endif /* _HAPROXY_RX_T_H */
diff --git a/include/haproxy/quic_rx.h b/include/haproxy/quic_rx.h
new file mode 100644
index 0000000..494bc4a
--- /dev/null
+++ b/include/haproxy/quic_rx.h
@@ -0,0 +1,58 @@
+/*
+ * QUIC protocol definitions (RX side).
+ *
+ * Copyright (C) 2023
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_RX_H
+#define _HAPROXY_QUIC_RX_H
+
+#include <haproxy/listener-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_rx-t.h>
+
+int quic_dgram_parse(struct quic_dgram *dgram, struct quic_conn *from_qc,
+ struct listener *li);
+int qc_treat_rx_pkts(struct quic_conn *qc);
+int qc_parse_hd_form(struct quic_rx_packet *pkt,
+ unsigned char **pos, const unsigned char *end);
+int qc_treat_rx_crypto_frms(struct quic_conn *qc, struct quic_enc_level *el,
+ struct ssl_sock_ctx *ctx);
+int qc_handle_frms_of_lost_pkt(struct quic_conn *qc,
+ struct quic_tx_packet *pkt,
+ struct list *pktns_frm_list);
+
+/* Increment the reference counter of <pkt> */
+static inline void quic_rx_packet_refinc(struct quic_rx_packet *pkt)
+{
+ pkt->refcnt++;
+}
+
+/* Decrement the reference counter of <pkt> while remaining positive */
+static inline void quic_rx_packet_refdec(struct quic_rx_packet *pkt)
+{
+ if (pkt->refcnt)
+ pkt->refcnt--;
+}
+
+/* Return 1 if <pkt> header form is long, 0 if not. */
+static inline int qc_pkt_long(const struct quic_rx_packet *pkt)
+{
+ return pkt->type != QUIC_PACKET_TYPE_SHORT;
+}
+
+#endif /* _HAPROXY_QUIC_RX_H */
diff --git a/include/haproxy/quic_sock-t.h b/include/haproxy/quic_sock-t.h
new file mode 100644
index 0000000..67a5749
--- /dev/null
+++ b/include/haproxy/quic_sock-t.h
@@ -0,0 +1,50 @@
+#ifndef _HAPROXY_QUIC_SOCK_T_H
+#define _HAPROXY_QUIC_SOCK_T_H
+#ifdef USE_QUIC
+
+#include <haproxy/buf-t.h>
+
+/* QUIC socket allocation strategy. */
+enum quic_sock_mode {
+ QUIC_SOCK_MODE_CONN, /* Use a dedicated socket per connection. */
+ QUIC_SOCK_MODE_LSTNR, /* Multiplex connections over listener socket. */
+};
+
+/* QUIC connection accept queue. One per thread. */
+struct quic_accept_queue {
+ struct mt_list listeners; /* QUIC listeners with at least one connection ready to be accepted on this queue */
+ struct tasklet *tasklet; /* task responsible to call listener_accept */
+};
+
+/* Buffer used to receive QUIC datagrams on random thread and redispatch them
+ * to the connection thread.
+ */
+struct quic_receiver_buf {
+ struct buffer buf; /* storage for datagrams received. */
+ struct list dgram_list; /* datagrams received with this rxbuf. */
+ struct mt_list rxbuf_el; /* list element into receiver.rxbuf_list. */
+};
+
+/* QUIC datagram */
+struct quic_dgram {
+ void *owner;
+ unsigned char *buf;
+ size_t len;
+ unsigned char *dcid;
+ size_t dcid_len;
+ struct sockaddr_storage saddr;
+ struct sockaddr_storage daddr;
+ struct quic_conn *qc;
+
+ struct list recv_list; /* elem to quic_receiver_buf <dgram_list>. */
+ struct mt_list handler_list; /* elem to quic_dghdlr <dgrams>. */
+};
+
+/* QUIC datagram handler */
+struct quic_dghdlr {
+ struct mt_list dgrams;
+ struct tasklet *task;
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_SOCK_T_H */
diff --git a/include/haproxy/quic_sock.h b/include/haproxy/quic_sock.h
new file mode 100644
index 0000000..531cf62
--- /dev/null
+++ b/include/haproxy/quic_sock.h
@@ -0,0 +1,107 @@
+/*
+ * include/haproxy/quic_sock.h
+ * This file contains declarations for QUIC sockets.
+ *
+ * Copyright 2020 Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_SOCK_H
+#define _HAPROXY_QUIC_SOCK_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_sock-t.h>
+
+int quic_session_accept(struct connection *cli_conn);
+int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len);
+int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len);
+int quic_sock_accepting_conn(const struct receiver *rx);
+struct connection *quic_sock_accept_conn(struct listener *l, int *status);
+
+struct task *quic_lstnr_dghdlr(struct task *t, void *ctx, unsigned int state);
+void quic_lstnr_sock_fd_iocb(int fd);
+int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
+ int flags);
+int qc_rcv_buf(struct quic_conn *qc);
+void quic_conn_sock_fd_iocb(int fd);
+
+void qc_alloc_fd(struct quic_conn *qc, const struct sockaddr_storage *src,
+ const struct sockaddr_storage *dst);
+void qc_release_fd(struct quic_conn *qc, int reinit);
+void qc_want_recv(struct quic_conn *qc);
+
+void quic_accept_push_qc(struct quic_conn *qc);
+
+int quic_listener_max_handshake(const struct listener *l);
+int quic_listener_max_accept(const struct listener *l);
+
+/* Set default value for <qc> socket as uninitialized. */
+static inline void qc_init_fd(struct quic_conn *qc)
+{
+ qc->fd = -1;
+}
+
+/* Returns true if <qc> socket is initialized else false. */
+static inline char qc_test_fd(struct quic_conn *qc)
+{
+ /* quic-conn socket should not be accessed once it has been released. */
+ BUG_ON(qc->fd == DEAD_FD_MAGIC);
+ return qc->fd >= 0;
+}
+
+/* Try to increment <l> handshake current counter. If listener limit is
+ * reached, incrementation is rejected and 0 is returned.
+ */
+static inline int quic_increment_curr_handshake(struct listener *l)
+{
+ unsigned int count, next;
+ const int max = quic_listener_max_handshake(l);
+
+ do {
+ count = l->rx.quic_curr_handshake;
+ if (count >= max) {
+ /* maxconn reached */
+ next = 0;
+ goto end;
+ }
+
+ /* try to increment quic_curr_handshake */
+ next = count + 1;
+ } while (!_HA_ATOMIC_CAS(&l->rx.quic_curr_handshake, &count, next) && __ha_cpu_relax());
+
+ end:
+ return next;
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_SOCK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/quic_ssl-t.h b/include/haproxy/quic_ssl-t.h
new file mode 100644
index 0000000..3c057c6
--- /dev/null
+++ b/include/haproxy/quic_ssl-t.h
@@ -0,0 +1,21 @@
+/*
+ * include/haproxy/quic_ssl-t.h
+ * Definitions for QUIC over TLS/SSL api types, constants and flags.
+ *
+ * Copyright (C) 2023
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_QUIC_SSL_T_H
+#define _HAPROXY_QUIC_SSL_T_H
+
+#include <haproxy/pool-t.h>
+
+extern struct pool_head *pool_head_quic_ssl_sock_ctx;
+
+#endif /* _HAPROXY_QUIC_SSL_T_H */
diff --git a/include/haproxy/quic_ssl.h b/include/haproxy/quic_ssl.h
new file mode 100644
index 0000000..8f7df47
--- /dev/null
+++ b/include/haproxy/quic_ssl.h
@@ -0,0 +1,55 @@
+/*
+ * include/haproxy/quic_ssl.h
+ * This file contains QUIC over TLS/SSL api definitions.
+ *
+ * Copyright (C) 2023
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _HAPROXY_QUIC_SSL_H
+#define _HAPROXY_QUIC_SSL_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/listener-t.h>
+#include <haproxy/ncbuf-t.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/pool.h>
+#include <haproxy/quic_ssl-t.h>
+#include <haproxy/ssl_sock-t.h>
+
+int ssl_quic_initial_ctx(struct bind_conf *bind_conf);
+int qc_alloc_ssl_sock_ctx(struct quic_conn *qc);
+int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
+ enum ssl_encryption_level_t level,
+ struct ssl_sock_ctx *ctx,
+ const unsigned char *data, size_t len);
+int qc_ssl_provide_all_quic_data(struct quic_conn *qc, struct ssl_sock_ctx *ctx);
+
+static inline void qc_free_ssl_sock_ctx(struct ssl_sock_ctx **ctx)
+{
+ if (!*ctx)
+ return;
+
+ SSL_free((*ctx)->ssl);
+ pool_free(pool_head_quic_ssl_sock_ctx, *ctx);
+ *ctx = NULL;
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_SSL_H */
diff --git a/include/haproxy/quic_stats-t.h b/include/haproxy/quic_stats-t.h
new file mode 100644
index 0000000..1ee6265
--- /dev/null
+++ b/include/haproxy/quic_stats-t.h
@@ -0,0 +1,105 @@
+#ifndef _HAPROXY_QUIC_STATS_T_H
+#define _HAPROXY_QUIC_STATS_T_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+extern struct stats_module quic_stats_module;
+
+enum {
+ QUIC_ST_RXBUF_FULL,
+ QUIC_ST_DROPPED_PACKET,
+ QUIC_ST_DROPPED_PACKET_BUFOVERRUN,
+ QUIC_ST_DROPPED_PARSING,
+ QUIC_ST_SOCKET_FULL,
+ QUIC_ST_SENDTO_ERR,
+ QUIC_ST_SENDTO_ERR_UNKNWN,
+ QUIC_ST_SENT_PACKET,
+ QUIC_ST_LOST_PACKET,
+ QUIC_ST_TOO_SHORT_INITIAL_DGRAM,
+ QUIC_ST_RETRY_SENT,
+ QUIC_ST_RETRY_VALIDATED,
+ QUIC_ST_RETRY_ERRORS,
+ QUIC_ST_HALF_OPEN_CONN,
+ QUIC_ST_HDSHK_FAIL,
+ QUIC_ST_STATELESS_RESET_SENT,
+ /* Special events of interest */
+ QUIC_ST_CONN_MIGRATION_DONE,
+ /* Transport errors */
+ QUIC_ST_TRANSP_ERR_NO_ERROR,
+ QUIC_ST_TRANSP_ERR_INTERNAL_ERROR,
+ QUIC_ST_TRANSP_ERR_CONNECTION_REFUSED,
+ QUIC_ST_TRANSP_ERR_FLOW_CONTROL_ERROR,
+ QUIC_ST_TRANSP_ERR_STREAM_LIMIT_ERROR,
+ QUIC_ST_TRANSP_ERR_STREAM_STATE_ERROR,
+ QUIC_ST_TRANSP_ERR_FINAL_SIZE_ERROR,
+ QUIC_ST_TRANSP_ERR_FRAME_ENCODING_ERROR,
+ QUIC_ST_TRANSP_ERR_TRANSPORT_PARAMETER_ERROR,
+ QUIC_ST_TRANSP_ERR_CONNECTION_ID_LIMIT_ERROR,
+ QUIC_ST_TRANSP_ERR_PROTOCOL_VIOLATION,
+ QUIC_ST_TRANSP_ERR_INVALID_TOKEN,
+ QUIC_ST_TRANSP_ERR_APPLICATION_ERROR,
+ QUIC_ST_TRANSP_ERR_CRYPTO_BUFFER_EXCEEDED,
+ QUIC_ST_TRANSP_ERR_KEY_UPDATE_ERROR,
+ QUIC_ST_TRANSP_ERR_AEAD_LIMIT_REACHED,
+ QUIC_ST_TRANSP_ERR_NO_VIABLE_PATH,
+ QUIC_ST_TRANSP_ERR_CRYPTO_ERROR,
+ QUIC_ST_TRANSP_ERR_UNKNOWN_ERROR,
+ /* Stream related counters */
+ QUIC_ST_DATA_BLOCKED,
+ QUIC_ST_STREAM_DATA_BLOCKED,
+ QUIC_ST_STREAMS_BLOCKED_BIDI,
+ QUIC_ST_STREAMS_BLOCKED_UNI,
+ QUIC_STATS_COUNT /* must be the last */
+};
+
+/* Per-thread (or per-listener) QUIC counters, one field per QUIC_ST_* id. */
+struct quic_counters {
+	long long rxbuf_full;              /* receive operation cancelled due to full buffer */
+	long long dropped_pkt;             /* total number of dropped packets */
+	long long dropped_pkt_bufoverrun;/* total number of dropped packets because of buffer overrun */
+	long long dropped_parsing;         /* total number of dropped packets upon parsing errors */
+	long long socket_full;             /* total number of EAGAIN errors on sendto() calls */
+	long long sendto_err;              /* total number of errors on sendto() calls, EAGAIN excepted */
+	long long sendto_err_unknown;      /* total number of errors on sendto() calls which are currently not supported */
+	long long sent_pkt;                /* total number of sent packets */
+	long long lost_pkt;                /* total number of lost packets */
+	long long too_short_initial_dgram; /* total number of too short datagrams with Initial packets */
+	long long retry_sent;              /* total number of Retry sent */
+	long long retry_validated;         /* total number of validated Retry tokens */
+	long long retry_error;             /* total number of Retry token errors */
+	long long half_open_conn;          /* current number of connections waiting for address validation */
+	long long hdshk_fail;              /* total number of handshake failures */
+	long long stateless_reset_sent;    /* total number of stateless reset packets sent */
+	/* Special events of interest */
+	long long conn_migration_done;     /* total number of connection migration handled */
+	/* Transport errors */
+	long long quic_transp_err_no_error; /* total number of NO_ERROR connection errors */
+	long long quic_transp_err_internal_error; /* total number of INTERNAL_ERROR connection errors */
+	long long quic_transp_err_connection_refused; /* total number of CONNECTION_REFUSED connection errors */
+	long long quic_transp_err_flow_control_error; /* total number of FLOW_CONTROL_ERROR connection errors */
+	long long quic_transp_err_stream_limit_error; /* total number of STREAM_LIMIT_ERROR connection errors */
+	long long quic_transp_err_stream_state_error; /* total number of STREAM_STATE_ERROR connection errors */
+	long long quic_transp_err_final_size_error; /* total number of FINAL_SIZE_ERROR connection errors */
+	long long quic_transp_err_frame_encoding_error; /* total number of FRAME_ENCODING_ERROR connection errors */
+	long long quic_transp_err_transport_parameter_error; /* total number of TRANSPORT_PARAMETER_ERROR connection errors */
+	long long quic_transp_err_connection_id_limit; /* total number of CONNECTION_ID_LIMIT_ERROR connection errors */
+	long long quic_transp_err_protocol_violation; /* total number of PROTOCOL_VIOLATION connection errors */
+	long long quic_transp_err_invalid_token; /* total number of INVALID_TOKEN connection errors */
+	long long quic_transp_err_application_error; /* total number of APPLICATION_ERROR connection errors */
+	long long quic_transp_err_crypto_buffer_exceeded; /* total number of CRYPTO_BUFFER_EXCEEDED connection errors */
+	long long quic_transp_err_key_update_error; /* total number of KEY_UPDATE_ERROR connection errors */
+	long long quic_transp_err_aead_limit_reached; /* total number of AEAD_LIMIT_REACHED connection errors */
+	long long quic_transp_err_no_viable_path; /* total number of NO_VIABLE_PATH connection errors */
+	long long quic_transp_err_crypto_error; /* total number of CRYPTO_ERROR connection errors */
+	long long quic_transp_err_unknown_error; /* total number of UNKNOWN_ERROR connection errors */
+	/* Streams related counters */
+	long long data_blocked;            /* total number of times DATA_BLOCKED frame was received */
+	long long stream_data_blocked;     /* total number of times STREAM_DATA_BLOCKED frame was received */
+	long long streams_blocked_bidi;    /* total number of times STREAMS_BLOCKED_BIDI frame was received */
+	long long streams_blocked_uni;     /* total number of times STREAMS_BLOCKED_UNI frame was received */
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_STATS_T_H */
diff --git a/include/haproxy/quic_stats.h b/include/haproxy/quic_stats.h
new file mode 100644
index 0000000..b2a8dec
--- /dev/null
+++ b/include/haproxy/quic_stats.h
@@ -0,0 +1,14 @@
+#ifndef _HAPROXY_QUIC_STATS_H
+#define _HAPROXY_QUIC_STATS_H
+
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/quic_stats-t.h>
+
+void quic_stats_transp_err_count_inc(struct quic_counters *ctrs, int error_code);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_STATS_H */
diff --git a/include/haproxy/quic_stream-t.h b/include/haproxy/quic_stream-t.h
new file mode 100644
index 0000000..e10ca6d
--- /dev/null
+++ b/include/haproxy/quic_stream-t.h
@@ -0,0 +1,48 @@
+#ifndef _HAPROXY_QUIC_STREAM_T_H_
+#define _HAPROXY_QUIC_STREAM_T_H_
+
+#ifdef USE_QUIC
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/list-t.h>
+
+/* A QUIC STREAM buffer used for Tx.
+ *
+ * Currently, no offset is stored with each individual buffer: the
+ * qc_stream_desc must store the buffers in order and only keep the offset of
+ * the oldest one. As a consequence the buffers can be freed in strict order
+ * only.
+ */
+struct qc_stream_buf {
+	struct buffer buf; /* STREAM payload */
+	struct list list; /* element for qc_stream_desc list */
+};
+
+/* QUIC STREAM descriptor.
+ *
+ * This structure is the low-level counterpart of the QUIC STREAM at the MUX
+ * layer. It is stored in the quic-conn and provides facility for Tx buffering.
+ *
+ * Once the MUX has finished transferring data on a STREAM, it must release
+ * its QUIC STREAM descriptor. The descriptor will be kept by the quic_conn
+ * until all acknowledgements have been received.
+ */
+struct qc_stream_desc {
+ struct eb64_node by_id; /* node for quic_conn tree */
+ struct quic_conn *qc;
+
+ struct list buf_list; /* buffers waiting for ACK, oldest offset first */
+ struct qc_stream_buf *buf; /* current buffer used by the MUX */
+ uint64_t buf_offset; /* base offset of current buffer */
+
+ uint64_t ack_offset; /* last acknowledged offset */
+ struct eb_root acked_frms; /* ACK frames tree for non-contiguous ACK ranges */
+
+ int release; /* set to 1 when the MUX has finished to use this stream */
+
+ void *ctx; /* MUX specific context */
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_STREAM_T_H_ */
diff --git a/include/haproxy/quic_stream.h b/include/haproxy/quic_stream.h
new file mode 100644
index 0000000..4489728
--- /dev/null
+++ b/include/haproxy/quic_stream.h
@@ -0,0 +1,23 @@
+#ifndef _HAPROXY_QUIC_STREAM_H_
+#define _HAPROXY_QUIC_STREAM_H_
+
+#ifdef USE_QUIC
+
+#include <haproxy/mux_quic-t.h>
+#include <haproxy/quic_stream-t.h>
+
+struct quic_conn;
+
+struct qc_stream_desc *qc_stream_desc_new(uint64_t id, enum qcs_type, void *ctx,
+ struct quic_conn *qc);
+void qc_stream_desc_release(struct qc_stream_desc *stream, uint64_t final_size);
+int qc_stream_desc_ack(struct qc_stream_desc **stream, size_t offset, size_t len);
+void qc_stream_desc_free(struct qc_stream_desc *stream, int closing);
+
+struct buffer *qc_stream_buf_get(struct qc_stream_desc *stream);
+struct buffer *qc_stream_buf_alloc(struct qc_stream_desc *stream,
+ uint64_t offset, int *avail);
+void qc_stream_buf_release(struct qc_stream_desc *stream);
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_STREAM_H_ */
diff --git a/include/haproxy/quic_tls-t.h b/include/haproxy/quic_tls-t.h
new file mode 100644
index 0000000..ae65149
--- /dev/null
+++ b/include/haproxy/quic_tls-t.h
@@ -0,0 +1,283 @@
+/*
+ * include/types/quic_tls.h
+ * This file provides definitions for QUIC-TLS.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _TYPES_QUIC_TLS_H
+#define _TYPES_QUIC_TLS_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <openssl/evp.h>
+
+#include <import/ebtree.h>
+
+#include <haproxy/ncbuf-t.h>
+#include <haproxy/quic_ack-t.h>
+#include <haproxy/openssl-compat.h>
+
+/* It seems TLS 1.3 ciphersuites macros differ between openssl and boringssl */
+
+#if defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC)
+#if !defined(TLS1_3_CK_AES_128_GCM_SHA256)
+#define TLS1_3_CK_AES_128_GCM_SHA256 TLS1_CK_AES_128_GCM_SHA256
+#endif
+#if !defined(TLS1_3_CK_AES_256_GCM_SHA384)
+#define TLS1_3_CK_AES_256_GCM_SHA384 TLS1_CK_AES_256_GCM_SHA384
+#endif
+#if !defined(TLS1_3_CK_CHACHA20_POLY1305_SHA256)
+#define TLS1_3_CK_CHACHA20_POLY1305_SHA256 TLS1_CK_CHACHA20_POLY1305_SHA256
+#endif
+#if !defined(TLS1_3_CK_AES_128_CCM_SHA256)
+/* Note that TLS1_CK_AES_128_CCM_SHA256 is not defined in boringssl */
+#define TLS1_3_CK_AES_128_CCM_SHA256 0x03001304
+#endif
+#endif
+
+/* AEAD IV and secret key lengths */
+#define QUIC_TLS_IV_LEN 12 /* bytes */
+#define QUIC_TLS_KEY_LEN 32 /* bytes */
+#define QUIC_TLS_SECRET_LEN 48 /* bytes */
+/* The ciphersuites for AEAD QUIC-TLS have 16-bytes authentication tags */
+#define QUIC_TLS_TAG_LEN 16 /* bytes */
+
+/* The TLS extensions for QUIC transport parameters */
+#define TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS 0x0039
+#define TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS_DRAFT 0xffa5
+
+extern struct pool_head *pool_head_quic_pktns;
+extern struct pool_head *pool_head_quic_enc_level;
+extern struct pool_head *pool_head_quic_tls_ctx;
+extern struct pool_head *pool_head_quic_tls_secret;
+extern struct pool_head *pool_head_quic_tls_iv;
+extern struct pool_head *pool_head_quic_tls_key;
+
+#define QUIC_HKDF_KEY_LABEL_V1 "quic key"
+#define QUIC_HKDF_IV_LABEL_V1 "quic iv"
+#define QUIC_HKDF_HP_LABEL_V1 "quic hp"
+#define QUIC_HKDF_KU_LABEL_V1 "quic ku"
+
+#define QUIC_HKDF_KEY_LABEL_V2 "quicv2 key"
+#define QUIC_HKDF_IV_LABEL_V2 "quicv2 iv"
+#define QUIC_HKDF_HP_LABEL_V2 "quicv2 hp"
+#define QUIC_HKDF_KU_LABEL_V2 "quicv2 ku"
+
+#define QUIC_TLS_RETRY_KEY_DRAFT \
+ "\xcc\xce\x18\x7e\xd0\x9a\x09\xd0\x57\x28\x15\x5a\x6c\xb9\x6b\xe1"
+#define QUIC_TLS_RETRY_NONCE_DRAFT \
+ "\xe5\x49\x30\xf9\x7f\x21\x36\xf0\x53\x0a\x8c\x1c"
+#define QUIC_TLS_RETRY_KEY_V1 \
+ "\xbe\x0c\x69\x0b\x9f\x66\x57\x5a\x1d\x76\x6b\x54\xe3\x68\xc8\x4e"
+#define QUIC_TLS_RETRY_NONCE_V1 \
+ "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb"
+#define QUIC_TLS_RETRY_KEY_V2 \
+ "\x8f\xb4\xb0\x1b\x56\xac\x48\xe2\x60\xfb\xcb\xce\xad\x7c\xcc\x92"
+#define QUIC_TLS_RETRY_NONCE_V2 \
+ "\xd8\x69\x69\xbc\x2d\x7c\x6d\x99\x90\xef\xb0\x4a"
+
+/* QUIC handshake states for both clients and servers. */
+enum quic_handshake_state {
+ QUIC_HS_ST_CLIENT_HANDSHAKE_FAILED,
+ QUIC_HS_ST_SERVER_HANDSHAKE_FAILED,
+
+ QUIC_HS_ST_CLIENT_INITIAL,
+ QUIC_HS_ST_CLIENT_HANDSHAKE,
+
+ QUIC_HS_ST_SERVER_INITIAL,
+ QUIC_HS_ST_SERVER_HANDSHAKE,
+
+ /* Common to servers and clients */
+ QUIC_HS_ST_COMPLETE,
+ QUIC_HS_ST_CONFIRMED,
+};
+
+/* QUIC TLS level encryption */
+enum quic_tls_enc_level {
+ QUIC_TLS_ENC_LEVEL_NONE = -1,
+ QUIC_TLS_ENC_LEVEL_INITIAL,
+ QUIC_TLS_ENC_LEVEL_EARLY_DATA,
+ QUIC_TLS_ENC_LEVEL_HANDSHAKE,
+ QUIC_TLS_ENC_LEVEL_APP,
+ /* Please do not insert any value after this following one */
+ QUIC_TLS_ENC_LEVEL_MAX,
+};
+
+/* QUIC packet number spaces */
+enum quic_tls_pktns {
+ QUIC_TLS_PKTNS_INITIAL,
+ QUIC_TLS_PKTNS_HANDSHAKE,
+ QUIC_TLS_PKTNS_01RTT,
+ /* Please do not insert any value after this following one */
+ QUIC_TLS_PKTNS_MAX,
+};
+
+extern unsigned char initial_salt[20];
+extern const unsigned char initial_salt_draft_29[20];
+extern const unsigned char initial_salt_v1[20];
+extern const unsigned char initial_salt_v2[20];
+
+/* QUIC packet number space */
+struct quic_pktns {
+	struct list list;
+	struct {
+		/* List of frames to send. */
+		struct list frms;
+		/* Next packet number to use for transmissions. */
+		int64_t next_pn;
+		/* The packets which have been sent. */
+		struct eb_root pkts;
+		/* The time the most recent ack-eliciting packet was sent. */
+		unsigned int time_of_last_eliciting;
+		/* The time this packet number space has experienced packet loss. */
+		unsigned int loss_time;
+		/* Boolean to denote if we must send probe packet. */
+		unsigned int pto_probe;
+		/* In flight bytes for this packet number space. */
+		size_t in_flight;
+		/* The acknowledgement delay of the packet with the largest packet number */
+		uint64_t ack_delay;
+	} tx;
+	struct {
+		/* Largest packet number */
+		int64_t largest_pn;
+		/* Largest acked sent packet. */
+		int64_t largest_acked_pn;
+		struct quic_arngs arngs;
+		/* presumably the number of ack-eliciting packets received since the
+		 * last ACK was emitted -- TODO confirm against users of this field.
+		 */
+		unsigned int nb_aepkts_since_last_ack;
+		/* The time the packet with the largest packet number was received */
+		uint64_t largest_time_received;
+	} rx;
+	unsigned int flags;
+};
+
+/* Key phase used for Key Update */
+struct quic_tls_kp {
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *secret;
+ size_t secretlen;
+ unsigned char *iv;
+ size_t ivlen;
+ unsigned char *key;
+ size_t keylen;
+ uint64_t count;
+ int64_t pn;
+ unsigned char flags;
+};
+
+/* Key update phase bit */
+#define QUIC_FL_TLS_KP_BIT_SET (1 << 0)
+
+struct quic_tls_secrets {
+ EVP_CIPHER_CTX *ctx;
+ const EVP_CIPHER *aead;
+ const EVP_MD *md;
+ EVP_CIPHER_CTX *hp_ctx;
+ const EVP_CIPHER *hp;
+ unsigned char *secret;
+ size_t secretlen;
+ /* Header protection key.
+ * Note: the header protection is applied after packet protection.
+ * As the header belong to the data, its protection must be removed before removing
+ * the packet protection.
+ */
+ unsigned char hp_key[32];
+ unsigned char *iv;
+ size_t ivlen;
+ unsigned char *key;
+ size_t keylen;
+ /* Used only on the RX part to store the largest received packet number */
+ int64_t pn;
+};
+
+struct quic_tls_ctx {
+ struct quic_tls_secrets rx;
+ struct quic_tls_secrets tx;
+ unsigned char flags;
+};
+
+#define QUIC_CRYPTO_BUF_SHIFT 10
+#define QUIC_CRYPTO_BUF_MASK ((1UL << QUIC_CRYPTO_BUF_SHIFT) - 1)
+/* The maximum allowed size of CRYPTO data buffer provided by the TLS stack. */
+#define QUIC_CRYPTO_BUF_SZ (1UL << QUIC_CRYPTO_BUF_SHIFT) /* 1 KB */
+
+extern struct pool_head *pool_head_quic_crypto_buf;
+
+/*
+ * CRYPTO buffer struct.
+ * Such buffers are used to send CRYPTO data.
+ */
+struct quic_crypto_buf {
+ unsigned char data[QUIC_CRYPTO_BUF_SZ];
+ size_t sz;
+};
+
+/* Crypto data stream (one by encryption level) */
+struct quic_cstream {
+ struct {
+ uint64_t offset; /* absolute current base offset of ncbuf */
+ struct ncbuf ncbuf; /* receive buffer - can handle out-of-order offset frames */
+ } rx;
+ struct {
+ uint64_t offset; /* last offset of data ready to be sent */
+ uint64_t sent_offset; /* last offset sent by transport layer */
+ struct buffer buf; /* transmit buffer before sending via xprt */
+ } tx;
+
+ struct qc_stream_desc *desc;
+};
+
+/* QUIC encryption level: per-level TLS context, RX packet storage and
+ * TX CRYPTO data buffering, plus the associated packet number space.
+ */
+struct quic_enc_level {
+	struct list list;
+	/* Attach point to enqueue this encryption level during retransmissions */
+	struct list retrans;
+	/* pointer to list used only during retransmissions */
+	struct list *retrans_frms;
+	/* Encryption level, as defined by the TLS stack. */
+	enum ssl_encryption_level_t level;
+	/* TLS encryption context (AEAD only) */
+	struct quic_tls_ctx tls_ctx;
+
+	/* RX part */
+	struct {
+		/* The packets received by the listener I/O handler
+		 * with header protection removed.
+		 */
+		struct eb_root pkts;
+		/* List of QUIC packets with protected header. */
+		struct list pqpkts;
+		/* List of crypto frames received in order. */
+		struct list crypto_frms;
+	} rx;
+
+	/* TX part */
+	struct {
+		struct {
+			/* Array of CRYPTO data buffers */
+			struct quic_crypto_buf **bufs;
+			/* The number of elements in use in the previous array. */
+			size_t nb_buf;
+			/* The total size of the CRYPTO data stored in the CRYPTO buffers. */
+			size_t sz;
+			/* The offset of the CRYPTO data stream. */
+			uint64_t offset;
+		} crypto;
+	} tx;
+
+	/* Crypto data stream */
+	struct quic_cstream *cstream;
+	/* Packet number space */
+	struct quic_pktns *pktns;
+};
+
+#endif /* USE_QUIC */
+#endif /* _TYPES_QUIC_TLS_H */
+
diff --git a/include/haproxy/quic_tls.h b/include/haproxy/quic_tls.h
new file mode 100644
index 0000000..86b8c1e
--- /dev/null
+++ b/include/haproxy/quic_tls.h
@@ -0,0 +1,1116 @@
+/*
+ * include/proto/quic_tls.h
+ * This file provides definitions for QUIC-TLS.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROTO_QUIC_TLS_H
+#define _PROTO_QUIC_TLS_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/dynbuf.h>
+#include <haproxy/pool.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_tls-t.h>
+#include <haproxy/quic_tx.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+int quic_tls_finalize(struct quic_conn *qc, int server);
+void quic_tls_ctx_free(struct quic_tls_ctx **ctx);
+void quic_pktns_release(struct quic_conn *qc, struct quic_pktns **pktns);
+int qc_enc_level_alloc(struct quic_conn *qc, struct quic_pktns **pktns,
+ struct quic_enc_level **qel, enum ssl_encryption_level_t level);
+void qc_enc_level_free(struct quic_conn *qc, struct quic_enc_level **qel);
+
+void quic_tls_keys_hexdump(struct buffer *buf,
+ const struct quic_tls_secrets *secs);
+void quic_tls_kp_keys_hexdump(struct buffer *buf,
+ const struct quic_tls_kp *kp);
+
+void quic_conn_enc_level_uninit(struct quic_conn *qc, struct quic_enc_level *qel);
+void quic_tls_secret_hexdump(struct buffer *buf,
+ const unsigned char *secret, size_t secret_len);
+
+int quic_derive_initial_secret(const EVP_MD *md,
+ const unsigned char *initial_salt, size_t initial_salt_sz,
+ unsigned char *initial_secret, size_t initial_secret_sz,
+ const unsigned char *secret, size_t secret_sz);
+
+int quic_tls_derive_initial_secrets(const EVP_MD *md,
+ unsigned char *rx, size_t rx_sz,
+ unsigned char *tx, size_t tx_sz,
+ const unsigned char *secret, size_t secret_sz,
+ int server);
+
+int quic_tls_encrypt(unsigned char *buf, size_t len,
+ const unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *aead,
+ const unsigned char *iv);
+
+int quic_tls_decrypt2(unsigned char *out,
+ unsigned char *in, size_t ilen,
+ unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *aead,
+ const unsigned char *key, const unsigned char *iv);
+
+int quic_tls_decrypt(unsigned char *buf, size_t len,
+ unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *tls_ctx, const EVP_CIPHER *aead,
+ const unsigned char *key, const unsigned char *iv);
+
+int quic_tls_generate_retry_integrity_tag(unsigned char *odcid, unsigned char odcid_len,
+ unsigned char *buf, size_t len,
+ const struct quic_version *qv);
+
+int quic_tls_derive_keys(const EVP_CIPHER *aead, const EVP_CIPHER *hp,
+ const EVP_MD *md, const struct quic_version *qv,
+ unsigned char *key, size_t keylen,
+ unsigned char *iv, size_t ivlen,
+ unsigned char *hp_key, size_t hp_keylen,
+ const unsigned char *secret, size_t secretlen);
+
+int quic_tls_derive_retry_token_secret(const EVP_MD *md,
+ unsigned char *key, size_t keylen,
+ unsigned char *iv, size_t ivlen,
+ const unsigned char *salt, size_t saltlen,
+ const unsigned char *secret, size_t secretlen);
+
+int quic_hkdf_expand(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *label, size_t labellen);
+
+int quic_hkdf_expand_label(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *label, size_t labellen);
+
+int quic_hkdf_extract_and_expand(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *salt, size_t saltlen,
+ const unsigned char *label, size_t labellen);
+
+int quic_tls_rx_ctx_init(EVP_CIPHER_CTX **rx_ctx,
+ const EVP_CIPHER *aead, unsigned char *key);
+int quic_tls_tx_ctx_init(EVP_CIPHER_CTX **tx_ctx,
+ const EVP_CIPHER *aead, unsigned char *key);
+
+int quic_tls_sec_update(const EVP_MD *md, const struct quic_version *qv,
+ unsigned char *new_sec, size_t new_seclen,
+ const unsigned char *sec, size_t seclen);
+
+void quic_aead_iv_build(unsigned char *iv, size_t ivlen,
+ unsigned char *aead_iv, size_t aead_ivlen, uint64_t pn);
+
+/* HP protection (AES) */
+int quic_tls_dec_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key);
+int quic_tls_enc_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key);
+int quic_tls_aes_decrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx);
+int quic_tls_aes_encrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx);
+
+int quic_tls_key_update(struct quic_conn *qc);
+void quic_tls_rotate_keys(struct quic_conn *qc);
+
+/* Return the EVP AEAD cipher to use for <cipher> TLS 1.3 ciphersuite, or
+ * NULL for ciphersuites not supported by the linked TLS library (ChaCha20
+ * and CCM availability depends on the library, see the #if guards).
+ */
+static inline const EVP_CIPHER *tls_aead(const SSL_CIPHER *cipher)
+{
+	switch (SSL_CIPHER_get_id(cipher)) {
+	case TLS1_3_CK_AES_128_GCM_SHA256:
+		return EVP_aes_128_gcm();
+	case TLS1_3_CK_AES_256_GCM_SHA384:
+		return EVP_aes_256_gcm();
+#if !defined(OPENSSL_IS_AWSLC)
+	case TLS1_3_CK_CHACHA20_POLY1305_SHA256:
+		return EVP_chacha20_poly1305();
+#endif
+#if !defined(USE_OPENSSL_WOLFSSL) && !defined(OPENSSL_IS_AWSLC)
+	case TLS1_3_CK_AES_128_CCM_SHA256:
+		return EVP_aes_128_ccm();
+#endif
+	default:
+		return NULL;
+	}
+}
+
+/* Return the TLS 1.3 handshake digest matching <cipher> ciphersuite:
+ * SHA-384 for AES-256-GCM, SHA-256 for the other supported suites,
+ * NULL for an unknown ciphersuite.
+ */
+static inline const EVP_MD *tls_md(const SSL_CIPHER *cipher)
+{
+	const uint32_t id = SSL_CIPHER_get_id(cipher);
+
+	if (id == TLS1_3_CK_AES_256_GCM_SHA384)
+		return EVP_sha384();
+
+	if (id == TLS1_3_CK_AES_128_GCM_SHA256 ||
+	    id == TLS1_3_CK_AES_128_CCM_SHA256 ||
+	    id == TLS1_3_CK_CHACHA20_POLY1305_SHA256)
+		return EVP_sha256();
+
+	return NULL;
+}
+
+/* Return the EVP cipher used for QUIC header protection with <cipher>
+ * TLS 1.3 ciphersuite (AES in CTR mode, or raw ChaCha20 when available),
+ * or NULL for an unknown ciphersuite.
+ */
+static inline const EVP_CIPHER *tls_hp(const SSL_CIPHER *cipher)
+{
+	switch (SSL_CIPHER_get_id(cipher)) {
+#if !defined(OPENSSL_IS_AWSLC)
+	case TLS1_3_CK_CHACHA20_POLY1305_SHA256:
+		return EVP_chacha20();
+#endif
+	case TLS1_3_CK_AES_128_CCM_SHA256:
+	case TLS1_3_CK_AES_128_GCM_SHA256:
+		return EVP_aes_128_ctr();
+	case TLS1_3_CK_AES_256_GCM_SHA384:
+		return EVP_aes_256_ctr();
+	default:
+		return NULL;
+	}
+
+}
+
+/* Return the address within <qc> of the packet number space pointer matching
+ * <level> TLS stack encryption level, or NULL for an unknown level. Note that
+ * early data (0-RTT) shares the application packet number space (RFC 9001),
+ * hence both map to <qc>->apktns.
+ */
+static inline struct quic_pktns **ssl_to_quic_pktns(struct quic_conn *qc,
+                                                    enum ssl_encryption_level_t level)
+{
+	switch (level) {
+	case ssl_encryption_initial:
+		return &qc->ipktns;
+	case ssl_encryption_early_data:
+		return &qc->apktns;
+	case ssl_encryption_handshake:
+		return &qc->hpktns;
+	case ssl_encryption_application:
+		return &qc->apktns;
+	default:
+		return NULL;
+	}
+}
+
+/* Return the address within <qc> of the packet number space pointer matching
+ * <level> internal encryption level, or NULL for an unknown level. Early data
+ * uses the application packet number space.
+ */
+static inline struct quic_pktns **qel_to_quic_pktns(struct quic_conn *qc,
+                                                    enum quic_tls_enc_level level)
+{
+	if (level == QUIC_TLS_ENC_LEVEL_INITIAL)
+		return &qc->ipktns;
+	if (level == QUIC_TLS_ENC_LEVEL_HANDSHAKE)
+		return &qc->hpktns;
+	if (level == QUIC_TLS_ENC_LEVEL_EARLY_DATA ||
+	    level == QUIC_TLS_ENC_LEVEL_APP)
+		return &qc->apktns;
+	return NULL;
+}
+
+/* Map <level> TLS stack encryption level to our internal QUIC TLS encryption
+ * level. Returns -1 for an unknown <level> value.
+ */
+static inline enum quic_tls_enc_level ssl_to_quic_enc_level(enum ssl_encryption_level_t level)
+{
+	switch (level) {
+	case ssl_encryption_initial:
+		return QUIC_TLS_ENC_LEVEL_INITIAL;
+	case ssl_encryption_early_data:
+		return QUIC_TLS_ENC_LEVEL_EARLY_DATA;
+	case ssl_encryption_handshake:
+		return QUIC_TLS_ENC_LEVEL_HANDSHAKE;
+	case ssl_encryption_application:
+		return QUIC_TLS_ENC_LEVEL_APP;
+	default:
+		/* matches QUIC_TLS_ENC_LEVEL_NONE (= -1) */
+		return -1;
+	}
+}
+
+/* Return the address within <qc> of the QUIC TLS encryption level pointer
+ * matching <level> TLS stack encryption level, or NULL for an unknown level.
+ */
+static inline struct quic_enc_level **ssl_to_qel_addr(struct quic_conn *qc,
+                                                      enum ssl_encryption_level_t level)
+{
+	switch (level) {
+	case ssl_encryption_initial:
+		return &qc->iel;
+	case ssl_encryption_early_data:
+		return &qc->eel;
+	case ssl_encryption_handshake:
+		return &qc->hel;
+	case ssl_encryption_application:
+		return &qc->ael;
+	default:
+		return NULL;
+	}
+}
+
+/* Return the address within <qc> of the QUIC TLS encryption level pointer
+ * matching <level> internal encryption level, or NULL for an unknown level.
+ */
+static inline struct quic_enc_level **qel_to_qel_addr(struct quic_conn *qc,
+                                                      enum quic_tls_enc_level level)
+{
+	if (level == QUIC_TLS_ENC_LEVEL_INITIAL)
+		return &qc->iel;
+	if (level == QUIC_TLS_ENC_LEVEL_EARLY_DATA)
+		return &qc->eel;
+	if (level == QUIC_TLS_ENC_LEVEL_HANDSHAKE)
+		return &qc->hel;
+	if (level == QUIC_TLS_ENC_LEVEL_APP)
+		return &qc->ael;
+	return NULL;
+}
+
+/* Return the QUIC TLS encryption level of <qc> matching <level> internal
+ * encryption level, or NULL for an unknown level. Note that the returned
+ * pointer may itself be NULL if the level has not been allocated yet.
+ */
+static inline struct quic_enc_level *qc_quic_enc_level(const struct quic_conn *qc,
+                                                       enum quic_tls_enc_level level)
+{
+	switch (level) {
+	case QUIC_TLS_ENC_LEVEL_INITIAL:
+		return qc->iel;
+	case QUIC_TLS_ENC_LEVEL_EARLY_DATA:
+		return qc->eel;
+	case QUIC_TLS_ENC_LEVEL_HANDSHAKE:
+		return qc->hel;
+	case QUIC_TLS_ENC_LEVEL_APP:
+		return qc->ael;
+	default:
+		return NULL;
+	}
+}
+
+/* Map our internal <level> QUIC TLS encryption level to the TLS stack one.
+ * NOTE(review): -1 is returned for an unknown level but is not a declared
+ * ssl_encryption_level_t value; callers are expected to never pass
+ * QUIC_TLS_ENC_LEVEL_NONE here -- confirm at call sites.
+ */
+static inline enum ssl_encryption_level_t quic_to_ssl_enc_level(enum quic_tls_enc_level level)
+{
+	switch (level) {
+	case QUIC_TLS_ENC_LEVEL_INITIAL:
+		return ssl_encryption_initial;
+	case QUIC_TLS_ENC_LEVEL_EARLY_DATA:
+		return ssl_encryption_early_data;
+	case QUIC_TLS_ENC_LEVEL_HANDSHAKE:
+		return ssl_encryption_handshake;
+	case QUIC_TLS_ENC_LEVEL_APP:
+		return ssl_encryption_application;
+	default:
+		return -1;
+	}
+}
+
+/* Return a human readable string from <state> QUIC handshake state or NULL
+ * for unknown state values (for debug purpose).
+ */
+static inline char *quic_hdshk_state_str(const enum quic_handshake_state state)
+{
+	switch (state) {
+	case QUIC_HS_ST_CLIENT_INITIAL:
+		return "CI";
+	case QUIC_HS_ST_CLIENT_HANDSHAKE:
+		return "CH";
+	case QUIC_HS_ST_CLIENT_HANDSHAKE_FAILED:
+		return "CF";
+	case QUIC_HS_ST_SERVER_INITIAL:
+		return "SI";
+	case QUIC_HS_ST_SERVER_HANDSHAKE:
+		return "SH";
+	case QUIC_HS_ST_SERVER_HANDSHAKE_FAILED:
+		return "SF";
+	case QUIC_HS_ST_COMPLETE:
+		return "HCP";
+	case QUIC_HS_ST_CONFIRMED:
+		return "HCF";
+	}
+
+	return NULL;
+}
+
+/* Return a human readable string from <err> SSL error (returned from
+ * SSL_get_error()). Never returns NULL: unknown or library-specific values
+ * map to "UNKNOWN". The WANT_ASYNC* values only exist on stock OpenSSL,
+ * hence the #if guard.
+ */
+static inline const char *ssl_error_str(int err)
+{
+	switch (err) {
+	case SSL_ERROR_NONE:
+		return "NONE";
+	case SSL_ERROR_SSL:
+		return "SSL";
+	case SSL_ERROR_WANT_READ:
+		return "WANT_READ";
+	case SSL_ERROR_WANT_WRITE:
+		return "WANT_WRITE";
+	case SSL_ERROR_WANT_X509_LOOKUP:
+		return "X509_LOOKUP";
+	case SSL_ERROR_SYSCALL:
+		return "SYSCALL";
+	case SSL_ERROR_ZERO_RETURN:
+		return "ZERO_RETURN";
+	case SSL_ERROR_WANT_CONNECT:
+		return "WANT_CONNECT";
+	case SSL_ERROR_WANT_ACCEPT:
+		return "WANT_ACCEPT";
+#if !defined(LIBRESSL_VERSION_NUMBER) && !defined(USE_OPENSSL_WOLFSSL) && !defined(OPENSSL_IS_AWSLC)
+	case SSL_ERROR_WANT_ASYNC:
+		return "WANT_ASYNC";
+	case SSL_ERROR_WANT_ASYNC_JOB:
+		return "WANT_ASYNC_JOB";
+	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
+		return "WANT_CLIENT_HELLO_CB";
+#endif
+	default:
+		return "UNKNOWN";
+	}
+}
+
+
+/* Return a character identifying the encryption level from <level> QUIC TLS
+ * encryption level (for debug purpose).
+ * Initial -> 'I', Early Data -> 'E', Handshake -> 'H', Application -> 'A' and
+ * '-' if undefined.
+ */
+static inline char quic_enc_level_char(enum quic_tls_enc_level level)
+{
+	if (level == QUIC_TLS_ENC_LEVEL_INITIAL)
+		return 'I';
+	if (level == QUIC_TLS_ENC_LEVEL_EARLY_DATA)
+		return 'E';
+	if (level == QUIC_TLS_ENC_LEVEL_HANDSHAKE)
+		return 'H';
+	if (level == QUIC_TLS_ENC_LEVEL_APP)
+		return 'A';
+	return '-';
+}
+
+/* Return a character identifying <qel> encryption level from <qc> QUIC
+ * connection (for debug purpose): 'I' for Initial, 'E' for Early Data,
+ * 'H' for Handshake, 'A' for Application and '-' when <qel> matches none.
+ */
+static inline char quic_enc_level_char_from_qel(const struct quic_enc_level *qel,
+                                                const struct quic_conn *qc)
+{
+	return qel == qc->iel ? 'I' :
+	       qel == qc->eel ? 'E' :
+	       qel == qc->hel ? 'H' :
+	       qel == qc->ael ? 'A' : '-';
+}
+
+/* Return a character identifying the encryption level of a packet depending on
+ * its <packet_type> type (for debug purpose).
+ * Initial -> 'I', 0-RTT -> '0', Handshake -> 'H', Application -> 'A' and
+ * '-' if undefined.
+ */
+static inline char quic_packet_type_enc_level_char(int packet_type)
+{
+	switch (packet_type) {
+	case QUIC_PACKET_TYPE_INITIAL:
+		return 'I';
+	case QUIC_PACKET_TYPE_0RTT:
+		return '0';
+	case QUIC_PACKET_TYPE_HANDSHAKE:
+		return 'H';
+	case QUIC_PACKET_TYPE_SHORT:
+		return 'A';
+	default:
+		return '-';
+	}
+}
+
+/* Allocate and initialize a QUIC packet number space, store it at <p> and
+ * attach it to <qc> packet number space list.
+ * Returns 1 on success, 0 on allocation failure (in which case <*p> is left
+ * untouched).
+ */
+static inline int quic_pktns_init(struct quic_conn *qc, struct quic_pktns **p)
+{
+	struct quic_pktns *pktns;
+
+	pktns = pool_alloc(pool_head_quic_pktns);
+	if (!pktns)
+		return 0;
+
+	LIST_INIT(&pktns->tx.frms);
+	pktns->tx.next_pn = -1;
+	pktns->tx.pkts = EB_ROOT_UNIQUE;
+	pktns->tx.time_of_last_eliciting = 0;
+	pktns->tx.loss_time = TICK_ETERNITY;
+	pktns->tx.pto_probe = 0;
+	pktns->tx.in_flight = 0;
+	pktns->tx.ack_delay = 0;
+
+	pktns->rx.largest_pn = -1;
+	pktns->rx.largest_acked_pn = -1;
+	pktns->rx.arngs.root = EB_ROOT_UNIQUE;
+	pktns->rx.arngs.sz = 0;
+	pktns->rx.arngs.enc_sz = 0;
+	pktns->rx.nb_aepkts_since_last_ack = 0;
+	pktns->rx.largest_time_received = 0;
+
+	pktns->flags = 0;
+	/* NOTE(review): when allocating the Handshake pktns after the
+	 * Application one, insert it right after the Initial pktns,
+	 * presumably to keep the list ordered by encryption level --
+	 * confirm against the list consumers.
+	 */
+	if (p == &qc->hpktns && qc->apktns)
+		LIST_INSERT(&qc->ipktns->list, &pktns->list);
+	else
+		LIST_APPEND(&qc->pktns_list, &pktns->list);
+	*p = pktns;
+
+	return 1;
+}
+
+/* Release all the TX packets registered in <pktns> packet number space,
+ * together with the frames they carry. For each ack-eliciting packet the
+ * <qc> path ifae_pkts counter is decremented, and each frame is unreferenced,
+ * detached and freed before the packet itself is deleted from the tree and
+ * its reference count decremented.
+ */
+static inline void quic_pktns_tx_pkts_release(struct quic_pktns *pktns, struct quic_conn *qc)
+{
+	struct eb64_node *node;
+
+	TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
+
+	node = eb64_first(&pktns->tx.pkts);
+	while (node) {
+		struct quic_tx_packet *pkt;
+		struct quic_frame *frm, *frmbak;
+
+		pkt = eb64_entry(node, struct quic_tx_packet, pn_node);
+		node = eb64_next(node);
+		if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
+			qc->path->ifae_pkts--;
+		list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
+			TRACE_DEVEL("freeing frame from packet",
+			            QUIC_EV_CONN_PRSAFRM, qc, frm, &pkt->pn_node.key);
+			qc_frm_unref(frm, qc);
+			LIST_DEL_INIT(&frm->list);
+			/* drop the reference the frame holds on its own packet
+			 * (frm->pkt may differ from <pkt> -- e.g. after
+			 * retransmission; TODO confirm).
+			 */
+			quic_tx_packet_refdec(frm->pkt);
+			qc_frm_free(qc, &frm);
+		}
+		eb64_delete(&pkt->pn_node);
+		quic_tx_packet_refdec(pkt);
+	}
+
+	TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
+}
+
+/* Discard <pktns> packet number space attached to <qc> QUIC connection.
+ * Its loss information is reset. Deduce the outstanding bytes for this
+ * packet number space from the outstanding bytes for the path of this
+ * connection.
+ * Note that all the non acknowledged TX packets and their frames are freed.
+ * Always succeeds.
+ */
+static inline void quic_pktns_discard(struct quic_pktns *pktns,
+                                      struct quic_conn *qc)
+{
+	TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
+
+	if (pktns == qc->ipktns)
+		qc->flags |= QUIC_FL_CONN_IPKTNS_DCD;
+	else if (pktns == qc->hpktns)
+		qc->flags |= QUIC_FL_CONN_HPKTNS_DCD;
+	qc->path->in_flight -= pktns->tx.in_flight;
+	qc->path->prep_in_flight -= pktns->tx.in_flight;
+	qc->path->loss.pto_count = 0;
+
+	pktns->tx.time_of_last_eliciting = 0;
+	pktns->tx.loss_time = TICK_ETERNITY;
+	pktns->tx.pto_probe = 0;
+	pktns->tx.in_flight = 0;
+	quic_pktns_tx_pkts_release(pktns, qc);
+
+	TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
+}
+
+
+/* Release all the frames attached to <pktns> packet number space */
+static inline void qc_release_pktns_frms(struct quic_conn *qc,
+ struct quic_pktns *pktns)
+{
+ struct quic_frame *frm, *frmbak;
+
+ TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
+
+ if (!pktns)
+ goto leave;
+
+ list_for_each_entry_safe(frm, frmbak, &pktns->tx.frms, list)
+ qc_frm_free(qc, &frm);
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
+}
+
+/* Return 1 if <pktns> matches with the Application packet number space of
+ * <qc> connection which is common to the 0-RTT and 1-RTT encryption levels, 0
+ * if not (handshake packets).
+ */
+static inline int quic_application_pktns(struct quic_pktns *pktns, struct quic_conn *qc)
+{
+	return pktns == qc->apktns;
+}
+
+/* Returns the current largest acknowledged packet number if exists, -1 if not */
+static inline int64_t quic_pktns_get_largest_acked_pn(struct quic_pktns *pktns)
+{
+ struct eb64_node *ar = eb64_last(&pktns->rx.arngs.root);
+
+ if (!ar)
+ return -1;
+
+ return eb64_entry(ar, struct quic_arng_node, first)->last;
+}
+
+/* Return a character to identify the packet number space <pktns> of <qc> QUIC
+ * connection. 'I' for Initial packet number space, 'H' for Handshake packet
+ * space, and 'A' for Application data number space, or '-' if not found.
+ */
+static inline char quic_pktns_char(const struct quic_conn *qc,
+ const struct quic_pktns *pktns)
+{
+ if (pktns == qc->apktns)
+ return 'A';
+ else if (pktns == qc->hpktns)
+ return 'H';
+ else if (pktns == qc->ipktns)
+ return 'I';
+
+ return '-';
+}
+
+/* Return the TLS encryption level to be used for <packet_type>
+ * QUIC packet type.
+ * Returns -1 if there is no TLS encryption level for <packet_type>
+ * packet type.
+ */
+static inline enum quic_tls_enc_level quic_packet_type_enc_level(enum quic_pkt_type packet_type)
+{
+ switch (packet_type) {
+ case QUIC_PACKET_TYPE_INITIAL:
+ return QUIC_TLS_ENC_LEVEL_INITIAL;
+ case QUIC_PACKET_TYPE_0RTT:
+ return QUIC_TLS_ENC_LEVEL_EARLY_DATA;
+ case QUIC_PACKET_TYPE_HANDSHAKE:
+ return QUIC_TLS_ENC_LEVEL_HANDSHAKE;
+ case QUIC_PACKET_TYPE_RETRY:
+ return QUIC_TLS_ENC_LEVEL_NONE;
+ case QUIC_PACKET_TYPE_SHORT:
+ return QUIC_TLS_ENC_LEVEL_APP;
+ default:
+ return QUIC_TLS_ENC_LEVEL_NONE;
+ }
+}
+
+static inline enum quic_tls_pktns quic_tls_pktns(enum quic_tls_enc_level level)
+{
+ switch (level) {
+ case QUIC_TLS_ENC_LEVEL_INITIAL:
+ return QUIC_TLS_PKTNS_INITIAL;
+ case QUIC_TLS_ENC_LEVEL_EARLY_DATA:
+ case QUIC_TLS_ENC_LEVEL_APP:
+ return QUIC_TLS_PKTNS_01RTT;
+ case QUIC_TLS_ENC_LEVEL_HANDSHAKE:
+ return QUIC_TLS_PKTNS_HANDSHAKE;
+ default:
+ return -1;
+ }
+}
+
+/* Return 1 if <pktns> packet number space attached to <qc> connection has been discarded,
+ * 0 if not.
+ */
+static inline int quic_tls_pktns_is_dcd(struct quic_conn *qc, struct quic_pktns *pktns)
+{
+ if (pktns == qc->apktns)
+ return 0;
+
+ if ((pktns == qc->ipktns && (qc->flags & QUIC_FL_CONN_IPKTNS_DCD)) ||
+ (pktns == qc->hpktns && (qc->flags & QUIC_FL_CONN_HPKTNS_DCD)))
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if the packet number space attached to <qc> connection with <type> associated
+ * packet type has been discarded, 0 if not.
+ */
+static inline int quic_tls_pkt_type_pktns_dcd(struct quic_conn *qc, unsigned char type)
+{
+	if ((type == QUIC_PACKET_TYPE_INITIAL && (qc->flags & QUIC_FL_CONN_IPKTNS_DCD)) ||
+	    (type == QUIC_PACKET_TYPE_HANDSHAKE && (qc->flags & QUIC_FL_CONN_HPKTNS_DCD)))
+		return 1;
+
+	return 0;
+}
+
+/* Select the correct TLS cipher context to be used to decipher an RX packet
+ * with <type> as type and <version> as version and attached to <qc>
+ * connection from <qel> encryption level.
+ */
+static inline struct quic_tls_ctx *qc_select_tls_ctx(struct quic_conn *qc,
+                                                     struct quic_enc_level *qel,
+                                                     unsigned char type,
+                                                     const struct quic_version *version)
+{
+	return type != QUIC_PACKET_TYPE_INITIAL ? &qel->tls_ctx :
+		version == qc->negotiated_version ? qc->nictx : &qel->tls_ctx;
+}
+
+/* Reset all members of <ctx> to default values, ->hp_key[] excepted */
+static inline void quic_tls_ctx_reset(struct quic_tls_ctx *ctx)
+{
+ ctx->rx.ctx = NULL;
+ ctx->rx.aead = NULL;
+ ctx->rx.md = NULL;
+ ctx->rx.hp_ctx = NULL;
+ ctx->rx.hp = NULL;
+ ctx->rx.secret = NULL;
+ ctx->rx.secretlen = 0;
+ ctx->rx.iv = NULL;
+ ctx->rx.ivlen = 0;
+ ctx->rx.key = NULL;
+ ctx->rx.keylen = 0;
+ ctx->rx.pn = 0;
+
+ ctx->tx.ctx = NULL;
+ ctx->tx.aead = NULL;
+ ctx->tx.md = NULL;
+ ctx->tx.hp_ctx = NULL;
+ ctx->tx.hp = NULL;
+ ctx->tx.secret = NULL;
+ ctx->tx.secretlen = 0;
+ ctx->tx.iv = NULL;
+ ctx->tx.ivlen = 0;
+ ctx->tx.key = NULL;
+ ctx->tx.keylen = 0;
+ /* Not used on the TX path. */
+ ctx->tx.pn = 0;
+
+ ctx->flags = 0;
+}
+
+/* Erase and free the secrets for a QUIC encryption level with <ctx> as
+ * context.
+ * Always succeeds.
+ */
+static inline void quic_tls_ctx_secs_free(struct quic_tls_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ if (ctx->rx.iv) {
+ memset(ctx->rx.iv, 0, ctx->rx.ivlen);
+ ctx->rx.ivlen = 0;
+ }
+ if (ctx->rx.key) {
+ memset(ctx->rx.key, 0, ctx->rx.keylen);
+ ctx->rx.keylen = 0;
+ }
+ if (ctx->tx.iv) {
+ memset(ctx->tx.iv, 0, ctx->tx.ivlen);
+ ctx->tx.ivlen = 0;
+ }
+ if (ctx->tx.key) {
+ memset(ctx->tx.key, 0, ctx->tx.keylen);
+ ctx->tx.keylen = 0;
+ }
+
+ /* RX HP protection */
+ EVP_CIPHER_CTX_free(ctx->rx.hp_ctx);
+ /* RX AEAD decryption */
+ EVP_CIPHER_CTX_free(ctx->rx.ctx);
+ pool_free(pool_head_quic_tls_iv, ctx->rx.iv);
+ pool_free(pool_head_quic_tls_key, ctx->rx.key);
+
+ /* TX HP protection */
+ EVP_CIPHER_CTX_free(ctx->tx.hp_ctx);
+ /* TX AEAD encryption */
+ EVP_CIPHER_CTX_free(ctx->tx.ctx);
+ pool_free(pool_head_quic_tls_iv, ctx->tx.iv);
+ pool_free(pool_head_quic_tls_key, ctx->tx.key);
+
+ quic_tls_ctx_reset(ctx);
+}
+
+/* Allocate the secret keys for a QUIC encryption level with <ctx> as context.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static inline int quic_tls_ctx_keys_alloc(struct quic_tls_ctx *ctx)
+{
+	if (ctx->rx.key)
+		goto write;
+
+	if (!(ctx->rx.iv = pool_alloc(pool_head_quic_tls_iv)) ||
+	    !(ctx->rx.key = pool_alloc(pool_head_quic_tls_key)))
+		goto err;
+
+ write:
+	if (ctx->tx.key)
+		goto out;
+
+	if (!(ctx->tx.iv = pool_alloc(pool_head_quic_tls_iv)) ||
+	    !(ctx->tx.key = pool_alloc(pool_head_quic_tls_key)))
+		goto err;
+
+	ctx->rx.ivlen = ctx->tx.ivlen = QUIC_TLS_IV_LEN;
+	ctx->rx.keylen = ctx->tx.keylen = QUIC_TLS_KEY_LEN;
+out:
+	return 1;
+
+ err:
+	quic_tls_ctx_secs_free(ctx);
+	return 0;
+}
+
+/* Release the memory allocated for <secs> secrets */
+static inline void quic_tls_secrets_keys_free(struct quic_tls_secrets *secs)
+{
+ if (secs->iv) {
+ memset(secs->iv, 0, secs->ivlen);
+ secs->ivlen = 0;
+ }
+
+ if (secs->key) {
+ memset(secs->key, 0, secs->keylen);
+ secs->keylen = 0;
+ }
+
+ /* HP protection */
+ EVP_CIPHER_CTX_free(secs->hp_ctx);
+ /* AEAD decryption */
+ EVP_CIPHER_CTX_free(secs->ctx);
+ pool_free(pool_head_quic_tls_iv, secs->iv);
+ pool_free(pool_head_quic_tls_key, secs->key);
+
+ secs->iv = secs->key = NULL;
+}
+
+/* Allocate the memory for the <secs> secrets.
+ * Return 1 if succeeded, 0 if not.
+ */
+static inline int quic_tls_secrets_keys_alloc(struct quic_tls_secrets *secs)
+{
+ if (!(secs->iv = pool_alloc(pool_head_quic_tls_iv)) ||
+ !(secs->key = pool_alloc(pool_head_quic_tls_key)))
+ goto err;
+
+ secs->ivlen = QUIC_TLS_IV_LEN;
+ secs->keylen = QUIC_TLS_KEY_LEN;
+
+ return 1;
+
+ err:
+ quic_tls_secrets_keys_free(secs);
+ return 0;
+}
+
+/* Release the memory allocated for the negotiated Initial QUIC TLS context
+ * attached to <qc> connection.
+ */
+static inline void quic_nictx_free(struct quic_conn *qc)
+{
+ quic_tls_ctx_secs_free(qc->nictx);
+ pool_free(pool_head_quic_tls_ctx, qc->nictx);
+ qc->nictx = NULL;
+}
+
+/* Initialize a TLS cryptographic context for the Initial encryption level. */
+static inline int quic_initial_tls_ctx_init(struct quic_tls_ctx *ctx)
+{
+ ctx->rx.aead = ctx->tx.aead = EVP_aes_128_gcm();
+ ctx->rx.md = ctx->tx.md = EVP_sha256();
+ ctx->rx.hp = ctx->tx.hp = EVP_aes_128_ctr();
+
+ ctx->rx.iv = NULL;
+ ctx->rx.ivlen = 0;
+ ctx->rx.key = NULL;
+ ctx->rx.keylen = 0;
+ ctx->rx.secret = NULL;
+ ctx->rx.secretlen = 0;
+
+ ctx->tx.iv = NULL;
+ ctx->tx.ivlen = 0;
+ ctx->tx.key = NULL;
+ ctx->tx.keylen = 0;
+ ctx->tx.secret = NULL;
+ ctx->tx.secretlen = 0;
+
+ return quic_tls_ctx_keys_alloc(ctx);
+}
+
+static inline int quic_tls_level_pkt_type(enum quic_tls_enc_level level)
+{
+ switch (level) {
+ case QUIC_TLS_ENC_LEVEL_INITIAL:
+ return QUIC_PACKET_TYPE_INITIAL;
+ case QUIC_TLS_ENC_LEVEL_EARLY_DATA:
+ return QUIC_PACKET_TYPE_0RTT;
+ case QUIC_TLS_ENC_LEVEL_HANDSHAKE:
+ return QUIC_PACKET_TYPE_HANDSHAKE;
+ case QUIC_TLS_ENC_LEVEL_APP:
+ return QUIC_PACKET_TYPE_SHORT;
+ default:
+ return -1;
+ }
+}
+
+/* Return the packet type associated to <qel> encryption for <qc> QUIC connection,
+ * or -1 if not found.
+ */
+static inline enum quic_pkt_type quic_enc_level_pkt_type(struct quic_conn *qc,
+ struct quic_enc_level *qel)
+{
+ if (qel == qc->iel)
+ return QUIC_PACKET_TYPE_INITIAL;
+ else if (qel == qc->hel)
+ return QUIC_PACKET_TYPE_HANDSHAKE;
+ else if (qel == qc->eel)
+ return QUIC_PACKET_TYPE_0RTT;
+ else if (qel == qc->ael)
+ return QUIC_PACKET_TYPE_SHORT;
+ else
+ return -1;
+}
+
+/* Derive the initial secrets with <ctx> as QUIC TLS context which is the
+ * cryptographic context for the first encryption level (Initial) from
+ * <cid> connection ID with <cidlen> as length (in bytes) for a server or not
+ * depending on <server> boolean value.
+ * Return 1 if succeeded or 0 if not.
+ */
+static inline int qc_new_isecs(struct quic_conn *qc,
+ struct quic_tls_ctx *ctx, const struct quic_version *ver,
+ const unsigned char *cid, size_t cidlen, int server)
+{
+ unsigned char initial_secret[32];
+ /* Initial secret to be derived for incoming packets */
+ unsigned char rx_init_sec[32];
+ /* Initial secret to be derived for outgoing packets */
+ unsigned char tx_init_sec[32];
+ struct quic_tls_secrets *rx_ctx, *tx_ctx;
+
+ TRACE_ENTER(QUIC_EV_CONN_ISEC);
+ if (!quic_initial_tls_ctx_init(ctx))
+ goto err;
+
+ if (!quic_derive_initial_secret(ctx->rx.md,
+ ver->initial_salt, ver->initial_salt_len,
+ initial_secret, sizeof initial_secret,
+ cid, cidlen))
+ goto err;
+
+ if (!quic_tls_derive_initial_secrets(ctx->rx.md,
+ rx_init_sec, sizeof rx_init_sec,
+ tx_init_sec, sizeof tx_init_sec,
+ initial_secret, sizeof initial_secret, server))
+ goto err;
+
+ rx_ctx = &ctx->rx;
+ tx_ctx = &ctx->tx;
+ if (!quic_tls_derive_keys(ctx->rx.aead, ctx->rx.hp, ctx->rx.md, ver,
+ rx_ctx->key, rx_ctx->keylen,
+ rx_ctx->iv, rx_ctx->ivlen,
+ rx_ctx->hp_key, sizeof rx_ctx->hp_key,
+ rx_init_sec, sizeof rx_init_sec))
+ goto err;
+
+ if (!quic_tls_rx_ctx_init(&rx_ctx->ctx, rx_ctx->aead, rx_ctx->key))
+ goto err;
+
+ if (!quic_tls_enc_aes_ctx_init(&rx_ctx->hp_ctx, rx_ctx->hp, rx_ctx->hp_key))
+ goto err;
+
+ if (!quic_tls_derive_keys(ctx->tx.aead, ctx->tx.hp, ctx->tx.md, ver,
+ tx_ctx->key, tx_ctx->keylen,
+ tx_ctx->iv, tx_ctx->ivlen,
+ tx_ctx->hp_key, sizeof tx_ctx->hp_key,
+ tx_init_sec, sizeof tx_init_sec))
+ goto err;
+
+ if (!quic_tls_tx_ctx_init(&tx_ctx->ctx, tx_ctx->aead, tx_ctx->key))
+ goto err;
+
+ if (!quic_tls_enc_aes_ctx_init(&tx_ctx->hp_ctx, tx_ctx->hp, tx_ctx->hp_key))
+ goto err;
+
+ TRACE_LEAVE(QUIC_EV_CONN_ISEC, qc, rx_init_sec, tx_init_sec);
+
+ return 1;
+
+ err:
+ TRACE_DEVEL("leaving in error", QUIC_EV_CONN_ISEC);
+ return 0;
+}
+
+/* Reset all members of <tls_kp> to default values. */
+static inline void quic_tls_ku_reset(struct quic_tls_kp *tls_kp)
+{
+ tls_kp->ctx = NULL;
+ tls_kp->secret = NULL;
+ tls_kp->iv = NULL;
+ tls_kp->key = NULL;
+}
+
+/* Release the memory allocated for all the key update key phase
+ * structures for <qc> QUIC connection.
+ * Always succeeds.
+ */
+static inline void quic_tls_ku_free(struct quic_conn *qc)
+{
+ EVP_CIPHER_CTX_free(qc->ku.prv_rx.ctx);
+ pool_free(pool_head_quic_tls_secret, qc->ku.prv_rx.secret);
+ pool_free(pool_head_quic_tls_iv, qc->ku.prv_rx.iv);
+ pool_free(pool_head_quic_tls_key, qc->ku.prv_rx.key);
+ quic_tls_ku_reset(&qc->ku.prv_rx);
+ EVP_CIPHER_CTX_free(qc->ku.nxt_rx.ctx);
+ pool_free(pool_head_quic_tls_secret, qc->ku.nxt_rx.secret);
+ pool_free(pool_head_quic_tls_iv, qc->ku.nxt_rx.iv);
+ pool_free(pool_head_quic_tls_key, qc->ku.nxt_rx.key);
+ quic_tls_ku_reset(&qc->ku.nxt_rx);
+ EVP_CIPHER_CTX_free(qc->ku.nxt_tx.ctx);
+ pool_free(pool_head_quic_tls_secret, qc->ku.nxt_tx.secret);
+ pool_free(pool_head_quic_tls_iv, qc->ku.nxt_tx.iv);
+ pool_free(pool_head_quic_tls_key, qc->ku.nxt_tx.key);
+ quic_tls_ku_reset(&qc->ku.nxt_tx);
+}
+
+/* Initialize <kp> key update secrets, allocating the required memory.
+ * Return 1 if all the secrets could be allocated, 0 if not.
+ * This is the responsibility of the caller to release the memory
+ * allocated by this function in case of failure.
+ */
+static inline int quic_tls_kp_init(struct quic_tls_kp *kp)
+{
+ kp->count = 0;
+ kp->pn = 0;
+ kp->flags = 0;
+ kp->secret = pool_alloc(pool_head_quic_tls_secret);
+ kp->secretlen = QUIC_TLS_SECRET_LEN;
+ kp->iv = pool_alloc(pool_head_quic_tls_iv);
+ kp->ivlen = QUIC_TLS_IV_LEN;
+ kp->key = pool_alloc(pool_head_quic_tls_key);
+ kp->keylen = QUIC_TLS_KEY_LEN;
+
+ return kp->secret && kp->iv && kp->key;
+}
+
+/* Initialize all the key update key phase structures for <qc>
+ * QUIC connection, allocating the required memory.
+ *
+ * Returns 1 if succeeded, 0 if not. The caller is responsible to use
+ * quic_tls_ku_free() on error to cleanup partially allocated content.
+ */
+static inline int quic_tls_ku_init(struct quic_conn *qc)
+{
+ struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
+ struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
+ struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
+
+ if (!quic_tls_kp_init(prv_rx) ||
+ !quic_tls_kp_init(nxt_rx) ||
+ !quic_tls_kp_init(nxt_tx))
+ goto err;
+
+ return 1;
+
+ err:
+ return 0;
+}
+
+/* Return 1 if <qel> has RX secrets, 0 if not. */
+static inline int quic_tls_has_rx_sec(const struct quic_enc_level *qel)
+{
+ return qel && !!qel->tls_ctx.rx.key;
+}
+
+/* Return 1 if <qel> has TX secrets, 0 if not. */
+static inline int quic_tls_has_tx_sec(const struct quic_enc_level *qel)
+{
+ return qel && !!qel->tls_ctx.tx.key;
+}
+
+/* Return 1 if there is RX packets for <qel> QUIC encryption level, 0 if not */
+static inline int qc_el_rx_pkts(struct quic_enc_level *qel)
+{
+ int ret;
+
+ ret = !eb_is_empty(&qel->rx.pkts);
+
+ return ret;
+}
+
+/* Delete all RX packets for <qel> QUIC encryption level */
+static inline void qc_el_rx_pkts_del(struct quic_enc_level *qel)
+{
+ struct eb64_node *node;
+
+ node = eb64_first(&qel->rx.pkts);
+ while (node) {
+ struct quic_rx_packet *pkt =
+ eb64_entry(node, struct quic_rx_packet, pn_node);
+
+ node = eb64_next(node);
+ eb64_delete(&pkt->pn_node);
+ quic_rx_packet_refdec(pkt);
+ }
+}
+
+static inline void qc_list_qel_rx_pkts(struct quic_enc_level *qel)
+{
+ struct eb64_node *node;
+
+ node = eb64_first(&qel->rx.pkts);
+ while (node) {
+ struct quic_rx_packet *pkt;
+
+ pkt = eb64_entry(node, struct quic_rx_packet, pn_node);
+ fprintf(stderr, "pkt@%p type=%d pn=%llu\n",
+ pkt, pkt->type, (ull)pkt->pn_node.key);
+ node = eb64_next(node);
+ }
+}
+
+/* Returns a boolean if <qc> needs to emit frames for <qel> encryption level. */
+static inline int qc_need_sending(struct quic_conn *qc, struct quic_enc_level *qel)
+{
+ return (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) ||
+ (qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) ||
+ qel->pktns->tx.pto_probe ||
+ !LIST_ISEMPTY(&qel->pktns->tx.frms);
+}
+
+/* Return 1 if <qc> connection may probe the Initial packet number space, 0 if not.
+ * This is not the case if the remote peer address is not validated and if
+ * it cannot send at least QUIC_INITIAL_PACKET_MINLEN bytes.
+ */
+static inline int qc_may_probe_ipktns(struct quic_conn *qc)
+{
+ return quic_peer_validated_addr(qc) ||
+ quic_may_send_bytes(qc) >= QUIC_INITIAL_PACKET_MINLEN;
+}
+
+
+
+#endif /* USE_QUIC */
+#endif /* _PROTO_QUIC_TLS_H */
+
diff --git a/include/haproxy/quic_tp-t.h b/include/haproxy/quic_tp-t.h
new file mode 100644
index 0000000..4897441
--- /dev/null
+++ b/include/haproxy/quic_tp-t.h
@@ -0,0 +1,118 @@
+#ifndef _HAPROXY_QUIC_TP_T_H
+#define _HAPROXY_QUIC_TP_T_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#define QUIC_STATELESS_RESET_TOKEN_LEN 16
+
+/* Default QUIC connection transport parameters */
+extern struct quic_transport_params quic_dflt_transport_params;
+
+struct tp_cid {
+ uint8_t len;
+ uint8_t data[20];
+};
+
+struct tp_preferred_address {
+ uint16_t ipv4_port;
+ uint16_t ipv6_port;
+ struct in_addr ipv4_addr;
+ struct in6_addr ipv6_addr;
+ struct tp_cid cid;
+ uint8_t stateless_reset_token[QUIC_STATELESS_RESET_TOKEN_LEN];
+};
+
+struct tp_version_information {
+ uint32_t chosen;
+ const struct quic_version *negotiated_version;
+};
+
+/* Default values for the absent transport parameters */
+#define QUIC_TP_DFLT_MAX_UDP_PAYLOAD_SIZE 65527 /* bytes */
+#define QUIC_TP_DFLT_ACK_DELAY_COMPONENT 3 /* milliseconds */
+#define QUIC_TP_DFLT_MAX_ACK_DELAY 25 /* milliseconds */
+#define QUIC_TP_DFLT_ACTIVE_CONNECTION_ID_LIMIT 2 /* number of connections */
+/* These ones are our implementation default values when not set
+ * by configuration
+ */
+#define QUIC_TP_DFLT_FRONT_MAX_IDLE_TIMEOUT 30000 /* milliseconds */
+#define QUIC_TP_DFLT_FRONT_MAX_STREAMS_BIDI 100
+#define QUIC_TP_DFLT_BACK_MAX_IDLE_TIMEOUT 30000 /* milliseconds */
+
+/* Types of QUIC transport parameters */
+#define QUIC_TP_ORIGINAL_DESTINATION_CONNECTION_ID 0x00
+#define QUIC_TP_MAX_IDLE_TIMEOUT 0x01
+#define QUIC_TP_STATELESS_RESET_TOKEN 0x02
+#define QUIC_TP_MAX_UDP_PAYLOAD_SIZE 0x03
+#define QUIC_TP_INITIAL_MAX_DATA 0x04
+#define QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05
+#define QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06
+#define QUIC_TP_INITIAL_MAX_STREAM_DATA_UNI 0x07
+#define QUIC_TP_INITIAL_MAX_STREAMS_BIDI 0x08
+#define QUIC_TP_INITIAL_MAX_STREAMS_UNI 0x09
+#define QUIC_TP_ACK_DELAY_EXPONENT 0x0a
+#define QUIC_TP_MAX_ACK_DELAY 0x0b
+#define QUIC_TP_DISABLE_ACTIVE_MIGRATION 0x0c
+#define QUIC_TP_PREFERRED_ADDRESS 0x0d
+#define QUIC_TP_ACTIVE_CONNECTION_ID_LIMIT 0x0e
+#define QUIC_TP_INITIAL_SOURCE_CONNECTION_ID 0x0f
+#define QUIC_TP_RETRY_SOURCE_CONNECTION_ID 0x10
+#define QUIC_TP_VERSION_INFORMATION 0x11
+
+/*
+ * These defines are not for transport parameter type, but the maximum accepted value for
+ * transport parameter types.
+ */
+#define QUIC_TP_ACK_DELAY_EXPONENT_LIMIT 20
+#define QUIC_TP_MAX_ACK_DELAY_LIMIT (1UL << 14)
+
+/* The maximum length of encoded transport parameters for any QUIC peer. */
+#define QUIC_TP_MAX_ENCLEN 128
+/*
+ * QUIC transport parameters.
+ * Note that forbidden parameters sent by clients MUST generate TRANSPORT_PARAMETER_ERROR errors.
+ */
+struct quic_transport_params {
+	uint64_t max_idle_timeout;
+	uint64_t max_udp_payload_size;                 /* Default: 65527 bytes (max of UDP payload for IPv6) */
+	uint64_t initial_max_data;
+	uint64_t initial_max_stream_data_bidi_local;
+	uint64_t initial_max_stream_data_bidi_remote;
+	uint64_t initial_max_stream_data_uni;
+	uint64_t initial_max_streams_bidi;
+	uint64_t initial_max_streams_uni;
+	uint64_t ack_delay_exponent;                   /* Default: 3, max: 20 */
+	uint64_t max_ack_delay;                        /* Default: 25ms, max: 2^14ms*/
+	uint64_t active_connection_id_limit;
+
+	/* Booleans */
+	uint8_t disable_active_migration;
+	uint8_t with_stateless_reset_token;
+	uint8_t with_preferred_address;
+	uint8_t original_destination_connection_id_present;
+	uint8_t initial_source_connection_id_present;
+
+	uint8_t stateless_reset_token[QUIC_STATELESS_RESET_TOKEN_LEN]; /* Forbidden for clients */
+	/*
+	 * MUST be sent by servers.
+	 * When received by clients, must be set to 1 if present.
+	 */
+	struct tp_cid original_destination_connection_id;            /* Forbidden for clients */
+	/*
+	 * MUST be sent by servers after Retry.
+	 */
+	struct tp_cid retry_source_connection_id;                    /* Forbidden for clients */
+	/* MUST be present both for servers and clients. */
+	struct tp_cid initial_source_connection_id;
+	struct tp_preferred_address preferred_address;               /* Forbidden for clients */
+	struct tp_version_information version_information;
+};
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_TP_T_H */
diff --git a/include/haproxy/quic_tp.h b/include/haproxy/quic_tp.h
new file mode 100644
index 0000000..d3bdd18
--- /dev/null
+++ b/include/haproxy/quic_tp.h
@@ -0,0 +1,124 @@
+#ifndef _HAPROXY_QUIC_TP_H
+#define _HAPROXY_QUIC_TP_H
+#ifdef USE_QUIC
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/chunk.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_tp-t.h>
+
+void quic_transport_params_init(struct quic_transport_params *p, int server);
+int quic_transport_params_encode(unsigned char *buf,
+ const unsigned char *end,
+ struct quic_transport_params *p,
+ const struct quic_version *chosen_version,
+ int server);
+
+int quic_transport_params_store(struct quic_conn *conn, int server,
+ const unsigned char *buf,
+ const unsigned char *end);
+
+int qc_lstnr_params_init(struct quic_conn *qc,
+ const struct quic_transport_params *listener_params,
+ const unsigned char *stateless_reset_token,
+ const unsigned char *dcid, size_t dcidlen,
+ const unsigned char *scid, size_t scidlen,
+ const struct quic_cid *token_odcid);
+
+/* Dump <cid> transport parameter connection ID value if present (non null length).
+ * Used only for debugging purposes.
+ */
+static inline void quic_tp_cid_dump(struct buffer *buf,
+ const struct tp_cid *cid)
+{
+ int i;
+
+ for (i = 0; i < cid->len; i++)
+ chunk_appendf(buf, "%02x", cid->data[i]);
+}
+
+static inline void quic_tp_version_info_dump(struct buffer *b,
+ const struct tp_version_information *tp, int local)
+{
+ if (!tp->chosen)
+ return;
+
+ chunk_appendf(b, " versions:chosen=0x%08x", tp->chosen);
+ if (tp->negotiated_version)
+ chunk_appendf(b, ",negotiated=0x%08x", tp->negotiated_version->num);
+}
+
+static inline void quic_transport_params_dump(struct buffer *b,
+ const struct quic_conn *qc,
+ const struct quic_transport_params *p)
+{
+ int local = p == &qc->rx.params;
+
+ if (p->original_destination_connection_id.len) {
+ chunk_appendf(b, " odcid=");
+ quic_tp_cid_dump(b, &p->original_destination_connection_id);
+ }
+ chunk_appendf(b, " iscid=");
+ quic_tp_cid_dump(b, &p->initial_source_connection_id);
+ if (p->retry_source_connection_id.len) {
+ chunk_appendf(b, " rscid=");
+ quic_tp_cid_dump(b, &p->retry_source_connection_id);
+ }
+ chunk_appendf(b, "\n");
+
+ chunk_appendf(b, " midle_timeout=%llums", (ull)p->max_idle_timeout);
+ chunk_appendf(b, " mudp_payload_sz=%llu", (ull)p->max_udp_payload_size);
+ chunk_appendf(b, " ack_delay_exp=%llu", (ull)p->ack_delay_exponent);
+ chunk_appendf(b, " mack_delay=%llums", (ull)p->max_ack_delay);
+ chunk_appendf(b, " act_cid_limit=%llu\n", (ull)p->active_connection_id_limit);
+
+ chunk_appendf(b, " md=%llu", (ull)p->initial_max_data);
+ chunk_appendf(b, " msd_bidi_l=%llu",
+ (ull)p->initial_max_stream_data_bidi_local);
+ chunk_appendf(b, " msd_bidi_r=%llu",
+ (ull)p->initial_max_stream_data_bidi_remote);
+ chunk_appendf(b, " msd_uni=%llu",
+ (ull)p->initial_max_stream_data_uni);
+ chunk_appendf(b, " ms_bidi=%llu", (ull)p->initial_max_streams_bidi);
+ chunk_appendf(b, " ms_uni=%llu\n", (ull)p->initial_max_streams_uni);
+
+ if (p->disable_active_migration || p->with_stateless_reset_token) {
+ int prev = 0;
+
+ chunk_appendf(b, " (");
+ if (p->disable_active_migration) {
+ if (prev)
+ chunk_appendf(b, ",");
+ prev = 1;
+ chunk_appendf(b, "no_act_migr");
+ }
+ if (p->with_stateless_reset_token) {
+ if (prev)
+ chunk_appendf(b, ",");
+ prev = 1;
+ chunk_appendf(b, "stless_rst_tok");
+ }
+ chunk_appendf(b, ")");
+ }
+
+ if (p->with_preferred_address) {
+ char bufaddr[INET6_ADDRSTRLEN];
+ chunk_appendf(b, " pref_addr=");
+ inet_ntop(AF_INET, &p->preferred_address.ipv4_addr,
+ bufaddr, sizeof(bufaddr));
+ chunk_appendf(b, "%s:%hu ", bufaddr, p->preferred_address.ipv4_port);
+
+ inet_ntop(AF_INET6, &p->preferred_address.ipv6_addr,
+ bufaddr, sizeof(bufaddr));
+ chunk_appendf(b, "[%s]:%hu ", bufaddr, p->preferred_address.ipv6_port);
+ quic_tp_cid_dump(b, &p->preferred_address.cid);
+ chunk_appendf(b, "\n");
+ }
+
+ quic_tp_version_info_dump(b, &p->version_information, local);
+}
+
+#endif /* USE_QUIC */
+#endif /* _HAPROXY_QUIC_TP_H */
diff --git a/include/haproxy/quic_trace-t.h b/include/haproxy/quic_trace-t.h
new file mode 100644
index 0000000..7ebc8a7
--- /dev/null
+++ b/include/haproxy/quic_trace-t.h
@@ -0,0 +1,103 @@
+/*
+ * include/haproxy/quic_trace-t.h
+ * Definitions for QUIC traces internal types, constants and flags.
+ *
+ * Copyright (C) 2023
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_QUIC_TRACE_T_H
+#define _HAPROXY_QUIC_TRACE_T_H
+
+#include <haproxy/quic_tls-t.h>
+#include <haproxy/trace-t.h>
+
+extern struct trace_source trace_quic;
+
+/* Used only for QUIC TLS key phase traces */
+struct quic_kp_trace {
+ const unsigned char *rx_sec;
+ size_t rx_seclen;
+ const struct quic_tls_kp *rx;
+ const unsigned char *tx_sec;
+ size_t tx_seclen;
+ const struct quic_tls_kp *tx;
+};
+
+/* Only for debug purpose */
+struct enc_debug_info {
+ unsigned char *payload;
+ size_t payload_len;
+ unsigned char *aad;
+ size_t aad_len;
+ uint64_t pn;
+};
+
+/* Structure to store enough information about the RX CRYPTO frames. */
+struct quic_rx_crypto_frm {
+ struct eb64_node offset_node;
+ uint64_t len;
+ const unsigned char *data;
+ struct quic_rx_packet *pkt;
+};
+
+#define QUIC_EV_CONN_NEW (1ULL << 0)
+#define QUIC_EV_CONN_INIT (1ULL << 1)
+#define QUIC_EV_CONN_ISEC (1ULL << 2)
+#define QUIC_EV_CONN_RSEC (1ULL << 3)
+#define QUIC_EV_CONN_WSEC (1ULL << 4)
+#define QUIC_EV_CONN_RWSEC (1ULL << 5)
+#define QUIC_EV_CONN_LPKT (1ULL << 6)
+#define QUIC_EV_CONN_SPKT (1ULL << 7)
+#define QUIC_EV_CONN_ENCPKT (1ULL << 8)
+#define QUIC_EV_CONN_TXPKT (1ULL << 9)
+#define QUIC_EV_CONN_PAPKT (1ULL << 10)
+#define QUIC_EV_CONN_PAPKTS (1ULL << 11)
+#define QUIC_EV_CONN_IO_CB (1ULL << 12)
+#define QUIC_EV_CONN_RMHP (1ULL << 13)
+#define QUIC_EV_CONN_PRSHPKT (1ULL << 14)
+#define QUIC_EV_CONN_PRSAPKT (1ULL << 15)
+#define QUIC_EV_CONN_PRSFRM (1ULL << 16)
+#define QUIC_EV_CONN_PRSAFRM (1ULL << 17)
+#define QUIC_EV_CONN_BFRM (1ULL << 18)
+#define QUIC_EV_CONN_PHPKTS (1ULL << 19)
+#define QUIC_EV_CONN_TRMHP (1ULL << 20)
+#define QUIC_EV_CONN_ELRMHP (1ULL << 21)
+#define QUIC_EV_CONN_RXPKT (1ULL << 22)
+#define QUIC_EV_CONN_SSLDATA (1ULL << 23)
+#define QUIC_EV_CONN_RXCDATA (1ULL << 24)
+#define QUIC_EV_CONN_ADDDATA (1ULL << 25)
+#define QUIC_EV_CONN_FFLIGHT (1ULL << 26)
+#define QUIC_EV_CONN_SSLALERT (1ULL << 27)
+#define QUIC_EV_CONN_PSTRM (1ULL << 28)
+#define QUIC_EV_CONN_RTTUPDT (1ULL << 29)
+#define QUIC_EV_CONN_CC (1ULL << 30)
+#define QUIC_EV_CONN_SPPKTS (1ULL << 31)
+#define QUIC_EV_CONN_PKTLOSS (1ULL << 32)
+#define QUIC_EV_CONN_STIMER (1ULL << 33)
+#define QUIC_EV_CONN_PTIMER (1ULL << 34)
+#define QUIC_EV_CONN_SPTO (1ULL << 35)
+#define QUIC_EV_CONN_BCFRMS (1ULL << 36)
+#define QUIC_EV_CONN_XPRTSEND (1ULL << 37)
+#define QUIC_EV_CONN_XPRTRECV (1ULL << 38)
+#define QUIC_EV_CONN_FREED (1ULL << 39)
+#define QUIC_EV_CONN_CLOSE (1ULL << 40)
+#define QUIC_EV_CONN_ACKSTRM (1ULL << 41)
+#define QUIC_EV_CONN_FRMLIST (1ULL << 42)
+#define QUIC_EV_STATELESS_RST (1ULL << 43)
+#define QUIC_EV_TRANSP_PARAMS (1ULL << 44)
+#define QUIC_EV_CONN_IDLE_TIMER (1ULL << 45)
+#define QUIC_EV_CONN_SUB (1ULL << 46)
+#define QUIC_EV_CONN_ELEVELSEL (1ULL << 47)
+#define QUIC_EV_CONN_RCV (1ULL << 48)
+#define QUIC_EV_CONN_KILL (1ULL << 49)
+#define QUIC_EV_CONN_KP (1ULL << 50)
+#define QUIC_EV_CONN_SSL_COMPAT (1ULL << 51)
+#define QUIC_EV_CONN_SET_AFFINITY (1ULL << 52)
+
+#endif /* _HAPROXY_QUIC_TRACE_T_H */
diff --git a/include/haproxy/quic_trace.h b/include/haproxy/quic_trace.h
new file mode 100644
index 0000000..19fe864
--- /dev/null
+++ b/include/haproxy/quic_trace.h
@@ -0,0 +1,40 @@
+/*
+ * include/haproxy/quic_trace.h
+ * This file contains QUIC traces definitions.
+ *
+ * Copyright (C) 2023
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef _HAPROXY_QUIC_TRACE_H
+#define _HAPROXY_QUIC_TRACE_H
+
+#include <haproxy/quic_trace-t.h>
+
+#define TRACE_SOURCE &trace_quic
+
+/* Initializes an enc_debug_info struct (only for debug purpose) */
+static inline void enc_debug_info_init(struct enc_debug_info *edi,
+                                       unsigned char *payload, size_t payload_len,
+                                       unsigned char *aad, size_t aad_len, uint64_t pn)
+{
+	edi->payload = payload;
+	edi->payload_len = payload_len;
+	edi->aad = aad;
+	edi->aad_len = aad_len;
+	edi->pn = pn;
+}
+
+#endif /* _HAPROXY_QUIC_TRACE_H */
diff --git a/include/haproxy/quic_tx-t.h b/include/haproxy/quic_tx-t.h
new file mode 100644
index 0000000..4653f04
--- /dev/null
+++ b/include/haproxy/quic_tx-t.h
@@ -0,0 +1,56 @@
+#ifndef _HAPROXY_TX_T_H
+#define _HAPROXY_TX_T_H
+
+#define QUIC_MIN_CC_PKTSIZE 128
+#define QUIC_DGRAM_HEADLEN (sizeof(uint16_t) + sizeof(void *))
+#define QUIC_MAX_CC_BUFSIZE (2 * (QUIC_MIN_CC_PKTSIZE + QUIC_DGRAM_HEADLEN))
+
+extern struct pool_head *pool_head_quic_tx_packet;
+extern struct pool_head *pool_head_quic_cc_buf;
+
+/* Flag a sent packet as being an ack-eliciting packet. */
+#define QUIC_FL_TX_PACKET_ACK_ELICITING (1UL << 0)
+/* Flag a sent packet as containing a PADDING frame. */
+#define QUIC_FL_TX_PACKET_PADDING (1UL << 1)
+/* Flag a sent packet as being in flight. */
+#define QUIC_FL_TX_PACKET_IN_FLIGHT (QUIC_FL_TX_PACKET_ACK_ELICITING | QUIC_FL_TX_PACKET_PADDING)
+/* Flag a sent packet as containing a CONNECTION_CLOSE frame */
+#define QUIC_FL_TX_PACKET_CC (1UL << 2)
+/* Flag a sent packet as containing an ACK frame */
+#define QUIC_FL_TX_PACKET_ACK (1UL << 3)
+/* Flag a sent packet as being coalesced to another one in the same datagram */
+#define QUIC_FL_TX_PACKET_COALESCED (1UL << 4)
+/* Flag a sent packet as being probing with old data */
+#define QUIC_FL_TX_PACKET_PROBE_WITH_OLD_DATA (1UL << 5)
+
+/* Structure to store enough information about TX QUIC packets. */
+struct quic_tx_packet {
+ /* List entry point. */
+ struct list list;
+ /* Packet length */
+ size_t len;
+	/* This is not the packet length but the length of outstanding data
+	 * for an in-flight TX packet.
+	 */
+ size_t in_flight_len;
+ struct eb64_node pn_node;
+ /* The list of frames of this packet. */
+ struct list frms;
+ /* The time this packet was sent (ms). */
+ unsigned int time_sent;
+	/* Packet number space. */
+ struct quic_pktns *pktns;
+ /* Flags. */
+ unsigned int flags;
+ /* Reference counter */
+ int refcnt;
+ /* Next packet in the same datagram */
+ struct quic_tx_packet *next;
+ /* Previous packet in the same datagram */
+ struct quic_tx_packet *prev;
+ /* Largest acknowledged packet number if this packet contains an ACK frame */
+ int64_t largest_acked_pn;
+ unsigned char type;
+};
+
+#endif /* _HAPROXY_TX_T_H */
diff --git a/include/haproxy/quic_tx.h b/include/haproxy/quic_tx.h
new file mode 100644
index 0000000..0659c14
--- /dev/null
+++ b/include/haproxy/quic_tx.h
@@ -0,0 +1,92 @@
+/*
+ * QUIC protocol definitions (TX side).
+ *
+ * Copyright (C) 2023
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_QUIC_TX_H
+#define _HAPROXY_QUIC_TX_H
+
+#include <haproxy/buf-t.h>
+#include <haproxy/list-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_tls-t.h>
+#include <haproxy/quic_rx-t.h>
+#include <haproxy/quic_tx-t.h>
+
+struct buffer *qc_txb_alloc(struct quic_conn *qc);
+void qc_txb_release(struct quic_conn *qc);
+int qc_purge_txbuf(struct quic_conn *qc, struct buffer *buf);
+struct buffer *qc_get_txb(struct quic_conn *qc);
+
+int qc_prep_hpkts(struct quic_conn *qc, struct buffer *buf, struct list *qels);
+int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx);
+int qc_send_app_pkts(struct quic_conn *qc, struct list *frms);
+int qc_dgrams_retransmit(struct quic_conn *qc);
+void qc_prep_hdshk_fast_retrans(struct quic_conn *qc,
+ struct list *ifrms, struct list *hfrms);
+int send_retry(int fd, struct sockaddr_storage *addr,
+ struct quic_rx_packet *pkt, const struct quic_version *qv);
+int send_stateless_reset(struct listener *l, struct sockaddr_storage *dstaddr,
+ struct quic_rx_packet *rxpkt);
+int send_version_negotiation(int fd, struct sockaddr_storage *addr,
+ struct quic_rx_packet *pkt);
+
+/* The TX packets sent in the same datagram are linked to each other in
+ * the order they are built. This function detaches a packet from its successor
+ * and predecessor in the same datagram.
+ */
+static inline void quic_tx_packet_dgram_detach(struct quic_tx_packet *pkt)
+{
+ if (pkt->prev)
+ pkt->prev->next = pkt->next;
+ if (pkt->next)
+ pkt->next->prev = pkt->prev;
+}
+
+
+/* Increment the reference counter of <pkt> */
+static inline void quic_tx_packet_refinc(struct quic_tx_packet *pkt)
+{
+ pkt->refcnt++;
+}
+
+/* Decrement the reference counter of <pkt> */
+static inline void quic_tx_packet_refdec(struct quic_tx_packet *pkt)
+{
+ if (--pkt->refcnt == 0) {
+ BUG_ON(!LIST_ISEMPTY(&pkt->frms));
+		/* If there are other packets in the same datagram <pkt> is attached to,
+		 * detach the previous one and the next one from <pkt>.
+		 */
+ quic_tx_packet_dgram_detach(pkt);
+ pool_free(pool_head_quic_tx_packet, pkt);
+ }
+}
+
+/* Return the number of bytes which may be sent from <qc> connection when
+ * its address has not yet been validated. Note that it is the responsibility
+ * of the caller to check that this is the case with quic_peer_validated_addr().
+ * The latter triggers a BUG_ON() if 3 * qc->rx.bytes < qc->tx.prep_bytes.
+ */
+static inline size_t quic_may_send_bytes(struct quic_conn *qc)
+{
+ return 3 * qc->bytes.rx - qc->bytes.prep;
+}
+
+
+#endif /* _HAPROXY_QUIC_TX_H */
diff --git a/include/haproxy/receiver-t.h b/include/haproxy/receiver-t.h
new file mode 100644
index 0000000..0ae441e
--- /dev/null
+++ b/include/haproxy/receiver-t.h
@@ -0,0 +1,106 @@
+/*
+ * include/haproxy/receiver-t.h
+ * This file defines the structures needed to manage receivers.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_RECEIVER_T_H
+#define _HAPROXY_RECEIVER_T_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/namespace-t.h>
+#include <haproxy/proto_rhttp-t.h>
+#include <haproxy/quic_sock-t.h>
+#include <haproxy/thread.h>
+
+/* Bit values for receiver->flags */
+#define RX_F_BOUND 0x00000001 /* receiver already bound */
+#define RX_F_INHERITED 0x00000002 /* inherited FD from the parent process (fd@) or duped from another local receiver */
+#define RX_F_MWORKER 0x00000004 /* keep the FD open in the master but close it in the children */
+#define RX_F_MUST_DUP 0x00000008 /* this receiver's fd must be dup() from a reference; ignore socket-level ops here */
+#define RX_F_NON_SUSPENDABLE 0x00000010 /* this socket cannot be suspended hence must always be unbound */
+
+/* Bit values for rx_settings->options */
+#define RX_O_FOREIGN 0x00000001 /* receives on foreign addresses */
+#define RX_O_V4V6 0x00000002 /* binds to both IPv4 and IPv6 addresses if !V6ONLY */
+#define RX_O_V6ONLY 0x00000004 /* binds to IPv6 addresses only */
+
+/* All the settings that are used to configure a receiver */
+struct rx_settings {
+ struct { /* UNIX socket permissions */
+ uid_t uid; /* -1 to leave unchanged */
+ gid_t gid; /* -1 to leave unchanged */
+ mode_t mode; /* 0 to leave unchanged */
+ } ux;
+ char *interface; /* interface name or NULL */
+	const struct netns_entry *netns; /* network namespace of the listener */
+ unsigned int options; /* receiver options (RX_O_*) */
+ int shards; /* number of shards, 0=not set yet, -1="by-thread" */
+};
+
+/* info about a shard that is shared between multiple groups. Receivers that
+ * are alone in their shard do not have a shard_info.
+ */
+struct shard_info {
+ uint nbgroups; /* number of groups in this shard (=#rx); Zero = unused. */
+ uint nbthreads; /* number of threads in this shard (>=nbgroups) */
+ ulong tgroup_mask; /* bitmask of thread groups having a member here */
+ struct receiver *ref; /* first one, reference for FDs to duplicate */
+ struct receiver *members[MAX_TGROUPS]; /* all members of the shard (one per thread group) */
+};
+
+/* This describes a receiver with all its characteristics (address, options, etc) */
+struct receiver {
+ int fd; /* handle we receive from (fd only for now) */
+ unsigned int flags; /* receiver options (RX_F_*) */
+ struct protocol *proto; /* protocol this receiver belongs to */
+ void *owner; /* receiver's owner (usually a listener) */
+ void (*iocb)(int fd); /* generic I/O handler (typically accept callback) */
+ unsigned long bind_thread; /* bitmask of threads allowed on this receiver */
+ uint bind_tgroup; /* thread group ID: 0=global IDs, non-zero=local IDs */
+ struct rx_settings *settings; /* points to the settings used by this receiver */
+ struct shard_info *shard_info; /* points to info about the owning shard, NULL if single rx */
+ struct list proto_list; /* list in the protocol header */
+#ifdef USE_QUIC
+ struct mt_list rxbuf_list; /* list of buffers to receive and dispatch QUIC datagrams. */
+ enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
+ unsigned int quic_curr_handshake; /* count of active QUIC handshakes */
+ unsigned int quic_curr_accept; /* count of QUIC conns waiting for accept */
+#endif
+ struct {
+ struct task *task; /* Task used to open connection for reverse. */
+ struct server *srv; /* Underlying server used to initiate reverse pre-connect. */
+ struct connection *pend_conn; /* Pending connection waiting to complete reversal before being accepted. */
+ enum li_preconn_state state; /* State for transition logging. */
+ } rhttp;
+
+ /* warning: this struct is huge, keep it at the bottom */
+ struct sockaddr_storage addr; /* the address the socket is bound to */
+};
+
+#endif /* _HAPROXY_RECEIVER_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/regex-t.h b/include/haproxy/regex-t.h
new file mode 100644
index 0000000..33d88a2
--- /dev/null
+++ b/include/haproxy/regex-t.h
@@ -0,0 +1,78 @@
+/*
+ * include/haproxy/regex-t.h
+ * Types and macros definitions for regular expressions
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_REGEX_T_H
+#define _HAPROXY_REGEX_T_H
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+
+#ifdef USE_PCRE
+#include <pcre.h>
+#include <pcreposix.h>
+
+/* For pre-8.20 PCRE compatibility */
+#ifndef PCRE_STUDY_JIT_COMPILE
+#define PCRE_STUDY_JIT_COMPILE 0
+#endif
+
+#elif defined(USE_PCRE2)
+#include <pcre2.h>
+#include <pcre2posix.h>
+
+#else /* no PCRE, nor PCRE2 */
+#include <regex.h>
+#endif
+
+struct my_regex {
+#ifdef USE_PCRE
+ pcre *reg;
+ pcre_extra *extra;
+#ifdef USE_PCRE_JIT
+#ifndef PCRE_CONFIG_JIT
+#error "The PCRE lib doesn't support JIT. Change your lib, or remove the option USE_PCRE_JIT."
+#endif
+#endif
+#elif defined(USE_PCRE2)
+ int(*mfn)(const pcre2_code *, PCRE2_SPTR, PCRE2_SIZE, PCRE2_SIZE, uint32_t, pcre2_match_data *, pcre2_match_context *);
+ pcre2_code *reg;
+#else /* no PCRE */
+ regex_t regex;
+#endif
+};
+
+struct hdr_exp {
+ struct hdr_exp *next;
+ struct my_regex *preg; /* expression to look for */
+ const char *replace; /* expression to set instead */
+ void *cond; /* a possible condition or NULL */
+};
+
+#endif /* _HAPROXY_REGEX_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/regex.h b/include/haproxy/regex.h
new file mode 100644
index 0000000..2cd9573
--- /dev/null
+++ b/include/haproxy/regex.h
@@ -0,0 +1,144 @@
+/*
+ * include/haproxy/regex.h
+ * Compatibility layer for various regular expression engines
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_REGEX_H
+#define _HAPROXY_REGEX_H
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/regex-t.h>
+
+extern THREAD_LOCAL regmatch_t pmatch[MAX_MATCH];
+
+/* "str" is the string that contain the regex to compile.
+ * "regex" is preallocated memory. After the execution of this function, this
+ * struct contain the compiled regex.
+ * "cs" is the case sensitive flag. If cs is true, case sensitive is enabled.
+ * "cap" is capture flag. If cap if true the regex can capture into
+ * parenthesis strings.
+ * "err" is the standard error message pointer.
+ *
+ * The function return 1 is success case, else return 0 and err is filled.
+ */
+struct my_regex *regex_comp(const char *str, int cs, int cap, char **err);
+int exp_replace(char *dst, unsigned int dst_size, char *src, const char *str, const regmatch_t *matches);
+const char *check_replace_string(const char *str);
+int regex_exec_match(const struct my_regex *preg, const char *subject,
+ size_t nmatch, regmatch_t pmatch[], int flags);
+int regex_exec_match2(const struct my_regex *preg, char *subject, int length,
+ size_t nmatch, regmatch_t pmatch[], int flags);
+
+
+/* If the function doesn't match, it returns false, else it returns true.
+ */
+static inline int regex_exec(const struct my_regex *preg, char *subject)
+{
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT)
+ if (pcre_exec(preg->reg, preg->extra, subject, strlen(subject), 0, 0, NULL, 0) < 0)
+ return 0;
+ return 1;
+#elif defined(USE_PCRE2)
+ pcre2_match_data *pm;
+ int ret;
+
+ pm = pcre2_match_data_create_from_pattern(preg->reg, NULL);
+ ret = preg->mfn(preg->reg, (PCRE2_SPTR)subject, (PCRE2_SIZE)strlen(subject),
+ 0, 0, pm, NULL);
+ pcre2_match_data_free(pm);
+ if (ret < 0)
+ return 0;
+ return 1;
+#else
+ int match;
+ match = regexec(&preg->regex, subject, 0, NULL, 0);
+ if (match == REG_NOMATCH)
+ return 0;
+ return 1;
+#endif
+}
+
+/* Note that <subject> MUST be at least <length+1> characters long and must
+ * be writable because the function will temporarily force a zero past the
+ * last character.
+ *
+ * If the function doesn't match, it returns false, else it returns true.
+ */
+static inline int regex_exec2(const struct my_regex *preg, char *subject, int length)
+{
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT)
+ if (pcre_exec(preg->reg, preg->extra, subject, length, 0, 0, NULL, 0) < 0)
+ return 0;
+ return 1;
+#elif defined(USE_PCRE2)
+ pcre2_match_data *pm;
+ int ret;
+
+ pm = pcre2_match_data_create_from_pattern(preg->reg, NULL);
+ ret = preg->mfn(preg->reg, (PCRE2_SPTR)subject, (PCRE2_SIZE)length,
+ 0, 0, pm, NULL);
+ pcre2_match_data_free(pm);
+ if (ret < 0)
+ return 0;
+ return 1;
+#else
+ int match;
+ char old_char = subject[length];
+ subject[length] = 0;
+ match = regexec(&preg->regex, subject, 0, NULL, 0);
+ subject[length] = old_char;
+ if (match == REG_NOMATCH)
+ return 0;
+ return 1;
+#endif
+}
+
+static inline void regex_free(struct my_regex *preg)
+{
+ if (!preg)
+ return;
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT)
+ pcre_free(preg->reg);
+/* PCRE < 8.20 requires pcre_free() while >= 8.20 requires pcre_study_free(),
+ * which is easily detected using PCRE_CONFIG_JIT.
+ */
+#ifdef PCRE_CONFIG_JIT
+ pcre_free_study(preg->extra);
+#else /* PCRE_CONFIG_JIT */
+ pcre_free(preg->extra);
+#endif /* PCRE_CONFIG_JIT */
+#elif defined(USE_PCRE2) || defined(USE_PCRE2_JIT)
+ pcre2_code_free(preg->reg);
+#else
+ regfree(&preg->regex);
+#endif
+ free(preg);
+}
+
+#endif /* _HAPROXY_REGEX_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/resolvers-t.h b/include/haproxy/resolvers-t.h
new file mode 100644
index 0000000..b727463
--- /dev/null
+++ b/include/haproxy/resolvers-t.h
@@ -0,0 +1,297 @@
+/*
+ * include/haproxy/resolvers-t.h
+ * This file provides structures and types for DNS.
+ *
+ * Copyright (C) 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_RESOLVERS_T_H
+#define _HAPROXY_RESOLVERS_T_H
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/connection-t.h>
+#include <haproxy/dns-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread.h>
+
+extern struct pool_head *resolv_requester_pool;
+
+/*DNS maximum values */
+/*
+ * Maximum issued from RFC:
+ * RFC 1035: https://www.ietf.org/rfc/rfc1035.txt chapter 2.3.4
+ * RFC 2671: http://tools.ietf.org/html/rfc2671
+ */
+#define DNS_MAX_LABEL_SIZE 63
+#define DNS_MAX_NAME_SIZE 255
+#define DNS_MAX_UDP_MESSAGE 65535
+
+/* DNS minimum record size: 1 char + 1 NULL + type + class */
+#define DNS_MIN_RECORD_SIZE (1 + 1 + 2 + 2)
+
+/* DNS smallest fqdn 'a.gl' size */
+# define DNS_SMALLEST_FQDN_SIZE 4
+
+/* maximum number of query records in a DNS response
+ * For now, we allow only one */
+#define DNS_MAX_QUERY_RECORDS 1
+
+/* maximum number of answer record in a DNS response */
+#define DNS_MAX_ANSWER_RECORDS ((DNS_MAX_UDP_MESSAGE - DNS_HEADER_SIZE) / DNS_MIN_RECORD_SIZE)
+
+/* size of dns_buffer used to store responses from the buffer
+ * dns_buffer is used to store data collected from records found in a response.
+ * Before using it, caller will always check that there is at least DNS_MAX_NAME_SIZE bytes
+ * available */
+/* Parenthesized so the macro binds correctly inside arithmetic expressions */
+#define DNS_ANALYZE_BUFFER_SIZE (DNS_MAX_UDP_MESSAGE + DNS_MAX_NAME_SIZE)
+
+/* DNS error messages */
+#define DNS_TOO_LONG_FQDN "hostname too long"
+#define DNS_LABEL_TOO_LONG "one label too long"
+#define DNS_INVALID_CHARACTER "found an invalid character"
+
+/* dns query class */
+#define DNS_RCLASS_IN 1 /* internet class */
+
+/* dns record types (non exhaustive list) */
+#define DNS_RTYPE_A 1 /* IPv4 address */
+#define DNS_RTYPE_CNAME 5 /* canonical name */
+#define DNS_RTYPE_AAAA 28 /* IPv6 address */
+#define DNS_RTYPE_SRV 33 /* SRV record */
+#define DNS_RTYPE_OPT 41 /* OPT */
+#define DNS_RTYPE_ANY 255 /* all records */
+
+/* dns rcode values */
+#define DNS_RCODE_NO_ERROR 0 /* no error */
+#define DNS_RCODE_NX_DOMAIN 3 /* non existent domain */
+#define DNS_RCODE_REFUSED 5 /* query refused */
+
+/* dns flags masks */
+#define DNS_FLAG_TRUNCATED 0x0200 /* mask for truncated flag */
+#define DNS_FLAG_REPLYCODE 0x000F /* mask for reply code */
+
+/* max number of network preference entries are available from the
+ * configuration file.
+ */
+#define SRV_MAX_PREF_NET 5
+
+/* NOTE: big endian structure */
+struct resolv_query_item {
+ char name[DNS_MAX_NAME_SIZE+1]; /* query name */
+ unsigned short type; /* question type */
+ unsigned short class; /* query class */
+};
+
+/* NOTE: big endian structure */
+struct resolv_answer_item {
+ /*For SRV type, name also includes service and protocol value */
+ char name[DNS_MAX_NAME_SIZE+1]; /* answer name */
+ int16_t type; /* question type */
+ int16_t class; /* query class */
+ int32_t ttl; /* response TTL */
+ int16_t priority; /* SRV type priority */
+ uint16_t weight; /* SRV type weight */
+ uint16_t port; /* SRV type port */
+ uint16_t data_len; /* number of bytes in the <data> field below */
+ struct eb32_node link; /* linking node */
+ union {
+ struct sockaddr_in in4; /* IPv4 address for RTYPE_A */
+ struct sockaddr_in6 in6; /* IPv6 address for RTYPE_AAAA */
+ char target[DNS_MAX_NAME_SIZE+1]; /* Response data: SRV or CNAME type target */
+ } data;
+	unsigned int last_seen;             /* When the answer was last seen */
+ struct resolv_answer_item *ar_item; /* pointer to a RRset from the additional section, if exists */
+ struct list attached_servers; /* attached server head */
+};
+
+struct resolv_response {
+ struct dns_header header;
+ struct eb_root answer_tree;
+ /* authority ignored for now */
+};
+
+/* Resolvers section and parameters. It is linked to the name servers
+ * servers points to it.
+ * current resolution are stored in a FIFO list.
+ */
+struct resolvers {
+ __decl_thread(HA_SPINLOCK_T lock);
+ unsigned int accepted_payload_size; /* maximum payload size we accept for responses */
+ int nb_nameservers; /* total number of active nameservers in a resolvers section */
+ int resolve_retries; /* number of retries before giving up */
+ struct { /* time to: */
+ int resolve; /* wait between 2 queries for the same resolution */
+ int retry; /* wait for a response before retrying */
+ } timeout;
+ struct { /* time to hold current data when */
+ int valid; /* a response is valid */
+ int nx; /* a response doesn't exist */
+ int timeout; /* no answer was delivered */
+ int refused; /* dns server refused to answer */
+ int other; /* other dns response errors */
+ int obsolete; /* an answer hasn't been seen */
+ } hold;
+ struct task *t; /* timeout management */
+ struct {
+ struct list wait; /* resolutions managed to this resolvers section */
+ struct list curr; /* current running resolutions */
+ } resolutions;
+ struct eb_root query_ids; /* tree to quickly lookup/retrieve query ids currently in use
+ * used by each nameserver, but stored in resolvers since there must
+ * be a unique relation between an eb_root and an eb_node (resolution) */
+ struct list list; /* resolvers list */
+ struct list nameservers; /* dns server list */
+ struct proxy *px; /* px to handle connections to DNS servers */
+ char *id; /* resolvers unique identifier */
+ struct {
+ const char *file; /* file where the section appears */
+ int line; /* line where the section appears */
+ int implicit; /* config was auto-generated and must be silent */
+ } conf; /* config information */
+};
+
+struct resolv_options {
+ int family_prio; /* which IP family should the resolver use when both are returned */
+ struct {
+ int family;
+ union {
+ struct in_addr in4;
+ struct in6_addr in6;
+ } addr;
+ union {
+ struct in_addr in4;
+ struct in6_addr in6;
+ } mask;
+ } pref_net[SRV_MAX_PREF_NET];
+ int pref_net_nb; /* The number of registered preferred networks. */
+	int accept_duplicate_ip; /* flag to indicate whether the associated object can use an IP address
+	                            already set to another object of the same group */
+ int ignore_weight; /* flag to indicate whether to ignore the weight within the record */
+};
+
+/* Resolution structure associated to single server and used to manage name
+ * resolution for this server.
+ * The only link between the resolution and a nameserver is through the
+ * query_id.
+ */
+struct resolv_resolution {
+ struct resolvers *resolvers; /* pointer to the resolvers structure owning the resolution */
+ struct list requesters; /* list of requesters using this resolution */
+ int uuid; /* unique id (used for debugging purpose) */
+ char *hostname_dn; /* server hostname in domain name label format */
+ int hostname_dn_len; /* server domain name label len */
+ unsigned int last_resolution; /* time of the last resolution */
+ unsigned int last_query; /* time of the last query sent */
+ unsigned int last_valid; /* time of the last valid response */
+ int query_id; /* DNS query ID dedicated for this resolution */
+ struct eb32_node qid; /* ebtree query id */
+ int prefered_query_type; /* preferred query type */
+ int query_type; /* current query type */
+ int status; /* status of the resolution being processed RSLV_STATUS_* */
+ int step; /* RSLV_STEP_* */
+ int try; /* current resolution try */
+ int nb_queries; /* count number of queries sent */
+ int nb_responses; /* count number of responses received */
+
+ struct resolv_response response; /* structure hosting the DNS response */
+ struct resolv_query_item response_query_records[DNS_MAX_QUERY_RECORDS]; /* <response> query records */
+
+ struct list list; /* resolution list */
+};
+
+/* Structure used to describe the owner of a DNS resolution. */
+struct resolv_requester {
+ enum obj_type *owner; /* pointer to the owner (server or dns_srvrq) */
+ struct resolv_resolution *resolution; /* pointer to the owned DNS resolution */
+
+ int (*requester_cb)(struct resolv_requester *, struct dns_counters *); /* requester callback for valid response */
+ int (*requester_error_cb)(struct resolv_requester *, int); /* requester callback, for error management */
+
+ struct list list; /* requester list */
+};
+
+/* Last resolution status code */
+enum {
+ RSLV_STATUS_NONE = 0, /* no resolution occurred yet */
+ RSLV_STATUS_VALID, /* no error */
+ RSLV_STATUS_INVALID, /* invalid responses */
+ RSLV_STATUS_ERROR, /* error */
+ RSLV_STATUS_NX, /* NXDOMAIN */
+ RSLV_STATUS_REFUSED, /* server refused our query */
+ RSLV_STATUS_TIMEOUT, /* no response from DNS servers */
+ RSLV_STATUS_OTHER, /* other errors */
+};
+
+/* Current resolution step */
+enum {
+ RSLV_STEP_NONE = 0, /* nothing happening currently */
+ RSLV_STEP_RUNNING, /* resolution is running */
+};
+
+/* Return codes after analyzing a DNS response */
+enum {
+ RSLV_RESP_VALID = 0, /* valid response */
+ RSLV_RESP_INVALID, /* invalid response (various type of errors can trigger it) */
+ RSLV_RESP_ERROR, /* DNS error code */
+ RSLV_RESP_NX_DOMAIN, /* resolution unsuccessful */
+ RSLV_RESP_REFUSED, /* DNS server refused to answer */
+ RSLV_RESP_ANCOUNT_ZERO, /* no answers in the response */
+ RSLV_RESP_WRONG_NAME, /* response does not match query name */
+ RSLV_RESP_CNAME_ERROR, /* error when resolving a CNAME in an atomic response */
+ RSLV_RESP_TIMEOUT, /* DNS server has not answered in time */
+ RSLV_RESP_TRUNCATED, /* DNS response is truncated */
+ RSLV_RESP_NO_EXPECTED_RECORD, /* No expected records were found in the response */
+ RSLV_RESP_QUERY_COUNT_ERROR, /* we did not get the expected number of queries in the response */
+ RSLV_RESP_INTERNAL, /* internal resolver error */
+};
+
+/* Return codes after searching an IP in a DNS response buffer, using a family
+ * preference
+ */
+enum {
+ RSLV_UPD_NO = 1, /* provided IP was found and preference is matched
+ * OR provided IP found and preference is not matched, but no IP
+ * matching preference was found.
+ */
+ RSLV_UPD_SRVIP_NOT_FOUND, /* provided IP not found
+ * OR provided IP found and preference is not match and an IP
+ * matching preference was found.
+ */
+ RSLV_UPD_CNAME, /* CNAME without any IP provided in the response */
+ RSLV_UPD_NAME_ERROR, /* name in the response did not match the query */
+ RSLV_UPD_NO_IP_FOUND, /* no IP could be found in the response */
+ RSLV_UPD_OBSOLETE_IP, /* The server IP was obsolete, and no other IP was found */
+};
+
+struct proxy;
+struct resolv_srvrq {
+ enum obj_type obj_type; /* object type == OBJ_TYPE_SRVRQ */
+ struct resolvers *resolvers; /* pointer to the resolvers structure used for this server template */
+ struct proxy *proxy; /* associated proxy */
+ char *name;
+ char *hostname_dn; /* server hostname in Domain Name format */
+ int hostname_dn_len; /* string length of the server hostname in Domain Name format */
+ struct resolv_requester *requester; /* used to link to its DNS resolution */
+ struct list attached_servers; /* List of the servers free to use */
+ struct eb_root named_servers; /* tree of servers indexed by hostnames found in server state file */
+ struct list list; /* Next SRV RQ for the same proxy */
+};
+
+#endif /* _HAPROXY_RESOLVERS_T_H */
diff --git a/include/haproxy/resolvers.h b/include/haproxy/resolvers.h
new file mode 100644
index 0000000..5d4c744
--- /dev/null
+++ b/include/haproxy/resolvers.h
@@ -0,0 +1,66 @@
+/*
+ * include/haproxy/resolvers.h
+ * This file provides functions related to DNS protocol
+ *
+ * Copyright (C) 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_RESOLVERS_H
+#define _HAPROXY_RESOLVERS_H
+
+#include <haproxy/resolvers-t.h>
+
+struct proxy;
+struct server;
+struct stconn;
+struct act_rule;
+struct list;
+
+extern struct list sec_resolvers;
+extern unsigned int resolv_failed_resolutions;
+
+struct resolvers *find_resolvers_by_id(const char *id);
+struct resolv_srvrq *find_srvrq_by_name(const char *name, struct proxy *px);
+struct resolv_srvrq *new_resolv_srvrq(struct server *srv, char *fqdn);
+struct resolv_answer_item *find_srvrq_answer_record(const struct resolv_requester *requester);
+
+int resolv_str_to_dn_label(const char *str, int str_len, char *dn, int dn_len);
+int resolv_dn_label_to_str(const char *dn, int dn_len, char *str, int str_len);
+
+int resolv_hostname_validation(const char *string, char **err);
+int resolv_get_ip_from_response(struct resolv_response *r_res,
+ struct resolv_options *resolv_opts, void *currentip,
+ short currentip_sin_family,
+ void **newip, short *newip_sin_family,
+ struct server *owner);
+
+int resolv_link_resolution(void *requester, int requester_type, int requester_locked);
+void resolv_unlink_resolution(struct resolv_requester *requester);
+void resolv_detach_from_resolution_answer_items(struct resolv_resolution *res, struct resolv_requester *req);
+void resolv_trigger_resolution(struct resolv_requester *requester);
+enum act_parse_ret resolv_parse_do_resolve(const char **args, int *orig_arg, struct proxy *px, struct act_rule *rule, char **err);
+int check_action_do_resolve(struct act_rule *rule, struct proxy *px, char **err);
+
+int stats_dump_resolvers(struct stconn *sc,
+ struct field *stats, size_t stats_count,
+ struct list *stat_modules);
+void resolv_stats_clear_counters(int clrall, struct list *stat_modules);
+int resolv_allocate_counters(struct list *stat_modules);
+int dns_dgram_init(struct dns_nameserver *ns, struct sockaddr_storage *sk);
+int resolvers_create_default();
+
+#endif /* _HAPROXY_RESOLVERS_H */
diff --git a/include/haproxy/ring-t.h b/include/haproxy/ring-t.h
new file mode 100644
index 0000000..b89c886
--- /dev/null
+++ b/include/haproxy/ring-t.h
@@ -0,0 +1,113 @@
+/*
+ * include/haproxy/ring-t.h
+ * This file provides definitions for ring buffers used for disposable data.
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_RING_T_H
+#define _HAPROXY_RING_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/thread.h>
+
+/* The code below handles circular buffers with single-producer and multiple
+ * readers (up to 255). The buffer storage area must remain always allocated.
+ * It's made of series of payload blocks followed by a readers count (RC).
+ * There is always a readers count at the beginning of the buffer as well. Each
+ * payload block is composed of a varint-encoded size (VI) followed by the
+ * actual payload (PL).
+ *
+ * The readers count is encoded on a single byte. It indicates how many readers
+ * are still waiting at this position. The writer writes after the buffer's
+ * tail, which initially starts just past the first readers count. Then it
+ * knows by reading this count that it must wake up the readers to indicate
+ * data availability. When a reader reads the payload block, it increments the
+ * next readers count and decrements the current one. The area between the
+ * initial readers count and the next one is protected from overwriting for as
+ * long as the initial count is non-null. As such these readers counts are
+ * effective barriers against data recycling.
+ *
+ * Only the writer is allowed to update the buffer's tail/head. This ensures
+ * that events can remain as long as possible so that late readers can get the
+ * maximum history available. It also helps dealing with multi-thread accesses
+ * using a simple RW lock during the buffer head's manipulation. The writer
+ * will have to delete some old records starting at the head until the new
+ * message can fit or a non-null readers count is encountered. If a message
+ * cannot fit due to insufficient room, the message is lost and the drop
+ * counter must be incremented.
+ *
+ * Like any buffer, this buffer naturally wraps at the end and continues at the
+ * beginning. The creation process consists in immediately adding a null
+ * readers count byte into the buffer. The write process consists in always
+ * writing a payload block followed by a new readers count. The delete process
+ * consists in removing a null readers count and payload block. As such, there
+ * is always at least one readers count byte in the buffer available at the
+ * head for new readers to attach to, and one before the tail, both of which
+ * may be the same when the buffer doesn't contain any event. It is thus safe
+ * for any reader to simply keep the absolute offset of the last visited
+ * position and to restart from there. The writer will update the buffer's
+ * absolute offset when deleting entries. All this also has the benefit of
+ * allowing a buffer to be hot-resized without losing its contents.
+ *
+ * Thus we have this :
+ * - init of empty buffer:
+ * head-, ,-tail
+ * [ RC | xxxxxxxxxxxxxxxxxxxxxxxxxx ]
+ *
+ * - reader attached:
+ * head-, ,-tail
+ * [ RC | xxxxxxxxxxxxxxxxxxxxxxxxxx ]
+ * ^- +1
+ *
+ * - append of one event:
+ * appended
+ * head-, <----------> ,-tail
+ * [ RC | VI | PL | RC | xxxxxxxxxxx ]
+ *
+ * - reader advancing:
+ * head-, ,-tail
+ * [ RC | VI | PL | RC | xxxxxxxxxxx ]
+ * ^- -1 ^- +1
+ *
+ * - writer removing older message:
+ * head-, ,-tail
+ * [ xxxxxxxxxxxx | RC | xxxxxxxxxxx ]
+ * <---------->
+ * removed
+ */
+
+/* ring watch flags to be used when watching the ring */
+#define RING_WF_WAIT_MODE 0x00000001 /* wait for new contents */
+#define RING_WF_SEEK_NEW 0x00000002 /* seek to new contents */
+
+struct ring {
+ struct buffer buf; // storage area
+ struct list waiters; // list of waiters, for now, CLI "show event"
+ __decl_thread(HA_RWLOCK_T lock);
+ int readers_count;
+};
+
+#endif /* _HAPROXY_RING_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/ring.h b/include/haproxy/ring.h
new file mode 100644
index 0000000..71217d5
--- /dev/null
+++ b/include/haproxy/ring.h
@@ -0,0 +1,53 @@
+/*
+ * include/haproxy/ring.h
+ * Exported functions for ring buffers used for disposable data.
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_RING_H
+#define _HAPROXY_RING_H
+
+#include <stdlib.h>
+#include <import/ist.h>
+#include <haproxy/ring-t.h>
+
+struct appctx;
+
+struct ring *ring_new(size_t size);
+struct ring *ring_make_from_area(void *area, size_t size);
+struct ring *ring_cast_from_area(void *area);
+void ring_init(struct ring *ring, void* area, size_t size);
+struct ring *ring_resize(struct ring *ring, size_t size);
+void ring_free(struct ring *ring);
+ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg);
+int ring_attach(struct ring *ring);
+void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs);
+int ring_attach_cli(struct ring *ring, struct appctx *appctx, uint flags);
+int cli_io_handler_show_ring(struct appctx *appctx);
+void cli_io_release_show_ring(struct appctx *appctx);
+
+size_t ring_max_payload(const struct ring *ring);
+
+#endif /* _HAPROXY_RING_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sample-t.h b/include/haproxy/sample-t.h
new file mode 100644
index 0000000..27cf4ba
--- /dev/null
+++ b/include/haproxy/sample-t.h
@@ -0,0 +1,315 @@
+/*
+ * include/haproxy/sample-t.h
+ * Macros, variables and structures for sample management.
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2012-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SAMPLE_T_H
+#define _HAPROXY_SAMPLE_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/sample_data-t.h>
+
+/* input and output sample types
+ *
+ * Some of them are pseudo types which means that they can be used for
+ * in_type and out_type in sample (fetches/conv) definitions (they serve as
+ * compatibility and conversion hints) but they cannot be emitted at runtime.
+ */
+enum {
+ SMP_T_ANY = 0, /* pseudo type: any type */
+ SMP_T_SAME, /* special: output type hint for converters that don't alter input type (out == in) */
+ SMP_T_BOOL, /* boolean */
+ SMP_T_SINT, /* signed 64bits integer type */
+ SMP_T_ADDR, /* pseudo type: could be ipv4 or ipv6 */
+ SMP_T_IPV4, /* ipv4 type */
+ SMP_T_IPV6, /* ipv6 type */
+ SMP_T_STR, /* char string type */
+ SMP_T_BIN, /* buffer type */
+ SMP_T_METH, /* contain method */
+ SMP_TYPES /* number of types, must always be last */
+};
+
+/* Sample sources are used to establish a relation between fetch keywords and
+ * the location where they're about to be used. They're reserved for internal
+ * use and are not meant to be known outside the sample management code.
+ */
+enum {
+ SMP_SRC_CONST, /* constant elements known at configuration time */
+ SMP_SRC_INTRN, /* internal context-less information */
+ SMP_SRC_LISTN, /* listener which accepted the connection */
+ SMP_SRC_FTEND, /* frontend which accepted the connection */
+ SMP_SRC_L4CLI, /* L4 information about the client */
+ SMP_SRC_L5CLI, /* fetch uses client information from embryonic session */
+ SMP_SRC_TRACK, /* fetch involves track counters */
+ SMP_SRC_L6REQ, /* fetch uses raw information from the request buffer */
+ SMP_SRC_HRQHV, /* fetch uses volatile information about HTTP request headers (eg: value) */
+ SMP_SRC_HRQHP, /* fetch uses persistent information about HTTP request headers (eg: meth) */
+ SMP_SRC_HRQBO, /* fetch uses information about HTTP request body */
+ SMP_SRC_BKEND, /* fetch uses information about the backend */
+ SMP_SRC_SERVR, /* fetch uses information about the selected server */
+ SMP_SRC_L4SRV, /* fetch uses information about the server L4 connection */
+ SMP_SRC_L5SRV, /* fetch uses information about the server L5 connection */
+ SMP_SRC_L6RES, /* fetch uses raw information from the response buffer */
+ SMP_SRC_HRSHV, /* fetch uses volatile information about HTTP response headers (eg: value) */
+ SMP_SRC_HRSHP, /* fetch uses persistent information about HTTP response headers (eg: status) */
+ SMP_SRC_HRSBO, /* fetch uses information about HTTP response body */
+ SMP_SRC_RQFIN, /* final information about request buffer (eg: tot bytes) */
+ SMP_SRC_RSFIN, /* final information about response buffer (eg: tot bytes) */
+ SMP_SRC_TXFIN, /* final information about the transaction (eg: #comp rate) */
+ SMP_SRC_SSFIN, /* final information about the stream (eg: #requests, final flags) */
+ SMP_SRC_ENTRIES /* nothing after this */
+};
+
+/* Sample checkpoints are a list of places where samples may be used. This is
+ * an internal enum used only to build SMP_VAL_*.
+ */
+enum {
+ SMP_CKP_FE_CON_ACC, /* FE connection accept rules ("tcp request connection") */
+ SMP_CKP_FE_SES_ACC, /* FE stream accept rules (to come soon) */
+ SMP_CKP_FE_REQ_CNT, /* FE request content rules ("tcp request content") */
+ SMP_CKP_FE_HRQ_HDR, /* FE HTTP request headers (rules, headers, monitor, stats, redirect) */
+ SMP_CKP_FE_HRQ_BDY, /* FE HTTP request body */
+ SMP_CKP_FE_SET_BCK, /* FE backend switching rules ("use_backend") */
+ SMP_CKP_BE_REQ_CNT, /* BE request content rules ("tcp request content") */
+ SMP_CKP_BE_HRQ_HDR, /* BE HTTP request headers (rules, headers, monitor, stats, redirect) */
+ SMP_CKP_BE_HRQ_BDY, /* BE HTTP request body */
+ SMP_CKP_BE_SET_SRV, /* BE server switching rules ("use_server", "balance", "force-persist", "stick", ...) */
+ SMP_CKP_BE_SRV_CON, /* BE server connect (eg: "source") */
+ SMP_CKP_BE_RES_CNT, /* BE response content rules ("tcp response content") */
+ SMP_CKP_BE_HRS_HDR, /* BE HTTP response headers (rules, headers) */
+ SMP_CKP_BE_HRS_BDY, /* BE HTTP response body (stick-store rules are there) */
+ SMP_CKP_BE_STO_RUL, /* BE stick-store rules */
+ SMP_CKP_FE_RES_CNT, /* FE response content rules ("tcp response content") */
+ SMP_CKP_FE_HRS_HDR, /* FE HTTP response headers (rules, headers) */
+ SMP_CKP_FE_HRS_BDY, /* FE HTTP response body */
+ SMP_CKP_FE_LOG_END, /* FE log at the end of the txn/stream */
+ SMP_CKP_BE_CHK_RUL, /* BE tcp-check rules */
+ SMP_CKP_CFG_PARSER, /* config parser (i.e. before boot) */
+ SMP_CKP_CLI_PARSER, /* command line parser */
+ SMP_CKP_ENTRIES /* nothing after this */
+};
+
+/* SMP_USE_* are flags used to declare fetch keywords. Fetch methods are
+ * associated with bitfields composed of these values, generally only one, to
+ * indicate where the contents may be sampled. Some fetches are ambiguous as
+ * they apply to either the request or the response depending on the context,
+ * so they will have 2 of these bits (eg: hdr(), payload(), ...). These are
+ * stored in smp->use.
+ */
+enum {
+ SMP_USE_CONST = 1 << SMP_SRC_CONST, /* constant values known at config time */
+ SMP_USE_INTRN = 1 << SMP_SRC_INTRN, /* internal context-less information */
+ SMP_USE_LISTN = 1 << SMP_SRC_LISTN, /* listener which accepted the connection */
+ SMP_USE_FTEND = 1 << SMP_SRC_FTEND, /* frontend which accepted the connection */
+ SMP_USE_L4CLI = 1 << SMP_SRC_L4CLI, /* L4 information about the client */
+ SMP_USE_L5CLI = 1 << SMP_SRC_L5CLI, /* fetch uses client information from embryonic session */
+ SMP_USE_TRACK = 1 << SMP_SRC_TRACK, /* fetch involves track counters */
+ SMP_USE_L6REQ = 1 << SMP_SRC_L6REQ, /* fetch uses raw information from the request buffer */
+ SMP_USE_HRQHV = 1 << SMP_SRC_HRQHV, /* fetch uses volatile information about HTTP request headers (eg: value) */
+ SMP_USE_HRQHP = 1 << SMP_SRC_HRQHP, /* fetch uses persistent information about HTTP request headers (eg: meth) */
+ SMP_USE_HRQBO = 1 << SMP_SRC_HRQBO, /* fetch uses information about HTTP request body */
+ SMP_USE_BKEND = 1 << SMP_SRC_BKEND, /* fetch uses information about the backend */
+ SMP_USE_SERVR = 1 << SMP_SRC_SERVR, /* fetch uses information about the selected server */
+ SMP_USE_L4SRV = 1 << SMP_SRC_L4SRV, /* fetch uses information about the server L4 connection */
+ SMP_USE_L5SRV = 1 << SMP_SRC_L5SRV, /* fetch uses information about the server L5 connection */
+ SMP_USE_L6RES = 1 << SMP_SRC_L6RES, /* fetch uses raw information from the response buffer */
+ SMP_USE_HRSHV = 1 << SMP_SRC_HRSHV, /* fetch uses volatile information about HTTP response headers (eg: value) */
+ SMP_USE_HRSHP = 1 << SMP_SRC_HRSHP, /* fetch uses persistent information about HTTP response headers (eg: status) */
+ SMP_USE_HRSBO = 1 << SMP_SRC_HRSBO, /* fetch uses information about HTTP response body */
+ SMP_USE_RQFIN = 1 << SMP_SRC_RQFIN, /* final information about request buffer (eg: tot bytes) */
+ SMP_USE_RSFIN = 1 << SMP_SRC_RSFIN, /* final information about response buffer (eg: tot bytes) */
+ SMP_USE_TXFIN = 1 << SMP_SRC_TXFIN, /* final information about the transaction (eg: #comp rate) */
+ SMP_USE_SSFIN = 1 << SMP_SRC_SSFIN, /* final information about the stream (eg: #requests, final flags) */
+
+ /* This composite one is useful to detect if an http_txn needs to be allocated */
+ SMP_USE_HTTP_ANY = SMP_USE_HRQHV | SMP_USE_HRQHP | SMP_USE_HRQBO |
+ SMP_USE_HRSHV | SMP_USE_HRSHP | SMP_USE_HRSBO,
+};
+
+/* Sample validity is computed from the fetch sources above when keywords
+ * are registered. Each fetch method may be used at different locations. The
+ * configuration parser will check whether the fetches are compatible with the
+ * location where they're used. These are stored in smp->val.
+ */
+enum {
+ SMP_VAL___________ = 0, /* Just used as a visual marker */
+ SMP_VAL_FE_CON_ACC = 1 << SMP_CKP_FE_CON_ACC, /* FE connection accept rules ("tcp request connection") */
+ SMP_VAL_FE_SES_ACC = 1 << SMP_CKP_FE_SES_ACC, /* FE stream accept rules (to come soon) */
+ SMP_VAL_FE_REQ_CNT = 1 << SMP_CKP_FE_REQ_CNT, /* FE request content rules ("tcp request content") */
+ SMP_VAL_FE_HRQ_HDR = 1 << SMP_CKP_FE_HRQ_HDR, /* FE HTTP request headers (rules, headers, monitor, stats, redirect) */
+ SMP_VAL_FE_HRQ_BDY = 1 << SMP_CKP_FE_HRQ_BDY, /* FE HTTP request body */
+ SMP_VAL_FE_SET_BCK = 1 << SMP_CKP_FE_SET_BCK, /* FE backend switching rules ("use_backend") */
+ SMP_VAL_BE_REQ_CNT = 1 << SMP_CKP_BE_REQ_CNT, /* BE request content rules ("tcp request content") */
+ SMP_VAL_BE_HRQ_HDR = 1 << SMP_CKP_BE_HRQ_HDR, /* BE HTTP request headers (rules, headers, monitor, stats, redirect) */
+ SMP_VAL_BE_HRQ_BDY = 1 << SMP_CKP_BE_HRQ_BDY, /* BE HTTP request body */
+ SMP_VAL_BE_SET_SRV = 1 << SMP_CKP_BE_SET_SRV, /* BE server switching rules ("use_server", "balance", "force-persist", "stick", ...) */
+ SMP_VAL_BE_SRV_CON = 1 << SMP_CKP_BE_SRV_CON, /* BE server connect (eg: "source") */
+ SMP_VAL_BE_RES_CNT = 1 << SMP_CKP_BE_RES_CNT, /* BE response content rules ("tcp response content") */
+ SMP_VAL_BE_HRS_HDR = 1 << SMP_CKP_BE_HRS_HDR, /* BE HTTP response headers (rules, headers) */
+ SMP_VAL_BE_HRS_BDY = 1 << SMP_CKP_BE_HRS_BDY, /* BE HTTP response body (stick-store rules are there) */
+ SMP_VAL_BE_STO_RUL = 1 << SMP_CKP_BE_STO_RUL, /* BE stick-store rules */
+ SMP_VAL_FE_RES_CNT = 1 << SMP_CKP_FE_RES_CNT, /* FE response content rules ("tcp response content") */
+ SMP_VAL_FE_HRS_HDR = 1 << SMP_CKP_FE_HRS_HDR, /* FE HTTP response headers (rules, headers) */
+ SMP_VAL_FE_HRS_BDY = 1 << SMP_CKP_FE_HRS_BDY, /* FE HTTP response body */
+ SMP_VAL_FE_LOG_END = 1 << SMP_CKP_FE_LOG_END, /* FE log at the end of the txn/stream */
+ SMP_VAL_BE_CHK_RUL = 1 << SMP_CKP_BE_CHK_RUL, /* BE tcp-check rule */
+ SMP_VAL_CFG_PARSER = 1 << SMP_CKP_CFG_PARSER, /* within config parser */
+ SMP_VAL_CLI_PARSER = 1 << SMP_CKP_CLI_PARSER, /* within command line parser */
+
+ /* a few combinations to decide what direction to try to fetch (useful for logs) */
+ SMP_VAL_REQUEST = SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_CHK_RUL,
+
+ SMP_VAL_RESPONSE = SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT | SMP_VAL_BE_HRS_HDR |
+ SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL | SMP_VAL_FE_RES_CNT |
+ SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY | SMP_VAL_FE_LOG_END |
+ SMP_VAL_BE_CHK_RUL,
+};
+
+/* Sample fetch options are passed to sample fetch functions to add precision
+ * about what is desired :
+ * - fetch direction (req/resp)
+ * - intermediary / final fetch
+ */
+enum {
+ SMP_OPT_DIR_REQ = 0, /* direction = request */
+ SMP_OPT_DIR_RES = 1, /* direction = response */
+ SMP_OPT_DIR = (SMP_OPT_DIR_REQ|SMP_OPT_DIR_RES), /* mask to get direction */
+ SMP_OPT_FINAL = 2, /* final fetch, contents won't change anymore */
+ SMP_OPT_ITERATE = 4, /* fetches may be iterated if supported (for ACLs) */
+};
+
+/* Flags used to describe fetched samples. MAY_CHANGE indicates that the result
+ * of the fetch might still evolve, for instance because of more data expected,
+ * even if the fetch has failed. VOL_* indicates how long a result may be cached.
+ */
+enum {
+ SMP_F_NOT_LAST = 1 << 0, /* other occurrences might exist for this sample */
+ SMP_F_MAY_CHANGE = 1 << 1, /* sample is unstable and might change (eg: request length) */
+ SMP_F_VOL_TEST = 1 << 2, /* result must not survive longer than the test (eg: time) */
+ SMP_F_VOL_1ST = 1 << 3, /* result sensitive to changes in first line (eg: URI) */
+ SMP_F_VOL_HDR = 1 << 4, /* result sensitive to changes in headers */
+ SMP_F_VOL_TXN = 1 << 5, /* result sensitive to new transaction (eg: HTTP version) */
+ SMP_F_VOL_SESS = 1 << 6, /* result sensitive to new session (eg: src IP) */
+ SMP_F_VOLATILE = (1<<2)|(1<<3)|(1<<4)|(1<<5)|(1<<6), /* any volatility condition */
+ SMP_F_CONST = 1 << 7, /* This sample uses constant memory. May duplicate it before changes */
+};
+
+/* needed below */
+struct session;
+struct stream;
+struct arg;
+
+/* a sample context might be used by any sample fetch function in order to
+ * store information needed across multiple calls (eg: restart point for a
+ * next occurrence). By definition it may store up to 8 pointers, or any
+ * scalar (double, int, long long).
+ */
+union smp_ctx {
+ void *p; /* any pointer */
+ int i; /* any integer */
+ long long ll; /* any long long or smaller */
+ double d; /* any float or double */
+ void *a[8]; /* any array of up to 8 pointers */
+};
+
+/* a sample is a typed data extracted from a stream. It has a type, contents,
+ * validity constraints, a context for use in iterative calls.
+ */
+struct sample {
+ unsigned int flags; /* SMP_F_* */
+ struct sample_data data;
+ union smp_ctx ctx;
+
+ /* Some sample analyzer (sample-fetch or converters) needs to
+ * know the attached proxy, session and stream. The sample-fetches
+ * and the converters function pointers cannot be called without
+ * these 3 pointers filled.
+ */
+ struct proxy *px;
+ struct session *sess;
+ struct stream *strm; /* WARNING! MAY BE NULL! (eg: tcp-request connection) */
+ unsigned int opt; /* fetch options (SMP_OPT_*) */
+};
+
+/* Descriptor for a sample conversion */
+struct sample_conv {
+ const char *kw; /* configuration keyword */
+ int (*process)(const struct arg *arg_p,
+ struct sample *smp,
+ void *private); /* process function */
+ uint64_t arg_mask; /* arguments (ARG*()) */
+ int (*val_args)(struct arg *arg_p,
+ struct sample_conv *smp_conv,
+ const char *file, int line,
+ char **err_msg); /* argument validation function */
+ unsigned int in_type; /* expected input sample type */
+ unsigned int out_type; /* output sample type */
+ void *private; /* private values. only used by maps and Lua */
+};
+
+/* sample conversion expression */
+struct sample_conv_expr {
+ struct list list; /* member of a sample_expr */
+ struct sample_conv *conv; /* sample conversion used */
+ struct arg *arg_p; /* optional arguments */
+};
+
+/* Descriptor for a sample fetch method */
+struct sample_fetch {
+ const char *kw; /* configuration keyword */
+ int (*process)(const struct arg *arg_p,
+ struct sample *smp,
+ const char *kw, /* fetch processing function */
+ void *private); /* private value. */
+ uint64_t arg_mask; /* arguments (ARG*()) */
+ int (*val_args)(struct arg *arg_p,
+ char **err_msg); /* argument validation function */
+ unsigned long out_type; /* output sample type */
+ unsigned int use; /* fetch source (SMP_USE_*) */
+ unsigned int val; /* fetch validity (SMP_VAL_*) */
+ void *private; /* private values. only used by Lua */
+};
+
+/* sample expression */
+struct sample_expr {
+ struct list list; /* member of list of sample, currently not used */
+ struct sample_fetch *fetch; /* sample fetch method */
+ struct arg *arg_p; /* optional pointer to arguments to fetch function */
+ struct list conv_exprs; /* list of conversion expression to apply */
+};
+
+/* sample fetch keywords list */
+struct sample_fetch_kw_list {
+ struct list list; /* head of sample fetch keyword list */
+ struct sample_fetch kw[VAR_ARRAY]; /* array of sample fetch descriptors */
+};
+
+/* sample conversion keywords list */
+struct sample_conv_kw_list {
+ struct list list; /* head of sample conversion keyword list */
+ struct sample_conv kw[VAR_ARRAY]; /* array of sample conversion descriptors */
+};
+
+typedef int (*sample_cast_fct)(struct sample *smp);
+
+#endif /* _HAPROXY_SAMPLE_T_H */
diff --git a/include/haproxy/sample.h b/include/haproxy/sample.h
new file mode 100644
index 0000000..7e05e78
--- /dev/null
+++ b/include/haproxy/sample.h
@@ -0,0 +1,186 @@
+/*
+ * include/haproxy/sample.h
+ * Functions for samples management.
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SAMPLE_H
+#define _HAPROXY_SAMPLE_H
+
+#include <haproxy/api.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/stick_table-t.h>
+
+extern sample_cast_fct sample_casts[SMP_TYPES][SMP_TYPES];
+extern const unsigned int fetch_cap[SMP_SRC_ENTRIES];
+extern const char *smp_to_type[SMP_TYPES];
+
+struct sample_expr *sample_parse_expr(char **str, int *idx, const char *file, int line, char **err, struct arg_list *al, char **endptr);
+int sample_parse_expr_cnv(char **str, int *idx, char **endptr, char **err_msg, struct arg_list *al, const char *file, int line,
+ struct sample_expr *expr, const char *start);
+struct sample_conv *find_sample_conv(const char *kw, int len);
+struct sample *sample_process(struct proxy *px, struct session *sess,
+ struct stream *strm, unsigned int opt,
+ struct sample_expr *expr, struct sample *p);
+int sample_process_cnv(struct sample_expr *expr, struct sample *p);
+struct sample *sample_fetch_as_type(struct proxy *px, struct session *sess,
+ struct stream *strm, unsigned int opt,
+ struct sample_expr *expr, int smp_type);
+int sample_conv_var2smp(const struct var_desc *var, struct sample *smp, int type);
+int sample_conv_var2smp_sint(const struct arg *arg, struct sample *smp);
+int sample_conv_var2smp_str(const struct arg *arg, struct sample *smp);
+void release_sample_expr(struct sample_expr *expr);
+void sample_register_fetches(struct sample_fetch_kw_list *psl);
+void sample_register_convs(struct sample_conv_kw_list *psl);
+const char *sample_src_names(unsigned int use);
+const char *sample_ckp_names(unsigned int use);
+struct sample_fetch *find_sample_fetch(const char *kw, int len);
+void smp_dump_fetch_kw(void);
+void smp_dump_conv_kw(void);
+struct sample_fetch *sample_fetch_getnext(struct sample_fetch *current, int *idx);
+struct sample_conv *sample_conv_getnext(struct sample_conv *current, int *idx);
+int smp_resolve_args(struct proxy *p, char **err);
+int smp_check_date_unit(struct arg *args, char **err);
+int smp_expr_output_type(struct sample_expr *expr);
+int c_none(struct sample *smp);
+int c_pseudo(struct sample *smp);
+int smp_dup(struct sample *smp);
+
+/*
+ * This function just apply a cast on sample. It returns 0 if the cast is not
+ * available or if the cast fails, otherwise returns 1. It does not modify the
+ * input sample on failure.
+ */
+static inline
+int sample_convert(struct sample *sample, int req_type)
+{
+ if (!sample_casts[sample->data.type][req_type])
+ return 0;
+ if (sample_casts[sample->data.type][req_type] == c_none)
+ return 1;
+ return sample_casts[sample->data.type][req_type](sample);
+}
+
+static inline
+struct sample *smp_set_owner(struct sample *smp, struct proxy *px,
+ struct session *sess, struct stream *strm, int opt)
+{
+ smp->px = px;
+ smp->sess = sess;
+ smp->strm = strm;
+ smp->opt = opt;
+ return smp;
+}
+
+
+/* Returns 1 if a sample may be safely used. It performs a few checks on the
+ * string length versus size, same for the binary version, and ensures that
+ * strings are properly terminated by a zero. If this last point is not granted
+ * but the string is not const, then the \0 is appended. Otherwise it returns 0,
+ * meaning the caller may need to call smp_dup() before going further.
+ */
+static inline
+int smp_is_safe(struct sample *smp)
+{
+ switch (smp->data.type) {
+ case SMP_T_METH:
+ if (smp->data.u.meth.meth != HTTP_METH_OTHER)
+ return 1;
+ __fallthrough;
+
+ case SMP_T_STR:
+ if (!smp->data.u.str.size || smp->data.u.str.data >= smp->data.u.str.size)
+ return 0;
+
+ if (smp->data.u.str.area[smp->data.u.str.data] == 0)
+ return 1;
+
+ if (smp->flags & SMP_F_CONST)
+ return 0;
+
+ smp->data.u.str.area[smp->data.u.str.data] = 0;
+ return 1;
+
+ case SMP_T_BIN:
+ return !smp->data.u.str.size || smp->data.u.str.data <= smp->data.u.str.size;
+
+ default:
+ return 1;
+ }
+}
+
+/* checks that a sample may freely be used, or duplicates it to normalize it.
+ * Returns 1 on success, 0 if the sample must not be used. The function also
+ * checks for NULL to simplify the calling code.
+ */
+static inline
+int smp_make_safe(struct sample *smp)
+{
+ return smp && (smp_is_safe(smp) || smp_dup(smp));
+}
+
+/* Returns 1 if a sample may be safely modified in place. It performs a few
+ * checks on the string length versus size, same for the binary version, and
+ * ensures that strings are properly terminated by a zero, and of course that
+ * the size is allocated and that the SMP_F_CONST flag is not set. If only the
+ * trailing zero is missing, it is appended. Otherwise it returns 0, meaning
+ * the caller may need to call smp_dup() before going further.
+ */
+static inline
+int smp_is_rw(struct sample *smp)
+{
+ if (smp->flags & SMP_F_CONST)
+ return 0;
+
+ switch (smp->data.type) {
+ case SMP_T_METH:
+ if (smp->data.u.meth.meth != HTTP_METH_OTHER)
+ return 1;
+ __fallthrough;
+
+ case SMP_T_STR:
+ if (!smp->data.u.str.size ||
+ smp->data.u.str.data >= smp->data.u.str.size)
+ return 0;
+
+ if (smp->data.u.str.area[smp->data.u.str.data] != 0)
+ smp->data.u.str.area[smp->data.u.str.data] = 0;
+ return 1;
+
+ case SMP_T_BIN:
+ return smp->data.u.str.size &&
+ smp->data.u.str.data <= smp->data.u.str.size;
+
+ default:
+ return 1;
+ }
+}
+
+/* checks that a sample may freely be modified, or duplicates it to normalize
+ * it and make it R/W. Returns 1 on success, 0 if the sample must not be used.
+ * The function also checks for NULL to simplify the calling code.
+ */
+static inline
+int smp_make_rw(struct sample *smp)
+{
+ return smp && (smp_is_rw(smp) || smp_dup(smp));
+}
+
+#endif /* _HAPROXY_SAMPLE_H */
diff --git a/include/haproxy/sample_data-t.h b/include/haproxy/sample_data-t.h
new file mode 100644
index 0000000..2546028
--- /dev/null
+++ b/include/haproxy/sample_data-t.h
@@ -0,0 +1,51 @@
+/*
+ * include/haproxy/sample_data-t.h
+ * Definitions of sample data
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SAMPLE_DATA_T_H
+#define _HAPROXY_SAMPLE_DATA_T_H
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/http-t.h>
+
+/* Note: the strings below make use of chunks. Chunks may carry an allocated
+ * size in addition to the length. The size counts from the beginning (str)
+ * to the end. If the size is unknown, it MUST be zero, in which case the
+ * sample will automatically be duplicated when a change larger than <len> has
+ * to be performed. Thus it is safe to always set size to zero.
+ */
+union sample_value {
+ long long int sint; /* used for signed 64bits integers */
+ struct in_addr ipv4; /* used for ipv4 addresses */
+ struct in6_addr ipv6; /* used for ipv6 addresses */
+ struct buffer str; /* used for char strings or buffers */
+ struct http_meth meth; /* used for http method */
+};
+
+/* Used to store sample constant */
+struct sample_data {
+ int type; /* SMP_T_* */
+ union sample_value u; /* sample data */
+};
+
+#endif /* _HAPROXY_SAMPLE_DATA_T_H */
diff --git a/include/haproxy/sc_strm.h b/include/haproxy/sc_strm.h
new file mode 100644
index 0000000..41f07e9
--- /dev/null
+++ b/include/haproxy/sc_strm.h
@@ -0,0 +1,447 @@
+/*
+ * include/haproxy/sc_strm.h
+ * This file contains stream-specific stream-connector functions prototypes
+ *
+ * Copyright 2022 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SC_STRM_H
+#define _HAPROXY_SC_STRM_H
+
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/connection.h>
+#include <haproxy/channel.h>
+#include <haproxy/session.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+
+void sc_update_rx(struct stconn *sc);
+void sc_update_tx(struct stconn *sc);
+
+struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state);
+int sc_conn_sync_recv(struct stconn *sc);
+void sc_conn_sync_send(struct stconn *sc);
+
+
+/* returns the channel which receives data from this stream connector (input
+ * channel): the response channel for a backend SC, the request channel for a
+ * frontend SC.
+ */
+static inline struct channel *sc_ic(const struct stconn *sc)
+{
+ struct stream *strm = __sc_strm(sc);
+
+ return ((sc->flags & SC_FL_ISBACK) ? &(strm->res) : &(strm->req));
+}
+
+/* returns the channel which feeds data to this stream connector (output
+ * channel): the request channel for a backend SC, the response channel for a
+ * frontend SC.
+ */
+static inline struct channel *sc_oc(const struct stconn *sc)
+{
+ struct stream *strm = __sc_strm(sc);
+
+ return ((sc->flags & SC_FL_ISBACK) ? &(strm->req) : &(strm->res));
+}
+
+/* returns the buffer which receives data from this stream connector (input channel's buffer) */
+static inline struct buffer *sc_ib(const struct stconn *sc)
+{
+ return &sc_ic(sc)->buf;
+}
+
+/* returns the buffer which feeds data to this stream connector (output channel's buffer) */
+static inline struct buffer *sc_ob(const struct stconn *sc)
+{
+ return &sc_oc(sc)->buf;
+}
+/* returns the stream's task associated to this stream connector */
+static inline struct task *sc_strm_task(const struct stconn *sc)
+{
+ struct stream *strm = __sc_strm(sc);
+
+ return strm->task;
+}
+
+/* returns the stream connector on the other side. Used during forwarding. */
+static inline struct stconn *sc_opposite(const struct stconn *sc)
+{
+ struct stream *strm = __sc_strm(sc);
+
+ return ((sc->flags & SC_FL_ISBACK) ? strm->scf : strm->scb);
+}
+
+
+/* sets the current and previous state of a stream connector to <state>. This is
+ * mainly used to create one in the established state on incoming connections.
+ * Note that the previous state is stored on the stream (prev_conn_state).
+ */
+static inline void sc_set_state(struct stconn *sc, int state)
+{
+ sc->state = __sc_strm(sc)->prev_conn_state = state;
+}
+
+/* returns a bit for a stream connector state, to match against SC_SB_* */
+static inline enum sc_state_bit sc_state_bit(enum sc_state state)
+{
+ BUG_ON(state > SC_ST_CLO); /* the state must map to a valid SC_SB_* bit */
+ return 1U << state;
+}
+
+/* returns true if <state> matches one of the SC_SB_* bits in <mask> */
+static inline int sc_state_in(enum sc_state state, enum sc_state_bit mask)
+{
+ BUG_ON(mask & ~SC_SB_ALL); /* catch invalid state masks early */
+ return !!(sc_state_bit(state) & mask);
+}
+
+/* Returns true if a connection is attached to the stream connector <sc> and if this
+ * connection is ready (i.e. both its control and transport layers are ready).
+ */
+static inline int sc_conn_ready(const struct stconn *sc)
+{
+ const struct connection *conn = sc_conn(sc);
+
+ return conn && conn_ctrl_ready(conn) && conn_xprt_ready(conn);
+}
+
+
+/* The stream connector is only responsible for the connection during the early
+ * states, before plugging a mux. Thus it should only care about CO_FL_ERROR
+ * before SC_ST_EST, and after that it must absolutely ignore it since the mux
+ * may hold pending data. This function returns true if such an error was
+ * reported. Both the SC and the CONN must be valid.
+ */
+static inline int sc_is_conn_error(const struct stconn *sc)
+{
+ const struct connection *conn;
+
+ /* once established, errors are the mux's business, not ours */
+ if (sc->state >= SC_ST_EST)
+ return 0;
+
+ conn = __sc_conn(sc);
+ BUG_ON(!conn);
+ return !!(conn->flags & CO_FL_ERROR);
+}
+
+/* Try to allocate a buffer for the stream connector's input channel. It relies on
+ * channel_alloc_buffer() for this so it abides by its rules. It returns 0 on
+ * failure, non-zero otherwise. If no buffer is available, the requester,
+ * represented by the <wait> pointer, will be added in the list of objects
+ * waiting for an available buffer, and SC_FL_NEED_BUFF will be set on the
+ * stream connector and SE_FL_HAVE_NO_DATA cleared. The requester will be responsible
+ * for calling this function to try again once woken up.
+ */
+static inline int sc_alloc_ibuf(struct stconn *sc, struct buffer_wait *wait)
+{
+ int ret;
+
+ ret = channel_alloc_buffer(sc_ic(sc), wait);
+ if (!ret)
+ sc_need_buff(sc); /* report the buffer shortage on the SC */
+ return ret;
+}
+
+
+/* Returns the source address of the stream connector and, if not set, falls back
+ * on the session for a frontend SC and on the server connection for a backend
+ * SC. It returns a const address on success or NULL on failure.
+ */
+static inline const struct sockaddr_storage *sc_src(const struct stconn *sc)
+{
+ if (sc->src)
+ return sc->src;
+ if (!(sc->flags & SC_FL_ISBACK))
+ return sess_src(strm_sess(__sc_strm(sc)));
+ else {
+ struct connection *conn = sc_conn(sc);
+
+ if (conn)
+ return conn_src(conn);
+ }
+ return NULL;
+}
+
+
+/* Returns the destination address of the stream connector and, if not set, falls
+ * back on the session for a frontend SC and on the server connection for a
+ * backend SC. It returns a const address on success or NULL on failure.
+ */
+static inline const struct sockaddr_storage *sc_dst(const struct stconn *sc)
+{
+ if (sc->dst)
+ return sc->dst;
+ if (!(sc->flags & SC_FL_ISBACK))
+ return sess_dst(strm_sess(__sc_strm(sc)));
+ else {
+ struct connection *conn = sc_conn(sc);
+
+ if (conn)
+ return conn_dst(conn);
+ }
+ return NULL;
+}
+
+/* Retrieves the source address of the stream connector. Returns non-zero on success
+ * or zero on failure (no address found or allocation failure). The operation is
+ * only performed once and the address is stored in the stream connector for
+ * future use. On the first call, the stream connector source address is copied
+ * from the session one for a frontend SC and from the server connection for a
+ * backend SC.
+ */
+static inline int sc_get_src(struct stconn *sc)
+{
+ const struct sockaddr_storage *src = NULL;
+
+ if (sc->src)
+ return 1;
+
+ if (!(sc->flags & SC_FL_ISBACK))
+ src = sess_src(strm_sess(__sc_strm(sc)));
+ else {
+ struct connection *conn = sc_conn(sc);
+
+ if (conn)
+ src = conn_src(conn);
+ }
+ if (!src)
+ return 0;
+
+ if (!sockaddr_alloc(&sc->src, src, sizeof(*src)))
+ return 0;
+
+ return 1;
+}
+
+/* Retrieves the destination address of the stream connector. Returns non-zero on
+ * success or zero on failure (no address found or allocation failure). The
+ * operation is only performed once and the address is stored in the stream
+ * connector for future use. On the first call, the stream connector destination
+ * address is copied from the session one for a frontend SC and from the server
+ * connection for a backend SC.
+ */
+static inline int sc_get_dst(struct stconn *sc)
+{
+ const struct sockaddr_storage *dst = NULL;
+
+ if (sc->dst)
+ return 1;
+
+ if (!(sc->flags & SC_FL_ISBACK))
+ dst = sess_dst(strm_sess(__sc_strm(sc)));
+ else {
+ struct connection *conn = sc_conn(sc);
+
+ if (conn)
+ dst = conn_dst(conn);
+ }
+ if (!dst)
+ return 0;
+
+ if (!sockaddr_alloc(&sc->dst, dst, sizeof(*dst)))
+ return 0;
+
+ return 1;
+}
+
+
+/* Marks on the stream connector that next shutdown must kill the whole connection */
+static inline void sc_must_kill_conn(struct stconn *sc)
+{
+ sc_ep_set(sc, SE_FL_KILL_CONN);
+}
+
+
+/* Returns non-zero if the stream connector is allowed to receive from the
+ * endpoint, which means that no flag indicating a blocked channel, lack of
+ * buffer or room is set, and that the endpoint is not waiting for the
+ * application to complete a connection setup on the other side, and that
+ * the stream's channel is not shut for reads. This is only used by stream
+ * applications.
+ */
+__attribute__((warn_unused_result))
+static inline int sc_is_recv_allowed(const struct stconn *sc)
+{
+ if (sc->flags & (SC_FL_ABRT_DONE|SC_FL_EOS))
+ return 0;
+
+ if (sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
+ return 0;
+
+ if (sc_ep_test(sc, SE_FL_HAVE_NO_DATA))
+ return 0;
+
+ /* the producer may fast-forward but the opposite side's iobuf is blocked */
+ if (sc_ep_test(sc, SE_FL_MAY_FASTFWD_PROD) && (sc_opposite(sc)->sedesc->iobuf.flags & IOBUF_FL_FF_BLOCKED))
+ return 0;
+
+ return !(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM));
+}
+
+/* This is to be used after making some room available in a channel. It will
+ * return without doing anything if the stream connector's RX path is blocked.
+ * It will automatically mark the stream connector as busy processing the end
+ * point in order to avoid useless repeated wakeups.
+ * It will then call ->chk_rcv() to enable receipt of new data.
+ */
+static inline void sc_chk_rcv(struct stconn *sc)
+{
+ /* the applet was waiting for the opposite SC to be ready: it now is */
+ if (sc_ep_test(sc, SE_FL_APPLET_NEED_CONN) &&
+ sc_state_in(sc_opposite(sc)->state, SC_SB_RDY|SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
+ sc_ep_clr(sc, SE_FL_APPLET_NEED_CONN);
+ sc_ep_report_read_activity(sc);
+ }
+
+ if (!sc_is_recv_allowed(sc))
+ return;
+
+ if (!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST))
+ return;
+
+ sc_ep_set(sc, SE_FL_HAVE_NO_DATA); /* mark busy to avoid redundant wakeups */
+ if (likely(sc->app_ops->chk_rcv))
+ sc->app_ops->chk_rcv(sc);
+}
+
+/* Calls chk_snd on the endpoint using the data layer */
+static inline void sc_chk_snd(struct stconn *sc)
+{
+ if (likely(sc->app_ops->chk_snd))
+ sc->app_ops->chk_snd(sc);
+}
+
+/* Combines both sc_update_rx() and sc_update_tx() at once */
+static inline void sc_update(struct stconn *sc)
+{
+ sc_update_rx(sc);
+ sc_update_tx(sc);
+}
+
+/* for debugging, reports the stream connector state name (3-letter string) */
+static inline const char *sc_state_str(int state)
+{
+ switch (state) {
+ case SC_ST_INI: return "INI";
+ case SC_ST_REQ: return "REQ";
+ case SC_ST_QUE: return "QUE";
+ case SC_ST_TAR: return "TAR";
+ case SC_ST_ASS: return "ASS";
+ case SC_ST_CON: return "CON";
+ case SC_ST_CER: return "CER";
+ case SC_ST_RDY: return "RDY";
+ case SC_ST_EST: return "EST";
+ case SC_ST_DIS: return "DIS";
+ case SC_ST_CLO: return "CLO";
+ default: return "???"; /* unknown state */
+ }
+}
+
+/* indicates if the connector may send data to the endpoint, that is, the
+ * endpoint is both willing to receive data and ready to do so. This is only
+ * used with applets so there's always a stream attached to this connector.
+ */
+__attribute__((warn_unused_result))
+static inline int sc_is_send_allowed(const struct stconn *sc)
+{
+ if (sc->flags & SC_FL_SHUT_DONE)
+ return 0;
+
+ return !sc_ep_test(sc, SE_FL_WAIT_DATA | SE_FL_WONT_CONSUME);
+}
+
+/* Returns non-zero if the read timeout may still strike for <sc>: reads must
+ * not be finished (EOI/EOS/abort done), voluntarily blocked (won't read,
+ * missing buffer or room), already timed out, nor exempted (applet waiting
+ * for a connection, or no data expected on the opposite side).
+ */
+static inline int sc_rcv_may_expire(const struct stconn *sc)
+{
+ if ((sc->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) || (sc_ic(sc)->flags & CF_READ_TIMEOUT))
+ return 0;
+ if (sc->flags & (SC_FL_EOI|SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
+ return 0;
+ if (sc_ep_test(sc, SE_FL_APPLET_NEED_CONN) || sc_ep_test(sc_opposite(sc), SE_FL_EXP_NO_DATA))
+ return 0;
+ return 1;
+}
+
+/* Returns non-zero if the write timeout may still strike for <sc>: the output
+ * must not be shut nor already timed out, and the endpoint must still be
+ * willing to consume data.
+ */
+static inline int sc_snd_may_expire(const struct stconn *sc)
+{
+ if ((sc->flags & SC_FL_SHUT_DONE) || (sc_oc(sc)->flags & CF_WRITE_TIMEOUT))
+ return 0;
+ if (sc_ep_test(sc, SE_FL_WONT_CONSUME))
+ return 0;
+ return 1;
+}
+
+/* Returns the tick at which the read side of <sc> expires: <lra> (presumably
+ * the last read activity date on the sedesc — confirm against stconn-t.h)
+ * plus the SC's I/O timeout, or TICK_ETERNITY when <lra> is not set or the
+ * read timeout may not strike (see sc_rcv_may_expire()).
+ */
+static forceinline int sc_ep_rcv_ex(const struct stconn *sc)
+{
+ return ((tick_isset(sc->sedesc->lra) && sc_rcv_may_expire(sc))
+ ? tick_add_ifset(sc->sedesc->lra, sc->ioto)
+ : TICK_ETERNITY);
+}
+
+/* Returns the tick at which the write side of <sc> expires: <fsb> (presumably
+ * the first blocked send date on the sedesc — confirm against stconn-t.h)
+ * plus the SC's I/O timeout, or TICK_ETERNITY when <fsb> is not set or the
+ * write timeout may not strike (see sc_snd_may_expire()).
+ */
+static forceinline int sc_ep_snd_ex(const struct stconn *sc)
+{
+ return ((tick_isset(sc->sedesc->fsb) && sc_snd_may_expire(sc))
+ ? tick_add_ifset(sc->sedesc->fsb, sc->ioto)
+ : TICK_ETERNITY);
+}
+
+/* Reports read/write timeouts on the input/output channels (CF_READ_TIMEOUT /
+ * CF_WRITE_TIMEOUT) when the corresponding expiration date is in the past.
+ */
+static inline void sc_check_timeouts(const struct stconn *sc)
+{
+ if (unlikely(tick_is_expired(sc_ep_rcv_ex(sc), now_ms)))
+ sc_ic(sc)->flags |= CF_READ_TIMEOUT;
+ if (unlikely(tick_is_expired(sc_ep_snd_ex(sc), now_ms)))
+ sc_oc(sc)->flags |= CF_WRITE_TIMEOUT;
+}
+
+/* Sets the SC's I/O timeout to the half-closed timeout ("hcto"): the
+ * backend's "timeout server-fin" for an assigned backend SC, or the
+ * frontend's "timeout client-fin" for a frontend SC, when configured.
+ * Does nothing for HTX streams.
+ */
+static inline void sc_set_hcto(struct stconn *sc)
+{
+ struct stream *strm = __sc_strm(sc);
+
+ if (IS_HTX_STRM(strm))
+ return;
+
+ if (sc->flags & SC_FL_ISBACK) {
+ if ((strm->flags & SF_BE_ASSIGNED) && tick_isset(strm->be->timeout.serverfin))
+ sc->ioto = strm->be->timeout.serverfin;
+ }
+ else {
+ if (tick_isset(strm_fe(strm)->timeout.clientfin))
+ sc->ioto = strm_fe(strm)->timeout.clientfin;
+ }
+
+}
+
+/* Schedule an abort for the SC: only raises SC_FL_ABRT_WANTED, the abort
+ * itself is performed later.
+ */
+static inline void sc_schedule_abort(struct stconn *sc)
+{
+ sc->flags |= SC_FL_ABRT_WANTED;
+}
+
+/* Abort the SC and notify the endpoint using the data layer (->abort
+ * callback, if any).
+ */
+static inline void sc_abort(struct stconn *sc)
+{
+ if (likely(sc->app_ops->abort))
+ sc->app_ops->abort(sc);
+}
+
+/* Schedule a shutdown for the SC: only raises SC_FL_SHUT_WANTED, the
+ * shutdown itself is performed later.
+ */
+static inline void sc_schedule_shutdown(struct stconn *sc)
+{
+ sc->flags |= SC_FL_SHUT_WANTED;
+}
+
+/* Shutdown the SC and notify the endpoint using the data layer (->shutdown
+ * callback, if any).
+ */
+static inline void sc_shutdown(struct stconn *sc)
+{
+ if (likely(sc->app_ops->shutdown))
+ sc->app_ops->shutdown(sc);
+}
+
+#endif /* _HAPROXY_SC_STRM_H */
diff --git a/include/haproxy/server-t.h b/include/haproxy/server-t.h
new file mode 100644
index 0000000..666d2cc
--- /dev/null
+++ b/include/haproxy/server-t.h
@@ -0,0 +1,681 @@
+/*
+ * include/haproxy/server-t.h
+ * This file defines everything related to servers.
+ *
+ * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SERVER_T_H
+#define _HAPROXY_SERVER_T_H
+
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/check-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/counters-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/queue-t.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/resolvers-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread-t.h>
+#include <haproxy/event_hdl-t.h>
+#include <haproxy/tools-t.h>
+
+
+/* server states. Only SRV_ST_STOPPED indicates a down server. */
+enum srv_state {
+ SRV_ST_STOPPED = 0, /* the server is down. Please keep set to zero. */
+ SRV_ST_STARTING, /* the server is warming up (up but throttled) */
+ SRV_ST_RUNNING, /* the server is fully up */
+ SRV_ST_STOPPING, /* the server is up but soft-stopping (eg: 404) */
+} __attribute__((packed));
+
+/* Administrative status : a server runs in one of these 3 states :
+ * - READY : normal mode
+ * - DRAIN : takes no new visitor, equivalent to weight == 0
+ * - MAINT : maintenance mode, no more traffic nor health checks.
+ *
+ * Each server may be in maintenance by itself or may inherit this status from
+ * another server it tracks. It can also be in drain mode by itself or inherit
+ * it from another server. Let's store these origins here as flags. These flags
+ * are combined this way :
+ *
+ * FMAINT IMAINT FDRAIN IDRAIN Resulting state
+ * 0 0 0 0 READY
+ * 0 0 0 1 DRAIN
+ * 0 0 1 x DRAIN
+ * 0 1 x x MAINT
+ * 1 x x x MAINT
+ *
+ * This can be simplified this way :
+ *
+ * state_str = (state & MAINT) ? "MAINT" : (state & DRAIN) ? "DRAIN" : "READY"
+ */
+enum srv_admin {
+ SRV_ADMF_FMAINT = 0x01, /* the server was explicitly forced into maintenance */
+ SRV_ADMF_IMAINT = 0x02, /* the server has inherited the maintenance status from a tracked server */
+ SRV_ADMF_MAINT = 0x23, /* mask to check if any maintenance flag is present (FMAINT|IMAINT|RMAINT) */
+ SRV_ADMF_CMAINT = 0x04, /* the server is in maintenance because of the configuration */
+ SRV_ADMF_FDRAIN = 0x08, /* the server was explicitly forced into drain state */
+ SRV_ADMF_IDRAIN = 0x10, /* the server has inherited the drain status from a tracked server */
+ SRV_ADMF_DRAIN = 0x18, /* mask to check if any drain flag is present (FDRAIN|IDRAIN) */
+ SRV_ADMF_RMAINT = 0x20, /* the server is down because of an IP address resolution failure */
+ SRV_ADMF_HMAINT = 0x40, /* the server FQDN has been set from the stats socket */
+} __attribute__((packed));
+
+/* options for servers' "init-addr" parameter
+ * this parameter may be used to drive HAProxy's behavior when parsing a server
+ * address at start up time.
+ * These values are stored as a list into an integer ordered from first to last
+ * starting with the lowest to highest bits. SRV_IADDR_END (0) is used to
+ * indicate the end of the list. 3 bits are enough to store each value.
+ */
+enum srv_initaddr {
+ SRV_IADDR_END = 0, /* end of the list */
+ SRV_IADDR_NONE = 1, /* the server won't have any address at start up */
+ SRV_IADDR_LIBC = 2, /* address set using the libc DNS resolver */
+ SRV_IADDR_LAST = 3, /* we set the IP address found in state-file for this server */
+ SRV_IADDR_IP = 4, /* we set an arbitrary IP address to the server */
+} __attribute__((packed));
+
+/* server-state-file version */
+#define SRV_STATE_FILE_VERSION 1
+#define SRV_STATE_FILE_VERSION_MIN 1
+#define SRV_STATE_FILE_VERSION_MAX 1
+/* space-delimited list of the field names dumped in a server-state file, in
+ * the exact order they appear on each line.
+ */
+#define SRV_STATE_FILE_FIELD_NAMES \
+ "be_id " \
+ "be_name " \
+ "srv_id " \
+ "srv_name " \
+ "srv_addr " \
+ "srv_op_state " \
+ "srv_admin_state " \
+ "srv_uweight " \
+ "srv_iweight " \
+ "srv_time_since_last_change " \
+ "srv_check_status " \
+ "srv_check_result " \
+ "srv_check_health " \
+ "srv_check_state " \
+ "srv_agent_state " \
+ "bk_f_forced_id " \
+ "srv_f_forced_id " \
+ "srv_fqdn " \
+ "srv_port " \
+ "srvrecord " \
+ "srv_use_ssl " \
+ "srv_check_port " \
+ "srv_check_addr " \
+ "srv_agent_addr " \
+ "srv_agent_port"
+
+/* bounds on the number of fields per line, and max line length */
+#define SRV_STATE_FILE_MAX_FIELDS 25
+#define SRV_STATE_FILE_MIN_FIELDS_VERSION_1 20
+#define SRV_STATE_FILE_MAX_FIELDS_VERSION_1 25
+#define SRV_STATE_LINE_MAXLEN 2000
+
+/* server flags -- 32 bits */
+#define SRV_F_BACKUP 0x0001 /* this server is a backup server */
+#define SRV_F_MAPPORTS 0x0002 /* this server uses mapped ports */
+#define SRV_F_NON_STICK 0x0004 /* never add connections allocated to this server to a stick table */
+#define SRV_F_USE_NS_FROM_PP 0x0008 /* use namespace associated with connection if present */
+#define SRV_F_FORCED_ID 0x0010 /* server's ID was forced in the configuration */
+#define SRV_F_RHTTP 0x0020 /* reverse HTTP server which requires idle connection for transfers */
+#define SRV_F_AGENTPORT 0x0040 /* this server has an agent port configured */
+#define SRV_F_AGENTADDR 0x0080 /* this server has an agent addr configured */
+#define SRV_F_COOKIESET 0x0100 /* this server has a cookie configured, so don't generate dynamic cookies */
+#define SRV_F_FASTOPEN 0x0200 /* Use TCP Fast Open to connect to server */
+#define SRV_F_SOCKS4_PROXY 0x0400 /* this server uses SOCKS4 proxy */
+#define SRV_F_NO_RESOLUTION 0x0800 /* disable runtime DNS resolution on this server */
+#define SRV_F_DYNAMIC 0x1000 /* dynamic server instantiated at runtime */
+#define SRV_F_NON_PURGEABLE 0x2000 /* this server cannot be removed at runtime */
+#define SRV_F_DEFSRV_USE_SSL 0x4000 /* default-server uses SSL */
+#define SRV_F_DELETED 0x8000 /* srv is deleted but not yet purged */
+
+/* configured server options for send-proxy (server->pp_opts) */
+#define SRV_PP_V1 0x0001 /* proxy protocol version 1 */
+#define SRV_PP_V2 0x0002 /* proxy protocol version 2 */
+#define SRV_PP_V2_SSL 0x0004 /* proxy protocol version 2 with SSL */
+#define SRV_PP_V2_SSL_CN 0x0008 /* proxy protocol version 2 with CN */
+#define SRV_PP_V2_SSL_KEY_ALG 0x0010 /* proxy protocol version 2 with cert key algorithm */
+#define SRV_PP_V2_SSL_SIG_ALG 0x0020 /* proxy protocol version 2 with cert signature algorithm */
+#define SRV_PP_V2_SSL_CIPHER 0x0040 /* proxy protocol version 2 with cipher used */
+#define SRV_PP_V2_AUTHORITY 0x0080 /* proxy protocol version 2 with authority */
+#define SRV_PP_V2_CRC32C 0x0100 /* proxy protocol version 2 with crc32c */
+#define SRV_PP_V2_UNIQUE_ID 0x0200 /* proxy protocol version 2 with unique ID */
+
+/* functions which act on servers need to return various errors */
+#define SRV_STATUS_OK 0 /* everything is OK. */
+#define SRV_STATUS_INTERNAL 1 /* other unrecoverable errors. */
+#define SRV_STATUS_NOSRV 2 /* no server is available */
+#define SRV_STATUS_FULL 3 /* the/all server(s) are saturated */
+#define SRV_STATUS_QUEUED 4 /* the/all server(s) are saturated but the connection was queued */
+
+/* various weight-related constants */
+#define SRV_UWGHT_RANGE 256
+#define SRV_UWGHT_MAX (SRV_UWGHT_RANGE)
+#define SRV_EWGHT_RANGE (SRV_UWGHT_RANGE * BE_WEIGHT_SCALE)
+#define SRV_EWGHT_MAX (SRV_UWGHT_MAX * BE_WEIGHT_SCALE)
+
+/* server ssl options */
+#define SRV_SSL_O_NONE 0x0000
+#define SRV_SSL_O_NO_TLS_TICKETS 0x0100 /* disable session resumption tickets */
+#define SRV_SSL_O_NO_REUSE 0x200 /* disable session reuse */
+#define SRV_SSL_O_EARLY_DATA 0x400 /* Allow using early data */
+
+/* protocols used by log servers (rings) to frame emitted messages */
+enum srv_log_proto {
+ SRV_LOG_PROTO_LEGACY, // messages on TCP separated by LF
+ SRV_LOG_PROTO_OCTET_COUNTING, // TCP frames: MSGLEN SP MSG
+};
+
+/* srv administrative state change causes */
+enum srv_adm_st_chg_cause {
+ SRV_ADM_STCHGC_NONE = 0,
+ SRV_ADM_STCHGC_DNS_NOENT, /* entry removed from srv record */
+ SRV_ADM_STCHGC_DNS_NOIP, /* no server ip in the srv record */
+ SRV_ADM_STCHGC_DNS_NX, /* resolution spent too much time in NX state */
+ SRV_ADM_STCHGC_DNS_TIMEOUT, /* resolution timeout */
+ SRV_ADM_STCHGC_DNS_REFUSED, /* query refused by dns server */
+ SRV_ADM_STCHGC_DNS_UNSPEC, /* unspecified dns error */
+ SRV_ADM_STCHGC_STATS_DISABLE, /* legacy disable from the stats */
+ SRV_ADM_STCHGC_STATS_STOP /* legacy stop from the stats */
+};
+
+/* srv operational state change causes */
+enum srv_op_st_chg_cause {
+ SRV_OP_STCHGC_NONE = 0,
+ SRV_OP_STCHGC_HEALTH, /* changed from a health check */
+ SRV_OP_STCHGC_AGENT, /* changed from an agent check */
+ SRV_OP_STCHGC_CLI, /* changed from the cli */
+ SRV_OP_STCHGC_LUA, /* changed from lua */
+ SRV_OP_STCHGC_STATS_WEB, /* changed from the web interface */
+ SRV_OP_STCHGC_STATEFILE /* changed from state file */
+};
+
+/* Tracks a child process: its pid, the task associated with it (NOTE(review):
+ * presumably the task in charge of monitoring/reaping it — confirm with
+ * callers), its exit status and whether it already exited.
+ */
+struct pid_list {
+ struct list list;
+ pid_t pid;
+ struct task *t;
+ int status; /* process exit status */
+ int exited; /* non-zero once the process has exited */
+};
+
+/* A tree occurrence is a descriptor of a place in a tree, with a pointer back
+ * to the server itself.
+ */
+struct server;
+struct tree_occ {
+ struct server *server;
+ struct eb32_node node;
+};
+
+/* Each server will have one occurrence of this structure per thread */
+struct srv_per_thread {
+ struct mt_list streams; /* streams using this server (used by "shutdown server sessions") */
+ struct eb_root idle_conns; /* Shareable idle connections */
+ struct eb_root safe_conns; /* Safe idle connections */
+ struct eb_root avail_conns; /* Connections in use, but with still new streams available */
+
+ /* Secondary idle conn storage used in parallel to idle/safe trees.
+ * Used to sort them by last usage and purge them in reverse order.
+ */
+ struct list idle_conn_list;
+};
+
+/* Each server will have one occurrence of this structure per thread group */
+struct srv_per_tgroup {
+ unsigned int next_takeover; /* thread ID to try to steal connections from next time */
+};
+
+/* Configure the protocol selection for websocket */
+enum __attribute__((__packed__)) srv_ws_mode {
+ SRV_WS_AUTO = 0,
+ SRV_WS_H1,
+ SRV_WS_H2,
+};
+
+/* Server-side TLV list, contains the types of the TLVs that should be sent out.
+ * Additionally, it can contain a format string, if specified in the config.
+ */
+struct srv_pp_tlv_list {
+ struct list list;
+ struct list fmt;
+ char *fmt_string;
+ unsigned char type;
+};
+
+struct proxy;
+struct server {
+ /* mostly config or admin stuff, doesn't change often */
+ enum obj_type obj_type; /* object type == OBJ_TYPE_SERVER */
+ enum srv_state next_state, cur_state; /* server state among SRV_ST_* */
+ enum srv_admin next_admin, cur_admin; /* server maintenance status : SRV_ADMF_* */
+ signed char use_ssl; /* ssl enabled (1: on, 0: disabled, -1 forced off) */
+ unsigned int flags; /* server flags (SRV_F_*) */
+ unsigned int pp_opts; /* proxy protocol options (SRV_PP_*) */
+ struct list global_list; /* attach point in the global servers_list */
+ struct server *next;
+ struct mt_list prev_deleted; /* deleted servers with 'next' ptr pointing to us */
+ int cklen; /* the len of the cookie, to speed up checks */
+ int rdr_len; /* the length of the redirection prefix */
+ char *cookie; /* the id set in the cookie */
+ char *rdr_pfx; /* the redirection prefix */
+
+ struct proxy *proxy; /* the proxy this server belongs to */
+ const struct mux_proto_list *mux_proto; /* the mux to use for all outgoing connections (specified by the "proto" keyword) */
+ struct net_addr_type addr_type; /* server address type (socket and transport hints) */
+ struct log_target *log_target; /* when 'mode log' is enabled, target facility used to transport log messages */
+ unsigned maxconn, minconn; /* max # of active sessions (0 = unlimited), min# for dynamic limit. */
+ struct srv_per_thread *per_thr; /* array of per-thread stuff such as connections lists */
+ struct srv_per_tgroup *per_tgrp; /* array of per-tgroup stuff such as idle conns */
+ unsigned int *curr_idle_thr; /* Current number of orphan idling connections per thread */
+
+ unsigned int pool_purge_delay; /* Delay before starting to purge the idle conns pool */
+ unsigned int low_idle_conns; /* min idle connection count to start picking from other threads */
+ unsigned int max_idle_conns; /* Max number of connection allowed in the orphan connections list */
+ int max_reuse; /* Max number of requests on a same connection */
+ struct task *warmup; /* the task dedicated to the warmup when slowstart is set */
+
+ struct server *track; /* the server we're currently tracking, if any */
+ struct server *trackers; /* the list of servers tracking us, if any */
+ struct server *tracknext; /* next server tracking <track> in <track>'s trackers list */
+ char *trackit; /* temporary variable to make assignment deferrable */
+ int consecutive_errors_limit; /* number of consecutive errors that triggers an event */
+ short observe, onerror; /* observing mode: one of HANA_OBS_*; what to do on error: on of ANA_ONERR_* */
+ short onmarkeddown; /* what to do when marked down: one of HANA_ONMARKEDDOWN_* */
+ short onmarkedup; /* what to do when marked up: one of HANA_ONMARKEDUP_* */
+ int slowstart; /* slowstart time in seconds (ms in the conf) */
+
+ char *id; /* just for identification */
+ uint32_t rid; /* revision: if id has been reused for a new server, rid won't match */
+ unsigned iweight,uweight, cur_eweight; /* initial weight, user-specified weight, and effective weight */
+ unsigned wscore; /* weight score, used during srv map computation */
+ unsigned next_eweight; /* next pending eweight to commit */
+ unsigned rweight; /* remainder of weight in the current LB tree */
+ unsigned cumulative_weight; /* weight of servers prior to this one in the same group, for chash balancing */
+ int maxqueue; /* maximum number of pending connections allowed */
+ int shard; /* shard (in peers protocol context only) */
+ int log_bufsize; /* implicit ring bufsize (for log server only - in log backend) */
+
+ enum srv_ws_mode ws; /* configure the protocol selection for websocket */
+ /* 3 bytes hole here */
+
+ uint refcount; /* refcount used to remove a server at runtime */
+
+ /* The elements below may be changed on every single request by any
+ * thread, and generally at the same time.
+ */
+ THREAD_PAD(63);
+ struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
+ unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
+ unsigned int curr_idle_nb; /* Current number of connections in the idle list */
+ unsigned int curr_safe_nb; /* Current number of connections in the safe list */
+ unsigned int curr_used_conns; /* Current number of used connections */
+ unsigned int max_used_conns; /* Max number of used connections (the counter is reset at each connection purges */
+ unsigned int est_need_conns; /* Estimate on the number of needed connections (max of curr and previous max_used) */
+
+ struct queue queue; /* pending connections */
+
+ /* Elements below are used by LB algorithms and must be usable in
+ * parallel to other threads reusing connections above.
+ */
+ THREAD_PAD(63);
+ __decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
+ unsigned npos, lpos; /* next and last positions in the LB tree, protected by LB lock */
+ union {
+ struct eb32_node lb_node; /* node used for tree-based load balancing */
+ struct list lb_list; /* elem used for list-based load balancing */
+ };
+ struct server *next_full; /* next server in the temporary full list */
+
+ /* usually atomically updated by any thread during parsing or on end of request */
+ THREAD_PAD(63);
+ int cur_sess; /* number of currently active sessions (including syn_sent) */
+ int served; /* # of active sessions currently being served (ie not pending) */
+ int consecutive_errors; /* current number of consecutive errors */
+ struct freq_ctr sess_per_sec; /* sessions per second on this server */
+ struct be_counters counters; /* statistics counters */
+
+ /* Below are some relatively stable settings, only changed under the lock */
+ THREAD_PAD(63);
+
+ struct eb_root *lb_tree; /* we want to know in what tree the server is */
+ struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */
+ unsigned lb_nodes_tot; /* number of allocated lb_nodes (C-HASH) */
+ unsigned lb_nodes_now; /* number of lb_nodes placed in the tree (C-HASH) */
+
+ const struct netns_entry *netns; /* contains network namespace name or NULL. Network namespace comes from configuration */
+ struct xprt_ops *xprt; /* transport-layer operations */
+ unsigned int svc_port; /* the port to connect to (for relevant families) */
+ unsigned down_time; /* total time the server was down */
+ time_t last_change; /* last time, when the state was changed */
+
+ int puid; /* proxy-unique server ID, used for SNMP, and "first" LB algo */
+ int tcp_ut; /* for TCP, user timeout */
+
+ int do_check; /* temporary variable used during parsing to denote if health checks must be enabled */
+ int do_agent; /* temporary variable used during parsing to denote if an auxiliary agent check must be enabled */
+ struct check check; /* health-check specific configuration */
+ struct check agent; /* agent specific configuration */
+
+ struct resolv_requester *resolv_requester; /* used to link a server to its DNS resolution */
+ char *resolvers_id; /* resolvers section used by this server */
+ struct resolvers *resolvers; /* pointer to the resolvers structure used by this server */
+ char *lastaddr; /* the address string provided by the server-state file */
+ struct resolv_options resolv_opts;
+ int hostname_dn_len; /* string length of the server hostname in Domain Name format */
+ char *hostname_dn; /* server hostname in Domain Name format */
+ char *hostname; /* server hostname */
+ struct sockaddr_storage init_addr; /* plain IP address specified on the init-addr line */
+ unsigned int init_addr_methods; /* initial address setting, 3-bit per method, ends at 0, enough to store 10 entries */
+ enum srv_log_proto log_proto; /* used proto to emit messages on server lines from log or ring section */
+
+ char *sni_expr; /* Temporary variable to store a sample expression for SNI */
+ struct {
+ void *ctx;
+ struct {
+ /* ptr/size may be shared R/O with other threads under read lock
+ * "sess_lock", however only the owning thread may change them
+ * (under write lock).
+ */
+ unsigned char *ptr;
+ int size;
+ int allocated_size;
+ char *sni; /* SNI used for the session */
+ __decl_thread(HA_RWLOCK_T sess_lock);
+ } * reused_sess;
+ uint last_ssl_sess_tid; /* last tid+1 having updated reused_sess (0=none, >0=tid+1) */
+
+ struct ckch_inst *inst; /* Instance of the ckch_store in which the certificate was loaded (might be null if server has no certificate) */
+ __decl_thread(HA_RWLOCK_T lock); /* lock the cache and SSL_CTX during commit operations */
+
+ char *ciphers; /* cipher suite to use if non-null */
+ char *ciphersuites; /* TLS 1.3 cipher suite to use if non-null */
+ char *curves; /* TLS curves list */
+ int options; /* ssl options */
+ int verify; /* verify method (set of SSL_VERIFY_* flags) */
+ struct tls_version_filter methods; /* ssl methods */
+ char *verify_host; /* hostname of certificate must match this host */
+ char *ca_file; /* CAfile to use on verify */
+ char *crl_file; /* CRLfile to use on verify */
+ char *client_crt; /* client certificate to send */
+ char *sigalgs; /* Signature algorithms */
+ char *client_sigalgs; /* Client Signature algorithms */
+ struct sample_expr *sni; /* sample expression for SNI */
+ char *npn_str; /* NPN protocol string */
+ int npn_len; /* NPN protocol string length */
+ char *alpn_str; /* ALPN protocol string */
+ int alpn_len; /* ALPN protocol string length */
+ } ssl_ctx;
+ struct resolv_srvrq *srvrq; /* Pointer representing the DNS SRV request, if any */
+ struct list srv_rec_item; /* to attach server to a srv record item */
+ struct list ip_rec_item; /* to attach server to a A or AAAA record item */
+ struct ebpt_node host_dn; /* hostdn store for srvrq and state file matching */
+ struct list pp_tlvs; /* to send out PROXY protocol v2 TLVs */
+ struct task *srvrq_check; /* Task testing SRV record expiration date for this server */
+ struct {
+ const char *file; /* file where the section appears */
+ struct eb32_node id; /* place in the tree of used IDs */
+ struct ebpt_node name; /* place in the tree of used names */
+ int line; /* line where the section appears */
+ } conf; /* config information */
+ struct ebpt_node addr_node; /* Node for string representation of address for the server (including port number) */
+ /* Template information used only for server objects which
+ * serve as template filled at parsing time and used during
+ * server allocations from server templates.
+ */
+ struct {
+ char *prefix;
+ int nb_low;
+ int nb_high;
+ } tmpl_info;
+
+ event_hdl_sub_list e_subs; /* event_hdl: server's subscribers list (atomically updated) */
+
+ /* warning, these structs are huge, keep them at the bottom */
+ struct conn_src conn_src; /* connection source settings */
+ struct sockaddr_storage addr; /* the address to connect to, doesn't include the port */
+ struct sockaddr_storage socks4_addr; /* the address of the SOCKS4 Proxy, including the port */
+
+ EXTRA_COUNTERS(extra_counters);
+};
+
+/* data provided to EVENT_HDL_SUB_SERVER handlers through event_hdl facility */
+struct event_hdl_cb_data_server {
+ /* provided by:
+ * EVENT_HDL_SUB_SERVER_ADD
+ * EVENT_HDL_SUB_SERVER_DEL
+ * EVENT_HDL_SUB_SERVER_UP
+ * EVENT_HDL_SUB_SERVER_DOWN
+ * EVENT_HDL_SUB_SERVER_STATE
+ * EVENT_HDL_SUB_SERVER_ADMIN
+ * EVENT_HDL_SUB_SERVER_CHECK
+ * EVENT_HDL_SUB_SERVER_INETADDR
+ */
+ struct {
+ /* safe data can be safely used from both
+ * sync and async handlers
+ * data consistency is guaranteed
+ */
+ char name[64]; /* server name/id */
+ char proxy_name[64]; /* id of proxy the server belongs to */
+ int proxy_uuid; /* uuid of the proxy the server belongs to */
+ int puid; /* proxy-unique server ID */
+ uint32_t rid; /* server id revision */
+ unsigned int flags; /* server flags */
+ } safe;
+ struct {
+ /* unsafe data may only be used from sync handlers:
+ * in async mode, data consistency cannot be guaranteed
+ * and unsafe data may already be stale, thus using
+ * it is highly discouraged because it
+ * could lead to undefined behavior (UAF, null dereference...)
+ */
+ struct server *ptr; /* server live ptr */
+ /* lock hints */
+ uint8_t thread_isolate; /* 1 = thread_isolate is on, no locking required */
+ uint8_t srv_lock; /* 1 = srv lock is held */
+ } unsafe;
+};
+
+/* check result snapshot provided through some event_hdl server events */
+struct event_hdl_cb_data_server_checkres {
+ uint8_t agent; /* 1 = agent check, 0 = health check */
+ enum chk_result result; /* failed, passed, condpass (CHK_RES_*) */
+ long duration; /* total check duration in ms */
+ struct {
+ short status; /* check status as in check->status */
+ short code; /* provided with some check statuses */
+ } reason;
+ struct {
+ int cur; /* dynamic (= check->health) */
+ int rise, fall; /* config dependent */
+ } health; /* check's health, see check-t.h */
+};
+
+/* data provided to EVENT_HDL_SUB_SERVER_STATE handlers through
+ * event_hdl facility
+ *
+ * Note that this may be cast to regular event_hdl_cb_data_server if
+ * you don't care about state related optional info
+ */
+struct event_hdl_cb_data_server_state {
+ /* provided by:
+ * EVENT_HDL_SUB_SERVER_STATE
+ */
+ struct event_hdl_cb_data_server server; /* must be at the beginning */
+ struct {
+ uint8_t type; /* 0 = operational, 1 = administrative */
+ enum srv_state old_state, new_state; /* updated by both operational and admin changes */
+ uint32_t requeued; /* requeued connections due to server state change */
+ union {
+ /* state change cause:
+ *
+ * look for op_st_chg for operational state change,
+ * and adm_st_chg for administrative state change
+ */
+ struct {
+ enum srv_op_st_chg_cause cause;
+ union {
+ /* check result is provided with
+ * cause == SRV_OP_STCHGC_HEALTH or cause == SRV_OP_STCHGC_AGENT
+ */
+ struct event_hdl_cb_data_server_checkres check;
+ };
+ } op_st_chg;
+ struct {
+ enum srv_adm_st_chg_cause cause;
+ } adm_st_chg;
+ };
+ } safe;
+ /* no unsafe data */
+};
+
+/* data provided to EVENT_HDL_SUB_SERVER_ADMIN handlers through
+ * event_hdl facility
+ *
+ * Note that this may be cast to regular event_hdl_cb_data_server if
+ * you don't care about admin related optional info
+ */
+struct event_hdl_cb_data_server_admin {
+ /* provided by:
+ * EVENT_HDL_SUB_SERVER_ADMIN
+ */
+ struct event_hdl_cb_data_server server; /* must be at the beginning */
+ struct {
+ enum srv_admin old_admin, new_admin;
+ uint32_t requeued; /* requeued connections due to server admin change */
+ /* admin change cause */
+ enum srv_adm_st_chg_cause cause;
+ } safe;
+ /* no unsafe data */
+};
+
+/* data provided to EVENT_HDL_SUB_SERVER_CHECK handlers through
+ * event_hdl facility
+ *
+ * Note that this may be cast to regular event_hdl_cb_data_server if
+ * you don't care about check related optional info
+ */
+struct event_hdl_cb_data_server_check {
+ /* provided by:
+ * EVENT_HDL_SUB_SERVER_CHECK
+ */
+ struct event_hdl_cb_data_server server; /* must be at the beginning */
+ struct {
+ struct event_hdl_cb_data_server_checkres res; /* check result snapshot */
+ } safe;
+ struct {
+ struct check *ptr; /* check ptr */
+ } unsafe;
+};
+
+/* struct to store server address and port information in INET
+ * context
+ */
+struct server_inetaddr {
+ int family; /* AF_UNSPEC, AF_INET or AF_INET6 */
+ union {
+ struct in_addr v4;
+ struct in6_addr v6;
+ } addr; /* may hold v4 or v6 addr */
+ struct {
+ unsigned int svc;
+ uint8_t map; /* is a mapped port? (boolean) */
+ } port;
+};
+
+/* data provided to EVENT_HDL_SUB_SERVER_INETADDR handlers through
+ * event_hdl facility
+ *
+ * Note that this may be cast to regular event_hdl_cb_data_server if
+ * you don't care about inetaddr related optional info
+ */
+struct event_hdl_cb_data_server_inetaddr {
+ /* provided by:
+ * EVENT_HDL_SUB_SERVER_INETADDR
+ */
+ struct event_hdl_cb_data_server server; /* must be at the beginning */
+ struct {
+ struct server_inetaddr prev;
+ struct server_inetaddr next;
+ uint8_t purge_conn; /* set to 1 if the network change will force a connection cleanup */
+ } safe;
+ /* no unsafe data */
+};
+
+/* Storage structure to load server-state lines from a flat file into
+ * an ebtree, for faster processing
+ */
+struct server_state_line {
+ char *line;
+ char *params[SRV_STATE_FILE_MAX_FIELDS];
+ struct eb64_node node;
+};
+
+
+/* Descriptor for a "server" keyword. The ->parse() function returns 0 in case of
+ * success, or a combination of ERR_* flags if an error is encountered. The
+ * function pointer can be NULL if not implemented. The function also has an
+ * access to the current "server" config line. The ->skip value tells the parser
+ * how many words have to be skipped after the keyword. If the function needs to
+ * parse more keywords, it needs to update cur_arg.
+ */
+struct srv_kw {
+ const char *kw;
+ int (*parse)(char **args, int *cur_arg, struct proxy *px, struct server *srv, char **err);
+ int skip; /* nb min of args to skip, for use when kw is not handled */
+ int default_ok; /* non-zero if kw is supported in default-server section */
+ int dynamic_ok; /* non-zero if kw is supported in add server cli command */
+};
+
+/*
+ * A keyword list. It is a NULL-terminated array of keywords. It embeds a
+ * struct list in order to be linked to other lists, allowing it to easily
+ * be declared where it is needed, and linked without duplicating data nor
+ * allocating memory. It is also possible to indicate a scope for the keywords.
+ */
+struct srv_kw_list {
+ const char *scope;
+ struct list list;
+ struct srv_kw kw[VAR_ARRAY];
+};
+
+#define SRV_PARSE_DEFAULT_SERVER 0x01 /* 'default-server' keyword */
+#define SRV_PARSE_TEMPLATE 0x02 /* 'server-template' keyword */
+#define SRV_PARSE_IN_PEER_SECTION 0x04 /* keyword in a peer section */
+#define SRV_PARSE_PARSE_ADDR 0x08 /* required to parse the server address in the second argument */
+#define SRV_PARSE_DYNAMIC 0x10 /* dynamic server created at runtime with cli */
+#define SRV_PARSE_INITIAL_RESOLVE 0x20 /* resolve immediately the fqdn to an ip address */
+
+#endif /* _HAPROXY_SERVER_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/server.h b/include/haproxy/server.h
new file mode 100644
index 0000000..2ba6e45
--- /dev/null
+++ b/include/haproxy/server.h
@@ -0,0 +1,328 @@
+/*
+ * include/haproxy/server.h
+ * This file defines everything related to servers.
+ *
+ * Copyright (C) 2000-2009 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SERVER_H
+#define _HAPROXY_SERVER_H
+
+#include <unistd.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/resolvers-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/task.h>
+#include <haproxy/thread-t.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+
+__decl_thread(extern HA_SPINLOCK_T idle_conn_srv_lock);
+extern struct idle_conns idle_conns[MAX_THREADS];
+extern struct task *idle_conn_task;
+extern struct list servers_list;
+extern struct dict server_key_dict;
+
+int srv_downtime(const struct server *s);
+int srv_lastsession(const struct server *s);
+int srv_getinter(const struct check *check);
+void srv_settings_cpy(struct server *srv, const struct server *src, int srv_tmpl);
+int parse_server(const char *file, int linenum, char **args, struct proxy *curproxy, const struct proxy *defproxy, int parse_flags);
+int srv_update_addr(struct server *s, void *ip, int ip_sin_family, const char *updater);
+int server_parse_sni_expr(struct server *newsrv, struct proxy *px, char **err);
+const char *srv_update_addr_port(struct server *s, const char *addr, const char *port, char *updater);
+const char *srv_update_check_addr_port(struct server *s, const char *addr, const char *port);
+const char *srv_update_agent_addr_port(struct server *s, const char *addr, const char *port);
+struct server *server_find_by_id(struct proxy *bk, int id);
+struct server *server_find_by_name(struct proxy *bk, const char *name);
+struct server *server_find_best_match(struct proxy *bk, char *name, int id, int *diff);
+void apply_server_state(void);
+void srv_compute_all_admin_states(struct proxy *px);
+int srv_set_addr_via_libc(struct server *srv, int *err_code);
+int srv_init_addr(void);
+struct server *cli_find_server(struct appctx *appctx, char *arg);
+struct server *new_server(struct proxy *proxy);
+void srv_take(struct server *srv);
+struct server *srv_drop(struct server *srv);
+void srv_free_params(struct server *srv);
+int srv_init_per_thr(struct server *srv);
+void srv_set_ssl(struct server *s, int use_ssl);
+const char *srv_adm_st_chg_cause(enum srv_adm_st_chg_cause cause);
+const char *srv_op_st_chg_cause(enum srv_op_st_chg_cause cause);
+void srv_event_hdl_publish_check(struct server *srv, struct check *check);
+
+/* functions related to server name resolution */
+int srv_prepare_for_resolution(struct server *srv, const char *hostname);
+int srvrq_update_srv_status(struct server *s, int has_no_ip);
+int snr_update_srv_status(struct server *s, int has_no_ip);
+int srv_set_fqdn(struct server *srv, const char *fqdn, int resolv_locked);
+const char *srv_update_fqdn(struct server *server, const char *fqdn, const char *updater, int dns_locked);
+int snr_resolution_cb(struct resolv_requester *requester, struct dns_counters *counters);
+int srvrq_resolution_error_cb(struct resolv_requester *requester, int error_code);
+int snr_resolution_error_cb(struct resolv_requester *requester, int error_code);
+struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family);
+struct task *srv_cleanup_idle_conns(struct task *task, void *ctx, unsigned int state);
+void srv_release_conn(struct server *srv, struct connection *conn);
+struct connection *srv_lookup_conn(struct eb_root *tree, uint64_t hash);
+struct connection *srv_lookup_conn_next(struct connection *conn);
+
+void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe);
+int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe);
+void srv_add_to_avail_list(struct server *srv, struct connection *conn);
+struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state);
+
+int srv_apply_track(struct server *srv, struct proxy *curproxy);
+
+/*
+ * Registers the server keyword list <kwl> as a list of valid keywords for next
+ * parsing sessions.
+ */
+void srv_register_keywords(struct srv_kw_list *kwl);
+
+/* Return a pointer to the server keyword <kw>, or NULL if not found. */
+struct srv_kw *srv_find_kw(const char *kw);
+
+/* Dumps all registered "server" keywords to the <out> string pointer. */
+void srv_dump_kws(char **out);
+
+/* Recomputes the server's eweight based on its state, uweight, the current time,
+ * and the proxy's algorithm. To be used after updating sv->uweight. The warmup
+ * state is automatically disabled once the warmup time has elapsed.
+ */
+void server_recalc_eweight(struct server *sv, int must_update);
+
+/*
+ * Parses weight_str and configures sv accordingly.
+ * Returns NULL on success, error message string otherwise.
+ */
+const char *server_parse_weight_change_request(struct server *sv,
+ const char *weight_str);
+
+/*
+ * Parses addr_str and configures sv accordingly. updater precise
+ * the source of the change in the associated message log.
+ * Returns NULL on success, error message string otherwise.
+ */
+const char *server_parse_addr_change_request(struct server *sv,
+ const char *addr_str, const char *updater);
+
+/*
+ * Parses maxconn_str and configures sv accordingly.
+ * Returns NULL on success, error message string otherwise.
+ */
+const char *server_parse_maxconn_change_request(struct server *sv,
+ const char *maxconn_str);
+
+/* Shutdown all connections of a server. The caller must pass a termination
+ * code in <why>, which must be one of SF_ERR_* indicating the reason for the
+ * shutdown.
+ */
+void srv_shutdown_streams(struct server *srv, int why);
+
+/* Shutdown all connections of all backup servers of a proxy. The caller must
+ * pass a termination code in <why>, which must be one of SF_ERR_* indicating
+ * the reason for the shutdown.
+ */
+void srv_shutdown_backup_streams(struct proxy *px, int why);
+
+void srv_append_status(struct buffer *msg, struct server *s, struct check *,
+ int xferred, int forced);
+
+void srv_set_stopped(struct server *s, enum srv_op_st_chg_cause cause);
+void srv_set_running(struct server *s, enum srv_op_st_chg_cause cause);
+void srv_set_stopping(struct server *s, enum srv_op_st_chg_cause cause);
+
+/* Enables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
+ * enforce either maint mode or drain mode. It is not allowed to set more than
+ * one flag at once. The equivalent "inherited" flag is propagated to all
+ * tracking servers. Maintenance mode disables health checks (but not agent
+ * checks). When either the flag is already set or no flag is passed, nothing
+ * is done. If <cause> is non-null, it will be displayed at the end of the log
+ * lines to justify the state change.
+ */
+void srv_set_admin_flag(struct server *s, enum srv_admin mode, enum srv_adm_st_chg_cause cause);
+
+/* Disables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
+ * stop enforcing either maint mode or drain mode. It is not allowed to set more
+ * than one flag at once. The equivalent "inherited" flag is propagated to all
+ * tracking servers. Leaving maintenance mode re-enables health checks. When
+ * either the flag is already cleared or no flag is passed, nothing is done.
+ */
+void srv_clr_admin_flag(struct server *s, enum srv_admin mode);
+
+/* Calculates the dynamic persistent cookie for a server, if a secret key has
+ * been provided.
+ */
+void srv_set_dyncookie(struct server *s);
+
+int srv_check_reuse_ws(struct server *srv);
+const struct mux_ops *srv_get_ws_proto(struct server *srv);
+
+/* increase the number of cumulated connections on the designated server */
+static inline void srv_inc_sess_ctr(struct server *s)
+{
+ _HA_ATOMIC_INC(&s->counters.cum_sess);
+ HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
+ update_freq_ctr(&s->sess_per_sec, 1));
+}
+
+/* set the time of last session on the designated server */
+static inline void srv_set_sess_last(struct server *s)
+{
+ s->counters.last_sess = ns_to_sec(now_ns);
+}
+
+/* returns the current server throttle rate between 0 and 100% */
+static inline unsigned int server_throttle_rate(struct server *sv)
+{
+ struct proxy *px = sv->proxy;
+
+ /* when uweight is 0, we're in soft-stop so that cannot be a slowstart,
+ * thus the throttle is 100%.
+ */
+ if (!sv->uweight)
+ return 100;
+
+ return (100U * px->lbprm.wmult * sv->cur_eweight + px->lbprm.wdiv - 1) / (px->lbprm.wdiv * sv->uweight);
+}
+
+/*
+ * Return true if the server has a zero user-weight, meaning it's in draining
+ * mode (ie: not taking new non-persistent connections).
+ */
+static inline int server_is_draining(const struct server *s)
+{
+ return !s->uweight || (s->cur_admin & SRV_ADMF_DRAIN);
+}
+
+/* Puts server <s> into maintenance mode, and propagate that status down to all
+ * tracking servers.
+ */
+static inline void srv_adm_set_maint(struct server *s)
+{
+ srv_set_admin_flag(s, SRV_ADMF_FMAINT, SRV_ADM_STCHGC_NONE);
+ srv_clr_admin_flag(s, SRV_ADMF_FDRAIN);
+}
+
+/* Puts server <s> into drain mode, and propagate that status down to all
+ * tracking servers.
+ */
+static inline void srv_adm_set_drain(struct server *s)
+{
+ srv_set_admin_flag(s, SRV_ADMF_FDRAIN, SRV_ADM_STCHGC_NONE);
+ srv_clr_admin_flag(s, SRV_ADMF_FMAINT);
+}
+
+/* Puts server <s> into ready mode, and propagate that status down to all
+ * tracking servers.
+ */
+static inline void srv_adm_set_ready(struct server *s)
+{
+ srv_clr_admin_flag(s, SRV_ADMF_FDRAIN);
+ srv_clr_admin_flag(s, SRV_ADMF_FMAINT);
+}
+
+/* appends an initaddr method to the existing list. Returns 0 on failure. */
+static inline int srv_append_initaddr(unsigned int *list, enum srv_initaddr addr)
+{
+ int shift = 0;
+
+ while (shift + 3 < 32 && (*list >> shift))
+ shift += 3;
+
+ if (shift + 3 > 32)
+ return 0;
+
+ *list |= addr << shift;
+ return 1;
+}
+
+/* returns the next initaddr method and removes it from <list> by shifting
+ * it right (implying that it MUST NOT be the server's). Returns SRV_IADDR_END
+ * at the end.
+ */
+static inline enum srv_initaddr srv_get_next_initaddr(unsigned int *list)
+{
+ enum srv_initaddr ret;
+
+ ret = *list & 7;
+ *list >>= 3;
+ return ret;
+}
+
+static inline void srv_use_conn(struct server *srv, struct connection *conn)
+{
+ unsigned int curr, prev;
+
+ curr = _HA_ATOMIC_ADD_FETCH(&srv->curr_used_conns, 1);
+
+
+ /* It's ok not to do that atomically, we don't need an
+ * exact max.
+ */
+ prev = HA_ATOMIC_LOAD(&srv->max_used_conns);
+ if (prev < curr)
+ HA_ATOMIC_STORE(&srv->max_used_conns, curr);
+
+ prev = HA_ATOMIC_LOAD(&srv->est_need_conns);
+ if (prev < curr)
+ HA_ATOMIC_STORE(&srv->est_need_conns, curr);
+}
+
+/* checks if minconn and maxconn are consistent to each other
+ * and automatically adjust them if it is not the case
+ * This logic was historically implemented in check_config_validity()
+ * at boot time, but with the introduction of dynamic servers
+ * this may be used at multiple places in the code now
+ */
+static inline void srv_minmax_conn_apply(struct server *srv)
+{
+ if (srv->minconn > srv->maxconn) {
+ /* Only 'minconn' was specified, or it was higher than or equal
+ * to 'maxconn'. Let's turn this into maxconn and clean it, as
+ * this will avoid further useless expensive computations.
+ */
+ srv->maxconn = srv->minconn;
+ } else if (srv->maxconn && !srv->minconn) {
+ /* minconn was not specified, so we set it to maxconn */
+ srv->minconn = srv->maxconn;
+ }
+}
+
+/* Returns true if server is used as transparent mode. */
+static inline int srv_is_transparent(const struct server *srv)
+{
+ /* A reverse server does not have any address but it is not used as a
+ * transparent one.
+ */
+ return (!is_addr(&srv->addr) && !(srv->flags & SRV_F_RHTTP)) ||
+ (srv->flags & SRV_F_MAPPORTS);
+}
+
+#endif /* _HAPROXY_SERVER_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/session-t.h b/include/haproxy/session-t.h
new file mode 100644
index 0000000..dff167e
--- /dev/null
+++ b/include/haproxy/session-t.h
@@ -0,0 +1,78 @@
+/*
+ * include/haproxy/session-t.h
+ * This file defines everything related to sessions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SESSION_T_H
+#define _HAPROXY_SESSION_T_H
+
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/vars-t.h>
+
+
+/* session flags */
+enum {
+ SESS_FL_NONE = 0x00000000, /* nothing */
+ SESS_FL_PREFER_LAST = 0x00000001, /* NTLM authentication, we should reuse last conn */
+};
+
+/* max number of idle server connections kept attached to a session */
+#define MAX_SRV_LIST 5
+
+struct session {
+ struct proxy *fe; /* the proxy this session depends on for the client side */
+ struct listener *listener; /* the listener by which the request arrived */
+ enum obj_type *origin; /* the connection / applet which initiated this session */
+ struct timeval accept_date; /* date of the session's accept() in user date */
+ ullong accept_ts; /* date of the session's accept() in internal date (monotonic) */
+ struct stkctr *stkctr; /* stick counters for tcp-connection */
+ struct vars vars; /* list of variables for the session scope. */
+ struct task *task; /* handshake timeout processing */
+ long t_handshake; /* handshake duration, -1 = not completed */
+ long t_idle; /* idle duration, -1 if never occurs */
+ int idle_conns; /* Number of connections we're currently responsible for that we are not using */
+ unsigned int flags; /* session flags, SESS_FL_* */
+ struct list srv_list; /* List of servers and the connections the session is currently responsible for */
+ struct sockaddr_storage *src; /* source address (pool), when known, otherwise NULL */
+ struct sockaddr_storage *dst; /* destination address (pool), when known, otherwise NULL */
+};
+
+struct sess_srv_list {
+ void *target;
+ struct list conn_list; /* Head of the connections list */
+ struct list srv_list; /* Next element of the server list */
+};
+
+#endif /* _HAPROXY_SESSION_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/session.h b/include/haproxy/session.h
new file mode 100644
index 0000000..38335e4
--- /dev/null
+++ b/include/haproxy/session.h
@@ -0,0 +1,335 @@
+/*
+ * include/haproxy/session.h
+ * This file contains functions used to manage sessions.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SESSION_H
+#define _HAPROXY_SESSION_H
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/global-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/pool.h>
+#include <haproxy/server.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stick_table.h>
+
+extern struct pool_head *pool_head_session;
+extern struct pool_head *pool_head_sess_srv_list;
+
+struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin);
+void session_free(struct session *sess);
+int session_accept_fd(struct connection *cli_conn);
+int conn_complete_session(struct connection *conn);
+struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state);
+
+/* Remove the refcount from the session to the tracked counters, and clear the
+ * pointer to ensure this is only performed once. The caller is responsible for
+ * ensuring that the pointer is valid first.
+ */
+static inline void session_store_counters(struct session *sess)
+{
+ void *ptr;
+ int i;
+ struct stksess *ts;
+
+ if (unlikely(!sess->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ struct stkctr *stkctr = &sess->stkctr[i];
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ continue;
+
+ ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_CONN_CUR);
+ if (ptr) {
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ if (stktable_data_cast(ptr, std_t_uint) > 0)
+ stktable_data_cast(ptr, std_t_uint)--;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+
+ stkctr_set_entry(stkctr, NULL);
+ stksess_kill_if_expired(stkctr->table, ts, 1);
+ }
+}
+
+/* Increase the number of cumulated HTTP requests in the tracked counters */
+static inline void session_inc_http_req_ctr(struct session *sess)
+{
+ int i;
+
+ if (unlikely(!sess->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++)
+ stkctr_inc_http_req_ctr(&sess->stkctr[i]);
+}
+
+/* Increase the number of cumulated failed HTTP requests in the tracked
+ * counters. Only 4xx requests should be counted here so that we can
+ * distinguish between errors caused by client behaviour and other ones.
+ * Note that even 404 are interesting because they're generally caused by
+ * vulnerability scans.
+ */
+static inline void session_inc_http_err_ctr(struct session *sess)
+{
+ int i;
+
+ if (unlikely(!sess->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++)
+ stkctr_inc_http_err_ctr(&sess->stkctr[i]);
+}
+
+/* Increase the number of cumulated failed HTTP responses in the tracked
+ * counters. Only some 5xx responses should be counted here so that we can
+ * distinguish between server failures and errors triggered by the client
+ * (i.e. 501 and 505 may be triggered and must be ignored).
+ */
+static inline void session_inc_http_fail_ctr(struct session *sess)
+{
+ int i;
+
+ if (unlikely(!sess->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++)
+ stkctr_inc_http_fail_ctr(&sess->stkctr[i]);
+}
+
+
+/* Remove the connection from the session list, and destroy the srv_list if it's now empty */
+static inline void session_unown_conn(struct session *sess, struct connection *conn)
+{
+ struct sess_srv_list *srv_list = NULL;
+
+ BUG_ON(objt_listener(conn->target));
+
+ /* WT: this currently is a workaround for an inconsistency between
+ * the link status of the connection in the session list and the
+ * connection's owner. This should be removed as soon as all this
+ * is addressed. Right now it's possible to enter here with a non-null
+ * conn->owner that points to a dead session, but in this case the
+ * element is not linked.
+ */
+ if (!LIST_INLIST(&conn->session_list))
+ return;
+
+ if (conn->flags & CO_FL_SESS_IDLE)
+ sess->idle_conns--;
+ LIST_DEL_INIT(&conn->session_list);
+ conn->owner = NULL;
+ list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
+ if (srv_list->target == conn->target) {
+ if (LIST_ISEMPTY(&srv_list->conn_list)) {
+ LIST_DELETE(&srv_list->srv_list);
+ pool_free(pool_head_sess_srv_list, srv_list);
+ }
+ break;
+ }
+ }
+}
+
+/* Add the connection <conn> to the server list of the session <sess>. This
+ * function is called only if the connection is private. Nothing is performed if
+ * the connection is already in the session server list or if the session does
+ * not own the connection.
+ */
+static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
+{
+ struct sess_srv_list *srv_list = NULL;
+ int found = 0;
+
+ BUG_ON(objt_listener(conn->target));
+
+ /* Already attached to the session or the session is not the connection owner */
+ if (!LIST_ISEMPTY(&conn->session_list) || (conn->owner && conn->owner != sess))
+ return 1;
+
+ list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
+ if (srv_list->target == target) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ /* The session has no connection for the server, create a new entry */
+ srv_list = pool_alloc(pool_head_sess_srv_list);
+ if (!srv_list)
+ return 0;
+ srv_list->target = target;
+ LIST_INIT(&srv_list->conn_list);
+ LIST_APPEND(&sess->srv_list, &srv_list->srv_list);
+ }
+ LIST_APPEND(&srv_list->conn_list, &conn->session_list);
+ return 1;
+}
+
+/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
+ * connection must be private.
+ */
+static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
+{
+ /* Another session owns this connection */
+ if (conn->owner != sess)
+ return 0;
+
+ if (sess->idle_conns >= sess->fe->max_out_conns) {
+ session_unown_conn(sess, conn);
+ conn->owner = NULL;
+ conn->flags &= ~CO_FL_SESS_IDLE;
+ conn->mux->destroy(conn->ctx);
+ return -1;
+ } else {
+ conn->flags |= CO_FL_SESS_IDLE;
+ sess->idle_conns++;
+ }
+ return 0;
+}
+
+/* Look for an available connection matching the target <target> in the server
+ * list of the session <sess>. It returns a connection if found. Otherwise it
+ * returns NULL.
+ */
+static inline struct connection *session_get_conn(struct session *sess, void *target, int64_t hash)
+{
+ struct connection *srv_conn = NULL;
+ struct sess_srv_list *srv_list;
+
+ list_for_each_entry(srv_list, &sess->srv_list, srv_list) {
+ if (srv_list->target == target) {
+ list_for_each_entry(srv_conn, &srv_list->conn_list, session_list) {
+ if ((srv_conn->hash_node && srv_conn->hash_node->node.key == hash) &&
+ srv_conn->mux &&
+ (srv_conn->mux->avail_streams(srv_conn) > 0) &&
+ !(srv_conn->flags & CO_FL_WAIT_XPRT)) {
+ if (srv_conn->flags & CO_FL_SESS_IDLE) {
+ srv_conn->flags &= ~CO_FL_SESS_IDLE;
+ sess->idle_conns--;
+ }
+ goto end;
+ }
+ }
+ srv_conn = NULL; /* No available connection found */
+ goto end;
+ }
+ }
+
+ end:
+ return srv_conn;
+}
+
+/* Returns the source address of the session and fallbacks on the client
+ * connection if not set. It returns a const address on success or NULL on
+ * failure.
+ */
+static inline const struct sockaddr_storage *sess_src(struct session *sess)
+{
+ struct connection *cli_conn = objt_conn(sess->origin);
+
+ if (sess->src)
+ return sess->src;
+ if (cli_conn && conn_get_src(cli_conn))
+ return conn_src(cli_conn);
+ return NULL;
+}
+
+/* Returns the destination address of the session and fallbacks on the client
+ * connection if not set. It returns a const address on success or NULL on
+ * failure.
+ */
+static inline const struct sockaddr_storage *sess_dst(struct session *sess)
+{
+ struct connection *cli_conn = objt_conn(sess->origin);
+
+ if (sess->dst)
+ return sess->dst;
+ if (cli_conn && conn_get_dst(cli_conn))
+ return conn_dst(cli_conn);
+ return NULL;
+}
+
+
+/* Retrieves the source address of the session <sess>. Returns non-zero on
+ * success or zero on failure. The operation is only performed once and the
+ * address is stored in the session for future use. On the first call, the
+ * session source address is copied from the client connection one.
+ */
+static inline int sess_get_src(struct session *sess)
+{
+ struct connection *cli_conn = objt_conn(sess->origin);
+ const struct sockaddr_storage *src = NULL;
+
+ if (sess->src)
+ return 1;
+
+ if (cli_conn && conn_get_src(cli_conn))
+ src = conn_src(cli_conn);
+ if (!src)
+ return 0;
+
+ if (!sockaddr_alloc(&sess->src, src, sizeof(*src)))
+ return 0;
+
+ return 1;
+}
+
+
+/* Retrieves the destination address of the session <sess>. Returns non-zero on
+ * success or zero on failure. The operation is only performed once and the
+ * address is stored in the session for future use. On the first call, the
+ * session destination address is copied from the client connection one.
+ */
+static inline int sess_get_dst(struct session *sess)
+{
+ struct connection *cli_conn = objt_conn(sess->origin);
+ const struct sockaddr_storage *dst = NULL;
+
+ if (sess->dst)
+ return 1;
+
+ if (cli_conn && conn_get_dst(cli_conn))
+ dst = conn_dst(cli_conn);
+ if (!dst)
+ return 0;
+
+ if (!sockaddr_alloc(&sess->dst, dst, sizeof(*dst)))
+ return 0;
+
+ return 1;
+}
+
+#endif /* _HAPROXY_SESSION_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/shctx-t.h b/include/haproxy/shctx-t.h
new file mode 100644
index 0000000..493024a
--- /dev/null
+++ b/include/haproxy/shctx-t.h
@@ -0,0 +1,63 @@
+/*
+ * include/haproxy/shctx-t.h - shared context management functions for SSL
+ *
+ * Copyright (C) 2011-2012 EXCELIANCE
+ *
+ * Author: Emeric Brun - emeric@exceliance.fr
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __HAPROXY_SHCTX_T_H
+#define __HAPROXY_SHCTX_T_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/thread-t.h>
+
+#ifndef SHSESS_BLOCK_MIN_SIZE
+#define SHSESS_BLOCK_MIN_SIZE 128
+#endif
+
+#ifndef SHSESS_MAX_DATA_LEN
+#define SHSESS_MAX_DATA_LEN 4096
+#endif
+
+#ifndef SHCTX_APPNAME
+#define SHCTX_APPNAME "haproxy"
+#endif
+
+#define SHCTX_E_ALLOC_CACHE -1
+#define SHCTX_E_INIT_LOCK -2
+
+#define SHCTX_F_REMOVING 0x1 /* Removing flag, does not accept new */
+
+/* generic shctx struct */
+struct shared_block {
+ struct list list;
+ unsigned int len; /* data length for the row */
+ unsigned int block_count; /* number of blocks */
+ unsigned int refcount;
+ struct shared_block *last_reserved;
+ struct shared_block *last_append;
+ unsigned char data[VAR_ARRAY];
+};
+
+struct shared_context {
+ __decl_thread(HA_RWLOCK_T lock);
+ struct list avail; /* list for active and free blocks */
+ unsigned int nbav; /* number of available blocks */
+ unsigned int max_obj_size; /* maximum object size (in bytes). */
+ void (*free_block)(struct shared_block *first, void *data);
+ void (*reserve_finish)(struct shared_context *shctx);
+ void *cb_data;
+ short int block_size;
+ ALWAYS_ALIGN(64); /* The following member needs to be aligned to 64 in the
+ cache's case because the cache struct contains an explicitly
+ aligned member (struct cache_tree). */
+ unsigned char data[VAR_ARRAY];
+};
+
+#endif /* __HAPROXY_SHCTX_T_H */
diff --git a/include/haproxy/shctx.h b/include/haproxy/shctx.h
new file mode 100644
index 0000000..a57cf15
--- /dev/null
+++ b/include/haproxy/shctx.h
@@ -0,0 +1,80 @@
+/*
+ * include/haproxy/shctx.h - shared context management functions for SSL
+ *
+ * Copyright (C) 2011-2012 EXCELIANCE
+ *
+ * Author: Emeric Brun - emeric@exceliance.fr
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __HAPROXY_SHCTX_H
+#define __HAPROXY_SHCTX_H
+
+#include <haproxy/api.h>
+#include <haproxy/list.h>
+#include <haproxy/shctx-t.h>
+#include <haproxy/thread.h>
+
+int shctx_init(struct shared_context **orig_shctx,
+ int maxblocks, int blocksize, unsigned int maxobjsz,
+ int extra);
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
+ struct shared_block *last, int data_len);
+void shctx_row_detach(struct shared_context *shctx, struct shared_block *first);
+void shctx_row_reattach(struct shared_context *shctx, struct shared_block *first);
+int shctx_row_data_append(struct shared_context *shctx,
+ struct shared_block *first,
+ unsigned char *data, int len);
+int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
+ unsigned char *dst, int offset, int len);
+
+
+/* Lock functions */
+
+static inline void shctx_rdlock(struct shared_context *shctx)
+{
+ HA_RWLOCK_RDLOCK(SHCTX_LOCK, &shctx->lock);
+}
+static inline void shctx_rdunlock(struct shared_context *shctx)
+{
+ HA_RWLOCK_RDUNLOCK(SHCTX_LOCK, &shctx->lock);
+}
+static inline void shctx_wrlock(struct shared_context *shctx)
+{
+ HA_RWLOCK_WRLOCK(SHCTX_LOCK, &shctx->lock);
+}
+static inline void shctx_wrunlock(struct shared_context *shctx)
+{
+ HA_RWLOCK_WRUNLOCK(SHCTX_LOCK, &shctx->lock);
+}
+
+/* List Macros */
+
+/*
+ * Insert <s> block after <first> which is not necessarily the head of a list,
+ * so between <first> and the next element after <first>.
+ */
+static inline void shctx_block_append_hot(struct shared_context *shctx,
+ struct shared_block *first,
+ struct shared_block *s)
+{
+ shctx->nbav--;
+ LIST_DELETE(&s->list);
+ LIST_APPEND(&first->list, &s->list);
+}
+
+static inline struct shared_block *shctx_block_detach(struct shared_context *shctx,
+ struct shared_block *s)
+{
+ shctx->nbav--;
+ LIST_DELETE(&s->list);
+ LIST_INIT(&s->list);
+ return s;
+}
+
+#endif /* __HAPROXY_SHCTX_H */
+
diff --git a/include/haproxy/show_flags-t.h b/include/haproxy/show_flags-t.h
new file mode 100644
index 0000000..824d771
--- /dev/null
+++ b/include/haproxy/show_flags-t.h
@@ -0,0 +1,99 @@
+/*
+ * include/haproxy/show_flags-t.h
+ * These are helper macros used to decode flags for debugging
+ *
+ * Copyright (C) 2022 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SHOW_FLAGS_H
+#define _HAPROXY_SHOW_FLAGS_H
+
+/* Only define the macro below if the caller requests it using HA_EXPOSE_FLAGS.
+ * It will be used by many low-level includes and we don't want to
+ * include the huge stdio here by default. The macro is used to make a string
+ * of a set of flags (and handles one flag at a time). It will append into
+ * <_buf>:<_len> the state of flag <_val> in <_flg>, appending string <_del> as
+ * delimiters till the last flag is dumped, then updating <_buf> and <_len>
+ * accordingly. <_nam> is used as the name for value <_val>. <_flg> loses all
+ * dumped flags. If <_flg> is zero and <_val> is 0, a "0" is reported, this can
+ * be used as a prologue to the dump. If <_val> contains more than one bit set,
+ * <_flg>'s hexadecimal output is reported instead of a name.
+ *
+ * It is possible to use it to enumerate all flags from right to left so that
+ * they are easier to check in the code. It will start by executing the optional
+ * code block in the extra flags (if any) before proceeding with the dump using
+ * the arguments. It is suggested to locally rename it to a single-char macro
+ * locally for readability, e.g:
+ *
+ * #define _(n, ...) __APPEND_FLAG(buf, len, del, flg, n, #n, __VA_ARGS__)
+ * _(0);
+ * _(X_FLAG1, _(X_FLAG2, _(X_FLAG3)));
+ * _(~0);
+ * #undef _
+ *
+ * __APPEND_ENUM() works a bit differently in that it takes an additional mask
+ * to isolate bits to compare to the enum's value, and will remove the mask's
+ * bits at once in case of match.
+ */
+#ifdef HA_EXPOSE_FLAGS
+
+#define __APPEND_FLAG(_buf, _len, _del, _flg, _val, _nam, ...) \
+ do { \
+ size_t _ret = 0; \
+ unsigned int _flg0 = (_flg); \
+ do { __VA_ARGS__; } while (0); \
+ (_flg) &= ~(unsigned int)(_val); \
+ if (!((unsigned int)_val) && !(_flg)) \
+ _ret = snprintf(_buf, _len, "0%s", \
+ (_flg) ? (_del) : ""); \
+ else if ((_flg0) & (_val)) { \
+ if ((_val) & ((_val) - 1)) \
+ _ret = snprintf(_buf, _len, "%#x%s", \
+ (_flg0), (_flg) ? (_del) : ""); \
+ else \
+ _ret = snprintf(_buf, _len, _nam "%s", \
+ (_flg) ? (_del) : ""); \
+ } \
+ if (_ret < _len) { \
+ _len -= _ret; \
+ _buf += _ret; \
+ } \
+ } while (0)
+
+#define __APPEND_ENUM(_buf, _len, _del, _flg, _msk, _val, _nam, ...) \
+ do { \
+ size_t _ret = 0; \
+ do { __VA_ARGS__; } while (0); \
+ if (((_flg) & (_msk)) == (_val)) { \
+ (_flg) &= ~(_msk); \
+ _ret = snprintf(_buf, _len, _nam "%s", \
+ (_flg) ? (_del) : ""); \
+ } \
+ if (_ret < _len) { \
+ _len -= _ret; \
+ _buf += _ret; \
+ } \
+ } while (0)
+
+#else /* EOF not defined => no stdio, do nothing */
+
+#define __APPEND_FLAG(_buf, _len, _del, _flg, _val, _nam, ...) do { } while (0)
+#define __APPEND_ENUM(_buf, _len, _del, _flg, _msk, _val, _nam, ...) do { } while (0)
+
+#endif /* EOF */
+
+#endif /* _HAPROXY_SHOW_FLAGS_H */
diff --git a/include/haproxy/signal-t.h b/include/haproxy/signal-t.h
new file mode 100644
index 0000000..85d4b33
--- /dev/null
+++ b/include/haproxy/signal-t.h
@@ -0,0 +1,66 @@
+/*
+ * include/haproxy/signal-t.h
+ * Asynchronous signal delivery functions descriptors.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_SIGNAL_T_H
+#define _HAPROXY_SIGNAL_T_H
+
+#include <signal.h>
+#include <haproxy/api-t.h>
+
+/* flags for -> flags */
+#define SIG_F_ONE_SHOOT 0x0001 /* unregister handler before calling it */
+#define SIG_F_TYPE_FCT 0x0002 /* handler is a function + arg */
+#define SIG_F_TYPE_TASK 0x0004 /* handler is a task + reason */
+
+/* Define WDTSIG if available */
+#if defined(USE_RT) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
+
+
+/* We'll deliver SIGALRM when we've run out of CPU as it's not intercepted by
+ * gdb by default.
+ */
+#define WDTSIG SIGALRM
+
+#endif
+
+#ifdef USE_THREAD_DUMP
+/* The signal to trigger a debug dump on a thread is SIGURG. It has the benefit
+ * of not stopping gdb by default, so that issuing "show threads" in a process
+ * being debugged has no adverse effect.
+ */
+#define DEBUGSIG SIGURG
+
+#endif
+
+/* those are highly dynamic and stored in pools */
+struct sig_handler {
+ struct list list;
+ void *handler; /* function to call or task to wake up */
+ int arg; /* arg to pass to function, or signals */
+ int flags; /* SIG_F_* */
+};
+
+/* one per signal */
+struct signal_descriptor {
+ int count; /* number of times raised */
+ struct list handlers; /* sig_handler */
+};
+
+#endif /* _HAPROXY_SIGNAL_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/signal.h b/include/haproxy/signal.h
new file mode 100644
index 0000000..25a4ef1
--- /dev/null
+++ b/include/haproxy/signal.h
@@ -0,0 +1,52 @@
+/*
+ * include/haproxy/signal.h
+ * Asynchronous signal delivery functions.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_SIGNAL_H
+#define _HAPROXY_SIGNAL_H
+
+#include <signal.h>
+
+#include <haproxy/api.h>
+#include <haproxy/signal-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread.h>
+
+extern int signal_queue_len;
+extern struct signal_descriptor signal_state[];
+
+__decl_thread(extern HA_SPINLOCK_T signals_lock);
+
+void signal_handler(int sig);
+void __signal_process_queue(void);
+void deinit_signals(void);
+struct sig_handler *signal_register_fct(int sig, void (*fct)(struct sig_handler *), int arg);
+struct sig_handler *signal_register_task(int sig, struct task *task, int reason);
+void signal_unregister_handler(struct sig_handler *handler);
+void signal_unregister_target(int sig, void *target);
+void signal_unregister(int sig);
+void haproxy_unblock_signals(void);
+
+static inline void signal_process_queue()
+{
+ if (unlikely(signal_queue_len > 0))
+ __signal_process_queue();
+}
+
+#endif /* _HAPROXY_SIGNAL_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sink-t.h b/include/haproxy/sink-t.h
new file mode 100644
index 0000000..79a0dda
--- /dev/null
+++ b/include/haproxy/sink-t.h
@@ -0,0 +1,76 @@
+/*
+ * include/haproxy/sink-t.h
+ * This file provides definitions for event sinks
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SINK_T_H
+#define _HAPROXY_SINK_T_H
+
+#include <import/ist.h>
+#include <haproxy/api-t.h>
+#include <haproxy/log-t.h>
+
+/* A sink may be of 3 distinct types :
+ * - not yet initialized (SINK_TYPE_NEW)
+ * - file descriptor (such as stdout)
+ * - ring buffer, readable from CLI */
+enum sink_type {
+ SINK_TYPE_NEW, // not yet initialized
+ SINK_TYPE_FD, // events sent to a file descriptor
+ SINK_TYPE_BUFFER, // events sent to a ring buffer
+};
+
+struct sink_forward_target {
+ struct server *srv; // used server
+ struct appctx *appctx; // appctx of current session
+ size_t ofs; // ring buffer reader offset
+ struct sink *sink; // the associated sink
+ struct sink_forward_target *next;
+ __decl_thread(HA_SPINLOCK_T lock); // lock to protect current struct
+};
+
+/* describes the configuration and current state of an event sink */
+struct sink {
+ struct list sink_list; // position in the sink list
+ char *name; // sink name
+ char *desc; // sink description
+ char *store; // backing-store file when buffer
+ enum log_fmt fmt; // format expected by the sink
+ enum sink_type type; // type of storage
+ uint32_t maxlen; // max message length (truncated above)
+ struct proxy* forward_px; // internal proxy used to forward (only set when exclusive to sink)
+ struct sink_forward_target *sft; // sink forward targets
+ struct task *forward_task; // task to handle forward targets conns
+ struct sig_handler *forward_sighandler; /* signal handler */
+ struct {
+ struct ring *ring; // used by ring buffer and STRM sender
+ unsigned int dropped; // dropped events since last one.
+ int fd; // fd num for FD type sink
+ __decl_thread(HA_RWLOCK_T lock); // shared/excl for dropped
+ } ctx;
+};
+
+#endif /* _HAPROXY_SINK_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sink.h b/include/haproxy/sink.h
new file mode 100644
index 0000000..3b428a1
--- /dev/null
+++ b/include/haproxy/sink.h
@@ -0,0 +1,97 @@
+/*
+ * include/haproxy/sink.h
+ * This file provides declarations for event sinks management
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SINK_H
+#define _HAPROXY_SINK_H
+
+#include <sys/types.h>
+#include <haproxy/sink-t.h>
+#include <haproxy/thread.h>
+
+extern struct list sink_list;
+
+extern struct proxy *sink_proxies_list;
+
+struct sink *sink_find(const char *name);
+struct sink *sink_new_fd(const char *name, const char *desc, enum log_fmt, int fd);
+ssize_t __sink_write(struct sink *sink, struct log_header hdr, size_t maxlen,
+ const struct ist msg[], size_t nmsg);
+int sink_announce_dropped(struct sink *sink, struct log_header hdr);
+
+
+/* tries to send <nmsg> message parts from message array <msg> to sink <sink>.
+ * Formatting according to the sink's preference is done here, unless sink->fmt
+ * is unspecified, in which case the caller formatting will be used instead.
+ *
+ * It will stop writing at <maxlen> instead of sink->maxlen if <maxlen> is
+ * positive and inferior to sink->maxlen.
+ *
+ * Lost messages are accounted for in the sink's counter. If there
+ * were lost messages, an attempt is first made to indicate it.
+ * The function returns the number of bytes effectively sent or announced,
+ * or <= 0 in other cases.
+ */
+static inline ssize_t sink_write(struct sink *sink, struct log_header hdr,
+ size_t maxlen, const struct ist msg[], size_t nmsg)
+{
+ ssize_t sent;
+
+ if (unlikely(sink->ctx.dropped > 0)) {
+ /* We need to take an exclusive lock so that other producers
+ * don't do the same thing at the same time and above all we
+ * want to be sure others have finished sending their messages
+ * so that the dropped event arrives exactly at the right
+ * position.
+ */
+ HA_RWLOCK_WRLOCK(RING_LOCK, &sink->ctx.lock);
+ sent = sink_announce_dropped(sink, hdr);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &sink->ctx.lock);
+
+ if (!sent) {
+ /* we failed, we don't try to send our log as if it
+ * would pass by chance, we'd get disordered events.
+ */
+ goto fail;
+ }
+ }
+
+ HA_RWLOCK_RDLOCK(RING_LOCK, &sink->ctx.lock);
+ sent = __sink_write(sink, hdr, maxlen, msg, nmsg);
+ HA_RWLOCK_RDUNLOCK(RING_LOCK, &sink->ctx.lock);
+
+ fail:
+ if (unlikely(sent <= 0))
+ HA_ATOMIC_INC(&sink->ctx.dropped);
+
+ return sent;
+}
+
+struct sink *sink_new_from_srv(struct server *srv, const char *from);
+int sink_resolve_logger_buffer(struct logger *logger, char **msg);
+
+#endif /* _HAPROXY_SINK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sock-t.h b/include/haproxy/sock-t.h
new file mode 100644
index 0000000..b843d44
--- /dev/null
+++ b/include/haproxy/sock-t.h
@@ -0,0 +1,37 @@
+/*
+ * include/haproxy/sock-t.h
+ * This file contains type definitions for native (BSD-compatible) sockets.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SOCK_T_H
+#define _HAPROXY_SOCK_T_H
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api-t.h>
+
+#endif /* _HAPROXY_SOCK_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sock.h b/include/haproxy/sock.h
new file mode 100644
index 0000000..60e81ec
--- /dev/null
+++ b/include/haproxy/sock.h
@@ -0,0 +1,62 @@
+/*
+ * include/haproxy/sock.h
+ * This file contains declarations for native (BSD-compatible) sockets.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SOCK_H
+#define _HAPROXY_SOCK_H
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/sock-t.h>
+
+int sock_create_server_socket(struct connection *conn);
+void sock_enable(struct receiver *rx);
+void sock_disable(struct receiver *rx);
+void sock_unbind(struct receiver *rx);
+int sock_get_src(int fd, struct sockaddr *sa, socklen_t salen, int dir);
+int sock_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir);
+int sock_get_old_sockets(const char *unixsocket);
+int sock_find_compatible_fd(const struct receiver *rx);
+void sock_drop_unused_old_sockets();
+int sock_accepting_conn(const struct receiver *rx);
+struct connection *sock_accept_conn(struct listener *l, int *status);
+void sock_accept_iocb(int fd);
+void sock_conn_ctrl_init(struct connection *conn);
+void sock_conn_ctrl_close(struct connection *conn);
+void sock_conn_iocb(int fd);
+int sock_conn_check(struct connection *conn);
+int sock_drain(struct connection *conn);
+int sock_check_events(struct connection *conn, int event_type);
+void sock_ignore_events(struct connection *conn, int event_type);
+int _sock_supports_reuseport(const struct proto_fam *fam, int type, int protocol);
+
+
+#endif /* _HAPROXY_SOCK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/sock_inet.h b/include/haproxy/sock_inet.h
new file mode 100644
index 0000000..6f07e63
--- /dev/null
+++ b/include/haproxy/sock_inet.h
@@ -0,0 +1,49 @@
+/*
+ * include/haproxy/sock_inet.h
+ * This file contains declarations for AF_INET & AF_INET6 sockets.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SOCK_INET_H
+#define _HAPROXY_SOCK_INET_H
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+
+extern int sock_inet6_v6only_default;
+extern int sock_inet_tcp_maxseg_default;
+extern int sock_inet6_tcp_maxseg_default;
+
+extern struct proto_fam proto_fam_inet4;
+extern struct proto_fam proto_fam_inet6;
+
+/* external types */
+struct receiver;
+
+int sock_inet4_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b);
+int sock_inet6_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b);
+void sock_inet_set_port(struct sockaddr_storage *addr, int port);
+int sock_inet_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir);
+int sock_inet_is_foreign(int fd, sa_family_t family);
+int sock_inet4_make_foreign(int fd);
+int sock_inet6_make_foreign(int fd);
+int sock_inet_bind_receiver(struct receiver *rx, char **errmsg);
+
+#endif /* _HAPROXY_SOCK_INET_H */
diff --git a/include/haproxy/sock_unix.h b/include/haproxy/sock_unix.h
new file mode 100644
index 0000000..9934341
--- /dev/null
+++ b/include/haproxy/sock_unix.h
@@ -0,0 +1,36 @@
+/*
+ * include/haproxy/sock_unix.h
+ * This file contains declarations for AF_UNIX sockets.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SOCK_UNIX_H
+#define _HAPROXY_SOCK_UNIX_H
+
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/receiver-t.h>
+
+extern struct proto_fam proto_fam_unix;
+
+int sock_unix_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b);
+int sock_unix_bind_receiver(struct receiver *rx, char **errmsg);
+
+#endif /* _HAPROXY_SOCK_UNIX_H */
diff --git a/include/haproxy/spoe-t.h b/include/haproxy/spoe-t.h
new file mode 100644
index 0000000..2732443
--- /dev/null
+++ b/include/haproxy/spoe-t.h
@@ -0,0 +1,413 @@
+/*
+ * include/haproxy/spoe-t.h
+ * Macros, variables and structures for the SPOE filter.
+ *
+ * Copyright (C) 2017 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SPOE_T_H
+#define _HAPROXY_SPOE_T_H
+
+#include <sys/time.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/dynbuf-t.h>
+#include <haproxy/filters-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread-t.h>
+
+/* Type of list of messages */
+#define SPOE_MSGS_BY_EVENT 0x01
+#define SPOE_MSGS_BY_GROUP 0x02
+
+/* Flags set on the SPOE agent */
+#define SPOE_FL_CONT_ON_ERR 0x00000001 /* Do not stop events processing when an error occurred */
+#define SPOE_FL_PIPELINING 0x00000002 /* Set when SPOE agent supports pipelining (set by default) */
+#define SPOE_FL_ASYNC 0x00000004 /* Set when SPOE agent supports async (set by default) */
+#define SPOE_FL_SND_FRAGMENTATION 0x00000008 /* Set when SPOE agent supports sending fragmented payload */
+#define SPOE_FL_RCV_FRAGMENTATION 0x00000010 /* Set when SPOE agent supports receiving fragmented payload */
+#define SPOE_FL_FORCE_SET_VAR 0x00000020 /* Set when SPOE agent will set all variables from agent (and not only known variables) */
+
+/* Flags set on the SPOE context */
+#define SPOE_CTX_FL_CLI_CONNECTED 0x00000001 /* Set after the on-client-session event was processed */
+#define SPOE_CTX_FL_SRV_CONNECTED 0x00000002 /* Set after the on-server-session event was processed */
+#define SPOE_CTX_FL_REQ_PROCESS 0x00000004 /* Set when SPOE is processing the request */
+#define SPOE_CTX_FL_RSP_PROCESS 0x00000008 /* Set when SPOE is processing the response */
+#define SPOE_CTX_FL_FRAGMENTED 0x00000010 /* Set when a fragmented frame is processing */
+
+#define SPOE_CTX_FL_PROCESS (SPOE_CTX_FL_REQ_PROCESS|SPOE_CTX_FL_RSP_PROCESS)
+
+/* Flags set on the SPOE applet */
+#define SPOE_APPCTX_FL_PIPELINING 0x00000001 /* Set if pipelining is supported */
+#define SPOE_APPCTX_FL_ASYNC 0x00000002 /* Set if asynchronous frames are supported */
+#define SPOE_APPCTX_FL_FRAGMENTATION 0x00000004 /* Set if fragmentation is supported */
+
+#define SPOE_APPCTX_ERR_NONE 0x00000000 /* no error yet, leave it to zero */
+#define SPOE_APPCTX_ERR_TOUT 0x00000001 /* SPOE applet timeout */
+
+/* Flags set on the SPOE frame */
+#define SPOE_FRM_FL_FIN 0x00000001
+#define SPOE_FRM_FL_ABRT 0x00000002
+
+/* Masks to get data type or flags value */
+#define SPOE_DATA_T_MASK 0x0F
+#define SPOE_DATA_FL_MASK 0xF0
+
+/* Flags to set Boolean values */
+#define SPOE_DATA_FL_FALSE 0x00
+#define SPOE_DATA_FL_TRUE 0x10
+
+/* All possible states for a SPOE context */
+enum spoe_ctx_state {
+ SPOE_CTX_ST_NONE = 0,
+ SPOE_CTX_ST_READY,
+ SPOE_CTX_ST_ENCODING_MSGS,
+ SPOE_CTX_ST_SENDING_MSGS,
+ SPOE_CTX_ST_WAITING_ACK,
+ SPOE_CTX_ST_DONE,
+ SPOE_CTX_ST_ERROR,
+};
+
+/* All possible states for a SPOE applet */
+enum spoe_appctx_state {
+ SPOE_APPCTX_ST_CONNECT = 0,
+ SPOE_APPCTX_ST_CONNECTING,
+ SPOE_APPCTX_ST_IDLE,
+ SPOE_APPCTX_ST_PROCESSING,
+ SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY,
+ SPOE_APPCTX_ST_WAITING_SYNC_ACK,
+ SPOE_APPCTX_ST_DISCONNECT,
+ SPOE_APPCTX_ST_DISCONNECTING,
+ SPOE_APPCTX_ST_EXIT,
+ SPOE_APPCTX_ST_END,
+};
+
+/* All supported SPOE actions */
+enum spoe_action_type {
+ SPOE_ACT_T_SET_VAR = 1,
+ SPOE_ACT_T_UNSET_VAR,
+ SPOE_ACT_TYPES,
+};
+
+/* All supported SPOE events */
+enum spoe_event {
+ SPOE_EV_NONE = 0,
+
+ /* Request events */
+ SPOE_EV_ON_CLIENT_SESS = 1,
+ SPOE_EV_ON_TCP_REQ_FE,
+ SPOE_EV_ON_TCP_REQ_BE,
+ SPOE_EV_ON_HTTP_REQ_FE,
+ SPOE_EV_ON_HTTP_REQ_BE,
+
+ /* Response events */
+ SPOE_EV_ON_SERVER_SESS,
+ SPOE_EV_ON_TCP_RSP,
+ SPOE_EV_ON_HTTP_RSP,
+
+ SPOE_EV_EVENTS
+};
+
+/* Errors triggered by streams */
+enum spoe_context_error {
+ SPOE_CTX_ERR_NONE = 0,
+ SPOE_CTX_ERR_TOUT,
+ SPOE_CTX_ERR_RES,
+ SPOE_CTX_ERR_TOO_BIG,
+ SPOE_CTX_ERR_FRAG_FRAME_ABRT,
+ SPOE_CTX_ERR_INTERRUPT,
+ SPOE_CTX_ERR_UNKNOWN = 255,
+ SPOE_CTX_ERRS,
+};
+
+/* Errors triggered by SPOE applet */
+enum spoe_frame_error {
+ SPOE_FRM_ERR_NONE = 0,
+ SPOE_FRM_ERR_IO,
+ SPOE_FRM_ERR_TOUT,
+ SPOE_FRM_ERR_TOO_BIG,
+ SPOE_FRM_ERR_INVALID,
+ SPOE_FRM_ERR_NO_VSN,
+ SPOE_FRM_ERR_NO_FRAME_SIZE,
+ SPOE_FRM_ERR_NO_CAP,
+ SPOE_FRM_ERR_BAD_VSN,
+ SPOE_FRM_ERR_BAD_FRAME_SIZE,
+ SPOE_FRM_ERR_FRAG_NOT_SUPPORTED,
+ SPOE_FRM_ERR_INTERLACED_FRAMES,
+ SPOE_FRM_ERR_FRAMEID_NOTFOUND,
+ SPOE_FRM_ERR_RES,
+ SPOE_FRM_ERR_UNKNOWN = 99,
+ SPOE_FRM_ERRS,
+};
+
+/* Scopes used for variables set by agents. It is a way to be agnostic to vars
+ * scope. */
+enum spoe_vars_scope {
+ SPOE_SCOPE_PROC = 0, /* <=> SCOPE_PROC */
+ SPOE_SCOPE_SESS, /* <=> SCOPE_SESS */
+ SPOE_SCOPE_TXN, /* <=> SCOPE_TXN */
+ SPOE_SCOPE_REQ, /* <=> SCOPE_REQ */
+ SPOE_SCOPE_RES, /* <=> SCOPE_RES */
+};
+
+/* Frame Types sent by HAProxy and by agents */
+enum spoe_frame_type {
+ SPOE_FRM_T_UNSET = 0,
+
+ /* Frames sent by HAProxy */
+ SPOE_FRM_T_HAPROXY_HELLO = 1,
+ SPOE_FRM_T_HAPROXY_DISCON,
+ SPOE_FRM_T_HAPROXY_NOTIFY,
+
+ /* Frames sent by the agents */
+ SPOE_FRM_T_AGENT_HELLO = 101,
+ SPOE_FRM_T_AGENT_DISCON,
+ SPOE_FRM_T_AGENT_ACK
+};
+
+/* All supported data types */
+enum spoe_data_type {
+ SPOE_DATA_T_NULL = 0,
+ SPOE_DATA_T_BOOL,
+ SPOE_DATA_T_INT32,
+ SPOE_DATA_T_UINT32,
+ SPOE_DATA_T_INT64,
+ SPOE_DATA_T_UINT64,
+ SPOE_DATA_T_IPV4,
+ SPOE_DATA_T_IPV6,
+ SPOE_DATA_T_STR,
+ SPOE_DATA_T_BIN,
+ SPOE_DATA_TYPES
+};
+
+
+/* Describe an argument that will be linked to a message. It is a sample fetch,
+ * with an optional name. */
+struct spoe_arg {
+ char *name; /* Name of the argument, may be NULL */
+ unsigned int name_len; /* The name length, 0 if NULL */
+ struct sample_expr *expr; /* Sample expression */
+ struct list list; /* Used to chain SPOE args */
+};
+
+/* Used during the config parsing only because, when a SPOE agent section is
+ * parsed, messages/groups can be undefined. */
+struct spoe_placeholder {
+ char *id; /* SPOE placeholder id */
+ struct list list; /* Used to chain SPOE placeholders */
+};
+
+/* Used during the config parsing, when SPOE agent section is parsed, to
+ * register some variable names. */
+struct spoe_var_placeholder {
+ char *name; /* The variable name */
+ struct list list; /* Used to chain SPOE var placeholders */
+};
+
+/* Describe a message that will be sent in a NOTIFY frame. A message has a name,
+ * an argument list (see above) and it is linked to a specific event. */
+struct spoe_message {
+ char *id; /* SPOE message id */
+ unsigned int id_len; /* The message id length */
+ struct spoe_agent *agent; /* SPOE agent owning this SPOE message */
+ struct spoe_group *group; /* SPOE group owning this SPOE message (can be NULL) */
+ struct {
+ char *file; /* file where the SPOE message appears */
+ int line; /* line where the SPOE message appears */
+ } conf; /* config information */
+ unsigned int nargs; /* # of arguments */
+ struct list args; /* Arguments added when the SPOE messages is sent */
+ struct list list; /* Used to chain SPOE messages */
+ struct list by_evt; /* By event list */
+ struct list by_grp; /* By group list */
+
+ struct list acls; /* ACL declared on this message */
+ struct acl_cond *cond; /* acl condition to meet */
+ enum spoe_event event; /* SPOE_EV_* */
+};
+
+/* Describe a group of messages that will be sent in a NOTIFY frame. A group has
+ * a name and a list of messages. It can be used by HAProxy, outside events
+ * processing, mainly in (tcp|http) rules. */
+struct spoe_group {
+ char *id; /* SPOE group id */
+ struct spoe_agent *agent; /* SPOE agent owning this SPOE group */
+ struct {
+ char *file; /* file where the SPOE group appears */
+ int line; /* line where the SPOE group appears */
+ } conf; /* config information */
+
+ struct list phs; /* List of placeholders used during conf parsing */
+ struct list messages; /* List of SPOE messages that will be sent by this
+ * group */
+
+ struct list list; /* Used to chain SPOE groups */
+};
+
+/* Describe a SPOE agent. */
+struct spoe_agent {
+ char *id; /* SPOE agent id (name) */
+ struct {
+ char *file; /* file where the SPOE agent appears */
+ int line; /* line where the SPOE agent appears */
+ } conf; /* config information */
+ union {
+ struct proxy *be; /* Backend used by this agent */
+ char *name; /* Backend name used during conf parsing */
+ } b;
+ struct {
+ unsigned int hello; /* Max time to receive AGENT-HELLO frame (in SPOE applet) */
+ unsigned int idle; /* Max Idle timeout (in SPOE applet) */
+ unsigned int processing; /* Max time to process an event (in the main stream) */
+ } timeout;
+
+ /* Config info */
+ struct spoe_config *spoe_conf; /* SPOE filter config */
+ char *var_pfx; /* Prefix used for vars set by the agent */
+ char *var_on_error; /* Variable to set when an error occurred, in the TXN scope */
+ char *var_t_process; /* Variable to set to report the processing time of the last event/group, in the TXN scope */
+ char *var_t_total; /* Variable to set to report the cumulative processing time, in the TXN scope */
+ unsigned int flags; /* SPOE_FL_* */
+ unsigned int cps_max; /* Maximum # of connections per second */
+ unsigned int eps_max; /* Maximum # of errors per second */
+ unsigned int max_frame_size; /* Maximum frame size for this agent, before any negotiation */
+ unsigned int max_fpa; /* Maximum # of frames handled per applet at once */
+
+ struct list events[SPOE_EV_EVENTS]; /* List of SPOE messages that will be sent
+ * for each supported events */
+
+ struct list groups; /* List of available SPOE groups */
+
+ struct list messages; /* list of all messages attached to this SPOE agent */
+
+ /* running info */
+ struct {
+ char *engine_id; /* engine-id string */
+ unsigned int frame_size; /* current maximum frame size, only used to encode messages */
+ unsigned int processing;
+ struct freq_ctr processing_per_sec;
+
+ struct freq_ctr conn_per_sec; /* connections per second */
+ struct freq_ctr err_per_sec; /* connection errors per second */
+
+ unsigned int idles; /* # of idle applets */
+ struct eb_root idle_applets; /* idle SPOE applets available to process data */
+ struct list applets; /* all SPOE applets for this agent */
+ struct list sending_queue; /* Queue of streams waiting to send data */
+ struct list waiting_queue; /* Queue of streams waiting for an ack, in async mode */
+ __decl_thread(HA_SPINLOCK_T lock);
+ } *rt;
+
+ struct {
+ unsigned int applets; /* # of SPOE applets */
+ unsigned int idles; /* # of idle applets */
+ unsigned int nb_sending; /* # of streams waiting to send data */
+ unsigned int nb_waiting; /* # of streams waiting for an ack */
+ unsigned long long nb_processed; /* # of frames processed by the SPOE */
+ unsigned long long nb_errors; /* # of errors during the processing */
+ } counters;
+};
+
+/* SPOE filter configuration */
+struct spoe_config {
+ char *id; /* The SPOE engine name. If undefined in HAProxy config,
+ * it will be set with the SPOE agent name */
+ struct proxy *proxy; /* Proxy owning the filter */
+ struct spoe_agent *agent; /* Agent used by this filter */
+ struct proxy agent_fe; /* Agent frontend */
+};
+
+/* SPOE context attached to a stream. It is the main structure that handles the
+ * processing offload */
+struct spoe_context {
+ struct filter *filter; /* The SPOE filter */
+ struct stream *strm; /* The stream that should be offloaded */
+
+ struct list *events; /* List of messages that will be sent during the stream processing */
+ struct list *groups; /* List of available SPOE group */
+
+ struct buffer buffer; /* Buffer used to store encoded messages */
+ struct buffer_wait buffer_wait; /* position in the list of resources waiting for a buffer */
+ struct list list;
+
+ enum spoe_ctx_state state; /* SPOE_CTX_ST_* */
+ unsigned int flags; /* SPOE_CTX_FL_* */
+ unsigned int status_code; /* SPOE_CTX_ERR_* */
+
+ unsigned int stream_id; /* stream_id and frame_id are used */
+ unsigned int frame_id; /* to map NOTIFY and ACK frames */
+ unsigned int process_exp; /* expiration date to process an event */
+
+ struct spoe_appctx *spoe_appctx; /* SPOE appctx sending the current frame */
+ struct {
+ struct spoe_message *curmsg; /* SPOE message from which to resume encoding */
+ struct spoe_arg *curarg; /* SPOE arg in <curmsg> from which to resume encoding */
+ unsigned int curoff; /* offset in <curarg> from which to resume encoding */
+ unsigned int curlen; /* length of <curarg> still to be encoded, for SMP_F_MAY_CHANGE data */
+ unsigned int flags; /* SPOE_FRM_FL_* */
+ } frag_ctx; /* Info about fragmented frames, valid only if SPOE_CTX_FL_FRAGMENTED is set */
+
+ struct {
+ ullong start_ts; /* start date of the current event/group */
+ ullong request_ts; /* date the frame processing starts (reset for each frag) */
+ ullong queue_ts; /* date the frame is queued (reset for each frag) */
+ ullong wait_ts; /* date the stream starts waiting for a response */
+ ullong response_ts; /* date the response processing starts */
+ long t_request; /* delay to encode and push the frame in queue (cumulative for frags) */
+ long t_queue; /* delay before the frame gets out the sending queue (cumulative for frags) */
+ long t_waiting; /* delay before the response is received */
+ long t_response; /* delay to process the response (from the stream pov) */
+ long t_process; /* processing time of the last event/group */
+ unsigned long t_total; /* cumulative processing time */
+ } stats; /* Stats for this stream */
+};
+
+/* SPOE context inside a appctx */
+struct spoe_appctx {
+ struct appctx *owner; /* the owner */
+ struct task *task; /* task to handle applet timeouts */
+ struct spoe_agent *agent; /* agent on which the applet is attached */
+
+ unsigned int version; /* the negotiated version */
+ unsigned int max_frame_size; /* the negotiated max-frame-size value */
+ unsigned int flags; /* SPOE_APPCTX_FL_* */
+
+ unsigned int status_code; /* SPOE_FRM_ERR_* */
+#if defined(DEBUG_SPOE) || defined(DEBUG_FULL)
+ char *reason; /* Error message, used for debugging only */
+ int rlen; /* reason length */
+#endif
+
+ struct buffer buffer; /* Buffer used to store encoded messages */
+ struct buffer_wait buffer_wait; /* position in the list of resources waiting for a buffer */
+ struct list waiting_queue; /* list of streams waiting for an ACK frame, in sync and pipelining mode */
+ struct list list; /* next spoe appctx for the same agent */
+ struct eb32_node node; /* node used for applets tree */
+ unsigned int cur_fpa;
+
+ struct {
+ struct spoe_context *ctx; /* SPOE context owning the fragmented frame */
+ unsigned int cursid; /* stream-id of the fragmented frame. used if the processing is aborted */
+ unsigned int curfid; /* frame-id of the fragmented frame. used if the processing is aborted */
+ } frag_ctx; /* Info about fragmented frames, unused for unfragmented frames */
+};
+
+#endif /* _HAPROXY_SPOE_T_H */
diff --git a/include/haproxy/spoe.h b/include/haproxy/spoe.h
new file mode 100644
index 0000000..7cd0987
--- /dev/null
+++ b/include/haproxy/spoe.h
@@ -0,0 +1,351 @@
+/*
+ * include/haproxy/spoe.h
+ * Encoding/Decoding functions for the SPOE filters (and other helpers).
+ *
+ * Copyright (C) 2017 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SPOE_H
+#define _HAPROXY_SPOE_H
+
+#include <haproxy/api.h>
+#include <haproxy/intops.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/spoe-t.h>
+
+
+/* Encode a buffer. Its length <len> is encoded as a varint, followed by a copy
+ * of <str>. It must have enough space in <*buf> to encode the buffer, else an
+ * error is triggered.
+ * On success, it returns <len> and <*buf> is moved after the encoded value. If
+ * an error occurred, it returns -1. */
+static inline int
+spoe_encode_buffer(const char *str, size_t len, char **buf, char *end)
+{
+ char *p = *buf;
+ int ret;
+
+ if (p >= end)
+ return -1;
+
+ if (!len) {
+ *p++ = 0;
+ *buf = p;
+ return 0;
+ }
+
+ ret = encode_varint(len, &p, end);
+ if (ret == -1 || p + len > end)
+ return -1;
+
+ memcpy(p, str, len);
+ *buf = p + len;
+ return len;
+}
+
+/* Encode a buffer, possibly partially. It does the same thing as
+ * 'spoe_encode_buffer', but if there is not enough space, it does not fail.
+ * On success, it returns the number of copied bytes and <*buf> is moved after
+ * the encoded value. If an error occurred, it returns -1. */
+static inline int
+spoe_encode_frag_buffer(const char *str, size_t len, char **buf, char *end)
+{
+ char *p = *buf;
+ int ret;
+
+ if (p >= end)
+ return -1;
+
+ if (!len) {
+ *p++ = 0;
+ *buf = p;
+ return 0;
+ }
+
+ ret = encode_varint(len, &p, end);
+ if (ret == -1 || p >= end)
+ return -1;
+
+ ret = (p+len < end) ? len : (end - p);
+ memcpy(p, str, ret);
+ *buf = p + ret;
+ return ret;
+}
+
+/* Decode a buffer. The buffer length is decoded and saved in <*len>. <*str>
+ * points on the first byte of the buffer.
+ * On success, it returns the buffer length and <*buf> is moved after the
+ * encoded buffer. Otherwise, it returns -1. */
+static inline int
+spoe_decode_buffer(char **buf, char *end, char **str, uint64_t *len)
+{
+ char *p = *buf;
+ uint64_t sz;
+ int ret;
+
+ *str = NULL;
+ *len = 0;
+
+ ret = decode_varint(&p, end, &sz);
+ if (ret == -1 || p + sz > end)
+ return -1;
+
+ *str = p;
+ *len = sz;
+ *buf = p + sz;
+ return sz;
+}
+
+/* Encode a typed data using value in <smp>. On success, it returns the number
+ * of copied bytes and <*buf> is moved after the encoded value. If an error
+ * occurred, it returns -1.
+ *
+ * If the value is too big to be encoded, depending on its type, then encoding
+ * failed or the value is partially encoded. Only strings and binaries can be
+ * partially encoded. */
+static inline int
+spoe_encode_data(struct sample *smp, char **buf, char *end)
+{
+ char *p = *buf;
+ int ret;
+
+ if (p >= end)
+ return -1;
+
+ if (smp == NULL) {
+ *p++ = SPOE_DATA_T_NULL;
+ goto end;
+ }
+
+ switch (smp->data.type) {
+ case SMP_T_BOOL:
+ *p = SPOE_DATA_T_BOOL;
+ *p++ |= ((!smp->data.u.sint) ? SPOE_DATA_FL_FALSE : SPOE_DATA_FL_TRUE);
+ break;
+
+ case SMP_T_SINT:
+ *p++ = SPOE_DATA_T_INT64;
+ if (encode_varint(smp->data.u.sint, &p, end) == -1)
+ return -1;
+ break;
+
+ case SMP_T_IPV4:
+ if (p + 5 > end)
+ return -1;
+ *p++ = SPOE_DATA_T_IPV4;
+ memcpy(p, &smp->data.u.ipv4, 4);
+ p += 4;
+ break;
+
+ case SMP_T_IPV6:
+ if (p + 17 > end)
+ return -1;
+ *p++ = SPOE_DATA_T_IPV6;
+ memcpy(p, &smp->data.u.ipv6, 16);
+ p += 16;
+ break;
+
+ case SMP_T_STR:
+ case SMP_T_BIN: {
+ /* If defined, get length and offset of the sample by reading the sample
+ * context. ctx.a[0] is the pointer to the length and ctx.a[1] is the
+ * pointer to the offset. If the offset is greater than 0, it means the
+ * sample is partially encoded. In this case, we only need to encode the
+ * remaining. When all the sample is encoded, the offset is reset to 0.
+ * So the caller knows it can try to encode the next sample. */
+ struct buffer *chk = &smp->data.u.str;
+ unsigned int *len = smp->ctx.a[0];
+ unsigned int *off = smp->ctx.a[1];
+
+ if (!*off) {
+ /* First evaluation of the sample : encode the
+ * type (string or binary), the buffer length
+ * (as a varint) and at least 1 byte of the
+ * buffer. */
+ struct buffer *chk = &smp->data.u.str;
+
+ *p++ = (smp->data.type == SMP_T_STR)
+ ? SPOE_DATA_T_STR
+ : SPOE_DATA_T_BIN;
+ ret = spoe_encode_frag_buffer(chk->area,
+ chk->data, &p,
+ end);
+ if (ret == -1)
+ return -1;
+ *len = chk->data;
+ }
+ else {
+ /* The sample has been fragmented, encode remaining data */
+ ret = MIN(*len - *off, end - p);
+ memcpy(p, chk->area + *off, ret);
+ p += ret;
+ }
+ /* Now update <*off> */
+ if (ret + *off != *len)
+ *off += ret;
+ else
+ *off = 0;
+ break;
+ }
+
+ case SMP_T_METH: {
+ char *m;
+ size_t len;
+
+ *p++ = SPOE_DATA_T_STR;
+ switch (smp->data.u.meth.meth) {
+ case HTTP_METH_OPTIONS: m = "OPTIONS"; len = 7; break;
+ case HTTP_METH_GET : m = "GET"; len = 3; break;
+ case HTTP_METH_HEAD : m = "HEAD"; len = 4; break;
+ case HTTP_METH_POST : m = "POST"; len = 4; break;
+ case HTTP_METH_PUT : m = "PUT"; len = 3; break;
+ case HTTP_METH_DELETE : m = "DELETE"; len = 6; break;
+ case HTTP_METH_TRACE : m = "TRACE"; len = 5; break;
+ case HTTP_METH_CONNECT: m = "CONNECT"; len = 7; break;
+
+ default :
+ m = smp->data.u.meth.str.area;
+ len = smp->data.u.meth.str.data;
+ }
+ if (spoe_encode_buffer(m, len, &p, end) == -1)
+ return -1;
+ break;
+ }
+
+ default:
+ *p++ = SPOE_DATA_T_NULL;
+ break;
+ }
+
+ end:
+ ret = (p - *buf);
+ *buf = p;
+ return ret;
+}
+
+/* Skip a typed data. If an error occurred, -1 is returned, otherwise the number
+ * of skipped bytes is returned and the <*buf> is moved after skipped data.
+ *
+ * A typed data is composed of a type (1 byte) and corresponding data:
+ * - boolean: no additional data (0 bytes)
+ * - integers: a variable-length integer (see decode_varint)
+ * - ipv4: 4 bytes
+ * - ipv6: 16 bytes
+ * - binary and string: a buffer prefixed by its size, a variable-length
+ * integer (see spoe_decode_buffer) */
+static inline int
+spoe_skip_data(char **buf, char *end)
+{
+ char *str, *p = *buf;
+ int type, ret;
+ uint64_t v, sz;
+
+ if (p >= end)
+ return -1;
+
+ type = *p++;
+ switch (type & SPOE_DATA_T_MASK) {
+ case SPOE_DATA_T_BOOL:
+ break;
+ case SPOE_DATA_T_INT32:
+ case SPOE_DATA_T_INT64:
+ case SPOE_DATA_T_UINT32:
+ case SPOE_DATA_T_UINT64:
+ if (decode_varint(&p, end, &v) == -1)
+ return -1;
+ break;
+ case SPOE_DATA_T_IPV4:
+ if (p+4 > end)
+ return -1;
+ p += 4;
+ break;
+ case SPOE_DATA_T_IPV6:
+ if (p+16 > end)
+ return -1;
+ p += 16;
+ break;
+ case SPOE_DATA_T_STR:
+ case SPOE_DATA_T_BIN:
+ /* All the buffer must be skipped */
+ if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+ return -1;
+ break;
+ }
+
+ ret = (p - *buf);
+ *buf = p;
+ return ret;
+}
+
+/* Decode a typed data and fill <smp>. If an error occurred, -1 is returned,
+ * otherwise the number of read bytes is returned and <*buf> is moved after the
+ * decoded data. See spoe_skip_data for details. */
+static inline int
+spoe_decode_data(char **buf, char *end, struct sample *smp)
+{
+ char *str, *p = *buf;
+ int type, r = 0;
+ uint64_t sz;
+
+ if (p >= end)
+ return -1;
+
+ type = *p++;
+ switch (type & SPOE_DATA_T_MASK) {
+ case SPOE_DATA_T_BOOL:
+ smp->data.u.sint = ((type & SPOE_DATA_FL_MASK) == SPOE_DATA_FL_TRUE);
+ smp->data.type = SMP_T_BOOL;
+ break;
+ case SPOE_DATA_T_INT32:
+ case SPOE_DATA_T_INT64:
+ case SPOE_DATA_T_UINT32:
+ case SPOE_DATA_T_UINT64:
+ if (decode_varint(&p, end, (uint64_t *)&smp->data.u.sint) == -1)
+ return -1;
+ smp->data.type = SMP_T_SINT;
+ break;
+ case SPOE_DATA_T_IPV4:
+ if (p+4 > end)
+ return -1;
+ smp->data.type = SMP_T_IPV4;
+ memcpy(&smp->data.u.ipv4, p, 4);
+ p += 4;
+ break;
+ case SPOE_DATA_T_IPV6:
+ if (p+16 > end)
+ return -1;
+ memcpy(&smp->data.u.ipv6, p, 16);
+ smp->data.type = SMP_T_IPV6;
+ p += 16;
+ break;
+ case SPOE_DATA_T_STR:
+ case SPOE_DATA_T_BIN:
+ /* All the buffer must be decoded */
+ if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+ return -1;
+ smp->data.u.str.area = str;
+ smp->data.u.str.data = sz;
+ smp->data.type = (type == SPOE_DATA_T_STR) ? SMP_T_STR : SMP_T_BIN;
+ break;
+ }
+
+ r = (p - *buf);
+ *buf = p;
+ return r;
+}
+
+#endif /* _HAPROXY_SPOE_H */
diff --git a/include/haproxy/ssl_ckch-t.h b/include/haproxy/ssl_ckch-t.h
new file mode 100644
index 0000000..0002b84
--- /dev/null
+++ b/include/haproxy/ssl_ckch-t.h
@@ -0,0 +1,161 @@
+/*
+ * include/haproxy/ssl_ckch-t.h
+ * ckch structures
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/* The ckch (cert key and chain) structures are a group of structures used to
+ * cache and manipulate the certificates files loaded from the configuration
+ * file and the CLI. Every certificate change made in an SSL_CTX should be done
+ * in these structures before being applied to a SSL_CTX.
+ *
+ * The complete architecture is described in doc/internals/ssl_cert.dia
+ */
+
+
+#ifndef _HAPROXY_SSL_CKCH_T_H
+#define _HAPROXY_SSL_CKCH_T_H
+#ifdef USE_OPENSSL
+
+#include <import/ebtree-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/openssl-compat.h>
+
+/* This is used to preload the certificate, private key
+ * and Cert Chain of a file passed in via the crt
+ * argument
+ *
+ * This way, we do not have to read the file multiple times
+ *
+ * This structure is the base one, in the case of a multi-cert bundle, we
+ * allocate 1 structure per type.
+ */
+struct ckch_data {
+ X509 *cert;
+ EVP_PKEY *key;
+ STACK_OF(X509) *chain;
+ HASSL_DH *dh;
+ struct buffer *sctl;
+ struct buffer *ocsp_response;
+ X509 *ocsp_issuer;
+ OCSP_CERTID *ocsp_cid;
+ int ocsp_update_mode;
+};
+
+/*
+ * this is used to store 1 to SSL_SOCK_NUM_KEYTYPES cert_key_and_chain and
+ * metadata.
+ *
+ * "ckch" for cert, key and chain.
+ *
+ * XXX: Once we remove the multi-cert bundle support, we could merge this structure
+ * with the cert_key_and_chain one.
+ */
+struct ckch_store {
+ struct ckch_data *data;
+ struct list ckch_inst; /* list of ckch_inst which uses this ckch_node */
+ struct list crtlist_entry; /* list of entries which use this store */
+ struct ebmb_node node;
+ char path[VAR_ARRAY];
+};
+
+/* forward declarations for ckch_inst */
+struct ssl_bind_conf;
+struct crtlist_entry;
+
+
+/* Used to keep a list of all the instances using a specific cafile_entry.
+ * It enables to link instances regardless of how they are using the CA file
+ * (either via the ca-file, ca-verify-file or crl-file option). */
+struct ckch_inst_link {
+ struct ckch_inst *ckch_inst;
+ struct list list;
+};
+
+/* Used to keep in a ckch instance a list of all the ckch_inst_link which
+ * reference it. This way, when deleting a ckch_inst, we can ensure that no
+ * dangling reference on it will remain. */
+struct ckch_inst_link_ref {
+ struct ckch_inst_link *link;
+ struct list list;
+};
+
+/*
+ * This structure describes a ckch instance. An instance is generated for each
+ * bind_conf. The instance contains a linked list of the sni ctx which uses
+ * the ckch in this bind_conf.
+ */
+struct ckch_inst {
+ struct bind_conf *bind_conf; /* pointer to the bind_conf that uses this ckch_inst */
+ struct ssl_bind_conf *ssl_conf; /* pointer to the ssl_conf which is used by every sni_ctx of this inst */
+ struct ckch_store *ckch_store; /* pointer to the store used to generate this inst */
+ struct crtlist_entry *crtlist_entry; /* pointer to the crtlist_entry used, or NULL */
+ struct server *server; /* pointer to the server if is_server_instance is set, NULL otherwise */
+ SSL_CTX *ctx; /* pointer to the SSL context used by this instance */
+ unsigned int is_default:1; /* This instance is used as the default ctx for this bind_conf */
+ unsigned int is_server_instance:1; /* This instance is used by a backend server */
+ /* space for more flag there */
+ struct list sni_ctx; /* list of sni_ctx using this ckch_inst */
+ struct list by_ckchs; /* chained in ckch_store's list of ckch_inst */
+ struct list by_crtlist_entry; /* chained in crtlist_entry list of inst */
+ struct list cafile_link_refs; /* list of ckch_inst_link pointing to this instance */
+};
+
+
+/* Option through which a cafile_entry was created, either
+ * ca-file/ca-verify-file or crl-file. */
+enum cafile_type {
+ CAFILE_CERT,
+ CAFILE_CRL
+};
+
+/*
+ * deduplicate cafile (and crlfile)
+ */
+struct cafile_entry {
+ X509_STORE *ca_store;
+ STACK_OF(X509_NAME) *ca_list;
+ struct list ckch_inst_link; /* list of ckch_inst which use this CA file entry */
+ enum cafile_type type;
+ struct ebmb_node node;
+ char path[0];
+};
+
+enum {
+ CERT_TYPE_PEM = 0,
+ CERT_TYPE_KEY,
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) || defined OPENSSL_IS_BORINGSSL)
+ CERT_TYPE_OCSP,
+#endif
+ CERT_TYPE_ISSUER,
+#ifdef HAVE_SSL_SCTL
+ CERT_TYPE_SCTL,
+#endif
+ CERT_TYPE_MAX,
+};
+
+struct cert_exts {
+ const char *ext;
+ int type;
+ int (*load)(const char *path, char *payload, struct ckch_data *data, char **err);
+ /* add a parsing callback */
+};
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_CKCH_T_H */
diff --git a/include/haproxy/ssl_ckch.h b/include/haproxy/ssl_ckch.h
new file mode 100644
index 0000000..64ac3df
--- /dev/null
+++ b/include/haproxy/ssl_ckch.h
@@ -0,0 +1,75 @@
+/*
+ * include/haproxy/ssl_ckch.h
+ * ckch function prototypes
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_CKCH_H
+#define _HAPROXY_SSL_CKCH_H
+#ifdef USE_OPENSSL
+
+#include <haproxy/ssl_ckch-t.h>
+
+/* cert_key_and_chain functions */
+
+int ssl_sock_load_files_into_ckch(const char *path, struct ckch_data *data, char **err);
+int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct ckch_data *datackch , char **err);
+void ssl_sock_free_cert_key_and_chain_contents(struct ckch_data *data);
+
+int ssl_sock_load_key_into_ckch(const char *path, char *buf, struct ckch_data *data , char **err);
+int ssl_sock_load_ocsp_response_from_file(const char *ocsp_path, char *buf, struct ckch_data *data, char **err);
+int ssl_sock_load_sctl_from_file(const char *sctl_path, char *buf, struct ckch_data *data, char **err);
+int ssl_sock_load_issuer_file_into_ckch(const char *path, char *buf, struct ckch_data *data, char **err);
+
+/* ckch_store functions */
+struct ckch_store *ckchs_load_cert_file(char *path, char **err);
+struct ckch_store *ckchs_lookup(char *path);
+struct ckch_store *ckchs_dup(const struct ckch_store *src);
+struct ckch_store *ckch_store_new(const char *filename);
+void ckch_store_free(struct ckch_store *store);
+void ckch_store_replace(struct ckch_store *old_ckchs, struct ckch_store *new_ckchs);
+
+/* ckch_inst functions */
+void ckch_inst_free(struct ckch_inst *inst);
+struct ckch_inst *ckch_inst_new();
+int ckch_inst_new_load_store(const char *path, struct ckch_store *ckchs, struct bind_conf *bind_conf,
+ struct ssl_bind_conf *ssl_conf, char **sni_filter, int fcount, struct ckch_inst **ckchi, char **err);
+int ckch_inst_new_load_srv_store(const char *path, struct ckch_store *ckchs,
+ struct ckch_inst **ckchi, char **err);
+int ckch_inst_rebuild(struct ckch_store *ckch_store, struct ckch_inst *ckchi,
+ struct ckch_inst **new_inst, char **err);
+
+void ckch_deinit();
+void ckch_inst_add_cafile_link(struct ckch_inst *ckch_inst, struct bind_conf *bind_conf,
+ struct ssl_bind_conf *ssl_conf, const struct server *srv);
+
+/* ssl_store functions */
+struct cafile_entry *ssl_store_get_cafile_entry(char *path, int oldest_entry);
+X509_STORE* ssl_store_get0_locations_file(char *path);
+int ssl_store_add_uncommitted_cafile_entry(struct cafile_entry *entry);
+struct cafile_entry *ssl_store_create_cafile_entry(char *path, X509_STORE *store, enum cafile_type type);
+struct cafile_entry *ssl_store_dup_cafile_entry(struct cafile_entry *src);
+void ssl_store_delete_cafile_entry(struct cafile_entry *ca_e);
+int ssl_store_load_ca_from_buf(struct cafile_entry *ca_e, char *cert_buf, int append);
+int ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type);
+int __ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type, int shuterror);
+
+extern struct cert_exts cert_exts[];
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_CKCH_H */
diff --git a/include/haproxy/ssl_crtlist-t.h b/include/haproxy/ssl_crtlist-t.h
new file mode 100644
index 0000000..dc7a376
--- /dev/null
+++ b/include/haproxy/ssl_crtlist-t.h
@@ -0,0 +1,63 @@
+/*
+ * include/haproxy/ssl_crtlist-t.h
+ * crt-list structures
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_CRTLIST_T_H
+#define _HAPROXY_SSL_CRTLIST_T_H
+#ifdef USE_OPENSSL
+
+#include <import/ebtree-t.h>
+
+
+/* forward declarations for structures below */
+struct bind_conf;
+struct ssl_bind_conf;
+struct proxy;
+
+/* list of bind conf used by struct crtlist */
+struct bind_conf_list {
+ struct bind_conf *bind_conf;
+ struct bind_conf_list *next;
+};
+
+/* This structure is basically a crt-list or a directory */
+struct crtlist {
+ struct bind_conf_list *bind_conf; /* list of bind_conf which use this crtlist */
+ unsigned int linecount; /* number of lines */
+ struct eb_root entries;
+ struct list ord_entries; /* list to keep the line order of the crt-list file */
+ struct ebmb_node node; /* key is the filename or directory */
+};
+
+/* a file in a directory or a line in a crt-list */
+struct crtlist_entry {
+ struct ssl_bind_conf *ssl_conf; /* SSL conf in crt-list */
+ unsigned int linenum;
+ unsigned int fcount; /* filters count */
+ char **filters;
+ struct crtlist *crtlist; /* ptr to the parent crtlist */
+ struct list ckch_inst; /* list of instances of this entry, there is 1 ckch_inst per instance of the crt-list */
+ struct list by_crtlist; /* ordered entries */
+ struct list by_ckch_store; /* linked in ckch_store list of crtlist_entries */
+ struct ebpt_node node; /* key is a ptr to a ckch_store */
+};
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_CRTLIST_T_H */
diff --git a/include/haproxy/ssl_crtlist.h b/include/haproxy/ssl_crtlist.h
new file mode 100644
index 0000000..961cfc3
--- /dev/null
+++ b/include/haproxy/ssl_crtlist.h
@@ -0,0 +1,48 @@
+/*
+ * include/haproxy/ssl_crtlist.h
+ * crt-list function prototypes
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_CRTLIST_H
+#define _HAPROXY_SSL_CRTLIST_H
+#ifdef USE_OPENSSL
+
+#include <haproxy/ssl_crtlist-t.h>
+
+
+/* crt-list entry functions */
+void ssl_sock_free_ssl_conf(struct ssl_bind_conf *conf);
+char **crtlist_dup_filters(char **args, int fcount);
+void crtlist_free_filters(char **args);
+void crtlist_entry_free(struct crtlist_entry *entry);
+struct crtlist_entry *crtlist_entry_new();
+
+/* crt-list functions */
+void crtlist_free(struct crtlist *crtlist);
+struct crtlist *crtlist_new(const char *filename, int unique);
+
+/* file loading */
+int crtlist_parse_line(char *line, char **crt_path, struct crtlist_entry *entry, const char *file, int linenum, int from_cli, char **err);
+int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *curproxy, struct crtlist **crtlist, char **err);
+int crtlist_load_cert_dir(char *path, struct bind_conf *bind_conf, struct crtlist **crtlist, char **err);
+
+void crtlist_deinit();
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_CRTLIST_H */
diff --git a/include/haproxy/ssl_ocsp-t.h b/include/haproxy/ssl_ocsp-t.h
new file mode 100644
index 0000000..fc2750b
--- /dev/null
+++ b/include/haproxy/ssl_ocsp-t.h
@@ -0,0 +1,94 @@
+/*
+ * include/haproxy/ssl_ocsp-t.h
+ * SSL structures related to OCSP
+ *
+ * Copyright (C) 2022 Remi Tricot-Le Breton - rlebreton@haproxy.com
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_OCSP_T_H
+#define _HAPROXY_SSL_OCSP_T_H
+#ifdef USE_OPENSSL
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ssl_sock-t.h>
+
+#ifndef OPENSSL_NO_OCSP
+extern int ocsp_ex_index;
+#endif
+
+#define SSL_OCSP_UPDATE_DELAY_MAX 60*60 /* 1H */
+#define SSL_OCSP_UPDATE_DELAY_MIN 5*60 /* 5 minutes */
+#define SSL_OCSP_UPDATE_MARGIN 60 /* 1 minute */
+#define SSL_OCSP_HTTP_ERR_REPLAY 60 /* 1 minute */
+
+#if (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP)
+/*
+ * struct alignment works here such that the key.key is the same as key_data
+ * Do not change the placement of key_data
+ */
+struct certificate_ocsp {
+ struct ebmb_node key;
+ unsigned char key_data[OCSP_MAX_CERTID_ASN1_LENGTH];
+ unsigned int key_length;
+ int refcount_store; /* Number of ckch_store that reference this certificate_ocsp */
+ int refcount_instance; /* Number of ckch_inst that reference this certificate_ocsp */
+ struct buffer response;
+ long expire;
+ X509 *issuer;
+ STACK_OF(X509) *chain;
+ struct eb64_node next_update; /* Key of items inserted in ocsp_update_tree (sorted by absolute date) */
+ struct buffer *uri; /* First OCSP URI contained in the corresponding certificate */
+
+ /* OCSP update stats */
+ u64 last_update; /* Time of last successful update */
+ unsigned int last_update_status;/* Status of the last OCSP update */
+ unsigned int num_success; /* Number of successful updates */
+ unsigned int num_failure; /* Number of failed updates */
+ unsigned int fail_count:30; /* Number of successive failures */
+	unsigned int update_once:1; /* Set if an entry should not be reinserted into the tree after update */
+ unsigned int updating:1; /* Set if an entry is already being updated */
+ char path[VAR_ARRAY];
+};
+
+struct ocsp_cbk_arg {
+ int is_single;
+ int single_kt;
+ union {
+ struct certificate_ocsp *s_ocsp;
+ /*
+ * m_ocsp will have multiple entries dependent on key type
+ * Entry 0 - DSA
+ * Entry 1 - ECDSA
+ * Entry 2 - RSA
+ */
+ struct certificate_ocsp *m_ocsp[SSL_SOCK_NUM_KEYTYPES];
+ };
+};
+
+extern struct eb_root cert_ocsp_tree;
+extern struct eb_root ocsp_update_tree;
+extern struct task *ocsp_update_task;
+
+__decl_thread(extern HA_SPINLOCK_T ocsp_tree_lock);
+
+#endif /* (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) */
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_OCSP_T_H */
diff --git a/include/haproxy/ssl_ocsp.h b/include/haproxy/ssl_ocsp.h
new file mode 100644
index 0000000..8a4197c
--- /dev/null
+++ b/include/haproxy/ssl_ocsp.h
@@ -0,0 +1,70 @@
+/*
+ * include/haproxy/ssl_ocsp.h
+ * This file contains definition for ssl OCSP operations
+ *
+ * Copyright (C) 2022 Remi Tricot-Le Breton - rlebreton@haproxy.com
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_OCSP_H
+#define _HAPROXY_SSL_OCSP_H
+#ifdef USE_OPENSSL
+
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ssl_ckch-t.h>
+#include <haproxy/ssl_crtlist-t.h>
+#include <haproxy/ssl_ocsp-t.h>
+
+#if (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP)
+
+int ssl_ocsp_build_response_key(OCSP_CERTID *ocsp_cid, unsigned char certid[OCSP_MAX_CERTID_ASN1_LENGTH], unsigned int *key_length);
+
+int ssl_sock_get_ocsp_arg_kt_index(int evp_keytype);
+int ssl_sock_ocsp_stapling_cbk(SSL *ssl, void *arg);
+
+void ssl_sock_free_ocsp(struct certificate_ocsp *ocsp);
+void ssl_sock_free_ocsp_instance(struct certificate_ocsp *ocsp);
+
+int ssl_sock_load_ocsp_response(struct buffer *ocsp_response,
+ struct certificate_ocsp *ocsp,
+ OCSP_CERTID *cid, char **err);
+int ssl_sock_update_ocsp_response(struct buffer *ocsp_response, char **err);
+void ssl_sock_ocsp_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp);
+
+int ssl_ocsp_get_uri_from_cert(X509 *cert, struct buffer *out, char **err);
+int ssl_ocsp_create_request_details(const OCSP_CERTID *certid, struct buffer *req_url,
+ struct buffer *req_body, char **err);
+int ssl_ocsp_check_response(STACK_OF(X509) *chain, X509 *issuer,
+ struct buffer *respbuf, char **err);
+
+int ssl_create_ocsp_update_task(char **err);
+void ssl_destroy_ocsp_update_task(void);
+
+int ssl_ocsp_update_insert(struct certificate_ocsp *ocsp);
+
+int ocsp_update_check_cfg_consistency(struct ckch_store *store, struct crtlist_entry *entry, char *crt_path, char **err);
+
+#endif /* (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) */
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_OCSP_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/ssl_sock-t.h b/include/haproxy/ssl_sock-t.h
new file mode 100644
index 0000000..fdf41a7
--- /dev/null
+++ b/include/haproxy/ssl_sock-t.h
@@ -0,0 +1,323 @@
+/*
+ * include/haproxy/ssl_sock-t.h
+ * SSL settings for listeners and servers
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_SOCK_T_H
+#define _HAPROXY_SSL_SOCK_T_H
+#ifdef USE_OPENSSL
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/connection-t.h> /* struct wait_event */
+#include <haproxy/listener-t.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ssl_ckch-t.h>
+#include <haproxy/ssl_crtlist-t.h>
+#include <haproxy/thread-t.h>
+
+/* ***** READ THIS before adding code here! *****
+ *
+ * Due to API incompatibilities between multiple OpenSSL versions and their
+ * derivatives, it's often tempting to add macros to (re-)define certain
+ * symbols. Please do not do this here, and do it in common/openssl-compat.h
+ * exclusively so that the whole code consistently uses the same macros.
+ *
+ * Whenever possible if a macro is missing in certain versions, it's better
+ * to conditionally define it in openssl-compat.h than using lots of ifdefs.
+ */
+
+/* Warning, these are bits, not integers! */
+#define SSL_SOCK_ST_FL_VERIFY_DONE 0x00000001
+#define SSL_SOCK_ST_FL_16K_WBFSIZE 0x00000002
+#define SSL_SOCK_SEND_UNLIMITED 0x00000004
+#define SSL_SOCK_RECV_HEARTBEAT 0x00000008
+#define SSL_SOCK_SEND_MORE 0x00000010 /* set MSG_MORE at lower levels */
+
+/* bits 0xFFFFFF00 are reserved to store verify errors.
+ * The CA and CRT error codes will be stored on 7 bits each
+ * (since the max verify error code does not exceed 127)
+ * and the CA error depth will be stored on 4 bits.
+ */
+
+/* Verify errors macros */
+#define SSL_SOCK_CA_ERROR_TO_ST(e) (((e > 127) ? 127 : e) << (8))
+#define SSL_SOCK_CAEDEPTH_TO_ST(d) (((d > 15) ? 15 : d) << (7+8))
+#define SSL_SOCK_CRTERROR_TO_ST(e) (((e > 127) ? 127 : e) << (4+7+8))
+
+#define SSL_SOCK_ST_TO_CA_ERROR(s) ((s >> (8)) & 127)
+#define SSL_SOCK_ST_TO_CAEDEPTH(s) ((s >> (7+8)) & 15)
+#define SSL_SOCK_ST_TO_CRTERROR(s) ((s >> (4+7+8)) & 127)
+
+/* ssl_methods flags for ssl options */
+#define MC_SSL_O_ALL 0x0000
+#define MC_SSL_O_NO_SSLV3 0x0001 /* disable SSLv3 */
+#define MC_SSL_O_NO_TLSV10 0x0002 /* disable TLSv10 */
+#define MC_SSL_O_NO_TLSV11 0x0004 /* disable TLSv11 */
+#define MC_SSL_O_NO_TLSV12 0x0008 /* disable TLSv12 */
+#define MC_SSL_O_NO_TLSV13 0x0010 /* disable TLSv13 */
+
+/* file to guess during file loading */
+#define SSL_GF_NONE 0x00000000 /* Don't guess any file, only open the files specified in the configuration files */
+#define SSL_GF_BUNDLE 0x00000001 /* try to open the bundles */
+#define SSL_GF_SCTL 0x00000002 /* try to open the .sctl file */
+#define SSL_GF_OCSP 0x00000004 /* try to open the .ocsp file */
+#define SSL_GF_OCSP_ISSUER 0x00000008 /* try to open the .issuer file if an OCSP file was loaded */
+#define SSL_GF_KEY 0x00000010 /* try to open the .key file to load a private key */
+
+#define SSL_GF_ALL (SSL_GF_BUNDLE|SSL_GF_SCTL|SSL_GF_OCSP|SSL_GF_OCSP_ISSUER|SSL_GF_KEY)
+
+/* ssl_methods versions */
+enum {
+ CONF_TLSV_NONE = 0,
+ CONF_TLSV_MIN = 1,
+ CONF_SSLV3 = 1,
+ CONF_TLSV10 = 2,
+ CONF_TLSV11 = 3,
+ CONF_TLSV12 = 4,
+ CONF_TLSV13 = 5,
+ CONF_TLSV_MAX = 5,
+};
+
+/* server and bind verify method, it uses a global value as default */
+enum {
+ SSL_SOCK_VERIFY_DEFAULT = 0,
+ SSL_SOCK_VERIFY_REQUIRED = 1,
+ SSL_SOCK_VERIFY_OPTIONAL = 2,
+ SSL_SOCK_VERIFY_NONE = 3,
+};
+
+/* bind ocsp update mode */
+enum {
+ SSL_SOCK_OCSP_UPDATE_DFLT = 0,
+ SSL_SOCK_OCSP_UPDATE_OFF = 1,
+ SSL_SOCK_OCSP_UPDATE_ON = 2,
+};
+
+/* states of the CLI IO handler for 'set ssl cert' */
+enum {
+ SETCERT_ST_INIT = 0,
+ SETCERT_ST_GEN,
+ SETCERT_ST_INSERT,
+ SETCERT_ST_FIN,
+};
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+typedef enum { SET_CLIENT, SET_SERVER } set_context_func;
+#else /* openssl >= 1.1.0 */
+typedef enum { SET_MIN, SET_MAX } set_context_func;
+#endif
+
+struct methodVersions {
+ int option;
+ uint16_t flag;
+ void (*ctx_set_version)(SSL_CTX *, set_context_func);
+ void (*ssl_set_version)(SSL *, set_context_func);
+ const char *name;
+};
+
+struct pkey_info {
+ uint8_t sig; /* TLSEXT_signature_[rsa,ecdsa,...] */
+ uint16_t bits; /* key size in bits */
+};
+
+struct sni_ctx {
+ SSL_CTX *ctx; /* context associated to the certificate */
+ int order; /* load order for the certificate */
+ unsigned int neg:1; /* reject if match */
+ unsigned int wild:1; /* wildcard sni */
+ struct pkey_info kinfo; /* pkey info */
+ struct ssl_bind_conf *conf; /* ptr to a crtlist's ssl_conf, must not be free from here */
+ struct list by_ckch_inst; /* chained in ckch_inst's list of sni_ctx */
+ struct ckch_inst *ckch_inst; /* instance used to create this sni_ctx */
+ struct ebmb_node name; /* node holding the servername value */
+};
+
+struct tls_sess_key_128 {
+ unsigned char name[16];
+ unsigned char aes_key[16];
+ unsigned char hmac_key[16];
+} __attribute__((packed));
+
+struct tls_sess_key_256 {
+ unsigned char name[16];
+ unsigned char aes_key[32];
+ unsigned char hmac_key[32];
+} __attribute__((packed));
+
+union tls_sess_key{
+ unsigned char name[16];
+ struct tls_sess_key_128 key_128;
+ struct tls_sess_key_256 key_256;
+} __attribute__((packed));
+
+struct tls_keys_ref {
+ struct list list; /* Used to chain refs. */
+ char *filename;
+ int unique_id; /* Each pattern reference have unique id. */
+ int refcount; /* number of users of this tls_keys_ref. */
+ union tls_sess_key *tlskeys;
+ int tls_ticket_enc_index;
+ int key_size_bits;
+ __decl_thread(HA_RWLOCK_T lock); /* lock used to protect the ref */
+};
+
+/* shared ssl session */
+struct sh_ssl_sess_hdr {
+ struct ebmb_node key;
+ unsigned char key_data[SSL_MAX_SSL_SESSION_ID_LENGTH];
+};
+
+/* issuer chain store with hash of Subject Key Identifier
+   certificate/issuer matching is verified with X509_check_issued
+*/
+struct issuer_chain {
+ struct eb64_node node;
+ STACK_OF(X509) *chain;
+ char *path;
+};
+
+struct connection;
+
+typedef void (*ssl_sock_msg_callback_func)(struct connection *conn,
+ int write_p, int version, int content_type,
+ const void *buf, size_t len, SSL *ssl);
+
+/* This structure contains a function pointer <func> that is called
+ * when observing received or sent SSL/TLS protocol messages, such as
+ * handshake messages or other events that can occur during processing.
+ */
+struct ssl_sock_msg_callback {
+ ssl_sock_msg_callback_func func;
+ struct list list; /* list of registered callbacks */
+};
+
+/* This memory pool is used for capturing clienthello parameters. */
+struct ssl_capture {
+ ullong xxh64;
+ ushort protocol_version;
+ ushort ciphersuite_len;
+ ushort extensions_len;
+ ushort ec_len;
+ uint ciphersuite_offset;
+ uint extensions_offset;
+ uint ec_offset;
+ uint ec_formats_offset;
+ uchar ec_formats_len;
+ char data[VAR_ARRAY];
+};
+
+#ifdef HAVE_SSL_KEYLOG
+#define SSL_KEYLOG_MAX_SECRET_SIZE 129
+
+struct ssl_keylog {
+ /*
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
+ */
+ char *client_random;
+
+ /* TLS 1.3 */
+ char *client_early_traffic_secret;
+ char *client_handshake_traffic_secret;
+ char *server_handshake_traffic_secret;
+ char *client_traffic_secret_0;
+ char *server_traffic_secret_0;
+ char *exporter_secret;
+ char *early_exporter_secret;
+};
+#endif
+
+struct ssl_sock_ctx {
+ struct connection *conn;
+ SSL *ssl;
+ BIO *bio;
+ const struct xprt_ops *xprt;
+ void *xprt_ctx;
+ struct wait_event wait_event;
+ struct wait_event *subs;
+ int xprt_st; /* transport layer state, initialized to zero */
+ unsigned long error_code; /* last error code of the error stack */
+ struct buffer early_buf; /* buffer to store the early data received */
+ int sent_early_data; /* Amount of early data we sent so far */
+
+#ifdef USE_QUIC
+ struct quic_conn *qc;
+#endif
+};
+
+struct global_ssl {
+ char *crt_base; /* base directory path for certificates */
+ char *ca_base; /* base directory path for CAs and CRLs */
+ char *issuers_chain_path; /* from "issuers-chain-path" */
+ int skip_self_issued_ca;
+
+ int async; /* whether we use ssl async mode */
+
+ char *listen_default_ciphers;
+ char *connect_default_ciphers;
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ char *listen_default_ciphersuites;
+ char *connect_default_ciphersuites;
+#endif
+#if defined(SSL_CTX_set1_curves_list)
+ char *listen_default_curves;
+ char *connect_default_curves;
+#endif
+#if defined(SSL_CTX_set1_sigalgs_list)
+ char *listen_default_sigalgs;
+ char *connect_default_sigalgs;
+#endif
+#if defined(SSL_CTX_set1_sigalgs_list)
+ char *listen_default_client_sigalgs;
+ char *connect_default_client_sigalgs;
+#endif
+ int listen_default_ssloptions;
+ int connect_default_ssloptions;
+ struct tls_version_filter listen_default_sslmethods;
+ struct tls_version_filter connect_default_sslmethods;
+
+ int private_cache; /* Force to use a private session cache even if nbproc > 1 */
+ unsigned int life_time; /* SSL session lifetime in seconds */
+ unsigned int max_record; /* SSL max record size */
+ unsigned int hard_max_record; /* SSL max record size hard limit */
+ unsigned int default_dh_param; /* SSL maximum DH parameter size */
+ int ctx_cache; /* max number of entries in the ssl_ctx cache. */
+ int capture_buffer_size; /* Size of the capture buffer. */
+ int keylog; /* activate keylog */
+ int extra_files; /* which files not defined in the configuration file are we looking for */
+	int extra_files_noext;         /* whether we remove the extension when looking up an extra file */
+
+#ifndef OPENSSL_NO_OCSP
+ struct {
+ unsigned int delay_max;
+ unsigned int delay_min;
+ } ocsp_update;
+#endif
+};
+
+/* The order here matters for picking a default context,
+ * keep the most common keytype at the bottom of the list
+ */
+extern const char *SSL_SOCK_KEYTYPE_NAMES[];
+
+#define SSL_SOCK_NUM_KEYTYPES 3
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_SOCK_T_H */
diff --git a/include/haproxy/ssl_sock.h b/include/haproxy/ssl_sock.h
new file mode 100644
index 0000000..02d5b02
--- /dev/null
+++ b/include/haproxy/ssl_sock.h
@@ -0,0 +1,191 @@
+/*
+ * include/haproxy/ssl_sock.h
+ * This file contains definition for ssl stream socket operations
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_SOCK_H
+#define _HAPROXY_SSL_SOCK_H
+#ifdef USE_OPENSSL
+
+
+#include <haproxy/connection.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/ssl_sock-t.h>
+#include <haproxy/thread.h>
+
+extern struct list tlskeys_reference;
+extern struct eb_root ckchs_tree;
+extern struct eb_root crtlists_tree;
+extern struct eb_root cafile_tree;
+extern int sctl_ex_index;
+extern struct global_ssl global_ssl;
+extern struct ssl_crtlist_kw ssl_crtlist_kws[];
+extern struct methodVersions methodVersions[];
+__decl_thread(extern HA_SPINLOCK_T ckch_lock);
+extern struct pool_head *pool_head_ssl_capture;
+extern int ssl_app_data_index;
+#ifdef USE_QUIC
+extern int ssl_qc_app_data_index;
+#endif /* USE_QUIC */
+extern unsigned int openssl_engines_initialized;
+extern int nb_engines;
+extern struct xprt_ops ssl_sock;
+extern int ssl_capture_ptr_index;
+extern int ssl_keylog_index;
+extern int ssl_client_sni_index;
+extern struct pool_head *pool_head_ssl_keylog;
+extern struct pool_head *pool_head_ssl_keylog_str;
+extern struct list openssl_providers;
+
+int ssl_sock_prep_ctx_and_inst(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf,
+ SSL_CTX *ctx, struct ckch_inst *ckch_inst, char **err);
+int ssl_sock_prep_srv_ctx_and_inst(const struct server *srv, SSL_CTX *ctx,
+ struct ckch_inst *ckch_inst);
+int ssl_sock_prepare_all_ctx(struct bind_conf *bind_conf);
+int ssl_sock_prepare_bind_conf(struct bind_conf *bind_conf);
+void ssl_sock_destroy_bind_conf(struct bind_conf *bind_conf);
+int ssl_sock_prepare_srv_ctx(struct server *srv);
+void ssl_sock_free_srv_ctx(struct server *srv);
+void ssl_sock_free_all_ctx(struct bind_conf *bind_conf);
+int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx,
+ const char **str, int *len);
+int ssl_sock_load_ca(struct bind_conf *bind_conf);
+void ssl_sock_free_ca(struct bind_conf *bind_conf);
+int ssl_bio_and_sess_init(struct connection *conn, SSL_CTX *ssl_ctx,
+ SSL **ssl, BIO **bio, BIO_METHOD *bio_meth, void *ctx);
+const char *ssl_sock_get_sni(struct connection *conn);
+const char *ssl_sock_get_cert_sig(struct connection *conn);
+const char *ssl_sock_get_cipher_name(struct connection *conn);
+const char *ssl_sock_get_proto_version(struct connection *conn);
+int ssl_sock_parse_alpn(char *arg, char **alpn_str, int *alpn_len, char **err);
+void ssl_sock_set_alpn(struct connection *conn, const unsigned char *, int);
+void ssl_sock_set_servername(struct connection *conn, const char *hostname);
+
+int ssl_sock_get_cert_used_sess(struct connection *conn);
+int ssl_sock_get_cert_used_conn(struct connection *conn);
+int ssl_sock_get_remote_common_name(struct connection *conn,
+ struct buffer *out);
+int ssl_sock_get_pkey_algo(struct connection *conn, struct buffer *out);
+unsigned int ssl_sock_get_verify_result(struct connection *conn);
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+int ssl_sock_update_tlskey_ref(struct tls_keys_ref *ref,
+ struct buffer *tlskey);
+int ssl_sock_update_tlskey(char *filename, struct buffer *tlskey, char **err);
+struct tls_keys_ref *tlskeys_ref_lookup(const char *filename);
+struct tls_keys_ref *tlskeys_ref_lookupid(int unique_id);
+#endif
+#ifndef OPENSSL_NO_DH
+HASSL_DH *ssl_sock_get_dh_from_bio(BIO *bio);
+int ssl_sock_load_global_dh_param_from_file(const char *filename);
+void ssl_free_dh(void);
+#endif
+void ssl_free_engines(void);
+#ifdef HAVE_SSL_PROVIDERS
+void ssl_unload_providers(void);
+#endif
+
+#ifdef HAVE_SSL_CLIENT_HELLO_CB
+int ssl_sock_switchctx_err_cbk(SSL *ssl, int *al, void *priv);
+# ifdef OPENSSL_IS_BORINGSSL
+int ssl_sock_switchctx_cbk(const struct ssl_early_callback_ctx *ctx);
+# else /* ! OPENSSL_IS_BORINGSSL */
+int ssl_sock_switchctx_cbk(SSL *ssl, int *al, void *arg);
+# endif
+#else /* ! HAVE_SSL_CLIENT_HELLO_CB */
+int ssl_sock_switchctx_cbk(SSL *ssl, int *al, void *priv);
+#endif
+
+int increment_sslconn();
+SSL_CTX *ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SSL *ssl);
+SSL_CTX *ssl_sock_get_generated_cert(unsigned int key, struct bind_conf *bind_conf);
+int ssl_sock_set_generated_cert(SSL_CTX *ctx, unsigned int key, struct bind_conf *bind_conf);
+unsigned int ssl_sock_generated_cert_key(const void *data, size_t len);
+void ssl_sock_load_cert_sni(struct ckch_inst *ckch_inst, struct bind_conf *bind_conf);
+#ifdef SSL_MODE_ASYNC
+void ssl_async_fd_handler(int fd);
+void ssl_async_fd_free(int fd);
+#endif
+struct issuer_chain* ssl_get0_issuer_chain(X509 *cert);
+int ssl_load_global_issuer_from_BIO(BIO *in, char *fp, char **err);
+int ssl_sock_load_cert(char *path, struct bind_conf *bind_conf, char **err);
+int ssl_sock_load_srv_cert(char *path, struct server *server, int create_if_none, char **err);
+void ssl_free_global_issuers(void);
+int ssl_initialize_random(void);
+int ssl_sock_load_cert_list_file(char *file, int dir, struct bind_conf *bind_conf, struct proxy *curproxy, char **err);
+int ssl_init_single_engine(const char *engine_id, const char *def_algorithms);
+#ifdef HAVE_SSL_PROVIDERS
+int ssl_init_provider(const char *provider_name);
+#endif
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+int ssl_get_ocspresponse_detail(unsigned char *ocsp_certid, struct buffer *out);
+int ssl_ocsp_response_print(struct buffer *ocsp_response, struct buffer *out);
+#endif
+
+/* ssl shctx macro */
+
+#define sh_ssl_sess_tree_delete(s) ebmb_delete(&(s)->key);
+
+#define sh_ssl_sess_tree_insert(s) (struct sh_ssl_sess_hdr *)ebmb_insert(sh_ssl_sess_tree, \
+ &(s)->key, SSL_MAX_SSL_SESSION_ID_LENGTH);
+
+#define sh_ssl_sess_tree_lookup(k) (struct sh_ssl_sess_hdr *)ebmb_lookup(sh_ssl_sess_tree, \
+ (k), SSL_MAX_SSL_SESSION_ID_LENGTH);
+
+/* Registers the function <func> in order to be called on SSL/TLS protocol
+ * message processing.
+ */
+int ssl_sock_register_msg_callback(ssl_sock_msg_callback_func func);
+
+SSL *ssl_sock_get_ssl_object(struct connection *conn);
+
+static inline int cert_ignerr_bitfield_get(const unsigned long long *bitfield, int bit_index)
+{
+ int byte_index = bit_index >> 6;
+ int val = 0;
+
+ if (byte_index < IGNERR_BF_SIZE)
+ val = bitfield[byte_index] & (1ULL << (bit_index & 0x3F));
+
+ return val != 0;
+}
+
+static inline void cert_ignerr_bitfield_set(unsigned long long *bitfield, int bit_index)
+{
+ int byte_index = bit_index >> 6;
+
+ if (byte_index < IGNERR_BF_SIZE)
+ bitfield[byte_index] |= (1ULL << (bit_index & 0x3F));
+}
+
+static inline void cert_ignerr_bitfield_set_all(unsigned long long *bitfield)
+{
+ memset(bitfield, -1, IGNERR_BF_SIZE*sizeof(*bitfield));
+}
+
+#endif /* USE_OPENSSL */
+#endif /* _HAPROXY_SSL_SOCK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/ssl_utils.h b/include/haproxy/ssl_utils.h
new file mode 100644
index 0000000..3391efd
--- /dev/null
+++ b/include/haproxy/ssl_utils.h
@@ -0,0 +1,51 @@
+/*
+ * include/haproxy/ssl_utils.h
+ *
+ * Utility functions for SSL:
+ * Mostly generic functions that retrieve information from certificates
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_SSL_UTILS_H
+#define _HAPROXY_SSL_UTILS_H
+
+#ifdef USE_OPENSSL
+
+#include <haproxy/buf-t.h>
+#include <haproxy/openssl-compat.h>
+
+int cert_get_pkey_algo(X509 *crt, struct buffer *out);
+int ssl_sock_get_serial(X509 *crt, struct buffer *out);
+int ssl_sock_crt2der(X509 *crt, struct buffer *out);
+int ssl_sock_get_time(ASN1_TIME *tm, struct buffer *out);
+int ssl_sock_get_dn_entry(X509_NAME *a, const struct buffer *entry, int pos,
+ struct buffer *out);
+int ssl_sock_get_dn_formatted(X509_NAME *a, const struct buffer *format, struct buffer *out);
+int ssl_sock_get_dn_oneline(X509_NAME *a, struct buffer *out);
+X509* ssl_sock_get_peer_certificate(SSL *ssl);
+X509* ssl_sock_get_verified_chain_root(SSL *ssl);
+unsigned int openssl_version_parser(const char *version);
+void exclude_tls_grease(char *input, int len, struct buffer *output);
+int x509_v_err_str_to_int(const char *str);
+const char *x509_v_err_int_to_str(int code);
+long asn1_generalizedtime_to_epoch(ASN1_GENERALIZEDTIME *d);
+
+#endif /* _HAPROXY_SSL_UTILS_H */
+#endif /* USE_OPENSSL */
+
diff --git a/include/haproxy/stats-t.h b/include/haproxy/stats-t.h
new file mode 100644
index 0000000..34a4cc2
--- /dev/null
+++ b/include/haproxy/stats-t.h
@@ -0,0 +1,617 @@
+/*
+ * include/haproxy/stats-t.h
+ * This file provides structures and types for stats.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STATS_T_H
+#define _HAPROXY_STATS_T_H
+
+#include <haproxy/api-t.h>
+
+/* Flags for applet.ctx.stats.flags */
+#define STAT_FMT_HTML 0x00000001 /* dump the stats in HTML format */
+#define STAT_FMT_TYPED 0x00000002 /* use the typed output format */
+#define STAT_FMT_JSON 0x00000004 /* dump the stats in JSON format */
+#define STAT_HIDE_DOWN 0x00000008 /* hide 'down' servers in the stats page */
+#define STAT_NO_REFRESH 0x00000010 /* do not automatically refresh the stats page */
+#define STAT_ADMIN 0x00000020 /* indicate a stats admin level */
+#define STAT_CHUNKED 0x00000040 /* use chunked encoding (HTTP/1.1) */
+#define STAT_JSON_SCHM 0x00000080 /* dump the json schema */
+
+#define STAT_HIDEVER 0x00000100 /* conf: do not report the version and reldate */
+#define STAT_SHNODE 0x00000200 /* conf: show node name */
+#define STAT_SHDESC 0x00000400 /* conf: show description */
+#define STAT_SHLGNDS 0x00000800 /* conf: show legends */
+#define STAT_SHOW_FDESC 0x00001000 /* show the field descriptions when possible */
+#define STAT_SHMODULES 0x00002000 /* conf: show modules */
+#define STAT_HIDE_MAINT 0x00004000 /* hide maint/disabled servers */
+#define STAT_CONVDONE 0x00008000 /* conf: rules conversion done */
+#define STAT_USE_FLOAT 0x00010000 /* use floats where possible in the outputs */
+
+#define STAT_BOUND 0x00800000 /* bound statistics to selected proxies/types/services */
+#define STAT_STARTED 0x01000000 /* some output has occurred */
+
+#define STAT_FMT_MASK 0x00000007
+
+#define STATS_TYPE_FE 0
+#define STATS_TYPE_BE 1
+#define STATS_TYPE_SV 2
+#define STATS_TYPE_SO 3
+
+#define STATS_DOMAIN (0) /* used for bitshifting, type of statistics: proxy or dns */
+#define STATS_PX_CAP (8) /* used for bitshifting, differentiate obj1 type for proxy statistics */
+
+/* HTTP stats : applet.st0 */
+enum {
+ STAT_HTTP_INIT = 0, /* Initial state */
+ STAT_HTTP_HEAD, /* send headers before dump */
+ STAT_HTTP_DUMP, /* dumping stats */
+ STAT_HTTP_POST, /* waiting post data */
+ STAT_HTTP_LAST, /* sending last chunk of response */
+ STAT_HTTP_DONE, /* dump is finished */
+ STAT_HTTP_END, /* finished */
+};
+
+/* status codes available for the stats admin page */
+enum {
+ STAT_STATUS_INIT = 0,
+ STAT_STATUS_DENY, /* action denied */
+ STAT_STATUS_DONE, /* the action is successful */
+ STAT_STATUS_ERRP, /* an error occurred due to invalid values in parameters */
+ STAT_STATUS_EXCD, /* an error occurred because the buffer couldn't store all data */
+ STAT_STATUS_NONE, /* nothing happened (no action chosen or servers state didn't change) */
+ STAT_STATUS_PART, /* the action is partially successful */
+ STAT_STATUS_UNKN, /* an unknown error occurred, shouldn't happen */
+ STAT_STATUS_IVAL, /* invalid requests (chunked or invalid post) */
+ STAT_STATUS_SIZE
+};
+
+/* HTML form to limit output scope */
+#define STAT_SCOPE_TXT_MAXLEN 20 /* max len for scope substring */
+#define STAT_SCOPE_INPUT_NAME "scope" /* pattern form scope name <input> in html form */
+#define STAT_SCOPE_PATTERN "?" STAT_SCOPE_INPUT_NAME "="
+
+/* Actions available for the stats admin forms */
+enum {
+ ST_ADM_ACTION_NONE = 0,
+
+ /* enable/disable health checks */
+ ST_ADM_ACTION_DHLTH,
+ ST_ADM_ACTION_EHLTH,
+
+ /* force health check status */
+ ST_ADM_ACTION_HRUNN,
+ ST_ADM_ACTION_HNOLB,
+ ST_ADM_ACTION_HDOWN,
+
+ /* enable/disable agent checks */
+ ST_ADM_ACTION_DAGENT,
+ ST_ADM_ACTION_EAGENT,
+
+ /* force agent check status */
+ ST_ADM_ACTION_ARUNN,
+ ST_ADM_ACTION_ADOWN,
+
+ /* set admin state */
+ ST_ADM_ACTION_READY,
+ ST_ADM_ACTION_DRAIN,
+ ST_ADM_ACTION_MAINT,
+ ST_ADM_ACTION_SHUTDOWN,
+ /* these are the ancient actions, still available for compatibility */
+ ST_ADM_ACTION_DISABLE,
+ ST_ADM_ACTION_ENABLE,
+ ST_ADM_ACTION_STOP,
+ ST_ADM_ACTION_START,
+};
+
+
+/* data transmission states for the stats responses */
+enum stat_state {
+ STAT_STATE_INIT = 0,
+ STAT_STATE_HEAD,
+ STAT_STATE_INFO,
+ STAT_STATE_LIST,
+ STAT_STATE_END,
+ STAT_STATE_FIN,
+};
+
+/* kept in 2.6 only for compatibility with legacy code. Will be removed in 2.7,
+ * please do not use these values anymore and define your own!
+ */
+enum obsolete_stat_state {
+ STAT_ST_INIT ENUM_ATTRIBUTE((deprecated)) = 0,
+ STAT_ST_HEAD ENUM_ATTRIBUTE((deprecated)),
+ STAT_ST_INFO ENUM_ATTRIBUTE((deprecated)),
+ STAT_ST_LIST ENUM_ATTRIBUTE((deprecated)),
+ STAT_ST_END ENUM_ATTRIBUTE((deprecated)),
+ STAT_ST_FIN ENUM_ATTRIBUTE((deprecated)),
+};
+
+/* data transmission states for the stats responses inside a proxy */
+enum {
+ STAT_PX_ST_INIT = 0,
+ STAT_PX_ST_TH,
+ STAT_PX_ST_FE,
+ STAT_PX_ST_LI,
+ STAT_PX_ST_SV,
+ STAT_PX_ST_BE,
+ STAT_PX_ST_END,
+ STAT_PX_ST_FIN,
+};
+
+/* This level of detail is needed to let the stats consumer know how to
+ * aggregate them (eg: between processes or cluster nodes). Only a few
+ * combinations are actually in use, though the mechanism tends to make
+ * this easy to extend to future uses.
+ *
+ * Each reported stats element is typed based on 4 dimensions :
+ * - the field format : it indicates the validity range of the reported value,
+ * its limits and how to parse it. 6 types are currently supported :
+ * empty, signed 32-bit integer, unsigned 32-bit integer, signed 64-bit
+ * integer, unsigned 64-bit integer, string
+ *
+ * - the field origin : how was the value retrieved and what it depends on.
+ * 5 origins are currently defined : product (eg: haproxy version or
+ * release date), configuration (eg: a configured limit), key (identifier
+ * used to group values at a certain level), metric (a measure of something),
+ * status (something discrete which by definition cannot be averaged nor
+ * aggregated, such as "listening" versus "full").
+ *
+ * - the field nature : what does the data represent, implying how to aggregate
+ * it. At least 9 different natures are expected : counter (an increasing
+ * positive counter that may wrap when its type is overflown such as a byte
+ * counter), gauge (a measure at any instant that may vary, such as a
+ * concurrent connection count), a limit (eg: maximum acceptable concurrent
+ * connections), a minimum (eg: minimum free memory over a period), a
+ * maximum (eg: highest queue length over a period), an event rate (eg:
+ * incoming connections per second), a duration that is often aggregated by
+ * taking the max (eg: service uptime), an age that generally reports the
+ * last time an event appeared and which generally is aggregated by taking
+ * the most recent event hence the smallest one, the time which reports a
+ * discrete instant and cannot obviously be averaged either, a name which
+ * will generally be the name of an entity (such as a server name or cookie
+ * name), an output which is mostly used for various unsafe strings that are
+ * retrieved (eg: last check output, product name, description, etc), and an
+ * average which indicates that the value is relative and meant to be averaged
+ * between all nodes (eg: response time, throttling, etc).
+ *
+ * - the field scope : if the value is shared with other elements, which ones
+ * are expected to report the same value. The first scope with the least
+ * share is the process (most common one) where all data are only relevant
+ * to the process being consulted. The next one is the service, which is
+ * valid for all processes launched together (eg: shared SSL cache usage
+ * among processes). The next one is the system (such as the OS version)
+ * and which will report the same information for all instances running on
+ * the same node. The next one is the cluster, which indicates that the
+ * information are shared with other nodes being part of a same cluster.
+ * Stick-tables may carry such cluster-wide information. Larger scopes may
+ * be added in the future such as datacenter, country, continent, planet,
+ * galaxy, universe, etc.
+ *
+ * All these information will be encoded in the field as a bit field so that
+ * it is easy to pass composite values by simply ORing elements above, and
+ * to ease the definition of a few field types for the most common field
+ * combinations.
+ *
+ * The enums try to be arranged so that most likely characteristics are
+ * assigned the value zero, making it easier to add new fields.
+ *
+ * Field format has precedence over the other parts of the type. Please avoid
+ * declaring extra formats unless absolutely needed. The first one, FF_EMPTY,
+ * must absolutely have value zero so that it is what is returned after a
+ * memset(0). Furthermore, the producer is responsible for ensuring that when
+ * this format is set, all other bits of the type as well as the values in the
+ * union only contain zeroes. This makes it easier for the consumer to use the
+ * values as the expected type.
+ */
+
+enum field_format {
+ FF_EMPTY = 0x00000000,
+ FF_S32 = 0x00000001,
+ FF_U32 = 0x00000002,
+ FF_S64 = 0x00000003,
+ FF_U64 = 0x00000004,
+ FF_STR = 0x00000005,
+ FF_FLT = 0x00000006,
+ FF_MASK = 0x000000FF,
+};
+
+enum field_origin {
+ FO_METRIC = 0x00000000,
+ FO_STATUS = 0x00000100,
+ FO_KEY = 0x00000200,
+ FO_CONFIG = 0x00000300,
+ FO_PRODUCT = 0x00000400,
+ FO_MASK = 0x0000FF00,
+};
+
+enum field_nature {
+ FN_GAUGE = 0x00000000,
+ FN_LIMIT = 0x00010000,
+ FN_MIN = 0x00020000,
+ FN_MAX = 0x00030000,
+ FN_RATE = 0x00040000,
+ FN_COUNTER = 0x00050000,
+ FN_DURATION = 0x00060000,
+ FN_AGE = 0x00070000,
+ FN_TIME = 0x00080000,
+ FN_NAME = 0x00090000,
+ FN_OUTPUT = 0x000A0000,
+ FN_AVG = 0x000B0000,
+ FN_MASK = 0x00FF0000,
+};
+
+enum field_scope {
+ FS_PROCESS = 0x00000000,
+ FS_SERVICE = 0x01000000,
+ FS_SYSTEM = 0x02000000,
+ FS_CLUSTER = 0x03000000,
+ FS_MASK = 0xFF000000,
+};
+
+/* Show info fields for CLI output. For any field added here, please add the
+ * text representation in the info_fields array. Please only append at the end,
+ * before the INF_TOTAL_FIELDS entry, and never insert anything in the middle
+ * nor at the beginning.
+ */
+enum info_field {
+ INF_NAME,
+ INF_VERSION,
+ INF_RELEASE_DATE,
+ INF_NBTHREAD,
+ INF_NBPROC,
+ INF_PROCESS_NUM,
+ INF_PID,
+ INF_UPTIME,
+ INF_UPTIME_SEC,
+ INF_MEMMAX_MB,
+ INF_POOL_ALLOC_MB,
+ INF_POOL_USED_MB,
+ INF_POOL_FAILED,
+ INF_ULIMIT_N,
+ INF_MAXSOCK,
+ INF_MAXCONN,
+ INF_HARD_MAXCONN,
+ INF_CURR_CONN,
+ INF_CUM_CONN,
+ INF_CUM_REQ,
+ INF_MAX_SSL_CONNS,
+ INF_CURR_SSL_CONNS,
+ INF_CUM_SSL_CONNS,
+ INF_MAXPIPES,
+ INF_PIPES_USED,
+ INF_PIPES_FREE,
+ INF_CONN_RATE,
+ INF_CONN_RATE_LIMIT,
+ INF_MAX_CONN_RATE,
+ INF_SESS_RATE,
+ INF_SESS_RATE_LIMIT,
+ INF_MAX_SESS_RATE,
+ INF_SSL_RATE,
+ INF_SSL_RATE_LIMIT,
+ INF_MAX_SSL_RATE,
+ INF_SSL_FRONTEND_KEY_RATE,
+ INF_SSL_FRONTEND_MAX_KEY_RATE,
+ INF_SSL_FRONTEND_SESSION_REUSE_PCT,
+ INF_SSL_BACKEND_KEY_RATE,
+ INF_SSL_BACKEND_MAX_KEY_RATE,
+ INF_SSL_CACHE_LOOKUPS,
+ INF_SSL_CACHE_MISSES,
+ INF_COMPRESS_BPS_IN,
+ INF_COMPRESS_BPS_OUT,
+ INF_COMPRESS_BPS_RATE_LIM,
+ INF_ZLIB_MEM_USAGE,
+ INF_MAX_ZLIB_MEM_USAGE,
+ INF_TASKS,
+ INF_RUN_QUEUE,
+ INF_IDLE_PCT,
+ INF_NODE,
+ INF_DESCRIPTION,
+ INF_STOPPING,
+ INF_JOBS,
+ INF_UNSTOPPABLE_JOBS,
+ INF_LISTENERS,
+ INF_ACTIVE_PEERS,
+ INF_CONNECTED_PEERS,
+ INF_DROPPED_LOGS,
+ INF_BUSY_POLLING,
+ INF_FAILED_RESOLUTIONS,
+ INF_TOTAL_BYTES_OUT,
+ INF_TOTAL_SPLICED_BYTES_OUT,
+ INF_BYTES_OUT_RATE,
+ INF_DEBUG_COMMANDS_ISSUED,
+ INF_CUM_LOG_MSGS,
+ INF_BUILD_INFO,
+ INF_MEMMAX_BYTES,
+ INF_POOL_ALLOC_BYTES,
+ INF_POOL_USED_BYTES,
+ INF_START_TIME_SEC,
+ INF_TAINTED,
+ INF_WARNINGS,
+ INF_MAXCONN_REACHED,
+ INF_BOOTTIME_MS,
+ INF_NICED_TASKS,
+
+ /* must always be the last one */
+ INF_TOTAL_FIELDS
+};
+
+
+/* Stats fields for CSV output. For any field added here, please add the text
+ * representation in the stat_fields array. Please only append at the end,
+ * before the ST_F_TOTAL_FIELDS entry, and never insert anything in the middle
+ * nor at the beginning. When adding an entry here, one must always add a
+ * corresponding one in stat_fields[] otherwise Lua's get_stats() will break,
+ * and "show stats" will show a null.
+ */
+enum stat_field {
+ ST_F_PXNAME,
+ ST_F_SVNAME,
+ ST_F_QCUR,
+ ST_F_QMAX,
+ ST_F_SCUR,
+ ST_F_SMAX,
+ ST_F_SLIM,
+ ST_F_STOT,
+ ST_F_BIN ,
+ ST_F_BOUT,
+ ST_F_DREQ,
+ ST_F_DRESP,
+ ST_F_EREQ,
+ ST_F_ECON,
+ ST_F_ERESP,
+ ST_F_WRETR,
+ ST_F_WREDIS,
+ ST_F_STATUS,
+ ST_F_WEIGHT,
+ ST_F_ACT,
+ ST_F_BCK,
+ ST_F_CHKFAIL,
+ ST_F_CHKDOWN,
+ ST_F_LASTCHG,
+ ST_F_DOWNTIME,
+ ST_F_QLIMIT,
+ ST_F_PID,
+ ST_F_IID,
+ ST_F_SID,
+ ST_F_THROTTLE,
+ ST_F_LBTOT,
+ ST_F_TRACKED,
+ ST_F_TYPE,
+ ST_F_RATE,
+ ST_F_RATE_LIM,
+ ST_F_RATE_MAX,
+ ST_F_CHECK_STATUS,
+ ST_F_CHECK_CODE,
+ ST_F_CHECK_DURATION,
+ ST_F_HRSP_1XX,
+ ST_F_HRSP_2XX,
+ ST_F_HRSP_3XX,
+ ST_F_HRSP_4XX,
+ ST_F_HRSP_5XX,
+ ST_F_HRSP_OTHER,
+ ST_F_HANAFAIL,
+ ST_F_REQ_RATE,
+ ST_F_REQ_RATE_MAX,
+ ST_F_REQ_TOT,
+ ST_F_CLI_ABRT,
+ ST_F_SRV_ABRT,
+ ST_F_COMP_IN,
+ ST_F_COMP_OUT,
+ ST_F_COMP_BYP,
+ ST_F_COMP_RSP,
+ ST_F_LASTSESS,
+ ST_F_LAST_CHK,
+ ST_F_LAST_AGT,
+ ST_F_QTIME,
+ ST_F_CTIME,
+ ST_F_RTIME,
+ ST_F_TTIME,
+ ST_F_AGENT_STATUS,
+ ST_F_AGENT_CODE,
+ ST_F_AGENT_DURATION,
+ ST_F_CHECK_DESC,
+ ST_F_AGENT_DESC,
+ ST_F_CHECK_RISE,
+ ST_F_CHECK_FALL,
+ ST_F_CHECK_HEALTH,
+ ST_F_AGENT_RISE,
+ ST_F_AGENT_FALL,
+ ST_F_AGENT_HEALTH,
+ ST_F_ADDR,
+ ST_F_COOKIE,
+ ST_F_MODE,
+ ST_F_ALGO,
+ ST_F_CONN_RATE,
+ ST_F_CONN_RATE_MAX,
+ ST_F_CONN_TOT,
+ ST_F_INTERCEPTED,
+ ST_F_DCON,
+ ST_F_DSES,
+ ST_F_WREW,
+ ST_F_CONNECT,
+ ST_F_REUSE,
+ ST_F_CACHE_LOOKUPS,
+ ST_F_CACHE_HITS,
+ ST_F_SRV_ICUR,
+ ST_F_SRV_ILIM,
+ ST_F_QT_MAX,
+ ST_F_CT_MAX,
+ ST_F_RT_MAX,
+ ST_F_TT_MAX,
+ ST_F_EINT,
+ ST_F_IDLE_CONN_CUR,
+ ST_F_SAFE_CONN_CUR,
+ ST_F_USED_CONN_CUR,
+ ST_F_NEED_CONN_EST,
+ ST_F_UWEIGHT,
+ ST_F_AGG_SRV_STATUS,
+ ST_F_AGG_SRV_CHECK_STATUS,
+ ST_F_AGG_CHECK_STATUS,
+ ST_F_SRID,
+ ST_F_SESS_OTHER,
+ ST_F_H1SESS,
+ ST_F_H2SESS,
+ ST_F_H3SESS,
+ ST_F_REQ_OTHER,
+ ST_F_H1REQ,
+ ST_F_H2REQ,
+ ST_F_H3REQ,
+ ST_F_PROTO,
+
+ /* must always be the last one */
+ ST_F_TOTAL_FIELDS
+};
+
+/* Please consider updating stats_dump_fields_*(),
+ * stats_dump_.*_info_fields() and stats_*_schema()
+ * when modifying struct field or related enums.
+ */
+struct field {
+ uint32_t type;
+ union {
+ int32_t s32; /* FF_S32 */
+ uint32_t u32; /* FF_U32 */
+ int64_t s64; /* FF_S64 */
+ uint64_t u64; /* FF_U64 */
+ double flt; /* FF_FLT */
+ const char *str; /* FF_STR */
+ } u;
+};
+
+enum counters_type {
+ COUNTERS_FE = 0,
+ COUNTERS_BE,
+ COUNTERS_SV,
+ COUNTERS_LI,
+ COUNTERS_RSLV,
+
+ COUNTERS_OFF_END
+};
+
+/* Entity used to generate statistics on an HAProxy component */
+struct stats_module {
+ struct list list;
+ const char *name;
+
+ /* functor used to generate the stats module using counters provided through data parameter */
+ void (*fill_stats)(void *data, struct field *);
+
+ struct name_desc *stats; /* name/description of stats provided by the module */
+ void *counters; /* initial values of allocated counters */
+ size_t counters_off[COUNTERS_OFF_END]; /* list of offsets of allocated counters in various objects */
+ size_t stats_count; /* count of stats provided */
+ size_t counters_size; /* sizeof counters */
+
+ uint32_t domain_flags; /* stats application domain for this module */
+ char clearable; /* reset on a clear counters */
+};
+
+struct extra_counters {
+ char *data; /* heap containing counters allocated in a linear fashion */
+ size_t size; /* size of allocated data */
+ enum counters_type type; /* type of object containing the counters */
+};
+
+/* stats_domain is used in a flag as a 1 byte field */
+enum stats_domain {
+ STATS_DOMAIN_PROXY = 0,
+ STATS_DOMAIN_RESOLVERS,
+ STATS_DOMAIN_COUNT,
+
+ STATS_DOMAIN_MASK = 0xff
+};
+
+/* used in a flag as a 1 byte field */
+enum stats_domain_px_cap {
+ STATS_PX_CAP_FE = 0x01,
+ STATS_PX_CAP_BE = 0x02,
+ STATS_PX_CAP_SRV = 0x04,
+ STATS_PX_CAP_LI = 0x08,
+
+ STATS_PX_CAP_MASK = 0xff
+};
+
+/* the context of a "show stat" command in progress on the CLI or the stats applet */
+struct show_stat_ctx {
+ struct proxy *http_px; /* parent proxy of the current applet (only relevant for HTTP applet) */
+ void *obj1; /* context pointer used in stats dump */
+ void *obj2; /* context pointer used in stats dump */
+ uint32_t domain; /* set the stats to used, for now only proxy stats are supported */
+ int scope_str; /* limit scope to a frontend/backend substring */
+ int scope_len; /* length of the string above in the buffer */
+ int field; /* current field iterator when stat line is dumped through returning function */
+ int px_st; /* STAT_PX_ST* */
+ unsigned int flags; /* STAT_* from stats-t.h */
+ int iid, type, sid; /* proxy id, type and service id if bounding of stats is enabled */
+ int st_code; /* the status code returned by an action */
+ enum stat_state state; /* phase of output production */
+};
+
+extern THREAD_LOCAL void *trash_counters;
+
+#define EXTRA_COUNTERS(name) \
+ struct extra_counters *name
+
+#define EXTRA_COUNTERS_GET(counters, mod) \
+ (likely(counters) ? \
+ ((void *)((counters)->data + (mod)->counters_off[(counters)->type])) : \
+ (trash_counters))
+
+#define EXTRA_COUNTERS_REGISTER(counters, ctype, alloc_failed_label) \
+ do { \
+ typeof(*counters) _ctr; \
+ _ctr = calloc(1, sizeof(*_ctr)); \
+ if (!_ctr) \
+ goto alloc_failed_label; \
+ _ctr->type = (ctype); \
+ *(counters) = _ctr; \
+ } while (0)
+
+#define EXTRA_COUNTERS_ADD(mod, counters, new_counters, csize) \
+ do { \
+ typeof(counters) _ctr = (counters); \
+ (mod)->counters_off[_ctr->type] = _ctr->size; \
+ _ctr->size += (csize); \
+ } while (0)
+
+#define EXTRA_COUNTERS_ALLOC(counters, alloc_failed_label) \
+ do { \
+ typeof(counters) _ctr = (counters); \
+ _ctr->data = malloc((_ctr)->size); \
+ if (!_ctr->data) \
+ goto alloc_failed_label; \
+ } while (0)
+
+#define EXTRA_COUNTERS_INIT(counters, mod, init_counters, init_counters_size) \
+ do { \
+ typeof(counters) _ctr = (counters); \
+ memcpy(_ctr->data + mod->counters_off[_ctr->type], \
+ (init_counters), (init_counters_size)); \
+ } while (0)
+
+#define EXTRA_COUNTERS_FREE(counters) \
+ do { \
+ if (counters) { \
+ free((counters)->data); \
+ free(counters); \
+ } \
+ } while (0)
+
+#endif /* _HAPROXY_STATS_T_H */
diff --git a/include/haproxy/stats.h b/include/haproxy/stats.h
new file mode 100644
index 0000000..f9e6d97
--- /dev/null
+++ b/include/haproxy/stats.h
@@ -0,0 +1,145 @@
+/*
+ * include/haproxy/stats.h
+ * This file contains definitions of some primitives dedicated to
+ * statistics output.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STATS_H
+#define _HAPROXY_STATS_H
+
+#include <haproxy/api.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/tools-t.h>
+
+struct channel;
+struct buffer;
+struct proxy;
+struct appctx;
+struct htx;
+
+/* These two structs contains all field names and descriptions according to
+ * the number of entries in "enum stat_field" and "enum info_field"
+ */
+extern const struct name_desc stat_fields[];
+extern const struct name_desc info_fields[];
+extern const char *stat_status_codes[];
+extern struct applet http_stats_applet;
+extern THREAD_LOCAL struct field info[];
+extern THREAD_LOCAL struct field *stat_l[];
+
+struct htx;
+int stats_putchk(struct appctx *appctx, struct htx *htx);
+
+int stats_dump_one_line(const struct field *stats, size_t stats_count, struct appctx *appctx);
+
+int stats_fill_info(struct field *info, int len, uint flags);
+int stats_fill_fe_stats(struct proxy *px, struct field *stats, int len,
+ enum stat_field *selected_field);
+int stats_fill_li_stats(struct proxy *px, struct listener *l, int flags,
+ struct field *stats, int len, enum stat_field *selected_field);
+int stats_fill_sv_stats(struct proxy *px, struct server *sv, int flags,
+ struct field *stats, int len, enum stat_field *selected_field);
+int stats_fill_be_stats(struct proxy *px, int flags, struct field *stats, int len,
+ enum stat_field *selected_field);
+
+int stats_emit_raw_data_field(struct buffer *out, const struct field *f);
+int stats_emit_typed_data_field(struct buffer *out, const struct field *f);
+int stats_emit_field_tags(struct buffer *out, const struct field *f,
+ char delim);
+
+
+static inline enum field_format field_format(const struct field *f, int e)
+{
+ return f[e].type & FF_MASK;
+}
+
+static inline enum field_origin field_origin(const struct field *f, int e)
+{
+ return f[e].type & FO_MASK;
+}
+
+static inline enum field_scope field_scope(const struct field *f, int e)
+{
+ return f[e].type & FS_MASK;
+}
+
+static inline enum field_nature field_nature(const struct field *f, int e)
+{
+ return f[e].type & FN_MASK;
+}
+
+static inline const char *field_str(const struct field *f, int e)
+{
+ return (field_format(f, e) == FF_STR && f[e].u.str) ? f[e].u.str : "";
+}
+
+static inline struct field mkf_s32(uint32_t type, int32_t value)
+{
+ struct field f = { .type = FF_S32 | type, .u.s32 = value };
+ return f;
+}
+
+static inline struct field mkf_u32(uint32_t type, uint32_t value)
+{
+ struct field f = { .type = FF_U32 | type, .u.u32 = value };
+ return f;
+}
+
+static inline struct field mkf_s64(uint32_t type, int64_t value)
+{
+ struct field f = { .type = FF_S64 | type, .u.s64 = value };
+ return f;
+}
+
+static inline struct field mkf_u64(uint32_t type, uint64_t value)
+{
+ struct field f = { .type = FF_U64 | type, .u.u64 = value };
+ return f;
+}
+
+static inline struct field mkf_str(uint32_t type, const char *value)
+{
+ struct field f = { .type = FF_STR | type, .u.str = value };
+ return f;
+}
+
+static inline struct field mkf_flt(uint32_t type, double value)
+{
+ struct field f = { .type = FF_FLT | type, .u.flt = value };
+ return f;
+}
+
+#define MK_STATS_PROXY_DOMAIN(px_cap) \
+ ((px_cap) << STATS_PX_CAP | STATS_DOMAIN_PROXY)
+
+int stats_allocate_proxy_counters_internal(struct extra_counters **counters,
+ int type, int px_cap);
+int stats_allocate_proxy_counters(struct proxy *px);
+
+void stats_register_module(struct stats_module *m);
+
+#endif /* _HAPROXY_STATS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/stconn-t.h b/include/haproxy/stconn-t.h
new file mode 100644
index 0000000..63bcb79
--- /dev/null
+++ b/include/haproxy/stconn-t.h
@@ -0,0 +1,325 @@
+/*
+ * include/haproxy/stconn-t.h
+ * This file describes the stream connector struct and associated constants.
+ *
+ * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STCONN_T_H
+#define _HAPROXY_STCONN_T_H
+
+#include <haproxy/obj_type-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/pipe-t.h>
+#include <haproxy/show_flags-t.h>
+#include <haproxy/xref-t.h>
+
+enum iobuf_flags {
+ IOBUF_FL_NONE = 0x00000000, /* For initialization purposes */
+ IOBUF_FL_NO_FF = 0x00000001, /* Fast-forwarding is not supported */
+ IOBUF_FL_NO_SPLICING = 0x00000002, /* Splicing is not supported or unusable for this stream */
+ IOBUF_FL_FF_BLOCKED = 0x00000004, /* Fast-forwarding is blocked (buffer allocation/full) */
+
+ IOBUF_FL_INTERIM_FF = 0x00000008, /* Producer side warn it will immediately retry a fast-forward.
+ * .done_fastfwd() on consumer side must take care of this flag
+ */
+	IOBUF_FL_EOI          = 0x00000010, /* An EOI was encountered on producer side */
+};
+
+struct iobuf {
+ struct pipe *pipe; /* non-NULL only when data present */
+ struct buffer *buf;
+ size_t offset;
+ size_t data;
+ unsigned int flags;
+};
+
+/* Stream Endpoint Flags.
+ * Please also update the se_show_flags() function below in case of changes.
+ */
+enum se_flags {
+ SE_FL_NONE = 0x00000000, /* For initialization purposes */
+
+ /* Endpoint types */
+ SE_FL_T_MUX = 0x00000001, /* The endpoint is a mux (the target may be NULL before the mux init) */
+ SE_FL_T_APPLET = 0x00000002, /* The endpoint is an applet */
+
+ /* unused: 0x00000004 .. 0x00000008 */
+
+ /* Endpoint states: none == attached to a mux with a stream connector */
+ SE_FL_DETACHED = 0x00000010, /* The endpoint is detached (no mux/no applet) */
+ SE_FL_ORPHAN = 0x00000020, /* The endpoint is orphan (no stream connector) */
+
+ /* unused: 0x00000040 .. 0x00000080 */
+
+ SE_FL_SHRD = 0x00000100, /* read shut, draining extra data */
+ SE_FL_SHRR = 0x00000200, /* read shut, resetting extra data */
+ SE_FL_SHR = SE_FL_SHRD | SE_FL_SHRR, /* read shut status */
+
+ SE_FL_SHWN = 0x00000400, /* write shut, verbose mode */
+ SE_FL_SHWS = 0x00000800, /* write shut, silent mode */
+ SE_FL_SHW = SE_FL_SHWN | SE_FL_SHWS, /* write shut status */
+
+ /* following flags are supposed to be set by the endpoint and read by
+ * the app layer :
+ */
+
+ /* Permanent flags */
+ SE_FL_NOT_FIRST = 0x00001000, /* This stream connector is not the first one for the endpoint */
+ SE_FL_WEBSOCKET = 0x00002000, /* The endpoint uses the websocket proto */
+ SE_FL_EOI = 0x00004000, /* end-of-input reached */
+ SE_FL_EOS = 0x00008000, /* End of stream delivered to data layer */
+ SE_FL_ERROR = 0x00010000, /* a fatal error was reported */
+ /* Transient flags */
+ SE_FL_ERR_PENDING= 0x00020000, /* An error is pending, but there's still data to be read */
+ SE_FL_RCV_MORE = 0x00040000, /* Endpoint may have more bytes to transfer */
+ SE_FL_WANT_ROOM = 0x00080000, /* More bytes to transfer, but not enough room */
+ SE_FL_EXP_NO_DATA= 0x00100000, /* No data expected by the endpoint */
+ SE_FL_MAY_FASTFWD_PROD = 0x00200000, /* The endpoint may produce data via zero-copy forwarding */
+ SE_FL_MAY_FASTFWD_CONS = 0x00400000, /* The endpoint may consume data via zero-copy forwarding */
+ SE_FL_ENDP_MASK = 0x004ff000, /* Mask for flags set by the endpoint */
+
+ /* following flags are supposed to be set by the app layer and read by
+ * the endpoint :
+ */
+ /* unused 0x00800000,*/
+ /* unused 0x01000000,*/
+ /* unused 0x02000000,*/
+	SE_FL_WAIT_FOR_HS   = 0x04000000,  /* This stream is waiting for handshake */
+ SE_FL_KILL_CONN = 0x08000000, /* must kill the connection when the SC closes */
+ SE_FL_WAIT_DATA = 0x10000000, /* stream endpoint cannot work without more data from the stream's output */
+ SE_FL_WONT_CONSUME = 0x20000000, /* stream endpoint will not consume more data */
+ SE_FL_HAVE_NO_DATA = 0x40000000, /* the endpoint has no more data to deliver to the stream */
+ SE_FL_APPLET_NEED_CONN = 0x80000000, /* applet is waiting for the other side to (fail to) connect */
+};
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *se_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(SE_FL_T_MUX, _(SE_FL_T_APPLET, _(SE_FL_DETACHED, _(SE_FL_ORPHAN,
+ _(SE_FL_SHRD, _(SE_FL_SHRR, _(SE_FL_SHWN, _(SE_FL_SHWS,
+ _(SE_FL_NOT_FIRST, _(SE_FL_WEBSOCKET, _(SE_FL_EOI, _(SE_FL_EOS,
+ _(SE_FL_ERROR, _(SE_FL_ERR_PENDING, _(SE_FL_RCV_MORE,
+ _(SE_FL_WANT_ROOM, _(SE_FL_EXP_NO_DATA, _(SE_FL_MAY_FASTFWD_PROD, _(SE_FL_MAY_FASTFWD_CONS,
+ _(SE_FL_WAIT_FOR_HS, _(SE_FL_KILL_CONN, _(SE_FL_WAIT_DATA,
+ _(SE_FL_WONT_CONSUME, _(SE_FL_HAVE_NO_DATA, _(SE_FL_APPLET_NEED_CONN)))))))))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* stconn flags.
+ * Please also update the sc_show_flags() function below in case of changes.
+ *
+ * When SC_FL_ABRT_WANTED/SC_FL_EOS is set, it is strictly forbidden for the
+ * producer to alter the buffer contents. In this case, the consumer is free to
+ * perform a shutdown when it has consumed the last contents, otherwise the
+ * session processor will do it anyway. SC_FL_ABRT* are set at the upper layer
+ * level (the stream) while SC_FL_EOS is set at the SE layer.
+ *
+ * The SC_FL_SHUT_WANTED flag should be set by the session processor when
+ * SC_FL_ABRT_DONE/SC_FL_EOS and CF_AUTO_CLOSE are both set. And it may also be
+ * set by the producer when it detects SC_FL_EOS while directly forwarding data to the
+ * consumer.
+ *
+ * The SHUT/ABRT flags work like this :
+ *
+ * ABRT_WANTED ABRT_DONE meaning
+ * 0 0 normal case, connection still open and data is being read
+ * 1 0 closing : the producer cannot feed data anymore but can close
+ * 0/1 1 closed: the producer has closed its input channel.
+ *
+ * SHUT_WANTED SHUT_DONE meaning
+ * 0 0 normal case, connection still open and data is being written
+ * 1 0 closing: the consumer can send last data and may then close
+ * 0/1 1 closed: the consumer has closed its output channel.
+ *
+ *
+ * The ABRT_WANTED flag is mostly used to force the producer to abort when an error is
+ * detected on the consumer side.
+ *
+ */
+enum sc_flags {
+ SC_FL_NONE = 0x00000000, /* Just for initialization purposes */
+ SC_FL_ISBACK = 0x00000001, /* Set for SC on back-side */
+
+ SC_FL_EOI = 0x00000002, /* End of input was reached. no more data will be received from the endpoint */
+ SC_FL_ERROR = 0x00000004, /* A fatal error was reported */
+
+ SC_FL_NOLINGER = 0x00000008, /* may close without lingering. One-shot. */
+ SC_FL_NOHALF = 0x00000010, /* no half close, close both sides at once */
+ SC_FL_DONT_WAKE = 0x00000020, /* resync in progress, don't wake up */
+ SC_FL_INDEP_STR = 0x00000040, /* independent streams = don't update rex on write */
+
+ SC_FL_WONT_READ = 0x00000080, /* SC doesn't want to read data */
+ SC_FL_NEED_BUFF = 0x00000100, /* SC waits for an rx buffer allocation to complete */
+ SC_FL_NEED_ROOM = 0x00000200, /* SC needs more room in the rx buffer to store incoming data */
+
+ SC_FL_RCV_ONCE = 0x00000400, /* Don't loop to receive data. cleared after a successful receive */
+ SC_FL_SND_ASAP = 0x00000800, /* Don't wait for sending. cleared when all data were sent */
+ SC_FL_SND_NEVERWAIT = 0x00001000, /* Never wait for sending (permanent) */
+ SC_FL_SND_EXP_MORE = 0x00002000, /* More data expected to be sent very soon. cleared when all data were sent */
+
+ SC_FL_ABRT_WANTED = 0x00004000, /* An abort was requested and must be performed ASAP (up side to down side) */
+	SC_FL_SHUT_WANTED   = 0x00008000,  /* A shutdown was requested and must be performed ASAP (up side to down side) */
+ SC_FL_ABRT_DONE = 0x00010000, /* An abort was performed for the SC */
+ SC_FL_SHUT_DONE = 0x00020000, /* A shutdown was performed for the SC */
+
+ SC_FL_EOS = 0x00040000, /* End of stream was reached (from down side to up side) */
+};
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *sc_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags */
+ _(SC_FL_ISBACK, _(SC_FL_EOI, _(SC_FL_ERROR, _(SC_FL_NOLINGER, _(SC_FL_NOHALF,
+ _(SC_FL_DONT_WAKE, _(SC_FL_INDEP_STR, _(SC_FL_WONT_READ,
+ _(SC_FL_NEED_BUFF, _(SC_FL_NEED_ROOM,
+ _(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE,
+ _(SC_FL_ABRT_WANTED, _(SC_FL_SHUT_WANTED, _(SC_FL_ABRT_DONE, _(SC_FL_SHUT_DONE,
+ _(SC_FL_EOS)))))))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* A conn stream must have its own errors independently of the buffer's, so that
+ * applications can rely on what the buffer reports while the conn stream is
+ * performing some retries (eg: connection error). Some states are transient and
+ * do not last beyond process_session().
+ */
+enum sc_state {
+	SC_ST_INI = 0,           /* SC not solicited yet */
+ SC_ST_REQ, /* [transient] connection initiation desired and not started yet */
+ SC_ST_QUE, /* SC waiting in queue */
+ SC_ST_TAR, /* SC in turn-around state after failed connect attempt */
+ SC_ST_ASS, /* server just assigned to this SC */
+ SC_ST_CON, /* initiated connection request (resource exists) */
+ SC_ST_CER, /* [transient] previous connection attempt failed (resource released) */
+ SC_ST_RDY, /* [transient] ready proven after I/O success during SC_ST_CON */
+ SC_ST_EST, /* connection established (resource exists) */
+ SC_ST_DIS, /* [transient] disconnected from other side, but cleanup not done yet */
+ SC_ST_CLO, /* SC closed, might not existing anymore. Buffers shut. */
+} __attribute__((packed));
+
+/* state bits for use with lists of states */
+enum sc_state_bit {
+ SC_SB_NONE = 0,
+ SC_SB_INI = 1U << SC_ST_INI,
+ SC_SB_REQ = 1U << SC_ST_REQ,
+ SC_SB_QUE = 1U << SC_ST_QUE,
+ SC_SB_TAR = 1U << SC_ST_TAR,
+ SC_SB_ASS = 1U << SC_ST_ASS,
+ SC_SB_CON = 1U << SC_ST_CON,
+ SC_SB_CER = 1U << SC_ST_CER,
+ SC_SB_RDY = 1U << SC_ST_RDY,
+ SC_SB_EST = 1U << SC_ST_EST,
+ SC_SB_DIS = 1U << SC_ST_DIS,
+ SC_SB_CLO = 1U << SC_ST_CLO,
+ SC_SB_ALL = SC_SB_INI|SC_SB_REQ|SC_SB_QUE|SC_SB_TAR|SC_SB_ASS|SC_SB_CON|SC_SB_CER|SC_SB_RDY|SC_SB_EST|SC_SB_DIS|SC_SB_CLO,
+};
+
+struct stconn;
+
+/* A Stream Endpoint Descriptor (sedesc) is the link between the stream
+ * connector (ex. stconn) and the Stream Endpoint (mux or appctx).
+ * It always exists for either of them, and binds them together. It also
+ * contains some shared information relative to the endpoint. It is created by
+ * the first one which needs it and is shared by the other one, i.e. on the
+ * client side, it's created by the mux or applet and shared with the connector.
+ * An sedesc without stconn is called an ORPHANED descriptor. An sedesc with
+ * no mux/applet is called a DETACHED descriptor. Upon detach, the connector
+ * transfers the whole responsibility of the endpoint descriptor to the
+ * endpoint itself (mux/applet) and eventually creates a new sedesc (for
+ * instance on connection retries).
+ *
+ * <lra> should be updated when a read activity at the endpoint level is
+ * detected. It can be a successful receive or when an EOS/EOI is reported.
+ * A read activity is also reported when receives are unblocked.
+ *
+ * <fsb> should be updated when the first send of a series is blocked and reset
+ * when a successful send is reported.
+ *
+ *
+ * NOTE: <lra> and <fsb> must only be used via the SC api to compute read/write
+ * expiration date.
+ *
+ */
+struct sedesc {
+ void *se; /* the stream endpoint, i.e. the mux stream or the appctx */
+ struct connection *conn; /* the connection for connection-based streams */
+ struct stconn *sc; /* the stream connector we're attached to, or NULL */
+ struct iobuf iobuf; /* contains data forwarded by the other side and that must be sent by the stream endpoint */
+ unsigned int flags; /* SE_FL_* */
+ unsigned int lra; /* the last read activity */
+ unsigned int fsb; /* the first send blocked */
+ /* 4 bytes hole here */
+ struct xref xref; /* cross reference with the opposite SC */
+};
+
+/* sc_app_ops describes the application layer's operations and notification
+ * callbacks when I/O activity is reported and to use to perform shutr/shutw.
+ * There are very few combinations in practice (strm/chk <-> none/mux/applet).
+ */
+struct sc_app_ops {
+ void (*chk_rcv)(struct stconn *); /* chk_rcv function, may not be null */
+ void (*chk_snd)(struct stconn *); /* chk_snd function, may not be null */
+ void (*abort)(struct stconn *); /* abort function, may not be null */
+ void (*shutdown)(struct stconn *); /* shutdown function, may not be null */
+ int (*wake)(struct stconn *); /* data-layer callback to report activity */
+ char name[8]; /* data layer name, zero-terminated */
+};
+
+/*
+ * This structure describes the elements of a connection relevant to a stream
+ */
+struct stconn {
+ enum obj_type obj_type; /* differentiates connection from applet context */
+ enum sc_state state; /* SC_ST* */
+ /* 2 bytes hole here */
+
+ unsigned int flags; /* SC_FL_* */
+ unsigned int ioto; /* I/O activity timeout */
+ ssize_t room_needed; /* free space in the input buffer required to receive more data.
+ * -1 : the SC is waiting for room but not on a specific amount of data
+ * >= 0 : min free space required to progress. 0 means SC must be unblocked ASAP
+ */
+ struct wait_event wait_event; /* We're in a wait list */
+ struct sedesc *sedesc; /* points to the stream endpoint descriptor */
+ enum obj_type *app; /* points to the applicative point (stream or check) */
+ const struct sc_app_ops *app_ops; /* general operations used at the app layer */
+ struct sockaddr_storage *src; /* source address (pool), when known, otherwise NULL */
+ struct sockaddr_storage *dst; /* destination address (pool), when known, otherwise NULL */
+};
+
+
+#endif /* _HAPROXY_STCONN_T_H */
diff --git a/include/haproxy/stconn.h b/include/haproxy/stconn.h
new file mode 100644
index 0000000..7869fa3
--- /dev/null
+++ b/include/haproxy/stconn.h
@@ -0,0 +1,557 @@
+/*
+ * include/haproxy/stconn.h
+ * This file contains stream connector function prototypes
+ *
+ * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STCONN_H
+#define _HAPROXY_STCONN_H
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/htx-t.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/stconn-t.h>
+
+struct buffer;
+struct session;
+struct appctx;
+struct stream;
+struct check;
+
+#define IS_HTX_SC(sc) (sc_conn(sc) && IS_HTX_CONN(__sc_conn(sc)))
+
+struct sedesc *sedesc_new();
+void sedesc_free(struct sedesc *sedesc);
+
+struct stconn *sc_new_from_endp(struct sedesc *sedesc, struct session *sess, struct buffer *input);
+struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags);
+struct stconn *sc_new_from_check(struct check *check, unsigned int flags);
+void sc_free(struct stconn *sc);
+
+int sc_attach_mux(struct stconn *sc, void *target, void *ctx);
+int sc_attach_strm(struct stconn *sc, struct stream *strm);
+
+void sc_destroy(struct stconn *sc);
+int sc_reset_endp(struct stconn *sc);
+
+struct appctx *sc_applet_create(struct stconn *sc, struct applet *app);
+
+void sc_conn_prepare_endp_upgrade(struct stconn *sc);
+void sc_conn_abort_endp_upgrade(struct stconn *sc);
+void sc_conn_commit_endp_upgrade(struct stconn *sc);
+
+/* The se_fl_*() set of functions manipulate the stream endpoint flags from
+ * the stream endpoint itself. The sc_ep_*() set of functions manipulate the
+ * stream endpoint flags from the stream connector (ex. stconn).
+ * _zero() clears all flags, _clr() clears a set of flags (&=~), _set() sets
+ * a set of flags (|=), _test() tests the presence of a set of flags, _get()
+ * retrieves the exact flags, _setall() replaces the flags with the new value.
+ * All functions are purposely marked "forceinline" to avoid slowing down
+ * debugging code too much. None of these functions is atomic-safe.
+ */
+
+/* stream endpoint version */
+static forceinline void se_fl_zero(struct sedesc *se)
+{
+ se->flags = 0;
+}
+
+static forceinline void se_fl_setall(struct sedesc *se, uint all)
+{
+ se->flags = all;
+}
+
+/* sets flags <on> on se->flags and handles ERR_PENDING to ERROR promotion if
+ * needed (upon EOI/EOS).
+ */
+static forceinline void se_fl_set(struct sedesc *se, uint on)
+{
+ if (((on & (SE_FL_EOS|SE_FL_EOI)) && se->flags & SE_FL_ERR_PENDING) ||
+ ((on & SE_FL_ERR_PENDING) && se->flags & (SE_FL_EOI|SE_FL_EOS)))
+ on |= SE_FL_ERROR;
+ se->flags |= on;
+}
+
+static forceinline void se_fl_clr(struct sedesc *se, uint off)
+{
+ se->flags &= ~off;
+}
+
+static forceinline uint se_fl_test(const struct sedesc *se, uint test)
+{
+ return !!(se->flags & test);
+}
+
+static forceinline uint se_fl_get(const struct sedesc *se)
+{
+ return se->flags;
+}
+
+/* sets SE_FL_ERROR or SE_FL_ERR_PENDING on the endpoint */
+static inline void se_fl_set_error(struct sedesc *se)
+{
+ if (se_fl_test(se, (SE_FL_EOS|SE_FL_EOI)))
+ se_fl_set(se, SE_FL_ERROR);
+ else
+ se_fl_set(se, SE_FL_ERR_PENDING);
+}
+
+static inline void se_expect_no_data(struct sedesc *se)
+{
+ se_fl_set(se, SE_FL_EXP_NO_DATA);
+}
+
+static inline void se_expect_data(struct sedesc *se)
+{
+ se_fl_clr(se, SE_FL_EXP_NO_DATA);
+}
+
+static inline unsigned int se_have_ff_data(struct sedesc *se)
+{
+ return (se->iobuf.data | (long)se->iobuf.pipe);
+}
+
+static inline size_t se_ff_data(struct sedesc *se)
+{
+ return (se->iobuf.data + (se->iobuf.pipe ? se->iobuf.pipe->data : 0));
+}
+
+/* stream connector version */
+static forceinline void sc_ep_zero(struct stconn *sc)
+{
+ se_fl_zero(sc->sedesc);
+}
+
+static forceinline void sc_ep_setall(struct stconn *sc, uint all)
+{
+ se_fl_setall(sc->sedesc, all);
+}
+
+static forceinline void sc_ep_set(struct stconn *sc, uint on)
+{
+ se_fl_set(sc->sedesc, on);
+}
+
+static forceinline void sc_ep_clr(struct stconn *sc, uint off)
+{
+ se_fl_clr(sc->sedesc, off);
+}
+
+static forceinline uint sc_ep_test(const struct stconn *sc, uint test)
+{
+ return se_fl_test(sc->sedesc, test);
+}
+
+static forceinline uint sc_ep_get(const struct stconn *sc)
+{
+ return se_fl_get(sc->sedesc);
+}
+
+/* Return the last read activity timestamp. May be TICK_ETERNITY */
+static forceinline unsigned int sc_ep_lra(const struct stconn *sc)
+{
+ return sc->sedesc->lra;
+}
+
+/* Return the first send blocked timestamp. May be TICK_ETERNITY */
+static forceinline unsigned int sc_ep_fsb(const struct stconn *sc)
+{
+ return sc->sedesc->fsb;
+}
+
+/* Report a read activity. This function sets <lra> to now_ms */
+static forceinline void sc_ep_report_read_activity(struct stconn *sc)
+{
+ sc->sedesc->lra = now_ms;
+}
+
+/* Report a send blocked. This function sets <fsb> to now_ms if it was not
+ * already set or if something was sent (to renew <fsb>).
+ *
+ * if something was sent (<did_send> != 0), a read activity is also reported for
+ * non-independent stream.
+ */
+static forceinline void sc_ep_report_blocked_send(struct stconn *sc, int did_send)
+{
+ if (did_send || !tick_isset(sc->sedesc->fsb)) {
+ sc->sedesc->fsb = now_ms;
+ if (did_send && !(sc->flags & SC_FL_INDEP_STR))
+ sc_ep_report_read_activity(sc);
+ }
+}
+
+/* Report a send activity by setting <fsb> to TICK_ETERNITY.
+ * For non-independent stream, a read activity is reported.
+ */
+static forceinline void sc_ep_report_send_activity(struct stconn *sc)
+{
+ sc->sedesc->fsb = TICK_ETERNITY;
+ if (!(sc->flags & SC_FL_INDEP_STR))
+ sc_ep_report_read_activity(sc);
+}
+
+static forceinline unsigned int sc_ep_have_ff_data(struct stconn *sc)
+{
+ return se_have_ff_data(sc->sedesc);
+}
+
+static forceinline size_t sc_ep_ff_data(struct stconn *sc)
+{
+ return se_ff_data(sc->sedesc);
+}
+
+/* Returns the stream endpoint from a connector, without any control */
+static inline void *__sc_endp(const struct stconn *sc)
+{
+ return sc->sedesc->se;
+}
+
+/* Returns the connection from a sc if the endpoint is a mux stream. Otherwise
+ * NULL is returned. __sc_conn() returns the connection without any control
+ * while sc_conn() check the endpoint type.
+ */
+static inline struct connection *__sc_conn(const struct stconn *sc)
+{
+ return sc->sedesc->conn;
+}
+static inline struct connection *sc_conn(const struct stconn *sc)
+{
+ if (sc_ep_test(sc, SE_FL_T_MUX))
+ return __sc_conn(sc);
+ return NULL;
+}
+
+/* Returns the mux ops of the connection from an stconn if the endpoint is a
+ * mux stream. Otherwise NULL is returned.
+ */
+static inline const struct mux_ops *sc_mux_ops(const struct stconn *sc)
+{
+ const struct connection *conn = sc_conn(sc);
+
+ return (conn ? conn->mux : NULL);
+}
+
+/* Returns a pointer to the mux stream from a connector if the endpoint is
+ * a mux. Otherwise NULL is returned. __sc_mux_strm() returns the mux without
+ * any control while sc_mux_strm() checks the endpoint type.
+ */
+static inline void *__sc_mux_strm(const struct stconn *sc)
+{
+ return __sc_endp(sc);
+}
+static inline struct appctx *sc_mux_strm(const struct stconn *sc)
+{
+ if (sc_ep_test(sc, SE_FL_T_MUX))
+ return __sc_mux_strm(sc);
+ return NULL;
+}
+
+/* Returns the appctx from a sc if the endpoint is an appctx. Otherwise
+ * NULL is returned. __sc_appctx() returns the appctx without any control
+ * while sc_appctx() checks the endpoint type.
+ */
+static inline struct appctx *__sc_appctx(const struct stconn *sc)
+{
+ return __sc_endp(sc);
+}
+static inline struct appctx *sc_appctx(const struct stconn *sc)
+{
+ if (sc_ep_test(sc, SE_FL_T_APPLET))
+ return __sc_appctx(sc);
+ return NULL;
+}
+
+/* Returns the stream from a sc if the application is a stream. Otherwise
+ * NULL is returned. __sc_strm() returns the stream without any control
+ * while sc_strm() check the application type.
+ */
+static inline struct stream *__sc_strm(const struct stconn *sc)
+{
+ return __objt_stream(sc->app);
+}
+
+static inline struct stream *sc_strm(const struct stconn *sc)
+{
+ if (obj_type(sc->app) == OBJ_TYPE_STREAM)
+ return __sc_strm(sc);
+ return NULL;
+}
+
+/* Returns the healthcheck from a sc if the application is a
+ * healthcheck. Otherwise NULL is returned. __sc_check() returns the healthcheck
+ * without any control while sc_check() check the application type.
+ */
+static inline struct check *__sc_check(const struct stconn *sc)
+{
+ return __objt_check(sc->app);
+}
+static inline struct check *sc_check(const struct stconn *sc)
+{
+ if (obj_type(sc->app) == OBJ_TYPE_CHECK)
+ return __objt_check(sc->app);
+ return NULL;
+}
+
+/* Returns the name of the application layer's name for the stconn,
+ * or "NONE" when none is attached.
+ */
+static inline const char *sc_get_data_name(const struct stconn *sc)
+{
+ if (!sc->app_ops)
+ return "NONE";
+ return sc->app_ops->name;
+}
+
+/* shut read */
+static inline void sc_conn_shutr(struct stconn *sc, enum co_shr_mode mode)
+{
+ const struct mux_ops *mux;
+
+ BUG_ON(!sc_conn(sc));
+
+ if (sc_ep_test(sc, SE_FL_SHR))
+ return;
+
+ /* clean data-layer shutdown */
+ mux = sc_mux_ops(sc);
+ if (mux && mux->shutr)
+ mux->shutr(sc, mode);
+ sc_ep_set(sc, (mode == CO_SHR_DRAIN) ? SE_FL_SHRD : SE_FL_SHRR);
+}
+
+/* shut write */
+static inline void sc_conn_shutw(struct stconn *sc, enum co_shw_mode mode)
+{
+ const struct mux_ops *mux;
+
+ BUG_ON(!sc_conn(sc));
+
+ if (sc_ep_test(sc, SE_FL_SHW))
+ return;
+
+ /* clean data-layer shutdown */
+ mux = sc_mux_ops(sc);
+ if (mux && mux->shutw)
+ mux->shutw(sc, mode);
+ sc_ep_set(sc, (mode == CO_SHW_NORMAL) ? SE_FL_SHWN : SE_FL_SHWS);
+}
+
+/* completely close a stream connector (but do not detach it) */
+static inline void sc_conn_shut(struct stconn *sc)
+{
+ sc_conn_shutw(sc, CO_SHW_SILENT);
+ sc_conn_shutr(sc, CO_SHR_RESET);
+}
+
+/* completely close a stream connector after draining possibly pending data (but do not detach it) */
+static inline void sc_conn_drain_and_shut(struct stconn *sc)
+{
+ sc_conn_shutw(sc, CO_SHW_SILENT);
+ sc_conn_shutr(sc, CO_SHR_DRAIN);
+}
+
+/* Returns non-zero if the stream connector's Rx path is blocked because of
+ * lack of room in the input buffer. This usually happens after applets failed
+ * to deliver data into the channel's buffer and reported it via sc_need_room().
+ */
+__attribute__((warn_unused_result))
+static inline int sc_waiting_room(const struct stconn *sc)
+{
+ return !!(sc->flags & SC_FL_NEED_ROOM);
+}
+
+/* The stream endpoint announces it has more data to deliver to the stream's
+ * input buffer.
+ */
+static inline void se_have_more_data(struct sedesc *se)
+{
+ se_fl_clr(se, SE_FL_HAVE_NO_DATA);
+}
+
+/* The stream endpoint announces it doesn't have more data for the stream's
+ * input buffer.
+ */
+static inline void se_have_no_more_data(struct sedesc *se)
+{
+ se_fl_set(se, SE_FL_HAVE_NO_DATA);
+}
+
+/* The application layer informs a stream connector that it's willing to
+ * receive data from the endpoint. A read activity is reported.
+ */
+static inline void sc_will_read(struct stconn *sc)
+{
+ if (sc->flags & SC_FL_WONT_READ) {
+ sc->flags &= ~SC_FL_WONT_READ;
+ sc_ep_report_read_activity(sc);
+ }
+}
+
+/* The application layer informs a stream connector that it will not receive
+ * data from the endpoint (e.g. need to flush, bw limitations etc). Usually
+ * it corresponds to the channel's CF_DONT_READ flag.
+ */
+static inline void sc_wont_read(struct stconn *sc)
+{
+ sc->flags |= SC_FL_WONT_READ;
+}
+
+/* A frontend (applet) stream endpoint tells the connector it needs the other
+ * side to connect or fail before continuing to work. This is used for example
+ * to allow an applet not to deliver data to a request channel before a
+ * connection is confirmed.
+ */
+static inline void se_need_remote_conn(struct sedesc *se)
+{
+ se_fl_set(se, SE_FL_APPLET_NEED_CONN);
+}
+
+/* The application layer tells the stream connector that it just got the input
+ * buffer it was waiting for. A read activity is reported.
+ */
+static inline void sc_have_buff(struct stconn *sc)
+{
+ if (sc->flags & SC_FL_NEED_BUFF) {
+ sc->flags &= ~SC_FL_NEED_BUFF;
+ sc_ep_report_read_activity(sc);
+ }
+}
+
+/* The stream connector failed to get an input buffer and is waiting for it.
+ * It indicates a willingness to deliver data to the buffer that will have to
+ * be retried. As such, callers will often automatically clear SE_FL_HAVE_NO_DATA
+ * to be called again as soon as SC_FL_NEED_BUFF is cleared.
+ */
+static inline void sc_need_buff(struct stconn *sc)
+{
+ sc->flags |= SC_FL_NEED_BUFF;
+}
+
+/* Tell a stream connector some room was made in the input buffer and any
+ * failed attempt to inject data into it may be tried again. This is usually
+ * called after a successful transfer of buffer contents to the other side.
+ * A read activity is reported.
+ */
+static inline void sc_have_room(struct stconn *sc)
+{
+ if (sc->flags & SC_FL_NEED_ROOM) {
+ sc->flags &= ~SC_FL_NEED_ROOM;
+ sc->room_needed = 0;
+ sc_ep_report_read_activity(sc);
+ }
+}
+
+/* The stream connector announces it failed to put data into the input buffer
+ * by lack of room. Since it indicates a willingness to deliver data to the
+ * buffer that will have to be retried. Usually the caller will also clear
+ * SE_FL_HAVE_NO_DATA to be called again as soon as SC_FL_NEED_ROOM is cleared.
+ *
+ * The caller is responsible for specifying the amount of free space required to
+ * progress. It must take care to not exceed the buffer size.
+ */
+static inline void sc_need_room(struct stconn *sc, ssize_t room_needed)
+{
+ sc->flags |= SC_FL_NEED_ROOM;
+ BUG_ON_HOT(room_needed > (ssize_t)global.tune.bufsize);
+ sc->room_needed = room_needed;
+}
+
+/* The stream endpoint indicates that it's ready to consume data from the
+ * stream's output buffer. Report a send activity if the SE is unblocked.
+ */
+static inline void se_will_consume(struct sedesc *se)
+{
+ if (se_fl_test(se, SE_FL_WONT_CONSUME)) {
+ se_fl_clr(se, SE_FL_WONT_CONSUME);
+ sc_ep_report_send_activity(se->sc);
+ }
+}
+
+/* The stream endpoint indicates that it's not willing to consume data from the
+ * stream's output buffer.
+ */
+static inline void se_wont_consume(struct sedesc *se)
+{
+ se_fl_set(se, SE_FL_WONT_CONSUME);
+}
+
+/* The stream endpoint indicates that it's willing to consume data from the
+ * stream's output buffer, but that there's not enough, so it doesn't want to
+ * be woken up until more are presented.
+ */
+static inline void se_need_more_data(struct sedesc *se)
+{
+ se_will_consume(se);
+ se_fl_set(se, SE_FL_WAIT_DATA);
+}
+
+
+static inline size_t se_nego_ff(struct sedesc *se, struct buffer *input, size_t count, unsigned int may_splice)
+{
+ size_t ret = 0;
+
+ if (se_fl_test(se, SE_FL_T_MUX)) {
+ const struct mux_ops *mux = se->conn->mux;
+
+ se->iobuf.flags &= ~IOBUF_FL_FF_BLOCKED;
+ if (mux->nego_fastfwd && mux->done_fastfwd) {
+ /* Disable zero-copy forwarding if EOS or an error was reported. */
+ if (se_fl_test(se, SE_FL_EOS|SE_FL_ERROR|SE_FL_ERR_PENDING)) {
+ se->iobuf.flags |= IOBUF_FL_NO_FF;
+ goto end;
+ }
+
+ ret = mux->nego_fastfwd(se->sc, input, count, may_splice);
+ if (se->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
+ sc_ep_report_blocked_send(se->sc, 0);
+
+ if (!(se->sc->wait_event.events & SUB_RETRY_SEND)) {
+				/* The SC must be subscribed for sends to be notified when some
+ * space is made
+ */
+ mux->subscribe(se->sc, SUB_RETRY_SEND, &se->sc->wait_event);
+ }
+ }
+ goto end;
+ }
+ }
+ se->iobuf.flags |= IOBUF_FL_NO_FF;
+
+ end:
+ return ret;
+}
+
+static inline void se_done_ff(struct sedesc *se)
+{
+ if (se_fl_test(se, SE_FL_T_MUX)) {
+ const struct mux_ops *mux = se->conn->mux;
+ size_t sent, to_send = se_ff_data(se);
+
+ BUG_ON(!mux->done_fastfwd);
+ sent = mux->done_fastfwd(se->sc);
+ if (to_send) {
+ if (sent == to_send)
+ sc_ep_report_send_activity(se->sc);
+ else
+ sc_ep_report_blocked_send(se->sc, sent != 0);
+ }
+ }
+}
+
+#endif /* _HAPROXY_STCONN_H */
diff --git a/include/haproxy/stick_table-t.h b/include/haproxy/stick_table-t.h
new file mode 100644
index 0000000..749cb9a
--- /dev/null
+++ b/include/haproxy/stick_table-t.h
@@ -0,0 +1,250 @@
+/*
+ * include/haproxy/stick_table-t.h
+ * Macros, variables and structures for stick tables management.
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STICK_TABLE_T_H
+#define _HAPROXY_STICK_TABLE_T_H
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/thread-t.h>
+
+#define STKTABLE_MAX_DT_ARRAY_SIZE 100 /* upper bound on gpc/gpt array sizes (presumably; confirm in parser) */
+
+/* The types of extra data we can store in a stick table */
+enum {
+ STKTABLE_DT_SERVER_ID, /* the server ID to use with this stream if > 0 */
+ STKTABLE_DT_GPT0, /* General Purpose Flag 0. */
+ STKTABLE_DT_GPC0, /* General Purpose Counter 0 (unsigned 32-bit integer) */
+ STKTABLE_DT_GPC0_RATE, /* General Purpose Counter 0's event rate */
+ STKTABLE_DT_CONN_CNT, /* cumulated number of connections */
+ STKTABLE_DT_CONN_RATE, /* incoming connection rate */
+ STKTABLE_DT_CONN_CUR, /* concurrent number of connections */
+ STKTABLE_DT_SESS_CNT, /* cumulated number of sessions (accepted connections) */
+ STKTABLE_DT_SESS_RATE, /* accepted sessions rate */
+ STKTABLE_DT_HTTP_REQ_CNT, /* cumulated number of incoming HTTP requests */
+ STKTABLE_DT_HTTP_REQ_RATE,/* incoming HTTP request rate */
+ STKTABLE_DT_HTTP_ERR_CNT, /* cumulated number of HTTP requests errors (4xx) */
+ STKTABLE_DT_HTTP_ERR_RATE,/* HTTP request error rate */
+ STKTABLE_DT_BYTES_IN_CNT, /* cumulated bytes count from client to servers */
+ STKTABLE_DT_BYTES_IN_RATE,/* bytes rate from client to servers */
+ STKTABLE_DT_BYTES_OUT_CNT,/* cumulated bytes count from servers to client */
+ STKTABLE_DT_BYTES_OUT_RATE,/* bytes rate from servers to client */
+ STKTABLE_DT_GPC1, /* General Purpose Counter 1 (unsigned 32-bit integer) */
+ STKTABLE_DT_GPC1_RATE, /* General Purpose Counter 1's event rate */
+ STKTABLE_DT_SERVER_KEY, /* The server key */
+ STKTABLE_DT_HTTP_FAIL_CNT, /* cumulated number of HTTP server failures */
+ STKTABLE_DT_HTTP_FAIL_RATE,/* HTTP server failures rate */
+ STKTABLE_DT_GPT, /* array of gpt */
+ STKTABLE_DT_GPC, /* array of gpc */
+ STKTABLE_DT_GPC_RATE, /* array of gpc_rate */
+
+
+ STKTABLE_STATIC_DATA_TYPES,/* number of types above */
+ /* up to STKTABLE_EXTRA_DATA_TYPES types may be registered here, always
+ * followed by the number of data types, must always be last.
+ */
+ STKTABLE_DATA_TYPES = STKTABLE_STATIC_DATA_TYPES + STKTABLE_EXTRA_DATA_TYPES
+};
+
+/* The equivalent standard types of the stored data */
+enum {
+ STD_T_SINT = 0, /* data is of type signed int */
+ STD_T_UINT, /* data is of type unsigned int */
+ STD_T_ULL, /* data is of type unsigned long long */
+ STD_T_FRQP, /* data is of type freq_ctr */
+ STD_T_DICT, /* data is of type key of dictionary entry */
+};
+
+/* The types of optional arguments to stored data */
+enum {
+ ARG_T_NONE = 0, /* data type takes no argument (default) */
+ ARG_T_INT, /* signed integer */
+ ARG_T_DELAY, /* a delay which supports time units */
+};
+
+/* The types of keys that servers can be identified by */
+enum {
+ STKTABLE_SRV_NAME = 0,
+ STKTABLE_SRV_ADDR,
+};
+
+/* stick table key type flags */
+#define STK_F_CUSTOM_KEYSIZE 0x00000001 /* this table's key size is configurable */
+
+/* WARNING: if new fields are added, they must be initialized in stream_accept()
+ * and freed in stream_free() !
+ *
+ * What's the purpose of these two macros:
+ * - STKCTR_TRACK_BACKEND indicates that a tracking pointer was set from the backend
+ * and thus that when a keep-alive request goes to another backend, the track
+ * must cease.
+ *
+ * - STKCTR_TRACK_CONTENT indicates that the tracking pointer was set in a
+ * content-aware rule (tcp-request content or http-request) and that the
+ * tracking has to be performed in the stream and not in the session, and
+ * will cease for a new keep-alive request over the same connection.
+ *
+ * These values are mixed with the stksess pointer in stkctr->entry.
+ */
+#define STKCTR_TRACK_BACKEND 1
+#define STKCTR_TRACK_CONTENT 2
+
+/* stick_table extra data. This is mainly used for casting or size computation.
+ * Each member corresponds to one of the STD_T_* standard types above.
+ */
+union stktable_data {
+ /* standard types for easy casting */
+ int std_t_sint;
+ unsigned int std_t_uint;
+ unsigned long long std_t_ull;
+ struct freq_ctr std_t_frqp;
+ struct dict_entry *std_t_dict;
+};
+
+/* known data types */
+struct stktable_data_type {
+ const char *name; /* name of the data type */
+ int std_type; /* standard type we can use for this data, STD_T_* */
+ int arg_type; /* type of optional argument, ARG_T_* */
+ uint is_array:1; /* this is an array of gpc/gpt */
+ uint is_local:1; /* this is local only and never learned */
+ uint as_is:1; /* cannot be processed / used with arithmetic operations */
+};
+
+/* stick table keyword type */
+struct stktable_type {
+ const char *kw; /* keyword string */
+ int flags; /* type flags */
+ size_t default_size; /* default key size */
+};
+
+/* Sticky session.
+ * Any additional data related to the sticky session is installed *before*
+ * stksess (with negative offsets). This allows us to run variable-sized
+ * keys and variable-sized data without making use of intermediate pointers.
+ */
+struct stksess {
+ unsigned int expire; /* session expiration date */
+ unsigned int ref_cnt; /* reference count, can only purge when zero */
+ __decl_thread(HA_RWLOCK_T lock); /* lock related to the table entry */
+ int shard; /* shard */
+ struct eb32_node exp; /* ebtree node used to hold the session in expiration tree */
+ struct eb32_node upd; /* ebtree node used to hold the update sequence tree */
+ struct ebmb_node key; /* ebtree node used to hold the session in table */
+ /* WARNING! do not put anything after <key>, it's used by the key */
+};
+
+
+/* stick table */
+struct stktable {
+ char *id; /* local table id name. */
+ size_t idlen; /* local table id name length. */
+ char *nid; /* table id name sent over the network with peers protocol. */
+ struct stktable *next; /* The stick-table may be linked when belonging to
+ * the same configuration section.
+ */
+ struct ebpt_node name; /* Stick-tables are looked up by name here. */
+ struct pool_head *pool; /* pool used to allocate sticky sessions */
+ struct task *exp_task; /* expiration task */
+ struct task *sync_task; /* sync task */
+
+ uint64_t hash_seed; /* hash seed used by shards */
+ union {
+ struct peers *p; /* sync peers */
+ char *name;
+ } peers;
+
+ unsigned long type; /* type of table (determines key format) */
+ size_t key_size; /* size of a key, maximum size in case of string */
+ unsigned int server_key_type; /* What type of key is used to identify servers */
+ unsigned int size; /* maximum number of sticky sessions in table */
+ int nopurge; /* if non-zero, don't purge sticky sessions when full */
+ int expire; /* time to live for sticky sessions (milliseconds) */
+ int data_size; /* the size of the data that is prepended *before* stksess */
+ int data_ofs[STKTABLE_DATA_TYPES]; /* negative offsets of present data types, or 0 if absent */
+ unsigned int data_nbelem[STKTABLE_DATA_TYPES]; /* to store nb_elem in case of array types */
+ union {
+ int i;
+ unsigned int u;
+ void *p;
+ } data_arg[STKTABLE_DATA_TYPES]; /* optional argument of each data type */
+ struct proxy *proxy; /* The proxy this stick-table is attached to, if any.*/
+ union {
+ char *name; /* preparsing hint */
+ struct stktable *t; /* postparsing */
+ void *ptr; /* generic ptr to check if set or not */
+ } write_to; /* updates received on the source table will also update write_to */
+
+ THREAD_ALIGN(64);
+
+ struct eb_root keys; /* head of sticky session tree */
+ struct eb_root exps; /* head of sticky session expiration tree */
+ unsigned int refcnt; /* number of local peer over all peers sections
+ attached to this table */
+ unsigned int current; /* number of sticky sessions currently in table */
+ __decl_thread(HA_RWLOCK_T lock); /* lock related to the table */
+
+ THREAD_ALIGN(64);
+
+ struct eb_root updates; /* head of sticky updates sequence tree, uses updt_lock */
+ unsigned int update; /* uses updt_lock */
+ unsigned int localupdate; /* uses updt_lock */
+ unsigned int commitupdate;/* used to identify the latest local updates pending for sync, uses updt_lock */
+
+ THREAD_ALIGN(64);
+ /* this lock is heavily used and must be on its own cache line */
+ __decl_thread(HA_RWLOCK_T updt_lock); /* lock protecting the updates part */
+
+ /* rarely used config stuff below (should not interfere with updt_lock) */
+ struct proxy *proxies_list; /* The list of proxies which reference this stick-table. */
+ struct {
+ const char *file; /* The file where the stick-table is declared. */
+ int line; /* The line in this <file> the stick-table is declared. */
+ } conf;
+};
+
+extern struct stktable_data_type stktable_data_types[STKTABLE_DATA_TYPES];
+
+/* stick table key */
+struct stktable_key {
+ void *key; /* pointer on key buffer */
+ size_t key_len; /* data length to read from <key>, in case of a null-terminated string */
+};
+
+/* stick counter. The <entry> member is a composite address (caddr) made of a
+ * pointer to an stksess struct, and two flags among STKCTR_TRACK_* above.
+ */
+struct stkctr {
+ unsigned long entry; /* entry containing counters currently being tracked by this stream */
+ struct stktable *table; /* table the counters above belong to (undefined if counters are null) */
+};
+
+/* parameters to configure tracked counters */
+struct track_ctr_prm {
+ struct sample_expr *expr; /* expression used as the key */
+ union {
+ struct stktable *t; /* a pointer to the table */
+ char *n; /* or its name during parsing. */
+ } table;
+};
+
+#endif /* _HAPROXY_STICK_TABLE_T_H */
diff --git a/include/haproxy/stick_table.h b/include/haproxy/stick_table.h
new file mode 100644
index 0000000..3200437
--- /dev/null
+++ b/include/haproxy/stick_table.h
@@ -0,0 +1,404 @@
+/*
+ * include/haproxy/stick_table.h
+ * Functions for stick tables management.
+ *
+ * Copyright (C) 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STICK_TABLE_H
+#define _HAPROXY_STICK_TABLE_H
+
+#include <haproxy/api.h>
+#include <haproxy/dict-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/ticks.h>
+
+extern struct stktable *stktables_list;
+extern struct pool_head *pool_head_stk_ctr;
+extern struct stktable_type stktable_types[];
+
+#define stktable_data_size(type) (sizeof(((union stktable_data*)0)->type))
+#define stktable_data_cast(ptr, type) ((union stktable_data*)(ptr))->type
+
+void stktable_store_name(struct stktable *t);
+struct stktable *stktable_find_by_name(const char *name);
+struct stksess *stksess_new(struct stktable *t, struct stktable_key *key);
+void stksess_setkey(struct stktable *t, struct stksess *ts, struct stktable_key *key);
+void stksess_free(struct stktable *t, struct stksess *ts);
+int stksess_kill(struct stktable *t, struct stksess *ts, int decrefcount);
+int stktable_get_key_shard(struct stktable *t, const void *key, size_t len);
+
+int stktable_init(struct stktable *t, char **err_msg);
+void stktable_deinit(struct stktable *t);
+int stktable_parse_type(char **args, int *idx, unsigned long *type, size_t *key_size, const char *file, int linenum);
+int parse_stick_table(const char *file, int linenum, char **args,
+ struct stktable *t, char *id, char *nid, struct peers *peers);
+struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *key);
+struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts);
+void stktable_requeue_exp(struct stktable *t, const struct stksess *ts);
+void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int decrefcount, int expire, int decrefcnt);
+void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt);
+void stktable_touch_local(struct stktable *t, struct stksess *ts, int decrefccount);
+struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts);
+struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key);
+struct stksess *stktable_update_key(struct stktable *table, struct stktable_key *key);
+struct stktable_key *smp_to_stkey(struct sample *smp, struct stktable *t);
+struct stktable_key *stktable_fetch_key(struct stktable *t, struct proxy *px, struct session *sess,
+ struct stream *strm, unsigned int opt,
+ struct sample_expr *expr, struct sample *smp);
+struct stkctr *smp_fetch_sc_stkctr(struct session *sess, struct stream *strm, const struct arg *args, const char *kw, struct stkctr *stkctr);
+struct stkctr *smp_create_src_stkctr(struct session *sess, struct stream *strm, const struct arg *args, const char *kw, struct stkctr *stkctr);
+int stktable_compatible_sample(struct sample_expr *expr, unsigned long table_type);
+int stktable_register_data_store(int idx, const char *name, int std_type, int arg_type);
+int stktable_get_data_type(char *name);
+int stktable_trash_oldest(struct stktable *t, int to_batch);
+int __stksess_kill(struct stktable *t, struct stksess *ts);
+
+/************************* Composite address manipulation *********************
+ * Composite addresses are simply unsigned long data in which the higher bits
+ * represent a pointer, and the two lower bits are flags. There are several
+ * places where we just want to associate one or two flags to a pointer (eg,
+ * to type it), and these functions permit this. The pointer is necessarily a
+ * 32-bit aligned pointer, as its two lower bits will be cleared and replaced
+ * with the flags.
+ *****************************************************************************/
+
+/* Masks the two lower bits of a composite address and converts it to a
+ * pointer. This is used to mix some bits with some aligned pointers to
+ * structs and to retrieve the original (32-bit aligned) pointer.
+ */
+static inline void *caddr_to_ptr(unsigned long caddr)
+{
+ return (void *)(caddr & ~3UL);
+}
+
+/* Only retrieves the two lower bits of a composite address. This is used to mix
+ * some bits with some aligned pointers to structs and to retrieve the original
+ * data (2 bits).
+ */
+static inline unsigned int caddr_to_data(unsigned long caddr)
+{
+ return (caddr & 3UL);
+}
+
+/* Combines the aligned pointer whose 2 lower bits will be masked with the bits
+ * from <data> to form a composite address. The original pointer can later be
+ * recovered with caddr_to_ptr() and the flag bits with caddr_to_data().
+ */
+static inline unsigned long caddr_from_ptr(void *ptr, unsigned int data)
+{
+ return (((unsigned long)ptr) & ~3UL) + (data & 3);
+}
+
+/* sets the 2 bits of <data> in the <caddr> composite address */
+static inline unsigned long caddr_set_flags(unsigned long caddr, unsigned int data)
+{
+ return caddr | (data & 3);
+}
+
+/* clears the 2 bits of <data> in the <caddr> composite address */
+static inline unsigned long caddr_clr_flags(unsigned long caddr, unsigned int data)
+{
+ return caddr & ~(unsigned long)(data & 3);
+}
+
+
+/* return allocation size for standard data type <type> (one of STD_T_*), or
+ * zero for an unknown type.
+ */
+static inline int stktable_type_size(int type)
+{
+ switch(type) {
+ case STD_T_SINT:
+ case STD_T_UINT:
+ return sizeof(int);
+ case STD_T_ULL:
+ return sizeof(unsigned long long);
+ case STD_T_FRQP:
+ return sizeof(struct freq_ctr);
+ case STD_T_DICT:
+ return sizeof(struct dict_entry *);
+ }
+ return 0;
+}
+
+int stktable_alloc_data_type(struct stktable *t, int type, const char *sa, const char *sa2);
+
+/* return pointer for data type <type> in sticky session <ts> of table <t>, all
+ * of which must exist (otherwise use stktable_data_ptr() if unsure). The data
+ * lives at a negative offset before the stksess.
+ */
+static inline void *__stktable_data_ptr(struct stktable *t, struct stksess *ts, int type)
+{
+ return (void *)ts + t->data_ofs[type];
+}
+
+/* return pointer for data type <type> in sticky session <ts> of table <t>, or
+ * NULL if <ts> is NULL, the type is out of range, or the type is not stored.
+ */
+static inline void *stktable_data_ptr(struct stktable *t, struct stksess *ts, int type)
+{
+ if (type >= STKTABLE_DATA_TYPES)
+ return NULL;
+
+ if (!t->data_ofs[type]) /* type not stored */
+ return NULL;
+
+ if (!ts)
+ return NULL;
+
+ return __stktable_data_ptr(t, ts, type);
+}
+
+/* return pointer on the element of index <idx> from the array data type <type>
+ * in sticky session <ts> of table <t>, or NULL if either <ts> is NULL
+ * or this element is not stored because this type is not stored or
+ * the requested index is greater than the number of elements of the array.
+ * Note: this function is also usable on non-array types, they are
+ * considered as arrays of size 1, so a call with <idx> at 0
+ * has the same behavior as 'stktable_data_ptr'.
+ */
+static inline void *stktable_data_ptr_idx(struct stktable *t, struct stksess *ts, int type, unsigned int idx)
+{
+ if (type >= STKTABLE_DATA_TYPES)
+ return NULL;
+
+ if (!t->data_ofs[type]) /* type not stored */
+ return NULL;
+
+ if (!ts)
+ return NULL;
+
+ if (t->data_nbelem[type] <= idx)
+ return NULL;
+
+ return __stktable_data_ptr(t, ts, type) + idx*stktable_type_size(stktable_data_types[type].std_type);
+}
+
+/* kill an entry if it's expired and its ref_cnt is zero */
+static inline int __stksess_kill_if_expired(struct stktable *t, struct stksess *ts)
+{
+ if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms))
+ return __stksess_kill(t, ts);
+
+ return 0;
+}
+
+/* Decrement <ts>'s ref count when <decrefcnt> is non-zero and, if it reached
+ * zero (or no decrement was requested), kill the entry if it is expired,
+ * taking the table's write lock to do so.
+ */
+static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts, int decrefcnt)
+{
+
+ if (decrefcnt && HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1) != 0)
+ return;
+
+ if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms)) {
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+ __stksess_kill_if_expired(t, ts);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+ }
+}
+
+/* sets the stick counter's entry pointer */
+static inline void stkctr_set_entry(struct stkctr *stkctr, struct stksess *entry)
+{
+ stkctr->entry = caddr_from_ptr(entry, 0);
+}
+
+/* returns the entry pointer from a stick counter */
+static inline struct stksess *stkctr_entry(struct stkctr *stkctr)
+{
+ return caddr_to_ptr(stkctr->entry);
+}
+
+/* returns the two flags from a stick counter */
+static inline unsigned int stkctr_flags(struct stkctr *stkctr)
+{
+ return caddr_to_data(stkctr->entry);
+}
+
+/* sets up to two flags at a time on a composite address */
+static inline void stkctr_set_flags(struct stkctr *stkctr, unsigned int flags)
+{
+ stkctr->entry = caddr_set_flags(stkctr->entry, flags);
+}
+
+/* clears up to two flags at a time from a stick counter's composite address */
+static inline void stkctr_clr_flags(struct stkctr *stkctr, unsigned int flags)
+{
+ stkctr->entry = caddr_clr_flags(stkctr->entry, flags);
+}
+
+/* Increase the number of cumulated HTTP requests in the tracked counter
+ * <stkctr>. It returns 0 if the entry pointer does not exist and nothing is
+ * performed. Otherwise it returns 1. The entry's write lock is held while
+ * updating, and the entry is touched afterwards to re-schedule a peer sync.
+ */
+static inline int stkctr_inc_http_req_ctr(struct stkctr *stkctr)
+{
+ struct stksess *ts;
+ void *ptr1, *ptr2;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_CNT);
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_uint)++;
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_REQ_RATE);
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ if (ptr1 || ptr2)
+ stktable_touch_local(stkctr->table, ts, 0);
+ return 1;
+}
+
+/* Increase the number of cumulated failed HTTP requests in the tracked counter
+ * <stkctr>. It returns 0 if the entry pointer does not exist and nothing is
+ * performed. Otherwise it returns 1. The entry's write lock is held while
+ * updating, and the entry is touched afterwards to re-schedule a peer sync.
+ */
+static inline int stkctr_inc_http_err_ctr(struct stkctr *stkctr)
+{
+ struct stksess *ts;
+ void *ptr1, *ptr2;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_CNT);
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_uint)++;
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_ERR_RATE);
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ if (ptr1 || ptr2)
+ stktable_touch_local(stkctr->table, ts, 0);
+ return 1;
+}
+
+/* Increase the number of cumulated failed HTTP responses in the tracked counter
+ * <stkctr>. It returns 0 if the entry pointer does not exist and nothing is
+ * performed. Otherwise it returns 1. The entry's write lock is held while
+ * updating, and the entry is touched afterwards to re-schedule a peer sync.
+ */
+static inline int stkctr_inc_http_fail_ctr(struct stkctr *stkctr)
+{
+ struct stksess *ts;
+ void *ptr1, *ptr2;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_FAIL_CNT);
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_uint)++;
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_HTTP_FAIL_RATE);
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_FAIL_RATE].u, 1);
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ if (ptr1 || ptr2)
+ stktable_touch_local(stkctr->table, ts, 0);
+ return 1;
+}
+
+/* Increase the number of bytes received in the tracked counter <stkctr> by
+ * <bytes>. It returns 0 if the entry pointer does not exist and nothing is
+ * performed. Otherwise it returns 1. The entry's write lock is held while
+ * updating, and the entry is touched afterwards to re-schedule a peer sync.
+ */
+static inline int stkctr_inc_bytes_in_ctr(struct stkctr *stkctr, unsigned long long bytes)
+{
+ struct stksess *ts;
+ void *ptr1, *ptr2;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_CNT);
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_ull) += bytes;
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_IN_RATE);
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u, bytes);
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ if (ptr1 || ptr2)
+ stktable_touch_local(stkctr->table, ts, 0);
+ return 1;
+}
+
+/* Increase the number of bytes sent in the tracked counter <stkctr> by
+ * <bytes>. It returns 0 if the entry pointer does not exist and nothing is
+ * performed. Otherwise it returns 1. The entry's write lock is held while
+ * updating, and the entry is touched afterwards to re-schedule a peer sync.
+ */
+static inline int stkctr_inc_bytes_out_ctr(struct stkctr *stkctr, unsigned long long bytes)
+{
+ struct stksess *ts;
+ void *ptr1, *ptr2;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_CNT);
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_ull) += bytes;
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_BYTES_OUT_RATE);
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u, bytes);
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ if (ptr1 || ptr2)
+ stktable_touch_local(stkctr->table, ts, 0);
+ return 1;
+}
+
+#endif /* _HAPROXY_STICK_TABLE_H */
diff --git a/include/haproxy/stream-t.h b/include/haproxy/stream-t.h
new file mode 100644
index 0000000..7e79b96
--- /dev/null
+++ b/include/haproxy/stream-t.h
@@ -0,0 +1,301 @@
+/*
+ * include/haproxy/stream-t.h
+ * This file defines everything related to streams.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STREAM_T_H
+#define _HAPROXY_STREAM_T_H
+
+#include <sys/time.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/stconn-t.h>
+#include <haproxy/dynbuf-t.h>
+#include <haproxy/filters-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/show_flags-t.h>
+#include <haproxy/stick_table-t.h>
+#include <haproxy/vars-t.h>
+
+
+/* Various Stream Flags, bits values 0x01 to 0x100 (shift 0).
+ * Please also update the txn_show_flags() function below in case of changes.
+ */
+#define SF_DIRECT 0x00000001 /* connection made on the server matching the client cookie */
+#define SF_ASSIGNED 0x00000002 /* no need to assign a server to this stream */
+/* unused: 0x00000004 */
+#define SF_BE_ASSIGNED 0x00000008 /* a backend was assigned. Conns are accounted. */
+
+#define SF_FORCE_PRST 0x00000010 /* force persistence here, even if server is down */
+#define SF_MONITOR 0x00000020 /* this stream comes from a monitoring system */
+#define SF_CURR_SESS 0x00000040 /* a connection is currently being counted on the server */
+#define SF_CONN_EXP 0x00000080 /* timeout has expired */
+#define SF_REDISP 0x00000100 /* set if this stream was redispatched from one server to another */
+#define SF_IGNORE 0x00000200 /* The stream lead to a mux upgrade, and should be ignored */
+#define SF_REDIRECTABLE 0x00000400 /* set if this stream is redirectable (GET or HEAD) */
+#define SF_HTX 0x00000800 /* set if this stream is an htx stream */
+
+/* stream termination conditions, bits values 0x1000 to 0xc000 (0-12 shift 12) */
+#define SF_ERR_NONE 0x00000000 /* normal end of request */
+#define SF_ERR_LOCAL 0x00001000 /* the proxy locally processed this request => not an error */
+#define SF_ERR_CLITO 0x00002000 /* client time-out */
+#define SF_ERR_CLICL 0x00003000 /* client closed (read/write error) */
+#define SF_ERR_SRVTO 0x00004000 /* server time-out, connect time-out */
+#define SF_ERR_SRVCL 0x00005000 /* server closed (connect/read/write error) */
+#define SF_ERR_PRXCOND 0x00006000 /* the proxy decided to close (deny...) */
+#define SF_ERR_RESOURCE 0x00007000 /* the proxy encountered a lack of a local resources (fd, mem, ...) */
+#define SF_ERR_INTERNAL 0x00008000 /* the proxy encountered an internal error */
+#define SF_ERR_DOWN 0x00009000 /* the proxy killed a stream because the backend became unavailable */
+#define SF_ERR_KILLED 0x0000a000 /* the proxy killed a stream because it was asked to do so */
+#define SF_ERR_UP 0x0000b000 /* the proxy killed a stream because a preferred backend became available */
+#define SF_ERR_CHK_PORT 0x0000c000 /* no port could be found for a health check. TODO: check SF_ERR_SHIFT */
+#define SF_ERR_MASK 0x0000f000 /* mask to get only stream error flags */
+#define SF_ERR_SHIFT 12 /* bit shift */
+
+/* stream state at termination, bits values 0x10000 to 0x70000 (0-7 shift 16) */
+#define SF_FINST_R 0x00010000 /* stream ended during client request */
+#define SF_FINST_C 0x00020000 /* stream ended during server connect */
+#define SF_FINST_H 0x00030000 /* stream ended during server headers */
+#define SF_FINST_D 0x00040000 /* stream ended during data phase */
+#define SF_FINST_L 0x00050000 /* stream ended while pushing last data to client */
+#define SF_FINST_Q 0x00060000 /* stream ended while waiting in queue for a server slot */
+#define SF_FINST_T 0x00070000 /* stream ended tarpitted */
+#define SF_FINST_MASK 0x00070000 /* mask to get only final stream state flags */
+#define SF_FINST_SHIFT 16 /* bit shift */
+
+#define SF_IGNORE_PRST 0x00080000 /* ignore persistence */
+
+#define SF_SRV_REUSED 0x00100000 /* the server-side connection was reused */
+#define SF_SRV_REUSED_ANTICIPATED 0x00200000 /* the connection was reused but the mux is not ready yet */
+#define SF_WEBSOCKET 0x00400000 /* websocket stream */ // TODO: must be removed
+#define SF_SRC_ADDR 0x00800000 /* get the source ip/port with getsockname */
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG and __APPEND_ENUM macros. The new end of the buffer is
+ * returned.
+ */
+static forceinline char *strm_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+#define _e(m, e, ...) __APPEND_ENUM(buf, len, delim, flg, m, e, #e, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags & enums */
+ _(SF_IGNORE_PRST, _(SF_SRV_REUSED, _(SF_SRV_REUSED_ANTICIPATED,
+ _(SF_WEBSOCKET, _(SF_SRC_ADDR)))));
+
+ /* final stream state: multi-bit field decoded under SF_FINST_MASK */
+ _e(SF_FINST_MASK, SF_FINST_R, _e(SF_FINST_MASK, SF_FINST_C,
+ _e(SF_FINST_MASK, SF_FINST_H, _e(SF_FINST_MASK, SF_FINST_D,
+ _e(SF_FINST_MASK, SF_FINST_L, _e(SF_FINST_MASK, SF_FINST_Q,
+ _e(SF_FINST_MASK, SF_FINST_T)))))));
+
+ /* termination cause: multi-bit field decoded under SF_ERR_MASK */
+ _e(SF_ERR_MASK, SF_ERR_LOCAL, _e(SF_ERR_MASK, SF_ERR_CLITO,
+ _e(SF_ERR_MASK, SF_ERR_CLICL, _e(SF_ERR_MASK, SF_ERR_SRVTO,
+ _e(SF_ERR_MASK, SF_ERR_SRVCL, _e(SF_ERR_MASK, SF_ERR_PRXCOND,
+ _e(SF_ERR_MASK, SF_ERR_RESOURCE, _e(SF_ERR_MASK, SF_ERR_INTERNAL,
+ _e(SF_ERR_MASK, SF_ERR_DOWN, _e(SF_ERR_MASK, SF_ERR_KILLED,
+ _e(SF_ERR_MASK, SF_ERR_UP, _e(SF_ERR_MASK, SF_ERR_CHK_PORT))))))))))));
+
+ /* remaining single-bit stream flags, in declaration order */
+ _(SF_DIRECT, _(SF_ASSIGNED, _(SF_BE_ASSIGNED, _(SF_FORCE_PRST,
+ _(SF_MONITOR, _(SF_CURR_SESS, _(SF_CONN_EXP, _(SF_REDISP,
+ _(SF_IGNORE, _(SF_REDIRECTABLE, _(SF_HTX)))))))))));
+
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _e
+#undef _
+}
+
+
+/* flags for the proxy of the master CLI */
+/* 0x0001.. to 0x8000 are reserved for ACCESS_* flags from cli-t.h */
+
+#define PCLI_F_PROMPT 0x10000
+#define PCLI_F_PAYLOAD 0x20000
+#define PCLI_F_RELOAD 0x40000 /* this is the "reload" stream, quits after displaying reload status */
+#define PCLI_F_TIMED 0x80000 /* the prompt shows the process' uptime */
+
+
+/* error types reported on the streams for more accurate reporting.
+ * Please also update the strm_et_show_flags() function below in case of changes.
+ */
+enum {
+ /* each value below is a distinct bit so it can be tested with a mask */
+ STRM_ET_NONE = 0x0000, /* no error yet, leave it to zero */
+ STRM_ET_QUEUE_TO = 0x0001, /* queue timeout */
+ STRM_ET_QUEUE_ERR = 0x0002, /* queue error (eg: full) */
+ STRM_ET_QUEUE_ABRT = 0x0004, /* aborted in queue by external cause */
+ STRM_ET_CONN_TO = 0x0008, /* connection timeout */
+ STRM_ET_CONN_ERR = 0x0010, /* connection error (eg: no server available) */
+ STRM_ET_CONN_ABRT = 0x0020, /* connection aborted by external cause (eg: abort) */
+ STRM_ET_CONN_RES = 0x0040, /* connection aborted due to lack of resources */
+ STRM_ET_CONN_OTHER = 0x0080, /* connection aborted for other reason (eg: 500) */
+ STRM_ET_DATA_TO = 0x0100, /* timeout during data phase */
+ STRM_ET_DATA_ERR = 0x0200, /* error during data phase */
+ STRM_ET_DATA_ABRT = 0x0400, /* data phase aborted by external cause */
+};
+
+/* This function is used to report flags in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *strm_et_show_flags(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags: one _() per STRM_ET_* bit, in declaration order */
+ _(STRM_ET_QUEUE_TO, _(STRM_ET_QUEUE_ERR, _(STRM_ET_QUEUE_ABRT,
+ _(STRM_ET_CONN_TO, _(STRM_ET_CONN_ERR, _(STRM_ET_CONN_ABRT,
+ _(STRM_ET_CONN_RES, _(STRM_ET_CONN_OTHER, _(STRM_ET_DATA_TO,
+ _(STRM_ET_DATA_ERR, _(STRM_ET_DATA_ABRT)))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+struct hlua;
+struct proxy;
+struct pendconn;
+struct session;
+struct server;
+struct task;
+struct sockaddr_storage;
+
+/* some external definitions */
+/* timing and volume accounting collected on a stream for logging purposes */
+struct strm_logs {
+ int logwait; /* log fields waiting to be collected : LW_* */
+ int level; /* log level to force + 1 if > 0, -1 = no log */
+ struct timeval accept_date; /* date of the stream's accept() in user date */
+ ullong accept_ts; /* date of the session's accept() in internal date (monotonic) */
+ long t_handshake; /* handshake duration, -1 if never occurs */
+ long t_idle; /* idle duration, -1 if never occurs */
+ ullong request_ts; /* date when the request arrives in internal date */
+ long t_queue; /* delay before the stream gets out of the connect queue, -1 if never occurs */
+ long t_connect; /* delay before the connect() to the server succeeds, -1 if never occurs */
+ long t_data; /* delay before the first data byte from the server ... */
+ unsigned long t_close; /* total stream duration */
+ unsigned long srv_queue_pos; /* number of streams de-queued while waiting for a connection slot on this server */
+ unsigned long prx_queue_pos; /* number of streams de-queued while waiting for a connection slot on this instance */
+ long long bytes_in; /* number of bytes transferred from the client to the server */
+ long long bytes_out; /* number of bytes transferred from the server to the client */
+};
+
+/* Main stream descriptor. A stream ties together the request/response
+ * channels (req/res), the frontend and backend stream connectors (scf/scb),
+ * the parent session, the backend proxy/server assignment, and the
+ * tracking/filtering/logging state. Individual fields are documented below.
+ */
+struct stream {
+ enum obj_type obj_type; /* object type == OBJ_TYPE_STREAM */
+ enum sc_state prev_conn_state; /* CS_ST*, copy of previous state of the server stream connector */
+
+ int16_t priority_class; /* priority class of the stream for the pending queue */
+ int32_t priority_offset; /* priority offset of the stream for the pending queue */
+
+ int flags; /* some flags describing the stream */
+ unsigned int uniq_id; /* unique ID used for the traces */
+ enum obj_type *target; /* target to use for this stream */
+
+ struct session *sess; /* the session this stream is attached to */
+
+ struct channel req; /* request channel */
+ struct channel res; /* response channel */
+
+ struct proxy *be; /* the proxy this stream depends on for the server side */
+
+ struct server *srv_conn; /* stream already has a slot on a server and is not in queue */
+ struct pendconn *pend_pos; /* if not NULL, points to the pending position in the pending queue */
+
+ struct http_txn *txn; /* current HTTP transaction being processed. Should become a list. */
+
+ struct task *task; /* the task associated with this stream */
+ unsigned int pending_events; /* the pending events not yet processed by the stream.
+ * This is a bit field of TASK_WOKEN_* */
+ int conn_retries; /* number of connect retries performed */
+ unsigned int conn_exp; /* wake up time for connect, queue, turn-around, ... */
+ unsigned int conn_err_type; /* first error detected, one of STRM_ET_* */
+ struct list list; /* position in the thread's streams list */
+ struct mt_list by_srv; /* position in server stream list */
+ struct list back_refs; /* list of users tracking this stream */
+ struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
+
+ uint64_t lat_time; /* total latency time experienced */
+ uint64_t cpu_time; /* total CPU time consumed */
+ struct freq_ctr call_rate; /* stream task call rate without making progress */
+
+ short store_count; /* number of entries in use in store[] below */
+ /* 2 unused bytes here */
+
+ struct {
+ struct stksess *ts;
+ struct stktable *table;
+ } store[8]; /* tracked stickiness values to store */
+
+ struct stkctr *stkctr; /* content-aware stick counters */
+
+ struct strm_flt strm_flt; /* current state of filters active on this stream */
+
+ char **req_cap; /* array of captures from the request (may be NULL) */
+ char **res_cap; /* array of captures from the response (may be NULL) */
+ struct vars vars_txn; /* list of variables for the txn scope. */
+ struct vars vars_reqres; /* list of variables for the request and resp scope. */
+
+ struct stconn *scf; /* frontend stream connector */
+ struct stconn *scb; /* backend stream connector */
+
+ struct strm_logs logs; /* logs for this stream */
+
+ void (*do_log)(struct stream *s); /* the function to call in order to log (or NULL) */
+ void (*srv_error)(struct stream *s, /* the function to call upon unrecoverable server errors (or NULL) */
+ struct stconn *sc);
+
+ int pcli_next_pid; /* next target PID to use for the CLI proxy */
+ int pcli_flags; /* flags for CLI proxy */
+ char pcli_payload_pat[8]; /* payload pattern for the CLI proxy */
+
+ struct ist unique_id; /* custom unique ID */
+
+ /* These two pointers are used to resume the execution of the rule lists. */
+ struct list *current_rule_list; /* this is used to store the current executed rule list. */
+ void *current_rule; /* this is used to store the current rule to be resumed. */
+ int rules_exp; /* expiration date for current rules execution */
+ int tunnel_timeout; /* NOTE(review): tunnel-phase timeout, unit not visible here — confirm (ticks?) */
+ const char *last_rule_file; /* last evaluated final rule's file (def: NULL) */
+ int last_rule_line; /* last evaluated final rule's line (def: 0) */
+
+ unsigned int stream_epoch; /* copy of stream_epoch when the stream was created */
+ struct hlua *hlua; /* lua runtime context */
+
+ /* Context */
+ struct {
+ struct resolv_requester *requester; /* owner of the resolution */
+ struct act_rule *parent; /* rule which requested this resolution */
+ char *hostname_dn; /* hostname being resolved, in domain name format */
+ int hostname_dn_len; /* size of hostname_dn */
+ /* 4 unused bytes here, recoverable via packing if needed */
+ } resolv_ctx; /* context information for DNS resolution */
+};
+
+#endif /* _HAPROXY_STREAM_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/stream.h b/include/haproxy/stream.h
new file mode 100644
index 0000000..a884007
--- /dev/null
+++ b/include/haproxy/stream.h
@@ -0,0 +1,404 @@
+/*
+ * include/haproxy/stream.h
+ * This file defines everything related to streams.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_STREAM_H
+#define _HAPROXY_STREAM_H
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/queue.h>
+#include <haproxy/session.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task-t.h>
+#include <haproxy/trace-t.h>
+
+extern struct trace_source trace_strm;
+
+/* Details about these events are defined in <src/stream.c> */
+#define STRM_EV_STRM_NEW (1ULL << 0)
+#define STRM_EV_STRM_FREE (1ULL << 1)
+#define STRM_EV_STRM_ERR (1ULL << 2)
+#define STRM_EV_STRM_ANA (1ULL << 3)
+#define STRM_EV_STRM_PROC (1ULL << 4)
+#define STRM_EV_CS_ST (1ULL << 5)
+#define STRM_EV_HTTP_ANA (1ULL << 6)
+#define STRM_EV_HTTP_ERR (1ULL << 7)
+#define STRM_EV_TCP_ANA (1ULL << 8)
+#define STRM_EV_TCP_ERR (1ULL << 9)
+#define STRM_EV_FLT_ANA (1ULL << 10)
+#define STRM_EV_FLT_ERR (1ULL << 11)
+
+#define IS_HTX_STRM(strm) ((strm)->flags & SF_HTX)
+
+extern struct pool_head *pool_head_stream;
+extern struct pool_head *pool_head_uniqueid;
+
+extern struct data_cb sess_conn_cb;
+
+struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer *input);
+void stream_free(struct stream *s);
+int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input);
+int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto);
+
+/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
+void stream_shutdown(struct stream *stream, int why);
+void stream_dump_and_crash(enum obj_type *obj, int rate);
+void strm_dump_to_buffer(struct buffer *buf, const struct stream *strm, const char *pfx, uint32_t anon_key);
+
+struct ist stream_generate_unique_id(struct stream *strm, struct list *format);
+
+void stream_process_counters(struct stream *s);
+void sess_change_server(struct stream *strm, struct server *newsrv);
+struct task *process_stream(struct task *t, void *context, unsigned int state);
+void default_srv_error(struct stream *s, struct stconn *sc);
+
+/* Update the stream's backend and server time stats */
+void stream_update_time_stats(struct stream *s);
+void stream_release_buffers(struct stream *s);
+int stream_buf_available(void *arg);
+
+/* returns the session this stream belongs to (pure accessor) */
+static inline struct session *strm_sess(const struct stream *strm)
+{
+ return strm->sess;
+}
+
+/* returns the frontend proxy this stream was initiated from */
+static inline struct proxy *strm_fe(const struct stream *strm)
+{
+ return strm->sess->fe;
+}
+
+/* returns the listener this stream was initiated from */
+static inline struct listener *strm_li(const struct stream *strm)
+{
+ return strm->sess->listener;
+}
+
+/* returns a pointer to the origin of the session which created this stream
+ * (an obj_type-tagged pointer)
+ */
+static inline enum obj_type *strm_orig(const struct stream *strm)
+{
+ return strm->sess->origin;
+}
+
+/* Remove the refcount from the stream to the tracked counters, and clear the
+ * pointer to ensure this is only performed once. The caller is responsible for
+ * ensuring that the pointer is valid first. We must be extremely careful not
+ * to touch the entries we inherited from the session.
+ */
+static inline void stream_store_counters(struct stream *s)
+{
+ void *ptr;
+ int i;
+ struct stksess *ts;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ ts = stkctr_entry(&s->stkctr[i]);
+ if (!ts)
+ continue;
+
+ /* this entry is also tracked at the session level: it was
+ * inherited from the session, so leave it alone here (see the
+ * function comment above).
+ */
+ if (stkctr_entry(&s->sess->stkctr[i]))
+ continue;
+
+ ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
+ if (ptr) {
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* drop one concurrent connection, never underflowing */
+ if (stktable_data_cast(ptr, std_t_uint) > 0)
+ stktable_data_cast(ptr, std_t_uint)--;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(s->stkctr[i].table, ts, 0);
+ }
+ /* clear the tracking pointer so this runs only once, then let
+ * the entry be reaped if it already expired.
+ */
+ stkctr_set_entry(&s->stkctr[i], NULL);
+ stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
+ }
+}
+
+/* Remove the refcount from the stream counters tracked at the content level if
+ * any, and clear the pointer to ensure this is only performed once. The caller
+ * is responsible for ensuring that the pointer is valid first. We must be
+ * extremely careful not to touch the entries we inherited from the session.
+ */
+static inline void stream_stop_content_counters(struct stream *s)
+{
+ struct stksess *ts;
+ void *ptr;
+ int i;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ ts = stkctr_entry(&s->stkctr[i]);
+ if (!ts)
+ continue;
+
+ /* entry inherited from the session: must not be touched here */
+ if (stkctr_entry(&s->sess->stkctr[i]))
+ continue;
+
+ /* only release counters that were tracked at the content level */
+ if (!(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_CONTENT))
+ continue;
+
+ ptr = stktable_data_ptr(s->stkctr[i].table, ts, STKTABLE_DT_CONN_CUR);
+ if (ptr) {
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* drop one concurrent connection, never underflowing */
+ if (stktable_data_cast(ptr, std_t_uint) > 0)
+ stktable_data_cast(ptr, std_t_uint)--;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(s->stkctr[i].table, ts, 0);
+ }
+ /* clear the tracking pointer so this runs only once, then let
+ * the entry be reaped if it already expired.
+ */
+ stkctr_set_entry(&s->stkctr[i], NULL);
+ stksess_kill_if_expired(s->stkctr[i].table, ts, 1);
+ }
+}
+
+/* Increase total and concurrent connection count for stick entry <ts> of table
+ * <t>. The caller is responsible for ensuring that <t> and <ts> are valid
+ * pointers, and for calling this only once per connection.
+ */
+static inline void stream_start_counters(struct stktable *t, struct stksess *ts)
+{
+ void *ptr;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* one more concurrent connection on this entry */
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
+ if (ptr)
+ stktable_data_cast(ptr, std_t_uint)++;
+
+ /* one more cumulated connection */
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CNT);
+ if (ptr)
+ stktable_data_cast(ptr, std_t_uint)++;
+
+ /* account for it in the connection rate over the table's period */
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_RATE);
+ if (ptr)
+ update_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_CONN_RATE].u, 1);
+ /* refresh the entry's expiration date while we hold the lock */
+ if (tick_isset(t->expire))
+ ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(t, ts, 0);
+}
+
+/* Enable tracking of stream counters as <stkctr> on stksess <ts>. The caller is
+ * responsible for ensuring that <t> and <ts> are valid pointers. Some controls
+ * are performed to ensure the state can still change.
+ */
+static inline void stream_track_stkctr(struct stkctr *ctr, struct stksess *ts)
+{
+ /* the counter already tracks an entry: keep it and do not account for
+ * a new connection on it. NOTE(review): original comment questioned
+ * this test ("Why this test ????") — confirm callers never expect
+ * re-tracking to switch the tracked entry.
+ */
+ if (stkctr_entry(ctr))
+ return;
+
+ ctr->table = t;
+ stkctr_set_entry(ctr, ts);
+ stream_start_counters(t, ts);
+}
+
+/* Increase the number of cumulated HTTP requests in the tracked counters */
+static inline void stream_inc_http_req_ctr(struct stream *s)
+{
+ int i;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ /* when the stream-level counter has no tracked entry, fall
+ * back to the session-level one.
+ */
+ if (!stkctr_inc_http_req_ctr(&s->stkctr[i]))
+ stkctr_inc_http_req_ctr(&s->sess->stkctr[i]);
+ }
+}
+
+/* Increase the number of cumulated HTTP requests in the backend's tracked
+ * counters. We don't look up the session since it cannot happen in the backend.
+ */
+static inline void stream_inc_be_http_req_ctr(struct stream *s)
+{
+ int i;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ /* only counters flagged STKCTR_TRACK_BACKEND are concerned */
+ if (!stkctr_entry(&s->stkctr[i]) || !(stkctr_flags(&s->stkctr[i]) & STKCTR_TRACK_BACKEND))
+ continue;
+
+ stkctr_inc_http_req_ctr(&s->stkctr[i]);
+ }
+}
+
+/* Increase the number of cumulated failed HTTP requests in the tracked
+ * counters. Only 4xx requests should be counted here so that we can
+ * distinguish between errors caused by client behaviour and other ones.
+ * Note that even 404 are interesting because they're generally caused by
+ * vulnerability scans.
+ */
+static inline void stream_inc_http_err_ctr(struct stream *s)
+{
+ int i;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ /* when the stream-level counter has no tracked entry, fall
+ * back to the session-level one.
+ */
+ if (!stkctr_inc_http_err_ctr(&s->stkctr[i]))
+ stkctr_inc_http_err_ctr(&s->sess->stkctr[i]);
+ }
+}
+
+/* Increase the number of cumulated failed HTTP responses in the tracked
+ * counters. Only some 5xx responses should be counted here so that we can
+ * distinguish between server failures and errors triggered by the client
+ * (i.e. 501 and 505 may be triggered and must be ignored).
+ */
+static inline void stream_inc_http_fail_ctr(struct stream *s)
+{
+ int i;
+
+ if (unlikely(!s->stkctr)) // pool not allocated yet
+ return;
+
+ for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+ /* when the stream-level counter has no tracked entry, fall
+ * back to the session-level one.
+ */
+ if (!stkctr_inc_http_fail_ctr(&s->stkctr[i]))
+ stkctr_inc_http_fail_ctr(&s->sess->stkctr[i]);
+ }
+}
+
+/* attaches stream <strm> to server <srv>: links it into the server's
+ * per-thread stream list and records the server in strm->srv_conn.
+ */
+static inline void stream_add_srv_conn(struct stream *strm, struct server *srv)
+{
+ /* note: this inserts in reverse order but we do not care, it's only
+ * used for massive kills (i.e. almost never). MT_LIST_INSERT() is a bit
+ * faster than MT_LIST_APPEND under contention due to a faster recovery
+ * from a conflict with an adjacent MT_LIST_DELETE, and using it improves
+ * the performance by about 3% on 32-cores.
+ */
+ MT_LIST_INSERT(&srv->per_thr[tid].streams, &strm->by_srv);
+ HA_ATOMIC_STORE(&strm->srv_conn, srv);
+}
+
+/* detaches stream <strm> from its server, if any: removes it from the
+ * server's stream list and clears strm->srv_conn. Safe to call when no
+ * server is attached.
+ */
+static inline void stream_del_srv_conn(struct stream *strm)
+{
+ struct server *srv = strm->srv_conn;
+
+ if (!srv)
+ return;
+
+ MT_LIST_DELETE(&strm->by_srv);
+ HA_ATOMIC_STORE(&strm->srv_conn, NULL);
+}
+
+/* initializes the stream's server-side linkage: no server attached yet */
+static inline void stream_init_srv_conn(struct stream *strm)
+{
+ strm->srv_conn = NULL;
+ MT_LIST_INIT(&strm->by_srv);
+}
+
+static inline void stream_choose_redispatch(struct stream *s)
+{
+ /* If the "redispatch" option is set on the backend, we are allowed to
+ * retry on another server. By default this redispatch occurs on the
+ * last retry, but if configured we allow redispatches to occur on
+ * configurable intervals, e.g. on every retry. In order to achieve this,
+ * we must mark the stream unassigned, and eventually clear the DIRECT
+ * bit to ignore any persistence cookie. We won't count a retry nor a
+ * redispatch yet, because this will depend on what server is selected.
+ * If the connection is not persistent, the balancing algorithm is not
+ * determinist (round robin) and there is more than one active server,
+ * we accept to perform an immediate redispatch without waiting since
+ * we don't care about this particular server.
+ */
+ if (objt_server(s->target) &&
+ (s->be->options & PR_O_REDISP) && !(s->flags & SF_FORCE_PRST) &&
+ ((__objt_server(s->target)->cur_state < SRV_ST_RUNNING) ||
+ (((s->be->redispatch_after > 0) &&
+ (s->conn_retries % s->be->redispatch_after == 0)) ||
+ ((s->be->redispatch_after < 0) &&
+ (s->conn_retries % (s->be->conn_retries + 1 + s->be->redispatch_after) == 0))) ||
+ (!(s->flags & SF_DIRECT) && s->be->srv_act > 1 &&
+ ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI)))) {
+ /* redispatch: release the current server assignment and go
+ * back to the REQ state so that a new server gets picked.
+ */
+ sess_change_server(s, NULL);
+ if (may_dequeue_tasks(objt_server(s->target), s->be))
+ process_srv_queue(objt_server(s->target));
+
+ sockaddr_free(&s->scb->dst);
+ s->flags &= ~(SF_DIRECT | SF_ASSIGNED);
+ s->scb->state = SC_ST_REQ;
+ } else {
+ /* no redispatch: count a retry and keep the same assignment */
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.retries);
+ _HA_ATOMIC_INC(&s->be->be_counters.retries);
+ s->scb->state = SC_ST_ASS;
+ }
+
+}
+
+/*
+ * This function only has to be called once after a wakeup event in case of
+ * suspected timeout. It controls the stream connection timeout and sets
+ * si->flags accordingly. It does NOT close anything, as this timeout may
+ * be used for any purpose. It returns 1 if the timeout fired, otherwise
+ * zero.
+ */
+static inline int stream_check_conn_timeout(struct stream *s)
+{
+ if (tick_is_expired(s->conn_exp, now_ms)) {
+ /* only flag the expiration; the caller decides how to react */
+ s->flags |= SF_CONN_EXP;
+ return 1;
+ }
+ return 0;
+}
+
+int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout);
+void stream_retnclose(struct stream *s, const struct buffer *msg);
+void sess_set_term_flags(struct stream *s);
+void stream_abort(struct stream *s);
+
+void service_keywords_register(struct action_kw_list *kw_list);
+struct action_kw *service_find(const char *kw);
+void list_services(FILE *out);
+
+#endif /* _HAPROXY_STREAM_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/task-t.h b/include/haproxy/task-t.h
new file mode 100644
index 0000000..ea52de9
--- /dev/null
+++ b/include/haproxy/task-t.h
@@ -0,0 +1,182 @@
+/*
+ * include/haproxy/task-t.h
+ * Macros, variables and structures for task management.
+ *
+ * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TASK_T_H
+#define _HAPROXY_TASK_T_H
+
+#include <sys/time.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/show_flags-t.h>
+#include <haproxy/thread-t.h>
+
+/* values for task->state (32 bits).
+ * Please also update the task_show_state() function below in case of changes.
+ */
+#define TASK_SLEEPING 0x00000000 /* task sleeping */
+#define TASK_RUNNING 0x00000001 /* the task is currently running */
+/* unused 0x00000002 */
+#define TASK_QUEUED 0x00000004 /* The task has been (re-)added to the run queue */
+/* unused 0x00000008 */
+#define TASK_SELF_WAKING 0x00000010 /* task/tasklet found waking itself */
+#define TASK_KILLED 0x00000020 /* task/tasklet killed, may now be freed */
+#define TASK_IN_LIST 0x00000040 /* tasklet is in a tasklet list */
+#define TASK_HEAVY 0x00000080 /* this task/tasklet is extremely heavy */
+
+#define TASK_WOKEN_INIT 0x00000100 /* woken up for initialisation purposes */
+#define TASK_WOKEN_TIMER 0x00000200 /* woken up because of expired timer */
+#define TASK_WOKEN_IO 0x00000400 /* woken up because of completed I/O */
+#define TASK_WOKEN_SIGNAL 0x00000800 /* woken up by a system signal */
+#define TASK_WOKEN_MSG 0x00001000 /* woken up by another task's message */
+#define TASK_WOKEN_RES 0x00002000 /* woken up because of available resource */
+#define TASK_WOKEN_OTHER 0x00004000 /* woken up for an unspecified reason */
+
+/* use this to check a task state or to clean it up before queueing */
+#define TASK_WOKEN_ANY (TASK_WOKEN_OTHER|TASK_WOKEN_INIT|TASK_WOKEN_TIMER| \
+ TASK_WOKEN_IO|TASK_WOKEN_SIGNAL|TASK_WOKEN_MSG| \
+ TASK_WOKEN_RES)
+
+#define TASK_F_TASKLET 0x00008000 /* nature of this task: 0=task 1=tasklet */
+#define TASK_F_USR1 0x00010000 /* preserved user flag 1, application-specific, def:0 */
+/* unused: 0x20000..0x80000000 */
+
+/* These flags are persistent across scheduler calls */
+#define TASK_PERSISTENT (TASK_SELF_WAKING | TASK_KILLED | \
+ TASK_HEAVY | TASK_F_TASKLET | TASK_F_USR1)
+
+/* This function is used to report state in debugging tools. Please reflect
+ * below any single-bit flag addition above in the same order via the
+ * __APPEND_FLAG macro. The new end of the buffer is returned.
+ */
+static forceinline char *task_show_state(char *buf, size_t len, const char *delim, uint flg)
+{
+#define _(f, ...) __APPEND_FLAG(buf, len, delim, flg, f, #f, __VA_ARGS__)
+ /* prologue */
+ _(0);
+ /* flags: one _() per TASK_* bit, in declaration order */
+ _(TASK_RUNNING, _(TASK_QUEUED, _(TASK_SELF_WAKING,
+ _(TASK_KILLED, _(TASK_IN_LIST, _(TASK_HEAVY, _(TASK_WOKEN_INIT,
+ _(TASK_WOKEN_TIMER, _(TASK_WOKEN_IO, _(TASK_WOKEN_SIGNAL,
+ _(TASK_WOKEN_MSG, _(TASK_WOKEN_RES, _(TASK_WOKEN_OTHER,
+ _(TASK_F_TASKLET, _(TASK_F_USR1)))))))))))))));
+ /* epilogue */
+ _(~0U);
+ return buf;
+#undef _
+}
+
+/* these wakeup types are used to indicate how a task/tasklet was woken up, for
+ * debugging purposes.
+ */
+enum {
+ WAKEUP_TYPE_UNSET = 0, /* never woken up yet */
+ WAKEUP_TYPE_TASK_WAKEUP,
+ WAKEUP_TYPE_TASK_INSTANT_WAKEUP,
+ WAKEUP_TYPE_TASKLET_WAKEUP,
+ WAKEUP_TYPE_TASKLET_WAKEUP_AFTER,
+ WAKEUP_TYPE_TASK_SCHEDULE,
+ WAKEUP_TYPE_TASK_QUEUE,
+ WAKEUP_TYPE_APPCTX_WAKEUP,
+};
+
+/* registration of a task to be woken up (or purged) upon an event; used by
+ * the Lua integration (see purge_me below).
+ */
+struct notification {
+ struct list purge_me; /* Part of the list of signals to be purged in the
+ case of the LUA execution stack crash. */
+ struct list wake_me; /* Part of list of signals to be targeted if an
+ event occurs. */
+ struct task *task; /* The task to be wake if an event occurs. */
+ __decl_thread(HA_SPINLOCK_T lock); /* NOTE(review): presumably protects the list membership — confirm */
+};
+
+#ifdef DEBUG_TASK
+/* prev_caller keeps a copy of the previous value of the <caller> field. */
+#define TASK_DEBUG_STORAGE \
+ struct { \
+ const struct ha_caller *prev_caller; \
+ } debug
+#else
+#define TASK_DEBUG_STORAGE
+#endif
+
+/* This part is common between struct task and struct tasklet so that tasks
+ * can be used as-is as tasklets.
+ *
+ * Note that the process() function must ALWAYS return the task/tasklet's
+ * pointer if the task/tasklet remains valid, and return NULL if it has been
+ * deleted. The scheduler relies on this to know if it should update its state
+ * on return.
+ */
+#define TASK_COMMON \
+ struct { \
+ unsigned int state; /* task state : bitfield of TASK_ */ \
+ int tid; /* tid of task/tasklet. <0 = local for tasklet, unbound for task */ \
+ struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
+ void *context; /* the task's context */ \
+ const struct ha_caller *caller; /* call place of last wakeup(); 0 on init, -1 on free */ \
+ uint32_t wake_date; /* date of the last task wakeup */ \
+ unsigned int calls; /* number of times process was called */ \
+ TASK_DEBUG_STORAGE; \
+ }
+
+/* The base for all tasks: TASK_COMMON plus the run-queue/wait-queue nodes,
+ * the expiration date and the priority.
+ */
+struct task {
+ TASK_COMMON; /* must be at the beginning! */
+ struct eb32_node rq; /* ebtree node used to hold the task in the run queue */
+ /* WARNING: the struct task is often aliased as a struct tasklet when
+ * it is NOT in the run queue. The tasklet has its struct list here
+ * where rq starts and this works because both are exclusive. Never
+ * ever reorder these fields without taking this into account!
+ */
+ struct eb32_node wq; /* ebtree node used to hold the task in the wait queue */
+ int expire; /* next expiration date for this task, in ticks */
+ short nice; /* task prio from -1024 to +1024 */
+ /* 16-bit hole here */
+};
+
+/* lightweight tasks, without priority, mainly used for I/Os */
+struct tasklet {
+ TASK_COMMON; /* must be at the beginning! */
+ struct list list; /* tasklet list linkage; overlays task->rq (see warning below) */
+ /* WARNING: the struct task is often aliased as a struct tasklet when
+ * it is not in the run queue. The task has its struct rq here where
+ * list starts and this works because both are exclusive. Never ever
+ * reorder these fields without taking this into account!
+ */
+};
+
+/*
+ * The task callback (->process) is responsible for updating ->expire. It must
+ * return a pointer to the task itself, except if the task has been deleted, in
+ * which case it returns NULL so that the scheduler knows it must not check the
+ * expire timer. The scheduler will requeue the task at the proper location.
+ */
+
+
+#endif /* _HAPROXY_TASK_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
new file mode 100644
index 0000000..1c9c45f
--- /dev/null
+++ b/include/haproxy/task.h
@@ -0,0 +1,857 @@
+/*
+ * include/haproxy/task.h
+ * Functions for task management.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TASK_H
+#define _HAPROXY_TASK_H
+
+
+#include <sys/time.h>
+
+#include <import/eb32tree.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/intops.h>
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+#include <haproxy/task-t.h>
+#include <haproxy/thread.h>
+#include <haproxy/ticks.h>
+
+
+/* Principle of the wait queue.
+ *
+ * We want to be able to tell whether an expiration date is before or after the
+ * current time <now>. We KNOW that expiration dates are never too far apart,
+ * because they are measured in ticks (milliseconds). We also know that almost
+ * all dates will be in the future, and that a very small part of them will be
+ * in the past, they are the ones which have expired since last time we checked
+ * them. Using ticks, we know if a date is in the future or in the past, but we
+ * cannot use that to store sorted information because that reference changes
+ * all the time.
+ *
+ * We'll use the fact that the time wraps to sort timers. Timers above <now>
+ * are in the future, timers below <now> are in the past. Here, "above" and
+ * "below" are to be considered modulo 2^31.
+ *
+ * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
+ * lookup values starting from X to only expire tasks between <now> - 2^31 and
+ * <now>. If the end of the tree is reached while walking over it, we simply
+ * loop back to the beginning. That way, we have no problem keeping sorted
+ * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
+ * keys in the tree always reflect their real position, none can be infinite.
+ * This reduces the number of checks to be performed.
+ *
+ * Another nice optimisation is to allow a timer to stay at an old place in the
+ * queue as long as it's not further than the real expiration date. That way,
+ * we use the tree as a place holder for a minorant of the real expiration
+ * date. Since we have a very low chance of hitting a timeout anyway, we can
+ * bounce the nodes to their right place when we scan the tree if we encounter
+ * a misplaced node once in a while. This even allows us not to remove the
+ * infinite timers from the wait queue.
+ *
+ * So, to summarize, we have :
+ * - node->key always defines current position in the wait queue
+ * - timer is the real expiration date (possibly infinite)
+ * - node->key is always before or equal to timer
+ *
+ * The run queue works similarly to the wait queue except that the current date
+ * is replaced by an insertion counter which can also wrap without any problem.
+ */
+
+/* The farthest we can look back in a timer tree */
+#define TIMER_LOOK_BACK (1U << 31)
+
+/* tasklets are recognized via the TASK_F_TASKLET flag in their state */
+#define TASK_IS_TASKLET(t) ((t)->state & TASK_F_TASKLET)
+
+/* a few exported variables */
+extern struct pool_head *pool_head_task;
+extern struct pool_head *pool_head_tasklet;
+extern struct pool_head *pool_head_notification;
+
+__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED(64));
+
+void __tasklet_wakeup_on(struct tasklet *tl, int thr);
+struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl);
+void task_kill(struct task *t);
+void tasklet_kill(struct tasklet *t);
+void __task_wakeup(struct task *t);
+void __task_queue(struct task *task, struct eb_root *wq);
+
+unsigned int run_tasks_from_lists(unsigned int budgets[]);
+
+/*
+ * This does 3 things :
+ * - wake up all expired tasks
+ * - call all runnable tasks
+ * - return the date of next event in <next> or eternity.
+ */
+
+void process_runnable_tasks(void);
+
+/*
+ * Extract all expired timers from the timer queue, and wake up all
+ * associated tasks.
+ */
+void wake_expired_tasks(void);
+
+/* Checks the next timer for the current thread by looking into its own timer
+ * list and the global one. It may return TICK_ETERNITY if no timer is present.
+ * Note that the next timer might very well be slightly in the past.
+ */
+int next_timer_expiry(void);
+
+/*
+ * Delete every task before running the master polling loop
+ */
+void mworker_cleantasks(void);
+
+/* returns the number of running tasks+tasklets on the whole process, i.e. the
+ * sum of all per-thread rq_total counters. Note that this *is* racy since a
+ * task may move from the global to a local queue for example and be counted
+ * twice. This is only for statistics reporting.
+ */
+static inline int total_run_queues()
+{
+ int thr, ret = 0;
+
+ for (thr = 0; thr < global.nbthread; thr++)
+ ret += _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].rq_total);
+ return ret;
+}
+
+/* returns the number of allocated tasks across all threads, i.e. the sum of
+ * all per-thread nb_tasks counters. Note that this *is* racy since some
+ * threads might be updating their counts while we're looking, but this is
+ * only for statistics reporting.
+ */
+static inline int total_allocated_tasks()
+{
+ int thr, ret;
+
+ for (thr = ret = 0; thr < global.nbthread; thr++)
+ ret += _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].nb_tasks);
+ return ret;
+}
+
+/* returns the number of running niced tasks+tasklets on the whole process,
+ * summed over the thread *groups* (niced_tasks is accounted per group, not
+ * per thread). Note that this *is* racy since a task may move from the
+ * global to a local queue for example and be counted twice. This is only
+ * for statistics reporting.
+ */
+static inline int total_niced_running_tasks()
+{
+ int tgrp, ret = 0;
+
+ for (tgrp = 0; tgrp < global.nbtgroups; tgrp++)
+ ret += _HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp].niced_tasks);
+ return ret;
+}
+
+/* returns non-zero if task <t> is linked into the run queue, otherwise 0 */
+static inline int task_in_rq(struct task *t)
+{
+ /* Check if leaf_p is NULL, in which case the task is not in the
+ * runqueue tree; a non-NULL value means it is queued there.
+ */
+ return t->rq.node.leaf_p != NULL;
+}
+
+/* returns non-zero if task <t> is linked into the wait queue, otherwise 0 */
+static inline int task_in_wq(struct task *t)
+{
+ return t->wq.node.leaf_p != NULL;
+}
+
+/* returns true if the current thread has some work to do: a non-empty local
+ * or shared run queue, a queued tasklet class, or entries in the shared
+ * tasklet list. The individual tests are combined with bitwise ORs.
+ */
+static inline int thread_has_tasks(void)
+{
+ return ((int)!eb_is_empty(&th_ctx->rqueue) |
+ (int)!eb_is_empty(&th_ctx->rqueue_shared) |
+ (int)!!th_ctx->tl_class_mask |
+ (int)!MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list));
+}
+
+/* puts the task <t> in the run queue with reason flags <f> */
+/* This will put the task in the local runqueue if the task is only runnable
+ * by the current thread, in the global runqueue otherwise. With DEBUG_TASK,
+ * the <file>:<line> from the call place are stored into the task for tracing
+ * purposes.
+ */
+#define task_wakeup(t, f) \
+ _task_wakeup(t, f, MK_CALLER(WAKEUP_TYPE_TASK_WAKEUP, 0, 0))
+
+static inline void _task_wakeup(struct task *t, unsigned int f, const struct ha_caller *caller)
+{
+ unsigned int state;
+
+ state = _HA_ATOMIC_OR_FETCH(&t->state, f);
+ while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
+ if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&t->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&t->debug.prev_caller, caller);
+#endif
+ }
+ __task_wakeup(t);
+ break;
+ }
+ }
+}
+
+/* Atomically drop the TASK_RUNNING bit while ensuring that any wakeup that
+ * happened since the flag was set will result in the task being queued (if
+ * it wasn't already). This is used to safely drop the flag from within the
+ * scheduler. The flag <f> is combined with existing flags before the test so
+ * that it's possible to unconditionally wakeup the task and drop the RUNNING
+ * flag if needed.
+ */
+static inline void task_drop_running(struct task *t, unsigned int f)
+{
+ unsigned int state, new_state;
+
+ state = _HA_ATOMIC_LOAD(&t->state);
+
+ while (1) {
+ new_state = state | f;
+ if (new_state & TASK_WOKEN_ANY)
+ new_state |= TASK_QUEUED;
+
+ if (_HA_ATOMIC_CAS(&t->state, &state, new_state & ~TASK_RUNNING))
+ break;
+ __ha_cpu_relax();
+ }
+
+ if ((new_state & ~state) & TASK_QUEUED)
+ __task_wakeup(t); /* we're the ones who set TASK_QUEUED: requeue */
+}
+
+/*
+ * Unlink the task from the wait queue by deleting its wq tree node. A pointer
+ * to the task itself is returned. The task *must* already be in the wait
+ * queue before calling this function. If unsure, use the safer
+ * task_unlink_wq() function.
+ */
+static inline struct task *__task_unlink_wq(struct task *t)
+{
+ eb32_delete(&t->wq);
+ return t;
+}
+
+/* remove a task from its wait queue. It may either be the local wait queue if
+ * the task is bound to a single thread (tid >= 0) or the global queue
+ * (tid < 0). If the task uses the shared wait queue, the global wait queue
+ * lock is taken around the unlink. Returns the task itself.
+ */
+static inline struct task *task_unlink_wq(struct task *t)
+{
+ unsigned long locked;
+
+ if (likely(task_in_wq(t))) {
+ locked = t->tid < 0; /* shared wait queue => take the global lock */
+ BUG_ON(t->tid >= 0 && t->tid != tid && !(global.mode & MODE_STOPPING));
+ if (locked)
+ HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
+ __task_unlink_wq(t);
+ if (locked)
+ HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
+ }
+ return t;
+}
+
+/* Place <task> into the wait queue, where it may already be. If the expiration
+ * timer is infinite, do nothing and rely on wake_expired_task to clean up.
+ * If the task uses a shared wait queue (tid < 0), it's queued into the global
+ * wait queue, protected by the global wq_lock, otherwise it necessarily
+ * belongs to the current thread's wait queue and is queued without locking.
+ */
+#define task_queue(t) \
+ _task_queue(t, MK_CALLER(WAKEUP_TYPE_TASK_QUEUE, 0, 0))
+
+static inline void _task_queue(struct task *task, const struct ha_caller *caller)
+{
+ /* If we already have a place in the wait queue no later than the
+ * timeout we're trying to set, we'll stay there, because it is very
+ * unlikely that we will reach the timeout anyway. If the timeout
+ * has been disabled, it's useless to leave the queue as well. We'll
+ * rely on wake_expired_tasks() to catch the node and move it to the
+ * proper place should it ever happen. Finally we only add the task
+ * to the queue if it was not there or if it was further than what
+ * we want.
+ */
+ if (!tick_isset(task->expire))
+ return;
+
+#ifdef USE_THREAD
+ if (task->tid < 0) {
+ HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
+ __task_queue(task, &tg_ctx->timers);
+ }
+ HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
+ } else
+#endif
+ {
+ BUG_ON(task->tid != tid);
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
+ __task_queue(task, &th_ctx->timers);
+ }
+ }
+}
+
+/* Change the thread affinity of a task to <thr>, which may either be a valid
+ * thread number from 0 to nbthread-1, or a negative value to allow the task
+ * to run on any thread.
+ *
+ * This may only be done from within the running task itself or during its
+ * initialization. It will unqueue and requeue the task from the wait queue
+ * if it was in it. This is safe against a concurrent task_queue() call because
+ * task_queue() itself will unlink again if needed after taking into account
+ * the new tid.
+ */
+static inline void task_set_thread(struct task *t, int thr)
+{
+#ifndef USE_THREAD
+ /* no shared queue without threads */
+ thr = 0;
+#endif
+ if (unlikely(task_in_wq(t))) {
+ task_unlink_wq(t);
+ t->tid = thr;
+ task_queue(t);
+ }
+ else {
+ t->tid = thr;
+ }
+}
+
+/* schedules tasklet <tl> to run onto thread <thr> or the current thread if
+ * <thr> is negative. Note that it is illegal to wakeup a foreign tasklet if
+ * its tid is negative and it is illegal to self-assign a tasklet that was
+ * at least once scheduled on a specific thread. With DEBUG_TASK, the
+ * <file>:<line> from the call place are stored into the tasklet for tracing
+ * purposes.
+ */
+#define tasklet_wakeup_on(tl, thr) \
+ _tasklet_wakeup_on(tl, thr, MK_CALLER(WAKEUP_TYPE_TASKLET_WAKEUP, 0, 0))
+
+static inline void _tasklet_wakeup_on(struct tasklet *tl, int thr, const struct ha_caller *caller)
+{
+ unsigned int state = tl->state;
+
+ do {
+ /* do nothing if someone else already added it */
+ if (state & TASK_IN_LIST)
+ return;
+ } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_IN_LIST));
+
+ /* at this point we're the first ones to add this task to the list */
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&tl->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed tasklet */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&tl->debug.prev_caller, caller);
+#endif
+ }
+
+ if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
+ tl->wake_date = now_mono_time(); /* record wakeup date when profiling */
+ __tasklet_wakeup_on(tl, thr);
+}
+
+/* schedules tasklet <tl> to run onto the thread designated by tl->tid, which
+ * is either its owner thread if >= 0 or the current thread if < 0. When
+ * DEBUG_TASK is set, the <file>:<line> from the call place are stored into the
+ * tasklet for tracing purposes.
+ */
+#define tasklet_wakeup(tl) \
+ _tasklet_wakeup_on(tl, (tl)->tid, MK_CALLER(WAKEUP_TYPE_TASKLET_WAKEUP, 0, 0))
+
+/* instantly wakes up task <t> on its owner thread even if it's not the current
+ * one, bypassing the run queue. The purpose is to be able to avoid contention
+ * in the global run queue for massively remote tasks (e.g. queue) when there's
+ * no value in passing the task again through the priority ordering since it has
+ * already been subject to it once (e.g. before entering process_stream). The
+ * task goes directly into the shared mt_list as a tasklet and will run as
+ * TL_URGENT. Great care is taken to be certain it's not queued nor running
+ * already.
+ */
+#define task_instant_wakeup(t, f) \
+ _task_instant_wakeup(t, f, MK_CALLER(WAKEUP_TYPE_TASK_INSTANT_WAKEUP, 0, 0))
+
+static inline void _task_instant_wakeup(struct task *t, unsigned int f, const struct ha_caller *caller)
+{
+ int thr = t->tid;
+ unsigned int state;
+
+ if (thr < 0)
+ thr = tid; /* unbound task: wake it on the calling thread */
+
+ /* first, let's update the task's state with the wakeup condition */
+ state = _HA_ATOMIC_OR_FETCH(&t->state, f);
+
+ /* next we need to make sure the task was not/will not be added to the
+ * run queue because the tasklet list's mt_list uses the same storage
+ * as the task's run_queue.
+ */
+ do {
+ /* do nothing if someone else already added it */
+ if (state & (TASK_QUEUED|TASK_RUNNING))
+ return;
+ } while (!_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED));
+
+ BUG_ON_HOT(task_in_rq(t));
+
+ /* at this point we're the first ones to add this task to the list */
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&t->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&t->debug.prev_caller, caller);
+#endif
+ }
+
+ if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
+ t->wake_date = now_mono_time(); /* record wakeup date when profiling */
+ __tasklet_wakeup_on((struct tasklet *)t, thr);
+}
+
+/* schedules tasklet <tl> to run immediately after the current one is done.
+ * <tl> will be queued after entry <head>, or at the head of the task list.
+ * Returns the new head to be used to queue future tasks. This is used to
+ * insert multiple entries at the head of the tasklet list, typically to
+ * transfer processing from a tasklet to another one or a set of other ones.
+ * If <head> is NULL, the tasklet list of <thr> thread will be used.
+ * With DEBUG_TASK, the <file>:<line> from the call place are stored into the
+ * tasklet for tracing purposes.
+ */
+#define tasklet_wakeup_after(head, tl) \
+ _tasklet_wakeup_after(head, tl, MK_CALLER(WAKEUP_TYPE_TASKLET_WAKEUP_AFTER, 0, 0))
+
+static inline struct list *_tasklet_wakeup_after(struct list *head, struct tasklet *tl,
+ const struct ha_caller *caller)
+{
+ unsigned int state = tl->state;
+
+ do {
+ /* do nothing if someone else already added it */
+ if (state & TASK_IN_LIST)
+ return head;
+ } while (!_HA_ATOMIC_CAS(&tl->state, &state, state | TASK_IN_LIST));
+
+ /* at this point we're the first one to add this task to the list */
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&tl->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed tasklet */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&tl->debug.prev_caller, caller);
+#endif
+ }
+
+ if (th_ctx->flags & TH_FL_TASK_PROFILING)
+ tl->wake_date = now_mono_time(); /* record wakeup date when profiling */
+ return __tasklet_wakeup_after(head, tl);
+}
+
+/* This macro shows the current function name and the last known caller of the
+ * task (or tasklet) wakeup.
+ * NOTE(review): when no caller was recorded, NULL is passed to printf's %s,
+ * which glibc renders as "(null)" but is formally undefined behavior; this
+ * only exists in DEBUG_TASK builds.
+ */
+#ifdef DEBUG_TASK
+#define DEBUG_TASK_PRINT_CALLER(t) do { \
+ const struct ha_caller *__caller = (t)->caller; \
+ printf("%s woken up from %s(%s:%d)\n", __FUNCTION__, \
+ __caller ? __caller->func : NULL, \
+ __caller ? __caller->file : NULL, \
+ __caller ? __caller->line : 0); \
+} while (0)
+#else
+#define DEBUG_TASK_PRINT_CALLER(t) do { } while (0)
+#endif
+
+
+/* Try to remove a tasklet from the list. This call is inherently racy and may
+ * only be performed on the thread that was supposed to dequeue this tasklet.
+ * This way it is safe to call MT_LIST_DELETE without first removing the
+ * TASK_IN_LIST bit, which must absolutely be removed afterwards in case
+ * another thread would want to wake this tasklet up in parallel.
+ */
+static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
+{
+ if (MT_LIST_DELETE(list_to_mt_list(&t->list))) {
+ _HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
+ /* drop the run queue count on the owner thread (ours if unbound) */
+ _HA_ATOMIC_DEC(&ha_thread_ctx[t->tid >= 0 ? t->tid : tid].rq_total);
+ }
+}
+
+/*
+ * Initialize a new task. The bare minimum is performed: queue pointers,
+ * state, counters and timer are reset. The task is returned. This function
+ * should not be used outside of task_new(). If the thread ID <tid> is < 0,
+ * the task may run on any thread.
+ */
+static inline struct task *task_init(struct task *t, int tid)
+{
+ t->wq.node.leaf_p = NULL;
+ t->rq.node.leaf_p = NULL;
+ t->state = TASK_SLEEPING;
+#ifndef USE_THREAD
+ /* no shared wq without threads */
+ tid = 0;
+#endif
+ t->tid = tid;
+ t->nice = 0;
+ t->calls = 0;
+ t->wake_date = 0;
+ t->expire = TICK_ETERNITY;
+ t->caller = NULL;
+ return t;
+}
+
+/* Initialize a new tasklet. It's identified as a tasklet by its flags
+ * TASK_F_TASKLET. It is expected to run on the calling thread by default,
+ * it's up to the caller to change ->tid if it wants to own it.
+ */
+static inline void tasklet_init(struct tasklet *t)
+{
+ t->calls = 0;
+ t->state = TASK_F_TASKLET;
+ t->process = NULL;
+ t->tid = -1; /* negative tid = run on the calling thread */
+ t->wake_date = 0;
+ t->caller = NULL;
+ LIST_INIT(&t->list);
+}
+
+/* Allocate and initialize a new tasklet, local to the thread by default. The
+ * caller may assign its tid if it wants to own the tasklet. Returns the new
+ * tasklet, or NULL on allocation failure.
+ */
+static inline struct tasklet *tasklet_new(void)
+{
+ struct tasklet *t = pool_alloc(pool_head_tasklet);
+
+ if (t) {
+ tasklet_init(t);
+ }
+ return t;
+}
+
+/*
+ * Allocate and initialize a new task, to run on global thread <thr>, or any
+ * thread if negative. The task count is incremented. The new task is returned,
+ * or NULL in case of lack of memory. It's up to the caller to pass a valid
+ * thread number (in tid space, 0 to nbthread-1, or <0 for any). Tasks created
+ * this way must be freed using task_destroy().
+ */
+static inline struct task *task_new_on(int thr)
+{
+ struct task *t = pool_alloc(pool_head_task);
+ if (t) {
+ th_ctx->nb_tasks++; /* per-thread counter, summed by total_allocated_tasks() */
+ task_init(t, thr);
+ }
+ return t;
+}
+
+/* Allocate and initialize a new task, bound to the calling thread. The new
+ * task is returned, or NULL in case of lack of memory. The task count is
+ * incremented.
+ */
+static inline struct task *task_new_here()
+{
+ return task_new_on(tid);
+}
+
+/* Allocate and initialize a new task allowed to run on any thread. The new
+ * task is returned, or NULL in case of lack of memory. The task count is
+ * incremented.
+ */
+static inline struct task *task_new_anywhere()
+{
+ return task_new_on(-1);
+}
+
+/*
+ * Free a task. Its context must have been freed since it will be lost. The
+ * task count is decremented. If it is the current task, this one is reset.
+ */
+static inline void __task_free(struct task *t)
+{
+ if (t == th_ctx->current) {
+ th_ctx->current = NULL;
+ __ha_barrier_store();
+ }
+ BUG_ON(task_in_wq(t) || task_in_rq(t));
+
+ BUG_ON((ulong)t->caller & 1); /* already freed if the marker is set */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&t->debug.prev_caller, HA_ATOMIC_LOAD(&t->caller));
+#endif
+ HA_ATOMIC_STORE(&t->caller, (void*)1); // make sure to crash if used after free
+
+ pool_free(pool_head_task, t);
+ th_ctx->nb_tasks--;
+ if (unlikely(stopping))
+ pool_flush(pool_head_task);
+}
+
+/* Destroys a task: it's unlinked from the wait queue and is freed if it's the
+ * current task or neither queued nor running; otherwise it's only marked to
+ * be freed by the scheduler (by clearing its process function). It does
+ * nothing if <t> is NULL.
+ */
+static inline void task_destroy(struct task *t)
+{
+ if (!t)
+ return;
+
+ task_unlink_wq(t);
+ /* We don't have to explicitly remove from the run queue.
+ * If we are in the runqueue, the test below will set t->process
+ * to NULL, and the task will be free'd when it'll be its turn
+ * to run.
+ */
+
+ /* There's no need to protect t->state with a lock, as the task
+ * has to run on the current thread.
+ */
+ if (t == th_ctx->current || !(t->state & (TASK_QUEUED | TASK_RUNNING)))
+ __task_free(t);
+ else
+ t->process = NULL;
+}
+
+/* Releases tasklet <tl>. Should only be called by the thread responsible for
+ * the tasklet. If it was still queued, it is dequeued first and the run queue
+ * count is decremented. Does nothing if <tl> is NULL.
+ */
+static inline void tasklet_free(struct tasklet *tl)
+{
+ if (!tl)
+ return;
+
+ if (MT_LIST_DELETE(list_to_mt_list(&tl->list)))
+ _HA_ATOMIC_DEC(&ha_thread_ctx[tl->tid >= 0 ? tl->tid : tid].rq_total);
+
+ BUG_ON((ulong)tl->caller & 1); /* already freed if the marker is set */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&tl->debug.prev_caller, HA_ATOMIC_LOAD(&tl->caller));
+#endif
+ HA_ATOMIC_STORE(&tl->caller, (void*)1); // make sure to crash if used after free
+ pool_free(pool_head_tasklet, tl);
+ if (unlikely(stopping))
+ pool_flush(pool_head_tasklet);
+}
+
+/* sets the thread ID on which tasklet <tl> is expected to run */
+static inline void tasklet_set_tid(struct tasklet *tl, int tid)
+{
+ tl->tid = tid;
+}
+
+/* Ensure <task> will be woken up at most at <when>. If the task is already in
+ * the run queue (but not running), nothing is done. It may be used that way
+ * with a delay : task_schedule(task, tick_add(now_ms, delay));
+ * It MUST NOT be used with a timer in the past, and even less with
+ * TICK_ETERNITY (which would block all timers). Note that passing it directly
+ * now_ms without using tick_add() will definitely make this happen once every
+ * 49.7 days.
+ */
+#define task_schedule(t, w) \
+ _task_schedule(t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
+
+static inline void _task_schedule(struct task *task, int when, const struct ha_caller *caller)
+{
+ /* TODO: mthread, check if there is no risk with this test */
+ if (task_in_rq(task))
+ return;
+
+#ifdef USE_THREAD
+ if (task->tid < 0) {
+ /* FIXME: is it really needed to lock the WQ during the check ? */
+ HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
+ if (task_in_wq(task))
+ when = tick_first(when, task->expire);
+
+ task->expire = when;
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
+ __task_queue(task, &tg_ctx->timers);
+ }
+ HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
+ } else
+#endif
+ {
+ BUG_ON(task->tid != tid);
+ if (task_in_wq(task))
+ when = tick_first(when, task->expire);
+
+ task->expire = when;
+ if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+ if (likely(caller)) {
+ caller = HA_ATOMIC_XCHG(&task->caller, caller);
+ BUG_ON((ulong)caller & 1); /* (ulong)1 marks a freed task */
+#ifdef DEBUG_TASK
+ HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+ }
+ __task_queue(task, &th_ctx->timers);
+ }
+ }
+}
+
+/* returns the human-readable string corresponding to a wakeup type <t> as
+ * found in the task caller locations, or "?" for unknown types.
+ */
+static inline const char *task_wakeup_type_str(uint t)
+{
+ switch (t) {
+ case WAKEUP_TYPE_TASK_WAKEUP : return "task_wakeup";
+ case WAKEUP_TYPE_TASK_INSTANT_WAKEUP : return "task_instant_wakeup";
+ case WAKEUP_TYPE_TASKLET_WAKEUP : return "tasklet_wakeup";
+ case WAKEUP_TYPE_TASKLET_WAKEUP_AFTER : return "tasklet_wakeup_after";
+ case WAKEUP_TYPE_TASK_QUEUE : return "task_queue";
+ case WAKEUP_TYPE_TASK_SCHEDULE : return "task_schedule";
+ case WAKEUP_TYPE_APPCTX_WAKEUP : return "appctx_wakeup";
+ default : return "?";
+ }
+}
+
+/* Registers a new notification signal. The signal is appended to the <purge>
+ * list (browsed later by notification_purge()) and to the <event> list
+ * attached to another task, which will wake up task <wakeup> when an event
+ * occurs (see notification_wake()). This is useful with external events like
+ * TCP I/O or sleep functions. Memory for the signal is allocated from
+ * pool_head_notification; returns the new signal, or NULL on allocation
+ * failure.
+ */
+static inline struct notification *notification_new(struct list *purge, struct list *event, struct task *wakeup)
+{
+ struct notification *com = pool_alloc(pool_head_notification);
+ if (!com)
+ return NULL;
+ LIST_APPEND(purge, &com->purge_me);
+ LIST_APPEND(event, &com->wake_me);
+ HA_SPIN_INIT(&com->lock);
+ com->task = wakeup;
+ return com;
+}
+
+/* Purges all the pending signals when the Lua execution is finished. This
+ * prevents a coprocess from trying to wake a deleted task. Signals whose task
+ * was already cleared are freed; the others are only detached from the purge
+ * list and their task pointer cleared. The purge list is not locked because
+ * it is owned by only one process; before browsing this list, the caller
+ * must ensure to be the only one browser.
+ */
+static inline void notification_purge(struct list *purge)
+{
+ struct notification *com, *back;
+
+ /* Delete all pending communication signals. */
+ list_for_each_entry_safe(com, back, purge, purge_me) {
+ HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
+ LIST_DELETE(&com->purge_me);
+ if (!com->task) {
+ HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+ pool_free(pool_head_notification, com);
+ continue;
+ }
+ com->task = NULL;
+ HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+ }
+}
+
+/* In some cases, the disconnected notifications must be cleared. This
+ * function just releases the memory blocks of signals whose task pointer is
+ * already NULL. The purge list is not locked because it is owned by only one
+ * process; before browsing this list, the caller must ensure to be the only
+ * one browser. <com> is not locked because when com->task is NULL, the
+ * notification is no longer used.
+ */
+static inline void notification_gc(struct list *purge)
+{
+ struct notification *com, *back;
+
+ /* Delete all pending communication signals. */
+ list_for_each_entry_safe (com, back, purge, purge_me) {
+ if (com->task)
+ continue;
+ LIST_DELETE(&com->purge_me);
+ pool_free(pool_head_notification, com);
+ }
+}
+
+/* Sends the signals: wakes up (TASK_WOKEN_MSG) every task attached to the
+ * <wake> list head and detaches each signal from it. Signals whose task was
+ * already cleared are freed; the others only get their task pointer cleared
+ * so that notification_purge()/notification_gc() can release them later.
+ * The wake list is not locked because it is owned by only one process;
+ * before browsing this list, the caller must ensure to be the only one
+ * browser.
+ */
+static inline void notification_wake(struct list *wake)
+{
+ struct notification *com, *back;
+
+ /* Wake task and delete all pending communication signals. */
+ list_for_each_entry_safe(com, back, wake, wake_me) {
+ HA_SPIN_LOCK(NOTIF_LOCK, &com->lock);
+ LIST_DELETE(&com->wake_me);
+ if (!com->task) {
+ HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+ pool_free(pool_head_notification, com);
+ continue;
+ }
+ task_wakeup(com->task, TASK_WOKEN_MSG);
+ com->task = NULL;
+ HA_SPIN_UNLOCK(NOTIF_LOCK, &com->lock);
+ }
+}
+
+/* Returns true if some notifications are pending on list <wake> */
+static inline int notification_registered(struct list *wake)
+{
+ return !LIST_ISEMPTY(wake);
+}
+
+#endif /* _HAPROXY_TASK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/tcp_rules.h b/include/haproxy/tcp_rules.h
new file mode 100644
index 0000000..2ed515e
--- /dev/null
+++ b/include/haproxy/tcp_rules.h
@@ -0,0 +1,52 @@
+/*
+ * include/haproxy/tcp_rules.h
+ * This file contains "tcp" rules definitions
+ *
+ * Copyright (C) 2000-2016 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TCP_RULES_H
+#define _HAPROXY_TCP_RULES_H
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stream-t.h>
+
+int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit);
+int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit);
+int tcp_exec_l4_rules(struct session *sess);
+int tcp_exec_l5_rules(struct session *sess);
+
+void tcp_req_conn_keywords_register(struct action_kw_list *kw_list);
+void tcp_req_sess_keywords_register(struct action_kw_list *kw_list);
+void tcp_req_cont_keywords_register(struct action_kw_list *kw_list);
+void tcp_res_cont_keywords_register(struct action_kw_list *kw_list);
+
+struct action_kw *tcp_req_conn_action(const char *kw);
+struct action_kw *tcp_req_sess_action(const char *kw);
+struct action_kw *tcp_req_cont_action(const char *kw);
+struct action_kw *tcp_res_cont_action(const char *kw);
+
+#endif /* _HAPROXY_TCP_RULES_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/tcpcheck-t.h b/include/haproxy/tcpcheck-t.h
new file mode 100644
index 0000000..8878995
--- /dev/null
+++ b/include/haproxy/tcpcheck-t.h
@@ -0,0 +1,242 @@
+/*
+ * include/haproxy/tcpcheck-t.h
+ * TCP check definitions, enums, macros and bitfields.
+ *
+ * Copyright 2000-2009,2020 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ * Copyright 2013 Baptiste Assmann <bedis9@gmail.com>
+ * Copyright 2020 Gaetan Rivet <grive@u256.net>
+ * Copyright 2020 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_TCPCHECK_T_H
+#define _HAPROXY_TCPCHECK_T_H
+
+#include <import/ebtree-t.h>
+#include <import/ist.h>
+#include <haproxy/api-t.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/check-t.h>
+#include <haproxy/connection-t.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/vars-t.h>
+
+/* options for tcp-check connect */
+#define TCPCHK_OPT_NONE 0x0000 /* no options specified, default */
+#define TCPCHK_OPT_SEND_PROXY 0x0001 /* send proxy-protocol string */
+#define TCPCHK_OPT_SSL 0x0002 /* SSL connection */
+#define TCPCHK_OPT_LINGER 0x0004 /* Do not RST connection, let it linger */
+#define TCPCHK_OPT_DEFAULT_CONNECT 0x0008 /* Do a connect using server params */
+#define TCPCHK_OPT_IMPLICIT 0x0010 /* Implicit connect */
+#define TCPCHK_OPT_SOCKS4 0x0020 /* check the connection via socks4 proxy */
+#define TCPCHK_OPT_HAS_DATA 0x0040 /* data should be sent after connection */
+
+enum tcpcheck_send_type {
+ TCPCHK_SEND_UNDEF = 0, /* Send is not parsed. */
+ TCPCHK_SEND_STRING, /* Send an ASCII string. */
+ TCPCHK_SEND_BINARY, /* Send a binary sequence. */
+ TCPCHK_SEND_STRING_LF, /* Send an ASCII log-format string. */
+ TCPCHK_SEND_BINARY_LF, /* Send a binary log-format sequence. */
+ TCPCHK_SEND_HTTP, /* Send an HTTP request */
+};
+
+/* flags for tcp-check send */
+#define TCPCHK_SND_HTTP_FL_URI_FMT 0x0001 /* Use a log-format string for the uri */
+#define TCPCHK_SND_HTTP_FL_BODY_FMT 0x0002 /* Use a log-format string for the body */
+#define TCPCHK_SND_HTTP_FROM_OPT 0x0004 /* Send rule coming from "option httpchk" directive */
+
+enum tcpcheck_eval_ret {
+ TCPCHK_EVAL_WAIT = 0,
+ TCPCHK_EVAL_STOP,
+ TCPCHK_EVAL_CONTINUE,
+};
+
+enum tcpcheck_expect_type {
+ TCPCHK_EXPECT_UNDEF = 0, /* Match is not used. */
+ TCPCHK_EXPECT_STRING, /* Matches a string. */
+ TCPCHK_EXPECT_STRING_REGEX, /* Matches a regular pattern. */
+ TCPCHK_EXPECT_STRING_LF, /* Matches a log-format string. */
+ TCPCHK_EXPECT_BINARY, /* Matches a binary sequence on a hex-encoded text. */
+ TCPCHK_EXPECT_BINARY_REGEX, /* Matches a regular pattern on a hex-encoded text. */
+ TCPCHK_EXPECT_BINARY_LF, /* Matches a log-format binary sequence on a hex-encoded text. */
+ TCPCHK_EXPECT_CUSTOM, /* Execute a custom function. */
+ TCPCHK_EXPECT_HTTP_STATUS, /* Matches a list of codes on the HTTP status */
+ TCPCHK_EXPECT_HTTP_STATUS_REGEX, /* Matches a regular pattern on the HTTP status */
+ TCPCHK_EXPECT_HTTP_HEADER, /* Matches on HTTP headers */
+ TCPCHK_EXPECT_HTTP_BODY, /* Matches a string on the HTTP payload */
+ TCPCHK_EXPECT_HTTP_BODY_REGEX, /* Matches a regular pattern on a HTTP payload */
+ TCPCHK_EXPECT_HTTP_BODY_LF, /* Matches a log-format string on the HTTP payload */
+};
+
+/* tcp-check expect flags */
+#define TCPCHK_EXPT_FL_INV 0x0001 /* Matching is inversed */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_STR 0x0002 /* Exact match on the HTTP header name */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_BEG 0x0004 /* Prefix match on the HTTP header name */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_END 0x0008 /* Suffix match on the HTTP header name */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_SUB 0x0010 /* Substring match on the HTTP header name */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_REG 0x0020 /* Regex match on the HTTP header name */
+#define TCPCHK_EXPT_FL_HTTP_HNAME_FMT 0x0040 /* The HTTP header name is a log-format string */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_NONE 0x0080 /* No match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_STR 0x0100 /* Exact match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_BEG 0x0200 /* Prefix match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_END 0x0400 /* Suffix match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_SUB 0x0800 /* Substring match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_REG 0x1000 /* Regex match on the HTTP header value */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_FMT 0x2000 /* The HTTP header value is a log-format string */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_FULL 0x4000 /* Match the full header value ( no stop on commas ) */
+
+#define TCPCHK_EXPT_FL_HTTP_HNAME_TYPE 0x003E /* Mask to get matching method on header name */
+#define TCPCHK_EXPT_FL_HTTP_HVAL_TYPE 0x1F00 /* Mask to get matching method on header value */
+
+/* possible actions for tcpcheck_rule->action */
+enum tcpcheck_rule_type {
+ TCPCHK_ACT_SEND = 0, /* send action, regular string format */
+ TCPCHK_ACT_EXPECT, /* expect action, either regular or binary string */
+ TCPCHK_ACT_CONNECT, /* connect action, to probe a new port */
+ TCPCHK_ACT_COMMENT, /* no action, simply a comment used for logs */
+ TCPCHK_ACT_ACTION_KW, /* custom registered action_kw rule. */
+};
+
+#define TCPCHK_RULES_NONE 0x00000000
+#define TCPCHK_RULES_UNUSED_TCP_RS 0x00000001 /* An unused tcp-check ruleset exists */
+#define TCPCHK_RULES_UNUSED_HTTP_RS 0x00000002 /* An unused http-check ruleset exists */
+#define TCPCHK_RULES_UNUSED_RS 0x00000003 /* Mask for unused ruleset */
+
+#define TCPCHK_RULES_PGSQL_CHK 0x00000010
+#define TCPCHK_RULES_REDIS_CHK 0x00000020
+#define TCPCHK_RULES_SMTP_CHK 0x00000030
+#define TCPCHK_RULES_HTTP_CHK 0x00000040
+#define TCPCHK_RULES_MYSQL_CHK 0x00000050
+#define TCPCHK_RULES_LDAP_CHK 0x00000060
+#define TCPCHK_RULES_SSL3_CHK 0x00000070
+#define TCPCHK_RULES_AGENT_CHK 0x00000080
+#define TCPCHK_RULES_SPOP_CHK 0x00000090
+/* Unused 0x000000A0..0x00000FF0 (reserved for future proto) */
+#define TCPCHK_RULES_TCP_CHK 0x00000FF0
+#define TCPCHK_RULES_PROTO_CHK 0x00000FF0 /* Mask to cover protocol check */
+
+struct check;
+struct tcpcheck_connect {
+ char *sni; /* server name to use for SSL connections */
+ char *alpn; /* ALPN to use for the SSL connection */
+ int alpn_len; /* ALPN string length */
+ const struct mux_proto_list *mux_proto; /* the mux to use for all outgoing connections (specified by the "proto" keyword) */
+ uint16_t options; /* options when setting up a new connection */
+ uint16_t port; /* port to connect to */
+ struct sample_expr *port_expr; /* sample expr to determine the port, may be NULL */
+ struct sockaddr_storage addr; /* the address to connect to */
+};
+
+struct tcpcheck_http_hdr {
+ struct ist name; /* the header name */
+ struct list value; /* the log-format string value */
+ struct list list; /* header chained list */
+};
+
+struct tcpcheck_codes {
+ unsigned int (*codes)[2]; /* an array of ranges of codes: [0]=min [1]=max */
+ size_t num; /* number of entries in the array */
+};
+
+struct tcpcheck_send {
+ enum tcpcheck_send_type type;
+ union {
+ struct ist data; /* an ASCII string or a binary sequence */
+ struct list fmt; /* an ASCII or hexa log-format string */
+ struct {
+ unsigned int flags; /* TCPCHK_SND_HTTP_FL_* */
+ struct http_meth meth; /* the HTTP request method */
+ union {
+ struct ist uri; /* the HTTP request uri is a string */
+ struct list uri_fmt; /* or a log-format string */
+ };
+ struct ist vsn; /* the HTTP request version string */
+ struct list hdrs; /* the HTTP request header list */
+ union {
+ struct ist body; /* the HTTP request payload is a string */
+ struct list body_fmt; /* or a log-format string */
+ };
+ } http; /* Info about the HTTP request to send */
+ };
+};
+
+struct tcpcheck_expect {
+ enum tcpcheck_expect_type type; /* Type of pattern used for matching. */
+ unsigned int flags; /* TCPCHK_EXPT_FL_* */
+ union {
+ struct ist data; /* Matching a literal string / binary anywhere in the response. */
+ struct my_regex *regex; /* Matching a regex pattern. */
+ struct tcpcheck_codes codes; /* Matching a list of codes */
+ struct list fmt; /* Matching a log-format string / binary */
+ struct {
+ union {
+ struct ist name;
+ struct list name_fmt;
+ struct my_regex *name_re;
+ };
+ union {
+ struct ist value;
+ struct list value_fmt;
+ struct my_regex *value_re;
+ };
+ } hdr; /* Matching a header pattern */
+
+
+ /* custom function to eval expect rule */
+ enum tcpcheck_eval_ret (*custom)(struct check *, struct tcpcheck_rule *, int);
+ };
+ struct tcpcheck_rule *head; /* first expect of a chain. */
+ int min_recv; /* Minimum amount of data before an expect can be applied. (default: -1, ignored) */
+ enum healthcheck_status ok_status; /* The healthcheck status to use on success (default: L7OKD) */
+ enum healthcheck_status err_status; /* The healthcheck status to use on error (default: L7RSP) */
+ enum healthcheck_status tout_status; /* The healthcheck status to use on timeout (default: L7TOUT) */
+ struct list onerror_fmt; /* log-format string to use as comment on error */
+ struct list onsuccess_fmt; /* log-format string to use as comment on success (if last rule) */
+ struct sample_expr *status_expr; /* sample expr to determine the check status code */
+};
+
+struct tcpcheck_action_kw {
+ struct act_rule *rule;
+};
+
+struct tcpcheck_rule {
+ struct list list; /* list linked to from the proxy */
+ enum tcpcheck_rule_type action; /* type of the rule. */
+ int index; /* Index within the list. Starts at 0. */
+ char *comment; /* comment to be used in the logs and on the stats socket */
+ union {
+ struct tcpcheck_connect connect; /* Connect rule. */
+ struct tcpcheck_send send; /* Send rule. */
+ struct tcpcheck_expect expect; /* Expected pattern. */
+ struct tcpcheck_action_kw action_kw; /* Custom action. */
+ };
+};
+
+/* A list of tcp-check vars, to be registered before executing a ruleset */
+struct tcpcheck_var {
+ struct ist name; /* the variable name with the scope */
+ struct sample_data data; /* the data associated to the variable */
+ struct list list; /* element to chain tcp-check vars */
+};
+
+/* a list of tcp-check rules */
+struct tcpcheck_rules {
+ unsigned int flags; /* flags applied to the rules */
+ struct list *list; /* the list of tcpcheck_rules */
+ struct list preset_vars; /* The list of variable to preset before executing the ruleset */
+};
+
+/* A list of tcp-check rules with a name */
+struct tcpcheck_ruleset {
+ struct list rules; /* the list of tcpcheck_rule */
+ struct ebpt_node node; /* node in the shared tree */
+};
+
+
+#endif /* _HAPROXY_TCPCHECK_T_H */
diff --git a/include/haproxy/tcpcheck.h b/include/haproxy/tcpcheck.h
new file mode 100644
index 0000000..3abd1ef
--- /dev/null
+++ b/include/haproxy/tcpcheck.h
@@ -0,0 +1,125 @@
+/*
+ * include/haproxy/tcpcheck.h
+ * Functions prototypes for the TCP checks.
+ *
+ * Copyright 2000-2009,2020 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ * Copyright 2013 Baptiste Assmann <bedis9@gmail.com>
+ * Copyright 2020 Gaetan Rivet <grive@u256.net>
+ * Copyright 2020 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TCPCHECK_H
+#define _HAPROXY_TCPCHECK_H
+
+#include <haproxy/action.h>
+#include <haproxy/check-t.h>
+#include <haproxy/pool-t.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/tcpcheck-t.h>
+
+extern struct action_kw_list tcp_check_keywords;
+extern struct pool_head *pool_head_tcpcheck_rule;
+
+int tcpcheck_get_step_id(const struct check *check, const struct tcpcheck_rule *rule);
+struct tcpcheck_rule *get_first_tcpcheck_rule(const struct tcpcheck_rules *rules);
+
+struct tcpcheck_ruleset *create_tcpcheck_ruleset(const char *name);
+struct tcpcheck_ruleset *find_tcpcheck_ruleset(const char *name);
+void free_tcpcheck_ruleset(struct tcpcheck_ruleset *rs);
+
+void free_tcpcheck(struct tcpcheck_rule *rule, int in_pool);
+void deinit_proxy_tcpcheck(struct proxy *px);
+
+struct tcpcheck_var *create_tcpcheck_var(const struct ist name);
+void free_tcpcheck_var(struct tcpcheck_var *var);
+int dup_tcpcheck_vars(struct list *dst, const struct list *src);
+void free_tcpcheck_vars(struct list *vars);
+
+int add_tcpcheck_expect_str(struct tcpcheck_rules *rules, const char *str);
+int add_tcpcheck_send_strs(struct tcpcheck_rules *rules, const char * const *strs);
+int tcpcheck_add_http_rule(struct tcpcheck_rule *chk, struct tcpcheck_rules *rules, char **errmsg);
+
+void free_tcpcheck_http_hdr(struct tcpcheck_http_hdr *hdr);
+
+enum tcpcheck_eval_ret tcpcheck_mysql_expect_iniths(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_mysql_expect_ok(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_ldap_expect_bindrsp(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_spop_expect_agenthello(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_agent_expect_reply(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_eval_connect(struct check *check, struct tcpcheck_rule *rule);
+enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_rule *rule);
+enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_rule *rule);
+enum tcpcheck_eval_ret tcpcheck_eval_expect_http(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_eval_expect(struct check *check, struct tcpcheck_rule *rule, int last_read);
+enum tcpcheck_eval_ret tcpcheck_eval_action_kw(struct check *check, struct tcpcheck_rule *rule);
+int tcpcheck_main(struct check *check);
+struct tcpcheck_rule *parse_tcpcheck_action(char **args, int cur_arg, struct proxy *px,
+ struct list *rules, struct action_kw *kw,
+ const char *file, int line, char **errmsg);
+struct tcpcheck_rule *parse_tcpcheck_connect(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg);
+struct tcpcheck_rule *parse_tcpcheck_send(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg);
+struct tcpcheck_rule *parse_tcpcheck_send_http(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg);
+struct tcpcheck_rule *parse_tcpcheck_comment(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg);
+struct tcpcheck_rule *parse_tcpcheck_expect(char **args, int cur_arg, struct proxy *px,
+ struct list *rules, unsigned int proto,
+ const char *file, int line, char **errmsg);
+
+int proxy_parse_tcp_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_redis_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_ssl_hello_chk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_smtpchk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_pgsql_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_mysql_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_ldap_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_spop_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+int proxy_parse_httpchk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line);
+
+void tcp_check_keywords_register(struct action_kw_list *kw_list);
+
+/* Return the struct action_kw associated to a keyword */
+static inline struct action_kw *action_kw_tcp_check_lookup(const char *kw)
+{
+ return action_lookup(&tcp_check_keywords.list, kw);
+}
+
+static inline void action_kw_tcp_check_build_list(struct buffer *chk)
+{
+ action_build_list(&tcp_check_keywords.list, chk);
+}
+
+#endif /* _HAPROXY_TCPCHECK_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/thread-t.h b/include/haproxy/thread-t.h
new file mode 100644
index 0000000..f3552c2
--- /dev/null
+++ b/include/haproxy/thread-t.h
@@ -0,0 +1,165 @@
+/*
+ * include/haproxy/thread-t.h
+ * Definitions and types for thread support.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_THREAD_T_H
+#define _HAPROXY_THREAD_T_H
+
+#include <haproxy/defaults.h>
+
+/* Note: this file mainly contains 3 sections:
+ * - one used solely when USE_THREAD is *not* set
+ * - one used solely when USE_THREAD is set
+ * - a common one.
+ */
+
+#ifndef USE_THREAD
+
+/********************** THREADS DISABLED ************************/
+
+/* These macros allow to make some struct fields or local variables optional */
+#define __decl_spinlock(lock)
+#define __decl_aligned_spinlock(lock)
+#define __decl_rwlock(lock)
+#define __decl_aligned_rwlock(lock)
+
+#elif !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
+
+/************** THREADS ENABLED WITHOUT DEBUGGING **************/
+
+/* declare a self-initializing spinlock */
+#define __decl_spinlock(lock) \
+ HA_SPINLOCK_T (lock) = 0;
+
+/* declare a self-initializing spinlock, aligned on a cache line */
+#define __decl_aligned_spinlock(lock) \
+ HA_SPINLOCK_T (lock) __attribute__((aligned(64))) = 0;
+
+/* declare a self-initializing rwlock */
+#define __decl_rwlock(lock) \
+ HA_RWLOCK_T (lock) = 0;
+
+/* declare a self-initializing rwlock, aligned on a cache line */
+#define __decl_aligned_rwlock(lock) \
+ HA_RWLOCK_T (lock) __attribute__((aligned(64))) = 0;
+
+#else /* !USE_THREAD */
+
+/**************** THREADS ENABLED WITH DEBUGGING ***************/
+
+/* declare a self-initializing spinlock */
+#define __decl_spinlock(lock) \
+ HA_SPINLOCK_T (lock); \
+ INITCALL1(STG_LOCK, ha_spin_init, &(lock))
+
+/* declare a self-initializing spinlock, aligned on a cache line */
+#define __decl_aligned_spinlock(lock) \
+ HA_SPINLOCK_T (lock) __attribute__((aligned(64))); \
+ INITCALL1(STG_LOCK, ha_spin_init, &(lock))
+
+/* declare a self-initializing rwlock */
+#define __decl_rwlock(lock) \
+ HA_RWLOCK_T (lock); \
+ INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
+
+/* declare a self-initializing rwlock, aligned on a cache line */
+#define __decl_aligned_rwlock(lock) \
+ HA_RWLOCK_T (lock) __attribute__((aligned(64))); \
+ INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
+
+#endif /* USE_THREAD */
+
+
+/*** Common parts below ***/
+
+/* storage types used by spinlocks and RW locks */
+#define __HA_SPINLOCK_T unsigned long
+#define __HA_RWLOCK_T unsigned long
+
+
+/* When thread debugging is enabled, we remap HA_SPINLOCK_T and HA_RWLOCK_T to
+ * complex structures which embed debugging info.
+ */
+#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
+
+#define HA_SPINLOCK_T __HA_SPINLOCK_T
+#define HA_RWLOCK_T __HA_RWLOCK_T
+
+#else /* !DEBUG_THREAD */
+
+#define HA_SPINLOCK_T struct ha_spinlock
+#define HA_RWLOCK_T struct ha_rwlock
+
+/* Debugging information that is only used when thread debugging is enabled */
+
+struct lock_stat {
+ uint64_t nsec_wait_for_write;
+ uint64_t nsec_wait_for_read;
+ uint64_t nsec_wait_for_seek;
+ uint64_t num_write_locked;
+ uint64_t num_write_unlocked;
+ uint64_t num_read_locked;
+ uint64_t num_read_unlocked;
+ uint64_t num_seek_locked;
+ uint64_t num_seek_unlocked;
+};
+
+struct ha_spinlock_state {
+ unsigned long owner; /* a bit is set to 1 << tid for the lock owner */
+ unsigned long waiters; /* a bit is set to 1 << tid for waiting threads */
+};
+
+struct ha_rwlock_state {
+ unsigned long cur_writer; /* a bit is set to 1 << tid for the lock owner */
+ unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
+ unsigned long cur_readers; /* a bit is set to 1 << tid for current readers */
+ unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
+ unsigned long cur_seeker; /* a bit is set to 1 << tid for the lock seekers */
+ unsigned long wait_seekers; /* a bit is set to 1 << tid for waiting seekers */
+};
+
+struct ha_spinlock {
+ __HA_SPINLOCK_T lock;
+ struct {
+ struct ha_spinlock_state st[MAX_TGROUPS];
+ struct {
+ const char *function;
+ const char *file;
+ int line;
+ } last_location; /* location of the last owner */
+ } info;
+};
+
+struct ha_rwlock {
+ __HA_RWLOCK_T lock;
+ struct {
+ struct ha_rwlock_state st[MAX_TGROUPS];
+ struct {
+ const char *function;
+ const char *file;
+ int line;
+ } last_location; /* location of the last write owner */
+ } info;
+};
+
+#endif /* DEBUG_THREAD */
+
+#endif /* _HAPROXY_THREAD_T_H */
diff --git a/include/haproxy/thread.h b/include/haproxy/thread.h
new file mode 100644
index 0000000..8c7520b
--- /dev/null
+++ b/include/haproxy/thread.h
@@ -0,0 +1,489 @@
+/*
+ * include/haproxy/thread.h
+ * definitions, macros and inline functions used by threads.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_THREAD_H
+#define _HAPROXY_THREAD_H
+
+#include <haproxy/api.h>
+#include <haproxy/thread-t.h>
+#include <haproxy/tinfo.h>
+
+
+/* Note: this file mainly contains 5 sections:
+ * - a small common part, which also corresponds to the common API
+ * - one used solely when USE_THREAD is *not* set
+ * - one used solely when USE_THREAD is set
+ * - one used solely when USE_THREAD is set WITHOUT debugging
+ * - one used solely when USE_THREAD is set WITH debugging
+ *
+ */
+
+
+/* Generic exports */
+int parse_nbthread(const char *arg, char **err);
+void ha_tkill(unsigned int thr, int sig);
+void ha_tkillall(int sig);
+void ha_thread_relax(void);
+int thread_detect_binding_discrepancies(void);
+int thread_detect_more_than_cpus(void);
+int thread_map_to_groups();
+int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err);
+int parse_thread_set(const char *arg, struct thread_set *ts, char **err);
+extern int thread_cpus_enabled_at_boot;
+
+
+#ifndef USE_THREAD
+
+/********************** THREADS DISABLED ************************/
+
+/* Only way found to replace variables with constants that are optimized away
+ * at build time.
+ */
+enum { all_tgroups_mask = 1UL };
+enum { tid_bit = 1UL };
+enum { tid = 0 };
+enum { tgid = 1 };
+
+#define HA_SPIN_INIT(l) do { /* do nothing */ } while(0)
+#define HA_SPIN_DESTROY(l) do { /* do nothing */ } while(0)
+#define HA_SPIN_LOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_SPIN_TRYLOCK(lbl, l) ({ 0; })
+#define HA_SPIN_UNLOCK(lbl, l) do { /* do nothing */ } while(0)
+
+#define HA_RWLOCK_INIT(l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_DESTROY(l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_WRLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
+#define HA_RWLOCK_WRUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_RDLOCK(lbl, l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
+#define HA_RWLOCK_RDUNLOCK(lbl, l) do { /* do nothing */ } while(0)
+
+#define HA_RWLOCK_SKLOCK(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_SKTOWR(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_WRTOSK(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_SKTORD(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_WRTORD(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_SKUNLOCK(lbl,l) do { /* do nothing */ } while(0)
+#define HA_RWLOCK_TRYSKLOCK(lbl,l) ({ 0; })
+#define HA_RWLOCK_TRYRDTOSK(lbl,l) ({ 0; })
+
+#define ha_sigmask(how, set, oldset) sigprocmask(how, set, oldset)
+
+/* Sets the current thread to a valid one described by <thr>, or to any thread
+ * and any group if NULL (e.g. for use during boot where they're not totally
+ * initialized).
+ */
+static inline void ha_set_thread(const struct thread_info *thr)
+{
+ if (thr) {
+ ti = thr;
+ tg = ti->tg;
+ th_ctx = &ha_thread_ctx[ti->tid];
+ } else {
+ ti = &ha_thread_info[0];
+ tg = &ha_tgroup_info[0];
+ th_ctx = &ha_thread_ctx[0];
+ }
+}
+
+static inline void thread_idle_now()
+{
+}
+
+static inline void thread_idle_end()
+{
+}
+
+static inline void thread_harmless_now()
+{
+}
+
+static inline int is_thread_harmless()
+{
+ return 1;
+}
+
+static inline void thread_harmless_end()
+{
+}
+
+static inline void thread_harmless_end_sig()
+{
+}
+
+static inline void thread_isolate()
+{
+}
+
+static inline void thread_isolate_full()
+{
+}
+
+static inline void thread_release()
+{
+}
+
+static inline unsigned long thread_isolated()
+{
+ return 1;
+}
+
+static inline void setup_extra_threads(void *(*handler)(void *))
+{
+}
+
+static inline void wait_for_threads_completion()
+{
+}
+
+static inline void set_thread_cpu_affinity()
+{
+}
+
+static inline unsigned long long ha_get_pthread_id(unsigned int thr)
+{
+ return 0;
+}
+
+#else /* !USE_THREAD */
+
+/********************** THREADS ENABLED ************************/
+
+#define PLOCK_LORW_INLINE_WAIT
+#include <import/plock.h>
+
+void thread_harmless_till_end(void);
+void thread_isolate(void);
+void thread_isolate_full(void);
+void thread_release(void);
+void ha_spin_init(HA_SPINLOCK_T *l);
+void ha_rwlock_init(HA_RWLOCK_T *l);
+void setup_extra_threads(void *(*handler)(void *));
+void wait_for_threads_completion();
+void set_thread_cpu_affinity();
+unsigned long long ha_get_pthread_id(unsigned int thr);
+
+extern volatile unsigned long all_tgroups_mask;
+extern volatile unsigned int rdv_requests;
+extern volatile unsigned int isolated_thread;
+extern THREAD_LOCAL unsigned int tid; /* The thread id */
+extern THREAD_LOCAL unsigned int tgid; /* The thread group id (starts at 1) */
+
+#define ha_sigmask(how, set, oldset) pthread_sigmask(how, set, oldset)
+
+/* Sets the current thread to a valid one described by <thr>, or to any thread
+ * and any group if NULL (e.g. for use during boot where they're not totally
+ * initialized).
+ */
+static inline void ha_set_thread(const struct thread_info *thr)
+{
+ if (thr) {
+ BUG_ON(!thr->ltid_bit);
+ BUG_ON(!thr->tg);
+ BUG_ON(!thr->tgid);
+
+ ti = thr;
+ tg = thr->tg;
+ tid = thr->tid;
+ tgid = thr->tgid;
+ th_ctx = &ha_thread_ctx[tid];
+ tg_ctx = &ha_tgroup_ctx[tgid-1];
+ } else {
+ tgid = 1;
+ tid = 0;
+ ti = &ha_thread_info[0];
+ tg = &ha_tgroup_info[0];
+ th_ctx = &ha_thread_ctx[0];
+ tg_ctx = &ha_tgroup_ctx[0];
+ }
+}
+
+/* Marks the thread as idle, which means that not only it's not doing anything
+ * dangerous, but in addition it has not started anything sensitive either.
+ * This essentially means that the thread currently is in the poller, thus
+ * outside of any execution block. Needs to be terminated using
+ * thread_idle_end(). This is needed to release a concurrent call to
+ * thread_isolate_full().
+ */
+static inline void thread_idle_now()
+{
+ HA_ATOMIC_OR(&tg_ctx->threads_idle, ti->ltid_bit);
+}
+
+/* Ends the harmless period started by thread_idle_now(), i.e. the thread is
+ * about to restart engaging in sensitive operations. This must not be done on
+ * a thread marked harmless, as it could cause a deadlock between another
+ * thread waiting for idle again and thread_harmless_end() in this thread.
+ *
+ * The right sequence is thus:
+ * thread_idle_now();
+ * thread_harmless_now();
+ * poll();
+ * thread_harmless_end();
+ * thread_idle_end();
+ */
+static inline void thread_idle_end()
+{
+ HA_ATOMIC_AND(&tg_ctx->threads_idle, ~ti->ltid_bit);
+}
+
+
+/* Marks the thread as harmless. Note: this must be true, i.e. the thread must
+ * not be touching any unprotected shared resource during this period. Usually
+ * this is called before poll(), but it may also be placed around very slow
+ * calls (eg: some crypto operations). Needs to be terminated using
+ * thread_harmless_end().
+ */
+static inline void thread_harmless_now()
+{
+ HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
+}
+
+/* Returns non-zero if the current thread is already harmless */
+static inline int is_thread_harmless()
+{
+ return !!(HA_ATOMIC_LOAD(&tg_ctx->threads_harmless) & ti->ltid_bit);
+}
+
+/* Ends the harmless period started by thread_harmless_now(). Usually this is
+ * placed after the poll() call. If it is discovered that a job was running and
+ * is relying on the thread still being harmless, the thread waits for the
+ * other one to finish.
+ */
+static inline void thread_harmless_end()
+{
+ while (1) {
+ HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
+ if (likely(_HA_ATOMIC_LOAD(&rdv_requests) == 0))
+ break;
+ thread_harmless_till_end();
+ }
+}
+
+/* Ends the harmless period started by thread_harmless_now(), but without
+ * waiting for isolated requests. This is meant to be used from signal handlers
+ * which might be called recursively while a thread already requested an
+ * isolation that must be ignored. It must not be used past a checkpoint where
+ * another thread could return and see the current thread as harmless before
+ * this call (or this could validate an isolation request by accident).
+ */
+static inline void thread_harmless_end_sig()
+{
+ HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
+}
+
+/* an isolated thread has its ID in isolated_thread */
+static inline unsigned long thread_isolated()
+{
+ return _HA_ATOMIC_LOAD(&isolated_thread) == tid;
+}
+
+/* Returns 1 if the cpu set is currently restricted for the process else 0.
+ * Currently only implemented for the Linux platform.
+ */
+int thread_cpu_mask_forced(void);
+
+#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
+
+/* Thread debugging is DISABLED, these are the regular locking functions */
+
+#define HA_SPIN_INIT(l) ({ (*l) = 0; })
+#define HA_SPIN_DESTROY(l) ({ (*l) = 0; })
+#define HA_SPIN_LOCK(lbl, l) pl_take_s(l)
+#define HA_SPIN_TRYLOCK(lbl, l) (!pl_try_s(l))
+#define HA_SPIN_UNLOCK(lbl, l) pl_drop_s(l)
+
+#define HA_RWLOCK_INIT(l) ({ (*l) = 0; })
+#define HA_RWLOCK_DESTROY(l) ({ (*l) = 0; })
+#define HA_RWLOCK_WRLOCK(lbl,l) pl_take_w(l)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) (!pl_try_w(l))
+#define HA_RWLOCK_WRUNLOCK(lbl,l) pl_drop_w(l)
+#define HA_RWLOCK_RDLOCK(lbl,l) pl_take_r(l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) (!pl_try_r(l))
+#define HA_RWLOCK_RDUNLOCK(lbl,l) pl_drop_r(l)
+
+/* rwlock upgrades via seek locks */
+#define HA_RWLOCK_SKLOCK(lbl,l) pl_take_s(l) /* N --> S */
+#define HA_RWLOCK_SKTOWR(lbl,l) pl_stow(l) /* S --> W */
+#define HA_RWLOCK_WRTOSK(lbl,l) pl_wtos(l) /* W --> S */
+#define HA_RWLOCK_SKTORD(lbl,l) pl_stor(l) /* S --> R */
+#define HA_RWLOCK_WRTORD(lbl,l) pl_wtor(l) /* W --> R */
+#define HA_RWLOCK_SKUNLOCK(lbl,l) pl_drop_s(l) /* S --> N */
+#define HA_RWLOCK_TRYSKLOCK(lbl,l) (!pl_try_s(l)) /* N -?> S */
+#define HA_RWLOCK_TRYRDTOSK(lbl,l) (!pl_try_rtos(l)) /* R -?> S */
+
+#else /* !defined(DEBUG_THREAD) && !defined(DEBUG_FULL) */
+
+/* Thread debugging is ENABLED, these are the instrumented functions */
+
+#define __SPIN_INIT(l) ({ (*l) = 0; })
+#define __SPIN_DESTROY(l) ({ (*l) = 0; })
+#define __SPIN_LOCK(l) pl_take_s(l)
+#define __SPIN_TRYLOCK(l) (!pl_try_s(l))
+#define __SPIN_UNLOCK(l) pl_drop_s(l)
+
+#define __RWLOCK_INIT(l) ({ (*l) = 0; })
+#define __RWLOCK_DESTROY(l) ({ (*l) = 0; })
+#define __RWLOCK_WRLOCK(l) pl_take_w(l)
+#define __RWLOCK_TRYWRLOCK(l) (!pl_try_w(l))
+#define __RWLOCK_WRUNLOCK(l) pl_drop_w(l)
+#define __RWLOCK_RDLOCK(l) pl_take_r(l)
+#define __RWLOCK_TRYRDLOCK(l) (!pl_try_r(l))
+#define __RWLOCK_RDUNLOCK(l) pl_drop_r(l)
+
+/* rwlock upgrades via seek locks */
+#define __RWLOCK_SKLOCK(l) pl_take_s(l) /* N --> S */
+#define __RWLOCK_SKTOWR(l) pl_stow(l) /* S --> W */
+#define __RWLOCK_WRTOSK(l) pl_wtos(l) /* W --> S */
+#define __RWLOCK_SKTORD(l) pl_stor(l) /* S --> R */
+#define __RWLOCK_WRTORD(l) pl_wtor(l) /* W --> R */
+#define __RWLOCK_SKUNLOCK(l) pl_drop_s(l) /* S --> N */
+#define __RWLOCK_TRYSKLOCK(l) (!pl_try_s(l)) /* N -?> S */
+#define __RWLOCK_TRYRDTOSK(l) (!pl_try_rtos(l)) /* R -?> S */
+
+#define HA_SPIN_INIT(l) __spin_init(l)
+#define HA_SPIN_DESTROY(l) __spin_destroy(l)
+
+#define HA_SPIN_LOCK(lbl, l) __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_SPIN_UNLOCK(lbl, l) __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
+
+#define HA_RWLOCK_INIT(l) __ha_rwlock_init((l))
+#define HA_RWLOCK_DESTROY(l) __ha_rwlock_destroy((l))
+#define HA_RWLOCK_WRLOCK(lbl,l) __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_WRUNLOCK(lbl,l) __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_RDLOCK(lbl,l) __ha_rwlock_rdlock(lbl, l)
+#define HA_RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
+#define HA_RWLOCK_RDUNLOCK(lbl,l) __ha_rwlock_rdunlock(lbl, l)
+
+#define HA_RWLOCK_SKLOCK(lbl,l) __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_SKTOWR(lbl,l) __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_WRTOSK(lbl,l) __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_SKTORD(lbl,l) __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_WRTORD(lbl,l) __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_SKUNLOCK(lbl,l) __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_TRYSKLOCK(lbl,l) __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__)
+#define HA_RWLOCK_TRYRDTOSK(lbl,l) __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__)
+
+/* WARNING!!! if you update this enum, please also keep lock_label() up to date
+ * below.
+ */
+enum lock_label {
+ TASK_RQ_LOCK,
+ TASK_WQ_LOCK,
+ LISTENER_LOCK,
+ PROXY_LOCK,
+ SERVER_LOCK,
+ LBPRM_LOCK,
+ SIGNALS_LOCK,
+ STK_TABLE_LOCK,
+ STK_SESS_LOCK,
+ APPLETS_LOCK,
+ PEER_LOCK,
+ SHCTX_LOCK,
+ SSL_LOCK,
+ SSL_GEN_CERTS_LOCK,
+ PATREF_LOCK,
+ PATEXP_LOCK,
+ VARS_LOCK,
+ COMP_POOL_LOCK,
+ LUA_LOCK,
+ NOTIF_LOCK,
+ SPOE_APPLET_LOCK,
+ DNS_LOCK,
+ PID_LIST_LOCK,
+ EMAIL_ALERTS_LOCK,
+ PIPES_LOCK,
+ TLSKEYS_REF_LOCK,
+ AUTH_LOCK,
+ RING_LOCK,
+ DICT_LOCK,
+ PROTO_LOCK,
+ QUEUE_LOCK,
+ CKCH_LOCK,
+ SNI_LOCK,
+ SSL_SERVER_LOCK,
+ SFT_LOCK, /* sink forward target */
+ IDLE_CONNS_LOCK,
+ OCSP_LOCK,
+ QC_CID_LOCK,
+ CACHE_LOCK,
+ OTHER_LOCK,
+ /* WT: make sure never to use these ones outside of development,
+ * we need them for lock profiling!
+ */
+ DEBUG1_LOCK,
+ DEBUG2_LOCK,
+ DEBUG3_LOCK,
+ DEBUG4_LOCK,
+ DEBUG5_LOCK,
+ LOCK_LABELS
+};
+
+
+/* Following functions are used to collect some stats about locks. We wrap
+ * pthread functions to known how much time we wait in a lock. */
+
+void show_lock_stats();
+void __ha_rwlock_init(struct ha_rwlock *l);
+void __ha_rwlock_destroy(struct ha_rwlock *l);
+void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l);
+int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l);
+void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l);
+void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line);
+void __spin_init(struct ha_spinlock *l);
+void __spin_destroy(struct ha_spinlock *l);
+void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line);
+int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line);
+void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line);
+
+#endif /* DEBUG_THREAD */
+
+#endif /* USE_THREAD */
+
+#endif /* _HAPROXY_THREAD_H */
diff --git a/include/haproxy/ticks.h b/include/haproxy/ticks.h
new file mode 100644
index 0000000..8b8fcc6
--- /dev/null
+++ b/include/haproxy/ticks.h
@@ -0,0 +1,157 @@
+/*
+ * include/haproxy/ticks.h
+ * Functions and macros for manipulation of expiration timers
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Using a mix of milliseconds and timeval for internal timers is expensive and
+ * overkill, because we don't need such a precision to compute timeouts.
+ * So we're converting them to "ticks".
+ *
+ * A tick is a representation of a date relative to another one, and is
+ * measured in milliseconds. The natural usage is to represent an absolute date
+ * relative to the current date. Since it is not practical to update all values
+ * each time the current date changes, instead we use the absolute date rounded
+ * down to fit in a tick. We then have to compare a tick to the current date to
+ * know whether it is in the future or in the past. If a tick is below the
+ * current date, it is in the past. If it is above, it is in the future. The
+ * values will wrap so we can't compare that easily, instead we check the sign
+ * of the difference between a tick and the current date.
+ *
+ * Proceeding like this allows us to manipulate dates that are stored in
+ * scalars with enough precision and range. For this reason, we store ticks in
+ * 32-bit integers. This is enough to handle dates that are between 24.85 days
+ * in the past and as much in the future.
+ *
+ * We must both support absolute dates (well in fact, dates relative to now+/-
+ * 24 days), and intervals (for timeouts). Both types need an "eternity" magic
+ * value. For optimal code generation, we'll use zero as the magic value
+ * indicating that an expiration timer or a timeout is not set. We have to
+ * check that we don't return this value when adding timeouts to <now>. If a
+ * computation returns 0, we must increase it to 1 (which will push the timeout
+ * 1 ms further). For this reason, timeouts must not be added by hand but via
+ * the dedicated tick_add() function.
+ */
+
+#ifndef _HAPROXY_TICKS_H
+#define _HAPROXY_TICKS_H
+
+#include <haproxy/api.h>
+
+#define TICK_ETERNITY 0
+
+/* right now, ticks are milliseconds. Both negative ms and negative ticks
+ * indicate eternity.
+ */
+#define MS_TO_TICKS(ms) (ms)
+#define TICKS_TO_MS(tk) (tk)
+
+/* currently updated and stored in time.c */
+extern THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
+extern volatile unsigned int global_now_ms;
+
/* Tells whether <expire> is a set timer: returns 1 unless it holds the
 * TICK_ETERNITY magic value (0).
 */
static inline int tick_isset(int expire)
{
	return !!expire;
}
+
/* Adds <timeout> to <now> and returns the resulting expiration date.
 * <timeout> is not checked for null values. Since 0 is reserved for
 * TICK_ETERNITY, a computed value of 0 is pushed 1 ms further.
 */
static inline int tick_add(int now, int timeout)
{
	int exp = now + timeout;

	if (!exp)
		exp = 1; /* 0 would mean "no timer" */
	return exp;
}
+
/* Adds <timeout> to <now> when the timeout is set, otherwise returns
 * TICK_ETERNITY (0). As in tick_add(), a computed expiration of 0 is bumped
 * to 1 so it cannot be mistaken for eternity.
 */
static inline int tick_add_ifset(int now, int timeout)
{
	if (!timeout)
		return 0; /* TICK_ETERNITY */
	now += timeout;
	if (!now)
		now = 1;
	return now;
}
+
/* Tells whether timer <t1> strictly precedes <t2>; neither may be eternity.
 * Wrapping is handled by looking at the sign of the difference.
 */
static inline int tick_is_lt(int t1, int t2)
{
	int delta = t1 - t2;

	return delta < 0;
}
+
/* Tells whether timer <t1> precedes or equals <t2>; neither may be eternity.
 * Wrapping is handled by looking at the sign of the difference.
 */
static inline int tick_is_le(int t1, int t2)
{
	int delta = t1 - t2;

	return delta <= 0;
}
+
/* Tells whether <timer> has expired at date <now>: returns 1 once <timer>
 * is at or before <now>, 0 otherwise. An unset timer (TICK_ETERNITY) never
 * expires.
 */
static inline int tick_is_expired(int timer, int now)
{
	if (!timer)
		return 0; /* eternity */
	return (timer - now) <= 0;
}
+
/* Returns the earliest of two timers; an unset timer (TICK_ETERNITY) is
 * skipped in favour of the other one. Both may be eternity.
 */
static inline int tick_first(int t1, int t2)
{
	if (!t1)
		return t2;
	if (!t2)
		return t1;
	return ((t1 - t2) <= 0) ? t1 : t2;
}
+
/* Returns the earliest of two timers, where only <t1> may be eternity
 * (<t2> must be a set timer).
 */
static inline int tick_first_2nz(int t1, int t2)
{
	if (!t1)
		return t2;
	return ((t1 - t2) <= 0) ? t1 : t2;
}
+
/* Returns the number of ticks left from <now> until <exp>, clamped to zero
 * once <exp> is reached. Note: <exp> is expected to be a set timer; eternity
 * (0) is not special-cased here.
 */
static inline int tick_remain(int now, int exp)
{
	if (exp && (exp - now) <= 0)
		return 0; /* already expired */
	return exp - now;
}
+
+#endif /* _HAPROXY_TICKS_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/time.h b/include/haproxy/time.h
new file mode 100644
index 0000000..3ebc683
--- /dev/null
+++ b/include/haproxy/time.h
@@ -0,0 +1,520 @@
+/*
+ * include/haproxy/time.h
+ * timeval-based time calculation functions and macros.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TIME_H
+#define _HAPROXY_TIME_H
+
+#include <sys/time.h>
+#include <haproxy/api.h>
+
+#define TIME_ETERNITY (TV_ETERNITY_MS)
+
+
+
+/**** exported functions *************************************************/
+/*
+ * adds <ms> ms to <from>, set the result to <tv> and returns a pointer <tv>
+ */
+struct timeval *tv_ms_add(struct timeval *tv, const struct timeval *from, int ms);
+
+/*
+ * compares <tv1> and <tv2> modulo 1ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2
+ * Must not be used when either argument is eternity. Use tv_ms_cmp2() for that.
+ */
+int tv_ms_cmp(const struct timeval *tv1, const struct timeval *tv2);
+
+/*
+ * compares <tv1> and <tv2> modulo 1 ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2,
+ * assuming that TV_ETERNITY is greater than everything.
+ */
+int tv_ms_cmp2(const struct timeval *tv1, const struct timeval *tv2);
+
+
+/**** general purpose functions and macros *******************************/
+
+
+/*
+ * sets a struct timeval to its highest value so that it can never happen
+ * note that only tv_usec is necessary to detect it since a tv_usec > 999999
+ * is normally not possible.
+ */
+static inline struct timeval *tv_eternity(struct timeval *tv)
+{
+ tv->tv_sec = (typeof(tv->tv_sec))TV_ETERNITY;
+ tv->tv_usec = (typeof(tv->tv_usec))TV_ETERNITY;
+ return tv;
+}
+
+/*
+ * sets a struct timeval to 0
+ *
+ */
+static inline struct timeval *tv_zero(struct timeval *tv) {
+ tv->tv_sec = tv->tv_usec = 0;
+ return tv;
+}
+
+/*
+ * returns non null if tv is [eternity], otherwise 0.
+ */
+#define tv_iseternity(tv) ((tv)->tv_usec == (typeof((tv)->tv_usec))TV_ETERNITY)
+
+/*
+ * returns 0 if tv is [eternity], otherwise non-zero.
+ */
+#define tv_isset(tv) ((tv)->tv_usec != (typeof((tv)->tv_usec))TV_ETERNITY)
+
+/*
+ * returns non null if tv is [0], otherwise 0.
+ */
+#define tv_iszero(tv) (((tv)->tv_sec | (tv)->tv_usec) == 0)
+
/*
 * Converts a struct timeval to a wrapping number of milliseconds. The result
 * is computed modulo 2^32 and thus may wrap (roughly every 49.7 days of
 * tv_sec range).
 */
static inline uint __tv_to_ms(const struct timeval *tv)
{
	unsigned int ret;

	ret = (uint)tv->tv_sec * 1000;
	ret += (uint)tv->tv_usec / 1000;
	return ret;
}
+
/*
 * Converts <ms> milliseconds into a struct timeval stored in <tv>, and
 * returns <tv>. (The previous comment described the opposite conversion,
 * copied from __tv_to_ms() by mistake.)
 */
static inline struct timeval * __tv_from_ms(struct timeval *tv, unsigned long ms)
{
	tv->tv_sec = ms / 1000;                 /* whole seconds */
	tv->tv_usec = (ms % 1000) * 1000;       /* leftover ms as microseconds */
	return tv;
}
+
/*
 * Converts a struct timeval to a relative timestamp in nanoseconds (only
 * wraps every 585 years, i.e. never for our purpose). The ULL constants force
 * the arithmetic to 64 bits so no intermediate overflow is possible.
 */
static forceinline ullong tv_to_ns(const struct timeval *tv)
{
	ullong ret;

	ret = (ullong)tv->tv_sec * 1000000000ULL;
	ret += (ullong)tv->tv_usec * 1000ULL;
	return ret;
}

/* turns nanoseconds to seconds, just to avoid typos (truncating division) */
static forceinline uint ns_to_sec(ullong ns)
{
	return ns / 1000000000ULL;
}

/* turns nanoseconds to milliseconds, just to avoid typos (truncating division) */
static forceinline uint ns_to_ms(ullong ns)
{
	return ns / 1000000ULL;
}

/* turns seconds to nanoseconds, just to avoid typos; the ULL constant
 * promotes the product to 64 bits before it can overflow.
 */
static forceinline ullong sec_to_ns(uint sec)
{
	return sec * 1000000000ULL;
}

/* turns milliseconds to nanoseconds, just to avoid typos; 64-bit product */
static forceinline ullong ms_to_ns(uint ms)
{
	return ms * 1000000ULL;
}

/* turns microseconds to nanoseconds, just to avoid typos; 64-bit product */
static forceinline ullong us_to_ns(uint us)
{
	return us * 1000ULL;
}

/* creates a struct timeval from a relative timestamp in nanosecond
 * (C99 compound literal; usable as an rvalue only)
 */
#define NS_TO_TV(t) ((const struct timeval){ .tv_sec = (t) / 1000000000ULL, .tv_usec = ((t) % 1000000000ULL) / 1000U })
+
/* Maps a usec value in [0,999999] to a number of 1024Hz ticks in [0,1023]
 * (and 1000000 to 1024) without any divide or multiply on CPUs lacking a fast
 * multiplier: (usec * 1073 + 742516) >> 20 approximates usec * 1024 / 1e6
 * with an average error around 305 ppm, almost twice as low as a direct
 * usec-to-ms conversion.
 */
static inline unsigned int __usec_to_1024th(unsigned int usec)
{
	unsigned int scaled = usec * 1073U + 742516U;

	return scaled >> 20;
}
+
+
+/**** comparison functions and macros ***********************************/
+
+
/* tv_cmp: three-way comparison of <tv1> and <tv2>: returns -1 if tv1 < tv2,
 * 0 if equal, 1 if tv1 > tv2. Fields are compared as 32-bit unsigned values,
 * seconds first.
 */
static inline int __tv_cmp(const struct timeval *tv1, const struct timeval *tv2)
{
	unsigned int s1 = (unsigned int)tv1->tv_sec;
	unsigned int s2 = (unsigned int)tv2->tv_sec;
	unsigned int u1 = (unsigned int)tv1->tv_usec;
	unsigned int u2 = (unsigned int)tv2->tv_usec;

	if (s1 != s2)
		return (s1 < s2) ? -1 : 1;
	if (u1 != u2)
		return (u1 < u2) ? -1 : 1;
	return 0;
}
+
/* tv_iseq: equality test of <tv1> and <tv2>: returns 1 if tv1 == tv2,
 * otherwise 0. Fields are compared as 32-bit unsigned values.
 */
#define tv_iseq __tv_iseq
static inline int __tv_iseq(const struct timeval *tv1, const struct timeval *tv2)
{
	unsigned int s1 = (unsigned int)tv1->tv_sec;
	unsigned int s2 = (unsigned int)tv2->tv_sec;

	if (s1 != s2)
		return 0;
	return (unsigned int)tv1->tv_usec == (unsigned int)tv2->tv_usec;
}
+
/* tv_isgt: strict "after" test: returns 1 if tv1 > tv2, otherwise 0.
 * Seconds decide unless equal, in which case microseconds decide.
 */
#define tv_isgt _tv_isgt
int _tv_isgt(const struct timeval *tv1, const struct timeval *tv2);
static inline int __tv_isgt(const struct timeval *tv1, const struct timeval *tv2)
{
	unsigned int s1 = (unsigned int)tv1->tv_sec;
	unsigned int s2 = (unsigned int)tv2->tv_sec;

	if (s1 != s2)
		return s1 > s2;
	return (unsigned int)tv1->tv_usec > (unsigned int)tv2->tv_usec;
}
+
+/* tv_isge: compares <tv1> and <tv2> : returns 1 if tv1 >= tv2, otherwise 0 */
+#define tv_isge __tv_isge
+static inline int __tv_isge(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return
+ ((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec) ?
+ ((unsigned)tv1->tv_usec >= (unsigned)tv2->tv_usec) :
+ ((unsigned)tv1->tv_sec > (unsigned)tv2->tv_sec);
+}
+
+/* tv_islt: compares <tv1> and <tv2> : returns 1 if tv1 < tv2, otherwise 0 */
+#define tv_islt __tv_islt
+static inline int __tv_islt(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return
+ ((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec) ?
+ ((unsigned)tv1->tv_usec < (unsigned)tv2->tv_usec) :
+ ((unsigned)tv1->tv_sec < (unsigned)tv2->tv_sec);
+}
+
+/* tv_isle: compares <tv1> and <tv2> : returns 1 if tv1 <= tv2, otherwise 0 */
+#define tv_isle _tv_isle
+int _tv_isle(const struct timeval *tv1, const struct timeval *tv2);
+static inline int __tv_isle(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return
+ ((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec) ?
+ ((unsigned)tv1->tv_usec <= (unsigned)tv2->tv_usec) :
+ ((unsigned)tv1->tv_sec < (unsigned)tv2->tv_sec);
+}
+
+/*
+ * compares <tv1> and <tv2> modulo 1ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2
+ * Must not be used when either argument is eternity. Use tv_ms_cmp2() for that.
+ */
+#define tv_ms_cmp _tv_ms_cmp
+int _tv_ms_cmp(const struct timeval *tv1, const struct timeval *tv2);
+static inline int __tv_ms_cmp(const struct timeval *tv1, const struct timeval *tv2)
+{
+ if ((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec) {
+ if ((unsigned)tv2->tv_usec >= (unsigned)tv1->tv_usec + 1000)
+ return -1;
+ else if ((unsigned)tv1->tv_usec >= (unsigned)tv2->tv_usec + 1000)
+ return 1;
+ else
+ return 0;
+ }
+ else if (((unsigned)tv2->tv_sec > (unsigned)tv1->tv_sec + 1) ||
+ (((unsigned)tv2->tv_sec == (unsigned)tv1->tv_sec + 1) &&
+ ((unsigned)tv2->tv_usec + 1000000 >= (unsigned)tv1->tv_usec + 1000)))
+ return -1;
+ else if (((unsigned)tv1->tv_sec > (unsigned)tv2->tv_sec + 1) ||
+ (((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec + 1) &&
+ ((unsigned)tv1->tv_usec + 1000000 >= (unsigned)tv2->tv_usec + 1000)))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * compares <tv1> and <tv2> modulo 1 ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2,
+ * assuming that TV_ETERNITY is greater than everything.
+ */
+#define tv_ms_cmp2 _tv_ms_cmp2
+int _tv_ms_cmp2(const struct timeval *tv1, const struct timeval *tv2);
+static inline int __tv_ms_cmp2(const struct timeval *tv1, const struct timeval *tv2)
+{
+ if (tv_iseternity(tv1))
+ if (tv_iseternity(tv2))
+ return 0; /* same */
+ else
+ return 1; /* tv1 later than tv2 */
+ else if (tv_iseternity(tv2))
+ return -1; /* tv2 later than tv1 */
+ return tv_ms_cmp(tv1, tv2);
+}
+
+/*
+ * compares <tv1> and <tv2> modulo 1 ms: returns 1 if tv1 <= tv2, 0 if tv1 > tv2,
+ * assuming that TV_ETERNITY is greater than everything. Returns 0 if tv1 is
+ * TV_ETERNITY, and always assumes that tv2 != TV_ETERNITY. Designed to replace
+ * occurrences of (tv_ms_cmp2(tv,now) <= 0).
+ */
+#define tv_ms_le2 _tv_ms_le2
+int _tv_ms_le2(const struct timeval *tv1, const struct timeval *tv2);
+static inline int __tv_ms_le2(const struct timeval *tv1, const struct timeval *tv2)
+{
+ if (likely((unsigned)tv1->tv_sec > (unsigned)tv2->tv_sec + 1))
+ return 0;
+
+ if (likely((unsigned)tv1->tv_sec < (unsigned)tv2->tv_sec))
+ return 1;
+
+ if (likely((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec)) {
+ if ((unsigned)tv2->tv_usec >= (unsigned)tv1->tv_usec + 1000)
+ return 1;
+ else
+ return 0;
+ }
+
+ if (unlikely(((unsigned)tv1->tv_sec == (unsigned)tv2->tv_sec + 1) &&
+ ((unsigned)tv1->tv_usec + 1000000 >= (unsigned)tv2->tv_usec + 1000)))
+ return 0;
+ else
+ return 1;
+}
+
+
+/**** operators **********************************************************/
+
+
/*
 * Returns the time in ms elapsed between tv1 and tv2, assuming that tv1 <= tv2.
 * Must not be used when either argument is eternity.
 */
#define tv_ms_elapsed __tv_ms_elapsed
unsigned long _tv_ms_elapsed(const struct timeval *tv1, const struct timeval *tv2);
static inline unsigned long __tv_ms_elapsed(const struct timeval *tv1, const struct timeval *tv2)
{
	signed long sec_diff  = (signed long)(tv2->tv_sec  - tv1->tv_sec);
	signed long usec_diff = (signed long)(tv2->tv_usec - tv1->tv_usec);

	/* usec_diff may be negative; its truncated-ms value then corrects
	 * the seconds contribution.
	 */
	return (unsigned long)(sec_diff * 1000 + usec_diff / 1000);
}
+
+/*
+ * returns the remaining time between tv1=now and event=tv2
+ * if tv2 is passed, 0 is returned.
+ * Must not be used when either argument is eternity.
+ */
+
+#define tv_ms_remain __tv_ms_remain
+unsigned long _tv_ms_remain(const struct timeval *tv1, const struct timeval *tv2);
+static inline unsigned long __tv_ms_remain(const struct timeval *tv1, const struct timeval *tv2)
+{
+ if (tv_ms_cmp(tv1, tv2) >= 0)
+ return 0; /* event elapsed */
+
+ return __tv_ms_elapsed(tv1, tv2);
+}
+
+/*
+ * returns the remaining time between tv1=now and event=tv2
+ * if tv2 is passed, 0 is returned.
+ * Returns TIME_ETERNITY if tv2 is eternity.
+ */
+#define tv_ms_remain2 _tv_ms_remain2
+unsigned long _tv_ms_remain2(const struct timeval *tv1, const struct timeval *tv2);
+static inline unsigned long __tv_ms_remain2(const struct timeval *tv1, const struct timeval *tv2)
+{
+ if (tv_iseternity(tv2))
+ return TIME_ETERNITY;
+
+ return tv_ms_remain(tv1, tv2);
+}
+
/*
 * Sets <tv> to the sum of <from> and <inc> and returns a pointer to <tv>.
 * Both inputs are assumed normalized (tv_usec < 1000000) so a single carry
 * is sufficient.
 */
#define tv_add _tv_add
struct timeval *_tv_add(struct timeval *tv, const struct timeval *from, const struct timeval *inc);
static inline struct timeval *__tv_add(struct timeval *tv, const struct timeval *from, const struct timeval *inc)
{
	tv->tv_sec  = from->tv_sec  + inc->tv_sec;
	tv->tv_usec = from->tv_usec + inc->tv_usec;
	if (tv->tv_usec >= 1000000) {
		tv->tv_sec++;
		tv->tv_usec -= 1000000;
	}
	return tv;
}
+
+
+/*
+ * If <inc> is set, then add it to <from> and set the result to <tv>, then
+ * return 1, otherwise return 0. It is meant to be used in if conditions.
+ */
+#define tv_add_ifset _tv_add_ifset
+int _tv_add_ifset(struct timeval *tv, const struct timeval *from, const struct timeval *inc);
+static inline int __tv_add_ifset(struct timeval *tv, const struct timeval *from, const struct timeval *inc)
+{
+ if (tv_iseternity(inc))
+ return 0;
+ tv->tv_usec = from->tv_usec + inc->tv_usec;
+ tv->tv_sec = from->tv_sec + inc->tv_sec;
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+ return 1;
+}
+
+/*
+ * adds <inc> to <tv> and returns a pointer <tv>
+ */
+static inline struct timeval *__tv_add2(struct timeval *tv, const struct timeval *inc)
+{
+ tv->tv_usec += inc->tv_usec;
+ tv->tv_sec += inc->tv_sec;
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+ return tv;
+}
+
+
/*
 * Computes the remaining time between tv1=now and event=tv2. If tv2 is in the
 * past, 0 is returned. The result is stored into tv.
 * NOTE(review): the (signed) casts truncate tv_sec/tv_usec differences to
 * int; assumes the differences fit in 32 bits — confirm for 64-bit time_t.
 */
#define tv_remain _tv_remain
struct timeval *_tv_remain(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv);
static inline struct timeval *__tv_remain(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv)
{
	tv->tv_usec = tv2->tv_usec - tv1->tv_usec;
	tv->tv_sec = tv2->tv_sec - tv1->tv_sec;
	if ((signed)tv->tv_sec > 0) {
		if ((signed)tv->tv_usec < 0) {
			/* borrow one second to bring tv_usec back into range */
			tv->tv_usec += 1000000;
			tv->tv_sec--;
		}
	} else if (tv->tv_sec == 0) {
		/* same second: clamp a negative usec difference to zero */
		if ((signed)tv->tv_usec < 0)
			tv->tv_usec = 0;
	} else {
		/* tv2 is in the past: clamp the whole result to zero */
		tv->tv_sec = 0;
		tv->tv_usec = 0;
	}
	return tv;
}
+
+
+/*
+ * Computes the remaining time between tv1=now and event=tv2. if tv2 is passed,
+ * 0 is returned. The result is stored into tv. Returns ETERNITY if tv2 is
+ * eternity.
+ */
+#define tv_remain2 _tv_remain2
+struct timeval *_tv_remain2(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv);
+static inline struct timeval *__tv_remain2(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv)
+{
+ if (tv_iseternity(tv2))
+ return tv_eternity(tv);
+ return __tv_remain(tv1, tv2, tv);
+}
+
+
/*
 * Adds <ms> ms to <from>, stores the result into <tv> and returns a pointer
 * to <tv>.
 */
#define tv_ms_add _tv_ms_add
struct timeval *_tv_ms_add(struct timeval *tv, const struct timeval *from, int ms);
static inline struct timeval *__tv_ms_add(struct timeval *tv, const struct timeval *from, int ms)
{
	tv->tv_sec  = from->tv_sec + ms / 1000;
	tv->tv_usec = from->tv_usec + (ms % 1000) * 1000;
	/* normalize: carry whole seconds out of the usec field */
	while (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
	return tv;
}
+
+
+/*
+ * compares <tv1> and <tv2> : returns 1 if <tv1> is before <tv2>, otherwise 0.
+ * This should be very fast because it's used in schedulers.
+ * It has been optimized to return 1 (so call it in a loop which continues
+ * as long as tv1<=tv2)
+ */
+
+#define tv_isbefore(tv1, tv2) \
+ (unlikely((unsigned)(tv1)->tv_sec < (unsigned)(tv2)->tv_sec) ? 1 : \
+ (unlikely((unsigned)(tv1)->tv_sec > (unsigned)(tv2)->tv_sec) ? 0 : \
+ unlikely((unsigned)(tv1)->tv_usec < (unsigned)(tv2)->tv_usec)))
+
+/*
+ * returns the first event between <tv1> and <tv2> into <tvmin>.
+ * a zero tv is ignored. <tvmin> is returned. If <tvmin> is known
+ * to be the same as <tv1> or <tv2>, it is recommended to use
+ * tv_bound instead.
+ */
+#define tv_min(tvmin, tv1, tv2) ({ \
+ if (tv_isbefore(tv1, tv2)) { \
+ *tvmin = *tv1; \
+ } \
+ else { \
+ *tvmin = *tv2; \
+ } \
+ tvmin; \
+})
+
+/*
+ * returns the first event between <tv1> and <tv2> into <tvmin>.
+ * a zero tv is ignored. <tvmin> is returned. This function has been
+ * optimized to be called as tv_min(a,a,b) or tv_min(b,a,b).
+ */
+#define tv_bound(tv1, tv2) ({ \
+ if (tv_isbefore(tv2, tv1)) \
+ *tv1 = *tv2; \
+ tv1; \
+})
+
+#endif /* _HAPROXY_TIME_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/timeshift.h b/include/haproxy/timeshift.h
new file mode 100644
index 0000000..62e5855
--- /dev/null
+++ b/include/haproxy/timeshift.h
@@ -0,0 +1,10 @@
+#include <sys/time.h>
+#include <sys/epoll.h>
+
+#define gettimeofday(tv, tz) timeshift_gettimeofday(tv, tz)
+#define clock_gettime(clk_id, tp) timeshift_clock_gettime(clk_id, tp)
+#define epoll_wait(epfd, events, maxevents, timeout) timeshift_epoll_wait(epfd, events, maxevents, timeout)
+
+int timeshift_gettimeofday(struct timeval *tv, void *tz);
+int timeshift_clock_gettime(clockid_t clk_id, struct timespec *tp);
+int timeshift_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout);
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
new file mode 100644
index 0000000..357c4c0
--- /dev/null
+++ b/include/haproxy/tinfo-t.h
@@ -0,0 +1,180 @@
+/*
+ * include/haproxy/tinfo-t.h
+ * Definitions of the thread_info structure.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TINFO_T_H
+#define _HAPROXY_TINFO_T_H
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/freq_ctr-t.h>
+#include <haproxy/thread-t.h>
+
+/* forward declarations for types used below */
+struct buffer;
+
+/* Threads sets are known either by a set of absolute thread numbers, or by a
+ * set of relative thread numbers within a group, for each group. The default
+ * is the absolute mode and corresponds to the case where no group is known
+ * (nbgrp == 0). The mode may only be changed when the set is empty (use
+ * thread_set_is_empty() for this).
+ */
+struct thread_set {
+ union {
+ /* absolute mode: one bit per global thread number */
+ ulong abs[(MAX_THREADS + LONGBITS - 1) / LONGBITS];
+ /* relative mode: one per-group thread mask per thread group */
+ ulong rel[MAX_TGROUPS];
+ };
+ ulong grps; /* bit field of all non-empty groups, 0 for abs */
+};
+
+/* tasklet classes */
+enum {
+ TL_URGENT = 0, /* urgent tasklets (I/O callbacks) */
+ TL_NORMAL = 1, /* normal tasks */
+ TL_BULK = 2, /* bulk task/tasklets, streaming I/Os */
+ TL_HEAVY = 3, /* heavy computational tasklets (e.g. TLS handshakes) */
+ TL_CLASSES /* must be last */
+};
+
+/* thread_ctx flags, for ha_thread_ctx[].flags. These flags describe the
+ * thread's state and are visible to other threads, so they must be used
+ * with atomic ops.
+ */
+#define TH_FL_STUCK 0x00000001 /* NOTE(review): presumably set when the thread looks stuck (watchdog) — confirm */
+#define TH_FL_TASK_PROFILING 0x00000002 /* NOTE(review): presumably task profiling enabled on this thread — confirm */
+#define TH_FL_NOTIFIED 0x00000004 /* task was notified about the need to wake up */
+#define TH_FL_SLEEPING 0x00000008 /* thread won't check its task list before next wakeup */
+#define TH_FL_STARTED 0x00000010 /* set once the thread starts */
+#define TH_FL_IN_LOOP 0x00000020 /* set only inside the polling loop */
+
+
+/* Thread group information. This defines a base and a count of global thread
+ * IDs which belong to it, and which can be looked up into thread_info/ctx. It
+ * is set up during parsing and is stable during operation. Thread groups start
+ * at 1 so tgroup[0] describes thread group 1.
+ */
+struct tgroup_info {
+ ulong threads_enabled; /* mask of threads enabled in this group */
+ uint base; /* first thread in this group */
+ uint count; /* number of threads in this group */
+ ulong tgid_bit; /* bit corresponding to the tgroup ID */
+
+ /* pad to cache line (64B): the two zero-length members below add no data;
+ * the aligned(64) on __end rounds the struct size up to a 64B multiple,
+ * and __pad marks where the unused room starts.
+ */
+ char __pad[0]; /* unused except to check remaining room */
+ char __end[0] __attribute__((aligned(64)));
+};
+
+/* This structure describes the group-specific context (e.g. active threads
+ * etc). It uses one cache line per thread to limit false sharing.
+ */
+struct tgroup_ctx {
+ ulong threads_harmless; /* mask of threads that are not modifying anything */
+ ulong threads_idle; /* mask of threads idling in the poller */
+ ulong stopping_threads; /* mask of threads currently stopping */
+
+ struct eb_root timers; /* wait queue (sorted timers tree, global, accessed under wq_lock) */
+
+ uint niced_tasks; /* number of niced tasks in this group's run queues */
+
+ /* pad to cache line (64B); same zero-length-array trick as above */
+ char __pad[0]; /* unused except to check remaining room */
+ char __end[0] __attribute__((aligned(64)));
+};
+
+/* This structure describes all the per-thread info we need. When threads are
+ * disabled, it contains the same info for the single running thread. This is
+ * stable across all of a thread's life, and is being pointed to by the
+ * thread-local "ti" pointer.
+ */
+struct thread_info {
+ const struct tgroup_info *tg; /* config of the thread-group this thread belongs to (read-only) */
+ struct tgroup_ctx *tg_ctx; /* context of the thread-group this thread belongs to (mutable) */
+ uint tid, ltid; /* process-wide and group-wide thread ID (start at 0) */
+ ulong ltid_bit; /* bit masks for the tid/ltid */
+ uint tgid; /* ID of the thread group this thread belongs to (starts at 1; 0=unset) */
+ /* 32-bit hole here */
+
+ ullong pth_id; /* the pthread_t cast to a ullong */
+ void *stack_top; /* the top of the stack when entering the thread */
+
+ /* pad to cache line (64B): aligned(64) on __end rounds the struct size up */
+ char __pad[0]; /* unused except to check remaining room */
+ char __end[0] __attribute__((aligned(64)));
+};
+
+/* This structure describes all the per-thread context we need. This is
+ * essentially the scheduler-specific stuff and a few important per-thread
+ * lists that need to be thread-local. We take care of splitting this into
+ * separate cache lines.
+ * NOTE(review): ALWAYS_ALIGN appears to be a project macro inserting
+ * alignment padding at that point — confirm its definition in the API
+ * headers before relying on the exact layout described below.
+ */
+struct thread_ctx {
+ // first and second cache lines on 64 bits: thread-local operations only.
+ struct eb_root timers; /* tree constituting the per-thread wait queue */
+ struct eb_root rqueue; /* tree constituting the per-thread run queue */
+ struct task *current; /* current task (not tasklet) */
+ int current_queue; /* points to current tasklet list being run, -1 if none */
+ unsigned int nb_tasks; /* number of tasks allocated on this thread */
+ uint8_t tl_class_mask; /* bit mask of non-empty tasklets classes */
+
+ // 7 bytes hole here
+ struct list pool_lru_head; /* oldest objects in thread-local pool caches */
+ struct list buffer_wq; /* buffer waiters */
+ struct list streams; /* list of streams attached to this thread */
+ struct list quic_conns; /* list of active quic-conns attached to this thread */
+ struct list quic_conns_clo; /* list of closing quic-conns attached to this thread */
+ struct list queued_checks; /* checks waiting for a connection slot */
+ unsigned int nb_rhttp_conns; /* count of current conns used for active reverse HTTP */
+
+ ALWAYS_ALIGN(2*sizeof(void*));
+ struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */
+
+ // third cache line here on 64 bits: accessed mostly using atomic ops
+ ALWAYS_ALIGN(64);
+ struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
+ unsigned int rqueue_ticks; /* Insertion counter for the run queue */
+ unsigned int rq_total; /* total size of the run queue, prio_tree + tasklets */
+ int tasks_in_list; /* Number of tasks in the per-thread tasklets list */
+ uint idle_pct; /* idle to total ratio over last sample (percent) */
+ uint flags; /* thread flags, TH_FL_*, atomic! */
+ uint active_checks; /* number of active health checks on this thread, incl migrated */
+
+ uint32_t sched_wake_date; /* current task/tasklet's wake date or 0 */
+ uint32_t sched_call_date; /* current task/tasklet's call date (valid if sched_wake_date > 0) */
+ struct sched_activity *sched_profile_entry; /* profile entry in use by the current task/tasklet, only if sched_wake_date>0 */
+
+ uint64_t prev_cpu_time; /* previous per thread CPU time */
+ uint64_t prev_mono_time; /* previous system wide monotonic time */
+
+ struct eb_root rqueue_shared; /* run queue fed by other threads */
+ __decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
+
+ struct freq_ctr out_32bps; /* #of 32-byte blocks emitted per second */
+ uint running_checks; /* number of health checks currently running on this thread */
+
+ unsigned long long out_bytes; /* total #of bytes emitted */
+ unsigned long long spliced_out_bytes; /* total #of bytes emitted though a kernel pipe */
+ struct buffer *thread_dump_buffer; /* NULL out of dump, valid during a dump, 0x01 once done */
+
+ ALWAYS_ALIGN(128);
+};
+
+
+#endif /* _HAPROXY_TINFO_T_H */
diff --git a/include/haproxy/tinfo.h b/include/haproxy/tinfo.h
new file mode 100644
index 0000000..ddb26aa
--- /dev/null
+++ b/include/haproxy/tinfo.h
@@ -0,0 +1,120 @@
+/*
+ * include/haproxy/tinfo.h
+ * Export of ha_thread_info[] and ti pointer.
+ *
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TINFO_H
+#define _HAPROXY_TINFO_H
+
+#include <haproxy/api.h>
+#include <haproxy/tinfo-t.h>
+#include <haproxy/intops.h>
+
+/* the structs are in thread.c */
+extern struct tgroup_info ha_tgroup_info[MAX_TGROUPS];
+extern THREAD_LOCAL const struct tgroup_info *tg;
+
+extern struct thread_info ha_thread_info[MAX_THREADS];
+extern THREAD_LOCAL const struct thread_info *ti; /* thread_info for the current thread */
+
+extern struct tgroup_ctx ha_tgroup_ctx[MAX_TGROUPS];
+extern THREAD_LOCAL struct tgroup_ctx *tg_ctx; /* ha_tgroup_ctx for the current thread */
+
+extern struct thread_ctx ha_thread_ctx[MAX_THREADS];
+extern THREAD_LOCAL struct thread_ctx *th_ctx; /* ha_thread_ctx for the current thread */
+
+/* returns the number of threads set in set <ts>. */
+static inline int thread_set_count(const struct thread_set *ts)
+{
+ int i, n;
+
+ /* iterating over tgroups guarantees to visit all possible threads, the
+ * opposite is not true.
+ * NOTE(review): in absolute mode rel[] aliases abs[] through the union,
+ * so this count is only exact if MAX_TGROUPS covers at least as many
+ * words as abs[] — TODO confirm the relation between MAX_TGROUPS and
+ * MAX_THREADS/LONGBITS guarantees this.
+ */
+ for (i = n = 0; i < MAX_TGROUPS; i++)
+ n += my_popcountl(ts->rel[i]);
+ return n;
+}
+
+/* returns zero if the thread set <ts> has at least one thread set,
+ * otherwise non-zero. Note the inverted convention: 1 means "empty".
+ */
+static inline int thread_set_is_empty(const struct thread_set *ts)
+{
+ int i;
+
+ /* iterating over tgroups guarantees to visit all possible threads, the
+ * opposite is not true. In absolute mode rel[] aliases abs[] through
+ * the union, so a non-zero word in either representation is detected.
+ */
+ for (i = 0; i < MAX_TGROUPS; i++)
+ if (ts->rel[i])
+ return 0;
+ return 1;
+}
+
+/* returns the number starting at 1 of the <n>th thread-group set in thread set
+ * <ts>, or zero if the set is empty or if thread numbers are only absolute.
+ * <n> starts at zero and corresponds to the number of non-empty groups to be
+ * skipped (i.e. 0 returns the first one).
+ */
+static inline int thread_set_nth_group(const struct thread_set *ts, int n)
+{
+ int i;
+
+ if (ts->grps) {
+ for (i = 0; i < MAX_TGROUPS; i++)
+ /* "!n--" matches only when <n> non-empty groups have
+ * already been skipped, decrementing on each one.
+ */
+ if (ts->rel[i] && !n--)
+ return i + 1;
+ }
+ return 0;
+}
+
+/* returns the thread mask of the <n>th assigned thread-group in the thread
+ * set <ts> for relative sets, the first thread mask at all in case of absolute
+ * sets, or zero if the set is empty. This is only used temporarily to ease the
+ * transition. <n> starts at zero and corresponds to the number of non-empty
+ * groups to be skipped (i.e. 0 returns the first one).
+ */
+static inline ulong thread_set_nth_tmask(const struct thread_set *ts, int n)
+{
+ int i;
+
+ if (ts->grps) {
+ for (i = 0; i < MAX_TGROUPS; i++)
+ if (ts->rel[i] && !n--)
+ return ts->rel[i];
+ }
+ /* absolute mode (grps==0), or <n> beyond the last non-empty group:
+ * fall back to the first absolute word (0 when the set is empty).
+ */
+ return ts->abs[0];
+}
+
+/* Pins the thread set to the specified thread mask on group 1 (use ~0UL for
+ * all threads). This is for compatibility with some rare legacy code. If a
+ * "thread" directive on a bind line is parsed, this one will be overwritten.
+ * Note: this forces the set into relative mode (grps = 1) and clears every
+ * other group's mask.
+ */
+static inline void thread_set_pin_grp1(struct thread_set *ts, ulong mask)
+{
+ int i;
+
+ ts->grps = 1;
+ ts->rel[0] = mask;
+ for (i = 1; i < MAX_TGROUPS; i++)
+ ts->rel[i] = 0;
+}
+
+#endif /* _HAPROXY_TINFO_H */
diff --git a/include/haproxy/tools-t.h b/include/haproxy/tools-t.h
new file mode 100644
index 0000000..32d8193
--- /dev/null
+++ b/include/haproxy/tools-t.h
@@ -0,0 +1,166 @@
+/*
+ * include/haproxy/tools-t.h
+ * This files contains some general purpose macros and structures.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TOOLS_T_H
+#define _HAPROXY_TOOLS_T_H
+
+/* size used for max length of decimal representation of long long int. */
+/* (20 characters; the terminating NUL is excluded by the "-1") */
+#define NB_LLMAX_STR (sizeof("-9223372036854775807")-1)
+
+/* number of itoa_str entries */
+#define NB_ITOA_STR 16
+
+/* maximum quoted string length (truncated above) */
+#define QSTR_SIZE 200
+#define NB_QSTR 10
+
+/* returns 1 only if only zero or one bit is set in X, which means that X is a
+ * power of 2, and 0 otherwise */
+#define POWEROF2(x) (((x) & ((x)-1)) == 0)
+
+/* return an integer of type <ret> with only the highest bit set. <ret> may be
+ * both a variable or a type. (e.g. 0x80 for an 8-bit type; relies on the
+ * typeof extension)
+ */
+#define MID_RANGE(ret) ((typeof(ret))1 << (8*sizeof(ret) - 1))
+
+/* return the largest possible integer of type <ret>, with all bits set */
+#define MAX_RANGE(ret) (~(typeof(ret))0)
+
+/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
+ * use in macros arguments. It relies on the GNU ", ##__VA_ARGS__" extension
+ * to swallow the comma when no argument is passed.
+ */
+#define DEFNULL(...) _FIRST_ARG(NULL, ##__VA_ARGS__, NULL)
+#define _FIRST_ARG(a, b, ...) b
+
+/* options flags for parse_line() */
+#define PARSE_OPT_SHARP 0x00000001 // '#' ends the line
+#define PARSE_OPT_BKSLASH 0x00000002 // '\' escapes chars
+#define PARSE_OPT_SQUOTE 0x00000004 // "'" encloses a string
+#define PARSE_OPT_DQUOTE 0x00000008 // '"' encloses a string
+#define PARSE_OPT_ENV 0x00000010 // '$' is followed by environment variables
+#define PARSE_OPT_INPLACE 0x00000020 // parse and tokenize in-place (src == dst)
+#define PARSE_OPT_WORD_EXPAND 0x00000040 // '[*]' suffix to expand an environment variable as several individual arguments
+
+/* return error flags from parse_line() */
+#define PARSE_ERR_TOOLARGE 0x00000001 // result is too large for initial outlen
+#define PARSE_ERR_TOOMANY 0x00000002 // more words than initial nbargs
+#define PARSE_ERR_QUOTE 0x00000004 // unmatched quote (offending one at errptr)
+#define PARSE_ERR_BRACE 0x00000008 // unmatched brace (offending one at errptr)
+#define PARSE_ERR_HEX 0x00000010 // unparsable hex sequence (at errptr)
+#define PARSE_ERR_VARNAME 0x00000020 // invalid variable name (at errptr)
+#define PARSE_ERR_OVERLAP 0x00000040 // output overlaps with input, need to allocate
+#define PARSE_ERR_WRONG_EXPAND 0x00000080 // unparsable word expansion sequence
+
+/* special return values for the time parser (parse_time_err()).
+ * These are sentinel pointer values, not valid addresses: never dereference
+ * them, only compare against them.
+ */
+#define PARSE_TIME_UNDER ((char *)1)
+#define PARSE_TIME_OVER ((char *)2)
+
+/* unit flags to pass to parse_time_err() */
+#define TIME_UNIT_US 0x0000
+#define TIME_UNIT_MS 0x0001
+#define TIME_UNIT_S 0x0002
+#define TIME_UNIT_MIN 0x0003
+#define TIME_UNIT_HOUR 0x0004
+#define TIME_UNIT_DAY 0x0005
+#define TIME_UNIT_MASK 0x0007
+
+/* durations expressed in seconds.
+ * NOTE(review): very short unprefixed macro names — prone to collisions with
+ * other headers; keep an eye on inclusion order.
+ */
+#define SEC 1
+#define MINUTE (60 * SEC)
+#define HOUR (60 * MINUTE)
+#define DAY (24 * HOUR)
+
+/* Address parsing options for use with str2sa_range() */
+#define PA_O_RESOLVE 0x00000001 /* do resolve the FQDN to an IP address */
+#define PA_O_PORT_OK 0x00000002 /* ports are supported */
+#define PA_O_PORT_MAND 0x00000004 /* ports are mandatory */
+#define PA_O_PORT_RANGE 0x00000008 /* port ranges are supported */
+#define PA_O_PORT_OFS 0x00000010 /* port offsets are supported */
+#define PA_O_SOCKET_FD 0x00000020 /* inherited socket FDs are supported */
+#define PA_O_RAW_FD 0x00000040 /* inherited raw FDs are supported (pipes, ttys, ...) */
+#define PA_O_DGRAM 0x00000080 /* the address can be used for a datagram socket (in or out) */
+#define PA_O_STREAM 0x00000100 /* the address can be used for streams (in or out) */
+#define PA_O_XPRT 0x00000200 /* transport protocols may be specified */
+#define PA_O_CONNECT 0x00000400 /* the protocol must have a ->connect method */
+#define PA_O_DEFAULT_DGRAM 0x00000800 /* by default, this address will be used for a datagram socket */
+
+/* UTF-8 decoder status */
+#define UTF8_CODE_OK 0x00
+#define UTF8_CODE_OVERLONG 0x10
+#define UTF8_CODE_INVRANGE 0x20
+#define UTF8_CODE_BADSEQ 0x40
+
+/* HAP_STRING() makes a string from a literal while HAP_XSTRING() first
+ * evaluates the argument and is suited to pass macros.
+ *
+ * They allow macros like PCRE_MAJOR to be defined without quotes, which
+ * is convenient for applications that want to test its value.
+ */
+#define HAP_STRING(...) #__VA_ARGS__
+#define HAP_XSTRING(...) HAP_STRING(__VA_ARGS__)
+
+/* operators to compare values. They're ordered that way so that the lowest bit
+ * serves as a negation for the test and contains all tests that are not equal.
+ * (e.g. STD_OP_GT == STD_OP_LE ^ 1, so "op ^ 1" negates any test)
+ */
+enum {
+ STD_OP_LE = 0, STD_OP_GT = 1,
+ STD_OP_EQ = 2, STD_OP_NE = 3,
+ STD_OP_GE = 4, STD_OP_LT = 5,
+};
+
+/* URL scheme as recognized by url2sa() */
+enum http_scheme {
+ SCH_HTTP,
+ SCH_HTTPS,
+};
+
+/* output format used by url2sa() */
+struct split_url {
+ enum http_scheme scheme;
+ /* NOTE(review): <host> presumably points into the caller's URL buffer
+ * and is not NUL-terminated (hence host_len) — confirm against url2sa().
+ */
+ const char *host;
+ int host_len;
+};
+
+/* generic structure associating a name and a value, for use in arrays */
+struct name_desc {
+ const char *name;
+ const char *desc;
+};
+
+/* an IPv4 or IPv6 network (address + mask), discriminated by <family> */
+struct net_addr {
+ int family; /* AF_INET or AF_INET6 if defined, AF_UNSET if undefined */
+ union {
+ struct {
+ struct in_addr ip;
+ struct in_addr mask;
+ } v4;
+ struct {
+ struct in6_addr ip;
+ struct in6_addr mask;
+ } v6;
+ } addr;
+};
+
+/* holds socket and xprt types for a given address */
+struct net_addr_type {
+ int proto_type; // socket layer
+ int xprt_type; // transport layer
+};
+#endif /* _HAPROXY_TOOLS_T_H */
diff --git a/include/haproxy/tools.h b/include/haproxy/tools.h
new file mode 100644
index 0000000..3726f63
--- /dev/null
+++ b/include/haproxy/tools.h
@@ -0,0 +1,1179 @@
+/*
+ * include/haproxy/tools.h
+ * This files contains some general purpose functions and macros.
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TOOLS_H
+#define _HAPROXY_TOOLS_H
+
+#ifdef USE_BACKTRACE
+#define _GNU_SOURCE
+#include <execinfo.h>
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdarg.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <import/eb32sctree.h>
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/chunk.h>
+#include <haproxy/intops.h>
+#include <haproxy/namespace-t.h>
+#include <haproxy/protocol-t.h>
+#include <haproxy/tools-t.h>
+
+/****** string-specific macros and functions ******/
+/* if a > max, then bound <a> to <max>. The macro returns the new <a>.
+ * WARNING: <a> is evaluated several times (and assigned to), so it must be a
+ * side-effect-free lvalue. Uses a GNU statement expression.
+ */
+#define UBOUND(a, max) ({ typeof(a) b = (max); if ((a) > b) (a) = b; (a); })
+
+/* if a < min, then bound <a> to <min>. The macro returns the new <a>.
+ * Same multiple-evaluation caveat as UBOUND().
+ */
+#define LBOUND(a, min) ({ typeof(a) b = (min); if ((a) < b) (a) = b; (a); })
+
+/* swaps <a> and <b> in place; both are evaluated twice, so avoid side effects */
+#define SWAP(a, b) do { typeof(a) t; t = a; a = b; b = t; } while(0)
+
+/* use if you want to return a simple hash. Key 0 doesn't hash. */
+#define HA_ANON_STR(key, str) hash_anon(key, str, "", "")
+
+/* use if you want to return a hash like : ID('hash'). Key 0 doesn't hash. */
+#define HA_ANON_ID(key, str) hash_anon(key, str, "ID(", ")")
+
+/* use if you want to return a hash like : PATH('hash'). Key 0 doesn't hash. */
+#define HA_ANON_PATH(key, str) hash_anon(key, str, "PATH(", ")")
+
+/* use only in a function that contains an appctx (key comes from appctx);
+ * it expands a reference to the local variable "appctx".
+ */
+#define HA_ANON_CLI(str) hash_anon(appctx->cli_anon_key, str, "", "")
+
+
+/*
+ * copies at most <size-1> chars from <src> to <dst>. Last char is always
+ * set to 0, unless <size> is 0. The number of chars copied is returned
+ * (excluding the terminating zero).
+ * This code has been optimized for size and speed : on x86, it's 45 bytes
+ * long, uses only registers, and consumes only 4 cycles per char.
+ */
+extern int strlcpy2(char *dst, const char *src, int size);
+
+/*
+ * This function simply returns a locally allocated string containing
+ * the ascii representation for number 'n' in decimal.
+ */
+extern THREAD_LOCAL int itoa_idx; /* index of next itoa_str to use */
+extern THREAD_LOCAL char itoa_str[][171];
+extern int build_is_static;
+extern char *ultoa_r(unsigned long n, char *buffer, int size);
+extern char *lltoa_r(long long int n, char *buffer, int size);
+extern char *sltoa_r(long n, char *buffer, int size);
+extern const char *ulltoh_r(unsigned long long n, char *buffer, int size);
+size_t flt_trim(char *buffer, size_t num_start, size_t len);
+char *ftoa_r(double n, char *buffer, int size);
+/* NOTE: always formats into slot 0 of itoa_str, so at most one ultoa() result
+ * may be live at a time (unlike U2A() below which rotates across slots).
+ */
+static inline const char *ultoa(unsigned long n)
+{
+ return ultoa_r(n, itoa_str[0], sizeof(itoa_str[0]));
+}
+
+/*
+ * unsigned long long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *ulltoa(unsigned long long n, char *dst, size_t size);
+
+
+/*
+ * unsigned long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *ultoa_o(unsigned long n, char *dst, size_t size);
+
+/*
+ * signed long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *ltoa_o(long int n, char *dst, size_t size);
+
+/*
+ * signed long long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *lltoa(long long n, char *dst, size_t size);
+
+/*
+ * write a ascii representation of a unsigned into dst,
+ * return a pointer to the last character
+ * Pad the ascii representation with '0', using size.
+ */
+char *utoa_pad(unsigned int n, char *dst, size_t size);
+
+/*
+ * This function simply returns a locally allocated string containing the ascii
+ * representation for number 'n' in decimal, unless n is 0 in which case it
+ * returns the alternate string (or an empty string if the alternate string is
+ * NULL). It use is intended for limits reported in reports, where it's
+ * desirable not to display anything if there is no limit. Warning! it shares
+ * the same vector as ultoa_r().
+ */
+extern const char *limit_r(unsigned long n, char *buffer, int size, const char *alt);
+
+/* returns a locally allocated string containing the ASCII representation of
+ * the number 'n' in decimal. Up to NB_ITOA_STR calls may be used in the same
+ * function call (eg: printf), shared with the other similar functions making
+ * use of itoa_str[].
+ */
+static inline const char *U2A(unsigned long n)
+{
+ const char *ret = ultoa_r(n, itoa_str[itoa_idx], sizeof(itoa_str[0]));
+ if (++itoa_idx >= NB_ITOA_STR)
+ itoa_idx = 0;
+ return ret;
+}
+
+/* returns a locally allocated string containing the HTML representation of
+ * the number 'n' in decimal. Up to NB_ITOA_STR calls may be used in the same
+ * function call (eg: printf), shared with the other similar functions making
+ * use of itoa_str[].
+ */
+static inline const char *U2H(unsigned long long n)
+{
+ const char *ret = ulltoh_r(n, itoa_str[itoa_idx], sizeof(itoa_str[0]));
+ if (++itoa_idx >= NB_ITOA_STR)
+ itoa_idx = 0;
+ return ret;
+}
+
+/* returns a locally allocated string containing the ASCII representation of
+ * the number 'n' in decimal. Up to NB_ITOA_STR calls may be used in the same
+ * function call (eg: printf), shared with the other similar functions making
+ * use of itoa_str[].
+ */
+static inline const char *F2A(double n)
+{
+ const char *ret = ftoa_r(n, itoa_str[itoa_idx], sizeof(itoa_str[0]));
+ if (++itoa_idx >= NB_ITOA_STR)
+ itoa_idx = 0;
+ return ret;
+}
+
+/* NOTE(review): despite the name, F2H() is currently byte-identical to F2A():
+ * it formats via ftoa_r() and produces plain ASCII, not HTML (there is no
+ * HTML variant of ftoa_r, unlike ulltoh_r used by U2H). Kept for API
+ * symmetry. Up to NB_ITOA_STR calls may be used in the same function call
+ * (eg: printf), shared with the other similar functions making use of
+ * itoa_str[].
+ */
+static inline const char *F2H(double n)
+{
+ const char *ret = ftoa_r(n, itoa_str[itoa_idx], sizeof(itoa_str[0]));
+ if (++itoa_idx >= NB_ITOA_STR)
+ itoa_idx = 0;
+ return ret;
+}
+
+/* returns a locally allocated string containing the ASCII representation of
+ * the number 'n' in decimal, or <alt> when n is 0 (see limit_r()). Up to
+ * NB_ITOA_STR calls may be used in the same function call (eg: printf),
+ * shared with the other similar functions making use of itoa_str[].
+ */
+static inline const char *LIM2A(unsigned long n, const char *alt)
+{
+ const char *ret = limit_r(n, itoa_str[itoa_idx], sizeof(itoa_str[0]), alt);
+ if (++itoa_idx >= NB_ITOA_STR)
+ itoa_idx = 0;
+ return ret;
+}
+
+/* returns a locally allocated string containing the quoted encoding of the
+ * input string. The output may be truncated to QSTR_SIZE chars, but it is
+ * guaranteed that the string will always be properly terminated. Quotes are
+ * encoded by doubling them as is commonly done in CSV files. QSTR_SIZE must
+ * always be at least 4 chars.
+ */
+const char *qstr(const char *str);
+
+/* returns <str> or its quote-encoded equivalent if it contains at least one
+ * quote or a comma. This is aimed at building CSV-compatible strings. When
+ * no quoting is needed, <str> itself is returned (zero-copy); otherwise the
+ * result comes from qstr()'s shared buffer and obeys its lifetime rules.
+ */
+static inline const char *cstr(const char *str)
+{
+ const char *p = str;
+
+ while (*p) {
+ if (*p == ',' || *p == '"')
+ return qstr(str);
+ p++;
+ }
+ return str;
+}
+
+/*
+ * Returns non-zero if character <s> is a hex digit (0-9, a-f, A-F), else zero.
+ */
+extern int ishex(char s);
+
+/*
+ * Checks <name> for invalid characters. Valid chars are [A-Za-z0-9_:.-]. If an
+ * invalid character is found, a pointer to it is returned. If everything is
+ * fine, NULL is returned.
+ */
+extern const char *invalid_char(const char *name);
+
+/*
+ * Checks <name> for invalid characters. Valid chars are [A-Za-z0-9_.-].
+ * If an invalid character is found, a pointer to it is returned.
+ * If everything is fine, NULL is returned.
+ */
+extern const char *invalid_domainchar(const char *name);
+
+/*
+ * Checks <name> for invalid characters. Valid chars are [A-Za-z_.-].
+ * If an invalid character is found, a pointer to it is returned.
+ * If everything is fine, NULL is returned.
+ */
+extern const char *invalid_prefix_char(const char *name);
+
+/* returns true if <c> is an identifier character, that is, a digit, a letter,
+ * or '-', '+', '_', ':' or '.'. This is usable for proxy names, server names,
+ * ACL names, sample fetch names, and converter names.
+ * Note: isalnum() is locale-dependent; under a non-"C" locale additional
+ * characters may be accepted as letters.
+ */
+static inline int is_idchar(char c)
+{
+ return isalnum((unsigned char)c) ||
+ c == '.' || c == '_' || c == '-' || c == '+' || c == ':';
+}
+
+/*
+ * converts <str> to a locally allocated struct sockaddr_storage *, and a
+ * port range consisting in two integers. The low and high end are always set
+ * even if the port is unspecified, in which case (0,0) is returned. The low
+ * port is set in the sockaddr. Thus, it is enough to check the size of the
+ * returned range to know if an array must be allocated or not. The format is
+ * "addr[:[port[-port]]]", where "addr" can be a dotted IPv4 address, an IPv6
+ * address, a host name, or empty or "*" to indicate INADDR_ANY. If an IPv6
+ * address wants to ignore port, it must be terminated by a trailing colon (':').
+ * The IPv6 '::' address is IN6ADDR_ANY, so in order to bind to a given port on
+ * IPv6, use ":::port". NULL is returned if the host part cannot be resolved.
+ * If <pfx> is non-null, it is used as a string prefix before any path-based
+ * address (typically the path to a unix socket).
+ */
+struct sockaddr_storage *str2sa_range(const char *str, int *port, int *low, int *high, int *fd,
+ struct protocol **proto, struct net_addr_type *sa_type,
+ char **err, const char *pfx, char **fqdn, unsigned int opts);
+
+
+/* converts <addr> and <port> into a string representation of the address and port. This is sort
+ * of an inverse of str2sa_range, with some restrictions. The supported families are AF_INET,
+ * AF_INET6, AF_UNIX, and AF_CUST_SOCKPAIR. If the family is unsopported NULL is returned.
+ * If map_ports is true, then the sign of the port is included in the output, to indicate it is
+ * relative to the incoming port. AF_INET and AF_INET6 will be in the form "<addr>:<port>".
+ * AF_UNIX will either be just the path (if using a pathname) or "abns@<path>" if it is abstract.
+ * AF_CUST_SOCKPAIR will be of the form "sockpair@<fd>".
+ *
+ * The returned char* is allocated, and it is the responsibility of the caller to free it.
+ */
+char *sa2str(const struct sockaddr_storage *addr, int port, int map_ports);
+
+/* converts <str> to a struct in_addr containing a network mask. It can be
+ * passed in dotted form (255.255.255.0) or in CIDR form (24). It returns 1
+ * if the conversion succeeds otherwise zero.
+ */
+int str2mask(const char *str, struct in_addr *mask);
+
+/* converts <str> to a struct in6_addr containing a network mask. It can be
+ * passed in quadruplet form (ffff:ffff::) or in CIDR form (64). It returns 1
+ * if the conversion succeeds otherwise zero.
+ */
+int str2mask6(const char *str, struct in6_addr *mask);
+
+/* convert <cidr> to struct in_addr <mask>. It returns 1 if the conversion
+ * succeeds otherwise non-zero.
+ */
+int cidr2dotted(int cidr, struct in_addr *mask);
+
+/*
+ * converts <str> to two struct in_addr* which must be pre-allocated.
+ * The format is "addr[/mask]", where "addr" cannot be empty, and mask
+ * is optional and either in the dotted or CIDR notation.
+ * Note: "addr" can also be a hostname. Returns 1 if OK, 0 if error.
+ */
+int str2net(const char *str, int resolve, struct in_addr *addr, struct in_addr *mask);
+
+/* str2ip and str2ip2:
+ *
+ * converts <str> to a struct sockaddr_storage* provided by the caller. The
+ * caller must have zeroed <sa> first, and may have set sa->ss_family to force
+ * parse a specific address format. If the ss_family is 0 or AF_UNSPEC, then
+ * the function tries to guess the address family from the syntax. If the
+ * family is forced and the format doesn't match, an error is returned. The
+ * string is assumed to contain only an address, no port. The address can be a
+ * dotted IPv4 address, an IPv6 address, a host name, or empty or "*" to
+ * indicate INADDR_ANY. NULL is returned if the host part cannot be resolved.
+ * The return address will only have the address family and the address set,
+ * all other fields remain zero. The string is not supposed to be modified.
+ * The IPv6 '::' address is IN6ADDR_ANY.
+ *
+ * str2ip2:
+ *
+ * If <resolve> is set, this function try to resolve DNS, otherwise, it returns
+ * NULL result.
+ */
+struct sockaddr_storage *str2ip2(const char *str, struct sockaddr_storage *sa, int resolve);
+/* resolving variant of str2ip2() (resolve=1): may block on a DNS lookup */
+static inline struct sockaddr_storage *str2ip(const char *str, struct sockaddr_storage *sa)
+{
+ return str2ip2(str, sa, 1);
+}
+
+/*
+ * converts <str> to two struct in6_addr* which must be pre-allocated.
+ * The format is "addr[/mask]", where "addr" cannot be empty, and mask
+ * is an optional number of bits (128 being the default).
+ * Returns 1 if OK, 0 if error.
+ */
+int str62net(const char *str, struct in6_addr *addr, unsigned char *mask);
+
+/*
+ * Parse IP address found in url.
+ */
+int url2ipv4(const char *addr, struct in_addr *dst);
+
+/*
+ * Resolve destination server from URL. Convert <str> to a sockaddr_storage*.
+ */
+int url2sa(const char *url, int ulen, struct sockaddr_storage *addr, struct split_url *out);
+
+/* Tries to convert a sockaddr_storage address to text form. Upon success, the
+ * address family is returned so that it's easy for the caller to adapt to the
+ * output format. Zero is returned if the address family is not supported. -1
+ * is returned upon error, with errno set. AF_INET, AF_INET6 and AF_UNIX are
+ * supported.
+ */
+int addr_to_str(const struct sockaddr_storage *addr, char *str, int size);
+
+/* Tries to convert a sockaddr_storage port to text form. Upon success, the
+ * address family is returned so that it's easy for the caller to adapt to the
+ * output format. Zero is returned if the address family is not supported. -1
+ * is returned upon error, with errno set. AF_INET, AF_INET6 and AF_UNIX are
+ * supported.
+ */
+int port_to_str(const struct sockaddr_storage *addr, char *str, int size);
+
+/* check if the given address is local to the system or not. It will return
+ * -1 when it's not possible to know, 0 when the address is not local, 1 when
+ * it is. We don't want to iterate over all interfaces for this (and it is not
+ * portable). So instead we try to bind in UDP to this address on a free non
+ * privileged port and to connect to the same address, port 0 (connect doesn't
+ * care). If it succeeds, we own the address. Note that non-inet addresses are
+ * considered local since they're most likely AF_UNIX.
+ */
+int addr_is_local(const struct netns_entry *ns,
+ const struct sockaddr_storage *orig);
+
+/* will try to encode the string <string> replacing all characters tagged in
+ * <map> with the hexadecimal representation of their ASCII-code (2 digits)
+ * prefixed by <escape>, and will store the result between <start> (included)
+ * and <stop> (excluded), and will always terminate the string with a '\0'
+ * before <stop>. The position of the '\0' is returned if the conversion
+ * completes. If bytes are missing between <start> and <stop>, then the
+ * conversion will be incomplete and truncated. If <stop> <= <start>, the '\0'
+ * cannot even be stored so we return <start> without writing the 0.
+ * The input string must also be zero-terminated.
+ */
+extern const char hextab[];
+extern long query_encode_map[];
+char *encode_string(char *start, char *stop,
+ const char escape, const long *map,
+ const char *string);
+
+/*
+ * Same behavior, except that it encodes chunk <chunk> instead of a string.
+ */
+char *encode_chunk(char *start, char *stop,
+ const char escape, const long *map,
+ const struct buffer *chunk);
+
+/*
+ * Tries to prefix characters tagged in the <map> with the <escape>
+ * character. The input <string> is processed until string_stop
+ * is reached or NULL-byte is encountered. The result will
+ * be stored between <start> (included) and <stop> (excluded). This
+ * function will always try to terminate the resulting string with a '\0'
+ * before <stop>, and will return its position if the conversion
+ * completes.
+ */
+char *escape_string(char *start, char *stop,
+ const char escape, const long *map,
+ const char *string, const char *string_stop);
+
+/* Check a string for using it in a CSV output format. If the string contains
+ * one of the following four chars <">, <,>, CR or LF, the string is
+ * encapsulated between <"> and the <"> are escaped by a <""> sequence.
+ * <str> is the input string to be escaped. The function assumes that
+ * the input string is null-terminated.
+ *
+ * If <quote> is 0, the result is returned escaped but without double quote.
+ * It is useful if the escaped string is used between double quotes in the
+ * format.
+ *
+ * printf("..., \"%s\", ...\r\n", csv_enc(str, 0, 0, &trash));
+ *
+ * If <quote> is 1, the converter puts the quotes only if any character is
+ * escaped. If <quote> is 2, the converter always puts the quotes.
+ *
+ * If <oneline> is not 0, CRs are skipped and LFs are replaced by spaces.
+ * This re-formats multi-line strings to only one line. The purpose is to
+ * allow a line by line parsing but also to keep the output compliant with
+ * the CLI which uses LF to define the end of the response.
+ *
+ * If <oneline> is 2, in addition to the previous action, the trailing
+ * spaces are removed.
+ *
+ * <output> is a struct chunk used for storing the output string.
+ *
+ * The function returns the converted string on its output. If an error
+ * occurs, the function returns an empty string. This type of output is useful
+ * for using the function directly as printf() argument.
+ *
+ * If the output buffer is too short to contain the input string, the result
+ * is truncated.
+ *
+ * This function appends the encoding to the existing output chunk. Please
+ * use csv_enc() instead if you want to replace the output chunk.
+ */
+const char *csv_enc_append(const char *str, int quote, int online,
+ struct buffer *output);
+
+/* same as above but the output chunk is reset first */
+static inline const char *csv_enc(const char *str, int quote, int oneline,
+ struct buffer *output)
+{
+ chunk_reset(output);
+ return csv_enc_append(str, quote, oneline, output);
+}
+
+/* Decode an URL-encoded string in-place. The resulting string might
+ * be shorter. If some forbidden characters are found, the conversion is
+ * aborted, the string is truncated before the issue and a negative value is
+ * returned, otherwise the operation returns the length of the decoded string.
+ * If the 'in_form' argument is non-nul the string is assumed to be part of
+ * an "application/x-www-form-urlencoded" encoded string, and the '+' will be
+ * turned to a space. If it's zero, this will only be done after a question
+ * mark ('?').
+ */
+int url_decode(char *string, int in_form);
+
+unsigned int inetaddr_host(const char *text);
+unsigned int inetaddr_host_lim(const char *text, const char *stop);
+unsigned int inetaddr_host_lim_ret(char *text, char *stop, char **ret);
+
+/* Function that hashes or not a string according to the anonymizing key (scramble). */
+const char *hash_anon(uint32_t scramble, const char *string2hash, const char *prefix, const char *suffix);
+
+/* Function that hashes or not an ip according to the ipstring entered */
+const char * hash_ipanon(uint32_t scramble, char *ipstring, int hasport);
+
+static inline char *cut_crlf(char *s) {
+
+ while (*s != '\r' && *s != '\n') {
+ char *p = s++;
+
+ if (!*p)
+ return p;
+ }
+
+ *s++ = '\0';
+
+ return s;
+}
+
+static inline char *ltrim(char *s, char c) {
+
+ if (c)
+ while (*s == c)
+ s++;
+
+ return s;
+}
+
+static inline char *rtrim(char *s, char c) {
+
+ char *p = s + strlen(s);
+
+ while (p-- > s)
+ if (*p == c)
+ *p = '\0';
+ else
+ break;
+
+ return s;
+}
+
+static inline char *alltrim(char *s, char c) {
+
+ rtrim(s, c);
+
+ return ltrim(s, c);
+}
+
+/* This function converts the time_t value <now> into a broken out struct tm
+ * which must be allocated by the caller. It is highly recommended to use this
+ * function instead of localtime() because that one requires a time_t* which
+ * is not always compatible with tv_sec depending on OS/hardware combinations.
+ */
+static inline void get_localtime(const time_t now, struct tm *tm)
+{
+ localtime_r(&now, tm);
+}
+
+/* This function converts the time_t value <now> into a broken out struct tm
+ * which must be allocated by the caller. It is highly recommended to use this
+ * function instead of gmtime() because that one requires a time_t* which
+ * is not always compatible with tv_sec depending on OS/hardware combinations.
+ */
+static inline void get_gmtime(const time_t now, struct tm *tm)
+{
+ gmtime_r(&now, tm);
+}
+
+/* Counts a number of elapsed days since 01/01/0000 based solely on elapsed
+ * years and assuming the regular rule for leap years applies. It's fake but
+ * serves as a temporary origin. It's worth remembering that it's the first
+ * year of each period that is leap and not the last one, so for instance year
+ * 1 sees 366 days since year 0 was leap. For this reason we have to apply
+ * modular arithmetic which is why we offset the year by 399 before
+ * subtracting the excess at the end. No overflow here before ~11.7 million
+ * years.
+ */
+static inline unsigned int days_since_zero(unsigned int y)
+{
+ return y * 365 + (y + 399) / 4 - (y + 399) / 100 + (y + 399) / 400
+ - 399 / 4 + 399 / 100;
+}
+
+/* Returns the number of seconds since 01/01/1970 0:0:0 GMT for GMT date <tm>.
+ * It is meant as a portable replacement for timegm() for use with valid inputs.
+ * Returns undefined results for invalid dates (eg: months out of range 0..11).
+ */
+extern time_t my_timegm(const struct tm *tm);
+
+/* This function parses a time value optionally followed by a unit suffix among
+ * "d", "h", "m", "s", "ms" or "us". It converts the value into the unit
+ * expected by the caller. The computation does its best to avoid overflows.
+ * The value is returned in <ret> if everything is fine, and a NULL is returned
+ * by the function. In case of error, a pointer to the error is returned and
+ * <ret> is left untouched.
+ */
+extern const char *parse_time_err(const char *text, unsigned *ret, unsigned unit_flags);
+extern const char *parse_size_err(const char *text, unsigned *ret);
+
+/*
+ * Parse binary string written in hexadecimal (source) and store the decoded
+ * result into binstr and set binstrlen to the length of binstr. Memory for
+ * binstr is allocated by the function. In case of error, returns 0 with an
+ * error message in err.
+ */
+int parse_binary(const char *source, char **binstr, int *binstrlen, char **err);
+
+/* copies at most <n> characters from <src> and always terminates with '\0' */
+char *my_strndup(const char *src, int n);
+
+/*
+ * search needle in haystack
+ * returns the pointer if found, returns NULL otherwise
+ */
+const void *my_memmem(const void *, size_t, const void *, size_t);
+
+/* get length of the initial segment consisting entirely of bytes within a given
+ * mask
+ */
+size_t my_memspn(const void *, size_t, const void *, size_t);
+
+/* get length of the initial segment consisting entirely of bytes not within a
+ * given mask
+ */
+size_t my_memcspn(const void *, size_t, const void *, size_t);
+
+/* This function returns the first unused key greater than or equal to <key> in
+ * ID tree <root>. Zero is returned if no place is found.
+ */
+unsigned int get_next_id(struct eb_root *root, unsigned int key);
+
+/* dump the full tree to <file> in DOT format for debugging purposes. Will
+ * optionally highlight node <subj> if found, depending on operation <op> :
+ * 0 : nothing
+ * >0 : insertion, node/leaf are surrounded in red
+ * <0 : removal, node/leaf are dashed with no background
+ * Will optionally add "desc" as a label on the graph if set and non-null.
+ */
+void eb32sc_to_file(FILE *file, struct eb_root *root, const struct eb32sc_node *subj,
+ int op, const char *desc);
+
+/* same but for ebmb */
+void ebmb_to_file(FILE *file, struct eb_root *root, const struct ebmb_node *subj,
+ int op, const char *desc);
+
+/* This function compares a sample word possibly followed by blanks to another
+ * clean word. The compare is case-insensitive. 1 is returned if both are equal,
+ * otherwise zero. This intends to be used when checking HTTP headers for some
+ * values.
+ */
+int word_match(const char *sample, int slen, const char *word, int wlen);
+
+/* Convert a fixed-length string to an IP address. Returns 0 in case of error,
+ * or the number of chars read in case of success.
+ */
+int buf2ip(const char *buf, size_t len, struct in_addr *dst);
+int buf2ip6(const char *buf, size_t len, struct in6_addr *dst);
+
+/* To be used to quote config arg positions. Returns the string at <ptr>
+ * surrounded by simple quotes if <ptr> is valid and non-empty, or "end of line"
+ * if ptr is NULL or empty. The string is locally allocated.
+ */
+const char *quote_arg(const char *ptr);
+
+/* returns an operator among STD_OP_* for string <str> or < 0 if unknown */
+int get_std_op(const char *str);
+
+/* sets the address family to AF_UNSPEC so that is_addr() does not match */
+static inline void clear_addr(struct sockaddr_storage *addr)
+{
+ addr->ss_family = AF_UNSPEC;
+}
+
+/* returns non-zero if addr has a valid and non-null IPv4 or IPv6 address,
+ * otherwise zero.
+ */
+static inline int is_inet_addr(const struct sockaddr_storage *addr)
+{
+ int i;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ return *(int *)&((struct sockaddr_in *)addr)->sin_addr;
+ case AF_INET6:
+ for (i = 0; i < sizeof(struct in6_addr) / sizeof(int); i++)
+ if (((int *)&((struct sockaddr_in6 *)addr)->sin6_addr)[i] != 0)
+ return ((int *)&((struct sockaddr_in6 *)addr)->sin6_addr)[i];
+ }
+ return 0;
+}
+
+/* returns non-zero if addr has a valid and non-null IPv4 or IPv6 address,
+ * or is a unix address, otherwise returns zero.
+ */
+static inline int is_addr(const struct sockaddr_storage *addr)
+{
+ if (addr->ss_family == AF_UNIX || addr->ss_family == AF_CUST_SOCKPAIR)
+ return 1;
+ else
+ return is_inet_addr(addr);
+}
+
+/* returns port in network byte order */
+static inline int get_net_port(const struct sockaddr_storage *addr)
+{
+ switch (addr->ss_family) {
+ case AF_INET:
+ return ((struct sockaddr_in *)addr)->sin_port;
+ case AF_INET6:
+ return ((struct sockaddr_in6 *)addr)->sin6_port;
+ }
+ return 0;
+}
+
+/* returns port in host byte order */
+static inline int get_host_port(const struct sockaddr_storage *addr)
+{
+ switch (addr->ss_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)addr)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)addr)->sin6_port);
+ }
+ return 0;
+}
+
+/* returns address len for <addr>'s family, 0 for unknown families */
+static inline int get_addr_len(const struct sockaddr_storage *addr)
+{
+ switch (addr->ss_family) {
+ case AF_INET:
+ return sizeof(struct sockaddr_in);
+ case AF_INET6:
+ return sizeof(struct sockaddr_in6);
+ case AF_UNIX:
+ return sizeof(struct sockaddr_un);
+ }
+ return 0;
+}
+
+/* set port, passed in network byte order (stored as-is, no conversion) */
+static inline int set_net_port(struct sockaddr_storage *addr, int port)
+{
+ switch (addr->ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)addr)->sin_port = port;
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)addr)->sin6_port = port;
+ break;
+ }
+ return 0;
+}
+
+/* set port, passed in host byte order (converted with htons before storing) */
+static inline int set_host_port(struct sockaddr_storage *addr, int port)
+{
+ switch (addr->ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+ break;
+ }
+ return 0;
+}
+
+/* Convert mask from bit length form to in_addr form.
+ * This function never fails.
+ */
+void len2mask4(int len, struct in_addr *addr);
+
+/* Convert mask from bit length form to in6_addr form.
+ * This function never fails.
+ */
+void len2mask6(int len, struct in6_addr *addr);
+
+/* Return true if IPv4 address is part of the network */
+extern int in_net_ipv4(const void *addr, const struct in_addr *mask, const struct in_addr *net);
+
+/* Return true if IPv6 address is part of the network */
+extern int in_net_ipv6(const void *addr, const struct in6_addr *mask, const struct in6_addr *net);
+
+/* Map IPv4 address on IPv6 address, as specified in RFC 3513. */
+extern void v4tov6(struct in6_addr *sin6_addr, struct in_addr *sin_addr);
+
+/* Map IPv6 address on IPv4 address, as specified in RFC 3513.
+ * Return true if conversion is possible and false otherwise.
+ */
+extern int v6tov4(struct in_addr *sin_addr, struct in6_addr *sin6_addr);
+
+/* compare two struct sockaddr_storage, including port if <check_port> is true,
+ * and return:
+ * 0 (true) if the addr is the same in both
+ * 1 (false) if the addr is not the same in both
+ * -1 (unable) if one of the addr is not AF_INET*
+ */
+int ipcmp(const struct sockaddr_storage *ss1, const struct sockaddr_storage *ss2, int check_port);
+
+/* compare a struct sockaddr_storage to a struct net_addr and return :
+ * 0 (true) if <addr> is matching <net>
+ * 1 (false) if <addr> is not matching <net>
+ * -1 (unable) if <addr> or <net> is not AF_INET*
+ */
+int ipcmp2net(const struct sockaddr_storage *addr, const struct net_addr *net);
+
+/* copy ip from <source> into <dest>
+ * the caller must clear <dest> before calling.
+ * Returns a pointer to the destination
+ */
+struct sockaddr_storage *ipcpy(const struct sockaddr_storage *source, struct sockaddr_storage *dest);
+
+char *human_time(int t, short hz_div);
+
+extern const char *monthname[];
+
+/* date2str_log: write a date in the format :
+ * sprintf(str, "%02d/%s/%04d:%02d:%02d:%02d.%03d",
+ * tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
+ * tm.tm_hour, tm.tm_min, tm.tm_sec, (int)date.tv_usec/1000);
+ *
+ * without using sprintf. return a pointer to the last char written (\0) or
+ * NULL if there isn't enough space.
+ */
+char *date2str_log(char *dest, const struct tm *tm, const struct timeval *date, size_t size);
+
+/* Return the GMT offset for a specific local time.
+ * Both t and tm must represent the same time.
+ * The string returned has the same format as returned by strftime(... "%z", tm).
+ * Offsets are kept in an internal cache for better performances.
+ */
+const char *get_gmt_offset(time_t t, struct tm *tm);
+
+/* gmt2str_log: write a date in the format :
+ * "%02d/%s/%04d:%02d:%02d:%02d +0000" without using snprintf
+ * return a pointer to the last char written (\0) or
+ * NULL if there isn't enough space.
+ */
+char *gmt2str_log(char *dst, struct tm *tm, size_t size);
+
+/* localdate2str_log: write a date in the format :
+ * "%02d/%s/%04d:%02d:%02d:%02d +0000(local timezone)" without using snprintf
+ * Both t and tm must represent the same time.
+ * return a pointer to the last char written (\0) or
+ * NULL if there isn't enough space.
+ */
+char *localdate2str_log(char *dst, time_t t, struct tm *tm, size_t size);
+
+/* These functions parse a date string and fill the
+ * corresponding broken-down time in <tm>. On success,
+ * they return 1, otherwise they return 0.
+ */
+int parse_http_date(const char *date, int len, struct tm *tm);
+int parse_imf_date(const char *date, int len, struct tm *tm);
+int parse_rfc850_date(const char *date, int len, struct tm *tm);
+int parse_asctime_date(const char *date, int len, struct tm *tm);
+int print_time_short(struct buffer *out, const char *pfx, uint64_t ns, const char *sfx);
+
+/* Dynamically allocates a string of the proper length to hold the formatted
+ * output. NULL is returned on error. The caller is responsible for freeing the
+ * memory area using free(). The resulting string is returned in <out> if the
+ * pointer is not NULL. A previous version of <out> might be used to build the
+ * new string, and it will be freed before returning if it is not NULL, which
+ * makes it possible to build complex strings from iterative calls without
+ * having to care about freeing intermediate values, as in the example below :
+ *
+ * memprintf(&err, "invalid argument: '%s'", arg);
+ * ...
+ * memprintf(&err, "parser said : <%s>\n", *err);
+ * ...
+ * free(*err);
+ *
+ * This means that <err> must be initialized to NULL before first invocation.
+ * The return value also holds the allocated string, which eases error checking
+ * and immediate consumption. If the output pointer is not used, NULL must be
+ * passed instead and it will be ignored. The returned message will then also
+ * be NULL so that the caller does not have to bother with freeing anything.
+ *
+ * It is also convenient to use it without any free except the last one :
+ * err = NULL;
+ * if (!fct1(err)) report(*err);
+ * if (!fct2(err)) report(*err);
+ * if (!fct3(err)) report(*err);
+ * free(*err);
+ *
+ * memprintf relies on memvprintf. This last version can be called from any
+ * function with variadic arguments.
+ */
+char *memvprintf(char **out, const char *format, va_list args)
+ __attribute__ ((format(printf, 2, 0)));
+
+char *memprintf(char **out, const char *format, ...)
+ __attribute__ ((format(printf, 2, 3)));
+
+/* Used to add <level> spaces before each line of <out>, unless there is only one line.
+ * The input argument is automatically freed and reassigned. The result will have to be
+ * freed by the caller.
+ * Example of use :
+ * parse(cmd, &err); (callee: memprintf(&err, ...))
+ * fprintf(stderr, "Parser said: %s\n", indent_error(&err));
+ * free(err);
+ */
+char *indent_msg(char **out, int level);
+int append_prefixed_str(struct buffer *out, const char *in, const char *pfx, char eol, int first);
+
+/* removes environment variable <name> from the environment as found in
+ * environ. This is only provided as an alternative for systems without
+ * unsetenv() (old Solaris and AIX versions). THIS IS NOT THREAD SAFE.
+ * The principle is to scan environ for each occurrence of variable name
+ * <name> and to replace the matching pointers with the last pointer of
+ * the array (since variables are not ordered).
+ * It always returns 0 (success).
+ */
+int my_unsetenv(const char *name);
+
+/* Convert occurrences of environment variables in the input string to their
+ * corresponding value. A variable is identified as a series of alphanumeric
+ * characters or underscores following a '$' sign. The <in> string must be
+ * free()able. NULL returns NULL. The resulting string might be reallocated if
+ * some expansion is made.
+ */
+char *env_expand(char *in);
+uint32_t parse_line(char *in, char *out, size_t *outlen, char **args, int *nbargs, uint32_t opts, const char **errptr);
+ssize_t read_line_to_trash(const char *path_fmt, ...);
+size_t sanitize_for_printing(char *line, size_t pos, size_t width);
+void update_word_fingerprint(uint8_t *fp, const char *word);
+void make_word_fingerprint(uint8_t *fp, const char *word);
+int word_fingerprint_distance(const uint8_t *fp1, const uint8_t *fp2);
+
+/* debugging macro to emit messages using write() on fd #-1 so that strace sees
+ * them.
+ */
+#define fddebug(msg...) do { char *_m = NULL; memprintf(&_m, ##msg); if (_m) write(-1, _m, strlen(_m)); free(_m); } while (0)
+
+/* displays a <len> long memory block at <buf>, assuming first byte of <buf>
+ * has address <baseaddr>. String <pfx> may be placed as a prefix in front of
+ * each line. It may be NULL if unused. The output is emitted to file <out>.
+ */
+void debug_hexdump(FILE *out, const char *pfx, const char *buf, unsigned int baseaddr, int len);
+
+/* this is used to emit call traces when building with TRACE=1 */
+__attribute__((format(printf, 1, 2)))
+void calltrace(char *fmt, ...);
+
+/* same as strstr() but case-insensitive */
+const char *strnistr(const char *str1, int len_str1, const char *str2, int len_str2);
+
+int strordered(const char *s1, const char *s2, const char *s3);
+
+/* after increasing a pointer value, it can exceed the first buffer
+ * size. This function transforms the value of <ptr> according to
+ * the expected position. <chunks> is an array of the one or two
+ * available chunks. The first value is the start of the first chunk,
+ * the second value is the end+1 of the first chunk. The third value
+ * is NULL or the start of the second chunk and the fourth value is
+ * the end+1 of the second chunk. The function returns 1 if it does a
+ * wrap, else it returns 0.
+ */
+static inline int fix_pointer_if_wrap(const char **chunks, const char **ptr)
+{
+ if (*ptr < chunks[1])
+ return 0;
+ if (!chunks[2])
+ return 0;
+ *ptr = chunks[2] + ( *ptr - chunks[1] );
+ return 1;
+}
+
+unsigned char utf8_next(const char *s, int len, unsigned int *c);
+
+static inline unsigned char utf8_return_code(unsigned int code)
+{
+ return code & 0xf0;
+}
+
+static inline unsigned char utf8_return_length(unsigned char code)
+{
+ return code & 0x0f;
+}
+
+/* returns a 64-bit a timestamp with the finest resolution available. The
+ * unit is intentionally not specified. It's mostly used to compare dates.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+static inline unsigned long long rdtsc()
+{
+ unsigned int a, d;
+ asm volatile("rdtsc" : "=a" (a), "=d" (d));
+ return a + ((unsigned long long)d << 32);
+}
+#else
+static inline unsigned long long rdtsc()
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec * 1000000 + tv.tv_usec;
+}
+#endif
+
+/* append a copy of string <str> (in a wordlist) at the end of the list <li>
+ * On failure : return 0 and <err> filled with an error message.
+ * The caller is responsible for freeing the <err> and <str> copy
+ * memory area using free()
+ */
+struct list;
+int list_append_word(struct list *li, const char *str, char **err);
+
+int dump_text(struct buffer *out, const char *buf, int bsize);
+int dump_binary(struct buffer *out, const char *buf, int bsize);
+int dump_text_line(struct buffer *out, const char *buf, int bsize, int len,
+ int *line, int ptr);
+void dump_addr_and_bytes(struct buffer *buf, const char *pfx, const void *addr, int n);
+void dump_hex(struct buffer *out, const char *pfx, const void *buf, int len, int unsafe);
+int may_access(const void *ptr);
+const void *resolve_sym_name(struct buffer *buf, const char *pfx, const void *addr);
+const char *get_exec_path(void);
+void *get_sym_curr_addr(const char *name);
+void *get_sym_next_addr(const char *name);
+int dump_libs(struct buffer *output, int with_addr);
+
+/* Note that this may result in opening libgcc() on first call, so it may need
+ * to have been called once before chrooting.
+ */
+static forceinline int my_backtrace(void **buffer, int max)
+{
+#if !defined(USE_BACKTRACE)
+ return 0;
+#elif defined(HA_HAVE_WORKING_BACKTRACE)
+ return backtrace(buffer, max);
+#else
+ const struct frame {
+ const struct frame *next;
+ void *ra;
+ } *frame;
+ int count;
+
+ frame = __builtin_frame_address(0);
+ for (count = 0; count < max && may_access(frame) && may_access(frame->ra);) {
+ buffer[count++] = frame->ra;
+ frame = frame->next;
+ }
+ return count;
+#endif
+}
+
+/* same as realloc() except that ptr is also freed upon failure */
+static inline void *my_realloc2(void *ptr, size_t size)
+{
+ void *ret;
+
+ ret = realloc(ptr, size);
+ if (!ret && size)
+ free(ptr);
+ return ret;
+}
+
+int parse_dotted_uints(const char *s, unsigned int **nums, size_t *sz);
+
+/* PRNG */
+void ha_generate_uuid(struct buffer *output);
+void ha_random_seed(const unsigned char *seed, size_t len);
+void ha_random_jump96(uint32_t dist);
+uint64_t ha_random64(void);
+
+static inline uint32_t ha_random32()
+{
+ return ha_random64() >> 32;
+}
+
+static inline int32_t ha_random()
+{
+ return ha_random32() >> 1;
+}
+
+extern THREAD_LOCAL unsigned int statistical_prng_state;
+
+/* Xorshift RNGs from http://www.jstatsoft.org/v08/i14/paper.
+ * This has a (2^32)-1 period, only zero is never returned.
+ */
+static inline unsigned int statistical_prng()
+{
+ unsigned int x = statistical_prng_state;
+
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ return statistical_prng_state = x;
+}
+
+/* returns a random number between 0 and <range> - 1 that is evenly distributed
+ * over the range.
+ */
+static inline uint statistical_prng_range(uint range)
+{
+ return mul32hi(statistical_prng(), range ? range - 1 : 0);
+}
+
+/* returns a hash on <bits> bits of pointer <p> that is suitable for being used
+ * to compute statistic buckets, in that it's fast and reasonably distributed
+ * thanks to mixing the bits via a multiplication by a prime number and using
+ * the middle bits on 64-bit platforms or remixing the topmost with lowest ones
+ * on 32-bit. The distribution is smooth enough for the hash to provide on
+ * average 1/e non-colliding entries per input, and use on average 1-1/e
+ * entries total. Thus for example hashing 1024 random valid pointers will
+ * result on average in ~647 distinct keys, 377 of which are unique. It was
+ * carefully selected to deliver optimal distribution for low bit counts so
+ * that hashing on 2,3,4 or 5 bits delivers good results.
+ */
+static forceinline uint ptr_hash(const void *p, const int bits)
+{
+ unsigned long long x = (unsigned long)p;
+
+ if (!bits)
+ return 0;
+
+ x *= 0xacd1be85U;
+ if (sizeof(long) == 4)
+ x ^= x >> 32;
+ else
+ x >>= 31 - (bits + 1) / 2;
+ return x & (~0U >> (-bits & 31));
+}
+
+/* Same as above but works on two pointers. It will return the same values
+ * if the second pointer is NULL.
+ */
+static forceinline uint ptr2_hash(const void *p1, const void *p2, const int bits)
+{
+ unsigned long long x = (unsigned long)p1;
+ unsigned long long y = (unsigned long)p2;
+
+ if (!bits)
+ return 0;
+
+ x *= 0xacd1be85U;
+ y *= 0x9d28e4e9U;
+ x ^= y;
+ if (sizeof(long) == 4)
+ x ^= x >> 32;
+ else
+ x >>= 33 - bits / 2;
+ return x & (~0U >> (-bits & 31));
+}
+
+
+/* Update array <fp> with the character transition <prev> to <curr>. If <prev>
+ * is zero, it's assumed that <curr> is the first character. If <curr> is zero
+ * its assumed to mark the end. Both may be zero. <fp> is a 1024-entries array
+ * indexed as 32*from+to. Positions for 'from' and 'to' are:
+ * 1..26=letter, 27=digit, 28=other/begin/end.
+ * Row "from=0" is used to mark the character's presence. Others unused.
+ */
+static inline void update_char_fingerprint(uint8_t *fp, char prev, char curr)
+{
+ int from, to;
+
+ switch (prev) {
+ case 0: from = 28; break; // begin
+ case 'a'...'z': from = prev - 'a' + 1; break;
+ case 'A'...'Z': from = tolower(prev) - 'a' + 1; break;
+ case '0'...'9': from = 27; break;
+ default: from = 28; break;
+ }
+
+ switch (curr) {
+ case 0: to = 28; break; // end
+ case 'a'...'z': to = curr - 'a' + 1; break;
+ case 'A'...'Z': to = tolower(curr) - 'a' + 1; break;
+ case '0'...'9': to = 27; break;
+ default: to = 28; break;
+ }
+ if (curr)
+ fp[to] = 1;
+ fp[32 * from + to]++;
+}
+
+
+/* compare the current OpenSSL version to a string */
+int openssl_compare_current_version(const char *version);
+/* compare the current OpenSSL name to a string */
+int openssl_compare_current_name(const char *name);
+
+#endif /* _HAPROXY_TOOLS_H */
diff --git a/include/haproxy/trace-t.h b/include/haproxy/trace-t.h
new file mode 100644
index 0000000..322fccd
--- /dev/null
+++ b/include/haproxy/trace-t.h
@@ -0,0 +1,179 @@
+/*
+ * include/haproxy/trace-t.h
+ * This file provides definitions for runtime tracing
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TRACE_T_H
+#define _HAPROXY_TRACE_T_H
+
+#include <import/ist.h>
+#include <haproxy/api-t.h>
+#include <haproxy/sink-t.h>
+
+/* the macros below define an optional type for each of the 4 args passed to
+ * the trace() call. When such a type is set, the caller commits to exclusively
+ * using a valid pointer when this argument is not null. This allows the trace()
+ * function to automatically start or stop the lock-on mechanism when it detects
+ * a type that it can dereference such as a connection or a stream. Each value
+ * is represented as an exclusive bit and each arg is represented by a distinct
+ * byte. The reason for using a single bit per value is to speed up tests using
+ * bitmasks. Users must not declare args with multiple bits set for the same arg.
+ * By default arguments are private, corresponding to value 0.
+ */
+
+/* for use only in macro definitions above */
+#define TRC_ARG_PRIV (0)
+#define TRC_ARG_CONN (1 << 0)
+#define TRC_ARG_SESS (1 << 1)
+#define TRC_ARG_STRM (1 << 2)
+#define TRC_ARG_CHK (1 << 3)
+#define TRC_ARG_QCON (1 << 4)
+#define TRC_ARG_APPCTX (1 << 5)
+
+#define TRC_ARG1_PRIV (TRC_ARG_PRIV << 0)
+#define TRC_ARG1_CONN (TRC_ARG_CONN << 0)
+#define TRC_ARG1_SESS (TRC_ARG_SESS << 0)
+#define TRC_ARG1_STRM (TRC_ARG_STRM << 0)
+#define TRC_ARG1_CHK (TRC_ARG_CHK << 0)
+#define TRC_ARG1_QCON (TRC_ARG_QCON << 0)
+#define TRC_ARG1_APPCTX (TRC_ARG_APPCTX << 0)
+
+#define TRC_ARG2_PRIV (TRC_ARG_PRIV << 8)
+#define TRC_ARG2_CONN (TRC_ARG_CONN << 8)
+#define TRC_ARG2_SESS (TRC_ARG_SESS << 8)
+#define TRC_ARG2_STRM (TRC_ARG_STRM << 8)
+#define TRC_ARG2_CHK (TRC_ARG_CHK << 8)
+#define TRC_ARG2_QCON (TRC_ARG_QCON << 8)
+#define TRC_ARG2_APPCTX (TRC_ARG_APPCTX << 8)
+
+#define TRC_ARG3_PRIV (TRC_ARG_PRIV << 16)
+#define TRC_ARG3_CONN (TRC_ARG_CONN << 16)
+#define TRC_ARG3_SESS (TRC_ARG_SESS << 16)
+#define TRC_ARG3_STRM (TRC_ARG_STRM << 16)
+#define TRC_ARG3_CHK (TRC_ARG_CHK << 16)
+#define TRC_ARG3_QCON (TRC_ARG_QCON << 16)
+#define TRC_ARG3_APPCTX (TRC_ARG_APPCTX << 16)
+
+#define TRC_ARG4_PRIV (TRC_ARG_PRIV << 24)
+#define TRC_ARG4_CONN (TRC_ARG_CONN << 24)
+#define TRC_ARG4_SESS (TRC_ARG_SESS << 24)
+#define TRC_ARG4_STRM (TRC_ARG_STRM << 24)
+#define TRC_ARG4_CHK (TRC_ARG_CHK << 24)
+#define TRC_ARG4_QCON (TRC_ARG_QCON << 24)
+#define TRC_ARG4_APPCTX (TRC_ARG_APPCTX << 24)
+
+/* usable to detect the presence of any arg of the desired type */
+#define TRC_ARGS_CONN (TRC_ARG_CONN * 0x01010101U)
+#define TRC_ARGS_SESS (TRC_ARG_SESS * 0x01010101U)
+#define TRC_ARGS_STRM (TRC_ARG_STRM * 0x01010101U)
+#define TRC_ARGS_CHK (TRC_ARG_CHK * 0x01010101U)
+#define TRC_ARGS_QCON (TRC_ARG_QCON * 0x01010101U)
+#define TRC_ARGS_APPCTX (TRC_ARG_APPCTX * 0x01010101U)
+
+
+enum trace_state {
+ TRACE_STATE_STOPPED = 0, // completely disabled
+ TRACE_STATE_WAITING, // waiting for the start condition to happen
+ TRACE_STATE_RUNNING, // waiting for the stop or pause conditions
+};
+
+/* trace levels, from least detailed to most detailed. Traces emitted at a
+ * lower level are always reported at higher levels.
+ */
+enum trace_level {
+ TRACE_LEVEL_ERROR = 0, // only errors
+ TRACE_LEVEL_USER, // also info useful to the end user
+ TRACE_LEVEL_PROTO, // also report protocol-level updates
+ TRACE_LEVEL_STATE, // also report state changes
+ TRACE_LEVEL_DATA, // also report data exchanges
+ TRACE_LEVEL_DEVELOPER, // functions entry/exit and any other developer info
+};
+
+enum trace_lockon {
+ TRACE_LOCKON_NOTHING = 0, // don't lock on anything
+ TRACE_LOCKON_THREAD, // lock on the thread that started the trace
+ TRACE_LOCKON_LISTENER, // lock on the listener that started the trace
+ TRACE_LOCKON_FRONTEND, // lock on the frontend that started the trace
+ TRACE_LOCKON_BACKEND, // lock on the backend that started the trace
+ TRACE_LOCKON_SERVER, // lock on the server that started the trace
+ TRACE_LOCKON_CONNECTION, // lock on the connection that started the trace
+ TRACE_LOCKON_SESSION, // lock on the session that started the trace
+ TRACE_LOCKON_STREAM, // lock on the stream that started the trace
+ TRACE_LOCKON_CHECK, // lock on the check that started the trace
+ TRACE_LOCKON_QCON, // lock on the QUIC connection that started the trace
+ TRACE_LOCKON_APPCTX, // lock on the appctx that started the trace
+ TRACE_LOCKON_ARG1, // lock on arg1, totally source-dependent
+ TRACE_LOCKON_ARG2, // lock on arg2, totally source-dependent
+ TRACE_LOCKON_ARG3, // lock on arg3, totally source-dependent
+ TRACE_LOCKON_ARG4, // lock on arg4, totally source-dependent
+};
+
+/* Each trace event maps a name to a mask in an uint64_t. Multiple bits are
+ * permitted to have composite events. This is supposed to be stored into an
+ * array terminated by mask 0 (name and desc are then ignored). Names "now",
+ * "any" and "none" are reserved by the CLI parser for start/pause/stop
+ * operations.
+ */
+struct trace_event {
+ uint64_t mask;
+ const char *name;
+ const char *desc;
+};
+
+/* Regarding the verbosity, if <decoding> is not NULL, it must point to a NULL-
+ * terminated array of name:description, which will define verbosity levels
+ * implemented by the decoding callback. The verbosity value will default to
+ * 1. When verbosity levels are defined, levels 1 and above are described by
+ * these levels. At level zero, the callback is never called.
+ */
+struct trace_source {
+ /* source definition */
+ const struct ist name;
+ const char *desc;
+ const struct trace_event *known_events;
+ struct list source_link; // element in list of known trace sources
+ void (*default_cb)(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+ uint32_t arg_def; // argument definitions (sum of TRC_ARG{1..4}_*)
+ const struct name_desc *lockon_args; // must be 4 entries if not NULL
+ const struct name_desc *decoding; // null-terminated if not NULL
+ /* trace configuration, adjusted by "trace <module>" on CLI */
+ enum trace_lockon lockon;
+ uint64_t start_events; // what will start the trace. default: 0=nothing
+ uint64_t pause_events; // what will pause the trace. default: 0=nothing
+ uint64_t stop_events; // what will stop the trace. default: 0=nothing
+ uint64_t report_events; // mask of which events need to be reported.
+ enum trace_level level; // report traces up to this level of info
+ unsigned int verbosity; // decoder's level of detail among <decoding> (0=no cb)
+ struct sink *sink; // where to send the trace
+ /* trace state part below */
+ enum trace_state state;
+ const void *lockon_ptr; // what to lockon when lockon is set
+};
+
+#endif /* _HAPROXY_TRACE_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/trace.h b/include/haproxy/trace.h
new file mode 100644
index 0000000..703ac8d
--- /dev/null
+++ b/include/haproxy/trace.h
@@ -0,0 +1,216 @@
+/*
+ * include/haproxy/trace.h
+ * This file provides functions for runtime tracing
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_TRACE_H
+#define _HAPROXY_TRACE_H
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/list.h>
+#include <haproxy/sink-t.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace-t.h>
+
+/* Make a string from the location of the trace producer as "file:line" */
+#define TRC_LOC _TRC_LOC(__FILE__, __LINE__)
+#define _TRC_LOC(f,l) __TRC_LOC(f, ":", l)
+#define __TRC_LOC(f,c,l) f c #l
+
+/* truncate a macro arg list to exactly 5 args and replace missing ones with NULL.
+ * The first one (a0) is always ignored.
+ */
+#define TRC_5ARGS(a0,a1,a2,a3,a4,a5,...) DEFNULL(a1),DEFNULL(a2),DEFNULL(a3),DEFNULL(a4),DEFNULL(a5)
+
+/* reports whether trace is active for the source and the arguments. It uses
+ * the same criteria as trace() (locking, filtering etc) so it's safe to use
+ * from application code to decide whether or not to engage in heavier data
+ * preparation processing.
+ */
+#define _trace_enabled(level, mask, src, args...) \
+ (unlikely((src)->state != TRACE_STATE_STOPPED && \
+ __trace_enabled(level, mask, src, ##args, NULL) > 0))
+
+/* sends a trace for the given source. Arguments are passed in the exact same
+ * order as in the __trace() function, which is only called if (src)->state is
+ * not TRACE_STATE_STOPPED. This is the only case where arguments are evaluated.
+ */
+#define _trace(level, mask, src, args...) \
+ do { \
+ if (unlikely((src)->state != TRACE_STATE_STOPPED)) \
+ __trace(level, mask, src, ##args); \
+ } while (0)
+
+/* For convenience, TRACE() alone uses the file's default TRACE_LEVEL, most
+ * likely TRACE_LEVEL_DEVELOPER, though the other explicit variants specify
+ * the desired level and will work when TRACE_LEVEL is not set. The 5 optional
+ * arguments are the 4 source-specific arguments that are passed to the cb()
+ * callback dedicated to decoding, and which may be used for special tracking.
+ * These 4 arguments as well as the cb() function pointer may all be NULL, or
+ * simply omitted (in which case they will be replaced by a NULL). This
+ * ordering allows many TRACE() calls to be placed using copy-paste and just
+ * change the message at the beginning. Only TRACE_DEVEL(), TRACE_ENTER() and
+ * TRACE_LEAVE() will report the calling function's name. TRACE_PRINTF() does
+ * require all the optional a1..a4 to be passed (possibly zero) so that they're
+ * always followed by the format string, then the values to be formatted.
+ *
+ * TRACE_* will call the _trace() macro which will test if the trace is enabled
+ * before calling the __trace() function. _trace() shouldn't be a function (nor
+ * inline) itself because we don't want the caller to compute its arguments if
+ * traces are not enabled.
+ *
+ * TRACE_ENABLED() reports whether or not trace is enabled for the current
+ * source, level, mask and arguments.
+ */
+#define TRACE_ENABLED(level, mask, args...) (_trace_enabled((level), (mask), TRACE_SOURCE, ist(TRC_LOC), __FUNCTION__, ##args))
+
+#define TRACE(msg, mask, args...) \
+ _trace(TRACE_LEVEL, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_ERROR(msg, mask, args...) \
+ _trace(TRACE_LEVEL_ERROR, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_USER(msg, mask, args...) \
+ _trace(TRACE_LEVEL_USER, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_DATA(msg, mask, args...) \
+ _trace(TRACE_LEVEL_DATA, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_PROTO(msg, mask, args...) \
+ _trace(TRACE_LEVEL_PROTO, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_STATE(msg, mask, args...) \
+ _trace(TRACE_LEVEL_STATE, (mask), TRACE_SOURCE, ist(TRC_LOC), NULL, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_DEVEL(msg, mask, args...) \
+ _trace(TRACE_LEVEL_DEVELOPER, (mask), TRACE_SOURCE, ist(TRC_LOC), __FUNCTION__, TRC_5ARGS(0,##args,0,0,0,0,0), ist(msg))
+
+#define TRACE_ENTER(mask, args...) \
+ _trace(TRACE_LEVEL_DEVELOPER, (mask), TRACE_SOURCE, ist(TRC_LOC), __FUNCTION__, TRC_5ARGS(0,##args,0,0,0,0,0), ist("entering"))
+
+#define TRACE_LEAVE(mask, args...) \
+ _trace(TRACE_LEVEL_DEVELOPER, (mask), TRACE_SOURCE, ist(TRC_LOC), __FUNCTION__, TRC_5ARGS(0,##args,0,0,0,0,0), ist("leaving"))
+
+#define TRACE_POINT(mask, args...) \
+ _trace(TRACE_LEVEL_DEVELOPER, (mask), TRACE_SOURCE, ist(TRC_LOC), __FUNCTION__, TRC_5ARGS(0,##args,0,0,0,0,0), ist("in"))
+
+/* This produces a printf-like trace at level <level> for event mask <mask> and
+ * trace arguments <a1..a4>. All args mandatory, but may be zero. No output
+ * callback will be used since we expect the caller to pass a fully formatted
+ * message that must not be degraded. The output will be truncated to
+ * TRACE_MAX_MSG-1 bytes (1023 by default). Caller must include <stdio.h> for
+ * snprintf(). One call will lead to one independent message, which means that
+ * multiple messages may be interleaved between threads, hence the caller is
+ * encouraged to prepend a context at the beginning of the format string when
+ * dumping lists or arrays. The _LOC variation takes the caller's location and
+ * function name as an ist and a (const char *) respectively, it is meant for
+ * being called from wrapper function which will work on behalf of a caller.
+ */
+#define TRACE_PRINTF(level, mask, a1, a2, a3, a4, fmt, args...) \
+ TRACE_PRINTF_LOC(level, mask, ist(TRC_LOC), __FUNCTION__, a1, a2, a3, a4, fmt, ##args)
+
+#define TRACE_PRINTF_LOC(level, mask, trc_loc, func, a1, a2, a3, a4, fmt, args...) \
+ do { \
+ if (TRACE_ENABLED((level), (mask), a1, a2, a3, a4)) { \
+ char _msg[TRACE_MAX_MSG]; \
+ size_t _msg_len; \
+ _msg_len = snprintf(_msg, sizeof(_msg), (fmt), ##args); \
+ if (_msg_len >= sizeof(_msg)) \
+ _msg_len = sizeof(_msg) - 1; \
+ _trace((level), (mask), TRACE_SOURCE, \
+ trc_loc, func, a1, a2, a3, a4, \
+ &trace_no_cb, ist2(_msg, _msg_len)); \
+ } \
+ } while (0)
+
+#if defined(DEBUG_DEV) || defined(DEBUG_FULL)
+# define DBG_TRACE(msg, mask, args...) TRACE(msg, mask, ##args)
+# define DBG_TRACE_ERROR(msg, mask, args...) TRACE_ERROR(msg, mask, ##args)
+# define DBG_TRACE_USER(msg, mask, args...) TRACE_USER(msg, mask, ##args)
+# define DBG_TRACE_DATA(msg, mask, args...) TRACE_DATA(msg, mask, ##args)
+# define DBG_TRACE_PROTO(msg, mask, args...) TRACE_PROTO(msg, mask, ##args)
+# define DBG_TRACE_STATE(msg, mask, args...) TRACE_STATE(msg, mask, ##args)
+# define DBG_TRACE_DEVEL(msg, mask, args...) TRACE_DEVEL(msg, mask, ##args)
+# define DBG_TRACE_ENTER(mask, args...) TRACE_ENTER(mask, ##args)
+# define DBG_TRACE_LEAVE(mask, args...) TRACE_LEAVE(mask, ##args)
+# define DBG_TRACE_POINT(mask, args...) TRACE_POINT(mask, ##args)
+# define DBG_TRACE_PRINTF(level, args...) TRACE_PRINTF(level, ##args)
+# define DBG_TRACE_PRINTF_LOC(level, args...) TRACE_PRINTF_LOC(level, ##args)
+#else
+# define DBG_TRACE(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_ERROR(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_USER(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_DATA(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_PROTO(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_STATE(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_DEVEL(msg, mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_ENTER(mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_LEAVE(mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_POINT(mask, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_PRINTF(level, args...) do { /* do nothing */ } while(0)
+# define DBG_TRACE_PRINTF_LOC(level, args...) do { /* do nothing */ } while(0)
+#endif
+
+extern struct list trace_sources;
+extern THREAD_LOCAL struct buffer trace_buf;
+
+int __trace_enabled(enum trace_level level, uint64_t mask, struct trace_source *src,
+ const struct ist where, const char *func,
+ const void *a1, const void *a2, const void *a3, const void *a4,
+ const void **plockptr);
+
+void __trace(enum trace_level level, uint64_t mask, struct trace_source *src,
+ const struct ist where, const char *func,
+ const void *a1, const void *a2, const void *a3, const void *a4,
+ void (*cb)(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4),
+ const struct ist msg);
+
+void trace_no_cb(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+void trace_register_source(struct trace_source *source);
+
+int trace_parse_cmd(char *arg, char **errmsg);
+
+/* return a single char to describe a trace state */
+static inline char trace_state_char(enum trace_state st)
+{
+ return (st == TRACE_STATE_RUNNING) ? 'R' :
+ (st == TRACE_STATE_WAITING) ? 'w' :
+ '.';
+}
+
+/* return a single char to describe an event state */
+static inline char trace_event_char(uint64_t conf, uint64_t ev)
+{
+ return (conf & ev) ? '+' : '-';
+}
+
+#endif /* _HAPROXY_TRACE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/uri_auth-t.h b/include/haproxy/uri_auth-t.h
new file mode 100644
index 0000000..009adfd
--- /dev/null
+++ b/include/haproxy/uri_auth-t.h
@@ -0,0 +1,56 @@
+/*
+ * include/haproxy/uri_auth-t.h
+ * Definitions for URI-based user authentication using the HTTP basic method.
+ *
+ * Copyright 2006-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_URI_AUTH_T_H
+#define _HAPROXY_URI_AUTH_T_H
+
+#include <haproxy/acl-t.h>
+#include <haproxy/auth-t.h>
+
+/* This is a list of proxies we are allowed to see. Later, it should go in the
+ * user list, but before this we need to support de/re-authentication.
+ */
+struct stat_scope {
+ struct stat_scope *next; /* next entry, NULL if none */
+ int px_len; /* proxy name length */
+ char *px_id; /* proxy id */
+};
+
+/* later we may link them to support multiple URI matching */
+struct uri_auth {
+ int uri_len; /* the prefix length */
+ char *uri_prefix; /* the prefix we want to match */
+ char *auth_realm; /* the realm reported to the client */
+ char *node, *desc; /* node name & description reported in the stats */
+ int refresh; /* refresh interval for the browser (in seconds) */
+ unsigned int flags; /* STAT_* flags from stats.h and for applet.ctx.stats.flags */
+ struct stat_scope *scope; /* linked list of authorized proxies */
+ struct userlist *userlist; /* private userlist to emulate legacy "stats auth user:password" */
+ struct list http_req_rules; /* stats http-request rules : allow/deny/auth */
+ struct list admin_rules; /* 'stats admin' rules (chained) */
+ struct uri_auth *next; /* Used at deinit() to build a list of unique elements */
+};
+
+struct stats_admin_rule {
+ struct list list; /* list linked to from the proxy */
+ struct acl_cond *cond; /* acl condition to meet */
+};
+
+#endif /* _HAPROXY_URI_AUTH_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/uri_auth.h b/include/haproxy/uri_auth.h
new file mode 100644
index 0000000..27dca02
--- /dev/null
+++ b/include/haproxy/uri_auth.h
@@ -0,0 +1,44 @@
+/*
+ * include/haproxy/uri_auth.h
+ * Functions for URI-based user authentication using the HTTP basic method.
+ *
+ * Copyright 2006-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_URI_AUTH_H
+#define _HAPROXY_URI_AUTH_H
+
+#include <haproxy/api.h>
+#include <haproxy/uri_auth-t.h>
+
+/* Various functions used to set the fields during the configuration parsing.
+ * Please note that all those functions can initialize the root entry in order not to
+ * force the user to respect a certain order in the configuration file.
+ *
+ * Default values are used during initialization. Check STATS_DEFAULT_* for
+ * more information.
+ */
+struct uri_auth *stats_check_init_uri_auth(struct uri_auth **root);
+struct uri_auth *stats_set_uri(struct uri_auth **root, char *uri);
+struct uri_auth *stats_set_realm(struct uri_auth **root, char *realm);
+struct uri_auth *stats_set_refresh(struct uri_auth **root, int interval);
+struct uri_auth *stats_set_flag(struct uri_auth **root, int flag);
+struct uri_auth *stats_add_auth(struct uri_auth **root, char *user);
+struct uri_auth *stats_add_scope(struct uri_auth **root, char *scope);
+struct uri_auth *stats_set_node(struct uri_auth **root, char *name);
+struct uri_auth *stats_set_desc(struct uri_auth **root, char *desc);
+
+#endif /* _HAPROXY_URI_AUTH_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/uri_normalizer-t.h b/include/haproxy/uri_normalizer-t.h
new file mode 100644
index 0000000..bcbcaef
--- /dev/null
+++ b/include/haproxy/uri_normalizer-t.h
@@ -0,0 +1,31 @@
+/*
+ * include/haproxy/uri_normalizer-t.h
+ * HTTP request URI normalization.
+ *
+ * Copyright 2021 Tim Duesterhus <tim@bastelstu.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_URI_NORMALIZER_T_H
+#define _HAPROXY_URI_NORMALIZER_T_H
+
+enum uri_normalizer_err {
+ URI_NORMALIZER_ERR_NONE = 0,
+ URI_NORMALIZER_ERR_ALLOC,
+ URI_NORMALIZER_ERR_INVALID_INPUT,
+ URI_NORMALIZER_ERR_INTERNAL_ERROR = 0xdead,
+};
+
+#endif /* _HAPROXY_URI_NORMALIZER_T_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/uri_normalizer.h b/include/haproxy/uri_normalizer.h
new file mode 100644
index 0000000..b384007
--- /dev/null
+++ b/include/haproxy/uri_normalizer.h
@@ -0,0 +1,44 @@
+/*
+ * include/haproxy/uri_normalizer.h
+ * HTTP request URI normalization.
+ *
+ * Copyright 2021 Tim Duesterhus <tim@bastelstu.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _HAPROXY_URI_NORMALIZER_H
+#define _HAPROXY_URI_NORMALIZER_H
+
+#include <import/ist.h>
+
+#include <haproxy/uri_normalizer-t.h>
+
+/* Cuts the input at the first '#'. */
+static inline enum uri_normalizer_err uri_normalizer_fragment_strip(const struct ist input, struct ist *dst)
+{
+ *dst = iststop(input, '#');
+
+ return URI_NORMALIZER_ERR_NONE;
+}
+
+enum uri_normalizer_err uri_normalizer_fragment_encode(const struct ist input, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_percent_decode_unreserved(const struct ist input, int strict, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_percent_upper(const struct ist input, int strict, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_path_dot(const struct ist path, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_path_dotdot(const struct ist path, int full, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_path_merge_slashes(const struct ist path, struct ist *dst);
+enum uri_normalizer_err uri_normalizer_query_sort(const struct ist query, const char delim, struct ist *dst);
+
+#endif /* _HAPROXY_URI_NORMALIZER_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/haproxy/vars-t.h b/include/haproxy/vars-t.h
new file mode 100644
index 0000000..e239b1c
--- /dev/null
+++ b/include/haproxy/vars-t.h
@@ -0,0 +1,71 @@
+/*
+ * include/haproxy/vars-t.h
+ * Macros and structures definitions for variables.
+ *
+ * Copyright (C) 2015 Thierry FOURNIER <tfournier@arpalert.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_VARS_T_H
+#define _HAPROXY_VARS_T_H
+
+#include <haproxy/sample_data-t.h>
+#include <haproxy/thread-t.h>
+
+/* flags used when setting/clearing variables */
+#define VF_CREATEONLY 0x00000001 // do nothing if the variable already exists
+#define VF_PERMANENT 0x00000002 // variables known to the config parser
+
+#define VF_COND_IFEXISTS 0x00000004 // only set variable if it already exists
+#define VF_COND_IFNOTEXISTS 0x00000008 // only set variable if it did not exist yet
+#define VF_COND_IFEMPTY 0x00000010 // only set variable if sample is empty
+#define VF_COND_IFNOTEMPTY 0x00000020 // only set variable if sample is not empty
+#define VF_COND_IFSET 0x00000040 // only set variable if its type is not SMP_TYPE_ANY
+#define VF_COND_IFNOTSET 0x00000080 // only set variable if its type is ANY
+#define VF_COND_IFGT 0x00000100 // only set variable if its value is greater than the sample's
+#define VF_COND_IFLT 0x00000200 // only set variable if its value is less than the sample's
+
+enum vars_scope {
+ SCOPE_SESS = 0,
+ SCOPE_TXN,
+ SCOPE_REQ,
+ SCOPE_RES,
+ SCOPE_PROC,
+ SCOPE_CHECK,
+};
+
+struct vars {
+ struct list head;
+ enum vars_scope scope;
+ unsigned int size;
+ __decl_thread(HA_RWLOCK_T rwlock);
+};
+
+/* This struct describes a variable as found in an arg_data */
+struct var_desc {
+ uint64_t name_hash;
+ enum vars_scope scope;
+};
+
+struct var {
+ struct list l; /* Used for chaining vars. */
+ uint64_t name_hash; /* XXH3() of the variable's name */
+ uint flags; // VF_*
+ /* 32-bit hole here */
+ struct sample_data data; /* data storage. */
+};
+
+#endif
diff --git a/include/haproxy/vars.h b/include/haproxy/vars.h
new file mode 100644
index 0000000..ebd1f15
--- /dev/null
+++ b/include/haproxy/vars.h
@@ -0,0 +1,72 @@
+/*
+ * include/haproxy/vars.h
+ * Prototypes for variables.
+ *
+ * Copyright (C) 2015 Thierry FOURNIER <tfournier@arpalert.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_VARS_H
+#define _HAPROXY_VARS_H
+
+#include <haproxy/api-t.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/vars-t.h>
+
+extern struct vars proc_vars;
+
+void vars_init_head(struct vars *vars, enum vars_scope scope);
+void var_accounting_diff(struct vars *vars, struct session *sess, struct stream *strm, int size);
+unsigned int var_clear(struct var *var, int force);
+void vars_prune(struct vars *vars, struct session *sess, struct stream *strm);
+void vars_prune_per_sess(struct vars *vars);
+int vars_get_by_name(const char *name, size_t len, struct sample *smp, const struct buffer *def);
+int vars_set_by_name_ifexist(const char *name, size_t len, struct sample *smp);
+int vars_set_by_name(const char *name, size_t len, struct sample *smp);
+int vars_unset_by_name_ifexist(const char *name, size_t len, struct sample *smp);
+int vars_get_by_desc(const struct var_desc *var_desc, struct sample *smp, const struct buffer *def);
+int vars_check_arg(struct arg *arg, char **err);
+
+/* locks the <vars> for writes if it's in a shared scope */
+static inline void vars_wrlock(struct vars *vars)
+{
+ if (vars->scope == SCOPE_PROC)
+ HA_RWLOCK_WRLOCK(VARS_LOCK, &vars->rwlock);
+}
+
+/* unlocks the <vars> for writes if it's in a shared scope */
+static inline void vars_wrunlock(struct vars *vars)
+{
+ if (vars->scope == SCOPE_PROC)
+ HA_RWLOCK_WRUNLOCK(VARS_LOCK, &vars->rwlock);
+}
+
+/* locks the <vars> for reads if it's in a shared scope */
+static inline void vars_rdlock(struct vars *vars)
+{
+ if (vars->scope == SCOPE_PROC)
+ HA_RWLOCK_RDLOCK(VARS_LOCK, &vars->rwlock);
+}
+
+/* unlocks the <vars> for reads if it's in a shared scope */
+static inline void vars_rdunlock(struct vars *vars)
+{
+ if (vars->scope == SCOPE_PROC)
+ HA_RWLOCK_RDUNLOCK(VARS_LOCK, &vars->rwlock);
+}
+
+#endif
diff --git a/include/haproxy/version.h b/include/haproxy/version.h
new file mode 100644
index 0000000..651a8de
--- /dev/null
+++ b/include/haproxy/version.h
@@ -0,0 +1,86 @@
+/*
+ * include/haproxy/version.h
+ * Version and product identity definitions (name, branch, status, URLs).
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_VERSION_H
+#define _HAPROXY_VERSION_H
+
+#include <haproxy/api.h>
+
+#ifdef CONFIG_PRODUCT_NAME
+#define PRODUCT_NAME CONFIG_PRODUCT_NAME
+#else
+#define PRODUCT_NAME "HAProxy"
+#endif
+
+#ifdef CONFIG_PRODUCT_BRANCH
+#define PRODUCT_BRANCH CONFIG_PRODUCT_BRANCH
+#else
+#define PRODUCT_BRANCH "2.9"
+#endif
+
+#ifdef CONFIG_PRODUCT_STATUS
+#define PRODUCT_STATUS CONFIG_PRODUCT_STATUS
+#else
+#define PRODUCT_STATUS "Status: stable branch - will stop receiving fixes around Q1 2025."
+#endif
+
+#ifdef CONFIG_PRODUCT_URL_BUGS
+#define PRODUCT_URL_BUGS CONFIG_PRODUCT_URL_BUGS
+#else
+#define PRODUCT_URL_BUGS "http://www.haproxy.org/bugs/bugs-%s.html"
+#endif
+
+#ifdef CONFIG_PRODUCT_URL
+#define PRODUCT_URL CONFIG_PRODUCT_URL
+#else
+#define PRODUCT_URL "http://www.haproxy.org/"
+#endif
+
+#ifdef CONFIG_PRODUCT_URL_UPD
+#define PRODUCT_URL_UPD CONFIG_PRODUCT_URL_UPD
+#else
+#define PRODUCT_URL_UPD "http://www.haproxy.org/#down"
+#endif
+
+#ifdef CONFIG_PRODUCT_URL_DOC
+#define PRODUCT_URL_DOC CONFIG_PRODUCT_URL_DOC
+#else
+#define PRODUCT_URL_DOC "http://www.haproxy.org/#docs"
+#endif
+
+#ifdef CONFIG_HAPROXY_VERSION
+#define HAPROXY_VERSION CONFIG_HAPROXY_VERSION
+#else
+#error "Must define CONFIG_HAPROXY_VERSION"
+#endif
+
+#ifdef CONFIG_HAPROXY_DATE
+#define HAPROXY_DATE CONFIG_HAPROXY_DATE
+#else
+#error "Must define CONFIG_HAPROXY_DATE"
+#endif
+
+extern char haproxy_version[];
+extern char haproxy_date[];
+extern char stats_version_string[];
+
+#endif /* _HAPROXY_VERSION_H */
+
diff --git a/include/haproxy/xref-t.h b/include/haproxy/xref-t.h
new file mode 100644
index 0000000..a2aed54
--- /dev/null
+++ b/include/haproxy/xref-t.h
@@ -0,0 +1,45 @@
+/*
+ * include/haproxy/xref-t.h
+ * Atomic cross-references between two elements - types
+ *
+ * Copyright (C) 2017 Thierry Fournier <thierry.fournier@ozon.io>
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __HAPROXY_XREF_T_H__
+#define __HAPROXY_XREF_T_H__
+
+/* xref is used to create relation between two elements.
+ * Once an element is released, it breaks the relation. If the
+ * relation is already broken, it frees the xref struct.
+ * The pointer between two elements is sort of a refcount with
+ * max value 1. The relation is only between two elements.
+ * The pointer and the type of elements a and b are conventional.
+ */
+
+#define XREF_BUSY ((struct xref *)1)
+
+struct xref {
+ struct xref *peer;
+};
+
+#endif /* __HAPROXY_XREF_T_H__ */
diff --git a/include/haproxy/xref.h b/include/haproxy/xref.h
new file mode 100644
index 0000000..42eed58
--- /dev/null
+++ b/include/haproxy/xref.h
@@ -0,0 +1,105 @@
+/*
+ * include/haproxy/xref.h
+ * Atomic cross-references between two elements - functions
+ *
+ * Copyright (C) 2017 Thierry Fournier <thierry.fournier@ozon.io>
+ * Copyright (C) 2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __HAPROXY_XREF_H__
+#define __HAPROXY_XREF_H__
+
+#include <haproxy/xref-t.h>
+
+/* xref is used to create relation between two elements.
+ * Once an element is released, it breaks the relation. If the
+ * relation is already broken, it frees the xref struct.
+ * The pointer between two elements is sort of a refcount with
+ * max value 1. The relation is only between two elements.
+ * The pointer and the type of elements a and b are conventional.
+ */
+
+static inline void xref_create(struct xref *xref_a, struct xref *xref_b)
+{
+ xref_a->peer = xref_b;
+ xref_b->peer = xref_a;
+}
+
+static inline struct xref *xref_get_peer_and_lock(struct xref *xref)
+{
+ struct xref *local;
+ struct xref *remote;
+
+ while (1) {
+
+ /* Get the local pointer to the peer. */
+ local = _HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
+ __ha_barrier_atomic_store();
+
+ /* If the local pointer is NULL, the peer no longer exists. */
+ if (local == NULL) {
+ xref->peer = NULL;
+ return NULL;
+ }
+
+	/* If the local pointer is BUSY, the peer is trying to acquire
+	 * the lock. We retry the process.
+ */
+ if (local == XREF_BUSY)
+ continue;
+
+ /* We are locked, the peer can't disappear, try to acquire
+	 * the peer's lock. Note that remote can't be NULL.
+ */
+ remote = _HA_ATOMIC_XCHG(&local->peer, XREF_BUSY);
+
+ /* The remote lock is BUSY, We retry the process. */
+ if (remote == XREF_BUSY) {
+ xref->peer = local;
+ __ha_barrier_store();
+ continue;
+ }
+
+ /* We have the lock, we return the value of the xref. */
+ return local;
+ }
+}
+
+static inline void xref_unlock(struct xref *xref, struct xref *peer)
+{
+ /* Release the peer. */
+ peer->peer = xref;
+
+ __ha_barrier_store();
+
+ /* Release myself. */
+ xref->peer = peer;
+}
+
+static inline void xref_disconnect(struct xref *xref, struct xref *peer)
+{
+ peer->peer = NULL;
+ __ha_barrier_store();
+ xref->peer = NULL;
+}
+
+#endif /* __HAPROXY_XREF_H__ */
diff --git a/include/haproxy/xxhash.h b/include/haproxy/xxhash.h
new file mode 100644
index 0000000..cd333e6
--- /dev/null
+++ b/include/haproxy/xxhash.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2020 Dragan Dosen <ddosen@haproxy.com>
+ * Copyright (C) 2021 Tim Duesterhus <tim@bastelstu.be>
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HAPROXY_XXHASH_H
+#define _HAPROXY_XXHASH_H
+
+/* Make all xxhash functions inline, with implementations being directly
+ * included within xxhash.h.
+ */
+#ifndef XXH_INLINE_ALL
+#define XXH_INLINE_ALL
+#else
+#error "XXH_INLINE_ALL is already defined."
+#endif
+
+#include <import/xxhash.h>
+
+/* Make the new complex name for the xxhash function easier to remember
+ * and use.
+ */
+#ifndef XXH3
+#define XXH3(data, len, seed) XXH3_64bits_withSeed(data, len, seed)
+#endif
+
+#endif
diff --git a/include/import/atomic-ops.h b/include/import/atomic-ops.h
new file mode 100644
index 0000000..29674db
--- /dev/null
+++ b/include/import/atomic-ops.h
@@ -0,0 +1,1991 @@
+/* generic atomic operations used by progressive locks
+ *
+ * Copyright (C) 2012-2022 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PL_ATOMIC_OPS_H
+#define PL_ATOMIC_OPS_H
+
+/* The definitions below exist in two forms:
+ * - fallback form (_pl_*)
+ * - preferred form (pl_*)
+ *
+ * As a general rule, given that C11 atomics tend to offer more flexibility to
+ * the compiler, these should set the preferred form, and the arch-specific
+ * code should set the fallback code. But it's possible for arch-specific code
+ * to set a preferred form, in which case it will simply be used over the other
+ * ones.
+ */
+
+/*
+ * Architecture-specific versions of the various operations
+ */
+
+/*
+ * ###### ix86 / x86_64 below ######
+ */
+#if defined(__i386__) || defined (__i486__) || defined (__i586__) || defined (__i686__) || defined (__x86_64__)
+
+/* for compilers supporting condition flags on output, let's directly return them */
+#if defined(__GCC_ASM_FLAG_OUTPUTS__)
+#define X86_COND_C_TO_REG(reg) ""
+#define X86_COND_Z_TO_REG(reg) ""
+#define X86_COND_NZ_TO_REG(reg) ""
+#define X86_COND_C_RESULT(var) "=@ccc"(var)
+#define X86_COND_Z_RESULT(var) "=@ccz"(var)
+#define X86_COND_NZ_RESULT(var) "=@ccnz"(var)
+#else
+#define X86_COND_C_TO_REG(reg) "sbb %" #reg ", %" #reg "\n\t"
+#define X86_COND_Z_TO_REG(reg) "sete %" #reg "\n\t"
+#define X86_COND_NZ_TO_REG(reg) "setne %" #reg "\n\t"
+#define X86_COND_C_RESULT(var) "=r"(var)
+#define X86_COND_Z_RESULT(var) "=qm"(var)
+#define X86_COND_NZ_RESULT(var) "=qm"(var)
+#endif
+
+/* CPU relaxation while waiting (PAUSE instruction on x86) */
+#define pl_cpu_relax() do { \
+ asm volatile("rep;nop\n"); \
+ } while (0)
+
+/* full memory barrier using mfence when SSE2 is supported, falling back to
+ * "lock add %esp" (gcc uses "lock add" or "lock or").
+ */
+#if defined(__SSE2__)
+
+#define _pl_mb() do { \
+ asm volatile("mfence" ::: "memory"); \
+ } while (0)
+
+#elif defined(__x86_64__)
+
+#define _pl_mb() do { \
+ asm volatile("lock addl $0,0 (%%rsp)" ::: "memory", "cc"); \
+ } while (0)
+
+#else /* ix86 */
+
+#define _pl_mb() do { \
+ asm volatile("lock addl $0,0 (%%esp)" ::: "memory", "cc"); \
+ } while (0)
+
+#endif /* end of pl_mb() case for sse2/x86_64/x86 */
+
+/* load/store barriers are nops on x86 */
+#define _pl_mb_load() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_store() do { asm volatile("" ::: "memory"); } while (0)
+
+/* atomic full/load/store are also nops on x86 */
+#define _pl_mb_ato() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_ato_load() do { asm volatile("" ::: "memory"); } while (0)
+#define _pl_mb_ato_store() do { asm volatile("" ::: "memory"); } while (0)
+
+/* atomic load: on x86 it's just a volatile read */
+#define _pl_load_lax(ptr) _pl_load(ptr)
+#define _pl_load(ptr) ({ typeof(*(ptr)) __ptr = *(volatile typeof(ptr))ptr; __ptr; })
+
+/* atomic store: on x86 it's just a volatile write */
+#define _pl_store_lax(ptr) _pl_store(ptr)
+#define _pl_store(ptr, x) do { *((volatile typeof(ptr))(ptr)) = (typeof(*ptr))(x); } while (0)
+
+/* increment integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#define _pl_inc_lax(ptr) _pl_inc(ptr)
+#define _pl_inc_acq(ptr) _pl_inc(ptr)
+#define _pl_inc_rel(ptr) _pl_inc(ptr)
+#define _pl_inc(ptr) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incq %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incl %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incw %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock incb %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_inc__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_inc__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* decrement integer value pointed to by pointer <ptr>, and return non-zero if
+ * result is non-null.
+ */
+#define _pl_dec_lax(ptr) _pl_dec(ptr)
+#define _pl_dec_acq(ptr) _pl_dec(ptr)
+#define _pl_dec_rel(ptr) _pl_dec(ptr)
+#define _pl_dec(ptr) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decq %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decl %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decw %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret; \
+ asm volatile("lock decb %0\n" \
+ X86_COND_NZ_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_NZ_RESULT(ret) \
+ : \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_dec__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_dec__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* increment integer value pointed to by pointer <ptr>, no return */
+#define pl_inc_noret_lax(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret_acq(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret_rel(ptr) pl_inc_noret(ptr)
+#define pl_inc_noret(ptr) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock incq %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock incl %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock incw %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock incb %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_inc_noret__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_inc_noret__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* decrement integer value pointed to by pointer <ptr>, no return */
+#define pl_dec_noret_lax(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret_acq(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret_rel(ptr) pl_dec_noret(ptr)
+#define pl_dec_noret(ptr) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock decq %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock decl %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock decw %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock decb %0\n" \
+ : "+m" (*(ptr)) \
+ : \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_dec_noret__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_dec_noret__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* add integer constant <x> to integer value pointed to by pointer <ptr>,
+ * no return. Size of <x> is not checked.
+ */
+#define _pl_add_noret_lax(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret_acq(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret_rel(ptr, x) _pl_add_noret(ptr, x)
+#define _pl_add_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock addq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock addl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock addw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock addb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_add__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_add__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* subtract integer constant <x> from integer value pointed to by pointer
+ * <ptr>, no return. Size of <x> is not checked.
+ */
+#define _pl_sub_noret_lax(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret_acq(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret_rel(ptr, x) _pl_sub_noret(ptr, x)
+#define _pl_sub_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock subq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock subl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock subw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock subb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_sub__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_sub__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary and integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_and_noret_lax(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret_acq(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret_rel(ptr, x) _pl_and_noret(ptr, x)
+#define _pl_and_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock andq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock andl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock andw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock andb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_and__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_and__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary or integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_or_noret_lax(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret_acq(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret_rel(ptr, x) _pl_or_noret(ptr, x)
+#define _pl_or_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock orq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock orl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock orw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock orb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_or__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_or__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* binary xor integer value pointed to by pointer <ptr> with constant <x>, no
+ * return. Size of <x> is not checked.
+ */
+#define _pl_xor_noret_lax(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret_acq(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret_rel(ptr, x) _pl_xor_noret(ptr, x)
+#define _pl_xor_noret(ptr, x) do { \
+ if (sizeof(long) == 8 && sizeof(*(ptr)) == 8) { \
+ asm volatile("lock xorq %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned long)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 4) { \
+ asm volatile("lock xorl %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned int)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 2) { \
+ asm volatile("lock xorw %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned short)(x)) \
+ : "cc"); \
+ } else if (sizeof(*(ptr)) == 1) { \
+ asm volatile("lock xorb %1, %0\n" \
+ : "+m" (*(ptr)) \
+ : "er" ((unsigned char)(x)) \
+ : "cc"); \
+ } else { \
+ void __unsupported_argument_size_for_pl_xor__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_xor__(__FILE__,__LINE__); \
+ } \
+} while (0)
+
+/* test and reset bit <bit> in integer value pointed to by pointer <ptr>. Returns
+ * 0 if the bit was not set, or ~0 of the same type as *ptr if it was set. Note
+ * that there is no 8-bit equivalent operation.
+ */
+#define pl_btr_lax(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr_acq(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr_rel(ptr, bit) pl_btr(ptr, bit)
+#define pl_btr(ptr, bit) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret; \
+ asm volatile("lock btrq %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret; \
+ asm volatile("lock btrl %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret; \
+ asm volatile("lock btrw %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_btr__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_btr__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* test and set bit <bit> in integer value pointed to by pointer <ptr>. Returns
+ * 0 if the bit was not set, or ~0 of the same type as *ptr if it was set. Note
+ * that there is no 8-bit equivalent operation.
+ */
+#define pl_bts_lax(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts_acq(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts_rel(ptr, bit) pl_bts(ptr, bit)
+#define pl_bts(ptr, bit) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret; \
+ asm volatile("lock btsq %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned long)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret; \
+ asm volatile("lock btsl %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned int)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret; \
+ asm volatile("lock btsw %2, %0\n\t" \
+ X86_COND_C_TO_REG(1) \
+ : "+m" (*(ptr)), X86_COND_C_RESULT(ret) \
+ : "Ir" ((unsigned short)(bit)) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_bts__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_bts__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* Note: for an unclear reason, gcc's __sync_fetch_and_add() implementation
+ * produces less optimal than hand-crafted asm code so let's implement here the
+ * operations we need for the most common archs.
+ */
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ * => THIS IS LEGACY, USE _pl_ldadd() INSTEAD.
+ */
+#define _pl_xadd(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_xadd__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_xadd__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ */
+#define _pl_ldadd_lax(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd_acq(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd_rel(ptr, x) _pl_ldadd(ptr, x)
+#define _pl_ldadd(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ldadd__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_ldadd__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* fetch-and-sub: fetch integer value pointed to by pointer <ptr>, add -<x>
+ * to <*ptr> and return the previous value.
+ */
+#define _pl_ldsub_lax(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub_acq(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub_rel(ptr, x) _pl_ldsub(ptr, x)
+#define _pl_ldsub(ptr, x) ( \
+ (sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({ \
+ unsigned long ret = (unsigned long)(-x); \
+ asm volatile("lock xaddq %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 4) ? ({ \
+ unsigned int ret = (unsigned int)(-x); \
+ asm volatile("lock xaddl %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 2) ? ({ \
+ unsigned short ret = (unsigned short)(-x); \
+ asm volatile("lock xaddw %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : (sizeof(*(ptr)) == 1) ? ({ \
+ unsigned char ret = (unsigned char)(-x); \
+ asm volatile("lock xaddb %0, %1\n" \
+ : "=r" (ret), "+m" (*(ptr)) \
+ : "0" (ret) \
+ : "cc"); \
+ ret; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ldsub__(char *,int); \
+ if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 && \
+ sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+ __unsupported_argument_size_for_pl_ldsub__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* exchange value <x> with integer value pointed to by pointer <ptr>, and return
+ * previous <*ptr> value. <x> must be of the same size as <*ptr>.
+ * Note: no explicit "lock" prefix is needed: xchg with a memory operand is
+ * implicitly locked on x86.
+ */
+#define _pl_xchg(ptr, x) (                                                    \
+	(sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({                       \
+		unsigned long ret = (unsigned long)(x);                       \
+		asm volatile("xchgq %0, %1\n"                                 \
+			     : "=r" (ret), "+m" (*(ptr))                      \
+			     : "0" (ret)                                      \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 4) ? ({                                       \
+		unsigned int ret = (unsigned int)(x);                         \
+		asm volatile("xchgl %0, %1\n"                                 \
+			     : "=r" (ret), "+m" (*(ptr))                      \
+			     : "0" (ret)                                      \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 2) ? ({                                       \
+		unsigned short ret = (unsigned short)(x);                     \
+		asm volatile("xchgw %0, %1\n"                                 \
+			     : "=r" (ret), "+m" (*(ptr))                      \
+			     : "0" (ret)                                      \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 1) ? ({                                       \
+		unsigned char ret = (unsigned char)(x);                       \
+		asm volatile("xchgb %0, %1\n"                                 \
+			     : "=r" (ret), "+m" (*(ptr))                      \
+			     : "0" (ret)                                      \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : ({                                                               \
+		/* compile-time trap: undefined symbol reached only for unsupported sizes */ \
+		void __unsupported_argument_size_for_pl_xchg__(char *,int);   \
+		if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 &&             \
+		    sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+			__unsupported_argument_size_for_pl_xchg__(__FILE__,__LINE__); \
+		0;                                                            \
+	})                                                                    \
+)
+
+/* compare integer value <*ptr> with <old> and exchange it with <new> if
+ * it matches, and return <old>. <old> and <new> must be of the same size as
+ * <*ptr>.
+ * The cmpxchg instruction implicitly uses the accumulator for the expected
+ * value, hence the "=a"/"0" constraints; the value returned is the one that
+ * was actually read from <*ptr> (equal to <old> when the swap succeeded).
+ */
+#define _pl_cmpxchg(ptr, old, new) (                                          \
+	(sizeof(long) == 8 && sizeof(*(ptr)) == 8) ? ({                       \
+		unsigned long ret;                                            \
+		asm volatile("lock cmpxchgq %2,%1"                            \
+			     : "=a" (ret), "+m" (*(ptr))                      \
+			     : "r" ((unsigned long)(new)),                    \
+			       "0" ((unsigned long)(old))                     \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 4) ? ({                                       \
+		unsigned int ret;                                             \
+		asm volatile("lock cmpxchgl %2,%1"                            \
+			     : "=a" (ret), "+m" (*(ptr))                      \
+			     : "r" ((unsigned int)(new)),                     \
+			       "0" ((unsigned int)(old))                      \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 2) ? ({                                       \
+		unsigned short ret;                                           \
+		asm volatile("lock cmpxchgw %2,%1"                            \
+			     : "=a" (ret), "+m" (*(ptr))                      \
+			     : "r" ((unsigned short)(new)),                   \
+			       "0" ((unsigned short)(old))                    \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : (sizeof(*(ptr)) == 1) ? ({                                       \
+		unsigned char ret;                                            \
+		asm volatile("lock cmpxchgb %2,%1"                            \
+			     : "=a" (ret), "+m" (*(ptr))                      \
+			     : "r" ((unsigned char)(new)),                    \
+			       "0" ((unsigned char)(old))                     \
+			     : "cc");                                         \
+		ret; /* return value */                                       \
+	}) : ({                                                               \
+		/* compile-time trap: undefined symbol reached only for unsupported sizes */ \
+		void __unsupported_argument_size_for_pl_cmpxchg__(char *,int); \
+		if (sizeof(*(ptr)) != 1 && sizeof(*(ptr)) != 2 &&             \
+		    sizeof(*(ptr)) != 4 && (sizeof(long) != 8 || sizeof(*(ptr)) != 8)) \
+			__unsupported_argument_size_for_pl_cmpxchg__(__FILE__,__LINE__); \
+		0;                                                            \
+	})                                                                    \
+)
+
+/*
+ * ##### ARM64 (aarch64) below #####
+ */
+#elif defined(__aarch64__)
+
+/* This was shown to improve fairness on modern ARMv8 such as Neoverse N1 */
+#define pl_cpu_relax() do {				\
+		asm volatile("isb" ::: "memory");	\
+	} while (0)
+
+/* full/load/store barriers: "dmb ish" orders all memory accesses in the
+ * inner-shareable domain; the "ld"/"st" variants restrict ordering to
+ * loads-only / stores-only respectively.
+ */
+#define _pl_mb()    do { asm volatile("dmb ish"   ::: "memory"); } while (0)
+#define _pl_mb_load()  do { asm volatile("dmb ishld" ::: "memory"); } while (0)
+#define _pl_mb_store() do { asm volatile("dmb ishst" ::: "memory"); } while (0)
+
+/* atomic full/load/store: same instructions as above on this architecture */
+#define _pl_mb_ato()   do { asm volatile("dmb ish"   ::: "memory"); } while (0)
+#define _pl_mb_ato_load()  do { asm volatile("dmb ishld" ::: "memory"); } while (0)
+#define _pl_mb_ato_store() do { asm volatile("dmb ishst" ::: "memory"); } while (0)
+
+#endif // end of arch-specific code
+
+
+/*
+ * Generic code using the C11 __atomic API for functions not defined above.
+ * These are usable from gcc-4.7 and clang. We'll simply rely on the macros
+ * defining the memory orders to detect them. All operations are not
+ * necessarily defined, so some fallbacks to the default methods might still
+ * be necessary.
+ */
+
+
+#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \
+ defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST)
+
+/* compiler-only memory barrier, for use around locks */
+#ifndef pl_barrier
+#define pl_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)
+#endif
+
+/* full memory barrier */
+#ifndef pl_mb
+#define pl_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#endif
+
+/* atomic load */
+#ifndef pl_load_lax
+#define pl_load_lax(ptr) __atomic_load_n(ptr, __ATOMIC_RELAXED)
+#endif
+
+#ifndef pl_load
+#define pl_load(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE)
+#endif
+
+/* atomic store */
+#ifndef pl_store_lax
+#define pl_store_lax(ptr, x) __atomic_store_n((ptr), (x), __ATOMIC_RELAXED)
+#endif
+
+#ifndef pl_store
+#define pl_store(ptr, x) __atomic_store_n((ptr), (x), __ATOMIC_RELEASE)
+#endif
+
+/* increment integer value pointed to by pointer <ptr>, and return non-zero if
+ * the result is non-zero (i.e. the counter did not wrap to zero).
+ */
+#ifndef pl_inc_lax
+#define pl_inc_lax(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_RELAXED) != 0)
+#endif
+
+#ifndef pl_inc_acq
+#define pl_inc_acq(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_ACQUIRE) != 0)
+#endif
+
+#ifndef pl_inc_rel
+#define pl_inc_rel(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_RELEASE) != 0)
+#endif
+
+#ifndef pl_inc
+#define pl_inc(ptr) (__atomic_add_fetch((ptr), 1, __ATOMIC_SEQ_CST) != 0)
+#endif
+
+/* decrement integer value pointed to by pointer <ptr>, and return non-zero if
+ * the result is non-zero (i.e. the counter did not reach zero).
+ */
+#ifndef pl_dec_lax
+#define pl_dec_lax(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_RELAXED) != 0)
+#endif
+
+#ifndef pl_dec_acq
+#define pl_dec_acq(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_ACQUIRE) != 0)
+#endif
+
+#ifndef pl_dec_rel
+#define pl_dec_rel(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_RELEASE) != 0)
+#endif
+
+#ifndef pl_dec
+#define pl_dec(ptr) (__atomic_sub_fetch((ptr), 1, __ATOMIC_SEQ_CST) != 0)
+#endif
+
+/* increment integer value pointed to by pointer <ptr>, no return */
+#ifndef pl_inc_noret_lax
+#define pl_inc_noret_lax(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_inc_noret_acq
+#define pl_inc_noret_acq(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_inc_noret_rel
+/* fixed: was misspelled "pl_inc_noret_relc", leaving the _rel variant
+ * undefined here and silently remapped to the seq_cst fallback below.
+ */
+#define pl_inc_noret_rel(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_inc_noret
+#define pl_inc_noret(ptr) ((void)__atomic_add_fetch((ptr), 1, __ATOMIC_SEQ_CST))
+#endif
+
+/* decrement integer value pointed to by pointer <ptr>, no return */
+#ifndef pl_dec_noret_lax
+#define pl_dec_noret_lax(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_dec_noret_acq
+#define pl_dec_noret_acq(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_dec_noret_rel
+/* fixed: was misspelled "pl_dec_noret_relc", leaving the _rel variant
+ * undefined here and silently remapped to the seq_cst fallback below.
+ */
+#define pl_dec_noret_rel(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_dec_noret
+#define pl_dec_noret(ptr) ((void)__atomic_sub_fetch((ptr), 1, __ATOMIC_SEQ_CST))
+#endif
+
+/* add integer constant <x> to integer value pointed to by pointer <ptr>.
+ * Size of <x> is not checked. The expression evaluates to the new value
+ * (callers wanting no result should use pl_add_noret*).
+ */
+#ifndef pl_add_lax
+#define pl_add_lax(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_add_acq
+#define pl_add_acq(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_add_rel
+/* fixed: was misspelled "pl_add_relc", leaving pl_add_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_add_rel(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_add
+#define pl_add(ptr, x) (__atomic_add_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* subtract integer constant <x> from integer value pointed to by pointer
+ * <ptr>. Size of <x> is not checked. The expression evaluates to the new
+ * value (callers wanting no result should use pl_sub_noret*).
+ */
+#ifndef pl_sub_lax
+#define pl_sub_lax(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_sub_acq
+#define pl_sub_acq(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_sub_rel
+/* fixed: was misspelled "pl_sub_relc", leaving pl_sub_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_sub_rel(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_sub
+#define pl_sub(ptr, x) (__atomic_sub_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary AND integer value pointed to by pointer <ptr> with constant <x>.
+ * Size of <x> is not checked. The expression evaluates to the new value.
+ */
+#ifndef pl_and_lax
+#define pl_and_lax(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_and_acq
+#define pl_and_acq(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_and_rel
+/* fixed: was misspelled "pl_and_relc", leaving pl_and_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_and_rel(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_and
+#define pl_and(ptr, x) (__atomic_and_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary OR integer value pointed to by pointer <ptr> with constant <x>.
+ * Size of <x> is not checked. The expression evaluates to the new value.
+ */
+#ifndef pl_or_lax
+#define pl_or_lax(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_or_acq
+#define pl_or_acq(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_or_rel
+/* fixed: was misspelled "pl_or_relc", leaving pl_or_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_or_rel(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_or
+#define pl_or(ptr, x) (__atomic_or_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* binary XOR integer value pointed to by pointer <ptr> with constant <x>.
+ * Size of <x> is not checked. The expression evaluates to the new value.
+ */
+#ifndef pl_xor_lax
+#define pl_xor_lax(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_xor_acq
+#define pl_xor_acq(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_xor_rel
+/* fixed: was misspelled "pl_xor_relc", leaving pl_xor_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_xor_rel(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_xor
+#define pl_xor(ptr, x) (__atomic_xor_fetch((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ * => THIS IS LEGACY, USE pl_ldadd() INSTEAD.
+ */
+#ifndef pl_xadd
+#define pl_xadd(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* exchange value <x> with integer value pointed to by pointer <ptr>, and return
+ * previous <*ptr> value. <x> must be of the same size as <*ptr>.
+ */
+#ifndef pl_xchg
+#define pl_xchg(ptr, x) (__atomic_exchange_n((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+/* compare integer value <*ptr> with <old> and exchange it with <new> if
+ * it matches, and return the value previously found at <*ptr> (i.e. <old>
+ * on success). <old> and <new> must be of the same size as <*ptr>.
+ */
+#ifndef pl_cmpxchg
+#define pl_cmpxchg(ptr, old, new) ({ \
+	/* fixed: typeof(*(ptr)) — the argument must be parenthesized so that \
+	 * expression pointers (e.g. p + 1) dereference correctly. \
+	 */ \
+	typeof(*(ptr)) __old = (old); \
+	__atomic_compare_exchange_n((ptr), &__old, (new), 0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); \
+	__old; })
+#endif
+
+/* fetch-and-add: fetch integer value pointed to by pointer <ptr>, add <x>
+ * to <*ptr> and return the previous value.
+ */
+#ifndef pl_ldadd_lax
+#define pl_ldadd_lax(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldadd_acq
+#define pl_ldadd_acq(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldadd_rel
+/* fixed: was misspelled "pl_ldadd_relc", leaving pl_ldadd_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_ldadd_rel(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldadd
+#define pl_ldadd(ptr, x) (__atomic_fetch_add((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+#ifndef pl_ldand_lax
+#define pl_ldand_lax(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldand_acq
+#define pl_ldand_acq(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldand_rel
+#define pl_ldand_relc(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldand
+#define pl_ldand(ptr, x) (__atomic_fetch_and((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+/* fetch-and-or: fetch integer value pointed to by pointer <ptr>, OR <x>
+ * into <*ptr> and return the previous value.
+ */
+#ifndef pl_ldor_lax
+#define pl_ldor_lax(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldor_acq
+#define pl_ldor_acq(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldor_rel
+/* fixed: was misspelled "pl_ldor_relc", leaving pl_ldor_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_ldor_rel(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldor
+#define pl_ldor(ptr, x) (__atomic_fetch_or((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+/* fetch-and-sub: fetch integer value pointed to by pointer <ptr>, subtract
+ * <x> from <*ptr> and return the previous value.
+ */
+#ifndef pl_ldsub_lax
+#define pl_ldsub_lax(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldsub_acq
+#define pl_ldsub_acq(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldsub_rel
+/* fixed: was misspelled "pl_ldsub_relc", leaving pl_ldsub_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_ldsub_rel(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldsub
+#define pl_ldsub(ptr, x) (__atomic_fetch_sub((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+
+/* fetch-and-xor: fetch integer value pointed to by pointer <ptr>, XOR <x>
+ * into <*ptr> and return the previous value.
+ */
+#ifndef pl_ldxor_lax
+#define pl_ldxor_lax(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_RELAXED))
+#endif
+
+#ifndef pl_ldxor_acq
+#define pl_ldxor_acq(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_ACQUIRE))
+#endif
+
+#ifndef pl_ldxor_rel
+/* fixed: was misspelled "pl_ldxor_relc", leaving pl_ldxor_rel undefined here
+ * and silently remapped to the seq_cst fallback below.
+ */
+#define pl_ldxor_rel(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_RELEASE))
+#endif
+
+#ifndef pl_ldxor
+#define pl_ldxor(ptr, x) (__atomic_fetch_xor((ptr), (x), __ATOMIC_SEQ_CST))
+#endif
+
+#endif /* end of C11 atomics */
+
+
+/*
+ * Automatically remap to fallback code when available. This allows the arch
+ * specific code above to be used as an immediate fallback for missing C11
+ * definitions. Everything not defined will use the generic code at the end.
+ */
+
+#if !defined(pl_cpu_relax) && defined(_pl_cpu_relax)
+# define pl_cpu_relax _pl_cpu_relax
+#endif
+
+#if !defined(pl_barrier) && defined(_pl_barrier)
+# define pl_barrier _pl_barrier
+#endif
+
+#if !defined(pl_mb) && defined(_pl_mb)
+# define pl_mb _pl_mb
+#endif
+
+#if !defined(pl_mb_load) && defined(_pl_mb_load)
+# define pl_mb_load _pl_mb_load
+#endif
+
+#if !defined(pl_mb_store) && defined(_pl_mb_store)
+# define pl_mb_store _pl_mb_store
+#endif
+
+#if !defined(pl_mb_ato) && defined(_pl_mb_ato)
+# define pl_mb_ato _pl_mb_ato
+#endif
+
+#if !defined(pl_mb_ato_load) && defined(_pl_mb_ato_load)
+# define pl_mb_ato_load _pl_mb_ato_load
+#endif
+
+#if !defined(pl_mb_ato_store) && defined(_pl_mb_ato_store)
+# define pl_mb_ato_store _pl_mb_ato_store
+#endif
+
+
+#if !defined(pl_load) && defined(_pl_load)
+#define pl_load _pl_load
+#endif
+
+#if !defined(pl_load_lax) && defined(_pl_load_lax)
+#define pl_load_lax _pl_load_lax
+#endif
+
+#if !defined(pl_store) && defined(_pl_store)
+#define pl_store _pl_store
+#endif
+
+#if !defined(pl_store_lax) && defined(_pl_store_lax)
+#define pl_store_lax _pl_store_lax
+#endif
+
+
+#if !defined(pl_inc_noret_lax) && defined(_pl_inc_noret_lax)
+# define pl_inc_noret_lax _pl_inc_noret_lax
+#endif
+
+#if !defined(pl_inc_noret_acq) && defined(_pl_inc_noret_acq)
+# define pl_inc_noret_acq _pl_inc_noret_acq
+#endif
+
+#if !defined(pl_inc_noret_rel) && defined(_pl_inc_noret_rel)
+# define pl_inc_noret_rel _pl_inc_noret_rel
+#endif
+
+#if !defined(pl_inc_noret) && defined(_pl_inc_noret)
+# define pl_inc_noret _pl_inc_noret
+#endif
+
+
+#if !defined(pl_dec_noret_lax) && defined(_pl_dec_noret_lax)
+# define pl_dec_noret_lax _pl_dec_noret_lax
+#endif
+
+#if !defined(pl_dec_noret_acq) && defined(_pl_dec_noret_acq)
+# define pl_dec_noret_acq _pl_dec_noret_acq
+#endif
+
+#if !defined(pl_dec_noret_rel) && defined(_pl_dec_noret_rel)
+# define pl_dec_noret_rel _pl_dec_noret_rel
+#endif
+
+#if !defined(pl_dec_noret) && defined(_pl_dec_noret)
+# define pl_dec_noret _pl_dec_noret
+#endif
+
+
+#if !defined(pl_inc_lax) && defined(_pl_inc_lax)
+# define pl_inc_lax _pl_inc_lax
+#endif
+
+#if !defined(pl_inc_acq) && defined(_pl_inc_acq)
+# define pl_inc_acq _pl_inc_acq
+#endif
+
+#if !defined(pl_inc_rel) && defined(_pl_inc_rel)
+# define pl_inc_rel _pl_inc_rel
+#endif
+
+#if !defined(pl_inc) && defined(_pl_inc)
+# define pl_inc _pl_inc
+#endif
+
+
+#if !defined(pl_dec_lax) && defined(_pl_dec_lax)
+# define pl_dec_lax _pl_dec_lax
+#endif
+
+#if !defined(pl_dec_acq) && defined(_pl_dec_acq)
+# define pl_dec_acq _pl_dec_acq
+#endif
+
+#if !defined(pl_dec_rel) && defined(_pl_dec_rel)
+# define pl_dec_rel _pl_dec_rel
+#endif
+
+#if !defined(pl_dec) && defined(_pl_dec)
+# define pl_dec _pl_dec
+#endif
+
+
+#if !defined(pl_add_lax) && defined(_pl_add_lax)
+# define pl_add_lax _pl_add_lax
+#endif
+
+#if !defined(pl_add_acq) && defined(_pl_add_acq)
+# define pl_add_acq _pl_add_acq
+#endif
+
+#if !defined(pl_add_rel) && defined(_pl_add_rel)
+# define pl_add_rel _pl_add_rel
+#endif
+
+#if !defined(pl_add) && defined(_pl_add)
+# define pl_add _pl_add
+#endif
+
+
+#if !defined(pl_add_noret_lax) && defined(_pl_add_noret_lax)
+# define pl_add_noret_lax _pl_add_noret_lax
+#endif
+
+#if !defined(pl_add_noret_acq) && defined(_pl_add_noret_acq)
+# define pl_add_noret_acq _pl_add_noret_acq
+#endif
+
+#if !defined(pl_add_noret_rel) && defined(_pl_add_noret_rel)
+# define pl_add_noret_rel _pl_add_noret_rel
+#endif
+
+#if !defined(pl_add_noret) && defined(_pl_add_noret)
+# define pl_add_noret _pl_add_noret
+#endif
+
+#if !defined(pl_and_lax) && defined(_pl_and_lax)
+# define pl_and_lax _pl_and_lax
+#endif
+
+#if !defined(pl_and_acq) && defined(_pl_and_acq)
+# define pl_and_acq _pl_and_acq
+#endif
+
+#if !defined(pl_and_rel) && defined(_pl_and_rel)
+# define pl_and_rel _pl_and_rel
+#endif
+
+#if !defined(pl_and) && defined(_pl_and)
+# define pl_and _pl_and
+#endif
+
+
+#if !defined(pl_and_noret_lax) && defined(_pl_and_noret_lax)
+# define pl_and_noret_lax _pl_and_noret_lax
+#endif
+
+#if !defined(pl_and_noret_acq) && defined(_pl_and_noret_acq)
+# define pl_and_noret_acq _pl_and_noret_acq
+#endif
+
+#if !defined(pl_and_noret_rel) && defined(_pl_and_noret_rel)
+# define pl_and_noret_rel _pl_and_noret_rel
+#endif
+
+#if !defined(pl_and_noret) && defined(_pl_and_noret)
+# define pl_and_noret _pl_and_noret
+#endif
+
+
+#if !defined(pl_or_lax) && defined(_pl_or_lax)
+# define pl_or_lax _pl_or_lax
+#endif
+
+#if !defined(pl_or_acq) && defined(_pl_or_acq)
+# define pl_or_acq _pl_or_acq
+#endif
+
+#if !defined(pl_or_rel) && defined(_pl_or_rel)
+# define pl_or_rel _pl_or_rel
+#endif
+
+#if !defined(pl_or) && defined(_pl_or)
+# define pl_or _pl_or
+#endif
+
+
+#if !defined(pl_or_noret_lax) && defined(_pl_or_noret_lax)
+# define pl_or_noret_lax _pl_or_noret_lax
+#endif
+
+#if !defined(pl_or_noret_acq) && defined(_pl_or_noret_acq)
+# define pl_or_noret_acq _pl_or_noret_acq
+#endif
+
+#if !defined(pl_or_noret_rel) && defined(_pl_or_noret_rel)
+# define pl_or_noret_rel _pl_or_noret_rel
+#endif
+
+#if !defined(pl_or_noret) && defined(_pl_or_noret)
+# define pl_or_noret _pl_or_noret
+#endif
+
+
+#if !defined(pl_xor_lax) && defined(_pl_xor_lax)
+# define pl_xor_lax _pl_xor_lax
+#endif
+
+#if !defined(pl_xor_acq) && defined(_pl_xor_acq)
+# define pl_xor_acq _pl_xor_acq
+#endif
+
+#if !defined(pl_xor_rel) && defined(_pl_xor_rel)
+# define pl_xor_rel _pl_xor_rel
+#endif
+
+#if !defined(pl_xor) && defined(_pl_xor)
+# define pl_xor _pl_xor
+#endif
+
+
+#if !defined(pl_xor_noret_lax) && defined(_pl_xor_noret_lax)
+# define pl_xor_noret_lax _pl_xor_noret_lax
+#endif
+
+#if !defined(pl_xor_noret_acq) && defined(_pl_xor_noret_acq)
+# define pl_xor_noret_acq _pl_xor_noret_acq
+#endif
+
+#if !defined(pl_xor_noret_rel) && defined(_pl_xor_noret_rel)
+# define pl_xor_noret_rel _pl_xor_noret_rel
+#endif
+
+#if !defined(pl_xor_noret) && defined(_pl_xor_noret)
+# define pl_xor_noret _pl_xor_noret
+#endif
+
+
+#if !defined(pl_sub_lax) && defined(_pl_sub_lax)
+# define pl_sub_lax _pl_sub_lax
+#endif
+
+#if !defined(pl_sub_acq) && defined(_pl_sub_acq)
+# define pl_sub_acq _pl_sub_acq
+#endif
+
+#if !defined(pl_sub_rel) && defined(_pl_sub_rel)
+# define pl_sub_rel _pl_sub_rel
+#endif
+
+#if !defined(pl_sub) && defined(_pl_sub)
+# define pl_sub _pl_sub
+#endif
+
+
+#if !defined(pl_sub_noret_lax) && defined(_pl_sub_noret_lax)
+# define pl_sub_noret_lax _pl_sub_noret_lax
+#endif
+
+#if !defined(pl_sub_noret_acq) && defined(_pl_sub_noret_acq)
+# define pl_sub_noret_acq _pl_sub_noret_acq
+#endif
+
+#if !defined(pl_sub_noret_rel) && defined(_pl_sub_noret_rel)
+# define pl_sub_noret_rel _pl_sub_noret_rel
+#endif
+
+#if !defined(pl_sub_noret) && defined(_pl_sub_noret)
+# define pl_sub_noret _pl_sub_noret
+#endif
+
+
+#if !defined(pl_btr_lax) && defined(_pl_btr_lax)
+# define pl_btr_lax _pl_btr_lax
+#endif
+
+#if !defined(pl_btr_acq) && defined(_pl_btr_acq)
+# define pl_btr_acq _pl_btr_acq
+#endif
+
+#if !defined(pl_btr_rel) && defined(_pl_btr_rel)
+# define pl_btr_rel _pl_btr_rel
+#endif
+
+#if !defined(pl_btr) && defined(_pl_btr)
+# define pl_btr _pl_btr
+#endif
+
+
+#if !defined(pl_bts_lax) && defined(_pl_bts_lax)
+# define pl_bts_lax _pl_bts_lax
+#endif
+
+#if !defined(pl_bts_acq) && defined(_pl_bts_acq)
+# define pl_bts_acq _pl_bts_acq
+#endif
+
+#if !defined(pl_bts_rel) && defined(_pl_bts_rel)
+# define pl_bts_rel _pl_bts_rel
+#endif
+
+#if !defined(pl_bts) && defined(_pl_bts)
+# define pl_bts _pl_bts
+#endif
+
+
+#if !defined(pl_xadd) && defined(_pl_xadd)
+# define pl_xadd _pl_xadd
+#endif
+
+#if !defined(pl_cmpxchg) && defined(_pl_cmpxchg)
+# define pl_cmpxchg _pl_cmpxchg
+#endif
+
+#if !defined(pl_xchg) && defined(_pl_xchg)
+# define pl_xchg _pl_xchg
+#endif
+
+
+#if !defined(pl_ldadd_lax) && defined(_pl_ldadd_lax)
+# define pl_ldadd_lax _pl_ldadd_lax
+#endif
+
+#if !defined(pl_ldadd_acq) && defined(_pl_ldadd_acq)
+# define pl_ldadd_acq _pl_ldadd_acq
+#endif
+
+#if !defined(pl_ldadd_rel) && defined(_pl_ldadd_rel)
+# define pl_ldadd_rel _pl_ldadd_rel
+#endif
+
+#if !defined(pl_ldadd) && defined(_pl_ldadd)
+# define pl_ldadd _pl_ldadd
+#endif
+
+
+#if !defined(pl_ldand_lax) && defined(_pl_ldand_lax)
+# define pl_ldand_lax _pl_ldand_lax
+#endif
+
+#if !defined(pl_ldand_acq) && defined(_pl_ldand_acq)
+# define pl_ldand_acq _pl_ldand_acq
+#endif
+
+#if !defined(pl_ldand_rel) && defined(_pl_ldand_rel)
+# define pl_ldand_rel _pl_ldand_rel
+#endif
+
+#if !defined(pl_ldand) && defined(_pl_ldand)
+# define pl_ldand _pl_ldand
+#endif
+
+
+#if !defined(pl_ldor_lax) && defined(_pl_ldor_lax)
+# define pl_ldor_lax _pl_ldor_lax
+#endif
+
+#if !defined(pl_ldor_acq) && defined(_pl_ldor_acq)
+# define pl_ldor_acq _pl_ldor_acq
+#endif
+
+#if !defined(pl_ldor_rel) && defined(_pl_ldor_rel)
+# define pl_ldor_rel _pl_ldor_rel
+#endif
+
+#if !defined(pl_ldor) && defined(_pl_ldor)
+# define pl_ldor _pl_ldor
+#endif
+
+
+#if !defined(pl_ldxor_lax) && defined(_pl_ldxor_lax)
+# define pl_ldxor_lax _pl_ldxor_lax
+#endif
+
+#if !defined(pl_ldxor_acq) && defined(_pl_ldxor_acq)
+# define pl_ldxor_acq _pl_ldxor_acq
+#endif
+
+#if !defined(pl_ldxor_rel) && defined(_pl_ldxor_rel)
+# define pl_ldxor_rel _pl_ldxor_rel
+#endif
+
+#if !defined(pl_ldxor) && defined(_pl_ldxor)
+# define pl_ldxor _pl_ldxor
+#endif
+
+
+#if !defined(pl_ldsub_lax) && defined(_pl_ldsub_lax)
+# define pl_ldsub_lax _pl_ldsub_lax
+#endif
+
+#if !defined(pl_ldsub_acq) && defined(_pl_ldsub_acq)
+# define pl_ldsub_acq _pl_ldsub_acq
+#endif
+
+#if !defined(pl_ldsub_rel) && defined(_pl_ldsub_rel)
+# define pl_ldsub_rel _pl_ldsub_rel
+#endif
+
+#if !defined(pl_ldsub) && defined(_pl_ldsub)
+# define pl_ldsub _pl_ldsub
+#endif
+
+
+/*
+ * Generic code using the __sync API for everything not defined above.
+ */
+
+
+/* CPU relaxation while waiting */
+#ifndef pl_cpu_relax
+#define pl_cpu_relax() do { \
+ asm volatile(""); \
+ } while (0)
+#endif
+
+/* compiler-only memory barrier, for use around locks */
+#ifndef pl_barrier
+#define pl_barrier() do { \
+ asm volatile("" ::: "memory"); \
+ } while (0)
+#endif
+
+/* full memory barrier */
+#ifndef pl_mb
+#define pl_mb() do { \
+ __sync_synchronize(); \
+ } while (0)
+#endif
+
+#ifndef pl_mb_load
+#define pl_mb_load() pl_mb()
+#endif
+
+#ifndef pl_mb_store
+#define pl_mb_store() pl_mb()
+#endif
+
+#ifndef pl_mb_ato
+#define pl_mb_ato() pl_mb()
+#endif
+
+#ifndef pl_mb_ato_load
+#define pl_mb_ato_load() pl_mb_ato()
+#endif
+
+#ifndef pl_mb_ato_store
+#define pl_mb_ato_store() pl_mb_ato()
+#endif
+
+/* atomic load: volatile after a load barrier */
+#ifndef pl_load
+#define pl_load(ptr) ({ \
+ typeof(*(ptr)) __pl_ret = ({ \
+ pl_mb_load(); \
+ *(volatile typeof(ptr))ptr; \
+ }); \
+ __pl_ret; \
+ })
+#endif
+
+/* atomic store, old style using a CAS */
+#ifndef pl_store
+#define pl_store(ptr, x) do { \
+ typeof((ptr)) __pl_ptr = (ptr); \
+ typeof((x)) __pl_x = (x); \
+ typeof(*(ptr)) __pl_old; \
+ do { \
+ __pl_old = *__pl_ptr; \
+ } while (!__sync_bool_compare_and_swap(__pl_ptr, __pl_old, __pl_x)); \
+ } while (0)
+#endif
+
+#ifndef pl_inc_noret
+#define pl_inc_noret(ptr) do { __sync_add_and_fetch((ptr), 1); } while (0)
+#endif
+
+#ifndef pl_dec_noret
+#define pl_dec_noret(ptr) do { __sync_sub_and_fetch((ptr), 1); } while (0)
+#endif
+
+#ifndef pl_inc
+#define pl_inc(ptr) ({ __sync_add_and_fetch((ptr), 1); })
+#endif
+
+#ifndef pl_dec
+#define pl_dec(ptr) ({ __sync_sub_and_fetch((ptr), 1); })
+#endif
+
+#ifndef pl_add
+#define pl_add(ptr, x) ({ __sync_add_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_and
+#define pl_and(ptr, x) ({ __sync_and_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_or
+#define pl_or(ptr, x) ({ __sync_or_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_xor
+#define pl_xor(ptr, x) ({ __sync_xor_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_sub
+#define pl_sub(ptr, x) ({ __sync_sub_and_fetch((ptr), (x)); })
+#endif
+
+#ifndef pl_btr
+#define pl_btr(ptr, bit) ({ typeof(*(ptr)) __pl_t = ((typeof(*(ptr)))1) << (bit); \
+ __sync_fetch_and_and((ptr), ~__pl_t) & __pl_t; \
+ })
+#endif
+
+#ifndef pl_bts
+#define pl_bts(ptr, bit) ({ typeof(*(ptr)) __pl_t = ((typeof(*(ptr)))1) << (bit); \
+ __sync_fetch_and_or((ptr), __pl_t) & __pl_t; \
+ })
+#endif
+
+#ifndef pl_xadd
+#define pl_xadd(ptr, x) ({ __sync_fetch_and_add((ptr), (x)); })
+#endif
+
+#ifndef pl_cmpxchg
+#define pl_cmpxchg(ptr, o, n) ({ __sync_val_compare_and_swap((ptr), (o), (n)); })
+#endif
+
+#ifndef pl_xchg
+#define pl_xchg(ptr, x) ({ \
+ typeof((ptr)) __pl_ptr = (ptr); \
+ typeof((x)) __pl_x = (x); \
+ typeof(*(ptr)) __pl_old; \
+ do { \
+ __pl_old = *__pl_ptr; \
+ } while (!__sync_bool_compare_and_swap(__pl_ptr, __pl_old, __pl_x)); \
+ __pl_old; \
+ })
+#endif
+
+#ifndef pl_ldadd
+#define pl_ldadd(ptr, x) ({ __sync_fetch_and_add((ptr), (x)); })
+#endif
+
+#ifndef pl_ldand
+#define pl_ldand(ptr, x) ({ __sync_fetch_and_and((ptr), (x)); })
+#endif
+
+#ifndef pl_ldor
+#define pl_ldor(ptr, x) ({ __sync_fetch_and_or((ptr), (x)); })
+#endif
+
+#ifndef pl_ldxor
+#define pl_ldxor(ptr, x) ({ __sync_fetch_and_xor((ptr), (x)); })
+#endif
+
+#ifndef pl_ldsub
+#define pl_ldsub(ptr, x) ({ __sync_fetch_and_sub((ptr), (x)); })
+#endif
+
+/* certain _noret operations may be defined from the regular ones */
+#if !defined(pl_inc_noret) && defined(pl_inc)
+# define pl_inc_noret(ptr) (void)pl_inc(ptr)
+#endif
+
+#if !defined(pl_dec_noret) && defined(pl_dec)
+# define pl_dec_noret(ptr) (void)pl_dec(ptr)
+#endif
+
+#if !defined(pl_add_noret) && defined(pl_add)
+# define pl_add_noret(ptr, x) (void)pl_add(ptr, x)
+#endif
+
+#if !defined(pl_sub_noret) && defined(pl_sub)
+# define pl_sub_noret(ptr, x) (void)pl_sub(ptr, x)
+#endif
+
+#if !defined(pl_or_noret) && defined(pl_or)
+# define pl_or_noret(ptr, x) (void)pl_or(ptr, x)
+#endif
+
+#if !defined(pl_and_noret) && defined(pl_and)
+# define pl_and_noret(ptr, x) (void)pl_and(ptr, x)
+#endif
+
+#if !defined(pl_xor_noret) && defined(pl_xor)
+# define pl_xor_noret(ptr, x) (void)pl_xor(ptr, x)
+#endif
+
+/* certain memory orders may fallback to the generic seq_cst definition */
+
+#if !defined(pl_load_lax) && defined(pl_load)
+#define pl_load_lax pl_load
+#endif
+
+
+#if !defined(pl_store_lax) && defined(pl_store)
+#define pl_store_lax pl_store
+#endif
+
+
+#if !defined(pl_inc_lax) && defined(pl_inc)
+# define pl_inc_lax pl_inc
+#endif
+#if !defined(pl_inc_acq) && defined(pl_inc)
+# define pl_inc_acq pl_inc
+#endif
+#if !defined(pl_inc_rel) && defined(pl_inc)
+# define pl_inc_rel pl_inc
+#endif
+
+
+#if !defined(pl_dec_lax) && defined(pl_dec)
+# define pl_dec_lax pl_dec
+#endif
+#if !defined(pl_dec_acq) && defined(pl_dec)
+# define pl_dec_acq pl_dec
+#endif
+
+#if !defined(pl_dec_rel) && defined(pl_dec)
+# define pl_dec_rel pl_dec
+#endif
+
+
+#if !defined(pl_inc_noret_lax) && defined(pl_inc_noret)
+# define pl_inc_noret_lax pl_inc_noret
+#endif
+
+#if !defined(pl_inc_noret_acq) && defined(pl_inc_noret)
+# define pl_inc_noret_acq pl_inc_noret
+#endif
+
+#if !defined(pl_inc_noret_rel) && defined(pl_inc_noret)
+# define pl_inc_noret_rel pl_inc_noret
+#endif
+
+
+#if !defined(pl_dec_noret_lax) && defined(pl_dec_noret)
+# define pl_dec_noret_lax pl_dec_noret
+#endif
+
+#if !defined(pl_dec_noret_acq) && defined(pl_dec_noret)
+# define pl_dec_noret_acq pl_dec_noret
+#endif
+
+#if !defined(pl_dec_noret_rel) && defined(pl_dec_noret)
+# define pl_dec_noret_rel pl_dec_noret
+#endif
+
+
+#if !defined(pl_add_lax) && defined(pl_add)
+# define pl_add_lax pl_add
+#endif
+
+#if !defined(pl_add_acq) && defined(pl_add)
+# define pl_add_acq pl_add
+#endif
+
+#if !defined(pl_add_rel) && defined(pl_add)
+# define pl_add_rel pl_add
+#endif
+
+
+#if !defined(pl_sub_lax) && defined(pl_sub)
+# define pl_sub_lax pl_sub
+#endif
+
+#if !defined(pl_sub_acq) && defined(pl_sub)
+# define pl_sub_acq pl_sub
+#endif
+
+#if !defined(pl_sub_rel) && defined(pl_sub)
+# define pl_sub_rel pl_sub
+#endif
+
+
+#if !defined(pl_and_lax) && defined(pl_and)
+# define pl_and_lax pl_and
+#endif
+
+#if !defined(pl_and_acq) && defined(pl_and)
+# define pl_and_acq pl_and
+#endif
+
+#if !defined(pl_and_rel) && defined(pl_and)
+# define pl_and_rel pl_and
+#endif
+
+
+#if !defined(pl_or_lax) && defined(pl_or)
+# define pl_or_lax pl_or
+#endif
+
+#if !defined(pl_or_acq) && defined(pl_or)
+# define pl_or_acq pl_or
+#endif
+
+#if !defined(pl_or_rel) && defined(pl_or)
+# define pl_or_rel pl_or
+#endif
+
+
+#if !defined(pl_xor_lax) && defined(pl_xor)
+# define pl_xor_lax pl_xor
+#endif
+
+#if !defined(pl_xor_acq) && defined(pl_xor)
+# define pl_xor_acq pl_xor
+#endif
+
+#if !defined(pl_xor_rel) && defined(pl_xor)
+# define pl_xor_rel pl_xor
+#endif
+
+
+#if !defined(pl_add_noret_lax) && defined(pl_add_noret)
+# define pl_add_noret_lax pl_add_noret
+#endif
+
+#if !defined(pl_add_noret_acq) && defined(pl_add_noret)
+# define pl_add_noret_acq pl_add_noret
+#endif
+
+#if !defined(pl_add_noret_rel) && defined(pl_add_noret)
+# define pl_add_noret_rel pl_add_noret
+#endif
+
+
+#if !defined(pl_sub_noret_lax) && defined(pl_sub_noret)
+# define pl_sub_noret_lax pl_sub_noret
+#endif
+
+#if !defined(pl_sub_noret_acq) && defined(pl_sub_noret)
+# define pl_sub_noret_acq pl_sub_noret
+#endif
+
+#if !defined(pl_sub_noret_rel) && defined(pl_sub_noret)
+# define pl_sub_noret_rel pl_sub_noret
+#endif
+
+
+#if !defined(pl_and_noret_lax) && defined(pl_and_noret)
+# define pl_and_noret_lax pl_and_noret
+#endif
+
+#if !defined(pl_and_noret_acq) && defined(pl_and_noret)
+# define pl_and_noret_acq pl_and_noret
+#endif
+
+#if !defined(pl_and_noret_rel) && defined(pl_and_noret)
+# define pl_and_noret_rel pl_and_noret
+#endif
+
+
+#if !defined(pl_or_noret_lax) && defined(pl_or_noret)
+# define pl_or_noret_lax pl_or_noret
+#endif
+
+#if !defined(pl_or_noret_acq) && defined(pl_or_noret)
+# define pl_or_noret_acq pl_or_noret
+#endif
+
+#if !defined(pl_or_noret_rel) && defined(pl_or_noret)
+# define pl_or_noret_rel pl_or_noret
+#endif
+
+
+#if !defined(pl_xor_noret_lax) && defined(pl_xor_noret)
+# define pl_xor_noret_lax pl_xor_noret
+#endif
+
+#if !defined(pl_xor_noret_acq) && defined(pl_xor_noret)
+# define pl_xor_noret_acq pl_xor_noret
+#endif
+
+#if !defined(pl_xor_noret_rel) && defined(pl_xor_noret)
+# define pl_xor_noret_rel pl_xor_noret
+#endif
+
+
+#if !defined(pl_btr_lax) && defined(pl_btr)
+# define pl_btr_lax pl_btr
+#endif
+
+#if !defined(pl_btr_acq) && defined(pl_btr)
+# define pl_btr_acq pl_btr
+#endif
+
+#if !defined(pl_btr_rel) && defined(pl_btr)
+# define pl_btr_rel pl_btr
+#endif
+
+
+#if !defined(pl_bts_lax) && defined(pl_bts)
+# define pl_bts_lax pl_bts
+#endif
+
+#if !defined(pl_bts_acq) && defined(pl_bts)
+# define pl_bts_acq pl_bts
+#endif
+
+#if !defined(pl_bts_rel) && defined(pl_bts)
+# define pl_bts_rel pl_bts
+#endif
+
+
+#if !defined(pl_ldadd_lax) && defined(pl_ldadd)
+# define pl_ldadd_lax pl_ldadd
+#endif
+
+#if !defined(pl_ldadd_acq) && defined(pl_ldadd)
+# define pl_ldadd_acq pl_ldadd
+#endif
+
+#if !defined(pl_ldadd_rel) && defined(pl_ldadd)
+# define pl_ldadd_rel pl_ldadd
+#endif
+
+
+#if !defined(pl_ldsub_lax) && defined(pl_ldsub)
+# define pl_ldsub_lax pl_ldsub
+#endif
+
+#if !defined(pl_ldsub_acq) && defined(pl_ldsub)
+# define pl_ldsub_acq pl_ldsub
+#endif
+
+#if !defined(pl_ldsub_rel) && defined(pl_ldsub)
+# define pl_ldsub_rel pl_ldsub
+#endif
+
+
+#if !defined(pl_ldand_lax) && defined(pl_ldand)
+# define pl_ldand_lax pl_ldand
+#endif
+
+#if !defined(pl_ldand_acq) && defined(pl_ldand)
+# define pl_ldand_acq pl_ldand
+#endif
+
+#if !defined(pl_ldand_rel) && defined(pl_ldand)
+# define pl_ldand_rel pl_ldand
+#endif
+
+
+#if !defined(pl_ldor_lax) && defined(pl_ldor)
+# define pl_ldor_lax pl_ldor
+#endif
+
+#if !defined(pl_ldor_acq) && defined(pl_ldor)
+# define pl_ldor_acq pl_ldor
+#endif
+
+#if !defined(pl_ldor_rel) && defined(pl_ldor)
+# define pl_ldor_rel pl_ldor
+#endif
+
+
+#if !defined(pl_ldxor_lax) && defined(pl_ldxor)
+# define pl_ldxor_lax pl_ldxor
+#endif
+
+#if !defined(pl_ldxor_acq) && defined(pl_ldxor)
+# define pl_ldxor_acq pl_ldxor
+#endif
+
+#if !defined(pl_ldxor_rel) && defined(pl_ldxor)
+# define pl_ldxor_rel pl_ldxor
+#endif
+
+
+#endif /* PL_ATOMIC_OPS_H */
diff --git a/include/import/eb32sctree.h b/include/import/eb32sctree.h
new file mode 100644
index 0000000..5ace662
--- /dev/null
+++ b/include/import/eb32sctree.h
@@ -0,0 +1,121 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6 with backports from v7-dev
+ * (C) 2002-2017 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32SCTREE_H
+#define _EB32SCTREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32sc_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32sctree.c, which simply relies on their inline version.
+ */
+struct eb32sc_node *eb32sc_lookup_ge(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_lookup_ge_or_first(struct eb_root *root, u32 x, unsigned long scope);
+struct eb32sc_node *eb32sc_insert(struct eb_root *root, struct eb32sc_node *new, unsigned long scope);
+void eb32sc_delete(struct eb32sc_node *node);
+
+/* Walks down left starting at root pointer <start>, and follow the leftmost
+ * branch whose scope matches <scope>. It either returns the node hosting the
+ * first leaf on that side, or NULL if no leaf is found. <start> may either be
+ * NULL or a branch pointer. The pointer to the leaf (or NULL) is returned.
+ */
+static inline struct eb32sc_node *eb32sc_walk_down_left(eb_troot_t *start, unsigned long scope)
+{
+ struct eb_root *root;
+ struct eb_node *node;
+ struct eb32sc_node *eb32;
+
+ if (unlikely(!start))
+ return NULL;
+
+ while (1) {
+ if (eb_gettag(start) == EB_NODE) {
+ root = eb_untag(start, EB_NODE);
+ node = eb_root_to_node(root);
+ eb32 = container_of(node, struct eb32sc_node, node);
+ if (eb32->node_s & scope) {
+ start = node->branches.b[EB_LEFT];
+ continue;
+ }
+ start = node->node_p;
+ }
+ else {
+ root = eb_untag(start, EB_LEAF);
+ node = eb_root_to_node(root);
+ eb32 = container_of(node, struct eb32sc_node, node);
+ if (eb32->leaf_s & scope)
+ return eb32;
+ start = node->leaf_p;
+ }
+
+ /* here we're on a node that doesn't match the scope. We have
+ * to walk to the closest right location.
+ */
+ while (eb_gettag(start) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+ /* Note that <start> cannot be NULL at this stage */
+ root = eb_untag(start, EB_LEFT);
+ start = root->b[EB_RGHT];
+ if (eb_clrtag(start) == NULL)
+ return NULL;
+ }
+}
+
+/* Return next node in the tree, starting with tagged parent <start>, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next_with_parent(eb_troot_t *start, unsigned long scope)
+{
+ while (eb_gettag(start) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ start = (eb_root_to_node(eb_untag(start, EB_RGHT)))->node_p;
+
+	/* Note that <start> cannot be NULL at this stage */
+ start = (eb_untag(start, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(start) == NULL)
+ return NULL;
+
+ return eb32sc_walk_down_left(start, scope);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_next(struct eb32sc_node *eb32, unsigned long scope)
+{
+ return eb32sc_next_with_parent(eb32->node.leaf_p, scope);
+}
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32sc_node *eb32sc_first(struct eb_root *root, unsigned long scope)
+{
+ return eb32sc_walk_down_left(root->b[0], scope);
+}
+
+#endif /* _EB32SCTREE_H */
diff --git a/include/import/eb32tree.h b/include/import/eb32tree.h
new file mode 100644
index 0000000..1c03fc1
--- /dev/null
+++ b/include/import/eb32tree.h
@@ -0,0 +1,482 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 32bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB32TREE_H
+#define _EB32TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb32_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_first(struct eb_root *root)
+{
+ return eb32_entry(eb_first(root), struct eb32_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_last(struct eb_root *root)
+{
+ return eb32_entry(eb_last(root), struct eb32_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_next(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb32_node *eb32_prev(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_next_dup(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb32_node *eb32_prev_dup(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev_dup(&eb32->node), struct eb32_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_next_unique(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_next_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb32_node *eb32_prev_unique(struct eb32_node *eb32)
+{
+ return eb32_entry(eb_prev_unique(&eb32->node), struct eb32_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb32_delete(struct eb32_node *eb32)
+{
+ eb_delete(&eb32->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb32tree.c, which simply relies on their inline version.
+ */
+struct eb32_node *eb32_lookup(struct eb_root *root, u32 x);
+struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x);
+struct eb32_node *eb32_lookup_le(struct eb_root *root, u32 x);
+struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x);
+struct eb32_node *eb32_insert(struct eb_root *root, struct eb32_node *new);
+struct eb32_node *eb32i_insert(struct eb_root *root, struct eb32_node *new);
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb32_delete(struct eb32_node *eb32)
+{
+ __eb_delete(&eb32->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32_lookup(struct eb_root *root, u32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+ u32 y;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key == x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ node_bit = node->node.bit;
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node_bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node_bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(x >> node_bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb32_node *__eb32i_lookup(struct eb_root *root, s32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+ u32 key = x ^ 0x80000000;
+ u32 y;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key == (u32)x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ node_bit = node->node.bit;
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node_bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node_bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(key >> node_bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The eb32_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32_insert(struct eb_root *root, struct eb32_node *new) {
+ struct eb32_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ u32 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries the key being inserted.
+ */
+ newkey = new->key;
+
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ /* insert above a leaf */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+ if (new->key == old->key) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb32_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if (new->key >= old->key) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+/* Insert eb32_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs be set with the key. The eb32_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb32_node *
+__eb32i_insert(struct eb_root *root, struct eb32_node *new) {
+ struct eb32_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ int newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries a high bit shift of the key being
+ * inserted in order to have negative keys stored before positive
+ * ones.
+ */
+ newkey = new->key + 0x80000000;
+
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+
+ if (new->key == old->key) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb32_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if ((s32)new->key >= (s32)old->key) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EB32TREE_H */
diff --git a/include/import/eb64tree.h b/include/import/eb64tree.h
new file mode 100644
index 0000000..d6e5db4
--- /dev/null
+++ b/include/import/eb64tree.h
@@ -0,0 +1,575 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on 64bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EB64TREE_H
+#define _EB64TREE_H
+
+#include "ebtree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define eb64_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_first(struct eb_root *root)
+{
+ return eb64_entry(eb_first(root), struct eb64_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_last(struct eb_root *root)
+{
+ return eb64_entry(eb_last(root), struct eb64_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_next(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static inline struct eb64_node *eb64_prev(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_next_dup(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct eb64_node *eb64_prev_dup(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev_dup(&eb64->node), struct eb64_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_next_unique(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_next_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static inline struct eb64_node *eb64_prev_unique(struct eb64_node *eb64)
+{
+ return eb64_entry(eb_prev_unique(&eb64->node), struct eb64_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static inline void eb64_delete(struct eb64_node *eb64)
+{
+ eb_delete(&eb64->node);
+}
+
+/*
+ * The following functions are not inlined by default. They are declared
+ * in eb64tree.c, which simply relies on their inline version.
+ */
+struct eb64_node *eb64_lookup(struct eb_root *root, u64 x);
+struct eb64_node *eb64i_lookup(struct eb_root *root, s64 x);
+struct eb64_node *eb64_lookup_le(struct eb_root *root, u64 x);
+struct eb64_node *eb64_lookup_ge(struct eb_root *root, u64 x);
+struct eb64_node *eb64_insert(struct eb_root *root, struct eb64_node *new);
+struct eb64_node *eb64i_insert(struct eb_root *root, struct eb64_node *new);
+
+/*
+ * The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __eb64_delete(struct eb64_node *eb64)
+{
+ __eb_delete(&eb64->node);
+}
+
+/*
+ * Find the first occurrence of a key in the tree <root>. If none can be
+ * found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64_lookup(struct eb_root *root, u64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+ u64 y;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key == x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node->node.bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/*
+ * Find the first occurrence of a signed key in the tree <root>. If none can
+ * be found, return NULL.
+ */
+static forceinline struct eb64_node *__eb64i_lookup(struct eb_root *root, s64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+ u64 key = x ^ (1ULL << 63);
+ u64 y;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key == (u64)x)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ y = node->key ^ x;
+ if (!y) {
+ /* Either we found the node which holds the key, or
+			 * we have a dup tree. In the latter case, we have to
+ * walk it down left to get the first entry.
+ */
+ if (node->node.bit < 0) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ return node;
+ }
+
+ if ((y >> node->node.bit) >= EB_NODE_BRANCHES)
+ return NULL; /* no more common bits */
+
+ troot = node->node.branches.b[(key >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The eb64_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64_insert(struct eb_root *root, struct eb64_node *new) {
+ struct eb64_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ u64 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries the key being inserted.
+ */
+ newkey = new->key;
+
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ - the tree does not contain the key, and we have
+ new->key < old->key. We insert new above old, on
+ the left ;
+
+ - the tree does not contain the key, and we have
+ new->key > old->key. We insert new above old, on
+ the right ;
+
+ - the tree does contain the key, which implies it
+ is alone. We add the new key next to it as a
+ first duplicate.
+
+ The last two cases can easily be partially merged.
+ */
+
+ if (new->key < old->key) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if ((new->key == old->key) && eb_gettag(root_right))
+ return old;
+
+				/* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (new->key == old->key) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ if (new->key < old->key) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if (new->key > old->key) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb64_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+
+ if (sizeof(long) >= 8) {
+ side = newkey >> old_node_bit;
+ } else {
+ /* note: provides the best code on low-register count archs
+ * such as i386.
+ */
+ side = newkey;
+ side >>= old_node_bit;
+ if (old_node_bit >= 32) {
+ side = newkey >> 32;
+ side >>= old_node_bit & 0x1F;
+ }
+ }
+ side &= EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+ return new;
+}
+
+/* Insert eb64_node <new> into subtree starting at node root <root>, using
+ * signed keys. Only new->key needs be set with the key. The eb64_node
+ * is returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys.
+ */
+static forceinline struct eb64_node *
+__eb64i_insert(struct eb_root *root, struct eb64_node *new) {
+ struct eb64_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ u64 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *root_right;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries a high bit shift of the key being
+ * inserted in order to have negative keys stored before positive
+ * ones.
+ */
+ newkey = new->key ^ (1ULL << 63);
+
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ - the tree does not contain the key, and we have
+ new->key < old->key. We insert new above old, on
+ the left ;
+
+ - the tree does not contain the key, and we have
+ new->key > old->key. We insert new above old, on
+ the right ;
+
+ - the tree does contain the key, which implies it
+ is alone. We add the new key next to it as a
+ first duplicate.
+
+ The last two cases can easily be partially merged.
+ */
+
+ if ((s64)new->key < (s64)old->key) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if ((new->key == old->key) && eb_gettag(root_right))
+ return old;
+
+ /* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (new->key == old->key) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ if ((s64)new->key < (s64)old->key) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if ((s64)new->key > (s64)old->key) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct eb64_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+
+ if (sizeof(long) >= 8) {
+ side = newkey >> old_node_bit;
+ } else {
+ /* note: provides the best code on low-register count archs
+ * such as i386.
+ */
+ side = newkey;
+ side >>= old_node_bit;
+ if (old_node_bit >= 32) {
+ side = newkey >> 32;
+ side >>= old_node_bit & 0x1F;
+ }
+ }
+ side &= EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = fls64(new->key ^ old->key) - EB_NODE_BITS;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+
+ return new;
+}
+
+#endif /* _EB64_TREE_H */
diff --git a/include/import/ebimtree.h b/include/import/ebimtree.h
new file mode 100644
index 0000000..0afbdd1
--- /dev/null
+++ b/include/import/ebimtree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros for Indirect Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBIMTREE_H
+#define _EBIMTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebimtree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebim_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebpt_node *ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len);
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * lookup string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. Returns first node if <len> is zero.
+ */
+static forceinline struct ebpt_node *
+__ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ goto ret_null;
+
+ if (unlikely(len == 0))
+ goto walk_down;
+
+ pos = 0;
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto ret_node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto walk_left;
+ }
+
+ /* OK, normal data node, let's walk down. We check if all full
+ * bytes are equal, and we start from the last one we did not
+ * completely check. We stop as soon as we reach the last byte,
+ * because we must decide to go left/right or abort.
+ */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This surprising construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ if (*(unsigned char*)(node->key + pos++) ^ *(unsigned char*)(x++))
+ goto ret_null; /* more than one full byte is different */
+ if (--len == 0)
+ goto walk_left; /* return first node if all bytes matched */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so node_bit < 8.
+ * We have 2 possibilities :
+ * - more than the last bit differs => return NULL
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((*(unsigned char*)(node->key + pos) >> node_bit) ^ side) > 1)
+ goto ret_null;
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+ walk_left:
+ troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ ret_node:
+ return node;
+ ret_null:
+ return NULL;
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebpt_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebpt_node *
+__ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len)
+{
+ struct ebpt_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ len <<= 3;
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ * - the tree does not contain the key, and we have
+ * new->key < old->key. We insert new above old, on
+ * the left ;
+ *
+ * - the tree does not contain the key, and we have
+ * new->key > old->key. We insert new above old, on
+ * the right ;
+ *
+ * - the tree does contain the key, which implies it
+ * is alone. We add the new key next to it as a
+ * first duplicate.
+ *
+ * The last two cases can easily be partially merged.
+ */
+ bit = equal_bits(new->key, old->key, bit, len);
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if (diff == 0 && eb_gettag(root_right))
+ return old;
+
+ /* new->key >= old->key, new goes to the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+
+ if (diff == 0) {
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+ if (old_node_bit < 0) {
+ /* we're above a duplicate tree, we must compare till the end */
+ bit = equal_bits(new->key, old->key, bit, len);
+ goto dup_tree;
+ }
+ else if (bit < old_node_bit) {
+ bit = equal_bits(new->key, old->key, bit, old_node_bit);
+ }
+
+ if (bit < old_node_bit) { /* we don't have all bits in common */
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ dup_tree:
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else if (diff > 0) {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ else {
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebpt_node, node);
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * This number of bits is already in <bit>.
+ */
+ new->node.bit = bit;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EBIMTREE_H */
diff --git a/include/import/ebistree.h b/include/import/ebistree.h
new file mode 100644
index 0000000..a438fa1
--- /dev/null
+++ b/include/import/ebistree.h
@@ -0,0 +1,329 @@
+/*
+ * Elastic Binary Trees - macros to manipulate Indirect String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBISTREE_H
+#define _EBISTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+#include "ebpttree.h"
+#include "ebimtree.h"
+
+/* These functions and macros rely on Pointer nodes and use the <key> entry as
+ * a pointer to an indirect key. Most operations are performed using ebpt_*.
+ */
+
+/* The following functions are not inlined by default. They are declared
+ * in ebistree.c, which simply relies on their inline version.
+ */
+struct ebpt_node *ebis_lookup(struct eb_root *root, const char *x);
+struct ebpt_node *ebis_insert(struct eb_root *root, struct ebpt_node *new);
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and that no null character is present
+ * in string <x> in the first <len> chars. If none can be found, return NULL.
+ */
+static forceinline struct ebpt_node *
+ebis_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+ struct ebpt_node *node;
+
+ /* a prefix match is only an exact match if the stored key ends
+ * right at <len>, hence the extra terminator check below.
+ */
+ node = ebim_lookup(root, x, len);
+ if (!node || ((const char *)node->key)[len] != 0)
+ return NULL;
+ return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ * NOTE(review): the exported prototype ebis_lookup() declares <x> as
+ * "const char *" while this inline version takes "const void *" — harmless
+ * but inconsistent; confirm against ebistree.c.
+ */
+static forceinline struct ebpt_node *__ebis_lookup(struct eb_root *root, const void *x)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+ int bit;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ bit = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (strcmp(node->key, x) == 0)
+ return node;
+ else
+ return NULL;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ node_bit = node->node.bit;
+
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (strcmp(node->key, x) != 0)
+ return NULL;
+
+ /* walk down to the leftmost leaf of the dup subtree */
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ return node;
+ }
+
+ /* OK, normal data node, let's walk down but don't compare data
+ * if we already reached the end of the key.
+ */
+ if (likely(bit >= 0)) {
+ bit = string_equal_bits(x, node->key, bit);
+ if (likely(bit < node_bit)) {
+ if (bit >= 0)
+ return NULL; /* no more common bits */
+
+ /* bit < 0 : we reached the end of the key. If we
+ * are in a tree with unique keys, we can return
+ * this node. Otherwise we have to walk it down
+ * and stop comparing bits.
+ */
+ if (eb_gettag(root->b[EB_RGHT]))
+ return node;
+ }
+ /* if the bit is larger than the node's, we must bound it
+ * because we might have compared too many bytes with an
+ * inappropriate leaf. For a test, build a tree from "0",
+ * "WW", "W", "S" inserted in this exact sequence and lookup
+ * "W" => "S" is returned without this assignment.
+ */
+ else
+ bit = node_bit;
+ }
+
+ troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+ (~node_bit & 7)) & 1];
+ }
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebpt_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebpt_node *
+__ebis_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ struct ebpt_node *old;
+ unsigned int side;
+ eb_troot_t *troot;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_leaf;
+
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+ new->node.node_p = old->node.leaf_p;
+
+ /* Right here, we have 3 possibilities :
+ * - the tree does not contain the key, and we have
+ * new->key < old->key. We insert new above old, on
+ * the left ;
+ *
+ * - the tree does not contain the key, and we have
+ * new->key > old->key. We insert new above old, on
+ * the right ;
+ *
+ * - the tree does contain the key, which implies it
+ * is alone. We add the new key next to it as a
+ * first duplicate.
+ *
+ * The last two cases can easily be partially merged.
+ */
+ if (bit >= 0)
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (bit < 0) {
+ /* key was already there */
+
+ /* we may refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ if (eb_gettag(root_right))
+ return old;
+
+ /* new arbitrarily goes to the right and tops the dup tree */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.bit = -1;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+ }
+
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ /* new->key < old->key, new takes the left */
+ new->node.leaf_p = new_left;
+ old->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_leaf;
+ } else {
+ /* new->key > old->key, new takes the right */
+ old->node.leaf_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_leaf;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+ if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+ bit = string_equal_bits(new->key, old->key, bit);
+
+ if (unlikely(bit < 0)) {
+ /* Perfect match, we must only stop on head of dup tree
+ * or walk down to a leaf.
+ */
+ if (old_node_bit < 0) {
+ /* We know here that string_equal_bits matched all
+ * bits and that we're on top of a dup tree, then
+ * we can perform the dup insertion and return.
+ */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebpt_node, node);
+ }
+ /* OK so let's walk down */
+ }
+ else if (bit < old_node_bit || old_node_bit < 0) {
+ /* The tree did not contain the key, or we stopped on top of a dup
+ * tree, possibly containing the key. In the former case, we insert
+ * <new> before the node <old>, and set ->bit to designate the lowest
+ * bit position in <new> which applies to ->branches.b[]. In the latter
+ * case, we add the key to the existing dup tree. Note that we cannot
+ * enter here if we match an intermediate node's key that is not the
+ * head of a dup tree.
+ */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf, *old_node;
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+ old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+ new->node.node_p = old->node.node_p;
+
+ /* we can never match all bits here */
+ diff = cmp_bits(new->key, old->key, bit);
+ if (diff < 0) {
+ new->node.leaf_p = new_left;
+ old->node.node_p = new_rght;
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = old_node;
+ }
+ else {
+ old->node.node_p = new_left;
+ new->node.leaf_p = new_rght;
+ new->node.branches.b[EB_LEFT] = old_node;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ }
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (((unsigned char *)new->key)[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+ troot = root->b[side];
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ /* We need the common higher bits between new->key and old->key.
+ * This number of bits is already in <bit>.
+ * NOTE: we can't get here with bit < 0 since we found a dup !
+ */
+ new->node.bit = bit;
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+#endif /* _EBISTREE_H */
diff --git a/include/import/ebmbtree.h b/include/import/ebmbtree.h
new file mode 100644
index 0000000..365042e
--- /dev/null
+++ b/include/import/ebmbtree.h
@@ -0,0 +1,850 @@
+/*
+ * Elastic Binary Trees - macros and structures for Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBMBTREE_H
+#define _EBMBTREE_H
+
+#include <string.h>
+#include "ebtree.h"
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebmb_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none.
+ * NOTE(review): the wrappers below assume that ebmb_entry() maps a NULL
+ * eb_node pointer back to NULL, which holds only if <node> is the first
+ * member of struct ebmb_node — confirm against the layout in ebtree.h.
+ */
+static forceinline struct ebmb_node *ebmb_first(struct eb_root *root)
+{
+ return ebmb_entry(eb_first(root), struct ebmb_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_last(struct eb_root *root)
+{
+ return ebmb_entry(eb_last(root), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_next_dup(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebmb_node *ebmb_prev_dup(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev_dup(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_next_unique(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_next_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebmb_node *ebmb_prev_unique(struct ebmb_node *ebmb)
+{
+ return ebmb_entry(eb_prev_unique(&ebmb->node), struct ebmb_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebmb_delete(struct ebmb_node *ebmb)
+{
+ eb_delete(&ebmb->node);
+}
+
+/* The following functions are not inlined by default. They are declared
+ * in ebmbtree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebmb_lookup(struct eb_root *root, const void *x, unsigned int len);
+struct ebmb_node *ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+struct ebmb_node *ebmb_lookup_longest(struct eb_root *root, const void *x);
+struct ebmb_node *ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx);
+struct ebmb_node *ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len);
+
+/* start from a valid leaf and find the next matching prefix that's either a
+ * duplicate, or immediately shorter than the node's current one and still
+ * matches it. The purpose is to permit a caller that is not satisfied with a
+ * result provided by ebmb_lookup_longest() to evaluate the next matching
+ * entry. Given that shorter keys are necessarily attached to nodes located
+ * above the current one, it's sufficient to restart from the current leaf and
+ * go up until we find a shorter prefix, or a non-matching one. Returns the
+ * next matching node, or NULL when none exists.
+ */
+static inline struct ebmb_node *ebmb_lookup_shorter(struct ebmb_node *start)
+{
+ eb_troot_t *t = start->node.leaf_p;
+ struct ebmb_node *node;
+
+ /* first, check for duplicates */
+ node = ebmb_next_dup(start);
+ if (node)
+ return node;
+
+ while (1) {
+ if (eb_gettag(t) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ node = container_of(eb_root_to_node(eb_untag(t, EB_LEFT)), struct ebmb_node, node);
+ } else {
+ /* Walking up from right branch, so we cannot be below
+ * root. However, if we end up on a node with an even
+ * and positive bit, this is a cover node, which mandates
+ * that the left branch only contains cover values, so we
+ * must descend it.
+ */
+ node = container_of(eb_root_to_node(eb_untag(t, EB_RGHT)), struct ebmb_node, node);
+ if (node->node.bit > 0 && !(node->node.bit & 1))
+ /* NOTE(review): <t> is still tagged EB_RGHT here;
+ * verify that eb_walk_down() accepts a tagged
+ * pointer when descending the cover branch.
+ */
+ return ebmb_entry(eb_walk_down(t, EB_LEFT), struct ebmb_node, node);
+ }
+
+ /* Note that <t> cannot be NULL at this stage */
+ t = node->node.node_p;
+
+ /* this is a node attached to a deeper (and possibly different)
+ * leaf, not interesting for us.
+ */
+ if (node->node.pfx >= start->node.pfx)
+ continue;
+
+ if (check_bits(start->key, node->key, 0, node->node.pfx) == 0)
+ break;
+ }
+ return node;
+}
+
+/* The following functions are less likely to be used directly, because their
+ * code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused.
+ * Unlike ebmb_delete(), this relies on the inline __eb_delete() variant.
+ */
+static forceinline void __ebmb_delete(struct ebmb_node *ebmb)
+{
+ __eb_delete(&ebmb->node);
+}
+
+/* Find the first occurrence of a key of at least <len> bytes matching <x> in the
+ * tree <root>. The caller is responsible for ensuring that <len> will not exceed
+ * the common parts between the tree's keys and <x>. In case of multiple matches,
+ * the leftmost node is returned. This means that this function can be used to
+ * lookup string keys by prefix if all keys in the tree are zero-terminated. If
+ * no match is found, NULL is returned. Returns first node if <len> is zero.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ goto ret_null;
+
+ if (unlikely(len == 0))
+ goto walk_down;
+
+ pos = 0;
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto ret_node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (eb_memcmp(node->key + pos, x, len) != 0)
+ goto ret_null;
+ else
+ goto walk_left;
+ }
+
+ /* OK, normal data node, let's walk down. We check if all full
+ * bytes are equal, and we start from the last one we did not
+ * completely check. We stop as soon as we reach the last byte,
+ * because we must decide to go left/right or abort.
+ */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This surprising construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ * NOTE(review): <x> is const void*, so x++ relies on the
+ * GCC extension treating void pointer arithmetic as
+ * byte-based — confirm for all supported compilers.
+ */
+ while (1) {
+ if (node->key[pos++] ^ *(unsigned char*)(x++))
+ goto ret_null; /* more than one full byte is different */
+ if (--len == 0)
+ goto walk_left; /* return first node if all bytes matched */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so node_bit < 8.
+ * We have 2 possibilities :
+ * - more than the last bit differs => return NULL
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ goto ret_null;
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+ walk_left:
+ troot = node->node.branches.b[EB_LEFT];
+ walk_down:
+ /* descend along the left branches to reach the leftmost leaf */
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ ret_node:
+ return node;
+ ret_null:
+ return NULL;
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. In that
+ * case, when the key is already present, the existing node is returned
+ * instead of <new>. The
+ * len is specified in bytes. It is absolutely mandatory that this length
+ * is the same for all keys in the tree. This function cannot be used to
+ * insert strings.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ struct ebmb_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ /* insert above a leaf */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ goto check_bit_and_break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ if (unlikely(old->node.bit < 0)) {
+ /* We're above a duplicate tree, so we must compare the whole value */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ check_bit_and_break:
+ bit = equal_bits(new->key, old->key, bit, len << 3);
+ break;
+ }
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above. Note: we can compare more bits than
+ * the current node's because as long as they are identical, we
+ * know we descend along the correct side.
+ */
+
+ bit = equal_bits(new->key, old->key, bit, old_node_bit);
+ if (unlikely(bit < old_node_bit)) {
+ /* The tree did not contain the key, so we insert <new> before the
+ * node <old>, and set ->bit to designate the lowest bit position in
+ * <new> which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ break;
+ }
+ /* we don't want to skip bits for further comparisons, so we must limit <bit>.
+ * However, since we're going down around <old_node_bit>, we know it will be
+ * properly matched, so we can skip this bit.
+ */
+ bit = old_node_bit + 1;
+
+ /* walk down, fetching bit <old_node_bit> of the key, bits being
+ * numbered from the MSB of each byte.
+ */
+ root = &old->node.branches;
+ side = old_node_bit & 7;
+ side ^= 7;
+ side = (new->key[old_node_bit >> 3] >> side) & 1;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ new->node.bit = bit;
+
+ /* Note: we can compare more bits than the current node's because as
+ * long as they are identical, we know we descend along the correct
+ * side. However we don't want to start to compare past the end.
+ */
+ diff = 0;
+ if (((unsigned)bit >> 3) < len)
+ diff = cmp_bits(new->key, old->key, bit);
+
+ if (diff == 0) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (likely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebmb_node, node);
+ }
+ /* otherwise fall through */
+ }
+
+ if (diff >= 0) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+
+/* Find the first occurrence of the longest prefix matching a key <x> in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL. When a
+ * mismatch occurs below a cover node, the last recorded covering subtree is
+ * used as the fallback result.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_longest(struct eb_root *root, const void *x)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot, *cover;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ cover = NULL;
+ pos = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ goto not_found;
+
+ return node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ goto not_found;
+
+ /* descend left branches to the first (leftmost) duplicate */
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ return node;
+ }
+
+ node_bit >>= 1; /* strip cover bit */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This uncommon construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ x++; pos++;
+ if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+ goto not_found; /* more than one full byte is different */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+ * We have 2 possibilities :
+ * - more than the last bit differs => data does not match
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ goto not_found;
+
+ if (!(node->node.bit & 1)) {
+ /* This is a cover node, let's keep a reference to it
+ * for later. The covering subtree is on the left, and
+ * the covered subtree is on the right, so we have to
+ * walk down right. <side> computed above is ignored here.
+ */
+ cover = node->node.branches.b[EB_LEFT];
+ troot = node->node.branches.b[EB_RGHT];
+ continue;
+ }
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+
+ not_found:
+ /* Walk down last cover tree if it exists. It does not matter if cover is NULL */
+ return ebmb_entry(eb_walk_down(cover, EB_LEFT), struct ebmb_node, node);
+}
+
+
+/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. Note that this can be ensured by
+ * having a byte at the end of <x> which cannot be part of any prefix, typically
+ * the trailing zero for a string. If none can be found, return NULL. Unlike
+ * __ebmb_lookup_longest(), the prefix length must match <pfx> exactly.
+ */
+static forceinline struct ebmb_node *__ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
+{
+ struct ebmb_node *node;
+ eb_troot_t *troot;
+ int pos, side;
+ int node_bit;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ pos = 0;
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ if (node->node.pfx != pfx)
+ return NULL;
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ return NULL;
+ return node;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+
+ node_bit = node->node.bit;
+ if (node_bit < 0) {
+ /* We have a dup tree now. Either it's for the same
+ * value, and we walk down left, or it's a different
+ * one and we don't have our key.
+ */
+ if (node->node.pfx != pfx)
+ return NULL;
+ if (check_bits(x - pos, node->key, pos, node->node.pfx))
+ return NULL;
+
+ /* descend left branches to the first (leftmost) duplicate */
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ return node;
+ }
+
+ node_bit >>= 1; /* strip cover bit */
+ node_bit = ~node_bit + (pos << 3) + 8; // = (pos<<3) + (7 - node_bit)
+ if (node_bit < 0) {
+ /* This uncommon construction gives better performance
+ * because gcc does not try to reorder the loop. Tested to
+ * be fine with 2.95 to 4.2.
+ */
+ while (1) {
+ x++; pos++;
+ if (node->key[pos-1] ^ *(unsigned char*)(x-1))
+ return NULL; /* more than one full byte is different */
+ node_bit += 8;
+ if (node_bit >= 0)
+ break;
+ }
+ }
+
+ /* here we know that only the last byte differs, so 0 <= node_bit <= 7.
+ * We have 2 possibilities :
+ * - more than the last bit differs => data does not match
+ * - walk down on side = (x[pos] >> node_bit) & 1
+ */
+ side = *(unsigned char *)x >> node_bit;
+ if (((node->key[pos] >> node_bit) ^ side) > 1)
+ return NULL;
+
+ if (!(node->node.bit & 1)) {
+ /* This is a cover node, it may be the entry we're
+ * looking for. We already know that it matches all the
+ * bits, let's compare prefixes and descend the cover
+ * subtree if they match.
+ */
+ if ((unsigned short)node->node.bit >> 1 == pfx)
+ troot = node->node.branches.b[EB_LEFT];
+ else
+ troot = node->node.branches.b[EB_RGHT];
+ continue;
+ }
+ side &= 1;
+ troot = node->node.branches.b[side];
+ }
+}
+
+
+/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
+ * Only new->key and new->pfx need be set with the key and its prefix length.
+ * Note that bits between <pfx> and <len> are theoretically ignored and should be
+ * zero, as it is not certain yet that they will always be ignored everywhere
+ * (eg in bit compare functions).
+ * The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys, and the
+ * existing node is returned when the key is already present. The
+ * len is specified in bytes.
+ */
+static forceinline struct ebmb_node *
+__ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ struct ebmb_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ eb_troot_t *root_right;
+ int diff;
+ int bit;
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+ unsigned int npfx = new->node.pfx;
+ unsigned int npfx1 = npfx << 1;
+ const unsigned char *nkey = new->key;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ root_right = root->b[EB_RGHT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ return new;
+ }
+
+ /* from here on, <len> counts bits and is capped to the new prefix */
+ len <<= 3;
+ if (len > npfx)
+ len = npfx;
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached.
+ */
+
+ bit = 0;
+ while (1) {
+ if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+ /* Insert above a leaf. Note that this leaf could very
+ * well be part of a cover node.
+ */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct ebmb_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ goto check_bit_and_break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct ebmb_node, node.branches);
+ old_node_bit = old->node.bit;
+ /* Note that old_node_bit can be :
+ * < 0 : dup tree
+ * = 2N : cover node for N bits
+ * = 2N+1 : normal node at N bits
+ */
+
+ if (unlikely(old_node_bit < 0)) {
+ /* We're above a duplicate tree, so we must compare the whole value */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ check_bit_and_break:
+ /* No need to compare everything if the leaves are shorter than the new one. */
+ if (len > old->node.pfx)
+ len = old->node.pfx;
+ bit = equal_bits(nkey, old->key, bit, len);
+ break;
+ }
+
+ /* WARNING: for the two blocks below, <bit> is counted in half-bits */
+
+ bit = equal_bits(nkey, old->key, bit, old_node_bit >> 1);
+ bit = (bit << 1) + 1; // assume comparisons with normal nodes
+
+ /* we must always check that our prefix is larger than the nodes
+ * we visit, otherwise we have to stop going down. The following
+ * test is able to stop before both normal and cover nodes.
+ */
+ if (bit >= npfx1 && npfx1 < old_node_bit) {
+ /* insert cover node here on the left */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ new->node.bit = npfx1;
+ diff = -1;
+ goto insert_above;
+ }
+
+ if (unlikely(bit < old_node_bit)) {
+ /* The tree did not contain the key, so we insert <new> before the
+ * node <old>, and set ->bit to designate the lowest bit position in
+ * <new> which applies to ->branches.b[]. We know that the bit is not
+ * greater than the prefix length thanks to the test above.
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ new->node.bit = bit;
+ diff = cmp_bits(nkey, old->key, bit >> 1);
+ goto insert_above;
+ }
+
+ if (!(old_node_bit & 1)) {
+ /* if we encounter a cover node with our exact prefix length, it's
+ * necessarily the same value, so we insert there as a duplicate on
+ * the left. For that, we go down on the left and the leaf detection
+ * code will finish the job.
+ */
+ if (npfx1 == old_node_bit) {
+ root = &old->node.branches;
+ side = EB_LEFT;
+ troot = root->b[side];
+ continue;
+ }
+
+ /* cover nodes are always walked through on the right */
+ side = EB_RGHT;
+ bit = old_node_bit >> 1; /* recheck that bit */
+ root = &old->node.branches;
+ troot = root->b[side];
+ continue;
+ }
+
+ /* we don't want to skip bits for further comparisons, so we must limit <bit>.
+ * However, since we're going down around <old_node_bit>, we know it will be
+ * properly matched, so we can skip this bit.
+ */
+ old_node_bit >>= 1;
+ bit = old_node_bit + 1;
+
+ /* walk down, fetching bit <old_node_bit> of the key, bits being
+ * numbered from the MSB of each byte.
+ */
+ root = &old->node.branches;
+ side = old_node_bit & 7;
+ side ^= 7;
+ side = (nkey[old_node_bit >> 3] >> side) & 1;
+ troot = root->b[side];
+ }
+
+ /* Right here, we have 4 possibilities :
+ * - the tree does not contain any leaf matching the
+ * key, and we have new->key < old->key. We insert
+ * new above old, on the left ;
+ *
+ * - the tree does not contain any leaf matching the
+ * key, and we have new->key > old->key. We insert
+ * new above old, on the right ;
+ *
+ * - the tree does contain the key with the same prefix
+ * length. We add the new key next to it as a first
+ * duplicate (since it was alone).
+ *
+ * The last two cases can easily be partially merged.
+ *
+ * - the tree contains a leaf matching the key, we have
+ * to insert above it as a cover node. The leaf with
+ * the shortest prefix becomes the left subtree and
+ * the leaf with the longest prefix becomes the right
+ * one. The cover node gets the min of both prefixes
+ * as its new bit.
+ */
+
+ /* first we want to ensure that we compare the correct bit, which means
+ * the largest common to both nodes.
+ */
+ if (bit > npfx)
+ bit = npfx;
+ if (bit > old->node.pfx)
+ bit = old->node.pfx;
+
+ new->node.bit = (bit << 1) + 1; /* assume normal node by default */
+
+ /* if one prefix is included in the second one, we don't compare bits
+ * because they won't necessarily match, we just proceed with a cover
+ * node insertion.
+ */
+ diff = 0;
+ if (bit < old->node.pfx && bit < npfx)
+ diff = cmp_bits(nkey, old->key, bit);
+
+ if (diff == 0) {
+ /* Both keys match. Either it's a duplicate entry or we have to
+ * put the shortest prefix left and the largest one right below
+ * a new cover node. By default, diff==0 means we'll be inserted
+ * on the right.
+ */
+ new->node.bit--; /* anticipate cover node insertion */
+ if (npfx == old->node.pfx) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (unlikely(eb_gettag(root_right))) {
+ /* we refuse to duplicate this key if the tree is
+ * tagged as containing only unique keys.
+ */
+ return old;
+ }
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ struct eb_node *ret;
+ ret = eb_insert_dup(&old->node, &new->node);
+ return container_of(ret, struct ebmb_node, node);
+ }
+ /* otherwise fall through to insert first duplicate */
+ }
+ /* otherwise we just rely on the tests below to select the right side */
+ else if (npfx < old->node.pfx)
+ diff = -1; /* force insertion to left side */
+ }
+
+ insert_above:
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ if (diff >= 0) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+
+
+#endif /* _EBMBTREE_H */
+
diff --git a/include/import/ebpttree.h b/include/import/ebpttree.h
new file mode 100644
index 0000000..64816a2
--- /dev/null
+++ b/include/import/ebpttree.h
@@ -0,0 +1,156 @@
+/*
+ * Elastic Binary Trees - macros and structures for operations on pointer nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBPTTREE_H
+#define _EBPTTREE_H
+
+#include "ebtree.h"
+#include "eb32tree.h"
+#include "eb64tree.h"
+
+
+/* Return the structure of type <type> whose member <member> points to <ptr> */
+#define ebpt_entry(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Exported functions and macros.
+ * Many of them are always inlined because they are extremely small, and
+ * are generally called at most once or twice in a program.
+ */
+
+/* Return leftmost node in the tree, or NULL if none.
+ * NOTE(review): the wrappers below assume that ebpt_entry() maps a NULL
+ * eb_node pointer back to NULL, which holds only if <node> is the first
+ * member of struct ebpt_node — confirm against the layout in ebtree.h.
+ */
+static forceinline struct ebpt_node *ebpt_first(struct eb_root *root)
+{
+ return ebpt_entry(eb_first(root), struct ebpt_node, node);
+}
+
+/* Return rightmost node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_last(struct eb_root *root)
+{
+ return ebpt_entry(eb_last(root), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_next_dup(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous leaf node within a duplicate sub-tree, or NULL if none. */
+static inline struct ebpt_node *ebpt_prev_dup(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev_dup(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return next node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_next_unique(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_next_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Return previous node in the tree, skipping duplicates, or NULL if none */
+static forceinline struct ebpt_node *ebpt_prev_unique(struct ebpt_node *ebpt)
+{
+ return ebpt_entry(eb_prev_unique(&ebpt->node), struct ebpt_node, node);
+}
+
+/* Delete node from the tree if it was linked in. Mark the node unused. Note
+ * that this function relies on a non-inlined generic function: eb_delete.
+ */
+static forceinline void ebpt_delete(struct ebpt_node *ebpt)
+{
+ eb_delete(&ebpt->node);
+}
+
+/*
+ * The following functions are inlined but derived from the integer versions.
+ * NOTE(review): the direct casts between ebpt_node and eb32_node/eb64_node
+ * assume the structures share a compatible layout (eb_node followed by a
+ * key of pointer width) — confirm against the definitions in eb32tree.h
+ * and eb64tree.h. The sizeof(void *) test is resolved at compile time so
+ * only one branch is ever emitted.
+ */
+static forceinline struct ebpt_node *ebpt_lookup(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+/* Return the last node whose key is lower than or equal to <x>, or NULL */
+static forceinline struct ebpt_node *ebpt_lookup_le(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup_le(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup_le(root, (u64)(PTR_INT_TYPE)x);
+}
+
+/* Return the first node whose key is greater than or equal to <x>, or NULL */
+static forceinline struct ebpt_node *ebpt_lookup_ge(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_lookup_ge(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)eb64_lookup_ge(root, (u64)(PTR_INT_TYPE)x);
+}
+
+/* Insert <new> into <root>; only new->key needs to be set by the caller */
+static forceinline struct ebpt_node *ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)eb32_insert(root, (struct eb32_node *)new);
+ else
+ return (struct ebpt_node *)eb64_insert(root, (struct eb64_node *)new);
+}
+
+/*
+ * The following functions are less likely to be used directly, because
+ * their code is larger. The non-inlined version is preferred.
+ */
+
+/* Delete node from the tree if it was linked in. Mark the node unused. */
+static forceinline void __ebpt_delete(struct ebpt_node *ebpt)
+{
+ __eb_delete(&ebpt->node);
+}
+
+/* Inline lookup, dispatching to the 32- or 64-bit integer version depending
+ * on the pointer width. NOTE(review): relies on ebpt_node sharing a layout
+ * compatible with eb32_node/eb64_node — confirm against ebtree.h.
+ */
+static forceinline struct ebpt_node *__ebpt_lookup(struct eb_root *root, void *x)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)__eb32_lookup(root, (u32)(PTR_INT_TYPE)x);
+ else
+ return (struct ebpt_node *)__eb64_lookup(root, (u64)(PTR_INT_TYPE)x);
+}
+
+/* Inline insert, dispatching like __ebpt_lookup() above */
+static forceinline struct ebpt_node *__ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ if (sizeof(void *) == 4)
+ return (struct ebpt_node *)__eb32_insert(root, (struct eb32_node *)new);
+ else
+ return (struct ebpt_node *)__eb64_insert(root, (struct eb64_node *)new);
+}
+
+#endif /* _EBPTTREE_H */
diff --git a/include/import/ebsttree.h b/include/import/ebsttree.h
new file mode 100644
index 0000000..db2267b
--- /dev/null
+++ b/include/import/ebsttree.h
@@ -0,0 +1,324 @@
+/*
+ * Elastic Binary Trees - macros to manipulate String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* These functions and macros rely on Multi-Byte nodes */
+
+#ifndef _EBSTTREE_H
+#define _EBSTTREE_H
+
+#include "ebtree.h"
+#include "ebmbtree.h"
+
+/* The following functions are not inlined by default. They are declared
+ * in ebsttree.c, which simply relies on their inline version.
+ */
+struct ebmb_node *ebst_lookup(struct eb_root *root, const char *x);
+struct ebmb_node *ebst_insert(struct eb_root *root, struct ebmb_node *new);
+
+/* Find the first occurrence of a length <len> string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings, and that no null character is present
+ * in string <x> in the first <len> chars. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *
+ebst_lookup_len(struct eb_root *root, const char *x, unsigned int len)
+{
+	struct ebmb_node *node;
+
+	node = ebmb_lookup(root, x, len);
+	/* the stored key must also terminate exactly at <len>, otherwise we
+	 * only matched a prefix of a longer string and must report a miss.
+	 */
+	if (!node || node->key[len] != 0)
+		return NULL;
+	return node;
+}
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+static forceinline struct ebmb_node *__ebst_lookup(struct eb_root *root, const void *x)
+{
+	struct ebmb_node *node;
+	eb_troot_t *troot;
+	int bit;
+	int node_bit;
+
+	troot = root->b[EB_LEFT];
+	if (unlikely(troot == NULL))
+		return NULL;
+
+	/* <bit> counts the bits already known to be identical between <x> and
+	 * the keys visited during the descent; string_equal_bits() returns a
+	 * negative value once the whole key has matched.
+	 */
+	bit = 0;
+	while (1) {
+		if ((eb_gettag(troot) == EB_LEAF)) {
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			if (strcmp((char *)node->key, x) == 0)
+				return node;
+			else
+				return NULL;
+		}
+		node = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+		node_bit = node->node.bit;
+
+		if (node_bit < 0) {
+			/* We have a dup tree now. Either it's for the same
+			 * value, and we walk down left, or it's a different
+			 * one and we don't have our key.
+			 */
+			if (strcmp((char *)node->key, x) != 0)
+				return NULL;
+
+			/* walk down the left-most branch to return the first
+			 * (oldest) duplicate.
+			 */
+			troot = node->node.branches.b[EB_LEFT];
+			while (eb_gettag(troot) != EB_LEAF)
+				troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+			node = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+			return node;
+		}
+
+		/* OK, normal data node, let's walk down but don't compare data
+		 * if we already reached the end of the key.
+		 */
+		if (likely(bit >= 0)) {
+			bit = string_equal_bits(x, node->key, bit);
+			if (likely(bit < node_bit)) {
+				if (bit >= 0)
+					return NULL; /* no more common bits */
+
+				/* bit < 0 : we reached the end of the key. If we
+				 * are in a tree with unique keys, we can return
+				 * this node. Otherwise we have to walk it down
+				 * and stop comparing bits.
+				 */
+				if (eb_gettag(root->b[EB_RGHT]))
+					return node;
+			}
+			/* if the bit is larger than the node's, we must bound it
+			 * because we might have compared too many bytes with an
+			 * inappropriate leaf. For a test, build a tree from "0",
+			 * "WW", "W", "S" inserted in this exact sequence and lookup
+			 * "W" => "S" is returned without this assignment.
+			 */
+			else
+				bit = node_bit;
+		}
+
+		troot = node->node.branches.b[(((unsigned char*)x)[node_bit >> 3] >>
+					       (~node_bit & 7)) & 1];
+	}
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebmb_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+static forceinline struct ebmb_node *
+__ebst_insert(struct eb_root *root, struct ebmb_node *new)
+{
+	struct ebmb_node *old;
+	unsigned int side;
+	eb_troot_t *troot;
+	eb_troot_t *root_right;
+	int diff;
+	int bit;
+	int old_node_bit;
+
+	side = EB_LEFT;
+	troot = root->b[EB_LEFT];
+	root_right = root->b[EB_RGHT];
+	if (unlikely(troot == NULL)) {
+		/* Tree is empty, insert the leaf part below the left branch */
+		root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+		new->node.leaf_p = eb_dotag(root, EB_LEFT);
+		new->node.node_p = NULL; /* node part unused */
+		return new;
+	}
+
+	/* The tree descent is fairly easy :
+	 *  - first, check if we have reached a leaf node
+	 *  - second, check if we have gone too far
+	 *  - third, reiterate
+	 * Everywhere, we use <new> for the node we are inserting, <root>
+	 * for the node we attach it to, and <old> for the node we are
+	 * displacing below <new>. <troot> will always point to the future node
+	 * (tagged with its type). <side> carries the side the node <new> is
+	 * attached to below its parent, which is also where previous node
+	 * was attached.
+	 */
+
+	/* <bit> tracks how many leading bits of new->key are already known to
+	 * match the keys visited so far; string_equal_bits() returns a
+	 * negative value when the whole key matched.
+	 */
+	bit = 0;
+	while (1) {
+		if (unlikely(eb_gettag(troot) == EB_LEAF)) {
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_leaf;
+
+			old = container_of(eb_untag(troot, EB_LEAF),
+					    struct ebmb_node, node.branches);
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_leaf = eb_dotag(&old->node.branches, EB_LEAF);
+
+			new->node.node_p = old->node.leaf_p;
+
+			/* Right here, we have 3 possibilities :
+			 * - the tree does not contain the key, and we have
+			 *   new->key < old->key. We insert new above old, on
+			 *   the left ;
+			 *
+			 * - the tree does not contain the key, and we have
+			 *   new->key > old->key. We insert new above old, on
+			 *   the right ;
+			 *
+			 * - the tree does contain the key, which implies it
+			 *   is alone. We add the new key next to it as a
+			 *   first duplicate.
+			 *
+			 * The last two cases can easily be partially merged.
+			 */
+			if (bit >= 0)
+				bit = string_equal_bits(new->key, old->key, bit);
+
+			if (bit < 0) {
+				/* key was already there */
+
+				/* we may refuse to duplicate this key if the tree is
+				 * tagged as containing only unique keys.
+				 */
+				if (eb_gettag(root_right))
+					return old;
+
+				/* new arbitrarily goes to the right and tops the dup tree */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+				new->node.bit = -1;
+				root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+				return new;
+			}
+
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				/* new->key < old->key, new takes the left */
+				new->node.leaf_p = new_left;
+				old->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_leaf;
+			} else {
+				/* new->key > old->key, new takes the right */
+				old->node.leaf_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_leaf;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* OK we're walking down this link */
+		old = container_of(eb_untag(troot, EB_NODE),
+				    struct ebmb_node, node.branches);
+		old_node_bit = old->node.bit;
+
+		/* Stop going down when we don't have common bits anymore. We
+		 * also stop in front of a duplicates tree because it means we
+		 * have to insert above. Note: we can compare more bits than
+		 * the current node's because as long as they are identical, we
+		 * know we descend along the correct side.
+		 */
+		if (bit >= 0 && (bit < old_node_bit || old_node_bit < 0))
+			bit = string_equal_bits(new->key, old->key, bit);
+
+		if (unlikely(bit < 0)) {
+			/* Perfect match, we must only stop on head of dup tree
+			 * or walk down to a leaf.
+			 */
+			if (old_node_bit < 0) {
+				/* We know here that string_equal_bits matched all
+				 * bits and that we're on top of a dup tree, then
+				 * we can perform the dup insertion and return.
+				 */
+				struct eb_node *ret;
+				ret = eb_insert_dup(&old->node, &new->node);
+				return container_of(ret, struct ebmb_node, node);
+			}
+			/* OK so let's walk down */
+		}
+		else if (bit < old_node_bit || old_node_bit < 0) {
+			/* The tree did not contain the key, or we stopped on top of a dup
+			 * tree, possibly containing the key. In the former case, we insert
+			 * <new> before the node <old>, and set ->bit to designate the lowest
+			 * bit position in <new> which applies to ->branches.b[]. In the latter
+			 * case, we add the key to the existing dup tree. Note that we cannot
+			 * enter here if we match an intermediate node's key that is not the
+			 * head of a dup tree.
+			 */
+			eb_troot_t *new_left, *new_rght;
+			eb_troot_t *new_leaf, *old_node;
+
+			new_left = eb_dotag(&new->node.branches, EB_LEFT);
+			new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+			new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+			old_node = eb_dotag(&old->node.branches, EB_NODE);
+
+			new->node.node_p = old->node.node_p;
+
+			/* we can never match all bits here */
+			diff = cmp_bits(new->key, old->key, bit);
+			if (diff < 0) {
+				new->node.leaf_p = new_left;
+				old->node.node_p = new_rght;
+				new->node.branches.b[EB_LEFT] = new_leaf;
+				new->node.branches.b[EB_RGHT] = old_node;
+			}
+			else {
+				old->node.node_p = new_left;
+				new->node.leaf_p = new_rght;
+				new->node.branches.b[EB_LEFT] = old_node;
+				new->node.branches.b[EB_RGHT] = new_leaf;
+			}
+			break;
+		}
+
+		/* walk down */
+		root = &old->node.branches;
+		side = (new->key[old_node_bit >> 3] >> (~old_node_bit & 7)) & 1;
+		troot = root->b[side];
+	}
+
+	/* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+	 * parent is already set to <new>, and the <root>'s branch is still in
+	 * <side>. Update the root's leaf till we have it. Note that we can also
+	 * find the side by checking the side of new->node.node_p.
+	 */
+
+	/* We need the common higher bits between new->key and old->key.
+	 * This number of bits is already in <bit>.
+	 * NOTE: we can't get here with bit < 0 since we found a dup !
+	 */
+	new->node.bit = bit;
+	root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+	return new;
+}
+
+#endif /* _EBSTTREE_H */
+
diff --git a/include/import/ebtree-t.h b/include/import/ebtree-t.h
new file mode 100644
index 0000000..b695426
--- /dev/null
+++ b/include/import/ebtree-t.h
@@ -0,0 +1,217 @@
+/*
+ * Elastic Binary Trees - types
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _EBTREE_T_H
+#define _EBTREE_T_H
+
+#include <haproxy/api-t.h>
+
+/*
+ * generic types for ebtree
+ */
+
+/* Number of bits per node, and number of leaves per node */
+#define EB_NODE_BITS 1
+#define EB_NODE_BRANCHES (1 << EB_NODE_BITS)
+#define EB_NODE_BRANCH_MASK (EB_NODE_BRANCHES - 1)
+
+/* Be careful not to tweak those values. The walking code is optimized for NULL
+ * detection on the assumption that the following values are intact.
+ */
+#define EB_LEFT 0
+#define EB_RGHT 1
+#define EB_LEAF 0
+#define EB_NODE 1
+
+/* Tags to set in root->b[EB_RGHT] :
+ * - EB_NORMAL is a normal tree which stores duplicate keys.
+ * - EB_UNIQUE is a tree which stores unique keys.
+ */
+#define EB_NORMAL 0
+#define EB_UNIQUE 1
+
+/* This is the same as an eb_node pointer, except that the lower bit embeds
+ * a tag. See eb_dotag()/eb_untag()/eb_gettag(). This tag has two meanings :
+ * - 0=left, 1=right to designate the parent's branch for leaf_p/node_p
+ * - 0=link, 1=leaf to designate the branch's type for branch[]
+ */
+typedef void eb_troot_t;
+
+/* The eb_root connects the node which contains it, to two nodes below it, one
+ * of which may be the same node. At the top of the tree, we use an eb_root
+ * too, which always has its right branch NULL (+/1 low-order bits).
+ */
+struct eb_root {
+	eb_troot_t *b[EB_NODE_BRANCHES]; /* left and right branches; at the
+	                                  * topmost root, the low bit of
+	                                  * b[EB_RGHT] carries the
+	                                  * EB_NORMAL/EB_UNIQUE tag.
+	                                  */
+};
+
+/* The eb_node contains the two parts, one for the leaf, which always exists,
+ * and one for the node, which remains unused in the very first node inserted
+ * into the tree. This structure is 20 bytes per node on 32-bit machines. Do
+ * not change the order, benchmarks have shown that it's optimal this way.
+ * Note: be careful about this struct's alignment if it gets included into
+ * another struct and some atomic ops are expected on the keys or the node.
+ */
+struct eb_node {
+	struct eb_root branches; /* branches, must be at the beginning */
+	eb_troot_t *node_p; /* link node's parent */
+	eb_troot_t *leaf_p; /* leaf node's parent */
+	short int bit; /* link's bit position; negative inside duplicate trees */
+	short unsigned int pfx; /* data prefix length, always related to leaf */
+} __attribute__((packed));
+
+
+/* The root of a tree is an eb_root initialized with both pointers NULL.
+ * During its life, only the left pointer will change. The right one will
+ * always remain NULL, which is the way we detect it.
+ */
+#define EB_ROOT \
+ (struct eb_root) { \
+ .b = {[0] = NULL, [1] = NULL }, \
+ }
+
+#define EB_ROOT_UNIQUE \
+ (struct eb_root) { \
+ .b = {[0] = NULL, [1] = (void *)1 }, \
+ }
+
+#define EB_TREE_HEAD(name) \
+ struct eb_root name = EB_ROOT
+
+
+/*
+ * types for eb32tree
+ */
+
+#define EB32_ROOT EB_ROOT
+#define EB32_TREE_HEAD EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+typedef unsigned int u32;
+typedef signed int s32;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ */
+struct eb32_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u32));
+	u32 key; /* the 32-bit sort key */
+} ALIGNED(sizeof(void*));
+
+/* This structure carries a node, a leaf, a scope, and a key. It must start
+ * with the eb_node so that it can be cast into an eb_node. We could also
+ * have put some sort of transparent union here to reduce the indirection
+ * level, but the fact is, the end user is not meant to manipulate internals,
+ * so this is pointless.
+ * In case sizeof(void*)>=sizeof(long), we know there will be some padding after
+ * the leaf if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb32sc_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u32));
+	u32 key; /* the 32-bit sort key */
+	ALWAYS_ALIGN(sizeof(void*));
+	unsigned long node_s; /* visibility of this node's branches */
+	unsigned long leaf_s; /* visibility of this node's leaf */
+} ALIGNED(sizeof(void*));
+
+/*
+ * types for eb64tree
+ */
+
+#define EB64_ROOT EB_ROOT
+#define EB64_TREE_HEAD EB_TREE_HEAD
+
+/* These types may sometimes already be defined */
+typedef unsigned long long u64;
+typedef signed long long s64;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * In case sizeof(void*)>=sizeof(u64), we know there will be some padding after
+ * the key if it's unaligned. In this case we force the alignment on void* so
+ * that we prefer to have the padding before for more efficient accesses.
+ */
+struct eb64_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	MAYBE_ALIGN(sizeof(u64));
+	ALWAYS_ALIGN(sizeof(void*));
+	u64 key; /* the 64-bit sort key */
+} ALIGNED(sizeof(void*));
+
+#define EBPT_ROOT EB_ROOT
+#define EBPT_TREE_HEAD EB_TREE_HEAD
+
+/* on *almost* all platforms, a pointer can be cast into a size_t which is unsigned */
+#ifndef PTR_INT_TYPE
+#define PTR_INT_TYPE size_t
+#endif
+
+/*
+ * types for ebpttree
+ */
+
+typedef PTR_INT_TYPE ptr_t;
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * Internally, it is automatically cast as an eb32_node or eb64_node.
+ * We always align the key since the struct itself will be padded to the same
+ * size anyway.
+ */
+struct ebpt_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	ALWAYS_ALIGN(sizeof(void*));
+	void *key; /* the pointer value used as the sort key */
+} ALIGNED(sizeof(void*));
+
+/*
+ * types for ebmbtree
+ */
+
+#define EBMB_ROOT EB_ROOT
+#define EBMB_TREE_HEAD EB_TREE_HEAD
+
+/* This structure carries a node, a leaf, and a key. It must start with the
+ * eb_node so that it can be cast into an eb_node. We could also have put some
+ * sort of transparent union here to reduce the indirection level, but the fact
+ * is, the end user is not meant to manipulate internals, so this is pointless.
+ * The 'node.bit' value here works differently from scalar types, as it contains
+ * the number of identical bits between the two branches.
+ * Note that we take a great care of making sure the key is located exactly at
+ * the end of the struct even if that involves holes before it, so that it
+ * always aliases any external key a user would append after. This is why the
+ * key uses the same alignment as the struct.
+ */
+struct ebmb_node {
+	struct eb_node node; /* the tree node, must be at the beginning */
+	ALWAYS_ALIGN(sizeof(void*));
+	/* pre-C99 zero-length-array idiom: the key bytes live right past the
+	 * end of the struct, so this member aliases whatever key the user
+	 * appends after the node.
+	 */
+	unsigned char key[0]; /* the key, its size depends on the application */
+} ALIGNED(sizeof(void*));
+
+#endif /* _EBTREE_T_H */
diff --git a/include/import/ebtree.h b/include/import/ebtree.h
new file mode 100644
index 0000000..d6e51d5
--- /dev/null
+++ b/include/import/ebtree.h
@@ -0,0 +1,857 @@
+/*
+ * Elastic Binary Trees - generic macros and structures.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+
+/*
+ General idea:
+ -------------
+ In a radix binary tree, we may have up to 2N-1 nodes for N keys if all of
+ them are leaves. If we find a way to differentiate intermediate nodes (later
+ called "nodes") and final nodes (later called "leaves"), and we associate
+ them by two, it is possible to build sort of a self-contained radix tree with
+ intermediate nodes always present. It will not be as cheap as the ultree for
+ optimal cases as shown below, but the optimal case almost never happens :
+
+ Eg, to store 8, 10, 12, 13, 14 :
+
+ ultree this theoretical tree
+
+ 8 8
+ / \ / \
+ 10 12 10 12
+ / \ / \
+ 13 14 12 14
+ / \
+ 12 13
+
+   Note that on real-world tests (with a scheduler), it was verified that the
+ case with data on an intermediate node never happens. This is because the
+ data spectrum is too large for such coincidences to happen. It would require
+ for instance that a task has its expiration time at an exact second, with
+ other tasks sharing that second. This is too rare to try to optimize for it.
+
+ What is interesting is that the node will only be added above the leaf when
+ necessary, which implies that it will always remain somewhere above it. So
+ both the leaf and the node can share the exact value of the leaf, because
+ when going down the node, the bit mask will be applied to comparisons. So we
+ are tempted to have one single key shared between the node and the leaf.
+
+ The bit only serves the nodes, and the dups only serve the leaves. So we can
+ put a lot of information in common. This results in one single entity with
+ two branch pointers and two parent pointers, one for the node part, and one
+ for the leaf part :
+
+ node's leaf's
+ parent parent
+ | |
+ [node] [leaf]
+ / \
+ left right
+ branch branch
+
+ The node may very well refer to its leaf counterpart in one of its branches,
+ indicating that its own leaf is just below it :
+
+ node's
+ parent
+ |
+ [node]
+ / \
+ left [leaf]
+ branch
+
+ Adding keys in such a tree simply consists in inserting nodes between
+ other nodes and/or leaves :
+
+ [root]
+ |
+ [node2]
+ / \
+ [leaf1] [node3]
+ / \
+ [leaf2] [leaf3]
+
+ On this diagram, we notice that [node2] and [leaf2] have been pulled away
+ from each other due to the insertion of [node3], just as if there would be
+ an elastic between both parts. This elastic-like behaviour gave its name to
+ the tree : "Elastic Binary Tree", or "EBtree". The entity which associates a
+ node part and a leaf part will be called an "EB node".
+
+ We also notice on the diagram that there is a root entity required to attach
+ the tree. It only contains two branches and there is nothing above it. This
+ is an "EB root". Some will note that [leaf1] has no [node1]. One property of
+ the EBtree is that all nodes have their branches filled, and that if a node
+ has only one branch, it does not need to exist. Here, [leaf1] was added
+ below [root] and did not need any node.
+
+ An EB node contains :
+ - a pointer to the node's parent (node_p)
+ - a pointer to the leaf's parent (leaf_p)
+ - two branches pointing to lower nodes or leaves (branches)
+ - a bit position (bit)
+ - an optional key.
+
+ The key here is optional because it's used only during insertion, in order
+ to classify the nodes. Nothing else in the tree structure requires knowledge
+ of the key. This makes it possible to write type-agnostic primitives for
+ everything, and type-specific insertion primitives. This has led to consider
+ two types of EB nodes. The type-agnostic ones will serve as a header for the
+ other ones, and will simply be called "struct eb_node". The other ones will
+ have their type indicated in the structure name. Eg: "struct eb32_node" for
+ nodes carrying 32 bit keys.
+
+   We will also note that the two branches in a node serve exactly the same
+ purpose as an EB root. For this reason, a "struct eb_root" will be used as
+ well inside the struct eb_node. In order to ease pointer manipulation and
+ ROOT detection when walking upwards, all the pointers inside an eb_node will
+ point to the eb_root part of the referenced EB nodes, relying on the same
+ principle as the linked lists in Linux.
+
+ Another important point to note, is that when walking inside a tree, it is
+ very convenient to know where a node is attached in its parent, and what
+ type of branch it has below it (leaf or node). In order to simplify the
+ operations and to speed up the processing, it was decided in this specific
+ implementation to use the lowest bit from the pointer to designate the side
+ of the upper pointers (left/right) and the type of a branch (leaf/node).
+   This practice is not mandatory by design, but an implementation-specific
+ optimisation permitted on all platforms on which data must be aligned. All
+ known 32 bit platforms align their integers and pointers to 32 bits, leaving
+ the two lower bits unused. So, we say that the pointers are "tagged". And
+ since they designate pointers to root parts, we simply call them
+ "tagged root pointers", or "eb_troot" in the code.
+
+ Duplicate keys are stored in a special manner. When inserting a key, if
+ the same one is found, then an incremental binary tree is built at this
+ place from these keys. This ensures that no special case has to be written
+ to handle duplicates when walking through the tree or when deleting entries.
+ It also guarantees that duplicates will be walked in the exact same order
+ they were inserted. This is very important when trying to achieve fair
+ processing distribution for instance.
+
+ Algorithmic complexity can be derived from 3 variables :
+ - the number of possible different keys in the tree : P
+ - the number of entries in the tree : N
+ - the number of duplicates for one key : D
+
+ Note that this tree is deliberately NOT balanced. For this reason, the worst
+ case may happen with a small tree (eg: 32 distinct keys of one bit). BUT,
+   the operations required to manage such data are so cheap that they make
+ it worth using it even under such conditions. For instance, a balanced tree
+ may require only 6 levels to store those 32 keys when this tree will
+ require 32. But if per-level operations are 5 times cheaper, it wins.
+
+ Minimal, Maximal and Average times are specified in number of operations.
+ Minimal is given for best condition, Maximal for worst condition, and the
+ average is reported for a tree containing random keys. An operation
+ generally consists in jumping from one node to the other.
+
+ Complexity :
+ - lookup : min=1, max=log(P), avg=log(N)
+ - insertion from root : min=1, max=log(P), avg=log(N)
+ - insertion of dups : min=1, max=log(D), avg=log(D)/2 after lookup
+ - deletion : min=1, max=1, avg=1
+ - prev/next : min=1, max=log(P), avg=2 :
+ N/2 nodes need 1 hop => 1*N/2
+ N/4 nodes need 2 hops => 2*N/4
+ N/8 nodes need 3 hops => 3*N/8
+ ...
+ N/x nodes need log(x) hops => log2(x)*N/x
+ Total cost for all N nodes : sum[i=1..N](log2(i)*N/i) = N*sum[i=1..N](log2(i)/i)
+ Average cost across N nodes = total / N = sum[i=1..N](log2(i)/i) = 2
+
+ This design is currently limited to only two branches per node. Most of the
+ tree descent algorithm would be compatible with more branches (eg: 4, to cut
+ the height in half), but this would probably require more complex operations
+ and the deletion algorithm would be problematic.
+
+ Useful properties :
+ - a node is always added above the leaf it is tied to, and never can get
+ below nor in another branch. This implies that leaves directly attached
+ to the root do not use their node part, which is indicated by a NULL
+ value in node_p. This also enhances the cache efficiency when walking
+ down the tree, because when the leaf is reached, its node part will
+ already have been visited (unless it's the first leaf in the tree).
+
+ - pointers to lower nodes or leaves are stored in "branch" pointers. Only
+ the root node may have a NULL in either branch, it is not possible for
+ other branches. Since the nodes are attached to the left branch of the
+ root, it is not possible to see a NULL left branch when walking up a
+ tree. Thus, an empty tree is immediately identified by a NULL left
+ branch at the root. Conversely, the one and only way to identify the
+ root node is to check that it right branch is NULL. Note that the
+ NULL pointer may have a few low-order bits set.
+
+ - a node connected to its own leaf will have branch[0|1] pointing to
+ itself, and leaf_p pointing to itself.
+
+ - a node can never have node_p pointing to itself.
+
+ - a node is linked in a tree if and only if it has a non-null leaf_p.
+
+ - a node can never have both branches equal, except for the root which can
+ have them both NULL.
+
+ - deletion only applies to leaves. When a leaf is deleted, its parent must
+ be released too (unless it's the root), and its sibling must attach to
+ the grand-parent, replacing the parent. Also, when a leaf is deleted,
+ the node tied to this leaf will be removed and must be released too. If
+ this node is different from the leaf's parent, the freshly released
+ leaf's parent will be used to replace the node which must go. A released
+ node will never be used anymore, so there's no point in tracking it.
+
+ - the bit index in a node indicates the bit position in the key which is
+ represented by the branches. That means that a node with (bit == 0) is
+ just above two leaves. Negative bit values are used to build a duplicate
+ tree. The first node above two identical leaves gets (bit == -1). This
+ value logarithmically decreases as the duplicate tree grows. During
+ duplicate insertion, a node is inserted above the highest bit value (the
+ lowest absolute value) in the tree during the right-sided walk. If bit
+ -1 is not encountered (highest < -1), we insert above last leaf.
+ Otherwise, we insert above the node with the highest value which was not
+ equal to the one of its parent + 1.
+
+ - the "eb_next" primitive walks from left to right, which means from lower
+ to higher keys. It returns duplicates in the order they were inserted.
+ The "eb_first" primitive returns the left-most entry.
+
+ - the "eb_prev" primitive walks from right to left, which means from
+ higher to lower keys. It returns duplicates in the opposite order they
+ were inserted. The "eb_last" primitive returns the right-most entry.
+
+ - a tree which has 1 in the lower bit of its root's right branch is a
+ tree with unique nodes. This means that when a node is inserted with
+ a key which already exists will not be inserted, and the previous
+ entry will be returned.
+
+ */
+
+#ifndef _EBTREE_H
+#define _EBTREE_H
+
+#include <stdlib.h>
+#include <import/ebtree-t.h>
+#include <haproxy/api.h>
+
+/* Generic "find last set" for 8-bit values: returns the 1-based position of
+ * the most significant set bit of <x>, i.e. 1..8. The result is undefined
+ * for x == 0. The constant 0xFFFFAA50 is a packed lookup table holding
+ * fls(n)-1 in two bits for each 4-bit nibble value n.
+ */
+static inline int flsnz8_generic(unsigned int x)
+{
+	int ret = 0;
+	if (x >> 4) { x >>= 4; ret += 4; }
+	return ret + ((0xFFFFAA50U >> (x << 1)) & 3) + 1;
+}
+
+/* Note: we never need to run fls on null keys, so we can optimize the fls
+ * function by removing a conditional jump.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+/* this code is similar on 32 and 64 bit */
+/* x86/x86_64: BSR yields the 0-based index of the highest set bit, so one is
+ * added to match the 1..32 convention. The destination register is undefined
+ * when the source is zero, hence the "nz" (non-zero) requirement.
+ */
+static inline int flsnz(int x)
+{
+	int r;
+	__asm__("bsrl %1,%0\n"
+	        : "=r" (r) : "rm" (x));
+	return r+1;
+}
+
+/* x86 8-bit variant: the byte forced into %al is zero-extended to %eax
+ * before BSR, so stale upper bits of the input register cannot affect the
+ * result. Undefined for x == 0.
+ */
+static inline int flsnz8(unsigned char x)
+{
+	int r;
+	__asm__("movzbl %%al, %%eax\n"
+		"bsrl %%eax,%0\n"
+	        : "=r" (r) : "a" (x));
+	return r+1;
+}
+
+#else
+// returns 1 to 32 for 1<<0 to 1<<31. Undefined for 0.
+// Generic fallback: each masking step keeps only the upper half of the
+// still-candidate bits, accumulating the MSB position into ___bits in a
+// binary search over the 32 bit positions.
+#define flsnz(___a) ({ \
+	register int ___x, ___bits = 0; \
+	___x = (___a); \
+	if (___x & 0xffff0000) { ___x &= 0xffff0000; ___bits += 16;} \
+	if (___x & 0xff00ff00) { ___x &= 0xff00ff00; ___bits += 8;} \
+	if (___x & 0xf0f0f0f0) { ___x &= 0xf0f0f0f0; ___bits += 4;} \
+	if (___x & 0xcccccccc) { ___x &= 0xcccccccc; ___bits += 2;} \
+	if (___x & 0xaaaaaaaa) { ___x &= 0xaaaaaaaa; ___bits += 1;} \
+	___bits + 1; \
+	})
+
+/* portable fallback: delegate to the table-based generic version above */
+static inline int flsnz8(unsigned int x)
+{
+	return flsnz8_generic(x);
+}
+
+
+#endif
+
+/* Returns the 1-based position of the most significant set bit of the 64-bit
+ * value <x>, i.e. 1..64. Undefined for x == 0 since it relies on flsnz().
+ */
+static inline int fls64(unsigned long long x)
+{
+	unsigned int h;
+	unsigned int bits = 32;
+
+	h = x >> 32;
+	if (!h) {
+		/* high word empty: rank the low word with no 32-bit offset */
+		h = x;
+		bits = 0;
+	}
+	return flsnz(h) + bits;
+}
+
/* Dispatches to fls64() or flsnz() depending on the argument's width so the
 * cheaper 32-bit form is used whenever possible. Undefined for x == 0.
 */
#define fls_auto(x) ((sizeof(x) > 4) ? fls64(x) : flsnz(x))

/* Linux-like "container_of". It returns a pointer to the structure of type
 * <type> which has its member <name> stored at address <ptr>.
 * NOTE(review): the member offset is computed by taking the member address
 * on a null pointer, like a hand-rolled offsetof(); kept as-is since other
 * project headers may provide/expect this exact definition.
 */
#ifndef container_of
#define container_of(ptr, type, name) ((type *)(((void *)(ptr)) - ((long)&((type *)0)->name)))
#endif

/* returns a pointer to the structure of type <type> which has its member <name>
 * stored at address <ptr>, unless <ptr> is 0, in which case 0 is returned.
 * <ptr> is evaluated only once (copied into __p).
 */
#ifndef container_of_safe
#define container_of_safe(ptr, type, name) \
	({ void *__p = (ptr); \
	   __p ? (type *)(__p - ((long)&((type *)0)->name)) : (type *)0; \
	})
#endif

/* Return the structure of type <type> whose member <member> points to <ptr> */
#define eb_entry(ptr, type, member) container_of(ptr, type, member)
+
/***************************************\
 * Private functions. Not for end-user *
\***************************************/

/* Converts a root pointer to its equivalent eb_troot_t pointer,
 * ready to be stored in ->branch[], leaf_p or node_p. NULL is not
 * conserved. To be used with EB_LEAF, EB_NODE, EB_LEFT or EB_RGHT in <tag>.
 * The tag is simply added to the pointer, relying on eb_root objects being
 * at least 2-byte aligned so that the low bit is always free to carry it.
 */
static inline eb_troot_t *eb_dotag(const struct eb_root *root, const int tag)
{
	return (eb_troot_t *)((void *)root + tag);
}

/* Converts an eb_troot_t pointer pointer to its equivalent eb_root pointer,
 * for use with pointers from ->branch[], leaf_p or node_p. NULL is conserved
 * as long as the tree is not corrupted. To be used with EB_LEAF, EB_NODE,
 * EB_LEFT or EB_RGHT in <tag>. The caller must pass the tag that is known
 * to be stored in <troot>, as it is subtracted, not masked.
 */
static inline struct eb_root *eb_untag(const eb_troot_t *troot, const int tag)
{
	return (struct eb_root *)((void *)troot - tag);
}

/* returns the tag associated with an eb_troot_t pointer, i.e. its low bit */
static inline int eb_gettag(eb_troot_t *troot)
{
	return (unsigned long)troot & 1;
}

/* Converts a root pointer to its equivalent eb_troot_t pointer and clears the
 * tag, no matter what its value was.
 */
static inline struct eb_root *eb_clrtag(const eb_troot_t *troot)
{
	return (struct eb_root *)((unsigned long)troot & ~1UL);
}

/* Returns a pointer to the eb_node holding <root>, i.e. the node whose
 * <branches> member is located at <root>. */
static inline struct eb_node *eb_root_to_node(struct eb_root *root)
{
	return container_of(root, struct eb_node, branches);
}
+
/* Walks down starting at root pointer <start>, and always walking on side
 * <side>. It either returns the node hosting the first leaf on that side,
 * or NULL if no leaf is found. <start> may either be NULL or a branch pointer.
 * The pointer to the leaf (or NULL) is returned.
 */
static inline struct eb_node *eb_walk_down(eb_troot_t *start, unsigned int side)
{
	/* A NULL pointer on an empty tree root will be returned as-is */
	while (eb_gettag(start) == EB_NODE)
		start = (eb_untag(start, EB_NODE))->b[side];
	/* NULL is left untouched (root==eb_node, EB_LEAF==0), so an empty
	 * tree naturally yields NULL here without a dedicated test.
	 */
	return eb_root_to_node(eb_untag(start, EB_LEAF));
}
+
/* This function is used to build a tree of duplicates by adding a new node to
 * a subtree of at least 2 entries. It will probably never be needed inlined,
 * and it is not for end-user. Duplicate sub-trees use negative <bit> values,
 * and the new node is always inserted at the right-most position so that
 * insertion order is preserved for eb_next()-style walks.
 */
static forceinline struct eb_node *
__eb_insert_dup(struct eb_node *sub, struct eb_node *new)
{
	struct eb_node *head = sub;

	/* precomputed tagged pointers to the new node's branches */
	eb_troot_t *new_left = eb_dotag(&new->branches, EB_LEFT);
	eb_troot_t *new_rght = eb_dotag(&new->branches, EB_RGHT);
	eb_troot_t *new_leaf = eb_dotag(&new->branches, EB_LEAF);

	/* first, identify the deepest hole on the right branch */
	while (eb_gettag(head->branches.b[EB_RGHT]) != EB_LEAF) {
		struct eb_node *last = head;
		head = container_of(eb_untag(head->branches.b[EB_RGHT], EB_NODE),
				    struct eb_node, branches);
		if (head->bit > last->bit + 1)
			sub = head;     /* there's a hole here */
	}

	/* Here we have a leaf attached to (head)->b[EB_RGHT] */
	if (head->bit < -1) {
		/* A hole exists just before the leaf, we insert there */
		new->bit = -1;
		sub = container_of(eb_untag(head->branches.b[EB_RGHT], EB_LEAF),
				   struct eb_node, branches);
		head->branches.b[EB_RGHT] = eb_dotag(&new->branches, EB_NODE);

		new->node_p = sub->leaf_p;
		new->leaf_p = new_rght;
		sub->leaf_p = new_left;
		new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_LEAF);
		new->branches.b[EB_RGHT] = new_leaf;
		return new;
	} else {
		int side;
		/* No hole was found before a leaf. We have to insert above
		 * <sub>. Note that we cannot be certain that <sub> is attached
		 * to the right of its parent, as this is only true if <sub>
		 * is inside the dup tree, not at the head.
		 */
		new->bit = sub->bit - 1; /* install at the lowest level */
		side = eb_gettag(sub->node_p);
		head = container_of(eb_untag(sub->node_p, side), struct eb_node, branches);
		head->branches.b[side] = eb_dotag(&new->branches, EB_NODE);

		new->node_p = sub->node_p;
		new->leaf_p = new_rght;
		sub->node_p = new_left;
		new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_NODE);
		new->branches.b[EB_RGHT] = new_leaf;
		return new;
	}
}
+
+
+/**************************************\
+ * Public functions, for the end-user *
+\**************************************/
+
+/* Return non-zero if the tree is empty, otherwise zero */
+static inline int eb_is_empty(const struct eb_root *root)
+{
+ return !root->b[EB_LEFT];
+}
+
+/* Return non-zero if the node is a duplicate, otherwise zero */
+static inline int eb_is_dup(const struct eb_node *node)
+{
+ return node->bit < 0;
+}
+
/* Return the first (left-most, lowest-key) leaf in the tree starting at
 * <root>, or NULL if none */
static inline struct eb_node *eb_first(struct eb_root *root)
{
	return eb_walk_down(root->b[0], EB_LEFT);
}

/* Return the last (right-most, highest-key) leaf in the tree starting at
 * <root>, or NULL if none */
static inline struct eb_node *eb_last(struct eb_root *root)
{
	return eb_walk_down(root->b[0], EB_RGHT);
}
+
/* Return previous leaf node before an existing leaf node, or NULL if none.
 * Duplicates are visited in the reverse of their insertion order.
 */
static inline struct eb_node *eb_prev(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (eb_gettag(t) == EB_LEFT) {
		/* Walking up from left branch. We must ensure that we never
		 * walk beyond root.
		 */
		if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
			return NULL;
		t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
	}
	/* Note that <t> cannot be NULL at this stage; we now descend the
	 * left sibling's right-most path to find the previous leaf. */
	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
	return eb_walk_down(t, EB_RGHT);
}
+
/* Return next leaf node after an existing leaf node, or NULL if none.
 * Duplicates are visited in their insertion order.
 */
static inline struct eb_node *eb_next(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (eb_gettag(t) != EB_LEFT)
		/* Walking up from right branch, so we cannot be below root */
		t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;

	/* Note that <t> cannot be NULL at this stage */
	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
	if (eb_clrtag(t) == NULL)
		return NULL;    /* we stopped at the root: no next leaf */
	return eb_walk_down(t, EB_LEFT);
}
+
/* Return previous leaf node within a duplicate sub-tree, or NULL if none
 * (i.e. NULL is also returned when the walk would leave the dup sub-tree,
 * which is recognized by reaching a node with a non-negative <bit>). */
static inline struct eb_node *eb_prev_dup(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (eb_gettag(t) == EB_LEFT) {
		/* Walking up from left branch. We must ensure that we never
		 * walk beyond root.
		 */
		if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
			return NULL;
		/* if the current node leaves a dup tree, quit */
		if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
			return NULL;
		t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
	}
	/* Note that <t> cannot be NULL at this stage */
	if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
		return NULL;
	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
	return eb_walk_down(t, EB_RGHT);
}
+
/* Return next leaf node within a duplicate sub-tree, or NULL if none
 * (i.e. NULL is also returned when the walk would leave the dup sub-tree,
 * which is recognized by reaching a node with a non-negative <bit>). */
static inline struct eb_node *eb_next_dup(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (eb_gettag(t) != EB_LEFT) {
		/* Walking up from right branch, so we cannot be below root */
		/* if the current node leaves a dup tree, quit */
		if ((eb_root_to_node(eb_untag(t, EB_RGHT)))->bit >= 0)
			return NULL;
		t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
	}

	/* Note that <t> cannot be NULL at this stage. If our leaf is directly
	 * under the root, we must not try to cast the leaf_p into a eb_node*
	 * since it is a pointer to an eb_root.
	 */
	if (eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL)
		return NULL;

	if ((eb_root_to_node(eb_untag(t, EB_LEFT)))->bit >= 0)
		return NULL;
	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
	return eb_walk_down(t, EB_LEFT);
}
+
/* Return previous leaf node before an existing leaf node, skipping duplicates,
 * or NULL if none. The walk keeps climbing while it remains inside a dup
 * sub-tree (negative <bit>) so that all duplicates of the previous key are
 * jumped over at once. */
static inline struct eb_node *eb_prev_unique(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (1) {
		if (eb_gettag(t) != EB_LEFT) {
			node = eb_root_to_node(eb_untag(t, EB_RGHT));
			/* if we're right and not in duplicates, stop here */
			if (node->bit >= 0)
				break;
			t = node->node_p;
		}
		else {
			/* Walking up from left branch. We must ensure that we never
			 * walk beyond root.
			 */
			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
				return NULL;
			t = (eb_root_to_node(eb_untag(t, EB_LEFT)))->node_p;
		}
	}
	/* Note that <t> cannot be NULL at this stage */
	t = (eb_untag(t, EB_RGHT))->b[EB_LEFT];
	return eb_walk_down(t, EB_RGHT);
}
+
/* Return next leaf node after an existing leaf node, skipping duplicates, or
 * NULL if none. The walk keeps climbing while it remains inside a dup
 * sub-tree (negative <bit>) so that all duplicates of the current key are
 * jumped over at once.
 */
static inline struct eb_node *eb_next_unique(struct eb_node *node)
{
	eb_troot_t *t = node->leaf_p;

	while (1) {
		if (eb_gettag(t) == EB_LEFT) {
			if (unlikely(eb_clrtag((eb_untag(t, EB_LEFT))->b[EB_RGHT]) == NULL))
				return NULL;	/* we reached root */
			node = eb_root_to_node(eb_untag(t, EB_LEFT));
			/* if we're left and not in duplicates, stop here */
			if (node->bit >= 0)
				break;
			t = node->node_p;
		}
		else {
			/* Walking up from right branch, so we cannot be below root */
			t = (eb_root_to_node(eb_untag(t, EB_RGHT)))->node_p;
		}
	}

	/* Note that <t> cannot be NULL at this stage */
	t = (eb_untag(t, EB_LEFT))->b[EB_RGHT];
	if (eb_clrtag(t) == NULL)
		return NULL;
	return eb_walk_down(t, EB_LEFT);
}
+
+
/* Removes a leaf node from the tree if it was still in it. Marks the node
 * as unlinked (its leaf_p is set to NULL, making a second call a no-op).
 * The node's spare internal "node part" may be recycled to replace another
 * node's, which is why the bottom half of the function relocates <parent>.
 */
static forceinline void __eb_delete(struct eb_node *node)
{
	__label__ delete_unlink;	/* GCC local label for the final goto */
	unsigned int pside, gpside, sibtype;
	struct eb_node *parent;
	struct eb_root *gparent;

	/* already unlinked: nothing to do */
	if (!node->leaf_p)
		return;

	/* we need the parent, our side, and the grand parent */
	pside = eb_gettag(node->leaf_p);
	parent = eb_root_to_node(eb_untag(node->leaf_p, pside));

	/* We likely have to release the parent link, unless it's the root,
	 * in which case we only set our branch to NULL. Note that we can
	 * only be attached to the root by its left branch.
	 */

	if (eb_clrtag(parent->branches.b[EB_RGHT]) == NULL) {
		/* we're just below the root, it's trivial. */
		parent->branches.b[EB_LEFT] = NULL;
		goto delete_unlink;
	}

	/* To release our parent, we have to identify our sibling, and reparent
	 * it directly to/from the grand parent. Note that the sibling can
	 * either be a link or a leaf.
	 */

	gpside = eb_gettag(parent->node_p);
	gparent = eb_untag(parent->node_p, gpside);

	gparent->b[gpside] = parent->branches.b[!pside];
	sibtype = eb_gettag(gparent->b[gpside]);

	if (sibtype == EB_LEAF) {
		eb_root_to_node(eb_untag(gparent->b[gpside], EB_LEAF))->leaf_p =
			eb_dotag(gparent, gpside);
	} else {
		eb_root_to_node(eb_untag(gparent->b[gpside], EB_NODE))->node_p =
			eb_dotag(gparent, gpside);
	}
	/* Mark the parent unused. Note that we do not check if the parent is
	 * our own node, but that's not a problem because if it is, it will be
	 * marked unused at the same time, which we'll use below to know we can
	 * safely remove it.
	 */
	parent->node_p = NULL;

	/* The parent node has been detached, and is currently unused. It may
	 * belong to another node, so we cannot remove it that way. Also, our
	 * own node part might still be used. so we can use this spare node
	 * to replace ours if needed.
	 */

	/* If our link part is unused, we can safely exit now */
	if (!node->node_p)
		goto delete_unlink;

	/* From now on, <node> and <parent> are necessarily different, and the
	 * <node>'s node part is in use. By definition, <parent> is at least
	 * below <node>, so keeping its key for the bit string is OK.
	 */

	parent->node_p = node->node_p;
	parent->branches = node->branches;
	parent->bit = node->bit;

	/* We must now update the new node's parent... */
	gpside = eb_gettag(parent->node_p);
	gparent = eb_untag(parent->node_p, gpside);
	gparent->b[gpside] = eb_dotag(&parent->branches, EB_NODE);

	/* ... and its branches */
	for (pside = 0; pside <= 1; pside++) {
		if (eb_gettag(parent->branches.b[pside]) == EB_NODE) {
			eb_root_to_node(eb_untag(parent->branches.b[pside], EB_NODE))->node_p =
				eb_dotag(&parent->branches, pside);
		} else {
			eb_root_to_node(eb_untag(parent->branches.b[pside], EB_LEAF))->leaf_p =
				eb_dotag(&parent->branches, pside);
		}
	}
 delete_unlink:
	/* Now the node has been completely unlinked */
	node->leaf_p = NULL;
	return; /* tree is not empty yet */
}
+
/* Compare blocks <a> and <b> byte-to-byte, from bit <ignore> to bit <len-1>.
 * Return the number of equal bits between strings, assuming that the first
 * <ignore> bits are already identical. It is possible to return slightly more
 * than <len> bits if <len> does not stop on a byte boundary and we find exact
 * bytes. Note that parts or all of <ignore> bits may be rechecked. It is only
 * passed here as a hint to speed up the check.
 */
static forceinline int equal_bits(const unsigned char *a,
				  const unsigned char *b,
				  int ignore, int len)
{
	/* round <ignore> down to a byte boundary and start comparing there */
	for (ignore >>= 3, a += ignore, b += ignore, ignore <<= 3;
	     ignore < len; ) {
		unsigned char c;

		a++; b++;
		ignore += 8;
		c = b[-1] ^ a[-1];	/* non-zero bits are the differences */

		if (c) {
			/* OK now we know that old and new differ at byte <ptr> and that <c> holds
			 * the bit differences. We have to find what bit is differing and report
			 * it as the number of identical bits. Note that low bit numbers are
			 * assigned to high positions in the byte, as we compare them as strings.
			 */
			ignore -= flsnz8(c);
			break;
		}
	}
	return ignore;
}
+
/* check that the two blocks <a> and <b> are equal on <len> bits. If it is known
 * they already are on some bytes, this number of equal bytes to be skipped may
 * be passed in <skip>. It returns 0 if they match, otherwise non-zero.
 */
static forceinline int check_bits(const unsigned char *a,
				  const unsigned char *b,
				  int skip,
				  int len)
{
	int bit, ret;

	/* This uncommon construction gives the best performance on x86 because
	 * it makes heavy use of multiple-index addressing and parallel
	 * instructions, and it prevents gcc from reordering the loop since it
	 * is already properly oriented. Tested to be fine with 2.95 to 4.2.
	 */
	bit = ~len + (skip << 3) + 9; // = (skip << 3) + (8 - len)
	ret = a[skip] ^ b[skip];
	if (unlikely(bit >= 0))
		return ret >> bit;	/* <len> ends within the first byte */
	while (1) {
		skip++;
		if (ret)
			return ret;	/* a differing byte was found */
		ret = a[skip] ^ b[skip];
		bit += 8;
		if (bit >= 0)
			return ret >> bit;	/* last (possibly partial) byte */
	}
}
+
+
/* Compare strings <a> and <b> byte-to-byte, from bit <ignore> to the last 0.
 * Return the number of equal bits between strings, assuming that the first
 * <ignore> bits are already identical. Note that parts or all of <ignore> bits
 * may be rechecked. It is only passed here as a hint to speed up the check.
 * The caller is responsible for not passing an <ignore> value larger than any
 * of the two strings. However, referencing any bit from the trailing zero is
 * permitted. Equal strings are reported as a negative number of bits, which
 * indicates the end was reached.
 */
static forceinline int string_equal_bits(const unsigned char *a,
					 const unsigned char *b,
					 int ignore)
{
	int beg;
	unsigned char c;

	beg = ignore >> 3;	/* first byte to compare */

	/* skip known and identical bits. We stop at the first different byte
	 * or at the first zero we encounter on either side.
	 */
	while (1) {
		unsigned char d;

		c = a[beg];
		d = b[beg];
		beg++;

		c ^= d;
		if (c)
			break;		/* bytes differ */
		if (!d)
			return -1;	/* both strings end here: equal */
	}
	/* OK now we know that a and b differ at byte <beg>, or that both are zero.
	 * We have to find what bit is differing and report it as the number of
	 * identical bits. Note that low bit numbers are assigned to high positions
	 * in the byte, as we compare them as strings.
	 */
	return (beg << 3) - flsnz8(c);
}
+
+static forceinline int cmp_bits(const unsigned char *a, const unsigned char *b, unsigned int pos)
+{
+ unsigned int ofs;
+ unsigned char bit_a, bit_b;
+
+ ofs = pos >> 3;
+ pos = ~pos & 7;
+
+ bit_a = (a[ofs] >> pos) & 1;
+ bit_b = (b[ofs] >> pos) & 1;
+
+ return bit_a - bit_b; /* -1: a<b; 0: a=b; 1: a>b */
+}
+
+static forceinline int get_bit(const unsigned char *a, unsigned int pos)
+{
+ unsigned int ofs;
+
+ ofs = pos >> 3;
+ pos = ~pos & 7;
+ return (a[ofs] >> pos) & 1;
+}
+
+/* These functions are declared in ebtree.c */
+void eb_delete(struct eb_node *node);
+struct eb_node *eb_insert_dup(struct eb_node *sub, struct eb_node *new);
+int eb_memcmp(const void *m1, const void *m2, size_t len);
+
#endif /* _EBTREE_H */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/include/import/ist.h b/include/import/ist.h
new file mode 100644
index 0000000..16b8616
--- /dev/null
+++ b/include/import/ist.h
@@ -0,0 +1,957 @@
+/*
+ * include/import/ist.h
+ * Very simple indirect string manipulation functions.
+ *
+ * Copyright (C) 2014-2020 Willy Tarreau - w@1wt.eu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _IMPORT_IST_H
+#define _IMPORT_IST_H
+
+#include <sys/types.h>
+#include <ctype.h>
+#include <stddef.h>
+#include <string.h>
+
+#ifndef IST_FREESTANDING
+#include <stdlib.h>
+#endif
+
+/* ASCII to lower case conversion table */
+#define _IST_LC { \
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, \
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, \
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, \
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, \
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, \
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, \
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, \
+ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, \
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, \
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, \
+ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, \
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, \
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, \
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, \
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, \
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, \
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, \
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, \
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, \
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, \
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, \
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, \
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, \
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, \
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, \
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, \
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, \
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, \
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, \
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, \
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, \
+}
+
+/* ASCII to upper case conversion table */
+#define _IST_UC { \
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, \
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, \
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, \
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, \
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, \
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, \
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, \
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, \
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, \
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, \
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, \
+ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, \
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, \
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, \
+ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, \
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, \
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, \
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, \
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, \
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, \
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, \
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, \
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, \
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, \
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, \
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, \
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, \
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, \
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, \
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, \
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, \
+}
+
+#if defined(USE_OBSOLETE_LINKER) || defined(__TINYC__)
+/* some old linkers and some non-ELF platforms have issues with the weak
+ * attribute so we turn these arrays to literals there. TCC silently ignores
+ * it so we switch to literal as well.
+ */
+#define ist_lc ((const unsigned char[256])_IST_LC)
+#define ist_uc ((const unsigned char[256])_IST_UC)
+#else
+const unsigned char ist_lc[256] __attribute__((weak)) = _IST_LC;
+const unsigned char ist_uc[256] __attribute__((weak)) = _IST_UC;
+#endif
+
/* This string definition will most often be used to represent a read-only
 * string returned from a function, based on the starting point and its length
 * in bytes. No storage is provided, only a pointer and a length. The types
 * here are important as we only want to have 2 native machine words there so
 * that on modern architectures the compiler is capable of efficiently
 * returning a register pair without having to allocate stack room from the
 * caller. This is done with -freg-struct which is often enabled by default.
 */
struct ist {
	char *ptr;  /* start of the string; NULL denotes an unset string */
	size_t len; /* number of bytes; no trailing zero is implied */
};

/* makes a constant ist from a constant string, for use in array declarations.
 * The "" concatenation forces a compile error on non-literal arguments. */
#define IST(str) { .ptr = str "", .len = (sizeof str "") - 1 }

/* IST_NULL is equivalent to an `ist` with `.ptr = NULL` and `.len = 0` */
#define IST_NULL ((const struct ist){ .ptr = 0, .len = 0 })
+
/* makes an ist from a regular zero terminated string. Null has length 0.
 * Constants are detected and replaced with constant initializers. Other values
 * are measured by hand without strlen() as it's much cheaper and inlinable on
 * small strings. The construct is complex because we must never call
 * __builtin_strlen() with an expression otherwise it involves a real
 * measurement.
 */
#if __GNUC__ >= 4
// gcc >= 4 detects constant propagation of str through __x and resolves the
// length of constant strings easily.
#define ist(str) ({                                                    \
	char *__x = (void *)(str);                                     \
	(struct ist){                                                  \
		.ptr = __x,                                            \
		.len = __builtin_constant_p(str) ?                     \
			((void *)str == (void *)0) ? 0 :               \
			__builtin_strlen(__x) :                        \
			({                                             \
				/* runtime measurement, NULL-safe */   \
				size_t __l = 0;                        \
				if (__x) for (__l--; __x[++__l]; ) ;   \
				__l;                                   \
			})                                             \
	};                                                             \
})
#else
// gcc < 4 can't do this, and the side effect is a warning each time a NULL is
// passed to ist() due to the check on __builtin_strlen(). It doesn't have the
// ability to know that this code is never called.
#define ist(str) ({                                                    \
	char *__x = (void *)(str);                                     \
	(struct ist){                                                  \
		.ptr = __x,                                            \
		.len = __builtin_constant_p(str) ?                     \
			((void *)str == (void *)0) ? 0 :               \
			__builtin_strlen(str) :                        \
			({                                             \
				/* runtime measurement, NULL-safe */   \
				size_t __l = 0;                        \
				if (__x) for (__l--; __x[++__l]; ) ;   \
				__l;                                   \
			})                                             \
	};                                                             \
})
#endif
+
+/* makes an ist struct from a string and a length */
+static inline struct ist ist2(const void *ptr, size_t len)
+{
+ return (struct ist){ .ptr = (char *)ptr, .len = len };
+}
+
+/* returns the result of `ist.ptr != NULL` */
+static inline int isttest(const struct ist ist)
+{
+ return ist.ptr != NULL;
+}
+
/* This function MODIFIES the string to add a zero AFTER the end, and returns
 * the start pointer. The purpose is to use it on strings extracted by parsers
 * from larger strings cut with delimiters that are not important and can be
 * destroyed. It allows any such string to be used with regular string
 * functions. It's also convenient to use with printf() to show data extracted
 * from writable areas. The caller is obviously responsible for ensuring that
 * the string is valid and that the first byte past the end is writable. If
 * these conditions cannot be satisfied, use istpad() below instead.
 */
static inline char *ist0(struct ist ist)
{
	ist.ptr[ist.len] = 0;	/* writes one byte past the end on purpose */
	return ist.ptr;
}
+
/* returns the pointer of the string */
static inline char *istptr(const struct ist ist)
{
	return ist.ptr;
}

/* returns the length of the string */
static inline size_t istlen(const struct ist ist)
{
	return ist.len;
}

/* returns the pointer to the first byte past the end of the string */
static inline char *istend(const struct ist ist)
{
	return (ist.ptr + ist.len);
}
+
+/* skips to next character in the string, always stops at the end */
+static inline struct ist istnext(const struct ist ist)
+{
+ struct ist ret = ist;
+
+ if (ret.len) {
+ ret.len--;
+ ret.ptr++;
+ }
+ return ret;
+}
+
+/* Returns the first character of the <ist> and advances the <ist> by 1.
+ * If the <ist> is empty the result is undefined.
+ */
+static inline char istshift(struct ist *ist)
+{
+ if (ist->len) {
+ char c = *ist->ptr;
+ *ist = istnext(*ist);
+
+ return c;
+ }
+
+ return 0;
+}
+
+/* copies the contents from string <ist> to buffer <buf> and adds a trailing
+ * zero. The caller must ensure <buf> is large enough.
+ */
+static inline struct ist istpad(void *buf, const struct ist ist)
+{
+ struct ist ret = { .ptr = buf, .len = ist.len };
+
+ for (ret.len = 0; ret.len < ist.len; ret.len++)
+ ret.ptr[ret.len] = ist.ptr[ret.len];
+
+ ret.ptr[ret.len] = 0;
+ return ret;
+}
+
+/* trims string <ist> to no more than <size> characters. The string is
+ * returned.
+ */
+static inline struct ist isttrim(const struct ist ist, size_t size)
+{
+ struct ist ret = ist;
+
+ if (ret.len > size)
+ ret.len = size;
+ return ret;
+}
+
/* Sets the <len> of the <ist> to zero and returns the previous length.
 *
 * This function is meant to be used in functions that receive an ist containing
 * the destination buffer and the buffer's size. The returned size must be stored
 * to prevent an overflow of such a destination buffer.
 *
 * If you simply want to clear an ist and do not care about the previous length
 * then you should use `isttrim(ist, 0)`.
 *
 * Example Usage (fill the complete buffer with 'x'):
 *
 * void my_func(struct ist* dst)
 * {
 *	size_t dst_size = istclear(dst);
 *	size_t i;
 *
 *	for (i = 0; i < dst_size; i++)
 *		*dst = __istappend(*dst, 'x');
 * }
 */
__attribute__((warn_unused_result))
static inline size_t istclear(struct ist* ist)
{
	size_t len = ist->len;	/* remember the capacity being given up */

	ist->len = 0;

	return len;
}
+
/* trims string <ist> to no more than <size>-1 characters and ensures that a
 * zero is placed after <ist.len> (possibly reduced by one) and before <size>,
 * unless <size> is already zero. The string is returned. This is mostly aimed
 * at building printable strings that need to be zero-terminated.
 * Note: this WRITES into the string's storage, so <ist> must be writable.
 */
static inline struct ist istzero(const struct ist ist, size_t size)
{
	struct ist ret = ist;

	if (!size)
		ret.len = 0;	/* no room at all: cannot place a zero */
	else {
		if (ret.len > size - 1)
			ret.len = size - 1;
		ret.ptr[ret.len] = 0;
	}
	return ret;
}
+
+/* returns the ordinal difference between two strings :
+ * < 0 if ist1 < ist2
+ * = 0 if ist1 == ist2
+ * > 0 if ist1 > ist2
+ */
+static inline int istdiff(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ do {
+ if (!l.len--)
+ return -r.len;
+ if (!r.len--)
+ return 1;
+ } while (*l.ptr++ == *r.ptr++);
+
+ return *(unsigned char *)(l.ptr - 1) - *(unsigned char *)(r.ptr - 1);
+}
+
+/* returns non-zero if <ist1> starts like <ist2> (empty strings do match) */
+static inline int istmatch(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len < r.len)
+ return 0;
+
+ while (r.len--) {
+ if (*l.ptr++ != *r.ptr++)
+ return 0;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> starts like <ist2>, ignoring the case (empty strings do match) */
+static inline int istmatchi(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len < r.len)
+ return 0;
+
+ while (r.len--) {
+ if (*l.ptr != *r.ptr &&
+ ist_lc[(unsigned char)*l.ptr] != ist_lc[(unsigned char)*r.ptr])
+ return 0;
+
+ l.ptr++;
+ r.ptr++;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> starts like <ist2> on the first <count>
+ * characters (empty strings do match).
+ */
+static inline int istnmatch(const struct ist ist1, const struct ist ist2, size_t count)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len > count)
+ l.len = count;
+ if (r.len > count)
+ r.len = count;
+ return istmatch(l, r);
+}
+
+/* returns non-zero if <ist1> equals <ist2> (empty strings are equal) */
+static inline int isteq(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len != r.len)
+ return 0;
+
+ while (l.len--) {
+ if (*l.ptr++ != *r.ptr++)
+ return 0;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> equals <ist2>, ignoring the case (empty strings are equal) */
+static inline int isteqi(const struct ist ist1, const struct ist ist2)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len != r.len)
+ return 0;
+
+ while (l.len--) {
+ if (*l.ptr != *r.ptr &&
+ ist_lc[(unsigned char)*l.ptr] != ist_lc[(unsigned char)*r.ptr])
+ return 0;
+
+ l.ptr++;
+ r.ptr++;
+ }
+ return 1;
+}
+
+/* returns non-zero if <ist1> equals <ist2> on the first <count> characters
+ * (empty strings are equal).
+ */
+static inline int istneq(const struct ist ist1, const struct ist ist2, size_t count)
+{
+ struct ist l = ist1;
+ struct ist r = ist2;
+
+ if (l.len > count)
+ l.len = count;
+ if (r.len > count)
+ r.len = count;
+ return isteq(l, r);
+}
+
/* appends <src> after <dst>. The caller must ensure that the underlying buffer
 * is large enough to fit the character. The updated <dst> (len grown by one)
 * is returned.
 */
static inline struct ist __istappend(struct ist dst, const char src)
{
	dst.ptr[dst.len++] = src;

	return dst;
}
+
+/* copies <src> over <dst> for a maximum of <count> bytes. Returns the number
+ * of characters copied (src.len), or -1 if it does not fit. In all cases, the
+ * contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string.
+ */
+static inline ssize_t istcpy(struct ist *dst, const struct ist src, size_t count)
+{
+ dst->len = 0;
+
+ /* never copy more bytes than the source actually holds */
+ if (count > src.len)
+ count = src.len;
+
+ while (dst->len < count) {
+ dst->ptr[dst->len] = src.ptr[dst->len];
+ dst->len++;
+ }
+
+ /* the copy is complete only when the whole source fitted in <count> */
+ if (dst->len == src.len)
+ return src.len;
+
+ return -1;
+}
+
+/* copies <src> over <dst> for a maximum of <count> bytes. Returns the number
+ * of characters copied, or -1 if it does not fit. A (possibly truncated) valid
+ * copy of <src> is always left into <dst>, and a trailing \0 is appended as
+ * long as <count> is not null, even if that results in reducing the string by
+ * one character.
+ */
+static inline ssize_t istscpy(struct ist *dst, const struct ist src, size_t count)
+{
+ dst->len = 0;
+
+ /* no room at all: we cannot even place the trailing zero */
+ if (!count)
+ goto fail;
+
+ /* when the whole source fits, reserve one extra byte for the zero */
+ if (count > src.len)
+ count = src.len + 1;
+
+ while (dst->len < count - 1) {
+ dst->ptr[dst->len] = src.ptr[dst->len];
+ dst->len++;
+ }
+
+ /* always zero-terminate the (possibly truncated) output */
+ dst->ptr[dst->len] = 0;
+ if (dst->len == src.len)
+ return src.len;
+ fail:
+ return -1;
+}
+
+/* appends <src> after <dst> for a maximum of <count> total bytes in <dst> after
+ * the copy. <dst> is assumed to be <count> or less before the call. The new
+ * string's length is returned, or -1 if a truncation happened. In all cases,
+ * the contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string.
+ */
+static inline ssize_t istcat(struct ist *dst, const struct ist src, size_t count)
+{
+	size_t ofs = 0;
+
+	/* copy while input remains and <dst> has not reached <count> bytes */
+	while (ofs < src.len && dst->len < count)
+		dst->ptr[dst->len++] = src.ptr[ofs++];
+
+	/* success only if the whole of <src> was consumed */
+	if (ofs == src.len)
+		return dst->len;
+	return -1;
+}
+
+/* appends <src> after <dst> for a maximum of <count> total bytes in <dst> after
+ * the copy. <dst> is assumed to be <count> or less before the call. The new
+ * string's length is returned, or -1 if a truncation happened. In all cases,
+ * the contents are copied prior to reporting an error, so that the destination
+ * at least contains a valid but truncated string. A trailing \0 is appended as
+ * long as <count> is not null.
+ */
+static inline ssize_t istscat(struct ist *dst, const struct ist src, size_t count)
+{
+ const char *s = src.ptr;
+
+ /* no room at all: cannot even place the trailing zero */
+ if (!count)
+ goto fail;
+
+ /* leave one byte of room for the trailing zero */
+ while (dst->len < count - 1 && s != src.ptr + src.len) {
+ dst->ptr[dst->len++] = *s++;
+ }
+
+ /* NOTE(review): if dst->len equals <count> on entry (permitted by the
+ * "count or less" contract), this writes the zero at index <count>, one
+ * byte past the advertised limit — confirm callers always keep
+ * dst->len < count or size the buffer for count+1 bytes.
+ */
+ dst->ptr[dst->len] = 0;
+ if (s == src.ptr + src.len)
+ return dst->len;
+ fail:
+ return -1;
+}
+
+/* copies the entire <src> over <dst>, which must be allocated large enough to
+ * hold the whole contents. No trailing zero is appended, this is mainly used
+ * for protocol processing where the frame length has already been checked. An
+ * ist made of the output and its length are returned. The destination is not
+ * touched if src.len is null.
+ */
+static inline struct ist ist2bin(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = src.ptr[ofs];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ /* plain byte copy: <ofs> ends up equal to src.len */
+ return ist2(dst, ofs);
+}
+
+/* copies the entire <src> over <dst>, which must be allocated large enough to
+ * hold the whole contents as well as a trailing zero which is always appended.
+ * This is mainly used for protocol conversions where the frame length has
+ * already been checked. An ist made of the output and its length (not counting
+ * the trailing zero) are returned.
+ */
+static inline struct ist ist2str(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ dst[ofs] = src.ptr[ofs];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ /* same as ist2bin() plus the trailing zero; <ofs> equals src.len here */
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* makes a lower case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents. No trailing zero is
+ * appended, this is mainly used for protocol processing where the frame length
+ * has already been checked. An ist made of the output and its length are
+ * returned. The destination is not touched if src.len is null.
+ */
+static inline struct ist ist2bin_lc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ /* each byte is mapped through the ist_lc[] lower-case table */
+ dst[ofs] = ist_lc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ return ist2(dst, ofs);
+}
+
+/* makes a lower case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents as well as a trailing zero
+ * which is always appended. This is mainly used for protocol conversions where
+ * the frame length has already been checked. An ist made of the output and its
+ * length (not counting the trailing zero) are returned.
+ */
+static inline struct ist ist2str_lc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ /* each byte is mapped through the ist_lc[] lower-case table */
+ dst[ofs] = ist_lc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* makes an upper case copy of the entire <src> into <dst>, which must have
+ * been allocated large enough to hold the whole contents. No trailing zero is
+ * appended, this is mainly used for protocol processing where the frame length
+ * has already been checked. An ist made of the output and its length are
+ * returned. The destination is not touched if src.len is null.
+ */
+static inline struct ist ist2bin_uc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ /* each byte is mapped through the ist_uc[] upper-case table */
+ dst[ofs] = ist_uc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ return ist2(dst, ofs);
+}
+
+/* makes an upper case copy of the entire <src> into <dst>, which must have been
+ * allocated large enough to hold the whole contents as well as a trailing zero
+ * which is always appended. This is mainly used for protocol conversions where
+ * the frame length has already been checked. An ist made of the output and its
+ * length (not counting the trailing zero) are returned.
+ */
+static inline struct ist ist2str_uc(char *dst, const struct ist src)
+{
+ size_t ofs = 0;
+
+ /* discourage the compiler from trying to optimize for large strings,
+ * but tell it that most of our strings are not empty.
+ */
+ if (__builtin_expect(ofs < src.len, 1)) {
+ do {
+ /* each byte is mapped through the ist_uc[] upper-case table */
+ dst[ofs] = ist_uc[(unsigned char)src.ptr[ofs]];
+ ofs++;
+ } while (__builtin_expect(ofs < src.len, 0));
+ }
+ dst[ofs] = 0;
+ return ist2(dst, ofs);
+}
+
+/* looks for first occurrence of character <chr> in string <ist>. Returns the
+ * pointer if found, or NULL if not found.
+ */
+static inline char *istchr(const struct ist ist, char chr)
+{
+	char *p = ist.ptr;
+	char *end = ist.ptr + ist.len;
+
+	for (; p < end; p++) {
+		if (*p == chr)
+			return p;
+	}
+	return NULL;
+}
+
+/* Returns a pointer to the first control character found in <ist>, or NULL if
+ * none is present. A control character is defined as a byte whose value is
+ * between 0x00 and 0x1F included. The function is optimized for strings having
+ * no CTL chars by processing up to sizeof(long) bytes at once on architectures
+ * supporting efficient unaligned accesses. Despite this it is not very fast
+ * (~0.43 byte/cycle) and should mostly be used on low match probability when
+ * it can save a call to a much slower function.
+ */
+static inline const char *ist_find_ctl(const struct ist ist)
+{
+ /* the packed attribute lets the compiler emit unaligned word loads */
+ const union { unsigned long v; } __attribute__((packed)) *u;
+ /* <curr> starts one word BEFORE the buffer; it is advanced before the
+ * first dereference, so no out-of-bounds access takes place.
+ */
+ const char *curr = (void *)ist.ptr - sizeof(long);
+ const char *last = curr + ist.len;
+ unsigned long l1, l2;
+
+ /* word-at-a-time scan: stops at the first word that may hold a CTL */
+ do {
+ curr += sizeof(long);
+ if (curr > last)
+ break;
+ u = (void *)curr;
+ /* subtract 0x202020...20 to the value to generate a carry in
+ * the lower byte if the byte contains a lower value. If we
+ * generate a bit 7 that was not there, it means the byte was
+ * within 0x00..0x1F.
+ */
+ l2 = u->v;
+ l1 = ~l2 & ((~0UL / 255) * 0x80); /* 0x808080...80 */
+ l2 -= (~0UL / 255) * 0x20; /* 0x202020...20 */
+ } while ((l1 & l2) == 0);
+
+ /* byte-wise pass over the suspect word (or the short tail) to locate
+ * the exact offending byte, if any.
+ */
+ last += sizeof(long);
+ if (__builtin_expect(curr < last, 0)) {
+ do {
+ if ((unsigned char)*curr < 0x20)
+ return curr;
+ curr++;
+ } while (curr < last);
+ }
+ return NULL;
+}
+
+/* Returns a pointer to the first character found <ist> that belongs to the
+ * range [min:max] inclusive, or NULL if none is present. The function is
+ * optimized for strings having no such chars by processing up to sizeof(long)
+ * bytes at once on architectures supporting efficient unaligned accesses.
+ * Despite this it is not very fast (~0.43 byte/cycle) and should mostly be
+ * used on low match probability when it can save a call to a much slower
+ * function. Will not work for characters 0x80 and above. It's optimized for
+ * min and max to be known at build time.
+ */
+static inline const char *ist_find_range(const struct ist ist, unsigned char min, unsigned char max)
+{
+ /* the packed attribute lets the compiler emit unaligned word loads */
+ const union { unsigned long v; } __attribute__((packed)) *u;
+ /* <curr> starts one word BEFORE the buffer; it is advanced before the
+ * first dereference, so no out-of-bounds access takes place.
+ */
+ const char *curr = (void *)ist.ptr - sizeof(long);
+ const char *last = curr + ist.len;
+ unsigned long l1, l2;
+
+ /* easier with an exclusive boundary */
+ max++;
+
+ /* word-at-a-time scan: stops at the first word that may hold a match */
+ do {
+ curr += sizeof(long);
+ if (curr > last)
+ break;
+ u = (void *)curr;
+ /* add 0x<min><min><min><min>..<min> then subtract
+ * 0x<max><max><max><max>..<max> to the value to generate a
+ * carry in the lower byte if the byte contains a lower value.
+ * If we generate a bit 7 that was not there, it means the byte
+ * was min..max.
+ */
+ l2 = u->v;
+ l1 = ~l2 & ((~0UL / 255) * 0x80); /* 0x808080...80 */
+ l2 += (~0UL / 255) * min; /* 0x<min><min>..<min> */
+ l2 -= (~0UL / 255) * max; /* 0x<max><max>..<max> */
+ } while ((l1 & l2) == 0);
+
+ /* byte-wise pass over the suspect word (or the short tail); the
+ * unsigned subtraction folds the two range checks into one compare.
+ */
+ last += sizeof(long);
+ if (__builtin_expect(curr < last, 0)) {
+ do {
+ if ((unsigned char)(*curr - min) < (unsigned char)(max - min))
+ return curr;
+ curr++;
+ } while (curr < last);
+ }
+ return NULL;
+}
+
+/* looks for first occurrence of character <chr> in string <ist> and returns
+ * the tail of the string starting with this character, or (ist.end,0) if not
+ * found.
+ */
+static inline struct ist istfind(const struct ist ist, char chr)
+{
+	size_t pos;
+
+	for (pos = 0; pos < ist.len; pos++) {
+		if (ist.ptr[pos] == chr)
+			return ist2(ist.ptr + pos, ist.len - pos);
+	}
+	/* not found: empty tail anchored at the end of the string */
+	return ist2(ist.ptr + ist.len, 0);
+}
+
+/* looks for first occurrence of character different from <chr> in string <ist>
+ * and returns the tail of the string starting at this character, or (ist_end,0)
+ * if not found.
+ */
+static inline struct ist istskip(const struct ist ist, char chr)
+{
+	size_t pos;
+
+	for (pos = 0; pos < ist.len; pos++) {
+		if (ist.ptr[pos] != chr)
+			return ist2(ist.ptr + pos, ist.len - pos);
+	}
+	/* the whole string was made of <chr>: empty tail at the end */
+	return ist2(ist.ptr + ist.len, 0);
+}
+
+/* looks for first occurrence of string <pat> in string <ist> and returns the
+ * tail of the string starting at this position, or (NULL,0) if not found. The
+ * empty pattern is found everywhere.
+ */
+static inline struct ist istist(const struct ist ist, const struct ist pat)
+{
+ struct ist ret = ist;
+ size_t pos;
+
+ if (!pat.len)
+ return ret;
+
+ while (1) {
+ loop:
+ /* jump to the next candidate: first byte of <pat> in <ret> */
+ ret = istfind(ret, *pat.ptr);
+ if (ret.len < pat.len)
+ break;
+
+ /* ret.len >= 1, pat.len >= 1 and *ret.ptr == *pat.ptr */
+
+ /* step past the matched first byte and verify the rest of the
+ * pattern; on a mismatch, resume searching from this position.
+ */
+ ret = istnext(ret);
+ for (pos = 0; pos < pat.len - 1; ) {
+ ++pos;
+ if (ret.ptr[pos - 1] != pat.ptr[pos])
+ goto loop;
+ }
+ /* full match: rewind one byte to include the first char */
+ return ist2(ret.ptr - 1, ret.len + 1);
+ }
+ return IST_NULL;
+}
+
+/*
+ * looks for the first occurrence of <chr> in string <ist> and returns a shorter
+ * ist if char is found.
+ */
+static inline struct ist iststop(const struct ist ist, char chr)
+{
+	size_t pos = 0;
+
+	/* advance until <chr> or the end of the string is reached */
+	while (pos < ist.len && ist.ptr[pos] != chr)
+		pos++;
+	return ist2(ist.ptr, pos);
+}
+
+/*
+ * advance <.ptr> by <nb> characters.
+ * If <ist> is too short, (ist.end,0) is returned.
+ */
+static inline struct ist istadv(const struct ist ist, const size_t nb)
+{
+	if (nb <= ist.len)
+		return ist2(ist.ptr + nb, ist.len - nb);
+	/* <nb> exceeds the length: clamp to an empty string at the end */
+	return ist2(ist.ptr + ist.len, 0);
+}
+
+/* Splits the given <ist> at the given character. The returned ist is
+ * equivalent to iststop(ist, delim). The passed <ist> will contain the
+ * remainder of the string, not including the delimiter. In other words
+ * it will be advanced by the length of the returned string plus 1.
+ */
+static inline struct ist istsplit(struct ist *ist, char delim)
+{
+	const struct ist head = iststop(*ist, delim);
+
+	/* skip the head plus the delimiter; istadv() clamps at the end when
+	 * the delimiter was not found.
+	 */
+	*ist = istadv(*ist, head.len + 1);
+	return head;
+}
+
+/*
+ * compare 2 ists and return non-zero if they are the same
+ */
+static inline int istissame(const struct ist ist1, const struct ist ist2)
+{
+	/* identity, not content: both the pointer and the length must match */
+	if (ist1.ptr != ist2.ptr)
+		return 0;
+	return ist1.len == ist2.len;
+}
+
+#ifndef IST_FREESTANDING
+/* This function allocates <size> bytes and returns an `ist` pointing to
+ * the allocated area with size `0`.
+ *
+ * If this function fails to allocate memory the return value is equivalent
+ * to IST_NULL.
+ */
+static inline struct ist istalloc(const size_t size)
+{
+ /* Note: do not use ist2 here, as it triggers a gcc11 warning.
+ * '<unknown>' may be used uninitialized [-Werror=maybe-uninitialized]
+ *
+ * This warning is reported because the uninitialized memory block
+ * allocated by malloc should not be passed to a const argument as in
+ * ist2.
+ * See https://gcc.gnu.org/onlinedocs/gcc-11.1.0/gcc/Warning-Options.html#index-Wmaybe-uninitialized
+ */
+ return (struct ist){ .ptr = malloc(size), .len = 0 };
+}
+
+/* This function performs the equivalent of free() on the given <ist>.
+ *
+ * After this function returns the value of the given <ist> will be
+ * modified to be equivalent to IST_NULL.
+ */
+static inline void istfree(struct ist *ist)
+{
+ /* free(NULL) is a no-op, and resetting to IST_NULL makes a double
+ * istfree() on the same variable harmless.
+ */
+ free(ist->ptr);
+ *ist = IST_NULL;
+}
+
+/* This function performs the equivalent of strdup() on the given <src>.
+ *
+ * If this function fails to allocate memory the return value is equivalent
+ * to IST_NULL.
+ */
+static inline struct ist istdup(const struct ist src)
+{
+ const size_t src_size = src.len;
+
+ /* Allocate at least 1 byte to allow duplicating an empty string with
+ * malloc implementations that return NULL for a 0-size allocation.
+ */
+ struct ist dst = istalloc(src_size ? src_size : 1);
+
+ /* only copy when the allocation succeeded (isttest checks .ptr) */
+ if (isttest(dst)) {
+ istcpy(&dst, src, src_size);
+ }
+
+ return dst;
+}
+#endif
+
+#endif
diff --git a/include/import/lru.h b/include/import/lru.h
new file mode 100644
index 0000000..d674e53
--- /dev/null
+++ b/include/import/lru.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <import/eb64tree.h>
+
+/* The LRU supports a global cache shared between multiple domains and multiple
+ * versions of their datasets. The purpose is not to have to flush the whole
+ * LRU once a key is updated and not valid anymore (eg: ACL files), as well as
+ * to reliably support concurrent accesses and handle conflicts gracefully. For
+ * each key a pointer to a dataset and its internal data revision are stored.
+ * All lookups verify that these elements match those passed by the caller and
+ * only return a valid entry upon matching. Otherwise the entry is either
+ * allocated or recycled and considered new. New entries are always initialized
+ * with a NULL domain pointer which is used by the caller to detect that the
+ * entry is new and must be populated. Such entries never expire and are
+ * protected from the risk of being recycled. It's then the caller's
+ * responsibility to perform the operation and commit the entry with its latest
+ * result. This domain thus serves as a lock to protect the entry during all
+ * the computation needed to update it. In a simple use case where the cache is
+ * dedicated, it is recommended to pass the LRU head as the domain pointer and
+ * for example zero as the revision. The most common use case for the caller
+ * consists in simply checking that the return is not null and that the domain
+ * is not null, then to use the result. The get() function returns null if it
+ * cannot allocate a node (memory or key being currently updated).
+ */
+struct lru64_list {
+ struct lru64_list *n; /* next element in the LRU list */
+ struct lru64_list *p; /* previous element in the LRU list */
+};
+
+struct lru64_head {
+ struct lru64_list list; /* ordered list of all cached entries */
+ struct eb_root keys; /* eb64 tree indexing entries by key */
+ struct lru64 *spare; /* one pre-allocated node kept for reuse */
+ int cache_size; /* maximum number of entries */
+ int cache_usage; /* current number of entries */
+};
+
+struct lru64 {
+ struct eb64_node node; /* indexing key, typically a hash64 */
+ struct lru64_list lru; /* LRU list */
+ void *domain; /* who this data belongs to */
+ unsigned long long revision; /* data revision (to avoid use-after-free) */
+ void *data; /* returned value, user decides how to use this */
+ void (*free)(void *data); /* function to release data, if needed */
+};
+
+
+/* see the block comment above for the lookup/get/commit protocol; the
+ * implementations live in the matching lru.c file.
+ */
+struct lru64 *lru64_lookup(unsigned long long key, struct lru64_head *lru, void *domain, unsigned long long revision);
+struct lru64 *lru64_get(unsigned long long key, struct lru64_head *lru, void *domain, unsigned long long revision);
+void lru64_commit(struct lru64 *elem, void *data, void *domain, unsigned long long revision, void (*free)(void *));
+struct lru64_head *lru64_new(int size);
+int lru64_destroy(struct lru64_head *lru);
+void lru64_kill_oldest(struct lru64_head *lru, unsigned long int nb);
diff --git a/include/import/mjson.h b/include/import/mjson.h
new file mode 100644
index 0000000..b96fd3f
--- /dev/null
+++ b/include/import/mjson.h
@@ -0,0 +1,209 @@
+// Copyright (c) 2018-2020 Cesanta Software Limited
+// All rights reserved
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef MJSON_H
+#define MJSON_H
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef MJSON_ENABLE_PRINT
+#define MJSON_ENABLE_PRINT 1
+#endif
+
+#ifndef MJSON_ENABLE_RPC
+#define MJSON_ENABLE_RPC 1
+#endif
+
+#ifndef MJSON_ENABLE_BASE64
+#define MJSON_ENABLE_BASE64 1
+#endif
+
+#ifndef MJSON_ENABLE_MERGE
+#define MJSON_ENABLE_MERGE 0
+#elif MJSON_ENABLE_MERGE
+#define MJSON_ENABLE_NEXT 1
+#endif
+
+#ifndef MJSON_ENABLE_PRETTY
+#define MJSON_ENABLE_PRETTY 0
+#elif MJSON_ENABLE_PRETTY
+#define MJSON_ENABLE_NEXT 1
+#endif
+
+#ifndef MJSON_ENABLE_NEXT
+#define MJSON_ENABLE_NEXT 0
+#endif
+
+#ifndef MJSON_RPC_LIST_NAME
+#define MJSON_RPC_LIST_NAME "rpc.list"
+#endif
+
+#ifndef MJSON_DYNBUF_CHUNK
+#define MJSON_DYNBUF_CHUNK 256 // Allocation granularity for print_dynamic_buf
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ MJSON_ERROR_INVALID_INPUT = -1,
+ MJSON_ERROR_TOO_DEEP = -2,
+};
+
+enum mjson_tok {
+ MJSON_TOK_INVALID = 0,
+ MJSON_TOK_KEY = 1,
+ MJSON_TOK_STRING = 11,
+ MJSON_TOK_NUMBER = 12,
+ MJSON_TOK_TRUE = 13,
+ MJSON_TOK_FALSE = 14,
+ MJSON_TOK_NULL = 15,
+ MJSON_TOK_ARRAY = 91,
+ MJSON_TOK_OBJECT = 123,
+};
+#define MJSON_TOK_IS_VALUE(t) ((t) > 10 && (t) < 20)
+
+typedef int (*mjson_cb_t)(int ev, const char *s, int off, int len, void *ud);
+
+#ifndef MJSON_MAX_DEPTH
+#define MJSON_MAX_DEPTH 20
+#endif
+
+int mjson(const char *s, int len, mjson_cb_t cb, void *ud);
+enum mjson_tok mjson_find(const char *s, int len, const char *jp,
+ const char **tokptr, int *toklen);
+int mjson_get_number(const char *s, int len, const char *path, double *v);
+int mjson_get_bool(const char *s, int len, const char *path, int *v);
+int mjson_get_string(const char *s, int len, const char *path, char *to, int n);
+int mjson_get_hex(const char *s, int len, const char *path, char *to, int n);
+
+#if MJSON_ENABLE_NEXT
+int mjson_next(const char *s, int n, int off, int *koff, int *klen, int *voff,
+ int *vlen, int *vtype);
+#endif
+
+#if MJSON_ENABLE_BASE64
+int mjson_get_base64(const char *s, int len, const char *path, char *to, int n);
+int mjson_base64_dec(const char *src, int n, char *dst, int dlen);
+#endif
+
+#if MJSON_ENABLE_PRINT
+typedef int (*mjson_print_fn_t)(const char *buf, int len, void *userdata);
+typedef int (*mjson_vprint_fn_t)(mjson_print_fn_t, void *, va_list *);
+
+struct mjson_fixedbuf {
+ char *ptr;
+ int size, len;
+};
+
+int mjson_printf(mjson_print_fn_t, void *, const char *fmt, ...);
+int mjson_vprintf(mjson_print_fn_t, void *, const char *fmt, va_list ap);
+int mjson_print_str(mjson_print_fn_t, void *, const char *s, int len);
+int mjson_print_int(mjson_print_fn_t, void *, int value, int is_signed);
+int mjson_print_long(mjson_print_fn_t, void *, long value, int is_signed);
+int mjson_print_buf(mjson_print_fn_t fn, void *, const char *buf, int len);
+
+int mjson_print_null(const char *ptr, int len, void *userdata);
+int mjson_print_fixed_buf(const char *ptr, int len, void *userdata);
+int mjson_print_dynamic_buf(const char *ptr, int len, void *userdata);
+
+#if MJSON_ENABLE_PRETTY
+int mjson_pretty(const char *, int, const char *, mjson_print_fn_t, void *);
+#endif
+
+#if MJSON_ENABLE_MERGE
+int mjson_merge(const char *, int, const char *, int, mjson_print_fn_t, void *);
+#endif
+
+#endif // MJSON_ENABLE_PRINT
+
+#if MJSON_ENABLE_RPC
+
+void jsonrpc_init(mjson_print_fn_t, void *userdata);
+int mjson_globmatch(const char *s1, int n1, const char *s2, int n2);
+
+struct jsonrpc_request {
+ struct jsonrpc_ctx *ctx;
+ const char *frame; // Points to the whole frame
+ int frame_len; // Frame length
+ const char *params; // Points to the "params" in the request frame
+ int params_len; // Length of the "params"
+ const char *id; // Points to the "id" in the request frame
+ int id_len; // Length of the "id"
+ const char *method; // Points to the "method" in the request frame
+ int method_len; // Length of the "method"
+ mjson_print_fn_t fn; // Printer function
+ void *fndata; // Printer function data
+ void *userdata; // Callback's user data as specified at export time
+};
+
+struct jsonrpc_method {
+ const char *method;
+ int method_sz;
+ void (*cb)(struct jsonrpc_request *);
+ struct jsonrpc_method *next;
+};
+
+// Main RPC context, stores current request information and a list of
+// exported RPC methods.
+struct jsonrpc_ctx {
+ struct jsonrpc_method *methods;
+ mjson_print_fn_t response_cb;
+ void *response_cb_data;
+};
+
+// Registers function fn under the given name within the given RPC context
+#define jsonrpc_ctx_export(ctx, name, fn) \
+ do { \
+ static struct jsonrpc_method m = {(name), sizeof(name) - 1, (fn), 0}; \
+ m.next = (ctx)->methods; \
+ (ctx)->methods = &m; \
+ } while (0)
+
+void jsonrpc_ctx_init(struct jsonrpc_ctx *ctx, mjson_print_fn_t, void *);
+void jsonrpc_return_error(struct jsonrpc_request *r, int code,
+ const char *message, const char *data_fmt, ...);
+void jsonrpc_return_success(struct jsonrpc_request *r, const char *result_fmt,
+ ...);
+void jsonrpc_ctx_process(struct jsonrpc_ctx *ctx, const char *req, int req_sz,
+ mjson_print_fn_t fn, void *fndata, void *userdata);
+
+extern struct jsonrpc_ctx jsonrpc_default_context;
+
+#define jsonrpc_export(name, fn) \
+ jsonrpc_ctx_export(&jsonrpc_default_context, (name), (fn))
+
+#define jsonrpc_process(buf, len, fn, fnd, ud) \
+ jsonrpc_ctx_process(&jsonrpc_default_context, (buf), (len), (fn), (fnd), (ud))
+
+#define JSONRPC_ERROR_INVALID -32700 /* Invalid JSON was received */
+#define JSONRPC_ERROR_NOT_FOUND -32601 /* The method does not exist */
+#define JSONRPC_ERROR_BAD_PARAMS -32602 /* Invalid params passed */
+#define JSONRPC_ERROR_INTERNAL -32603 /* Internal JSON-RPC error */
+
+#endif // MJSON_ENABLE_RPC
+#ifdef __cplusplus
+}
+#endif
+#endif // MJSON_H
diff --git a/include/import/plock.h b/include/import/plock.h
new file mode 100644
index 0000000..fc001e2
--- /dev/null
+++ b/include/import/plock.h
@@ -0,0 +1,1422 @@
+/* plock - progressive locks
+ *
+ * Copyright (C) 2012-2017 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PL_PLOCK_H
+#define PL_PLOCK_H
+
+#include "atomic-ops.h"
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#include <sched.h>
+#endif
+
+/* The lock word packs three counters: R (shared readers) in the low bits,
+ * S (seek) in the next two bits, and W (write) in the high bits. For each
+ * field, _1 is the increment for one holder, _ANY is the full field mask
+ * (non-zero means at least one holder), and _2PL is the field mask with its
+ * lowest bit cleared (non-zero means at least two holders).
+ */
+
+/* 64 bit */
+#define PLOCK64_RL_1 0x0000000000000004ULL
+#define PLOCK64_RL_2PL 0x00000000FFFFFFF8ULL
+#define PLOCK64_RL_ANY 0x00000000FFFFFFFCULL
+#define PLOCK64_SL_1 0x0000000100000000ULL
+#define PLOCK64_SL_ANY 0x0000000300000000ULL
+#define PLOCK64_WL_1 0x0000000400000000ULL
+#define PLOCK64_WL_2PL 0xFFFFFFF800000000ULL
+#define PLOCK64_WL_ANY 0xFFFFFFFC00000000ULL
+
+/* 32 bit */
+#define PLOCK32_RL_1 0x00000004
+#define PLOCK32_RL_2PL 0x0000FFF8
+#define PLOCK32_RL_ANY 0x0000FFFC
+#define PLOCK32_SL_1 0x00010000
+#define PLOCK32_SL_ANY 0x00030000
+#define PLOCK32_WL_1 0x00040000
+#define PLOCK32_WL_2PL 0xFFF80000
+#define PLOCK32_WL_ANY 0xFFFC0000
+
+/* dereferences <*p> as unsigned long without causing aliasing issues */
+#define pl_deref_long(p) ({ volatile unsigned long *__pl_l = (unsigned long *)(p); *__pl_l; })
+
+/* dereferences <*p> as unsigned int without causing aliasing issues */
+#define pl_deref_int(p) ({ volatile unsigned int *__pl_i = (unsigned int *)(p); *__pl_i; })
+
+/* This function waits for <lock> to release all bits covered by <mask>, and
+ * enforces an exponential backoff using CPU pauses to limit the pollution to
+ * the other threads' caches. The progression follows (1.5^N)-1, limited to
+ * 16384 iterations, which is way sufficient even for very large numbers of
+ * threads. It's possible to disable exponential backoff (EBO) for debugging
+ * purposes by setting PLOCK_DISABLE_EBO, in which case the function will be
+ * replaced with a simpler macro. This may for example be useful to more
+ * easily track callers' CPU usage. The macro was not designed to be used
+ * outside of the functions defined here.
+ * NOTE(review): the growth cap in the code below is 0x3ffff (262143), not
+ * 16384 as stated above -- confirm against upstream before relying on it.
+ */
+#if defined(PLOCK_DISABLE_EBO)
+#define pl_wait_unlock_long(lock, mask) \
+ ({ \
+ unsigned long _r; \
+ do { \
+ pl_cpu_relax(); \
+ _r = pl_deref_long(lock); \
+ } while (_r & mask); \
+ _r; /* return value */ \
+ })
+#else /* not PLOCK_DISABLE_EBO */
+__attribute__((unused,always_inline,no_instrument_function)) inline
+static unsigned long __pl_wait_unlock_long(const unsigned long *lock, const unsigned long mask)
+{
+ unsigned long ret;
+ unsigned int m = 0;
+
+ do {
+ unsigned int loops = m;
+
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ /* extremely long waits: give the CPU back to the lock holder
+ * instead of pure spinning, then finish the remaining budget.
+ */
+ if (loops >= 65536) {
+ sched_yield();
+ loops -= 32768;
+ }
+#endif
+ /* large budgets burn cpu-relax pauses, the tail uses cheaper
+ * compiler barriers to spread wakeup times between threads.
+ */
+ for (; loops >= 60; loops --)
+ pl_cpu_relax();
+
+ for (; loops >= 1; loops--)
+ pl_barrier();
+
+ ret = pl_load(lock);
+ if (__builtin_expect(ret & mask, 0) == 0)
+ break;
+
+ /* the below produces an exponential growth with loops to lower
+ * values and still growing. This allows competing threads to
+ * wait different times once the threshold is reached.
+ */
+ m = ((m + (m >> 1)) + 2) & 0x3ffff;
+ } while (1);
+
+ return ret;
+}
+
+/* public entry point; out-of-line by default to keep callers small, can be
+ * forced inline by defining PLOCK_INLINE_EBO.
+ */
+# if defined(PLOCK_INLINE_EBO)
+__attribute__((unused,always_inline,no_instrument_function)) inline
+# else
+__attribute__((unused,noinline,no_instrument_function))
+# endif
+static unsigned long pl_wait_unlock_long(const unsigned long *lock, const unsigned long mask)
+{
+ return __pl_wait_unlock_long(lock, mask);
+}
+#endif /* PLOCK_DISABLE_EBO */
+
+/* This function waits for <lock> to release all bits covered by <mask>, and
+ * enforces an exponential backoff using CPU pauses to limit the pollution to
+ * the other threads' caches. The progression follows (2^N)-1, limited to 255
+ * iterations, which is way sufficient even for very large numbers of threads.
+ * The function slightly benefits from size optimization under gcc, but Clang
+ * cannot do it, so it's not done here, as it doesn't make a big difference.
+ * It is possible to disable exponential backoff (EBO) for debugging purposes
+ * by setting PLOCK_DISABLE_EBO, in which case the function will be replaced
+ * with a simpler macro. This may for example be useful to more easily track
+ * callers' CPU usage. The macro was not designed to be used outside of the
+ * functions defined here.
+ * NOTE(review): the code below actually grows by ~1.5x per round, capped at
+ * 0x3ffff, not (2^N)-1 / 255 as stated above -- confirm against upstream.
+ */
+#if defined(PLOCK_DISABLE_EBO)
+#define pl_wait_unlock_int(lock, mask) \
+ ({ \
+ unsigned int _r; \
+ do { \
+ pl_cpu_relax(); \
+ _r = pl_deref_int(lock); \
+ } while (_r & mask); \
+ _r; /* return value */ \
+ })
+#else
+__attribute__((unused,always_inline,no_instrument_function)) inline
+static unsigned int __pl_wait_unlock_int(const unsigned int *lock, const unsigned int mask)
+{
+ unsigned int ret;
+ unsigned int m = 0;
+
+ do {
+ unsigned int loops = m;
+
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ /* extremely long waits: give the CPU back to the lock holder
+ * instead of pure spinning, then finish the remaining budget.
+ */
+ if (loops >= 65536) {
+ sched_yield();
+ loops -= 32768;
+ }
+#endif
+ /* large budgets burn cpu-relax pauses (10 loop units each here),
+ * the tail uses cheaper compiler barriers to spread wakeups.
+ */
+ for (; loops >= 200; loops -= 10)
+ pl_cpu_relax();
+
+ for (; loops >= 1; loops--)
+ pl_barrier();
+
+ ret = pl_deref_int(lock);
+ if (__builtin_expect(ret & mask, 0) == 0)
+ break;
+
+ /* the below produces an exponential growth with loops to lower
+ * values and still growing. This allows competing threads to
+ * wait different times once the threshold is reached.
+ */
+ m = ((m + (m >> 1)) + 2) & 0x3ffff;
+ } while (1);
+
+ return ret;
+}
+
+/* public entry point; out-of-line by default to keep callers small, can be
+ * forced inline by defining PLOCK_INLINE_EBO.
+ */
+# if defined(PLOCK_INLINE_EBO)
+__attribute__((unused,always_inline,no_instrument_function)) inline
+# else
+__attribute__((unused,noinline,no_instrument_function))
+# endif
+static unsigned int pl_wait_unlock_int(const unsigned int *lock, const unsigned int mask)
+{
+ return __pl_wait_unlock_int(lock, mask);
+}
+#endif /* PLOCK_DISABLE_EBO */
+
+/* This function waits for <lock> to change from value <prev> and returns the
+ * new value. It enforces an exponential backoff using CPU pauses to limit the
+ * pollution to the other threads' caches. The progression follows (2^N)-1,
+ * limited to 255 iterations, which is way sufficient even for very large
+ * numbers of threads. It is designed to be called after a first test which
+ * retrieves the previous value, so it starts by waiting. The function slightly
+ * benefits from size optimization under gcc, but Clang cannot do it, so it's
+ * not done here, as it doesn't make a big difference.
+ */
+__attribute__((unused,noinline,no_instrument_function))
+static unsigned long pl_wait_new_long(const unsigned long *lock, const unsigned long prev)
+{
+ unsigned char m = 0;
+ unsigned long curr;
+
+ do {
+ /* m follows 0,1,3,7,...,255 (unsigned char wraps the growth),
+ * so each round pauses one more power-of-two of iterations.
+ */
+ unsigned char loops = m + 1;
+ m = (m << 1) + 1;
+ do {
+ pl_cpu_relax();
+ } while (__builtin_expect(--loops, 0));
+ curr = pl_deref_long(lock);
+ } while (__builtin_expect(curr == prev, 0));
+ return curr;
+}
+
+/* This function waits for <lock> to change from value <prev> and returns the
+ * new value. It enforces an exponential backoff using CPU pauses to limit the
+ * pollution to the other threads' caches. The progression follows (2^N)-1,
+ * limited to 255 iterations, which is way sufficient even for very large
+ * numbers of threads. It is designed to be called after a first test which
+ * retrieves the previous value, so it starts by waiting. The function slightly
+ * benefits from size optimization under gcc, but Clang cannot do it, so it's
+ * not done here, as it doesn't make a big difference.
+ */
+__attribute__((unused,noinline,no_instrument_function))
+static unsigned int pl_wait_new_int(const unsigned int *lock, const unsigned int prev)
+{
+ unsigned char m = 0;
+ unsigned int curr;
+
+ do {
+ /* same (2^N)-1 backoff as the long variant above */
+ unsigned char loops = m + 1;
+ m = (m << 1) + 1;
+ do {
+ pl_cpu_relax();
+ } while (__builtin_expect(--loops, 0));
+ curr = pl_deref_int(lock);
+ } while (__builtin_expect(curr == prev, 0));
+ return curr;
+}
+
+/* request shared read access (R), return non-zero on success, otherwise 0.
+ * Optimistic fast path: the lock word is peeked first so that no atomic
+ * operation is issued while a writer holds it; on collision, the speculative
+ * R increment is immediately rolled back. Each variant dispatches on the
+ * lock's size (8-byte long vs 4-byte int) at compile time; other sizes fail
+ * to link via the undefined __unsupported_argument_size_* function.
+ */
+#define pl_try_r(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_WL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_RL_1) & PLOCK64_WL_ANY; \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret((lock), PLOCK64_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_WL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_RL_1) & PLOCK32_WL_ANY; \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret((lock), PLOCK32_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_r__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request shared read access (R) and wait for it. In order not to disturb a W
+ * lock waiting for all readers to leave, we first check if a W lock is held
+ * before trying to claim the R lock. The initial cmpxchg attempts to grab an
+ * entirely free lock in one shot; otherwise each failed R grab is undone and
+ * retried after the writer bits are released.
+ */
+#define pl_take_r(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __old_r = pl_cmpxchg(__lk_r, 0, __set_r); \
+ if (__old_r) { \
+ while (1) { \
+ if (__old_r & __msk_r) \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ if (!(pl_ldadd_acq(__lk_r, __set_r) & __msk_r)) \
+ break; \
+ __old_r = pl_sub_lax(__lk_r, __set_r); \
+ } \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __old_r = pl_cmpxchg(__lk_r, 0, __set_r); \
+ if (__old_r) { \
+ while (1) { \
+ if (__old_r & __msk_r) \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ if (!(pl_ldadd_acq(__lk_r, __set_r) & __msk_r)) \
+ break; \
+ __old_r = pl_sub_lax(__lk_r, __set_r); \
+ } \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_r__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release the read access (R) lock: just drop our reader count with release
+ * ordering so prior reads cannot leak past the unlock.
+ */
+#define pl_drop_r(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_r__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_r__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* request a seek access (S), return non-zero on success, otherwise 0.
+ * Note that claiming S also claims one R count (SL_1 | RL_1 are added
+ * together), so an S holder is counted among the readers; on failure the
+ * whole speculative addition is rolled back.
+ */
+#define pl_try_s(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_SL_1 | PLOCK64_RL_1) & \
+ (PLOCK64_WL_ANY | PLOCK64_SL_ANY); \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK64_SL_1 | PLOCK64_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_SL_1 | PLOCK32_RL_1) & \
+ (PLOCK32_WL_ANY | PLOCK32_SL_ANY); \
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK32_SL_1 | PLOCK32_RL_1); \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_s__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request a seek access (S) and wait for it. The lock is immediately claimed,
+ * and only upon failure an exponential backoff is used. S locks rarely compete
+ * with W locks so S will generally not disturb W. As the S lock may be used as
+ * a spinlock, it's important to grab it as fast as possible.
+ */
+#define pl_take_s(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_SL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ while (1) { \
+ if (!__builtin_expect(pl_ldadd_acq(__lk_r, __set_r) & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_SL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ while (1) { \
+ if (!__builtin_expect(pl_ldadd_acq(__lk_r, __set_r) & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_s__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release the seek access (S) lock: drops both the S bit and the R count
+ * that pl_try_s()/pl_take_s() claimed together.
+ */
+#define pl_drop_s(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_SL_1 + PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_SL_1 + PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_s__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_s__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the S lock and go back to the R lock: only the S bit is removed,
+ * our R count (taken together with S) is kept.
+ */
+#define pl_stor(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_SL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_SL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_stor__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_stor__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* take the W lock under the S lock: adds the W bit, then waits for all
+ * readers other than ourselves (we still hold the R count claimed with S,
+ * hence the ~RL_1 in the mask) to leave before returning with W held.
+ */
+#define pl_stow(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_ldadd((lock), PLOCK64_WL_1); \
+ if (__pl_r & (PLOCK64_RL_ANY & ~PLOCK64_RL_1)) \
+ __pl_r = pl_wait_unlock_long((const unsigned long*)lock, (PLOCK64_RL_ANY & ~PLOCK64_RL_1)); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_ldadd((lock), PLOCK32_WL_1); \
+ if (__pl_r & (PLOCK32_RL_ANY & ~PLOCK32_RL_1)) \
+ __pl_r = pl_wait_unlock_int((const unsigned int*)lock, (PLOCK32_RL_ANY & ~PLOCK32_RL_1)); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_stow__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_stow__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the W lock and go back to the S lock: only the W bit is removed,
+ * keeping the S bit and R count from the earlier pl_take_s()/pl_stow().
+ */
+#define pl_wtos(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_WL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_WL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_wtos__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_wtos__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* drop the W lock and go back to the R lock: removes both W and S bits in
+ * one subtraction while keeping our R count.
+ */
+#define pl_wtor(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK64_WL_1 | PLOCK64_SL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret(lock, PLOCK32_WL_1 | PLOCK32_SL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_wtor__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_wtor__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* request a write access (W), return non-zero on success, otherwise 0.
+ *
+ * Below there is something important : by taking both W and S, we will cause
+ * an overflow of W at 4/5 of the maximum value that can be stored into W due
+ * to the fact that S is 2 bits, so we're effectively adding 5 to the word
+ * composed by W:S. But for all words multiple of 4 bits, the maximum value is
+ * multiple of 15 thus of 5. So the largest value we can store with all bits
+ * set to one will be met by adding 5, and then adding 5 again will place value
+ * 1 in W and value 0 in S, so we never leave W with 0. Also, even upon such an
+ * overflow, there's no risk to confuse it with an atomic lock because R is not
+ * null since it will not have overflown. For 32-bit locks, this situation
+ * happens when exactly 13108 threads try to grab the lock at once, W=1, S=0
+ * and R=13108. For 64-bit locks, it happens at 858993460 concurrent writers
+ * where W=1, S=0 and R=858993460.
+ */
+#define pl_try_w(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);\
+ if (__builtin_expect(__pl_r & (PLOCK64_WL_ANY | PLOCK64_SL_ANY), 0)) { \
+ /* a writer, seeker or atomic is present, let's leave */ \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1);\
+ __pl_r &= (PLOCK64_WL_ANY | PLOCK64_SL_ANY); /* return value */\
+ } else { \
+ /* wait for all other readers to leave */ \
+ while (__pl_r) \
+ __pl_r = pl_deref_long(lock) - \
+ (PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock); \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);\
+ if (__builtin_expect(__pl_r & (PLOCK32_WL_ANY | PLOCK32_SL_ANY), 0)) { \
+ /* a writer, seeker or atomic is present, let's leave */ \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1);\
+ __pl_r &= (PLOCK32_WL_ANY | PLOCK32_SL_ANY); /* return value */\
+ } else { \
+ /* wait for all other readers to leave */ \
+ while (__pl_r) \
+ __pl_r = pl_deref_int(lock) - \
+ (PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_w__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request a write access (W) and wait for it. The lock is immediately claimed,
+ * and only upon failure an exponential backoff is used. W claims W+S+R at
+ * once; once no other writer/seeker remains, it waits for the other readers'
+ * counts (all R except our own RL_1) to drain before returning.
+ */
+#define pl_take_w(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (!__builtin_expect(__pl_r & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ /* wait for all other readers to leave */ \
+ if (__builtin_expect(__pl_r & PLOCK64_RL_ANY, 0)) \
+ __pl_r = pl_wait_unlock_long(__lk_r, (PLOCK64_RL_ANY & ~PLOCK64_RL_1)) - __set_r; \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (!__builtin_expect(__pl_r & __msk_r, 0)) \
+ break; \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ /* wait for all other readers to leave */ \
+ if (__builtin_expect(__pl_r & PLOCK32_RL_ANY, 0)) \
+ __pl_r = pl_wait_unlock_int(__lk_r, (PLOCK32_RL_ANY & ~PLOCK32_RL_1)) - __set_r; \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_w__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* drop the write (W) lock entirely: releases the W, S and R parts claimed
+ * together at lock time, with release ordering.
+ */
+#define pl_drop_w(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1 | PLOCK64_SL_1 | PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1 | PLOCK32_SL_1 | PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_w__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_w__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Try to upgrade from R to S, return non-zero on success, otherwise 0.
+ * This lock will fail if S or W are already held. In case of failure to grab
+ * the lock, it MUST NOT be retried without first dropping R, or it may never
+ * complete due to S waiting for R to leave before upgrading to W.
+ * Only SL_1 is added here since the caller already owns an R count.
+ */
+#define pl_try_rtos(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r; \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_SL_1) & (PLOCK64_WL_ANY | PLOCK64_SL_ANY);\
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK64_SL_1); \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r; \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_SL_1) & (PLOCK32_WL_ANY | PLOCK32_SL_ANY);\
+ if (__builtin_expect(__pl_r, 0)) \
+ pl_sub_noret_lax((lock), PLOCK32_SL_1); \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtos__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtos__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+
+/* Try to upgrade from R to W, return non-zero on success, otherwise 0.
+ * This lock will fail if S or W are already held. In case of failure to grab
+ * the lock, it MUST NOT be retried without first dropping R, or it may never
+ * complete due to S waiting for R to leave before upgrading to W. It waits for
+ * the last readers to leave. On a W/S collision, the speculative W+S addition
+ * is removed again; a zero result from that rollback means the conflicting
+ * holder already left, so the grab is simply retried.
+ */
+#define pl_try_rtow(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_SL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY | PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ pl_barrier(); \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ if (pl_ldadd_lax(__lk_r, - __set_r)) \
+ break; /* the caller needs to drop the lock now */ \
+ continue; /* lock was released, try again */ \
+ } \
+ /* ok we're the only writer, wait for readers to leave */ \
+ while (__builtin_expect(__pl_r, 0)) \
+ __pl_r = pl_deref_long(__lk_r) - (PLOCK64_WL_1|PLOCK64_SL_1|PLOCK64_RL_1); \
+ /* now return with __pl_r = 0 */ \
+ break; \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_SL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY | PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ pl_barrier(); \
+ while (1) { \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ if (pl_ldadd_lax(__lk_r, - __set_r)) \
+ break; /* the caller needs to drop the lock now */ \
+ continue; /* lock was released, try again */ \
+ } \
+ /* ok we're the only writer, wait for readers to leave */ \
+ while (__builtin_expect(__pl_r, 0)) \
+ __pl_r = pl_deref_int(__lk_r) - (PLOCK32_WL_1|PLOCK32_SL_1|PLOCK32_RL_1); \
+ /* now return with __pl_r = 0 */ \
+ break; \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtow__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtow__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+
+/* request atomic write access (A), return non-zero on success, otherwise 0.
+ * It's a bit tricky as we only use the W bits for this and want to distinguish
+ * between other atomic users and regular lock users. We have to give up if an
+ * S lock appears. It's possible that such a lock stays hidden in the W bits
+ * after an overflow, but in this case R is still held, ensuring we stay in the
+ * loop until we discover the conflict. The lock only return successfully if all
+ * readers are gone (or converted to A).
+ */
+#define pl_try_a(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK64_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK32_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_a__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request atomic write access (A) and wait for it. See comments in pl_try_a() for
+ * explanations. On an S conflict, the W claim is rolled back and retried after
+ * the S holders leave; otherwise we just spin until all readers are gone.
+ */
+#define pl_take_a(lock) \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1; \
+ register unsigned long __msk_r = PLOCK64_SL_ANY; \
+ register unsigned long __pl_r; \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_ANY, 0)) { \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ continue; \
+ } \
+ /* wait for all readers to leave or upgrade */ \
+ pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax(); \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1; \
+ register unsigned int __msk_r = PLOCK32_SL_ANY; \
+ register unsigned int __pl_r; \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_ANY, 0)) { \
+ if (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r); \
+ continue; \
+ } \
+ /* wait for all readers to leave or upgrade */ \
+ pl_cpu_relax(); pl_cpu_relax(); pl_cpu_relax(); \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_a__(__FILE__,__LINE__); \
+ 0; \
+ })
+
+/* release atomic write access (A) lock: only the single W unit claimed by
+ * pl_try_a()/pl_take_a() is dropped, with release ordering.
+ */
+#define pl_drop_a(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_a__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_a__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Downgrade A to R. Inc(R), dec(W) then wait for W==0. The two adjustments
+ * are performed as a single addition of (RL_1 - WL_1); the subsequent wait
+ * lets any other concurrent A holders finish before we proceed as a reader.
+ */
+#define pl_ator(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_RL_1 - PLOCK64_WL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ while (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ __pl_r = pl_wait_unlock_long(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_RL_1 - PLOCK32_WL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ while (__builtin_expect(__pl_r & __msk_r, 0)) { \
+ __pl_r = pl_wait_unlock_int(__lk_r, __msk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ator__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_ator__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Try to upgrade from R to A, return non-zero on success, otherwise 0.
+ * This lock will fail if S is held or appears while waiting (typically due to
+ * a previous grab that was disguised as a W due to an overflow). In case of
+ * failure to grab the lock, it MUST NOT be retried without first dropping R,
+ * or it may never complete due to S waiting for R to leave before upgrading
+ * to W. The lock succeeds once there's no more R (ie all of them have either
+ * completed or were turned to A). The caller's own R count is converted by
+ * the single addition of (WL_1 - RL_1).
+ */
+#define pl_try_rtoa(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long __pl_r = pl_deref_long(lock) & PLOCK64_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK64_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK64_WL_1 - PLOCK64_RL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK64_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_long(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int __pl_r = pl_deref_int(lock) & PLOCK32_SL_ANY; \
+ pl_barrier(); \
+ if (!__builtin_expect(__pl_r, 0)) { \
+ __pl_r = pl_ldadd_acq((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
+ while (1) { \
+ if (__builtin_expect(__pl_r & PLOCK32_SL_ANY, 0)) { \
+ pl_sub_noret_lax((lock), PLOCK32_WL_1 - PLOCK32_RL_1); \
+ break; /* return !__pl_r */ \
+ } \
+ __pl_r &= PLOCK32_RL_ANY; \
+ if (!__builtin_expect(__pl_r, 0)) \
+ break; /* return !__pl_r */ \
+ __pl_r = pl_deref_int(lock); \
+ } \
+ } \
+ !__pl_r; /* return value */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_rtoa__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_rtoa__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+
+/*
+ * The following operations cover the multiple writers model : U->R->J->C->A
+ */
+
+
+/* Upgrade R to J. Inc(W) then wait for R==W or S != 0 */
+#define pl_rtoj(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd_acq(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1;\
+ register unsigned char __m = 0; \
+ while (!(__pl_r & PLOCK64_SL_ANY) && \
+ (__pl_r / PLOCK64_WL_1 != (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1)) { \
+ unsigned char __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd_acq(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1;\
+ register unsigned char __m = 0; \
+ while (!(__pl_r & PLOCK32_SL_ANY) && \
+ (__pl_r / PLOCK32_WL_1 != (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1)) { \
+ unsigned char __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_rtoj__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_rtoj__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade J to C. Set S. Only one thread needs to do it though it's idempotent */
+#define pl_jtoc(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_deref_long(__lk_r); \
+ if (!(__pl_r & PLOCK64_SL_ANY)) \
+ pl_or_noret(__lk_r, PLOCK64_SL_1); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_deref_int(__lk_r); \
+ if (!(__pl_r & PLOCK32_SL_ANY)) \
+ pl_or_noret(__lk_r, PLOCK32_SL_1); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_jtoc__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_jtoc__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade R to C. Inc(W), then wait for R==W and set S, unless S is already set */
+#define pl_rtoc(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd_acq(__lk_r, PLOCK64_WL_1) + PLOCK64_WL_1;\
+ register unsigned char __m = 0; \
+ while (__builtin_expect(!(__pl_r & PLOCK64_SL_ANY), 0)) { \
+ unsigned char __loops; \
+ if (__pl_r / PLOCK64_WL_1 == (__pl_r & PLOCK64_RL_ANY) / PLOCK64_RL_1) { \
+ pl_or_noret(__lk_r, PLOCK64_SL_1); \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd_acq(__lk_r, PLOCK32_WL_1) + PLOCK32_WL_1;\
+ register unsigned char __m = 0; \
+ while (__builtin_expect(!(__pl_r & PLOCK32_SL_ANY), 0)) { \
+ unsigned char __loops; \
+ if (__pl_r / PLOCK32_WL_1 == (__pl_r & PLOCK32_RL_ANY) / PLOCK32_RL_1) { \
+ pl_or_noret(__lk_r, PLOCK32_SL_1); \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+		void __unsupported_argument_size_for_pl_rtoc__(char *,int);                   \
+		if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8))      \
+			__unsupported_argument_size_for_pl_rtoc__(__FILE__,__LINE__);         \
+ }) \
+)
+
+/* Drop the claim (C) lock : R--,W-- then clear S if !R */
+#define pl_drop_c(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = - PLOCK64_RL_1 - PLOCK64_WL_1; \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ if (!(__pl_r & PLOCK64_RL_ANY)) \
+ pl_and_noret(__lk_r, ~PLOCK64_SL_1); \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = - PLOCK32_RL_1 - PLOCK32_WL_1; \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, __set_r) + __set_r; \
+ if (!(__pl_r & PLOCK32_RL_ANY)) \
+ pl_and_noret(__lk_r, ~PLOCK32_SL_1); \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_c__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_c__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Upgrade C to A. R-- then wait for !S or clear S if !R */
+#define pl_ctoa(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __pl_r = pl_ldadd(__lk_r, -PLOCK64_RL_1) - PLOCK64_RL_1;\
+ while (__pl_r & PLOCK64_SL_ANY) { \
+ if (!(__pl_r & PLOCK64_RL_ANY)) { \
+ pl_and_noret(__lk_r, ~PLOCK64_SL_1); \
+ break; \
+ } \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __pl_r = pl_ldadd(__lk_r, -PLOCK32_RL_1) - PLOCK32_RL_1; \
+ while (__pl_r & PLOCK32_SL_ANY) { \
+ if (!(__pl_r & PLOCK32_RL_ANY)) { \
+ pl_and_noret(__lk_r, ~PLOCK32_SL_1); \
+ break; \
+ } \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_ctoa__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_ctoa__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* downgrade the atomic write access lock (A) to join (J) */
+#define pl_atoj(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_add_noret(lock, PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_add_noret(lock, PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_atoj__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_atoj__(__FILE__,__LINE__); \
+ }) \
+)
+
+/* Returns non-zero if the thread calling it is the last writer, otherwise zero. It is
+ * designed to be called before pl_drop_j(), pl_drop_c() or pl_drop_a() for operations
+ * which need to be called only once.
+ */
+#define pl_last_writer(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ !(pl_deref_long(lock) & PLOCK64_WL_2PL); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ !(pl_deref_int(lock) & PLOCK32_WL_2PL); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_last_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_last_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* attempt to get an exclusive write access via the J lock and wait for it.
+ * Only one thread may succeed in this operation. It will not conflict with
+ * other users and will first wait for all writers to leave, then for all
+ * readers to leave before starting. This offers a solution to obtain an
+ * exclusive access to a shared resource in the R/J/C/A model. A concurrent
+ * take_a() will wait for this one to finish first. Using a CAS instead of XADD
+ * should make the operation converge slightly faster. Returns non-zero on
+ * success otherwise 0.
+ */
+#define pl_try_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r; \
+ register unsigned char __m; \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) { \
+ unsigned char __loops; \
+			/* but rollback on other writers */                                   \
+ if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = 0; /* failed to get the lock */ \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ __pl_r; /* return value, cannot be null on success */ \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r; \
+ register unsigned char __m; \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ __pl_r = 0; /* failed to get the lock */ \
+ break; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ __pl_r; /* return value, cannot be null on success */ \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_try_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_try_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* request an exclusive write access via the J lock and wait for it. Only one
+ * thread may succeed in this operation. It will not conflict with other users
+ * and will first wait for all writers to leave, then for all readers to leave
+ * before starting. This offers a solution to obtain an exclusive access to a
+ * shared resource in the R/J/C/A model. A concurrent take_a() will wait for
+ * this one to finish first. Using a CAS instead of XADD should make the
+ * operation converge slightly faster.
+ */
+#define pl_take_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ __label__ __retry; \
+ register unsigned long *__lk_r = (unsigned long *)(lock); \
+ register unsigned long __set_r = PLOCK64_WL_1 | PLOCK64_RL_1; \
+ register unsigned long __msk_r = PLOCK64_WL_ANY; \
+ register unsigned long __pl_r; \
+ register unsigned char __m; \
+ __retry: \
+ pl_wait_unlock_long(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK64_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK64_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ goto __retry; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_long(__lk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ __label__ __retry; \
+ register unsigned int *__lk_r = (unsigned int *)(lock); \
+ register unsigned int __set_r = PLOCK32_WL_1 | PLOCK32_RL_1; \
+ register unsigned int __msk_r = PLOCK32_WL_ANY; \
+ register unsigned int __pl_r; \
+ register unsigned char __m; \
+ __retry: \
+ pl_wait_unlock_int(__lk_r, __msk_r); \
+ __pl_r = pl_ldadd_acq(__lk_r, __set_r) + __set_r; \
+ /* wait for all other readers to leave */ \
+ __m = 0; \
+ while (__builtin_expect(__pl_r & PLOCK32_RL_2PL, 0)) { \
+ unsigned char __loops; \
+ /* but rollback on other writers */ \
+ if (__builtin_expect(__pl_r & PLOCK32_WL_2PL, 0)) { \
+ pl_sub_noret_lax(__lk_r, __set_r); \
+ goto __retry; \
+ } \
+ __loops = __m + 1; \
+ __m = (__m << 1) + 1; \
+ do { \
+ pl_cpu_relax(); \
+ pl_cpu_relax(); \
+ } while (--__loops); \
+ __pl_r = pl_deref_int(__lk_r); \
+ } \
+ pl_barrier(); \
+ 0; \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_take_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_take_j__(__FILE__,__LINE__); \
+ 0; \
+ }) \
+)
+
+/* drop the join (J) lock entirely */
+#define pl_drop_j(lock) ( \
+ (sizeof(long) == 8 && sizeof(*(lock)) == 8) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK64_WL_1 | PLOCK64_RL_1); \
+ }) : (sizeof(*(lock)) == 4) ? ({ \
+ pl_barrier(); \
+ pl_sub_noret_rel(lock, PLOCK32_WL_1 | PLOCK32_RL_1); \
+ }) : ({ \
+ void __unsupported_argument_size_for_pl_drop_j__(char *,int); \
+ if (sizeof(*(lock)) != 4 && (sizeof(long) != 8 || sizeof(*(lock)) != 8)) \
+ __unsupported_argument_size_for_pl_drop_j__(__FILE__,__LINE__); \
+ }) \
+)
+
+/*
+ * The part below is for Low Overhead R/W locks (LORW). These ones are not
+ * upgradable and not necessarily fair but they try to be fast when uncontended
+ * and to limit the cost and perturbation during contention. Writers always
+ * have precedence over readers to preserve latency as much as possible.
+ *
+ * The principle is to offer a fast no-contention path and a limited total
+ * number of writes for the contended path. Since R/W locks are expected to be
+ * used in situations where there is a benefit in separating reads from writes,
+ * it is expected that reads are common (typ >= 50%) and that there is often at
+ * least one reader (otherwise a spinlock wouldn't be a problem). As such, a
+ * reader will try to pass instantly, detect contention and immediately retract
+ * and wait in the queue in case there is contention. A writer will first also
+ * try to pass instantly, and if it fails due to pending readers, it will mark
+ * that it's waiting so that readers stop entering. This will leave the writer
+ * waiting as close as possible to the point of being granted access. New
+ * writers will also notice this previous contention and will wait outside.
+ * This means that a successful access for a reader or a writer requires a
+ * single CAS, and a contended attempt will require one failed CAS and one
+ * successful XADD for a reader, or an optional OR and a N+1 CAS for the
+ * writer.
+ *
+ * A counter of shared users indicates the number of active readers, while a
+ * (single-bit) counter of exclusive writers indicates whether the lock is
+ * currently held for writes. This distinction also permits to use a single
+ * function to release the lock if desired, since the exclusive bit indicates
+ * the state of the caller of unlock(). The WRQ bit is cleared during the
+ * unlock.
+ *
+ * Layout: (32/64 bit):
+ * 31 2 1 0
+ * +-----------+--------------+-----+-----+
+ * | | SHR | WRQ | EXC |
+ * +-----------+--------------+-----+-----+
+ *
+ * In order to minimize operations, the WRQ bit is held during EXC so that the
+ * write waiter that had to fight for EXC doesn't have to release WRQ during
+ * its operations, and will just drop it along with EXC upon unlock.
+ *
+ * This means the following costs:
+ * reader:
+ * success: 1 CAS
+ * failure: 1 CAS + 1 XADD
+ * unlock: 1 SUB
+ * writer:
+ * success: 1 RD + 1 CAS
+ * failure: 1 RD + 1 CAS + 0/1 OR + N CAS
+ * unlock: 1 AND
+ */
+
+#define PLOCK_LORW_EXC_BIT ((sizeof(long) == 8) ? 0 : 0)
+#define PLOCK_LORW_EXC_SIZE ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_EXC_BASE (1UL << PLOCK_LORW_EXC_BIT)
+#define PLOCK_LORW_EXC_MASK (((1UL << PLOCK_LORW_EXC_SIZE) - 1UL) << PLOCK_LORW_EXC_BIT)
+
+#define PLOCK_LORW_WRQ_BIT ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_WRQ_SIZE ((sizeof(long) == 8) ? 1 : 1)
+#define PLOCK_LORW_WRQ_BASE (1UL << PLOCK_LORW_WRQ_BIT)
+#define PLOCK_LORW_WRQ_MASK (((1UL << PLOCK_LORW_WRQ_SIZE) - 1UL) << PLOCK_LORW_WRQ_BIT)
+
+#define PLOCK_LORW_SHR_BIT ((sizeof(long) == 8) ? 2 : 2)
+#define PLOCK_LORW_SHR_SIZE ((sizeof(long) == 8) ? 30 : 30)
+#define PLOCK_LORW_SHR_BASE (1UL << PLOCK_LORW_SHR_BIT)
+#define PLOCK_LORW_SHR_MASK (((1UL << PLOCK_LORW_SHR_SIZE) - 1UL) << PLOCK_LORW_SHR_BIT)
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_rdlock(unsigned long *lock)
+{
+ unsigned long lk = 0;
+
+ /* First, assume we're alone and try to get the read lock (fast path).
+ * It often works because read locks are often used on low-contention
+ * structs.
+ */
+ lk = pl_cmpxchg(lock, 0, PLOCK_LORW_SHR_BASE);
+ if (!lk)
+ return;
+
+ /* so we were not alone, make sure there's no writer waiting for the
+ * lock to be empty of visitors.
+ */
+ if (lk & PLOCK_LORW_WRQ_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#endif
+
+ /* count us as visitor among others */
+ lk = pl_ldadd_acq(lock, PLOCK_LORW_SHR_BASE);
+
+ /* wait for end of exclusive access if any */
+ if (lk & PLOCK_LORW_EXC_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#endif
+}
+
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_wrlock(unsigned long *lock)
+{
+ unsigned long lk = 0;
+ unsigned long old = 0;
+
+ /* first, make sure another writer is not already blocked waiting for
+ * readers to leave. Note that tests have shown that it can be even
+ * faster to avoid the first check and to unconditionally wait.
+ */
+ lk = pl_deref_long(lock);
+ if (__builtin_expect(lk & PLOCK_LORW_WRQ_MASK, 1))
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_WRQ_MASK);
+#endif
+
+ do {
+ /* let's check for the two sources of contention at once */
+
+ if (__builtin_expect(lk & (PLOCK_LORW_SHR_MASK | PLOCK_LORW_EXC_MASK), 1)) {
+ /* check if there are still readers coming. If so, close the door and
+ * wait for them to leave.
+ */
+ if (lk & PLOCK_LORW_SHR_MASK) {
+ /* note below, an OR is significantly cheaper than BTS or XADD */
+ if (!(lk & PLOCK_LORW_WRQ_MASK))
+ pl_or_noret(lock, PLOCK_LORW_WRQ_BASE);
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_SHR_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_SHR_MASK);
+#endif
+ }
+
+ /* And also wait for a previous writer to finish. */
+ if (lk & PLOCK_LORW_EXC_MASK)
+#if defined(PLOCK_LORW_INLINE_WAIT) && !defined(PLOCK_DISABLE_EBO)
+ lk = __pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#else
+ lk = pl_wait_unlock_long(lock, PLOCK_LORW_EXC_MASK);
+#endif
+ }
+
+ /* A fresh new reader may appear right now if there were none
+ * above and we didn't close the door.
+ */
+ old = lk & ~PLOCK_LORW_SHR_MASK & ~PLOCK_LORW_EXC_MASK;
+ lk = pl_cmpxchg(lock, old, old | PLOCK_LORW_EXC_BASE);
+ } while (lk != old);
+
+	/* done, not waiting anymore; the WRQ bit, if any, will be dropped by the
+	 * unlock
+	 */
+}
+
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_rdunlock(unsigned long *lock)
+{
+ pl_sub_noret_rel(lock, PLOCK_LORW_SHR_BASE);
+}
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_wrunlock(unsigned long *lock)
+{
+ pl_and_noret_rel(lock, ~(PLOCK_LORW_WRQ_MASK | PLOCK_LORW_EXC_MASK));
+}
+
+__attribute__((unused,always_inline,no_instrument_function))
+static inline void pl_lorw_unlock(unsigned long *lock)
+{
+ if (pl_deref_long(lock) & PLOCK_LORW_EXC_MASK)
+ pl_lorw_wrunlock(lock);
+ else
+ pl_lorw_rdunlock(lock);
+}
+
+#endif /* PL_PLOCK_H */
diff --git a/include/import/sha1.h b/include/import/sha1.h
new file mode 100644
index 0000000..33ee530
--- /dev/null
+++ b/include/import/sha1.h
@@ -0,0 +1,35 @@
+/*
+ * Based on the git SHA1 Implementation.
+ *
+ * Copyright (C) 2009-2015, Linus Torvalds and others.
+ *
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was initially based on the Mozilla SHA1 implementation, although
+ * none of the original Mozilla code remains.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct {
+ unsigned long long size;
+ unsigned int H[5];
+ unsigned int W[16];
+} blk_SHA_CTX;
+
+void blk_SHA1_Init(blk_SHA_CTX *ctx);
+void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, unsigned long len);
+void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx);
diff --git a/include/import/slz-tables.h b/include/import/slz-tables.h
new file mode 100644
index 0000000..0b3a5b9
--- /dev/null
+++ b/include/import/slz-tables.h
@@ -0,0 +1,257 @@
+/* Fixed Huffman table as per RFC1951.
+ *
+ * Lit Value Bits Codes
+ * --------- ---- -----
+ * 0 - 143 8 00110000 through 10111111
+ * 144 - 255 9 110010000 through 111111111
+ * 256 - 279 7 0000000 through 0010111
+ * 280 - 287 8 11000000 through 11000111
+ *
+ * The codes are encoded in reverse, the high bit of the code appears encoded
+ * as bit 0. The table is built by mkhuff.sh. The 16 bits are encoded this way :
+ * - bits 0..3 : bits
+ * - bits 4..12 : code
+ */
+static const uint16_t fixed_huff[288] = {
+ 0x00c8, 0x08c8, 0x04c8, 0x0cc8, 0x02c8, 0x0ac8, 0x06c8, 0x0ec8, // 0
+ 0x01c8, 0x09c8, 0x05c8, 0x0dc8, 0x03c8, 0x0bc8, 0x07c8, 0x0fc8, // 8
+ 0x0028, 0x0828, 0x0428, 0x0c28, 0x0228, 0x0a28, 0x0628, 0x0e28, // 16
+ 0x0128, 0x0928, 0x0528, 0x0d28, 0x0328, 0x0b28, 0x0728, 0x0f28, // 24
+ 0x00a8, 0x08a8, 0x04a8, 0x0ca8, 0x02a8, 0x0aa8, 0x06a8, 0x0ea8, // 32
+ 0x01a8, 0x09a8, 0x05a8, 0x0da8, 0x03a8, 0x0ba8, 0x07a8, 0x0fa8, // 40
+ 0x0068, 0x0868, 0x0468, 0x0c68, 0x0268, 0x0a68, 0x0668, 0x0e68, // 48
+ 0x0168, 0x0968, 0x0568, 0x0d68, 0x0368, 0x0b68, 0x0768, 0x0f68, // 56
+ 0x00e8, 0x08e8, 0x04e8, 0x0ce8, 0x02e8, 0x0ae8, 0x06e8, 0x0ee8, // 64
+ 0x01e8, 0x09e8, 0x05e8, 0x0de8, 0x03e8, 0x0be8, 0x07e8, 0x0fe8, // 72
+ 0x0018, 0x0818, 0x0418, 0x0c18, 0x0218, 0x0a18, 0x0618, 0x0e18, // 80
+ 0x0118, 0x0918, 0x0518, 0x0d18, 0x0318, 0x0b18, 0x0718, 0x0f18, // 88
+ 0x0098, 0x0898, 0x0498, 0x0c98, 0x0298, 0x0a98, 0x0698, 0x0e98, // 96
+ 0x0198, 0x0998, 0x0598, 0x0d98, 0x0398, 0x0b98, 0x0798, 0x0f98, // 104
+ 0x0058, 0x0858, 0x0458, 0x0c58, 0x0258, 0x0a58, 0x0658, 0x0e58, // 112
+ 0x0158, 0x0958, 0x0558, 0x0d58, 0x0358, 0x0b58, 0x0758, 0x0f58, // 120
+ 0x00d8, 0x08d8, 0x04d8, 0x0cd8, 0x02d8, 0x0ad8, 0x06d8, 0x0ed8, // 128
+ 0x01d8, 0x09d8, 0x05d8, 0x0dd8, 0x03d8, 0x0bd8, 0x07d8, 0x0fd8, // 136
+ 0x0139, 0x1139, 0x0939, 0x1939, 0x0539, 0x1539, 0x0d39, 0x1d39, // 144
+ 0x0339, 0x1339, 0x0b39, 0x1b39, 0x0739, 0x1739, 0x0f39, 0x1f39, // 152
+ 0x00b9, 0x10b9, 0x08b9, 0x18b9, 0x04b9, 0x14b9, 0x0cb9, 0x1cb9, // 160
+ 0x02b9, 0x12b9, 0x0ab9, 0x1ab9, 0x06b9, 0x16b9, 0x0eb9, 0x1eb9, // 168
+ 0x01b9, 0x11b9, 0x09b9, 0x19b9, 0x05b9, 0x15b9, 0x0db9, 0x1db9, // 176
+ 0x03b9, 0x13b9, 0x0bb9, 0x1bb9, 0x07b9, 0x17b9, 0x0fb9, 0x1fb9, // 184
+ 0x0079, 0x1079, 0x0879, 0x1879, 0x0479, 0x1479, 0x0c79, 0x1c79, // 192
+ 0x0279, 0x1279, 0x0a79, 0x1a79, 0x0679, 0x1679, 0x0e79, 0x1e79, // 200
+ 0x0179, 0x1179, 0x0979, 0x1979, 0x0579, 0x1579, 0x0d79, 0x1d79, // 208
+ 0x0379, 0x1379, 0x0b79, 0x1b79, 0x0779, 0x1779, 0x0f79, 0x1f79, // 216
+ 0x00f9, 0x10f9, 0x08f9, 0x18f9, 0x04f9, 0x14f9, 0x0cf9, 0x1cf9, // 224
+ 0x02f9, 0x12f9, 0x0af9, 0x1af9, 0x06f9, 0x16f9, 0x0ef9, 0x1ef9, // 232
+ 0x01f9, 0x11f9, 0x09f9, 0x19f9, 0x05f9, 0x15f9, 0x0df9, 0x1df9, // 240
+ 0x03f9, 0x13f9, 0x0bf9, 0x1bf9, 0x07f9, 0x17f9, 0x0ff9, 0x1ff9, // 248
+ 0x0007, 0x0407, 0x0207, 0x0607, 0x0107, 0x0507, 0x0307, 0x0707, // 256
+ 0x0087, 0x0487, 0x0287, 0x0687, 0x0187, 0x0587, 0x0387, 0x0787, // 264
+ 0x0047, 0x0447, 0x0247, 0x0647, 0x0147, 0x0547, 0x0347, 0x0747, // 272
+ 0x0038, 0x0838, 0x0438, 0x0c38, 0x0238, 0x0a38, 0x0638, 0x0e38 // 280
+};
+
+/* length from 3 to 258 converted to bit strings for use with fixed huffman
+ * coding. It was built by tools/dump_len.c. The format is the following :
+ * - bits 0..15 = code
+ * - bits 16..19 = #bits
+ */
+static const uint32_t len_fh[259] = {
+ 0x000000, 0x000000, 0x000000, 0x070040, /* 0-3 */
+ 0x070020, 0x070060, 0x070010, 0x070050, /* 4-7 */
+ 0x070030, 0x070070, 0x070008, 0x080048, /* 8-11 */
+ 0x0800c8, 0x080028, 0x0800a8, 0x080068, /* 12-15 */
+ 0x0800e8, 0x080018, 0x080098, 0x090058, /* 16-19 */
+ 0x0900d8, 0x090158, 0x0901d8, 0x090038, /* 20-23 */
+ 0x0900b8, 0x090138, 0x0901b8, 0x090078, /* 24-27 */
+ 0x0900f8, 0x090178, 0x0901f8, 0x090004, /* 28-31 */
+ 0x090084, 0x090104, 0x090184, 0x0a0044, /* 32-35 */
+ 0x0a00c4, 0x0a0144, 0x0a01c4, 0x0a0244, /* 36-39 */
+ 0x0a02c4, 0x0a0344, 0x0a03c4, 0x0a0024, /* 40-43 */
+ 0x0a00a4, 0x0a0124, 0x0a01a4, 0x0a0224, /* 44-47 */
+ 0x0a02a4, 0x0a0324, 0x0a03a4, 0x0a0064, /* 48-51 */
+ 0x0a00e4, 0x0a0164, 0x0a01e4, 0x0a0264, /* 52-55 */
+ 0x0a02e4, 0x0a0364, 0x0a03e4, 0x0a0014, /* 56-59 */
+ 0x0a0094, 0x0a0114, 0x0a0194, 0x0a0214, /* 60-63 */
+ 0x0a0294, 0x0a0314, 0x0a0394, 0x0b0054, /* 64-67 */
+ 0x0b00d4, 0x0b0154, 0x0b01d4, 0x0b0254, /* 68-71 */
+ 0x0b02d4, 0x0b0354, 0x0b03d4, 0x0b0454, /* 72-75 */
+ 0x0b04d4, 0x0b0554, 0x0b05d4, 0x0b0654, /* 76-79 */
+ 0x0b06d4, 0x0b0754, 0x0b07d4, 0x0b0034, /* 80-83 */
+ 0x0b00b4, 0x0b0134, 0x0b01b4, 0x0b0234, /* 84-87 */
+ 0x0b02b4, 0x0b0334, 0x0b03b4, 0x0b0434, /* 88-91 */
+ 0x0b04b4, 0x0b0534, 0x0b05b4, 0x0b0634, /* 92-95 */
+ 0x0b06b4, 0x0b0734, 0x0b07b4, 0x0b0074, /* 96-99 */
+ 0x0b00f4, 0x0b0174, 0x0b01f4, 0x0b0274, /* 100-103 */
+ 0x0b02f4, 0x0b0374, 0x0b03f4, 0x0b0474, /* 104-107 */
+ 0x0b04f4, 0x0b0574, 0x0b05f4, 0x0b0674, /* 108-111 */
+ 0x0b06f4, 0x0b0774, 0x0b07f4, 0x0c0003, /* 112-115 */
+ 0x0c0103, 0x0c0203, 0x0c0303, 0x0c0403, /* 116-119 */
+ 0x0c0503, 0x0c0603, 0x0c0703, 0x0c0803, /* 120-123 */
+ 0x0c0903, 0x0c0a03, 0x0c0b03, 0x0c0c03, /* 124-127 */
+ 0x0c0d03, 0x0c0e03, 0x0c0f03, 0x0d0083, /* 128-131 */
+ 0x0d0183, 0x0d0283, 0x0d0383, 0x0d0483, /* 132-135 */
+ 0x0d0583, 0x0d0683, 0x0d0783, 0x0d0883, /* 136-139 */
+ 0x0d0983, 0x0d0a83, 0x0d0b83, 0x0d0c83, /* 140-143 */
+ 0x0d0d83, 0x0d0e83, 0x0d0f83, 0x0d1083, /* 144-147 */
+ 0x0d1183, 0x0d1283, 0x0d1383, 0x0d1483, /* 148-151 */
+ 0x0d1583, 0x0d1683, 0x0d1783, 0x0d1883, /* 152-155 */
+ 0x0d1983, 0x0d1a83, 0x0d1b83, 0x0d1c83, /* 156-159 */
+ 0x0d1d83, 0x0d1e83, 0x0d1f83, 0x0d0043, /* 160-163 */
+ 0x0d0143, 0x0d0243, 0x0d0343, 0x0d0443, /* 164-167 */
+ 0x0d0543, 0x0d0643, 0x0d0743, 0x0d0843, /* 168-171 */
+ 0x0d0943, 0x0d0a43, 0x0d0b43, 0x0d0c43, /* 172-175 */
+ 0x0d0d43, 0x0d0e43, 0x0d0f43, 0x0d1043, /* 176-179 */
+ 0x0d1143, 0x0d1243, 0x0d1343, 0x0d1443, /* 180-183 */
+ 0x0d1543, 0x0d1643, 0x0d1743, 0x0d1843, /* 184-187 */
+ 0x0d1943, 0x0d1a43, 0x0d1b43, 0x0d1c43, /* 188-191 */
+ 0x0d1d43, 0x0d1e43, 0x0d1f43, 0x0d00c3, /* 192-195 */
+ 0x0d01c3, 0x0d02c3, 0x0d03c3, 0x0d04c3, /* 196-199 */
+ 0x0d05c3, 0x0d06c3, 0x0d07c3, 0x0d08c3, /* 200-203 */
+ 0x0d09c3, 0x0d0ac3, 0x0d0bc3, 0x0d0cc3, /* 204-207 */
+ 0x0d0dc3, 0x0d0ec3, 0x0d0fc3, 0x0d10c3, /* 208-211 */
+ 0x0d11c3, 0x0d12c3, 0x0d13c3, 0x0d14c3, /* 212-215 */
+ 0x0d15c3, 0x0d16c3, 0x0d17c3, 0x0d18c3, /* 216-219 */
+ 0x0d19c3, 0x0d1ac3, 0x0d1bc3, 0x0d1cc3, /* 220-223 */
+ 0x0d1dc3, 0x0d1ec3, 0x0d1fc3, 0x0d0023, /* 224-227 */
+ 0x0d0123, 0x0d0223, 0x0d0323, 0x0d0423, /* 228-231 */
+ 0x0d0523, 0x0d0623, 0x0d0723, 0x0d0823, /* 232-235 */
+ 0x0d0923, 0x0d0a23, 0x0d0b23, 0x0d0c23, /* 236-239 */
+ 0x0d0d23, 0x0d0e23, 0x0d0f23, 0x0d1023, /* 240-243 */
+ 0x0d1123, 0x0d1223, 0x0d1323, 0x0d1423, /* 244-247 */
+ 0x0d1523, 0x0d1623, 0x0d1723, 0x0d1823, /* 248-251 */
+ 0x0d1923, 0x0d1a23, 0x0d1b23, 0x0d1c23, /* 252-255 */
+ 0x0d1d23, 0x0d1e23, 0x0800a3 /* 256-258 */
+};
+
+/* This horrible mess is needed to shut up the fallthrough warning since the
+ * stupid comment approach doesn't resist to separate preprocessing (e.g. as
+ * used in distcc). Note that compilers which support the fallthrough attribute
+ * also support __has_attribute.
+ */
+#ifndef __fallthrough
+# ifdef __has_attribute
+# if __has_attribute(fallthrough)
+# define __fallthrough __attribute__((fallthrough))
+# else
+# define __fallthrough do { } while (0)
+# endif
+# else
+# define __fallthrough do { } while (0)
+# endif
+#endif
+
+#if !defined(__ARM_FEATURE_CRC32)
+static uint32_t crc32_fast[4][256];
+#endif
+
+static uint32_t fh_dist_table[32768];
+
+#if !defined(__ARM_FEATURE_CRC32)
+/* Make the table for a fast CRC.
+ * Not thread-safe, must be called exactly once.
+ */
+static inline void __slz_make_crc_table(void)
+{
+ uint32_t c;
+ int n, k;
+
+ for (n = 0; n < 256; n++) {
+ c = (uint32_t) n ^ 255;
+ for (k = 0; k < 8; k++) {
+ if (c & 1) {
+ c = 0xedb88320 ^ (c >> 1);
+ } else {
+ c = c >> 1;
+ }
+ }
+ crc32_fast[0][n] = c ^ 0xff000000;
+ }
+
+ /* Note: here we *do not* have to invert the bits corresponding to the
+ * byte position, because [0] already has the 8 highest bits inverted,
+ * and these bits are shifted by 8 at the end of the operation, which
+ * results in having the next 8 bits shifted in turn. That's why we
+ * have the xor in the index used just after a computation.
+ */
+ for (n = 0; n < 256; n++) {
+ crc32_fast[1][n] = 0xff000000 ^ crc32_fast[0][(0xff000000 ^ crc32_fast[0][n] ^ 0xff) & 0xff] ^ (crc32_fast[0][n] >> 8);
+ crc32_fast[2][n] = 0xff000000 ^ crc32_fast[0][(0x00ff0000 ^ crc32_fast[1][n] ^ 0xff) & 0xff] ^ (crc32_fast[1][n] >> 8);
+ crc32_fast[3][n] = 0xff000000 ^ crc32_fast[0][(0x0000ff00 ^ crc32_fast[2][n] ^ 0xff) & 0xff] ^ (crc32_fast[2][n] >> 8);
+ }
+}
+#endif
+
+/* Returns code for lengths 1 to 32768. The bit size for the next value can be
+ * found this way :
+ *
+ * bits = code >> 1;
+ * if (bits)
+ * bits--;
+ *
+ */
+static inline uint32_t dist_to_code(uint32_t l)
+{
+ uint32_t code;
+
+ code = 0;
+ switch (l) {
+ case 24577 ... 32768: code++; __fallthrough;
+ case 16385 ... 24576: code++; __fallthrough;
+ case 12289 ... 16384: code++; __fallthrough;
+ case 8193 ... 12288: code++; __fallthrough;
+ case 6145 ... 8192: code++; __fallthrough;
+ case 4097 ... 6144: code++; __fallthrough;
+ case 3073 ... 4096: code++; __fallthrough;
+ case 2049 ... 3072: code++; __fallthrough;
+ case 1537 ... 2048: code++; __fallthrough;
+ case 1025 ... 1536: code++; __fallthrough;
+ case 769 ... 1024: code++; __fallthrough;
+ case 513 ... 768: code++; __fallthrough;
+ case 385 ... 512: code++; __fallthrough;
+ case 257 ... 384: code++; __fallthrough;
+ case 193 ... 256: code++; __fallthrough;
+ case 129 ... 192: code++; __fallthrough;
+ case 97 ... 128: code++; __fallthrough;
+ case 65 ... 96: code++; __fallthrough;
+ case 49 ... 64: code++; __fallthrough;
+ case 33 ... 48: code++; __fallthrough;
+ case 25 ... 32: code++; __fallthrough;
+ case 17 ... 24: code++; __fallthrough;
+ case 13 ... 16: code++; __fallthrough;
+ case 9 ... 12: code++; __fallthrough;
+ case 7 ... 8: code++; __fallthrough;
+ case 5 ... 6: code++; __fallthrough;
+ case 4 : code++; __fallthrough;
+ case 3 : code++; __fallthrough;
+ case 2 : code++;
+ }
+
+ return code;
+}
+
+/* not thread-safe, must be called exactly once */
+static inline void __slz_prepare_dist_table()
+{
+ uint32_t dist;
+ uint32_t code;
+ uint32_t bits;
+
+ for (dist = 0; dist < sizeof(fh_dist_table) / sizeof(*fh_dist_table); dist++) {
+ code = dist_to_code(dist + 1);
+ bits = code >> 1;
+ if (bits)
+ bits--;
+
+ /* Distance codes are stored on 5 bits reversed. The RFC
+ * doesn't state that they are reversed, but it's the only
+ * way it works.
+ */
+ code = ((code & 0x01) << 4) | ((code & 0x02) << 2) |
+ (code & 0x04) |
+ ((code & 0x08) >> 2) | ((code & 0x10) >> 4);
+
+ code += (dist & ((1 << bits) - 1)) << 5;
+ fh_dist_table[dist] = (code << 5) + bits + 5;
+ }
+}
diff --git a/include/import/slz.h b/include/import/slz.h
new file mode 100644
index 0000000..901a790
--- /dev/null
+++ b/include/import/slz.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013-2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _SLZ_H
+#define _SLZ_H
+
+#include <inttypes.h>
+
+/* We have two macros UNALIGNED_LE_OK and UNALIGNED_FASTER. The latter indicates
+ * that using unaligned data is faster than a simple shift. On x86 32-bit at
+ * least it is not the case as the per-byte access is 30% faster. A core2-duo on
+ * x86_64 is 7% faster to read one byte + shifting by 8 than to read one word,
+ * but a core i5 is 7% faster doing the unaligned read, so we privilege more
+ * recent implementations here.
+ */
+#if defined(__x86_64__)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#define USE_64BIT_QUEUE
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#define UNALIGNED_LE_OK
+//#define UNALIGNED_FASTER
+#elif defined(__ARMEL__) && defined(__ARM_ARCH_7A__)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#elif defined(__ARM_ARCH_8A) || defined(__ARM_FEATURE_UNALIGNED)
+#define UNALIGNED_LE_OK
+#define UNALIGNED_FASTER
+#endif
+
+/* Log2 of the size of the hash table used for the references table. */
+#define HASH_BITS 13
+
+/* Internal stream progress states; stored in slz_stream->state. */
+enum slz_state {
+	SLZ_ST_INIT, /* stream initialized */
+	SLZ_ST_EOB, /* header or end of block already sent */
+	SLZ_ST_FIXED, /* inside a fixed huffman sequence */
+	SLZ_ST_LAST, /* last block, BFINAL sent */
+	SLZ_ST_DONE, /* BFINAL and EOB already sent */
+	SLZ_ST_END /* end sent (BFINAL, EOB, CRC + len) */
+};
+
+/* Output format selectors for slz_init(); each format determines the
+ * envelope around the deflate stream and the checksum algorithm used.
+ */
+enum {
+	SLZ_FMT_GZIP, /* RFC1952: gzip envelope and crc32 for CRC */
+	SLZ_FMT_ZLIB, /* RFC1950: zlib envelope and adler-32 for CRC */
+	SLZ_FMT_DEFLATE, /* RFC1951: raw deflate, and no crc */
+};
+
+/* Per-stream compression context, passed to every slz_* function. */
+struct slz_stream {
+#ifdef USE_64BIT_QUEUE
+	uint64_t queue; /* last pending bits, LSB first */
+#else
+	uint32_t queue; /* last pending bits, LSB first */
+#endif
+	uint32_t qbits; /* number of bits in queue, < 8 on 32-bit, < 32 on 64-bit */
+	unsigned char *outbuf; /* set by encode() */
+	uint16_t state; /* one of slz_state */
+	uint8_t level:1; /* 0 = no compression, 1 = compression */
+	uint8_t format:2; /* SLZ_FMT_* */
+	uint8_t unused1; /* unused for now */
+	uint32_t crc32; /* running checksum; presumably crc32 for gzip and adler-32 for zlib — confirm against encode() */
+	uint32_t ilen; /* presumably total input bytes consumed (gzip ISIZE trailer) — TODO confirm */
+};
+
+/* Functions specific to rfc1951 (deflate) */
+long slz_rfc1951_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1951_init(struct slz_stream *strm, int level);
+int slz_rfc1951_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1951_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* Functions specific to rfc1952 (gzip) */
+uint32_t slz_crc32_by1(uint32_t crc, const unsigned char *buf, int len);
+uint32_t slz_crc32_by4(uint32_t crc, const unsigned char *buf, int len);
+long slz_rfc1952_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1952_send_header(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1952_init(struct slz_stream *strm, int level);
+int slz_rfc1952_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1952_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* Functions specific to rfc1950 (zlib) */
+uint32_t slz_adler32_by1(uint32_t crc, const unsigned char *buf, int len);
+uint32_t slz_adler32_block(uint32_t crc, const unsigned char *buf, long len);
+long slz_rfc1950_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more);
+int slz_rfc1950_send_header(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1950_init(struct slz_stream *strm, int level);
+int slz_rfc1950_flush(struct slz_stream *strm, unsigned char *buf);
+int slz_rfc1950_finish(struct slz_stream *strm, unsigned char *buf);
+
+/* generic functions */
+
+/* Initializes stream <strm> to produce format <format> (one of SLZ_FMT_*)
+ * at compression level <level>. The level may only be 0 (no compression)
+ * or 1 (compression); any other value leads to unpredictable behaviour.
+ * This function should always return 0.
+ */
+static inline int slz_init(struct slz_stream *strm, int level, int format)
+{
+	int ret;
+
+	switch (format) {
+	case SLZ_FMT_GZIP:
+		ret = slz_rfc1952_init(strm, level);
+		break;
+	case SLZ_FMT_ZLIB:
+		ret = slz_rfc1950_init(strm, level);
+		break;
+	default:
+		/* raw deflate for anything else; record the caller's format */
+		ret = slz_rfc1951_init(strm, level);
+		strm->format = format;
+		break;
+	}
+	return ret;
+}
+
+/* Encodes <ilen> bytes from <in> into <out> according to the stream's
+ * configured format, which also selects the checksum algorithm applied to
+ * the input (CRC32 or adler-32). <more> indicates whether more data will
+ * follow. Returns the number of output bytes produced.
+ */
+static inline long slz_encode(struct slz_stream *strm, void *out,
+                              const void *in, long ilen, int more)
+{
+	unsigned char *dst = (unsigned char *) out;
+	const unsigned char *src = (const unsigned char *) in;
+
+	switch (strm->format) {
+	case SLZ_FMT_GZIP:
+		return slz_rfc1952_encode(strm, dst, src, ilen, more);
+	case SLZ_FMT_ZLIB:
+		return slz_rfc1950_encode(strm, dst, src, ilen, more);
+	default: /* raw deflate for the other ones */
+		return slz_rfc1951_encode(strm, dst, src, ilen, more);
+	}
+}
+
+/* Flushes pending bits and sends the trailer for stream <strm> into buffer
+ * <buf> if needed. When it's done, the stream state is updated to SLZ_ST_END.
+ * It returns the number of bytes emitted. The trailer consists in flushing
+ * the possibly pending bits from the queue (up to 24 bits), rounding to the
+ * next byte, then 4 bytes for the CRC when doing zlib/gzip, then another 4
+ * bytes for the input length for gzip. That may amount to 4+4+4 = 12 bytes,
+ * which the caller must ensure are available before calling the function.
+ * Note that if the initial header was never sent, it will be sent first as
+ * well (up to 10 extra bytes).
+ */
+static inline int slz_finish(struct slz_stream *strm, void *buf)
+{
+	unsigned char *dst = (unsigned char *) buf;
+
+	switch (strm->format) {
+	case SLZ_FMT_GZIP:
+		return slz_rfc1952_finish(strm, dst);
+	case SLZ_FMT_ZLIB:
+		return slz_rfc1950_finish(strm, dst);
+	default: /* raw deflate for the other ones */
+		return slz_rfc1951_finish(strm, dst);
+	}
+}
+
+/* Flushes any pending data for stream <strm> into buffer <buf>, then emits
+ * an empty literal block to byte-align the output, allowing to completely
+ * flush the queue. Note that if the initial header was never sent, it will
+ * be sent first as well (0, 2 or 10 extra bytes). This requires that the
+ * output buffer still has this plus the size of the queue available (up to
+ * 4 bytes), plus one byte for (BFINAL,BTYPE), plus 4 bytes for LEN+NLEN, or
+ * a total of 19 bytes in the worst case. The number of bytes emitted is
+ * returned. It is guaranteed that the queue is empty on return. This may
+ * cause some overhead by adding needless 5-byte blocks if called too often.
+ */
+static inline int slz_flush(struct slz_stream *strm, void *buf)
+{
+	unsigned char *dst = (unsigned char *) buf;
+
+	switch (strm->format) {
+	case SLZ_FMT_GZIP:
+		return slz_rfc1952_flush(strm, dst);
+	case SLZ_FMT_ZLIB:
+		return slz_rfc1950_flush(strm, dst);
+	default: /* raw deflate for the other ones */
+		return slz_rfc1951_flush(strm, dst);
+	}
+}
+
+#endif
diff --git a/include/import/xxhash.h b/include/import/xxhash.h
new file mode 100644
index 0000000..a18e8c7
--- /dev/null
+++ b/include/import/xxhash.h
@@ -0,0 +1,6773 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Header File
+ * Copyright (C) 2012-2021 Yann Collet
+ *
+ * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://www.xxhash.com
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ * - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ * 32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ * 64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ * - Modern 64-bit and 128-bit hash function family which features improved
+ * strength and performance across the board, especially on smaller data.
+ * It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
+ * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
+ * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
+ * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
+ * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
+ * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
+ * | RAM sequential read | | N/A | 28.0 GB/s | N/A |
+ * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
+ * | City64 | | 64 | 22.0 GB/s | 76.6 |
+ * | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
+ * | City128 | | 128 | 21.7 GB/s | 57.7 |
+ * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
+ * | XXH64() | | 64 | 19.4 GB/s | 71.0 |
+ * | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
+ * | Mum | | 64 | 18.0 GB/s | 67.0 |
+ * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
+ * | XXH32() | | 32 | 9.7 GB/s | 71.9 |
+ * | City32 | | 32 | 9.1 GB/s | 66.0 |
+ * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
+ * | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
+ * | SipHash* | | 64 | 3.0 GB/s | 43.2 |
+ * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
+ * | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
+ * | FNV64 | | 64 | 1.2 GB/s | 62.7 |
+ * | Blake2* | | 256 | 1.1 GB/s | 5.1 |
+ * | SHA1* | | 160 | 0.8 GB/s | 5.6 |
+ * | MD5* | | 128 | 0.6 GB/s | 7.8 |
+ * @note
+ * - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ * even though it is mandatory on x64.
+ * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ * by modern standards.
+ * - Small data velocity is a rough average of algorithm's efficiency for small
+ * data. For more accurate information, see the wiki.
+ * - More benchmarks and strength tests are found on the wiki:
+ * https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ * For functions which take an input and length parameter, the following
+ * requirements are assumed:
+ * - The range from [`input`, `input + length`) is valid, readable memory.
+ * - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ * - For C++, the objects must have the *TriviallyCopyable* property, as the
+ * functions access bytes directly as if it was an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a null terminated string with XXH32().
+ * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ * {
+ * // NULL pointers are only valid if the length is zero
+ * size_t length = (string == NULL) ? 0 : strlen(string);
+ * return XXH32(string, length, seed);
+ * }
+ * @endcode
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <assert.h>
+ * #include "xxhash.h"
+ * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ * XXH64_hash_t hashFile(FILE* f)
+ * {
+ * // Allocate a state struct. Do not just use malloc() or new.
+ * XXH3_state_t* state = XXH3_createState();
+ * assert(state != NULL && "Out of memory!");
+ * // Reset the state to start a new hashing session.
+ * XXH3_64bits_reset(state);
+ * char buffer[4096];
+ * size_t count;
+ * // Read the file in chunks
+ * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ * // Run update() as many times as necessary to process the data
+ * XXH3_64bits_update(state, buffer, count);
+ * }
+ * // Retrieve the finalized hash. This will not change the state.
+ * XXH64_hash_t result = XXH3_64bits_digest(state);
+ * // Free the state. Do not use free().
+ * XXH3_freeState(state);
+ * return result;
+ * }
+ * @endcode
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* ****************************
+ * INLINE mode
+ ******************************/
+/*!
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #define XXH_IMPLEMENTATION
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
+ * Use these build macros to inline xxhash into the target unit.
+ * Inlining improves performance on small inputs, especially when the length is
+ * expressed as a compile-time constant:
+ *
+ * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ *
+ * It also keeps xxHash symbols private to the unit, so they are not exported.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * @endcode
+ * Do not compile and link xxhash.o as a separate object, as it is not useful.
+ */
+# define XXH_INLINE_ALL
+# undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline.
+ */
+# define XXH_PRIVATE_API
+# undef XXH_PRIVATE_API
+/*!
+ * @brief Emulate a namespace by transparently prefixing all symbols.
+ *
+ * If you want to include _and expose_ xxHash functions from within your own
+ * library, but also want to avoid symbol collisions with other libraries which
+ * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
+ * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
+ * (therefore, avoid empty or numeric values).
+ *
+ * Note that no change is required within the calling program as long as it
+ * includes `xxhash.h`: Regular symbol names will be automatically translated
+ * by this header.
+ */
+# define XXH_NAMESPACE /* YOUR NAME HERE */
+# undef XXH_NAMESPACE
+#endif
+
+#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
+ && !defined(XXH_INLINE_ALL_31684351384)
+ /* this section should be traversed only once */
+# define XXH_INLINE_ALL_31684351384
+ /* give access to the advanced API, required to compile implementations */
+# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
+# define XXH_STATIC_LINKING_ONLY
+ /* make all functions private */
+# undef XXH_PUBLIC_API
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+ /* note: this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+
+ /*
+ * This part deals with the special case where a unit wants to inline xxHash,
+ * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
+ * such as part of some previously included *.h header file.
+ * Without further action, the new include would just be ignored,
+ * and functions would effectively _not_ be inlined (silent failure).
+ * The following macros solve this situation by prefixing all inlined names,
+ * avoiding naming collision with previous inclusions.
+ */
+ /* Before that, we unconditionally #undef all symbols,
+ * in case they were already defined with XXH_NAMESPACE.
+ * They will then be redefined for XXH_INLINE_ALL
+ */
+# undef XXH_versionNumber
+ /* XXH32 */
+# undef XXH32
+# undef XXH32_createState
+# undef XXH32_freeState
+# undef XXH32_reset
+# undef XXH32_update
+# undef XXH32_digest
+# undef XXH32_copyState
+# undef XXH32_canonicalFromHash
+# undef XXH32_hashFromCanonical
+ /* XXH64 */
+# undef XXH64
+# undef XXH64_createState
+# undef XXH64_freeState
+# undef XXH64_reset
+# undef XXH64_update
+# undef XXH64_digest
+# undef XXH64_copyState
+# undef XXH64_canonicalFromHash
+# undef XXH64_hashFromCanonical
+ /* XXH3_64bits */
+# undef XXH3_64bits
+# undef XXH3_64bits_withSecret
+# undef XXH3_64bits_withSeed
+# undef XXH3_64bits_withSecretandSeed
+# undef XXH3_createState
+# undef XXH3_freeState
+# undef XXH3_copyState
+# undef XXH3_64bits_reset
+# undef XXH3_64bits_reset_withSeed
+# undef XXH3_64bits_reset_withSecret
+# undef XXH3_64bits_update
+# undef XXH3_64bits_digest
+# undef XXH3_generateSecret
+ /* XXH3_128bits */
+# undef XXH128
+# undef XXH3_128bits
+# undef XXH3_128bits_withSeed
+# undef XXH3_128bits_withSecret
+# undef XXH3_128bits_reset
+# undef XXH3_128bits_reset_withSeed
+# undef XXH3_128bits_reset_withSecret
+# undef XXH3_128bits_reset_withSecretandSeed
+# undef XXH3_128bits_update
+# undef XXH3_128bits_digest
+# undef XXH128_isEqual
+# undef XXH128_cmp
+# undef XXH128_canonicalFromHash
+# undef XXH128_hashFromCanonical
+ /* Finally, free the namespace itself */
+# undef XXH_NAMESPACE
+
+ /* employ the namespace for XXH_INLINE_ALL */
+# define XXH_NAMESPACE XXH_INLINE_
+ /*
+ * Some identifiers (enums, type names) are not symbols,
+ * but they must nonetheless be renamed to avoid redeclaration.
+ * Alternative solution: do not redeclare them.
+ * However, this requires some #ifdefs, and has a more dispersed impact.
+ * Meanwhile, renaming can be achieved in a single place.
+ */
+# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
+# define XXH_OK XXH_IPREF(XXH_OK)
+# define XXH_ERROR XXH_IPREF(XXH_ERROR)
+# define XXH_errorcode XXH_IPREF(XXH_errorcode)
+# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
+# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
+# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
+# define XXH32_state_s XXH_IPREF(XXH32_state_s)
+# define XXH32_state_t XXH_IPREF(XXH32_state_t)
+# define XXH64_state_s XXH_IPREF(XXH64_state_s)
+# define XXH64_state_t XXH_IPREF(XXH64_state_t)
+# define XXH3_state_s XXH_IPREF(XXH3_state_s)
+# define XXH3_state_t XXH_IPREF(XXH3_state_t)
+# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
+ /* Ensure the header is parsed again, even if it was previously included */
+# undef XXHASH_H_5627135585666179
+# undef XXHASH_H_STATIC_13879238742
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/* ****************************************************************
+ * Stable API
+ *****************************************************************/
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+/*! @brief Marks a global symbol. */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+/* XXH32 */
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+/* XXH64 */
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+/* XXH3_64bits */
+# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
+# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
+# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 8
+#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+
+/*!
+ * @brief Obtains the xxHash version.
+ *
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to header file.
+ *
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Common basic types
+******************************/
+#include <stddef.h> /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
+ *
+ * @ref XXH_OK is explicitly defined as 0, so callers may treat any nonzero
+ * return as an error.
+ */
+typedef enum {
+    XXH_OK = 0, /*!< OK */
+    XXH_ERROR /*!< Error */
+} XXH_errorcode;
+
+
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*!
+ * @brief An unsigned 32-bit integer.
+ *
+ * Not necessarily defined to `uint32_t` but functionally equivalent.
+ */
+typedef uint32_t XXH32_hash_t;
+
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint32_t XXH32_hash_t;
+
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform: need a 32-bit type"
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH32_family XXH32 family
+ * @ingroup public
+ * Contains functions used in the classic 32-bit xxHash algorithm.
+ *
+ * @note
+ * XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ * and 64-bit systems, and offers true 64/128 bit hash results.
+ *
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
+ * @{
+ */
+
+/*!
+ * @brief Calculates the 32-bit hash of @p input using xxHash32.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * See @ref single_shot_example "Single Shot Example" for an example.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 32-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 32-bit hash value.
+ *
+ * @see
+ * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
+
+#ifndef XXH_NO_STREAM
+/*!
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * An XXH state must first be allocated using `XXH*_createState()`.
+ *
+ * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
+ *
+ * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
+ *
+ * The function returns an error code, with 0 meaning OK, and any other value
+ * meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
+ * This function returns the nn-bits hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a
+ * digest, and generate new hash values later on by invoking `XXH*_digest()`.
+ *
+ * When done, release the state using `XXH*_freeState()`.
+ *
+ * @see streaming_example at the top of @ref xxhash.h for an example.
+ */
+
+/*!
+ * @typedef struct XXH32_state_s XXH32_state_t
+ * @brief The opaque state struct for the XXH32 streaming API.
+ *
+ * @see XXH32_state_s for details.
+ */
+typedef struct XXH32_state_s XXH32_state_t;
+
+/*!
+ * @brief Allocates an @ref XXH32_state_t.
+ *
+ * Must be freed with XXH32_freeState().
+ * @return An allocated XXH32_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
+/*!
+ * @brief Frees an @ref XXH32_state_t.
+ *
+ * Must be allocated with XXH32_createState().
+ * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+/*!
+ * @brief Copies one @ref XXH32_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH32_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH32_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 32-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH32_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH32_state_t.
+ *
+ * @note
+ * Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash32 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte level,
+ * since little and big endian conventions will store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ */
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
+ */
+typedef struct {
+ unsigned char digest[4]; /*!< Hash bytes, big endian */
+} XXH32_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
+ *
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH32_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
+ *
+ * @param src The @ref XXH32_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to correct value when it's been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
+ * introduced in CPP17 and C23.
+ * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
+ */
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
+
+/*!
+ * @}
+ * @ingroup public
+ * @{
+ */
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+/*!
+ * @brief An unsigned 64-bit integer.
+ *
+ * Not necessarily defined to `uint64_t` but functionally equivalent.
+ */
+typedef uint64_t XXH64_hash_t;
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+ /* LP64 ABI says uint64_t is unsigned long */
+ typedef unsigned long XXH64_hash_t;
+# else
+ /* the following type must have a width of 64-bit */
+ typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
+
+/*!
+ * @}
+ *
+ * @defgroup XXH64_family XXH64 family
+ * @ingroup public
+ * @{
+ * Contains functions used in the classic 64-bit xxHash algorithm.
+ *
+ * @note
+ * XXH3 provides competitive speed for both 32-bit and 64-bit systems,
+ * and offers true 64/128 bit hash results.
+ * It provides better speed for systems with vector processing capabilities.
+ */
+
+/*!
+ * @brief Calculates the 64-bit hash of @p input using xxHash64.
+ *
+ * This function usually runs faster on 64-bit systems, but slower on 32-bit
+ * systems (see benchmark).
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit hash.
+ *
+ * @see
+ * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ * Direct equivalents for the other variants of xxHash.
+ * @see
+ * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*!
+ * @brief The opaque state struct for the XXH64 streaming API.
+ *
+ * @see XXH64_state_s for details.
+ */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*!
+ * @brief Allocates an @ref XXH64_state_t.
+ *
+ * Must be freed with XXH64_freeState().
+ * @return An allocated XXH64_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
+
+/*!
+ * @brief Frees an @ref XXH64_state_t.
+ *
+ * Must be allocated with XXH64_createState().
+ * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH64_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH64_state_t to begin a new hash.
+ *
+ * This function resets and seeds a state. Call it before @ref XXH64_update().
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH64_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH64_state_t.
+ *
+ * @note
+ * Calling XXH64_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated xxHash64 value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+/******* Canonical representation *******/
+
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
+ */
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; /*!< Hash bytes, big endian */ } XXH64_canonical_t;
+
+/*!
+ * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
+ *
+ * @param dst The @ref XXH64_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH64_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
+ *
+ * @param src The @ref XXH64_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
+
+#ifndef XXH_NO_XXH3
+
+/*!
+ * @}
+ * ************************************************************************
+ * @defgroup XXH3_family XXH3 family
+ * @ingroup public
+ * @{
+ *
+ * XXH3 is a more recent hash algorithm featuring:
+ * - Improved speed for both small and large inputs
+ * - True 64-bit and 128-bit outputs
+ * - SIMD acceleration
+ * - Improved 32-bit viability
+ *
+ * Speed analysis methodology is explained here:
+ *
+ * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
+ *
+ * Compared to XXH64, expect XXH3 to run approximately
+ * ~2x faster on large inputs and >3x faster on small ones,
+ * exact differences vary depending on platform.
+ *
+ * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
+ * but does not require it.
+ * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
+ * at competitive speeds, even without vector support. Further details are
+ * explained in the implementation.
+ *
+ * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
+ * implementations for many common platforms:
+ * - AVX512
+ * - AVX2
+ * - SSE2
+ * - ARM NEON
+ * - WebAssembly SIMD128
+ * - POWER8 VSX
+ * - s390x ZVector
+ * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
+ * selects the best version according to predefined macros. For the x86 family, an
+ * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
+ *
+ * XXH3 implementation is portable:
+ * it has a generic C90 formulation that can be compiled on any platform,
+ * all implementations generate exactly the same hash value on all platforms.
+ * Starting from v0.8.0, it's also labelled "stable", meaning that
+ * any future version will also generate the same hash value.
+ *
+ * XXH3 offers 2 variants, _64bits and _128bits.
+ *
+ * When only 64 bits are needed, prefer invoking the _64bits variant, as it
+ * reduces the amount of mixing, resulting in faster speed on small inputs.
+ * It's also generally simpler to manipulate a scalar return type than a struct.
+ *
+ * The API supports one-shot hashing, streaming mode, and custom secrets.
+ */
+/*-**********************************************************************
+* XXH3 64-bit variant
+************************************************************************/
+
+/*!
+ * @brief 64-bit unseeded variant of XXH3.
+ *
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_128bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_64bits_reset(), XXH3_64bits_update(), XXH3_64bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief 64-bit seeded variant of XXH3
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the `seed` value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * @param input The data to hash
+ * @param length The length
+ * @param seed The 64-bit seed to alter the state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
+
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief 64-bit variant of XXH3 with a custom "secret".
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing "XXH3_generateSecret()" instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ */
+
+/*!
+ * @brief The state struct for the XXH3 streaming API.
+ *
+ * @see XXH3_state_s for details.
+ */
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
+
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_64bits_update().
+ * Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+ XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+ XXH64_hash_t high64; /*!< `value >> 64` */
+} XXH128_hash_t;
+
+/*!
+ * @brief Unseeded 128-bit variant of XXH3
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of 0, however
+ * it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH32(), XXH64(), XXH3_64bits(): equivalent for the other xxHash algorithms
+ * @see
+ * XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see
+ * XXH3_128bits_reset(), XXH3_128bits_update(), XXH3_128bits_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
+/*! @brief Seeded 128-bit variant of XXH3. @see XXH3_64bits_withSeed(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_withSecret(). */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/******* Streaming *******/
+#ifndef XXH_NO_STREAM
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have same meaning as their 64-bit counterpart.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret with default parameters. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * This function resets `statePtr` and generates a secret from `seed`. Call it before @ref XXH3_128bits_update().
+ * Digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the state.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*! @brief Custom secret 128-bit variant of XXH3. @see XXH3_64bits_reset_withSecret(). */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @note
+ * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * @brief Compares two @ref XXH128_hash_t
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ *
+ * @return: >0 if *h128_1 > *h128_2
+ * =0 if *h128_1 == *h128_2
+ * <0 if *h128_1 < *h128_2
+ */
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
+
+
+/******* Canonical representation *******/
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH128_hash_t.
+ */
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; /*!< Hash bytes, big endian */ } XXH128_canonical_t;
+
+
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
+
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
+
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+
+/*!
+ * @}
+ */
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ ***************************************************************************** */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
+ * Never **ever** access their members directly.
+ */
+
+/*!
+ * @internal
+ * @brief Structure for XXH32 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH32_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH64_state_s, XXH3_state_s
+ */
+struct XXH32_state_s {
+ XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+ XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+ XXH32_hash_t v[4]; /*!< Accumulator lanes */
+ XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
+ XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
+}; /* typedef'd to XXH32_state_t */
+
+
+#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
+
+/*!
+ * @internal
+ * @brief Structure for XXH64 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
+ * an opaque type. This allows fields to safely be changed.
+ *
+ * Typedef'd to @ref XXH64_state_t.
+ * Do not access the members of this struct directly.
+ * @see XXH32_state_s, XXH3_state_s
+ */
+struct XXH64_state_s {
+ XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
+ XXH64_hash_t v[4]; /*!< Accumulator lanes */
+ XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
+ XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyway. */
+ XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
+}; /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+# include <stdalign.h>
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__GNUC__)
+# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+# define XXH_ALIGN(n) __declspec(align(n))
+#else
+# define XXH_ALIGN(n) /* disabled */
+#endif
+
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+ && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+ && defined(__GNUC__)
+# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
+
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
+
+/*!
+ * @internal
+ * @brief Structure for XXH3 streaming API.
+ *
+ * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
+ * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
+ * Otherwise it is an opaque type.
+ * Never use this definition in combination with dynamic library.
+ * This allows fields to safely be changed in the future.
+ *
+ * @note ** This structure has a strict alignment requirement of 64 bytes!! **
+ * Do not allocate this with `malloc()` or `new`,
+ * it will not be sufficiently aligned.
+ * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+       /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+       /*!< Used to store a custom secret generated from a seed. */
+   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+       /*!< The internal buffer. @see XXH32_state_s::mem32 */
+   XXH32_hash_t bufferedSize;
+       /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+   XXH32_hash_t useSeed;
+       /*!< Reserved field. Needed for padding on 64-bit. */
+   size_t nbStripesSoFar;
+       /*!< Number of stripes processed. */
+   XXH64_hash_t totalLen;
+       /*!< Total length hashed. 64-bit even on 32-bit targets. */
+   size_t nbStripesPerBlock;
+       /*!< Number of stripes per block. */
+   size_t secretLimit;
+       /*!< Size of @ref customSecret or @ref extSecret */
+   XXH64_hash_t seed;
+       /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+   XXH64_hash_t reserved64;
+       /*!< Reserved field. */
+   const unsigned char* extSecret;
+       /*!< Reference to an external secret for the _withSecret variants, NULL
+        *   for other variants. */
+   /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) \
+ do { \
+ XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+ tmp_xxh3_state_ptr->seed = 0; \
+ tmp_xxh3_state_ptr->extSecret = NULL; \
+ } while(0)
+
+
+/*!
+ * simple alias to pre-selected XXH3_128bits variant
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
+ * are part of this list. They all accept a `secret` parameter
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
+ * _and_ feature very high entropy (consist of random-looking bytes).
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
+ *
+ * @pre
+ * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
+ *
+ * Example code:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <string.h>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Hashes argv[2] using the entropy from argv[1].
+ * int main(int argc, char* argv[])
+ * {
+ * char secret[XXH3_SECRET_SIZE_MIN];
+ *     if (argc != 3) { return 1; }
+ * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ * XXH64_hash_t h = XXH3_64bits_withSecret(
+ * argv[2], strlen(argv[2]),
+ * secret, sizeof(secret)
+ * );
+ * printf("%016llx\n", (unsigned long long) h);
+ * }
+ * @endcode
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ * #include <string>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Slow, seeds each time
+ * class HashSlow {
+ * XXH64_hash_t seed;
+ * public:
+ * HashSlow(XXH64_hash_t s) : seed{s} {}
+ * size_t operator()(const std::string& x) const {
+ * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ * }
+ * };
+ * // Fast, caches the seeded secret for future uses.
+ * class HashFast {
+ * unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ * public:
+ * HashFast(XXH64_hash_t s) {
+ *         XXH3_generateSecret_fromSeed(secret, s);
+ * }
+ * size_t operator()(const std::string& x) const {
+ * return size_t{
+ * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ * };
+ * }
+ * };
+ * @endcode
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_SIZE_MIN bytes
+ * @param seed The seed to seed the state.
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, _withSecretandSeed variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to swap each bit in the output, via its impact to the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+/*! @copydoc XXH3_64bits_withSecretandSeed() */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#endif /* !XXH_NO_STREAM */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# define XXH_IMPLEMENTATION
+#endif
+
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+/* ======================================================================== */
+/* ======================================================================== */
+/* ======================================================================== */
+
+
+/*-**********************************************************************
+ * xxHash implementation
+ *-**********************************************************************
+ * xxHash's implementation used to be hosted inside xxhash.c.
+ *
+ * However, inlining requires implementation to be visible to the compiler,
+ * hence be included alongside the header.
+ * Previously, implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in /include directory.
+ *
+ * xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
+ *
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
+ ************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+ || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+# define XXH_IMPLEM_13a8737387
+
+/* *************************************
+* Tuning parameters
+***************************************/
+
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+# define XXH_NO_LONG_LONG
+# undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The below switch allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ * @par
+ * Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ * eliminate the function call and treat it as an unaligned access.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ * @par
+ * Depends on compiler extensions and is therefore not portable.
+ * This method is safe _if_ your compiler supports it,
+ * and *generally* as fast or faster than `memcpy`.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ * @par
+ * Casts directly and dereferences. This method doesn't depend on the
+ * compiler, but it violates the C standard as it directly dereferences an
+ * unaligned pointer. It can generate buggy code on targets which do not
+ * support unaligned memory accesses, but in some circumstances, it's the
+ * only known way to get the most performance.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ * @par
+ * Also portable. This can generate the best code on old compilers which don't
+ * inline small `memcpy()` calls, and it might also be faster on big-endian
+ * systems which lack a native byteswap instruction. However, some compilers
+ * will emit literal byteshifts even if the target supports unaligned access.
+ *
+ *
+ * @warning
+ * Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ * care, as what works on one compiler/platform/optimization level may cause
+ * another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+# define XXH_FORCE_MEMORY_ACCESS 0
+
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage to forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
+ * comes first.
+ * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ * conservative and disables hacks that increase code size. It implies the
+ * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
+ * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ * Performance may cry. For example, the single shot functions just use the
+ * streaming API.
+ */
+# define XXH_SIZE_OPT 0
+
+/*!
+ * @def XXH_FORCE_ALIGN_CHECK
+ * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
+ * and XXH64() only).
+ *
+ * This is an important performance trick for architectures without decent
+ * unaligned memory access performance.
+ *
+ * It checks for input alignment, and when conditions are met, uses a "fast
+ * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
+ * faster_ read speed.
+ *
+ * The check costs one initial branch per hash, which is generally negligible,
+ * but not zero.
+ *
+ * Moreover, it's not useful to generate an additional code path if memory
+ * access uses the same instruction for both aligned and unaligned
+ * addresses (e.g. x86 and aarch64).
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
+ * which are platforms known to offer good unaligned memory accesses performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
+ */
+# define XXH_FORCE_ALIGN_CHECK 0
+
+/*!
+ * @def XXH_NO_INLINE_HINTS
+ * @brief When non-zero, sets all functions to `static`.
+ *
+ * By default, xxHash tries to force the compiler to inline almost all internal
+ * functions.
+ *
+ * This can usually improve performance due to reduced jumping and improved
+ * constant folding, but significantly increases the size of the binary which
+ * might not be favorable.
+ *
+ * Additionally, sometimes the forced inlining can be detrimental to performance,
+ * depending on the architecture.
+ *
+ * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
+ * compiler full control on whether to inline or not.
+ *
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
+ */
+# define XXH_NO_INLINE_HINTS 0
+
+/*!
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
+ *
+ * However, if the secret size is not known, it doesn't have any benefit. This
+ * happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
+ *
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
+ * that are *sometimes* force inline on -Og, and it is impossible to automatically
+ * detect this optimization level.
+ */
+# define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable for performance,
+ * but depending on exact architecture, a jmp may be preferable.
+ *
+ * This setting is only possibly making a difference for very small inputs.
+ */
+# define XXH32_ENDJMP 0
+
+/*!
+ * @internal
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
+ */
+# define XXH_OLD_NAMES
+# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+# define XXH_NO_STREAM
+# undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
+
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+ /* prefer __packed__ structures (method 1) for GCC
+ * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy
+ * which for some reason does unaligned loads. */
+# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+ /* default to 1 for -Os or -Oz */
+# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+# define XXH_SIZE_OPT 1
+# else
+# define XXH_SIZE_OPT 0
+# endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+ /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+# if XXH_SIZE_OPT >= 1 || \
+ defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
+# define XXH_NO_INLINE_HINTS 1
+# else
+# define XXH_NO_INLINE_HINTS 0
+# endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+ || !defined(XXH_INLINE_ALL)
+# define XXH3_INLINE_SECRET 0
+# else
+# define XXH3_INLINE_SECRET 1
+# endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+# define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
+ * the library loses the ability to invoke malloc / free.
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into some kernel, embedded or limited environment
+ * without access to dynamic allocation.
+ */
+
+/*! @internal @brief XXH_NO_STDLIB stub: allocation always fails, so
+ *  `XXH*_createState()` will always return NULL in this configuration. */
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+/*! @internal @brief XXH_NO_STDLIB stub: nothing was ever allocated, so freeing is a no-op. */
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than free().
+ */
+static void XXH_free(void* p) { free(p); }
+
+#endif /* XXH_NO_STDLIB */
+
+#include <string.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ *
+ * Central copy wrapper: also used by the portable unaligned-read paths below.
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+    return memcpy(dest,src,size);
+}
+
+#include <limits.h> /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS /* disable inlining hints */
+# if defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __attribute__((unused))
+# else
+# define XXH_FORCE_INLINE static
+# endif
+# define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+# define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER) /* Visual Studio */
+# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
+# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
+#else
+# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+* Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+# ifdef DEBUGLEVEL /* backwards compat */
+# define XXH_DEBUGLEVEL DEBUGLEVEL
+# else
+# define XXH_DEBUGLEVEL 0
+# endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+# include <assert.h> /* note: can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# if defined(__INTEL_COMPILER)
+# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
+# else
+# define XXH_ASSERT(c) XXH_ASSUME(c)
+# endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+# else
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+# endif
+# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
+
+/*!
+ * @internal
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (eg eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
+ */
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+# define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors which use the "w" constraint, on
+ * Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
+
+/* *************************************
+* Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+#ifdef XXH_OLD_NAMES
+# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+# define BYTE xxh_u8
+# define U8 xxh_u8
+# define U32 xxh_u32
+#endif
+
+/* *** Memory access *** */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_read32(const void* ptr)
+ * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit native endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readBE32(const void* ptr)
+ * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ *
+ * @param ptr The pointer to read from.
+ * @return The 32-bit big endian integer from the bytes at @p ptr.
+ */
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
+ * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
+ *
+ * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
+ * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
+ * always @ref XXH_alignment::XXH_unaligned.
+ *
+ * @param ptr The pointer to read from.
+ * @param align Whether @p ptr is aligned.
+ * @pre
+ * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
+ * aligned.
+ * @return The 32-bit little endian integer from the bytes at @p ptr.
+ */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/*
+ * Force direct memory access. Only works on CPU which support unaligned memory
+ * access in hardware.
+ */
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+    /* aligned(1) lowers the type's alignment to 1, letting the compiler
+     * emit an unaligned load (gcc/clang extension — see note above). */
+    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
+    return *((const xxh_unalign32*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+    /* Portable unaligned load: copy the bytes into a local through memcpy,
+     * which modern compilers collapse into a single load instruction. */
+    xxh_u32 result;
+    XXH_memcpy(&result, memPtr, sizeof result);
+    return result;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* *** Endianness *** */
+
+/*!
+ * @ingroup tuning
+ * @def XXH_CPU_LITTLE_ENDIAN
+ * @brief Whether the target is little endian.
+ *
+ * Defined to 1 if the target is little endian, or 0 if it is big endian.
+ * It can be defined externally, for example on the compiler command line.
+ *
+ * If it is not defined,
+ * a runtime check (which is usually constant folded) is used instead.
+ *
+ * @note
+ * This is not necessarily defined to an integer constant.
+ *
+ * @see XXH_isLittleEndian() for the runtime check.
+ */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+/*
+ * Try to detect endianness automatically, to avoid the nonstandard behavior
+ * in `XXH_isLittleEndian()`
+ */
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
+/*!
+ * @internal
+ * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ *
+ * Most compilers will constant fold this.
+ */
+static int XXH_isLittleEndian(void)
+{
+    /*
+     * Well-defined type punning through a union: the lowest-addressed byte
+     * of the value 1 is non-zero exactly on little-endian targets.
+     * Don't use static: it is detrimental to performance.
+     */
+    union { xxh_u32 whole; xxh_u8 bytes[4]; } probe;
+    probe.whole = 1;
+    return (int)probe.bytes[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
+#endif
+
+
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef __has_builtin
+# define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define XXH_HAS_BUILTIN(x) 0
+#endif
+
+
+
+/*
+ * C23 and future versions have standard "unreachable()".
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * # include <stddef.h>
+ * # ifdef unreachable
+ * # define XXH_UNREACHABLE() unreachable()
+ * # endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * # include <utility>
+ * # define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC12
+ */
+
+/* XXH_UNREACHABLE(): marks a code path the optimizer may treat as never
+ * taken. Expands to nothing when no compiler support is available. */
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+# define XXH_UNREACHABLE() __builtin_unreachable()
+
+#elif defined(_MSC_VER)
+# define XXH_UNREACHABLE() __assume(0)
+
+#else
+# define XXH_UNREACHABLE()
+#endif
+
+/* XXH_ASSUME(c): optimization hint that condition c always holds;
+ * emulated via XXH_UNREACHABLE() when __builtin_assume is missing. */
+#if XXH_HAS_BUILTIN(__builtin_assume)
+# define XXH_ASSUME(c) __builtin_assume(c)
+#else
+# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
+
+/*!
+ * @internal
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
+ *
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ * @p r > 0 && @p r < 32
+ * @note
+ * @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
+ */
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+ && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+/* Generic fallback: valid only under the documented precondition
+ * 0 < r < width, otherwise the (width - r) shift is undefined behavior. */
+# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+/* Portable fallback: reverses the four bytes of x using shifts and masks. */
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+
+/*!
+ * @internal
+ * @brief Enum to indicate whether a pointer is aligned.
+ */
+/* Passed to the *_align read helpers to select the direct-load fast path. */
+typedef enum {
+ XXH_aligned, /*!< Aligned */
+ XXH_unaligned /*!< Possibly unaligned */
+} XXH_alignment;
+
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+/* Byteshift load: assembles the value one byte at a time, so it is
+ * independent of both host endianness and pointer alignment. */
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u32)bytePtr[1] << 8)
+ | ((xxh_u32)bytePtr[2] << 16)
+ | ((xxh_u32)bytePtr[3] << 24);
+}
+
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[3]
+ | ((xxh_u32)bytePtr[2] << 8)
+ | ((xxh_u32)bytePtr[1] << 16)
+ | ((xxh_u32)bytePtr[0] << 24);
+}
+
+#else
+/* Native load via XXH_read32(), byteswapped when the host byte order
+ * differs from the requested order. */
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+}
+
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+#endif
+
+/* Little-endian 32-bit load. When the caller guarantees alignment, a
+ * direct dereference replaces the generic XXH_readLE32() path. */
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned) {
+ return XXH_readLE32(ptr);
+ } else {
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+ }
+}
+
+
+/* *************************************
+* Misc
+***************************************/
+/*! @ingroup public */
+/* Returns the library's compile-time version constant. */
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @defgroup XXH32_impl XXH32 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH32 implementation.
+ * @{
+ */
+ /* #define instead of static const, to be used as initializers */
+#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
+#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
+#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
+#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
+#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME32_1 XXH_PRIME32_1
+# define PRIME32_2 XXH_PRIME32_2
+# define PRIME32_3 XXH_PRIME32_3
+# define PRIME32_4 XXH_PRIME32_4
+# define PRIME32_5 XXH_PRIME32_5
+#endif
+
+/*!
+ * @internal
+ * @brief Normal stripe processing routine.
+ *
+ * This shuffles the bits so that any bit from @p input impacts several bits in
+ * @p acc.
+ *
+ * @param acc The accumulator lane.
+ * @param input The stripe of input to mix.
+ * @return The mixed accumulator lane.
+ */
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+ /* multiply / rotate / multiply mix; all arithmetic is unsigned mod 2^32 */
+ acc += input * XXH_PRIME32_2;
+ acc = XXH_rotl32(acc, 13);
+ acc *= XXH_PRIME32_1;
+#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+ /*
+ * UGLY HACK:
+ * A compiler fence is the only thing that prevents GCC and Clang from
+ * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
+ * reason) without globally disabling SSE4.1.
+ *
+ * The reason we want to avoid vectorization is because despite working on
+ * 4 integers at a time, there are multiple factors slowing XXH32 down on
+ * SSE4:
+ * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
+ * newer chips!) making it slightly slower to multiply four integers at
+ * once compared to four integers independently. Even when pmulld was
+ * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
+ * just to multiply unless doing a long operation.
+ *
+ * - Four instructions are required to rotate,
+ * movqda tmp, v // not required with VEX encoding
+ * pslld tmp, 13 // tmp <<= 13
+ * psrld v, 19 // x >>= 19
+ * por v, tmp // x |= tmp
+ * compared to one for scalar:
+ * roll v, 13 // reliably fast across the board
+ * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+ *
+ * - Instruction level parallelism is actually more beneficial here because
+ * the SIMD actually serializes this operation: While v1 is rotating, v2
+ * can load data, while v3 can multiply. SSE forces them to operate
+ * together.
+ *
+ * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
+ * the loop. NEON is only faster on the A53, and with the newer cores, it is less
+ * than half the speed.
+ *
+ * Additionally, this is used on WASM SIMD128 because it JITs to the same
+ * SIMD instructions and has the same issue.
+ */
+ XXH_COMPILER_GUARD(acc);
+#endif
+ return acc;
+}
+
+/*!
+ * @internal
+ * @brief Mixes all bits to finalize the hash.
+ *
+ * The final mix ensures that all input bits have a chance to impact any bit in
+ * the output digest, resulting in an unbiased distribution.
+ *
+ * @param hash The hash to avalanche.
+ * @return The avalanched hash.
+ */
+static xxh_u32 XXH32_avalanche(xxh_u32 hash)
+{
+ /* xorshift/multiply cascade: spreads every input bit across the word */
+ hash ^= hash >> 15;
+ hash *= XXH_PRIME32_2;
+ hash ^= hash >> 13;
+ hash *= XXH_PRIME32_3;
+ hash ^= hash >> 16;
+ return hash;
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-15 bytes of @p ptr.
+ *
+ * There may be up to 15 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 16.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash.
+ * @see XXH64_finalize().
+ */
+static XXH_PUREF xxh_u32
+XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+/* consume one input byte */
+#define XXH_PROCESS1 do { \
+ hash += (*ptr++) * XXH_PRIME32_5; \
+ hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
+} while (0)
+
+/* consume four input bytes (little-endian read) */
+#define XXH_PROCESS4 do { \
+ hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
+ ptr += 4; \
+ hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
+} while (0)
+
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+
+ /* Compact rerolled version; generally faster */
+ if (!XXH32_ENDJMP) {
+ len &= 15;
+ while (len >= 4) {
+ XXH_PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ XXH_PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(hash);
+ } else {
+ /* Unrolled jump-table variant: cases are grouped by len modulo 4,
+  * each chain consuming exactly len&15 bytes before avalanching. */
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 8: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 4: XXH_PROCESS4;
+ return XXH32_avalanche(hash);
+
+ case 13: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 9: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 5: XXH_PROCESS4;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 14: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 10: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 6: XXH_PROCESS4;
+ XXH_PROCESS1;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 15: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 11: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 7: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 3: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 2: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 1: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 0: return XXH32_avalanche(hash);
+ }
+ XXH_ASSERT(0);
+ return hash; /* reaching this point is deemed impossible */
+ }
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1 XXH_PROCESS1
+# define PROCESS4 XXH_PROCESS4
+#else
+# undef XXH_PROCESS1
+# undef XXH_PROCESS4
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH32().
+ *
+ * @param input , len , seed Directly passed from @ref XXH32().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+ xxh_u32 h32;
+
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=16) {
+ /* main loop: four independent accumulators, 16 bytes per iteration */
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ xxh_u32 v2 = seed + XXH_PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - XXH_PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
+
+ /* converge the four lanes into a single 32-bit value */
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ /* short input: skip the accumulator stage entirely */
+ h32 = seed + XXH_PRIME32_5;
+ }
+
+ h32 += (xxh_u32)len;
+
+ /* digest the trailing 0-15 bytes and avalanche */
+ return XXH32_finalize(h32, input, len&15, align);
+}
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, (const xxh_u8*)input, len);
+ return XXH32_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ /* generic path: tolerates any alignment */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
+}
+
+
+
+/******* Hash streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH32_family */
+/* Allocates an uninitialized streaming state; may return NULL on OOM,
+ * caller must check. Use XXH32_reset() before hashing. */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+/*! @ingroup XXH32_family */
+/* NOTE(review): no NULL guard here — presumably XXH_free() tolerates NULL
+ * like free(); confirm against the allocator definitions earlier in the file. */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH32_family */
+/* Shallow struct copy; both pointers must be non-NULL and non-overlapping. */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH32_family */
+/* Seeds the four accumulators exactly as XXH32_endian_align() does. */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ statePtr->v[1] = seed + XXH_PRIME32_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME32_1;
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ /* total_len_32 may wrap; large_len records that >=16 bytes were ever seen */
+ state->total_len_32 += (XXH32_hash_t)len;
+ state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ /* top up the 16-byte buffer and consume it as one full stripe */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ /* bulk loop: 16 bytes per iteration, directly from the caller's buffer */
+ const xxh_u8* const limit = bEnd - 16;
+
+ do {
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ /* stash the <16-byte tail for the next update/digest */
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH32_family */
+/* Non-destructive: computes the hash of everything fed so far without
+ * modifying the state, so updates may continue afterwards. */
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
+{
+ xxh_u32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v[0], 1)
+ + XXH_rotl32(state->v[1], 7)
+ + XXH_rotl32(state->v[2], 12)
+ + XXH_rotl32(state->v[3], 18);
+ } else {
+ /* <16 total bytes: the accumulators were never used */
+ h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ /* mem32 holds the buffered tail; it is naturally 4-byte aligned */
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*!
+ * @ingroup XXH32_family
+ * The default return values from XXH functions are unsigned 32 and 64 bit
+ * integers.
+ *
+ * The canonical representation uses big endian convention, the same convention
+ * as human-readable numbers (large digits first).
+ *
+ * This way, hash values can be written into a file or buffer, remaining
+ * comparable across different systems.
+ *
+ * The following functions allow transformation of hash values to and from their
+ * canonical format.
+ */
+/* Serializes a hash to big-endian bytes (swap only needed on LE hosts). */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+/*! @ingroup XXH32_family */
+/* Inverse of XXH32_canonicalFromHash(): reads the big-endian bytes back. */
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+/*!
+ * @}
+ * @ingroup impl
+ * @{
+ */
+/******* Memory access *******/
+
+typedef XXH64_hash_t xxh_u64;
+
+#ifdef XXH_OLD_NAMES
+# define U64 xxh_u64
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE64 and XXH_readBE64.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ return *(const xxh_u64*) memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+ /* alignment-1 typedef lets the compiler emit a safe unaligned load */
+ typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
+ return *((const xxh_unalign64*)ptr);
+}
+
+#else
+
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ /* memcpy avoids strict-aliasing and alignment UB; compilers inline it */
+ xxh_u64 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+/* Portable fallback: reverses all eight bytes of x with shifts and masks. */
+static xxh_u64 XXH_swap64(xxh_u64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+
+/* Byteshift load: endianness- and alignment-independent by construction. */
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u64)bytePtr[1] << 8)
+ | ((xxh_u64)bytePtr[2] << 16)
+ | ((xxh_u64)bytePtr[3] << 24)
+ | ((xxh_u64)bytePtr[4] << 32)
+ | ((xxh_u64)bytePtr[5] << 40)
+ | ((xxh_u64)bytePtr[6] << 48)
+ | ((xxh_u64)bytePtr[7] << 56);
+}
+
+XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[7]
+ | ((xxh_u64)bytePtr[6] << 8)
+ | ((xxh_u64)bytePtr[5] << 16)
+ | ((xxh_u64)bytePtr[4] << 24)
+ | ((xxh_u64)bytePtr[3] << 32)
+ | ((xxh_u64)bytePtr[2] << 40)
+ | ((xxh_u64)bytePtr[1] << 48)
+ | ((xxh_u64)bytePtr[0] << 56);
+}
+
+#else
+/* Native load via XXH_read64(), byteswapped when host order differs. */
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+}
+
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+#endif
+
+/* Little-endian 64-bit load; aligned callers get a direct dereference. */
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return XXH_readLE64(ptr);
+ else
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
+}
+
+
+/******* xxh64 *******/
+/*!
+ * @}
+ * @defgroup XXH64_impl XXH64 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH64 implementation.
+ * @{
+ */
+/* #define rather that static const, to be used as initializers */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME64_1 XXH_PRIME64_1
+# define PRIME64_2 XXH_PRIME64_2
+# define PRIME64_3 XXH_PRIME64_3
+# define PRIME64_4 XXH_PRIME64_4
+# define PRIME64_5 XXH_PRIME64_5
+#endif
+
+/*! @copydoc XXH32_round */
+/* 64-bit stripe mix: multiply / rotate / multiply, arithmetic mod 2^64. */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+ acc += input * XXH_PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= XXH_PRIME64_1;
+ return acc;
+}
+
+/* Folds one accumulator lane (val) into the merged 64-bit result (acc);
+ * the lane is scrambled through a round with a zero accumulator first. */
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+ return acc;
+}
+
+/*! @copydoc XXH32_avalanche */
+/* Final xorshift/multiply cascade for the 64-bit variant. */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+ hash ^= hash >> 33;
+ hash *= XXH_PRIME64_2;
+ hash ^= hash >> 29;
+ hash *= XXH_PRIME64_3;
+ hash ^= hash >> 32;
+ return hash;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+ len &= 31;
+ /* consume remaining input in 8-byte chunks */
+ while (len >= 8) {
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+ ptr += 8;
+ hash ^= k1;
+ hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ len -= 8;
+ }
+ /* then at most one 4-byte chunk */
+ if (len >= 4) {
+ hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+ ptr += 4;
+ hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ len -= 4;
+ }
+ /* then the last 0-3 bytes individually */
+ while (len > 0) {
+ hash ^= (*ptr++) * XXH_PRIME64_5;
+ hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+ --len;
+ }
+ return XXH64_avalanche(hash);
+}
+
+#ifdef XXH_OLD_NAMES
+# define PROCESS1_64 XXH_PROCESS1_64
+# define PROCESS4_64 XXH_PROCESS4_64
+# define PROCESS8_64 XXH_PROCESS8_64
+#else
+# undef XXH_PROCESS1_64
+# undef XXH_PROCESS4_64
+# undef XXH_PROCESS8_64
+#endif
+
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+ xxh_u64 h64;
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=32) {
+ /* main loop: four independent accumulators, 32 bytes per iteration */
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 31;
+ xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ xxh_u64 v2 = seed + XXH_PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<limit);
+
+ /* converge the four lanes; unlike XXH32, each lane is also merge-mixed */
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ /* short input: skip the accumulator stage entirely */
+ h64 = seed + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) len;
+
+ /* digest the trailing 0-31 bytes and avalanche */
+ return XXH64_finalize(h64, input, len, align);
+}
+
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, (const xxh_u8*)input, len);
+ return XXH64_digest(&state);
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ /* generic path: tolerates any alignment */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/******* Hash Streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+/* Allocates an uninitialized streaming state; may return NULL on OOM,
+ * caller must check. Use XXH64_reset() before hashing. */
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+/* NOTE(review): no NULL guard — presumably XXH_free() tolerates NULL like
+ * free(); confirm against the allocator definitions earlier in the file. */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+/* Shallow struct copy; both pointers must be non-NULL and non-overlapping. */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+/*! @ingroup XXH64_family */
+/* Seeds the four accumulators exactly as XXH64_endian_align() does. */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ statePtr->v[1] = seed + XXH_PRIME64_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME64_1;
+ return XXH_OK;
+}
+
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ /* top up the 32-byte buffer and consume it as one full stripe */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ /* bulk loop: 32 bytes per iteration, directly from the caller's buffer */
+ const xxh_u8* const limit = bEnd - 32;
+
+ do {
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
+ } while (p<=limit);
+
+ }
+
+ if (p < bEnd) {
+ /* stash the <32-byte tail for the next update/digest */
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+/*! @ingroup XXH64_family */
+/* Non-destructive: computes the hash of everything fed so far without
+ * modifying the state, so updates may continue afterwards. */
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
+{
+ xxh_u64 h64;
+
+ if (state->total_len >= 32) {
+ h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
+ h64 = XXH64_mergeRound(h64, state->v[0]);
+ h64 = XXH64_mergeRound(h64, state->v[1]);
+ h64 = XXH64_mergeRound(h64, state->v[2]);
+ h64 = XXH64_mergeRound(h64, state->v[3]);
+ } else {
+ /* <32 total bytes: the accumulators were never used */
+ h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
+ }
+
+ h64 += (xxh_u64) state->total_len;
+
+ /* mem64 holds the buffered tail; XXH64_finalize masks the length to 31 */
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
+
+/*! @ingroup XXH64_family */
+/* Serializes a hash to big-endian bytes (swap only needed on LE hosts). */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+
+/*! @ingroup XXH64_family */
+/* Inverse of XXH64_canonicalFromHash(): reads the big-endian bytes back. */
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+#ifndef XXH_NO_XXH3
+
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+/*!
+ * @}
+ * @defgroup XXH3_impl XXH3 implementation
+ * @ingroup impl
+ * @{
+ */
+
+/* === Compiler specifics === */
+
+#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
+# define XXH_RESTRICT /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
+# define XXH_RESTRICT restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+ || (defined (__clang__)) \
+ || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+ || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+# define XXH_RESTRICT __restrict
+#else
+# define XXH_RESTRICT /* disable */
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
+ || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+ || defined(__clang__)
+# define XXH_likely(x) __builtin_expect(x, 1)
+# define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+# define XXH_likely(x) (x)
+# define XXH_unlikely(x) (x)
+#endif
+
+#ifndef XXH_HAS_INCLUDE
+# ifdef __has_include
+# define XXH_HAS_INCLUDE(x) __has_include(x)
+# else
+# define XXH_HAS_INCLUDE(x) 0
+# endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# if defined(__ARM_FEATURE_SVE)
+# include <arm_sve.h>
+# endif
+# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+ || (defined(_M_ARM) && _M_ARM >= 7) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC) \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+# define inline __inline__ /* circumvent a clang bug */
+# include <arm_neon.h>
+# undef inline
+# elif defined(__AVX2__)
+# include <immintrin.h>
+# elif defined(__SSE2__)
+# include <emmintrin.h>
+# endif
+#endif
+
+#if defined(_MSC_VER)
+# include <intrin.h>
+#endif
+
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ * xxh_u64 x;
+ * x ^= (x >> 47); // good
+ * x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ * x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ * // note: funnel shifts are not usually cheap.
+ * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ * x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ * - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ * 32 bits in the shift.
+ * - The shift result will always fit in the lower 32 bits, and therefore,
+ * we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ * - Usable unaligned access
+ * - A 32-bit or 64-bit ALU
+ * - If 32-bit, a decent ADC instruction
+ * - A 32 or 64-bit multiply with a 64-bit result
+ * - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
+ * Lo registers, and this shuffling results in thousands more MOVs than A32.
+ *
+ * A32 and T32 don't have this limitation. They can access all 14 registers,
+ * do a 32->64 multiply with UMULL, and the flexible operand allowing free
+ * shifts is helpful, too.
+ *
+ * Therefore, we do a quick sanity check.
+ *
+ * If compiling Thumb-1 for a target which supports ARM instructions, we will
+ * emit a warning, as it is not a "sane" platform to compile for.
+ *
+ * Usually, if this happens, it is because of an accident and you probably need
+ * to specify -march, as you likely meant to compile for a newer architecture.
+ *
+ * Credit: large sections of the vectorial and asm source code paths
+ * have been contributed by @easyaspi314
+ */
+#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
+# warning "XXH3 is highly inefficient without ARM or Thumb-2."
+#endif
+
+/* ==========================================
+ * Vectorization detection
+ * ========================================== */
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @ingroup tuning
+ * @brief Overrides the vectorization implementation chosen for XXH3.
+ *
+ * Can be defined to 0 to disable SIMD or any of the values mentioned in
+ * @ref XXH_VECTOR_TYPE.
+ *
+ * If this is not defined, it uses predefined macros to determine the best
+ * implementation.
+ */
+# define XXH_VECTOR XXH_SCALAR
+/*!
+ * @ingroup tuning
+ * @brief Possible values for @ref XXH_VECTOR.
+ *
+ * Note that these are actually implemented as macros.
+ *
+ * If this is not defined, it is detected automatically.
+ * internal macro XXH_X86DISPATCH overrides this.
+ */
+enum XXH_VECTOR_TYPE /* fake enum */ {
+ XXH_SCALAR = 0, /*!< Portable scalar version */
+ XXH_SSE2 = 1, /*!<
+ * SSE2 for Pentium 4, Opteron, all x86_64.
+ *
+ * @note SSE2 is also guaranteed on Windows 10, macOS, and
+ * Android x86.
+ */
+ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
+ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
+ XXH_NEON = 4, /*!<
+ * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
+ * via the SIMDeverywhere polyfill provided with the
+ * Emscripten SDK.
+ */
+ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
+ XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
+};
+/*!
+ * @ingroup tuning
+ * @brief Selects the minimum alignment for XXH3's accumulators.
+ *
+ * When using SIMD, this should match the alignment required for said vector
+ * type, so, for example, 32 for AVX2.
+ *
+ * Default: Auto detected.
+ */
+# define XXH_ACC_ALIGN 8
+#endif
+
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+# define XXH_SCALAR 0
+# define XXH_SSE2 1
+# define XXH_AVX2 2
+# define XXH_AVX512 3
+# define XXH_NEON 4
+# define XXH_VSX 5
+# define XXH_SVE 6
+#endif
+
+#ifndef XXH_VECTOR /* can be defined on command line */
+# if defined(__ARM_FEATURE_SVE)
+# define XXH_VECTOR XXH_SVE
+# elif ( \
+ defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+ || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+ ) && ( \
+ defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+ )
+# define XXH_VECTOR XXH_NEON
+# elif defined(__AVX512F__)
+# define XXH_VECTOR XXH_AVX512
+# elif defined(__AVX2__)
+# define XXH_VECTOR XXH_AVX2
+# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+# define XXH_VECTOR XXH_SSE2
+# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+ || (defined(__s390x__) && defined(__VEC__)) \
+ && defined(__GNUC__) /* TODO: IBM XL */
+# define XXH_VECTOR XXH_VSX
+# else
+# define XXH_VECTOR XXH_SCALAR
+# endif
+#endif
+
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+# ifdef _MSC_VER
+# pragma warning(once : 4606)
+# else
+# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+# endif
+# undef XXH_VECTOR
+# define XXH_VECTOR XXH_SCALAR
+#endif
+
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+# if defined(XXH_X86DISPATCH)
+# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
+# elif XXH_VECTOR == XXH_SCALAR /* scalar */
+# define XXH_ACC_ALIGN 8
+# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
+# define XXH_ACC_ALIGN 32
+# elif XXH_VECTOR == XXH_NEON /* neon */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_VSX /* vsx */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
+# define XXH_ACC_ALIGN 64
+# elif XXH_VECTOR == XXH_SVE /* sve */
+# define XXH_ACC_ALIGN 64
+# endif
+#endif
+
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+ || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+# define XXH_SEC_ALIGN 8
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_ALIASING __attribute__((may_alias))
+#else
+# define XXH_ALIASING /* nothing */
+#endif
+
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ * -O2 -mavx2 -march=haswell
+ * or
+ * -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ *
+ * Fortunately, we can control the first one with a pragma that forces GCC into
+ * -O2, but the other one we can't control without "failed to inline always
+ * inline function due to target mismatch" warnings.
+ */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC push_options
+# pragma GCC optimize("-O2")
+#endif
+
+#if XXH_VECTOR == XXH_NEON
+
+/*
+ * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
+ * optimizes out the entire hashLong loop because of the aliasing violation.
+ *
+ * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
+ * so the only option is to mark it as aliasing.
+ */
+typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
+
+/*!
+ * @internal
+ * @brief `vld1q_u64` but faster and alignment-safe.
+ *
+ * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
+ * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
+ *
+ * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
+ * prohibits load-store optimizations. Therefore, a direct dereference is used.
+ *
+ * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
+ * unaligned load.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
+/* AArch64 GCC: a plain dereference through the may_alias vector type lets
+ * GCC treat this as an ordinary load it can combine with surrounding code
+ * (unaligned access is always safe on AArch64; see comment above). */
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
+{
+ return *(xxh_aliasing_uint64x2_t const *)ptr;
+}
+#else
+/* Other compilers/targets: vld1q_u8 performs an alignment-safe byte-wise
+ * load; the reinterpret to u64x2 changes no data. */
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
+{
+ return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
+}
+#endif
+
+/*!
+ * @internal
+ * @brief `vmlal_u32` on low and high halves of a vector.
+ *
+ * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with
+ * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32`
+ * with `vmlal_u32`.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
+/* acc.2d += widen(lhs.low2) * widen(rhs.low2), forced to a single UMLAL
+ * because GCC < 11 cannot merge vget_low_u32 + vmlal_u32 (see note above). */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* Inline assembly is the only way */
+ __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
+ return acc;
+}
+/* High-half counterpart; the vmlal_high_u32 intrinsic compiles correctly. */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* This intrinsic works as expected */
+ return vmlal_high_u32(acc, lhs, rhs);
+}
+#else
+/* Portable intrinsic versions */
+/* acc += widen(low half of lhs) * widen(low half of rhs) */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
+}
+/*! @copydoc XXH_vmlal_low_u32
+ * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
+}
+#endif
+
+/*!
+ * @ingroup tuning
+ * @brief Controls the NEON to scalar ratio for XXH3
+ *
+ * This can be set to 2, 4, 6, or 8.
+ *
+ * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
+ *
+ * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
+ * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
+ * bandwidth.
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
+ * |:----------------------|:--------------------|----------:|-----------:|------:|
+ * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
+ * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
+ * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
+ * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes meaning
+ * it effectively becomes worse than the 4-lane setting.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+ && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+# define XXH3_NEON_LANES 6
+# else
+# define XXH3_NEON_LANES XXH_ACC_NB
+# endif
+# endif
+#endif /* XXH_VECTOR == XXH_NEON */
+
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+# pragma push_macro("bool")
+# pragma push_macro("vector")
+# pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+# undef bool
+# undef vector
+# undef pixel
+
+# if defined(__s390x__)
+# include <s390intrin.h>
+# else
+# include <altivec.h>
+# endif
+
+/* Restore the original macro values, if applicable. */
+# pragma pop_macro("pixel")
+# pragma pop_macro("vector")
+# pragma pop_macro("bool")
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
+
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+# if defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_VSX_BE 1
+# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+# warning "-maltivec=be is not recommended. Please use native endianness."
+# define XXH_VSX_BE 1
+# else
+# define XXH_VSX_BE 0
+# endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+# define XXH_vec_revb vec_revb
+# else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+ /* Permutation pattern reversing the bytes within each 64-bit lane. */
+ xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                               0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+ return vec_perm(val, val, vByteSwap);
+}
+# endif
+# endif /* XXH_VSX_BE */
+
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+ xxh_u64x2 ret;
+ /* memcpy avoids misaligned vector loads and strict-aliasing violations. */
+ XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+ /* Byte-swap each 64-bit lane so the data is read as little-endian. */
+ ret = XXH_vec_revb(ret);
+# endif
+ return ret;
+}
+
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian dependent. Also, their meanings swap depending on the version.
+ * */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+# define XXH_vec_mulo vec_mulo
+# define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
+ /* The IBM XL Compiler (which also defines __clang__) only implements the vec_* operations */
+# define XXH_vec_mulo __builtin_altivec_vmulouw
+# define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ /* vmulouw: 32x32->64 multiply of the odd-indexed word lanes of a and b. */
+ __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ /* vmuleuw: 32x32->64 multiply of the even-indexed word lanes of a and b. */
+ __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+/*
+ * One XXH3 accumulation round in SVE:
+ *   mixed = secret ^ input
+ *   acc  += lo32(mixed) * hi32(mixed) + tbl(input, kSwap)
+ * Relies on `mask`, `xinput`, `xsecret` and `kSwap` being in scope at the
+ * expansion site (kSwap presumably swaps paired 64-bit lanes -- defined by
+ * the caller; confirm there).
+ */
+#define ACCRND(acc, offset) \
+do { \
+ svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
+ svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
+ svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
+ svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
+ svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
+ svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
+ svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+ acc = svadd_u64_x(mask, acc, mul); \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
+
+/* prefetch
+ * can be disabled, by declaring XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+#else
+# if XXH_SIZE_OPT >= 1
+# define XXH_PREFETCH(ptr) (void)(ptr)
+# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# else
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* XXH_NO_PREFETCH */
+
+
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
+
+#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
+
+#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
+# error "default keyset is not large enough"
+#endif
+
+/*! Pseudorandom secret taken directly from FARSH. */
+XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+ 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
+ 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
+ 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+ 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
+ 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
+ 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+ 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
+ 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
+ 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+ 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
+ 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
+ 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
+};
+
+static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
+static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
+
+#ifdef XXH_OLD_NAMES
+# define kSecret XXH3_kSecret
+#endif
+
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Calculates a 32-bit to 64-bit long multiply.
+ *
+ * Implemented as a macro.
+ *
+ * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
+ * need to (but it shouldn't need to anyways, it is about 7 instructions to do
+ * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
+ * use that instead of the normal method.
+ *
+ * If you are compiling for platforms like Thumb-1 and don't have a better option,
+ * you may also want to write your own long multiply routine here.
+ *
+ * @param x, y Numbers to be multiplied
+ * @return 64-bit product of the low 32 bits of @p x and @p y.
+ */
+/* Reference implementation shown to Doxygen only (this branch is inside
+ * #ifdef XXH_DOXYGEN); real builds use one of the macro forms below. */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64(xxh_u64 x, xxh_u64 y)
+{
+ return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
+}
+#elif defined(_MSC_VER) && defined(_M_IX86)
+# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
+#else
+/*
+ * Downcast + upcast is usually better than masking on older compilers like
+ * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
+ * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
+ */
+# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
+#endif
+
+/*!
+ * @brief Calculates a 64->128-bit long multiply.
+ *
+ * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
+ * version.
+ *
+ * @param lhs , rhs The 64-bit integers to be multiplied
+ * @return The 128-bit result represented in an @ref XXH128_hash_t.
+ */
+static XXH128_hash_t
+XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
+{
+ /*
+ * GCC/Clang __uint128_t method.
+ *
+ * On most 64-bit targets, GCC and Clang define a __uint128_t type.
+ * This is usually the best way as it usually uses a native long 64-bit
+ * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
+ *
+ * Usually.
+ *
+ * Despite being a 32-bit platform, Clang (and emscripten) define this type
+ * despite not having the arithmetic for it. This results in a laggy
+ * compiler builtin call which calculates a full 128-bit multiply.
+ * In that case it is best to use the portable one.
+ * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
+ */
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
+ && defined(__SIZEOF_INT128__) \
+ || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+
+ __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
+ XXH128_hash_t r128;
+ r128.low64 = (xxh_u64)(product);
+ r128.high64 = (xxh_u64)(product >> 64);
+ return r128;
+
+ /*
+ * MSVC for x64's _umul128 method.
+ *
+ * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
+ *
+ * This compiles to single operand MUL on x64.
+ */
+#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
+
+/* NOTE(review): MSVC itself gets _umul128 from <intrin.h> (included at the
+ * top of this file); the pragma is only for non-MSVC compilers that define
+ * _M_X64 -- confirm against upstream xxHash if touching this. */
+#ifndef _MSC_VER
+# pragma intrinsic(_umul128)
+#endif
+ xxh_u64 product_high;
+ xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
+ XXH128_hash_t r128;
+ r128.low64 = product_low;
+ r128.high64 = product_high;
+ return r128;
+
+ /*
+ * MSVC for ARM64's __umulh method.
+ *
+ * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
+ */
+#elif defined(_M_ARM64) || defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(__umulh)
+#endif
+ XXH128_hash_t r128;
+ /* Low half comes from plain wrapping multiply, high half from __umulh. */
+ r128.low64 = lhs * rhs;
+ r128.high64 = __umulh(lhs, rhs);
+ return r128;
+
+#else
+ /*
+ * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
+ *
+ * This is a fast and simple grade school multiply, which is shown below
+ * with base 10 arithmetic instead of base 0x100000000.
+ *
+ *           9 3 // D2 lhs = 93
+ *         x 7 5 // D2 rhs = 75
+ *     ----------
+ *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
+ *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
+ *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
+ *       + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
+ *     ---------
+ *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
+ *       + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
+ *     ---------
+ *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
+ *
+ * The reasons for adding the products like this are:
+ * 1. It avoids manual carry tracking. Just like how
+ *    (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
+ *    This avoids a lot of complexity.
+ *
+ * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
+ *    instruction available in ARM's Digital Signal Processing extension
+ *    in 32-bit ARMv6 and later, which is shown below:
+ *
+ *        void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
+ *        {
+ *            xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+ *            *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+ *            *RdHi = (xxh_u32)(product >> 32);
+ *        }
+ *
+ *    This instruction was designed for efficient long multiplication, and
+ *    allows this to be calculated in only 4 instructions at speeds
+ *    comparable to some 64-bit ALUs.
+ *
+ * 3. It isn't terrible on other platforms. Usually this will be a couple
+ *    of 32-bit ADD/ADCs.
+ */
+
+ /* First calculate all of the cross products. */
+ xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+ xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+ xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+ xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+
+ /* Now add the products together. These will never overflow. */
+ xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+ xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+ xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+ XXH128_hash_t r128;
+ r128.low64 = lower;
+ r128.high64 = upper;
+ return r128;
+#endif
+}
+
+/*!
+ * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
+ *
+ * The reason for the separate function is to prevent passing too many structs
+ * around by value. This will hopefully inline the multiply, but we don't force it.
+ *
+ * @param lhs , rhs The 64-bit integers to multiply
+ * @return The low 64 bits of the product XOR'd by the high 64 bits.
+ * @see XXH_mult64to128()
+ */
+/*
+ * Multiplies lhs * rhs into a 128-bit product, then folds the two 64-bit
+ * halves together with XOR. Kept as a separate (non-forced-inline)
+ * function so 128-bit structs are not passed around by value everywhere.
+ */
+static xxh_u64
+XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
+{
+ XXH128_hash_t const wide = XXH_mult64to128(lhs, rhs);
+ return wide.high64 ^ wide.low64;
+}
+
+/*! Classic xorshift step, v ^ (v >> shift). Written as a helper because it
+ * seems to produce slightly better code on GCC for some reason. */
+XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
+{
+ XXH_ASSERT(0 <= shift && shift < 64);
+ { xxh_u64 const down = v64 >> shift;
+   return down ^ v64;
+ }
+}
+
+/*
+ * Fast avalanche stage: xorshift -> multiply -> xorshift.
+ * Suitable when the input bits are already partially mixed.
+ */
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
+{
+ xxh_u64 mixed = XXH_xorshift64(h64, 37);
+ mixed *= PRIME_MX1;
+ return XXH_xorshift64(mixed, 32);
+}
+
+/*
+ * Stronger avalanche (rotate/rotate/multiply/xorshift/multiply/xorshift),
+ * inspired by Pelle Evensen's rrmxmx. Preferable when the input has not
+ * been previously mixed. `len` is folded in as extra entropy.
+ */
+static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
+{
+ xxh_u64 v = h64;
+ /* two rotations of the same value, XORed in (rrmxmx's "rr" step) */
+ v ^= XXH_rotl64(v, 49) ^ XXH_rotl64(v, 24);
+ v *= PRIME_MX2;
+ v ^= (v >> 35) + len;
+ v *= PRIME_MX2;
+ return XXH_xorshift64(v, 28);
+}
+
+
+/* ==========================================
+ * Short keys
+ * ==========================================
+ * One of the shortcomings of XXH32 and XXH64 was that their performance was
+ * sub-optimal on short lengths. It used an iterative algorithm which strongly
+ * favored lengths that were a multiple of 4 or 8.
+ *
+ * Instead of iterating over individual inputs, we use a set of single shot
+ * functions which piece together a range of lengths and operate in constant time.
+ *
+ * Additionally, the number of multiplies has been significantly reduced. This
+ * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
+ *
+ * Depending on the platform, this may or may not be faster than XXH32, but it
+ * is almost guaranteed to be faster than XXH64.
+ */
+
+/*
+ * At very short lengths, there isn't enough input to fully hide secrets, or use
+ * the entire secret.
+ *
+ * There is also only a limited amount of mixing we can do before significantly
+ * impacting performance.
+ *
+ * Therefore, we use different sections of the secret and always mix two secret
+ * samples with an XOR. This should have no effect on performance on the
+ * seedless or withSeed variants because everything _should_ be constant folded
+ * by modern compilers.
+ *
+ * The XOR mixing hides individual parts of the secret and increases entropy.
+ *
+ * This adds an extra layer of strength for custom secrets.
+ */
+/*! 64-bit XXH3 for inputs of 1..3 bytes: packs the bytes plus the length
+ * into one 32-bit word, XORs it with a seed-adjusted secret sample, then
+ * finishes with XXH64's avalanche. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combined = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combined = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combined = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+   xxh_u8 const c2 = input[len >> 1];  /* middle byte (== c1 for len 1-2) */
+   xxh_u8 const c3 = input[len - 1];   /* last byte */
+   xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+                          | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+   /* two secret words XORed, shifted by the seed */
+   xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+   xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
+   return XXH64_avalanche(keyed);
+ }
+}
+
+/*! 64-bit XXH3 for inputs of 4..8 bytes: reads the first and last 4 bytes
+ * (overlapping when len < 8), XORs with a seed-adjusted secret sample, and
+ * mixes through XXH3_rrmxmx. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ /* mirror the byte-swapped low half of the seed into its high half */
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input1 = XXH_readLE32(input);           /* first 4 bytes */
+   xxh_u32 const input2 = XXH_readLE32(input + len - 4); /* last 4 bytes */
+   xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
+   xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
+   xxh_u64 const keyed = input64 ^ bitflip;
+   /* len participates in the mix so equal bytes at different lengths differ */
+   return XXH3_rrmxmx(keyed, len);
+ }
+}
+
+/*! 64-bit XXH3 for inputs of 9..16 bytes: reads the first and last 8 bytes
+ * (overlapping), flips each with a distinct secret sample, then combines
+ * them with a 128-bit multiply folded to 64 bits. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
+   xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
+   xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
+   xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
+   /* sum of len, swapped lo, hi, and the folded 128-bit product */
+   xxh_u64 const acc = len
+                     + XXH_swap64(input_lo) + input_hi
+                     + XXH3_mul128_fold64(input_lo, input_hi);
+   return XXH3_avalanche(acc);
+ }
+}
+
+/*! Dispatcher for inputs of 0..16 bytes: routes to the dedicated short-key
+ * kernel for the length class; an empty input avalanches the seed with a
+ * secret sample. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (len == 0)
+     return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
+   if (len < 4)
+     return XXH3_len_1to3_64b(input, len, secret, seed);
+   if (len <= 8)
+     return XXH3_len_4to8_64b(input, len, secret, seed);
+   return XXH3_len_9to16_64b(input, len, secret, seed);
+ }
+}
+
+/*
+ * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
+ * multiplication by zero, affecting hashes of lengths 17 to 240.
+ *
+ * However, they are very unlikely.
+ *
+ * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
+ * unseeded non-cryptographic hashes, it does not attempt to defend itself
+ * against specially crafted inputs, only random inputs.
+ *
+ * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
+ * cancelling out the secret is taken an arbitrary number of times (addressed
+ * in XXH3_accumulate_512), this collision is very unlikely with random inputs
+ * and/or proper seeding:
+ *
+ * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
+ * function that is only called up to 16 times per hash with up to 240 bytes of
+ * input.
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+/*! Mixes 16 bytes of input with 16 bytes of seed-adjusted secret through a
+ * 128-bit multiply folded down to 64 bits (see disclaimer above). */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__)  /* GCC, not Clang */ \
+  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
+  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
+ /*
+ * UGLY HACK:
+ * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+ * slower code.
+ *
+ * By forcing seed64 into a register, we disrupt the cost model and
+ * cause it to scalarize. See `XXH32_round()`
+ *
+ * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+ * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+ * GCC 9.2, despite both emitting scalar code.
+ *
+ * GCC generates much better scalar code than Clang for the rest of XXH3,
+ * which is why finding a more optimal codepath is an interest.
+ */
+ XXH_COMPILER_GUARD(seed64);
+#endif
+ { xxh_u64 const input_lo = XXH_readLE64(input);
+   xxh_u64 const input_hi = XXH_readLE64(input+8);
+   /* each input half is keyed by its own seed-shifted secret word */
+   return XXH3_mul128_fold64(
+       input_lo ^ (XXH_readLE64(secret)   + seed64),
+       input_hi ^ (XXH_readLE64(secret+8) - seed64)
+   );
+ }
+}
+
+/* For mid range keys, XXH3 uses a Mum-hash variant.
+ *
+ * Hashes inputs of 17 to 128 bytes: pairs of XXH3_mix16B() calls walk inward
+ * from both ends of the input (using fresh secret bytes for each pair), then
+ * the accumulator is avalanched. secretSize is only checked, not consumed. */
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                     XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(16 < len && len <= 128);
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+        /* Smaller and cleaner, but slightly slower.
+         * i counts the mix pairs still needed for this length (0..3). */
+        unsigned int i = (unsigned int)(len - 1) / 32;
+        do {
+            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+        } while (i-- != 0);
+#else
+        /* Unrolled form: length thresholds are tested from largest to
+         * smallest so each extra 32 bytes adds one more mix pair. */
+        if (len > 32) {
+            if (len > 64) {
+                if (len > 96) {
+                    acc += XXH3_mix16B(input+48, secret+96, seed);
+                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
+                }
+                acc += XXH3_mix16B(input+32, secret+64, seed);
+                acc += XXH3_mix16B(input+len-48, secret+80, seed);
+            }
+            acc += XXH3_mix16B(input+16, secret+32, seed);
+            acc += XXH3_mix16B(input+len-32, secret+48, seed);
+        }
+        acc += XXH3_mix16B(input+0, secret+0, seed);
+        acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+        return XXH3_avalanche(acc);
+    }
+}
+
+#define XXH3_MIDSIZE_MAX 240
+
+/* Hashes inputs of 129 to 240 bytes: the first 128 bytes feed `acc` (then
+ * avalanched), remaining full 16-byte rounds plus the final 16 bytes feed
+ * `acc_end`, and the two are combined with a final avalanche. */
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+                      XXH64_hash_t seed)
+{
+    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    #define XXH3_MIDSIZE_STARTOFFSET 3
+    #define XXH3_MIDSIZE_LASTOFFSET 17
+
+    {   xxh_u64 acc = len * XXH_PRIME64_1;
+        xxh_u64 acc_end;
+        unsigned int const nbRounds = (unsigned int)len / 16;
+        unsigned int i;
+        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+        for (i=0; i<8; i++) {
+            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+        }
+        /* last bytes */
+        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+        XXH_ASSERT(nbRounds >= 8);
+        acc = XXH3_avalanche(acc);
+#if defined(__clang__)                                /* Clang */ \
+    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
+    /*
+     * UGLY HACK:
+     * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
+     * Everywhere else, it uses scalar code.
+     *
+     * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+     * would still be slower than UMAAL (see XXH_mult64to128).
+     *
+     * Unfortunately, Clang doesn't handle the long multiplies properly and
+     * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+     * scalarized into an ugly mess of VMOV.32 instructions.
+     *
+     * This mess is difficult to avoid without turning autovectorization
+     * off completely, but they are usually relatively minor and/or not
+     * worth it to fix.
+     *
+     * This loop is the easiest to fix, as unlike XXH32, this pragma
+     * _actually works_ because it is a loop vectorization instead of an
+     * SLP vectorization.
+     */
+    #pragma clang loop vectorize(disable)
+#endif
+        for (i=8 ; i < nbRounds; i++) {
+            /*
+             * Prevents Clang from unrolling the acc loop and interleaving with this one.
+             */
+            XXH_COMPILER_GUARD(acc);
+            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+        }
+        return XXH3_avalanche(acc + acc_end);
+    }
+}
+
+
+/* ======= Long Keys ======= */
+
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+
+#ifdef XXH_OLD_NAMES
+# define STRIPE_LEN XXH_STRIPE_LEN
+# define ACC_NB XXH_ACC_NB
+#endif
+
+#ifndef XXH_PREFETCH_DIST
+# ifdef __clang__
+# define XXH_PREFETCH_DIST 320
+# else
+# if (XXH_VECTOR == XXH_AVX512)
+# define XXH_PREFETCH_DIST 512
+# else
+# define XXH_PREFETCH_DIST 384
+# endif
+# endif /* __clang__ */
+#endif /* XXH_PREFETCH_DIST */
+
+/*
+ * This macro generates an XXH3_accumulate() function.
+ * The `name` argument selects the symbol suffix; any target attribute
+ * (e.g. XXH_TARGET_AVX512) is applied at the instantiation site.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>() once per 64-byte stripe, prefetching
+ * XXH_PREFETCH_DIST bytes ahead of the current stripe.
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
+void                                                        \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
+                       const xxh_u8* XXH_RESTRICT input,    \
+                       const xxh_u8* XXH_RESTRICT secret,   \
+                       size_t nbStripes)                    \
+{                                                           \
+    size_t n;                                               \
+    for (n = 0; n < nbStripes; n++ ) {                      \
+        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
+        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
+        XXH3_accumulate_512_##name(                         \
+                 acc,                                       \
+                 in,                                        \
+                 secret + n*XXH_SECRET_CONSUME_RATE);       \
+    }                                                       \
+}
+
+
+/* Stores v64 to dst in little-endian byte order, swapping on big-endian
+ * hosts; XXH_memcpy keeps the unaligned store well-defined. */
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+    XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ typedef int64_t xxh_i64;
+#else
+ /* the following type must have a width of 64-bit */
+ typedef long long xxh_i64;
+#endif
+
+
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
+
+#if (XXH_VECTOR == XXH_AVX512) \
+ || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
+
+#ifndef XXH_TARGET_AVX512
+# define XXH_TARGET_AVX512 /* disable attribute target */
+#endif
+
+/* AVX-512 stripe accumulator: one 512-bit vector covers the whole 64-byte
+ * stripe. `acc` must be 64-byte aligned (asserted below); input/secret loads
+ * are unaligned. */
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
+                     const void* XXH_RESTRICT input,
+                     const void* XXH_RESTRICT secret)
+{
+    __m512i* const xacc = (__m512i *) acc;
+    XXH_ASSERT((((size_t)acc) & 63) == 0);
+    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+
+    {
+        /* data_vec    = input[0]; */
+        __m512i const data_vec    = _mm512_loadu_si512   (input);
+        /* key_vec     = secret[0]; */
+        __m512i const key_vec     = _mm512_loadu_si512   (secret);
+        /* data_key    = data_vec ^ key_vec; */
+        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
+        /* data_key_lo = data_key >> 32; */
+        __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
+        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
+        /* xacc[0] += swap(data_vec); */
+        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
+        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
+        /* xacc[0] += product; */
+        *xacc = _mm512_add_epi64(product, sum);
+    }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
+
+/*
+ * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
+ *
+ * Multiplication isn't perfect, as explained by Google in HighwayHash:
+ *
+ * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
+ * // varying degrees. In descending order of goodness, bytes
+ * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
+ * // As expected, the upper and lower bytes are much worse.
+ *
+ * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
+ *
+ * Since our algorithm uses a pseudorandom secret to add some variance into the
+ * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
+ *
+ * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
+ * extraction.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
+
+/* AVX-512 scramble: per 64-bit lane computes
+ * acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1, with the three-way
+ * XOR fused into one vpternlogd (truth table 0x96). */
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 63) == 0);
+    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+    {   __m512i* const xacc = (__m512i*) acc;
+        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
+
+        /* xacc[0] ^= (xacc[0] >> 47) */
+        __m512i const acc_vec     = *xacc;
+        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
+        /* xacc[0] ^= secret; */
+        __m512i const key_vec     = _mm512_loadu_si512   (secret);
+        __m512i const data_key    = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
+
+        /* xacc[0] *= XXH_PRIME32_1; */
+        __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
+        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
+        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
+        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
+    }
+}
+
+/* AVX-512 custom-secret derivation: customSecret = kSecret + seed on even
+ * 64-bit lanes and kSecret - seed on odd lanes (lane mask 0xAA selects the
+ * subtracted lanes). Both buffers must be 64-byte aligned. */
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
+    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
+    XXH_ASSERT(((size_t)customSecret & 63) == 0);
+    (void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
+        __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
+        __m512i const seed     = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
+
+        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
+              __m512i* const dest = (      __m512i*) customSecret;
+        int i;
+        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dest & 63) == 0);
+        for (i=0; i < nbRounds; ++i) {
+            dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
+    }   }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_AVX2) \
+ || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
+
+#ifndef XXH_TARGET_AVX2
+# define XXH_TARGET_AVX2 /* disable attribute target */
+#endif
+
+/* AVX2 stripe accumulator: two 256-bit vectors per 64-byte stripe.
+ * `acc` must be 32-byte aligned; input/secret loads are unaligned. */
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
+                    const void* XXH_RESTRICT input,
+                    const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 31) == 0);
+    {   __m256i* const xacc    =       (__m256i *) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm256_loadu_si256 requires  a const __m256i * pointer for some reason. */
+        const         __m256i* const xinput  = (const __m256i *) input;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+        const         __m256i* const xsecret = (const __m256i *) secret;
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* data_vec    = xinput[i]; */
+            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
+            /* key_vec     = xsecret[i]; */
+            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
+            /* data_key    = data_vec ^ key_vec; */
+            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
+            /* data_key_lo = data_key >> 32; */
+            __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
+            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
+            /* xacc[i] += swap(data_vec); */
+            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
+            /* xacc[i] += product; */
+            xacc[i] = _mm256_add_epi64(product, sum);
+    }   }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+/* AVX2 scramble: per 64-bit lane computes
+ * acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1 across two vectors. */
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 31) == 0);
+    {   __m256i* const xacc = (__m256i*) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+        const         __m256i* const xsecret = (const __m256i *) secret;
+        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m256i const acc_vec     = xacc[i];
+            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
+            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
+            /* xacc[i] ^= xsecret; */
+            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
+            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
+            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
+            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
+            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+/* AVX2 custom-secret derivation: adds (+seed, -seed) alternating per 64-bit
+ * lane to kSecret; loop is unrolled by hand for GCC -O2 (see below). */
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+    (void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
+    XXH_PREFETCH(customSecret);
+    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+
+        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
+              __m256i*       dest = (      __m256i*) customSecret;
+
+#       if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dest' as modified keeps the compiler from
+         * extracting the secret from SIMD registers in the internal loop,
+         * and makes it use fewer common registers, avoiding spills to the stack.
+         */
+        XXH_COMPILER_GUARD(dest);
+#       endif
+        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dest & 31) == 0);
+
+        /* GCC -O2 need unroll loop manually */
+        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+        dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
+    }
+}
+
+#endif
+
+/* x86dispatch always generates SSE2 */
+#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
+
+#ifndef XXH_TARGET_SSE2
+# define XXH_TARGET_SSE2 /* disable attribute target */
+#endif
+
+/* SSE2 stripe accumulator: four 128-bit vectors per 64-byte stripe.
+ * `acc` must be 16-byte aligned; input/secret loads are unaligned. */
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
+                    const void* XXH_RESTRICT input,
+                    const void* XXH_RESTRICT secret)
+{
+    /* SSE2 is just a half-scale version of the AVX2 version. */
+    XXH_ASSERT((((size_t)acc) & 15) == 0);
+    {   __m128i* const xacc    =       (__m128i *) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+        const         __m128i* const xinput  = (const __m128i *) input;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+        const         __m128i* const xsecret = (const __m128i *) secret;
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+            /* data_vec    = xinput[i]; */
+            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
+            /* key_vec     = xsecret[i]; */
+            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
+            /* data_key    = data_vec ^ key_vec; */
+            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
+            /* data_key_lo = data_key >> 32; */
+            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
+            /* xacc[i] += swap(data_vec); */
+            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
+            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
+            /* xacc[i] += product; */
+            xacc[i] = _mm_add_epi64(product, sum);
+    }   }
+}
+XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
+
+/* SSE2 scramble: per 64-bit lane computes
+ * acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1 across four vectors. */
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 15) == 0);
+    {   __m128i* const xacc = (__m128i*) acc;
+        /* Unaligned. This is mainly for pointer arithmetic, and because
+         * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+        const         __m128i* const xsecret = (const __m128i *) secret;
+        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+        size_t i;
+        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47) */
+            __m128i const acc_vec     = xacc[i];
+            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
+            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
+            /* xacc[i] ^= xsecret[i]; */
+            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
+            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
+
+            /* xacc[i] *= XXH_PRIME32_1; */
+            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
+            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
+            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+        }
+    }
+}
+
+/* SSE2 custom-secret derivation: adds (+seed, -seed) alternating per 64-bit
+ * lane to kSecret; includes a workaround for pre-2015 32-bit MSVC. */
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+    (void)(&XXH_writeLE64);  /* suppress unused-function warning on this path */
+    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+        /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+#       else
+        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+#       endif
+        int i;
+
+        const void* const src16 = XXH3_kSecret;
+        __m128i* dst16 = (__m128i*) customSecret;
+#       if defined(__GNUC__) || defined(__clang__)
+        /*
+         * On GCC & Clang, marking 'dst16' as modified keeps the compiler from
+         * extracting the secret from SIMD registers in the internal loop,
+         * and makes it use fewer common registers, avoiding spills to the stack.
+         */
+        XXH_COMPILER_GUARD(dst16);
+#       endif
+        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+        XXH_ASSERT(((size_t)dst16 & 15) == 0);
+
+        for (i=0; i < nbRounds; ++i) {
+            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+    }   }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_NEON)
+
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret, size_t lane);
+
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can have up to 15% speedup depending on the
+ * CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers instead of the other platforms which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the most optimal amount of lanes for Cortexes is 6,
+ * there needs to be *three* versions of the accumulate operation used
+ * for the remaining 2 lanes.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
+ */
+
+/* NEON/WASM-SIMD128 stripe accumulator; see the block comment above for the
+ * partial-scalar split controlled by XXH3_NEON_LANES. */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
+                    const void* XXH_RESTRICT input,
+                    const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 15) == 0);
+    XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
+    {   /* GCC for darwin arm64 does not like aliasing here */
+        xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
+        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
+        uint8_t const* xinput = (const uint8_t *) input;
+        uint8_t const* xsecret  = (const uint8_t *) secret;
+
+        size_t i;
+#ifdef __wasm_simd128__
+        /*
+         * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
+         * is constant propagated, which results in it converting it to this
+         * inside the loop:
+         *
+         *    a = v128.load(XXH3_kSecret +  0 + $secret_offset, offset = 0)
+         *    b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
+         *    ...
+         *
+         * This requires a full 32-bit address immediate (and therefore a 6 byte
+         * instruction) as well as an add for each offset.
+         *
+         * Putting an asm guard prevents it from folding (at the cost of losing
+         * the alignment hint), and uses the free offset in `v128.load` instead
+         * of adding secret_offset each time which overall reduces code size by
+         * about a kilobyte and improves performance.
+         */
+        XXH_COMPILER_GUARD(xsecret);
+#endif
+        /* Scalar lanes use the normal scalarRound routine */
+        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+            XXH3_scalarRound(acc, input, secret, i);
+        }
+        i = 0;
+        /* 4 NEON lanes at a time. */
+        for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
+            /* data_vec = xinput[i]; */
+            uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput  + (i * 16));
+            uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput  + ((i+1) * 16));
+            /* key_vec  = xsecret[i];  */
+            uint64x2_t key_vec_1  = XXH_vld1q_u64(xsecret + (i * 16));
+            uint64x2_t key_vec_2  = XXH_vld1q_u64(xsecret + ((i+1) * 16));
+            /* data_swap = swap(data_vec) */
+            uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
+            uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
+            /* data_key = data_vec ^ key_vec; */
+            uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
+            uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
+
+            /*
+             * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
+             * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
+             * get one vector with the low 32 bits of each lane, and one vector
+             * with the high 32 bits of each lane.
+             *
+             * The intrinsic returns a double vector because the original ARMv7-a
+             * instruction modified both arguments in place. AArch64 and SIMD128 emit
+             * two instructions from this intrinsic.
+             *
+             * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
+             * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
+             */
+            uint32x4x2_t unzipped = vuzpq_u32(
+                vreinterpretq_u32_u64(data_key_1),
+                vreinterpretq_u32_u64(data_key_2)
+            );
+            /* data_key_lo = data_key & 0xFFFFFFFF */
+            uint32x4_t data_key_lo = unzipped.val[0];
+            /* data_key_hi = data_key >> 32 */
+            uint32x4_t data_key_hi = unzipped.val[1];
+            /*
+             * Then, we can split the vectors horizontally and multiply which, as for most
+             * widening intrinsics, have a variant that works on both high half vectors
+             * for free on AArch64. A similar instruction is available on SIMD128.
+             *
+             * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
+             */
+            uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
+            uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
+            /*
+             * Clang reorders
+             *    a += b * c;     // umlal   swap.2d, dkl.2s, dkh.2s
+             *    c += a;         // add     acc.2d, acc.2d, swap.2d
+             * to
+             *    c += a;         // add     acc.2d, acc.2d, swap.2d
+             *    c += b * c;     // umlal   acc.2d, dkl.2s, dkh.2s
+             *
+             * While it would make sense in theory since the addition is faster,
+             * for reasons likely related to umlal being limited to certain NEON
+             * pipelines, this is worse. A compiler guard fixes this.
+             */
+            XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
+            XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
+            /* xacc[i] = acc_vec + sum; */
+            xacc[i]   = vaddq_u64(xacc[i], sum_1);
+            xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
+        }
+        /* Operate on the remaining NEON lanes 2 at a time. */
+        for (; i < XXH3_NEON_LANES / 2; i++) {
+            /* data_vec = xinput[i]; */
+            uint64x2_t data_vec = XXH_vld1q_u64(xinput  + (i * 16));
+            /* key_vec  = xsecret[i];  */
+            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
+            /* acc_vec_2 = swap(data_vec) */
+            uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
+            /* data_key = data_vec ^ key_vec; */
+            uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+            /* For two lanes, just use VMOVN and VSHRN. */
+            /* data_key_lo = data_key & 0xFFFFFFFF; */
+            uint32x2_t data_key_lo = vmovn_u64(data_key);
+            /* data_key_hi = data_key >> 32; */
+            uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
+            /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
+            uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
+            /* Same Clang workaround as before */
+            XXH_COMPILER_GUARD_CLANG_NEON(sum);
+            /* xacc[i] = acc_vec + sum; */
+            xacc[i] = vaddq_u64 (xacc[i], sum);
+        }
+    }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
+
+/* NEON/WASM scramble: same per-lane recurrence as the other backends,
+ * acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1, with the multiply
+ * decomposed for NEON's 32x32->64 widening instructions. */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+    {   xxh_aliasing_uint64x2_t* xacc       = (xxh_aliasing_uint64x2_t*) acc;
+        uint8_t const* xsecret = (uint8_t const*) secret;
+
+        size_t i;
+        /* WASM uses operator overloads and doesn't need these. */
+#ifndef __wasm_simd128__
+        /* { prime32_1, prime32_1 } */
+        uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
+        /* { 0, prime32_1, 0, prime32_1 } */
+        uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
+#endif
+
+        /* AArch64 uses both scalar and neon at the same time */
+        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+            XXH3_scalarScrambleRound(acc, secret, i);
+        }
+        for (i=0; i < XXH3_NEON_LANES / 2; i++) {
+            /* xacc[i] ^= (xacc[i] >> 47); */
+            uint64x2_t acc_vec  = xacc[i];
+            uint64x2_t shifted  = vshrq_n_u64(acc_vec, 47);
+            uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
+
+            /* xacc[i] ^= xsecret[i]; */
+            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
+            uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+            /* xacc[i] *= XXH_PRIME32_1 */
+#ifdef __wasm_simd128__
+            /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
+            xacc[i] = data_key * XXH_PRIME32_1;
+#else
+            /*
+             * Expanded version with portable NEON intrinsics
+             *
+             *    lo(x) * lo(y) + (hi(x) * lo(y) << 32)
+             *
+             * prod_hi = hi(data_key) * lo(prime) << 32
+             *
+             * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector
+             * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
+             * and avoid the shift.
+             */
+            uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
+            /* Extract low bits for vmlal_u32  */
+            uint32x2_t data_key_lo = vmovn_u64(data_key);
+            /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
+            xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
+#endif
+        }
+    }
+}
+#endif
+
+#if (XXH_VECTOR == XXH_VSX)
+
+/* POWER VSX / z/Arch stripe accumulator: four 128-bit vectors per stripe.
+ * vec_rl by 32 swaps the 32-bit halves of each lane, giving the odd-multiply
+ * operands; the high/low 64-bit swap of data_vec uses vec_permi on s390x and
+ * vec_xxpermdi on POWER. */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
+                    const void* XXH_RESTRICT input,
+                    const void* XXH_RESTRICT secret)
+{
+    /* presumed aligned */
+    xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+    xxh_u8 const* const xinput   = (xxh_u8 const*) input;   /* no alignment restriction */
+    xxh_u8 const* const xsecret  = (xxh_u8 const*) secret;    /* no alignment restriction */
+    xxh_u64x2 const v32 = { 32, 32 };
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+        /* data_vec = xinput[i]; */
+        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
+        /* key_vec = xsecret[i]; */
+        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
+        xxh_u64x2 const data_key = data_vec ^ key_vec;
+        /* shuffled = (data_key << 32) | (data_key >> 32); */
+        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
+        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
+        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
+        /* acc_vec = xacc[i]; */
+        xxh_u64x2 acc_vec        = xacc[i];
+        acc_vec += product;
+
+        /* swap high and low halves */
+#ifdef __s390x__
+        acc_vec += vec_permi(data_vec, data_vec, 2);
+#else
+        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
+#endif
+        xacc[i] = acc_vec;
+    }
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
+
+/* VSX scramble: acc = ((acc ^ (acc >> 47)) ^ secret) * XXH_PRIME32_1 per
+ * 64-bit lane, built from even/odd 32-bit multiplies. */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+    XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+    {   xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+        const xxh_u8* const xsecret = (const xxh_u8*) secret;
+        /* constants */
+        xxh_u64x2 const v32  = { 32, 32 };
+        xxh_u64x2 const v47 = { 47, 47 };
+        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
+        size_t i;
+        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+            /* xacc[i] ^= (xacc[i] >> 47); */
+            xxh_u64x2 const acc_vec  = xacc[i];
+            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
+
+            /* xacc[i] ^= xsecret[i]; */
+            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
+            xxh_u64x2 const data_key = data_vec ^ key_vec;
+
+            /* xacc[i] *= XXH_PRIME32_1 */
+            /* prod_even = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
+            xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
+            /* prod_odd = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
+            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
+            xacc[i] = prod_odd + (prod_even << v32);
+    }   }
+}
+
+#endif
+
+#if (XXH_VECTOR == XXH_SVE)
+
+/* SVE stripe accumulator: the accumulator is split across 1, 2 or 4 SVE
+ * registers depending on the hardware vector length (svcntd() = 64-bit
+ * elements per vector).
+ * NOTE(review): xinput, xsecret and kSwap appear unused here but are
+ * presumably consumed by the ACCRND() macro defined elsewhere in this file --
+ * confirm before refactoring. */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
+                   const void* XXH_RESTRICT input,
+                   const void* XXH_RESTRICT secret)
+{
+    uint64_t *xacc = (uint64_t *)acc;
+    const uint64_t *xinput = (const uint64_t *)(const void *)input;
+    const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+    svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+    uint64_t element_count = svcntd();
+    if (element_count >= 8) {
+        svbool_t mask = svptrue_pat_b64(SV_VL8);
+        svuint64_t vacc = svld1_u64(mask, xacc);
+        ACCRND(vacc, 0);
+        svst1_u64(mask, xacc, vacc);
+    } else if (element_count == 2) {   /* sve128 */
+        svbool_t mask = svptrue_pat_b64(SV_VL2);
+        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+        svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+        svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+        svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+        ACCRND(acc0, 0);
+        ACCRND(acc1, 2);
+        ACCRND(acc2, 4);
+        ACCRND(acc3, 6);
+        svst1_u64(mask, xacc + 0, acc0);
+        svst1_u64(mask, xacc + 2, acc1);
+        svst1_u64(mask, xacc + 4, acc2);
+        svst1_u64(mask, xacc + 6, acc3);
+    } else {
+        svbool_t mask = svptrue_pat_b64(SV_VL4);
+        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+        svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+        ACCRND(acc0, 0);
+        ACCRND(acc1, 4);
+        svst1_u64(mask, xacc + 0, acc0);
+        svst1_u64(mask, xacc + 4, acc1);
+    }
+}
+
+/* SVE multi-stripe accumulate: keeps the accumulators resident in SVE
+ * registers across all nbStripes rounds (instead of using
+ * XXH3_ACCUMULATE_TEMPLATE, which would reload them per stripe), advancing
+ * input by 64 bytes and secret by 8 bytes per round, with a streaming
+ * prefetch 1 KiB ahead.
+ * NOTE(review): ACCRND() is a macro defined elsewhere in this file and
+ * implicitly uses xinput/xsecret/kSwap -- confirm before refactoring. */
+XXH_FORCE_INLINE void
+XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
+               const xxh_u8* XXH_RESTRICT input,
+               const xxh_u8* XXH_RESTRICT secret,
+               size_t nbStripes)
+{
+    if (nbStripes != 0) {
+        uint64_t *xacc = (uint64_t *)acc;
+        const uint64_t *xinput = (const uint64_t *)(const void *)input;
+        const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+        svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+        uint64_t element_count = svcntd();
+        if (element_count >= 8) {
+            svbool_t mask = svptrue_pat_b64(SV_VL8);
+            svuint64_t vacc = svld1_u64(mask, xacc + 0);
+            do {
+                /* svprfd(svbool_t, void *, enum svfprop); */
+                svprfd(mask, xinput + 128, SV_PLDL1STRM);
+                ACCRND(vacc, 0);
+                xinput += 8;
+                xsecret += 1;
+                nbStripes--;
+            } while (nbStripes != 0);
+
+            svst1_u64(mask, xacc + 0, vacc);
+        } else if (element_count == 2) {   /* sve128 */
+            svbool_t mask = svptrue_pat_b64(SV_VL2);
+            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+            svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+            svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+            svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+            do {
+                svprfd(mask, xinput + 128, SV_PLDL1STRM);
+                ACCRND(acc0, 0);
+                ACCRND(acc1, 2);
+                ACCRND(acc2, 4);
+                ACCRND(acc3, 6);
+                xinput += 8;
+                xsecret += 1;
+                nbStripes--;
+            } while (nbStripes != 0);
+
+            svst1_u64(mask, xacc + 0, acc0);
+            svst1_u64(mask, xacc + 2, acc1);
+            svst1_u64(mask, xacc + 4, acc2);
+            svst1_u64(mask, xacc + 6, acc3);
+        } else {
+            svbool_t mask = svptrue_pat_b64(SV_VL4);
+            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+            svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+            do {
+                svprfd(mask, xinput + 128, SV_PLDL1STRM);
+                ACCRND(acc0, 0);
+                ACCRND(acc1, 4);
+                xinput += 8;
+                xsecret += 1;
+                nbStripes--;
+            } while (nbStripes != 0);
+
+            svst1_u64(mask, xacc + 0, acc0);
+            svst1_u64(mask, xacc + 4, acc1);
+        }
+    }
+}
+
+#endif
+
+/* scalar variants - universal */
+
+#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
+/*
+ * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
+ * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
+ *
+ * While this might not seem like much, as AArch64 is a 64-bit architecture, only
+ * big Cortex designs have a full 64-bit multiplier.
+ *
+ * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
+ * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
+ * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
+ *
+ * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
+ * not have this penalty and does the mask automatically.
+ */
+/* Computes (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF) + acc as a single UMADDL
+ * instruction (32x32->64 multiply-add; the masking is implicit in %w). */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ xxh_u64 ret;
+ /* note: %x = 64-bit register, %w = 32-bit register */
+ __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
+ return ret;
+}
+#else
+/* Portable fallback: 32x32->64 multiply of the low halves, plus acc. */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
+}
+#endif
+
+/*!
+ * @internal
+ * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* xacc = (xxh_u64*) acc;
+ xxh_u8 const* xinput = (xxh_u8 const*) input;
+ xxh_u8 const* xsecret = (xxh_u8 const*) secret;
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
+ {
+ xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
+ xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
+ /* lane^1 != lane, so the two stores below never alias: the raw input
+ * goes to the partner lane, the keyed product to this lane. */
+ xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
+ /* low32(data_key) * high32(data_key) + previous accumulator */
+ xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
+ }
+}
+
+/*!
+ * @internal
+ * @brief Processes a 64 byte block of data using the scalar path.
+ */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__arm__) || defined(__thumb2__)) \
+ && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
+ && XXH_SIZE_OPT <= 0
+# pragma GCC unroll 8
+#endif
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+}
+/* Instantiates XXH3_accumulate_scalar(), the multi-stripe driver built on
+ * top of XXH3_accumulate_512_scalar(). */
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
+
+/*!
+ * @internal
+ * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
+ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
+ XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ {
+ xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+ xxh_u64 acc64 = xacc[lane];
+ acc64 = XXH_xorshift64(acc64, 47);
+ acc64 ^= key64;
+ acc64 *= XXH_PRIME32_1;
+ xacc[lane] = acc64;
+ }
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+}
+
+/*!
+ * @internal
+ * @brief Derives a seeded secret from the default one: per 16-byte chunk,
+ * the seed is added to the low 8 bytes and subtracted from the high 8 bytes.
+ * @param customSecret Output buffer of XXH_SECRET_DEFAULT_SIZE bytes.
+ * @param seed64 Seed folded into the secret.
+ */
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ /*
+ * We need a separate pointer for the hack below,
+ * which requires a non-const pointer.
+ * Any decent compiler will optimize this out otherwise.
+ */
+ const xxh_u8* kSecretPtr = XXH3_kSecret;
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+#if defined(__GNUC__) && defined(__aarch64__)
+ /*
+ * UGLY HACK:
+ * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+ * placed sequentially, in order, at the top of the unrolled loop.
+ *
+ * While MOVK is great for generating constants (2 cycles for a 64-bit
+ * constant compared to 4 cycles for LDR), it fights for bandwidth with
+ * the arithmetic instructions.
+ *
+ * I L S
+ * MOVK
+ * MOVK
+ * MOVK
+ * MOVK
+ * ADD
+ * SUB STR
+ * STR
+ * By forcing loads from memory (as the asm line causes the compiler to assume
+ * that XXH3_kSecretPtr has been changed), the pipelines are used more
+ * efficiently:
+ * I L S
+ * LDR
+ * ADD LDR
+ * SUB STR
+ * STR
+ *
+ * See XXH3_NEON_LANES for details on the pipeline.
+ *
+ * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+ * without hack: 2654.4 MB/s
+ * with hack: 3202.9 MB/s
+ */
+ XXH_COMPILER_GUARD(kSecretPtr);
+#endif
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+ int i;
+ for (i=0; i < nbRounds; i++) {
+ /*
+ * The asm hack causes the compiler to assume that kSecretPtr aliases with
+ * customSecret, and on aarch64, this prevented LDP from merging two
+ * loads together for free. Putting the loads together before the stores
+ * properly generates LDP.
+ */
+ xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
+ xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
+ } }
+}
+
+
+/* Function-pointer types used to thread a specific accumulate / scramble /
+ * init-secret implementation through the generic hashing loops. */
+typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
+typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
+typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
+
+
+/* Compile-time dispatch: bind the generic XXH3_* entry points to the
+ * implementation selected by XXH_VECTOR. NEON/VSX/SVE keep the scalar
+ * secret initialisation (and, for SVE, the scalar scramble). */
+#if (XXH_VECTOR == XXH_AVX512)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
+#define XXH3_accumulate XXH3_accumulate_avx512
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
+
+#elif (XXH_VECTOR == XXH_AVX2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
+#define XXH3_accumulate XXH3_accumulate_avx2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
+
+#elif (XXH_VECTOR == XXH_SSE2)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
+#define XXH3_accumulate XXH3_accumulate_sse2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
+
+#elif (XXH_VECTOR == XXH_NEON)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_neon
+#define XXH3_accumulate XXH3_accumulate_neon
+#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_VSX)
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
+#define XXH3_accumulate XXH3_accumulate_vsx
+#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#elif (XXH_VECTOR == XXH_SVE)
+#define XXH3_accumulate_512 XXH3_accumulate_512_sve
+#define XXH3_accumulate XXH3_accumulate_sve
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#else /* scalar */
+
+#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
+#define XXH3_accumulate XXH3_accumulate_scalar
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+#endif
+
+#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
+# undef XXH3_initCustomSecret
+# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+#endif
+
+/*
+ * Core long-input loop: accumulate full blocks (scrambling between them),
+ * then the last partial block, then one final overlapping stripe taken from
+ * the very end of the input.
+ */
+XXH_FORCE_INLINE void
+XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+ size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+ /* (len - 1): even when len is an exact multiple of block_len, the final
+ * stripe is taken from the tail below, so the last block stays partial. */
+ size_t const nb_blocks = (len - 1) / block_len;
+
+ size_t n;
+
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+
+ for (n = 0; n < nb_blocks; n++) {
+ f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
+ f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
+ }
+
+ /* last partial block */
+ XXH_ASSERT(len > XXH_STRIPE_LEN);
+ { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+ XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
+ f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
+
+ /* last stripe */
+ { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
+#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
+ XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+ } }
+}
+
+/* Folds two accumulator lanes and 16 bytes of secret into one 64-bit value
+ * via a 128-bit multiply folded to 64 bits. */
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+ xxh_u64 const lo = acc[0] ^ XXH_readLE64(secret);
+ xxh_u64 const hi = acc[1] ^ XXH_readLE64(secret+8);
+ return XXH3_mul128_fold64(lo, hi);
+}
+
+/*
+ * Folds the 8 accumulator lanes (4 pairs) plus a starting value into the
+ * final avalanched 64-bit hash.
+ */
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+ xxh_u64 result64 = start;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++) {
+ result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+ * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+ * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+ * XXH3_64bits, len == 256, Snapdragon 835:
+ * without hack: 2063.7 MB/s
+ * with hack: 2560.7 MB/s
+ */
+ XXH_COMPILER_GUARD(result64);
+#endif
+ }
+
+ return XXH3_avalanche(result64);
+}
+
+/* Initial values for the 8 accumulator lanes (a mix of 32- and 64-bit primes). */
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+ XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
+
+/*
+ * Shared long-input 64-bit hash: run the accumulate loop over @p input, then
+ * merge the accumulators, mixing in len*PRIME64_1 as the starting value.
+ */
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+ const void* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
+}
+
+/*
+ * It's important for performance to transmit secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ */
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, easier to the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of vector loop.
+ */
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * XXH3_hashLong_64b_withSeed():
+ * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
+ * and then use this key for long mode hashing.
+ *
+ * This operation is decently fast but nonetheless costs a little bit of time.
+ * Try to avoid it whenever possible (typically when seed==0).
+ *
+ * It's important for performance that XXH3_hashLong is not inlined. Not sure
+ * why (uop cache maybe?), but the difference is large and easily measurable.
+ */
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
+ XXH64_hash_t seed,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+#if XXH_SIZE_OPT <= 0
+ if (seed == 0)
+ return XXH3_hashLong_64b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+#endif
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed);
+ return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+
+/* Signature shared by all long-input 64-bit strategies. */
+typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
+
+/*
+ * Central 64-bit dispatcher: picks the short-input specialization by length,
+ * or delegates to @p f_hashLong above XXH3_MIDSIZE_MAX.
+ */
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong64_f f_hashLong)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secretLen` condition is not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ * Also, note that function signature doesn't offer room to return an error.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
+}
+
+
+/* === Public entry point === */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
+{
+ /* seedless variant: built-in secret, default long-input strategy */
+ return XXH3_64bits_internal(input, length, 0,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_64b_default);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ /* custom-secret variant, seed fixed to 0 */
+ return XXH3_64bits_internal(input, length, 0,
+ secret, secretSize,
+ XXH3_hashLong_64b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+ /* seeded variant over the built-in secret */
+ return XXH3_64bits_internal(input, length, seed,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_64b_withSeed);
+}
+
+/* Combined variant: mid-size inputs are hashed with the seed over the
+ * *default* secret (the custom secret is not used below XXH3_MIDSIZE_MAX);
+ * longer inputs use the custom secret and, per XXH3_hashLong_64b_withSecret,
+ * ignore the seed. f_hashLong is NULL because that path is never reached. */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (length <= XXH3_MIDSIZE_MAX)
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
+}
+
+
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * Malloc's a pointer that is always aligned to align.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
+ * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
+ * or on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: Functions like posix_memalign or _mm_malloc
+ * are avoided: To maintain portability, we would have to write a fallback
+ * like this anyways, and besides, testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: Overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+ XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+ XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
+ XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
+ { /* Overallocate to make room for manual realignment and an offset byte */
+ xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+ if (base != NULL) {
+ /*
+ * Get the offset needed to align this pointer.
+ *
+ * Even if the returned pointer is aligned, there will always be
+ * at least one byte to store the offset to the original pointer.
+ */
+ size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+ /* offset is in [1, align], so ptr-1 always stays inside the block */
+ /* Add the offset for the now-aligned pointer */
+ xxh_u8* ptr = base + offset;
+
+ XXH_ASSERT((size_t)ptr % align == 0);
+
+ /* Store the offset immediately before the returned pointer. */
+ ptr[-1] = (xxh_u8)offset;
+ return ptr;
+ }
+ return NULL;
+ }
+}
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void* p)
+{
+ if (p != NULL) {
+ xxh_u8* ptr = (xxh_u8*)p;
+ /* Get the offset byte we added in XXH_malloc. */
+ xxh_u8 offset = ptr[-1];
+ /* Free the original malloc'd pointer */
+ xxh_u8* base = ptr - offset;
+ XXH_free(base);
+ }
+}
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * Must be freed with XXH3_freeState().
+ * @return An allocated XXH3_state_t on success, `NULL` on failure.
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+ /* 64-byte alignment satisfies the widest SIMD access on the accumulators */
+ XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+ if (state != NULL)
+ XXH3_INITSTATE(state);
+ return state;
+}
+
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Frees an @ref XXH3_state_t.
+ *
+ * Must be allocated with XXH3_createState().
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @return XXH_OK.
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
+{
+ /* XXH_alignedFree() accepts NULL, so a NULL statePtr is a no-op */
+ XXH_alignedFree(statePtr);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+/* Shallow copy: both states end up referencing the same extSecret buffer,
+ * if one was set. */
+XXH_PUBLIC_API void
+XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
+{
+ XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
+}
+
+/*
+ * (Re)initializes a streaming state: zeroes the streaming bookkeeping region
+ * (bufferedSize..nbStripesPerBlock, by struct layout), seeds the accumulators
+ * with the XXH3_INIT_ACC constants, and records the secret configuration.
+ * @param secret NULL means "use statePtr->customSecret".
+ */
+static void
+XXH3_reset_internal(XXH3_state_t* statePtr,
+ XXH64_hash_t seed,
+ const void* secret, size_t secretSize)
+{
+ size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+ size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+ XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+ XXH_ASSERT(statePtr != NULL);
+ /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+ memset((char*)statePtr + initStart, 0, initLength);
+ statePtr->acc[0] = XXH_PRIME32_3;
+ statePtr->acc[1] = XXH_PRIME64_1;
+ statePtr->acc[2] = XXH_PRIME64_2;
+ statePtr->acc[3] = XXH_PRIME64_3;
+ statePtr->acc[4] = XXH_PRIME64_4;
+ statePtr->acc[5] = XXH_PRIME32_2;
+ statePtr->acc[6] = XXH_PRIME64_5;
+ statePtr->acc[7] = XXH_PRIME32_1;
+ statePtr->seed = seed;
+ statePtr->useSeed = (seed != 0);
+ statePtr->extSecret = (const unsigned char*)secret;
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+ statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ if (statePtr == NULL)
+ return XXH_ERROR;
+ /* seed 0 + built-in secret == the plain XXH3_64bits() configuration */
+ XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+/*
+ * Resets the streaming state with a caller-provided secret (seed 0).
+ * @return XXH_ERROR if statePtr/secret is NULL or the secret is too short.
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ /* Validate the secret *before* resetting: XXH3_reset_internal() asserts
+ * secretSize >= XXH3_SECRET_SIZE_MIN and computes
+ * secretLimit = secretSize - XXH_STRIPE_LEN, which underflows for a
+ * too-short secret. The previous ordering reset the state first and only
+ * then rejected invalid arguments, leaving the state clobbered. */
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, secret, secretSize);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ /* seed 0 is exactly the default configuration */
+ if (seed==0) return XXH3_64bits_reset(statePtr);
+ /* regenerate the seeded secret only when it is not already cached from a
+ * previous reset with the same seed */
+ if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
+ XXH3_initCustomSecret(statePtr->customSecret, seed);
+ /* secret==NULL => subsequent hashing uses statePtr->customSecret */
+ XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
+{
+ /* reject a missing state, a missing secret, or a too-short secret */
+ if (statePtr == NULL || secret == NULL || secretSize < XXH3_SECRET_SIZE_MIN)
+ return XXH_ERROR;
+ XXH3_reset_internal(statePtr, seed64, secret, secretSize);
+ statePtr->useSeed = 1; /* always, even if seed64==0 */
+ return XXH_OK;
+}
+
+/*!
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block*
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+ size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+ const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ /* resume where the previous call left off inside the secret */
+ const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+ /* Process full blocks */
+ if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+ /* Process the initial partial block... */
+ size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+ do {
+ /* Accumulate and scramble */
+ f_acc(acc, input, initialSecret, nbStripesThisIter);
+ f_scramble(acc, secret + secretLimit);
+ input += nbStripesThisIter * XXH_STRIPE_LEN;
+ nbStripes -= nbStripesThisIter;
+ /* Then continue the loop with the full block size */
+ nbStripesThisIter = nbStripesPerBlock;
+ initialSecret = secret;
+ } while (nbStripes >= nbStripesPerBlock);
+ *nbStripesSoFarPtr = 0;
+ }
+ /* Process a partial block */
+ if (nbStripes > 0) {
+ f_acc(acc, input, initialSecret, nbStripes);
+ input += nbStripes * XXH_STRIPE_LEN;
+ *nbStripesSoFarPtr += nbStripes;
+ }
+ /* Return end pointer */
+ return input;
+}
+
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+# define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
+ }
+
+ XXH_ASSERT(state != NULL);
+ { const xxh_u8* const bEnd = input + len;
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* For some reason, gcc and MSVC seem to suffer greatly
+ * when operating accumulators directly into state.
+ * Operating into stack space seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
+ XXH_memcpy(acc, state->acc, sizeof(acc));
+#else
+ xxh_u64* XXH_RESTRICT const acc = state->acc;
+#endif
+ state->totalLen += len;
+ XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
+
+ /* small input : just fill in tmp buffer */
+ if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
+ XXH_memcpy(state->buffer + state->bufferedSize, input, len);
+ state->bufferedSize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ /* total input is now > XXH3_INTERNALBUFFER_SIZE */
+ #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
+ XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
+
+ /*
+ * Internal buffer is partially filled (always, except at beginning)
+ * Complete it, then consume it.
+ */
+ if (state->bufferedSize) {
+ size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+ XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+ input += loadSize;
+ XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, XXH3_INTERNALBUFFER_STRIPES,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ state->bufferedSize = 0;
+ }
+ XXH_ASSERT(input < bEnd);
+ if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
+ size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
+ input = XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ input, nbStripes,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+
+ }
+ /* Some remaining input (always) : buffer it */
+ XXH_ASSERT(input < bEnd);
+ XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
+ XXH_ASSERT(state->bufferedSize == 0);
+ XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
+ state->bufferedSize = (XXH32_hash_t)(bEnd-input);
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* save stack accumulators into state */
+ XXH_memcpy(state->acc, acc, sizeof(acc));
+#endif
+ }
+
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+/* Streaming ingest: shared XXH3_update() engine with the compile-time
+ * selected accumulate/scramble implementations. */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_update(state, (const xxh_u8*)input, len,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+
+/*
+ * Finishes a long streaming hash into @p acc without mutating @p state:
+ * consumes the stripes still sitting in the buffer, then applies the final
+ * overlapping stripe (reconstructed from the buffer tail when fewer than
+ * XXH_STRIPE_LEN bytes remain).
+ */
+XXH_FORCE_INLINE void
+XXH3_digest_long (XXH64_hash_t* acc,
+ const XXH3_state_t* state,
+ const unsigned char* secret)
+{
+ xxh_u8 lastStripe[XXH_STRIPE_LEN];
+ const xxh_u8* lastStripePtr;
+
+ /*
+ * Digest on a local copy. This way, the state remains unaltered, and it can
+ * continue ingesting more input afterwards.
+ */
+ XXH_memcpy(acc, state->acc, sizeof(state->acc));
+ if (state->bufferedSize >= XXH_STRIPE_LEN) {
+ /* Consume remaining stripes then point to remaining data in buffer */
+ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+ size_t nbStripesSoFar = state->nbStripesSoFar;
+ XXH3_consumeStripes(acc,
+ &nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, nbStripes,
+ secret, state->secretLimit,
+ XXH3_accumulate, XXH3_scrambleAcc);
+ lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
+ } else { /* bufferedSize < XXH_STRIPE_LEN */
+ /* Copy to temp buffer: the catch-up bytes saved at the end of
+ * state->buffer by XXH3_update(), followed by the buffered bytes */
+ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+ XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
+ XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
+ XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+ lastStripePtr = lastStripe;
+ }
+ /* Last stripe */
+ XXH3_accumulate_512(acc,
+ lastStripePtr,
+ secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ /* digest on a copy of the accumulators: the state remains usable */
+ XXH3_digest_long(acc, state, secret);
+ return XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ }
+ /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+ /* short inputs are still entirely buffered: hash them in one shot */
+ if (state->useSeed)
+ return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+
+
+/* ==========================================
+ * XXH3 128 bits (a.k.a XXH128)
+ * ==========================================
+ * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
+ * even without counting the significantly larger output size.
+ *
+ * For example, extra steps are taken to avoid the seed-dependent collisions
+ * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
+ *
+ * This strength naturally comes at the cost of some speed, especially on short
+ * lengths. Note that longer hashes are about as fast as the 64-bit version
+ * due to it using only a slight modification of the 64-bit loop.
+ *
+ * XXH128 is also more oriented towards 64-bit machines. It is still extremely
+ * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
+ */
+
+/*
+ * Hashes inputs of 1 to 3 bytes into a 128-bit result.
+ * The bytes and the length are packed into a single 32-bit word (twice,
+ * with different layouts for the low/high halves), XORed with
+ * secret-derived "bitflip" constants (offset +/- seed), then avalanched.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ /* A doubled version of 1to3_64b with different constants. */
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ /* High half uses a byte-swapped, rotated copy so the two halves differ. */
+ xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+ xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
+ xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+ xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+ XXH128_hash_t h128;
+ h128.low64 = XXH64_avalanche(keyed_lo);
+ h128.high64 = XXH64_avalanche(keyed_hi);
+ return h128;
+ }
+}
+
+/*
+ * Hashes inputs of 4 to 8 bytes into a 128-bit result.
+ * First and last 4 bytes (they may overlap) are combined into one 64-bit
+ * word, keyed with a secret-derived bitflip, then widened with a 64x64->128
+ * multiply and finished with custom mixing.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ /* Fold a swapped copy of the low 32 bits of the seed into its high half. */
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input_lo = XXH_readLE32(input);
+ xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+ xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
+ xxh_u64 const keyed = input_64 ^ bitflip;
+
+ /* Shift len to the left to ensure it is even, this avoids even multiplies. */
+ XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+ m128.high64 += (m128.low64 << 1);
+ m128.low64 ^= (m128.high64 >> 3);
+
+ m128.low64 = XXH_xorshift64(m128.low64, 35);
+ m128.low64 *= PRIME_MX2;
+ m128.low64 = XXH_xorshift64(m128.low64, 28);
+ m128.high64 = XXH3_avalanche(m128.high64);
+ return m128;
+ }
+}
+
+/*
+ * Hashes inputs of 9 to 16 bytes into a 128-bit result.
+ * First and last 8 bytes (they may overlap) are mixed with secret-derived
+ * bitflips and a 64x64->128 multiply, with `len` injected mid-word so it
+ * influences both output halves.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
+ xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
+ xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 input_hi = XXH_readLE64(input + len - 8);
+ XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+ /*
+ * Put len in the middle of m128 to ensure that the length gets mixed to
+ * both the low and high bits in the 128x64 multiply below.
+ */
+ m128.low64 += (xxh_u64)(len - 1) << 54;
+ input_hi ^= bitfliph;
+ /*
+ * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+ * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+ * the high 64 bits of m128.
+ *
+ * The best approach to this operation is different on 32-bit and 64-bit.
+ */
+ if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
+ /*
+ * 32-bit optimized version, which is more readable.
+ *
+ * On 32-bit, it removes an ADC and delays a dependency between the two
+ * halves of m128.high64, but it generates an extra mask on 64-bit.
+ */
+ m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+ } else {
+ /*
+ * 64-bit optimized (albeit more confusing) version.
+ *
+ * Uses some properties of addition and multiplication to remove the mask:
+ *
+ * Let:
+ * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+ * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+ * c = XXH_PRIME32_2
+ *
+ * a + (b * c)
+ * Inverse Property: x + y - x == y
+ * a + (b * (1 + c - 1))
+ * Distributive Property: x * (y + z) == (x * y) + (x * z)
+ * a + (b * 1) + (b * (c - 1))
+ * Identity Property: x * 1 == x
+ * a + b + (b * (c - 1))
+ *
+ * Substitute a, b, and c:
+ * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ *
+ * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+ * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ */
+ m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+ }
+ /* m128 ^= XXH_swap64(m128 >> 64); */
+ m128.low64 ^= XXH_swap64(m128.high64);
+
+ { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+ XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+ h128.high64 += m128.high64 * XXH_PRIME64_2;
+
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = XXH3_avalanche(h128.high64);
+ return h128;
+ } }
+}
+
+/*
+ * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
+ *
+ * Dispatcher for inputs of 0 to 16 bytes: routes to the size-specialized
+ * helpers above. The len==0 case is handled inline: the result depends
+ * only on the seed and two secret-derived bitflips.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+ if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+ { XXH128_hash_t h128;
+ xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
+ xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
+ h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+ h128.high64 = XXH64_avalanche( seed ^ bitfliph);
+ return h128;
+ } }
+}
+
+/*
+ * A bit slower than XXH3_mix16B, but handles multiply by zero better.
+ *
+ * Folds two 16-byte input blocks into a 128-bit accumulator using 32 bytes
+ * of secret. Each half of the accumulator additionally XORs the raw bytes
+ * of the *other* block, which defeats seed-dependent collisions that the
+ * 64-bit variant's simpler mix is subject to.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
+ const xxh_u8* secret, XXH64_hash_t seed)
+{
+ acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
+ acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+ acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
+ acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+ return acc;
+}
+
+
+/*
+ * Hashes inputs of 17 to 128 bytes into a 128-bit result.
+ * Mixes symmetric 16-byte pairs (i-th from the start with i-th from the
+ * end) via XXH128_mix32B, innermost pair first in the unrolled variant,
+ * then merges the two accumulator halves.
+ */
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { XXH128_hash_t acc;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+
+#if XXH_SIZE_OPT >= 1
+ {
+ /* Smaller, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+ } while (i-- != 0);
+ }
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+ }
+ acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+ }
+ acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+ }
+ acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
+
+/*
+ * Hashes inputs of 129 to 240 bytes (XXH3_MIDSIZE_MAX) into a 128-bit
+ * result: the first 160 bytes in consecutive 32-byte chunks, then the
+ * remainder with a shifted secret offset, plus a final mix over the
+ * last 32 bytes using a negated seed.
+ */
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ { XXH128_hash_t acc;
+ unsigned i;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+ /*
+ * We set `i` to offset + 32. We do this so that unchanged
+ * `len` can be used as upper bound. This reaches a sweet spot
+ * where both x86 and aarch64 get simple agen and good codegen
+ * for the loop.
+ */
+ for (i = 32; i < 160; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + i - 32,
+ seed);
+ }
+ acc.low64 = XXH3_avalanche(acc.low64);
+ acc.high64 = XXH3_avalanche(acc.high64);
+ /*
+ * NB: `i <= len` will duplicate the last 32-bytes if
+ * len % 32 was zero. This is an unfortunate necessity to keep
+ * the hash result stable.
+ */
+ for (i=160; i <= len; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+ seed);
+ }
+ /* last bytes */
+ acc = XXH128_mix32B(acc,
+ input + len - 16,
+ input + len - 32,
+ secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+ (XXH64_hash_t)0 - seed);
+
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
+ }
+}
+
+/*
+ * Long-input (> XXH3_MIDSIZE_MAX) 128-bit core: runs the shared striped
+ * accumulation loop, then merges the accumulators twice — once from the
+ * start of the secret for the low half, once from the end for the high
+ * half — with different length-derived starting values.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)len * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + secretSize
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)len * XXH_PRIME64_2));
+ return h128;
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong() is not inlined.
+ *
+ * Long-input variant using the built-in default secret; seed and custom
+ * secret parameters are part of the common function-pointer signature
+ * but intentionally ignored here.
+ */
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * It's important for performance to pass @p secretLen (when it's static)
+ * to the compiler, so that it can properly optimize the vectorized loop.
+ *
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
+ *
+ * Long-input variant using a caller-provided secret; the seed parameter
+ * is ignored (the secret fully replaces it).
+ */
+XXH3_WITH_SECRET_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
+ XXH3_accumulate, XXH3_scrambleAcc);
+}
+
+/*
+ * Long-input seeded variant. seed==0 falls back to the default secret
+ * (matching XXH3_hashLong_128b_default); otherwise a per-seed custom
+ * secret is derived on the stack via f_initSec before hashing.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+ if (seed64 == 0)
+ return XXH3_hashLong_128b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed64);
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ *
+ * Non-inlined seeded entry point; the (secret, secretLen) pair belongs to
+ * the common function-pointer signature and is unused here.
+ */
+XXH_NO_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
+}
+
+/* Signature shared by all long-input 128-bit strategies above. */
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const void* XXH_RESTRICT, size_t);
+
+/*
+ * Common 128-bit dispatcher: short sizes go to the specialized fixed-size
+ * routines; anything above XXH3_MIDSIZE_MAX goes to the long-input
+ * strategy selected by the caller through f_hl128.
+ */
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_128bits_internal(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong128_f f_hl128)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secret` conditions are not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hl128(input, len, seed64, secret, secretLen);
+}
+
+
+/* === Public XXH128 API === */
+
+/*! @ingroup XXH3_family */
+/* One-shot hash with the default secret and no seed. */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_default);
+}
+
+/*! @ingroup XXH3_family */
+/* One-shot hash with a caller-provided secret (>= XXH3_SECRET_SIZE_MIN bytes). */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ (const xxh_u8*)secret, secretSize,
+ XXH3_hashLong_128b_withSecret);
+}
+
+/*! @ingroup XXH3_family */
+/* One-shot seeded hash; long inputs derive a custom secret from the seed. */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_internal(input, len, seed,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_withSeed);
+}
+
+/*! @ingroup XXH3_family */
+/* Seed applies to short inputs (with the default secret); the custom
+ * secret is only used above XXH3_MIDSIZE_MAX, so the long path below can
+ * never be reached from XXH3_128bits_internal (hence the NULL strategy). */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+/* Historical alias for XXH3_128bits_withSeed(). */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_withSeed(input, len, seed);
+}
+
+
+/* === XXH3 128-bit streaming === */
+#ifndef XXH_NO_STREAM
+/*
+ * All initialization and update functions are identical to 64-bit streaming variant.
+ * The only difference is the finalization routine.
+ *
+ * The 64-bit state accumulates the raw input identically for both output
+ * widths, so every reset/update below simply forwards to its 64-bit twin.
+ */
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ return XXH3_64bits_reset(statePtr);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSeed(statePtr, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_64bits_update(state, input, len);
+}
+
+/*! @ingroup XXH3_family */
+/*
+ * Finalizes a streamed XXH3 128-bit hash. Mirrors XXH3_64bits_digest():
+ * long inputs merge the streamed accumulators (twice, from both ends of
+ * the secret); short inputs are still fully buffered and reuse the
+ * one-shot paths.
+ */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ /* Use the caller-provided external secret if any, else the state's own copy. */
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + state->secretLimit + XXH_STRIPE_LEN
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+ return h128;
+ }
+ }
+ /* len <= XXH3_MIDSIZE_MAX : short code */
+ /* Test useSeed (not the seed value), exactly like XXH3_64bits_digest()
+ * above: a state initialized via reset_withSecretandSeed() with seed==0
+ * must take the seeded branch so the result matches the one-shot
+ * XXH3_128bits_withSecretandSeed(), which ignores the custom secret for
+ * short inputs. Testing state->seed here diverged in that case. */
+ if (state->useSeed)
+ return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
+}
+#endif /* !XXH_NO_STREAM */
+/* 128-bit utility functions */
+
+#include <string.h> /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+ /* note : XXH128_hash_t is compact, it has no padding byte,
+ * so a raw memcmp of the two structs is a valid equality test */
+ return !(memcmp(&h1, &h2, sizeof(h1)));
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * Orders by high64 first, then low64 (i.e. big-endian 128-bit ordering).
+ * @return : >0 if *h128_1 > *h128_2
+ * <0 if *h128_1 < *h128_2
+ * =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+ XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+ XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+ /* (a > b) - (b > a) yields -1/0/+1 without risking integer overflow */
+ int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+ /* note : bets that, in most cases, hash values are different */
+ if (hcmp) return hcmp;
+ return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
+
+
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+/* Serializes a 128-bit hash into its canonical (big-endian) byte form:
+ * high64 first, then low64. */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+ /* On little-endian hosts, byte-swap so the stored bytes are big-endian. */
+ if (XXH_CPU_LITTLE_ENDIAN) {
+ hash.high64 = XXH_swap64(hash.high64);
+ hash.low64 = XXH_swap64(hash.low64);
+ }
+ XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+ XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+}
+
+/*! @ingroup XXH3_family */
+/* Inverse of XXH128_canonicalFromHash(): reads the two big-endian halves
+ * back into a native XXH128_hash_t. */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+ XXH128_hash_t h;
+ h.high64 = XXH_readBE64(src);
+ h.low64 = XXH_readBE64(src->digest + 8);
+ return h;
+}
+
+
+
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+/* XORs a 128-bit hash into 16 bytes at dst, in place (little-endian lanes).
+ * Used by XXH3_generateSecret() to scramble each 16-byte secret segment. */
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+ XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+ XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*! @ingroup XXH3_family */
+/*
+ * Derives a custom secret of secretSize bytes (>= XXH3_SECRET_SIZE_MIN)
+ * from arbitrary caller seed material: the seed is tiled across the
+ * buffer, then every 16-byte segment is scrambled with an XXH128-derived
+ * value. An empty seed falls back to the built-in default secret.
+ * Returns XXH_ERROR on invalid arguments (release builds only; debug
+ * builds assert instead).
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(secretBuffer != NULL);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+ /* production mode, assert() are disabled */
+ if (secretBuffer == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
+
+ if (customSeedSize == 0) {
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+ }
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(customSeed != NULL);
+#else
+ if (customSeed == NULL) return XXH_ERROR;
+#endif
+
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ { size_t pos = 0;
+ while (pos < secretSize) {
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+ } }
+
+ /* Scramble each 16-byte segment with a distinct XXH128 value (seeded by
+ * the segment index), then handle the possibly-overlapping tail. */
+ { size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n=0; n<nbSeg16; n++) {
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char*)secretBuffer + n*16, h128);
+ }
+ /* last segment */
+ XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+ }
+ return XXH_OK;
+}
+
+/*! @ingroup XXH3_family */
+/*
+ * Writes the XXH_SECRET_DEFAULT_SIZE-byte custom secret derived from a
+ * 64-bit seed into secretBuffer — the same secret the seeded one-shot
+ * variants build internally via XXH3_initCustomSecret().
+ */
+XXH_PUBLIC_API void
+XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
+{
+ /* Built in an aligned local first; the caller's buffer need not be aligned. */
+ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ XXH3_initCustomSecret(secret, seed);
+ XXH_ASSERT(secretBuffer != NULL);
+ memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
+}
+
+
+
+/* Pop our optimization override from above */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC pop_options
+#endif
+
+#endif /* XXH_NO_LONG_LONG */
+
+#endif /* XXH_NO_XXH3 */
+
+/*!
+ * @}
+ */
+#endif /* XXH_IMPLEMENTATION */
+
+
+#if defined (__cplusplus)
+} /* extern "C" */
+#endif
diff --git a/include/make/compiler.mk b/include/make/compiler.mk
new file mode 100644
index 0000000..f251693
--- /dev/null
+++ b/include/make/compiler.mk
@@ -0,0 +1,42 @@
+# WARNING: Do not change cc-opt, cc-opt-alt or cc-warning without checking if
+# clang bug #49364 is fixed. stderr is redirected to /dev/null on
+# purpose, to work around a clang 11 bug that crashes if stderr is
+# redirected to stdin.
+#
+# Function used to detect support of a given option by the compiler.
+# Usage: CFLAGS += $(call cc-opt,option). Eg: $(call cc-opt,-fwrapv)
+# Note: ensure the referencing variable is assigned using ":=" and not "=" to
+# call it only once.
+cc-opt = $(shell set -e; if $(CC) -Werror $(1) -E -xc - -o /dev/null </dev/null >&0 2>/dev/null; then echo "$(1)"; fi;)
+
+# same but tries with $2 if $1 is not supported
+cc-opt-alt = $(if $(shell set -e; if $(CC) -Werror $(1) -E -xc - -o /dev/null </dev/null >&0 2>/dev/null; then echo 1;fi),$(1),$(call cc-opt,$(2)))
+
+# validate a list of options one at a time
+cc-all-opts = $(foreach a,$(1),$(call cc-opt,$(a)))
+
+# try to pass plenty of options at once, take them on success or try them
+# one at a time on failure and keep successful ones. This is handy to quickly
+# validate most common options.
+cc-all-fast = $(if $(call cc-opt,$(1)),$(1),$(call cc-all-opts,$(1)))
+
+# Below we verify that the compiler supports any -Wno-something option to
+# disable any warning, or if a special option is needed to achieve that. This
+# will allow to get rid of testing when the compiler doesn't care. The result
+# is made of two variables:
+# - cc-anywno that's non-empty if the compiler supports disabling anything
+# - cc-wnouwo that may contain an option needed to enable this behavior
+# Gcc 4.x and above do not need any option but will still complain about unknown
+# options if another warning or error happens, and as such they're not testable.
+# Clang needs a special option -Wno-unknown-warning-option. Compilers not
+# supporting this option will check all warnings individually.
+# Note: -Wno-haproxy-warning is intentionally bogus; accepting it proves the
+# compiler silently accepts any -Wno-* option.
+cc-anywno := $(call cc-opt,-Wno-haproxy-warning)
+cc-wnouwo := $(if $(cc-anywno),,$(call cc-opt,-Wno-unknown-warning-option))
+cc-anywno := $(if $(cc-anywno)$(cc-wnouwo),1)
+
+# Disable a warning when supported by the compiler. Don't put spaces around the
+# warning! And don't use cc-opt which doesn't always report an error until
+# another one is also returned. If "cc-anywno" is set, the compiler supports
+# -Wno- followed by anything so we don't even need to start the compiler.
+# Usage: CFLAGS += $(call cc-nowarn,warning). Eg: $(call cc-nowarn,format-truncation)
+cc-nowarn = $(if $(cc-anywno),-Wno-$(1),$(shell set -e; if $(CC) -Werror -W$(1) -E -xc - -o /dev/null </dev/null >&0 2>/dev/null; then echo "-Wno-$(1)"; fi;))
diff --git a/include/make/options.mk b/include/make/options.mk
new file mode 100644
index 0000000..022981c
--- /dev/null
+++ b/include/make/options.mk
@@ -0,0 +1,52 @@
+# this contains various functions and macros used to manipulate USE_* options
+# and their flags
+
+# Depending on the target platform, some options are set, as well as some
+# CFLAGS and LDFLAGS. All variables pre-set here will not appear in the build
+# options string. They may be set to any value, but are historically set to
+# "implicit" which eases debugging. You should not have to change anything
+# there unless you're adding support for a new platform.
+default_opts = $(foreach name,$(1),$(eval $(name)=implicit))
+
+# Return USE_xxx=$(USE_xxx) if the variable was set from the environment or the
+# command line.
+# Note: $(origin) reports "environment" or "command line" for such variables;
+# the $(subst) calls empty the string on an exact match, which is what the
+# nested $(if) tests detect.
+ignore_implicit = $(if $(subst environment,,$(origin $(1))), \
+ $(if $(subst command line,,$(origin $(1))),, \
+ $(1)=$($(1))), \
+ $(1)=$($(1))) \
+
+# This macro collects all USE_* values except those set to "implicit". This
+# is used to report a list of all flags which were used to build this version.
+# Do not assign anything to it.
+build_options = $(foreach opt,$(use_opts),$(call ignore_implicit,$(opt)))
+
+# Make a list of all known features with +/- prepended depending on their
+# activation status. Must be a macro so that dynamically enabled ones are
+# evaluated with their current status.
+build_features = $(foreach opt,$(patsubst USE_%,%,$(sort $(use_opts))),$(if $(USE_$(opt)),+$(opt),-$(opt)))
+
+# This returns a list of -DUSE_* for all known USE_* that are set
+opts_as_defines = $(foreach opt,$(use_opts),$(if $($(opt)),-D$(opt),))
+
+# Lists all enabled or disabled options without the "USE_" prefix
+enabled_opts = $(foreach opt,$(patsubst USE_%,%,$(use_opts)),$(if $(USE_$(opt)),$(opt),))
+disabled_opts = $(foreach opt,$(patsubst USE_%,%,$(use_opts)),$(if $(USE_$(opt)),,$(opt)))
+
+# preset all XXX_{INC,LIB,CFLAGS,LDFLAGS,SRC} variables to empty for $1=XXX
+reset_opt_vars = $(foreach name,INC LIB CFLAGS LDFLAGS SRC,$(eval $(1)_$(name)=))
+
+# preset all variables for all supported build options among use_opts
+reset_opts_vars = $(foreach opt,$(patsubst USE_%,%,$(use_opts)),$(call reset_opt_vars,$(opt)))
+
+# append $(1)_{C,LD}FLAGS into OPTIONS_{C,LD}FLAGS if not empty
+# (multi-line, so it must be $(eval)ed by the caller — see collect_opts_flags)
+define collect_opt_flags
+ ifneq ($$($(1)_CFLAGS),)
+ OPTIONS_CFLAGS += $$($(1)_CFLAGS)
+ endif
+ ifneq ($$($(1)_LDFLAGS),)
+ OPTIONS_LDFLAGS += $$($(1)_LDFLAGS)
+ endif
+endef
+
+# collect all enabled USE_foo's foo_{C,LD}FLAGS into OPTIONS_{C,LD}FLAGS
+collect_opts_flags = $(foreach opt,$(enabled_opts),$(eval $(call collect_opt_flags,$(opt))))
diff --git a/include/make/verbose.mk b/include/make/verbose.mk
new file mode 100644
index 0000000..c37d513
--- /dev/null
+++ b/include/make/verbose.mk
@@ -0,0 +1,30 @@
+# verbosity: pass V=1 for verbose shell invocation
+V = 0
+Q = @
+ifeq ($V,1)
+Q=
+endif
+
+# Some common commands such as CC/LD/AR are redefined with a cmd_ equivalent
+# and are either mapped to a silent rule just indicating what is being done,
+# or to themselves depending on the verbosity level.
+ifeq ($V,1)
+cmd_CC = $(CC)
+cmd_LD = $(LD)
+cmd_AR = $(AR)
+cmd_MAKE = +$(MAKE)
+else
+# $(sort) compares versions lexically: if MAKE_VERSION sorts at or after
+# 3.81, the first word is "3.81", meaning $(info) is available.
+ifeq (3.81,$(firstword $(sort $(MAKE_VERSION) 3.81)))
+# 3.81 or above
+cmd_CC = $(info $   CC      $@) $(Q)$(CC)
+cmd_LD = $(info $   LD      $@) $(Q)$(LD)
+cmd_AR = $(info $   AR      $@) $(Q)$(AR)
+cmd_MAKE = $(info $   MAKE    $@) $(Q)+$(MAKE)
+else
+# 3.80 or older: fall back to echoing from the recipe shell
+cmd_CC = $(Q)echo "  CC      $@";$(CC)
+cmd_LD = $(Q)echo "  LD      $@";$(LD)
+cmd_AR = $(Q)echo "  AR      $@";$(AR)
+cmd_MAKE = $(Q)echo "  MAKE    $@";$(MAKE)
+endif
+endif
diff --git a/reg-tests/README b/reg-tests/README
new file mode 100644
index 0000000..ef721fd
--- /dev/null
+++ b/reg-tests/README
@@ -0,0 +1,71 @@
+ * Regression testing for HAProxy with VTest *
+
+
+This little README file is about how to compile and run vtest test case files (VTC files)
+to test HAProxy for any regression.
+
+To do so, you will have to compile the vtest program sources, which depend on
+the Varnish cache application sources. vtest, formerly varnishtest, is a very useful
+program which has been developed to test the Varnish cache application. vtest has been
+modified in collaboration with Varnish cache creator Poul-Henning Kamp to support
+HAProxy in addition to Varnish cache.
+
+See also: doc/regression-testing.txt
+
+* vtest compilation *
+
+ $ git clone https://github.com/vtest/VTest
+
+ $ cd VTest
+
+ $ make vtest
+
+ Then vtest program may be found at the root directory of vtest sources directory.
+ The Varnish cache manuals are located in 'man' directory of Varnish cache sources
+ directory. You will have to have a look at varnishtest(7) and vtc(7) manuals to
+ use vtest.
+
+ Some information may also be found in doc/regression-testing.txt in HAProxy
+ sources.
+
+ Note that VTC files for Varnish cache may be found in bin/varnishtest/tests directory
+ of Varnish cache sources directory which may be found here:
+ https://github.com/varnishcache/varnish-cache
+
+
+* vtest execution *
+
+ You must set HAPROXY_PROGRAM environment variable to give the location
+ of the HAProxy program to test to vtest:
+
+ $ HAPROXY_PROGRAM=<my haproxy program> vtest ...
+
+ The HAProxy VTC files found in HAProxy sources may be run with the reg-tests
+ Makefile target. You must set the VTEST_PROGRAM environment variable to
+ give the location of the vtest program which has been previously compiled.
+
+ $ VTEST_PROGRAM=<my vtest program> make reg-tests
+
+ The "reg-tests" Makefile target runs the scripts/run-regtest.sh script.
+ To get more information about this script, run it with the --help option.
+
+ Note that vtest is run with the -t10 and -l options. The -l option is to
+ keep the vtest temporary directory in case of failed test cases. Core files
+ may be found in this directory (if core dumps are enabled by ulimit).
+
+
+* vtest patches for HAProxy VTC files *
+
+ When producing a patch to add a VTC regression testing file to reg-tests directory,
+ please follow these simple rules:
+
+ - If your VTC file needs others files, if possible, use the same basename as that
+ of the VTC file,
+ - Put these files in a directory with the same name as the code area concerned
+ by the bug ('peers', 'lua', 'acl' etc).
+
+Please note that most tests use a common set of timeouts defined by the
+environment variable HAPROXY_TEST_TIMEOUT. As much as possible, for regular I/O
+(i.e. not errors), please try to reuse that setting so that the value may
+easily be adjusted when running in some particularly slow environments, or be
+shortened to fail faster on developers' machines.
diff --git a/reg-tests/balance/balance-rr.vtc b/reg-tests/balance/balance-rr.vtc
new file mode 100644
index 0000000..908a4f9
--- /dev/null
+++ b/reg-tests/balance/balance-rr.vtc
@@ -0,0 +1,73 @@
+vtest "Test for balance roundrobin"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Server: s1"
+} -repeat 2 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Server: s2"
+} -repeat 2 -start
+
+server s3 {
+ rxreq
+ txresp -hdr "Server: s3"
+} -repeat 2 -start
+
+server s4 {
+ rxreq
+ txresp -hdr "Server: s4"
+} -repeat 2 -start
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen px
+ bind "fd@${px}"
+ balance roundrobin
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+ server srv3 ${s3_addr}:${s3_port}
+ server srv4 ${s4_addr}:${s4_port}
+} -start
+
+client c1 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -run
+
+client c2 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -run
+
+client c3 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s3
+} -run
+
+client c4 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s4
+} -run
+
+client c5 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -run
diff --git a/reg-tests/balance/balance-uri-path-only.vtc b/reg-tests/balance/balance-uri-path-only.vtc
new file mode 100644
index 0000000..f1291ee
--- /dev/null
+++ b/reg-tests/balance/balance-uri-path-only.vtc
@@ -0,0 +1,97 @@
+vtest "Test for balance URI"
+feature ignore_unknown_macro
+#REQUIRE_VERSION=2.3
+
+server s1 {
+ rxreq
+ txresp -hdr "Server: s1" -body "s1"
+} -repeat 5 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Server: s2" -body "s2"
+} -repeat 5 -start
+
+server s3 {
+ rxreq
+ txresp -hdr "Server: s3" -body "s3"
+} -repeat 5 -start
+
+server s4 {
+ rxreq
+ txresp -hdr "Server: s4" -body "s4"
+} -repeat 5 -start
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen px
+ bind "fd@${px}"
+ bind "fd@${pxh2}" proto h2
+ balance uri path-only
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+ server srv3 ${s3_addr}:${s3_port}
+ server srv4 ${s4_addr}:${s4_port}
+} -start
+
+client c1 -connect ${h1_px_sock} {
+ txreq -url "http://127.0.0.1/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -run
+
+client c2 -connect ${h1_px_sock} {
+ txreq -url "/url1?ignore=this-arg"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -run
+
+client c3 -connect ${h1_px_sock} {
+ txreq -url "http://127.0.0.1/url2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s3
+} -run
+
+client c4 -connect ${h1_px_sock} {
+ txreq -url "/url3"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s4
+} -run
+
+client c5 -connect ${h1_px_sock} {
+ txreq -url "/url4"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -run
+
+client c6h2 -connect ${h1_pxh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/url1"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "s2"
+ } -run
+} -run
diff --git a/reg-tests/balance/balance-uri.vtc b/reg-tests/balance/balance-uri.vtc
new file mode 100644
index 0000000..e678835
--- /dev/null
+++ b/reg-tests/balance/balance-uri.vtc
@@ -0,0 +1,73 @@
+vtest "Test for balance URI"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Server: s1"
+} -repeat 2 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Server: s2"
+} -repeat 2 -start
+
+server s3 {
+ rxreq
+ txresp -hdr "Server: s3"
+} -repeat 2 -start
+
+server s4 {
+ rxreq
+ txresp -hdr "Server: s4"
+} -repeat 2 -start
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen px
+ bind "fd@${px}"
+ balance uri
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+ server srv3 ${s3_addr}:${s3_port}
+ server srv4 ${s4_addr}:${s4_port}
+} -start
+
+client c1 -connect ${h1_px_sock} {
+ txreq -url "/url1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -run
+
+client c2 -connect ${h1_px_sock} {
+ txreq -url "/url1?ignore=this-arg"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -run
+
+client c3 -connect ${h1_px_sock} {
+ txreq -url "/url2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s3
+} -run
+
+client c4 -connect ${h1_px_sock} {
+ txreq -url "/url3"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s4
+} -run
+
+client c5 -connect ${h1_px_sock} {
+ txreq -url "/url4"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -run
diff --git a/reg-tests/cache/basic.vtc b/reg-tests/cache/basic.vtc
new file mode 100644
index 0000000..377cbb3
--- /dev/null
+++ b/reg-tests/cache/basic.vtc
@@ -0,0 +1,53 @@
+varnishtest "Basic cache test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 1
+ chunkedlen 1
+ chunkedlen 2
+ chunkedlen 3
+ chunkedlen 5
+ chunkedlen 8
+ chunkedlen 13
+ chunkedlen 21
+ chunkedlen 34
+ chunkedlen 55
+ chunkedlen 89
+ chunkedlen 144
+ chunkedlen 233
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 609
+} -repeat 4 -run
diff --git a/reg-tests/cache/caching_rules.vtc b/reg-tests/cache/caching_rules.vtc
new file mode 100644
index 0000000..a488875
--- /dev/null
+++ b/reg-tests/cache/caching_rules.vtc
@@ -0,0 +1,320 @@
+varnishtest "Caching rules test"
+# A response will not be cached unless it has an explicit age (Cache-Control max-age or s-maxage, Expires) or a validator (Last-Modified, or ETag)
+# A response will not be cached either if it has an Age header that is either invalid (should be an integer) or greater than its max age.
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == "/max-age"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -bodylen 150
+
+ rxreq
+ expect req.url == "/s-maxage"
+ txresp -hdr "Cache-Control: s-maxage=5" \
+ -bodylen 160
+
+ rxreq
+ expect req.url == "/last-modified"
+ txresp -hdr "Last-Modified: Thu, 22 Oct 2020 16:51:12 GMT" \
+ -bodylen 180
+
+ rxreq
+ expect req.url == "/etag"
+ txresp -hdr "ETag: \"etag\"" \
+ -bodylen 190
+
+ rxreq
+ expect req.url == "/uncacheable"
+ txresp \
+ -bodylen 200
+
+ rxreq
+ expect req.url == "/uncacheable"
+ txresp \
+ -bodylen 210
+
+ # Age response header checks
+
+ # Invalid age
+ rxreq
+ expect req.url == "/invalid_age"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -hdr "Age: abc" -bodylen 120
+
+ rxreq
+ expect req.url == "/invalid_age"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -hdr "Age: abc" -bodylen 120
+
+ # Old age (greater than max age)
+ rxreq
+ expect req.url == "/old_age"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -hdr "Age: 10" -bodylen 130
+
+ rxreq
+ expect req.url == "/old_age"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -hdr "Age: 10" -bodylen 130
+
+ # Good age
+ rxreq
+ expect req.url == "/good_age"
+ txresp -hdr "Cache-Control: max-age=500" \
+ -hdr "Age: 100" -bodylen 140
+
+
+ # "Cache-Control: no-cache" on client request but still stored in cache
+ rxreq
+ expect req.url == "/nocache"
+ txresp -hdr "Cache-Control: max-age=500" \
+ -hdr "Age: 100" -bodylen 140
+
+ rxreq
+ expect req.url == "/nocache"
+ txresp -hdr "Cache-Control: max-age=500" \
+ -hdr "Age: 100" -bodylen 140
+
+
+ # max-age=0
+ rxreq
+ expect req.url == "/maxage_zero"
+ txresp -hdr "Cache-Control: max-age=0" \
+ -bodylen 150
+
+ rxreq
+ expect req.url == "/maxage_zero"
+ txresp -hdr "Cache-Control: max-age=0" \
+ -bodylen 150
+
+ # Overridden null max-age
+ rxreq
+ expect req.url == "/overridden"
+ txresp -hdr "Cache-Control: max-age=1, s-maxage=5" \
+ -bodylen 160
+
+ rxreq
+ expect req.url == "/overridden_null_maxage"
+ txresp -hdr "Cache-Control: max-age=0, s-maxage=5" \
+ -bodylen 190
+
+
+} -start
+
+server s2 {
+ rxreq
+ expect req.url == "/expires"
+ # Expires header is filled directly by the expires_be backend
+ txresp \
+ -bodylen 170
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ use_backend expires_be if { path_beg /expires }
+ default_backend test
+
+ backend expires_be
+ http-request cache-use my_cache
+ server www ${s2_addr}:${s2_port}
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ # Expires value set in the future (current_time+5s)
+ http-response set-header Expires %[date(5),http_date]
+ http-response cache-store my_cache
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/max-age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 150
+
+ txreq -url "/max-age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 150
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/s-maxage"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 160
+
+ txreq -url "/s-maxage"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 160
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/expires"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 170
+
+ txreq -url "/expires"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 170
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/last-modified"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 180
+
+ txreq -url "/last-modified"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 180
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/etag"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 190
+
+ txreq -url "/etag"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 190
+ expect resp.http.X-Cache-Hit == 1
+
+ # The next response should not be cached
+ txreq -url "/uncacheable"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 200
+
+ txreq -url "/uncacheable"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 210
+ expect resp.http.X-Cache-Hit == 0
+
+ # Age header tests
+ txreq -url "/invalid_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 120
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/invalid_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 120
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/old_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 130
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/old_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 130
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/good_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 140
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/good_age"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 140
+ expect resp.http.X-Cache-Hit == 1
+
+ # Cache-Control: no-cache
+ txreq -url "/nocache" -hdr "Cache-Control: no-cache"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 140
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/nocache" -hdr "Cache-Control: no-cache"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 140
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/nocache"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 140
+ expect resp.http.X-Cache-Hit == 1
+
+ # max-age=0 (control test for the overridden null max-age test below)
+ txreq -url "/maxage_zero"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 150
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/maxage_zero"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 150
+ expect resp.http.X-Cache-Hit == 0
+
+ # Overridden max-age directive
+ txreq -url "/overridden"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 160
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/overridden"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 160
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/overridden_null_maxage"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 190
+ expect resp.http.X-Cache-Hit == 0
+
+ # The previous response should have been cached even if it had
+ # a max-age=0 since it also had a positive s-maxage
+ txreq -url "/overridden_null_maxage"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 190
+ expect resp.http.X-Cache-Hit == 1
+
+
+} -run
diff --git a/reg-tests/cache/expires.vtc b/reg-tests/cache/expires.vtc
new file mode 100644
index 0000000..ee5cd77
--- /dev/null
+++ b/reg-tests/cache/expires.vtc
@@ -0,0 +1,127 @@
+varnishtest "Expires support"
+
+#REQUIRE_VERSION=2.3
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 0
+} -start
+
+server s2 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 0
+} -start
+
+server s3 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 0
+
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ use_backend cache_control_be if { path_beg /cache_control }
+ use_backend future_expires_be if { path_beg /future }
+ default_backend past_expires_be
+
+ backend cache_control_be
+ # Expires header should be ignored since a Cache-Control one is present
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header Expires %[date(-1),http_date]
+ http-response cache-store my_cache
+
+ backend future_expires_be
+ # Expires value set in the future (current_time+5s)
+ http-request cache-use my_cache
+ server www ${s2_addr}:${s2_port}
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header Expires %[date(5),http_date]
+ http-response cache-store my_cache
+
+ backend past_expires_be
+ # Expires value set in the past
+ http-request cache-use my_cache
+ server www ${s3_addr}:${s3_port}
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header Expires %[date(-1),http_date]
+ http-response cache-store my_cache
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/cache_control"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+
+ txreq -url "/cache_control"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/future"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+
+ txreq -url "/future"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/past"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+
+ txreq -url "/past"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+
+} -run
+
diff --git a/reg-tests/cache/if-modified-since.vtc b/reg-tests/cache/if-modified-since.vtc
new file mode 100644
index 0000000..1fd9a93
--- /dev/null
+++ b/reg-tests/cache/if-modified-since.vtc
@@ -0,0 +1,144 @@
+varnishtest "If-Modified-Since support"
+
+#REQUIRE_VERSION=2.3
+
+feature ignore_unknown_macro
+
+server s1 {
+ # Response containing a "Last-Modified" field
+ rxreq
+ expect req.url == "/last_modified"
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Last-Modified: Thu, 15 Oct 2020 22:23:24 GMT"
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 0
+
+ # Response containing a "Date" field
+ rxreq
+ expect req.url == "/date"
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Date: Thu, 22 Oct 2020 16:51:12 GMT" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 0
+
+ # Response containing both a "Last-Modified" and a "Date" fields
+ # Should behave the same way as if the "Date" field was not here.
+ rxreq
+ expect req.url == "/last_modified_and_date"
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Last-Modified: Thu, 15 Oct 2020 14:24:38 GMT" \
+ -hdr "Date: Thu, 22 Oct 2020 16:51:12 GMT"
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/last_modified"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+
+ txreq -url "/date"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+
+ txreq -url "/last_modified_and_date"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+
+
+ # Earlier date
+ # "Last-Modified" version
+ txreq -url "/last_modified" \
+ -hdr "If-Modified-Since: Thu, 15 Oct 2020 00:00:01 GMT"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+ # "Date" version
+ txreq -url "/date" \
+ -hdr "If-Modified-Since: Thu, 01 Oct 2020 00:00:01 GMT"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ # "Last-Modified" and "Date" version
+ txreq -url "/last_modified_and_date" \
+ -hdr "If-Modified-Since: Thu, 15 Oct 2020 00:00:01 GMT"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+
+
+ # Same date
+ # "Last-Modified" version
+ txreq -url "/last_modified" \
+ -hdr "If-Modified-Since: Thu, 15 Oct 2020 22:23:24 GMT"
+ rxresphdrs
+ expect resp.status == 304
+ # "Date" version
+ txreq -url "/date" \
+ -hdr "If-Modified-Since: Thu, 22 Oct 2020 16:51:12 GMT"
+ rxresphdrs
+ expect resp.status == 304
+ # "Last-Modified" and "Date" version
+ txreq -url "/last_modified_and_date" \
+ -hdr "If-Modified-Since: Thu, 15 Oct 2020 16:51:12 GMT"
+ rxresphdrs
+ expect resp.status == 304
+
+
+ # Later date
+ # "Last-Modified" version
+ txreq -url "/last_modified" \
+ -hdr "If-Modified-Since: Thu, 22 Oct 2020 23:00:00 GMT"
+ rxresphdrs
+ expect resp.status == 304
+ # "Date" version
+ txreq -url "/date" \
+ -hdr "If-Modified-Since: Thu, 22 Oct 2020 23:00:00 GMT"
+ rxresphdrs
+ expect resp.status == 304
+ # "Last-Modified" and "Date" version
+ txreq -url "/last_modified_and_date" \
+ -hdr "If-Modified-Since: Thu, 22 Oct 2020 23:00:00 GMT"
+ rxresphdrs
+ expect resp.status == 304
+
+} -run
diff --git a/reg-tests/cache/if-none-match.vtc b/reg-tests/cache/if-none-match.vtc
new file mode 100644
index 0000000..3dcaec5
--- /dev/null
+++ b/reg-tests/cache/if-none-match.vtc
@@ -0,0 +1,89 @@
+varnishtest "If-None-Match support"
+
+#REQUIRE_VERSION=2.3
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "ETag: \"etag\""
+ chunkedlen 1
+ chunkedlen 1
+ chunkedlen 2
+ chunkedlen 3
+ chunkedlen 5
+ chunkedlen 8
+ chunkedlen 13
+ chunkedlen 21
+ chunkedlen 34
+ chunkedlen 55
+ chunkedlen 89
+ chunkedlen 144
+ chunkedlen 233
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 609
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 609
+ txreq \
+ -hdr "if-none-match: *"
+ rxresphdrs
+ expect resp.status == 304
+ txreq \
+ -hdr "if-none-match: \"etag\""
+ rxresphdrs
+ expect resp.status == 304
+ txreq \
+ -hdr "if-none-match: W/\"etag\""
+ rxresphdrs
+ expect resp.status == 304
+} -run
+
+client c2 -connect ${h1_fe_sock} {
+ txreq \
+ -hdr "if-none-match: \"wrong_etag\""
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 609
+ txreq \
+ -hdr "if-none-match: W/\"wrong_etag\", W/\"etag\""
+ rxresphdrs
+ expect resp.status == 304
+} -run
diff --git a/reg-tests/cache/post_on_entry.vtc b/reg-tests/cache/post_on_entry.vtc
new file mode 100644
index 0000000..66c89e4
--- /dev/null
+++ b/reg-tests/cache/post_on_entry.vtc
@@ -0,0 +1,65 @@
+varnishtest "A successful unsafe method (POST for instance) on a cached entry must disable it."
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == "/cached"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -bodylen 150
+
+ rxreq
+ expect req.url == "/cached"
+ expect req.method == "POST"
+ txresp
+
+ rxreq
+ expect req.url == "/cached"
+ txresp -hdr "Cache-Control: max-age=5" \
+ -bodylen 100
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/cached"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 150
+
+ txreq -method "POST" -url "/cached" -bodylen 100
+ rxresp
+ expect resp.status == 200
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/cached"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 100
+ expect resp.http.X-Cache-Hit == 0
+} -run
diff --git a/reg-tests/cache/sample_fetches.vtc b/reg-tests/cache/sample_fetches.vtc
new file mode 100644
index 0000000..c2b99c2
--- /dev/null
+++ b/reg-tests/cache/sample_fetches.vtc
@@ -0,0 +1,137 @@
+
+varnishtest "Basic cache test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 15
+ chunkedlen 0
+} -start
+
+server s2 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 16
+ chunkedlen 0
+} -start
+
+server s3 {
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 0
+
+ rxreq
+ txresp -nolen -hdr "Transfer-Encoding: chunked" \
+ -hdr "Cache-Control: max-age=5"
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 17
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ use_backend first_be if { path_beg /first }
+ use_backend nocache_be if { path_beg /nocache }
+ default_backend second_be
+
+ backend first_be
+ http-request cache-use first_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store first_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header X-Cache-Name %[res.cache_name]
+
+ backend second_be
+ http-request cache-use second_cache
+ server www ${s2_addr}:${s2_port}
+ http-response cache-store second_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header X-Cache-Name %[res.cache_name]
+
+ backend nocache_be
+ server www ${s3_addr}:${s3_port}
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+ http-response set-header X-Cache-Name %[res.cache_name]
+
+ cache first_cache
+ total-max-size 3
+ max-age 40
+ max-object-size 3000
+
+ cache second_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/first"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 0
+ expect resp.http.X-Cache-Name == ""
+
+ txreq -url "/second"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.X-Cache-Hit == 0
+ expect resp.http.X-Cache-Name == ""
+
+ txreq -url "/nocache"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+ expect resp.http.X-Cache-Name == ""
+
+ # The nocache_be backend has no cache, so the response is fetched from
+ # the server again and X-Cache-Hit stays 0
+ txreq -url "/nocache"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+ expect resp.http.X-Cache-Name == ""
+
+ txreq -url "/first"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+ expect resp.http.X-Cache-Name == "first_cache"
+
+ txreq -url "/second"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.X-Cache-Hit == 1
+ expect resp.http.X-Cache-Name == "second_cache"
+} -run
diff --git a/reg-tests/cache/vary.vtc b/reg-tests/cache/vary.vtc
new file mode 100644
index 0000000..6c8cedf
--- /dev/null
+++ b/reg-tests/cache/vary.vtc
@@ -0,0 +1,461 @@
+varnishtest "Vary support"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ # Response varying on "accept-encoding" with
+ # an unacceptable content-encoding
+ rxreq
+ expect req.url == "/accept-encoding"
+ txresp -hdr "Content-Encoding: gzip" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 45
+
+ # Response varying on "accept-encoding"
+ rxreq
+ expect req.url == "/accept-encoding"
+ txresp -hdr "Content-Encoding: gzip" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 45
+
+ # Response varying on "accept-encoding" with
+ # no content-encoding
+ rxreq
+ expect req.url == "/accept-encoding"
+ txresp -hdr "Content-Type: text/plain" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 48
+
+ # Response varying on "accept-encoding" but having two different encodings
+ rxreq
+ expect req.url == "/accept-encoding-multiple"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 51
+
+
+ # Unmanaged vary
+ rxreq
+ expect req.url == "/unmanaged"
+ txresp -hdr "Vary: accept-encoding,unmanaged" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 51
+
+
+ rxreq
+ expect req.url == "/unmanaged"
+ txresp -hdr "Vary: accept-encoding,unmanaged" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 51
+
+
+
+ # Mixed Vary (Accept-Encoding + Referer)
+ rxreq
+ expect req.url == "/referer-accept-encoding"
+ txresp -hdr "Vary: accept-encoding,referer" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 51
+
+ rxreq
+ expect req.url == "/referer-accept-encoding"
+ txresp -hdr "Vary: referer,accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: br" \
+ -bodylen 54
+
+ rxreq
+ expect req.url == "/referer-accept-encoding"
+ txresp -hdr "Vary: referer,accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 57
+
+ rxreq
+ expect req.url == "/origin-referer-accept-encoding"
+ txresp -hdr "Vary: origin,referer,accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 58
+
+ rxreq
+ expect req.url == "/origin-referer-accept-encoding"
+ txresp -hdr "Vary: origin,referer,accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 59
+
+ # Multiple Accept-Encoding headers
+ rxreq
+ expect req.url == "/multiple_headers"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: br" \
+ -bodylen 155
+
+ rxreq
+ expect req.url == "/multiple_headers"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: br" \
+ -bodylen 166
+
+
+ # Too many Accept-Encoding values (we will not cache responses with more than 16 encodings)
+ rxreq
+ expect req.url == "/too_many_encodings"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 177
+
+ rxreq
+ expect req.url == "/too_many_encodings"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 188
+
+ rxreq
+ expect req.url == "/empty-vs-missing"
+ txresp -hdr "Content-Encoding: gzip" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 234
+
+ rxreq
+ expect req.url == "/empty-vs-missing"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 256
+} -start
+
+server s2 {
+ # Responses that should not be cached
+ rxreq
+ expect req.url == "/no_vary_support"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 57
+
+ rxreq
+ expect req.url == "/no_vary_support"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 57
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ use_backend no_vary_be if { path_beg /no_vary_support }
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+
+ backend no_vary_be
+ http-request cache-use no_vary_cache
+ server www ${s2_addr}:${s2_port}
+ http-response cache-store no_vary_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+ process-vary on
+
+ cache no_vary_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+ process-vary off
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ # Accept-Encoding Vary
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: first_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+
+ # The response for the first request had an unacceptable `content-encoding`
+ # which might happen if that's the only thing the server supports, but
+ # we must not cache that and instead defer to the server.
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: first_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: second_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.X-Cache-Hit == 0
+
+ # This request matches the cache entry for the request above, despite
+ # matching the `accept-encoding` of the first request because the
+ # request above only has the `identity` encoding which is implicitly
+ # added, unless explicitly forbidden.
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: first_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: second_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 48
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.X-Cache-Hit == 1
+
+ # The accept-encoding normalizer function converts the header values
+ # to lower case then calculates the hash of every sub part before
+ # sorting the hashes and xor'ing them (while removing duplicates).
+ txreq -url "/accept-encoding-multiple" -hdr "Accept-Encoding: first,second"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/accept-encoding-multiple" -hdr "Accept-Encoding: first,second"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/accept-encoding-multiple" -hdr "Accept-Encoding: second,first"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/accept-encoding-multiple" -hdr "Accept-Encoding: FirsT,SECOND,first"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 1
+
+ # Unmanaged vary
+ txreq -url "/unmanaged" -hdr "Accept-Encoding: first_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/unmanaged" -hdr "Accept-Encoding: first_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+
+
+ # Mixed Vary (Accept-Encoding + Referer)
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Accept-Encoding: br, gzip" \
+ -hdr "Referer: referer"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Accept-Encoding: br" \
+ -hdr "Referer: other-referer"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 54
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Referer: other-referer"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 57
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Referer: referer" \
+ -hdr "Accept-Encoding: gzip, br"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 51
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Accept-Encoding: br" \
+ -hdr "Referer: other-referer"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 54
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/referer-accept-encoding" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Referer: other-referer"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 57
+ expect resp.http.X-Cache-Hit == 1
+
+
+ # Mixed Vary (Accept-Encoding + Referer + Origin)
+ txreq -url "/origin-referer-accept-encoding" \
+ -hdr "Accept-Encoding: br, gzip" \
+ -hdr "Referer: referer" \
+ -hdr "Origin: origin"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 58
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/origin-referer-accept-encoding" \
+ -hdr "Accept-Encoding: br, gzip" \
+ -hdr "Referer: referer" \
+ -hdr "Origin: origin"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 58
+ expect resp.http.X-Cache-Hit == 1
+
+ txreq -url "/origin-referer-accept-encoding" \
+ -hdr "Accept-Encoding: br, gzip" \
+ -hdr "Referer: referer" \
+ -hdr "Origin: other-origin"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 59
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/origin-referer-accept-encoding" \
+ -hdr "Accept-Encoding: br, gzip" \
+ -hdr "Referer: referer" \
+ -hdr "Origin: other-origin"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 59
+ expect resp.http.X-Cache-Hit == 1
+
+ # Multiple Accept-encoding headers
+ txreq -url "/multiple_headers" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Accept-Encoding: br, deflate"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 155
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/multiple_headers" \
+ -hdr "Accept-Encoding: deflate" \
+ -hdr "Accept-Encoding: br,gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 155
+ expect resp.http.X-Cache-Hit == 1
+
+ # Should not match a cache entry
+ txreq -url "/multiple_headers" \
+ -hdr "Accept-Encoding: first_encoding"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 166
+ expect resp.http.X-Cache-Hit == 0
+
+ # Too many accept encodings
+ txreq -url "/too_many_encodings" \
+ -hdr "Accept-Encoding: a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16,a17"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 177
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/too_many_encodings" \
+ -hdr "Accept-Encoding: a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,a16,a17"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 188
+ expect resp.http.X-Cache-Hit == 0
+
+ # A missing 'Accept-Encoding' implies that anything is acceptable,
+ # while an empty 'Accept-Encoding' implies nothing is acceptable.
+
+ # Start by caching a gzip response.
+ txreq -url "/empty-vs-missing" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 234
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.X-Cache-Hit == 0
+
+ # Check that it is cached.
+ txreq -url "/empty-vs-missing" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 234
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.X-Cache-Hit == 1
+
+ # Check that the cached response is returned when no accept-encoding is
+ # specified.
+ txreq -url "/empty-vs-missing"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 234
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.X-Cache-Hit == 1
+
+ # Check that the cached response is not returned when an empty
+ # accept-encoding is specified.
+ txreq -url "/empty-vs-missing" -hdr "Accept-Encoding:"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 256
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.X-Cache-Hit == 0
+
+ # The following requests are treated by a backend that does not cache
+ # responses containing a Vary header
+ txreq -url "/no_vary_support"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 57
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/no_vary_support"
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 57
+ expect resp.http.X-Cache-Hit == 0
+
+
+} -run
diff --git a/reg-tests/cache/vary_accept_encoding.vtc b/reg-tests/cache/vary_accept_encoding.vtc
new file mode 100644
index 0000000..4b828a8
--- /dev/null
+++ b/reg-tests/cache/vary_accept_encoding.vtc
@@ -0,0 +1,333 @@
+varnishtest "Check the Accept-Encoding processing implemented in the Vary mechanism"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ # Response varying on "accept-encoding" with a gzip content-encoding
+ rxreq
+ expect req.url == "/accept-encoding"
+ txresp -hdr "Content-Encoding: gzip" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 45
+
+ # Response varying on "accept-encoding" with a deflate content-encoding
+ rxreq
+ expect req.url == "/accept-encoding"
+ txresp -hdr "Content-Encoding: deflate" \
+ -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 55
+
+
+ # Response varying on "accept-encoding" with no content-encoding (identity)
+ rxreq
+ expect req.url == "/accept-encoding-identity"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -bodylen 65
+
+ # Response varying on "accept-encoding" with refused identity encoding
+ rxreq
+ expect req.url == "/accept-encoding-identity"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: deflate" \
+ -bodylen 75
+
+
+ rxreq
+ expect req.url == "/accept-encoding-star"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: br" \
+ -bodylen 89
+
+ rxreq
+ expect req.url == "/accept-encoding-star"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: deflate" \
+ -bodylen 99
+
+
+ rxreq
+ expect req.url == "/multiple-content-encoding"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: deflate, gzip" \
+ -bodylen 109
+
+ rxreq
+ expect req.url == "/unknown-content-encoding"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: unknown_encoding" \
+ -bodylen 119
+
+ rxreq
+ expect req.url == "/unknown-content-encoding"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: unknown_encoding" \
+ -bodylen 119
+
+
+ rxreq
+ expect req.url == "/hash-collision"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: br" \
+ -bodylen 129
+
+ rxreq
+ expect req.url == "/hash-collision"
+ txresp -hdr "Vary: accept-encoding" \
+ -hdr "Cache-Control: max-age=5" \
+ -hdr "Content-Encoding: gzip" \
+ -bodylen 139
+} -start
+
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend test
+
+ backend test
+ http-request cache-use my_cache
+ server www ${s1_addr}:${s1_port}
+ http-response cache-store my_cache
+ http-response set-header X-Cache-Hit %[res.cache_hit]
+
+ cache my_cache
+ total-max-size 3
+ max-age 20
+ max-object-size 3072
+ process-vary on
+} -start
+
+
+client c1 -connect ${h1_fe_sock} {
+ #
+ # Accept-Encoding Vary
+ #
+
+ # First request
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+
+ # Regular case
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Regular case with upper case encoding
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: GZIP"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Multiple accepted encodings (all standard)
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: deflate,gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Multiple accept-encoding headers + non-standard accepted encodings
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: first_encoding,second_encoding" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Regular case with positive weight
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: gzip;q=0.8"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Regular case with positive weight and extra whitespaces
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: gzip ; q=0.8"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 45
+ expect resp.http.X-Cache-Hit == 1
+
+ # Regular case with null weight
+ txreq -url "/accept-encoding" -hdr "Accept-Encoding: deflate;q=0.8, gzip;q=0"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate"
+ expect resp.bodylen == 55
+ expect resp.http.X-Cache-Hit == 0
+
+
+ #
+ # Identity tests
+ #
+ txreq -url "/accept-encoding-identity"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.bodylen == 65
+ expect resp.http.X-Cache-Hit == 0
+
+ # Regular case
+ txreq -url "/accept-encoding-identity"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.bodylen == 65
+ expect resp.http.X-Cache-Hit == 1
+
+ # Identity is allowed by default even if another encoding is specified
+ txreq -url "/accept-encoding-identity" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.bodylen == 65
+ expect resp.http.X-Cache-Hit == 1
+
+ # Refused identity encoding (explicit null weight)
+ txreq -url "/accept-encoding-identity" -hdr "Accept-Encoding: deflate, identity;q=0"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate"
+ expect resp.bodylen == 75
+ expect resp.http.X-Cache-Hit == 0
+
+
+ #
+ # Star tests
+ #
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: *"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 89
+ expect resp.http.X-Cache-Hit == 0
+
+ # Regular case
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: *"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 89
+ expect resp.http.X-Cache-Hit == 1
+
+ # Reject some encodings
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: gzip;q=0, deflate,*"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 89
+ expect resp.http.X-Cache-Hit == 1
+
+ # Weighted star
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: gzip;q=0, deflate,*;q=0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 89
+ expect resp.http.X-Cache-Hit == 1
+
+ # Rejected identity
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: gzip;q=0, deflate,*;q=0.1,identity;q=0"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 89
+ expect resp.http.X-Cache-Hit == 1
+
+ # Rejected star and "br" not accepted
+ txreq -url "/accept-encoding-star" -hdr "Accept-Encoding: gzip;q=0, deflate,*;q=0"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate"
+ expect resp.bodylen == 99
+ expect resp.http.X-Cache-Hit == 0
+
+
+ #
+ # Multiple content-encodings
+ #
+ txreq -url "/multiple-content-encoding" -hdr "Accept-Encoding: gzip;q=0.8, deflate"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate, gzip"
+ expect resp.bodylen == 109
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/multiple-content-encoding" -hdr "Accept-Encoding: deflate,gzip;q=0.7"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate, gzip"
+ expect resp.bodylen == 109
+ expect resp.http.X-Cache-Hit == 1
+
+
+ #
+ # Unknown content-encoding
+ # The response should not be cached since it has an unknown content encoding
+ #
+ txreq -url "/unknown-content-encoding" -hdr "Accept-Encoding: gzip;q=0.8, deflate, first_encoding"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "unknown_encoding"
+ expect resp.bodylen == 119
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/unknown-content-encoding" -hdr "Accept-Encoding: deflate,gzip;q=0.8, first_encoding"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "unknown_encoding"
+ expect resp.bodylen == 119
+ expect resp.http.X-Cache-Hit == 0
+
+ #
+ # Hash collision (https://github.com/haproxy/haproxy/issues/988)
+ #
+ # crc32(gzip) ^ crc32(br) ^ crc32(xxx) ^ crc32(jdcqiab) == crc32(gzip)
+ txreq -url "/hash-collision" -hdr "Accept-Encoding: br,gzip,xxx,jdcqiab"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "br"
+ expect resp.bodylen == 129
+ expect resp.http.X-Cache-Hit == 0
+
+ txreq -url "/hash-collision" -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.bodylen == 139
+ expect resp.http.X-Cache-Hit == 0
+} -run
diff --git a/reg-tests/checks/1be_40srv_odd_health_checks.vtc b/reg-tests/checks/1be_40srv_odd_health_checks.vtc
new file mode 100644
index 0000000..d0f3be5
--- /dev/null
+++ b/reg-tests/checks/1be_40srv_odd_health_checks.vtc
@@ -0,0 +1,117 @@
+varnishtest "Health-checks: only for servers with 'check' set"
+feature ignore_unknown_macro
+
+# This test starts 40 servers in the same backend, named srv0 up to srv39.
+# Only the odd servers have health-checks enabled.
+# The first health-checks passed tests are checked for all these servers
+# thanks to syslog messages.
+
+#REQUIRE_VERSION=2.4
+#EXCLUDE_TARGETS=freebsd
+#REGTEST_TYPE=slow
+
+syslog S -repeat 20 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv([13579]|[123][13579]) succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
+} -start
+
+server s0 {} -start
+server s1 {} -start
+server s2 {} -start
+server s3 {} -start
+server s4 {} -start
+server s5 {} -start
+server s6 {} -start
+server s7 {} -start
+server s8 {} -start
+server s9 {} -start
+server s10 {} -start
+server s11 {} -start
+server s12 {} -start
+server s13 {} -start
+server s14 {} -start
+server s15 {} -start
+server s16 {} -start
+server s17 {} -start
+server s18 {} -start
+server s19 {} -start
+server s20 {} -start
+server s21 {} -start
+server s22 {} -start
+server s23 {} -start
+server s24 {} -start
+server s25 {} -start
+server s26 {} -start
+server s27 {} -start
+server s28 {} -start
+server s29 {} -start
+server s30 {} -start
+server s31 {} -start
+server s32 {} -start
+server s33 {} -start
+server s34 {} -start
+server s35 {} -start
+server s36 {} -start
+server s37 {} -start
+server s38 {} -start
+server s39 {} -start
+
+haproxy h1 -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ default-server no-check inter 200ms downinter 100ms rise 1 fall 1
+
+ backend be1
+ option log-health-checks
+ log ${S_addr}:${S_port} daemon
+ server srv0 ${s0_addr}:${s0_port}
+ server srv1 ${s1_addr}:${s1_port} check
+ server srv2 ${s2_addr}:${s2_port}
+ server srv3 ${s3_addr}:${s3_port} check
+ server srv4 ${s4_addr}:${s4_port}
+ server srv5 ${s5_addr}:${s5_port} check
+ server srv6 ${s6_addr}:${s6_port}
+ server srv7 ${s7_addr}:${s7_port} check
+ server srv8 ${s8_addr}:${s8_port}
+ server srv9 ${s9_addr}:${s9_port} check
+ server srv10 ${s10_addr}:${s10_port}
+ server srv11 ${s11_addr}:${s11_port} check
+ server srv12 ${s12_addr}:${s12_port}
+ server srv13 ${s13_addr}:${s13_port} check
+ server srv14 ${s14_addr}:${s14_port}
+ server srv15 ${s15_addr}:${s15_port} check
+ server srv16 ${s16_addr}:${s16_port}
+ server srv17 ${s17_addr}:${s17_port} check
+ server srv18 ${s18_addr}:${s18_port}
+ server srv19 ${s19_addr}:${s19_port} check
+ server srv20 ${s20_addr}:${s20_port}
+ server srv21 ${s21_addr}:${s21_port} check
+ server srv22 ${s22_addr}:${s22_port}
+ server srv23 ${s23_addr}:${s23_port} check
+ server srv24 ${s24_addr}:${s24_port}
+ server srv25 ${s25_addr}:${s25_port} check
+ server srv26 ${s26_addr}:${s26_port}
+ server srv27 ${s27_addr}:${s27_port} check
+ server srv28 ${s28_addr}:${s28_port}
+ server srv29 ${s29_addr}:${s29_port} check
+ server srv30 ${s30_addr}:${s30_port}
+ server srv31 ${s31_addr}:${s31_port} check
+ server srv32 ${s32_addr}:${s32_port}
+ server srv33 ${s33_addr}:${s33_port} check
+ server srv34 ${s34_addr}:${s34_port}
+ server srv35 ${s35_addr}:${s35_port} check
+ server srv36 ${s36_addr}:${s36_port}
+ server srv37 ${s37_addr}:${s37_port} check
+ server srv38 ${s38_addr}:${s38_port}
+ server srv39 ${s39_addr}:${s39_port} check
+} -start
+
+syslog S -wait
+
+haproxy h1 -cli {
+ send "show servers state"
+ expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n2 be1 1 srv0 ${s0_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s0_port} - 0 0 - - 0\n2 be1 2 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s1_port} - 0 0 - - 0\n2 be1 3 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s2_port} - 0 0 - - 0\n2 be1 4 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s3_port} - 0 0 - - 0\n2 be1 5 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s4_port} - 0 0 - - 0\n2 be1 6 srv5 ${s5_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s5_port} - 0 0 - - 0\n2 be1 7 srv6 ${s6_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s6_port} - 0 0 - - 0\n2 be1 8 srv7 ${s7_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s7_port} - 0 0 - - 0\n2 be1 9 srv8 ${s8_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s8_port} - 0 0 - - 0\n2 be1 10 srv9 ${s9_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s9_port} - 0 0 - - 0\n2 be1 11 srv10 ${s10_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s10_port} - 0 0 - - 0\n2 be1 12 srv11 ${s11_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s11_port} - 0 0 - - 0\n2 be1 13 srv12 ${s12_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s12_port} - 0 0 - - 0\n2 be1 14 srv13 ${s13_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s13_port} - 0 0 - - 0\n2 be1 15 srv14 ${s14_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s14_port} - 0 0 - - 0\n2 be1 16 srv15 ${s15_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s15_port} - 0 0 - - 0\n2 be1 17 srv16 ${s16_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s16_port} - 0 0 - - 0\n2 be1 18 srv17 ${s17_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - 
${s17_port} - 0 0 - - 0\n2 be1 19 srv18 ${s18_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s18_port} - 0 0 - - 0\n2 be1 20 srv19 ${s19_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s19_port} - 0 0 - - 0\n2 be1 21 srv20 ${s20_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s20_port} - 0 0 - - 0\n2 be1 22 srv21 ${s21_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s21_port} - 0 0 - - 0\n2 be1 23 srv22 ${s22_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s22_port} - 0 0 - - 0\n2 be1 24 srv23 ${s23_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s23_port} - 0 0 - - 0\n2 be1 25 srv24 ${s24_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s24_port} - 0 0 - - 0\n2 be1 26 srv25 ${s25_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s25_port} - 0 0 - - 0\n2 be1 27 srv26 ${s26_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s26_port} - 0 0 - - 0\n2 be1 28 srv27 ${s27_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s27_port} - 0 0 - - 0\n2 be1 29 srv28 ${s28_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s28_port} - 0 0 - - 0\n2 be1 30 srv29 ${s29_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s29_port} - 0 0 - - 0\n2 be1 31 srv30 ${s30_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s30_port} - 0 0 - - 0\n2 be1 32 srv31 ${s31_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s31_port} - 0 0 - - 0\n2 be1 33 srv32 ${s32_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s32_port} - 0 0 - - 0\n2 be1 34 srv33 ${s33_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s33_port} - 0 0 - - 0\n2 be1 35 srv34 ${s34_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s34_port} - 0 0 - - 0\n2 be1 36 srv35 ${s35_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s35_port} - 0 0 - - 0\n2 be1 37 srv36 ${s36_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s36_port} - 0 0 - - 0\n2 be1 38 srv37 ${s37_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s37_port} - 0 0 - - 0\n2 be1 39 srv38 ${s38_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - 
${s38_port} - 0 0 - - 0\n2 be1 40 srv39 ${s39_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s39_port} - 0 0 - - 0\n"
+}
+
diff --git a/reg-tests/checks/40be_2srv_odd_health_checks.vtc b/reg-tests/checks/40be_2srv_odd_health_checks.vtc
new file mode 100644
index 0000000..cbd4fc0
--- /dev/null
+++ b/reg-tests/checks/40be_2srv_odd_health_checks.vtc
@@ -0,0 +1,645 @@
+varnishtest "Health-checks"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+#EXCLUDE_TARGETS=freebsd,osx,generic
+#REGTEST_TYPE=slow
+
+# This script starts 40 servers named s0 up to s39.
+# For 0 <= i <= 19:
+# - s(2*i) and s(2*i+1) belong to backend be(2*i+1),
+# - fe(2*i+1) frontend is configured to use the be(2*i+1) backend.
+# - only s(2*i+1) servers have health-checks enabled,
+# - we start 20 clients named c(2*i+1) which connect to fe(2*i+1) frontend,
+# - so as to ensure that health-checks do not consume any connection
+# (any varnishtest server without -repeat <n> with n > 1 accepts
+# only one connection).
+# - we take care of sending the clients to the unchecked servers using the
+# "first" lb algo so that servers always receive a valid request
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv3 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S5 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be5/srv5 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S7 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be7/srv7 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S9 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be9/srv9 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S11 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be11/srv11 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S13 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be13/srv13 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S15 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be15/srv15 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S17 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be17/srv17 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S19 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be19/srv19 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S21 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be21/srv21 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S23 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be23/srv23 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S25 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be25/srv25 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S27 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be27/srv27 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S29 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be29/srv29 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S31 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be31/srv31 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S33 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be33/srv33 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S35 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be35/srv35 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S37 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be37/srv37 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+syslog S39 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be39/srv39 succeeded, reason: Layer4 check passed, check duration: [[:digit:]]+ms, status: 1/1 UP"
+} -start
+
+server s0 {
+ rxreq
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+server s4 {
+ rxreq
+ txresp
+} -start
+
+server s6 {
+ rxreq
+ txresp
+} -start
+
+server s8 {
+ rxreq
+ txresp
+} -start
+
+server s10 {
+ rxreq
+ txresp
+} -start
+
+server s12 {
+ rxreq
+ txresp
+} -start
+
+server s14 {
+ rxreq
+ txresp
+} -start
+
+server s16 {
+ rxreq
+ txresp
+} -start
+
+server s18 {
+ rxreq
+ txresp
+} -start
+
+server s20 {
+ rxreq
+ txresp
+} -start
+
+server s22 {
+ rxreq
+ txresp
+} -start
+
+server s24 {
+ rxreq
+ txresp
+} -start
+
+server s26 {
+ rxreq
+ txresp
+} -start
+
+server s28 {
+ rxreq
+ txresp
+} -start
+
+server s30 {
+ rxreq
+ txresp
+} -start
+
+server s32 {
+ rxreq
+ txresp
+} -start
+
+server s34 {
+ rxreq
+ txresp
+} -start
+
+server s36 {
+ rxreq
+ txresp
+} -start
+
+server s38 {
+ rxreq
+ txresp
+} -start
+
+server s1 {} -start
+server s3 {} -start
+server s5 {} -start
+server s7 {} -start
+server s9 {} -start
+server s11 {} -start
+server s13 {} -start
+server s15 {} -start
+server s17 {} -start
+server s19 {} -start
+server s21 {} -start
+server s23 {} -start
+server s25 {} -start
+server s27 {} -start
+server s29 {} -start
+server s31 {} -start
+server s33 {} -start
+server s35 {} -start
+server s37 {} -start
+server s39 {} -start
+
+haproxy h1 -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ balance first
+ default-server no-check inter 20ms downinter 1s rise 1 fall 1
+
+ backend be1
+ option log-health-checks
+ log ${S1_addr}:${S1_port} daemon
+ server srv0 ${s0_addr}:${s0_port}
+ server srv1 ${s1_addr}:${s1_port} check
+
+ backend be3
+ option log-health-checks
+ log ${S3_addr}:${S3_port} daemon
+ server srv2 ${s2_addr}:${s2_port}
+ server srv3 ${s3_addr}:${s3_port} check
+
+ backend be5
+ option log-health-checks
+ log ${S5_addr}:${S5_port} daemon
+ server srv4 ${s4_addr}:${s4_port}
+ server srv5 ${s5_addr}:${s5_port} check
+
+ backend be7
+ option log-health-checks
+ log ${S7_addr}:${S7_port} daemon
+ server srv6 ${s6_addr}:${s6_port}
+ server srv7 ${s7_addr}:${s7_port} check
+
+ backend be9
+ option log-health-checks
+ log ${S9_addr}:${S9_port} daemon
+ server srv8 ${s8_addr}:${s8_port}
+ server srv9 ${s9_addr}:${s9_port} check
+
+ backend be11
+ option log-health-checks
+ log ${S11_addr}:${S11_port} daemon
+ server srv10 ${s10_addr}:${s10_port}
+ server srv11 ${s11_addr}:${s11_port} check
+
+ backend be13
+ option log-health-checks
+ log ${S13_addr}:${S13_port} daemon
+ server srv12 ${s12_addr}:${s12_port}
+ server srv13 ${s13_addr}:${s13_port} check
+
+ backend be15
+ option log-health-checks
+ log ${S15_addr}:${S15_port} daemon
+ server srv14 ${s14_addr}:${s14_port}
+ server srv15 ${s15_addr}:${s15_port} check
+
+ backend be17
+ option log-health-checks
+ log ${S17_addr}:${S17_port} daemon
+ server srv16 ${s16_addr}:${s16_port}
+ server srv17 ${s17_addr}:${s17_port} check
+
+ backend be19
+ option log-health-checks
+ log ${S19_addr}:${S19_port} daemon
+ server srv18 ${s18_addr}:${s18_port}
+ server srv19 ${s19_addr}:${s19_port} check
+
+ backend be21
+ option log-health-checks
+ log ${S21_addr}:${S21_port} daemon
+ server srv20 ${s20_addr}:${s20_port}
+ server srv21 ${s21_addr}:${s21_port} check
+
+ backend be23
+ option log-health-checks
+ log ${S23_addr}:${S23_port} daemon
+ server srv22 ${s22_addr}:${s22_port}
+ server srv23 ${s23_addr}:${s23_port} check
+
+ backend be25
+ option log-health-checks
+ log ${S25_addr}:${S25_port} daemon
+ server srv24 ${s24_addr}:${s24_port}
+ server srv25 ${s25_addr}:${s25_port} check
+
+ backend be27
+ option log-health-checks
+ log ${S27_addr}:${S27_port} daemon
+ server srv26 ${s26_addr}:${s26_port}
+ server srv27 ${s27_addr}:${s27_port} check
+
+ backend be29
+ option log-health-checks
+ log ${S29_addr}:${S29_port} daemon
+ server srv28 ${s28_addr}:${s28_port}
+ server srv29 ${s29_addr}:${s29_port} check
+
+ backend be31
+ option log-health-checks
+ log ${S31_addr}:${S31_port} daemon
+ server srv30 ${s30_addr}:${s30_port}
+ server srv31 ${s31_addr}:${s31_port} check
+
+ backend be33
+ option log-health-checks
+ log ${S33_addr}:${S33_port} daemon
+ server srv32 ${s32_addr}:${s32_port}
+ server srv33 ${s33_addr}:${s33_port} check
+
+ backend be35
+ option log-health-checks
+ log ${S35_addr}:${S35_port} daemon
+ server srv34 ${s34_addr}:${s34_port}
+ server srv35 ${s35_addr}:${s35_port} check
+
+ backend be37
+ option log-health-checks
+ log ${S37_addr}:${S37_port} daemon
+ server srv36 ${s36_addr}:${s36_port}
+ server srv37 ${s37_addr}:${s37_port} check
+
+ backend be39
+ option log-health-checks
+ log ${S39_addr}:${S39_port} daemon
+ server srv38 ${s38_addr}:${s38_port}
+ server srv39 ${s39_addr}:${s39_port} check
+
+ frontend fe1
+ bind "fd@${fe1}"
+ use_backend be1
+
+ frontend fe3
+ bind "fd@${fe3}"
+ use_backend be3
+
+ frontend fe5
+ bind "fd@${fe5}"
+ use_backend be5
+
+ frontend fe7
+ bind "fd@${fe7}"
+ use_backend be7
+
+ frontend fe9
+ bind "fd@${fe9}"
+ use_backend be9
+
+ frontend fe11
+ bind "fd@${fe11}"
+ use_backend be11
+
+ frontend fe13
+ bind "fd@${fe13}"
+ use_backend be13
+
+ frontend fe15
+ bind "fd@${fe15}"
+ use_backend be15
+
+ frontend fe17
+ bind "fd@${fe17}"
+ use_backend be17
+
+ frontend fe19
+ bind "fd@${fe19}"
+ use_backend be19
+
+ frontend fe21
+ bind "fd@${fe21}"
+ use_backend be21
+
+ frontend fe23
+ bind "fd@${fe23}"
+ use_backend be23
+
+ frontend fe25
+ bind "fd@${fe25}"
+ use_backend be25
+
+ frontend fe27
+ bind "fd@${fe27}"
+ use_backend be27
+
+ frontend fe29
+ bind "fd@${fe29}"
+ use_backend be29
+
+ frontend fe31
+ bind "fd@${fe31}"
+ use_backend be31
+
+ frontend fe33
+ bind "fd@${fe33}"
+ use_backend be33
+
+ frontend fe35
+ bind "fd@${fe35}"
+ use_backend be35
+
+ frontend fe37
+ bind "fd@${fe37}"
+ use_backend be37
+
+ frontend fe39
+ bind "fd@${fe39}"
+ use_backend be39
+} -start
+
+# This is a sort of synchronization: after having waited for all the syslog
+# servers we are sure that all the health-checks have succeeded.
+syslog S1 -wait
+syslog S3 -wait
+syslog S5 -wait
+syslog S7 -wait
+syslog S9 -wait
+syslog S11 -wait
+syslog S13 -wait
+syslog S15 -wait
+syslog S17 -wait
+syslog S19 -wait
+syslog S21 -wait
+syslog S23 -wait
+syslog S25 -wait
+syslog S27 -wait
+syslog S29 -wait
+syslog S31 -wait
+syslog S33 -wait
+syslog S35 -wait
+syslog S37 -wait
+syslog S39 -wait
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c3 -connect ${h1_fe3_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c5 -connect ${h1_fe5_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c7 -connect ${h1_fe7_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c9 -connect ${h1_fe9_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c11 -connect ${h1_fe11_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c13 -connect ${h1_fe13_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c15 -connect ${h1_fe15_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c17 -connect ${h1_fe17_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c19 -connect ${h1_fe19_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c21 -connect ${h1_fe21_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c23 -connect ${h1_fe23_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c25 -connect ${h1_fe25_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c27 -connect ${h1_fe27_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c29 -connect ${h1_fe29_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c31 -connect ${h1_fe31_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c33 -connect ${h1_fe33_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c35 -connect ${h1_fe35_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c37 -connect ${h1_fe37_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c39 -connect ${h1_fe39_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -start
+
+client c1 -wait
+client c3 -wait
+client c5 -wait
+client c7 -wait
+client c9 -wait
+client c11 -wait
+client c13 -wait
+client c15 -wait
+client c17 -wait
+client c19 -wait
+client c21 -wait
+client c23 -wait
+client c25 -wait
+client c27 -wait
+client c29 -wait
+client c31 -wait
+client c33 -wait
+client c35 -wait
+client c37 -wait
+client c39 -wait
+
+server s0 -wait
+server s2 -wait
+server s4 -wait
+server s6 -wait
+server s8 -wait
+server s10 -wait
+server s12 -wait
+server s14 -wait
+server s16 -wait
+server s18 -wait
+server s20 -wait
+server s22 -wait
+server s24 -wait
+server s26 -wait
+server s28 -wait
+server s30 -wait
+server s32 -wait
+server s34 -wait
+server s36 -wait
+server s38 -wait
+
+
+haproxy h1 -cli {
+ send "show servers state"
+ # output produced using the command below (warning: a bug inserts a "be0" every other line):
+ # for ((i=0;i<40;i++)); do id=$((i/2+2)); be=$((i|1)); si=$(((i&1)+1));
+ # if ((i&1)); then chk="6 ([[:digit:]]+ ){3}"; else chk="1 0 1 0 ";fi;
+ # printf "%d be%d %d srv%d \${s%d_addr} 2 0 1 1 [[:digit:]]+ %s0 0 0 - \${s%d_port} - 0 0 - - 0\n" "$id" "$be" "$si" "$i" "$i" "$chk" "$i" "$i" ;
+ # done|grep -v be0|sed 's,$,\\n,'| tr -d '\n'
+ expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n2 be1 1 srv0 ${s0_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s0_port} - 0 0 - - 0\n2 be1 2 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s1_port} - 0 0 - - 0\n3 be3 1 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s2_port} - 0 0 - - 0\n3 be3 2 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s3_port} - 0 0 - - 0\n4 be5 1 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s4_port} - 0 0 - - 0\n4 be5 2 srv5 ${s5_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s5_port} - 0 0 - - 0\n5 be7 1 srv6 ${s6_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s6_port} - 0 0 - - 0\n5 be7 2 srv7 ${s7_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s7_port} - 0 0 - - 0\n6 be9 1 srv8 ${s8_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s8_port} - 0 0 - - 0\n6 be9 2 srv9 ${s9_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s9_port} - 0 0 - - 0\n7 be11 1 srv10 ${s10_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s10_port} - 0 0 - - 0\n7 be11 2 srv11 ${s11_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s11_port} - 0 0 - - 0\n8 be13 1 srv12 ${s12_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s12_port} - 0 0 - - 0\n8 be13 2 srv13 ${s13_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s13_port} - 0 0 - - 0\n9 be15 1 srv14 ${s14_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s14_port} - 0 0 - - 0\n9 be15 2 srv15 ${s15_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s15_port} - 0 0 - - 0\n10 be17 1 srv16 ${s16_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s16_port} - 0 0 - - 0\n10 be17 2 srv17 ${s17_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - 
${s17_port} - 0 0 - - 0\n11 be19 1 srv18 ${s18_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s18_port} - 0 0 - - 0\n11 be19 2 srv19 ${s19_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s19_port} - 0 0 - - 0\n12 be21 1 srv20 ${s20_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s20_port} - 0 0 - - 0\n12 be21 2 srv21 ${s21_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s21_port} - 0 0 - - 0\n13 be23 1 srv22 ${s22_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s22_port} - 0 0 - - 0\n13 be23 2 srv23 ${s23_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s23_port} - 0 0 - - 0\n14 be25 1 srv24 ${s24_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s24_port} - 0 0 - - 0\n14 be25 2 srv25 ${s25_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s25_port} - 0 0 - - 0\n15 be27 1 srv26 ${s26_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s26_port} - 0 0 - - 0\n15 be27 2 srv27 ${s27_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s27_port} - 0 0 - - 0\n16 be29 1 srv28 ${s28_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s28_port} - 0 0 - - 0\n16 be29 2 srv29 ${s29_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s29_port} - 0 0 - - 0\n17 be31 1 srv30 ${s30_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s30_port} - 0 0 - - 0\n17 be31 2 srv31 ${s31_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s31_port} - 0 0 - - 0\n18 be33 1 srv32 ${s32_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s32_port} - 0 0 - - 0\n18 be33 2 srv33 ${s33_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s33_port} - 0 0 - - 0\n19 be35 1 srv34 ${s34_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s34_port} - 0 0 - - 0\n19 be35 2 srv35 ${s35_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s35_port} - 0 0 - - 0\n20 be37 1 srv36 ${s36_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 0 0 0 0 - ${s36_port} - 0 0 - - 0\n20 be37 2 srv37 ${s37_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s37_port} - 0 0 - - 0\n21 be39 1 srv38 ${s38_addr} 2 0 1 1 [[:digit:]]+ 1 0 1 
0 0 0 0 - ${s38_port} - 0 0 - - 0\n21 be39 2 srv39 ${s39_addr} 2 0 1 1 [[:digit:]]+ 6 ([[:digit:]]+ ){3}0 0 0 - ${s39_port} - 0 0 - - 0\n"
+}
+
diff --git a/reg-tests/checks/4be_1srv_health_checks.vtc b/reg-tests/checks/4be_1srv_health_checks.vtc
new file mode 100644
index 0000000..02564be
--- /dev/null
+++ b/reg-tests/checks/4be_1srv_health_checks.vtc
@@ -0,0 +1,201 @@
+varnishtest "Health-check test"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+#EXCLUDE_TARGETS=freebsd
+#REGTEST_TYPE=slow
+
+# This script tests health-checks for four backends with one server per backend.
+# A syslog server is attached to each backend to check the syslog messages
+# in the right order.
+
+# First, we check a health-check has passed for all the servers thanks to the syslog
+# messages. Then each server is disabled. The health-check statuses are checked.
+# Then each server is re-enabled. Finally health-check status
+# verifications for each server terminate the execution of this script.
+
+# Note that the CLI is synchronized with the syslog servers so that
+# to be sure to receive the passed health-checks status messages before
+# disabling the servers. Same thing, when we check that the servers are down
+# before enabling the servers.
+
+# Cyclic barrier to synchronize the CLI with the syslog servers
+barrier b1 cond 5 -cyclic
+
+# These servers are there only for the health-check test.
+server s1 {
+} -start
+
+server s2 {
+} -start
+
+server s3 {
+} -start
+
+server s4 {
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
+ barrier b1 sync
+ recv alert
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be1/srv1 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
+ recv emerg
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be1 has no server available!"
+ barrier b1 sync
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be1/srv1 is UP/READY \\(leaving forced maintenance\\).|Health check for server be1/srv1 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
+ barrier b1 sync
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv2 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
+ barrier b1 sync
+ recv alert
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be2/srv2 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
+ recv emerg
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be2 has no server available!"
+ barrier b1 sync
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be2/srv2 is UP/READY \\(leaving forced maintenance\\).|Health check for server be2/srv2 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
+ barrier b1 sync
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv3 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
+ barrier b1 sync
+ recv alert
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be3/srv3 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
+ recv emerg
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be3 has no server available!"
+ barrier b1 sync
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be3/srv3 is UP/READY \\(leaving forced maintenance\\).|Health check for server be3/srv3 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
+ barrier b1 sync
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv4 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP"
+ barrier b1 sync
+ recv alert
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Server be4/srv4 is going DOWN for maintenance. 0 active and 0 backup servers left. [01] sessions active, 0 requeued, 0 remaining in queue."
+ recv emerg
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: backend be4 has no server available!"
+ barrier b1 sync
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: (Server be4/srv4 is UP/READY \\(leaving forced maintenance\\).|Health check for server be4/srv4 succeeded.+reason: Layer4 check passed.+check duration: [[:digit:]]+ms.+status: 1/1 UP)"
+ barrier b1 sync
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ default-server check inter 200ms downinter 100s rise 1 fall 1
+
+ frontend fe1
+ bind "fd@${fe1}"
+ use_backend be1
+
+ frontend fe2
+ bind "fd@${fe2}"
+ use_backend be2
+
+ frontend fe3
+ bind "fd@${fe3}"
+ use_backend be3
+
+ frontend fe4
+ bind "fd@${fe4}"
+ use_backend be4
+
+ backend be1
+ option log-health-checks
+ log ${S1_addr}:${S1_port} daemon
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be2
+ option log-health-checks
+ log ${S2_addr}:${S2_port} daemon
+ server srv2 ${s2_addr}:${s2_port}
+
+ backend be3
+ option log-health-checks
+ log ${S3_addr}:${S3_port} daemon
+ server srv3 ${s3_addr}:${s3_port}
+
+ backend be4
+ option log-health-checks
+ log ${S4_addr}:${S4_port} daemon
+ server srv4 ${s4_addr}:${s4_port}
+} -start
+
+haproxy h1 -cli {
+ barrier b1 sync
+ send "show servers state"
+ expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 6 3 1 [67] 0 0 0 - ${s4_port} - 0 0 - - 0"
+}
+
+haproxy h1 -cli {
+ send "disable server be1/srv1"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "disable server be2/srv2"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "disable server be3/srv3"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "disable server be4/srv4"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ barrier b1 sync
+ send "show servers state"
+ expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 0 1 1 1 [[:digit:]]+ 6 3 [01] 1[45] 0 0 0 - ${s4_port} - 0 0 - - 0"
+}
+
+haproxy h1 -cli {
+ send "enable server be1/srv1"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "enable server be2/srv2"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "enable server be3/srv3"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ send "enable server be4/srv4"
+ expect ~ .*
+}
+
+haproxy h1 -cli {
+ barrier b1 sync
+ send "show servers state"
+ expect ~ "# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port srvrecord srv_use_ssl srv_check_port srv_check_addr srv_agent_addr srv_agent_port\n6 be1 1 srv1 ${s1_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s1_port} - 0 0 - - 0\n7 be2 1 srv2 ${s2_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s2_port} - 0 0 - - 0\n8 be3 1 srv3 ${s3_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s3_port} - 0 0 - - 0\n9 be4 1 srv4 ${s4_addr} 2 0 1 1 [[:digit:]]+ 6 [03] 1 [67] 0 0 0 - ${s4_port} - 0 0 - - 0"
+}
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
+
diff --git a/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc b/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc
new file mode 100644
index 0000000..3d36491
--- /dev/null
+++ b/reg-tests/checks/4be_1srv_smtpchk_httpchk_layer47errors.vtc
@@ -0,0 +1,100 @@
+varnishtest "Check: smtpchk option"
+feature ignore_unknown_macro
+
+#EXCLUDE_TARGETS=freebsd,osx,generic
+#REGTEST_TYPE=slow
+
+barrier b cond 3
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 succeeded.+reason: Layer7 check passed.+code: 221.+check duration: [[:digit:]]+ms.+status: 1/1 UP."
+ barrier b sync
+ recv
+ expect ~ "Health check for server be1/srv1 failed.+reason: Layer7 timeout.+check duration: [[:digit:]]+ms.+status: 0/1 DOWN"
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv2 succeeded.+reason: Layer7 check passed.+code: 200.+.+check duration: [[:digit:]]+ms.+status: 1/1 UP."
+ barrier b sync
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv2 failed.+reason: Layer7 timeout.+check duration: [[:digit:]]+ms.+status: 0/1 DOWN"
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv3 failed.+reason: Layer4 connection problem.+info: \"General socket error \\(Network is unreachable\\)\".+check duration: [[:digit:]]+ms.+status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv4 failed.+reason: Layer4 connection problem.+info: \"Connection refused\".+check duration: [[:digit:]]+ms.+status: 0/1 DOWN."
+} -start
+
+server s1 {
+ send "2"
+ send "2"
+ send "0"
+ send "\r\n"
+ recv 16
+ send "2"
+ send "4"
+ send "8"
+ send "\r\n"
+ recv 6
+ send "2"
+ send "2"
+ send "1"
+ send " ok\r\n"
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option log-health-checks
+ default-server inter 200ms downinter 100ms rise 1 fall 1
+
+ backend be1
+ option smtpchk
+ log ${S1_addr}:${S1_port} daemon
+ server srv1 ${s1_addr}:${s1_port} check
+
+ backend be2
+ mode tcp
+ log ${S2_addr}:${S2_port} daemon
+ option httpchk OPTIONS * HTTP/1.1
+ http-check send hdr Host www
+ server srv2 ${s2_addr}:${s2_port} check
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ server srv3 255.255.255.255:11111 check
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ server srv4 localhost:11111 check
+} -start
+
+haproxy h1 -cli {
+ barrier b sync
+ send "show servers state"
+ expect ~ .*
+}
+
+server s1 -wait
+server s2 -wait
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
+
+
diff --git a/reg-tests/checks/agent-check.vtc b/reg-tests/checks/agent-check.vtc
new file mode 100644
index 0000000..5cf51c6
--- /dev/null
+++ b/reg-tests/checks/agent-check.vtc
@@ -0,0 +1,42 @@
+varnishtest "Health-checks: agent-check"
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+
+server s1 {
+ barrier b1 sync
+ recv 5
+ send "75%,maxconn:30,maint,down\n"
+ expect_close
+ barrier b2 sync
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ server srv ${s1_addr}:${s1_port} weight 100 agent-check agent-addr ${s1_addr} agent-port ${s1_port} agent-send "pouet" agent-inter 100ms
+} -start
+
+haproxy h1 -cli {
+ send "show servers state"
+ expect ~ "be1 1 srv 127.0.0.1 2 0 100 100 [[:digit:]]+ 1 0 [[:digit:]] 0 [[:digit:]]+ 0 0 - ${s1_port} -"
+ send "show stat"
+ expect ~ "be1,srv,0,0,0,0,,"
+
+ barrier b1 sync
+ barrier b2 sync
+ send "show servers state"
+ expect ~ "be1 1 srv 127.0.0.1 0 1 75 100 [[:digit:]]+ 1 0 [[:digit:]] 0 [[:digit:]]+ 0 0 - ${s1_port} -"
+ send "show stat"
+ expect ~ "be1,srv,0,0,0,0,30"
+}
diff --git a/reg-tests/checks/common.pem b/reg-tests/checks/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/checks/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/checks/http-check-expect.vtc b/reg-tests/checks/http-check-expect.vtc
new file mode 100644
index 0000000..637eec6
--- /dev/null
+++ b/reg-tests/checks/http-check-expect.vtc
@@ -0,0 +1,64 @@
+varnishtest "Health-checks: some http-check expect tests"
+feature ignore_unknown_macro
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+# This script tests http-check expect rules.
+
+server s1 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ txresp -status 202 \
+ -hdr "x-test1: true, next value" \
+ -hdr "x-test2: true, begin-value, value-end, value-sub-string, value-reg-123ABC" \
+ -hdr "x-begin-test: 1" \
+ -hdr "x-test-end: 1" \
+ -hdr "x-sub-test: 1" \
+ -hdr "x-reg-test1: 1" \
+ -hdr "x-hdr-name: x-test1"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded.*code: 202"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option log-health-checks
+
+ backend be1
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk
+ http-check expect status 200-399
+
+ http-check expect hdr name "x-test1"
+ http-check expect hdr name -m str "X-Test2"
+ http-check expect hdr name -m beg "X-Begin-"
+ http-check expect hdr name -m end "-End"
+ http-check expect hdr name -m sub "-Sub-"
+ http-check expect hdr name -m reg "^[a-z]+-Reg-[a-z]+[0-9]\$"
+ http-check set-var(check.hdr_name) res.fhdr(x-hdr-name)
+ http-check expect hdr name-lf -m str "%[var(check.hdr_name)]"
+ http-check expect hdr name-lf -m str "%[res.fhdr(x-hdr-name)]"
+
+ http-check expect fhdr name "x-test1" value "true, next value"
+ http-check expect hdr name "x-test2" value -m str "true"
+ http-check expect hdr name -m beg "x-test" value -m beg "begin-"
+ http-check expect hdr name -m beg "x-test" value -m end "-end"
+ http-check expect hdr name -m beg "x-test" value -m sub "-sub-"
+ http-check expect hdr name -m beg "x-test" value -m reg "^value-reg-[A-Z0-9]+\$"
+ http-check expect fhdr name -m beg "x-test" value -m reg "value-reg-[A-Z0-9]+"
+ http-check set-var(check.hdr_value) str(x-test1)
+ http-check expect hdr name -m beg "x-" value-lf -m str "%[var(check.hdr_value)]"
+ http-check expect fhdr name -m beg "x-" value-lf -m str "%[res.fhdr(x-hdr-name)]"
+
+ server srv ${s1_addr}:${s1_port} check inter 100ms rise 1 fall 1
+} -start
+
+syslog S1 -wait
diff --git a/reg-tests/checks/http-check-send.vtc b/reg-tests/checks/http-check-send.vtc
new file mode 100644
index 0000000..530ad75
--- /dev/null
+++ b/reg-tests/checks/http-check-send.vtc
@@ -0,0 +1,165 @@
+varnishtest "Health-checks: http-check send test"
+#REGTEST_TYPE=slow
+#REQUIRE_VERSION=2.4
+feature ignore_unknown_macro
+
+# This script tests HTTP health-checks and more particularly the "http-check
+# send" directive.
+
+server s1 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /test
+ expect req.proto == HTTP/1.1
+ expect req.http.connection == "close"
+ txresp
+} -start
+
+server s3 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ expect req.http.host == <undef>
+ expect req.http.x-test == <undef>
+ expect req.bodylen == 0
+ txresp
+} -start
+
+server s4 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /status
+ expect req.proto == HTTP/1.1
+ expect req.http.connection == "close"
+ expect req.http.host == "my-www-host"
+ expect req.http.x-test == true
+ expect req.http.content-length == 4
+ expect req.bodylen == 4
+ expect req.body == "test"
+ txresp
+} -start
+
+server s5 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ expect req.http.host == "other-www-host"
+ expect req.http.x-test == <undef>
+ expect req.http.x-new-test == true
+ expect req.http.content-length == 10
+ expect req.bodylen == 10
+ expect req.body == "other test"
+ txresp
+} -start
+
+server s6 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /
+ expect req.proto == HTTP/1.1
+ expect req.http.host == "ws-host"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "raw-proto"
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: raw-proto"
+} -start
+
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded.*code: 200"
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv succeeded.*code: 200"
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv succeeded.*code: 200"
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv succeeded.*code: 200"
+} -start
+
+syslog S5 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be5/srv succeeded.*code: 200"
+} -start
+
+syslog S6 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be6_ws/srv succeeded.*code: 101"
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option httpchk
+ option log-health-checks
+
+ backend be1
+ log ${S1_addr}:${S1_port} len 2048 local0
+ server srv ${s1_addr}:${s1_port} check inter 200ms rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} len 2048 local0
+ option httpchk GET /test HTTP/1.1
+ server srv ${s2_addr}:${s2_port} check inter 200ms rise 1 fall 1
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option httpchk GET /status HTTP/1.1
+ option log-health-checks
+ http-check send hdr Host "my-www-host" hdr X-test true body "test"
+
+ backend be3
+ option httpchk
+ log ${S3_addr}:${S3_port} len 2048 local0
+ server srv ${s3_addr}:${s3_port} check inter 200ms rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} len 2048 local0
+ server srv ${s4_addr}:${s4_port} check inter 200ms rise 1 fall 1
+
+ backend be5
+ log ${S5_addr}:${S5_port} len 2058 local0
+ http-check send hdr Host "other-www-host" hdr X-New-Test true body "other test"
+ server srv ${s5_addr}:${s5_port} check inter 200ms rise 1 fall 1
+
+ backend be6_ws
+ log ${S6_addr}:${S6_port} len 2048 local0
+ http-check send meth GET uri / ver HTTP/1.1 hdr host ws-host hdr connection upgrade hdr upgrade raw-proto
+ http-check expect status 101
+ server srv ${s6_addr}:${s6_port} check inter 200ms rise 1 fall 1
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
+syslog S5 -wait
+syslog S6 -wait
diff --git a/reg-tests/checks/http-check.vtc b/reg-tests/checks/http-check.vtc
new file mode 100644
index 0000000..3353060
--- /dev/null
+++ b/reg-tests/checks/http-check.vtc
@@ -0,0 +1,157 @@
+varnishtest "Health-checks: some http-check tests"
+feature ignore_unknown_macro
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+# This script tests HTTP health-checks.
+
+server s1 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /status
+ expect req.proto == HTTP/1.1
+ txresp
+} -start
+
+server s3 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /status
+ expect req.proto == HTTP/1.1
+ txresp
+} -start
+
+server s4 {
+ rxreq
+ expect req.method == GET
+ expect req.url == /req1
+ expect req.proto == HTTP/1.1
+ expect req.http.x-test == "server=srv"
+ expect req.http.x-haproxy-server-state ~ "UP.+name=be4/srv"
+ expect req.bodylen == 0
+ txresp
+
+ accept
+ rxreq
+ expect req.method == GET
+ expect req.url == /req2
+ expect req.proto == HTTP/1.1
+ expect req.http.x-test == "server="
+ expect req.http.x-haproxy-server-state ~ "UP.+name=be4/srv"
+ expect req.http.content-length == 17
+ expect req.bodylen == 17
+ expect req.body == "health-check body"
+ txresp
+
+ accept
+ rxreq
+ expect req.method == GET
+ expect req.url == /req3
+ expect req.proto == HTTP/1.0
+ expect req.http.x-test == <undef>
+ expect req.http.x-haproxy-server-state ~ "UP.+name=be4/srv"
+ expect req.bodylen == 0
+ txresp
+
+ accept
+ rxreq
+ expect req.method == GET
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ expect req.http.x-test == <undef>
+ expect req.http.x-haproxy-server-state ~ "UP.+name=be4/srv"
+ expect req.bodylen == 23
+ expect req.body == "health-check on be4-srv"
+ txresp
+
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option log-health-checks
+
+ backend be1
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk
+ server srv ${s1_addr}:${s1_port} check inter 100ms rise 1 fall 1
+
+ backend be2
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk GET /status HTTP/1.1
+ server srv ${s2_addr}:${s2_port} check inter 100ms rise 1 fall 1
+
+ backend be3
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk
+ http-check send meth GET uri /status ver HTTP/1.1
+ server srv ${s3_addr}:${s3_port} check inter 100ms rise 1 fall 1
+
+ backend be4
+ mode tcp
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk
+ http-check send-state
+ http-check connect addr ${s4_addr}:${s4_port}
+ http-check set-var(check.server) "str(srv)"
+ http-check set-var(check.path) "str(/req1)"
+ http-check send meth GET uri-lf "%[var(check.path)]" ver HTTP/1.1 hdr x-test "server=%[var(check.server)]"
+ http-check expect status 200
+ http-check connect addr ${s4_addr} port ${s4_port}
+ http-check unset-var(check.server)
+ http-check set-var(check.path) "str(/req2)"
+ http-check send meth GET uri-lf "%[var(check.path)]" ver HTTP/1.1 hdr x-test "server=%[var(check.server)]" body "health-check body"
+ http-check expect rstatus "^2[0-9]{2}"
+ http-check connect addr ${s4_addr} port ${s4_port}
+ http-check set-var(check.path) "str(/req3)"
+ http-check send meth GET uri-lf "%[var(check.path)]"
+ http-check expect rstatus "^2[0-9]{2}"
+ http-check connect addr ${s4_addr} port ${s4_port}
+ http-check unset-var(check.path)
+ http-check send meth GET uri-lf "%[var(check.path)]" body-lf "health-check on %[be_name]-%[srv_name]"
+ ## implicit expect rule
+ server srv ${s1_addr}:${s1_port} check inter 100ms rise 1 fall 1
+
+ backend be5
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk
+ server srv ${h1_li1_addr}:${h1_li1_port} proto h2 check inter 100ms rise 1 fall 1
+
+ backend be6
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option httpchk GET /status HTTP/1.1
+ server srv ${h1_li1_addr}:${h1_li1_port} check check-proto h2 inter 100ms rise 1 fall 1
+
+ listen li1
+ mode http
+ bind "fd@${li1}" proto h2
+ http-request return status 200
+
+} -start
+
+syslog S1 -wait
diff --git a/reg-tests/checks/http-monitor-uri.vtc b/reg-tests/checks/http-monitor-uri.vtc
new file mode 100644
index 0000000..b6c8ccb
--- /dev/null
+++ b/reg-tests/checks/http-monitor-uri.vtc
@@ -0,0 +1,56 @@
+varnishtest "Test the HTTP directive monitor-uri"
+#REQUIRE_VERSION=2.2
+
+# This config tests the HTTP directive monitor-uri. Especially the path matching
+# when an absolute-form uri is received from the client. But also the
+# case-sensitivity of the matching.
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ monitor-uri /health
+
+ frontend fe2
+ bind "fd@${fe2}"
+ monitor-uri http://www.haproxy.org/health
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /health
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url http://www.haproxy.org/health \
+ -hdr "Host: www.haproxy.org"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /hEAlth
+ rxresp
+ expect resp.status == 503
+} -run
+
+client c4 -connect ${h1_fe2_sock} {
+ txreq -req GET -url http://www.haproxy.org/health \
+ -hdr "Host: www.haproxy.org"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c5 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /health
+ rxresp
+ expect resp.status == 503
+} -run
diff --git a/reg-tests/checks/ldap-check.vtc b/reg-tests/checks/ldap-check.vtc
new file mode 100644
index 0000000..a0e5509
--- /dev/null
+++ b/reg-tests/checks/ldap-check.vtc
@@ -0,0 +1,96 @@
+varnishtest "Health-checks: LDAP health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for LDAP applications, enabled using the
+# "option ldap-check" line. An intermediate listener is used to validate
+# the request because it is impossible with VTEST to read and match raw
+# text.
+
+server s1 {
+ recv 14
+ sendhex "300C020101 61 070A01 00 04000400"
+} -start
+
+server s2 {
+ recv 14
+ sendhex "300C020101 60 070A01 00 04000400"
+} -start
+
+server s3 {
+ recv 14
+ sendhex "300C020101 61 070A01 01 04000400"
+} -start
+
+server s4 {
+ recv 14
+ sendhex "308400000010020101 61 84000000070A01"
+ delay 0.1
+ sendhex "00 04000400"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"Success\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv failed, reason: Layer7 invalid response.+info: \"Not LDAPv3 protocol\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer7 wrong status.+code: 1.+info: \"See RFC: http://tools.ietf.org/html/rfc4511#section-4.1.9\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv succeeded, reason: Layer7 check passed.+info: \"Success\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option ldap-check
+ server srv ${h1_ldap1_addr}:${h1_ldap1_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option ldap-check
+ server srv ${s2_addr}:${s2_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option ldap-check
+ server srv ${s3_addr}:${s3_port} check inter 1s rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option ldap-check
+ server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
+ listen ldap1
+ bind "fd@${ldap1}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 14 } { req.payload(0,14) -m bin "300C020101600702010304008000" }
+ tcp-request content reject
+ server srv ${s1_addr}:${s1_port}
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
diff --git a/reg-tests/checks/mysql-check.vtc b/reg-tests/checks/mysql-check.vtc
new file mode 100644
index 0000000..b2348c3
--- /dev/null
+++ b/reg-tests/checks/mysql-check.vtc
@@ -0,0 +1,123 @@
+varnishtest "Health-checks: MySQL health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for MySQL applications, enabled using the
+# "option mysql-check" line. An intermediate listener is used to validate
+# the request because it is impossible with VTEST to read and match raw
+# text.
+
+server s1 {
+ sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
+ expect_close
+} -start
+
+server s2 {
+ sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
+ recv 20
+ sendhex "03000002000000"
+} -start
+
+server s3 {
+ sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
+ recv 47
+ sendhex "0700000200000002000000"
+} -start
+
+server s4 {
+ sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
+ recv 21
+ sendhex "67000002FFE304436C69656E7420646F6573206E6F7420737570706F72742061757468656E7469636174696F6E2070726F746F636F6C20726571756573746564206279207365727665723B20636F6E736964657220757067726164696E67204D7953514C20636C69656E74"
+} -start
+
+server s5 {
+ sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
+ recv 48
+ sendhex "67000002FFE304436C69656E7420646F6573206E6F7420737570706F72742061757468656E7469636174696F6E2070726F746F636F6C20726571756573746564206279207365727665723B20636F6E736964657220757067726164696E67204D7953514C20636C69656E74"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"8.0.19\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv succeeded, reason: Layer7 check passed.+info: \"8.0.19\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv succeeded, reason: Layer7 check passed.+info: \"8.0.19\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv failed, reason: Layer7 wrong status.+code: 1251.+info: \"Client does not support authentication protocol requested by server; consider upgrading MySQL client\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S5 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be5/srv failed, reason: Layer7 wrong status.+code: 1251.+info: \"Client does not support authentication protocol requested by server; consider upgrading MySQL client\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option mysql-check
+ server srv ${s1_addr}:${s1_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option mysql-check user user pre-41
+ server srv ${h1_mysql1_addr}:${h1_mysql1_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option mysql-check user user
+ server srv ${h1_mysql2_addr}:${h1_mysql2_port} check inter 1s rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option mysql-check user pouet
+ server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
+ backend be5
+ log ${S5_addr}:${S5_port} daemon
+ option log-health-checks
+ option mysql-check user pouet post-41
+ server srv ${s5_addr}:${s5_port} check inter 1s rise 1 fall 1
+
+ listen mysql1
+ bind "fd@${mysql1}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 20 } { req.payload(0,20) -m bin "0B00000100800000017573657200000100000001" }
+ tcp-request content reject
+ server srv ${s2_addr}:${s2_port}
+
+ listen mysql2
+ bind "fd@${mysql2}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 47 } { req.payload(0,47) -m bin "2600000100820000008000012100000000000000000000000000000000000000000000007573657200000100000001" }
+ tcp-request content reject
+ server srv ${s3_addr}:${s3_port}
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
+syslog S5 -wait
diff --git a/reg-tests/checks/pgsql-check.vtc b/reg-tests/checks/pgsql-check.vtc
new file mode 100644
index 0000000..2c9c65b
--- /dev/null
+++ b/reg-tests/checks/pgsql-check.vtc
@@ -0,0 +1,93 @@
+varnishtest "Health-checks: PostgreSQL health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for PostgreSQL applications, enabled using the
+# "option pgsql-check" line. An intermediate listener is used to validate
+# the request because it is impossible with VTEST to read and match raw
+# text.
+
+server s1 {
+ recv 23
+ sendhex "520000000800000000"
+} -start
+
+server s2 {
+ recv 23
+ sendhex "450000000B53464154414C00"
+} -start
+
+server s3 {
+ recv 23
+ send "Not a PostgreSQL response"
+} -start
+
+server s4 {
+ recv 23
+ sendhex "52000000170000000A534352414D2D5348412D3235360000"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"PostgreSQL server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv failed, reason: Layer7 invalid response.+info: \"FATAL\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer7 wrong status.+info: \"PostgreSQL unknown error\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv succeeded, reason: Layer7 check passed.+info: \"PostgreSQL server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option pgsql-check user postgres
+ server srv ${h1_pgsql_addr}:${h1_pgsql_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option pgsql-check user postgres
+ server srv ${s2_addr}:${s2_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option pgsql-check user postgres
+ server srv ${s3_addr}:${s3_port} check inter 1s rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option pgsql-check user postgres
+ server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
+ listen pgsql1
+ bind "fd@${pgsql}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 23 } { req.payload(0,23) -m bin "00000017000300007573657200706f7374677265730000" }
+ tcp-request content reject
+ server srv ${s1_addr}:${s1_port}
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
diff --git a/reg-tests/checks/redis-check.vtc b/reg-tests/checks/redis-check.vtc
new file mode 100644
index 0000000..78b6ed3
--- /dev/null
+++ b/reg-tests/checks/redis-check.vtc
@@ -0,0 +1,61 @@
+varnishtest "Health-checks: Redis health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for Redis applications, enabled using the
+# "option redis-check" line. An intermediate listener is used to validate
+# the request because it is impossible with VTEST to read and match raw
+# text.
+
+server s1 {
+ recv 14
+ send "+PONG\r\n"
+} -start
+
+server s2 {
+ recv 14
+ send "-Error message\r\n"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"Redis server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv failed, reason: Layer7 wrong status.+info: \"-Error message\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option redis-check
+ server srv ${h1_redis_addr}:${h1_redis_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option redis-check
+ server srv ${s2_addr}:${s2_port} check inter 1s rise 1 fall 1
+
+ listen redis1
+ bind "fd@${redis}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 14 } { req.payload(0,14) -m str "*1\r\n\$4\r\nPING\r\n" }
+ tcp-request content reject
+ server srv ${s1_addr}:${s1_port}
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
diff --git a/reg-tests/checks/smtp-check.vtc b/reg-tests/checks/smtp-check.vtc
new file mode 100644
index 0000000..723f5f0
--- /dev/null
+++ b/reg-tests/checks/smtp-check.vtc
@@ -0,0 +1,110 @@
+varnishtest "Health-checks: SMTP health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for SMTP servers, enabled using the
+# "option smtpchk" line.
+
+server s1 {
+ send "220 smtp-check.vtc SMTP Server\r\n"
+ recv 16
+ send "250 smtp-check.vtc\r\n"
+ recv 6
+ send "221 smtp-check.vtc closing\r\n"
+} -start
+
+server s2 {
+ send "220 smtp-check.vtc SMTP Server\r\n"
+ recv 17
+ send "250-smtp-check.vtc\r\n"
+ send "250-KEYWORD\r\n"
+ send "250 LAST KEYWORD\r\n"
+ recv 6
+ send "221 smtp-check.vtc closing\r\n"
+} -start
+
+server s3 {
+ send "I'm not a SMTP server\r\n"
+} -start
+
+server s4 {
+ send "421 Try again later\r\n"
+} -start
+
+server s5 {
+ send "220 smtp-check.vtc SMTP Server\r\n"
+ recv 16
+ send "512 DNS error\r\n"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+code: 221.+info: \"smtp-check.vtc closing\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv succeeded, reason: Layer7 check passed.+code: 221.+info: \"smtp-check.vtc closing\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer7 invalid response.+info: \"I'm not a SMTP server\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv failed, reason: Layer7 wrong status.+code: 421.+info: \"Try again later\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S5 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be5/srv failed, reason: Layer7 wrong status.+code: 512.+info: \"DNS error\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option smtpchk
+ server srv ${s1_addr}:${s1_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option smtpchk EHLO domain.tld
+ server srv ${s2_addr}:${s2_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option smtpchk
+ server srv ${s3_addr}:${s3_port} check inter 1s rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option smtpchk
+ server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
+ backend be5
+ log ${S5_addr}:${S5_port} daemon
+ option log-health-checks
+ option smtpchk EHLO domain.tld
+ server srv ${s5_addr}:${s5_port} check inter 1s rise 1 fall 1
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
+syslog S5 -wait
diff --git a/reg-tests/checks/spop-check.vtc b/reg-tests/checks/spop-check.vtc
new file mode 100644
index 0000000..93cef59
--- /dev/null
+++ b/reg-tests/checks/spop-check.vtc
@@ -0,0 +1,94 @@
+varnishtest "Health-checks: SPOP health-check"
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for an SPOE agent, enabled using the
+# "option spop-check" line. An intermediate listener is used to validate
+# the request because it is impossible with VTEST to read and match raw
+# text.
+
+server s1 {
+ recv 82
+ sendhex "00000036 65 00000001 0000 0776657273696F6E 0803322E30 0E6D61782D6672616D652D73697A65 03FCF0 060C6361706162696C6974696573 0800"
+} -start
+
+server s2 {
+ recv 82
+ sendhex "00000000"
+} -start
+
+server s3 {
+ recv 82
+ sendhex "00000007 65 00000000 0000"
+} -start
+
+server s4 {
+ recv 82
+ sendhex "00000014 65 00000001 0000 0776657273696F6E 0803312E30"
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+info: \"SPOA server is ok\".+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv failed, reason: Layer7 invalid response.+info: \"invalid frame received\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer7 invalid response.+info: \"fragmentation not supported\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv failed, reason: Layer7 invalid response.+info: \"unsupported version\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option spop-check
+ server srv ${h1_spop1_addr}:${h1_spop1_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option spop-check
+ server srv ${s2_addr}:${s2_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option spop-check
+ server srv ${s3_addr}:${s3_port} check inter 1s rise 1 fall 1
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option spop-check
+ server srv ${s4_addr}:${s4_port} check inter 1s rise 1 fall 1
+
+ listen spop1
+ bind "fd@${spop1}"
+ tcp-request inspect-delay 100ms
+ tcp-request content accept if { req.len eq 82 } { req.payload(0,4) -m bin "0000004E" } #{ req.payload(4,4) -m bin "00000001" } { req.payload(8,2) -m bin "0000" } { req.payload(12,17) -m str "supported-version" }
+ tcp-request content reject
+ server srv ${s1_addr}:${s1_port}
+
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
+syslog S4 -wait
diff --git a/reg-tests/checks/ssl-hello-check.vtc b/reg-tests/checks/ssl-hello-check.vtc
new file mode 100644
index 0000000..49abc0b
--- /dev/null
+++ b/reg-tests/checks/ssl-hello-check.vtc
@@ -0,0 +1,76 @@
+varnishtest "Health-checks: ssl-hello health-check"
+#REQUIRE_OPTION=OPENSSL
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests health-checks for an SSL application, enabled using
+# the "option ssl-hello-chk" line.
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv failed, reason: Layer6 invalid response.+info: \"TCPCHK got an empty response at step 2\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer6 invalid response.+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+haproxy htst -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+
+ frontend fe2
+ bind "fd@${fe2}"
+
+ frontend fe3
+ mode http
+ bind "fd@${fe3}"
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option ssl-hello-chk
+ server srv ${htst_fe1_addr}:${htst_fe1_port} check inter 1s rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ option log-health-checks
+ option ssl-hello-chk
+ server srv ${htst_fe2_addr}:${htst_fe2_port} check inter 1s rise 1 fall 1
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ option ssl-hello-chk
+ server srv ${htst_fe3_addr}:${htst_fe3_port} check inter 1s rise 1 fall 1
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
diff --git a/reg-tests/checks/tcp-check-ssl.vtc b/reg-tests/checks/tcp-check-ssl.vtc
new file mode 100644
index 0000000..6ac1782
--- /dev/null
+++ b/reg-tests/checks/tcp-check-ssl.vtc
@@ -0,0 +1,118 @@
+varnishtest "Health-checks: tcp-check health-check with ssl options"
+#REQUIRE_OPTION=OPENSSL
+#REQUIRE_VERSION=2.2
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+syslog S_ok -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]+/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]+/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]+/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]+/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]+/srv succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S3 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be3/srv failed, reason: Layer6 invalid response.+info: \"(Connection closed during SSL handshake|SSL handshake failure)\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be4/srv failed, reason: Layer6 invalid response.+info: \"(Connection closed during SSL handshake|SSL handshake failure) at step 1 of tcp-check \\(connect\\)\".+check duration: [[:digit:]]+ms, status: 0/1 DOWN."
+} -start
+
+
+haproxy htst -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen li1
+ bind "fd@${li1}"
+ tcp-request inspect-delay 100ms
+ tcp-request content reject if { req.ssl_hello_type 0 }
+ tcp-request content accept if { req.ssl_sni check.haproxy.org }
+ tcp-request content accept if { req.ssl_sni connect.haproxy.org }
+ tcp-request content reject
+ server fe1 ${htst_fe1_addr}:${htst_fe1_port}
+
+ listen li2
+ bind "fd@${li2}"
+ tcp-request inspect-delay 100ms
+ tcp-request content reject if { req.ssl_hello_type 0 }
+ tcp-request content accept if { req.ssl_alpn h2 }
+ tcp-request content accept if { req.ssl_alpn http/1.1 }
+ tcp-request content reject
+ server fe1 ${htst_fe1_addr}:${htst_fe1_port}
+
+ frontend fe1
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S_ok_addr}:${S_ok_port} daemon
+ option log-health-checks
+ server srv ${htst_li1_addr}:${htst_li1_port} check check-ssl check-sni check.haproxy.org inter 1s rise 1 fall 1 verify none
+
+ backend be2
+ log ${S_ok_addr}:${S_ok_port} daemon
+ option log-health-checks
+ option tcp-check
+ tcp-check connect ssl sni connect.haproxy.org
+ server srv ${htst_li1_addr}:${htst_li1_port} check inter 1s rise 1 fall 1 verify none
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ option log-health-checks
+ server srv ${htst_li1_addr}:${htst_li1_port} check check-ssl check-sni bad.haproxy.org inter 1s rise 1 fall 1 verify none
+
+ backend be4
+ log ${S4_addr}:${S4_port} daemon
+ option log-health-checks
+ option tcp-check
+ tcp-check connect ssl sni bad.haproxy.org
+ server srv ${htst_li1_addr}:${htst_li1_port} check inter 1s rise 1 fall 1 verify none
+
+ backend be5
+ log ${S_ok_addr}:${S_ok_port} daemon
+ option log-health-checks
+ option tcp-check
+ tcp-check connect default
+ server srv ${htst_li1_addr}:${htst_li1_port} check check-ssl check-sni check.haproxy.org inter 1s rise 1 fall 1 verify none
+
+ backend be6
+ log ${S_ok_addr}:${S_ok_port} daemon
+ option log-health-checks
+ server srv ${htst_li2_addr}:${htst_li2_port} check check-ssl check-alpn "h2,http/1.1" inter 1s rise 1 fall 1 verify none
+
+ backend be7
+ log ${S_ok_addr}:${S_ok_port} daemon
+ option log-health-checks
+ option tcp-check
+ tcp-check connect ssl alpn "h2,http/1.1"
+ server srv ${htst_li2_addr}:${htst_li2_port} check inter 1s rise 1 fall 1 verify none
+
+} -start
+
+syslog S_ok -wait
+syslog S3 -wait
+syslog S4 -wait
diff --git a/reg-tests/checks/tcp-check_min-recv.vtc b/reg-tests/checks/tcp-check_min-recv.vtc
new file mode 100644
index 0000000..81f93e0
--- /dev/null
+++ b/reg-tests/checks/tcp-check_min-recv.vtc
@@ -0,0 +1,68 @@
+varnishtest "tcp-check negative bounded regex match"
+#EXCLUDE_TARGETS=freebsd,osx,generic
+#REGTEST_TYPE=slow
+#REQUIRE_VERSION=2.2
+# This test uses a negative expect rule and verifies that a required minimum
+# amount of data can be set before the match is attempted.
+feature ignore_unknown_macro
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 failed, reason: Layer7 timeout.*at step 2 of tcp-check"
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv1 succeeded"
+} -start
+
+server s1 {
+ send "valid"
+ delay 0.2
+ expect_close
+} -start
+
+server s2 {
+ send "valid"
+ recv 10
+ send "valid"
+ delay 0.2
+ expect_close
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout check "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ # must fail fast
+ timeout check 1
+ timeout server 1
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option tcp-check
+ option log-health-checks
+ tcp-check connect
+ tcp-check expect !rstring "^error" comment "negative check"
+ tcp-check expect string "valid" comment "positive check"
+ tcp-check send "0123456789"
+ tcp-check expect string "valid" comment "positive check"
+ server srv1 ${s1_addr}:${s1_port} check inter 200ms rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} len 2048 local0
+ option tcp-check
+ option log-health-checks
+ tcp-check connect
+ tcp-check expect min-recv 5 !rstring "^error" comment "negative check"
+ tcp-check expect string "valid" comment "positive check"
+ tcp-check send "0123456789"
+ tcp-check expect string "valid" comment "positive check"
+ server srv1 ${s2_addr}:${s2_port} check inter 200ms rise 1 fall 1
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
diff --git a/reg-tests/checks/tcp-check_multiple_ports.vtc b/reg-tests/checks/tcp-check_multiple_ports.vtc
new file mode 100644
index 0000000..356ddf6
--- /dev/null
+++ b/reg-tests/checks/tcp-check_multiple_ports.vtc
@@ -0,0 +1,48 @@
+varnishtest "tcp-check multiple ports"
+#EXCLUDE_TARGETS=freebsd,osx,generic
+#REGTEST_TYPE=slow
+# This test uses multiple tcp-check connect rules to perform health checking on
+# a target. It relies on port 1 being unbound on the local system.
+feature ignore_unknown_macro
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 failed.*Connection refused at step 2 of tcp-check.*connect port 1"
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv1 failed.*Connection refused at step 1 of tcp-check.*connect port 1"
+} -start
+
+server s1 {
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout check "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ #default-server check inter 200ms rise 1 fall 1
+
+ backend be1
+ log ${S1_addr}:${S1_port} len 2048 local0
+ option tcp-check
+ option log-health-checks
+ tcp-check connect port ${s1_port}
+ tcp-check connect port 1
+ server srv1 ${s1_addr}:${s1_port} check inter 200ms rise 1 fall 1
+
+ backend be2
+ log ${S2_addr}:${S2_port} len 2048 local0
+ option tcp-check
+ option log-health-checks
+ tcp-check connect port 1
+ tcp-check connect port ${s1_port}
+ server srv1 ${s1_addr}:${s1_port} check inter 200ms rise 1 fall 1
+} -start
+
+syslog S1 -wait
+syslog S2 -wait
diff --git a/reg-tests/checks/tcp-checks-socks4.vtc b/reg-tests/checks/tcp-checks-socks4.vtc
new file mode 100644
index 0000000..8a730f5
--- /dev/null
+++ b/reg-tests/checks/tcp-checks-socks4.vtc
@@ -0,0 +1,60 @@
+varnishtest "Health-checks: basic HTTP health-check though a socks4 proxy"
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+# This script tests a simple HTTP health-check through a socks4 proxy.
+
+server s1 {
+} -start
+
+server socks {
+ ## get socks4 request
+ recv 16
+
+ ## send socks4 response :
+ ## constant(1): 0x00
+ ## status(1) : 0x5a (success)
+ ## padding(6) : ignored
+ sendhex "005A000000000000"
+
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == /
+ expect req.proto == HTTP/1.0
+ txresp
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv succeeded, reason: Layer7 check passed.+code: 200.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ option log-health-checks
+ option httpchk
+ server srv ${s1_addr}:${s1_port} socks4 ${h1_socks_addr}:${h1_socks_port} check-via-socks4 check inter 1s rise 1 fall 1
+
+ listen socks
+ bind "fd@${socks}"
+ tcp-request inspect-delay 500ms
+ ## Accept socks4 request on 16 bytes :
+ ## version(1) : 0x04
+ ## command(1) : 0x01
+ ## port(2) : ${s1_port}
+ ## addr(4) : ${s1_addr}
+ ## user-id : "HAProxy\0"
+ tcp-request content accept if { req.len eq 16 } { req.payload(0,1) -m bin "04" } { req.payload(1,1) -m bin "01" } { req.payload(2,2),hex,hex2i eq ${s1_port} } { req.payload(4,4),hex,hex2i -m ip ${s1_addr} } { req.payload(8,8) -m bin "484150726F787900" }
+ tcp-request content reject
+ server srv ${socks_addr}:${socks_port}
+
+} -start
+
+syslog S1 -wait
diff --git a/reg-tests/checks/tls_health_checks.vtc b/reg-tests/checks/tls_health_checks.vtc
new file mode 100644
index 0000000..9c268f4
--- /dev/null
+++ b/reg-tests/checks/tls_health_checks.vtc
@@ -0,0 +1,120 @@
+varnishtest "Health-check test over TLS/SSL"
+#REQUIRE_OPTIONS=OPENSSL
+#REGTEST_TYPE=slow
+feature ignore_unknown_macro
+
+
+# This script tests health-checks for a TLS/SSL backend with "option httpchk"
+# and the "check-ssl" option enabled, attached to the h2 haproxy process. This
+# h2 haproxy process is chained to the other one, h1.
+#
+server s1 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == *
+ expect req.proto == HTTP/1.1
+ txresp
+} -start
+
+server s2 {
+} -start
+
+server s3 {
+ rxreq
+ expect req.method == OPTIONS
+ expect req.url == *
+ expect req.proto == HTTP/1.1
+ txresp
+} -start
+
+syslog S1 -level notice {
+ recv info
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* fe1~ be1/srv1 .* 200 [[:digit:]]+ - - ---- .* \"OPTIONS \\* HTTP/1.1\""
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be2
+ server srv2 ${s2_addr}:${s2_port}
+
+ backend be3
+ server srv3 ${s3_addr}:${s3_port}
+
+ frontend fe1
+ option httplog
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+ use_backend be1
+
+ frontend fe2
+ option tcplog
+ bind "fd@${fe2}" ssl crt ${testdir}/common.pem
+ use_backend be2
+
+ frontend fe3
+ option httplog
+ bind "fd@${fe3}" ssl crt ${testdir}/common.pem
+ use_backend be3
+} -start
+
+syslog S2 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h2_pid}\\]: Health check for server be2/srv1 succeeded, reason: Layer7 check passed.+code: 200.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S4 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h2_pid}\\]: Health check for server be4/srv2 succeeded, reason: Layer6 check passed.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+syslog S6 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h2_pid}\\]: Health check for server be6/srv3 succeeded, reason: Layer7 check passed.+code: 200.+check duration: [[:digit:]]+ms, status: 1/1 UP."
+} -start
+
+haproxy h2 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ default-server downinter 1s inter 500 rise 1 fall 1
+
+ backend be2
+ option log-health-checks
+ option httpchk OPTIONS * HTTP/1.1
+ http-check send hdr Host www
+ log ${S2_addr}:${S2_port} daemon
+ server srv1 ${h1_fe1_addr}:${h1_fe1_port} ssl crt ${testdir}/common.pem verify none check
+
+ backend be4
+ option log-health-checks
+ log ${S4_addr}:${S4_port} daemon
+ server srv2 ${h1_fe2_addr}:${h1_fe2_port} ssl crt ${testdir}/common.pem verify none check-ssl check
+
+ backend be6
+ option log-health-checks
+ option httpchk OPTIONS * HTTP/1.1
+ http-check send hdr Host www
+ log ${S6_addr}:${S6_port} daemon
+ server srv3 127.0.0.1:80 crt ${testdir}/common.pem verify none check check-ssl port ${h1_fe3_port} addr ${h1_fe3_addr}:80
+} -start
+
+syslog S1 -wait
+
+syslog S2 -wait
+syslog S4 -wait
+syslog S6 -wait
diff --git a/reg-tests/compression/basic.vtc b/reg-tests/compression/basic.vtc
new file mode 100644
index 0000000..5d9eada
--- /dev/null
+++ b/reg-tests/compression/basic.vtc
@@ -0,0 +1,377 @@
+varnishtest "Basic compression test"
+
+#REQUIRE_OPTION=ZLIB|SLZ
+
+feature ignore_unknown_macro
+
+server s1 {
+ # client c1 - request 1
+ rxreq
+ expect req.url == "/c1.1"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c1 - request 2
+ rxreq
+ expect req.url == "/c1.2"
+ expect req.http.user-agent == "Mozilla/4"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c1 - request 3
+ rxreq
+ expect req.url == "/c1.3"
+ expect req.proto == "HTTP/1.0"
+ expect req.http.accept-encoding == "gzip"
+ txresp -bodylen 100
+
+ # client c1 - request 4
+ rxreq
+ expect req.url == "/c1.4"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -proto "HTTP/1.0" \
+ -hdr "Connection: keep-alive" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c1 - request 5
+ rxreq
+ expect req.url == "/c1.5"
+ expect req.method == "HEAD"
+ expect req.http.accept-encoding == "gzip"
+ txresp -nolen \
+ -hdr "Content-Length: 100" \
+ -hdr "Content-Type: text/plain" \
+
+ # client c1 - request 6
+ rxreq
+ expect req.url == "/c1.6"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -status 400 \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c1 - request 7
+ rxreq
+ expect req.url == "/c1.7"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Content-Encoding: something" \
+ -body "FOO"
+
+ # client c1 - request 8
+ rxreq
+ expect req.url == "/c1.8"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Cache-Control: no-transform" \
+ -bodylen 100
+
+ # client c1 - request 9
+ rxreq
+ expect req.url == "/c1.9"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/css" \
+ -bodylen 100
+
+ # client c1 - request 10
+ rxreq
+ expect req.url == "/c1.10"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: multipart/mixed; boundary=\"aaa\"" \
+ -bodylen 100
+
+ # Close the connection with HAProxy and wait for a new one
+ # (C1 has finished and C2 will start)
+ accept
+
+ # client c2 - request 1
+ rxreq
+ expect req.url == "/c2.1"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c2 - request 2
+ rxreq
+ expect req.url == "/c2.2"
+ expect req.http.accept-encoding == "gzip"
+ txresp -nolen \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 1
+ chunkedlen 1
+ chunkedlen 2
+ chunkedlen 3
+ chunkedlen 5
+ chunkedlen 8
+ chunkedlen 13
+ chunkedlen 21
+ chunkedlen 34
+ chunkedlen 55
+ chunkedlen 89
+ chunkedlen 144
+ chunkedlen 233
+ chunkedlen 0
+
+ # Close the connection with HAProxy and wait for a new one
+ # (C2 has finished and C3 will start)
+ accept
+
+ # client c3 - request 1
+ rxreq
+ expect req.url == "/c3.1"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 50000
+
+ # client c3 - request 2
+ rxreq
+ expect req.url == "/c3.2"
+ expect req.http.accept-encoding == "<undef>"
+ txresp -nolen \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 1000
+ chunkedlen 1000
+ chunkedlen 1000
+ chunkedlen 1000
+ chunkedlen 1000
+ chunkedlen 5000
+ chunkedlen 10000
+ chunkedlen 30000
+ chunkedlen 0
+
+ # Close the connection with HAProxy and wait for a new one
+ # (C3 has finished and C4 will start)
+ accept
+
+ # client c4 - request 1
+ rxreq
+ expect req.url == "/c4.1"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ # client c4 - request 2
+ rxreq
+ expect req.url == "/c4.2"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+} -start
+
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe-gzip
+ bind "fd@${fe_gzip}"
+ default_backend be-gzip
+
+ frontend fe-identity
+ bind "fd@${fe_identity}"
+ default_backend be-identity
+
+ frontend fe-gzip-deflate
+ bind "fd@${fe_gzip_deflate}"
+ default_backend be-gzip-defalte
+
+ backend be-gzip
+ compression algo gzip
+ compression type text/html text/plain
+ server www ${s1_addr}:${s1_port}
+
+ backend be-identity
+ compression algo identity
+ server www ${s1_addr}:${s1_port}
+
+ backend be-gzip-defalte
+ compression algo gzip deflate
+ compression offload
+ server www ${s1_addr}:${s1_port}
+
+} -start
+
+# No compression expected because not supported by the client or because
+# something in the request or response headers forbids it.
+client c1 -connect ${h1_fe_gzip_sock} {
+ # 1. no "Accept-Encoding header"
+ txreq -url "/c1.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 100
+
+ # 2. Buggy User-Agent
+ txreq -url "/c1.2" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "User-Agent: Mozilla/4"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 100
+
+ # 3. HTTP/1.0 request
+ txreq -url "/c1.3" \
+ -proto "HTTP/1.0" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Connection: keep-alive"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 100
+
+ # 4. HTTP/1.0 response
+ txreq -url "/c1.4" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.proto == "HTTP/1.0"
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 100
+
+ # 5. HEAD method
+ txreq -req "HEAD" -url "/c1.5" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp -no_obj
+ expect resp.status == 200
+ expect resp.http.content-length == "100"
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+
+ # 6. Response status code != 20[0-3]
+ txreq -url "/c1.6" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 400
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 100
+
+ # 7. Response already compressed by the server (with "Accept-Encoding")
+ txreq -url "/c1.7" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "something"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.bodylen == 3
+ expect resp.body == "FOO"
+
+ # 8. Response with "Cache-Control: no-transform"
+ txreq -url "/c1.8" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.cache-control == "no-transform"
+ expect resp.bodylen == 100
+
+ # 9. Response with an uncompressible content-type
+ txreq -url "/c1.9" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.content-type == "text/css"
+ expect resp.bodylen == 100
+
+ # 10. Response with an uncompressible content-type
+ txreq -url "/c1.10" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.content-type == "multipart/mixed; boundary=\"aaa\""
+ expect resp.bodylen == 100
+} -run
+
+# GZIP Compression expected (small body)
+client c2 -connect ${h1_fe_gzip_sock} {
+ # 1. response from the server with a small body with a C-L
+ txreq -url "/c2.1" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.transfer-encoding == "chunked"
+ gunzip
+ expect resp.bodylen == 100
+
+ # 2. response from the server with a small chunked body
+ txreq -url "/c2.2" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.transfer-encoding == "chunked"
+ gunzip
+ expect resp.bodylen == 609
+} -run
+
+# Identity compression expected (huge body)
+# Identity is used because of a limitation of vtest (the uncompressed body size
+# must be lower than 10 times the compressed one)
+client c3 -connect ${h1_fe_identity_sock} {
+ # 1. response from the server with a huge body with a C-L
+ txreq -url "/c3.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 50000
+
+ # 2. response from the server with a huge chunked body
+ txreq -url "/c3.2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 50000
+} -run
+
+
+# Compression expected with priority
+client c4 -connect ${h1_fe_gzip_deflate_sock} {
+ # 1. response from the server with a small body with a C-L
+ txreq -url "/c4.1" \
+ -hdr "Accept-Encoding: *;q=0, gzip;q=0.750, deflate;q=0.500"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.transfer-encoding == "chunked"
+
+ # 2. response from the server with a small body with a C-L
+ txreq -url "/c4.2" \
+ -hdr "Accept-Encoding: *;q=0, gzip;q=0.500, deflate;q=0.750"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "deflate"
+ expect resp.http.transfer-encoding == "chunked"
+} -run
diff --git a/reg-tests/compression/common.pem b/reg-tests/compression/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/compression/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/compression/etags_conversion.vtc b/reg-tests/compression/etags_conversion.vtc
new file mode 100644
index 0000000..96e34bc
--- /dev/null
+++ b/reg-tests/compression/etags_conversion.vtc
@@ -0,0 +1,230 @@
+varnishtest "Compression converts strong ETags to weak ETags"
+
+#REQUIRE_OPTION=ZLIB|SLZ
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == "/strong"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"123\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/weak"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: W/\"456\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/weak-incorrect-quoting"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"W/789\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/empty-strong"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/empty-weak"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: W/\"\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid1"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"invalid" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid2"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: invalid\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid3"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: invalid" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid4"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: W/\"invalid" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid5"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: W/invalid\"" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/invalid6"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: W/invalid" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/multiple"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"one\"" \
+ -hdr "ETag: \"two\"" \
+ -bodylen 100
+} -start
+
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe-gzip
+ bind "fd@${fe_gzip}"
+ default_backend be-gzip
+
+ backend be-gzip
+ compression algo gzip
+ compression type text/html text/plain
+ server www ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_gzip_sock} {
+ txreq -url "/strong" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"123\""
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/weak" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"456\""
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/weak-incorrect-quoting" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"W/789\""
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/empty-strong" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"\""
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/empty-weak" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.etag == "W/\"\""
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid1" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "\"invalid"
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid2" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "invalid\""
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid3" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "invalid"
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid4" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "W/\"invalid"
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid5" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "W/invalid\""
+ expect resp.bodylen == 100
+
+ txreq -url "/invalid6" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.etag == "W/invalid"
+ expect resp.bodylen == 100
+
+ txreq -url "/multiple" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.bodylen == 100
+} -run
diff --git a/reg-tests/compression/lua_validation.lua b/reg-tests/compression/lua_validation.lua
new file mode 100644
index 0000000..2cc874b
--- /dev/null
+++ b/reg-tests/compression/lua_validation.lua
@@ -0,0 +1,19 @@
+
+local data = "abcdefghijklmnopqrstuvwxyz"
+local responseblob = ""
+for i = 1,10000 do
+ responseblob = responseblob .. "\r\n" .. i .. data:sub(1, math.floor(i % 27))
+end
+
+http01applet = function(applet)
+ local response = responseblob
+ applet:set_status(200)
+ applet:add_header("Content-Type", "application/javascript")
+ applet:add_header("Content-Length", string.len(response)*10)
+ applet:start_response()
+ for i = 1,10 do
+ applet:send(response)
+ end
+end
+
+core.register_service("fileloader-http01", "http", http01applet)
diff --git a/reg-tests/compression/lua_validation.vtc b/reg-tests/compression/lua_validation.vtc
new file mode 100644
index 0000000..b10cbd9
--- /dev/null
+++ b/reg-tests/compression/lua_validation.vtc
@@ -0,0 +1,59 @@
+# Checks that compression doesn't cause corruption.
+
+varnishtest "Compression validation"
+#REQUIRE_OPTIONS=ZLIB|SLZ,LUA,OPENSSL
+#REGTEST_TYPE=slow
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+global
+# log stdout format short daemon
+ lua-load ${testdir}/lua_validation.lua
+
+defaults
+ mode http
+ log global
+ option httplog
+
+frontend main-https
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+ compression algo gzip
+ compression type text/html text/plain application/json application/javascript
+ compression offload
+ use_backend TestBack if TRUE
+
+backend TestBack
+ server LocalSrv ${h1_fe2_addr}:${h1_fe2_port}
+
+listen fileloader
+ mode http
+ bind "fd@${fe2}"
+ http-request use-service lua.fileloader-http01
+} -start
+
+shell {
+ HOST=${h1_fe1_addr}
+ if [ "${h1_fe1_addr}" = "::1" ] ; then
+ HOST="\[::1\]"
+ fi
+
+ md5=$(command -v md5 || command -v md5sum)
+
+ if [ -z $md5 ] ; then
+ echo "MD5 checksum utility not found"
+ exit 1
+ fi
+
+ expectchecksum="4d9c62aa5370b8d5f84f17ec2e78f483"
+
+ for opt in "" "--limit-rate 300K" "--limit-rate 500K" ; do
+ checksum=$(curl --max-time 15 --compressed -k "https://$HOST:${h1_fe1_port}" $opt | $md5 | cut -d ' ' -f1)
+ if [ "$checksum" != "$expectchecksum" ] ; then
+ echo "Expecting checksum $expectchecksum"
+ echo "Received checksum: $checksum"
+ exit 1;
+ fi
+ done
+
+} -run
diff --git a/reg-tests/compression/vary.vtc b/reg-tests/compression/vary.vtc
new file mode 100644
index 0000000..489de30
--- /dev/null
+++ b/reg-tests/compression/vary.vtc
@@ -0,0 +1,308 @@
+varnishtest "Compression sets Vary header"
+
+#REQUIRE_OPTIONS=ZLIB|SLZ
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == "/plain/accept-encoding-gzip"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/plain/accept-encoding-invalid"
+ expect req.http.accept-encoding == "invalid"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/plain/accept-encoding-null"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/html/accept-encoding-gzip"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/html" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/html/accept-encoding-invalid"
+ expect req.http.accept-encoding == "invalid"
+ txresp \
+ -hdr "Content-Type: text/html" \
+ -bodylen 100
+
+
+ rxreq
+ expect req.url == "/html/accept-encoding-null"
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/html" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/dup-etag/accept-encoding-gzip"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "ETag: \"123\"" \
+ -hdr "ETag: \"123\"" \
+ -bodylen 100
+} -repeat 2 -start
+
+
+server s2 {
+ rxreq
+ expect req.url == "/vary/no-vary"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/vary/accept-encoding"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Vary: Accept-Encoding" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/vary/other"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Vary: Other" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/vary/accept-encoding-and-other"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Vary: Accept-Encoding,Other" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/vary/other-and-accept-encoding"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Vary: Other,Accept-Encoding" \
+ -bodylen 100
+
+ rxreq
+ expect req.url == "/vary/empty"
+ expect req.http.accept-encoding == "gzip"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -hdr "Vary: " \
+ -bodylen 100
+} -start
+
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe-gzip
+ bind "fd@${fe_gzip}"
+ default_backend be-gzip
+
+ backend be-gzip
+ compression algo gzip
+ compression type text/plain
+ server www ${s1_addr}:${s1_port}
+
+ frontend fe-nothing
+ bind "fd@${fe_nothing}"
+ default_backend be-nothing
+
+ backend be-nothing
+ server www ${s1_addr}:${s1_port}
+
+ frontend fe-vary
+ bind "fd@${fe_vary}"
+ default_backend be-vary
+
+ backend be-vary
+ compression algo gzip
+ compression type text/plain
+ server www ${s2_addr}:${s2_port}
+
+} -start
+
+client c1 -connect ${h1_fe_gzip_sock} {
+ txreq -url "/plain/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/plain/accept-encoding-invalid" \
+ -hdr "Accept-Encoding: invalid"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/plain/accept-encoding-null"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-invalid" \
+ -hdr "Accept-Encoding: invalid"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-null"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/dup-etag/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+} -run
+
+# This client duplicates c1 against the "nothing" frontend, ensuring no Vary header is ever set.
+client c2 -connect ${h1_fe_nothing_sock} {
+ txreq -url "/plain/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/plain/accept-encoding-invalid" \
+ -hdr "Accept-Encoding: invalid"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/plain/accept-encoding-null"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-invalid" \
+ -hdr "Accept-Encoding: invalid"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/html/accept-encoding-null"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+
+ txreq -url "/dup-etag/accept-encoding-gzip" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.vary == "<undef>"
+ expect resp.bodylen == 100
+} -run
+
+
+client c3 -connect ${h1_fe_vary_sock} {
+ txreq -url "/vary/no-vary" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/vary/accept-encoding" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/vary/other" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Other,Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/vary/accept-encoding-and-other" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Accept-Encoding,Other"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/vary/other-and-accept-encoding" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Other,Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+
+ txreq -url "/vary/empty" \
+ -hdr "Accept-Encoding: gzip"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.vary == "Accept-Encoding"
+ gunzip
+ expect resp.bodylen == 100
+} -run
diff --git a/reg-tests/connection/ca-auth.crt b/reg-tests/connection/ca-auth.crt
new file mode 120000
index 0000000..815a970
--- /dev/null
+++ b/reg-tests/connection/ca-auth.crt
@@ -0,0 +1 @@
+../ssl/ca-auth.crt \ No newline at end of file
diff --git a/reg-tests/connection/cli_src_dst.vtc b/reg-tests/connection/cli_src_dst.vtc
new file mode 100644
index 0000000..6809d39
--- /dev/null
+++ b/reg-tests/connection/cli_src_dst.vtc
@@ -0,0 +1,290 @@
+varnishtest "Test multi-level client source and destination addresses"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ tcp-request session set-var(sess.sess_fc_src) fc_src
+ tcp-request session set-var(sess.sess_fc_dst) fc_dst
+ tcp-request session set-var(sess.sess_src) src
+ tcp-request session set-var(sess.sess_dst) dst
+
+ tcp-request inspect-delay 100ms
+ tcp-request content set-var(txn.strm_fc_src) fc_src
+ tcp-request content set-var(txn.strm_fc_dst) fc_dst
+ tcp-request content set-var(txn.strm_src) src
+ tcp-request content set-var(txn.strm_dst) dst
+
+ http-after-response set-header sess-fc-src %[var(sess.sess_fc_src)]
+ http-after-response set-header sess-src %[var(sess.sess_src)]
+ http-after-response set-header sess-fc-dst %[var(sess.sess_fc_dst)]
+ http-after-response set-header sess-dst %[var(sess.sess_dst)]
+ http-after-response set-header strm-fc-src %[var(txn.strm_fc_src)]
+ http-after-response set-header strm-src %[var(txn.strm_src)]
+ http-after-response set-header strm-fc-dst %[var(txn.strm_fc_dst)]
+ http-after-response set-header strm-dst %[var(txn.strm_dst)]
+
+ default_backend be
+
+ frontend fe2
+ bind "fd@${fe2}"
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ tcp-request session set-src ipv4(10.1.0.1)
+ tcp-request session set-dst ipv4(10.1.0.2)
+ tcp-request session set-var(sess.sess_fc_src) fc_src
+ tcp-request session set-var(sess.sess_fc_dst) fc_dst
+ tcp-request session set-var(sess.sess_src) src
+ tcp-request session set-var(sess.sess_dst) dst
+
+ tcp-request inspect-delay 100ms
+ tcp-request content set-var(txn.strm_fc_src) fc_src
+ tcp-request content set-var(txn.strm_fc_dst) fc_dst
+ tcp-request content set-var(txn.strm_src) src
+ tcp-request content set-var(txn.strm_dst) dst
+
+ http-after-response set-header sess-fc-src %[var(sess.sess_fc_src)]
+ http-after-response set-header sess-src %[var(sess.sess_src)]
+ http-after-response set-header sess-fc-dst %[var(sess.sess_fc_dst)]
+ http-after-response set-header sess-dst %[var(sess.sess_dst)]
+ http-after-response set-header strm-fc-src %[var(txn.strm_fc_src)]
+ http-after-response set-header strm-src %[var(txn.strm_src)]
+ http-after-response set-header strm-fc-dst %[var(txn.strm_fc_dst)]
+ http-after-response set-header strm-dst %[var(txn.strm_dst)]
+
+ default_backend be
+
+ frontend fe3
+ bind "fd@${fe3}"
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ tcp-request session set-src ipv4(10.1.0.1)
+ tcp-request session set-dst ipv4(10.1.0.2)
+ tcp-request session set-var(sess.sess_fc_src) fc_src
+ tcp-request session set-var(sess.sess_fc_dst) fc_dst
+ tcp-request session set-var(sess.sess_src) src
+ tcp-request session set-var(sess.sess_dst) dst
+
+ tcp-request inspect-delay 100ms
+ tcp-request content set-src ipv4(10.2.0.1)
+ tcp-request content set-dst ipv4(10.2.0.2)
+ tcp-request content set-var(txn.strm_fc_src) fc_src
+ tcp-request content set-var(txn.strm_fc_dst) fc_dst
+ tcp-request content set-var(txn.strm_src) src
+ tcp-request content set-var(txn.strm_dst) dst
+
+ http-after-response set-header sess-fc-src %[var(sess.sess_fc_src)]
+ http-after-response set-header sess-src %[var(sess.sess_src)]
+ http-after-response set-header sess-fc-dst %[var(sess.sess_fc_dst)]
+ http-after-response set-header sess-dst %[var(sess.sess_dst)]
+ http-after-response set-header strm-fc-src %[var(txn.strm_fc_src)]
+ http-after-response set-header strm-src %[var(txn.strm_src)]
+ http-after-response set-header strm-fc-dst %[var(txn.strm_fc_dst)]
+ http-after-response set-header strm-dst %[var(txn.strm_dst)]
+
+
+ frontend fe4
+ bind "fd@${fe4}"
+
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ tcp-request session set-var(sess.sess_fc_src) fc_src
+ tcp-request session set-var(sess.sess_fc_dst) fc_dst
+ tcp-request session set-var(sess.sess_src) src
+ tcp-request session set-var(sess.sess_dst) dst
+
+ http-request set-src hdr(x-forwarded-for)
+ http-request set-dst hdr(x-original-to)
+ http-request set-var(txn.strm_fc_src) fc_src
+ http-request set-var(txn.strm_fc_dst) fc_dst
+ http-request set-var(txn.strm_src) src
+ http-request set-var(txn.strm_dst) dst
+
+ http-after-response set-header sess-fc-src %[var(sess.sess_fc_src)]
+ http-after-response set-header sess-src %[var(sess.sess_src)]
+ http-after-response set-header sess-fc-dst %[var(sess.sess_fc_dst)]
+ http-after-response set-header sess-dst %[var(sess.sess_dst)]
+ http-after-response set-header strm-fc-src %[var(txn.strm_fc_src)]
+ http-after-response set-header strm-src %[var(txn.strm_src)]
+ http-after-response set-header strm-fc-dst %[var(txn.strm_fc_dst)]
+ http-after-response set-header strm-dst %[var(txn.strm_dst)]
+
+ default_backend be
+
+ backend be
+ http-request return status 200
+
+ listen li1
+ bind "fd@${li1}"
+
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ http-request set-src ipv4(192.168.0.1)
+ http-request set-dst ipv4(192.168.0.2)
+
+ http-after-response set-header li1-fc-src %[fc_src]
+ http-after-response set-header li1-src %[src]
+ http-after-response set-header li1-fc-dst %[fc_dst]
+ http-after-response set-header li1-dst %[dst]
+
+
+ server srv ${h1_li3_addr}:${h1_li3_port} send-proxy
+
+ listen li2
+ bind "fd@${li2}"
+
+ tcp-request connection set-src ipv4(10.0.0.1)
+ tcp-request connection set-dst ipv4(10.0.0.2)
+
+ http-request set-src ipv4(192.168.0.1)
+ http-request set-dst ipv4(192.168.0.2)
+
+ http-after-response set-header li2-fc-src %[fc_src]
+ http-after-response set-header li2-src %[src]
+ http-after-response set-header li2-fc-dst %[fc_dst]
+ http-after-response set-header li2-dst %[dst]
+
+ server srv ${h1_li3_addr}:${h1_li3_port} send-proxy-v2
+
+ listen li3
+ bind "fd@${li3}" accept-proxy
+
+ tcp-request connection set-src ipv4(10.1.0.1)
+ tcp-request connection set-dst ipv4(10.1.0.2)
+
+ http-after-response set-header li3-fc-src %[fc_src]
+ http-after-response set-header li3-src %[src]
+ http-after-response set-header li3-fc-dst %[fc_dst]
+ http-after-response set-header li3-dst %[dst]
+
+ http-request return status 200
+
+} -start
+
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.0.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 10.0.0.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.0.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 10.0.0.2
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.1.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 10.1.0.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.1.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 10.1.0.2
+} -run
+
+client c3 -connect ${h1_fe3_sock} {
+ txreq
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.1.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 10.2.0.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.1.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 10.2.0.2
+} -run
+
+client c4 -connect ${h1_fe4_sock} {
+ txreq \
+ -hdr "x-forwarded-for: 192.168.0.1" \
+ -hdr "x-original-to: 192.168.0.2"
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.0.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 192.168.0.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.0.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 192.168.0.2
+
+ txreq \
+ -hdr "x-forwarded-for: 192.168.1.1" \
+ -hdr "x-original-to: 192.168.1.2"
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.0.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 192.168.1.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.0.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 192.168.1.2
+
+ txreq
+ rxresp
+ expect resp.http.sess-fc-src == 10.0.0.1
+ expect resp.http.sess-src == 10.0.0.1
+ expect resp.http.strm-fc-src == 10.0.0.1
+ expect resp.http.strm-src == 10.0.0.1
+
+ expect resp.http.sess-fc-dst == 10.0.0.2
+ expect resp.http.sess-dst == 10.0.0.2
+ expect resp.http.strm-fc-dst == 10.0.0.2
+ expect resp.http.strm-dst == 10.0.0.2
+} -run
+
+client c5 -connect ${h1_li1_sock} {
+ txreq
+ rxresp
+ expect resp.http.li1-fc-src == 10.0.0.1
+ expect resp.http.li1-src == 192.168.0.1
+ expect resp.http.li1-fc-dst == 10.0.0.2
+ expect resp.http.li1-dst == 192.168.0.2
+
+ expect resp.http.li3-fc-src == 10.1.0.1
+ expect resp.http.li3-src == 192.168.0.1
+ expect resp.http.li3-fc-dst == 10.1.0.2
+ expect resp.http.li3-dst == 192.168.0.2
+} -run
+
+client c6 -connect ${h1_li2_sock} {
+ txreq
+ rxresp
+ expect resp.http.li2-fc-src == 10.0.0.1
+ expect resp.http.li2-src == 192.168.0.1
+ expect resp.http.li2-fc-dst == 10.0.0.2
+ expect resp.http.li2-dst == 192.168.0.2
+
+ expect resp.http.li3-fc-src == 10.1.0.1
+ expect resp.http.li3-src == 192.168.0.1
+ expect resp.http.li3-fc-dst == 10.1.0.2
+ expect resp.http.li3-dst == 192.168.0.2
+} -run
diff --git a/reg-tests/connection/client1.pem b/reg-tests/connection/client1.pem
new file mode 120000
index 0000000..c4d14f0
--- /dev/null
+++ b/reg-tests/connection/client1.pem
@@ -0,0 +1 @@
+../ssl/client1.pem \ No newline at end of file
diff --git a/reg-tests/connection/common.pem b/reg-tests/connection/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/connection/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/connection/dispatch.vtc b/reg-tests/connection/dispatch.vtc
new file mode 100644
index 0000000..8696b50
--- /dev/null
+++ b/reg-tests/connection/dispatch.vtc
@@ -0,0 +1,42 @@
+varnishtest "Validate proper operation of the 'dispatch' mode"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+listen fe_tcp
+ bind "fd@${fe_tcp}"
+ mode tcp
+ dispatch ${s1_addr}:${s1_port}
+
+listen fe_http
+ bind "fd@${fe_http}"
+ mode http
+ dispatch ${s2_addr}:${s2_port}
+} -start
+
+client c1 -connect ${h1_fe_tcp_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_fe_http_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/connection/http_reuse_aggressive.vtc b/reg-tests/connection/http_reuse_aggressive.vtc
new file mode 100644
index 0000000..71f4cee
--- /dev/null
+++ b/reg-tests/connection/http_reuse_aggressive.vtc
@@ -0,0 +1,45 @@
+varnishtest "Test the http-reuse aggressive mode"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ listen sender
+ bind "fd@${feS}"
+ http-reuse aggressive
+ server srv ${h1_feR_addr}:${h1_feR_port}
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+} -start
+
+# bootstrap
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "1"
+} -run
+
+# first request should not be reused as no safe connection for the moment
+client c2 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+} -run
+
+# first request must be reused with the safe connection
+client c3 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+} -run
diff --git a/reg-tests/connection/http_reuse_always.vtc b/reg-tests/connection/http_reuse_always.vtc
new file mode 100644
index 0000000..3dbd7c0
--- /dev/null
+++ b/reg-tests/connection/http_reuse_always.vtc
@@ -0,0 +1,43 @@
+varnishtest "Test the http-reuse always mode"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -W -conf {
+ defaults
+ mode http
+
+ listen sender
+ bind "fd@${feS}"
+ http-reuse always
+ server srv ${h1_feR_addr}:${h1_feR_port}
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+} -start
+
+# bootstrap
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "1"
+} -run
+
+client c2 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+} -run
+
+client c3 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+} -run
diff --git a/reg-tests/connection/http_reuse_be_transparent.vtc b/reg-tests/connection/http_reuse_be_transparent.vtc
new file mode 100644
index 0000000..3fb9e7a
--- /dev/null
+++ b/reg-tests/connection/http_reuse_be_transparent.vtc
@@ -0,0 +1,82 @@
+varnishtest "Test the proper interaction between http-reuse and backend in transparent mode"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(TPROXY)'"
+
+# If a backend is used in transparent mode, the connections are considered
+# private and should only be reused for requests of the same session.
+# This is similar to the http-reuse never mode
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ listen sender
+ bind "fd@${feS}"
+ http-request set-header client-id %[req.hdr(client-id)] if { req.hdr(client-id) -m found }
+ option transparent
+ http-request set-dst-port int(${h1_feR_port})
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request set-var(sess.client_id) req.hdr(client-id)
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+ http-after-response set-header client-id %[var(sess.client_id)]
+} -start
+
+client c1 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c1"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
+
+client c2 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c2"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+} -run
+
+client c3 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c3"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+} -run
+
diff --git a/reg-tests/connection/http_reuse_conn_hash.vtc b/reg-tests/connection/http_reuse_conn_hash.vtc
new file mode 100644
index 0000000..991e86f
--- /dev/null
+++ b/reg-tests/connection/http_reuse_conn_hash.vtc
@@ -0,0 +1,163 @@
+varnishtest "Test the http-reuse with special connection parameters"
+#REQUIRE_VERSION=2.4
+#REQUIRE_OPTIONS=OPENSSL
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ # sni
+ listen sender-sni
+ bind "fd@${feS_sni}"
+ server srv2 ${h1_feR_ssl_addr}:${h1_feR_ssl_port} ssl sni "req.hdr(x-sni)" verify none pool-low-conn 2
+
+ # set-dst
+ # specify dst1_addr for server, which should be identical to dst2_addr
+ # port is specified by the client in header x-dst-port
+ listen sender-set-dst
+ bind "fd@${feS_dst}"
+ http-request set-dst-port hdr(x-dst-port)
+ server srv2 ${h1_feR_dst1_addr}:0 pool-low-conn 2
+
+ # proxy protocol
+ # must use reuse always as consecutive requests are from different client
+ listen sender-proxy
+ bind "fd@${feS_proxy}" accept-proxy
+ http-reuse always
+ server srv2 ${h1_feR_proxy_addr}:${h1_feR_proxy_port} send-proxy pool-low-conn 2
+
+ listen receiver
+ bind "fd@${feR_ssl}" ssl crt ${testdir}/common.pem
+ bind "fd@${feR_proxy}" accept-proxy
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+
+ listen receiver-dst1
+ bind "fd@${feR_dst1}"
+ http-request return status 200 hdr "x-dst" "dst1"
+ http-after-response set-header http_first_request %[http_first_req]
+
+ listen receiver-dst2
+ bind "fd@${feR_dst2}"
+ http-request return status 200 hdr "x-dst" "dst2"
+ http-after-response set-header http_first_request %[http_first_req]
+} -start
+
+# http-reuse with sni parameters
+client c_sni -connect ${h1_feS_sni_sock} {
+ # first request
+ txreq \
+ -hdr "x-sni: www.custom.com"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ # second request with same sni, connection must be reused
+ txreq \
+ -hdr "x-sni: www.custom.com"
+ rxresp
+ expect resp.http.http_first_request == "0"
+
+ # third request with a different sni, a new connection must be used
+ txreq \
+ -hdr "x-sni: www.custom2.com"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ # fourth request, reuse sni2
+ txreq \
+ -hdr "x-sni: www.custom2.com"
+ rxresp
+ expect resp.http.http_first_request == "0"
+} -run
+
+# http-reuse with destination address
+client c_dst1 -connect ${h1_feS_dst_sock} {
+ txreq \
+ -hdr "x-dst-port: ${h1_feR_dst1_port}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-dst == "dst1"
+ expect resp.http.http_first_request == "1"
+
+ txreq \
+ -hdr "x-dst-port: ${h1_feR_dst1_port}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-dst == "dst1"
+ expect resp.http.http_first_request == "0"
+
+ txreq \
+ -hdr "x-dst-port: ${h1_feR_dst2_port}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-dst == "dst2"
+ expect resp.http.http_first_request == "1"
+
+ txreq \
+ -hdr "x-dst-port: ${h1_feR_dst1_port}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-dst == "dst1"
+ expect resp.http.http_first_request == "0"
+
+ txreq \
+ -hdr "x-dst-port: ${h1_feR_dst2_port}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-dst == "dst2"
+ expect resp.http.http_first_request == "0"
+} -run
+
+## first request with proxy protocol
+client c_proxy -connect ${h1_feS_proxy_sock} -proxy1 "127.0.0.1:40000 ${h1_feS_proxy_addr}:${h1_feS_proxy_port}" {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "0"
+} -run
+
+## second request with different proxy protocol
+# this has the nice effect of filling the server pool with 2 connections
+# (pool-low-conn value) to allow takeover on multi-threaded runs
+client c_proxy -connect ${h1_feS_proxy_sock} -proxy1 "127.0.0.1:50000 ${h1_feS_proxy_addr}:${h1_feS_proxy_port}" {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "1"
+} -run
+
+## third request, reuse same proxy protocol entry
+client c_proxy -connect ${h1_feS_proxy_sock} -proxy1 "127.0.0.1:40000 ${h1_feS_proxy_addr}:${h1_feS_proxy_port}" {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "0"
+} -run
+
+## fourth request with different proxy protocol entry, no reuse
+client c_proxy -connect ${h1_feS_proxy_sock} -proxy1 "127.0.0.1:60000 ${h1_feS_proxy_addr}:${h1_feS_proxy_port}" {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "1"
+} -run
+
+## fifth request, reuse proxy protocol
+client c_proxy -connect ${h1_feS_proxy_sock} -proxy1 "127.0.0.1:50000 ${h1_feS_proxy_addr}:${h1_feS_proxy_port}" {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "0"
+
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.http_first_request == "0"
+} -run
diff --git a/reg-tests/connection/http_reuse_dispatch.vtc b/reg-tests/connection/http_reuse_dispatch.vtc
new file mode 100644
index 0000000..a419727
--- /dev/null
+++ b/reg-tests/connection/http_reuse_dispatch.vtc
@@ -0,0 +1,79 @@
+varnishtest "Test the proper interaction between http-reuse and dispatch mode"
+
+# With dispatch, the connections are considered private and should only be
+# reused for requests of the same session
+# This is similar to the http-reuse never mode
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ listen sender
+ bind "fd@${feS}"
+ http-request set-header client-id %[req.hdr(client-id)] if { req.hdr(client-id) -m found }
+ dispatch ${h1_feR_addr}:${h1_feR_port}
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request set-var(sess.client_id) req.hdr(client-id)
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+ http-after-response set-header client-id %[var(sess.client_id)]
+} -start
+
+client c1 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c1"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
+
+client c2 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c2"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+} -run
+
+client c3 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c3"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+} -run
diff --git a/reg-tests/connection/http_reuse_never.vtc b/reg-tests/connection/http_reuse_never.vtc
new file mode 100644
index 0000000..fc74631
--- /dev/null
+++ b/reg-tests/connection/http_reuse_never.vtc
@@ -0,0 +1,79 @@
+varnishtest "Test the http-reuse never mode"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ # limit idle pool to one connection
+ # this is to mirror http-reuse safe test, but in this case to ensure that
+# connections are never reused as expected
+ listen sender
+ bind "fd@${feS}"
+ http-reuse never
+ http-request set-header client-id %[req.hdr(client-id)] if { req.hdr(client-id) -m found }
+ server srv ${h1_feR_addr}:${h1_feR_port} pool-max-conn 1
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request set-var(sess.client_id) req.hdr(client-id)
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+ http-after-response set-header client-id %[var(sess.client_id)]
+} -start
+
+client c1 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c1"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
+
+client c2 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c2"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c2"
+} -run
+
+client c3 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c3"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c3"
+} -run
diff --git a/reg-tests/connection/http_reuse_safe.vtc b/reg-tests/connection/http_reuse_safe.vtc
new file mode 100644
index 0000000..fa00dd4
--- /dev/null
+++ b/reg-tests/connection/http_reuse_safe.vtc
@@ -0,0 +1,78 @@
+varnishtest "Test the http-reuse safe mode"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ # limit idle pool to one connection
+ # this forces connection reuse for the transaction after the first one
+ listen sender
+ bind "fd@${feS}"
+ http-reuse safe
+ http-request set-header client-id %[req.hdr(client-id)] if { req.hdr(client-id) -m found }
+ server srv ${h1_feR_addr}:${h1_feR_port} pool-max-conn 1
+
+ listen receiver
+ bind "fd@${feR}"
+ http-request set-var(sess.client_id) req.hdr(client-id)
+ http-request return status 200
+ http-after-response set-header http_first_request %[http_first_req]
+ http-after-response set-header client-id %[var(sess.client_id)]
+} -start
+
+client c1 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c1"
+ rxresp
+ expect resp.http.http_first_request == "1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
+
+client c2 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c2"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c2"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
+
+client c3 -connect ${h1_feS_sock} {
+ txreq \
+ -hdr "client-id: c3"
+ rxresp
+ expect resp.http.http_first_request == "1"
+ expect resp.http.client-id == "c3"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+
+ txreq
+ rxresp
+ expect resp.http.http_first_request == "0"
+ expect resp.http.client-id == "c1"
+} -run
diff --git a/reg-tests/connection/proxy_protocol_random_fail.vtc b/reg-tests/connection/proxy_protocol_random_fail.vtc
new file mode 100644
index 0000000..1ae33de
--- /dev/null
+++ b/reg-tests/connection/proxy_protocol_random_fail.vtc
@@ -0,0 +1,59 @@
+#commit b406b87
+# BUG/MEDIUM: connection: don't store recv() result into trash.data
+#
+# Cyril Bonté discovered that the proxy protocol randomly fails since
+# commit 843b7cb ("MEDIUM: chunks: make the chunk struct's fields match
+# the buffer struct"). This is because we used to store recv()'s return
+# code into trash.data which is now unsigned, so it never compares as
+# negative against 0. Let's clean this up and test the result itself
+# without storing it first.
+
+varnishtest "PROXY protocol random failures"
+#REQUIRE_OPTIONS=OPENSSL
+
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=broken
+
+syslog Slog_1 -repeat 8 -level info {
+ recv
+ expect ~ "Connect from .* to ${h1_ssl_addr}:${h1_ssl_port}"
+ recv
+ expect ~ "ssl-offload-http/http .* \"POST (https://.*:${h1_ssl_port})?/[1-8] HTTP/(2\\.0|1\\.1)\""
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ log ${Slog_1_addr}:${Slog_1_port} len 2048 local0 debug err
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ log global
+
+ listen http
+ bind unix@"${tmpdir}/http.socket" accept-proxy name ssl-offload-http
+ option forwardfor
+
+ listen ssl-offload-http
+ option httplog
+ bind "fd@${ssl}" ssl crt ${testdir}/common.pem ssl no-sslv3 alpn h2,http/1.1
+ server http unix@"${tmpdir}/http.socket" send-proxy
+} -start
+
+
+shell {
+ HOST=${h1_ssl_addr}
+ if [ "$HOST" = "::1" ] ; then
+ HOST="\[::1\]"
+ fi
+ for i in 1 2 3 4 5 6 7 8 ; do
+ urls="$urls https://$HOST:${h1_ssl_port}/$i"
+ done
+ curl -i -k -d 'x=x' $urls & wait $!
+}
+
+syslog Slog_1 -wait
diff --git a/reg-tests/connection/proxy_protocol_send_generic.vtc b/reg-tests/connection/proxy_protocol_send_generic.vtc
new file mode 100644
index 0000000..605f38c
--- /dev/null
+++ b/reg-tests/connection/proxy_protocol_send_generic.vtc
@@ -0,0 +1,74 @@
+varnishtest "Check that generic TLV IDs are sent properly"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ log global
+
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen sender
+ bind "fd@${feS}"
+ server example ${h1_feR_addr}:${h1_feR_port} send-proxy-v2 set-proxy-v2-tlv-fmt(0xE1) %[str("foo")] set-proxy-v2-tlv-fmt(0xE2)
+
+ listen receiver
+ bind "fd@${feR}" accept-proxy
+
+ # Check that the TLV value is set in the backend.
+ http-request set-var(txn.custom_tlv_a) fc_pp_tlv(0xE1)
+ http-after-response set-header proxy_custom_tlv_a %[var(txn.custom_tlv_a)]
+
+# Check that TLVs without a value are sent out.
+ http-request set-var(txn.custom_tlv_b) fc_pp_tlv(0xE2)
+ http-after-response set-header proxy_custom_tlv_b %[var(txn.custom_tlv_b)]
+
+# Note that we do not check for an invalid TLV ID as that would result in a
+# parser error anyway.
+
+ http-request return status 200
+} -start
+
+
+client c1 -connect ${h1_feS_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.http.proxy_custom_tlv_a == "foo"
+ expect resp.http.proxy_custom_tlv_b == ""
+} -run
+
+# Ensure that we achieve the same via a default-server.
+haproxy h2 -conf {
+ defaults
+ mode http
+ log global
+
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen sender
+ bind "fd@${feS}"
+ default-server send-proxy-v2 set-proxy-v2-tlv-fmt(0xE1) %[str("bar")]
+ server example ${h1_feR_addr}:${h1_feR_port}
+
+ listen receiver
+ bind "fd@${feR}" accept-proxy
+
+ http-request set-var(txn.custom_tlv_a) fc_pp_tlv(0xE1)
+ http-after-response set-header proxy_custom_tlv_a %[var(txn.custom_tlv_a)]
+
+ http-request return status 200
+} -start
+
+
+client c2 -connect ${h2_feS_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.http.proxy_custom_tlv_a == "bar"
+} -run
diff --git a/reg-tests/connection/proxy_protocol_send_unique_id.vtc b/reg-tests/connection/proxy_protocol_send_unique_id.vtc
new file mode 100644
index 0000000..4f6b848
--- /dev/null
+++ b/reg-tests/connection/proxy_protocol_send_unique_id.vtc
@@ -0,0 +1,42 @@
+varnishtest "Check that the unique ID TLV is properly sent"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ log global
+ unique-id-format %{+X}o\ TEST-%[req.hdr(in)]
+
+ listen sender
+ bind "fd@${feS}"
+
+ unique-id-header unique_id
+
+ server example ${h1_feR_addr}:${h1_feR_port} send-proxy-v2 proxy-v2-options unique-id
+
+ listen receiver
+ bind "fd@${feR}" accept-proxy
+
+ http-request set-var(txn.http_unique_id) req.hdr(unique_id)
+ http-request set-var(txn.proxy_unique_id) fc_pp_unique_id
+ http-after-response set-header http_unique_id %[var(txn.http_unique_id)]
+ http-after-response set-header proxy_unique_id %[var(txn.proxy_unique_id)]
+ http-request return status 200
+} -start
+
+# Validate that a correct header passes
+client c1 -connect ${h1_feS_sock} {
+ txreq -url "/" \
+ -hdr "in: foo"
+ rxresp
+ expect resp.http.http_unique_id == "TEST-foo"
+ expect resp.http.proxy_unique_id == "TEST-foo"
+ txreq -url "/" \
+ -hdr "in: bar"
+ rxresp
+ expect resp.http.http_unique_id == "TEST-bar"
+ expect resp.http.proxy_unique_id == "TEST-bar"
+} -run
diff --git a/reg-tests/connection/proxy_protocol_send_unique_id_alpn.vtc b/reg-tests/connection/proxy_protocol_send_unique_id_alpn.vtc
new file mode 100644
index 0000000..87e590a
--- /dev/null
+++ b/reg-tests/connection/proxy_protocol_send_unique_id_alpn.vtc
@@ -0,0 +1,33 @@
+varnishtest "Check that the unique ID TLV is properly sent for servers with ALPN option"
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ log global
+ unique-id-format %{+X}o\ TEST-%[req.hdr(in)]
+
+ listen sender
+ bind "fd@${feS}"
+
+ server example ${h1_feR_addr}:${h1_feR_port} send-proxy-v2 proxy-v2-options unique-id ssl alpn XXX verify none
+
+ listen receiver
+ bind "fd@${feR}" ssl crt ${testdir}/common.pem accept-proxy
+
+ http-request set-var(txn.proxy_unique_id) fc_pp_unique_id
+ http-after-response set-header proxy_unique_id %[var(txn.proxy_unique_id)]
+ http-request return status 200
+} -start
+
+# Validate that a correct header passes
+client c1 -connect ${h1_feS_sock} {
+ txreq -url "/" \
+ -hdr "in: foo"
+ rxresp
+ expect resp.http.proxy_unique_id == "TEST-foo"
+} -run
diff --git a/reg-tests/connection/proxy_protocol_tlv_validation.vtc b/reg-tests/connection/proxy_protocol_tlv_validation.vtc
new file mode 100644
index 0000000..8c7d734
--- /dev/null
+++ b/reg-tests/connection/proxy_protocol_tlv_validation.vtc
@@ -0,0 +1,142 @@
+varnishtest "Check that the TLVs are properly validated"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+# We need one HAProxy for each test, because apparently the connection by
+# the client is reused, leading to connection resets.
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend a
+ bind "fd@${fe1}" accept-proxy
+ http-after-response set-header echo %[fc_pp_authority,hex]
+ http-request return status 200
+} -start
+
+# Validate that a correct header passes
+client c1 -connect ${h1_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + length of the TLV (8)
+ sendhex "00 14"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+ # PP2_TYPE_AUTHORITY + length of the value + "12345"
+ sendhex "02 00 05 31 32 33 34 35"
+
+ txreq -url "/"
+ rxresp
+ expect resp.http.echo == "3132333435"
+} -run
+
+haproxy h2 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend a
+ bind "fd@${fe1}" accept-proxy
+ http-after-response set-header echo %[fc_pp_authority,hex]
+ http-request return status 200
+} -start
+
+# Validate that a TLV after the end of the PROXYv2 header is not parsed
+# and handled by the HTTP parser, leading to a 400 bad request error
+client c2 -connect ${h2_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + length of the TLV (8)
+ sendhex "00 14"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+ # PP2_TYPE_AUTHORITY + length of the value + "12345"
+ sendhex "02 00 05 31 32 33 34 35"
+ # after the end of the PROXYv2 header: PP2_TYPE_AUTHORITY + length of the value + "54321"
+ sendhex "02 00 05 35 34 33 32 31"
+
+ txreq -url "/"
+ rxresp
+ expect resp.status == 400
+ expect resp.http.echo == <undef>
+} -run
+
+haproxy h3 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend a
+ bind "fd@${fe1}" accept-proxy
+ http-after-response set-header echo %[fc_pp_authority,hex]
+ http-request return status 200
+} -start
+
+# Validate that a TLV length exceeding the PROXYv2 length fails
+client c3 -connect ${h3_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + too small length of the TLV (8)
+ sendhex "00 14"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+ # PP2_TYPE_AUTHORITY + length of the value + "1234512345"
+ sendhex "02 00 0A 31 32 33 34 35 31 32 33 34 35"
+
+ txreq -url "/"
+ expect_close
+} -run
+
+haproxy h4 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend a
+ bind "fd@${fe1}" accept-proxy
+ http-after-response set-header echo %[fc_pp_authority,hex]
+ http-request return status 200
+} -start
+
+# Validate that TLVs not ending with the PROXYv2 header fail
+client c4 -connect ${h4_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + too big length of the TLV (8)
+ sendhex "00 14"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+ # PP2_TYPE_AUTHORITY + length of the value + "1234"
+ sendhex "02 00 04 31 32 33 34"
+
+ txreq -url "/"
+ expect_close
+} -run
diff --git a/reg-tests/connection/reverse_connect_full.vtc b/reg-tests/connection/reverse_connect_full.vtc
new file mode 100644
index 0000000..238831f
--- /dev/null
+++ b/reg-tests/connection/reverse_connect_full.vtc
@@ -0,0 +1,70 @@
+varnishtest "Reverse connect full test"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.9
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h_edge -conf {
+global
+ expose-experimental-directives
+
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ mode http
+
+frontend pub
+ bind "fd@${pub}"
+ use_backend be-reverse
+
+frontend priv
+ bind "fd@${priv}" proto h2
+ tcp-request session attach-srv be-reverse/dev
+
+backend be-reverse
+ server dev rhttp@
+} -start
+
+haproxy h_dev -conf {
+global
+ expose-experimental-directives
+
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ mode http
+
+frontend fe
+ bind "rhttp@be-pre-connect/srv"
+ use_backend be
+
+backend be-pre-connect
+ server srv ${h_edge_priv_addr}:${h_edge_priv_port} proto h2
+
+backend be
+ server srv ${s1_addr}:${s1_port}
+}
+
+client c1 -connect ${h_edge_pub_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 503
+} -run
+
+haproxy h_dev -start
+# TODO replace delay by a proper wait
+delay 3
+
+client c1 -connect ${h_edge_pub_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/connection/reverse_server.vtc b/reg-tests/connection/reverse_server.vtc
new file mode 100644
index 0000000..50fe8ce
--- /dev/null
+++ b/reg-tests/connection/reverse_server.vtc
@@ -0,0 +1,69 @@
+varnishtest "Reverse server test"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.9
+
+barrier b1 cond 2
+
+haproxy h_edge -conf {
+global
+ expose-experimental-directives
+
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ mode http
+
+frontend pub
+ bind "fd@${pub}"
+ use_backend be-reverse
+
+backend be-reverse
+ server dev rhttp@
+
+frontend priv
+ bind "fd@${priv}" proto h2
+ tcp-request session attach-srv be-reverse/dev
+} -start
+
+# Run a client through public endpoint
+# Reverse server has no connection available thus a 503 is expected
+client c1 -connect ${h_edge_pub_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Run a client through private endpoint
+# Connection will be attached to the reverse server
+client c_dev -connect ${h_edge_priv_sock} {
+ txpri
+
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ barrier b1 sync
+ stream 1 {
+ rxhdrs
+ } -run
+
+ sendhex "000004 01 05 00000001 88 5c 01 30"
+} -start
+
+# Wait for dev client to be ready to process connection
+barrier b1 sync
+
+# Run a client through public endpoint
+# Reverse server should now be able to proceed with the request
+client c2 -connect ${h_edge_pub_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/connection/reverse_server_name.vtc b/reg-tests/connection/reverse_server_name.vtc
new file mode 100644
index 0000000..0fd850f
--- /dev/null
+++ b/reg-tests/connection/reverse_server_name.vtc
@@ -0,0 +1,87 @@
+varnishtest "Reverse server with a name parameter test"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.9
+
+barrier b1 cond 2
+
+haproxy h_edge -conf {
+global
+ expose-experimental-directives
+
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ mode http
+
+frontend pub
+ bind "fd@${pub}"
+ use_backend be-reverse
+
+backend be-reverse
+ server dev rhttp@ ssl sni hdr(x-name) verify none
+
+frontend priv
+ bind "fd@${priv}" ssl crt ${testdir}/common.pem verify required ca-verify-file ${testdir}/ca-auth.crt alpn h2
+ tcp-request session attach-srv be-reverse/dev name ssl_c_s_dn(CN)
+} -start
+
+# Simple clear <-> SSL bridge between clients and h_edge haproxy
+# Used certificate has the name "client1"
+haproxy h_ssl_bridge -conf {
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ mode tcp
+
+listen li
+ bind "fd@${li}"
+ server h_edge "${h_edge_priv_addr}:${h_edge_priv_port}" ssl crt ${testdir}/client1.pem verify none alpn h2
+} -start
+
+# Run a client through private endpoint
+# Connection will be attached to the reverse server
+client c_dev -connect ${h_ssl_bridge_li_sock} {
+ txpri
+
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ barrier b1 sync
+ stream 1 {
+ rxhdrs
+ } -run
+
+ sendhex "000004 01 05 00000001 88 5c 01 30"
+} -start
+
+# Wait for dev client to be ready to process connection
+barrier b1 sync
+
+# Run a client through public endpoint
+# Use a different name than the client certificate thus resulting in a 503
+client c1 -connect ${h_edge_pub_sock} {
+ txreq -url "/" \
+ -hdr "x-name: client99"
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Run a client through public endpoint
+# Use the correct name
+client c2 -connect ${h_edge_pub_sock} {
+ txreq -url "/" \
+ -hdr "x-name: client1"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/connection/tcp_to_http_upgrade.vtc b/reg-tests/connection/tcp_to_http_upgrade.vtc
new file mode 100644
index 0000000..48ebeba
--- /dev/null
+++ b/reg-tests/connection/tcp_to_http_upgrade.vtc
@@ -0,0 +1,169 @@
+varnishtest "Test connection upgrades from TCP to HTTP"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ # TCP > H1 using "switch-mode http"
+ rxreq
+ expect req.http.x-stream-mode == tcp
+ expect req.http.x-name == fe1
+ txresp
+ rxreq
+ expect req.http.x-stream-mode == http
+ expect req.http.x-name == fe1
+ txresp
+
+ accept
+
+ # TCP > H1 using backend mode
+ rxreq
+ expect req.http.x-name == be
+ txresp
+ rxreq
+ expect req.http.x-name == be
+ txresp
+
+ accept
+
+ # TCP > H2 using "switch-mode http"
+ rxreq
+ expect req.http.x-stream-mode == http
+ expect req.http.x-name == fe1
+ txresp
+ rxreq
+ expect req.http.x-stream-mode == http
+ expect req.http.x-name == fe1
+ txresp
+
+ # To be sure no other request was received
+ accept
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ frontend fe1
+ mode tcp
+ bind "fd@${fe1h1}"
+
+ tcp-request inspect-delay 1s
+ tcp-request content set-var(req.stream_mode) internal.strm.is_htx,iif(http,tcp)
+ tcp-request content switch-mode http if HTTP
+ tcp-request content reject # never reached
+
+ http-request set-header x-stream-mode %[var(req.stream_mode)]
+ http-request set-header x-name %[fe_name]
+
+ default_backend be
+
+ frontend fe2
+ mode tcp
+ bind "fd@${fe2h1}"
+ default_backend be
+
+ backend be
+ mode http
+ http-request set-header x-name %[be_name] unless { req.fhdr(x-name) -m found }
+ server s1 ${s1_addr}:${s1_port}
+
+ listen li1
+ mode http
+ bind "fd@${li1h1}"
+ server s1 ${h1_fe1h1_addr}:${h1_fe1h1_port} proto h2
+
+ listen err1
+ mode http
+ bind "fd@${err1h1}" proto h1
+ server s1 ${s1_addr}:${s1_port}
+
+ listen err2
+ mode tcp
+ bind "fd@${err2h1}"
+
+ tcp-request inspect-delay 1s
+ tcp-request content switch-mode http proto h1 if HTTP
+ tcp-request content reject # never reached
+
+ default_backend be
+
+ listen err3
+ mode tcp
+ bind "fd@${err3h1}" proto none
+
+ tcp-request inspect-delay 1s
+ tcp-request content switch-mode http if HTTP
+ tcp-request content reject # never reached
+
+ default_backend be
+} -start
+
+# TCP > H1 using "switch-mode http"
+client c1 -connect ${h1_fe1h1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+# TCP > H1 using backend mode
+client c2 -connect ${h1_fe2h1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+# TCP > H2 using "switch-mode http"
+client c3 -connect ${h1_li1h1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+# implicit H1 > H2 upgrade not performed
+client c_err1 -connect ${h1_err1h1_sock} {
+ send "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+
+# TCP > H1 > H2 upgrade not allowed
+client c_err2 -connect ${h1_err2h1_sock} {
+ send "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+
+# TCP > HTTP upgrade not allowed
+client c_err3 -connect ${h1_err3h1_sock} {
+ txreq
+ expect_close
+} -run
+
+# TCP > HTTP upgrade with a parsing error
+client c_err4 -connect ${h1_fe2h1_sock} {
+ send "GET / BAD-VERSION\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+# To be sure no other request was received by the server
+client c_end -connect ${s1_sock} {
+ txreq
+ rxresp
+} -run
diff --git a/reg-tests/contrib/prometheus.vtc b/reg-tests/contrib/prometheus.vtc
new file mode 100644
index 0000000..a481240
--- /dev/null
+++ b/reg-tests/contrib/prometheus.vtc
@@ -0,0 +1,113 @@
+varnishtest "prometheus exporter test"
+
+#REQUIRE_VERSION=2.4
+#REQUIRE_SERVICES=prometheus-exporter
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option socket-stats
+
+ listen stats
+ bind "fd@${stats}"
+ http-request use-service prometheus-exporter if { path /metrics }
+
+ frontend fe
+ bind "fd@${fe}"
+ default_backend be
+
+ backend be
+ stick-table type ip size 1m expire 10s store http_req_rate(10s)
+ option httpchk
+ server s1 ${s1_addr}:${s1_port}
+ server s2 ${s2_addr}:${s2_port} check inter 5s maxqueue 10 maxconn 12 pool-max-conn 42
+} -start
+
+client c1 -connect ${h1_stats_sock} {
+ txreq -url "/metrics"
+ rxresp
+ # test general metrics
+ expect resp.status == 200
+ expect resp.body ~ ".*haproxy_process.*"
+ expect resp.body ~ ".*haproxy_frontend.*"
+ expect resp.body ~ ".*haproxy_listener.*"
+ expect resp.body ~ ".*haproxy_backend.*"
+ expect resp.body ~ ".*haproxy_server.*"
+ expect resp.body ~ ".*haproxy_sticktable.*"
+
+ # test expected NaN values
+ expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_check_failures_total{proxy=\"be\",server=\"s2\"} 0.*"
+ expect resp.body ~ ".*haproxy_server_check_up_down_total{proxy=\"be\",server=\"s2\"} 0.*"
+
+ expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_queue_limit{proxy=\"be\",server=\"s2\"} 10.*"
+
+ expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_limit_sessions{proxy=\"be\",server=\"s2\"} 12.*"
+
+ expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"stats\"} NaN.*"
+ expect resp.body ~ ".*haproxy_backend_downtime_seconds_total{proxy=\"be\"} 0.*"
+ expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_downtime_seconds_total{proxy=\"be\",server=\"s2\"} 0.*"
+
+ expect resp.body ~ ".*haproxy_server_current_throttle{proxy=\"be\",server=\"s1\"} NaN.*"
+
+ expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s1\"} NaN.*"
+ expect resp.body ~ ".*haproxy_server_idle_connections_limit{proxy=\"be\",server=\"s2\"} 42.*"
+
+ # test well known labels presence
+ expect resp.body ~ ".*haproxy_process_build_info{version=\".*\"} 1.*"
+ expect resp.body ~ ".*haproxy_frontend_http_responses_total{proxy=\"stats\",code=\"4xx\"} 0.*"
+ expect resp.body ~ ".*haproxy_frontend_status{proxy=\"fe\",state=\"UP\"} 1.*"
+ expect resp.body ~ ".*haproxy_listener_status{proxy=\"stats\",listener=\"sock-1\",state=\"WAITING\"} 0.*"
+ expect resp.body ~ ".*haproxy_backend_status{proxy=\"be\",state=\"UP\"} 1.*"
+ expect resp.body ~ ".*haproxy_server_status{proxy=\"be\",server=\"s1\",state=\"DOWN\"} 0.*"
+ expect resp.body ~ ".*haproxy_server_check_status{proxy=\"be\",server=\"s2\",state=\"HANA\"} 0.*"
+
+ # test scope
+ txreq -url "/metrics?scope="
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 0
+
+ txreq -url "/metrics?scope=server"
+ rxresp
+ expect resp.status == 200
+ expect resp.body !~ ".*haproxy_process.*"
+ expect resp.body !~ ".*haproxy_frontend.*"
+ expect resp.body !~ ".*haproxy_listener.*"
+ expect resp.body !~ ".*haproxy_backend.*"
+ expect resp.body ~ ".*haproxy_server.*"
+ expect resp.body !~ ".*haproxy_sticktable.*"
+
+ txreq -url "/metrics?scope=frontend&scope=backend"
+ rxresp
+ expect resp.status == 200
+ expect resp.body !~ ".*haproxy_process.*"
+ expect resp.body ~ ".*haproxy_frontend.*"
+ expect resp.body !~ ".*haproxy_listener.*"
+ expect resp.body ~ ".*haproxy_backend.*"
+ expect resp.body !~ ".*haproxy_server.*"
+ expect resp.body !~ ".*haproxy_sticktable.*"
+
+ txreq -url "/metrics?scope"
+ rxresp
+ expect resp.status == 400
+} -run
diff --git a/reg-tests/converter/add_item.vtc b/reg-tests/converter/add_item.vtc
new file mode 100644
index 0000000..474ad7b
--- /dev/null
+++ b/reg-tests/converter/add_item.vtc
@@ -0,0 +1,50 @@
+varnishtest "be2dec converter Test"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.input) req.hdr(input)
+ http-request set-var(txn.var) str("var_content")
+
+ http-response set-header add_item-1 "%[var(txn.input),add_item(',',txn.var,_suff_)]"
+ http-response set-header add_item-2 "%[var(txn.input),add_item(',',txn.var)]"
+ http-response set-header add_item-3 "%[var(txn.input),add_item(',',,_suff_)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "input:"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.add_item-1 == "var_content_suff_"
+ expect resp.http.add_item-2 == "var_content"
+ expect resp.http.add_item-3 == "_suff_"
+ txreq -url "/" \
+ -hdr "input: input_string"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.add_item-1 == "input_string,var_content_suff_"
+ expect resp.http.add_item-2 == "input_string,var_content"
+ expect resp.http.add_item-3 == "input_string,_suff_"
+} -run
diff --git a/reg-tests/converter/be2dec.vtc b/reg-tests/converter/be2dec.vtc
new file mode 100644
index 0000000..a0b7104
--- /dev/null
+++ b/reg-tests/converter/be2dec.vtc
@@ -0,0 +1,56 @@
+varnishtest "be2dec converter Test"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.input) req.hdr(input)
+
+ http-response set-header be2dec-1 "%[var(txn.input),be2dec(:,1)]"
+ http-response set-header be2dec-2 "%[var(txn.input),be2dec(-,3)]"
+ http-response set-header be2dec-3 "%[var(txn.input),be2dec(::,3,1)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "input:"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2dec-1 == ""
+ expect resp.http.be2dec-2 == ""
+ expect resp.http.be2dec-3 == ""
+ txreq -url "/" \
+ -hdr "input: 0123456789"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2dec-1 == "48:49:50:51:52:53:54:55:56:57"
+ expect resp.http.be2dec-2 == "3158322-3355701-3553080-57"
+ expect resp.http.be2dec-3 == "3158322::3355701::3553080"
+ txreq -url "/" \
+ -hdr "input: abcdefghijklmnopqrstuvwxyz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2dec-1 == "97:98:99:100:101:102:103:104:105:106:107:108:109:110:111:112:113:114:115:116:117:118:119:120:121:122"
+ expect resp.http.be2dec-2 == "6382179-6579558-6776937-6974316-7171695-7369074-7566453-7763832-31098"
+ expect resp.http.be2dec-3 == "6382179::6579558::6776937::6974316::7171695::7369074::7566453::7763832"
+} -run
diff --git a/reg-tests/converter/be2hex.vtc b/reg-tests/converter/be2hex.vtc
new file mode 100644
index 0000000..4cf3dc1
--- /dev/null
+++ b/reg-tests/converter/be2hex.vtc
@@ -0,0 +1,60 @@
+varnishtest "be2hex converter Test"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.input) req.hdr(input)
+
+ http-response set-header be2hex "%[var(txn.input),be2hex,lower]"
+ http-response set-header be2hex-1 "%[var(txn.input),be2hex(:,1),lower]"
+ http-response set-header be2hex-2 "%[var(txn.input),be2hex(--,3),lower]"
+ http-response set-header be2hex-3 "%[var(txn.input),be2hex(.,3,1),lower]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "input:"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2hex == ""
+ expect resp.http.be2hex-1 == ""
+ expect resp.http.be2hex-2 == ""
+ expect resp.http.be2hex-3 == ""
+ txreq -url "/" \
+ -hdr "input: 0123456789"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2hex == "30313233343536373839"
+ expect resp.http.be2hex-1 == "30:31:32:33:34:35:36:37:38:39"
+ expect resp.http.be2hex-2 == "303132--333435--363738--39"
+ expect resp.http.be2hex-3 == "303132.333435.363738"
+ txreq -url "/" \
+ -hdr "input: abcdefghijklmnopqrstuvwxyz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be2hex == "6162636465666768696a6b6c6d6e6f707172737475767778797a"
+ expect resp.http.be2hex-1 == "61:62:63:64:65:66:67:68:69:6a:6b:6c:6d:6e:6f:70:71:72:73:74:75:76:77:78:79:7a"
+ expect resp.http.be2hex-2 == "616263--646566--676869--6a6b6c--6d6e6f--707172--737475--767778--797a"
+ expect resp.http.be2hex-3 == "616263.646566.676869.6a6b6c.6d6e6f.707172.737475.767778"
+} -run
diff --git a/reg-tests/converter/bytes.vtc b/reg-tests/converter/bytes.vtc
new file mode 100644
index 0000000..8abe401
--- /dev/null
+++ b/reg-tests/converter/bytes.vtc
@@ -0,0 +1,156 @@
+varnishtest "bytes converter Test"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev4)'"
+
+feature ignore_unknown_macro
+
+# TEST - 1
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 1 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.input) req.hdr(input)
+
+ http-response set-header bytes_0 "%[var(txn.input),bytes(0)]"
+ http-response set-header bytes_1 "%[var(txn.input),bytes(1)]"
+ http-response set-header bytes_0_3 "%[var(txn.input),bytes(0,3)]"
+ http-response set-header bytes_1_3 "%[var(txn.input),bytes(1,3)]"
+ http-response set-header bytes_99 "%[var(txn.input),bytes(99)]"
+ http-response set-header bytes_5 "%[var(txn.input),bytes(5)]"
+ http-response set-header bytes_6 "%[var(txn.input),bytes(6)]"
+ http-response set-header bytes_0_6 "%[var(txn.input),bytes(0,6)]"
+ http-response set-header bytes_0_7 "%[var(txn.input),bytes(0,7)]"
+
+ http-response set-var(txn.var_start) int(0)
+ http-response set-header bytes_var0 "%[var(txn.input),bytes(txn.var_start)]"
+
+ http-response set-var(txn.var_start) int(1)
+ http-response set-var(txn.var_length) int(3)
+ http-response set-header bytes_var1_var3 "%[var(txn.input),bytes(txn.var_start,txn.var_length)]"
+
+ http-response set-var(txn.var_start) int(99)
+ http-response set-header bytes_var99 "%[var(txn.input),bytes(txn.var_start)]"
+
+ http-response set-var(txn.var_start) int(0)
+ http-response set-var(txn.var_length) int(7)
+ http-response set-header bytes_var0_var7 "%[var(txn.input),bytes(txn.var_start,txn.var_length)]"
+
+ http-response set-var(txn.var_start) int(1)
+ http-response set-var(txn.var_length) int(3)
+ http-response set-header bytes_var1_3 "%[var(txn.input),bytes(txn.var_start,3)]"
+ http-response set-header bytes_1_var3 "%[var(txn.input),bytes(1,txn.var_length)]"
+
+ http-response set-var(txn.var_start) int(-1)
+ http-response set-var(txn.var_length) int(-1)
+ http-response set-header bytes_varminus1 "%[var(txn.input),bytes(txn.var_start)]"
+ http-response set-header bytes_0_varminus1 "%[var(txn.input),bytes(0,txn.var_length)]"
+
+ http-response set-header bytes_varNA "%[var(txn.input),bytes(txn.NA)]"
+ http-response set-header bytes_1_varNA "%[var(txn.input),bytes(1,txn.NA)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "input: 012345"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.bytes_0 == "012345"
+ expect resp.http.bytes_1 == "12345"
+ expect resp.http.bytes_0_3 == "012"
+ expect resp.http.bytes_1_3 == "123"
+ expect resp.http.bytes_99 == ""
+ expect resp.http.bytes_5 == "5"
+ expect resp.http.bytes_6 == ""
+ expect resp.http.bytes_0_6 == "012345"
+
+ # since specified length is > input length, response contains the input till the end
+ expect resp.http.bytes_0_7 == "012345"
+
+ expect resp.http.bytes_var0 == "012345"
+ expect resp.http.bytes_var1_var3 == "123"
+ expect resp.http.bytes_var99 == ""
+ expect resp.http.bytes_var0_var7 == "012345"
+ expect resp.http.bytes_var1_3 == "123"
+ expect resp.http.bytes_1_var3 == "123"
+ expect resp.http.bytes_varminus1 == ""
+ expect resp.http.bytes_0_varminus1 == ""
+ expect resp.http.bytes_varNA == ""
+ expect resp.http.bytes_1_varNA == ""
+} -run
+
+# TEST - 2
+# negative starting index causes startup failure
+haproxy h2 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-response set-header bytes_output "%[var(txn.input),bytes(-1)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start -expectexit 1
+
+# TEST - 3
+# negative length causes startup failure
+haproxy h3 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-response set-header bytes_output "%[var(txn.input),bytes(0,-1)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start -expectexit 1
+
+# TEST - 4
+# 0 length causes startup failure
+haproxy h4 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-response set-header bytes_output "%[var(txn.input),bytes(0,0)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start -expectexit 1
diff --git a/reg-tests/converter/digest.vtc b/reg-tests/converter/digest.vtc
new file mode 100644
index 0000000..e911ff4
--- /dev/null
+++ b/reg-tests/converter/digest.vtc
@@ -0,0 +1,57 @@
+varnishtest "digest converter Test"
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTION=OPENSSL
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.hash) req.hdr(hash)
+
+ http-response set-header SHA1 "%[var(txn.hash),digest(sha1),hex,lower]"
+ http-response set-header SHA224 "%[var(txn.hash),digest(sha224),hex,lower]"
+ http-response set-header SHA256 "%[var(txn.hash),digest(sha256),hex,lower]"
+ http-response set-header SHA384 "%[var(txn.hash),digest(sha384),hex,lower]"
+ http-response set-header SHA512 "%[var(txn.hash),digest(sha512),hex,lower]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "Hash: 1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha1 == "356a192b7913b04c54574d18c28d46e6395428ab"
+ expect resp.http.sha224 == "e25388fde8290dc286a6164fa2d97e551b53498dcbf7bc378eb1f178"
+ expect resp.http.sha256 == "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"
+ expect resp.http.sha384 == "47f05d367b0c32e438fb63e6cf4a5f35c2aa2f90dc7543f8a41a0f95ce8a40a313ab5cf36134a2068c4c969cb50db776"
+ expect resp.http.sha512 == "4dff4ea340f0a823f15d3f4f01ab62eae0e5da579ccb851f8db9dfe84c58b2b37b89903a740e1ee172da793a6e79d560e5f7f9bd058a12a280433ed6fa46510a"
+ txreq -url "/" \
+ -hdr "Hash: 2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha1 == "da4b9237bacccdf19c0760cab7aec4a8359010b0"
+ expect resp.http.sha224 == "58b2aaa0bfae7acc021b3260e941117b529b2e69de878fd7d45c61a9"
+ expect resp.http.sha256 == "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"
+ expect resp.http.sha384 == "d063457705d66d6f016e4cdd747db3af8d70ebfd36badd63de6c8ca4a9d8bfb5d874e7fbd750aa804dcaddae7eeef51e"
+ expect resp.http.sha512 == "40b244112641dd78dd4f93b6c9190dd46e0099194d5a44257b7efad6ef9ff4683da1eda0244448cb343aa688f5d3efd7314dafe580ac0bcbf115aeca9e8dc114"
+} -run
diff --git a/reg-tests/converter/field.vtc b/reg-tests/converter/field.vtc
new file mode 100644
index 0000000..3b1d819
--- /dev/null
+++ b/reg-tests/converter/field.vtc
@@ -0,0 +1,43 @@
+varnishtest "field converter Test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.uri) path
+ http-response set-header Found %[var(txn.uri),field(3,/)] if { var(txn.uri),field(3,/) -m found }
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/foo/bar/baz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "bar"
+ txreq -url "/foo//bar/baz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == ""
+ txreq -url "/foo"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "<undef>"
+} -run
diff --git a/reg-tests/converter/fix.vtc b/reg-tests/converter/fix.vtc
new file mode 100644
index 0000000..8206da3
--- /dev/null
+++ b/reg-tests/converter/fix.vtc
@@ -0,0 +1,235 @@
+varnishtest "fix converters Test"
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ # Valid FIX-4.0 logon
+ recv 92
+ # 8=FIX.4.0|9=66|35=A|34=1|49=EXECUTOR|52=20201029-10:54:19|56=CLIENT1|98=0|108=30|10=147|
+ sendhex "383d4649582e342e3001393d36360133353d410133343d310134393d4558454355544f520135323d32303230313032392d31303a35343a31390135363d434c49454e54310139383d30013130383d33300131303d31343701"
+ close
+
+ # Valid FIX-4.1 logon
+ accept
+ recv 98
+ # 8=FIX.4.1|9=72|35=A|34=1|49=EXECUTOR|52=20201029-12:43:07|56=CLIENT1|98=0|108=30|141=Y|10=187|
+ sendhex "383d4649582e342e3101393d37320133353d410133343d310134393d4558454355544f520135323d32303230313032392d31323a34333a30370135363d434c49454e54310139383d30013130383d3330013134313d590131303d31383701"
+ close
+
+ # Valid FIX-4.2 logon
+ accept
+ recv 98
+ # 8=FIX.4.2|9=79|35=A|34=1|49=EXECUTOR|52=20201029-12:55:12.101414|56=CLIENT1|98=0|108=30|141=Y|10=027|
+ sendhex "383d4649582e342e3201393d37390133353d410133343d310134393d4558454355544f520135323d32303230313032392d31323a35353a31322e3130313431340135363d434c49454e54310139383d30013130383d3330013134313d590131303d30323701"
+ close
+
+ # Valid FIX-4.3 logon
+ accept
+ recv 125
+ # 8=FIX.4.3|9=79|35=A|34=1|49=EXECUTOR|52=20201029-12:58:50.891371|56=CLIENT1|98=0|108=30|141=Y|10=051|
+ sendhex "383d4649582e342e3301393d37390133353d410133343d310134393d4558454355544f520135323d32303230313032392d31323a35383a35302e3839313337310135363d434c49454e54310139383d30013130383d3330013134313d590131303d30353101"
+ close
+
+ # Valid FIX-4.4 logon
+ accept
+ recv 125
+ # 8=FIX.4.4|9=79|35=A|34=1|49=EXECUTOR|52=20201029-13:02:44.535360|56=CLIENT1|98=0|108=30|141=Y|10=038|
+ sendhex "383d4649582e342e3401393d37390133353d410133343d310134393d4558454355544f520135323d32303230313032392d31333a30323a34342e3533353336300135363d434c49454e54310139383d30013130383d3330013134313d590131303d30333801"
+ close
+
+ # Valid FIX-5.0 logon
+ accept
+ recv 140
+ # 8=FIXT.1.1|9=86|35=A|34=1|49=EXECUTOR|52=20201029-13:13:22.626384|56=CLIENT1|98=0|108=30|141=Y|1137=7|10=184|
+ sendhex "383d464958542e312e3101393d38360133353d410133343d310134393d4558454355544f520135323d32303230313032392d31333a31333a32322e3632363338340135363d434c49454e54310139383d30013130383d3330013134313d5901313133373d370131303d31383401"
+} -start
+
+server s2 {
+ # Valid FIX-4.4 logon
+ recv 125
+ # 8=FIX.4.4|9=79|35=A|34=1|49=EXECUTOR|52=20201029-13:02:44.535360|56=CLIENT1|98=0|108=30|141=Y|10=038|
+ sendhex "383d4649582e342e3401393d37390133353d410133343d310134393d4558454355544f520135323d32303230313032392d31333a30323a34342e3533353336300135363d434c49454e54310139383d30013130383d3330013134313d590131303d30333801"
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ tcp-request inspect-delay 1s
+ tcp-request content reject unless { req.payload(0,0),fix_is_valid }
+ default_backend be1
+
+ frontend fe2
+ bind "fd@${fe2}"
+ tcp-request inspect-delay 1s
+ tcp-request content reject unless { req.payload(0,0),fix_is_valid }
+ tcp-request content set-var(req.fix_vsn) req.payload(0,0),fix_tag_value(BeginString)
+ tcp-request content set-var(req.fix_len) req.payload(0,0),fix_tag_value(BodyLength)
+ tcp-request content set-var(req.fix_type) req.payload(0,0),fix_tag_value(MsgType)
+ tcp-request content set-var(req.fix_sender) req.payload(0,0),fix_tag_value(SenderCompID)
+ tcp-request content set-var(req.fix_target) req.payload(0,0),fix_tag_value(TargetCompID)
+ tcp-request content set-var(req.fix_chksum) req.payload(0,0),fix_tag_value(CheckSum)
+ tcp-request content reject if ! { var(req.fix_vsn) -m str "FIX.4.4" } || ! { var(req.fix_len) -m str "102" }
+ tcp-request content reject if ! { var(req.fix_type) -m str "A" } || ! { var(req.fix_sender) -m str "CLIENT1" }
+ tcp-request content reject if ! { var(req.fix_target) -m str "EXECUTOR" } || ! { var(req.fix_chksum) -m str "252" }
+ default_backend be2
+
+ backend be1
+ server s1 ${s1_addr}:${s1_port}
+ tcp-response inspect-delay 1s
+ tcp-response content reject unless { res.payload(0,0),fix_is_valid }
+
+ backend be2
+ server s2 ${s2_addr}:${s2_port}
+ tcp-response inspect-delay 1s
+ tcp-response content reject unless { res.payload(0,0),fix_is_valid }
+ tcp-response content set-var(res.fix_vsn) res.payload(0,0),fix_tag_value(8)
+ tcp-response content set-var(res.fix_len) res.payload(0,0),fix_tag_value(9)
+ tcp-response content set-var(res.fix_type) res.payload(0,0),fix_tag_value(35)
+ tcp-response content set-var(res.fix_sender) res.payload(0,0),fix_tag_value(49)
+ tcp-response content set-var(res.fix_target) res.payload(0,0),fix_tag_value(56)
+ tcp-response content set-var(res.fix_chksum) res.payload(0,0),fix_tag_value(10)
+ tcp-response content reject if ! { var(res.fix_vsn) -m str "FIX.4.4" } || ! { var(res.fix_len) -m str "79" }
+ tcp-response content reject if ! { var(res.fix_type) -m str "A" } || ! { var(res.fix_sender) -m str "EXECUTOR" }
+ tcp-response content reject if ! { var(res.fix_target) -m str "CLIENT1" } || ! { var(res.fix_chksum) -m str "038" }
+} -start
+
+client c1_4_0 -connect ${h1_fe1_sock} {
+ # Valid FIX-4.0 logon
+ # 8=FIX.4.0|9=70|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-10:54:19.617|98=0|108=30|10=090|
+ sendhex "383d4649582e342e3001393d37300133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31303a35343a31392e3631370139383d30013130383d33300131303d30393001"
+ recv 88
+ expect_close
+} -run
+
+client c1_4_1 -connect ${h1_fe1_sock} {
+ # Valid FIX-4.1 logon
+ # 8=FIX.4.1|9=76|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-12:43:07.940|98=0|108=30|141=Y|10=138|
+ sendhex "383d4649582e342e3101393d37360133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31323a34333a30372e3934300139383d30013130383d3330013134313d590131303d31333801"
+ recv 94
+ expect_close
+} -run
+
+client c1_4_2 -connect ${h1_fe1_sock} {
+ # Valid FIX-4.2 logon
+ # 8=FIX.4.2|9=76|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-12:55:12.100|98=0|108=30|141=Y|10=126|
+ sendhex "383d4649582e342e3201393d37360133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31323a35353a31322e3130300139383d30013130383d3330013134313d590131303d31323601"
+ recv 101
+ expect_close
+} -run
+
+client c1_4_3 -connect ${h1_fe1_sock} {
+ # Valid FIX-4.3 logon
+ # 8=FIX.4.3|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-12:58:50.889|98=0|108=30|141=Y|553=Username|554=Password|10=012|
+ sendhex "383d4649582e342e3301393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31323a35383a35302e3838390139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d30313201"
+ recv 101
+ expect_close
+} -run
+
+client c1_4_4 -connect ${h1_fe1_sock} {
+ # Valid FIX-4.4 logon
+ # 8=FIX.4.4|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ recv 101
+ expect_close
+} -run
+
+client c1_5_0 -connect ${h1_fe1_sock} {
+ # Valid FIX-5.0 logon
+ # 8=FIXT.1.1|9=116|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:13:22.624|1128=7|98=0|108=30|141=Y|553=Username|554=Password|1137=7|10=204|
+ sendhex "383d464958542e312e3101393d3131360133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a31333a32322e36323401313132383d370139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f726401313133373d370131303d32303401"
+ recv 109
+ expect_close
+} -run
+
+client c2_1 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Empty TagName (missing EncryptMethod <98> tag name)
+ # 8=FIX.4.4|9=100|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130300133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e353238013d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_2 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Empty TagValue (missing EncryptMethod <98> tag value)
+ # 8=FIX.4.4|9=101|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130310133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_3 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Empty Tag no delimiter (missing delimiter for EncryptMethod <98> tag)
+ # 8=FIX.4.4|9=100|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130300133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e353238013938013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_4 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: First tag != BeginString
+ # 9=102|8=FIX.4.4|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "393d31303201383d4649582e342e340133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_5 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Second tag != BodyLength
+ # 8=FIX.4.4|35=A|9=102|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e340133353d4101393d3130320134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_6 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Third tag != MsgType
+ # 8=FIX.4.4|9=102|49=CLIENT1|35=A|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130320134393d434c49454e54310133353d410135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_7 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Bad body length (too short 100 != 102)
+ # 8=FIX.4.4|9=100|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130300133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ expect_close
+} -run
+
+client c2_8 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Bad body length (too long 105 != 102)
+ # 8=FIX.4.4|9=105|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|XXX
+ sendhex "383d4649582e342e3401393d3130350133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201585858"
+ expect_close
+} -run
+
+client c2_9 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Too short checksum value (< 3 digit)
+ # 8=FIX.4.4|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=25|
+ sendhex "383d4649582e342e3401393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d323501"
+ expect_close
+} -run
+
+client c2_10 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: Too long checksum value (> 3 digit)
+ # 8=FIX.4.4|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=2520|
+ sendhex "383d4649582e342e3401393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d3235323001"
+ expect_close
+} -run
+
+client c2_11 -connect ${h1_fe1_sock} {
+ # Invalid FIX-4.4: invalid checksum value (253 != 252)
+ # 8=FIX.4.4|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=253|
+ sendhex "383d4649582e342e3401393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353301"
+ expect_close
+} -run
+
+
+client c3_1 -connect ${h1_fe2_sock} {
+ # 8=FIX.4.4|9=102|35=A|49=CLIENT1|56=EXECUTOR|34=1|52=20201029-13:02:44.528|98=0|108=30|141=Y|553=Username|554=Password|10=252|
+ sendhex "383d4649582e342e3401393d3130320133353d410134393d434c49454e54310135363d4558454355544f520133343d310135323d32303230313032392d31333a30323a34342e3532380139383d30013130383d3330013134313d59013535333d557365726e616d65013535343d50617373776f72640131303d32353201"
+ recv 101
+ expect_close
+} -run
diff --git a/reg-tests/converter/hmac.vtc b/reg-tests/converter/hmac.vtc
new file mode 100644
index 0000000..230a44d
--- /dev/null
+++ b/reg-tests/converter/hmac.vtc
@@ -0,0 +1,55 @@
+varnishtest "HMAC converter Test"
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTION=OPENSSL
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.hash) req.hdr(hash)
+ http-request set-var(txn.key) str(my_super_secret_long_key),base64
+
+ http-response set-header SHA1-short "%[var(txn.hash),hmac(sha1,a2V5),hex,lower]"
+ http-response set-header SHA1-long "%[var(txn.hash),hmac(sha1,txn.key),hex,lower]"
+ http-response set-header SHA256-short "%[var(txn.hash),hmac(sha256,a2V5),hex,lower]"
+ http-response set-header SHA256-long "%[var(txn.hash),hmac(sha256,txn.key),hex,lower]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "Hash: 1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha1-short == "e23feb105f9622241bf23db1638cd2b4208b1f53"
+ expect resp.http.sha1-long == "87b10ddcf39e26f6bd7c3b0e38e0125997b255be"
+ expect resp.http.sha256-short == "6da91fb91517be1f5cdcf3af91d7d40c717dd638a306157606fb2e584f7ae926"
+ expect resp.http.sha256-long == "2fb3de6a462c54d1803f946b52202f3a8cd46548ffb3f789b4ac11a4361ffef2"
+ txreq -url "/" \
+ -hdr "Hash: 2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha1-short == "311219c4a80c5ef81b1cee5505236c1d0ab1922c"
+ expect resp.http.sha1-long == "c5758af565ba4b87b3db49c8b32d4a94d430cb78"
+ expect resp.http.sha256-short == "ae7b3ee87b8c9214f714df1c2042c7a985b9d711e9938a063937ad1636775a88"
+ expect resp.http.sha256-long == "c073191a2ebf29f510444b92c187d62199d84b58f58dceeadb91994c170a9a16"
+} -run
diff --git a/reg-tests/converter/iif.vtc b/reg-tests/converter/iif.vtc
new file mode 100644
index 0000000..f412daf
--- /dev/null
+++ b/reg-tests/converter/iif.vtc
@@ -0,0 +1,46 @@
+varnishtest "iif converter Test"
+#REQUIRE_VERSION=2.3
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.iif) req.hdr_cnt(count),iif(ok,ko)
+ http-response set-header iif %[var(txn.iif)]
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.iif == "ko"
+ txreq \
+ -hdr "count: 1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.iif == "ok"
+ txreq \
+ -hdr "count: 1,2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.iif == "ok"
+} -run
diff --git a/reg-tests/converter/json.vtc b/reg-tests/converter/json.vtc
new file mode 100644
index 0000000..1f37c9f
--- /dev/null
+++ b/reg-tests/converter/json.vtc
@@ -0,0 +1,40 @@
+varnishtest "json converter test"
+
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-response set-header json0 "%[str(foo 1/2),json]"
+ # bad UTF-8 sequence
+ http-response set-header json1 "%[str(\xE0),json(utf8)]"
+ # bad UTF-8 sequence, but removes them
+ http-response set-header json2 "%[str(-\xE0-),json(utf8s)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.http.json0 == "foo 1\\/2"
+ expect resp.http.json1 == ""
+ expect resp.http.json2 == "--"
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/converter/json_query.vtc b/reg-tests/converter/json_query.vtc
new file mode 100644
index 0000000..f4e3bb2
--- /dev/null
+++ b/reg-tests/converter/json_query.vtc
@@ -0,0 +1,107 @@
+varnishtest "JSON Query converters Test"
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 8 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option http-buffer-request
+
+ frontend fe
+ bind "fd@${fe}"
+ tcp-request inspect-delay 1s
+
+ http-request set-var(sess.header_json) req.hdr(Authorization),json_query('$.iss')
+ http-request set-var(sess.pay_json) req.body,json_query('$.iss')
+ http-request set-var(sess.pay_int) req.body,json_query('$.integer',"int"),add(1)
+ http-request set-var(sess.pay_neg_int) req.body,json_query('$.negativ-integer',"int"),add(1)
+ http-request set-var(sess.pay_double) req.body,json_query('$.double')
+ http-request set-var(sess.pay_boolean_true) req.body,json_query('$.boolean-true')
+ http-request set-var(sess.pay_boolean_false) req.body,json_query('$.boolean-false')
+ http-request set-var(sess.pay_mykey) req.body,json_query('$.my\\.key')
+
+ http-response set-header x-var_header %[var(sess.header_json)]
+ http-response set-header x-var_body %[var(sess.pay_json)]
+ http-response set-header x-var_body_int %[var(sess.pay_int)]
+ http-response set-header x-var_body_neg_int %[var(sess.pay_neg_int)]
+ http-response set-header x-var_body_double %[var(sess.pay_double)]
+ http-response set-header x-var_body_boolean_true %[var(sess.pay_boolean_true)]
+ http-response set-header x-var_body_boolean_false %[var(sess.pay_boolean_false)]
+ http-response set-header x-var_body_mykey %[var(sess.pay_mykey)]
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "Authorization: {\"iss\":\"kubernetes.io/serviceaccount\"}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_header ~ "kubernetes.io/serviceaccount"
+
+ txreq -url "/" \
+ -body "{\"iss\":\"kubernetes.io/serviceaccount\"}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body ~ "kubernetes.io/serviceaccount"
+
+ txreq -url "/" \
+ -body "{\"integer\":4}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_int ~ "5"
+
+ txreq -url "/" \
+ -body "{\"integer\":-4}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_int ~ "-3"
+
+ txreq -url "/" \
+ -body "{\"double\":4.5}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_double ~ "4.5"
+
+ txreq -url "/" \
+ -body "{\"boolean-true\":true}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_boolean_true == 1
+
+ txreq -url "/" \
+ -body "{\"boolean-false\":false}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_boolean_false == 0
+
+ txreq -url "/" \
+ -body "{\"my.key\":\"myvalue\"}"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var_body_mykey ~ "myvalue"
+
+ txreq -url "/" \
+ -body "{\"my.key\":[\"val1\",\"val2\",\"val3\"],\"key2\":\"val4\"}"
+ expect resp.status == 200
+ expect resp.http.x-var_body_mykey ~ "[\"val1\",\"val2\",\"val3\"]"
+
+} -run
diff --git a/reg-tests/converter/mqtt.vtc b/reg-tests/converter/mqtt.vtc
new file mode 100644
index 0000000..fc3daca
--- /dev/null
+++ b/reg-tests/converter/mqtt.vtc
@@ -0,0 +1,238 @@
+varnishtest "mqtt converters Test"
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ # MQTT 3.1.1 CONNECT packet (id: test_subaaaaaa... [len = 200])
+ recv 215
+ sendhex "20020000"
+ close
+
+ # MQTT 3.1.1 CONNECT packet (id: <empty> - username: test - passwd: passwd)
+ accept
+ recv 28
+ sendhex "20020000"
+ close
+
+ # MQTT 3.1.1 CONNECT packet (id: test_sub - username: test - passwd: passwd - will_topic: willtopic - will_payload: willpayload)
+ accept
+ recv 60
+ sendhex "20020000"
+ close
+
+ # MQTT 5.0 CONNECT packet (id: test_sub)
+ accept
+ recv 26
+ sendhex "200600000322000a"
+
+ # MQTT 5.0 CONNECT packet (id: test_sub - username: test - passwd: passwd)
+ accept
+ recv 40
+ sendhex "200600000322000a"
+
+ # MQTT 5.0 complex CONNECT/CONNACK packet
+ accept
+ recv 128
+ sendhex "20250000221100000078217fff24012501270000ffff22000a2600016100016226000163000164"
+ close
+
+ # Invalid MQTT 3.1.1 CONNACK packet with invalid flags (!= 0x00)
+ accept
+ recv 22
+ sendhex "21020000"
+ expect_close
+
+ # MQTT 3.1 CONNECT packet (id: test_sub - username: test - passwd: passwd)
+ accept
+ recv 38
+ sendhex "20020000"
+} -start
+
+server s2 {
+ # MQTT 5.0 complex CONNECT packet
+ recv 128
+ sendhex "20250000221100000078217fff24012501270000ffff22000a2600016100016226000163000164"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode tcp
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ tcp-request inspect-delay 1s
+ tcp-request content reject unless { req.payload(0,0),mqtt_is_valid }
+ default_backend be1
+
+ frontend fe2
+ bind "fd@${fe2}"
+ tcp-request inspect-delay 1s
+ tcp-request content reject unless { req.payload(0,0),mqtt_is_valid }
+ tcp-request content set-var(req.flags) req.payload(0,0),mqtt_field_value(connect,flags)
+ tcp-request content set-var(req.protoname) req.payload(0,0),mqtt_field_value(connect,protocol_name)
+ tcp-request content set-var(req.protovsn) req.payload(0,0),mqtt_field_value(connect,protocol_version)
+ tcp-request content set-var(req.clientid) req.payload(0,0),mqtt_field_value(connect,client_identifier)
+ tcp-request content set-var(req.willtopic) req.payload(0,0),mqtt_field_value(connect,will_topic)
+ tcp-request content set-var(req.willbody) req.payload(0,0),mqtt_field_value(connect,will_payload)
+ tcp-request content set-var(req.user) req.payload(0,0),mqtt_field_value(connect,username)
+ tcp-request content set-var(req.pass) req.payload(0,0),mqtt_field_value(connect,password)
+ tcp-request content set-var(req.maxpktsz) req.payload(0,0),mqtt_field_value(connect,39)
+ tcp-request content set-var(req.reqpbinfo) req.payload(0,0),mqtt_field_value(connect,23)
+ tcp-request content set-var(req.ctype) req.payload(0,0),mqtt_field_value(connect,3)
+ tcp-request content set-var(req.willrsptopic) req.payload(0,0),mqtt_field_value(connect,8)
+ tcp-request content reject if ! { var(req.protoname) -m str "MQTT" } || ! { var(req.protovsn) -m str "5" }
+ tcp-request content reject if ! { var(req.flags) -m str "238" } || ! { var(req.clientid) -m str "test_sub" }
+ tcp-request content reject if ! { var(req.user) -m str "test" } || ! { var(req.pass) -m str "passwd" }
+ tcp-request content reject if ! { var(req.willtopic) -m str "willtopic" } || ! { var(req.willbody) -m str "willpayload" }
+ tcp-request content reject if ! { var(req.maxpktsz) -m str "20" } || ! { var(req.reqpbinfo) -m str "1" }
+ tcp-request content reject if ! { var(req.ctype) -m str "text/plain" } || ! { var(req.willrsptopic) -m str "willrsptopic" }
+ default_backend be2
+
+ backend be1
+ server s1 ${s1_addr}:${s1_port}
+ tcp-response inspect-delay 1s
+ tcp-response content reject unless { res.payload(0,0),mqtt_is_valid }
+
+ backend be2
+ server s2 ${s2_addr}:${s2_port}
+ tcp-response inspect-delay 1s
+ tcp-response content reject unless { res.payload(0,0),mqtt_is_valid }
+ tcp-response content set-var(res.flags) res.payload(0,0),mqtt_field_value(connack,flags)
+ tcp-response content set-var(res.protovsn) res.payload(0,0),mqtt_field_value(connack,protocol_version)
+ tcp-response content set-var(res.rcode) res.payload(0,0),mqtt_field_value(connack,reason_code)
+ tcp-response content set-var(res.sessexpint) res.payload(0,0),mqtt_field_value(connack,17)
+ tcp-response content set-var(res.recvmax) res.payload(0,0),mqtt_field_value(connack,33)
+ tcp-response content set-var(res.maxqos) res.payload(0,0),mqtt_field_value(connack,36)
+ tcp-response content set-var(res.retainavail) res.payload(0,0),mqtt_field_value(connack,37)
+ tcp-response content set-var(res.maxpktsz) res.payload(0,0),mqtt_field_value(connack,39)
+ tcp-response content set-var(res.topicaliasmax) res.payload(0,0),mqtt_field_value(connack,34)
+ tcp-response content reject if ! { var(res.protovsn) -m str "5" } || ! { var(res.flags) -m str "0" }
+ tcp-response content reject if ! { var(res.rcode) -m str "0" } || ! { var(res.sessexpint) -m str "120" }
+ tcp-response content reject if ! { var(res.recvmax) -m str "32767" } || ! { var(res.maxqos) -m str "1" }
+ tcp-response content reject if ! { var(res.retainavail) -m str "1" } || ! { var(res.maxpktsz) -m str "65535" }
+ tcp-response content reject if ! { var(res.topicaliasmax) -m str "10" }
+} -start
+
+client c1_311_1 -connect ${h1_fe1_sock} {
+ # Valid MQTT 3.1.1 CONNECT packet (id: test_sub)
+ sendhex "10d40100044d5154540402003c00c8746573745f737562616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161"
+ recv 4
+ expect_close
+} -run
+
+client c1_311_2 -connect ${h1_fe1_sock} {
+ # Valid MQTT 3.1.1 CONNECT packet (id: <empty> - username: test - passwd: passwd)
+ sendhex "101a00044d51545404c2003c00000004746573740006706173737764"
+ recv 4
+ expect_close
+} -run
+
+client c1_311_3 -connect ${h1_fe1_sock} {
+ # Valid MQTT 3.1.1 CONNECT packet (id: test_sub - username: test - passwd: passwd - will_topic: willtopic - will_payload: willpayload)
+ sendhex "103a00044d51545404ee003c0008746573745f737562000977696c6c746f706963000b77696c6c7061796c6f61640004746573740006706173737764"
+ recv 4
+ expect_close
+} -run
+
+client c1_50_1 -connect ${h1_fe1_sock} {
+ # Valid MQTT 5.0 CONNECT packet (id: test_sub)
+ sendhex "101800044d5154540502003c032100140008746573745f737562"
+ recv 8
+ expect_close
+} -run
+
+client c1_50_2 -connect ${h1_fe1_sock} {
+ # Valid MQTT 5.0 CONNECT packet (id: test_sub - username: test - passwd: passwd)
+ sendhex "102600044d51545405c2003c032100140008746573745f7375620004746573740006706173737764"
+ recv 8
+ expect_close
+} -run
+
+client c1_50_3 -connect ${h1_fe1_sock} {
+ # Valid MQTT 5.0 complex CONNECT/CONNACK packet
+ sendhex "107e00044d51545405ee003c182700000014170126000161000162260001630001642100140008746573745f7375622a03000a746578742f706c61696e08000c77696c6c727370746f7069632600016500016626000167000168000977696c6c746f706963000b77696c6c7061796c6f61640004746573740006706173737764"
+ recv 39
+ expect_close
+} -run
+
+client c2_311_1 -connect ${h1_fe1_sock} {
+    # Invalid MQTT 3.1.1 PINGREQ
+ sendhex "d000"
+ expect_close
+} -run
+
+client c2_311_2 -connect ${h1_fe1_sock} {
+ # Invalid MQTT 3.1.1 CONNECT packet with invalid flags (!= 0x00)
+ sendhex "111400044d5154540402003c0008746573745f737562"
+ expect_close
+} -run
+
+client c2_311_3 -connect ${h1_fe1_sock} {
+ # Invalid MQTT 3.1.1 CONNACK packet with invalid flags (!= 0x00)
+ sendhex "101400044d5154540402003c0008746573745f737562"
+ expect_close
+} -run
+
+client c2_311_4 -connect ${h1_fe1_sock} {
+    # Invalid MQTT 3.1.1 CONNECT with too long remaining_length ( > 4 bytes)
+ sendhex "10ffffffff1400044d5154540402003c0008746573745f737562"
+ expect_close
+} -run
+
+client c2_311_4 -connect ${h1_fe1_sock} {
+    # Invalid MQTT 3.1.1 CONNECT with remaining length not matching the payload (0x13 != 0x14)
+ sendhex "101300044d5154540402003c000874657374a5f737562"
+ expect_close
+} -run
+
+client c2_311_4 -connect ${h1_fe1_sock} {
+    # Invalid MQTT 3.1.1 CONNECT with remaining length not matching the payload (0x18 != 0x14)
+ sendhex "101800044d5154540402003c000874657374a5f737562ffffffff"
+ expect_close
+} -run
+
+
+client c2_50_1 -connect ${h1_fe2_sock} {
+ # complex MQTT 5.0 CONNECT/CONNACK packet
+ # - CONNECT :
+ # client-id : test_sub
+ # username : test
+ # password : passwd
+ # will-topic : willtopic
+ # will-payload: willpayload
+ # connect props:
+ # maximum-packet-size : 20
+ # request-problem-information: 1
+ # user-property : name=a value=b
+ # user-property : name=c value=d
+ # will props:
+ # content-type : text/plain
+ # response-topic: willrsptopic
+ # user-property : name=e value=f
+ # user-property : name=g value=h
+ # - CONNACK :
+ # flags : 0x00
+ # reason-code: 0x00
+ # connack props:
+ # session-Expiry-interval: 120
+ # receive-maximum : 32767
+ # maximum-qos : 1
+ # retain-available : 1
+ # maximum-packet-size : 65535
+ # topic-alias-maximum : 10
+ # user-property : name=a value=b
+ # user-property : name=c value=d
+ sendhex "107e00044d51545405ee003c182700000014170126000161000162260001630001642100140008746573745f7375622a03000a746578742f706c61696e08000c77696c6c727370746f7069632600016500016626000167000168000977696c6c746f706963000b77696c6c7061796c6f61640004746573740006706173737764"
+ recv 39
+ expect_close
+} -run
+
+client c3_31_1 -connect ${h1_fe1_sock} {
+ # Valid MQTT 3.1 CONNECT packet (id: test_sub - username: test - passwd: passwd)
+ sendhex "102400064d514973647003c200000008746573745f7375620004746573740006706173737764"
+ recv 4
+} -run
\ No newline at end of file
diff --git a/reg-tests/converter/param.vtc b/reg-tests/converter/param.vtc
new file mode 100644
index 0000000..1633603
--- /dev/null
+++ b/reg-tests/converter/param.vtc
@@ -0,0 +1,80 @@
+varnishtest "param converter Test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 10 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ ### requests
+ http-request set-var(txn.query) query
+ http-response set-header Found %[var(txn.query),param(test)] if { var(txn.query),param(test) -m found }
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/foo/?test=1&b=4&d"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "1"
+
+ txreq -url "/?a=1&b=4&test=34"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "34"
+
+ txreq -url "/?test=bar"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "bar"
+
+ txreq -url "/?a=b&c=d"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "<undef>"
+
+ txreq -url "/?a=b&test=t&c=d"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "t"
+
+ txreq -url "/?a=b&test&c=d"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == ""
+
+ txreq -url "/?test="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == ""
+
+ txreq -url "/?a=b&test"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == ""
+
+ txreq -url "/?testing=123"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "<undef>"
+
+ txreq -url "/?testing=123&test=4"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "4"
+} -run
diff --git a/reg-tests/converter/secure_memcmp.vtc b/reg-tests/converter/secure_memcmp.vtc
new file mode 100644
index 0000000..6ff74e6
--- /dev/null
+++ b/reg-tests/converter/secure_memcmp.vtc
@@ -0,0 +1,143 @@
+varnishtest "secure_memcmp converter Test"
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTION=OPENSSL
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 4 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 7 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ # This frontend matches two base64 encoded values and does not need to
+ # handle null bytes.
+
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.hash) req.hdr(hash)
+ http-request set-var(txn.raw) req.hdr(raw)
+
+ acl is_match var(txn.raw),sha1,base64,secure_memcmp(txn.hash)
+
+ http-response set-header Match true if is_match
+ http-response set-header Match false if !is_match
+
+ default_backend be
+
+ frontend fe2
+ # This frontend matches two binary values, needing to handle null
+ # bytes.
+ bind "fd@${fe2}"
+
+ #### requests
+ http-request set-var(txn.hash) req.hdr(hash),b64dec
+ http-request set-var(txn.raw) req.hdr(raw)
+
+ acl is_match var(txn.raw),sha1,secure_memcmp(txn.hash)
+
+ http-response set-header Match true if is_match
+ http-response set-header Match false if !is_match
+
+ default_backend be2
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+
+ backend be2
+ server s2 ${s2_addr}:${s2_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "Raw: 1" \
+ -hdr "Hash: NWoZK3kTsExUV00Ywo1G5jlUKKs="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 2" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELA="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 2" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELX="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "false"
+ txreq -url "/" \
+ -hdr "Raw: 3" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELA="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "false"
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq -url "/" \
+ -hdr "Raw: 1" \
+ -hdr "Hash: NWoZK3kTsExUV00Ywo1G5jlUKKs="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 2" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELA="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 2" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELX="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "false"
+ txreq -url "/" \
+ -hdr "Raw: 3" \
+ -hdr "Hash: 2kuSN7rMzfGcB2DKt67EqDWQELA="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "false"
+
+ # Test for values with leading nullbytes.
+ txreq -url "/" \
+ -hdr "Raw: 6132845" \
+ -hdr "Hash: AAAAVaeL9nNcSok1j6sd40EEw8s="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 49177200" \
+ -hdr "Hash: AAAA9GLglTNv2JoMv2n/w9Xadhc="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "true"
+ txreq -url "/" \
+ -hdr "Raw: 6132845" \
+ -hdr "Hash: AAAA9GLglTNv2JoMv2n/w9Xadhc="
+ rxresp
+ expect resp.status == 200
+ expect resp.http.match == "false"
+} -run
diff --git a/reg-tests/converter/sha2.vtc b/reg-tests/converter/sha2.vtc
new file mode 100644
index 0000000..e90e274
--- /dev/null
+++ b/reg-tests/converter/sha2.vtc
@@ -0,0 +1,57 @@
+varnishtest "sha2 converter Test"
+
+#REQUIRE_VERSION=2.1
+#REQUIRE_OPTION=OPENSSL
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.hash) req.hdr(hash)
+
+ http-response set-header SHA2 "%[var(txn.hash),sha2,hex,lower]"
+ http-response set-header SHA2-224 "%[var(txn.hash),sha2(224),hex,lower]"
+ http-response set-header SHA2-256 "%[var(txn.hash),sha2(256),hex,lower]"
+ http-response set-header SHA2-384 "%[var(txn.hash),sha2(384),hex,lower]"
+ http-response set-header SHA2-512 "%[var(txn.hash),sha2(512),hex,lower]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "Hash: 1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha2 == "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"
+ expect resp.http.sha2-224 == "e25388fde8290dc286a6164fa2d97e551b53498dcbf7bc378eb1f178"
+ expect resp.http.sha2-256 == "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"
+ expect resp.http.sha2-384 == "47f05d367b0c32e438fb63e6cf4a5f35c2aa2f90dc7543f8a41a0f95ce8a40a313ab5cf36134a2068c4c969cb50db776"
+ expect resp.http.sha2-512 == "4dff4ea340f0a823f15d3f4f01ab62eae0e5da579ccb851f8db9dfe84c58b2b37b89903a740e1ee172da793a6e79d560e5f7f9bd058a12a280433ed6fa46510a"
+ txreq -url "/" \
+ -hdr "Hash: 2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.sha2 == "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"
+ expect resp.http.sha2-224 == "58b2aaa0bfae7acc021b3260e941117b529b2e69de878fd7d45c61a9"
+ expect resp.http.sha2-256 == "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"
+ expect resp.http.sha2-384 == "d063457705d66d6f016e4cdd747db3af8d70ebfd36badd63de6c8ca4a9d8bfb5d874e7fbd750aa804dcaddae7eeef51e"
+ expect resp.http.sha2-512 == "40b244112641dd78dd4f93b6c9190dd46e0099194d5a44257b7efad6ef9ff4683da1eda0244448cb343aa688f5d3efd7314dafe580ac0bcbf115aeca9e8dc114"
+} -run
diff --git a/reg-tests/converter/url_dec.vtc b/reg-tests/converter/url_dec.vtc
new file mode 100644
index 0000000..d5e317b
--- /dev/null
+++ b/reg-tests/converter/url_dec.vtc
@@ -0,0 +1,37 @@
+varnishtest "url_dec converter Test"
+
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request set-var(txn.url) url
+ http-response set-header url_dec0 "%[var(txn.url),url_dec]"
+ http-response set-header url_dec1 "%[var(txn.url),url_dec(1)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/bla+%20?foo%3Dbar%2B42+42%20"
+ rxresp
+ expect resp.http.url_dec0 == "/bla+ ?foo=bar+42 42 "
+ expect resp.http.url_dec1 == "/bla ?foo=bar+42 42 "
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/converter/url_enc.vtc b/reg-tests/converter/url_enc.vtc
new file mode 100644
index 0000000..74acac8
--- /dev/null
+++ b/reg-tests/converter/url_enc.vtc
@@ -0,0 +1,43 @@
+varnishtest "url_enc converter test"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request set-var(txn.url0) "str(foo=bar+42 42 )"
+ http-request set-var(txn.url1) "var(txn.url0),url_enc"
+ http-request set-var(txn.url2) "var(txn.url1),url_dec"
+ http-request set-var(txn.url3) "var(txn.url2),url_enc(query)"
+ http-response set-header url_enc0 "%[var(txn.url1)]"
+ http-response set-header url_dec "%[var(txn.url2)]"
+ http-response set-header url_enc1 "%[var(txn.url3)]"
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.http.url_enc0 == "foo%3Dbar%2B42%2042%20"
+ expect resp.http.url_dec == "foo=bar+42 42 "
+ expect resp.http.url_enc1 == "foo%3Dbar%2B42%2042%20"
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/converter/word.vtc b/reg-tests/converter/word.vtc
new file mode 100644
index 0000000..acd4678
--- /dev/null
+++ b/reg-tests/converter/word.vtc
@@ -0,0 +1,43 @@
+varnishtest "word converter Test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ #### requests
+ http-request set-var(txn.uri) path
+ http-response set-header Found %[var(txn.uri),word(2,/)] if { var(txn.uri),word(2,/) -m found }
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/foo/bar/baz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "bar"
+ txreq -url "/foo//bar/baz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "bar"
+ txreq -url "/foo"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.found == "<undef>"
+} -run
diff --git a/reg-tests/filters/random-forwarding.vtc b/reg-tests/filters/random-forwarding.vtc
new file mode 100644
index 0000000..abb2bcc
--- /dev/null
+++ b/reg-tests/filters/random-forwarding.vtc
@@ -0,0 +1,138 @@
+varnishtest "Filtering test with several filters and random forwarding (via trace filter)"
+
+#REQUIRE_VERSION=2.4
+#REQUIRE_OPTION=ZLIB|SLZ
+#REGTEST_TYPE=slow
+
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ expect req.url == "/"
+ expect req.bodylen == 1048576
+ expect req.http.accept-encoding == "<undef>"
+ txresp \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 1048576
+
+ rxreq
+ expect req.url == "127.0.0.1:80"
+ txresp -nolen
+ recv 36000
+ send_n 1000 "0123456789abcdefghijklmnopqrstuvwxyz"
+ barrier b1 sync
+
+ accept
+ rxreq
+ expect req.url == "/"
+ txresp -nolen \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 20480
+ close
+
+ accept
+ rxreq
+ expect req.url == "/"
+ txresp -nolen
+ close
+
+ accept
+ rxreq
+ expect req.url == "/"
+ expect req.bodylen == 20480
+ txresp -nolen \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 20480
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ compression offload
+ compression algo gzip
+
+ filter trace name "BEFORE" random-forwarding quiet
+ filter compression
+ filter trace name "AFTER" random-forwarding quiet
+ default_backend be1
+
+ backend be1
+ server www ${s1_addr}:${s1_port}
+
+ listen li1
+ mode tcp
+ bind "fd@${li1}"
+ # Validate nothing is blocked in TCP mode
+ filter compression
+ server www ${s1_addr}:${s1_port}
+
+
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url "/" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 1048576
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "gzip"
+ expect resp.http.transfer-encoding == "chunked"
+ gunzip
+ expect resp.bodylen == 1048576
+
+ txreq -method "CONNECT" -url "127.0.0.1:80" -nolen
+ rxresp -no_obj
+ expect resp.status == 200
+ send_n 1000 "0123456789abcdefghijklmnopqrstuvwxyz"
+ recv 36000
+ barrier b1 sync
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ txreq -url "/" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Content-Type: text/plain"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.content-length == "<undef>"
+ expect resp.bodylen == 20480
+} -run
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq -url "/" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Content-Type: text/plain"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.content-length == "<undef>"
+ expect resp.bodylen == 0
+} -run
+
+client c4 -connect ${h1_li1_sock} {
+ txreq -url "/" \
+ -hdr "Accept-Encoding: gzip" \
+ -hdr "Content-Type: text/plain" \
+ -bodylen 20480
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-encoding == "<undef>"
+ expect resp.http.transfer-encoding == "<undef>"
+ expect resp.http.content-length == "<undef>"
+ expect resp.bodylen == 20480
+ expect_close
+} -run
diff --git a/reg-tests/http-capture/multiple_headers.vtc b/reg-tests/http-capture/multiple_headers.vtc
new file mode 100644
index 0000000..1ae210b
--- /dev/null
+++ b/reg-tests/http-capture/multiple_headers.vtc
@@ -0,0 +1,91 @@
+varnishtest "Tests for 'capture (request|response) header'"
+feature ignore_unknown_macro
+
+# This script checks that the last occurrences of "fooresp" and "fooreq" header
+# are correctly captured and added to the logs.
+# Note that varnishtest does not support more than MAX_HDR header.
+
+syslog S -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h_pid}\\]: .* .* fe be/srv .* 200 1[0-9]{4} - - ---- .* .* {HPhx8n59qjjNBLjP} {htb56qDdCcbRVTfS} \"GET / HTTP/1\\.1\""
+} -start
+
+server s {
+ rxreq
+ txresp -hdr "fooresp: HnFDGJ6KvhSG5QjX" -hdr "fooresp: 8dp7vBMQjTMkVwtG" \
+ -hdr "fooresp: NTpxWmvsNKGxvH6K" -hdr "fooresp: sPKNNJ5VRBDz9qXP" \
+ -hdr "fooresp: HnFDGJ6KvhSG5QjX" -hdr "fooresp: 8dp7vBMQjTMkVwtG" \
+ -hdr "fooresp: VSNnccbGkvfM9JK9" -hdr "fooresp: 9D5cjwtK3LCxmg4F" \
+ -hdr "fooresp: dsbxGqlBPRWGP3vX" -hdr "fooresp: xf6VK6GXlgdj5mwc" \
+ -hdr "fooresp: 8jzM3clRKtdL2WWb" -hdr "fooresp: v7ZHrTPjDR6lm6Bg" \
+ -hdr "fooresp: FQT6th9whMqQ7Z6C" -hdr "fooresp: KM22HH6lRBw6SHQT" \
+ -hdr "fooresp: PmRHphHXmTV9kZNS" -hdr "fooresp: CkGRbTJrD5nSVpFk" \
+ -hdr "fooresp: KQ9mzmMHpmmZ2SXP" -hdr "fooresp: W5FqfFDN6dqBxjK7" \
+ -hdr "fooresp: bvcxNPK4gpnTvn3z" -hdr "fooresp: BSXRLSWMsgQN54cC" \
+ -hdr "fooresp: ZX9ttTnlbXtJK55d" -hdr "fooresp: KH9StjMHF73NqzL8" \
+ -hdr "fooresp: W2q2m6MvMLcnXsX7" -hdr "fooresp: wtrjnJgFzHDvMg5r" \
+ -hdr "fooresp: Vpk2c2DsbWf2Gtwh" -hdr "fooresp: sCcW2qpRhFHHRDpH" \
+ -hdr "fooresp: P4mltXtvxLsnPcNS" -hdr "fooresp: TXgdSKNMmsJ8x9zq" \
+ -hdr "fooresp: n5t8pdZgnGFXZDd3" -hdr "fooresp: pD84GCtkWZqWbCM9" \
+ -hdr "fooresp: wx2FPxsGqSRjNVws" -hdr "fooresp: TXmtBCqPTVGFc3NK" \
+ -hdr "fooresp: 4DrFTLxpcPk2n3Zv" -hdr "fooresp: vrcFr9MWpqJWhK4h" \
+ -hdr "fooresp: HMsCHMZnHT3q8qD2" -hdr "fooresp: HsCXQGTxDpsMf4z6" \
+ -hdr "fooresp: 9rb2vjvvd2SzCQVT" -hdr "fooresp: qn5C2fZTWHVp7NkC" \
+ -hdr "fooresp: ZVd5ltcngZFHXfvr" -hdr "fooresp: j6BZVdV8fkz5tgjR" \
+ -hdr "fooresp: 6qfVwfHqdfntQjmP" -hdr "fooresp: RRr9nTnwjG6d2x7X" \
+ -hdr "fooresp: RJXtWtdJRTss6JgZ" -hdr "fooresp: zzHZWm6bqXDN9k47" \
+ -hdr "fooresp: htb56qDdCcbRVTfS" \
+ -bodylen 16384
+} -start
+
+haproxy h -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be
+ server srv ${s_addr}:${s_port}
+
+ frontend fe
+ option httplog
+ log ${S_addr}:${S_port} local0 debug err
+ capture request header fooreq len 25
+ capture response header fooresp len 25
+
+ bind "fd@${fe}"
+ use_backend be
+} -start
+
+client c1 -connect ${h_fe_sock} {
+ txreq -hdr "fooreq: c8Ck8sx8qfXk5pSS" -hdr "fooreq: TGNXbG2DF3TmLWK3" \
+ -hdr "fooreq: mBxq9Cgr8GN6hkt6" -hdr "fooreq: MHZ6VBCPgs564KfR" \
+ -hdr "fooreq: BCCwX2kL9BSMCqvt" -hdr "fooreq: 8rXw87xVTphpRQb7" \
+ -hdr "fooreq: gJ3Tp9kXQlqLC8Qp" -hdr "fooreq: dFnLs6wpMl2M5N7c" \
+ -hdr "fooreq: r3f9WgQ8Brqw37Kj" -hdr "fooreq: dbJzSSdCqV3ZVtXK" \
+ -hdr "fooreq: 5HxHd6g4n2Rj2CNG" -hdr "fooreq: HNqQSNfkt6q4zK26" \
+ -hdr "fooreq: rzqNcfskPR7vW4jG" -hdr "fooreq: 9c7txWhsdrwmkR6d" \
+ -hdr "fooreq: 3v8Nztg9l9vLSKJm" -hdr "fooreq: lh4WDxMX577h4z3l" \
+ -hdr "fooreq: mFtHj5SKDvfcGzfq" -hdr "fooreq: PZ5B5wRM9D7GLm7W" \
+ -hdr "fooreq: fFpN4zCkLTxzp5Dz" -hdr "fooreq: J5XMdfCCHmmwkr2f" \
+ -hdr "fooreq: KqssZ3SkZnZJF8mz" -hdr "fooreq: HrGgsnBnslKN7Msz" \
+ -hdr "fooreq: d8TQltZ39xFZBNx2" -hdr "fooreq: mwDt2k2tvqM8x5kQ" \
+ -hdr "fooreq: 7Qh6tM7s7z3P8XCl" -hdr "fooreq: S3mTVbbPhJbLR7n2" \
+ -hdr "fooreq: zr7hMDvrrwfvpmTT" -hdr "fooreq: lV9TnZX2CtSnr4k8" \
+ -hdr "fooreq: bMdJx8pVDG2nWFNg" -hdr "fooreq: FkGvB2cBwNrB3cm4" \
+ -hdr "fooreq: 5ckNn3m6m8r2CXLF" -hdr "fooreq: sk4pJGTSZ5HMPJP5" \
+ -hdr "fooreq: HgVgQ73zhLwX6Wzq" -hdr "fooreq: T5k2QbFKvCVJlz4c" \
+ -hdr "fooreq: SKcNPw8CXGKhtxNP" -hdr "fooreq: n9fFrcR2kRQJrCpZ" \
+ -hdr "fooreq: hrJ2MXCdcSCDhQ6n" -hdr "fooreq: 9xsWQ8srzLDvG9F4" \
+ -hdr "fooreq: GNcP9NBTFJkg4hbk" -hdr "fooreq: Vg8B8MNwz4T7q5Tj" \
+ -hdr "fooreq: XXns3qPCzZmt9j4G" -hdr "fooreq: hD7TnP43bcPHm5g2" \
+ -hdr "fooreq: wZbxVq7MwmfBSqb5" -hdr "fooreq: HPhx8n59qjjNBLjP" \
+ -bodylen 16384
+ rxresp
+ expect resp.status == 200
+} -start
+
+server s -wait
+syslog S -wait
+
diff --git a/reg-tests/http-cookies/cookie_insert_indirect.vtc b/reg-tests/http-cookies/cookie_insert_indirect.vtc
new file mode 100644
index 0000000..6b86360
--- /dev/null
+++ b/reg-tests/http-cookies/cookie_insert_indirect.vtc
@@ -0,0 +1,54 @@
+varnishtest "HTTP cookie basic test"
+feature ignore_unknown_macro
+
+# This script tests "cookie <name> insert indirect" directive.
+# The client sends a wrong "SRVID=s2" cookie.
+# haproxy removes it.
+# The server replies with "SRVID=S1" after having checked that
+# no cookies were sent by haproxy.
+# haproxy replies "SRVID=server-one" to the client.
+# We log the HTTP request to a syslog server and check its "--II"
+# (invalid, insert) flags.
+
+syslog S1 -level notice {
+ recv info
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* fe1 be1/srv1 .* --II .* \"GET / HTTP/1\\.1\""
+} -start
+
+server s1 {
+ rxreq
+ expect req.http.cookie == <undef>
+ txresp -hdr "Cookie: SRVID=S1"
+} -start
+
+haproxy h1 -conf {
+ global
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+ defaults
+ mode http
+ option httplog
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ log global
+
+ backend be1
+ cookie SRVID insert indirect
+ server srv1 ${s1_addr}:${s1_port} cookie server-one
+
+ frontend fe1
+ option httplog
+ bind "fd@${fe1}"
+ use_backend be1
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "Cookie: SRVID=s2"
+ rxresp
+ expect resp.http.Set-Cookie ~ "^SRVID=server-one;.*"
+} -start
+
+
+client c1 -wait
+syslog S1 -wait
diff --git a/reg-tests/http-cookies/h2_cookie_concat.vtc b/reg-tests/http-cookies/h2_cookie_concat.vtc
new file mode 100644
index 0000000..e2e6d81
--- /dev/null
+++ b/reg-tests/http-cookies/h2_cookie_concat.vtc
@@ -0,0 +1,42 @@
+varnishtest "HTTP/2 cookie concatenation"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.cookie == "c1=foo; c2=bar; c3=baz"
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ frontend fe1
+ bind "fd@${fe1}" proto h2
+ use_backend be1
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr "cookie" "c1=foo" \
+ -hdr "cookie" "c2=bar" \
+ -hdr "cookie" "c3=baz"
+ rxhdrs
+ } -run
+} -run
diff --git a/reg-tests/http-errorfiles/errorfiles.vtc b/reg-tests/http-errorfiles/errorfiles.vtc
new file mode 100644
index 0000000..1ace744
--- /dev/null
+++ b/reg-tests/http-errorfiles/errorfiles.vtc
@@ -0,0 +1,51 @@
+varnishtest "Test the errorfile directive in proxy sections"
+
+# This config tests the errorfile directive in proxy sections (including the
+# defaults section).
+
+feature ignore_unknown_macro
+
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ errorfile 400 ${testdir}/errors/400.http
+ errorfile 403 ${testdir}/errors/403.http
+ errorfile 408 /dev/null
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ errorfile 403 ${testdir}/errors/403-1.http
+ errorfile 500 /dev/null
+
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny deny_status 403 if { path /403 }
+ http-request deny deny_status 408 if { path /408 }
+ http-request deny deny_status 500 if { path /500 }
+
+} -start
+
+client c1r1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "default"
+} -run
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /408
+ expect_close
+} -run
+client c1r4 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /500
+ expect_close
+} -run
diff --git a/reg-tests/http-errorfiles/errors/400-1.http b/reg-tests/http-errorfiles/errors/400-1.http
new file mode 100644
index 0000000..86a2e69
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/400-1.http
@@ -0,0 +1,9 @@
+HTTP/1.1 400 Bad request
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-1
+
+<html><body><h1>400 Bad request</h1>
+Your browser sent an invalid request.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/400-2.http b/reg-tests/http-errorfiles/errors/400-2.http
new file mode 100644
index 0000000..c108510
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/400-2.http
@@ -0,0 +1,9 @@
+HTTP/1.1 400 Bad request
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-2
+
+<html><body><h1>400 Bad request</h1>
+Your browser sent an invalid request.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/400-3.http b/reg-tests/http-errorfiles/errors/400-3.http
new file mode 100644
index 0000000..1fe1841
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/400-3.http
@@ -0,0 +1,9 @@
+HTTP/1.1 400 Bad request
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-3
+
+<html><body><h1>400 Bad request</h1>
+Your browser sent an invalid request.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/400.http b/reg-tests/http-errorfiles/errors/400.http
new file mode 100644
index 0000000..ce229aa
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/400.http
@@ -0,0 +1,9 @@
+HTTP/1.1 400 Bad request
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: default
+
+<html><body><h1>400 Bad request</h1>
+Your browser sent an invalid request.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/403-1.http b/reg-tests/http-errorfiles/errors/403-1.http
new file mode 100644
index 0000000..08bdf02
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/403-1.http
@@ -0,0 +1,9 @@
+HTTP/1.0 403 Forbidden
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-1
+
+<html><body><h1>403 Forbidden</h1>
+Request forbidden by administrative rules.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/403-2.http b/reg-tests/http-errorfiles/errors/403-2.http
new file mode 100644
index 0000000..9c07e5d
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/403-2.http
@@ -0,0 +1,9 @@
+HTTP/1.0 403 Forbidden
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-2
+
+<html><body><h1>403 Forbidden</h1>
+Request forbidden by administrative rules.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/403.http b/reg-tests/http-errorfiles/errors/403.http
new file mode 100644
index 0000000..fd969b2
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/403.http
@@ -0,0 +1,9 @@
+HTTP/1.0 403 Forbidden
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: default
+
+<html><body><h1>403 Forbidden</h1>
+Request forbidden by administrative rules.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/404-1.http b/reg-tests/http-errorfiles/errors/404-1.http
new file mode 100644
index 0000000..154ed0b
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/404-1.http
@@ -0,0 +1,9 @@
+HTTP/1.1 404 Not Found
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-1
+
+<html><body><h1>404 Not Found</h1>
+The resource could not be found.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/404-2.http b/reg-tests/http-errorfiles/errors/404-2.http
new file mode 100644
index 0000000..e26f91d
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/404-2.http
@@ -0,0 +1,9 @@
+HTTP/1.1 404 Not Found
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-2
+
+<html><body><h1>404 Not Found</h1>
+The resource could not be found.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/404-3.http b/reg-tests/http-errorfiles/errors/404-3.http
new file mode 100644
index 0000000..4bc1661
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/404-3.http
@@ -0,0 +1,9 @@
+HTTP/1.1 404 Not Found
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-3
+
+<html><body><h1>404 Not Found</h1>
+The resource could not be found.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/404.http b/reg-tests/http-errorfiles/errors/404.http
new file mode 100644
index 0000000..8dacd95
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/404.http
@@ -0,0 +1,9 @@
+HTTP/1.1 404 Not Found
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: default
+
+<html><body><h1>404 Not Found</h1>
+The resource could not be found.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/500-1.http b/reg-tests/http-errorfiles/errors/500-1.http
new file mode 100644
index 0000000..4e4f7e4
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/500-1.http
@@ -0,0 +1,9 @@
+HTTP/1.0 500 Internal Server Error
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: errors-1
+
+<html><body><h1>500 Internal Server Error</h1>
+An internal server error occurred.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/500.http b/reg-tests/http-errorfiles/errors/500.http
new file mode 100644
index 0000000..68a31ff
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/500.http
@@ -0,0 +1,9 @@
+HTTP/1.0 500 Internal Server Error
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+x-err-type: default
+
+<html><body><h1>500 Internal Server Error</h1>
+An internal server error occurred.
+</body></html>
diff --git a/reg-tests/http-errorfiles/errors/lf-403.txt b/reg-tests/http-errorfiles/errors/lf-403.txt
new file mode 100644
index 0000000..3a3c3aa
--- /dev/null
+++ b/reg-tests/http-errorfiles/errors/lf-403.txt
@@ -0,0 +1 @@
+The path "%[path]" is forbidden
diff --git a/reg-tests/http-errorfiles/http-error.vtc b/reg-tests/http-errorfiles/http-error.vtc
new file mode 100644
index 0000000..1af909b
--- /dev/null
+++ b/reg-tests/http-errorfiles/http-error.vtc
@@ -0,0 +1,75 @@
+varnishtest "Test the http-error directive"
+#REQUIRE_VERSION=2.2
+
+# This config tests the http-error directive.
+
+feature ignore_unknown_macro
+
+
+haproxy h1 -conf {
+ http-errors errors-1
+ errorfile 400 ${testdir}/errors/400-1.http
+ errorfile 403 ${testdir}/errors/403-1.http
+ errorfile 404 ${testdir}/errors/404-1.http
+ errorfile 500 ${testdir}/errors/500-1.http
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ errorfile 400 ${testdir}/errors/400.http
+ errorfile 404 ${testdir}/errors/404.http
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ http-error status 400
+ http-error status 403 default-errorfiles
+ http-error status 404 errorfiles errors-1
+ http-error status 500 errorfile ${testdir}/errors/500.http
+ http-error status 200 content-type "text/plain" hdr x-path "path=%[path]" lf-string "The path is \"%[path]\""
+
+ http-request return status 200 default-errorfiles if { path /200 }
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny deny_status 403 if { path /403 }
+ http-request deny deny_status 404 if { path /404 }
+ http-request deny deny_status 500 if { path /500 }
+
+} -start
+
+client c1r1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /200
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-path == "path=/200"
+ expect resp.http.content-type == "text/plain"
+ expect resp.body == "The path is \"/200\""
+} -run
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == <undef>
+ expect resp.http.content-length == 0
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == <undef>
+ expect resp.http.content-length == 93
+ expect resp.http.content-type == "text/html"
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /404
+ rxresp
+ expect resp.status == 404
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c1r4 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /500
+ rxresp
+ expect resp.status == 500
+ expect resp.http.x-err-type == "default"
+} -run
diff --git a/reg-tests/http-errorfiles/http_deny_errors.vtc b/reg-tests/http-errorfiles/http_deny_errors.vtc
new file mode 100644
index 0000000..353045d
--- /dev/null
+++ b/reg-tests/http-errorfiles/http_deny_errors.vtc
@@ -0,0 +1,77 @@
+varnishtest "Test the custom errors for HTTP deny rules"
+#REQUIRE_VERSION=2.2
+
+# This config tests the custom errors for HTTP deny rules.
+
+feature ignore_unknown_macro
+
+
+haproxy h1 -conf {
+ http-errors errors-1
+ errorfile 400 ${testdir}/errors/400-1.http
+ errorfile 403 ${testdir}/errors/403-1.http
+ errorfile 404 ${testdir}/errors/404-1.http
+ errorfile 500 /dev/null
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny deny_status 403 errorfile ${testdir}/errors/403.http if { path /403 }
+ http-request deny deny_status 404 errorfiles errors-1 if { path /404 }
+ http-request deny deny_status 500 errorfile /dev/null if { path /500-1 }
+ http-request deny deny_status 500 errorfiles errors-1 if { path /500-2 }
+
+ http-request deny status 500 hdr x-err-info "path=%[path]" content-type "text/plain" string "Internal Error" if { path /int-err }
+ http-request deny status 403 hdr x-err-info "path=%[path]" content-type "text/plain" lf-file ${testdir}/errors/lf-403.txt if { path /forbidden }
+
+} -start
+
+client c1r1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == <undef>
+} -run
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == "default"
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /404
+ rxresp
+ expect resp.status == 404
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c1r4 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /500-1
+ expect_close
+} -run
+client c1r5 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /500-2
+ expect_close
+} -run
+client c1r6 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /int-err
+ rxresp
+ expect resp.status == 500
+ expect resp.http.x-err-info == "path=/int-err"
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.content-length == 14
+ expect resp.body == "Internal Error"
+} -run
+client c1r7 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /forbidden
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-info == "path=/forbidden"
+ expect resp.http.content-type == "text/plain"
+ expect resp.body == "The path \"/forbidden\" is forbidden\n"
+} -run
diff --git a/reg-tests/http-errorfiles/http_errors.vtc b/reg-tests/http-errorfiles/http_errors.vtc
new file mode 100644
index 0000000..6b20be7
--- /dev/null
+++ b/reg-tests/http-errorfiles/http_errors.vtc
@@ -0,0 +1,134 @@
+varnishtest "Test the errorfiles directive"
+#REQUIRE_VERSION=2.2
+
+# This config tests the errorfiles directive.
+
+feature ignore_unknown_macro
+
+
+haproxy h1 -conf {
+ http-errors errors-1
+ errorfile 400 ${testdir}/errors/400-1.http
+ errorfile 403 ${testdir}/errors/403-1.http
+ errorfile 404 ${testdir}/errors/404-1.http
+ errorfile 500 ${testdir}/errors/500-1.http
+
+ http-errors errors-2
+ errorfile 400 ${testdir}/errors/400-2.http
+ errorfile 403 ${testdir}/errors/403-2.http
+ errorfile 404 ${testdir}/errors/404-2.http
+
+ http-errors errors-3
+ errorfile 400 ${testdir}/errors/400-3.http
+ errorfile 404 ${testdir}/errors/404-3.http
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ errorfiles errors-2
+ errorfile 400 ${testdir}/errors/400.http
+ errorfile 404 ${testdir}/errors/404.http
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny if { path /403 }
+ http-request deny deny_status 404 if { path /404 }
+ http-request deny deny_status 500 if { path /500 }
+
+ frontend fe2
+ bind "fd@${fe2}"
+ errorfiles errors-1
+ errorfile 500 ${testdir}/errors/500.http
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny if { path /403 }
+ http-request deny deny_status 404 if { path /404 }
+ http-request deny deny_status 500 if { path /500 }
+
+ frontend fe3
+ bind "fd@${fe3}"
+ errorfile 500 ${testdir}/errors/500.http
+ errorfiles errors-1 500
+ errorfiles errors-3 400
+ http-request deny deny_status 400 if { path /400 }
+ http-request deny if { path /403 }
+ http-request deny deny_status 404 if { path /404 }
+ http-request deny deny_status 500 if { path /500 }
+} -start
+
+client c1r1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "default"
+} -run
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == "errors-2"
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /404
+ rxresp
+ expect resp.status == 404
+ expect resp.http.x-err-type == "default"
+} -run
+client c1r4 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /500
+ rxresp
+ expect resp.status == 500
+ expect resp.http.x-err-type == <undef>
+} -run
+
+client c2r1 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c2r2 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c2r3 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /404
+ rxresp
+ expect resp.status == 404
+ expect resp.http.x-err-type == "errors-1"
+} -run
+client c2r4 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /500
+ rxresp
+ expect resp.status == 500
+ expect resp.http.x-err-type == "default"
+} -run
+
+client c3r1 -connect ${h1_fe3_sock} {
+ txreq -req GET -url /400
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "errors-3"
+} -run
+client c3r2 -connect ${h1_fe3_sock} {
+ txreq -req GET -url /403
+ rxresp
+ expect resp.status == 403
+ expect resp.http.x-err-type == "errors-2"
+} -run
+client c3r3 -connect ${h1_fe3_sock} {
+ txreq -req GET -url /404
+ rxresp
+ expect resp.status == 404
+ expect resp.http.x-err-type == "default"
+} -run
+client c3r4 -connect ${h1_fe3_sock} {
+ txreq -req GET -url /500
+ rxresp
+ expect resp.status == 500
+ expect resp.http.x-err-type == "errors-1"
+} -run
diff --git a/reg-tests/http-errorfiles/http_return.vtc b/reg-tests/http-errorfiles/http_return.vtc
new file mode 100644
index 0000000..8db77a9
--- /dev/null
+++ b/reg-tests/http-errorfiles/http_return.vtc
@@ -0,0 +1,46 @@
+varnishtest "Test the HTTP return action with errorfiles"
+#REQUIRE_VERSION=2.2
+
+# This config tests the HTTP return action when error files are used to reply to
+# the client.
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ http-errors errors-2
+ errorfile 400 ${testdir}/errors/400-2.http
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ errorfile 400 ${testdir}/errors/400.http
+ http-request return status 400 default-errorfiles if { path /def }
+ http-request return status 400 errorfile ${testdir}/errors/400-1.http if { path /400-1 }
+ http-request return status 400 errorfiles errors-2 if { path /400-2 }
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /def
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "default"
+} -run
+
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400-1
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "errors-1"
+} -run
+
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /400-2
+ rxresp
+ expect resp.status == 400
+ expect resp.http.x-err-type == "errors-2"
+} -run
diff --git a/reg-tests/http-messaging/common.pem b/reg-tests/http-messaging/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/http-messaging/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/http-messaging/h1_host_normalization.vtc b/reg-tests/http-messaging/h1_host_normalization.vtc
new file mode 100644
index 0000000..48174b8
--- /dev/null
+++ b/reg-tests/http-messaging/h1_host_normalization.vtc
@@ -0,0 +1,762 @@
+varnishtest "H1 authority validation and host normalization based on the scheme (rfc3986 6.2.3) or the method (connect)"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+
+syslog S1 -level info {
+ # C1
+ recv
+ expect ~ "^.* uri: GET http://toto:poue@hostname/c1 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C2
+ recv
+ expect ~ "^.* uri: GET http://hostname:8080/c2 HTTP/1.1; host: {hostname:8080}$"
+ barrier b1 sync
+
+ # C3
+ recv
+ expect ~ "^.* uri: GET https://hostname/c3 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C4
+ recv
+ expect ~ "^.* uri: GET https://hostname:80/c4 HTTP/1.1; host: {hostname:80}$"
+ barrier b1 sync
+
+ # C5
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:80 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C6
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+ recv
+ expect ~ "^.* uri: CONNECT hostname:443 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C7
+ recv
+ expect ~ "^.* uri: CONNECT hostname:8443 HTTP/1.1; host: {hostname:8443}$"
+ barrier b1 sync
+
+ # C8
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C9
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C10
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C11
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C12
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C13
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C14
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C15
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C16
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C17
+ recv
+ barrier b1 sync
+ expect ~ "^.* uri: <BADREQ>; host: $"
+
+ # C18
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C19
+ recv
+ expect ~ "^.* uri: <BADREQ>; host: $"
+ barrier b1 sync
+
+ # C20
+ recv
+ expect ~ "^.* uri: GET http://hostname/c20 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C21
+ recv
+ expect ~ "^.* uri: GET https://hostname/c21 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C22
+ recv
+ expect ~ "^.* uri: GET http://hostname/c22 HTTP/1.1; host: {hostname:80}$"
+ barrier b1 sync
+
+ # C23
+ recv
+ expect ~ "^.* uri: GET https://hostname/c23 HTTP/1.1; host: {hostname:443}$"
+ barrier b1 sync
+
+ # C24
+ recv
+ expect ~ "^.* uri: GET http://hostname/c24 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C25
+ recv
+ expect ~ "^.* uri: GET https://hostname/c25 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C26
+ recv
+ expect ~ "^.* uri: GET http://hostname/c26 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C27
+ recv
+ expect ~ "^.* uri: GET https://hostname/c27 HTTP/1.1; host: {hostname:}$"
+ barrier b1 sync
+
+ # C28
+ recv
+ expect ~ "^.* uri: GET http://hostname/c28 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C29
+ recv
+ expect ~ "^.* uri: GET http://hostname/c29 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C30
+ recv
+ expect ~ "^.* uri: GET https://hostname/c30 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C31
+ recv
+ expect ~ "^.* uri: GET https://hostname/c31 HTTP/1.1; host: {hostname}$"
+ barrier b1 sync
+
+ # C32
+ recv
+ expect ~ "^.* uri: GET http:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C33
+ recv
+ expect ~ "^.* uri: GET https:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C34
+ recv
+ expect ~ "^.* uri: GET http:// HTTP/1.1; host: {}$"
+ barrier b1 sync
+
+ # C35
+ recv
+ expect ~ "^.* uri: GET https:// HTTP/1.1; host: {}$"
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request capture req.hdr(host) len 512
+ log-format "uri: %r; host: %hr"
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+ http-request return status 200
+} -start
+
+# default port 80 with http scheme => should be normalized
+# Be sure userinfo are skipped
+client c1 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://toto:poue@hostname:80/c1" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port 8080 with http scheme => no normalization
+client c2 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:8080/c2" \
+ -hdr "host: hostname:8080"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# default port 443 with https scheme => should be normalized
+client c3 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c3" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port 80 with https scheme => no normalization
+client c4 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:80/c4" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on port 80 => should be normalized
+client c5 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c5 -connect ${h1_fe_sock} {
+
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c5 -connect ${h1_fe_sock} {
+
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on port 443 => should be normalized
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+client c6 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:443" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT on port non-default port => no normalization
+client c7 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:8443" \
+ -hdr "host: hostname:8443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# host miss-match => error
+client c8 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname2"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port miss-match => error
+client c9 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/" \
+ -hdr "host: hostname:81"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no host port with a non-default port in abs-uri => error
+client c10 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:8080/" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# non-default host port with a default in abs-uri => error
+client c11 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/" \
+ -hdr "host: hostname:81"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# miss-match between host headers => error
+client c12 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname1" \
+ -hdr "host: hostname2"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# miss-match between host headers but with a normalization => error
+client c13 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname1/" \
+ -hdr "host: hostname1:80" \
+ -hdr "host: hostname1"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# CONNECT authority without port => error
+client c14 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# host miss-match with CONNECT => error
+client c15 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname1:80" \
+ -hdr "host: hostname2:80"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# port miss-match with CONNECT => error
+client c16 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:80" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no host port with non-default port in CONNECT authority => error
+client c17 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:8080" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no authority => error
+client c18 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "/" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# no authority => error
+client c19 -connect ${h1_fe_sock} {
+ txreq \
+ -req "CONNECT" \
+ -url "hostname:" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# default port 80 with http scheme but no port for host value => should be normalized
+client c20 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/c20" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# default port 443 with https scheme but no port for host value => should be normalized
+client c21 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c21" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# http scheme, no port for the authority but default port for host value => no normalization
+client c22 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/c22" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, no port for the authority but default port for host value => no normalization
+client c23 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname/c23" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+# http scheme, empty port for the authority and no port for host value => should be normalized
+client c24 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:/c24" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, empty port for the authority and no port for host value => should be normalized
+client c25 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:/c25" \
+ -hdr "host: hostname"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, no port for the authority and empty port for host value => no normalization
+client c26 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname/c26" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, no port for the authority and empty port for host value => no normalization
+client c27 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname/c27" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, default port for the authority and empty port for host value => should be normalized
+client c28 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:80/c28" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# http scheme, empty port for the authority and default port for host value => should be normalized
+client c29 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://hostname:/c29" \
+ -hdr "host: hostname:80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, default port for the authority and empty port for host value => should be normalized
+client c30 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:443/c30" \
+ -hdr "host: hostname:"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# https scheme, empty port for the authority and default port for host value => should be normalized
+client c31 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://hostname:/c31" \
+ -hdr "host: hostname:443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Strange cases
+client c32 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://:" \
+ -hdr "host: :80"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+client c33 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://:" \
+ -hdr "host: :443"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Strange cases
+client c34 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "http://:" \
+ -hdr "host: :"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+
+client c35 -connect ${h1_fe_sock} {
+ txreq \
+ -req "GET" \
+ -url "https://:" \
+ -hdr "host: :"
+
+ rxresp
+ expect resp.status == 200
+} -run
+
+syslog S1 -wait
diff --git a/reg-tests/http-messaging/h1_to_h1.vtc b/reg-tests/http-messaging/h1_to_h1.vtc
new file mode 100644
index 0000000..67aba14
--- /dev/null
+++ b/reg-tests/http-messaging/h1_to_h1.vtc
@@ -0,0 +1,301 @@
+varnishtest "HTTP request tests: H1 to H1 (HTX mode supported only for HAProxy >= 1.9)"
+
+# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
+
+feature ignore_unknown_macro
+
+server s1 {
+ ##
+ ## Handle GET requests
+ ##
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ rxreq
+ expect req.bodylen == 38
+ expect req.body == "this must be delivered, like it or not"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+
+ accept
+
+ ##
+ ## Handle HEAD requests
+ ##
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 38
+ expect req.body == "this must be delivered, like it or not"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ accept
+
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+
+ accept
+
+ ##
+ ## Handle POST requests
+ ##
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ # POST request with a body
+ rxreq
+ expect req.bodylen == 12
+ expect req.body == "this is sent"
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ # POST request without body
+ rxreq
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp \
+ -status 200 \
+ -body "response 4"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen feh1
+ bind "fd@${feh1}"
+ #bind "fd@${feh2}" proto h2
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# GET requests
+client c1h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "GET" \
+ -url "/test1.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 1"
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "GET" \
+ -url "/test2.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 2"
+
+ # third request sends a body with a GET
+ txreq \
+ -req "GET" \
+ -url "/test3.html" \
+ -body "this must be delivered, like it or not"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 3"
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "GET" \
+ -url "/test4.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74686973206973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 4"
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+# HEAD requests
+# Note: for now they fail with varnishtest, which expects the amount of
+# data advertised in the content-length response.
+client c2h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "HEAD" \
+ -url "/test11.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "HEAD" \
+ -url "/test12.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # third request sends a body with a HEAD
+ txreq \
+ -req "HEAD" \
+ -url "/test13.html" \
+ -body "this must be delivered, like it or not"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "HEAD" \
+ -url "/test14.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74686973206973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+client c3h1 -connect ${h1_feh1_sock} {
+ # first request is valid
+ txreq \
+ -req "POST" \
+ -url "/test21.html"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 1"
+
+ # second request is valid and advertises C-L:0
+ txreq \
+ -req "POST" \
+ -url "/test22.html" \
+ -hdr "content-length: 0"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 2"
+
+ # third request is valid and advertises (and sends) some contents
+ txreq \
+ -req "POST" \
+ -url "/test23.html" \
+ -body "this is sent"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 3"
+
+ # fourth request is valid and advertises C-L:0, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which must be
+ # dropped.
+ txreq \
+ -req "POST" \
+ -url "/test24.html" \
+ -hdr "content-length: 0" \
+ -hdr "connection: close"
+ # "this is not sent"
+ sendhex "74686973206973206E6F742073656E740D0A0D0A"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "response 4"
+
+ # the connection is expected to be closed and no more response must
+ # arrive here.
+ expect_close
+} -run
+
+client c4h1 -connect ${h1_feh1_sock} {
+ # this request is invalid and advertises an invalid C-L ending with an
+ # empty value, which results in a stream error.
+ txreq \
+ -req "GET" \
+ -url "/test31.html" \
+ -hdr "content-length: 0," \
+ -hdr "connection: close"
+ rxresp
+ expect resp.status == 400
+ expect_close
+} -run
+
+client c5h1 -connect ${h1_feh1_sock} {
+ # this request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ txreq \
+ -req "GET" \
+ -url "/test41.html" \
+ -hdr "content-length:" \
+ -hdr "connection: close"
+ rxresp
+ expect resp.status == 400
+ expect_close
+} -run
diff --git a/reg-tests/http-messaging/h2_desync_attacks.vtc b/reg-tests/http-messaging/h2_desync_attacks.vtc
new file mode 100644
index 0000000..112bc60
--- /dev/null
+++ b/reg-tests/http-messaging/h2_desync_attacks.vtc
@@ -0,0 +1,167 @@
+# This test ensures that h2 requests with invalid pseudo-headers are properly
+# rejected. Also, the host header must be ignored if authority is present. This
+# is necessary to avoid http/2 desync attacks through haproxy as described here
+# https://portswigger.net/research/http2
+
+varnishtest "h2 desync attacks"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.host == "hostname"
+ txresp
+} -start
+
+# haproxy frontend
+haproxy hap -conf {
+ defaults
+ mode http
+
+ listen feSrvH1
+ bind "fd@${feSrvH1}"
+ http-request return status 200
+
+ listen feSrvH2
+ bind "fd@${feSrvH2}" proto h2
+ http-request return status 200
+
+ listen fe1
+ bind "fd@${fe1}" proto h2
+ server srv-hapSrv "${hap_feSrvH1_addr}:${hap_feSrvH1_port}"
+
+ listen fe2
+ bind "fd@${fe2}" proto h2
+ server srv-hapSrv "${hap_feSrvH2_addr}:${hap_feSrvH2_port}" proto h2
+
+ listen fe3
+ bind "fd@${fe3}" proto h2
+ server s1 "${s1_addr}:${s1_port}"
+} -start
+
+# valid request
+client c1 -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 200
+ } -run
+} -run
+
+# valid request
+client c2 -connect ${hap_fe2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 200
+ } -run
+} -run
+
+# invalid path
+client c3-path -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "hello-world"
+ rxrst
+ } -run
+} -run
+
+# invalid scheme
+client c4-scheme -connect ${hap_fe1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http://localhost/?" \
+ -url "/"
+ rxresp
+ expect resp.status == 400
+ } -run
+} -run
+
+# invalid method
+client c5-method -connect ${hap_fe2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET?" \
+ -scheme "http" \
+ -url "/"
+ rxresp
+ expect resp.status == 400
+ } -run
+} -run
+
+# different authority and host headers
+# in this case, host should be ignored in favor of the authority
+client c6-host-authority -connect ${hap_fe3_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -method "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname" \
+ -hdr "host" "other_host"
+ } -run
+} -run
+
+server s1 -wait
diff --git a/reg-tests/http-messaging/h2_to_h1.vtc b/reg-tests/http-messaging/h2_to_h1.vtc
new file mode 100644
index 0000000..637b664
--- /dev/null
+++ b/reg-tests/http-messaging/h2_to_h1.vtc
@@ -0,0 +1,324 @@
+varnishtest "HTTP request tests: H2 to H1 (HTX and legacy mode)"
+
+# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
+
+feature ignore_unknown_macro
+
+# synchronize requests between streams
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+barrier b4 cond 2 -cyclic
+barrier b5 cond 2 -cyclic
+barrier b6 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 1"
+
+ barrier b2 sync
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 2"
+
+ barrier b3 sync
+ rxreq
+ txresp \
+ -status 200 \
+ -body "response 3"
+
+ barrier b4 sync
+ # the next request is never received
+
+ barrier b5 sync
+ # the next request is never received
+
+ barrier b6 sync
+ # the next request is never received
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ #log stdout format raw daemon
+ mode http
+ option http-buffer-request
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen feh1
+ bind "fd@${feh1}"
+ bind "fd@${feh2}" proto h2
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test1.html"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 1"
+ } -run
+
+ # second request is valid and advertises C-L:0
+ stream 3 {
+ barrier b2 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test2.html" \
+ -hdr "content-length" "0"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 2"
+ } -run
+
+ # third request sends a body with a GET
+ stream 5 {
+ barrier b3 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test3.html" \
+ -nostrend \
+ -body "this must be delivered, like it or not"
+ rxwinup
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 3"
+ } -run
+
+ # fourth request is valid and advertises C-L:2, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which causes a
+ # stream error of type PROTOCOL_ERROR.
+ stream 7 {
+ barrier b4 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test4.html" \
+ -hdr "content-length" "2" \
+ -nostrend
+ txdata -data "this is sent and ignored"
+ rxrst
+ } -run
+
+ # fifth request is invalid and advertises an invalid C-L ending with an
+ # empty value, which results in a stream error.
+ stream 9 {
+ barrier b5 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test5.html" \
+ -hdr "content-length" "0," \
+ -nostrend
+ rxrst
+ } -run
+
+ # sixth request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ stream 11 {
+ barrier b6 sync
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test6.html" \
+ -hdr "content-length" "" \
+ -nostrend
+ rxrst
+ } -run
+} -run
+
+# HEAD requests : don't work well yet
+#client c2h2 -connect ${h1_feh2_sock} {
+# txpri
+# stream 0 {
+# txsettings
+# rxsettings
+# txsettings -ack
+# rxsettings
+# expect settings.ack == true
+# } -run
+#
+# # first request is valid
+# stream 1 {
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test11.html"
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # second request is valid and advertises C-L:0
+# stream 3 {
+# barrier b2 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test12.html" \
+# -hdr "content-length" "0"
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # third request sends a body with a GET
+# stream 5 {
+# barrier b3 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test13.html" \
+# -nostrend \
+# -body "this must be delivered, like it or not"
+# rxwinup
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#
+# # fourth request is valid and advertises C-L:0, and close, and is
+# # followed by a string "this is not sent\r\n\r\n" which must be
+# # dropped.
+# stream 7 {
+# barrier b4 sync
+# txreq \
+# -req "HEAD" \
+# -scheme "https" \
+# -url "/test14.html" \
+# -hdr "content-length" "0" \
+# -nostrend
+# txdata -data "this is sent and ignored"
+# rxwinup
+# rxhdrs
+# expect resp.status == 200
+# rxdata -all
+# expect resp.bodylen == 0
+# } -run
+#} -run
+
+# POST requests
+client c3h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test21.html"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 1"
+ } -run
+
+ # second request is valid and advertises C-L:0
+ stream 3 {
+ barrier b2 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test22.html" \
+ -hdr "content-length" "0"
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 2"
+ } -run
+
+ # third request sends a body with a POST
+ stream 5 {
+ barrier b3 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test23.html" \
+ -nostrend \
+ -body "this must be delivered, like it or not"
+ rxwinup
+ rxhdrs
+ expect resp.status == 200
+ rxdata -all
+ expect resp.body == "response 3"
+ } -run
+
+ # fourth request is valid and advertises C-L:2, and close, and is
+ # followed by a string "this is not sent\r\n\r\n" which results
+ # in a stream error.
+ stream 7 {
+ barrier b4 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test24.html" \
+ -hdr "content-length" "2" \
+ -nostrend
+ txdata -data "this is sent and ignored"
+ rxrst
+ } -run
+
+ # fifth request is invalid and advertises invalid C-L ending with an
+ # empty value, which results in a stream error.
+ stream 9 {
+ barrier b5 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test25.html" \
+ -hdr "content-length" "0," \
+ -nostrend
+ rxrst
+ } -run
+
+ # sixth request is invalid and advertises an empty C-L, which results
+ # in a stream error.
+ stream 11 {
+ barrier b6 sync
+ txreq \
+ -req "POST" \
+ -scheme "https" \
+ -url "/test26.html" \
+ -hdr "content-length" "" \
+ -nostrend
+ rxrst
+ } -run
+} -run
diff --git a/reg-tests/http-messaging/http_abortonclose.vtc b/reg-tests/http-messaging/http_abortonclose.vtc
new file mode 100644
index 0000000..ea57f3d
--- /dev/null
+++ b/reg-tests/http-messaging/http_abortonclose.vtc
@@ -0,0 +1,226 @@
+varnishtest "A test for the abortonclose option (H1 only)"
+feature ignore_unknown_macro
+
+# NOTE : This test may fail if too many vtest are running in parallel because
+# the port reserved for closed s1 server may be reused by another vtest
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.2-dev0)'"
+#REGTEST_TYPE=slow
+
+# b0 : Wait s1 was detected as DOWN to be sure it is stopped
+# b1 : Don't send /c4 before /c3 was received by s2 server
+# b2 : Used to receive syslog messages in the right order
+# b3 : finish c3 before s2
+
+barrier b0 cond 3 -cyclic
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+
+server s1 {
+} -start
+server s1 -break
+
+server s2 {
+ rxreq
+
+ # unlock c4
+ barrier b1 sync
+
+ # wait end of c3
+ barrier b3 sync
+
+ expect_close
+} -start
+
+syslog S1 -level info {
+ recv alert
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+ barrier b0 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c1 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1_2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c2 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/<NOSRV> [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CQ-- .* .* \"GET /c4 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/[0-9]*/-1/[0-9]* 400 .* - - CH-- .* .* \"GET /c3 HTTP/1\\.1\""
+} -start
+
+syslog S2 -level info {
+ recv alert
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: Server check/srv1 is DOWN.*"
+ barrier b0 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* -1 .* - - CC-- .* .* \"GET /c5 HTTP/1\\.1\""
+ barrier b2 sync
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be2/srv1 [0-9]*/[0-9]*/-1/-1/[0-9]* 503 .* - - SC-- .* .* \"GET /c6 HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ option abortonclose
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe1
+ option httplog
+ log ${S1_addr}:${S1_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1_1 if { path /c1 }
+ use_backend be1_2 if { path /c2 }
+
+ frontend fe2
+ option httplog
+ log ${S1_addr}:${S1_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be1_1
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be1_2
+ timeout connect 1s
+ retries 10
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend be2
+ server srv1 ${s2_addr}:${s2_port} maxconn 1
+
+ backend check
+ server srv1 ${s1_addr}:${s1_port} check
+ log ${S1_addr}:${S1_port} local0 debug alert
+} -start
+
+
+haproxy h2 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe1
+ option httplog
+ log ${S2_addr}:${S2_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1
+
+ backend be1
+ option abortonclose
+ server srv1 ${s1_addr}:${s1_port}
+
+ defaults
+ mode http
+ option abortonclose
+ retries 1
+ timeout client 10s
+ timeout server 10s
+ timeout connect 100ms
+ timeout queue 5s
+
+ frontend fe2
+ option httplog
+ log ${S2_addr}:${S2_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be2
+ no option abortonclose
+ server srv1 ${s1_addr}:${s1_port}
+
+ backend check
+ server srv1 ${s1_addr}:${s1_port} check
+ log ${S2_addr}:${S2_port} local0 debug alert
+} -start
+
+
+# Wait s1 was detected as DOWN
+barrier b0 sync
+
+# No server, wait all connection retries : SC--
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url /c1
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Wait c1 log entry
+barrier b2 sync
+
+# No server, abort during connections retries : CC--
+client c2 -connect ${h1_fe1_sock} {
+ txreq -url /c2
+} -run
+
+# Wait c2 log entry
+barrier b2 sync
+
+# server with maxconn=1, abort waiting the server reply : CH--
+client c3 -connect ${h1_fe2_sock} {
+ txreq -url /c3
+
+ # Wait c4 log entry
+ barrier b2 sync
+} -start
+
+# server with maxconn=1, abort waiting in the queue (c3 still attached) : CQ--
+client c4 -connect ${h1_fe2_sock} {
+ # Wait s2 receives c3 request
+ barrier b1 sync
+
+ txreq -url /c4
+ delay .2
+} -run
+
+client c3 -wait
+
+# unlock s2
+barrier b3 sync
+
+syslog S1 -wait
+
+
+# No server, abort during connections retries : CC--
+# abortonclose on backend only
+client c5 -connect ${h2_fe1_sock} {
+ txreq -url /c5
+} -run
+
+# Wait c5 log entry
+barrier b2 sync
+
+# No server, wait all connection retries : SC--
+# abortonclose in defaults section but disabled by backend
+client c6 -connect ${h2_fe2_sock} {
+ txreq -url /c6
+ rxresp
+ expect resp.status == 503
+} -run
+
+
+syslog S2 -wait
diff --git a/reg-tests/http-messaging/http_bodyless_response.vtc b/reg-tests/http-messaging/http_bodyless_response.vtc
new file mode 100644
index 0000000..6b53bc4
--- /dev/null
+++ b/reg-tests/http-messaging/http_bodyless_response.vtc
@@ -0,0 +1,128 @@
+varnishtest "A test to be sure payload is skipped for bodyless responses"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "skipped data"
+
+ rxreq
+ txresp \
+ -status 200 \
+ -bodylen 50000
+
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 15
+ chunkedlen 1024
+ chunkedlen 4048
+ chunkedlen 50000
+ chunkedlen 0
+
+ rxreq
+ txresp \
+ -status 200 \
+ -body "last response"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ server s1 ${s1_addr}:${s1_port}
+
+ listen int
+ bind "fd@${int}" proto h2
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ server s1 ${s1_addr}:${s1_port}
+ #server s1 ${h1_fe1_addr}:${h1_fe1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s1 ${h1_int_addr}:${h1_int_port} proto h2
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
diff --git a/reg-tests/http-messaging/http_bodyless_spliced_response.vtc b/reg-tests/http-messaging/http_bodyless_spliced_response.vtc
new file mode 100644
index 0000000..73916f2
--- /dev/null
+++ b/reg-tests/http-messaging/http_bodyless_spliced_response.vtc
@@ -0,0 +1,89 @@
+varnishtest "A test to be sure payload is skipped for bodyless responses when splicing is used"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -body "skipped data"
+
+ rxreq
+ txresp \
+ -status 200 \
+ -bodylen 50000
+
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen -hdr "Transfer-Encoding: chunked"
+ chunkedlen 15
+ chunkedlen 1024
+ chunkedlen 4048
+ chunkedlen 50000
+ chunkedlen 0
+
+ rxreq
+ txresp \
+ -status 200 \
+ -body "last response"
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ # Rewrite the method to be sure to get the response payload
+ # on the server side
+ http-request set-method GET
+ option splice-response
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq \
+ -req "HEAD" \
+ -url "/req1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req2"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ txreq \
+ -req "HEAD" \
+ -url "/req3"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == ""
+
+ # The last one has a body and validates that the connection was not closed
+ # unexpectedly and that no payload was received for previous requests
+ txreq \
+ -req "GET" \
+ -url "/req4"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "last response"
+} -run
diff --git a/reg-tests/http-messaging/http_msg_full_on_eom.vtc b/reg-tests/http-messaging/http_msg_full_on_eom.vtc
new file mode 100644
index 0000000..2edba7d
--- /dev/null
+++ b/reg-tests/http-messaging/http_msg_full_on_eom.vtc
@@ -0,0 +1,62 @@
+varnishtest "cannot add the HTX EOM block because the buffer is full"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_VERSION_BELOW=2.4
+#REGTEST_TYPE=devel
+
+# This test checks that an HTTP message is properly processed when we failed to
+# add the HTX EOM block in an HTX message during the parsing because the buffer
+# is full. Some space must be released in the buffer to make it possible. This
+# requires an extra pass in the H1 multiplexer. Here, we must be sure the mux is
+# called while there is no more incoming data.
+
+server s1 {
+ rxreq
+ expect req.bodylen == 15200
+ txresp -bodylen 15220
+} -start
+
+syslog S -level info {
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.bufsize 16384
+ tune.maxrewrite 1024
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ tcp-response inspect-delay 100ms
+ tcp-response content accept if { res.len gt 15272 }
+ tcp-response content reject
+
+ http-response deny if { internal.htx.has_eom -m bool } or { internal.htx.free_data gt 1024 }
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe1
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe1}"
+ http-request deny if ! { req.body_len eq 15200 } or { internal.htx.has_eom -m bool } or { internal.htx.free_data gt 1024 }
+ use_backend be1
+} -start
+
+haproxy h1 -cli {
+ send "trace h1 sink stderr; trace h1 level developer; trace h1 verbosity complete; trace h1 start now"
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -bodylen 15200
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 15220
+} -run
diff --git a/reg-tests/http-messaging/http_request_buffer.vtc b/reg-tests/http-messaging/http_request_buffer.vtc
new file mode 100644
index 0000000..302db4a
--- /dev/null
+++ b/reg-tests/http-messaging/http_request_buffer.vtc
@@ -0,0 +1,135 @@
+varnishtest "A test for http-request-buffer option"
+feature ignore_unknown_macro
+
+
+# This test checks HTTP request buffering feature.
+# We run one server s1 which can serve only one client (no -repeat argument here).
+# c1 client uses a malformed request which is not transferred to s1 server
+# thanks to "http-buffer-request". If this was the case, c2 client
+# could not connect to s1 server and this would lead to make this test fail.
+
+barrier b1 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ expect req.bodylen == 257
+ txresp
+
+ accept
+
+ rxreq
+ expect req.bodylen == 2
+ txresp
+} -start
+
+syslog S -level info {
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 fe1/<NOSRV> .* 408 .* - - cR-- .* .* \"GET /this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url-this-is-a-long-url HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe1 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"GET / HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/srv1 [0-9]*/[0-9]*/[0-9]*/[0-9]*/[0-9]* 200 .* - - ---- .* .* \"POST /1 HTTP/1\\.1\""
+ barrier b1 sync
+
+ recv
+ expect ~ "[^:\\[ ]*\\[[0-9]*\\]: .* .* fe2 be1/<NOSRV> [0-9]*/-1/-1/-1/[0-9]* 400 .* - - CR-- .* .* \"POST /2 HTTP/1\\.1\""
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client 100
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe1
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe1}"
+ use_backend be1
+
+ frontend fe2
+ timeout client 10s
+ option httplog
+ option http-buffer-request
+ log ${S_addr}:${S_port} local0 debug err
+ bind "fd@${fe2}"
+ use_backend be1
+} -start
+
+# 1 byte of the payload is missing.
+# ==> The request must time out with a 408 response
+client c1 -connect ${h1_fe1_sock} {
+ send "GET"
+ send " "
+ send "/this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send "-this-is-a-long-url"
+ send " HTT"
+ send "P/1.1"
+ send "\r"
+ send "\n"
+ send "Content-Length: 209\r\n\r\n"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ send "abcdefghijklmnopqrstuvwxyz"
+ rxresp
+ expect resp.status == 408
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Payload is fully sent
+# ==> Request must be sent to the server. A 200 must be received
+client c2 -connect ${h1_fe1_sock} {
+ txreq -bodylen 257
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Payload is fully sent in 2 steps (with a small delay, smaller than the client
+# timeout) and split on a chunk size.
+# ==> Request must be sent to the server. A 200 must be received
+client c3 -connect ${h1_fe2_sock} {
+ send "POST /1 HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
+ delay 0.01
+ send "\r\n1\r\n0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+} -run
+
+# Wait matching on log message
+barrier b1 sync
+
+# Last CRLF of the request payload is missing but payload is sent in 2 steps
+# (with a small delay, smaller than the client timeout) and split on a chunk
+# size. The client aborts before sending the last CRLF.
+# ==> Request must be handled as an error with 'CR--' termination state.
+client c4 -connect ${h1_fe2_sock} {
+ send "POST /2 HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n1\r\n1"
+ delay 0.01
+ send "\r\n1\r\n0\r\n"
+} -run
+
+syslog S -wait
diff --git a/reg-tests/http-messaging/http_splicing.vtc b/reg-tests/http-messaging/http_splicing.vtc
new file mode 100644
index 0000000..e86680b
--- /dev/null
+++ b/reg-tests/http-messaging/http_splicing.vtc
@@ -0,0 +1,77 @@
+# This reg-test checks splicing support for the H1 multiplexer
+
+varnishtest "A test to validate h1 splicing support"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.http.content-length == "1048576"
+ expect req.bodylen == 1048576
+ txresp -status 200 -bodylen 1048576
+} -start
+
+server s2 {
+ rxreq
+ txresp -status 200 -nolen -bodylen 1048576
+} -start
+
+haproxy h1 -conf {
+ global
+ log stderr len 4096 local0 debug
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option splice-request
+ option splice-response
+ log global
+ option httplog
+
+ listen li1
+ bind "fd@${li1}"
+ id 10
+ server srv1 ${s1_addr}:${s1_port}
+
+ listen li2
+ bind "fd@${li2}"
+ id 20
+ server srv2 ${s2_addr}:${s2_port}
+} -start
+
+
+client c1 -connect ${h1_li1_sock} {
+ txreq -method POST -url "/" -bodylen 1048576
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == "1048576"
+ expect resp.bodylen == 1048576
+} -run
+
+client c2 -connect ${h1_li2_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.bodylen == 1048576
+} -run
+
+haproxy h1 -cli {
+ send "show stat typed"
+ expect ~ "F.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nF.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+
+ send "show stat typed"
+ expect ~ "F.20.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:0\nF.20.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.20.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.20.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:0"
+}
diff --git a/reg-tests/http-messaging/http_splicing_chunk.vtc b/reg-tests/http-messaging/http_splicing_chunk.vtc
new file mode 100644
index 0000000..e2e9f32
--- /dev/null
+++ b/reg-tests/http-messaging/http_splicing_chunk.vtc
@@ -0,0 +1,74 @@
+# This reg-test checks splicing support for chunked message in the H1 multiplexer
+
+varnishtest "A test to validate h1 splicing support for chunked messages"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(LINUX_SPLICE)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(FAST-FORWARD)'"
+feature cmd "$HAPROXY_PROGRAM $HAPROXY_ARGS -cc 'enabled(SPLICE)'"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 500000
+
+ txresp -status 200 -nolen \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 204800
+ chunkedlen 0
+} -start
+
+haproxy h1 -conf {
+ global
+ log stderr len 4096 local0 debug
+
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ option splice-request
+ option splice-response
+ log global
+ option httplog
+
+ listen li1
+ bind "fd@${li1}"
+ id 10
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_li1_sock} {
+ txreq -method POST -url "/" -nolen \
+ -hdr "Transfer-Encoding: chunked"
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 10000
+ chunkedlen 50000
+ chunkedlen 100000
+ chunkedlen 300000
+ chunkedlen 0
+
+ rxresp
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 1024000
+} -run
+
+haproxy h1 -cli {
+ send "show stat typed"
+ expect ~ "F.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nF.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+ send "show stat typed"
+ expect ~ "B.10.0.[[:digit:]]+.h1_spliced_bytes_in.1:MCP:u64:[1-9][[:digit:]]+\nB.10.0.[[:digit:]]+.h1_spliced_bytes_out.1:MCP:u64:[1-9][[:digit:]]+"
+}
diff --git a/reg-tests/http-messaging/http_transfer_encoding.vtc b/reg-tests/http-messaging/http_transfer_encoding.vtc
new file mode 100644
index 0000000..322dfe2
--- /dev/null
+++ b/reg-tests/http-messaging/http_transfer_encoding.vtc
@@ -0,0 +1,202 @@
+varnishtest "A test to validate Transfer-Encoding header conformance to the spec"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp -status 200
+
+ accept
+ rxreq
+ expect req.http.content-length == <undef>
+ expect req.http.transfer-encoding == "chunked"
+ expect req.bodylen == 0
+ expect req.body == ""
+ txresp -status 200
+
+ accept
+ rxreq
+ send "HTTP/1.0 200 Ok\r\n"
+ send "Transfer-Encoding: chunked\r\n\r\n"
+ send "0\r\n\r\n"
+
+ accept
+ rxreq
+ send "HTTP/1.1 200 Ok\r\n"
+ send "Transfer-Encoding: chunked\r\n"
+ send "Content-Length: 30\r\n\r\n"
+ send "0\r\n\r\nResponse splitting attach"
+
+ accept
+ rxreq
+ expect req.url == "/1"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == "trailers"
+ txresp
+
+ rxreq
+ expect req.url == "/2"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == <undef>
+ txresp
+
+ rxreq
+ expect req.url == "/3"
+ expect req.http.transfer-encoding == "chunked"
+ expect req.http.te == <undef>
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, chunked" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip, chunked" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip" \
+ -body "0\r\n\r\n"
+
+ accept
+ rxreq
+ txresp \
+ -hdr "Transfer-Encoding: gzip"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s2 ${s2_addr}:${s2_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -method POST -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "Content-Length: 31" \
+ -body "0\r\n\r\nGET /smuggled HTTP/1.1\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.connection == "close"
+ expect_close
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ send "POST / HTTP/1.0\r\n"
+ send "Transfer-Encoding: chunked\r\n\r\n"
+ send "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+ expect_close
+} -run
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 0
+ expect resp.body == ""
+ expect_close
+} -run
+
+client c4 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == <undef>
+ expect resp.http.transfer-encoding == "chunked"
+ expect resp.bodylen == 0
+ expect resp.body == ""
+} -run
+
+client c5 -connect ${h1_fe1_sock} {
+ txreq -method POST -url "/1" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: trailers, gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST -url "/2" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+
+ txreq -method POST -url "/3" -nolen \
+ -hdr "Transfer-Encoding: chunked" \
+ -hdr "TE: trailers;q=0.5" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c6 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c7 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c8 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: chunked, gzip" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c9 -connect ${h1_fe1_sock} {
+ txreq \
+ -hdr "Transfer-Encoding: gzip"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c10 -connect ${h1_fe1_sock} {
+ txreq -nolen \
+ -hdr "Transfer-Encoding: gzip, chunked" \
+ -body "0\r\n\r\n"
+ rxresp
+ expect resp.status == 422
+} -run
+
+client c11 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 502
+} -run -repeat 4
diff --git a/reg-tests/http-messaging/http_wait_for_body.vtc b/reg-tests/http-messaging/http_wait_for_body.vtc
new file mode 100644
index 0000000..a9f8191
--- /dev/null
+++ b/reg-tests/http-messaging/http_wait_for_body.vtc
@@ -0,0 +1,171 @@
+varnishtest "A test for the wait-for-body HTTP action"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+#REGTEST_TYPE=slow
+
+server s1 {
+ rxreq
+ expect req.bodylen == 1001
+ txresp
+
+ rxreq
+ expect req.bodylen == 1001
+ txresp
+} -start
+
+
+server s2 {
+ rxreq
+ send "HTTP/1.1 200 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+
+ expect_close
+ accept
+
+ rxreq
+ send "HTTP/1.1 200 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.01
+ send "1"
+
+ rxreq
+ send "HTTP/1.1 201 OK\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.1
+ send "1"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request wait-for-body time 100ms if { path /a }
+ http-request wait-for-body time 100ms at-least 1000 if { path /b }
+ use_backend be1
+
+ backend be1
+ server srv1 ${s1_addr}:${s1_port}
+
+ frontend fe2
+ bind "fd@${fe2}"
+ use_backend be2
+
+ backend be2
+ http-response wait-for-body time 100ms if { status eq 200 }
+ http-response wait-for-body time 100ms at-least 1000 if { status eq 201 }
+ server srv1 ${s2_addr}:${s2_port}
+} -start
+
+
+client c1 -connect ${h1_fe1_sock} {
+ send "GET /a HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ rxresp
+ expect resp.status == 408
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ send "GET /a HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.01
+ send "1"
+ rxresp
+ expect resp.status == 200
+
+ send "GET /b HTTP/1.1\r\n"
+ send "Content-Length: 1001\r\n\r\n"
+ delay 0.01
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ send "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789=====================================\n"
+ delay 0.1
+ send "1"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 504
+} -run
+
+client c4 -connect ${h1_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.bodylen == 1001
+
+ txreq
+ rxresp
+ expect resp.status == 201
+ expect resp.bodylen == 1001
+} -run
diff --git a/reg-tests/http-messaging/protocol_upgrade.vtc b/reg-tests/http-messaging/protocol_upgrade.vtc
new file mode 100644
index 0000000..ebb6328
--- /dev/null
+++ b/reg-tests/http-messaging/protocol_upgrade.vtc
@@ -0,0 +1,228 @@
+# This reg-test checks the full support of HTTP protocol upgrade, using a GET
+# method and a Connection: Upgrade header. The equivalent mechanism has been
+# defined in rfc8441 for HTTP/2 using CONNECT and a new pseudo-header
+# :protocol. Check that haproxy handles properly h1/h2 translation of protocol
+# upgrade requests and responses.
+
+varnishtest "h1/h2 support for protocol upgrade test"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+# http/1.1 server
+server srv_h1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "custom_protocol"
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+} -repeat 2 -start
+
+# http2 server
+server srv_h2 {
+ rxpri
+
+ stream 0 {
+ # manually send RFC8441 SETTINGS_ENABLE_CONNECT_PROTOCOL
+ sendhex "00 00 06 04 00 00 00 00 00 00 08 00 00 00 01"
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxhdrs
+ expect req.method == "CONNECT"
+ expect req.http.:scheme == "https"
+ expect req.http.:path == "/"
+ expect req.http.:authority == "127.0.0.1"
+ expect req.http.:protocol == "custom_protocol"
+
+ txresp \
+ -status 200
+ } -run
+} -repeat 2 -start
+
+# http2 server without support for RFC8441
+server srv_h2_no_ws {
+ rxpri
+
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxrst
+ } -run
+} -start
+
+# http2 server without support for RFC8441 : settings announced with value 0
+server srv_h2_no_ws2 {
+ rxpri
+
+ stream 0 {
+ # manually send RFC8441 SETTINGS_ENABLE_CONNECT_PROTOCOL with a value of 0
+ sendhex "00 00 06 04 00 00 00 00 00 00 08 00 00 00 00"
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ rxrst
+ } -run
+} -start
+
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ # h1 frontend connected to h2 frontend
+ listen frt_h1_h2
+ bind "fd@${frt_h1_h2}"
+ server feh2_srv ${hap_frt_h2_addr}:${hap_frt_h2_port} proto h2
+
+ # h2 frontend connected to srv_h1
+ listen frt_h2
+ bind "fd@${frt_h2}" proto h2
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ # h1 frontend connected to srv_h2
+ listen frt_h1
+ bind "fd@${frt_h1}"
+ server srv_h2 ${srv_h2_addr}:${srv_h2_port} proto h2
+
+ # h1 frontend connected to srv_h2_no_ws
+ listen frt_h1_no_ws
+ bind "fd@${frt_h1_no_ws}"
+ server srv_h2_no_ws ${srv_h2_no_ws_addr}:${srv_h2_no_ws_port} proto h2
+
+ # h1 frontend connected to srv_h2_no_ws2
+ listen frt_h1_no_ws2
+ bind "fd@${frt_h1_no_ws2}"
+ server srv_h2_no_ws2 ${srv_h2_no_ws2_addr}:${srv_h2_no_ws2_port} proto h2
+
+ # h2 frontend connected to h1 frontend
+ listen frt_h2_h1
+ bind "fd@${frt_h2_h1}" proto h2
+ server frt_h1 ${hap_frt_h1_addr}:${hap_frt_h1_port}
+} -start
+
+## connect to h1 translation frontend
+client c1_h1_h2 -connect ${hap_frt_h1_h2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "custom_protocol"
+} -run
+
+# connect to h2 server frontend
+client c2_h2 -connect ${hap_frt_h2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "custom_protocol"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect to h2 translation frontend
+client c3_h2_h1 -connect ${hap_frt_h2_h1_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "custom_protocol"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect to h1 server frontend
+client c4_h1 -connect ${hap_frt_h1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "custom_protocol"
+} -run
+
+# connect via h1 server frontend to h2 server without RFC8441 support
+client c5 -connect ${hap_frt_h1_no_ws_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 502
+} -run
+
+# connect via h1 server frontend to h2 server without RFC8441 support
+client c6 -connect ${hap_frt_h1_no_ws2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: custom_protocol"
+
+ rxresp
+ expect resp.status == 502
+} -run
diff --git a/reg-tests/http-messaging/scheme_based_normalize.vtc b/reg-tests/http-messaging/scheme_based_normalize.vtc
new file mode 100644
index 0000000..3edbafb
--- /dev/null
+++ b/reg-tests/http-messaging/scheme_based_normalize.vtc
@@ -0,0 +1,125 @@
+varnishtest "scheme based normalization (rfc3986 6.2.3)"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+syslog S1 -level info {
+ recv
+ expect ~ "^.* uri: GET http://hostname/ HTTP/2.0; host: {hostname}$"
+
+ recv
+ expect ~ "^.* uri: GET http://hostname:8080/ HTTP/2.0; host: {hostname:8080}$"
+
+ recv
+ expect ~ "^.* uri: GET https://hostname/ HTTP/2.0; host: {hostname}$"
+
+ recv
+ expect ~ "^.* uri: GET https://hostname:80/ HTTP/2.0; host: {hostname:80}$"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}" proto h2
+
+ http-request capture req.hdr(host) len 512
+ log-format "uri: %r; host: %hr"
+ log ${S1_addr}:${S1_port} len 2048 local0 debug err
+
+ http-request return status 200
+} -start
+
+# default port 80 with http scheme => should be normalized
+client c1 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname:80"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# port 8080 with http scheme => no normalization
+client c2 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "hostname:8080"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# default port 443 with https scheme => should be normalized
+client c3 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/" \
+ -hdr ":authority" "hostname:443"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# port 80 with https scheme => no normalization
+client c4 -connect ${h1_fe_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/" \
+ -hdr ":authority" "hostname:80"
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+syslog S1 -wait
diff --git a/reg-tests/http-messaging/srv_ws.vtc b/reg-tests/http-messaging/srv_ws.vtc
new file mode 100644
index 0000000..f0f5f8b
--- /dev/null
+++ b/reg-tests/http-messaging/srv_ws.vtc
@@ -0,0 +1,180 @@
+# This reg-test checks websocket support with regard to the server keyword
+# 'ws'
+
+varnishtest "h2 backend websocket management via server keyword"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature ignore_unknown_macro
+
+# haproxy server
+haproxy hapsrv -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ bind "fd@${fessl}" ssl crt ${testdir}/common.pem alpn h2,http/1.1
+ capture request header sec-websocket-key len 128
+ http-request set-var(txn.ver) req.ver
+ use_backend be
+
+ backend be
+ # define websocket ACL
+ acl ws_handshake hdr(upgrade) -m str websocket
+
+ # handle non-ws streams
+ http-request return status 200 if !ws_handshake
+
+ # handle ws streams
+ #capture request header sec-websocket-key len 128
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "%[capture.req.hdr(0),concat(258EAFA5-E914-47DA-95CA-C5AB0DC85B11,,),sha1,base64]" if ws_handshake
+ http-after-response set-status 101 if { status eq 200 } { res.hdr(upgrade) -m str websocket }
+ http-after-response set-header x-backend-protocol "%[var(txn.ver)]"
+} -start
+
+# haproxy LB
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ # proto X ws h1 -> websocket on h1
+ listen li
+ bind "fd@${li}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2 ws h1
+
+ # proto X ws h2 -> websocket on h2
+ listen lih2
+ bind "fd@${lih2}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2 ws h2
+
+ # alpn h2,http/1.1 ws h2 -> websocket on h2
+ listen lisslh2
+ bind "fd@${lisslh2}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1 ws h2
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+
+ # ws auto -> websocket on h1
+ listen liauto
+ bind "fd@${liauto}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port}
+
+ # alpn h2,http/1.1 ws auto -> websocket on h1
+ listen lissl
+ bind "fd@${lissl}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1 ws auto
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+ # alpn h2,http/1.1 ws auto -> websocket on h1
+ listen lisslauto
+ bind "fd@${lisslauto}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2,http/1.1
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+
+ # proto h2 ws auto -> websocket on h2
+ listen liauto2
+ bind "fd@${liauto2}"
+ server hap_srv ${hapsrv_fe_addr}:${hapsrv_fe_port} proto h2
+
+ # alpn h2 ws auto -> websocket on h2
+ listen lisslauto2
+ bind "fd@${lisslauto2}"
+ server hap_srv ${hapsrv_fessl_addr}:${hapsrv_fessl_port} ssl verify none alpn h2 ws auto
+ http-response set-header x-alpn "%[ssl_bc_alpn]"
+} -start
+
+client c1 -connect ${hap_li_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "1.1"
+} -run
+
+client c1.2 -connect ${hap_lih2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "2.0"
+} -run
+
+client c1.3 -connect ${hap_liauto_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "1.1"
+} -run
+
+client c1.4 -connect ${hap_liauto2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-backend-protocol == "2.0"
+} -run
+
+client c2 -connect ${hap_lisslauto_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "http/1.1"
+} -run
+
+client c2.2 -connect ${hap_lisslauto2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "h2"
+} -run
+
+client c2.3 -connect ${hap_lisslh2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.x-alpn == "h2"
+} -run
diff --git a/reg-tests/http-messaging/truncated.vtc b/reg-tests/http-messaging/truncated.vtc
new file mode 100644
index 0000000..7579f6d
--- /dev/null
+++ b/reg-tests/http-messaging/truncated.vtc
@@ -0,0 +1,101 @@
+varnishtest "HTTP response size tests: H2->H1 (HTX and legacy mode)"
+#REQUIRE_VERSION=1.9
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200 \
+ -nolen \
+ -hdr "Transfer-encoding: chunked"
+ # -bodylen 16300
+ #chunkedlen 16300
+ #delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+ chunkedlen 815
+ #delay 0.05
+ chunkedlen 815
+ delay 0.05
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ #log stdout format raw daemon
+ mode http
+ option http-buffer-request
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ listen feh1
+ bind "fd@${feh1}"
+ bind "fd@${feh2}" proto h2
+ http-response add-header a b
+ #http-response del-header content-length
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ # first request is valid
+ stream 1 {
+ txreq \
+ -req "GET" \
+ -scheme "https" \
+ -url "/test1.html"
+ rxhdrs
+ #delay 0.1
+ expect resp.status == 200
+ rxdata -all
+ expect resp.bodylen == 16300
+ #expect resp.chunkedlen == 16300
+ } -run
+} -repeat 2 -run
diff --git a/reg-tests/http-messaging/websocket.vtc b/reg-tests/http-messaging/websocket.vtc
new file mode 100644
index 0000000..aed55fe
--- /dev/null
+++ b/reg-tests/http-messaging/websocket.vtc
@@ -0,0 +1,211 @@
+# This reg-test is used to test respect of the websocket protocol according to
+# rfc6455.
+#
+# In particular, a request/response without a websocket key must be rejected by
+# haproxy. Note that in the tested case (h1 on both sides), haproxy does not
+# validate the key of the server but only checks its presence.
+#
+# For the case h2 client/h1 server, haproxy would add the key and validates it.
+# However, there is no way to check this case quickly at the moment using vtest.
+
+varnishtest "WebSocket test"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+# valid websocket server
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "websocket"
+ expect req.http.sec-websocket-key == "dGhlIHNhbXBsZSBub25jZQ=="
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
+
+ recv 4
+ send "PONG"
+} -start
+
+# non-conformant server: no websocket key
+server s2 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.connection == "upgrade"
+ expect req.http.upgrade == "websocket"
+
+ txresp \
+ -status 101 \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket"
+} -start
+
+# haproxy instance used as a server
+# generate a http/1.1 websocket response with the valid key
+haproxy hap_srv -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+
+ # reject if the request does not contain a websocket key
+ acl ws_handshake hdr(sec-websocket-key) -m found
+ http-request reject unless ws_handshake
+
+ # return a valid websocket handshake response
+ capture request header sec-websocket-key len 128
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "%[capture.req.hdr(0),concat(258EAFA5-E914-47DA-95CA-C5AB0DC85B11,,),sha1,base64]"
+ http-after-response set-status 101 if { status eq 200 }
+} -start
+
+# haproxy instance used as a server
+# generate a http/1.1 websocket response with an invalid key
+haproxy hap_srv_bad_key -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+
+ # reject if the request does not contain a websocket key
+ acl ws_handshake hdr(sec-websocket-key) -m found
+ http-request reject unless ws_handshake
+
+ # return an invalid websocket handshake response
+ capture request header sec-websocket-key len 128
+ http-request return status 200 hdr connection upgrade hdr upgrade websocket hdr sec-websocket-accept "invalid_key"
+ http-after-response set-status 101 if { status eq 200 }
+} -start
+
+haproxy hap -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen fe1
+ bind "fd@${fe1}"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen fe2
+ bind "fd@${fe2}"
+ server s2 ${s2_addr}:${s2_port}
+
+ listen fe3
+ bind "fd@${fe3}" proto h2
+ server hap_srv ${hap_srv_fe1_addr}:${hap_srv_fe1_port}
+
+ listen fe4
+ bind "fd@${fe4}" proto h2
+ server hap_srv_bad_key ${hap_srv_bad_key_fe1_addr}:${hap_srv_bad_key_fe1_port}
+} -start
+
+# standard request
+client c1 -connect ${hap_fe1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+ rxresp
+ expect resp.status == 101
+ expect resp.http.connection == "upgrade"
+ expect resp.http.upgrade == "websocket"
+ expect resp.http.sec-websocket-accept == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
+
+ send "PING"
+ recv 4
+} -run
+
+# missing websocket key
+client c2 -connect ${hap_fe1_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket"
+
+ rxresp
+ expect resp.status == 400
+} -run
+
+# missing key on server side
+client c3 -connect ${hap_fe2_sock} {
+ txreq \
+ -req "GET" \
+ -url "/" \
+ -hdr "host: 127.0.0.1" \
+ -hdr "connection: upgrade" \
+ -hdr "upgrade: websocket" \
+ -hdr "sec-websocket-key: dGhlIHNhbXBsZSBub25jZQ=="
+
+ rxresp
+ expect resp.status == 502
+} -run
+
+# connect with http/2 on a http/1.1 websocket server
+# the key must be provided by haproxy
+client c4 -connect ${hap_fe3_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "websocket"
+
+ rxhdrs
+ expect resp.status == 200
+ } -run
+} -run
+
+# connect with http/2 on a http/1.1 websocket server
+# however, the server will respond with an invalid key
+# haproxy is responsible for rejecting the request and returning a 502 to the client
+client c5 -connect ${hap_fe4_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+
+ stream 1 {
+ txreq \
+ -req "CONNECT" \
+ -scheme "http" \
+ -url "/" \
+ -hdr ":authority" "127.0.0.1" \
+ -hdr ":protocol" "websocket"
+
+ rxhdrs
+ expect resp.status == 502
+ } -run
+} -run
diff --git a/reg-tests/http-rules/1k.txt b/reg-tests/http-rules/1k.txt
new file mode 100644
index 0000000..bbdd7e7
--- /dev/null
+++ b/reg-tests/http-rules/1k.txt
@@ -0,0 +1,16 @@
+s313l7hzIJ5FIXCmpr+zAyFK80lNfjOIRNZSXJJn/GQVNQsBqDl3AFcb7JQt1ler
+KgBNE1LAiU02vTj4+hlW2qi4Xg1T3lshEOSSxJQN/ITQG/1KVmDiTtsjjSv8iWUj
+T403xvLKQd0FB2h00N9pwd9ApbGPYF8uG1kjnNQmzJOqQ2Pz7jUkNpF+sAAQHaRD
+ocjEucTsb676w8l9EqWNE+DK5IqoO2AK47bHbr4u38ZOwXjQWGw9MiUJZmVQEqdC
+QZlmpFuSKQiig1SZFZlmKVidf1genz6q+4BT80IFU2UE+pWiay/HcZttwM++eG7w
+n/Va7yd3D+ryK2j4rw0sOYM7Cu7AwleZeGEaCZINZmnVAWtg2OVFOTxx6jz8wNuY
+VJPb3VFD72WnnBhtbik5mEqjzVJy530sQBlGlcxi3Tivq69ZnAk55RBN0LO+jWf4
+DI4189LTIfY5WroA8AQeCCQYnzyXo5O/vDmic+uwKALlQ6TXzSuCNpHO8fL1UwHH
+7KBqxHi+/yYJ0431V/LAvRBpVFPYJ8iED7Md67GRVQWy8o+tgC1PmycJtS5ADQGO
+Jys46KjhL9cnaS3aP1YcuuGuSUOVMA7BjqPcz7r+hqYTCZ3akaY4w7AGRCZyRf8e
+finlAkgFpzKSFwaa2M6H3vUE14WzHC0hJ/bO2epjlcOeoMcgBVn5uUMYMVroAK0+
+vI9jg1RDV17oHberVmWj8VAXolDNS0pW2rt+JbqHsAVDDk/Ex3NJWFSYByHFyglQ
+cjYMwrzIsWC09ykW6WhUN6IsopLlDk7O2jcKaHxZ6WaFiVxIGFkepfNhf4wYZ6O9
+tjwMuOqTQtSjdP3MbbVEM6VpAkJW/Si1jmSW02ObMcdEpxJHTB9fC30waMM7e+T4
+FT/AlwB49un/3yYU2iUndW+wenoED9UkdbZ7uvjyu+UgQ3bMaQhX9Z9eHxhfi6Gy
+aRM2IJVeEpk5w0ifAQxrL4Wp+dFbzfGN1yPkI2UAo6WbWi63D \ No newline at end of file
diff --git a/reg-tests/http-rules/acl_cli_spaces.vtc b/reg-tests/http-rules/acl_cli_spaces.vtc
new file mode 100644
index 0000000..a554977
--- /dev/null
+++ b/reg-tests/http-rules/acl_cli_spaces.vtc
@@ -0,0 +1,77 @@
+varnishtest "haproxy ACL, CLI and mCLI spaces"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -W -S -conf {
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ http-request deny if { req.hdr(user-agent) -i -m str -f ${testdir}/agents.acl }
+
+ default_backend be1
+
+ backend be1
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "User-Agent: Mon User Agent"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "add acl ${testdir}/agents.acl Mon\\ User\\ Agent\\;"
+ expect ~ .*
+
+ send "show acl ${testdir}/agents.acl"
+ expect ~ ".*Mon User Agent.*"
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "User-Agent: Mon User Agent;"
+ rxresp
+ expect resp.status == 403
+} -run
+
+
+haproxy h1 -cli {
+ send "del acl ${testdir}/agents.acl Mon\\ User\\ Agent\\;"
+ expect ~ .*
+
+ send "show acl ${testdir}/agents.acl"
+ expect ~ .*
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "User-Agent: Mon User Agent;"
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+# Try it with the master CLI
+haproxy h1 -mcli {
+ send "@1 add acl ${testdir}/agents.acl Mon\\ User\\ Agent\\;;@1 show acl ${testdir}/agents.acl"
+ expect ~ ".*Mon User Agent;.*"
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "User-Agent: Mon User Agent;"
+ rxresp
+ expect resp.status == 403
+} -run
diff --git a/reg-tests/http-rules/agents.acl b/reg-tests/http-rules/agents.acl
new file mode 100644
index 0000000..345e6ae
--- /dev/null
+++ b/reg-tests/http-rules/agents.acl
@@ -0,0 +1 @@
+Test
diff --git a/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.map b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.map
new file mode 100644
index 0000000..9a3e8e6
--- /dev/null
+++ b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.map
@@ -0,0 +1 @@
+^(.+)_(.+)$ \2_\1
diff --git a/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc
new file mode 100644
index 0000000..55bcb5f
--- /dev/null
+++ b/reg-tests/http-rules/converters_ipmask_concat_strcmp_field_word.vtc
@@ -0,0 +1,231 @@
+varnishtest "Minimal tests for 1.9 converters: ipmask,concat,strcmp,field,word"
+feature ignore_unknown_macro
+
+# ipmask,hdr_ip tests server
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.srciphdr == "192.168.1.101"
+ expect req.http.srcmask1 == "192.168.1.0"
+ expect req.http.srcmask2 == "192.168.0.0"
+ expect req.http.srcmask3 == "192.0.0.0"
+
+ expect req.http.test1mask128 ~ "2001:db8:[0:]*:1"
+ expect req.http.test2mask64 ~ "2001:db8:[0:]+"
+ expect req.http.test2mask128 ~ "2001:db8:[0:]*:bad:c0f:ffff"
+ expect req.http.test2mask120 ~ "2001:db8:[0:]*:bad:c0f:ff00"
+ expect req.http.test2maskff00 ~ "2001:db8:[0:]*:bad:c0f:ff00"
+ expect req.http.test2maskfee0 ~ "2001:db8:[0:]*:bad:c0f:fee0"
+
+ expect req.http.test3mask64 ~ "2001:db8:c001:c01a:[0:]+"
+ expect req.http.test3mask64v2 ~ "2001:db8:c001:c01a:[0:]+"
+ expect req.http.test3mask64v3 ~ "2001:db8:c001:c01a:[0:]+"
+ expect req.http.test3maskff ~ "2001:db8:c001:c01a:[0:]*:ffff:10:[0:]+"
+ expect req.http.test3maskv2 ~ "2001:db8:c001:c01a:c001:c001:[0:]+"
+
+ expect req.http.test4mask32 == "192.168.1.101"
+
+ expect req.http.test5mask24 == "192.168.1.0"
+ expect req.http.test6mask24 == "192.168.1.0"
+ expect req.http.test6mask25 == "192.168.1.128"
+
+ expect req.http.ipv4plain == "192.168.2.1"
+ expect req.http.ipv4port == "192.168.2.1"
+ expect req.http.ipv6plain == "2001:db8:c001:c01a:ffff:ffff:20:ffff"
+ expect req.http.ipv6brackets == "2001:db8:c001:c01a:ffff:ffff:20:ffff"
+
+ txresp
+} -start
+
+# concat,strcmp,field,word tests server
+server s2 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.fieldconcat == "f1_f2_f3__f5"
+ expect req.http.fieldconcat2 == "f1_f2_f3__f5"
+ expect req.http.fieldconcat3 == "f1_f2_f3__f5"
+ expect req.http.fieldstrcmp == "0"
+
+ # field tests
+ expect req.http.fieldtest1 == "f5"
+ expect req.http.fieldtest2 == "f2_f3__f5"
+ expect req.http.fieldtest3 == "f2_f3"
+ expect req.http.fieldtest4 == "f2_f3_"
+ expect req.http.fieldtest5 == "f1_f2_f3"
+ expect req.http.okfieldtest == "ok"
+ expect req.http.qsfieldtest == "IT_IS"
+ expect req.http.qsfieldconcat == "IT_IS_ok"
+ expect req.http.fieldtest1strcmp == "0"
+
+ # word tests
+ expect req.http.wordtest1 == "f5"
+ expect req.http.wordtest2 == "f2_f3__f5"
+ expect req.http.wordtest3 == "f3__f5"
+ expect req.http.wordtest4 == "f1_f2_f3"
+ expect req.http.wordtest5 == "f1_f2"
+ expect req.http.okwordtest == "OK"
+ expect req.http.qswordtest == "Yes_It"
+ expect req.http.qswordregmtest == "It_Yes"
+ expect req.http.wordtest1strcmp == "0"
+ txresp
+} -start
+
+
+# ipmask,hdr_ip tests with accept-proxy bind
+haproxy h1 -conf {
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ # accept-proxy so test client can send src ip
+ bind "fd@${fe1}" accept-proxy
+
+ # ipmask tests w/src
+ http-request set-header Srciphdr %[src]
+ http-request set-header Srcmask1 %[src,ipmask(24)] # 192.168.1.0
+ http-request set-header Srcmask2 %[src,ipmask(16)] # 192.168.0.0
+ http-request set-header Srcmask3 %[src,ipmask(8)] # 192.0.0.0
+
+ # ipmask tests from headers
+ http-request set-header Test1mask128 %[req.hdr_ip(Addr1),ipmask(24,128)]
+
+ http-request set-header Test2mask64 %[req.hdr_ip(Addr2),ipmask(24,64)]
+ http-request set-header Test2mask128 %[req.hdr_ip(Addr2),ipmask(24,128)]
+ http-request set-header Test2mask120 %[req.hdr_ip(Addr2),ipmask(24,120)]
+ http-request set-header Test2maskff00 %[req.hdr_ip(Addr2),ipmask(24,ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00)]
+ http-request set-header Test2maskfee0 %[req.hdr_ip(Addr2),ipmask(24,ffff:ffff:ffff:ffff:ffff:ffff:ffff:fee0)]
+
+ http-request set-header Test3mask64 %[req.hdr_ip(Addr3),ipmask(24,64)]
+ http-request set-header Test3mask64v2 %[req.hdr_ip(Addr3),ipmask(24,ffff:ffff:ffff:ffff:0:0:0:0)]
+ http-request set-header Test3mask64v3 %[req.hdr_ip(Addr3),ipmask(24,ffff:ffff:ffff:ffff::)]
+ http-request set-header Test3maskff %[req.hdr_ip(Addr3),ipmask(24,ffff:ffff:ffff:ffff:0:ffff:ffff:0)]
+ http-request set-header Test3maskv2 %[req.hdr_ip(Addr3),ipmask(24,ffff:ffff:ffff:ffff:c001:c001:0000:0000)]
+
+ # ipv4 mask applied to ipv4 mapped address
+ http-request set-header Test4mask32 %[req.hdr_ip(Addr4),ipmask(32,64)]
+
+ http-request set-header Test5mask24 %[req.hdr_ip(Addr5),ipmask(24)]
+
+ http-request set-header Test6mask24 %[req.hdr_ip(Addr6),ipmask(24)]
+ http-request set-header Test6mask25 %[req.hdr_ip(Addr6),ipmask(25)]
+
+ # track addr/mask in stick table
+ http-request track-sc0 src,ipmask(24) table be1
+ http-request track-sc1 hdr_ip(Addr4),ipmask(32) table be1
+ http-request track-sc2 hdr_ip(Addr3),ipmask(24,64) table be1
+
+ # hdr_ip tests
+ http-request set-header IPv4plain %[req.hdr_ip(AddrIPv4plain)]
+ http-request set-header IPv4port %[req.hdr_ip(AddrIPv4port)]
+ http-request set-header IPv6plain %[req.hdr_ip(AddrIPv6plain)]
+ http-request set-header IPv6brackets %[req.hdr_ip(AddrIPv6brackets)]
+
+ default_backend be1
+
+ backend be1
+ stick-table type ipv6 size 20 expire 360s store gpc0,conn_cnt
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# concat,strcmp,word,field haproxy
+haproxy h2 -conf {
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe2
+ bind "fd@${fe2}"
+
+ # concat f1_f2 + _ + f3__f5 tests
+ http-request set-var(sess.field1) hdr(Field1)
+ http-request set-var(sess.field2) hdr(Field2)
+ http-request set-var(sess.fieldhdr) hdr(Fieldhdr)
+ http-request set-var(sess.fieldconcat) hdr(Field1),concat(_,sess.field2,)
+ http-request set-header Fieldconcat2 %[var(sess.field1),concat(_,sess.field2,)]
+ http-request set-header Fieldconcat3 %[hdr(Field1),concat(_,sess.field2,)]
+ http-request set-header Fieldconcat %[var(sess.fieldconcat)]
+ http-request set-header Fieldstrcmp %[hdr(Fieldhdr),strcmp(sess.fieldconcat)]
+ http-request deny unless { hdr(Fieldhdr),strcmp(sess.fieldconcat) eq 0 }
+
+ # field tests
+ http-request set-header Fieldtest1 %[hdr(Fieldhdr),field(5,_)] #f5
+ http-request set-var(sess.fieldtest1var) hdr(Fieldtest1)
+ http-request set-var(sess.okfield) path,lower,field(4,/,1) #ok
+ http-request set-header Okfieldtest %[var(sess.okfield)] #ok
+ http-request set-var(sess.qsfield) url_param(qs),upper,field(2,_,2) #IT_IS
+ http-request set-header Qsfieldtest %[var(sess.qsfield)] #IT_IS
+ http-request set-header Qsfieldconcat %[var(sess.qsfield),concat(_,sess.okfield,)] #IT_IS_ok
+ http-request set-header Fieldtest2 %[var(sess.fieldhdr),field(2,_,0)] #f2_f3__f5
+ http-request set-header Fieldtest3 %[var(sess.fieldconcat),field(2,_,2)] #f2_f3
+ http-request set-header Fieldtest4 %[hdr(Fieldconcat2),field(-2,_,3)] #f2_f3_
+ http-request set-header Fieldtest5 %[hdr(Fieldconcat3),field(-3,_,0)] #f1_f2_f3
+ http-request set-header Fieldtest1strcmp %[str(f5),strcmp(sess.fieldtest1var)]
+ http-request deny unless { str(f5),strcmp(sess.fieldtest1var) eq 0 }
+ http-request deny unless { str(ok),strcmp(sess.okfield) eq 0 }
+ http-request deny unless { str(IT_IS),strcmp(sess.qsfield) eq 0 }
+
+ # word tests
+ http-request set-header Wordtest1 %[hdr(Fieldhdr),word(4,_)] #f5
+ http-request set-var(sess.wordtest1var) hdr(Wordtest1)
+ http-request set-var(sess.okword) path,upper,word(3,/,1) #OK
+ http-request set-header Okwordtest %[var(sess.okword)] #OK
+ http-request set-var(sess.qsword) url_param(qs),word(1,_,2) #Yes_It
+ http-request set-header Qswordtest %[var(sess.qsword)] #Yes_It
+ http-request set-header Qswordregmtest %[var(sess.qsword),map_regm(${testdir}/converters_ipmask_concat_strcmp_field_word.map)] #It_Yes
+ http-request set-header Wordtest2 %[var(sess.fieldhdr),word(2,_,0)] #f2_f3__f5
+ http-request set-header Wordtest3 %[var(sess.fieldconcat),word(3,_,2)] #f3__f5
+ http-request set-header Wordtest4 %[hdr(Fieldconcat2),word(-2,_,3)] #f1_f2_f3
+ http-request set-header Wordtest5 %[hdr(Fieldconcat3),word(-3,_,0)] #f1_f2
+ http-request set-header Wordtest1strcmp %[str(f5),strcmp(sess.wordtest1var)]
+ http-request deny unless { str(f5),strcmp(sess.wordtest1var) eq 0 }
+ http-request deny unless { str(OK),strcmp(sess.okword) eq 0 }
+ http-request deny unless { str(Yes_It),strcmp(sess.qsword) eq 0 }
+
+ default_backend be2
+
+ backend be2
+ server s2 ${s2_addr}:${s2_port}
+} -start
+
+# ipmask,hdr_ip tests
+client c1 -connect ${h1_fe1_sock} -proxy2 "192.168.1.101:1234 127.0.0.1:2345" {
+ txreq -hdr "Addr1: 2001:db8::1" \
+ -hdr "Addr2: 2001:db8::bad:c0f:ffff" \
+ -hdr "Addr3: 2001:db8:c001:c01a:ffff:ffff:10:ffff" \
+ -hdr "Addr4: ::FFFF:192.168.1.101" \
+ -hdr "Addr5: 192.168.1.2" \
+ -hdr "Addr6: 192.168.1.255" \
+ -hdr "AddrIPv4plain: 192.168.2.1" \
+ -hdr "AddrIPv4port: 192.168.2.1:6789" \
+ -hdr "AddrIPv6plain: 2001:db8:c001:c01a:ffff:ffff:20:ffff" \
+ -hdr "AddrIPv6brackets: [2001:db8:c001:c01a:ffff:ffff:20:ffff]"
+ rxresp
+ expect resp.status == 200
+} -run
+
+# cli show be1 stick table
+haproxy h1 -cli {
+ send "show table be1"
+ expect ~ "^# table: be1, type: ipv6, size:20, used:3\\n0x[a-f0-9]+: key=::ffff:192\\.168\\.1\\.0 use=0 exp=[[:digit:]]+ shard=0 gpc0=0 conn_cnt=1\\n0x[a-f0-9]+: key=::ffff:192\\.168\\.1\\.101 use=0 exp=[[:digit:]]+ shard=0 gpc0=0 conn_cnt=1\\n0x[a-f0-9]+: key=2001:db8:c001:c01a:[0:]+ use=0 exp=[[:digit:]]+ shard=0 gpc0=0 conn_cnt=1\\n"
+}
+
+# concat,strcmp,word,field tests
+client c2 -connect ${h2_fe2_sock} {
+ txreq -req GET \
+ -url /is/this/Ok/or/not?qs=Yes_It_Is \
+ -hdr "Fieldhdr: f1_f2_f3__f5" \
+ -hdr "Field1: f1_f2" \
+ -hdr "Field2: f3__f5"
+ rxresp
+ expect resp.status == 200
+} -run
+
diff --git a/reg-tests/http-rules/default_rules.vtc b/reg-tests/http-rules/default_rules.vtc
new file mode 100644
index 0000000..cc726ab
--- /dev/null
+++ b/reg-tests/http-rules/default_rules.vtc
@@ -0,0 +1,159 @@
+varnishtest "Test declaration of HTTP rules in default sections"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.x-frontend == "fe"
+ expect req.http.x-backend == "be"
+ expect req.http.x-test1-frt == "def_front"
+ expect req.http.x-test1-bck == "def_back"
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults common
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ defaults def_front from common
+ http-request set-header x-frontend "%[fe_name]"
+ http-request set-var(txn.test1) "str(def_front)"
+ http-response set-header x-frontend "%[fe_name]"
+ http-response set-var(txn.test2) "str(def_front)"
+ http-after-response set-var(txn.test3) "str(def_front)"
+
+ defaults def_back from common
+ http-request set-header x-backend "%[be_name]"
+ http-request set-var(txn.test1) "str(def_back)"
+ http-response set-header x-backend "%[be_name]"
+ http-response set-var(txn.test2) "str(def_back)"
+ http-after-response set-var(txn.test3) "str(def_back)"
+
+ frontend fe from def_front
+ bind "fd@${feh1}"
+
+ http-request set-header x-test1-frt "%[var(txn.test1)]"
+ http-response set-header x-test2-frt "%[var(txn.test2)]"
+ http-after-response set-header x-test3-frt "%[var(txn.test3)]"
+
+ default_backend be
+
+ backend be from def_back
+ http-request set-header x-test1-bck "%[var(txn.test1)]"
+ http-response set-header x-test2-bck "%[var(txn.test2)]"
+ http-after-response set-header x-test3-bck "%[var(txn.test3)]"
+
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+
+haproxy h2 -conf {
+ defaults common
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ defaults def_front from common
+ http-request allow
+ http-response allow
+ http-after-response allow
+
+ defaults def_back from common
+ http-request allow
+ http-response allow
+ http-after-response allow
+
+ frontend fe from def_front
+ bind "fd@${feh2}"
+
+ http-request deny status 403
+ http-response deny status 502
+ http-after-response set-status 502
+
+ default_backend be
+
+ backend be from def_back
+ http-request deny status 403
+ http-response deny status 502
+ http-after-response set-status 502
+
+ server s2 ${s2_addr}:${s2_port}
+
+} -start
+
+haproxy h3 -conf {
+ defaults base-http
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ http-request capture hdr(Host) len 64 # idx 0
+ http-request capture hdr(X-req-1) len 32 # idx 1
+
+ frontend fe1 from base-http
+ bind "fd@${fe1h3}"
+ declare capture request len 32 # idx 2
+
+ http-request capture hdr(X-req-2) id 2
+ http-request return status 200 hdr "X-Capture-1" "%[capture.req.hdr(0)]" hdr "X-Capture-2" "%[capture.req.hdr(1)]" hdr "X-Capture-3" "%[capture.req.hdr(2)]"
+
+ frontend fe2 from base-http
+ bind "fd@${fe2h3}"
+ http-request capture hdr(X-req-2) id 1
+ http-request return status 200 hdr "X-Capture-1" "%[capture.req.hdr(0)]" hdr "X-Capture-2" "%[capture.req.hdr(1)]"
+
+} -start
+
+client c1 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-frontend == "fe"
+ expect resp.http.x-backend == "be"
+ expect resp.http.x-test2-bck == "def_back"
+ expect resp.http.x-test2-frt == "def_front"
+ expect resp.http.x-test3-bck == "def_back"
+ expect resp.http.x-test3-frt == "def_front"
+} -run
+
+client c2 -connect ${h2_feh2_sock} {
+ txreq -req GET -url /
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${h3_fe1h3_sock} {
+ txreq -req GET -url / \
+ -hdr "host: v-test" \
+ -hdr "x-req-1: val1" \
+ -hdr "x-req-2: val2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-capture-1 == "v-test"
+ expect resp.http.x-capture-2 == "val1"
+ expect resp.http.x-capture-3 == "val2"
+} -run
+
+client c4 -connect ${h3_fe2h3_sock} {
+ txreq -req GET -url / \
+ -hdr "host: v-test" \
+ -hdr "x-req-1: val1" \
+ -hdr "x-req-2: val2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-capture-1 == "v-test"
+ expect resp.http.x-capture-2 == "val2"
+ expect resp.http.x-capture-3 == "<undef>"
+} -run
diff --git a/reg-tests/http-rules/del_header.vtc b/reg-tests/http-rules/del_header.vtc
new file mode 100644
index 0000000..0f74a60
--- /dev/null
+++ b/reg-tests/http-rules/del_header.vtc
@@ -0,0 +1,93 @@
+varnishtest "del-header tests"
+
+# This config tests various http-request/response del-header operations
+# with or without specified header name matching method.
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == /
+ expect req.http.x-always == always
+ expect req.http.x-str1 == <undef>
+ expect req.http.x-str2 == <undef>
+ expect req.http.x-beg1 == <undef>
+ expect req.http.x-beg2 == <undef>
+ expect req.http.x-end1 == <undef>
+ expect req.http.x-end2 == end2
+ expect req.http.x-sub1 == <undef>
+ expect req.http.x-sub2 == <undef>
+ expect req.http.x-reg1 == <undef>
+ expect req.http.x-reg2 == <undef>
+ txresp -hdr "x-always: always" \
+ -hdr "x-str1: str1" \
+ -hdr "x-str2: str2" \
+ -hdr "x-beg1: beg1" \
+ -hdr "x-beg2: beg2" \
+ -hdr "x-end1: end1" \
+ -hdr "x-end2: end2" \
+ -hdr "x-sub1: sub1" \
+ -hdr "x-sub2: sub2" \
+ -hdr "x-reg1: reg1" \
+ -hdr "x-reg2: reg2"
+
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request del-header x-str1
+ http-request del-header x-str2 -m str
+ http-request del-header x-beg -m beg
+ http-request del-header end1 -m end
+ http-request del-header sub -m sub
+ http-request del-header ^x.reg.$ -m reg
+
+ http-response del-header x-str1
+ http-response del-header x-str2 -m str
+ http-response del-header x-beg -m beg
+ http-response del-header end1 -m end
+ http-response del-header sub -m sub
+ http-response del-header ^x.reg.$ -m reg
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -req GET -url / \
+ -hdr "x-always: always" \
+ -hdr "x-str1: str1" \
+ -hdr "x-str2: str2" \
+ -hdr "x-beg1: beg1" \
+ -hdr "x-beg2: beg2" \
+ -hdr "x-end1: end1" \
+ -hdr "x-end2: end2" \
+ -hdr "x-sub1: sub1" \
+ -hdr "x-sub2: sub2" \
+ -hdr "x-reg1: reg1" \
+ -hdr "x-reg2: reg2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-always == always
+ expect resp.http.x-str1 == <undef>
+ expect resp.http.x-str2 == <undef>
+ expect resp.http.x-beg1 == <undef>
+ expect resp.http.x-beg2 == <undef>
+ expect resp.http.x-end1 == <undef>
+ expect resp.http.x-end2 == end2
+ expect resp.http.x-sub1 == <undef>
+ expect resp.http.x-sub2 == <undef>
+ expect resp.http.x-reg1 == <undef>
+ expect resp.http.x-reg2 == <undef>
+} -run
diff --git a/reg-tests/http-rules/except-forwardfor-originalto.vtc b/reg-tests/http-rules/except-forwardfor-originalto.vtc
new file mode 100644
index 0000000..a859160
--- /dev/null
+++ b/reg-tests/http-rules/except-forwardfor-originalto.vtc
@@ -0,0 +1,143 @@
+varnishtest "Test IPv4/IPv6 except param for the forwardfor and originalto options"
+#REQUIRE_VERSION=2.4
+
+# This config tests the except parameter for the HTTP forwardfor and originalto
+# options.
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request set-src hdr(x-src)
+ http-request set-dst hdr(x-dst)
+ use_backend be1 if { path /req1 }
+ use_backend be2 if { path /req2 }
+ use_backend be3 if { path /req3 }
+ use_backend be4 if { path /req4 }
+ use_backend be5 if { path /req5 }
+
+ frontend fe2
+ bind "fd@${fe2}"
+ http-request return status 200 hdr x-ff "%[req.hdr(x-forwarded-for)]" hdr x-ot "%[req.hdr(x-original-to)]"
+
+ backend be1
+ option forwardfor except 127.0.0.1
+ option originalto except 127.0.0.1
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be2
+ option forwardfor except 10.0.0.1/25
+ option originalto except 10.0.0.1/25
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be3
+ option forwardfor except ::1
+ option originalto except ::1
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be4
+ option forwardfor except 2001:db8::1:0:0:1
+ option originalto except 2001:db8::1:0:0:1
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be5
+ option forwardfor except 2001:db8:1f89::/48
+ option originalto except 2001:db8:1f89::/48
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req1 \
+ -hdr "x-src: 127.0.0.1" \
+ -hdr "x-dst: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == <undef>
+ expect resp.http.x-ot == <undef>
+
+ txreq -req GET -url /req1 \
+ -hdr "x-src: 127.0.0.2" \
+ -hdr "x-dst: 127.0.0.2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == "127.0.0.2"
+ expect resp.http.x-ot == "127.0.0.2"
+
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 10.0.0.1" \
+ -hdr "x-dst: 10.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == <undef>
+ expect resp.http.x-ot == <undef>
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-dst: 10.0.0.128"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == "10.0.0.128"
+ expect resp.http.x-ot == "10.0.0.128"
+
+ txreq -req GET -url /req3 \
+ -hdr "x-src: ::1" \
+ -hdr "x-dst: ::1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == <undef>
+ expect resp.http.x-ot == <undef>
+
+ txreq -req GET -url /req3 \
+ -hdr "x-src: ::2" \
+ -hdr "x-dst: ::2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == "::2"
+ expect resp.http.x-ot == "::2"
+
+ txreq -req GET -url /req4 \
+ -hdr "x-src: 2001:db8::1:0:0:1" \
+ -hdr "x-dst: 2001:db8::1:0:0:1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == <undef>
+ expect resp.http.x-ot == <undef>
+
+ txreq -req GET -url /req4 \
+ -hdr "x-src: 2001:db8::1:0:0:2" \
+ -hdr "x-dst: 2001:db8::1:0:0:2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == "2001:db8::1:0:0:2"
+ expect resp.http.x-ot == "2001:db8::1:0:0:2"
+
+ txreq -req GET -url /req5 \
+ -hdr "x-src: 2001:db8:1f89::1" \
+ -hdr "x-dst: 2001:db8:1f89::1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == <undef>
+ expect resp.http.x-ot == <undef>
+
+ txreq -req GET -url /req5 \
+ -hdr "x-src: 2001:db8:1f90::1" \
+ -hdr "x-dst: 2001:db8:1f90::1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == "2001:db8:1f90::1"
+ expect resp.http.x-ot == "2001:db8:1f90::1"
+} -run
diff --git a/reg-tests/http-rules/forwarded-header-7239.vtc b/reg-tests/http-rules/forwarded-header-7239.vtc
new file mode 100644
index 0000000..a894113
--- /dev/null
+++ b/reg-tests/http-rules/forwarded-header-7239.vtc
@@ -0,0 +1,171 @@
+varnishtest "Test RFC 7239 forwarded header support (forwarded option and related converters)"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.8-dev0)'"
+
+# This config tests the HTTP forwarded option and RFC7239 related converters.
+
+feature ignore_unknown_macro
+
+#test: converters, parsing and header injection logic
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request set-src hdr(x-src)
+ http-request set-dst hdr(x-dst)
+ http-request set-header host %[str(vtest)]
+ use_backend be1 if { path /req1 }
+ use_backend be2 if { path /req2 }
+ use_backend be3 if { path /req3 }
+ use_backend be4 if { path /req4 }
+
+ frontend fe2
+ bind "fd@${fe2}"
+ http-request return status 200 hdr forwarded "%[req.hdr(forwarded)]"
+
+ backend be1
+ option forwarded
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be2
+ option forwarded for-expr src for_port-expr str(id) by by_port-expr int(10)
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be3
+ acl valid req.hdr(forwarded),rfc7239_is_valid
+ http-request return status 200 if valid
+ http-request return status 400
+
+ backend be4
+ http-request set-var(req.fnode) req.hdr(forwarded),rfc7239_field(for)
+ http-request return status 200 hdr nodename "%[var(req.fnode),rfc7239_n2nn]" hdr nodeport "%[var(req.fnode),rfc7239_n2np]"
+
+} -start
+
+#test: "default" and "no option forwarded"
+haproxy h2 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option forwarded
+
+ frontend fe1
+ bind "fd@${fe1h2}"
+ use_backend default if { path /default }
+ use_backend override if { path /override }
+ use_backend disabled if { path /disabled }
+
+ backend default
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend override
+ option forwarded host-expr str(override)
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend disabled
+ no option forwarded
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req1 \
+ -hdr "x-src: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.forwarded == "proto=http;for=127.0.0.1"
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 127.0.0.2" \
+ -hdr "x-dst: 127.0.0.3"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.forwarded == "by=\"127.0.0.3:10\";for=\"127.0.0.2:_id\""
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: for=\"unknown:132\";host=\"[::1]:65535\";by=\"_obfs:_port\";proto=https"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: for=\"127.0.0.1\";host=v.test"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: fore=\"unknown:132\""
+ rxresp
+ expect resp.status == 400
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: proto=http;proto=http"
+ rxresp
+ expect resp.status == 400
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: \""
+ rxresp
+ expect resp.status == 400
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: by=[::1]"
+ rxresp
+ expect resp.status == 400
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: by=\"[::1]\""
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: by=\"[::1]:\""
+ rxresp
+ expect resp.status == 400
+
+ txreq -req GET -url /req3 \
+ -hdr "forwarded: by=\"[::1]:3\""
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req4 \
+ -hdr "forwarded: proto=http;for=\"[::1]:_id\""
+ rxresp
+ expect resp.status == 200
+ expect resp.http.nodename == "::1"
+ expect resp.http.nodeport == "_id"
+} -run
+
+client c2 -connect ${h2_fe1h2_sock} {
+ txreq -req GET -url /default
+ rxresp
+ expect resp.status == 200
+ expect resp.http.forwarded != <undef>
+
+ txreq -req GET -url /override
+ rxresp
+ expect resp.status == 200
+ expect resp.http.forwarded == "host=\"override\""
+
+ txreq -req GET -url /disabled
+ rxresp
+ expect resp.status == 200
+ expect resp.http.forwarded == <undef>
+} -run
diff --git a/reg-tests/http-rules/h1or2_to_h1c.vtc b/reg-tests/http-rules/h1or2_to_h1c.vtc
new file mode 100644
index 0000000..3dd907e
--- /dev/null
+++ b/reg-tests/http-rules/h1or2_to_h1c.vtc
@@ -0,0 +1,233 @@
+varnishtest "Composite HTTP manipulation test (H1 and H2 clear to H1 clear)"
+
+# This config tests several http-request features and their interactions.
+# It extracts some samples, places them into variables, modifies some header
+# fields, appends multiple identical header fields, overwrites the start line
+# using several methods, then dumps the initial list of variables and the final
+# one, then applies CRC32 to these values as signatures that are easy to test.
+# Then it does it again in the backend after saving the current headers into
+# the same names prefixed by "fe-". Then it does the same on the response path.
+# If any modifications are performed, the crc values need to be adjusted to
+# match the actual values reported in the failing test's logs.
+#
+# Run it with HAPROXY_PROGRAM=$PWD/haproxy varnishtest -l -k -t 1 "$1"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp \
+ -status 234 \
+ -hdr "hdr1: val1" \
+ -hdr "hdr2: val2a" \
+ -hdr "hdr2: val2b" \
+ -hdr "hdr3: val3a, val3b" \
+ -hdr "hdr4:" \
+ -body "This is a body"
+
+ expect req.method == "GET"
+ expect req.http.fe-sl1-crc == 1874847043
+ expect req.http.fe-sl2-crc == 1142278307
+ expect req.http.fe-hdr-crc == 1719311923
+ expect req.http.be-sl1-crc == 3455320059
+ expect req.http.be-sl2-crc == 2509326257
+ expect req.http.be-hdr-crc == 3634102538
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feh1}"
+ bind "fd@${feh2}" proto h2
+
+ #### requests
+ http-request set-var(req.method) method
+ http-request set-var(req.uri) url
+ http-request set-var(req.path) path
+ http-request set-var(req.query) query
+ http-request set-var(req.param) url_param(qs_arg)
+ http-request set-var(req.cl) req.fhdr(content-length)
+
+ http-request set-header sl1 "sl1: "
+
+ http-request set-method "%[str(GET)]"
+ http-request set-uri %[str(),concat(/bu/,req.uri,/eu)]
+ http-request set-path "/bp/%[var(req.path)]/ep"
+ http-request set-query "bq&%[var(req.query)]&eq"
+
+ http-request set-header sl2 "sl2: "
+
+ http-request set-header sl1 "%[req.fhdr(sl1)] method=<%[var(req.method)]>; uri=<%[var(req.uri)]>; path=<%[var(req.path)]>;"
+ http-request set-header sl1 "%[req.fhdr(sl1)] query=<%[var(req.query)]>; param=<%[var(req.param)]>"
+ http-request set-header sl1 "%[req.fhdr(sl1)] cl=<%[var(req.cl)]>"
+ http-request set-header sl2 "%[req.fhdr(sl2)] method=<%[method]>; uri=<%[url]>; path=<%[path]>; "
+ http-request set-header sl2 "%[req.fhdr(sl2)] query=<%[query]>; param=<%[url_param(qs_arg)]>"
+ http-request set-header sl2 "%[req.fhdr(sl2)] cl=<%[req.fhdr(content-length)]>"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr1=<%[req.hdr(hdr1)]>; fhdr1=<%[req.fhdr(hdr1)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr2=<%[req.hdr(hdr2)]>; fhdr2=<%[req.fhdr(hdr2)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr3=<%[req.hdr(hdr3)]>; fhdr3=<%[req.fhdr(hdr3)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr4=<%[req.hdr(hdr4)]>; fhdr4=<%[req.fhdr(hdr4)]>;"
+
+ http-request set-header sl1-crc "%[req.fhdr(sl1),crc32]"
+ http-request set-header sl2-crc "%[req.fhdr(sl2),crc32]"
+ http-request set-header hdr-crc "%[req.fhdr(hdr),crc32]"
+
+ #### responses
+ http-response set-header be-sl1 "%[res.fhdr(sl1)]"
+ http-response set-header be-sl2 "%[res.fhdr(sl2)]"
+ http-response set-header be-hdr "%[res.fhdr(hdr)]"
+
+ http-response set-header be-sl1-crc "%[res.fhdr(sl1-crc)]"
+ http-response set-header be-sl2-crc "%[res.fhdr(sl2-crc)]"
+ http-response set-header be-hdr-crc "%[res.fhdr(hdr-crc)]"
+
+ http-response set-var(res.status) status
+ http-response set-header sl1 "sl1: "
+
+ http-response set-status 200
+
+ http-response set-header sl2 "sl2: "
+
+ http-response set-header sl1 "%[res.fhdr(sl1)] status=<%[var(res.status)]>;"
+ http-response set-header sl2 "%[res.fhdr(sl2)] status=<%[status]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr1=<%[res.hdr(hdr1)]>; fhdr1=<%[res.fhdr(hdr1)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr2=<%[res.hdr(hdr2)]>; fhdr2=<%[res.fhdr(hdr2)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr3=<%[res.hdr(hdr3)]>; fhdr3=<%[res.fhdr(hdr3)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr4=<%[res.hdr(hdr4)]>; fhdr4=<%[res.fhdr(hdr4)]>;"
+
+ http-response set-header fe-sl1-crc "%[res.fhdr(sl1),crc32]"
+ http-response set-header fe-sl2-crc "%[res.fhdr(sl2),crc32]"
+ http-response set-header fe-hdr-crc "%[res.fhdr(hdr),crc32]"
+
+ default_backend be
+
+ backend be
+ #### requests
+ http-request set-header fe-sl1 "%[req.fhdr(sl1)]"
+ http-request set-header fe-sl2 "%[req.fhdr(sl2)]"
+ http-request set-header fe-hdr "%[req.fhdr(hdr)]"
+
+ http-request set-header fe-sl1-crc "%[req.fhdr(sl1-crc)]"
+ http-request set-header fe-sl2-crc "%[req.fhdr(sl2-crc)]"
+ http-request set-header fe-hdr-crc "%[req.fhdr(hdr-crc)]"
+
+ http-request set-var(req.method) method
+ http-request set-var(req.uri) url
+ http-request set-var(req.path) path
+ http-request set-var(req.query) query
+ http-request set-var(req.param) url_param(qs_arg)
+ http-request set-var(req.cl) req.fhdr(content-length)
+
+ http-request set-header sl1 "sl1: "
+
+ http-request set-method "%[str(GET)]"
+ http-request set-uri %[str(),concat(/bu/,req.uri,/eu)]
+ http-request set-path "/bp/%[var(req.path)]/ep"
+ http-request set-query "bq&%[var(req.query)]&eq"
+
+ http-request set-header sl2 "sl2: "
+
+ http-request set-header sl1 "%[req.fhdr(sl1)] method=<%[var(req.method)]>; uri=<%[var(req.uri)]>; path=<%[var(req.path)]>;"
+ http-request set-header sl1 "%[req.fhdr(sl1)] query=<%[var(req.query)]>; param=<%[var(req.param)]>"
+ http-request set-header sl1 "%[req.fhdr(sl1)] cl=<%[var(req.cl)]>"
+ http-request set-header sl2 "%[req.fhdr(sl2)] method=<%[method]>; uri=<%[url]>; path=<%[path]>; "
+ http-request set-header sl2 "%[req.fhdr(sl2)] query=<%[query]>; param=<%[url_param(QS_arg,,i)]>"
+ http-request set-header sl2 "%[req.fhdr(sl2)] cl=<%[req.fhdr(content-length)]>"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr1=<%[req.hdr(hdr1)]>; fhdr1=<%[req.fhdr(hdr1)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr2=<%[req.hdr(hdr2)]>; fhdr2=<%[req.fhdr(hdr2)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr3=<%[req.hdr(hdr3)]>; fhdr3=<%[req.fhdr(hdr3)]>;"
+ http-request set-header hdr "%[req.fhdr(hdr)] hdr4=<%[req.hdr(hdr4)]>; fhdr4=<%[req.fhdr(hdr4)]>;"
+
+ http-request set-header be-sl1-crc "%[req.fhdr(sl1),crc32]"
+ http-request set-header be-sl2-crc "%[req.fhdr(sl2),crc32]"
+ http-request set-header be-hdr-crc "%[req.fhdr(hdr),crc32]"
+
+ #### responses
+ http-response set-var(res.status) status
+ http-response set-header sl1 "sl1: "
+
+ http-response set-status 200
+
+ http-response set-header sl2 "sl2: "
+
+ http-response set-header sl1 "%[res.fhdr(sl1)] status=<%[var(res.status)]>;"
+ http-response set-header sl2 "%[res.fhdr(sl2)] status=<%[status]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr1=<%[res.hdr(hdr1)]>; fhdr1=<%[res.fhdr(hdr1)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr2=<%[res.hdr(hdr2)]>; fhdr2=<%[res.fhdr(hdr2)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr3=<%[res.hdr(hdr3)]>; fhdr3=<%[res.fhdr(hdr3)]>;"
+ http-response set-header hdr "%[res.fhdr(hdr)] hdr4=<%[res.hdr(hdr4)]>; fhdr4=<%[res.fhdr(hdr4)]>;"
+
+ http-response set-header sl1-crc "%[res.fhdr(sl1),crc32]"
+ http-response set-header sl2-crc "%[res.fhdr(sl2),crc32]"
+ http-response set-header hdr-crc "%[res.fhdr(hdr),crc32]"
+ http-response allow
+ http-response deny # must not be evaluated
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1h1 -connect ${h1_feh1_sock} {
+ txreq \
+ -req GET \
+ -url /path/to/file.extension?qs_arg=qs_value \
+ -hdr "content-length: 000, 00" \
+ -hdr "hdr1: val1" \
+ -hdr "hdr2: val2a" \
+ -hdr "hdr2: val2b" \
+ -hdr "hdr3: val3a, val3b" \
+ -hdr "hdr4:"
+ rxresp
+
+ expect resp.status == 200
+ expect resp.http.be-sl1-crc == 487202719
+ expect resp.http.be-sl2-crc == 561949791
+ expect resp.http.be-hdr-crc == 1719311923
+ expect resp.http.fe-sl1-crc == 146151597
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 3634102538
+ expect resp.bodylen == 14
+ expect resp.body == "This is a body"
+} -run
+
+client c1h2 -connect ${h1_feh2_sock} {
+ txpri
+ stream 0 {
+ txsettings
+ rxsettings
+ txsettings -ack
+ rxsettings
+ expect settings.ack == true
+ } -run
+ stream 1 {
+ # warning: -req, -scheme, -url MUST be placed first otherwise
+ # the H2 protocol is invalid since they are pseudo-headers
+ txreq \
+ -req GET \
+ -scheme "https" \
+ -url /path/to/file.extension?qs_arg=qs_value \
+ -hdr "content-length" "000, 00" \
+ -hdr "hdr1" "val1" \
+ -hdr "hdr2" " val2a" \
+ -hdr "hdr2" " val2b" \
+ -hdr "hdr3" " val3a, val3b" \
+ -hdr "hdr4" ""
+
+ rxhdrs
+ expect resp.status == 200
+ expect resp.http.be-sl1-crc == 487202719
+ expect resp.http.be-sl2-crc == 561949791
+ expect resp.http.be-hdr-crc == 1719311923
+ expect resp.http.fe-sl1-crc == 146151597
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 3634102538
+ expect resp.http.content-length == 14
+
+ rxdata -all
+ expect resp.body == "This is a body"
+ } -run
+} -run
diff --git a/reg-tests/http-rules/http_after_response.vtc b/reg-tests/http-rules/http_after_response.vtc
new file mode 100644
index 0000000..7e8cc1d
--- /dev/null
+++ b/reg-tests/http-rules/http_after_response.vtc
@@ -0,0 +1,192 @@
+varnishtest "Test HTTP response manipulation under the http-after-response rulesets"
+#REQUIRE_VERSION=2.2
+
+# This config tests various http-after-response rules for HTTP responses from a
+# server and the stats applet, but also for internal responses
+# (deny/redirect/auth/return).
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp \
+ -status 234 \
+ -hdr "hdr1: val1" \
+ -hdr "hdr2: val2a" \
+ -hdr "hdr2: val2b" \
+ -hdr "hdr3: val3a, val3b" \
+ -hdr "hdr4:" \
+ -body "This is a body"
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feh1}"
+
+ http-request deny if { path /deny }
+ http-request redirect location / if { path /redir }
+ http-request auth if { path /auth }
+
+ http-after-response allow if { status eq 403 }
+ http-after-response allow if { status eq 302 }
+ http-after-response allow if { status eq 401 }
+
+ http-after-response set-header be-sl1 "%[res.fhdr(sl1)]"
+ http-after-response set-header be-sl2 "%[res.fhdr(sl2)]"
+ http-after-response set-header be-hdr "%[res.fhdr(hdr)]"
+
+ http-after-response set-header be-sl1-crc "%[res.fhdr(sl1-crc)]"
+ http-after-response set-header be-sl2-crc "%[res.fhdr(sl2-crc)]"
+ http-after-response set-header be-hdr-crc "%[res.fhdr(hdr-crc)]"
+
+ http-after-response set-var(res.status) status
+ http-after-response set-header sl1 "sl1: "
+
+ http-after-response set-status 200
+
+ http-after-response set-header sl2 "sl2: "
+
+ http-after-response set-header sl1 "%[res.fhdr(sl1)] status=<%[var(res.status)]>;"
+ http-after-response set-header sl2 "%[res.fhdr(sl2)] status=<%[status]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr1=<%[res.hdr(hdr1)]>; fhdr1=<%[res.fhdr(hdr1)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr2=<%[res.hdr(hdr2)]>; fhdr2=<%[res.fhdr(hdr2)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr3=<%[res.hdr(hdr3)]>; fhdr3=<%[res.fhdr(hdr3)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr4=<%[res.hdr(hdr4)]>; fhdr4=<%[res.fhdr(hdr4)]>;"
+
+ http-after-response set-header fe-sl1-crc "%[res.fhdr(sl1),crc32]"
+ http-after-response set-header fe-sl2-crc "%[res.fhdr(sl2),crc32]"
+ http-after-response set-header fe-hdr-crc "%[res.fhdr(hdr),crc32]"
+
+ default_backend be
+
+ backend be
+ stats enable
+ stats uri /stats
+
+ http-request return status 234 content-type "text/plain" string "This is a body" if { path /return }
+
+ http-response deny if { capture.req.uri /deny-srv }
+
+ http-after-response allow if { status eq 502 }
+
+ http-after-response set-status 234 if { capture.req.uri /stats }
+ http-after-response add-header hdr1 val1 unless { capture.req.uri / }
+ http-after-response add-header hdr2 val2a unless { capture.req.uri / }
+ http-after-response add-header hdr2 val2b unless { capture.req.uri / }
+ http-after-response add-header hdr3 "val3a, val3b" unless { capture.req.uri / }
+ http-after-response add-header hdr4 "%[str()]" unless { capture.req.uri / }
+ http-after-response del-header content-type
+
+ http-after-response set-var(res.status) status
+ http-after-response set-header sl1 "sl1: "
+
+ http-after-response set-status 200
+
+ http-after-response set-header sl2 "sl2: "
+
+ http-after-response set-header sl1 "%[res.fhdr(sl1)] status=<%[var(res.status)]>;"
+ http-after-response set-header sl2 "%[res.fhdr(sl2)] status=<%[status]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr1=<%[res.hdr(hdr1)]>; fhdr1=<%[res.fhdr(hdr1)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr2=<%[res.hdr(hdr2)]>; fhdr2=<%[res.fhdr(hdr2)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr3=<%[res.hdr(hdr3)]>; fhdr3=<%[res.fhdr(hdr3)]>;"
+ http-after-response set-header hdr "%[res.fhdr(hdr)] hdr4=<%[res.hdr(hdr4)]>; fhdr4=<%[res.fhdr(hdr4)]>;"
+
+ http-after-response set-header sl1-crc "%[res.fhdr(sl1),crc32]"
+ http-after-response set-header sl2-crc "%[res.fhdr(sl2),crc32]"
+ http-after-response set-header hdr-crc "%[res.fhdr(hdr),crc32]"
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be-sl1-crc == 487202719
+ expect resp.http.be-sl2-crc == 561949791
+ expect resp.http.be-hdr-crc == 1719311923
+ expect resp.http.fe-sl1-crc == 146151597
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 3634102538
+ expect resp.http.content-type == <undef>
+ expect resp.bodylen == 14
+ expect resp.body == "This is a body"
+
+ txreq -req GET -url /return
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be-sl1-crc == 487202719
+ expect resp.http.be-sl2-crc == 561949791
+ expect resp.http.be-hdr-crc == 1719311923
+ expect resp.http.fe-sl1-crc == 146151597
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 3634102538
+ expect resp.http.content-type == <undef>
+ expect resp.bodylen == 14
+ expect resp.body == "This is a body"
+
+ txreq -req GET -url /stats
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be-sl1-crc == 487202719
+ expect resp.http.be-sl2-crc == 561949791
+ expect resp.http.be-hdr-crc == 1719311923
+ expect resp.http.fe-sl1-crc == 146151597
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 3634102538
+ expect resp.http.content-type == <undef>
+} -run
+
+client c2 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /deny
+ rxresp
+ expect resp.status == 403
+ expect resp.http.be-sl1 == <undef>
+ expect resp.http.be-sl2 == <undef>
+ expect resp.http.be-hdr == <undef>
+ expect resp.http.sl1 == <undef>
+ expect resp.http.sl2 == <undef>
+ expect resp.http.hdr == <undef>
+} -run
+
+client c3 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /redir
+ rxresp
+ expect resp.status == 302
+ expect resp.http.be-sl1 == <undef>
+ expect resp.http.be-sl2 == <undef>
+ expect resp.http.be-hdr == <undef>
+ expect resp.http.sl1 == <undef>
+ expect resp.http.sl2 == <undef>
+ expect resp.http.hdr == <undef>
+} -run
+
+client c4 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /auth
+ rxresp
+ expect resp.status == 401
+ expect resp.http.be-sl1 == <undef>
+ expect resp.http.be-sl2 == <undef>
+ expect resp.http.be-hdr == <undef>
+ expect resp.http.sl1 == <undef>
+ expect resp.http.sl2 == <undef>
+ expect resp.http.hdr == <undef>
+} -run
+
+client c5 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /deny-srv
+ rxresp
+ expect resp.status == 200
+ expect resp.http.be-sl1 == ""
+ expect resp.http.be-sl2 == ""
+ expect resp.http.be-hdr == ""
+ expect resp.http.fe-sl1-crc == 3104968915
+ expect resp.http.fe-sl2-crc == 561949791
+ expect resp.http.fe-hdr-crc == 623352154
+} -run
diff --git a/reg-tests/http-rules/http_return.vtc b/reg-tests/http-rules/http_return.vtc
new file mode 100644
index 0000000..ae96775
--- /dev/null
+++ b/reg-tests/http-rules/http_return.vtc
@@ -0,0 +1,99 @@
+varnishtest "Test the HTTP return action"
+#REQUIRE_VERSION=2.2
+
+# This config tests the HTTP return action.
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request return if { path /def-1 }
+ http-request return hdr "x-custom-hdr" "%[url]" if { path /def-2 }
+ http-request return status 403 if { path /def-3 }
+ http-request return content-type "text/plain" if { path /def-4 }
+
+ http-request return content-type "text/plain" string "hello" hdr "x-custom-hdr" "%[url]" if { path /string }
+ http-request return content-type "text/plain" lf-string "path is %[url]" hdr "x-custom-hdr" "%[url]" if { path /lf-string }
+ http-request return content-type "text/plain" file /dev/null hdr "x-custom-hdr" "%[url]" if { path /empty-file }
+ http-request return content-type "text/plain" file ${testdir}/1k.txt hdr "x-custom-hdr" "%[url]" if { path /file }
+ http-request return content-type "text/plain" lf-file ${testdir}/lf-file.txt hdr "x-custom-hdr" "%[url]" if { path /lf-file }
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /def-1
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 0
+ expect resp.http.content-type == <undef>
+ expect resp.http.x-custom-hdr == <undef>
+
+ txreq -req GET -url /def-2
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 0
+ expect resp.http.content-type == <undef>
+ expect resp.http.x-custom-hdr == "/def-2"
+
+ txreq -req GET -url /def-3
+ rxresp
+ expect resp.status == 403
+ expect resp.http.content-length == 0
+ expect resp.http.content-type == <undef>
+
+ txreq -req GET -url /def-4
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 0
+ expect resp.http.content-type == <undef>
+
+ txreq -req GET -url /string
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 5
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.x-custom-hdr == "/string"
+ expect resp.body == "hello"
+
+ txreq -req GET -url /lf-string
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 18
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.x-custom-hdr == "/lf-string"
+ expect resp.body == "path is /lf-string"
+
+ txreq -req GET -url /empty-file
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 0
+ expect resp.http.content-type == <undef>
+ expect resp.http.x-custom-hdr == "/empty-file"
+
+ txreq -req GET -url /file
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 1024
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.x-custom-hdr == "/file"
+
+ txreq -req GET -url /lf-file
+ rxresp
+ expect resp.status == 200
+ expect resp.http.content-length == 17
+ expect resp.http.content-type == "text/plain"
+ expect resp.http.x-custom-hdr == "/lf-file"
+ expect resp.body == "path is /lf-file\n"
+} -run
diff --git a/reg-tests/http-rules/ifnone-forwardfor.vtc b/reg-tests/http-rules/ifnone-forwardfor.vtc
new file mode 100644
index 0000000..a743b10
--- /dev/null
+++ b/reg-tests/http-rules/ifnone-forwardfor.vtc
@@ -0,0 +1,98 @@
+varnishtest "Test if-none param for the forwardfor option"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+
+# This config tests the if-none parameter for the HTTP forwardfor option.
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend none
+ bind "fd@${none}"
+ http-request set-src hdr(x-src)
+ option forwardfor if-none
+ use_backend be1 if { path /req1 }
+ use_backend be2 if { path /req2 }
+ use_backend be3 if { path /req3 }
+
+ frontend normal
+ bind "fd@${normal}"
+ http-request set-src hdr(x-src)
+ option forwardfor
+ use_backend be1 if { path /req1 }
+ use_backend be2 if { path /req2 }
+
+
+ frontend fe2
+ bind "fd@${fe2}"
+ http-request return status 200 hdr x-ff "%[req.fhdr_cnt(x-forwarded-for)]"
+
+ backend be1
+ option forwardfor
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be2
+ option forwardfor if-none
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+
+ backend be3
+ server s1 ${h1_fe2_addr}:${h1_fe2_port}
+} -start
+
+client c1 -connect ${h1_none_sock} {
+ txreq -req GET -url /req1 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-forwarded-for: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 2
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-forwarded-for: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 1
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 10.0.0.128"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 1
+
+ txreq -req GET -url /req3 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-forwarded-for: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 1
+
+} -run
+
+client c1 -connect ${h1_normal_sock} {
+ txreq -req GET -url /req1 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-forwarded-for: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 2
+
+ txreq -req GET -url /req2 \
+ -hdr "x-src: 10.0.0.128" \
+ -hdr "x-forwarded-for: 127.0.0.1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ff == 2
+
+} -run
diff --git a/reg-tests/http-rules/lf-file.txt b/reg-tests/http-rules/lf-file.txt
new file mode 100644
index 0000000..7fda1d4
--- /dev/null
+++ b/reg-tests/http-rules/lf-file.txt
@@ -0,0 +1 @@
+path is %[url]
diff --git a/reg-tests/http-rules/map_ordering.map b/reg-tests/http-rules/map_ordering.map
new file mode 100644
index 0000000..dcd9529
--- /dev/null
+++ b/reg-tests/http-rules/map_ordering.map
@@ -0,0 +1,4 @@
+# These entries are used for list-based match ordering tests
+first.domain.tld first
+domain.tld domain
+second.domain.tld second
diff --git a/reg-tests/http-rules/map_ordering.vtc b/reg-tests/http-rules/map_ordering.vtc
new file mode 100644
index 0000000..40da465
--- /dev/null
+++ b/reg-tests/http-rules/map_ordering.vtc
@@ -0,0 +1,32 @@
+varnishtest "Test list-based matching types ordering"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ # check list ordering using map_dom (list-based match)
+ http-request return hdr dom %[req.hdr(Host),lower,map_dom(${testdir}/map_ordering.map)] if { url_beg /dom }
+} -start
+
+# Check map ordering
+client c1 -connect ${h1_fe1_sock} {
+ # first.domain.tld is above domain.tld so it should match first
+ txreq -url "/dom" -hdr "Host: first.domain.tld"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.dom == "first"
+
+ # second.domain.tld is below domain.tld so domain.tld should match first
+ txreq -url "/dom" -hdr "Host: second.domain.tld"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.dom == "domain"
+} -run
diff --git a/reg-tests/http-rules/map_redirect-be.map b/reg-tests/http-rules/map_redirect-be.map
new file mode 100644
index 0000000..c8822fc
--- /dev/null
+++ b/reg-tests/http-rules/map_redirect-be.map
@@ -0,0 +1,4 @@
+# These entries are used for use_backend rules
+test1.example.com test1_be
+test1.example.invalid test1_be
+test2.example.com test2_be
diff --git a/reg-tests/http-rules/map_redirect.map b/reg-tests/http-rules/map_redirect.map
new file mode 100644
index 0000000..c4743f6
--- /dev/null
+++ b/reg-tests/http-rules/map_redirect.map
@@ -0,0 +1,5 @@
+# These entries are used for http-request redirect rules
+example.org https://www.example.org
+subdomain.example.org https://www.subdomain.example.org
+
+/path/to/old/file /path/to/new/file
diff --git a/reg-tests/http-rules/map_redirect.vtc b/reg-tests/http-rules/map_redirect.vtc
new file mode 100644
index 0000000..f55e0d8
--- /dev/null
+++ b/reg-tests/http-rules/map_redirect.vtc
@@ -0,0 +1,200 @@
+varnishtest "haproxy host header: map / redirect tests"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev5) && (feature(PCRE) || feature(PCRE2))'"
+feature ignore_unknown_macro
+
+
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.host == "test1.example.com"
+ txresp -body "test1 ok"
+} -start
+
+server s2 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.host == "test2.example.com"
+ txresp -body "test2 ok"
+} -start
+
+server s3 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.host == "test3.example.com"
+ txresp -body "test3 ok"
+} -start
+
+server s4 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.host == "test1.example.invalid"
+ txresp -body "test1 after del map ok"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+
+ # automatically redirect matching paths from maps but skip rule on no-match
+ http-request redirect code 301 location %[path,map_str(${testdir}/map_redirect.map)] ignore-empty
+
+ # redirect Host: example.org / subdomain.example.org
+ http-request redirect prefix %[req.hdr(Host),lower,regsub(:\d+$,,),map_str(${testdir}/map_redirect.map)] code 301 if { hdr(Host),lower,regsub(:\d+$,,),map_str(${testdir}/map_redirect.map) -m found }
+
+ # set var and redirect in be1
+ http-request set-var(txn.testvar) req.hdr(Testvar),lower,regsub(:\d+$,,),map_str(${testdir}/map_redirect.map) if { hdr(Testvar),lower,regsub(:\d+$,,),map_str(${testdir}/map_redirect.map) -m found }
+
+ # use map to select backend (no default map value)
+ use_backend %[req.hdr(Host),lower,map_dom(${testdir}/map_redirect-be.map)] if { hdr_dom(Host) -i test1.example.com || hdr_dom(Host) -i test2.example.com }
+
+ # use map to select backend with default value(test3_be)
+ use_backend %[req.hdr(Host),lower,map_dom(${testdir}/map_redirect-be.map,test3_be)] if { hdr_dom(Host) -m end -i example.com }
+
+ # use map(after del map test1.example.com) default value(test4_be)
+ use_backend %[req.hdr(Host),lower,map_dom(${testdir}/map_redirect-be.map,test4_be)] if { hdr_dom(Host) -m end -i example.invalid }
+
+ default_backend be1
+
+ backend be1
+ http-request redirect prefix %[var(txn.testvar)] code 301 if { var(txn.testvar) -m found }
+ http-request deny
+
+ backend test1_be
+ server s1 ${s1_addr}:${s1_port}
+
+ backend test2_be
+ server s2 ${s2_addr}:${s2_port}
+
+ backend test3_be
+ server s3 ${s3_addr}:${s3_port}
+
+ backend test4_be
+ server s4 ${s4_addr}:${s4_port}
+} -start
+
+# Check map redirects
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: example.org:8443"
+ rxresp
+ expect resp.status == 301
+ expect resp.http.location ~ "https://www.example.org"
+
+ txreq -url /path/to/old/file
+ rxresp
+ expect resp.status == 301
+ expect resp.http.location ~ "/path/to/new/file"
+
+ # Closes connection
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: subdomain.example.org"
+ rxresp
+ expect resp.status == 301
+ expect resp.http.location ~ "https://www.subdomain.example.org"
+ # Closes connection
+} -run
+
+client c3 -connect ${h1_fe1_sock} {
+ # redirect on Testvar header
+ txreq -hdr "Testvar: subdomain.example.org"
+ rxresp
+ expect resp.status == 301
+ expect resp.http.location ~ "https://www.subdomain.example.org"
+ # Closes connection
+} -run
+
+client c4 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: www.subdomain.example.org"
+ rxresp
+ expect resp.status == 403
+ # Closes connection
+} -run
+
+client c5 -connect ${h1_fe1_sock} {
+ txreq -hdr "Testvar: www.subdomain.example.org"
+ rxresp
+ expect resp.status == 403
+ # Closes connection
+} -run
+
+client c6 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: :8443example.org"
+ rxresp
+ expect resp.status == 403
+ # Closes connection
+} -run
+
+# Check map backend selection
+client c7 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: test1.example.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "test1 ok"
+
+ txreq -hdr "Host: test2.example.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "test2 ok"
+
+ txreq -hdr "Host: test3.example.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "test3 ok"
+} -run
+
+# cli show maps
+haproxy h1 -cli {
+ send "show map ${testdir}/map_redirect.map"
+    expect ~ "^0x[a-f0-9]+ example\\.org https://www\\.example\\.org\\n0x[a-f0-9]+ subdomain\\.example\\.org https://www\\.subdomain\\.example\\.org\\n0x[a-f0-9]+ /path/to/old/file /path/to/new/file\\n$"
+
+ send "show map ${testdir}/map_redirect-be.map"
+ expect ~ "^0x[a-f0-9]+ test1\\.example\\.com test1_be\\n0x[a-f0-9]+ test1\\.example\\.invalid test1_be\\n0x[a-f0-9]+ test2\\.example\\.com test2_be\\n$"
+}
+
+haproxy h1 -cli {
+ # clear map ${testdir}/map_redirect.map
+ send "clear map ${testdir}/map_redirect.map"
+ expect ~ "^\\n"
+
+ send "show map ${testdir}/map_redirect.map"
+ expect ~ "^\\n"
+
+ # add map ${testdir}/map_redirect.map
+ send "add map ${testdir}/map_redirect.map site1_key site1_value"
+ expect ~ "^\\n"
+
+ # add 2 more entries as payload
+ send "add map ${testdir}/map_redirect.map <<\nsite2_key site2_value\nsite3_key site3_value\n"
+ expect ~ "^\\n"
+
+ send "show map ${testdir}/map_redirect.map"
+ expect ~ "^0x[a-f0-9]+ site1_key site1_value\\n0x[a-f0-9]+ site2_key site2_value\\n0x[a-f0-9]+ site3_key site3_value\\n$"
+
+ # del map ${testdir}/map_redirect-be.map test1.example.{com,invalid}
+ send "del map ${testdir}/map_redirect-be.map test1.example.com"
+ expect ~ "^\\n"
+
+ send "del map ${testdir}/map_redirect-be.map test1.example.invalid"
+ expect ~ "^\\n"
+
+ send "show map ${testdir}/map_redirect-be.map"
+ expect ~ "^0x[a-f0-9]+ test2\\.example\\.com test2_be\\n$"
+}
+
+# Check map backend after del map
+client c8 -connect ${h1_fe1_sock} {
+ # test1.example.invalid should go to test4_be after del map
+ txreq -hdr "Host: test1.example.invalid"
+ rxresp
+ expect resp.status == 200
+ expect resp.body == "test1 after del map ok"
+} -run
diff --git a/reg-tests/http-rules/map_regm_with_backref.map b/reg-tests/http-rules/map_regm_with_backref.map
new file mode 100644
index 0000000..08ffcfb
--- /dev/null
+++ b/reg-tests/http-rules/map_regm_with_backref.map
@@ -0,0 +1 @@
+^(.*)\.(.*)$ \1_AND_\2
diff --git a/reg-tests/http-rules/map_regm_with_backref.vtc b/reg-tests/http-rules/map_regm_with_backref.vtc
new file mode 100644
index 0000000..c3b21fb
--- /dev/null
+++ b/reg-tests/http-rules/map_regm_with_backref.vtc
@@ -0,0 +1,73 @@
+#commit 271022150d7961b9aa39dbfd88e0c6a4bc48c3ee
+# BUG/MINOR: map: fix map_regm with backref
+#
+# Due to a cascade of get_trash_chunk calls the sample is
+# corrupted when we want to read it.
+#
+# The fix consist to use a temporary chunk to copy the sample
+# value and use it.
+
+varnishtest "map_regm get_trash_chunk test"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=bug
+
+syslog S1 -level notice {
+ recv info
+ # not expecting ${h1_pid} with master-worker
+ expect ~ "[^:\\[ ]\\[[[:digit:]]+\\]: .* fe1 be1/s1 [[:digit:]]+/[[:digit:]]+/[[:digit:]]+/[[:digit:]]+/[[:digit:]]+ 200 [[:digit:]]+ - - ---- .* \"GET / HTTP/(1|2)(\\.1)?\""
+} -start
+
+server s1 {
+ rxreq
+ expect req.method == "GET"
+ expect req.http.x-mapped-from-header == example_AND_org
+ expect req.http.x-mapped-from-var == example_AND_org
+ txresp
+
+ rxreq
+ expect req.method == "GET"
+ expect req.http.x-mapped-from-header == www.example_AND_org
+ expect req.http.x-mapped-from-var == www.example_AND_org
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ log ${S1_addr}:${S1_port} local0 debug err
+
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ # Remove port from Host header
+ http-request replace-value Host '(.*):.*' '\1'
+ # Store host header in variable
+ http-request set-var(txn.host) req.hdr(Host)
+ # This works correctly
+ http-request set-header X-Mapped-From-Header %[req.hdr(Host),map_regm(${testdir}/map_regm_with_backref.map,"unknown")]
+ # This breaks before commit 271022150d7961b9aa39dbfd88e0c6a4bc48c3ee
+ http-request set-header X-Mapped-From-Var %[var(txn.host),map_regm(${testdir}/map_regm_with_backref.map,"unknown")]
+
+ default_backend be1
+
+ backend be1
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -hdr "Host: example.org:8443"
+ rxresp
+ expect resp.status == 200
+
+ txreq -hdr "Host: www.example.org"
+ rxresp
+ expect resp.status == 200
+} -run
+
diff --git a/reg-tests/http-rules/normalize_uri.vtc b/reg-tests/http-rules/normalize_uri.vtc
new file mode 100644
index 0000000..ad7b44a
--- /dev/null
+++ b/reg-tests/http-rules/normalize_uri.vtc
@@ -0,0 +1,549 @@
+varnishtest "normalize-uri tests"
+#REQUIRE_VERSION=2.4
+
+# This reg-test tests the http-request normalize-uri action.
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp -hdr "connection: close"
+} -repeat 70 -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+ expose-experimental-directives
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe_path_merge_slashes
+ bind "fd@${fe_path_merge_slashes}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri path-merge-slashes
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_path_strip_dotdot
+ bind "fd@${fe_path_strip_dotdot}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri path-strip-dotdot
+ http-request set-var(txn.after) url
+
+ http-request set-uri %[var(txn.before)]
+ http-request normalize-uri path-strip-dotdot full
+ http-request set-var(txn.after_full) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+ http-response add-header after-full %[var(txn.after_full)]
+
+ default_backend be
+
+ frontend fe_sort_query_by_name
+ bind "fd@${fe_sort_query_by_name}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri query-sort-by-name
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_percent_to_uppercase
+ bind "fd@${fe_percent_to_uppercase}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri percent-to-uppercase
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_percent_to_uppercase_strict
+ bind "fd@${fe_percent_to_uppercase_strict}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri percent-to-uppercase strict
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_dot
+ bind "fd@${fe_dot}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri path-strip-dot
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_percent_decode_unreserved
+ bind "fd@${fe_percent_decode_unreserved}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri percent-decode-unreserved
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_percent_decode_unreserved_strict
+ bind "fd@${fe_percent_decode_unreserved_strict}"
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri percent-decode-unreserved strict
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_fragment_strip
+ bind "fd@${fe_fragment_strip}"
+ option accept-invalid-http-request
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri fragment-strip
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_fragment_encode
+ bind "fd@${fe_fragment_encode}"
+ option accept-invalid-http-request
+
+ http-request set-var(txn.before) url
+ http-request normalize-uri fragment-encode
+ http-request set-var(txn.after) url
+
+ http-response add-header before %[var(txn.before)]
+ http-response add-header after %[var(txn.after)]
+
+ default_backend be
+
+ frontend fe_fragment_block
+ bind "fd@${fe_fragment_block}"
+ http-request normalize-uri fragment-strip
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_fe_path_merge_slashes_sock} {
+ txreq -url "/foo/bar"
+ rxresp
+ expect resp.http.before == "/foo/bar"
+ expect resp.http.after == "/foo/bar"
+
+ txreq -url "/foo//bar"
+ rxresp
+ expect resp.http.before == "/foo//bar"
+ expect resp.http.after == "/foo/bar"
+
+ txreq -url "/foo///bar"
+ rxresp
+ expect resp.http.before == "/foo///bar"
+ expect resp.http.after == "/foo/bar"
+
+ txreq -url "///foo///bar"
+ rxresp
+ expect resp.http.before == "///foo///bar"
+ expect resp.http.after == "/foo/bar"
+
+ txreq -url "///foo/bar"
+ rxresp
+ expect resp.http.before == "///foo/bar"
+ expect resp.http.after == "/foo/bar"
+
+ txreq -url "///foo///bar///"
+ rxresp
+ expect resp.http.before == "///foo///bar///"
+ expect resp.http.after == "/foo/bar/"
+
+ txreq -url "///"
+ rxresp
+ expect resp.http.before == "///"
+ expect resp.http.after == "/"
+
+ txreq -url "/foo?bar=///"
+ rxresp
+ expect resp.http.before == "/foo?bar=///"
+ expect resp.http.after == "/foo?bar=///"
+
+ txreq -url "//foo?bar=///"
+ rxresp
+ expect resp.http.before == "//foo?bar=///"
+ expect resp.http.after == "/foo?bar=///"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c2 -connect ${h1_fe_path_strip_dotdot_sock} {
+ txreq -url "/foo/bar"
+ rxresp
+ expect resp.http.before == "/foo/bar"
+ expect resp.http.after == "/foo/bar"
+ expect resp.http.after-full == "/foo/bar"
+
+ txreq -url "/foo/.."
+ rxresp
+ expect resp.http.before == "/foo/.."
+ expect resp.http.after == "/"
+ expect resp.http.after-full == "/"
+
+ txreq -url "/foo/../"
+ rxresp
+ expect resp.http.before == "/foo/../"
+ expect resp.http.after == "/"
+ expect resp.http.after-full == "/"
+
+ txreq -url "/foo/bar/../"
+ rxresp
+ expect resp.http.before == "/foo/bar/../"
+ expect resp.http.after == "/foo/"
+ expect resp.http.after-full == "/foo/"
+
+ txreq -url "/foo/../bar"
+ rxresp
+ expect resp.http.before == "/foo/../bar"
+ expect resp.http.after == "/bar"
+ expect resp.http.after-full == "/bar"
+
+ txreq -url "/foo/../bar/"
+ rxresp
+ expect resp.http.before == "/foo/../bar/"
+ expect resp.http.after == "/bar/"
+ expect resp.http.after-full == "/bar/"
+
+ txreq -url "/foo/../../bar/"
+ rxresp
+ expect resp.http.before == "/foo/../../bar/"
+ expect resp.http.after == "/../bar/"
+ expect resp.http.after-full == "/bar/"
+
+ txreq -url "/foo//../../bar/"
+ rxresp
+ expect resp.http.before == "/foo//../../bar/"
+ expect resp.http.after == "/bar/"
+ expect resp.http.after-full == "/bar/"
+
+ txreq -url "/foo/?bar=/foo/../"
+ rxresp
+ expect resp.http.before == "/foo/?bar=/foo/../"
+ expect resp.http.after == "/foo/?bar=/foo/../"
+ expect resp.http.after-full == "/foo/?bar=/foo/../"
+
+ txreq -url "/foo/../?bar=/foo/../"
+ rxresp
+ expect resp.http.before == "/foo/../?bar=/foo/../"
+ expect resp.http.after == "/?bar=/foo/../"
+ expect resp.http.after-full == "/?bar=/foo/../"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+ expect resp.http.after-full == "*"
+} -run
+
+client c3 -connect ${h1_fe_sort_query_by_name_sock} {
+ txreq -url "/?a=a"
+ rxresp
+ expect resp.http.before == "/?a=a"
+ expect resp.http.after == "/?a=a"
+
+ txreq -url "/?a=a&z=z"
+ rxresp
+ expect resp.http.before == "/?a=a&z=z"
+ expect resp.http.after == "/?a=a&z=z"
+
+ txreq -url "/?z=z&a=a"
+ rxresp
+ expect resp.http.before == "/?z=z&a=a"
+ expect resp.http.after == "/?a=a&z=z"
+
+ txreq -url "/?a=z&z=a"
+ rxresp
+ expect resp.http.before == "/?a=z&z=a"
+ expect resp.http.after == "/?a=z&z=a"
+
+ txreq -url "/?z=a&a=z"
+ rxresp
+ expect resp.http.before == "/?z=a&a=z"
+ expect resp.http.after == "/?a=z&z=a"
+
+ txreq -url "/?c&b&a&z&x&y"
+ rxresp
+ expect resp.http.before == "/?c&b&a&z&x&y"
+ expect resp.http.after == "/?a&b&c&x&y&z"
+
+ txreq -url "/?a=&aa=&aaa=&aaaa="
+ rxresp
+ expect resp.http.before == "/?a=&aa=&aaa=&aaaa="
+ expect resp.http.after == "/?a=&aa=&aaa=&aaaa="
+
+ txreq -url "/?aaaa=&a=&aa=&aaa="
+ rxresp
+ expect resp.http.before == "/?aaaa=&a=&aa=&aaa="
+ expect resp.http.after == "/?a=&aa=&aaa=&aaaa="
+
+ txreq -url "/?a=5&a=3&a=1&a=2&a=4"
+ rxresp
+ expect resp.http.before == "/?a=5&a=3&a=1&a=2&a=4"
+ expect resp.http.after == "/?a=5&a=3&a=1&a=2&a=4"
+
+ txreq -url "/?a=5&b=3&a=1&a=2&b=4"
+ rxresp
+ expect resp.http.before == "/?a=5&b=3&a=1&a=2&b=4"
+ expect resp.http.after == "/?a=5&a=1&a=2&b=3&b=4"
+
+ txreq -url "/"
+ rxresp
+ expect resp.http.before == "/"
+ expect resp.http.after == "/"
+
+ txreq -url "/?"
+ rxresp
+ expect resp.http.before == "/?"
+ expect resp.http.after == "/?"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c4 -connect ${h1_fe_percent_to_uppercase_sock} {
+ txreq -url "/a?a=a"
+ rxresp
+ expect resp.http.before == "/a?a=a"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%aa?a=%aa"
+ rxresp
+ expect resp.http.before == "/%aa?a=%aa"
+ expect resp.http.after == "/%AA?a=%AA"
+
+ txreq -url "/%zz?a=%zz"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.before == "/%zz?a=%zz"
+ expect resp.http.after == "/%zz?a=%zz"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c5 -connect ${h1_fe_percent_to_uppercase_strict_sock} {
+ txreq -url "/a?a=a"
+ rxresp
+ expect resp.http.before == "/a?a=a"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%aa?a=%aa"
+ rxresp
+ expect resp.http.before == "/%aa?a=%aa"
+ expect resp.http.after == "/%AA?a=%AA"
+
+ txreq -url "/%zz?a=%zz"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c6 -connect ${h1_fe_dot_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.http.before == "/"
+ expect resp.http.after == "/"
+
+ txreq -url "/a/b"
+ rxresp
+ expect resp.http.before == "/a/b"
+ expect resp.http.after == "/a/b"
+
+ txreq -url "/."
+ rxresp
+ expect resp.http.before == "/."
+ expect resp.http.after == "/"
+
+ txreq -url "/./"
+ rxresp
+ expect resp.http.before == "/./"
+ expect resp.http.after == "/"
+
+ txreq -url "/a/."
+ rxresp
+ expect resp.http.before == "/a/."
+ expect resp.http.after == "/a/"
+
+ txreq -url "/a."
+ rxresp
+ expect resp.http.before == "/a."
+ expect resp.http.after == "/a."
+
+ txreq -url "/.a"
+ rxresp
+ expect resp.http.before == "/.a"
+ expect resp.http.after == "/.a"
+
+ txreq -url "/a/."
+ rxresp
+ expect resp.http.before == "/a/."
+ expect resp.http.after == "/a/"
+
+ txreq -url "/a/./"
+ rxresp
+ expect resp.http.before == "/a/./"
+ expect resp.http.after == "/a/"
+
+ txreq -url "/a/./a"
+ rxresp
+ expect resp.http.before == "/a/./a"
+ expect resp.http.after == "/a/a"
+
+ txreq -url "/a/../"
+ rxresp
+ expect resp.http.before == "/a/../"
+ expect resp.http.after == "/a/../"
+
+ txreq -url "/a/../a"
+ rxresp
+ expect resp.http.before == "/a/../a"
+ expect resp.http.after == "/a/../a"
+
+ txreq -url "/?a=/./"
+ rxresp
+ expect resp.http.before == "/?a=/./"
+ expect resp.http.after == "/?a=/./"
+} -run
+
+client c7 -connect ${h1_fe_percent_decode_unreserved_sock} {
+ txreq -url "/a?a=a"
+ rxresp
+ expect resp.http.before == "/a?a=a"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%61?%61=%61"
+ rxresp
+ expect resp.http.before == "/%61?%61=%61"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%3F?foo=bar"
+ rxresp
+ expect resp.http.before == "/%3F?foo=bar"
+ expect resp.http.after == "/%3F?foo=bar"
+
+ txreq -url "/%%36%36"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.before == "/%%36%36"
+ expect resp.http.after == "/%66"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c8 -connect ${h1_fe_percent_decode_unreserved_strict_sock} {
+ txreq -url "/a?a=a"
+ rxresp
+ expect resp.http.before == "/a?a=a"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%61?%61=%61"
+ rxresp
+ expect resp.http.before == "/%61?%61=%61"
+ expect resp.http.after == "/a?a=a"
+
+ txreq -url "/%3F?foo=bar"
+ rxresp
+ expect resp.http.before == "/%3F?foo=bar"
+ expect resp.http.after == "/%3F?foo=bar"
+
+ txreq -url "/%%36%36"
+ rxresp
+ expect resp.status == 400
+} -run
+
+client c9 -connect ${h1_fe_fragment_strip_sock} {
+ txreq -url "/#foo"
+ rxresp
+ expect resp.http.before == "/#foo"
+ expect resp.http.after == "/"
+
+ txreq -url "/%23foo"
+ rxresp
+ expect resp.http.before == "/%23foo"
+ expect resp.http.after == "/%23foo"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c10 -connect ${h1_fe_fragment_encode_sock} {
+ txreq -url "/#foo"
+ rxresp
+ expect resp.http.before == "/#foo"
+ expect resp.http.after == "/%23foo"
+
+ txreq -url "/#foo/#foo"
+ rxresp
+ expect resp.http.before == "/#foo/#foo"
+ expect resp.http.after == "/%23foo/%23foo"
+
+ txreq -url "/%23foo"
+ rxresp
+ expect resp.http.before == "/%23foo"
+ expect resp.http.after == "/%23foo"
+
+ txreq -req OPTIONS -url "*"
+ rxresp
+ expect resp.http.before == "*"
+ expect resp.http.after == "*"
+} -run
+
+client c11 -connect ${h1_fe_fragment_block_sock} {
+ txreq -url "/#foo"
+ rxresp
+ expect resp.status == 400
+} -run
diff --git a/reg-tests/http-rules/path_and_pathq.vtc b/reg-tests/http-rules/path_and_pathq.vtc
new file mode 100644
index 0000000..31e85be
--- /dev/null
+++ b/reg-tests/http-rules/path_and_pathq.vtc
@@ -0,0 +1,64 @@
+varnishtest "path vs pathq tests"
+#REQUIRE_VERSION=2.2
+
+# This config tests various http request rules (set/replace) manipulating the
+# path, with or without the query-string. It also test path and pathq sample
+# fetches.
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.url == /regtest/foo/fe/req1/bar?param1=val1&param2=val2
+ expect req.http.x-path == /req1
+ expect req.http.x-pathq == /req1?param1=val1&param2=val2
+ expect req.http.x-query == param1=val1&param2=val2
+ expect req.http.x-url == /req1?param1=val1&param2=val2
+ txresp
+
+ rxreq
+ expect req.url == http://127.0.0.1/regtest/foo/fe/req2/bar?param1=val1&param2=val2
+ expect req.http.x-path == /req2
+ expect req.http.x-pathq == /req2?param1=val1&param2=val2
+ expect req.http.x-query == param1=val1&param2=val2
+ expect req.http.x-url == http://127.0.0.1/req2?param1=val1&param2=val2
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ http-request add-header x-path %[path]
+ http-request add-header x-pathq %[pathq]
+ http-request add-header x-query %[query]
+ http-request add-header x-url %[url]
+
+ http-request set-path /fe%[path]
+ http-request replace-path (.*) /foo\1
+ http-request replace-path (.*) \1/bar
+ http-request set-pathq %[path]?app=regtest&%[query]
+ http-request replace-pathq /([^?]*)\?app=([^&]*)&?(.*) /\2/\1?\3
+
+ default_backend be
+
+ backend be
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -req GET -url /req1?param1=val1&param2=val2
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url http://127.0.0.1/req2?param1=val1&param2=val2
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/http-rules/restrict_req_hdr_names.vtc b/reg-tests/http-rules/restrict_req_hdr_names.vtc
new file mode 100644
index 0000000..4b26e33
--- /dev/null
+++ b/reg-tests/http-rules/restrict_req_hdr_names.vtc
@@ -0,0 +1,185 @@
+varnishtest "http-restrict-req-hdr-names option tests"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.0-dev0)'"
+
+# This config tests "http-restrict-req-hdr-names" option
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ expect req.http.x-my_hdr == on
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ expect req.http.x-my_hdr == <undef>
+ txresp
+} -start
+
+server s3 {
+ rxreq
+ expect req.http.x-my_hdr == on
+ txresp
+} -start
+
+server s4 {
+ rxreq
+ expect req.http.x-my_hdr == <undef>
+ txresp
+} -start
+
+server s5 {
+ rxreq
+ expect req.http.x-my_hdr == on
+ txresp
+} -start
+
+server s6 {
+ rxreq
+ expect req.http.x_my_hdr_with_lots_of_underscores == <undef>
+ txresp
+} -start
+
+server s7 {
+ rxreq
+ expect req.http.x_my_hdr-1 == <undef>
+ expect req.http.x-my-hdr-2 == on
+ txresp
+} -start
+
+server s8 {
+ rxreq
+ expect req.http.x-my_hdr-1 == <undef>
+ expect req.http.x-my_hdr-2 == <undef>
+ txresp
+} -start
+
+server s9 {
+ rxreq
+ expect req.http.x-my-hdr-with-trailing-underscore_ == <undef>
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ use_backend be-http1 if { path /req1 }
+ use_backend be-http2 if { path /req2 }
+ use_backend be-http3 if { path /req3 }
+ use_backend be-fcgi1 if { path /req4 }
+ use_backend be-fcgi2 if { path /req5 }
+ use_backend be-fcgi3 if { path /req6 }
+ use_backend be-http4 if { path /req7 }
+ use_backend be-http5 if { path /req8 }
+ use_backend be-http6 if { path /req9 }
+ use_backend be-http7 if { path /req10 }
+
+ backend be-http1
+ server s1 ${s1_addr}:${s1_port}
+
+ backend be-http2
+ option http-restrict-req-hdr-names delete
+ server s2 ${s2_addr}:${s2_port}
+
+ backend be-http3
+ option http-restrict-req-hdr-names reject
+
+ backend be-fcgi1
+ option http-restrict-req-hdr-names preserve
+ server s3 ${s3_addr}:${s3_port}
+
+ backend be-fcgi2
+ option http-restrict-req-hdr-names delete
+ server s4 ${s4_addr}:${s4_port}
+
+ backend be-fcgi3
+ option http-restrict-req-hdr-names reject
+
+ backend be-http4
+ option http-restrict-req-hdr-names delete
+ server s6 ${s6_addr}:${s6_port}
+
+ backend be-http5
+ option http-restrict-req-hdr-names delete
+ server s7 ${s7_addr}:${s7_port}
+
+ backend be-http6
+ option http-restrict-req-hdr-names delete
+ server s8 ${s8_addr}:${s8_port}
+
+ backend be-http7
+ option http-restrict-req-hdr-names delete
+ server s9 ${s9_addr}:${s9_port}
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option http-restrict-req-hdr-names preserve
+
+ frontend fe2
+ bind "fd@${fe2}"
+ default_backend be-fcgi4
+
+ backend be-fcgi4
+ server s5 ${s5_addr}:${s5_port}
+
+ fcgi-app my-fcgi-app
+ docroot ${testdir}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req1 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req2 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req3 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 403
+
+ txreq -req GET -url /req4 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req5 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req6 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 403
+
+ txreq -req GET -url /req7 -hdr "X_my_hdr_with_lots_of_underscores: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req8 -hdr "X_my_hdr-1: on" -hdr "X-my-hdr-2: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req9 -hdr "X-my_hdr-1: on" -hdr "X-my_hdr-2: on"
+ rxresp
+ expect resp.status == 200
+
+ txreq -req GET -url /req10 -hdr "X-my-hdr-with-trailing-underscore_: on"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq -req GET -url /req1 -hdr "X-my_hdr: on"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/http-rules/strict_rw_mode.vtc b/reg-tests/http-rules/strict_rw_mode.vtc
new file mode 100644
index 0000000..14e6901
--- /dev/null
+++ b/reg-tests/http-rules/strict_rw_mode.vtc
@@ -0,0 +1,164 @@
+varnishtest "Test the strict rewriting mode"
+#REQUIRE_VERSION=2.2
+
+# This config tests the strict-mode of HTTP rules.
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp \
+ -status 200
+ expect req.method == "GET"
+ expect req.url == "/req1"
+ expect req.http.x-hdr1 == "123456789012345678901234567890123456789012345678901234567890"
+
+ accept
+ rxreq
+ txresp \
+ -status 200
+ expect req.method == "GET"
+ expect req.url == "/req3"
+ expect req.http.x-hdr1 == "123456789012345678901234567890123456789012345678901234567890"
+ expect req.http.x-hdr3 == <undef>
+} -start
+
+server s2 {
+ rxreq
+ txresp \
+ -status 200 \
+ -hdr "x-req: /req1" \
+ -bodylen 2000
+ expect req.method == "GET"
+ expect req.url == "/req1"
+
+ accept
+ rxreq
+ txresp \
+ -status 200 \
+ -hdr "x-req: /req2" \
+ -bodylen 2000
+ expect req.method == "GET"
+ expect req.url == "/req2"
+
+ accept
+ rxreq
+ txresp \
+ -status 200 \
+ -hdr "x-req: /req3" \
+ -bodylen 2000
+ expect req.method == "GET"
+ expect req.url == "/req3"
+
+ accept
+ rxreq
+ txresp \
+ -status 200 \
+ -hdr "x-req: /req4" \
+ -bodylen 2000
+ expect req.method == "GET"
+ expect req.url == "/req4"
+
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.bufsize 2048
+ tune.maxrewrite 128
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option http-buffer-request
+
+ frontend fe1
+ bind "fd@${fe1}"
+ http-request set-header x-hdr1 123456789012345678901234567890123456789012345678901234567890
+ http-request set-header x-hdr2 123456789012345678901234567890123456789012345678901234567890 if { path /req2 }
+ http-request strict-mode off if { path /req3 }
+ http-request set-header x-hdr3 123456789012345678901234567890123456789012345678901234567890 if { path /req3 }
+ default_backend be1
+
+ backend be1
+ http-request set-header x-hdr3 123456789012345678901234567890123456789012345678901234567890 if { path /req4 }
+ server s1 ${s1_addr}:${s1_port}
+
+ frontend fe2
+ bind "fd@${fe2}"
+ http-response set-header x-hdr4 123456789012345678901234567890123456789012345678901234567890 if { res.hdr(x-req) /req4 }
+ default_backend be2
+ backend be2
+ http-response set-header x-hdr1 123456789012345678901234567890123456789012345678901234567890
+ http-response set-header x-hdr2 123456789012345678901234567890123456789012345678901234567890 if { res.hdr(x-req) /req2 }
+ http-response strict-mode off if { res.hdr(x-req) /req3 }
+        http-response set-header x-hdr3 123456789012345678901234567890123456789012345678901234567890 if { res.hdr(x-req) /req3 }
+ server s2 ${s2_addr}:${s2_port}
+
+} -start
+
+client c1r1 -connect ${h1_fe1_sock} {
+ txreq \
+ -req GET \
+ -url /req1 \
+ -bodylen 2000
+ rxresp
+ expect resp.status == 200
+} -run
+client c1r2 -connect ${h1_fe1_sock} {
+ txreq \
+ -req GET \
+ -url /req2 \
+ -bodylen 2000
+ rxresp
+ expect resp.status == 500
+} -run
+client c1r3 -connect ${h1_fe1_sock} {
+ txreq \
+ -req GET \
+ -url /req3 \
+ -bodylen 2000
+ rxresp
+ expect resp.status == 200
+} -run
+client c1r4 -connect ${h1_fe1_sock} {
+ txreq \
+ -req GET \
+ -url /req4 \
+ -bodylen 2000
+ rxresp
+ expect resp.status == 500
+} -run
+
+client c2r1 -connect ${h1_fe2_sock} {
+ txreq \
+ -req GET \
+ -url /req1
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-hdr1 == "123456789012345678901234567890123456789012345678901234567890"
+} -run
+client c2r2 -connect ${h1_fe2_sock} {
+ txreq \
+ -req GET \
+ -url /req2
+ rxresp
+ expect resp.status == 500
+} -run
+client c2r3 -connect ${h1_fe2_sock} {
+ txreq \
+ -req GET \
+ -url /req3
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-hdr1 == "123456789012345678901234567890123456789012345678901234567890"
+ expect resp.http.x-hdr3 == <undef>
+} -run
+client c2r4 -connect ${h1_fe2_sock} {
+ txreq \
+ -req GET \
+ -url /req4
+ rxresp
+ expect resp.status == 500
+} -run
diff --git a/reg-tests/http-set-timeout/set_timeout.vtc b/reg-tests/http-set-timeout/set_timeout.vtc
new file mode 100644
index 0000000..a112bc5
--- /dev/null
+++ b/reg-tests/http-set-timeout/set_timeout.vtc
@@ -0,0 +1,214 @@
+varnishtest "http-request set-timeout test"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server srv_h1 -repeat 9 {
+ rxreq
+ txresp
+} -start
+
+syslog Slog1 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 5000.*$"
+} -start
+
+syslog Slog2 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 5000.*$"
+} -start
+
+syslog Slog3 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 3000.*$"
+} -start
+
+syslog Slog4 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 5000.*$"
+} -start
+
+syslog Slog5 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 3000.*$"
+} -start
+
+syslog Slog6 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 5000.*$"
+} -start
+
+syslog Slog7 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 5000.*$"
+} -start
+
+syslog Slog8 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 3000.*$"
+} -start
+
+syslog Slog9 -level info {
+ recv
+ expect ~ "^.*timeout: 5000 3000.*$"
+} -start
+
+haproxy hap -conf {
+ defaults
+ timeout connect 5s
+ timeout client 5s
+ timeout server 5s
+ log global
+
+ listen li1
+ mode http
+ bind "fd@${li1}"
+ log-format "timeout: %[be_server_timeout] %[cur_server_timeout]"
+ log ${Slog1_addr}:${Slog1_port} len 2048 local0 debug err
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ listen li2
+ mode http
+ bind "fd@${li2}"
+ log-format "timeout: %[be_server_timeout] %[cur_server_timeout]"
+ log ${Slog2_addr}:${Slog2_port} len 2048 local0 debug err
+ http-request set-timeout server 5s
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ listen li3
+ mode http
+ bind "fd@${li3}"
+ log-format "timeout: %[fe_client_timeout] %[cur_client_timeout]"
+ log ${Slog4_addr}:${Slog4_port} len 2048 local0 debug err
+ http-request set-timeout client 5s
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ log-format "timeout: %[be_server_timeout] %[cur_server_timeout]"
+ log ${Slog3_addr}:${Slog3_port} len 2048 local0 debug err
+ default_backend be1
+
+ backend be1
+ mode http
+ http-request set-timeout server int(3),mul(1000)
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ frontend fe2
+ mode http
+ bind "fd@${fe2}"
+ log-format "timeout: %[fe_client_timeout] %[cur_client_timeout]"
+ log ${Slog5_addr}:${Slog5_port} len 2048 local0 debug err
+ http-request set-timeout client int(3),mul(1000)
+ default_backend be2
+
+ backend be2
+ mode http
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ listen li4
+ mode http
+ bind "fd@${li4}"
+ log-format "timeout: %[be_server_timeout] %[cur_server_timeout]"
+ log ${Slog6_addr}:${Slog6_port} len 2048 local0 debug err
+ http-response set-timeout server 5s
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ listen li5
+ mode http
+ bind "fd@${li5}"
+ log-format "timeout: %[fe_client_timeout] %[cur_client_timeout]"
+ log ${Slog7_addr}:${Slog7_port} len 2048 local0 debug err
+ http-response set-timeout client 5s
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ frontend fe3
+ mode http
+ bind "fd@${fe3}"
+ log-format "timeout: %[be_server_timeout] %[cur_server_timeout]"
+ log ${Slog8_addr}:${Slog8_port} len 2048 local0 debug err
+ default_backend be3
+
+ backend be3
+ mode http
+ http-response set-timeout server int(3),mul(1000)
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+
+ frontend fe4
+ mode http
+ bind "fd@${fe4}"
+ log-format "timeout: %[fe_client_timeout] %[cur_client_timeout]"
+ log ${Slog9_addr}:${Slog9_port} len 2048 local0 debug err
+ http-response set-timeout client int(3),mul(1000)
+ default_backend be4
+
+ backend be4
+ mode http
+ server srv_h1 ${srv_h1_addr}:${srv_h1_port}
+} -start
+
+client c1 -connect ${hap_li1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${hap_li2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${hap_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c4 -connect ${hap_li3_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c5 -connect ${hap_fe2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c6 -connect ${hap_li4_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c7 -connect ${hap_fe3_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c8 -connect ${hap_li5_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c9 -connect ${hap_fe4_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+syslog Slog1 -wait
+syslog Slog2 -wait
+syslog Slog3 -wait
+syslog Slog4 -wait
+syslog Slog5 -wait
+syslog Slog6 -wait
+syslog Slog7 -wait
+syslog Slog8 -wait
+syslog Slog9 -wait
diff --git a/reg-tests/jwt/build_token.py b/reg-tests/jwt/build_token.py
new file mode 100755
index 0000000..2f368ab
--- /dev/null
+++ b/reg-tests/jwt/build_token.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python
+
+# JWT package can be installed via 'pip install pyjwt' command
+
+import sys
+import jwt
+import json
+
+if len(sys.argv) != 4:
+ print(sys.argv[0],"<alg> <json_to_sign> <priv_key>")
+ quit()
+
+
+alg=sys.argv[1]
+json_to_sign=sys.argv[2]
+priv_key_file=sys.argv[3]
+
+with open(priv_key_file) as file:
+ priv_key = file.read()
+
+print(jwt.encode(json.loads(json_to_sign),priv_key,algorithm=alg))
+
diff --git a/reg-tests/jwt/es256-public.pem b/reg-tests/jwt/es256-public.pem
new file mode 100644
index 0000000..ac69e6d
--- /dev/null
+++ b/reg-tests/jwt/es256-public.pem
@@ -0,0 +1,4 @@
+-----BEGIN PUBLIC KEY-----
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEjq7vv/FURryqr7ukvkrn1ek5rjCM
+hOngjD17uQTZN7fo1QRIV18lPx5O2Ed5ok/j8j/hZaFOB6TNshNmthk3dA==
+-----END PUBLIC KEY-----
diff --git a/reg-tests/jwt/es384-public.pem b/reg-tests/jwt/es384-public.pem
new file mode 100644
index 0000000..b726e12
--- /dev/null
+++ b/reg-tests/jwt/es384-public.pem
@@ -0,0 +1,5 @@
+-----BEGIN PUBLIC KEY-----
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEm1LU84aybo84c3LWQtaILtzzQsU9sT1b
+uda6u6NBJ9FrVEAQkk5tABimCcn60bxSe7s1+oM8xLsu2RuGibQzbTuL75pEs5kx
+HPQW4nmOz0zXCjvAvtQTA7vMirb/Oste
+-----END PUBLIC KEY-----
diff --git a/reg-tests/jwt/es512-public.pem b/reg-tests/jwt/es512-public.pem
new file mode 100644
index 0000000..46520ac
--- /dev/null
+++ b/reg-tests/jwt/es512-public.pem
@@ -0,0 +1,6 @@
+-----BEGIN PUBLIC KEY-----
+MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAMJ5MagTv8l+AjWZLLJ+xxV9/iNhb
+xE52xa8uMCiuBM5VHPcBLEPi1haY17abA6j0F173bK/AN7MBOpT4pAFP07IAEpF7
+QWzw+YH7hrWcT66gzfPysgpzktY+xpMFYhmLH1h9DGiJE+5t5FF5+mCg4GXi1Aez
+UzHc9yLw+6meeTWKcv4=
+-----END PUBLIC KEY-----
diff --git a/reg-tests/jwt/jws_verify.vtc b/reg-tests/jwt/jws_verify.vtc
new file mode 100644
index 0000000..43d37c7
--- /dev/null
+++ b/reg-tests/jwt/jws_verify.vtc
@@ -0,0 +1,418 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the JSON Web Token (JWT) converters to verify a token's signature.
+# It uses the http_auth_bearer sample fetch to fetch a token contained in an
+# HTTP Authorization header (with the Bearer scheme) which is the common way of
+# transmitting a token (see RFC6750). It then uses the jwt_header_query
+# converter to get the "alg" field declared in the token's JOSE header and
+# gives it to the jwt_verify converter with the appropriate certificate.
+#
+# All the supported algorithms are tested at least once (HMAC, RSA and ECDSA)
+# and the errors codes returned by jwt_verify are tested as well.
+
+varnishtest "Test the 'jwt_verify' converter and 'http_auth_bearer' sample fetch"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 24 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen main-fe
+ bind "fd@${mainfe}"
+
+ use_backend hsXXX_be if { path_beg /hs }
+ use_backend rsXXX_be if { path_beg /rs }
+ use_backend esXXX_be if { path_beg /es }
+ use_backend psXXX_be if { path_beg /ps }
+ use_backend auth_bearer_be if { path /auth_bearer }
+ default_backend dflt_be
+
+
+ backend hsXXX_be
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+
+ http-request deny unless { var(txn.jwt_alg) -m beg "HS" }
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+ http-response set-header x-jwt-alg %[var(txn.jwt_alg)]
+
+ http-response set-header x-jwt-verify-HS256 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"hmac key hs256")] if { var(txn.jwt_alg) -m str "HS256" }
+ http-response set-header x-jwt-verify-HS384 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"hmac key hs384")] if { var(txn.jwt_alg) -m str "HS384" }
+ http-response set-header x-jwt-verify-HS512 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"hmac key hs512")] if { var(txn.jwt_alg) -m str "HS512" }
+ server s1 ${s1_addr}:${s1_port}
+
+ backend rsXXX_be
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+
+ http-request deny unless { var(txn.jwt_alg) -m beg "RS" }
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+ http-response set-header x-jwt-alg %[var(txn.jwt_alg)]
+
+ http-response set-header x-jwt-verify-RS256 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "RS256" }
+ http-response set-header x-jwt-verify-RS384 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "RS384" }
+ http-response set-header x-jwt-verify-RS512 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "RS512" }
+ server s1 ${s1_addr}:${s1_port}
+
+ backend esXXX_be
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+
+ http-request deny unless { var(txn.jwt_alg) -m beg "ES" }
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+ http-response set-header x-jwt-alg %[var(txn.jwt_alg)]
+
+ http-response set-header x-jwt-verify-ES256 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/es256-public.pem")] if { var(txn.jwt_alg) -m str "ES256" }
+ http-response set-header x-jwt-verify-ES384 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/es384-public.pem")] if { var(txn.jwt_alg) -m str "ES384" }
+ http-response set-header x-jwt-verify-ES512 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/es512-public.pem")] if { var(txn.jwt_alg) -m str "ES512" }
+ server s1 ${s1_addr}:${s1_port}
+
+ backend psXXX_be
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+
+ http-request deny unless { var(txn.jwt_alg) -m beg "PS" }
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+ http-response set-header x-jwt-alg %[var(txn.jwt_alg)]
+
+ http-response set-header x-jwt-verify-PS256 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "PS256" }
+ http-response set-header x-jwt-verify-PS384 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "PS384" }
+ http-response set-header x-jwt-verify-PS512 %[var(txn.bearer),jwt_verify(txn.jwt_alg,"${testdir}/rsa-public.pem")] if { var(txn.jwt_alg) -m str "PS512" }
+ server s1 ${s1_addr}:${s1_port}
+
+
+ # This backend will only be used to test the http_auth_bearer sample fetch.
+ # No jwt_verify will then be performed.
+ backend auth_bearer_be
+ http-request set-var(txn.bearer) http_auth_bearer("Custom-Authorization")
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+
+ server s1 ${s1_addr}:${s1_port}
+
+ # This backend will mostly be used to test error cases (invalid tokens, algorithm and so on)
+ backend dflt_be
+ http-request set-var(txn.bearer) http_auth_bearer
+ http-request set-var(txn.jwt_alg) var(txn.bearer),jwt_header_query('$.alg')
+
+ http-request set-var(txn.jwt_verify) var(txn.bearer),jwt_verify(txn.jwt_alg,"unknown_cert.pem")
+
+ http-response set-header x-jwt-token %[var(txn.bearer)]
+ http-response set-header x-jwt-alg %[var(txn.jwt_alg)]
+ http-response set-header x-jwt-verify %[var(txn.jwt_verify)]
+
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+
+client c1 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"HS256","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # HMAC key : 'hmac key hs256'
+ # OpenSSL cmd : openssl dgst -sha256 -mac HMAC -macopt key:'hmac key hs256' data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/hs256" -hdr "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.hhj1mbYgezxFoYwinThsZQbckYHt4jJlRoQ7W8ksrFM"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "HS256"
+ expect resp.http.x-jwt-verify-HS256 == "1"
+} -run
+
+client c2 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"HS384","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # HMAC key : 'hmac key hs384'
+ # OpenSSL cmd : openssl dgst -sha384 -mac HMAC -macopt key:'hmac key hs384' data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/hs384" -hdr "Authorization: Bearer eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.3EsbLfl6DDh5nZMkLWg3ssCurFHyOhXP28a4PDS48aPAIoYLzHchtXmNaYI8He-R"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "HS384"
+ expect resp.http.x-jwt-verify-HS384 == "1"
+} -run
+
+client c3 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"HS512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # HMAC key : 'hmac key hs512'
+ # OpenSSL cmd : openssl dgst -sha512 -mac HMAC -macopt key:'hmac key hs512' data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/hs512" -hdr "Authorization: Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.K4Yze5N7jeJrDbJymphaH1YsFlYph5F-U75HzBRKDybrN7WBO494EgNG77mAQj4CVci_xbTD_IsqY2umO0f47A"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "HS512"
+ expect resp.http.x-jwt-verify-HS512 == "1"
+} -run
+
+# The following token is invalid (it has three extra characters at the end of the signature)
+client c4 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"HS512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # HMAC key : 'hmac key hs512'
+ # OpenSSL cmd : openssl dgst -sha512 -mac HMAC -macopt key:'hmac key hs512' data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/hs512" -hdr "Authorization: Bearer eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.K4Yze5N7jeJrDbJymphaH1YsFlYph5F-U75HzBRKDybrN7WBO494EgNG77mAQj4CVci_xbTD_IsqY2umO0f47AAAA"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "HS512"
+ expect resp.http.x-jwt-verify-HS512 == "-3"
+} -run
+
+
+client c5 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"RS256","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # OpenSSL cmd : openssl dgst -sha256 -sign rsa-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/rs256" -hdr "Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.hRqFM87JzV_YinYhdERp2E9BLhl6s7I5J37GTXAeT5fixJx-OCjTFvwKssyVo7fWAFcQMdQU7vGEXDOiWbNaMUFGIsMxx0Uflk0BeNwk6pWvNGk8KZGMtiqOv-IuPdAiaSW_xhxLHIk7eOwVefvBfk8j2hgU9yoHN87AYnl8oEnzrkzwWvEt-x-P2zB4s_VwhF0gbL1G4FsP5hxWL1HWmSFLBpvWaL5Lx3OJE7mLRLRf8TpMwEe4ROakzMpiv9Xk1H3mZth6d2a91F5Bm65MIJpJ7P2kEL3tdS62VRx8DM_SlsFuWcsqryO3CDQquMbwzAvfRgLPy8PBLRLT64wM3mZtue5GI2KUlqSYsSwKwK580b4drosLvAS75l_4jJwdwuQEvVd8Gry3DWS2mKJSMefmGfD-cdty1vvszs5sUa96Gf7Ro5DvkgXtVCKYk8KJLI62YgZd5S3M0ucP5NLBc_flUi4A2B_aSkd7NDM0ELddk0y48pcF95tejcvliGIy1GRRwevdqensXXQrFweFSZVvuKo8c9pcCBVfKTSllgL0lFGyI_vz6dUYt69I1gqWBDeGcA2XQUBJqfX3o9nkhZspA7b7QxMESatoATsM_XmfhbwsyY-sTq25XIGC4awaZHViZr1YFVD6BwNZWBCEBvW5zObiD5h5A5AgWoBv14E"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "RS256"
+ expect resp.http.x-jwt-verify-RS256 == "1"
+} -run
+
+client c6 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"RS384","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # OpenSSL cmd : openssl dgst -sha384 -sign rsa-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/rs384" -hdr "Authorization: Bearer eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.GuR-v91EMCVvvTTLiE56O0oDAKeQ5JdLqvHtrgOp2MbUtF7zCDutV0LTmMo4qDNVpvTnD3GZfTTGaVUTvW7kIQ3_1iEVAg61qVWkT9rtHHxifDX70RDBKkvNcMWyQH-dFP_FUvCmhCu7q-AzgBT6PHvs5ZqYyQvlQ1gSWZEPFi184dhvcUQrQC6CySEAdOzIryIHH2oQjN_a9lA9V9M_CH3P-AAwFE7NwUE1H1SGIYM4NHcngEZ3B4lBCHOhhgQMpfagcxQjjXv7VfeSqza6OZDpupwlOl34bb0gnFDGMh4hHSS6iHvvwCeCkclbyvKV0Vq0MaRtJuoKRF-_Oww-nKT_bfNtbF6MeOQLNRlYjGCHerWoBtjv3w2KjoLvQ5iGIFI3cEguyrrKNimpovF4Y5uINH0pWdRF99zOwVUlcJBk3RivIb--Y6s47aNFIVWimUpSn-8MSHTla20TYbcdVaZaMur09Cw500jPrOy6jFqVydSnmU6r13NkmCD5-Bl0mgwGtpZcOQExrnIcPQky12kQJAIrffVblvtkd-8FIBPBy1uBKCgkE-q9_suEvBTdvaoTocBmPcIxfPjZUVXeU3UmnRrXEz17pue0YfrwK9CUR9UoP0F5C7O5eSbAtZNm4Hpkiah0w7qugWG3esMgku3-xx0B2xwg6Ul7bAgEJFg"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "RS384"
+ expect resp.http.x-jwt-verify-RS384 == "1"
+} -run
+
+client c7 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"RS512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # OpenSSL cmd : openssl dgst -sha512 -sign rsa-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/rs512" -hdr "Authorization: Bearer eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.dgUDvxbWXV-q9lVFDVDt6zffrAjCMkKL7UURz-vvc6utCNMEgt8jSkDWi-mt-jmttkD5mwHqUf3HxWPhfjYNmkTok_XL79F5RXhiF_cu_2oDLDc-RuXdrHaRt9xjUIyZhVJMhaMLdmpcAokQlZxc2W6aj92HKzk3EjyHwfdwfKQNgMooXNzxjE9vCHUbahyLZvtPwiqDtYUSnvN_XOpAMUilxByJStwNqdB7MaOxeAzn76nITh6DqD1bNtxBiLzA7MxYdfsUSmXHMLpkWNAhlrcEIJui9PKm9E0OLFD3M7cCqi6rVvzDxvHqXz3-fcXiSJSRrSmSTu1_ok35TT4WwA9SkHpGe2MJ3uc-8CRlYmjDTcLyXWs_d8i3iNozo6xgiwqIkty4HqScTjhXndRQdmiK-RcUfNLM0Iqm6wYgOifWj728_9GCtdjup-C2uVPdwVwuOjwLbzctZLlFqH3i5IGrCfuOOCAcc_vN3REFqSrDEi4-9qpXuh7yk5pOaiCZYr3-uVhmY5neo55_eV8N3NooDyztwkzRtB_DdbaNrqxk3WEHU79Hseg7c1mkXGm6Djqt3dkkrdpbltzRLrnGKxA4-FzccKOT_P27UYmxQSkyfpAQhfH3jpOE0n9-UYyULbMOY7ZIypXUTquJnrZM3rD_NypU7Jg8uBBGqcziZFc"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "RS512"
+ expect resp.http.x-jwt-verify-RS512 == "1"
+} -run
+
+# The following token is invalid (the signature used SHA384 instead of SHA512)
+client c8 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"RS512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # OpenSSL cmd : openssl dgst -sha512 -sign rsa-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/rs512" -hdr "Authorization: Bearer eyJhbGciOiJSUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.GuR-v91EMCVvvTTLiE56O0oDAKeQ5JdLqvHtrgOp2MbUtF7zCDutV0LTmMo4qDNVpvTnD3GZfTTGaVUTvW7kIQ3_1iEVAg61qVWkT9rtHHxifDX70RDBKkvNcMWyQH-dFP_FUvCmhCu7q-AzgBT6PHvs5ZqYyQvlQ1gSWZEPFi184dhvcUQrQC6CySEAdOzIryIHH2oQjN_a9lA9V9M_CH3P-AAwFE7NwUE1H1SGIYM4NHcngEZ3B4lBCHOhhgQMpfagcxQjjXv7VfeSqza6OZDpupwlOl34bb0gnFDGMh4hHSS6iHvvwCeCkclbyvKV0Vq0MaRtJuoKRF-_Oww-nKT_bfNtbF6MeOQLNRlYjGCHerWoBtjv3w2KjoLvQ5iGIFI3cEguyrrKNimpovF4Y5uINH0pWdRF99zOwVUlcJBk3RivIb--Y6s47aNFIVWimUpSn-8MSHTla20TYbcdVaZaMur09Cw500jPrOy6jFqVydSnmU6r13NkmCD5-Bl0mgwGtpZcOQExrnIcPQky12kQJAIrffVblvtkd-8FIBPBy1uBKCgkE-q9_suEvBTdvaoTocBmPcIxfPjZUVXeU3UmnRrXEz17pue0YfrwK9CUR9UoP0F5C7O5eSbAtZNm4Hpkiah0w7qugWG3esMgku3-xx0B2xwg6Ul7bAgEJFg"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "RS512"
+ expect resp.http.x-jwt-verify-RS512 == "0"
+} -run
+
+
+
+client c9 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES256","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Key gen process : openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 -out es256-private.pem; openssl ec -in es256-private.pem -pubout -out es256-public.pem
+ # Token creation : ./build_token.py ES256 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' es256-private.pem
+
+ txreq -url "/es256" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.pNI_c5mHE3mLV0YDpstlP4l3t5XARLl6OmcKLuvF5r60m-C63mbgfKWdPjmJPMTCmX_y50YW_v2SKw0ju0tJHw"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES256"
+ expect resp.http.x-jwt-verify-ES256 == "1"
+} -run
+
+client c10 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES384","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Key gen process : openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-384 -out es384-private.pem; openssl ec -in es384-private.pem -pubout -out es384-public.pem
+ # Token creation : ./build_token.py ES384 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' es384-private.pem
+
+ txreq -url "/es384" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzM4NCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.cs59CQiCI_Pl8J-PKQ2y73L5IJascZXkf7MfRXycO1HkT9pqDW2bFr1bh7pFyPA85GaML4BPYVH_zDhcmjSMn_EIvUV8cPDuuUu69Au7n9LYGVkVJ-k7qN4DAR5eLCiU"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES384"
+ expect resp.http.x-jwt-verify-ES384 == "1"
+} -run
+
+client c11 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Key gen process : openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-521 -out es512-private.pem; openssl ec -in es512-private.pem -pubout -out es512-public.pem
+ # Token creation : ./build_token.py ES512 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' es512-private.pem
+
+ txreq -url "/es512" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzUxMiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.AJcyt0OYf2wg7SggJJVKYysLUkBQA0f0Zc0EbKgud2fQLeT65n42A9l9hhGje79VLWhEyisQmDpFXTpfFXeD_NiaAXyNnX5b8TbZALqxbjx8iIpbcObgUh_g5Gi81bKmRmfXUHW7L5iAwoNjYbUpXGipCpCD0N6-8zCrjcFD2UX01f0Y"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ expect resp.http.x-jwt-verify-ES512 == "1"
+} -run
+
+
+
+client c12 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"PS256","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Token creation : ./build_token.py PS256 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' rsa-private.pem
+ txreq -url "/ps256" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJQUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.eXzN8m41ejgmbtJPhgifU_jMuYwVXL3HsLMOJ_ERipNcuqVQUmfHib1MWydSOYkgNBIm2lK9LjUmRCs1LvMUsbcqnokebFUNmO6IFdbMj3kD4cvqXHyK0yogQ7fdtJZf3_ukcJQ_-IdCG6mwowq6-OLjv-v2EflwPsT33EGmEDuE-7Z8AVTOVPiKUrqq1KqBi7NnnzdghqKfXn4b0yT7CnxQ_GK4F-ghBxBiMXK2J8M6pvS1vof7PyzVQmpeNzn2Rpbk-Ez88WeoTQXqZL1_BeW0z8FeyWXoIiqAzluRHSfZf2iUwrHuiH-tZ5BkAsJXHMDhMoL8_TKdD2hAnCWdVA9W9bQpzfaCbF5xv8lkGcy01ekrh-rN6ZOjItYeDj3BuaQgrKa5YAs_Grei_iSLqAu_YmDiVJxBfv5ahe1I8rwBQ7lIsZqv6p8BKqBFNylLzIFioAtmHJBF0HtItLoj0Mp_bUuU6RLIwf7C8ZWPQVTVsTgHMAlnZLNnQ3vhcxCjLm-r45M3AUFQfMEy1ajiqpFb3z2ElEwiOS9uLYJs3AOAoJDc-e62VJ7tRlw7KB-Vw0mvztvXgYdit48KOxdbn15HQ0lbBM_jJHvbYjDFC0iGUaizBPqmOJcTvObvKv5itEhPT6ffsv9XBnRSv9f3kW_rI7chrCyRZc0nFUvEJ9o"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "PS256"
+ expect resp.http.x-jwt-verify-PS256 == "1"
+} -run
+
+client c13 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"PS384","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Token creation : ./build_token.py PS384 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' rsa-private.pem
+ txreq -url "/ps384" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJQUzM4NCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.f-il5pRvC_vYuJ5jI-q9zxgqStCzvICKJyJEmVSjK47uLtt24SNLsQ1V24gqGuDOkXAhxlVu9rUwvtbzOQbF6N1YFFKbCuJ7zbGG81j5r3IuFl_5y6v077PW3hSNn62WX1GDv8w_kGedAZqGwKhJR7D1CbPBE5v-b4PskVF1V7IrFx8PufS_LUeJq1Etei0iU7H9OWD0yVApE_nmeELy4Kz1cc1fQZTBzd-b6kB562JbUbENM14HoiuKpnZvDtQks93A7y_B14SZPrxDaiVI-fR1n8Ja10wyBqbw8mWzt4s7vkxQI8U0eTBcj6bpWcm6S947G_jjoum_Lu3ZSKXE4UxcZ2IIuM74PEUgWJUjr4f9klB8kplJS5AIXMUNG6QbgZhOdSfZmlfzZUmSt1CLI22rTadXjvn-5CG_VxWJUjcPF9hViFFKQ7qQw3Tcn73ZKf5toK3imQBay4vR11DYWP5flLscFtqPvFcV4qhNarG-kVTI2xO8wXDXEoKeIXvsr3GTmCmBbs-kxvtyI80GUTzKN2I9vp0W9Qo5GNa3DDU1-io3olVwtMFh_0qfhmdO1Rt-j11gGnYTz3S5zMMMG2Ihy8ho3ayNZlZf7MJvVBSPqbCpHdiRa8VgTyYdYvK81lgkSc3wE8CygFEBMEi9b181OKPODlpux6k-3AL_2Hs"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "PS384"
+ expect resp.http.x-jwt-verify-PS384 == "1"
+} -run
+
+client c14 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"PS512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Token creation : ./build_token.py PS512 '{"sub":"1234567890","name":"John Doe","iat":1516239022}' rsa-private.pem
+ txreq -url "/ps512" -hdr "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJQUzUxMiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.tJZQn0ksGx7vFpBzhNkP8vupyRiAAy5Rf6UdR2MEnO6-iwisbXOUrwwh8XQWngEe2O5FJabCxJRI_shSVEUuWY2Vz6kvRAQ6sWv_4uoPTUk9zjSXkS6C_nb_UY_6tUz39qA-OI80JKcLadvjB66CGWHI00C5Xz2gyWQuFgSItBIV6l0wI6Spf4NJa2Lefo7XbobQ7-u-yzgbIJ1BgXFOTWHYsgJ67n39gj7MDDsUjSaNbFlKfbvGJrdli5_PNNSdoNiF0pdsd6vldnucs5Rfysp4V-nbBzrORuJhl0_BlPG7_Wbap0sm6NCnzp1ks3D5_OWLZxJZNw_TJ2OuVHOX2PNj2MuHjMPDMKKxgxIXQJ8ry39-sk56ZrCJ8UqZofk8NX7Z4ypeWrK62BNSTLY8Le4WzF6dYcuawxiyt7xsC0MkaplXpRFLdmHrMhvyZz6S8BFhtlGD-PnRnEr8qZkThiZSs5kcEW8ryneKlN5TQ7E0H1HekUUii3_T9MtC5rNsT1vzyGr0XAn5TLxeal4Gvp3WyOHs4l7Q1EyQXPkAX8bWwODtLZ3DrREwdLb7Ex2k9wRDF52aww9EMpeLM3at6MQKggWQhNEClahN9AWBj7Vz-RqliWEIdUdNTL3d1JgLX41GZqXjOGZIwiVJwYpVRh1jKVhUn8pN8jCtoeiUxh8"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "PS512"
+ expect resp.http.x-jwt-verify-PS512 == "1"
+} -run
+
+
+
+# The following token is invalid (too short)
+client c15 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # OpenSSL cmd : openssl dgst -sha512 -sign es512-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/es512" -hdr "Authorization: Bearer eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.MIGHAkEEPEgIrFKIDofBpFKX_mtya55QboGr09P6--v8uO85DwQWR0iKgMNSzYkL3K1lwyExG0Vtwfnife0lNe7Fn5TigAJCAY95NShiTn3tvleXVGCkkD0-HcribnMhd34QPGRc4rlwTkUg9umIUhxnEhPR--OohlmhJyIYGHuH8Ksm5f"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ # Invalid token
+ expect resp.http.x-jwt-verify-ES512 == "-3"
+} -run
+
+# Unknown algorithm
+client c16 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"UNKNOWN_ALG","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ txreq -url "/errors" -hdr "Authorization: Bearer eyJhbGciOiJVTktOT1dOX0FMRyIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.MIGHAkEEPEgIrFKIDofBpFKX_mtya55QboGr09P6--v8uO85DwQWR0iKgMNSzYkL3K1lwyExG0Vtwfnife0lNe7Fn5TigAJCAY95NShiTn3tvleXVGCkkD0-HcribnMhd34QPGRc4rlwTkUg9umIUhxnEhPR--OohlmhJyIYGHuH8Ksm5f"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "UNKNOWN_ALG"
+ # Unmanaged algorithm
+ expect resp.http.x-jwt-verify == "-1"
+} -run
+
+# Invalid token (not enough fields)
+client c17 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ txreq -url "/errors" -hdr "Authorization: Bearer eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ # Invalid token
+ expect resp.http.x-jwt-verify == "-3"
+} -run
+
+# Invalid token (too many fields)
+client c18 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ txreq -url "/errors" -hdr "Authorization: Bearer eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.MIGHAkEEPEgIrFKIDofBpFKX_mtya55QboGr09P6--v8uO85DwQWR0iKgMNSzYkL3K1lwyExG0Vtwfnife0lNe7Fn5TigAJCAY95NShiTn3tvleXVGCkkD0-HcribnMhd34QPGRc4rlwTkUg9umIUhxnEhPR--OohlmhJyIYGHuH8Ksm5f.unexpectedextrafield"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ # Invalid token
+ expect resp.http.x-jwt-verify == "-3"
+} -run
+
+# Invalid token (empty signature)
+client c19 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ txreq -url "/errors" -hdr "Authorization: Bearer eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ # Invalid token
+ expect resp.http.x-jwt-verify == "-3"
+} -run
+
+# Unknown certificate
+client c20 -connect ${h1_mainfe_sock} {
+ # Token content : {"alg":"ES512","typ":"JWT"}
+ # {"sub":"1234567890","name":"John Doe","iat":1516239022}
+ # Key gen process : openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-521 -out es512-private.pem; openssl ec -in es512-private.pem -pubout -out es512-public.pem
+ # OpenSSL cmd : openssl dgst -sha512 -sign es512-private.pem data.txt | base64 | tr -d '=\n' | tr '/+' '_-'
+
+ txreq -url "/errors" -hdr "Authorization: Bearer eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.MIGHAkEEPEgIrFKIDofBpFKX_mtya55QboGr09P6--v8uO85DwQWR0iKgMNSzYkL3K1lwyExG0Vtwfnife0lNe7Fn5TigAJCAY95NShiTn3tvleXVGCkkD0-HcribnMhd34QPGRc4rlwTkUg9umIUhxnEhPR--OohlmhJyIYGHuH8Ksm5fSIWfRa"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-alg == "ES512"
+ # Unknown certificate
+ expect resp.http.x-jwt-verify == "-5"
+} -run
+
+
+# Test the http_auth_bearer special cases (other header than the default "Authorization" one)
+client c21 -connect ${h1_mainfe_sock} {
+ txreq -url "/auth_bearer" -hdr "Custom-Authorization: Bearer random_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-token == "random_value"
+} -run
+
+# Test the http_auth_bearer special cases (multiple spaces after the scheme)
+client c22 -connect ${h1_mainfe_sock} {
+ txreq -url "/auth_bearer" -hdr "Custom-Authorization: Bearer random_value"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-token == "random_value"
+} -run
+
+# Test the http_auth_bearer special cases (no value after the scheme)
+client c23 -connect ${h1_mainfe_sock} {
+ txreq -url "/auth_bearer" -hdr "Custom-Authorization: Bearer "
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-token == ""
+} -run
+
+# Test the http_auth_bearer special cases (no value after the scheme)
+client c24 -connect ${h1_mainfe_sock} {
+ txreq -url "/errors" -hdr "Authorization: Bearer "
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-jwt-token == ""
+} -run
diff --git a/reg-tests/jwt/rsa-public.pem b/reg-tests/jwt/rsa-public.pem
new file mode 100644
index 0000000..a87a89d
--- /dev/null
+++ b/reg-tests/jwt/rsa-public.pem
@@ -0,0 +1,14 @@
+-----BEGIN PUBLIC KEY-----
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxCPdKRUDpwNqrka4OYaI
+9bweoN/YoMYR8sddqK39S0pmzVIWZpZ51wXJU7oT4umSGAP0VpexxKNZdKnq6b9S
+caIfLCazl8EaU3Wg16l5ZD/OmHggaD5iHtI3lV2JhxTFlIdLI6sGoJxaDne0oelv
+tsE2dbBZBPT0OPKWyXgL2qQHCtYnqZI7d9czA61rg1PfiUqV6zh9MC7NW5mKPVS9
+5/MCIILyP4smljh5cUGkzhZaBy/mfKobTRe5xTP+DJ78wZhTAapOY/GmyQ4rFWZF
+ISH2tVQ7Ic32lbxeYXycTcPxEUcijNklnFHfpZ3Hhbz9hBuCWTaujcdYVxkRfMoc
+nz9InY8FCic3vgcOPrpqhZMxjeuVwUV9cjJhsWTjZeIne5P4l6DHmDIdoVJVatKR
++O4AL2q+VZ+d5euSmUe6bwrz1ufczIcRYAo1mnYD+USwjT5rGWSjG8brtfxtrzJz
+QP4oqMgLH2QBEgVDKlvsHiEC2K16tTf1pSEAh9Lyo2t8Tbc1BbuuJPafixNGFEQI
+J7sAwYoWNkncGOfwrPUpU13KtAGoW8hMBlLSuGb70FLbei/Qiz/YsWi86ybetN4W
+MpF096lcgqa/JH8IeYvGa/MQYoavloGv05OhaGrvGRy0GV6I9elnLEaSdBROnA4k
+yPaHW8jKmj04T8EBFmx5Lu0CAwEAAQ==
+-----END PUBLIC KEY-----
diff --git a/reg-tests/log/last_rule.vtc b/reg-tests/log/last_rule.vtc
new file mode 100644
index 0000000..f2b89e4
--- /dev/null
+++ b/reg-tests/log/last_rule.vtc
@@ -0,0 +1,165 @@
+varnishtest "Verify logging of last final rule"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.6-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 15 -start
+
+syslog Slg_1 -level info {
+ recv
+ # /trqacc1
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:30"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:31"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:32"
+ recv
+ # /trsacc1
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:36"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:37"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:38"
+ recv
+ # /hrqvar
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*-:-"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:41"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:42"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:43"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:44"
+ recv
+ # /hrsacc1
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:46"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:47"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:48"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* lr=.*/h1/cfg:49"
+} -start
+
+haproxy h1 -conf {
+ global
+ nbthread 1
+
+ defaults
+ mode http
+ option httplog
+ option http-server-close
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log ${Slg_1_addr}:${Slg_1_port} local0
+ log-format "ci:%cp [%tr] lr=%[last_rule_file]:%[last_rule_line]"
+ default_backend be
+
+ backend be
+ # handle these URLs:
+ # /trqacc1, /trqrej1, /trqrej2, /trsacc1, /trsrej1, /trsrej2
+ # /hrqvar, /hrqacc1, /hrqred1, /hrqrej1, /hrqrej2,
+ # /hrsacc1, /hrsred1, /hrsrej1, /hrsrej2
+
+ tcp-response inspect-delay 100ms
+ tcp-request content set-var(txn.path) path # must have no effect
+ tcp-request content accept if { var(txn.path) -m beg /trqacc1 /hrqrej1 }
+ tcp-request content reject if { var(txn.path) -m beg /trqrej1 }
+ tcp-request content reject if { var(txn.path) -m beg /trqrej2 }
+
+ tcp-response content reject unless WAIT_END
+ tcp-response content set-var(txn.foo) var(txn.path) # must have no effect
+ tcp-response content accept if { var(txn.path) -m beg /trsacc1 /hrsrej1 }
+ tcp-response content reject if { var(txn.path) -m beg /trsrej1 }
+ tcp-response content reject if { var(txn.path) -m beg /trsrej2 }
+
+ http-request set-var(txn.bar) var(txn.path) if { path_beg /hrqvar } # must have no effect
+ http-request allow if { var(txn.path) -m beg /hrqacc1 /hrsrej2 }
+ http-request redirect location / if { var(txn.path) -m beg /hrqred1 }
+ http-request deny if { var(txn.path) -m beg /hrqrej1 } # accepted by tcp-rq above
+ http-request deny if { var(txn.path) -m beg /hrqrej2 }
+
+ http-response allow if { var(txn.path) -m beg /hrsacc1 }
+ http-response redirect location / if { var(txn.path) -m beg /hrsred1 }
+ http-response deny if { var(txn.path) -m beg /hrsrej1 } # accepted by tcp-rs above
+ http-response deny if { var(txn.path) -m beg /hrsrej2 } # accepted by http-rq above
+ http-response deny if { var(txn.path) -m beg /hrsrej3 }
+
+ server app1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url /trqacc1
+ rxresp
+
+ txreq -url /trqrej1
+ expect_close
+} -run
+
+# The following clients are started in background and synchronized
+client c2 -connect ${h1_fe_1_sock} {
+ txreq -url /trqrej2
+ expect_close
+} -run
+
+client c3 -connect ${h1_fe_1_sock} {
+ txreq -url /trsacc1
+ rxresp
+ expect resp.status == 200
+
+ txreq -url /trsrej1
+ expect_close
+} -run
+
+client c4 -connect ${h1_fe_1_sock} {
+ txreq -url /trsrej2
+ expect_close
+} -run
+
+client c5 -connect ${h1_fe_1_sock} {
+ txreq -url /hrqvar
+ rxresp
+ expect resp.status == 200
+
+ txreq -url /hrqacc1
+ rxresp
+ expect resp.status == 200
+
+ txreq -url /hrqred1
+ rxresp
+ expect resp.status == 302
+
+ txreq -url /hrqrej1
+ rxresp
+ expect resp.status == 403
+
+ txreq -url /hrqrej2
+ rxresp
+ expect resp.status == 403
+
+ txreq -url /hrsacc1
+ rxresp
+ expect resp.status == 200
+
+ txreq -url /hrsred1
+ rxresp
+ expect resp.status == 302
+
+ txreq -url /hrsrej1
+ rxresp
+ expect resp.status == 502
+
+ txreq -url /hrsrej2
+ rxresp
+ expect resp.status == 502
+} -run
+
+syslog Slg_1 -wait
diff --git a/reg-tests/log/load_balancing.vtc b/reg-tests/log/load_balancing.vtc
new file mode 100644
index 0000000..5c56e65
--- /dev/null
+++ b/reg-tests/log/load_balancing.vtc
@@ -0,0 +1,159 @@
+varnishtest "Basic log load-balancing test"
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+barrier b3 cond 2 -cyclic
+barrier b4 cond 2 -cyclic
+barrier b5 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 500 -start
+
+syslog Slg_1 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c1 HTTP/1.1\""
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c2 HTTP/1.1\""
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c3 HTTP/1.1\""
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c4 HTTP/1.1\""
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c5 HTTP/1.1\""
+} -repeat 50 -start
+
+# Here are the syslog messages received by Slg_2:
+syslog Slg_2 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c6 HTTP/1.1\""
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c8 HTTP/1.1\""
+} -repeat 50 -start
+
+haproxy h1 -conf {
+ global
+ nbthread 1
+
+ defaults
+ mode http
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log ${Slg_1_addr}:${Slg_1_port} local0
+ default_backend be
+
+ frontend fe2
+ bind "fd@${fe_2}"
+ log ${Slg_2_addr}:${Slg_2_port} sample 1,3:5 local0
+ default_backend be
+
+ backend be
+ server app1 ${s1_addr}:${s1_port}
+} -start
+
+# The following clients are started in background and synchronized
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/client_c1"
+ rxresp
+ expect resp.status == 200
+ barrier b1 sync
+ barrier b5 sync
+} -repeat 50 -start
+
+client c2 -connect ${h1_fe_1_sock} {
+ barrier b1 sync
+ txreq -url "/client_c2"
+ rxresp
+ expect resp.status == 200
+ barrier b2 sync
+} -repeat 50 -start
+
+client c3 -connect ${h1_fe_1_sock} {
+ barrier b2 sync
+ txreq -url "/client_c3"
+ rxresp
+ expect resp.status == 200
+ barrier b3 sync
+} -repeat 50 -start
+
+client c4 -connect ${h1_fe_1_sock} {
+ barrier b3 sync
+ txreq -url "/client_c4"
+ rxresp
+ expect resp.status == 200
+ barrier b4 sync
+} -repeat 50 -start
+
+client c5 -connect ${h1_fe_1_sock} {
+ barrier b4 sync
+ txreq -url "/client_c5"
+ rxresp
+ expect resp.status == 200
+ barrier b5 sync
+} -repeat 50 -start
+
+syslog Slg_1 -wait
+
+client c1 -wait
+client c2 -wait
+client c3 -wait
+client c4 -wait
+client c5 -wait
+
+# Same test as before but with fe2 frontend.
+# The following clients are started in background and synchronized
+client c6 -connect ${h1_fe_2_sock} {
+ txreq -url "/client_c6"
+ rxresp
+ expect resp.status == 200
+ barrier b1 sync
+ barrier b5 sync
+} -repeat 50 -start
+
+client c7 -connect ${h1_fe_2_sock} {
+ barrier b1 sync
+ txreq -url "/client_c7"
+ rxresp
+ expect resp.status == 200
+ barrier b2 sync
+} -repeat 50 -start
+
+client c8 -connect ${h1_fe_2_sock} {
+ barrier b2 sync
+ txreq -url "/client_c8"
+ rxresp
+ expect resp.status == 200
+ barrier b3 sync
+} -repeat 50 -start
+
+client c9 -connect ${h1_fe_2_sock} {
+ barrier b3 sync
+ txreq -url "/client_c9"
+ rxresp
+ expect resp.status == 200
+ barrier b4 sync
+} -repeat 50 -start
+
+client c10 -connect ${h1_fe_2_sock} {
+ barrier b4 sync
+ txreq -url "/client_c10"
+ rxresp
+ expect resp.status == 200
+ barrier b5 sync
+} -repeat 50 -start
+
+syslog Slg_2 -wait
+
+client c6 -wait
+client c7 -wait
+client c8 -wait
+client c9 -wait
+client c10 -wait
+
diff --git a/reg-tests/log/log_backend.vtc b/reg-tests/log/log_backend.vtc
new file mode 100644
index 0000000..a9223ee
--- /dev/null
+++ b/reg-tests/log/log_backend.vtc
@@ -0,0 +1,185 @@
+varnishtest "Test the log backend target"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.9-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 500 -start
+
+syslog Slg1 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /0 HTTP/1.1\""
+} -repeat 100 -start
+
+syslog Slg2 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /1 HTTP/1.1\""
+} -repeat 100 -start
+
+syslog Slg21 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /srv1 HTTP/1.1\""
+} -repeat 1 -start
+
+syslog Slg22 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /srv2 HTTP/1.1\""
+} -repeat 1 -start
+
+syslog Slg23 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /srv3 HTTP/1.1\""
+} -repeat 2 -start
+
+syslog Slg24 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /backup HTTP/1.1\""
+} -repeat 1 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log backend@mylog-tcp local0
+ log backend@mylog-udp local0
+ default_backend be
+
+ frontend fe2
+ bind "fd@${fe_2}"
+ log backend@mylog-failover local0
+ default_backend be
+
+ backend be
+ server app1 ${s1_addr}:${s1_port}
+
+ backend mylog-tcp
+ mode log
+ server s1 tcp@127.0.0.1:1514 #TCP: to log-forward
+
+ backend mylog-udp
+ mode log
+
+ # extract id (integer) from URL in the form "GET /id" and use it as hash key
+ balance log-hash 'field(-2,\"),field(2,/),field(1, )'
+ hash-type map-based none
+
+ server s1 udp@${Slg1_addr}:${Slg1_port} # syslog 1 only receives "GET /0" requests
+ server s2 udp@${Slg2_addr}:${Slg2_port} # syslog 2 only receives "GET /1" requests
+
+ log-forward syslog2udp
+ bind 127.0.0.1:1514
+ log backend@mylog-udp local0 # Back to UDP log backend
+
+ backend mylog-failover
+ mode log
+ balance sticky
+
+ server s1 udp@${Slg21_addr}:${Slg21_port} # only receives "GET /srv1" request
+ server s2 udp@${Slg22_addr}:${Slg22_port} # only receives "GET /srv2" request
+ server s3 udp@${Slg23_addr}:${Slg23_port} # only receives "GET /srv3" request
+
+ server s4 udp@${Slg24_addr}:${Slg24_port} backup # only receives "GET /backup" request
+} -start
+
+# Test log distribution reliability
+
+# all logs should go to s1
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/0"
+ rxresp
+ expect resp.status == 200
+} -repeat 50 -start
+
+# all logs should go to s2
+client c2 -connect ${h1_fe_1_sock} {
+ txreq -url "/1"
+ rxresp
+ expect resp.status == 200
+} -repeat 50 -start
+
+syslog Slg1 -wait
+syslog Slg2 -wait
+
+# Test server queue/dequeue/failover mechanism
+
+# s1 should handle this
+client c21 -connect ${h1_fe_2_sock} {
+ txreq -url "/srv1"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "disable server mylog-failover/s1"
+ expect ~ ".*"
+}
+
+# s2 should handle this
+client c22 -connect ${h1_fe_2_sock} {
+ txreq -url "/srv2"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "disable server mylog-failover/s2"
+ expect ~ ".*"
+}
+
+haproxy h1 -cli {
+ send "enable server mylog-failover/s1"
+ expect ~ ".*"
+}
+
+# s3 should handle this
+client c23 -connect ${h1_fe_2_sock} {
+ txreq -url "/srv3"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "disable server mylog-failover/s1"
+ expect ~ ".*"
+}
+
+haproxy h1 -cli {
+ send "disable server mylog-failover/s3"
+ expect ~ ".*"
+}
+
+# backup should handle this
+client c24 -connect ${h1_fe_2_sock} {
+ txreq -url "/backup"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "enable server mylog-failover/s3"
+ expect ~ ".*"
+}
+
+haproxy h1 -cli {
+ send "enable server mylog-failover/s2"
+ expect ~ ".*"
+}
+
+# s3 should handle this
+client c25 -connect ${h1_fe_2_sock} {
+ txreq -url "/srv3"
+ rxresp
+ expect resp.status == 200
+} -run
+
+syslog Slg21 -wait
+syslog Slg22 -wait
+syslog Slg23 -wait
+syslog Slg24 -wait
diff --git a/reg-tests/log/log_forward.vtc b/reg-tests/log/log_forward.vtc
new file mode 100644
index 0000000..3977f4c
--- /dev/null
+++ b/reg-tests/log/log_forward.vtc
@@ -0,0 +1,57 @@
+varnishtest "Test the TCP log-forward"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.3-dev1)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 500 -start
+
+syslog Slg1 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c1 HTTP/1.1\""
+} -repeat 50 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log 127.0.0.1:1514 local0
+# log ${Slg1_addr}:${Slg1_port} local0
+ default_backend be
+
+ backend be
+ server app1 ${s1_addr}:${s1_port}
+
+ ring myring
+ description "My local buffer"
+ format rfc5424
+ maxlen 1200
+ size 32764
+ timeout connect 5s
+ timeout server 10s
+ # syslog tcp server
+ server mysyslogsrv 127.0.0.1:2514
+
+ log-forward syslog2tcp
+ dgram-bind 127.0.0.1:1514
+ log ring@myring local0 # To TCP log
+
+ log-forward syslog2local
+ bind 127.0.0.1:2514
+ log ${Slg1_addr}:${Slg1_port} local0 # To VTest syslog
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/client_c1"
+ rxresp
+ expect resp.status == 200
+} -repeat 50 -start
+
+syslog Slg1 -wait
diff --git a/reg-tests/log/log_uri.vtc b/reg-tests/log/log_uri.vtc
new file mode 100644
index 0000000..6dd50d2
--- /dev/null
+++ b/reg-tests/log/log_uri.vtc
@@ -0,0 +1,61 @@
+varnishtest "Verify logging of relative/absolute URI path"
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp -hdr "Connection: close"
+} -repeat 4 -start
+
+syslog Slg_1 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* hpo=/r/1 hp=/r/1 hu=/r/1 hq="
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* hpo=/r/2 hp=/r/2 hu=/r/2\\?q=2 hq=\\?q=2"
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* hpo=/r/3 hp=http://localhost/r/3 hu=http://localhost/r/3 hq="
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* hpo=/r/4 hp=http://localhost/r/4 hu=http://localhost/r/4\\?q=4 hq=\\?q=4"
+} -start
+
+haproxy h1 -conf {
+ global
+ nbthread 1
+
+ defaults
+ mode http
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log ${Slg_1_addr}:${Slg_1_port} local0
+ log-format "ci:%cp [%tr] hpo=%HPO hp=%HP hu=%HU hq=%HQ"
+ default_backend be
+
+ backend be
+ server app1 ${s1_addr}:${s1_port}
+} -start
+
+# The following client is started in background and synchronized
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/r/1"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/r/2?q=2"
+ rxresp
+ expect resp.status == 200
+ txreq -url "http://localhost/r/3" -hdr "host: localhost"
+ rxresp
+ expect resp.status == 200
+ txreq -url "http://localhost/r/4?q=4" -hdr "host: localhost"
+ rxresp
+ expect resp.status == 200
+} -start
+
+syslog Slg_1 -wait
+
+client c1 -wait
diff --git a/reg-tests/log/wrong_ip_port_logging.vtc b/reg-tests/log/wrong_ip_port_logging.vtc
new file mode 100644
index 0000000..af8ca84
--- /dev/null
+++ b/reg-tests/log/wrong_ip_port_logging.vtc
@@ -0,0 +1,62 @@
+# commit d02286d
+# BUG/MINOR: log: pin the front connection when front ip/ports are logged
+#
+# Mathias Weiersmueller reported an interesting issue with logs which Lukas
+# diagnosed as dating back from commit 9b061e332 (1.5-dev9). When front
+# connection information (ip, port) are logged in TCP mode and the log is
+# emitted at the end of the connection (eg: because %B or any log tag
+# requiring LW_BYTES is set), the log is emitted after the connection is
+# closed, so the address and ports cannot be retrieved anymore.
+#
+# It could be argued that we'd make a special case of these to immediately
+# retrieve the source and destination addresses from the connection, but it
+# seems cleaner to simply pin the front connection, marking it "tracked" by
+# adding the LW_XPRT flag to mention that we'll need some of these elements
+# at the last moment. Only LW_FRTIP and LW_CLIP are affected. Note that after
+# this change, LW_FRTIP could simply be removed as it's not used anywhere.
+#
+# Note that the problem doesn't happen when using %[src] or %[dst] since
+# all sample expressions set LW_XPRT.
+
+#REGTEST_TYPE=bug
+
+varnishtest "Wrong ip/port logging"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ delay 0.02
+} -start
+
+syslog Slg_1 -level notice {
+ recv info
+ expect ~ \"dip\":\"${h1_fe_1_addr}\",\"dport\":\"${h1_fe_1_port}.*\"ts\":\"[cC]D\",\"
+} -start
+
+haproxy h1 -conf {
+ global
+ log ${Slg_1_addr}:${Slg_1_port} local0
+
+defaults
+ log global
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client 1
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+frontend fe1
+ bind "fd@${fe_1}"
+ mode tcp
+ log-format {\"dip\":\"%fi\",\"dport\":\"%fp\",\"c_ip\":\"%ci\",\"c_port\":\"%cp\",\"fe_name\":\"%ft\",\"be_name\":\"%b\",\"s_name\":\"%s\",\"ts\":\"%ts\",\"bytes_read\":\"%B\"}
+ default_backend be_app
+
+backend be_app
+ server app1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/"
+ expect_close
+} -run
+
+syslog Slg_1 -wait
+
diff --git a/reg-tests/lua/bad_http_clt_req_duration.lua b/reg-tests/lua/bad_http_clt_req_duration.lua
new file mode 100644
index 0000000..2c2ab1d
--- /dev/null
+++ b/reg-tests/lua/bad_http_clt_req_duration.lua
@@ -0,0 +1,8 @@
+core.register_service("foo.http", "http", function(applet)
+ core.msleep(10)
+ applet:start_response()
+end)
+
+core.register_service("foo.tcp", "tcp", function(applet)
+ applet:send("HTTP/1.1 200 OK\r\nTransfer-encoding: chunked\r\n\r\n0\r\n\r\n")
+end)
diff --git a/reg-tests/lua/bad_http_clt_req_duration.vtc b/reg-tests/lua/bad_http_clt_req_duration.vtc
new file mode 100644
index 0000000..5cfdf1a
--- /dev/null
+++ b/reg-tests/lua/bad_http_clt_req_duration.vtc
@@ -0,0 +1,76 @@
+# commit 7b6cc52784526c32efda44b873a4258d3ae0b8c7
+# BUG/MINOR: lua: Bad HTTP client request duration.
+#
+# HTTP LUA applet callback should not update the date on which the HTTP client requests
+# arrive. This was done just after the LUA applet has completed its job.
+#
+# This patch simply removes the affected statement. The same fix has been applied
+# to TCP LUA applet callback.
+#
+# To reproduce this issue, as reported by Patrick Hemmer, implement an HTTP LUA applet
+# which sleeps a bit before replying:
+#
+# core.register_service("foo", "http", function(applet)
+# core.msleep(100)
+# applet:set_status(200)
+# applet:start_response()
+# end)
+#
+# This had as a consequence to log %TR field with approximately the same value as
+# the LUA sleep time.
+
+varnishtest "LUA bug"
+#REQUIRE_OPTIONS=LUA
+#REGTEST_TYPE=bug
+
+feature ignore_unknown_macro
+
+syslog Slog {
+ recv info
+ expect ~ "[^:\\[ ]\\[[0-9]*\\]: Ta=[0-9]* Tc=[0-9]* Td=[0-9]* Th=[0-9]* Ti=[0-9]* Tq=[0-9]* TR=[0-9]* Tr=[0-9]* Tt=[0-9]* Tw=[0-9]*$"
+
+ recv info
+ expect ~ "[^:\\[ ]\\[[0-9]*\\]: Tc=[0-9]* Td=[0-9]* Th=[0-9]* Tt=[0-9]* Tw=[0-9]*$"
+} -start
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/bad_http_clt_req_duration.lua
+
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend f1
+ mode http
+ bind "fd@${f1}"
+ log ${Slog_addr}:${Slog_port} daemon
+ log-format Ta=%Ta\ Tc=%Tc\ Td=%Td\ Th=%Th\ Ti=%Ti\ Tq=%Tq\ TR=%TR\ Tr=%Tr\ Tt=%Tt\ Tw=%Tw
+ default_backend b1
+
+ backend b1
+ mode http
+ http-request use-service lua.foo.http
+
+ frontend f2
+ mode tcp
+ bind "fd@${f2}"
+ log ${Slog_addr}:${Slog_port} daemon
+ log-format Tc=%Tc\ Td=%Td\ Th=%Th\ Tt=%Tt\ Tw=%Tw
+
+ tcp-request inspect-delay 1s
+ tcp-request content use-service lua.foo.tcp
+} -start
+
+client c1 -connect "${h1_f1_sock}" {
+ txreq
+ rxresp
+} -run
+
+client c2 -connect "${h1_f2_sock}" {
+ txreq
+ rxresp
+} -run
+
+syslog Slog -wait
diff --git a/reg-tests/lua/close_wait_lf.lua b/reg-tests/lua/close_wait_lf.lua
new file mode 100644
index 0000000..cc897e7
--- /dev/null
+++ b/reg-tests/lua/close_wait_lf.lua
@@ -0,0 +1 @@
+core.register_service("donothing", "http", function(applet) end)
diff --git a/reg-tests/lua/close_wait_lf.vtc b/reg-tests/lua/close_wait_lf.vtc
new file mode 100644
index 0000000..7bed3fd
--- /dev/null
+++ b/reg-tests/lua/close_wait_lf.vtc
@@ -0,0 +1,53 @@
+# commit 70d318c
+# BUG/MEDIUM: lua: possible CLOSE-WAIT state with '\n' headers
+#
+# The Lua parser doesn't take into account end-of-headers containing
+# only '\n'. It always expects '\r\n'. If a '\n' is processed, the Lua
+# parser considers it is missing 1 byte, and waits indefinitely for new data.
+#
+# When the client reaches its timeout, it closes the connection.
+# This close is not detected and the connection stays in CLOSE-WAIT
+# state.
+#
+# I guess that this patch fixes only a visible part of the problem.
+# If the Lua HTTP parser waits for data, the server timeout or the
+# connection closed by the client may stop the applet.
+
+varnishtest "possible CLOSE-WAIT with '\n' headers"
+#REQUIRE_OPTIONS=LUA
+#REGTEST_TYPE=bug
+
+feature ignore_unknown_macro
+
+syslog Slog -level info -repeat 100 {
+ recv info
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Ta=[0-9]* Tc=[0-9]* Td=[0-9]* Th=[0-9]* Ti=[0-9]* Tq=[0-9]* TR=[0-9]* Tr=[0-9]* Tt=[0-9]* Tw=[0-9]*"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ global
+ lua-load ${testdir}/close_wait_lf.lua
+
+ frontend frt
+ log ${Slog_addr}:${Slog_port} local0 debug err
+ log-format Ta=%Ta\ Tc=%Tc\ Td=%Td\ Th=%Th\ Ti=%Ti\ Tq=%Tq\ TR=%TR\ Tr=%Tr\ Tt=%Tt\ Tw=%Tw
+ mode http
+ bind "fd@${frt}"
+ http-request use-service lua.donothing
+} -start
+
+
+client c1 -connect ${h1_frt_sock} -repeat 100 {
+ send "GET / HTTP/1.1\n\n"
+} -run
+
+syslog Slog -wait
+
+shell {
+ ss -pt | grep CLOSE-WAIT.*haproxy.*pid=${h1_pid}
+ exit $((!$?))
+}
diff --git a/reg-tests/lua/common.pem b/reg-tests/lua/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/lua/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/lua/h_txn_get_priv.lua b/reg-tests/lua/h_txn_get_priv.lua
new file mode 100644
index 0000000..999ea88
--- /dev/null
+++ b/reg-tests/lua/h_txn_get_priv.lua
@@ -0,0 +1,15 @@
+core.register_action("bug", { "http-res" }, function(txn)
+ data = txn:get_priv()
+ if not data then
+ data = 0
+ end
+ data = data + 1
+ print(string.format("set to %d", data))
+ txn.http:res_set_status(200 + data)
+ txn:set_priv(data)
+end)
+
+core.register_service("fakeserv", "http", function(applet)
+ applet:set_status(200)
+ applet:start_response()
+end)
diff --git a/reg-tests/lua/h_txn_get_priv.vtc b/reg-tests/lua/h_txn_get_priv.vtc
new file mode 100644
index 0000000..bd8c069
--- /dev/null
+++ b/reg-tests/lua/h_txn_get_priv.vtc
@@ -0,0 +1,33 @@
+varnishtest "Lua: txn:get_priv() scope"
+#REQUIRE_OPTIONS=LUA
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/h_txn_get_priv.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ http-response lua.bug
+
+ backend b1
+ mode http
+ http-request use-service lua.fakeserv
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 201
+ txreq -url "/"
+ rxresp
+ expect resp.status == 201
+}
+
+client c0 -start
+
+client c0 -wait
diff --git a/reg-tests/lua/httpclient_action.lua b/reg-tests/lua/httpclient_action.lua
new file mode 100644
index 0000000..9a7209c
--- /dev/null
+++ b/reg-tests/lua/httpclient_action.lua
@@ -0,0 +1,8 @@
+function test()
+ local httpclient = core.httpclient()
+ local response = httpclient:get{url="http://127.0.0.1", headers={ [ "Host" ] = { "localhost" } }}
+
+end
+
+
+core.register_action("test", {"tcp-req"}, test, 0)
diff --git a/reg-tests/lua/httpclient_action.vtc b/reg-tests/lua/httpclient_action.vtc
new file mode 100644
index 0000000..11c7d62
--- /dev/null
+++ b/reg-tests/lua/httpclient_action.vtc
@@ -0,0 +1,39 @@
+varnishtest "Lua: test the httpclient when the lua action times out"
+#
+# Start an httpclient from "lua.test" whose lua task will expire before the
+# httpclient is ended.
+
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev7)'"
+feature ignore_unknown_macro
+
+#REQUIRE_OPTIONS=LUA
+
+haproxy h1 -conf {
+
+ global
+ lua-load ${testdir}/httpclient_action.lua
+ defaults
+ mode tcp
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ timeout check 10s
+
+ listen li1
+ mode http
+ bind "fd@${fe1}"
+ tcp-request inspect-delay 10ms
+ tcp-request content lua.test
+ http-request return status 503
+
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
diff --git a/reg-tests/lua/lua_httpclient.lua b/reg-tests/lua/lua_httpclient.lua
new file mode 100644
index 0000000..b5a5180
--- /dev/null
+++ b/reg-tests/lua/lua_httpclient.lua
@@ -0,0 +1,49 @@
+
+local vtc_port = 0
+local vtc_port2 = 0
+local vtc_port3 = 0
+
+core.register_service("fakeserv", "http", function(applet)
+ vtc_port = applet.headers["vtcport"][0]
+ vtc_port2 = applet.headers["vtcport2"][0]
+ vtc_port3 = applet.headers["vtcport3"][0]
+ core.Info("APPLET START")
+ local response = "OK"
+ applet:add_header("Server", "haproxy/webstats")
+ applet:add_header("Content-Length", string.len(response))
+ applet:add_header("Content-Type", "text/html")
+ applet:start_response()
+ applet:send(response)
+ core.Info("APPLET DONE")
+end)
+
+local function cron()
+	-- wait until the correct port is set through the c0 request..
+ while vtc_port == 0 do
+ core.msleep(1)
+ end
+ core.Debug('CRON port:' .. vtc_port)
+
+ local body = ""
+
+ for i = 0, 2000 do
+ body = body .. i .. ' ABCDEFGHIJKLMNOPQRSTUVWXYZ\n'
+ end
+ core.Info("First httpclient request")
+ local httpclient = core.httpclient()
+ local response = httpclient:post{url="http://127.0.0.1:" .. vtc_port, body=body}
+ core.Info("Received: " .. response.body)
+
+ body = response.body
+
+ core.Info("Second httpclient request")
+ local httpclient2 = core.httpclient()
+ local response2 = httpclient2:post{url="http://127.0.0.1:" .. vtc_port2, body=body}
+
+ core.Info("Third httpclient request")
+ local httpclient3 = core.httpclient()
+ local response3 = httpclient3:get{url="http://127.0.0.1", dst = vtc_port3, headers={ [ "Host" ] = { "foobar.haproxy.local" } }}
+
+end
+
+core.register_task(cron)
diff --git a/reg-tests/lua/lua_httpclient.vtc b/reg-tests/lua/lua_httpclient.vtc
new file mode 100644
index 0000000..0a27493
--- /dev/null
+++ b/reg-tests/lua/lua_httpclient.vtc
@@ -0,0 +1,68 @@
+varnishtest "Lua: check httpclient functionality from a lua-task"
+
+# A request is first made with c0 with the port of s1 and s2 so the httpclient
+# can generate its URI with it.
+#
+# This reg-test sends a payload with the httpclient to s1, s1 returns another
+# payload. The 2nd lua httpclient sends back the payload from s1 to s2.
+#
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev7)'"
+feature ignore_unknown_macro
+
+#REQUIRE_OPTIONS=LUA
+
+server s1 {
+ rxreq
+ txresp -bodylen 54000
+ expect req.body ~ ".*0 ABCDEFGHIJKLMNOPQRSTUVWXYZ.*"
+ expect req.body ~ ".*500 ABCDEFGHIJKLMNOPQRSTUVWXYZ.*"
+ expect req.body ~ ".*1000 ABCDEFGHIJKLMNOPQRSTUVWXYZ.*"
+ expect req.body ~ ".*1500 ABCDEFGHIJKLMNOPQRSTUVWXYZ.*"
+ expect req.body ~ ".*2000 ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+} -start
+
+server s2 {
+ rxreq
+ txresp
+ expect req.bodylen == 54000
+} -start
+
+server s3 {
+ rxreq
+ txresp -bodylen 54000
+ expect req.method == "GET"
+ expect req.http.host == "foobar.haproxy.local"
+} -start
+
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/lua_httpclient.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ backend b1
+ mode http
+ http-request use-service lua.fakeserv
+
+ listen li1
+ mode http
+ bind unix@${tmpdir}/srv3
+ server srv3 ${s3_addr}:${s3_port}
+
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq -url "/" -hdr "vtcport: ${s1_port}" -hdr "vtcport2: ${s2_port}" -hdr "vtcport3: unix@${tmpdir}/srv3"
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+server s1 -wait
+server s2 -wait
+server s3 -wait
diff --git a/reg-tests/lua/lua_socket.lua b/reg-tests/lua/lua_socket.lua
new file mode 100644
index 0000000..3ad14fe
--- /dev/null
+++ b/reg-tests/lua/lua_socket.lua
@@ -0,0 +1,44 @@
+
+local vtc_port = 0
+
+core.register_service("fakeserv", "http", function(applet)
+ vtc_port = applet.headers["vtcport"][0]
+ core.Info("APPLET START")
+ local response = "OK"
+ applet:add_header("Server", "haproxy/webstats")
+ applet:add_header("Content-Length", string.len(response))
+ applet:add_header("Content-Type", "text/html")
+ applet:start_response()
+ applet:send(response)
+ core.Info("APPLET DONE")
+end)
+
+local function cron()
+	-- wait until the correct port is set through the c0 request..
+ while vtc_port == 0 do
+ core.msleep(1)
+ end
+ core.Debug('CRON port:' .. vtc_port)
+
+ local socket = core.tcp()
+ local success = socket:connect("127.0.0.1", vtc_port)
+ core.Info("SOCKET MADE ".. (success or "??"))
+ if success ~= 1 then
+ core.Info("CONNECT SOCKET FAILED?")
+ return
+ end
+ local request = "GET / HTTP/1.1\r\n\r\n"
+ core.Info("SENDING REQUEST")
+ socket:send(request)
+ local result = ""
+ repeat
+ core.Info("4")
+ local d = socket:receive("*a")
+ if d ~= nil then
+ result = result .. d
+ end
+ until d == nil or d == 0
+ core.Info("Received: "..result)
+end
+
+core.register_task(cron) \ No newline at end of file
diff --git a/reg-tests/lua/lua_socket.vtc b/reg-tests/lua/lua_socket.vtc
new file mode 100644
index 0000000..83e06a6
--- /dev/null
+++ b/reg-tests/lua/lua_socket.vtc
@@ -0,0 +1,33 @@
+varnishtest "Lua: check socket functionality from a lua-task"
+feature ignore_unknown_macro
+
+#REQUIRE_OPTIONS=LUA
+
+server s1 {
+ rxreq
+ txresp -bodylen 20
+} -start
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/lua_socket.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ backend b1
+ mode http
+ http-request use-service lua.fakeserv
+
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq -url "/" -hdr "vtcport: ${s1_port}"
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+server s1 -wait
diff --git a/reg-tests/lua/set_var.lua b/reg-tests/lua/set_var.lua
new file mode 100644
index 0000000..f4d5e7a
--- /dev/null
+++ b/reg-tests/lua/set_var.lua
@@ -0,0 +1,25 @@
+core.register_service("set_var", "http", function(applet)
+ local var_name = applet.headers["var"][0]
+ local result = applet:set_var(var_name, "value")
+ if result then
+ applet:set_status(202)
+ else
+ applet:set_status(400)
+ end
+ applet:add_header("echo", applet:get_var(var_name) or "(nil)")
+ applet:start_response()
+ applet:send("")
+end)
+
+core.register_service("set_var_ifexist", "http", function(applet)
+ local var_name = applet.headers["var"][0]
+ local result = applet:set_var(var_name, "value", true)
+ if result then
+ applet:set_status(202)
+ else
+ applet:set_status(400)
+ end
+ applet:add_header("echo", applet:get_var(var_name) or "(nil)")
+ applet:start_response()
+ applet:send("")
+end)
diff --git a/reg-tests/lua/set_var.vtc b/reg-tests/lua/set_var.vtc
new file mode 100644
index 0000000..0c8a4b1
--- /dev/null
+++ b/reg-tests/lua/set_var.vtc
@@ -0,0 +1,83 @@
+varnishtest "Lua: set_var"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=LUA
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ global
+ lua-load ${testdir}/set_var.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+
+ http-request use-service lua.set_var
+
+ frontend fe2
+ mode http
+ bind "fd@${fe2}"
+ # just make sure the variable exists
+ http-request set-header Dummy %[var(proc.fe2_foo)]
+
+ http-request use-service lua.set_var_ifexist
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ # create var
+ txreq -url "/" \
+ -hdr "Var: txn.fe1_foo"
+ rxresp
+ expect resp.status == 202
+ expect resp.http.echo == "value"
+
+ # rewrite var
+ txreq -url "/" \
+ -hdr "Var: txn.fe1_foo"
+ rxresp
+ expect resp.status == 202
+ expect resp.http.echo == "value"
+
+ # create var under scope "proc"
+ txreq -url "/" \
+ -hdr "Var: proc.fe1_foo"
+ rxresp
+ expect resp.status == 202
+ expect resp.http.echo == "value"
+
+ # fail to create bad scope
+ txreq -url "/" \
+ -hdr "Var: invalid.var"
+ rxresp
+ expect resp.status == 400
+ expect resp.http.echo == "(nil)"
+} -run
+
+client c1 -connect ${h1_fe2_sock} {
+ # this one exists in the conf, it must succeed
+ txreq -url "/" \
+ -hdr "Var: proc.fe2_foo"
+ rxresp
+ expect resp.status == 202
+ expect resp.http.echo == "value"
+
+ # this one does not exist in the conf, it must fail
+ txreq -url "/" \
+ -hdr "Var: proc.fe2_bar"
+ rxresp
+ expect resp.status == 400
+ expect resp.http.echo == "(nil)"
+
+ # this one is under txn, it must succeed
+ txreq -url "/" \
+ -hdr "Var: txn.fe2_foo"
+ rxresp
+ expect resp.status == 202
+ expect resp.http.echo == "value"
+} -run
diff --git a/reg-tests/lua/txn_get_priv-print_r.lua b/reg-tests/lua/txn_get_priv-print_r.lua
new file mode 100644
index 0000000..185614f
--- /dev/null
+++ b/reg-tests/lua/txn_get_priv-print_r.lua
@@ -0,0 +1,96 @@
+-- Copyright 2016 Thierry Fournier
+
+function color(index, str)
+ return "\x1b[" .. index .. "m" .. str .. "\x1b[00m"
+end
+
+function nocolor(index, str)
+ return str
+end
+
+function sp(count)
+ local spaces = ""
+ while count > 0 do
+ spaces = spaces .. " "
+ count = count - 1
+ end
+ return spaces
+end
+
+function escape(str)
+ local s = ""
+ for i = 1, #str do
+ local c = str:sub(i,i)
+ local ascii = string.byte(c, 1)
+ if ascii > 126 or ascii < 20 then
+ s = s .. string.format("\\x%02x", ascii)
+ else
+ s = s .. c
+ end
+ end
+ return s
+end
+
+function print_rr(p, indent, c, wr, hist)
+ local i = 0
+ local nl = ""
+
+ if type(p) == "table" then
+ wr(c("33", "(table)") .. " " .. c("36", tostring(p)) .. " [")
+
+ for idx, value in ipairs(hist) do
+ if value == p then
+ wr(" " .. c("35", "/* recursion */") .. " ]")
+ return
+ end
+ end
+ hist[indent + 1] = p
+
+ mt = getmetatable(p)
+ if mt ~= nil then
+ wr("\n" .. sp(indent+1) .. c("31", "METATABLE") .. ": ")
+ print_rr(mt, indent+1, c, wr, hist)
+ end
+
+ for k,v in pairs(p) do
+ if i > 0 then
+ nl = "\n"
+ else
+ wr("\n")
+ end
+ wr(nl .. sp(indent+1))
+ if type(k) == "number" then
+ wr(c("32", tostring(k)))
+ else
+ wr("\"" .. c("32", escape(tostring(k))) .. "\"")
+ end
+ wr(": ")
+ print_rr(v, indent+1, c, wr, hist)
+ i = i + 1
+ end
+ if i == 0 then
+ wr(" " .. c("35", "/* empty */") .. " ]")
+ else
+ wr("\n" .. sp(indent) .. "]")
+ end
+
+ hist[indent + 1] = nil
+
+ elseif type(p) == "string" then
+ wr(c("33", "(string)") .. " \"" .. c("36", escape(p)) .. "\"")
+ else
+ wr(c("33", "(" .. type(p) .. ")") .. " " .. c("36", tostring(p)))
+ end
+end
+
+function print_r(p, col, wr)
+ if col == nil then col = true end
+ if wr == nil then wr = function(msg) io.stdout:write(msg) end end
+ local hist = {}
+ if col == true then
+ print_rr(p, 0, color, wr, hist)
+ else
+ print_rr(p, 0, nocolor, wr, hist)
+ end
+ wr("\n")
+end
diff --git a/reg-tests/lua/txn_get_priv-thread.vtc b/reg-tests/lua/txn_get_priv-thread.vtc
new file mode 100644
index 0000000..9538363
--- /dev/null
+++ b/reg-tests/lua/txn_get_priv-thread.vtc
@@ -0,0 +1,69 @@
+varnishtest "Lua: txn:get_priv() scope"
+#REQUIRE_OPTIONS=LUA,OPENSSL
+#REQUIRE_VERSION=2.4
+#REGTEST_TYPE=bug
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ lua-load-per-thread ${testdir}/txn_get_priv.lua
+ lua-load-per-thread ${testdir}/txn_get_priv-print_r.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ frontend fe2
+ mode http
+ bind ":8443" ssl crt ${testdir}/common.pem
+ stats enable
+ stats uri /
+
+ backend b1
+ mode http
+ http-request use-service lua.fakeserv
+} -start
+
+client c0 -repeat 4 -connect ${h1_fe1_sock} {
+ txreq -url "/0"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/0"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c1 -repeat 4 -connect ${h1_fe1_sock} {
+ txreq -url "/1"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/1"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -repeat 4 -connect ${h1_fe1_sock} {
+ txreq -url "/2"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/2"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -repeat 4 -connect ${h1_fe1_sock} {
+ txreq -url "/3"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/3"
+ rxresp
+ expect resp.status == 200
+} -run
+
diff --git a/reg-tests/lua/txn_get_priv.lua b/reg-tests/lua/txn_get_priv.lua
new file mode 100644
index 0000000..dd5623c
--- /dev/null
+++ b/reg-tests/lua/txn_get_priv.lua
@@ -0,0 +1,180 @@
+Luacurl = {}
+Luacurl.__index = Luacurl
+setmetatable(Luacurl, {
+ __call = function (cls, ...)
+ return cls.new(...)
+ end,
+})
+function Luacurl.new(server, port, ssl)
+ local self = setmetatable({}, Luacurl)
+ self.sockconnected = false
+ self.server = server
+ self.port = port
+ self.ssl = ssl
+ self.cookies = {}
+ return self
+end
+
+function Luacurl:get(method,url,headers,data)
+ core.Info("MAKING SOCKET")
+ if self.sockconnected == false then
+ self.sock = core.tcp()
+ if self.ssl then
+ local r = self.sock:connect_ssl(self.server,self.port)
+ else
+ local r = self.sock:connect(self.server,self.port)
+ end
+ self.sockconnected = true
+ end
+ core.Info("SOCKET MADE")
+ local request = method.." "..url.." HTTP/1.1"
+ if data ~= nil then
+ request = request .. "\r\nContent-Length: "..string.len(data)
+ end
+ if headers ~= null then
+ for h,v in pairs(headers) do
+ request = request .. "\r\n"..h..": "..v
+ end
+ end
+ cookstring = ""
+ for cook,cookval in pairs(self.cookies) do
+ cookstring = cookstring .. cook.."="..cookval.."; "
+ end
+ if string.len(cookstring) > 0 then
+ request = request .. "\r\nCookie: "..cookstring
+ end
+
+ request = request .. "\r\n\r\n"
+ if data and string.len(data) > 0 then
+ request = request .. data
+ end
+--print(request)
+ core.Info("SENDING REQUEST")
+ self.sock:send(request)
+
+-- core.Info("PROCESSING RESPONSE")
+ return processhttpresponse(self.sock)
+end
+
+function processhttpresponse(socket)
+ local res = {}
+core.Info("1")
+ res.status = socket:receive("*l")
+core.Info("2")
+
+ if res.status == nil then
+ core.Info(" processhttpresponse RECEIVING status: NIL")
+ return res
+ end
+ core.Info(" processhttpresponse RECEIVING status:"..res.status)
+ res.headers = {}
+ res.headerslist = {}
+ repeat
+core.Info("3")
+ local header = socket:receive("*l")
+ if header == nil then
+ return "error"
+ end
+ local valuestart = header:find(":")
+ if valuestart ~= nil then
+ local head = header:sub(1,valuestart-1)
+ local value = header:sub(valuestart+2)
+ table.insert(res.headerslist, {head,value})
+ res.headers[head] = value
+ end
+ until header == ""
+ local bodydone = false
+ if res.headers["Connection"] ~= nil and res.headers["Connection"] == "close" then
+-- core.Info("luacurl processresponse with connection:close")
+ res.body = ""
+ repeat
+core.Info("4")
+ local d = socket:receive("*a")
+ if d ~= nil then
+ res.body = res.body .. d
+ end
+ until d == nil or d == 0
+ bodydone = true
+ end
+ if bodydone == false and res.headers["Content-Length"] ~= nil then
+ res.contentlength = tonumber(res.headers["Content-Length"])
+ if res.contentlength == nil then
+ core.Warning("res.contentlength ~NIL = "..res.headers["Content-Length"])
+ end
+--	core.Info("luacurl, contentlength="..res.contentlength)
+ res.body = ""
+ repeat
+ local d = socket:receive(res.contentlength)
+ if d == nil then
+-- core.Info("luacurl, ERROR?: received NIL, expecting "..res.contentlength.." bytes only got "..string.len(res.body).." sofar")
+ return
+ else
+ res.body = res.body..d
+-- core.Info("luacurl, COMPLETE?: expecting "..res.contentlength.." bytes, got "..string.len(res.body))
+ if string.len(res.body) >= res.contentlength then
+-- core.Info("luacurl, COMPLETE?: expecting "..res.contentlength.." bytes, got "..string.len(res.body))
+ break
+ end
+ end
+--	core.Info("processhttpresponse, Loopy, get more body data! to receive complete content length")
+ until false
+ end
+ if res.headers["Transfer-Encoding"] ~= nil and res.headers["Transfer-Encoding"] == "chunked" then
+ local chunksize = 0
+ res.contentlength = 0
+ res.body = ""
+ repeat
+core.Info("5")
+ local chunksizestr = socket:receive("*l")
+ if chunksizestr == nil then
+ break
+ end
+ chunksize = tonumber("0x"..chunksizestr)
+ if chunksize ~= nil then
+ res.contentlength = res.contentlength + chunksize
+ if chunksize ~= 0 then
+ local chunk = socket:receive(chunksize)
+ res.body = res.body .. chunk
+ chunksizestr = socket:receive("*l")
+ if chunksizestr ~= "" then
+ return "ERROR Chunk-end expected."
+ end
+ end
+ else
+ break
+ end
+ until false
+ end
+core.Info("6")
+ return res
+end
+
+function Luacurl:close()
+ if self.sockconnected == true then
+ self.sock:close()
+ self.sockconnected = false
+ end
+end
+
+function print_r_string(object)
+ local res = ""
+ print_r(object,false,function(x) res = res .. x end)
+ return res
+end
+
+core.register_service("fakeserv", "http", function(applet)
+ core.Info("APPLET START")
+ local mc = Luacurl("127.0.0.1",8443, true)
+ local headers = {}
+ local body = ""
+ core.Info("APPLET GET")
+ local res = mc:get("GET", "/", headers, body)
+ core.Info("APPLET GET done")
+ local response = print_r_string(res)
+ applet:add_header("Server", "haproxy/webstats")
+ applet:add_header("Content-Length", string.len(response))
+ applet:add_header("Content-Type", "text/html")
+ applet:start_response()
+ applet:send(response)
+ core.Info("APPLET DONE")
+end)
diff --git a/reg-tests/lua/txn_get_priv.vtc b/reg-tests/lua/txn_get_priv.vtc
new file mode 100644
index 0000000..71e7bb5
--- /dev/null
+++ b/reg-tests/lua/txn_get_priv.vtc
@@ -0,0 +1,35 @@
+varnishtest "Lua: txn:get_priv() scope"
+#REQUIRE_OPTIONS=LUA,OPENSSL
+#REGTEST_TYPE=bug
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/txn_get_priv.lua
+ lua-load ${testdir}/txn_get_priv-print_r.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ frontend fe2
+ mode http
+ bind ":8443" ssl crt ${testdir}/common.pem
+ stats enable
+ stats uri /
+
+ backend b1
+ mode http
+ http-request use-service lua.fakeserv
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/lua/wrong_types_usage.lua b/reg-tests/lua/wrong_types_usage.lua
new file mode 100644
index 0000000..d2401fa
--- /dev/null
+++ b/reg-tests/lua/wrong_types_usage.lua
@@ -0,0 +1,3 @@
+core.register_action("foo", { "http-req" }, function(txn)
+ txn.sc:ipmask(txn.f:src(), 24, 112)
+end)
diff --git a/reg-tests/lua/wrong_types_usage.vtc b/reg-tests/lua/wrong_types_usage.vtc
new file mode 100644
index 0000000..ed76579
--- /dev/null
+++ b/reg-tests/lua/wrong_types_usage.vtc
@@ -0,0 +1,77 @@
+# commit f874a83
+# BUG/MINOR: lua: Segfaults with wrong usage of types.
+#
+# Patrick reported that this simple configuration made haproxy segfaults:
+#
+# global
+# lua-load /tmp/haproxy.lua
+#
+# frontend f1
+# mode http
+# bind :8000
+# default_backend b1
+#
+# http-request lua.foo
+#
+# backend b1
+# mode http
+# server s1 127.0.0.1:8080
+#
+# with this '/tmp/haproxy.lua' script:
+#
+# core.register_action("foo", { "http-req" }, function(txn)
+# txn.sc:ipmask(txn.f:src(), 24, 112)
+# end)
+#
+# This is due to missing initialization of the array of arguments
+# passed to hlua_lua2arg_check() which makes it enter code with
+# corrupted arguments.
+#
+# Thanks a lot to Patrick Hemmer for having reported this issue.
+
+
+varnishtest "Basic LUA test h00000"
+#REQUIRE_OPTIONS=LUA
+#REGTEST_TYPE=bug
+
+feature ignore_unknown_macro
+
+server s1 -repeat 2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/wrong_types_usage.lua
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend b1
+
+ http-request lua.foo
+
+ backend b1
+ mode http
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c0 -connect ${h1_fe1_sock} {
+ txreq -url "/foo"
+ rxresp
+ expect resp.status == 200
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url "/foo"
+ rxresp
+ expect resp.status == 200
+}
+
+client c0 -start
+client c1 -start
+
+client c0 -wait
+client c1 -wait
diff --git a/reg-tests/mailers/healthcheckmail.lua b/reg-tests/mailers/healthcheckmail.lua
new file mode 100644
index 0000000..4cb0e9d
--- /dev/null
+++ b/reg-tests/mailers/healthcheckmail.lua
@@ -0,0 +1,70 @@
+
+local vtc_port1 = 0
+local mailsreceived = 0
+local mailconnectionsmade = 0
+local healthcheckcounter = 0
+
+function RecieveAndCheck(applet, expect)
+ data = applet:getline()
+ if data:sub(1,expect:len()) ~= expect then
+ core.Info("Expected: "..expect.." but got:"..data:sub(1,expect:len()))
+ applet:send("Expected: "..expect.." but got:"..data.."\r\n")
+ return false
+ end
+ return true
+end
+
+core.register_service("mailservice", "tcp", function(applet)
+ core.Info("############# Mailservice Called #############")
+ mailconnectionsmade = mailconnectionsmade + 1
+ applet:send("220 Welcome\r\n")
+ local data
+
+ if RecieveAndCheck(applet, "HELO") == false then
+ applet:set_var("txn.result", "ERROR (step: HELO)")
+ return
+ end
+ applet:send("250 OK\r\n")
+ if RecieveAndCheck(applet, "MAIL FROM:") == false then
+ applet:set_var("txn.result", "ERROR (step: MAIL FROM)")
+ return
+ end
+ applet:send("250 OK\r\n")
+ if RecieveAndCheck(applet, "RCPT TO:") == false then
+ applet:set_var("txn.result", "ERROR (step: RCPT TO)")
+ return
+ end
+ applet:send("250 OK\r\n")
+ if RecieveAndCheck(applet, "DATA") == false then
+ applet:set_var("txn.result", "ERROR (step: DATA)")
+ return
+ end
+ applet:send("354 OK\r\n")
+ core.Info("#### Send your mailbody")
+ local endofmail = false
+ local subject = ""
+ while endofmail ~= true do
+ data = applet:getline() -- BODY CONTENT
+ --core.Info(data)
+ if data:sub(1, 9) == "Subject: " then
+ subject = data
+ end
+ if (data == "\r\n") then
+ data = applet:getline() -- BODY CONTENT
+ core.Info(data)
+ if (data == ".\r\n") then
+ endofmail = true
+ end
+ end
+ end
+ core.Info("#### Body received OK")
+ applet:send("250 OK\r\n")
+
+ if RecieveAndCheck(applet, "QUIT") == false then
+ applet:set_var("txn.result", "ERROR (step: QUIT)")
+ return
+ end
+ applet:send("221 Mail queued for delivery to /dev/null \r\n")
+ core.Info("Mail queued for delivery to /dev/null subject: "..subject)
+ applet:set_var("txn.result", "SUCCESS")
+end)
diff --git a/reg-tests/mailers/healthcheckmail.vtc b/reg-tests/mailers/healthcheckmail.vtc
new file mode 100644
index 0000000..208d350
--- /dev/null
+++ b/reg-tests/mailers/healthcheckmail.vtc
@@ -0,0 +1,60 @@
+varnishtest "Check health-check email alerts"
+#REQUIRE_OPTIONS=LUA
+
+feature ignore_unknown_macro
+
+syslog S1 -level notice {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv1 failed.+check duration: [[:digit:]]+ms.+status: 0/1 DOWN."
+ recv info
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Result=SUCCESS Bytes=[[:digit:]]+"
+} -start
+
+haproxy h1 -conf {
+ global
+ lua-load ${testdir}/mailers.lua
+ lua-load ${testdir}/healthcheckmail.lua
+
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen lisrv
+ mode tcp
+ bind "fd@${lisrv}"
+ tcp-request connection reject
+
+ listen lismtp
+ mode tcp
+ bind "fd@${lismtp}"
+ log ${S1_addr}:${S1_port} daemon
+ log-format "Result=%[var(txn.result)] Bytes=%B"
+ tcp-request content use-service lua.mailservice
+
+ frontend fe1
+ mode http
+ bind "fd@${fe1}"
+ default_backend be1
+
+ backend be1
+ mode http
+ log ${S1_addr}:${S1_port} daemon
+ option httpchk
+ option log-health-checks
+
+ default-server inter 200ms downinter 100ms rise 1 fall 1
+
+ email-alert mailers mymailers
+ email-alert level info
+ email-alert from from@domain.tld
+ email-alert to to@domain.tld
+
+ server srv1 ${h1_lisrv_addr}:${h1_lisrv_port} check
+
+ mailers mymailers
+ mailer smtp1 ${h1_lismtp_addr}:${h1_lismtp_port}
+
+} -start
+
+syslog S1 -wait
diff --git a/reg-tests/mailers/mailers.lua b/reg-tests/mailers/mailers.lua
new file mode 120000
index 0000000..5ea4673
--- /dev/null
+++ b/reg-tests/mailers/mailers.lua
@@ -0,0 +1 @@
+../../examples/lua/mailers.lua \ No newline at end of file
diff --git a/reg-tests/mcli/mcli_show_info.vtc b/reg-tests/mcli/mcli_show_info.vtc
new file mode 100644
index 0000000..3c44461
--- /dev/null
+++ b/reg-tests/mcli/mcli_show_info.vtc
@@ -0,0 +1,27 @@
+varnishtest "Show info of process 1"
+
+feature ignore_unknown_macro
+
+# Do nothing. It is there only to create s1_* macros
+server s1 {
+} -start
+
+haproxy h1 -W -S -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test
+
+ backend test
+ server www1 ${s1_addr}:${s1_port}
+} -start
+
+haproxy h1 -mcli {
+ send "@1 show info"
+ expect ~ ".*\nProcess_num: 1\n.*"
+} -wait
diff --git a/reg-tests/mcli/mcli_start_progs.vtc b/reg-tests/mcli/mcli_start_progs.vtc
new file mode 100644
index 0000000..51b335c
--- /dev/null
+++ b/reg-tests/mcli/mcli_start_progs.vtc
@@ -0,0 +1,36 @@
+varnishtest "Try to start a master CLI with 2 programs"
+#REGTEST_TYPE=bug
+feature cmd "command -v sleep"
+
+feature ignore_unknown_macro
+
+# Do nothing. It is there only to create s1_* macros
+server s1 {
+} -start
+
+haproxy h1 -W -S -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test
+
+ backend test
+ server www1 ${s1_addr}:${s1_port}
+
+ program foo
+ command sleep 10
+
+ program bar
+ command sleep 10
+
+} -start
+
+haproxy h1 -mcli {
+ send "show proc"
+ expect ~ ".*foo.*\n.*bar.*\n"
+} -wait
diff --git a/reg-tests/peers/basic_sync.vtc b/reg-tests/peers/basic_sync.vtc
new file mode 100644
index 0000000..5c0cb41
--- /dev/null
+++ b/reg-tests/peers/basic_sync.vtc
@@ -0,0 +1,120 @@
+vtest "Basic test for peers protocol"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ bind "fd@${A}"
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ tcp-request inspect-delay 100ms
+ tcp-request content track-sc0 url table stkt
+ tcp-request content sc-inc-gpc0(0)
+}
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ bind "fd@${B}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ server C ${h3_C_addr}:${h3_C_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h3 -arg "-L C" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ bind "fd@${C}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table stkt
+ http-request sc-inc-gpc0(0)
+}
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/c1_client"
+ expect_close
+} -start
+
+client c2 -connect ${h1_fe_sock} {
+ txreq -url "/c2_client"
+ expect_close
+} -start
+
+client c3 -connect ${h1_fe_sock} {
+ txreq -url "/c3_client"
+ expect_close
+} -start
+
+client c4 -connect ${h1_fe_sock} {
+ txreq -url "/c4_client"
+ expect_close
+} -start
+
+haproxy h1 -start
+delay 0.2
+haproxy h2 -start
+delay 0.2
+haproxy h3 -start
+delay 0.2
+
+client c1 -wait
+client c2 -wait
+client c3 -wait
+client c4 -wait
+
+delay 2
+
+haproxy h1 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h2 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h3 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
diff --git a/reg-tests/peers/basic_sync_wo_stkt_backend.vtc b/reg-tests/peers/basic_sync_wo_stkt_backend.vtc
new file mode 100644
index 0000000..9f97ff5
--- /dev/null
+++ b/reg-tests/peers/basic_sync_wo_stkt_backend.vtc
@@ -0,0 +1,115 @@
+vtest "Basic test for peers protocol stick-table declared in peers sections"
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ peers peers
+ bind "fd@${A}"
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+
+ frontend fe
+ bind "fd@${fe}"
+ tcp-request inspect-delay 100ms
+ tcp-request content track-sc0 url table peers/stkt
+ tcp-request content sc-inc-gpc0(0)
+}
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers peers
+ bind "fd@${B}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ server C ${h3_C_addr}:${h3_C_port}
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table peers/stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h3 -arg "-L C" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers peers
+ bind "fd@${C}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table peers/stkt
+ http-request sc-inc-gpc0(0)
+}
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/c1_client"
+ expect_close
+} -start
+
+client c2 -connect ${h1_fe_sock} {
+ txreq -url "/c2_client"
+ expect_close
+} -start
+
+client c3 -connect ${h1_fe_sock} {
+ txreq -url "/c3_client"
+ expect_close
+} -start
+
+client c4 -connect ${h1_fe_sock} {
+ txreq -url "/c4_client"
+ expect_close
+} -start
+
+haproxy h1 -start
+delay 0.2
+haproxy h2 -start
+delay 0.2
+haproxy h3 -start
+delay 0.2
+
+client c1 -wait
+client c2 -wait
+client c3 -wait
+client c4 -wait
+
+delay 2
+
+haproxy h1 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h2 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h3 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
diff --git a/reg-tests/peers/common.pem b/reg-tests/peers/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/peers/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/peers/tls_basic_sync.vtc b/reg-tests/peers/tls_basic_sync.vtc
new file mode 100644
index 0000000..95e3d73
--- /dev/null
+++ b/reg-tests/peers/tls_basic_sync.vtc
@@ -0,0 +1,157 @@
+vtest "Basic test for peers protocol over SSL/TLS"
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+#REGTEST_TYPE=slow
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${A}" ssl crt ${testdir}/common.pem
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ tcp-request inspect-delay 100ms
+ tcp-request content track-sc0 url table stkt
+ tcp-request content sc-inc-gpc0(0)
+}
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${B}" ssl crt ${testdir}/common.pem
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ server C ${h3_C_addr}:${h3_C_port}
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h3 -arg "-L C" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${C}" ssl crt ${testdir}/common.pem
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h4 -arg "-L D" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+ stick-table type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000) peers peers
+
+ peers peers
+ bind "fd@${D}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+ server D
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table stkt
+ http-request sc-inc-gpc0(0)
+}
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/c1_client"
+ expect_close
+} -start
+
+client c2 -connect ${h1_fe_sock} {
+ txreq -url "/c2_client"
+ expect_close
+} -start
+
+client c3 -connect ${h1_fe_sock} {
+ txreq -url "/c3_client"
+ expect_close
+} -start
+
+client c4 -connect ${h1_fe_sock} {
+ txreq -url "/c4_client"
+ expect_close
+} -start
+
+haproxy h1 -start
+delay 0.2
+haproxy h2 -start
+delay 0.2
+haproxy h3 -start
+delay 0.2
+haproxy h4 -start
+delay 0.2
+
+client c1 -wait
+client c2 -wait
+client c3 -wait
+client c4 -wait
+
+delay 2
+
+haproxy h1 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h2 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h3 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h4 -cli {
+ send "show table stkt"
+ expect ~ "# table: stkt, type: string, size:1048[0-9]{4}, used:0\n"
+}
+
diff --git a/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc b/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc
new file mode 100644
index 0000000..bf46708
--- /dev/null
+++ b/reg-tests/peers/tls_basic_sync_wo_stkt_backend.vtc
@@ -0,0 +1,151 @@
+vtest "Basic test for peers protocol over SSL/TLS with stick-table declared in peers sections"
+feature ignore_unknown_macro
+
+#REQUIRE_OPTIONS=OPENSSL
+#REGTEST_TYPE=slow
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers peers
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${A}" ssl crt ${testdir}/common.pem
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ tcp-request inspect-delay 100ms
+ tcp-request content track-sc0 url table peers/stkt
+ tcp-request content sc-inc-gpc0(0)
+}
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers peers
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${B}" ssl crt ${testdir}/common.pem
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ server C ${h3_C_addr}:${h3_C_port}
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table peers/stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h3 -arg "-L C" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers peers
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+ default-server ssl crt ${testdir}/common.pem verify none
+ bind "fd@${C}" ssl crt ${testdir}/common.pem
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C
+ server D ${h4_D_addr}:${h4_D_port}
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table peers/stkt
+ http-request sc-inc-gpc0(0)
+}
+
+haproxy h4 -arg "-L D" -conf {
+ defaults
+ mode http
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ backend stkt
+
+ peers peers
+ table stkt type string size 10m store server_id,gpc0,conn_cur,conn_rate(50000)
+ bind "fd@${D}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B ${h2_B_addr}:${h2_B_port}
+ server C ${h3_C_addr}:${h3_C_port}
+ server D
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request track-sc0 url table peers/stkt
+ http-request sc-inc-gpc0(0)
+}
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/c1_client"
+ expect_close
+} -start
+
+client c2 -connect ${h1_fe_sock} {
+ txreq -url "/c2_client"
+ expect_close
+} -start
+
+client c3 -connect ${h1_fe_sock} {
+ txreq -url "/c3_client"
+ expect_close
+} -start
+
+client c4 -connect ${h1_fe_sock} {
+ txreq -url "/c4_client"
+ expect_close
+} -start
+
+haproxy h1 -start
+delay 0.02
+haproxy h2 -start
+delay 0.02
+haproxy h3 -start
+delay 0.02
+haproxy h4 -start
+delay 0.02
+
+client c1 -wait
+client c2 -wait
+client c3 -wait
+client c4 -wait
+
+delay 3
+
+haproxy h1 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h2 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h3 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:4(\n0x[0-9a-f]*: key=/c[1-4]_client use=0 exp=0 shard=0 server_id=0 gpc0=1 conn_rate\\(50000\\)=1 conn_cur=0){4}"
+}
+
+haproxy h4 -cli {
+ send "show table peers/stkt"
+ expect ~ "# table: peers/stkt, type: string, size:1048[0-9]{4}, used:0\n"
+}
+
diff --git a/reg-tests/pki/README b/reg-tests/pki/README
new file mode 100644
index 0000000..df801bf
--- /dev/null
+++ b/reg-tests/pki/README
@@ -0,0 +1,23 @@
+How it was generated:
+
+cfssl gencert -initca root/root-csr.json \
+| cfssljson -bare root/root-ca
+
+cfssl genkey intermediate/intermediate-csr.json \
+| cfssljson -bare intermediate/intermediate-ca
+
+cfssl sign -ca root/root-ca.pem \
+ -ca-key root/root-ca-key.pem \
+ -config config.json \
+ -profile intermediate \
+ intermediate/intermediate-ca.csr \
+| cfssljson -bare intermediate/intermediate-ca
+
+
+
+ cfssl gencert \
+ -ca intermediate/intermediate-ca.pem \
+ -ca-key intermediate/intermediate-ca-key.pem \
+ -config config.json \
+ -profile host \
+ certificates/www.test1.com-csr.json| cfssljson -bare certificates/www.test1.com
diff --git a/reg-tests/pki/certificates/www.test1.com-csr.json b/reg-tests/pki/certificates/www.test1.com-csr.json
new file mode 100644
index 0000000..734452b
--- /dev/null
+++ b/reg-tests/pki/certificates/www.test1.com-csr.json
@@ -0,0 +1,15 @@
+{
+ "CN": "www.test1.com",
+ "hosts": ["www.test1.com"],
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "FR",
+ "L": "Paris",
+ "O": "test1"
+ }
+ ]
+}
diff --git a/reg-tests/pki/certificates/www.test1.com-key.pem b/reg-tests/pki/certificates/www.test1.com-key.pem
new file mode 100644
index 0000000..c46b8fb
--- /dev/null
+++ b/reg-tests/pki/certificates/www.test1.com-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA3fgUyE3616u0t/nSFAZVxHTSl2T6ksIXIK4lTbw4btEJwW4q
+jFfejIhLWvja1r1ct1WONWNKbw0tT61rJwpQTHQRXT5CEaQMdtHM9zOJQQ4v6deJ
+viW2L7FYSxg6MzBG2jsTEnonUxQx0Ku5NG9LDDLydr9PUbz2g8rVmkbQtO51ZWT1
+NHz0AksN6GX9SvbvW3W7jCTnjtg9Pz13s21q3Yt/lbmiuO3vtQYo4ekJvLg/H+0k
+ShhZtukJFfO8ClsPMYT52S9TzVskDXwMxkH+rI6ZmoYc/i72t8sG7+V2lKfKI4MO
+Czq89X7wvHQ4/cFYNdyYVoZsCwv+cxuXmNy7mQIDAQABAoIBAAtHSbcLz00aGmqm
+tPfzgnQjA3hR1zGRzx8H+jlC8RMgaAB+GodbB6HYYwvSTTxQDt/55unri6Ks5rp/
+s0weiAn6c89rFGxVC5UV//YnljfKAsE9BHC29dTii/v01TA4qcD483Ui49Ci3A0g
+TJ7PcN7Dz/IcsmkS0uvLaKMYKg6nXu9UnKkLBqThTiLA5I+eZZ4zX862Yurku8NI
+HwbMtBsICbe1H0Ebdc/PPAShB8pvV3nQMGFjADOEYolaByQAltolADmIc5K9E6wL
+SsHzAjGTjarSYdqjZRuoEtQrWQTG1fnvJZBXB8L1Brv9RbrPWN2TW/A1uhUR4qYd
+wuxB1mkCgYEA9ao05RsJzDVc4qLBvDXuqo1WapwnYUyc8Jeq+r5l67Ota1ykQyiQ
+BJZDM/mdFRzZZjMAAMN9cxsDdY7gp0ebN190F443tSxjvlVOGJ/e8UJ+Au+9WEYM
+xZQo5VquU8XlxfwFYtYANMvr7DB8yEr25S7S2v3jZ70NZQDDR6G+5L8CgYEA506s
+JJM/NfP82e6JtSB9fOEcjEeddPCdf2RkB+E3Ppt7AQX5Aje/8VOSiZkIbOap9oLc
+rBd9GrER2m2pahd9DFkSs1td70IM5CwwgYIKyWkCCkLXyMrLpelFEqZsFvWD14Go
+c29NSDOpVfVJkPr46Gym6dBvy1rCMh+/ZrgsPacCgYAXFU0vEeFTzNfTe9OUacTp
+RaqPDektXVOJz9aDBQzynsl2wxe0L8WIiM37vkKiTzmmPriLWdMZ07n5e9lkHnhZ
+NaDk7boSTf95TO6vpaX/KW5JvFkgSES+8/L7yg+DU58JgWrIJK+URmz+nne7IkAc
+vM+XQC/z+XGewfmXa31SZQKBgGSDpHyUXBCXARa2+LJz9vQQYrZ23G6XrOlR98IQ
+1U/ogrA0JzwWe9tjxLb0gFvKdKtBvKEv59Z0gdyYcNlAcb+u6Vh1aMFyw2VX6pAs
+sYFKl29cEqcXsR1c2/45wZjMgclhd5EKGdw5TumimKBe31Eo/fN29024F9FuSF9b
+wyXbAoGBALGIKzPgV7Tt6SbzcCjJGQHlH/RKBcuFJjJS+Qph3w7K3L5b6Y35zPOY
+3+FxT2Z5wAlOGYeF9Qa8K3/VX1l7Vhktu9EcTqM59fMGuTM0mEgwwdFM4oFgRIau
+wmlIuAFmo7OwlsggHuHJ7lDk+r7AoNVW7l7Gd1JnG4CasvymVc3N
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/pki/certificates/www.test1.com.csr b/reg-tests/pki/certificates/www.test1.com.csr
new file mode 100644
index 0000000..6482270
--- /dev/null
+++ b/reg-tests/pki/certificates/www.test1.com.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICtTCCAZ0CAQAwRTELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMQ4wDAYD
+VQQKEwV0ZXN0MTEWMBQGA1UEAxMNd3d3LnRlc3QxLmNvbTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAN34FMhN+tertLf50hQGVcR00pdk+pLCFyCuJU28
+OG7RCcFuKoxX3oyIS1r42ta9XLdVjjVjSm8NLU+taycKUEx0EV0+QhGkDHbRzPcz
+iUEOL+nXib4lti+xWEsYOjMwRto7ExJ6J1MUMdCruTRvSwwy8na/T1G89oPK1ZpG
+0LTudWVk9TR89AJLDehl/Ur271t1u4wk547YPT89d7Ntat2Lf5W5orjt77UGKOHp
+Cby4Px/tJEoYWbbpCRXzvApbDzGE+dkvU81bJA18DMZB/qyOmZqGHP4u9rfLBu/l
+dpSnyiODDgs6vPV+8Lx0OP3BWDXcmFaGbAsL/nMbl5jcu5kCAwEAAaArMCkGCSqG
+SIb3DQEJDjEcMBowGAYDVR0RBBEwD4INd3d3LnRlc3QxLmNvbTANBgkqhkiG9w0B
+AQsFAAOCAQEAoQLfxYGBPJljiwlODlnvjjgVcM4OwDYs3Qr/clGxwi3zKfGA9ngh
+t1qfjooypzMNnzeU2OEc7A81h3UY6zd/IrAR6D2+c09+kpiXHB5w5rVpTPGnihDZ
+Y0PtD9+vZV8/zPMQYOJApfaNJ3wyhMnNmg6t6L+G322mJS77A1qw5M19a6/38fPe
+DQSCeR+d5fBZj6/3k/Wizl5Mh2zget53SRCtvHsixO9JdJxzHJwNZ2rN+Q8fXk/F
+7kL1g/0bW64mofyMm3Iq0zDs2quj/MPgKUBx9qHRh69zDoWtNby3Brep0Js57Cds
+QLhwxEcJvd/OeNSOr4iXQXj9D3sj9EpTMQ==
+-----END CERTIFICATE REQUEST-----
diff --git a/reg-tests/pki/certificates/www.test1.com.pem b/reg-tests/pki/certificates/www.test1.com.pem
new file mode 100644
index 0000000..8d0f530
--- /dev/null
+++ b/reg-tests/pki/certificates/www.test1.com.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIIDxzCCAq+gAwIBAgIURbbHd6AXFZoZEmNAwQU1IbkeEjswDQYJKoZIhvcNAQEL
+BQAwYzELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVBy
+b3h5IFRlY2hub2xvZ2llczElMCMGA1UEAxMcSEFQcm94eSBUZXN0IEludGVybWVk
+aWF0ZSBDQTAeFw0yMzA5MjAxNjI2MDBaFw0zMzA5MTcxNjI2MDBaMEUxCzAJBgNV
+BAYTAkZSMQ4wDAYDVQQHEwVQYXJpczEOMAwGA1UEChMFdGVzdDExFjAUBgNVBAMT
+DXd3dy50ZXN0MS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDd
++BTITfrXq7S3+dIUBlXEdNKXZPqSwhcgriVNvDhu0QnBbiqMV96MiEta+NrWvVy3
+VY41Y0pvDS1PrWsnClBMdBFdPkIRpAx20cz3M4lBDi/p14m+JbYvsVhLGDozMEba
+OxMSeidTFDHQq7k0b0sMMvJ2v09RvPaDytWaRtC07nVlZPU0fPQCSw3oZf1K9u9b
+dbuMJOeO2D0/PXezbWrdi3+VuaK47e+1Bijh6Qm8uD8f7SRKGFm26QkV87wKWw8x
+hPnZL1PNWyQNfAzGQf6sjpmahhz+Lva3ywbv5XaUp8ojgw4LOrz1fvC8dDj9wVg1
+3JhWhmwLC/5zG5eY3LuZAgMBAAGjgZAwgY0wDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
+JQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFc8xugp1dGZ
++KqMGcfUmbTYEEurMB8GA1UdIwQYMBaAFHr9tGo8KeCWI09L6wA8+zwp7FEMMBgG
+A1UdEQQRMA+CDXd3dy50ZXN0MS5jb20wDQYJKoZIhvcNAQELBQADggEBAIzfQ//U
+1jqTmdjUNZHOqFvCcc06W9vDUJbeKSyStZnE/J3WHrJeLLNaUV00G93zLLRs6slT
+kZ4eEmUZlPGGz6hFK6d8jSIIxyaw/O5l9Ix/Z5cUMiScHNbiLBiyhy6AvF/NcJYl
+xQ6EUIcOqMxEL0dSRq6ckGZvnyFusPuNgfNeIy0Y1eI30En1mPNGQiu2DP7Ex4Ht
+dAiHT6ITXk43wHyXDqXpt97Rdbq1dNNP6sSkQ8r0IeDi5f/qSsBGbR1a9UoKkJOZ
+OO6IGhEb2XCWc2xquJpUHCOXhzaXj/SmxCDpWVW5tdKNZ96gUlp2Wtf0Rp25yFge
+4mCry3J674p8Oto=
+-----END CERTIFICATE-----
diff --git a/reg-tests/pki/config.json b/reg-tests/pki/config.json
new file mode 100644
index 0000000..4e4af21
--- /dev/null
+++ b/reg-tests/pki/config.json
@@ -0,0 +1,27 @@
+{
+ "signing": {
+ "default": {
+ "expiry": "87600h"
+ },
+ "profiles": {
+ "intermediate": {
+ "usages": ["cert sign", "crl sign"],
+ "expiry": "87600h",
+ "ca_constraint": {
+ "is_ca": true,
+ "max_path_len": 0,
+ "max_path_len_zero": true
+ }
+ },
+ "host": {
+ "usages": [
+ "signing",
+ "digital signature",
+ "key encipherment",
+ "server auth"
+ ],
+ "expiry": "87600h"
+ }
+ }
+ }
+}
diff --git a/reg-tests/pki/intermediate/intermediate-ca-key.pem b/reg-tests/pki/intermediate/intermediate-ca-key.pem
new file mode 100644
index 0000000..0d5203a
--- /dev/null
+++ b/reg-tests/pki/intermediate/intermediate-ca-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAldl9M8BbM0iHUQzcFGU7rcEGlfnEMxvjI4/Y23F72Zkewjl1
+NEaBnKCzBlBXguAp3sHzrw4RtoMY6NPC8Mkjbx+hLWdqWaGeIf+xBqYKl4wRf1ut
+HInV6/aZMszVeQCEqbMZFFsaL4Dwzqkgxo4uafyC+wR6R9q3M4bfBBqYkNctr8vj
+893YYj5xvbA4tG0gfG4Chwv4ODL4shrWreyUbJuhT1dklw6NNrvTkfgVZuFUngpX
+R0jqJJe073Eb/xGS5j1XJmHf2mpT1pDEtYVTLLRyh5xvtHPJStUOVO+ZX1fZ+i1a
+YCdB9ST7jBzn+uE5IU2DWWjAq3swH82Ow86YTwIDAQABAoIBAHGPiwSnJ/ZrZrET
+r819EocRG26ZBAk/fxk6MDvNevBizB6g8KOXMy6B0mpchJVYUCLSlVv6q43HOB9q
+K/fSVpomPD1ue/hFNNPLNFs3hhtnOPNthHl2M7pI/byz4qt6qw73/W+hwuxxKQ1U
+utP+eEo15QVaVVkS9XeFJIm1ph1M7BXdfBRweyQNrZpbpIc7OsHYyZbXsXRbL/ay
+4FnXH4gXHzS/iitbBfGn3hEKD3hOTwNWuZD5DM+QsmaXzJ3MB3vftSYPKfjlcSXT
+o9WRHR5uEy/a9z1GKJuTbF12cV1Sjt5OtSTF+AP7+tY6g//eUft9T8nKERIliQe/
+gVjsY9ECgYEAwC0xp2kebwWtC/oCHnFSJdwp2JTwNwWXtsDPxSf7W+Bq4CgtRJvR
+OoZIagQ7bNJGcsiTmCbI1+ckG/AFMIMEG4sE6BMAQ49mB6fm4CRNIli6Nyc6v21/
+3lY9DE7oi0Axb6nyXlohV0X/XFjqkv4eAY9EjNwSw9oqF6t4U5cfJBMCgYEAx52q
+XB5Q8ab+ZRgXy3eMPheNbo2Zx2yJ+nDccEn1LCPKCmXryvWoA1akF5qUxVnDXls+
+SmQU/oQZyXlCz+u3Am8HJvoKXc+KTWHCt+SKiM5k65tbmcgwFLfQBCnI61yE5SD0
+xgyK+r7g2OMm0Niraigykrmc0Lm8vrLnrFfRqlUCgYEAi/1f+k98t+suj1bjTEHI
+UYP7h6WXBr2wMpRpxXjGhbsIn5kkwKGw3C0NewzyLOdQIjUyLtex3nSlvSQu45PM
+3jN6s4OD4bMH4/PPHt/SwH5Zx+tvxBqD1vaDKgAhLa7pVCo8P7bwKp720dKDregm
++4Pdr65wjPLTTsK9uEaRjLkCgYBkrKsIDK4sQlnZaL5Pevq7Miu9jyL4xlAMhDFZ
+XKOPYHl8pwy17xGOpXrbALZ5lh0HkKMo57MC7mMHDuZYcPx6+L41tTPXPl31SxHm
+pxgLiXSys0cOlz1Qpaf/AYf41E+eM/HflOHfNre2C1I/UJebUay3/W8Ogi6kSxy2
++g+fhQKBgAgXincPNVyvlxbgrYsifwvdX5pZMdpL5OJ59tSf7wTkQTN9K+t1utff
+jU9erDntbetIE2r1SxJvgFLWbmZE4j8ORA3dcJUesToZM+4vKtQlzVS0w/iNZood
+I0NlWjfL0QTkMqBVjU2Inw/nkLlRqgZGP77BVD+KEqbqDBxwC9Ms
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/pki/intermediate/intermediate-ca.csr b/reg-tests/pki/intermediate/intermediate-ca.csr
new file mode 100644
index 0000000..02e4cbf
--- /dev/null
+++ b/reg-tests/pki/intermediate/intermediate-ca.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICqDCCAZACAQAwYzELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYD
+VQQKExRIQVByb3h5IFRlY2hub2xvZ2llczElMCMGA1UEAxMcSEFQcm94eSBUZXN0
+IEludGVybWVkaWF0ZSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJXZfTPAWzNIh1EM3BRlO63BBpX5xDMb4yOP2Ntxe9mZHsI5dTRGgZygswZQV4Lg
+Kd7B868OEbaDGOjTwvDJI28foS1nalmhniH/sQamCpeMEX9brRyJ1ev2mTLM1XkA
+hKmzGRRbGi+A8M6pIMaOLmn8gvsEekfatzOG3wQamJDXLa/L4/Pd2GI+cb2wOLRt
+IHxuAocL+Dgy+LIa1q3slGyboU9XZJcOjTa705H4FWbhVJ4KV0dI6iSXtO9xG/8R
+kuY9VyZh39pqU9aQxLWFUyy0coecb7RzyUrVDlTvmV9X2fotWmAnQfUk+4wc5/rh
+OSFNg1lowKt7MB/NjsPOmE8CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQCQwtG4
+0Lvcpbp3YiYQGRkhP/2SEipTcQ+aVQXiZYpB0gHUZMDJ9xP1wZ+E6lhE9NGQp6Xj
+ewLCup/4UEAEDM6vSbYafpgN3pTJiSjhy548xkKwrm/UvzG68IpfGDgQzwaEtrux
+SmM/v5EH0p3THYIhysqdgJu3M0nhrRiuBx1w/MRPSQW58U2JarOQwFF9OUYPcsAV
+hQJWp8waWEyolMhaCx8z+sUngbV9c0PdsillV++4PMOCl/1RdswufYiYBcvo5js6
+cM0V8y3nFcJnki3NYHe6fZ8J576Lptfwfzn3hlas3vG2kTetEbWaKR5iPzEMibtZ
+AFpgLkgbAY7pDtry
+-----END CERTIFICATE REQUEST-----
diff --git a/reg-tests/pki/intermediate/intermediate-ca.pem b/reg-tests/pki/intermediate/intermediate-ca.pem
new file mode 100644
index 0000000..9856300
--- /dev/null
+++ b/reg-tests/pki/intermediate/intermediate-ca.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDuTCCAqGgAwIBAgIUQQtTP1aDfRAnDdjKkaUT2py2fmswDQYJKoZIhvcNAQEL
+BQAwYjELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVBy
+b3h5IFRlY2hub2xvZ2llczEkMCIGA1UEAxMbSEFQcm94eSBSb290IFRlc3QgQXV0
+aG9yaXR5MB4XDTIzMDkyMDE2MjQwMFoXDTMzMDkxNzE2MjQwMFowYzELMAkGA1UE
+BhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVByb3h5IFRlY2hub2xv
+Z2llczElMCMGA1UEAxMcSEFQcm94eSBUZXN0IEludGVybWVkaWF0ZSBDQTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJXZfTPAWzNIh1EM3BRlO63BBpX5
+xDMb4yOP2Ntxe9mZHsI5dTRGgZygswZQV4LgKd7B868OEbaDGOjTwvDJI28foS1n
+almhniH/sQamCpeMEX9brRyJ1ev2mTLM1XkAhKmzGRRbGi+A8M6pIMaOLmn8gvsE
+ekfatzOG3wQamJDXLa/L4/Pd2GI+cb2wOLRtIHxuAocL+Dgy+LIa1q3slGyboU9X
+ZJcOjTa705H4FWbhVJ4KV0dI6iSXtO9xG/8RkuY9VyZh39pqU9aQxLWFUyy0coec
+b7RzyUrVDlTvmV9X2fotWmAnQfUk+4wc5/rhOSFNg1lowKt7MB/NjsPOmE8CAwEA
+AaNmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0O
+BBYEFHr9tGo8KeCWI09L6wA8+zwp7FEMMB8GA1UdIwQYMBaAFB4wopglmOnetKea
+jkq9OymofpZfMA0GCSqGSIb3DQEBCwUAA4IBAQDQamqX11VfTB5USqAL1E0/qHqG
+WivOWX2K5lCfbalg7Fqlm1d0hKffPODD3RAOU7UhP9B1xc9NRBgbNPEEMu/O92PS
+C5H8WxGoKfa4TuX8JfhhpUGpRelFFHU7mVVyMh7RDQmfFdxC8ej8+iTvBXtacMhh
+VWokTIakyFCj7u/qcQKhpzoTDq9VRE+lmOFYzMtHqk+qGVXDgG8z/e7z5UP98ttI
+XXsQ50Mi6ow8P118eRjJI01DZUu8QnYt/+jhqAFipS2OjyV4Jlq+wGZ4xB9gonlf
+lTdqR19oFnIFi30boIB33MRxz9qWZy7ay5jUV6jGauymQI/1waPtv/KIjXzt
+-----END CERTIFICATE-----
diff --git a/reg-tests/pki/intermediate/intermediate-csr.json b/reg-tests/pki/intermediate/intermediate-csr.json
new file mode 100644
index 0000000..2cbcaef
--- /dev/null
+++ b/reg-tests/pki/intermediate/intermediate-csr.json
@@ -0,0 +1,14 @@
+{
+ "CN": "HAProxy Test Intermediate CA",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "FR",
+ "L": "Paris",
+ "O": "HAProxy Technologies"
+ }
+ ]
+}
diff --git a/reg-tests/pki/root/root-ca-key.pem b/reg-tests/pki/root/root-ca-key.pem
new file mode 100644
index 0000000..a51d46c
--- /dev/null
+++ b/reg-tests/pki/root/root-ca-key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA6lV5gcewWGoyPcQWuK75SHyFHBCJVIaTQhDrFTdUTAZeAELk
+cbRhaDoXaPZoisDnWIH2WrfJLmVWO4MfYdx4rzAd5yh5GDeII60bSYyDBPgmC0TA
+3RdvwjT1Dnwgsi/S/+BmpIsjoCwvmKuXXYkFX18wScYbAqEKD/8eWnqUysY2epBY
+0Uv8v2N6I2qtMXl0JUFm65+XZmUmEQe25u9DEMDm+8aPCwCM+wPsFaWXanOHjSe6
+IeG/1E2AoWB5MN3td40zuoGHFF/UWi5nMQL6XbBolv7WOIL92tM8pPfO0QNMSe3i
+CqxnkNyqEX9is8+2O5J/WFdVyr9JzGg6IEQ9wwIDAQABAoIBACHtyE8LK7RRg/6/
+co7A/uUI6nj6x/eOGKo7Kt6CiVkMnvWouczg0i22I4eWDeLxNU3yW0odTCrEj2zE
+enm/41VyQRIkIQxplfOcb9QBXUMN7uw4wzf/2bWj10uGm3FRTVqxEguG/dyGKUoQ
+pjTrzvRVIve7kFOcH4nbZXiyQPLu4yakdS/o5jKjpkhhzbSxAJ62mrC41GEWmQqb
+419iVsyj5iSV4ixQAowpBfkIu33ZTiNFd5Pd/OjPV90ID9ebwcBhiXUAvZvU8K9M
+W0d7fjqeCooNuVBFwi5E/Bxa3zYy8Bo6SCSv9U6RnTmPOdId0LBx8ClI/sZvZo+n
+cEwg5dECgYEA7mSny6+jPp5D6JRfyX5X3VQMagBKCY6UREk0x79C5XDGxHrIG3Dk
+xAxSpyjYMFV4Ju+5pL5o71ej6RzslHSVcVVufudD0CxBOOPWoK/7EuceGkwoXuPu
+cralaA8iyamArT8DRNgDELoxTVp30Ihfv3OCx0MRe2P27SgrkGtFhMkCgYEA+6QR
+uanKdopl4H9Pylp8xLcqpO7oh+IKnNg6liUsDO61c/5BQdI4FteOb68+wKmIGSIR
+R/3NIOpnlYLINH4+QBM/OE3oT4lGYwQ/zYXW1n2bCuyYKaHRbAjDqMoys5iZu9nw
+nspcBMhKuvk5EQOLuu+YWW+loCSgWkr94pAHcCsCgYAfLlM0NRjpC9JeDWop9LiR
+XnjxNHB6p4eFFv4sUjvxvrqA4H5j+HQzoFj/PDxqtgMtNJXXkoAvLgtmZj4bfNQ/
+Kql1N9KK2/777fEYu2vfoRMy5VsybiW2I2WnIrOuJNmLz34h9w8PlQr1zyt0vyGm
+7PjiD0euF/koM+Te0ukY4QKBgQDqZ5L7V+Z5NcJgtTcmT9QZimX27XU93pn0MJei
+dU3rXhE404cGQURuQq0z7BKQdVuJzeo7HXiPSMrkwsar300tcsBZOnlEXaNraPHm
+CN5vPuHF805quYnTEPbxrCg+k8/g5Mr3n5jH+6thL/awmpT2tWk9JnsqCfQ1J9YH
+zhKfsQKBgQDlg8uKdBrQwM7bWeFEwlwRMMIBTcfvBqXtthckU8BmxwrQ8bk8az03
+3pQp3BiEYlHyI0F+ITDMci6fTega5ehJmQWAdFO+hiBPVkU7rdmukB5gCU1aoHcz
+VwGZ5ZXe60aew4jILdkmrPC4AJYQ0dil0yN6Q8pucSfOThhgiq2gFg==
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/pki/root/root-ca.csr b/reg-tests/pki/root/root-ca.csr
new file mode 100644
index 0000000..f9817b8
--- /dev/null
+++ b/reg-tests/pki/root/root-ca.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICyTCCAbECAQAwYjELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYD
+VQQKExRIQVByb3h5IFRlY2hub2xvZ2llczEkMCIGA1UEAxMbSEFQcm94eSBSb290
+IFRlc3QgQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+6lV5gcewWGoyPcQWuK75SHyFHBCJVIaTQhDrFTdUTAZeAELkcbRhaDoXaPZoisDn
+WIH2WrfJLmVWO4MfYdx4rzAd5yh5GDeII60bSYyDBPgmC0TA3RdvwjT1Dnwgsi/S
+/+BmpIsjoCwvmKuXXYkFX18wScYbAqEKD/8eWnqUysY2epBY0Uv8v2N6I2qtMXl0
+JUFm65+XZmUmEQe25u9DEMDm+8aPCwCM+wPsFaWXanOHjSe6IeG/1E2AoWB5MN3t
+d40zuoGHFF/UWi5nMQL6XbBolv7WOIL92tM8pPfO0QNMSe3iCqxnkNyqEX9is8+2
+O5J/WFdVyr9JzGg6IEQ9wwIDAQABoCIwIAYJKoZIhvcNAQkOMRMwETAPBgNVHRMB
+Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQDC6yA0gkpzHntF8lzeHTUMLlEy
+s7tIIStZcmhVjd4u2oNC6fSGoygVgITemb0aQXz49oWFPHRLbZrsDva2jJQfA3nU
+MuHWC9FtFEhkFMbHj9apr+vEDhUXPYc9jr4j2lYwhVcvT5XhL+fN6Hl2WHtJJhyP
+nYbUQHlLdKLE6aNfrb6pyXZ3G0nLrH+Zs0FibnSW0tMk7MTS2eHWn793VxuPpCHg
++oCshOk6CdWUY9p5lC4NynNXUeqP6xRmQxzeVneNouTLHsGdY2lZVBdOgIvjQMAg
+ZPIC6kLgFI50A8SDMIuIGwQ1imS5govlcicemXh2ee/IBR6PmwF5MG9SVZ3h
+-----END CERTIFICATE REQUEST-----
diff --git a/reg-tests/pki/root/root-ca.pem b/reg-tests/pki/root/root-ca.pem
new file mode 100644
index 0000000..4e3c903
--- /dev/null
+++ b/reg-tests/pki/root/root-ca.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIUaa8nhmRvY/xmnTuVk0gnbpyms8MwDQYJKoZIhvcNAQEL
+BQAwYjELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVBy
+b3h5IFRlY2hub2xvZ2llczEkMCIGA1UEAxMbSEFQcm94eSBSb290IFRlc3QgQXV0
+aG9yaXR5MB4XDTIzMDkyMDE2MjMwMFoXDTQzMDkxNTE2MjMwMFowYjELMAkGA1UE
+BhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVByb3h5IFRlY2hub2xv
+Z2llczEkMCIGA1UEAxMbSEFQcm94eSBSb290IFRlc3QgQXV0aG9yaXR5MIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6lV5gcewWGoyPcQWuK75SHyFHBCJ
+VIaTQhDrFTdUTAZeAELkcbRhaDoXaPZoisDnWIH2WrfJLmVWO4MfYdx4rzAd5yh5
+GDeII60bSYyDBPgmC0TA3RdvwjT1Dnwgsi/S/+BmpIsjoCwvmKuXXYkFX18wScYb
+AqEKD/8eWnqUysY2epBY0Uv8v2N6I2qtMXl0JUFm65+XZmUmEQe25u9DEMDm+8aP
+CwCM+wPsFaWXanOHjSe6IeG/1E2AoWB5MN3td40zuoGHFF/UWi5nMQL6XbBolv7W
+OIL92tM8pPfO0QNMSe3iCqxnkNyqEX9is8+2O5J/WFdVyr9JzGg6IEQ9wwIDAQAB
+o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+HjCimCWY6d60p5qOSr07Kah+ll8wDQYJKoZIhvcNAQELBQADggEBAIA+oiN1UsQi
+XQTM4YnBNMU0X0TDBG2tzg48rJBbyJrhYynd4EgHfxGIuNsgGlhzF31QIbEDf4Et
+Rs7SSPhozBaQ81Nnk4HqHH3kSoXt4q3FQQa6kmH6vVcQ121lMa29f/VvcRYwzgzi
+p29aSVpCsz0PTL7IWgXs2AiH0VoK00ULTzaxIJbeByY+hZkaUmTnTDRxSv5z7i4f
+GoJikS8wUogc0QFSg6lwA0B5lGO6JBHpZ8iFErGPKwZXcwKQfTFA6v6vFQspIwF0
+GllXWObls+I4clEqtr0QK4V00GSS/YmiTDNOGMYv61RUQPmrpsomofznbS1jQfLG
+QskPnU6PKhQ=
+-----END CERTIFICATE-----
diff --git a/reg-tests/pki/root/root-csr.json b/reg-tests/pki/root/root-csr.json
new file mode 100644
index 0000000..90f6274
--- /dev/null
+++ b/reg-tests/pki/root/root-csr.json
@@ -0,0 +1,17 @@
+{
+ "CN": "HAProxy Root Test Authority",
+ "key": {
+ "algo": "rsa",
+ "size": 2048
+ },
+ "names": [
+ {
+ "C": "FR",
+ "L": "Paris",
+ "O": "HAProxy Technologies"
+ }
+ ],
+ "ca": {
+ "expiry": "175200h"
+ }
+}
diff --git a/reg-tests/sample_fetches/cond_set_var.vtc b/reg-tests/sample_fetches/cond_set_var.vtc
new file mode 100644
index 0000000..67786ae
--- /dev/null
+++ b/reg-tests/sample_fetches/cond_set_var.vtc
@@ -0,0 +1,362 @@
+#REGTEST_TYPE=devel
+
+# This regtest checks the multiple conditions that can be specified to a
+# set-var call (be it a converter or HTTP or TCP action). It mainly uses the
+# actions but since the "var_set" function is used for the converter and for
+# the actions, it should be enough to focus on one type of set-var.
+# Among the variables that can be defined and the multiple scopes they can
+# have, the proc scope is the only one having a specific behaviour. Proc scoped
+# variables are created during init when a variable of any other scope is
+# created during the first successful set-var.
+# Since this test uses variables of different scopes, the validation cannot be
+# based on the "show var" command of the CLI because it only displays process
+# variables. It then always follows the same logic, for every sub test case :
+# an HTTP header is added to the response in which we add a concatenation of
+# all the tested variables (which exist in the request/response scope). These
+# HTTP headers are then tested upon an expected result (which changes for every
+# test case).
+#
+
+varnishtest "Test the conditional set-var converter and action"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 10 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ set-var proc.int12 int(12)
+ set-var proc.int5 var(proc.str60,60),div(proc.int12)
+
+ defaults
+ mode http
+ timeout connect 100ms
+ timeout client 1s
+ timeout server 1s
+
+ listen main-fe
+ bind "fd@${mainfe}"
+
+ use_backend ifexists_be if { path_beg /ifexists }
+ use_backend ifnotexists_be if { path_beg /ifnotexists }
+ use_backend ifempty_be if { path_beg /ifempty }
+ use_backend ifnotempty_be if { path_beg /ifnotempty }
+ use_backend ifset_be if { path_beg /ifset }
+ use_backend ifnotset_be if { path_beg /ifnotset }
+ use_backend ifgt_be if { path_beg /ifgt }
+ use_backend iflt_be if { path_beg /iflt }
+ use_backend combined_be if { path_beg /combined }
+ use_backend converter_be if { path_beg /converter }
+
+
+ backend ifexists_be
+ server s1 ${s1_addr}:${s1_port}
+
+ # proc scope variables are created during configuration parsing so the
+ # ifexists condition will always be true for those variables
+ http-response set-var(proc.ifexists_proc,ifexists) var(proc.int12)
+ http-response set-var(sess.ifexists_sess,ifexists) var(proc.int5)
+ http-response set-var(res.ifexists_res,ifexists) str(toto)
+
+ http-response set-header x-var "proc.ifexists=%[var(proc.ifexists_proc)] sess.ifexists=%[var(sess.ifexists_sess)] res.ifexists=%[var(res.ifexists_res)]"
+
+
+ backend ifnotexists_be
+ server s1 ${s1_addr}:${s1_port}
+
+ http-response set-header x-var-init "proc.ifnotexists=%[var(proc.ifnotexists_proc)] sess.ifnotexists=%[var(sess.ifnotexists_sess)] res.ifnotexists=%[var(res.ifnotexists_res)]"
+
+ http-response set-var(proc.ifnotexists_proc,ifnotexists) var(proc.int12)
+ http-response set-var(sess.ifnotexists_sess,ifnotexists) var(proc.int5)
+ http-response set-var(res.ifnotexists_res,ifnotexists) str(toto)
+
+ http-response set-header x-var "proc.ifnotexists=%[var(proc.ifnotexists_proc)] sess.ifnotexists=%[var(sess.ifnotexists_sess)] res.ifnotexists=%[var(res.ifnotexists_res)]"
+
+
+ backend ifempty_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.ifempty_proc) str(ifempty_proc)
+ http-response set-var(sess.ifempty_sess) bin(6966656d7074795f73657373) #ifempty_sess
+ http-response set-var(res.ifempty_res) str(ifempty_res)
+ http-response set-var(res.ifempty_res_int) int(5)
+
+ http-response set-header x-var-init "proc.ifempty=%[var(proc.ifempty_proc)] sess.ifempty=%[var(sess.ifempty_sess)] res.ifempty=%[var(res.ifempty_res)] res.ifempty_res_int=%[var(res.ifempty_res_int)]"
+
+ # None of those set-var calls should actually change their respective variables
+ # since none of the samples is empty
+ http-response set-var(proc.ifempty_proc,ifempty) int(12)
+ http-response set-var(sess.ifempty_sess,ifempty) bool(false)
+ http-response set-var(res.ifempty_res,ifempty) bin(746F746F) # "toto"
+ http-response set-var(res.ifempty_res_int,ifempty) str(toto)
+
+ http-response set-header x-var1 "proc.ifempty=%[var(proc.ifempty_proc)] sess.ifempty=%[var(sess.ifempty_sess)] res.ifempty=%[var(res.ifempty_res)] res.ifempty_res_int=%[var(res.ifempty_res_int)]"
+
+ http-response set-var(proc.ifempty_proc,ifempty) str()
+ http-response set-var(sess.ifempty_sess,ifempty) str()
+ http-response set-var(res.ifempty_res,ifempty) str()
+ http-response set-var(res.ifempty_res_int,ifempty) int(7) # should not work, scalar types are never empty
+
+ http-response set-header x-var2 "proc.ifempty=%[var(proc.ifempty_proc)] sess.ifempty=%[var(sess.ifempty_sess)] res.ifempty=%[var(res.ifempty_res)] res.ifempty_res_int=%[var(res.ifempty_res_int)]"
+
+
+ backend ifnotempty_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.ifnotempty_proc) str(ifnotempty_proc)
+ http-response set-var(sess.ifnotempty_sess) bin(69666e6f74656d7074795f73657373) # "ifnotempty_sess"
+ http-response set-var(res.ifnotempty_res) str(ifnotempty_res)
+ http-response set-var(res.ifnotempty_res_int) int(5)
+
+ http-response set-header x-var-init "proc.ifnotempty=%[var(proc.ifnotempty_proc)] sess.ifnotempty=%[var(sess.ifnotempty_sess)] res.ifnotempty=%[var(res.ifnotempty_res)] res.ifnotempty_res_int=%[var(res.ifnotempty_res_int)]"
+
+ # All of those set-var calls should actually change their respective variables
+ # since none of the samples is empty
+ http-response set-var(proc.ifnotempty_proc,ifnotempty) str(toto)
+ http-response set-var(sess.ifnotempty_sess,ifnotempty) bin(746F746F) # "toto"
+ http-response set-var(res.ifnotempty_res,ifnotempty) str(tata)
+ http-response set-var(res.ifnotempty_res_int,ifnotempty) int(6)
+
+ http-response set-header x-var1 "proc.ifnotempty=%[var(proc.ifnotempty_proc)] sess.ifnotempty=%[var(sess.ifnotempty_sess)] res.ifnotempty=%[var(res.ifnotempty_res)] res.ifnotempty_res_int=%[var(res.ifnotempty_res_int)]"
+
+ # The first three variables should remain unchanged.
+ http-response set-var(proc.ifnotempty_proc,ifnotempty) str()
+ http-response set-var(sess.ifnotempty_sess,ifnotempty) str()
+ http-response set-var(res.ifnotempty_res,ifnotempty) str()
+ http-response set-var(res.ifnotempty_res_int,ifnotempty) int(7) # should not work
+
+ http-response set-header x-var2 "proc.ifnotempty=%[var(proc.ifnotempty_proc)] sess.ifnotempty=%[var(sess.ifnotempty_sess)] res.ifnotempty=%[var(res.ifnotempty_res)] res.ifnotempty_res_int=%[var(res.ifnotempty_res_int)]"
+
+
+ backend ifset_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.ifset_proc) str(ifset_proc)
+ http-response set-var(sess.ifset_sess) bin(69667365745f73657373) # "ifset_sess"
+ http-response set-var(res.ifset_res) str(ifset_res)
+ http-response set-var(res.ifset_res_int) int(5)
+
+ http-response set-header x-var-init "proc.ifset=%[var(proc.ifset_proc)] sess.ifset=%[var(sess.ifset_sess)] res.ifset=%[var(res.ifset_res)] res.ifset_res_int=%[var(res.ifset_res_int)]"
+
+ # All those set-var calls should succeed
+ http-response set-var(proc.ifset_proc,ifset) str(toto)
+ http-response set-var(sess.ifset_sess,ifset) bin(746F746F) # "toto"
+ http-response set-var(res.ifset_res,ifset) int(123)
+ http-response set-var(res.ifset_res_int,ifset) str(azerty)
+
+ http-response set-header x-var1 "proc.ifset=%[var(proc.ifset_proc)] sess.ifset=%[var(sess.ifset_sess)] res.ifset=%[var(res.ifset_res)] res.ifset_res_int=%[var(res.ifset_res_int)]"
+
+ http-response unset-var(proc.ifset_proc)
+ http-response unset-var(sess.ifset_sess)
+ http-response unset-var(res.ifset_res)
+ http-response unset-var(res.ifset_res_int)
+
+ http-response set-header x-var2 "proc.ifset=%[var(proc.ifset_proc)] sess.ifset=%[var(sess.ifset_sess)] res.ifset=%[var(res.ifset_res)] res.ifset_res_int=%[var(res.ifset_res_int)]"
+
+ # None of those set-var calls should succeed
+ http-response set-var(proc.ifset_proc,ifset) str(toto)
+ http-response set-var(sess.ifset_sess,ifset) bin(746F746F) # "toto"
+ http-response set-var(res.ifset_res,ifset) int(123)
+ http-response set-var(res.ifset_res_int,ifset) str(azerty)
+
+ http-response set-header x-var3 "proc.ifset=%[var(proc.ifset_proc)] sess.ifset=%[var(sess.ifset_sess)] res.ifset=%[var(res.ifset_res)] res.ifset_res_int=%[var(res.ifset_res_int)]"
+
+
+ backend ifnotset_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.ifnotset_proc) str(ifnotset_proc)
+ http-response set-var(sess.ifnotset_sess) bin(69666e6f747365745f73657373) # "ifnotset_sess"
+ http-response set-var(res.ifnotset_res) str(ifnotset_res)
+ http-response set-var(res.ifnotset_res_int) int(5)
+
+ http-response set-header x-var-init "proc.ifnotset=%[var(proc.ifnotset_proc)] sess.ifnotset=%[var(sess.ifnotset_sess)] res.ifnotset=%[var(res.ifnotset_res)] res.ifnotset_res_int=%[var(res.ifnotset_res_int)]"
+
+ # None of those set-var calls should succeed
+ http-response set-var(proc.ifnotset_proc,ifnotset) str(toto)
+ http-response set-var(sess.ifnotset_sess,ifnotset) bin(746F746F) # "toto"
+ http-response set-var(res.ifnotset_res,ifnotset) int(123)
+ http-response set-var(res.ifnotset_res_int,ifnotset) str(azerty)
+
+ http-response set-header x-var1 "proc.ifnotset=%[var(proc.ifnotset_proc)] sess.ifnotset=%[var(sess.ifnotset_sess)] res.ifnotset=%[var(res.ifnotset_res)] res.ifnotset_res_int=%[var(res.ifnotset_res_int)]"
+
+ http-response unset-var(proc.ifnotset_proc)
+ http-response unset-var(sess.ifnotset_sess)
+ http-response unset-var(res.ifnotset_res)
+ http-response unset-var(res.ifnotset_res_int)
+
+ http-response set-header x-var2 "proc.ifnotset=%[var(proc.ifnotset_proc)] sess.ifnotset=%[var(sess.ifnotset_sess)] res.ifnotset=%[var(res.ifnotset_res)] res.ifnotset_res_int=%[var(res.ifnotset_res_int)]"
+
+ # All of those set-var calls should succeed
+ http-response set-var(proc.ifnotset_proc,ifnotset) str(toto)
+ http-response set-var(sess.ifnotset_sess,ifnotset) bin(746F746F) # "toto"
+ http-response set-var(res.ifnotset_res,ifnotset) int(123)
+ http-response set-var(res.ifnotset_res_int,ifnotset) str(azerty)
+
+ http-response set-header x-var3 "proc.ifnotset=%[var(proc.ifnotset_proc)] sess.ifnotset=%[var(sess.ifnotset_sess)] res.ifnotset=%[var(res.ifnotset_res)] res.ifnotset_res_int=%[var(res.ifnotset_res_int)]"
+
+ backend ifgt_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.ifgt_proc) str(ifgt_proc)
+ http-response set-var(sess.ifgt_sess) bin(696667745f73657373) # "ifgt_sess"
+ http-response set-var(res.ifgt_res1) str(ifgt_res)
+ http-response set-var(res.ifgt_res2) int(5)
+ http-response set-var(res.ifgt_res_int1) int(5)
+ http-response set-var(res.ifgt_res_int2) int(5)
+
+ http-response set-header x-var-init "proc.ifgt=%[var(proc.ifgt_proc)] sess.ifgt=%[var(sess.ifgt_sess)] res.ifgt1=%[var(res.ifgt_res1)] res.ifgt2=%[var(res.ifgt_res2)] res.ifgt_res_int1=%[var(res.ifgt_res_int1)] res.ifgt_res_int2=%[var(res.ifgt_res_int2)]"
+
+ # ifgt does not apply on non scalar type so the two following set-var will ignore the condition
+ http-response set-var(proc.ifgt_proc,ifgt) str(toto)
+ http-response set-var(sess.ifgt_sess,ifgt) bin(746F746F) # "toto"
+ # ifgt can only apply when the variable and the sample are both scalar. In this case, the variable was a string so the condition is ignored
+ http-response set-var(res.ifgt_res1,ifgt) int(55)
+ # ifgt can only apply when the variable and the sample are both scalar. In this case, the sample is a string so the condition is ignored
+ http-response set-var(res.ifgt_res2,ifgt) str(text)
+ http-response set-var(res.ifgt_res_int1,ifgt) int(55) # should not work
+ http-response set-var(res.ifgt_res_int2,ifgt) int(2) # should work
+
+ http-response set-header x-var1 "proc.ifgt=%[var(proc.ifgt_proc)] sess.ifgt=%[var(sess.ifgt_sess)] res.ifgt1=%[var(res.ifgt_res1)] res.ifgt2=%[var(res.ifgt_res2)] res.ifgt_res_int1=%[var(res.ifgt_res_int1)] res.ifgt_res_int2=%[var(res.ifgt_res_int2)]"
+
+
+ backend iflt_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.iflt_proc) str(iflt_proc)
+ http-response set-var(sess.iflt_sess) bin(69666c745f73657373) # "iflt_sess"
+ http-response set-var(res.iflt_res1) str(iflt_res)
+ http-response set-var(res.iflt_res2) int(5)
+ http-response set-var(res.iflt_res_int1) int(5)
+ http-response set-var(res.iflt_res_int2) int(5)
+
+ http-response set-header x-var-init "proc.iflt=%[var(proc.iflt_proc)] sess.iflt=%[var(sess.iflt_sess)] res.iflt1=%[var(res.iflt_res1)] res.iflt2=%[var(res.iflt_res2)] res.iflt_res_int1=%[var(res.iflt_res_int1)] res.iflt_res_int2=%[var(res.iflt_res_int2)]"
+
+ # iflt does not apply on non scalar type so the two following set-var will ignore the condition
+ http-response set-var(proc.iflt_proc,iflt) str(toto)
+ http-response set-var(sess.iflt_sess,iflt) bin(746F746F) # "toto"
+ # iflt can only apply when the variable and the sample are both scalar. In this case, the variable was a string so the condition is ignored
+ http-response set-var(res.iflt_res1,iflt) int(55)
+ # iflt can only apply when the variable and the sample are both scalar. In this case, the sample is a string so the condition is ignored
+ http-response set-var(res.iflt_res2,iflt) str(text)
+ http-response set-var(res.iflt_res_int1,iflt) int(55) # should work
+ http-response set-var(res.iflt_res_int2,iflt) int(2) # should not work
+
+ http-response set-header x-var1 "proc.iflt=%[var(proc.iflt_proc)] sess.iflt=%[var(sess.iflt_sess)] res.iflt1=%[var(res.iflt_res1)] res.iflt2=%[var(res.iflt_res2)] res.iflt_res_int1=%[var(res.iflt_res_int1)] res.iflt_res_int2=%[var(res.iflt_res_int2)]"
+
+
+ # Test multiple conditions at once
+ backend combined_be
+ server s1 ${s1_addr}:${s1_port}
+ # init
+ http-response set-var(proc.combined_proc) str(combined_proc)
+ http-response set-var(res.combined_res) int(5)
+ http-response unset-var(proc.combined_proc)
+
+ http-response set-header x-var-init "proc.combined=%[var(proc.combined_proc)] res.combined=%[var(res.combined_res)]"
+
+ http-response set-var(proc.combined_proc,ifnotset,ifnotempty) str(toto)
+ http-response set-var(res.combined_res,ifset,iflt) int(55)
+
+ http-response set-header x-var1 "proc.combined=%[var(proc.combined_proc)] res.combined=%[var(res.combined_res)]"
+
+
+ # Test the set-var converter
+ backend converter_be
+ server s1 ${s1_addr}:${s1_port}
+
+ http-request deny if { req.len,set-var(proc.req_len,ifexists) -m int 0 }
+ http-request deny if { req.hdr("X-Cust"),set-var(sess.x_cust,ifnotempty,ifnotset),length -m int 0 }
+
+ http-response set-header x-var "proc.req_len=%[var(proc.req_len)] sess.x_cust=%[var(sess.x_cust)]"
+
+} -start
+
+
+client c1 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifexists"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var == "proc.ifexists=12 sess.ifexists= res.ifexists="
+} -run
+
+client c2 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifnotexists"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifnotexists= sess.ifnotexists= res.ifnotexists="
+ expect resp.http.x-var == "proc.ifnotexists= sess.ifnotexists=5 res.ifnotexists=toto"
+} -run
+
+client c3 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifempty"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifempty=ifempty_proc sess.ifempty=ifempty_sess res.ifempty=ifempty_res res.ifempty_res_int=5"
+ expect resp.http.x-var1 == "proc.ifempty=ifempty_proc sess.ifempty=ifempty_sess res.ifempty=ifempty_res res.ifempty_res_int=5"
+ expect resp.http.x-var2 == "proc.ifempty= sess.ifempty= res.ifempty= res.ifempty_res_int=5"
+} -run
+
+client c4 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifnotempty"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifnotempty=ifnotempty_proc sess.ifnotempty=ifnotempty_sess res.ifnotempty=ifnotempty_res res.ifnotempty_res_int=5"
+ expect resp.http.x-var1 == "proc.ifnotempty=toto sess.ifnotempty=toto res.ifnotempty=tata res.ifnotempty_res_int=6"
+ expect resp.http.x-var2 == "proc.ifnotempty=toto sess.ifnotempty=toto res.ifnotempty=tata res.ifnotempty_res_int=7"
+} -run
+
+client c5 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifset"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifset=ifset_proc sess.ifset=ifset_sess res.ifset=ifset_res res.ifset_res_int=5"
+ expect resp.http.x-var1 == "proc.ifset=toto sess.ifset=toto res.ifset=123 res.ifset_res_int=azerty"
+ expect resp.http.x-var2 == "proc.ifset= sess.ifset= res.ifset= res.ifset_res_int="
+ expect resp.http.x-var3 == "proc.ifset= sess.ifset= res.ifset= res.ifset_res_int="
+} -run
+
+client c6 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifnotset"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifnotset=ifnotset_proc sess.ifnotset=ifnotset_sess res.ifnotset=ifnotset_res res.ifnotset_res_int=5"
+ expect resp.http.x-var1 == "proc.ifnotset=ifnotset_proc sess.ifnotset=ifnotset_sess res.ifnotset=ifnotset_res res.ifnotset_res_int=5"
+ expect resp.http.x-var2 == "proc.ifnotset= sess.ifnotset= res.ifnotset= res.ifnotset_res_int="
+ expect resp.http.x-var3 == "proc.ifnotset=toto sess.ifnotset=toto res.ifnotset=123 res.ifnotset_res_int=azerty"
+} -run
+
+client c7 -connect ${h1_mainfe_sock} {
+ txreq -url "/ifgt"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.ifgt=ifgt_proc sess.ifgt=ifgt_sess res.ifgt1=ifgt_res res.ifgt2=5 res.ifgt_res_int1=5 res.ifgt_res_int2=5"
+ expect resp.http.x-var1 == "proc.ifgt=toto sess.ifgt=toto res.ifgt1=55 res.ifgt2=text res.ifgt_res_int1=5 res.ifgt_res_int2=2"
+} -run
+
+client c8 -connect ${h1_mainfe_sock} {
+ txreq -url "/iflt"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.iflt=iflt_proc sess.iflt=iflt_sess res.iflt1=iflt_res res.iflt2=5 res.iflt_res_int1=5 res.iflt_res_int2=5"
+ expect resp.http.x-var1 == "proc.iflt=toto sess.iflt=toto res.iflt1=55 res.iflt2=text res.iflt_res_int1=55 res.iflt_res_int2=5"
+} -run
+
+client c9 -connect ${h1_mainfe_sock} {
+ txreq -url "/combined"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var-init == "proc.combined= res.combined=5"
+ expect resp.http.x-var1 == "proc.combined=toto res.combined=55"
+} -run
+
+client c10 -connect ${h1_mainfe_sock} {
+ txreq -url "/converter" -hdr "X-Cust: foobar"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var == "proc.req_len=67 sess.x_cust=foobar"
+} -run
diff --git a/reg-tests/sample_fetches/cook.vtc b/reg-tests/sample_fetches/cook.vtc
new file mode 100644
index 0000000..b0f5472
--- /dev/null
+++ b/reg-tests/sample_fetches/cook.vtc
@@ -0,0 +1,132 @@
+varnishtest "cook sample fetch Test"
+
+feature ignore_unknown_macro
+
+# TEST - 1
+# Cookie from request
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request set-var(txn.count) req.cook_cnt()
+ http-request set-var(txn.val) req.cook_val()
+ http-request set-var(txn.val_cook2) req.cook_val(cook2)
+ http-request set-var(txn.cook_names) req.cook_names
+ http-response set-header count %[var(txn.count)]
+ http-response set-header val %[var(txn.val)]
+ http-response set-header val_cook2 %[var(txn.val_cook2)]
+ http-response set-header cook_names %[var(txn.cook_names)]
+
+ default_backend be
+
+ backend be
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/" \
+ -hdr "cookie: cook1=0; cook2=123; cook3=22"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.count == "3"
+ expect resp.http.val == "0"
+ expect resp.http.val_cook2 == "123"
+ expect resp.http.cook_names == "cook1,cook2,cook3"
+} -run
+
+# TEST - 2
+# Set-Cookie from response
+server s2 {
+ rxreq
+ txresp -hdr "Set-Cookie: cook1=0; cook2=123; cook3=22"
+} -start
+
+haproxy h2 -conf {
+ defaults
+ mode http
+
+ frontend fe
+ bind "fd@${fe}"
+ http-response set-var(txn.cook_names) res.cook_names
+ http-response set-header cook_names %[var(txn.cook_names)]
+
+ default_backend be
+
+ backend be
+ server srv2 ${s2_addr}:${s2_port}
+} -start
+
+client c2 -connect ${h2_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.cook_names == "cook1"
+} -run
+
+# TEST - 3
+# Multiple Cookie headers from request
+server s3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h3 -conf {
+ defaults
+ mode http
+
+ frontend fe
+ bind "fd@${fe}"
+ http-request set-var(txn.cook_names) req.cook_names
+ http-response set-header cook_names %[var(txn.cook_names)]
+
+ default_backend be
+
+ backend be
+ server srv3 ${s3_addr}:${s3_port}
+} -start
+
+client c3 -connect ${h3_fe_sock} {
+ txreq -url "/" \
+ -hdr "cookie: cook1=0; cook2=123; cook3=22" \
+ -hdr "cookie: cook4=1; cook5=2; cook6=3"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.cook_names == "cook1,cook2,cook3,cook4,cook5,cook6"
+} -run
+
+# TEST - 4
+# Multiple Set-Cookie headers from response
+server s4 {
+ rxreq
+ txresp -hdr "Set-Cookie: cook1=0; cook2=123; cook3=22" \
+ -hdr "Set-Cookie: cook4=1; cook5=2; cook6=3"
+} -start
+
+haproxy h4 -conf {
+ defaults
+ mode http
+
+ frontend fe
+ bind "fd@${fe}"
+ http-response set-var(txn.cook_names) res.cook_names
+ http-response set-header cook_names %[var(txn.cook_names)]
+
+ default_backend be
+
+ backend be
+ server srv4 ${s4_addr}:${s4_port}
+} -start
+
+client c4 -connect ${h4_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.cook_names == "cook1,cook4"
+} -run
diff --git a/reg-tests/sample_fetches/hashes.vtc b/reg-tests/sample_fetches/hashes.vtc
new file mode 100644
index 0000000..2c2f60d
--- /dev/null
+++ b/reg-tests/sample_fetches/hashes.vtc
@@ -0,0 +1,101 @@
+varnishtest "Hash validity test"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+
+ # base64 encoding of \x00\x01\x02...\xFF
+ http-response set-var(res.key) "str(AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==),b64dec"
+
+ # length (start:0, next:255)
+ http-response set-header x-len0 "%[var(res.key),length]"
+ http-response set-header x-len1 "%[var(res.key),bytes(1),length]"
+
+ # text-based encoding
+ http-response set-header x-hex "%[var(res.key),hex]"
+ http-response set-header x-b64 "%[var(res.key),base64]"
+
+ # SHA family
+ http-response set-header x-sha1 "%[var(res.key),sha1,hex]"
+ #http-response set-header x-sha2 "%[var(res.key),sha2,hex]"
+ #http-response set-header x-sha2-224 "%[var(res.key),sha2(224),hex]"
+ #http-response set-header x-sha2-256 "%[var(res.key),sha2(256),hex]"
+ #http-response set-header x-sha2-384 "%[var(res.key),sha2(384),hex]"
+ #http-response set-header x-sha2-512 "%[var(res.key),sha2(512),hex]"
+
+ # 32-bit hashes, and their avalanche variants
+ http-response set-header x-crc32 "%[var(res.key),crc32]"
+ http-response set-header x-crc32-1 "%[var(res.key),crc32(1)]"
+
+ http-response set-header x-crc32c "%[var(res.key),crc32c]"
+ http-response set-header x-crc32c-1 "%[var(res.key),crc32c(1)]"
+
+ http-response set-header x-djb2 "%[var(res.key),djb2]"
+ http-response set-header x-djb2-1 "%[var(res.key),djb2(1)]"
+
+ http-response set-header x-sdbm "%[var(res.key),sdbm]"
+ http-response set-header x-sdbm-1 "%[var(res.key),sdbm(1)]"
+
+ http-response set-header x-wt6 "%[var(res.key),wt6]"
+ http-response set-header x-wt6-1 "%[var(res.key),wt6(1)]"
+
+ # 32/64-bit hashes, with seed variant
+ http-response set-header x-xxh3 "%[var(res.key),xxh3]"
+ http-response set-header x-xxh3-1 "%[var(res.key),xxh3(1)]"
+ http-response set-header x-xxh32 "%[var(res.key),xxh32]"
+ http-response set-header x-xxh32-1 "%[var(res.key),xxh32(1)]"
+ http-response set-header x-xxh64 "%[var(res.key),xxh64]"
+ http-response set-header x-xxh64-1 "%[var(res.key),xxh64(1)]"
+ default_backend be
+
+ backend be
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-len0 == "0"
+ expect resp.http.x-len1 == "255"
+ expect resp.http.x-hex == "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F808182838485868788898A8B8C8D8E8F909192939495969798999A9B9C9D9E9FA0A1A2A3A4A5A6A7A8A9AAABACADAEAFB0B1B2B3B4B5B6B7B8B9BABBBCBDBEBFC0C1C2C3C4C5C6C7C8C9CACBCCCDCECFD0D1D2D3D4D5D6D7D8D9DADBDCDDDEDFE0E1E2E3E4E5E6E7E8E9EAEBECEDEEEFF0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF"
+ expect resp.http.x-b64 == "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=="
+
+ expect resp.http.x-sha1 == "4916D6BDB7F78E6803698CAB32D1586EA457DFC8"
+ #expect resp.http.x-sha2 == "40AFF2E9D2D8922E47AFD4648E6967497158785FBD1DA870E7110266BF944880"
+ #expect resp.http.x-sha2-224 == "88702E63237824C4EB0D0FCFE41469A462493E8BEB2A75BBE5981734"
+ #expect resp.http.x-sha2-256 == "40AFF2E9D2D8922E47AFD4648E6967497158785FBD1DA870E7110266BF944880"
+ #expect resp.http.x-sha2-384 == "FFDAEBFF65ED05CF400F0221C4CCFB4B2104FB6A51F87E40BE6C4309386BFDEC2892E9179B34632331A59592737DB5C5"
+ #expect resp.http.x-sha2-512 == "1E7B80BC8EDC552C8FEEB2780E111477E5BC70465FAC1A77B29B35980C3F0CE4A036A6C9462036824BD56801E62AF7E9FEBA5C22ED8A5AF877BF7DE117DCAC6D"
+ expect resp.http.x-crc32 == "688229491"
+ expect resp.http.x-crc32-1 == "4230317029"
+ expect resp.http.x-crc32c == "2621708363"
+ expect resp.http.x-crc32c-1 == "2242979626"
+ expect resp.http.x-djb2 == "2589693061"
+ expect resp.http.x-djb2-1 == "600622701"
+ expect resp.http.x-sdbm == "905707648"
+ expect resp.http.x-sdbm-1 == "3103804144"
+ expect resp.http.x-wt6 == "4090277559"
+ expect resp.http.x-wt6-1 == "1192658767"
+ expect resp.http.x-xxh3 == "-7779787747613135503"
+ expect resp.http.x-xxh3-1 == "5930632130106562027"
+ expect resp.http.x-xxh32 == "1497633363"
+ expect resp.http.x-xxh32-1 == "1070421674"
+ expect resp.http.x-xxh64 == "2282408585429094475"
+ expect resp.http.x-xxh64-1 == "-4689339368900765961"
+} -run
diff --git a/reg-tests/sample_fetches/so_name.vtc b/reg-tests/sample_fetches/so_name.vtc
new file mode 100644
index 0000000..c6211fa
--- /dev/null
+++ b/reg-tests/sample_fetches/so_name.vtc
@@ -0,0 +1,22 @@
+varnishtest "so_name sample fetch Test"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+
+ frontend fe
+ bind "fd@${fe}" name foo
+ http-request return status 200 hdr so-name %[so_name]
+
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.so-name == "foo"
+} -run
diff --git a/reg-tests/sample_fetches/srv_name.vtc b/reg-tests/sample_fetches/srv_name.vtc
new file mode 100644
index 0000000..900957e
--- /dev/null
+++ b/reg-tests/sample_fetches/srv_name.vtc
@@ -0,0 +1,46 @@
+varnishtest "srv_name sample fetch Test"
+
+#REQUIRE_VERSION=2.1
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+server s2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ http-response set-header srv-id "%[srv_id]"
+ http-response set-header srv-name "%[srv_name]"
+ default_backend be
+
+ backend be
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.srv-id == "1"
+ expect resp.http.srv-name == "srv1"
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.srv-id == "2"
+ expect resp.http.srv-name == "srv2"
+} -run
diff --git a/reg-tests/sample_fetches/tcpinfo_rtt.vtc b/reg-tests/sample_fetches/tcpinfo_rtt.vtc
new file mode 100644
index 0000000..e21c542
--- /dev/null
+++ b/reg-tests/sample_fetches/tcpinfo_rtt.vtc
@@ -0,0 +1,39 @@
+varnishtest "Test declaration of TCP rtt fetches"
+
+# feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(v2.8-dev8)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults common
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe from common
+ bind "fd@${feh1}"
+
+ default_backend be
+
+ backend be from common
+
+ http-response set-header x-test1 "%[fc_rtt]"
+ http-response set-header x-test2 "%[bc_rtt(us)]"
+ http-response set-header x-test3 "%[fc_rttvar]"
+ http-response set-header x-test4 "%[bc_rttvar]"
+
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-test2 ~ "[0-9]+"
+} -run \ No newline at end of file
diff --git a/reg-tests/sample_fetches/tlvs.vtc b/reg-tests/sample_fetches/tlvs.vtc
new file mode 100644
index 0000000..9312b1d
--- /dev/null
+++ b/reg-tests/sample_fetches/tlvs.vtc
@@ -0,0 +1,57 @@
+varnishtest "Tests for fetching PROXY protocol v2 TLVs"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend echo
+ bind "fd@${fe1}" accept-proxy
+ tcp-request content set-var(sess.aws) fc_pp_tlv(0xEA),bytes(1) if { fc_pp_tlv(0xEE),bytes(0,1),hex eq 01 }
+ tcp-request content set-var(sess.azure) fc_pp_tlv(0xEE),bytes(1) if { fc_pp_tlv(0xEA),bytes(0,1),hex eq 01 }
+
+ http-after-response set-header echo1 %[var(sess.aws)]
+ http-after-response set-header echo2 %[var(sess.azure)]
+ http-after-response set-header echo3 %[fc_pp_tlv(0xEB)]
+ http-after-response set-header echo4 %[fc_pp_tlv(0xEC),length]
+ http-request return status 200
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + length of the TLVs (14 + 10 + 9 + 131)
+ sendhex "00 B0"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+
+ # PP2_TYPE_AWS (0xEA) + length of the value + PP2_SUBTYPE_AWS_VPCE_ID (0x01) + "aws-vpc-id"
+ # See https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-target-groups.html#custom-tlv for the respective definitions.
+ sendhex "EA 00 0B 01 61 77 73 2D 76 70 63 2D 69 64"
+
+ # PP2_TYPE_AZURE (0xEE) + length of the value + PP2_SUBTYPE_AZURE_PRIVATEENDPOINT_LINKID (0x01) + "LINKID"
+ # See https://learn.microsoft.com/en-us/azure/private-link/private-link-service-overview#getting-connection-information-using-tcp-proxy-v2
+ # for the respective definitions.
+ sendhex "EE 00 07 01 4C 49 4E 4B 49 44"
+
+ # custom type (0xEB) + length of the value + "custom"
+ sendhex "EB 00 06 63 75 73 74 6F 6D"
+
+ # custom type (0xEC) + length of the value (128, does not fit in pool) + random data
+ sendhex "EC 00 80 3A D9 32 9B 11 A7 29 81 14 B2 33 F0 C2 0D 7A 53 D1 97 28 74 4B 78 8A D3 10 C4 B1 88 42 9C 63 8E 8B 8A A0 B4 B0 E7 9D 20 27 0F 1E 53 4D 33 F7 5A D0 91 3F B8 C9 E9 16 C4 61 C5 13 02 92 64 9D D4 22 5C 8E 4E 0B 2D 2D 7D 9F 5D 97 9B 25 C4 12 7D 21 75 C8 15 92 6B 64 F2 5F C0 A9 0F 9A 7D 0A 6D 68 79 F4 56 18 6F 23 45 2A 9B 36 34 3A 47 43 32 29 18 6F 23 45 2A 9B 36 34 3A 47 43 32 29 32 29"
+
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.echo1 == "aws-vpc-id"
+ expect resp.http.echo2 == "LINKID"
+ expect resp.http.echo3 == "custom"
+ expect resp.http.echo4 == 128
+} -run
diff --git a/reg-tests/sample_fetches/ubase64.vtc b/reg-tests/sample_fetches/ubase64.vtc
new file mode 100644
index 0000000..8e47d86
--- /dev/null
+++ b/reg-tests/sample_fetches/ubase64.vtc
@@ -0,0 +1,57 @@
+varnishtest "ub64dec sample fetch Test"
+
+#REQUIRE_VERSION=2.4
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${fe}"
+ acl input hdr(encode) -m found
+ http-request return content-type text/plain hdr encode %[hdr(encode),ub64enc] hdr decode %[hdr(decode),ub64dec] if input
+ http-request return content-type text/plain hdr encode %[bin(14fb9c03d97f12d97e),ub64enc] hdr decode %[str(FPucA9l_Etl-),ub64dec,hex,lower] if !input
+
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+ txreq -hdr "encode: f" -hdr "decode: Zg"
+ rxresp
+ expect resp.http.encode == "Zg"
+ expect resp.http.decode == "f"
+ txreq -hdr "encode: fo" -hdr "decode: Zm8"
+ rxresp
+ expect resp.http.encode == "Zm8"
+ expect resp.http.decode == "fo"
+ txreq -hdr "encode: foo" -hdr "decode: Zm9v"
+ rxresp
+ expect resp.http.encode == "Zm9v"
+ expect resp.http.decode == "foo"
+ txreq -hdr "encode: foob" -hdr "decode: Zm9vYg"
+ rxresp
+ expect resp.http.encode == "Zm9vYg"
+ expect resp.http.decode == "foob"
+ txreq -hdr "encode: fooba" -hdr "decode: Zm9vYmE"
+ rxresp
+ expect resp.http.encode == "Zm9vYmE"
+ expect resp.http.decode == "fooba"
+ txreq -hdr "encode: foobar" -hdr "decode: Zm9vYmFy"
+ rxresp
+ expect resp.http.encode == "Zm9vYmFy"
+ expect resp.http.decode == "foobar"
+ txreq
+ rxresp
+ expect resp.http.encode == "FPucA9l_Etl-"
+ expect resp.http.decode == "14fb9c03d97f12d97e"
+} -run
diff --git a/reg-tests/sample_fetches/vars.vtc b/reg-tests/sample_fetches/vars.vtc
new file mode 100644
index 0000000..29d474c
--- /dev/null
+++ b/reg-tests/sample_fetches/vars.vtc
@@ -0,0 +1,84 @@
+varnishtest "Test a few set-var() in global, tcp and http rule sets, at different scopes"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev5)'"
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ # note below, str60 is purposely not defined so that the default is used
+ set-var proc.int12 int(12)
+ set-var proc.int5 var(proc.str60,60),div(proc.int12)
+ set-var proc.str1 str("this is")
+ set-var proc.str2 str("a string")
+ set-var proc.str var(proc.str1)
+ set-var-fmt proc.str "%[var(proc.str)] a string"
+ set-var proc.uuid uuid()
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe1}"
+ tcp-request session set-var-fmt(sess.str3) "%[var(proc.str1)] %[var(proc.str2)]"
+ tcp-request session set-var(sess.int5) var(proc.int5)
+ tcp-request session set-var(proc.int5) var(proc.int5),add(sess.int5) ## proc. becomes 10
+ tcp-request content set-var-fmt(req.str4) "%[var(sess.str3),regsub(is a,is also a)]"
+ http-request set-var-fmt(txn.str5) "%[var(req.str4)]"
+ http-request set-var(req.int5) var(sess.int5)
+ http-request set-var(sess.int5) var(sess.int5),add(req.int5) ## sess. becomes 10 first time, then 15...
+ http-request return status 200 hdr x-var "proc=%[var(proc.int5)] sess=%[var(sess.int5)] req=%[var(req.int5)] str=%[var(proc.str)] str5=%[var(txn.str5)] uuid=%[var(proc.uuid)]"
+} -start
+
+haproxy h1 -cli {
+ send "get var proc.int5"
+ expect ~ "^proc.int5: type=sint value=<5>"
+}
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req1_1
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var ~ "proc=10 sess=10 req=5 str=this is a string str5=this is also a string uuid=[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*"
+
+ txreq -req GET -url /req1_2
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var ~ "proc=10 sess=20 req=10 str=this is a string str5=this is also a string uuid=[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*"
+} -run
+
+haproxy h1 -cli {
+ send "get var proc.int5"
+ expect ~ "^proc.int5: type=sint value=<10>"
+}
+
+client c2 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req2_1
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var ~ "proc=20 sess=20 req=10 str=this is a string str5=this is also a string uuid=[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*"
+
+ txreq -req GET -url /req2_2
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var ~ "proc=20 sess=40 req=20 str=this is a string str5=this is also a string uuid=[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*"
+} -run
+
+haproxy h1 -cli {
+ send "get var proc.int5"
+ expect ~ "^proc.int5: type=sint value=<20>"
+}
+
+haproxy h1 -cli {
+ send "experimental-mode on; set var proc.str str(updating); set var proc.str fmt %[var(proc.str),regsub(ing,ed)]"
+ expect ~ .*
+}
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq -req GET -url /req3_1
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-var ~ "proc=40 sess=40 req=20 str=updated str5=this is also a string uuid=[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*-[0-9a-f]*"
+} -run
diff --git a/reg-tests/seamless-reload/abns_socket.vtc b/reg-tests/seamless-reload/abns_socket.vtc
new file mode 100644
index 0000000..e8e5f28
--- /dev/null
+++ b/reg-tests/seamless-reload/abns_socket.vtc
@@ -0,0 +1,55 @@
+# commit b4dd15b
+# BUG/MINOR: unix: Make sure we can transfer abns sockets on seamless reload.
+#
+# When checking if a socket we got from the parent is suitable for a listener,
+# we just checked that the path matched sockname.tmp, however this is
+# unsuitable for abns sockets, where we don't have to create a temporary
+# file and rename it later.
+# To detect that, check that the first character of the sun_path is 0 for
+# both, and if so, that &sun_path[1] is the same too.
+#
+# Note: there are some tricks here. One of them is that we must not bind the
+# same abns address to multiple processes that may run in parallel. Since
+# vtest cannot provide abns sockets, we're instead concatenating the number
+# of the listening port that vtest allocated for another frontend to the abns
+# path, which guarantees to make them unique in the system.
+
+varnishtest "Seamless reload issue with abns sockets"
+feature ignore_unknown_macro
+feature cmd "command -v socat"
+feature cmd "command -v grep"
+
+# abns@ sockets are not available on freebsd
+#EXCLUDE_TARGETS=freebsd,osx,generic
+#REGTEST_TYPE=devel
+
+haproxy h1 -W -S -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin expose-fd listeners
+
+ defaults
+ mode http
+ log global
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen testme
+ bind "fd@${testme}"
+ server test_abns_server abns@wpproc1_${h1_testme_port} send-proxy-v2
+
+ frontend test_abns
+ bind abns@wpproc1_${h1_testme_port} accept-proxy
+ http-request deny deny_status 200
+} -start
+
+shell {
+ echo "reload" | socat -t1000 TCP:${h1_mcli_addr}:${h1_mcli_port} - | grep 'Success=1'
+}
+
+client c1 -connect ${h1_testme_sock} {
+ txreq -url "/"
+ rxresp
+} -repeat 50 -run
+
diff --git a/reg-tests/server/cli_add_check_server.vtc b/reg-tests/server/cli_add_check_server.vtc
new file mode 100644
index 0000000..c63710c
--- /dev/null
+++ b/reg-tests/server/cli_add_check_server.vtc
@@ -0,0 +1,161 @@
+varnishtest "Add/Delete server via cli with check support"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev3)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+barrier b2 cond 2 -cyclic
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+server s2 {
+} -start
+
+# used for agent checks
+server s3 {
+ recv 5
+ send "ready up\n"
+ barrier b2 sync
+} -start
+
+syslog S1 -level notice {
+ recv
+ expect ~ ".*Server be1/s1 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/s1 succeeded.+reason: Layer7 check passed, code: 200, check duration: [[:digit:]]+ms.+status: 1/1 UP"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*Server be1/s2 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/s2 failed.+reason: Layer7 timeout, check duration: [[:digit:]]+ms.+status: 0/1 DOWN"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*Server be1/s2 was DOWN and now enters maintenance."
+
+ recv
+ expect ~ ".*Server be1/s3 is UP/READY \\(leaving forced maintenance\\)."
+
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Agent check for server be1/s3 succeeded.+reason: Layer7 check passed, code: 0, info: \"via agent : up\", check duration: [[:digit:]]+ms.+status: 1/1 UP"
+
+ barrier b1 sync
+ barrier b2 sync
+
+ recv
+ expect ~ ".*Server be1/s4 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "Health check for server be1/s4 failed"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*Server be1/s5 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "Health check for server be1/s5 succeeded."
+} -start
+
+haproxy h1 -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ backend be1
+ option log-health-checks
+ option httpchk GET /
+ log ${S1_addr}:${S1_port} daemon
+
+ frontend fe-proxy
+ mode http
+ bind "fd@${hapsrv}" accept-proxy
+ http-request return status 200
+} -start
+
+# check on a functional server
+haproxy h1 -cli {
+ send "add server be1/s1 ${s1_addr}:${s1_port} check inter 200ms rise 1 fall 1"
+ expect ~ "New server registered."
+
+ send "enable server be1/s1"
+ expect ~ ".*"
+ send "enable health be1/s1"
+ expect ~ ".*"
+
+ barrier b1 sync
+
+ send "disable server be1/s1"
+ expect ~ ".*"
+
+ send "del server be1/s1"
+ expect ~ "Server deleted."
+}
+
+server s2 -break
+
+# check on a disabled server
+haproxy h1 -cli {
+ send "add server be1/s2 ${s2_addr}:${s2_port} check inter 200ms rise 1 fall 1"
+ expect ~ "New server registered."
+
+ send "enable server be1/s2"
+ expect ~ ".*"
+ send "enable health be1/s2"
+ expect ~ ".*"
+
+ barrier b1 sync
+
+ send "disable server be1/s2"
+ expect ~ ".*"
+
+ send "del server be1/s2"
+ expect ~ "Server deleted."
+}
+
+# agent check
+haproxy h1 -cli {
+ send "add server be1/s3 ${s1_addr}:${s1_port} agent-check agent-addr ${s3_addr} agent-port ${s3_port} agent-send 'hello' agent-inter 200ms rise 1 fall 1"
+ expect ~ "New server registered."
+
+ send "enable agent be1/s3"
+ expect ~ ".*"
+
+ barrier b1 sync
+
+ send "disable agent be1/s3; disable server be1/s3"
+ expect ~ ".*"
+
+ send "del server be1/s3"
+ expect ~ "Server deleted."
+}
+
+# check PROXY protocol interaction with checks
+haproxy h1 -cli {
+ # no explicit check-send-proxy
+ # The health check should fail.
+ send "add server be1/s4 ${h1_hapsrv_addr}:${h1_hapsrv_port} send-proxy check rise 1 fall 1"
+ expect ~ "New server registered."
+
+ send "enable server be1/s4"
+ expect ~ ".*"
+ send "enable health be1/s4"
+ expect ~ ".*"
+
+ barrier b1 sync
+
+ # explicit check-send-proxy : health check should succeed
+ send "add server be1/s5 ${h1_hapsrv_addr}:${h1_hapsrv_port} send-proxy check rise 1 fall 1 check-send-proxy"
+ expect ~ "New server registered."
+
+ send "enable server be1/s5"
+ expect ~ ".*"
+ send "enable health be1/s5"
+ expect ~ ".*"
+}
+
+syslog S1 -wait
diff --git a/reg-tests/server/cli_add_server.vtc b/reg-tests/server/cli_add_server.vtc
new file mode 100644
index 0000000..8c29305
--- /dev/null
+++ b/reg-tests/server/cli_add_server.vtc
@@ -0,0 +1,87 @@
+varnishtest "Add server via cli"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feS}"
+ default_backend test
+
+ backend test
+ balance random
+
+ backend other
+ balance static-rr
+
+ backend other2
+ balance random
+ mode tcp
+} -start
+
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+haproxy h1 -cli {
+ # non existent backend
+ send "add server foo/s1 ${s1_addr}:${s1_port}"
+ expect ~ "No such backend."
+
+ # missing address
+ send "add server test/s1"
+ expect ~ "'server' expects <name> and <addr>\\[:<port>\\] as arguments."
+
+ # invalid load-balancing algo
+ send "add server other/s1 ${s1_addr}:${s1_port}"
+ expect ~ "Backend must use a dynamic load balancing to support dynamic servers."
+
+ # invalid mux proto
+ send "add server other2/s1 ${s1_addr}:${s1_port} proto h2"
+ expect ~ "MUX protocol is not usable for server."
+
+ # valid command
+ send "add server test/s1 ${s1_addr}:${s1_port}"
+ expect ~ "New server registered."
+
+ # duplicate server
+ send "add server test/s1 ${s1_addr}:${s1_port}"
+ expect ~ "Already exists a server with the same name in backend."
+
+ # valid command
+ # specify the proto, it should be accepted for this backend
+ send "add server test/s2 ${s1_addr}:${s1_port} proto h2"
+ expect ~ "New server registered."
+}
+
+# dynamic servers are created on MAINT mode and should not be available at first
+client c2 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+haproxy h1 -cli {
+ send "enable server test/s1"
+ expect ~ ".*"
+}
+
+client c3 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/server/cli_add_ssl_server.vtc b/reg-tests/server/cli_add_ssl_server.vtc
new file mode 100644
index 0000000..48faee1
--- /dev/null
+++ b/reg-tests/server/cli_add_ssl_server.vtc
@@ -0,0 +1,110 @@
+varnishtest "Add server via cli with SSL activated"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+barrier b1 cond 2 -cyclic
+
+syslog S1 -level notice {
+ recv
+ expect ~ ".*Server li-ssl/s1 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ ".*Server li-ssl/s2 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "Health check for server li-ssl/s2 failed"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*Server li-ssl/s3 is UP/READY \\(leaving forced maintenance\\)."
+ recv
+ expect ~ "Health check for server li-ssl/s3 succeeded."
+} -start
+
+haproxy h1 -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option log-health-checks
+ option httpchk GET /
+
+ # proxy to attach a ssl server
+ listen li-ssl
+ bind "fd@${feSsl}"
+ balance random
+ log ${S1_addr}:${S1_port} daemon
+
+ # frontend used to respond to ssl connection
+ frontend fe-ssl-term
+ bind "fd@${feSslTerm}" ssl crt ${testdir}/common.pem
+ http-request return status 200
+} -start
+
+### SSL SUPPORT
+# 1. first create a ca-file using CLI
+# 2. create an SSL server and use it
+
+client c1 -connect ${h1_feSsl_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+shell {
+ echo "new ssl ca-file common.pem" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl ca-file common.pem <<\n$(cat ${testdir}/common.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file common.pem" | socat "${tmpdir}/h1/stats" -
+} -run
+
+haproxy h1 -cli {
+ send "show ssl ca-file common.pem"
+ expect ~ ".*SHA1 FingerPrint: DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+}
+
+haproxy h1 -cli {
+    # add an ssl server using the ca-file loaded via the CLI
+ send "add server li-ssl/s1 ${h1_feSslTerm_addr}:${h1_feSslTerm_port} ssl ca-file common.pem verify none"
+ expect ~ "New server registered."
+
+ send "enable server li-ssl/s1"
+ expect ~ ".*"
+}
+
+client c2 -connect ${h1_feSsl_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+# test interaction between SSL and checks for dynamic servers
+haproxy h1 -cli {
+ # no explicit check-ssl
+    # The health check should fail.
+ send "add server li-ssl/s2 ${h1_feSslTerm_addr}:${h1_feSslTerm_port} ssl verify none check"
+ expect ~ "New server registered."
+
+ send "enable server li-ssl/s2"
+ expect ~ ".*"
+ send "enable health li-ssl/s2"
+ expect ~ ".*"
+
+ barrier b1 sync
+
+    # explicit check-ssl : health check should succeed
+ send "add server li-ssl/s3 ${h1_feSslTerm_addr}:${h1_feSslTerm_port} ssl verify none check check-ssl"
+ expect ~ "New server registered."
+
+ send "enable server li-ssl/s3"
+ expect ~ ".*"
+ send "enable health li-ssl/s3"
+ expect ~ ".*"
+}
+
+syslog S1 -wait
diff --git a/reg-tests/server/cli_add_track_server.vtc b/reg-tests/server/cli_add_track_server.vtc
new file mode 100644
index 0000000..318f236
--- /dev/null
+++ b/reg-tests/server/cli_add_track_server.vtc
@@ -0,0 +1,242 @@
+varnishtest "Add/Delete server via cli with track support"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+
+# just used to provide s1_{addr,port} macros
+server s1 {
+}
+
+
+# scenario 1
+# -> 3 dynamic servers, delete the first one
+syslog S1 {
+ recv notice
+ expect ~ "Server be1/s1 is UP/READY"
+ recv notice
+ expect ~ "Server be1/s2 is UP/READY"
+ recv notice
+ expect ~ "Server be1/s3 is UP/READY"
+
+ recv alert
+ expect ~ "Server be1/srv is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be1/s3 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be1/s2 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be1/s1 is going DOWN for maintenance."
+
+ recv notice
+ expect ~ "Server be1/srv is UP/READY"
+ recv notice
+ expect ~ "Server be1/s3 is UP/READY"
+ recv notice
+ expect ~ "Server be1/s2 is UP/READY"
+} -start
+
+# scenario 2
+# -> 3 dynamic servers, delete the middle one
+syslog S2 {
+ recv notice
+ expect ~ "Server be2/s1 is UP/READY"
+ recv notice
+ expect ~ "Server be2/s2 is UP/READY"
+ recv notice
+ expect ~ "Server be2/s3 is UP/READY"
+
+ recv alert
+ expect ~ "Server be2/srv is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be2/s3 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be2/s2 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be2/s1 is going DOWN for maintenance."
+
+ recv notice
+ expect ~ "Server be2/srv is UP/READY"
+ recv notice
+ expect ~ "Server be2/s3 is UP/READY"
+ recv notice
+ expect ~ "Server be2/s1 is UP/READY"
+} -start
+
+# scenario 3
+# -> 3 dynamic servers, delete all of them
+syslog S3 {
+ recv notice
+ expect ~ "Server be3/s1 is UP/READY"
+ recv notice
+ expect ~ "Server be3/s2 is UP/READY"
+ recv notice
+ expect ~ "Server be3/s3 is UP/READY"
+
+ recv alert
+ expect ~ "Server be3/s1 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be3/s3 is going DOWN for maintenance."
+ recv alert
+ expect ~ "Server be3/s2 is going DOWN for maintenance."
+
+ recv alert
+ expect ~ "Server be3/srv is going DOWN for maintenance."
+
+ recv notice
+ expect ~ "Server be3/srv is UP/READY"
+} -start
+
+
+haproxy h1 -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ backend be_check
+ server srv_check ${s1_addr}:${s1_port} check
+ server srv_no_check ${s1_addr}:${s1_port}
+
+ backend be1
+ log ${S1_addr}:${S1_port} daemon
+ server srv ${s1_addr}:${s1_port} check
+
+ backend be2
+ log ${S2_addr}:${S2_port} daemon
+ server srv ${s1_addr}:${s1_port} check
+
+ backend be3
+ log ${S3_addr}:${S3_port} daemon
+ server srv ${s1_addr}:${s1_port} check
+} -start
+
+
+###
+# check the support of the 'track' keyword on 'add server' CLI command.
+# rejection must happen if track on a non-checked or a dynamic server
+###
+haproxy h1 -cli {
+ # invalid command: track on a non-checked server
+ send "add server be_check/s1 ${s1_addr}:${s1_port} track be_check/srv_no_check"
+ expect ~ "unable to use be_check/srv_no_check for tracking as it does not have any check nor agent enabled."
+
+ # valid track usage
+ send "add server be_check/s1 ${s1_addr}:${s1_port} track be_check/srv_check"
+ expect ~ "New server registered."
+
+ # invalid command: track on a dynamic server
+ send "add server be_check/s3 ${s1_addr}:${s1_port} track be_check/s1"
+ expect ~ "unable to use be_check/s1 for tracking as it is a dynamic server."
+}
+
+###
+# scenario 1
+#
+# Add 3 dynamic servers with tracking on be1/srv
+# Disable be1/srv, S1 should report all servers DOWN
+# Delete the first dynamic server, enable be1/srv, S1 should report s2 and s3 UP
+####
+haproxy h1 -cli {
+ send "add server be1/s1 ${s1_addr}:${s1_port} track be1/srv"
+ expect ~ "New server registered."
+ send "enable server be1/s1"
+ expect ~ ".*"
+
+ send "add server be1/s2 ${s1_addr}:${s1_port} track be1/srv"
+ expect ~ "New server registered."
+ send "enable server be1/s2"
+ expect ~ ".*"
+
+ send "add server be1/s3 ${s1_addr}:${s1_port} track be1/srv"
+ expect ~ "New server registered."
+ send "enable server be1/s3"
+ expect ~ ".*"
+
+ send "disable server be1/srv"
+ expect ~ ".*"
+
+ send "del server be1/s1"
+ expect ~ "Server deleted."
+
+ send "enable server be1/srv"
+ expect ~ ".*"
+}
+
+###
+# scenario 2
+#
+# Add 3 dynamic servers with tracking on be2/srv
+# Disable be2/srv, S2 should report all servers DOWN
+# Delete the second dynamic server, enable be2/srv, S2 should report s1 and s3 UP
+####
+haproxy h1 -cli {
+ send "add server be2/s1 ${s1_addr}:${s1_port} track be2/srv"
+ expect ~ "New server registered."
+ send "enable server be2/s1"
+ expect ~ ".*"
+
+ send "add server be2/s2 ${s1_addr}:${s1_port} track be2/srv"
+ expect ~ "New server registered."
+ send "enable server be2/s2"
+ expect ~ ".*"
+
+ send "add server be2/s3 ${s1_addr}:${s1_port} track be2/srv"
+ expect ~ "New server registered."
+ send "enable server be2/s3"
+ expect ~ ".*"
+
+ send "disable server be2/srv"
+ expect ~ ".*"
+
+ send "del server be2/s2"
+ expect ~ "Server deleted."
+
+ send "enable server be2/srv"
+ expect ~ ".*"
+}
+
+###
+# scenario 3
+#
+# Add 3 dynamic servers with tracking on be3/srv
+# Delete all of them, disable/enable be3/srv, only be3/srv should be reported
+# as DOWN/UP.
+####
+haproxy h1 -cli {
+ # create server 1, track on be3/srv
+ send "add server be3/s1 ${s1_addr}:${s1_port} track be3/srv"
+ expect ~ "New server registered."
+ send "enable server be3/s1"
+ expect ~ ".*"
+
+ # create server 2, track on be3/srv
+ send "add server be3/s2 ${s1_addr}:${s1_port} track be3/srv"
+ expect ~ "New server registered."
+ send "enable server be3/s2"
+ expect ~ ".*"
+
+ # create server 3, track on be3/srv
+ send "add server be3/s3 ${s1_addr}:${s1_port} track be3/srv"
+ expect ~ "New server registered."
+ send "enable server be3/s3"
+ expect ~ ".*"
+
+ # delete all dynamic servers
+ send "disable server be3/s1; del server be3/s1"
+ expect ~ "Server deleted."
+ send "disable server be3/s3; del server be3/s3"
+ expect ~ "Server deleted."
+ send "disable server be3/s2; del server be3/s2"
+ expect ~ "Server deleted."
+
+ # disable / enable the static server
+ send "disable server be3/srv"
+ expect ~ ".*"
+ send "enable server be3/srv"
+ expect ~ ".*"
+}
+
+
+syslog S1 -wait
+syslog S2 -wait
+syslog S3 -wait
diff --git a/reg-tests/server/cli_delete_dynamic_server.vtc b/reg-tests/server/cli_delete_dynamic_server.vtc
new file mode 100644
index 0000000..e667641
--- /dev/null
+++ b/reg-tests/server/cli_delete_dynamic_server.vtc
@@ -0,0 +1,94 @@
+# This script is to test the proper behavior with dynamic servers insertion and
+# deletion, in particular with the load-balancing of requests.
+#
+varnishtest "Delete server via cli"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+
+# static server
+server s1 -repeat 3 {
+ rxreq
+ txresp \
+ -body "resp from s1"
+} -start
+
+# use as a dynamic server, added then deleted via CLI
+server s2 -repeat 3 {
+ rxreq
+ txresp \
+ -body "resp from s2"
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feS}"
+ default_backend test
+
+ backend test
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# add a new dynamic server to be able to delete it then
+haproxy h1 -cli {
+ # add a dynamic server and enable it
+ send "add server test/s2 ${s2_addr}:${s2_port}"
+ expect ~ "New server registered."
+
+ send "enable server test/s2"
+ expect ~ ".*"
+}
+
+haproxy h1 -cli {
+ # non existent backend
+ send "del server foo/s1"
+ expect ~ "No such backend."
+
+ # non existent server
+ send "del server test/other"
+ expect ~ "No such server."
+}
+
+# first check that both servers are active
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.body == "resp from s1"
+
+ txreq
+ rxresp
+ expect resp.body == "resp from s2"
+} -run
+
+# delete the dynamic server
+haproxy h1 -cli {
+ # server not in maintenance mode
+ send "del server test/s2"
+ expect ~ "Only servers in maintenance mode can be deleted."
+
+ send "disable server test/s2"
+ expect ~ ".*"
+
+ # valid command
+ send "del server test/s2"
+ expect ~ "Server deleted."
+}
+
+# now check that only the first server is used
+client c2 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.body == "resp from s1"
+
+ txreq
+ rxresp
+ expect resp.body == "resp from s1"
+} -run
+
diff --git a/reg-tests/server/cli_delete_server.vtc b/reg-tests/server/cli_delete_server.vtc
new file mode 100644
index 0000000..61d241c
--- /dev/null
+++ b/reg-tests/server/cli_delete_server.vtc
@@ -0,0 +1,60 @@
+# This script is to test the ability to remove servers, unless they are
+# referenced by some elements from the configuration.
+#
+varnishtest "Delete server via cli"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feS}"
+ acl s1_full srv_sess_rate(test/s1) gt 50
+ default_backend test
+
+ backend test
+ use-server s3 unless { always_false }
+ server s1 ${s1_addr}:${s1_port} # referenced in ACL
+ server s2 ${s1_addr}:${s1_port} check # referenced in track
+ server s3 ${s1_addr}:${s1_port} track s2 # referenced in use-server
+ server s4 ${s1_addr}:${s1_port} # removable server
+} -start
+
+haproxy h1 -cli {
+ # non existent backend
+ send "del server foo/s1"
+ expect ~ "No such backend."
+
+ # non existent server
+ send "del server test/other"
+ expect ~ "No such server."
+
+ # server referenced in ACL
+ send "del server test/s1"
+ expect ~ "This server cannot be removed at runtime due to other configuration elements pointing to it."
+
+ # tracked server
+ send "del server test/s2"
+ expect ~ "This server cannot be removed at runtime due to other configuration elements pointing to it."
+
+ # tracked server
+ send "del server test/s3"
+ expect ~ "This server cannot be removed at runtime due to other configuration elements pointing to it."
+
+ # server in running mode
+ send "del server test/s4"
+ expect ~ "Only servers in maintenance mode can be deleted."
+
+ send "disable server test/s4"
+ expect ~ ".*"
+
+ # valid command
+ send "del server test/s4"
+ expect ~ "Server deleted."
+}
diff --git a/reg-tests/server/cli_set_fqdn.vtc b/reg-tests/server/cli_set_fqdn.vtc
new file mode 100644
index 0000000..f82674f
--- /dev/null
+++ b/reg-tests/server/cli_set_fqdn.vtc
@@ -0,0 +1,57 @@
+varnishtest "Set server FQDN via CLI crash"
+
+feature ignore_unknown_macro
+
+# for "set server <srv> fqdn"
+#REGTEST_TYPE=bug
+
+# Does nothing. It is there only to create the s1_* macros
+server s1 {
+} -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test
+
+ backend test
+ server www1 ${s1_addr}:${s1_port}
+} -start
+
+haproxy h2 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ resolvers systemdns
+ parse-resolv-conf
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test
+
+ backend test
+ server www1 ${s1_addr}:${s1_port} resolvers systemdns resolve-prefer ipv4
+} -start
+
+haproxy h1 -cli {
+ send "set server test/www1 fqdn foo.fqdn"
+ expect ~ "set server <b>/<s> fqdn failed because no resolution is configured."
+ send "show servers state test"
+ expect ~ "test 1 www1 ${s1_addr} .* - ${s1_port}"
+} -wait
+
+haproxy h2 -cli {
+ send "set server test/www1 fqdn localhost"
+ expect ~ "test/www1 changed its FQDN from \\(null\\) to localhost"
+ send "show servers state test"
+ expect ~ "test 1 www1 127.0.0.1 .* localhost"
+} -wait
diff --git a/reg-tests/server/cli_set_ssl.vtc b/reg-tests/server/cli_set_ssl.vtc
new file mode 100644
index 0000000..fa6fe68
--- /dev/null
+++ b/reg-tests/server/cli_set_ssl.vtc
@@ -0,0 +1,60 @@
+varnishtest "Set server ssl via CLI"
+
+feature ignore_unknown_macro
+
+# for "set server <srv> ssl"
+#REQUIRE_VERSION=2.4
+#REGTEST_TYPE=devel
+#REQUIRE_OPTIONS=OPENSSL
+
+# Does nothing. It is there only to create the s1_* macros
+server s1 {
+} -start
+
+haproxy h1 -conf {
+ global
+ ssl-server-verify none
+
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend myfrontend
+ bind "fd@${my_fe}"
+ default_backend test0
+
+ backend test0
+ server www0 ${s1_addr}:${s1_port} no-ssl
+ default-server ssl
+ server www1 ${s1_addr}:${s1_port} no-ssl
+
+ backend test1
+ server www0 ${s1_addr}:${s1_port} no-ssl
+} -start
+
+haproxy h1 -cli {
+ # supported case
+ send "show servers state test0"
+ expect ~ "test0 2 www1 ${s1_addr} .* - ${s1_port} - -1"
+ send "set server test0/www1 ssl on"
+ expect ~ "server ssl setting updated"
+ send "show servers state test0"
+ expect ~ "test0 2 www1 ${s1_addr} .* - ${s1_port} - 1"
+ send "set server test0/www1 ssl off"
+ expect ~ "server ssl setting updated"
+ send "show servers state test0"
+ expect ~ "test0 2 www1 ${s1_addr} .* - ${s1_port} - 0"
+
+ # unsupported cases
+ send "show servers state test0"
+ expect ~ "test0 1 www0 ${s1_addr} .* - ${s1_port} - -1"
+ send "set server test0/www0 ssl on"
+ expect ~ "'set server <srv> ssl' cannot be set"
+
+ send "show servers state test1"
+ expect ~ "test1 1 www0 ${s1_addr} .* - ${s1_port} - -1"
+ send "set server test1/www0 ssl on"
+ expect ~ "'set server <srv> ssl' cannot be set"
+} -wait
diff --git a/reg-tests/server/common.pem b/reg-tests/server/common.pem
new file mode 120000
index 0000000..a4433d5
--- /dev/null
+++ b/reg-tests/server/common.pem
@@ -0,0 +1 @@
+../ssl/common.pem \ No newline at end of file
diff --git a/reg-tests/server/get_srv_stats.lua b/reg-tests/server/get_srv_stats.lua
new file mode 100644
index 0000000..105b954
--- /dev/null
+++ b/reg-tests/server/get_srv_stats.lua
@@ -0,0 +1,11 @@
+local function lua_get_srv_stats(txn, name)
+ for _, backend in pairs(core.backends) do
+ for _, server in pairs(backend.servers) do
+ if server.name == name then
+ return server:get_stats()
+ end
+ end
+ end
+end
+
+core.register_fetches('get_srv_stats', lua_get_srv_stats)
diff --git a/reg-tests/spoe/wrong_init.vtc b/reg-tests/spoe/wrong_init.vtc
new file mode 100644
index 0000000..152622c
--- /dev/null
+++ b/reg-tests/spoe/wrong_init.vtc
@@ -0,0 +1,22 @@
+# commit 84c844eb12b250aa86f2aadaff77c42dfc3cb619
+# BUG/MINOR: spoe: Initialize variables used during conf parsing before any check
+#
+# Some initializations must be done at the beginning of parse_spoe_flt to avoid
+# segmentation fault when first errors are caught, when the "filter spoe" line is
+# parsed.
+
+#REGTEST_TYPE=bug
+
+varnishtest "SPOE bug: missing configuration file"
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf-BAD {} {
+ defaults
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend my-front
+ filter spoe
+}
diff --git a/reg-tests/ssl/README b/reg-tests/ssl/README
new file mode 100644
index 0000000..f2fc534
--- /dev/null
+++ b/reg-tests/ssl/README
@@ -0,0 +1,2 @@
+File list:
+ - common.pem: PEM file which may be used by most of the VTC files.
diff --git a/reg-tests/ssl/add_ssl_crt-list.vtc b/reg-tests/ssl/add_ssl_crt-list.vtc
new file mode 100644
index 0000000..8810b72
--- /dev/null
+++ b/reg-tests/ssl/add_ssl_crt-list.vtc
@@ -0,0 +1,114 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "add ssl crt-list" command to add a certificate over the CLI.
+# It requires socat to upload the certificate
+
+# this check does 2 requests, the first one will use "www.test1.com" as SNI, and
+# the second one will use "localhost". Since vtest can't do SSL, we use haproxy
+# as an SSL client with 2 chained listen section.
+
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'add ssl crt-list' feature of the CLI"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(localhost)
+
+
+ listen ssl-lst
+ mode http
+ bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/localhost.crt-list
+
+ server s1 ${s1_addr}:${s1_port}
+ server s2 ${s1_addr}:${s1_port} ssl crt "${testdir}/common.pem" weight 0 verify none
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.pem"
+ expect ~ ".*SHA1 FingerPrint: DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+shell {
+ echo "new ssl cert ${testdir}/ecdsa.pem" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/ecdsa.pem <<\n$(cat ${testdir}/ecdsa.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/ecdsa.pem" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/common.pem [ssl-min-ver SSLv3 verify none allow-0rtt] !*\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/ecdsa.pem [ssl-min-ver SSLv3 verify none allow-0rtt] localhost !www.test1.com\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list <<\n${testdir}/ecdsa.pem [verify none allow-0rtt]\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/// <<\n${testdir}/ecdsa.pem localhost !www.test1.com\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list///// <<\n${testdir}/ecdsa.pem\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl crt-list ${testdir}/localhost.crt-list// ${testdir}/ecdsa.pem\n" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/ecdsa.pem"
+ expect ~ ".*SHA1 FingerPrint: A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+}
+
+haproxy h1 -cli {
+ send "show ssl crt-list ${testdir}/localhost.crt-list//"
+ # check the options and the filters in any order
+ expect ~ ".*${testdir}/ecdsa.pem \\[(?=.*verify none)(?=.*allow-0rtt)(?=.*ssl-min-ver SSLv3).*\\](?=.*!www.test1.com)(?=.*localhost).*"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+# Try to add a new line that mentions an "unknown" CA file (not loaded yet).
+# It should fail since no disk access are allowed during runtime.
+shell {
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/ecdsa.pem [ca-file ${testdir}/ca-auth.crt] localhost\n\n" | socat "${tmpdir}/h1/stats" - | grep "unable to load ${testdir}/ca-auth.crt"
+}
+shell {
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/ecdsa.pem [ca-verify-file ${testdir}/ca-auth.crt] localhost\n\n" | socat "${tmpdir}/h1/stats" - | grep "unable to load ${testdir}/ca-auth.crt"
+}
+shell {
+ printf "add ssl crt-list ${testdir}/localhost.crt-list/ <<\n${testdir}/ecdsa.pem [crl-file ${testdir}/ca-auth.crt] localhost\n\n" | socat "${tmpdir}/h1/stats" - | grep "unable to load ${testdir}/ca-auth.crt"
+}
+
+# Check that the new line was not added to the crt-list.
+haproxy h1 -cli {
+ send "show ssl crt-list ${testdir}/localhost.crt-list//"
+ expect !~ ".*ca-file ${testdir}/ca-auth.crt"
+}
diff --git a/reg-tests/ssl/bug-2265.crt b/reg-tests/ssl/bug-2265.crt
new file mode 120000
index 0000000..1b7cb2c
--- /dev/null
+++ b/reg-tests/ssl/bug-2265.crt
@@ -0,0 +1 @@
+common.pem \ No newline at end of file
diff --git a/reg-tests/ssl/ca-auth.crt b/reg-tests/ssl/ca-auth.crt
new file mode 100644
index 0000000..1695af5
--- /dev/null
+++ b/reg-tests/ssl/ca-auth.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFyzCCA7OgAwIBAgIURpSju/jEN7LJUV4vEibyeuJwd5kwDQYJKoZIhvcNAQEL
+BQAwdDELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxHTAbBgNVBAoM
+FEhBUHJveHkgVGVjaG5vbG9naWVzMTEwLwYDVQQDDChIQVByb3h5IFRlY2hub2xv
+Z2llcyBDQSBUZXN0IENsaWVudCBBdXRoMCAXDTIwMDQyODE4NTIwMloYDzIwNTAw
+NDIxMTg1MjAyWjB0MQswCQYDVQQGEwJGUjETMBEGA1UECAwKU29tZS1TdGF0ZTEd
+MBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxMTAvBgNVBAMMKEhBUHJveHkg
+VGVjaG5vbG9naWVzIENBIFRlc3QgQ2xpZW50IEF1dGgwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDGlgKEhw5RLOBgaTsPJJYglozt3SVv94084RoA19kw
+udemrMaJdXV517MsR9qoHoxFVFdYP//W6vx7c5RadPqMZrWT9QXhJSR0Kr5KdHUs
++t8H8pmlDicxIx0cuRtmKmRuAMoDI1E+5EsRspemyq1ExcBm42zM9Oj9QysKF0wc
+FXq56eHgXSmKQGAiGuGB5v6CeVu3DVhZHuGyv3XVeOsf35s2M757hi+N6mqqvtw8
+JiQyw53YxBaB80CRtiIXzLd07S4GZnRCNOWgXLwo6+8K1gId3KRk4DhUIEIMrchy
+aqeZmJVToF+8fbjZ97pREJyQo4EXsgPrLE3Ck5Y+TfYJli3NJNhEWhucu4RQ6XXg
+lTruatM9uj9ZZEvtJreu5KRvAOfRLBj+C3f+VRoDrE9RSEn/XSGek+/D7+n3U0GO
+h2KcrUn7R+Yy6DdwxhGImqDnYaKaZds+vEjtvP4ViOC982eVl5/lFAw3JBHR57iL
+/K0zTRwjSasUvlJFQNUNAG9HktCYTdEj0U3C/xBDPayY04BFvn8piZeMpoCN9dre
+UxuctmMrz1pIvYAdZSseraf4W0psx6oeU/CcFZnkc5lbUDvn7u6Ozk4gnfyo1fxJ
+8a2X7dl3joqUABwaS/FkP/CPoEKBMFf4pcZUhuWbPkhiVNXZIkQYQISY6JOr5qDn
+TwIDAQABo1MwUTAdBgNVHQ4EFgQUW4t2W4MUuBG0EyFdHObYYZbtjEowHwYDVR0j
+BBgwFoAUW4t2W4MUuBG0EyFdHObYYZbtjEowDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQsFAAOCAgEAIqQJu2nX0Rn9EUPuVDhCrirQFDwMFb7Ifoqr6rMoD9OT
+pgyQb198TkW550Rhg36LnnmBzifOoPBmHVJQWvMAVnH/BQrkRqXFRk2M3PRoEv44
+twMlUPU/NMLVKnXE+neBlXhBWeyY/bCmVftk/TdLwom0Mer4Nw+rt1JQAXKKNRdj
+3b8EnJHGy7Es4fv/traZQ6ZSHoD0GsxydweCjZEO2hLw9/VVrjdM6rNDZlz7cST5
+rhyUeG3mlwWweGY6ahlMx//Z11m/1JLGyDcwMgunRoBiDep7I3ZMcWH1PjM3CyrL
+ZrDoUvwtMSEAuT/be5SfU/CzS/DTyBtfSpEUbm1dg9cqm1vG7/GFdzJqafv8ppwh
+fZhtxKXcyH4C1BeWlDqA06aNM3ClxWNyrAjdcyI45tosxgDuWyRyUC5IhyK6s81O
+6AP7xQH6s+i0k3mzgOxieV/QRo4E67y31XZHJz6uFKSaHOIdpV7li7mAiswFfhMl
++C3ud2rU79X2vTYLzELR05djzAXHJT9sc5NjbODw3RRKRkcB78IoNM7D0Mcctz+3
+1DHcmk6crsxPRDmvKj9zQTjbG1UpjTogdsbh1afuqJ1atxBgav+/YhefAziXazAy
+P1CHU/OYq/vjfGobIz6RVUjkg50RwkD58TR3LzQPOpSNoM55U/jGd3x4X3kh8tU=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/cert1-example.com.pem.ecdsa b/reg-tests/ssl/cert1-example.com.pem.ecdsa
new file mode 100644
index 0000000..060d92b
--- /dev/null
+++ b/reg-tests/ssl/cert1-example.com.pem.ecdsa
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIIBhzCCAQ2gAwIBAgIUWnUgbYQBOPUC1tc9NFqD2gjVBawwCgYIKoZIzj0EAwIw
+FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wIBcNMjEwNDAyMTI0NzAyWhgPMjA1MTAz
+MjYxMjQ3MDJaMBYxFDASBgNVBAMMC2V4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAEWuf05jTK9E7VNfDVknHTdp7FHB+LNOMVPB/XBRiLmU/+/EzF0D+5
+t4APkwa4vSw3UckWUMoAxOrJ1dUk8T8Y5AxWGBomcuAQGtfmUlDBXvhUjsJ1s9Zz
+iy6WyRkU/fcsoxowGDAWBgNVHREEDzANggtleGFtcGxlLmNvbTAKBggqhkjOPQQD
+AgNoADBlAjEAwDVLrc9jL2zx9byM1qGyHKnuk8xsEvZEkUPMor1hrTyqkLGIEu3h
+1vyRvboYvGh6AjB45GdtABrNeRHI7QeA1ZX0j34dj7lYP0NvYjSVSyvRhpe/nzl7
+CzU2IkkQ4fmxosI=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDCSlVR2c8kUsBYDAqrH
+M60zwqNVVB0FGafWXBJBn4kgTKRQPCqmwkAJp+yd62Z05iKhZANiAARa5/TmNMr0
+TtU18NWScdN2nsUcH4s04xU8H9cFGIuZT/78TMXQP7m3gA+TBri9LDdRyRZQygDE
+6snV1STxPxjkDFYYGiZy4BAa1+ZSUMFe+FSOwnWz1nOLLpbJGRT99yw=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/cert1-example.com.pem.rsa b/reg-tests/ssl/cert1-example.com.pem.rsa
new file mode 100644
index 0000000..4639b75
--- /dev/null
+++ b/reg-tests/ssl/cert1-example.com.pem.rsa
@@ -0,0 +1,80 @@
+-----BEGIN CERTIFICATE-----
+MIIE1jCCAr6gAwIBAgIUJUqgFv3XQuBU7FxDOYZDO/DZFPowDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wIBcNMjEwNDAyMTI0NzAzWhgPMjA1
+MTAzMjYxMjQ3MDNaMBYxFDASBgNVBAMMC2V4YW1wbGUuY29tMIICIjANBgkqhkiG
+9w0BAQEFAAOCAg8AMIICCgKCAgEA1Qyp+JCxptby6yTjlF6GKoSiYXMuTC15GqkQ
+9cA5wExRvRj444ZDeltt4qFh50MQGaPL1Uq5pk2LxVhIMApn3aFv0vVXXLOpkcWL
+tknYhcL7y1wZCGrYff0jJsi/en2YKbzdJ+avFlkrae7uhTmEwLcDRVhJpJYj0nj7
+7NIRZEzzvYxdNVVDkdNacZtJrtanTagse15OV7w6dniIjzyr7P5backq8EyQTWvg
+hf56gx8r/JVoMZdxSd3EXcIXBnyDOU6KTiHu970DJmcz4oEaAlKFCehquNfGyVw5
++jzUPyMP/IzvJZY68s3TjKYnJhoyu2GRf+SH2DBjYVL/I9ULK5G68Oqrjl3lZMM9
+NCjvLykBVAeQ2wYscCUChmLU9Vor1N5Z0EqZx9Wx/SBSPmlpTR4p1eoEmcrrZjUW
+TjDBVk4F3cBrFrMEq0rr+aUSluPzpfYEv/tn1h0WTW/8PbSoQluf85i/BXnzmW1L
+JplcembL1cbm0idJjzRvQx8/WGoSSIYHzWFgRhagvQ7xGf88pGGh0+n/K/xPXZ+Z
+I1b89rLqs5pdBJtAgj7wd2oTxiKDILkpvwRBq9q2p7+yEnaIhWVQr3UudiSRcB8O
+lEk8YHpa8wiKMksezCqs4zfdk3Wh1JEwgy1zYk+penzfvQGaySv5Q20P8V2ZK8i1
+HHnTRLUCAwEAAaMaMBgwFgYDVR0RBA8wDYILZXhhbXBsZS5jb20wDQYJKoZIhvcN
+AQELBQADggIBAD6LkOmRDupXUyDmvA1PsZoNAnN6/ikZOzgcLoPbmPto2MAG16VD
+VJF+2i4FddUJmuPSYkGoo+eEcIJ6pyifWm0f673dvHSn/Epgkyp+uOQcLnVGE5QK
+cYk7ETlw9BQ/uRYi70hXLk8yi/XbBXIZdtICuxzEJrB+uE3tBK33Zy+KoDweifAV
+vGNLDdhK2Slq0/ExaifeO2Agkz0Cb5nihsMnNlSiJPh+Qqhcyn0+o5hW80AozD3A
+MZYVhiPtCfOoHYO02GpsPkYq1mfez79O+t5d3akLLPXEMO8iK4HUtlkYj84wP220
+fRct1E1apRCCfHORqnlPEYcinoEvlsl+c0olH6L2L3t4sDzWGHQoAzNQMSMAwdPr
+NShvuWmKdYoPrTfdp73neP4jkzNMi2FR1SL7M/Mr272njrBrYLayVbb5Aogp9Myp
+PrWohhrYaMCeCVLdtX0C8Ijjo+WhQjMJ5I7J2CCsRifhCnloD3nP3Cfd+obmGxTV
+spGxTfQxn8BH/rqEkTKZgqz8McpMXJChzSe7JduGnv5E8nZH1UQBqbtgDP+JndI3
+5Ncs7GsU0JLfju4w3IaAjslOmu4TLS0MDSDJo5heo1U/OB/kqocbKcoP39mCiWPy
+juW/VTheRaszG8tuPhXYovg9LXZX5HW7eWjgwm9kn9c4fu/3NY7PJbmO
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDVDKn4kLGm1vLr
+JOOUXoYqhKJhcy5MLXkaqRD1wDnATFG9GPjjhkN6W23ioWHnQxAZo8vVSrmmTYvF
+WEgwCmfdoW/S9Vdcs6mRxYu2SdiFwvvLXBkIath9/SMmyL96fZgpvN0n5q8WWStp
+7u6FOYTAtwNFWEmkliPSePvs0hFkTPO9jF01VUOR01pxm0mu1qdNqCx7Xk5XvDp2
+eIiPPKvs/ltpySrwTJBNa+CF/nqDHyv8lWgxl3FJ3cRdwhcGfIM5TopOIe73vQMm
+ZzPigRoCUoUJ6Gq418bJXDn6PNQ/Iw/8jO8lljryzdOMpicmGjK7YZF/5IfYMGNh
+Uv8j1Qsrkbrw6quOXeVkwz00KO8vKQFUB5DbBixwJQKGYtT1WivU3lnQSpnH1bH9
+IFI+aWlNHinV6gSZyutmNRZOMMFWTgXdwGsWswSrSuv5pRKW4/Ol9gS/+2fWHRZN
+b/w9tKhCW5/zmL8FefOZbUsmmVx6ZsvVxubSJ0mPNG9DHz9YahJIhgfNYWBGFqC9
+DvEZ/zykYaHT6f8r/E9dn5kjVvz2suqzml0Em0CCPvB3ahPGIoMguSm/BEGr2ran
+v7ISdoiFZVCvdS52JJFwHw6USTxgelrzCIoySx7MKqzjN92TdaHUkTCDLXNiT6l6
+fN+9AZrJK/lDbQ/xXZkryLUcedNEtQIDAQABAoICAAfQoxt/E0UvdVGy1LZIkVtV
+6i7w7q3UrTCRKxIYrwWixwzMsbSG5ErEt88sZE77YsfN/lggmZbEGXBvwJYii5TR
+qyxt23qHDJ1QRcO2Cb8+W8Yl5rUsViyo8HUnv/5aRQ6i4unnyFxlgPYt0YoJhhkb
+nX8ZsfnbmAzMa1FQk1q+h+JYF8MxEX1z50lrjNRhA1oR5S/RUcZeHTbjTP8UFqpm
+2iuTOYP/CvwMDPxdTVkp948YW+4VxA4VmHJoADg4sQeVHfWnwQBNaqQp/Pk+Cxoy
+tLacU+3b3GreezH2sUJvotJ8yPjz/c2SR0RNg/od0+aTuaabV3BSthKH3NwPoI0z
+bfLkwrR5KyJobB399UN3aqg2s4toKNy+6l9x2dh+QimwDOivptvynEd9BIXd0ZCn
+ohdE9b9j9eq0l36WX+u30JMyevjjumnZjKCh80Pf7MnTcqzggcWvoPYtjPqBj0ig
+WvKwPCmV0TG8wN441mjObUXLa1mFlb8b+NM8k8gy5odkyRGm8ZOOxYlOWmtu/sNM
+VBdjG3U6yONDf+TO+v7OVsOVs/IHFOX3RtpCt8wnFZfTxkxjqrk3E8O7RTXcrIny
+Tgzmi0h0bSTahsKm/0roQNPK6XNw6S6CW9B2kPz2gBEIpjrEl+C8hmsiYEzNJ9kM
+oLWlKEuwcMaXS1oazTqBAoIBAQD3S7icGxwTVypEKq7ZT4859UOtsdrqTKEFIVtf
+z4IIwmlo65mfNA7/w2TSV8p/o3NH4yznkEnVzvYYNXKt316oZM2CqCoA3XjeFlO8
+hUoScVn1VV/66E6wTIbRUCMdBfyPVNQ12bTZ/rPpmSlatXfUGarVRlJ15DDS0TpV
+s+ohxpT1IUnCx7N0z8cPbTFy2qguSbID6UydajXtM/h8up4866wg8nzT4PBssiqf
+NzWgAA+XP7oigfncgqSuQ2zk8Bedbm+tE6bKgK3O6VfTDRIV2Kw89Kvt0OWQYpOD
+F/CTarNdlp0kYmos/rC57AVSpdTNQm3944WFi1ts+aL74+b9AoIBAQDcjF0TnKr0
++uSAFNHDIxf7LHnX+uOZ7cTs284hIHZJ4z/GgwHKimWeG4XZsOGPh9Lk5GGMyDBB
+N9daaGYskoQ9qh0e3IyRbbzdcwUMV9xzulYzUg5OKoezpBlp8Ydd8Gp3/9SBQtTi
+9jjLZ45Qea7/F/Kk1TebUvqGQa+c7HdeJ60/6121QPw7eFqJIOVqf47Tkaq3Wmpr
+csfQulNwN4Gi+v2gp3iMR5q/agKCOtI56daheYyNgPxX+chjiqOqC5WElTxPihde
+lKtYtKh3rnboKGUQ4fJOVFoV/wrfo5wfcYkPDB32Ct1B2hsI3oHbnPkBPgvCB0Xa
+/HPrEqWP5W4ZAoIBACQgVbnIZBOXOj93FM/+RWgsIlTvlJGB3EwJkXWvtMlezVNc
+h7awPjiy7LmlxZlb4W1xDJBPjdnEQENNG5G2/fcPss4RjwFNWWjoThdOSYHkOUYT
+0M+wvD4ZD+DoGhkVVM4DkHTFdxwZj2Li0x3DQNwlW8WIXmeGjHNfyWvXuq5wejZN
+RJ9F2TuJVwUz6HNk6gjJD05u+JhOec5LN1PRV2iC7URq6D1zsOvQI1XbFORo3d40
+mxaLclr6YuBqTTAsuuZuybW5FzaiEcIWaJQWZrv2SUMmYy98wuyS2gXeq3B9t/JG
+HHLCRcyI8HxYtHZcb3gE6liasljOAO8skNjHdGkCggEBANF9dm/Jkc2vf1p17CWJ
+8R6BSZ8wzf6JjlNaGjr3JcTbWdnK2Om1ef6rsAFudWKrplQK5uodwVBBpYpXvi26
+YmhcbNrCrbb54LsMpQ/raRh4N6b522K+HTYyun0akfVWBxvC4uyBOcv4C0ySKekh
+HGtsKOwPJ4mfUR4zyIarSlsiHvunKtSfTLeEg6Lbn28AiP9HzzvoY0t6tHf8dIMU
+Bkx0UnPGf8fnwALvxEBFdSjTiC7LUQmcKpW6SnDa4MkFxdkxFB+NUNNjLjrNJ3S/
+QG0W6aEWrd1fXE6meoKhWwu3AXRMky0Bdtc1QBa1m+2p9hALCoob9Guk/sqcZK0B
+RgkCggEAHjEa/4q05VPbMm7TOgF2m5QTdap47LyTBti9TRurGtB/9nWvIHpM9sAy
+0xVvGcoZOqVHYvRZGpZ8IX4B+9FGMNUDBMc8shj3oA514tCZVPCEolnHcuwERiZD
+c5zh2PccktAmT5EXGch0+eRuxJ1ROKgR0coeo8KMOxtrm0hRFTznsJ0nzNjAoCA4
+zW6DVY7qIb9ksI44rWlgGSwXG1OuUpqH8+tBAvR3uNa/j59psBb7Pu5zmg/qhx1m
+Ljd/0JTxE8A00l0bC8S1F15wGn8GQD63pjq8nr/biI0Y39g3TEAffkI33FfCjBxQ
+gO96WUZwPEimQAnu4Jw+RlpLtWjOBg==
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/cert2-example.com.pem.ecdsa b/reg-tests/ssl/cert2-example.com.pem.ecdsa
new file mode 100644
index 0000000..9dbf25f
--- /dev/null
+++ b/reg-tests/ssl/cert2-example.com.pem.ecdsa
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIIBhzCCAQ2gAwIBAgIUJ2zhyUgHjXsPzANqN5ZSHX0RVHYwCgYIKoZIzj0EAwIw
+FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wIBcNMjEwNDAyMTI0ODMxWhgPMjA1MTAz
+MjYxMjQ4MzFaMBYxFDASBgNVBAMMC2V4YW1wbGUuY29tMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAEx1lz/PGGGGI9EG5L7qx8JGwt15HNuJI9SsdwzkRV/N8sBFzAEVbS
+UWVthQ8tIAdW1y7d9fwkHrzkPulDVwZGGr3qrnZSAgb7NCxBICxgdDI7ku3oPdNd
+bsSASmhJrQO4oxowGDAWBgNVHREEDzANggtleGFtcGxlLmNvbTAKBggqhkjOPQQD
+AgNoADBlAjEAnHx8jSzldb5z4FR3oZ3twWCzRR98n1IBuBi5fe6hhBlQF5u0iAyb
+oDcZ2Tx9UfWhAjB/DKDFrlXAkow4rQxHU602c9SI6hJTCKxIfWWoBYP7zqZXEUjj
+2QK7BQb3sHNpsqY=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDsiqzn+NewEL5bc3CA
+sY4ADwk42yQJCPZalIct5i4e5u660YCUMHqbVAUQe2R6YFyhZANiAATHWXP88YYY
+Yj0QbkvurHwkbC3Xkc24kj1Kx3DORFX83ywEXMARVtJRZW2FDy0gB1bXLt31/CQe
+vOQ+6UNXBkYavequdlICBvs0LEEgLGB0MjuS7eg9011uxIBKaEmtA7g=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/cert2-example.com.pem.rsa b/reg-tests/ssl/cert2-example.com.pem.rsa
new file mode 100644
index 0000000..7a6678f
--- /dev/null
+++ b/reg-tests/ssl/cert2-example.com.pem.rsa
@@ -0,0 +1,80 @@
+-----BEGIN CERTIFICATE-----
+MIIE1jCCAr6gAwIBAgIUCMeB9uw+PcBIqW8cDI21s7SxWVYwDQYJKoZIhvcNAQEL
+BQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wIBcNMjEwNDAyMTI0ODMyWhgPMjA1
+MTAzMjYxMjQ4MzJaMBYxFDASBgNVBAMMC2V4YW1wbGUuY29tMIICIjANBgkqhkiG
+9w0BAQEFAAOCAg8AMIICCgKCAgEAzt3oEBc1jWk2PaN/tJA/PTTdwfi6ZXqXCrCA
+ZScmo1jvM3CcoOM1BUhiMcoeK4uHRryYUO/eL/ZM5OA11GAIaMevhK65rtBYIh2Q
+klRH+IojmRL91U9tXno+oMBS8WwF7K6eCCj4XUTAKuolQ4yiFHTvdwOsqSrVY3m/
+m2Pp4VTqjDSsljmv8GJ0lQpxan5bZt6WWQiCIbdS7ExgJIALDemg+JOIz/bDmCr/
+3tihmHOK94lCcV/CFOs2XctVnkS6W8x/S4U41Y/eciUbLWr5CxAvfZLOQBuriWiU
+SMHPJI63VPijGKStnBn/zRMvDJhaadkRqAqXlJUZ7nkcZ5WlPuIMgAOc2uCZioW8
+DvyJmplBjBBQdGqRFaeX2lDvJwDECDxSHglfQgVVz3II3ZMSlDsystu4MCgeFa0e
+S0UCvl+5mK1/QVzkzxYj1o9iXhtq5VSLmbaAssDcn20ashJMxmruagsOR4MhaKA0
+RsMosrAiCbcBiY/Q8W6NoOwxNUC8agsqDRNSoJfQgYhTJXqxbnteyy3TXtF4zW+S
+7D0ZsRXM+u2z6V7lP8rvS8ZwzI7nDA/hH34IIw4H875IESLA/8ZiMA3luzMNxwWr
+xCn58JCJM0lJmgkO+NvKctGAGxgtdKzgHemzczx6GuA3V5mOOD01KUbMpZITN4lP
+vAt++qkCAwEAAaMaMBgwFgYDVR0RBA8wDYILZXhhbXBsZS5jb20wDQYJKoZIhvcN
+AQELBQADggIBAMc0Z6hDp5VuihQ1LpmfisQtrs0F5SpfxlbCshg9MOrgRGwViRBM
+bCw1UhDZPT7sQ47JucUkw4RguJTsNQO6Iacq04EKSfHmbxznlZ9eBpAMdK8vWLQH
+jrpmNVE6At3kuyFJrXEc4BOrvzwDqcbG8cFFwT+l9C5BGSZCG/muLPuW3S36IY7i
+uVGc4MqrOQLRghyZbjkXrReGzBZVbuCiz9O+zsjorEzt58gdwIhrl8WyHTJ/Nqy7
+ibfFDh+tJxdNkipa0PZEqovMUcMG1N1E+n4nl6QooUsIx8JmeL5OD4J15ZuvrK3A
+emggxAMs+rkooocc8SL8i0C7l1m74qRKCP/dhIw8R8XiSKaSU5PQxlmY62qHJNkh
+RIkwvv+VcGdUzC74eEPUagKABzYARXBC2410E8vekxVYAZ3U31ypB+/3nWBJOqH0
+P//I1ZKwYLQCuC02O2Uy44kwZsZ1Syh2BYJxjdIeg5oMVnrDhi9kYnMtDmtzLsnC
+kP/cMKX7NZ7d/qbF6Aa9vVE/Ta/OrLxETF8CrjSa/nDLdLpm9sDC26/aqZv5L554
+xeSKVxvZyRFtObSKW1qzK40RMkWUarh72urtd9aM1t5PHOnwY77jO/yemjxfhgvp
+jUKM0pxIe7EmNqoEay+zdN58x8VPDtLFNehorGUnUGkaS57BFBjpEUvY
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDO3egQFzWNaTY9
+o3+0kD89NN3B+LplepcKsIBlJyajWO8zcJyg4zUFSGIxyh4ri4dGvJhQ794v9kzk
+4DXUYAhox6+Errmu0FgiHZCSVEf4iiOZEv3VT21eej6gwFLxbAXsrp4IKPhdRMAq
+6iVDjKIUdO93A6ypKtVjeb+bY+nhVOqMNKyWOa/wYnSVCnFqfltm3pZZCIIht1Ls
+TGAkgAsN6aD4k4jP9sOYKv/e2KGYc4r3iUJxX8IU6zZdy1WeRLpbzH9LhTjVj95y
+JRstavkLEC99ks5AG6uJaJRIwc8kjrdU+KMYpK2cGf/NEy8MmFpp2RGoCpeUlRnu
+eRxnlaU+4gyAA5za4JmKhbwO/ImamUGMEFB0apEVp5faUO8nAMQIPFIeCV9CBVXP
+cgjdkxKUOzKy27gwKB4VrR5LRQK+X7mYrX9BXOTPFiPWj2JeG2rlVIuZtoCywNyf
+bRqyEkzGau5qCw5HgyFooDRGwyiysCIJtwGJj9Dxbo2g7DE1QLxqCyoNE1Kgl9CB
+iFMlerFue17LLdNe0XjNb5LsPRmxFcz67bPpXuU/yu9LxnDMjucMD+EffggjDgfz
+vkgRIsD/xmIwDeW7Mw3HBavEKfnwkIkzSUmaCQ7428py0YAbGC10rOAd6bNzPHoa
+4DdXmY44PTUpRsylkhM3iU+8C376qQIDAQABAoICAEQ6PiKodPop3EDiHul/tcvL
+FuS100xK7WwSIJa8Hes8FtCBcLdDmKYgZHqFbgPwpfI3m4j+Q+rPsja+mCJudfeQ
+/JunQQieIKNH2vnYIFChxvHiqKNk6e6CJQvBwtlrRlz0jpykXp3sYfEFfrrTtFVI
+5/350UWOIgkIC6EFiArQhfcuHEoDxrpizo6lfhigiibYfP/qZXkXTJsw6XjAXmT9
+TCEQD8x/V61laTSngEyWtxvDQo3ABnP9y9WNjbSAeHJ0dPuEeeU96SD+igMlx/PV
+J8Sj2bCdL6tHObjxaw9knqTAyJIFJllY3dxWWmsuCIvmkwM4UxwnPQFBIpQrb+9A
+rguNl+t31zljmToDIEF97G/QcbFqMQEKeNCkwIdtD/8tND7RrchcqQPc96rdHbB7
+Hfb/ZXqCSsYNahurEmeAUZJkLO9U6/0GbWHcxkHBTkrmUs2qV4LrhWP71tKpbNY7
+mGXK6Ok6ZfkAD4uau1oQkndqdlKg/rBOjcT+HGPtxWL9gPtG7om+O9mu++BngrGr
+oyNgujkVRN0fpJhKLhsT6OiZF+7CVQo4ZIw9dBQ2hzLNw5tKgW36GAVTfFxNRTje
+SerlyEog/P3s1tnDn7BngdVOdnDfiOi1O4TEb4btwqP3BSs2p0wJKaJGoClFFuwN
+n5dtHMABtSOKPbmWurbtAoIBAQDqPmZjSLfEwSXph33m7Ms2/AbQJltzU0ftRJU9
+TQGVHBajouupVcyrZ+WiWcltLov+JNlseXG/PsIWEmqSiLodIZJyjWSDUiC5iFEM
+fn2d9X4NLz0A508pFR5FQnULFEDMDryLn+4ta8Bf5NeL2p/ZavKh9rxX/8LAanse
+6Lst59RiiRMkazkjC4DHDmqUAZBt+uQVaHVFpTBJLa1k1nIc82GjsJwWsbADL3+o
+PKiggSir/Uf3nOOPhXsegVTZBiq9DNFciCa+kqT4eluUopjWxIuOKnp5mVh2DnTr
+NXyZ6jDb2JwjcJpy6HLk9EsqY1YuMpT+OCNnLM3l2Gxp/KovAoIBAQDiFJEh/LHl
+++7Z4TE0whMdjkFdSCuPyEnU4WFRKLMTPQRCdS+5GxHDy4lzpArde+51C6UkAjxe
+jaAGzQvabKBl4Al6eFpYvv0d8CQMWIrOffzVMRXuHWgm/SBg7um6ok0rM4/BOdUr
+CN2nWvBF02ZTSsGzzBmzTo4vMkcAQOiGes0Haefxm0DiVvoElL20Fv/iuEzbf60p
+W/0TzeiOBar8WxpTTcnHc6QWQ2t/Zon3/5E1LIOEU2/GQiS6zqNBRGr+kfWtz2wB
+d1IFLXITiqAQb+F3EjKqGS8ln0JYLSLRk3ALbb0EtN59lYwrabUYq9WzA1MlprLp
+GFqzAHNPc+qnAoIBAFg4DAOUXXGCdK7Q0n/n6ljY7g/ygjqawNoBHFur5s6rd3NF
+Zo+tuplLVdahDhVKlHqwkhoiWs516k65vN1XFRDnleoCijpS8fQt/KhB8zlMPZ7l
+jYoLk2qbg3z+HGqBxC2V1ziWkPMWQ6tZ2jvXqKAPgTWyYRibQFOLRrdLW0NcrkY1
+7bmnkCs8p9FQAp+fPy/Mb54IazJBlj/ZLhZuFSgGGV22o/KAFRP+DYvk3HUmb5Tm
+nPYKZkGlOcsxVi0t/2aXrzm0JTNcszjJjDgcTIeGGjD+moW1VPWRWENFL5of8yq5
+F4TZYbGpDaxgvPZH1ysq7aYYqmyvGRRZP+titw0CggEAaPoB1hU/Cbps0xDEx2mi
+dKPcaBMd3xqyZb3tcUEDvdgkRTOi4EHYguDcxyyRuvxT4ldw7AJ5w7Hhb6cAbQDp
+jcR0wkBmOzUb1S3vnyfx9CX+I4QyWamf9hKtWTU2pGm+iWPcyW0wNVZdjdslHFcn
++V8KCJGqEV9VmEaxP0XkcqVM1LdxcveTLkYSu9PRLnFLihvn3Dgx0LWdEvgWlvO8
+zIcE9090dT+WHpxZqwOS5uvtohI0U1pm2VlXMsLGfYTmZaSivn1E+n1MQYkzoi1J
+W7iHqcFycxdUlBSaOtViiIv8h+IB1dCiSxAI0RO5emY3yXKuxhnck22yl9GKuYbq
+mwKCAQA25h2vjVD9x1Yci/qWnKnchjVlTkKWb0D404fhibJpSXHtFOYiE8YXsBBS
+zLYDeDXFagl+AorvG45SoodJGl1/uqGbZMPBs0Yh211nBVtR5W+8vHLPEbw/Qvl/
+AXSmwnVT+K3oeJRxUBIlOLQcDtXcFGBhF3CbbjKU7+9gRdj0oq+O4DZXZVnJPeI4
+Rf42bfQYXub1bB+kH4WwkuLYItrzv4vLgS7kO6Z1GXz7mIBZi7zlUI7Wl5pWg1fq
+H5X6u6V6N2LKS7Sqwa7ihL1ScUMhfmcPE362FyxqwkSMWOx3F/L812MKCgwVoil6
+yupxw0d9CircRDDG93pWn3WxCHpV
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/client.ecdsa.pem b/reg-tests/ssl/client.ecdsa.pem
new file mode 100644
index 0000000..b9940f6
--- /dev/null
+++ b/reg-tests/ssl/client.ecdsa.pem
@@ -0,0 +1,28 @@
+-----BEGIN CERTIFICATE-----
+MIIDPzCCAScCAQQwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIxHTAbBgNV
+BAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENBMB4XDTIx
+MTEzMDExMDIwN1oXDTQ5MDQxNzExMDIwN1owWDELMAkGA1UEBhMCRlIxEzARBgNV
+BAgMClNvbWUtU3RhdGUxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRUw
+EwYDVQQDDAxFQ0RTQSBjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQd
+LnaDhFB0RmWq5WQMdPuFXqjBNZutLQRZ5t+AJ1afpujuwr4G79Q0eZKeYyFHldq5
+PMQDBL69D285AsUbMT42MA0GCSqGSIb3DQEBCwUAA4ICAQACnqZefAYCf51Pxhwp
+NElVCzLFcrNfMnCcOxHkuaWjdWbKZi+G4PNIT0ghWgX4k5Gzjx6cVjNmWVkLnJxg
+r6fL31u+Edl9BLr6KKrh830EOK7jN62zySFVjd9sqqBPiEBnT+3OCI9sXWXWg5nB
+B00E6Ll2axwEVrQFIVYnTPC8CJyDvF1t1Jmw/caaiWWVVoUu7Zoq1kVzMCuj7aCO
+BmhvDh237+Cjkly829/Q41aKVBSQ6yDsds4uNceOpAcXOQ8A5ZXa2yearIttvvAz
+LHvXcJZD3h/23mnLQZWo3YisQb3u7O9iIpIXdxpbVHtJ6JvshmiFHTCNB9KG+q2W
+CltrL8lYu2bWzNT8CPJRa5CsFyolIi5fEOfVOWLHKYkOgb9h2hiI9hT9Ujg5H1vM
+d7AeqE+frF5dzxslcQ/wLQoUc+v4bfhh3ffeAdNul8bydoSu3Lq1nXWchNkE6rcg
+pN2uD0eLC1hAXvxi6kQPlutmFJ8yXHySEA5uCek2Kf7dxudqIRKShT7aDVw6pd6R
+ShX4dXTGEO4eBxTcooK4mYQhf+ivyTxfkACnML85C84hXPSoIffMk+Y+PMfNgW/2
+9OH6IwEq7+dSSsAlYweGnqznPCyVfpesMaQCoG3l+5Ciznt5/WA4Mh5HzLE2PE15
+VTEdimSkNohOKYdqQHA3mcH3Dg==
+-----END CERTIFICATE-----
+-----BEGIN EC PARAMETERS-----
+BggqhkjOPQMBBw==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIAe2GvrgpqaNk1wzyawK9CJYQz4lTVLDyf5MtbDDMYrcoAoGCCqGSM49
+AwEHoUQDQgAEHS52g4RQdEZlquVkDHT7hV6owTWbrS0EWebfgCdWn6bo7sK+Bu/U
+NHmSnmMhR5XauTzEAwS+vQ9vOQLFGzE+Ng==
+-----END EC PRIVATE KEY-----
diff --git a/reg-tests/ssl/client1.pem b/reg-tests/ssl/client1.pem
new file mode 100644
index 0000000..d830a42
--- /dev/null
+++ b/reg-tests/ssl/client1.pem
@@ -0,0 +1,187 @@
+-----BEGIN CERTIFICATE-----
+MIIFLTCCAxUCAQIwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCRlIxEzARBgNV
+BAgMClNvbWUtU3RhdGUxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMTEw
+LwYDVQQDDChIQVByb3h5IFRlY2hub2xvZ2llcyBDQSBUZXN0IENsaWVudCBBdXRo
+MCAXDTIwMDQyODE4NTk0MloYDzIwNTAwNDIxMTg1OTQyWjBDMQswCQYDVQQGEwJG
+UjEiMCAGA1UECgwZSEFQcm94eSBUZWNobm9sb2dpZXMgVGVzdDEQMA4GA1UEAwwH
+Y2xpZW50MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMCUnq4y/rYG
+n2BPYutNd/dQX3KV1qVKKXBsXXqBzE84qjBg6SeQQVwcVN0UYK+l2Rqnkt9m+sV+
+CxAwDAVHts/QmD/4tbjuP8tMQiZcUsl7hxRzLRK2lXGwoX3B7GJgXxjDckCllert
+FgUMylb4ACt8oA1e4c75fY8ppmOcjaNcPBghk09uKVUOKy+UZ/HWkNncF6cO9N82
+Y+bPdL1hg8mr6n7U+jv0bdyBjjN+b/3ggInY8NGyPHpl6ezvmgaI5+stA77YolCY
+NoG7ZexMpBbtv/2M7+PHlx5c5lzd4HbuC5fOtfkMvoosIZJaI8/mM5J6aeu6JpPB
+XvGRRE1Opmmhk1M3aQvU4q9LPYLkXIivuH+sHZnVZHZ32hhpZ4GhTpgayF22n/hI
+fMOzSIMhpao/1YuLVbLgXdWJZx9uOIT//a/3Bd4I/c1/Pt11oNSIhiEAS7beWj0c
+QtsSabeQwEIOOlbxWFA1aRogFNNE3iW4gps4p/4oHmT9Warb5AadE6nzh7N1nCiD
+oO7JoHUzOj8VunLn2RZ8vWuBJI/2fh1TJVjOBmQBl6YGHD8BaRWlzv/VOiq2z8at
+90rXGUb58KYvcfOTOZmYjKK16r/112pEgJuivXXr+N6qJKYxw46m+MAD2eDQ0Bc6
+gFZMlcyBAyJwuxIejUTvWwoddfRnaFajAgMBAAEwDQYJKoZIhvcNAQELBQADggIB
+AFk00NuZDresZ9voh2E9J2GvUbG9x+NSjZR6pQ3MiPPXpLYskV2xAvxFSayGcQhG
+mIfHshsnEhE35WYU80R5Ag1Mxh+XPbZUiNj/oOEFdBj45c0HGorChaVkZtnLilMo
+B0yW+0pnkqKaRkgmVsSrNCgimBtZX1hsZRLDxa2vldJ9lTIg3OuveqBv/uwbMOUC
+eT+il/sdl68K6oNHvAFdY1U34oJnvj4yF6ZZM1jRERK38wY0+2C+mlcXNL648k+2
+lOMeBddaDUKhscWdw1+Ui8Sn6lc6H+iPpGo3xdj9awc0568SCH+D5cpuTMNTREuM
+p3paOMGpLWuQQisltdmz8Ms8lAcJUDeyDmrgE9CPx9DiydB6Z1uP9y9sozqB2SIN
++QqfQLv+lAaUB6cu5xIWfZIFeTxxziABZ2jDF4vVvK+NN2IdBahbI63HQpfeK4tG
+Bkmuny4vlCnHBnzVaAboaQk2xzI9Yp79IN6yhmuO8AjCvd0XlH/nYF6b7WjVy0gU
+LpmkYVHWhADLY4q06PUz8gFGsfDHnx9RQIV01SXbcFxhmAjJBequBbTpucW4UAK4
+sTmg59wlYEeNdomLBPW8f0zkY3Nm/IbyJ8kEofUa4kbwdD/osS5fgWgARiVQXEMW
+W4oMGWpplJan6qe+hInvd+5syZXtO+K/uSOj63H6BwAu
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJazCCBVOgAwIBAgIUWHoc5e2FUECgyCvyVf8wCtt8gTYwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA4MDQxODU4MTZaFw0yMDA5
+MDMxODU4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggQiMA0GCSqGSIb3DQEB
+AQUAA4IEDwAwggQKAoIEAQDARiuHkhrnf38Md1nxGDSneJfwv/QksdNNMNTJBdjg
+OVmaRCIAyz43oefTWDQ/TebbSwB+Lg9pud1zadGWhlZRhCgBPP8JDMhIKH4eXIRk
+5IIa8WD08EwvSlqJL0r4gsMtVsxy7BZHAkka/2Ket9pyGt4kG5n75RFdc6BI80/8
+RwJt/MDxPrcVBAT7LnCluxQpyya9mZCabj7l+9a2yU2hgWS6QqfZJ133krkP/MMh
+AEQkSoA4mmBwWk9yPqXmUqiOi7v6iLkIUEh5SgYVPRk9BtU/kDaUdSwuqRrpCZo4
+SsWZWFLxBmLHkSh+G+BWjCVYMQr2ye7e+VMT/20+5xAfq4fj9n5BsPcx3QcVuTof
+RAc/Oygnt4MYnIcUb7zRFvCAvgpUHL7BnEn6nhyXjHJGqGDchsg8m9t3v/Y3ohq+
+qmrSzdeuylE1n3W5aWJlbFmyXegNP45MJ0xicesVrXEWF7YD/ir9mGJ8bQYr4blf
+77PrbF02komC6AzVPKOJa0jR+eW1wErzYlkYgez6ylBWCiHJd1dhEHlK3h2rXdYa
+Gnb45ILCLpEDjNEUrHifLLNXwqJpgZQsJU6BgMgk7ZgBfAKrCfTeg0rkCqCAPeVb
+8eSLf7FBF7YBRJ5P6u8qXc4RtgEu607GaWV0gIMfyVBY52oV+OaNsEdFetrJnp3c
+friG8vJ+7jdq6zjUCGgnfUIHoViJPh3JuFfhA3jT0gQDKW5PeI7dxhrNvlqdYfHI
+fxX7Y1/J6cTQkqJ1cai2f0bwJIJiTAThNbG+zrtjJ7fZ3wJ4udyU/IKrwShqtmTb
+1Ofj0tJDdwOH8i84vIySLUvR9aAb7ClFlnsx6rzwOxG90W7C0LA2M0EHm4FezJm/
+FfujnZwEWr1T9Wki6qE0MHCbdN/TTDws//EKkkE44FC+amL96w0IQl70vpE37j2A
+zlDWvFFID95SIxfmpkwWDvXDKv6gr1GMLeysCl2fgpY05Xidw5cEo9/tEkuWn/dG
+x/D9hnLBGeroA0251ES12jemqDjI2U0tfaeHakjwSsoWElf94Qmuh2iPZ+1zIxQs
+7o6nAWN8X9hfsmrDTTHlww0TEfrjlbzG5Yh+0ZRxmejgiUyOCXck+eh/ZXMXvfWh
+y3CorIIuWgkRjm80PYkdaRDJdZuyP6R7tXfTXNVzAiSQf0Qx9ru2KB2Fs/XZPamH
+KjItAU5Q6msIVvaRMS0muQgV+b6hqSEBzqXqJfAlpVLHXr5FqK+U7EB9y02B6piB
+tAmxqXP8OOCoQql6/vgIcrDFUOo6KtGBW36ef74XE3KCUVaIzVJZSIt6i/Vi0bZj
+bAjsJUQ3qDlHdorv9TRVOhnC1GUz7SuYnpEOyiXmyx3LAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ62csZcH/meQcENHhNbqz9LMzwjjAfBgNVHSMEGDAWgBQ62csZcH/meQcE
+NHhNbqz9LMzwjjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IEAQBA
+wLsGf3R1+/I2zQE+lsj7RasZtA/Cos92iEGDAPvFbx9e+roG8Gg8KBsEJu/HN0JH
+lMMiQ8dDRHSBMvRBENL5/57oOOhmqc+1u5sazLuANhzAYPZG17Klib7YpEwWoXar
+FDDiJYtCyLW0oNLpCswYopWK9GC0RJNucB0NFvOxehJ2sP2/fxGBQMB09L6mjKjd
+4KsOzyd3dNf0VYS6jB+/1pcKSHKQUo9HRHB5FK04PsYHoh4AtmEHvmYQKcWWidgU
+v26ftlH00ERzuW2juqBbz9mghlNRqXi0IyZ9b4tSj29dxW+WWFzo7j2zEPaD6z2W
+DEHq7zvON+g+q6qLgWeszqMgJzjvWjMj00E/t06PoHPiz/cAnDKEqp+ZzxCIFrxj
+/qneChpogDWyLbawhyyzbZvbirx5znOSbWjPZgydqaNEFViqbxwinBx4Xxabo6XN
+TU020FuMWmgfbIcvtgjKgyKqc97l7JMNNm7LQV9+9W0U5zdIqQKLZ9MMrd2w3xh4
+MAB8NKnwzHReK0TWwUU9HSgFAGdEX6HnyZ3bQ13ijg+sNBRMEi0gBHaqZKDdyoft
+B2u2uasSwioV48dbSIcHl+rTBKxiMh5XQ7ENnaGOJkjsIqTVzizqnPHU8eMBnSbb
+dsXlamROYII44+j3Ku6OGt51w86eGk4VxI3tmaECcJKqTkwUFD8AcNDrkjtmLuxK
+12yjnoM+u1cclfqQ5NOtRc6MJZ27jCobfBBhVdKVDp4X1WNyqGlbsU5adDAzknuI
+GT7MJO7lGjkZX2n54BNPSfrSknYMOVYcZqL0Dbcrhx5IyEmg+iOlOu1HO1tdnZop
+ej4vT+1V2w9Sa4Wo3UCo84jcm5v/4z7jCYh4BRQ60CFb7GLxZoqXIslcGSPool3n
+jl8JWoaLXrJUPfZGXo1iAlayJ5EiMyZl4eB/TBUf6TMm8vLvsPiUT+CEsjLppOdS
+eYppZAZ6H1JrJGs5kKBdOJHGn6Pkp5QsHIswOBd1HqHrBbYbZmDaDLRHduILWLrM
+e0/IfDdeXB/bKfmZoEpT8xRiauw15p0AHLumiK7KISAehfgBqUnxx+YmgGoZ7EWX
+KnMYAfCuC6oJ1DL0gp4Z9yMK1eu+GV1sLxPq9ZruEHW1R+H+4sGyiA5Gso2tgB6/
+XW//wxKclNp5LZR7hqfs/kGuh5asrJrnEbMwWn2+tr/LqfYtYh1D6nHfIXpT0o1d
+rNy/HrsKnRDMWxjm03r4hCViuNVD3Zb9anAF/NSPDVu8ATM5JbJNrCYX4eipz6ZE
+aQBkwIBkTPgtgP4r8v2G+uMYDw8nq7xh72FK107aeTTwc6MgU5jfeFNMr2XJisJd
+lSem1ngKYQSEzjVsTE4c
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJazCCBVOgAwIBAgIUJ67hHFw8DWW8omAyqE92SPRxENcwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA4MDQxODU4NTRaFw0yMDA5
+MDMxODU4NTRaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggQiMA0GCSqGSIb3DQEB
+AQUAA4IEDwAwggQKAoIEAQDARiuHkhrnf38Md1nxGDSneJfwv/QksdNNMNTJBdjg
+OVmaRCIAyz43oefTWDQ/TebbSwB+Lg9pud1zadGWhlZRhCgBPP8JDMhIKH4eXIRk
+5IIa8WD08EwvSlqJL0r4gsMtVsxy7BZHAkka/2Ket9pyGt4kG5n75RFdc6BI80/8
+RwJt/MDxPrcVBAT7LnCluxQpyya9mZCabj7l+9a2yU2hgWS6QqfZJ133krkP/MMh
+AEQkSoA4mmBwWk9yPqXmUqiOi7v6iLkIUEh5SgYVPRk9BtU/kDaUdSwuqRrpCZo4
+SsWZWFLxBmLHkSh+G+BWjCVYMQr2ye7e+VMT/20+5xAfq4fj9n5BsPcx3QcVuTof
+RAc/Oygnt4MYnIcUb7zRFvCAvgpUHL7BnEn6nhyXjHJGqGDchsg8m9t3v/Y3ohq+
+qmrSzdeuylE1n3W5aWJlbFmyXegNP45MJ0xicesVrXEWF7YD/ir9mGJ8bQYr4blf
+77PrbF02komC6AzVPKOJa0jR+eW1wErzYlkYgez6ylBWCiHJd1dhEHlK3h2rXdYa
+Gnb45ILCLpEDjNEUrHifLLNXwqJpgZQsJU6BgMgk7ZgBfAKrCfTeg0rkCqCAPeVb
+8eSLf7FBF7YBRJ5P6u8qXc4RtgEu607GaWV0gIMfyVBY52oV+OaNsEdFetrJnp3c
+friG8vJ+7jdq6zjUCGgnfUIHoViJPh3JuFfhA3jT0gQDKW5PeI7dxhrNvlqdYfHI
+fxX7Y1/J6cTQkqJ1cai2f0bwJIJiTAThNbG+zrtjJ7fZ3wJ4udyU/IKrwShqtmTb
+1Ofj0tJDdwOH8i84vIySLUvR9aAb7ClFlnsx6rzwOxG90W7C0LA2M0EHm4FezJm/
+FfujnZwEWr1T9Wki6qE0MHCbdN/TTDws//EKkkE44FC+amL96w0IQl70vpE37j2A
+zlDWvFFID95SIxfmpkwWDvXDKv6gr1GMLeysCl2fgpY05Xidw5cEo9/tEkuWn/dG
+x/D9hnLBGeroA0251ES12jemqDjI2U0tfaeHakjwSsoWElf94Qmuh2iPZ+1zIxQs
+7o6nAWN8X9hfsmrDTTHlww0TEfrjlbzG5Yh+0ZRxmejgiUyOCXck+eh/ZXMXvfWh
+y3CorIIuWgkRjm80PYkdaRDJdZuyP6R7tXfTXNVzAiSQf0Qx9ru2KB2Fs/XZPamH
+KjItAU5Q6msIVvaRMS0muQgV+b6hqSEBzqXqJfAlpVLHXr5FqK+U7EB9y02B6piB
+tAmxqXP8OOCoQql6/vgIcrDFUOo6KtGBW36ef74XE3KCUVaIzVJZSIt6i/Vi0bZj
+bAjsJUQ3qDlHdorv9TRVOhnC1GUz7SuYnpEOyiXmyx3LAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ62csZcH/meQcENHhNbqz9LMzwjjAfBgNVHSMEGDAWgBQ62csZcH/meQcE
+NHhNbqz9LMzwjjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IEAQCa
+SXUWwou6JG/0ubilpl4nBPIhK5sp/7jKBSsOEwn4jROz656Qf5M+mSgbQAjp/4I1
+qwCBktTF14bUDrxKpAga4M1KYilal8kiExd3WSGxbeNpdazbjLGdoDKKIBa6++df
+lCrxs/2Mg4RvTN4GOaj9A9LanWLj+rhIi27WD039dzHpZwYwgKpLwpvGHz+7bJzE
+93BqLqoG2q7/Gj+Y/uVfy9Vn1ikxHGJlS5pggH38F0iGy1QhmVHDp7umNUTHBG3p
+Q9a+wcNycrEkHQ/sniXiEaWzn1CFmVt6VcP2AAlioyfv9Q0hF6DRFeQrgNFYixj8
+kpomkqEtFO5Yj+J2FQZFq8UE7Boqv1oSdVnON+7Hy5gb4x5flKvTx5Sok1dg7W9B
+bYfICLwKCEi4mr1toQLT7e7PicGJXKh0nyHWHhpn9SeSElQniIlZbVrkDHx7zwOq
+fcYbjMha3uyqJbd10Rs0ytlL3uiQcHVs+bc9apSW9QPPFW1r5PC05Wn/9+iwU5Vx
+2s9WNgncvvdete/UjGBSbpXROe0fSuJf4+VYNK1SF9DJFaDim1zrOJWiT5bSxJGi
+MGKnQjEZZEs304dfunuH/I16l+UzTecd7QHgHgCfRN+pJnGyYbpT2lt9CCBD4YZX
+qBSQm1iR/7OjgFuLniOF4GLmatuNgVQdKQd6IcllPVK/E0khUwZ3LNV1RRrkvb0c
+9mNsnvhW81rBoD6+KHVgaiA9v9fSqeH8KDNbaqKImt9f9/hZJE1joy2hJIkkc4vz
+KNQy4aWmRUU37xlvF2yTWt8MuSf6UcM1IC5pfl+cEXNM3kyUs6dps2D66AfAsz7w
+C82xUPJ5blKhEWcskmiGXDL64NnD465WoMHPGVorRlRvdHy2mXQWaePF0OpmGtJh
+7LqRuV5ou9M4/fmPHrfLJ81ZDoGoBvKpibr4V/3wxdWYjIaQ97MePssVnBFtBKxI
+lcPsvunxL6dyxL16FfQ2WPqWe6Fq3UT39Lz+3y6SjtrIcASKJAE77HIPypITSoRI
+7Od5OT7ZxB1hxtvqHz45Wyer/aDMq2YDBDDs45s8qEMSPYozvs7LNprU42SJC/LG
+GjVFiIXjeBzwTUIjZOAjQ8lLFN2eBOPljdDLmqNjuVV7CgWrlIQ9PafPRXLsfC11
+71Xx4Kmb+I3v/upagQXKikNQZ3IFuXmCofRoOZEnpIvIj9+Dp3TgvK1Fpe9ldFhN
+h4Q09rb/zCMvB/yRMkp/JP6+9qySBCHl9kl5W9/bsgLgvdZKR0nDKSwxu/doyPQg
+/lgbeYbaZes520gwORtgSYJzuCt0n1nuYxbxINzo9Dw1hH0xgWEhDNL3gjZonmf5
+gCN9CPQlyEFKI9Q2QVUC
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEAwJSerjL+tgafYE9i601391BfcpXWpUopcGxdeoHMTziqMGDp
+J5BBXBxU3RRgr6XZGqeS32b6xX4LEDAMBUe2z9CYP/i1uO4/y0xCJlxSyXuHFHMt
+EraVcbChfcHsYmBfGMNyQKWV6u0WBQzKVvgAK3ygDV7hzvl9jymmY5yNo1w8GCGT
+T24pVQ4rL5Rn8daQ2dwXpw703zZj5s90vWGDyavqftT6O/Rt3IGOM35v/eCAidjw
+0bI8emXp7O+aBojn6y0DvtiiUJg2gbtl7EykFu2//Yzv48eXHlzmXN3gdu4Ll861
++Qy+iiwhklojz+Yzknpp67omk8Fe8ZFETU6maaGTUzdpC9Tir0s9guRciK+4f6wd
+mdVkdnfaGGlngaFOmBrIXbaf+Eh8w7NIgyGlqj/Vi4tVsuBd1YlnH244hP/9r/cF
+3gj9zX8+3XWg1IiGIQBLtt5aPRxC2xJpt5DAQg46VvFYUDVpGiAU00TeJbiCmzin
+/igeZP1ZqtvkBp0TqfOHs3WcKIOg7smgdTM6PxW6cufZFny9a4Ekj/Z+HVMlWM4G
+ZAGXpgYcPwFpFaXO/9U6KrbPxq33StcZRvnwpi9x85M5mZiMorXqv/XXakSAm6K9
+dev43qokpjHDjqb4wAPZ4NDQFzqAVkyVzIEDInC7Eh6NRO9bCh119GdoVqMCAwEA
+AQKCAgEAuDVxE2/z9GmhSZ6mIC2Z8xcONazeBH1L5h3BzM0bgSvSnzQT0aRK4LC4
+/D/hvCIH6VchRlBaz04hhvpuhR5z35TIDWj5akt6+huXqtnk1pUyQH1rP9smV/l8
+f65fTjqgvC83ul6paG4gAfSaF1Zh0zcCYcfAdxpu3+IXJnE5imlPkkWLgw78uj8z
+T+/E/a8gH0RH26SS1nBQXxdRs1TzmpS3WVqfgXntHF9QhjELLuzwButcbzjuYKXb
+fKgzzMxoCqykSIkvuaffe3ilpcIps0T3wLBvRpJEGucB8xLJAvnwXOV9axylpcVQ
+140hdFveON6fMrx86hitmKQ7kTcKNZXEnaduehhQgDn6bqeJoAfHs4EX1JJqPZyw
+ibON1LYeUoFw9yWRy35Wr/XMkBoPwAykCWUwmOm2QEbmwhC7ORdjUVVuzrFdn/c8
+beoBfJTJ19GQjqSNcUv0cevfwYMxvimTh6oC0yPn3prRXCzL5Xd6ssSW9ISlIpu1
+etbhkvP1GNDKiAbH5uTZNIYMANbdOybfFHDUDWXHg0ObvXVLOhjH3OzdAORHKugS
+PPygnW4eXKt5R/uDRW/B0aUWLDtuB2Uj/+YQoA6Bm2AD75e4BkW9tRTqNBXOCCtk
+onvyAVJC4NoBZZQBRaOMBa0FIIxrjPLS9zmlyLehLjg5vjwjbAECggEBAPKmWUhO
+0HmwQ4/167CMwn+lIW/v9U1gQ1fvpBClFeJF/Px1AUQP+foajGxuXoMMnwH0fTrE
++ya2PA5ShZCkV+ajlBd1B8ymCRu1lp4ilCzEgjNU5U7nhJESCXBNRFEGFSjmO26a
+66sny4FiV7d6DeiJ36vPSn2BV0GezHedhbZBuaE+vVWahCXESsAqhAgejRB0A1uf
+sSyxXDaJl365J17jdO3YbS7p8LsovsK/Gfn6tTqxMNDnSJWgfFUPLOBznK64L1bJ
+RhW9HVrOeIrgV/l7mWEN/LFmWNMEcvi1E1oF08ZamjcwCmtBdGADaPcx07xQyJnQ
+6czKf52RMMcQK1ECggEBAMstD3JinyrNb71kb3fL0qlx3kO2HRsqbFVvuItd5SF8
+3/y6GuKmAikhmiZTx/DtGDGUIHD/ioiWkgswyoNKbuKcuDHklc9v3zk2A2Kac35c
+KXg//3keNouPLX/NEsu8haVas6GJhlvA+FxgtuNYQeCRMWzrzyCDBNf9KVnV8uV6
+/DBs7W306Q07im1MGMCc3P9Jl1cA0auyxOPD3mSnoDZfdzJmwNjrELSunM2BfzRn
+6h9Yi6adFkIgcEKg1a6/R6CyCTGGYs3QQ7+a+UoiNCXj99td5KWW+WMpSns6I7pw
+0bmiCb87lXrit1nn7Zgkk7W46jO6H/KyGgMQDs7b/bMCggEAC4S4AkWzgcNLQb8z
+w/q6lOKa1rx2UYj7SWZXzG55vncCDl3jhH6ZqDSwa8lFdUUZGzem5i5JmcnWyftC
+2d3jSlCDjCWDDETpc8ZH8xPDIujlIVirVfaJhXVsu0b/sjTutjVPpu11uHc4Itkv
+Psdtd5dr5bT+XTzRjoziOd9hZIh0LmJTDIg8M6rAvaSHBfelTJ2lNyk6eNume/RG
+G77gTpHPkCbQ8AQs6EWD4Ky/p+0twy58Gb6Q5IFsxYDl6XWzf1vA64a8a/XBOflJ
+IZaKto4WdtP6JdWs679vUb0OwRw7tFPCtFH0fKjrxE8FIY7c9TiEfUC8iHsoWat1
+vE1ZMQKCAQBBLl6TQsJvd+LOLsd39kLKK4az0Fv8GBsTOblJtMDKgoZVTNtNNRbi
+XS5X927ggx/M4AmcVs75zNxjjK6beiiiuMZ68yuAlhJWB1cErio5MpX3RwjNsXys
+Py3In2DVTdDOYI/aVgVtsDW9ZSWnP+w6gDoMCIa1lnLaXBSFBpdbOZ5oZrmxGe4e
+WaiqMcyLFofruo870T5yx/JUY8UYI5LJfsz9tWtO6/K7FH2njFDj3iaFEeITfLfk
+VQXOykxjOGhhTFyYr9VI0/S4Jp6tQtXaBg3BKZkt6oZtYpTLfbZynLkbxbk8yX/G
+Ia/Svw5BThK5LO6t05tmP+8KZn9pq7fzAoIBAQC/JwXRuUelycI7+tIXvcDX1ydu
+xASH6fyYcB1378KWNzWddcEb4NscfdRK4ekMO+oPyd40DNFl9ESXSUrxoILUIERV
+DywvQPMh+2sEblzDXvKO70BmSBSwq0tgfLSXpnu6nv+EHMRARA/qTk9R+Gl/REF/
+mH4ojpv2jHE50ysWFWvxK6whSG1/bMXBsT7YocR1TLBxZpaB1mVxUJ11ESKDMy+A
+lf79rIhGfU41mjzr4fkuYbERQy0yM3+lfG5qShAFAl52Fa2eFVBFso090+1TMhlR
+1ZmG9ZnE31uXoKU6OGcAGyFmvwhBIkjczH0z74CIYkD9gZJ1lW4RohgiZUja
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/client2_expired.pem b/reg-tests/ssl/client2_expired.pem
new file mode 100644
index 0000000..9d0d2e5
--- /dev/null
+++ b/reg-tests/ssl/client2_expired.pem
@@ -0,0 +1,81 @@
+-----BEGIN CERTIFICATE-----
+MIIFKzCCAxMCAQMwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCRlIxEzARBgNV
+BAgMClNvbWUtU3RhdGUxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMTEw
+LwYDVQQDDChIQVByb3h5IFRlY2hub2xvZ2llcyBDQSBUZXN0IENsaWVudCBBdXRo
+MB4XDTE4MDQyOTE5MDEwMVoXDTE5MDQyOTE5MDEwMVowQzELMAkGA1UEBhMCRlIx
+IjAgBgNVBAoMGUhBUHJveHkgVGVjaG5vbG9naWVzIFRlc3QxEDAOBgNVBAMMB2Ns
+aWVudDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQD8JQiW54JVNyFa
+lPvw6skL0W5790n5R5kx10H4RUT3sIErV0K6Hvz/KwVG/jYb8yA7YSHZKYaklNis
+Zjpxj7tnKop7QwyWViXlbW1hRC+imsyO8PLrrc6YkLujKBmB1/z4523/opgNE0+m
+ROEjLIEB/nPHSPy57qdS3RdbCkQoBT/1fG8yyKbhcyHbL1Aj3Hk/553ZSgOo/Xl7
+HJ8wM+MzgkoSvPFGHn4WGckBEtiz9Fvt7v8RQJhMePjOXmDLdoiaRmeyhu0a8drq
+fg55s4LFbM58vW/pXAPyb6KzPFC1htFY+yBk2l5s4JpggNuvXEJIiP+9COY4D/oy
+79mMxZXWY/6VY5NQu54LN6vt24q9pBtaF6OjsaXUz4ZW5pj8Qpej1uXS8N69jgy1
+3CR4kFDb7pa1roe9zXq14h64kpoLA86Y17B3rRAIkIDGf/LdwL1il92Jdcl+K4g4
+YycbWCzgNb4whgokfYGfwsVV01SG1+19h+Nsme5hYROQmYbCbC94lAWJD/U/7EUN
+6KN4A7WgCxTt7Vvz2GSEE+HU/WVO+tfgxOPs40M5R3D2LKC0owEyXqkFxAANstd3
+ky6KZfkVQP0U+iz8m54o5HKvoF6EAzEHR/l2kPNCBj/hhyYGi44SwjXEOdzOcJVM
+buA7Hp2U4eOhoAJ/VoWJhY2gntcQJQIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQCl
+h7ITBQcBe/Rhc7Q7YE/1Sr9duVrUAUgS5bO1xHzqlBeUxPXqQhBMYBYLnvoVdJUz
+Hk/7JgvuTgQWUHHabSmKiQ5ug/8sRJSJpOavWelJW+gKaBbMUDZ2xiVYsVXJSmCk
+RpvZV+Gb4Q3JRPxkz7+KddB8FnvPYg16LyfoRKk5aVPD4vjT3ePgFZRRLY2w6BH3
+tQFB/xjCTLyX6Bhu+fC37S2N/+a+i7/vEpcOcjKpqkE/Kvb9W5Usjz9kIy5ceq6h
+i0t6FfYVcpwO6ZCSB6DT9OnzbdzPbYILdYhpCua5i64YS4cSaW9ltFvsTMDy1Nvm
+VbRh3kEtrkywXa5XmYbQE1Zm56jc7MIiyQRLBS60/SA5IzFQFZQh/NDzysLlvDMf
+vdExBQ5HJGKje+GN9deYoN3WXKpK+Qik6YZ3cVKMhBD6hYTM/da/4A6XGJEKvARy
+21rreRz/D3YMwac1/b5DPViU/6pXMGKud9/ZtlkEi7NHdzVJHMl/htcVQGlTnZdK
+Q6Yeug24JbnBZxIbhcp5aaJ+rzQeswL2SkWpF4td87Ku0gFEBShxG4tQNtfvewML
+XFybPNAkKOhR84K2rdMKwjva7vxm3tI22wll6LTVP8YUd1SS3/t9yL4jWxHX4Ds8
+gxdxuGWjJe6zm9R6OX63sv0meKNUsesmKQTpdu/gLw==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKQIBAAKCAgEA/CUIlueCVTchWpT78OrJC9Fue/dJ+UeZMddB+EVE97CBK1dC
+uh78/ysFRv42G/MgO2Eh2SmGpJTYrGY6cY+7ZyqKe0MMllYl5W1tYUQvoprMjvDy
+663OmJC7oygZgdf8+Odt/6KYDRNPpkThIyyBAf5zx0j8ue6nUt0XWwpEKAU/9Xxv
+Msim4XMh2y9QI9x5P+ed2UoDqP15exyfMDPjM4JKErzxRh5+FhnJARLYs/Rb7e7/
+EUCYTHj4zl5gy3aImkZnsobtGvHa6n4OebOCxWzOfL1v6VwD8m+iszxQtYbRWPsg
+ZNpebOCaYIDbr1xCSIj/vQjmOA/6Mu/ZjMWV1mP+lWOTULueCzer7duKvaQbWhej
+o7Gl1M+GVuaY/EKXo9bl0vDevY4MtdwkeJBQ2+6Wta6Hvc16teIeuJKaCwPOmNew
+d60QCJCAxn/y3cC9YpfdiXXJfiuIOGMnG1gs4DW+MIYKJH2Bn8LFVdNUhtftfYfj
+bJnuYWETkJmGwmwveJQFiQ/1P+xFDeijeAO1oAsU7e1b89hkhBPh1P1lTvrX4MTj
+7ONDOUdw9iygtKMBMl6pBcQADbLXd5MuimX5FUD9FPos/JueKORyr6BehAMxB0f5
+dpDzQgY/4YcmBouOEsI1xDncznCVTG7gOx6dlOHjoaACf1aFiYWNoJ7XECUCAwEA
+AQKCAgEApmQJ9wtvhqyK5ivK1oEZiyL5tfTKbCZDghB7CEst6AYiN2euMQSiEAFj
+yiWXr3oRmx3OKHCu2Y0gLySHDMm88aexGwZ0GAFhoLVFqRpGFRfyRaHbrItV+ngI
+WvLrYjQWTGrsu/WgQYCs3xw1NfD4cUhpPul7XXeQE66y6vEraP2N54HmH60p8zz2
+6p2eVQv5N6KxF+Mv5yTeNc/9fOHA3QzttM/aqFsW+Z6qdnrpZlerEqjUyZ3G4zAx
+gH3ngl0GaEhtxfIkJdPUk0n8Y3OCqKXU3Zxlbam7MRFaXM1AtYnyPLX7+pHgHhlZ
+xrVCQ8auNw+xNB3bTsO8aEC/X5ZD+ZdO/NCbhzEXPdx5XF6LDlB9uthC/i5G79DB
+5DK3GsrPjFjmeY3gmvKm5ikiLNiAvMqghIrKKdLhMJe/AfUTkwVh0Hh5St1o9zPT
+ZZP0sNIw+da5/qW2iB1uBdP8h6sdrZVOsfkY/fynny+wEkkP40FAVRHH42p/evRY
+qLu0/4MVUjHEgkC0G2ZLFw3n2Eq5omwH2/4u2xzN8W83+kMdBj8gB3qNFjFXLba0
+Z9izOc8xcFsvnmjWIIZ1RZsby0DqefVSfYuc1ON4qOA4hiZZNywS8Uk52i/+7MDi
+Q8eGOdUzFPmM6nTPwMLFspzzLTiflTvGDeEITJO5/DFa2ZWf3AECggEBAP/QNzTx
+vxOw3nuEvoa/4yp7TBSwvw1V8KC9298IQ3wIqNs699wNqxADGz3P3Vb37psPyBmk
+oOclX7we8hUpP0F7OIQo5oENcRu20fMY9Lvbygr8T9j6rxUj5CCqO/HqiD07J59w
+4/DE3kuzG6wLV+Tbuo+kV1ywNT3NnjzjFgaZYVjp1P71uUQfsg9ccX8N6jaF5LTn
+UTxMAxxvLJ+7qk+4OmFDLZ5y3LiCyezdsCUQeEcHf3VQKNdafkGSmMlpyIpa7D1b
+CLfJcR/UOYMezNzuHlLc18pxATOzbCLLZFmGfhdgI4SnNFpdqGk/tUAyiQrN8tTv
+JeSDi/usSQaZXlECggEBAPxUIfMCaSPpG8KM8gHgp7yz+KqY+cErD4Oh/8j9do/x
+0JkO2sV8EMSuD/alKzbN9O40KCPPw8unnlvkKE0C4of0ugzRg/e1L8SpOYzKWX5J
+zMjO6g7m+QU2kKwrcrjosmaWH32ANPY16fUiUOyl1Md7U0trYFRt9+4eqNdlej9j
+xMql9KCii1SrGrrh1sTzluvkIedqKYB7S9nv/z5diTm4F+IiBXU536YfJXJ16LpM
+aJE8+yECnV8x5Hr9LEGayjRtxIvLObBrCyeVDCQSXT0fB9sAL+gr5baFO6fq3kS0
+pk2hCkx+r0lqwIOOd8guKSIHIpTFOmd0x2RtoxNOu5UCggEBAKL76yCdYYtPJgD2
+i1lZCof3dHq+PYtmlOAk9uA76Jsu/T5obKDUSIf0IrgYJsKRRoGe5XOJE0cR5dP6
+t2xKElZLBrAVSv8wLD9nFI7Y+Jx0JV8ocEsjNMe4TVGOhJDWR6UTemQ4TdIJ7EO4
+wsmzlHVolY2NPGf+kH1m8wmB+XeM45v9p0omDrrbotvsnxc/K1k/p36m3ngXSegk
+4P6IV7NhAjkTzw3jysL3+WUjvWVv/+HpYgjBYLQMoOJwX038StvzoA5bYMuP2bZY
+xafHyOh+Ae3zbL07kHN7PktQ4Qe1C8Mi6p5K1a05fOJJx9Y2HGA45R1LnQ3hzh80
+HnbI4nECggEAPGu0+WixXnz6PbrcVGDEKaZ6u/cHjx7NhzqqcilnU46W4Z+x+Sn9
+Jet8PRZN48CrjsKEfhbJDqIjhGN81vwC3IVYa6tby1vihVf0ROdLSLdJRyhs2Yar
+SHlJaUC6JtbpqTD3d2jUxcQhMqa19AS9j8rTJjMfDPiMLsO+sF1HSZiNTe0xR6nE
+bVDPhMKBWAXwNKobCDveljpv7k7OstNZAa44Ydi9r9Vc3X2FzQO456tWOrj8dWoX
+3uymhmDLUSZMlwNV1heix8DKGf9Rue1/0Bv3GJTR4+lnBy6eG1ZdRNxxGhOe0LRh
+KtZaJOZfflq3VMOanz8e/hjzifPK4duvhQKCAQB8Mo5dWvs5fCpWAQrNqj+ua8gY
+a8ftp7R+idGGgOLSCUArjY7sS1RvZzCB28I3/5QpAuEEhaLFTABNonhbD5MdB5SL
+xVxfXqcW/WfXkGF+QqB1AMXpE4zLeGSRERWpWJSaD7B2I8UdS/Leo3lVchvA66qx
+SG+Pojcp5DsoZP3hrh54fsPdGorzezoTIwfQtsy3P8DnzPohjzbqDKmloglDbo4Q
+rBuJVs/Gc7UwZGvka+roi6VUaVdRa5PAluCE4GS9HSwf31k74jw3TfYVIlQgL5Yi
+kIHsC0yXfJ0FPXiw62CMEr51ssX3QNGTIKfos24smCjK09eInNZpIZm+p+SL
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/client3_revoked.pem b/reg-tests/ssl/client3_revoked.pem
new file mode 100644
index 0000000..0aba2ce
--- /dev/null
+++ b/reg-tests/ssl/client3_revoked.pem
@@ -0,0 +1,81 @@
+-----BEGIN CERTIFICATE-----
+MIIFLTCCAxUCAQMwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCRlIxEzARBgNV
+BAgMClNvbWUtU3RhdGUxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMTEw
+LwYDVQQDDChIQVByb3h5IFRlY2hub2xvZ2llcyBDQSBUZXN0IENsaWVudCBBdXRo
+MCAXDTIwMDQyODE5MjkxN1oYDzIwNTAwNDIxMTkyOTE3WjBDMQswCQYDVQQGEwJG
+UjEiMCAGA1UECgwZSEFQcm94eSBUZWNobm9sb2dpZXMgVGVzdDEQMA4GA1UEAwwH
+Y2xpZW50MzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAM+8CxcAKMMh
+BILdtSx27Zjlri9ygpI55eW94t9rYb967glYbF+ZGZ2LiqXHzIpabasquXD/oH9l
+fQpaeb498ZUblbVb0OPyVkSSBpt4y0wPBYYiUGU7T94drdMwEySIyMEIRNlfXePB
+EQJLbdksdFBu4QCQEzzdL3aMBCogFfN85zJ6WJhDHnkbtKdUpKJ5irBB/3Hs+pMq
+I3Y4cdeWmFkJ+xQpu9oh0igAhkbSPYXu+asSCzExO4G4ttBnQQh4RYUUe+IqO8z0
+QQ/La+m+OBXWR1ti+/3ImeZWdRlA7xpTNYTOxAg0eO1FuUhwvw6Fpvo5KV7wre4W
+Qkmsjc5vpxubWWrdfSK/YB8jZJsdx2zgk8thdhj31Zhv0PUP64fhX03DKFSF+qNG
+0POpjPthu+f96umHfIFNNKiLPyWBpl0+ppI1FB8uW9xXRZw00iXl89bXNa1lbQqr
+c3cj893HUnEpx3H0Q3piEsKu0mchGXiVVJsoZgbLn6yOXDnkWBQhAFvvRcfrAzki
+w3f/gU+BiT05csRCXtmbL28RaK70fBxD6fDhGRHyMt/0MFhYnJRxmIB3GniQAgC0
+lyqyMOplkHVeHO0LhjrLTZwbbD083A/KRzpsrVLHd8sjCEjojae0tPDj65u0xg1R
+JrszrjO8ZNLQoXr2rl6hjxeLC2Yn08W1AgMBAAEwDQYJKoZIhvcNAQELBQADggIB
+AIzgQBfDxEdowxYsdZ4cb0wySg+xB11XRLeR9k6c1kExDkpTKRyAy+6CNS0X2mAz
+3v/jVoh3G4crlBkL7UJn6ycunuJ2SdiUexsJAOveVgPPml7YnRRfPW9ddM+gn0y/
+TtTB0D52XaXczeIqQKFD67OtjbVvObbrO1cITkh9q+mMtTO8T/V1gBRd1VH1YFdi
+nPqTYYA9QqJ2zAaufhZVCkpJJn5onpT5t+GBpe9O0lKlkQrduLzjr2rrfJCg2Uuw
+xBXwpvFdOK4BY5tDqVLb7BOLkEUUltWKTYd4IFjonRE6OSxtY+1L/RnEYMfSSVIf
+GsTkKugTuVSmmyFmh5H10YjwMMD3j36hjxJcGJDzZIuOQMY+2UKI61eF7StqZTXE
+wRj+JMHHRHIEw0181lHxGSCArWyLEoSn57NSAqJEzdhq7wb6eZwqZzRo8EJUSYIK
+3fLnfjSLHS/XaH9mCbx7VpYfC310UGzQ1QXSOIp1LtKtxUbT1YL7RTwa7GVfvQ0e
+9nsY9/qd9Oo2VJtxKQRsfro6Z/MdP97lUpTaigQEUpB7KICl1ks56oQrunRLXkO7
+EoDNlnDGkp8DghO+tPqx44OogbXBFCRTO7ncYxSE83WcG0UMUvVfGoMJKqF8V0n3
+LmKLNCvzLQ2Gt21Cp/zNiwHSjMNIIqybjAe+nVT4+sSI
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKgIBAAKCAgEAz7wLFwAowyEEgt21LHbtmOWuL3KCkjnl5b3i32thv3ruCVhs
+X5kZnYuKpcfMilptqyq5cP+gf2V9Clp5vj3xlRuVtVvQ4/JWRJIGm3jLTA8FhiJQ
+ZTtP3h2t0zATJIjIwQhE2V9d48ERAktt2Sx0UG7hAJATPN0vdowEKiAV83znMnpY
+mEMeeRu0p1SkonmKsEH/cez6kyojdjhx15aYWQn7FCm72iHSKACGRtI9he75qxIL
+MTE7gbi20GdBCHhFhRR74io7zPRBD8tr6b44FdZHW2L7/ciZ5lZ1GUDvGlM1hM7E
+CDR47UW5SHC/DoWm+jkpXvCt7hZCSayNzm+nG5tZat19Ir9gHyNkmx3HbOCTy2F2
+GPfVmG/Q9Q/rh+FfTcMoVIX6o0bQ86mM+2G75/3q6Yd8gU00qIs/JYGmXT6mkjUU
+Hy5b3FdFnDTSJeXz1tc1rWVtCqtzdyPz3cdScSnHcfRDemISwq7SZyEZeJVUmyhm
+BsufrI5cOeRYFCEAW+9Fx+sDOSLDd/+BT4GJPTlyxEJe2ZsvbxForvR8HEPp8OEZ
+EfIy3/QwWFiclHGYgHcaeJACALSXKrIw6mWQdV4c7QuGOstNnBtsPTzcD8pHOmyt
+Usd3yyMISOiNp7S08OPrm7TGDVEmuzOuM7xk0tChevauXqGPF4sLZifTxbUCAwEA
+AQKCAgEAr48B6ExQJqhOwbJRHOTdY6woCx1BUAbyTbiudZawozxm0ysRW7FtvoFh
+iT1TlVFbAZ833VGL+F5y0D8qSCbddeA8I2sXHK1/TrACOX5agUropjV1sDfyBYsb
+jjFA3E1lLA2q8fHwzTwq/b91dGZnXlp2eR1JxNRA+nTWSCuZoY9bbIONQBDpPfy2
+LfwQrub82HzOPx/BnIGuOoj1XPd+hTE0KhQjF/QhQYE/+bZQHFKcWYEESGyNF9Jd
+Xb8FbP0H87IeCEMdCtcZ7RlDd+U0TPAsmgULZa0Us1850z/XUm4j+rsrXKvLzupv
+7dKrBMDbHvkUB5Jry5ywJMdZiK8/j0QW5bw9Hw8tEyXxT2gzXFze4DrEHC1cPLod
+3bcMOnp8axtdfm23tlFQuq4fGsERABFWByylF2Pu9KQ1AgH8/53IcVnNjd7Z4ZlA
+eBrZynEDg67sggFNRa6EnjAYFS0Zqgmfo/160awcGSLPLSkE5FhtByQOEzyAumXn
+UmDO4zlP3dc54WzCnxdS8GpbbmjniXGSDe9D16D0uWQeo+LCoDDTExT3wDjY3tDt
+R4VjIBVs1vWXH2oG/oC/ulgXwcSKSSvtqftcGnPj9EE6exNzanTQwCFHosYWl5Tn
+MkRsxNRFITKksNH5sXAkzwogWlyG4PK4ink0IixpYh2N0WgncDkCggEBAPqZ7z45
+F/YusR+eJdMrloECYhP1BId7zUzZglAUof/KJH86TVnQ/wGBcl6x9SXGzy5Mix8S
+q0qUILJ3nXkyRqFjlch9c8NlMR5P/IPyQWupSGNFPTIBvNefCfqosEFD9635P4ND
+sN19gwrx9IqMYgyPzw05G2CJPafemjF5NLIHKyhUnjGUij+D/WZYCKvHIrq085ac
+0dLncRvlnzloa8PCGXDXcTuFacVGcj6QZJvn7ZrprMOwpwh7RT02U9cqlLYKfjEg
+9xnTSmxbb8esRtWDlJPoj3+P732Cj7BynSpvLFSsvHAAFS4j/g3XIjdY/yocd0xG
+UfYDEcilgPiaEnMCggEBANQ1st6aaf/7oH4hJ/mgOc2RhGYxpjHgXs+ux77llHks
+o4VlAILV9CLQyQ9/3PU/4vidw/rseZwPmONDlBeU4319MQzjZeihaqfaTQXnQRBj
+xX2sJ/7EeBoq6Xlkvc+lVbiWcA7i97dYEumV/q7ozYRyAhP/D/VAdmT9ZWK3qJ+l
+/L7h43ch9PWGDOjiUIfWx/xAodyDUEM1iforv/S7D4+3j9BriRnhpZRLZT5bG24h
+vVN344ETxaoVWwjw9yzmJHF/4ooqJgdTsjJ8ujPPLvJKXAD7ZVerhcUpZWmyMACL
+Dj8IuNZeB7IN9LOCr1xPenR0jdltz7+LU6WTK0F4NTcCggEBALvxvOL+sMDaTc63
+rgiM1ShWIDZ1ePsfV15+dmQWxVRwRyUAFcj5nHaFnb/1WTUGwJUppOEeAEdDhq4y
+VXDyytP5OvmNVMfDWa4xMOHIS1YyNG73G6kocneH+FT8NIwOLHBW0VJh7wB+RExu
+IAfUtyhSpmd1X9nrs8j1gtD95Q5rn+t0YtwuWey+0cny1jX2eE5srY9Ud2zkVQkm
+El9cuA2twaTGf06zhRsF9WKEql/e9m1LOV3eW7dZtBjvaLujXLqWbgPshEXjGBri
+DJhE1S8GquSu8wgpa+TGiXs8yjBsBmRO1FhvR3M+XSgGI8w8u8naZYJX7tpBfRHt
+RiesbY8CggEBAKMcm14rBmryOIx6Y8Wl+Igf38rFQt93fKjZyULcKGFzhEUWO2xV
+lA/mt4SoXWhNMOK0MV1/woHII1YcLYpqsOlV/wvPBhfnapmWXDm7ZPF6HuTYHO3g
+ighjD451dshSZy84wu1OW+WbVv4gguBipQW2tA42sUdrwxUhCHr2fDAqX7lA25xI
+h4bpAKdIvWmMF6+25jMe4+SlFGcslaFA31cyWuJypbE1FhaEVU+2q8DdQi8UKdSf
+JAH15EFdJkBmrYBoMfLNLunW1VOlN2J3T7iAm3NNyLm4Z1wC06aIhgkE2XBt/dUX
+9YZQ39PTEYM8u/0jUZzcoSCzsRnFoyvxf8UCggEAblaNsi8/Nx8FId/aW3klrIxY
+UfSVXL3InIkrr2hJs7GYMpdWRwQZpo+Nv45cBnGoRwWWIsYkcWEbrs1vxvkNg0Ro
+pTa0Pt5gb1u7BfvpSqC/VyFBd66BcTQeJqTUHzWPKhMyCrP/eRYfFFQtpy5EZ+/O
+cjEVO1Tv5VhqM1PtANHdsS6o0jKMWFQ/Ofu4sOp6hQl4E1oOAzjLdtyBgJzSk1Jg
+M1lKPzSpYgRWcMB9CFTE2JO/4b+iMhxQjvGtD5nkeA6ZD7DSDItH6lhAQsho1pMi
+uoFlxDSFYHt0KcFp9zMrB2810mmNvjiEaqVXkA7QRH3XCA0BTkIXxzQe5QgTNg==
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/common.4096.dh b/reg-tests/ssl/common.4096.dh
new file mode 100644
index 0000000..8db27ac
--- /dev/null
+++ b/reg-tests/ssl/common.4096.dh
@@ -0,0 +1,13 @@
+-----BEGIN DH PARAMETERS-----
+MIICCAKCAgEAvpZPDLMGhdop4RgoSzRJfr681WeWplvSvBsIqyKB8D3uNPZchSg7
+Aci6+yupRDtVeaLMmJgqjiTb9wYXhJNxyuVMPfnFrYwGSm32OUcMhECD6N2elOj5
+WS8fvoYIMvnENyDsutmBObXISKLxeaY+PJEbwyEeJmdzEV6oM0qM/2bEJcgQ00p2
+V1Nge6OZpjahlRCpKHsQAIgtUpchZVTKZCrO9WbYUPVYcUIAmyNLmTlPmM08EcsN
+dJqkhse0xZN2isnGJybe1ABIW8D31aWqfWhjmuNqe9JTqz8BS00WOeKGYiEENIIF
+lHmU1uKYm+9ii1stT7WyrtAMRjbQSVsye9CEkne5jsQuhF2gzLMFhsEwE5svDBn9
+CeJC7V0WHef0kHNUSm9yzRQWFp4Y9sJI7Uq3Po1xOBBCDUQnDJTFsNiJSF84gYGo
+fvjMsvf3mLNkDE12g3trHUMjrns4MLpla21bA3FKEqyfUuR/yYQRtLOkR7sxF4+J
+lporo7jHhgPK57euhG8YLOgSEa0LIYXsNSHI7yDpkXFmwtPBQRE5ZOaN4mw1fsHp
+/+adsUAh30KDeoXXyZg9dpZFnq/GZlAHdiO48oVsYnloNNYwrPH9bU53u5oj73bo
+CTCZOb7V2BvfvnfwNmzwuofXMFXBvNqDSKcM3rkMSi3OomuHBZ/QQwsCAQI=
+-----END DH PARAMETERS-----
diff --git a/reg-tests/ssl/common.crt b/reg-tests/ssl/common.crt
new file mode 100644
index 0000000..1f0c275
--- /dev/null
+++ b/reg-tests/ssl/common.crt
@@ -0,0 +1,90 @@
+-----BEGIN CERTIFICATE-----
+MIIGeTCCBGGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJGUjEW
+MBQGA1UECBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoT
+B296b24uaW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYP
+c3VwcG9ydEBvem9uLmlvMB4XDTE2MDExNzIzMDIzOFoXDTE4MDExNjIzMDIzOFow
+gb4xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMRowGAYDVQQH
+ExFOZXVpbGx5LXN1ci1TZWluZTEYMBYGA1UEChMPVE9BRCBDb25zdWx0aW5nMRcw
+FQYDVQQLEw5lUGFyYXBoZXIgVGVhbTEWMBQGA1UEAxMNd3d3LnRlc3QxLmNvbTEw
+MC4GCSqGSIb3DQEJARYhYXJuYXVsdC5taWNoZWxAdG9hZC1jb25zdWx0aW5nLmZy
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnb0BDF7FsqzslakNg7u/
+n/JQkq6nheuKwvyTqECfpc9y7uSBe/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97
+N1/LZa6vecjjgGSP0Aag/gS/ocnMRIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKtt
+JP8xME7j3bTwIDElx/hNI0n7L+ySkAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6V
+kIzdOEtH6TcghXmuGcuqvLNH9BuosyngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+J
+Hm0pkDzAZ2WluNsuXlrJToPirWyj6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd
+3wIDAQABo4IBvzCCAbswCwYDVR0PBAQDAgOoMBMGA1UdJQQMMAoGCCsGAQUFBwMB
+MB0GA1UdDgQWBBTIihFNVNgOseQnsWEcAQxAbIKE4TCBsgYDVR0jBIGqMIGngBRv
+G9At9gzk2MW5Z7JVey1LtPIZ8KGBg6SBgDB+MQswCQYDVQQGEwJGUjEWMBQGA1UE
+CBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoTB296b24u
+aW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYPc3VwcG9y
+dEBvem9uLmlvggkA15FtIaGcrk8wDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9j
+b21tb25OYW1lOmNvcHkwCQYDVR0SBAIwADBIBgNVHR8EQTA/MD2gO6A5hjdodHRw
+Oi8vb3BlbnNzbGNhLnRvYWQtY29uc3VsdGluZy5jb20vb3BlbnZwbi9MYXRlc3Qu
+Y3JsMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiVE9BRC1Db25z
+dWx0aW5nIHNlcnZlciBjZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAgEAewDa
+9BukGNJMex8gsXmmdaczTr8yh9Uvw4NJcZS38I+26o//2g+d6i7wxcQg8hIm62Hj
+0TblGU3+RsJo4uzcWxxA5YUYlVszbHNBRpQengEE5pjwHvoXVMNES6Bt8xP04+Vj
+0qVnA8gUaDMk9lN5anK7tF/mbHOIJwHJZYCa2t3y95dIOVEXFwOIzzbSbaprjkLN
+w0BgR5paJz7NZWNqo4sZHUUz94uH2bPEd01SqHO0dJwEVxadgxuPnD05I9gqGpGX
+Zf3Rn7EQylvUtX9mpPaulQPXc3emefewLUSSAdnZrVikZK2J/B4lSi9FpUwl4iQH
+pZoE0QLQHtB1SBKacnOAddGSTLSdFvpzjErjjWSpMukF0vutmrP86GG3xtshWVhI
+u+yLfDJVm/pXfaeDtWMXpxIT/U1i0avpk5MZtFMRC0MTaxEWBTnnJm+/yiaAXQYg
+E1ZIP0mkZkiUojIawTR7JTjHGhIraP9UVPNceVy0DLfETHEou3vhwBn7PFOz7piJ
+wjp3A47DStJD4fapaX6B1fqM+n34CMD9ZAiJFgQEIQfObAWC9hyr4m+pqkp1Qfuw
+vsAP/ZoS1CBirJfm3i+Gshh+VeH+TAmO/NBBYCfzBdgkNz4tJCkOc7CUT/NQTR/L
+N2OskR/Fkge149RJi7hHvE3gk/mtGtNmHJPuQ+s=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJazCCBVOgAwIBAgIUWHoc5e2FUECgyCvyVf8wCtt8gTYwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA4MDQxODU4MTZaFw0yMDA5
+MDMxODU4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggQiMA0GCSqGSIb3DQEB
+AQUAA4IEDwAwggQKAoIEAQDARiuHkhrnf38Md1nxGDSneJfwv/QksdNNMNTJBdjg
+OVmaRCIAyz43oefTWDQ/TebbSwB+Lg9pud1zadGWhlZRhCgBPP8JDMhIKH4eXIRk
+5IIa8WD08EwvSlqJL0r4gsMtVsxy7BZHAkka/2Ket9pyGt4kG5n75RFdc6BI80/8
+RwJt/MDxPrcVBAT7LnCluxQpyya9mZCabj7l+9a2yU2hgWS6QqfZJ133krkP/MMh
+AEQkSoA4mmBwWk9yPqXmUqiOi7v6iLkIUEh5SgYVPRk9BtU/kDaUdSwuqRrpCZo4
+SsWZWFLxBmLHkSh+G+BWjCVYMQr2ye7e+VMT/20+5xAfq4fj9n5BsPcx3QcVuTof
+RAc/Oygnt4MYnIcUb7zRFvCAvgpUHL7BnEn6nhyXjHJGqGDchsg8m9t3v/Y3ohq+
+qmrSzdeuylE1n3W5aWJlbFmyXegNP45MJ0xicesVrXEWF7YD/ir9mGJ8bQYr4blf
+77PrbF02komC6AzVPKOJa0jR+eW1wErzYlkYgez6ylBWCiHJd1dhEHlK3h2rXdYa
+Gnb45ILCLpEDjNEUrHifLLNXwqJpgZQsJU6BgMgk7ZgBfAKrCfTeg0rkCqCAPeVb
+8eSLf7FBF7YBRJ5P6u8qXc4RtgEu607GaWV0gIMfyVBY52oV+OaNsEdFetrJnp3c
+friG8vJ+7jdq6zjUCGgnfUIHoViJPh3JuFfhA3jT0gQDKW5PeI7dxhrNvlqdYfHI
+fxX7Y1/J6cTQkqJ1cai2f0bwJIJiTAThNbG+zrtjJ7fZ3wJ4udyU/IKrwShqtmTb
+1Ofj0tJDdwOH8i84vIySLUvR9aAb7ClFlnsx6rzwOxG90W7C0LA2M0EHm4FezJm/
+FfujnZwEWr1T9Wki6qE0MHCbdN/TTDws//EKkkE44FC+amL96w0IQl70vpE37j2A
+zlDWvFFID95SIxfmpkwWDvXDKv6gr1GMLeysCl2fgpY05Xidw5cEo9/tEkuWn/dG
+x/D9hnLBGeroA0251ES12jemqDjI2U0tfaeHakjwSsoWElf94Qmuh2iPZ+1zIxQs
+7o6nAWN8X9hfsmrDTTHlww0TEfrjlbzG5Yh+0ZRxmejgiUyOCXck+eh/ZXMXvfWh
+y3CorIIuWgkRjm80PYkdaRDJdZuyP6R7tXfTXNVzAiSQf0Qx9ru2KB2Fs/XZPamH
+KjItAU5Q6msIVvaRMS0muQgV+b6hqSEBzqXqJfAlpVLHXr5FqK+U7EB9y02B6piB
+tAmxqXP8OOCoQql6/vgIcrDFUOo6KtGBW36ef74XE3KCUVaIzVJZSIt6i/Vi0bZj
+bAjsJUQ3qDlHdorv9TRVOhnC1GUz7SuYnpEOyiXmyx3LAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ62csZcH/meQcENHhNbqz9LMzwjjAfBgNVHSMEGDAWgBQ62csZcH/meQcE
+NHhNbqz9LMzwjjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IEAQBA
+wLsGf3R1+/I2zQE+lsj7RasZtA/Cos92iEGDAPvFbx9e+roG8Gg8KBsEJu/HN0JH
+lMMiQ8dDRHSBMvRBENL5/57oOOhmqc+1u5sazLuANhzAYPZG17Klib7YpEwWoXar
+FDDiJYtCyLW0oNLpCswYopWK9GC0RJNucB0NFvOxehJ2sP2/fxGBQMB09L6mjKjd
+4KsOzyd3dNf0VYS6jB+/1pcKSHKQUo9HRHB5FK04PsYHoh4AtmEHvmYQKcWWidgU
+v26ftlH00ERzuW2juqBbz9mghlNRqXi0IyZ9b4tSj29dxW+WWFzo7j2zEPaD6z2W
+DEHq7zvON+g+q6qLgWeszqMgJzjvWjMj00E/t06PoHPiz/cAnDKEqp+ZzxCIFrxj
+/qneChpogDWyLbawhyyzbZvbirx5znOSbWjPZgydqaNEFViqbxwinBx4Xxabo6XN
+TU020FuMWmgfbIcvtgjKgyKqc97l7JMNNm7LQV9+9W0U5zdIqQKLZ9MMrd2w3xh4
+MAB8NKnwzHReK0TWwUU9HSgFAGdEX6HnyZ3bQ13ijg+sNBRMEi0gBHaqZKDdyoft
+B2u2uasSwioV48dbSIcHl+rTBKxiMh5XQ7ENnaGOJkjsIqTVzizqnPHU8eMBnSbb
+dsXlamROYII44+j3Ku6OGt51w86eGk4VxI3tmaECcJKqTkwUFD8AcNDrkjtmLuxK
+12yjnoM+u1cclfqQ5NOtRc6MJZ27jCobfBBhVdKVDp4X1WNyqGlbsU5adDAzknuI
+GT7MJO7lGjkZX2n54BNPSfrSknYMOVYcZqL0Dbcrhx5IyEmg+iOlOu1HO1tdnZop
+ej4vT+1V2w9Sa4Wo3UCo84jcm5v/4z7jCYh4BRQ60CFb7GLxZoqXIslcGSPool3n
+jl8JWoaLXrJUPfZGXo1iAlayJ5EiMyZl4eB/TBUf6TMm8vLvsPiUT+CEsjLppOdS
+eYppZAZ6H1JrJGs5kKBdOJHGn6Pkp5QsHIswOBd1HqHrBbYbZmDaDLRHduILWLrM
+e0/IfDdeXB/bKfmZoEpT8xRiauw15p0AHLumiK7KISAehfgBqUnxx+YmgGoZ7EWX
+KnMYAfCuC6oJ1DL0gp4Z9yMK1eu+GV1sLxPq9ZruEHW1R+H+4sGyiA5Gso2tgB6/
+XW//wxKclNp5LZR7hqfs/kGuh5asrJrnEbMwWn2+tr/LqfYtYh1D6nHfIXpT0o1d
+rNy/HrsKnRDMWxjm03r4hCViuNVD3Zb9anAF/NSPDVu8ATM5JbJNrCYX4eipz6ZE
+aQBkwIBkTPgtgP4r8v2G+uMYDw8nq7xh72FK107aeTTwc6MgU5jfeFNMr2XJisJd
+lSem1ngKYQSEzjVsTE4c
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/common.key b/reg-tests/ssl/common.key
new file mode 100644
index 0000000..4b06553
--- /dev/null
+++ b/reg-tests/ssl/common.key
@@ -0,0 +1,28 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAnb0BDF7FsqzslakNg7u/n/JQkq6nheuKwvyTqECfpc9y7uSB
+e/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97N1/LZa6vecjjgGSP0Aag/gS/ocnM
+RIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKttJP8xME7j3bTwIDElx/hNI0n7L+yS
+kAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6VkIzdOEtH6TcghXmuGcuqvLNH9Buo
+syngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+JHm0pkDzAZ2WluNsuXlrJToPirWyj
+6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd3wIDAQABAoIBABojc8UE/2W4WgwC
+04Z82ig7Ezb7Ui9S9M+S4zUCYHItijIkE4DkIfO3y7Hk4x6iJdyb191HK9UdC5p9
+32upS9XFPgM/izx3GZvxDhO+xXbSep7ovbyuQ3pPkHTx3TTavpm3GyvmcTKKoy4R
+jP4dWhzDXPdQW1ol3ZS4EDau4rlyClY6oi1mq9aBEX3MqVjB/nO7s2AbdgclAgP2
+OZMhTzWYR1k5tYySHCXh3ggGMCikyvHU0+SsGyrstYzP1VYi/n3f0VgqW/5ZjG8x
+6SHpe04unErPF3HuSun2ZMCFdBxaTFZ8FENb8evrSXe3nQOc9W21RQdRRrNNUbjl
+JYI4veECgYEA0ATYKMS1VCUYRZoQ49b5GTg7avUYqfW4bEo4fSfBue8NrnKR3Wu8
+PPBiCTuIYq1vSF+60B7Vu+hW0A8OuQ2UuMxLpYcQ7lKfNad/+yAfoWWafIqCqNU9
+at0QMdbW6A69d6jZt7OrXtleBsphCnN58jTz4ch4PIa2Oyq46NUXCvUCgYEAwh8t
+G6BOHOs3yRNI2s9Y9EEfwoil2uIKrZhqiL3AwdIpu5uNIMuPnbaEpXvRX6jv/qtL
+321i8vZLc31aM7zfxQ6B4ReQFJfYC80FJsWvcLwT9hB9mTJpLS4sIu5tzQc87O6w
+RtjFMom+5ns5hfPB4Eccy0EtbQWVY4nCzUeO6QMCgYBSvqqRRPXwG7VU8lznlHqP
+upuABzChYrnScY+Y0TixUlL54l79Wb6N6vzEOWceAWkzu8iewrU4QspNhr/PgoR3
+IeSxWlG0yy7Dc/ZnmTabx8O06I/iwrfkizzG5nOj6UEamRLJjPGNEB/jyZriQl7u
+pnugg1K4mMliLbNSAnlhBQKBgQCmYepbv260Qrex1KGhSg9Ia3k5V74weYYFfJnz
+UhChD+1NK+ourcsOtp3C6PlwMHBjq5aAjlU9QfUxq8NgjQaO8/xGXdfUjsFSfAtq
+TA4vZkUFpuTAJgEYBHc4CXx7OzTxLzRPxQRgaMgC7KNFOMR34vu/CsJQq3R7uFwL
+bsYC2QKBgQCtEmg1uDZVdByX9zyUMuRxz5Tq/vDcp+A5lJj2mha1+bUMaKX2+lxQ
+vPxY55Vaw/ukWkJirRrpGv6IytBn0dLAFSlKZworZGBaxsm8OGTFJ5Oe9+kZTjI9
+hvjpClOA1otbmj2F2uZAbuIjxQGDNUkLoifN5yDYCC8JPujHuHmULw==
+-----END RSA PRIVATE KEY-----
+
diff --git a/reg-tests/ssl/common.pem b/reg-tests/ssl/common.pem
new file mode 100644
index 0000000..042fe4e
--- /dev/null
+++ b/reg-tests/ssl/common.pem
@@ -0,0 +1,72 @@
+-----BEGIN CERTIFICATE-----
+MIIDxzCCAq+gAwIBAgIURbbHd6AXFZoZEmNAwQU1IbkeEjswDQYJKoZIhvcNAQEL
+BQAwYzELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVBy
+b3h5IFRlY2hub2xvZ2llczElMCMGA1UEAxMcSEFQcm94eSBUZXN0IEludGVybWVk
+aWF0ZSBDQTAeFw0yMzA5MjAxNjI2MDBaFw0zMzA5MTcxNjI2MDBaMEUxCzAJBgNV
+BAYTAkZSMQ4wDAYDVQQHEwVQYXJpczEOMAwGA1UEChMFdGVzdDExFjAUBgNVBAMT
+DXd3dy50ZXN0MS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDd
++BTITfrXq7S3+dIUBlXEdNKXZPqSwhcgriVNvDhu0QnBbiqMV96MiEta+NrWvVy3
+VY41Y0pvDS1PrWsnClBMdBFdPkIRpAx20cz3M4lBDi/p14m+JbYvsVhLGDozMEba
+OxMSeidTFDHQq7k0b0sMMvJ2v09RvPaDytWaRtC07nVlZPU0fPQCSw3oZf1K9u9b
+dbuMJOeO2D0/PXezbWrdi3+VuaK47e+1Bijh6Qm8uD8f7SRKGFm26QkV87wKWw8x
+hPnZL1PNWyQNfAzGQf6sjpmahhz+Lva3ywbv5XaUp8ojgw4LOrz1fvC8dDj9wVg1
+3JhWhmwLC/5zG5eY3LuZAgMBAAGjgZAwgY0wDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
+JQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFc8xugp1dGZ
++KqMGcfUmbTYEEurMB8GA1UdIwQYMBaAFHr9tGo8KeCWI09L6wA8+zwp7FEMMBgG
+A1UdEQQRMA+CDXd3dy50ZXN0MS5jb20wDQYJKoZIhvcNAQELBQADggEBAIzfQ//U
+1jqTmdjUNZHOqFvCcc06W9vDUJbeKSyStZnE/J3WHrJeLLNaUV00G93zLLRs6slT
+kZ4eEmUZlPGGz6hFK6d8jSIIxyaw/O5l9Ix/Z5cUMiScHNbiLBiyhy6AvF/NcJYl
+xQ6EUIcOqMxEL0dSRq6ckGZvnyFusPuNgfNeIy0Y1eI30En1mPNGQiu2DP7Ex4Ht
+dAiHT6ITXk43wHyXDqXpt97Rdbq1dNNP6sSkQ8r0IeDi5f/qSsBGbR1a9UoKkJOZ
+OO6IGhEb2XCWc2xquJpUHCOXhzaXj/SmxCDpWVW5tdKNZ96gUlp2Wtf0Rp25yFge
+4mCry3J674p8Oto=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDuTCCAqGgAwIBAgIUQQtTP1aDfRAnDdjKkaUT2py2fmswDQYJKoZIhvcNAQEL
+BQAwYjELMAkGA1UEBhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVBy
+b3h5IFRlY2hub2xvZ2llczEkMCIGA1UEAxMbSEFQcm94eSBSb290IFRlc3QgQXV0
+aG9yaXR5MB4XDTIzMDkyMDE2MjQwMFoXDTMzMDkxNzE2MjQwMFowYzELMAkGA1UE
+BhMCRlIxDjAMBgNVBAcTBVBhcmlzMR0wGwYDVQQKExRIQVByb3h5IFRlY2hub2xv
+Z2llczElMCMGA1UEAxMcSEFQcm94eSBUZXN0IEludGVybWVkaWF0ZSBDQTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJXZfTPAWzNIh1EM3BRlO63BBpX5
+xDMb4yOP2Ntxe9mZHsI5dTRGgZygswZQV4LgKd7B868OEbaDGOjTwvDJI28foS1n
+almhniH/sQamCpeMEX9brRyJ1ev2mTLM1XkAhKmzGRRbGi+A8M6pIMaOLmn8gvsE
+ekfatzOG3wQamJDXLa/L4/Pd2GI+cb2wOLRtIHxuAocL+Dgy+LIa1q3slGyboU9X
+ZJcOjTa705H4FWbhVJ4KV0dI6iSXtO9xG/8RkuY9VyZh39pqU9aQxLWFUyy0coec
+b7RzyUrVDlTvmV9X2fotWmAnQfUk+4wc5/rhOSFNg1lowKt7MB/NjsPOmE8CAwEA
+AaNmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0O
+BBYEFHr9tGo8KeCWI09L6wA8+zwp7FEMMB8GA1UdIwQYMBaAFB4wopglmOnetKea
+jkq9OymofpZfMA0GCSqGSIb3DQEBCwUAA4IBAQDQamqX11VfTB5USqAL1E0/qHqG
+WivOWX2K5lCfbalg7Fqlm1d0hKffPODD3RAOU7UhP9B1xc9NRBgbNPEEMu/O92PS
+C5H8WxGoKfa4TuX8JfhhpUGpRelFFHU7mVVyMh7RDQmfFdxC8ej8+iTvBXtacMhh
+VWokTIakyFCj7u/qcQKhpzoTDq9VRE+lmOFYzMtHqk+qGVXDgG8z/e7z5UP98ttI
+XXsQ50Mi6ow8P118eRjJI01DZUu8QnYt/+jhqAFipS2OjyV4Jlq+wGZ4xB9gonlf
+lTdqR19oFnIFi30boIB33MRxz9qWZy7ay5jUV6jGauymQI/1waPtv/KIjXzt
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA3fgUyE3616u0t/nSFAZVxHTSl2T6ksIXIK4lTbw4btEJwW4q
+jFfejIhLWvja1r1ct1WONWNKbw0tT61rJwpQTHQRXT5CEaQMdtHM9zOJQQ4v6deJ
+viW2L7FYSxg6MzBG2jsTEnonUxQx0Ku5NG9LDDLydr9PUbz2g8rVmkbQtO51ZWT1
+NHz0AksN6GX9SvbvW3W7jCTnjtg9Pz13s21q3Yt/lbmiuO3vtQYo4ekJvLg/H+0k
+ShhZtukJFfO8ClsPMYT52S9TzVskDXwMxkH+rI6ZmoYc/i72t8sG7+V2lKfKI4MO
+Czq89X7wvHQ4/cFYNdyYVoZsCwv+cxuXmNy7mQIDAQABAoIBAAtHSbcLz00aGmqm
+tPfzgnQjA3hR1zGRzx8H+jlC8RMgaAB+GodbB6HYYwvSTTxQDt/55unri6Ks5rp/
+s0weiAn6c89rFGxVC5UV//YnljfKAsE9BHC29dTii/v01TA4qcD483Ui49Ci3A0g
+TJ7PcN7Dz/IcsmkS0uvLaKMYKg6nXu9UnKkLBqThTiLA5I+eZZ4zX862Yurku8NI
+HwbMtBsICbe1H0Ebdc/PPAShB8pvV3nQMGFjADOEYolaByQAltolADmIc5K9E6wL
+SsHzAjGTjarSYdqjZRuoEtQrWQTG1fnvJZBXB8L1Brv9RbrPWN2TW/A1uhUR4qYd
+wuxB1mkCgYEA9ao05RsJzDVc4qLBvDXuqo1WapwnYUyc8Jeq+r5l67Ota1ykQyiQ
+BJZDM/mdFRzZZjMAAMN9cxsDdY7gp0ebN190F443tSxjvlVOGJ/e8UJ+Au+9WEYM
+xZQo5VquU8XlxfwFYtYANMvr7DB8yEr25S7S2v3jZ70NZQDDR6G+5L8CgYEA506s
+JJM/NfP82e6JtSB9fOEcjEeddPCdf2RkB+E3Ppt7AQX5Aje/8VOSiZkIbOap9oLc
+rBd9GrER2m2pahd9DFkSs1td70IM5CwwgYIKyWkCCkLXyMrLpelFEqZsFvWD14Go
+c29NSDOpVfVJkPr46Gym6dBvy1rCMh+/ZrgsPacCgYAXFU0vEeFTzNfTe9OUacTp
+RaqPDektXVOJz9aDBQzynsl2wxe0L8WIiM37vkKiTzmmPriLWdMZ07n5e9lkHnhZ
+NaDk7boSTf95TO6vpaX/KW5JvFkgSES+8/L7yg+DU58JgWrIJK+URmz+nne7IkAc
+vM+XQC/z+XGewfmXa31SZQKBgGSDpHyUXBCXARa2+LJz9vQQYrZ23G6XrOlR98IQ
+1U/ogrA0JzwWe9tjxLb0gFvKdKtBvKEv59Z0gdyYcNlAcb+u6Vh1aMFyw2VX6pAs
+sYFKl29cEqcXsR1c2/45wZjMgclhd5EKGdw5TumimKBe31Eo/fN29024F9FuSF9b
+wyXbAoGBALGIKzPgV7Tt6SbzcCjJGQHlH/RKBcuFJjJS+Qph3w7K3L5b6Y35zPOY
+3+FxT2Z5wAlOGYeF9Qa8K3/VX1l7Vhktu9EcTqM59fMGuTM0mEgwwdFM4oFgRIau
+wmlIuAFmo7OwlsggHuHJ7lDk+r7AoNVW7l7Gd1JnG4CasvymVc3N
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/crl-auth.pem b/reg-tests/ssl/crl-auth.pem
new file mode 100644
index 0000000..af59d1d
--- /dev/null
+++ b/reg-tests/ssl/crl-auth.pem
@@ -0,0 +1,18 @@
+-----BEGIN X509 CRL-----
+MIIC0jCBuzANBgkqhkiG9w0BAQUFADB0MQswCQYDVQQGEwJGUjETMBEGA1UECAwK
+U29tZS1TdGF0ZTEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxMTAvBgNV
+BAMMKEhBUHJveHkgVGVjaG5vbG9naWVzIENBIFRlc3QgQ2xpZW50IEF1dGgXDTIw
+MDQyODE5MjkyNloYDzIwNTAwNDIxMTkyOTI2WjAUMBICAQMXDTIwMDQyODE5MDE1
+MVowDQYJKoZIhvcNAQEFBQADggIBAMPJgdU6bsFMFKBop0dngtAG1DXSrHo1XlYY
+J1uWEuVcNnimH1EHQXMmL5C26ALrHlQILLzq3RVcNZIT0tVF6jvcf8tzcaGeybS1
+tpDloE2A2jPz3Pf/uS4MB7eTPiMvY7cUl7fk4Oif/PjGPxdu+E5SP6HWVdjCvBHb
+2yye/KjN/vj3g5uI6z2l1Hxh2yzYmMVS8cTRG5SfUXgH+IXJOS8zE7CsMB/IRctQ
+TXD0q0iZLn7Q0liA/wxxJHYg2m3RdFa82THdWaqsIM4ao2KLz324ycQpWT0eRWpv
+6gyVXbEU/sX8HdZdNpfgQADiU8eK4XlnEmXehSE3TwyM1ysnoFRtOqDvaQrHbAMh
+Av0/9JLOPGDqCjof4lLfAW6JDtU55J4SxCYlaRj152939eXwDkb70WefZMssfqcw
+ZPDK6afY358kb7Yb0U2pE73+Z3VDcczBF085nc6q/2m5lvA+XwZYr4xBkVzHbdP3
+USEFd06FHlh2i2rpaiihR7sQx9KJ75ko3TjDbeg/QryMBKsS2CeJoHPDcFjjzFZF
+RW1HYReV1MZT8UEuskMvl+w57OYbfqf/pwhQcJTL8XE9PRtzntmLMofmiN/X5PQV
+YS6JvGVAIC7HFDiZ8Wn8B+WT93ecCNQL1FpIpo1JxuRfx6jTtGqGg65R3CzwbqUH
+dBkieO8E
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/del_ssl_crt-list.vtc b/reg-tests/ssl/del_ssl_crt-list.vtc
new file mode 100644
index 0000000..83eda24
--- /dev/null
+++ b/reg-tests/ssl/del_ssl_crt-list.vtc
@@ -0,0 +1,102 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "del ssl crt-list" command to remove a line from a crt-list.
+
+# It performs three requests towards a frontend that uses simple.crt-list.
+# Between the second and third requests, a line is deleted from the crt-list,
+# which makes the third request fail since it would have used the deleted line
+# and the strict-sni option is enabled on the frontend.
+# Another test is performed as well. A line corresponding to the default instance
+# of a frontend that does not have the strict-sni option enabled cannot be deleted.
+
+varnishtest "Test the 'del ssl crt-list' feature of the CLI"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+server s1 -repeat 2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ http-response set-header X-SSL-Server-SHA1 %[ssl_s_sha1,hex]
+ server s1 "${tmpdir}/first-ssl.sock" ssl verify none sni str(record2.bug940.domain.tld)
+ server s2 "${tmpdir}/first-ssl.sock" ssl verify none sni str(record3.bug940.domain.tld)
+ server s3 "${tmpdir}/first-ssl.sock" ssl verify none sni str(record2.bug940.domain.tld)
+
+ listen first-ssl-fe
+ mode http
+ bind "${tmpdir}/first-ssl.sock" ssl strict-sni crt-list ${testdir}/simple.crt-list
+ server s1 ${s1_addr}:${s1_port}
+
+ listen second-ssl-fe
+ mode http
+ bind "${tmpdir}/second-ssl.sock" ssl crt-list ${testdir}/localhost.crt-list
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+ expect resp.status == 200
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "del ssl crt-list ${testdir}/simple.crt-list ${testdir}/common.pem:2"
+ expect ~ "Entry '${testdir}/common.pem' deleted in crtlist '${testdir}/simple.crt-list'!"
+}
+
+haproxy h1 -cli {
+ send "show ssl crt-list -n ${testdir}/simple.crt-list"
+ expect !~ "common.pem:2"
+}
+
+# This connection should fail since the corresponding line was deleted from the crt-list
+# and the strict-sni option is enabled.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+# We should not be able to delete the crt-list's first line since it is the
+# default certificate of this bind line and the strict-sni option is not enabled.
+haproxy h1 -cli {
+ send "del ssl crt-list ${testdir}/localhost.crt-list ${testdir}/common.pem:1"
+ expect ~ "Can't delete the entry: certificate '${testdir}/common.pem' cannot be deleted, it is used as default certificate by the following frontends:"
+}
+
+# We should be able to delete any line of the crt-list since the strict-sni option is enabled.
+haproxy h1 -cli {
+ send "del ssl crt-list ${testdir}/simple.crt-list ${testdir}/common.pem:1"
+ expect ~ "Entry '${testdir}/common.pem' deleted in crtlist '${testdir}/simple.crt-list'!"
+}
diff --git a/reg-tests/ssl/dynamic_server_ssl.vtc b/reg-tests/ssl/dynamic_server_ssl.vtc
new file mode 100644
index 0000000..b7730f5
--- /dev/null
+++ b/reg-tests/ssl/dynamic_server_ssl.vtc
@@ -0,0 +1,113 @@
+#REGTEST_TYPE=bug
+# Test if a certificate can be dynamically updated once a server which used it
+# was removed.
+#
+varnishtest "Delete server via cli and update certificates"
+
+feature ignore_unknown_macro
+
+#REQUIRE_VERSION=2.4
+#REQUIRE_OPTIONS=OPENSSL
+feature cmd "command -v socat"
+
+# static server
+server s1 -repeat 3 {
+ rxreq
+ txresp \
+ -body "resp from s1"
+} -start
+
+haproxy h1 -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httpclose
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe
+ bind "fd@${feS}"
+ default_backend test
+
+ backend test
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt "${testdir}/client1.pem"
+ server s2 "${tmpdir}/ssl.sock" ssl verify none crt "${testdir}/client1.pem"
+ server s3 "${tmpdir}/ssl.sock" ssl verify none crt "${testdir}/client1.pem"
+
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt "${testdir}/common.pem"
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: D9C3BAE37EA5A7EDB7B3C9BDD4DCB2FE58A412E4"
+}
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.body == "resp from s1"
+} -run
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: D9C3BAE37EA5A7EDB7B3C9BDD4DCB2FE58A412E4"
+}
+
+## delete the servers
+haproxy h1 -cli {
+ send "disable server test/s1"
+ expect ~ ".*"
+ send "disable server test/s2"
+ expect ~ ".*"
+ send "disable server test/s3"
+ expect ~ ".*"
+
+ # valid command
+ send "del server test/s1"
+ expect ~ "Server deleted."
+ send "del server test/s2"
+ expect ~ "Server deleted."
+ send "del server test/s3"
+ expect ~ "Server deleted."
+}
+
+# Replace certificate with an expired one
+shell {
+ printf "set ssl cert ${testdir}/client1.pem <<\n$(cat ${testdir}/client2_expired.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/client1.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: C625EB01A0A660294B9D7F44C5CEEE5AFC495BE4"
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*Status: Unused"
+}
+
+haproxy h1 -cli {
+ send "add server test/s1 ${tmpdir}/ssl.sock ssl verify none crt ${testdir}/client1.pem"
+ expect ~ "New server registered."
+ send "enable server test/s1"
+ expect ~ ".*"
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*Status: Used"
+}
+
+
+# check that servers are active
+client c1 -connect ${h1_feS_sock} {
+ txreq
+ rxresp
+ expect resp.body == "resp from s1"
+} -run
+
diff --git a/reg-tests/ssl/ecdsa.crt b/reg-tests/ssl/ecdsa.crt
new file mode 100644
index 0000000..27b5f5d
--- /dev/null
+++ b/reg-tests/ssl/ecdsa.crt
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE-----
+MIIBfzCCAQWgAwIBAgIUYDgleyiLJSKbSWzlU3PTCB/PPYIwCgYIKoZIzj0EAwIw
+FDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE5MTIxOTA5MzExMloXDTIwMDExODA5
+MzExMlowFDESMBAGA1UEAwwJbG9jYWxob3N0MHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAEHNNG/ZSuS7CXvL03ye/Y+LpWnX818mnYkxqUQdFO2N1CO0p6kSIMHrzMQIRe
+v3+j2g6drKehMGjBmeZJwsbD6nYyUO1z+0MatW5UiTMWFmPq4v08TDDtd8sNcWgs
+SWrToxgwFjAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDaAAwZQIw
+N2BdTJOH3BZlJ7HRIJNRC7jjByI9+QYAHiBoXmJVi9aoKd7OIz1Nb2DPe3QS1sDw
+AjEA9KzI8BVIZJEmsVA6rs+vRjX0tUfBhD7BCHKas0roOny9Smj/TkBFxVTNnjzM
+8iLn
+-----END CERTIFICATE-----
+
diff --git a/reg-tests/ssl/ecdsa.key b/reg-tests/ssl/ecdsa.key
new file mode 100644
index 0000000..6eec0ec
--- /dev/null
+++ b/reg-tests/ssl/ecdsa.key
@@ -0,0 +1,6 @@
+-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDZMkuztqaUgCAC9/7P
+CsmlC2ac7rWerq5+NKbP0Cz1+mao6+F5Hc8DKNXHgi5GPr2hZANiAAQc00b9lK5L
+sJe8vTfJ79j4uladfzXyadiTGpRB0U7Y3UI7SnqRIgwevMxAhF6/f6PaDp2sp6Ew
+aMGZ5knCxsPqdjJQ7XP7Qxq1blSJMxYWY+ri/TxMMO13yw1xaCxJatM=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/ecdsa.pem b/reg-tests/ssl/ecdsa.pem
new file mode 100644
index 0000000..e737689
--- /dev/null
+++ b/reg-tests/ssl/ecdsa.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIIBfzCCAQWgAwIBAgIUYDgleyiLJSKbSWzlU3PTCB/PPYIwCgYIKoZIzj0EAwIw
+FDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE5MTIxOTA5MzExMloXDTIwMDExODA5
+MzExMlowFDESMBAGA1UEAwwJbG9jYWxob3N0MHYwEAYHKoZIzj0CAQYFK4EEACID
+YgAEHNNG/ZSuS7CXvL03ye/Y+LpWnX818mnYkxqUQdFO2N1CO0p6kSIMHrzMQIRe
+v3+j2g6drKehMGjBmeZJwsbD6nYyUO1z+0MatW5UiTMWFmPq4v08TDDtd8sNcWgs
+SWrToxgwFjAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDaAAwZQIw
+N2BdTJOH3BZlJ7HRIJNRC7jjByI9+QYAHiBoXmJVi9aoKd7OIz1Nb2DPe3QS1sDw
+AjEA9KzI8BVIZJEmsVA6rs+vRjX0tUfBhD7BCHKas0roOny9Smj/TkBFxVTNnjzM
+8iLn
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDZMkuztqaUgCAC9/7P
+CsmlC2ac7rWerq5+NKbP0Cz1+mao6+F5Hc8DKNXHgi5GPr2hZANiAAQc00b9lK5L
+sJe8vTfJ79j4uladfzXyadiTGpRB0U7Y3UI7SnqRIgwevMxAhF6/f6PaDp2sp6Ew
+aMGZ5knCxsPqdjJQ7XP7Qxq1blSJMxYWY+ri/TxMMO13yw1xaCxJatM=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/filters.crt-list b/reg-tests/ssl/filters.crt-list
new file mode 100644
index 0000000..e72ee0b
--- /dev/null
+++ b/reg-tests/ssl/filters.crt-list
@@ -0,0 +1,2 @@
+common.pem *.bug810.domain.tld record.bug810.domain.tld *.bug818.domain.tld !another-record.bug818.domain.tld
+ecdsa.pem record.bug810.domain.tld another-record.bug810.domain.tld *.bug818.domain.tld
diff --git a/reg-tests/ssl/generate_certificates/gen_cert_ca.pem b/reg-tests/ssl/generate_certificates/gen_cert_ca.pem
new file mode 100644
index 0000000..1aae9a7
--- /dev/null
+++ b/reg-tests/ssl/generate_certificates/gen_cert_ca.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIICOjCCAcCgAwIBAgIUf+VQOeilN1b1jiOroaMItFRozf8wCgYIKoZIzj0EAwIw
+VDELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxHTAbBgNVBAoMFEhB
+UHJveHkgVGVjaG5vbG9naWVzMREwDwYDVQQDDAhFQ0RTQSBDQTAeFw0yMjAxMTIx
+NDAzNTlaFw00OTA1MzAxNDAzNTlaMFQxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApT
+b21lLVN0YXRlMR0wGwYDVQQKDBRIQVByb3h5IFRlY2hub2xvZ2llczERMA8GA1UE
+AwwIRUNEU0EgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARyx1wAgb1/fuAflF73
+j3Z1intP7+11kGtVZ1EAKd//xqtxFuJ+98/gc5cpiOBMWcn6FyEZ+GShTpQeqsFs
+2C4k0LTtKadXwuQaIs05QMpahTN2vmc6LPgzOrEJxFafjdejUzBRMB0GA1UdDgQW
+BBTX2Q6ojJB88kEKjdnoufDv8TGphzAfBgNVHSMEGDAWgBTX2Q6ojJB88kEKjdno
+ufDv8TGphzAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMCA2gAMGUCMQCLVP3+
+dvfS2k6GYplmmkyC7YVlmNre5gZwIE9zYDDvKDxsS95oqXLT5dTVm9W0MhACMAgB
+D9uOlqoGaHbRGBE8wlV33bVdpzD6JEqVyGCdEtdCW4T5Vsg3pAsUiG2tPWQ2LA==
+-----END CERTIFICATE-----
+-----BEGIN EC PARAMETERS-----
+BgUrgQQAIg==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDDosJpJuqxVdp/wuJYM1k2OTK8Pri+ChDRVlDySnHYP92aFT0GXX8A5
+X5rLNDtbaCGgBwYFK4EEACKhZANiAARyx1wAgb1/fuAflF73j3Z1intP7+11kGtV
+Z1EAKd//xqtxFuJ+98/gc5cpiOBMWcn6FyEZ+GShTpQeqsFs2C4k0LTtKadXwuQa
+Is05QMpahTN2vmc6LPgzOrEJxFafjdc=
+-----END EC PRIVATE KEY-----
diff --git a/reg-tests/ssl/generate_certificates/gen_cert_server.pem b/reg-tests/ssl/generate_certificates/gen_cert_server.pem
new file mode 100644
index 0000000..ce2f621
--- /dev/null
+++ b/reg-tests/ssl/generate_certificates/gen_cert_server.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIBujCCAV8CAQEwCgYIKoZIzj0EAwIwWDELMAkGA1UEBhMCRlIxEzARBgNVBAgM
+ClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDER
+MA8GA1UEAwwIRUNEU0EgQ0EwHhcNMjIwMjA4MTU0MjMxWhcNNDkwNjI2MTU0MjMx
+WjBcMQswCQYDVQQGEwJGUjETMBEGA1UECAwKU29tZS1TdGF0ZTEdMBsGA1UECgwU
+SEFQcm94eSBUZWNobm9sb2dpZXMxGTAXBgNVBAMMEHNlcnZlci5lY2RzYS5jb20w
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAARXlODrnr208aoToRb8MqTp4GYgnk9V4LJ5
+XE8HyM7EWbqx46PdUpLUseFOtF/Yr9nyzMcdd6GNZrHkgM2NaQ/13tTbLJ84wXRQ
+jS9FSqFmDmmgbEARiyEf0K8D9lxI0bgwCgYIKoZIzj0EAwIDSQAwRgIhAJlwV5oJ
+Uz4nYUEWIrgFd7de5GZseFBIbW+UWr17Ip6gAiEAhrVEpmd4Tl5JPTwQznPa6ZlJ
+Zc8S6ipcwXPCJzsSOnQ=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDD6ONh7kiRD6TxwQGIa
+bY5kUclHcPXiWO1QNscmeVtObmTKYiVcRR+Mj4tNRXWH6lyhZANiAARXlODrnr20
+8aoToRb8MqTp4GYgnk9V4LJ5XE8HyM7EWbqx46PdUpLUseFOtF/Yr9nyzMcdd6GN
+ZrHkgM2NaQ/13tTbLJ84wXRQjS9FSqFmDmmgbEARiyEf0K8D9lxI0bg=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/interCA1_crl.pem b/reg-tests/ssl/interCA1_crl.pem
new file mode 100644
index 0000000..b4b8b03
--- /dev/null
+++ b/reg-tests/ssl/interCA1_crl.pem
@@ -0,0 +1,27 @@
+-----BEGIN X509 CRL-----
+MIIBpDCBjTANBgkqhkiG9w0BAQsFADBHMQswCQYDVQQGEwJGUjEdMBsGA1UECgwU
+SEFQcm94eSBUZWNobm9sb2dpZXMxGTAXBgNVBAMMEEludGVybWVkaWF0ZSBDQTEX
+DTIxMDQyMzE0MzYyNloXDTQ4MDkwODE0MzYyNlowFTATAgIQBxcNMjEwNDIzMTQz
+NjE1WjANBgkqhkiG9w0BAQsFAAOCAQEAi9NKPoPVgYo68ZvTJP2STnFLk71bLoB/
+PbQsM7gpJvgStmLs2lVpSxL2y5CUEG8Ok73yNkQIcOZq9DAXVL/49QHXQOZ0pPMD
+XbUn5py3mEQfuuM46n3wPP8lDgbfbDMPxs2yDf7FZKQQpxBVBq9H3m+nc3RIPP9B
+3kDvYuo3PeRlqkzIdP9ceEfBGY8+cOfvPHFzLl+BEeUI2dhhdRxtWUrfPXfLXSks
+TWp0hgu9vFHguuV3mZGcxzxvdRTsq/vu0rxg2aqGOVJyNhshRn14Tt7z9uQty4Qr
+vrkvpoKVzq93bI6ITRzXlgKKzXK70wGm8tAfbioNostRJ6/gcjaWAg==
+-----END X509 CRL-----
+-----BEGIN X509 CRL-----
+MIICgzBtMA0GCSqGSIb3DQEBCwUAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQRcNMjEwNDIzMTQz
+MDQ0WhcNNDgwOTA4MTQzMDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAa39JkwPzmyPc
+1SY8HfJjrkvvaIO4qV/lMUzYjg6yxuTw6g7hoH0fyxK+2+RCoplXwFS7NTDG+jS1
+H3sZWvTg/aY3g4SRudJGSxeqT2a43+U4QjmTm8uClXAA7tuOcz+wSXP7sDGQ0kyg
+PCQGGmiOL5Q2lpziVRuWTHVmUkH48Na7Lyeq5cCry2AwAPjUQtcUiivuASjhUGXh
+Gya8gPV5MXNVq5T2WcZWJnkMGbWLvSFAm5POX1i8c3o2rlIjoYws/VAwOi6wqe9K
+NGNNUUXbOhyqocbzhZvWreyNUx63Pk4TxAAHwSn7H7fJe0yzfqjbZRF3KHCBPRbB
+NBOCYr5YKH6i1xQbrEGzj8+jrXWVvpYF0FXkjzO78I5c366HXPwBCPEsoIYlgjBN
++AqgiyB7xGWiRa2SZVPn+j6wHWdohar6zw4UIBLS7EIjvb/RAM6SduScIbc9l+0I
+VrciCgPScQXbkxLoh0sVBPdfR0cU08JNG+eZ8gUNce8PKdWO2mnFSNiaEz6ESgRn
+0j7q+s4V15LX/vkKyA6u2JTYZCJi23DDBzZp57sgXsQwzp50lkoFmNNASXdURJL3
+60PvFGxzBDgOUhq4yTRbz945SF3jwF9CEl9kFOffOHigwrcbKS0Cib2ac+IpXYrE
+BCm2lSjFCK0p/mAgK8yiNQFKnCAgsXU=
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/interCA1_crl_empty.pem b/reg-tests/ssl/interCA1_crl_empty.pem
new file mode 100644
index 0000000..ce9e0f4
--- /dev/null
+++ b/reg-tests/ssl/interCA1_crl_empty.pem
@@ -0,0 +1,27 @@
+-----BEGIN X509 CRL-----
+MIIBjDB2MA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEZMBcGA1UEAwwQSW50ZXJtZWRpYXRlIENBMRcN
+MjEwNDIzMTQzNDI3WhcNNDgwOTA4MTQzNDI3WjANBgkqhkiG9w0BAQsFAAOCAQEA
+As2A6ys84+wpG7vpjDnxD1Pri45M3sxn9Wb0v7kLBV1AsRADE6bquKYH73pUwsQB
+FmfpgE6KfagM5d/1Pap9rV0PuMc3n8Uc0Q1c30AhHELlzObEzVVeT2WTHBm5XPWn
++jY5eijsWp9NCivDwzCXP/dEXxjXPtO0l4Mctq1vv5c1Ipq9FczpfDiJJMFh7Pxy
+uVDhsnfx6uel851NxA5h2US0tQLbL/50t6vtdAgWaQdUprQwFc9oLStePbLEzT43
+zOQh7DIx0hZltd0p+OqQnfZFR0P/TlLLMGE/HZSzNi0L7WOwuG3MtrFzJwV/wqT+
+AH08jXjU5Pc3XThatlTvGA==
+-----END X509 CRL-----
+-----BEGIN X509 CRL-----
+MIICgzBtMA0GCSqGSIb3DQEBCwUAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQRcNMjEwNDIzMTQz
+MDQ0WhcNNDgwOTA4MTQzMDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAa39JkwPzmyPc
+1SY8HfJjrkvvaIO4qV/lMUzYjg6yxuTw6g7hoH0fyxK+2+RCoplXwFS7NTDG+jS1
+H3sZWvTg/aY3g4SRudJGSxeqT2a43+U4QjmTm8uClXAA7tuOcz+wSXP7sDGQ0kyg
+PCQGGmiOL5Q2lpziVRuWTHVmUkH48Na7Lyeq5cCry2AwAPjUQtcUiivuASjhUGXh
+Gya8gPV5MXNVq5T2WcZWJnkMGbWLvSFAm5POX1i8c3o2rlIjoYws/VAwOi6wqe9K
+NGNNUUXbOhyqocbzhZvWreyNUx63Pk4TxAAHwSn7H7fJe0yzfqjbZRF3KHCBPRbB
+NBOCYr5YKH6i1xQbrEGzj8+jrXWVvpYF0FXkjzO78I5c366HXPwBCPEsoIYlgjBN
++AqgiyB7xGWiRa2SZVPn+j6wHWdohar6zw4UIBLS7EIjvb/RAM6SduScIbc9l+0I
+VrciCgPScQXbkxLoh0sVBPdfR0cU08JNG+eZ8gUNce8PKdWO2mnFSNiaEz6ESgRn
+0j7q+s4V15LX/vkKyA6u2JTYZCJi23DDBzZp57sgXsQwzp50lkoFmNNASXdURJL3
+60PvFGxzBDgOUhq4yTRbz945SF3jwF9CEl9kFOffOHigwrcbKS0Cib2ac+IpXYrE
+BCm2lSjFCK0p/mAgK8yiNQFKnCAgsXU=
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/interCA2_crl.pem b/reg-tests/ssl/interCA2_crl.pem
new file mode 100644
index 0000000..798096c
--- /dev/null
+++ b/reg-tests/ssl/interCA2_crl.pem
@@ -0,0 +1,27 @@
+-----BEGIN X509 CRL-----
+MIIBpDCBjTANBgkqhkiG9w0BAQsFADBHMQswCQYDVQQGEwJGUjEdMBsGA1UECgwU
+SEFQcm94eSBUZWNobm9sb2dpZXMxGTAXBgNVBAMMEEludGVybWVkaWF0ZSBDQTIX
+DTIxMDQyMzE0NDUzOVoXDTQ4MDkwODE0NDUzOVowFTATAgIQCBcNMjEwNDIzMTQ0
+NTM2WjANBgkqhkiG9w0BAQsFAAOCAQEAdD35Sf47YUxG6GXiMsT4jFY0hXWgc8QS
+vR6gx6MQkWFV973ALVe1bfIXBGLZ2bTU/IppFUEJxVtyXyMCJIIpdYHirF1Y7kTi
+DLVuWE4I0ZnDSF4LI5g73dYciKeVCq+ZvKx2dZ7Y37pKqNYvhVwp+HwtB4536XvQ
+m7WjFYJFFR71gAscGky621XiRflQoGvpCOVRiJxFQFYRWRA+eR+vjQ4NTYvotDKe
+O9ejZNEpfTeil+wxi5h38GVIBa2aocMVLIu5o0EQGg8d0SEU46rJKowaUz7kESuf
+Al4jnmsb1W8LSD9Agp4GQE8pV2d42kXwpWk/JrUovHRPV2vy5PQuGA==
+-----END X509 CRL-----
+-----BEGIN X509 CRL-----
+MIICgzBtMA0GCSqGSIb3DQEBCwUAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQRcNMjEwNDIzMTQz
+MDQ0WhcNNDgwOTA4MTQzMDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAa39JkwPzmyPc
+1SY8HfJjrkvvaIO4qV/lMUzYjg6yxuTw6g7hoH0fyxK+2+RCoplXwFS7NTDG+jS1
+H3sZWvTg/aY3g4SRudJGSxeqT2a43+U4QjmTm8uClXAA7tuOcz+wSXP7sDGQ0kyg
+PCQGGmiOL5Q2lpziVRuWTHVmUkH48Na7Lyeq5cCry2AwAPjUQtcUiivuASjhUGXh
+Gya8gPV5MXNVq5T2WcZWJnkMGbWLvSFAm5POX1i8c3o2rlIjoYws/VAwOi6wqe9K
+NGNNUUXbOhyqocbzhZvWreyNUx63Pk4TxAAHwSn7H7fJe0yzfqjbZRF3KHCBPRbB
+NBOCYr5YKH6i1xQbrEGzj8+jrXWVvpYF0FXkjzO78I5c366HXPwBCPEsoIYlgjBN
++AqgiyB7xGWiRa2SZVPn+j6wHWdohar6zw4UIBLS7EIjvb/RAM6SduScIbc9l+0I
+VrciCgPScQXbkxLoh0sVBPdfR0cU08JNG+eZ8gUNce8PKdWO2mnFSNiaEz6ESgRn
+0j7q+s4V15LX/vkKyA6u2JTYZCJi23DDBzZp57sgXsQwzp50lkoFmNNASXdURJL3
+60PvFGxzBDgOUhq4yTRbz945SF3jwF9CEl9kFOffOHigwrcbKS0Cib2ac+IpXYrE
+BCm2lSjFCK0p/mAgK8yiNQFKnCAgsXU=
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/interCA2_crl_empty.pem b/reg-tests/ssl/interCA2_crl_empty.pem
new file mode 100644
index 0000000..175528b
--- /dev/null
+++ b/reg-tests/ssl/interCA2_crl_empty.pem
@@ -0,0 +1,27 @@
+-----BEGIN X509 CRL-----
+MIIBjDB2MA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEZMBcGA1UEAwwQSW50ZXJtZWRpYXRlIENBMhcN
+MjEwNDIzMTQ0NTE2WhcNNDgwOTA4MTQ0NTE2WjANBgkqhkiG9w0BAQsFAAOCAQEA
+IriCgDMzPowZl99/LoDW42xKFL5Db9mdPPNMY1Xk/6BowIhugz2vP5z38Ryfxy8B
+f1IFaGSf6Twl+F1RHv8twHMi4Vf8hbzPG4PRoEhy0gvzbD8YBtaV/GPyJY8iQt2o
+nuecskDhRp/D2YU5GXy90BMwBfH89yGPW4fUpFn3/83fZ1hhvkewTQedcLihxWGC
+KPuuWyrIN8qw/VKLARlXoFPIqyEdqttJliR1/GHej5iY1msMCftUQpC5sowse3B7
+F2oNySIPxm4jZ+QBrtMNbY7E1EHDBjcLInAfY17fKs6P0HytInBOhpeqz3Jcft2i
+b3qzM/7Ac5k6KPXs/UplMg==
+-----END X509 CRL-----
+-----BEGIN X509 CRL-----
+MIICgzBtMA0GCSqGSIb3DQEBCwUAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQKDBRI
+QVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQRcNMjEwNDIzMTQz
+MDQ0WhcNNDgwOTA4MTQzMDQ0WjANBgkqhkiG9w0BAQsFAAOCAgEAa39JkwPzmyPc
+1SY8HfJjrkvvaIO4qV/lMUzYjg6yxuTw6g7hoH0fyxK+2+RCoplXwFS7NTDG+jS1
+H3sZWvTg/aY3g4SRudJGSxeqT2a43+U4QjmTm8uClXAA7tuOcz+wSXP7sDGQ0kyg
+PCQGGmiOL5Q2lpziVRuWTHVmUkH48Na7Lyeq5cCry2AwAPjUQtcUiivuASjhUGXh
+Gya8gPV5MXNVq5T2WcZWJnkMGbWLvSFAm5POX1i8c3o2rlIjoYws/VAwOi6wqe9K
+NGNNUUXbOhyqocbzhZvWreyNUx63Pk4TxAAHwSn7H7fJe0yzfqjbZRF3KHCBPRbB
+NBOCYr5YKH6i1xQbrEGzj8+jrXWVvpYF0FXkjzO78I5c366HXPwBCPEsoIYlgjBN
++AqgiyB7xGWiRa2SZVPn+j6wHWdohar6zw4UIBLS7EIjvb/RAM6SduScIbc9l+0I
+VrciCgPScQXbkxLoh0sVBPdfR0cU08JNG+eZ8gUNce8PKdWO2mnFSNiaEz6ESgRn
+0j7q+s4V15LX/vkKyA6u2JTYZCJi23DDBzZp57sgXsQwzp50lkoFmNNASXdURJL3
+60PvFGxzBDgOUhq4yTRbz945SF3jwF9CEl9kFOffOHigwrcbKS0Cib2ac+IpXYrE
+BCm2lSjFCK0p/mAgK8yiNQFKnCAgsXU=
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/localhost.crt-list b/reg-tests/ssl/localhost.crt-list
new file mode 100644
index 0000000..a0d9756
--- /dev/null
+++ b/reg-tests/ssl/localhost.crt-list
@@ -0,0 +1,5 @@
+common.pem !not.test1.com *.test1.com !localhost # comment
+
+
+ common.pem !not.test1.com *.test1.com !localhost
+# comment
diff --git a/reg-tests/ssl/log_forward_ssl.vtc b/reg-tests/ssl/log_forward_ssl.vtc
new file mode 100644
index 0000000..6b7515b
--- /dev/null
+++ b/reg-tests/ssl/log_forward_ssl.vtc
@@ -0,0 +1,60 @@
+varnishtest "Test the TCP+SSL load-forward"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.3-dev1)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 500 -start
+
+syslog Slg1 -level info {
+ recv
+ expect ~ "[^:\\[ ]\\[${h1_pid}\\]: .* \"GET /client_c1 HTTP/1.1\""
+} -repeat 50 -start
+
+haproxy h1 -conf {
+ global
+ insecure-fork-wanted
+ defaults
+ mode http
+ option httplog
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend fe1
+ bind "fd@${fe_1}"
+ log 127.0.0.1:1514 local0
+# log ${Slg1_addr}:${Slg1_port} local0
+ default_backend be
+
+ backend be
+ server app1 ${s1_addr}:${s1_port}
+
+ ring myring
+ description "My local buffer"
+ format rfc5424
+ maxlen 1200
+ size 32764
+ timeout connect 5s
+ timeout server 10s
+ # syslog tcp server
+ server mysyslogsrv 127.0.0.1:2514 ssl verify none
+
+ log-forward syslog2tcp
+ dgram-bind 127.0.0.1:1514
+ log ring@myring local0 # To TCP log
+
+ log-forward syslog2local
+ bind 127.0.0.1:2514 ssl crt ${testdir}/common.pem
+ log ${Slg1_addr}:${Slg1_port} local0 # To VTest syslog
+} -start
+
+client c1 -connect ${h1_fe_1_sock} {
+ txreq -url "/client_c1"
+ rxresp
+ expect resp.status == 200
+} -repeat 50 -start
+
+syslog Slg1 -wait
diff --git a/reg-tests/ssl/new_del_ssl_cafile.vtc b/reg-tests/ssl/new_del_ssl_cafile.vtc
new file mode 100644
index 0000000..2123fb0
--- /dev/null
+++ b/reg-tests/ssl/new_del_ssl_cafile.vtc
@@ -0,0 +1,157 @@
+#REGTEST_TYPE=devel
+
+# This test uses the "new ssl ca-file" and "del ssl ca-file" commands to create
+# a new CA file or delete an unused CA file.
+#
+# It requires socat to upload the CA file.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'new ssl ca-file' and 'del ssl ca-file' commands of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 2 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ use_backend with_ca_be if { path /with-ca }
+ default_backend default_be
+
+ backend default_be
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/set_cafile_client.pem sni str(www.test1.com)
+
+ backend with_ca_be
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/set_cafile_client.pem sni str(with-ca.com)
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/localhost.crt-list ca-verify-file ${testdir}/set_cafile_rootCA.crt ca-file ${testdir}/set_cafile_interCA2.crt verify required crt-ignore-err all
+ http-response add-header X-SSL-Client-Verify %[ssl_c_verify]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# Request using the default backend and the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # The CA file known by the frontend does not allow to verify the client's certificate
+ expect resp.http.X-SSL-Client-Verify ~ "20|21"
+} -run
+
+# This connection should fail because the with-ca.com sni is not mentioned in the crt-list yet.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-ca"
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Create a new unlinked CA file
+haproxy h1 -cli {
+ send "new ssl ca-file new_cafile.crt"
+ expect ~ "New CA file created 'new_cafile.crt'!"
+}
+
+shell {
+ printf "set ssl ca-file new_cafile.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file new_cafile.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+# Remove the unlinked CA file and create a new one with the "add ssl ca-file" method
+
+haproxy h1 -cli {
+ send "del ssl ca-file new_cafile.crt"
+ expect ~ "CA file 'new_cafile.crt' deleted!"
+
+ send "new ssl ca-file new_cafile.crt"
+ expect ~ "New CA file created 'new_cafile.crt'!"
+}
+
+shell {
+ printf "add ssl ca-file new_cafile.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file new_cafile.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+shell {
+ printf "set ssl ca-file new_cafile.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file new_cafile.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ ".*new_cafile.crt - 1 certificate.*"
+
+ send "show ssl ca-file new_cafile.crt"
+ expect ~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
+}
+
+# The new CA file is still not linked anywhere so the request should fail.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-ca"
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Add a new certificate that will use the new CA file
+shell {
+ echo "new ssl cert ${testdir}/set_cafile_server.pem" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/set_cafile_server.pem <<\n$(cat ${testdir}/set_cafile_server.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/set_cafile_server.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+# Create a new crt-list line that will use the new CA file
+shell {
+ printf "add ssl crt-list ${testdir}/localhost.crt-list <<\n${testdir}/set_cafile_server.pem [ca-file new_cafile.crt] with-ca.com\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-ca"
+ rxresp
+ expect resp.status == 200
+ # Thanks to the newly added CA file, the client's certificate can be verified
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
+
+# Delete the newly added crt-list line and CA file
+haproxy h1 -cli {
+ send "del ssl crt-list ${testdir}/localhost.crt-list ${testdir}/set_cafile_server.pem"
+ expect ~ "Entry '${testdir}/set_cafile_server.pem' deleted in crtlist '${testdir}/localhost.crt-list'!"
+
+ send "del ssl ca-file new_cafile.crt"
+ expect ~ "CA file 'new_cafile.crt' deleted!"
+
+ send "show ssl ca-file"
+ expect !~ "new_cafile.crt"
+}
+
+# The connection should now fail since the crt-list line was deleted
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-ca"
+ rxresp
+ expect resp.status == 503
+} -run
+
diff --git a/reg-tests/ssl/new_del_ssl_crlfile.vtc b/reg-tests/ssl/new_del_ssl_crlfile.vtc
new file mode 100644
index 0000000..8658a1a
--- /dev/null
+++ b/reg-tests/ssl/new_del_ssl_crlfile.vtc
@@ -0,0 +1,139 @@
+#REGTEST_TYPE=devel
+
+# This test uses the "new ssl crl-file" and "del ssl crl-file" commands to create
+# a new CRL file or delete an unused CRL file.
+#
+# It requires socat to upload the CRL file.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'new ssl crl-file' and 'del ssl crl-file' commands of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ use_backend with_crl_be if { path /with-crl }
+ default_backend default_be
+
+ backend default_be
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client3_revoked.pem sni str(www.test1.com)
+
+ backend with_crl_be
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client3_revoked.pem sni str(with-crl.com)
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/localhost.crt-list ca-file ${testdir}/ca-auth.crt verify required crt-ignore-err all
+ http-response add-header X-SSL-Client-Verify %[ssl_c_verify]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# Request using the default backend and the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # The backend has no CRL so the connection should succeed
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
+
+# This connection should fail because the with-crl.com sni is not mentioned in the crt-list yet.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-crl"
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Create a new unlinked CRL file
+haproxy h1 -cli {
+ send "new ssl crl-file new_crlfile.crt"
+ expect ~ "New CRL file created 'new_crlfile.crt'!"
+}
+
+shell {
+ printf "set ssl crl-file new_crlfile.crt <<\n$(cat ${testdir}/crl-auth.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl crl-file new_crlfile.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl crl-file"
+ expect ~ ".*new_crlfile.crt"
+
+ send "show ssl crl-file new_crlfile.crt"
+ expect ~ ".*Issuer:.*/CN=HAProxy Technologies CA Test Client Auth"
+}
+
+# Add a new certificate that will use the new CRL file
+shell {
+ echo "new ssl cert ${testdir}/set_cafile_server.pem" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/set_cafile_server.pem <<\n$(cat ${testdir}/set_cafile_server.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/set_cafile_server.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+# Create a new crt-list line that will use the new CRL file
+shell {
+ printf "add ssl crt-list ${testdir}/localhost.crt-list <<\n${testdir}/set_cafile_server.pem [crl-file new_crlfile.crt] with-crl.com\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-crl"
+ rxresp
+ expect resp.status == 200
+ # The frontend's certificate is revoked in the newly added CRL, connection should fail
+ expect resp.http.X-SSL-Client-Verify == 23
+} -run
+
+# Request using the default backend and the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # The backend has no CRL for this SNI so the connection should still succeed
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
+
+# Delete the newly added crt-list line and CRL file
+haproxy h1 -cli {
+ send "del ssl crt-list ${testdir}/localhost.crt-list ${testdir}/set_cafile_server.pem"
+ expect ~ "Entry '${testdir}/set_cafile_server.pem' deleted in crtlist '${testdir}/localhost.crt-list'!"
+
+ send "del ssl crl-file new_crlfile.crt"
+ expect ~ "CRL file 'new_crlfile.crt' deleted!"
+
+ send "show ssl crl-file"
+ expect !~ "new_crlfile.crt"
+}
+
+# The connection should now fail since the crt-list line was deleted
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/with-crl"
+ rxresp
+ expect resp.status == 503
+} -run
+
diff --git a/reg-tests/ssl/ocsp_auto_update.vtc b/reg-tests/ssl/ocsp_auto_update.vtc
new file mode 100644
index 0000000..2ab4a4a
--- /dev/null
+++ b/reg-tests/ssl/ocsp_auto_update.vtc
@@ -0,0 +1,718 @@
+#REGTEST_TYPE=slow
+
+# broken with BoringSSL.
+
+# This reg-test focuses on the OCSP response auto-update functionality. It does
+# not test the full scope of the feature because most of it is based on
+# expiration times and long delays between updates of valid OCSP responses.
+# Automatic update of valid OCSP responses loaded during init will not be
+# tested because by design, such a response would not be automatically updated
+# until init+1H.
+#
+# This test will then focus on certificates that have a specified OCSP URI but
+# no known OCSP response. For those certificates, OCSP requests are sent as
+# soon as possible by the update task.
+#
+# The ocsp responder used in all the tests will be an openssl using the
+# certificate database in ocsp_update/index.txt. It will listen on port 12346
+# which is not the same as the one specified in the certificates' OCSP URI
+# which points to port 12345. The link from port 12345 to port 12346 will be
+# ensured through HAProxy instances that will enable logs, later used as a
+# synchronization means.
+#
+# Unfortunately some arbitrary "sleep" calls are still needed to leave some
+# time for the ocsp update task to actually process the ocsp responses and
+# reinsert them into the tree. This explains why the test's mode is set to
+# "slow".
+#
+# The fourth test case focuses on the "update ssl ocsp-response" CLI command
+# and tests two certificates that have a known OCSP response loaded during init
+# but no OCSP auto update. The only difference between the two certificates is
+# that one has a separate .issuer file while the other one has the issuer
+# certificate directly in the main .pem file.
+#
+# If this test does not work anymore:
+# - Check that you have openssl and socat
+
+varnishtest "Test the OCSP auto update feature"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.7-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(BoringSSL) && openssl_version_atleast(1.1.1)'"
+feature cmd "command -v openssl && command -v socat"
+feature ignore_unknown_macro
+
+
+###################
+# #
+# FIRST TEST CASE #
+# #
+###################
+
+# No automatic update should occur in this test case since we load two already
+# valid OCSP responses during init which have a "Next Update" date really far
+# in the future. So they should only be updated after one hour.
+# This test will only be the most basic one where we check that ocsp response
+# loading still works as expected.
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-fe
+ bind "${tmpdir}/ssl.sock" ssl crt multicert/server_ocsp.pem ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+} -start
+
+
+# We should have two distinct ocsp responses known that were loaded at build time
+haproxy h1 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ expect ~ "Cert Status: revoked"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+ expect ~ "Cert Status: good"
+}
+
+haproxy h1 -wait
+
+
+
+####################
+# #
+# SECOND TEST CASE #
+# #
+####################
+
+# This test will focus on two separate certificates that have the same OCSP uri
+# (http://ocsp.haproxy.com:12345) but no OCSP response loaded at build time.
+# The update mode is set to 'on' in the two crt-lists used. The two ocsp
+# responses should then be fetched automatically after init. We use an http
+# listener as a rebound on which http log is enabled towards Syslog_http. This
+# ensures that two requests are sent by the ocsp auto update task and it
+# enables to use a barrier to synchronize the ocsp task and the subsequent cli
+# calls. Thanks to the barrier we know that when calling "show ssl
+# ocsp-response" on the cli, the two answers should already have been received
+# and processed.
+
+process p1 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 2 -ndays 1 -port 12346 -timeout 5" -start
+
+barrier b1 cond 2 -cyclic
+
+syslog Syslog_http -level info {
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAV HTTP/1.1"
+
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAW HTTP/1.1"
+
+ barrier b1 sync
+} -start
+
+haproxy h2 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h2/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-rsa-fe
+ bind "${tmpdir}/ssl2.sock" ssl crt-list ${testdir}/ocsp_update/multicert_rsa.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ frontend ssl-ecdsa-fe
+ bind "${tmpdir}/ssl3.sock" ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ log ${Syslog_http_addr}:${Syslog_http_port} local0
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+barrier b1 sync
+
+shell "sleep 1"
+
+# We should have two distinct ocsp IDs known that were loaded at build time and
+# the responses' contents should have been filled automatically by the ocsp
+# update task after init
+haproxy h2 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ expect ~ "Cert Status: revoked"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+ expect ~ "Cert Status: revoked"
+}
+
+haproxy h2 -wait
+process p1 -wait -expect-exit 0
+
+
+###################
+# #
+# THIRD TEST CASE #
+# #
+###################
+
+# This test will be roughly the same as the second one but one of the crt-lists
+# will not enable ocsp-update on its certificate. Only one request should then
+# be sent.
+
+process p2 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 1 -ndays 1 -port 12346 -timeout 5" -start
+
+barrier b2 cond 2 -cyclic
+
+syslog Syslog_http2 -level info {
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAV HTTP/1.1"
+
+ barrier b2 sync
+} -start
+
+haproxy h3 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h3/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-rsa-fe
+ bind "${tmpdir}/ssl4.sock" ssl crt-list ${testdir}/ocsp_update/multicert_rsa.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ frontend ssl-ecdsa-fe
+ bind "${tmpdir}/ssl5.sock" ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa_no_update.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ log ${Syslog_http2_addr}:${Syslog_http2_port} local0
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+barrier b2 sync
+
+shell "sleep 1"
+
+# We should have a single ocsp ID known that was loaded at build time and the
+# response should be filled
+haproxy h3 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ send "show ssl ocsp-response"
+ expect !~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015"
+ expect ~ "Cert Status: revoked"
+}
+
+haproxy h3 -wait
+process p2 -wait
+
+
+
+####################
+# #
+# FOURTH TEST CASE #
+# (CLI COMMAND) #
+# #
+####################
+
+process p3 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 2 -ndays 1 -port 12346 -timeout 5" -start
+
+haproxy h4 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h4/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-rsa-ocsp
+ bind "${tmpdir}/ssl5.sock" ssl crt ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ frontend ssl-ecdsa-ocsp
+ bind "${tmpdir}/ssl6.sock" ssl crt ${testdir}/ocsp_update/multicert/server_ocsp_ecdsa.pem ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ bind "127.0.0.1:12345"
+ http-response set-var(proc.processed) int(1)
+ server s1 "127.0.0.1:12346"
+} -start
+
+# We need to "enable" the cli with a first cli call before using it only through socats
+haproxy h4 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+}
+
+# We should have two OCSP responses loaded during init
+shell {
+ responses=$(echo "show ssl ocsp-response" | socat "${tmpdir}/h4/stats" -)
+
+ [ $(echo "$responses" | grep -c "^Certificate ID key") -eq 2 ] && \
+ echo "$responses" | grep "Serial Number: 1016" && \
+ echo "$responses" | grep "Serial Number: 1015"
+}
+
+shell {
+ echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015" | socat "${tmpdir}/h4/stats" - | grep "Cert Status: revoked"
+}
+
+shell {
+ echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016" | socat "${tmpdir}/h4/stats" - | grep "Cert Status: good"
+}
+
+# Update the first ocsp response (ckch_data has a non-NULL ocsp_issuer pointer)
+shell {
+ # Store the current "Produced At" in order to ensure that after the update
+ # the OCSP response actually changed
+ produced_at=$(echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015" | socat "${tmpdir}/h4/stats" - | grep "Produced At")
+
+ echo "update ssl ocsp-response ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa" | socat "${tmpdir}/h4/stats" -
+ while ! echo "get var proc.processed" | socat "${tmpdir}/h4/stats" - | grep 'proc.processed: type=sint value=<1>'
+ do
+ echo "get var proc.processed" | socat "${tmpdir}/h4/stats" - >> /tmp/toto
+ sleep 0.5
+ done
+
+ echo "experimental-mode on;set var proc.processed int(0)" | socat "${tmpdir}/h4/stats" -
+
+ ocsp_response=$(echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015" | socat "${tmpdir}/h4/stats" -)
+ new_produced_at=$(echo "$ocsp_response" | grep "Produced At")
+
+ echo "$ocsp_response" | grep -q "Serial Number: 1015" && \
+ echo "$ocsp_response" | grep -q "Cert Status: revoked" && \
+ [ "$new_produced_at" != "$produced_at" ]
+}
+
+# Update the second ocsp response (ckch_data has a NULL ocsp_issuer pointer)
+shell {
+ # Store the current "Produced At" in order to ensure that after the update
+ # the OCSP response actually changed
+ produced_at=$(echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016" | socat "${tmpdir}/h4/stats" - | grep "Produced At")
+
+ echo "update ssl ocsp-response ${testdir}/ocsp_update/multicert/server_ocsp_ecdsa.pem" | socat "${tmpdir}/h4/stats" -
+ while ! echo "get var proc.processed" | socat "${tmpdir}/h4/stats" - | grep 'proc.processed: type=sint value=<1>'
+ do
+ echo "get var proc.processed" | socat "${tmpdir}/h4/stats" - >> /tmp/toto
+ sleep 0.5
+ done
+
+ echo "experimental-mode on;set var proc.processed int(0)" | socat "${tmpdir}/h4/stats" -
+
+ ocsp_response=$(echo "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016" | socat "${tmpdir}/h4/stats" -)
+ new_produced_at=$(echo "$ocsp_response" | grep "Produced At")
+
+ echo "$ocsp_response" | grep -q "Serial Number: 1016" && \
+ echo "$ocsp_response" | grep -q "Cert Status: revoked" && \
+ [ "$new_produced_at" != "$produced_at" ]
+}
+
+haproxy h4 -wait
+process p3 -wait
+
+
+####################
+# #
+# FIFTH TEST CASE #
+# (CLI COMMAND) #
+# #
+####################
+
+# Test the "show ssl ocsp-updates" command as well as the new 'base64' parameter
+# to the "show ssl ocsp-response" command.
+
+
+process p5 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 2 -ndays 1 -port 12346 -timeout 5" -start
+
+barrier b5 cond 2 -cyclic
+
+syslog Syslog_http5 -level info {
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAV HTTP/1.1"
+
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAW HTTP/1.1"
+
+ barrier b5 sync
+} -start
+
+haproxy h5 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h5/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-rsa-fe
+ bind "${tmpdir}/ssl7.sock" ssl crt-list ${testdir}/ocsp_update/multicert_rsa.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ frontend ssl-ecdsa-fe
+ bind "${tmpdir}/ssl8.sock" ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ log ${Syslog_http5_addr}:${Syslog_http5_port} local0
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+barrier b5 sync
+
+shell "sleep 1"
+
+# Use "show ssl ocsp-updates" CLI command
+# We should have one line per OCSP response and each one of them should have been successfully updated once
+# The command's output follows this format:
+# OCSP Certid | Next Update | Last Update | Successes | Failures | Last Update Status | Last Update Status (str)
+haproxy h5 -cli {
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015 .*| 1 | 0 | 1 | Update successful"
+
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016 .*| 1 | 0 | 1 | Update successful"
+}
+
+# Use "show ssl ocsp-response" command to dump an OCSP response in base64
+shell {
+ ocsp_resp_file="${tmpdir}.ocsp_resp.der"
+
+ echo "show ssl ocsp-response base64 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015" | socat "${tmpdir}/h5/stats" - | base64 -d > $ocsp_resp_file
+
+ if [ $? -eq 0 ]
+ then
+ ocsp_resp_txt="$(openssl ocsp -respin $ocsp_resp_file -noverify -text)"
+ echo "$ocsp_resp_txt" | grep "Issuer Name Hash: 8A83E0060FAFF709CA7E9B95522A2E81635FDA0A" && \
+ echo "$ocsp_resp_txt" | grep "Issuer Key Hash: F652B0E435D5EA923851508F0ADBE92D85DE007A" && \
+ echo "$ocsp_resp_txt" | grep "Serial Number: 1015" && \
+ echo "$ocsp_resp_txt" | grep "Cert Status: revoked"
+ else
+ return 1
+ fi
+}
+
+haproxy h5 -wait
+process p5 -wait
+
+
+####################
+# #
+# SIXTH TEST CASE #
+# #
+####################
+
+# Check that a new certificate added via the CLI to a crt-list with
+# the 'ocsp-update on' option will be taken into account by the OCSP
+# auto update task
+#
+process p6 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 1 -ndays 1 -port 12346 -timeout 5" -start
+
+barrier b6 cond 2 -cyclic
+
+syslog Syslog_http6 -level info {
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAV HTTP/1.1"
+
+ barrier b6 sync
+} -start
+
+haproxy h6 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h6/stats" level admin
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-fe
+ bind "${tmpdir}/ssl9.sock" ssl crt-list ${testdir}/simple.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ log ${Syslog_http6_addr}:${Syslog_http6_port} local0
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+# We need to "enable" the cli with a first cli call before using it only through socats
+haproxy h6 -cli {
+ send "show ssl cert"
+ expect ~ ""
+}
+
+# Create a new certificate that has an OCSP uri and add it to the
+# existing CLI with the 'ocsp-update on' command.
+shell {
+ echo "new ssl cert ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa" | socat "${tmpdir}/h6/stats" -
+ printf "set ssl cert ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa <<\n$(cat ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa)\n\n" | socat "${tmpdir}/h6/stats" -
+ printf "set ssl cert ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa.issuer <<\n$(cat ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa.issuer)\n\n" | socat "${tmpdir}/h6/stats" -
+ echo "commit ssl cert ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa" | socat "${tmpdir}/h6/stats" -
+
+ printf "add ssl crt-list ${testdir}/simple.crt-list <<\n${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa [ocsp-update on] foo.com\n\n" | socat "${tmpdir}/h6/stats" -
+}
+
+barrier b6 sync
+
+shell "sleep 1"
+
+haproxy h6 -cli {
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016 .*| 1 | 0 | 1 | Update successful"
+}
+
+haproxy h6 -wait
+process p6 -wait
+
+
+######################
+# #
+# SEVENTH TEST CASE #
+# #
+######################
+
+#
+# Check that removing crt-list instances does not remove the OCSP responses
+# from the tree but that they will not be auto updated anymore if the last
+# instance is removed (via del ssl crt-list).
+#
+
+haproxy h7 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h7/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-fe
+ bind "${tmpdir}/ssl-h7.sock" ssl crt-list ${testdir}/ocsp_update/multicert_both_certs.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+# Check that the two certificates are taken into account in the auto update process
+haproxy h7 -cli {
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015 .*"
+
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016 .*"
+}
+
+# Remove the second line from the crt-list and check that the corresponding
+# ocsp response was removed from the auto update list but is still present in the
+# system
+haproxy h7 -cli {
+ send "del ssl crt-list ${testdir}/ocsp_update/multicert_both_certs.crt-list ${testdir}/ocsp_update/multicert/server_ocsp.pem.ecdsa"
+ expect ~ "Entry.*deleted in crtlist"
+
+ send "show ssl ocsp-updates"
+ expect !~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016 .*"
+
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016"
+
+ send "show ssl ocsp-response ${testdir}/ocsp_update/multicert/server_ocsp.pem.ecdsa"
+ expect ~ ".* Cert Status: good.*"
+}
+
+# Add the previously removed crt-list line with auto-update enabled and check that
+# the ocsp response appears in the auto update list
+shell {
+ printf "add ssl crt-list ${testdir}/ocsp_update/multicert_both_certs.crt-list <<\nmulticert/server_ocsp.pem.ecdsa [ocsp-update on] foo.bar\n\n" | socat "${tmpdir}/h7/stats" - | grep "Inserting certificate.*in crt-list"
+}
+
+haproxy h7 -cli {
+ send "show ssl ocsp-updates"
+ expect ~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021016 .*"
+}
+
+# Check that the auto update option consistency check work even when crt-list
+# lines are added through the cli
+shell {
+ printf "add ssl crt-list ${testdir}/ocsp_update/multicert_both_certs.crt-list <<\nmulticert/server_ocsp.pem.ecdsa foo.foo\n\n" | socat "${tmpdir}/h7/stats" - | grep "Incompatibilities found in OCSP update mode for certificate"
+}
+
+haproxy h7 -wait
+
+####################
+# #
+# EIGTH TEST CASE #
+# #
+####################
+
+#
+# Check that a certificate created through the CLI and which does not have ocsp
+# update enabled can be updated via "update ssl ocsp-response" command.
+#
+
+process p8 "openssl ocsp -index ${testdir}/ocsp_update/index.txt -rsigner ${testdir}/ocsp_update/ocsp.haproxy.com.pem -CA ${testdir}/ocsp_update/ocsp_update_rootca.crt -nrequest 1 -ndays 1 -port 12346 -timeout 5" -start
+
+barrier b8 cond 2 -cyclic
+
+syslog Syslog_h8 -level info {
+ recv
+ expect ~ "GET /MEMwQTA%2FMD0wOzAJBgUrDgMCGgUABBSKg%2BAGD6%2F3Ccp%2Bm5VSKi6BY1%2FaCgQU9lKw5DXV6pI4UVCPCtvpLYXeAHoCAhAV HTTP/1.1"
+
+ barrier b8 sync
+} -start
+
+
+haproxy h8 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h8/stats" level admin
+ crt-base ${testdir}/ocsp_update
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend ssl-fe
+ bind "${tmpdir}/ssl-h8.sock" ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa_no_update.crt-list ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-request return status 200
+
+ listen http_rebound_lst
+ mode http
+ option httplog
+ log ${Syslog_h8_addr}:${Syslog_h8_port} local0
+ bind "127.0.0.1:12345"
+ server s1 "127.0.0.1:12346"
+} -start
+
+# We need to "enable" the cli with a first cli call before using it only through socats
+haproxy h8 -cli {
+ send "show ssl cert"
+ expect ~ ""
+}
+
+# Create a new certificate and add it in the crt-list with ocsp auto-update enabled
+shell {
+ echo "new ssl cert ${testdir}/ocsp_update/rsa.pem" | socat "${tmpdir}/h8/stats" -
+ printf "set ssl cert ${testdir}/ocsp_update/rsa.pem <<\n$(cat ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa)\n\n" | socat "${tmpdir}/h8/stats" -
+ printf "set ssl cert ${testdir}/ocsp_update/rsa.pem.issuer <<\n$(cat ${testdir}/ocsp_update/ocsp_update_rootca.crt)\n\n" | socat "${tmpdir}/h8/stats" -
+ printf "set ssl cert ${testdir}/ocsp_update/rsa.pem.ocsp <<\n$(base64 -w 1000 ${testdir}/ocsp_update/multicert/server_ocsp.pem.rsa.ocsp)\n\n" | socat "${tmpdir}/h8/stats" -
+ echo "commit ssl cert ${testdir}/ocsp_update/rsa.pem" | socat "${tmpdir}/h8/stats" -
+
+ printf "add ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa_no_update.crt-list <<\nrsa.pem [ocsp-update off] foo.bar\n\n" | socat "${tmpdir}/h8/stats" -
+}
+
+# Check that the line is in the crt-list
+haproxy h8 -cli {
+ send "show ssl crt-list ${testdir}/ocsp_update/multicert_ecdsa_no_update.crt-list"
+ expect ~ "${testdir}/ocsp_update/rsa.pem .* foo.bar"
+}
+
+# Check that the new certificate is NOT in the auto update list
+haproxy h8 -cli {
+ send "show ssl ocsp-updates"
+ expect !~ "303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a02021015.*"
+}
+
+shell {
+ echo "update ssl ocsp-response ${testdir}/ocsp_update/rsa.pem" | socat "${tmpdir}/h8/stats" -
+}
+
+shell "sleep 1"
+
+barrier b8 sync
+
+haproxy h8 -cli {
+ send "show ssl ocsp-response ${testdir}/ocsp_update/rsa.pem"
+ expect ~ ".* Cert Status: revoked.*"
+}
+
+haproxy h8 -wait
+process p8 -wait
diff --git a/reg-tests/ssl/ocsp_update/index.txt b/reg-tests/ssl/ocsp_update/index.txt
new file mode 100644
index 0000000..111ea47
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/index.txt
@@ -0,0 +1,2 @@
+R 20500410103904Z 221123104541Z 1015 unknown /C=FR/O=HAProxy Technologies/CN=rsa.haproxy.com
+R 20500410103956Z 221123104430Z 1016 unknown /C=FR/O=HAProxy Technologies/CN=ecdsa.haproxy.com
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa
new file mode 100644
index 0000000..a04fd2e
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIEODCCAiCgAwIBAgICEBYwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MCAXDTIyMTEyMzEwMzk1NloYDzIwNTAwNDEwMTAzOTU2WjBIMQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxGjAYBgNVBAMMEWVjZHNh
+LmhhcHJveHkuY29tMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQB5Id0dJy6Vubt
+/ICfwLOOwgvyeOHOvC/yrqU/NCBNDVZLcOXbncm8Lxzl9Rn2t0VV9pla82/Qlexu
+2jhx8LD3du8AmEn/4tkJMz85Jv4TN/eY7Tsfbqy2NtX17eBWkDA/S1v+9uw9m7UJ
+mzwHIkQHi4S+flXt2ZtQKwgmYcuFYsP6jSGjgbswgbgwMgYIKwYBBQUHAQEEJjAk
+MCIGCCsGAQUFBzABhhZodHRwOi8vMTI3LjAuMC4xOjEyMzQ1MB0GA1UdDgQWBBTS
+Tdzvp9SeMDDfWVNdLPzVCaE/oDBjBgNVHSMEXDBaoUKkQDA+MQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxEDAOBgNVBAMMB1Jvb3Qg
+Q0GCFB4L4lCTIAmZTjzoVXNPaWeDYX8XMA0GCSqGSIb3DQEBCwUAA4ICAQBsoRvT
+LPipFUSvGWWFphrqhri40e6GEKio2RNrHSwq6PBPd+FAjIan1yoZX3C/I/octhoq
+/jHAlCB5GQzU3R3M/gaCyDk4x3wbR52zSNzgyh464B7HwlNyC9jCeh3yB8ylUZCu
+Lc8NRTYavceUoDq2ebO8wpWX0LBd0oh7hMcQzWQrmU1B0NYVsTn65Ogcfokz2r0M
+A3YjwT8vH9i9QFx1Fxy4OYJJQmskKrwAQ+MEtyBJvck2nthZA7KNX+OxuJjOh+lW
++WpTudaoMUd188zHFFjeM4C40uPsePlf1gpdjuTdir1sIH8GNa9XP1wEtvD6mNFU
+6KCFSuZSkBqo2iD6yYzsd1H2DSMVQL67ATP8zSMjEccDYwkO72BR3InxWDFnFEQN
+wosdBFKqqKNKkkdSW1QUsVd90Bi5pHFW0l4FaDk2SJRfzwa1Dc+LfQv9Wf+LcENW
+6HOjqcRdU1PU1evVmq5xoHRDovQGNCStfwX3eW+jnHFYqovg51g5pEPEsmQccJXj
+DMCGoQjM+4i+R0GhyJZ/Kr2Lnj5RyT6RVK8hNCx5NjJBK5z/pJK9pbPGoS9fkK8N
+iQvPgw2+Y3rcVKHUw2epz/2mEzDb4rRiSIOIeuHB4PBL41jUNPwSxkjtjkPwVMuU
+TlD6A5wDj3Sq0B4MoxWgIOyWENABvGl+VBtDNQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIBkWJB8IW867HHc2iB
+7J714zyea0hVD1Z/MEuEyKRZ7aekbjEQKmUfc5MLlQS0nedCqmiLuXObG/PyxxWs
+mWTeH5qhgYkDgYYABAHkh3R0nLpW5u38gJ/As47CC/J44c68L/KupT80IE0NVktw
+5dudybwvHOX1Gfa3RVX2mVrzb9CV7G7aOHHwsPd27wCYSf/i2QkzPzkm/hM395jt
+Ox9urLY21fXt4FaQMD9LW/727D2btQmbPAciRAeLhL5+Ve3Zm1ArCCZhy4Viw/qN
+IQ==
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.issuer b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.issuer
new file mode 100644
index 0000000..bed2061
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.issuer
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.ocsp b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.ocsp
new file mode 100644
index 0000000..793aff1
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.ecdsa.ocsp
Binary files differ
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa
new file mode 100644
index 0000000..058e46d
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa
@@ -0,0 +1,56 @@
+-----BEGIN CERTIFICATE-----
+MIIEvjCCAqagAwIBAgICEBUwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MCAXDTIyMTEyMzEwMzkwNFoYDzIwNTAwNDEwMTAzOTA0WjBGMQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxGDAWBgNVBAMMD3JzYS5o
+YXByb3h5LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIRBd2HB
+WFxKohqOWKCZkQcszMx0tpA48sxlDgjsy6iVEycM1eOQopbFOiSAig2gf+8llKUv
+DM66f98FsBKJ/rVksOS07rDBOO9LCGE7JF8o/Cjc3vIX2gvTd0H19ENHFlxCSBn8
+q5NsLmCSCFHFDSPXL3uhrX/9ScBeU1j7M8nF/AEX50q1ubGRHMbYrBkhUDlI+s92
+fvFpuFPf9vcjPLihHEofYKErKVeNfn+3aD/V55Aw1NO15Dt1Vc+TypeuL7jqgJRg
+OVk2MJmedXKUA4A8SaY4gqVKy1aAe6JYWrCGqr8oHNt3nwqMYyhLkeyqmLh+VMXv
+Bdqj3JbwiGGRou8CAwEAAaOBuzCBuDAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUH
+MAGGFmh0dHA6Ly8xMjcuMC4wLjE6MTIzNDUwHQYDVR0OBBYEFNGC81nNOAJDX1V+
+vlnE5/iy9ciNMGMGA1UdIwRcMFqhQqRAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQK
+DBRIQVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQYIUHgviUJMg
+CZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQELBQADggIBAJmKCsKn0LGUJ5xhSd0c
+d8Aq7TpJImXNUNqoQsyLU5FK8qF3HfJBA2pLkROLZKTXGnwSVndOpxlZy7IpdhZZ
+Ya23mxi6G3iXYAGmiVwGXxZfCwISqARr+CR8psIUQLdi21P5UkLG2LU0+b/ManQD
+4MPvyzi7qf4qaao/miZiT9idrdy0XNQoRy1vJzMMfcRhzWzuGvnr2NVOhZpuDe8p
+K6Hc+8AGZX8qY0DQ30YHU4Ygq0NGRR/oHOoAdJSAuIvfLkKiNZ0s3XTOKu8bogGh
+NbkffborINbB6MG8ZSM+KUrsQbFl6e2lk6VVk1gYIMx/L3MF3WFK9212+8ak0pr1
+JZOd87aWg3WcNqpRgcu3FXZSDfF5JH8jBAoXTZ5YHLMRjrfFLaMmyPC8egcDpogR
+sM4wXyo+5SEX4YWTsd2FRcmPbOFcmwQOy/zmZQyFPnpp+ORRDEkTJmT/VRoexHrt
+8EcKX/CIJ+nzBQtEVThgOCWrE6c9MF+MGkI+TMXy932jEvK14GU2U4aE7uhvyiJt
+RJ+iZGTqwsu7wOqvP8+SsxhpY4ZlNL+LSeHLoq2nBmBwCgHj0ikdEMMLbjciUVGu
+Zb44d9hPea+nfljju5m4VLmonGW2cbzFL4r5mC0/xk6JrB9buw5swkwhslR0guCu
+3knMr1pjkbf8W6DDGKvxHJIX
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCEQXdhwVhcSqIa
+jligmZEHLMzMdLaQOPLMZQ4I7MuolRMnDNXjkKKWxTokgIoNoH/vJZSlLwzOun/f
+BbASif61ZLDktO6wwTjvSwhhOyRfKPwo3N7yF9oL03dB9fRDRxZcQkgZ/KuTbC5g
+kghRxQ0j1y97oa1//UnAXlNY+zPJxfwBF+dKtbmxkRzG2KwZIVA5SPrPdn7xabhT
+3/b3Izy4oRxKH2ChKylXjX5/t2g/1eeQMNTTteQ7dVXPk8qXri+46oCUYDlZNjCZ
+nnVylAOAPEmmOIKlSstWgHuiWFqwhqq/KBzbd58KjGMoS5Hsqpi4flTF7wXao9yW
+8IhhkaLvAgMBAAECggEAIQA46sKU6sqQsnGseb536sNqAuZom4oqQ4g/vUhg9Rrl
+oYvZXyQ6/cYO4QbV69qNsb293o3j8z2kJKFFswqN7PNIFHl1SdOdAlDFsYVRaRFQ
+Al5Cn0QGW4cTrfjST2tQkArV9O4QXgPTerNVshmqUrQiHAZWxaYNHhwrTfu4i3Mo
+v4hfPfXuVLFWzdVFyvBQ+u+yxwqCnKKrKj7uXiPyFwQ0g4wFKs8O48ZZoVryZFJn
+nuUKBr0JBaHpgPTfx1QavvoUeQzDshEAcMXq0Lh4LTzp95jfwsiBj3fEwcrXuJyr
+o3TGHwGHILL8vKpZpw/Ub9Rr4xpyb0Ij+UHzVir5+QKBgQC3a4YNMOy9UD3XSmwU
+qMn1YXpZYv6hz7rFYrQFPjd42b8Orl6v0KrsPVk2hc4KQpiMaEa+IgnD9guMdri4
+oNMri9reoLHDzxN/Wh/jTVVaO2b3mljzF62JF6SJOjeLYvKRqRH4whdCku/1D0xR
+DfhBIVZzCj2tTI1CMZl42vNK6wKBgQC4lv4PakdIY6W3bu2/fuX4PwnrSUmsJV+d
+UAmCls38hnoNHIDrEWbF+StSA/PsHQGOa4w1iYBsD3PptQ43zF7nwvjxKYeXu1/A
+y+0pW/ADlcAm+PcJfgym0663mWZG5bA1s3C1qMM30PM+Z0jTO/GUOeNFofuOWVK+
+mUiGG5U/DQKBgQCmbz74gUiQkFtNHA7uwCpiKs2mhpmfoqtLqMDJcSdM1ej0HW12
+A9bU/uYQ/2FzFfLulUB8Ds7lrkHUd3YusmBrx0AXe6FSmHiMuu7shqPIeNZ6HuhP
+zVB+caGvk9AK/wI1AkF4hEYu9r4elH8fnZmDIAkd4lENC8WyJueoLqVNeQKBgAsj
+uZNOk5yvvslyHVDoJJK1ozCazKJh4wJIWTqTRT0PFICEDtegxjX+UnnxmR/PpE9m
++CAm+yQKTrF05rXBVJzh7EoJepBSk3W8GMTdMn/U4rK3ZZkiDTtoHOwhisWOiPLE
+sHGWDKnqpzNF4mQ1AuAyGiASpW6yv0aXU4QcWAZlAoGBAISfKc6i2akMXufuqj5q
+B6OnFMkFR6JPJhxYo1aYKX0He4WW5RmXhm0lB6UKC7CtE9uofhEn3Tl2AcvwmY7G
+6UE9J/dAUVLGQV07aPyjAMq4ky+ZruI6ptxYgsdPmYZbXhMKIa2vNpB8/bgOKPA5
+3SgdB3ibaIMQtiJqdKjCbWqP
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.issuer b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.issuer
new file mode 100644
index 0000000..bed2061
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.issuer
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.ocsp b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.ocsp
new file mode 100644
index 0000000..5aa51d7
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp.pem.rsa.ocsp
Binary files differ
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem b/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem
new file mode 100644
index 0000000..c33cf58
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem
@@ -0,0 +1,63 @@
+-----BEGIN CERTIFICATE-----
+MIIEODCCAiCgAwIBAgICEBYwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MCAXDTIyMTEyMzEwMzk1NloYDzIwNTAwNDEwMTAzOTU2WjBIMQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxGjAYBgNVBAMMEWVjZHNh
+LmhhcHJveHkuY29tMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQB5Id0dJy6Vubt
+/ICfwLOOwgvyeOHOvC/yrqU/NCBNDVZLcOXbncm8Lxzl9Rn2t0VV9pla82/Qlexu
+2jhx8LD3du8AmEn/4tkJMz85Jv4TN/eY7Tsfbqy2NtX17eBWkDA/S1v+9uw9m7UJ
+mzwHIkQHi4S+flXt2ZtQKwgmYcuFYsP6jSGjgbswgbgwMgYIKwYBBQUHAQEEJjAk
+MCIGCCsGAQUFBzABhhZodHRwOi8vMTI3LjAuMC4xOjEyMzQ1MB0GA1UdDgQWBBTS
+Tdzvp9SeMDDfWVNdLPzVCaE/oDBjBgNVHSMEXDBaoUKkQDA+MQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxEDAOBgNVBAMMB1Jvb3Qg
+Q0GCFB4L4lCTIAmZTjzoVXNPaWeDYX8XMA0GCSqGSIb3DQEBCwUAA4ICAQBsoRvT
+LPipFUSvGWWFphrqhri40e6GEKio2RNrHSwq6PBPd+FAjIan1yoZX3C/I/octhoq
+/jHAlCB5GQzU3R3M/gaCyDk4x3wbR52zSNzgyh464B7HwlNyC9jCeh3yB8ylUZCu
+Lc8NRTYavceUoDq2ebO8wpWX0LBd0oh7hMcQzWQrmU1B0NYVsTn65Ogcfokz2r0M
+A3YjwT8vH9i9QFx1Fxy4OYJJQmskKrwAQ+MEtyBJvck2nthZA7KNX+OxuJjOh+lW
++WpTudaoMUd188zHFFjeM4C40uPsePlf1gpdjuTdir1sIH8GNa9XP1wEtvD6mNFU
+6KCFSuZSkBqo2iD6yYzsd1H2DSMVQL67ATP8zSMjEccDYwkO72BR3InxWDFnFEQN
+wosdBFKqqKNKkkdSW1QUsVd90Bi5pHFW0l4FaDk2SJRfzwa1Dc+LfQv9Wf+LcENW
+6HOjqcRdU1PU1evVmq5xoHRDovQGNCStfwX3eW+jnHFYqovg51g5pEPEsmQccJXj
+DMCGoQjM+4i+R0GhyJZ/Kr2Lnj5RyT6RVK8hNCx5NjJBK5z/pJK9pbPGoS9fkK8N
+iQvPgw2+Y3rcVKHUw2epz/2mEzDb4rRiSIOIeuHB4PBL41jUNPwSxkjtjkPwVMuU
+TlD6A5wDj3Sq0B4MoxWgIOyWENABvGl+VBtDNQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIBkWJB8IW867HHc2iB
+7J714zyea0hVD1Z/MEuEyKRZ7aekbjEQKmUfc5MLlQS0nedCqmiLuXObG/PyxxWs
+mWTeH5qhgYkDgYYABAHkh3R0nLpW5u38gJ/As47CC/J44c68L/KupT80IE0NVktw
+5dudybwvHOX1Gfa3RVX2mVrzb9CV7G7aOHHwsPd27wCYSf/i2QkzPzkm/hM395jt
+Ox9urLY21fXt4FaQMD9LW/727D2btQmbPAciRAeLhL5+Ve3Zm1ArCCZhy4Viw/qN
+IQ==
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem.ocsp b/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem.ocsp
new file mode 100644
index 0000000..793aff1
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert/server_ocsp_ecdsa.pem.ocsp
Binary files differ
diff --git a/reg-tests/ssl/ocsp_update/multicert_both_certs.crt-list b/reg-tests/ssl/ocsp_update/multicert_both_certs.crt-list
new file mode 100644
index 0000000..0ec641f
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_both_certs.crt-list
@@ -0,0 +1,2 @@
+multicert/server_ocsp.pem.rsa [ocsp-update on ssl-min-ver TLSv1.2] *
+multicert/server_ocsp.pem.ecdsa [ocsp-update on ssl-min-ver TLSv1.2] *
diff --git a/reg-tests/ssl/ocsp_update/multicert_ecdsa.crt-list b/reg-tests/ssl/ocsp_update/multicert_ecdsa.crt-list
new file mode 100644
index 0000000..8d28025
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_ecdsa.crt-list
@@ -0,0 +1 @@
+multicert_no_ocsp/server_ocsp_ecdsa.pem [ocsp-update on] *
diff --git a/reg-tests/ssl/ocsp_update/multicert_ecdsa_no_update.crt-list b/reg-tests/ssl/ocsp_update/multicert_ecdsa_no_update.crt-list
new file mode 100644
index 0000000..22935ba
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_ecdsa_no_update.crt-list
@@ -0,0 +1 @@
+multicert_no_ocsp/server_ocsp_ecdsa.pem *
diff --git a/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_ecdsa.pem b/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_ecdsa.pem
new file mode 100644
index 0000000..c33cf58
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_ecdsa.pem
@@ -0,0 +1,63 @@
+-----BEGIN CERTIFICATE-----
+MIIEODCCAiCgAwIBAgICEBYwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MCAXDTIyMTEyMzEwMzk1NloYDzIwNTAwNDEwMTAzOTU2WjBIMQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxGjAYBgNVBAMMEWVjZHNh
+LmhhcHJveHkuY29tMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQB5Id0dJy6Vubt
+/ICfwLOOwgvyeOHOvC/yrqU/NCBNDVZLcOXbncm8Lxzl9Rn2t0VV9pla82/Qlexu
+2jhx8LD3du8AmEn/4tkJMz85Jv4TN/eY7Tsfbqy2NtX17eBWkDA/S1v+9uw9m7UJ
+mzwHIkQHi4S+flXt2ZtQKwgmYcuFYsP6jSGjgbswgbgwMgYIKwYBBQUHAQEEJjAk
+MCIGCCsGAQUFBzABhhZodHRwOi8vMTI3LjAuMC4xOjEyMzQ1MB0GA1UdDgQWBBTS
+Tdzvp9SeMDDfWVNdLPzVCaE/oDBjBgNVHSMEXDBaoUKkQDA+MQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxEDAOBgNVBAMMB1Jvb3Qg
+Q0GCFB4L4lCTIAmZTjzoVXNPaWeDYX8XMA0GCSqGSIb3DQEBCwUAA4ICAQBsoRvT
+LPipFUSvGWWFphrqhri40e6GEKio2RNrHSwq6PBPd+FAjIan1yoZX3C/I/octhoq
+/jHAlCB5GQzU3R3M/gaCyDk4x3wbR52zSNzgyh464B7HwlNyC9jCeh3yB8ylUZCu
+Lc8NRTYavceUoDq2ebO8wpWX0LBd0oh7hMcQzWQrmU1B0NYVsTn65Ogcfokz2r0M
+A3YjwT8vH9i9QFx1Fxy4OYJJQmskKrwAQ+MEtyBJvck2nthZA7KNX+OxuJjOh+lW
++WpTudaoMUd188zHFFjeM4C40uPsePlf1gpdjuTdir1sIH8GNa9XP1wEtvD6mNFU
+6KCFSuZSkBqo2iD6yYzsd1H2DSMVQL67ATP8zSMjEccDYwkO72BR3InxWDFnFEQN
+wosdBFKqqKNKkkdSW1QUsVd90Bi5pHFW0l4FaDk2SJRfzwa1Dc+LfQv9Wf+LcENW
+6HOjqcRdU1PU1evVmq5xoHRDovQGNCStfwX3eW+jnHFYqovg51g5pEPEsmQccJXj
+DMCGoQjM+4i+R0GhyJZ/Kr2Lnj5RyT6RVK8hNCx5NjJBK5z/pJK9pbPGoS9fkK8N
+iQvPgw2+Y3rcVKHUw2epz/2mEzDb4rRiSIOIeuHB4PBL41jUNPwSxkjtjkPwVMuU
+TlD6A5wDj3Sq0B4MoxWgIOyWENABvGl+VBtDNQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIHuAgEAMBAGByqGSM49AgEGBSuBBAAjBIHWMIHTAgEBBEIBkWJB8IW867HHc2iB
+7J714zyea0hVD1Z/MEuEyKRZ7aekbjEQKmUfc5MLlQS0nedCqmiLuXObG/PyxxWs
+mWTeH5qhgYkDgYYABAHkh3R0nLpW5u38gJ/As47CC/J44c68L/KupT80IE0NVktw
+5dudybwvHOX1Gfa3RVX2mVrzb9CV7G7aOHHwsPd27wCYSf/i2QkzPzkm/hM395jt
+Ox9urLY21fXt4FaQMD9LW/727D2btQmbPAciRAeLhL5+Ve3Zm1ArCCZhy4Viw/qN
+IQ==
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_rsa.pem b/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_rsa.pem
new file mode 100644
index 0000000..26c10e3
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_no_ocsp/server_ocsp_rsa.pem
@@ -0,0 +1,86 @@
+-----BEGIN CERTIFICATE-----
+MIIEvjCCAqagAwIBAgICEBUwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MCAXDTIyMTEyMzEwMzkwNFoYDzIwNTAwNDEwMTAzOTA0WjBGMQswCQYDVQQGEwJG
+UjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxGDAWBgNVBAMMD3JzYS5o
+YXByb3h5LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIRBd2HB
+WFxKohqOWKCZkQcszMx0tpA48sxlDgjsy6iVEycM1eOQopbFOiSAig2gf+8llKUv
+DM66f98FsBKJ/rVksOS07rDBOO9LCGE7JF8o/Cjc3vIX2gvTd0H19ENHFlxCSBn8
+q5NsLmCSCFHFDSPXL3uhrX/9ScBeU1j7M8nF/AEX50q1ubGRHMbYrBkhUDlI+s92
+fvFpuFPf9vcjPLihHEofYKErKVeNfn+3aD/V55Aw1NO15Dt1Vc+TypeuL7jqgJRg
+OVk2MJmedXKUA4A8SaY4gqVKy1aAe6JYWrCGqr8oHNt3nwqMYyhLkeyqmLh+VMXv
+Bdqj3JbwiGGRou8CAwEAAaOBuzCBuDAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUH
+MAGGFmh0dHA6Ly8xMjcuMC4wLjE6MTIzNDUwHQYDVR0OBBYEFNGC81nNOAJDX1V+
+vlnE5/iy9ciNMGMGA1UdIwRcMFqhQqRAMD4xCzAJBgNVBAYTAkZSMR0wGwYDVQQK
+DBRIQVByb3h5IFRlY2hub2xvZ2llczEQMA4GA1UEAwwHUm9vdCBDQYIUHgviUJMg
+CZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQELBQADggIBAJmKCsKn0LGUJ5xhSd0c
+d8Aq7TpJImXNUNqoQsyLU5FK8qF3HfJBA2pLkROLZKTXGnwSVndOpxlZy7IpdhZZ
+Ya23mxi6G3iXYAGmiVwGXxZfCwISqARr+CR8psIUQLdi21P5UkLG2LU0+b/ManQD
+4MPvyzi7qf4qaao/miZiT9idrdy0XNQoRy1vJzMMfcRhzWzuGvnr2NVOhZpuDe8p
+K6Hc+8AGZX8qY0DQ30YHU4Ygq0NGRR/oHOoAdJSAuIvfLkKiNZ0s3XTOKu8bogGh
+NbkffborINbB6MG8ZSM+KUrsQbFl6e2lk6VVk1gYIMx/L3MF3WFK9212+8ak0pr1
+JZOd87aWg3WcNqpRgcu3FXZSDfF5JH8jBAoXTZ5YHLMRjrfFLaMmyPC8egcDpogR
+sM4wXyo+5SEX4YWTsd2FRcmPbOFcmwQOy/zmZQyFPnpp+ORRDEkTJmT/VRoexHrt
+8EcKX/CIJ+nzBQtEVThgOCWrE6c9MF+MGkI+TMXy932jEvK14GU2U4aE7uhvyiJt
+RJ+iZGTqwsu7wOqvP8+SsxhpY4ZlNL+LSeHLoq2nBmBwCgHj0ikdEMMLbjciUVGu
+Zb44d9hPea+nfljju5m4VLmonGW2cbzFL4r5mC0/xk6JrB9buw5swkwhslR0guCu
+3knMr1pjkbf8W6DDGKvxHJIX
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCEQXdhwVhcSqIa
+jligmZEHLMzMdLaQOPLMZQ4I7MuolRMnDNXjkKKWxTokgIoNoH/vJZSlLwzOun/f
+BbASif61ZLDktO6wwTjvSwhhOyRfKPwo3N7yF9oL03dB9fRDRxZcQkgZ/KuTbC5g
+kghRxQ0j1y97oa1//UnAXlNY+zPJxfwBF+dKtbmxkRzG2KwZIVA5SPrPdn7xabhT
+3/b3Izy4oRxKH2ChKylXjX5/t2g/1eeQMNTTteQ7dVXPk8qXri+46oCUYDlZNjCZ
+nnVylAOAPEmmOIKlSstWgHuiWFqwhqq/KBzbd58KjGMoS5Hsqpi4flTF7wXao9yW
+8IhhkaLvAgMBAAECggEAIQA46sKU6sqQsnGseb536sNqAuZom4oqQ4g/vUhg9Rrl
+oYvZXyQ6/cYO4QbV69qNsb293o3j8z2kJKFFswqN7PNIFHl1SdOdAlDFsYVRaRFQ
+Al5Cn0QGW4cTrfjST2tQkArV9O4QXgPTerNVshmqUrQiHAZWxaYNHhwrTfu4i3Mo
+v4hfPfXuVLFWzdVFyvBQ+u+yxwqCnKKrKj7uXiPyFwQ0g4wFKs8O48ZZoVryZFJn
+nuUKBr0JBaHpgPTfx1QavvoUeQzDshEAcMXq0Lh4LTzp95jfwsiBj3fEwcrXuJyr
+o3TGHwGHILL8vKpZpw/Ub9Rr4xpyb0Ij+UHzVir5+QKBgQC3a4YNMOy9UD3XSmwU
+qMn1YXpZYv6hz7rFYrQFPjd42b8Orl6v0KrsPVk2hc4KQpiMaEa+IgnD9guMdri4
+oNMri9reoLHDzxN/Wh/jTVVaO2b3mljzF62JF6SJOjeLYvKRqRH4whdCku/1D0xR
+DfhBIVZzCj2tTI1CMZl42vNK6wKBgQC4lv4PakdIY6W3bu2/fuX4PwnrSUmsJV+d
+UAmCls38hnoNHIDrEWbF+StSA/PsHQGOa4w1iYBsD3PptQ43zF7nwvjxKYeXu1/A
+y+0pW/ADlcAm+PcJfgym0663mWZG5bA1s3C1qMM30PM+Z0jTO/GUOeNFofuOWVK+
+mUiGG5U/DQKBgQCmbz74gUiQkFtNHA7uwCpiKs2mhpmfoqtLqMDJcSdM1ej0HW12
+A9bU/uYQ/2FzFfLulUB8Ds7lrkHUd3YusmBrx0AXe6FSmHiMuu7shqPIeNZ6HuhP
+zVB+caGvk9AK/wI1AkF4hEYu9r4elH8fnZmDIAkd4lENC8WyJueoLqVNeQKBgAsj
+uZNOk5yvvslyHVDoJJK1ozCazKJh4wJIWTqTRT0PFICEDtegxjX+UnnxmR/PpE9m
++CAm+yQKTrF05rXBVJzh7EoJepBSk3W8GMTdMn/U4rK3ZZkiDTtoHOwhisWOiPLE
+sHGWDKnqpzNF4mQ1AuAyGiASpW6yv0aXU4QcWAZlAoGBAISfKc6i2akMXufuqj5q
+B6OnFMkFR6JPJhxYo1aYKX0He4WW5RmXhm0lB6UKC7CtE9uofhEn3Tl2AcvwmY7G
+6UE9J/dAUVLGQV07aPyjAMq4ky+ZruI6ptxYgsdPmYZbXhMKIa2vNpB8/bgOKPA5
+3SgdB3ibaIMQtiJqdKjCbWqP
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/ocsp_update/multicert_rsa.crt-list b/reg-tests/ssl/ocsp_update/multicert_rsa.crt-list
new file mode 100644
index 0000000..5b9a341
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/multicert_rsa.crt-list
@@ -0,0 +1 @@
+multicert_no_ocsp/server_ocsp_rsa.pem [ocsp-update on] *
diff --git a/reg-tests/ssl/ocsp_update/ocsp.haproxy.com.pem b/reg-tests/ssl/ocsp_update/ocsp.haproxy.com.pem
new file mode 100644
index 0000000..17a4abf
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/ocsp.haproxy.com.pem
@@ -0,0 +1,84 @@
+-----BEGIN CERTIFICATE-----
+MIIFvDCCA6SgAwIBAgICEAkwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MB4XDTIxMDUyNzA5MjAyN1oXDTQ4MTAxMjA5MjAyN1owRzELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRkwFwYDVQQDDBBvY3NwLmhh
+cHJveHkuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2CY6WYOd
+Tu9g91tHEsvnPpDicSTzU+jvirLUHlDvak/u8Kd/uTcon43G6H0B8+YMbDl3rIi6
+DniNicsTo9ivZrfeo3QHBf8UW2Mbx1Jda7uEKoBx6CJypsyN4dyNXDueT5UyBWGd
+jt6zvPEZbWLsMBkqyx6HZzKhGP8DGE0opVQJxBqwTOsYTL5bEIKsUp9Wt+X3mrCO
+fyCjrUU0XYoclJnK2RIQH2GSc5X6YBZq2ozh+J5S/tb9YRZ4GglF2PHjpZvOJ0I1
+HuBrVCkheN63hBymE0IfstjWTSoAHrT6NZkAvAV/2PsiXQuwihwTaKhYY8NTP0pH
+qNB++ShUMhuLpp38/IHr8ac1A58B5zSxKNtp5y1miZoM3i9oL7v3RxIg5xqKS21G
+zr4xJfdXzNqL4azxfcLREJ4oLiRDDEfxO7IYw5pOZcQlOnHYOUaJ1aKbqOdVTvlQ
+muwCwATfqGgFgfM4Qc95UxpxvFH3I+PMX8I/djZgtNOYwAGxAhITat2nPHqm3sQX
+W86zTrWhlCT+UG/Tx3YxhPPEWjJlFE+1yh9nEHiuqW9YflcDObfGY+LaPBBBc4yR
+8wtcQxGldaNGhsk87+hvRQM3Rvy69LhtAf2ppBfQFUo41qnI+tWiBpA4U3gvmend
+/y2jyHQImSartuHP701DLtg0Poj3E3mXd9cCAwEAAaOBujCBtzAJBgNVHRMEAjAA
+MB0GA1UdDgQWBBSJgNJnuyjilp8FTQAAE11jjwenkzBjBgNVHSMEXDBaoUKkQDA+
+MQswCQYDVQQGEwJGUjEdMBsGA1UECgwUSEFQcm94eSBUZWNobm9sb2dpZXMxEDAO
+BgNVBAMMB1Jvb3QgQ0GCFB4L4lCTIAmZTjzoVXNPaWeDYX8XMA4GA1UdDwEB/wQE
+AwIHgDAWBgNVHSUBAf8EDDAKBggrBgEFBQcDCTANBgkqhkiG9w0BAQsFAAOCAgEA
+qz05vqeL54ahtAmPA0gwUuuEGnS/OKgmjs0IPfwgcZ+o355XVs66HJv2KPuQ1ed8
+gbu6Q4B+Hb3QW/42DsFAuFeX0FOmoc9AGBvNnWIu/guys3Wdf/OZ08VhQz96vai8
+bdCcdyBbTVj/P3zx0pZRQ1ZS7V1o9iH73KCckyN6Qi2rYI0R04KfUMqQ/ZBWvUM9
+N231qf6pzcGbIfECb+Gk3DRvzqylagDQztiCMuEnZ2caUhEq2hvKNXcga1KaWYVr
+aryCee3pL2GqyY615Dt8Jtt2adI7hp8FLUJs2BZtaBelxUwqdfzOXjypYFpIaZY5
+uMQqYTinolPdtfKY67oe7XylyL/rMAbdzCWHpG6Z+vFP16lnHE2dpO2OQKNCVoJb
+RoNsirHLwOugGJFpXxhXNfyeLiumMpbWZ1IT5WkL85y/y8JpwYM6H1SMnVYoqNEc
+qrboP1xo4olIPMskbYMXK4MLJzWf1mvRRjhosX/CWC9KhVL8tZiDJnFhXJLG8sX9
+CRjkKcuXXITpHVFpuIL4TkzmvHQ7Q7+gKRdOJLgXzqVccPZRXkyW9miev8cwRq4w
+eQysfIhT4uEugBog7GTDQWEMUE0pphosddKsFth8jFXFWeuf9XLD1Zx8HczZQtC8
+JgAYxF/HFELzZ2aPdBxJ1WzlH2ehTBxC07Ag0+FBxEk=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEA2CY6WYOdTu9g91tHEsvnPpDicSTzU+jvirLUHlDvak/u8Kd/
+uTcon43G6H0B8+YMbDl3rIi6DniNicsTo9ivZrfeo3QHBf8UW2Mbx1Jda7uEKoBx
+6CJypsyN4dyNXDueT5UyBWGdjt6zvPEZbWLsMBkqyx6HZzKhGP8DGE0opVQJxBqw
+TOsYTL5bEIKsUp9Wt+X3mrCOfyCjrUU0XYoclJnK2RIQH2GSc5X6YBZq2ozh+J5S
+/tb9YRZ4GglF2PHjpZvOJ0I1HuBrVCkheN63hBymE0IfstjWTSoAHrT6NZkAvAV/
+2PsiXQuwihwTaKhYY8NTP0pHqNB++ShUMhuLpp38/IHr8ac1A58B5zSxKNtp5y1m
+iZoM3i9oL7v3RxIg5xqKS21Gzr4xJfdXzNqL4azxfcLREJ4oLiRDDEfxO7IYw5pO
+ZcQlOnHYOUaJ1aKbqOdVTvlQmuwCwATfqGgFgfM4Qc95UxpxvFH3I+PMX8I/djZg
+tNOYwAGxAhITat2nPHqm3sQXW86zTrWhlCT+UG/Tx3YxhPPEWjJlFE+1yh9nEHiu
+qW9YflcDObfGY+LaPBBBc4yR8wtcQxGldaNGhsk87+hvRQM3Rvy69LhtAf2ppBfQ
+FUo41qnI+tWiBpA4U3gvmend/y2jyHQImSartuHP701DLtg0Poj3E3mXd9cCAwEA
+AQKCAgB6w0uEp7HisSablqYJUPHnoRZbOKdS0wup9ONwzHsOIJQO7rMmGOPjqvx7
+8vP2+IO5u/Hydj1mFqYcytA+0MTeTDQRFccfar6/IM0YKfmRRJFOKmGHfHktryQu
+Ubuf1OSXQp+EWurHyEjBWRYeAH8w2jpp3s78l87TiZLSbJBXRiG91YKoTSYiAENs
+XytMSd9Q1zYID5r/LSSJNrMFJXoSFD8XhqDNkfdB2r63cEQEGNwG/rUYtDZ4u/A+
+qWGYU9n9pz4xIfNVtBSBWlL+eVA1oqfYbEfgpjMg5GfpCNTLODkokN8J96iOvCLq
+bgO//00kbD2NxrxobvKOxI79XpOzZCRyfwcxIi8sIALT3b6pRwm6+NSY+7cJ5s5x
+FKDngYc5IYy3ByKmFIjRj5rl/fY6RxwX5eMjEXOGe4TyuftBfBxNzpF6oDbc2Ws9
+Ay0PZv6fPLBSKJOEEOXg2v3djdN5DBpqJVFeMkse/uh4XLcUWXuk62fejmKCdOue
+I5xPtAva9ehLykUkgExch46gncNr12npDVixY4nKbLbNaZf8IgpJyA9UdbRH120m
+kUGZp9qRiUbDNA9dfd5+Boq8vfqvsS7Sbl5o0103qW6QWq/aEDNAZGLCssehAlGG
+PmJ0VsSVImFdUdOeL4/cDVsptd15weTnxaU9oLw2yrKy4GdXgQKCAQEA/m3ECOGU
+R4wOGO31NHNRsN0Y9luyZ+jC6wPnpT/9+pwDUB0TqZ0sJh92i0o6jecXKwX/xNbt
+BBsk8v74l3adi0YlZf1qFOPTXURsm07OwM9hjuutG4tjYibwpdokOneJl8LqOQRe
+zPuy0dXgQW35UyCeqBakMtn8g5zorXD4p4+XCvNlECDZCj2hpnjQdrxRWzwwEH9a
+cJJaxrDp8XO+zaQAYndRcxj/SsCuw8se8iWBchvkCx45ino4Jz0iEbGjhGpOccVC
+9UVAap53V6nQNTctLcNl2g398HKAoGGzV4wx7NJ7+ne2gzRFmj3etjmpyzLxCrIM
+xaibh/DCZjBJ4QKCAQEA2Xvx7OrWIsNd/LX70XvQm2RBO6dA47VIW61WOMm3eah/
+bTxJRp2JLgl5ANBAyxS65lhGpBC3/W3OEKDlYfM21wQRogUSuhIXQaOdeLh4pts3
+IfHU3WMxNU4eSws8Gi4W87SvASj8shX5ld3zZDLX8GEOqoYGS8HvcdJwFZftlIaM
+YDQ2oNKov+ob5mh2yS06hm7KOmxr7l5YWGWKD6aK2Dse0Ppj7nzVlaQfeuR07CpJ
+OFd2JiPyahcuG74Yf5t6k/6qAto2T/v9v8cAgzDKpTXzaoCfDwn1hyefeMVxp0lF
+ttdAmqWLXvI3eiEQILKmDTpCiZ6v48rRb4ZJBXeotwKCAQAkbUS+3MUlBTlTemY7
+7zLH9q/HPdOqKtoVWcbFkwbi5YlX5AHXq+gRQTnwsVz2yho4D7DR1s+yYcyFednP
+nazqrs1V79VLTl8JoG1IQx0437ghBT8QjYFaIScdJ8E+GbU6ZC6yoRyNjo/ImS11
+ULB8pVPxzuQNX8ZWdZWel2kSXG2MpNJYX8uTOsW1FuEJzuZ7AIAFLKafLWUPw26L
+Ij40JQHlFx4zM2YBptqer6srkhEZbELXEKm+WMdHXupMzDkUEUBP66Utho+1dCC0
+DV0A8Xhnb+1aLdyom0wtKi/KHglb1broXlFkMYyxi6AiSNk1fYKjPGC1v/EcomzC
+wrEhAoIBAQCyQL9qEpROS9h172alLRkus74vuYca25Oh6HFZ/CMQaMWAb8ATS72K
+6SKvQwFIMgZ6E3JauIVFB0G1KVq4rJKPKvuU0xmlPnynRQYlUvU4tUX74W05wzoq
+2YtEsMGjJ5GST858Ye6zvAUkC5WY039fuv09ULpKT3sEzJknaa3FZX4av9Digabk
+HWqer5Jkk1h7pMTFm+Xeqp84XIkLCNKWJea9G+zaJKEelDVlEWivxHzc2/qvihj/
+UV5uSKFlvbZ7JGiOC/ImHoC9Ncs6u7vsK0sGSMOVnPELxLMVVqcvmIO2N7jwx6xy
+to434G+KjUJCZzTv/Qtm5e5AvUyOWaQDAoIBAA4YxhRf2Zy1wXENoQP6wTZMmwHq
+p1w7Jqk7+u+W1UEhaugN5v7D5Xw6tnVF7tdiaUMREPUZKu9bj7T61DJFS8LlCaTQ
+i6DwV78vXQY9QOaZ85oqC/Cq8ehnTX8Y5nLvTC8daKZsmRv8z/kG84yOGXWX0zVs
+sskcLj0Wk5rz0kUrxiDkDToEmDAHXGmaTW1Z/z4HFGhvRXx3bAViSJgPEVog4+i2
+10E7RevBeWf2dmfizj0qj9RzvJVmB59rleeWCLCAcdsUjwTSMu4NP+KszqiqKyoY
+tuC6t5cgGbPJAY+I0a6+gWoLZpnZu4/dBrj664j3k9a8AhmY9zlrwTybdYM=
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/ocsp_update/ocsp_update_rootca.crt b/reg-tests/ssl/ocsp_update/ocsp_update_rootca.crt
new file mode 100644
index 0000000..bed2061
--- /dev/null
+++ b/reg-tests/ssl/ocsp_update/ocsp_update_rootca.crt
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/rootCA_crl.pem b/reg-tests/ssl/rootCA_crl.pem
new file mode 100644
index 0000000..cee411e
--- /dev/null
+++ b/reg-tests/ssl/rootCA_crl.pem
@@ -0,0 +1,16 @@
+-----BEGIN X509 CRL-----
+MIICmzCBhDANBgkqhkiG9w0BAQsFADA+MQswCQYDVQQGEwJGUjEdMBsGA1UECgwU
+SEFQcm94eSBUZWNobm9sb2dpZXMxEDAOBgNVBAMMB1Jvb3QgQ0EXDTIxMDQyMzA4
+MjM0NVoXDTQ4MDkwODA4MjM0NVowFTATAgIQBxcNMjEwNDIzMDgyMDM5WjANBgkq
+hkiG9w0BAQsFAAOCAgEAgECfAAcCu1yojdIa3BxpfXgnUoi/Kgp796w67fAOZ9ZS
+0r68n754rWNC6QXsolrMVB4xIHe9PWWY5aCFcdmrZOts3JWaP8/UD/CeUSK30+jR
+jPhDaZJHarHfocPAOvhR2faFmFMrT2NWC9swX1UMPXKAeWg8YubxT7ACx/Yrja3F
+3p/UAAHpGmfPpRPGC6G2zN2zmpycpsH7vDQ7vS/pImyjuOYjMY9qKJeyHhwBIZXK
+C0fuK/40HkFpcWBq6rFoiWRX8gfuKwo0i6BUDyHoFXrptvkXW/ufk+H3uM82/g4I
+ZxLaCSoST+S2aoJOzF8JtjOEjCokP0I4Qs/4uVhbd5PNofgAZhdZY/CREErlVgIa
+OT4hGgyjom7T8+QWApSWRdAkkSDpITSFnXJYXScmxfeT1nRjG9HBX3NHCgQWL8a/
+VwCrzBkCsLfXxFoCuMIKQ2JwhHMTl+gm1YaO6p9BrGMVfxgXvCPWKH0D52pM0z4L
+6F1pKV3OA/LhQMW2tfZpvoWYtlSEy9RnaThS8OdEDI2pxlnI2F4Z6BAMVHUtlBHA
+raklj6ZnD8NkzpVlU7+0OK1rSasP/UEFBXhAOHxDEGXWA8nJCVQiOUjMbitEvQRS
++L+aSMfdpXQcIA3mTJQGXMgsnR75YXllWeHv9EYMHRkoBBUuDi4QX3MvTwa97DM=
+-----END X509 CRL-----
diff --git a/reg-tests/ssl/set_cafile_client.pem b/reg-tests/ssl/set_cafile_client.pem
new file mode 100644
index 0000000..f2fe6f3
--- /dev/null
+++ b/reg-tests/ssl/set_cafile_client.pem
@@ -0,0 +1,95 @@
+Certificate:
+ Data:
+ Version: 1 (0x0)
+ Serial Number: 4103 (0x1007)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=FR, O=HAProxy Technologies, CN=Intermediate CA1
+ Validity
+ Not Before: Apr 22 15:16:27 2021 GMT
+ Not After : Sep 7 15:16:27 2048 GMT
+ Subject: C=FR, O=HAProxy Technologies, CN=Client
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:bb:d6:23:03:04:ae:d2:41:19:3c:6a:91:f1:41:
+ 07:2f:db:87:fa:ac:d5:c3:ad:db:cd:b3:fd:fa:55:
+ 78:3d:eb:b1:50:98:ce:de:f7:1d:44:42:56:15:e6:
+ cf:f3:75:d5:54:90:39:07:54:70:d1:d2:71:f9:26:
+ 96:79:14:8b:20:e0:7c:dd:8f:d0:13:f1:16:a5:85:
+ 52:5f:ff:16:bf:3d:f3:5b:78:e8:00:6e:0b:79:6a:
+ b7:c3:17:49:df:96:1a:7f:c7:e0:cf:c6:01:03:55:
+ af:36:03:95:aa:95:93:08:75:e4:46:86:9f:af:23:
+ 69:ac:fa:65:f0:5d:5a:97:f6:36:78:b2:a7:11:a7:
+ 93:8b:6b:4b:c4:54:67:b1:82:23:91:72:0f:d9:8b:
+ d8:1a:b4:d4:99:9e:cd:3f:3c:34:73:48:ba:cd:f4:
+ 7d:c8:9f:b2:17:a8:90:9c:e6:c2:f3:46:39:8b:06:
+ af:d0:df:e7:7d:05:92:33:4d:08:80:17:e7:a7:39:
+ 49:eb:f1:35:06:ac:07:d6:0b:1a:4d:55:ae:26:1a:
+ 49:4a:a1:b2:5f:c5:8b:39:98:2c:0c:63:41:2e:be:
+ 0e:3f:5c:c3:3a:39:25:2a:23:a3:a2:d3:51:03:cb:
+ 81:5e:76:04:76:a8:fb:80:a8:5b:19:9f:6c:e3:d7:
+ 31:ff
+ Exponent: 65537 (0x10001)
+ Signature Algorithm: sha256WithRSAEncryption
+ 71:76:f0:82:f0:06:c8:ed:5a:dd:92:37:16:82:c5:9c:dd:7d:
+ 65:b7:47:ee:d5:3c:cc:cd:69:d2:57:83:6a:c1:20:ef:28:a8:
+ b9:c2:db:1b:2b:e1:36:95:e5:e7:03:84:67:64:20:ff:ed:a4:
+ 3c:8b:d5:35:32:fe:7e:c8:c5:fc:04:15:ef:be:de:56:dc:f6:
+ d8:cc:1c:fb:03:02:01:66:fa:e4:2b:3f:2c:3e:9b:46:ec:29:
+ f3:02:1b:d4:c2:be:fe:fa:4d:0c:48:e0:d7:af:30:ca:6c:b3:
+ ea:0b:60:5b:a5:17:17:6f:f6:2f:0b:25:db:7c:ce:65:a5:94:
+ 94:09:84:10:39:1e:69:16:e5:0e:bc:1e:96:68:88:54:39:83:
+ b6:0f:74:61:6a:1c:d3:b6:65:36:bc:4f:75:30:9a:84:8f:98:
+ 68:ab:61:ab:57:88:8b:7c:64:7b:7f:39:a7:56:8a:e0:88:e3:
+ 66:7d:2c:0a:eb:f3:aa:9c:a6:f4:88:e1:0b:58:66:69:06:6b:
+ 93:e8:78:52:56:fc:7f:96:69:1d:76:40:30:fa:d6:4a:c7:2a:
+ 47:24:e0:cd:14:32:74:70:ba:b7:b4:0f:33:ca:3a:3c:75:49:
+ ff:65:2e:4f:65:e3:79:14:1f:76:5e:3f:44:39:60:42:df:97:
+ 0e:f3:a2:2e
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAeECAhAHMA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAkZSMR0wGwYD
+VQQKDBRIQVByb3h5IFRlY2hub2xvZ2llczEZMBcGA1UEAwwQSW50ZXJtZWRpYXRl
+IENBMTAeFw0yMTA0MjIxNTE2MjdaFw00ODA5MDcxNTE2MjdaMD0xCzAJBgNVBAYT
+AkZSMR0wGwYDVQQKDBRIQVByb3h5IFRlY2hub2xvZ2llczEPMA0GA1UEAwwGQ2xp
+ZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9YjAwSu0kEZPGqR
+8UEHL9uH+qzVw63bzbP9+lV4PeuxUJjO3vcdREJWFebP83XVVJA5B1Rw0dJx+SaW
+eRSLIOB83Y/QE/EWpYVSX/8Wvz3zW3joAG4LeWq3wxdJ35Yaf8fgz8YBA1WvNgOV
+qpWTCHXkRoafryNprPpl8F1al/Y2eLKnEaeTi2tLxFRnsYIjkXIP2YvYGrTUmZ7N
+Pzw0c0i6zfR9yJ+yF6iQnObC80Y5iwav0N/nfQWSM00IgBfnpzlJ6/E1BqwH1gsa
+TVWuJhpJSqGyX8WLOZgsDGNBLr4OP1zDOjklKiOjotNRA8uBXnYEdqj7gKhbGZ9s
+49cx/wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBxdvCC8AbI7VrdkjcWgsWc3X1l
+t0fu1TzMzWnSV4NqwSDvKKi5wtsbK+E2leXnA4RnZCD/7aQ8i9U1Mv5+yMX8BBXv
+vt5W3PbYzBz7AwIBZvrkKz8sPptG7CnzAhvUwr7++k0MSODXrzDKbLPqC2BbpRcX
+b/YvCyXbfM5lpZSUCYQQOR5pFuUOvB6WaIhUOYO2D3RhahzTtmU2vE91MJqEj5ho
+q2GrV4iLfGR7fzmnVorgiONmfSwK6/OqnKb0iOELWGZpBmuT6HhSVvx/lmkddkAw
++tZKxypHJODNFDJ0cLq3tA8zyjo8dUn/ZS5PZeN5FB92Xj9EOWBC35cO86Iu
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC71iMDBK7SQRk8
+apHxQQcv24f6rNXDrdvNs/36VXg967FQmM7e9x1EQlYV5s/zddVUkDkHVHDR0nH5
+JpZ5FIsg4Hzdj9AT8RalhVJf/xa/PfNbeOgAbgt5arfDF0nflhp/x+DPxgEDVa82
+A5WqlZMIdeRGhp+vI2ms+mXwXVqX9jZ4sqcRp5OLa0vEVGexgiORcg/Zi9gatNSZ
+ns0/PDRzSLrN9H3In7IXqJCc5sLzRjmLBq/Q3+d9BZIzTQiAF+enOUnr8TUGrAfW
+CxpNVa4mGklKobJfxYs5mCwMY0Euvg4/XMM6OSUqI6Oi01EDy4FedgR2qPuAqFsZ
+n2zj1zH/AgMBAAECggEAE60Fs948tdeN3i9HpF7scs3rO435Qmsm2DBfjWaAlvPm
+egvXt7FpBmpwfBDWfak9NIN7BdKJkuEZgUDSiFJnlIUPb2IOKNibR7FkhJvC9Tt3
+D4DlxI3Cc/CC2VPKMDqYAgSc/wa9umyyUtUjS2Apq7w1slGNzpnGCxGbtgcBY2OA
+ILjPffpVYJv87LijTIozScjx/Xdub5fWgcLtByWEDk8SxAb47qAAIAjbilpARWmf
+CHOeF+BG7ku2PT9+tLeMDabwRctNs88pef4+Dbe9+2Ess+2bdsG8As1/fw49QCnm
+ODNV1wPXdpS5wHEEdLxnQNXOQEVSRrVOhz5KWG3F0QKBgQDo/LXfjzcrNoVRG29/
+4l9aInk0+5tE4MCsM828LBmGxbYsQqt2g3ZGQCNW2IfnwQOYlujIm+F7ZYivT5Dq
+j3QvuuHjo/EGz4JuJef1oSkWeYVLm+gSzlmt5EzYximtfnEBBeJJh1Zl0R8nQfWh
+RjRMTboVC22dcBSVJdCM+lg8uQKBgQDOY8A2HxDuK3RyVgz9/YtIEqtMg2tzh0jE
+NnqPcy0AGMc1V1lmhn9ZHuUwspc3ZCi7gHRSjFoW+SWIIDFcPqRcZ5ZPxIejhwtF
+vbi20OAx+mbSdXjyYH0Z/CaVOIMHKaOWv6EbYLWIjVRGfLsMHl1xzYjE8SiNdcMf
+naLjF564dwKBgQDStSmuw5D7TdWIIq3WFF5z39WKazpjMnhNxJP96Ew1rL0yjiEP
+j5j5s6vCMRXILLEZ4PEp7IAh3xOcqPLAj3heaj88ZtnmdOjawQFlDZlhMAmy9Y8O
+4vwL3fr52U18EGwWpsGeCf3DGzt3f5mrfxhxIaJ2wd0ik2ip8ocH2KjQUQKBgQCS
+D23730hwBTjHobZYGZL0UqH/6BGnFNqeYZ+i3XO/WcnBKiwOrqh5PbAdIoZ5oNxi
+tamcsc8f6vpwt7e2/G39JyHtGbyUMgH8PSP33SKMvBUAZDpP7ZEbTqNPf0rbStCG
+4t71LR/Ln5lAuQz2qpae70IXfkOguPJ58WlRJWoiSQKBgHwgYsjep0Ms/ZJkkhAp
+59vwahpyWmnq/Wm6I9eyM84H+VQNqJf9/pQ3q3afPl4hRQydpenPBQF3GJ1m/9Nb
+BenesNrmJabCBYsaZEnwOnb7xlai5xjBEQxAJ5ROxdmTYmA0xWzuGwuu9fHwdCvW
+2ZXtTzEEzU7mLUhbiQWAF+H4
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/set_cafile_interCA1.crt b/reg-tests/ssl/set_cafile_interCA1.crt
new file mode 100644
index 0000000..840af61
--- /dev/null
+++ b/reg-tests/ssl/set_cafile_interCA1.crt
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIEETCCAfmgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEyMFowRzELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRkwFwYDVQQDDBBJbnRlcm1l
+ZGlhdGUgQ0ExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAteNSI+ds
+/DWtxa69I3qQ2dn6nKTCHx3IxS5/UprcO/4t0z/gOgsAn+jDVnMRh57SNQGm5/7o
+DkiYnhV04qfdR8amKVKqAhFHUdpKXRZhP6XpqmpKLwvJH/kQmz1oIg27LPlvrAyV
+UqV1Y0vKkMCVEPuap5sJYQasYfYaavpATOAEAC10dlnpYjQQYt4fHetVi10Jmtzr
+Yea5BDsdeajbq8jWgSWGxA3BrbCubCwhCZfih5ct1KTjotj1hsfVyjPwtp8xVpMs
++amWSL/OI6pxdkVBH/dQa4M67rkQdCezLq3UAryQbdbJJLeJKqgGyIpYpKcS0GQA
+JF3UeWxWgur1IQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
+A4ICAQCiVX9SJFgXpoHHRw42A6AZDyhdv+gagQRhcjtRzdhJM+syot5WL+jxGU7U
+26v3MGDRpn0zDRJ8Hdy+IYefB3NO/D5OPxTiziQcx15qf2fj8VtCygbC77u8QHHR
+08b2uvEDgj9K3b5skfwwez4HabD0Ol1QRZOyykG/wl2g1Bz2lgS6Jfz1wYpmT3Ju
+omufTFfB1a07DkkokGe7qAAsnfUidBScIJbLoD7xMr/zY9iMTVo+MI+Pb/4fivxq
++T75ybJwxlCpv74Zk2ATI/VEfHgPpidYtTkScRe8fsnFrE9z5TNKgNwXRhiDZe8U
+F2oggdnA0fBux5H4AmjbXHgAB8x4NjRZ9yrurjdP7AIOdOn5vvXfDehFKN+OP0Pk
++5ZoUEtd876UrupaeEPd7XZIRrGItmytuntA1pW3RCU2yGsA9ep9Ur84ogzBxXpp
+5/8eGnV/TLfaB56f9cYqL9rcTCI+VLhaTFpBpEjCae5EtS50gsAoaideb45F0wCt
+P266qAc69MU7hPqc6z2rzumcQiAKM6krELhTOVSfRo+Gzuei6bFbTK0RqwGYn+Oc
+shQN++eqrT6YAVudGUJWrKp6JEj3Y24fVhW1x631NRW5JinV2jWVWPgObDrReyya
+4KlIwKMR8vPGOa3qlIRP/QPozktkoonRYg+DsLqAD6sQhJllww==
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/set_cafile_interCA2.crt b/reg-tests/ssl/set_cafile_interCA2.crt
new file mode 100644
index 0000000..dab7bc0
--- /dev/null
+++ b/reg-tests/ssl/set_cafile_interCA2.crt
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIIEETCCAfmgAwIBAgICEAQwDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MB4XDTIxMDQyMjE0MDEyMVoXDTQ4MDkwNzE0MDEyMVowRzELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRkwFwYDVQQDDBBJbnRlcm1l
+ZGlhdGUgQ0EyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0jJiCfXy
+yzBDFTW3kaznyYZOZ6+IK1xnN6HhrB0nHwpNtC5nNtFKqbN2uNhOs9qvsX6Lx+oI
+4+811OEdn269EGdlsdni/fLo+nofoD/bkcnvUGTnnmJNp4SGL6npHbHhJaAB0ETe
+3F+blo9N6i+g41c+/8gD2VTgjoSiB6/Cm1sJw5jMxlmZ6dkk+HiHqg2B2o4pkOQm
+0DmRuT3c6mN8I1sMWvZeMq7WaAzwbGmERK88p79QhPKr2kl5gfOOOphhPYqyN8dk
+c1xooWoeUo8ZI+uI8LFVljR7+VMKmkG59wtYW6vVoUrGMH8tKPZVEmKVG57wjoqP
+72n7IodhBFWnBwIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
+A4ICAQBHTi+Wzg6g5N5/R88Zt9MFiNfQQJwLm5arVjmw+y8uSIsi5e8l2ZrqSi8i
+AtbduX5PID0kzaDAAzH37YpbJScdXBymh2kX6qJOs+bRFl/GkCD7nD9VjV23h72R
+F67bNpNsT3crcxZTD4QytahdKY43XQwicyrmLl9NahxOOfWR6RD2RriBe1Wj5v/R
+SEu5pVcON5qIgo5mgO0GM7X/IW7hZaUyCdboqd29zgYWsiqIGB23V6RTwAZ2WN/0
+xz3IPgwBwzif9L0RNZaGbg+jlmgMJTv+m+/VYoZsFwxgWIaHkR6dYxEZ2ak/djeb
+DOc764Obg+5XlxIgK+hZQbvK2zRkysUTMdzuPZtIgQU4+V4NzeEdsU2Y3IcsUzvG
+29n9CCZxyeG2hTQ1eJrvLqolPUZAn/u+EbH59h01nrycd3k7AJtr05UCrTKk+6EJ
+YwSNvnNEkmw0MX+aYNE0JYtHp8IrFgnO1vbAT6YCxR69LKWAWMy9eDVxK0bTEnrW
+7lRTTgbUCaM31g2peNoiQdSS5xVwO5bcWmi3CHJtrLavMOV7OVi9f+ggTju4CZqK
+v5U6stVyrLSUkdLZP3uMLvDVSPzPt6kGbeyHxqHfE2ywOwFtF3uxKskTNwdxYwuJ
+T8kCxcSLTyefVwkCn7P6r+LdRpJhcbDRdt9cmyfiePElj2uq2w==
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/set_cafile_rootCA.crt b/reg-tests/ssl/set_cafile_rootCA.crt
new file mode 100644
index 0000000..bed2061
--- /dev/null
+++ b/reg-tests/ssl/set_cafile_rootCA.crt
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/set_cafile_server.pem b/reg-tests/ssl/set_cafile_server.pem
new file mode 100644
index 0000000..04e2c22
--- /dev/null
+++ b/reg-tests/ssl/set_cafile_server.pem
@@ -0,0 +1,95 @@
+Certificate:
+ Data:
+ Version: 1 (0x0)
+ Serial Number: 4104 (0x1008)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=FR, O=HAProxy Technologies, CN=Intermediate CA2
+ Validity
+ Not Before: Apr 22 15:18:37 2021 GMT
+ Not After : Sep 7 15:18:37 2048 GMT
+ Subject: C=FR, O=HAProxy Technologies, CN=Server
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:a3:9f:14:1d:de:57:84:a9:8e:17:2a:75:92:be:
+ 70:0e:6d:95:82:36:7a:2d:b2:57:e3:82:fd:20:be:
+ 83:e5:71:0a:ae:3e:58:cc:31:bc:42:12:e7:42:50:
+ 9d:47:c6:f4:02:01:1f:6e:a1:74:38:12:27:df:45:
+ 23:56:9a:b7:74:cb:f1:5a:1a:35:60:0d:6d:59:5e:
+ 71:87:19:9e:84:16:3a:69:ff:8d:ea:b2:77:dd:40:
+ d1:8f:c8:5d:35:c1:53:a4:0b:3f:73:c4:c2:03:52:
+ 2a:f1:bf:dc:2f:32:75:d3:2b:d2:e7:3a:de:ac:ac:
+ 43:59:f1:be:52:a7:30:51:54:ff:3d:a4:5f:97:e7:
+ f8:aa:65:86:b4:7d:a6:9d:c4:2d:94:68:2d:71:dc:
+ 5c:d2:2f:bf:d4:9c:ca:7e:2e:97:a7:10:ad:d9:ad:
+ 8b:74:c9:dd:91:54:71:83:1c:51:17:7e:1b:10:fc:
+ 00:c3:f7:5b:43:76:2d:a3:1e:93:a5:c4:cb:c1:eb:
+ 8e:df:a3:6c:6e:31:1e:27:fc:40:54:ac:8e:a2:ba:
+ 6d:d3:26:0d:ef:8b:e6:20:18:55:fd:11:37:61:90:
+ 40:48:d9:86:fc:34:0b:9c:65:1b:d5:02:02:28:16:
+ 08:1f:df:d8:91:8b:be:89:63:1a:09:27:00:4c:a4:
+ f4:59
+ Exponent: 65537 (0x10001)
+ Signature Algorithm: sha256WithRSAEncryption
+ 01:be:4e:27:fe:cd:03:c9:df:30:5f:a8:e5:b7:33:21:a1:9e:
+ d3:1f:cb:4c:00:64:a0:47:c6:73:c8:f8:f5:a5:f3:ee:8d:b4:
+ 2c:b9:7a:47:71:fb:4a:bd:a4:df:c9:b2:2b:06:f5:77:69:ec:
+ c6:90:8d:16:d0:3d:fa:c0:fb:30:50:39:56:0f:2b:78:15:0a:
+ c2:62:6a:98:59:70:aa:6b:61:55:58:ee:50:b7:cf:d3:7c:0a:
+ 24:04:3d:db:ab:bc:c4:ba:82:52:0c:62:4b:aa:48:47:f4:4f:
+ 05:d8:4f:b2:88:f1:d6:1a:10:e1:bc:98:0b:b9:7f:f9:47:21:
+ 89:7a:37:61:f0:1a:e3:1d:c1:23:ba:71:8d:c8:de:cc:b0:da:
+ 6a:21:5c:41:02:a1:8a:6a:d4:02:32:de:a9:84:97:38:27:de:
+ 2d:8c:bc:c4:fa:a9:fc:3a:7c:58:92:62:20:4b:be:60:25:f6:
+ f4:4e:49:a1:b2:f3:e3:97:7c:84:cd:6c:f5:42:e6:3f:ca:34:
+ a3:26:c7:91:e4:0c:8c:df:36:5e:6b:68:e6:45:2d:c0:af:56:
+ 3c:1e:85:46:79:db:85:6e:98:49:69:ea:4f:fc:00:fc:23:8c:
+ dc:b8:fe:b9:fd:f9:fb:ec:28:f0:1a:f7:3c:b4:74:38:5e:71:
+ bc:1e:39:90
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAeECAhAIMA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAkZSMR0wGwYD
+VQQKDBRIQVByb3h5IFRlY2hub2xvZ2llczEZMBcGA1UEAwwQSW50ZXJtZWRpYXRl
+IENBMjAeFw0yMTA0MjIxNTE4MzdaFw00ODA5MDcxNTE4MzdaMD0xCzAJBgNVBAYT
+AkZSMR0wGwYDVQQKDBRIQVByb3h5IFRlY2hub2xvZ2llczEPMA0GA1UEAwwGU2Vy
+dmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAo58UHd5XhKmOFyp1
+kr5wDm2VgjZ6LbJX44L9IL6D5XEKrj5YzDG8QhLnQlCdR8b0AgEfbqF0OBIn30Uj
+Vpq3dMvxWho1YA1tWV5xhxmehBY6af+N6rJ33UDRj8hdNcFTpAs/c8TCA1Iq8b/c
+LzJ10yvS5zrerKxDWfG+UqcwUVT/PaRfl+f4qmWGtH2mncQtlGgtcdxc0i+/1JzK
+fi6XpxCt2a2LdMndkVRxgxxRF34bEPwAw/dbQ3Ytox6TpcTLweuO36NsbjEeJ/xA
+VKyOorpt0yYN74vmIBhV/RE3YZBASNmG/DQLnGUb1QICKBYIH9/YkYu+iWMaCScA
+TKT0WQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQABvk4n/s0Dyd8wX6jltzMhoZ7T
+H8tMAGSgR8ZzyPj1pfPujbQsuXpHcftKvaTfybIrBvV3aezGkI0W0D36wPswUDlW
+Dyt4FQrCYmqYWXCqa2FVWO5Qt8/TfAokBD3bq7zEuoJSDGJLqkhH9E8F2E+yiPHW
+GhDhvJgLuX/5RyGJejdh8BrjHcEjunGNyN7MsNpqIVxBAqGKatQCMt6phJc4J94t
+jLzE+qn8OnxYkmIgS75gJfb0TkmhsvPjl3yEzWz1QuY/yjSjJseR5AyM3zZea2jm
+RS3Ar1Y8HoVGeduFbphJaepP/AD8I4zcuP65/fn77CjwGvc8tHQ4XnG8HjmQ
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCjnxQd3leEqY4X
+KnWSvnAObZWCNnotslfjgv0gvoPlcQquPljMMbxCEudCUJ1HxvQCAR9uoXQ4Eiff
+RSNWmrd0y/FaGjVgDW1ZXnGHGZ6EFjpp/43qsnfdQNGPyF01wVOkCz9zxMIDUirx
+v9wvMnXTK9LnOt6srENZ8b5SpzBRVP89pF+X5/iqZYa0faadxC2UaC1x3FzSL7/U
+nMp+LpenEK3ZrYt0yd2RVHGDHFEXfhsQ/ADD91tDdi2jHpOlxMvB647fo2xuMR4n
+/EBUrI6ium3TJg3vi+YgGFX9ETdhkEBI2Yb8NAucZRvVAgIoFggf39iRi76JYxoJ
+JwBMpPRZAgMBAAECggEAAj1OKC5/4ifz8us42r5SiFAFqNeYCoITY+DKGCWjZoOK
+kuH1ky3nFrxtf+HclTvq4RAk3v3EunO9KfgnSKsrcaTM89/B9UOZyIxbX28BVWt8
+dzDxP1IcA+I8PAyRAghYXbltr+b0hNkeD1sB5394T2CdLV8H5zMgZN3DLhxts99B
+V6fp77eSxKvjCByKzTvlECYwxt6GnkAfZulmYGtThBoTyIz9clzl2lcpoPwp9VpF
+IwYn6qig4Gfkrwj/2iMg3b6KOQIGcbH493cGmU+ujK1l4ZbkG6VIYHqLnbD+myui
+LpUjUeZPigvgvhkdakGyrwqBxqNFDMOFTdunKuZ65QKBgQDVKKuAf5NeBWAPUiaN
+AI82+4RTIecw5svrgk/9qSNCVnUwm9qJHyDpJZLvYUXcfB1CW6iYUQ9oo/+RvK5O
+YhouwQotKMI7moIyUiRhvOSFC/7QFYLSf8uMOPlYOxofq1OAqzAsGTHItrydu709
+sdox1alxroScpRfZm8I2fm9l4wKBgQDEgaKDTY3UgpY/KWH4SWMc0+UD9ordf96m
+E9rYTLW7pleZB/L5YvmpAiewUvwk1YipiLh0fQZVEx1BKirzmiWeLm2FO4SX7z9t
+kMeVb3XiGgeoTdPV98YNfB6tx3+2WEYQ5FkvyABsdoUp6e8AkwbFPZnFmM/a0ZSU
+Ob/Sfq8xkwKBgCfzTmlv/7PAeCeG8xi8QRtB+qQGF6mPqCqEqu9U0vns8Fvi6guH
+HQj1dNuOtKRFUsqMGUYq8yNekVjELzsboeKfZYPfPsAjDkHWKWF0ILRa8jAXyAQh
+1Yl7aChEM3o6BxV3gDjTpAQFU8aQWECG4+kxLWfUGKCvRJARZE4IVmKXAoGAU2Hy
+tKaW9ULIQFruAG4biWL8fbcC68RTlMM+DKRYRRzrdLsjxeDSsX2Bm9dKuNKHH/Es
+2/klU7o9oqYi/aU+KyXmQS+lLtdNYc+acPWP3vZOo4MKzXNK7fPqDLFnptdEO+y5
+T4Ydb+jGzqc+TE8XA2EFPAyAvohJ9K+gjtBExNMCgYEAhQSFwr8FRE4TVJT9zTxG
+PUsKzCMin5ewrYSVReBBKSEymrEC2MhsDgikfJHbDF4N3o8gbhXJKf3LcLJH0761
+y6Wt+0tyfUWk4Zv8oliiZi9vcFeNmArLW5+NHQLBh5SX2UXGRmtguZUAs1gkAe5E
+S3GzLHPhcWNEOE/PxejIRKI=
+-----END PRIVATE KEY-----
diff --git a/reg-tests/ssl/set_default_cert.crt-list b/reg-tests/ssl/set_default_cert.crt-list
new file mode 100644
index 0000000..a0d2caa
--- /dev/null
+++ b/reg-tests/ssl/set_default_cert.crt-list
@@ -0,0 +1,2 @@
+set_default_cert.pem !*
+set_default_cert.pem www.test1.com
diff --git a/reg-tests/ssl/set_default_cert.pem b/reg-tests/ssl/set_default_cert.pem
new file mode 100644
index 0000000..550208a
--- /dev/null
+++ b/reg-tests/ssl/set_default_cert.pem
@@ -0,0 +1,52 @@
+-----BEGIN CERTIFICATE-----
+MIIENjCCAh4CAQEwDQYJKoZIhvcNAQELBQAwWzELMAkGA1UEBhMCRlIxDjAMBgNV
+BAgMBVBhcmlzMQ4wDAYDVQQHDAVQYXJpczEVMBMGA1UECgwMSEFQcm94eSBUZWNo
+MRUwEwYDVQQDDAxIQVByb3h5IFRlY2gwHhcNMjEwMzAyMTcxODUwWhcNMjIwMzAy
+MTcxODUwWjBnMQswCQYDVQQGEwJGUjETMBEGA1UECAwKU29tZS1TdGF0ZTEOMAwG
+A1UEBwwFUGFyaXMxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRQwEgYD
+VQQDDAsqLnRlc3QxLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+APjmyWLJ1olKg/EOarln7oQB7pdUrF6kS1YG+Nz0sgFzxnU0PHn/IeARCprHyEZ4
+eBOrQ0CHhM5hdEFDX8iq32rektcQqwfH83iwD9zXxFBJ7ItoWG6YAN6WLXjEDYEI
+hxLJMlW3kfYODKhNMvoqXyZi2wTyAJI+aLJI7pbeD+YNb0AwOnSH5ag5ohZIr3QU
+99UD/VUhndv4OP8JZwBiV6Qy79jVDVbPFGaOc70VkMQSCHytyudQicUZrYQdIw1E
+981JF/UpbnECLWyB3V+4t1KtWOW90vkUoBEj8Nxe6kYnMaNSjQhfKSF6zGmUOXYp
+oHPCgms8v4JaovQygo02Qi8CAwEAATANBgkqhkiG9w0BAQsFAAOCAgEAAz8IntYc
+zrbIqweHfD9CZTNIQiobhQmgykT0KQ23Gm2y/e3o63XOqxDv0bEctg4zE83w3g7d
+mJlEyCB0N0qC8UGGsbRm5Cny7H//g3u06NqSMBYbdU+BgZBBj16I5Kcw/kSBb9dA
+wslLlrUjBj6dK83EB1cpyqpyZHIXkR/E424ggfc45rmD60AtU0SvzVNZfIK0PmB0
+3YKiUlO7cl5CzTeTg2BooRvqwblya62SRkhfFL7NCRX1/S9tO/XiaYzgP7J6J09x
+yYs2XhQqJFgtS+1vDp8rHKhcANFVXBJ6rDSbp1qBv7qZkQhlFf8hQtd5iBXvCb0a
+KtN9L4o6t1wvyo0BbERroGU7rkPPUNiMc3gWEf/mgwGLsNNOYqY5eYoeAF7arX5f
+c4LCHiAYMWa/bEY29zmm51GH5ddxFSu1j95Hfd+HlNcX8Oyfed2oCoSamochmbzA
+Kktk0QfCYIv4LlaG5pUliLa6DCLK7yMfT5RC5GGb350p3uDobVj/taY2cVwXOBQb
+MjXK32K9CFrnqKQptPV1ohlWgNiqhvxiGp3Yx17Cn54WL9ksO+8TlwWAttazKVlT
+40tHqGOu6ld90xGZitxL2oA9kBg9Nkxas/f9+9p6sJe5wj09dj/cqRjyiKv7nek1
+TIPtsNbJghDRDQ3uPEYHdX0h490qGMyGARw=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEA+ObJYsnWiUqD8Q5quWfuhAHul1SsXqRLVgb43PSyAXPGdTQ8
+ef8h4BEKmsfIRnh4E6tDQIeEzmF0QUNfyKrfat6S1xCrB8fzeLAP3NfEUEnsi2hY
+bpgA3pYteMQNgQiHEskyVbeR9g4MqE0y+ipfJmLbBPIAkj5oskjult4P5g1vQDA6
+dIflqDmiFkivdBT31QP9VSGd2/g4/wlnAGJXpDLv2NUNVs8UZo5zvRWQxBIIfK3K
+51CJxRmthB0jDUT3zUkX9SlucQItbIHdX7i3Uq1Y5b3S+RSgESPw3F7qRicxo1KN
+CF8pIXrMaZQ5dimgc8KCazy/glqi9DKCjTZCLwIDAQABAoIBAQC/arWb7L+56/2W
+iFDZb62GBfpYlXzOeCmb6la/jsvKxB/vCRItfGGv8Usnh9dlIsat0bsxyEcBdP80
+Jb1nFMonZS6miSIPJN4Ahd5dJ+7JFGD/QWso+mtIw1QLGTONdWJztxmnxDpTcbCY
+Sm6W57kvSz1HC1oXHjnkSqR6kCLH9y6/i7ox6IPYyDA1t/TKJMnKFOPkxKJ8A96v
+1avPrCWfXWYdn6Og5ERd8FJF2L5BYImmmkPpoUeWPyMBfAYqdK5FRijO6JMn/h5k
+XkJm+2bru+cRwcNYUNPuDIa+ZBWhjFfZfSOhOoECeKLe+lhfcFPC7cCSeDJAjGtR
+dakm15ohAoGBAP4+rVBeSCBhPH27T3HWp74qMWkYJzkdqTV0wUUJ1wtuWZFDg/RP
+OYKC+6cM0nW3K+j/9pTWMS1eM61x/VNyFQGUq/rMJGEWFH08NXnV8AxCtwKUV/rP
+Uq3MB4QWfSYGMo9QL+9lu23fMWYpBLo+KIcqPjLb+8FEJGmaC9JCIYQfAoGBAPqe
+qp7NzMmX2b1VR2XXm1CZzwTEFXb4NeDIxSfNbsqXCmws3jgBX3Lx7dQ9k8ymvaA5
+ucYLU3ppozeB//8Ir9lSA1A4w3VN9a+l1ZdQpKQ4SuHtqDwkmKAT85vmGHCPhwlq
+Er9ests3wQ4T/8HPG92QWs+Gg34F+x9U6h2FMv/xAoGBAOM6h1HWAeaWoSbKWvWm
+YKNQOHryMFQW011IbVfTtJOt23U9/1hB2mdvw5SInCzDOgZzhiF90dP3Zn5063FB
++84+3vo2q6jtwAAx6KVsdK+wjLpMdNlfpEhamrkOFGoAjf2SMFVo+fv3x8HDlUsT
+NMuhEJgKDlasHVMYb8pKeoQHAoGBAMAF7ij6+lvD03tz6d6oUkJxduLp8qBTEcUH
+T7hteOQU0lGMFz/GHYIOx/EEtUfqwgQP9r09VFrIsdwH6UNZPpM+eXdv5qLsdsB8
+SalEisGguA9fbrWWPLL6Vn8uz67+6bJW6cJjJps8ntjQjffLXkhnII09PWbD4mNh
+RngT5L2hAoGBANqa+yYSvEGNAxvdfxE0u3U/4OtjCl168nNwHXmyaCKZ1e4XYflz
+wGI4J1ngcCKN37RkCgfu/XRKrc82XhAhV+YYjAUqQYrTyh26b4v9Dp9tBUWiv7bk
+6L+ZlCms+HpsuYmsCAu/od41OWSSpdg+R3VOE0t3rp0r1QdAGYd1nwQC
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/set_ssl_bug_2265.vtc b/reg-tests/ssl/set_ssl_bug_2265.vtc
new file mode 100644
index 0000000..21e837a
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_bug_2265.vtc
@@ -0,0 +1,90 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl cert" command to update a certificate over the CLI.
+# It requires socat to upload the certificate
+#
+# this check does 3 requests, the first one will use "www.test1.com" as SNI,
+# the second one with the same but that must fail and the third one will use
+# "localhost". Since vtest can't do SSL, we use haproxy as an SSL client with 2
+# chained listen section.
+#
+# This is the same as "set_ssl_cert_noext.vtc" but the .crt contains both the certificate and the key.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'set ssl cert' feature of the CLI with a combined crt and key"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ retries 0 # 2nd SSL connection must fail so skip the retry
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(localhost)
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/bug-2265.crt strict-sni
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/bug-2265.crt"
+ expect ~ ".*SHA1 FingerPrint: DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/bug-2265.crt <<\n$(cat ${testdir}/ecdsa.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/bug-2265.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/bug-2265.crt"
+ expect ~ ".*SHA1 FingerPrint: A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+}
+
+# check that the "www.test1.com" SNI was removed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/ssl/set_ssl_cafile.vtc b/reg-tests/ssl/set_ssl_cafile.vtc
new file mode 100644
index 0000000..3881a42
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_cafile.vtc
@@ -0,0 +1,167 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl ca-file" command to update a CA file over the CLI.
+# It also tests the "abort ssl ca-file" and "show ssl ca-file" commands.
+#
+# It is based on two CA certificates, set_cafile_interCA1.crt and set_cafile_interCA2.crt,
+# and a client certificate that was signed with set_cafile_interCA1.crt (set_cafile_client.pem)
+# and a server certificate that was signed with set_cafile_interCA2.crt (set_cafile_server.pem).
+# The CA files used by the client and the server will be updated through the CLI until a
+# proper connection can be established between them.
+#
+# It requires socat to upload the certificate
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'set ssl ca-file' feature of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 4 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ # dummy bind used to test a change when the same crt is used as server and bind
+ bind "fd@${foobarlst}" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA1.crt verify none
+ server s1 "${tmpdir}/ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA1.crt verify none
+
+ listen clear-verified-lst
+ bind "fd@${clearverifiedlst}"
+ server s1 "${tmpdir}/ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA1.crt verify required
+
+ listen ssl-lst
+ # crt: certificate of the server
+ # ca-file: CA used for client authentication request
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-verify-file ${testdir}/set_cafile_rootCA.crt ca-file ${testdir}/set_cafile_interCA2.crt verify required crt-ignore-err all
+ http-response add-header X-SSL-Client-Verify %[ssl_c_verify]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+# Test the "show ssl ca-file" command
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA1.crt - 1 certificate.*"
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA2.crt - 1 certificate.*"
+
+ send "show ssl ca-file ${testdir}/set_cafile_interCA2.crt"
+ expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D"
+}
+
+
+# This first connection should fail because the client's certificate was signed with the
+# set_cafile_interCA1.crt certificate which is not known by the backend.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # unable to verify the client certificate
+ expect resp.http.X-SSL-Client-Verify ~ "20|21"
+} -run
+
+# Set a new ca-file without committing it and check that the new ca-file is not taken into account
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA2.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+# Test the "show ssl ca-file" command
+# The transaction should be mentioned in the list
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ "\\*${testdir}/set_cafile_interCA2.crt - 1 certificate.*"
+
+# The original CA file did not change
+ send "show ssl ca-file ${testdir}/set_cafile_interCA2.crt"
+ expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D"
+
+# Only the current transaction displays a new certificate
+ send "show ssl ca-file *${testdir}/set_cafile_interCA2.crt"
+ expect ~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
+}
+
+# This connection should still fail for the same reasons as previously
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # unable to verify the client certificate
+ expect resp.http.X-SSL-Client-Verify ~ "20|21"
+} -run
+
+haproxy h1 -cli {
+ send "abort ssl ca-file ${testdir}/set_cafile_interCA2.crt"
+ expect ~ "Transaction aborted for certificate '${testdir}/set_cafile_interCA2.crt'!"
+ send "commit ssl ca-file ${testdir}/set_cafile_interCA2.crt"
+ expect ~ "No ongoing transaction!"
+}
+
+
+# Update the bind line's ca-file in order to accept the client certificate
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA2.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n$(cat ${testdir}/set_cafile_rootCA.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA2.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+
+# The backend's certificate can't be verified by the frontend because it was signed with
+# the set_cafile_interCA2.crt certificate.
+client c1 -connect ${h1_clearverifiedlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+
+# Update the server line's ca-file. The server certificate should now be accepted by
+# the frontend. We replace the single CA by a list of CAs that includes the correct one.
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA1.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl ca-file ${testdir}/set_cafile_interCA1.crt <<\n$(cat ${testdir}/set_cafile_interCA2.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "add ssl ca-file ${testdir}/set_cafile_interCA1.crt <<\n$(cat ${testdir}/set_cafile_rootCA.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA1.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+# Test the "show ssl ca-file" with a certificate index
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA1.crt - 3 certificate.*"
+
+ send "show ssl ca-file ${testdir}/set_cafile_interCA1.crt:1"
+ expect ~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
+
+ send "show ssl ca-file ${testdir}/set_cafile_interCA1.crt:2"
+ expect !~ ".*SHA1 FingerPrint: 4FFF535278883264693CEA72C4FAD13F995D0098"
+ send "show ssl ca-file ${testdir}/set_cafile_interCA1.crt:2"
+ expect ~ ".*SHA1 FingerPrint: 3D3D1D10AD74A8135F05A818E10E5FA91433954D"
+}
+
+client c1 -connect ${h1_clearverifiedlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # there should be no verification error on either side: the backend accepts the
+ # server certificate and the frontend reports ssl_c_verify == 0 for the client
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
diff --git a/reg-tests/ssl/set_ssl_cert.vtc b/reg-tests/ssl/set_ssl_cert.vtc
new file mode 100644
index 0000000..9d4d5a0
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_cert.vtc
@@ -0,0 +1,206 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl cert" command to update a certificate over the CLI.
+# It requires socat to upload the certificate
+#
+# This check has two separate parts.
+# In the first part, there are 3 requests, the first one will use "www.test1.com" as SNI,
+# the second one with the same but that must fail and the third one will use
+# "localhost". Since vtest can't do SSL, we use haproxy as an SSL client with 2
+# chained listen section.
+#
+# In the second part, we check the update of a default certificate in a crt-list.
+# This corresponds to a bug raised in https://github.com/haproxy/haproxy/issues/1143.
+# A certificate is used as default certificate as well as regular one, and during the update
+# the default certificate would not be properly updated if the default instance did not have
+# any SNI. The test consists in checking that the used certificate is the right one after
+# updating it via a "set ssl cert" call.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'set ssl cert' feature of the CLI"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 9 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+
+ http-response set-header X-SSL-Server-SHA1 %[ssl_s_sha1,hex]
+
+ retries 0 # 2nd SSL connection must fail so skip the retry
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(localhost)
+
+ server s4 "${tmpdir}/other-ssl.sock" ssl verify none sni str(www.test1.com)
+ server s5 "${tmpdir}/other-ssl.sock" ssl verify none sni str(other.test1.com) # uses the default certificate
+ server s6 "${tmpdir}/other-ssl.sock" ssl verify none sni str(www.test1.com)
+ server s7 "${tmpdir}/other-ssl.sock" ssl verify none sni str(other.test1.com) # uses the default certificate
+
+ server s8 "${tmpdir}/other-ssl.sock" ssl verify none sni str(www.test1.com)
+ server s9 "${tmpdir}/other-ssl.sock" ssl verify none sni str(other.test1.com) # uses the default certificate
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem strict-sni
+ server s1 ${s1_addr}:${s1_port}
+ # dummy server used to test a change when the same crt is used as server and bind
+ server s2 ${s1_addr}:${s1_port} ssl crt ${testdir}/common.pem verify none weight 0
+
+ listen other-ssl-lst
+ bind "${tmpdir}/other-ssl.sock" ssl crt-list ${testdir}/set_default_cert.crt-list
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.pem"
+ expect ~ ".*SHA1 FingerPrint: DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/common.pem <<\n$(cat ${testdir}/ecdsa.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/common.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.pem"
+ expect ~ ".*SHA1 FingerPrint: A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+}
+
+# check that the "www.test1.com" SNI was removed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/common.pem <<\n$(cat ${testdir}/common.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "abort ssl cert ${testdir}/common.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.pem"
+ expect ~ ".*SHA1 FingerPrint: A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+}
+
+
+
+# The following requests are aimed at a backend that uses the set_default_cert.crt-list file
+
+# Uses the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "9DC18799428875976DDE706E9956035EE88A4CB3"
+ expect resp.status == 200
+} -run
+
+# Uses the other.test1.com sni and the default line of the crt-list
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "9DC18799428875976DDE706E9956035EE88A4CB3"
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/set_default_cert.pem <<\n$(cat ${testdir}/common.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+# Certificate should not have changed yet
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/set_default_cert.pem"
+ expect ~ ".*SHA1 FingerPrint: 9DC18799428875976DDE706E9956035EE88A4CB3"
+}
+
+shell {
+ echo "commit ssl cert ${testdir}/set_default_cert.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/set_default_cert.pem"
+ expect ~ ".*SHA1 FingerPrint: DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+}
+
+# Uses the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+ expect resp.status == 200
+} -run
+
+# Uses the other.test1.com sni and the default line of the crt-list
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+ expect resp.status == 200
+} -run
+
+# Restore original certificate
+shell {
+ printf "set ssl cert ${testdir}/set_default_cert.pem <<\n$(cat ${testdir}/set_default_cert.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/set_default_cert.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/set_default_cert.pem"
+ expect ~ ".*SHA1 FingerPrint: 9DC18799428875976DDE706E9956035EE88A4CB3"
+}
+
+# Uses the www.test1.com sni
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "9DC18799428875976DDE706E9956035EE88A4CB3"
+ expect resp.status == 200
+} -run
+
+# Uses the other.test1.com sni and the default line of the crt-list
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "9DC18799428875976DDE706E9956035EE88A4CB3"
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/ssl/set_ssl_cert_bundle.vtc b/reg-tests/ssl/set_ssl_cert_bundle.vtc
new file mode 100644
index 0000000..270cba6
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_cert_bundle.vtc
@@ -0,0 +1,111 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl cert" command to update a multi-certificate
+# bundle over the CLI.
+# It requires socat to upload the certificate
+#
+# This regtests loads a multi-certificates bundle "cert1-example.com.pem"
+# composed of a .rsa and a .ecdsa
+#
+# After verifying that the RSA and ECDSA algorithms were available with the
+# right certificate, the test changes the certificates and try new requests.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+# - Check that you have at least OpenSSL 1.1.1
+
+varnishtest "Test the 'set ssl cert' feature of the CLI with bundles"
+# could work with haproxy 2.3 but the -cc is not available
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev9)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && ssllib_name_startswith(OpenSSL) && openssl_version_atleast(1.1.1)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 9 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+
+ http-response set-header X-SSL-Server-SHA1 %[ssl_s_sha1,hex]
+
+ retries 0 # 2nd SSL connection must fail so skip the retry
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(example.com) force-tlsv12 ciphers ECDHE-RSA-AES128-GCM-SHA256
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(example.com) force-tlsv12 ciphers ECDHE-ECDSA-AES256-GCM-SHA384
+
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(example.com) force-tlsv12 ciphers ECDHE-RSA-AES128-GCM-SHA256
+ server s4 "${tmpdir}/ssl.sock" ssl verify none sni str(example.com) force-tlsv12 ciphers ECDHE-ECDSA-AES256-GCM-SHA384
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/cert1-example.com.pem
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/cert1-example.com.pem.rsa"
+ expect ~ ".*SHA1 FingerPrint: 94F720DACA71B8B1A0AC9BD48C65BA688FF047DE"
+ send "show ssl cert ${testdir}/cert1-example.com.pem.ecdsa"
+ expect ~ ".*SHA1 FingerPrint: C1BA055D452F92EB02D449F0498C289F50698300"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+# RSA
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "94F720DACA71B8B1A0AC9BD48C65BA688FF047DE"
+ expect resp.status == 200
+# ECDSA
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "C1BA055D452F92EB02D449F0498C289F50698300"
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/cert1-example.com.pem.rsa <<\n$(cat ${testdir}/cert2-example.com.pem.rsa)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/cert1-example.com.pem.rsa" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/cert1-example.com.pem.ecdsa <<\n$(cat ${testdir}/cert2-example.com.pem.ecdsa)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/cert1-example.com.pem.ecdsa" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/cert1-example.com.pem.rsa"
+ expect ~ ".*SHA1 FingerPrint: ADC863817FC40C2A9CA913CE45C9A92232558F90"
+ send "show ssl cert ${testdir}/cert1-example.com.pem.ecdsa"
+ expect ~ ".*SHA1 FingerPrint: F49FFA446D072262445C197B85D2F400B3F58808"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+# RSA
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "ADC863817FC40C2A9CA913CE45C9A92232558F90"
+ expect resp.status == 200
+# ECDSA
+ txreq
+ rxresp
+ expect resp.http.X-SSL-Server-SHA1 == "F49FFA446D072262445C197B85D2F400B3F58808"
+ expect resp.status == 200
+} -run
+
diff --git a/reg-tests/ssl/set_ssl_cert_noext.vtc b/reg-tests/ssl/set_ssl_cert_noext.vtc
new file mode 100644
index 0000000..4326711
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_cert_noext.vtc
@@ -0,0 +1,90 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl cert" command to update a certificate over the CLI.
+# It requires socat to upload the certificate
+#
+# this check does 3 requests, the first one will use "www.test1.com" as SNI,
+# the second one with the same but that must fail and the third one will use
+# "localhost". Since vtest can't do SSL, we use haproxy as an SSL client with 2
+# chained listen section.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'set ssl cert' feature of the CLI with separate key and crt"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ ssl-load-extra-del-ext
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ retries 0 # 2nd SSL connection must fail so skip the retry
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(localhost)
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.crt strict-sni
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.crt"
+ expect ~ ".*SHA1 FingerPrint: 2195C9F0FD58470313013FC27C1B9CF9864BD1C6"
+}
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+shell {
+ printf "set ssl cert ${testdir}/common.crt <<\n$(cat ${testdir}/ecdsa.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/common.key <<\n$(cat ${testdir}/ecdsa.key)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/common.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/common.crt"
+ expect ~ ".*SHA1 FingerPrint: A490D069DBAFBEE66DE434BEC34030ADE8BCCBF1"
+}
+
+# check that the "www.test1.com" SNI was removed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/ssl/set_ssl_crlfile.vtc b/reg-tests/ssl/set_ssl_crlfile.vtc
new file mode 100644
index 0000000..f018a33
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_crlfile.vtc
@@ -0,0 +1,146 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl crl-file" command to update a CRL file over the CLI.
+# It also tests the "abort ssl crl-file" and "show ssl crl-file" commands.
+#
+# The frontend's certificate is signed by set_cafile_interCA1.crt and is revoked in interCA1_crl.pem
+# but not in interCA1_crl_empty.pem.
+# The backend's certificate is signed by set_cafile_interCA2.crt and is revoked in interCA2_crl.pem
+# but not in interCA2_crl_empty.pem.
+#
+# The test consists in replacing the two empty CRLs by their not empty equivalent thanks to CLI
+# calls and to check that the certificates (frontend and backend) are indeed revoked after the
+# update.
+#
+# It requires socat to upload the certificate
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'set ssl crl-file' feature of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 4 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ server s1 "${tmpdir}/ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt crl-file ${testdir}/interCA2_crl_empty.pem verify required
+
+ listen ssl-lst
+ # crt: certificate of the server
+ # ca-file: CA used for client authentication request
+ # crl-file: revocation list for client auth
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-file ${testdir}/set_cafile_interCA1.crt ca-verify-file ${testdir}/set_cafile_rootCA.crt crl-file ${testdir}/interCA1_crl_empty.pem verify required crt-ignore-err all
+ http-response add-header X-SSL-Client-Verify %[ssl_c_verify]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+# Test the "show ssl ca-file" command
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA1.crt - 1 certificate.*"
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA2.crt - 1 certificate.*"
+}
+
+# Add the rootCA certificate to set_cafile_interCA2.crt in order for the frontend to
+# be able to validate the server's certificate
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA2.crt <<\n$(cat ${testdir}/set_cafile_interCA2.crt)\n$(cat ${testdir}/set_cafile_rootCA.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA2.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl ca-file"
+ expect ~ ".*${testdir}/set_cafile_interCA2.crt - 2 certificate.*"
+
+ send "show ssl ca-file ${testdir}/set_cafile_interCA2.crt"
+ expect ~ ".*Subject.*/CN=Root CA"
+}
+
+# This first connection should succeed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
+
+# Change the frontend's crl-file to one in which the server certificate is revoked
+shell {
+ printf "set ssl crl-file ${testdir}/interCA2_crl_empty.pem <<\n$(cat ${testdir}/interCA2_crl.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+# Check that the transaction is displayed in the output of "show ssl crl-list"
+haproxy h1 -cli {
+ send "show ssl crl-file"
+ expect ~ "\\*${testdir}/interCA2_crl_empty.pem"
+
+ send "show ssl crl-file \\*${testdir}/interCA2_crl_empty.pem"
+ expect ~ "Revoked Certificates:"
+ send "show ssl crl-file \\*${testdir}/interCA2_crl_empty.pem:1"
+ expect ~ "Serial Number: 1008"
+}
+
+# This connection should still succeed since the transaction was not committed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.X-SSL-Client-Verify == 0
+} -run
+
+haproxy h1 -cli {
+ send "commit ssl crl-file ${testdir}/interCA2_crl_empty.pem"
+ expect ~ "Committing ${testdir}/interCA2_crl_empty.pem"
+}
+
+# This connection should fail, the server's certificate is revoked in the newly updated CRL file
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+# Restore the frontend's CRL
+shell {
+ printf "set ssl crl-file ${testdir}/interCA2_crl_empty.pem <<\n$(cat ${testdir}/interCA2_crl_empty.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl crl-file ${testdir}/interCA2_crl_empty.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+# Change the backend's CRL file to one in which the frontend's certificate is revoked
+shell {
+ printf "set ssl crl-file ${testdir}/interCA1_crl_empty.pem <<\n$(cat ${testdir}/interCA1_crl.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl crl-file ${testdir}/interCA1_crl_empty.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+# The client's certificate is revoked in the newly updated CRL file. The handshake
+# still completes because the listener uses "crt-ignore-err all", so the request gets
+# a 200 but ssl_c_verify reports error 23 (certificate revoked).
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ # Revoked certificate
+ expect resp.http.X-SSL-Client-Verify == 23
+} -run
diff --git a/reg-tests/ssl/set_ssl_server_cert.vtc b/reg-tests/ssl/set_ssl_server_cert.vtc
new file mode 100644
index 0000000..847d45b
--- /dev/null
+++ b/reg-tests/ssl/set_ssl_server_cert.vtc
@@ -0,0 +1,129 @@
+#REGTEST_TYPE=devel
+
+# This reg-test uses the "set ssl cert" command to update a backend certificate over the CLI.
+# It requires socat to upload the certificate
+
+varnishtest "Test the 'set ssl cert' feature of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.4)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 4 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ nbthread 1
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ retries 0 # 2nd SSL connection must fail so skip the retry
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client1.pem
+
+ listen ssl-lst
+ # crt: certificate of the server
+ # ca-file: CA used for client authentication request
+ # crl-file: revocation list for client auth: the client1 certificate is revoked
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/ca-auth.crt verify optional crt-ignore-err all crl-file ${testdir}/crl-auth.pem
+
+ acl cert_expired ssl_c_verify 10
+ acl cert_revoked ssl_c_verify 23
+ acl cert_ok ssl_c_verify 0
+
+ http-response add-header X-SSL Ok if cert_ok
+ http-response add-header X-SSL Expired if cert_expired
+ http-response add-header X-SSL Revoked if cert_revoked
+ http-response add-header x-ssl-sha1 %[ssl_c_sha1,hex]
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sha1 == "D9C3BAE37EA5A7EDB7B3C9BDD4DCB2FE58A412E4"
+ expect resp.http.x-ssl == "Ok"
+} -run
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: D9C3BAE37EA5A7EDB7B3C9BDD4DCB2FE58A412E4"
+}
+
+# Replace certificate with an expired one
+shell {
+ printf "set ssl cert ${testdir}/client1.pem <<\n$(cat ${testdir}/client2_expired.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/client1.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: C625EB01A0A660294B9D7F44C5CEEE5AFC495BE4"
+}
+
+
+# The updated client certificate is an expired one so its verification should fail (the request still succeeds thanks to crt-ignore-err and the X-SSL header reports "Expired")
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sha1 == "C625EB01A0A660294B9D7F44C5CEEE5AFC495BE4"
+ expect resp.http.x-ssl == "Expired"
+} -run
+
+# Replace certificate with a revoked one
+shell {
+ printf "set ssl cert ${testdir}/client1.pem <<\n$(cat ${testdir}/client3_revoked.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/client1.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: 992386628A40C9D49C89BAC0058B5D45D8575151"
+}
+
+# The updated client certificate is a revoked one so its verification should fail (the request still succeeds thanks to crt-ignore-err and the X-SSL header reports "Revoked")
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sha1 == "992386628A40C9D49C89BAC0058B5D45D8575151"
+ expect resp.http.x-ssl == "Revoked"
+} -run
+
+# Abort a transaction
+shell {
+ printf "set ssl cert ${testdir}/client1.pem <<\n$(cat ${testdir}/client3_revoked.pem)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "abort ssl cert ${testdir}/client1.pem" | socat "${tmpdir}/h1/stats" -
+}
+
+haproxy h1 -cli {
+ send "show ssl cert ${testdir}/client1.pem"
+ expect ~ ".*SHA1 FingerPrint: 992386628A40C9D49C89BAC0058B5D45D8575151"
+}
+
+# The certificate was not updated so it should still be revoked
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Revoked"
+} -run
+
+
diff --git a/reg-tests/ssl/show_ocsp_server.pem b/reg-tests/ssl/show_ocsp_server.pem
new file mode 100644
index 0000000..a652359
--- /dev/null
+++ b/reg-tests/ssl/show_ocsp_server.pem
@@ -0,0 +1,119 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4111 (0x100f)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=FR, O=HAProxy Technologies, CN=Root CA
+ Validity
+ Not Before: Jun 10 08:54:19 2021 GMT
+ Not After : Oct 26 08:54:19 2048 GMT
+ Subject: C=FR, O=HAProxy Technologies, CN=Server Certificate
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ RSA Public-Key: (2048 bit)
+ Modulus:
+ 00:e9:88:7e:5e:ec:81:d0:f7:2b:9b:c9:5d:81:ea:
+ 9c:ff:61:2f:4b:a2:ad:08:4d:44:7c:65:fa:ab:3a:
+ f2:be:63:ac:34:5c:c4:05:35:be:d4:79:af:a5:fc:
+ 9e:92:10:75:b1:4d:70:d6:82:a3:7e:7e:b0:e6:2c:
+ ba:ec:1b:e9:7f:55:f3:98:6e:d5:b2:00:37:05:76:
+ df:28:be:3e:89:52:ec:47:58:45:7a:dd:7d:89:ae:
+ 7f:43:d6:a5:ce:f6:8d:8d:32:fe:33:dc:16:15:01:
+ 82:23:d1:77:12:75:a2:e2:2a:08:eb:cd:32:1e:5b:
+ 54:12:68:83:21:3a:6e:07:f5:99:f4:e7:79:eb:f7:
+ d0:d9:71:f2:1d:79:08:a2:63:df:ab:59:f3:ac:33:
+ 18:d6:0a:9c:48:0b:9a:b0:ae:79:7b:8e:5a:1d:d2:
+ fc:5c:6c:a5:d5:61:88:e8:50:c2:0f:f2:5b:0d:0c:
+ 82:18:c8:a1:98:19:8a:fc:28:c6:27:e7:94:de:3d:
+ 13:44:16:12:9e:e1:a8:b0:17:a1:4d:14:84:3e:44:
+ bc:76:5d:cd:4e:67:9c:e6:69:0b:5a:fe:cf:08:bb:
+ 6d:0b:be:d6:8e:5d:c6:fc:53:e2:ab:34:28:2f:ef:
+ 03:5a:c4:ad:b7:e8:4e:1c:89:67:78:f5:a4:41:fd:
+ 80:f3
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ Authority Information Access:
+ OCSP - URI:http://ocsp.haproxy.com
+
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:c3:1a:2c:37:d4:91:74:10:be:eb:f3:1e:f3:da:cf:ed:0d:
+ b1:37:8e:e8:0c:44:cb:28:ce:4b:5c:ed:02:35:13:55:e1:34:
+ 93:aa:7d:91:fa:4c:a7:31:09:6a:23:b7:0a:d3:37:70:dd:48:
+ 9c:b6:af:31:d7:28:c1:cf:7d:44:f0:d5:ac:58:56:74:40:48:
+ a6:21:85:ea:bf:38:52:fc:8e:16:7c:4d:79:d3:b4:18:11:90:
+ 95:a7:f4:b6:5f:91:dc:3e:bd:e7:58:96:ff:c2:d2:59:20:ed:
+ 4e:de:e5:92:c9:a6:5a:37:a1:fd:00:cb:13:51:ef:ce:98:c8:
+ 01:b5:a1:9a:74:63:a0:da:dc:39:1e:08:8b:60:04:7f:96:c8:
+ 02:cd:cc:dc:04:a4:4c:84:8f:a1:30:49:99:e1:6c:0c:39:65:
+ 2c:03:f8:60:46:cb:28:42:6a:c4:b0:bb:7f:be:67:de:1e:55:
+ 10:2a:55:1f:58:d4:fc:b0:74:9e:11:95:0b:c0:cc:f6:fc:6d:
+ ce:25:17:48:dc:30:5e:b3:29:44:10:11:2d:47:2d:06:81:21:
+ 51:55:4a:4d:72:79:49:ad:29:77:64:92:e7:4e:c9:4f:4c:25:
+ 4d:24:3c:49:07:af:53:74:b5:14:05:e2:f2:fc:ba:d7:a0:db:
+ e4:e4:38:74:fe:f0:34:98:78:f4:2c:68:2d:a6:1e:2d:16:d6:
+ 2b:1d:95:3c:ac:9d:16:6a:7e:d4:cd:0c:94:2b:f4:94:1c:ef:
+ 3b:23:13:78:14:ea:ea:2f:08:f4:ed:21:3d:50:77:4b:50:fe:
+ db:47:19:d1:36:92:7d:7e:e3:18:40:1d:65:0e:fe:95:4f:54:
+ 60:15:16:57:72:06:93:03:ee:8c:89:4e:7b:0b:13:a5:ef:52:
+ c9:53:8d:77:b4:7f:11:f8:03:f1:ce:a0:f8:33:06:89:44:7b:
+ f7:14:4a:51:ba:0e:35:88:ea:69:44:bd:3f:76:78:23:86:79:
+ 13:00:40:1a:d0:69:42:41:72:e6:81:a7:b2:11:25:37:73:15:
+ 89:a7:36:5d:75:3c:e9:1b:dc:ea:8c:98:6e:24:f9:98:e1:62:
+ d6:12:34:a4:c1:bc:08:fd:4d:86:8e:43:a9:9a:36:26:ba:f5:
+ ab:13:9c:08:09:8d:bf:13:84:a0:5f:52:78:fc:1d:11:0c:d6:
+ e1:a3:0c:ce:4d:21:79:90:2a:bb:04:03:d9:76:71:81:36:2a:
+ 1c:56:79:e7:32:03:d8:41:cc:73:e5:6e:45:4e:2d:c9:b0:cc:
+ 70:6b:47:93:6b:00:d0:6d:94:5f:db:e1:d5:dd:73:11:9f:b7:
+ c1:75:50:43:17:b5:e6:51
+-----BEGIN CERTIFICATE-----
+MIIEOjCCAiKgAwIBAgICEA8wDQYJKoZIhvcNAQELBQAwPjELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRAwDgYDVQQDDAdSb290IENB
+MB4XDTIxMDYxMDA4NTQxOVoXDTQ4MTAyNjA4NTQxOVowSTELMAkGA1UEBhMCRlIx
+HTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVzMRswGQYDVQQDDBJTZXJ2ZXIg
+Q2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDpiH5e
+7IHQ9yubyV2B6pz/YS9Loq0ITUR8ZfqrOvK+Y6w0XMQFNb7Uea+l/J6SEHWxTXDW
+gqN+frDmLLrsG+l/VfOYbtWyADcFdt8ovj6JUuxHWEV63X2Jrn9D1qXO9o2NMv4z
+3BYVAYIj0XcSdaLiKgjrzTIeW1QSaIMhOm4H9Zn053nr99DZcfIdeQiiY9+rWfOs
+MxjWCpxIC5qwrnl7jlod0vxcbKXVYYjoUMIP8lsNDIIYyKGYGYr8KMYn55TePRNE
+FhKe4aiwF6FNFIQ+RLx2Xc1OZ5zmaQta/s8Iu20LvtaOXcb8U+KrNCgv7wNaxK23
+6E4ciWd49aRB/YDzAgMBAAGjNzA1MDMGCCsGAQUFBwEBBCcwJTAjBggrBgEFBQcw
+AYYXaHR0cDovL29jc3AuaGFwcm94eS5jb20wDQYJKoZIhvcNAQELBQADggIBABTD
+Giw31JF0EL7r8x7z2s/tDbE3jugMRMsozktc7QI1E1XhNJOqfZH6TKcxCWojtwrT
+N3DdSJy2rzHXKMHPfUTw1axYVnRASKYhheq/OFL8jhZ8TXnTtBgRkJWn9LZfkdw+
+vedYlv/C0lkg7U7e5ZLJplo3of0AyxNR786YyAG1oZp0Y6Da3DkeCItgBH+WyALN
+zNwEpEyEj6EwSZnhbAw5ZSwD+GBGyyhCasSwu3++Z94eVRAqVR9Y1PywdJ4RlQvA
+zPb8bc4lF0jcMF6zKUQQES1HLQaBIVFVSk1yeUmtKXdkkudOyU9MJU0kPEkHr1N0
+tRQF4vL8uteg2+TkOHT+8DSYePQsaC2mHi0W1isdlTysnRZqftTNDJQr9JQc7zsj
+E3gU6uovCPTtIT1Qd0tQ/ttHGdE2kn1+4xhAHWUO/pVPVGAVFldyBpMD7oyJTnsL
+E6XvUslTjXe0fxH4A/HOoPgzBolEe/cUSlG6DjWI6mlEvT92eCOGeRMAQBrQaUJB
+cuaBp7IRJTdzFYmnNl11POkb3OqMmG4k+ZjhYtYSNKTBvAj9TYaOQ6maNia69asT
+nAgJjb8ThKBfUnj8HREM1uGjDM5NIXmQKrsEA9l2cYE2KhxWeecyA9hBzHPlbkVO
+LcmwzHBrR5NrANBtlF/b4dXdcxGft8F1UEMXteZR
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA6Yh+XuyB0Pcrm8ldgeqc/2EvS6KtCE1EfGX6qzryvmOsNFzE
+BTW+1HmvpfyekhB1sU1w1oKjfn6w5iy67Bvpf1XzmG7VsgA3BXbfKL4+iVLsR1hF
+et19ia5/Q9alzvaNjTL+M9wWFQGCI9F3EnWi4ioI680yHltUEmiDITpuB/WZ9Od5
+6/fQ2XHyHXkIomPfq1nzrDMY1gqcSAuasK55e45aHdL8XGyl1WGI6FDCD/JbDQyC
+GMihmBmK/CjGJ+eU3j0TRBYSnuGosBehTRSEPkS8dl3NTmec5mkLWv7PCLttC77W
+jl3G/FPiqzQoL+8DWsStt+hOHIlnePWkQf2A8wIDAQABAoIBAQDktypU2zrUpo6O
+F6u9xkIWl17Tq7HddJdDYjkbJDODJWkNK2FLXPTVcYwGe5/tm7M4f4iofe+Tvo6Q
+D3TOMxP/AvX872fY2f8JGf+7Dn9+zLjdsuTxTSVbB4xaq0lepffCNxPhRIZX8k87
+tzTv3kg1SkfMcP3J31Y6ZSMwEuKaZR9bkIT2MlLw89Qrg/o1Z1Yuu4CoJhgJ9x4Q
+smJmu6uu152i0tqQDK76nHfTgK6GTyHQpP/njXZ3gD/4vTOKsZPoXEtM9gq1Ihqm
+c7Pcy71q9nOBWfG3KUVhIlOahyVPewAFG7vNsPWVE0mN3FhCIEUPPLNnvAydSPaV
+vbwohs4BAoGBAPqXF6cTKWIfHTn4TrcOcKslKEzVSgJabZeYw1kTRsSLCsvV3ojx
+txW4A8FM+EVwX+K6FmpAxN9aKERVv1Ez3xvjmZf6czgREd8F2X2j6SwkcSwVZaxz
+FCl81jz6r/9CGP6Wbq0uVKGhEdNYddhc3RvR8oWwnMEgwIkOvfnpCevzAoGBAO6T
+IljTIzsZmLLFdhvS49C4bQ71vQbEnybqHENZcPdjrgbwRDLjQ4ZEGLm/O1zmKVZh
+C5rRqd/fWVtzMPmZJr0aNeVN3dYob/1SS6ixu/D55jRII6RtkTrm8bmOlUXIx3BB
+sgDOhG61U4LJ8n4Utcgv4go1feRNQkIo5qXkLFcBAoGALB0HE+liopxZl8fni4Am
+Q2qiIox1n95tZn+E/BxRm+3iM6ntp+vtUAx51MCJAChdKNubcI8AWVVUu1rg+BmK
+kC1L754uRFN08u7jr6N4O8YaiikmIeqMRRVt3YRAEU6AeejfiOscCOwC6FKtRC5s
+2iXmbLR/k9wBKN+IgAMPNRMCgYEA44MIxDBFbrzQM9u+8HXCr27RAe0y4Fttcszb
+Oxb2ddVnRlKmlujHoikaczh8wfD0Bt3xFSlQmKAENQO69qwolzmBoDULkolpkuiC
+IlOsaPfHoqAQ7WNXlhZa+puQmsYH+3OK7t4CyRi+lQFE8RuK52dSZm3wqmFLCJC8
+tALOjgECgYEAjREmEh/o/moOfIp8x18GYkYkJCv3+/UwMD8kJUu3KtXhER6Kgi2t
+GgqGV7nHm+sZjck+tcWdT7s+SJWQ2t8QkOf9xavy6mhG6ptJT7xoXSCxAUzNjLQZ
+WpoLVecRfaiAwj9DbbVWhjy8RDkyAHcHveVSIH40I7K0oTbNPqyJk6U=
+-----END RSA PRIVATE KEY-----
diff --git a/reg-tests/ssl/show_ocsp_server.pem.issuer b/reg-tests/ssl/show_ocsp_server.pem.issuer
new file mode 100644
index 0000000..bed2061
--- /dev/null
+++ b/reg-tests/ssl/show_ocsp_server.pem.issuer
@@ -0,0 +1,30 @@
+-----BEGIN CERTIFICATE-----
+MIIFGjCCAwKgAwIBAgIUHgviUJMgCZlOPOhVc09pZ4NhfxcwDQYJKoZIhvcNAQEL
+BQAwPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMB4XDTIxMDQyMjE0MDEyMFoXDTQ4MDkwNzE0MDEy
+MFowPjELMAkGA1UEBhMCRlIxHTAbBgNVBAoMFEhBUHJveHkgVGVjaG5vbG9naWVz
+MRAwDgYDVQQDDAdSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAti+5onUeFJNyF5s6xlnBxDnFhw7Q5VbBestHeQttjBWN31zq5yaf/+CYXdu+
+lY6gNZj6JBiFJ5P7VXX3DqUIJBX6byXWfIUWM+auBAMKlTz0+hWrF/UxI/3uG67N
++Z6NVffEPYbA4Emqozr0DIicWorRyHnrhEQQP87xBCUboUr3QEkNngfiJ0fPm3fj
+7HfQemGL2OnTA8qdy0q1l4aUhVr9bgedP2Klvs0XhbszCGLI0Gq5lyNadlH1MEiw
+SXa9rklE6NCNcyamO7Wt8LVrg6pxopa7oGnkLbnjzSuE+xsN0isOLaHH5LfYg6gT
+aAHpnBHiWuDZQIyzKc+Z37gNksd46/y9B+oBZoCTcYMOsn7PK+gPzTbu3ic4L9hO
+WCsTV0tn+qUGj6/J98gRgvuvZGA7NPDKNZU5p34oyApBPBUOgpn6pCuT5NlkPYAe
+Rp/ypiy5NCHp0JW3JWkJ4+wEasZM34TZUYrOsicA0GV4ZVkoQ3WYyAjmLvRXmo/w
+Z3sSlmHvCg9MrQ9pk24+OtvCbii0bb/Zmlx0Y4lU5TogcuJffJDVbj7oxTc2gRmI
+SIZsnYLv2qVoeBoMY5otj+ef0Y8v98mKCbiWe2MzBkC2h5wmwyWedez8RysTaFHS
+Z4yOYoCsEAtCxnib9d5fXf0+6aOuFtKMknkuWbYj6En647ECAwEAAaMQMA4wDAYD
+VR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAjVzxHzq/87uj24It5hYj4mq4
+ero0zix4fA4tJNuTpZ/5r7GUYaf/uT4xfDilBX2fGMsxVTxJC25KzhdFeTzg1Tde
+/N0LAeLWHfe6jR/P5XDATD0ZA73DQALOxRM5uRMeWJDVaUeco/aXsdQaCz2STDI3
+h7VVFoaOlmxQW3BBEvg2VUp9DS2UjqqdwsUDtzwKfrmj/FqyBvGrvNeIMv28HCu7
+r1WE1Z0UEJhpc1BPbu7F/vl60gRF3bQjh2tL8pWThxTJe6Qy+pLoSShyi85AM9XK
+scCmUtQWjy7KQDL8XVFvuCWvMzknZQjJcncbKddPaaSIDkKUpz9FDv+wSJj/LKf7
+bGSFPM6sblioLbLNJByRYI8G7VHvKDbUnYHbHp75NTGA2eDeNqx5bC2G/EJUTwLM
+bfcZr9hv+z1QpvSLEpar30kJjc1QMQcf60ToGYIC93rsVAKou2GPGry4h/nzwro0
+jjFWNgORTXllfcQDbDNOPkV1kFFibPbAU4faZMgC+xwIwDBsndvcvXjLaRUa4fmw
+1xNkOO5Lj9AuvTXdCc9yUXRzmPZhU6Q4YB2daWvs3vbMTtvkAXGyQL4b2HD+NYZs
+cMUtbteGgQzwM1gpMBn4GX53vhlCXq28r3cH1/1tLDweglSrxyvZbB7pZU7BAmLk
+TEj2fXcvdcX+TtYhC10=
+-----END CERTIFICATE-----
diff --git a/reg-tests/ssl/show_ocsp_server.pem.ocsp b/reg-tests/ssl/show_ocsp_server.pem.ocsp
new file mode 100644
index 0000000..5ac1457
--- /dev/null
+++ b/reg-tests/ssl/show_ocsp_server.pem.ocsp
Binary files differ
diff --git a/reg-tests/ssl/show_ocsp_server.pem.ocsp.revoked b/reg-tests/ssl/show_ocsp_server.pem.ocsp.revoked
new file mode 100644
index 0000000..bf69b3d
--- /dev/null
+++ b/reg-tests/ssl/show_ocsp_server.pem.ocsp.revoked
Binary files differ
diff --git a/reg-tests/ssl/show_ssl_ocspresponse.vtc b/reg-tests/ssl/show_ssl_ocspresponse.vtc
new file mode 100644
index 0000000..8b1db16
--- /dev/null
+++ b/reg-tests/ssl/show_ssl_ocspresponse.vtc
@@ -0,0 +1,144 @@
+#REGTEST_TYPE=devel
+
+# broken with BoringSSL.
+
+# This reg-test uses the "show ssl ocsp-response" command to display the details
+# of the OCSP responses used by HAProxy.
+# It also uses the new special cases of the "show ssl cert" command, where an OCSP
+# extension is provided to the certificate name (with or without preceding * for an
+# ongoing transaction).
+#
+# It uses the show_ocsp_server.pem server certificate, signed off by set_cafile_rootCA.crt,
+# which has two OCSP responses, show_ocsp_server.pem.ocsp which is loaded by default and in
+# which it is valid, and show_ocsp_server.pem.ocsp.revoked in which it is revoked.
+# The OCSP response is updated through the two means available in the CLI, the
+# "set ssl ocsp-response" command and the update through a "set ssl cert foo.ocsp".
+#
+# It requires socat to upload the new OCSP responses.
+#
+# If this test does not work anymore:
+# - Check that you have socat
+
+varnishtest "Test the 'show ssl ocsp-response' and 'show ssl cert foo.pem.ocsp' features of the CLI"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(BoringSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v socat && command -v openssl"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ server s1 "${tmpdir}/ssl.sock" ssl ca-file ${testdir}/set_cafile_rootCA.crt verify none
+
+ listen ssl-lst
+ # crt: certificate of the server
+ # ca-file: CA used for client authentication request
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/show_ocsp_server.pem ca-file ${testdir}/set_cafile_rootCA.crt verify none crt-ignore-err all
+ http-response add-header X-SSL-Client-Verify %[ssl_c_verify]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+# Test the "show ssl ocsp-response" command
+haproxy h1 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Responder Id: C = FR, O = HAProxy Technologies, CN = ocsp.haproxy.com"
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Cert Status: good"
+}
+
+# Test the "show ssl ocsp-response" command with a certificate path as parameter
+shell {
+ ocsp_response=$(echo "show ssl ocsp-response ${testdir}/show_ocsp_server.pem" | socat "${tmpdir}/h1/stats" -)
+
+ echo "$ocsp_response" | grep "Responder Id: C = FR, O = HAProxy Technologies, CN = ocsp.haproxy.com" &&
+ echo "$ocsp_response" | grep "Cert Status: good"
+}
+
+# Test the "show ssl cert foo.pem.ocsp" command
+haproxy h1 -cli {
+ send "show ssl cert"
+ expect ~ ".*show_ocsp_server.pem"
+
+ send "show ssl cert ${testdir}/show_ocsp_server.pem"
+ expect ~ "Serial: 100F"
+ send "show ssl cert ${testdir}/show_ocsp_server.pem"
+ expect ~ "OCSP Response Key: 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+
+ send "show ssl cert ${testdir}/show_ocsp_server.pem.ocsp"
+ expect ~ "Responder Id: C = FR, O = HAProxy Technologies, CN = ocsp.haproxy.com"
+ send "show ssl cert ${testdir}/show_ocsp_server.pem.ocsp"
+ expect ~ "Cert Status: good"
+}
+
+
+# Change the server certificate's OCSP response through "set ssl ocsp-response"
+shell {
+ printf "set ssl ocsp-response <<\n$(cat ${testdir}/show_ocsp_server.pem.ocsp.revoked|openssl base64)\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+# Check that the change was taken into account
+haproxy h1 -cli {
+ send "show ssl ocsp-response"
+ expect ~ "Certificate ID key : 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Responder Id: C = FR, O = HAProxy Technologies, CN = ocsp.haproxy.com"
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Cert Status: revoked"
+
+ send "show ssl cert ${testdir}/show_ocsp_server.pem.ocsp"
+ expect ~ "Cert Status: revoked"
+}
+
+
+# Change the server certificate's OCSP response through a transaction
+shell {
+ printf "set ssl cert ${testdir}/show_ocsp_server.pem <<\n$(cat ${testdir}/show_ocsp_server.pem | sed '/^$/d')\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/show_ocsp_server.pem.issuer <<\n$(cat ${testdir}/show_ocsp_server.pem.issuer | sed '/^$/d')\n\n" | socat "${tmpdir}/h1/stats" -
+ printf "set ssl cert ${testdir}/show_ocsp_server.pem.ocsp <<\n$(cat ${testdir}/show_ocsp_server.pem.ocsp|openssl base64)\n\n" | socat "${tmpdir}/h1/stats" -
+}
+
+
+# Check that the actual tree entry was not changed and that the uncommitted
+# transaction's OCSP response is the new one
+haproxy h1 -cli {
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Cert Status: revoked"
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "This Update: Jun 10 08:57:45 2021 GMT"
+
+ send "show ssl cert *${testdir}/show_ocsp_server.pem.ocsp"
+ expect ~ "Cert Status: good"
+ send "show ssl cert *${testdir}/show_ocsp_server.pem.ocsp"
+ expect ~ "This Update: Jun 10 08:55:04 2021 GMT"
+}
+
+
+# Commit the transaction and check that it was taken into account
+haproxy h1 -cli {
+ send "commit ssl cert ${testdir}/show_ocsp_server.pem"
+ expect ~ "Success!"
+
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "Cert Status: good"
+ send "show ssl ocsp-response 303b300906052b0e03021a050004148a83e0060faff709ca7e9b95522a2e81635fda0a0414f652b0e435d5ea923851508f0adbe92d85de007a0202100f"
+ expect ~ "This Update: Jun 10 08:55:04 2021 GMT"
+}
diff --git a/reg-tests/ssl/simple.crt-list b/reg-tests/ssl/simple.crt-list
new file mode 100644
index 0000000..9ffacb4
--- /dev/null
+++ b/reg-tests/ssl/simple.crt-list
@@ -0,0 +1,5 @@
+common.pem record1.bug940.domain.tld
+common.pem record2.bug940.domain.tld
+ecdsa.pem record3.bug940.domain.tld
+ecdsa.pem record4.bug940.domain.tld
+
diff --git a/reg-tests/ssl/ssl_alpn.vtc b/reg-tests/ssl/ssl_alpn.vtc
new file mode 100644
index 0000000..dfc63ac
--- /dev/null
+++ b/reg-tests/ssl/ssl_alpn.vtc
@@ -0,0 +1,212 @@
+#REGTEST_TYPE=devel
+
+# This reg-test verifies that different ALPN values on the "server" line
+# will negotiate the expected protocol depending on the ALPN "bind" line.
+# It requires OpenSSL >= 1.0.2 for ALPN
+
+varnishtest "Test the bind 'alpn' setting"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.8-dev7)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.0.2)'"
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen px-clr
+ bind "fd@${clearfe}"
+ default-server ssl verify none
+
+ # first digit select the alpn sent by the client, second digit, the server one
+ use-server s00 if { path /00 }
+ server s00 "${tmpdir}/ssl0.sock"
+ use-server s01 if { path /01 }
+ server s01 "${tmpdir}/ssl1.sock"
+ use-server s02 if { path /02 }
+ server s02 "${tmpdir}/ssl2.sock"
+ use-server s03 if { path /03 }
+ server s03 "${tmpdir}/ssl3.sock"
+ use-server s04 if { path /04 }
+ server s04 "${tmpdir}/ssl4.sock"
+
+ use-server s10 if { path /10 }
+ server s10 "${tmpdir}/ssl0.sock" alpn http/1.1
+ use-server s11 if { path /11 }
+ server s11 "${tmpdir}/ssl1.sock" alpn http/1.1
+ use-server s12 if { path /12 }
+ server s12 "${tmpdir}/ssl2.sock" alpn http/1.1
+ use-server s13 if { path /13 }
+ server s13 "${tmpdir}/ssl3.sock" alpn http/1.1
+ use-server s14 if { path /14 }
+ server s14 "${tmpdir}/ssl4.sock" alpn http/1.1
+
+ use-server s20 if { path /20 }
+ server s20 "${tmpdir}/ssl0.sock" alpn h2
+ use-server s21 if { path /21 }
+ server s21 "${tmpdir}/ssl1.sock" alpn h2
+ use-server s22 if { path /22 }
+ server s22 "${tmpdir}/ssl2.sock" alpn h2
+ use-server s23 if { path /23 }
+ server s23 "${tmpdir}/ssl3.sock" alpn h2
+ use-server s24 if { path /24 }
+ server s24 "${tmpdir}/ssl4.sock" alpn h2
+
+ use-server s30 if { path /30 }
+ server s30 "${tmpdir}/ssl0.sock" alpn h2,http/1.1
+ use-server s31 if { path /31 }
+ server s31 "${tmpdir}/ssl1.sock" alpn h2,http/1.1
+ use-server s32 if { path /32 }
+ server s32 "${tmpdir}/ssl2.sock" alpn h2,http/1.1
+ use-server s33 if { path /33 }
+ server s33 "${tmpdir}/ssl3.sock" alpn h2,http/1.1
+ use-server s34 if { path /34 }
+ server s34 "${tmpdir}/ssl4.sock" alpn h2,http/1.1
+
+ frontend fe-ssl
+ bind "${tmpdir}/ssl0.sock" ssl crt ${testdir}/common.pem
+ bind "${tmpdir}/ssl1.sock" ssl crt ${testdir}/common.pem alpn http/1.1
+ bind "${tmpdir}/ssl2.sock" ssl crt ${testdir}/common.pem alpn h2
+ bind "${tmpdir}/ssl3.sock" ssl crt ${testdir}/common.pem alpn h2,http/1.1
+ bind "${tmpdir}/ssl4.sock" ssl crt ${testdir}/common.pem no-alpn
+ http-request return status 200 hdr x-alpn _%[ssl_fc_alpn] hdr x-path %[path] hdr x-ver _%[req.ver]
+} -start
+
+# client sends no alpn
+client c1 -connect ${h1_clearfe_sock} {
+ txreq -url "/00"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/01"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/02"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/03"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/04"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+} -run
+
+# client sends alpn=http/1.1
+client c1 -connect ${h1_clearfe_sock} {
+ txreq -url "/10"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_http/1.1"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/11"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_http/1.1"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/12"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/13"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_http/1.1"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/14"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+} -run
+
+# client sends alpn=h2
+client c1 -connect ${h1_clearfe_sock} {
+ txreq -url "/20"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/21"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/22"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/23"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/24"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+} -run
+
+# client sends alpn=h2,http/1.1
+client c1 -connect ${h1_clearfe_sock} {
+ txreq -url "/30"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/31"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_http/1.1"
+ expect resp.http.x-ver == "_1.1"
+
+ txreq -url "/32"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/33"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_h2"
+ expect resp.http.x-ver == "_2.0"
+
+ txreq -url "/34"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-alpn == "_"
+ expect resp.http.x-ver == "_1.1"
+} -run
diff --git a/reg-tests/ssl/ssl_client_auth.vtc b/reg-tests/ssl/ssl_client_auth.vtc
new file mode 100644
index 0000000..ab8ba18
--- /dev/null
+++ b/reg-tests/ssl/ssl_client_auth.vtc
@@ -0,0 +1,76 @@
+#REGTEST_TYPE=devel
+
+# This reg-test tests the client auth feature of HAProxy for both the backend
+# and frontend section with a CRL list
+#
+# This reg-test uses 2 chained listeners because vtest does not handle the SSL.
+# Test the frontend client auth and the backend side at the same time.
+#
+# The client sends 3 requests: one with a correct certificate, one with an expired one and one which was revoked.
+# The client then checks that it received the right response with the right verify error.
+#
+# Certificates, CA and CRL are expiring in 2050 so it should be fine for the CI.
+#
+# Detail about configuration is explained there:
+# https://www.haproxy.com/blog/ssl-client-certificate-management-at-application-level/
+
+varnishtest "Test the client auth"
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ # crt: certificate sent for a client certificate request
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client1.pem
+ server s2 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client2_expired.pem # expired
+ server s3 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client3_revoked.pem # revoked
+
+ listen ssl-lst
+ # crt: certificate of the server
+ # ca-file: CA used for client authentication request
+ # crl-file: revocation list for client auth: the client1 certificate is revoked
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/ca-auth.crt verify optional crt-ignore-err X509_V_ERR_CERT_REVOKED,X509_V_ERR_CERT_HAS_EXPIRED crl-file ${testdir}/crl-auth.pem
+
+ http-response add-header X-SSL %[ssl_c_verify,x509_v_err_str]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "X509_V_OK"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "X509_V_ERR_CERT_HAS_EXPIRED"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "X509_V_ERR_CERT_REVOKED"
+} -run
diff --git a/reg-tests/ssl/ssl_client_samples.vtc b/reg-tests/ssl/ssl_client_samples.vtc
new file mode 100644
index 0000000..5a84e4b
--- /dev/null
+++ b/reg-tests/ssl/ssl_client_samples.vtc
@@ -0,0 +1,74 @@
+#REGTEST_TYPE=devel
+
+varnishtest "Test the ssl_c_* sample fetches"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.8-dev11)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.1.1)'"
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client1.pem
+
+ listen ssl-lst
+ mode http
+
+ http-response add-header x-ssl-der %[ssl_c_der,hex]
+ http-response add-header x-ssl-chain-der %[ssl_c_chain_der,hex]
+ http-response add-header x-ssl-sha1 %[ssl_c_sha1,hex]
+ http-response add-header x-ssl-notafter %[ssl_c_notafter]
+ http-response add-header x-ssl-notbefore %[ssl_c_notbefore]
+ http-response add-header x-ssl-sig_alg %[ssl_c_sig_alg]
+ http-response add-header x-ssl-i_dn %[ssl_c_i_dn]
+ http-response add-header x-ssl-s_dn %[ssl_c_s_dn]
+ http-response add-header x-ssl-r_dn %[ssl_c_r_dn]
+ http-response add-header x-ssl-s_serial %[ssl_c_serial,hex]
+ http-response add-header x-ssl-key_alg %[ssl_c_key_alg]
+ http-response add-header x-ssl-version %[ssl_c_version]
+
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/ca-auth.crt verify optional crt-ignore-err all crl-file ${testdir}/crl-auth.pem
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-der ~ 3082052D30820315020102300D0.*995ED3BE2BFB923A3EB71FA07002E
+ expect resp.http.x-ssl-chain-der ~ 3082096B30820553A0030201020.*0237D08F425C8414A23D436415502
+ expect resp.http.x-ssl-sha1 == "D9C3BAE37EA5A7EDB7B3C9BDD4DCB2FE58A412E4"
+ expect resp.http.x-ssl-notafter == "500421185942Z"
+ expect resp.http.x-ssl-notbefore == "200428185942Z"
+ expect resp.http.x-ssl-sig_alg == "RSA-SHA256"
+ expect resp.http.x-ssl-i_dn == "/C=FR/ST=Some-State/O=HAProxy Technologies/CN=HAProxy Technologies CA Test Client Auth"
+ expect resp.http.x-ssl-s_dn == "/C=FR/O=HAProxy Technologies Test/CN=client1"
+ expect resp.http.x-ssl-r_dn == "/C=FR/ST=Some-State/O=HAProxy Technologies/CN=HAProxy Technologies CA Test Client Auth"
+ expect resp.http.x-ssl-s_serial == "02"
+ expect resp.http.x-ssl-key_alg == "rsaEncryption"
+ expect resp.http.x-ssl-version == "1"
+} -run
+
+
diff --git a/reg-tests/ssl/ssl_crt-list_filters.vtc b/reg-tests/ssl/ssl_crt-list_filters.vtc
new file mode 100644
index 0000000..e98efb7
--- /dev/null
+++ b/reg-tests/ssl/ssl_crt-list_filters.vtc
@@ -0,0 +1,124 @@
+#REGTEST_TYPE=bug
+varnishtest "Test for ECDSA/RSA selection and crt-list filters"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.8)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && ssllib_name_startswith(OpenSSL) && openssl_version_atleast(1.1.1)'"
+# This test checks that the multiple certificate types work correctly with the
+# SNI, and that the negative filters are correctly excluded
+#
+# The selection is done with ciphers in TLSv1.2 and with the sigalgs in TLSv1.3
+#
+feature ignore_unknown_macro
+
+server s1 -repeat 6 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ crt-base ${testdir}
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ retries 0
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+
+ http-response add-header x-ssl-sha1 '%[ssl_s_sha1,hex]'
+ http-response add-header x-ssl-keyalg '%[ssl_s_key_alg]'
+
+## TLSv1.2
+
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "kRSA"
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "aECDSA"
+
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug818.domain.tld) ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "kRSA"
+
+## TLSv1.3
+
+ server s4 "${tmpdir}/ssl2.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.3 sigalgs rsa_pss_rsae_sha384:rsa_pkcs1_sha256:ecdsa_secp384r1_sha384
+ server s5 "${tmpdir}/ssl2.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.3 sigalgs rsa_pss_rsae_sha384:rsa_pkcs1_sha256
+ server s6 "${tmpdir}/ssl2.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.3 sigalgs ecdsa_secp384r1_sha384
+
+ server s7 "${tmpdir}/ssl2.sock" ssl verify none sni str(another-record.bug818.domain.tld) ssl-min-ver TLSv1.3 sigalgs rsa_pss_rsae_sha384:rsa_pkcs1_sha256
+
+
+ listen ssl-lst
+ mode http
+ bind "${tmpdir}/ssl.sock" ssl strict-sni ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 crt-list ${testdir}/filters.crt-list
+ bind "${tmpdir}/ssl2.sock" ssl strict-sni ssl-min-ver TLSv1.3 ssl-max-ver TLSv1.3 crt-list ${testdir}/filters.crt-list
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+## TLSv1.2
+
+# RSA + TLSv1.2 + another-record.bug810.domain.tld OK
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-keyalg == "rsaEncryption"
+} -run
+
+# ECDSA + TLSv1.2 + another-record.bug810.domain.tld OK
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-keyalg == "id-ecPublicKey"
+} -run
+
+# RSA + TLSv1.2 + another-record.bug818.domain.tld must fail: the domain is not
+# available in RSA because of the '!another-record.bug818.domain.tld' in the configuration.
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
+
+## TLSv1.3
+
+# ECDSA/RSA sigalgs + TLSv1.3 + another-record.bug810.domain.tld should return the ECDSA cert
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-keyalg == "id-ecPublicKey"
+} -run
+
+# RSA sigalgs + TLSv1.3 + another-record.bug810.domain.tld should return the RSA cert
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-keyalg == "rsaEncryption"
+} -run
+
+
+# ECDSA sigalgs + TLSv1.3 + another-record.bug810.domain.tld should return the ECDSA cert
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-keyalg == "id-ecPublicKey"
+} -run
+
+# RSA sigalgs + TLSv1.3 + another-record.bug818.domain.tld must fail because
+# this domain is not available with RSA
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 503
+} -run
diff --git a/reg-tests/ssl/ssl_curve_name.vtc b/reg-tests/ssl/ssl_curve_name.vtc
new file mode 100644
index 0000000..a285a8f
--- /dev/null
+++ b/reg-tests/ssl/ssl_curve_name.vtc
@@ -0,0 +1,51 @@
+#REGTEST_TYPE=devel
+
+varnishtest "Test the ssl_fc_curve/ssl_bc_curve sample fetches"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && ssllib_name_startswith(OpenSSL) && openssl_version_atleast(3.0.0)'"
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ http-response add-header x-ssl-bc-curve-name %[ssl_bc_curve]
+ server s1 "${tmpdir}/ssl.sock" ssl verify none crt ${testdir}/client.ecdsa.pem
+
+ listen ssl-lst
+ mode http
+ http-response add-header x-ssl-fc-curve-name %[ssl_fc_curve]
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional curves X25519:P-256:P-384
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-fc-curve-name == "X25519"
+ expect resp.http.x-ssl-bc-curve-name == "X25519"
+
+} -run
+
diff --git a/reg-tests/ssl/ssl_curves.vtc b/reg-tests/ssl/ssl_curves.vtc
new file mode 100644
index 0000000..6a8b1b6
--- /dev/null
+++ b/reg-tests/ssl/ssl_curves.vtc
@@ -0,0 +1,134 @@
+#REGTEST_TYPE=devel
+
+# This reg-test checks the behaviour of the 'curves' and 'ecdhe' options on a
+# bind line. Its main point is to ensure that the default curve used in
+# HAProxy is indeed prime256v1 (or P-256 depending on the curve's
+# representation). In order to check this, it uses two ssl frontends that have
+# different lists of accepted curves, one of them accepting this default curve
+# while the other one does not. A backend tries to connect to those two
+# frontends by using the default curve, and it should succeed in one case and
+# fail in the other.
+# For some strange reason, OpenSSL 1.0.2 does not behave the same way as later
+# versions when it comes to ECDH and curves related matters. Instead of trying
+# to make it work the same way as the other (more used) versions, we will
+# ignore it and disable this test on OpenSSL 1.0.2.
+# For the same reason, this test is disabled for other SSL libraries as well.
+#
+
+varnishtest "Test the 'curves' and 'ecdhe' options and default curve value"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && ssllib_name_startswith(OpenSSL) && openssl_version_atleast(1.1.1)'"
+feature ignore_unknown_macro
+
+server s1 -repeat 2 {
+ rxreq
+ txresp
+} -start
+
+barrier b1 cond 2 -cyclic
+
+syslog Slg_cust_fmt -level info {
+ recv
+ expect ~ "ERROR.*conn_status:\"34:SSL handshake failure\" hsk_err:\".*wrong curve\".*"
+
+ barrier b1 sync
+
+ recv
+ expect ~ "ERROR ECDHE.*conn_status:\"34:SSL handshake failure\" hsk_err:\".*wrong curve\".*"
+} -start
+
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+
+ defaults
+ mode http
+ option httpslog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+
+ use_backend ssl-curves-be if { path /curves }
+ use_backend ssl-ecdhe-521-be if { path /ecdhe-521 }
+ use_backend ssl-ecdhe-256-be if { path /ecdhe-256 }
+ default_backend ssl-be
+
+ backend ssl-be
+ server s1 "${tmpdir}/ssl1.sock" ssl verify none crt ${testdir}/client.ecdsa.pem force-tlsv12 curves P-256:P-384
+
+ backend ssl-curves-be
+ server s1 "${tmpdir}/ssl2.sock" ssl verify none crt ${testdir}/client.ecdsa.pem force-tlsv12 curves P-384
+
+ backend ssl-ecdhe-256-be
+ server s1 "${tmpdir}/ssl-ecdhe-256.sock" ssl verify none crt ${testdir}/client.ecdsa.pem force-tlsv12
+
+ backend ssl-ecdhe-521-be
+ server s1 "${tmpdir}/ssl-ecdhe-521.sock" ssl verify none crt ${testdir}/client.ecdsa.pem force-tlsv12
+
+
+ listen ssl1-lst
+ bind "${tmpdir}/ssl1.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional curves P-256:P-384
+ server s1 ${s1_addr}:${s1_port}
+
+ # The prime256v1 curve, which is used by default by a backend when no
+ # 'curves' or 'ecdhe' option is specified, is not allowed on this listener
+ listen ssl2-lst
+ log ${Slg_cust_fmt_addr}:${Slg_cust_fmt_port} local0
+ error-log-format "ERROR conn_status:\"%[fc_err]:%[fc_err_str]\" hsk_err:%{+Q}[ssl_fc_err_str]"
+
+ bind "${tmpdir}/ssl2.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional curves P-384
+ server s1 ${s1_addr}:${s1_port}
+
+ listen ssl-ecdhe-521-lst
+ log ${Slg_cust_fmt_addr}:${Slg_cust_fmt_port} local0
+ error-log-format "ERROR ECDHE-521 conn_status:\"%[fc_err]:%[fc_err_str]\" hsk_err:%{+Q}[ssl_fc_err_str]"
+
+ bind "${tmpdir}/ssl-ecdhe-521.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional ecdhe secp521r1
+ server s1 ${s1_addr}:${s1_port}
+
+ listen ssl-ecdhe-256-lst
+ log ${Slg_cust_fmt_addr}:${Slg_cust_fmt_port} local0
+ error-log-format "ERROR ECDHE-256 conn_status:\"%[fc_err]:%[fc_err_str]\" hsk_err:%{+Q}[ssl_fc_err_str]"
+
+ bind "${tmpdir}/ssl-ecdhe-256.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional ecdhe prime256v1
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+# The backend tries to use the prime256v1 curve that is not accepted by the
+# frontend so the handshake should fail.
+client c2 -connect ${h1_clearlst_sock} {
+ txreq -url "/curves"
+ rxresp
+ expect resp.status == 503
+} -run
+
+barrier b1 sync
+
+# The backend tries to use the prime256v1 curve that is not accepted by the
+# frontend so the handshake should fail.
+client c3 -connect ${h1_clearlst_sock} {
+ txreq -url "/ecdhe-521"
+ rxresp
+ expect resp.status == 503
+} -run
+
+client c4 -connect ${h1_clearlst_sock} {
+ txreq -url "/ecdhe-256"
+ rxresp
+ expect resp.status == 200
+} -run
+
+syslog Slg_cust_fmt -wait
diff --git a/reg-tests/ssl/ssl_default_server.vtc b/reg-tests/ssl/ssl_default_server.vtc
new file mode 100644
index 0000000..485a9ba
--- /dev/null
+++ b/reg-tests/ssl/ssl_default_server.vtc
@@ -0,0 +1,142 @@
+#REGTEST_TYPE=devel
+
+# This reg-test ensures that SSL related configuration specified in a
+# default-server option are properly taken into account by the servers
+# (frontend). It mainly focuses on the client certificate used by the frontend,
+# that can either be defined in the server line itself, in the default-server
+# line or in both.
+#
+# It was created following a bug raised in redmine (issue #3906) in which a
+# server used an "empty" SSL context instead of the proper one.
+#
+
+varnishtest "Test that SSL settings in a default-server line are properly used"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+
+server s1 -repeat 7 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ crt-base ${testdir}
+ ca-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ use_backend first_be if { path /first }
+ use_backend second_be if { path /second }
+ use_backend third_be if { path /third }
+ use_backend fourth_be if { path /fourth }
+ use_backend fifth_be if { path /fifth }
+
+
+ backend first_be
+ default-server ssl crt client1.pem ca-file ca-auth.crt verify none
+ server s1 "${tmpdir}/ssl.sock"
+
+ backend second_be
+ default-server ssl ca-file ca-auth.crt verify none
+ server s1 "${tmpdir}/ssl.sock" crt client1.pem
+
+ backend third_be
+ default-server ssl crt client1.pem ca-file ca-auth.crt verify none
+ server s1 "${tmpdir}/ssl.sock" crt client2_expired.pem
+
+ backend fourth_be
+ default-server ssl crt client1.pem verify none
+ server s1 "${tmpdir}/ssl.sock" ca-file ca-auth.crt
+
+ backend fifth_be
+ balance roundrobin
+ default-server ssl crt client1.pem verify none
+ server s1 "${tmpdir}/ssl.sock"
+ server s2 "${tmpdir}/ssl.sock" crt client2_expired.pem
+ server s3 "${tmpdir}/ssl.sock"
+
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem ca-file ca-auth.crt verify required crt-ignore-err all
+
+ acl cert_expired ssl_c_verify 10
+ acl cert_revoked ssl_c_verify 23
+ acl cert_ok ssl_c_verify 0
+
+ http-response add-header X-SSL Ok if cert_ok
+ http-response add-header X-SSL Expired if cert_expired
+ http-response add-header X-SSL Revoked if cert_revoked
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/first"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Ok"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/second"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Ok"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/third"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Expired"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/fourth"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Ok"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/fifth"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Ok"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/fifth"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Expired"
+} -run
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq -url "/fifth"
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl == "Ok"
+} -run
diff --git a/reg-tests/ssl/ssl_dh.vtc b/reg-tests/ssl/ssl_dh.vtc
new file mode 100644
index 0000000..5fe7c88
--- /dev/null
+++ b/reg-tests/ssl/ssl_dh.vtc
@@ -0,0 +1,234 @@
+#REGTEST_TYPE=devel
+
+# This reg-test checks that the DH-related mechanisms work properly.
+# When no DH is specified, either directly in the server's PEM or through a
+# ssl-dh-param-file global option, and no tune.ssl.default-dh-param is defined,
+# DHE ciphers are disabled.
+# If a default-dh-param is defined, we will use DH parameters of the same size
+# as the server's RSA or DSA key, or default-dh-param if it is smaller.
+# This test has three distinct HAProxy instances, one with no DH-related option
+# used, one with the tune.ssl.default-dh-param global parameter set, and one
+# with an ssl-dh-param-file global option.
+# We use "openssl s_client" calls in order to check the size of the "Server
+# Temp Key" (which will be the same as the DH parameters in case a DHE cipher
+# is used).
+#
+# The main goal of this test was to check that the newly added OpenSSLv3
+# specific DH code worked as before, since it needed to be created in order to
+# stop using deprecated APIs.
+
+varnishtest "Test the DH related SSL options"
+# AWS-LC does not support any FFDH ciphersuites
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(AWS-LC) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v openssl && command -v grep && command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 8 {
+ rxreq
+ txresp
+} -start
+
+
+haproxy h1 -conf {
+ global
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httpslog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+
+ frontend clear-fe
+ bind "fd@${clearlst}"
+ use_backend gen_cert_be if { path /gencert }
+ default_backend dflt_be
+
+ backend dflt_be
+ server s1 "${tmpdir}/ssl_dflt.sock" ssl verify none ssl-max-ver TLSv1.2
+
+ backend gen_cert_be
+ server s1 "${tmpdir}/ssl_dflt_gencert.sock" ssl verify none ssl-max-ver TLSv1.2
+
+ listen ssl-dflt-lst
+ bind "${tmpdir}/ssl_dflt.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional ciphers "DHE-RSA-AES256-GCM-SHA384" ssl-max-ver TLSv1.2
+ http-response set-header x-ssl-cipher %[ssl_fc_cipher]
+ server s1 ${s1_addr}:${s1_port}
+
+ listen ssl-dflt-gencert-lst
+ bind "${tmpdir}/ssl_dflt_gencert.sock" ssl generate-certificates crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt ca-sign-file ${testdir}/generate_certificates/gen_cert_ca.pem verify optional ciphers "DHE-RSA-AES256-GCM-SHA384" ssl-max-ver TLSv1.2
+ http-response set-header x-ssl-cipher %[ssl_fc_cipher]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+haproxy h2 -conf {
+ global
+ stats socket "${tmpdir}/h2/stats" level admin
+
+ global
+ tune.ssl.default-dh-param 4096
+
+ defaults
+ mode http
+ option httpslog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+
+ listen clear-lst
+ bind "fd@${clearlst_dfltdh}"
+ server s1 "${tmpdir}/ssl_dfltdh.sock" ssl verify none ssl-max-ver TLSv1.2
+
+ listen ssl-4096dh-dflt-lst
+ bind "${tmpdir}/ssl_dfltdh.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional ciphers "DHE-RSA-AES256-GCM-SHA384" ssl-max-ver TLSv1.2
+ http-response set-header x-ssl-cipher %[ssl_fc_cipher]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+haproxy h3 -conf {
+ global
+ stats socket "${tmpdir}/h3/stats" level admin
+
+ global
+ ssl-dh-param-file ${testdir}/common.4096.dh
+
+ defaults
+ mode http
+ option httpslog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+
+ listen clear-lst
+ bind "fd@${clearlst_dhfile}"
+ server s1 "${tmpdir}/ssl_dhfile.sock" ssl verify none ssl-max-ver TLSv1.2
+
+ listen ssl-dhfile-lst
+ bind "${tmpdir}/ssl_dhfile.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/set_cafile_rootCA.crt verify optional ciphers "DHE-RSA-AES256-GCM-SHA384" ssl-max-ver TLSv1.2
+ http-response set-header x-ssl-cipher %[ssl_fc_cipher]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+#
+# Check that all the SSL backend <-> SSL frontend connections work
+#
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ # No DH parameters are defined, DHE ciphers are unavailable
+ expect resp.status == 503
+} -run
+
+client c2 -connect ${h2_clearlst_dfltdh_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+client c3 -connect ${h3_clearlst_dhfile_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+client c4 -connect ${h1_clearlst_sock} {
+ txreq -url "/gencert"
+ rxresp
+ # No DH parameters are defined, DHE ciphers are unavailable
+ expect resp.status == 503
+} -run
+
+
+# On the second HAProxy instance, even if default-dh-param is set to 4096, this
+# value is only considered as a maximum DH key length and we will always try to
+# match the server's certificate key length in our DHE key exchange (2048 bits
+# in the case of common.pem).
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dfltdh.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 2048 bits"
+}
+
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dhfile.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 4096 bits"
+}
+
+
+#
+# Add a custom DH to the server's PEM certificate
+#
+shell {
+ printf "set ssl cert ${testdir}/common.pem <<\n$(cat ${testdir}/common.pem)\n$(cat ${testdir}/common.4096.dh)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl cert ${testdir}/common.pem" | socat "${tmpdir}/h1/stats" -
+
+ printf "set ssl cert ${testdir}/common.pem <<\n$(cat ${testdir}/common.pem)\n$(cat ${testdir}/common.4096.dh)\n\n" | socat "${tmpdir}/h2/stats" -
+ echo "commit ssl cert ${testdir}/common.pem" | socat "${tmpdir}/h2/stats" -
+
+ printf "set ssl cert ${testdir}/common.pem <<\n$(cat ${testdir}/common.pem)\n$(cat ${testdir}/common.4096.dh)\n\n" | socat "${tmpdir}/h3/stats" -
+ echo "commit ssl cert ${testdir}/common.pem" | socat "${tmpdir}/h3/stats" -
+}
+
+
+#
+# Check that all the SSL backend <-> SSL frontend connections still work
+# Common.pem now contains DH parameters so the first instance's frontends
+# can now use DHE ciphers.
+#
+client c5 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+client c6 -connect ${h2_clearlst_dfltdh_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+client c7 -connect ${h3_clearlst_dhfile_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+client c8 -connect ${h1_clearlst_sock} {
+ txreq -url "/gencert"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-cipher == "DHE-RSA-AES256-GCM-SHA384"
+} -run
+
+
+
+#
+# Check the new size of the DH key
+#
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dflt.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 4096 bits"
+}
+
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dfltdh.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 4096 bits"
+}
+
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dhfile.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 4096 bits"
+}
+
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_dflt_gencert.sock" -tls1_2 2>/dev/null | grep -E "Server Temp Key: DH, 4096 bits"
+}
diff --git a/reg-tests/ssl/ssl_errors.vtc b/reg-tests/ssl/ssl_errors.vtc
new file mode 100644
index 0000000..8fb9c5a
--- /dev/null
+++ b/reg-tests/ssl/ssl_errors.vtc
@@ -0,0 +1,439 @@
+#REGTEST_TYPE=devel
+
+# This reg-test checks that the connection and SSL sample fetches related to
+# errors are functioning properly. It also tests the proper behaviour of the
+# default HTTPS log format and of the error-log-format option which allows to
+# define a specific log format used only in case of connection error (otherwise
+# a line following the configured log-format is output).
+#
+# It works by sending request through three different paths, one using a custom
+# log-format line that contains the connection error and SSL handshake error
+# sample fetches, one using the default HTTPS log-format and one using the
+# legacy error log format.
+#
+# The output log lines are caught by syslog blocks (one for each path) and
+# compared to an expected format.
+# Since the syslog is not by design synchronized with the Varnish clients and
+# servers, synchronization is achieved through barriers, which ensure that
+# syslog messages arrive in the right order.
+#
+# In order to ensure that the log line raised in case of connection error if an
+# error-log-format is defined still follows the log-separate-error option, the
+# log lines raised by the https_fmt_lst listener will be sent to two separate
+# syslog servers.
+#
+
+varnishtest "Test the connection and SSL error fetches."
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev2)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && ssllib_name_startswith(OpenSSL)'"
+feature cmd "command -v socat"
+feature ignore_unknown_macro
+
+server s1 -repeat 4 {
+ rxreq
+ txresp
+} -start
+
+barrier b1 cond 4 -cyclic
+barrier b2 cond 2 -cyclic
+
+
+syslog Slg_cust_fmt -level info {
+ recv
+ expect ~ ".*conn_status:\"0:Success\" hsk_err:\"0:-\" CN=\"/C=FR/O=HAProxy Technologies/CN=Client\",serial=1007,hash=063DCC2E6A9159E66994B325D6D2EF3D17A75B6F"
+
+ barrier b1 sync
+
+ recv
+ expect ~ "ERROR.*conn_status:\"30:SSL client CA chain cannot be verified\" hsk_err:\"134:.*:certificate verify failed\" CN=\"/C=FR/O=HAProxy Technologies/CN=Client\",serial=1007,hash=063DCC2E6A9159E66994B325D6D2EF3D17A75B6F"
+
+ barrier b1 sync
+
+ recv
+ expect ~ "ERROR.*conn_status:\"31:SSL client certificate not trusted\" hsk_err:\"134:.*:certificate verify failed\" CN=\"/C=FR/O=HAProxy Technologies/CN=Client\",serial=1007,hash=063DCC2E6A9159E66994B325D6D2EF3D17A75B6F"
+
+ barrier b1 sync
+
+ # In case of an error occurring before the certificate verification process,
+ # the client certificate chain is never parsed and verified so we can't
+ # have information about the client's certificate.
+ recv
+ expect ~ "ERROR.*conn_status:\"34:SSL handshake failure\" hsk_err:\"193:.*:no shared cipher\" CN=\"\",serial=-,hash=-"
+} -start
+
+syslog Slg_https_fmt -level info {
+ recv
+ expect ~ ".*https_logfmt_ssl_lst~ https_logfmt_ssl_lst/s1.*0/0000000000000000/0/0/.? foo.com/TLSv1.2/AES256-GCM-SHA384"
+
+ barrier b1 sync
+} -start
+
+syslog Slg_https_fmt_err -level info {
+ recv
+ expect ~ "ERROR.*https_logfmt_ssl_lst~ https_logfmt_ssl_lst/<NOSRV>.*30/0000000000000086/0/2/.? foo.com/TLSv1.2/\\(NONE\\)"
+
+ barrier b1 sync
+
+ recv
+ expect ~ "ERROR.*https_logfmt_ssl_lst~ https_logfmt_ssl_lst/<NOSRV>.*31/0000000000000086/20/0/.? foo.com/TLSv1.2/\\(NONE\\)"
+
+ barrier b1 sync
+
+ recv
+ expect ~ "ERROR.*https_logfmt_ssl_lst~ https_logfmt_ssl_lst/<NOSRV>.*34/00000000000000C1/0/0/.? foo.com/TLSv1.2/\\(NONE\\)"
+} -start
+
+syslog Slg_logconnerror -level info {
+ recv
+ expect ~ ".*logconnerror_ssl_lst~ logconnerror_ssl_lst/s1"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*logconnerror_ssl_lst/1: SSL client CA chain cannot be verified"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*logconnerror_ssl_lst/1: SSL client certificate not trusted"
+
+ barrier b1 sync
+
+ recv
+ expect ~ ".*logconnerror_ssl_lst/1: SSL handshake failure"
+} -start
+
+syslog Slg_bcknd -level info {
+ recv
+ expect ~ ".*bc_err:0:\"Success\" ssl_bc_err:0:"
+
+ barrier b2 sync
+
+ recv
+ expect ~ ".*bc_err:34:\"SSL handshake failure\" ssl_bc_err:134:.*:certificate verify failed"
+
+ barrier b2 sync
+
+ recv
+ expect ~ ".*bc_err:33:\"Server presented an SSL certificate different from the expected one\" ssl_bc_err:134:.*:certificate verify failed"
+
+ barrier b2 sync
+
+ # Verify errors on the server side cannot be caught when using TLSv1.3 but it works for TLSv1.2
+ recv
+ expect ~ ".*bc_err:34:\"SSL handshake failure\" ssl_bc_err:1048:.*:tlsv1 alert unknown ca"
+
+ barrier b2 sync
+
+ recv
+ expect ~ ".*bc_err:34:\"SSL handshake failure\" ssl_bc_err:1040:.* alert handshake failure"
+
+ barrier b2 sync
+
+ recv
+ expect ~ ".*bc_err:34:\"SSL handshake failure\" ssl_bc_err:1040:.* alert handshake failure"
+} -start
+
+syslog Slg_bcknd_fe -level info {
+ # Client c13 - No error
+ # Depending on the version of OpenSSL, the TLS version and ciphersuite will change
+ recv
+ expect ~ ".* Server/(TLSv1.3/TLS_AES_256_GCM_SHA384|TLSv1.2/ECDHE-RSA-AES256-GCM-SHA384)"
+
+ # Client c14 - Server certificate rejected
+ # Depending on the version of OpenSSL, the TLS version and ciphersuite will change
+ recv
+ expect ~ ".* foo.com/(TLSv1.3/TLS_AES_256_GCM_SHA384|TLSv1.2/\\(NONE\\))"
+
+ # Client c15 - Server certificate mismatch (verifyhost option on backend)
+ # Depending on the version of OpenSSL, the TLS version and ciphersuite will change
+ recv
+ expect ~ ".* foo.com/(TLSv1.3/TLS_AES_256_GCM_SHA384|TLSv1.2/\\(NONE\\))"
+
+ # Client c16 - Client certificate rejected
+ recv
+ expect ~ ".* foo.com/TLSv1.2/\\(NONE\\)"
+
+ # Client c17 - Wrong ciphers TLSv1.2
+ recv
+ expect ~ ".* foo.com/TLSv1.2/\\(NONE\\)"
+
+ # Client c18
+ # With OpenSSL1.0.2 -Wrong ciphers TLSv1.2 (same as c17)
+ # With newer versions - Wrong ciphers TLSv1.3 - the client does not get to send its certificate because the error happens before
+ recv
+ expect ~ ".* (foo.com/TLSv1.2|-/TLSv1.3)/\\(NONE\\)"
+} -start
+
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ stats socket "${tmpdir}/h1/stats" level admin
+ .if openssl_version_atleast(3.0.0)
+ set-var proc.ssl_error_mask str(7FFFFF),hex2i
+ .else
+ set-var proc.ssl_error_mask str(FFF),hex2i
+ .endif
+
+ defaults
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+
+ listen clear_lst
+ bind "fd@${clearlst}"
+ default-server ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none no-ssl-reuse force-tlsv12 sni str(foo.com)
+
+ balance roundrobin
+ server cust_fmt "${tmpdir}/cust_logfmt_ssl.sock"
+ server https_fmt "${tmpdir}/https_logfmt_ssl.sock"
+ server logconnerror "${tmpdir}/logconnerror_ssl.sock"
+
+
+ listen clear_wrong_ciphers_lst
+ bind "fd@${wrongcipherslst}"
+ default-server ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none no-ssl-reuse force-tlsv12 ciphers "aECDSA" sni str(foo.com)
+
+ balance roundrobin
+ server cust_fmt "${tmpdir}/cust_logfmt_ssl.sock"
+ server https_fmt "${tmpdir}/https_logfmt_ssl.sock"
+ server logconnerror "${tmpdir}/logconnerror_ssl.sock"
+
+
+ # This listener will be used to test backend fetches (bc_err and ssl_bc_err)
+ listen clear_backend_errors_lst
+ bind "fd@${backenderrorslst}"
+ log ${Slg_bcknd_addr}:${Slg_bcknd_port} local0
+ log-format "bc_err:%[bc_err]:%{+Q}[bc_err_str]\ ssl_bc_err:%[ssl_bc_err,and(proc.ssl_error_mask)]:%{+Q}[ssl_bc_err_str]"
+ error-log-format "ERROR bc_err:%[bc_err]:%{+Q}[bc_err_str]\ ssl_bc_err:%[ssl_bc_err,and(proc.ssl_error_mask)]:%[ssl_bc_err_str]"
+
+ balance roundrobin
+ server no_err "${tmpdir}/no_err_ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt verify required sni str(Server)
+ server srv_cert_rejected "${tmpdir}/srv_rejected_ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA1.crt verify required sni str(foo.com)
+ server mismatch_frontend "${tmpdir}/mismatch_fe_ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt verify required sni str(foo.com) verifyhost str(toto) # We force TLSv1.2 for this specific case because server-side
+ # verification errors cannot be caught by the backend fetches when
+ # using TLSv1.3
+ server clt_cert_rejected "${tmpdir}/rejected_ssl.sock" ssl crt ${testdir}/set_cafile_client.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none force-tlsv12 sni str(foo.com)
+ server wrong_ciphers "${tmpdir}/wrong_ciphers_ssl.sock" ssl verify none crt ${testdir}/client1.pem ca-file ${testdir}/ca-auth.crt force-tlsv12 ciphers "aECDSA" sni str(foo.com)
+
+ # No TLSv1.3 support with OpenSSL 1.0.2 so we duplicate the previous
+ # wrong cipher test in this case so that the error log remains the same
+.if openssl_version_before(1.1.1)
+ server wrong_ciphers2 "${tmpdir}/wrong_ciphers_ssl.sock" ssl verify none crt ${testdir}/client1.pem ca-file ${testdir}/ca-auth.crt force-tlsv12 ciphers "aECDSA" sni str(foo.com)
+.else
+ server wrong_ciphers_tls13 "${tmpdir}/wrong_ciphers_tls13_ssl.sock" ssl verify none crt ${testdir}/client1.pem ca-file ${testdir}/ca-auth.crt ciphersuites "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256" force-tlsv13 sni str(foo.com)
+.endif
+
+
+
+
+ listen cust_logfmt_ssl_lst
+ log ${Slg_cust_fmt_addr}:${Slg_cust_fmt_port} local0
+ mode http
+ log-format "conn_status:\"%[fc_err]:%[fc_err_str]\" hsk_err:\"%[ssl_fc_err]:%[ssl_fc_err_str]\" CN=%{+Q}[ssl_c_s_dn],serial=%[ssl_c_serial,hex],hash=%[ssl_c_sha1,hex]"
+ error-log-format "ERROR conn_status:\"%[fc_err]:%[fc_err_str]\" hsk_err:\"%[ssl_fc_err,and(proc.ssl_error_mask)]:%[ssl_fc_err_str]\" CN=%{+Q}[ssl_c_s_dn],serial=%[ssl_c_serial,hex],hash=%[ssl_c_sha1,hex]"
+ bind "${tmpdir}/cust_logfmt_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-verify-file ${testdir}/set_cafile_rootCA.crt ca-file ${testdir}/set_cafile_interCA1.crt verify required ciphers "kRSA"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen https_logfmt_ssl_lst
+ log ${Slg_https_fmt_addr}:${Slg_https_fmt_port} local0 info
+ log ${Slg_https_fmt_err_addr}:${Slg_https_fmt_err_port} local0 err info
+ option log-separate-errors
+ mode http
+ option httpslog
+ error-log-format "ERROR %ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r %[fc_err]/%[ssl_fc_err,and(proc.ssl_error_mask),hex]/%[ssl_c_err]/%[ssl_c_ca_err]/%[ssl_fc_is_resumed] %[ssl_fc_sni]/%sslv/%sslc"
+ bind "${tmpdir}/https_logfmt_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-verify-file ${testdir}/set_cafile_rootCA.crt ca-file ${testdir}/set_cafile_interCA1.crt verify required ciphers "kRSA"
+ server s1 ${s1_addr}:${s1_port}
+
+ listen logconnerror_ssl_lst
+ log ${Slg_logconnerror_addr}:${Slg_logconnerror_port} local0 info
+ mode http
+ option httplog
+ bind "${tmpdir}/logconnerror_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-verify-file ${testdir}/set_cafile_rootCA.crt ca-file ${testdir}/set_cafile_interCA1.crt verify required ciphers "kRSA"
+ server s1 ${s1_addr}:${s1_port}
+
+
+
+ defaults bknd_err_dflt
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ retries 0
+ log ${Slg_bcknd_fe_addr}:${Slg_bcknd_fe_port} local0
+ log-format "%ci:%cp %[ssl_fc_sni]/%sslv/%sslc"
+ error-log-format "ERROR %ci:%cp %[ssl_fc_sni]/%sslv/%sslc"
+
+ # The following listeners allow to test backend error fetches
+ listen no_backend_err_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/no_err_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none
+ server s1 ${s1_addr}:${s1_port}
+
+ listen srv_rejected_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/srv_rejected_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none
+ server s1 ${s1_addr}:${s1_port}
+
+ listen mismatch_fe_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/mismatch_fe_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-file ${testdir}/set_cafile_interCA2.crt verify none
+ server s1 ${s1_addr}:${s1_port}
+
+ listen rejected_clt_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/rejected_ssl.sock" ssl crt ${testdir}/set_cafile_server.pem ca-file ${testdir}/set_cafile_interCA2.crt verify required
+ server s1 ${s1_addr}:${s1_port}
+
+ listen wrong_ciphers_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/wrong_ciphers_ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/ca-auth.crt verify none force-tlsv12 ciphers "kRSA"
+ server s1 ${s1_addr}:${s1_port}
+
+.if openssl_version_atleast(1.1.1)
+ listen wrong_ciphers_tls13_ssl_lst from bknd_err_dflt
+ bind "${tmpdir}/wrong_ciphers_tls13_ssl.sock" ssl crt ${testdir}/common.pem ca-file ${testdir}/ca-auth.crt verify none force-tlsv13 ciphersuites "TLS_AES_128_GCM_SHA256"
+ server s1 ${s1_addr}:${s1_port}
+.endif
+
+} -start
+
+
+# The three following requests should all succeed
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+
+barrier b1 sync
+
+
+# Change the root CA in the frontends
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_rootCA.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_rootCA.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+client c4 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+client c5 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+client c6 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+barrier b1 sync
+
+
+
+# Restore the root CA
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_rootCA.crt <<\n$(cat ${testdir}/set_cafile_rootCA.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_rootCA.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+# Change the intermediate CA in the frontends
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA1.crt <<\n$(cat ${testdir}/set_cafile_interCA2.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA1.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+client c7 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+client c8 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+client c9 -connect ${h1_clearlst_sock} {
+ txreq
+ expect_close
+} -run
+
+barrier b1 sync
+
+
+# Restore the intermediate CA in the frontends
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA1.crt <<\n$(cat ${testdir}/set_cafile_interCA1.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA1.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+# "No shared cipher" errors
+client c10 -connect ${h1_wrongcipherslst_sock} {
+ txreq
+ expect_close
+} -run
+client c11 -connect ${h1_wrongcipherslst_sock} {
+ txreq
+ expect_close
+} -run
+client c12 -connect ${h1_wrongcipherslst_sock} {
+ txreq
+ expect_close
+} -run
+
+
+shell {
+ printf "set ssl ca-file ${testdir}/set_cafile_interCA2.crt <<\n$(cat ${testdir}/set_cafile_interCA2.crt)\n$(cat ${testdir}/set_cafile_rootCA.crt)\n\n" | socat "${tmpdir}/h1/stats" -
+ echo "commit ssl ca-file ${testdir}/set_cafile_interCA2.crt" | socat "${tmpdir}/h1/stats" -
+}
+
+client c13 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+barrier b2 sync
+client c14 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ expect_close
+} -run
+barrier b2 sync
+client c15 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ expect_close
+} -run
+barrier b2 sync
+client c16 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ expect_close
+} -run
+barrier b2 sync
+client c17 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ expect_close
+} -run
+barrier b2 sync
+client c18 -connect ${h1_backenderrorslst_sock} {
+ txreq
+ expect_close
+} -run
+
+syslog Slg_cust_fmt -wait
+syslog Slg_https_fmt -wait
+syslog Slg_https_fmt_err -wait
+syslog Slg_logconnerror -wait
+syslog Slg_bcknd -wait
+syslog Slg_bcknd_fe -wait
diff --git a/reg-tests/ssl/ssl_frontend_samples.vtc b/reg-tests/ssl/ssl_frontend_samples.vtc
new file mode 100644
index 0000000..401e193
--- /dev/null
+++ b/reg-tests/ssl/ssl_frontend_samples.vtc
@@ -0,0 +1,69 @@
+#REGTEST_TYPE=devel
+
+varnishtest "Test the ssl_f_* sample fetches"
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ server s1 "${tmpdir}/ssl.sock" ssl verify none
+
+ listen ssl-lst
+ mode http
+
+ http-response add-header x-ssl-der %[ssl_f_der,hex]
+ http-response add-header x-ssl-sha1 %[ssl_f_sha1,hex]
+ http-response add-header x-ssl-notafter %[ssl_f_notafter]
+ http-response add-header x-ssl-notbefore %[ssl_f_notbefore]
+ http-response add-header x-ssl-sig_alg %[ssl_f_sig_alg]
+ http-response add-header x-ssl-i_dn %[ssl_f_i_dn]
+ http-response add-header x-ssl-s_dn %[ssl_f_s_dn]
+ http-response add-header x-ssl-s_serial %[ssl_f_serial,hex]
+ http-response add-header x-ssl-key_alg %[ssl_f_key_alg]
+ http-response add-header x-ssl-version %[ssl_f_version]
+
+ bind "${tmpdir}/ssl.sock" ssl crt ${testdir}/common.pem
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-der ~ 308203C7308202AFA003020102021445B6C777.*65AD7F4469DB9C8581EE260ABCB727AEF8A7C3ADA
+ expect resp.http.x-ssl-sha1 == "DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+ expect resp.http.x-ssl-notafter == "330917162600Z"
+ expect resp.http.x-ssl-notbefore == "230920162600Z"
+ expect resp.http.x-ssl-sig_alg == "RSA-SHA256"
+ expect resp.http.x-ssl-i_dn == "/C=FR/L=Paris/O=HAProxy Technologies/CN=HAProxy Test Intermediate CA"
+ expect resp.http.x-ssl-s_dn == "/C=FR/L=Paris/O=test1/CN=www.test1.com"
+ expect resp.http.x-ssl-s_serial == "45B6C777A017159A19126340C1053521B91E123B"
+ expect resp.http.x-ssl-key_alg == "rsaEncryption"
+ expect resp.http.x-ssl-version == "3"
+} -run
+
+
diff --git a/reg-tests/ssl/ssl_generate_certificate.vtc b/reg-tests/ssl/ssl_generate_certificate.vtc
new file mode 100644
index 0000000..0f8fe2c
--- /dev/null
+++ b/reg-tests/ssl/ssl_generate_certificate.vtc
@@ -0,0 +1,168 @@
+#REGTEST_TYPE=devel
+
+# This reg-test checks that the 'generate-certificates' SSL option works
+# properly. This option allows generating server-side certificates on the fly
+# for clients that use an SNI for which no certificate was specified in the
+# configuration file.
+# This test also aims at checking that the 'generate-certificates' and the
+# 'ecdhe' bind options work correctly together.
+# Any bind line having a 'generate-certificates' needs to have a ca-sign-file
+# option as well that specifies the path to a CA pem file (containing a
+# certificate as well as its private key). For this reason, a new
+# ssl_gen_ca.pem CA certificate was created, along with the ssl_gen_server.pem
+# server certificate signed by the CA. This server certificate will be used as
+# a default certificate and will serve as a base for any newly created
+# certificate.
+
+varnishtest "Test the 'generate-certificates' SSL option"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && !ssllib_name_startswith(wolfSSL)'"
+feature cmd "command -v openssl && command -v grep"
+feature ignore_unknown_macro
+
+server s1 -repeat 6 {
+ rxreq
+ txresp
+} -start
+
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 2048
+
+ defaults
+ mode http
+ option httpslog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ option httpslog
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ http-request set-var(sess.sni) hdr(x-sni)
+
+ use_backend P-384_backend if { path /P-384 }
+ default_backend default_backend
+
+ backend default_backend
+ server s1 "${tmpdir}/ssl.sock" ssl verify none ssl-max-ver TLSv1.2 sni var(sess.sni)
+
+ backend P-384_backend
+ server s1 "${tmpdir}/ssl_P-384.sock" ssl verify none ssl-max-ver TLSv1.2 sni var(sess.sni)
+
+ listen ssl-lst
+ bind "${tmpdir}/ssl.sock" ssl generate-certificates crt ${testdir}/generate_certificates/gen_cert_server.pem ca-sign-file ${testdir}/generate_certificates/gen_cert_ca.pem ca-file ${testdir}/generate_certificates/gen_cert_ca.pem verify optional
+ http-response add-header x-ssl-s_dn %[ssl_f_s_dn(CN)]
+ http-response add-header x-ssl-i_dn %[ssl_f_i_dn(CN)]
+ http-response add-header x-ssl-sig_alg %[ssl_f_sig_alg]
+ http-response add-header x-ssl-key_alg %[ssl_f_key_alg]
+ http-response add-header x-ssl-sha1 %[ssl_f_sha1,hex]
+
+ server s1 ${s1_addr}:${s1_port}
+
+ listen ssl-lst-P-384
+ bind "${tmpdir}/ssl_P-384.sock" ssl generate-certificates crt ${testdir}/generate_certificates/gen_cert_server.pem ca-sign-file ${testdir}/generate_certificates/gen_cert_ca.pem ca-file ${testdir}/generate_certificates/gen_cert_ca.pem verify optional ecdhe secp384r1
+ http-response add-header x-ssl-s_dn %[ssl_f_s_dn(CN)]
+ http-response add-header x-ssl-i_dn %[ssl_f_i_dn(CN)]
+ http-response add-header x-ssl-sig_alg %[ssl_f_sig_alg]
+ http-response add-header x-ssl-key_alg %[ssl_f_key_alg]
+ http-response add-header x-ssl-sha1 %[ssl_f_sha1,hex]
+
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+# Use default certificate
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "server.ecdsa.com"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 == "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+
+# Use default certificate's sni
+client c2 -connect ${h1_clearlst_sock} {
+ txreq -hdr "x-sni: server.ecdsa.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "server.ecdsa.com"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 == "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+
+
+# Use another SNI - the server certificate should be generated and different
+# than the default one
+client c3 -connect ${h1_clearlst_sock} {
+ txreq -hdr "x-sni: unknown-sni.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "ECDSA CA"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 != "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+
+# Use default certificate
+client c4 -connect ${h1_clearlst_sock} {
+ txreq -url "/P-384"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "server.ecdsa.com"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 == "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+
+# Use default certificate's sni
+client c5 -connect ${h1_clearlst_sock} {
+ txreq -url "/P-384" -hdr "x-sni: server.ecdsa.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "server.ecdsa.com"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 == "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+
+# Use another SNI - the server certificate should be generated and different
+# than the default one
+client c6 -connect ${h1_clearlst_sock} {
+ txreq -url "/P-384" -hdr "x-sni: unknown-sni.com"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sig_alg == "ecdsa-with-SHA256"
+ expect resp.http.x-ssl-i_dn == "ECDSA CA"
+ expect resp.http.x-ssl-s_dn == "ECDSA CA"
+ expect resp.http.x-ssl-key_alg == "id-ecPublicKey"
+ expect resp.http.x-ssl-sha1 != "66AC64728CEA0C1F614A89C278FA2F94EDE9AB11"
+} -run
+
+# Check that the curves that the server accepts to use correspond to what we
+# expect it to be (according to ecdhe option).
+# The curve with the highest priority is X25519 for OpenSSL 1.1.1 and later,
+# and P-256 for OpenSSL 1.0.2.
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl.sock" -servername server.ecdsa.com -tls1_2 2>/dev/null | grep -E "Server Temp Key: (ECDH, P-256, 256 bits|ECDH, prime256v1, 256 bits|X25519, 253 bits)"
+}
+
+shell {
+ echo "Q" | openssl s_client -unix "${tmpdir}/ssl_P-384.sock" -servername server.ecdsa.com 2>/dev/null| grep -E "Temp Key: ECDH,.+, 384 bits"
+}
diff --git a/reg-tests/ssl/ssl_reuse.vtc b/reg-tests/ssl/ssl_reuse.vtc
new file mode 100644
index 0000000..d7244ee
--- /dev/null
+++ b/reg-tests/ssl/ssl_reuse.vtc
@@ -0,0 +1,141 @@
+#REGTEST_TYPE=devel
+
+# This reg-test tests 4 scenarios with and without resumption tickets, with TLSv1.3 and TLSv1.2
+# Each client will try to establish a connection, then try to reconnect 20 times, resuming the session each time.
+
+
+varnishtest "Test if the SSL session/ticket reuse work correctly"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL_WOLFSSL) || feature(OPENSSL) && ssllib_name_startswith(OpenSSL) && openssl_version_atleast(1.1.1)'"
+feature ignore_unknown_macro
+
+server s1 -repeat 84 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ # forced to 1 here, because there is a cached session per thread
+ nbthread 1
+
+
+ defaults
+ mode http
+ option httplog
+ option logasap
+ log stderr local0 debug err
+ option httpclose
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen clst1
+ bind "fd@${clst1}"
+ server s1 "${h1_fe1_addr}:${h1_fe1_port}" ssl verify none sni str(www.test1.com)
+ http-response add-header x-ssl-bc-resumed %[ssl_bc_is_resumed]
+
+ listen clst2
+ bind "fd@${clst2}"
+ server s1 "${h1_fe2_addr}:${h1_fe2_port}" ssl verify none sni str(www.test1.com)
+ http-response add-header x-ssl-bc-resumed %[ssl_bc_is_resumed]
+
+ listen clst3
+ bind "fd@${clst3}"
+ server s1 "${h1_fe3_addr}:${h1_fe3_port}" ssl verify none sni str(www.test1.com)
+ http-response add-header x-ssl-bc-resumed %[ssl_bc_is_resumed]
+
+ listen clst4
+ bind "fd@${clst4}"
+ server s1 "${h1_fe4_addr}:${h1_fe4_port}" ssl verify none sni str(www.test1.com)
+ http-response add-header x-ssl-bc-resumed %[ssl_bc_is_resumed]
+
+ listen ssl
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem ssl-max-ver TLSv1.2
+ bind "fd@${fe2}" ssl crt ${testdir}/common.pem ssl-max-ver TLSv1.2 no-tls-tickets
+ bind "fd@${fe3}" ssl crt ${testdir}/common.pem ssl-min-ver TLSv1.3
+ bind "fd@${fe4}" ssl crt ${testdir}/common.pem ssl-min-ver TLSv1.3 no-tls-tickets
+
+ http-response add-header x-ssl-resumed %[ssl_fc_is_resumed]
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+# first bind
+# the first connection is not resumed
+client c1 -connect ${h1_clst1_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 0
+} -run
+# the next 20 connections are resumed
+client c1 -connect ${h1_clst1_sock} -repeat 20 {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 1
+} -run
+
+# second bind
+client c2 -connect ${h1_clst2_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 0
+} -run
+
+client c2 -connect ${h1_clst2_sock} -repeat 20 {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 1
+} -run
+
+# third bind
+client c3 -connect ${h1_clst3_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 0
+} -run
+
+client c3 -connect ${h1_clst3_sock} -repeat 20 {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 1
+} -run
+
+# fourth bind
+client c4 -connect ${h1_clst4_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 0
+} -run
+
+client c4 -connect ${h1_clst4_sock} -repeat 20 {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-resumed == 1
+} -run
+
+
+# This can be useful to debug the result; the ssl_fc_is_resumed field in the log must be 1 after the 2nd command
+#shell {
+#
+# HOST=${h1_fe4_addr}
+# if [ "${h1_fe4_addr}" = "::1" ] ; then
+# HOST="\[::1\]"
+# fi
+#
+# rm sess.pem; (echo -e -n "GET / HTTP/1.1\r\n\r\n"; sleep 1) | openssl s_client -connect $HOST:${h1_fe4_port} -tls1_3 -sess_out sess.pem -keylogfile keys1.txt -servername www.test1.com > /tmp/ssl_debug1; echo | openssl s_client -connect ${HOST}:${h1_fe4_port} -tls1_3 -sess_in sess.pem -keylogfile keys2.txt -servername www.test1.com >> /tmp/ssl_debug1
+# echo "GET / HTTP/1.1" | openssl s_client -connect $HOST:${h1_fe4_port} -tls1_3 -servername www.test1.com
+#}
+
+haproxy h1 -cli {
+ send "show info"
+ expect ~ ".*SslFrontendSessionReuse_pct: 95.*"
+}
+
diff --git a/reg-tests/ssl/ssl_server_samples.vtc b/reg-tests/ssl/ssl_server_samples.vtc
new file mode 100644
index 0000000..cd97634
--- /dev/null
+++ b/reg-tests/ssl/ssl_server_samples.vtc
@@ -0,0 +1,73 @@
+#REGTEST_TYPE=devel
+
+varnishtest "Test the ssl_s_* sample fetches"
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+server s1 -repeat 3 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+ crt-base ${testdir}
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ http-response add-header x-ssl-sha1 %[ssl_s_sha1,hex]
+ http-response add-header x-ssl-notafter %[ssl_s_notafter]
+ http-response add-header x-ssl-notbefore %[ssl_s_notbefore]
+ http-response add-header x-ssl-sig_alg %[ssl_s_sig_alg]
+ http-response add-header x-ssl-i_dn %[ssl_s_i_dn]
+ http-response add-header x-ssl-s_dn %[ssl_s_s_dn]
+ http-response add-header x-ssl-s_serial %[ssl_s_serial,hex]
+ http-response add-header x-ssl-key_alg %[ssl_s_key_alg]
+ http-response add-header x-ssl-der %[ssl_s_der,hex]
+ http-response add-header x-ssl-chain-der %[ssl_s_chain_der,hex]
+ http-response add-header x-ssl-version %[ssl_s_version]
+
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(www.test1.com)
+
+ listen ssl-lst
+ mode http
+
+ bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/localhost.crt-list
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-ssl-sha1 == "DF3B6E847A7BF83DFAAFCFEC65EE9BC36230D3EA"
+ expect resp.http.x-ssl-notafter == "330917162600Z"
+ expect resp.http.x-ssl-notbefore == "230920162600Z"
+ expect resp.http.x-ssl-sig_alg == "RSA-SHA256"
+ expect resp.http.x-ssl-i_dn == "/C=FR/L=Paris/O=HAProxy Technologies/CN=HAProxy Test Intermediate CA"
+ expect resp.http.x-ssl-s_dn == "/C=FR/L=Paris/O=test1/CN=www.test1.com"
+ expect resp.http.x-ssl-s_serial == "45B6C777A017159A19126340C1053521B91E123B"
+ expect resp.http.x-ssl-key_alg == "rsaEncryption"
+ expect resp.http.x-ssl-version == "3"
+ expect resp.http.x-ssl-der ~ 308203C7308202AFA003020102021445B6C777.*65AD7F4469DB9C8581EE260ABCB727AEF8A7C3ADA
+ expect resp.http.x-ssl-chain-der ~ 308203C7308202AFA003020102021445B6C777.*D457A8C66AECA6408FF5C1A3EDBFF2888D7CED
+} -run
+
+
diff --git a/reg-tests/ssl/ssl_simple_crt-list.vtc b/reg-tests/ssl/ssl_simple_crt-list.vtc
new file mode 100644
index 0000000..7f15056
--- /dev/null
+++ b/reg-tests/ssl/ssl_simple_crt-list.vtc
@@ -0,0 +1,50 @@
+#REGTEST_TYPE=bug
+varnishtest "Test for the bug #940"
+# Test that the SNIs are correctly inserted when the same certificate file is used multiple times.
+
+#REQUIRE_VERSION=2.2
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+server s1 -repeat 4 {
+ rxreq
+ txresp
+} -start
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ crt-base ${testdir}
+ stats socket "${tmpdir}/h1/stats" level admin
+
+ defaults
+ mode http
+ option httplog
+ log stderr local0 debug err
+ option logasap
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+
+ listen clear-lst
+ bind "fd@${clearlst}"
+ balance roundrobin
+ server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(record1.bug940.domain.tld)
+ server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(record2.bug940.domain.tld)
+ server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(record3.bug940.domain.tld)
+ server s4 "${tmpdir}/ssl.sock" ssl verify none sni str(record4.bug940.domain.tld)
+
+ listen ssl-lst
+ mode http
+ bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/simple.crt-list
+
+ server s1 ${s1_addr}:${s1_port}
+} -start
+
+
+client c1 -repeat 4 -connect ${h1_clearlst_sock} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/ssl/wrong_ctx_storage.vtc b/reg-tests/ssl/wrong_ctx_storage.vtc
new file mode 100644
index 0000000..c6cb19a
--- /dev/null
+++ b/reg-tests/ssl/wrong_ctx_storage.vtc
@@ -0,0 +1,45 @@
+# commit 28962c9
+# BUG/MAJOR: ssl: OpenSSL context is stored in non-reserved memory slot
+#
+# We never saw an unexplained crash with SSL, so I suppose that we were
+# lucky, or that slot 0 is always reserved. Anyway, the usage of the
+# macros SSL_get_app_data() and SSL_set_app_data() seems wrong. This
+# patch replaces the deprecated functions SSL_get_app_data() and
+# SSL_set_app_data() with the new functions SSL_get_ex_data() and
+# SSL_set_ex_data(), and reserves the slot in the SSL memory space.
+#
+# For information, these are the two declarations which seem wrong or
+# incomplete in the OpenSSL ssl.h file. We can see the usage of
+# slot 0, which is hardcoded but never reserved.
+#
+# #define SSL_set_app_data(s,arg) (SSL_set_ex_data(s,0,(char *)arg))
+# #define SSL_get_app_data(s) (SSL_get_ex_data(s,0))
+
+#REGTEST_TYPE=bug
+
+varnishtest "OpenSSL bug: Random crashes"
+#REQUIRE_OPTIONS=OPENSSL
+feature ignore_unknown_macro
+
+
+haproxy h1 -conf {
+ global
+ tune.ssl.default-dh-param 2048
+ tune.ssl.capture-buffer-size 1
+
+ listen frt
+ mode http
+ bind "fd@${frt}" ssl crt ${testdir}/common.pem
+ http-request redirect location /
+} -start
+
+shell {
+ HOST=${h1_frt_addr}
+ if [ "${h1_frt_addr}" = "::1" ] ; then
+ HOST="\[::1\]"
+ fi
+ for i in 1 2 3 4 5; do
+ curl -i -k https://$HOST:${h1_frt_port} & pids="$pids $!"
+ done
+ wait $pids
+}
diff --git a/reg-tests/startup/automatic_maxconn.vtc b/reg-tests/startup/automatic_maxconn.vtc
new file mode 100644
index 0000000..0173916
--- /dev/null
+++ b/reg-tests/startup/automatic_maxconn.vtc
@@ -0,0 +1,104 @@
+varnishtest "Automatic maxconn computation"
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature cmd "$HAPROXY_PROGRAM -cc '!feature(OBSOLETE_LINKER)'"
+feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
+feature ignore_unknown_macro
+#REGTEST_TYPE=broken
+
+
+# Check the maxconn computation with the -m parameter
+# Broken because it can't work with ASAN.
+
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -start
+
+
+haproxy h1 -arg "-m 1024" -conf {
+} -start
+
+haproxy h1 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: (29000|28000)\n.*"
+}
+
+haproxy h2 -arg "-m 384" -conf {
+} -start
+
+haproxy h2 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: (11000|10000)\n.*"
+}
+
+haproxy h3 -arg "-m 256" -conf {
+} -start
+
+haproxy h3 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: (7300|7000)\n.*"
+}
+
+# 1 SSL front but no back
+
+haproxy h4 -arg "-m 256" -conf {
+ defaults
+ mode http
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ frontend fe1
+ bind "fd@${fe1}" ssl crt ${testdir}/common.pem
+
+} -start
+
+haproxy h4 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: 1900\n.*"
+}
+
+# 1 SSL back but not front
+
+haproxy h5 -arg "-m 256" -conf {
+ defaults
+ mode http
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ listen li2
+ bind "fd@${li2}"
+ server ssl "${s1_addr}:${s1_port}" ssl verify none
+
+} -start
+
+haproxy h5 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: 1900\n.*"
+}
+
+
+# 1 SSL front and 1 back
+
+haproxy h6 -arg "-m 256" -conf {
+ defaults
+ mode http
+ timeout connect 1s
+ timeout client 1s
+ timeout server 1s
+
+ listen li3
+ bind "fd@${li3}" ssl crt ${testdir}/common.pem
+ server ssl "${s1_addr}:${s1_port}" ssl verify none
+
+} -start
+
+haproxy h6 -cli {
+ send "show info"
+ expect ~ ".*Maxconn: 1700\n.*"
+}
+
diff --git a/reg-tests/startup/check_condition.vtc b/reg-tests/startup/check_condition.vtc
new file mode 100644
index 0000000..3ab6ae4
--- /dev/null
+++ b/reg-tests/startup/check_condition.vtc
@@ -0,0 +1,32 @@
+varnishtest "Tests the -cc argument"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+
+shell {
+ set -e
+ $HAPROXY_PROGRAM -cc "version_atleast(2.4)"
+ ! $HAPROXY_PROGRAM -cc "version_atleast(1024)"
+
+ $HAPROXY_PROGRAM -cc "streq(foo,'foo')"
+ $HAPROXY_PROGRAM -cc "streq(\"foo bar\",'foo bar')"
+ ! $HAPROXY_PROGRAM -cc "streq(foo,bar)"
+
+ if $HAPROXY_PROGRAM -cc "version_atleast(2.5-dev2)"; then
+ export TESTVAR=1
+ $HAPROXY_PROGRAM -cc 'defined(TESTVAR) && streq("$TESTVAR","1")'
+ $HAPROXY_PROGRAM -cc 'feature(OPENSSL) || !feature(OPENSSL)'
+ $HAPROXY_PROGRAM -cc '1&&!0&&!((streq(a,b)||!streq(a,a)&&1)||strneq(a,a))'
+ $HAPROXY_PROGRAM -cc '1 &&! 0&& !((streq(a,b)||!streq(a,a)&&1)||strneq(a,a))'
+ $HAPROXY_PROGRAM -cc '1 && !0 && !((streq(a,b) || !streq(a,a) && 1) || strneq(a,a))'
+ ! $HAPROXY_PROGRAM -cc '1 && !0 && !((streq(a,b) || !streq(a,a) && 1) || strneq(a,b))'
+ ! $HAPROXY_PROGRAM -cc '1 && !0 && !((streq(a,a) || !streq(a,a) && 1) || strneq(a,a))'
+ # empty string is always false
+ ! $HAPROXY_PROGRAM -cc ''
+ # non-zero is true
+ $HAPROXY_PROGRAM -cc '-1000 && 200'
+ # check for various parsing errors (extra/missing chars)
+ ! $HAPROXY_PROGRAM -cc '200rrr'
+ ! $HAPROXY_PROGRAM -cc '!(0))'
+ ! $HAPROXY_PROGRAM -cc 'streq(a,"a)'
+ fi
+} -run
diff --git a/reg-tests/startup/common.pem b/reg-tests/startup/common.pem
new file mode 100644
index 0000000..206e417
--- /dev/null
+++ b/reg-tests/startup/common.pem
@@ -0,0 +1,117 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAnb0BDF7FsqzslakNg7u/n/JQkq6nheuKwvyTqECfpc9y7uSB
+e/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97N1/LZa6vecjjgGSP0Aag/gS/ocnM
+RIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKttJP8xME7j3bTwIDElx/hNI0n7L+yS
+kAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6VkIzdOEtH6TcghXmuGcuqvLNH9Buo
+syngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+JHm0pkDzAZ2WluNsuXlrJToPirWyj
+6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd3wIDAQABAoIBABojc8UE/2W4WgwC
+04Z82ig7Ezb7Ui9S9M+S4zUCYHItijIkE4DkIfO3y7Hk4x6iJdyb191HK9UdC5p9
+32upS9XFPgM/izx3GZvxDhO+xXbSep7ovbyuQ3pPkHTx3TTavpm3GyvmcTKKoy4R
+jP4dWhzDXPdQW1ol3ZS4EDau4rlyClY6oi1mq9aBEX3MqVjB/nO7s2AbdgclAgP2
+OZMhTzWYR1k5tYySHCXh3ggGMCikyvHU0+SsGyrstYzP1VYi/n3f0VgqW/5ZjG8x
+6SHpe04unErPF3HuSun2ZMCFdBxaTFZ8FENb8evrSXe3nQOc9W21RQdRRrNNUbjl
+JYI4veECgYEA0ATYKMS1VCUYRZoQ49b5GTg7avUYqfW4bEo4fSfBue8NrnKR3Wu8
+PPBiCTuIYq1vSF+60B7Vu+hW0A8OuQ2UuMxLpYcQ7lKfNad/+yAfoWWafIqCqNU9
+at0QMdbW6A69d6jZt7OrXtleBsphCnN58jTz4ch4PIa2Oyq46NUXCvUCgYEAwh8t
+G6BOHOs3yRNI2s9Y9EEfwoil2uIKrZhqiL3AwdIpu5uNIMuPnbaEpXvRX6jv/qtL
+321i8vZLc31aM7zfxQ6B4ReQFJfYC80FJsWvcLwT9hB9mTJpLS4sIu5tzQc87O6w
+RtjFMom+5ns5hfPB4Eccy0EtbQWVY4nCzUeO6QMCgYBSvqqRRPXwG7VU8lznlHqP
+upuABzChYrnScY+Y0TixUlL54l79Wb6N6vzEOWceAWkzu8iewrU4QspNhr/PgoR3
+IeSxWlG0yy7Dc/ZnmTabx8O06I/iwrfkizzG5nOj6UEamRLJjPGNEB/jyZriQl7u
+pnugg1K4mMliLbNSAnlhBQKBgQCmYepbv260Qrex1KGhSg9Ia3k5V74weYYFfJnz
+UhChD+1NK+ourcsOtp3C6PlwMHBjq5aAjlU9QfUxq8NgjQaO8/xGXdfUjsFSfAtq
+TA4vZkUFpuTAJgEYBHc4CXx7OzTxLzRPxQRgaMgC7KNFOMR34vu/CsJQq3R7uFwL
+bsYC2QKBgQCtEmg1uDZVdByX9zyUMuRxz5Tq/vDcp+A5lJj2mha1+bUMaKX2+lxQ
+vPxY55Vaw/ukWkJirRrpGv6IytBn0dLAFSlKZworZGBaxsm8OGTFJ5Oe9+kZTjI9
+hvjpClOA1otbmj2F2uZAbuIjxQGDNUkLoifN5yDYCC8JPujHuHmULw==
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIGeTCCBGGgAwIBAgIBAjANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJGUjEW
+MBQGA1UECBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoT
+B296b24uaW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYP
+c3VwcG9ydEBvem9uLmlvMB4XDTE2MDExNzIzMDIzOFoXDTE4MDExNjIzMDIzOFow
+gb4xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMRowGAYDVQQH
+ExFOZXVpbGx5LXN1ci1TZWluZTEYMBYGA1UEChMPVE9BRCBDb25zdWx0aW5nMRcw
+FQYDVQQLEw5lUGFyYXBoZXIgVGVhbTEWMBQGA1UEAxMNd3d3LnRlc3QxLmNvbTEw
+MC4GCSqGSIb3DQEJARYhYXJuYXVsdC5taWNoZWxAdG9hZC1jb25zdWx0aW5nLmZy
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnb0BDF7FsqzslakNg7u/
+n/JQkq6nheuKwvyTqECfpc9y7uSBe/vrEFqBaDSLQagJxuZdL5geFeVtRbdAoB97
+N1/LZa6vecjjgGSP0Aag/gS/ocnMRIyvlVWWT9MrD46OG3qZY1ORU1ltrVL0NKtt
+JP8xME7j3bTwIDElx/hNI0n7L+ySkAe2xb/7CbZRfoOhjTVAcGv4aSLVc/Hi8k6V
+kIzdOEtH6TcghXmuGcuqvLNH9BuosyngKTcQ8zg6J+e64aVvC+e7vi94uil9Qu+J
+Hm0pkDzAZ2WluNsuXlrJToPirWyj6/YdN6xgSI1hbZkBmUPAebgYuxBt6huvfyQd
+3wIDAQABo4IBvzCCAbswCwYDVR0PBAQDAgOoMBMGA1UdJQQMMAoGCCsGAQUFBwMB
+MB0GA1UdDgQWBBTIihFNVNgOseQnsWEcAQxAbIKE4TCBsgYDVR0jBIGqMIGngBRv
+G9At9gzk2MW5Z7JVey1LtPIZ8KGBg6SBgDB+MQswCQYDVQQGEwJGUjEWMBQGA1UE
+CBMNSWxlLWRlLUZyYW5jZTEOMAwGA1UEBxMFUGFyaXMxEDAOBgNVBAoTB296b24u
+aW8xFTATBgNVBAMTDE96b24gVGVzdCBDQTEeMBwGCSqGSIb3DQEJARYPc3VwcG9y
+dEBvem9uLmlvggkA15FtIaGcrk8wDAYDVR0TAQH/BAIwADAaBgNVHREEEzARgg9j
+b21tb25OYW1lOmNvcHkwCQYDVR0SBAIwADBIBgNVHR8EQTA/MD2gO6A5hjdodHRw
+Oi8vb3BlbnNzbGNhLnRvYWQtY29uc3VsdGluZy5jb20vb3BlbnZwbi9MYXRlc3Qu
+Y3JsMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiVE9BRC1Db25z
+dWx0aW5nIHNlcnZlciBjZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAgEAewDa
+9BukGNJMex8gsXmmdaczTr8yh9Uvw4NJcZS38I+26o//2g+d6i7wxcQg8hIm62Hj
+0TblGU3+RsJo4uzcWxxA5YUYlVszbHNBRpQengEE5pjwHvoXVMNES6Bt8xP04+Vj
+0qVnA8gUaDMk9lN5anK7tF/mbHOIJwHJZYCa2t3y95dIOVEXFwOIzzbSbaprjkLN
+w0BgR5paJz7NZWNqo4sZHUUz94uH2bPEd01SqHO0dJwEVxadgxuPnD05I9gqGpGX
+Zf3Rn7EQylvUtX9mpPaulQPXc3emefewLUSSAdnZrVikZK2J/B4lSi9FpUwl4iQH
+pZoE0QLQHtB1SBKacnOAddGSTLSdFvpzjErjjWSpMukF0vutmrP86GG3xtshWVhI
+u+yLfDJVm/pXfaeDtWMXpxIT/U1i0avpk5MZtFMRC0MTaxEWBTnnJm+/yiaAXQYg
+E1ZIP0mkZkiUojIawTR7JTjHGhIraP9UVPNceVy0DLfETHEou3vhwBn7PFOz7piJ
+wjp3A47DStJD4fapaX6B1fqM+n34CMD9ZAiJFgQEIQfObAWC9hyr4m+pqkp1Qfuw
+vsAP/ZoS1CBirJfm3i+Gshh+VeH+TAmO/NBBYCfzBdgkNz4tJCkOc7CUT/NQTR/L
+N2OskR/Fkge149RJi7hHvE3gk/mtGtNmHJPuQ+s=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIJazCCBVOgAwIBAgIUWHoc5e2FUECgyCvyVf8wCtt8gTYwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCRlIxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA4MDQxODU4MTZaFw0yMDA5
+MDMxODU4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggQiMA0GCSqGSIb3DQEB
+AQUAA4IEDwAwggQKAoIEAQDARiuHkhrnf38Md1nxGDSneJfwv/QksdNNMNTJBdjg
+OVmaRCIAyz43oefTWDQ/TebbSwB+Lg9pud1zadGWhlZRhCgBPP8JDMhIKH4eXIRk
+5IIa8WD08EwvSlqJL0r4gsMtVsxy7BZHAkka/2Ket9pyGt4kG5n75RFdc6BI80/8
+RwJt/MDxPrcVBAT7LnCluxQpyya9mZCabj7l+9a2yU2hgWS6QqfZJ133krkP/MMh
+AEQkSoA4mmBwWk9yPqXmUqiOi7v6iLkIUEh5SgYVPRk9BtU/kDaUdSwuqRrpCZo4
+SsWZWFLxBmLHkSh+G+BWjCVYMQr2ye7e+VMT/20+5xAfq4fj9n5BsPcx3QcVuTof
+RAc/Oygnt4MYnIcUb7zRFvCAvgpUHL7BnEn6nhyXjHJGqGDchsg8m9t3v/Y3ohq+
+qmrSzdeuylE1n3W5aWJlbFmyXegNP45MJ0xicesVrXEWF7YD/ir9mGJ8bQYr4blf
+77PrbF02komC6AzVPKOJa0jR+eW1wErzYlkYgez6ylBWCiHJd1dhEHlK3h2rXdYa
+Gnb45ILCLpEDjNEUrHifLLNXwqJpgZQsJU6BgMgk7ZgBfAKrCfTeg0rkCqCAPeVb
+8eSLf7FBF7YBRJ5P6u8qXc4RtgEu607GaWV0gIMfyVBY52oV+OaNsEdFetrJnp3c
+friG8vJ+7jdq6zjUCGgnfUIHoViJPh3JuFfhA3jT0gQDKW5PeI7dxhrNvlqdYfHI
+fxX7Y1/J6cTQkqJ1cai2f0bwJIJiTAThNbG+zrtjJ7fZ3wJ4udyU/IKrwShqtmTb
+1Ofj0tJDdwOH8i84vIySLUvR9aAb7ClFlnsx6rzwOxG90W7C0LA2M0EHm4FezJm/
+FfujnZwEWr1T9Wki6qE0MHCbdN/TTDws//EKkkE44FC+amL96w0IQl70vpE37j2A
+zlDWvFFID95SIxfmpkwWDvXDKv6gr1GMLeysCl2fgpY05Xidw5cEo9/tEkuWn/dG
+x/D9hnLBGeroA0251ES12jemqDjI2U0tfaeHakjwSsoWElf94Qmuh2iPZ+1zIxQs
+7o6nAWN8X9hfsmrDTTHlww0TEfrjlbzG5Yh+0ZRxmejgiUyOCXck+eh/ZXMXvfWh
+y3CorIIuWgkRjm80PYkdaRDJdZuyP6R7tXfTXNVzAiSQf0Qx9ru2KB2Fs/XZPamH
+KjItAU5Q6msIVvaRMS0muQgV+b6hqSEBzqXqJfAlpVLHXr5FqK+U7EB9y02B6piB
+tAmxqXP8OOCoQql6/vgIcrDFUOo6KtGBW36ef74XE3KCUVaIzVJZSIt6i/Vi0bZj
+bAjsJUQ3qDlHdorv9TRVOhnC1GUz7SuYnpEOyiXmyx3LAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ62csZcH/meQcENHhNbqz9LMzwjjAfBgNVHSMEGDAWgBQ62csZcH/meQcE
+NHhNbqz9LMzwjjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IEAQBA
+wLsGf3R1+/I2zQE+lsj7RasZtA/Cos92iEGDAPvFbx9e+roG8Gg8KBsEJu/HN0JH
+lMMiQ8dDRHSBMvRBENL5/57oOOhmqc+1u5sazLuANhzAYPZG17Klib7YpEwWoXar
+FDDiJYtCyLW0oNLpCswYopWK9GC0RJNucB0NFvOxehJ2sP2/fxGBQMB09L6mjKjd
+4KsOzyd3dNf0VYS6jB+/1pcKSHKQUo9HRHB5FK04PsYHoh4AtmEHvmYQKcWWidgU
+v26ftlH00ERzuW2juqBbz9mghlNRqXi0IyZ9b4tSj29dxW+WWFzo7j2zEPaD6z2W
+DEHq7zvON+g+q6qLgWeszqMgJzjvWjMj00E/t06PoHPiz/cAnDKEqp+ZzxCIFrxj
+/qneChpogDWyLbawhyyzbZvbirx5znOSbWjPZgydqaNEFViqbxwinBx4Xxabo6XN
+TU020FuMWmgfbIcvtgjKgyKqc97l7JMNNm7LQV9+9W0U5zdIqQKLZ9MMrd2w3xh4
+MAB8NKnwzHReK0TWwUU9HSgFAGdEX6HnyZ3bQ13ijg+sNBRMEi0gBHaqZKDdyoft
+B2u2uasSwioV48dbSIcHl+rTBKxiMh5XQ7ENnaGOJkjsIqTVzizqnPHU8eMBnSbb
+dsXlamROYII44+j3Ku6OGt51w86eGk4VxI3tmaECcJKqTkwUFD8AcNDrkjtmLuxK
+12yjnoM+u1cclfqQ5NOtRc6MJZ27jCobfBBhVdKVDp4X1WNyqGlbsU5adDAzknuI
+GT7MJO7lGjkZX2n54BNPSfrSknYMOVYcZqL0Dbcrhx5IyEmg+iOlOu1HO1tdnZop
+ej4vT+1V2w9Sa4Wo3UCo84jcm5v/4z7jCYh4BRQ60CFb7GLxZoqXIslcGSPool3n
+jl8JWoaLXrJUPfZGXo1iAlayJ5EiMyZl4eB/TBUf6TMm8vLvsPiUT+CEsjLppOdS
+eYppZAZ6H1JrJGs5kKBdOJHGn6Pkp5QsHIswOBd1HqHrBbYbZmDaDLRHduILWLrM
+e0/IfDdeXB/bKfmZoEpT8xRiauw15p0AHLumiK7KISAehfgBqUnxx+YmgGoZ7EWX
+KnMYAfCuC6oJ1DL0gp4Z9yMK1eu+GV1sLxPq9ZruEHW1R+H+4sGyiA5Gso2tgB6/
+XW//wxKclNp5LZR7hqfs/kGuh5asrJrnEbMwWn2+tr/LqfYtYh1D6nHfIXpT0o1d
+rNy/HrsKnRDMWxjm03r4hCViuNVD3Zb9anAF/NSPDVu8ATM5JbJNrCYX4eipz6ZE
+aQBkwIBkTPgtgP4r8v2G+uMYDw8nq7xh72FK107aeTTwc6MgU5jfeFNMr2XJisJd
+lSem1ngKYQSEzjVsTE4c
+-----END CERTIFICATE-----
diff --git a/reg-tests/startup/default_rules.vtc b/reg-tests/startup/default_rules.vtc
new file mode 100644
index 0000000..1cbbfa9
--- /dev/null
+++ b/reg-tests/startup/default_rules.vtc
@@ -0,0 +1,185 @@
+varnishtest "Misuses of defaults section defining TCP/HTTP rules"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+#
+# anonymous defaults section cannot define TCP/HTTP rules
+#
+haproxy h1 -conf-BAD {} {
+ defaults
+ http-request set-header X-Hdr 1
+}
+
+haproxy h2 -conf-BAD {} {
+ defaults
+ http-response set-header X-Hdr 1
+}
+
+haproxy h3 -conf-BAD {} {
+ defaults
+ http-after-request set-header X-Hdr 1
+}
+
+haproxy h4 -conf-BAD {} {
+ defaults
+ tcp-request connection accept
+}
+
+haproxy h5 -conf-BAD {} {
+ defaults
+ tcp-request session accept
+}
+
+haproxy h6 -conf-BAD {} {
+ defaults
+ tcp-request inspect-delay 5s
+ tcp-request content accept
+}
+
+haproxy h7 -conf-BAD {} {
+ defaults
+ tcp-response inspect-delay 5s
+ tcp-response content accept
+}
+
+#
+# defaults section defining TCP/HTTP rules cannot be used to init another
+# defaults section
+#
+haproxy h8 -conf-BAD {} {
+ defaults invalid
+ tcp-response inspect-delay 5s
+ tcp-response content accept
+
+ defaults from invalid
+ mode tcp
+}
+
+#
+# defaults section defining TCP/HTTP rules cannot be used to init a listen
+# section
+#
+haproxy h9 -conf-BAD {} {
+ defaults invalid
+ tcp-request inspect-delay 5s
+ tcp-request content accept
+
+ listen li from invalid
+ mode tcp
+ bind "fd@${lih9}"
+ server www 127.0.0.1:80
+}
+
+#
+# defaults section defining TCP/HTTP rules cannot be used to init frontend and
+# backend sections at the same time
+#
+#
+haproxy h10 -conf-BAD {} {
+ defaults invalid
+ tcp-request inspect-delay 5s
+ tcp-request content accept
+
+ frontend fe from invalid
+ mode tcp
+ bind "fd@${feh10}"
+ default_backend be1
+
+ backend be from invalid
+ mode tcp
+ server www 127.0.0.1:80
+}
+
+#
+# defaults section defining 'tcp-request connection' or 'tcp-request session'
+# rules cannot be used to init backend sections
+#
+haproxy h11 -conf-BAD {} {
+ defaults invalid
+ tcp-request connection accept
+
+ backend be from invalid
+ mode tcp
+ server www 127.0.0.1:80
+}
+
+haproxy h12 -conf-BAD {} {
+ defaults invalid
+ tcp-request session accept
+
+ backend be from invalid
+ mode tcp
+ server www 127.0.0.1:80
+}
+
+#
+# defaults section defining 'tcp-response content' rules cannot be used to init
+# a frontend section
+#
+haproxy h13 -conf-BAD {} {
+ defaults invalid
+ tcp-response inspect-delay 5s
+ tcp-response content accept
+
+ frontend fe from invalid
+ mode tcp
+ bind "fd@${feh10}"
+}
+
+haproxy h14 -arg -V -conf-OK {
+ defaults tcp
+ tcp-response inspect-delay 5s
+ tcp-response content accept
+
+ backend be from tcp
+ mode tcp
+ server www 127.0.0.1:80
+}
+
+#
+# Check arguments resolutions in rules. FE/BE arguments must be resolved, but
+# SRV/TAB arguments without an explicit proxy name are not allowed.
+#
+
+haproxy h15 -conf-BAD {} {
+ defaults invalid
+ mode http
+ http-request set-header x-test "%[srv_conn(www)]"
+
+ backend be from invalid
+ server www 127.0.0.1:80
+}
+
+haproxy h16 -conf-BAD {} {
+ defaults invalid
+ mode http
+ http-request track-sc0 src
+ http-request deny deny_status 429 if { sc_http_req_rate(0) gt 20 }
+
+ backend be
+ stick-table type ip size 100k expire 30s store http_req_rate(10s)
+ server www 127.0.0.1:80
+}
+
+haproxy h17 -arg -V -conf-OK {
+ defaults common
+ mode http
+
+ defaults def_front from common
+ http-request set-header x-test1 "%[fe_conn]"
+
+ defaults def_back from common
+ http-request track-sc0 src table be
+ http-request deny deny_status 429 if { sc_http_req_rate(0,be) gt 20 }
+ http-request set-header x-test2 "%[be_conn]"
+ http-request set-header x-test3 "%[srv_conn(be/www)]"
+
+ frontend fe from def_front
+ bind "fd@${feh15}"
+ default_backend be
+
+ backend be from def_back
+ stick-table type ip size 100k expire 30s store http_req_rate(10s)
+ server www 127.0.0.1:80
+}
diff --git a/reg-tests/stick-table/converteers_ref_cnt_never_dec.vtc b/reg-tests/stick-table/converteers_ref_cnt_never_dec.vtc
new file mode 100644
index 0000000..533765f
--- /dev/null
+++ b/reg-tests/stick-table/converteers_ref_cnt_never_dec.vtc
@@ -0,0 +1,75 @@
+# commit 3e60b11
+# BUG/MEDIUM: stick-tables: Decrement ref_cnt in table_* converters
+#
+# When using table_* converters ref_cnt was incremented
+# and never decremented causing entries to not expire.
+#
+# The root cause appears to be that stktable_lookup_key()
+# was called within all sample_conv_table_* functions which was
+# incrementing ref_cnt and not decrementing after completion.
+#
+# Added stktable_release() to the end of each sample_conv_table_*
+# function and reworked the end logic to ensure that ref_cnt is
+# always decremented after use.
+#
+# This should be backported to 1.8
+
+#REGTEST_TYPE=bug
+#REQUIRE_VERSION=2.4
+
+varnishtest "stick-tables: Test expirations when used with table_*"
+
+# As some macros for haproxy are used in this file, this line is mandatory.
+feature ignore_unknown_macro
+
+# Do nothing.
+server s1 {
+} -start
+
+haproxy h1 -conf {
+ # Configuration file of 'h1' haproxy instance.
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend http1
+ bind "fd@${my_frontend_fd}"
+ stick-table size 1k expire 1ms type ip store conn_rate(10s),http_req_cnt,http_err_cnt,http_fail_cnt,http_req_rate(10s),http_err_rate(10s),http_fail_rate(10s),gpc0,gpc0_rate(10s),gpt0
+ http-request track-sc0 req.hdr(X-Forwarded-For)
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_req_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_trackers(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),in_table(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_bytes_in_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_bytes_out_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_conn_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_conn_cur(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_conn_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_gpt0(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_gpc0(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_gpc0_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_err_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_err_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_fail_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_fail_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_req_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_http_req_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_kbytes_in(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_kbytes_out(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_server_id(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_sess_cnt(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_sess_rate(http1) -m int lt 0 }
+ http-request redirect location https://${s1_addr}:${s1_port}/ if { req.hdr(X-Forwarded-For),table_trackers(http1) -m int lt 0 }
+} -start
+
+client c1 -connect ${h1_my_frontend_fd_sock} {
+ txreq -url "/" -hdr "X-Forwarded-For: 127.0.0.1"
+ rxresp
+ expect resp.status == 503
+} -run
+
+haproxy h1 -cli {
+ send "show table http1"
+ expect ~ "table: http1, type: ip, size:1024, used:(0|1\\n0x[0-9a-f]*: key=127\\.0\\.0\\.1 use=0 exp=[0-9]* shard=0 gpt0=0 gpc0=0 gpc0_rate\\(10000\\)=0 conn_rate\\(10000\\)=1 http_req_cnt=1 http_req_rate\\(10000\\)=1 http_err_cnt=0 http_err_rate\\(10000\\)=0 http_fail_cnt=0 http_fail_rate\\(10000\\)=0)\\n$"
+} -wait
diff --git a/reg-tests/stick-table/src_conn_rate.vtc b/reg-tests/stick-table/src_conn_rate.vtc
new file mode 100644
index 0000000..bdf8869
--- /dev/null
+++ b/reg-tests/stick-table/src_conn_rate.vtc
@@ -0,0 +1,43 @@
+varnishtest "stick table: src_conn_rate"
+feature ignore_unknown_macro
+
+haproxy h0 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ listen li
+ bind "fd@${fe1}"
+ http-request track-sc0 src table conn_rate_table
+ http-request deny if { src_conn_rate(conn_rate_table) gt 3 }
+ http-request return status 200
+
+ backend conn_rate_table
+ stick-table type ip size 1m expire 1m store conn_rate(1m)
+} -start
+
+client c0 -connect ${h0_fe1_addr}:${h0_fe1_port} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c1 -connect ${h0_fe1_addr}:${h0_fe1_port} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h0_fe1_addr}:${h0_fe1_port} {
+ txreq
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c3 -connect ${h0_fe1_addr}:${h0_fe1_port} {
+ txreq
+ rxresp
+ expect resp.status == 403
+} -run
diff --git a/reg-tests/stick-table/unknown_key.vtc b/reg-tests/stick-table/unknown_key.vtc
new file mode 100644
index 0000000..f0307cb
--- /dev/null
+++ b/reg-tests/stick-table/unknown_key.vtc
@@ -0,0 +1,32 @@
+# Shipped with the commit fixing the bug.
+
+#REGTEST_TYPE=bug
+
+varnishtest "Stick Table: Crash when accessing unknown key."
+feature ignore_unknown_macro
+
+server s0 {
+ rxreq
+ txresp
+} -start
+
+haproxy h0 -conf {
+ defaults
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend test
+ mode http
+ bind "fd@${fe1}"
+ stick-table type ip size 1m expire 1h store gpc0
+ http-request deny if { src,table_trackers(test) eq 1 }
+ http-request deny if { src,in_table(test) }
+ http-request deny deny_status 200
+} -start
+
+client c0 -connect ${h0_fe1_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+} -run
diff --git a/reg-tests/stickiness/lb-services.vtc b/reg-tests/stickiness/lb-services.vtc
new file mode 100644
index 0000000..23c9a9a
--- /dev/null
+++ b/reg-tests/stickiness/lb-services.vtc
@@ -0,0 +1,292 @@
+vtest "A reg test for stickiness"
+feature ignore_unknown_macro
+#REGTEST_TYPE=slow
+
+# The aim of this test is to check that "stick on" rules
+# do the job they are supposed to do.
+# If we remove one of the "stick on" rule, this script fails.
+
+server s_not_used_1 {}
+server s_not_used_2 {}
+server s_not_used_3 {}
+server s_not_used_4 {}
+server s_not_used_5 {}
+server s_not_used_6 {}
+server s_not_used_7 {}
+server s_not_used_8 {}
+server s_not_used_9 {}
+server s_not_used_10 {}
+server s_not_used_11 {}
+server s_not_used_12 {}
+
+# h1/be1 servers
+server s1 {
+ rxreq
+ txresp -hdr "Server: be1/s1"
+} -repeat 2 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Server: be1/s2"
+} -repeat 2 -start
+
+# h1/be2 servers
+server s3 {
+ rxreq
+ txresp -hdr "Server: be2/s3"
+} -repeat 2 -start
+
+server s4 {
+ rxreq
+ txresp -hdr "Server: be2/s4"
+} -repeat 2 -start
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ log stdout format raw local0 debug
+
+ peers mypeers
+ bind "fd@${A}"
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ table mytable type string size 10m
+
+ backend be1
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+
+ backend be2
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_1 ${s_not_used_1_addr}:${s_not_used_1_port}
+ server s_not_used_2 ${s_not_used_2_addr}:${s_not_used_2_port}
+ server s_not_used_3 ${s_not_used_3_addr}:${s_not_used_3_port}
+ server srv2 ${s4_addr}:${s4_port}
+ server s_not_used_4 ${s_not_used_4_addr}:${s_not_used_4_port}
+ server s_not_used_5 ${s_not_used_5_addr}:${s_not_used_5_port}
+ server s_not_used_6 ${s_not_used_6_addr}:${s_not_used_6_port}
+ server srv1 ${s3_addr}:${s3_port}
+
+ frontend fe
+ acl acl_be1 path_beg /be1
+ acl acl_be2 path_beg /be2
+ use_backend be1 if acl_be1
+ use_backend be2 if acl_be2
+ bind "fd@${fe}"
+}
+
+# h2/be1 servers
+server s5 {
+ rxreq
+ txresp -hdr "Server: be1/s5"
+} -repeat 2 -start
+
+server s6 {
+ rxreq
+ txresp -hdr "Server: be1/s6"
+} -repeat 2 -start
+
+# h2/be2 servers
+server s7 {
+ rxreq
+ txresp -hdr "Server: be2/s7"
+} -repeat 2 -start
+
+server s8 {
+ rxreq
+ txresp -hdr "Server: be2/s8"
+} -repeat 2 -start
+
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers mypeers
+ bind "fd@${B}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ table mytable type string size 10m
+
+ backend be1
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_7 ${s_not_used_7_addr}:${s_not_used_7_port}
+ server s_not_used_8 ${s_not_used_8_addr}:${s_not_used_8_port}
+ server s_not_used_9 ${s_not_used_9_addr}:${s_not_used_9_port}
+ server srv1 ${s5_addr}:${s5_port}
+ server s_not_used_10 ${s_not_used_10_addr}:${s_not_used_10_port}
+ server s_not_used_11 ${s_not_used_11_addr}:${s_not_used_11_port}
+ server s_not_used_12 ${s_not_used_12_addr}:${s_not_used_12_port}
+ server srv2 ${s6_addr}:${s6_port}
+
+ backend be2
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_1 ${s_not_used_1_addr}:${s_not_used_1_port}
+ server s_not_used_2 ${s_not_used_2_addr}:${s_not_used_2_port}
+ server s_not_used_3 ${s_not_used_3_addr}:${s_not_used_3_port}
+ server s_not_used_4 ${s_not_used_4_addr}:${s_not_used_4_port}
+ server s_not_used_5 ${s_not_used_5_addr}:${s_not_used_5_port}
+ server s_not_used_6 ${s_not_used_6_addr}:${s_not_used_6_port}
+ server srv1 ${s7_addr}:${s7_port}
+ server srv2 ${s8_addr}:${s8_port}
+
+ frontend fe
+ acl acl_be1 path_beg /be1
+ acl acl_be2 path_beg /be2
+ use_backend be1 if acl_be1
+ use_backend be2 if acl_be2
+ bind "fd@${fe}"
+}
+
+haproxy h1 -start
+delay 0.2
+haproxy h2 -start
+delay 2
+
+client cx -connect ${h1_fe_sock} {
+ txreq -url "/be1?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be1/s1
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cy -connect ${h1_fe_sock} {
+ txreq -url "/be2?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be2/s3
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cx -connect ${h2_fe_sock} {
+ txreq -url "/be1?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be1/s5
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cy -connect ${h2_fe_sock} {
+ txreq -url "/be2?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be2/s7
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cX -connect ${h1_fe_sock} {
+ txreq -url "/be1?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be1/s2
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cY -connect ${h1_fe_sock} {
+ txreq -url "/be2?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be2/s4
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cX -connect ${h2_fe_sock} {
+ txreq -url "/be1?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be1/s6
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cY -connect ${h2_fe_sock} {
+ txreq -url "/be2?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ be2/s8
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
diff --git a/reg-tests/stickiness/srvkey-addr.vtc b/reg-tests/stickiness/srvkey-addr.vtc
new file mode 100644
index 0000000..0dc1148
--- /dev/null
+++ b/reg-tests/stickiness/srvkey-addr.vtc
@@ -0,0 +1,263 @@
+vtest "A reg test for stickiness with srvkey addr"
+feature ignore_unknown_macro
+#REGTEST_TYPE=slow
+
+# The aim of this test is to check that "stick on" rules
+# do the job they are supposed to do.
+# If we remove one of the "stick on" rule, this script fails.
+
+#REQUIRE_VERSION=2.4
+
+server s_not_used_1 {}
+server s_not_used_2 {}
+server s_not_used_3 {}
+server s_not_used_4 {}
+server s_not_used_5 {}
+server s_not_used_6 {}
+server s_not_used_7 {}
+server s_not_used_8 {}
+server s_not_used_9 {}
+server s_not_used_10 {}
+server s_not_used_11 {}
+server s_not_used_12 {}
+
+# h1/be1 servers
+server s1 {
+ rxreq
+ txresp -hdr "Server: s1"
+} -repeat 8 -start
+
+server s2 {
+ rxreq
+ txresp -hdr "Server: s2"
+} -repeat 8 -start
+
+haproxy h1 -arg "-L A" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ log stdout format raw local0 debug
+
+ peers mypeers
+ bind "fd@${A}"
+ server A
+ server B ${h2_B_addr}:${h2_B_port}
+ table mytable type string size 10m srvkey addr
+
+ backend be1
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s2_addr}:${s2_port}
+
+ backend be2
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_1 ${s_not_used_1_addr}:${s_not_used_1_port}
+ server s_not_used_2 ${s_not_used_2_addr}:${s_not_used_2_port}
+ server s_not_used_3 ${s_not_used_3_addr}:${s_not_used_3_port}
+ server srv2_2 ${s2_addr}:${s2_port}
+ server s_not_used_4 ${s_not_used_4_addr}:${s_not_used_4_port}
+ server s_not_used_5 ${s_not_used_5_addr}:${s_not_used_5_port}
+ server s_not_used_6 ${s_not_used_6_addr}:${s_not_used_6_port}
+ server srv1_2 ${s1_addr}:${s1_port}
+ server s_no_addr_1 unresolvable1.addr.local init-addr none
+
+ frontend fe
+ acl acl_be1 path_beg /be1
+ acl acl_be2 path_beg /be2
+ use_backend be1 if acl_be1
+ use_backend be2 if acl_be2
+ bind "fd@${fe}"
+}
+
+haproxy h2 -arg "-L B" -conf {
+ defaults
+ mode http
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ peers mypeers
+ bind "fd@${B}"
+ server A ${h1_A_addr}:${h1_A_port}
+ server B
+ table mytable type string size 10m srvkey addr
+
+ backend be1
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_7 ${s_not_used_7_addr}:${s_not_used_7_port}
+ server s_not_used_8 ${s_not_used_8_addr}:${s_not_used_8_port}
+ server s_not_used_9 ${s_not_used_9_addr}:${s_not_used_9_port}
+ server srv1_h2_1 ${s1_addr}:${s1_port}
+ server s_not_used_10 ${s_not_used_10_addr}:${s_not_used_10_port}
+ server s_not_used_11 ${s_not_used_11_addr}:${s_not_used_11_port}
+ server s_not_used_12 ${s_not_used_12_addr}:${s_not_used_12_port}
+ server srv2_h2_1 ${s2_addr}:${s2_port}
+ server s_no_addr_1 unresolvable1.addr.local init-addr none
+
+ backend be2
+ balance roundrobin
+ stick on urlp(client) table mypeers/mytable
+ server s_not_used_1 ${s_not_used_1_addr}:${s_not_used_1_port}
+ server s_not_used_2 ${s_not_used_2_addr}:${s_not_used_2_port}
+ server s_not_used_3 ${s_not_used_3_addr}:${s_not_used_3_port}
+ server s_not_used_4 ${s_not_used_4_addr}:${s_not_used_4_port}
+ server s_not_used_5 ${s_not_used_5_addr}:${s_not_used_5_port}
+ server s_not_used_6 ${s_not_used_6_addr}:${s_not_used_6_port}
+ server srv1_h2_2 ${s1_addr}:${s1_port}
+ server srv2_h2_2 ${s2_addr}:${s2_port}
+ server s_no_addr_2 unresolvable2.addr.local init-addr none
+
+ frontend fe
+ acl acl_be1 path_beg /be1
+ acl acl_be2 path_beg /be2
+ use_backend be1 if acl_be1
+ use_backend be2 if acl_be2
+ bind "fd@${fe}"
+}
+
+haproxy h1 -start
+delay 0.2
+haproxy h2 -start
+delay 2
+
+client cx -connect ${h1_fe_sock} {
+ txreq -url "/be1?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cy -connect ${h1_fe_sock} {
+ txreq -url "/be2?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cx -connect ${h2_fe_sock} {
+ txreq -url "/be1?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cy -connect ${h2_fe_sock} {
+ txreq -url "/be2?client=c1"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s1
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cX -connect ${h1_fe_sock} {
+ txreq -url "/be1?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cY -connect ${h1_fe_sock} {
+ txreq -url "/be2?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cX -connect ${h2_fe_sock} {
+ txreq -url "/be1?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+client cY -connect ${h2_fe_sock} {
+ txreq -url "/be2?client=c2"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.Server ~ s2
+} -repeat 2 -run
+
+haproxy h1 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
+haproxy h2 -cli {
+ send "show table mypeers/mytable"
+ expect ~ .*
+}
+
diff --git a/reg-tests/stream/unique-id-from-proxy.vtc b/reg-tests/stream/unique-id-from-proxy.vtc
new file mode 100644
index 0000000..eaac065
--- /dev/null
+++ b/reg-tests/stream/unique-id-from-proxy.vtc
@@ -0,0 +1,38 @@
+varnishtest "Check that we are able to read a unique-id from PROXYv2"
+
+#REQUIRE_VERSION=2.2
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend echo
+ bind "fd@${fe1}" accept-proxy
+ http-after-response set-header echo %[fc_pp_unique_id,hex]
+ http-request return status 200
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ # PROXY v2 signature
+ sendhex "0d 0a 0d 0a 00 0d 0a 51 55 49 54 0a"
+ # version + PROXY
+ sendhex "21"
+ # TCP4
+ sendhex "11"
+ # length of the address (12) + length of the TLV (8)
+ sendhex "00 14"
+ # 127.0.0.1 42 127.0.0.1 1337
+ sendhex "7F 00 00 01 7F 00 00 01 00 2A 05 39"
+ # PP2_TYPE_UNIQUE_ID + length of the value + "12345"
+ sendhex "05 00 05 31 32 33 34 35"
+
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.echo == "3132333435"
+} -run
diff --git a/reg-tests/stream/unique-id.vtc b/reg-tests/stream/unique-id.vtc
new file mode 100644
index 0000000..0607b2a
--- /dev/null
+++ b/reg-tests/stream/unique-id.vtc
@@ -0,0 +1,47 @@
+varnishtest "unique-id test"
+
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+} -repeat 2 -start
+
+haproxy h1 -conf {
+ defaults
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ frontend stable
+ bind "fd@${fe1}"
+ unique-id-format TEST-%[uuid]
+ http-response set-header A %[unique-id]
+ http-response set-header B %[unique-id]
+ default_backend be
+
+ frontend request_data
+ bind "fd@${fe2}"
+ unique-id-format TEST-%[req.hdr(in)]
+ http-response set-header out %[unique-id]
+ default_backend be
+
+ backend be
+ server srv1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.a == resp.http.b
+} -run
+
+client c2 -connect ${h1_fe2_sock} {
+ txreq -url "/" \
+ -hdr "in: 12345678"
+ rxresp
+ expect resp.status == 200
+ expect resp.http.out == "TEST-12345678"
+} -run
diff --git a/reg-tests/tcp-rules/default_rules.vtc b/reg-tests/tcp-rules/default_rules.vtc
new file mode 100644
index 0000000..8c05f43
--- /dev/null
+++ b/reg-tests/tcp-rules/default_rules.vtc
@@ -0,0 +1,61 @@
+varnishtest "Test declaration of TCP rules in default sections"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(2.5-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+ rxreq
+ txresp
+ expect req.http.x-test1-frt == "def_front"
+ expect req.http.x-test1-bck == "def_back"
+} -start
+
+haproxy h1 -conf {
+ defaults common
+ mode http
+ timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
+ timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
+
+ defaults def_front from common
+ tcp-request connection accept
+ tcp-request session accept
+ tcp-request inspect-delay 5s
+ tcp-request content set-var(txn.test1) "str(def_front)"
+ tcp-request content accept
+
+ defaults def_back from common
+ tcp-request inspect-delay 5s
+ tcp-request content set-var(txn.test1) "str(def_back)"
+ tcp-request content accept
+
+ tcp-response inspect-delay 5s
+ tcp-response content set-var(txn.test2) "str(def_back)"
+ tcp-response content accept
+
+ frontend fe from def_front
+ bind "fd@${feh1}"
+ tcp-request connection reject
+ tcp-request session reject
+ tcp-request content reject
+
+ http-request set-header x-test1-frt "%[var(txn.test1)]"
+
+ default_backend be
+
+ backend be from def_back
+ tcp-response content reject
+
+ http-request set-header x-test1-bck "%[var(txn.test1)]"
+ http-response set-header x-test2 "%[var(txn.test2)]"
+
+ server s1 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_feh1_sock} {
+ txreq -req GET -url /
+ rxresp
+ expect resp.status == 200
+ expect resp.http.x-test2 == "def_back"
+} -run
diff --git a/reg-tests/webstats/missing-stats-fields.vtc b/reg-tests/webstats/missing-stats-fields.vtc
new file mode 100644
index 0000000..c85855d
--- /dev/null
+++ b/reg-tests/webstats/missing-stats-fields.vtc
@@ -0,0 +1,14 @@
+varnishtest "Verifies the absence of (null) in 'show stats' header"
+
+# This can happen if a new ST_F_xxx enum is added without updating
+# stats_fields[].
+
+feature ignore_unknown_macro
+
+haproxy h1 -conf {
+} -start
+
+haproxy h1 -cli {
+ send "show stat"
+ expect !~ (null)
+}
diff --git a/reg-tests/webstats/webstats-scope-and-post-change.vtc b/reg-tests/webstats/webstats-scope-and-post-change.vtc
new file mode 100644
index 0000000..e896c05
--- /dev/null
+++ b/reg-tests/webstats/webstats-scope-and-post-change.vtc
@@ -0,0 +1,84 @@
+varnishtest "Webgui stats page check filtering with scope and changing server state"
+
+feature ignore_unknown_macro
+
+server s1 {
+} -start
+
+haproxy h1 -conf {
+ global
+ # WT: limit false-positives causing "HTTP header incomplete" due to
+ # idle server connections being randomly used and randomly expiring
+ # under us.
+ tune.idle-pool.shared off
+
+ defaults
+ mode http
+
+ frontend fe1
+ bind "fd@${fe1}"
+ stats enable
+ stats refresh 5s
+ stats uri /
+ stats admin if TRUE
+
+ backend b1
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s1_addr}:${s1_port}
+ server srv3 ${s1_addr}:${s1_port}
+
+ backend b2
+ server srv1 ${s1_addr}:${s1_port}
+ server srv2 ${s1_addr}:${s1_port}
+
+} -start
+
+client c1 -connect ${h1_fe1_sock} {
+ txreq -url "/;csv;"
+ rxresp
+ expect resp.status == 200
+} -run
+
+client c2 -connect ${h1_fe1_sock} {
+ txreq -url "/?;csv;scope=b1"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "show stat"
+ expect ~ .*
+}
+
+client c3 -connect ${h1_fe1_sock} {
+ txreq -url "/"
+ rxresp
+ expect resp.status == 200
+
+ txreq -url "/?;csv;scope=b1"
+ rxresp
+ expect resp.status == 200
+ expect resp.body ~ ".*\nb1,BACKEND.*"
+ expect resp.body !~ ".*\nb2,BACKEND.*"
+
+ txreq -req "POST" -url "/?scope=b2" -body "s=srv1&s=srv2&s=srv3&action=maint&b=%233"
+ rxresp
+ expect resp.status == 303
+
+ txreq -req "POST" -url "/" -body "s=srv2&action=drain&b=%233"
+ rxresp
+ expect resp.status == 303
+
+ txreq -req "POST" -url "/" -body "s=srv1&action=maint&b=%234"
+ rxresp
+ expect resp.status == 303
+
+ txreq -url "/?;csv;scope=fe1"
+ rxresp
+ expect resp.status == 200
+} -run
+
+haproxy h1 -cli {
+ send "show stat"
+ expect ~ "\nb1,srv1.*MAINT.*\nb1,srv2.*DRAIN.*\nb1,srv3.*MAINT.*\nb1,BACKEND.*DOWN.*\nb2,srv1.*MAINT.*\nb2,srv2.*no check.*\nb2,BACKEND.*UP"
+} -wait
diff --git a/scripts/announce-release b/scripts/announce-release
new file mode 100755
index 0000000..c990821
--- /dev/null
+++ b/scripts/announce-release
@@ -0,0 +1,279 @@
+#!/usr/bin/env bash
+# prepares a template e-mail and HTML file to announce a new release
+# Copyright (c) 2006-2016 Willy Tarreau <w@1wt.eu>
+#
+# In short :
+# - requires git
+# - wants that last commit is a release/tag
+# - no restriction to master, uses last tag
+# - creates mail-$version.txt
+# - creates web-$version.html
+# - indicates how to edit the mail and how to send it
+
+USAGE="Usage: ${0##*/} [-f] [-p] [-b branch] [-d date] [-o oldver] [-n newver]
+ -f: force to overwrite existing files and ignore local changes
+ -p: prepare future release (skip branch and tags existence checks)
+ -b: force the project branch name to this (def: inherited from the version)
+ -d: force the release date (e.g. to rework a failed announce)
+ -o: previous version (def: newver-1)
+ -n: new version (if not last tag)
+"
+PREPARE=
+FORCE=
+OUTPUT=
+BRANCH=
+HTML=
+DATE=
+YEAR=
+OLD=
+LASTCOM=
+NEWVER=
+NEWTAG=
+DIR=
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 ] || echo "$*"
+ exit 0
+}
+
+while [ -n "$1" -a -z "${1##-*}" ]; do
+ case "$1" in
+ -d) DATE="$2" ; shift 2 ;;
+ -b) BRANCH="$2" ; shift 2 ;;
+ -f) FORCE=1 ; shift ;;
+ -p) PREPARE=1 ; shift ;;
+ -o) OLD="$2" ; shift 2 ;;
+ -n) NEWVER="$2" ; shift 2 ;;
+ -h|--help) quit "$USAGE" ;;
+ *) die "$USAGE" ;;
+ esac
+done
+
+if [ $# -gt 0 ]; then
+ die "$USAGE"
+fi
+
+if ! git rev-parse --verify -q HEAD >/dev/null; then
+ die "Failed to check git HEAD."
+fi
+
+# we want to go to the git root dir
+DIR="$PWD"
+cd $(git rev-parse --show-toplevel)
+
+if [ -z "$FORCE" -a "$(git diff HEAD|wc -c)" != 0 ]; then
+ err "You appear to have uncommitted local changes, please commit them first :"
+ git status -s -uno >&2
+ die
+fi
+
+if [ -z "$PREPARE" -a "$(git rev-parse --verify -q HEAD)" != "$(git rev-parse --verify -q master)" ]; then
+ die "git HEAD doesn't match master branch."
+fi
+
+if [ -n "$NEWVER" ]; then
+ if git show-ref --tags "v$NEWVER" >/dev/null; then
+ NEWTAG="v$NEWVER"
+ else
+ echo "Note: no matching tag v$NEWVER, using HEAD".
+ fi
+fi
+
+# version unspecified or no existing tag for it
+if [ -z "$NEWTAG" ]; then
+ NEWTAG="$(git describe --tags HEAD --abbrev=0)"
+
+ if [ -z "$NEWTAG" ]; then
+ die "Fatal: cannot determine new version, please specify it."
+ elif [ -n "$PREPARE" ] && ! git show-ref --tags HEAD >/dev/null; then
+ # HEAD not tagged, hence we have to pretend we're on one version
+ # after the current tag
+ echo "Current version not tagged, trying to determine next one."
+ NEWTAG="${NEWTAG#v}"
+ if [ -z "$OLD" ]; then
+ OLD="$NEWTAG"
+ fi
+ radix="$NEWTAG"
+ while [ -n "$radix" -a -z "${radix%%*[0-9]}" ]; do
+ radix="${radix%[0-9]}"
+ done
+
+ number=${NEWTAG#$radix}
+ if [ -z "$number" -o "$radix" = "$NEWTAG" ]; then
+ die "Fatal: cannot determine new version, please specify it."
+ fi
+ NEWTAG="${radix}$((number+1))"
+ if [ -z "$NEWVER" ]; then
+ NEWVER="${NEWTAG}"
+ fi
+ NEWTAG="v$NEWTAG"
+ LASTCOM="$(git rev-parse --short HEAD)"
+ echo "Next version expected to be $NEWVER and next tag $NEWTAG based on commit $LASTCOM"
+ elif [ "$(git describe --tags HEAD)" != "$NEWTAG" ]; then
+ die "About to use current HEAD which doesn't seem tagged, it reports '$(git describe --tags HEAD 2>/dev/null)'. Did you release it ?"
+ fi
+elif ! git show-ref --tags "$NEWTAG" >/dev/null 2>&1; then
+ die "git tag $NEWTAG doesn't exist, did you create the release ?"
+fi
+
+if [ -z "$NEWVER" ]; then
+ NEWVER="${NEWTAG#v}"
+fi
+
+if [ -z "$LASTCOM" ]; then
+ LASTCOM="$(git rev-parse --short ${NEWTAG}^)"
+fi
+
+if [ -z "$OLD" ]; then
+ OLD="$(git describe --tags ${LASTCOM} --abbrev=0)"
+ OLD="${OLD#v}"
+fi
+
+if ! git rev-parse --verify -q "v$OLD" >/dev/null; then
+ die "git tag v$OLD doesn't exist."
+fi
+
+# determine the product branch from the new release
+if [ -z "$BRANCH" ]; then
+ subvers=${NEWVER#[0-9]*.[0-9]*[-.]*[0-9].}
+ [ "${subvers}" = "${NEWVER}" ] && subvers=""
+ major=${NEWVER%.$subvers}
+ branch_ext=${major#*[0-9].*[0-9]}
+ BRANCH=${major%${branch_ext}}
+fi
+
+# determine the release date
+if [ -z "$DATE" ]; then
+ DATE="$(git log -1 --pretty=fuller ${NEWTAG} 2>/dev/null | sed -ne '/^CommitDate:/{s/\(^[^ ]*:\)\|\( [-+].*\)//gp;q}')"
+ DATE="$(date +%Y/%m/%d -d "$DATE")"
+fi
+YEAR="${DATE%%/*}"
+
+OUTPUT="$DIR/mail-haproxy-$NEWVER.txt"
+HTML="$DIR/web-haproxy-$NEWVER.html"
+
+[ -z "$FORCE" ] || rm -f "${OUTPUT}" "${HTML}"
+
+if [ -e "$OUTPUT" ]; then
+ die "${OUTPUT##*/} already exists, please remove it or retry with -f."
+fi
+
+if [ -e "$HTML" ]; then
+ die "$HTML already exists, please remove it or retry with -f."
+fi
+
+(
+ echo "# Send this using:"
+ echo "# mutt -H <(tail -n +4 ${OUTPUT##*/}) -s \"[ANNOUNCE] haproxy-$NEWVER\" haproxy@formilux.org"
+) >> "$OUTPUT"
+
+(echo
+ echo "Hi,"
+ echo
+ echo -n "HAProxy $NEWVER was released on $DATE. It added "
+ echo -n $(git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | wc -l)
+ echo " new commits"
+ echo "after version $OLD."
+ echo
+ echo "- per tag :"
+ git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | cut -f1 -d':' | sort | uniq -c
+ echo
+ echo "major commits :"
+ git log --oneline --reverse --format=" - %s" "v$OLD".."$LASTCOM" | grep MAJOR
+ echo
+ echo "- per file :"
+ git show "v$OLD".."$LASTCOM" -- src/ | grep ^diff | awk '{ print substr($3,7)}' | sort | uniq -c | sort -nr | head -15
+ echo
+ echo "- per topic :"
+ git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | cut -f2 -d':' | awk '{sub("s$","",$1); print $1}' | sort | uniq -c
+ echo
+ echo "- sorted changelog :"
+ git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | sort
+ echo
+ echo "#############################################################################################"
+) >> "$OUTPUT"
+
+# report the download paths
+if [ -z "${NEWVER##*-dev*}" ]; then
+ gitdir="haproxy.git"
+else
+ gitdir="haproxy-$BRANCH.git"
+fi
+
+(echo "Please find the usual URLs below :"
+ echo " Site index : https://www.haproxy.org/"
+ echo " Documentation : https://docs.haproxy.org/"
+ echo " Wiki : https://github.com/haproxy/wiki/wiki"
+ echo " Discourse : https://discourse.haproxy.org/"
+ echo " Slack channel : https://slack.haproxy.org/"
+ echo " Issue tracker : https://github.com/haproxy/haproxy/issues"
+ echo " Sources : https://www.haproxy.org/download/${BRANCH}/src/"
+ echo " Git repository : https://git.haproxy.org/git/${gitdir}/"
+ echo " Git Web browsing : https://git.haproxy.org/?p=${gitdir}"
+ echo " Changelog : https://www.haproxy.org/download/${BRANCH}/src/CHANGELOG"
+ echo " Dataplane API : https://github.com/haproxytech/dataplaneapi/releases/latest"
+ echo " Pending bugs : https://www.haproxy.org/l/pending-bugs"
+ echo " Reviewed bugs : https://www.haproxy.org/l/reviewed-bugs"
+ echo " Code reports : https://www.haproxy.org/l/code-reports"
+ echo " Latest builds : https://www.haproxy.org/l/dev-packages"
+) >> "$OUTPUT"
+
+# sign
+(echo
+ echo "${GIT_COMMITTER_NAME% *}"
+) >> "$OUTPUT"
+
+(echo "---"
+ echo "Complete changelog :"
+ git shortlog "v$OLD".."$LASTCOM"
+ echo "---"
+) >> "$OUTPUT"
+
+
+# prepare the HTML update
+set -- $(date +%e -d "$DATE")
+case "$1" in
+ 11|12|13) day="${1}th" ;;
+ *1) day="${1}st" ;;
+ *2) day="${2}nd" ;;
+ *3) day="${1}rd" ;;
+ *) day="${1}th" ;;
+esac
+
+humandate=$(date "+%B, $day, %Y" -d "$DATE")
+(echo "$humandate</b> : <i>$NEWVER</i>"
+ echo " <p>"
+ echo " <ul>"
+ echo "<--------------------------- edit contents below --------------------------->"
+ echo "- per tag :"
+ git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | cut -f1 -d':' | sort | uniq -c
+ echo
+ echo "- per topic :"
+ git log --oneline --reverse --format="%s" "v$OLD".."$LASTCOM" | cut -f2 -d':' | awk '{sub("s$","",$1); print $1}' | sort | uniq -c
+ echo
+ echo "major commits :"
+ git log --oneline --reverse --format=" - %s" "v$OLD".."$LASTCOM" | grep MAJOR
+ echo
+ echo "<--------------------------------------------------------------------------->"
+ echo " Code and changelog are available <a href=\"/download/${BRANCH}/src/\">here</a> as usual."
+ echo " </ul>"
+ echo " <p>"
+ echo " <b>"
+) >> "$HTML"
+
+echo "The announce was emitted into file $OUTPUT."
+echo "You can edit it and send it this way :"
+echo
+echo " mutt -H <(tail -n +4 ${OUTPUT##*/}) -s \"[ANNOUNCE] haproxy-$NEWVER\" haproxy@formilux.org"
+echo
+echo "The HTML block was emitted into $HTML and needs to be finished by hand."
+echo
diff --git a/scripts/backport b/scripts/backport
new file mode 100755
index 0000000..4f60140
--- /dev/null
+++ b/scripts/backport
@@ -0,0 +1,146 @@
+#!/usr/bin/env bash
+
+USAGE="Usage: ${0##*/} <last> <commit> [...]"
+START="$PWD"
+LAST=
+UPSTREAM=
+COMMIT=
+BRANCH=
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 ] || echo "$*"
+ exit 0
+}
+
+short() {
+ git rev-parse --short "$1"
+}
+
+# returns the latest commit ID in $REPLY. Returns 0 on success, non-zero on
+# failure with $REPLY empty.
+get_last_commit() {
+ REPLY=$(git rev-parse HEAD)
+ test -n "$REPLY"
+}
+
+# returns the name of the current branch (1.8, 1.9, etc) in $REPLY. Returns 0
+# on success, non-zero on failure with $REPLY empty.
+get_branch() {
+ local major subver ext
+ REPLY=$(git describe --tags HEAD --abbrev=0 2>/dev/null)
+ REPLY=${REPLY#v}
+ subver=${REPLY#[0-9]*.[0-9]*[-.]*[0-9].}
+ [ "${subver}" != "${REPLY}" ] || subver=""
+ major=${REPLY%.$subver}
+ ext=${major#*[0-9].*[0-9]}
+ REPLY=${major%${ext}}
+ test -n "$REPLY"
+}
+
+# returns the path to the next "up" remote in $REPLY, and zero on success
+# or non-zero when the last one was reached.
+up() {
+ REPLY=$(git remote -v | awk '/^up\t.*\(fetch\)$/{print $2}')
+ test -n "$REPLY"
+}
+
+# returns the path to the next "down" remote in $REPLY, and zero on success
+# or non-zero when the last one was reached.
+down() {
+ REPLY=$(git remote -v | awk '/^down\t.*\(fetch\)$/{print $2}')
+ test -n "$REPLY"
+}
+
+# verifies that the repository is clean of any pending changes
+check_clean() {
+ test -z "$(git status -s -uno)"
+}
+
+# verifies that HEAD is the master
+check_master() {
+ test "$(git rev-parse --verify -q HEAD 2>&1)" = "$(git rev-parse --verify -q master 2>&1)"
+}
+
+# tries to switch to the master branch, only if the current one is clean. Dies on failure.
+switch_master() {
+ check_clean || die "$BRANCH: local changes, stopping on commit $COMMIT (upstream $UPSTREAM)"
+ git checkout master >/dev/null 2>&1 || die "$BRANCH: failed to checkout master, stopping on commit $COMMIT (upstream $UPSTREAM)"
+}
+
+# walk up to the first repo
+walk_up() {
+ cd "$START"
+}
+
+# updates the "up" remote repository. Returns non-zero on error.
+update_up() {
+ git remote update up >/dev/null 2>&1
+}
+
+# backports commit "$1" with a signed-off by tag. In case of failure, aborts
+# the change and returns non-zero. Unneeded cherry-picks do return an error
+# because we don't want to accidentally backport the latest commit instead of
+# this one, and we don't know this one's ID.
+backport_commit() {
+ local empty=1
+
+ if ! git cherry-pick -sx "$1"; then
+ [ -n "$(git diff)" -o -n "$(git diff HEAD)" ] || empty=0
+ git cherry-pick --abort
+ return 1
+ fi
+}
+
+[ "$1" != "-h" -a "$1" != "--help" ] || quit "$USAGE"
+[ -n "$1" -a -n "$2" ] || die "$USAGE"
+
+LAST="$1"
+shift
+
+# go back to the root of the repo
+cd $(git rev-parse --show-toplevel)
+START="$PWD"
+
+while [ -n "$1" ]; do
+ UPSTREAM="$(short $1)"
+ [ -n "$UPSTREAM" ] || die "branch $BRANCH: unknown commit ID $1, cannot backport."
+ COMMIT="$UPSTREAM"
+ BRANCH="-source-"
+ while :; do
+ if ! down; then
+ err "branch $BRANCH: can't go further, is repository 'down' properly set ?"
+ break
+ fi
+
+ cd "$REPLY" || die "Failed to 'cd' to '$REPLY' from '$PWD', is repository 'down' properly set ?"
+
+ check_clean || die "Local changes in $PWD, stopping before backporting commit $COMMIT (upstream $UPSTREAM)"
+
+ check_master || switch_master || die "Cannot switch to 'master' branch in $PWD, stopping before backporting commit $COMMIT (upstream $UPSTREAM)"
+ get_branch || die "Failed to get branch name in $PWD, stopping before backporting commit $COMMIT (upstream $UPSTREAM)"
+ BRANCH="$REPLY"
+
+ update_up || die "$BRANCH: failed to update repository 'up', stopping before backporting commit $COMMIT (upstream $UPSTREAM)"
+
+ backport_commit "$COMMIT" || die "$BRANCH: failed to backport commit $COMMIT (upstream $UPSTREAM). Leaving repository $PWD intact."
+
+ if [ "$BRANCH" = "$LAST" ]; then
+ # reached the stop point, don't apply further
+ break
+ fi
+
+ get_last_commit || die "$BRANCH: cannot retrieve last commit ID, stopping after backporting commit $COMMIT (upstream $UPSTREAM)"
+ COMMIT="$(short $REPLY)"
+ done
+ walk_up || die "Failed to go back to $PWD, stopping *after* backporting upstream $UPSTREAM"
+ shift
+done
diff --git a/scripts/build-ot.sh b/scripts/build-ot.sh
new file mode 100755
index 0000000..fb128b3
--- /dev/null
+++ b/scripts/build-ot.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#
+# OT helper. script built from documentation: https://github.com/haproxytech/opentracing-c-wrapper
+#
+
+set -e
+
+OT_CPP_VERSION="${OT_CPP_VERSION:-1.6.0}"
+OT_PREFIX="${OT_PREFIX:-${HOME}/opt}"
+
+wget -P download-cache/ "https://github.com/opentracing/opentracing-cpp/archive/v${OT_CPP_VERSION}.tar.gz"
+
+tar xf download-cache/v${OT_CPP_VERSION}.tar.gz
+cd opentracing-cpp-${OT_CPP_VERSION}
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=${OT_PREFIX} -DBUILD_STATIC_LIBS=OFF -DBUILD_MOCKTRACER=OFF -DBUILD_TESTING=OFF ..
+make -j$(nproc)
+make install
+
+git clone https://github.com/haproxytech/opentracing-c-wrapper.git
+cd opentracing-c-wrapper
+ ./scripts/bootstrap
+ ./configure --prefix=${OT_PREFIX} --with-opentracing=${OT_PREFIX}
+ make -j$(nproc)
+ make install
diff --git a/scripts/build-ssl.sh b/scripts/build-ssl.sh
new file mode 100755
index 0000000..1c17775
--- /dev/null
+++ b/scripts/build-ssl.sh
@@ -0,0 +1,208 @@
+#!/bin/sh
+set -eux
+
+download_openssl () {
+ if [ ! -f "download-cache/openssl-${OPENSSL_VERSION}.tar.gz" ]; then
+
+#
+# OpenSSL has different links for latest and previous releases
+# since we want to download several versions, let us try to treat
+# current version as latest, if it fails, follow with previous
+#
+
+ wget -P download-cache/ \
+ "https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz" || \
+ wget -P download-cache/ \
+ "https://www.openssl.org/source/old/${OPENSSL_VERSION%[a-z]}/openssl-${OPENSSL_VERSION}.tar.gz"
+ fi
+}
+
+# recent openssl versions support parallel builds and skipping the docs,
+# while older ones require to build everything sequentially.
+build_openssl_linux () {
+ (
+ cd "openssl-${OPENSSL_VERSION}/"
+ ./config shared --prefix="${HOME}/opt" --openssldir="${HOME}/opt" --libdir=lib -DPURIFY
+ if [ -z "${OPENSSL_VERSION##1.*}" ]; then
+ make all
+ else
+ make -j$(nproc) build_sw
+ fi
+ make install_sw
+ )
+}
+
+build_openssl_osx () {
+ (
+ cd "openssl-${OPENSSL_VERSION}/"
+ ./Configure darwin64-x86_64-cc shared \
+ --prefix="${HOME}/opt" --openssldir="${HOME}/opt" --libdir=lib -DPURIFY
+ make depend build_sw install_sw
+ )
+}
+
+build_openssl () {
+ if [ "$(cat ${HOME}/opt/.openssl-version)" != "${OPENSSL_VERSION}" ]; then
+ tar zxf "download-cache/openssl-${OPENSSL_VERSION}.tar.gz"
+ case `uname` in
+ 'Darwin')
+ build_openssl_osx
+ ;;
+ 'Linux')
+ build_openssl_linux
+ ;;
+ esac
+ echo "${OPENSSL_VERSION}" > "${HOME}/opt/.openssl-version"
+ fi
+}
+
+download_libressl () {
+ if [ ! -f "download-cache/libressl-${LIBRESSL_VERSION}.tar.gz" ]; then
+ wget -P download-cache/ \
+ "https://cdn.openbsd.org/pub/OpenBSD/LibreSSL/libressl-${LIBRESSL_VERSION}.tar.gz"
+ fi
+}
+
+build_libressl () {
+ if [ "$(cat ${HOME}/opt/.libressl-version)" != "${LIBRESSL_VERSION}" ]; then
+ tar zxf "download-cache/libressl-${LIBRESSL_VERSION}.tar.gz"
+ (
+ cd "libressl-${LIBRESSL_VERSION}/"
+ ./configure --prefix="${HOME}/opt"
+ make all install
+ )
+ echo "${LIBRESSL_VERSION}" > "${HOME}/opt/.libressl-version"
+ fi
+}
+
+download_boringssl () {
+ if [ ! -d "download-cache/boringssl" ]; then
+ git clone --depth=1 https://boringssl.googlesource.com/boringssl download-cache/boringssl
+ else
+ (
+ cd download-cache/boringssl
+ git pull
+ )
+ fi
+}
+
+download_aws_lc () {
+ if [ ! -f "download-cache/aws-lc-${AWS_LC_VERSION}.tar.gz" ]; then
+ mkdir -p download-cache
+ wget -q -O "download-cache/aws-lc-${AWS_LC_VERSION}.tar.gz" \
+ "https://github.com/aws/aws-lc/archive/refs/tags/v${AWS_LC_VERSION}.tar.gz"
+ fi
+}
+
+build_aws_lc () {
+ if [ "$(cat ${HOME}/opt/.aws_lc-version)" != "${AWS_LC_VERSION}" ]; then
+ tar zxf "download-cache/aws-lc-${AWS_LC_VERSION}.tar.gz"
+ (
+ cd "aws-lc-${AWS_LC_VERSION}/"
+ mkdir -p build
+ cd build
+ cmake -version
+ cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=1 -DDISABLE_GO=1 -DDISABLE_PERL=1 \
+ -DBUILD_TESTING=0 -DCMAKE_INSTALL_PREFIX=${HOME}/opt ..
+ make -j$(nproc)
+ make install
+ )
+ echo "${AWS_LC_VERSION}" > "${HOME}/opt/.aws_lc-version"
+ fi
+}
+
+download_quictls () {
+ if [ ! -d "download-cache/quictls" ]; then
+ git clone --depth=1 https://github.com/quictls/openssl download-cache/quictls
+ else
+ (
+ cd download-cache/quictls
+ git pull
+ )
+ fi
+}
+
+download_wolfssl () {
+ if [ ! -f "download-cache/wolfssl-${WOLFSSL_VERSION}.tar.gz" ]; then
+ mkdir -p download-cache
+ if [ "${WOLFSSL_VERSION%%-*}" != "git" ]; then
+ wget -q -O "download-cache/wolfssl-${WOLFSSL_VERSION}.tar.gz" \
+ "https://github.com/wolfSSL/wolfssl/archive/refs/tags/v${WOLFSSL_VERSION}-stable.tar.gz"
+ else
+ wget -q -O "download-cache/wolfssl-${WOLFSSL_VERSION}.tar.gz" \
+ "https://github.com/wolfSSL/wolfssl/archive/${WOLFSSL_VERSION##git-}.tar.gz"
+ fi
+ fi
+}
+
+build_wolfssl () {
+ if [ "$(cat ${HOME}/opt/.wolfssl-version)" != "${WOLFSSL_VERSION}" ]; then
+ mkdir "wolfssl-${WOLFSSL_VERSION}/"
+ tar zxf "download-cache/wolfssl-${WOLFSSL_VERSION}.tar.gz" -C "wolfssl-${WOLFSSL_VERSION}/" --strip-components=1
+ (
+ cd "wolfssl-${WOLFSSL_VERSION}/"
+ autoreconf -i
+ ./configure --enable-haproxy --enable-quic --prefix="${HOME}/opt"
+ make -j$(nproc)
+ make install
+ )
+ echo "${WOLFSSL_VERSION}" > "${HOME}/opt/.wolfssl-version"
+ fi
+}
+
+if [ ! -z ${LIBRESSL_VERSION+x} ]; then
+ download_libressl
+ build_libressl
+fi
+
+if [ ! -z ${OPENSSL_VERSION+x} ]; then
+ download_openssl
+ build_openssl
+fi
+
+if [ ! -z ${BORINGSSL+x} ]; then
+ (
+
+ # travis-ci comes with go-1.11, while boringssl requires go-1.13
+ eval "$(curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | GIMME_GO_VERSION=1.13 bash)"
+
+ download_boringssl
+ cd download-cache/boringssl
+ if [ -d build ]; then rm -rf build; fi
+ mkdir build
+ cd build
+ cmake -GNinja -DCMAKE_BUILD_TYPE=release -DBUILD_SHARED_LIBS=1 ..
+ ninja
+
+ rm -rf ${HOME}/opt/lib || exit 0
+ rm -rf ${HOME}/opt/include || exit 0
+
+ mkdir -p ${HOME}/opt/lib
+ cp crypto/libcrypto.so ssl/libssl.so ${HOME}/opt/lib
+
+ mkdir -p ${HOME}/opt/include
+ cp -r ../include/* ${HOME}/opt/include
+ )
+fi
+
+if [ ! -z ${AWS_LC_VERSION+x} ]; then
+ download_aws_lc
+ build_aws_lc
+fi
+
+if [ ! -z ${QUICTLS+x} ]; then
+ (
+ download_quictls
+ cd download-cache/quictls
+
+ ./config shared no-tests ${QUICTLS_EXTRA_ARGS:-} --prefix="${HOME}/opt" --openssldir="${HOME}/opt" --libdir=lib -DPURIFY
+ make -j$(nproc) build_sw
+ make install_sw
+
+ )
+fi
+
+if [ ! -z ${WOLFSSL_VERSION+x} ]; then
+ download_wolfssl
+ build_wolfssl
+fi
diff --git a/scripts/build-vtest.sh b/scripts/build-vtest.sh
new file mode 100755
index 0000000..4db35d6
--- /dev/null
+++ b/scripts/build-vtest.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -eux
+
+curl -fsSL https://github.com/vtest/VTest/archive/master.tar.gz -o VTest.tar.gz
+mkdir ../vtest
+tar xvf VTest.tar.gz -C ../vtest --strip-components=1
+# Special flags due to: https://github.com/vtest/VTest/issues/12
+make -C ../vtest FLAGS="-O2 -s -Wall"
+
diff --git a/scripts/create-release b/scripts/create-release
new file mode 100755
index 0000000..b8a867c
--- /dev/null
+++ b/scripts/create-release
@@ -0,0 +1,237 @@
+#!/usr/bin/env bash
+# creates a new haproxy release at the current commit
+# Copyright (c) 2006-2016 Willy Tarreau <w@1wt.eu>
+#
+# In short :
+# - requires git
+# - works only from master branch
+# - finds old and new version by itself
+# - builds changelog
+# - updates dates and versions in files
+# - commits + tags + signs
+# - no upload!
+
+USAGE="Usage: ${0##*/} [-i] [-y] [-t] [-b branch] [-d date] [-o oldver] [-n newver]"
+INTERACTIVE=
+TAGONLY=
+SAYYES=
+BRANCH=
+DATE=
+YEAR=
+OLD=
+NEW=
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 ] || echo "$*"
+ exit 0
+}
+
+do_commit() {
+ (
+ echo "[RELEASE] Released version $NEW"
+ echo
+ echo "Released version $NEW with the following main changes :"
+ sed -ne '/^[ ]*-/,/^$/{p;b a};d;:a;/^$/q' CHANGELOG
+ ) | git commit -a -F -
+}
+
+do_tag() {
+ git tag -u "$GIT_GPG_KEY" -s -m "HAProxy $NEW" v$NEW && echo "Tagged as v$NEW"
+}
+
+if [ -z "$GIT_COMMITTER_NAME" ]; then
+ GIT_COMMITTER_NAME=$(git config --get user.name)
+ [ -n "$GIT_COMMITTER_NAME" ] || die "GIT_COMMITTER_NAME not set"
+fi
+
+if [ -z "$GIT_COMMITTER_EMAIL" ]; then
+ GIT_COMMITTER_EMAIL=$(git config --get user.email)
+ [ -n "$GIT_COMMITTER_EMAIL" ] || die "GIT_COMMITTER_EMAIL not set"
+fi
+
+while [ -n "$1" -a -z "${1##-*}" ]; do
+ case "$1" in
+ -y) SAYYES=1 ; shift ;;
+ -i) INTERACTIVE=1 ; shift ;;
+ -t) TAGONLY=1 ; shift ;;
+ -d) DATE="$2" ; shift 2 ;;
+ -b) BRANCH="$2" ; shift 2 ;;
+ -o) OLD="$2" ; shift 2 ;;
+ -n) NEW="$2" ; shift 2 ;;
+ -h|--help) quit "$USAGE" ;;
+ *) die "$USAGE" ;;
+ esac
+done
+
+if [ $# -gt 0 ]; then
+ die "$USAGE"
+fi
+
+if [ -z "$GIT_GPG_KEY" ]; then
+ die "GIT_GPG_KEY is not set, it must contain your GPG key ID."
+fi
+
+if ! git rev-parse --verify -q HEAD >/dev/null; then
+ die "Failed to check git HEAD."
+fi
+
+# we want to go to the git top dir
+cd $(git rev-parse --show-toplevel)
+
+if [ "$(git rev-parse --verify -q HEAD)" != "$(git rev-parse --verify -q master)" ]; then
+ die "git HEAD doesn't match master branch."
+fi
+
+if [ "$(git diff HEAD|wc -c)" != 0 ]; then
+ err "You appear to have uncommitted local changes, please commit them first :"
+ git status -s -uno >&2
+ die
+fi
+
+if [ -z "$OLD" ]; then
+ OLD="$(git describe --tags HEAD --abbrev=0)"
+ OLD="${OLD#v}"
+fi
+
+if ! git rev-parse --verify -q "v$OLD" >/dev/null; then
+ die "git tag v$OLD doesn't exist."
+fi
+
+if [ -z "$NEW" ]; then
+ radix="$OLD"
+ while [ -n "$radix" -a -z "${radix%%*[0-9]}" ]; do
+ radix="${radix%[0-9]}"
+ done
+
+ number=${OLD#$radix}
+ if [ -z "$number" -o "$radix" = "$OLD" ]; then
+ die "Fatal: cannot determine new version, please specify it."
+ fi
+ NEW=${radix}$((number+1))
+fi
+
+if git show-ref --tags "v$NEW" >/dev/null; then
+ die "git tag v$NEW already exists, please remove it first."
+fi
+
+# determine the product branch from the new release
+if [ -z "$BRANCH" ]; then
+ subvers=${NEW#[0-9]*.[0-9]*[-.]*[0-9].}
+ [ "${subvers}" = "${NEW}" ] && subvers=""
+ major=${NEW%.$subvers}
+ branch_ext=${major#*[0-9].*[0-9]}
+ BRANCH=${major%${branch_ext}}
+fi
+
+
+# determine the release date
+if [ -z "$DATE" ]; then
+ # Uncomment the line below to use the date of the last commit,
+ # otherwise fall back to current date
+ DATE="$(git log --pretty=fuller -1 v$NEW 2>/dev/null | sed -ne '/^CommitDate:/{s/\(^[^ ]*:\)\|\( [-+].*\)//gp;q}')"
+ DATE="$(date +%Y/%m/%d -d "$DATE")"
+else
+ if [ "$DATE" != "$(date +%Y/%m/%d -d "$DATE")" ]; then
+ die "Date format must exclusively be YYYY/MM/DD ; date was '$DATE'."
+ fi
+fi
+YEAR="${DATE%%/*}"
+
+if [ -n "$TAGONLY" ]; then
+ do_tag || die "Failed to tag changes"
+ echo "Done. You may have to push changes."
+ exit 0
+fi
+
+echo "About to release version $NEW from $OLD at $DATE (branch $BRANCH)."
+if [ -z "$SAYYES" ]; then
+ echo "Press ENTER to continue or Ctrl-C to abort now!"
+ read
+fi
+
+echo "Updating CHANGELOG ..."
+( echo "ChangeLog :"
+ echo "==========="
+ echo
+ echo "$DATE : $NEW"
+ #git shortlog v$OLD.. | sed -ne 's/^ / - /p'
+ if [ $(git log --oneline v$OLD.. | wc -l) = 0 ]; then
+ echo " - exact copy of $OLD"
+ else
+ git log --oneline --reverse --format=" - %s" v$OLD..
+ fi
+ echo
+ tail -n +4 CHANGELOG
+) >.chglog.tmp && mv .chglog.tmp CHANGELOG
+
+echo "Updating VERSION ..."
+rm -f VERSION VERDATE
+echo "$NEW" > VERSION
+
+echo "Updating VERDATE ..."
+echo '$Format:%ci$' > VERDATE
+echo "$DATE" >> VERDATE
+
+# updating branch and date in all modified doc files except the outdated architecture.txt
+for file in doc/intro.txt doc/configuration.txt doc/management.txt $(git diff --name-only v${OLD}.. -- doc); do
+ if [ ! -e "$file" ]; then continue; fi
+ if [ "$file" = doc/architecture.txt ]; then continue; fi
+ echo "Updating $file ..."
+ sed -e "1,10s:\(\sversion\s\).*:\1$BRANCH:" \
+ -e "1,10s:\(\s\)\(20[0-9]\{2\}/[0-9]\{1,2\}/[0-9]\{1,2\}\):\1$DATE:" \
+ -i "$file"
+done
+
+echo "Updating haproxy.c ..."
+sed -e "s:Copyright 2000-[0-9]*\s*Willy Tarreau.*>:Copyright 2000-$YEAR Willy Tarreau <willy@haproxy.org>:" \
+ -i src/haproxy.c
+
+echo "Updating version.h ..."
+sed -e "s:^\(#define\s*PRODUCT_BRANCH\s*\)\"[^\"]*\":\1\"$BRANCH\":" \
+ -i include/haproxy/version.h
+
+if [ -n "$INTERACTIVE" ]; then
+ vi CHANGELOG VERSION VERDATE \
+ src/haproxy.c doc/configuration.txt \
+ $(git diff --name-only v${OLD}.. -- doc)
+fi
+
+if [ "$(git diff -- CHANGELOG | wc -c)" = 0 ]; then
+ die "CHANGELOG must be updated."
+fi
+
+if [ -z "$SAYYES" ]; then
+ echo "Press ENTER to review the changes..."
+ read
+fi
+
+git diff
+
+echo
+echo "About to commit and tag version $NEW with the following message:"
+echo
+echo "[RELEASE] Released version $NEW with the following main changes :"
+sed -ne '/^[ ]*-/,/^$/{p;b a};d;:a;/^$/q' CHANGELOG
+
+echo
+echo "LAST chance to cancel! Press ENTER to proceed now or Ctrl-C to abort."
+read
+
+do_commit || die "Failed to commit changes"
+do_tag || die "Failed to tag changes"
+
+remote=$(git config --get branch.master.remote)
+echo "Do not forget to push updates, publish and announce this version :"
+echo
+echo "git push ${remote:-origin} master v$NEW"
+echo "${0%/*}/publish-release"
+echo "${0%/*}/announce-release"
diff --git a/scripts/git-show-backports b/scripts/git-show-backports
new file mode 100755
index 0000000..f2c40fe
--- /dev/null
+++ b/scripts/git-show-backports
@@ -0,0 +1,336 @@
+#!/usr/bin/env bash
+#
+# Compares multiple branches against a reference and shows which ones contain
+# each commit, and the level of backports since the origin or its own ancestors.
+#
+# Copyright (c) 2016 Willy Tarreau <w@1wt.eu>
+#
+# The purpose is to make it easy to visualize what backports might be missing
+# in a maintenance branch, and to easily spot the ones that are needed and the
+# ones that are not. It solely relies on the "cherry-picked from" tags in the
+# commit messages to find what commit is available where, and can even find a
+# reference commit's ancestor in another branch's commit ancestors as well to
+# detect that the patch is present. When done with the proper references and
+# a correct ordering of the branches, it can be used to quickly apply a set of
+# fixes to a branch since it dumps suggested commands at the end. When doing
+# so it is a good idea to use "HEAD" as the last branch to avoid doing mistakes.
+#
+# Examples :
+# - find what's in master and not in current branch :
+# show-backports -q -m -r master HEAD
+# - find what's in 1.6/master and in hapee-maint-1.5r2 but not in current branch :
+# show-backports -q -m -r 1.6/master hapee-maint-1.5r2 HEAD | grep ' [a-f0-9]\{8\}[-+][0-9] '
+# - check that no recent fix from master is missing in any maintenance branch :
+# show-backports -r master hapee-maint-1.5r2 aloha-7.5 hapee-maint-1.5r1 aloha-7.0
+# - see what was recently merged into 1.6 and has no equivalent in local master :
+# show-backports -q -m -r 1.6/master -b "1.6/master@{1 week ago}" master
+# - check what extra backports are present in hapee-r2 compared to hapee-r1 :
+# show-backports -q -m -r hapee-r2 hapee-r1
+
+
+USAGE="Usage: ${0##*/} [-q] [-H] [-m] [-u] [-r reference] [-l logexpr] [-s subject] [-b base] {branch|range} [...] [-- file*]"
+BASES=( )      # per-branch optional base (from "base..branch" arguments)
+BRANCHES=( )   # branches to compare against the reference
+REF=           # -r: reference branch (guessed, or "master" when empty)
+BASE=          # -b: base commit limiting the history to inspect
+QUIET=         # -q: don't re-print the title line every 20 commits
+LOGEXPR=       # -l: extra "git log --grep" expression
+SUBJECT=       # -s: only keep commits whose log line matches this string
+MISSING=       # -m: only report commits missing from some branches
+UPSTREAM=      # -u: print the upstream commit ID instead of the local one
+BODYHASH=      # -H: also try to match commits by a hash of their body
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 ] || echo "$*"
+ exit 0
+}
+
+short() {
+ # git rev-parse --short $1
+ echo "${1::8}"
+}
+
+# Print one line per commit of the reference branch (read from its cache file
+# in $WORK), with one column per branch showing whether the commit was found
+# there. Per the original markers: "<hash>-<n>" means found directly,
+# "<hash>+<n>" found via one of the commit's own upstream ancestors,
+# "<hash>+?" found via the body hash only (-H), and "-" not found.
+dump_commit_matrix() {
+	title=":$REF:"
+	for branch in "${BRANCHES[@]}"; do
+		#echo -n " $branch"
+		title="$title :${branch}:"
+	done
+	title="$title |"
+
+	count=0
+	# now look up commits
+	while read ref subject; do
+		# release commits are never expected to be backported
+		if [ -n "$MISSING" -a "${subject:0:9}" = "[RELEASE]" ]; then
+			continue
+		fi
+
+		upstream="none"
+		missing=0
+		refbhash=""
+		line=""
+		for branch in "${BRANCHES[@]}"; do
+			# cache line format: <hash> <upstream hashes...> [H<bodyhash>]
+			set -- $(grep -m 1 $ref "$WORK/${branch//\//_}")
+			newhash=$1 ; shift
+			bhash=""
+			# count the number of cherry-picks after this one. Since we shift,
+			# the result is in "$#"
+			while [ -n "$1" -a "$1" != "$ref" ]; do
+				shift
+			done
+			if [ -n "$newhash" ]; then
+				line="${line} $(short $newhash)-$#"
+			else
+				# before giving up we can check if our current commit was
+				# itself cherry-picked and check this again. In order not
+				# to have to do it all the time, we can cache the result
+				# for the current line. If a match is found we report it
+				# with the '+' delimiter instead of '-'.
+				if [ "$upstream" = "none" ]; then
+					upstream=( $(git log -1 --pretty --format=%B "$ref" | \
+						sed -n 's/^commit \([^)]*\) upstream\.$/\1/p;s/^(cherry picked from commit \([^)]*\))/\1/p') )
+				fi
+				newhash=""
+				for h in ${upstream[@]}; do
+					set -- $(grep -m 1 $h "$WORK/${branch//\//_}")
+					newhash=$1 ; shift
+					while [ -n "$1" -a "$1" != "$h" ]; do
+						shift
+					done
+					if [ -n "$newhash" ]; then
+						line="${line} $(short $newhash)+$#"
+						break
+					fi
+				done
+				# last resort: match by the hash of the commit body (-H),
+				# computed lazily once per reference commit
+				if [ -z "$newhash" -a -n "$BODYHASH" ]; then
+					if [ -z "$refbhash" ]; then
+						refbhash=$(git log -1 --pretty="%an|%ae|%at|%B" "$ref" | sed -n '/^\(Signed-off-by\|(cherry picked\)/q;p' | md5sum)
+					fi
+
+
+					set -- $(grep -m 1 "H$refbhash\$" "$WORK/${branch//\//_}")
+					newhash=$1 ; shift
+					if [ -n "$newhash" ]; then
+						line="${line} $(short $newhash)+?"
+						break
+					fi
+				fi
+				if [ -z "$newhash" ]; then
+					line="${line} -"
+					missing=1
+				fi
+			fi
+		done
+		line="${line} |"
+		# in -m mode only rows missing somewhere are shown; the title line is
+		# re-emitted every 20 rows unless -q was given
+		if [ -z "$MISSING" -o $missing -gt 0 ]; then
+			[ $((count++)) -gt 0 ] || echo "$title"
+			[ "$QUIET" != "" -o $count -lt 20 ] || count=0
+			if [ -z "$UPSTREAM" -o "$upstream" = "none" -o -z "$upstream" ]; then
+				echo "$(short $ref) $line"
+			else
+				echo "$(short $upstream) $line"
+			fi
+		fi
+	done < "$WORK/${REF//\//_}"
+}
+
+# command line option parsing; the first argument not starting with '-' ends it
+while [ -n "$1" -a -z "${1##-*}" ]; do
+	case "$1" in
+		-b) BASE="$2" ; shift 2 ;;
+		-r) REF="$2" ; shift 2 ;;
+		-l) LOGEXPR="$2" ; shift 2 ;;
+		-s) SUBJECT="$2" ; shift 2 ;;
+		-q) QUIET=1 ; shift ;;
+		-m) MISSING=1 ; shift ;;
+		-u) UPSTREAM=1 ; shift ;;
+		-H) BODYHASH=1 ; shift ;;
+		-h|--help) quit "$USAGE" ;;
+		*) die "$USAGE" ;;
+	esac
+done
+
+# if no ref, either we're checking missing backports and we'll guess
+# the upstream reference branch based on which one contains most of
+# the latest commits, or we'll use master.
+if [ -z "$REF" ]; then
+	if [ -n "$MISSING" ]; then
+		# check the last 10 commits in the base branch, and see where
+		# they seem to be coming from.
+		TAG="$(git describe --tags ${BASE:-HEAD} --abbrev=0)"
+		LAST_COMMITS=( $(git rev-list --abbrev-commit --reverse "$TAG^^.." | tail -n10) )
+		REF=$(for i in "${LAST_COMMITS[@]}"; do
+			upstream=$(git log -1 --pretty --format=%B $i |
+				sed -n 's/^commit \([^)]*\) upstream\.$/\1/p;s/^(cherry picked from commit \([^)]*\))/\1/p' |
+				tail -n1)
+			if [ -n "$upstream" ]; then
+				# use local first then remote branch
+				( git branch --sort=refname --contains $upstream | head -n1 ;
+				  git branch -r --sort=refname --contains $upstream | head -n1) 2>&1 |
+					grep 'master\|maint' | head -n1
+			fi
+		done | sort | uniq -c | sort -nr | awk '{ print $NF; exit;}')
+		# here we have a name, e.g. "2.6/master" in REF
+		REF="${REF:-master}"
+		err "Warning! No ref specified, using $REF."
+	else
+		REF=master
+	fi
+fi
+
+# branches may also appear as id1..id2 to limit the history instead of looking
+# back to the common base. The field is left empty if not set.
+BRANCHES=( )
+BASES=( )
+while [ $# -gt 0 ]; do
+	if [ "$1" = "--" ]; then
+		shift
+		break
+	fi
+	branch="${1##*..}"
+	if [ "$branch" == "$1" ]; then
+		base=""
+	else
+		base="${1%%..*}"
+	fi
+	BASES[${#BRANCHES[@]}]="$base"
+	BRANCHES[${#BRANCHES[@]}]="$branch"
+	shift
+done
+
+# args left for git-log
+ARGS=( "$@" )
+
+if [ ${#BRANCHES[@]} = 0 ]; then
+	if [ -n "$MISSING" ]; then
+		BRANCHES=( HEAD )
+	else
+		die "$USAGE"
+	fi
+fi
+
+# make sure the reference and all branches resolve to valid commits
+for branch in "$REF" "${BRANCHES[@]}"; do
+	if ! git rev-parse --verify -q "$branch" >/dev/null; then
+		die "Failed to check git branch $branch."
+	fi
+done
+
+# in missing mode without a base, walk the commits since the last tag and
+# follow their "cherry picked from" ancestors to find a restart point
+if [ -z "$BASE" -a -n "$MISSING" ]; then
+	err "Warning! No base specified, checking latest backports from current branch since last tag."
+
+	TAG="$(git describe --tags HEAD --abbrev=0)"
+	COMMITS=( $(git rev-list --abbrev-commit --reverse "$TAG^^..") )
+	tip=""
+	for commit in "${COMMITS[@]}"; do
+		parent=$(git log -1 --pretty --format=%B $commit |
+			sed -n 's/^commit \([^)]*\) upstream\.$/\1/p;s/^(cherry picked from commit \([^)]*\))/\1/p' |
+			tail -n1)
+		if [ -z "$tip" ]; then
+			tip=$parent
+		elif [ -n "$parent" ]; then
+			base=$(git merge-base "$tip" "$parent")
+			if [ "$base" = "$tip" ]; then
+				# tip is older than parent, switch tip to it if it
+				# belongs to the upstream branch
+				if [ "$(git merge-base $parent $REF)" = "$parent" ]; then
+					tip=$parent
+				fi
+			fi
+		fi
+	done
+	BASE="$tip"
+	if [ -n "$BASE" ]; then
+		echo "Restarting from $(git log -1 --no-decorate --oneline $BASE)"
+	else
+		echo "Could not figure the base."
+	fi
+fi
+
+if [ -z "$BASE" ]; then
+	err "Warning! No base specified, looking for common ancestor."
+	BASE=$(git merge-base --all "$REF" "${BRANCHES[@]}")
+	if [ -z "$BASE" ]; then
+		die "Couldn't find a common ancestor between these branches"
+	fi
+fi
+
+# we want to go to the git root dir
+DIR="$PWD"
+cd $(git rev-parse --show-toplevel)
+
+mkdir -p .git/.show-backports #|| die "Can't create .git/.show-backports"
+WORK=.git/.show-backports
+
+# one cache file per branch, named after the branch with '/' replaced by '_'
+rm -f "$WORK/${REF//\//_}"
+git log --reverse ${LOGEXPR:+--grep $LOGEXPR} --pretty="%H %s" "$BASE".."$REF" -- "${ARGS[@]}" | grep "${SUBJECT}" > "$WORK/${REF//\//_}"
+
+# for each branch, enumerate all commits and their ancestry
+
+branch_num=0;
+while [ $branch_num -lt "${#BRANCHES[@]}" ]; do
+	branch="${BRANCHES[$branch_num]}"
+	base="${BASES[$branch_num]}"
+	base="${base:-$BASE}"
+	rm -f "$WORK/${branch//\//_}"
+	# each cache line: <hash> <upstream hashes...> [H<bodyhash>]
+	git log --reverse --pretty="%H %s" "$base".."$branch" -- "${ARGS[@]}" | grep "${SUBJECT}" | while read h subject; do
+		echo -n "$h" $(git log -1 --pretty --format=%B "$h" | \
+			sed -n 's/^commit \([^)]*\) upstream\.$/\1/p;s/^(cherry picked from commit \([^)]*\))/\1/p')
+		if [ -n "$BODYHASH" ]; then
+			echo " H$(git log -1 --pretty="%an|%ae|%at|%B" "$h" | sed -n '/^\(Signed-off-by\|(cherry picked\)/q;p' | md5sum)"
+		else
+			echo
+		fi
+	done > "$WORK/${branch//\//_}"
+	(( branch_num++ ))
+done
+
+count=0
+# render the matrix, align columns, then append each commit's subject and
+# collect the leftmost/rightmost commit IDs for the suggested commands
+dump_commit_matrix | column -t | \
+(
+	left_commits=( )
+	right_commits=( )
+	while read line; do
+		# append the subject at the end of the line
+		set -- $line
+		echo -n "$line "
+		if [ "${line::1}" = ":" ]; then
+			echo "---- Subject ----"
+		else
+			# doing it this way prevents git from abusing the terminal
+			echo "$(git log -1 --pretty="%s" "$1")"
+			left_commits[${#left_commits[@]}]="$1"
+			comm=""
+			while [ -n "$1" -a "$1" != "-" -a "$1" != "|" ]; do
+				comm="${1%-*}"
+				shift
+			done
+			right_commits[${#right_commits[@]}]="$comm"
+		fi
+	done
+	if [ -n "$MISSING" -a ${#left_commits[@]} -eq 0 ]; then
+		echo "No missing commit to apply."
+	elif [ -n "$MISSING" ]; then
+		echo
+		echo
+		echo "In order to show and/or apply all leftmost commits to current branch :"
+		echo "   git show --pretty=format:'%C(yellow)commit %H%C(normal)%nAuthor: %an <%ae>%nDate: %aD%n%n%C(green)%C(bold)git cherry-pick -sx %h%n%n%w(72,4,4)%B%N' ${left_commits[@]}"
+		echo
+		echo "   git cherry-pick -sx ${left_commits[@]}"
+		echo
+		if [ "${left_commits[*]}" != "${right_commits[*]}" ]; then
+			echo "In order to show and/or apply all rightmost commits to current branch :"
+			echo "   git show --pretty=format:'%C(yellow)commit %H%C(normal)%nAuthor: %an <%ae>%nDate: %aD%n%n%C(green)%C(bold)git cherry-pick -sx %h%n%n%w(72,4,4)%B%N' ${right_commits[@]}"
+			echo
+			echo "   git cherry-pick -sx ${right_commits[@]}"
+			echo
+		fi
+	fi
+)
diff --git a/scripts/make-releases-json b/scripts/make-releases-json
new file mode 100755
index 0000000..ba05665
--- /dev/null
+++ b/scripts/make-releases-json
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+#
+# Scan a branch directory for source tarballs and rebuild the releases.json
+# file for that branch. md5 and sha256 are added if present. The highest
+# numbered version is referenced as the latest release.
+#
+# Usage: $0 [-b branch] [-o outfile] /path/to/download/branch
+#
+
+USAGE="Usage: ${0##*/} [-b branch] [-o outfile] DIR"
+OUTPUT=    # -o: destination file (stdout when empty)
+BRANCH=    # -b: branch name (derived from DIR's basename when empty)
+DIR=       # positional argument: download directory to scan
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 -o -n "$QUIET" ] || echo "$*"
+ exit 0
+}
+
+emit_json() {
+ printf '{\n "branch": "%s",\n' ${BRANCH}
+ latest=""
+ for file in $(find "$DIR/src" -name 'haproxy-[0-9]*.gz' -printf "%P\n" |grep -v '[0-9]-patches*' | sort -rV ); do
+ rel="${file##*haproxy-}"
+ rel="${rel%%.tar.*}"
+ if [ -z "$latest" ]; then
+ latest="$rel";
+ printf ' "latest_release": "%s",\n' ${latest}
+ printf ' "releases": {\n'
+ else
+ printf ",\n"
+ fi
+ printf ' "%s": {\n' ${rel}
+ printf ' "file": "%s"' ${file}
+ if [ -s "$DIR/src/$file.md5" ]; then
+ printf ',\n "md5": "%s"' $(awk '{print $1}' "$DIR/src/$file.md5")
+ fi
+ if [ -s "$DIR/src/$file.sha256" ]; then
+ printf ',\n "sha256": "%s"' $(awk '{print $1}' "$DIR/src/$file.sha256")
+ fi
+ printf '\n }'
+ done
+
+ if [ -n "$latest" ]; then
+ printf "\n }" ## "releases"
+ fi
+
+ printf '\n}\n'
+}
+
+
+### main
+
+# command line option parsing
+while [ -n "$1" -a -z "${1##-*}" ]; do
+	case "$1" in
+		-b) BRANCH="$2" ; shift 2 ;;
+		-o) OUTPUT="$2" ; shift 2 ;;
+		-h|--help) quit "$USAGE" ;;
+		*) die "$USAGE" ;;
+	esac
+done
+
+# exactly one non-option argument is expected: the branch download directory
+if [ $# -ne 1 ]; then
+	die "$USAGE"
+fi
+
+DIR="$1" ; shift
+if [ -z "$DIR" ]; then
+	die "Missing download directory name."
+fi
+
+if [ ! -d "$DIR/." ]; then
+	die "Download directory doesn't exist : $DIR"
+fi
+
+if [ ! -d "$DIR/src" ]; then
+	die "Download directory must contain 'src' : $DIR"
+fi
+
+# without -b, the branch is the directory's basename and must be purely numeric
+if [ -z "$BRANCH" ]; then
+	BRANCH=${DIR##*/}
+	if [ -n "${BRANCH//[0-9.]}" ]; then
+		die "Couldn't determine branch number from dir name: $BRANCH"
+	fi
+fi
+
+# echo "debug: DIR=$DIR BRANCH=$BRANCH"
+if [ -n "$OUTPUT" ]; then
+	# write to a temporary file then rename it in place; the trailing rm
+	# only matters if the rename failed
+	emit_json > "$OUTPUT.tmp"
+	mv -f "$OUTPUT.tmp" "$OUTPUT"
+	rm -f "$OUTPUT.tmp"
+else
+	emit_json
+fi
diff --git a/scripts/publish-release b/scripts/publish-release
new file mode 100755
index 0000000..9066d4a
--- /dev/null
+++ b/scripts/publish-release
@@ -0,0 +1,194 @@
+#!/usr/bin/env bash
+# puts the public files online after a release
+# Copyright (c) 2006-2016 Willy Tarreau <w@1wt.eu>
+#
+# In short :
+# - requires git
+# - no restriction to master, uses last tag
+# - copies & compresses files, changelog & docs to the final destination
+# - shows a listing of the final file
+
+USAGE="Usage: ${0##*/} [-a] [-q] [-y] [-b branch] [-n newver] DIR"
+CMD_GZIP="${CMD_GZIP:-gzip -nc9}"   # compressor used for the archive and docs
+TARGET_DIR=    # computed later: $DIR/$BRANCH
+OUTPUT=
+SAYYES=        # -y: don't ask for confirmation before archiving
+BRANCH=        # -b: product branch (derived from the version when empty)
+DEVEL=         # set to "/devel" for -dev versions
+QUIET=         # -q: quiet mode (silences quit())
+AUTO=          # -a: automatic mode, quit silently when nothing to do
+ARG0="$0"
+NEW=           # -n: version to publish (from the last tag when empty)
+DIR=           # positional argument: destination root directory
+DOC=( )        # documentation files to publish, chosen per branch
+
+# need to have group write on emitted files for others to update
+umask 002
+
+die() {
+ [ "$#" -eq 0 ] || echo "$*" >&2
+ exit 1
+}
+
+err() {
+ echo "$*" >&2
+}
+
+quit() {
+ [ "$#" -eq 0 -o -n "$QUIET" ] || echo "$*"
+ exit 0
+}
+
+# command line option parsing
+while [ -n "$1" -a -z "${1##-*}" ]; do
+	case "$1" in
+		-a) AUTO=1 ; shift ;;
+		-q) QUIET=1 ; shift ;;
+		-y) SAYYES=1 ; shift ;;
+		-b) BRANCH="$2" ; shift 2 ;;
+		-n) NEW="$2" ; shift 2 ;;
+		-h|--help) quit "$USAGE" ;;
+		*) die "$USAGE" ;;
+	esac
+done
+
+if [ $# -ne 1 ]; then
+	die "$USAGE"
+fi
+
+DIR="$1" ; shift
+if [ -z "$DIR" ]; then
+	die "Missing target directory name."
+fi
+
+# make the target directory absolute if it was given relative
+if [ -n "${DIR##/*}" ]; then
+	DIR="$PWD/$DIR"
+fi
+
+if [ ! -d "$DIR/." ]; then
+	die "Target directory doesn't exist : $DIR"
+fi
+
+if ! git rev-parse --verify -q HEAD >/dev/null; then
+	die "Failed to check git HEAD."
+fi
+
+# we want to go to the git top dir
+toplvl=$(git rev-parse --show-toplevel)
+if [ -n "$toplvl" ]; then
+	cd "$toplvl"
+fi
+
+# ensure that a master branch exists here
+if [ -z "$(git rev-parse --verify -q master 2>/dev/null)" ]; then
+	die "Current directory doesn't seem to be a valid git directory (no master branch)."
+fi
+
+if [ "$(git rev-parse --verify -q HEAD)" != "$(git rev-parse --verify -q master)" ]; then
+	die "git HEAD doesn't match master branch."
+fi
+
+# refuse to publish from a dirty tree
+if [ "$(git diff HEAD 2>/dev/null |wc -c)" != 0 ]; then
+	err "You appear to have uncommitted local changes, please commit them first :"
+	git status -s -uno >&2
+	die
+fi
+
+# determine the version to publish from the last tag unless forced with -n,
+# and check that HEAD is really the tagged release
+if [ -z "$NEW" -o -n "$AUTO" ]; then
+	if [ -z "$NEW" ]; then
+		NEW="$(git describe --tags HEAD --abbrev=0)"
+		NEW="${NEW#v}"
+		if [ -z "$NEW" ]; then
+			die "Fatal: cannot determine new version, please specify it."
+		fi
+	fi
+
+	if [ "$(git describe --tags HEAD)" != "v$NEW" ]; then
+		if [ -n "$AUTO" ]; then
+			quit "Not tagged, nothing to do."
+		fi
+		die "Current version doesn't seem tagged, it reports $(git describe --tags "v$NEW"). Did you release it ?"
+	fi
+fi
+
+if ! git show-ref --tags "v$NEW" >/dev/null; then
+	die "git tag v$NEW doesn't exist, did you create the release ?"
+fi
+
+# determine the product branch from the new release
+# (e.g. NEW=1.8.2 => subvers=2, major=1.8, BRANCH=1.8)
+if [ -z "$BRANCH" ]; then
+	subvers=${NEW#[0-9]*.[0-9]*[-.]*[0-9].}
+	[ "${subvers}" = "${NEW}" ] && subvers=""
+	major=${NEW%.$subvers}
+	branch_ext=${major#*[0-9].*[0-9]}
+	BRANCH=${major%${branch_ext}}
+fi
+
+TARGET_DIR="$DIR/$BRANCH"
+if [ ! -d "$TARGET_DIR/." ]; then
+	die "Target directory doesn't contain branch $BRANCH. You may have to create it in $DIR."
+fi
+
+# development versions are published under src/devel
+if [ -z "${NEW##*-dev*}" ]; then
+	DEVEL="/devel"
+fi
+
+# in automatic mode, the presence of the .md5 means it was already published
+if [ -n "$AUTO" -a -e "$TARGET_DIR/src${DEVEL}/haproxy-$NEW.tar.gz.md5" ]; then
+	quit "Version $NEW Already released."
+fi
+
+if ! mkdir -p "$TARGET_DIR/src$DEVEL" "$TARGET_DIR/doc"; then
+	die "failed to create target directories."
+fi
+
+# select the documentation set that exists in this maintenance branch
+case "$BRANCH" in
+	1.3) DOC=( doc/{haproxy-en,haproxy-fr,configuration,architecture}.txt ) ;;
+	1.4) DOC=( doc/{haproxy-en,haproxy-fr,configuration}.txt ) ;;
+	1.5) DOC=( doc/{coding-style,configuration,proxy-protocol}.txt ) ;;
+	1.6) DOC=( doc/{coding-style,intro,management,configuration,proxy-protocol,lua}.txt ) ;;
+	*) DOC=( doc/{coding-style,intro,management,configuration,proxy-protocol,lua,SPOE}.txt ) ;;
+esac
+
+# interactive confirmation unless -a or -y was given
+if [ -z "$AUTO" ]; then
+	echo "Ready to produce the following files in $TARGET_DIR/ :"
+	echo "    haproxy-$NEW.tar.gz -> src${DEVEL}/"
+	echo "    CHANGELOG -> src/CHANGELOG"
+	echo "    ${DOC[@]} -> doc/*{,.gz}"
+	echo
+
+	git ls-tree -l --abbrev=12 "v$NEW" -- CHANGELOG "${DOC[@]}"
+
+	if [ -z "$SAYYES" ]; then
+		echo "Press ENTER to continue or Ctrl-C to abort now!"
+		read
+	fi
+fi
+
+echo "Archiving sources for version $NEW ..."
+rm -f "${TARGET_DIR}/src${DEVEL}/haproxy-${NEW}.tar.gz"{,.md5,.sha256}
+if ! git archive --format=tar --prefix="haproxy-${NEW}/" "v$NEW" | \
+	$CMD_GZIP > "${TARGET_DIR}/src${DEVEL}/haproxy-${NEW}.tar.gz"; then
+	die "Failed to produce the tar.gz archive"
+fi
+
+# produce the checksums next to the archive
+( cd "$TARGET_DIR/src${DEVEL}" ; \
+  md5sum haproxy-$NEW.tar.gz > haproxy-$NEW.tar.gz.md5 ; \
+  sha256sum haproxy-$NEW.tar.gz > haproxy-$NEW.tar.gz.sha256 )
+
+echo "Extracting doc ..."
+git show "v$NEW:CHANGELOG" > "$TARGET_DIR/src/CHANGELOG"
+
+# each doc file is published both plain and gzipped
+for i in "${DOC[@]}"; do
+	git show "v$NEW:$i" > "$TARGET_DIR/doc/${i#doc/}"
+	$CMD_GZIP < "$TARGET_DIR/doc/${i#doc/}" > "$TARGET_DIR/doc/${i#doc/}.gz"
+done
+
+# regenerate releases.json when the helper script sits next to this one
+if [ -x "${ARG0%/*}/make-releases-json" ]; then
+	# regenerate versions
+	"${ARG0%/*}/make-releases-json" -o "$TARGET_DIR/src/releases.json" "$TARGET_DIR"
+fi
+
+echo "Done : ls -l ${TARGET_DIR}"
+( cd "$TARGET_DIR" ;
+  ls -l src/CHANGELOG "src${DEVEL}/haproxy-${NEW}".tar.gz{,.md5,.sha256} $(for i in "${DOC[@]}"; do echo "doc/${i#doc/}"{,.gz}; done)
+)
+echo
diff --git a/scripts/run-regtests.sh b/scripts/run-regtests.sh
new file mode 100755
index 0000000..85f1341
--- /dev/null
+++ b/scripts/run-regtests.sh
@@ -0,0 +1,427 @@
+#!/bin/sh
+
+# print the usage/help text on stdout and exit; the heredoc below is
+# user-visible output and must be kept verbatim
+_help()
+{
+	cat << EOF
+### run-regtests.sh ###
+  Running run-regtests.sh --help shows this information about how to use it
+
+  Run without parameters to run all tests in the current folder (including subfolders)
+    run-regtests.sh
+
+  Provide paths to run tests from (including subfolders):
+    run-regtests.sh ./tests1 ./tests2
+
+  Parameters:
+    --j <NUM>, To run vtest with multiple jobs / threads for a faster overall result
+      run-regtests.sh ./fasttest --j 16
+
+    --v, to run verbose
+      run-regtests.sh --v, disables the default vtest 'quiet' parameter
+
+    --debug to show test logs on standard output (implies --v)
+      run-regtests.sh --debug
+
+    --keep-logs to keep all log directories (by default kept if test fails)
+      run-regtests.sh --keep-logs
+
+    --vtestparams <ARGS>, passes custom ARGS to vtest
+      run-regtests.sh --vtestparams "-n 10"
+
+    --type <reg tests types> filter the types of the tests to be run, depending on
+      the commented REGTESTS_TYPE variable value in each VTC file.
+      The value of REGTESTS_TYPE supported are: default, slow, bug, broken, devel
+      and experimental. When not specified, it is set to 'default' as default value.
+
+      run-regtest.sh --type slow,default
+
+    --clean to cleanup previous reg-tests log directories and exit
+      run-regtests.sh --clean
+
+  Including text below into a .vtc file will check for its requirements
+  related to haproxy's target and compilation options
+    # Below targets are not capable of completing this test successfully
+    #EXCLUDE_TARGET=freebsd, abns sockets are not available on freebsd
+
+    #EXCLUDE_TARGETS=dos,freebsd,windows
+
+    # Below option is required to complete this test successfully
+    #REQUIRE_OPTION=OPENSSL, this test needs OPENSSL compiled in.
+    #REQUIRE_OPTIONS=ZLIB|SLZ,OPENSSL,LUA
+
+    #REQUIRE_SERVICE=prometheus-exporter
+    #REQUIRE_SERVICES=prometheus-exporter,foo
+
+    # To define a range of versions that a test can run with:
+    #REQUIRE_VERSION=0.0
+    #REQUIRE_VERSION_BELOW=99.9
+
+  Configure environment variables to set the haproxy and vtest binaries to use
+    setenv HAPROXY_PROGRAM /usr/local/sbin/haproxy
+    setenv VTEST_PROGRAM /usr/local/bin/vtest
+    setenv HAPROXY_ARGS "-dM -de -m 50"
+  or
+    export HAPROXY_PROGRAM=/usr/local/sbin/haproxy
+    export VTEST_PROGRAM=/usr/local/bin/vtest
+    export HAPROXY_ARGS="-dM -de -m 50"
+EOF
+	exit 0
+}
+
+add_range_to_test_list()
+{
+ level0="*.vtc"
+ level1="h*.vtc"
+ level2="s*.vtc"
+ level3="l*.vtc"
+ level4="b*.vtc"
+ level5="k*.vtc"
+ level6="e*.vtc"
+
+ new_range=$(echo $1 | tr '-' ' ')
+ non_digit=$(echo $new_range | grep '[^0-9 ]')
+ if [ -n "$non_digit" ] ; then
+ return
+ fi
+ if [ "$new_range" = "$1" ] ; then
+ if [ $1 -gt 6 ] ; then
+ return
+ fi
+ eval echo '$'level$1
+ return
+ fi
+ if [ -z "$new_range" ] ; then
+ return
+ fi
+ list=
+ for l in $(seq $new_range) ; do
+ if [ -n "l" ] ; then
+ if [ -z "$list" ] ; then
+ list="$(eval echo '$'level${l})"
+ else
+ list="$list $(eval echo '$'level${l})"
+ fi
+ fi
+ done
+
+ echo $list
+}
+
+_startswith() {
+ _str="$1"
+ _sub="$2"
+ echo "$_str" | grep "^$_sub" >/dev/null 2>&1
+}
+
+_findtests() {
+ set -f
+
+ REGTESTS_TYPES="${REGTESTS_TYPES:-default,bug,devel,slow}"
+ any_test=$(echo $REGTESTS_TYPES | grep -cw "any")
+ for i in $( find "$1" -name *.vtc ); do
+ skiptest=
+ OLDIFS="$IFS"; IFS="$LINEFEED"
+ set -- $(grep '^#[0-9A-Z_]*=' "$i")
+ IFS="$OLDIFS"
+
+ require_version=""; require_version_below=""; require_options="";
+ require_services=""; exclude_targets=""; regtest_type=""
+ requiredoption=""; requiredservice=""; excludedtarget="";
+
+ while [ $# -gt 0 ]; do
+ v="$1"; v="${v#*=}"
+ case "$1" in
+ "#REQUIRE_VERSION="*) require_version="$v" ;;
+ "#REQUIRE_VERSION_BELOW="*) require_version_below="$v" ;;
+ "#REQUIRE_OPTIONS="*) require_options="$v" ;;
+ "#REQUIRE_SERVICES="*) require_services="$v" ;;
+ "#EXCLUDE_TARGETS="*) exclude_targets="$v" ;;
+ "#REGTEST_TYPE="*) regtest_type="$v" ;;
+ "#REQUIRE_OPTION="*) requiredoption="${v%,*}" ;;
+ "#REQUIRE_SERVICE="*) required_service="${v%,*}" ;;
+ "#EXCLUDE_TARGET="*) excludedtarget="${v%,*}" ;;
+ # Note: any new variable declared here must be initialized above.
+ esac
+ shift
+ done
+
+ if [ $any_test -ne 1 ] ; then
+ if [ -z $regtest_type ] ; then
+ regtest_type=default
+ fi
+ if ! $(echo $REGTESTS_TYPES | grep -wq $regtest_type) ; then
+ echo " Skip $i because its type '"$regtest_type"' is excluded"
+ skiptest=1
+ fi
+ fi
+
+ if [ -n "$requiredoption" ]; then
+ require_options="$require_options,$requiredoption"
+ fi
+
+ if [ -n "$requiredservice" ]; then
+ require_services="$require_services,$requiredservice"
+ fi
+
+ if [ -n "$excludedtarget" ]; then
+ exclude_targets="$exclude_targets,$excludedtarget"
+ fi
+
+ IFS=","; set -- $require_options; IFS=$OLDIFS; require_options="$*"
+ IFS=","; set -- $require_services; IFS=$OLDIFS; require_services="$*"
+ IFS=","; set -- $exclude_targets; IFS=$OLDIFS; exclude_targets="$*"
+
+ if [ -n "$require_version" ]; then
+ if [ $(_version "$HAPROXY_VERSION") -lt $(_version "$require_version") ]; then
+ echo " Skip $i because option haproxy is version: $HAPROXY_VERSION"
+ echo " REASON: this test requires at least version: $require_version"
+ skiptest=1
+ fi
+ fi
+ if [ -n "$require_version_below" ]; then
+ if [ $(_version "$HAPROXY_VERSION") -ge $(_version "$require_version_below") ]; then
+ echo " Skip $i because option haproxy is version: $HAPROXY_VERSION"
+ echo " REASON: this test requires a version below: $require_version_below"
+ skiptest=1
+ fi
+ fi
+
+ for excludedtarget in $exclude_targets; do
+ if [ "$excludedtarget" = "$TARGET" ]; then
+ echo " Skip $i because haproxy is compiled for the excluded target $TARGET"
+ skiptest=1
+ fi
+ done
+
+ for requiredoption in $require_options; do
+ IFS="|"; set -- $requiredoption; IFS=$OLDIFS; alternatives="$*"
+ found=
+ for alt in $alternatives; do
+ if [ -z "${FEATURES_PATTERN##* +$alt *}" ]; then
+ found=1;
+ fi
+ done
+ if [ -z $found ]; then
+ echo " Skip $i because haproxy is not compiled with the required option $requiredoption"
+ skiptest=1
+ fi
+ done
+
+ for requiredservice in $require_services; do
+ IFS="|"; set -- $requiredservice; IFS=$OLDIFS; alternatives="$*"
+ found=
+ for alt in $alternatives; do
+ if [ -z "${SERVICES_PATTERN##* $alt *}" ]; then
+ found=1;
+ fi
+ done
+ if [ -z $found ]; then
+ echo " Skip $i because haproxy is not compiled with the required service $requiredservice"
+ skiptest=1
+ fi
+ done
+
+ if [ -z $skiptest ]; then
+ echo " Add test: $i"
+ testlist="$testlist $i"
+ fi
+ done
+}
+
+# Interactively remove all "haregtests-*" log directories left under TESTDIR
+# by previous runs, then exit (0 on removal, 1 when aborted).
+# NOTE(review): "read -p" is a bash/ksh extension, not POSIX sh — confirm the
+# shells this script targets support it.
+_cleanup()
+{
+	DIRS=$(find "${TESTDIR}" -maxdepth 1 -type d -name "haregtests-*" -exec basename {} \; 2>/dev/null)
+	if [ -z "${DIRS}" ]; then
+		echo "No reg-tests log directory found"
+	else
+		echo "Cleanup following reg-tests log directories:"
+		for d in ${DIRS}; do
+			echo "    o ${TESTDIR}/$d"
+		done
+		read -p "Continue (y/n)?" reply
+		case "$reply" in
+			y|Y)
+				for d in ${DIRS}; do
+					rm -r "${TESTDIR}/$d"
+				done
+				echo "done"
+				exit 0
+				;;
+			*)
+				echo "aborted"
+				exit 1
+				;;
+		esac
+	fi
+}
+
+
+# Parse the command line: options update the global settings, and any bare
+# argument is a path to collect tests from (appended to $REGTESTS).
+_process() {
+	while [ ${#} -gt 0 ]; do
+		if _startswith "$1" "-"; then
+			case "${1}" in
+				--j)
+					jobcount="$2"
+					shift
+					;;
+				--vtestparams)
+					vtestparams="$2"
+					shift
+					;;
+				--v)
+					# drop vtest's default "-q" quiet flag
+					verbose=""
+					;;
+				--debug)
+					# verbose plus vtest's own "-v" debug output
+					verbose=""
+					debug="-v"
+					;;
+				--keep-logs)
+					# -L keeps all logs, -l (default) only failed ones
+					keep_logs="-L"
+					;;
+				--type)
+					REGTESTS_TYPES="$2"
+					shift
+					;;
+				--clean)
+					_cleanup
+					exit 0
+					;;
+				--help)
+					_help
+					;;
+				*)
+					echo "Unknown parameter : $1"
+					exit 1
+					;;
+			esac
+		else
+			REGTESTS="${REGTESTS} $1"
+		fi
+		shift 1
+	done
+}
+
+# compute a version from up to 4 sub-version components, each multiplied
+# by a power of 1000, and padded left with 0, 1 or 2 zeroes. E.g. "2.4.0"
+# becomes a single integer comparable with -lt/-ge.
+_version() {
+	# split the dotted version into positional parameters
+	OLDIFS="$IFS"; IFS="."; set -- $*; IFS="$OLDIFS"
+	# strip any non-numeric suffix (e.g. "-dev3") and left-pad fields 2..4
+	set -- ${1%%[!0-9]*} 000${2%%[!0-9]*} 000${3%%[!0-9]*} 000${4%%[!0-9]*}
+	# keep only the last 3 digits of each padded field and concatenate
+	prf2=${2%???}; prf3=${3%???}; prf4=${4%???}
+	echo ${1}${2#$prf2}${3#$prf3}${4#$prf4}
+}
+
+
+# defaults, all overridable from the environment
+HAPROXY_PROGRAM="${HAPROXY_PROGRAM:-${PWD}/haproxy}"
+HAPROXY_ARGS="${HAPROXY_ARGS--dM}"
+VTEST_PROGRAM="${VTEST_PROGRAM:-vtest}"
+TESTDIR="${TMPDIR:-/tmp}"
+REGTESTS=""
+# literal linefeed, used as IFS to split file contents line by line
+LINEFEED="
+"
+
+jobcount=""
+verbose="-q"
+debug=""
+keep_logs="-l"
+testlist=""
+
+_process "$@";
+
+echo ""
+echo "########################## Preparing to run tests ##########################"
+
+# check that both the haproxy and vtest binaries are usable before going on
+preparefailed=
+if ! [ -x "$(command -v $HAPROXY_PROGRAM)" ]; then
+	echo "haproxy not found in path, please specify HAPROXY_PROGRAM environment variable"
+	preparefailed=1
+fi
+if ! [ -x "$(command -v $VTEST_PROGRAM)" ]; then
+	echo "vtest not found in path, please specify VTEST_PROGRAM environment variable"
+	preparefailed=1
+fi
+if [ $preparefailed ]; then
+	exit 1
+fi
+
+# collect version, target, features and services from "haproxy -vv" output
+{ read HAPROXY_VERSION; read TARGET; read FEATURES; read SERVICES; } << EOF
+$($HAPROXY_PROGRAM $HAPROXY_ARGS -vv | grep 'HA-\?Proxy version\|TARGET.*=\|^Feature\|^Available services' | sed 's/.* [:=] //')
+EOF
+
+HAPROXY_VERSION=$(echo $HAPROXY_VERSION | cut -d " " -f 3)
+echo "Testing with haproxy version: $HAPROXY_VERSION"
+
+# version of the source tree, used to warn when it differs from the binary's
+PROJECT_VERSION=$(${MAKE:-make} version 2>&1 | grep '^VERSION:\|^SUBVERS:'|cut -f2 -d' '|tr -d '\012')
+if [ -z "${PROJECT_VERSION}${MAKE}" ]; then
+	# try again with gmake, just in case
+	PROJECT_VERSION=$(gmake version 2>&1 | grep '^VERSION:\|^SUBVERS:'|cut -f2 -d' '|tr -d '\012')
+fi
+
+# surrounded by spaces so that " $alt " whole-word lookups work in _findtests
+FEATURES_PATTERN=" $FEATURES "
+SERVICES_PATTERN=" $SERVICES "
+
+TESTRUNDATETIME="$(date '+%Y-%m-%d_%H-%M-%S')"
+
+# all test artifacts go into a unique directory under TESTDIR
+mkdir -p "$TESTDIR" || exit 1
+TESTDIR=$(mktemp -d "$TESTDIR/haregtests-$TESTRUNDATETIME.XXXXXX") || exit 1
+
+export TMPDIR="$TESTDIR"
+export HAPROXY_PROGRAM="$HAPROXY_PROGRAM"
+if [ -n "$HAPROXY_ARGS" ]; then
+	export HAPROXY_ARGS
+fi
+
+echo "Target : $TARGET"
+echo "Options : $FEATURES"
+echo "Services : $SERVICES"
+
+echo "########################## Gathering tests to run ##########################"
+
+if [ -z "$REGTESTS" ]; then
+	_findtests reg-tests/
+else
+	for t in $REGTESTS; do
+		_findtests $t
+	done
+fi
+
+echo "########################## Starting vtest ##########################"
+echo "Testing with haproxy version: $HAPROXY_VERSION"
+
+if [ -n "$PROJECT_VERSION" -a "$PROJECT_VERSION" != "$HAPROXY_VERSION" ]; then
+	echo "Warning: version does not match the current tree ($PROJECT_VERSION)"
+fi
+
+_vtresult=0
+if [ -n "$testlist" ]; then
+	if [ -n "$jobcount" ]; then
+		jobcount="-j $jobcount"
+	fi
+	cmd="$VTEST_PROGRAM -b $((2<<20)) -k -t 10 $keep_logs $verbose $debug $jobcount $vtestparams $testlist"
+	eval $cmd
+	_vtresult=$?
+else
+	echo "No tests found that meet the required criteria"
+fi
+
+
+if [ $_vtresult -eq 0 ]; then
+	# all tests were successful, removing tempdir (the last part.)
+	# ignore errors if the directory is not empty or if it does not exist
+	rmdir "$TESTDIR" 2>/dev/null
+fi
+
+# when the directory survived (some test failed or --keep-logs), summarize
+# each vtc.* result directory and append it to failedtests.log
+if [ -d "${TESTDIR}" ]; then
+	echo "########################## Gathering results ##########################"
+	export TESTDIR
+	find "$TESTDIR" -type d -name "vtc.*" -exec sh -c 'for i; do
+		if [ ! -e "$i/LOG" ] ; then continue; fi
+
+		cat <<- EOF | tee -a "$TESTDIR/failedtests.log"
+$(echo "###### $(cat "$i/INFO") ######")
+$(echo "## test results in: \"$i\"")
+$(grep -E -- "^(----|\* diag)" "$i/LOG")
+EOF
+	done' sh {} +
+fi
+
+exit $_vtresult
diff --git a/src/acl.c b/src/acl.c
new file mode 100644
index 0000000..8ef2b7d
--- /dev/null
+++ b/src/acl.c
@@ -0,0 +1,1377 @@
+/*
+ * ACL management functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <import/ebsttree.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/pattern.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/sample.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/tools.h>
+#include <haproxy/cfgparse.h>
+
+/* List head of all known ACL keywords */
+static struct acl_kw_list acl_keywords = {
+ .list = LIST_HEAD_INIT(acl_keywords.list)
+};
+
+/* input values are 0 or 3, output is the same */
+static inline enum acl_test_res pat2acl(struct pattern *pat)
+{
+ if (pat)
+ return ACL_TEST_PASS;
+ else
+ return ACL_TEST_FAIL;
+}
+
+/*
+ * Registers the ACL keyword list <kwl> as a list of valid keywords for next
+ * parsing sessions.
+ */
+void acl_register_keywords(struct acl_kw_list *kwl)
+{
+ LIST_APPEND(&acl_keywords.list, &kwl->list);
+}
+
+/*
+ * Unregisters the ACL keyword list <kwl> from the list of valid keywords.
+ */
+void acl_unregister_keywords(struct acl_kw_list *kwl)
+{
+ LIST_DELETE(&kwl->list);
+ LIST_INIT(&kwl->list);
+}
+
+/* Return a pointer to the ACL <name> within the list starting at <head>, or
+ * NULL if not found.
+ */
+struct acl *find_acl_by_name(const char *name, struct list *head)
+{
+ struct acl *acl;
+ list_for_each_entry(acl, head, list) {
+ if (strcmp(acl->name, name) == 0)
+ return acl;
+ }
+ return NULL;
+}
+
+/* Return a pointer to the ACL keyword <kw>, or NULL if not found. Note that if
+ * <kw> contains an opening parenthesis or a comma, only the left part of it is
+ * checked.
+ */
+struct acl_keyword *find_acl_kw(const char *kw)
+{
+ int index;
+ const char *kwend;
+ struct acl_kw_list *kwl;
+
+ kwend = kw;
+ while (is_idchar(*kwend))
+ kwend++;
+
+ list_for_each_entry(kwl, &acl_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
+ kwl->kw[index].kw[kwend-kw] == 0)
+ return &kwl->kw[index];
+ }
+ }
+ return NULL;
+}
+
+static struct acl_expr *prune_acl_expr(struct acl_expr *expr)
+{
+ struct arg *arg;
+
+ pattern_prune(&expr->pat);
+
+ for (arg = expr->smp->arg_p; arg; arg++) {
+ if (arg->type == ARGT_STOP)
+ break;
+ if (arg->type == ARGT_STR || arg->unresolved) {
+ chunk_destroy(&arg->data.str);
+ arg->unresolved = 0;
+ }
+ }
+
+ release_sample_expr(expr->smp);
+
+ return expr;
+}
+
+/* Parse an ACL expression starting at <args>[0], and return it. If <err> is
+ * not NULL, it will be filled with a pointer to an error message in case of
+ * error. This pointer must be freeable or NULL. <al> is an arg_list serving
+ * as a list head to report missing dependencies. It may be NULL if such
+ * dependencies are not allowed.
+ *
+ * Right now, the only accepted syntax is :
+ * <subject> [<value>...]
+ */
+struct acl_expr *parse_acl_expr(const char **args, char **err, struct arg_list *al,
+ const char *file, int line)
+{
+ __label__ out_return, out_free_expr;
+ struct acl_expr *expr;
+ struct acl_keyword *aclkw;
+ int refflags, patflags;
+ const char *arg;
+ struct sample_expr *smp = NULL;
+ int idx = 0;
+ char *ckw = NULL;
+ const char *endt;
+ int cur_type;
+ int nbargs;
+ int operator = STD_OP_EQ;
+ int op;
+ int contain_colon, have_dot;
+ const char *dot;
+ signed long long value, minor;
+ /* The following buffer contain two numbers, a ':' separator and the final \0. */
+ char buffer[NB_LLMAX_STR + 1 + NB_LLMAX_STR + 1];
+ int is_loaded;
+ int unique_id;
+ char *error;
+ struct pat_ref *ref;
+ struct pattern_expr *pattern_expr;
+ int load_as_map = 0;
+ int acl_conv_found = 0;
+
+ /* First, we look for an ACL keyword. And if we don't find one, then
+ * we look for a sample fetch expression starting with a sample fetch
+ * keyword.
+ */
+
+ if (al) {
+ al->ctx = ARGC_ACL; // to report errors while resolving args late
+ al->kw = *args;
+ al->conv = NULL;
+ }
+
+ aclkw = find_acl_kw(args[0]);
+ if (aclkw) {
+ /* OK we have a real ACL keyword */
+
+ /* build new sample expression for this ACL */
+ smp = calloc(1, sizeof(*smp));
+ if (!smp) {
+ memprintf(err, "out of memory when parsing ACL expression");
+ goto out_return;
+ }
+ LIST_INIT(&(smp->conv_exprs));
+ smp->fetch = aclkw->smp;
+ smp->arg_p = empty_arg_list;
+
+ /* look for the beginning of the subject arguments */
+ for (arg = args[0]; is_idchar(*arg); arg++)
+ ;
+
+ /* At this point, we have :
+ * - args[0] : beginning of the keyword
+ * - arg : end of the keyword, first character not part of keyword
+ */
+ nbargs = make_arg_list(arg, -1, smp->fetch->arg_mask, &smp->arg_p,
+ err, &endt, NULL, al);
+ if (nbargs < 0) {
+ /* note that make_arg_list will have set <err> here */
+ memprintf(err, "ACL keyword '%s' : %s", aclkw->kw, *err);
+ goto out_free_smp;
+ }
+
+ if (!smp->arg_p) {
+ smp->arg_p = empty_arg_list;
+ }
+ else if (smp->fetch->val_args && !smp->fetch->val_args(smp->arg_p, err)) {
+ /* invalid keyword argument, error must have been
+ * set by val_args().
+ */
+ memprintf(err, "in argument to '%s', %s", aclkw->kw, *err);
+ goto out_free_smp;
+ }
+
+ /* look for the beginning of the converters list. Those directly attached
+ * to the ACL keyword are found just after the comma.
+ * If we find any converter, then we don't use the ACL keyword's match
+ * anymore but the one related to the converter's output type.
+ */
+ if (!sample_parse_expr_cnv((char **)args, NULL, NULL, err, al, file, line, smp, endt)) {
+ if (err)
+ memprintf(err, "ACL keyword '%s' : %s", aclkw->kw, *err);
+ goto out_free_smp;
+ }
+ acl_conv_found = !LIST_ISEMPTY(&smp->conv_exprs);
+ }
+ else {
+ /* This is not an ACL keyword, so we hope this is a sample fetch
+ * keyword that we're going to transparently use as an ACL. If
+ * so, we retrieve a completely parsed expression with args and
+ * convs already done.
+ */
+ smp = sample_parse_expr((char **)args, &idx, file, line, err, al, NULL);
+ if (!smp) {
+ memprintf(err, "%s in ACL expression '%s'", *err, *args);
+ goto out_return;
+ }
+ }
+
+ /* get last effective output type for smp */
+ cur_type = smp_expr_output_type(smp);
+
+ expr = calloc(1, sizeof(*expr));
+ if (!expr) {
+ memprintf(err, "out of memory when parsing ACL expression");
+ goto out_free_smp;
+ }
+
+ pattern_init_head(&expr->pat);
+
+ expr->pat.expect_type = cur_type;
+ expr->smp = smp;
+ expr->kw = smp->fetch->kw;
+ smp = NULL; /* don't free it anymore */
+
+ if (aclkw && !acl_conv_found) {
+ expr->kw = aclkw->kw;
+ expr->pat.parse = aclkw->parse ? aclkw->parse : pat_parse_fcts[aclkw->match_type];
+ expr->pat.index = aclkw->index ? aclkw->index : pat_index_fcts[aclkw->match_type];
+ expr->pat.match = aclkw->match ? aclkw->match : pat_match_fcts[aclkw->match_type];
+ expr->pat.prune = aclkw->prune ? aclkw->prune : pat_prune_fcts[aclkw->match_type];
+ }
+
+ if (!expr->pat.parse) {
+ /* Parse/index/match functions depend on the expression type,
+ * so we have to map them now. Some types can be automatically
+ * converted.
+ */
+ switch (cur_type) {
+ case SMP_T_BOOL:
+ expr->pat.parse = pat_parse_fcts[PAT_MATCH_BOOL];
+ expr->pat.index = pat_index_fcts[PAT_MATCH_BOOL];
+ expr->pat.match = pat_match_fcts[PAT_MATCH_BOOL];
+ expr->pat.prune = pat_prune_fcts[PAT_MATCH_BOOL];
+ expr->pat.expect_type = pat_match_types[PAT_MATCH_BOOL];
+ break;
+ case SMP_T_SINT:
+ expr->pat.parse = pat_parse_fcts[PAT_MATCH_INT];
+ expr->pat.index = pat_index_fcts[PAT_MATCH_INT];
+ expr->pat.match = pat_match_fcts[PAT_MATCH_INT];
+ expr->pat.prune = pat_prune_fcts[PAT_MATCH_INT];
+ expr->pat.expect_type = pat_match_types[PAT_MATCH_INT];
+ break;
+ case SMP_T_ADDR:
+ case SMP_T_IPV4:
+ case SMP_T_IPV6:
+ expr->pat.parse = pat_parse_fcts[PAT_MATCH_IP];
+ expr->pat.index = pat_index_fcts[PAT_MATCH_IP];
+ expr->pat.match = pat_match_fcts[PAT_MATCH_IP];
+ expr->pat.prune = pat_prune_fcts[PAT_MATCH_IP];
+ expr->pat.expect_type = pat_match_types[PAT_MATCH_IP];
+ break;
+ case SMP_T_STR:
+ expr->pat.parse = pat_parse_fcts[PAT_MATCH_STR];
+ expr->pat.index = pat_index_fcts[PAT_MATCH_STR];
+ expr->pat.match = pat_match_fcts[PAT_MATCH_STR];
+ expr->pat.prune = pat_prune_fcts[PAT_MATCH_STR];
+ expr->pat.expect_type = pat_match_types[PAT_MATCH_STR];
+ break;
+ }
+ }
+
+ /* Additional check to protect against common mistakes */
+ if (expr->pat.parse && cur_type != SMP_T_BOOL && !*args[1]) {
+ ha_warning("parsing acl keyword '%s' :\n"
+ " no pattern to match against were provided, so this ACL will never match.\n"
+ " If this is what you intended, please add '--' to get rid of this warning.\n"
+ " If you intended to match only for existence, please use '-m found'.\n"
+ " If you wanted to force an int to match as a bool, please use '-m bool'.\n"
+ "\n",
+ args[0]);
+ }
+
+ args++;
+
+ /* check for options before patterns. Supported options are :
+ * -i : ignore case for all patterns by default
+ * -f : read patterns from those files
+ * -m : force matching method (must be used before -f)
+ * -M : load the file as map file
+ * -u : force the unique id of the acl
+ * -- : everything after this is not an option
+ */
+ refflags = PAT_REF_ACL;
+ patflags = 0;
+ is_loaded = 0;
+ unique_id = -1;
+ while (**args == '-') {
+ if (strcmp(*args, "-i") == 0)
+ patflags |= PAT_MF_IGNORE_CASE;
+ else if (strcmp(*args, "-n") == 0)
+ patflags |= PAT_MF_NO_DNS;
+ else if (strcmp(*args, "-u") == 0) {
+ unique_id = strtol(args[1], &error, 10);
+ if (*error != '\0') {
+ memprintf(err, "the argument of -u must be an integer");
+ goto out_free_expr;
+ }
+
+ /* Check if this id is really unique. */
+ if (pat_ref_lookupid(unique_id)) {
+ memprintf(err, "the id is already used");
+ goto out_free_expr;
+ }
+
+ args++;
+ }
+ else if (strcmp(*args, "-f") == 0) {
+ if (!expr->pat.parse) {
+ memprintf(err, "matching method must be specified first (using '-m') when using a sample fetch of this type ('%s')", expr->kw);
+ goto out_free_expr;
+ }
+
+ if (!pattern_read_from_file(&expr->pat, refflags, args[1], patflags, load_as_map, err, file, line))
+ goto out_free_expr;
+ is_loaded = 1;
+ args++;
+ }
+ else if (strcmp(*args, "-m") == 0) {
+ int idx;
+
+ if (is_loaded) {
+ memprintf(err, "'-m' must only be specified before patterns and files in parsing ACL expression");
+ goto out_free_expr;
+ }
+
+ idx = pat_find_match_name(args[1]);
+ if (idx < 0) {
+ memprintf(err, "unknown matching method '%s' when parsing ACL expression", args[1]);
+ goto out_free_expr;
+ }
+
+ /* Note: -m found is always valid, bool/int are compatible, str/bin/reg/len are compatible */
+ if (idx != PAT_MATCH_FOUND && !sample_casts[cur_type][pat_match_types[idx]]) {
+ memprintf(err, "matching method '%s' cannot be used with fetch keyword '%s'", args[1], expr->kw);
+ goto out_free_expr;
+ }
+ expr->pat.parse = pat_parse_fcts[idx];
+ expr->pat.index = pat_index_fcts[idx];
+ expr->pat.match = pat_match_fcts[idx];
+ expr->pat.prune = pat_prune_fcts[idx];
+ expr->pat.expect_type = pat_match_types[idx];
+ args++;
+ }
+ else if (strcmp(*args, "-M") == 0) {
+ refflags |= PAT_REF_MAP;
+ load_as_map = 1;
+ }
+ else if (strcmp(*args, "--") == 0) {
+ args++;
+ break;
+ }
+ else {
+ memprintf(err, "'%s' is not a valid ACL option. Please use '--' before any pattern beginning with a '-'", args[0]);
+ goto out_free_expr;
+ break;
+ }
+ args++;
+ }
+
+ if (!expr->pat.parse) {
+ memprintf(err, "matching method must be specified first (using '-m') when using a sample fetch of this type ('%s')", expr->kw);
+ goto out_free_expr;
+ }
+
+ /* Create displayed reference */
+ snprintf(trash.area, trash.size, "acl '%s' file '%s' line %d",
+ expr->kw, file, line);
+ trash.area[trash.size - 1] = '\0';
+
+ /* Create new pattern reference. */
+ ref = pat_ref_newid(unique_id, trash.area, PAT_REF_ACL);
+ if (!ref) {
+ memprintf(err, "memory error");
+ goto out_free_expr;
+ }
+
+ /* Create new pattern expression associated to this reference. */
+ pattern_expr = pattern_new_expr(&expr->pat, ref, patflags, err, NULL);
+ if (!pattern_expr)
+ goto out_free_expr;
+
+ /* now parse all patterns */
+ while (**args) {
+ arg = *args;
+
+		/* Compatibility layer. Each pattern parser can parse only one string
+		 * per pattern, but the pat_parse_int() and pat_parse_dotted_ver()
+		 * parsers optionally need two operands. The first one is the match
+		 * method: eq, le, lt, ge and gt. The pat_parse_int() and
+		 * pat_parse_dotted_ver() functions support a compatibility syntax based on ranges:
+ *
+ * pat_parse_int():
+ *
+ * "eq x" -> "x" or "x:x"
+ * "le x" -> ":x"
+ * "lt x" -> ":y" (with y = x - 1)
+ * "ge x" -> "x:"
+ * "gt x" -> "y:" (with y = x + 1)
+ *
+ * pat_parse_dotted_ver():
+ *
+ * "eq x.y" -> "x.y" or "x.y:x.y"
+ * "le x.y" -> ":x.y"
+ * "lt x.y" -> ":w.z" (with w.z = x.y - 1)
+ * "ge x.y" -> "x.y:"
+ * "gt x.y" -> "w.z:" (with w.z = x.y + 1)
+ *
+		 * If y is not present, it is assumed to be "0".
+ *
+		 * The eq, le, lt, ge and gt operators are specific to the ACL syntax.
+		 * The following block of code detects the operator and rewrites each
+		 * value as a parsable string.
+ */
+ if (expr->pat.parse == pat_parse_int ||
+ expr->pat.parse == pat_parse_dotted_ver) {
+			/* Check for an operator. If the argument is an operator, memorize it
+			 * and continue to the next argument.
+			 */
+ op = get_std_op(arg);
+ if (op != -1) {
+ operator = op;
+ args++;
+ continue;
+ }
+
+			/* Check if the pattern contains a ':' or '-' character. */
+ contain_colon = (strchr(arg, ':') || strchr(arg, '-'));
+
+			/* If the pattern contains a ':' or '-' character, give it to the parser as is.
+			 * If it does not contain ':' and the operator is STD_OP_EQ, give it to the parser as is.
+			 * Otherwise, try to convert the value according to the operator.
+			 */
+ if (!contain_colon && operator != STD_OP_EQ) {
+ /* Search '.' separator. */
+ dot = strchr(arg, '.');
+ if (!dot) {
+ have_dot = 0;
+ minor = 0;
+ dot = arg + strlen(arg);
+ }
+ else
+ have_dot = 1;
+
+ /* convert the integer minor part for the pat_parse_dotted_ver() function. */
+ if (expr->pat.parse == pat_parse_dotted_ver && have_dot) {
+ if (strl2llrc(dot+1, strlen(dot+1), &minor) != 0) {
+ memprintf(err, "'%s' is neither a number nor a supported operator", arg);
+ goto out_free_expr;
+ }
+ if (minor >= 65536) {
+ memprintf(err, "'%s' contains too large a minor value", arg);
+ goto out_free_expr;
+ }
+ }
+
+ /* convert the integer value for the pat_parse_int() function, and the
+ * integer major part for the pat_parse_dotted_ver() function.
+ */
+ if (strl2llrc(arg, dot - arg, &value) != 0) {
+ memprintf(err, "'%s' is neither a number nor a supported operator", arg);
+ goto out_free_expr;
+ }
+ if (expr->pat.parse == pat_parse_dotted_ver) {
+ if (value >= 65536) {
+ memprintf(err, "'%s' contains too large a major value", arg);
+ goto out_free_expr;
+ }
+ value = (value << 16) | (minor & 0xffff);
+ }
+
+ switch (operator) {
+
+ case STD_OP_EQ: /* this case is not possible. */
+ memprintf(err, "internal error");
+ goto out_free_expr;
+
+ case STD_OP_GT:
+ value++; /* gt = ge + 1 */
+ __fallthrough;
+
+ case STD_OP_GE:
+ if (expr->pat.parse == pat_parse_int)
+ snprintf(buffer, NB_LLMAX_STR+NB_LLMAX_STR+2, "%lld:", value);
+ else
+ snprintf(buffer, NB_LLMAX_STR+NB_LLMAX_STR+2, "%lld.%lld:",
+ value >> 16, value & 0xffff);
+ arg = buffer;
+ break;
+
+ case STD_OP_LT:
+ value--; /* lt = le - 1 */
+ __fallthrough;
+
+ case STD_OP_LE:
+ if (expr->pat.parse == pat_parse_int)
+ snprintf(buffer, NB_LLMAX_STR+NB_LLMAX_STR+2, ":%lld", value);
+ else
+ snprintf(buffer, NB_LLMAX_STR+NB_LLMAX_STR+2, ":%lld.%lld",
+ value >> 16, value & 0xffff);
+ arg = buffer;
+ break;
+ }
+ }
+ }
+
+		/* Add the sample to the reference, and try to compile it for each pattern
+		 * using this value.
+		 */
+ if (!pat_ref_add(ref, arg, NULL, err))
+ goto out_free_expr;
+ args++;
+ }
+
+ return expr;
+
+ out_free_expr:
+ prune_acl_expr(expr);
+ free(expr);
+ out_free_smp:
+ free(ckw);
+ free(smp);
+ out_return:
+ return NULL;
+}
+
+/* Purge everything in the acl <acl>, then return <acl>. */
+struct acl *prune_acl(struct acl *acl) {
+
+ struct acl_expr *expr, *exprb;
+
+ free(acl->name);
+
+ list_for_each_entry_safe(expr, exprb, &acl->expr, list) {
+ LIST_DELETE(&expr->list);
+ prune_acl_expr(expr);
+ free(expr);
+ }
+
+ return acl;
+}
+
+/* Walk the ACL tree, following nested acl() sample fetches, for no more than
+ * max_recurse evaluations. Returns -1 if a recursive loop is detected, 0 if
+ * the max_recurse was reached, otherwise the number of max_recurse left.
+ */
+static int parse_acl_recurse(struct acl *acl, struct acl_expr *expr, int max_recurse)
+{
+ struct acl_term *term;
+ struct acl_sample *sample;
+
+ if (strcmp(expr->smp->fetch->kw, "acl") != 0)
+ return max_recurse;
+
+ if (--max_recurse <= 0)
+ return 0;
+
+ sample = (struct acl_sample *)expr->smp->arg_p->data.ptr;
+ list_for_each_entry(term, &sample->suite.terms, list) {
+ if (term->acl == acl)
+ return -1;
+ list_for_each_entry(expr, &term->acl->expr, list) {
+ max_recurse = parse_acl_recurse(acl, expr, max_recurse);
+ if (max_recurse <= 0)
+ return max_recurse;
+ }
+ }
+
+ return max_recurse;
+}
+
+/* Parse an ACL with the name starting at <args>[0], and with a list of already
+ * known ACLs in <acl>. If the ACL was not in the list, it will be added.
+ * A pointer to that ACL is returned. If the ACL has an empty name, then it's
+ * an anonymous one and it won't be merged with any other one. If <err> is not
+ * NULL, it will be filled with an appropriate error. This pointer must be
+ * freeable or NULL. <al> is the arg_list serving as a head for unresolved
+ * dependencies. It may be NULL if such dependencies are not allowed.
+ *
+ * args syntax: <aclname> <acl_expr>
+ */
+struct acl *parse_acl(const char **args, struct list *known_acl, char **err, struct arg_list *al,
+ const char *file, int line)
+{
+ __label__ out_return, out_free_acl_expr, out_free_name;
+ struct acl *cur_acl;
+ struct acl_expr *acl_expr;
+ char *name;
+ const char *pos;
+
+ if (**args && (pos = invalid_char(*args))) {
+ memprintf(err, "invalid character in ACL name : '%c'", *pos);
+ goto out_return;
+ }
+
+ acl_expr = parse_acl_expr(args + 1, err, al, file, line);
+ if (!acl_expr) {
+ /* parse_acl_expr will have filled <err> here */
+ goto out_return;
+ }
+
+ /* Check for args beginning with an opening parenthesis just after the
+ * subject, as this is almost certainly a typo. Right now we can only
+ * emit a warning, so let's do so.
+ */
+ if (!strchr(args[1], '(') && *args[2] == '(')
+ ha_warning("parsing acl '%s' :\n"
+ " matching '%s' for pattern '%s' is likely a mistake and probably\n"
+ " not what you want. Maybe you need to remove the extraneous space before '('.\n"
+ " If you are really sure this is not an error, please insert '--' between the\n"
+ " match and the pattern to make this warning message disappear.\n",
+ args[0], args[1], args[2]);
+
+ if (*args[0])
+ cur_acl = find_acl_by_name(args[0], known_acl);
+ else
+ cur_acl = NULL;
+
+ if (cur_acl) {
+ int ret = parse_acl_recurse(cur_acl, acl_expr, ACL_MAX_RECURSE);
+ if (ret <= 0) {
+ if (ret < 0)
+ memprintf(err, "have a recursive loop");
+ else
+ memprintf(err, "too deep acl() tree");
+ goto out_free_acl_expr;
+ }
+ } else {
+ name = strdup(args[0]);
+ if (!name) {
+ memprintf(err, "out of memory when parsing ACL");
+ goto out_free_acl_expr;
+ }
+ cur_acl = calloc(1, sizeof(*cur_acl));
+ if (cur_acl == NULL) {
+ memprintf(err, "out of memory when parsing ACL");
+ goto out_free_name;
+ }
+
+ LIST_INIT(&cur_acl->expr);
+ LIST_APPEND(known_acl, &cur_acl->list);
+ cur_acl->name = name;
+ }
+
+ /* We want to know what features the ACL needs (typically HTTP parsing),
+ * and where it may be used. If an ACL relies on multiple matches, it is
+ * OK if at least one of them may match in the context where it is used.
+ */
+ cur_acl->use |= acl_expr->smp->fetch->use;
+ cur_acl->val |= acl_expr->smp->fetch->val;
+ LIST_APPEND(&cur_acl->expr, &acl_expr->list);
+ return cur_acl;
+
+ out_free_name:
+ free(name);
+ out_free_acl_expr:
+ prune_acl_expr(acl_expr);
+ free(acl_expr);
+ out_return:
+ return NULL;
+}
+
+/* Some useful ACLs provided by default. Only those used are allocated. */
+
+const struct {
+ const char *name;
+ const char *expr[4]; /* put enough for longest expression */
+} default_acl_list[] = {
+ { .name = "TRUE", .expr = {"always_true",""}},
+ { .name = "FALSE", .expr = {"always_false",""}},
+ { .name = "LOCALHOST", .expr = {"src","127.0.0.1/8","::1",""}},
+ { .name = "HTTP", .expr = {"req.proto_http",""}},
+ { .name = "HTTP_1.0", .expr = {"req.ver","1.0",""}},
+ { .name = "HTTP_1.1", .expr = {"req.ver","1.1",""}},
+ { .name = "HTTP_2.0", .expr = {"req.ver","2.0",""}},
+ { .name = "HTTP_3.0", .expr = {"req.ver","3.0",""}},
+ { .name = "METH_CONNECT", .expr = {"method","CONNECT",""}},
+ { .name = "METH_DELETE", .expr = {"method","DELETE",""}},
+ { .name = "METH_GET", .expr = {"method","GET","HEAD",""}},
+ { .name = "METH_HEAD", .expr = {"method","HEAD",""}},
+ { .name = "METH_OPTIONS", .expr = {"method","OPTIONS",""}},
+ { .name = "METH_POST", .expr = {"method","POST",""}},
+ { .name = "METH_PUT", .expr = {"method","PUT",""}},
+ { .name = "METH_TRACE", .expr = {"method","TRACE",""}},
+ { .name = "HTTP_URL_ABS", .expr = {"url_reg","^[^/:]*://",""}},
+ { .name = "HTTP_URL_SLASH", .expr = {"url_beg","/",""}},
+ { .name = "HTTP_URL_STAR", .expr = {"url","*",""}},
+ { .name = "HTTP_CONTENT", .expr = {"req.hdr_val(content-length)","gt","0",""}},
+ { .name = "RDP_COOKIE", .expr = {"req.rdp_cookie_cnt","gt","0",""}},
+ { .name = "REQ_CONTENT", .expr = {"req.len","gt","0",""}},
+ { .name = "WAIT_END", .expr = {"wait_end",""}},
+ { .name = NULL, .expr = {""}}
+};
+
+/* Find a default ACL from the default_acl list, compile it and return it.
+ * If the ACL is not found, NULL is returned. In theory, it cannot fail,
+ * except when default ACLs are broken, in which case it will return NULL.
+ * If <known_acl> is not NULL, the ACL will be queued at its tail. If <err> is
+ * not NULL, it will be filled with an error message if an error occurs. This
+ * pointer must be freeable or NULL. <al> is an arg_list serving as a list head
+ * to report missing dependencies. It may be NULL if such dependencies are not
+ * allowed.
+ */
+static struct acl *find_acl_default(const char *acl_name, struct list *known_acl,
+ char **err, struct arg_list *al,
+ const char *file, int line)
+{
+ __label__ out_return, out_free_acl_expr, out_free_name;
+ struct acl *cur_acl;
+ struct acl_expr *acl_expr;
+ char *name;
+ int index;
+
+ for (index = 0; default_acl_list[index].name != NULL; index++) {
+ if (strcmp(acl_name, default_acl_list[index].name) == 0)
+ break;
+ }
+
+ if (default_acl_list[index].name == NULL) {
+ memprintf(err, "no such ACL : '%s'", acl_name);
+ return NULL;
+ }
+
+ acl_expr = parse_acl_expr((const char **)default_acl_list[index].expr, err, al, file, line);
+ if (!acl_expr) {
+ /* parse_acl_expr must have filled err here */
+ goto out_return;
+ }
+
+ name = strdup(acl_name);
+ if (!name) {
+ memprintf(err, "out of memory when building default ACL '%s'", acl_name);
+ goto out_free_acl_expr;
+ }
+
+ cur_acl = calloc(1, sizeof(*cur_acl));
+ if (cur_acl == NULL) {
+ memprintf(err, "out of memory when building default ACL '%s'", acl_name);
+ goto out_free_name;
+ }
+
+ cur_acl->name = name;
+ cur_acl->use |= acl_expr->smp->fetch->use;
+ cur_acl->val |= acl_expr->smp->fetch->val;
+ LIST_INIT(&cur_acl->expr);
+ LIST_APPEND(&cur_acl->expr, &acl_expr->list);
+ if (known_acl)
+ LIST_APPEND(known_acl, &cur_acl->list);
+
+ return cur_acl;
+
+ out_free_name:
+ free(name);
+ out_free_acl_expr:
+ prune_acl_expr(acl_expr);
+ free(acl_expr);
+ out_return:
+ return NULL;
+}
+
+/* Parse an ACL condition starting at <args>[0], relying on a list of already
+ * known ACLs passed in <known_acl>. The new condition is returned (or NULL in
+ * case of low memory). Supports multiple conditions separated by "or". If
+ * <err> is not NULL, it will be filled with a pointer to an error message in
+ * case of error, that the caller is responsible for freeing. The initial
+ * location must either be freeable or NULL. The list <al> serves as a list head
+ * for unresolved dependencies. It may be NULL if such dependencies are not
+ * allowed.
+ */
+struct acl_cond *parse_acl_cond(const char **args, struct list *known_acl,
+ enum acl_cond_pol pol, char **err, struct arg_list *al,
+ const char *file, int line)
+{
+ __label__ out_return, out_free_suite, out_free_term;
+ int arg, neg;
+ const char *word;
+ struct acl *cur_acl;
+ struct acl_term *cur_term;
+ struct acl_term_suite *cur_suite;
+ struct acl_cond *cond;
+ unsigned int suite_val;
+
+ cond = calloc(1, sizeof(*cond));
+ if (cond == NULL) {
+ memprintf(err, "out of memory when parsing condition");
+ goto out_return;
+ }
+
+ LIST_INIT(&cond->list);
+ LIST_INIT(&cond->suites);
+ cond->pol = pol;
+ cond->val = 0;
+
+ cur_suite = NULL;
+ suite_val = ~0U;
+ neg = 0;
+ for (arg = 0; *args[arg]; arg++) {
+ word = args[arg];
+
+ /* remove as many exclamation marks as we can */
+ while (*word == '!') {
+ neg = !neg;
+ word++;
+ }
+
+ /* an empty word is allowed because we cannot force the user to
+ * always think about not leaving exclamation marks alone.
+ */
+ if (!*word)
+ continue;
+
+ if (strcasecmp(word, "or") == 0 || strcmp(word, "||") == 0) {
+ /* new term suite */
+ cond->val |= suite_val;
+ suite_val = ~0U;
+ cur_suite = NULL;
+ neg = 0;
+ continue;
+ }
+
+ if (strcmp(word, "{") == 0) {
+ /* we may have a complete ACL expression between two braces,
+ * find the last one.
+ */
+ int arg_end = arg + 1;
+ const char **args_new;
+
+ while (*args[arg_end] && strcmp(args[arg_end], "}") != 0)
+ arg_end++;
+
+ if (!*args[arg_end]) {
+ memprintf(err, "missing closing '}' in condition");
+ goto out_free_suite;
+ }
+
+ args_new = calloc(1, (arg_end - arg + 1) * sizeof(*args_new));
+ if (!args_new) {
+ memprintf(err, "out of memory when parsing condition");
+ goto out_free_suite;
+ }
+
+ args_new[0] = "";
+ memcpy(args_new + 1, args + arg + 1, (arg_end - arg) * sizeof(*args_new));
+ args_new[arg_end - arg] = "";
+ cur_acl = parse_acl(args_new, known_acl, err, al, file, line);
+ free(args_new);
+
+ if (!cur_acl) {
+ /* note that parse_acl() must have filled <err> here */
+ goto out_free_suite;
+ }
+ arg = arg_end;
+ }
+ else {
+ /* search for <word> in the known ACL names. If we do not find
+ * it, let's look for it in the default ACLs, and if found, add
+ * it to the list of ACLs of this proxy. This makes it possible
+ * to override them.
+ */
+ cur_acl = find_acl_by_name(word, known_acl);
+ if (cur_acl == NULL) {
+ cur_acl = find_acl_default(word, known_acl, err, al, file, line);
+ if (cur_acl == NULL) {
+ /* note that find_acl_default() must have filled <err> here */
+ goto out_free_suite;
+ }
+ }
+ }
+
+ cur_term = calloc(1, sizeof(*cur_term));
+ if (cur_term == NULL) {
+ memprintf(err, "out of memory when parsing condition");
+ goto out_free_suite;
+ }
+
+ cur_term->acl = cur_acl;
+ cur_term->neg = neg;
+
+ /* Here it is a bit complex. The acl_term_suite is a conjunction
+ * of many terms. It may only be used if all of its terms are
+ * usable at the same time. So the suite's validity domain is an
+ * AND between all ACL keywords' ones. But, the global condition
+ * is valid if at least one term suite is OK. So it's an OR between
+ * all of their validity domains. We could emit a warning as soon
+ * as suite_val is null because it means that the last ACL is not
+ * compatible with the previous ones. Let's remain simple for now.
+ */
+ cond->use |= cur_acl->use;
+ suite_val &= cur_acl->val;
+
+ if (!cur_suite) {
+ cur_suite = calloc(1, sizeof(*cur_suite));
+ if (cur_suite == NULL) {
+ memprintf(err, "out of memory when parsing condition");
+ goto out_free_term;
+ }
+ LIST_INIT(&cur_suite->terms);
+ LIST_APPEND(&cond->suites, &cur_suite->list);
+ }
+ LIST_APPEND(&cur_suite->terms, &cur_term->list);
+ neg = 0;
+ }
+
+ cond->val |= suite_val;
+ return cond;
+
+ out_free_term:
+ free(cur_term);
+ out_free_suite:
+ free_acl_cond(cond);
+ out_return:
+ return NULL;
+}
+
+/* Builds an ACL condition starting at the if/unless keyword. The complete
+ * condition is returned. NULL is returned in case of error or if the first
+ * word is neither "if" nor "unless". It automatically sets the file name and
+ * the line number in the condition for better error reporting, and sets the
+ * HTTP initialization requirements in the proxy. If <err> is not NULL, it will
+ * be filled with a pointer to an error message in case of error, that the
+ * caller is responsible for freeing. The initial location must either be
+ * freeable or NULL.
+ */
+struct acl_cond *build_acl_cond(const char *file, int line, struct list *known_acl,
+ struct proxy *px, const char **args, char **err)
+{
+ enum acl_cond_pol pol = ACL_COND_NONE;
+ struct acl_cond *cond = NULL;
+
+ if (err)
+ *err = NULL;
+
+ if (strcmp(*args, "if") == 0) {
+ pol = ACL_COND_IF;
+ args++;
+ }
+ else if (strcmp(*args, "unless") == 0) {
+ pol = ACL_COND_UNLESS;
+ args++;
+ }
+ else {
+ memprintf(err, "conditions must start with either 'if' or 'unless'");
+ return NULL;
+ }
+
+ cond = parse_acl_cond(args, known_acl, pol, err, &px->conf.args, file, line);
+ if (!cond) {
+ /* note that parse_acl_cond must have filled <err> here */
+ return NULL;
+ }
+
+ cond->file = file;
+ cond->line = line;
+ px->http_needed |= !!(cond->use & SMP_USE_HTTP_ANY);
+ return cond;
+}
+
+/* Execute condition <cond> and return either ACL_TEST_FAIL, ACL_TEST_MISS or
+ * ACL_TEST_PASS depending on the test results. ACL_TEST_MISS may only be
+ * returned if <opt> does not contain SMP_OPT_FINAL, indicating that incomplete
+ * data is being examined. The function automatically sets SMP_OPT_ITERATE. This
+ * function only computes the condition, it does not apply the polarity required
+ * by IF/UNLESS, it's up to the caller to do this using something like this :
+ *
+ * res = acl_pass(res);
+ * if (res == ACL_TEST_MISS)
+ * return 0;
+ * if (cond->pol == ACL_COND_UNLESS)
+ * res = !res;
+ */
+enum acl_test_res acl_exec_cond(struct acl_cond *cond, struct proxy *px, struct session *sess, struct stream *strm, unsigned int opt)
+{
+	__label__ fetch_next;
+	struct acl_term_suite *suite;
+	struct acl_term *term;
+	struct acl_expr *expr;
+	struct acl *acl;
+	struct sample smp;
+	enum acl_test_res acl_res, suite_res, cond_res;
+
+	/* ACLs are iterated over all values, so let's always set the flag to
+	 * indicate this to the fetch functions.
+	 */
+	opt |= SMP_OPT_ITERATE;
+
+	/* We're doing a logical OR between conditions so we initialize to FAIL.
+	 * The MISS status is propagated down from the suites.
+	 *
+	 * NOTE: the combination of results below relies on the bit encoding of
+	 * the acl_test_res enum: OR keeps the strongest of FAIL/MISS/PASS and
+	 * AND keeps the weakest one.
+	 */
+	cond_res = ACL_TEST_FAIL;
+	list_for_each_entry(suite, &cond->suites, list) {
+		/* Evaluate condition suite <suite>. We stop at the first term
+		 * which returns ACL_TEST_FAIL. The MISS status is still propagated
+		 * in case of uncertainty in the result.
+		 */
+
+		/* we're doing a logical AND between terms, so we must set the
+		 * initial value to PASS.
+		 */
+		suite_res = ACL_TEST_PASS;
+		list_for_each_entry(term, &suite->terms, list) {
+			acl = term->acl;
+
+			/* FIXME: use cache !
+			 * check acl->cache_idx for this.
+			 */
+
+			/* ACL result not cached. Let's scan all the expressions
+			 * and use the first one to match.
+			 */
+			acl_res = ACL_TEST_FAIL;
+			list_for_each_entry(expr, &acl->expr, list) {
+				/* we need to reset context and flags */
+				memset(&smp, 0, sizeof(smp));
+			fetch_next:
+				/* re-entered from below when the fetch reports
+				 * more values available (SMP_F_NOT_LAST), keeping
+				 * the sample's context intact between iterations.
+				 */
+				if (!sample_process(px, sess, strm, opt, expr->smp, &smp)) {
+					/* maybe we could not fetch because of missing data */
+					if (smp.flags & SMP_F_MAY_CHANGE && !(opt & SMP_OPT_FINAL))
+						acl_res |= ACL_TEST_MISS;
+					continue;
+				}
+
+				acl_res |= pat2acl(pattern_exec_match(&expr->pat, &smp, 0));
+				/*
+				 * OK now acl_res holds the result of this expression
+				 * as one of ACL_TEST_FAIL, ACL_TEST_MISS or ACL_TEST_PASS.
+				 *
+				 * Then if (!MISS) we can cache the result, and put
+				 * (smp.flags & SMP_F_VOLATILE) in the cache flags.
+				 *
+				 * FIXME: implement cache.
+				 *
+				 */
+
+				/* we're ORing these terms, so a single PASS is enough */
+				if (acl_res == ACL_TEST_PASS)
+					break;
+
+				if (smp.flags & SMP_F_NOT_LAST)
+					goto fetch_next;
+
+				/* sometimes we know the fetched data is subject to change
+				 * later and give another chance for a new match (eg: request
+				 * size, time, ...)
+				 */
+				if (smp.flags & SMP_F_MAY_CHANGE && !(opt & SMP_OPT_FINAL))
+					acl_res |= ACL_TEST_MISS;
+			}
+			/*
+			 * Here we have the result of an ACL (cached or not).
+			 * ACLs are combined, negated or not, to form conditions.
+			 */
+
+			if (term->neg)
+				acl_res = acl_neg(acl_res);
+
+			suite_res &= acl_res;
+
+			/* we're ANDing these terms, so a single FAIL or MISS is enough */
+			if (suite_res != ACL_TEST_PASS)
+				break;
+		}
+		cond_res |= suite_res;
+
+		/* we're ORing these terms, so a single PASS is enough */
+		if (cond_res == ACL_TEST_PASS)
+			break;
+	}
+	return cond_res;
+}
+
+/* Scans all ACLs referenced by condition <cond> and returns a pointer to the
+ * first one that cannot be used at the check place described by <where> (one
+ * of the SMP_VAL_* bits). NULL is returned when every ACL is usable there.
+ * Only full conflicts are detected (ACL not usable at all); use the next
+ * function to check for useless keywords.
+ */
+const struct acl *acl_cond_conflicts(const struct acl_cond *cond, unsigned int where)
+{
+	struct acl_term_suite *s;
+	struct acl_term *t;
+
+	list_for_each_entry(s, &cond->suites, list)
+		list_for_each_entry(t, &s->terms, list)
+			if (!(t->acl->val & where))
+				return t->acl;
+
+	return NULL;
+}
+
+/* Looks for the first ACL keyword in condition <cond> which cannot be used at
+ * the check place <where> (one of the SMP_VAL_* bits). Returns non-zero when
+ * such a keyword exists, filling <acl> and <kw> (when non-NULL) with the
+ * offending ACL and keyword name; returns zero when no conflict is found.
+ */
+int acl_cond_kw_conflicts(const struct acl_cond *cond, unsigned int where, struct acl const **acl, char const **kw)
+{
+	struct acl_term_suite *s;
+	struct acl_term *t;
+	struct acl_expr *e;
+
+	list_for_each_entry(s, &cond->suites, list) {
+		list_for_each_entry(t, &s->terms, list) {
+			list_for_each_entry(e, &t->acl->expr, list) {
+				if (e->smp->fetch->val & where)
+					continue;
+
+				if (acl)
+					*acl = t->acl;
+				if (kw)
+					*kw = e->kw;
+				return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * Resolves and validates userlist/group targets referenced by ACLs of proxy
+ * <p>: for every "http_auth_group" ACL expression, checks that its userlist
+ * argument was resolved, that at least one group is specified, and that each
+ * group pattern names an existing group of that userlist. Returns the number
+ * of errors found (0 means everything is fine). It must be called only once
+ * sample fetch arguments have been resolved (after smp_resolve_args()).
+ */
+int acl_find_targets(struct proxy *p)
+{
+
+	struct acl *acl;
+	struct acl_expr *expr;
+	struct pattern_list *pattern;
+	int cfgerr = 0;
+	struct pattern_expr_list *pexp;
+
+	list_for_each_entry(acl, &p->acl, list) {
+		list_for_each_entry(expr, &acl->expr, list) {
+			if (strcmp(expr->kw, "http_auth_group") == 0) {
+				/* Note: the ARGT_USR argument may only have been resolved earlier
+				 * by smp_resolve_args().
+				 */
+				if (expr->smp->arg_p->unresolved) {
+					/* should never happen at this stage; reported as an internal bug */
+					ha_alert("Internal bug in proxy %s: %sacl %s %s() makes use of unresolved userlist '%s'. Please report this.\n",
+						 p->id, *acl->name ? "" : "anonymous ", acl->name, expr->kw,
+						 expr->smp->arg_p->data.str.area);
+					cfgerr++;
+					continue;
+				}
+
+				if (LIST_ISEMPTY(&expr->pat.head)) {
+					ha_alert("proxy %s: acl %s %s(): no groups specified.\n",
+						 p->id, acl->name, expr->kw);
+					cfgerr++;
+					continue;
+				}
+
+				/* For each pattern, check if the group exists. */
+				list_for_each_entry(pexp, &expr->pat.head, list) {
+					if (LIST_ISEMPTY(&pexp->expr->patterns)) {
+						ha_alert("proxy %s: acl %s %s(): no groups specified.\n",
+							 p->id, acl->name, expr->kw);
+						cfgerr++;
+						/* only skips this pattern expression, the scan goes on */
+						continue;
+					}
+
+					list_for_each_entry(pattern, &pexp->expr->patterns, list) {
+						/* this keyword only has one argument */
+						if (!check_group(expr->smp->arg_p->data.usr, pattern->pat.ptr.str)) {
+							ha_alert("proxy %s: acl %s %s(): invalid group '%s'.\n",
+								 p->id, acl->name, expr->kw, pattern->pat.ptr.str);
+							cfgerr++;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return cfgerr;
+}
+
+/* Initializes ACLs by resolving, for every registered ACL keyword, the sample
+ * fetch it relies upon: either its explicit fetch_kw, or by default its own
+ * name. The resolved fetch is stored back into the keyword entry. Returns 0
+ * on success, otherwise the number of errors encountered.
+ */
+int init_acl()
+{
+	struct acl_kw_list *kwl;
+	struct sample_fetch *fetch;
+	const char *fetch_name;
+	int idx;
+	int errors = 0;
+
+	list_for_each_entry(kwl, &acl_keywords.list, list) {
+		for (idx = 0; kwl->kw[idx].kw != NULL; idx++) {
+			fetch_name = kwl->kw[idx].fetch_kw ? kwl->kw[idx].fetch_kw : kwl->kw[idx].kw;
+
+			fetch = find_sample_fetch(fetch_name, strlen(fetch_name));
+			if (fetch) {
+				kwl->kw[idx].smp = fetch;
+				continue;
+			}
+
+			ha_alert("Critical internal error: ACL keyword '%s' relies on sample fetch '%s' which was not registered!\n",
+				 kwl->kw[idx].kw, fetch_name);
+			errors++;
+		}
+	}
+	return errors;
+}
+
+/* Dumps all known ACL keywords on stdout, alphabetically sorted, one per line
+ * in the form "<acl_kw> = <sample_fetch> -m <match_type>". The sort needs no
+ * allocation: each outer iteration rescans every registered keyword list for
+ * the smallest name strictly greater than the last one printed.
+ */
+void acl_dump_kwd(void)
+{
+	struct acl_kw_list *kwl;
+	const struct acl_keyword *kwp, *kw;
+	const char *name;
+	int index;
+
+	/* <kwp> = last keyword printed, <kw> = best candidate so far;
+	 * the loop terminates when a full scan finds no candidate above <kwp>.
+	 */
+	for (kw = kwp = NULL;; kwp = kw) {
+		list_for_each_entry(kwl, &acl_keywords.list, list) {
+			for (index = 0; kwl->kw[index].kw != NULL; index++) {
+				if (strordered(kwp ? kwp->kw : NULL,
+					       kwl->kw[index].kw,
+					       kw != kwp ? kw->kw : NULL))
+					kw = &kwl->kw[index];
+			}
+		}
+
+		if (kw == kwp)
+			break;
+
+		/* default to the keyword's own name when no fetch name is set */
+		name = kw->fetch_kw;
+		if (!name)
+			name = kw->kw;
+
+		printf("%s = %s -m %s\n", kw->kw, name, pat_match_names[kw->match_type]);
+	}
+}
+
+/* Releases everything attached to ACL condition <cond> (all its term suites
+ * and their terms), then releases <cond> itself. Passing NULL is a no-op.
+ */
+void free_acl_cond(struct acl_cond *cond)
+{
+	struct acl_term_suite *s, *s_back;
+	struct acl_term *t, *t_back;
+
+	if (!cond)
+		return;
+
+	list_for_each_entry_safe(s, s_back, &cond->suites, list) {
+		list_for_each_entry_safe(t, t_back, &s->terms, list) {
+			LIST_DELETE(&t->list);
+			free(t);
+		}
+		LIST_DELETE(&s->list);
+		free(s);
+	}
+
+	free(cond);
+}
+
+
+/* Sample fetch for the "acl()" keyword: evaluates the precompiled condition
+ * attached to the argument and returns its result as a boolean sample.
+ * Returns 0 (no sample) when the condition reports a MISS, i.e. the data
+ * needed to decide is not available yet.
+ */
+static int smp_fetch_acl(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct acl_sample *as = (struct acl_sample *)args->data.ptr;
+	enum acl_test_res res;
+
+	res = acl_exec_cond(&as->cond, smp->px, smp->sess, smp->strm, smp->opt);
+	if (res == ACL_TEST_MISS)
+		return 0;
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = (res == ACL_TEST_PASS);
+	return 1;
+}
+
+/* Argument parser for the "acl()" sample fetch. Each argument is the name of
+ * an ACL of the current proxy, optionally prefixed with '!' to negate it. All
+ * named ACLs are assembled into a single AND-combined term suite stored in an
+ * acl_sample structure attached to the first argument. Returns 1 on success,
+ * otherwise 0 with <err_msg> filled.
+ */
+int smp_fetch_acl_parse(struct arg *args, char **err_msg)
+{
+	struct acl_sample *acl_sample;
+	char *name;
+	int i;
+
+	/* count the arguments to size the flexible array of terms */
+	for (i = 0; args[i].type != ARGT_STOP; i++)
+		;
+	acl_sample = calloc(1, sizeof(struct acl_sample) + sizeof(struct acl_term) * i);
+	if (!acl_sample) {
+		/* previously unchecked: a failed allocation would crash below */
+		memprintf(err_msg, "out of memory");
+		return 0;
+	}
+	LIST_INIT(&acl_sample->suite.terms);
+	LIST_INIT(&acl_sample->cond.suites);
+	LIST_APPEND(&acl_sample->cond.suites, &acl_sample->suite.list);
+	acl_sample->cond.val = ~0U; // the keyword is valid everywhere for now.
+
+	args->data.ptr = acl_sample;
+
+	for (i = 0; args[i].type != ARGT_STOP; i++) {
+		name = args[i].data.str.area;
+		if (name[0] == '!') {
+			acl_sample->terms[i].neg = 1;
+			name++;
+		}
+
+		if (!(acl_sample->terms[i].acl = find_acl_by_name(name, &curproxy->acl))) {
+			memprintf(err_msg, "ACL '%s' not found", name);
+			goto err;
+		}
+
+		/* merge use/val info so the condition inherits each ACL's constraints */
+		acl_sample->cond.use |= acl_sample->terms[i].acl->use;
+		acl_sample->cond.val &= acl_sample->terms[i].acl->val;
+
+		LIST_APPEND(&acl_sample->suite.terms, &acl_sample->terms[i].list);
+	}
+
+	return 1;
+
+err:
+	/* don't leave a dangling pointer to the freed block in the argument */
+	args->data.ptr = NULL;
+	free(acl_sample);
+	return 0;
+}
+
+/************************************************************************/
+/* All supported sample and ACL keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * Currently empty: this file registers no dedicated ACL keywords.
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
+
+/* The "acl" sample fetch accepts 1 to 12 string arguments (ACL names,
+ * optionally prefixed with '!'), validated by smp_fetch_acl_parse().
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "acl", smp_fetch_acl, ARG12(1,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR), smp_fetch_acl_parse, SMP_T_BOOL, SMP_USE_CONST },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/action.c b/src/action.c
new file mode 100644
index 0000000..47f5f86
--- /dev/null
+++ b/src/action.c
@@ -0,0 +1,363 @@
+/*
+ * Action management functions.
+ *
+ * Copyright 2017 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+
+
+/* Runs the configuration-time check callback of every rule in <rules> for
+ * proxy <px>, and warns about conditions misplaced between tcp and http
+ * rulesets. <err_code> is updated when a warning is emitted. Returns the
+ * number of errors encountered.
+ */
+int check_action_rules(struct list *rules, struct proxy *px, int *err_code)
+{
+	struct act_rule *r;
+	char *msg = NULL;
+	int nb_err = 0;
+
+	list_for_each_entry(r, rules, list) {
+		if (r->check_ptr && !r->check_ptr(r, px, &msg)) {
+			ha_alert("Proxy '%s': %s.\n", px->id, msg);
+			nb_err++;
+		}
+		*err_code |= warnif_tcp_http_cond(px, r->cond);
+		ha_free(&msg);
+	}
+
+	return nb_err;
+}
+
+/* Find and check the target table used by an action track-sc*. This
+ * function should be called during the configuration validity check.
+ * It resolves the table name into a pointer, verifies the tracked key's
+ * type compatibility with the table, links the table to the proxy, and
+ * emits warnings about likely misconfigurations.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+int check_trk_action(struct act_rule *rule, struct proxy *px, char **err)
+{
+	struct stktable *target;
+
+	/* an explicit table name takes precedence over the proxy's own table */
+	if (rule->arg.trk_ctr.table.n)
+		target = stktable_find_by_name(rule->arg.trk_ctr.table.n)\u003b
+	else
+		target = px->table;
+
+	if (!target) {
+		memprintf(err, "unable to find table '%s' referenced by track-sc%d",
+			  rule->arg.trk_ctr.table.n ? rule->arg.trk_ctr.table.n : px->id,
+			  rule->action);
+		return 0;
+	}
+
+	if (!stktable_compatible_sample(rule->arg.trk_ctr.expr, target->type)) {
+		memprintf(err, "stick-table '%s' uses a type incompatible with the 'track-sc%d' rule",
+			  rule->arg.trk_ctr.table.n ? rule->arg.trk_ctr.table.n : px->id,
+			  rule->action);
+		return 0;
+	}
+	else {
+		/* register the proxy in the table's list of users (once) */
+		if (!in_proxies_list(target->proxies_list, px)) {
+			px->next_stkt_ref = target->proxies_list;
+			target->proxies_list = px;
+		}
+		/* the name is no longer needed once resolved to a pointer */
+		free(rule->arg.trk_ctr.table.n);
+		rule->arg.trk_ctr.table.t = target;
+		/* Note: if we decide to enhance the track-sc syntax, we may be
+		 * able to pass a list of counters to track and allocate them
+		 * right here using stktable_alloc_data_type().
+		 */
+	}
+
+	if (rule->from == ACT_F_TCP_REQ_CNT && (px->cap & PR_CAP_FE)) {
+		if (!px->tcp_req.inspect_delay && !(rule->arg.trk_ctr.expr->fetch->val & SMP_VAL_FE_SES_ACC)) {
+			ha_warning("%s '%s' : a 'tcp-request content track-sc*' rule explicitly depending on request"
+				   " contents without any 'tcp-request inspect-delay' setting."
+				   " This means that this rule will randomly find its contents. This can be fixed by"
+				   " setting the tcp-request inspect-delay.\n",
+				   proxy_type_str(px), px->id);
+		}
+
+		/* The following warning is emitted because HTTP multiplexers are able to catch errors
+		 * or timeouts at the session level, before instantiating any stream.
+		 * Thus the tcp-request content ruleset will not be evaluated in such case. It means,
+		 * http_req and http_err counters will not be incremented as expected, even if the tracked
+		 * counter does not use the request content. To track invalid requests it should be
+		 * performed at the session level using a tcp-request session rule.
+		 */
+		if (px->mode == PR_MODE_HTTP &&
+		    !(rule->arg.trk_ctr.expr->fetch->use & (SMP_USE_L6REQ|SMP_USE_HRQHV|SMP_USE_HRQHP|SMP_USE_HRQBO)) &&
+		    (!rule->cond || !(rule->cond->use & (SMP_USE_L6REQ|SMP_USE_HRQHV|SMP_USE_HRQHP|SMP_USE_HRQBO)))) {
+			ha_warning("%s '%s' : a 'tcp-request content track-sc*' rule not depending on request"
+				   " contents for an HTTP frontend should be executed at the session level, using a"
+				   " 'tcp-request session' rule (mandatory to track invalid HTTP requests).\n",
+				   proxy_type_str(px), px->id);
+		}
+	}
+
+	return 1;
+}
+
+/* Configuration-time check for a capture rule: warns when a 'tcp-request
+ * capture' rule depends on request contents in a frontend with no
+ * 'tcp-request inspect-delay' set, since the contents would then be sampled
+ * at a random moment. Always returns 1 (warning only); <err> is never filled.
+ */
+int check_capture(struct act_rule *rule, struct proxy *px, char **err)
+{
+	int needs_warning;
+
+	needs_warning = (rule->from == ACT_F_TCP_REQ_CNT) &&
+	                (px->cap & PR_CAP_FE) &&
+	                !px->tcp_req.inspect_delay &&
+	                !(rule->arg.cap.expr->fetch->val & SMP_VAL_FE_SES_ACC);
+
+	if (needs_warning)
+		ha_warning("%s '%s' : a 'tcp-request capture' rule explicitly depending on request"
+			   " contents without any 'tcp-request inspect-delay' setting."
+			   " This means that this rule will randomly find its contents. This can be fixed by"
+			   " setting the tcp-request inspect-delay.\n",
+			   proxy_type_str(px), px->id);
+
+	return 1;
+}
+
+/* DNS resolution success callback for action rules: wakes up the stream
+ * owning the requester so it can process the received resolution. Always
+ * returns 0. <counters> is unused.
+ */
+int act_resolution_cb(struct resolv_requester *requester, struct dns_counters *counters)
+{
+	struct stream *s;
+
+	if (!requester->resolution)
+		return 0;
+
+	s = objt_stream(requester->owner);
+	if (!s)
+		return 0;
+
+	task_wakeup(s->task, TASK_WOKEN_MSG);
+	return 0;
+}
+
+/*
+ * DNS resolution error callback for action rules: wakes up the stream owning
+ * the requester so it can handle the failure.
+ * returns:
+ *   0 if the answer items can be trashed.
+ *   1 when the error is safely ignored and the answer items must be kept.
+ */
+int act_resolution_error_cb(struct resolv_requester *requester, int error_code)
+{
+	struct stream *s;
+
+	if (!requester->resolution)
+		return 0;
+
+	s = objt_stream(requester->owner);
+	if (!s)
+		return 0;
+
+	task_wakeup(s->task, TASK_WOKEN_MSG);
+	return 0;
+}
+
+/* Parse a set-timeout rule statement. It first checks if the timeout name is
+ * valid and proxy is capable of handling it, and returns it in <rule->arg.timeout.type>.
+ * Then the timeout is parsed as a plain value and returned in <rule->arg.timeout.value>.
+ * If there is a parsing error, the value is reparsed as an expression and
+ * returned in <rule->arg.timeout.expr>.
+ *
+ * Returns -1 if the name is invalid or neither a time or an expression can be
+ * parsed, or if the timeout value is 0.
+ */
+int cfg_parse_rule_set_timeout(const char **args, int idx, struct act_rule *rule,
+                               struct proxy *px, char **err)
+{
+	const char *res;
+	const char *timeout_name = args[idx++];
+
+	/* "server" and "tunnel" only make sense on backends, "client" on frontends */
+	if (strcmp(timeout_name, "server") == 0) {
+		if (!(px->cap & PR_CAP_BE)) {
+			memprintf(err, "'%s' has no backend capability", px->id);
+			return -1;
+		}
+		rule->arg.timeout.type = ACT_TIMEOUT_SERVER;
+	}
+	else if (strcmp(timeout_name, "tunnel") == 0) {
+		if (!(px->cap & PR_CAP_BE)) {
+			memprintf(err, "'%s' has no backend capability", px->id);
+			return -1;
+		}
+		rule->arg.timeout.type = ACT_TIMEOUT_TUNNEL;
+	}
+	else if (strcmp(timeout_name, "client") == 0) {
+		if (!(px->cap & PR_CAP_FE)) {
+			memprintf(err, "'%s' has no frontend capability", px->id);
+			return -1;
+		}
+		rule->arg.timeout.type = ACT_TIMEOUT_CLIENT;
+	}
+	else {
+		memprintf(err,
+			  "'set-timeout' rule supports 'server'/'tunnel'/'client' (got '%s')",
+			  timeout_name);
+		return -1;
+	}
+
+	/* first try to parse the value as a plain duration */
+	res = parse_time_err(args[idx], (unsigned int *)&rule->arg.timeout.value, TIME_UNIT_MS);
+	if (res == PARSE_TIME_OVER) {
+		memprintf(err, "timer overflow in argument '%s' to rule 'set-timeout %s' (maximum value is 2147483647 ms or ~24.8 days)",
+			  args[idx], timeout_name);
+		return -1;
+	}
+	else if (res == PARSE_TIME_UNDER) {
+		memprintf(err, "timer underflow in argument '%s' to rule 'set-timeout %s' (minimum value is 1 ms)",
+			  args[idx], timeout_name);
+		return -1;
+	}
+	/* res not NULL, parsing error: fall back to a sample expression */
+	else if (res) {
+		rule->arg.timeout.expr = sample_parse_expr((char **)args, &idx, px->conf.args.file,
+		                                           px->conf.args.line, err, &px->conf.args, NULL);
+		if (!rule->arg.timeout.expr) {
+			memprintf(err, "unexpected character '%c' in rule 'set-timeout %s'", *res, timeout_name);
+			return -1;
+		}
+	}
+	/* res NULL, parsing ok but value is 0 */
+	else if (!(rule->arg.timeout.value)) {
+		memprintf(err, "null value is not valid for a 'set-timeout %s' rule",
+			  timeout_name);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* tries to find in list <keywords> a similar looking action as the one in
+ * <word>, and returns it otherwise NULL. <word> may be NULL or empty. An
+ * optional array of extra words to compare may be passed in <extra>, but it
+ * must then be terminated by a NULL entry. If unused it may be NULL.
+ * Similarity is measured on character fingerprints (see
+ * make_word_fingerprint() / word_fingerprint_distance()).
+ */
+const char *action_suggest(const char *word, const struct list *keywords, const char **extra)
+{
+	/* NOTE(review): 1024 bytes is presumably the fixed fingerprint size
+	 * expected by make_word_fingerprint() — confirm against its definition.
+	 */
+	uint8_t word_sig[1024];
+	uint8_t list_sig[1024];
+	const struct action_kw_list *kwl;
+	const struct action_kw *best_kw = NULL;
+	const char *best_ptr = NULL;
+	int dist, best_dist = INT_MAX;
+	int index;
+
+	if (!word || !*word)
+		return NULL;
+
+	make_word_fingerprint(word_sig, word);
+	list_for_each_entry(kwl, keywords, list) {
+		for (index = 0; kwl->kw[index].kw != NULL; index++) {
+			make_word_fingerprint(list_sig, kwl->kw[index].kw);
+			dist = word_fingerprint_distance(word_sig, list_sig);
+			if (dist < best_dist) {
+				best_dist = dist;
+				best_kw = &kwl->kw[index];
+				best_ptr = best_kw->kw;
+			}
+		}
+	}
+
+	/* extra candidates have no keyword entry, only a name */
+	while (extra && *extra) {
+		make_word_fingerprint(list_sig, *extra);
+		dist = word_fingerprint_distance(word_sig, list_sig);
+		if (dist < best_dist) {
+			best_dist = dist;
+			best_kw = NULL;
+			best_ptr = *extra;
+		}
+		extra++;
+	}
+
+	/* eliminate too different ones, with more tolerance for prefixes
+	 * when they're known to exist (not from extra list).
+	 */
+	if (best_ptr &&
+	    (best_dist > (2 + (best_kw && (best_kw->flags & KWF_MATCH_PREFIX))) * strlen(word) ||
+	     best_dist > (2 + (best_kw && (best_kw->flags & KWF_MATCH_PREFIX))) * strlen(best_ptr)))
+		best_ptr = NULL;
+
+	return best_ptr;
+}
+
+/* Allocates and initializes a new action rule for ruleset <from> (one of the
+ * ACT_F_* values), recording the configuration location <file>:<linenum>
+ * (<file> and <linenum> may be NULL/zero when unknown). Returns the new rule,
+ * or NULL in case of memory allocation error.
+ */
+struct act_rule *new_act_rule(enum act_from from, const char *file, int linenum)
+{
+	struct act_rule *r = calloc(1, sizeof(*r));
+
+	if (!r)
+		return NULL;
+
+	r->from = from;
+	r->conf.file = file ? strdup(file) : NULL;
+	r->conf.line = linenum;
+	LIST_INIT(&r->list);
+	return r;
+}
+
+/* Frees rule <rule> and its elements (its ACL condition, its per-action
+ * private data via release_ptr, and its config file name), after detaching
+ * it from the list it belongs to.
+ */
+void free_act_rule(struct act_rule *rule)
+{
+	LIST_DELETE(&rule->list);
+	free_acl_cond(rule->cond);
+	if (rule->release_ptr)
+		rule->release_ptr(rule);
+	free(rule->conf.file);
+	free(rule);
+}
+
+/* Releases every action rule from list <rules>, including conditions and
+ * per-action private data.
+ */
+void free_act_rules(struct list *rules)
+{
+	struct act_rule *r, *back;
+
+	list_for_each_entry_safe(r, back, rules, list)
+		free_act_rule(r);
+}
+
+/* dumps all known actions registered in action rules <rules> after prefix
+ * <pfx> to stdout. The actions are alphabetically sorted. Those with the
+ * KWF_MATCH_PREFIX flag have their name suffixed with '*'. The sort needs
+ * no allocation: each outer iteration rescans the keyword lists for the
+ * smallest name strictly greater than the last one printed.
+ */
+void dump_act_rules(const struct list *rules, const char *pfx)
+{
+	/* <akwp> = last keyword printed, <akwn> = best candidate so far */
+	const struct action_kw *akwp, *akwn;
+	struct action_kw_list *akwl;
+	int index;
+
+	for (akwn = akwp = NULL;; akwp = akwn) {
+		list_for_each_entry(akwl, rules, list) {
+			for (index = 0; akwl->kw[index].kw != NULL; index++)
+				if (strordered(akwp ? akwp->kw : NULL,
+					       akwl->kw[index].kw,
+					       akwn != akwp ? akwn->kw : NULL))
+					akwn = &akwl->kw[index];
+		}
+		/* a full scan without a new candidate means we're done */
+		if (akwn == akwp)
+			break;
+		printf("%s%s%s\n", pfx ? pfx : "", akwn->kw,
+		       (akwn->flags & KWF_MATCH_PREFIX) ? "*" : "");
+	}
+}
diff --git a/src/activity.c b/src/activity.c
new file mode 100644
index 0000000..07a30e6
--- /dev/null
+++ b/src/activity.c
@@ -0,0 +1,1248 @@
+/*
+ * activity measurement functions.
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/activity-t.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/clock.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/listener.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+/* CLI context for the "show profiling" command; persists between successive
+ * calls of the I/O handler while the output is being produced.
+ */
+struct show_prof_ctx {
+	int dump_step; /* 0,1,2,4,5,6; see cli_iohandler_show_profiling() */
+	int linenum; /* next line to be dumped (starts at 0) */
+	int maxcnt; /* max line count per step (0=not set) */
+	int by_what; /* 0=sort by usage, 1=sort by address, 2=sort by time */
+	int aggr; /* 0=dump raw, 1=aggregate on callee */
+};
+
+/* CLI context for the "show activity" command; persists between successive
+ * calls of the I/O handler while the output is being produced.
+ */
+struct show_activity_ctx {
+	int thr; /* thread ID to show or -1 for all */
+	int line; /* line number being dumped */
+	int col; /* column index being dumped, 0 to nbt+1 */
+};
+
+#if defined(DEBUG_MEM_STATS)
+/* these ones are macros in bug.h when DEBUG_MEM_STATS is set, and will
+ * prevent the new ones from being redefined.
+ */
+#undef calloc
+#undef malloc
+#undef realloc
+#endif
+
+/* bit field of profiling options. Beware, may be modified at runtime! */
+unsigned int profiling __read_mostly = HA_PROF_TASKS_AOFF;
+
+/* start/stop dates of task and memory profiling, in nanoseconds */
+uint64_t prof_task_start_ns = 0;
+uint64_t prof_task_stop_ns = 0;
+uint64_t prof_mem_start_ns = 0;
+uint64_t prof_mem_stop_ns = 0;
+
+/* One struct per thread containing all collected measurements */
+struct activity activity[MAX_THREADS] __attribute__((aligned(64))) = { };
+
+/* One struct per function pointer hash entry (SCHED_ACT_HASH_BUCKETS values, 0=collision) */
+struct sched_activity sched_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64))) = { };
+
+
+#ifdef USE_MEMORY_PROFILING
+
+/* names indexed by enum memprof_method, used when dumping the stats */
+static const char *const memprof_methods[MEMPROF_METH_METHODS] = {
+	"unknown", "malloc", "calloc", "realloc", "free", "p_alloc", "p_free",
+};
+
+/* last one is for hash collisions ("others") and has no caller address */
+struct memprof_stats memprof_stats[MEMPROF_HASH_BUCKETS + 1] = { };
+
+/* used to detect recursive calls (e.g. dlsym() allocating during init) */
+static THREAD_LOCAL int in_memprof = 0;
+
+/* These ones are used by glibc and will be called early. They are in charge of
+ * initializing the handlers with the original functions.
+ */
+static void *memprof_malloc_initial_handler(size_t size);
+static void *memprof_calloc_initial_handler(size_t nmemb, size_t size);
+static void *memprof_realloc_initial_handler(void *ptr, size_t size);
+static void memprof_free_initial_handler(void *ptr);
+
+/* Fallback handlers for the main alloc/free functions. They are preset to
+ * the initializer in order to save a test in the functions' critical path.
+ */
+static void *(*memprof_malloc_handler)(size_t size) = memprof_malloc_initial_handler;
+static void *(*memprof_calloc_handler)(size_t nmemb, size_t size) = memprof_calloc_initial_handler;
+static void *(*memprof_realloc_handler)(void *ptr, size_t size) = memprof_realloc_initial_handler;
+static void (*memprof_free_handler)(void *ptr) = memprof_free_initial_handler;
+
+/* Used to force to die if it's not possible to retrieve the allocation
+ * functions. We cannot even use stdio in this case, hence the raw write()
+ * to stderr (fd 2) followed by exit().
+ */
+static __attribute__((noreturn)) void memprof_die(const char *msg)
+{
+	DISGUISE(write(2, msg, strlen(msg)));
+	exit(1);
+}
+
+/* Resolve original allocation functions and initialize all handlers.
+ * This must be called very early at boot, before the very first malloc()
+ * call, and is not thread-safe! It's not even possible to use stdio there.
+ * Worse, we have to account for the risk of reentrance from dlsym() when
+ * it tries to prepare its error messages. Here it's handled by in_memprof
+ * that makes allocators return NULL. dlsym() handles it gracefully. An
+ * alternate approach consists in calling aligned_alloc() from these places
+ * but that would mean not being able to intercept it later if considered
+ * useful to do so.
+ */
+static void memprof_init()
+{
+	in_memprof++;
+	memprof_malloc_handler  = get_sym_next_addr("malloc");
+	if (!memprof_malloc_handler)
+		memprof_die("FATAL: malloc() function not found.\n");
+
+	memprof_calloc_handler  = get_sym_next_addr("calloc");
+	if (!memprof_calloc_handler)
+		memprof_die("FATAL: calloc() function not found.\n");
+
+	memprof_realloc_handler = get_sym_next_addr("realloc");
+	if (!memprof_realloc_handler)
+		memprof_die("FATAL: realloc() function not found.\n");
+
+	memprof_free_handler    = get_sym_next_addr("free");
+	if (!memprof_free_handler)
+		memprof_die("FATAL: free() function not found.\n");
+	in_memprof--;
+}
+
+/* the initial handlers will initialize all regular handlers and will call the
+ * one they correspond to. A single one of these functions will typically be
+ * called, though it's unknown which one (as any might be called before main).
+ */
+static void *memprof_malloc_initial_handler(size_t size)
+{
+	if (in_memprof) {
+		/* it's likely that dlsym() needs malloc(), let's fail */
+		return NULL;
+	}
+
+	memprof_init();
+	return memprof_malloc_handler(size);
+}
+
+static void *memprof_calloc_initial_handler(size_t nmemb, size_t size)
+{
+	if (in_memprof) {
+		/* it's likely that dlsym() needs calloc(), let's fail */
+		return NULL;
+	}
+	memprof_init();
+	return memprof_calloc_handler(nmemb, size);
+}
+
+static void *memprof_realloc_initial_handler(void *ptr, size_t size)
+{
+	if (in_memprof) {
+		/* it's likely that dlsym() needs realloc(), let's fail */
+		return NULL;
+	}
+
+	memprof_init();
+	return memprof_realloc_handler(ptr, size);
+}
+
+/* NOTE(review): unlike the allocators above, there is no in_memprof guard
+ * here — presumably dlsym() never needs free() before allocating; confirm.
+ */
+static void memprof_free_initial_handler(void *ptr)
+{
+	memprof_init();
+	memprof_free_handler(ptr);
+}
+
+/* Assign a bin for the memprof_stats to the return address. May perform a few
+ * attempts before finding the right one, but always succeeds (in the worst
+ * case, returns a default bin). The caller address is atomically set except
+ * for the default one which is never set. Collisions are resolved by linear
+ * probing over up to 16 consecutive buckets; lock-free claiming is done with
+ * a CAS on the bucket's caller pointer.
+ */
+struct memprof_stats *memprof_get_bin(const void *ra, enum memprof_method meth)
+{
+	int retries = 16; // up to 16 consecutive entries may be tested.
+	const void *old;
+	unsigned int bin;
+
+	bin = ptr_hash(ra, MEMPROF_HASH_BITS);
+	for (; memprof_stats[bin].caller != ra; bin = (bin + 1) & (MEMPROF_HASH_BUCKETS - 1)) {
+		if (!--retries) {
+			/* too many collisions: fall back to the shared "others" bucket */
+			bin = MEMPROF_HASH_BUCKETS;
+			break;
+		}
+
+		old = NULL;
+		/* claim a free bucket; only the winning thread sets the method */
+		if (!memprof_stats[bin].caller &&
+		    HA_ATOMIC_CAS(&memprof_stats[bin].caller, &old, ra)) {
+			memprof_stats[bin].method = meth;
+			break;
+		}
+	}
+	return &memprof_stats[bin];
+}
+
+/* This is the new global malloc() function. It must optimize for the normal
+ * case (i.e. profiling disabled) hence the first test to permit a direct jump.
+ * It must remain simple to guarantee the lack of reentrance. stdio is not
+ * possible there even for debugging. The reported size is the really allocated
+ * one as returned by malloc_usable_size() (plus one pointer of bookkeeping),
+ * because this will allow it to be compared to the one before realloc() or
+ * free(). This is a GNU and jemalloc extension but other systems may also
+ * store this size in ptr[-1].
+ */
+void *malloc(size_t size)
+{
+	struct memprof_stats *bin;
+	void *ret;
+
+	/* fast path: hand off to the real allocator when profiling is off */
+	if (likely(!(profiling & HA_PROF_MEMORY)))
+		return memprof_malloc_handler(size);
+
+	ret = memprof_malloc_handler(size);
+	size = malloc_usable_size(ret) + sizeof(void *);
+
+	bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_MALLOC);
+	_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
+	_HA_ATOMIC_ADD(&bin->alloc_tot, size);
+	return ret;
+}
+
+/* This is the new global calloc() function. It must optimize for the normal
+ * case (i.e. profiling disabled) hence the first test to permit a direct jump.
+ * It must remain simple to guarantee the lack of reentrance. stdio is not
+ * possible there even for debugging. The reported size is the really allocated
+ * one as returned by malloc_usable_size() (plus one pointer of bookkeeping),
+ * because this will allow it to be compared to the one before realloc() or
+ * free(). This is a GNU and jemalloc extension but other systems may also
+ * store this size in ptr[-1].
+ */
+void *calloc(size_t nmemb, size_t size)
+{
+	struct memprof_stats *bin;
+	void *ret;
+
+	/* fast path: hand off to the real allocator when profiling is off */
+	if (likely(!(profiling & HA_PROF_MEMORY)))
+		return memprof_calloc_handler(nmemb, size);
+
+	ret = memprof_calloc_handler(nmemb, size);
+	size = malloc_usable_size(ret) + sizeof(void *);
+
+	bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_CALLOC);
+	_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
+	_HA_ATOMIC_ADD(&bin->alloc_tot, size);
+	return ret;
+}
+
+/* This is the new global realloc() function. It must optimize for the normal
+ * case (i.e. profiling disabled) hence the first test to permit a direct jump.
+ * It must remain simple to guarantee the lack of reentrance. stdio is not
+ * possible there even for debugging. The reported size is the really allocated
+ * one as returned by malloc_usable_size(), because this will allow it to be
+ * compared to the one before realloc() or free(). This is a GNU and jemalloc
+ * extension but other systems may also store this size in ptr[-1].
+ * Depending on the old vs new size, it's considered as an allocation or a free
+ * (or neither if the size remains the same).
+ */
+void *realloc(void *ptr, size_t size)
+{
+	struct memprof_stats *bin;
+	size_t size_before;
+	void *ret;
+
+	/* fast path: hand off to the real allocator when profiling is off */
+	if (likely(!(profiling & HA_PROF_MEMORY)))
+		return memprof_realloc_handler(ptr, size);
+
+	size_before = malloc_usable_size(ptr);
+	ret = memprof_realloc_handler(ptr, size);
+	size = malloc_usable_size(ret);
+
+	/* only count the extra link for new allocations */
+	if (!ptr)
+		size += sizeof(void *);
+
+	bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_REALLOC);
+	if (size > size_before) {
+		/* grew: account the delta as an allocation */
+		_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
+		_HA_ATOMIC_ADD(&bin->alloc_tot, size - size_before);
+	} else if (size < size_before) {
+		/* shrank: account the delta as a free */
+		_HA_ATOMIC_ADD(&bin->free_calls, 1);
+		_HA_ATOMIC_ADD(&bin->free_tot, size_before - size);
+	}
+	return ret;
+}
+
+/* This is the new global free() function. It must optimize for the normal
+ * case (i.e. profiling disabled) hence the first test to permit a direct jump.
+ * It must remain simple to guarantee the lack of reentrance. stdio is not
+ * possible there even for debugging. The reported size is the really allocated
+ * one as returned by malloc_usable_size() (plus one pointer of bookkeeping),
+ * because this will allow it to be compared to the one recorded at allocation
+ * time. This is a GNU and jemalloc extension but other systems may also store
+ * this size in ptr[-1]. Since free() is often called on NULL pointers to
+ * collect garbage at the end of many functions or during config parsing, as a
+ * special case free(NULL) doesn't update any stats.
+ */
+void free(void *ptr)
+{
+	struct memprof_stats *bin;
+	size_t size_before;
+
+	/* fast path: profiling off, or free(NULL) which is never accounted */
+	if (likely(!(profiling & HA_PROF_MEMORY) || !ptr)) {
+		memprof_free_handler(ptr);
+		return;
+	}
+
+	/* the usable size must be read before the block is released */
+	size_before = malloc_usable_size(ptr) + sizeof(void *);
+	memprof_free_handler(ptr);
+
+	bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_FREE);
+	_HA_ATOMIC_ADD(&bin->free_calls, 1);
+	_HA_ATOMIC_ADD(&bin->free_tot, size_before);
+}
+
+#endif // USE_MEMORY_PROFILING
+
+/* Updates the current thread's statistics about stolen CPU time. The unit for
+ * <stolen> is half-milliseconds.
+ */
+void report_stolen_time(uint64_t stolen)
+{
+ /* cumulative total plus 1s and 15s sliding windows for this thread */
+ activity[tid].cpust_total += stolen;
+ update_freq_ctr(&activity[tid].cpust_1s, stolen);
+ update_freq_ctr_period(&activity[tid].cpust_15s, 15000, stolen);
+}
+
+/* Update avg_loop value for the current thread and possibly decide to enable
+ * task-level profiling on the current thread based on its average run time.
+ * The <run_time> argument is the number of microseconds elapsed since the
+ * last time poll() returned.
+ */
+void activity_count_runtime(uint32_t run_time)
+{
+ uint32_t up, down;
+
+ /* 1 millisecond per loop on average over last 1024 iterations is
+ * enough to turn on profiling.
+ */
+ up = 1000;
+ down = up * 99 / 100; /* 1% hysteresis so the state doesn't flap */
+
+ run_time = swrate_add(&activity[tid].avg_loop_us, TIME_STATS_SAMPLES, run_time);
+
+ /* In automatic mode, reaching the "up" threshold on average switches
+ * profiling to "on" when automatic, and going back below the "down"
+ * threshold switches to off. The forced modes don't check the load.
+ */
+ if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
+ if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
+ ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
+ swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
+ _HA_ATOMIC_OR(&th_ctx->flags, TH_FL_TASK_PROFILING);
+ } else {
+ if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_OFF ||
+ ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AOFF &&
+ swrate_avg(run_time, TIME_STATS_SAMPLES) <= down)))
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_TASK_PROFILING);
+ }
+}
+
+#ifdef USE_MEMORY_PROFILING
+/* config parser for global "profiling.memory", accepts "on" or "off" */
+static int cfg_parse_prof_memory(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ /* exactly one argument is expected */
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "off") == 0) {
+ profiling &= ~HA_PROF_MEMORY;
+ return 0;
+ }
+
+ if (strcmp(args[1], "on") == 0) {
+ profiling |= HA_PROF_MEMORY;
+ HA_ATOMIC_STORE(&prof_mem_start_ns, now_ns);
+ return 0;
+ }
+
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+}
+#endif // USE_MEMORY_PROFILING
+
+/* config parser for global "profiling.tasks", accepts "on" or "off" */
+static int cfg_parse_prof_tasks(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ unsigned int mode;
+
+ /* exactly one argument is expected */
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ mode = HA_PROF_TASKS_ON;
+ else if (strcmp(args[1], "auto") == 0)
+ mode = HA_PROF_TASKS_AOFF;
+ else if (strcmp(args[1], "off") == 0)
+ mode = HA_PROF_TASKS_OFF;
+ else {
+ memprintf(err, "'%s' expects either 'on', 'auto', or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+
+ profiling = (profiling & ~HA_PROF_TASKS_MASK) | mode;
+ /* the start date is only recorded when profiling is not fully off */
+ if (mode != HA_PROF_TASKS_OFF)
+ HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
+ return 0;
+}
+
+/* parse a "set profiling" command. It always returns 1. */
+static int cli_parse_set_profiling(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (strcmp(args[2], "memory") == 0) {
+#ifdef USE_MEMORY_PROFILING
+ if (strcmp(args[3], "on") == 0) {
+ unsigned int old = profiling;
+ int i;
+
+ /* atomically set the memory profiling bit */
+ while (!_HA_ATOMIC_CAS(&profiling, &old, old | HA_PROF_MEMORY))
+ ;
+
+ HA_ATOMIC_STORE(&prof_mem_start_ns, now_ns);
+ HA_ATOMIC_STORE(&prof_mem_stop_ns, 0);
+
+ /* also flush current profiling stats */
+ for (i = 0; i < sizeof(memprof_stats) / sizeof(memprof_stats[0]); i++) {
+ HA_ATOMIC_STORE(&memprof_stats[i].alloc_calls, 0);
+ HA_ATOMIC_STORE(&memprof_stats[i].free_calls, 0);
+ HA_ATOMIC_STORE(&memprof_stats[i].alloc_tot, 0);
+ HA_ATOMIC_STORE(&memprof_stats[i].free_tot, 0);
+ HA_ATOMIC_STORE(&memprof_stats[i].caller, NULL);
+ }
+ }
+ else if (strcmp(args[3], "off") == 0) {
+ unsigned int old = profiling;
+
+ /* atomically clear the memory profiling bit */
+ while (!_HA_ATOMIC_CAS(&profiling, &old, old & ~HA_PROF_MEMORY))
+ ;
+
+ /* only mark a stop date if profiling had been started */
+ if (HA_ATOMIC_LOAD(&prof_mem_start_ns))
+ HA_ATOMIC_STORE(&prof_mem_stop_ns, now_ns);
+ }
+ else
+ return cli_err(appctx, "Expects either 'on' or 'off'.\n");
+ return 1;
+#else
+ return cli_err(appctx, "Memory profiling not compiled in.\n");
+#endif
+ }
+
+ if (strcmp(args[2], "tasks") != 0)
+ return cli_err(appctx, "Expects either 'tasks' or 'memory'.\n");
+
+ if (strcmp(args[3], "on") == 0) {
+ unsigned int old = profiling;
+ int i;
+
+ /* atomically switch task profiling to forced-on */
+ while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_ON))
+ ;
+
+ HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
+ HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
+
+ /* also flush current profiling stats */
+ for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++) {
+ HA_ATOMIC_STORE(&sched_activity[i].calls, 0);
+ HA_ATOMIC_STORE(&sched_activity[i].cpu_time, 0);
+ HA_ATOMIC_STORE(&sched_activity[i].lat_time, 0);
+ HA_ATOMIC_STORE(&sched_activity[i].func, NULL);
+ HA_ATOMIC_STORE(&sched_activity[i].caller, NULL);
+ }
+ }
+ else if (strcmp(args[3], "auto") == 0) {
+ unsigned int old = profiling;
+ unsigned int new;
+
+ /* preserve the current automatic on/off sub-state when already
+ * in one of the automatic modes, otherwise start in auto-off
+ */
+ do {
+ if ((old & HA_PROF_TASKS_MASK) >= HA_PROF_TASKS_AON)
+ new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AON;
+ else
+ new = (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_AOFF;
+ } while (!_HA_ATOMIC_CAS(&profiling, &old, new));
+
+ HA_ATOMIC_STORE(&prof_task_start_ns, now_ns);
+ HA_ATOMIC_STORE(&prof_task_stop_ns, 0);
+ }
+ else if (strcmp(args[3], "off") == 0) {
+ unsigned int old = profiling;
+ /* atomically switch task profiling to forced-off */
+ while (!_HA_ATOMIC_CAS(&profiling, &old, (old & ~HA_PROF_TASKS_MASK) | HA_PROF_TASKS_OFF))
+ ;
+
+ /* only mark a stop date if profiling had been started */
+ if (HA_ATOMIC_LOAD(&prof_task_start_ns))
+ HA_ATOMIC_STORE(&prof_task_stop_ns, now_ns);
+ }
+ else
+ return cli_err(appctx, "Expects 'on', 'auto', or 'off'.\n");
+
+ return 1;
+}
+
+/* qsort comparator: orders sched_activity entries by decreasing call count */
+static int cmp_sched_activity_calls(const void *a, const void *b)
+{
+ const struct sched_activity *l = (const struct sched_activity *)a;
+ const struct sched_activity *r = (const struct sched_activity *)b;
+
+ /* yields -1/0/1 for descending order on <calls> */
+ return (l->calls < r->calls) - (l->calls > r->calls);
+}
+
+/* sort by address first, then by call count */
+static int cmp_sched_activity_addr(const void *a, const void *b)
+{
+ const struct sched_activity *l = (const struct sched_activity *)a;
+ const struct sched_activity *r = (const struct sched_activity *)b;
+
+ /* primary key: function pointer, descending */
+ if (l->func != r->func)
+ return (l->func < r->func) - (l->func > r->func);
+
+ /* secondary key: call count, descending */
+ return (l->calls < r->calls) - (l->calls > r->calls);
+}
+
+/* sort by cpu time first, then by inverse call count (to spot highest offenders) */
+static int cmp_sched_activity_cpu(const void *a, const void *b)
+{
+ const struct sched_activity *l = (const struct sched_activity *)a;
+ const struct sched_activity *r = (const struct sched_activity *)b;
+
+ /* primary key: cpu time, descending */
+ if (l->cpu_time != r->cpu_time)
+ return (l->cpu_time < r->cpu_time) - (l->cpu_time > r->cpu_time);
+
+ /* tie-break: fewer calls first, so the worst per-call cost surfaces */
+ return (l->calls > r->calls) - (l->calls < r->calls);
+}
+
+#ifdef USE_MEMORY_PROFILING
+/* used by qsort below */
+static int cmp_memprof_stats(const void *a, const void *b)
+{
+ const struct memprof_stats *l = (const struct memprof_stats *)a;
+ const struct memprof_stats *r = (const struct memprof_stats *)b;
+ unsigned long long ltot = l->alloc_tot + l->free_tot;
+ unsigned long long rtot = r->alloc_tot + r->free_tot;
+
+ /* descending order on total traffic (allocated + freed bytes) */
+ return (ltot < rtot) - (ltot > rtot);
+}
+
+/* qsort comparator: orders memprof entries by decreasing caller address */
+static int cmp_memprof_addr(const void *a, const void *b)
+{
+ const struct memprof_stats *l = (const struct memprof_stats *)a;
+ const struct memprof_stats *r = (const struct memprof_stats *)b;
+
+ /* yields -1/0/1 for descending order on <caller> */
+ return (l->caller < r->caller) - (l->caller > r->caller);
+}
+#endif // USE_MEMORY_PROFILING
+
+/* Computes the index of function pointer <func> and caller <caller> for use
+ * with sched_activity[] or any other similar array passed in <array>, and
+ * returns a pointer to the entry after having atomically assigned it to this
+ * function pointer and caller combination. Note that in case of collision,
+ * the first entry is returned instead ("other").
+ */
+struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func, const void *caller)
+{
+ uint32_t hash = ptr2_hash(func, caller, SCHED_ACT_HASH_BITS);
+ struct sched_activity *ret;
+ const void *old;
+ int tries = 16;
+
+ /* linear probing over up to 16 consecutive buckets.
+ * NOTE(review): <hash> is incremented without being re-masked to
+ * SCHED_ACT_HASH_BITS here; verify the array has headroom for the
+ * probe overflow or that the increment is expected to wrap elsewhere.
+ */
+ for (tries = 16; tries > 0; tries--, hash++) {
+ ret = &array[hash];
+
+ while (1) {
+ if (likely(ret->func)) {
+ /* bucket already reserved: either it's ours, or
+ * we move on to the next probe position
+ */
+ if (likely(ret->func == func && ret->caller == caller))
+ return ret;
+ break;
+ }
+
+ /* try to create the new entry. Func is sufficient to
+ * reserve the node.
+ */
+ old = NULL;
+ if (HA_ATOMIC_CAS(&ret->func, &old, func)) {
+ ret->caller = caller;
+ return ret;
+ }
+ /* changed in parallel, check again */
+ }
+ }
+
+ /* no suitable bucket found: fall back to the first entry ("other") */
+ return array;
+}
+
+/* This function dumps all profiling settings. It returns 0 if the output
+ * buffer is full and it needs to be called again, otherwise non-zero.
+ * It dumps some parts depending on the following states from show_prof_ctx:
+ * dump_step:
+ * 0, 4: dump status, then jump to 1 if 0
+ * 1, 5: dump tasks, then jump to 2 if 1
+ * 2, 6: dump memory, then stop
+ * linenum:
+ * restart line for each step (starts at zero)
+ * maxcnt:
+ * may contain a configured max line count for each step (0=not set)
+ * byaddr:
+ * 0: sort by usage
+ * 1: sort by address
+ */
+static int cli_io_handler_show_profiling(struct appctx *appctx)
+{
+ struct show_prof_ctx *ctx = appctx->svcctx;
+ struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+#ifdef USE_MEMORY_PROFILING
+ struct memprof_stats tmp_memstats[MEMPROF_HASH_BUCKETS + 1];
+ unsigned long long tot_alloc_calls, tot_free_calls;
+ unsigned long long tot_alloc_bytes, tot_free_bytes;
+#endif
+ struct stconn *sc = appctx_sc(appctx);
+ struct buffer *name_buffer = get_trash_chunk();
+ const struct ha_caller *caller;
+ const char *str;
+ int max_lines;
+ int i, j, max;
+
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+ return 1;
+
+ chunk_reset(&trash);
+
+ /* pretty-print the current task profiling mode */
+ switch (profiling & HA_PROF_TASKS_MASK) {
+ case HA_PROF_TASKS_AOFF: str="auto-off"; break;
+ case HA_PROF_TASKS_AON: str="auto-on"; break;
+ case HA_PROF_TASKS_ON: str="on"; break;
+ default: str="off"; break;
+ }
+
+ if ((ctx->dump_step & 3) != 0)
+ goto skip_status;
+
+ chunk_printf(&trash,
+ "Per-task CPU profiling : %-8s # set profiling tasks {on|auto|off}\n"
+ "Memory usage profiling : %-8s # set profiling memory {on|off}\n",
+ str, (profiling & HA_PROF_MEMORY) ? "on" : "off");
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* failed, try again */
+ return 0;
+ }
+
+ ctx->linenum = 0; // reset first line to dump
+ if ((ctx->dump_step & 4) == 0)
+ ctx->dump_step++; // next step
+
+ skip_status:
+ if ((ctx->dump_step & 3) != 1)
+ goto skip_tasks;
+
+ /* snapshot the live stats so sorting doesn't disturb the counters */
+ memcpy(tmp_activity, sched_activity, sizeof(tmp_activity));
+ /* for addr sort and for callee aggregation we have to first sort by address */
+ if (ctx->aggr || ctx->by_what == 1) // sort by addr
+ qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_addr);
+
+ if (ctx->aggr) {
+ /* merge entries for the same callee and reset their count */
+ for (i = j = 0; i < SCHED_ACT_HASH_BUCKETS; i = j) {
+ for (j = i + 1; j < SCHED_ACT_HASH_BUCKETS && tmp_activity[j].func == tmp_activity[i].func; j++) {
+ tmp_activity[i].calls += tmp_activity[j].calls;
+ tmp_activity[i].cpu_time += tmp_activity[j].cpu_time;
+ tmp_activity[i].lat_time += tmp_activity[j].lat_time;
+ tmp_activity[j].calls = 0;
+ }
+ }
+ }
+
+ if (!ctx->by_what) // sort by usage
+ qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_calls);
+ else if (ctx->by_what == 2) // by cpu_tot
+ qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_cpu);
+
+ if (!ctx->linenum)
+ chunk_appendf(&trash, "Tasks activity over %.3f sec till %.3f sec ago:\n"
+ " function calls cpu_tot cpu_avg lat_tot lat_avg\n",
+ (prof_task_start_ns ? (prof_task_stop_ns ? prof_task_stop_ns : now_ns) - prof_task_start_ns : 0) / 1000000000.0,
+ (prof_task_stop_ns ? now_ns - prof_task_stop_ns : 0) / 1000000000.0);
+
+ max_lines = ctx->maxcnt;
+ if (!max_lines)
+ max_lines = SCHED_ACT_HASH_BUCKETS;
+
+ for (i = ctx->linenum; i < max_lines; i++) {
+ if (!tmp_activity[i].calls)
+ continue; // skip aggregated or empty entries
+
+ ctx->linenum = i;
+ chunk_reset(name_buffer);
+ caller = HA_ATOMIC_LOAD(&tmp_activity[i].caller);
+
+ if (!tmp_activity[i].func)
+ chunk_printf(name_buffer, "other");
+ else
+ resolve_sym_name(name_buffer, "", tmp_activity[i].func);
+
+ /* reserve 35 chars for name+' '+#calls, knowing that longer names
+ * are often used for less often called functions.
+ */
+ max = 35 - name_buffer->data;
+ if (max < 1)
+ max = 1;
+ chunk_appendf(&trash, " %s%*llu", name_buffer->area, max, (unsigned long long)tmp_activity[i].calls);
+
+ print_time_short(&trash, " ", tmp_activity[i].cpu_time, "");
+ print_time_short(&trash, " ", tmp_activity[i].cpu_time / tmp_activity[i].calls, "");
+ print_time_short(&trash, " ", tmp_activity[i].lat_time, "");
+ print_time_short(&trash, " ", tmp_activity[i].lat_time / tmp_activity[i].calls, "");
+
+ if (caller && !ctx->aggr && caller->what <= WAKEUP_TYPE_APPCTX_WAKEUP)
+ chunk_appendf(&trash, " <- %s@%s:%d %s",
+ caller->func, caller->file, caller->line,
+ task_wakeup_type_str(caller->what));
+
+ b_putchr(&trash, '\n');
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* failed, try again */
+ return 0;
+ }
+ }
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* failed, try again */
+ return 0;
+ }
+
+ ctx->linenum = 0; // reset first line to dump
+ if ((ctx->dump_step & 4) == 0)
+ ctx->dump_step++; // next step
+
+ skip_tasks:
+
+#ifdef USE_MEMORY_PROFILING
+ if ((ctx->dump_step & 3) != 2)
+ goto skip_mem;
+
+ /* snapshot the live stats so sorting doesn't disturb the counters */
+ memcpy(tmp_memstats, memprof_stats, sizeof(tmp_memstats));
+ if (ctx->by_what)
+ qsort(tmp_memstats, MEMPROF_HASH_BUCKETS+1, sizeof(tmp_memstats[0]), cmp_memprof_addr);
+ else
+ qsort(tmp_memstats, MEMPROF_HASH_BUCKETS+1, sizeof(tmp_memstats[0]), cmp_memprof_stats);
+
+ if (!ctx->linenum)
+ chunk_appendf(&trash,
+ "Alloc/Free statistics by call place over %.3f sec till %.3f sec ago:\n"
+ " Calls | Tot Bytes | Caller and method\n"
+ "<- alloc -> <- free ->|<-- alloc ---> <-- free ---->|\n",
+ (prof_mem_start_ns ? (prof_mem_stop_ns ? prof_mem_stop_ns : now_ns) - prof_mem_start_ns : 0) / 1000000000.0,
+ (prof_mem_stop_ns ? now_ns - prof_mem_stop_ns : 0) / 1000000000.0);
+
+ max_lines = ctx->maxcnt;
+ if (!max_lines)
+ max_lines = MEMPROF_HASH_BUCKETS + 1;
+
+ for (i = ctx->linenum; i < max_lines; i++) {
+ struct memprof_stats *entry = &tmp_memstats[i];
+
+ ctx->linenum = i;
+ if (!entry->alloc_calls && !entry->free_calls)
+ continue;
+ chunk_appendf(&trash, "%11llu %11llu %14llu %14llu| %16p ",
+ entry->alloc_calls, entry->free_calls,
+ entry->alloc_tot, entry->free_tot,
+ entry->caller);
+
+ if (entry->caller)
+ resolve_sym_name(&trash, NULL, entry->caller);
+ else
+ chunk_appendf(&trash, "[other]");
+
+ /* average per-call delta; the divisor is non-zero since at least
+ * one of alloc_calls/free_calls is non-zero at this point
+ */
+ chunk_appendf(&trash," %s(%lld)", memprof_methods[entry->method],
+ (long long)(entry->alloc_tot - entry->free_tot) / (long long)(entry->alloc_calls + entry->free_calls));
+
+ if (entry->alloc_tot && entry->free_tot) {
+ /* that's a realloc, show the total diff to help spot leaks */
+ chunk_appendf(&trash," [delta=%lld]", (long long)(entry->alloc_tot - entry->free_tot));
+ }
+
+ if (entry->info) {
+ /* that's a pool name */
+ const struct pool_head *pool = entry->info;
+ chunk_appendf(&trash," [pool=%s]", pool->name);
+ }
+
+ chunk_appendf(&trash, "\n");
+
+ if (applet_putchk(appctx, &trash) == -1)
+ return 0;
+ }
+
+ if (applet_putchk(appctx, &trash) == -1)
+ return 0;
+
+ /* grand totals across all dumped entries */
+ tot_alloc_calls = tot_free_calls = tot_alloc_bytes = tot_free_bytes = 0;
+ for (i = 0; i < max_lines; i++) {
+ tot_alloc_calls += tmp_memstats[i].alloc_calls;
+ tot_free_calls += tmp_memstats[i].free_calls;
+ tot_alloc_bytes += tmp_memstats[i].alloc_tot;
+ tot_free_bytes += tmp_memstats[i].free_tot;
+ }
+
+ chunk_appendf(&trash,
+ "-----------------------|-----------------------------|\n"
+ "%11llu %11llu %14llu %14llu| <- Total; Delta_calls=%lld; Delta_bytes=%lld\n",
+ tot_alloc_calls, tot_free_calls,
+ tot_alloc_bytes, tot_free_bytes,
+ tot_alloc_calls - tot_free_calls,
+ tot_alloc_bytes - tot_free_bytes);
+
+ if (applet_putchk(appctx, &trash) == -1)
+ return 0;
+
+ ctx->linenum = 0; // reset first line to dump
+ if ((ctx->dump_step & 4) == 0)
+ ctx->dump_step++; // next step
+
+ skip_mem:
+#endif // USE_MEMORY_PROFILING
+
+ return 1;
+}
+
+/* parse a "show profiling" command. It returns 1 on failure, 0 if it starts to dump.
+ * - cli.i0 is set to the first state (0=all, 4=status, 5=tasks, 6=memory)
+ * - cli.o1 is set to 1 if the output must be sorted by addr instead of usage
+ * - cli.o0 is set to the number of lines of output
+ */
+static int cli_parse_show_profiling(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_prof_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ const char *word;
+ int idx;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ /* scan all remaining words; each one either selects a section,
+ * a sort/aggregation option, or a max line count
+ */
+ for (idx = 2; *args[idx]; idx++) {
+ word = args[idx];
+
+ if (isdigit((unsigned char)*word)) {
+ ctx->maxcnt = atoi(word); /* number of entries to dump */
+ continue;
+ }
+
+ if (strcmp(word, "all") == 0)
+ ctx->dump_step = 0; /* will cycle through 0,1,2; default */
+ else if (strcmp(word, "status") == 0)
+ ctx->dump_step = 4; /* will visit status only */
+ else if (strcmp(word, "tasks") == 0)
+ ctx->dump_step = 5; /* will visit tasks only */
+ else if (strcmp(word, "memory") == 0)
+ ctx->dump_step = 6; /* will visit memory only */
+ else if (strcmp(word, "byaddr") == 0)
+ ctx->by_what = 1; /* sort output by address instead of usage */
+ else if (strcmp(word, "bytime") == 0)
+ ctx->by_what = 2; /* sort output by total time instead of usage */
+ else if (strcmp(word, "aggr") == 0)
+ ctx->aggr = 1; /* aggregate output by callee */
+ else
+ return cli_err(appctx, "Expects either 'all', 'status', 'tasks', 'memory', 'byaddr', 'bytime', 'aggr' or a max number of output lines.\n");
+ }
+ return 0;
+}
+
+/* This function scans all threads' run queues and collects statistics about
+ * running tasks. It returns 0 if the output buffer is full and it needs to be
+ * called again, otherwise non-zero.
+ */
+static int cli_io_handler_show_tasks(struct appctx *appctx)
+{
+ struct sched_activity tmp_activity[SCHED_ACT_HASH_BUCKETS] __attribute__((aligned(64)));
+ struct stconn *sc = appctx_sc(appctx);
+ struct buffer *name_buffer = get_trash_chunk();
+ struct sched_activity *entry;
+ const struct tasklet *tl;
+ const struct task *t;
+ uint64_t now_ns, lat;
+ struct eb32_node *rqnode;
+ uint64_t tot_calls;
+ int thr, queue;
+ int i, max;
+
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+ return 1;
+
+ /* It's not possible to scan queues in small chunks and yield in the
+ * middle of the dump and come back again. So what we're doing instead
+ * is to freeze all threads and inspect their queues at once as fast as
+ * possible, using a sched_activity array to collect metrics with
+ * limited collision, then we'll report statistics only. The tasks'
+ * #calls will reflect the number of occurrences, and the lat_time will
+ * reflect the latency when set. We prefer to take the time before
+ * calling thread_isolate() so that the wait time doesn't impact the
+ * measurement accuracy. However this requires to take care of negative
+ * times since tasks might be queued after we retrieve it.
+ */
+
+ now_ns = now_mono_time();
+ memset(tmp_activity, 0, sizeof(tmp_activity));
+
+ thread_isolate();
+
+ /* 1. global run queue */
+
+#ifdef USE_THREAD
+ for (thr = 0; thr < global.nbthread; thr++) {
+ /* task run queue */
+ rqnode = eb32_first(&ha_thread_ctx[thr].rqueue_shared);
+ while (rqnode) {
+ t = eb32_entry(rqnode, struct task, rq);
+ entry = sched_activity_entry(tmp_activity, t->process, NULL);
+ if (t->wake_date) {
+ lat = now_ns - t->wake_date;
+ /* guard against tasks queued after <now_ns> was taken */
+ if ((int64_t)lat > 0)
+ entry->lat_time += lat;
+ }
+ entry->calls++;
+ rqnode = eb32_next(rqnode);
+ }
+ }
+#endif
+ /* 2. all threads' local run queues */
+ for (thr = 0; thr < global.nbthread; thr++) {
+ /* task run queue */
+ rqnode = eb32_first(&ha_thread_ctx[thr].rqueue);
+ while (rqnode) {
+ t = eb32_entry(rqnode, struct task, rq);
+ entry = sched_activity_entry(tmp_activity, t->process, NULL);
+ if (t->wake_date) {
+ lat = now_ns - t->wake_date;
+ if ((int64_t)lat > 0)
+ entry->lat_time += lat;
+ }
+ entry->calls++;
+ rqnode = eb32_next(rqnode);
+ }
+
+ /* shared tasklet list */
+ list_for_each_entry(tl, mt_list_to_list(&ha_thread_ctx[thr].shared_tasklet_list), list) {
+ t = (const struct task *)tl;
+ entry = sched_activity_entry(tmp_activity, t->process, NULL);
+ if (!TASK_IS_TASKLET(t) && t->wake_date) {
+ lat = now_ns - t->wake_date;
+ if ((int64_t)lat > 0)
+ entry->lat_time += lat;
+ }
+ entry->calls++;
+ }
+
+ /* classful tasklets */
+ for (queue = 0; queue < TL_CLASSES; queue++) {
+ list_for_each_entry(tl, &ha_thread_ctx[thr].tasklets[queue], list) {
+ t = (const struct task *)tl;
+ entry = sched_activity_entry(tmp_activity, t->process, NULL);
+ if (!TASK_IS_TASKLET(t) && t->wake_date) {
+ lat = now_ns - t->wake_date;
+ if ((int64_t)lat > 0)
+ entry->lat_time += lat;
+ }
+ entry->calls++;
+ }
+ }
+ }
+
+ /* hopefully we're done */
+ thread_release();
+
+ chunk_reset(&trash);
+
+ tot_calls = 0;
+ for (i = 0; i < SCHED_ACT_HASH_BUCKETS; i++)
+ tot_calls += tmp_activity[i].calls;
+
+ qsort(tmp_activity, SCHED_ACT_HASH_BUCKETS, sizeof(tmp_activity[0]), cmp_sched_activity_calls);
+
+ chunk_appendf(&trash, "Running tasks: %d (%d threads)\n"
+ " function places %% lat_tot lat_avg\n",
+ (int)tot_calls, global.nbthread);
+
+ for (i = 0; i < SCHED_ACT_HASH_BUCKETS && tmp_activity[i].calls; i++) {
+ chunk_reset(name_buffer);
+
+ if (!tmp_activity[i].func)
+ chunk_printf(name_buffer, "other");
+ else
+ resolve_sym_name(name_buffer, "", tmp_activity[i].func);
+
+ /* reserve 35 chars for name+' '+#calls, knowing that longer names
+ * are often used for less often called functions.
+ */
+ max = 35 - name_buffer->data;
+ if (max < 1)
+ max = 1;
+ chunk_appendf(&trash, " %s%*llu %3d.%1d",
+ name_buffer->area, max, (unsigned long long)tmp_activity[i].calls,
+ (int)(100ULL * tmp_activity[i].calls / tot_calls),
+ (int)((1000ULL * tmp_activity[i].calls / tot_calls)%10));
+ print_time_short(&trash, " ", tmp_activity[i].lat_time, "");
+ print_time_short(&trash, " ", tmp_activity[i].lat_time / tmp_activity[i].calls, "\n");
+ }
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* failed, try again */
+ return 0;
+ }
+ return 1;
+}
+
+/* This function dumps some activity counters used by developers and support to
+ * rule out some hypothesis during bug reports. It returns 0 if the output
+ * buffer is full and it needs to be called again, otherwise non-zero. It dumps
+ * everything at once in the buffer and is not designed to do it in multiple
+ * passes.
+ */
+static int cli_io_handler_show_activity(struct appctx *appctx)
+{
+ struct stconn *sc = appctx_sc(appctx);
+ struct show_activity_ctx *actctx = appctx->svcctx;
+ int tgt = actctx->thr; // target thread, -1 for all, 0 for total only
+ uint up_sec, up_usec;
+ int base_line;
+ ullong up;
+
+ /* FIXME: Don't watch the other side ! */
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+ return 1;
+
+ /* this macro is used below to dump values. The thread number is "thr",
+ * and runs from 0 to nbt-1 when values are printed using the formula.
+ * We normally try to dump integral lines in order to keep counters
+ * consistent. If we fail once on a line, we'll detect it next time
+ * because we'll have committed actctx->col=1 thanks to the header
+ * always being dumped individually. We'll be called again thanks to
+ * the header being present, leaving some data in the buffer. In this
+ * case once we restart we'll proceed one column at a time to make sure
+ * we don't overflow the buffer again.
+ */
+#undef SHOW_VAL
+#define SHOW_VAL(header, x, formula) \
+ do { \
+ unsigned int _v[MAX_THREADS]; \
+ unsigned int _tot; \
+ const int _nbt = global.nbthread; \
+ int restarted = actctx->col > 0; \
+ int thr; \
+ _tot = thr = 0; \
+ do { \
+ _tot += _v[thr] = (x); \
+ } while (++thr < _nbt); \
+ for (thr = actctx->col - 2; thr <= _nbt; thr++) { \
+ if (thr == -2) { \
+ /* line header */ \
+ chunk_appendf(&trash, "%s", header); \
+ } \
+ else if (thr == -1) { \
+ /* aggregate value only for multi-thread: all & 0 */ \
+ if (_nbt > 1 && tgt <= 0) \
+ chunk_appendf(&trash, " %u%s", \
+ (formula), \
+ (tgt < 0) ? \
+ " [" : ""); \
+ } \
+ else if (thr < _nbt) { \
+ /* individual value only for all or exact value */ \
+ if (tgt == -1 || tgt == thr+1) \
+ chunk_appendf(&trash, " %u", \
+ _v[thr]); \
+ } \
+ else /* thr == _nbt */ { \
+ chunk_appendf(&trash, "%s\n", \
+ (_nbt > 1 && tgt < 0) ? \
+ " ]" : ""); \
+ } \
+ if (thr == -2 || restarted) { \
+ /* failed once, emit one column at a time */\
+ if (applet_putchk(appctx, &trash) == -1) \
+ break; /* main loop handles it */ \
+ chunk_reset(&trash); \
+ actctx->col = thr + 3; \
+ } \
+ } \
+ if (applet_putchk(appctx, &trash) == -1) \
+ break; /* main loop will handle it */ \
+ /* OK dump done for this line */ \
+ chunk_reset(&trash); \
+ if (thr > _nbt) \
+ actctx->col = 0; \
+ } while (0)
+
+ /* retrieve uptime */
+ up = now_ns - start_time_ns;
+ up_sec = ns_to_sec(up);
+ up_usec = (up / 1000U) % 1000000U;
+
+ /* iterate over all dump lines. It happily skips over holes so it's
+ * not a problem not to have an exact match, we just need to have
+ * stable and consistent lines during a dump.
+ */
+ base_line = __LINE__;
+ do {
+ chunk_reset(&trash);
+
+ switch (actctx->line + base_line) {
+ case __LINE__: chunk_appendf(&trash, "thread_id: %u (%u..%u)\n", tid + 1, 1, global.nbthread); break;
+ case __LINE__: chunk_appendf(&trash, "date_now: %lu.%06lu\n", (ulong)date.tv_sec, (ulong)date.tv_usec); break;
+ case __LINE__: chunk_appendf(&trash, "uptime_now: %u.%06u\n", up_sec, up_usec); break;
+ case __LINE__: SHOW_VAL("ctxsw:", activity[thr].ctxsw, _tot); break;
+ case __LINE__: SHOW_VAL("tasksw:", activity[thr].tasksw, _tot); break;
+ case __LINE__: SHOW_VAL("empty_rq:", activity[thr].empty_rq, _tot); break;
+ case __LINE__: SHOW_VAL("long_rq:", activity[thr].long_rq, _tot); break;
+ case __LINE__: SHOW_VAL("curr_rq:", _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].rq_total), _tot); break;
+ case __LINE__: SHOW_VAL("loops:", activity[thr].loops, _tot); break;
+ case __LINE__: SHOW_VAL("wake_tasks:", activity[thr].wake_tasks, _tot); break;
+ case __LINE__: SHOW_VAL("wake_signal:", activity[thr].wake_signal, _tot); break;
+ case __LINE__: SHOW_VAL("poll_io:", activity[thr].poll_io, _tot); break;
+ case __LINE__: SHOW_VAL("poll_exp:", activity[thr].poll_exp, _tot); break;
+ case __LINE__: SHOW_VAL("poll_drop_fd:", activity[thr].poll_drop_fd, _tot); break;
+ case __LINE__: SHOW_VAL("poll_skip_fd:", activity[thr].poll_skip_fd, _tot); break;
+ case __LINE__: SHOW_VAL("conn_dead:", activity[thr].conn_dead, _tot); break;
+ case __LINE__: SHOW_VAL("stream_calls:", activity[thr].stream_calls, _tot); break;
+ case __LINE__: SHOW_VAL("pool_fail:", activity[thr].pool_fail, _tot); break;
+ case __LINE__: SHOW_VAL("buf_wait:", activity[thr].buf_wait, _tot); break;
+ case __LINE__: SHOW_VAL("cpust_ms_tot:", activity[thr].cpust_total / 2, _tot); break;
+ case __LINE__: SHOW_VAL("cpust_ms_1s:", read_freq_ctr(&activity[thr].cpust_1s) / 2, _tot); break;
+ case __LINE__: SHOW_VAL("cpust_ms_15s:", read_freq_ctr_period(&activity[thr].cpust_15s, 15000) / 2, _tot); break;
+ case __LINE__: SHOW_VAL("avg_cpu_pct:", (100 - ha_thread_ctx[thr].idle_pct), (_tot + _nbt/2) / _nbt); break;
+ case __LINE__: SHOW_VAL("avg_loop_us:", swrate_avg(activity[thr].avg_loop_us, TIME_STATS_SAMPLES), (_tot + _nbt/2) / _nbt); break;
+ case __LINE__: SHOW_VAL("accepted:", activity[thr].accepted, _tot); break;
+ case __LINE__: SHOW_VAL("accq_pushed:", activity[thr].accq_pushed, _tot); break;
+ case __LINE__: SHOW_VAL("accq_full:", activity[thr].accq_full, _tot); break;
+#ifdef USE_THREAD
+ case __LINE__: SHOW_VAL("accq_ring:", accept_queue_ring_len(&accept_queue_rings[thr]), _tot); break;
+ case __LINE__: SHOW_VAL("fd_takeover:", activity[thr].fd_takeover, _tot); break;
+ case __LINE__: SHOW_VAL("check_adopted:",activity[thr].check_adopted, _tot); break;
+#endif
+ case __LINE__: SHOW_VAL("check_started:",activity[thr].check_started, _tot); break;
+ case __LINE__: SHOW_VAL("check_active:", _HA_ATOMIC_LOAD(&ha_thread_ctx[thr].active_checks), _tot); break;
+ case __LINE__: SHOW_VAL("check_running:",_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].running_checks), _tot); break;
+
+#if defined(DEBUG_DEV)
+ /* keep these ones at the end */
+ case __LINE__: SHOW_VAL("ctr0:", activity[thr].ctr0, _tot); break;
+ case __LINE__: SHOW_VAL("ctr1:", activity[thr].ctr1, _tot); break;
+ case __LINE__: SHOW_VAL("ctr2:", activity[thr].ctr2, _tot); break;
+#endif
+ }
+#undef SHOW_VAL
+
+ /* try to dump what was possibly not dumped yet */
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* buffer full, retry later */
+ return 0;
+ }
+ /* line was dumped, let's commit it */
+ actctx->line++;
+ } while (actctx->line + base_line < __LINE__);
+
+ /* dump complete */
+ return 1;
+}
+
+/* parse a "show activity" CLI request. Returns 0 if it needs to continue, 1 if it
+ * wants to stop here. It sets a show_activity_ctx context where, if a specific
+ * thread is requested, it puts the thread number into ->thr otherwise sets it to
+ * -1.
+ */
+static int cli_parse_show_activity(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_activity_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ /* dump all threads unless one was explicitly requested */
+ ctx->thr = *args[2] ? atoi(args[2]) : -1;
+
+ if (ctx->thr < -1 || ctx->thr > global.nbthread)
+ return cli_err(appctx, "Thread ID number must be between -1 and nbthread\n");
+
+ return 0;
+}
+
+/* config keyword parsers, registered at boot time via the INITCALL below */
+static struct cfg_kw_list cfg_kws = {ILH, {
+#ifdef USE_MEMORY_PROFILING
+ { CFG_GLOBAL, "profiling.memory", cfg_parse_prof_memory },
+#endif
+ { CFG_GLOBAL, "profiling.tasks", cfg_parse_prof_tasks },
+ { 0, NULL, NULL } /* end of list */
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "set", "profiling", NULL }, "set profiling <what> {auto|on|off} : enable/disable resource profiling (tasks,memory)", cli_parse_set_profiling, NULL },
+ { { "show", "activity", NULL }, "show activity [-1|0|thread_num] : show per-thread activity stats (for support/developers)", cli_parse_show_activity, cli_io_handler_show_activity, NULL },
+ { { "show", "profiling", NULL }, "show profiling [<what>|<#lines>|<opts>]*: show profiling state (all,status,tasks,memory)", cli_parse_show_profiling, cli_io_handler_show_profiling, NULL },
+ { { "show", "tasks", NULL }, "show tasks : show running tasks", NULL, cli_io_handler_show_tasks, NULL },
+ {{},} /* end of list */
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
diff --git a/src/applet.c b/src/applet.c
new file mode 100644
index 0000000..a5b0946
--- /dev/null
+++ b/src/applet.c
@@ -0,0 +1,501 @@
+/*
+ * Functions managing applets
+ *
+ * Copyright 2000-2015 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/channel.h>
+#include <haproxy/list.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/trace.h>
+
+/* number of appctx currently alive, updated atomically on create/free */
+unsigned int nb_applets = 0;
+
+DECLARE_POOL(pool_head_appctx,  "appctx",  sizeof(struct appctx));
+
+
+/* trace source and events */
+static void applet_trace(enum trace_level level, uint64_t mask,
+			 const struct trace_source *src,
+			 const struct ist where, const struct ist func,
+			 const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ *   app  - applet
+ */
+static const struct trace_event applet_trace_events[] = {
+#define           APPLET_EV_NEW       (1ULL <<  0)
+	{ .mask = APPLET_EV_NEW,      .name = "app_new",      .desc = "new appctx" },
+#define           APPLET_EV_FREE      (1ULL <<  1)
+	{ .mask = APPLET_EV_FREE,     .name = "app_free",     .desc = "free appctx" },
+#define           APPLET_EV_RELEASE   (1ULL <<  2)
+	{ .mask = APPLET_EV_RELEASE,  .name = "app_release",  .desc = "release appctx" },
+#define           APPLET_EV_PROCESS   (1ULL <<  3)
+	{ .mask = APPLET_EV_PROCESS,  .name = "app_proc",     .desc = "process appctx" },
+#define           APPLET_EV_ERR       (1ULL <<  4)
+	{ .mask = APPLET_EV_ERR,      .name = "app_err",      .desc = "error on appctx" },
+#define           APPLET_EV_START     (1ULL <<  5)
+	{ .mask = APPLET_EV_START,    .name = "app_start",    .desc = "start appctx" },
+	{}
+};
+
+static const struct name_desc applet_trace_lockon_args[4] = {
+	/* arg1 */ { /* already used by the applet */ },
+	/* arg2 */ { },
+	/* arg3 */ { },
+	/* arg4 */ { }
+};
+
+/* verbosity levels, shared naming with the stream traces (STRM_VERB_*) */
+static const struct name_desc applet_trace_decoding[] = {
+#define STRM_VERB_CLEAN    1
+	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define STRM_VERB_MINIMAL  2
+	{ .name="minimal",  .desc="report info on streams and connectors" },
+#define STRM_VERB_SIMPLE   3
+	{ .name="simple",   .desc="add info on request and response channels" },
+#define STRM_VERB_ADVANCED 4
+	{ .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
+#define STRM_VERB_COMPLETE 5
+	{ .name="complete", .desc="add info on channel's buffer" },
+	{ /* end */ }
+};
+
+static struct trace_source trace_applet = {
+	.name = IST("applet"),
+	.desc = "Applet endpoint",
+	.arg_def = TRC_ARG1_APPCTX,  // TRACE()'s first argument is always an appctx
+	.default_cb = applet_trace,
+	.known_events = applet_trace_events,
+	.lockon_args = applet_trace_lockon_args,
+	.decoding = applet_trace_decoding,
+	.report_events = ~0,  // report everything by default
+};
+
+#define TRACE_SOURCE &trace_applet
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* the applet traces always expect that arg1, if non-null, is of a appctx (from
+ * which we can derive everything). Output is appended to the thread-local
+ * trace_buf chunk; the amount of detail grows with the source's verbosity.
+ */
+static void applet_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+			 const struct ist where, const struct ist func,
+			 const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct appctx *appctx = a1;
+	const struct stconn *sc = NULL, *sco = NULL;
+	const struct stream *s = NULL;
+	const struct channel *ic = NULL, *oc = NULL;
+
+	if (!appctx || src->verbosity < STRM_VERB_CLEAN)
+		return;
+
+	/* an orphan appctx has no stream connector yet, so s/ic/oc stay NULL */
+	sc = appctx_sc(appctx);
+	if (sc) {
+		s = __sc_strm(sc);
+		sco = sc_opposite(sc);
+		ic = sc_ic(sc);
+		oc = sc_oc(sc);
+	}
+
+	/* General info about the stream (htx/tcp, id...) */
+	if (s)
+		chunk_appendf(&trace_buf, " : [%s,%s]",
+			      appctx->applet->name, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
+	else
+		chunk_appendf(&trace_buf, " : [%s]", appctx->applet->name);
+
+	if (sc)
+		/* local and opposite stream connector state */
+		chunk_appendf(&trace_buf, " SC=(%s,%s)",
+			      sc_state_str(sc->state), sc_state_str(sco->state));
+	else
+		/* local and opposite stream connector state */
+		chunk_appendf(&trace_buf, " SC=(none,none)");
+
+	if (src->verbosity == STRM_VERB_CLEAN)
+		return;
+
+	/* task pointer, relative expiration and applet state machine fields */
+	chunk_appendf(&trace_buf, " appctx=%p .t=%p .t.exp=%d .state=%d .st0=%d .st1=%d",
+		      appctx, appctx->t, tick_isset(appctx->t->expire) ? TICKS_TO_MS(appctx->t->expire - now_ms) : TICK_ETERNITY,
+		      appctx->state, appctx->st0, appctx->st1);
+
+	if (!sc || src->verbosity == STRM_VERB_MINIMAL)
+		return;
+
+	/* below this point sc is non-NULL, hence s/sco/ic/oc are set too */
+	chunk_appendf(&trace_buf, " - s=(%p,0x%08x,0x%x)", s, s->flags, s->conn_err_type);
+
+	chunk_appendf(&trace_buf, " sc=(%p,%d,0x%08x,0x%x) sco=(%p,%d,0x%08x,0x%x) sc.exp(r,w)=(%d,%d) sco.exp(r,w)=(%d,%d)",
+		      sc, sc->state, sc->flags, sc->sedesc->flags,
+		      sco, sco->state, sco->flags, sco->sedesc->flags,
+		      tick_isset(sc_ep_rcv_ex(sc)) ? TICKS_TO_MS(sc_ep_rcv_ex(sc) - now_ms) : TICK_ETERNITY,
+		      tick_isset(sc_ep_snd_ex(sc)) ? TICKS_TO_MS(sc_ep_snd_ex(sc) - now_ms) : TICK_ETERNITY,
+		      tick_isset(sc_ep_rcv_ex(sco)) ? TICKS_TO_MS(sc_ep_rcv_ex(sco) - now_ms) : TICK_ETERNITY,
+		      tick_isset(sc_ep_snd_ex(sco)) ? TICKS_TO_MS(sc_ep_snd_ex(sco) - now_ms) : TICK_ETERNITY);
+
+
+	/* If txn defined, don't display all channel info */
+	if (src->verbosity == STRM_VERB_SIMPLE) {
+		chunk_appendf(&trace_buf, " ic=(%p .fl=0x%08x .exp=%d)",
+			      ic, ic->flags, tick_isset(ic->analyse_exp) ? TICKS_TO_MS(ic->analyse_exp - now_ms) : TICK_ETERNITY);
+		chunk_appendf(&trace_buf, " oc=(%p .fl=0x%08x .exp=%d)",
+			      oc, oc->flags, tick_isset(oc->analyse_exp) ? TICKS_TO_MS(oc->analyse_exp - now_ms) : TICK_ETERNITY);
+	}
+	else {
+		chunk_appendf(&trace_buf, " ic=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
+			      ic, ic->flags, ic->analysers, ic->analyse_exp,
+			      (long)ic->output, ic->total, ic->to_forward);
+		chunk_appendf(&trace_buf, " oc=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
+			      oc, oc->flags, oc->analysers, oc->analyse_exp,
+			      (long)oc->output, oc->total, oc->to_forward);
+	}
+
+	if (src->verbosity == STRM_VERB_SIMPLE ||
+	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
+		return;
+
+	/* channels' buffer info */
+	if (s->flags & SF_HTX) {
+		struct htx *ichtx = htxbuf(&ic->buf);
+		struct htx *ochtx = htxbuf(&oc->buf);
+
+		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
+			      ichtx->data, ichtx->size, htx_nbblks(ichtx),
+			      ochtx->data, ochtx->size, htx_nbblks(ochtx));
+	}
+	else {
+		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
+			      (unsigned int)b_data(&ic->buf), b_orig(&ic->buf),
+			      (unsigned int)b_head_ofs(&ic->buf), (unsigned int)b_size(&ic->buf),
+			      (unsigned int)b_data(&oc->buf), b_orig(&oc->buf),
+			      (unsigned int)b_head_ofs(&oc->buf), (unsigned int)b_size(&oc->buf));
+	}
+}
+
+/* Tries to allocate a new appctx and initialize all of its fields. The appctx
+ * is returned on success, NULL on failure. The appctx must be released using
+ * appctx_free(). <applet> is assigned as the applet, but it can be NULL. <thr>
+ * is the thread ID to start the applet on, and a negative value allows the
+ * applet to start anywhere. Backend applets may only be created on the current
+ * thread. On failure, nothing is left allocated (cleanup via the goto ladder
+ * below).
+ */
+struct appctx *appctx_new_on(struct applet *applet, struct sedesc *sedesc, int thr)
+{
+	struct appctx *appctx;
+
+	/* Backend appctx cannot be started on another thread than the local one */
+	BUG_ON(thr != tid && sedesc);
+
+	TRACE_ENTER(APPLET_EV_NEW);
+
+	/* zeroed allocation: all fields not set below start at 0/NULL */
+	appctx = pool_zalloc(pool_head_appctx);
+	if (unlikely(!appctx)) {
+		TRACE_ERROR("APPCTX allocation failure", APPLET_EV_NEW|APPLET_EV_ERR);
+		goto fail_appctx;
+	}
+
+	LIST_INIT(&appctx->wait_entry);
+	appctx->obj_type = OBJ_TYPE_APPCTX;
+	appctx->applet = applet;
+	appctx->sess = NULL;
+
+	appctx->t = task_new_on(thr);
+	if (unlikely(!appctx->t)) {
+		TRACE_ERROR("APPCTX task allocation failure", APPLET_EV_NEW|APPLET_EV_ERR);
+		goto fail_task;
+	}
+
+	/* no sedesc provided: this is a frontend appctx, create an orphan
+	 * endpoint descriptor that will be attached to a stream later.
+	 */
+	if (!sedesc) {
+		sedesc = sedesc_new();
+		if (unlikely(!sedesc)) {
+			TRACE_ERROR("APPCTX sedesc allocation failure", APPLET_EV_NEW|APPLET_EV_ERR);
+			goto fail_endp;
+		}
+		sedesc->se = appctx;
+		se_fl_set(sedesc, SE_FL_T_APPLET | SE_FL_ORPHAN);
+	}
+
+	appctx->sedesc = sedesc;
+	appctx->t->process = task_run_applet;
+	appctx->t->context = appctx;
+
+	LIST_INIT(&appctx->buffer_wait.list);
+	appctx->buffer_wait.target = appctx;
+	appctx->buffer_wait.wakeup_cb = appctx_buf_available;
+
+	_HA_ATOMIC_INC(&nb_applets);
+
+	TRACE_LEAVE(APPLET_EV_NEW, appctx);
+	return appctx;
+
+	/* error unwinding: free in reverse order of allocation */
+  fail_endp:
+	task_destroy(appctx->t);
+  fail_task:
+	pool_free(pool_head_appctx, appctx);
+  fail_appctx:
+	return NULL;
+}
+
+/* Finalize the frontend appctx startup. It must not be called for a backend
+ * appctx. This function is responsible to create the appctx's session and the
+ * frontend stream connector. By transitivity, the stream is also created.
+ *
+ * It returns 0 on success and -1 on error. In this case, it is the caller
+ * responsibility to release the appctx. However, the session is released if it
+ * was created. On success, if an error is encountered in the caller function,
+ * the stream must be released instead of the appctx. To be sure,
+ * appctx_free_on_early_error() must be called in this case.
+ */
+int appctx_finalize_startup(struct appctx *appctx, struct proxy *px, struct buffer *input)
+{
+	struct session *sess;
+
+	/* async startup is only possible for frontend appctx. Thus for orphan
+	 * appctx. Because no backend appctx can be orphan.
+	 */
+	BUG_ON(!se_fl_test(appctx->sedesc, SE_FL_ORPHAN));
+
+	TRACE_ENTER(APPLET_EV_START, appctx);
+
+	/* NULL origin address: the session is internal, not client-originated */
+	sess = session_new(px, NULL, &appctx->obj_type);
+	if (!sess) {
+		TRACE_ERROR("APPCTX session allocation failure", APPLET_EV_START|APPLET_EV_ERR, appctx);
+		return -1;
+	}
+	if (!sc_new_from_endp(appctx->sedesc, sess, input)) {
+		/* the session was created above, so release it here as documented */
+		session_free(sess);
+		TRACE_ERROR("APPCTX sc allocation failure", APPLET_EV_START|APPLET_EV_ERR, appctx);
+		return -1;
+	}
+
+	appctx->sess = sess;
+	TRACE_LEAVE(APPLET_EV_START, appctx);
+	return 0;
+}
+
+/* Release function to call when an error occurred during init stage of a
+ * frontend appctx. For a backend appctx, it just calls appctx_free()
+ */
+void appctx_free_on_early_error(struct appctx *appctx)
+{
+	/* If a frontend appctx is attached to a stream connector, release the stream
+	 * instead of the appctx. The stream owns the appctx at that point, so
+	 * freeing it directly would leave a dangling endpoint.
+	 */
+	if (!se_fl_test(appctx->sedesc, SE_FL_ORPHAN) && !(appctx_sc(appctx)->flags & SC_FL_ISBACK)) {
+		stream_free(appctx_strm(appctx));
+		return;
+	}
+	appctx_free(appctx);
+}
+
+/* Releases <appctx> immediately if its task is idle, otherwise flags it with
+ * APPLET_WANT_DIE and wakes the task so that task_run_applet() performs the
+ * actual release from the owning thread.
+ */
+void appctx_free(struct appctx *appctx)
+{
+	/* The task is supposed to be run on this thread, so we can just
+	 * check if it's running already (or about to run) or not
+	 */
+	if (!(appctx->t->state & (TASK_QUEUED | TASK_RUNNING))) {
+		TRACE_POINT(APPLET_EV_FREE, appctx);
+		__appctx_free(appctx);
+	}
+	else {
+		/* if it's running, or about to run, defer the freeing
+		 * until the callback is called.
+		 */
+		appctx->state |= APPLET_WANT_DIE;
+		task_wakeup(appctx->t, TASK_WOKEN_OTHER);
+		TRACE_DEVEL("Cannot release APPCTX now, wake it up", APPLET_EV_FREE, appctx);
+	}
+}
+
+/* reserves a command context of at least <size> bytes in the <appctx>, for
+ * use by a CLI command or any regular applet. The pointer to this context is
+ * stored in ctx.svcctx and is returned. The caller doesn't need to release
+ * it as it's allocated from reserved space. If the size is larger than
+ * APPLET_MAX_SVCCTX a crash will occur (hence that will never happen outside
+ * of development).
+ *
+ * Note that the command does *not* initialize the area, so that it can easily
+ * be used upon each entry in a function. It's left to the initialization code
+ * to do it if needed. The CLI will always zero the whole area before calling
+ * a keyword's ->parse() function.
+ */
+void *applet_reserve_svcctx(struct appctx *appctx, size_t size)
+{
+	/* <size> is only used to validate the request fits the reserved storage */
+	BUG_ON(size > APPLET_MAX_SVCCTX);
+	appctx->svcctx = &appctx->svc.storage;
+	return appctx->svcctx;
+}
+
+/* This is used to reset an svcctx and the svc.storage without releasing the
+ * appctx. In fact this is only used by the CLI applet between commands, so
+ * that a new command starts from a zeroed context.
+ */
+void applet_reset_svcctx(struct appctx *appctx)
+{
+	memset(&appctx->svc.storage, 0, APPLET_MAX_SVCCTX);
+	appctx->svcctx = NULL;
+}
+
+/* call the applet's release() function if any, and marks the sedesc as shut.
+ * Needs to be called upon close(). Idempotent: returns immediately if the
+ * endpoint was already shut in either direction.
+ */
+void appctx_shut(struct appctx *appctx)
+{
+	if (se_fl_test(appctx->sedesc, SE_FL_SHR | SE_FL_SHW))
+		return;
+
+	TRACE_ENTER(APPLET_EV_RELEASE, appctx);
+	if (appctx->applet->release)
+		appctx->applet->release(appctx);
+
+	/* drop any pending buffer-wait registration so we can't be woken later */
+	if (LIST_INLIST(&appctx->buffer_wait.list))
+		LIST_DEL_INIT(&appctx->buffer_wait.list);
+
+	se_fl_set(appctx->sedesc, SE_FL_SHRR | SE_FL_SHWN);
+	TRACE_LEAVE(APPLET_EV_RELEASE, appctx);
+}
+
+/* Callback used to wake up an applet when a buffer is available. The applet
+ * <appctx> is woken up if an input buffer was requested for the associated
+ * stream connector. In this case the buffer is immediately allocated and the
+ * function returns 1. Otherwise it returns 0. Note that this automatically
+ * covers multiple wake-up attempts by ensuring that the same buffer will not
+ * be accounted for multiple times.
+ * NOTE(review): assumes appctx_sc() is non-NULL here, i.e. the wait entry is
+ * only registered once the appctx is attached to a stream connector — confirm.
+ */
+int appctx_buf_available(void *arg)
+{
+	struct appctx *appctx = arg;
+	struct stconn *sc = appctx_sc(appctx);
+
+	/* allocation requested ? */
+	if (!(sc->flags & SC_FL_NEED_BUFF))
+		return 0;
+
+	sc_have_buff(sc);
+
+	/* was already allocated another way ? if so, don't take this one */
+	if (c_size(sc_ic(sc)) || sc_ep_have_ff_data(sc_opposite(sc)))
+		return 0;
+
+	/* allocation possible now ? */
+	if (!b_alloc(&sc_ic(sc)->buf)) {
+		/* still no buffer: re-arm the need so we get called again */
+		sc_need_buff(sc);
+		return 0;
+	}
+
+	task_wakeup(appctx->t, TASK_WOKEN_RES);
+	return 1;
+}
+
+/* Default applet handler: the tasklet entry point shared by all applets. It
+ * finalizes orphan (frontend) appctx startup, allocates the input buffer,
+ * calls the applet's ->fct() callback, then propagates read/write events to
+ * the stream connectors and detects applets spinning without progress.
+ * Returns <t>, or NULL when the appctx was released.
+ */
+struct task *task_run_applet(struct task *t, void *context, unsigned int state)
+{
+	struct appctx *app = context;
+	struct stconn *sc, *sco;
+	unsigned int rate;
+	size_t count;
+	int did_send = 0;
+
+	TRACE_ENTER(APPLET_EV_PROCESS, app);
+
+	/* deferred release requested by appctx_free() while we were queued */
+	if (app->state & APPLET_WANT_DIE) {
+		TRACE_DEVEL("APPCTX want die, release it", APPLET_EV_FREE, app);
+		__appctx_free(app);
+		return NULL;
+	}
+
+	if (se_fl_test(app->sedesc, SE_FL_ORPHAN)) {
+		/* Finalize init of orphan appctx. .init callback function must
+		 * be defined and it must finalize appctx startup.
+		 */
+		BUG_ON(!app->applet->init);
+
+		if (appctx_init(app) == -1) {
+			TRACE_DEVEL("APPCTX init failed", APPLET_EV_FREE|APPLET_EV_ERR, app);
+			appctx_free_on_early_error(app);
+			return NULL;
+		}
+		BUG_ON(!app->sess || !appctx_sc(app) || !appctx_strm(app));
+		TRACE_DEVEL("APPCTX initialized", APPLET_EV_PROCESS, app);
+	}
+
+	sc = appctx_sc(app);
+	sco = sc_opposite(sc);
+
+	/* We always pretend the applet can't get and doesn't want to
+	 * put, it's up to it to change this if needed. This ensures
+	 * that one applet which ignores any event will not spin.
+	 */
+	applet_need_more_data(app);
+	applet_have_no_more_data(app);
+
+	/* Now we'll try to allocate the input buffer. We wake up the applet in
+	 * all cases. So this is the applet's responsibility to check if this
+	 * buffer was allocated or not. This leaves a chance for applets to do
+	 * some other processing if needed. The applet doesn't have anything to
+	 * do if it needs the buffer, it will be called again upon readiness.
+	 */
+	if (!sc_alloc_ibuf(sc, &app->buffer_wait))
+		applet_have_more_data(app);
+
+	/* snapshot pending output before the applet runs, to detect writes */
+	count = co_data(sc_oc(sc));
+	app->applet->fct(app);
+
+	TRACE_POINT(APPLET_EV_PROCESS, app);
+
+	/* now check if the applet has released some room and forgot to
+	 * notify the other side about it.
+	 */
+	if (count != co_data(sc_oc(sc))) {
+		sc_oc(sc)->flags |= CF_WRITE_EVENT | CF_WROTE_DATA;
+		if (sco->room_needed < 0 || channel_recv_max(sc_oc(sc)) >= sco->room_needed)
+			sc_have_room(sco);
+		did_send = 1;
+	}
+	else {
+		if (!sco->room_needed)
+			sc_have_room(sco);
+	}
+
+	if (sc_ic(sc)->flags & CF_READ_EVENT)
+		sc_ep_report_read_activity(sc);
+
+	/* peer aborted while we were waiting for room: report EOS+error */
+	if (sc_waiting_room(sc) && (sc->flags & SC_FL_ABRT_DONE)) {
+		sc_ep_set(sc, SE_FL_EOS|SE_FL_ERROR);
+	}
+
+	if (!co_data(sc_oc(sc))) {
+		if (did_send)
+			sc_ep_report_send_activity(sc);
+	}
+	else
+		sc_ep_report_blocked_send(sc, did_send);
+
+	/* measure the call rate and check for anomalies when too high */
+	if (((b_size(sc_ib(sc)) && sc->flags & SC_FL_NEED_BUFF) || // asks for a buffer which is present
+	     (b_size(sc_ib(sc)) && !b_data(sc_ib(sc)) && sc->flags & SC_FL_NEED_ROOM) || // asks for room in an empty buffer
+	     (b_data(sc_ob(sc)) && sc_is_send_allowed(sc)) || // asks for data already present
+	     (!b_data(sc_ib(sc)) && b_data(sc_ob(sc)) && // didn't return anything ...
+	      (!(sc_oc(sc)->flags & CF_WRITE_EVENT) && (sc->flags & SC_FL_SHUT_WANTED))))) { // ... and left data pending after a shut
+		rate = update_freq_ctr(&app->call_rate, 1);
+		if (rate >= 100000 && app->call_rate.prev_ctr) // looped like this more than 100k times over last second
+			stream_dump_and_crash(&app->obj_type, read_freq_ctr(&app->call_rate));
+	}
+
+	sc->app_ops->wake(sc);
+	channel_release_buffer(sc_ic(sc), &app->buffer_wait);
+	TRACE_LEAVE(APPLET_EV_PROCESS, app);
+	return t;
+}
diff --git a/src/arg.c b/src/arg.c
new file mode 100644
index 0000000..2810050
--- /dev/null
+++ b/src/arg.c
@@ -0,0 +1,479 @@
+/*
+ * Functions used to parse typed argument lists
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+
+#include <haproxy/arg.h>
+#include <haproxy/chunk.h>
+#include <haproxy/global.h>
+#include <haproxy/regex.h>
+#include <haproxy/tools.h>
+
+/* human-readable names for each ARGT_* argument type, indexed by type value;
+ * used when building parse-error messages.
+ */
+const char *arg_type_names[ARGT_NBTYPES] = {
+	[ARGT_STOP] = "end of arguments",
+	[ARGT_SINT] = "integer",
+	[ARGT_STR]  = "string",
+	[ARGT_IPV4] = "IPv4 address",
+	[ARGT_MSK4] = "IPv4 mask",
+	[ARGT_IPV6] = "IPv6 address",
+	[ARGT_MSK6] = "IPv6 mask",
+	[ARGT_TIME] = "delay",
+	[ARGT_SIZE] = "size",
+	[ARGT_FE]   = "frontend",
+	[ARGT_BE]   = "backend",
+	[ARGT_TAB]  = "table",
+	[ARGT_SRV]  = "server",
+	[ARGT_USR]  = "user list",
+	[ARGT_MAP]  = "map",
+	[ARGT_REG]  = "regex",
+	[ARGT_VAR]  = "variable",
+	[ARGT_PBUF_FNUM] = "Protocol buffers field number",
+	/* Unassigned types must never happen. Better crash during parsing if they do. */
+};
+
+/* This dummy arg list may be used by default when no arg is found, it helps
+ * parsers by removing pointer checks.
+ */
+struct arg empty_arg_list[ARGM_NBARGS] = { };
+
+/* Duplicates the <orig> arg_list template into a freshly allocated copy and
+ * returns it, or NULL on allocation failure. The ->list linkage as well as
+ * ->arg and ->arg_pos are left untouched: the caller sets them when inserting
+ * the element.
+ */
+struct arg_list *arg_list_clone(const struct arg_list *orig)
+{
+	struct arg_list *clone = calloc(1, sizeof(*clone));
+
+	if (!clone)
+		return NULL;
+
+	clone->ctx  = orig->ctx;
+	clone->kw   = orig->kw;
+	clone->conv = orig->conv;
+	clone->file = orig->file;
+	clone->line = orig->line;
+	return clone;
+}
+
+/* Clones template <orig>, points the copy at arg <arg> located at position
+ * <pos>, and appends it to <orig>'s list. Returns the new entry so the caller
+ * may apply further changes, or NULL on allocation failure.
+ */
+struct arg_list *arg_list_add(struct arg_list *orig, struct arg *arg, int pos)
+{
+	struct arg_list *entry = arg_list_clone(orig);
+
+	if (!entry)
+		return NULL;
+
+	entry->arg = arg;
+	entry->arg_pos = pos;
+	LIST_APPEND(&orig->list, &entry->list);
+	return entry;
+}
+
+/* This function builds an argument list from a config line, and stops at the
+ * first non-matching character, which is pointed to in <end_ptr>. A valid arg
+ * list starts with an opening parenthesis '(', contains a number of comma-
+ * delimited words, and ends with the closing parenthesis ')'. An empty list
+ * (with or without the parenthesis) will lead to a valid empty argument if the
+ * keyword has a mandatory one. The function returns the number of arguments
+ * emitted, or <0 in case of any error. Everything needed is automatically
+ * allocated. A pointer to an error message might be returned in err_msg if not
+ * NULL, in which case it would be allocated and the caller will have to check
+ * it and free it. The output arg list is returned in argp which must be valid.
+ * The returned array is always terminated by an arg of type ARGT_STOP (0),
+ * unless the mask indicates that no argument is supported. Unresolved arguments
+ * are appended to arg list <al>, which also serves as a template to create new
+ * entries. <al> may be NULL if unresolved arguments are not allowed. The mask
+ * is composed of a number of mandatory arguments in its lower ARGM_BITS bits,
+ * and a concatenation of each argument type in each subsequent ARGT_BITS-bit
+ * block. If <err_msg> is not NULL, it must point to a freeable or NULL
+ * pointer. The caller is expected to restart the parsing from the new pointer
+ * set in <end_ptr>, which is the first character considered as not being part
+ * of the arg list. The input string ends on the first between <len> characters
+ * (when len is positive) or the first NUL character. Placing -1 in <len> will
+ * make it virtually unbounded (~2GB long strings).
+ */
+int make_arg_list(const char *in, int len, uint64_t mask, struct arg **argp,
+                  char **err_msg, const char **end_ptr, int *err_arg,
+                  struct arg_list *al)
+{
+	int nbarg;
+	int pos;
+	struct arg *arg;
+	const char *beg;
+	const char *ptr_err = NULL;
+	int min_arg;
+	int empty;
+	struct arg_list *new_al = al;
+
+	*argp = NULL;
+
+	empty = 0;
+	if (!len || *in != '(') {
+		/* it's already not for us, stop here */
+		empty = 1;
+		len = 0;
+	} else {
+		/* skip opening parenthesis */
+		len--;
+		in++;
+	}
+
+	/* split the mask: low bits = mandatory arg count, rest = per-arg types */
+	min_arg = mask & ARGM_MASK;
+	mask >>= ARGM_BITS;
+
+	pos = 0;
+	/* find between 0 and NBARGS the max number of args supported by the mask */
+	for (nbarg = 0; nbarg < ARGM_NBARGS && ((mask >> (nbarg * ARGT_BITS)) & ARGT_MASK); nbarg++);
+
+	if (!nbarg)
+		goto end_parse;
+
+	/* Note: an empty input string contains an empty argument if this argument
+	 * is marked mandatory. Otherwise we can ignore it.
+	 */
+	if (empty && !min_arg)
+		goto end_parse;
+
+	/* one extra slot for the terminating ARGT_STOP entry */
+	arg = *argp = calloc(nbarg + 1, sizeof(**argp));
+
+	if (!arg)
+		goto alloc_err;
+
+	/* Note: empty arguments after a comma always exist. */
+	while (pos < nbarg) {
+		unsigned int uint;
+		int squote = 0, dquote = 0;
+		char *out;
+
+		/* the raw word is unquoted/unescaped into the trash chunk */
+		chunk_reset(&trash);
+		out = trash.area;
+
+		while (len && *in && trash.data < trash.size - 1) {
+			if (*in == '"' && !squote) {  /* double quote outside single quotes */
+				if (dquote)
+					dquote = 0;
+				else
+					dquote = 1;
+				in++; len--;
+				continue;
+			}
+			else if (*in == '\'' && !dquote) { /* single quote outside double quotes */
+				if (squote)
+					squote = 0;
+				else
+					squote = 1;
+				in++; len--;
+				continue;
+			}
+			else if (*in == '\\' && !squote && len != 1) {
+				/* '\', ', ' ', '"' support being escaped by '\' */
+				if (in[1] == 0)
+					goto unquote_err;
+
+				if (in[1] == '\\' || in[1] == ' ' || in[1] == '"' || in[1] == '\'') {
+					in++; len--;
+					*out++ = *in;
+				}
+				else if (in[1] == 'r') {
+					in++; len--;
+					*out++ = '\r';
+				}
+				else if (in[1] == 'n') {
+					in++; len--;
+					*out++ = '\n';
+				}
+				else if (in[1] == 't') {
+					in++; len--;
+					*out++ = '\t';
+				}
+				else {
+					/* just a lone '\' */
+					*out++ = *in;
+				}
+				in++; len--;
+			}
+			else {
+				if (!squote && !dquote && (*in == ',' || *in == ')')) {
+					/* end of argument */
+					break;
+				}
+				/* verbatim copy */
+				*out++ = *in++;
+				len--;
+			}
+			trash.data = out - trash.area;
+		}
+
+		/* loop above can only stop mid-word when the trash chunk is full */
+		if (len && *in && *in != ',' && *in != ')')
+			goto buffer_err;
+
+		trash.area[trash.data] = 0;
+
+		arg->type = (mask >> (pos * ARGT_BITS)) & ARGT_MASK;
+
+		switch (arg->type) {
+		case ARGT_SINT:
+			if (!trash.data)    // empty number
+				goto empty_err;
+			beg = trash.area;
+			arg->data.sint = read_int64(&beg, trash.area + trash.data);
+			if (beg < trash.area + trash.data)
+				goto parse_err;
+			arg->type = ARGT_SINT;
+			break;
+
+		case ARGT_FE:
+		case ARGT_BE:
+		case ARGT_TAB:
+		case ARGT_SRV:
+		case ARGT_USR:
+		case ARGT_REG:
+			/* These argument types need to be stored as strings during
+			 * parsing then resolved later.
+			 */
+			if (!al)
+				goto resolve_err;
+			arg->unresolved = 1;
+			new_al = arg_list_add(al, arg, pos);
+			__fallthrough;
+
+		case ARGT_STR:
+			/* all types that must be resolved are stored as strings
+			 * during the parsing. The caller must at one point resolve
+			 * them and free the string.
+			 */
+			arg->data.str.area = my_strndup(trash.area, trash.data);
+			arg->data.str.data = trash.data;
+			arg->data.str.size = trash.data + 1;
+			break;
+
+		case ARGT_IPV4:
+			if (!trash.data)    // empty address
+				goto empty_err;
+
+			if (inet_pton(AF_INET, trash.area, &arg->data.ipv4) <= 0)
+				goto parse_err;
+			break;
+
+		case ARGT_MSK4:
+			if (!trash.data)    // empty mask
+				goto empty_err;
+
+			if (!str2mask(trash.area, &arg->data.ipv4))
+				goto parse_err;
+
+			/* a mask is stored as a plain IPv4 value once parsed */
+			arg->type = ARGT_IPV4;
+			break;
+
+		case ARGT_IPV6:
+			if (!trash.data)    // empty address
+				goto empty_err;
+
+			if (inet_pton(AF_INET6, trash.area, &arg->data.ipv6) <= 0)
+				goto parse_err;
+			break;
+
+		case ARGT_MSK6:
+			if (!trash.data)    // empty mask
+				goto empty_err;
+
+			if (!str2mask6(trash.area, &arg->data.ipv6))
+				goto parse_err;
+
+			arg->type = ARGT_IPV6;
+			break;
+
+		case ARGT_TIME:
+			if (!trash.data)    // empty time
+				goto empty_err;
+
+			ptr_err = parse_time_err(trash.area, &uint, TIME_UNIT_MS);
+			if (ptr_err) {
+				if (ptr_err == PARSE_TIME_OVER || ptr_err == PARSE_TIME_UNDER)
+					ptr_err = trash.area;
+				goto parse_err;
+			}
+			/* delays are converted to plain integers (milliseconds) */
+			arg->data.sint = uint;
+			arg->type = ARGT_SINT;
+			break;
+
+		case ARGT_SIZE:
+			if (!trash.data)    // empty size
+				goto empty_err;
+
+			ptr_err = parse_size_err(trash.area, &uint);
+			if (ptr_err)
+				goto parse_err;
+
+			arg->data.sint = uint;
+			arg->type = ARGT_SINT;
+			break;
+
+		case ARGT_PBUF_FNUM:
+			if (!trash.data)
+				goto empty_err;
+
+			if (!parse_dotted_uints(trash.area, &arg->data.fid.ids, &arg->data.fid.sz))
+				goto parse_err;
+
+			break;
+
+			/* FIXME: other types need to be implemented here */
+		default:
+			goto not_impl;
+		}
+
+		pos++;
+		arg++;
+
+		/* don't go back to parsing if we reached end */
+		if (!len || !*in || *in == ')' || pos >= nbarg)
+			break;
+
+		/* skip comma */
+		in++; len--;
+	}
+
+ end_parse:
+	if (pos < min_arg) {
+		/* not enough arguments */
+		memprintf(err_msg,
+		          "missing arguments (got %d/%d), type '%s' expected",
+		          pos, min_arg, arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK]);
+		goto err;
+	}
+
+	if (empty) {
+		/* nothing to do */
+	} else if (*in == ')') {
+		/* skip the expected closing parenthesis */
+		in++;
+	} else {
+		/* the caller is responsible for freeing this message */
+		char *word = (len > 0) ? my_strndup(in, len) : (char *)in;
+
+		if (*word)
+			memprintf(err_msg, "expected ')' before '%s'", word);
+		else
+			memprintf(err_msg, "expected ')'");
+
+		if (len > 0)
+			free(word);
+		/* when we're missing a right paren, the empty part preceding
+		 * already created an empty arg, adding one to the position, so
+		 * let's fix the reporting to avoid being confusing.
+		 */
+		if (pos > 1)
+			pos--;
+		goto err;
+	}
+
+	/* note that pos might be < nbarg and this is not an error, it's up to the
+	 * caller to decide what to do with optional args.
+	 */
+	if (err_arg)
+		*err_arg = pos;
+	if (end_ptr)
+		*end_ptr = in;
+	return pos;
+
+ err:
+	if (new_al == al) {
+		/* only free the arg area if we have not queued unresolved args
+		 * still pointing to it.
+		 */
+		free_args(*argp);
+		free(*argp);
+	}
+	*argp = NULL;
+	if (err_arg)
+		*err_arg = pos;
+	if (end_ptr)
+		*end_ptr = in;
+	return -1;
+
+ empty_err:
+	/* If we've only got an empty set of parenthesis with nothing
+	 * in between, there is no arg at all.
+	 */
+	if (!pos) {
+		ha_free(argp);
+	}
+
+	if (pos >= min_arg)
+		goto end_parse;
+
+	memprintf(err_msg, "expected type '%s' at position %d, but got nothing",
+	          arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK], pos + 1);
+	goto err;
+
+ parse_err:
+	/* come here with the word attempted to parse in trash */
+	memprintf(err_msg, "failed to parse '%s' as type '%s' at position %d",
+	          trash.area, arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK], pos + 1);
+	goto err;
+
+ not_impl:
+	memprintf(err_msg, "parsing for type '%s' was not implemented, please report this bug",
+	          arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK]);
+	goto err;
+
+ buffer_err:
+	memprintf(err_msg, "too small buffer size to store decoded argument %d, increase bufsize ?",
+	          pos + 1);
+	goto err;
+
+ unquote_err:
+	/* come here with the parsed part in <trash.area>:<trash.data> and the
+	 * unparsable part in <in>.
+	 */
+	trash.area[trash.data] = 0;
+	memprintf(err_msg, "failed to parse '%s' after '%s' as type '%s' at position %d",
+	          in, trash.area, arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK], pos + 1);
+	goto err;
+
+alloc_err:
+	memprintf(err_msg, "out of memory");
+	goto err;
+
+ resolve_err:
+	memprintf(err_msg, "unresolved argument of type '%s' at position %d not allowed",
+	          arg_type_names[(mask >> (pos * ARGT_BITS)) & ARGT_MASK], pos + 1);
+	goto err;
+}
+
+/* Free all args of an args array, taking care of unresolved arguments as well.
+ * It stops at the ARGT_STOP, which must be present. The array itself is not
+ * freed, it's up to the caller to do it. However it is returned, allowing to
+ * call free(free_args(argptr)). It is valid to call it with a NULL args, and
+ * nothing will be done).
+ */
+struct arg *free_args(struct arg *args)
+{
+ struct arg *arg;
+
+ for (arg = args; arg && arg->type != ARGT_STOP; arg++) {
+ if (arg->type == ARGT_STR || arg->unresolved)
+ chunk_destroy(&arg->data.str);
+ else if (arg->type == ARGT_REG)
+ regex_free(arg->data.reg);
+ else if (arg->type == ARGT_PBUF_FNUM)
+ ha_free(&arg->data.fid.ids);
+ }
+ return args;
+}
diff --git a/src/auth.c b/src/auth.c
new file mode 100644
index 0000000..0031300
--- /dev/null
+++ b/src/auth.c
@@ -0,0 +1,316 @@
+/*
+ * User authentication & authorization
+ *
+ * Copyright 2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifdef USE_LIBCRYPT
+/* This is to have crypt() defined on Linux */
+#define _GNU_SOURCE
+
+#ifdef USE_CRYPT_H
+/* some platforms such as Solaris need this */
+#include <crypt.h>
+#endif
+#endif /* USE_LIBCRYPT */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <haproxy/api.h>
+#include <haproxy/auth-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/thread.h>
+
+struct userlist *userlist = NULL; /* list of all existing userlists */
+
+#ifdef USE_LIBCRYPT
+#define CRYPT_STATE_MSG "yes"
+#ifdef HA_HAVE_CRYPT_R
+/* context for crypt_r() */
+static THREAD_LOCAL struct crypt_data crypt_data = { .initialized = 0 };
+#else
+/* lock for crypt() */
+__decl_thread(static HA_SPINLOCK_T auth_lock);
+#endif
+#else /* USE_LIBCRYPT */
+#define CRYPT_STATE_MSG "no"
+#endif
+
+/* Look up a userlist by <name> in the global <userlist> chain.
+ * Returns a pointer to the matching userlist struct, or NULL if <name>
+ * is NULL/empty or if no userlist with that exact name exists.
+ */
+
+struct userlist *
+auth_find_userlist(char *name)
+{
+ struct userlist *l;
+
+ if (!name || !*name)
+ return NULL;
+
+ for (l = userlist; l; l = l->next)
+ if (strcmp(l->name, name) == 0)
+ return l;
+
+ return NULL;
+}
+
+/* Returns 1 if a group named <name> exists in userlist <ul>, otherwise 0. */
+int check_group(struct userlist *ul, char *name)
+{
+ struct auth_groups *ag;
+
+ for (ag = ul->groups; ag; ag = ag->next)
+ if (strcmp(name, ag->name) == 0)
+ return 1;
+ return 0;
+}
+
+/* Releases the whole chain of userlists starting at <ul>: every user with
+ * its per-user group membership list, every group definition, and the
+ * userlist nodes themselves. Safe to call with a NULL <ul>.
+ * NOTE(review): freeing au->u.groups as a list assumes the users are in
+ * resolved state (the <u> union holds the grouplist, not the raw
+ * groups_names string) — confirm for early config-error paths.
+ */
+void
+userlist_free(struct userlist *ul)
+{
+ struct userlist *tul;
+ struct auth_users *au, *tau;
+ struct auth_groups_list *agl, *tagl;
+ struct auth_groups *ag, *tag;
+
+ while (ul) {
+ /* Free users. */
+ au = ul->users;
+ while (au) {
+ /* Free groups that own current user. */
+ agl = au->u.groups;
+ while (agl) {
+ tagl = agl;
+ agl = agl->next;
+ free(tagl);
+ }
+
+ tau = au;
+ au = au->next;
+ free(tau->user);
+ free(tau->pass);
+ free(tau);
+ }
+
+ /* Free grouplist. */
+ ag = ul->groups;
+ while (ag) {
+ tag = ag;
+ ag = ag->next;
+ free(tag->name);
+ free(tag);
+ }
+
+ tul = ul;
+ ul = ul->next;
+ free(tul->name);
+ free(tul);
+ };
+}
+
+/* Post-configuration resolution of all userlists: for each user, turn its
+ * comma-separated group-name string into a linked list of pointers to the
+ * actual group structs, and conversely attach to each user the groups that
+ * listed it in their "users" directive. Returns ERR_NONE on success, or
+ * ERR_ALERT|ERR_FATAL on unknown user/group names or allocation failure.
+ * NOTE(review): uses strtok(), which is not reentrant; this presumably runs
+ * in the single-threaded config check phase — confirm before calling it
+ * from anywhere else.
+ */
+int userlist_postinit()
+{
+ struct userlist *curuserlist = NULL;
+
+ /* Resolve usernames and groupnames. */
+ for (curuserlist = userlist; curuserlist; curuserlist = curuserlist->next) {
+ struct auth_groups *ag;
+ struct auth_users *curuser;
+ struct auth_groups_list *grl;
+
+ for (curuser = curuserlist->users; curuser; curuser = curuser->next) {
+ char *group = NULL;
+ struct auth_groups_list *groups = NULL;
+
+ if (!curuser->u.groups_names)
+ continue;
+
+ /* first strtok() call gets the names string, subsequent
+ * ones continue from the saved position (NULL argument).
+ */
+ while ((group = strtok(group?NULL:curuser->u.groups_names, ","))) {
+ for (ag = curuserlist->groups; ag; ag = ag->next) {
+ if (strcmp(ag->name, group) == 0)
+ break;
+ }
+
+ if (!ag) {
+ ha_alert("userlist '%s': no such group '%s' specified in user '%s'\n",
+ curuserlist->name, group, curuser->user);
+ free(groups);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* Add this group at the group userlist. */
+ grl = calloc(1, sizeof(*grl));
+ if (!grl) {
+ ha_alert("userlist '%s': no more memory when trying to allocate the user groups.\n",
+ curuserlist->name);
+ free(groups);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ grl->group = ag;
+ grl->next = groups;
+ groups = grl;
+ }
+
+ /* <u> is a union: this frees the former groups_names
+ * string and installs the resolved grouplist instead.
+ */
+ free(curuser->u.groups);
+ curuser->u.groups = groups;
+ }
+
+ for (ag = curuserlist->groups; ag; ag = ag->next) {
+ char *user = NULL;
+
+ if (!ag->groupusers)
+ continue;
+
+ while ((user = strtok(user?NULL:ag->groupusers, ","))) {
+ for (curuser = curuserlist->users; curuser; curuser = curuser->next) {
+ if (strcmp(curuser->user, user) == 0)
+ break;
+ }
+
+ if (!curuser) {
+ ha_alert("userlist '%s': no such user '%s' specified in group '%s'\n",
+ curuserlist->name, user, ag->name);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* Add this group at the group userlist. */
+ grl = calloc(1, sizeof(*grl));
+ if (!grl) {
+ ha_alert("userlist '%s': no more memory when trying to allocate the user groups.\n",
+ curuserlist->name);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ grl->group = ag;
+ grl->next = curuser->u.groups;
+ curuser->u.groups = grl;
+ }
+
+ /* the raw "users" string is no longer needed once resolved */
+ ha_free(&ag->groupusers);
+ }
+
+#ifdef DEBUG_AUTH
+ for (ag = curuserlist->groups; ag; ag = ag->next) {
+ struct auth_groups_list *agl;
+
+ fprintf(stderr, "group %s, id %p, users:", ag->name, ag);
+ for (curuser = curuserlist->users; curuser; curuser = curuser->next) {
+ for (agl = curuser->u.groups; agl; agl = agl->next) {
+ if (agl->group == ag)
+ fprintf(stderr, " %s", curuser->user);
+ }
+ }
+ fprintf(stderr, "\n");
+ }
+#endif
+ }
+
+ return ERR_NONE;
+}
+
+/*
+ * Authenticate and authorize user; return 1 if OK, 0 in case of error
+ * (unknown user, password mismatch, or crypt() failure). Unless the user
+ * was declared with the insecure-password flag, the stored password is a
+ * crypt(3) hash and <pass> is hashed before comparison.
+ */
+int
+check_user(struct userlist *ul, const char *user, const char *pass)
+{
+
+ struct auth_users *u;
+#ifdef DEBUG_AUTH
+ struct auth_groups_list *agl;
+#endif
+ const char *ep;
+
+#ifdef DEBUG_AUTH
+ fprintf(stderr, "req: userlist=%s, user=%s, pass=%s\n",
+ ul->name, user, pass);
+#endif
+
+ for (u = ul->users; u; u = u->next)
+ if (strcmp(user, u->user) == 0)
+ break;
+
+ if (!u)
+ return 0;
+
+#ifdef DEBUG_AUTH
+ fprintf(stderr, "cfg: user=%s, pass=%s, flags=%X, groups=",
+ u->user, u->pass, u->flags);
+ for (agl = u->u.groups; agl; agl = agl->next)
+ fprintf(stderr, " %s", agl->group->name);
+#endif
+
+ if (!(u->flags & AU_O_INSECURE)) {
+#ifdef USE_LIBCRYPT
+#ifdef HA_HAVE_CRYPT_R
+ /* crypt_r() with a thread-local context: no lock needed */
+ ep = crypt_r(pass, u->pass, &crypt_data);
+#else
+ /* plain crypt() uses static storage, serialize callers */
+ HA_SPIN_LOCK(AUTH_LOCK, &auth_lock);
+ ep = crypt(pass, u->pass);
+ HA_SPIN_UNLOCK(AUTH_LOCK, &auth_lock);
+#endif
+#else
+ /* built without libcrypt: encrypted passwords cannot match */
+ return 0;
+#endif
+ } else
+ ep = pass;
+
+#ifdef DEBUG_AUTH
+ fprintf(stderr, ", crypt=%s\n", ((ep) ? ep : ""));
+#endif
+
+ if (ep && strcmp(ep, u->pass) == 0)
+ return 1;
+ else
+ return 0;
+}
+
+/* Pattern-matching callback for group membership: looks up the user carried
+ * by sample <smp> in the userlist stored in smp->ctx.a[0], then returns the
+ * first pattern whose string equals one of the user's group names, or NULL
+ * if the userlist is absent, the user is unknown, or no group matches.
+ * The <fill> argument is accepted for API compatibility and not used here.
+ */
+struct pattern *
+pat_match_auth(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct userlist *ul = smp->ctx.a[0];
+ struct pattern_list *lst;
+ struct auth_users *u;
+ struct auth_groups_list *agl;
+ struct pattern *pattern;
+
+ /* Check if the userlist is present in the context data. */
+ if (!ul)
+ return NULL;
+
+ /* Browse the userlist for searching user. */
+ for (u = ul->users; u; u = u->next) {
+ if (strcmp(smp->data.u.str.area, u->user) == 0)
+ break;
+ }
+ if (!u)
+ return NULL;
+
+ /* Browse each pattern. */
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* Browse each group for searching group name that match the pattern. */
+ for (agl = u->u.groups; agl; agl = agl->next) {
+ if (strcmp(agl->group->name, pattern->ptr.str) == 0)
+ return pattern;
+ }
+ }
+ return NULL;
+}
+
+REGISTER_BUILD_OPTS("Encrypted password support via crypt(3): "CRYPT_STATE_MSG);
diff --git a/src/backend.c b/src/backend.c
new file mode 100644
index 0000000..39d2c75
--- /dev/null
+++ b/src/backend.c
@@ -0,0 +1,3401 @@
+/*
+ * Backend variables and functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/types.h>
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/acl.h>
+#include <haproxy/activity.h>
+#include <haproxy/arg.h>
+#include <haproxy/backend.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/hash.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/lb_chash.h>
+#include <haproxy/lb_fas.h>
+#include <haproxy/lb_fwlc.h>
+#include <haproxy/lb_fwrr.h>
+#include <haproxy/lb_map.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/payload.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/queue.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/trace.h>
+
+#define TRACE_SOURCE &trace_strm
+
+/* Returns the number of seconds elapsed since the last session was seen on
+ * backend <be>, or -1 if no session was ever recorded.
+ */
+int be_lastsession(const struct proxy *be)
+{
+ if (be->be_counters.last_sess)
+ return ns_to_sec(now_ns) - be->be_counters.last_sess;
+
+ return -1;
+}
+
+/* helper function to invoke the correct hash method for proxy <px> on the
+ * <len> first bytes of <key>, as selected by the BE_LB_HASH_FUNC bits of
+ * the configured "hash-type". The result is optionally avalanched
+ * (full_hash) when the BE_LB_HMOD_AVAL modifier is set.
+ */
+unsigned int gen_hash(const struct proxy* px, const char* key, unsigned long len)
+{
+ unsigned int hash;
+
+ switch (px->lbprm.algo & BE_LB_HASH_FUNC) {
+ case BE_LB_HFCN_DJB2:
+ hash = hash_djb2(key, len);
+ break;
+ case BE_LB_HFCN_WT6:
+ hash = hash_wt6(key, len);
+ break;
+ case BE_LB_HFCN_CRC32:
+ hash = hash_crc32(key, len);
+ break;
+ case BE_LB_HFCN_NONE:
+ /* use key as a hash: parse it as a decimal integer */
+ {
+ const char *_key = key;
+
+ hash = read_int64(&_key, _key + len);
+ }
+ break;
+ case BE_LB_HFCN_SDBM:
+ /* this is the default hash function */
+ default:
+ hash = hash_sdbm(key, len);
+ break;
+ }
+
+ if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
+ hash = full_hash(hash);
+
+ return hash;
+}
+
+/*
+ * This function recounts the number of usable active and backup servers for
+ * proxy <p>. These numbers are returned into the p->srv_act and p->srv_bck.
+ * This function also recomputes the total active and backup weights. However,
+ * it does not update tot_weight nor tot_used. Use update_backend_weight() for
+ * this.
+ * This functions is designed to be called before server's weight and state
+ * commit so it uses 'next' weight and states values.
+ *
+ * threads: this is the caller responsibility to lock data. For now, this
+ * function is called from lb modules, so it should be ok. But if you need to
+ * call it from another place, be careful (and update this comment).
+ */
+void recount_servers(struct proxy *px)
+{
+ struct server *srv;
+
+ px->srv_act = px->srv_bck = 0;
+ px->lbprm.tot_wact = px->lbprm.tot_wbck = 0;
+ px->lbprm.fbck = NULL;
+ for (srv = px->srv; srv != NULL; srv = srv->next) {
+ if (!srv_willbe_usable(srv))
+ continue;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ /* remember the first usable backup unless all backups
+ * are allowed to be used at once.
+ */
+ if (!px->srv_bck &&
+ !(px->options & PR_O_USE_ALL_BK))
+ px->lbprm.fbck = srv;
+ px->srv_bck++;
+ srv->cumulative_weight = px->lbprm.tot_wbck;
+ px->lbprm.tot_wbck += srv->next_eweight;
+ } else {
+ px->srv_act++;
+ srv->cumulative_weight = px->lbprm.tot_wact;
+ px->lbprm.tot_wact += srv->next_eweight;
+ }
+ }
+}
+
+/* This function simply updates the backend's tot_weight and tot_used values
+ * after servers weights have been updated. It is designed to be used after
+ * recount_servers() or equivalent.
+ *
+ * threads: this is the caller responsibility to lock data. For now, this
+ * function is called from lb modules, so it should be ok. But if you need to
+ * call it from another place, be careful (and update this comment).
+ */
+void update_backend_weight(struct proxy *px)
+{
+ if (px->srv_act) {
+ /* active servers present: backups are ignored */
+ px->lbprm.tot_weight = px->lbprm.tot_wact;
+ px->lbprm.tot_used = px->srv_act;
+ }
+ else if (px->lbprm.fbck) {
+ /* use only the first backup server */
+ px->lbprm.tot_weight = px->lbprm.fbck->next_eweight;
+ px->lbprm.tot_used = 1;
+ }
+ else {
+ /* no active servers: fall back to the whole backup farm */
+ px->lbprm.tot_weight = px->lbprm.tot_wbck;
+ px->lbprm.tot_used = px->srv_bck;
+ }
+}
+
+/*
+ * This function tries to find a running server for the proxy <px> following
+ * the source hash method. The hash is an XOR of the source address <addr>
+ * taken <sizeof(int)> bytes at a time over <len> bytes (4 for IPv4, 16 for
+ * IPv6). Depending on the number of active/backup servers, it will either
+ * look for active servers, or for backup servers.
+ * If any server is found, it will be returned. If no valid server is found,
+ * NULL is returned.
+ */
+static struct server *get_server_sh(struct proxy *px, const char *addr, int len, const struct server *avoid)
+{
+ unsigned int h, l;
+
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ l = h = 0;
+
+ /* note: we won't hash if there's only one server left */
+ if (px->lbprm.tot_used == 1)
+ goto hash_done;
+
+ while ((l + sizeof (int)) <= len) {
+ h ^= ntohl(*(unsigned int *)(&addr[l]));
+ l += sizeof (int);
+ }
+ /* FIXME: why don't we use gen_hash() here as well?
+ * -> we don't take into account hash function from "hash_type"
+ * options here..
+ */
+ if ((px->lbprm.algo & BE_LB_HASH_MOD) == BE_LB_HMOD_AVAL)
+ h = full_hash(h);
+ hash_done:
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, h, avoid);
+ else
+ return map_get_server_hash(px, h);
+}
+
+/*
+ * This function tries to find a running server for the proxy <px> following
+ * the URI hash method. In order to optimize cache hits, the hash computation
+ * ends at the question mark. Depending on the number of active/backup servers,
+ * it will either look for active servers, or for backup servers.
+ * If any server is found, it will be returned. If no valid server is found,
+ * NULL is returned. The lbprm.arg_opt{1,2,3} values correspond respectively to
+ * the "whole" optional argument (boolean, bit0), the "len" argument (numeric)
+ * and the "depth" argument (numeric).
+ *
+ * This code was contributed by Guillaume Dallaire, who also selected this hash
+ * algorithm out of tens of others because it gave him the best results.
+ *
+ */
+static struct server *get_server_uh(struct proxy *px, char *uri, int uri_len, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ int c;
+ int slashes = 0;
+ const char *start, *end;
+
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ /* note: we won't hash if there's only one server left */
+ if (px->lbprm.tot_used == 1)
+ goto hash_done;
+
+ if (px->lbprm.arg_opt2) // "len"
+ uri_len = MIN(uri_len, px->lbprm.arg_opt2);
+
+ start = end = uri;
+ while (uri_len--) {
+ c = *end;
+ if (c == '/') {
+ slashes++;
+ if (slashes == px->lbprm.arg_opt3) /* depth+1 */
+ break;
+ }
+ else if (c == '?' && !(px->lbprm.arg_opt1 & 1)) // "whole"
+ break;
+ end++;
+ }
+
+ hash = gen_hash(px, start, (end - start));
+
+ hash_done:
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+}
+
+/*
+ * This function tries to find a running server for the proxy <px> following
+ * the URL parameter hash method. It looks for a specific parameter in the
+ * URL and hashes it to compute the server ID. This is useful to optimize
+ * performance by avoiding bounces between servers in contexts where sessions
+ * are shared but cookies are not usable. If the parameter is not found, NULL
+ * is returned. If any server is found, it will be returned. If no valid server
+ * is found, NULL is returned. The parameter name and its length come from
+ * px->lbprm.arg_str / arg_len.
+ */
+static struct server *get_server_ph(struct proxy *px, const char *uri, int uri_len, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ const char *start, *end;
+ const char *p;
+ const char *params;
+ int plen;
+
+ /* when tot_weight is 0 then so is srv_count */
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ /* only the query string (after '?') is searched */
+ if ((p = memchr(uri, '?', uri_len)) == NULL)
+ return NULL;
+
+ p++;
+
+ uri_len -= (p - uri);
+ plen = px->lbprm.arg_len;
+ params = p;
+
+ while (uri_len > plen) {
+ /* Look for the parameter name followed by an equal symbol */
+ if (params[plen] == '=') {
+ if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
+ /* OK, we have the parameter here at <params>, and
+ * the value after the equal sign, at <p>
+ * skip the equal symbol
+ */
+ p += plen + 1;
+ start = end = p;
+ uri_len -= plen + 1;
+
+ /* the value extends up to the next '&' or end of URI */
+ while (uri_len && *end != '&') {
+ uri_len--;
+ end++;
+ }
+ hash = gen_hash(px, start, (end - start));
+
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+ }
+ }
+ /* skip to next parameter */
+ p = memchr(params, '&', uri_len);
+ if (!p)
+ return NULL;
+ p++;
+ uri_len -= (p - params);
+ params = p;
+ }
+ return NULL;
+}
+
+/*
+ * this does the same as the previous server_ph, but check the body contents
+ * instead of the query string. Only the first DATA block of the HTX message
+ * is scanned, and hashing aborts if the body turns out not to be
+ * URL-encoded parameters.
+ */
+static struct server *get_server_ph_post(struct stream *s, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ struct channel *req = &s->req;
+ struct proxy *px = s->be;
+ struct htx *htx = htxbuf(&req->buf);
+ struct htx_blk *blk;
+ unsigned int plen = px->lbprm.arg_len;
+ unsigned long len;
+ const char *params, *p, *start, *end;
+
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ /* locate the first DATA block and take its payload as the body */
+ p = params = NULL;
+ len = 0;
+ for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist v;
+
+ if (type != HTX_BLK_DATA)
+ continue;
+ v = htx_get_blk_value(htx, blk);
+ p = params = v.ptr;
+ len = v.len;
+ break;
+ }
+
+ while (len > plen) {
+ /* Look for the parameter name followed by an equal symbol */
+ if (params[plen] == '=') {
+ if (memcmp(params, px->lbprm.arg_str, plen) == 0) {
+ /* OK, we have the parameter here at <params>, and
+ * the value after the equal sign, at <p>
+ * skip the equal symbol
+ */
+ p += plen + 1;
+ start = end = p;
+ len -= plen + 1;
+
+ while (len && *end != '&') {
+ if (unlikely(!HTTP_IS_TOKEN(*p))) {
+ /* if in a POST, body must be URI encoded or it's not a URI.
+ * Do not interpret any possible binary data as a parameter.
+ */
+ if (likely(HTTP_IS_LWS(*p))) /* eol, uncertain uri len */
+ break;
+ return NULL; /* oh, no; this is not uri-encoded.
+ * This body does not contain parameters.
+ */
+ }
+ len--;
+ end++;
+ /* should we break if vlen exceeds limit? */
+ }
+ hash = gen_hash(px, start, (end - start));
+
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+ }
+ }
+ /* skip to next parameter */
+ p = memchr(params, '&', len);
+ if (!p)
+ return NULL;
+ p++;
+ len -= (p - params);
+ params = p;
+ }
+ return NULL;
+}
+
+
+/*
+ * This function tries to find a running server for the proxy <px> following
+ * the Header parameter hash method. It looks for a specific parameter in the
+ * URL and hashes it to compute the server ID. This is useful to optimize
+ * performance by avoiding bounces between servers in contexts where sessions
+ * are shared but cookies are not usable. If the parameter is not found, NULL
+ * is returned. If any server is found, it will be returned. If no valid server
+ * is found, NULL is returned. When lbprm.arg_opt1 is set, the hash will only
+ * apply to the middle part of a domain name ("use_domain_only" option).
+ */
+static struct server *get_server_hh(struct stream *s, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ struct proxy *px = s->be;
+ unsigned int plen = px->lbprm.arg_len;
+ unsigned long len;
+ const char *p;
+ const char *start, *end;
+ struct htx *htx = htxbuf(&s->req.buf);
+ struct http_hdr_ctx ctx = { .blk = NULL };
+
+ /* tot_weight appears to mean srv_count */
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ /* note: we won't hash if there's only one server left */
+ if (px->lbprm.tot_used == 1)
+ goto hash_done;
+
+ http_find_header(htx, ist2(px->lbprm.arg_str, plen), &ctx, 0);
+
+ /* if the header is not found or empty, let's fallback to round robin */
+ if (!ctx.blk || !ctx.value.len)
+ return NULL;
+
+ /* Found a the param_name in the headers.
+ * we will compute the hash based on this value ctx.val.
+ */
+ len = ctx.value.len;
+ p = ctx.value.ptr;
+
+ if (!px->lbprm.arg_opt1) {
+ hash = gen_hash(px, p, len);
+ } else {
+ int dohash = 0;
+ p += len;
+ /* special computation, use only main domain name, not tld/host
+ * going back from the end of string, start hashing at first
+ * dot stop at next.
+ * This is designed to work with the 'Host' header, and requires
+ * a special option to activate this.
+ */
+ end = p;
+ while (len) {
+ if (dohash) {
+ /* Rewind the pointer until the previous char
+ * is a dot, this will allow to set the start
+ * position of the domain. */
+ if (*(p - 1) == '.')
+ break;
+ }
+ else if (*p == '.') {
+ /* The pointer is rewinded to the dot before the
+ * tld, we memorize the end of the domain and
+ * can enter the domain processing. */
+ end = p;
+ dohash = 1;
+ }
+ p--;
+ len--;
+ }
+ start = p;
+ hash = gen_hash(px, start, (end - start));
+ }
+
+ hash_done:
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+}
+
+/* RDP Cookie HASH: picks a server for stream <s> by hashing the value of the
+ * RDP cookie named in px->lbprm.arg_str, fetched from the request channel.
+ * Returns NULL when the cookie is absent, incomplete, or no server is
+ * usable, so the caller can fall back to round robin.
+ */
+static struct server *get_server_rch(struct stream *s, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ struct proxy *px = s->be;
+ unsigned long len;
+ int ret;
+ struct sample smp;
+ int rewind;
+
+ /* tot_weight appears to mean srv_count */
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ memset(&smp, 0, sizeof(smp));
+
+ /* temporarily rewind already-forwarded channel data so the fetch can
+ * see the whole request, then restore the channel afterwards.
+ */
+ rewind = co_data(&s->req);
+ c_rew(&s->req, rewind);
+
+ ret = fetch_rdp_cookie_name(s, &smp, px->lbprm.arg_str, px->lbprm.arg_len);
+ len = smp.data.u.str.data;
+
+ c_adv(&s->req, rewind);
+
+ /* SMP_F_MAY_CHANGE means the cookie may still arrive with more data */
+ if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || len == 0)
+ return NULL;
+
+ /* note: we won't hash if there's only one server left */
+ if (px->lbprm.tot_used == 1)
+ goto hash_done;
+
+ /* Found the param_name in the headers.
+ * we will compute the hash based on this value ctx.val.
+ */
+ hash = gen_hash(px, smp.data.u.str.area, len);
+
+ hash_done:
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+}
+
+/* sample expression HASH. Returns NULL if the sample is not found or if there
+ * are no server, relying on the caller to fall back to round robin instead.
+ * The expression to evaluate is stored in px->lbprm.expr and its result is
+ * fetched as binary data before being hashed.
+ */
+static struct server *get_server_expr(struct stream *s, const struct server *avoid)
+{
+ struct proxy *px = s->be;
+ struct sample *smp;
+ unsigned int hash = 0;
+
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ /* note: no need to hash if there's only one server left */
+ if (px->lbprm.tot_used == 1)
+ goto hash_done;
+
+ smp = sample_fetch_as_type(px, s->sess, s, SMP_OPT_DIR_REQ | SMP_OPT_FINAL, px->lbprm.expr, SMP_T_BIN);
+ if (!smp)
+ return NULL;
+
+ /* We have the desired data. Let's hash it according to the configured
+ * options and algorithm.
+ */
+ hash = gen_hash(px, smp->data.u.str.area, smp->data.u.str.data);
+
+ hash_done:
+ if ((px->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ return chash_get_server_hash(px, hash, avoid);
+ else
+ return map_get_server_hash(px, hash);
+}
+
+/* random value: performs px->lbprm.arg_opt1 random draws on the consistent
+ * hash ring ("power of N choices") and keeps the server with the lowest
+ * served/weight ratio. Returns NULL when no server is usable or when the
+ * selected server is saturated, so the stream reaches the backend queue.
+ */
+static struct server *get_server_rnd(struct stream *s, const struct server *avoid)
+{
+ unsigned int hash = 0;
+ struct proxy *px = s->be;
+ struct server *prev, *curr;
+ int draws = px->lbprm.arg_opt1; // number of draws
+
+ /* tot_weight appears to mean srv_count */
+ if (px->lbprm.tot_weight == 0)
+ return NULL;
+
+ curr = NULL;
+ do {
+ prev = curr;
+ hash = statistical_prng();
+ curr = chash_get_server_hash(px, hash, avoid);
+ if (!curr)
+ break;
+
+ /* compare the new server to the previous best choice and pick
+ * the one with the least currently served requests.
+ */
+ if (prev && prev != curr &&
+ curr->served * prev->cur_eweight > prev->served * curr->cur_eweight)
+ curr = prev;
+ } while (--draws > 0);
+
+ /* if the selected server is full, pretend we have none so that we reach
+ * the backend's queue instead.
+ */
+ if (curr &&
+ (curr->queue.length || (curr->maxconn && curr->served >= srv_dynamic_maxconn(curr))))
+ curr = NULL;
+
+ return curr;
+}
+
+/*
+ * This function applies the load-balancing algorithm to the stream, as
+ * defined by the backend it is assigned to. The stream is then marked as
+ * 'assigned'.
+ *
+ * This function MAY NOT be called with SF_ASSIGNED already set. If the stream
+ * had a server previously assigned, it is rebalanced, trying to avoid the same
+ * server, which should still be present in target_srv(&s->target) before the call.
+ * The function tries to keep the original connection slot if it reconnects to
+ * the same server, otherwise it releases it and tries to offer it.
+ *
+ * It is illegal to call this function with a stream in a queue.
+ *
+ * It may return :
+ * SRV_STATUS_OK if everything is OK. ->srv and ->target are assigned.
+ * SRV_STATUS_NOSRV if no server is available. Stream is not ASSIGNED
+ * SRV_STATUS_FULL if all servers are saturated. Stream is not ASSIGNED
+ * SRV_STATUS_INTERNAL for other unrecoverable errors.
+ *
+ * Upon successful return, the stream flag SF_ASSIGNED is set to indicate that
+ * it does not need to be called anymore. This means that target_srv(&s->target)
+ * can be trusted in balance and direct modes.
+ *
+ */
+
+int assign_server(struct stream *s)
+{
+ struct connection *conn = NULL;
+ struct server *conn_slot;
+ struct server *srv = NULL, *prev_srv;
+ int err;
+
+ err = SRV_STATUS_INTERNAL;
+ if (unlikely(s->pend_pos || s->flags & SF_ASSIGNED))
+ goto out_err;
+
+ prev_srv = objt_server(s->target);
+ conn_slot = s->srv_conn;
+
+ /* We have to release any connection slot before applying any LB algo,
+ * otherwise we may erroneously end up with no available slot.
+ */
+ if (conn_slot)
+ sess_change_server(s, NULL);
+
+ /* We will now try to find the good server and store it into <objt_server(s->target)>.
+ * Note that <objt_server(s->target)> may be NULL in case of dispatch or proxy mode,
+ * as well as if no server is available (check error code).
+ */
+
+ srv = NULL;
+ s->target = NULL;
+
+ /* "prefer last server" handling: try to reuse a server this session
+ * already has a usable connection to, except for hash-based algos.
+ */
+ if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI &&
+ ((s->sess->flags & SESS_FL_PREFER_LAST) ||
+ (s->be->options & PR_O_PREF_LAST))) {
+ struct sess_srv_list *srv_list;
+ list_for_each_entry(srv_list, &s->sess->srv_list, srv_list) {
+ struct server *tmpsrv = objt_server(srv_list->target);
+
+ if (tmpsrv && tmpsrv->proxy == s->be &&
+ ((s->sess->flags & SESS_FL_PREFER_LAST) ||
+ (!s->be->max_ka_queue ||
+ server_has_room(tmpsrv) || (
+ tmpsrv->queue.length + 1 < s->be->max_ka_queue))) &&
+ srv_currently_usable(tmpsrv)) {
+ list_for_each_entry(conn, &srv_list->conn_list, session_list) {
+ if (!(conn->flags & CO_FL_WAIT_XPRT)) {
+ srv = tmpsrv;
+ s->target = &srv->obj_type;
+ if (conn->flags & CO_FL_SESS_IDLE) {
+ conn->flags &= ~CO_FL_SESS_IDLE;
+ s->sess->idle_conns--;
+ }
+ goto out_ok;
+ }
+ }
+ }
+ }
+ }
+
+ if (s->be->lbprm.algo & BE_LB_KIND) {
+ /* we must check if we have at least one server available */
+ if (!s->be->lbprm.tot_weight) {
+ err = SRV_STATUS_NOSRV;
+ goto out;
+ }
+
+ /* if there's some queue on the backend, with certain algos we
+ * know it's because all servers are full.
+ */
+ if (s->be->queue.length && s->be->queue.length != s->be->beconn &&
+ (((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_FAS)|| // first
+ ((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_RR) || // roundrobin
+ ((s->be->lbprm.algo & (BE_LB_KIND|BE_LB_NEED|BE_LB_PARM)) == BE_LB_ALGO_SRR))) { // static-rr
+ err = SRV_STATUS_FULL;
+ goto out;
+ }
+
+ /* First check whether we need to fetch some data or simply call
+ * the LB lookup function. Only the hashing functions will need
+ * some input data in fact, and will support multiple algorithms.
+ */
+ switch (s->be->lbprm.algo & BE_LB_LKUP) {
+ case BE_LB_LKUP_RRTREE:
+ srv = fwrr_get_next_server(s->be, prev_srv);
+ break;
+
+ case BE_LB_LKUP_FSTREE:
+ srv = fas_get_next_server(s->be, prev_srv);
+ break;
+
+ case BE_LB_LKUP_LCTREE:
+ srv = fwlc_get_next_server(s->be, prev_srv);
+ break;
+
+ case BE_LB_LKUP_CHTREE:
+ case BE_LB_LKUP_MAP:
+ if ((s->be->lbprm.algo & BE_LB_KIND) == BE_LB_KIND_RR) {
+ /* static-rr (map) or random (chash) */
+ if ((s->be->lbprm.algo & BE_LB_PARM) == BE_LB_RR_RANDOM)
+ srv = get_server_rnd(s, prev_srv);
+ else
+ srv = map_get_server_rr(s->be, prev_srv);
+ break;
+ }
+ else if ((s->be->lbprm.algo & BE_LB_KIND) != BE_LB_KIND_HI) {
+ /* unknown balancing algorithm */
+ err = SRV_STATUS_INTERNAL;
+ goto out;
+ }
+
+ /* hash-based algorithms: fetch the key to hash according
+ * to the configured "balance" parameter.
+ */
+ switch (s->be->lbprm.algo & BE_LB_PARM) {
+ const struct sockaddr_storage *src;
+
+ case BE_LB_HASH_SRC:
+ src = sc_src(s->scf);
+ if (src && src->ss_family == AF_INET) {
+ srv = get_server_sh(s->be,
+ (void *)&((struct sockaddr_in *)src)->sin_addr,
+ 4, prev_srv);
+ }
+ else if (src && src->ss_family == AF_INET6) {
+ srv = get_server_sh(s->be,
+ (void *)&((struct sockaddr_in6 *)src)->sin6_addr,
+ 16, prev_srv);
+ }
+ break;
+
+ case BE_LB_HASH_URI:
+ /* URI hashing */
+ if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
+ struct ist uri;
+
+ uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
+ if (s->be->lbprm.arg_opt1 & 2) {
+ /* "path-only": strip scheme/authority */
+ struct http_uri_parser parser =
+ http_uri_parser_init(uri);
+
+ uri = http_parse_path(&parser);
+ if (!isttest(uri))
+ uri = ist("");
+ }
+ srv = get_server_uh(s->be, uri.ptr, uri.len, prev_srv);
+ }
+ break;
+
+ case BE_LB_HASH_PRM:
+ /* URL Parameter hashing */
+ if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY) {
+ struct ist uri;
+
+ uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
+ srv = get_server_ph(s->be, uri.ptr, uri.len, prev_srv);
+
+ /* fall back to the message body for POST requests */
+ if (!srv && s->txn->meth == HTTP_METH_POST)
+ srv = get_server_ph_post(s, prev_srv);
+ }
+ break;
+
+ case BE_LB_HASH_HDR:
+ /* Header Parameter hashing */
+ if (IS_HTX_STRM(s) && s->txn->req.msg_state >= HTTP_MSG_BODY)
+ srv = get_server_hh(s, prev_srv);
+ break;
+
+ case BE_LB_HASH_RDP:
+ /* RDP Cookie hashing */
+ srv = get_server_rch(s, prev_srv);
+ break;
+
+ case BE_LB_HASH_SMP:
+ /* sample expression hashing */
+ srv = get_server_expr(s, prev_srv);
+ break;
+
+ default:
+ /* unknown balancing algorithm */
+ err = SRV_STATUS_INTERNAL;
+ goto out;
+ }
+
+ /* If the hashing parameter was not found, let's fall
+ * back to round robin on the map.
+ */
+ if (!srv) {
+ if ((s->be->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_CHTREE)
+ srv = chash_get_next_server(s->be, prev_srv);
+ else
+ srv = map_get_server_rr(s->be, prev_srv);
+ }
+
+ /* end of map-based LB */
+ break;
+
+ default:
+ /* unknown balancing algorithm */
+ err = SRV_STATUS_INTERNAL;
+ goto out;
+ }
+
+ if (!srv) {
+ err = SRV_STATUS_FULL;
+ goto out;
+ }
+ else if (srv != prev_srv) {
+ /* count a load-balanced connection only on server changes */
+ _HA_ATOMIC_INC(&s->be->be_counters.cum_lbconn);
+ _HA_ATOMIC_INC(&srv->counters.cum_lbconn);
+ }
+ s->target = &srv->obj_type;
+ }
+ else if (s->be->options & (PR_O_DISPATCH | PR_O_TRANSP)) {
+ /* dispatch or transparent mode: target the backend itself */
+ s->target = &s->be->obj_type;
+ }
+ else {
+ err = SRV_STATUS_NOSRV;
+ goto out;
+ }
+
+out_ok:
+ s->flags |= SF_ASSIGNED;
+ err = SRV_STATUS_OK;
+ out:
+
+ /* Either we take back our connection slot, or we offer it to someone
+ * else if we don't need it anymore.
+ */
+ if (conn_slot) {
+ if (conn_slot == srv) {
+ sess_change_server(s, srv);
+ } else {
+ if (may_dequeue_tasks(conn_slot, s->be))
+ process_srv_queue(conn_slot);
+ }
+ }
+
+ out_err:
+ return err;
+}
+
+/* Allocate an address for the destination endpoint
+ * The address is taken from the currently assigned server, or from the
+ * dispatch or transparent address.
+ *
+ * Returns SRV_STATUS_OK on success. Does nothing if the address was
+ * already set.
+ * On error, no address is allocated and SRV_STATUS_INTERNAL is returned.
+ */
+static int alloc_dst_address(struct sockaddr_storage **ss,
+ struct server *srv, struct stream *s)
+{
+ const struct sockaddr_storage *dst;
+
+ /* address already allocated by a previous attempt: keep it */
+ if (*ss)
+ return SRV_STATUS_OK;
+
+ if ((s->flags & SF_DIRECT) || (s->be->lbprm.algo & BE_LB_KIND)) {
+ /* A server is necessarily known for this stream */
+ if (!(s->flags & SF_ASSIGNED))
+ return SRV_STATUS_INTERNAL;
+
+ if (!sockaddr_alloc(ss, NULL, 0))
+ return SRV_STATUS_INTERNAL;
+
+ **ss = srv->addr;
+ set_host_port(*ss, srv->svc_port);
+ if (!is_addr(*ss)) {
+ /* if the server has no address, we use the same address
+ * the client asked, which is handy for remapping ports
+ * locally on multiple addresses at once. Nothing is done
+ * for AF_UNIX addresses.
+ */
+ dst = sc_dst(s->scf);
+ if (dst && dst->ss_family == AF_INET) {
+ ((struct sockaddr_in *)*ss)->sin_family = AF_INET;
+ ((struct sockaddr_in *)*ss)->sin_addr =
+ ((struct sockaddr_in *)dst)->sin_addr;
+ } else if (dst && dst->ss_family == AF_INET6) {
+ ((struct sockaddr_in6 *)*ss)->sin6_family = AF_INET6;
+ ((struct sockaddr_in6 *)*ss)->sin6_addr =
+ ((struct sockaddr_in6 *)dst)->sin6_addr;
+ }
+ }
+
+ /* if this server remaps proxied ports, we'll use
+ * the port the client connected to with an offset. */
+ if ((srv->flags & SRV_F_MAPPORTS)) {
+ int base_port;
+
+ dst = sc_dst(s->scf);
+ if (dst) {
+ /* First, retrieve the port from the incoming connection */
+ base_port = get_host_port(dst);
+
+ /* Second, assign the outgoing connection's port */
+ base_port += get_host_port(*ss);
+ set_host_port(*ss, base_port);
+ }
+ }
+ }
+ else if (s->be->options & PR_O_DISPATCH) {
+ if (!sockaddr_alloc(ss, NULL, 0))
+ return SRV_STATUS_INTERNAL;
+
+ /* connect to the defined dispatch addr */
+ **ss = s->be->dispatch_addr;
+ }
+ else if ((s->be->options & PR_O_TRANSP)) {
+ if (!sockaddr_alloc(ss, NULL, 0))
+ return SRV_STATUS_INTERNAL;
+
+ /* in transparent mode, use the original dest addr if no dispatch specified */
+ dst = sc_dst(s->scf);
+ if (dst && (dst->ss_family == AF_INET || dst->ss_family == AF_INET6))
+ **ss = *dst;
+ }
+ else {
+ /* no server and no LB algorithm ! */
+ return SRV_STATUS_INTERNAL;
+ }
+
+ return SRV_STATUS_OK;
+}
+
/* This function assigns a server to stream <s> if required, and can add the
 * connection to either the assigned server's queue or to the proxy's queue.
 * If ->srv_conn is set, the stream is first released from the server.
 * It may also be called with SF_DIRECT and/or SF_ASSIGNED though. It will
 * be called before any connection and after any retry or redispatch occurs.
 *
 * It is not allowed to call this function with a stream in a queue.
 *
 * Returns :
 *
 *   SRV_STATUS_OK       if everything is OK.
 *   SRV_STATUS_NOSRV    if no server is available. objt_server(s->target) = NULL.
 *   SRV_STATUS_QUEUED   if the connection has been queued.
 *   SRV_STATUS_FULL     if the server(s) is/are saturated and the
 *                       connection could not be queued at the server's,
 *                       which may be NULL if we queue on the backend.
 *   SRV_STATUS_INTERNAL for other unrecoverable errors.
 *
 */
int assign_server_and_queue(struct stream *s)
{
	struct pendconn *p;
	struct server *srv;
	int err;

	/* a stream already sitting in a queue must never come back here */
	if (s->pend_pos)
		return SRV_STATUS_INTERNAL;

	err = SRV_STATUS_OK;
	if (!(s->flags & SF_ASSIGNED)) {
		struct server *prev_srv = objt_server(s->target);

		err = assign_server(s);
		if (prev_srv) {
			/* This stream was previously assigned to a server. We have to
			 * update the stream's and the server's stats :
			 *  - if the server changed :
			 *    - set TX_CK_DOWN if txn.flags was TX_CK_VALID
			 *    - set SF_REDISP if it was successfully redispatched
			 *    - increment srv->redispatches and be->redispatches
			 *  - if the server remained the same : update retries.
			 */

			if (prev_srv != objt_server(s->target)) {
				if (s->txn && (s->txn->flags & TX_CK_MASK) == TX_CK_VALID) {
					/* the cookie pointed to a now-abandoned server */
					s->txn->flags &= ~TX_CK_MASK;
					s->txn->flags |= TX_CK_DOWN;
				}
				s->flags |= SF_REDISP;
				_HA_ATOMIC_INC(&prev_srv->counters.redispatches);
				_HA_ATOMIC_INC(&s->be->be_counters.redispatches);
			} else {
				_HA_ATOMIC_INC(&prev_srv->counters.retries);
				_HA_ATOMIC_INC(&s->be->be_counters.retries);
			}
		}
	}

	switch (err) {
	case SRV_STATUS_OK:
		/* we have SF_ASSIGNED set */
		srv = objt_server(s->target);
		if (!srv)
			return SRV_STATUS_OK;   /* dispatch or proxy mode */

		/* If we already have a connection slot, no need to check any queue */
		if (s->srv_conn == srv)
			return SRV_STATUS_OK;

		/* OK, this stream already has an assigned server, but no
		 * connection slot yet. Either it is a redispatch, or it was
		 * assigned from persistence information (direct mode).
		 */
		if ((s->flags & SF_REDIRECTABLE) && srv->rdr_len) {
			/* server scheduled for redirection, and already assigned. We
			 * don't want to go further nor check the queue.
			 */
			sess_change_server(s, srv); /* not really needed in fact */
			return SRV_STATUS_OK;
		}

		/* We might have to queue this stream if the assigned server is full.
		 * We know we have to queue it into the server's queue, so if a maxqueue
		 * is set on the server, we must also check that the server's queue is
		 * not full, in which case we have to return FULL.
		 */
		if (srv->maxconn &&
		    (srv->queue.length || srv->served >= srv_dynamic_maxconn(srv))) {

			if (srv->maxqueue > 0 && srv->queue.length >= srv->maxqueue)
				return SRV_STATUS_FULL;

			p = pendconn_add(s);
			if (p)
				return SRV_STATUS_QUEUED;
			else
				return SRV_STATUS_INTERNAL;
		}

		/* OK, we can use this server. Let's reserve our place */
		sess_change_server(s, srv);
		return SRV_STATUS_OK;

	case SRV_STATUS_FULL:
		/* queue this stream into the proxy's queue */
		p = pendconn_add(s);
		if (p)
			return SRV_STATUS_QUEUED;
		else
			return SRV_STATUS_INTERNAL;

	case SRV_STATUS_NOSRV:
		return err;

	case SRV_STATUS_INTERNAL:
		return err;

	default:
		return SRV_STATUS_INTERNAL;
	}
}
+
/* Allocate an address if an explicit source address must be used for a backend
 * connection.
 *
 * Two parameters are taken into account to check if specific source address is
 * configured. The first one is <srv> which is the server instance to connect
 * to. It may be NULL when dispatching is used. The second one <be> is the
 * backend instance which contains the target server or dispatch.
 *
 * A stream instance <s> can be used to set the stream owner of the backend
 * connection. It is a required parameter if the source address is a dynamic
 * parameter.
 *
 * Returns SRV_STATUS_OK if either no specific source address specified or its
 * allocation is done correctly. On error returns SRV_STATUS_INTERNAL.
 */
int alloc_bind_address(struct sockaddr_storage **ss,
                       struct server *srv, struct proxy *be,
                       struct stream *s)
{
#if defined(CONFIG_HAP_TRANSPARENT)
	const struct sockaddr_storage *addr;
	struct conn_src *src = NULL;
	struct sockaddr_in *sin;
	char *vptr;
	size_t vlen;
#endif

	/* Ensure the function will not overwrite an allocated address. */
	BUG_ON(*ss);

#if defined(CONFIG_HAP_TRANSPARENT)
	/* the server's "source" setting takes precedence over the backend's */
	if (srv && srv->conn_src.opts & CO_SRC_BIND)
		src = &srv->conn_src;
	else if (be->conn_src.opts & CO_SRC_BIND)
		src = &be->conn_src;

	/* no transparent mode, no need to allocate an address, returns OK */
	if (!src)
		return SRV_STATUS_OK;

	switch (src->opts & CO_SRC_TPROXY_MASK) {
	case CO_SRC_TPROXY_ADDR:
		/* fixed, explicitly configured source address */
		if (!sockaddr_alloc(ss, NULL, 0))
			return SRV_STATUS_INTERNAL;

		**ss = src->tproxy_addr;
		break;

	case CO_SRC_TPROXY_CLI:
	case CO_SRC_TPROXY_CIP:
		BUG_ON(!s); /* Dynamic source setting requires a stream instance. */

		/* FIXME: what can we do if the client connects in IPv6 or unix socket ? */
		addr = sc_src(s->scf);
		if (!addr)
			return SRV_STATUS_INTERNAL;

		if (!sockaddr_alloc(ss, NULL, 0))
			return SRV_STATUS_INTERNAL;

		/* spoof the client's source address */
		**ss = *addr;
		break;

	case CO_SRC_TPROXY_DYN:
		BUG_ON(!s); /* Dynamic source setting requires a stream instance. */

		/* header-based source requires an HTX (HTTP) stream */
		if (!src->bind_hdr_occ || !IS_HTX_STRM(s))
			return SRV_STATUS_INTERNAL;

		if (!sockaddr_alloc(ss, NULL, 0))
			return SRV_STATUS_INTERNAL;

		/* bind to the IP in a header */
		sin = (struct sockaddr_in *)*ss;
		sin->sin_family = AF_INET;
		sin->sin_port = 0;
		sin->sin_addr.s_addr = 0;
		if (!http_get_htx_hdr(htxbuf(&s->req.buf),
		                      ist2(src->bind_hdr_name, src->bind_hdr_len),
		                      src->bind_hdr_occ, NULL, &vptr, &vlen)) {
			/* header not found: free the address to leave no leak behind */
			sockaddr_free(ss);
			return SRV_STATUS_INTERNAL;
		}

		sin->sin_addr.s_addr = htonl(inetaddr_host_lim(vptr, vptr + vlen));
		break;

	default:
		;
	}
#endif

	return SRV_STATUS_OK;
}
+
/* Attempt to get a backend connection from the specified mt_list array
 * (safe or idle connections). The <is_safe> argument means what type of
 * connection the caller wants. Only connections whose hash matches <hash>
 * (reuse compatibility key) are considered. Returns the connection on
 * success, otherwise NULL.
 */
struct connection *conn_backend_get(struct stream *s, struct server *srv, int is_safe, int64_t hash)
{
	struct connection *conn = NULL;
	int i; // thread number
	int found = 0;
	int stop;

	/* We need to lock even if this is our own list, because another
	 * thread may be trying to migrate that connection, and we don't want
	 * to end up with two threads using the same connection.
	 */
	i = tid;
	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	conn = srv_lookup_conn(is_safe ? &srv->per_thr[tid].safe_conns : &srv->per_thr[tid].idle_conns, hash);
	if (conn)
		conn_delete_from_tree(conn);

	/* If we failed to pick a connection from the idle list, let's try again with
	 * the safe list.
	 */
	if (!conn && !is_safe && srv->curr_safe_nb > 0) {
		conn = srv_lookup_conn(&srv->per_thr[tid].safe_conns, hash);
		if (conn) {
			conn_delete_from_tree(conn);
			is_safe = 1;
		}
	}
	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

	/* If we found a connection in our own list, and we don't have to
	 * steal one from another thread, then we're done.
	 */
	if (conn)
		goto done;

	/* pool sharing globally disabled ? */
	if (!(global.tune.options & GTUNE_IDLE_POOL_SHARED))
		goto done;

	/* Are we allowed to pick from another thread ? We'll still try
	 * it if we're running low on FDs as we don't want to create
	 * extra conns in this case, otherwise we can give up if we have
	 * too few idle conns and the server protocol supports establishing
	 * connections (i.e. not a reverse-http server for example).
	 */
	if (srv->curr_idle_conns < srv->low_idle_conns &&
	    ha_used_fds < global.tune.pool_low_count) {
		const struct protocol *srv_proto = protocol_lookup(srv->addr.ss_family, PROTO_TYPE_STREAM, 0);

		if (srv_proto && srv_proto->connect)
			goto done;
	}

	/* Lookup all other threads for an idle connection, starting from last
	 * unvisited thread, but always staying in the same group.
	 */
	stop = srv->per_tgrp[tgid - 1].next_takeover;
	if (stop >= tg->count)
		stop %= tg->count;

	stop += tg->base;
	i = stop;
	do {
		/* skip threads with no idle conns for this server, and our own
		 * thread which was already scanned above */
		if (!srv->curr_idle_thr[i] || i == tid)
			continue;

		/* trylock only: never stall on a thread busy with its own list */
		if (HA_SPIN_TRYLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock) != 0)
			continue;
		conn = srv_lookup_conn(is_safe ? &srv->per_thr[i].safe_conns : &srv->per_thr[i].idle_conns, hash);
		while (conn) {
			/* the mux must agree to migrate the connection to our thread */
			if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
				conn_delete_from_tree(conn);
				_HA_ATOMIC_INC(&activity[tid].fd_takeover);
				found = 1;
				break;
			}

			conn = srv_lookup_conn_next(conn);
		}

		/* same fallback to the safe list as for the local thread above */
		if (!found && !is_safe && srv->curr_safe_nb > 0) {
			conn = srv_lookup_conn(&srv->per_thr[i].safe_conns, hash);
			while (conn) {
				if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
					conn_delete_from_tree(conn);
					_HA_ATOMIC_INC(&activity[tid].fd_takeover);
					found = 1;
					is_safe = 1;
					break;
				}

				conn = srv_lookup_conn_next(conn);
			}
		}
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
	} while (!found && (i = (i + 1 == tg->base + tg->count) ? tg->base : i + 1) != stop);

	if (!found)
		conn = NULL;
 done:
	if (conn) {
		/* remember where to restart the scan next time, for fairness */
		_HA_ATOMIC_STORE(&srv->per_tgrp[tgid - 1].next_takeover, (i + 1 == tg->base + tg->count) ? tg->base : i + 1);

		srv_use_conn(srv, conn);

		/* the connection left the idle pool: update all idle accounting */
		_HA_ATOMIC_DEC(&srv->curr_idle_conns);
		_HA_ATOMIC_DEC(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb);
		_HA_ATOMIC_DEC(&srv->curr_idle_thr[i]);
		conn->flags &= ~CO_FL_LIST_MASK;
		__ha_barrier_atomic_store();

		if ((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_SAFE &&
		    conn->mux->flags & MX_FL_HOL_RISK) {
			/* attach the connection to the session private list
			 */
			conn->owner = s->sess;
			session_add_conn(s->sess, conn, conn->target);
		}
		else {
			srv_add_to_avail_list(srv, conn);
		}
	}
	return conn;
}
+
+static int do_connect_server(struct stream *s, struct connection *conn)
+{
+ int ret = SF_ERR_NONE;
+ int conn_flags = 0;
+
+ if (unlikely(!conn || !conn->ctrl || !conn->ctrl->connect))
+ return SF_ERR_INTERNAL;
+
+ if (co_data(&s->res))
+ conn_flags |= CONNECT_HAS_DATA;
+ if (s->conn_retries == s->be->conn_retries)
+ conn_flags |= CONNECT_CAN_USE_TFO;
+ if (!conn_ctrl_ready(conn) || !conn_xprt_ready(conn)) {
+ ret = conn->ctrl->connect(conn, conn_flags);
+ if (ret != SF_ERR_NONE)
+ return ret;
+
+ /* we're in the process of establishing a connection */
+ s->scb->state = SC_ST_CON;
+ }
+ else {
+ /* try to reuse the existing connection, it will be
+ * confirmed once we can send on it.
+ */
+ /* Is the connection really ready ? */
+ if (conn->mux->ctl(conn, MUX_CTL_STATUS, NULL) & MUX_STATUS_READY)
+ s->scb->state = SC_ST_RDY;
+ else
+ s->scb->state = SC_ST_CON;
+ }
+
+ /* needs src ip/port for logging */
+ if (s->flags & SF_SRC_ADDR)
+ conn_get_src(conn);
+
+ return ret;
+}
+
/*
 * This function initiates a connection to the server assigned to this stream
 * (s->target, (s->scb)->addr.to). It will assign a server if none
 * is assigned yet.
 * It can return one of :
 *  - SF_ERR_NONE if everything's OK
 *  - SF_ERR_SRVTO if there are no more servers
 *  - SF_ERR_SRVCL if the connection was refused by the server
 *  - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
 *  - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
 *  - SF_ERR_INTERNAL for any other purely internal errors
 * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
 * The server-facing stream connector is expected to hold a pre-allocated connection.
 */
int connect_server(struct stream *s)
{
	struct connection *cli_conn = objt_conn(strm_orig(s));
	struct connection *srv_conn = NULL;
	struct server *srv;
	int reuse_mode = s->be->options & PR_O_REUSE_MASK;
	int reuse = 0;
	int init_mux = 0;
	int err;
#ifdef USE_OPENSSL
	struct sample *sni_smp = NULL;
#endif
	struct sockaddr_storage *bind_addr = NULL;
	int proxy_line_ret;
	int64_t hash = 0;
	struct conn_hash_params hash_params;

	/* in standard configuration, srv will be valid
	 * it can be NULL for dispatch mode or transparent backend */
	srv = objt_server(s->target);

	/* Override reuse-mode if reverse-connect is used. */
	if (srv && srv->flags & SRV_F_RHTTP)
		reuse_mode = PR_O_REUSE_ALWS;

	err = alloc_dst_address(&s->scb->dst, srv, s);
	if (err != SRV_STATUS_OK)
		return SF_ERR_INTERNAL;

	err = alloc_bind_address(&bind_addr, srv, s->be, s);
	if (err != SRV_STATUS_OK)
		return SF_ERR_INTERNAL;

#ifdef USE_OPENSSL
	/* evaluate the configured SNI expression early; it participates in the
	 * reuse hash and will be applied on the connection after connect() */
	if (srv && srv->ssl_ctx.sni) {
		sni_smp = sample_fetch_as_type(s->be, s->sess, s,
		                               SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
		                               srv->ssl_ctx.sni, SMP_T_STR);
	}
#endif

	/* do not reuse if mode is not http */
	if (!IS_HTX_STRM(s)) {
		DBG_TRACE_STATE("skip idle connections reuse: no htx", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
		goto skip_reuse;
	}

	/* disable reuse if websocket stream and the protocol to use is not the
	 * same as the main protocol of the server.
	 */
	if (unlikely(s->flags & SF_WEBSOCKET) && srv) {
		if (!srv_check_reuse_ws(srv)) {
			DBG_TRACE_STATE("skip idle connections reuse: websocket stream", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			goto skip_reuse;
		}
	}

	/* first, set unique connection parameters and then calculate hash */
	memset(&hash_params, 0, sizeof(hash_params));

	/* 1. target */
	hash_params.target = s->target;

#ifdef USE_OPENSSL
	/* 2. sni
	 * only test if the sample is not null as smp_make_safe (called before
	 * ssl_sock_set_servername) can only fail if this is not the case
	 */
	if (sni_smp) {
		hash_params.sni_prehash =
			conn_hash_prehash(sni_smp->data.u.str.area,
			                  sni_smp->data.u.str.data);
	}
#endif /* USE_OPENSSL */

	/* 3. destination address */
	if (srv && srv_is_transparent(srv))
		hash_params.dst_addr = s->scb->dst;

	/* 4. source address */
	hash_params.src_addr = bind_addr;

	/* 5. proxy protocol */
	if (srv && srv->pp_opts) {
		proxy_line_ret = make_proxy_line(trash.area, trash.size, srv, cli_conn, s);
		if (proxy_line_ret) {
			hash_params.proxy_prehash =
				conn_hash_prehash(trash.area, proxy_line_ret);
		}
	}

	hash = conn_calculate_hash(&hash_params);

	/* first, search for a matching connection in the session's idle conns */
	srv_conn = session_get_conn(s->sess, s->target, hash);
	if (srv_conn) {
		DBG_TRACE_STATE("reuse connection from session", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
		reuse = 1;
	}

	if (srv && !reuse && reuse_mode != PR_O_REUSE_NEVR) {
		/* Below we pick connections from the safe, idle  or
		 * available (which are safe too) lists based
		 * on the strategy, the fact that this is a first or second
		 * (retryable) request, with the indicated priority (1 or 2) :
		 *
		 *          SAFE                 AGGR                ALWS
		 *
		 *      +-----+-----+        +-----+-----+       +-----+-----+
		 *   req| 1st | 2nd |     req| 1st | 2nd |    req| 1st | 2nd |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *  safe|  -  |  2  |    safe|  1  |  2  |   safe|  1  |  2  |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *  idle|  -  |  1  |    idle|  -  |  1  |   idle|  2  |  1  |
		 *  ----+-----+-----+    ----+-----+-----+   ----+-----+-----+
		 *
		 * Idle conns are necessarily looked up on the same thread so
		 * that there is no concurrency issues.
		 */
		if (!eb_is_empty(&srv->per_thr[tid].avail_conns)) {
			srv_conn = srv_lookup_conn(&srv->per_thr[tid].avail_conns, hash);
			if (srv_conn) {
				/* connection cannot be in idle list if used as an avail idle conn. */
				BUG_ON(LIST_INLIST(&srv_conn->idle_list));

				DBG_TRACE_STATE("reuse connection from avail", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
				reuse = 1;
			}
		}

		/* if no available connections found, search for an idle/safe */
		if (!srv_conn && srv->max_idle_conns && srv->curr_idle_conns > 0) {
			const int not_first_req = s->txn && s->txn->flags & TX_NOT_FIRST;
			const int idle = srv->curr_idle_nb > 0;
			const int safe = srv->curr_safe_nb > 0;
			const int retry_safe = (s->be->retry_type & (PR_RE_CONN_FAILED | PR_RE_DISCONNECTED | PR_RE_TIMEOUT)) ==
			                       (PR_RE_CONN_FAILED | PR_RE_DISCONNECTED | PR_RE_TIMEOUT);

			/* second column of the tables above,
			 * search for an idle then safe conn */
			if (not_first_req || retry_safe) {
				if (idle || safe)
					srv_conn = conn_backend_get(s, srv, 0, hash);
			}
			/* first column of the tables above */
			else if (reuse_mode >= PR_O_REUSE_AGGR) {
				/* search for a safe conn */
				if (safe)
					srv_conn = conn_backend_get(s, srv, 1, hash);

				/* search for an idle conn if no safe conn found
				 * on always reuse mode */
				if (!srv_conn &&
				    reuse_mode == PR_O_REUSE_ALWS && idle) {
					/* TODO conn_backend_get should not check the
					 * safe list in this case */
					srv_conn = conn_backend_get(s, srv, 0, hash);
				}
			}

			if (srv_conn) {
				DBG_TRACE_STATE("reuse connection from idle/safe", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
				reuse = 1;
			}
		}
	}


	/* here reuse might have been set above, indicating srv_conn finally
	 * is OK.
	 */

	if (ha_used_fds > global.tune.pool_high_count && srv) {
		struct connection *tokill_conn = NULL;
		/* We can't reuse a connection, and we have more FDs than deemed
		 * acceptable, attempt to kill an idling connection
		 */
		/* First, try from our own idle list */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (!LIST_ISEMPTY(&srv->per_thr[tid].idle_conn_list)) {
			tokill_conn = LIST_ELEM(srv->per_thr[tid].idle_conn_list.n, struct connection *, idle_list);
			conn_delete_from_tree(tokill_conn);
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

			/* Release the idle lock before calling mux->destroy.
			 * It will in turn call srv_release_conn through
			 * conn_free which also uses it.
			 */
			tokill_conn->mux->destroy(tokill_conn->ctx);
		}
		else {
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		}

		/* If not, iterate over other thread's idling pool, and try to grab one */
		if (!tokill_conn) {
			int i;

			for (i = tid; (i = ((i + 1 == global.nbthread) ? 0 : i + 1)) != tid;) {
				// just silence stupid gcc which reports an absurd
				// out-of-bounds warning for <i> which is always
				// exactly zero without threads, but it seems to
				// see it possibly larger.
				ALREADY_CHECKED(i);

				if (HA_SPIN_TRYLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock) != 0)
					continue;

				if (!LIST_ISEMPTY(&srv->per_thr[i].idle_conn_list)) {
					tokill_conn = LIST_ELEM(srv->per_thr[i].idle_conn_list.n, struct connection *, idle_list);
					conn_delete_from_tree(tokill_conn);
				}
				HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);

				if (tokill_conn) {
					/* We got one, put it into the concerned thread's to kill list, and wake it's kill task */

					MT_LIST_APPEND(&idle_conns[i].toremove_conns,
					               &tokill_conn->toremove_list);
					task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
					break;
				}
			}
		}

	}

	if (reuse) {
		if (srv_conn->mux) {
			int avail = srv_conn->mux->avail_streams(srv_conn);

			if (avail <= 1) {
				/* No more streams available, remove it from the list */
				HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
				conn_delete_from_tree(srv_conn);
				HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			}

			if (avail >= 1) {
				if (srv_conn->mux->attach(srv_conn, s->scb->sedesc, s->sess) == -1) {
					/* attach failed: give up on reuse and fall back to a new conn */
					srv_conn = NULL;
					if (sc_reset_endp(s->scb) < 0)
						return SF_ERR_INTERNAL;
					sc_ep_clr(s->scb, ~SE_FL_DETACHED);
				}
			}
			else
				srv_conn = NULL;
		}
		/* otherwise srv_conn is left intact */
	}
	else
		srv_conn = NULL;

skip_reuse:
	/* no reuse or failed to reuse the connection above, pick a new one */
	if (!srv_conn) {
		if (srv && (srv->flags & SRV_F_RHTTP)) {
			/* reverse-http servers only accept pre-established conns */
			DBG_TRACE_USER("cannot open a new connection for reverse server", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			s->conn_err_type = STRM_ET_CONN_ERR;
			return SF_ERR_INTERNAL;
		}

		srv_conn = conn_new(s->target);
		if (srv_conn) {
			DBG_TRACE_STATE("alloc new be connection", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			srv_conn->owner = s->sess;

			/* connection will be attached to the session if
			 * http-reuse mode is never or it is not targeted to a
			 * server */
			if (reuse_mode == PR_O_REUSE_NEVR || !srv)
				conn_set_private(srv_conn);

			/* assign bind_addr to srv_conn; ownership is transferred */
			srv_conn->src = bind_addr;
			bind_addr = NULL;

			srv_conn->hash_node->node.key = hash;
		}
	}

	/* if bind_addr is non NULL free it */
	sockaddr_free(&bind_addr);

	/* srv_conn is still NULL only on allocation failure */
	if (!srv_conn)
		return SF_ERR_RESOURCE;

	/* copy the target address into the connection */
	*srv_conn->dst = *s->scb->dst;

	/* Copy network namespace from client connection */
	srv_conn->proxy_netns = cli_conn ? cli_conn->proxy_netns : NULL;

	if (!srv_conn->xprt) {
		/* set the correct protocol on the output stream connector */
		if (srv) {
			if (conn_prepare(srv_conn, protocol_lookup(srv_conn->dst->ss_family, PROTO_TYPE_STREAM, 0), srv->xprt)) {
				conn_free(srv_conn);
				return SF_ERR_INTERNAL;
			}
		} else if (obj_type(s->target) == OBJ_TYPE_PROXY) {
			int ret;

			/* proxies exclusively run on raw_sock right now */
			ret = conn_prepare(srv_conn, protocol_lookup(srv_conn->dst->ss_family, PROTO_TYPE_STREAM, 0), xprt_get(XPRT_RAW));
			if (ret < 0 || !(srv_conn->ctrl)) {
				conn_free(srv_conn);
				return SF_ERR_INTERNAL;
			}
		}
		else {
			conn_free(srv_conn);
			return SF_ERR_INTERNAL;  /* how did we get there ? */
		}

		if (sc_attach_mux(s->scb, NULL, srv_conn) < 0) {
			conn_free(srv_conn);
			return SF_ERR_INTERNAL;  /* how did we get there ? */
		}
		srv_conn->ctx = s->scb;

#if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
		/* the mux is installed now unless ALPN/NPN negotiation must first
		 * pick it after the TLS handshake */
		if (!srv ||
		    (srv->use_ssl != 1 || (!(srv->ssl_ctx.alpn_str) && !(srv->ssl_ctx.npn_str)) ||
		     srv->mux_proto || !IS_HTX_STRM(s)))
#endif
			init_mux = 1;

		/* process the case where the server requires the PROXY protocol to be sent */
		srv_conn->send_proxy_ofs = 0;

		if (srv && srv->pp_opts) {
			srv_conn->flags |= CO_FL_SEND_PROXY;
			srv_conn->send_proxy_ofs = 1; /* must compute size */
		}

		if (srv && (srv->flags & SRV_F_SOCKS4_PROXY)) {
			srv_conn->send_proxy_ofs = 1;
			srv_conn->flags |= CO_FL_SOCKS4;
		}

#if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
		/* if websocket stream, try to update connection ALPN. */
		if (unlikely(s->flags & SF_WEBSOCKET) &&
		    srv && srv->use_ssl && srv->ssl_ctx.alpn_str) {
			char *alpn = "";
			int force = 0;

			switch (srv->ws) {
			case SRV_WS_AUTO:
				alpn = "\x08http/1.1";
				force = 0;
				break;
			case SRV_WS_H1:
				alpn = "\x08http/1.1";
				force = 1;
				break;
			case SRV_WS_H2:
				alpn = "\x02h2";
				force = 1;
				break;
			}

			if (!conn_update_alpn(srv_conn, ist(alpn), force))
				DBG_TRACE_STATE("update alpn for websocket", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
		}
#endif
	}
	else {
		s->flags |= SF_SRV_REUSED;

		/* Currently there seems to be no known cases of xprt ready
		 * without the mux installed here.
		 */
		BUG_ON(!srv_conn->mux);

		if (!(srv_conn->mux->ctl(srv_conn, MUX_CTL_STATUS, NULL) & MUX_STATUS_READY))
			s->flags |= SF_SRV_REUSED_ANTICIPATED;
	}

	/* flag for logging source ip/port */
	if (strm_fe(s)->options2 & PR_O2_SRC_ADDR)
		s->flags |= SF_SRC_ADDR;

	/* disable lingering */
	if (s->be->options & PR_O_TCP_NOLING)
		s->scb->flags |= SC_FL_NOLINGER;

	if (s->flags & SF_SRV_REUSED) {
		_HA_ATOMIC_INC(&s->be->be_counters.reuse);
		if (srv)
			_HA_ATOMIC_INC(&srv->counters.reuse);
	} else {
		_HA_ATOMIC_INC(&s->be->be_counters.connect);
		if (srv)
			_HA_ATOMIC_INC(&srv->counters.connect);
	}

	err = do_connect_server(s, srv_conn);
	if (err != SF_ERR_NONE)
		return err;

#ifdef USE_OPENSSL
	if (!(s->flags & SF_SRV_REUSED)) {
		if (smp_make_safe(sni_smp))
			ssl_sock_set_servername(srv_conn, sni_smp->data.u.str.area);
	}
#endif /* USE_OPENSSL */

	/* The CO_FL_SEND_PROXY flag may have been set by the connect method,
	 * if so, add our handshake pseudo-XPRT now.
	 */
	if ((srv_conn->flags & CO_FL_HANDSHAKE)) {
		if (xprt_add_hs(srv_conn) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
	}
	conn_xprt_start(srv_conn);

	/* We have to defer the mux initialization until after si_connect()
	 * has been called, as we need the xprt to have been properly
	 * initialized, or any attempt to recv during the mux init may
	 * fail, and flag the connection as CO_FL_ERROR.
	 */
	if (init_mux) {
		const struct mux_ops *alt_mux =
			likely(!(s->flags & SF_WEBSOCKET)) ? NULL : srv_get_ws_proto(srv);
		if (conn_install_mux_be(srv_conn, s->scb, s->sess, alt_mux) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
		if (IS_HTX_STRM(s)) {
			/* If we're doing http-reuse always, and the connection
			 * is not private with available streams (an http2
			 * connection), add it to the available list, so that
			 * others can use it right away. If the connection is
			 * private or we're doing http-reuse safe and the mux
			 * protocol supports multiplexing, add it in the
			 * session server list.
			 */
			if (srv && reuse_mode == PR_O_REUSE_ALWS &&
			    !(srv_conn->flags & CO_FL_PRIVATE) &&
			    srv_conn->mux->avail_streams(srv_conn) > 0) {
				srv_add_to_avail_list(srv, srv_conn);
			}
			else if (srv_conn->flags & CO_FL_PRIVATE ||
			         (reuse_mode == PR_O_REUSE_SAFE &&
			          srv_conn->mux->flags & MX_FL_HOL_RISK)) {
				/* If it fails now, the same will be done in mux->detach() callback */
				session_add_conn(s->sess, srv_conn, srv_conn->target);
			}
		}
	}

#if defined(USE_OPENSSL) && (defined(OPENSSL_IS_BORINGSSL) || (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L))

	if (!reuse && cli_conn && srv && srv_conn->mux &&
	    (srv->ssl_ctx.options & SRV_SSL_O_EARLY_DATA) &&
	    /* Only attempt to use early data if either the client sent
	     * early data, so that we know it can handle a 425, or if
	     * we are allowed to retry requests on early data failure, and
	     * it's our first try
	     */
	    ((cli_conn->flags & CO_FL_EARLY_DATA) ||
	     ((s->be->retry_type & PR_RE_EARLY_ERROR) && !s->conn_retries)) &&
	    co_data(sc_oc(s->scb)) &&
	    srv_conn->flags & CO_FL_SSL_WAIT_HS)
		srv_conn->flags &= ~(CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN);
#endif

	/* set connect timeout */
	s->conn_exp = tick_add_ifset(now_ms, s->be->timeout.connect);

	if (srv) {
		int count;

		s->flags |= SF_CURR_SESS;
		count = _HA_ATOMIC_ADD_FETCH(&srv->cur_sess, 1);
		HA_ATOMIC_UPDATE_MAX(&srv->counters.cur_sess_max, count);
		if (s->be->lbprm.server_take_conn)
			s->be->lbprm.server_take_conn(srv);
	}

	/* Now handle synchronously connected sockets. We know the stream connector
	 * is at least in state SC_ST_CON. These ones typically are UNIX
	 * sockets, socket pairs, and occasionally TCP connections on the
	 * loopback on a heavily loaded system.
	 */
	if (srv_conn->flags & CO_FL_ERROR)
		s->scb->flags |= SC_FL_ERROR;

	/* If we had early data, and the handshake ended, then
	 * we can remove the flag, and attempt to wake the task up,
	 * in the event there's an analyser waiting for the end of
	 * the handshake.
	 */
	if (!(srv_conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)))
		sc_ep_clr(s->scb, SE_FL_WAIT_FOR_HS);

	if (!sc_state_in(s->scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
	    (srv_conn->flags & CO_FL_WAIT_XPRT) == 0) {
		s->conn_exp = TICK_ETERNITY;
		sc_oc(s->scb)->flags |= CF_WRITE_EVENT;
		if (s->scb->state == SC_ST_CON)
			s->scb->state = SC_ST_RDY;
	}

	/* Report EOI on the channel if it was reached from the mux point of
	 * view.
	 *
	 * Note: This test is only required because si_cs_process is also the SI
	 *       wake callback. Otherwise si_cs_recv()/si_cs_send() already take
	 *       care of it.
	 */
	if (sc_ep_test(s->scb, SE_FL_EOI) && !(s->scb->flags & SC_FL_EOI)) {
		s->scb->flags |= SC_FL_EOI;
		sc_ic(s->scb)->flags |= CF_READ_EVENT;
	}

	/* catch all sync connect while the mux is not already installed */
	if (!srv_conn->mux && !(srv_conn->flags & CO_FL_WAIT_XPRT)) {
		if (conn_create_mux(srv_conn) < 0) {
			conn_full_close(srv_conn);
			return SF_ERR_INTERNAL;
		}
	}

	return SF_ERR_NONE;  /* connection is OK */
}
+
+
+/* This function performs the "redispatch" part of a connection attempt. It
+ * will assign a server if required, queue the connection if required, and
+ * handle errors that might arise at this level. It can change the server
+ * state. It will return 1 if it encounters an error, switches the server
+ * state, or has to queue a connection. Otherwise, it will return 0 indicating
+ * that the connection is ready to use.
+ */
+
+int srv_redispatch_connect(struct stream *s)
+{
+ struct server *srv;
+ int conn_err;
+
+ /* We know that we don't have any connection pending, so we will
+ * try to get a new one, and wait in this state if it's queued
+ */
+ redispatch:
+ conn_err = assign_server_and_queue(s);
+ srv = objt_server(s->target);
+
+ switch (conn_err) {
+ case SRV_STATUS_OK:
+ break;
+
+ case SRV_STATUS_FULL:
+ /* The server has reached its maxqueue limit. Either PR_O_REDISP is set
+ * and we can redispatch to another server, or it is not and we return
+ * 503. This only makes sense in DIRECT mode however, because normal LB
+ * algorithms would never select such a server, and hash algorithms
+ * would bring us on the same server again. Note that s->target is set
+ * in this case.
+ */
+ if (((s->flags & (SF_DIRECT|SF_FORCE_PRST)) == SF_DIRECT) &&
+ (s->be->options & PR_O_REDISP)) {
+ s->flags &= ~(SF_DIRECT | SF_ASSIGNED);
+ sockaddr_free(&s->scb->dst);
+ goto redispatch;
+ }
+
+ if (!s->conn_err_type) {
+ s->conn_err_type = STRM_ET_QUEUE_ERR;
+ }
+
+ _HA_ATOMIC_INC(&srv->counters.failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
+ return 1;
+
+ case SRV_STATUS_NOSRV:
+ /* note: it is guaranteed that srv == NULL here */
+ if (!s->conn_err_type) {
+ s->conn_err_type = STRM_ET_CONN_ERR;
+ }
+
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
+ return 1;
+
+ case SRV_STATUS_QUEUED:
+ s->conn_exp = tick_add_ifset(now_ms, s->be->timeout.queue);
+ s->scb->state = SC_ST_QUE;
+ /* do nothing else and do not wake any other stream up */
+ return 1;
+
+ case SRV_STATUS_INTERNAL:
+ default:
+ if (!s->conn_err_type) {
+ s->conn_err_type = STRM_ET_CONN_OTHER;
+ }
+
+ if (srv)
+ srv_inc_sess_ctr(srv);
+ if (srv)
+ srv_set_sess_last(srv);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.failed_conns);
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
+
+ /* release other streams waiting for this server */
+ if (may_dequeue_tasks(srv, s->be))
+ process_srv_queue(srv);
+ return 1;
+ }
+ /* if we get here, it's because we got SRV_STATUS_OK, which also
+ * means that the connection has not been queued.
+ */
+ return 0;
+}
+
+/* Check if the connection request is in such a state that it can be aborted. */
+static int back_may_abort_req(struct channel *req, struct stream *s)
+{
+ return ((s->scf->flags & SC_FL_ERROR) ||
+ ((s->scb->flags & (SC_FL_SHUT_WANTED|SC_FL_SHUT_DONE)) && /* empty and client aborted */
+ (!co_data(req) || (s->be->options & PR_O_ABRT_CLOSE))));
+}
+
/* Update back stream connector status for input states SC_ST_ASS, SC_ST_QUE,
 * SC_ST_TAR. Other input states are simply ignored.
 * Possible output states are SC_ST_CLO, SC_ST_TAR, SC_ST_ASS, SC_ST_REQ, SC_ST_CON
 * and SC_ST_EST. Flags must have previously been updated for timeouts and other
 * conditions.
 */
void back_try_conn_req(struct stream *s)
{
	struct server *srv = objt_server(s->target);
	struct stconn *sc = s->scb;
	struct channel *req = &s->req;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

	if (sc->state == SC_ST_ASS) {
		/* Server assigned to connection request, we have to try to connect now */
		int conn_err;

		/* Before we try to initiate the connection, see if the
		 * request may be aborted instead.
		 */
		if (back_may_abort_req(req, s)) {
			s->conn_err_type |= STRM_ET_CONN_ABRT;
			DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		conn_err = connect_server(s);
		/* refresh <srv>: connect_server() may have changed s->target */
		srv = objt_server(s->target);

		if (conn_err == SF_ERR_NONE) {
			/* state = SC_ST_CON or SC_ST_EST now */
			if (srv)
				srv_inc_sess_ctr(srv);
			if (srv)
				srv_set_sess_last(srv);
			DBG_TRACE_STATE("connection attempt", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			goto end;
		}

		/* We have received a synchronous error. We might have to
		 * abort, retry immediately or redispatch.
		 */
		if (conn_err == SF_ERR_INTERNAL) {
			if (!s->conn_err_type) {
				s->conn_err_type = STRM_ET_CONN_OTHER;
			}

			if (srv)
				srv_inc_sess_ctr(srv);
			if (srv)
				srv_set_sess_last(srv);
			if (srv)
				_HA_ATOMIC_INC(&srv->counters.failed_conns);
			_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);

			/* release other streams waiting for this server */
			sess_change_server(s, NULL);
			if (may_dequeue_tasks(srv, s->be))
				process_srv_queue(srv);

			/* Failed and not retryable. */
			sc_abort(sc);
			sc_shutdown(sc);
			sc->flags |= SC_FL_ERROR;

			s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			/* no stream was ever accounted for this server */
			sc->state = SC_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, sc);
			DBG_TRACE_STATE("internal error during connection", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		/* We are facing a retryable error, but we don't want to run a
		 * turn-around now, as the problem is likely a source port
		 * allocation problem, so we want to retry now.
		 */
		sc->state = SC_ST_CER;
		sc->flags &= ~SC_FL_ERROR;
		back_handle_st_cer(s);

		DBG_TRACE_STATE("connection error, retry", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
		/* now sc->state is one of SC_ST_CLO, SC_ST_TAR, SC_ST_ASS, SC_ST_REQ */
	}
	else if (sc->state == SC_ST_QUE) {
		/* connection request was queued, check for any update */
		if (!pendconn_dequeue(s)) {
			/* The connection is not in the queue anymore. Either
			 * we have a server connection slot available and we
			 * go directly to the assigned state, or we need to
			 * load-balance first and go to the INI state.
			 */
			s->conn_exp = TICK_ETERNITY;
			if (unlikely(!(s->flags & SF_ASSIGNED)))
				sc->state = SC_ST_REQ;
			else {
				s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
				sc->state = SC_ST_ASS;
			}
			DBG_TRACE_STATE("dequeue connection request", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			goto end;
		}

		/* Connection request still in queue... */
		if (s->flags & SF_CONN_EXP) {
			/* ... and timeout expired */
			s->conn_exp = TICK_ETERNITY;
			s->flags &= ~SF_CONN_EXP;
			s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			if (srv)
				_HA_ATOMIC_INC(&srv->counters.failed_conns);
			_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
			sc_abort(sc);
			sc_shutdown(sc);
			req->flags |= CF_WRITE_TIMEOUT;
			if (!s->conn_err_type)
				s->conn_err_type = STRM_ET_QUEUE_TO;
			sc->state = SC_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, sc);
			DBG_TRACE_STATE("connection request still queued", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			goto end;
		}

		/* Connection remains in queue, check if we have to abort it */
		if (back_may_abort_req(req, s)) {
			s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);

			/* we may need to know the position in the queue for logging */
			pendconn_cond_unlink(s->pend_pos);

			s->conn_err_type |= STRM_ET_QUEUE_ABRT;
			DBG_TRACE_STATE("abort queued connection request", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		/* Nothing changed */
	}
	else if (sc->state == SC_ST_TAR) {
		/* Connection request might be aborted */
		if (back_may_abort_req(req, s)) {
			s->conn_err_type |= STRM_ET_CONN_ABRT;
			DBG_TRACE_STATE("connection aborted", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto abort_connection;
		}

		if (!(s->flags & SF_CONN_EXP))
			return; /* still in turn-around */

		s->flags &= ~SF_CONN_EXP;
		s->conn_exp = TICK_ETERNITY;

		/* we keep trying on the same server as long as the stream is
		 * marked "assigned".
		 * FIXME: Should we force a redispatch attempt when the server is down ?
		 */
		if (s->flags & SF_ASSIGNED)
			sc->state = SC_ST_ASS;
		else
			sc->state = SC_ST_REQ;

		DBG_TRACE_STATE("retry connection now", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
	}

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
	return;

abort_connection:
	/* give up */
	s->conn_exp = TICK_ETERNITY;
	s->flags &= ~SF_CONN_EXP;
	sc_abort(sc);
	sc_shutdown(sc);
	sc->state = SC_ST_CLO;
	if (s->srv_error)
		s->srv_error(s, sc);
	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
	return;
}
+
/* This function initiates a server connection request on a stream connector
 * already in SC_ST_REQ state. Upon success, the state goes to SC_ST_ASS for
 * a real connection to a server, indicating that a server has been assigned,
 * or SC_ST_RDY for a successful connection to an applet. It may also return
 * SC_ST_QUE, or SC_ST_CLO upon error.
 */
void back_handle_st_req(struct stream *s)
{
	struct stconn *sc = s->scb;

	/* only act when a connection request is actually pending */
	if (sc->state != SC_ST_REQ)
		return;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

	if (unlikely(obj_type(s->target) == OBJ_TYPE_APPLET)) {
		struct appctx *appctx;

		/* The target is an applet but the SC is in SC_ST_REQ. Thus it
		 * means no appctx are attached to the SC. Otherwise, it will be
		 * in SC_ST_RDY state. So, try to create the appctx now.
		 */
		BUG_ON(sc_appctx(sc));
		appctx = sc_applet_create(sc, objt_applet(s->target));
		if (!appctx) {
			/* No more memory, let's immediately abort. Force the
			 * error code to ignore the ERR_LOCAL which is not a
			 * real error.
			 */
			s->flags &= ~(SF_ERR_MASK | SF_FINST_MASK);

			sc_abort(sc);
			sc_shutdown(sc);
			sc->flags |= SC_FL_ERROR;
			s->conn_err_type = STRM_ET_CONN_RES;
			sc->state = SC_ST_CLO;
			if (s->srv_error)
				s->srv_error(s, sc);
			DBG_TRACE_STATE("failed to register applet", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		DBG_TRACE_STATE("applet registered", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
		goto end;
	}

	/* Try to assign a server */
	if (srv_redispatch_connect(s) != 0) {
		/* We did not get a server. Either we queued the
		 * connection request, or we encountered an error.
		 */
		if (sc->state == SC_ST_QUE) {
			DBG_TRACE_STATE("connection request queued", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
			goto end;
		}

		/* we did not get any server, let's check the cause */
		sc_abort(sc);
		sc_shutdown(sc);
		sc->flags |= SC_FL_ERROR;
		if (!s->conn_err_type)
			s->conn_err_type = STRM_ET_CONN_OTHER;
		sc->state = SC_ST_CLO;
		if (s->srv_error)
			s->srv_error(s, sc);
		DBG_TRACE_STATE("connection request failed", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	/* The server is assigned */
	s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
	sc->state = SC_ST_ASS;
	be_set_sess_last(s->be);
	DBG_TRACE_STATE("connection request assigned to a server", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
}
+
+/* This function is called with (sc->state == SC_ST_CON) meaning that a
+ * connection was attempted and that the file descriptor is already allocated.
+ * We must check for timeout, error and abort. Possible output states are
+ * SC_ST_CER (error), SC_ST_DIS (abort), and SC_ST_CON (no change). This only
+ * works with connection-based streams. We know that there were no I/O event
+ * when reaching this function. Timeouts and errors are *not* cleared.
+ */
+void back_handle_st_con(struct stream *s)
+{
+ struct stconn *sc = s->scb;
+ struct channel *req = &s->req;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
+
+ /* the client might want to abort */
+ if ((s->scf->flags & SC_FL_SHUT_DONE) ||
+ ((s->scb->flags & SC_FL_SHUT_WANTED) &&
+ (!co_data(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
+ sc->flags |= SC_FL_NOLINGER;
+ sc_shutdown(sc);
+ s->conn_err_type |= STRM_ET_CONN_ABRT;
+ if (s->srv_error)
+ s->srv_error(s, sc);
+ /* Note: state = SC_ST_DIS now */
+ DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
+ goto end;
+ }
+
+ done:
+ /* retryable error ? */
+ if ((s->flags & SF_CONN_EXP) || (sc->flags & SC_FL_ERROR)) {
+ if (!s->conn_err_type) {
+ if ((sc->flags & SC_FL_ERROR))
+ s->conn_err_type = STRM_ET_CONN_ERR;
+ else
+ s->conn_err_type = STRM_ET_CONN_TO;
+ }
+
+ sc->state = SC_ST_CER;
+ DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
+ }
+
+ end:
+ DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
+}
+
/* This function is called with (sc->state == SC_ST_CER) meaning that a
 * previous connection attempt has failed and that the file descriptor
 * has already been released. Possible causes include asynchronous error
 * notification and time out. Possible output states are SC_ST_CLO when
 * retries are exhausted, SC_ST_TAR when a delay is wanted before a new
 * connection attempt, SC_ST_ASS when it's wise to retry on the same server,
 * and SC_ST_REQ when an immediate redispatch is wanted. The buffers are
 * marked as in error state. Timeouts and errors are cleared before retrying.
 */
void back_handle_st_cer(struct stream *s)
{
	struct stconn *sc = s->scb;
	/* remember whether the failure was an asynchronous connection error,
	 * which may warrant a turn-around delay below
	 */
	int must_tar = !!(sc->flags & SC_FL_ERROR);

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

	/* the connect timer is no longer relevant for this attempt */
	s->conn_exp = TICK_ETERNITY;
	s->flags &= ~SF_CONN_EXP;

	/* we probably have to release last stream from the server */
	if (objt_server(s->target)) {
		struct connection *conn = sc_conn(sc);

		health_adjust(__objt_server(s->target), HANA_STATUS_L4_ERR);

		if (s->flags & SF_CURR_SESS) {
			s->flags &= ~SF_CURR_SESS;
			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
		}

		if ((sc->flags & SC_FL_ERROR) &&
		    conn && conn->err_code == CO_ER_SSL_MISMATCH_SNI) {
			/* We tried to connect to a server which is configured
			 * with "verify required" and which doesn't have the
			 * "verifyhost" directive. The server presented a wrong
			 * certificate (a certificate for an unexpected name),
			 * which implies that we have used SNI in the handshake,
			 * and that the server doesn't have the associated cert
			 * and presented a default one.
			 *
			 * This is a serious enough issue not to retry. It's
			 * especially important because this wrong name might
			 * either be the result of a configuration error, and
			 * retrying will only hammer the server, or is caused
			 * by the use of a wrong SNI value, most likely
			 * provided by the client and we don't want to let the
			 * client provoke retries.
			 */
			s->conn_retries = s->be->conn_retries;
			DBG_TRACE_DEVEL("Bad SSL cert, disable connection retries", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
		}
	}

	/* ensure that we have enough retries left */
	if (s->conn_retries >= s->be->conn_retries || !(s->be->retry_type & PR_RE_CONN_FAILED)) {
		if (!s->conn_err_type) {
			s->conn_err_type = STRM_ET_CONN_ERR;
		}

		if (objt_server(s->target))
			_HA_ATOMIC_INC(&objt_server(s->target)->counters.failed_conns);
		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
		sess_change_server(s, NULL);
		if (may_dequeue_tasks(objt_server(s->target), s->be))
			process_srv_queue(objt_server(s->target));

		/* shutw is enough to stop a connecting socket */
		sc_shutdown(sc);
		sc->flags |= SC_FL_ERROR;

		sc->state = SC_ST_CLO;
		if (s->srv_error)
			s->srv_error(s, sc);

		DBG_TRACE_STATE("connection failed", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	/* At this stage, we will trigger a connection retry (with or without
	 * redispatch). Thus we must reset the SI endpoint on the server side
	 * an close the attached connection. It is especially important to do it
	 * now if the retry is not immediately performed, to be sure to release
	 * resources as soon as possible and to not catch errors from the lower
	 * layers in an unexpected state (i.e < ST_CONN).
	 *
	 * Note: the stream connector will be switched to ST_REQ, ST_ASS or
	 * ST_TAR and SC_FL_ERROR and SF_CONN_EXP flags will be unset.
	 */
	if (sc_reset_endp(sc) < 0) {
		if (!s->conn_err_type)
			s->conn_err_type = STRM_ET_CONN_OTHER;

		if (objt_server(s->target))
			_HA_ATOMIC_INC(&objt_server(s->target)->counters.internal_errors);
		_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
		sess_change_server(s, NULL);
		if (may_dequeue_tasks(objt_server(s->target), s->be))
			process_srv_queue(objt_server(s->target));

		/* shutw is enough to stop a connecting socket */
		sc_shutdown(sc);
		sc->flags |= SC_FL_ERROR;

		sc->state = SC_ST_CLO;
		if (s->srv_error)
			s->srv_error(s, sc);

		DBG_TRACE_STATE("error resetting endpoint", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
		goto end;
	}

	s->conn_retries++;
	stream_choose_redispatch(s);

	if (must_tar) {
		/* The error was an asynchronous connection error, and we will
		 * likely have to retry connecting to the same server, most
		 * likely leading to the same result. To avoid this, we wait
		 * MIN(one second, connect timeout) before retrying. We don't
		 * do it when the failure happened on a reused connection
		 * though.
		 */

		int delay = 1000;
		const int reused = (s->flags & SF_SRV_REUSED) &&
		                   !(s->flags & SF_SRV_REUSED_ANTICIPATED);

		if (s->be->timeout.connect && s->be->timeout.connect < delay)
			delay = s->be->timeout.connect;

		if (!s->conn_err_type)
			s->conn_err_type = STRM_ET_CONN_ERR;

		/* only wait when we're retrying on the same server */
		if ((sc->state == SC_ST_ASS ||
		     (s->be->srv_act <= 1)) && !reused) {
			sc->state = SC_ST_TAR;
			s->conn_exp = tick_add(now_ms, MS_TO_TICKS(delay));
		}
		DBG_TRACE_STATE("retry a new connection", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
	}

 end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
}
+
/* This function is called with (sc->state == SC_ST_RDY) meaning that a
 * connection was attempted, that the file descriptor is already allocated,
 * and that it has succeeded. We must still check for errors and aborts.
 * Possible output states are SC_ST_EST (established), SC_ST_CER (error),
 * and SC_ST_DIS (abort). This only works with connection-based streams.
 * Timeouts and errors are *not* cleared.
 */
void back_handle_st_rdy(struct stream *s)
{
	struct stconn *sc = s->scb;
	struct channel *req = &s->req;

	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);

	if (unlikely(obj_type(s->target) == OBJ_TYPE_APPLET)) {
		/* Here the appctx must exists because the SC was set to
		 * SC_ST_RDY state when the appctx was created.
		 */
		BUG_ON(!sc_appctx(s->scb));

		/* for an applet, account the queue time now and remember the
		 * request timestamp if not already set
		 */
		if (!s->logs.request_ts)
			s->logs.request_ts = now_ns;
		s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
		be_set_sess_last(s->be);
	}

	/* We know the connection at least succeeded, though it could have
	 * since met an error for any other reason. At least it didn't time out
	 * even though the timeout might have been reported right after success.
	 * We need to take care of various situations here :
	 *   - everything might be OK. We have to switch to established.
	 *   - an I/O error might have been reported after a successful transfer,
	 *     which is not retryable and needs to be logged correctly, and needs
	 *     established as well
	 *   - SC_ST_CON implies !CF_WROTE_DATA but not conversely as we could
	 *     have validated a connection with incoming data (e.g. TCP with a
	 *     banner protocol), or just a successful connect() probe.
	 *   - the client might have requested a connection abort, this needs to
	 *     be checked before we decide to retry anything.
	 */

	/* it's still possible to handle client aborts or connection retries
	 * before any data were sent.
	 */
	if (!(req->flags & CF_WROTE_DATA)) {
		/* client abort ? */
		if ((s->scf->flags & SC_FL_SHUT_DONE) ||
		    ((s->scb->flags & SC_FL_SHUT_WANTED) &&
		     (!co_data(req) || (s->be->options & PR_O_ABRT_CLOSE)))) {
			/* give up */
			sc->flags |= SC_FL_NOLINGER;
			sc_shutdown(sc);
			s->conn_err_type |= STRM_ET_CONN_ABRT;
			if (s->srv_error)
				s->srv_error(s, sc);
			DBG_TRACE_STATE("client abort during connection attempt", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}

		/* retryable error ? */
		if (sc->flags & SC_FL_ERROR) {
			if (!s->conn_err_type)
				s->conn_err_type = STRM_ET_CONN_ERR;
			sc->state = SC_ST_CER;
			DBG_TRACE_STATE("connection failed, retry", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
			goto end;
		}
	}

	/* data were sent and/or we had no error, back_establish() will
	 * now take over.
	 */
	DBG_TRACE_STATE("connection established", STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
	s->conn_err_type = STRM_ET_NONE;
	sc->state = SC_ST_EST;

  end:
	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
}
+
+/* sends a log message when a backend goes down, and also sets last
+ * change date.
+ */
+void set_backend_down(struct proxy *be)
+{
+ be->last_change = ns_to_sec(now_ns);
+ _HA_ATOMIC_INC(&be->down_trans);
+
+ if (!(global.mode & MODE_STARTING)) {
+ ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
+ send_log(be, LOG_EMERG, "%s %s has no server available!\n", proxy_type_str(be), be->id);
+ }
+}
+
/* Apply RDP cookie persistence to the current stream. For this, the function
 * tries to extract an RDP cookie from the request buffer, and look for the
 * matching server in the list. If the server is found, it is assigned to the
 * stream. This always returns 1, and the analyser removes itself from the
 * list. Nothing is performed if a server was already assigned.
 */
int tcp_persist_rdp_cookie(struct stream *s, struct channel *req, int an_bit)
{
	struct proxy *px = s->be;
	int ret;
	struct sample smp;
	struct server *srv = px->srv;
	uint16_t port;
	uint32_t addr;
	char *p;

	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);

	/* never override a previously assigned server */
	if (s->flags & SF_ASSIGNED)
		goto no_cookie;

	memset(&smp, 0, sizeof(smp));

	/* SMP_F_MAY_CHANGE means more data is needed for a reliable decision,
	 * so in that case we simply give up for this pass.
	 */
	ret = fetch_rdp_cookie_name(s, &smp, s->be->rdp_cookie_name, s->be->rdp_cookie_len);
	if (ret == 0 || (smp.flags & SMP_F_MAY_CHANGE) || smp.data.u.str.data == 0)
		goto no_cookie;

	/* The cookie value is expected to look like "<ip>.<port>." where <ip>
	 * is the server's IPv4 address as a decimal integer in network byte
	 * order and <port> is the server's port as a decimal integer in
	 * network byte order. Note that the parser below requires another '.'
	 * right after the port field, otherwise the cookie is ignored.
	 */
	addr = strtoul(smp.data.u.str.area, &p, 10);
	if (*p != '.')
		goto no_cookie;
	p++;

	port = ntohs(strtoul(p, &p, 10));
	if (*p != '.')
		goto no_cookie;

	/* look up an IPv4 server matching both address and port; a stopped
	 * server is only eligible when "option persist" is set.
	 */
	s->target = NULL;
	while (srv) {
		if (srv->addr.ss_family == AF_INET &&
		    port == srv->svc_port &&
		    addr == ((struct sockaddr_in *)&srv->addr)->sin_addr.s_addr) {
			if ((srv->cur_state != SRV_ST_STOPPED) || (px->options & PR_O_PERSIST)) {
				/* we found the server and it is usable */
				s->flags |= SF_DIRECT | SF_ASSIGNED;
				s->target = &srv->obj_type;
				break;
			}
		}
		srv = srv->next;
	}

no_cookie:
	req->analysers &= ~an_bit;
	req->analyse_exp = TICK_ETERNITY;
	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
	return 1;
}
+
+int be_downtime(struct proxy *px) {
+ if (px->lbprm.tot_weight && px->last_change < ns_to_sec(now_ns)) // ignore negative time
+ return px->down_time;
+
+ return ns_to_sec(now_ns) - px->last_change + px->down_time;
+}
+
+/*
+ * This function returns a string containing the balancing
+ * mode of the proxy in a format suitable for stats.
+ */
+
+const char *backend_lb_algo_str(int algo) {
+
+ if (algo == BE_LB_ALGO_RR)
+ return "roundrobin";
+ else if (algo == BE_LB_ALGO_SRR)
+ return "static-rr";
+ else if (algo == BE_LB_ALGO_FAS)
+ return "first";
+ else if (algo == BE_LB_ALGO_LC)
+ return "leastconn";
+ else if (algo == BE_LB_ALGO_SH)
+ return "source";
+ else if (algo == BE_LB_ALGO_UH)
+ return "uri";
+ else if (algo == BE_LB_ALGO_PH)
+ return "url_param";
+ else if (algo == BE_LB_ALGO_HH)
+ return "hdr";
+ else if (algo == BE_LB_ALGO_RCH)
+ return "rdp-cookie";
+ else if (algo == BE_LB_ALGO_SMP)
+ return "hash";
+ else if (algo == BE_LB_ALGO_NONE)
+ return "none";
+ else
+ return "unknown";
+}
+
+/* This function parses a "balance" statement in a backend section describing
+ * <curproxy>. It returns -1 if there is any error, otherwise zero. If it
+ * returns -1, it will write an error message into the <err> buffer which will
+ * automatically be allocated and must be passed as NULL. The trailing '\n'
+ * will not be written. The function must be called with <args> pointing to the
+ * first word after "balance".
+ */
+int backend_parse_balance(const char **args, char **err, struct proxy *curproxy)
+{
+ if (!*(args[0])) {
+ /* if no option is set, use round-robin by default */
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_RR;
+ return 0;
+ }
+
+ if (strcmp(args[0], "roundrobin") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_RR;
+ }
+ else if (strcmp(args[0], "static-rr") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_SRR;
+ }
+ else if (strcmp(args[0], "first") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_FAS;
+ }
+ else if (strcmp(args[0], "leastconn") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_LC;
+ }
+ else if (!strncmp(args[0], "random", 6)) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_RND;
+ curproxy->lbprm.arg_opt1 = 2;
+
+ if (*(args[0] + 6) == '(' && *(args[0] + 7) != ')') { /* number of draws */
+ const char *beg;
+ char *end;
+
+ beg = args[0] + 7;
+ curproxy->lbprm.arg_opt1 = strtol(beg, &end, 0);
+
+ if (*end != ')') {
+ if (!*end)
+ memprintf(err, "random : missing closing parenthesis.");
+ else
+ memprintf(err, "random : unexpected character '%c' after argument.", *end);
+ return -1;
+ }
+
+ if (curproxy->lbprm.arg_opt1 < 1) {
+ memprintf(err, "random : number of draws must be at least 1.");
+ return -1;
+ }
+ }
+ }
+ else if (strcmp(args[0], "source") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_SH;
+ }
+ else if (strcmp(args[0], "uri") == 0) {
+ int arg = 1;
+
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_UH;
+ curproxy->lbprm.arg_opt1 = 0; // "whole", "path-only"
+ curproxy->lbprm.arg_opt2 = 0; // "len"
+ curproxy->lbprm.arg_opt3 = 0; // "depth"
+
+ while (*args[arg]) {
+ if (strcmp(args[arg], "len") == 0) {
+ if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
+ memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
+ return -1;
+ }
+ curproxy->lbprm.arg_opt2 = atoi(args[arg+1]);
+ arg += 2;
+ }
+ else if (strcmp(args[arg], "depth") == 0) {
+ if (!*args[arg+1] || (atoi(args[arg+1]) <= 0)) {
+ memprintf(err, "%s : '%s' expects a positive integer (got '%s').", args[0], args[arg], args[arg+1]);
+ return -1;
+ }
+ /* hint: we store the position of the ending '/' (depth+1) so
+ * that we avoid a comparison while computing the hash.
+ */
+ curproxy->lbprm.arg_opt3 = atoi(args[arg+1]) + 1;
+ arg += 2;
+ }
+ else if (strcmp(args[arg], "whole") == 0) {
+ curproxy->lbprm.arg_opt1 |= 1;
+ arg += 1;
+ }
+ else if (strcmp(args[arg], "path-only") == 0) {
+ curproxy->lbprm.arg_opt1 |= 2;
+ arg += 1;
+ }
+ else {
+ memprintf(err, "%s only accepts parameters 'len', 'depth', 'path-only', and 'whole' (got '%s').", args[0], args[arg]);
+ return -1;
+ }
+ }
+ }
+ else if (strcmp(args[0], "url_param") == 0) {
+ if (!*args[1]) {
+ memprintf(err, "%s requires an URL parameter name.", args[0]);
+ return -1;
+ }
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_PH;
+
+ free(curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_str = strdup(args[1]);
+ curproxy->lbprm.arg_len = strlen(args[1]);
+ if (*args[2]) {
+ if (strcmp(args[2], "check_post") != 0) {
+ memprintf(err, "%s only accepts 'check_post' modifier (got '%s').", args[0], args[2]);
+ return -1;
+ }
+ }
+ }
+ else if (strcmp(args[0], "hash") == 0) {
+ if (!*args[1]) {
+ memprintf(err, "%s requires a sample expression.", args[0]);
+ return -1;
+ }
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_SMP;
+
+ ha_free(&curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_str = strdup(args[1]);
+ curproxy->lbprm.arg_len = strlen(args[1]);
+
+ if (*args[2]) {
+ memprintf(err, "%s takes no other argument (got '%s').", args[0], args[2]);
+ return -1;
+ }
+ }
+ else if (!strncmp(args[0], "hdr(", 4)) {
+ const char *beg, *end;
+
+ beg = args[0] + 4;
+ end = strchr(beg, ')');
+
+ if (!end || end == beg) {
+ memprintf(err, "hdr requires an http header field name.");
+ return -1;
+ }
+
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_HH;
+
+ free(curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_len = end - beg;
+ curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
+ curproxy->lbprm.arg_opt1 = 0;
+
+ if (*args[1]) {
+ if (strcmp(args[1], "use_domain_only") != 0) {
+ memprintf(err, "%s only accepts 'use_domain_only' modifier (got '%s').", args[0], args[1]);
+ return -1;
+ }
+ curproxy->lbprm.arg_opt1 = 1;
+ }
+ }
+ else if (!strncmp(args[0], "rdp-cookie", 10)) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_RCH;
+
+ if ( *(args[0] + 10 ) == '(' ) { /* cookie name */
+ const char *beg, *end;
+
+ beg = args[0] + 11;
+ end = strchr(beg, ')');
+
+ if (!end || end == beg) {
+ memprintf(err, "rdp-cookie : missing cookie name.");
+ return -1;
+ }
+
+ free(curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_str = my_strndup(beg, end - beg);
+ curproxy->lbprm.arg_len = end - beg;
+ }
+ else if ( *(args[0] + 10 ) == '\0' ) { /* default cookie name 'mstshash' */
+ free(curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_str = strdup("mstshash");
+ curproxy->lbprm.arg_len = strlen(curproxy->lbprm.arg_str);
+ }
+ else { /* syntax */
+ memprintf(err, "rdp-cookie : missing cookie name.");
+ return -1;
+ }
+ }
+ else if (strcmp(args[0], "log-hash") == 0) {
+ if (!*args[1]) {
+ memprintf(err, "%s requires a converter list.", args[0]);
+ return -1;
+ }
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_LH;
+
+ ha_free(&curproxy->lbprm.arg_str);
+ curproxy->lbprm.arg_str = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "sticky") == 0) {
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_LS;
+ }
+ else {
+ memprintf(err, "only supports 'roundrobin', 'static-rr', 'leastconn', 'source', 'uri', 'url_param', 'hash', 'hdr(name)', 'rdp-cookie(name)', 'log-hash' and 'sticky' options.");
+ return -1;
+ }
+ return 0;
+}
+
+
+/************************************************************************/
+/* All supported sample and ACL keywords must be declared here. */
+/************************************************************************/
+
+/* set temp integer to the number of enabled servers on the proxy.
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_nbsrv(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct proxy *px = args->data.prx;
+
+ if (px == NULL)
+ return 0;
+ if (px->cap & PR_CAP_DEF)
+ px = smp->px;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+
+ smp->data.u.sint = be_usable_srv(px);
+
+ return 1;
+}
+
+/* report in smp->flags a success or failure depending on the designated
+ * server's state. There is no match function involved since there's no pattern.
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_is_up(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct server *srv = args->data.srv;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_BOOL;
+ if (!(srv->cur_admin & SRV_ADMF_MAINT) &&
+ (!(srv->check.state & CHK_ST_CONFIGURED) || (srv->cur_state != SRV_ST_STOPPED)))
+ smp->data.u.sint = 1;
+ else
+ smp->data.u.sint = 0;
+ return 1;
+}
+
+/* set temp integer to the number of enabled servers on the proxy.
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_connslots(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct server *iterator;
+ struct proxy *px = args->data.prx;
+
+ if (px == NULL)
+ return 0;
+ if (px->cap & PR_CAP_DEF)
+ px = smp->px;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ for (iterator = px->srv; iterator; iterator = iterator->next) {
+ if (iterator->cur_state == SRV_ST_STOPPED)
+ continue;
+
+ if (iterator->maxconn == 0 || iterator->maxqueue == 0) {
+ /* configuration is stupid */
+ smp->data.u.sint = -1; /* FIXME: stupid value! */
+ return 1;
+ }
+
+ smp->data.u.sint += (iterator->maxconn - iterator->cur_sess)
+ + (iterator->maxqueue - iterator->queue.length);
+ }
+
+ return 1;
+}
+
+/* set temp integer to the id (uuid) of the backend. Works on streams (using
+ * the stream's backend) as well as on health checks (using the check's proxy).
+ */
+static int
+smp_fetch_be_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = NULL;
+
+	if (smp->strm)
+		px = smp->strm->be;
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		px = __objt_check(smp->sess->origin)->proxy;
+	if (!px)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TXN;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = px->uuid;
+	return 1;
+}
+
+/* set string to the name of the backend. Works on streams as well as on
+ * health checks. The string is constant (points to the proxy's own id), hence
+ * the SMP_F_CONST flag.
+ */
+static int
+smp_fetch_be_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = NULL;
+
+	if (smp->strm)
+		px = smp->strm->be;
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		px = __objt_check(smp->sess->origin)->proxy;
+	if (!px)
+		return 0;
+
+	smp->data.u.str.area = (char *)px->id;
+	if (!smp->data.u.str.area)
+	        return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->flags = SMP_F_CONST;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+	return 1;
+}
+
+/* set temp integer to the id (puid) of the server. Works on streams (using
+ * the stream's target server) as well as on health checks (using the check's
+ * server). Returns 0 when no server can be determined.
+ */
+static int
+smp_fetch_srv_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct server *srv = NULL;
+
+	if (smp->strm)
+		srv = objt_server(smp->strm->target);
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		srv = __objt_check(smp->sess->origin)->server;
+	if (!srv)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = srv->puid;
+
+	return 1;
+}
+
+/* set string to the name of the server. Works on streams as well as on
+ * health checks. Returns 0 when no server can be determined or when the
+ * server has no name.
+ */
+static int
+smp_fetch_srv_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct server *srv = NULL;
+
+	if (smp->strm)
+		srv = objt_server(smp->strm->target);
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		srv = __objt_check(smp->sess->origin)->server;
+	if (!srv)
+		return 0;
+
+	smp->data.u.str.area = srv->id;
+	if (!smp->data.u.str.area)
+	        return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+	return 1;
+}
+
+/* set temp integer to the number of connections per second reaching the backend,
+ * measured over the backend's session frequency counter.
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_be_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (px == NULL)
+		return 0;
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = read_freq_ctr(&px->be_sess_per_sec);
+	return 1;
+}
+
+/* set temp integer to the number of concurrent connections on the backend.
+ * A "defaults"-capable proxy argument is resolved against the current proxy.
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_be_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (px == NULL)
+		return 0;
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = px->beconn;
+	return 1;
+}
+
+/* set temp integer to the number of available connections across available
+ * servers on the backend, using each server's dynamic maxconn. Backup servers
+ * are skipped while active servers exist (unless "use all backups" is set).
+ * Returns -1 as soon as one usable server is unlimited (maxconn == 0).
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_be_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct server *iterator;
+	struct proxy *px = args->data.prx;
+	unsigned int maxconn;
+
+	if (px == NULL)
+		return 0;
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	for (iterator = px->srv; iterator; iterator = iterator->next) {
+		if (iterator->cur_state == SRV_ST_STOPPED)
+			continue;
+
+		px = iterator->proxy;
+		/* skip unusable servers, and backup servers that would not be
+		 * picked by the current load-balancing state.
+		 */
+		if (!srv_currently_usable(iterator) ||
+		    ((iterator->flags & SRV_F_BACKUP) &&
+		     (px->srv_act || (iterator != px->lbprm.fbck && !(px->options & PR_O_USE_ALL_BK)))))
+			continue;
+
+		if (iterator->maxconn == 0) {
+			/* one active server is unlimited, return -1 */
+			smp->data.u.sint = -1;
+			return 1;
+		}
+
+		maxconn = srv_dynamic_maxconn(iterator);
+		if (maxconn > iterator->cur_sess)
+			smp->data.u.sint += maxconn - iterator->cur_sess;
+	}
+
+	return 1;
+}
+
+/* set temp integer to the total number of queued connections on the backend
+ * (the proxy's <totpend> counter).
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (px == NULL)
+		return 0;
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = px->totpend;
+	return 1;
+}
+
+/* set temp integer to the total number of queued connections on the backend divided
+ * by the number of running servers and rounded up. If there is no running
+ * server, we return twice the total, just as if we had half a running server.
+ * This is more or less correct anyway, since we expect the last server to come
+ * back soon.
+ * Accepts exactly 1 argument. Argument is a backend, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_avg_queue_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+	int nbsrv;
+
+	if (px == NULL)
+		return 0;
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+
+	nbsrv = be_usable_srv(px);
+
+	if (nbsrv > 0)
+		/* round up: ceil(totpend / nbsrv) */
+		smp->data.u.sint = (px->totpend + nbsrv - 1) / nbsrv;
+	else
+		smp->data.u.sint = px->totpend * 2;
+
+	return 1;
+}
+
+/* set temp integer to the number of concurrent connections on the server in the backend
+ * (the server's <cur_sess> counter).
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args->data.srv->cur_sess;
+	return 1;
+}
+
+/* set temp integer to the number of available connections on the server in the backend,
+ * i.e. its dynamic maxconn minus its current sessions, clamped at 0. Returns
+ * -1 when the server is unlimited (maxconn == 0).
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_conn_free(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	unsigned int maxconn;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+
+	if (args->data.srv->maxconn == 0) {
+		/* one active server is unlimited, return -1 */
+		smp->data.u.sint = -1;
+		return 1;
+	}
+
+	maxconn = srv_dynamic_maxconn(args->data.srv);
+	if (maxconn > args->data.srv->cur_sess)
+		smp->data.u.sint = maxconn - args->data.srv->cur_sess;
+	else
+		smp->data.u.sint = 0;
+
+	return 1;
+}
+
+/* set temp integer to the number of connections pending in the server's queue.
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_queue(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args->data.srv->queue.length;
+	return 1;
+}
+
+/* set temp integer to the number of sessions per second reaching the server,
+ * measured over the server's session frequency counter.
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = read_freq_ctr(&args->data.srv->sess_per_sec);
+	return 1;
+}
+
+/* set temp integer to the server's effective weight, scaled back to the
+ * user-visible range using the LB algorithm's weight multiplier/divisor
+ * (rounded up).
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_weight(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct server *srv = args->data.srv;
+	struct proxy *px = srv->proxy;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = (srv->cur_eweight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv;
+	return 1;
+}
+
+/* set temp integer to the server initial weight (<iweight>).
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_iweight(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args->data.srv->iweight;
+	return 1;
+}
+
+/* set temp integer to the server user-specified weight (<uweight>).
+ * Accepts exactly 1 argument. Argument is a server, other types will lead to
+ * undefined behaviour.
+ */
+static int
+smp_fetch_srv_uweight(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args->data.srv->uweight;
+	return 1;
+}
+
+/* set temp integer to the backend's configured "timeout server", converted to
+ * milliseconds. Works on streams as well as on health checks. Returns 0 when
+ * no backend can be determined.
+ */
+static int
+smp_fetch_be_server_timeout(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = NULL;
+
+	if (smp->strm)
+		px = smp->strm->be;
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		px = __objt_check(smp->sess->origin)->proxy;
+	if (!px)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TXN;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = TICKS_TO_MS(px->timeout.server);
+	return 1;
+}
+
+/* set temp integer to the backend's configured "timeout tunnel", converted to
+ * milliseconds. Works on streams as well as on health checks. Returns 0 when
+ * no backend can be determined.
+ */
+static int
+smp_fetch_be_tunnel_timeout(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = NULL;
+
+	if (smp->strm)
+		px = smp->strm->be;
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		px = __objt_check(smp->sess->origin)->proxy;
+	if (!px)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TXN;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = TICKS_TO_MS(px->timeout.tunnel);
+	return 1;
+}
+
+/* converter: takes a backend name (string sample) and replaces the sample
+ * with the number of usable servers of that backend. Fails (returns 0) when
+ * the name does not match an existing backend.
+ */
+static int sample_conv_nbsrv(const struct arg *args, struct sample *smp, void *private)
+{
+
+	struct proxy *px;
+
+	if (!smp_make_safe(smp))
+		return 0;
+
+	px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
+	if (!px)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = be_usable_srv(px);
+
+	return 1;
+}
+
+/* converter: takes a "backend/server" (or just "server", resolved against the
+ * current proxy) string sample and replaces it with that server's queue
+ * length. Note: the sample string is modified in place (the '/' separator is
+ * overwritten with '\0'), hence the smp_make_safe() call first.
+ */
+static int
+sample_conv_srv_queue(const struct arg *args, struct sample *smp, void *private)
+{
+	struct proxy *px;
+	struct server *srv;
+	char *bksep;
+
+	if (!smp_make_safe(smp))
+		return 0;
+
+	bksep = strchr(smp->data.u.str.area, '/');
+
+	if (bksep) {
+		/* explicit "backend/server" form */
+		*bksep = '\0';
+		px = proxy_find_by_name(smp->data.u.str.area, PR_CAP_BE, 0);
+		if (!px)
+			return 0;
+		smp->data.u.str.area = bksep + 1;
+	} else {
+		/* server name only: look it up in the current proxy */
+		if (!(smp->px->cap & PR_CAP_BE))
+			return 0;
+		px = smp->px;
+	}
+
+	srv = server_find_by_name(px, smp->data.u.str.area);
+	if (!srv)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = srv->queue.length;
+	return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "avg_queue", smp_fetch_avg_queue_size, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "be_conn", smp_fetch_be_conn, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "be_conn_free", smp_fetch_be_conn_free, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "be_id", smp_fetch_be_id, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
+	{ "be_name", smp_fetch_be_name, 0, NULL, SMP_T_STR, SMP_USE_BKEND, },
+	{ "be_server_timeout", smp_fetch_be_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
+	{ "be_sess_rate", smp_fetch_be_sess_rate, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "be_tunnel_timeout", smp_fetch_be_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
+	{ "connslots", smp_fetch_connslots, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "nbsrv", smp_fetch_nbsrv, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "queue", smp_fetch_queue_size, ARG1(1,BE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_conn", smp_fetch_srv_conn, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_conn_free", smp_fetch_srv_conn_free, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_id", smp_fetch_srv_id, 0, NULL, SMP_T_SINT, SMP_USE_SERVR, },
+	{ "srv_is_up", smp_fetch_srv_is_up, ARG1(1,SRV), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
+	{ "srv_iweight", smp_fetch_srv_iweight, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_name", smp_fetch_srv_name, 0, NULL, SMP_T_STR, SMP_USE_SERVR, },
+	{ "srv_queue", smp_fetch_srv_queue, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_sess_rate", smp_fetch_srv_sess_rate, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_uweight", smp_fetch_srv_uweight, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "srv_weight", smp_fetch_srv_weight, ARG1(1,SRV), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Converters taking a string (backend or server name) and producing an int.
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+	{ "nbsrv", sample_conv_nbsrv, 0, NULL, SMP_T_STR, SMP_T_SINT },
+	{ "srv_queue", sample_conv_srv_queue, 0, NULL, SMP_T_STR, SMP_T_SINT },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * Currently empty: all keywords above rely on the default ACL mapping.
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/base64.c b/src/base64.c
new file mode 100644
index 0000000..0601bf6
--- /dev/null
+++ b/src/base64.c
@@ -0,0 +1,303 @@
+/*
+ * ASCII <-> Base64 conversion as described in RFC1421.
+ *
+ * Copyright 2006-2010 Willy Tarreau <w@1wt.eu>
+ * Copyright 2009-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/base64.h>
+
+#define B64BASE	'#'		/* arbitrary chosen base value */
+#define B64CMIN	'+'
+#define UB64CMIN	'-'
+#define B64CMAX	'z'
+#define B64PADV	64		/* Base64 chosen special pad value */
+
+/* direct encoding tables: standard (RFC1421) and URL-safe alphabets */
+const char base64tab[65]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+/* reverse tables, indexed by (char - B64CMIN) resp. (char - UB64CMIN).
+ * Each entry stores (symbol value + B64BASE + 1); '#' therefore decodes to -1
+ * (invalid) after the "- B64BASE - 1" adjustment done by the decoders below.
+ * NOTE(review): presumably generated by dev/base64/base64rev-gen.c (see the
+ * comment above base64urldec) — confirm before editing by hand.
+ */
+const char base64rev[]="b###cXYZ[\\]^_`a###d###$%&'()*+,-./0123456789:;<=######>?@ABCDEFGHIJKLMNOPQRSTUVW";
+const char ubase64tab[65]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
+const char ubase64rev[]="b##XYZ[\\]^_`a###c###$%&'()*+,-./0123456789:;<=####c#>?@ABCDEFGHIJKLMNOPQRSTUVW";
+
+/* Encodes <ilen> bytes from <in> to <out> for at most <olen> chars (including
+ * the trailing zero). Returns the number of bytes written. No check is made
+ * for <in> or <out> to be NULL. Returns negative value if <olen> is too short
+ * to accept <ilen>. 4 output bytes are produced for 1 to 3 input bytes.
+ * Output is '='-padded as per RFC1421.
+ */
+int a2base64(char *in, int ilen, char *out, int olen)
+{
+	int convlen;
+
+	convlen = ((ilen + 2) / 3) * 4;
+
+	if (convlen >= olen)
+		return -1;
+
+	/* we don't need to check olen anymore */
+	while (ilen >= 3) {
+		/* 3 input bytes -> 4 output symbols of 6 bits each */
+		out[0] = base64tab[(((unsigned char)in[0]) >> 2)];
+		out[1] = base64tab[(((unsigned char)in[0] & 0x03) << 4) | (((unsigned char)in[1]) >> 4)];
+		out[2] = base64tab[(((unsigned char)in[1] & 0x0F) << 2) | (((unsigned char)in[2]) >> 6)];
+		out[3] = base64tab[(((unsigned char)in[2] & 0x3F))];
+		out += 4;
+		in += 3; ilen -= 3;
+	}
+
+	if (!ilen) {
+		out[0] = '\0';
+	} else {
+		/* 1 or 2 trailing input bytes: emit partial group + padding */
+		out[0] = base64tab[((unsigned char)in[0]) >> 2];
+		if (ilen == 1) {
+			out[1] = base64tab[((unsigned char)in[0] & 0x03) << 4];
+			out[2] = '=';
+		} else {
+			out[1] = base64tab[(((unsigned char)in[0] & 0x03) << 4) |
+					(((unsigned char)in[1]) >> 4)];
+			out[2] = base64tab[((unsigned char)in[1] & 0x0F) << 2];
+		}
+		out[3] = '=';
+		out[4] = '\0';
+	}
+
+	return convlen;
+}
+
+/* url variant of a2base64: uses the URL-safe alphabet ('-' and '_') and emits
+ * no '=' padding; the returned length is reduced accordingly for partial
+ * trailing groups.
+ */
+int a2base64url(const char *in, size_t ilen, char *out, size_t olen)
+{
+	int convlen;
+
+	convlen = ((ilen + 2) / 3) * 4;
+
+	if (convlen >= olen)
+		return -1;
+
+	/* we don't need to check olen anymore */
+	while (ilen >= 3) {
+		out[0] = ubase64tab[(((unsigned char)in[0]) >> 2)];
+		out[1] = ubase64tab[(((unsigned char)in[0] & 0x03) << 4) | (((unsigned char)in[1]) >> 4)];
+		out[2] = ubase64tab[(((unsigned char)in[1] & 0x0F) << 2) | (((unsigned char)in[2]) >> 6)];
+		out[3] = ubase64tab[(((unsigned char)in[2] & 0x3F))];
+		out += 4;
+		in += 3;
+		ilen -= 3;
+	}
+
+	if (!ilen) {
+		out[0] = '\0';
+		return convlen;
+	}
+
+	/* unpadded output: shorten the conversion length by what padding
+	 * would have occupied.
+	 */
+	out[0] = ubase64tab[((unsigned char)in[0]) >> 2];
+	if (ilen == 1) {
+		out[1] = ubase64tab[((unsigned char)in[0] & 0x03) << 4];
+		out[2] = '\0';
+		convlen -= 2;
+	} else {
+		out[1] = ubase64tab[(((unsigned char)in[0] & 0x03) << 4) |
+				(((unsigned char)in[1]) >> 4)];
+		out[2] = ubase64tab[((unsigned char)in[1] & 0x0F) << 2];
+		out[3] = '\0';
+		convlen -= 1;
+	}
+
+	return convlen;
+}
+
+/* Decodes <ilen> bytes from <in> to <out> for at most <olen> chars.
+ * Returns the number of bytes converted. No check is made for
+ * <in> or <out> to be NULL. Returns -1 if <in> is invalid or ilen
+ * has wrong size, -2 if <olen> is too short.
+ * 1 to 3 output bytes are produced for 4 input bytes.
+ */
+int base64dec(const char *in, size_t ilen, char *out, size_t olen) {
+
+	unsigned char t[4];
+	signed char b;
+	int convlen = 0, i = 0, pad = 0;
+
+	if (ilen % 4)
+		return -1;
+
+	/* an empty input decodes to nothing; this also protects the
+	 * in[ilen-1] / in[ilen-2] lookups below from reading before <in>.
+	 */
+	if (!ilen)
+		return 0;
+
+	if (olen < ((ilen / 4 * 3)
+	            - (in[ilen-1] == '=' ? 1 : 0)
+	            - (in[ilen-2] == '=' ? 1 : 0)))
+		return -2;
+
+	while (ilen) {
+
+		/* if (*p < B64CMIN || *p > B64CMAX) */
+		b = (signed char)*in - B64CMIN;
+		if ((unsigned char)b > (B64CMAX-B64CMIN))
+			return -1;
+
+		b = base64rev[b] - B64BASE - 1;
+
+		/* b == -1: invalid character */
+		if (b < 0)
+			return -1;
+
+		/* padding has to be continuous */
+		if (pad && b != B64PADV)
+			return -1;
+
+		/* valid padding: "XX==" or "XXX=", but never "X===" or "====" */
+		if (pad && i < 2)
+			return -1;
+
+		if (b == B64PADV)
+			pad++;
+
+		t[i++] = b;
+
+		if (i == 4) {
+			/*
+			 * WARNING: we allow to write little more data than we
+			 * should, but the checks from the beginning of the
+			 * functions guarantee that we can safely do that.
+			 */
+
+			/* xx000000 xx001111 xx111122 xx222222 */
+			if (convlen < olen)
+				out[convlen]   = ((t[0] << 2) + (t[1] >> 4));
+			if (convlen+1 < olen)
+				out[convlen+1] = ((t[1] << 4) + (t[2] >> 2));
+			if (convlen+2 < olen)
+				out[convlen+2] = ((t[2] << 6) + (t[3] >> 0));
+
+			convlen += 3-pad;
+
+			pad = i = 0;
+		}
+
+		in++;
+		ilen--;
+	}
+
+	return convlen;
+}
+
+/* url variant of base64dec: accepts unpadded input, so a length of 4k, 4k+2
+ * or 4k+3 is valid (4k+1 never is). Missing pad characters are synthesized
+ * internally (B64PADV) and subtracted from the result length at the end. */
+/* The reverse tab used to decode base64 is generated via /dev/base64/base64rev-gen.c */
+int base64urldec(const char *in, size_t ilen, char *out, size_t olen)
+{
+	unsigned char t[4];
+	signed char b;
+	int convlen = 0, i = 0, pad = 0, padlen = 0;
+
+	/* derive the number of implicit pad chars from the input length */
+	switch (ilen % 4) {
+		case 0:
+			break;
+		case 2:
+			padlen = pad = 2;
+			break;
+		case 3:
+			padlen = pad = 1;
+			break;
+		default:
+			return -1;
+	}
+
+	if (olen < (((ilen + pad) / 4 * 3) - pad))
+		return -2;
+
+	while (ilen + pad) {
+		if (ilen) {
+			/* if (*p < UB64CMIN || *p > B64CMAX) */
+			b = (signed char) * in - UB64CMIN;
+			if ((unsigned char)b > (B64CMAX - UB64CMIN))
+				return -1;
+
+			b = ubase64rev[b] - B64BASE - 1;
+			/* b == -1: invalid character */
+			if (b < 0)
+				return -1;
+
+			in++;
+			ilen--;
+
+		} else {
+			/* input exhausted: feed the synthesized padding */
+			b = B64PADV;
+			pad--;
+		}
+
+		t[i++] = b;
+
+		if (i == 4) {
+			/*
+			 * WARNING: we allow to write little more data than we
+			 * should, but the checks from the beginning of the
+			 * functions guarantee that we can safely do that.
+			 */
+
+			/* xx000000 xx001111 xx111122 xx222222 */
+			if (convlen < olen)
+				out[convlen]   = ((t[0] << 2) + (t[1] >> 4));
+			if (convlen+1 < olen)
+				out[convlen+1] = ((t[1] << 4) + (t[2] >> 2));
+			if (convlen+2 < olen)
+				out[convlen+2] = ((t[2] << 6) + (t[3] >> 0));
+
+			convlen += 3;
+			i = 0;
+		}
+	}
+	convlen -= padlen;
+
+	return convlen;
+}
+
+/* Converts the lower 30 bits of an integer to a 5-char base64 string. The
+ * caller is responsible for ensuring that the output buffer can accept 6 bytes
+ * (5 + the trailing zero). The pointer to the string is returned. The
+ * conversion is performed with MSB first and in a format that can be
+ * decoded with b64tos30(). This format is not padded and thus is not
+ * compatible with usual base64 routines.
+ */
+const char *s30tob64(int in, char *out)
+{
+	int i;
+	/* emit 5 symbols of 6 bits each, most significant first */
+	for (i = 0; i < 5; i++) {
+		out[i] = base64tab[(in >> 24) & 0x3F];
+		in <<= 6;
+	}
+	out[5] = '\0';
+	return out;
+}
+
+/* Converts a 5-char base64 string encoded by s30tob64() into a 30-bit integer.
+ * The caller is responsible for ensuring that the input contains at least 5
+ * chars. If any unexpected character is encountered, a negative value is
+ * returned. Otherwise the decoded value is returned.
+ */
+int b64tos30(const char *in)
+{
+	int i, out;
+	signed char b;
+
+	out = 0;
+	for (i = 0; i < 5; i++) {
+		/* same range + reverse-table validation as base64dec() */
+		b = (signed char)in[i] - B64CMIN;
+		if ((unsigned char)b > (B64CMAX - B64CMIN))
+			return -1; /* input character out of range */
+
+		b = base64rev[b] - B64BASE - 1;
+		if (b < 0) /* invalid character */
+			return -1;
+
+		if (b == B64PADV) /* padding not allowed */
+			return -1;
+
+		out = (out << 6) + b;
+	}
+	return out;
+}
diff --git a/src/cache.c b/src/cache.c
new file mode 100644
index 0000000..9f12f10
--- /dev/null
+++ b/src/cache.c
@@ -0,0 +1,3014 @@
+/*
+ * Cache management
+ *
+ * Copyright 2017 HAProxy Technologies
+ * William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <import/eb32tree.h>
+#include <import/sha1.h>
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/filters.h>
+#include <haproxy/hash.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/htx.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/shctx.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+#define CACHE_FLT_F_IMPLICIT_DECL  0x00000001 /* The cache filter was implicitly declared (ie without
+					       * the filter keyword) */
+#define CACHE_FLT_INIT             0x00000002 /* Whether the cache name was freed. */
+
+/* NOTE(review): seed mixed into cache key hashes — confirm against hash call sites */
+static uint64_t cache_hash_seed = 0;
+
+const char *cache_store_flt_id = "cache store filter";
+
+extern struct applet http_cache_applet;
+
+struct flt_ops cache_ops;
+
+/* one shard of a cache: an entry tree protected by a rwlock, plus a list of
+ * entries pending cleanup under its own spinlock. Cache-line aligned to avoid
+ * false sharing between shards. */
+struct cache_tree {
+	struct eb_root entries;  /* head of cache entries based on keys */
+	__decl_thread(HA_RWLOCK_T lock);
+
+	struct list cleanup_list;
+	__decl_thread(HA_SPINLOCK_T cleanup_lock);
+} ALIGNED(64);
+
+/* a cache instance, made of CACHE_TREE_NUM shards plus its configuration */
+struct cache {
+	struct cache_tree trees[CACHE_TREE_NUM];
+	struct list list;        /* cache linked list */
+	unsigned int maxage;     /* max-age */
+	unsigned int maxblocks;
+	unsigned int maxobjsz;   /* max-object-size (in bytes) */
+	unsigned int max_secondary_entries;  /* maximum number of secondary entries with the same primary hash */
+	uint8_t vary_processing_enabled;     /* boolean : manage Vary header (disabled by default) */
+	char id[33];             /* cache name */
+};
+
+/* the appctx context of a cache applet, stored in appctx->svcctx; tracks the
+ * progress of the response currently being served from the cache */
+struct cache_appctx {
+	struct cache_tree *cache_tree;
+	struct cache_entry *entry;       /* Entry to be sent from cache. */
+	unsigned int sent;               /* The number of bytes already sent for this cache entry. */
+	unsigned int offset;             /* start offset of remaining data relative to beginning of the next block */
+	unsigned int rem_data;           /* Remaining bytes for the last data block (HTX only, 0 means process next block) */
+	unsigned int send_notmodified:1; /* In case of conditional request, we might want to send a "304 Not Modified" response instead of the stored data. */
+	unsigned int unused:31;
+	struct shared_block *next;       /* The next block of data to be sent for this cache entry. */
+};
+
+/* cache config for filters. During configuration parsing only the name is
+ * known; it is later resolved to the actual cache pointer (union <c>). */
+struct cache_flt_conf {
+	union {
+		struct cache *cache; /* cache used by the filter */
+		char *name;          /* cache name used during conf parsing */
+	} c;
+	unsigned int flags;   /* CACHE_FLT_F_* */
+};
+
+/* CLI context used during "show cache"; <next_key> allows the dump to resume
+ * where it stopped between two buffer flushes */
+struct show_cache_ctx {
+	struct cache *cache;
+	struct cache_tree *cache_tree;
+	uint next_key;
+};
+
+
+/*
+ * Vary-related structures and functions
+ */
+/* one bit per header supported in a Vary signature */
+enum vary_header_bit {
+	VARY_ACCEPT_ENCODING =  (1 << 0),
+	VARY_REFERER =          (1 << 1),
+	VARY_ORIGIN =           (1 << 2),
+	VARY_LAST  /* should always be last */
+};
+
+/*
+ * Encoding list extracted from
+ * https://www.iana.org/assignments/http-parameters/http-parameters.xhtml
+ * and RFC7231#5.3.4.
+ * One bit per known content-coding; unknown codings collapse into
+ * VARY_ENCODING_OTHER.
+ */
+enum vary_encoding {
+	VARY_ENCODING_GZIP =		(1 << 0),
+	VARY_ENCODING_DEFLATE =		(1 << 1),
+	VARY_ENCODING_BR =		(1 << 2),
+	VARY_ENCODING_COMPRESS =	(1 << 3),
+	VARY_ENCODING_AES128GCM =	(1 << 4),
+	VARY_ENCODING_EXI =		(1 << 5),
+	VARY_ENCODING_PACK200_GZIP =	(1 << 6),
+	VARY_ENCODING_ZSTD =		(1 << 7),
+	VARY_ENCODING_IDENTITY =	(1 << 8),
+	VARY_ENCODING_STAR =		(1 << 9),
+	VARY_ENCODING_OTHER =		(1 << 10)
+};
+
+/* describes how one Vary-able header contributes to a secondary cache key:
+ * its name, signature bit, sub-hash size, and optional normalization and
+ * comparison callbacks */
+struct vary_hashing_information {
+	struct ist hdr_name;                 /* Header name */
+	enum vary_header_bit value;          /* Bit representing the header in a vary signature */
+	unsigned int hash_length;            /* Size of the sub hash for this header's value */
+	int(*norm_fn)(struct htx*,struct ist hdr_name,char* buf,unsigned int* buf_len);  /* Normalization function */
+	int(*cmp_fn)(const void *ref, const void *new, unsigned int len); /* Comparison function, should return 0 if the hashes are alike */
+};
+
+static int http_request_prebuild_full_secondary_key(struct stream *s);
+static int http_request_build_secondary_key(struct stream *s, int vary_signature);
+static int http_request_reduce_secondary_key(unsigned int vary_signature,
+ char prebuilt_key[HTTP_CACHE_SEC_KEY_LEN]);
+
+static int parse_encoding_value(struct ist value, unsigned int *encoding_value,
+ unsigned int *has_null_weight);
+
+static int accept_encoding_normalizer(struct htx *htx, struct ist hdr_name,
+ char *buf, unsigned int *buf_len);
+static int default_normalizer(struct htx *htx, struct ist hdr_name,
+ char *buf, unsigned int *buf_len);
+
+static int accept_encoding_bitmap_cmp(const void *ref, const void *new, unsigned int len);
+
+/* Warning : do not forget to update HTTP_CACHE_SEC_KEY_LEN when new items are
+ * added to this array. The order of this array defines the layout of the
+ * secondary key (see secondary_key_cmp()). */
+const struct vary_hashing_information vary_information[] = {
+	{ IST("accept-encoding"), VARY_ACCEPT_ENCODING, sizeof(uint32_t), &accept_encoding_normalizer, &accept_encoding_bitmap_cmp },
+	{ IST("referer"), VARY_REFERER, sizeof(uint64_t), &default_normalizer, NULL },
+	{ IST("origin"), VARY_ORIGIN, sizeof(uint64_t), &default_normalizer, NULL },
+};
+
+
+/* take the shard's tree lock for reading */
+static inline void cache_rdlock(struct cache_tree *cache)
+{
+	HA_RWLOCK_RDLOCK(CACHE_LOCK, &cache->lock);
+}
+
+/* release the shard's tree lock taken for reading */
+static inline void cache_rdunlock(struct cache_tree *cache)
+{
+	HA_RWLOCK_RDUNLOCK(CACHE_LOCK, &cache->lock);
+}
+
+/* take the shard's tree lock for writing */
+static inline void cache_wrlock(struct cache_tree *cache)
+{
+	HA_RWLOCK_WRLOCK(CACHE_LOCK, &cache->lock);
+}
+
+/* release the shard's tree lock taken for writing */
+static inline void cache_wrunlock(struct cache_tree *cache)
+{
+	HA_RWLOCK_WRUNLOCK(CACHE_LOCK, &cache->lock);
+}
+
+/*
+ * cache ctx for filters: tracks the shared-memory object being built for the
+ * response currently traversing the filter
+ */
+struct cache_st {
+	struct shared_block *first_block;
+	struct list detached_head;
+};
+
+#define DEFAULT_MAX_SECONDARY_ENTRY 10
+
+/* one cached HTTP object; lives in shared memory, keyed by a 160-bit hash
+ * whose first 32 bits index the ebtree node */
+struct cache_entry {
+	unsigned int complete;    /* An entry won't be valid until complete is not null. */
+	unsigned int latest_validation;     /* latest validation date */
+	unsigned int expire;      /* expiration date (wall clock time) */
+	unsigned int age;         /* Origin server "Age" header value */
+
+	int refcount;             /* refcounted; freed when it drops to 0 (see release_entry()) */
+
+	struct eb32_node eb;     /* ebtree node used to hold the cache object */
+	char hash[20];
+
+	struct list cleanup_list;/* List used between the cache_free_blocks and cache_reserve_finish calls */
+
+	char secondary_key[HTTP_CACHE_SEC_KEY_LEN];  /* Optional secondary key. */
+	unsigned int secondary_key_signature;  /* Bitfield of the HTTP headers that should be used
+	                                        * to build secondary keys for this cache entry. */
+	unsigned int secondary_entries_count;  /* Should only be filled in the last entry of a list of dup entries */
+	unsigned int last_clear_ts;          /* Timestamp of the last call to clear_expired_duplicates. */
+
+	unsigned int etag_length; /* Length of the ETag value (if one was found in the response). */
+	unsigned int etag_offset; /* Offset of the ETag value in the data buffer. */
+
+	time_t last_modified; /* Origin server "Last-Modified" header value converted in
+			       * seconds since epoch. If no "Last-Modified"
+			       * header is found, use "Date" header value,
+			       * otherwise use reception time. This field will
+			       * be used in case of an "If-Modified-Since"-based
+			       * conditional request. */
+
+	unsigned char data[0];  /* variable-length stored object follows the header */
+};
+
+#define CACHE_BLOCKSIZE 1024
+#define CACHE_ENTRY_MAX_AGE 2147483648U
+
+/* running caches, and cache sections waiting for post-config initialization */
+static struct list caches = LIST_HEAD_INIT(caches);
+static struct list caches_config = LIST_HEAD_INIT(caches_config); /* cache config to init */
+static struct cache *tmp_cache_config = NULL; /* NOTE(review): presumably the section being parsed — confirm in cfg parser */
+
+DECLARE_STATIC_POOL(pool_head_cache_st, "cache_st", sizeof(struct cache_st));
+
+static struct eb32_node *insert_entry(struct cache *cache, struct cache_tree *tree, struct cache_entry *new_entry);
+static void delete_entry(struct cache_entry *del_entry);
+static void release_entry_locked(struct cache_tree *cache, struct cache_entry *entry);
+static void release_entry_unlocked(struct cache_tree *cache, struct cache_entry *entry);
+
+/*
+ * Find a cache_entry in the <cache>'s tree that has the hash <hash>.
+ * If <delete_expired> is 0 then the entry is left untouched if it is found but
+ * is already expired, and NULL is returned. Otherwise, the expired entry is
+ * removed from the tree and NULL is returned.
+ * Returns a valid (not expired) cache_entry pointer, or NULL.
+ * The returned entry is not retained, it should be explicitly retained only
+ * when necessary.
+ *
+ * This function must be called under a cache lock, either read if
+ * delete_expired==0, write otherwise.
+ */
+struct cache_entry *get_entry(struct cache_tree *cache_tree, char *hash, int delete_expired)
+{
+	struct eb32_node *node;
+	struct cache_entry *entry;
+
+	/* the tree is keyed on the first 32 bits of the hash */
+	node = eb32_lookup(&cache_tree->entries, read_u32(hash));
+	if (!node)
+		return NULL;
+
+	entry = eb32_entry(node, struct cache_entry, eb);
+
+	/* if that's not the right node */
+	if (memcmp(entry->hash, hash, sizeof(entry->hash)))
+		return NULL;
+
+	if (entry->expire > date.tv_sec) {
+		return entry;
+	} else if (delete_expired) {
+		release_entry_locked(cache_tree, entry);
+	}
+	return NULL;
+}
+
+/*
+ * Increment a cache_entry's reference counter (atomically). Safe to call
+ * with a NULL entry.
+ */
+static void retain_entry(struct cache_entry *entry)
+{
+	if (entry)
+		HA_ATOMIC_INC(&entry->refcount);
+}
+
+/*
+ * Decrement a cache_entry's reference counter and remove it from the <cache>'s
+ * tree if the reference counter becomes 0.
+ * If <needs_locking> is 0 then the cache lock was already taken by the caller,
+ * otherwise it must be taken in write mode before actually deleting the entry.
+ */
+static void release_entry(struct cache_tree *cache, struct cache_entry *entry, int needs_locking)
+{
+	if (!entry)
+		return;
+
+	if (HA_ATOMIC_SUB_FETCH(&entry->refcount, 1) <= 0) {
+		if (needs_locking) {
+			cache_wrlock(cache);
+			/* The value might have changed between the last time we
+			 * checked it and now, we need to recheck it just in
+			 * case (another thread may have retained the entry
+			 * before we got the write lock).
+			 */
+			if (HA_ATOMIC_LOAD(&entry->refcount) > 0) {
+				cache_wrunlock(cache);
+				return;
+			}
+		}
+		delete_entry(entry);
+		if (needs_locking) {
+			cache_wrunlock(cache);
+		}
+	}
+}
+
/*
 * Decrement a cache_entry's reference counter and remove it from the <cache>'s
 * tree if the reference counter becomes 0.
 * This function must be called under the cache lock in write mode.
 * Thin wrapper around release_entry() with needs_locking==0.
 */
static inline void release_entry_locked(struct cache_tree *cache, struct cache_entry *entry)
{
	release_entry(cache, entry, 0);
}
+
/*
 * Decrement a cache_entry's reference counter and remove it from the <cache>'s
 * tree if the reference counter becomes 0.
 * This function must not be called under the cache lock or the shctx lock. The
 * cache lock might be taken in write mode (if the entry gets deleted).
 * Thin wrapper around release_entry() with needs_locking==1.
 */
static inline void release_entry_unlocked(struct cache_tree *cache, struct cache_entry *entry)
{
	release_entry(cache, entry, 1);
}
+
+
+/*
+ * Compare a newly built secondary key to the one found in a cache_entry.
+ * Every sub-part of the key is compared to the reference through the dedicated
+ * comparison function of the sub-part (that might do more than a simple
+ * memcmp).
+ * Returns 0 if the keys are alike.
+ */
+static int secondary_key_cmp(const char *ref_key, const char *new_key)
+{
+ int retval = 0;
+ size_t idx = 0;
+ unsigned int offset = 0;
+ const struct vary_hashing_information *info;
+
+ for (idx = 0; idx < sizeof(vary_information)/sizeof(*vary_information) && !retval; ++idx) {
+ info = &vary_information[idx];
+
+ if (info->cmp_fn)
+ retval = info->cmp_fn(&ref_key[offset], &new_key[offset], info->hash_length);
+ else
+ retval = memcmp(&ref_key[offset], &new_key[offset], info->hash_length);
+
+ offset += info->hash_length;
+ }
+
+ return retval;
+}
+
/*
 * There can be multiple entries with the same primary key in the ebtree so in
 * order to get the proper one out of the list, we use a secondary_key.
 * This function simply iterates over all the entries with the same primary_key
 * until it finds the right one.
 * If <delete_expired> is 0 then the entry is left untouched if it is found but
 * is already expired, and NULL is returned. Otherwise, the expired entry is
 * removed from the tree and NULL is returned.
 * Returns the cache_entry in case of success, NULL otherwise.
 *
 * This function must be called under a cache lock, either read if
 * delete_expired==0, write otherwise.
 */
struct cache_entry *get_secondary_entry(struct cache_tree *cache, struct cache_entry *entry,
                                        const char *secondary_key, int delete_expired)
{
	struct eb32_node *node = &entry->eb;

	/* An entry without a vary signature cannot have secondary entries. */
	if (!entry->secondary_key_signature)
		return NULL;

	while (entry && secondary_key_cmp(entry->secondary_key, secondary_key) != 0) {
		/* Advance first: <entry> may be released below, which would
		 * invalidate its node. */
		node = eb32_next_dup(node);

		/* Make the best use of this iteration and clear expired entries
		 * when we find them. The secondary_entry count will
		 * be updated when we try to insert a new entry to this list. */
		if (entry->expire <= date.tv_sec && delete_expired) {
			release_entry_locked(cache, entry);
		}

		entry = node ? eb32_entry(node, struct cache_entry, eb) : NULL;
	}

	/* The matching entry might itself be expired. */
	if (entry && entry->expire <= date.tv_sec) {
		if (delete_expired) {
			release_entry_locked(cache, entry);
		}
		entry = NULL;
	}

	return entry;
}
+
+static inline struct cache_tree *get_cache_tree_from_hash(struct cache *cache, unsigned int hash)
+{
+ if (!cache)
+ return NULL;
+
+ return &cache->trees[hash % CACHE_TREE_NUM];
+}
+
+
+/*
+ * Remove all expired entries from a list of duplicates.
+ * Return the number of alive entries in the list and sets dup_tail to the
+ * current last item of the list.
+ *
+ * This function must be called under a cache write lock.
+ */
+static unsigned int clear_expired_duplicates(struct cache_tree *cache, struct eb32_node **dup_tail)
+{
+ unsigned int entry_count = 0;
+ struct cache_entry *entry = NULL;
+ struct eb32_node *prev = *dup_tail;
+ struct eb32_node *tail = NULL;
+
+ while (prev) {
+ entry = container_of(prev, struct cache_entry, eb);
+ prev = eb32_prev_dup(prev);
+ if (entry->expire <= date.tv_sec) {
+ release_entry_locked(cache, entry);
+ }
+ else {
+ if (!tail)
+ tail = &entry->eb;
+ ++entry_count;
+ }
+ }
+
+ *dup_tail = tail;
+
+ return entry_count;
+}
+
+
+/*
+ * This function inserts a cache_entry in the cache's ebtree. In case of
+ * duplicate entries (vary), it then checks that the number of entries did not
+ * reach the max number of secondary entries. If this entry should not have been
+ * created, remove it.
+ * In the regular case (unique entries), this function does not do more than a
+ * simple insert. In case of secondary entries, it will at most cost an
+ * insertion+max_sec_entries time checks and entry deletion.
+ * Returns the newly inserted node in case of success, NULL otherwise.
+ *
+ * This function must be called under a cache write lock.
+ */
+static struct eb32_node *insert_entry(struct cache *cache, struct cache_tree *tree, struct cache_entry *new_entry)
+{
+ struct eb32_node *prev = NULL;
+ struct cache_entry *entry = NULL;
+ unsigned int entry_count = 0;
+ unsigned int last_clear_ts = date.tv_sec;
+
+ struct eb32_node *node = eb32_insert(&tree->entries, &new_entry->eb);
+
+ new_entry->refcount = 1;
+
+ /* We should not have multiple entries with the same primary key unless
+ * the entry has a non null vary signature. */
+ if (!new_entry->secondary_key_signature)
+ return node;
+
+ prev = eb32_prev_dup(node);
+ if (prev != NULL) {
+ /* The last entry of a duplicate list should contain the current
+ * number of entries in the list. */
+ entry = container_of(prev, struct cache_entry, eb);
+ entry_count = entry->secondary_entries_count;
+ last_clear_ts = entry->last_clear_ts;
+
+ if (entry_count >= cache->max_secondary_entries) {
+ /* Some entries of the duplicate list might be expired so
+ * we will iterate over all the items in order to free some
+ * space. In order to avoid going over the same list too
+ * often, we first check the timestamp of the last check
+ * performed. */
+ if (last_clear_ts == date.tv_sec) {
+ /* Too many entries for this primary key, clear the
+ * one that was inserted. */
+ release_entry_locked(tree, entry);
+ return NULL;
+ }
+
+ entry_count = clear_expired_duplicates(tree, &prev);
+ if (entry_count >= cache->max_secondary_entries) {
+ /* Still too many entries for this primary key, delete
+ * the newly inserted one. */
+ entry = container_of(prev, struct cache_entry, eb);
+ entry->last_clear_ts = date.tv_sec;
+ release_entry_locked(tree, entry);
+ return NULL;
+ }
+ }
+ }
+
+ new_entry->secondary_entries_count = entry_count + 1;
+ new_entry->last_clear_ts = last_clear_ts;
+
+ return node;
+}
+
+
/*
 * This function removes an entry from the ebtree. If the entry was a duplicate
 * (in case of Vary), it updates the secondary entry counter in another
 * duplicate entry (the last entry of the dup list).
 *
 * This function must be called under a cache write lock.
 */
static void delete_entry(struct cache_entry *del_entry)
{
	struct eb32_node *prev = NULL, *next = NULL;
	struct cache_entry *entry = NULL;
	struct eb32_node *last = NULL;

	/* The entry might have been removed from the cache before. In such a
	 * case calling eb32_next_dup would crash. A key of 0 marks an entry
	 * that was already detached from the tree (see bottom of function). */
	if (del_entry->secondary_key_signature && del_entry->eb.key != 0) {
		next = &del_entry->eb;

		/* Look for last entry of the duplicates list. */
		while ((next = eb32_next_dup(next))) {
			last = next;
		}

		if (last) {
			/* The tail carries the list's entry count; one entry
			 * (del_entry) is about to disappear. */
			entry = container_of(last, struct cache_entry, eb);
			--entry->secondary_entries_count;
		}
		else {
			/* The current entry is the last one, look for the
			 * previous one to update its counter. */
			prev = eb32_prev_dup(&del_entry->eb);
			if (prev) {
				entry = container_of(prev, struct cache_entry, eb);
				entry->secondary_entries_count = del_entry->secondary_entries_count - 1;
			}
		}
	}
	eb32_delete(&del_entry->eb);
	/* Mark the entry as detached so a second delete_entry on it will not
	 * walk a dup list it no longer belongs to. */
	del_entry->eb.key = 0;
}
+
+
+static inline struct shared_context *shctx_ptr(struct cache *cache)
+{
+ return (struct shared_context *)((unsigned char *)cache - offsetof(struct shared_context, data));
+}
+
+static inline struct shared_block *block_ptr(struct cache_entry *entry)
+{
+ return (struct shared_block *)((unsigned char *)entry - offsetof(struct shared_block, data));
+}
+
+
+
/* Filter init callback: mark the filter configuration as HTX-capable.
 * Always succeeds (returns 0). */
static int
cache_store_init(struct proxy *px, struct flt_conf *fconf)
{
	fconf->flags |= FLT_CFG_FL_HTX;
	return 0;
}
+
/* Filter deinit callback: release the filter configuration.
 * The name string is only owned by <cconf> before the configuration has been
 * fully initialized (CACHE_FLT_INIT not set) — presumably <c> then holds a
 * cache pointer instead of the name; verify against cache post_check. */
static void
cache_store_deinit(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;

	if (!(cconf->flags & CACHE_FLT_INIT))
		free(cconf->c.name);
	free(cconf);
}
+
/* Filter check callback: validate the cache filter setup for proxy <px>.
 * Verifies that the referenced cache exists, that compression is not placed
 * before the cache, and that any other filter combination uses an explicit
 * cache filter declaration. Returns 0 on success, 1 on error. */
static int
cache_store_check(struct proxy *px, struct flt_conf *fconf)
{
	struct cache_flt_conf *cconf = fconf->conf;
	struct flt_conf *f;
	struct cache *cache;
	int comp = 0;

	/* Find the cache corresponding to the name in the filter config. The
	 * cache will not be referenced now in the filter config because it is
	 * not fully allocated. This step will be performed during the cache
	 * post_check.
	 */
	list_for_each_entry(cache, &caches_config, list) {
		if (strcmp(cache->id, cconf->c.name) == 0)
			goto found;
	}

	ha_alert("config: %s '%s': unable to find the cache '%s' referenced by the filter 'cache'.\n",
		 proxy_type_str(px), px->id, (char *)cconf->c.name);
	return 1;

  found:
	/* Here <cache> points on the cache the filter must use and <cconf>
	 * points on the cache filter configuration. */

	/* Check all filters for proxy <px> to know if the compression is
	 * enabled and if it is after the cache. When the compression is before
	 * the cache, an error is returned. Also check if the cache filter must
	 * be explicitly declared or not. */
	list_for_each_entry(f, &px->filter_configs, list) {
		if (f == fconf) {
			/* The compression filter must be evaluated after the cache. */
			if (comp) {
				ha_alert("config: %s '%s': unable to enable the compression filter before "
					 "the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
				return 1;
			}
		}
		else if (f->id == http_comp_flt_id)
			comp = 1;
		else if (f->id == fcgi_flt_id)
			continue;
		else if ((f->id != fconf->id) && (cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
			/* Implicit declaration is only allowed with the
			 * compression and fcgi. For other filters, an explicit
			 * declaration is required. */
			ha_alert("config: %s '%s': require an explicit filter declaration "
				 "to use the cache '%s'.\n", proxy_type_str(px), px->id, cache->id);
			return 1;
		}

	}
	return 0;
}
+
+static int
+cache_store_strm_init(struct stream *s, struct filter *filter)
+{
+ struct cache_st *st;
+
+ st = pool_alloc(pool_head_cache_st);
+ if (st == NULL)
+ return -1;
+
+ st->first_block = NULL;
+ filter->ctx = st;
+
+ /* Register post-analyzer on AN_RES_WAIT_HTTP */
+ filter->post_analyzers |= AN_RES_WAIT_HTTP;
+ return 1;
+}
+
/* Per-stream filter deinit: release any storage still in progress and free
 * the stream's cache context. */
static void
cache_store_strm_deinit(struct stream *s, struct filter *filter)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);

	/* Everything should be released in the http_end filter, but we need to do it
	 * there too, in case of errors */
	if (st && st->first_block) {
		struct cache_entry *object = (struct cache_entry *)st->first_block->data;
		if (!object->complete) {
			/* The stream was closed but the 'complete' flag was not
			 * set which means that cache_store_http_end was not
			 * called. The stream must have been closed before we
			 * could store the full answer in the cache.
			 */
			/* Drop the storage's reference; may take the cache
			 * write lock internally, so must be done before taking
			 * the shctx lock below. */
			release_entry_unlocked(&cache->trees[object->eb.key % CACHE_TREE_NUM], object);
		}
		/* Give the reserved rows back to the shared context. */
		shctx_wrlock(shctx);
		shctx_row_reattach(shctx, st->first_block);
		shctx_wrunlock(shctx);
	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}
}
+
+static int
+cache_store_post_analyze(struct stream *s, struct filter *filter, struct channel *chn,
+ unsigned an_bit)
+{
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->rsp;
+ struct cache_st *st = filter->ctx;
+
+ if (an_bit != AN_RES_WAIT_HTTP)
+ goto end;
+
+ /* Here we need to check if any compression filter precedes the cache
+ * filter. This is only possible when the compression is configured in
+ * the frontend while the cache filter is configured on the
+ * backend. This case cannot be detected during HAProxy startup. So in
+ * such cases, the cache is disabled.
+ */
+ if (st && (msg->flags & HTTP_MSGF_COMPRESSING)) {
+ pool_free(pool_head_cache_st, st);
+ filter->ctx = NULL;
+ }
+
+ end:
+ return 1;
+}
+
+static int
+cache_store_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+ struct cache_st *st = filter->ctx;
+
+ if (!(msg->chn->flags & CF_ISRESP) || !st)
+ return 1;
+
+ if (st->first_block)
+ register_data_filter(s, msg->chn, filter);
+ return 1;
+}
+
/* Abort an in-progress storage: drop the cache entry being filled, give its
 * rows back to the shared context and disable caching for this stream. */
static inline void disable_cache_entry(struct cache_st *st,
                                       struct filter *filter, struct shared_context *shctx)
{
	struct cache_entry *object;
	struct cache *cache = (struct cache*)shctx->data;

	object = (struct cache_entry *)st->first_block->data;
	filter->ctx = NULL; /* disable cache */
	/* Drop the storage's reference. Done before taking the shctx lock
	 * since release_entry_unlocked may take the cache write lock itself. */
	release_entry_unlocked(&cache->trees[object->eb.key % CACHE_TREE_NUM], object);
	shctx_wrlock(shctx);
	shctx_row_reattach(shctx, st->first_block);
	shctx_wrunlock(shctx);
	pool_free(pool_head_cache_st, st);
}
+
/* Data callback: copy up to <len> bytes of the response payload, starting at
 * <offset>, into the shared-context rows reserved for this stream's cache
 * entry. HTX blocks are serialized as their 32-bit info word followed by
 * their payload. Returns the number of bytes actually forwarded/cached; on
 * failure the entry is aborted and <orig_len> is returned so the stream can
 * keep forwarding without caching. */
static int
cache_store_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
			 unsigned int offset, unsigned int len)
{
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
	struct cache_st *st = filter->ctx;
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct htx_blk *blk;
	struct shared_block *fb;
	struct htx_ret htxret;
	unsigned int orig_len, to_forward;
	int ret;

	if (!len)
		return len;

	/* No storage in progress: stop filtering data for this stream. */
	if (!st->first_block) {
		unregister_data_filter(s, msg->chn, filter);
		return len;
	}

	chunk_reset(&trash);
	orig_len = len;
	to_forward = 0;

	/* Locate the HTX block containing <offset>; htxret.ret is the
	 * remaining offset inside that block. */
	htxret = htx_find_offset(htx, offset);
	blk = htxret.blk;
	offset = htxret.ret;
	for (; blk && len; blk = htx_get_next_blk(htx, blk)) {
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t info, sz = htx_get_blksz(blk);
		struct ist v;

		switch (type) {
		case HTX_BLK_UNUSED:
			break;

		case HTX_BLK_DATA:
			v = htx_get_blk_value(htx, blk);
			v = istadv(v, offset);
			v = isttrim(v, len);

			/* Serialize a possibly truncated DATA block: rebuild
			 * the info word with the truncated length. */
			info = (type << 28) + v.len;
			chunk_memcat(&trash, (char *)&info, sizeof(info));
			chunk_istcat(&trash, v);
			to_forward += v.len;
			len -= v.len;
			break;

		default:
			/* Here offset must always be 0 because only
			 * DATA blocks can be partially transferred. */
			if (offset)
				goto no_cache;
			/* Non-DATA blocks are copied whole or not at all. */
			if (sz > len)
				goto end;

			chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
			chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);
			to_forward += sz;
			len -= sz;
			break;
		}

		offset = 0;
	}

  end:

	/* Make sure enough row space is available before appending. */
	fb = shctx_row_reserve_hot(shctx, st->first_block, trash.data);
	if (!fb) {
		goto no_cache;
	}

	ret = shctx_row_data_append(shctx, st->first_block,
				    (unsigned char *)b_head(&trash), b_data(&trash));
	if (ret < 0)
		goto no_cache;

	return to_forward;

  no_cache:
	disable_cache_entry(st, filter, shctx);
	unregister_data_filter(s, msg->chn, filter);
	return orig_len;
}
+
/* End-of-message callback: the whole response has been stored, mark the cache
 * entry as complete (usable by lookups) and release the hot rows. Always
 * returns 1. */
static int
cache_store_http_end(struct stream *s, struct filter *filter,
                     struct http_msg *msg)
{
	struct cache_st *st = filter->ctx;
	struct cache_flt_conf *cconf = FLT_CONF(filter);
	struct cache *cache = cconf->c.cache;
	struct shared_context *shctx = shctx_ptr(cache);
	struct cache_entry *object;

	/* Only act on the response channel. */
	if (!(msg->chn->flags & CF_ISRESP))
		return 1;

	if (st && st->first_block) {

		object = (struct cache_entry *)st->first_block->data;

		shctx_wrlock(shctx);
		/* The whole payload was cached, the entry can now be used. */
		object->complete = 1;
		/* remove from the hotlist */
		shctx_row_reattach(shctx, st->first_block);
		shctx_wrunlock(shctx);

	}
	if (st) {
		pool_free(pool_head_cache_st, st);
		filter->ctx = NULL;
	}

	return 1;
}
+
/*
 * This intends to be used when checking HTTP headers for some
 * word=value directive. Performs a case-insensitive match of <word> (length
 * <wlen>) at the start of <sample> (length <slen>).
 * Returns a pointer to the first character of the value; returns NULL if the
 * word was not found, is not immediately followed by '=', or has an empty
 * value.
 */
char *directive_value(const char *sample, int slen, const char *word, int wlen)
{
	if (slen < wlen)
		return NULL;

	/* Case-insensitive comparison: the XOR only folds the 0x20 case bit,
	 * which is sufficient for the controlled ASCII directive names this
	 * helper is used with. */
	while (wlen) {
		char c = *sample ^ *word;
		if (c && c != ('A' ^ 'a'))
			return NULL;
		sample++;
		word++;
		slen--;
		wlen--;
	}

	/* The word must be directly followed by '=' and a non-empty value. */
	if (!slen || *sample != '=')
		return NULL;
	sample++;
	slen--;

	return slen ? (char *)sample : NULL;
}
+
/*
 * Return the maxage in seconds of an HTTP response.
 * The returned value will always take the cache's configuration into account
 * (cache->maxage) but the actual max age of the response will be set in the
 * true_maxage parameter. It will be used to determine if a response is already
 * stale or not.
 * Compute the maxage using either:
 *  - the assigned max-age of the cache
 *  - the s-maxage directive
 *  - the max-age directive
 *  - (Expires - Date) headers
 *  - the default-max-age of the cache
 *
 * Returns -1 when a directive carries an invalid (negative or unparseable)
 * value.
 */
int http_calc_maxage(struct stream *s, struct cache *cache, int *true_maxage)
{
	struct htx *htx = htxbuf(&s->res.buf);
	struct http_hdr_ctx ctx = { .blk = NULL };
	long smaxage = -1;
	long maxage = -1;
	int expires = -1;
	struct tm tm = {};
	time_t expires_val = 0;
	char *endptr = NULL;
	int offset = 0;

	/* The Cache-Control max-age and s-maxage directives should be followed by
	 * a positive numerical value (see RFC 7234#5.2.1.1). According to the
	 * specs, a sender "should not" generate a quoted-string value but we will
	 * still accept this format since it isn't strictly forbidden. */
	while (http_find_header(htx, ist("cache-control"), &ctx, 0)) {
		char *value;

		value = directive_value(ctx.value.ptr, ctx.value.len, "s-maxage", 8);
		if (value) {
			struct buffer *chk = get_trash_chunk();

			/* Copy the directive value and NUL-terminate it so it
			 * can be fed to strtol. */
			chunk_memcat(chk, value, ctx.value.len - 8 + 1);
			chunk_memcat(chk, "", 1);
			/* Skip an optional leading quote (quoted-string form). */
			offset = (*chk->area == '"') ? 1 : 0;
			smaxage = strtol(chk->area + offset, &endptr, 10);
			if (unlikely(smaxage < 0 || endptr == chk->area + offset))
				return -1;
		}

		value = directive_value(ctx.value.ptr, ctx.value.len, "max-age", 7);
		if (value) {
			struct buffer *chk = get_trash_chunk();

			chunk_memcat(chk, value, ctx.value.len - 7 + 1);
			chunk_memcat(chk, "", 1);
			offset = (*chk->area == '"') ? 1 : 0;
			maxage = strtol(chk->area + offset, &endptr, 10);
			if (unlikely(maxage < 0 || endptr == chk->area + offset))
				return -1;
		}
	}

	/* Look for Expires header if no s-maxage or max-age Cache-Control data
	 * was found. */
	if (maxage == -1 && smaxage == -1) {
		ctx.blk = NULL;
		if (http_find_header(htx, ist("expires"), &ctx, 1)) {
			if (parse_http_date(istptr(ctx.value), istlen(ctx.value), &tm)) {
				expires_val = my_timegm(&tm);
				/* A request having an expiring date earlier
				 * than the current date should be considered as
				 * stale. */
				expires = (expires_val >= date.tv_sec) ?
					(expires_val - date.tv_sec) : 0;
			}
			else {
				/* Following RFC 7234#5.3, an invalid date
				 * format must be treated as a date in the past
				 * so the cache entry must be seen as already
				 * expired. */
				expires = 0;
			}
		}
	}


	/* Precedence: s-maxage, then max-age, then Expires; the returned value
	 * is always capped by the cache's configured maxage. */
	if (smaxage > 0) {
		if (true_maxage)
			*true_maxage = smaxage;
		return MIN(smaxage, cache->maxage);
	}

	if (maxage > 0) {
		if (true_maxage)
			*true_maxage = maxage;
		return MIN(maxage, cache->maxage);
	}

	if (expires >= 0) {
		if (true_maxage)
			*true_maxage = expires;
		return MIN(expires, cache->maxage);
	}

	return cache->maxage;

}
+
+
/* Shared-context callback invoked when block <first> must be reclaimed.
 * The embedded cache entry is marked incomplete and queued (retained) on its
 * tree's cleanup list; the actual tree removal is deferred to
 * cache_reserve_finish(), which can take the proper locks. */
static void cache_free_blocks(struct shared_block *first, void *data)
{
	struct cache_entry *object = (struct cache_entry *)first->data;
	struct cache *cache = (struct cache *)data;
	struct cache_tree *cache_tree;

	/* A zero key means the entry was already detached from the tree. */
	if (object->eb.key) {
		object->complete = 0;
		cache_tree = &cache->trees[object->eb.key % CACHE_TREE_NUM];
		/* Keep the entry alive until cache_reserve_finish processes it. */
		retain_entry(object);
		HA_SPIN_LOCK(CACHE_LOCK, &cache_tree->cleanup_lock);
		LIST_INSERT(&cache_tree->cleanup_list, &object->cleanup_list);
		HA_SPIN_UNLOCK(CACHE_LOCK, &cache_tree->cleanup_lock);
	}
}
+
/* Shared-context callback run after a reservation completed: flush every
 * tree's cleanup list (filled by cache_free_blocks) and remove the queued
 * entries from their trees under the proper write lock. */
static void cache_reserve_finish(struct shared_context *shctx)
{
	struct cache_entry *object, *back;
	struct cache *cache = (struct cache *)shctx->data;
	struct cache_tree *cache_tree;
	int cache_tree_idx = 0;

	for (; cache_tree_idx < CACHE_TREE_NUM; ++cache_tree_idx) {
		cache_tree = &cache->trees[cache_tree_idx];

		cache_wrlock(cache_tree);
		HA_SPIN_LOCK(CACHE_LOCK, &cache_tree->cleanup_lock);

		list_for_each_entry_safe(object, back, &cache_tree->cleanup_list, cleanup_list) {
			LIST_DELETE(&object->cleanup_list);
			/*
			 * At this point we locked the cache tree in write mode
			 * so no new thread could retain the current entry
			 * because the only two places where it can happen is in
			 * the cache_use case which is under cache_rdlock and
			 * the reserve_hot case which would require the
			 * corresponding block to still be in the avail list,
			 * which is impossible (we reserved it for a thread and
			 * took it out of the avail list already). The only two
			 * references are then the default one (upon cache_entry
			 * creation) and the one in this cleanup list.
			 */
			BUG_ON(object->refcount > 2);
			delete_entry(object);
		}

		HA_SPIN_UNLOCK(CACHE_LOCK, &cache_tree->cleanup_lock);
		cache_wrunlock(cache_tree);
	}
}
+
+
+/* As per RFC 7234#4.3.2, in case of "If-Modified-Since" conditional request, the
+ * date value should be compared to a date determined by in a previous response (for
+ * the same entity). This date could either be the "Last-Modified" value, or the "Date"
+ * value of the response's reception time (by decreasing order of priority). */
+static time_t get_last_modified_time(struct htx *htx)
+{
+ time_t last_modified = 0;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+ struct tm tm = {};
+
+ if (http_find_header(htx, ist("last-modified"), &ctx, 1)) {
+ if (parse_http_date(istptr(ctx.value), istlen(ctx.value), &tm)) {
+ last_modified = my_timegm(&tm);
+ }
+ }
+
+ if (!last_modified) {
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("date"), &ctx, 1)) {
+ if (parse_http_date(istptr(ctx.value), istlen(ctx.value), &tm)) {
+ last_modified = my_timegm(&tm);
+ }
+ }
+ }
+
+ /* Fallback on the current time if no "Last-Modified" or "Date" header
+ * was found. */
+ if (!last_modified)
+ last_modified = date.tv_sec;
+
+ return last_modified;
+}
+
+/*
+ * Checks the vary header's value. The headers on which vary should be applied
+ * must be explicitly supported in the vary_information array (see cache.c). If
+ * any other header is mentioned, we won't store the response.
+ * Returns 1 if Vary-based storage can work, 0 otherwise.
+ */
+static int http_check_vary_header(struct htx *htx, unsigned int *vary_signature)
+{
+ unsigned int vary_idx;
+ unsigned int vary_info_count;
+ const struct vary_hashing_information *vary_info;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+
+ int retval = 1;
+
+ *vary_signature = 0;
+
+ vary_info_count = sizeof(vary_information)/sizeof(*vary_information);
+ while (retval && http_find_header(htx, ist("Vary"), &ctx, 0)) {
+ for (vary_idx = 0; vary_idx < vary_info_count; ++vary_idx) {
+ vary_info = &vary_information[vary_idx];
+ if (isteqi(ctx.value, vary_info->hdr_name)) {
+ *vary_signature |= vary_info->value;
+ break;
+ }
+ }
+ retval = (vary_idx < vary_info_count);
+ }
+
+ return retval;
+}
+
+
+/*
+ * Look for the accept-encoding part of the secondary_key and replace the
+ * encoding bitmap part of the hash with the actual encoding of the response,
+ * extracted from the content-encoding header value.
+ * Responses that have an unknown encoding will not be cached if they also
+ * "vary" on the accept-encoding value.
+ * Returns 0 if we found a known encoding in the response, -1 otherwise.
+ */
+static int set_secondary_key_encoding(struct htx *htx, char *secondary_key)
+{
+ unsigned int resp_encoding_bitmap = 0;
+ const struct vary_hashing_information *info = vary_information;
+ unsigned int offset = 0;
+ unsigned int count = 0;
+ unsigned int hash_info_count = sizeof(vary_information)/sizeof(*vary_information);
+ unsigned int encoding_value;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+
+ /* Look for the accept-encoding part of the secondary_key. */
+ while (count < hash_info_count && info->value != VARY_ACCEPT_ENCODING) {
+ offset += info->hash_length;
+ ++info;
+ ++count;
+ }
+
+ if (count == hash_info_count)
+ return -1;
+
+ while (http_find_header(htx, ist("content-encoding"), &ctx, 0)) {
+ if (parse_encoding_value(ctx.value, &encoding_value, NULL))
+ return -1; /* Do not store responses with an unknown encoding */
+ resp_encoding_bitmap |= encoding_value;
+ }
+
+ if (!resp_encoding_bitmap)
+ resp_encoding_bitmap |= VARY_ENCODING_IDENTITY;
+
+ /* Rewrite the bitmap part of the hash with the new bitmap that only
+ * corresponds the the response's encoding. */
+ write_u32(secondary_key + offset, resp_encoding_bitmap);
+
+ return 0;
+}
+
+
+/*
+ * This function will store the headers of the response in a buffer and then
+ * register a filter to store the data
+ */
+enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ int effective_maxage = 0;
+ int true_maxage = 0;
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->rsp;
+ struct filter *filter;
+ struct shared_block *first = NULL;
+ struct cache_flt_conf *cconf = rule->arg.act.p[0];
+ struct cache *cache = cconf->c.cache;
+ struct shared_context *shctx = shctx_ptr(cache);
+ struct cache_st *cache_ctx = NULL;
+ struct cache_entry *object, *old;
+ unsigned int key = read_u32(txn->cache_hash);
+ struct htx *htx;
+ struct http_hdr_ctx ctx;
+ size_t hdrs_len = 0;
+ int32_t pos;
+ unsigned int vary_signature = 0;
+ struct cache_tree *cache_tree = NULL;
+
+ /* Don't cache if the response came from a cache */
+ if ((obj_type(s->target) == OBJ_TYPE_APPLET) &&
+ s->target == &http_cache_applet.obj_type) {
+ goto out;
+ }
+
+ /* cache only HTTP/1.1 */
+ if (!(txn->req.flags & HTTP_MSGF_VER_11))
+ goto out;
+
+ cache_tree = get_cache_tree_from_hash(cache, read_u32(txn->cache_hash));
+
+ /* cache only GET method */
+ if (txn->meth != HTTP_METH_GET) {
+ /* In case of successful unsafe method on a stored resource, the
+ * cached entry must be invalidated (see RFC7234#4.4).
+ * A "non-error response" is one with a 2xx (Successful) or 3xx
+ * (Redirection) status code. */
+ if (txn->status >= 200 && txn->status < 400) {
+ switch (txn->meth) {
+ case HTTP_METH_OPTIONS:
+ case HTTP_METH_GET:
+ case HTTP_METH_HEAD:
+ case HTTP_METH_TRACE:
+ break;
+
+ default: /* Any unsafe method */
+ /* Discard any corresponding entry in case of successful
+ * unsafe request (such as PUT, POST or DELETE). */
+ cache_wrlock(cache_tree);
+
+ old = get_entry(cache_tree, txn->cache_hash, 1);
+ if (old)
+ release_entry_locked(cache_tree, old);
+ cache_wrunlock(cache_tree);
+ }
+ }
+ goto out;
+ }
+
+ /* cache key was not computed */
+ if (!key)
+ goto out;
+
+ /* cache only 200 status code */
+ if (txn->status != 200)
+ goto out;
+
+ /* Find the corresponding filter instance for the current stream */
+ list_for_each_entry(filter, &s->strm_flt.filters, list) {
+ if (FLT_ID(filter) == cache_store_flt_id && FLT_CONF(filter) == cconf) {
+ /* No filter ctx, don't cache anything */
+ if (!filter->ctx)
+ goto out;
+ cache_ctx = filter->ctx;
+ break;
+ }
+ }
+
+ /* from there, cache_ctx is always defined */
+ htx = htxbuf(&s->res.buf);
+
+ /* Do not cache too big objects. */
+ if ((msg->flags & HTTP_MSGF_CNT_LEN) && shctx->max_obj_size > 0 &&
+ htx->data + htx->extra > shctx->max_obj_size)
+ goto out;
+
+ /* Only a subset of headers are supported in our Vary implementation. If
+ * any other header is present in the Vary header value, we won't be
+ * able to use the cache. Likewise, if Vary header support is disabled,
+ * avoid caching responses that contain such a header. */
+ ctx.blk = NULL;
+ if (cache->vary_processing_enabled) {
+ if (!http_check_vary_header(htx, &vary_signature))
+ goto out;
+ if (vary_signature) {
+ /* If something went wrong during the secondary key
+ * building, do not store the response. */
+ if (!(txn->flags & TX_CACHE_HAS_SEC_KEY))
+ goto out;
+ http_request_reduce_secondary_key(vary_signature, txn->cache_secondary_hash);
+ }
+ }
+ else if (http_find_header(htx, ist("Vary"), &ctx, 0)) {
+ goto out;
+ }
+
+ http_check_response_for_cacheability(s, &s->res);
+
+ if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
+ goto out;
+
+ cache_wrlock(cache_tree);
+ old = get_entry(cache_tree, txn->cache_hash, 1);
+ if (old) {
+ if (vary_signature)
+ old = get_secondary_entry(cache_tree, old,
+ txn->cache_secondary_hash, 1);
+ if (old) {
+ if (!old->complete) {
+ /* An entry with the same primary key is already being
+ * created, we should not try to store the current
+ * response because it will waste space in the cache. */
+ cache_wrunlock(cache_tree);
+ goto out;
+ }
+ release_entry_locked(cache_tree, old);
+ }
+ }
+ cache_wrunlock(cache_tree);
+
+ first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry));
+ if (!first) {
+ goto out;
+ }
+
+ /* the received memory is not initialized, we need at least to mark
+ * the object as not indexed yet.
+ */
+ object = (struct cache_entry *)first->data;
+ memset(object, 0, sizeof(*object));
+ object->eb.key = key;
+ object->secondary_key_signature = vary_signature;
+ /* We need to temporarily set a valid expiring time until the actual one
+ * is set by the end of this function (in case of concurrent accesses to
+ * the same resource). This way the second access will find an existing
+ * but not yet usable entry in the tree and will avoid storing its data. */
+ object->expire = date.tv_sec + 2;
+
+ memcpy(object->hash, txn->cache_hash, sizeof(object->hash));
+ if (vary_signature)
+ memcpy(object->secondary_key, txn->cache_secondary_hash, HTTP_CACHE_SEC_KEY_LEN);
+
+ cache_wrlock(cache_tree);
+ /* Insert the entry in the tree even if the payload is not cached yet. */
+ if (insert_entry(cache, cache_tree, object) != &object->eb) {
+ object->eb.key = 0;
+ cache_wrunlock(cache_tree);
+ goto out;
+ }
+ cache_wrunlock(cache_tree);
+
+ /* reserve space for the cache_entry structure */
+ first->len = sizeof(struct cache_entry);
+ first->last_append = NULL;
+
+ /* Determine the entry's maximum age (taking into account the cache's
+ * configuration) as well as the response's explicit max age (extracted
+ * from cache-control directives or the expires header). */
+ effective_maxage = http_calc_maxage(s, cache, &true_maxage);
+
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("Age"), &ctx, 0)) {
+ long long hdr_age;
+ if (!strl2llrc(ctx.value.ptr, ctx.value.len, &hdr_age) && hdr_age > 0) {
+ if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
+ hdr_age = CACHE_ENTRY_MAX_AGE;
+ /* A response with an Age value greater than its
+ * announced max age is stale and should not be stored. */
+ object->age = hdr_age;
+ if (unlikely(object->age > true_maxage))
+ goto out;
+ }
+ else
+ goto out;
+ http_remove_header(htx, &ctx);
+ }
+
+ /* Build a last-modified time that will be stored in the cache_entry and
+ * compared to a future If-Modified-Since client header. */
+ object->last_modified = get_last_modified_time(htx);
+
+ chunk_reset(&trash);
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+
+ hdrs_len += sizeof(*blk) + sz;
+ chunk_memcat(&trash, (char *)&blk->info, sizeof(blk->info));
+ chunk_memcat(&trash, htx_get_blk_ptr(htx, blk), sz);
+
+ /* Look for optional ETag header.
+ * We need to store the offset of the ETag value in order for
+ * future conditional requests to be able to perform ETag
+ * comparisons. */
+ if (type == HTX_BLK_HDR) {
+ struct ist header_name = htx_get_blk_name(htx, blk);
+ if (isteq(header_name, ist("etag"))) {
+ object->etag_length = sz - istlen(header_name);
+ object->etag_offset = sizeof(struct cache_entry) + b_data(&trash) - sz + istlen(header_name);
+ }
+ }
+ if (type == HTX_BLK_EOH)
+ break;
+ }
+
+ /* Do not cache objects if the headers are too big. */
+ if (hdrs_len > htx->size - global.tune.maxrewrite)
+ goto out;
+
+ /* If the response has a secondary_key, fill its key part related to
+ * encodings with the actual encoding of the response. This way any
+ * subsequent request having the same primary key will have its accepted
+ * encodings tested upon the cached response's one.
+ * We will not cache a response that has an unknown encoding (not
+ * explicitly supported in parse_encoding_value function). */
+ if (cache->vary_processing_enabled && vary_signature)
+ if (set_secondary_key_encoding(htx, object->secondary_key))
+ goto out;
+
+ if (!shctx_row_reserve_hot(shctx, first, trash.data)) {
+ goto out;
+ }
+
+ /* cache the headers in a http action because it allows to chose what
+ * to cache, for example you might want to cache a response before
+ * modifying some HTTP headers, or on the contrary after modifying
+ * those headers.
+ */
+ /* does not need to be locked because it's in the "hot" list,
+ * copy the headers */
+ if (shctx_row_data_append(shctx, first, (unsigned char *)trash.area, trash.data) < 0)
+ goto out;
+
+ /* register the buffer in the filter ctx for filling it with data*/
+ if (cache_ctx) {
+ cache_ctx->first_block = first;
+ LIST_INIT(&cache_ctx->detached_head);
+ /* store latest value and expiration time */
+ object->latest_validation = date.tv_sec;
+ object->expire = date.tv_sec + effective_maxage;
+ return ACT_RET_CONT;
+ }
+
+out:
+ /* if does not cache */
+ if (first) {
+ first->len = 0;
+ if (object->eb.key) {
+ release_entry_unlocked(cache_tree, object);
+ }
+ shctx_wrlock(shctx);
+ shctx_row_reattach(shctx, first);
+ shctx_wrunlock(shctx);
+ }
+
+ return ACT_RET_CONT;
+}
+
+/* States of the cache applet's response-forwarding state machine
+ * (see http_cache_io_handler()). */
+#define HTX_CACHE_INIT 0 /* Initial state. */
+#define HTX_CACHE_HEADER 1 /* Cache entry headers forwarding */
+#define HTX_CACHE_DATA 2 /* Cache entry data forwarding */
+#define HTX_CACHE_EOM 3 /* Cache entry completely forwarded. Finish the HTX message */
+#define HTX_CACHE_END 4 /* Cache entry treatment terminated */
+
+/* Applet release callback: drops the reference held on the cache entry for
+ * the duration of the applet's life and puts the entry's row back into the
+ * shared context's list. The row was detached ("hot") while the applet was
+ * serving it, so it must be reattached under the shctx write lock. */
+static void http_cache_applet_release(struct appctx *appctx)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
+	struct cache_entry *cache_ptr = ctx->entry;
+	struct cache *cache = cconf->c.cache;
+	struct shared_context *shctx = shctx_ptr(cache);
+	struct shared_block *first = block_ptr(cache_ptr);
+
+	/* drop our reference on the entry */
+	release_entry(ctx->cache_tree, cache_ptr, 1);
+
+	/* make the row available again for reuse/eviction */
+	shctx_wrlock(shctx);
+	shctx_row_reattach(shctx, first);
+	shctx_wrunlock(shctx);
+}
+
+
+/* Copy one non-DATA HTX block, described by its 4-byte <info> word, from the
+ * cache's shared-memory row into the response HTX message. <shblk>/<offset>
+ * locate the start of the block's payload, which may span several shared
+ * blocks. Returns the number of bytes consumed from the cache (including the
+ * 4-byte info word) or 0 if the block could not be added (no room in the
+ * output, or block larger than the available space). On success the applet
+ * context's read position (next/offset/sent) is updated. */
+static unsigned int htx_cache_dump_blk(struct appctx *appctx, struct htx *htx, enum htx_blk_type type,
+                                       uint32_t info, struct shared_block *shblk, unsigned int offset)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
+	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
+	struct htx_blk *blk;
+	char *ptr;
+	unsigned int max, total;
+	uint32_t blksz;
+
+	max = htx_get_max_blksz(htx,
+				channel_htx_recv_max(sc_ic(appctx_sc(appctx)), htx));
+	if (!max)
+		return 0;
+	/* HDR/TLR blocks encode their size as name length (low 8 bits) plus
+	 * value length (next 20 bits); other types use a plain 28-bit size. */
+	blksz = ((type == HTX_BLK_HDR || type == HTX_BLK_TLR)
+		 ? (info & 0xff) + ((info >> 8) & 0xfffff)
+		 : info & 0xfffffff);
+	if (blksz > max)
+		return 0;
+
+	blk = htx_add_blk(htx, type, blksz);
+	if (!blk)
+		return 0;
+
+	blk->info = info;
+	total = 4; /* account for the info word read from the cache */
+	ptr = htx_get_blk_ptr(htx, blk);
+	while (blksz) {
+		/* the payload may continue in the next shared block */
+		max = MIN(blksz, shctx->block_size - offset);
+		memcpy(ptr, (const char *)shblk->data + offset, max);
+		offset += max;
+		blksz -= max;
+		total += max;
+		ptr += max;
+		if (blksz || offset == shctx->block_size) {
+			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
+			offset = 0;
+		}
+	}
+	/* save the read position for the next call */
+	ctx->offset = offset;
+	ctx->next = shblk;
+	ctx->sent += total;
+	return total;
+}
+
+/* Same as htx_cache_dump_blk() but for DATA blocks, which may be emitted in
+ * several chunks: when the whole cached DATA block does not fit, the number
+ * of bytes still to emit is saved in ctx->rem_data so the copy can resume on
+ * the next call. Returns the number of bytes consumed from the cache
+ * (including the 4-byte info word when a new block is started; 0 extra when
+ * resuming a partially-sent block). */
+static unsigned int htx_cache_dump_data_blk(struct appctx *appctx, struct htx *htx,
+                                            uint32_t info, struct shared_block *shblk, unsigned int offset)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
+	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
+	unsigned int max, total, rem_data;
+	uint32_t blksz;
+
+	max = htx_get_max_blksz(htx,
+				channel_htx_recv_max(sc_ic(appctx_sc(appctx)), htx));
+	if (!max)
+		return 0;
+
+	rem_data = 0;
+	if (ctx->rem_data) {
+		/* resuming a partially-emitted DATA block: no info word to
+		 * account for */
+		blksz = ctx->rem_data;
+		total = 0;
+	}
+	else {
+		blksz = (info & 0xfffffff);
+		total = 4;
+	}
+	if (blksz > max) {
+		/* not everything fits; remember what will remain */
+		rem_data = blksz - max;
+		blksz = max;
+	}
+
+	while (blksz) {
+		size_t sz;
+
+		max = MIN(blksz, shctx->block_size - offset);
+		sz = htx_add_data(htx, ist2(shblk->data + offset, max));
+		offset += sz;
+		blksz -= sz;
+		total += sz;
+		if (sz < max)
+			break; /* output full, stop here */
+		if (blksz || offset == shctx->block_size) {
+			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
+			offset = 0;
+		}
+	}
+
+	/* save the read position; rem_data includes any bytes not copied
+	 * because htx_add_data() stopped short */
+	ctx->offset = offset;
+	ctx->next = shblk;
+	ctx->sent += total;
+	ctx->rem_data = rem_data + blksz;
+	return total;
+}
+
+/* Dump the cached HTX message into <htx>, consuming at most <len> bytes of
+ * cached data and stopping once a block of type <mark> has been emitted.
+ * Resumes from the position saved in the applet context (next shared block,
+ * offset, pending DATA bytes). Returns the number of cached bytes consumed
+ * (info words included). */
+static size_t htx_cache_dump_msg(struct appctx *appctx, struct htx *htx, unsigned int len,
+                                 enum htx_blk_type mark)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_flt_conf *cconf = appctx->rule->arg.act.p[0];
+	struct shared_context *shctx = shctx_ptr(cconf->c.cache);
+	struct shared_block *shblk;
+	unsigned int offset, sz;
+	unsigned int ret, total = 0;
+
+	while (len) {
+		enum htx_blk_type type;
+		uint32_t info;
+
+		shblk = ctx->next;
+		offset = ctx->offset;
+		if (ctx->rem_data) {
+			/* finish the DATA block left over from a previous
+			 * call; its info word was already consumed */
+			type = HTX_BLK_DATA;
+			info = 0;
+			goto add_data_blk;
+		}
+
+		/* Get info of the next HTX block. May be split on 2 shblk */
+		sz = MIN(4, shctx->block_size - offset);
+		memcpy((char *)&info, (const char *)shblk->data + offset, sz);
+		offset += sz;
+		if (sz < 4) {
+			shblk = LIST_NEXT(&shblk->list, typeof(shblk), list);
+			memcpy(((char *)&info)+sz, (const char *)shblk->data, 4 - sz);
+			offset = (4 - sz);
+		}
+
+		/* Get payload of the next HTX block and insert it. */
+		type = (info >> 28);
+		if (type != HTX_BLK_DATA)
+			ret = htx_cache_dump_blk(appctx, htx, type, info, shblk, offset);
+		else {
+		  add_data_blk:
+			ret = htx_cache_dump_data_blk(appctx, htx, info, shblk, offset);
+		}
+
+		if (!ret)
+			break;
+		total += ret;
+		len -= ret;
+
+		/* stop on a partially-sent DATA block or once the requested
+		 * marker block has been emitted */
+		if (ctx->rem_data || type == mark)
+			break;
+	}
+
+	return total;
+}
+
+/* Append an "Age" header to the HTX response, computed as the cache entry's
+ * stored age plus the time elapsed since its last validation, capped to
+ * CACHE_ENTRY_MAX_AGE. Returns 1 on success, 0 on failure. */
+static int htx_cache_add_age_hdr(struct appctx *appctx, struct htx *htx)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_entry *cache_ptr = ctx->entry;
+	unsigned int age;
+	char *end;
+
+	/* render the decimal value into the shared trash chunk */
+	chunk_reset(&trash);
+	age = MAX(0, (int)(date.tv_sec - cache_ptr->latest_validation)) + cache_ptr->age;
+	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
+		age = CACHE_ENTRY_MAX_AGE;
+	end = ultoa_o(age, b_head(&trash), b_size(&trash));
+	b_set_data(&trash, end - b_head(&trash));
+	if (!http_add_header(htx, ist("Age"), ist2(b_head(&trash), b_data(&trash))))
+		return 0;
+	return 1;
+}
+
+/* I/O handler of the cache applet: forwards the cached response (headers
+ * first, then the payload unless a HEAD request or a "304 Not Modified" is
+ * being served) to the client through the response channel, following the
+ * HTX_CACHE_INIT -> HEADER -> DATA -> EOM -> END state machine. Any failure
+ * while dumping the headers results in a 500 error being sent instead. */
+static void http_cache_io_handler(struct appctx *appctx)
+{
+	struct cache_appctx *ctx = appctx->svcctx;
+	struct cache_entry *cache_ptr = ctx->entry;
+	struct shared_block *first = block_ptr(cache_ptr);
+	struct stconn *sc = appctx_sc(appctx);
+	struct channel *req = sc_oc(sc);
+	struct channel *res = sc_ic(sc);
+	struct htx *req_htx, *res_htx;
+	struct buffer *errmsg;
+	unsigned int len;
+	size_t ret, total = 0;
+
+	res_htx = htx_from_buf(&res->buf);
+	total = res_htx->data;
+
+	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
+		goto out;
+
+	/* Check if the input buffer is available. */
+	if (!b_size(&res->buf)) {
+		sc_need_room(sc, 0);
+		goto out;
+	}
+
+	if (appctx->st0 == HTX_CACHE_INIT) {
+		/* start reading right after the cache_entry structure stored
+		 * at the beginning of the first row */
+		ctx->next = block_ptr(cache_ptr);
+		ctx->offset = sizeof(*cache_ptr);
+		ctx->sent = 0;
+		ctx->rem_data = 0;
+		appctx->st0 = HTX_CACHE_HEADER;
+	}
+
+	if (appctx->st0 == HTX_CACHE_HEADER) {
+		/* Headers must be dumped at once. Otherwise it is an error */
+		len = first->len - sizeof(*cache_ptr) - ctx->sent;
+		ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_EOH);
+		if (!ret || (htx_get_tail_type(res_htx) != HTX_BLK_EOH) ||
+		    !htx_cache_add_age_hdr(appctx, res_htx))
+			goto error;
+
+		/* In case of a conditional request, we might want to send a
+		 * "304 Not Modified" response instead of the stored data. */
+		if (ctx->send_notmodified) {
+			if (!http_replace_res_status(res_htx, ist("304"), ist("Not Modified"))) {
+				/* If replacing the status code fails we need to send the full response. */
+				ctx->send_notmodified = 0;
+			}
+		}
+
+		/* Skip response body for HEAD requests or in case of "304 Not
+		 * Modified" response. */
+		if (__sc_strm(sc)->txn->meth == HTTP_METH_HEAD || ctx->send_notmodified)
+			appctx->st0 = HTX_CACHE_EOM;
+		else
+			appctx->st0 = HTX_CACHE_DATA;
+	}
+
+	if (appctx->st0 == HTX_CACHE_DATA) {
+		len = first->len - sizeof(*cache_ptr) - ctx->sent;
+		if (len) {
+			ret = htx_cache_dump_msg(appctx, res_htx, len, HTX_BLK_UNUSED);
+			if (ret < len) {
+				/* not everything fitted; ask for more room
+				 * and try again later */
+				sc_need_room(sc, channel_htx_recv_max(res, res_htx) + 1);
+				goto out;
+			}
+		}
+		appctx->st0 = HTX_CACHE_EOM;
+	}
+
+	if (appctx->st0 == HTX_CACHE_EOM) {
+		/* no more data are expected. */
+		res_htx->flags |= HTX_FL_EOM;
+		se_fl_set(appctx->sedesc, SE_FL_EOI);
+
+		appctx->st0 = HTX_CACHE_END;
+	}
+
+  end:
+	if (appctx->st0 == HTX_CACHE_END)
+		se_fl_set(appctx->sedesc, SE_FL_EOS);
+
+  out:
+	total = res_htx->data - total;
+	if (total)
+		channel_add_input(res, total);
+	htx_to_buf(res_htx, &res->buf);
+
+	/* eat the whole request */
+	if (co_data(req)) {
+		req_htx = htx_from_buf(&req->buf);
+		co_htx_skip(req, req_htx, co_data(req));
+		htx_to_buf(req_htx, &req->buf);
+	}
+	return;
+
+  error:
+	/* Send an HTTP 500 error */
+	b_reset(&res->buf);
+	errmsg = &http_err_chunks[HTTP_ERR_500];
+	res->buf.data = b_data(errmsg);
+	memcpy(res->buf.area, b_head(errmsg), b_data(errmsg));
+	res_htx = htx_from_buf(&res->buf);
+
+	total = 0;
+	se_fl_set(appctx->sedesc, SE_FL_ERROR);
+	appctx->st0 = HTX_CACHE_END;
+	goto end;
+}
+
+
+/* Resolve or create the cache filter configuration named <name> and bind it
+ * to <rule>. If a cache filter was already registered on the proxy with this
+ * name, it is reused; otherwise an implicit filter config is allocated and
+ * appended to the proxy's filter list. Returns 1 on success, 0 on error
+ * (with <err> filled via memprintf). */
+static int parse_cache_rule(struct proxy *proxy, const char *name, struct act_rule *rule, char **err)
+{
+	struct flt_conf *fconf;
+	struct cache_flt_conf *cconf = NULL;
+
+	if (!*name || strcmp(name, "if") == 0 || strcmp(name, "unless") == 0) {
+		memprintf(err, "expects a cache name");
+		goto err;
+	}
+
+	/* check if a cache filter was already registered with this cache
+	 * name, if that's the case, must use it. */
+	list_for_each_entry(fconf, &proxy->filter_configs, list) {
+		if (fconf->id == cache_store_flt_id) {
+			cconf = fconf->conf;
+			if (cconf && strcmp((char *)cconf->c.name, name) == 0) {
+				rule->arg.act.p[0] = cconf;
+				return 1;
+			}
+		}
+	}
+
+	/* Create the filter cache config */
+	cconf = calloc(1, sizeof(*cconf));
+	if (!cconf) {
+		memprintf(err, "out of memory\n");
+		goto err;
+	}
+	cconf->flags = CACHE_FLT_F_IMPLICIT_DECL;
+	cconf->c.name = strdup(name);
+	if (!cconf->c.name) {
+		memprintf(err, "out of memory\n");
+		goto err;
+	}
+
+	/* register a filter to fill the cache buffer */
+	fconf = calloc(1, sizeof(*fconf));
+	if (!fconf) {
+		memprintf(err, "out of memory\n");
+		goto err;
+	}
+	fconf->id = cache_store_flt_id;
+	fconf->conf = cconf;
+	fconf->ops = &cache_ops;
+	LIST_APPEND(&proxy->filter_configs, &fconf->list);
+
+	rule->arg.act.p[0] = cconf;
+	return 1;
+
+  err:
+	/* also release the strdup'ed name, which would otherwise leak when
+	 * the fconf allocation fails after a successful strdup() */
+	if (cconf)
+		free((char *)cconf->c.name);
+	free(cconf);
+	return 0;
+}
+
+/* Parser for the "cache-store" http action: registers the custom action
+ * callback and binds the named cache to the rule. Returns ACT_RET_PRS_OK on
+ * success, ACT_RET_PRS_ERR otherwise. */
+enum act_parse_ret parse_cache_store(const char **args, int *orig_arg, struct proxy *proxy,
+                                     struct act_rule *rule, char **err)
+{
+	rule->action_ptr = http_action_store_cache;
+	rule->action = ACT_CUSTOM;
+
+	if (parse_cache_rule(proxy, args[*orig_arg], rule, err)) {
+		(*orig_arg)++;
+		return ACT_RET_PRS_OK;
+	}
+	return ACT_RET_PRS_ERR;
+}
+
+/* Produces a SHA1 hash of the request URI and stores it in txn->cache_hash.
+ * Absolute-form URIs are hashed as-is; origin-form URIs are hashed as
+ * "https://" + the first Host header + the URI so that H1 and H2 requests
+ * for the same resource share a key. Returns 1 on success, 0 when the URI
+ * is empty or when the Host header is missing for a relative URI.
+ * NOTE(review): the HTTP method is not part of the hashed key here. */
+int sha1_hosturi(struct stream *s)
+{
+	struct http_txn *txn = s->txn;
+	struct htx *htx = htxbuf(&s->req.buf);
+	struct htx_sl *sl;
+	struct http_hdr_ctx ctx;
+	struct ist uri;
+	blk_SHA_CTX sha1_ctx;
+	struct buffer *trash;
+
+	trash = get_trash_chunk();
+	ctx.blk = NULL;
+
+	sl = http_get_stline(htx);
+	uri = htx_sl_req_uri(sl); // whole uri
+	if (!uri.len)
+		return 0;
+
+	/* In HTTP/1, most URIs are seen in origin form ('/path/to/resource'),
+	 * unless haproxy is deployed in front of an outbound cache. In HTTP/2,
+	 * URIs are almost always sent in absolute form with their scheme. In
+	 * this case, the scheme is almost always "https". In order to support
+	 * sharing of cache objects between H1 and H2, we'll hash the absolute
+	 * URI whenever known, or prepend "https://" + the Host header for
+	 * relative URIs. The difference will only appear on absolute HTTP/1
+	 * requests sent to an origin server, which practically is never met in
+	 * the real world so we don't care about the ability to share the same
+	 * key here. URIs are normalized from the absolute URI to an origin
+	 * form as well.
+	 */
+	if (!(sl->flags & HTX_SL_F_HAS_AUTHORITY)) {
+		chunk_istcat(trash, ist("https://"));
+		if (!http_find_header(htx, ist("Host"), &ctx, 0))
+			return 0;
+		chunk_istcat(trash, ctx.value);
+	}
+
+	chunk_istcat(trash, uri);
+
+	/* hash everything */
+	blk_SHA1_Init(&sha1_ctx);
+	blk_SHA1_Update(&sha1_ctx, trash->area, trash->data);
+	blk_SHA1_Final((unsigned char *)txn->cache_hash, &sha1_ctx);
+
+	return 1;
+}
+
+/* Looks for "If-None-Match" headers in the request and compares their value
+ * with the one that might have been stored in the cache_entry. If any of them
+ * matches, a "304 Not Modified" response should be sent instead of the cached
+ * data.
+ * Although unlikely in a GET/HEAD request, the "If-None-Match: *" syntax is
+ * valid and should receive a "304 Not Modified" response (RFC 7234#4.3.2).
+ *
+ * If no "If-None-Match" header was found, look for an "If-Modified-Since"
+ * header and compare its value (date) to the one stored in the cache_entry.
+ * If the request's date is later than the cached one, we also send a
+ * "304 Not Modified" response (see RFCs 7232#3.3 and 7234#4.3.2).
+ *
+ * Returns 1 if "304 Not Modified" should be sent, 0 otherwise.
+ */
+static int should_send_notmodified_response(struct cache *cache, struct htx *htx,
+                                            struct cache_entry *entry)
+{
+	int retval = 0;
+
+	struct http_hdr_ctx ctx = { .blk = NULL };
+	struct ist cache_entry_etag = IST_NULL;
+	struct buffer *etag_buffer = NULL;
+	int if_none_match_found = 0;
+
+	struct tm tm = {};
+	time_t if_modified_since = 0;
+
+	/* If we find a "If-None-Match" header in the request, rebuild the
+	 * cache_entry's ETag in order to perform comparisons.
+	 * There could be multiple "if-none-match" header lines. */
+	while (http_find_header(htx, ist("if-none-match"), &ctx, 0)) {
+		if_none_match_found = 1;
+
+		/* A '*' matches everything. */
+		if (isteq(ctx.value, ist("*")) != 0) {
+			retval = 1;
+			break;
+		}
+
+		/* No need to rebuild an etag if none was stored in the cache. */
+		if (entry->etag_length == 0)
+			break;
+
+		/* Rebuild the stored ETag (lazily, on the first header line
+		 * that needs it). */
+		if (etag_buffer == NULL) {
+			etag_buffer = get_trash_chunk();
+
+			if (shctx_row_data_get(shctx_ptr(cache), block_ptr(entry),
+					       (unsigned char*)b_orig(etag_buffer),
+					       entry->etag_offset, entry->etag_length) == 0) {
+				cache_entry_etag = ist2(b_orig(etag_buffer), entry->etag_length);
+			} else {
+				/* We could not rebuild the ETag in one go, we
+				 * won't send a "304 Not Modified" response. */
+				break;
+			}
+		}
+
+		if (http_compare_etags(cache_entry_etag, ctx.value) == 1) {
+			retval = 1;
+			break;
+		}
+	}
+
+	/* If the request did not contain an "If-None-Match" header, we look for
+	 * an "If-Modified-Since" header (see RFC 7232#3.3). */
+	if (retval == 0 && if_none_match_found == 0) {
+		ctx.blk = NULL;
+		if (http_find_header(htx, ist("if-modified-since"), &ctx, 1)) {
+			if (parse_http_date(istptr(ctx.value), istlen(ctx.value), &tm)) {
+				if_modified_since = my_timegm(&tm);
+
+				/* We send a "304 Not Modified" response if the
+				 * entry's last modified date is earlier than
+				 * the one found in the "If-Modified-Since"
+				 * header. */
+				retval = (entry->last_modified <= if_modified_since);
+			}
+		}
+	}
+
+	return retval;
+}
+
+/* "http-request cache-use" action: look the current request up in the cache
+ * and, on a hit on a complete entry, install the cache applet on the stream
+ * so the stored response is served instead of contacting the server. Always
+ * returns ACT_RET_CONT so the rule evaluation continues; a hit is signalled
+ * by setting the stream's target to the cache applet. */
+enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *px,
+                                          struct session *sess, struct stream *s, int flags)
+{
+
+	struct http_txn *txn = s->txn;
+	struct cache_entry *res, *sec_entry = NULL;
+	struct cache_flt_conf *cconf = rule->arg.act.p[0];
+	struct cache *cache = cconf->c.cache;
+	struct shared_context *shctx = shctx_ptr(cache);
+	struct shared_block *entry_block;
+
+	struct cache_tree *cache_tree = NULL;
+
+	/* Ignore cache for HTTP/1.0 requests and for requests other than GET
+	 * and HEAD */
+	if (!(txn->req.flags & HTTP_MSGF_VER_11) ||
+	    (txn->meth != HTTP_METH_GET && txn->meth != HTTP_METH_HEAD))
+		txn->flags |= TX_CACHE_IGNORE;
+
+	http_check_request_for_cacheability(s, &s->req);
+
+	/* The request's hash has to be calculated for all requests, even POSTs
+	 * or PUTs for instance because RFC7234 specifies that a successful
+	 * "unsafe" method on a stored resource must invalidate it
+	 * (see RFC7234#4.4). */
+	if (!sha1_hosturi(s))
+		return ACT_RET_CONT;
+
+	if (s->txn->flags & TX_CACHE_IGNORE)
+		return ACT_RET_CONT;
+
+	if (px == strm_fe(s))
+		_HA_ATOMIC_INC(&px->fe_counters.p.http.cache_lookups);
+	else
+		_HA_ATOMIC_INC(&px->be_counters.p.http.cache_lookups);
+
+	cache_tree = get_cache_tree_from_hash(cache, read_u32(s->txn->cache_hash));
+
+	if (!cache_tree)
+		return ACT_RET_CONT;
+
+	cache_rdlock(cache_tree);
+	res = get_entry(cache_tree, s->txn->cache_hash, 0);
+	/* We must not use an entry that is not complete but the check will be
+	 * performed after we look for a potential secondary entry (in case of
+	 * Vary). */
+	if (res) {
+		struct appctx *appctx;
+		int detached = 0;
+
+		/* take a reference so the entry cannot vanish while we use it */
+		retain_entry(res);
+
+		entry_block = block_ptr(res);
+		shctx_wrlock(shctx);
+		if (res->complete) {
+			/* detach the row ("hot") so it cannot be evicted */
+			shctx_row_detach(shctx, entry_block);
+			detached = 1;
+		} else {
+			release_entry(cache_tree, res, 0);
+			res = NULL;
+		}
+		shctx_wrunlock(shctx);
+		cache_rdunlock(cache_tree);
+
+		/* In case of Vary, we could have multiple entries with the same
+		 * primary hash. We need to calculate the secondary hash in order
+		 * to find the actual entry we want (if it exists). */
+		if (res && res->secondary_key_signature) {
+			if (!http_request_build_secondary_key(s, res->secondary_key_signature)) {
+				cache_rdlock(cache_tree);
+				sec_entry = get_secondary_entry(cache_tree, res,
+				                                s->txn->cache_secondary_hash, 0);
+				if (sec_entry && sec_entry != res) {
+					/* The wrong row was added to the hot list. */
+					release_entry(cache_tree, res, 0);
+					retain_entry(sec_entry);
+					shctx_wrlock(shctx);
+					if (detached)
+						shctx_row_reattach(shctx, entry_block);
+					entry_block = block_ptr(sec_entry);
+					shctx_row_detach(shctx, entry_block);
+					shctx_wrunlock(shctx);
+				}
+				res = sec_entry;
+				cache_rdunlock(cache_tree);
+			}
+			else {
+				/* the secondary key could not be built: give
+				 * everything back and fall through to the
+				 * server */
+				release_entry(cache_tree, res, 1);
+
+				res = NULL;
+				shctx_wrlock(shctx);
+				shctx_row_reattach(shctx, entry_block);
+				shctx_wrunlock(shctx);
+			}
+		}
+
+		/* We either looked for a valid secondary entry and could not
+		 * find one, or the entry we want to use is not complete. We
+		 * can't use the cache's entry and must forward the request to
+		 * the server. */
+		if (!res) {
+			return ACT_RET_CONT;
+		} else if (!res->complete) {
+			release_entry(cache_tree, res, 1);
+			return ACT_RET_CONT;
+		}
+
+		s->target = &http_cache_applet.obj_type;
+		if ((appctx = sc_applet_create(s->scb, objt_applet(s->target)))) {
+			struct cache_appctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+			appctx->st0 = HTX_CACHE_INIT;
+			appctx->rule = rule;
+			ctx->cache_tree = cache_tree;
+			ctx->entry = res;
+			ctx->next = NULL;
+			ctx->sent = 0;
+			ctx->send_notmodified =
+                                should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
+
+			if (px == strm_fe(s))
+				_HA_ATOMIC_INC(&px->fe_counters.p.http.cache_hits);
+			else
+				_HA_ATOMIC_INC(&px->be_counters.p.http.cache_hits);
+			return ACT_RET_CONT;
+		} else {
+			/* applet creation failed: undo the reference and the
+			 * row detach before falling back to the server */
+			s->target = NULL;
+			release_entry(cache_tree, res, 1);
+			shctx_wrlock(shctx);
+			shctx_row_reattach(shctx, entry_block);
+			shctx_wrunlock(shctx);
+			return ACT_RET_CONT;
+		}
+	}
+	cache_rdunlock(cache_tree);
+
+	/* Shared context does not need to be locked while we calculate the
+	 * secondary hash. */
+	if (!res && cache->vary_processing_enabled) {
+		/* Build a complete secondary hash until the server response
+		 * tells us which fields should be kept (if any). */
+		http_request_prebuild_full_secondary_key(s);
+	}
+	return ACT_RET_CONT;
+}
+
+
+/* Parser for the "cache-use" http action: registers the custom action
+ * callback and binds the named cache to the rule. Returns ACT_RET_PRS_OK on
+ * success, ACT_RET_PRS_ERR otherwise. */
+enum act_parse_ret parse_cache_use(const char **args, int *orig_arg, struct proxy *proxy,
+                                   struct act_rule *rule, char **err)
+{
+	rule->action_ptr = http_action_req_cache_use;
+	rule->action = ACT_CUSTOM;
+
+	if (parse_cache_rule(proxy, args[*orig_arg], rule, err)) {
+		(*orig_arg)++;
+		return ACT_RET_PRS_OK;
+	}
+	return ACT_RET_PRS_ERR;
+}
+
+/* Configuration parser for "cache" sections and their keywords
+ * ("total-max-size", "max-age", "max-object-size", "process-vary",
+ * "max-secondary-entries"). Fills the tmp_cache_config structure being
+ * built and returns a combination of ERR_* codes (0 on success). */
+int cfg_parse_cache(const char *file, int linenum, char **args, int kwm)
+{
+	int err_code = 0;
+
+	if (strcmp(args[0], "cache") == 0) { /* new cache section */
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : '%s' expects a <name> argument\n",
+			         file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		if (tmp_cache_config == NULL) {
+			struct cache *cache_config;
+
+			tmp_cache_config = calloc(1, sizeof(*tmp_cache_config));
+			if (!tmp_cache_config) {
+				ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+				err_code |= ERR_ALERT | ERR_ABORT;
+				goto out;
+			}
+
+			/* cache names are limited to 32 characters (+ NUL) */
+			strlcpy2(tmp_cache_config->id, args[1], 33);
+			if (strlen(args[1]) > 32) {
+				ha_warning("parsing [%s:%d]: cache name is limited to 32 characters, truncate to '%s'.\n",
+					   file, linenum, tmp_cache_config->id);
+				err_code |= ERR_WARN;
+			}
+
+			/* reject duplicate cache names */
+			list_for_each_entry(cache_config, &caches_config, list) {
+				if (strcmp(tmp_cache_config->id, cache_config->id) == 0) {
+					ha_alert("parsing [%s:%d]: Duplicate cache name '%s'.\n",
+					         file, linenum, tmp_cache_config->id);
+					err_code |= ERR_ALERT | ERR_ABORT;
+					goto out;
+				}
+			}
+
+			/* section defaults */
+			tmp_cache_config->maxage = 60;
+			tmp_cache_config->maxblocks = 0;
+			tmp_cache_config->maxobjsz = 0;
+			tmp_cache_config->max_secondary_entries = DEFAULT_MAX_SECONDARY_ENTRY;
+		}
+	} else if (strcmp(args[0], "total-max-size") == 0) {
+		unsigned long int maxsize;
+		char *err;
+
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		maxsize = strtoul(args[1], &err, 10);
+		if (err == args[1] || *err != '\0') {
+			ha_warning("parsing [%s:%d]: total-max-size wrong value '%s'\n",
+			           file, linenum, args[1]);
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		/* refuse sizes whose byte count would overflow an unsigned int */
+		if (maxsize > (UINT_MAX >> 20)) {
+			ha_warning("parsing [%s:%d]: \"total-max-size\" (%s) must not be greater than %u\n",
+			           file, linenum, args[1], UINT_MAX >> 20);
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		/* size in megabytes */
+		maxsize *= 1024 * 1024 / CACHE_BLOCKSIZE;
+		tmp_cache_config->maxblocks = maxsize;
+	} else if (strcmp(args[0], "max-age") == 0) {
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		if (!*args[1]) {
+			ha_warning("parsing [%s:%d]: '%s' expects an age parameter in seconds.\n",
+			           file, linenum, args[0]);
+			err_code |= ERR_WARN;
+		}
+
+		/* NOTE(review): atoi() silently maps non-numeric input to 0;
+		 * confirm this leniency is intended */
+		tmp_cache_config->maxage = atoi(args[1]);
+	} else if (strcmp(args[0], "max-object-size") == 0) {
+		unsigned int maxobjsz;
+		char *err;
+
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		if (!*args[1]) {
+			ha_warning("parsing [%s:%d]: '%s' expects a maximum file size parameter in bytes.\n",
+			           file, linenum, args[0]);
+			err_code |= ERR_WARN;
+		}
+
+		maxobjsz = strtoul(args[1], &err, 10);
+		if (err == args[1] || *err != '\0') {
+			ha_warning("parsing [%s:%d]: max-object-size wrong value '%s'\n",
+			           file, linenum, args[1]);
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+		tmp_cache_config->maxobjsz = maxobjsz;
+	} else if (strcmp(args[0], "process-vary") == 0) {
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		if (!*args[1]) {
+			ha_warning("parsing [%s:%d]: '%s' expects \"on\" or \"off\" (enable or disable vary processing).\n",
+				   file, linenum, args[0]);
+			err_code |= ERR_WARN;
+		}
+		if (strcmp(args[1], "on") == 0)
+			tmp_cache_config->vary_processing_enabled = 1;
+		else if (strcmp(args[1], "off") == 0)
+			tmp_cache_config->vary_processing_enabled = 0;
+		else {
+			ha_warning("parsing [%s:%d]: '%s' expects \"on\" or \"off\" (enable or disable vary processing).\n",
+				   file, linenum, args[0]);
+			err_code |= ERR_WARN;
+		}
+	} else if (strcmp(args[0], "max-secondary-entries") == 0) {
+		unsigned int max_sec_entries;
+		char *err;
+
+		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+
+		if (!*args[1]) {
+			ha_warning("parsing [%s:%d]: '%s' expects a strictly positive number.\n",
+				   file, linenum, args[0]);
+			err_code |= ERR_WARN;
+		}
+
+		max_sec_entries = strtoul(args[1], &err, 10);
+		if (err == args[1] || *err != '\0' || max_sec_entries == 0) {
+			ha_warning("parsing [%s:%d]: max-secondary-entries wrong value '%s'\n",
+			           file, linenum, args[1]);
+			err_code |= ERR_ABORT;
+			goto out;
+		}
+		tmp_cache_config->max_secondary_entries = max_sec_entries;
+	}
+	else if (*args[0] != 0) {
+		ha_alert("parsing [%s:%d] : unknown keyword '%s' in 'cache' section\n", file, linenum, args[0]);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+out:
+	return err_code;
+}
+
+/* Called once a "cache" section has been fully parsed: validates the section
+ * being built ("total-max-size" is mandatory; the default "max-object-size"
+ * is 1/256th of the total size and may not exceed half of it) and, on
+ * success, appends it to the list of caches to initialize. On error the
+ * temporary config is freed. Returns a combination of ERR_* codes (0 on
+ * success). */
+int cfg_post_parse_section_cache(void)
+{
+	int err_code = 0;
+
+	if (tmp_cache_config) {
+
+		if (tmp_cache_config->maxblocks <= 0) {
+			ha_alert("Size not specified for cache '%s'\n", tmp_cache_config->id);
+			err_code |= ERR_FATAL | ERR_ALERT;
+			goto out;
+		}
+
+		if (!tmp_cache_config->maxobjsz) {
+			/* Default max. file size is a 256th of the cache size. */
+			tmp_cache_config->maxobjsz =
+				(tmp_cache_config->maxblocks * CACHE_BLOCKSIZE) >> 8;
+		}
+		else if (tmp_cache_config->maxobjsz > tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2) {
+			ha_alert("\"max-object-size\" is limited to an half of \"total-max-size\" => %u\n", tmp_cache_config->maxblocks * CACHE_BLOCKSIZE / 2);
+			err_code |= ERR_FATAL | ERR_ALERT;
+			goto out;
+		}
+
+		/* add to the list of cache to init and reinit tmp_cache_config
+		 * for next cache section, if any.
+		 */
+		LIST_APPEND(&caches_config, &tmp_cache_config->list);
+		tmp_cache_config = NULL;
+		return err_code;
+	}
+out:
+	ha_free(&tmp_cache_config);
+	return err_code;
+
+}
+
+/* Post-check callback run once the whole configuration has been parsed:
+ * allocates the shared-memory context of every declared cache, moves each
+ * cache structure from the caches_config list into the shctx area (and onto
+ * the caches list), initializes its lookup trees and locks, and resolves the
+ * cache references held by the registered cache filters of every proxy.
+ * Returns a combination of ERR_* codes (ERR_NONE on success). */
+int post_check_cache(void)
+{
+	struct proxy *px;
+	struct cache *back, *cache_config, *cache;
+	struct shared_context *shctx;
+	int ret_shctx;
+	int err_code = ERR_NONE;
+	int i;
+
+	list_for_each_entry_safe(cache_config, back, &caches_config, list) {
+
+		ret_shctx = shctx_init(&shctx, cache_config->maxblocks, CACHE_BLOCKSIZE,
+		                       cache_config->maxobjsz, sizeof(struct cache));
+
+		if (ret_shctx <= 0) {
+			if (ret_shctx == SHCTX_E_INIT_LOCK)
+				ha_alert("Unable to initialize the lock for the cache.\n");
+			else
+				ha_alert("Unable to allocate cache.\n");
+
+			err_code |= ERR_FATAL | ERR_ALERT;
+			goto out;
+		}
+		shctx->free_block = cache_free_blocks;
+		shctx->reserve_finish = cache_reserve_finish;
+		shctx->cb_data = (void*)shctx->data;
+		/* the cache structure is stored in the shctx and added to the
+		 * caches list, we can remove the entry from the caches_config
+		 * list */
+		memcpy(shctx->data, cache_config, sizeof(struct cache));
+		cache = (struct cache *)shctx->data;
+		LIST_APPEND(&caches, &cache->list);
+		LIST_DELETE(&cache_config->list);
+		free(cache_config);
+		/* initialize the per-cache lookup trees and their locks */
+		for (i = 0; i < CACHE_TREE_NUM; ++i) {
+			cache->trees[i].entries = EB_ROOT;
+			HA_RWLOCK_INIT(&cache->trees[i].lock);
+
+			LIST_INIT(&cache->trees[i].cleanup_list);
+			HA_SPIN_INIT(&cache->trees[i].cleanup_lock);
+		}
+
+		/* Find all references for this cache in the existing filters
+		 * (over all proxies) and reference it in matching filters.
+		 */
+		for (px = proxies_list; px; px = px->next) {
+			struct flt_conf *fconf;
+			struct cache_flt_conf *cconf;
+
+			list_for_each_entry(fconf, &px->filter_configs, list) {
+				if (fconf->id != cache_store_flt_id)
+					continue;
+
+				cconf = fconf->conf;
+				if (strcmp(cache->id, cconf->c.name) == 0) {
+					/* the name is no longer needed once the
+					 * pointer is resolved */
+					free(cconf->c.name);
+					cconf->flags |= CACHE_FLT_INIT;
+					cconf->c.cache = cache;
+					break;
+				}
+			}
+		}
+	}
+
+out:
+	return err_code;
+
+}
+
+/* Callback table of the cache filter (registered by parse_cache_rule()). */
+struct flt_ops cache_ops = {
+	.init   = cache_store_init,
+	.check  = cache_store_check,
+	.deinit = cache_store_deinit,
+
+	/* Handle stream init/deinit */
+	.attach = cache_store_strm_init,
+	.detach = cache_store_strm_deinit,
+
+	/* Handle channels activity */
+	.channel_post_analyze = cache_store_post_analyze,
+
+	/* Filter HTTP requests and responses */
+	.http_headers        = cache_store_http_headers,
+	.http_payload        = cache_store_http_payload,
+	.http_end            = cache_store_http_end,
+};
+
+
+/* Try to match the beginning of <str> against the <encoding_name> string
+ * literal, whose first character the caller has already consumed. On a match
+ * the expression evaluates to <encoding_value>, otherwise to 0.
+ * Beware: this macro also reads and advances the "encoding" local variable
+ * of the enclosing function past the matched token (side effect). */
+#define CHECK_ENCODING(str, encoding_name, encoding_value) \
+	({ \
+		int retval = 0; \
+		if (istmatch(str, (struct ist){ .ptr = encoding_name+1, .len = sizeof(encoding_name) - 2 })) { \
+			retval = encoding_value; \
+			encoding = istadv(encoding, sizeof(encoding_name) - 2); \
+		} \
+		(retval); \
+	})
+
+/*
+ * Parse the encoding <encoding> and try to match the encoding part upon an
+ * encoding list of explicitly supported encodings (which all have a specific
+ * bit in an encoding bitmap). If a weight is included in the value, find out if
+ * it is null or not. The bit value will be set in the <encoding_value>
+ * parameter and the <has_null_weight> will be set to 1 if the weight is strictly
+ * 0, 0 otherwise.
+ * The encodings list is extracted from
+ * https://www.iana.org/assignments/http-parameters/http-parameters.xhtml.
+ * Returns 0 in case of success and -1 in case of error.
+ */
+static int parse_encoding_value(struct ist encoding, unsigned int *encoding_value,
+				unsigned int *has_null_weight)
+{
+	int retval = 0;
+
+	if (!encoding_value)
+		return -1;
+
+	if (!istlen(encoding))
+		return -1; /* Invalid encoding */
+
+	*encoding_value = 0;
+	if (has_null_weight)
+		*has_null_weight = 0;
+
+	/* dispatch on the first character, then let CHECK_ENCODING() match
+	 * the remainder of the token and advance <encoding> past it */
+	switch (*encoding.ptr) {
+	case 'a':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "aes128gcm", VARY_ENCODING_AES128GCM);
+		break;
+	case 'b':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "br", VARY_ENCODING_BR);
+		break;
+	case 'c':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "compress", VARY_ENCODING_COMPRESS);
+		break;
+	case 'd':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "deflate", VARY_ENCODING_DEFLATE);
+		break;
+	case 'e':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "exi", VARY_ENCODING_EXI);
+		break;
+	case 'g':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "gzip", VARY_ENCODING_GZIP);
+		break;
+	case 'i':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "identity", VARY_ENCODING_IDENTITY);
+		break;
+	case 'p':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "pack200-gzip", VARY_ENCODING_PACK200_GZIP);
+		break;
+	case 'x':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "x-gzip", VARY_ENCODING_GZIP);
+		if (!*encoding_value)
+			*encoding_value = CHECK_ENCODING(encoding, "x-compress", VARY_ENCODING_COMPRESS);
+		break;
+	case 'z':
+		encoding = istnext(encoding);
+		*encoding_value = CHECK_ENCODING(encoding, "zstd", VARY_ENCODING_ZSTD);
+		break;
+	case '*':
+		encoding = istnext(encoding);
+		*encoding_value = VARY_ENCODING_STAR;
+		break;
+	default:
+		retval = -1; /* Unmanaged encoding */
+		break;
+	}
+
+	/* Process the optional weight part of the encoding. */
+	if (*encoding_value) {
+		encoding = http_trim_leading_spht(encoding);
+		if (istlen(encoding)) {
+			if (*encoding.ptr != ';')
+				return -1;
+
+			if (has_null_weight) {
+				encoding = istnext(encoding);
+
+				encoding = http_trim_leading_spht(encoding);
+
+				*has_null_weight = isteq(encoding, ist("q=0"));
+			}
+		}
+	}
+
+	return retval;
+}
+
#define ACCEPT_ENCODING_MAX_ENTRIES 16
/*
 * Build a bitmap of the accept-encoding header.
 *
 * The bitmap is built by matching every sub-part of the accept-encoding value
 * with a subset of explicitly supported encodings, which all have their own bit
 * in the bitmap. This bitmap will be used to determine if a response can be
 * served to a client (that is if it has an encoding that is accepted by the
 * client). Any unknown encodings will be indicated by the VARY_ENCODING_OTHER
 * bit.
 *
 * The result is written into <buf> (4 bytes) and <buf_len> is set accordingly.
 * Returns 0 in case of success and -1 in case of error.
 */
static int accept_encoding_normalizer(struct htx *htx, struct ist hdr_name,
				      char *buf, unsigned int *buf_len)
{
	size_t count = 0;
	uint32_t encoding_bitmap = 0;
	unsigned int encoding_bmp_bl = -1; /* "blacklist" mask, starts all-ones */
	struct http_hdr_ctx ctx = { .blk = NULL };
	unsigned int encoding_value;
	unsigned int rejected_encoding;

	/* A user agent always accepts an unencoded value unless it explicitly
	 * refuses it through an "identity;q=0" accept-encoding value. */
	encoding_bitmap |= VARY_ENCODING_IDENTITY;

	/* Iterate over all the ACCEPT_ENCODING_MAX_ENTRIES first accept-encoding
	 * values that might span across multiple accept-encoding headers. */
	while (http_find_header(htx, hdr_name, &ctx, 0) && count < ACCEPT_ENCODING_MAX_ENTRIES) {
		count++;

		/* As per RFC7231#5.3.4, "An Accept-Encoding header field with a
		 * combined field-value that is empty implies that the user agent
		 * does not want any content-coding in response."
		 *
		 * We must (and did) count the existence of this empty header to not
		 * hit the `count == 0` case below, but must ignore the value to not
		 * include VARY_ENCODING_OTHER into the final bitmap.
		 */
		if (istlen(ctx.value) == 0)
			continue;

		/* Turn accept-encoding value to lower case */
		ist2bin_lc(istptr(ctx.value), ctx.value);

		/* Try to identify a known encoding and to manage null weights. */
		if (!parse_encoding_value(ctx.value, &encoding_value, &rejected_encoding)) {
			if (rejected_encoding)
				encoding_bmp_bl &= ~encoding_value;
			else
				encoding_bitmap |= encoding_value;
		}
		else {
			/* Unknown encoding */
			encoding_bitmap |= VARY_ENCODING_OTHER;
		}
	}

	/* If a "*" was found in the accepted encodings (without a null weight),
	 * all the encoding are accepted except the ones explicitly rejected. */
	if (encoding_bitmap & VARY_ENCODING_STAR) {
		encoding_bitmap = ~0;
	}

	/* Clear explicitly rejected encodings from the bitmap */
	encoding_bitmap &= encoding_bmp_bl;

	/* As per RFC7231#5.3.4, "If no Accept-Encoding field is in the request,
	 * any content-coding is considered acceptable by the user agent". */
	if (count == 0)
		encoding_bitmap = ~0;

	/* A request with more than ACCEPT_ENCODING_MAX_ENTRIES accepted
	 * encodings might be illegitimate so we will not use it. */
	if (count == ACCEPT_ENCODING_MAX_ENTRIES)
		return -1;

	write_u32(buf, encoding_bitmap);
	*buf_len = sizeof(encoding_bitmap);

	/* This function fills the hash buffer correctly even if no header was
	 * found, hence the 0 return value (success). */
	return 0;
}
#undef ACCEPT_ENCODING_MAX_ENTRIES
+
/*
 * Normalizer used by default for the Referer and Origin header. It only
 * calculates a hash of the whole value using xxhash algorithm.
 * Only the first occurrence of the header will be taken into account in the
 * hash.
 * Returns 0 in case of success, 1 if the hash buffer should be filled with 0s
 * and -1 in case of error.
 */
static int default_normalizer(struct htx *htx, struct ist hdr_name,
			      char *buf, unsigned int *buf_len)
{
	int retval = 1;	/* default: header absent, caller zero-fills the hash */
	struct http_hdr_ctx ctx = { .blk = NULL };

	if (http_find_header(htx, hdr_name, &ctx, 1)) {
		retval = 0;
		/* hash the full header value with the per-process seed */
		write_u64(buf, XXH3(istptr(ctx.value), istlen(ctx.value), cache_hash_seed));
		*buf_len = sizeof(uint64_t);
	}

	return retval;
}
+
+/*
+ * Accept-Encoding bitmap comparison function.
+ * Returns 0 if the bitmaps are compatible.
+ */
+static int accept_encoding_bitmap_cmp(const void *ref, const void *new, unsigned int len)
+{
+ uint32_t ref_bitmap = read_u32(ref);
+ uint32_t new_bitmap = read_u32(new);
+
+ if (!(ref_bitmap & VARY_ENCODING_OTHER)) {
+ /* All the bits set in the reference bitmap correspond to the
+ * stored response' encoding and should all be set in the new
+ * encoding bitmap in order for the client to be able to manage
+ * the response.
+ *
+ * If this is the case the cached response has encodings that
+ * are accepted by the client. It can be served directly by
+ * the cache (as far as the accept-encoding part is concerned).
+ */
+
+ return (ref_bitmap & new_bitmap) != ref_bitmap;
+ }
+ else {
+ return 1;
+ }
+}
+
+
/*
 * Pre-calculate the hashes of all the supported headers (in our Vary
 * implementation) of a given request. We have to calculate all the hashes
 * in advance because the actual Vary signature won't be known until the first
 * response.
 * Only the first occurrence of every header will be taken into account in the
 * hash.
 * If the header is not present, the hash portion of the given header will be
 * filled with zeros.
 * Returns 0 in case of success.
 */
static int http_request_prebuild_full_secondary_key(struct stream *s)
{
	/* The fake signature (second parameter) will ensure that every part of the
	 * secondary key is calculated (~0 has every vary bit set). */
	return http_request_build_secondary_key(s, ~0);
}
+
+
/*
 * Calculate the secondary key for a request for which we already have a known
 * vary signature. The key is made by aggregating hashes calculated for every
 * header mentioned in the vary signature.
 * Only the first occurrence of every header will be taken into account in the
 * hash.
 * If the header is not present, the hash portion of the given header will be
 * filled with zeros.
 * Returns 0 in case of success.
 */
static int http_request_build_secondary_key(struct stream *s, int vary_signature)
{
	struct http_txn *txn = s->txn;
	struct htx *htx = htxbuf(&s->req.buf);

	unsigned int idx;
	const struct vary_hashing_information *info = NULL;
	unsigned int hash_length = 0;
	int retval = 0;
	int offset = 0;

	/* Walk all supported vary headers. A negative retval (hard error from
	 * a normalizer) stops the loop; a positive one (header absent) falls
	 * through to the zero-filling branch below. */
	for (idx = 0; idx < sizeof(vary_information)/sizeof(*vary_information) && retval >= 0; ++idx) {
		info = &vary_information[idx];

		/* The normalizing functions will be in charge of getting the
		 * header values from the htx. This way they can manage multiple
		 * occurrences of their processed header. */
		if ((vary_signature & info->value) && info->norm_fn != NULL &&
		    !(retval = info->norm_fn(htx, info->hdr_name, &txn->cache_secondary_hash[offset], &hash_length))) {
			offset += hash_length;
		}
		else {
			/* Fill hash with 0s. */
			hash_length = info->hash_length;
			memset(&txn->cache_secondary_hash[offset], 0, hash_length);
			offset += hash_length;
		}
	}

	/* mark the key as usable unless a normalizer reported a hard error */
	if (retval >= 0)
		txn->flags |= TX_CACHE_HAS_SEC_KEY;

	return (retval < 0);
}
+
+/*
+ * Build the actual secondary key of a given request out of the prebuilt key and
+ * the actual vary signature (extracted from the response).
+ * Returns 0 in case of success.
+ */
+static int http_request_reduce_secondary_key(unsigned int vary_signature,
+ char prebuilt_key[HTTP_CACHE_SEC_KEY_LEN])
+{
+ int offset = 0;
+ int global_offset = 0;
+ int vary_info_count = 0;
+ int keep = 0;
+ unsigned int vary_idx;
+ const struct vary_hashing_information *vary_info;
+
+ vary_info_count = sizeof(vary_information)/sizeof(*vary_information);
+ for (vary_idx = 0; vary_idx < vary_info_count; ++vary_idx) {
+ vary_info = &vary_information[vary_idx];
+ keep = (vary_signature & vary_info->value) ? 0xff : 0;
+
+ for (offset = 0; offset < vary_info->hash_length; ++offset,++global_offset) {
+ prebuilt_key[global_offset] &= keep;
+ }
+ }
+
+ return 0;
+}
+
+
+
/* Parse the "cache" filter keyword on a proxy "filter" line. Either reuses the
 * configuration of an implicit cache filter with the same name, or allocates a
 * new one, then attaches it to <fconf>.
 * Returns 0 on success, -1 on error (<err> is then filled).
 */
static int
parse_cache_flt(char **args, int *cur_arg, struct proxy *px,
                struct flt_conf *fconf, char **err, void *private)
{
	struct flt_conf *f, *back;
	struct cache_flt_conf *cconf = NULL;
	char *name = NULL;
	int pos = *cur_arg;

	/* Get the cache filter name. <pos> point on "cache" keyword */
	if (!*args[pos + 1]) {
		memprintf(err, "%s : expects a <name> argument", args[pos]);
		goto error;
	}
	name = strdup(args[pos + 1]);
	if (!name) {
		memprintf(err, "%s '%s' : out of memory", args[pos], args[pos + 1]);
		goto error;
	}
	pos += 2;

	/* Check if an implicit filter with the same name already exists. If so,
	 * we remove the implicit filter to use the explicit one. */
	list_for_each_entry_safe(f, back, &px->filter_configs, list) {
		if (f->id != cache_store_flt_id)
			continue;

		cconf = f->conf;
		if (strcmp(name, cconf->c.name) != 0) {
			cconf = NULL;
			continue;
		}

		/* a second explicit declaration of the same cache is an error */
		if (!(cconf->flags & CACHE_FLT_F_IMPLICIT_DECL)) {
			cconf = NULL;
			memprintf(err, "%s: multiple explicit declarations of the cache filter '%s'",
				  px->id, name);
			goto error;
		}

		/* Remove the implicit filter. <cconf> is kept for the explicit one */
		LIST_DELETE(&f->list);
		free(f);
		free(name);
		break;
	}

	/* No implicit cache filter found, create configuration for the explicit one */
	if (!cconf) {
		cconf = calloc(1, sizeof(*cconf));
		if (!cconf) {
			memprintf(err, "%s: out of memory", args[*cur_arg]);
			goto error;
		}
		/* <cconf> takes ownership of <name> */
		cconf->c.name = name;
	}

	cconf->flags = 0;
	fconf->id   = cache_store_flt_id;
	fconf->conf = cconf;
	fconf->ops  = &cache_ops;

	*cur_arg = pos;
	return 0;

  error:
	free(name);
	free(cconf);
	return -1;
}
+
/* "show cache" CLI parser. It reserves a struct show_cache_ctx for the local
 * variables and points the iterator at the head of the cache list so the I/O
 * handler can walk it. Requires admin level; returns 1 when access is denied,
 * 0 otherwise.
 */
static int cli_parse_show_cache(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_cache_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* start from the list head; list_for_each_entry_from() in the I/O
	 * handler will advance to the first real cache */
	ctx->cache = LIST_ELEM((caches).n, typeof(struct cache *), list);
	return 0;
}
+
/* "show cache" CLI I/O handler. It uses a struct show_cache_ctx for the local
 * variables so the dump can yield when the output buffer is full and resume
 * later from the saved cache / tree / next_key position.
 * Returns 1 when the dump is complete, 0 to be called again (yield).
 */
static int cli_io_handler_show_cache(struct appctx *appctx)
{
	struct show_cache_ctx *ctx = appctx->svcctx;
	struct cache* cache = ctx->cache;
	struct buffer *buf = alloc_trash_chunk();

	if (buf == NULL)
		return 1;

	list_for_each_entry_from(cache, &caches, list) {
		struct eb32_node *node = NULL;
		unsigned int next_key;
		struct cache_entry *entry;
		unsigned int i;
		struct shared_context *shctx = shctx_ptr(cache);
		int cache_tree_index = 0;
		struct cache_tree *cache_tree = NULL;

		next_key = ctx->next_key;
		if (!next_key) {
			/* first pass on this cache: emit its header line */
			shctx_rdlock(shctx);
			chunk_printf(buf, "%p: %s (shctx:%p, available blocks:%d)\n", cache, cache->id, shctx_ptr(cache), shctx_ptr(cache)->nbav);
			shctx_rdunlock(shctx);
			if (applet_putchk(appctx, buf) == -1) {
				goto yield;
			}
		}

		ctx->cache = cache;

		/* when resuming, restart from the tree we were dumping */
		if (ctx->cache_tree)
			cache_tree_index = (ctx->cache_tree - ctx->cache->trees);

		for (;cache_tree_index < CACHE_TREE_NUM; ++cache_tree_index) {

			ctx->cache_tree = cache_tree = &ctx->cache->trees[cache_tree_index];

			cache_rdlock(cache_tree);

			while (1) {
				node = eb32_lookup_ge(&cache_tree->entries, next_key);
				if (!node) {
					/* tree exhausted, reset the resume key */
					ctx->next_key = 0;
					break;
				}

				entry = container_of(node, struct cache_entry, eb);
				next_key = node->key + 1;

				/* only dump entries that have not expired yet */
				if (entry->expire > date.tv_sec) {
					chunk_printf(buf, "%p hash:%u vary:0x", entry, read_u32(entry->hash));
					for (i = 0; i < HTTP_CACHE_SEC_KEY_LEN; ++i)
						chunk_appendf(buf, "%02x", (unsigned char)entry->secondary_key[i]);
					chunk_appendf(buf, " size:%u (%u blocks), refcount:%u, expire:%d\n",
						      block_ptr(entry)->len, block_ptr(entry)->block_count,
						      block_ptr(entry)->refcount, entry->expire - (int)date.tv_sec);
				}

				ctx->next_key = next_key;

				if (applet_putchk(appctx, buf) == -1) {
					cache_rdunlock(cache_tree);
					goto yield;
				}
			}
			cache_rdunlock(cache_tree);
		}
	}

	free_trash_chunk(buf);
	return 1;

yield:
	free_trash_chunk(buf);
	return 0;
}
+
+
+/*
+ * boolean, returns true if response was built out of a cache entry.
+ */
+static int
+smp_fetch_res_cache_hit(const struct arg *args, struct sample *smp,
+ const char *kw, void *private)
+{
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = (smp->strm ? (smp->strm->target == &http_cache_applet.obj_type) : 0);
+
+ return 1;
+}
+
+/*
+ * string, returns cache name (if response came from a cache).
+ */
+static int
+smp_fetch_res_cache_name(const struct arg *args, struct sample *smp,
+ const char *kw, void *private)
+{
+ struct appctx *appctx = NULL;
+
+ struct cache_flt_conf *cconf = NULL;
+ struct cache *cache = NULL;
+
+ if (!smp->strm || smp->strm->target != &http_cache_applet.obj_type)
+ return 0;
+
+ /* Get appctx from the stream connector. */
+ appctx = sc_appctx(smp->strm->scb);
+ if (appctx && appctx->rule) {
+ cconf = appctx->rule->arg.act.p[0];
+ if (cconf) {
+ cache = cconf->c.cache;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ smp->data.u.str.area = cache->id;
+ smp->data.u.str.data = strlen(cache->id);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
/* early boot initialization: draws the random seed used by the vary hash
 * normalizers (see default_normalizer). */
static void cache_init()
{
	cache_hash_seed = ha_random64();
}

INITCALL0(STG_PREPARE, cache_init);
+
/* Declare the filter parser for "cache" keyword */
static struct flt_kw_list filter_kws = { "CACHE", { }, {
		{ "cache", parse_cache_flt, NULL },
		{ NULL, NULL, NULL },
	}
};

INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);

/* CLI keyword: "show cache" dumps the caches and their entries */
static struct cli_kw_list cli_kws = {{},{
	{ { "show", "cache", NULL }, "show cache : show cache status", cli_parse_show_cache, cli_io_handler_show_cache, NULL, NULL },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);

/* "http-response cache-store" action */
static struct action_kw_list http_res_actions = {
	.kw = {
		{ "cache-store", parse_cache_store },
		{ NULL, NULL }
	}
};

INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);

/* "http-request cache-use" action */
static struct action_kw_list http_req_actions = {
	.kw = {
		{ "cache-use", parse_cache_use },
		{ NULL, NULL }
	}
};

INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);

/* applet delivering cached objects to clients */
struct applet http_cache_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<CACHE>", /* used for logging */
	.fct = http_cache_io_handler,
	.release = http_cache_applet_release,
};

/* config parsers for this section */
REGISTER_CONFIG_SECTION("cache", cfg_parse_cache, cfg_post_parse_section_cache);
REGISTER_POST_CHECK(post_check_cache);


/* Note: must not be declared <const> as its list will be overwritten */
static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
		{ "res.cache_hit", smp_fetch_res_cache_hit, 0, NULL, SMP_T_BOOL, SMP_USE_HRSHP, SMP_VAL_RESPONSE },
		{ "res.cache_name", smp_fetch_res_cache_name, 0, NULL, SMP_T_STR, SMP_USE_HRSHP, SMP_VAL_RESPONSE },
		{ /* END */ },
	}
};

INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
diff --git a/src/calltrace.c b/src/calltrace.c
new file mode 100644
index 0000000..3946b28
--- /dev/null
+++ b/src/calltrace.c
@@ -0,0 +1,286 @@
+/*
+ * Function call tracing for gcc >= 2.95
+ * WARNING! THIS CODE IS NOT THREAD-SAFE!
+ *
+ * Copyright 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * gcc is able to call a specific function when entering and leaving any
+ * function when compiled with -finstrument-functions. This code must not
+ * be built with this argument. The performance impact is huge, so this
+ * feature should only be used when debugging.
+ *
+ * The entries and exits of all functions will be dumped into a file designated
+ * by the HAPROXY_TRACE environment variable, or by default "trace.out". If the
+ * trace file name is empty or "/dev/null", then traces are disabled. If
+ * opening the trace file fails, then stderr is used. If HAPROXY_TRACE_FAST is
+ * used, then the time is taken from the global <now> variable. Last, if
+ * HAPROXY_TRACE_TSC is used, then the machine's TSC is used instead of the
+ * real time (almost twice as fast).
+ *
+ * The output format is :
+ *
+ * <sec.usec> <level> <caller_ptr> <dir> <callee_ptr>
+ * or :
+ * <tsc> <level> <caller_ptr> <dir> <callee_ptr>
+ *
+ * where <dir> is '>' when entering a function and '<' when leaving.
+ *
+ * It is also possible to emit comments using the calltrace() function which uses
+ * the printf() format. Such comments are then inserted by replacing the caller
+ * pointer with a sharp ('#') like this :
+ *
+ * <sec.usec> <level> # <comment>
+ * or :
+ * <tsc> <level> # <comment>
+ *
+ * The article below is a nice explanation of how this works :
+ * http://balau82.wordpress.com/2010/10/06/trace-and-profile-function-calls-with-gcc/
+ */
+
+#include <sys/time.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/tools.h>
+
static FILE *log;          /* trace output stream (file, or stderr as fallback) */
static int level;          /* current function nesting depth */
static int disabled;       /* non-zero once tracing has been disabled */
static int fast_time;      /* non-zero to avoid calling gettimeofday() per line */
static int use_tsc;        /* non-zero to timestamp lines with the CPU's TSC */
static struct timeval trace_now;  /* local time storage used in fast mode */
static struct timeval *now_ptr;   /* points to the selected time source */
static char line[128]; /* more than enough for a message (9+1+6+1+3+1+18+1+1+18+1+1) */
+
+static int open_trace()
+{
+ const char *output = getenv("HAPROXY_TRACE");
+
+ if (!output)
+ output = "trace.out";
+
+ if (!*output || strcmp(output, "/dev/null") == 0) {
+ disabled = 1;
+ return 0;
+ }
+
+ log = fopen(output, "w");
+ if (!log)
+ log = stderr;
+
+ now_ptr = &date;
+ if (getenv("HAPROXY_TRACE_FAST") != NULL) {
+ fast_time = 1;
+ now_ptr = &trace_now;
+ }
+ if (getenv("HAPROXY_TRACE_TSC") != NULL) {
+ fast_time = 1;
+ use_tsc = 1;
+ }
+ return 1;
+}
+
/* This function first divides the number by 100M then iteratively multiplies it
 * by 100 (using adds and shifts). The trick is that dividing by 100M is equivalent
 * to multiplying by 1/100M, which approximates to 1441151881/2^57. All local
 * variables fit in registers on x86. This version outputs two digits per round.
 * <min_pairs> indicates the minimum number of pairs of digits that have to be
 * emitted, which might be left-padded with zeroes.
 * It returns the pointer to the ending '\0'.
 */
static char *ultoad2(unsigned int x, char *out, int min_pairs)
{
	unsigned int q;
	char *p = out;
	int pos = 4;	/* number of digit pairs remaining after this round */
	unsigned long long y;

	/* ASCII digit pairs "00".."99"; the first (tens) digit sits in the
	 * low byte, the second (units) digit in the high byte. */
	static const unsigned short bcd[100] = {
		0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
		0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
		0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
		0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
		0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
		0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
		0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
		0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
		0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
		0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939 };

	y = x * 1441151881ULL; /* y>>57 will be the integer part of x/100M */
	while (1) {
		q = y >> 57;
		/* Q is composed of the first digit in the lower byte and the second
		 * digit in the higher byte.
		 */
		if (p != out || q > 9 || pos < min_pairs) {
#if defined(__i386__) || defined(__x86_64__)
			/* unaligned accesses are fast on x86 */
			*(unsigned short *)p = bcd[q];
			p += 2;
#else
			*(p++) = bcd[q];
			*(p++) = bcd[q] >> 8;
#endif
		}
		else if (q || !pos) {
			/* only at most one digit */
			*(p++) = bcd[q] >> 8;
		}
		if (--pos < 0)
			break;

		y &= 0x1FFFFFFFFFFFFFFULL; // remainder

		if (sizeof(long) >= sizeof(long long)) {
			/* shifting is preferred on 64-bit archs, while mult is faster on 32-bit.
			 * We multiply by 100 by doing *5, *5 and *4, all of which are trivial.
			 */
			y += (y << 2);
			y += (y << 2);
			y <<= 2;
		}
		else
			y *= 100;
	}

	*p = '\0';
	return p;
}
+
/* Send <h> as lowercase hex into <out>, without leading zeroes (a lone "0" is
 * produced when h == 0). Returns the pointer to the ending '\0'. */
static char *emit_hex(unsigned long h, char *out)
{
	static unsigned char hextab[16] = "0123456789abcdef";
	int shift;
	int started = 0;

	for (shift = sizeof(h) * 8 - 4; shift >= 0; shift -= 4) {
		unsigned int nibble = (h >> shift) & 15;

		/* skip leading zero nibbles, but always emit the last one */
		if (nibble || started || !shift) {
			*out++ = hextab[nibble];
			started = 1;
		}
	}
	*out = '\0';
	return out;
}
+
/* Emit one trace line describing a call from <from> to <to> at nesting depth
 * <level>. <dir> is '>' on function entry and '<' on exit; <ret> is only
 * meaningful on exit. The line is formatted by hand with ultoad2()/emit_hex()
 * to keep overhead low, unless USE_SLOW_FPRINTF is defined.
 */
static void make_line(void *from, void *to, int level, char dir, long ret)
{
	char *p = line;

	/* lazily open the trace output on first use */
	if (unlikely(!log) && !open_trace())
		return;

	if (unlikely(!fast_time))
		gettimeofday(now_ptr, NULL);

#ifdef USE_SLOW_FPRINTF
	if (!use_tsc)
		fprintf(log, "%u.%06u %d %p %c %p\n",
			(unsigned int)now_ptr->tv_sec,
			(unsigned int)now_ptr->tv_usec,
			level, from, dir, to);
	else
		fprintf(log, "%llx %d %p %c %p\n",
			rdtsc(), level, from, dir, to);
	return;
#endif

	if (unlikely(!use_tsc)) {
		/* "%u.%06u", tv_sec, tv_usec */
		p = ultoad2(now_ptr->tv_sec, p, 0);
		*p++ = '.';
		p = ultoad2(now_ptr->tv_usec, p, 3);
	} else {
		/* "%08x%08x", high, low */
		unsigned long long t = rdtsc();
		if (sizeof(long) < sizeof(long long))
			p = emit_hex((unsigned long)(t >> 32U), p);
		p = emit_hex((unsigned long)(t), p);
	}

	/* " %u", level */
	*p++ = ' ';
	p = ultoad2(level, p, 0);

	/* " %p", from */
	*p++ = ' '; *p++ = '0'; *p++ = 'x';
	p = emit_hex((unsigned long)from, p);

	/* " %c", dir */
	*p++ = ' '; *p++ = dir;

	/* " %p", to */
	*p++ = ' '; *p++ = '0'; *p++ = 'x';
	p = emit_hex((unsigned long)to, p);

	if (dir == '<') {
		/* " %x", ret */
		*p++ = ' '; *p++ = '0'; *p++ = 'x';
		p = emit_hex(ret, p);
	}

	*p++ = '\n';

	fwrite(line, p - line, 1, log);
}
+
+/* These are the functions GCC calls */
+void __cyg_profile_func_enter(void *to, void *from)
+{
+ if (!disabled)
+ return make_line(from, to, ++level, '>', 0);
+}
+
+void __cyg_profile_func_exit(void *to, void *from)
+{
+ long ret = 0;
+
+#if defined(__x86_64__)
+ /* on x86_64, the return value (eax) is temporarily stored in ebx
+ * during the call to __cyg_profile_func_exit() so we can snoop it.
+ */
+ asm volatile("mov %%rbx, %0" : "=r"(ret));
+#endif
+ if (!disabled)
+ return make_line(from, to, level--, '<', ret);
+}
+
/* the one adds comments in the trace above. The output format is :
 *   <timestamp> <level> # <string>
 * The message follows printf() semantics, and the stream is flushed right
 * away so comments are visible even if the process dies afterwards.
 */
__attribute__((format(printf, 1, 2)))
void calltrace(char *fmt, ...)
{
	va_list ap;

	/* lazily open the trace output on first use */
	if (unlikely(!log) && !open_trace())
		return;

	if (unlikely(!fast_time))
		gettimeofday(now_ptr, NULL);

	if (!use_tsc)
		fprintf(log, "%u.%06u %d # ",
			(unsigned int)now_ptr->tv_sec,
			(unsigned int)now_ptr->tv_usec,
			level + 1);
	else
		fprintf(log, "%llx %d # ",
			rdtsc(), level + 1);

	va_start(ap, fmt);
	vfprintf(log, fmt, ap);
	va_end(ap);
	fputc('\n', log);
	fflush(log);
}
diff --git a/src/cbuf.c b/src/cbuf.c
new file mode 100644
index 0000000..b36bbeb
--- /dev/null
+++ b/src/cbuf.c
@@ -0,0 +1,59 @@
+/*
+ * Circular buffer management
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaill@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+#include <haproxy/cbuf-t.h>
+
+DECLARE_POOL(pool_head_cbuf, "cbuf", sizeof(struct cbuf));
+
+/* Allocate and return a new circular buffer with <buf> as <sz> byte internal buffer
+ * if succeeded, NULL if not.
+ */
+struct cbuf *cbuf_new(unsigned char *buf, size_t sz)
+{
+ struct cbuf *cbuf;
+
+ cbuf = pool_alloc(pool_head_cbuf);
+ if (cbuf) {
+ cbuf->sz = sz;
+ cbuf->buf = buf;
+ cbuf->wr = 0;
+ cbuf->rd = 0;
+ }
+
+ return cbuf;
+}
+
+/* Free QUIC ring <cbuf> */
+void cbuf_free(struct cbuf *cbuf)
+{
+ if (!cbuf)
+ return;
+
+ pool_free(pool_head_cbuf, cbuf);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/cfgcond.c b/src/cfgcond.c
new file mode 100644
index 0000000..117cf6c
--- /dev/null
+++ b/src/cfgcond.c
@@ -0,0 +1,559 @@
+/*
+ * Configuration condition preprocessor
+ *
+ * Copyright 2000-2021 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgcond.h>
+#include <haproxy/global.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/tools.h>
+
/* supported condition predicates: keyword, predicate ID, and expected
 * argument mask (count and types, see the ARG* macros). The table is
 * terminated by a NULL keyword entry. */
const struct cond_pred_kw cond_predicates[] = {
	{ "defined", CFG_PRED_DEFINED, ARG1(1, STR) },
	{ "feature", CFG_PRED_FEATURE, ARG1(1, STR) },
	{ "streq", CFG_PRED_STREQ, ARG2(2, STR, STR) },
	{ "strneq", CFG_PRED_STRNEQ, ARG2(2, STR, STR) },
	{ "strstr", CFG_PRED_STRSTR, ARG2(2, STR, STR) },
	{ "version_atleast", CFG_PRED_VERSION_ATLEAST, ARG1(1, STR) },
	{ "version_before", CFG_PRED_VERSION_BEFORE, ARG1(1, STR) },
	{ "openssl_version_atleast", CFG_PRED_OSSL_VERSION_ATLEAST, ARG1(1, STR) },
	{ "openssl_version_before", CFG_PRED_OSSL_VERSION_BEFORE, ARG1(1, STR) },
	{ "ssllib_name_startswith", CFG_PRED_SSLLIB_NAME_STARTSWITH, ARG1(1, STR) },
	{ "enabled", CFG_PRED_ENABLED, ARG1(1, STR) },
	{ NULL, CFG_PRED_NONE, 0 }
};
+
+/* looks up a cond predicate matching the keyword in <str>, possibly followed
+ * by a parenthesis. Returns a pointer to it or NULL if not found.
+ */
+const struct cond_pred_kw *cfg_lookup_cond_pred(const char *str)
+{
+ const struct cond_pred_kw *ret;
+ int len = strcspn(str, " (");
+
+ for (ret = &cond_predicates[0]; ret->word; ret++) {
+ if (len != strlen(ret->word))
+ continue;
+ if (strncmp(str, ret->word, len) != 0)
+ continue;
+ return ret;
+ }
+ return NULL;
+}
+
/* Frees <term> and its args. NULL is supported and does nothing. */
void cfg_free_cond_term(struct cfg_cond_term *term)
{
	if (!term)
		return;

	/* a parenthesized term owns a whole sub-expression */
	if (term->type == CCTT_PAREN) {
		cfg_free_cond_expr(term->expr);
		term->expr = NULL;
	}

	/* release both the argument contents and the argument array itself */
	free_args(term->args);
	free(term->args);
	free(term);
}
+
/* Parse an indirect input text as a possible config condition term.
 * Returns <0 on parsing error, 0 if the parser is desynchronized, or >0 on
 * success. <term> is allocated and filled with the parsed info, and <text>
 * is updated on success to point to the first unparsed character, or is left
 * untouched on failure. On success, the caller must free <term> using
 * cfg_free_cond_term(). An error will be set in <err> on error, and only
 * in this case. In this case the first bad character will be reported in
 * <errptr>. <maxdepth> corresponds to the maximum recursion depth permitted,
 * it is decremented on each recursive call and the parsing will fail once
 * reaching <= 0.
 */
int cfg_parse_cond_term(const char **text, struct cfg_cond_term **term, char **err, const char **errptr, int maxdepth)
{
	struct cfg_cond_term *t;
	const char *in = *text;
	const char *end_ptr;
	int err_arg;
	int nbargs;
	char *end;
	long val;

	while (*in == ' ' || *in == '\t')
		in++;

	if (!*in) /* empty term does not parse */
		return 0;

	*term = NULL;
	if (maxdepth <= 0)
		goto fail0;

	t = *term = calloc(1, sizeof(**term));
	if (!t) {
		memprintf(err, "memory allocation error while parsing conditional expression '%s'", *text);
		goto fail1;
	}

	t->type = CCTT_NONE;
	t->args = NULL;
	t->neg = 0;

	/* !<term> negates the term. White spaces permitted */
	while (*in == '!') {
		t->neg = !t->neg;
		do { in++; } while (*in == ' ' || *in == '\t');
	}

	/* an integer literal: zero means false, anything else true */
	val = strtol(in, &end, 0);
	if (end != in) {
		t->type = val ? CCTT_TRUE : CCTT_FALSE;
		*text = end;
		return 1;
	}

	/* Try to parse '(' EXPR ')' */
	if (*in == '(') {
		int ret;

		t->type = CCTT_PAREN;
		t->args = NULL;

		do { in++; } while (*in == ' ' || *in == '\t');
		ret = cfg_parse_cond_expr(&in, &t->expr, err, errptr, maxdepth - 1);
		if (ret == -1)
			goto fail2;
		if (ret == 0)
			goto fail0;

		/* find the closing ')' */
		while (*in == ' ' || *in == '\t')
			in++;
		if (*in != ')') {
			memprintf(err, "expected ')' after conditional expression '%s'", *text);
			goto fail1;
		}
		do { in++; } while (*in == ' ' || *in == '\t');
		*text = in;
		return 1;
	}

	/* below we'll likely call make_arg_list() so we must return only via
	 * the <done> label which frees the arg list.
	 */
	t->pred = cfg_lookup_cond_pred(in);
	if (t->pred) {
		t->type = CCTT_PRED;
		nbargs = make_arg_list(in + strlen(t->pred->word), -1,
		                       t->pred->arg_mask, &t->args, err,
		                       &end_ptr, &err_arg, NULL);
		if (nbargs < 0) {
			memprintf(err, "%s in argument %d of predicate '%s' used in conditional expression", *err, err_arg, t->pred->word);
			if (errptr)
				*errptr = end_ptr;
			goto fail2;
		}
		*text = end_ptr;
		return 1;
	}

	/* Note: the failure labels below intentionally fall through into each
	 * other: fail0 also reports the error position and frees the term. */
 fail0:
	memprintf(err, "unparsable conditional expression '%s'", *text);
 fail1:
	if (errptr)
		*errptr = *text;
 fail2:
	cfg_free_cond_term(*term);
	*term = NULL;
	return -1;
}
+
+/* evaluate a "enabled" expression. Only a subset of options are matched. It
+ * returns 1 if the option is enabled. 0 is returned is the option is not
+ * enabled or if it is not recognized.
+ */
+static int cfg_eval_cond_enabled(const char *str)
+{
+ if (strcmp(str, "POLL") == 0)
+ return !!(global.tune.options & GTUNE_USE_POLL);
+ else if (strcmp(str, "EPOLL") == 0)
+ return !!(global.tune.options & GTUNE_USE_EPOLL);
+ else if (strcmp(str, "KQUEUE") == 0)
+ return !!(global.tune.options & GTUNE_USE_EPOLL);
+ else if (strcmp(str, "EVPORTS") == 0)
+ return !!(global.tune.options & GTUNE_USE_EVPORTS);
+ else if (strcmp(str, "SPLICE") == 0)
+ return !!(global.tune.options & GTUNE_USE_SPLICE);
+ else if (strcmp(str, "GETADDRINFO") == 0)
+ return !!(global.tune.options & GTUNE_USE_GAI);
+ else if (strcmp(str, "REUSEPORT") == 0)
+ return !!(proto_tcpv4.flags & PROTO_F_REUSEPORT_SUPPORTED);
+ else if (strcmp(str, "FAST-FORWARD") == 0)
+ return !!(global.tune.options & GTUNE_USE_FAST_FWD);
+ else if (strcmp(str, "SERVER-SSL-VERIFY-NONE") == 0)
+ return !!(global.ssl_server_verify == SSL_SERVER_VERIFY_NONE);
+ return 0;
+}
+
+/* evaluate a condition term on a .if/.elif line. The condition was already
+ * parsed in <term>. Returns -1 on error (in which case err is filled with a
+ * message, and only in this case), 0 if the condition is false, 1 if it's
+ * true.
+ */
+int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
+{
+ int ret = -1;
+
+ if (term->type == CCTT_FALSE)
+ ret = 0;
+ else if (term->type == CCTT_TRUE)
+ ret = 1;
+ else if (term->type == CCTT_PRED) {
+ /* here we know we have a valid predicate with valid arguments
+ * placed in term->args (which the caller will free).
+ */
+ switch (term->pred->prd) {
+ case CFG_PRED_DEFINED: // checks if arg exists as an environment variable
+ ret = getenv(term->args[0].data.str.area) != NULL;
+ break;
+
+ case CFG_PRED_FEATURE: { // checks if the arg matches an enabled feature
+ const char *p;
+
+ ret = 0; // assume feature not found
+ for (p = build_features; (p = strstr(p, term->args[0].data.str.area)); p++) {
+ if (p > build_features &&
+ (p[term->args[0].data.str.data] == ' ' ||
+ p[term->args[0].data.str.data] == 0)) {
+ if (*(p-1) == '+') { // e.g. "+OPENSSL"
+ ret = 1;
+ break;
+ }
+ else if (*(p-1) == '-') { // e.g. "-OPENSSL"
+ ret = 0;
+ break;
+ }
+ /* it was a sub-word, let's restart from next place */
+ }
+ }
+ break;
+ }
+ case CFG_PRED_STREQ: // checks if the two arg are equal
+ ret = strcmp(term->args[0].data.str.area, term->args[1].data.str.area) == 0;
+ break;
+
+ case CFG_PRED_STRNEQ: // checks if the two arg are different
+ ret = strcmp(term->args[0].data.str.area, term->args[1].data.str.area) != 0;
+ break;
+
+ case CFG_PRED_STRSTR: // checks if the 2nd arg is found in the first one
+ ret = strstr(term->args[0].data.str.area, term->args[1].data.str.area) != NULL;
+ break;
+
+ case CFG_PRED_VERSION_ATLEAST: // checks if the current version is at least this one
+ ret = compare_current_version(term->args[0].data.str.area) <= 0;
+ break;
+
+ case CFG_PRED_VERSION_BEFORE: // checks if the current version is older than this one
+ ret = compare_current_version(term->args[0].data.str.area) > 0;
+ break;
+
+ case CFG_PRED_OSSL_VERSION_ATLEAST: { // checks if the current openssl version is at least this one
+ int opensslret = openssl_compare_current_version(term->args[0].data.str.area);
+
+ if (opensslret < -1) /* can't parse the string or no openssl available */
+ ret = -1;
+ else
+ ret = opensslret <= 0;
+ break;
+ }
+ case CFG_PRED_OSSL_VERSION_BEFORE: { // checks if the current openssl version is older than this one
+ int opensslret = openssl_compare_current_version(term->args[0].data.str.area);
+
+ if (opensslret < -1) /* can't parse the string or no openssl available */
+ ret = -1;
+ else
+ ret = opensslret > 0;
+ break;
+ }
+ case CFG_PRED_SSLLIB_NAME_STARTSWITH: { // checks if the current SSL library's name starts with a specified string (can be used to distinguish OpenSSL from LibreSSL or BoringSSL)
+ ret = openssl_compare_current_name(term->args[0].data.str.area) == 0;
+ break;
+ }
+ case CFG_PRED_ENABLED: { // checks if the arg matches on a subset of enabled options
+ ret = cfg_eval_cond_enabled(term->args[0].data.str.area) != 0;
+ break;
+ }
+ default:
+ memprintf(err, "internal error: unhandled conditional expression predicate '%s'", term->pred->word);
+ break;
+ }
+ }
+ else if (term->type == CCTT_PAREN) {
+ ret = cfg_eval_cond_expr(term->expr, err);
+ }
+ else {
+ memprintf(err, "internal error: unhandled condition term type %d", (int)term->type);
+ }
+
+ if (ret >= 0 && term->neg)
+ ret = !ret;
+ return ret;
+}
+
+
+/* Frees <expr> and its terms and args. NULL is supported and does nothing. */
+void cfg_free_cond_and(struct cfg_cond_and *expr)
+{
+ struct cfg_cond_and *prev;
+
+ while (expr) {
+ cfg_free_cond_term(expr->left);
+ prev = expr;
+ expr = expr->right;
+ free(prev);
+ }
+}
+
+/* Frees <expr> and its terms and args. NULL is supported and does nothing. */
+void cfg_free_cond_expr(struct cfg_cond_expr *expr)
+{
+ struct cfg_cond_expr *prev;
+
+ while (expr) {
+ cfg_free_cond_and(expr->left);
+ prev = expr;
+ expr = expr->right;
+ free(prev);
+ }
+}
+
/* Parse an indirect input text as a possible config condition sub-expression
 * made of one or more terms combined with "&&". Returns <0 on parsing error,
 * 0 if the parser is desynchronized (nothing could be parsed), or >0 on
 * success. <expr> is filled with the parsed info, and <text> is updated on
 * success to point to the first unparsed character, or is left untouched
 * on failure. On success, the caller will have to free all lower-level
 * allocated structs using cfg_free_cond_and(). An error will be set in
 * <err> on error, and only in this case. In this case the first bad
 * character will be reported in <errptr>. <maxdepth> corresponds to the
 * maximum recursion depth permitted, it is decremented on each recursive
 * call and the parsing will fail upon reaching <= 0.
 */
int cfg_parse_cond_and(const char **text, struct cfg_cond_and **expr, char **err, const char **errptr, int maxdepth)
{
	struct cfg_cond_and *e;
	const char *in = *text;
	int ret = -1;

	if (!*in) /* empty expr does not parse */
		return 0;

	*expr = NULL;
	if (maxdepth <= 0) {
		/* recursion limit reached, refuse to go any deeper */
		memprintf(err, "unparsable conditional sub-expression '%s'", in);
		if (errptr)
			*errptr = in;
		goto done;
	}

	e = *expr = calloc(1, sizeof(**expr));
	if (!e) {
		memprintf(err, "memory allocation error while parsing conditional expression '%s'", *text);
		goto done;
	}

	/* parse the mandatory left-hand term first */
	ret = cfg_parse_cond_term(&in, &e->left, err, errptr, maxdepth - 1);
	if (ret == -1) // parse error, error already reported
		goto done;

	if (ret == 0) {
		/* ret == 0, no other way to parse this */
		memprintf(err, "unparsable conditional sub-expression '%s'", in);
		if (errptr)
			*errptr = in;
		ret = -1;
		goto done;
	}

	/* ret=1, we have a term in the left hand set */

	/* find an optional '&&' */
	while (*in == ' ' || *in == '\t')
		in++;

	*text = in;
	if (in[0] != '&' || in[1] != '&')
		goto done;

	/* we have a '&&', let's parse the right handset's subexp */
	in += 2;
	while (*in == ' ' || *in == '\t')
		in++;

	ret = cfg_parse_cond_and(&in, &e->right, err, errptr, maxdepth - 1);
	if (ret > 0)
		*text = in;
 done:
	/* on any failure, release everything allocated so far and make sure
	 * the caller never sees a dangling pointer in <expr>.
	 */
	if (ret < 0) {
		cfg_free_cond_and(*expr);
		*expr = NULL;
	}
	return ret;
}
+
/* Parse an indirect input text as a possible config condition expression made
 * of one or more "&&" sub-expressions combined with "||". Returns <0 on
 * parsing error, 0 if the parser is desynchronized (nothing could be parsed),
 * or >0 on success. <expr> is filled with the parsed info, and <text> is
 * updated on success to point to the first unparsed character, or is left
 * untouched on failure. On success, the caller will have to free all
 * lower-level allocated structs using cfg_free_cond_expr(). An error will be
 * set in <err> on error, and only in this case. In this case the first bad
 * character will be reported in <errptr>. <maxdepth> corresponds to the
 * maximum recursion depth permitted, it is decremented on each recursive call
 * and the parsing will fail upon reaching <= 0.
 */
int cfg_parse_cond_expr(const char **text, struct cfg_cond_expr **expr, char **err, const char **errptr, int maxdepth)
{
	struct cfg_cond_expr *e;
	const char *in = *text;
	int ret = -1;

	if (!*in) /* empty expr does not parse */
		return 0;

	*expr = NULL;
	if (maxdepth <= 0) {
		/* recursion limit reached, refuse to go any deeper */
		memprintf(err, "unparsable conditional expression '%s'", in);
		if (errptr)
			*errptr = in;
		goto done;
	}

	e = *expr = calloc(1, sizeof(**expr));
	if (!e) {
		memprintf(err, "memory allocation error while parsing conditional expression '%s'", *text);
		goto done;
	}

	/* parse the mandatory left-hand "&&" sub-expression first */
	ret = cfg_parse_cond_and(&in, &e->left, err, errptr, maxdepth - 1);
	if (ret == -1) // parse error, error already reported
		goto done;

	if (ret == 0) {
		/* ret == 0, no other way to parse this */
		memprintf(err, "unparsable conditional expression '%s'", in);
		if (errptr)
			*errptr = in;
		ret = -1;
		goto done;
	}

	/* ret=1, we have a sub-expr in the left hand set */

	/* find an optional '||' */
	while (*in == ' ' || *in == '\t')
		in++;

	*text = in;
	if (in[0] != '|' || in[1] != '|')
		goto done;

	/* we have a '||', let's parse the right handset's subexp */
	in += 2;
	while (*in == ' ' || *in == '\t')
		in++;

	ret = cfg_parse_cond_expr(&in, &e->right, err, errptr, maxdepth - 1);
	if (ret > 0)
		*text = in;
 done:
	/* on any failure, release everything allocated so far and make sure
	 * the caller never sees a dangling pointer in <expr>.
	 */
	if (ret < 0) {
		cfg_free_cond_expr(*expr);
		*expr = NULL;
	}
	return ret;
}
+
+/* evaluate an sub-expression on a .if/.elif line. The expression is valid and
+ * was already parsed in <expr>. Returns -1 on error (in which case err is
+ * filled with a message, and only in this case), 0 if the condition is false,
+ * 1 if it's true.
+ */
+int cfg_eval_cond_and(struct cfg_cond_and *expr, char **err)
+{
+ int ret;
+
+ /* AND: loop on terms and sub-exp's terms as long as they're TRUE
+ * (stop on FALSE and ERROR).
+ */
+ while ((ret = cfg_eval_cond_term(expr->left, err)) > 0 && expr->right)
+ expr = expr->right;
+ return ret;
+}
+
+/* evaluate an expression on a .if/.elif line. The expression is valid and was
+ * already parsed in <expr>. Returns -1 on error (in which case err is filled
+ * with a message, and only in this case), 0 if the condition is false, 1 if
+ * it's true.
+ */
+int cfg_eval_cond_expr(struct cfg_cond_expr *expr, char **err)
+{
+ int ret;
+
+ /* OR: loop on sub-exps as long as they're FALSE (stop on TRUE and ERROR) */
+ while ((ret = cfg_eval_cond_and(expr->left, err)) == 0 && expr->right)
+ expr = expr->right;
+ return ret;
+}
+
/* evaluate a condition on a .if/.elif line. The condition is already tokenized
 * in <args>. Returns -1 on error (in which case err is filled with a message,
 * and only in this case), 0 if the condition is false, 1 if it's true. If
 * <errptr> is not NULL, it's set to the first invalid character on error.
 */
int cfg_eval_condition(char **args, char **err, const char **errptr)
{
	struct cfg_cond_expr *expr = NULL;
	const char *text = args[0];
	int ret = -1;

	if (!*text) /* note: empty = false */
		return 0;

	ret = cfg_parse_cond_expr(&text, &expr, err, errptr, MAX_CFG_RECURSION);
	if (ret != 0) {
		if (ret == -1) // parse error, error already reported
			goto done;
		/* the whole string must have been consumed, only trailing
		 * blanks are permitted after the expression.
		 */
		while (*text == ' ' || *text == '\t')
			text++;

		if (*text) {
			ret = -1;
			memprintf(err, "unexpected character '%c' at the end of conditional expression '%s'",
				  *text, args[0]);
			goto fail;
		}

		/* parsing succeeded, now evaluate the whole expression */
		ret = cfg_eval_cond_expr(expr, err);
		goto done;
	}

	/* ret == 0, no other way to parse this */
	ret = -1;
	memprintf(err, "unparsable conditional expression '%s'", args[0]);
 fail:
	if (errptr)
		*errptr = text;
 done:
	/* always release the parsed tree, whatever the outcome */
	cfg_free_cond_expr(expr);
	return ret;
}
diff --git a/src/cfgdiag.c b/src/cfgdiag.c
new file mode 100644
index 0000000..f8e4a9e
--- /dev/null
+++ b/src/cfgdiag.c
@@ -0,0 +1,97 @@
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <import/ebistree.h>
+
+#include <haproxy/cfgdiag.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/server.h>
+
/* Emits a diagnostic warning built from <fmt> and the following arguments.
 * As a shortcut it also raises the flag pointed to by <ret> to 1 in the
 * same call.
 */
static inline void diag_warning(int *ret, char *fmt, ...)
{
	va_list ap;

	*ret = 1;
	va_start(ap, fmt);
	_ha_vdiag_warning(fmt, ap);
	va_end(ap);
}
+
/* Allocates <size> bytes for use in diagnostics. In case of allocation
 * failure haproxy is terminated immediately, so the returned pointer is
 * always valid.
 */
static inline void *diag_alloc(size_t size)
{
	void *area = malloc(size);

	if (area == NULL) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	return area;
}
+
/* Checks that two servers from the same backend do not share the same cookie
 * value. Backup servers are not taken into account as it can be quite common to
 * share cookie values in this case. Emits one diagnostic warning (and sets
 * <ret> to 1 through diag_warning()) per duplicate found.
 */
static void check_server_cookies(int *ret)
{
	/* tree node wrapper; the node's key points to the server's cookie string */
	struct cookie_entry {
		struct ebpt_node node;
	};

	struct proxy *px;
	struct server *srv;

	struct eb_root cookies_tree = EB_ROOT_UNIQUE;
	struct ebpt_node *cookie_node;
	struct cookie_entry *cookie_entry;
	struct ebpt_node *node;

	/* one pass per proxy: the tree only holds cookies of the current backend */
	for (px = proxies_list; px; px = px->next) {
		for (srv = px->srv; srv; srv = srv->next) {
			/* do not take into account backup servers */
			if (!srv->cookie || (srv->flags & SRV_F_BACKUP))
				continue;

			/* a hit means a previous non-backup server of this
			 * backend already registered the same cookie value
			 */
			cookie_node = ebis_lookup(&cookies_tree, srv->cookie);
			if (cookie_node) {
				diag_warning(ret, "parsing [%s:%d] : 'server %s' : same cookie value is set for a previous non-backup server in the same backend, it may break connection persistence\n",
				             srv->conf.file, srv->conf.line, srv->id);
				continue;
			}

			cookie_entry = diag_alloc(sizeof(*cookie_entry));
			cookie_entry->node.key = srv->cookie;
			ebis_insert(&cookies_tree, &cookie_entry->node);
		}

		/* clear the tree and free its entries */
		while ((node = ebpt_first(&cookies_tree))) {
			cookie_entry = ebpt_entry(node, struct cookie_entry, node);
			eb_delete(&node->node);
			free(cookie_entry);
		}
	}
}
+
/* Placeholder to execute various diagnostic checks after the configuration file
 * has been fully parsed. It will output a warning for each diagnostic found.
 *
 * Returns 0 if no diagnostic message has been found else 1.
 */
int cfg_run_diagnostics(void)
{
	/* set to 1 by each check that emits at least one warning */
	int found = 0;

	check_server_cookies(&found);

	return found;
}
diff --git a/src/cfgparse-global.c b/src/cfgparse-global.c
new file mode 100644
index 0000000..f31e7a0
--- /dev/null
+++ b/src/cfgparse-global.c
@@ -0,0 +1,1396 @@
+#define _GNU_SOURCE /* for cpu_set_t from haproxy/cpuset.h */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <netdb.h>
+#include <ctype.h>
+#include <pwd.h>
+#include <grp.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <import/sha1.h>
+
+#include <haproxy/buf.h>
+#include <haproxy/cfgparse.h>
+#ifdef USE_CPU_AFFINITY
+#include <haproxy/cpuset.h>
+#endif
+#include <haproxy/compression.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/peers.h>
+#include <haproxy/protocol.h>
+#include <haproxy/tools.h>
+
/* set to non-zero once a "cluster-secret" directive has been parsed, so that
 * a second occurrence can be detected and rejected.
 */
int cluster_secret_isset;

/* some keywords that are still being parsed using strcmp() and are not
 * registered anywhere. They are used as suggestions for mistyped words.
 */
static const char *common_kw_list[] = {
	"global", "daemon", "master-worker", "noepoll", "nokqueue",
	"noevports", "nopoll", "busy-polling", "set-dumpable",
	"insecure-fork-wanted", "insecure-setuid-wanted", "nosplice",
	"nogetaddrinfo", "noreuseport", "quiet", "zero-warning",
	"tune.runqueue-depth", "tune.maxpollevents", "tune.maxaccept",
	"tune.recv_enough", "tune.buffers.limit",
	"tune.buffers.reserve", "tune.bufsize", "tune.maxrewrite",
	"tune.idletimer", "tune.rcvbuf.client", "tune.rcvbuf.server",
	"tune.sndbuf.client", "tune.sndbuf.server", "tune.pipesize",
	"tune.http.cookielen", "tune.http.logurilen", "tune.http.maxhdr",
	"tune.comp.maxlevel", "tune.pattern.cache-size",
	"tune.fast-forward", "uid", "gid",
	"external-check", "user", "group", "nbproc", "maxconn",
	"ssl-server-verify", "maxconnrate", "maxsessrate", "maxsslrate",
	"maxcomprate", "maxpipes", "maxzlibmem", "maxcompcpuusage", "ulimit-n",
	"chroot", "description", "node", "pidfile", "unix-bind", "log",
	"log-send-hostname", "server-state-base", "server-state-file",
	"log-tag", "spread-checks", "max-spread-checks", "cpu-map", "setenv",
	"presetenv", "unsetenv", "resetenv", "strict-limits", "localpeer",
	"numa-cpu-mapping", "defaults", "listen", "frontend", "backend",
	"peers", "resolvers", "cluster-secret", "no-quic", "limited-quic",
	NULL /* must be last */
};
+
+/*
+ * parse a line in a <global> section. Returns the error code, 0 if OK, or
+ * any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
+int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
+{
+ int err_code = 0;
+ char *errmsg = NULL;
+
+ if (strcmp(args[0], "global") == 0) { /* new section */
+ /* no option, nothing special to do */
+ alertif_too_many_args(0, file, linenum, args, &err_code);
+ goto out;
+ }
+ else if (strcmp(args[0], "expose-experimental-directives") == 0) {
+ experimental_directives_allowed = 1;
+ }
+ else if (strcmp(args[0], "daemon") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.mode |= MODE_DAEMON;
+ }
+ else if (strcmp(args[0], "master-worker") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*args[1]) {
+ if (strcmp(args[1], "no-exit-on-failure") == 0) {
+ global.tune.options |= GTUNE_NOEXIT_ONFAILURE;
+ } else {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'no-exit-on-failure' option.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ global.mode |= MODE_MWORKER;
+ }
+ else if (strcmp(args[0], "noepoll") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_EPOLL;
+ }
+ else if (strcmp(args[0], "nokqueue") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_KQUEUE;
+ }
+ else if (strcmp(args[0], "noevports") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_EVPORTS;
+ }
+ else if (strcmp(args[0], "nopoll") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_POLL;
+ }
+ else if (strcmp(args[0], "limited-quic") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+
+ global.tune.options |= GTUNE_LIMITED_QUIC;
+ }
+ else if (strcmp(args[0], "no-quic") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+
+ global.tune.options |= GTUNE_NO_QUIC;
+ }
+ else if (strcmp(args[0], "busy-polling") == 0) { /* "no busy-polling" or "busy-polling" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_BUSY_POLLING;
+ else
+ global.tune.options |= GTUNE_BUSY_POLLING;
+ }
+ else if (strcmp(args[0], "set-dumpable") == 0) { /* "no set-dumpable" or "set-dumpable" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_SET_DUMPABLE;
+ else
+ global.tune.options |= GTUNE_SET_DUMPABLE;
+ }
+ else if (strcmp(args[0], "h2-workaround-bogus-websocket-clients") == 0) { /* "no h2-workaround-bogus-websocket-clients" or "h2-workaround-bogus-websocket-clients" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_DISABLE_H2_WEBSOCKET;
+ else
+ global.tune.options |= GTUNE_DISABLE_H2_WEBSOCKET;
+ }
+ else if (strcmp(args[0], "insecure-fork-wanted") == 0) { /* "no insecure-fork-wanted" or "insecure-fork-wanted" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_INSECURE_FORK;
+ else
+ global.tune.options |= GTUNE_INSECURE_FORK;
+ }
+ else if (strcmp(args[0], "insecure-setuid-wanted") == 0) { /* "no insecure-setuid-wanted" or "insecure-setuid-wanted" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_INSECURE_SETUID;
+ else
+ global.tune.options |= GTUNE_INSECURE_SETUID;
+ }
+ else if (strcmp(args[0], "nosplice") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_SPLICE;
+ }
+ else if (strcmp(args[0], "nogetaddrinfo") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_GAI;
+ }
+ else if (strcmp(args[0], "noreuseport") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ protocol_clrf_all(PROTO_F_REUSEPORT_SUPPORTED);
+ }
+ else if (strcmp(args[0], "quiet") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.mode |= MODE_QUIET;
+ }
+ else if (strcmp(args[0], "zero-warning") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.mode |= MODE_ZERO_WARNING;
+ }
+ else if (strcmp(args[0], "tune.runqueue-depth") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.runqueue_depth != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.runqueue_depth = atol(args[1]);
+
+ }
+ else if (strcmp(args[0], "tune.maxpollevents") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.maxpollevents != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.maxpollevents = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.maxaccept") == 0) {
+ long max;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.maxaccept != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ max = atol(args[1]);
+ if (/*max < -1 || */max > INT_MAX) {
+ ha_alert("parsing [%s:%d] : '%s' expects -1 or an integer from 0 to INT_MAX.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.maxaccept = max;
+ }
+ else if (strcmp(args[0], "tune.chksize") == 0) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported any more (tune.bufsize is used instead).\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "tune.recv_enough") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.recv_enough = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.buffers.limit") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.buf_limit = atol(args[1]);
+ if (global.tune.buf_limit) {
+ if (global.tune.buf_limit < 3)
+ global.tune.buf_limit = 3;
+ if (global.tune.buf_limit <= global.tune.reserved_bufs)
+ global.tune.buf_limit = global.tune.reserved_bufs + 1;
+ }
+ }
+ else if (strcmp(args[0], "tune.buffers.reserve") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.reserved_bufs = atol(args[1]);
+ if (global.tune.reserved_bufs < 2)
+ global.tune.reserved_bufs = 2;
+ if (global.tune.buf_limit && global.tune.buf_limit <= global.tune.reserved_bufs)
+ global.tune.buf_limit = global.tune.reserved_bufs + 1;
+ }
+ else if (strcmp(args[0], "tune.bufsize") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.bufsize = atol(args[1]);
+ /* round it up to support a two-pointer alignment at the end */
+ global.tune.bufsize = (global.tune.bufsize + 2 * sizeof(void *) - 1) & -(2 * sizeof(void *));
+ if (global.tune.bufsize <= 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a positive integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "tune.maxrewrite") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.maxrewrite = atol(args[1]);
+ if (global.tune.maxrewrite < 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a positive integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "tune.idletimer") == 0) {
+ unsigned int idle;
+ const char *res;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a timer value between 0 and 65535 ms.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ res = parse_time_err(args[1], &idle, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s>, maximum value is 65535 ms.\n",
+ file, linenum, args[1], args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 ms.\n",
+ file, linenum, args[1], args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res) {
+ ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s>.\n",
+ file, linenum, *res, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (idle > 65535) {
+ ha_alert("parsing [%s:%d] : '%s' expects a timer value between 0 and 65535 ms.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.idle_timer = idle;
+ }
+ else if (strcmp(args[0], "tune.rcvbuf.client") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.client_rcvbuf != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.client_rcvbuf = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.rcvbuf.server") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.server_rcvbuf != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.server_rcvbuf = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.sndbuf.client") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.client_sndbuf != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.client_sndbuf = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.sndbuf.server") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.tune.server_sndbuf != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.server_sndbuf = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.pipesize") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.pipesize = atol(args[1]);
+ }
+ else if (strcmp(args[0], "tune.http.cookielen") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.cookie_len = atol(args[1]) + 1;
+ }
+ else if (strcmp(args[0], "tune.http.logurilen") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.requri_len = atol(args[1]) + 1;
+ }
+ else if (strcmp(args[0], "tune.http.maxhdr") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.tune.max_http_hdr = atoi(args[1]);
+ if (global.tune.max_http_hdr < 1 || global.tune.max_http_hdr > 32767) {
+ ha_alert("parsing [%s:%d] : '%s' expects a numeric value between 1 and 32767\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "tune.comp.maxlevel") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*args[1]) {
+ global.tune.comp_maxlevel = atoi(args[1]);
+ if (global.tune.comp_maxlevel < 1 || global.tune.comp_maxlevel > 9) {
+ ha_alert("parsing [%s:%d] : '%s' expects a numeric value between 1 and 9\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ } else {
+ ha_alert("parsing [%s:%d] : '%s' expects a numeric value between 1 and 9\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "tune.pattern.cache-size") == 0) {
+ if (*args[1]) {
+ global.tune.pattern_cache = atoi(args[1]);
+ if (global.tune.pattern_cache < 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a positive numeric value\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ } else {
+ ha_alert("parsing [%s:%d] : '%s' expects a positive numeric value\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "tune.disable-fast-forward") == 0) {
+ if (!experimental_directives_allowed) {
+ ha_alert("parsing [%s:%d] : '%s' directive is experimental, must be allowed via a global 'expose-experimental-directives'",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options &= ~GTUNE_USE_FAST_FWD;
+ }
+ else if (strcmp(args[0], "tune.disable-zero-copy-forwarding") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD;
+ }
+ else if (strcmp(args[0], "cluster-secret") == 0) {
+ blk_SHA_CTX sha1_ctx;
+ unsigned char sha1_out[20];
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*args[1] == 0) {
+ ha_alert("parsing [%s:%d] : expects an ASCII string argument.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (cluster_secret_isset) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ blk_SHA1_Init(&sha1_ctx);
+ blk_SHA1_Update(&sha1_ctx, args[1], strlen(args[1]));
+ blk_SHA1_Final(sha1_out, &sha1_ctx);
+ BUG_ON(sizeof sha1_out < sizeof global.cluster_secret);
+ memcpy(global.cluster_secret, sha1_out, sizeof global.cluster_secret);
+ cluster_secret_isset = 1;
+ }
+ else if (strcmp(args[0], "uid") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.uid != 0) {
+ ha_alert("parsing [%s:%d] : user/uid already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (strl2irc(args[1], strlen(args[1]), &global.uid) != 0) {
+ ha_warning("parsing [%s:%d] : uid: string '%s' is not a number.\n | You might want to use the 'user' parameter to use a system user name.\n", file, linenum, args[1]);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ }
+ else if (strcmp(args[0], "gid") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.gid != 0) {
+ ha_alert("parsing [%s:%d] : group/gid already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (strl2irc(args[1], strlen(args[1]), &global.gid) != 0) {
+ ha_warning("parsing [%s:%d] : gid: string '%s' is not a number.\n | You might want to use the 'group' parameter to use a system group name.\n", file, linenum, args[1]);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "external-check") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ global.external_check = 1;
+ if (strcmp(args[1], "preserve-env") == 0) {
+ global.external_check = 2;
+ } else if (*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'preserve-env' as an argument, found '%s'.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ /* user/group name handling */
+ else if (strcmp(args[0], "user") == 0) {
+ struct passwd *ha_user;
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.uid != 0) {
+ ha_alert("parsing [%s:%d] : user/uid already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ errno = 0;
+ ha_user = getpwnam(args[1]);
+ if (ha_user != NULL) {
+ global.uid = (int)ha_user->pw_uid;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : cannot find user id for '%s' (%d:%s)\n", file, linenum, args[1], errno, strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else if (strcmp(args[0], "group") == 0) {
+ struct group *ha_group;
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.gid != 0) {
+ ha_alert("parsing [%s:%d] : gid/group was already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ errno = 0;
+ ha_group = getgrnam(args[1]);
+ if (ha_group != NULL) {
+ global.gid = (int)ha_group->gr_gid;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : cannot find group id for '%s' (%d:%s)\n", file, linenum, args[1], errno, strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ /* end of user/group name handling*/
+ else if (strcmp(args[0], "nbproc") == 0) {
+ ha_alert("parsing [%s:%d] : nbproc is not supported any more since HAProxy 2.5. Threads will automatically be used on multi-processor machines if available.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "maxconn") == 0) {
+ char *stop;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.maxconn != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.maxconn = strtol(args[1], &stop, 10);
+ if (*stop != '\0') {
+ ha_alert("parsing [%s:%d] : cannot parse '%s' value '%s', an integer is expected.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+#ifdef SYSTEM_MAXCONN
+ if (global.maxconn > SYSTEM_MAXCONN && cfg_maxconn <= SYSTEM_MAXCONN) {
+ ha_alert("parsing [%s:%d] : maxconn value %d too high for this system.\nLimiting to %d. Please use '-n' to force the value.\n", file, linenum, global.maxconn, SYSTEM_MAXCONN);
+ global.maxconn = SYSTEM_MAXCONN;
+ err_code |= ERR_ALERT;
+ }
+#endif /* SYSTEM_MAXCONN */
+ }
+ else if (strcmp(args[0], "ssl-server-verify") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (strcmp(args[1],"none") == 0)
+ global.ssl_server_verify = SSL_SERVER_VERIFY_NONE;
+ else if (strcmp(args[1],"required") == 0)
+ global.ssl_server_verify = SSL_SERVER_VERIFY_REQUIRED;
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'none' or 'required' as argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "maxconnrate") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.cps_lim != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.cps_lim = atol(args[1]);
+ }
+ else if (strcmp(args[0], "maxsessrate") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.sps_lim != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.sps_lim = atol(args[1]);
+ }
+ else if (strcmp(args[0], "maxsslrate") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.ssl_lim != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.ssl_lim = atol(args[1]);
+ }
+ else if (strcmp(args[0], "maxcomprate") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument in kb/s.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.comp_rate_lim = atoi(args[1]) * 1024;
+ }
+ else if (strcmp(args[0], "maxpipes") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.maxpipes != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.maxpipes = atol(args[1]);
+ }
+ else if (strcmp(args[0], "maxzlibmem") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.maxzlibmem = atol(args[1]) * 1024L * 1024L;
+ }
+ else if (strcmp(args[0], "maxcompcpuusage") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument between 0 and 100.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ compress_min_idle = 100 - atoi(args[1]);
+ if (compress_min_idle > 100) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument between 0 and 100.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "fd-hard-limit") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.fd_hard_limit != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.fd_hard_limit = atol(args[1]);
+ }
+ else if (strcmp(args[0], "ulimit-n") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.rlimit_nofile != 0) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.rlimit_nofile = atol(args[1]);
+ }
+ else if (strcmp(args[0], "chroot") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.chroot != NULL) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a directory as an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.chroot = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "description") == 0) {
+ int i, len=0;
+ char *d;
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects a string argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ for (i = 1; *args[i]; i++)
+ len += strlen(args[i]) + 1;
+
+ if (global.desc)
+ free(global.desc);
+
+ global.desc = d = calloc(1, len);
+
+ d += snprintf(d, global.desc + len - d, "%s", args[1]);
+ for (i = 2; *args[i]; i++)
+ d += snprintf(d, global.desc + len - d, " %s", args[i]);
+ }
+ else if (strcmp(args[0], "node") == 0) {
+ int i;
+ char c;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ for (i=0; args[1][i]; i++) {
+ c = args[1][i];
+ if (!isupper((unsigned char)c) && !islower((unsigned char)c) &&
+ !isdigit((unsigned char)c) && c != '_' && c != '-' && c != '.')
+ break;
+ }
+
+ if (!i || args[1][i]) {
+ ha_alert("parsing [%s:%d]: '%s' requires valid node name - non-empty string"
+ " with digits(0-9), letters(A-Z, a-z), dot(.), hyphen(-) or underscode(_).\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (global.node)
+ free(global.node);
+
+ global.node = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "pidfile") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.pidfile != NULL) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a file name as an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.pidfile = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "unix-bind") == 0) {
+ int cur_arg = 1;
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "prefix") == 0) {
+ if (global.unix_bind.prefix != NULL) {
+ ha_alert("parsing [%s:%d] : unix-bind '%s' already specified. Continuing.\n", file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT;
+ cur_arg += 2;
+ continue;
+ }
+
+ if (*(args[cur_arg+1]) == 0) {
+ ha_alert("parsing [%s:%d] : unix_bind '%s' expects a path as an argument.\n", file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.unix_bind.prefix = strdup(args[cur_arg+1]);
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "mode") == 0) {
+
+ global.unix_bind.ux.mode = strtol(args[cur_arg + 1], NULL, 8);
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "uid") == 0) {
+
+ global.unix_bind.ux.uid = atol(args[cur_arg + 1 ]);
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "gid") == 0) {
+
+ global.unix_bind.ux.gid = atol(args[cur_arg + 1 ]);
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "user") == 0) {
+ struct passwd *user;
+
+ user = getpwnam(args[cur_arg + 1]);
+ if (!user) {
+ ha_alert("parsing [%s:%d] : '%s' : '%s' unknown user.\n",
+ file, linenum, args[0], args[cur_arg + 1 ]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ global.unix_bind.ux.uid = user->pw_uid;
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "group") == 0) {
+ struct group *group;
+
+ group = getgrnam(args[cur_arg + 1]);
+ if (!group) {
+ ha_alert("parsing [%s:%d] : '%s' : '%s' unknown group.\n",
+ file, linenum, args[0], args[cur_arg + 1 ]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ global.unix_bind.ux.gid = group->gr_gid;
+ cur_arg += 2;
+ continue;
+ }
+
+ ha_alert("parsing [%s:%d] : '%s' only supports the 'prefix', 'mode', 'uid', 'gid', 'user' and 'group' options.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "log") == 0) { /* "no log" or "log ..." */
+ if (!parse_logger(args, &global.loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "log-send-hostname") == 0) { /* set the hostname in syslog header */
+ char *name;
+
+ if (global.log_send_hostname != NULL) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ if (*(args[1]))
+ name = args[1];
+ else
+ name = hostname;
+
+ free(global.log_send_hostname);
+ global.log_send_hostname = strdup(name);
+ }
+ else if (strcmp(args[0], "server-state-base") == 0) { /* path base where HAProxy can find server state files */
+ if (global.server_state_base != NULL) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects one argument: a directory path.\n", file, linenum, args[0]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ global.server_state_base = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "server-state-file") == 0) { /* path to the file where HAProxy can load the server states */
+ if (global.server_state_file != NULL) {
+ ha_alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expect one argument: a file path.\n", file, linenum, args[0]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ global.server_state_file = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "log-tag") == 0) { /* tag to report to syslog */
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a tag for use in syslog.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ chunk_destroy(&global.log_tag);
+ chunk_initlen(&global.log_tag, strdup(args[1]), strlen(args[1]), strlen(args[1]));
+ if (b_orig(&global.log_tag) == NULL) {
+ chunk_destroy(&global.log_tag);
+ ha_alert("parsing [%s:%d]: cannot allocate memory for '%s'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "spread-checks") == 0) { /* random time between checks (0-50) */
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (global.spread_checks != 0) {
+ ha_alert("parsing [%s:%d]: spread-checks already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' expects an integer argument (0..50).\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ global.spread_checks = atol(args[1]);
+ if (global.spread_checks < 0 || global.spread_checks > 50) {
+ ha_alert("parsing [%s:%d]: 'spread-checks' needs a positive value in range 0..50.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else if (strcmp(args[0], "max-spread-checks") == 0) { /* maximum time between first and last check */
+ const char *err;
+ unsigned int val;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' expects an integer argument (0..50).\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err = parse_time_err(args[1], &val, TIME_UNIT_MS);
+ if (err == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s>, maximum value is 2147483647 ms (~24.8 days).\n",
+ file, linenum, args[1], args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 ms.\n",
+ file, linenum, args[1], args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (err) {
+ ha_alert("parsing [%s:%d]: unsupported character '%c' in '%s' (wants an integer delay).\n", file, linenum, *err, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ global.max_spread_checks = val;
+ }
+ else if (strcmp(args[0], "cpu-map") == 0) {
+ /* map a process list to a CPU set */
+#ifdef USE_CPU_AFFINITY
+ char *slash;
+ unsigned long tgroup = 0, thread = 0;
+ int g, j, n, autoinc;
+ struct hap_cpuset cpus, cpus_copy;
+
+ if (!*args[1] || !*args[2]) {
+ ha_alert("parsing [%s:%d] : %s expects a thread group number "
+ " ('all', 'odd', 'even', a number from 1 to %d or a range), "
+ " followed by a list of CPU ranges with numbers from 0 to %d.\n",
+ file, linenum, args[0], LONGBITS, LONGBITS - 1);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if ((slash = strchr(args[1], '/')) != NULL)
+ *slash = 0;
+
+ /* note: we silently ignore thread group numbers over MAX_TGROUPS
+ * and threads over MAX_THREADS so as not to make configurations a
+ * pain to maintain.
+ */
+ if (parse_process_number(args[1], &tgroup, LONGBITS, &autoinc, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (slash) {
+ if (parse_process_number(slash+1, &thread, LONGBITS, NULL, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ *slash = '/';
+ } else
+ thread = ~0UL; /* missing '/' = 'all' */
+
+ /* from now on, thread cannot be NULL anymore */
+
+ if (parse_cpu_set((const char **)args+2, &cpus, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (autoinc &&
+ my_popcountl(tgroup) != ha_cpuset_count(&cpus) &&
+ my_popcountl(thread) != ha_cpuset_count(&cpus)) {
+ ha_alert("parsing [%s:%d] : %s : TGROUP/THREAD range and CPU sets "
+ "must have the same size to be automatically bound\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* we now have to deal with 3 real cases :
+ * cpu-map P-Q => mapping for whole tgroups, numbers P to Q
+ * cpu-map P-Q/1 => mapping of first thread of groups P to Q
+ * cpu-map P/T-U => mapping of threads T to U of tgroup P
+ */
+ /* first tgroup, iterate on threads. E.g. cpu-map 1/1-4 0-3 */
+ for (g = 0; g < MAX_TGROUPS; g++) {
+ /* No mapping for this tgroup */
+ if (!(tgroup & (1UL << g)))
+ continue;
+
+ ha_cpuset_assign(&cpus_copy, &cpus);
+
+ /* a thread set is specified, apply the
+ * CPU set to these threads.
+ */
+ for (j = n = 0; j < MAX_THREADS_PER_GROUP; j++) {
+ /* No mapping for this thread */
+ if (!(thread & (1UL << j)))
+ continue;
+
+ if (!autoinc)
+ ha_cpuset_assign(&cpu_map[g].thread[j], &cpus);
+ else {
+ ha_cpuset_zero(&cpu_map[g].thread[j]);
+ n = ha_cpuset_ffs(&cpus_copy) - 1;
+ ha_cpuset_clr(&cpus_copy, n);
+ ha_cpuset_set(&cpu_map[g].thread[j], n);
+ }
+ }
+ }
+#else
+ ha_alert("parsing [%s:%d] : '%s' is not enabled, please check build options for USE_CPU_AFFINITY.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+#endif /* ! USE_CPU_AFFINITY */
+ }
+ else if (strcmp(args[0], "setenv") == 0 || strcmp(args[0], "presetenv") == 0) {
+ if (alertif_too_many_args(3, file, linenum, args, &err_code))
+ goto out;
+
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' expects a name and a value.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* "setenv" overwrites, "presetenv" only sets if not yet set */
+ if (setenv(args[1], args[2], (args[0][0] == 's')) != 0) {
+ ha_alert("parsing [%s:%d]: '%s' failed on variable '%s' : %s.\n", file, linenum, args[0], args[1], strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "unsetenv") == 0) {
+ int arg;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' expects at least one variable name.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ for (arg = 1; *args[arg]; arg++) {
+ if (unsetenv(args[arg]) != 0) {
+ ha_alert("parsing [%s:%d]: '%s' failed on variable '%s' : %s.\n", file, linenum, args[0], args[arg], strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ }
+ else if (strcmp(args[0], "resetenv") == 0) {
+ extern char **environ;
+ char **env = environ;
+
+ /* args contain variable names to keep, one per argument */
+ while (*env) {
+ int arg;
+
+ /* look for current variable in among all those we want to keep */
+ for (arg = 1; *args[arg]; arg++) {
+ if (strncmp(*env, args[arg], strlen(args[arg])) == 0 &&
+ (*env)[strlen(args[arg])] == '=')
+ break;
+ }
+
+ /* delete this variable */
+ if (!*args[arg]) {
+ char *delim = strchr(*env, '=');
+
+ if (!delim || delim - *env >= trash.size) {
+ ha_alert("parsing [%s:%d]: '%s' failed to unset invalid variable '%s'.\n", file, linenum, args[0], *env);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ memcpy(trash.area, *env, delim - *env);
+ trash.area[delim - *env] = 0;
+
+ if (unsetenv(trash.area) != 0) {
+ ha_alert("parsing [%s:%d]: '%s' failed to unset variable '%s' : %s.\n", file, linenum, args[0], *env, strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else
+ env++;
+ }
+ }
+ else if (strcmp(args[0], "quick-exit") == 0) {
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ global.tune.options |= GTUNE_QUICK_EXIT;
+ }
+ else if (strcmp(args[0], "strict-limits") == 0) { /* "no strict-limits" or "strict-limits" */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_NO)
+ global.tune.options &= ~GTUNE_STRICT_LIMITS;
+ }
+ else if (strcmp(args[0], "localpeer") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a name as an argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (global.localpeer_cmdline != 0) {
+ ha_warning("parsing [%s:%d] : '%s' ignored since it is already set by using the '-L' "
+ "command line argument.\n", file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ if (cfg_peers) {
+ ha_warning("parsing [%s:%d] : '%s' ignored since it is used after 'peers' section.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ free(localpeer);
+ if ((localpeer = strdup(args[1])) == NULL) {
+ ha_alert("parsing [%s:%d]: cannot allocate memory for '%s'.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ setenv("HAPROXY_LOCALPEER", localpeer, 1);
+ }
+ else if (strcmp(args[0], "numa-cpu-mapping") == 0) {
+ global.numa_cpu_mapping = (kwm == KWM_NO) ? 0 : 1;
+ }
+ else if (strcmp(args[0], "anonkey") == 0) {
+ long long tmp = 0;
+
+ if (*args[1] == 0) {
+ ha_alert("parsing [%s:%d]: a key is expected after '%s'.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (HA_ATOMIC_LOAD(&global.anon_key) == 0) {
+ tmp = atoll(args[1]);
+ if (tmp < 0 || tmp > UINT_MAX) {
+ ha_alert("parsing [%s:%d]: '%s' value must be within range %u-%u (was '%s').\n",
+ file, linenum, args[0], 0, UINT_MAX, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ HA_ATOMIC_STORE(&global.anon_key, tmp);
+ }
+ }
+ else {
+ struct cfg_kw_list *kwl;
+ const char *best;
+ int index;
+ int rc;
+
+ list_for_each_entry(kwl, &cfg_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (kwl->kw[index].section != CFG_GLOBAL)
+ continue;
+ if (strcmp(kwl->kw[index].kw, args[0]) == 0) {
+ if (check_kw_experimental(&kwl->kw[index], file, linenum, &errmsg)) {
+ ha_alert("%s\n", errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ rc = kwl->kw[index].parse(args, CFG_GLOBAL, NULL, NULL, file, linenum, &errmsg);
+ if (rc < 0) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (rc > 0) {
+ ha_warning("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+ goto out;
+ }
+ }
+ }
+
+ best = cfg_find_best_match(args[0], &cfg_keywords.list, CFG_GLOBAL, common_kw_list);
+ if (best)
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section; did you mean '%s' maybe ?\n", file, linenum, args[0], cursection, best);
+ else
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], "global");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ out:
+ free(errmsg);
+ return err_code;
+}
+
+/* Config parser for the global "prealloc-fd" keyword. It takes no argument.
+ * It sets global.prealloc_fd, which presumably requests pre-allocation of
+ * file descriptors at startup (effect applied elsewhere — confirm at the
+ * consumer of global.prealloc_fd).
+ * Returns 0 on success, or -1 with a message in <err> if any extra argument
+ * was passed.
+ */
+static int cfg_parse_prealloc_fd(char **args, int section_type, struct proxy *curpx,
+                                 const struct proxy *defpx, const char *file, int line,
+                                 char **err)
+{
+	/* the keyword accepts no argument at all */
+	if (too_many_args(0, args, err, NULL))
+		return -1;
+
+	global.prealloc_fd = 1;
+
+	return 0;
+}
+
+/* Keyword table for this file: maps the "prealloc-fd" global-section keyword
+ * to its parser above. ILH initializes the embedded list head; the array is
+ * terminated by an all-zero sentinel entry.
+ */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "prealloc-fd", cfg_parse_prealloc_fd },
+	{ 0, NULL, NULL },
+}};
+
+/* register the table with the config parser at the STG_REGISTER init stage */
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/cfgparse-listen.c b/src/cfgparse-listen.c
new file mode 100644
index 0000000..4f88b77
--- /dev/null
+++ b/src/cfgparse-listen.c
@@ -0,0 +1,3073 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <netdb.h>
+#include <ctype.h>
+#include <pwd.h>
+#include <grp.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/buf.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/compression-t.h>
+#include <haproxy/connection.h>
+#include <haproxy/extcheck.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_ext.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/peers.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_auth.h>
+
/* some keywords that are still being parsed using strcmp() and are not
 * registered anywhere. They are used as suggestions for mistyped words.
 * Each keyword must appear only once (a duplicate "force-persist" entry
 * was removed), and the list must end with a NULL sentinel.
 */
static const char *common_kw_list[] = {
	"listen", "frontend", "backend", "defaults", "server",
	"default-server", "server-template", "bind", "monitor-net",
	"monitor-uri", "mode", "id", "description", "disabled", "enabled",
	"acl", "dynamic-cookie-key", "cookie", "email-alert",
	"persist", "appsession", "load-server-state-from-file",
	"server-state-file-name", "max-session-srv-conns", "capture",
	"retries", "http-request", "http-response", "http-after-response",
	"http-send-name-header", "block", "redirect", "use_backend",
	"use-server", "force-persist", "ignore-persist",
	"stick-table", "stick", "stats", "option", "default_backend",
	"http-reuse", "monitor", "transparent", "maxconn", "backlog",
	"fullconn", "dispatch", "balance", "log-balance", "hash-type",
	"hash-balance-factor", "unique-id-format", "unique-id-header",
	"log-format", "log-format-sd", "log-tag", "log", "source", "usesrc",
	"error-log-format",
	NULL /* must be last */
};
+
/* arguments of the "option" keyword that are still parsed with strcmp();
 * like common_kw_list above, this NULL-terminated table appears intended as
 * spelling suggestions for mistyped words (presumably for "option" arguments
 * specifically — verify against the lookup site).
 */
static const char *common_options[] = {
	"httpclose", "http-server-close", "http-keep-alive",
	"redispatch", "httplog", "tcplog", "tcpka", "httpchk",
	"ssl-hello-chk", "smtpchk", "pgsql-check", "redis-check",
	"mysql-check", "ldap-check", "spop-check", "tcp-check",
	"external-check", "forwardfor", "original-to", "forwarded",
	NULL /* must be last */
};
+
+/* Report a warning if a rule is placed after a 'tcp-request session' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_tcp_sess(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->tcp_req.l5_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'tcp-request session' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after a 'tcp-request content' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_tcp_cont(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->tcp_req.inspect_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'tcp-request content' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after a 'monitor fail' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_monitor(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->mon_fail_cond)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'monitor fail' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after an 'http_request' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_http_req(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->http_req_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after an 'http-request' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after a redirect rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_redirect(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->redirect_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'redirect' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after a 'use_backend' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_use_backend(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->switching_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'use_backend' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
+/* Report a warning if a rule is placed after a 'use-server' rule.
+ * Return 1 if the warning has been emitted, otherwise 0.
+ */
+int warnif_rule_after_use_server(struct proxy *proxy, const char *file, int line, const char *arg)
+{
+ if (!LIST_ISEMPTY(&proxy->server_rules)) {
+ ha_warning("parsing [%s:%d] : a '%s' rule placed after a 'use-server' rule will still be processed before.\n",
+ file, line, arg);
+ return 1;
+ }
+ return 0;
+}
+
/* warn if a redirect rule comes after any rule it should precede;
 * returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_redirect(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_use_backend(proxy, file, line, arg))
		return 1;
	return warnif_rule_after_use_server(proxy, file, line, arg);
}
+
/* warn if an http-request rule comes after any rule it should precede;
 * returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_http_req(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_redirect(proxy, file, line, arg))
		return 1;
	return warnif_misplaced_redirect(proxy, file, line, arg);
}
+
/* warn if a monitor/block rule comes after any rule it should precede;
 * returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_monitor(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_http_req(proxy, file, line, arg))
		return 1;
	return warnif_misplaced_http_req(proxy, file, line, arg);
}
+
/* warn if a "tcp-request content" rule comes after any rule it should
 * precede; returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_tcp_cont(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_monitor(proxy, file, line, arg))
		return 1;
	return warnif_misplaced_monitor(proxy, file, line, arg);
}
+
/* warn if a "tcp-request session" rule comes after any rule it should
 * precede; returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_tcp_sess(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_tcp_cont(proxy, file, line, arg))
		return 1;
	return warnif_misplaced_tcp_cont(proxy, file, line, arg);
}
+
/* warn if a "tcp-request connection" rule comes after any rule it should
 * precede; returns 1 if a warning was emitted, 0 otherwise.
 */
int warnif_misplaced_tcp_conn(struct proxy *proxy, const char *file, int line, const char *arg)
{
	if (warnif_rule_after_tcp_sess(proxy, file, line, arg))
		return 1;
	return warnif_misplaced_tcp_sess(proxy, file, line, arg);
}
+
+int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
+{
+ static struct proxy *curr_defproxy = NULL;
+ static struct proxy *last_defproxy = NULL;
+ const char *err;
+ int rc;
+ int err_code = 0;
+ struct acl_cond *cond = NULL;
+ char *errmsg = NULL;
+ struct bind_conf *bind_conf;
+
+ if (!last_defproxy) {
+ /* we need a default proxy and none was created yet */
+ last_defproxy = alloc_new_proxy("", PR_CAP_DEF|PR_CAP_LISTEN, &errmsg);
+
+ curr_defproxy = last_defproxy;
+ if (!last_defproxy) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ }
+
+ if (strcmp(args[0], "listen") == 0)
+ rc = PR_CAP_LISTEN | PR_CAP_LB;
+ else if (strcmp(args[0], "frontend") == 0)
+ rc = PR_CAP_FE | PR_CAP_LB;
+ else if (strcmp(args[0], "backend") == 0)
+ rc = PR_CAP_BE | PR_CAP_LB;
+ else if (strcmp(args[0], "defaults") == 0) {
+ /* "defaults" must first delete the last no-name defaults if any */
+ curr_defproxy = NULL;
+ rc = PR_CAP_DEF | PR_CAP_LISTEN;
+ }
+ else
+ rc = PR_CAP_NONE;
+
+ if ((rc & PR_CAP_LISTEN) && !(rc & PR_CAP_DEF)) { /* new proxy */
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects an <id> argument\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+ file, linenum, *err, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ curproxy = (rc & PR_CAP_FE) ? proxy_fe_by_name(args[1]) : proxy_be_by_name(args[1]);
+ if (curproxy) {
+ ha_alert("Parsing [%s:%d]: %s '%s' has the same name as %s '%s' declared at %s:%d.\n",
+ file, linenum, proxy_cap_str(rc), args[1], proxy_type_str(curproxy),
+ curproxy->id, curproxy->conf.file, curproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ curproxy = log_forward_by_name(args[1]);
+ if (curproxy) {
+ ha_alert("Parsing [%s:%d]: %s '%s' has the same name as log forward section '%s' declared at %s:%d.\n",
+ file, linenum, proxy_cap_str(rc), args[1],
+ curproxy->id, curproxy->conf.file, curproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[2] && (!*args[3] || strcmp(args[2], "from") != 0)) ||
+ alertif_too_many_args(3, file, linenum, args, &err_code)) {
+ if (rc & PR_CAP_FE)
+ ha_alert("parsing [%s:%d] : please use the 'bind' keyword for listening addresses.\n", file, linenum);
+ goto out;
+ }
+ }
+
+ if (rc & PR_CAP_LISTEN) { /* new proxy or defaults section */
+ const char *name = args[1];
+ int arg = 2;
+
+ if (rc & PR_CAP_DEF && strcmp(args[1], "from") == 0 && *args[2] && !*args[3]) {
+ // also support "defaults from blah" (no name then)
+ arg = 1;
+ name = "";
+ }
+
+ /* only regular proxies inherit from the previous defaults section */
+ if (!(rc & PR_CAP_DEF))
+ curr_defproxy = last_defproxy;
+
+ if (strcmp(args[arg], "from") == 0) {
+ struct ebpt_node *next_by_name;
+
+ curr_defproxy = proxy_find_by_name(args[arg+1], PR_CAP_DEF, 0);
+
+ if (!curr_defproxy) {
+ ha_alert("parsing [%s:%d] : defaults section '%s' not found for %s '%s'.\n", file, linenum, args[arg+1], proxy_cap_str(rc), name);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if ((next_by_name = ebpt_next_dup(&curr_defproxy->conf.by_name))) {
+ struct proxy *px2 = container_of(next_by_name, struct proxy, conf.by_name);
+
+ ha_alert("parsing [%s:%d] : ambiguous defaults section name '%s' referenced by %s '%s' exists at least at %s:%d and %s:%d.\n",
+ file, linenum, args[arg+1], proxy_cap_str(rc), name,
+ curr_defproxy->conf.file, curr_defproxy->conf.line, px2->conf.file, px2->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ err = invalid_char(args[arg+1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in defaults section name '%s' when designated by its name (section found at %s:%d).\n",
+ file, linenum, *err, args[arg+1], curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ curr_defproxy->flags |= PR_FL_EXPLICIT_REF;
+ }
+ else if (curr_defproxy)
+ curr_defproxy->flags |= PR_FL_IMPLICIT_REF;
+
+ if (curr_defproxy && (curr_defproxy->flags & (PR_FL_EXPLICIT_REF|PR_FL_IMPLICIT_REF)) == (PR_FL_EXPLICIT_REF|PR_FL_IMPLICIT_REF)) {
+ ha_warning("parsing [%s:%d] : defaults section '%s' (declared at %s:%d) is explicitly referenced by another proxy and implicitly used here."
+ " To avoid any ambiguity don't mix both usage. Add a last defaults section not explicitly used or always use explicit references.\n",
+ file, linenum, curr_defproxy->id, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_WARN;
+ }
+
+ curproxy = parse_new_proxy(name, rc, file, linenum, curr_defproxy);
+ if (!curproxy) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if (curr_defproxy && (!LIST_ISEMPTY(&curr_defproxy->http_req_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->http_res_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->http_after_res_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->tcp_req.l4_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->tcp_req.l5_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->tcp_req.inspect_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->tcp_rep.inspect_rules))) {
+ /* If the current default proxy defines TCP/HTTP rules, the
+ * current proxy will keep a reference on it. But some sanity
+ * checks are performed first:
+ *
+ * - It cannot be used to init a defaults section
+ * - It cannot be used to init a listen section
+ * - It cannot be used to init backend and frontend sections at
+ * same time. It can be used to init several sections of the
+ * same type only.
+ * - It cannot define L4/L5 TCP rules if it is used to init
+ * backend sections.
+ * - It cannot define 'tcp-response content' rules if it
+ * is used to init frontend sections.
+ *
+ * If no error is found, refcount of the default proxy is incremented.
+ */
+
+ /* Note: Add tcpcheck_rules too if unresolve args become allowed in defaults section */
+ if (rc & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d]: a defaults section cannot inherit from a defaults section defining TCP/HTTP rules (defaults section at %s:%d).\n",
+ file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ else if ((rc & PR_CAP_LISTEN) == PR_CAP_LISTEN) {
+ ha_alert("parsing [%s:%d]: a listen section cannot inherit from a defaults section defining TCP/HTTP rules.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ else {
+ char defcap = (curr_defproxy->cap & PR_CAP_LISTEN);
+
+ if ((defcap == PR_CAP_BE || defcap == PR_CAP_FE) && (rc & PR_CAP_LISTEN) != defcap) {
+ ha_alert("parsing [%s:%d]: frontends and backends cannot inherit from the same defaults section"
+ " if it defines TCP/HTTP rules (defaults section at %s:%d).\n",
+ file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ else if (!(rc & PR_CAP_FE) && (!LIST_ISEMPTY(&curr_defproxy->tcp_req.l4_rules) ||
+ !LIST_ISEMPTY(&curr_defproxy->tcp_req.l5_rules))) {
+ ha_alert("parsing [%s:%d]: a backend section cannot inherit from a defaults section defining"
+ " 'tcp-request connection' or 'tcp-request session' rules (defaults section at %s:%d).\n",
+ file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ else if (!(rc & PR_CAP_BE) && !LIST_ISEMPTY(&curr_defproxy->tcp_rep.inspect_rules)) {
+ ha_alert("parsing [%s:%d]: a frontend section cannot inherit from a defaults section defining"
+ " 'tcp-response content' rules (defaults section at %s:%d).\n",
+ file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ else {
+ curr_defproxy->cap = (curr_defproxy->cap & ~PR_CAP_LISTEN) | (rc & PR_CAP_LISTEN);
+ proxy_ref_defaults(curproxy, curr_defproxy);
+ }
+ }
+ }
+
+ if (curr_defproxy && (curr_defproxy->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) &&
+ (curproxy->cap & PR_CAP_LISTEN) == PR_CAP_BE) {
+ /* If the current default proxy defines tcpcheck rules, the
+ * current proxy will keep a reference on it. but only if the
+ * current proxy has the backend capability.
+ */
+ proxy_ref_defaults(curproxy, curr_defproxy);
+ }
+
+ if ((rc & PR_CAP_BE) && curr_defproxy && (curr_defproxy->nb_req_cap || curr_defproxy->nb_rsp_cap)) {
+ ha_alert("parsing [%s:%d]: backend or defaults sections cannot inherit from a defaults section defining"
+ " capptures (defaults section at %s:%d).\n",
+ file, linenum, curr_defproxy->conf.file, curr_defproxy->conf.line);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+
+ if (rc & PR_CAP_DEF) {
+ /* last and current proxies must be updated to this one */
+ curr_defproxy = last_defproxy = curproxy;
+ } else {
+ /* regular proxies are in a list */
+ curproxy->next = proxies_list;
+ proxies_list = curproxy;
+ }
+ goto out;
+ }
+ else if (curproxy == NULL) {
+ ha_alert("parsing [%s:%d] : 'listen' or 'defaults' expected.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* update the current file and line being parsed */
+ curproxy->conf.args.file = curproxy->conf.file;
+ curproxy->conf.args.line = linenum;
+
+ /* Now let's parse the proxy-specific keywords */
+ if ((strcmp(args[0], "server") == 0)) {
+ err_code |= parse_server(file, linenum, args,
+ curproxy, curr_defproxy,
+ SRV_PARSE_PARSE_ADDR);
+
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[0], "default-server") == 0) {
+ err_code |= parse_server(file, linenum, args,
+ curproxy, curr_defproxy,
+ SRV_PARSE_DEFAULT_SERVER);
+
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[0], "server-template") == 0) {
+ err_code |= parse_server(file, linenum, args,
+ curproxy, curr_defproxy,
+ SRV_PARSE_TEMPLATE|SRV_PARSE_PARSE_ADDR);
+
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[0], "bind") == 0) { /* new listen addresses */
+ struct listener *l;
+ int cur_arg;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects {<path>|[addr1]:port1[-end1]}{,[addr]:port[-end]}... as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ bind_conf = bind_conf_alloc(curproxy, file, linenum, args[1], xprt_get(XPRT_RAW));
+ if (!bind_conf)
+ goto alloc_error;
+
+ /* use default settings for unix sockets */
+ bind_conf->settings.ux.uid = global.unix_bind.ux.uid;
+ bind_conf->settings.ux.gid = global.unix_bind.ux.gid;
+ bind_conf->settings.ux.mode = global.unix_bind.ux.mode;
+
+ /* NOTE: the following line might create several listeners if there
+ * are comma-separated IPs or port ranges. So all further processing
+ * will have to be applied to all listeners created after last_listen.
+ */
+ if (!str2listener(args[1], curproxy, bind_conf, file, linenum, &errmsg)) {
+ if (errmsg && *errmsg) {
+ indent_msg(&errmsg, 2);
+ ha_alert("parsing [%s:%d] : '%s' : %s\n", file, linenum, args[0], errmsg);
+ }
+ else
+ ha_alert("parsing [%s:%d] : '%s' : error encountered while parsing listening address '%s'.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+ /* Set default global rights and owner for unix bind */
+ global.maxsock++;
+ }
+
+ cur_arg = 2;
+ err_code |= bind_parse_args_list(bind_conf, args, cur_arg, cursection, file, linenum);
+ goto out;
+ }
+ else if (strcmp(args[0], "monitor-net") == 0) { /* set the range of IPs to ignore */
+ ha_alert("parsing [%s:%d] : 'monitor-net' doesn't exist anymore. Please use 'http-request return status 200 if { src %s }' instead.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "monitor-uri") == 0) { /* set the URI to intercept */
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects an URI.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ istfree(&curproxy->monitor_uri);
+ curproxy->monitor_uri = istdup(ist(args[1]));
+ if (!isttest(curproxy->monitor_uri))
+ goto alloc_error;
+
+ goto out;
+ }
+ else if (strcmp(args[0], "mode") == 0) { /* sets the proxy mode */
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ if (strcmp(args[1], "http") == 0) curproxy->mode = PR_MODE_HTTP;
+ else if (strcmp(args[1], "tcp") == 0) curproxy->mode = PR_MODE_TCP;
+ else if (strcmp(args[1], "log") == 0 && (curproxy->cap & PR_CAP_BE)) curproxy->mode = PR_MODE_SYSLOG;
+ else if (strcmp(args[1], "health") == 0) {
+ ha_alert("parsing [%s:%d] : 'mode health' doesn't exist anymore. Please use 'http-request return status 200' instead.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : unknown proxy mode '%s'.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "id") == 0) {
+ struct eb32_node *node;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d]: '%s' not allowed in 'defaults' section.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects an integer argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->uuid = atol(args[1]);
+ curproxy->conf.id.key = curproxy->uuid;
+ curproxy->options |= PR_O_FORCED_ID;
+
+ if (curproxy->uuid <= 0) {
+ ha_alert("parsing [%s:%d]: custom id has to be > 0.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ node = eb32_lookup(&used_proxy_id, curproxy->uuid);
+ if (node) {
+ struct proxy *target = container_of(node, struct proxy, conf.id);
+ ha_alert("parsing [%s:%d]: %s %s reuses same custom id as %s %s (declared at %s:%d).\n",
+ file, linenum, proxy_type_str(curproxy), curproxy->id,
+ proxy_type_str(target), target->id, target->conf.file, target->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ eb32_insert(&used_proxy_id, &curproxy->conf.id);
+ }
+ else if (strcmp(args[0], "description") == 0) {
+ int i, len=0;
+ char *d;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d]: '%s' not allowed in 'defaults' section.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects a string argument.\n",
+ file, linenum, args[0]);
+ return -1;
+ }
+
+ for (i = 1; *args[i]; i++)
+ len += strlen(args[i]) + 1;
+
+ d = calloc(1, len);
+ if (!d)
+ goto alloc_error;
+ curproxy->desc = d;
+
+ d += snprintf(d, curproxy->desc + len - d, "%s", args[1]);
+ for (i = 2; *args[i]; i++)
+ d += snprintf(d, curproxy->desc + len - d, " %s", args[i]);
+
+ }
+ else if (strcmp(args[0], "disabled") == 0) { /* disables this proxy */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ curproxy->flags |= PR_FL_DISABLED;
+ }
+ else if (strcmp(args[0], "enabled") == 0) { /* enables this proxy (used to revert a disabled default) */
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ curproxy->flags &= ~PR_FL_DISABLED;
+ }
+ else if (strcmp(args[0], "bind-process") == 0) { /* enable this proxy only on some processes */
+ ha_alert("parsing [%s:%d]: '%s' is not supported anymore.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (strcmp(args[0], "acl") == 0) { /* add an ACL */
+ if ((curproxy->cap & PR_CAP_DEF) && strlen(curproxy->id) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in anonymous 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in acl name '%s'.\n",
+ file, linenum, *err, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcasecmp(args[1], "or") == 0) {
+ ha_alert("parsing [%s:%d] : acl name '%s' will never match. 'or' is used to express a "
+ "logical disjunction within a condition.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (parse_acl((const char **)args + 1, &curproxy->acl, &errmsg, &curproxy->conf.args, file, linenum) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing ACL '%s' : %s.\n",
+ file, linenum, args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "dynamic-cookie-key") == 0) { /* Dynamic cookies secret key */
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects <secret_key> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->dyncookie_key);
+ curproxy->dyncookie_key = strdup(args[1]);
+ }
+ else if (strcmp(args[0], "cookie") == 0) { /* cookie name */
+ int cur_arg;
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects <cookie_name> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->ck_opts = 0;
+ curproxy->cookie_maxidle = curproxy->cookie_maxlife = 0;
+ ha_free(&curproxy->cookie_domain);
+ free(curproxy->cookie_name);
+ curproxy->cookie_name = strdup(args[1]);
+ if (!curproxy->cookie_name)
+ goto alloc_error;
+ curproxy->cookie_len = strlen(curproxy->cookie_name);
+
+ cur_arg = 2;
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "rewrite") == 0) {
+ curproxy->ck_opts |= PR_CK_RW;
+ }
+ else if (strcmp(args[cur_arg], "indirect") == 0) {
+ curproxy->ck_opts |= PR_CK_IND;
+ }
+ else if (strcmp(args[cur_arg], "insert") == 0) {
+ curproxy->ck_opts |= PR_CK_INS;
+ }
+ else if (strcmp(args[cur_arg], "nocache") == 0) {
+ curproxy->ck_opts |= PR_CK_NOC;
+ }
+ else if (strcmp(args[cur_arg], "postonly") == 0) {
+ curproxy->ck_opts |= PR_CK_POST;
+ }
+ else if (strcmp(args[cur_arg], "preserve") == 0) {
+ curproxy->ck_opts |= PR_CK_PSV;
+ }
+ else if (strcmp(args[cur_arg], "prefix") == 0) {
+ curproxy->ck_opts |= PR_CK_PFX;
+ }
+ else if (strcmp(args[cur_arg], "httponly") == 0) {
+ curproxy->ck_opts |= PR_CK_HTTPONLY;
+ }
+ else if (strcmp(args[cur_arg], "secure") == 0) {
+ curproxy->ck_opts |= PR_CK_SECURE;
+ }
+ else if (strcmp(args[cur_arg], "domain") == 0) {
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <domain> as argument.\n",
+ file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!strchr(args[cur_arg + 1], '.')) {
+ /* rfc6265, 5.2.3 The Domain Attribute */
+ ha_warning("parsing [%s:%d]: domain '%s' contains no embedded dot,"
+ " this configuration may not work properly (see RFC6265#5.2.3).\n",
+ file, linenum, args[cur_arg + 1]);
+ err_code |= ERR_WARN;
+ }
+
+ err = invalid_domainchar(args[cur_arg + 1]);
+ if (err) {
+ ha_alert("parsing [%s:%d]: character '%c' is not permitted in domain name '%s'.\n",
+ file, linenum, *err, args[cur_arg + 1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!curproxy->cookie_domain) {
+ curproxy->cookie_domain = strdup(args[cur_arg + 1]);
+ } else {
+ /* one domain was already specified, add another one by
+ * building the string which will be returned along with
+ * the cookie.
+ */
+ memprintf(&curproxy->cookie_domain, "%s; domain=%s", curproxy->cookie_domain, args[cur_arg+1]);
+ }
+
+ if (!curproxy->cookie_domain)
+ goto alloc_error;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "maxidle") == 0) {
+ unsigned int maxidle;
+ const char *res;
+
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <idletime> in seconds as argument.\n",
+ file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ res = parse_time_err(args[cur_arg + 1], &maxidle, TIME_UNIT_S);
+ if (res == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s>, maximum value is 2147483647 s (~68 years).\n",
+ file, linenum, args[cur_arg+1], args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 s.\n",
+ file, linenum, args[cur_arg+1], args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res) {
+ ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s>.\n",
+ file, linenum, *res, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->cookie_maxidle = maxidle;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "maxlife") == 0) {
+ unsigned int maxlife;
+ const char *res;
+
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <lifetime> in seconds as argument.\n",
+ file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+
+ res = parse_time_err(args[cur_arg + 1], &maxlife, TIME_UNIT_S);
+ if (res == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s>, maximum value is 2147483647 s (~68 years).\n",
+ file, linenum, args[cur_arg+1], args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 s.\n",
+ file, linenum, args[cur_arg+1], args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res) {
+ ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s>.\n",
+ file, linenum, *res, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->cookie_maxlife = maxlife;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "dynamic") == 0) { /* Dynamic persistent cookies secret key */
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[cur_arg], NULL))
+ err_code |= ERR_WARN;
+ curproxy->ck_opts |= PR_CK_DYNAMIC;
+ }
+ else if (strcmp(args[cur_arg], "attr") == 0) {
+ char *val;
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <value> as argument.\n",
+ file, linenum, args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ val = args[cur_arg + 1];
+ while (*val) {
+ if (iscntrl((unsigned char)*val) || *val == ';') {
+ ha_alert("parsing [%s:%d]: character '%%x%02X' is not permitted in attribute value.\n",
+ file, linenum, *val);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ val++;
+ }
+ /* don't add ';' for the first attribute */
+ if (!curproxy->cookie_attrs)
+ curproxy->cookie_attrs = strdup(args[cur_arg + 1]);
+ else
+ memprintf(&curproxy->cookie_attrs, "%s; %s", curproxy->cookie_attrs, args[cur_arg + 1]);
+
+ if (!curproxy->cookie_attrs)
+ goto alloc_error;
+ cur_arg++;
+ }
+
+ else {
+ ha_alert("parsing [%s:%d] : '%s' supports 'rewrite', 'insert', 'prefix', 'indirect', 'nocache', 'postonly', 'domain', 'maxidle', 'dynamic', 'maxlife' and 'attr' options.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ cur_arg++;
+ }
+ if (!POWEROF2(curproxy->ck_opts & (PR_CK_RW|PR_CK_IND))) {
+ ha_alert("parsing [%s:%d] : cookie 'rewrite' and 'indirect' modes are incompatible.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ if (!POWEROF2(curproxy->ck_opts & (PR_CK_RW|PR_CK_INS|PR_CK_PFX))) {
+ ha_alert("parsing [%s:%d] : cookie 'rewrite', 'insert' and 'prefix' modes are incompatible.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((curproxy->ck_opts & (PR_CK_PSV | PR_CK_INS | PR_CK_IND)) == PR_CK_PSV) {
+ ha_alert("parsing [%s:%d] : cookie 'preserve' requires at least 'insert' or 'indirect'.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }/* end else if (!strcmp(args[0], "cookie")) */
+ else if (strcmp(args[0], "email-alert") == 0) {
+ /* "email-alert <setting> <value>": configure SMTP alerting. Each
+  * sub-keyword stores its value; .set marks the section as at least
+  * partially configured so it can be validated after parsing.
+  */
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : missing argument after '%s'.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[1], "from") == 0) {
+ /* fix: the value is in args[2]; args[1] was already checked above */
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : missing argument after '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->email_alert.from);
+ curproxy->email_alert.from = strdup(args[2]);
+ if (!curproxy->email_alert.from)
+ goto alloc_error;
+ }
+ else if (strcmp(args[1], "mailers") == 0) {
+ /* fix: validate args[2] (mailers section name), not args[1] */
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : missing argument after '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->email_alert.mailers.name);
+ curproxy->email_alert.mailers.name = strdup(args[2]);
+ if (!curproxy->email_alert.mailers.name)
+ goto alloc_error;
+ }
+ else if (strcmp(args[1], "myhostname") == 0) {
+ /* fix: validate args[2] (hostname), not args[1] */
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : missing argument after '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->email_alert.myhostname);
+ curproxy->email_alert.myhostname = strdup(args[2]);
+ if (!curproxy->email_alert.myhostname)
+ goto alloc_error;
+ }
+ else if (strcmp(args[1], "level") == 0) {
+ curproxy->email_alert.level = get_log_level(args[2]);
+ if (curproxy->email_alert.level < 0) {
+ /* fix: report the invalid level (args[2]) after the keyword (args[1]);
+  * the arguments were swapped in the original message.
+  */
+ ha_alert("parsing [%s:%d] : unknown log level '%s' after '%s'\n",
+ file, linenum, args[2], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[1], "to") == 0) {
+ /* fix: validate args[2] (recipient), not args[1] */
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : missing argument after '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->email_alert.to);
+ curproxy->email_alert.to = strdup(args[2]);
+ if (!curproxy->email_alert.to)
+ goto alloc_error;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : email-alert: unknown argument '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ /* Indicate that the email_alert is at least partially configured */
+ curproxy->email_alert.set = 1;
+ }/* end else if (!strcmp(args[0], "email-alert")) */
+ else if (strcmp(args[0], "persist") == 0) { /* persist */
+ /* "persist rdp-cookie[(<name>)]" enables RDP cookie based
+  * persistence; without an explicit name the cookie defaults to "msts".
+  */
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : missing persist method.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!strncmp(args[1], "rdp-cookie", 10)) {
+ curproxy->options2 |= PR_O2_RDPC_PRST;
+
+ if (*(args[1] + 10) == '(') { /* cookie name */
+ const char *beg, *end;
+
+ beg = args[1] + 11;
+ end = strchr(beg, ')');
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ if (!end || end == beg) {
+ /* fix: the message was missing its opening quote */
+ ha_alert("parsing [%s:%d] : 'persist rdp-cookie(name)' requires an rdp cookie name.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ free(curproxy->rdp_cookie_name);
+ curproxy->rdp_cookie_name = my_strndup(beg, end - beg);
+ if (!curproxy->rdp_cookie_name)
+ goto alloc_error;
+ curproxy->rdp_cookie_len = end-beg;
+ }
+ else if (*(args[1] + 10) == '\0') { /* default cookie name 'msts' */
+ free(curproxy->rdp_cookie_name);
+ curproxy->rdp_cookie_name = strdup("msts");
+ if (!curproxy->rdp_cookie_name)
+ goto alloc_error;
+ curproxy->rdp_cookie_len = strlen(curproxy->rdp_cookie_name);
+ }
+ else { /* syntax */
+ /* fix: the message was missing its opening quote */
+ ha_alert("parsing [%s:%d] : 'persist rdp-cookie(name)' requires an rdp cookie name.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else {
+ ha_alert("parsing [%s:%d] : unknown persist method.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "appsession") == 0) {
+ /* the legacy "appsession" directive was removed in HAProxy 1.6;
+  * reject it with a fatal parse error.
+  */
+ err_code |= ERR_ALERT | ERR_FATAL;
+ ha_alert("parsing [%s:%d] : '%s' is not supported anymore since HAProxy 1.6.\n", file, linenum, args[0]);
+ goto out;
+ }
+ else if (strcmp(args[0], "load-server-state-from-file") == 0) {
+ /* choose where server state is reloaded from at startup: the file
+  * named by the global "server-state-file" directive, a per-backend
+  * local file, or nowhere at all.
+  */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "global") == 0)
+ curproxy->load_server_state_from_file = PR_SRV_STATE_FILE_GLOBAL;
+ else if (strcmp(args[1], "local") == 0)
+ curproxy->load_server_state_from_file = PR_SRV_STATE_FILE_LOCAL;
+ else if (strcmp(args[1], "none") == 0)
+ curproxy->load_server_state_from_file = PR_SRV_STATE_FILE_NONE;
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'global', 'local' or 'none'. Got '%s'\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "server-state-file-name") == 0) {
+ /* name of the file holding this backend's server state; defaults to
+  * the backend's own name when the argument is absent or is the
+  * special word "use-backend-name".
+  */
+ const char *src;
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ ha_free(&curproxy->server_state_file_name);
+
+ src = (!*args[1] || strcmp(args[1], "use-backend-name") == 0) ? curproxy->id : args[1];
+ curproxy->server_state_file_name = strdup(src);
+ if (!curproxy->server_state_file_name)
+ goto alloc_error;
+ }
+ else if (strcmp(args[0], "max-session-srv-conns") == 0) {
+ /* limit on the number of private server connections kept per session */
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+ if (*(args[1]) == 0) {
+ /* fix: typo "parsine" -> "parsing" in the error message */
+ ha_alert("parsing [%s:%d] : '%s' expects a number. Got no argument\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->max_out_conns = atoi(args[1]);
+ }
+ else if (strcmp(args[0], "capture") == 0) {
+ /* "capture" registers data to be sampled into the logs. Three forms:
+  *   capture cookie <name> len <len>
+  *   capture request header <name> len <len>
+  *   capture response header <name> len <len>
+  * Captures are frontend-only and forbidden in defaults sections.
+  */
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "cookie") == 0) { /* name of a cookie to capture */
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s %s' not allowed in 'defaults' section.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (alertif_too_many_args_idx(4, 1, file, linenum, args, &err_code))
+ goto out;
+
+ /* NOTE(review): only args[4] (the length) is validated here; args[3]
+  * is presumably the "len" keyword but is not checked - confirm.
+  */
+ if (*(args[4]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects 'cookie' <cookie_name> 'len' <len>.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->capture_name);
+ curproxy->capture_name = strdup(args[2]);
+ if (!curproxy->capture_name)
+ goto alloc_error;
+ curproxy->capture_namelen = strlen(curproxy->capture_name);
+ curproxy->capture_len = atol(args[4]);
+ curproxy->to_log |= LW_COOKIE;
+ }
+ else if (strcmp(args[1], "request") == 0 && strcmp(args[2], "header") == 0) {
+ struct cap_hdr *hdr;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s %s' not allowed in 'defaults' section.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (alertif_too_many_args_idx(4, 1, file, linenum, args, &err_code))
+ goto out;
+
+ if (*(args[3]) == 0 || strcmp(args[4], "len") != 0 || *(args[5]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s %s' expects 'header' <header_name> 'len' <len>.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* the "_alloc_error" label below sits inside the pool-failure branch
+  * so that all allocation failures share one cleanup path: free the
+  * partially built hdr then jump to the common alloc_error handler.
+  */
+ hdr = calloc(1, sizeof(*hdr));
+ if (!hdr)
+ goto req_caphdr_alloc_error;
+ hdr->next = curproxy->req_cap;
+ hdr->name = strdup(args[3]);
+ if (!hdr->name)
+ goto req_caphdr_alloc_error;
+ hdr->namelen = strlen(args[3]);
+ hdr->len = atol(args[5]);
+ hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
+ if (!hdr->pool) {
+ req_caphdr_alloc_error:
+ if (hdr)
+ ha_free(&hdr->name);
+ ha_free(&hdr);
+ goto alloc_error;
+ }
+ /* prepend to the request capture list and log request headers */
+ hdr->index = curproxy->nb_req_cap++;
+ curproxy->req_cap = hdr;
+ curproxy->to_log |= LW_REQHDR;
+ }
+ else if (strcmp(args[1], "response") == 0 && strcmp(args[2], "header") == 0) {
+ struct cap_hdr *hdr;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s %s' not allowed in 'defaults' section.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (alertif_too_many_args_idx(4, 1, file, linenum, args, &err_code))
+ goto out;
+
+ if (*(args[3]) == 0 || strcmp(args[4], "len") != 0 || *(args[5]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s %s' expects 'header' <header_name> 'len' <len>.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ /* same shared-cleanup idiom as the request-header branch above */
+ hdr = calloc(1, sizeof(*hdr));
+ if (!hdr)
+ goto res_caphdr_alloc_error;
+ hdr->next = curproxy->rsp_cap;
+ hdr->name = strdup(args[3]);
+ if (!hdr->name)
+ goto res_caphdr_alloc_error;
+ hdr->namelen = strlen(args[3]);
+ hdr->len = atol(args[5]);
+ hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
+ if (!hdr->pool) {
+ res_caphdr_alloc_error:
+ if (hdr)
+ ha_free(&hdr->name);
+ ha_free(&hdr);
+ goto alloc_error;
+ }
+ hdr->index = curproxy->nb_rsp_cap++;
+ curproxy->rsp_cap = hdr;
+ curproxy->to_log |= LW_RSPHDR;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'cookie' or 'request header' or 'response header'.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "retries") == 0) {
+ /* number of connection retries to a server (dispatch counts for one) */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument (dispatch counts for one).\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->conn_retries = atol(args[1]);
+ }
+ else if (strcmp(args[0], "http-request") == 0) { /* request access control: allow/deny/auth */
+ /* Parse one "http-request" action and append it to the proxy's
+  * request rule list. Only named defaults sections may hold such
+  * rules; a warning is emitted when the previous rule is final and
+  * unconditional, since everything added after it is dead.
+  */
+ struct act_rule *rule;
+ int where = 0;
+
+ if ((curproxy->cap & PR_CAP_DEF) && strlen(curproxy->id) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in anonymous 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->http_req_rules) &&
+ !LIST_PREV(&curproxy->http_req_rules, struct act_rule *, list)->cond &&
+ (LIST_PREV(&curproxy->http_req_rules, struct act_rule *, list)->flags & ACT_FLAG_FINAL)) {
+ ha_warning("parsing [%s:%d]: previous '%s' action is final and has no condition attached, further entries are NOOP.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ }
+
+ rule = parse_http_req_cond((const char **)args + 1, file, linenum, curproxy);
+
+ if (!rule) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ err_code |= warnif_misplaced_http_req(curproxy, file, linenum, args[0]);
+
+ /* the rule's condition may only use sample fetches available where
+  * this proxy sees request headers (frontend and/or backend side).
+  */
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
+
+ LIST_APPEND(&curproxy->http_req_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "http-response") == 0) { /* response access control */
+ /* Parse one "http-response" action and append it to the proxy's
+  * response rule list; mirrors the "http-request" handling above but
+  * validates conditions against response-side sample fetches.
+  */
+ struct act_rule *rule;
+ int where = 0;
+
+ if ((curproxy->cap & PR_CAP_DEF) && strlen(curproxy->id) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in anonymous 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* warn when the previous rule is final and unconditional: any rule
+  * appended after it can never be evaluated.
+  */
+ if (!LIST_ISEMPTY(&curproxy->http_res_rules) &&
+ !LIST_PREV(&curproxy->http_res_rules, struct act_rule *, list)->cond &&
+ (LIST_PREV(&curproxy->http_res_rules, struct act_rule *, list)->flags & ACT_FLAG_FINAL)) {
+ ha_warning("parsing [%s:%d]: previous '%s' action is final and has no condition attached, further entries are NOOP.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ }
+
+ rule = parse_http_res_cond((const char **)args + 1, file, linenum, curproxy);
+
+ if (!rule) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRS_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRS_HDR;
+ err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
+
+ LIST_APPEND(&curproxy->http_res_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "http-after-response") == 0) {
+ /* Parse one "http-after-response" action, evaluated on every
+  * response (including internally generated ones), and append it to
+  * the dedicated rule list. Same structure as "http-response" above.
+  */
+ struct act_rule *rule;
+ int where = 0;
+ if ((curproxy->cap & PR_CAP_DEF) && strlen(curproxy->id) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in anonymous 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->http_after_res_rules) &&
+ !LIST_PREV(&curproxy->http_after_res_rules, struct act_rule *, list)->cond &&
+ (LIST_PREV(&curproxy->http_after_res_rules, struct act_rule *, list)->flags & ACT_FLAG_FINAL)) {
+ ha_warning("parsing [%s:%d]: previous '%s' action is final and has no condition attached, further entries are NOOP.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ }
+
+ rule = parse_http_after_res_cond((const char **)args + 1, file, linenum, curproxy);
+
+ if (!rule) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* conditions are checked against response-side fetches */
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRS_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRS_HDR;
+ err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
+
+ LIST_APPEND(&curproxy->http_after_res_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "http-send-name-header") == 0) {
+ /* add a request header carrying the name of the target server;
+  * the header name is stored lower-cased in the proxy structure.
+  */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*args[1] == '\0') {
+ ha_alert("parsing [%s:%d] : '%s' requires a header string.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* record the desired header name, forced to lower case */
+ istfree(&curproxy->server_id_hdr_name);
+ curproxy->server_id_hdr_name = istdup(ist(args[1]));
+ if (!isttest(curproxy->server_id_hdr_name))
+ goto alloc_error;
+ ist2bin_lc(istptr(curproxy->server_id_hdr_name), curproxy->server_id_hdr_name);
+ }
+ else if (strcmp(args[0], "block") == 0) {
+ /* removed directive: point the user at the equivalent http-request rule */
+ err_code |= ERR_ALERT | ERR_FATAL;
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. Use 'http-request deny' which uses the exact same syntax.\n", file, linenum, args[0]);
+ goto out;
+ }
+ else if (strcmp(args[0], "redirect") == 0) {
+ /* "redirect" adds an HTTP redirect rule. Parsing is delegated to
+  * http_parse_redirect_rule(); the resulting rule may carry an ACL
+  * condition which is checked for fetch conflicts below.
+  */
+ struct redirect_rule *rule;
+ int where = 0;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if ((rule = http_parse_redirect_rule(file, linenum, curproxy, (const char **)args + 1, &errmsg, 0, 0)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected in %s '%s' while parsing redirect rule : %s.\n",
+ file, linenum, proxy_type_str(curproxy), curproxy->id, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ LIST_APPEND(&curproxy->redirect_rules, &rule->list);
+ err_code |= warnif_misplaced_redirect(curproxy, file, linenum, args[0]);
+
+ /* redirect conditions only apply on the request path */
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
+ }
+ else if (strcmp(args[0], "use_backend") == 0) {
+ /* "use_backend <name> [if|unless <cond>]": register a backend
+  * switching rule on a frontend. The condition is optional; the
+  * backend name itself is resolved later, after all proxies are
+  * parsed.
+  */
+ struct switching_rule *rule;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a backend name.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[2], "if") == 0 || strcmp(args[2], "unless") == 0) {
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 2, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing switching rule : %s.\n",
+ file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err_code |= warnif_cond_conflicts(cond, SMP_VAL_FE_SET_BCK, file, linenum);
+ }
+ else if (*args[2]) {
+ ha_alert("parsing [%s:%d] : unexpected keyword '%s' after switching rule, only 'if' and 'unless' are allowed.\n",
+ file, linenum, args[2]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* the "use_backend_alloc_error" label below sits inside the last
+  * failure branch so every allocation failure shares one cleanup
+  * path (free the condition and partial rule, then alloc_error).
+  */
+ rule = calloc(1, sizeof(*rule));
+ if (!rule)
+ goto use_backend_alloc_error;
+ rule->cond = cond;
+ rule->be.name = strdup(args[1]);
+ if (!rule->be.name)
+ goto use_backend_alloc_error;
+ rule->line = linenum;
+ rule->file = strdup(file);
+ if (!rule->file) {
+ use_backend_alloc_error:
+ free_acl_cond(cond);
+ if (rule)
+ ha_free(&(rule->be.name));
+ ha_free(&rule);
+ goto alloc_error;
+ }
+ LIST_INIT(&rule->list);
+ LIST_APPEND(&curproxy->switching_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "use-server") == 0) {
+ /* "use-server <name> if|unless <cond>": register a server selection
+  * rule on a backend. Unlike "use_backend", the condition here is
+  * mandatory. Enables the AN_REQ_SRV_RULES request analyser.
+  */
+ struct server_rule *rule;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a server name.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[2], "if") != 0 && strcmp(args[2], "unless") != 0) {
+ ha_alert("parsing [%s:%d] : '%s' requires either 'if' or 'unless' followed by a condition.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 2, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing switching rule : %s.\n",
+ file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, file, linenum);
+
+ /* shared-cleanup idiom: the label inside the last failure branch
+  * funnels every allocation failure through one free path.
+  */
+ rule = calloc(1, sizeof(*rule));
+ if (!rule)
+ goto use_server_alloc_error;
+ rule->cond = cond;
+ rule->srv.name = strdup(args[1]);
+ if (!rule->srv.name)
+ goto use_server_alloc_error;
+ rule->line = linenum;
+ rule->file = strdup(file);
+ if (!rule->file) {
+ use_server_alloc_error:
+ free_acl_cond(cond);
+ if (rule)
+ ha_free(&(rule->srv.name));
+ ha_free(&rule);
+ goto alloc_error;
+ }
+ LIST_INIT(&rule->list);
+ LIST_APPEND(&curproxy->server_rules, &rule->list);
+ curproxy->be_req_ana |= AN_REQ_SRV_RULES;
+ }
+ else if ((strcmp(args[0], "force-persist") == 0) ||
+ (strcmp(args[0], "ignore-persist") == 0)) {
+ /* "force-persist"/"ignore-persist" conditionally enable or disable
+  * server persistence according to a mandatory ACL condition.
+  */
+ struct persist_rule *rule;
+ int is_force;
+
+ is_force = (strcmp(args[0], "force-persist") == 0);
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "if") != 0 && strcmp(args[1], "unless") != 0) {
+ ha_alert("parsing [%s:%d] : '%s' requires either 'if' or 'unless' followed by a condition.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 1, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing a '%s' rule : %s.\n",
+ file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* note: BE_REQ_CNT is the first one after FE_SET_BCK, which is
+  * where force-persist is applied.
+  */
+ err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_REQ_CNT, file, linenum);
+
+ rule = calloc(1, sizeof(*rule));
+ if (!rule) {
+ free_acl_cond(cond);
+ goto alloc_error;
+ }
+ rule->cond = cond;
+ rule->type = is_force ? PERSIST_TYPE_FORCE : PERSIST_TYPE_IGNORE;
+ LIST_INIT(&rule->list);
+ LIST_APPEND(&curproxy->persist_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "stick-table") == 0) {
+ /* declare this proxy's stick-table. The table takes the proxy's
+  * name, so a global name-collision check is performed first, then
+  * parsing is delegated to parse_stick_table() and the table is
+  * registered in the global stick-table list.
+  */
+ struct stktable *other;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : 'stick-table' is not supported in 'defaults' section.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ other = stktable_find_by_name(curproxy->id);
+ if (other) {
+ ha_alert("parsing [%s:%d] : stick-table name '%s' conflicts with table declared in %s '%s' at %s:%d.\n",
+ file, linenum, curproxy->id,
+ other->proxy ? proxy_cap_str(other->proxy->cap) : "peers",
+ other->proxy ? other->id : other->peers.p->id,
+ other->conf.file, other->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->table = calloc(1, sizeof *curproxy->table);
+ if (!curproxy->table) {
+ ha_alert("parsing [%s:%d]: '%s %s' : memory allocation failed\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err_code |= parse_stick_table(file, linenum, args, curproxy->table,
+ curproxy->id, curproxy->id, NULL);
+ if (err_code & ERR_FATAL) {
+ ha_free(&curproxy->table);
+ goto out;
+ }
+
+ /* Store the proxy in the stick-table. */
+ curproxy->table->proxy = curproxy;
+
+ stktable_store_name(curproxy->table);
+ curproxy->table->next = stktables_list;
+ stktables_list = curproxy->table;
+
+ /* Add this proxy to the list of proxies which refer to its stick-table. */
+ if (curproxy->table->proxies_list != curproxy) {
+ curproxy->next_stkt_ref = curproxy->table->proxies_list;
+ curproxy->table->proxies_list = curproxy;
+ }
+ }
+ else if (strcmp(args[0], "stick") == 0) {
+ /* "stick on|match|store[-request|-response] <expr> [table <t>]
+  * [if|unless <cond>]": register a sticking rule. A sample
+  * expression is parsed, validated against the direction implied by
+  * the rule type, and appended to the matching rule list.
+  */
+ struct sticking_rule *rule;
+ struct sample_expr *expr;
+ int myidx = 0;
+ const char *name = NULL;
+ int flags;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* note: unlike most keywords, a capability warning here also stops
+  * processing of the line (goto out).
+  */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL)) {
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ myidx++;
+ if ((strcmp(args[myidx], "store") == 0) ||
+ (strcmp(args[myidx], "store-request") == 0)) {
+ myidx++;
+ flags = STK_IS_STORE;
+ }
+ else if (strcmp(args[myidx], "store-response") == 0) {
+ myidx++;
+ flags = STK_IS_STORE | STK_ON_RSP;
+ }
+ else if (strcmp(args[myidx], "match") == 0) {
+ myidx++;
+ flags = STK_IS_MATCH;
+ }
+ else if (strcmp(args[myidx], "on") == 0) {
+ myidx++;
+ flags = STK_IS_MATCH | STK_IS_STORE;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'on', 'match', or 'store'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (*(args[myidx]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a fetch method.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->conf.args.ctx = ARGC_STK;
+ expr = sample_parse_expr(args, &myidx, file, linenum, &errmsg, &curproxy->conf.args, NULL);
+ if (!expr) {
+ ha_alert("parsing [%s:%d] : '%s': %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* the fetch must be usable at the point where the rule applies:
+  * store-rule evaluation for store-response, server selection
+  * otherwise.
+  */
+ if (flags & STK_ON_RSP) {
+ if (!(expr->fetch->val & SMP_VAL_BE_STO_RUL)) {
+ ha_alert("parsing [%s:%d] : '%s': fetch method '%s' extracts information from '%s', none of which is available for 'store-response'.\n",
+ file, linenum, args[0], expr->fetch->kw, sample_src_names(expr->fetch->use));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(expr);
+ goto out;
+ }
+ } else {
+ if (!(expr->fetch->val & SMP_VAL_BE_SET_SRV)) {
+ ha_alert("parsing [%s:%d] : '%s': fetch method '%s' extracts information from '%s', none of which is available during request.\n",
+ file, linenum, args[0], expr->fetch->kw, sample_src_names(expr->fetch->use));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(expr);
+ goto out;
+ }
+ }
+
+ /* check if we need to allocate an http_txn struct for HTTP parsing */
+ curproxy->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
+
+ if (strcmp(args[myidx], "table") == 0) {
+ myidx++;
+ name = args[myidx++];
+ }
+
+ if (strcmp(args[myidx], "if") == 0 || strcmp(args[myidx], "unless") == 0) {
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + myidx, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : '%s': error detected while parsing sticking condition : %s.\n",
+ file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(expr);
+ goto out;
+ }
+ }
+ else if (*(args[myidx])) {
+ ha_alert("parsing [%s:%d] : '%s': unknown keyword '%s'.\n",
+ file, linenum, args[0], args[myidx]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(expr);
+ goto out;
+ }
+ if (flags & STK_ON_RSP)
+ err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_STO_RUL, file, linenum);
+ else
+ err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, file, linenum);
+
+ rule = calloc(1, sizeof(*rule));
+ if (!rule) {
+ free_acl_cond(cond);
+ goto alloc_error;
+ }
+ rule->cond = cond;
+ rule->expr = expr;
+ rule->flags = flags;
+ /* NOTE(review): this strdup() is not checked; on OOM the rule is
+  * silently stored with a NULL table name - confirm downstream
+  * handling or route through alloc_error like the other keywords.
+  */
+ rule->table.name = name ? strdup(name) : NULL;
+ LIST_INIT(&rule->list);
+ if (flags & STK_ON_RSP)
+ LIST_APPEND(&curproxy->storersp_rules, &rule->list);
+ else
+ LIST_APPEND(&curproxy->sticking_rules, &rule->list);
+ }
+ else if (strcmp(args[0], "stats") == 0) {
+ /* "stats" configures the statistics page: uri, realm, auth, scope,
+  * refresh interval, admin rules, per-page http-request rules and
+  * display flags. A proxy still sharing the defaults' uri_auth must
+  * first detach from it before any local change.
+  */
+ if (!(curproxy->cap & PR_CAP_DEF) && curproxy->uri_auth == curr_defproxy->uri_auth)
+ curproxy->uri_auth = NULL; /* we must detach from the default config */
+
+ if (!*args[1]) {
+ goto stats_error_parsing;
+ } else if (strcmp(args[1], "admin") == 0) {
+ struct stats_admin_rule *rule;
+ int where = 0;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d]: '%s %s' not allowed in 'defaults' section.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!stats_check_init_uri_auth(&curproxy->uri_auth))
+ goto alloc_error;
+
+ if (strcmp(args[2], "if") != 0 && strcmp(args[2], "unless") != 0) {
+ ha_alert("parsing [%s:%d] : '%s %s' requires either 'if' or 'unless' followed by a condition.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 2, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing a '%s %s' rule : %s.\n",
+ file, linenum, args[0], args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ err_code |= warnif_cond_conflicts(cond, where, file, linenum);
+
+ rule = calloc(1, sizeof(*rule));
+ if (!rule) {
+ free_acl_cond(cond);
+ goto alloc_error;
+ }
+ rule->cond = cond;
+ LIST_INIT(&rule->list);
+ LIST_APPEND(&curproxy->uri_auth->admin_rules, &rule->list);
+ } else if (strcmp(args[1], "uri") == 0) {
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : 'uri' needs an URI prefix.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ } else if (!stats_set_uri(&curproxy->uri_auth, args[2]))
+ goto alloc_error;
+ } else if (strcmp(args[1], "realm") == 0) {
+ if (*(args[2]) == 0) {
+ /* fix: "an realm" -> "a realm" in the error message */
+ ha_alert("parsing [%s:%d] : 'realm' needs a realm name.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ } else if (!stats_set_realm(&curproxy->uri_auth, args[2]))
+ goto alloc_error;
+ } else if (strcmp(args[1], "refresh") == 0) {
+ unsigned interval;
+
+ err = parse_time_err(args[2], &interval, TIME_UNIT_S);
+ if (err == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to stats refresh interval, maximum value is 2147483647 s (~68 years).\n",
+ file, linenum, args[2]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to stats refresh interval, minimum non-null value is 1 s.\n",
+ file, linenum, args[2]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (err) {
+ ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to stats refresh interval.\n",
+ file, linenum, *err);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ } else if (!stats_set_refresh(&curproxy->uri_auth, interval))
+ goto alloc_error;
+ } else if (strcmp(args[1], "http-request") == 0) { /* request access control: allow/deny/auth */
+ struct act_rule *rule;
+ int where = 0;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d]: '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!stats_check_init_uri_auth(&curproxy->uri_auth))
+ goto alloc_error;
+
+ if (!LIST_ISEMPTY(&curproxy->uri_auth->http_req_rules) &&
+ !LIST_PREV(&curproxy->uri_auth->http_req_rules, struct act_rule *, list)->cond) {
+ ha_warning("parsing [%s:%d]: previous '%s' action has no condition attached, further entries are NOOP.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_WARN;
+ }
+
+ rule = parse_http_req_cond((const char **)args + 2, file, linenum, curproxy);
+
+ if (!rule) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ err_code |= warnif_cond_conflicts(rule->cond, where, file, linenum);
+ LIST_APPEND(&curproxy->uri_auth->http_req_rules, &rule->list);
+
+ } else if (strcmp(args[1], "auth") == 0) {
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : 'auth' needs a user:password account.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ } else if (!stats_add_auth(&curproxy->uri_auth, args[2]))
+ goto alloc_error;
+ } else if (strcmp(args[1], "scope") == 0) {
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : 'scope' needs a proxy name.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ } else if (!stats_add_scope(&curproxy->uri_auth, args[2]))
+ goto alloc_error;
+ } else if (strcmp(args[1], "enable") == 0) {
+ if (!stats_check_init_uri_auth(&curproxy->uri_auth))
+ goto alloc_error;
+ } else if (strcmp(args[1], "hide-version") == 0) {
+ if (!stats_set_flag(&curproxy->uri_auth, STAT_HIDEVER))
+ goto alloc_error;
+ } else if (strcmp(args[1], "show-legends") == 0) {
+ if (!stats_set_flag(&curproxy->uri_auth, STAT_SHLGNDS))
+ goto alloc_error;
+ } else if (strcmp(args[1], "show-modules") == 0) {
+ if (!stats_set_flag(&curproxy->uri_auth, STAT_SHMODULES))
+ goto alloc_error;
+ } else if (strcmp(args[1], "show-node") == 0) {
+
+ if (*args[2]) {
+ int i;
+ char c;
+
+ for (i=0; args[2][i]; i++) {
+ c = args[2][i];
+ if (!isupper((unsigned char)c) && !islower((unsigned char)c) &&
+ !isdigit((unsigned char)c) && c != '_' && c != '-' && c != '.')
+ break;
+ }
+
+ if (!i || args[2][i]) {
+ /* fix: missing space between the concatenated string pieces
+  * ("stringwith"), "underscode" typo, and the message now lists
+  * dot(.) which the check above accepts.
+  */
+ ha_alert("parsing [%s:%d]: '%s %s' invalid node name - should be a string "
+ "with digits(0-9), letters(A-Z, a-z), hyphen(-), dot(.) or underscore(_).\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+
+ if (!stats_set_node(&curproxy->uri_auth, args[2]))
+ goto alloc_error;
+ } else if (strcmp(args[1], "show-desc") == 0) {
+ char *desc = NULL;
+
+ if (*args[2]) {
+ int i, len=0;
+ char *d;
+
+ for (i = 2; *args[i]; i++)
+ len += strlen(args[i]) + 1;
+
+ /* fix: check the allocation before writing into it */
+ desc = d = calloc(1, len);
+ if (!desc)
+ goto alloc_error;
+
+ d += snprintf(d, desc + len - d, "%s", args[2]);
+ for (i = 3; *args[i]; i++)
+ d += snprintf(d, desc + len - d, " %s", args[i]);
+ }
+
+ if (!*args[2] && !global.desc)
+ ha_warning("parsing [%s:%d]: '%s' requires a parameter or 'desc' to be set in the global section.\n",
+ file, linenum, args[1]);
+ else {
+ if (!stats_set_desc(&curproxy->uri_auth, desc)) {
+ free(desc);
+ goto alloc_error;
+ }
+ free(desc);
+ }
+ } else {
+stats_error_parsing:
+ ha_alert("parsing [%s:%d]: %s '%s', expects 'admin', 'uri', 'realm', 'auth', 'scope', 'enable', 'hide-version', 'show-node', 'show-desc' or 'show-legends'.\n",
+ file, linenum, *args[1]?"unknown stats parameter":"missing keyword in", args[*args[1]?1:0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "option") == 0) {
+ int optnum;
+
+ if (*(args[1]) == '\0') {
+ ha_alert("parsing [%s:%d]: '%s' expects an option name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ for (optnum = 0; cfg_opts[optnum].name; optnum++) {
+ if (strcmp(args[1], cfg_opts[optnum].name) == 0) {
+ if (cfg_opts[optnum].cap == PR_CAP_NONE) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported due to build options.\n",
+ file, linenum, cfg_opts[optnum].name);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+
+ if (warnifnotcap(curproxy, cfg_opts[optnum].cap, file, linenum, args[1], NULL)) {
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ curproxy->no_options &= ~cfg_opts[optnum].val;
+ curproxy->options &= ~cfg_opts[optnum].val;
+
+ switch (kwm) {
+ case KWM_STD:
+ curproxy->options |= cfg_opts[optnum].val;
+ break;
+ case KWM_NO:
+ curproxy->no_options |= cfg_opts[optnum].val;
+ break;
+ case KWM_DEF: /* already cleared */
+ break;
+ }
+
+ goto out;
+ }
+ }
+
+ for (optnum = 0; cfg_opts2[optnum].name; optnum++) {
+ if (strcmp(args[1], cfg_opts2[optnum].name) == 0) {
+ if (cfg_opts2[optnum].cap == PR_CAP_NONE) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported due to build options.\n",
+ file, linenum, cfg_opts2[optnum].name);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (warnifnotcap(curproxy, cfg_opts2[optnum].cap, file, linenum, args[1], NULL)) {
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ curproxy->no_options2 &= ~cfg_opts2[optnum].val;
+ curproxy->options2 &= ~cfg_opts2[optnum].val;
+
+ switch (kwm) {
+ case KWM_STD:
+ curproxy->options2 |= cfg_opts2[optnum].val;
+ break;
+ case KWM_NO:
+ curproxy->no_options2 |= cfg_opts2[optnum].val;
+ break;
+ case KWM_DEF: /* already cleared */
+ break;
+ }
+ goto out;
+ }
+ }
+
+ /* HTTP options override each other. They can be cancelled using
+ * "no option xxx" which only switches to default mode if the mode
+ * was this one (useful for cancelling options set in defaults
+ * sections).
+ */
+ if (strcmp(args[1], "forceclose") == 0) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported any more since HAProxy 2.0, please just remove it, or use 'option httpclose' if absolutely needed.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[1], "httpclose") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD) {
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ curproxy->options |= PR_O_HTTP_CLO;
+ goto out;
+ }
+ else if (kwm == KWM_NO) {
+ if ((curproxy->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ goto out;
+ }
+ }
+ else if (strcmp(args[1], "http-server-close") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD) {
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ curproxy->options |= PR_O_HTTP_SCL;
+ goto out;
+ }
+ else if (kwm == KWM_NO) {
+ if ((curproxy->options & PR_O_HTTP_MODE) == PR_O_HTTP_SCL)
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ goto out;
+ }
+ }
+ else if (strcmp(args[1], "http-keep-alive") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD) {
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ curproxy->options |= PR_O_HTTP_KAL;
+ goto out;
+ }
+ else if (kwm == KWM_NO) {
+ if ((curproxy->options & PR_O_HTTP_MODE) == PR_O_HTTP_KAL)
+ curproxy->options &= ~PR_O_HTTP_MODE;
+ goto out;
+ }
+ }
+ else if (strcmp(args[1], "http-tunnel") == 0) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported any more since HAProxy 2.1, please just remove it, it shouldn't be needed.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[1], "forwarded") == 0) {
+ if (kwm == KWM_STD) {
+ err_code |= proxy_http_parse_7239(args, 0, curproxy, curr_defproxy, file, linenum);
+ goto out;
+ }
+ else if (kwm == KWM_NO) {
+ if (curproxy->http_ext)
+ http_ext_7239_clean(curproxy);
+ goto out;
+ }
+ }
+
+ /* Redispatch can take an integer argument that controls when the
+ * redispatch occurs. All values are relative to the retries option.
+ * This can be cancelled using "no option xxx".
+ */
+ if (strcmp(args[1], "redispatch") == 0) {
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[1], NULL)) {
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ curproxy->no_options &= ~PR_O_REDISP;
+ curproxy->options &= ~PR_O_REDISP;
+
+ switch (kwm) {
+ case KWM_STD:
+ curproxy->options |= PR_O_REDISP;
+ curproxy->redispatch_after = -1;
+ if(*args[2]) {
+ curproxy->redispatch_after = atol(args[2]);
+ }
+ break;
+ case KWM_NO:
+ curproxy->no_options |= PR_O_REDISP;
+ curproxy->redispatch_after = 0;
+ break;
+ case KWM_DEF: /* already cleared */
+ break;
+ }
+ goto out;
+ }
+
+ if (strcmp(args[1], "http_proxy") == 0) {
+ ha_alert("parsing [%s:%d]: option '%s' is not supported any more since HAProxy 2.5. This option stopped working in HAProxy 1.9 and usually had nasty side effects. It can be more reliably implemented with combinations of 'http-request set-dst' and 'http-request set-uri', and even 'http-request do-resolve' if DNS resolution is desired.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (kwm != KWM_STD) {
+ ha_alert("parsing [%s:%d]: negation/default is not supported for option '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[1], "httplog") == 0) {
+ char *logformat;
+ /* generate a complete HTTP log */
+ logformat = default_http_log_format;
+ if (*(args[2]) != '\0') {
+ if (strcmp(args[2], "clf") == 0) {
+ curproxy->options2 |= PR_O2_CLFLOG;
+ logformat = clf_http_log_format;
+ } else {
+ ha_alert("parsing [%s:%d] : keyword '%s' only supports option 'clf'.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(1, 1, file, linenum, args, &err_code))
+ goto out;
+ }
+ if (curproxy->conf.logformat_string && curproxy->cap & PR_CAP_DEF) {
+ char *oldlogformat = "log-format";
+ char *clflogformat = "";
+
+ if (curproxy->conf.logformat_string == default_http_log_format)
+ oldlogformat = "option httplog";
+ else if (curproxy->conf.logformat_string == default_tcp_log_format)
+ oldlogformat = "option tcplog";
+ else if (curproxy->conf.logformat_string == clf_http_log_format)
+ oldlogformat = "option httplog clf";
+ else if (curproxy->conf.logformat_string == default_https_log_format)
+ oldlogformat = "option httpslog";
+ if (logformat == clf_http_log_format)
+ clflogformat = " clf";
+ ha_warning("parsing [%s:%d]: 'option httplog%s' overrides previous '%s' in 'defaults' section.\n",
+ file, linenum, clflogformat, oldlogformat);
+ }
+ if (curproxy->conf.logformat_string != default_http_log_format &&
+ curproxy->conf.logformat_string != default_tcp_log_format &&
+ curproxy->conf.logformat_string != clf_http_log_format &&
+ curproxy->conf.logformat_string != default_https_log_format)
+ free(curproxy->conf.logformat_string);
+ curproxy->conf.logformat_string = logformat;
+
+ free(curproxy->conf.lfs_file);
+ curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfs_line = curproxy->conf.args.line;
+
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'option httplog' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[1], "tcplog") == 0) {
+ if (curproxy->conf.logformat_string && curproxy->cap & PR_CAP_DEF) {
+ char *oldlogformat = "log-format";
+
+ if (curproxy->conf.logformat_string == default_http_log_format)
+ oldlogformat = "option httplog";
+ else if (curproxy->conf.logformat_string == default_tcp_log_format)
+ oldlogformat = "option tcplog";
+ else if (curproxy->conf.logformat_string == clf_http_log_format)
+ oldlogformat = "option httplog clf";
+ else if (curproxy->conf.logformat_string == default_https_log_format)
+ oldlogformat = "option httpslog";
+ ha_warning("parsing [%s:%d]: 'option tcplog' overrides previous '%s' in 'defaults' section.\n",
+ file, linenum, oldlogformat);
+ }
+ /* generate a detailed TCP log */
+ if (curproxy->conf.logformat_string != default_http_log_format &&
+ curproxy->conf.logformat_string != default_tcp_log_format &&
+ curproxy->conf.logformat_string != clf_http_log_format &&
+ curproxy->conf.logformat_string != default_https_log_format)
+ free(curproxy->conf.logformat_string);
+ curproxy->conf.logformat_string = default_tcp_log_format;
+
+ free(curproxy->conf.lfs_file);
+ curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfs_line = curproxy->conf.args.line;
+
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'option tcplog' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[1], "httpslog") == 0) {
+ char *logformat;
+ /* generate a complete HTTPS log */
+ logformat = default_https_log_format;
+ if (curproxy->conf.logformat_string && curproxy->cap & PR_CAP_DEF) {
+ char *oldlogformat = "log-format";
+
+ if (curproxy->conf.logformat_string == default_http_log_format)
+ oldlogformat = "option httplog";
+ else if (curproxy->conf.logformat_string == default_tcp_log_format)
+ oldlogformat = "option tcplog";
+ else if (curproxy->conf.logformat_string == clf_http_log_format)
+ oldlogformat = "option httplog clf";
+ else if (curproxy->conf.logformat_string == default_https_log_format)
+ oldlogformat = "option httpslog";
+ ha_warning("parsing [%s:%d]: 'option httpslog' overrides previous '%s' in 'defaults' section.\n",
+ file, linenum, oldlogformat);
+ }
+ if (curproxy->conf.logformat_string != default_http_log_format &&
+ curproxy->conf.logformat_string != default_tcp_log_format &&
+ curproxy->conf.logformat_string != clf_http_log_format &&
+ curproxy->conf.logformat_string != default_https_log_format)
+ free(curproxy->conf.logformat_string);
+ curproxy->conf.logformat_string = logformat;
+
+ free(curproxy->conf.lfs_file);
+ curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfs_line = curproxy->conf.args.line;
+
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'option httpslog' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[1], "tcpka") == 0) {
+ /* enable TCP keep-alives on client and server streams */
+ if (warnifnotcap(curproxy, PR_CAP_BE | PR_CAP_FE, file, linenum, args[1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+
+ if (curproxy->cap & PR_CAP_FE)
+ curproxy->options |= PR_O_TCP_CLI_KA;
+ if (curproxy->cap & PR_CAP_BE)
+ curproxy->options |= PR_O_TCP_SRV_KA;
+ }
+ else if (strcmp(args[1], "httpchk") == 0) {
+ err_code |= proxy_parse_httpchk_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "ssl-hello-chk") == 0) {
+ err_code |= proxy_parse_ssl_hello_chk_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "smtpchk") == 0) {
+ err_code |= proxy_parse_smtpchk_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "pgsql-check") == 0) {
+ err_code |= proxy_parse_pgsql_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "redis-check") == 0) {
+ err_code |= proxy_parse_redis_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "mysql-check") == 0) {
+ err_code |= proxy_parse_mysql_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "ldap-check") == 0) {
+ err_code |= proxy_parse_ldap_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "spop-check") == 0) {
+ err_code |= proxy_parse_spop_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "tcp-check") == 0) {
+ err_code |= proxy_parse_tcp_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "external-check") == 0) {
+ err_code |= proxy_parse_external_check_opt(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "forwardfor") == 0) {
+ err_code |= proxy_http_parse_xff(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "originalto") == 0) {
+ err_code |= proxy_http_parse_xot(args, 0, curproxy, curr_defproxy, file, linenum);
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ else if (strcmp(args[1], "http-restrict-req-hdr-names") == 0) {
+ if (alertif_too_many_args(2, file, linenum, args, &err_code))
+ goto out;
+
+ if (*(args[2]) == 0) {
+ ha_alert("parsing [%s:%d] : missing parameter. option '%s' expects 'preserve', 'reject' or 'delete' option.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->options2 &= ~PR_O2_RSTRICT_REQ_HDR_NAMES_MASK;
+ if (strcmp(args[2], "preserve") == 0)
+ curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP;
+ else if (strcmp(args[2], "reject") == 0)
+ curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_BLK;
+ else if (strcmp(args[2], "delete") == 0)
+ curproxy->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_DEL;
+ else {
+ ha_alert("parsing [%s:%d] : invalid parameter '%s'. option '%s' expects 'preserve', 'reject' or 'delete' option.\n",
+ file, linenum, args[2], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else {
+ const char *best = proxy_find_best_option(args[1], common_options);
+
+ if (best)
+ ha_alert("parsing [%s:%d] : unknown option '%s'; did you mean '%s' maybe ?\n", file, linenum, args[1], best);
+ else
+ ha_alert("parsing [%s:%d] : unknown option '%s'.\n", file, linenum, args[1]);
+
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ goto out;
+ }
+ else if (strcmp(args[0], "default_backend") == 0) {
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a backend name.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->defbe.name);
+ curproxy->defbe.name = strdup(args[1]);
+ if (!curproxy->defbe.name)
+ goto alloc_error;
+
+ if (alertif_too_many_args_idx(1, 0, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[0], "redispatch") == 0 || strcmp(args[0], "redisp") == 0) {
+ ha_alert("parsing [%s:%d] : keyword '%s' directive is not supported anymore since HAProxy 2.1. Use 'option redispatch'.\n", file, linenum, args[0]);
+
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "http-reuse") == 0) {
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "never") == 0) {
+ /* never reuse an existing server connection for a new request */
+ curproxy->options &= ~PR_O_REUSE_MASK;
+ curproxy->options |= PR_O_REUSE_NEVR;
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[1], "safe") == 0) {
+ /* reuse server connections only when it is considered safe */
+ curproxy->options &= ~PR_O_REUSE_MASK;
+ curproxy->options |= PR_O_REUSE_SAFE;
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[1], "aggressive") == 0) {
+ curproxy->options &= ~PR_O_REUSE_MASK;
+ curproxy->options |= PR_O_REUSE_AGGR;
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[1], "always") == 0) {
+ /* always try to reuse an existing server connection */
+ curproxy->options &= ~PR_O_REUSE_MASK;
+ curproxy->options |= PR_O_REUSE_ALWS;
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'never', 'safe', 'aggressive', 'always'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "monitor") == 0) {
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "fail") == 0) {
+ /* add a condition to fail monitor requests */
+ if (strcmp(args[2], "if") != 0 && strcmp(args[2], "unless") != 0) {
+ ha_alert("parsing [%s:%d] : '%s %s' requires either 'if' or 'unless' followed by a condition.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err_code |= warnif_misplaced_monitor(curproxy, file, linenum, "monitor fail");
+ if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 2, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing a '%s %s' condition : %s.\n",
+ file, linenum, args[0], args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ LIST_APPEND(&curproxy->mon_fail_cond, &cond->list);
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'fail'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+#ifdef USE_TPROXY
+ else if (strcmp(args[0], "transparent") == 0) {
+ /* enable transparent proxy connections */
+ curproxy->options |= PR_O_TRANSP;
+ if (alertif_too_many_args(0, file, linenum, args, &err_code))
+ goto out;
+ }
+#endif
+ else if (strcmp(args[0], "maxconn") == 0) { /* maxconn */
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], " Maybe you want 'fullconn' instead ?"))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->maxconn = atol(args[1]);
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[0], "backlog") == 0) { /* backlog */
+ if (warnifnotcap(curproxy, PR_CAP_FE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->backlog = atol(args[1]);
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[0], "fullconn") == 0) { /* fullconn */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], " Maybe you want 'maxconn' instead ?"))
+ err_code |= ERR_WARN;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->fullconn = atol(args[1]);
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+ }
+ else if (strcmp(args[0], "grace") == 0) { /* grace time (ms) */
+ ha_alert("parsing [%s:%d]: the '%s' keyword is not supported any more since HAProxy version 2.5.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "dispatch") == 0) { /* dispatch address */
+ struct sockaddr_storage *sk;
+ int port1, port2;
+
+ if (curproxy->cap & PR_CAP_DEF) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed in 'defaults' section.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ sk = str2sa_range(args[1], NULL, &port1, &port2, NULL, NULL, NULL,
+ &errmsg, NULL, NULL,
+ PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_STREAM | PA_O_XPRT | PA_O_CONNECT);
+ if (!sk) {
+ ha_alert("parsing [%s:%d] : '%s' : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ curproxy->dispatch_addr = *sk;
+ curproxy->options |= PR_O_DISPATCH;
+ }
+ else if (strcmp(args[0], "balance") == 0) { /* set balancing with optional algorithm */
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (backend_parse_balance((const char **)args + 1, &errmsg, curproxy) < 0) {
+ ha_alert("parsing [%s:%d] : %s %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "hash-type") == 0) { /* set hashing method */
+ /**
+ * The syntax for hash-type config element is
+ * hash-type {map-based|consistent} [[<algo>] avalanche]
+ *
+ * The default hash function is sdbm for map-based and sdbm+avalanche for consistent.
+ */
+ curproxy->lbprm.algo &= ~(BE_LB_HASH_TYPE | BE_LB_HASH_FUNC | BE_LB_HASH_MOD);
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (strcmp(args[1], "consistent") == 0) { /* use consistent hashing */
+ curproxy->lbprm.algo |= BE_LB_HASH_CONS;
+ }
+ else if (strcmp(args[1], "map-based") == 0) { /* use map-based hashing */
+ curproxy->lbprm.algo |= BE_LB_HASH_MAP;
+ }
+ else if (strcmp(args[1], "avalanche") == 0) {
+ ha_alert("parsing [%s:%d] : experimental feature '%s %s' is not supported anymore, please use '%s map-based sdbm avalanche' instead.\n", file, linenum, args[0], args[1], args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'consistent' and 'map-based'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* set the hash function to use */
+ if (!*args[2]) {
+ /* the default algo is sdbm */
+ curproxy->lbprm.algo |= BE_LB_HFCN_SDBM;
+
+ /* if consistent with no argument, then avalanche modifier is also applied */
+ if ((curproxy->lbprm.algo & BE_LB_HASH_TYPE) == BE_LB_HASH_CONS)
+ curproxy->lbprm.algo |= BE_LB_HMOD_AVAL;
+ } else {
+ /* set the hash function */
+ if (strcmp(args[2], "sdbm") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HFCN_SDBM;
+ }
+ else if (strcmp(args[2], "djb2") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HFCN_DJB2;
+ }
+ else if (strcmp(args[2], "wt6") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HFCN_WT6;
+ }
+ else if (strcmp(args[2], "crc32") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HFCN_CRC32;
+ }
+ else if (strcmp(args[2], "none") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HFCN_NONE;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'sdbm', 'djb2', 'wt6', 'crc32', or 'none' hash functions.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* set the hash modifier */
+ if (strcmp(args[3], "avalanche") == 0) {
+ curproxy->lbprm.algo |= BE_LB_HMOD_AVAL;
+ }
+ else if (*args[3]) {
+ ha_alert("parsing [%s:%d] : '%s' only supports 'avalanche' as a modifier for hash functions.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ }
+ else if (strcmp(args[0], "hash-balance-factor") == 0) {
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curproxy->lbprm.hash_balance_factor = atol(args[1]);
+ if (curproxy->lbprm.hash_balance_factor != 0 && curproxy->lbprm.hash_balance_factor <= 100) {
+ ha_alert("parsing [%s:%d] : '%s' must be 0 or greater than 100.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "unique-id-format") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : %s expects an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (*(args[2])) {
+ ha_alert("parsing [%s:%d] : %s expects only one argument, don't forget to escape spaces!\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->conf.uniqueid_format_string);
+ curproxy->conf.uniqueid_format_string = strdup(args[1]);
+ if (!curproxy->conf.uniqueid_format_string)
+ goto alloc_error;
+
+ free(curproxy->conf.uif_file);
+ curproxy->conf.uif_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.uif_line = curproxy->conf.args.line;
+ }
+
+ else if (strcmp(args[0], "unique-id-header") == 0) {
+ char *copy;
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : %s expects an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ copy = strdup(args[1]);
+ if (copy == NULL) {
+ ha_alert("parsing [%s:%d] : failed to allocate memory for unique-id-header\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ istfree(&curproxy->header_unique_id);
+ curproxy->header_unique_id = ist(copy);
+ }
+
+ else if (strcmp(args[0], "log-format") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : %s expects an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (*(args[2])) {
+ ha_alert("parsing [%s:%d] : %s expects only one argument, don't forget to escape spaces!\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (curproxy->conf.logformat_string && curproxy->cap & PR_CAP_DEF) {
+ char *oldlogformat = "log-format";
+
+ if (curproxy->conf.logformat_string == default_http_log_format)
+ oldlogformat = "option httplog";
+ else if (curproxy->conf.logformat_string == default_tcp_log_format)
+ oldlogformat = "option tcplog";
+ else if (curproxy->conf.logformat_string == clf_http_log_format)
+ oldlogformat = "option httplog clf";
+ else if (curproxy->conf.logformat_string == default_https_log_format)
+ oldlogformat = "option httpslog";
+ ha_warning("parsing [%s:%d]: 'log-format' overrides previous '%s' in 'defaults' section.\n",
+ file, linenum, oldlogformat);
+ }
+ if (curproxy->conf.logformat_string != default_http_log_format &&
+ curproxy->conf.logformat_string != default_tcp_log_format &&
+ curproxy->conf.logformat_string != clf_http_log_format &&
+ curproxy->conf.logformat_string != default_https_log_format)
+ free(curproxy->conf.logformat_string);
+ curproxy->conf.logformat_string = strdup(args[1]);
+ if (!curproxy->conf.logformat_string)
+ goto alloc_error;
+
+ free(curproxy->conf.lfs_file);
+ curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfs_line = curproxy->conf.args.line;
+
+ /* get a chance to improve log-format error reporting by
+ * reporting the correct line-number when possible.
+ */
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'log-format' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[0], "log-format-sd") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : %s expects an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (*(args[2])) {
+ ha_alert("parsing [%s:%d] : %s expects only one argument, don't forget to escape spaces!\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (curproxy->conf.logformat_sd_string != default_rfc5424_sd_log_format)
+ free(curproxy->conf.logformat_sd_string);
+ curproxy->conf.logformat_sd_string = strdup(args[1]);
+ if (!curproxy->conf.logformat_sd_string)
+ goto alloc_error;
+
+ free(curproxy->conf.lfsd_file);
+ curproxy->conf.lfsd_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfsd_line = curproxy->conf.args.line;
+
+ /* get a chance to improve log-format-sd error reporting by
+ * reporting the correct line-number when possible.
+ */
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'log-format-sd' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[0], "error-log-format") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : %s expects an argument.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (*(args[2])) {
+ ha_alert("parsing [%s:%d] : %s expects only one argument, don't forget to escape spaces!\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (curproxy->conf.error_logformat_string && curproxy->cap & PR_CAP_DEF) {
+ ha_warning("parsing [%s:%d]: 'error-log-format' overrides previous 'error-log-format' in 'defaults' section.\n",
+ file, linenum);
+ }
+ free(curproxy->conf.error_logformat_string);
+ curproxy->conf.error_logformat_string = strdup(args[1]);
+ if (!curproxy->conf.error_logformat_string)
+ goto alloc_error;
+
+ free(curproxy->conf.elfs_file);
+ curproxy->conf.elfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.elfs_line = curproxy->conf.args.line;
+
+ /* get a chance to improve log-format error reporting by
+ * reporting the correct line-number when possible.
+ */
+ if (!(curproxy->cap & PR_CAP_DEF) && !(curproxy->cap & PR_CAP_FE)) {
+ ha_warning("parsing [%s:%d] : backend '%s' : 'error-log-format' directive is ignored in backends.\n",
+ file, linenum, curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (strcmp(args[0], "log-tag") == 0) { /* tag to report to syslog */
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects a tag for use in syslog.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ chunk_destroy(&curproxy->log_tag);
+ chunk_initlen(&curproxy->log_tag, strdup(args[1]), strlen(args[1]), strlen(args[1]));
+ if (b_orig(&curproxy->log_tag) == NULL) {
+ chunk_destroy(&curproxy->log_tag);
+ ha_alert("parsing [%s:%d]: cannot allocate memory for '%s'.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "log") == 0) { /* "no log" or "log ..." */
+ if (!parse_logger(args, &curproxy->loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "source") == 0) { /* address to which we bind when connecting */
+ int cur_arg;
+ int port1, port2;
+ struct sockaddr_storage *sk;
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL))
+ err_code |= ERR_WARN;
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects <addr>[:<port>], and optionally '%s' <addr>, and '%s' <name>.\n",
+ file, linenum, "source", "usesrc", "interface");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* we must first clear any optional default setting */
+ curproxy->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+ ha_free(&curproxy->conn_src.iface_name);
+ curproxy->conn_src.iface_len = 0;
+
+ sk = str2sa_range(args[1], NULL, &port1, &port2, NULL, NULL, NULL,
+ &errmsg, NULL, NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
+ if (!sk) {
+ ha_alert("parsing [%s:%d] : '%s %s' : %s\n",
+ file, linenum, args[0], args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->conn_src.source_addr = *sk;
+ curproxy->conn_src.opts |= CO_SRC_BIND;
+
+ cur_arg = 2;
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "usesrc") == 0) { /* address to use outside */
+#if defined(CONFIG_HAP_TRANSPARENT)
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects <addr>[:<port>], 'client', or 'clientip' as argument.\n",
+ file, linenum, "usesrc");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[cur_arg + 1], "client") == 0) {
+ curproxy->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+ curproxy->conn_src.opts |= CO_SRC_TPROXY_CLI;
+ } else if (strcmp(args[cur_arg + 1], "clientip") == 0) {
+ curproxy->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+ curproxy->conn_src.opts |= CO_SRC_TPROXY_CIP;
+ } else if (!strncmp(args[cur_arg + 1], "hdr_ip(", 7)) {
+ char *name, *end;
+
+ name = args[cur_arg+1] + 7;
+ while (isspace((unsigned char)*name))
+ name++;
+
+ end = name;
+ while (*end && !isspace((unsigned char)*end) && *end != ',' && *end != ')')
+ end++;
+
+ curproxy->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+ curproxy->conn_src.opts |= CO_SRC_TPROXY_DYN;
+ free(curproxy->conn_src.bind_hdr_name);
+ curproxy->conn_src.bind_hdr_name = calloc(1, end - name + 1);
+ if (!curproxy->conn_src.bind_hdr_name)
+ goto alloc_error;
+ curproxy->conn_src.bind_hdr_len = end - name;
+ memcpy(curproxy->conn_src.bind_hdr_name, name, end - name);
+ curproxy->conn_src.bind_hdr_name[end-name] = '\0';
+ curproxy->conn_src.bind_hdr_occ = -1;
+
+ /* now look for an occurrence number */
+ while (isspace((unsigned char)*end))
+ end++;
+ if (*end == ',') {
+ end++;
+ name = end;
+ if (*end == '-')
+ end++;
+ while (isdigit((unsigned char)*end))
+ end++;
+ curproxy->conn_src.bind_hdr_occ = strl2ic(name, end-name);
+ }
+
+ if (curproxy->conn_src.bind_hdr_occ < -MAX_HDR_HISTORY) {
+ ha_alert("parsing [%s:%d] : usesrc hdr_ip(name,num) does not support negative"
+ " occurrences values smaller than %d.\n",
+ file, linenum, MAX_HDR_HISTORY);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ } else {
+ struct sockaddr_storage *sk;
+
+ sk = str2sa_range(args[cur_arg + 1], NULL, &port1, &port2, NULL, NULL, NULL,
+ &errmsg, NULL, NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
+ if (!sk) {
+ ha_alert("parsing [%s:%d] : '%s %s' : %s\n",
+ file, linenum, args[cur_arg], args[cur_arg+1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ curproxy->conn_src.tproxy_addr = *sk;
+ curproxy->conn_src.opts |= CO_SRC_TPROXY_ADDR;
+ }
+ global.last_checks |= LSTCHK_NETADM;
+#else /* no TPROXY support */
+ ha_alert("parsing [%s:%d] : '%s' not allowed here because support for TPROXY was not compiled in.\n",
+ file, linenum, "usesrc");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+#endif
+ cur_arg += 2;
+ continue;
+ }
+
+ if (strcmp(args[cur_arg], "interface") == 0) { /* specifically bind to this interface */
+#ifdef SO_BINDTODEVICE
+ if (!*args[cur_arg + 1]) {
+ ha_alert("parsing [%s:%d] : '%s' : missing interface name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ free(curproxy->conn_src.iface_name);
+ curproxy->conn_src.iface_name = strdup(args[cur_arg + 1]);
+ if (!curproxy->conn_src.iface_name)
+ goto alloc_error;
+ curproxy->conn_src.iface_len = strlen(curproxy->conn_src.iface_name);
+ global.last_checks |= LSTCHK_NETADM;
+#else
+ ha_alert("parsing [%s:%d] : '%s' : '%s' option not implemented.\n",
+ file, linenum, args[0], args[cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+#endif
+ cur_arg += 2;
+ continue;
+ }
+ ha_alert("parsing [%s:%d] : '%s' only supports optional keywords '%s' and '%s'.\n",
+ file, linenum, args[0], "interface", "usesrc");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "usesrc") == 0) { /* address to use outside: needs "source" first */
+ ha_alert("parsing [%s:%d] : '%s' only allowed after a '%s' statement.\n",
+ file, linenum, "usesrc", "source");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "cliexp") == 0 || strcmp(args[0], "reqrep") == 0) { /* replace request header from a regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request replace-path', 'http-request replace-uri' or 'http-request replace-header' instead.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqdel") == 0) { /* delete request header from a regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request del-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqdeny") == 0) { /* deny a request if a header matches this regex */
+ ha_alert("parsing [%s:%d] : The '%s' not supported anymore since HAProxy 2.1. "
+ "Use 'http-request deny' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqpass") == 0) { /* pass this header without allowing or denying the request */
+ ha_alert("parsing [%s:%d] : The '%s' not supported anymore since HAProxy 2.1.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqallow") == 0) { /* allow a request if a header matches this regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request allow' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqtarpit") == 0) { /* tarpit a request if a header matches this regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request tarpit' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqirep") == 0) { /* replace request header from a regex, ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request replace-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqidel") == 0) { /* delete request header from a regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request del-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqideny") == 0) { /* deny a request if a header matches this regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request deny' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqipass") == 0) { /* pass this header without allowing or denying the request */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqiallow") == 0) { /* allow a request if a header matches this regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request allow' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqitarpit") == 0) { /* tarpit a request if a header matches this regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request tarpit' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "reqadd") == 0) { /* add request header */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-request add-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "srvexp") == 0 || strcmp(args[0], "rsprep") == 0) { /* replace response header from a regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response replace-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspdel") == 0) { /* delete response header from a regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response del-header' .\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspdeny") == 0) { /* block response header from a regex */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response deny' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspirep") == 0) { /* replace response header from a regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response replace-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspidel") == 0) { /* delete response header from a regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response del-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspideny") == 0) { /* block response header from a regex ignoring case */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response deny' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[0], "rspadd") == 0) { /* add response header */
+ ha_alert("parsing [%s:%d] : The '%s' directive is not supported anymore since HAProxy 2.1. "
+ "Use 'http-response add-header' instead.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else {
+ struct cfg_kw_list *kwl;
+ const char *best;
+ int index;
+
+ list_for_each_entry(kwl, &cfg_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (kwl->kw[index].section != CFG_LISTEN)
+ continue;
+ if (strcmp(kwl->kw[index].kw, args[0]) == 0) {
+ if (check_kw_experimental(&kwl->kw[index], file, linenum, &errmsg)) {
+ ha_alert("%s\n", errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* prepare error message just in case */
+ rc = kwl->kw[index].parse(args, CFG_LISTEN, curproxy, curr_defproxy, file, linenum, &errmsg);
+ if (rc < 0) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (rc > 0) {
+ ha_warning("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+ goto out;
+ }
+ }
+ }
+
+ best = cfg_find_best_match(args[0], &cfg_keywords.list, CFG_LISTEN, common_kw_list);
+ if (best)
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section; did you mean '%s' maybe ?\n", file, linenum, args[0], cursection, best);
+ else
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ out:
+ free(errmsg);
+ return err_code;
+
+ alloc_error:
+ ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+}
diff --git a/src/cfgparse-quic.c b/src/cfgparse-quic.c
new file mode 100644
index 0000000..3b38efa
--- /dev/null
+++ b/src/cfgparse-quic.c
@@ -0,0 +1,292 @@
#include <errno.h>
#include <limits.h>
#include <string.h>

#include <haproxy/api.h>
#include <haproxy/cfgparse.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/listener.h>
#include <haproxy/proxy-t.h>
#include <haproxy/quic_cc-t.h>
#include <haproxy/tools.h>
+
+#define QUIC_CC_NEWRENO_STR "newreno"
+#define QUIC_CC_CUBIC_STR "cubic"
+#define QUIC_CC_NO_CC_STR "nocc"
+
/* parse the "quic-force-retry" bind keyword: sets BC_O_QUIC_FORCE_RETRY on
 * the bind_conf, which presumably forces the QUIC Retry (address validation)
 * procedure on incoming connections — confirm in the QUIC layer.
 * Takes no argument and always returns 0 (cannot fail).
 */
static int bind_parse_quic_force_retry(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	conf->options |= BC_O_QUIC_FORCE_RETRY;
	return 0;
}
+
+/* parse "quic-cc-algo" bind keyword */
+static int bind_parse_quic_cc_algo(char **args, int cur_arg, struct proxy *px,
+ struct bind_conf *conf, char **err)
+{
+ struct quic_cc_algo *cc_algo;
+ const char *algo = NULL;
+ char *arg;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing control congestion algorithm", args[cur_arg]);
+ goto fail;
+ }
+
+ arg = args[cur_arg + 1];
+ if (strncmp(arg, QUIC_CC_NEWRENO_STR, strlen(QUIC_CC_NEWRENO_STR)) == 0) {
+ /* newreno */
+ algo = QUIC_CC_NEWRENO_STR;
+ cc_algo = &quic_cc_algo_nr;
+ arg += strlen(QUIC_CC_NEWRENO_STR);
+ }
+ else if (strncmp(arg, QUIC_CC_CUBIC_STR, strlen(QUIC_CC_CUBIC_STR)) == 0) {
+ /* cubic */
+ algo = QUIC_CC_CUBIC_STR;
+ cc_algo = &quic_cc_algo_cubic;
+ arg += strlen(QUIC_CC_CUBIC_STR);
+ }
+ else if (strncmp(arg, QUIC_CC_NO_CC_STR, strlen(QUIC_CC_NO_CC_STR)) == 0) {
+ /* nocc */
+ if (!experimental_directives_allowed) {
+ ha_alert("'%s' algo is experimental, must be allowed via a global "
+ "'expose-experimental-directives'\n", arg);
+ goto fail;
+ }
+
+ algo = QUIC_CC_NO_CC_STR;
+ cc_algo = &quic_cc_algo_nocc;
+ arg += strlen(QUIC_CC_NO_CC_STR);
+ }
+ else {
+ memprintf(err, "'%s' : unknown control congestion algorithm", args[cur_arg + 1]);
+ goto fail;
+ }
+
+ if (*arg++ == '(') {
+ unsigned long cwnd;
+ char *end_opt;
+
+ errno = 0;
+ cwnd = strtoul(arg, &end_opt, 0);
+ if (end_opt == arg || errno != 0) {
+ memprintf(err, "'%s' : could not parse congestion window value", args[cur_arg + 1]);
+ goto fail;
+ }
+
+ if (*end_opt == 'k') {
+ cwnd <<= 10;
+ end_opt++;
+ }
+ else if (*end_opt == 'm') {
+ cwnd <<= 20;
+ end_opt++;
+ }
+ else if (*end_opt == 'g') {
+ cwnd <<= 30;
+ end_opt++;
+ }
+
+ if (*end_opt != ')') {
+ memprintf(err, "'%s' : expects %s(<max window>)", args[cur_arg + 1], algo);
+ goto fail;
+ }
+
+ if (cwnd < 10240 || cwnd > (4UL << 30)) {
+ memprintf(err, "'%s' : should be greater than 10k and smaller than 4g", args[cur_arg + 1]);
+ goto fail;
+ }
+
+ conf->max_cwnd = cwnd;
+ }
+
+ conf->quic_cc_algo = cc_algo;
+ return 0;
+
+ fail:
+ return ERR_ALERT | ERR_FATAL;
+}
+
+static int bind_parse_quic_socket(char **args, int cur_arg, struct proxy *px,
+ struct bind_conf *conf, char **err)
+{
+ char *arg;
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing argument, use either connection or listener.", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ arg = args[cur_arg + 1];
+ if (strcmp(arg, "connection") == 0) {
+ conf->quic_mode = QUIC_SOCK_MODE_CONN;
+ }
+ else if (strcmp(arg, "listener") == 0) {
+ conf->quic_mode = QUIC_SOCK_MODE_LSTNR;
+ }
+ else {
+ memprintf(err, "'%s' : unknown argument, use either connection or listener.", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
/* QUIC-specific "bind" line keywords. The integer field appears to be the
 * number of arguments each keyword takes (it matches the parsers above —
 * confirm against struct bind_kw).
 */
static struct bind_kw_list bind_kws = { "QUIC", { }, {
	{ "quic-force-retry", bind_parse_quic_force_retry, 0 },
	{ "quic-cc-algo", bind_parse_quic_cc_algo, 1 },
	{ "quic-socket", bind_parse_quic_socket, 1 },
	{ NULL, NULL, 0 },
}};

/* register the bind keywords above at startup */
INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
+
+/* parse "tune.quic.socket-owner", accepts "listener" or "connection" */
+static int cfg_parse_quic_tune_socket_owner(char **args, int section_type,
+ struct proxy *curpx,
+ const struct proxy *defpx,
+ const char *file, int line, char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "connection") == 0) {
+ global.tune.options |= GTUNE_QUIC_SOCK_PER_CONN;
+ }
+ else if (strcmp(args[1], "listener") == 0) {
+ global.tune.options &= ~GTUNE_QUIC_SOCK_PER_CONN;
+ }
+ else {
+ memprintf(err, "'%s' expects either 'listener' or 'connection' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Must be used to parse tune.quic.* setting which requires a time
+ * as value.
+ * Return -1 on alert, or 0 if succeeded.
+ */
+static int cfg_parse_quic_time(char **args, int section_type,
+ struct proxy *curpx,
+ const struct proxy *defpx,
+ const char *file, int line, char **err)
+{
+ unsigned int time;
+ const char *res, *name, *value;
+ int prefix_len = strlen("tune.quic.");
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ name = args[0];
+ value = args[1];
+ res = parse_time_err(value, &time, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' "
+ "(maximum value is 2147483647 ms or ~24.8 days)", value, name);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' "
+ "(minimum non-null value is 1 ms)", value, name);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in '%s'", *res, name);
+ return -1;
+ }
+
+ if (strcmp(name + prefix_len, "frontend.max-idle-timeout") == 0)
+ global.tune.quic_frontend_max_idle_timeout = time;
+ else if (strcmp(name + prefix_len, "backend.max-idle-timeout") == 0)
+ global.tune.quic_backend_max_idle_timeout = time;
+ else {
+ memprintf(err, "'%s' keyword not unhandled (please report this bug).", args[0]);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Parse any tune.quic.* setting with strictly positive integer values.
+ * Return -1 on alert, or 0 if succeeded.
+ */
+static int cfg_parse_quic_tune_setting(char **args, int section_type,
+ struct proxy *curpx,
+ const struct proxy *defpx,
+ const char *file, int line, char **err)
+{
+ unsigned int arg = 0;
+ int prefix_len = strlen("tune.quic.");
+ const char *suffix;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) != 0)
+ arg = atoi(args[1]);
+
+ if (arg < 1) {
+ memprintf(err, "'%s' expects a positive integer.", args[0]);
+ return -1;
+ }
+
+ suffix = args[0] + prefix_len;
+ if (strcmp(suffix, "frontend.conn-tx-buffers.limit") == 0)
+ global.tune.quic_streams_buf = arg;
+ else if (strcmp(suffix, "frontend.max-streams-bidi") == 0)
+ global.tune.quic_frontend_max_streams_bidi = arg;
+ else if (strcmp(suffix, "max-frame-loss") == 0)
+ global.tune.quic_max_frame_loss = arg;
+ else if (strcmp(suffix, "reorder-ratio") == 0) {
+ if (arg > 100) {
+ memprintf(err, "'%s' expects an integer argument between 0 and 100.", args[0]);
+ return -1;
+ }
+
+ global.tune.quic_reorder_ratio = arg;
+ }
+ else if (strcmp(suffix, "retry-threshold") == 0)
+ global.tune.quic_retry_threshold = arg;
+ else {
+ memprintf(err, "'%s' keyword not unhandled (please report this bug).", args[0]);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* config parser for global "tune.quic.zero-copy-fwd-send" */
+static int cfg_parse_quic_zero_copy_fwd_snd(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_QUIC_SND;
+ else if (strcmp(args[1], "off") == 0)
+ global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_QUIC_SND;
+ else {
+ memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.quic.socket-owner", cfg_parse_quic_tune_socket_owner },
+ { CFG_GLOBAL, "tune.quic.backend.max-idle-timeou", cfg_parse_quic_time },
+ { CFG_GLOBAL, "tune.quic.frontend.conn-tx-buffers.limit", cfg_parse_quic_tune_setting },
+ { CFG_GLOBAL, "tune.quic.frontend.max-streams-bidi", cfg_parse_quic_tune_setting },
+ { CFG_GLOBAL, "tune.quic.frontend.max-idle-timeout", cfg_parse_quic_time },
+ { CFG_GLOBAL, "tune.quic.max-frame-loss", cfg_parse_quic_tune_setting },
+ { CFG_GLOBAL, "tune.quic.reorder-ratio", cfg_parse_quic_tune_setting },
+ { CFG_GLOBAL, "tune.quic.retry-threshold", cfg_parse_quic_tune_setting },
+ { CFG_GLOBAL, "tune.quic.zero-copy-fwd-send", cfg_parse_quic_zero_copy_fwd_snd },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/cfgparse-ssl.c b/src/cfgparse-ssl.c
new file mode 100644
index 0000000..5666336
--- /dev/null
+++ b/src/cfgparse-ssl.c
@@ -0,0 +1,2382 @@
+/*
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * Configuration parsing for SSL.
+ * This file is split in 3 parts:
+ * - global section parsing
+ * - bind keyword parsing
+ * - server keyword parsing
+ *
+ * Please insert the new keywords at the right place
+ */
+
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/base64.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/errors.h>
+#include <haproxy/listener.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/tools.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_ocsp.h>
+
+
+/****************** Global Section Parsing ********************************************/
+
/* Global directive handler (presumably for an "issuers-chain-path"-style
 * keyword — confirm in the registration table): scans the directory given as
 * first argument and feeds every regular file it contains to
 * ssl_load_global_issuer_from_BIO(). Returns 0 on success or if the same
 * path was already loaded, -1 on alert with <err> filled. Individual
 * unreadable or non-regular entries only emit a warning (at most), so one
 * bad file does not abort the whole scan.
 */
static int ssl_load_global_issuers_from_path(char **args, int section_type, struct proxy *curpx,
                                             const struct proxy *defpx, const char *file, int line,
                                             char **err)
{
	char *path;
	struct dirent **de_list;
	int i, n;
	struct stat buf;
	char *end;
	char fp[MAXPATHLEN+1];

	if (too_many_args(1, args, err, NULL))
		return -1;

	path = args[1];
	if (*path == 0 || stat(path, &buf)) {
		memprintf(err, "%sglobal statement '%s' expects a directory path as an argument.\n",
			  err && *err ? *err : "", args[0]);
		return -1;
	}
	if (S_ISDIR(buf.st_mode) == 0) {
		memprintf(err, "%sglobal statement '%s': %s is not a directory.\n",
			  err && *err ? *err : "", args[0], path);
		return -1;
	}

	/* strip trailing slashes, including first one */
	for (end = path + strlen(path) - 1; end >= path && *end == '/'; end--)
		*end = 0;
	/* path already parsed? */
	if (global_ssl.issuers_chain_path && strcmp(global_ssl.issuers_chain_path, path) == 0)
		return 0;
	/* overwrite old issuers_chain_path */
	free(global_ssl.issuers_chain_path);
	global_ssl.issuers_chain_path = strdup(path);
	ssl_free_global_issuers();

	/* alphasort gives a deterministic load order across runs */
	n = scandir(path, &de_list, 0, alphasort);
	if (n < 0) {
		memprintf(err, "%sglobal statement '%s': unable to scan directory '%s' : %s.\n",
			  err && *err ? *err : "", args[0], path, strerror(errno));
		return -1;
	}
	for (i = 0; i < n; i++) {
		struct dirent *de = de_list[i];
		BIO *in = NULL;
		char *warn = NULL;

		snprintf(fp, sizeof(fp), "%s/%s", path, de->d_name);
		/* the scandir() entry is no longer needed once the path is built */
		free(de);
		if (stat(fp, &buf) != 0) {
			ha_warning("unable to stat certificate from file '%s' : %s.\n", fp, strerror(errno));
			goto next;
		}
		if (!S_ISREG(buf.st_mode))
			goto next;

		/* read the file through an OpenSSL file BIO; failures are
		 * silently skipped (best effort)
		 */
		in = BIO_new(BIO_s_file());
		if (in == NULL)
			goto next;
		if (BIO_read_filename(in, fp) <= 0)
			goto next;
		ssl_load_global_issuer_from_BIO(in, fp, &warn);
		if (warn) {
			ha_warning("%s", warn);
			ha_free(&warn);
		}
	next:
		if (in)
			BIO_free(in);
	}
	free(de_list);

	return 0;
}
+
/* parse the "ssl-mode-async" keyword in global section.
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_ssl_async(char **args, int section_type, struct proxy *curpx,
                                      const struct proxy *defpx, const char *file, int line,
                                      char **err)
{
#ifdef SSL_MODE_ASYNC
	/* enable async mode; ssl_used_async_engines is set to nb_engines,
	 * presumably the count of engines initialized earlier — confirm
	 * against the ssl-engine handling.
	 */
	global_ssl.async = 1;
	global.ssl_used_async_engines = nb_engines;
	return 0;
#else
	/* SSL_MODE_ASYNC is only defined by SSL libraries with async support */
	memprintf(err, "'%s': openssl library does not support async mode", args[0]);
	return -1;
#endif
}
+
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
/* parse the "ssl-engine" keyword in global section. Expected formats:
 *   ssl-engine <engine_name>                      (all algorithms)
 *   ssl-engine <engine_name> algo <list of algo>
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_ssl_engine(char **args, int section_type, struct proxy *curpx,
                                       const struct proxy *defpx, const char *file, int line,
                                       char **err)
{
	char *algo;
	int ret = -1;

	if (*(args[1]) == 0) {
		memprintf(err, "global statement '%s' expects a valid engine name as an argument.", args[0]);
		return ret;
	}

	if (*(args[2]) == 0) {
		/* if no list of algorithms is given, it defaults to ALL */
		algo = strdup("ALL");
		goto add_engine;
	}

	/* otherwise the expected format is ssl-engine <engine_name> algo <list of algo> */
	if (strcmp(args[2], "algo") != 0) {
		memprintf(err, "global statement '%s' expects to have algo keyword.", args[0]);
		return ret;
	}

	if (*(args[3]) == 0) {
		memprintf(err, "global statement '%s' expects algorithm names as an argument.", args[0]);
		return ret;
	}
	algo = strdup(args[3]);

add_engine:
	/* BUG FIX: a failed strdup() was previously passed on as a NULL
	 * algorithm list to ssl_init_single_engine(); report the OOM instead.
	 */
	if (!algo) {
		memprintf(err, "out of memory while parsing global statement '%s'.", args[0]);
		return ret;
	}
	if (ssl_init_single_engine(args[1], algo)==0) {
		openssl_engines_initialized++;
		ret = 0;
	}
	free(algo);
	return ret;
}
#endif
+
+#ifdef HAVE_SSL_PROVIDERS
+/* parse the "ssl-propquery" keyword in global section.
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_ssl_propquery(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int ret = -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "global statement '%s' expects a property string as an argument.", args[0]);
+ return ret;
+ }
+
+ if (EVP_set_default_properties(NULL, args[1]))
+ ret = 0;
+
+ return ret;
+}
+
/* parse the "ssl-provider" keyword in global section: loads the named
 * provider through ssl_init_provider().
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_ssl_provider(char **args, int section_type, struct proxy *curpx,
                                         const struct proxy *defpx, const char *file, int line,
                                         char **err)
{
	if (*(args[1]) == 0) {
		memprintf(err, "global statement '%s' expects a valid engine provider name as an argument.", args[0]);
		return -1;
	}

	/* ssl_init_provider() returns 0 on success */
	return ssl_init_provider(args[1]) == 0 ? 0 : -1;
}
+
+/* parse the "ssl-provider-path" keyword in global section.
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_ssl_provider_path(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "global statement '%s' expects a directory path as an argument.", args[0]);
+ return -1;
+ }
+
+ OSSL_PROVIDER_set_default_search_path(NULL, args[1]);
+
+ return 0;
+}
+#endif
+
+/* parse the "ssl-default-bind-ciphers" / "ssl-default-server-ciphers" keywords
+ * in global section. Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_ciphers(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char **target;
+
+ target = (args[0][12] == 'b') ? &global_ssl.listen_default_ciphers : &global_ssl.connect_default_ciphers;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "global statement '%s' expects a cipher suite as an argument.", args[0]);
+ return -1;
+ }
+
+ free(*target);
+ *target = strdup(args[1]);
+ return 0;
+}
+
+/* parse the "ssl-default-bind-ciphersuites" / "ssl-default-server-ciphersuites" keywords
+ * in global section. Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_ciphersuites(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ char **target;
+
+ target = (args[0][12] == 'b') ? &global_ssl.listen_default_ciphersuites : &global_ssl.connect_default_ciphersuites;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "global statement '%s' expects a cipher suite as an argument.", args[0]);
+ return -1;
+ }
+
+ free(*target);
+ *target = strdup(args[1]);
+ return 0;
+#else /* ! HAVE_SSL_CTX_SET_CIPHERSUITES */
+ memprintf(err, "'%s' not supported for your SSL library (%s).", args[0], OPENSSL_VERSION_TEXT);
+ return -1;
+
+#endif
+}
+
#if defined(SSL_CTX_set1_curves_list)
/*
 * parse the "ssl-default-bind-curves" keyword in a global section.
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_curves(char **args, int section_type, struct proxy *curpx,
                                   const struct proxy *defpx, const char *file, int line,
                                   char **err)
{
	/* 'b' at offset 12 ("ssl-default-") selects the bind-side default */
	char **dst = (args[0][12] == 'b') ? &global_ssl.listen_default_curves
	                                  : &global_ssl.connect_default_curves;

	if (too_many_args(1, args, err, NULL))
		return -1;

	if (*(args[1]) == 0) {
		memprintf(err, "global statement '%s' expects a curves suite as an arguments.", args[0]);
		return -1;
	}

	free(*dst);
	*dst = strdup(args[1]);
	return 0;
}
#endif
+
#if defined(SSL_CTX_set1_sigalgs_list)
/*
 * parse the "ssl-default-bind-sigalgs" and "ssl-default-server-sigalgs" keyword in a global section.
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_sigalgs(char **args, int section_type, struct proxy *curpx,
                                    const struct proxy *defpx, const char *file, int line,
                                    char **err)
{
	char **target;

	/* "ssl-default-" is 12 characters: 'b' selects the bind variant */
	target = (args[0][12] == 'b') ? &global_ssl.listen_default_sigalgs : &global_ssl.connect_default_sigalgs;

	if (too_many_args(1, args, err, NULL))
		return -1;

	if (*(args[1]) == 0) {
		/* BUG FIX: the message was copy-pasted from the curves parser and
		 * wrongly asked for a "curves suite" on a sigalgs keyword.
		 */
		memprintf(err, "global statement '%s' expects a list of signature algorithms as an argument.", args[0]);
		return -1;
	}

	free(*target);
	*target = strdup(args[1]);
	return 0;
}
#endif
+
#if defined(SSL_CTX_set1_client_sigalgs_list)
/*
 * parse the "ssl-default-bind-client-sigalgs" keyword in a global section.
 * Returns <0 on alert, >0 on warning, 0 on success.
 */
static int ssl_parse_global_client_sigalgs(char **args, int section_type, struct proxy *curpx,
                                           const struct proxy *defpx, const char *file, int line,
                                           char **err)
{
	/* 'b' at offset 12 ("ssl-default-") selects the bind-side default */
	char **dst = (args[0][12] == 'b') ? &global_ssl.listen_default_client_sigalgs
	                                  : &global_ssl.connect_default_client_sigalgs;

	if (too_many_args(1, args, err, NULL))
		return -1;

	if (*(args[1]) == 0) {
		memprintf(err, "global statement '%s' expects signature algorithms as an arguments.", args[0]);
		return -1;
	}

	free(*dst);
	*dst = strdup(args[1]);
	return 0;
}
#endif
+
+/* parse various global tune.ssl settings consisting in positive integers.
+ * The keyword in args[0] selects which global field is written.
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_int(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int *target;
+
+ if (strcmp(args[0], "tune.ssl.cachesize") == 0)
+ target = &global.tune.sslcachesize;
+ else if (strcmp(args[0], "tune.ssl.maxrecord") == 0)
+ target = (int *)&global_ssl.max_record;
+ else if (strcmp(args[0], "tune.ssl.hard-maxrecord") == 0)
+ target = (int *)&global_ssl.hard_max_record;
+ else if (strcmp(args[0], "tune.ssl.ssl-ctx-cache-size") == 0)
+ target = &global_ssl.ctx_cache;
+ else if (strcmp(args[0], "maxsslconn") == 0)
+ target = &global.maxsslconn;
+ else if (strcmp(args[0], "tune.ssl.capture-buffer-size") == 0)
+ target = &global_ssl.capture_buffer_size;
+ else if (strcmp(args[0], "tune.ssl.capture-cipherlist-size") == 0) {
+ /* deprecated alias of capture-buffer-size, kept for compatibility */
+ target = &global_ssl.capture_buffer_size;
+ ha_warning("parsing [%s:%d]: '%s' is deprecated and will be removed in version 2.7. Please use 'tune.ssl.capture-buffer-size' instead.\n",
+ file, line, args[0]);
+ }
+ else {
+ /* message fix: the old text read "not unhandled", a double negative */
+ memprintf(err, "'%s' keyword not handled (please report this bug).", args[0]);
+ return -1;
+ }
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[0]);
+ return -1;
+ }
+
+ *target = atoi(args[1]);
+ if (*target < 0) {
+ memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+/* parse "tune.ssl.capture-buffer-size" (or its deprecated alias): delegate
+ * the integer parsing to ssl_parse_global_int(), then create the capture
+ * pool sized accordingly. Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_capture_buffer(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int ret;
+
+ ret = ssl_parse_global_int(args, section_type, curpx, defpx, file, line, err);
+ if (ret != 0)
+ return ret;
+
+ /* the pool size is derived from the parsed value, so the keyword
+ * cannot be set twice: the pool would already exist with the old size.
+ */
+ if (pool_head_ssl_capture) {
+ memprintf(err, "'%s' is already configured.", args[0]);
+ return -1;
+ }
+
+ pool_head_ssl_capture = create_pool("ssl-capture", sizeof(struct ssl_capture) + global_ssl.capture_buffer_size, MEM_F_SHARED);
+ if (!pool_head_ssl_capture) {
+ memprintf(err, "Out of memory error.");
+ return -1;
+ }
+ return 0;
+}
+
+/* parse "tune.ssl.keylog": toggle SSLKEYLOGFILE-style secret capture and
+ * lazily create the pools holding captured secrets. Only available when
+ * the SSL library provides the keylog callback (HAVE_SSL_KEYLOG).
+ * Returns <0 on alert, 0 on success.
+ */
+#ifdef HAVE_SSL_KEYLOG
+static int ssl_parse_global_keylog(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global_ssl.keylog = 1;
+ else if (strcmp(args[1], "off") == 0)
+ global_ssl.keylog = 0;
+ else {
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+
+ if (pool_head_ssl_keylog) /* already configured */
+ return 0;
+
+ pool_head_ssl_keylog = create_pool("ssl-keylogfile", sizeof(struct ssl_keylog), MEM_F_SHARED);
+ if (!pool_head_ssl_keylog) {
+ memprintf(err, "Out of memory error.");
+ return -1;
+ }
+
+ pool_head_ssl_keylog_str = create_pool("ssl-keylogfile-str", sizeof(char) * SSL_KEYLOG_MAX_SECRET_SIZE, MEM_F_SHARED);
+ if (!pool_head_ssl_keylog_str) {
+ memprintf(err, "Out of memory error.");
+ return -1;
+ }
+
+ return 0;
+}
+#else
+/* stub used when the library lacks keylog support: always an alert */
+static int ssl_parse_global_keylog(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ memprintf(err, "'%s' requires at least OpenSSL 1.1.1.", args[0]);
+ return -1;
+}
+#endif
+
+/* parse "ssl.force-private-cache": force the use of a process-private SSL
+ * session cache (no argument expected).
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_private_cache(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(0, args, err, NULL))
+ return -1;
+
+ global_ssl.private_cache = 1;
+ return 0;
+}
+
+/* parse "ssl.lifetime": SSL session lifetime expressed in seconds
+ * (parse_time_err() also accepts unit suffixes, TIME_UNIT_S being the
+ * default unit). Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_lifetime(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *res;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects ssl sessions <lifetime> in seconds as argument.", args[0]);
+ return -1;
+ }
+
+ /* res is NULL on success, a sentinel or the offending character on error */
+ res = parse_time_err(args[1], &global_ssl.life_time, TIME_UNIT_S);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to <%s> (maximum value is 2147483647 s or ~68 years).",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to <%s> (minimum non-null value is 1 s).",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in argument to <%s>.", *res, args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+#ifndef OPENSSL_NO_DH
+/* parse "ssl-dh-param-file": load global DH parameters from a PEM file.
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_dh_param_file(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects a file path as an argument.", args[0]);
+ return -1;
+ }
+
+ if (ssl_sock_load_global_dh_param_from_file(args[1])) {
+ memprintf(err, "'%s': unable to load DH parameters from file <%s>.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+/* parse "ssl.default-dh-param": maximum size in bits of ephemeral DH keys
+ * (values below 1024 are rejected as insecure).
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_default_dh(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[0]);
+ return -1;
+ }
+
+ global_ssl.default_dh_param = atoi(args[1]);
+ if (global_ssl.default_dh_param < 1024) {
+ memprintf(err, "'%s' expects a value >= 1024.", args[0]);
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+
+/*
+ * parse "ssl-load-extra-files".
+ * multiple arguments are allowed: "bundle", "sctl", "ocsp", "issuer", "all", "none"
+ * "none" and "all" are exclusive and must appear alone; the other flags
+ * accumulate into a SSL_GF_* bitmask stored in global_ssl.extra_files.
+ */
+static int ssl_parse_global_extra_files(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int i;
+ int gf = SSL_GF_NONE;
+
+ if (*(args[1]) == 0)
+ goto err_arg;
+
+ for (i = 1; *args[i]; i++) {
+
+ if (strcmp("bundle", args[i]) == 0) {
+ gf |= SSL_GF_BUNDLE;
+
+ } else if (strcmp("sctl", args[i]) == 0) {
+ gf |= SSL_GF_SCTL;
+
+ } else if (strcmp("ocsp", args[i]) == 0){
+ gf |= SSL_GF_OCSP;
+
+ } else if (strcmp("issuer", args[i]) == 0){
+ gf |= SSL_GF_OCSP_ISSUER;
+
+ } else if (strcmp("key", args[i]) == 0) {
+ gf |= SSL_GF_KEY;
+
+ } else if (strcmp("none", args[i]) == 0) {
+ /* only valid as the first and only flag */
+ if (gf != SSL_GF_NONE)
+ goto err_alone;
+ gf = SSL_GF_NONE;
+ i++;
+ break;
+
+ } else if (strcmp("all", args[i]) == 0) {
+ /* only valid as the first and only flag */
+ if (gf != SSL_GF_NONE)
+ goto err_alone;
+ gf = SSL_GF_ALL;
+ i++;
+ break;
+ } else {
+ goto err_arg;
+ }
+ }
+ /* break from loop but there are still arguments */
+ if (*args[i])
+ goto err_alone;
+
+ global_ssl.extra_files = gf;
+
+ return 0;
+
+err_alone:
+ memprintf(err, "'%s' 'none' and 'all' can be only used alone", args[0]);
+ return -1;
+
+err_arg:
+ memprintf(err, "'%s' expects one or multiple arguments (none, all, bundle, sctl, ocsp, issuer).", args[0]);
+ return -1;
+}
+
+
+/* parse 'ssl-load-extra-del-ext': strip the certificate file extension
+ * before looking for the extra files (no argument, simple flag).
+ */
+static int ssl_parse_global_extra_noext(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ global_ssl.extra_files_noext = 1;
+ return 0;
+}
+
+
+/***************************** Bind keyword Parsing ********************************************/
+
+/* for ca-file and ca-verify-file: resolve the CA file path (relative paths
+ * without a leading '/' or '@' are prefixed with global_ssl.ca_base) and
+ * load it into the CA store. <from_cli> is negated when passed to
+ * ssl_store_load_locations_file() — presumably to allow creation only from
+ * the configuration, not from the CLI (TODO confirm against that helper).
+ * Returns 0 on success, ERR_* flags with a message in <err> otherwise.
+ */
+static int ssl_bind_parse_ca_file_common(char **args, int cur_arg, char **ca_file_p, int from_cli, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CAfile path", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[cur_arg + 1] != '/') && (*args[cur_arg + 1] != '@') && global_ssl.ca_base)
+ memprintf(ca_file_p, "%s/%s", global_ssl.ca_base, args[cur_arg + 1]);
+ else
+ memprintf(ca_file_p, "%s", args[cur_arg + 1]);
+
+ if (!ssl_store_load_locations_file(*ca_file_p, !from_cli, CAFILE_CERT)) {
+ memprintf(err, "'%s' : unable to load %s", args[cur_arg], *ca_file_p);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+}
+
+/* parse the "ca-file" bind keyword: client-cert verification CA list */
+static int ssl_bind_parse_ca_file(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ return ssl_bind_parse_ca_file_common(args, cur_arg, &conf->ca_file, from_cli, err);
+}
+/* bind_conf-level wrapper (from_cli forced to 0) */
+static int bind_parse_ca_file(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_ca_file(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "ca-verify-file" bind keyword: extra CAs used for verification only */
+static int ssl_bind_parse_ca_verify_file(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ return ssl_bind_parse_ca_file_common(args, cur_arg, &conf->ca_verify_file, from_cli, err);
+}
+/* bind_conf-level wrapper (from_cli forced to 0) */
+static int bind_parse_ca_verify_file(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_ca_verify_file(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "ca-sign-file" bind keyword: CA used to sign generated
+ * certificates; relative paths are prefixed with global_ssl.ca_base.
+ * The file is only recorded here, not loaded.
+ */
+static int bind_parse_ca_sign_file(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CAfile path", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[cur_arg + 1] != '/') && (*args[cur_arg + 1] != '@') && global_ssl.ca_base)
+ memprintf(&conf->ca_sign_file, "%s/%s", global_ssl.ca_base, args[cur_arg + 1]);
+ else
+ memprintf(&conf->ca_sign_file, "%s", args[cur_arg + 1]);
+
+ return 0;
+}
+
+/* parse the "ca-sign-pass" bind keyword: passphrase of the signing CA key.
+ * Stored verbatim via memprintf (which allocates the destination string).
+ */
+static int bind_parse_ca_sign_pass(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CAkey password", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ memprintf(&conf->ca_sign_pass, "%s", args[cur_arg + 1]);
+ return 0;
+}
+
+/* parse the "ciphers" bind keyword: TLS <= 1.2 cipher list, stored as an
+ * owned copy (previous value freed so the keyword may be repeated).
+ */
+static int ssl_bind_parse_ciphers(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing cipher suite", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(conf->ciphers);
+ conf->ciphers = strdup(args[cur_arg + 1]);
+ return 0;
+}
+/* bind_conf-level wrapper */
+static int bind_parse_ciphers(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_ciphers(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "ciphersuites" bind keyword: TLS 1.3 ciphersuites, only when
+ * the library supports SSL_CTX_set_ciphersuites(); stored as an owned copy.
+ */
+static int ssl_bind_parse_ciphersuites(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing cipher suite", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(conf->ciphersuites);
+ conf->ciphersuites = strdup(args[cur_arg + 1]);
+ return 0;
+#else
+ memprintf(err, "'%s' keyword not supported for this SSL library version (%s).", args[cur_arg], OPENSSL_VERSION_TEXT);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* bind_conf-level wrapper */
+static int bind_parse_ciphersuites(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_ciphersuites(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "crt" bind keyword. Returns a set of ERR_* flags possibly with an error in <err>. */
+static int bind_parse_crt(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ char path[MAXPATHLEN];
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing certificate location", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* relative path: prefix with crt_base. The strlen check already
+ * rejects anything that would not fit, so the snprintf return-value
+ * test (note: '>' rather than '>=') can never fire on its own.
+ */
+ if ((*args[cur_arg + 1] != '/' ) && global_ssl.crt_base) {
+ if ((strlen(global_ssl.crt_base) + 1 + strlen(args[cur_arg + 1]) + 1) > sizeof(path) ||
+ snprintf(path, sizeof(path), "%s/%s", global_ssl.crt_base, args[cur_arg + 1]) > sizeof(path)) {
+ memprintf(err, "'%s' : path too long", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return ssl_sock_load_cert(path, conf, err);
+ }
+
+ return ssl_sock_load_cert(args[cur_arg + 1], conf, err);
+}
+
+/* parse the "crt-list" bind keyword. Returns a set of ERR_* flags possibly with an error in <err>. */
+static int bind_parse_crt_list(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int err_code;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing certificate location", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* on failure, prefix the loader's message with the keyword for context */
+ err_code = ssl_sock_load_cert_list_file(args[cur_arg + 1], 0, conf, px, err);
+ if (err_code)
+ memprintf(err, "'%s' : %s", args[cur_arg], *err);
+
+ return err_code;
+}
+
+/* parse the "crl-file" bind keyword: certificate revocation list; relative
+ * paths (no leading '/' or '@') are prefixed with global_ssl.ca_base, then
+ * the file is loaded into the CRL store. Requires CRL support in the lib.
+ */
+static int ssl_bind_parse_crl_file(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#ifndef X509_V_FLAG_CRL_CHECK
+ memprintf(err, "'%s' : library does not support CRL verify", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CRLfile path", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[cur_arg + 1] != '/') && (*args[cur_arg + 1] != '@') && global_ssl.ca_base)
+ memprintf(&conf->crl_file, "%s/%s", global_ssl.ca_base, args[cur_arg + 1]);
+ else
+ memprintf(&conf->crl_file, "%s", args[cur_arg + 1]);
+
+ if (!ssl_store_load_locations_file(conf->crl_file, !from_cli, CAFILE_CRL)) {
+ memprintf(err, "'%s' : unable to load %s", args[cur_arg], conf->crl_file);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+#endif
+}
+/* bind_conf-level wrapper */
+static int bind_parse_crl_file(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_crl_file(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "curves" bind keyword: elliptic curve list passed later to
+ * SSL_CTX_set1_curves_list(); stored as an owned copy.
+ */
+static int ssl_bind_parse_curves(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#if defined(SSL_CTX_set1_curves_list)
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing curve suite", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ /* free any previous value so that repeating the keyword does not leak
+ * (same pattern as the "ciphers" parser).
+ */
+ free(conf->curves);
+ conf->curves = strdup(args[cur_arg + 1]);
+ return 0;
+#else
+ memprintf(err, "'%s' : library does not support curve suite", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+/* bind_conf-level wrapper */
+static int bind_parse_curves(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_curves(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "sigalgs" bind keyword: signature algorithm list passed later
+ * to SSL_CTX_set1_sigalgs_list(); stored as an owned copy.
+ */
+static int ssl_bind_parse_sigalgs(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#if defined(SSL_CTX_set1_sigalgs_list)
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing signature algorithm list", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ /* free any previous value so that repeating the keyword does not leak */
+ free(conf->sigalgs);
+ conf->sigalgs = strdup(args[cur_arg + 1]);
+ return 0;
+#else
+ memprintf(err, "'%s' : library does not support setting signature algorithms", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+/* bind_conf-level wrapper */
+static int bind_parse_sigalgs(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_sigalgs(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "client-sigalgs" bind keyword: client signature algorithm list
+ * passed later to SSL_CTX_set1_client_sigalgs_list(); stored as an owned copy.
+ */
+static int ssl_bind_parse_client_sigalgs(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing signature algorithm list", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ /* free any previous value so that repeating the keyword does not leak */
+ free(conf->client_sigalgs);
+ conf->client_sigalgs = strdup(args[cur_arg + 1]);
+ return 0;
+#else
+ memprintf(err, "'%s' : library does not support setting signature algorithms", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+/* bind_conf-level wrapper */
+static int bind_parse_client_sigalgs(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_client_sigalgs(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+
+/* parse the "ecdhe" bind keyword: named curve for ephemeral ECDH; stored
+ * as an owned copy. Requires ECDH support in the SSL library.
+ */
+static int ssl_bind_parse_ecdhe(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#if !defined(SSL_CTX_set_tmp_ecdh)
+ memprintf(err, "'%s' : library does not support elliptic curve Diffie-Hellman (too old)", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#elif defined(OPENSSL_NO_ECDH)
+ memprintf(err, "'%s' : library does not support elliptic curve Diffie-Hellman (disabled via OPENSSL_NO_ECDH)", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing named curve", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* free any previous value so that repeating the keyword does not leak */
+ free(conf->ecdhe);
+ conf->ecdhe = strdup(args[cur_arg + 1]);
+
+ return 0;
+#endif
+}
+/* bind_conf-level wrapper */
+static int bind_parse_ecdhe(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_ecdhe(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "crt-ignore-err" and "ca-ignore-err" bind keywords: a comma
+ * separated list of X509 verify error IDs (numeric or symbolic constants),
+ * or "all", recorded in the corresponding ignore bitfield.
+ */
+static int bind_parse_ignore_err(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int code;
+ char *s1 = NULL, *s2 = NULL;
+ char *token = NULL;
+ char *p = args[cur_arg + 1];
+ char *str;
+ unsigned long long *ignerr = conf->crt_ignerr_bitfield;
+
+ if (!*p) {
+ memprintf(err, "'%s' : missing error IDs list", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* keyword selects which bitfield gets the IDs */
+ if (strcmp(args[cur_arg], "ca-ignore-err") == 0)
+ ignerr = conf->ca_ignerr_bitfield;
+
+ if (strcmp(p, "all") == 0) {
+ cert_ignerr_bitfield_set_all(ignerr);
+ return 0;
+ }
+
+ /* copy the string to be able to dump the complete one in case of
+ * error, because strtok_r is writing \0 inside. */
+ str = strdup(p);
+ if (!str) {
+ memprintf(err, "'%s' : Could not allocate memory", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ s1 = str;
+ while ((token = strtok_r(s1, ",", &s2))) {
+ s1 = NULL;
+ /* numeric tokens are raw IDs, others are X509_V_ERR_* names */
+ if (isdigit((int)*token)) {
+ code = atoi(token);
+ if ((code <= 0) || (code > SSL_MAX_VFY_ERROR_CODE)) {
+ memprintf(err, "'%s' : ID '%d' out of range (1..%d) in error IDs list '%s'",
+ args[cur_arg], code, SSL_MAX_VFY_ERROR_CODE, args[cur_arg + 1]);
+ free(str);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ } else {
+ code = x509_v_err_str_to_int(token);
+ if (code < 0) {
+ memprintf(err, "'%s' : error constant '%s' unknown in error IDs list '%s'",
+ args[cur_arg], token, args[cur_arg + 1]);
+ free(str);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ }
+ cert_ignerr_bitfield_set(ignerr, code);
+ }
+
+ free(str);
+ return 0;
+}
+
+/* parse tls_method_options "no-xxx" and "force-xxx": "no-" sets the
+ * version's disable flag, "force-" pins both min and max to that version.
+ * Returns 0 on success, ERR_* flags with a message in <err> otherwise.
+ */
+static int parse_tls_method_options(char *arg, struct tls_version_filter *methods, char **err)
+{
+ uint16_t v;
+ char *p;
+ p = strchr(arg, '-');
+ if (!p)
+ goto fail;
+ p++;
+ /* p now points past the first '-': the version suffix */
+ if (strcmp(p, "sslv3") == 0)
+ v = CONF_SSLV3;
+ else if (strcmp(p, "tlsv10") == 0)
+ v = CONF_TLSV10;
+ else if (strcmp(p, "tlsv11") == 0)
+ v = CONF_TLSV11;
+ else if (strcmp(p, "tlsv12") == 0)
+ v = CONF_TLSV12;
+ else if (strcmp(p, "tlsv13") == 0)
+ v = CONF_TLSV13;
+ else
+ goto fail;
+ if (!strncmp(arg, "no-", 3))
+ methods->flags |= methodVersions[v].flag;
+ else if (!strncmp(arg, "force-", 6))
+ methods->min = methods->max = v;
+ else
+ goto fail;
+ return 0;
+ fail:
+ memprintf(err, "'%s' : option not implemented", arg);
+ return ERR_ALERT | ERR_FATAL;
+}
+
+/* bind-side wrapper for the "no-xxx"/"force-xxx" keywords */
+static int bind_parse_tls_method_options(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return parse_tls_method_options(args[cur_arg], &conf->ssl_conf.ssl_methods, err);
+}
+
+/* server-side wrapper for the "no-xxx"/"force-xxx" keywords */
+static int srv_parse_tls_method_options(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ return parse_tls_method_options(args[*cur_arg], &newsrv->ssl_ctx.methods, err);
+}
+
+/* parse tls_method min/max: "ssl-min-ver" and "ssl-max-ver". The version
+ * name is looked up in methodVersions[]; v stays 0 (invalid) if not found.
+ * Returns 0 on success, ERR_* flags with a message in <err> otherwise.
+ */
+static int parse_tls_method_minmax(char **args, int cur_arg, struct tls_version_filter *methods, char **err)
+{
+ uint16_t i, v = 0;
+ char *argv = args[cur_arg + 1];
+ if (!*argv) {
+ memprintf(err, "'%s' : missing the ssl/tls version", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
+ if (strcmp(argv, methodVersions[i].name) == 0)
+ v = i;
+ if (!v) {
+ memprintf(err, "'%s' : unknown ssl/tls version", args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ if (strcmp("ssl-min-ver", args[cur_arg]) == 0)
+ methods->min = v;
+ else if (strcmp("ssl-max-ver", args[cur_arg]) == 0)
+ methods->max = v;
+ else {
+ memprintf(err, "'%s' : option not implemented", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+}
+
+/* crt-list variant: records the setting in ssl_methods_cfg, then mirrors
+ * it into the effective ssl_methods so later merging can tell explicit
+ * values apart from inherited ones.
+ */
+static int ssl_bind_parse_tls_method_minmax(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ int ret;
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x10101000L) && !defined(OPENSSL_IS_BORINGSSL)
+ ha_warning("crt-list: ssl-min-ver and ssl-max-ver are not supported with this Openssl version (skipped).\n");
+#endif
+ ret = parse_tls_method_minmax(args, cur_arg, &conf->ssl_methods_cfg, err);
+ if (ret != ERR_NONE)
+ return ret;
+
+ conf->ssl_methods.min = conf->ssl_methods_cfg.min;
+ conf->ssl_methods.max = conf->ssl_methods_cfg.max;
+
+ return ret;
+}
+/* bind-side wrapper */
+static int bind_parse_tls_method_minmax(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return parse_tls_method_minmax(args, cur_arg, &conf->ssl_conf.ssl_methods, err);
+}
+
+/* server-side wrapper */
+static int srv_parse_tls_method_minmax(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ return parse_tls_method_minmax(args, *cur_arg, &newsrv->ssl_ctx.methods, err);
+}
+
+/* parse the "no-tls-tickets" bind keyword: disable stateless TLS tickets */
+static int bind_parse_no_tls_tickets(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->ssl_options |= BC_SSL_O_NO_TLS_TICKETS;
+ return 0;
+}
+
+/* parse the "allow-0rtt" bind keyword: accept TLS 1.3 early data (crt-list level) */
+static int ssl_bind_parse_allow_0rtt(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ conf->early_data = 1;
+ return 0;
+}
+
+/* parse the "allow-0rtt" bind keyword (bind_conf level) */
+static int bind_parse_allow_0rtt(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->ssl_conf.early_data = 1;
+ return 0;
+}
+
+/* parse the "npn" bind keyword: converts a comma-separated protocol list
+ * into the wire format expected by the NPN callback, i.e. a sequence of
+ * (<len><name>) records, stored in conf->npn_str/npn_len.
+ */
+static int ssl_bind_parse_npn(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ char *p1, *p2;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing the comma-delimited NPN protocol suite", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(conf->npn_str);
+
+ /* the NPN string is built as a suite of (<len> <name>)*,
+ * so we reuse each comma to store the next <len> and need
+ * one more for the end of the string.
+ */
+ conf->npn_len = strlen(args[cur_arg + 1]) + 1;
+ conf->npn_str = calloc(1, conf->npn_len + 1);
+ if (!conf->npn_str) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* copy at offset 1, leaving byte 0 for the first length prefix */
+ memcpy(conf->npn_str + 1, args[cur_arg + 1], conf->npn_len);
+
+ /* replace commas with the name length */
+ p1 = conf->npn_str;
+ p2 = p1 + 1;
+ while (1) {
+ p2 = memchr(p1 + 1, ',', conf->npn_str + conf->npn_len - (p1 + 1));
+ if (!p2)
+ p2 = p1 + 1 + strlen(p1 + 1);
+
+ /* each NPN name length must fit in one byte */
+ if (p2 - (p1 + 1) > 255) {
+ *p2 = '\0';
+ memprintf(err, "'%s' : NPN protocol name too long : '%s'", args[cur_arg], p1 + 1);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ *p1 = p2 - (p1 + 1);
+ p1 = p2;
+
+ if (!*p2)
+ break;
+
+ *(p2++) = '\0';
+ }
+ return 0;
+#else
+ memprintf(err, "'%s' : library does not support TLS NPN extension", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* bind_conf-level wrapper */
+static int bind_parse_npn(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_npn(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+
+/* Parses an alpn string and converts it to the right format for the SSL api:
+ * a sequence of (<len><name>) records, as for NPN above. On success the
+ * caller owns *alpn_str (to be freed); on error both outputs stay NULL/0.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with a message in <err> otherwise.
+ */
+int ssl_sock_parse_alpn(char *arg, char **alpn_str, int *alpn_len, char **err)
+{
+ char *p1, *p2, *alpn = NULL;
+ int len, ret = 0;
+
+ *alpn_str = NULL;
+ *alpn_len = 0;
+
+ if (!*arg) {
+ memprintf(err, "missing the comma-delimited ALPN protocol suite");
+ goto error;
+ }
+
+ /* the ALPN string is built as a suite of (<len> <name>)*,
+ * so we reuse each comma to store the next <len> and need
+ * one more for the end of the string.
+ */
+ len = strlen(arg) + 1;
+ alpn = calloc(1, len+1);
+ if (!alpn) {
+ memprintf(err, "'%s' : out of memory", arg);
+ goto error;
+ }
+ /* copy at offset 1, leaving byte 0 for the first length prefix */
+ memcpy(alpn+1, arg, len);
+
+ /* replace commas with the name length */
+ p1 = alpn;
+ p2 = p1 + 1;
+ while (1) {
+ p2 = memchr(p1 + 1, ',', alpn + len - (p1 + 1));
+ if (!p2)
+ p2 = p1 + 1 + strlen(p1 + 1);
+
+ /* each ALPN name length must fit in one byte */
+ if (p2 - (p1 + 1) > 255) {
+ *p2 = '\0';
+ memprintf(err, "ALPN protocol name too long : '%s'", p1 + 1);
+ goto error;
+ }
+
+ *p1 = p2 - (p1 + 1);
+ p1 = p2;
+
+ if (!*p2)
+ break;
+
+ *(p2++) = '\0';
+ }
+
+ *alpn_str = alpn;
+ *alpn_len = len;
+
+ out:
+ return ret;
+
+ error:
+ free(alpn);
+ ret = ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* parse the "alpn" bind keyword: delegate the conversion to
+ * ssl_sock_parse_alpn(), prefixing any error with the keyword name.
+ */
+static int ssl_bind_parse_alpn(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ int ret;
+
+ /* drop any previous value; the parser allocates a fresh one */
+ free(conf->alpn_str);
+
+ ret = ssl_sock_parse_alpn(args[cur_arg + 1], &conf->alpn_str, &conf->alpn_len, err);
+ if (ret)
+ memprintf(err, "'%s' : %s", args[cur_arg], *err);
+ return ret;
+#else
+ memprintf(err, "'%s' : library does not support TLS ALPN extension", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* bind_conf-level wrapper */
+static int bind_parse_alpn(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_alpn(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "ssl" bind keyword: enable SSL on the listener and inherit the
+ * global "ssl-default-bind-*" settings for every field the bind line did
+ * not set explicitly.
+ */
+static int bind_parse_ssl(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->options |= BC_O_USE_SSL;
+
+ if (global_ssl.listen_default_ciphers && !conf->ssl_conf.ciphers)
+ conf->ssl_conf.ciphers = strdup(global_ssl.listen_default_ciphers);
+#if defined(SSL_CTX_set1_curves_list)
+ if (global_ssl.listen_default_curves && !conf->ssl_conf.curves)
+ conf->ssl_conf.curves = strdup(global_ssl.listen_default_curves);
+#endif
+#if defined(SSL_CTX_set1_sigalgs_list)
+ if (global_ssl.listen_default_sigalgs && !conf->ssl_conf.sigalgs)
+ conf->ssl_conf.sigalgs = strdup(global_ssl.listen_default_sigalgs);
+#endif
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (global_ssl.listen_default_ciphersuites && !conf->ssl_conf.ciphersuites)
+ conf->ssl_conf.ciphersuites = strdup(global_ssl.listen_default_ciphersuites);
+#endif
+ /* option flags are OR'ed; min/max versions only default when unset */
+ conf->ssl_options |= global_ssl.listen_default_ssloptions;
+ conf->ssl_conf.ssl_methods.flags |= global_ssl.listen_default_sslmethods.flags;
+ if (!conf->ssl_conf.ssl_methods.min)
+ conf->ssl_conf.ssl_methods.min = global_ssl.listen_default_sslmethods.min;
+ if (!conf->ssl_conf.ssl_methods.max)
+ conf->ssl_conf.ssl_methods.max = global_ssl.listen_default_sslmethods.max;
+
+ return 0;
+}
+
+/* parse the "prefer-client-ciphers" bind keyword: honour the client's
+ * cipher preference order instead of the server's.
+ */
+static int bind_parse_pcc(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->ssl_options |= BC_SSL_O_PREF_CLIE_CIPH;
+ return 0;
+}
+
+/* parse the "generate-certificates" bind keyword: enable on-the-fly
+ * certificate generation when supported by the SSL library.
+ * NOTE(review): the unsupported branch fills <err> but still returns 0 —
+ * presumably treated as a warning by the caller; confirm against the
+ * keyword dispatch code.
+ */
+static int bind_parse_generate_certs(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
+ conf->options |= BC_O_GENERATE_CERTS;
+#else
+ memprintf(err, "%sthis version of openssl cannot generate SSL certificates.\n",
+ err && *err ? *err : "");
+#endif
+ return 0;
+}
+
+/* parse the "strict-sni" bind keyword: refuse handshakes with no matching certificate */
+static int bind_parse_strict_sni(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->strict_sni = 1;
+ return 0;
+}
+
+/* parse the "tls-ticket-keys" bind keyword: load base64-encoded ticket keys
+ * (one per line) from a file into a shared, refcounted tls_keys_ref. All
+ * keys in one file must have the same size (128 or 256 bit layout) and at
+ * least TLS_TICKETS_NO keys must be supplied.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with a message in <err> otherwise.
+ */
+static int bind_parse_tls_ticket_keys(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+ FILE *f = NULL;
+ int i = 0;
+ char thisline[LINESIZE];
+ struct tls_keys_ref *keys_ref = NULL;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing TLS ticket keys file path", args[cur_arg]);
+ goto fail;
+ }
+
+ /* the same file may be shared between several bind lines */
+ keys_ref = tlskeys_ref_lookup(args[cur_arg + 1]);
+ if (keys_ref) {
+ keys_ref->refcount++;
+ conf->keys_ref = keys_ref;
+ return 0;
+ }
+
+ keys_ref = calloc(1, sizeof(*keys_ref));
+ if (!keys_ref) {
+ memprintf(err, "'%s' : allocation error", args[cur_arg+1]);
+ goto fail;
+ }
+
+ keys_ref->tlskeys = malloc(TLS_TICKETS_NO * sizeof(union tls_sess_key));
+ if (!keys_ref->tlskeys) {
+ memprintf(err, "'%s' : allocation error", args[cur_arg+1]);
+ goto fail;
+ }
+
+ if ((f = fopen(args[cur_arg + 1], "r")) == NULL) {
+ memprintf(err, "'%s' : unable to load ssl tickets keys file", args[cur_arg+1]);
+ goto fail;
+ }
+
+ keys_ref->filename = strdup(args[cur_arg + 1]);
+ if (!keys_ref->filename) {
+ memprintf(err, "'%s' : allocation error", args[cur_arg+1]);
+ goto fail;
+ }
+
+ keys_ref->key_size_bits = 0;
+ while (fgets(thisline, sizeof(thisline), f) != NULL) {
+ int len = strlen(thisline);
+ int dec_size;
+
+ /* Strip newline characters from the end. The len > 0 guards
+ * fix an out-of-bounds read: a line containing only "\n" left
+ * len at 0 after the first strip, and thisline[len - 1] then
+ * read one byte before the buffer.
+ */
+ if (len > 0 && thisline[len - 1] == '\n')
+ thisline[--len] = 0;
+
+ if (len > 0 && thisline[len - 1] == '\r')
+ thisline[--len] = 0;
+
+ dec_size = base64dec(thisline, len, (char *) (keys_ref->tlskeys + i % TLS_TICKETS_NO), sizeof(union tls_sess_key));
+ if (dec_size < 0) {
+ memprintf(err, "'%s' : unable to decode base64 key on line %d", args[cur_arg+1], i + 1);
+ goto fail;
+ }
+ else if (!keys_ref->key_size_bits && (dec_size == sizeof(struct tls_sess_key_128))) {
+ /* first key decides the key size for the whole file */
+ keys_ref->key_size_bits = 128;
+ }
+ else if (!keys_ref->key_size_bits && (dec_size == sizeof(struct tls_sess_key_256))) {
+ keys_ref->key_size_bits = 256;
+ }
+ else if (((dec_size != sizeof(struct tls_sess_key_128)) && (dec_size != sizeof(struct tls_sess_key_256)))
+ || ((dec_size == sizeof(struct tls_sess_key_128) && (keys_ref->key_size_bits != 128)))
+ || ((dec_size == sizeof(struct tls_sess_key_256) && (keys_ref->key_size_bits != 256)))) {
+ memprintf(err, "'%s' : wrong sized key on line %d", args[cur_arg+1], i + 1);
+ goto fail;
+ }
+ i++;
+ }
+
+ if (i < TLS_TICKETS_NO) {
+ memprintf(err, "'%s' : please supply at least %d keys in the tls-tickets-file", args[cur_arg+1], TLS_TICKETS_NO);
+ goto fail;
+ }
+
+ fclose(f);
+
+ /* Use penultimate key for encryption, handle when TLS_TICKETS_NO = 1 */
+ i -= 2;
+ keys_ref->tls_ticket_enc_index = i < 0 ? 0 : i % TLS_TICKETS_NO;
+ keys_ref->unique_id = -1;
+ keys_ref->refcount = 1;
+ HA_RWLOCK_INIT(&keys_ref->lock);
+ conf->keys_ref = keys_ref;
+
+ LIST_INSERT(&tlskeys_reference, &keys_ref->list);
+
+ return 0;
+
+ fail:
+ if (f)
+ fclose(f);
+ if (keys_ref) {
+ free(keys_ref->filename);
+ free(keys_ref->tlskeys);
+ free(keys_ref);
+ }
+ return ERR_ALERT | ERR_FATAL;
+
+#else
+ memprintf(err, "'%s' : TLS ticket callback extension not supported", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif /* SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB */
+}
+
+/* parse the "verify" bind keyword: client certificate verification mode,
+ * one of "none", "optional" or "required".
+ */
+static int ssl_bind_parse_verify(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing verify method", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[cur_arg + 1], "none") == 0)
+ conf->verify = SSL_SOCK_VERIFY_NONE;
+ else if (strcmp(args[cur_arg + 1], "optional") == 0)
+ conf->verify = SSL_SOCK_VERIFY_OPTIONAL;
+ else if (strcmp(args[cur_arg + 1], "required") == 0)
+ conf->verify = SSL_SOCK_VERIFY_REQUIRED;
+ else {
+ memprintf(err, "'%s' : unknown verify method '%s', only 'none', 'optional', and 'required' are supported\n",
+ args[cur_arg], args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+/* bind_conf-level wrapper */
+static int bind_parse_verify(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_verify(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+/* parse the "no-alpn" ssl-bind keyword, storing an empty ALPN string */
+static int ssl_bind_parse_no_alpn(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ free(conf->alpn_str);
+ conf->alpn_len = 0;
+ conf->alpn_str = strdup("");
+
+ if (!conf->alpn_str) {
+ memprintf(err, "'%s' : out of memory", *args);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+}
+
+/* parse the "no-alpn" bind keyword, storing an empty ALPN string */
+static int bind_parse_no_alpn(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_no_alpn(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+
+/* parse the "no-ca-names" bind keyword */
+static int ssl_bind_parse_no_ca_names(char **args, int cur_arg, struct proxy *px, struct ssl_bind_conf *conf, int from_cli, char **err)
+{
+ conf->no_ca_names = 1;
+ return 0;
+}
+
+static int bind_parse_no_ca_names(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ return ssl_bind_parse_no_ca_names(args, cur_arg, px, &conf->ssl_conf, 0, err);
+}
+
+
+static int ssl_bind_parse_ocsp_update(char **args, int cur_arg, struct proxy *px,
+ struct ssl_bind_conf *ssl_conf, int from_cli, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : expecting <on|off>", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[cur_arg + 1], "on") == 0)
+ ssl_conf->ocsp_update = SSL_SOCK_OCSP_UPDATE_ON;
+ else if (strcmp(args[cur_arg + 1], "off") == 0)
+ ssl_conf->ocsp_update = SSL_SOCK_OCSP_UPDATE_OFF;
+ else {
+ memprintf(err, "'%s' : expecting <on|off>", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (ssl_conf->ocsp_update == SSL_SOCK_OCSP_UPDATE_ON) {
+ /* We might need to create the main ocsp update task */
+ int ret = ssl_create_ocsp_update_task(err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/***************************** "server" keywords Parsing ********************************************/
+
+/* parse the "npn" bind keyword */
+static int srv_parse_npn(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ char *p1, *p2;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing the comma-delimited NPN protocol suite", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(newsrv->ssl_ctx.npn_str);
+
+ /* the NPN string is built as a suite of (<len> <name>)*,
+ * so we reuse each comma to store the next <len> and need
+ * one more for the end of the string.
+ */
+ newsrv->ssl_ctx.npn_len = strlen(args[*cur_arg + 1]) + 1;
+ newsrv->ssl_ctx.npn_str = calloc(1, newsrv->ssl_ctx.npn_len + 1);
+ if (!newsrv->ssl_ctx.npn_str) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ memcpy(newsrv->ssl_ctx.npn_str + 1, args[*cur_arg + 1],
+ newsrv->ssl_ctx.npn_len);
+
+ /* replace commas with the name length */
+ p1 = newsrv->ssl_ctx.npn_str;
+ p2 = p1 + 1;
+ while (1) {
+ p2 = memchr(p1 + 1, ',', newsrv->ssl_ctx.npn_str +
+ newsrv->ssl_ctx.npn_len - (p1 + 1));
+ if (!p2)
+ p2 = p1 + 1 + strlen(p1 + 1);
+
+ if (p2 - (p1 + 1) > 255) {
+ *p2 = '\0';
+ memprintf(err, "'%s' : NPN protocol name too long : '%s'", args[*cur_arg], p1 + 1);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ *p1 = p2 - (p1 + 1);
+ p1 = p2;
+
+ if (!*p2)
+ break;
+
+ *(p2++) = '\0';
+ }
+ return 0;
+#else
+ memprintf(err, "'%s' : library does not support TLS NPN extension", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+static int parse_alpn(char *alpn, char **out_alpn_str, int *out_alpn_len, char **err)
+{
+ free(*out_alpn_str);
+ return ssl_sock_parse_alpn(alpn, out_alpn_str, out_alpn_len, err);
+}
+#endif
+
+/* parse the "alpn" server keyword */
+static int srv_parse_alpn(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ int ret = parse_alpn(args[*cur_arg + 1],
+ &newsrv->ssl_ctx.alpn_str,
+ &newsrv->ssl_ctx.alpn_len, err);
+ if (ret)
+ memprintf(err, "'%s' : %s", args[*cur_arg], *err);
+ return ret;
+#else
+ memprintf(err, "'%s' : library does not support TLS ALPN extension", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* parse the "check-alpn" server keyword */
+static int srv_parse_check_alpn(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ int ret = parse_alpn(args[*cur_arg + 1],
+ &newsrv->check.alpn_str,
+ &newsrv->check.alpn_len, err);
+ if (ret)
+ memprintf(err, "'%s' : %s", args[*cur_arg], *err);
+ return ret;
+#else
+ memprintf(err, "'%s' : library does not support TLS ALPN extension", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* parse the "ca-file" server keyword */
+static int srv_parse_ca_file(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ const int create_if_none = newsrv->flags & SRV_F_DYNAMIC ? 0 : 1;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CAfile path", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[*cur_arg + 1] != '/') && (*args[*cur_arg + 1] != '@') && global_ssl.ca_base)
+ memprintf(&newsrv->ssl_ctx.ca_file, "%s/%s", global_ssl.ca_base, args[*cur_arg + 1]);
+ else
+ memprintf(&newsrv->ssl_ctx.ca_file, "%s", args[*cur_arg + 1]);
+
+ if (!ssl_store_load_locations_file(newsrv->ssl_ctx.ca_file, create_if_none, CAFILE_CERT)) {
+ memprintf(err, "'%s' : unable to load %s", args[*cur_arg], newsrv->ssl_ctx.ca_file);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "check-sni" server keyword */
+static int srv_parse_check_sni(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing SNI", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->check.sni = strdup(args[*cur_arg + 1]);
+ if (!newsrv->check.sni) {
+ memprintf(err, "'%s' : failed to allocate memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+
+}
+
+/* common function to init ssl_ctx */
+static int ssl_sock_init_srv(struct server *s)
+{
+ if (global_ssl.connect_default_ciphers && !s->ssl_ctx.ciphers)
+ s->ssl_ctx.ciphers = strdup(global_ssl.connect_default_ciphers);
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (global_ssl.connect_default_ciphersuites && !s->ssl_ctx.ciphersuites) {
+ s->ssl_ctx.ciphersuites = strdup(global_ssl.connect_default_ciphersuites);
+ if (!s->ssl_ctx.ciphersuites)
+ return 1;
+ }
+#endif
+ s->ssl_ctx.options |= global_ssl.connect_default_ssloptions;
+ s->ssl_ctx.methods.flags |= global_ssl.connect_default_sslmethods.flags;
+
+ if (!s->ssl_ctx.methods.min)
+ s->ssl_ctx.methods.min = global_ssl.connect_default_sslmethods.min;
+
+ if (!s->ssl_ctx.methods.max)
+ s->ssl_ctx.methods.max = global_ssl.connect_default_sslmethods.max;
+
+#if defined(SSL_CTX_set1_sigalgs_list)
+ if (global_ssl.connect_default_sigalgs && !s->ssl_ctx.sigalgs) {
+ s->ssl_ctx.sigalgs = strdup(global_ssl.connect_default_sigalgs);
+ if (!s->ssl_ctx.sigalgs)
+ return 1;
+ }
+#endif
+
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+ if (global_ssl.connect_default_client_sigalgs && !s->ssl_ctx.client_sigalgs) {
+ s->ssl_ctx.client_sigalgs = strdup(global_ssl.connect_default_client_sigalgs);
+ if (!s->ssl_ctx.client_sigalgs)
+ return 1;
+ }
+#endif
+
+#if defined(SSL_CTX_set1_curves_list)
+ if (global_ssl.connect_default_curves && !s->ssl_ctx.curves) {
+ s->ssl_ctx.curves = strdup(global_ssl.connect_default_curves);
+ if (!s->ssl_ctx.curves)
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* parse the "check-ssl" server keyword */
+static int srv_parse_check_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->check.use_ssl = 1;
+ if (ssl_sock_init_srv(newsrv)) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "ciphers" server keyword */
+static int srv_parse_ciphers(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing cipher suite", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(newsrv->ssl_ctx.ciphers);
+ newsrv->ssl_ctx.ciphers = strdup(args[*cur_arg + 1]);
+
+ if (!newsrv->ssl_ctx.ciphers) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "ciphersuites" server keyword */
+static int srv_parse_ciphersuites(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing cipher suite", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(newsrv->ssl_ctx.ciphersuites);
+ newsrv->ssl_ctx.ciphersuites = strdup(args[*cur_arg + 1]);
+
+ if (!newsrv->ssl_ctx.ciphersuites) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+#else /* ! HAVE_SSL_CTX_SET_CIPHERSUITES */
+ memprintf(err, "'%s' not supported for your SSL library (%s).", args[*cur_arg], OPENSSL_VERSION_TEXT);
+ return ERR_ALERT | ERR_FATAL;
+
+#endif
+}
+
+/* parse the "client-sigalgs" server keyword */
+static int srv_parse_client_sigalgs(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifndef SSL_CTX_set1_client_sigalgs_list
+ memprintf(err, "'%s' : library does not support setting signature algorithms", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' : missing signature algorithm list", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->ssl_ctx.client_sigalgs = strdup(arg);
+ if (!newsrv->ssl_ctx.client_sigalgs) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+#endif
+}
+
+
+/* parse the "crl-file" server keyword */
+static int srv_parse_crl_file(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifndef X509_V_FLAG_CRL_CHECK
+ memprintf(err, "'%s' : library does not support CRL verify", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ const int create_if_none = newsrv->flags & SRV_F_DYNAMIC ? 0 : 1;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing CRLfile path", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[*cur_arg + 1] != '/') && (*args[*cur_arg + 1] != '@') && global_ssl.ca_base)
+ memprintf(&newsrv->ssl_ctx.crl_file, "%s/%s", global_ssl.ca_base, args[*cur_arg + 1]);
+ else
+ memprintf(&newsrv->ssl_ctx.crl_file, "%s", args[*cur_arg + 1]);
+
+ if (!ssl_store_load_locations_file(newsrv->ssl_ctx.crl_file, create_if_none, CAFILE_CRL)) {
+ memprintf(err, "'%s' : unable to load %s", args[*cur_arg], newsrv->ssl_ctx.crl_file);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+#endif
+}
+
+/* parse the "curves" server keyword */
+static int srv_parse_curves(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifndef SSL_CTX_set1_curves_list
+ memprintf(err, "'%s' : library does not support setting curves list", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' : missing curves list", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->ssl_ctx.curves = strdup(arg);
+ if (!newsrv->ssl_ctx.curves) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+#endif
+}
+
+/* parse the "crt" server keyword */
+static int srv_parse_crt(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing certificate file path", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if ((*args[*cur_arg + 1] != '/') && global_ssl.crt_base)
+ memprintf(&newsrv->ssl_ctx.client_crt, "%s/%s", global_ssl.crt_base, args[*cur_arg + 1]);
+ else
+ memprintf(&newsrv->ssl_ctx.client_crt, "%s", args[*cur_arg + 1]);
+
+ return 0;
+}
+
+/* parse the "no-check-ssl" server keyword */
+static int srv_parse_no_check_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->check.use_ssl = -1;
+ ha_free(&newsrv->ssl_ctx.ciphers);
+ newsrv->ssl_ctx.options &= ~global_ssl.connect_default_ssloptions;
+ return 0;
+}
+
+/* parse the "no-send-proxy-v2-ssl" server keyword */
+static int srv_parse_no_send_proxy_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->pp_opts &= ~SRV_PP_V2;
+ newsrv->pp_opts &= ~SRV_PP_V2_SSL;
+ return 0;
+}
+
+/* parse the "no-send-proxy-v2-ssl-cn" server keyword */
+static int srv_parse_no_send_proxy_cn(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->pp_opts &= ~SRV_PP_V2;
+ newsrv->pp_opts &= ~SRV_PP_V2_SSL;
+ newsrv->pp_opts &= ~SRV_PP_V2_SSL_CN;
+ return 0;
+}
+
+/* parse the "no-ssl" server keyword */
+static int srv_parse_no_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ /* if default-server have use_ssl, prepare ssl settings */
+ if (newsrv->use_ssl == 1) {
+ if (ssl_sock_init_srv(newsrv)) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else {
+ ha_free(&newsrv->ssl_ctx.ciphers);
+ }
+ newsrv->use_ssl = -1;
+ return 0;
+}
+
+/* parse the "allow-0rtt" server keyword */
+static int srv_parse_allow_0rtt(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->ssl_ctx.options |= SRV_SSL_O_EARLY_DATA;
+ return 0;
+}
+
+/* parse the "no-ssl-reuse" server keyword */
+static int srv_parse_no_ssl_reuse(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->ssl_ctx.options |= SRV_SSL_O_NO_REUSE;
+ return 0;
+}
+
+/* parse the "no-tls-tickets" server keyword */
+static int srv_parse_no_tls_tickets(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->ssl_ctx.options |= SRV_SSL_O_NO_TLS_TICKETS;
+ return 0;
+}
+/* parse the "send-proxy-v2-ssl" server keyword */
+static int srv_parse_send_proxy_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->pp_opts |= SRV_PP_V2;
+ newsrv->pp_opts |= SRV_PP_V2_SSL;
+ return 0;
+}
+
+/* parse the "send-proxy-v2-ssl-cn" server keyword */
+static int srv_parse_send_proxy_cn(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->pp_opts |= SRV_PP_V2;
+ newsrv->pp_opts |= SRV_PP_V2_SSL;
+ newsrv->pp_opts |= SRV_PP_V2_SSL_CN;
+ return 0;
+}
+
+/* parse the "sigalgs" server keyword */
+static int srv_parse_sigalgs(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifndef SSL_CTX_set1_sigalgs_list
+ memprintf(err, "'%s' : library does not support setting signature algorithms", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' : missing signature algorithm list", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->ssl_ctx.sigalgs = strdup(arg);
+ if (!newsrv->ssl_ctx.sigalgs) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+#endif
+}
+
+/* parse the "sni" server keyword */
+static int srv_parse_sni(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+#ifndef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ memprintf(err, "'%s' : the current SSL library doesn't support the SNI TLS extension", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#else
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' : missing sni expression", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(newsrv->sni_expr);
+ newsrv->sni_expr = strdup(arg);
+ if (!newsrv->sni_expr) {
+ memprintf(err, "out of memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+#endif
+}
+
+/* parse the "ssl" server keyword */
+static int srv_parse_ssl(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->use_ssl = 1;
+ if (ssl_sock_init_srv(newsrv)) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "ssl-reuse" server keyword */
+static int srv_parse_ssl_reuse(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->ssl_ctx.options &= ~SRV_SSL_O_NO_REUSE;
+ return 0;
+}
+
+/* parse the "tls-tickets" server keyword */
+static int srv_parse_tls_tickets(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ newsrv->ssl_ctx.options &= ~SRV_SSL_O_NO_TLS_TICKETS;
+ return 0;
+}
+
+/* parse the "verify" server keyword */
+static int srv_parse_verify(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing verify method", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[*cur_arg + 1], "none") == 0)
+ newsrv->ssl_ctx.verify = SSL_SOCK_VERIFY_NONE;
+ else if (strcmp(args[*cur_arg + 1], "required") == 0)
+ newsrv->ssl_ctx.verify = SSL_SOCK_VERIFY_REQUIRED;
+ else {
+ memprintf(err, "'%s' : unknown verify method '%s', only 'none' and 'required' are supported\n",
+ args[*cur_arg], args[*cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "verifyhost" server keyword */
+static int srv_parse_verifyhost(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing hostname to verify against", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ free(newsrv->ssl_ctx.verify_host);
+ newsrv->ssl_ctx.verify_host = strdup(args[*cur_arg + 1]);
+
+ if (!newsrv->ssl_ctx.verify_host) {
+ memprintf(err, "'%s' : not enough memory", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "ssl-default-bind-options" keyword in global section */
+static int ssl_parse_default_bind_options(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err) {
+ int i = 1;
+
+ if (*(args[i]) == 0) {
+ memprintf(err, "global statement '%s' expects an option as an argument.", args[0]);
+ return -1;
+ }
+ while (*(args[i])) {
+ if (strcmp(args[i], "no-tls-tickets") == 0)
+ global_ssl.listen_default_ssloptions |= BC_SSL_O_NO_TLS_TICKETS;
+ else if (strcmp(args[i], "prefer-client-ciphers") == 0)
+ global_ssl.listen_default_ssloptions |= BC_SSL_O_PREF_CLIE_CIPH;
+ else if (strcmp(args[i], "ssl-min-ver") == 0 || strcmp(args[i], "ssl-max-ver") == 0) {
+ if (!parse_tls_method_minmax(args, i, &global_ssl.listen_default_sslmethods, err))
+ i++;
+ else {
+ memprintf(err, "%s on global statement '%s'.", *err, args[0]);
+ return -1;
+ }
+ }
+ else if (parse_tls_method_options(args[i], &global_ssl.listen_default_sslmethods, err)) {
+ memprintf(err, "unknown option '%s' on global statement '%s'.", args[i], args[0]);
+ return -1;
+ }
+ i++;
+ }
+ return 0;
+}
+
+/* parse the "ssl-default-server-options" keyword in global section */
+static int ssl_parse_default_server_options(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err) {
+ int i = 1;
+
+ if (*(args[i]) == 0) {
+ memprintf(err, "global statement '%s' expects an option as an argument.", args[0]);
+ return -1;
+ }
+ while (*(args[i])) {
+ if (strcmp(args[i], "no-tls-tickets") == 0)
+ global_ssl.connect_default_ssloptions |= SRV_SSL_O_NO_TLS_TICKETS;
+ else if (strcmp(args[i], "ssl-min-ver") == 0 || strcmp(args[i], "ssl-max-ver") == 0) {
+ if (!parse_tls_method_minmax(args, i, &global_ssl.connect_default_sslmethods, err))
+ i++;
+ else {
+ memprintf(err, "%s on global statement '%s'.", *err, args[0]);
+ return -1;
+ }
+ }
+ else if (parse_tls_method_options(args[i], &global_ssl.connect_default_sslmethods, err)) {
+ memprintf(err, "unknown option '%s' on global statement '%s'.", args[i], args[0]);
+ return -1;
+ }
+ i++;
+ }
+ return 0;
+}
+
+/* parse the "ca-base" / "crt-base" keywords in global section.
+ * Returns <0 on alert, >0 on warning, 0 on success.
+ */
+static int ssl_parse_global_ca_crt_base(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char **target;
+
+ target = (args[0][1] == 'a') ? &global_ssl.ca_base : &global_ssl.crt_base;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*target) {
+ memprintf(err, "'%s' already specified.", args[0]);
+ return -1;
+ }
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "global statement '%s' expects a directory path as an argument.", args[0]);
+ return -1;
+ }
+ *target = strdup(args[1]);
+ return 0;
+}
+
+/* parse the "ssl-skip-self-issued-ca" keyword in global section. */
+static int ssl_parse_skip_self_issued_ca(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+#ifdef SSL_CTX_build_cert_chain
+ global_ssl.skip_self_issued_ca = 1;
+ return 0;
+#else
+ memprintf(err, "global statement '%s' requires at least OpenSSL 1.0.2.", args[0]);
+ return -1;
+#endif
+}
+
+
+static int ssl_parse_global_ocsp_maxdelay(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int value = 0;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[0]);
+ return -1;
+ }
+
+ value = atoi(args[1]);
+ if (value < 0) {
+ memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+ return -1;
+ }
+
+ if (global_ssl.ocsp_update.delay_min > value) {
+ memprintf(err, "'%s' can not be lower than tune.ssl.ocsp-update.mindelay.", args[0]);
+ return -1;
+ }
+
+ global_ssl.ocsp_update.delay_max = value;
+
+ return 0;
+}
+
+static int ssl_parse_global_ocsp_mindelay(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int value = 0;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects an integer argument.", args[0]);
+ return -1;
+ }
+
+ value = atoi(args[1]);
+ if (value < 0) {
+ memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+ return -1;
+ }
+
+ if (value > global_ssl.ocsp_update.delay_max) {
+ memprintf(err, "'%s' can not be higher than tune.ssl.ocsp-update.maxdelay.", args[0]);
+ return -1;
+ }
+
+ global_ssl.ocsp_update.delay_min = value;
+
+ return 0;
+}
+
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled.
+ */
+
+/* the <ssl_crtlist_kws> keywords are used for crt-list parsing, they *MUST* be safe
+ * with their proxy argument NULL and must only fill the ssl_bind_conf */
+struct ssl_crtlist_kw ssl_crtlist_kws[] = {
+ { "allow-0rtt", ssl_bind_parse_allow_0rtt, 0 }, /* allow 0-RTT */
+ { "alpn", ssl_bind_parse_alpn, 1 }, /* set ALPN supported protocols */
+ { "ca-file", ssl_bind_parse_ca_file, 1 }, /* set CAfile to process ca-names and verify on client cert */
+ { "ca-verify-file", ssl_bind_parse_ca_verify_file, 1 }, /* set CAverify file to process verify on client cert */
+ { "ciphers", ssl_bind_parse_ciphers, 1 }, /* set SSL cipher suite */
+ { "ciphersuites", ssl_bind_parse_ciphersuites, 1 }, /* set TLS 1.3 cipher suite */
+ { "client-sigalgs", ssl_bind_parse_client_sigalgs, 1 }, /* set SSL client signature algorithms */
+ { "crl-file", ssl_bind_parse_crl_file, 1 }, /* set certificate revocation list file use on client cert verify */
+ { "curves", ssl_bind_parse_curves, 1 }, /* set SSL curve suite */
+ { "ecdhe", ssl_bind_parse_ecdhe, 1 }, /* defines named curve for elliptic curve Diffie-Hellman */
+ { "no-alpn", ssl_bind_parse_no_alpn, 0 }, /* disable sending ALPN */
+ { "no-ca-names", ssl_bind_parse_no_ca_names, 0 }, /* do not send ca names to clients (ca_file related) */
+ { "npn", ssl_bind_parse_npn, 1 }, /* set NPN supported protocols */
+ { "sigalgs", ssl_bind_parse_sigalgs, 1 }, /* set SSL signature algorithms */
+ { "ssl-min-ver", ssl_bind_parse_tls_method_minmax,1 }, /* minimum version */
+ { "ssl-max-ver", ssl_bind_parse_tls_method_minmax,1 }, /* maximum version */
+ { "verify", ssl_bind_parse_verify, 1 }, /* set SSL verify method */
+ { "ocsp-update", ssl_bind_parse_ocsp_update, 1 }, /* ocsp update mode (on or off) */
+ { NULL, NULL, 0 },
+};
+
+/* no initcall for ssl_bind_kws, these ones are parsed in the parser loop */
+
+static struct bind_kw_list bind_kws = { "SSL", { }, {
+ { "allow-0rtt", bind_parse_allow_0rtt, 0 }, /* Allow 0RTT */
+ { "alpn", bind_parse_alpn, 1 }, /* set ALPN supported protocols */
+ { "ca-file", bind_parse_ca_file, 1 }, /* set CAfile to process ca-names and verify on client cert */
+ { "ca-verify-file", bind_parse_ca_verify_file, 1 }, /* set CAverify file to process verify on client cert */
+ { "ca-ignore-err", bind_parse_ignore_err, 1 }, /* set error IDs to ignore on verify depth > 0 */
+ { "ca-sign-file", bind_parse_ca_sign_file, 1 }, /* set CAFile used to generate and sign server certs */
+ { "ca-sign-pass", bind_parse_ca_sign_pass, 1 }, /* set CAKey passphrase */
+ { "ciphers", bind_parse_ciphers, 1 }, /* set SSL cipher suite */
+ { "ciphersuites", bind_parse_ciphersuites, 1 }, /* set TLS 1.3 cipher suite */
+ { "client-sigalgs", bind_parse_client_sigalgs, 1 }, /* set SSL client signature algorithms */
+ { "crl-file", bind_parse_crl_file, 1 }, /* set certificate revocation list file use on client cert verify */
+ { "crt", bind_parse_crt, 1 }, /* load SSL certificates from this location */
+ { "crt-ignore-err", bind_parse_ignore_err, 1 }, /* set error IDs to ignore on verify depth == 0 */
+ { "crt-list", bind_parse_crt_list, 1 }, /* load a list of crt from this location */
+ { "curves", bind_parse_curves, 1 }, /* set SSL curve suite */
+ { "ecdhe", bind_parse_ecdhe, 1 }, /* defines named curve for elliptic curve Diffie-Hellman */
+ { "force-sslv3", bind_parse_tls_method_options, 0 }, /* force SSLv3 */
+ { "force-tlsv10", bind_parse_tls_method_options, 0 }, /* force TLSv10 */
+ { "force-tlsv11", bind_parse_tls_method_options, 0 }, /* force TLSv11 */
+ { "force-tlsv12", bind_parse_tls_method_options, 0 }, /* force TLSv12 */
+ { "force-tlsv13", bind_parse_tls_method_options, 0 }, /* force TLSv13 */
+ { "generate-certificates", bind_parse_generate_certs, 0 }, /* enable the server certificates generation */
+ { "no-alpn", bind_parse_no_alpn, 0 }, /* disable sending ALPN */
+ { "no-ca-names", bind_parse_no_ca_names, 0 }, /* do not send ca names to clients (ca_file related) */
+ { "no-sslv3", bind_parse_tls_method_options, 0 }, /* disable SSLv3 */
+ { "no-tlsv10", bind_parse_tls_method_options, 0 }, /* disable TLSv10 */
+ { "no-tlsv11", bind_parse_tls_method_options, 0 }, /* disable TLSv11 */
+ { "no-tlsv12", bind_parse_tls_method_options, 0 }, /* disable TLSv12 */
+ { "no-tlsv13", bind_parse_tls_method_options, 0 }, /* disable TLSv13 */
+ { "no-tls-tickets", bind_parse_no_tls_tickets, 0 }, /* disable session resumption tickets */
+ { "sigalgs", bind_parse_sigalgs, 1 }, /* set SSL signature algorithms */
+ { "ssl", bind_parse_ssl, 0 }, /* enable SSL processing */
+ { "ssl-min-ver", bind_parse_tls_method_minmax, 1 }, /* minimum version */
+ { "ssl-max-ver", bind_parse_tls_method_minmax, 1 }, /* maximum version */
+ { "strict-sni", bind_parse_strict_sni, 0 }, /* refuse negotiation if sni doesn't match a certificate */
+ { "tls-ticket-keys", bind_parse_tls_ticket_keys, 1 }, /* set file to load TLS ticket keys from */
+ { "verify", bind_parse_verify, 1 }, /* set SSL verify method */
+ { "npn", bind_parse_npn, 1 }, /* set NPN supported protocols */
+ { "prefer-client-ciphers", bind_parse_pcc, 0 }, /* prefer client ciphers */
+ { NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled.
+ */
+static struct srv_kw_list srv_kws = { "SSL", { }, {
+ { "allow-0rtt", srv_parse_allow_0rtt, 0, 1, 1 }, /* Allow using early data on this server */
+ { "alpn", srv_parse_alpn, 1, 1, 1 }, /* Set ALPN supported protocols */
+ { "ca-file", srv_parse_ca_file, 1, 1, 1 }, /* set CAfile to process verify server cert */
+ { "check-alpn", srv_parse_check_alpn, 1, 1, 1 }, /* Set ALPN used for checks */
+ { "check-sni", srv_parse_check_sni, 1, 1, 1 }, /* set SNI */
+ { "check-ssl", srv_parse_check_ssl, 0, 1, 1 }, /* enable SSL for health checks */
+ { "ciphers", srv_parse_ciphers, 1, 1, 1 }, /* select the cipher suite */
+ { "ciphersuites", srv_parse_ciphersuites, 1, 1, 1 }, /* select the cipher suite */
+ { "client-sigalgs", srv_parse_client_sigalgs, 1, 1, 1 }, /* signature algorithms */
+ { "crl-file", srv_parse_crl_file, 1, 1, 1 }, /* set certificate revocation list file use on server cert verify */
+ { "curves", srv_parse_curves, 1, 1, 1 }, /* set TLS curves list */
+ { "crt", srv_parse_crt, 1, 1, 1 }, /* set client certificate */
+ { "force-sslv3", srv_parse_tls_method_options, 0, 1, 1 }, /* force SSLv3 */
+ { "force-tlsv10", srv_parse_tls_method_options, 0, 1, 1 }, /* force TLSv10 */
+ { "force-tlsv11", srv_parse_tls_method_options, 0, 1, 1 }, /* force TLSv11 */
+ { "force-tlsv12", srv_parse_tls_method_options, 0, 1, 1 }, /* force TLSv12 */
+ { "force-tlsv13", srv_parse_tls_method_options, 0, 1, 1 }, /* force TLSv13 */
+ { "no-check-ssl", srv_parse_no_check_ssl, 0, 1, 0 }, /* disable SSL for health checks */
+ { "no-send-proxy-v2-ssl", srv_parse_no_send_proxy_ssl, 0, 1, 0 }, /* do not send PROXY protocol header v2 with SSL info */
+ { "no-send-proxy-v2-ssl-cn", srv_parse_no_send_proxy_cn, 0, 1, 0 }, /* do not send PROXY protocol header v2 with CN */
+ { "no-ssl", srv_parse_no_ssl, 0, 1, 0 }, /* disable SSL processing */
+ { "no-ssl-reuse", srv_parse_no_ssl_reuse, 0, 1, 1 }, /* disable session reuse */
+ { "no-sslv3", srv_parse_tls_method_options, 0, 0, 1 }, /* disable SSLv3 */
+ { "no-tlsv10", srv_parse_tls_method_options, 0, 0, 1 }, /* disable TLSv10 */
+ { "no-tlsv11", srv_parse_tls_method_options, 0, 0, 1 }, /* disable TLSv11 */
+ { "no-tlsv12", srv_parse_tls_method_options, 0, 0, 1 }, /* disable TLSv12 */
+ { "no-tlsv13", srv_parse_tls_method_options, 0, 0, 1 }, /* disable TLSv13 */
+ { "no-tls-tickets", srv_parse_no_tls_tickets, 0, 1, 1 }, /* disable session resumption tickets */
+ { "npn", srv_parse_npn, 1, 1, 1 }, /* Set NPN supported protocols */
+ { "send-proxy-v2-ssl", srv_parse_send_proxy_ssl, 0, 1, 1 }, /* send PROXY protocol header v2 with SSL info */
+ { "send-proxy-v2-ssl-cn", srv_parse_send_proxy_cn, 0, 1, 1 }, /* send PROXY protocol header v2 with CN */
+ { "sigalgs", srv_parse_sigalgs, 1, 1, 1 }, /* signature algorithms */
+ { "sni", srv_parse_sni, 1, 1, 1 }, /* send SNI extension */
+ { "ssl", srv_parse_ssl, 0, 1, 1 }, /* enable SSL processing */
+ { "ssl-min-ver", srv_parse_tls_method_minmax, 1, 1, 1 }, /* minimum version */
+ { "ssl-max-ver", srv_parse_tls_method_minmax, 1, 1, 1 }, /* maximum version */
+ { "ssl-reuse", srv_parse_ssl_reuse, 0, 1, 0 }, /* enable session reuse */
+ { "tls-tickets", srv_parse_tls_tickets, 0, 1, 1 }, /* enable session resumption tickets */
+ { "verify", srv_parse_verify, 1, 1, 1 }, /* set SSL verify method */
+ { "verifyhost", srv_parse_verifyhost, 1, 1, 1 }, /* require that SSL cert verifies for hostname */
+ { NULL, NULL, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
+
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "ca-base", ssl_parse_global_ca_crt_base },
+ { CFG_GLOBAL, "crt-base", ssl_parse_global_ca_crt_base },
+ { CFG_GLOBAL, "issuers-chain-path", ssl_load_global_issuers_from_path },
+ { CFG_GLOBAL, "maxsslconn", ssl_parse_global_int },
+ { CFG_GLOBAL, "ssl-default-bind-options", ssl_parse_default_bind_options },
+ { CFG_GLOBAL, "ssl-default-server-options", ssl_parse_default_server_options },
+#ifndef OPENSSL_NO_DH
+ { CFG_GLOBAL, "ssl-dh-param-file", ssl_parse_global_dh_param_file },
+#endif
+ { CFG_GLOBAL, "ssl-mode-async", ssl_parse_global_ssl_async },
+#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
+ { CFG_GLOBAL, "ssl-engine", ssl_parse_global_ssl_engine },
+#endif
+#ifdef HAVE_SSL_PROVIDERS
+ { CFG_GLOBAL, "ssl-propquery", ssl_parse_global_ssl_propquery },
+ { CFG_GLOBAL, "ssl-provider", ssl_parse_global_ssl_provider },
+ { CFG_GLOBAL, "ssl-provider-path", ssl_parse_global_ssl_provider_path },
+#endif
+ { CFG_GLOBAL, "ssl-skip-self-issued-ca", ssl_parse_skip_self_issued_ca },
+ { CFG_GLOBAL, "tune.ssl.cachesize", ssl_parse_global_int },
+#ifndef OPENSSL_NO_DH
+ { CFG_GLOBAL, "tune.ssl.default-dh-param", ssl_parse_global_default_dh },
+#endif
+ { CFG_GLOBAL, "tune.ssl.force-private-cache", ssl_parse_global_private_cache },
+ { CFG_GLOBAL, "tune.ssl.lifetime", ssl_parse_global_lifetime },
+ { CFG_GLOBAL, "tune.ssl.maxrecord", ssl_parse_global_int },
+ { CFG_GLOBAL, "tune.ssl.hard-maxrecord", ssl_parse_global_int },
+ { CFG_GLOBAL, "tune.ssl.ssl-ctx-cache-size", ssl_parse_global_int },
+ { CFG_GLOBAL, "tune.ssl.capture-cipherlist-size", ssl_parse_global_capture_buffer },
+ { CFG_GLOBAL, "tune.ssl.capture-buffer-size", ssl_parse_global_capture_buffer },
+ { CFG_GLOBAL, "tune.ssl.keylog", ssl_parse_global_keylog },
+ { CFG_GLOBAL, "ssl-default-bind-ciphers", ssl_parse_global_ciphers },
+ { CFG_GLOBAL, "ssl-default-server-ciphers", ssl_parse_global_ciphers },
+#if defined(SSL_CTX_set1_curves_list)
+ { CFG_GLOBAL, "ssl-default-bind-curves", ssl_parse_global_curves },
+ { CFG_GLOBAL, "ssl-default-server-curves", ssl_parse_global_curves },
+#endif
+#if defined(SSL_CTX_set1_sigalgs_list)
+ { CFG_GLOBAL, "ssl-default-bind-sigalgs", ssl_parse_global_sigalgs },
+ { CFG_GLOBAL, "ssl-default-server-sigalgs", ssl_parse_global_sigalgs },
+#endif
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+ { CFG_GLOBAL, "ssl-default-bind-client-sigalgs", ssl_parse_global_client_sigalgs },
+ { CFG_GLOBAL, "ssl-default-server-client-sigalgs", ssl_parse_global_client_sigalgs },
+#endif
+ { CFG_GLOBAL, "ssl-default-bind-ciphersuites", ssl_parse_global_ciphersuites },
+ { CFG_GLOBAL, "ssl-default-server-ciphersuites", ssl_parse_global_ciphersuites },
+ { CFG_GLOBAL, "ssl-load-extra-files", ssl_parse_global_extra_files },
+ { CFG_GLOBAL, "ssl-load-extra-del-ext", ssl_parse_global_extra_noext },
+#ifndef OPENSSL_NO_OCSP
+ { CFG_GLOBAL, "tune.ssl.ocsp-update.maxdelay", ssl_parse_global_ocsp_maxdelay },
+ { CFG_GLOBAL, "tune.ssl.ocsp-update.mindelay", ssl_parse_global_ocsp_mindelay },
+#endif
+ { 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/cfgparse-tcp.c b/src/cfgparse-tcp.c
new file mode 100644
index 0000000..a4f6f29
--- /dev/null
+++ b/src/cfgparse-tcp.c
@@ -0,0 +1,275 @@
+/*
+ * Configuration parsing for TCP (bind and server keywords)
+ *
+ * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/namespace.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server.h>
+#include <haproxy/tools.h>
+
+
+#ifdef IPV6_V6ONLY
+/* parse the "v4v6" bind keyword */
+static int bind_parse_v4v6(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->settings.options |= RX_O_V4V6;
+ return 0;
+}
+
+/* parse the "v6only" bind keyword */
+static int bind_parse_v6only(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->settings.options |= RX_O_V6ONLY;
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAP_TRANSPARENT
+/* parse the "transparent" bind keyword */
+static int bind_parse_transparent(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->settings.options |= RX_O_FOREIGN;
+ return 0;
+}
+#endif
+
+#if defined(TCP_DEFER_ACCEPT) || defined(SO_ACCEPTFILTER)
+/* parse the "defer-accept" bind keyword */
+static int bind_parse_defer_accept(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->options |= BC_O_DEF_ACCEPT;
+ return 0;
+}
+#endif
+
+#ifdef TCP_FASTOPEN
+/* parse the "tfo" bind keyword */
+static int bind_parse_tfo(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ conf->options |= BC_O_TCP_FO;
+ return 0;
+}
+#endif
+
+#ifdef TCP_MAXSEG
+/* parse the "mss" bind keyword */
+static int bind_parse_mss(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int mss;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing MSS value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ mss = atoi(args[cur_arg + 1]);
+ if (!mss || abs(mss) > 65535) {
+ memprintf(err, "'%s' : expects an MSS with an absolute value between 1 and 65535", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->maxseg = mss;
+ return 0;
+}
+#endif
+
+#ifdef TCP_USER_TIMEOUT
+/* parse the "tcp-ut" bind keyword */
+static int bind_parse_tcp_ut(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ const char *ptr = NULL;
+ unsigned int timeout;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing TCP User Timeout value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ ptr = parse_time_err(args[cur_arg + 1], &timeout, TIME_UNIT_MS);
+ if (ptr == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[cur_arg+1], args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (ptr == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[cur_arg+1], args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (ptr) {
+ memprintf(err, "'%s' : expects a positive delay in milliseconds", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->tcp_ut = timeout;
+ return 0;
+}
+#endif
+
+#ifdef SO_BINDTODEVICE
+/* parse the "interface" bind keyword */
+static int bind_parse_interface(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing interface name", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ ha_free(&conf->settings.interface);
+ conf->settings.interface = strdup(args[cur_arg + 1]);
+ return 0;
+}
+#endif
+
+#ifdef USE_NS
+/* parse the "namespace" bind keyword */
+static int bind_parse_namespace(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ char *namespace = NULL;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing namespace id", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ namespace = args[cur_arg + 1];
+
+ conf->settings.netns = netns_store_lookup(namespace, strlen(namespace));
+
+ if (conf->settings.netns == NULL)
+ conf->settings.netns = netns_store_insert(namespace);
+
+ if (conf->settings.netns == NULL) {
+ ha_alert("Cannot open namespace '%s'.\n", args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+}
+#endif
+
+#ifdef TCP_USER_TIMEOUT
+/* parse the "tcp-ut" server keyword */
+static int srv_parse_tcp_ut(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+ const char *ptr = NULL;
+ unsigned int timeout;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing TCP User Timeout value", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ ptr = parse_time_err(args[*cur_arg + 1], &timeout, TIME_UNIT_MS);
+ if (ptr == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[*cur_arg+1], args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (ptr == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[*cur_arg+1], args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (ptr) {
+ memprintf(err, "'%s' : expects a positive delay in milliseconds", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (newsrv->addr.ss_family == AF_INET || newsrv->addr.ss_family == AF_INET6)
+ newsrv->tcp_ut = timeout;
+
+ return 0;
+}
+#endif
+
+
+/************************************************************************/
+/* All supported bind keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled.
+ */
+static struct bind_kw_list bind_kws = { "TCP", { }, {
+#if defined(TCP_DEFER_ACCEPT) || defined(SO_ACCEPTFILTER)
+ { "defer-accept", bind_parse_defer_accept, 0 }, /* wait for some data for 1 second max before doing accept */
+#endif
+#ifdef SO_BINDTODEVICE
+ { "interface", bind_parse_interface, 1 }, /* specifically bind to this interface */
+#endif
+#ifdef TCP_MAXSEG
+ { "mss", bind_parse_mss, 1 }, /* set MSS of listening socket */
+#endif
+#ifdef TCP_USER_TIMEOUT
+ { "tcp-ut", bind_parse_tcp_ut, 1 }, /* set User Timeout on listening socket */
+#endif
+#ifdef TCP_FASTOPEN
+ { "tfo", bind_parse_tfo, 0 }, /* enable TCP_FASTOPEN of listening socket */
+#endif
+#ifdef CONFIG_HAP_TRANSPARENT
+ { "transparent", bind_parse_transparent, 0 }, /* transparently bind to the specified addresses */
+#endif
+#ifdef IPV6_V6ONLY
+ { "v4v6", bind_parse_v4v6, 0 }, /* force socket to bind to IPv4+IPv6 */
+ { "v6only", bind_parse_v6only, 0 }, /* force socket to bind to IPv6 only */
+#endif
+#ifdef USE_NS
+ { "namespace", bind_parse_namespace, 1 },
+#endif
+ /* the versions with the NULL parse function */
+ { "defer-accept", NULL, 0 },
+ { "interface", NULL, 1 },
+ { "mss", NULL, 1 },
+ { "transparent", NULL, 0 },
+ { "v4v6", NULL, 0 },
+ { "v6only", NULL, 0 },
+ { NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
+
+static struct srv_kw_list srv_kws = { "TCP", { }, {
+#ifdef TCP_USER_TIMEOUT
+ { "tcp-ut", srv_parse_tcp_ut, 1, 1, 0 }, /* set TCP user timeout on server */
+#endif
+ { NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/cfgparse-unix.c b/src/cfgparse-unix.c
new file mode 100644
index 0000000..b1fb1e2
--- /dev/null
+++ b/src/cfgparse-unix.c
@@ -0,0 +1,135 @@
+/*
+ * Configuration parsing for UNIX sockets (bind and server keywords)
+ *
+ * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <grp.h>
+#include <pwd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/namespace.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server.h>
+#include <haproxy/tools.h>
+
+/* parse the "mode" bind keyword */
+static int bind_parse_mode(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ char *endptr;
+
+ conf->settings.ux.mode = strtol(args[cur_arg + 1], &endptr, 8);
+
+ if (!*args[cur_arg + 1] || *endptr) {
+ memprintf(err, "'%s' : missing or invalid mode '%s' (octal integer expected)", args[cur_arg], args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "gid" bind keyword */
+static int bind_parse_gid(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->settings.ux.gid = atol(args[cur_arg + 1]);
+ return 0;
+}
+
+/* parse the "group" bind keyword */
+static int bind_parse_group(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ struct group *group;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing group name", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ group = getgrnam(args[cur_arg + 1]);
+ if (!group) {
+ memprintf(err, "'%s' : unknown group name '%s'", args[cur_arg], args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->settings.ux.gid = group->gr_gid;
+ return 0;
+}
+
+/* parse the "uid" bind keyword */
+static int bind_parse_uid(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->settings.ux.uid = atol(args[cur_arg + 1]);
+ return 0;
+}
+
+/* parse the "user" bind keyword */
+static int bind_parse_user(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ struct passwd *user;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing user name", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ user = getpwnam(args[cur_arg + 1]);
+ if (!user) {
+ memprintf(err, "'%s' : unknown user name '%s'", args[cur_arg], args[cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->settings.ux.uid = user->pw_uid;
+ return 0;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled.
+ */
+static struct bind_kw_list bind_kws = { "UNIX", { }, {
+ { "gid", bind_parse_gid, 1 }, /* set the socket's gid */
+ { "group", bind_parse_group, 1 }, /* set the socket's gid from the group name */
+ { "mode", bind_parse_mode, 1 }, /* set the socket's mode (eg: 0644) */
+ { "uid", bind_parse_uid, 1 }, /* set the socket's uid */
+ { "user", bind_parse_user, 1 }, /* set the socket's uid from the user name */
+ { NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
diff --git a/src/cfgparse.c b/src/cfgparse.c
new file mode 100644
index 0000000..2744f97
--- /dev/null
+++ b/src/cfgparse.c
@@ -0,0 +1,4798 @@
+/*
+ * Configuration parser
+ *
+ * Copyright 2000-2011 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* This is to have crypt() and sched_setaffinity() defined on Linux */
+#define _GNU_SOURCE
+
+#ifdef USE_LIBCRYPT
+#ifdef USE_CRYPT_H
+/* some platforms such as Solaris need this */
+#include <crypt.h>
+#endif
+#endif /* USE_LIBCRYPT */
+
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <netdb.h>
+#include <ctype.h>
+#include <pwd.h>
+#include <grp.h>
+#include <errno.h>
+#ifdef USE_CPU_AFFINITY
+#include <sched.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/backend.h>
+#include <haproxy/capture.h>
+#include <haproxy/cfgcond.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/chunk.h>
+#include <haproxy/clock.h>
+#ifdef USE_CPU_AFFINITY
+#include <haproxy/cpuset.h>
+#endif
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/filters.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/lb_chash.h>
+#include <haproxy/lb_fas.h>
+#include <haproxy/lb_fwlc.h>
+#include <haproxy/lb_fwrr.h>
+#include <haproxy/lb_map.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/sink.h>
+#include <haproxy/mailers.h>
+#include <haproxy/namespace.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/peers-t.h>
+#include <haproxy/peers.h>
+#include <haproxy/pool.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_auth-t.h>
+
+
+/* Used to chain configuration sections definitions. This list
+ * stores struct cfg_section
+ */
+struct list sections = LIST_HEAD_INIT(sections);
+
+struct list postparsers = LIST_HEAD_INIT(postparsers);
+
+extern struct proxy *mworker_proxy;
+
+/* curproxy is only valid during parsing and will be NULL afterwards. */
+struct proxy *curproxy = NULL;
+
+char *cursection = NULL;
+int cfg_maxpconn = 0; /* # of simultaneous connections per proxy (-N) */
+int cfg_maxconn = 0; /* # of simultaneous connections, (-n) */
+char *cfg_scope = NULL; /* the current scope during the configuration parsing */
+int non_global_section_parsed = 0;
+
+/* how to handle default paths */
+static enum default_path_mode {
+ DEFAULT_PATH_CURRENT = 0, /* "current": paths are relative to CWD (this is the default) */
+ DEFAULT_PATH_CONFIG, /* "config": paths are relative to config file */
+ DEFAULT_PATH_PARENT, /* "parent": paths are relative to config file's ".." */
+ DEFAULT_PATH_ORIGIN, /* "origin": paths are relative to default_path_origin */
+} default_path_mode;
+
+static char initial_cwd[PATH_MAX];
+static char current_cwd[PATH_MAX];
+
+/* List head of all known configuration keywords */
+struct cfg_kw_list cfg_keywords = {
+ .list = LIST_HEAD_INIT(cfg_keywords.list)
+};
+
+/*
+ * converts <str> to a list of listeners which are dynamically allocated.
+ * The format is "{addr|'*'}:port[-end][,{addr|'*'}:port[-end]]*", where :
+ * - <addr> can be empty or "*" to indicate INADDR_ANY ;
+ * - <port> is a numerical port from 1 to 65535 ;
+ * - <end> indicates to use the range from <port> to <end> instead (inclusive).
+ * This can be repeated as many times as necessary, separated by a comma.
+ * Function returns 1 for success or 0 if error. In case of errors, if <err> is
+ * not NULL, it must be a valid pointer to either NULL or a freeable area that
+ * will be replaced with an error message.
+ */
+int str2listener(char *str, struct proxy *curproxy, struct bind_conf *bind_conf, const char *file, int line, char **err)
+{
+ struct protocol *proto;
+ char *next, *dupstr;
+ int port, end;
+
+ next = dupstr = strdup(str);
+
+ while (next && *next) {
+ struct sockaddr_storage *ss2;
+ int fd = -1;
+
+ str = next;
+ /* 1) look for the end of the first address */
+ if ((next = strchr(str, ',')) != NULL) {
+ *next++ = 0;
+ }
+
+ ss2 = str2sa_range(str, NULL, &port, &end, &fd, &proto, NULL, err,
+ (curproxy == global.cli_fe || curproxy == mworker_proxy) ? NULL : global.unix_bind.prefix,
+ NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_PORT_RANGE |
+ PA_O_SOCKET_FD | PA_O_STREAM | PA_O_XPRT);
+ if (!ss2)
+ goto fail;
+
+ if (ss2->ss_family == AF_CUST_RHTTP_SRV) {
+ /* Check if a previous non reverse HTTP present is
+ * already defined. If DGRAM or STREAM is set, this
+ * indicates that we are currently parsing the second
+ * or more address.
+ */
+ if (bind_conf->options & (BC_O_USE_SOCK_DGRAM|BC_O_USE_SOCK_STREAM) &&
+ !(bind_conf->options & BC_O_REVERSE_HTTP)) {
+ memprintf(err, "Cannot mix reverse HTTP bind with others.\n");
+ goto fail;
+ }
+
+ bind_conf->rhttp_srvname = strdup(str + strlen("rhttp@"));
+ if (!bind_conf->rhttp_srvname) {
+ memprintf(err, "Cannot allocate reverse HTTP bind.\n");
+ goto fail;
+ }
+
+ bind_conf->options |= BC_O_REVERSE_HTTP;
+ }
+ else if (bind_conf->options & BC_O_REVERSE_HTTP) {
+ /* Standard address mixed with a previous reverse HTTP one. */
+ memprintf(err, "Cannot mix reverse HTTP bind with others.\n");
+ goto fail;
+ }
+
+ /* OK the address looks correct */
+ if (proto->proto_type == PROTO_TYPE_DGRAM)
+ bind_conf->options |= BC_O_USE_SOCK_DGRAM;
+ else
+ bind_conf->options |= BC_O_USE_SOCK_STREAM;
+
+ if (proto->xprt_type == PROTO_TYPE_DGRAM)
+ bind_conf->options |= BC_O_USE_XPRT_DGRAM;
+ else
+ bind_conf->options |= BC_O_USE_XPRT_STREAM;
+
+ if (!create_listeners(bind_conf, ss2, port, end, fd, proto, err)) {
+ memprintf(err, "%s for address '%s'.\n", *err, str);
+ goto fail;
+ }
+ } /* end while(next) */
+ free(dupstr);
+ return 1;
+ fail:
+ free(dupstr);
+ return 0;
+}
+
+/*
+ * converts <str> to a list of datagram-oriented listeners which are dynamically
+ * allocated.
+ * The format is "{addr|'*'}:port[-end][,{addr|'*'}:port[-end]]*", where :
+ * - <addr> can be empty or "*" to indicate INADDR_ANY ;
+ * - <port> is a numerical port from 1 to 65535 ;
+ * - <end> indicates to use the range from <port> to <end> instead (inclusive).
+ * This can be repeated as many times as necessary, separated by a comma.
+ * Function returns 1 for success or 0 if error. In case of errors, if <err> is
+ * not NULL, it must be a valid pointer to either NULL or a freeable area that
+ * will be replaced with an error message.
+ */
+int str2receiver(char *str, struct proxy *curproxy, struct bind_conf *bind_conf, const char *file, int line, char **err)
+{
+ struct protocol *proto;
+ char *next, *dupstr;
+ int port, end;
+
+ next = dupstr = strdup(str);
+
+ while (next && *next) {
+ struct sockaddr_storage *ss2;
+ int fd = -1;
+
+ str = next;
+ /* 1) look for the end of the first address */
+ if ((next = strchr(str, ',')) != NULL) {
+ *next++ = 0;
+ }
+
+ ss2 = str2sa_range(str, NULL, &port, &end, &fd, &proto, NULL, err,
+ curproxy == global.cli_fe ? NULL : global.unix_bind.prefix,
+ NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_PORT_RANGE |
+ PA_O_SOCKET_FD | PA_O_DGRAM | PA_O_XPRT);
+ if (!ss2)
+ goto fail;
+
+ /* OK the address looks correct */
+ if (!create_listeners(bind_conf, ss2, port, end, fd, proto, err)) {
+ memprintf(err, "%s for address '%s'.\n", *err, str);
+ goto fail;
+ }
+ } /* end while(next) */
+ free(dupstr);
+ return 1;
+ fail:
+ free(dupstr);
+ return 0;
+}
+
+/*
+ * Sends a warning if proxy <proxy> does not have at least one of the
+ * capabilities in <cap>. An optional <hint> may be added at the end
+ * of the warning to help the user. Returns 1 if a warning was emitted
+ * or 0 if the condition is valid.
+ */
+int warnifnotcap(struct proxy *proxy, int cap, const char *file, int line, const char *arg, const char *hint)
+{
+ char *msg;
+
+ switch (cap) {
+ case PR_CAP_BE: msg = "no backend"; break;
+ case PR_CAP_FE: msg = "no frontend"; break;
+ case PR_CAP_BE|PR_CAP_FE: msg = "neither frontend nor backend"; break;
+ default: msg = "not enough"; break;
+ }
+
+ if (!(proxy->cap & cap)) {
+ ha_warning("parsing [%s:%d] : '%s' ignored because %s '%s' has %s capability.%s\n",
+ file, line, arg, proxy_type_str(proxy), proxy->id, msg, hint ? hint : "");
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Sends an alert if proxy <proxy> does not have at least one of the
+ * capabilities in <cap>. An optional <hint> may be added at the end
+ * of the alert to help the user. Returns 1 if an alert was emitted
+ * or 0 if the condition is valid.
+ */
+int failifnotcap(struct proxy *proxy, int cap, const char *file, int line, const char *arg, const char *hint)
+{
+ char *msg;
+
+ switch (cap) {
+ case PR_CAP_BE: msg = "no backend"; break;
+ case PR_CAP_FE: msg = "no frontend"; break;
+ case PR_CAP_BE|PR_CAP_FE: msg = "neither frontend nor backend"; break;
+ default: msg = "not enough"; break;
+ }
+
+ if (!(proxy->cap & cap)) {
+ ha_alert("parsing [%s:%d] : '%s' not allowed because %s '%s' has %s capability.%s\n",
+ file, line, arg, proxy_type_str(proxy), proxy->id, msg, hint ? hint : "");
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Report an error in <msg> when there are too many arguments. This version is
+ * intended to be used by keyword parsers so that the message will be included
+ * into the general error message. The index is the current keyword in args.
+ * Return 0 if the number of argument is correct, otherwise build a message and
+ * return 1. Fill err_code with an ERR_ALERT and an ERR_FATAL if not null. The
+ * message may also be null, it will simply not be produced (useful to check only).
+ * <msg> and <err_code> are only affected on error.
+ */
+int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code)
+{
+ int i;
+
+ if (!*args[index + maxarg + 1])
+ return 0;
+
+ if (msg) {
+ *msg = NULL;
+ memprintf(msg, "%s", args[0]);
+ for (i = 1; i <= index; i++)
+ memprintf(msg, "%s %s", *msg, args[i]);
+
+ memprintf(msg, "'%s' cannot handle unexpected argument '%s'.", *msg, args[index + maxarg + 1]);
+ }
+ if (err_code)
+ *err_code |= ERR_ALERT | ERR_FATAL;
+
+ return 1;
+}
+
+/*
+ * same as too_many_args_idx with a 0 index
+ */
+int too_many_args(int maxarg, char **args, char **msg, int *err_code)
+{
+ return too_many_args_idx(maxarg, 0, args, msg, err_code);
+}
+
+/*
+ * Report a fatal Alert when there is too much arguments
+ * The index is the current keyword in args
+ * Return 0 if the number of argument is correct, otherwise emit an alert and return 1
+ * Fill err_code with an ERR_ALERT and an ERR_FATAL
+ */
+int alertif_too_many_args_idx(int maxarg, int index, const char *file, int linenum, char **args, int *err_code)
+{
+ char *kw = NULL;
+ int i;
+
+ if (!*args[index + maxarg + 1])
+ return 0;
+
+ memprintf(&kw, "%s", args[0]);
+ for (i = 1; i <= index; i++) {
+ memprintf(&kw, "%s %s", kw, args[i]);
+ }
+
+ ha_alert("parsing [%s:%d] : '%s' cannot handle unexpected argument '%s'.\n", file, linenum, kw, args[index + maxarg + 1]);
+ free(kw);
+ *err_code |= ERR_ALERT | ERR_FATAL;
+ return 1;
+}
+
+/*
+ * same as alertif_too_many_args_idx with a 0 index
+ */
+int alertif_too_many_args(int maxarg, const char *file, int linenum, char **args, int *err_code)
+{
+ return alertif_too_many_args_idx(maxarg, 0, file, linenum, args, err_code);
+}
+
+
+/* Report it if a request ACL condition uses some keywords that are incompatible
+ * with the place where the ACL is used. It returns either 0 or ERR_WARN so that
+ * its result can be or'ed with err_code. Note that <cond> may be NULL and then
+ * will be ignored.
+ */
+int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line)
+{
+ const struct acl *acl;
+ const char *kw;
+
+ if (!cond)
+ return 0;
+
+ acl = acl_cond_conflicts(cond, where);
+ if (acl) {
+ if (acl->name && *acl->name)
+ ha_warning("parsing [%s:%d] : acl '%s' will never match because it only involves keywords that are incompatible with '%s'\n",
+ file, line, acl->name, sample_ckp_names(where));
+ else
+ ha_warning("parsing [%s:%d] : anonymous acl will never match because it uses keyword '%s' which is incompatible with '%s'\n",
+ file, line, LIST_ELEM(acl->expr.n, struct acl_expr *, list)->kw, sample_ckp_names(where));
+ return ERR_WARN;
+ }
+ if (!acl_cond_kw_conflicts(cond, where, &acl, &kw))
+ return 0;
+
+ if (acl->name && *acl->name)
+ ha_warning("parsing [%s:%d] : acl '%s' involves keywords '%s' which is incompatible with '%s'\n",
+ file, line, acl->name, kw, sample_ckp_names(where));
+ else
+ ha_warning("parsing [%s:%d] : anonymous acl involves keyword '%s' which is incompatible with '%s'\n",
+ file, line, kw, sample_ckp_names(where));
+ return ERR_WARN;
+}
+
+/* Report it if an ACL uses a L6 sample fetch from an HTTP proxy. It returns
+ * either 0 or ERR_WARN so that its result can be or'ed with err_code. Note that
+ * <cond> may be NULL and then will be ignored.
+*/
+int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond)
+{
+ if (!cond || px->mode != PR_MODE_HTTP)
+ return 0;
+
+ if (cond->use & (SMP_USE_L6REQ|SMP_USE_L6RES)) {
+ ha_warning("Proxy '%s': L6 sample fetches ignored on HTTP proxies (declared at %s:%d).\n",
+ px->id, cond->file, cond->line);
+ return ERR_WARN;
+ }
+ return 0;
+}
+
+/* try to find in <list> the word that looks closest to <word> by counting
+ * transitions between letters, digits and other characters. Will return the
+ * best matching word if found, otherwise NULL. An optional array of extra
+ * words to compare may be passed in <extra>, but it must then be terminated
+ * by a NULL entry. If unused it may be NULL.
+ */
+const char *cfg_find_best_match(const char *word, const struct list *list, int section, const char **extra)
+{
+ uint8_t word_sig[1024]; // 0..25=letter, 26=digit, 27=other, 28=begin, 29=end
+ uint8_t list_sig[1024];
+ const struct cfg_kw_list *kwl;
+ int index;
+ const char *best_ptr = NULL;
+ int dist, best_dist = INT_MAX;
+
+ make_word_fingerprint(word_sig, word);
+ list_for_each_entry(kwl, list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (kwl->kw[index].section != section)
+ continue;
+
+ make_word_fingerprint(list_sig, kwl->kw[index].kw);
+ dist = word_fingerprint_distance(word_sig, list_sig);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_ptr = kwl->kw[index].kw;
+ }
+ }
+ }
+
+ while (extra && *extra) {
+ make_word_fingerprint(list_sig, *extra);
+ dist = word_fingerprint_distance(word_sig, list_sig);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_ptr = *extra;
+ }
+ extra++;
+ }
+
+ if (best_dist > 2 * strlen(word) || (best_ptr && best_dist > 2 * strlen(best_ptr)))
+ best_ptr = NULL;
+ return best_ptr;
+}
+
+/* Parse a string representing a process number or a set of processes. It must
+ * be "all", "odd", "even", a number between 1 and <max> or a range with
+ * two such numbers delimited by a dash ('-'). On success, it returns
+ * 0, otherwise it returns a non-zero value with an error message in <err>.
+ *
+ * Note: this function can also be used to parse a thread number or a set of
+ * threads.
+ */
+int parse_process_number(const char *arg, unsigned long *proc, int max, int *autoinc, char **err)
+{
+ if (autoinc) {
+ *autoinc = 0;
+ if (strncmp(arg, "auto:", 5) == 0) {
+ arg += 5;
+ *autoinc = 1;
+ }
+ }
+
+ if (strcmp(arg, "all") == 0)
+ *proc |= ~0UL;
+ else if (strcmp(arg, "odd") == 0)
+ *proc |= ~0UL/3UL; /* 0x555....555 */
+ else if (strcmp(arg, "even") == 0)
+ *proc |= (~0UL/3UL) << 1; /* 0xAAA...AAA */
+ else {
+ const char *p, *dash = NULL;
+ unsigned int low, high;
+
+ for (p = arg; *p; p++) {
+ if (*p == '-' && !dash)
+ dash = p;
+ else if (!isdigit((unsigned char)*p)) {
+ memprintf(err, "'%s' is not a valid number/range.", arg);
+ return -1;
+ }
+ }
+
+ low = high = str2uic(arg);
+ if (dash)
+ high = ((!*(dash+1)) ? max : str2uic(dash + 1));
+
+ if (high < low) {
+ unsigned int swap = low;
+ low = high;
+ high = swap;
+ }
+
+ if (low < 1 || low > max || high > max) {
+ memprintf(err, "'%s' is not a valid number/range."
+ " It supports numbers from 1 to %d.\n",
+ arg, max);
+ return 1;
+ }
+
+ for (;low <= high; low++)
+ *proc |= 1UL << (low-1);
+ }
+ *proc &= ~0UL >> (LONGBITS - max);
+
+ return 0;
+}
+
+/* Allocate and initialize the frontend of a "peers" section found in
+ * file <file> at line <linenum> with <id> as ID.
+ * Return 0 if succeeded, -1 if not.
+ * Note that this function may be called from "default-server"
+ * or "peer" lines.
+ */
+static int init_peers_frontend(const char *file, int linenum,
+ const char *id, struct peers *peers)
+{
+ struct proxy *p;
+
+ if (peers->peers_fe) {
+ p = peers->peers_fe;
+ goto out;
+ }
+
+ p = calloc(1, sizeof *p);
+ if (!p) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ return -1;
+ }
+
+ init_new_proxy(p);
+ peers_setup_frontend(p);
+ p->parent = peers;
+ /* Finally store this frontend. */
+ peers->peers_fe = p;
+
+ out:
+ if (id && !p->id)
+ p->id = strdup(id);
+ free(p->conf.file);
+ p->conf.args.file = p->conf.file = strdup(file);
+ if (linenum != -1)
+ p->conf.args.line = p->conf.line = linenum;
+
+ return 0;
+}
+
+/* Only change ->file, ->line and ->arg struct bind_conf member values
+ * if already present.
+ */
+static struct bind_conf *bind_conf_uniq_alloc(struct proxy *p,
+ const char *file, int line,
+ const char *arg, struct xprt_ops *xprt)
+{
+ struct bind_conf *bind_conf;
+
+ if (!LIST_ISEMPTY(&p->conf.bind)) {
+ bind_conf = LIST_ELEM((&p->conf.bind)->n, typeof(bind_conf), by_fe);
+ /*
+ * We keep bind_conf->file and bind_conf->line unchanged
+ * to make them available for error messages
+ */
+ if (arg) {
+ free(bind_conf->arg);
+ bind_conf->arg = strdup(arg);
+ }
+ }
+ else {
+ bind_conf = bind_conf_alloc(p, file, line, arg, xprt);
+ }
+
+ return bind_conf;
+}
+
+/*
+ * Allocate a new struct peer parsed at line <linenum> in file <file>
+ * to be added to <peers>.
+ * Returns the new allocated structure if succeeded, NULL if not.
+ */
+static struct peer *cfg_peers_add_peer(struct peers *peers,
+ const char *file, int linenum,
+ const char *id, int local)
+{
+ struct peer *p;
+
+ p = calloc(1, sizeof *p);
+ if (!p) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ return NULL;
+ }
+
+ /* the peers are linked backwards first */
+ peers->count++;
+ p->peers = peers;
+ p->next = peers->remote;
+ peers->remote = p;
+ p->conf.file = strdup(file);
+ p->conf.line = linenum;
+ p->last_change = ns_to_sec(now_ns);
+ p->xprt = xprt_get(XPRT_RAW);
+ p->sock_init_arg = NULL;
+ HA_SPIN_INIT(&p->lock);
+ if (id)
+ p->id = strdup(id);
+ if (local) {
+ p->local = 1;
+ peers->local = p;
+ }
+
+ return p;
+}
+
+/*
+ * Parse a line in a <listen>, <frontend> or <backend> section.
+ * Returns the error code, 0 if OK, or any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
+int cfg_parse_peers(const char *file, int linenum, char **args, int kwm)
+{
+ static struct peers *curpeers = NULL;
+ static int nb_shards = 0;
+ struct peer *newpeer = NULL;
+ const char *err;
+ struct bind_conf *bind_conf;
+ int err_code = 0;
+ char *errmsg = NULL;
+ static int bind_line, peer_line;
+
+ if (strcmp(args[0], "bind") == 0 || strcmp(args[0], "default-bind") == 0) {
+ int cur_arg;
+ struct bind_conf *bind_conf;
+ int ret;
+
+ cur_arg = 1;
+
+ if (init_peers_frontend(file, linenum, NULL, curpeers) != 0) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum,
+ args[1], xprt_get(XPRT_RAW));
+ if (!bind_conf) {
+ ha_alert("parsing [%s:%d] : '%s %s' : cannot allocate memory.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ bind_conf->maxaccept = 1;
+ bind_conf->accept = session_accept_fd;
+ bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
+
+ if (*args[0] == 'b') {
+ struct listener *l;
+
+ if (peer_line) {
+ ha_alert("parsing [%s:%d] : mixing \"peer\" and \"bind\" line is forbidden\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (!LIST_ISEMPTY(&bind_conf->listeners)) {
+ ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
+ err_code |= ERR_FATAL;
+ }
+
+ if (!str2listener(args[1], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
+ if (errmsg && *errmsg) {
+ indent_msg(&errmsg, 2);
+ ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+ }
+ else
+ ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
+ file, linenum, args[0], args[1], args[1]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ /* Only one listener supported. Compare first listener
+ * against the last one. It must be the same one.
+ */
+ if (bind_conf->listeners.n != bind_conf->listeners.p) {
+ ha_alert("parsing [%s:%d] : Only one listener per \"peers\" section is authorized. Multiple listening addresses or port range are not supported.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ /*
+ * Newly allocated listener is at the end of the list
+ */
+ l = LIST_ELEM(bind_conf->listeners.p, typeof(l), by_bind);
+
+ global.maxsock++; /* for the listening socket */
+
+ bind_line = 1;
+ if (cfg_peers->local) {
+ newpeer = cfg_peers->local;
+ }
+ else {
+ /* This peer is local.
+ * Note that we do not set the peer ID. This latter is initialized
+ * when parsing "peer" or "server" line.
+ */
+ newpeer = cfg_peers_add_peer(curpeers, file, linenum, NULL, 1);
+ if (!newpeer) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ }
+ newpeer->addr = l->rx.addr;
+ newpeer->proto = l->rx.proto;
+ cur_arg++;
+ }
+
+ ret = bind_parse_args_list(bind_conf, args, cur_arg, cursection, file, linenum);
+ err_code |= ret;
+ if (ret != 0)
+ goto out;
+ }
+ else if (strcmp(args[0], "default-server") == 0) {
+ if (init_peers_frontend(file, -1, NULL, curpeers) != 0) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ err_code |= parse_server(file, linenum, args, curpeers->peers_fe, NULL,
+ SRV_PARSE_DEFAULT_SERVER|SRV_PARSE_IN_PEER_SECTION|SRV_PARSE_INITIAL_RESOLVE);
+ }
+ else if (strcmp(args[0], "log") == 0) {
+ if (init_peers_frontend(file, linenum, NULL, curpeers) != 0) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ if (!parse_logger(args, &curpeers->peers_fe->loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "peers") == 0) { /* new peers section */
+ /* Initialize these static variables when entering a new "peers" section*/
+ bind_line = peer_line = 0;
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : missing name for peers section.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+ file, linenum, *err, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ for (curpeers = cfg_peers; curpeers != NULL; curpeers = curpeers->next) {
+ /*
+ * If there are two proxies with the same name only following
+ * combinations are allowed:
+ */
+ if (strcmp(curpeers->id, args[1]) == 0) {
+ ha_alert("Parsing [%s:%d]: peers section '%s' has the same name as another peers section declared at %s:%d.\n",
+ file, linenum, args[1], curpeers->conf.file, curpeers->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ if ((curpeers = calloc(1, sizeof(*curpeers))) == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ curpeers->next = cfg_peers;
+ cfg_peers = curpeers;
+ curpeers->conf.file = strdup(file);
+ curpeers->conf.line = linenum;
+ curpeers->last_change = ns_to_sec(now_ns);
+ curpeers->id = strdup(args[1]);
+ curpeers->disabled = 0;
+ }
+ else if (strcmp(args[0], "peer") == 0 ||
+ strcmp(args[0], "server") == 0) { /* peer or server definition */
+ int local_peer, peer;
+ int parse_addr = 0;
+
+ peer = *args[0] == 'p';
+ local_peer = strcmp(args[1], localpeer) == 0;
+ /* The local peer may have already partially been parsed on a "bind" line. */
+ if (*args[0] == 'p') {
+ if (bind_line) {
+ ha_alert("parsing [%s:%d] : mixing \"peer\" and \"bind\" line is forbidden\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ peer_line = 1;
+ }
+ if (cfg_peers->local && !cfg_peers->local->id && local_peer) {
+ /* The local peer has already been initialized on a "bind" line.
+ * Let's use it and store its ID.
+ */
+ newpeer = cfg_peers->local;
+ newpeer->id = strdup(localpeer);
+ }
+ else {
+ if (local_peer && cfg_peers->local) {
+ ha_alert("parsing [%s:%d] : '%s %s' : local peer name already referenced at %s:%d. %s\n",
+ file, linenum, args[0], args[1],
+ curpeers->peers_fe->conf.file, curpeers->peers_fe->conf.line, cfg_peers->local->id);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+ newpeer = cfg_peers_add_peer(curpeers, file, linenum, args[1], local_peer);
+ if (!newpeer) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ }
+
+ /* Line number and peer ID are updated only if this peer is the local one. */
+ if (init_peers_frontend(file,
+ newpeer->local ? linenum: -1,
+ newpeer->local ? newpeer->id : NULL,
+ curpeers) != 0) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* This initializes curpeer->peers->peers_fe->srv.
+ * The server address is parsed only if we are parsing a "peer" line,
+ * or if we are parsing a "server" line and the current peer is not the local one.
+ */
+ parse_addr = (peer || !local_peer) ? SRV_PARSE_PARSE_ADDR : 0;
+ err_code |= parse_server(file, linenum, args, curpeers->peers_fe, NULL,
+ SRV_PARSE_IN_PEER_SECTION|parse_addr|SRV_PARSE_INITIAL_RESOLVE);
+ if (!curpeers->peers_fe->srv) {
+ /* Remove the newly allocated peer. */
+ if (newpeer != curpeers->local) {
+ struct peer *p;
+
+ p = curpeers->remote;
+ curpeers->remote = curpeers->remote->next;
+ free(p->id);
+ free(p);
+ }
+ goto out;
+ }
+
+ if (nb_shards && curpeers->peers_fe->srv->shard > nb_shards) {
+ ha_warning("parsing [%s:%d] : '%s %s' : %d peer shard greater value than %d shards value is ignored.\n",
+ file, linenum, args[0], args[1], curpeers->peers_fe->srv->shard, nb_shards);
+ curpeers->peers_fe->srv->shard = 0;
+ err_code |= ERR_WARN;
+ }
+
+ if (curpeers->peers_fe->srv->init_addr_methods || curpeers->peers_fe->srv->resolvers_id ||
+ curpeers->peers_fe->srv->do_check || curpeers->peers_fe->srv->do_agent) {
+ ha_warning("parsing [%s:%d] : '%s %s' : init_addr, resolvers, check and agent are ignored for peers.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_WARN;
+ }
+
+ /* If the peer address has just been parsed, let's copy it to <newpeer>
+ * and initializes ->proto.
+ */
+ if (peer || !local_peer) {
+ newpeer->addr = curpeers->peers_fe->srv->addr;
+ newpeer->proto = protocol_lookup(newpeer->addr.ss_family, PROTO_TYPE_STREAM, 0);
+ }
+
+ newpeer->xprt = xprt_get(XPRT_RAW);
+ newpeer->sock_init_arg = NULL;
+ HA_SPIN_INIT(&newpeer->lock);
+
+ newpeer->srv = curpeers->peers_fe->srv;
+ if (!newpeer->local)
+ goto out;
+
+ /* The lines above are reserved to "peer" lines. */
+ if (*args[0] == 's')
+ goto out;
+
+ bind_conf = bind_conf_uniq_alloc(curpeers->peers_fe, file, linenum, args[2], xprt_get(XPRT_RAW));
+ if (!bind_conf) {
+ ha_alert("parsing [%s:%d] : '%s %s' : Cannot allocate memory.\n", file, linenum, args[0], args[1]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ bind_conf->maxaccept = 1;
+ bind_conf->accept = session_accept_fd;
+ bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
+
+ if (!LIST_ISEMPTY(&bind_conf->listeners)) {
+ ha_alert("parsing [%s:%d] : One listener per \"peers\" section is authorized but another is already configured at [%s:%d].\n", file, linenum, bind_conf->file, bind_conf->line);
+ err_code |= ERR_FATAL;
+ }
+
+ if (!str2listener(args[2], curpeers->peers_fe, bind_conf, file, linenum, &errmsg)) {
+ if (errmsg && *errmsg) {
+ indent_msg(&errmsg, 2);
+ ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+ }
+ else
+ ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
+ file, linenum, args[0], args[1], args[2]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ global.maxsock++; /* for the listening socket */
+ }
+ else if (strcmp(args[0], "shards") == 0) {
+ char *endptr;
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' : missing value\n", file, linenum, args[0]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ curpeers->nb_shards = strtol(args[1], &endptr, 10);
+ if (*endptr != '\0') {
+ ha_alert("parsing [%s:%d] : '%s' : expects an integer argument, found '%s'\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ if (!curpeers->nb_shards) {
+ ha_alert("parsing [%s:%d] : '%s' : expects a strictly positive integer argument\n",
+ file, linenum, args[0]);
+ err_code |= ERR_FATAL;
+ goto out;
+ }
+
+ nb_shards = curpeers->nb_shards;
+ }
+ else if (strcmp(args[0], "table") == 0) {
+ struct stktable *t, *other;
+ char *id;
+ size_t prefix_len;
+
+ /* Line number and peer ID are updated only if this peer is the local one. */
+ if (init_peers_frontend(file, -1, NULL, curpeers) != 0) {
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* Build the stick-table name, concatenating the "peers" section name
+ * followed by a '/' character and the table name argument.
+ */
+ chunk_reset(&trash);
+ if (!chunk_strcpy(&trash, curpeers->id)) {
+ ha_alert("parsing [%s:%d]: '%s %s' : stick-table name too long.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ prefix_len = trash.data;
+ if (!chunk_memcat(&trash, "/", 1) || !chunk_strcat(&trash, args[1])) {
+ ha_alert("parsing [%s:%d]: '%s %s' : stick-table name too long.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ t = calloc(1, sizeof *t);
+ id = strdup(trash.area);
+ if (!t || !id) {
+ ha_alert("parsing [%s:%d]: '%s %s' : memory allocation failed\n",
+ file, linenum, args[0], args[1]);
+ free(t);
+ free(id);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ other = stktable_find_by_name(trash.area);
+ if (other) {
+ ha_alert("parsing [%s:%d] : stick-table name '%s' conflicts with table declared in %s '%s' at %s:%d.\n",
+ file, linenum, args[1],
+ other->proxy ? proxy_cap_str(other->proxy->cap) : "peers",
+ other->proxy ? other->id : other->peers.p->id,
+ other->conf.file, other->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+
+ err_code |= parse_stick_table(file, linenum, args, t, id, id + prefix_len, curpeers);
+ if (err_code & ERR_FATAL) {
+ free(t);
+ free(id);
+ goto out;
+ }
+
+ stktable_store_name(t);
+ t->next = stktables_list;
+ stktables_list = t;
+ }
+ else if (strcmp(args[0], "disabled") == 0) { /* disables this peers section */
+ curpeers->disabled |= PR_FL_DISABLED;
+ }
+ else if (strcmp(args[0], "enabled") == 0) { /* enables this peers section (used to revert a disabled default) */
+ curpeers->disabled = 0;
+ }
+ else if (*args[0] != 0) {
+ struct peers_kw_list *pkwl;
+ int index;
+ int rc = -1;
+
+ list_for_each_entry(pkwl, &peers_keywords.list, list) {
+ for (index = 0; pkwl->kw[index].kw != NULL; index++) {
+ if (strcmp(pkwl->kw[index].kw, args[0]) == 0) {
+ rc = pkwl->kw[index].parse(args, curpeers, file, linenum, &errmsg);
+ if (rc < 0) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (rc > 0) {
+ ha_warning("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ err_code |= ERR_WARN;
+ goto out;
+ }
+ goto out;
+ }
+ }
+ }
+
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+out:
+ free(errmsg);
+ return err_code;
+}
+
+/*
+ * Parse a line in a <listen>, <frontend> or <backend> section.
+ * Returns the error code, 0 if OK, or any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
+int cfg_parse_mailers(const char *file, int linenum, char **args, int kwm)
+{
+ static struct mailers *curmailers = NULL;
+ struct mailer *newmailer = NULL;
+ const char *err;
+ int err_code = 0;
+ char *errmsg = NULL;
+
+ if (strcmp(args[0], "mailers") == 0) { /* new mailers section */
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : missing name for mailers section.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+ file, linenum, *err, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ for (curmailers = mailers; curmailers != NULL; curmailers = curmailers->next) {
+ /*
+ * If there are two proxies with the same name only following
+ * combinations are allowed:
+ */
+ if (strcmp(curmailers->id, args[1]) == 0) {
+ ha_alert("Parsing [%s:%d]: mailers section '%s' has the same name as another mailers section declared at %s:%d.\n",
+ file, linenum, args[1], curmailers->conf.file, curmailers->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ if ((curmailers = calloc(1, sizeof(*curmailers))) == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ curmailers->next = mailers;
+ mailers = curmailers;
+ curmailers->conf.file = strdup(file);
+ curmailers->conf.line = linenum;
+ curmailers->id = strdup(args[1]);
+ curmailers->timeout.mail = DEF_MAILALERTTIME;/* XXX: Would like to Skip to the next alert, if any, ASAP.
+ * But need enough time so that timeouts don't occur
+ * during tcp procssing. For now just us an arbitrary default. */
+ }
+ else if (strcmp(args[0], "mailer") == 0) { /* mailer definition */
+ struct sockaddr_storage *sk;
+ int port1, port2;
+ struct protocol *proto;
+
+ if (!*args[2]) {
+ ha_alert("parsing [%s:%d] : '%s' expects <name> and <addr>[:<port>] as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in server name '%s'.\n",
+ file, linenum, *err, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if ((newmailer = calloc(1, sizeof(*newmailer))) == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* the mailers are linked backwards first */
+ curmailers->count++;
+ newmailer->next = curmailers->mailer_list;
+ curmailers->mailer_list = newmailer;
+ newmailer->mailers = curmailers;
+ newmailer->conf.file = strdup(file);
+ newmailer->conf.line = linenum;
+
+ newmailer->id = strdup(args[1]);
+
+ sk = str2sa_range(args[2], NULL, &port1, &port2, NULL, &proto, NULL,
+ &errmsg, NULL, NULL,
+ PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_STREAM | PA_O_XPRT | PA_O_CONNECT);
+ if (!sk) {
+ ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (proto->sock_prot != IPPROTO_TCP) {
+ ha_alert("parsing [%s:%d] : '%s %s' : TCP not supported for this address family.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ newmailer->addr = *sk;
+ newmailer->proto = proto;
+ newmailer->xprt = xprt_get(XPRT_RAW);
+ newmailer->sock_init_arg = NULL;
+ }
+ else if (strcmp(args[0], "timeout") == 0) {
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects 'mail' and <time> as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (strcmp(args[1], "mail") == 0) {
+ const char *res;
+ unsigned int timeout_mail;
+ if (!*args[2]) {
+ ha_alert("parsing [%s:%d] : '%s %s' expects <time> as argument.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ res = parse_time_err(args[2], &timeout_mail, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s %s>, maximum value is 2147483647 ms (~24.8 days).\n",
+ file, linenum, args[2], args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s %s>, minimum non-null value is 1 ms.\n",
+ file, linenum, args[2], args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ else if (res) {
+ ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s %s>.\n",
+ file, linenum, *res, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curmailers->timeout.mail = timeout_mail;
+ } else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'mail' and <time> as arguments got '%s'.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (*args[0] != 0) {
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+out:
+ free(errmsg);
+ return err_code;
+}
+
+/* Release all email-alert configuration strings attached to proxy <p>.
+ * ha_free() frees each string and resets the pointer to NULL, which makes
+ * the function safe to call more than once.
+ */
+void free_email_alert(struct proxy *p)
+{
+	ha_free(&p->email_alert.mailers.name);
+	ha_free(&p->email_alert.from);
+	ha_free(&p->email_alert.to);
+	ha_free(&p->email_alert.myhostname);
+}
+
+
+/* Parse a line of the "namespace_list" section: "namespace_list" itself is a
+ * no-op header, and each argument of a "namespace" line is validated
+ * (invalid_char), checked for duplicates (netns_store_lookup) and inserted
+ * into the namespace store (netns_store_insert). <kwm> is unused.
+ * Returns 0 on success, or ERR_ALERT | ERR_FATAL on error. When HAProxy is
+ * built without USE_NS the function always fails.
+ */
+int
+cfg_parse_netns(const char *file, int linenum, char **args, int kwm)
+{
+#ifdef USE_NS
+	const char *err;
+	const char *item = args[0];
+
+	if (strcmp(item, "namespace_list") == 0) {
+		/* section header: nothing to parse on this line */
+		return 0;
+	}
+	else if (strcmp(item, "namespace") == 0) {
+		size_t idx = 1;
+		const char *current;
+
+		/* each remaining argument is one namespace name */
+		while (*(current = args[idx++])) {
+			err = invalid_char(current);
+			if (err) {
+				ha_alert("parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'.\n",
+				         file, linenum, *err, item, current);
+				return ERR_ALERT | ERR_FATAL;
+			}
+
+			if (netns_store_lookup(current, strlen(current))) {
+				ha_alert("parsing [%s:%d]: Namespace '%s' is already added.\n",
+				         file, linenum, current);
+				return ERR_ALERT | ERR_FATAL;
+			}
+			if (!netns_store_insert(current)) {
+				ha_alert("parsing [%s:%d]: Cannot open namespace '%s'.\n",
+				         file, linenum, current);
+				return ERR_ALERT | ERR_FATAL;
+			}
+		}
+	}
+
+	return 0;
+#else
+	/* fix: this alert was missing its trailing '\n', unlike every other
+	 * ha_alert() message in this file, so the next log line was glued to it.
+	 */
+	ha_alert("parsing [%s:%d]: namespace support is not compiled in.\n",
+	         file, linenum);
+	return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* Parse a line of a "userlist" section: "userlist" opens a new list,
+ * "group" adds a group (with an optional "users" member list) to the current
+ * userlist, and "user" adds a user with "password"/"insecure-password"/
+ * "groups" options. Returns an ERR_* combination, 0 if OK.
+ */
+int
+cfg_parse_users(const char *file, int linenum, char **args, int kwm)
+{
+
+	int err_code = 0;
+	const char *err;
+
+	if (strcmp(args[0], "userlist") == 0) { /* new userlist */
+		struct userlist *newul;
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d]: '%s' expects <name> as arguments.\n",
+			         file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		if (alertif_too_many_args(1, file, linenum, args, &err_code))
+			goto out;
+
+		err = invalid_char(args[1]);
+		if (err) {
+			ha_alert("parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'.\n",
+			         file, linenum, *err, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		/* duplicate userlists are ignored with a warning, not an error */
+		for (newul = userlist; newul; newul = newul->next)
+			if (strcmp(newul->name, args[1]) == 0) {
+				ha_warning("parsing [%s:%d]: ignoring duplicated userlist '%s'.\n",
+				           file, linenum, args[1]);
+				err_code |= ERR_WARN;
+				goto out;
+			}
+
+		newul = calloc(1, sizeof(*newul));
+		if (!newul) {
+			ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		newul->name = strdup(args[1]);
+		if (!newul->name) {
+			ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			free(newul);
+			goto out;
+		}
+
+		/* link the new list at the head of the global userlist */
+		newul->next = userlist;
+		userlist = newul;
+
+	} else if (strcmp(args[0], "group") == 0) { /* new group */
+		int cur_arg;
+		const char *err;
+		struct auth_groups *ag;
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d]: '%s' expects <name> as arguments.\n",
+			         file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		err = invalid_char(args[1]);
+		if (err) {
+			ha_alert("parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'.\n",
+			         file, linenum, *err, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		/* a "group" line outside any userlist is silently ignored */
+		if (!userlist)
+			goto out;
+
+		for (ag = userlist->groups; ag; ag = ag->next)
+			if (strcmp(ag->name, args[1]) == 0) {
+				ha_warning("parsing [%s:%d]: ignoring duplicated group '%s' in userlist '%s'.\n",
+				           file, linenum, args[1], userlist->name);
+				err_code |= ERR_ALERT;
+				goto out;
+			}
+
+		ag = calloc(1, sizeof(*ag));
+		if (!ag) {
+			ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		ag->name = strdup(args[1]);
+		if (!ag->name) {
+			ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			free(ag);
+			goto out;
+		}
+
+		cur_arg = 2;
+
+		while (*args[cur_arg]) {
+			if (strcmp(args[cur_arg], "users") == 0) {
+				/* NOTE(review): a repeated "users" option overwrites
+				 * and leaks the previous strdup() — confirm whether
+				 * duplicates should be rejected instead.
+				 */
+				ag->groupusers = strdup(args[cur_arg + 1]);
+				cur_arg += 2;
+				continue;
+			} else {
+				ha_alert("parsing [%s:%d]: '%s' only supports 'users' option.\n",
+				         file, linenum, args[0]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				free(ag->groupusers);
+				free(ag->name);
+				free(ag);
+				goto out;
+			}
+		}
+
+		ag->next = userlist->groups;
+		userlist->groups = ag;
+
+	} else if (strcmp(args[0], "user") == 0) { /* new user */
+		struct auth_users *newuser;
+		int cur_arg;
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d]: '%s' expects <name> as arguments.\n",
+			         file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		/* a "user" line outside any userlist is silently ignored */
+		if (!userlist)
+			goto out;
+
+		for (newuser = userlist->users; newuser; newuser = newuser->next)
+			if (strcmp(newuser->user, args[1]) == 0) {
+				ha_warning("parsing [%s:%d]: ignoring duplicated user '%s' in userlist '%s'.\n",
+				           file, linenum, args[1], userlist->name);
+				err_code |= ERR_ALERT;
+				goto out;
+			}
+
+		newuser = calloc(1, sizeof(*newuser));
+		if (!newuser) {
+			ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		/* NOTE(review): strdup() result is not checked here, unlike the
+		 * "userlist"/"group" branches above — confirm this is intended.
+		 */
+		newuser->user = strdup(args[1]);
+
+		newuser->next = userlist->users;
+		userlist->users = newuser;
+
+		cur_arg = 2;
+
+		while (*args[cur_arg]) {
+			if (strcmp(args[cur_arg], "password") == 0) {
+#ifdef USE_LIBCRYPT
+				/* validate that the hash format is understood by crypt(3) */
+				if (!crypt("", args[cur_arg + 1])) {
+					ha_alert("parsing [%s:%d]: the encrypted password used for user '%s' is not supported by crypt(3).\n",
+					         file, linenum, newuser->user);
+					err_code |= ERR_ALERT | ERR_FATAL;
+					goto out;
+				}
+#else
+				ha_warning("parsing [%s:%d]: no crypt(3) support compiled, encrypted passwords will not work.\n",
+				           file, linenum);
+				err_code |= ERR_ALERT;
+#endif
+				newuser->pass = strdup(args[cur_arg + 1]);
+				cur_arg += 2;
+				continue;
+			} else if (strcmp(args[cur_arg], "insecure-password") == 0) {
+				/* cleartext password, flagged so checks compare directly */
+				newuser->pass = strdup(args[cur_arg + 1]);
+				newuser->flags |= AU_O_INSECURE;
+				cur_arg += 2;
+				continue;
+			} else if (strcmp(args[cur_arg], "groups") == 0) {
+				newuser->u.groups_names = strdup(args[cur_arg + 1]);
+				cur_arg += 2;
+				continue;
+			} else {
+				ha_alert("parsing [%s:%d]: '%s' only supports 'password', 'insecure-password' and 'groups' options.\n",
+				         file, linenum, args[0]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+		}
+	} else {
+		ha_alert("parsing [%s:%d]: unknown keyword '%s' in '%s' section\n", file, linenum, args[0], "users");
+		err_code |= ERR_ALERT | ERR_FATAL;
+	}
+
+out:
+	return err_code;
+}
+
+/* Parse a scope declaration line "[<name>]" from the configuration file.
+ * <line> is expected to start with the opening '[' (the first character is
+ * skipped unconditionally). On success the scope name is stored into the
+ * global <cfg_scope> (replacing any previous one) and 0 is returned,
+ * otherwise an ERR_* combination is returned.
+ */
+int
+cfg_parse_scope(const char *file, int linenum, char *line)
+{
+	char *beg, *end, *scope = NULL;
+	int err_code = 0;
+	const char *err;
+
+	beg = line + 1;            /* skip the opening '[' */
+	end = strchr(beg, ']');
+
+	/* Detect end of scope declaration */
+	if (!end || end == beg) {
+		ha_alert("parsing [%s:%d] : empty scope name is forbidden.\n",
+		         file, linenum);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+	/* Get scope name and check its validity */
+	scope = my_strndup(beg, end-beg);
+	err = invalid_char(scope);
+	if (err) {
+		ha_alert("parsing [%s:%d] : character '%c' is not permitted in a scope name.\n",
+		         file, linenum, *err);
+		err_code |= ERR_ALERT | ERR_ABORT;
+		goto out;
+	}
+
+	/* Be sure to have a scope declaration alone on its line */
+	line = end+1;
+	while (isspace((unsigned char)*line))
+		line++;
+	if (*line && *line != '#' && *line != '\n' && *line != '\r') {
+		ha_alert("parsing [%s:%d] : character '%c' is not permitted after scope declaration.\n",
+		         file, linenum, *line);
+		err_code |= ERR_ALERT | ERR_ABORT;
+		goto out;
+	}
+
+	/* We have a valid scope declaration, save it */
+	free(cfg_scope);
+	cfg_scope = scope;
+	scope = NULL;    /* ownership transferred, don't free below */
+
+ out:
+	free(scope);
+	return err_code;
+}
+
+/* Parse a track-sc counter number from the characters between <arg> and
+ * <end>, validate it against the configured number of stick-counters
+ * (global.tune.nb_stk_ctr) and store it into <*track_sc_num>.
+ * Returns 0 on success, -1 on failure with an error message allocated into
+ * <*errmsg> via memprintf().
+ */
+int
+cfg_parse_track_sc_num(unsigned int *track_sc_num,
+                       const char *arg, const char *end, char **errmsg)
+{
+	const char *p;
+	unsigned int num;
+
+	p = arg;    /* keep original start for error reporting */
+	/* NOTE(review): read_uint64() yields a 64-bit value silently truncated
+	 * into <num>; e.g. 2^32 would wrap to 0 and pass the range check —
+	 * confirm inputs are bounded upstream.
+	 */
+	num = read_uint64(&arg, end);
+
+	/* read_uint64() advances <arg>; anything left means non-digit garbage */
+	if (arg != end) {
+		memprintf(errmsg, "Wrong track-sc number '%s'", p);
+		return -1;
+	}
+
+	if (num >= global.tune.nb_stk_ctr) {
+		if (!global.tune.nb_stk_ctr)
+			memprintf(errmsg, "%u track-sc number not usable, stick-counters "
+			          "are disabled by tune.stick-counters", num);
+		else
+			memprintf(errmsg, "%u track-sc number exceeding "
+			          "%d (tune.stick-counters-1) value", num, global.tune.nb_stk_ctr - 1);
+		return -1;
+	}
+
+	*track_sc_num = num;
+	return 0;
+}
+
+/*
+ * Detect a global section after a non-global one and output a diagnostic
+ * warning.
+ */
+static void check_section_position(char *section_name, const char *file, int linenum)
+{
+ if (strcmp(section_name, "global") == 0) {
+ if ((global.mode & MODE_DIAG) && non_global_section_parsed == 1)
+ _ha_diag_warning("parsing [%s:%d] : global section detected after a non-global one, the prevalence of their statements is unspecified\n", file, linenum);
+ }
+ else if (non_global_section_parsed == 0) {
+ non_global_section_parsed = 1;
+ }
+}
+
+/* apply the current default_path setting for config file <file>, and
+ * optionally replace the current path to <origin> if not NULL while the
+ * default-path mode is set to "origin". Errors are returned into an
+ * allocated string passed to <err> if it's not NULL. Returns 0 on failure
+ * or non-zero on success.
+ */
+static int cfg_apply_default_path(const char *file, const char *origin, char **err)
+{
+	const char *beg, *end;
+
+	/* make path start at <beg> and end before <end>, and switch it to ""
+	 * if no slash was passed.
+	 */
+	beg = file;
+	end = strrchr(beg, '/');
+	if (!end)
+		end = beg;
+
+	if (!*initial_cwd) {
+		/* first call: record the startup directory */
+		if (getcwd(initial_cwd, sizeof(initial_cwd)) == NULL) {
+			if (err)
+				memprintf(err, "Impossible to retrieve startup directory name: %s", strerror(errno));
+			return 0;
+		}
+	}
+	else if (chdir(initial_cwd) == -1) {
+		if (err)
+			memprintf(err, "Impossible to get back to initial directory '%s': %s", initial_cwd, strerror(errno));
+		return 0;
+	}
+
+	/* OK now we're (back) to initial_cwd */
+
+	switch (default_path_mode) {
+	case DEFAULT_PATH_CURRENT:
+		/* current_cwd never set, nothing to do */
+		return 1;
+
+	case DEFAULT_PATH_ORIGIN:
+		/* current_cwd set in the config ("default-path origin <dir>") */
+		if (origin) {
+			int ret = snprintf(current_cwd, sizeof(current_cwd), "%s", origin);
+
+			/* fix: snprintf() reports truncation by returning the length
+			 * it *would* have written, so >= sizeof() means truncated;
+			 * the previous '>' test missed the exact-boundary case.
+			 */
+			if (ret < 0 || (size_t)ret >= sizeof(current_cwd)) {
+				if (err)
+					memprintf(err, "Absolute path too long: '%s'", origin);
+				return 0;
+			}
+		}
+		break;
+
+	case DEFAULT_PATH_CONFIG:
+		/* use the config file's own directory */
+		if (end - beg >= sizeof(current_cwd)) {
+			if (err)
+				memprintf(err, "Config file path too long, cannot use for relative paths: '%s'", file);
+			return 0;
+		}
+		memcpy(current_cwd, beg, end - beg);
+		current_cwd[end - beg] = 0;
+		break;
+
+	case DEFAULT_PATH_PARENT:
+		/* use the config file's parent directory ("<dir>/..") */
+		if (end - beg + 3 >= sizeof(current_cwd)) {
+			if (err)
+				memprintf(err, "Config file path too long, cannot use for relative paths: '%s'", file);
+			return 0;
+		}
+		memcpy(current_cwd, beg, end - beg);
+		if (end > beg)
+			memcpy(current_cwd + (end - beg), "/..\0", 4);
+		else
+			memcpy(current_cwd + (end - beg), "..\0", 3);
+		break;
+	}
+
+	if (*current_cwd && chdir(current_cwd) == -1) {
+		if (err)
+			/* fix: the failing directory is current_cwd, not initial_cwd
+			 * which was wrongly reported here before.
+			 */
+			memprintf(err, "Impossible to get back to directory '%s': %s", current_cwd, strerror(errno));
+		return 0;
+	}
+
+	return 1;
+}
+
+/* parses a global "default-path" directive. Accepted modes are "current",
+ * "config", "parent" and "origin <path>"; the chosen mode is stored into
+ * the global <default_path_mode> and immediately applied through
+ * cfg_apply_default_path(). Returns 0 on success, -1 on error with a
+ * message allocated into <*err>.
+ */
+static int cfg_parse_global_def_path(char **args, int section_type, struct proxy *curpx,
+                                     const struct proxy *defpx, const char *file, int line,
+                                     char **err)
+{
+	int ret = -1;
+
+	/* "current", "config", "parent", "origin <path>" */
+
+	if (strcmp(args[1], "current") == 0)
+		default_path_mode = DEFAULT_PATH_CURRENT;
+	else if (strcmp(args[1], "config") == 0)
+		default_path_mode = DEFAULT_PATH_CONFIG;
+	else if (strcmp(args[1], "parent") == 0)
+		default_path_mode = DEFAULT_PATH_PARENT;
+	else if (strcmp(args[1], "origin") == 0)
+		default_path_mode = DEFAULT_PATH_ORIGIN;
+	else {
+		memprintf(err, "%s default-path mode '%s' for '%s', supported modes include 'current', 'config', 'parent', and 'origin'.", *args[1] ? "unsupported" : "missing", args[1], args[0]);
+		goto end;
+	}
+
+	if (default_path_mode == DEFAULT_PATH_ORIGIN) {
+		/* "origin" requires an explicit directory argument */
+		if (!*args[2]) {
+			memprintf(err, "'%s %s' expects a directory as an argument.", args[0], args[1]);
+			goto end;
+		}
+		if (!cfg_apply_default_path(file, args[2], err)) {
+			memprintf(err, "couldn't set '%s' to origin '%s': %s.", args[0], args[2], *err);
+			goto end;
+		}
+	}
+	else if (!cfg_apply_default_path(file, NULL, err)) {
+		memprintf(err, "couldn't set '%s' to '%s': %s.", args[0], args[1], *err);
+		goto end;
+	}
+
+	/* note that once applied, the path is immediately updated */
+
+	ret = 0;
+ end:
+	return ret;
+}
+
+/*
+ * This function reads and parses the configuration file given in the argument.
+ * Returns the error code, 0 if OK, -1 if the config file couldn't be opened,
+ * or any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
int readcfgfile(const char *file)
{
	char *thisline = NULL;          /* raw input line buffer, grown on demand */
	int linesize = LINESIZE;        /* current size of <thisline> */
	FILE *f = NULL;
	int linenum = 0;
	int err_code = 0;
	struct cfg_section *cs = NULL, *pcs = NULL; /* current and previous section descriptors */
	struct cfg_section *ics;
	int readbytes = 0;              /* offset where fgets() resumes after growing the buffer */
	char *outline = NULL;           /* output buffer holding the tokenized words */
	size_t outlen = 0;
	size_t outlinesize = 0;
	int fatal = 0;                  /* count of fatal errors; parsing stops at 50 */
	int missing_lf = -1;            /* position of a stray NUL / missing LF on the previous line, -1 if none */
	int nested_cond_lvl = 0;        /* current depth of .if/.elif/.else blocks */
	enum nested_cond_state nested_conds[MAXNESTEDCONDS];
	char *errmsg = NULL;

	global.cfg_curr_line = 0;
	global.cfg_curr_file = file;

	if ((thisline = malloc(sizeof(*thisline) * linesize)) == NULL) {
		ha_alert("Out of memory trying to allocate a buffer for a configuration line.\n");
		err_code = -1;
		goto err;
	}

	if ((f = fopen(file,"r")) == NULL) {
		err_code = -1;
		goto err;
	}

	/* change to the new dir if required */
	if (!cfg_apply_default_path(file, NULL, &errmsg)) {
		ha_alert("parsing [%s:%d]: failed to apply default-path: %s.\n", file, linenum, errmsg);
		free(errmsg);
		err_code = -1;
		goto err;
	}

next_line:
	while (fgets(thisline + readbytes, linesize - readbytes, f) != NULL) {
		int arg, kwm = KWM_STD;
		char *end;
		char *args[MAX_LINE_ARGS + 1];
		char *line = thisline;

		if (missing_lf != -1) {
			/* the previous iteration found a NUL before the LF: the
			 * file contains a stray NUL character, refuse to go on.
			 */
			ha_alert("parsing [%s:%d]: Stray NUL character at position %d.\n",
			         file, linenum, (missing_lf + 1));
			err_code |= ERR_ALERT | ERR_FATAL;
			missing_lf = -1;
			break;
		}

		linenum++;
		global.cfg_curr_line = linenum;

		if (fatal >= 50) {
			ha_alert("parsing [%s:%d]: too many fatal errors (%d), stopping now.\n", file, linenum, fatal);
			break;
		}

		end = line + strlen(line);

		if (end-line == linesize-1 && *(end-1) != '\n') {
			/* Check if we reached the limit and the last char is not \n.
			 * Watch out for the last line without the terminating '\n'!
			 */
			char *newline;
			int newlinesize = linesize * 2;

			newline = realloc(thisline, sizeof(*thisline) * newlinesize);
			if (newline == NULL) {
				ha_alert("parsing [%s:%d]: line too long, cannot allocate memory.\n",
				         file, linenum);
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				linenum--;
				continue;
			}

			/* double the buffer and re-read the same line where fgets() stopped;
			 * linenum is decremented because the top of the loop re-increments it.
			 */
			readbytes = linesize - 1;
			linesize = newlinesize;
			thisline = newline;
			linenum--;
			continue;
		}

		readbytes = 0;

		if (end > line && *(end-1) == '\n') {
			/* kill trailing LF */
			*(end - 1) = 0;
		}
		else {
			/* mark this line as truncated */
			missing_lf = end - line;
		}

		/* skip leading spaces */
		while (isspace((unsigned char)*line))
			line++;

		if (*line == '[') {/* This is the beginning if a scope */
			err_code |= cfg_parse_scope(file, linenum, line);
			goto next_line;
		}

		/* tokenize the line into <args>; loop to retry after growing the
		 * output buffer when it's too small.
		 */
		while (1) {
			uint32_t err;
			const char *errptr;

			arg = sizeof(args) / sizeof(*args);
			outlen = outlinesize;
			err = parse_line(line, outline, &outlen, args, &arg,
			                 PARSE_OPT_ENV | PARSE_OPT_DQUOTE | PARSE_OPT_SQUOTE |
			                 PARSE_OPT_BKSLASH | PARSE_OPT_SHARP | PARSE_OPT_WORD_EXPAND,
			                 &errptr);

			if (err & PARSE_ERR_QUOTE) {
				size_t newpos = sanitize_for_printing(line, errptr - line, 80);

				ha_alert("parsing [%s:%d]: unmatched quote at position %d:\n"
				         " %s\n %*s\n", file, linenum, (int)(errptr-thisline+1), line, (int)(newpos+1), "^");
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			if (err & PARSE_ERR_BRACE) {
				size_t newpos = sanitize_for_printing(line, errptr - line, 80);

				ha_alert("parsing [%s:%d]: unmatched brace in environment variable name at position %d:\n"
				         " %s\n %*s\n", file, linenum, (int)(errptr-thisline+1), line, (int)(newpos+1), "^");
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			if (err & PARSE_ERR_VARNAME) {
				size_t newpos = sanitize_for_printing(line, errptr - line, 80);

				ha_alert("parsing [%s:%d]: forbidden first char in environment variable name at position %d:\n"
				         " %s\n %*s\n", file, linenum, (int)(errptr-thisline+1), line, (int)(newpos+1), "^");
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			if (err & PARSE_ERR_HEX) {
				size_t newpos = sanitize_for_printing(line, errptr - line, 80);

				ha_alert("parsing [%s:%d]: truncated or invalid hexadecimal sequence at position %d:\n"
				         " %s\n %*s\n", file, linenum, (int)(errptr-thisline+1), line, (int)(newpos+1), "^");
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			if (err & PARSE_ERR_WRONG_EXPAND) {
				size_t newpos = sanitize_for_printing(line, errptr - line, 80);

				ha_alert("parsing [%s:%d]: truncated or invalid word expansion sequence at position %d:\n"
				         " %s\n %*s\n", file, linenum, (int)(errptr-thisline+1), line, (int)(newpos+1), "^");
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			if (err & (PARSE_ERR_TOOLARGE|PARSE_ERR_OVERLAP)) {
				/* output buffer too small: round the needed size up to a
				 * multiple of 1024 and retry the whole tokenization.
				 */
				outlinesize = (outlen + 1023) & -1024;
				outline = my_realloc2(outline, outlinesize);
				if (outline == NULL) {
					ha_alert("parsing [%s:%d]: line too long, cannot allocate memory.\n",
					         file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					fatal++;
					outlinesize = 0;
					goto err;
				}
				/* try again */
				continue;
			}

			if (err & PARSE_ERR_TOOMANY) {
				/* only check this *after* being sure the output is allocated */
				ha_alert("parsing [%s:%d]: too many words, truncating after word %d, position %ld: <%s>.\n",
				         file, linenum, MAX_LINE_ARGS, (long)(args[MAX_LINE_ARGS-1] - outline + 1), args[MAX_LINE_ARGS-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				goto next_line;
			}

			/* everything's OK */
			break;
		}

		/* dump cfg: in this mode the line is printed (possibly anonymized
		 * with key <g_key>) instead of being parsed, then we move on to
		 * the next line.
		 */
		if (global.mode & MODE_DUMP_CFG) {
			if (args[0] != NULL) {
				struct cfg_section *sect;
				int is_sect = 0;
				int i = 0;
				uint32_t g_key = HA_ATOMIC_LOAD(&global.anon_key);

				if (global.mode & MODE_DUMP_NB_L)
					qfprintf(stdout, "%d\t", linenum);

				/* if a word is in sections list, is_sect = 1 */
				list_for_each_entry(sect, &sections, list) {
					if (strcmp(args[0], sect->section_name) == 0) {
						is_sect = 1;
						break;
					}
				}

				if (g_key == 0) {
					/* no anonymizing needed, dump the config as-is (but without comments).
					 * Note: tabs were lost during tokenizing, so we reinsert for non-section
					 * keywords.
					 */
					if (!is_sect)
						qfprintf(stdout, "\t");

					for (i = 0; i < arg; i++) {
						qfprintf(stdout, "%s ", args[i]);
					}
					qfprintf(stdout, "\n");
					continue;
				}

				/* We're anonymizing */

				if (is_sect) {
					/* new sections are optionally followed by an identifier */
					if (arg >= 2) {
						qfprintf(stdout, "%s %s\n", args[0], HA_ANON_ID(g_key, args[1]));
					}
					else {
						qfprintf(stdout, "%s\n", args[0]);
					}
					continue;
				}

				/* non-section keywords start indented */
				qfprintf(stdout, "\t");

				/* some keywords deserve special treatment */
				if (!*args[0]) {
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "anonkey") == 0) {
					qfprintf(stdout, "%s [...]\n", args[0]);
				}

				else if (strcmp(args[0], "maxconn") == 0) {
					qfprintf(stdout, "%s %s\n", args[0], args[1]);
				}

				else if (strcmp(args[0], "stats") == 0 &&
				         (strcmp(args[1], "timeout") == 0 || strcmp(args[1], "maxconn") == 0)) {
					qfprintf(stdout, "%s %s %s\n", args[0], args[1], args[2]);
				}

				else if (strcmp(args[0], "stats") == 0 && strcmp(args[1], "socket") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);

					if (arg > 2) {
						qfprintf(stdout, "%s ", hash_ipanon(g_key, args[2], 1));

						if (arg > 3) {
							qfprintf(stdout, "[...]\n");
						}
						else {
							qfprintf(stdout, "\n");
						}
					}
					else {
						qfprintf(stdout, "\n");
					}
				}

				else if (strcmp(args[0], "timeout") == 0) {
					qfprintf(stdout, "%s %s %s\n", args[0], args[1], args[2]);
				}

				else if (strcmp(args[0], "mode") == 0) {
					qfprintf(stdout, "%s %s\n", args[0], args[1]);
				}

				/* It concerns user in global section and in userlist */
				else if (strcmp(args[0], "user") == 0) {
					qfprintf(stdout, "%s %s ", args[0], HA_ANON_ID(g_key, args[1]));

					if (arg > 2) {
						qfprintf(stdout, "[...]\n");
					}
					else {
						qfprintf(stdout, "\n");
					}
				}

				else if (strcmp(args[0], "bind") == 0) {
					qfprintf(stdout, "%s ", args[0]);
					qfprintf(stdout, "%s ", hash_ipanon(g_key, args[1], 1));
					if (arg > 2) {
						qfprintf(stdout, "[...]\n");
					}
					else {
						qfprintf(stdout, "\n");
					}
				}

				else if (strcmp(args[0], "server") == 0) {
					qfprintf(stdout, "%s %s ", args[0], HA_ANON_ID(g_key, args[1]));

					if (arg > 2) {
						qfprintf(stdout, "%s ", hash_ipanon(g_key, args[2], 1));
					}
					if (arg > 3) {
						qfprintf(stdout, "[...]\n");
					}
					else {
						qfprintf(stdout, "\n");
					}
				}

				else if (strcmp(args[0], "redirect") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);

					if (strcmp(args[1], "prefix") == 0 || strcmp(args[1], "location") == 0) {
						qfprintf(stdout, "%s ", HA_ANON_PATH(g_key, args[2]));
					}
					else {
						qfprintf(stdout, "%s ", args[2]);
					}
					if (arg > 3) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "acl") == 0) {
					qfprintf(stdout, "%s %s %s ", args[0], HA_ANON_ID(g_key, args[1]), args[2]);

					if (arg > 3) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "log") == 0) {
					qfprintf(stdout, "log ");

					if (strcmp(args[1], "global") == 0) {
						qfprintf(stdout, "%s ", args[1]);
					}
					else {
						qfprintf(stdout, "%s ", hash_ipanon(g_key, args[1], 1));
					}
					if (arg > 2) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "peer") == 0) {
					qfprintf(stdout, "%s %s ", args[0], HA_ANON_ID(g_key, args[1]));
					qfprintf(stdout, "%s ", hash_ipanon(g_key, args[2], 1));

					if (arg > 3) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "use_backend") == 0) {
					qfprintf(stdout, "%s %s ", args[0], HA_ANON_ID(g_key, args[1]));

					if (arg > 2) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "default_backend") == 0) {
					qfprintf(stdout, "%s %s\n", args[0], HA_ANON_ID(g_key, args[1]));
				}

				else if (strcmp(args[0], "source") == 0) {
					qfprintf(stdout, "%s %s ", args[0], hash_ipanon(g_key, args[1], 1));

					if (arg > 2) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "nameserver") == 0) {
					qfprintf(stdout, "%s %s %s ", args[0],
					         HA_ANON_ID(g_key, args[1]), hash_ipanon(g_key, args[2], 1));
					if (arg > 3) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "http-request") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);
					if (arg > 2)
						qfprintf(stdout, "[...]");
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "http-response") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);
					if (arg > 2)
						qfprintf(stdout, "[...]");
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "http-after-response") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);
					if (arg > 2)
						qfprintf(stdout, "[...]");
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "filter") == 0) {
					qfprintf(stdout, "%s %s ", args[0], args[1]);
					if (arg > 2)
						qfprintf(stdout, "[...]");
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "errorfile") == 0) {
					qfprintf(stdout, "%s %s %s\n", args[0], args[1], HA_ANON_PATH(g_key, args[2]));
				}

				else if (strcmp(args[0], "cookie") == 0) {
					qfprintf(stdout, "%s %s ", args[0], HA_ANON_ID(g_key, args[1]));
					if (arg > 2)
						qfprintf(stdout, "%s ", args[2]);
					if (arg > 3)
						qfprintf(stdout, "[...]");
					qfprintf(stdout, "\n");
				}

				else if (strcmp(args[0], "stats") == 0 && strcmp(args[1], "auth") == 0) {
					qfprintf(stdout, "%s %s %s\n", args[0], args[1], HA_ANON_STR(g_key, args[2]));
				}

				else {
					/* display up to 3 words and mask the rest which might be confidential */
					for (i = 0; i < MIN(arg, 3); i++) {
						qfprintf(stdout, "%s ", args[i]);
					}
					if (arg > 3) {
						qfprintf(stdout, "[...]");
					}
					qfprintf(stdout, "\n");
				}
			}
			continue;
		}
		/* end of config dump */

		/* empty line */
		if (!**args)
			continue;

		/* check for config macros */
		if (*args[0] == '.') {
			if (strcmp(args[0], ".if") == 0) {
				const char *errptr = NULL;
				char *errmsg = NULL;
				int cond;
				char *w;

				/* remerge all words into a single expression: parse_line()
				 * NUL-separated them in <outline>, so turning the separators
				 * back into spaces rebuilds the original expression in place.
				 */
				for (w = *args; (w += strlen(w)) < outline + outlen - 1; *w = ' ')
					;

				nested_cond_lvl++;
				if (nested_cond_lvl >= MAXNESTEDCONDS) {
					ha_alert("parsing [%s:%d]: too many nested '.if', max is %d.\n", file, linenum, MAXNESTEDCONDS);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				/* an '.if' nested inside a non-taken branch is never evaluated */
				if (nested_cond_lvl > 1 &&
				    (nested_conds[nested_cond_lvl - 1] == NESTED_COND_IF_DROP ||
				     nested_conds[nested_cond_lvl - 1] == NESTED_COND_IF_SKIP ||
				     nested_conds[nested_cond_lvl - 1] == NESTED_COND_ELIF_DROP ||
				     nested_conds[nested_cond_lvl - 1] == NESTED_COND_ELIF_SKIP ||
				     nested_conds[nested_cond_lvl - 1] == NESTED_COND_ELSE_DROP)) {
					nested_conds[nested_cond_lvl] = NESTED_COND_IF_SKIP;
					goto next_line;
				}

				cond = cfg_eval_condition(args + 1, &errmsg, &errptr);
				if (cond < 0) {
					size_t newpos = sanitize_for_printing(args[1], errptr - args[1], 76);

					ha_alert("parsing [%s:%d]: %s in '.if' at position %d:\n .if %s\n %*s\n",
					         file, linenum, errmsg,
					         (int)(errptr-args[1]+1), args[1], (int)(newpos+5), "^");

					free(errmsg);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				if (cond)
					nested_conds[nested_cond_lvl] = NESTED_COND_IF_TAKE;
				else
					nested_conds[nested_cond_lvl] = NESTED_COND_IF_DROP;

				goto next_line;
			}
			else if (strcmp(args[0], ".elif") == 0) {
				const char *errptr = NULL;
				char *errmsg = NULL;
				int cond;
				char *w;

				/* remerge all words into a single expression */
				for (w = *args; (w += strlen(w)) < outline + outlen - 1; *w = ' ')
					;

				if (!nested_cond_lvl) {
					ha_alert("parsing [%s:%d]: lone '.elif' with no matching '.if'.\n", file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				if (nested_conds[nested_cond_lvl] == NESTED_COND_ELSE_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELSE_DROP) {
					ha_alert("parsing [%s:%d]: '.elif' after '.else' is not permitted.\n", file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				/* a previous branch was already taken (or skipped): this
				 * '.elif' is skipped without evaluating its condition.
				 */
				if (nested_conds[nested_cond_lvl] == NESTED_COND_IF_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_IF_SKIP ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_SKIP) {
					nested_conds[nested_cond_lvl] = NESTED_COND_ELIF_SKIP;
					goto next_line;
				}

				cond = cfg_eval_condition(args + 1, &errmsg, &errptr);
				if (cond < 0) {
					size_t newpos = sanitize_for_printing(args[1], errptr - args[1], 74);

					ha_alert("parsing [%s:%d]: %s in '.elif' at position %d:\n .elif %s\n %*s\n",
					         file, linenum, errmsg,
					         (int)(errptr-args[1]+1), args[1], (int)(newpos+7), "^");

					free(errmsg);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				if (cond)
					nested_conds[nested_cond_lvl] = NESTED_COND_ELIF_TAKE;
				else
					nested_conds[nested_cond_lvl] = NESTED_COND_ELIF_DROP;

				goto next_line;
			}
			else if (strcmp(args[0], ".else") == 0) {
				if (*args[1]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'.\n",
					         file, linenum, args[1], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					break;
				}

				if (!nested_cond_lvl) {
					ha_alert("parsing [%s:%d]: lone '.else' with no matching '.if'.\n", file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				if (nested_conds[nested_cond_lvl] == NESTED_COND_ELSE_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELSE_DROP) {
					ha_alert("parsing [%s:%d]: '.else' after '.else' is not permitted.\n", file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					goto err;
				}

				if (nested_conds[nested_cond_lvl] == NESTED_COND_IF_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_IF_SKIP ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_TAKE ||
				    nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_SKIP) {
					nested_conds[nested_cond_lvl] = NESTED_COND_ELSE_DROP;
				} else {
					/* otherwise we take the "else" */
					nested_conds[nested_cond_lvl] = NESTED_COND_ELSE_TAKE;
				}
				goto next_line;
			}
			else if (strcmp(args[0], ".endif") == 0) {
				if (*args[1]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'.\n",
					         file, linenum, args[1], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					break;
				}

				if (!nested_cond_lvl) {
					ha_alert("parsing [%s:%d]: lone '.endif' with no matching '.if'.\n", file, linenum);
					err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
					break;
				}
				nested_cond_lvl--;
				goto next_line;
			}
		}

		/* skip every regular line located inside a non-taken conditional block */
		if (nested_cond_lvl &&
		    (nested_conds[nested_cond_lvl] == NESTED_COND_IF_DROP ||
		     nested_conds[nested_cond_lvl] == NESTED_COND_IF_SKIP ||
		     nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_DROP ||
		     nested_conds[nested_cond_lvl] == NESTED_COND_ELIF_SKIP ||
		     nested_conds[nested_cond_lvl] == NESTED_COND_ELSE_DROP)) {
			/* The current block is masked out by the conditions */
			goto next_line;
		}

		/* .warning/.error/.notice/.diag */
		if (*args[0] == '.') {
			if (strcmp(args[0], ".alert") == 0) {
				if (*args[2]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces.\n",
					         file, linenum, args[2], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto next_line;
				}

				ha_alert("parsing [%s:%d]: '%s'.\n", file, linenum, args[1]);
				err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
				goto err;
			}
			else if (strcmp(args[0], ".warning") == 0) {
				if (*args[2]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces.\n",
					         file, linenum, args[2], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto next_line;
				}

				ha_warning("parsing [%s:%d]: '%s'.\n", file, linenum, args[1]);
				err_code |= ERR_WARN;
				goto next_line;
			}
			else if (strcmp(args[0], ".notice") == 0) {
				if (*args[2]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces.\n",
					         file, linenum, args[2], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto next_line;
				}

				ha_notice("parsing [%s:%d]: '%s'.\n", file, linenum, args[1]);
				goto next_line;
			}
			else if (strcmp(args[0], ".diag") == 0) {
				if (*args[2]) {
					ha_alert("parsing [%s:%d]: Unexpected argument '%s' for '%s'. Use quotes if the message should contain spaces.\n",
					         file, linenum, args[2], args[0]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto next_line;
				}

				ha_diag_warning("parsing [%s:%d]: '%s'.\n", file, linenum, args[1]);
				goto next_line;
			}
			else {
				ha_alert("parsing [%s:%d]: unknown directive '%s'.\n", file, linenum, args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				fatal++;
				break;
			}
		}

		/* check for keyword modifiers "no" and "default": the modifier is
		 * removed and the remaining words are shifted left by one slot.
		 */
		if (strcmp(args[0], "no") == 0) {
			char *tmp;

			kwm = KWM_NO;
			tmp = args[0];
			for (arg=0; *args[arg+1]; arg++)
				args[arg] = args[arg+1]; // shift args after inversion
			*tmp = '\0'; // fix the next arg to \0
			args[arg] = tmp;
		}
		else if (strcmp(args[0], "default") == 0) {
			kwm = KWM_DEF;
			for (arg=0; *args[arg+1]; arg++)
				args[arg] = args[arg+1]; // shift args after inversion
		}

		if (kwm != KWM_STD && strcmp(args[0], "option") != 0 &&
		    strcmp(args[0], "log") != 0 && strcmp(args[0], "busy-polling") != 0 &&
		    strcmp(args[0], "set-dumpable") != 0 && strcmp(args[0], "strict-limits") != 0 &&
		    strcmp(args[0], "insecure-fork-wanted") != 0 &&
		    strcmp(args[0], "numa-cpu-mapping") != 0) {
			ha_alert("parsing [%s:%d]: negation/default currently "
			         "supported only for options, log, busy-polling, "
			         "set-dumpable, strict-limits, insecure-fork-wanted "
			         "and numa-cpu-mapping.\n", file, linenum);
			err_code |= ERR_ALERT | ERR_FATAL;
			fatal++;
		}

		/* detect section start */
		list_for_each_entry(ics, &sections, list) {
			if (strcmp(args[0], ics->section_name) == 0) {
				cursection = ics->section_name;
				pcs = cs;
				cs = ics;
				free(global.cfg_curr_section);
				global.cfg_curr_section = strdup(*args[1] ? args[1] : args[0]);
				check_section_position(args[0], file, linenum);
				break;
			}
		}

		/* a new section just started: finish the previous one first */
		if (pcs && pcs->post_section_parser) {
			int status;

			status = pcs->post_section_parser();
			err_code |= status;
			if (status & ERR_FATAL)
				fatal++;

			if (err_code & ERR_ABORT)
				goto err;
		}
		pcs = NULL;

		if (!cs) {
			ha_alert("parsing [%s:%d]: unknown keyword '%s' out of section.\n", file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			fatal++;
		} else {
			int status;

			status = cs->section_parser(file, linenum, args, kwm);
			err_code |= status;
			if (status & ERR_FATAL)
				fatal++;

			if (err_code & ERR_ABORT)
				goto err;
		}
	}

	if (missing_lf != -1) {
		ha_alert("parsing [%s:%d]: Missing LF on last line, file might have been truncated at position %d.\n",
		         file, linenum, (missing_lf + 1));
		err_code |= ERR_ALERT | ERR_FATAL;
	}

	ha_free(&global.cfg_curr_section);
	/* the last section of the file also needs its post-parser called */
	if (cs && cs->post_section_parser)
		err_code |= cs->post_section_parser();

	if (nested_cond_lvl) {
		ha_alert("parsing [%s:%d]: non-terminated '.if' block.\n", file, linenum);
		err_code |= ERR_ALERT | ERR_FATAL | ERR_ABORT;
	}

	/* undo the directory change performed by cfg_apply_default_path() */
	if (*initial_cwd && chdir(initial_cwd) == -1) {
		ha_alert("Impossible to get back to initial directory '%s' : %s\n", initial_cwd, strerror(errno));
		err_code |= ERR_ALERT | ERR_FATAL;
	}

err:
	ha_free(&cfg_scope);
	cursection = NULL;
	free(thisline);
	free(outline);
	global.cfg_curr_line = 0;
	global.cfg_curr_file = NULL;

	if (f)
		fclose(f);

	return err_code;
}
+
+#if defined(USE_THREAD) && defined USE_CPU_AFFINITY
+#if defined(__linux__)
+
/* scandir() filter keeping only entries whose name matches the pattern
 * "node<X>" where <X> is a non-negative decimal number (e.g. "node0",
 * "node12"). Returns 1 when the entry matches, 0 otherwise.
 */
static int numa_filter(const struct dirent *dir)
{
	const char *suffix = dir->d_name + 4;
	char *endptr;

	/* the name must begin with the "node" prefix */
	if (strncmp(dir->d_name, "node", 4) != 0)
		return 0;

	/* something must follow the prefix */
	if (*suffix == '\0')
		return 0;

	/* and that something must be a whole non-negative number */
	if (strtol(suffix, &endptr, 10) < 0 || *endptr != '\0')
		return 0;

	return 1;
}
+
+/* Inspect the cpu topology of the machine on startup. If a multi-socket
+ * machine is detected, try to bind on the first node with active cpu. This is
+ * done to prevent an impact on the overall performance when the topology of
+ * the machine is unknown. This function is not called if one of the conditions
+ * is met :
+ * - a non-null nbthread directive is active
+ * - a restrictive cpu-map directive is active
+ * - a restrictive affinity is already applied, for example via taskset
+ *
+ * Returns the count of cpus selected. If no automatic binding was required or
+ * an error occurred and the topology is unknown, 0 is returned.
+ */
static int numa_detect_topology()
{
	struct dirent **node_dirlist;
	int node_dirlist_size;

	struct hap_cpuset active_cpus, node_cpu_set;
	const char *parse_cpu_set_args[2];
	char *err = NULL;
	int grp, thr;

	/* node_cpu_set count is used as return value */
	ha_cpuset_zero(&node_cpu_set);

	/* 1. count the sysfs node<X> directories */
	node_dirlist = NULL;
	node_dirlist_size = scandir(NUMA_DETECT_SYSTEM_SYSFS_PATH"/node", &node_dirlist, numa_filter, alphasort);
	/* a single node (or a scandir error, which returns -1) means no
	 * multi-socket topology to refine: leave with count 0.
	 */
	if (node_dirlist_size <= 1)
		goto free_scandir_entries;

	/* 2. read and parse the list of currently online cpu */
	if (read_line_to_trash("%s/cpu/online", NUMA_DETECT_SYSTEM_SYSFS_PATH) < 0) {
		ha_notice("Cannot read online CPUs list, will not try to refine binding\n");
		goto free_scandir_entries;
	}

	parse_cpu_set_args[0] = trash.area;
	parse_cpu_set_args[1] = "\0";
	if (parse_cpu_set(parse_cpu_set_args, &active_cpus, &err) != 0) {
		ha_notice("Cannot read online CPUs list: '%s'. Will not try to refine binding\n", err);
		free(err);
		goto free_scandir_entries;
	}

	/* 3. loop through nodes dirs and find the first one with active cpus.
	 * Entries consumed by the loop are freed here; any remaining ones are
	 * released by the cleanup loop below.
	 */
	while (node_dirlist_size--) {
		const char *node = node_dirlist[node_dirlist_size]->d_name;
		ha_cpuset_zero(&node_cpu_set);

		if (read_line_to_trash("%s/node/%s/cpumap", NUMA_DETECT_SYSTEM_SYSFS_PATH, node) < 0) {
			ha_notice("Cannot read CPUs list of '%s', will not select them to refine binding\n", node);
			free(node_dirlist[node_dirlist_size]);
			continue;
		}

		parse_cpumap(trash.area, &node_cpu_set);
		/* keep only this node's cpus that are actually online */
		ha_cpuset_and(&node_cpu_set, &active_cpus);

		/* 4. set affinity on the first found node with active cpus */
		if (!ha_cpuset_count(&node_cpu_set)) {
			free(node_dirlist[node_dirlist_size]);
			continue;
		}

		ha_diag_warning("Multi-socket cpu detected, automatically binding on active CPUs of '%s' (%u active cpu(s))\n", node, ha_cpuset_count(&node_cpu_set));
		for (grp = 0; grp < MAX_TGROUPS; grp++)
			for (thr = 0; thr < MAX_THREADS_PER_GROUP; thr++)
				ha_cpuset_assign(&cpu_map[grp].thread[thr], &node_cpu_set);

		free(node_dirlist[node_dirlist_size]);
		break;
	}

 free_scandir_entries:
	/* free the entries not consumed above, then the list itself
	 * (free(NULL) is harmless when scandir() failed).
	 */
	while (node_dirlist_size-- > 0)
		free(node_dirlist[node_dirlist_size]);
	free(node_dirlist);

	return ha_cpuset_count(&node_cpu_set);
}
+
+#elif defined(__FreeBSD__)
/* FreeBSD variant: relies on the "vm.ndomains" sysctl and per-domain
 * cpuset affinity instead of sysfs.
 */
static int numa_detect_topology()
{
	struct hap_cpuset node_cpu_set;
	int ndomains = 0, i;
	size_t len = sizeof(ndomains);
	int grp, thr;

	if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) {
		ha_notice("Cannot assess the number of CPUs domains\n");
		return 0;
	}

	BUG_ON(ndomains > MAXMEMDOM);
	ha_cpuset_zero(&node_cpu_set);

	/* a single memory domain means no multi-socket topology to refine */
	if (ndomains < 2)
		goto leave;

	/*
	 * Retrieve the first valid CPU domain having at least one active
	 * cpu, bind all thread slots to it, and return the number of cpus
	 * of that domain.
	 */
	for (i = 0; i < ndomains; i ++) {
		struct hap_cpuset dom;
		ha_cpuset_zero(&dom);
		if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_DOMAIN, i, sizeof(dom.cpuset), &dom.cpuset) == -1)
			continue;

		if (!ha_cpuset_count(&dom))
			continue;

		ha_cpuset_assign(&node_cpu_set, &dom);

		ha_diag_warning("Multi-socket cpu detected, automatically binding on active CPUs of '%d' (%u active cpu(s))\n", i, ha_cpuset_count(&node_cpu_set));
		for (grp = 0; grp < MAX_TGROUPS; grp++)
			for (thr = 0; thr < MAX_THREADS_PER_GROUP; thr++)
				ha_cpuset_assign(&cpu_map[grp].thread[thr], &node_cpu_set);
		break;
	}
 leave:
	return ha_cpuset_count(&node_cpu_set);
}
+
+#else
/* Fallback for platforms without NUMA detection support: reports zero
 * selected cpus so no automatic binding is ever attempted.
 */
static int numa_detect_topology()
{
	return 0;
}
+
+#endif
+#endif /* USE_THREAD && USE_CPU_AFFINITY */
+
+/*
+ * Returns the error code, 0 if OK, or any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
+int check_config_validity()
+{
+ int cfgerr = 0;
+ struct proxy *curproxy = NULL;
+ struct proxy *init_proxies_list = NULL;
+ struct stktable *t;
+ struct server *newsrv = NULL;
+ int err_code = 0;
+ unsigned int next_pxid = 1;
+ struct bind_conf *bind_conf;
+ char *err;
+ struct cfg_postparser *postparser;
+ struct resolvers *curr_resolvers = NULL;
+ int i;
+
+ bind_conf = NULL;
+ /*
+ * Now, check for the integrity of all that we have collected.
+ */
+
+ if (!global.tune.max_http_hdr)
+ global.tune.max_http_hdr = MAX_HTTP_HDR;
+
+ if (!global.tune.cookie_len)
+ global.tune.cookie_len = CAPTURE_LEN;
+
+ if (!global.tune.requri_len)
+ global.tune.requri_len = REQURI_LEN;
+
+ if (!global.nbthread) {
+ /* nbthread not set, thus automatic. In this case, and only if
+ * running on a single process, we enable the same number of
+ * threads as the number of CPUs the process is bound to. This
+ * allows to easily control the number of threads using taskset.
+ */
+ global.nbthread = 1;
+
+#if defined(USE_THREAD)
+ {
+ int numa_cores = 0;
+#if defined(USE_CPU_AFFINITY)
+ if (global.numa_cpu_mapping && !thread_cpu_mask_forced() && !cpu_map_configured())
+ numa_cores = numa_detect_topology();
+#endif
+ global.nbthread = numa_cores ? numa_cores :
+ thread_cpus_enabled_at_boot;
+
+ /* Note that we cannot have more than 32 or 64 threads per group */
+ if (!global.nbtgroups)
+ global.nbtgroups = 1;
+
+ if (global.nbthread > MAX_THREADS_PER_GROUP * global.nbtgroups) {
+ ha_diag_warning("nbthread not set, found %d CPUs, limiting to %d threads (maximum is %d per thread group). Please set nbthreads and/or increase thread-groups in the global section to silence this warning.\n",
+ global.nbthread, MAX_THREADS_PER_GROUP * global.nbtgroups, MAX_THREADS_PER_GROUP);
+ global.nbthread = MAX_THREADS_PER_GROUP * global.nbtgroups;
+ }
+ }
+#endif
+ }
+
+ if (!global.nbtgroups)
+ global.nbtgroups = 1;
+
+ if (thread_map_to_groups() < 0) {
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ pool_head_requri = create_pool("requri", global.tune.requri_len , MEM_F_SHARED);
+
+ pool_head_capture = create_pool("capture", global.tune.cookie_len, MEM_F_SHARED);
+
+ /* Post initialisation of the users and groups lists. */
+ err_code = userlist_postinit();
+ if (err_code != ERR_NONE)
+ goto out;
+
+ /* first, we will invert the proxy list order */
+ curproxy = NULL;
+ while (proxies_list) {
+ struct proxy *next;
+
+ next = proxies_list->next;
+ proxies_list->next = curproxy;
+ curproxy = proxies_list;
+ if (!next)
+ break;
+ proxies_list = next;
+ }
+
+ /* starting to initialize the main proxies list */
+ init_proxies_list = proxies_list;
+
+init_proxies_list_stage1:
+ for (curproxy = init_proxies_list; curproxy; curproxy = curproxy->next) {
+ struct switching_rule *rule;
+ struct server_rule *srule;
+ struct sticking_rule *mrule;
+ struct logger *tmplogger;
+ unsigned int next_id;
+
+ if (!(curproxy->cap & PR_CAP_INT) && curproxy->uuid < 0) {
+ /* proxy ID not set, use automatic numbering with first
+ * spare entry starting with next_pxid. We don't assign
+ * numbers for internal proxies as they may depend on
+ * build or config options and we don't want them to
+ * possibly reuse existing IDs.
+ */
+ next_pxid = get_next_id(&used_proxy_id, next_pxid);
+ curproxy->conf.id.key = curproxy->uuid = next_pxid;
+ eb32_insert(&used_proxy_id, &curproxy->conf.id);
+ }
+
+ if (curproxy->mode == PR_MODE_HTTP && global.tune.bufsize >= (256 << 20) && ONLY_ONCE()) {
+ ha_alert("global.tune.bufsize must be below 256 MB when HTTP is in use (current value = %d).\n",
+ global.tune.bufsize);
+ cfgerr++;
+ }
+
+ /* next IDs are shifted even if the proxy is disabled, this
+ * guarantees that a proxy that is temporarily disabled in the
+ * configuration doesn't cause a renumbering. Internal proxies
+ * that are not assigned a static ID must never shift the IDs
+ * either since they may appear in any order (Lua, logs, etc).
+ * The GLOBAL proxy that carries the stats socket has its ID
+ * forced to zero.
+ */
+ if (curproxy->uuid >= 0)
+ next_pxid++;
+
+ if (curproxy->flags & PR_FL_DISABLED) {
+ /* ensure we don't keep listeners uselessly bound. We
+ * can't disable their listeners yet (fdtab not
+ * allocated yet) but let's skip them.
+ */
+ if (curproxy->table) {
+ ha_free(&curproxy->table->peers.name);
+ curproxy->table->peers.p = NULL;
+ }
+ continue;
+ }
+
+ /* The current proxy is referencing a default proxy. We must
+ * finalize its config, but only once. If the default proxy is
+ * ready (PR_FL_READY) it means it was already fully configured.
+ */
+ if (curproxy->defpx) {
+ if (!(curproxy->defpx->flags & PR_FL_READY)) {
+ /* check validity for 'tcp-request' layer 4/5/6/7 rules */
+ cfgerr += check_action_rules(&curproxy->defpx->tcp_req.l4_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->tcp_req.l5_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->tcp_req.inspect_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->tcp_rep.inspect_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->http_req_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->http_res_rules, curproxy->defpx, &err_code);
+ cfgerr += check_action_rules(&curproxy->defpx->http_after_res_rules, curproxy->defpx, &err_code);
+
+ err = NULL;
+ i = smp_resolve_args(curproxy->defpx, &err);
+ cfgerr += i;
+ if (i) {
+ indent_msg(&err, 8);
+ ha_alert("%s%s\n", i > 1 ? "multiple argument resolution errors:" : "", err);
+ ha_free(&err);
+ }
+ else
+ cfgerr += acl_find_targets(curproxy->defpx);
+
+ /* default proxy is now ready. Set the right FE/BE capabilities */
+ curproxy->defpx->flags |= PR_FL_READY;
+ }
+ }
+
+ /* check and reduce the bind-proc of each listener */
+ list_for_each_entry(bind_conf, &curproxy->conf.bind, by_fe) {
+ int ret;
+
+ /* HTTP frontends with "h2" as ALPN/NPN will work in
+ * HTTP/2 and absolutely require buffers 16kB or larger.
+ */
+#ifdef USE_OPENSSL
+ /* no-alpn ? If so, it's the right moment to remove it */
+ if (bind_conf->ssl_conf.alpn_str && !bind_conf->ssl_conf.alpn_len) {
+ free(bind_conf->ssl_conf.alpn_str);
+ bind_conf->ssl_conf.alpn_str = NULL;
+ }
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ else if (!bind_conf->ssl_conf.alpn_str && !bind_conf->ssl_conf.npn_str &&
+ ((bind_conf->options & BC_O_USE_SSL) || bind_conf->xprt == xprt_get(XPRT_QUIC)) &&
+ curproxy->mode == PR_MODE_HTTP && global.tune.bufsize >= 16384) {
+
+ /* Neither ALPN nor NPN were explicitly set nor disabled, we're
+ * in HTTP mode with an SSL or QUIC listener, we can enable ALPN.
+ * Note that it's in binary form.
+ */
+ if (bind_conf->xprt == xprt_get(XPRT_QUIC))
+ bind_conf->ssl_conf.alpn_str = strdup("\002h3");
+ else
+ bind_conf->ssl_conf.alpn_str = strdup("\002h2\010http/1.1");
+
+ if (!bind_conf->ssl_conf.alpn_str) {
+ ha_alert("Proxy '%s': out of memory while trying to allocate a default alpn string in 'bind %s' at [%s:%d].\n",
+ curproxy->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+ cfgerr++;
+ err_code |= ERR_FATAL | ERR_ALERT;
+ goto out;
+ }
+ bind_conf->ssl_conf.alpn_len = strlen(bind_conf->ssl_conf.alpn_str);
+ }
+#endif
+
+ if (curproxy->mode == PR_MODE_HTTP && global.tune.bufsize < 16384) {
+#ifdef OPENSSL_NPN_NEGOTIATED
+ /* check NPN */
+ if (bind_conf->ssl_conf.npn_str && strstr(bind_conf->ssl_conf.npn_str, "\002h2")) {
+ ha_alert("HTTP frontend '%s' enables HTTP/2 via NPN at [%s:%d], so global.tune.bufsize must be at least 16384 bytes (%d now).\n",
+ curproxy->id, bind_conf->file, bind_conf->line, global.tune.bufsize);
+ cfgerr++;
+ }
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ /* check ALPN */
+ if (bind_conf->ssl_conf.alpn_str && strstr(bind_conf->ssl_conf.alpn_str, "\002h2")) {
+ ha_alert("HTTP frontend '%s' enables HTTP/2 via ALPN at [%s:%d], so global.tune.bufsize must be at least 16384 bytes (%d now).\n",
+ curproxy->id, bind_conf->file, bind_conf->line, global.tune.bufsize);
+ cfgerr++;
+ }
+#endif
+ } /* HTTP && bufsize < 16384 */
+#endif
+
+ /* finish the bind setup */
+ ret = bind_complete_thread_setup(bind_conf, &err_code);
+ if (ret != 0) {
+ cfgerr += ret;
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+ }
+
+ switch (curproxy->mode) {
+ case PR_MODE_TCP:
+ cfgerr += proxy_cfg_ensure_no_http(curproxy);
+ cfgerr += proxy_cfg_ensure_no_log(curproxy);
+ break;
+
+ case PR_MODE_HTTP:
+ cfgerr += proxy_cfg_ensure_no_log(curproxy);
+ curproxy->http_needed = 1;
+ break;
+
+ case PR_MODE_CLI:
+ cfgerr += proxy_cfg_ensure_no_http(curproxy);
+ cfgerr += proxy_cfg_ensure_no_log(curproxy);
+ break;
+
+ case PR_MODE_SYSLOG:
+ /* this mode is initialized as the classic tcp proxy */
+ cfgerr += proxy_cfg_ensure_no_http(curproxy);
+ break;
+
+ case PR_MODE_PEERS:
+ case PR_MODES:
+			/* should not happen, but gcc warns about a missing switch statement */
+ ha_alert("%s '%s' cannot initialize this proxy mode (peers) in this way. NOTE: PLEASE REPORT THIS TO DEVELOPERS AS YOU'RE NOT SUPPOSED TO BE ABLE TO CREATE A CONFIGURATION TRIGGERING THIS!\n",
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ break;
+ }
+
+ if (!(curproxy->cap & PR_CAP_INT) && (curproxy->cap & PR_CAP_FE) && LIST_ISEMPTY(&curproxy->conf.listeners)) {
+ ha_warning("%s '%s' has no 'bind' directive. Please declare it as a backend if this was intended.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (curproxy->cap & PR_CAP_BE) {
+ if (curproxy->lbprm.algo & BE_LB_KIND) {
+ if (curproxy->options & PR_O_TRANSP) {
+ ha_alert("%s '%s' cannot use both transparent and balance mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ }
+#ifdef WE_DONT_SUPPORT_SERVERLESS_LISTENERS
+ else if (curproxy->srv == NULL) {
+ ha_alert("%s '%s' needs at least 1 server in balance mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ }
+#endif
+ else if (curproxy->options & PR_O_DISPATCH) {
+ ha_warning("dispatch address of %s '%s' will be ignored in balance mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ else if (!(curproxy->options & (PR_O_TRANSP | PR_O_DISPATCH))) {
+ /* If no LB algo is set in a backend, and we're not in
+ * transparent mode, dispatch mode nor proxy mode, we
+ * want to use balance roundrobin by default.
+ */
+ curproxy->lbprm.algo &= ~BE_LB_ALGO;
+ curproxy->lbprm.algo |= BE_LB_ALGO_RR;
+ }
+ }
+
+ if (curproxy->options & PR_O_DISPATCH)
+ curproxy->options &= ~PR_O_TRANSP;
+ else if (curproxy->options & PR_O_TRANSP)
+ curproxy->options &= ~PR_O_DISPATCH;
+
+ if ((curproxy->tcpcheck_rules.flags & TCPCHK_RULES_UNUSED_HTTP_RS)) {
+ ha_warning("%s '%s' uses http-check rules without 'option httpchk', so the rules are ignored.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if ((curproxy->options2 & PR_O2_CHK_ANY) == PR_O2_TCPCHK_CHK &&
+ (curproxy->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) != TCPCHK_RULES_HTTP_CHK) {
+ if (curproxy->options & PR_O_DISABLE404) {
+ ha_warning("'%s' will be ignored for %s '%s' (requires 'option httpchk').\n",
+ "disable-on-404", proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->options &= ~PR_O_DISABLE404;
+ }
+ if (curproxy->options2 & PR_O2_CHK_SNDST) {
+ ha_warning("'%s' will be ignored for %s '%s' (requires 'option httpchk').\n",
+ "send-state", proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->options &= ~PR_O2_CHK_SNDST;
+ }
+ }
+
+ if ((curproxy->options2 & PR_O2_CHK_ANY) == PR_O2_EXT_CHK) {
+ if (!global.external_check) {
+ ha_alert("Proxy '%s' : '%s' unable to find required 'global.external-check'.\n",
+ curproxy->id, "option external-check");
+ cfgerr++;
+ }
+ if (!curproxy->check_command) {
+ ha_alert("Proxy '%s' : '%s' unable to find required 'external-check command'.\n",
+ curproxy->id, "option external-check");
+ cfgerr++;
+ }
+ if (!(global.tune.options & GTUNE_INSECURE_FORK)) {
+ ha_warning("Proxy '%s' : 'insecure-fork-wanted' not enabled in the global section, '%s' will likely fail.\n",
+ curproxy->id, "option external-check");
+ err_code |= ERR_WARN;
+ }
+ }
+
+ if (curproxy->email_alert.set) {
+ if (!(curproxy->email_alert.mailers.name && curproxy->email_alert.from && curproxy->email_alert.to)) {
+ ha_warning("'email-alert' will be ignored for %s '%s' (the presence any of "
+ "'email-alert from', 'email-alert level' 'email-alert mailers', "
+ "'email-alert myhostname', or 'email-alert to' "
+ "requires each of 'email-alert from', 'email-alert mailers' and 'email-alert to' "
+ "to be present).\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ free_email_alert(curproxy);
+ }
+ if (!curproxy->email_alert.myhostname)
+ curproxy->email_alert.myhostname = strdup(hostname);
+ }
+
+ if (curproxy->check_command) {
+ int clear = 0;
+ if ((curproxy->options2 & PR_O2_CHK_ANY) != PR_O2_EXT_CHK) {
+ ha_warning("'%s' will be ignored for %s '%s' (requires 'option external-check').\n",
+ "external-check command", proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ clear = 1;
+ }
+ if (curproxy->check_command[0] != '/' && !curproxy->check_path) {
+ ha_alert("Proxy '%s': '%s' does not have a leading '/' and 'external-check path' is not set.\n",
+ curproxy->id, "external-check command");
+ cfgerr++;
+ }
+ if (clear) {
+ ha_free(&curproxy->check_command);
+ }
+ }
+
+ if (curproxy->check_path) {
+ if ((curproxy->options2 & PR_O2_CHK_ANY) != PR_O2_EXT_CHK) {
+ ha_warning("'%s' will be ignored for %s '%s' (requires 'option external-check').\n",
+ "external-check path", proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ ha_free(&curproxy->check_path);
+ }
+ }
+
+ /* if a default backend was specified, let's find it */
+ if (curproxy->defbe.name) {
+ struct proxy *target;
+
+ target = proxy_be_by_name(curproxy->defbe.name);
+ if (!target) {
+ ha_alert("Proxy '%s': unable to find required default_backend: '%s'.\n",
+ curproxy->id, curproxy->defbe.name);
+ cfgerr++;
+ } else if (target == curproxy) {
+ ha_alert("Proxy '%s': loop detected for default_backend: '%s'.\n",
+ curproxy->id, curproxy->defbe.name);
+ cfgerr++;
+ } else if (target->mode != curproxy->mode &&
+ !(curproxy->mode == PR_MODE_TCP && target->mode == PR_MODE_HTTP)) {
+
+ ha_alert("%s %s '%s' (%s:%d) tries to use incompatible %s %s '%s' (%s:%d) as its default backend (see 'mode').\n",
+ proxy_mode_str(curproxy->mode), proxy_type_str(curproxy), curproxy->id,
+ curproxy->conf.file, curproxy->conf.line,
+ proxy_mode_str(target->mode), proxy_type_str(target), target->id,
+ target->conf.file, target->conf.line);
+ cfgerr++;
+ } else {
+ free(curproxy->defbe.name);
+ curproxy->defbe.be = target;
+ /* Emit a warning if this proxy also has some servers */
+ if (curproxy->srv) {
+ ha_warning("In proxy '%s', the 'default_backend' rule always has precedence over the servers, which will never be used.\n",
+ curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ }
+ }
+
+ /* find the target proxy for 'use_backend' rules */
+ list_for_each_entry(rule, &curproxy->switching_rules, list) {
+ struct proxy *target;
+ struct logformat_node *node;
+ char *pxname;
+
+ /* Try to parse the string as a log format expression. If the result
+ * of the parsing is only one entry containing a simple string, then
+ * it's a standard string corresponding to a static rule, thus the
+ * parsing is cancelled and be.name is restored to be resolved.
+ */
+ pxname = rule->be.name;
+ LIST_INIT(&rule->be.expr);
+ curproxy->conf.args.ctx = ARGC_UBK;
+ curproxy->conf.args.file = rule->file;
+ curproxy->conf.args.line = rule->line;
+ err = NULL;
+ if (!parse_logformat_string(pxname, curproxy, &rule->be.expr, 0, SMP_VAL_FE_HRQ_HDR, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse use_backend rule '%s' : %s.\n",
+ rule->file, rule->line, pxname, err);
+ free(err);
+ cfgerr++;
+ continue;
+ }
+ node = LIST_NEXT(&rule->be.expr, struct logformat_node *, list);
+
+ if (!LIST_ISEMPTY(&rule->be.expr)) {
+ if (node->type != LOG_FMT_TEXT || node->list.n != &rule->be.expr) {
+ rule->dynamic = 1;
+ free(pxname);
+ continue;
+ }
+ /* Only one element in the list, a simple string: free the expression and
+ * fall back to static rule
+ */
+ LIST_DELETE(&node->list);
+ free(node->arg);
+ free(node);
+ }
+
+ rule->dynamic = 0;
+ rule->be.name = pxname;
+
+ target = proxy_be_by_name(rule->be.name);
+ if (!target) {
+ ha_alert("Proxy '%s': unable to find required use_backend: '%s'.\n",
+ curproxy->id, rule->be.name);
+ cfgerr++;
+ } else if (target == curproxy) {
+ ha_alert("Proxy '%s': loop detected for use_backend: '%s'.\n",
+ curproxy->id, rule->be.name);
+ cfgerr++;
+ } else if (target->mode != curproxy->mode &&
+ !(curproxy->mode == PR_MODE_TCP && target->mode == PR_MODE_HTTP)) {
+
+ ha_alert("%s %s '%s' (%s:%d) tries to use incompatible %s %s '%s' (%s:%d) in a 'use_backend' rule (see 'mode').\n",
+ proxy_mode_str(curproxy->mode), proxy_type_str(curproxy), curproxy->id,
+ curproxy->conf.file, curproxy->conf.line,
+ proxy_mode_str(target->mode), proxy_type_str(target), target->id,
+ target->conf.file, target->conf.line);
+ cfgerr++;
+ } else {
+ ha_free(&rule->be.name);
+ rule->be.backend = target;
+ }
+ err_code |= warnif_tcp_http_cond(curproxy, rule->cond);
+ }
+
+ /* find the target server for 'use_server' rules */
+ list_for_each_entry(srule, &curproxy->server_rules, list) {
+ struct server *target;
+ struct logformat_node *node;
+ char *server_name;
+
+ /* We try to parse the string as a log format expression. If the result of the parsing
+ * is only one entry containing a single string, then it's a standard string corresponding
+ * to a static rule, thus the parsing is cancelled and we fall back to setting srv.ptr.
+ */
+ server_name = srule->srv.name;
+ LIST_INIT(&srule->expr);
+ curproxy->conf.args.ctx = ARGC_USRV;
+ err = NULL;
+ if (!parse_logformat_string(server_name, curproxy, &srule->expr, 0, SMP_VAL_FE_HRQ_HDR, &err)) {
+ ha_alert("Parsing [%s:%d]; use-server rule failed to parse log-format '%s' : %s.\n",
+ srule->file, srule->line, server_name, err);
+ free(err);
+ cfgerr++;
+ continue;
+ }
+ node = LIST_NEXT(&srule->expr, struct logformat_node *, list);
+
+ if (!LIST_ISEMPTY(&srule->expr)) {
+ if (node->type != LOG_FMT_TEXT || node->list.n != &srule->expr) {
+ srule->dynamic = 1;
+ free(server_name);
+ continue;
+ }
+ /* Only one element in the list, a simple string: free the expression and
+ * fall back to static rule
+ */
+ LIST_DELETE(&node->list);
+ free(node->arg);
+ free(node);
+ }
+
+ srule->dynamic = 0;
+ srule->srv.name = server_name;
+ target = findserver(curproxy, srule->srv.name);
+ err_code |= warnif_tcp_http_cond(curproxy, srule->cond);
+
+ if (!target) {
+ ha_alert("%s '%s' : unable to find server '%s' referenced in a 'use-server' rule.\n",
+ proxy_type_str(curproxy), curproxy->id, srule->srv.name);
+ cfgerr++;
+ continue;
+ }
+ ha_free(&srule->srv.name);
+ srule->srv.ptr = target;
+ target->flags |= SRV_F_NON_PURGEABLE;
+ }
+
+ /* find the target table for 'stick' rules */
+ list_for_each_entry(mrule, &curproxy->sticking_rules, list) {
+ curproxy->be_req_ana |= AN_REQ_STICKING_RULES;
+ if (mrule->flags & STK_IS_STORE)
+ curproxy->be_rsp_ana |= AN_RES_STORE_RULES;
+
+ if (!resolve_stick_rule(curproxy, mrule))
+ cfgerr++;
+
+ err_code |= warnif_tcp_http_cond(curproxy, mrule->cond);
+ }
+
+ /* find the target table for 'store response' rules */
+ list_for_each_entry(mrule, &curproxy->storersp_rules, list) {
+ curproxy->be_rsp_ana |= AN_RES_STORE_RULES;
+
+ if (!resolve_stick_rule(curproxy, mrule))
+ cfgerr++;
+ }
+
+ /* check validity for 'tcp-request' layer 4/5/6/7 rules */
+ cfgerr += check_action_rules(&curproxy->tcp_req.l4_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->tcp_req.l5_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->tcp_req.inspect_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->tcp_rep.inspect_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->http_req_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->http_res_rules, curproxy, &err_code);
+ cfgerr += check_action_rules(&curproxy->http_after_res_rules, curproxy, &err_code);
+
+		/* Warn if 'switch-mode http' is used on a TCP listener with servers but no backend */
+ if (!curproxy->defbe.name && LIST_ISEMPTY(&curproxy->switching_rules) && curproxy->srv) {
+ if ((curproxy->options & PR_O_HTTP_UPG) && curproxy->mode == PR_MODE_TCP)
+ ha_warning("Proxy '%s' : 'switch-mode http' configured for a %s %s with no backend. "
+ "Incoming connections upgraded to HTTP cannot be routed to TCP servers\n",
+ curproxy->id, proxy_mode_str(curproxy->mode), proxy_type_str(curproxy));
+ }
+
+ if (curproxy->table && curproxy->table->peers.name) {
+ struct peers *curpeers;
+
+ for (curpeers = cfg_peers; curpeers; curpeers = curpeers->next) {
+ if (strcmp(curpeers->id, curproxy->table->peers.name) == 0) {
+ ha_free(&curproxy->table->peers.name);
+ curproxy->table->peers.p = curpeers;
+ break;
+ }
+ }
+
+ if (!curpeers) {
+ ha_alert("Proxy '%s': unable to find sync peers '%s'.\n",
+ curproxy->id, curproxy->table->peers.name);
+ ha_free(&curproxy->table->peers.name);
+ curproxy->table->peers.p = NULL;
+ cfgerr++;
+ }
+ else if (curpeers->disabled) {
+ /* silently disable this peers section */
+ curproxy->table->peers.p = NULL;
+ }
+ else if (!curpeers->peers_fe) {
+ ha_alert("Proxy '%s': unable to find local peer '%s' in peers section '%s'.\n",
+ curproxy->id, localpeer, curpeers->id);
+ curproxy->table->peers.p = NULL;
+ cfgerr++;
+ }
+ }
+
+
+ if (curproxy->email_alert.mailers.name) {
+ struct mailers *curmailers = mailers;
+
+ for (curmailers = mailers; curmailers; curmailers = curmailers->next) {
+ if (strcmp(curmailers->id, curproxy->email_alert.mailers.name) == 0)
+ break;
+ }
+ if (!curmailers) {
+ ha_alert("Proxy '%s': unable to find mailers '%s'.\n",
+ curproxy->id, curproxy->email_alert.mailers.name);
+ free_email_alert(curproxy);
+ cfgerr++;
+ }
+ else {
+ err = NULL;
+ if (init_email_alert(curmailers, curproxy, &err)) {
+ ha_alert("Proxy '%s': %s.\n", curproxy->id, err);
+ free(err);
+ cfgerr++;
+ }
+ }
+ }
+
+ if (curproxy->uri_auth && !(curproxy->uri_auth->flags & STAT_CONVDONE) &&
+ !LIST_ISEMPTY(&curproxy->uri_auth->http_req_rules) &&
+ (curproxy->uri_auth->userlist || curproxy->uri_auth->auth_realm )) {
+ ha_alert("%s '%s': stats 'auth'/'realm' and 'http-request' can't be used at the same time.\n",
+ "proxy", curproxy->id);
+ cfgerr++;
+ goto out_uri_auth_compat;
+ }
+
+ if (curproxy->uri_auth && curproxy->uri_auth->userlist &&
+ (!(curproxy->uri_auth->flags & STAT_CONVDONE) ||
+ LIST_ISEMPTY(&curproxy->uri_auth->http_req_rules))) {
+ const char *uri_auth_compat_req[10];
+ struct act_rule *rule;
+ i = 0;
+
+ /* build the ACL condition from scratch. We're relying on anonymous ACLs for that */
+ uri_auth_compat_req[i++] = "auth";
+
+ if (curproxy->uri_auth->auth_realm) {
+ uri_auth_compat_req[i++] = "realm";
+ uri_auth_compat_req[i++] = curproxy->uri_auth->auth_realm;
+ }
+
+ uri_auth_compat_req[i++] = "unless";
+ uri_auth_compat_req[i++] = "{";
+ uri_auth_compat_req[i++] = "http_auth(.internal-stats-userlist)";
+ uri_auth_compat_req[i++] = "}";
+ uri_auth_compat_req[i++] = "";
+
+ rule = parse_http_req_cond(uri_auth_compat_req, "internal-stats-auth-compat", 0, curproxy);
+ if (!rule) {
+ cfgerr++;
+ break;
+ }
+
+ LIST_APPEND(&curproxy->uri_auth->http_req_rules, &rule->list);
+
+ if (curproxy->uri_auth->auth_realm) {
+ ha_free(&curproxy->uri_auth->auth_realm);
+ }
+ curproxy->uri_auth->flags |= STAT_CONVDONE;
+ }
+out_uri_auth_compat:
+
+ /* check whether we have a logger that uses RFC5424 log format */
+ list_for_each_entry(tmplogger, &curproxy->loggers, list) {
+ if (tmplogger->format == LOG_FORMAT_RFC5424) {
+ if (!curproxy->conf.logformat_sd_string) {
+ /* set the default logformat_sd_string */
+ curproxy->conf.logformat_sd_string = default_rfc5424_sd_log_format;
+ }
+ break;
+ }
+ }
+
+ /* compile the log format */
+ if (!(curproxy->cap & PR_CAP_FE)) {
+ if (curproxy->conf.logformat_string != default_http_log_format &&
+ curproxy->conf.logformat_string != default_tcp_log_format &&
+ curproxy->conf.logformat_string != clf_http_log_format)
+ free(curproxy->conf.logformat_string);
+ curproxy->conf.logformat_string = NULL;
+ ha_free(&curproxy->conf.lfs_file);
+ curproxy->conf.lfs_line = 0;
+
+ if (curproxy->conf.logformat_sd_string != default_rfc5424_sd_log_format)
+ free(curproxy->conf.logformat_sd_string);
+ curproxy->conf.logformat_sd_string = NULL;
+ ha_free(&curproxy->conf.lfsd_file);
+ curproxy->conf.lfsd_line = 0;
+ }
+
+ if (curproxy->conf.logformat_string) {
+ curproxy->conf.args.ctx = ARGC_LOG;
+ curproxy->conf.args.file = curproxy->conf.lfs_file;
+ curproxy->conf.args.line = curproxy->conf.lfs_line;
+ err = NULL;
+ if (!parse_logformat_string(curproxy->conf.logformat_string, curproxy, &curproxy->logformat,
+ LOG_OPT_MANDATORY|LOG_OPT_MERGE_SPACES,
+ SMP_VAL_FE_LOG_END, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse log-format : %s.\n",
+ curproxy->conf.lfs_file, curproxy->conf.lfs_line, err);
+ free(err);
+ cfgerr++;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+
+ if (curproxy->conf.logformat_sd_string) {
+ curproxy->conf.args.ctx = ARGC_LOGSD;
+ curproxy->conf.args.file = curproxy->conf.lfsd_file;
+ curproxy->conf.args.line = curproxy->conf.lfsd_line;
+ err = NULL;
+ if (!parse_logformat_string(curproxy->conf.logformat_sd_string, curproxy, &curproxy->logformat_sd,
+ LOG_OPT_MANDATORY|LOG_OPT_MERGE_SPACES,
+ SMP_VAL_FE_LOG_END, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse log-format-sd : %s.\n",
+ curproxy->conf.lfs_file, curproxy->conf.lfs_line, err);
+ free(err);
+ cfgerr++;
+ } else if (!add_to_logformat_list(NULL, NULL, LF_SEPARATOR, &curproxy->logformat_sd, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse log-format-sd : %s.\n",
+ curproxy->conf.lfs_file, curproxy->conf.lfs_line, err);
+ free(err);
+ cfgerr++;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+
+ if (curproxy->conf.uniqueid_format_string) {
+ int where = 0;
+
+ curproxy->conf.args.ctx = ARGC_UIF;
+ curproxy->conf.args.file = curproxy->conf.uif_file;
+ curproxy->conf.args.line = curproxy->conf.uif_line;
+ err = NULL;
+ if (curproxy->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (curproxy->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ if (!parse_logformat_string(curproxy->conf.uniqueid_format_string, curproxy, &curproxy->format_unique_id,
+ LOG_OPT_HTTP|LOG_OPT_MERGE_SPACES, where, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse unique-id : %s.\n",
+ curproxy->conf.uif_file, curproxy->conf.uif_line, err);
+ free(err);
+ cfgerr++;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+
+ if (curproxy->conf.error_logformat_string) {
+ curproxy->conf.args.ctx = ARGC_LOG;
+ curproxy->conf.args.file = curproxy->conf.elfs_file;
+ curproxy->conf.args.line = curproxy->conf.elfs_line;
+ err = NULL;
+ if (!parse_logformat_string(curproxy->conf.error_logformat_string, curproxy, &curproxy->logformat_error,
+ LOG_OPT_MANDATORY|LOG_OPT_MERGE_SPACES,
+ SMP_VAL_FE_LOG_END, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to parse error-log-format : %s.\n",
+ curproxy->conf.elfs_file, curproxy->conf.elfs_line, err);
+ free(err);
+ cfgerr++;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+
+ /* "balance hash" needs to compile its expression
+ * (log backends will handle this in proxy log postcheck)
+ */
+ if (curproxy->mode != PR_MODE_SYSLOG &&
+ (curproxy->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_SMP) {
+ int idx = 0;
+ const char *args[] = {
+ curproxy->lbprm.arg_str,
+ NULL,
+ };
+
+ err = NULL;
+ curproxy->conf.args.ctx = ARGC_USRV; // same context as use_server.
+ curproxy->lbprm.expr =
+ sample_parse_expr((char **)args, &idx,
+ curproxy->conf.file, curproxy->conf.line,
+ &err, &curproxy->conf.args, NULL);
+
+ if (!curproxy->lbprm.expr) {
+ ha_alert("%s '%s' [%s:%d]: failed to parse 'balance hash' expression '%s' in : %s.\n",
+ proxy_type_str(curproxy), curproxy->id,
+ curproxy->conf.file, curproxy->conf.line,
+ curproxy->lbprm.arg_str, err);
+ ha_free(&err);
+ cfgerr++;
+ }
+ else if (!(curproxy->lbprm.expr->fetch->val & SMP_VAL_BE_SET_SRV)) {
+ ha_alert("%s '%s' [%s:%d]: error detected while parsing 'balance hash' expression '%s' "
+ "which requires information from %s, which is not available here.\n",
+ proxy_type_str(curproxy), curproxy->id,
+ curproxy->conf.file, curproxy->conf.line,
+ curproxy->lbprm.arg_str, sample_src_names(curproxy->lbprm.expr->fetch->use));
+ cfgerr++;
+ }
+ else if (curproxy->mode == PR_MODE_HTTP && (curproxy->lbprm.expr->fetch->use & SMP_USE_L6REQ)) {
+ ha_warning("%s '%s' [%s:%d]: L6 sample fetch <%s> will be ignored in 'balance hash' expression in HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id,
+ curproxy->conf.file, curproxy->conf.line,
+ curproxy->lbprm.arg_str);
+ }
+ else
+ curproxy->http_needed |= !!(curproxy->lbprm.expr->fetch->use & SMP_USE_HTTP_ANY);
+ }
+
+ /* only now we can check if some args remain unresolved.
+ * This must be done after the users and groups resolution.
+ */
+ err = NULL;
+ i = smp_resolve_args(curproxy, &err);
+ cfgerr += i;
+ if (i) {
+ indent_msg(&err, 8);
+ ha_alert("%s%s\n", i > 1 ? "multiple argument resolution errors:" : "", err);
+ ha_free(&err);
+ } else
+ cfgerr += acl_find_targets(curproxy);
+
+ if (!(curproxy->cap & PR_CAP_INT) && (curproxy->mode == PR_MODE_TCP || curproxy->mode == PR_MODE_HTTP) &&
+ (((curproxy->cap & PR_CAP_FE) && !curproxy->timeout.client) ||
+ ((curproxy->cap & PR_CAP_BE) && (curproxy->srv) &&
+ (!curproxy->timeout.connect ||
+ (!curproxy->timeout.server && (curproxy->mode == PR_MODE_HTTP || !curproxy->timeout.tunnel)))))) {
+ ha_warning("missing timeouts for %s '%s'.\n"
+ " | While not properly invalid, you will certainly encounter various problems\n"
+ " | with such a configuration. To fix this, please ensure that all following\n"
+ " | timeouts are set to a non-zero value: 'client', 'connect', 'server'.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ /* Historically, the tarpit and queue timeouts were inherited from contimeout.
+ * We must still support older configurations, so let's find out whether those
+ * parameters have been set or must be copied from contimeouts.
+ */
+ if (!curproxy->timeout.tarpit)
+ curproxy->timeout.tarpit = curproxy->timeout.connect;
+ if ((curproxy->cap & PR_CAP_BE) && !curproxy->timeout.queue)
+ curproxy->timeout.queue = curproxy->timeout.connect;
+
+ if ((curproxy->tcpcheck_rules.flags & TCPCHK_RULES_UNUSED_TCP_RS)) {
+ ha_warning("%s '%s' uses tcp-check rules without 'option tcp-check', so the rules are ignored.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ /* ensure that cookie capture length is not too large */
+ if (curproxy->capture_len >= global.tune.cookie_len) {
+ ha_warning("truncating capture length to %d bytes for %s '%s'.\n",
+ global.tune.cookie_len - 1, proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->capture_len = global.tune.cookie_len - 1;
+ }
+
+ /* The small pools required for the capture lists */
+ if (curproxy->nb_req_cap) {
+ curproxy->req_cap_pool = create_pool("ptrcap",
+ curproxy->nb_req_cap * sizeof(char *),
+ MEM_F_SHARED);
+ }
+
+ if (curproxy->nb_rsp_cap) {
+ curproxy->rsp_cap_pool = create_pool("ptrcap",
+ curproxy->nb_rsp_cap * sizeof(char *),
+ MEM_F_SHARED);
+ }
+
+ switch (curproxy->load_server_state_from_file) {
+ case PR_SRV_STATE_FILE_UNSPEC:
+ curproxy->load_server_state_from_file = PR_SRV_STATE_FILE_NONE;
+ break;
+ case PR_SRV_STATE_FILE_GLOBAL:
+ if (!global.server_state_file) {
+ ha_warning("backend '%s' configured to load server state file from global section 'server-state-file' directive. Unfortunately, 'server-state-file' is not set!\n",
+ curproxy->id);
+ err_code |= ERR_WARN;
+ }
+ break;
+ }
+
+ /* first, we will invert the servers list order */
+ newsrv = NULL;
+ while (curproxy->srv) {
+ struct server *next;
+
+ next = curproxy->srv->next;
+ curproxy->srv->next = newsrv;
+ newsrv = curproxy->srv;
+ if (!next)
+ break;
+ curproxy->srv = next;
+ }
+
+ /* Check that no server name conflicts. This causes trouble in the stats.
+ * We only emit a warning for the first conflict affecting each server,
+ * in order to avoid combinatory explosion if all servers have the same
+ * name. We do that only for servers which do not have an explicit ID,
+ * because these IDs were made also for distinguishing them and we don't
+ * want to annoy people who correctly manage them.
+ */
+ for (newsrv = curproxy->srv; newsrv; newsrv = newsrv->next) {
+ struct server *other_srv;
+
+ if (newsrv->puid)
+ continue;
+
+ for (other_srv = curproxy->srv; other_srv && other_srv != newsrv; other_srv = other_srv->next) {
+ if (!other_srv->puid && strcmp(other_srv->id, newsrv->id) == 0) {
+ ha_alert("parsing [%s:%d] : %s '%s', another server named '%s' was already defined at line %d, please use distinct names.\n",
+ newsrv->conf.file, newsrv->conf.line,
+ proxy_type_str(curproxy), curproxy->id,
+ newsrv->id, other_srv->conf.line);
+ cfgerr++;
+ break;
+ }
+ }
+ }
+
+ /* assign automatic UIDs to servers which don't have one yet */
+ next_id = 1;
+ newsrv = curproxy->srv;
+ while (newsrv != NULL) {
+ if (!newsrv->puid) {
+ /* server ID not set, use automatic numbering with first
+ * spare entry starting with next_svid.
+ */
+ next_id = get_next_id(&curproxy->conf.used_server_id, next_id);
+ newsrv->conf.id.key = newsrv->puid = next_id;
+ eb32_insert(&curproxy->conf.used_server_id, &newsrv->conf.id);
+ }
+ newsrv->conf.name.key = newsrv->id;
+ ebis_insert(&curproxy->conf.used_server_name, &newsrv->conf.name);
+
+ next_id++;
+ newsrv = newsrv->next;
+ }
+
+ curproxy->lbprm.wmult = 1; /* default weight multiplier */
+ curproxy->lbprm.wdiv = 1; /* default weight divider */
+
+ /*
+	 * If this server supports a maxconn parameter, it needs a dedicated
+	 * task to fill the emptied slots when a connection leaves.
+ * Also, resolve deferred tracking dependency if needed.
+ */
+ newsrv = curproxy->srv;
+ while (newsrv != NULL) {
+ set_usermsgs_ctx(newsrv->conf.file, newsrv->conf.line, &newsrv->obj_type);
+
+ srv_minmax_conn_apply(newsrv);
+
+ /* this will also properly set the transport layer for
+ * prod and checks
+		 * if default-server has use_ssl, prepare ssl init
+ * without activating it */
+ if (newsrv->use_ssl == 1 || newsrv->check.use_ssl == 1 ||
+ (newsrv->proxy->options & PR_O_TCPCHK_SSL) ||
+ ((newsrv->flags & SRV_F_DEFSRV_USE_SSL) && newsrv->use_ssl != 1)) {
+ if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv)
+ cfgerr += xprt_get(XPRT_SSL)->prepare_srv(newsrv);
+ }
+
+ if ((newsrv->flags & SRV_F_FASTOPEN) &&
+ ((curproxy->retry_type & (PR_RE_DISCONNECTED | PR_RE_TIMEOUT)) !=
+ (PR_RE_DISCONNECTED | PR_RE_TIMEOUT)))
+ ha_warning("server has tfo activated, the backend should be configured with at least 'conn-failure', 'empty-response' and 'response-timeout' or we wouldn't be able to retry the connection on failure.\n");
+
+ if (newsrv->trackit) {
+ if (srv_apply_track(newsrv, curproxy)) {
+ ++cfgerr;
+ goto next_srv;
+ }
+ }
+
+ next_srv:
+ reset_usermsgs_ctx();
+ newsrv = newsrv->next;
+ }
+
+ /*
+ * Try to generate dynamic cookies for servers now.
+ * It couldn't be done earlier, since at the time we parsed
+ * the server line, we may not have known yet that we
+ * should use dynamic cookies, or the secret key may not
+ * have been provided yet.
+ */
+ if (curproxy->ck_opts & PR_CK_DYNAMIC) {
+ newsrv = curproxy->srv;
+ while (newsrv != NULL) {
+ srv_set_dyncookie(newsrv);
+ newsrv = newsrv->next;
+ }
+
+ }
+ /* We have to initialize the server lookup mechanism depending
+ * on what LB algorithm was chosen.
+ */
+
+ if (curproxy->mode == PR_MODE_SYSLOG) {
+ /* log load-balancing requires special init that is performed
+ * during log-postparsing step
+ */
+ goto skip_server_lb_init;
+ }
+ curproxy->lbprm.algo &= ~(BE_LB_LKUP | BE_LB_PROP_DYN);
+ switch (curproxy->lbprm.algo & BE_LB_KIND) {
+ case BE_LB_KIND_RR:
+ if ((curproxy->lbprm.algo & BE_LB_PARM) == BE_LB_RR_STATIC) {
+ curproxy->lbprm.algo |= BE_LB_LKUP_MAP;
+ init_server_map(curproxy);
+ } else if ((curproxy->lbprm.algo & BE_LB_PARM) == BE_LB_RR_RANDOM) {
+ curproxy->lbprm.algo |= BE_LB_LKUP_CHTREE | BE_LB_PROP_DYN;
+ if (chash_init_server_tree(curproxy) < 0) {
+ cfgerr++;
+ }
+ } else {
+ curproxy->lbprm.algo |= BE_LB_LKUP_RRTREE | BE_LB_PROP_DYN;
+ fwrr_init_server_groups(curproxy);
+ }
+ break;
+
+ case BE_LB_KIND_CB:
+ if ((curproxy->lbprm.algo & BE_LB_PARM) == BE_LB_CB_LC) {
+ curproxy->lbprm.algo |= BE_LB_LKUP_LCTREE | BE_LB_PROP_DYN;
+ fwlc_init_server_tree(curproxy);
+ } else {
+ curproxy->lbprm.algo |= BE_LB_LKUP_FSTREE | BE_LB_PROP_DYN;
+ fas_init_server_tree(curproxy);
+ }
+ break;
+
+ case BE_LB_KIND_HI:
+ if ((curproxy->lbprm.algo & BE_LB_HASH_TYPE) == BE_LB_HASH_CONS) {
+ curproxy->lbprm.algo |= BE_LB_LKUP_CHTREE | BE_LB_PROP_DYN;
+ if (chash_init_server_tree(curproxy) < 0) {
+ cfgerr++;
+ }
+ } else {
+ curproxy->lbprm.algo |= BE_LB_LKUP_MAP;
+ init_server_map(curproxy);
+ }
+ break;
+ }
+ skip_server_lb_init:
+ HA_RWLOCK_INIT(&curproxy->lbprm.lock);
+
+ if (curproxy->options & PR_O_LOGASAP)
+ curproxy->to_log &= ~LW_BYTES;
+
+ if (!(curproxy->cap & PR_CAP_INT) && (curproxy->mode == PR_MODE_TCP || curproxy->mode == PR_MODE_HTTP) &&
+ (curproxy->cap & PR_CAP_FE) && LIST_ISEMPTY(&curproxy->loggers) &&
+ (!LIST_ISEMPTY(&curproxy->logformat) || !LIST_ISEMPTY(&curproxy->logformat_sd))) {
+ ha_warning("log format ignored for %s '%s' since it has no log address.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (curproxy->mode != PR_MODE_HTTP && !(curproxy->options & PR_O_HTTP_UPG)) {
+ int optnum;
+
+ if (curproxy->uri_auth) {
+ ha_warning("'stats' statement ignored for %s '%s' as it requires HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->uri_auth = NULL;
+ }
+
+ if (curproxy->capture_name) {
+ ha_warning("'capture' statement ignored for %s '%s' as it requires HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->http_req_rules)) {
+ ha_warning("'http-request' rules ignored for %s '%s' as they require HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->http_res_rules)) {
+ ha_warning("'http-response' rules ignored for %s '%s' as they require HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->http_after_res_rules)) {
+ ha_warning("'http-after-response' rules ignored for %s '%s' as they require HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ if (!LIST_ISEMPTY(&curproxy->redirect_rules)) {
+ ha_warning("'redirect' rules ignored for %s '%s' as they require HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ }
+
+ for (optnum = 0; cfg_opts[optnum].name; optnum++) {
+ if (cfg_opts[optnum].mode == PR_MODE_HTTP &&
+ (curproxy->cap & cfg_opts[optnum].cap) &&
+ (curproxy->options & cfg_opts[optnum].val)) {
+ ha_warning("'option %s' ignored for %s '%s' as it requires HTTP mode.\n",
+ cfg_opts[optnum].name, proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->options &= ~cfg_opts[optnum].val;
+ }
+ }
+
+ for (optnum = 0; cfg_opts2[optnum].name; optnum++) {
+ if (cfg_opts2[optnum].mode == PR_MODE_HTTP &&
+ (curproxy->cap & cfg_opts2[optnum].cap) &&
+ (curproxy->options2 & cfg_opts2[optnum].val)) {
+ ha_warning("'option %s' ignored for %s '%s' as it requires HTTP mode.\n",
+ cfg_opts2[optnum].name, proxy_type_str(curproxy), curproxy->id);
+ err_code |= ERR_WARN;
+ curproxy->options2 &= ~cfg_opts2[optnum].val;
+ }
+ }
+
+#if defined(CONFIG_HAP_TRANSPARENT)
+ if (curproxy->conn_src.bind_hdr_occ) {
+ curproxy->conn_src.bind_hdr_occ = 0;
+ ha_warning("%s '%s' : ignoring use of header %s as source IP in non-HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id, curproxy->conn_src.bind_hdr_name);
+ err_code |= ERR_WARN;
+ }
+#endif
+ }
+
+ /*
+ * ensure that we're not cross-dressing a TCP server into HTTP.
+ */
+ newsrv = curproxy->srv;
+ while (newsrv != NULL) {
+ if ((curproxy->mode != PR_MODE_HTTP) && newsrv->rdr_len) {
+ ha_alert("%s '%s' : server cannot have cookie or redirect prefix in non-HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ }
+
+ if ((curproxy->mode != PR_MODE_HTTP) && newsrv->cklen) {
+ ha_warning("%s '%s' : ignoring cookie for server '%s' as HTTP mode is disabled.\n",
+ proxy_type_str(curproxy), curproxy->id, newsrv->id);
+ err_code |= ERR_WARN;
+ }
+
+ if ((newsrv->flags & SRV_F_MAPPORTS) && (curproxy->options2 & PR_O2_RDPC_PRST)) {
+ ha_warning("%s '%s' : RDP cookie persistence will not work for server '%s' because it lacks an explicit port number.\n",
+ proxy_type_str(curproxy), curproxy->id, newsrv->id);
+ err_code |= ERR_WARN;
+ }
+
+#if defined(CONFIG_HAP_TRANSPARENT)
+ if (curproxy->mode != PR_MODE_HTTP && newsrv->conn_src.bind_hdr_occ) {
+ newsrv->conn_src.bind_hdr_occ = 0;
+ ha_warning("%s '%s' : server %s cannot use header %s as source IP in non-HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id, newsrv->id, newsrv->conn_src.bind_hdr_name);
+ err_code |= ERR_WARN;
+ }
+#endif
+
+ if ((curproxy->mode != PR_MODE_HTTP) && (curproxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR)
+ curproxy->options &= ~PR_O_REUSE_MASK;
+
+ if ((curproxy->mode != PR_MODE_HTTP) && newsrv->flags & SRV_F_RHTTP) {
+ ha_alert("%s '%s' : server %s uses reverse HTTP addressing which can only be used with HTTP mode.\n",
+ proxy_type_str(curproxy), curproxy->id, newsrv->id);
+ cfgerr++;
+ err_code |= ERR_FATAL | ERR_ALERT;
+ goto out;
+ }
+
+ newsrv = newsrv->next;
+ }
+
+ /* Check filter configuration, if any */
+ cfgerr += flt_check(curproxy);
+
+ if (curproxy->cap & PR_CAP_FE) {
+ if (!curproxy->accept)
+ curproxy->accept = frontend_accept;
+
+ if (!LIST_ISEMPTY(&curproxy->tcp_req.inspect_rules) ||
+ (curproxy->defpx && !LIST_ISEMPTY(&curproxy->defpx->tcp_req.inspect_rules)))
+ curproxy->fe_req_ana |= AN_REQ_INSPECT_FE;
+
+ if (curproxy->mode == PR_MODE_HTTP) {
+ curproxy->fe_req_ana |= AN_REQ_WAIT_HTTP | AN_REQ_HTTP_PROCESS_FE;
+ curproxy->fe_rsp_ana |= AN_RES_WAIT_HTTP | AN_RES_HTTP_PROCESS_FE;
+ }
+
+ if (curproxy->mode == PR_MODE_CLI) {
+ curproxy->fe_req_ana |= AN_REQ_WAIT_CLI;
+ curproxy->fe_rsp_ana |= AN_RES_WAIT_CLI;
+ }
+
+ /* both TCP and HTTP must check switching rules */
+ curproxy->fe_req_ana |= AN_REQ_SWITCHING_RULES;
+
+ /* Add filters analyzers if needed */
+ if (!LIST_ISEMPTY(&curproxy->filter_configs)) {
+ curproxy->fe_req_ana |= AN_REQ_FLT_START_FE | AN_REQ_FLT_XFER_DATA | AN_REQ_FLT_END;
+ curproxy->fe_rsp_ana |= AN_RES_FLT_START_FE | AN_RES_FLT_XFER_DATA | AN_RES_FLT_END;
+ }
+ }
+
+ if (curproxy->cap & PR_CAP_BE) {
+ if (!LIST_ISEMPTY(&curproxy->tcp_req.inspect_rules) ||
+ (curproxy->defpx && !LIST_ISEMPTY(&curproxy->defpx->tcp_req.inspect_rules)))
+ curproxy->be_req_ana |= AN_REQ_INSPECT_BE;
+
+ if (!LIST_ISEMPTY(&curproxy->tcp_rep.inspect_rules) ||
+ (curproxy->defpx && !LIST_ISEMPTY(&curproxy->defpx->tcp_rep.inspect_rules)))
+ curproxy->be_rsp_ana |= AN_RES_INSPECT;
+
+ if (curproxy->mode == PR_MODE_HTTP) {
+ curproxy->be_req_ana |= AN_REQ_WAIT_HTTP | AN_REQ_HTTP_INNER | AN_REQ_HTTP_PROCESS_BE;
+ curproxy->be_rsp_ana |= AN_RES_WAIT_HTTP | AN_RES_HTTP_PROCESS_BE;
+ }
+
+ /* If the backend does requires RDP cookie persistence, we have to
+ * enable the corresponding analyser.
+ */
+ if (curproxy->options2 & PR_O2_RDPC_PRST)
+ curproxy->be_req_ana |= AN_REQ_PRST_RDP_COOKIE;
+
+ /* Add filters analyzers if needed */
+ if (!LIST_ISEMPTY(&curproxy->filter_configs)) {
+ curproxy->be_req_ana |= AN_REQ_FLT_START_BE | AN_REQ_FLT_XFER_DATA | AN_REQ_FLT_END;
+ curproxy->be_rsp_ana |= AN_RES_FLT_START_BE | AN_RES_FLT_XFER_DATA | AN_RES_FLT_END;
+ }
+ }
+
+ /* Check the mux protocols, if any, for each listener and server
+ * attached to the current proxy */
+ list_for_each_entry(bind_conf, &curproxy->conf.bind, by_fe) {
+ int mode = conn_pr_mode_to_proto_mode(curproxy->mode);
+ const struct mux_proto_list *mux_ent;
+
+ if (!bind_conf->mux_proto) {
+ /* No protocol was specified. If we're using QUIC at the transport
+ * layer, we'll instantiate it as a mux as well. If QUIC is not
+ * compiled in, this will remain NULL.
+ */
+ if (bind_conf->xprt && bind_conf->xprt == xprt_get(XPRT_QUIC))
+ bind_conf->mux_proto = get_mux_proto(ist("quic"));
+ }
+
+ if (!bind_conf->mux_proto)
+ continue;
+
+ /* it is possible that an incorrect mux was referenced
+ * due to the proxy's mode not being taken into account
+ * on first pass. Let's adjust it now.
+ */
+ mux_ent = conn_get_best_mux_entry(bind_conf->mux_proto->token, PROTO_SIDE_FE, mode);
+
+ if (!mux_ent || !isteq(mux_ent->token, bind_conf->mux_proto->token)) {
+ ha_alert("%s '%s' : MUX protocol '%.*s' is not usable for 'bind %s' at [%s:%d].\n",
+ proxy_type_str(curproxy), curproxy->id,
+ (int)bind_conf->mux_proto->token.len,
+ bind_conf->mux_proto->token.ptr,
+ bind_conf->arg, bind_conf->file, bind_conf->line);
+ cfgerr++;
+ } else {
+ if ((mux_ent->mux->flags & MX_FL_FRAMED) && !(bind_conf->options & BC_O_USE_SOCK_DGRAM)) {
+ ha_alert("%s '%s' : frame-based MUX protocol '%.*s' is incompatible with stream transport of 'bind %s' at [%s:%d].\n",
+ proxy_type_str(curproxy), curproxy->id,
+ (int)bind_conf->mux_proto->token.len,
+ bind_conf->mux_proto->token.ptr,
+ bind_conf->arg, bind_conf->file, bind_conf->line);
+ cfgerr++;
+ }
+ else if (!(mux_ent->mux->flags & MX_FL_FRAMED) && !(bind_conf->options & BC_O_USE_SOCK_STREAM)) {
+ ha_alert("%s '%s' : stream-based MUX protocol '%.*s' is incompatible with framed transport of 'bind %s' at [%s:%d].\n",
+ proxy_type_str(curproxy), curproxy->id,
+ (int)bind_conf->mux_proto->token.len,
+ bind_conf->mux_proto->token.ptr,
+ bind_conf->arg, bind_conf->file, bind_conf->line);
+ cfgerr++;
+ }
+ }
+
+ /* update the mux */
+ bind_conf->mux_proto = mux_ent;
+ }
+ for (newsrv = curproxy->srv; newsrv; newsrv = newsrv->next) {
+ int mode = conn_pr_mode_to_proto_mode(curproxy->mode);
+ const struct mux_proto_list *mux_ent;
+
+ if (!newsrv->mux_proto)
+ continue;
+
+ /* it is possible that an incorrect mux was referenced
+ * due to the proxy's mode not being taken into account
+ * on first pass. Let's adjust it now.
+ */
+ mux_ent = conn_get_best_mux_entry(newsrv->mux_proto->token, PROTO_SIDE_BE, mode);
+
+ if (!mux_ent || !isteq(mux_ent->token, newsrv->mux_proto->token)) {
+ ha_alert("%s '%s' : MUX protocol '%.*s' is not usable for server '%s' at [%s:%d].\n",
+ proxy_type_str(curproxy), curproxy->id,
+ (int)newsrv->mux_proto->token.len,
+ newsrv->mux_proto->token.ptr,
+ newsrv->id, newsrv->conf.file, newsrv->conf.line);
+ cfgerr++;
+ }
+
+ /* update the mux */
+ newsrv->mux_proto = mux_ent;
+ }
+
+ /* Allocate default tcp-check rules for proxies without
+ * explicit rules.
+ */
+ if (curproxy->cap & PR_CAP_BE) {
+ if (!(curproxy->options2 & PR_O2_CHK_ANY)) {
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curproxy->tcpcheck_rules;
+
+ curproxy->options2 |= PR_O2_TCPCHK_CHK;
+
+ rs = find_tcpcheck_ruleset("*tcp-check");
+ if (!rs) {
+ rs = create_tcpcheck_ruleset("*tcp-check");
+ if (rs == NULL) {
+ ha_alert("config: %s '%s': out of memory.\n",
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ }
+ }
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = &rs->rules;
+ rules->flags = 0;
+ }
+ }
+ }
+
+ /*
+ * We have just initialized the main proxies list
+ * we must also configure the log-forward proxies list
+ */
+ if (init_proxies_list == proxies_list) {
+ init_proxies_list = cfg_log_forward;
+ /* check if list is not null to avoid infinite loop */
+ if (init_proxies_list)
+ goto init_proxies_list_stage1;
+ }
+
+ if (init_proxies_list == cfg_log_forward) {
+ init_proxies_list = sink_proxies_list;
+ /* check if list is not null to avoid infinite loop */
+ if (init_proxies_list)
+ goto init_proxies_list_stage1;
+ }
+
+ /***********************************************************/
+ /* At this point, target names have already been resolved. */
+ /***********************************************************/
+
+ /* we must finish to initialize certain things on the servers */
+
+ list_for_each_entry(newsrv, &servers_list, global_list) {
+ /* initialize idle conns lists */
+ if (srv_init_per_thr(newsrv) == -1) {
+ ha_alert("parsing [%s:%d] : failed to allocate per-thread lists for server '%s'.\n",
+ newsrv->conf.file, newsrv->conf.line, newsrv->id);
+ cfgerr++;
+ continue;
+ }
+
+ if (newsrv->max_idle_conns != 0) {
+ newsrv->curr_idle_thr = calloc(global.nbthread, sizeof(*newsrv->curr_idle_thr));
+ if (!newsrv->curr_idle_thr) {
+ ha_alert("parsing [%s:%d] : failed to allocate idle connection tasks for server '%s'.\n",
+ newsrv->conf.file, newsrv->conf.line, newsrv->id);
+ cfgerr++;
+ continue;
+ }
+
+ }
+ }
+
+ idle_conn_task = task_new_anywhere();
+ if (!idle_conn_task) {
+ ha_alert("parsing : failed to allocate global idle connection task.\n");
+ cfgerr++;
+ }
+ else {
+ idle_conn_task->process = srv_cleanup_idle_conns;
+ idle_conn_task->context = NULL;
+
+ for (i = 0; i < global.nbthread; i++) {
+ idle_conns[i].cleanup_task = task_new_on(i);
+ if (!idle_conns[i].cleanup_task) {
+ ha_alert("parsing : failed to allocate idle connection tasks for thread '%d'.\n", i);
+ cfgerr++;
+ break;
+ }
+
+ idle_conns[i].cleanup_task->process = srv_cleanup_toremove_conns;
+ idle_conns[i].cleanup_task->context = NULL;
+ HA_SPIN_INIT(&idle_conns[i].idle_conns_lock);
+ MT_LIST_INIT(&idle_conns[i].toremove_conns);
+ }
+ }
+
+ /* perform the final checks before creating tasks */
+
+ /* starting to initialize the main proxies list */
+ init_proxies_list = proxies_list;
+
+init_proxies_list_stage2:
+ for (curproxy = init_proxies_list; curproxy; curproxy = curproxy->next) {
+ struct listener *listener;
+ unsigned int next_id;
+
+ /* Configure SSL for each bind line.
+ * Note: if configuration fails at some point, the ->ctx member
+ * remains NULL so that listeners can later detach.
+ */
+ list_for_each_entry(bind_conf, &curproxy->conf.bind, by_fe) {
+ if (bind_conf->xprt->prepare_bind_conf &&
+ bind_conf->xprt->prepare_bind_conf(bind_conf) < 0)
+ cfgerr++;
+ bind_conf->analysers |= curproxy->fe_req_ana;
+ if (!bind_conf->maxaccept)
+ bind_conf->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : MAX_ACCEPT;
+ bind_conf->accept = session_accept_fd;
+ if (curproxy->options & PR_O_TCP_NOLING)
+ bind_conf->options |= BC_O_NOLINGER;
+
+ /* smart accept mode is automatic in HTTP mode */
+ if ((curproxy->options2 & PR_O2_SMARTACC) ||
+ ((curproxy->mode == PR_MODE_HTTP || (bind_conf->options & BC_O_USE_SSL)) &&
+ !(curproxy->no_options2 & PR_O2_SMARTACC)))
+ bind_conf->options |= BC_O_NOQUICKACK;
+ }
+
+ /* adjust this proxy's listeners */
+ bind_conf = NULL;
+ next_id = 1;
+ list_for_each_entry(listener, &curproxy->conf.listeners, by_fe) {
+ if (!listener->luid) {
+ /* listener ID not set, use automatic numbering with first
+ * spare entry starting with next_luid.
+ */
+ next_id = get_next_id(&curproxy->conf.used_listener_id, next_id);
+ listener->conf.id.key = listener->luid = next_id;
+ eb32_insert(&curproxy->conf.used_listener_id, &listener->conf.id);
+ }
+ next_id++;
+
+ /* enable separate counters */
+ if (curproxy->options2 & PR_O2_SOCKSTAT) {
+ listener->counters = calloc(1, sizeof(*listener->counters));
+ if (!listener->name)
+ memprintf(&listener->name, "sock-%d", listener->luid);
+ }
+
+#ifdef USE_QUIC
+ if (listener->bind_conf->xprt == xprt_get(XPRT_QUIC)) {
+ /* quic_conn are counted against maxconn. */
+ listener->bind_conf->options |= BC_O_XPRT_MAXCONN;
+ listener->rx.quic_curr_handshake = 0;
+ listener->rx.quic_curr_accept = 0;
+
+# ifdef USE_QUIC_OPENSSL_COMPAT
+ /* store the last checked bind_conf in bind_conf */
+ if (!(global.tune.options & GTUNE_NO_QUIC) &&
+ !(global.tune.options & GTUNE_LIMITED_QUIC) &&
+ listener->bind_conf != bind_conf) {
+ bind_conf = listener->bind_conf;
+ ha_alert("Binding [%s:%d] for %s %s: this SSL library does not support the "
+ "QUIC protocol. A limited compatibility layer may be enabled using "
+ "the \"limited-quic\" global option if desired.\n",
+ listener->bind_conf->file, listener->bind_conf->line,
+ proxy_type_str(curproxy), curproxy->id);
+ cfgerr++;
+ }
+# endif
+
+ li_init_per_thr(listener);
+ }
+#endif
+ }
+
+ /* Release unused SSL configs */
+ list_for_each_entry(bind_conf, &curproxy->conf.bind, by_fe) {
+ if (!(bind_conf->options & BC_O_USE_SSL) && bind_conf->xprt->destroy_bind_conf)
+ bind_conf->xprt->destroy_bind_conf(bind_conf);
+ }
+
+ /* create the task associated with the proxy */
+ curproxy->task = task_new_anywhere();
+ if (curproxy->task) {
+ curproxy->task->context = curproxy;
+ curproxy->task->process = manage_proxy;
+ curproxy->flags |= PR_FL_READY;
+ } else {
+ ha_alert("Proxy '%s': no more memory when trying to allocate the management task\n",
+ curproxy->id);
+ cfgerr++;
+ }
+ }
+
+ /*
+ * We have just initialized the main proxies list
+ * we must also configure the log-forward proxies list
+ */
+ if (init_proxies_list == proxies_list) {
+ init_proxies_list = cfg_log_forward;
+ /* check if list is not null to avoid infinite loop */
+ if (init_proxies_list)
+ goto init_proxies_list_stage2;
+ }
+
+ /*
+ * Recount currently required checks.
+ */
+
+ for (curproxy=proxies_list; curproxy; curproxy=curproxy->next) {
+ int optnum;
+
+ for (optnum = 0; cfg_opts[optnum].name; optnum++)
+ if (curproxy->options & cfg_opts[optnum].val)
+ global.last_checks |= cfg_opts[optnum].checks;
+
+ for (optnum = 0; cfg_opts2[optnum].name; optnum++)
+ if (curproxy->options2 & cfg_opts2[optnum].val)
+ global.last_checks |= cfg_opts2[optnum].checks;
+ }
+
+ if (cfg_peers) {
+ struct peers *curpeers = cfg_peers, **last;
+ struct peer *p, *pb;
+
+ /* Remove all peers sections which don't have a valid listener,
+ * which are not used by any table, or which are bound to more
+ * than one process.
+ */
+ last = &cfg_peers;
+ while (*last) {
+ struct peer *peer;
+ struct stktable *t;
+ curpeers = *last;
+
+ if (curpeers->disabled) {
+ /* the "disabled" keyword was present */
+ if (curpeers->peers_fe)
+ stop_proxy(curpeers->peers_fe);
+ curpeers->peers_fe = NULL;
+ }
+ else if (!curpeers->peers_fe || !curpeers->peers_fe->id) {
+ ha_warning("Removing incomplete section 'peers %s' (no peer named '%s').\n",
+ curpeers->id, localpeer);
+ if (curpeers->peers_fe)
+ stop_proxy(curpeers->peers_fe);
+ curpeers->peers_fe = NULL;
+ }
+ else {
+ /* Initializes the transport layer of the server part of all the peers belonging to
+ * <curpeers> section if required.
+ * Note that ->srv is used by the local peer of a new process to connect to the local peer
+ * of an old process.
+ */
+ curpeers->peers_fe->flags |= PR_FL_READY;
+ p = curpeers->remote;
+ while (p) {
+ struct peer *other_peer;
+
+ for (other_peer = curpeers->remote; other_peer && other_peer != p; other_peer = other_peer->next) {
+ if (strcmp(other_peer->id, p->id) == 0) {
+ ha_alert("Peer section '%s' [%s:%d]: another peer named '%s' was already defined at line %s:%d, please use distinct names.\n",
+ curpeers->peers_fe->id,
+ p->conf.file, p->conf.line,
+ other_peer->id, other_peer->conf.file, other_peer->conf.line);
+ cfgerr++;
+ break;
+ }
+ }
+
+ if (p->srv) {
+ if (p->srv->use_ssl == 1 && xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv)
+ cfgerr += xprt_get(XPRT_SSL)->prepare_srv(p->srv);
+ }
+ p = p->next;
+ }
+ /* Configure the SSL bindings of the local peer if required. */
+ if (!LIST_ISEMPTY(&curpeers->peers_fe->conf.bind)) {
+ struct list *l;
+ struct bind_conf *bind_conf;
+ int ret;
+
+ l = &curpeers->peers_fe->conf.bind;
+ bind_conf = LIST_ELEM(l->n, typeof(bind_conf), by_fe);
+
+ if (curpeers->local->srv) {
+ if (curpeers->local->srv->use_ssl == 1 && !(bind_conf->options & BC_O_USE_SSL)) {
+ ha_warning("Peers section '%s': local peer have a non-SSL listener and a SSL server configured at line %s:%d.\n",
+ curpeers->peers_fe->id, curpeers->local->conf.file, curpeers->local->conf.line);
+ }
+ else if (curpeers->local->srv->use_ssl != 1 && (bind_conf->options & BC_O_USE_SSL)) {
+ ha_warning("Peers section '%s': local peer have a SSL listener and a non-SSL server configured at line %s:%d.\n",
+ curpeers->peers_fe->id, curpeers->local->conf.file, curpeers->local->conf.line);
+ }
+ }
+
+ /* finish the bind setup */
+ ret = bind_complete_thread_setup(bind_conf, &err_code);
+ if (ret != 0) {
+ cfgerr += ret;
+ if (err_code & ERR_FATAL)
+ goto out;
+ }
+
+ if (bind_conf->xprt->prepare_bind_conf &&
+ bind_conf->xprt->prepare_bind_conf(bind_conf) < 0)
+ cfgerr++;
+ }
+ if (!peers_init_sync(curpeers) || !peers_alloc_dcache(curpeers)) {
+ ha_alert("Peers section '%s': out of memory, giving up on peers.\n",
+ curpeers->id);
+ cfgerr++;
+ break;
+ }
+ last = &curpeers->next;
+
+ /* Ignore the peer shard greater than the number of peer shard for this section.
+ * Also ignore the peer shard of the local peer.
+ */
+ for (peer = curpeers->remote; peer; peer = peer->next) {
+ if (peer == curpeers->local) {
+ if (peer->srv->shard) {
+ ha_warning("Peers section '%s': shard ignored for '%s' local peer\n",
+ curpeers->id, peer->id);
+ peer->srv->shard = 0;
+ }
+ }
+ else if (peer->srv->shard > curpeers->nb_shards) {
+ ha_warning("Peers section '%s': shard ignored for '%s' local peer because "
+ "%d shard value is greater than the section number of shards (%d)\n",
+ curpeers->id, peer->id, peer->srv->shard, curpeers->nb_shards);
+ peer->srv->shard = 0;
+ }
+ }
+
+ continue;
+ }
+
+ /* clean what has been detected above */
+ p = curpeers->remote;
+ while (p) {
+ pb = p->next;
+ free(p->id);
+ free(p);
+ p = pb;
+ }
+
+ /* Destroy and unlink this curpeers section.
+ * Note: curpeers is backed up into *last.
+ */
+ free(curpeers->id);
+ curpeers = curpeers->next;
+ /* Reset any reference to this peers section in the list of stick-tables */
+ for (t = stktables_list; t; t = t->next) {
+ if (t->peers.p && t->peers.p == *last)
+ t->peers.p = NULL;
+ }
+ free(*last);
+ *last = curpeers;
+ }
+ }
+
+ for (t = stktables_list; t; t = t->next) {
+ if (t->proxy)
+ continue;
+ err = NULL;
+ if (!stktable_init(t, &err)) {
+ ha_alert("Parsing [%s:%d]: failed to initialize '%s' stick-table: %s.\n", t->conf.file, t->conf.line, t->id, err);
+ ha_free(&err);
+ cfgerr++;
+ }
+ }
+
+ /* initialize stick-tables on backend capable proxies. This must not
+ * be done earlier because the data size may be discovered while parsing
+ * other proxies.
+ */
+ for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
+ if ((curproxy->flags & PR_FL_DISABLED) || !curproxy->table)
+ continue;
+
+ err = NULL;
+ if (!stktable_init(curproxy->table, &err)) {
+ ha_alert("Proxy '%s': failed to initialize stick-table: %s.\n", curproxy->id, err);
+ ha_free(&err);
+ cfgerr++;
+ }
+ }
+
+ if (mailers) {
+ struct mailers *curmailers = mailers, **last;
+ struct mailer *m, *mb;
+
+ /* Remove all mailers sections which don't have a valid listener.
+ * This can happen when a mailers section is never referenced.
+ */
+ last = &mailers;
+ while (*last) {
+ curmailers = *last;
+ if (curmailers->users) {
+ last = &curmailers->next;
+ continue;
+ }
+
+ ha_warning("Removing incomplete section 'mailers %s'.\n",
+ curmailers->id);
+
+ m = curmailers->mailer_list;
+ while (m) {
+ mb = m->next;
+ free(m->id);
+ free(m);
+ m = mb;
+ }
+
+ /* Destroy and unlink this curmailers section.
+ * Note: curmailers is backed up into *last.
+ */
+ free(curmailers->id);
+ curmailers = curmailers->next;
+ free(*last);
+ *last = curmailers;
+ }
+ }
+
+ /* Update server_state_file_name to backend name if backend is supposed to use
+ * a server-state file locally defined and none has been provided */
+ for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
+ if (curproxy->load_server_state_from_file == PR_SRV_STATE_FILE_LOCAL &&
+ curproxy->server_state_file_name == NULL)
+ curproxy->server_state_file_name = strdup(curproxy->id);
+ }
+
+ list_for_each_entry(curr_resolvers, &sec_resolvers, list) {
+ if (LIST_ISEMPTY(&curr_resolvers->nameservers)) {
+ ha_warning("resolvers '%s' [%s:%d] has no nameservers configured!\n",
+ curr_resolvers->id, curr_resolvers->conf.file,
+ curr_resolvers->conf.line);
+ err_code |= ERR_WARN;
+ }
+ }
+
+ list_for_each_entry(postparser, &postparsers, list) {
+ if (postparser->func)
+ cfgerr += postparser->func();
+ }
+
+ if (cfgerr > 0)
+ err_code |= ERR_ALERT | ERR_FATAL;
+ out:
+ return err_code;
+}
+
/*
 * Registers the CFG keyword list <kwl> as a list of valid keywords for next
 * parsing sessions. The list is simply appended to the global cfg_keywords
 * chain; no duplicate checking is performed here, and <kwl> must remain
 * valid for as long as it stays registered.
 */
void cfg_register_keywords(struct cfg_kw_list *kwl)
{
	LIST_APPEND(&cfg_keywords.list, &kwl->list);
}
+
/*
 * Unregisters the CFG keyword list <kwl> from the list of valid keywords.
 * The list head is re-initialized after unlinking so that the same <kwl>
 * may safely be registered again later.
 */
void cfg_unregister_keywords(struct cfg_kw_list *kwl)
{
	LIST_DELETE(&kwl->list);
	LIST_INIT(&kwl->list);
}
+
+/* this function register new section in the haproxy configuration file.
+ * <section_name> is the name of this new section and <section_parser>
+ * is the called parser. If two section declaration have the same name,
+ * only the first declared is used.
+ */
+int cfg_register_section(char *section_name,
+ int (*section_parser)(const char *, int, char **, int),
+ int (*post_section_parser)())
+{
+ struct cfg_section *cs;
+
+ list_for_each_entry(cs, &sections, list) {
+ if (strcmp(cs->section_name, section_name) == 0) {
+ ha_alert("register section '%s': already registered.\n", section_name);
+ return 0;
+ }
+ }
+
+ cs = calloc(1, sizeof(*cs));
+ if (!cs) {
+ ha_alert("register section '%s': out of memory.\n", section_name);
+ return 0;
+ }
+
+ cs->section_name = section_name;
+ cs->section_parser = section_parser;
+ cs->post_section_parser = post_section_parser;
+
+ LIST_APPEND(&sections, &cs->list);
+
+ return 1;
+}
+
+/* this function register a new function which will be called once the haproxy
+ * configuration file has been parsed. It's useful to check dependencies
+ * between sections or to resolve items once everything is parsed.
+ */
+int cfg_register_postparser(char *name, int (*func)())
+{
+ struct cfg_postparser *cp;
+
+ cp = calloc(1, sizeof(*cp));
+ if (!cp) {
+ ha_alert("register postparser '%s': out of memory.\n", name);
+ return 0;
+ }
+ cp->name = name;
+ cp->func = func;
+
+ LIST_APPEND(&postparsers, &cp->list);
+
+ return 1;
+}
+
+/*
+ * free all config section entries
+ */
+void cfg_unregister_sections(void)
+{
+ struct cfg_section *cs, *ics;
+
+ list_for_each_entry_safe(cs, ics, &sections, list) {
+ LIST_DELETE(&cs->list);
+ free(cs);
+ }
+}
+
+void cfg_backup_sections(struct list *backup_sections)
+{
+ struct cfg_section *cs, *ics;
+
+ list_for_each_entry_safe(cs, ics, &sections, list) {
+ LIST_DELETE(&cs->list);
+ LIST_APPEND(backup_sections, &cs->list);
+ }
+}
+
+void cfg_restore_sections(struct list *backup_sections)
+{
+ struct cfg_section *cs, *ics;
+
+ list_for_each_entry_safe(cs, ics, backup_sections, list) {
+ LIST_DELETE(&cs->list);
+ LIST_APPEND(&sections, &cs->list);
+ }
+}
+
/* dumps all registered keywords by section on stdout. Each section is
 * printed in turn, and within each section the keywords are emitted in
 * alphabetical order. The ordering is obtained without sorting: each pass
 * over the keyword lists selects, via strordered(), the smallest keyword
 * strictly greater than the previously printed one (kwp), stopping when no
 * further candidate is found. The same selection-scan pattern is repeated
 * for every keyword family below.
 */
void cfg_dump_registered_keywords()
{
	/* CFG_GLOBAL, CFG_LISTEN, CFG_USERLIST, CFG_PEERS, CFG_CRTLIST */
	const char* sect_names[] = { "", "global", "listen", "userlist", "peers", "crt-list", 0 };
	int section;
	int index;

	for (section = 1; sect_names[section]; section++) {
		struct cfg_kw_list *kwl;
		const struct cfg_keyword *kwp, *kwn;

		printf("%s\n", sect_names[section]);

		/* generic cfg keywords registered for this section, in
		 * alphabetical order (see header comment for the pattern).
		 */
		for (kwn = kwp = NULL;; kwp = kwn) {
			list_for_each_entry(kwl, &cfg_keywords.list, list) {
				for (index = 0; kwl->kw[index].kw != NULL; index++)
					if (kwl->kw[index].section == section &&
					    strordered(kwp ? kwp->kw : NULL, kwl->kw[index].kw, kwn != kwp ? kwn->kw : NULL))
						kwn = &kwl->kw[index];
			}
			if (kwn == kwp)
				break;
			printf("\t%s\n", kwn->kw);
		}

		if (section == CFG_LISTEN) {
			/* there are plenty of other keywords there */
			extern struct list tcp_req_conn_keywords, tcp_req_sess_keywords,
				tcp_req_cont_keywords, tcp_res_cont_keywords;
			extern struct bind_kw_list bind_keywords;
			extern struct srv_kw_list srv_keywords;
			struct bind_kw_list *bkwl;
			struct srv_kw_list *skwl;
			const struct bind_kw *bkwp, *bkwn;
			const struct srv_kw *skwp, *skwn;
			const struct cfg_opt *coptp, *coptn;

			/* display the non-ssl keywords */
			for (bkwn = bkwp = NULL;; bkwp = bkwn) {
				list_for_each_entry(bkwl, &bind_keywords.list, list) {
					if (strcmp(bkwl->scope, "SSL") == 0) /* skip SSL keywords */
						continue;
					for (index = 0; bkwl->kw[index].kw != NULL; index++) {
						if (strordered(bkwp ? bkwp->kw : NULL,
							       bkwl->kw[index].kw,
							       bkwn != bkwp ? bkwn->kw : NULL))
							bkwn = &bkwl->kw[index];
					}
				}
				if (bkwn == bkwp)
					break;

				/* a non-zero skip indicates extra arguments consumed */
				if (!bkwn->skip)
					printf("\tbind <addr> %s\n", bkwn->kw);
				else
					printf("\tbind <addr> %s +%d\n", bkwn->kw, bkwn->skip);
			}
#if defined(USE_OPENSSL)
			/* displays the "ssl" keywords */
			for (bkwn = bkwp = NULL;; bkwp = bkwn) {
				list_for_each_entry(bkwl, &bind_keywords.list, list) {
					if (strcmp(bkwl->scope, "SSL") != 0) /* skip non-SSL keywords */
						continue;
					for (index = 0; bkwl->kw[index].kw != NULL; index++) {
						if (strordered(bkwp ? bkwp->kw : NULL,
							       bkwl->kw[index].kw,
							       bkwn != bkwp ? bkwn->kw : NULL))
							bkwn = &bkwl->kw[index];
					}
				}
				if (bkwn == bkwp)
					break;

				if (strcmp(bkwn->kw, "ssl") == 0) /* skip "bind <addr> ssl ssl" */
					continue;

				if (!bkwn->skip)
					printf("\tbind <addr> ssl %s\n", bkwn->kw);
				else
					printf("\tbind <addr> ssl %s +%d\n", bkwn->kw, bkwn->skip);
			}
#endif
			/* server keywords, same alphabetical selection scan */
			for (skwn = skwp = NULL;; skwp = skwn) {
				list_for_each_entry(skwl, &srv_keywords.list, list) {
					for (index = 0; skwl->kw[index].kw != NULL; index++)
						if (strordered(skwp ? skwp->kw : NULL,
							       skwl->kw[index].kw,
							       skwn != skwp ? skwn->kw : NULL))
							skwn = &skwl->kw[index];
				}
				if (skwn == skwp)
					break;

				if (!skwn->skip)
					printf("\tserver <name> <addr> %s\n", skwn->kw);
				else
					printf("\tserver <name> <addr> %s +%d\n", skwn->kw, skwn->skip);
			}

			/* "option" keywords come from two static tables
			 * (cfg_opts and cfg_opts2) which are merged into a
			 * single alphabetical stream here.
			 */
			for (coptn = coptp = NULL;; coptp = coptn) {
				for (index = 0; cfg_opts[index].name; index++)
					if (strordered(coptp ? coptp->name : NULL,
						       cfg_opts[index].name,
						       coptn != coptp ? coptn->name : NULL))
						coptn = &cfg_opts[index];

				for (index = 0; cfg_opts2[index].name; index++)
					if (strordered(coptp ? coptp->name : NULL,
						       cfg_opts2[index].name,
						       coptn != coptp ? coptn->name : NULL))
						coptn = &cfg_opts2[index];
				if (coptn == coptp)
					break;

				/* annotate each option with its applicable sides/mode */
				printf("\toption %s [ ", coptn->name);
				if (coptn->cap & PR_CAP_FE)
					printf("FE ");
				if (coptn->cap & PR_CAP_BE)
					printf("BE ");
				if (coptn->mode == PR_MODE_HTTP)
					printf("HTTP ");
				printf("]\n");
			}

			dump_act_rules(&tcp_req_conn_keywords, "\ttcp-request connection ");
			dump_act_rules(&tcp_req_sess_keywords, "\ttcp-request session ");
			dump_act_rules(&tcp_req_cont_keywords, "\ttcp-request content ");
			dump_act_rules(&tcp_res_cont_keywords, "\ttcp-response content ");
			dump_act_rules(&http_req_keywords.list, "\thttp-request ");
			dump_act_rules(&http_res_keywords.list, "\thttp-response ");
			dump_act_rules(&http_after_res_keywords.list, "\thttp-after-response ");
		}
		if (section == CFG_PEERS) {
			struct peers_kw_list *pkwl;
			const struct peers_keyword *pkwp, *pkwn;
			for (pkwn = pkwp = NULL;; pkwp = pkwn) {
				list_for_each_entry(pkwl, &peers_keywords.list, list) {
					for (index = 0; pkwl->kw[index].kw != NULL; index++) {
						if (strordered(pkwp ? pkwp->kw : NULL,
							       pkwl->kw[index].kw,
							       pkwn != pkwp ? pkwn->kw : NULL))
							pkwn = &pkwl->kw[index];
					}
				}
				if (pkwn == pkwp)
					break;
				printf("\t%s\n", pkwn->kw);
			}
		}
		if (section == CFG_CRTLIST) {
			/* displays the keyword available for the crt-lists */
			extern struct ssl_crtlist_kw ssl_crtlist_kws[] __maybe_unused;
			const struct ssl_crtlist_kw *sbkwp __maybe_unused, *sbkwn __maybe_unused;

#if defined(USE_OPENSSL)
			for (sbkwn = sbkwp = NULL;; sbkwp = sbkwn) {
				for (index = 0; ssl_crtlist_kws[index].kw != NULL; index++) {
					if (strordered(sbkwp ? sbkwp->kw : NULL,
						       ssl_crtlist_kws[index].kw,
						       sbkwn != sbkwp ? sbkwn->kw : NULL))
						sbkwn = &ssl_crtlist_kws[index];
				}
				if (sbkwn == sbkwp)
					break;
				if (!sbkwn->skip)
					printf("\t%s\n", sbkwn->kw);
				else
					printf("\t%s +%d\n", sbkwn->kw, sbkwn->skip);
			}
#endif

		}
	}
}
+
/* these are the config sections handled by default. The proxy-like sections
 * ("listen", "frontend", "backend", "defaults") all share the same parser,
 * cfg_parse_listen(). None of these sections uses a post-section parser
 * (last argument NULL).
 */
REGISTER_CONFIG_SECTION("listen",         cfg_parse_listen,    NULL);
REGISTER_CONFIG_SECTION("frontend",       cfg_parse_listen,    NULL);
REGISTER_CONFIG_SECTION("backend",        cfg_parse_listen,    NULL);
REGISTER_CONFIG_SECTION("defaults",       cfg_parse_listen,    NULL);
REGISTER_CONFIG_SECTION("global",         cfg_parse_global,    NULL);
REGISTER_CONFIG_SECTION("userlist",       cfg_parse_users,     NULL);
REGISTER_CONFIG_SECTION("peers",          cfg_parse_peers,     NULL);
REGISTER_CONFIG_SECTION("mailers",        cfg_parse_mailers,   NULL);
REGISTER_CONFIG_SECTION("namespace_list", cfg_parse_netns,     NULL);

/* registers the "default-path" keyword of the "global" section; the list is
 * appended to cfg_keywords at init time through the INITCALL below.
 */
static struct cfg_kw_list cfg_kws = {{ },{
	{ CFG_GLOBAL, "default-path", cfg_parse_global_def_path },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/channel.c b/src/channel.c
new file mode 100644
index 0000000..0b6389d
--- /dev/null
+++ b/src/channel.c
@@ -0,0 +1,591 @@
+/*
+ * Channel management functions.
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/channel.h>
+
+
+/* Schedule up to <bytes> more bytes to be forwarded via the channel without
+ * notifying the owner task. Any data pending in the buffer are scheduled to be
+ * sent as well, within the limit of the number of bytes to forward. This must
+ * be the only method to use to schedule bytes to be forwarded. If the requested
+ * number is too large, it is automatically adjusted. The number of bytes taken
+ * into account is returned. Directly touching ->to_forward will cause lockups
+ * when buf->o goes down to zero if nobody is ready to push the remaining data.
+ */
+unsigned long long __channel_forward(struct channel *chn, unsigned long long bytes)
+{
+ unsigned int budget;
+ unsigned int forwarded;
+
+ /* This is more of a safety measure as it's not supposed to happen in
+ * regular code paths.
+ */
+ if (unlikely(chn->to_forward == CHN_INFINITE_FORWARD)) {
+ c_adv(chn, ci_data(chn));
+ return bytes;
+ }
+
+ /* Bound the transferred size to a 32-bit count since all our values
+ * are 32-bit, and we don't want to reach CHN_INFINITE_FORWARD.
+ */
+ budget = MIN(bytes, CHN_INFINITE_FORWARD - 1);
+
+ /* transfer as much as we can of buf->i (the pending input data) */
+ forwarded = MIN(ci_data(chn), budget);
+ c_adv(chn, forwarded);
+ budget -= forwarded;
+
+ if (!budget)
+ return forwarded;
+
+ /* Now we must ensure chn->to_forward stays below CHN_INFINITE_FORWARD,
+ * which also implies it won't overflow. It's fewer operations in 64-bit.
+ */
+ bytes = (unsigned long long)chn->to_forward + budget;
+ if (bytes >= CHN_INFINITE_FORWARD)
+ bytes = CHN_INFINITE_FORWARD - 1;
+ budget = bytes - chn->to_forward;
+
+ chn->to_forward += budget;
+ forwarded += budget;
+ return forwarded;
+}
+
+/* writes <len> bytes from message <msg> to the channel's buffer. Returns -1 in
+ * case of success, -2 if the message is larger than the buffer size, or the
+ * number of bytes available otherwise. The send limit is automatically
+ * adjusted to the amount of data written. FIXME-20060521: handle unaligned
+ * data. Note: this function appends data to the buffer's output and possibly
+ * overwrites any pending input data which are assumed not to exist.
+ */
+int co_inject(struct channel *chn, const char *msg, int len)
+{
+ int max;
+
+ if (len == 0)
+ return -1;
+
+ if (len < 0 || len > c_size(chn)) {
+ /* we can't write this chunk and will never be able to, because
+ * it is larger than the buffer. This must be reported as an
+ * error. Then we return -2 so that writers that don't care can
+ * ignore it and go on, and others can check for this value.
+ */
+ return -2;
+ }
+
+ c_realign_if_empty(chn);
+ max = b_contig_space(&chn->buf);
+ if (len > max)
+ return max; /* not enough contiguous room: report available space */
+
+ memcpy(co_tail(chn), msg, len);
+ b_add(&chn->buf, len);
+ c_adv(chn, len);
+ chn->total += len;
+ return -1;
+}
+
+/* Tries to copy character <c> into the channel's buffer after some length
+ * controls. The chn->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If there is not enough room left in the
+ * buffer, -1 is returned. Otherwise the number of bytes copied is returned
+ * (1). Channel flag CF_READ_EVENT is set if some data can be transferred.
+ */
+int ci_putchr(struct channel *chn, char c)
+{
+ if (unlikely(channel_input_closed(chn)))
+ return -2;
+
+ if (!channel_may_recv(chn))
+ return -1;
+
+ *ci_tail(chn) = c;
+
+ b_add(&chn->buf, 1);
+ chn->flags |= CF_READ_EVENT;
+
+ if (chn->to_forward >= 1) {
+ if (chn->to_forward != CHN_INFINITE_FORWARD)
+ chn->to_forward--;
+ /* the new byte was already scheduled for forwarding: make it output */
+ c_adv(chn, 1);
+ }
+
+ chn->total++;
+ return 1;
+}
+
+/* Tries to copy block <blk> at once into the channel's buffer after length
+ * controls. The chn->o and to_forward pointers are updated. If the channel
+ * input is closed, -2 is returned. If the block is too large for this buffer,
+ * -3 is returned. If there is not enough room left in the buffer, -1 is
+ * returned. Otherwise the number of bytes copied is returned (0 being a valid
+ * number). Input flags are updated via channel_add_input() if some data can
+ * be transferred.
+ */
+int ci_putblk(struct channel *chn, const char *blk, int len)
+{
+ int max;
+
+ if (unlikely(channel_input_closed(chn)))
+ return -2;
+
+ if (len < 0)
+ return -3;
+
+ max = channel_recv_limit(chn);
+ if (unlikely(len > max - c_data(chn))) {
+ /* we can't write this chunk right now because the buffer is
+ * almost full or because the block is too large. Returns
+ * -3 if block is too large for this buffer. Or -1 if the
+ * room left is not large enough.
+ */
+ if (len > max)
+ return -3;
+
+ return -1;
+ }
+
+ if (unlikely(len == 0))
+ return 0;
+
+ /* OK so the data fits in the buffer in one or two blocks */
+ max = b_contig_space(&chn->buf);
+ memcpy(ci_tail(chn), blk, MIN(len, max));
+ if (len > max)
+ memcpy(c_orig(chn), blk + max, len - max); /* wrapped part */
+
+ b_add(&chn->buf, len);
+ channel_add_input(chn, len);
+ return len;
+}
+
+/* Locates the longest part of the channel's output buffer that is composed
+ * exclusively of characters not in the <delim> set, and delimited by one of
+ * these characters, and returns the initial part and the first of such
+ * delimiters. A single escape character in <escape> may be specified so that
+ * when not 0 and found, the character that follows it is never taken as a
+ * delimiter. Note that <delim> cannot contain the zero byte, hence this
+ * function is not usable with byte zero as a delimiter.
+ *
+ * Return values :
+ * >0 : number of bytes read. Includes the sep if present before len or end.
+ * =0 : no sep before end found. <str> is left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it. One of the delimiters is waited for as long as neither the buffer
+ * nor the output are full. If either of them is full, the string may be
+ * returned as is, without the delimiter.
+ */
+int co_getdelim(const struct channel *chn, char *str, int len, const char *delim, char escape)
+{
+ uchar delim_map[256 / 8];
+ int found, escaped;
+ uint pos, bit;
+ int ret, max;
+ uchar b;
+ char *p;
+
+ ret = 0;
+ max = len;
+
+ /* closed or empty + imminent close = -1; empty = 0 */
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUT_DONE) || !co_data(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ ret = -1;
+ goto out;
+ }
+
+ p = co_head(chn);
+
+ /* cap the copy at the available output data */
+ if (max > co_data(chn)) {
+ max = co_data(chn);
+ str[max-1] = 0;
+ }
+
+ /* create the byte map: one bit per possible byte value */
+ memset(delim_map, 0, sizeof(delim_map));
+ while ((b = *delim)) {
+ pos = b >> 3;
+ bit = b & 7;
+ delim_map[pos] |= 1 << bit;
+ delim++;
+ }
+
+ found = escaped = 0;
+ while (max) {
+ *str++ = b = *p;
+ ret++;
+ max--;
+
+ /* an escape char protects the next byte from being a delimiter */
+ if (escape && (escaped || *p == escape)) {
+ escaped = !escaped;
+ goto skip;
+ }
+
+ pos = b >> 3;
+ bit = b & 7;
+ if (delim_map[pos] & (1 << bit)) {
+ found = 1;
+ break;
+ }
+ skip:
+ p = b_next(&chn->buf, p);
+ }
+
+ /* no delimiter yet but more data may still arrive: report "not found" */
+ if (ret > 0 && ret < len &&
+ (ret < co_data(chn) || channel_may_recv(chn)) &&
+ !found &&
+ !(chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
+ ret = 0;
+ out:
+ if (max)
+ *str = 0;
+ return ret;
+}
+
+/* Gets one text word out of a channel's buffer from a stream connector.
+ * Return values :
+ * >0 : number of bytes read. Includes the sep if present before len or end.
+ * =0 : no sep before end found. <str> is left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it. The line separator is waited for as long as neither the buffer
+ * nor the output are full. If either of them is full, the string may be
+ * returned as is, without the line separator.
+ */
+int co_getword(const struct channel *chn, char *str, int len, char sep)
+{
+ int ret, max;
+ char *p;
+
+ ret = 0;
+ max = len;
+
+ /* closed or empty + imminent close = -1; empty = 0 */
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUT_DONE) || !co_data(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ ret = -1;
+ goto out;
+ }
+
+ p = co_head(chn);
+
+ /* cap the copy at the available output data */
+ if (max > co_data(chn)) {
+ max = co_data(chn);
+ str[max-1] = 0;
+ }
+ while (max) {
+ *str++ = *p;
+ ret++;
+ max--;
+
+ if (*p == sep)
+ break;
+ p = b_next(&chn->buf, p);
+ }
+ /* no separator yet but more data may still arrive: report "not found" */
+ if (ret > 0 && ret < len &&
+ (ret < co_data(chn) || channel_may_recv(chn)) &&
+ *(str-1) != sep &&
+ !(chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
+ ret = 0;
+ out:
+ if (max)
+ *str = 0;
+ return ret;
+}
+
+/* Gets one text line out of a channel's buffer from a stream connector.
+ * Return values :
+ * >0 : number of bytes read. Includes the \n if present before len or end.
+ * =0 : no '\n' before end found. <str> is left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it. The '\n' is waited for as long as neither the buffer nor the
+ * output are full. If either of them is full, the string may be returned
+ * as is, without the '\n'.
+ */
+int co_getline(const struct channel *chn, char *str, int len)
+{
+ int ret, max;
+ char *p;
+
+ ret = 0;
+ max = len;
+
+ /* closed or empty + imminent close = -1; empty = 0 */
+ if (unlikely((chn_cons(chn)->flags & SC_FL_SHUT_DONE) || !co_data(chn))) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ ret = -1;
+ goto out;
+ }
+
+ p = co_head(chn);
+
+ /* cap the copy at the available output data */
+ if (max > co_data(chn)) {
+ max = co_data(chn);
+ str[max-1] = 0;
+ }
+ while (max) {
+ *str++ = *p;
+ ret++;
+ max--;
+
+ if (*p == '\n')
+ break;
+ p = b_next(&chn->buf, p);
+ }
+ /* no LF yet but more data may still arrive: report "not found" */
+ if (ret > 0 && ret < len &&
+ (ret < co_data(chn) || channel_may_recv(chn)) &&
+ *(str-1) != '\n' &&
+ !(chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
+ ret = 0;
+ out:
+ if (max)
+ *str = 0;
+ return ret;
+}
+
+/* Gets one char of data from a channel's buffer,
+ * Return values :
+ * 1 : number of bytes read, equal to requested size.
+ * =0 : not enough data available. <c> is left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it.
+ */
+int co_getchar(const struct channel *chn, char *c)
+{
+ if (chn_cons(chn)->flags & SC_FL_SHUT_DONE)
+ return -1;
+
+ if (unlikely(co_data(chn) == 0)) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ return -1;
+ return 0;
+ }
+
+ /* peek the first pending output byte without consuming it */
+ *c = *(co_head(chn));
+ return 1;
+}
+
+/* Gets one full block of data at once from a channel's buffer, optionally from
+ * a specific offset. Return values :
+ * >0 : number of bytes read, equal to requested size.
+ * =0 : not enough data available. <blk> is left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it.
+ */
+int co_getblk(const struct channel *chn, char *blk, int len, int offset)
+{
+ if (chn_cons(chn)->flags & SC_FL_SHUT_DONE)
+ return -1;
+
+ /* the whole requested area must already be present in the output */
+ if (len + offset > co_data(chn) || co_data(chn) == 0) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ return -1;
+ return 0;
+ }
+
+ return b_getblk(&chn->buf, blk, len, offset);
+}
+
+/* Gets one or two blocks of data at once from a channel's output buffer.
+ * This is the zero-copy variant: pointers into the buffer are returned
+ * instead of copying the data out.
+ * Return values :
+ * >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
+ * =0 : not enough data available. <blk*> are left undefined.
+ * <0 : no more bytes readable because output is shut.
+ * The channel status is not changed. The caller must call co_skip() to
+ * update it. Unused buffers are left in an undefined state.
+ */
+int co_getblk_nc(const struct channel *chn, const char **blk1, size_t *len1, const char **blk2, size_t *len2)
+{
+ if (unlikely(co_data(chn) == 0)) {
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ return -1;
+ return 0;
+ }
+
+ return b_getblk_nc(&chn->buf, blk1, len1, blk2, len2, 0, co_data(chn));
+}
+
+/* Gets one text line out of a channel's output buffer from a stream connector.
+ * Return values :
+ * >0 : number of blocks returned (1 or 2). blk1 is always filled before blk2.
+ * =0 : not enough data available.
+ * <0 : no more bytes readable because output is shut.
+ * The '\n' is waited for as long as neither the buffer nor the output are
+ * full. If either of them is full, the string may be returned as is, without
+ * the '\n'. Unused buffers are left in an undefined state.
+ */
+int co_getline_nc(const struct channel *chn,
+ const char **blk1, size_t *len1,
+ const char **blk2, size_t *len2)
+{
+ int retcode;
+ int l;
+
+ retcode = co_getblk_nc(chn, blk1, len1, blk2, len2);
+ if (unlikely(retcode <= 0))
+ return retcode;
+
+ /* look for the LF in the first block, and trim the block to it */
+ for (l = 0; l < *len1 && (*blk1)[l] != '\n'; l++);
+ if (l < *len1 && (*blk1)[l] == '\n') {
+ *len1 = l + 1;
+ return 1;
+ }
+
+ /* otherwise look for it in the second (wrapped) block, if any */
+ if (retcode >= 2) {
+ for (l = 0; l < *len2 && (*blk2)[l] != '\n'; l++);
+ if (l < *len2 && (*blk2)[l] == '\n') {
+ *len2 = l + 1;
+ return 2;
+ }
+ }
+
+ if (chn_cons(chn)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) {
+ /* If we have found no LF and the buffer is shut, then
+ * the resulting string is made of the concatenation of
+ * the pending blocks (1 or 2).
+ */
+ return retcode;
+ }
+
+ /* No LF yet and not shut yet */
+ return 0;
+}
+
+/* Gets one full block of data at once from a channel's input buffer.
+ * This function can return the data split in one or two blocks.
+ * Return values :
+ * >0 : number of blocks returned (1 or 2). blk1 is always filled before blk2.
+ * =0 : not enough data available.
+ * <0 : no more bytes readable because input is shut.
+ */
+int ci_getblk_nc(const struct channel *chn,
+ char **blk1, size_t *len1,
+ char **blk2, size_t *len2)
+{
+ if (unlikely(ci_data(chn) == 0)) {
+ if (chn_prod(chn)->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return -1;
+ return 0;
+ }
+
+ /* input data wrap around the buffer's end: return two blocks */
+ if (unlikely(ci_head(chn) + ci_data(chn) > c_wrap(chn))) {
+ *blk1 = ci_head(chn);
+ *len1 = c_wrap(chn) - ci_head(chn);
+ *blk2 = c_orig(chn);
+ *len2 = ci_data(chn) - *len1;
+ return 2;
+ }
+
+ *blk1 = ci_head(chn);
+ *len1 = ci_data(chn);
+ return 1;
+}
+
+/* Gets one text line out of a channel's input buffer from a stream connector.
+ * Return values :
+ * >0 : number of blocks returned (1 or 2). blk1 is always filled before blk2.
+ * =0 : not enough data available.
+ * <0 : no more bytes readable because output is shut.
+ * The '\n' is waited for as long as neither the buffer nor the input are
+ * full. If either of them is full, the string may be returned as is, without
+ * the '\n'. Unused buffers are left in an undefined state.
+ */
+int ci_getline_nc(const struct channel *chn,
+ char **blk1, size_t *len1,
+ char **blk2, size_t *len2)
+{
+ int retcode;
+ int l;
+
+ retcode = ci_getblk_nc(chn, blk1, len1, blk2, len2);
+ if (unlikely(retcode <= 0))
+ return retcode;
+
+ /* look for the LF in the first block, and trim the block to it */
+ for (l = 0; l < *len1 && (*blk1)[l] != '\n'; l++);
+ if (l < *len1 && (*blk1)[l] == '\n') {
+ *len1 = l + 1;
+ return 1;
+ }
+
+ /* otherwise look for it in the second (wrapped) block, if any */
+ if (retcode >= 2) {
+ for (l = 0; l < *len2 && (*blk2)[l] != '\n'; l++);
+ if (l < *len2 && (*blk2)[l] == '\n') {
+ *len2 = l + 1;
+ return 2;
+ }
+ }
+
+ if (chn_cons(chn)->flags & SC_FL_SHUT_DONE) {
+ /* If we have found no LF and the buffer is shut, then
+ * the resulting string is made of the concatenation of
+ * the pending blocks (1 or 2).
+ */
+ return retcode;
+ }
+
+ /* No LF yet and not shut yet */
+ return 0;
+}
+
+/* Inserts <str> followed by "\r\n" at position <pos> relative to channel <c>'s
+ * input head. The <len> argument informs about the length of string <str> so
+ * that we don't have to measure it. <str> must be a valid pointer and must not
+ * include the trailing "\r\n".
+ *
+ * The number of bytes added is returned on success. 0 is returned on failure.
+ */
+int ci_insert_line2(struct channel *c, int pos, const char *str, int len)
+{
+ struct buffer *b = &c->buf;
+ char *dst = c_ptr(c, pos);
+ int delta;
+
+ /* total size to insert: the string plus CRLF */
+ delta = len + 2;
+
+ if (__b_tail(b) + delta >= b_wrap(b))
+ return 0; /* no space left */
+
+ if (b_data(b) &&
+ b_tail(b) + delta > b_head(b) &&
+ b_head(b) >= b_tail(b))
+ return 0; /* no space left before wrapping data */
+
+ /* first, protect the end of the buffer */
+ memmove(dst + delta, dst, b_tail(b) - dst);
+
+ /* now, copy str over dst and append the CRLF */
+ memcpy(dst, str, len);
+ dst[len] = '\r';
+ dst[len + 1] = '\n';
+
+ b_add(b, delta);
+ return delta;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/check.c b/src/check.c
new file mode 100644
index 0000000..2753c93
--- /dev/null
+++ b/src/check.c
@@ -0,0 +1,2642 @@
+/*
+ * Health-checks functions.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2009 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/chunk.h>
+#include <haproxy/dgram.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/extcheck.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/h1.h>
+#include <haproxy/http.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/mailers.h>
+#include <haproxy/port_range.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/queue.h>
+#include <haproxy/regex.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/task.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+#include <haproxy/vars.h>
+
+/* trace source and events */
+static void check_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ * check - check
+ *
+ * CHK_EV_* macros are defined in <haproxy/check.h>
+ */
+static const struct trace_event check_trace_events[] = {
+ { .mask = CHK_EV_TASK_WAKE, .name = "task_wake", .desc = "Check task woken up" },
+ { .mask = CHK_EV_HCHK_START, .name = "hchck_start", .desc = "Health-check started" },
+ { .mask = CHK_EV_HCHK_WAKE, .name = "hchck_wake", .desc = "Health-check woken up" },
+ { .mask = CHK_EV_HCHK_RUN, .name = "hchck_run", .desc = "Health-check running" },
+ { .mask = CHK_EV_HCHK_END, .name = "hchck_end", .desc = "Health-check terminated" },
+ { .mask = CHK_EV_HCHK_SUCC, .name = "hchck_succ", .desc = "Health-check success" },
+ { .mask = CHK_EV_HCHK_ERR, .name = "hchck_err", .desc = "Health-check failure" },
+
+ { .mask = CHK_EV_TCPCHK_EVAL, .name = "tcp_check_eval", .desc = "tcp-check rules evaluation" },
+ { .mask = CHK_EV_TCPCHK_ERR, .name = "tcp_check_err", .desc = "tcp-check evaluation error" },
+ { .mask = CHK_EV_TCPCHK_CONN, .name = "tcp_check_conn", .desc = "tcp-check connection rule" },
+ { .mask = CHK_EV_TCPCHK_SND, .name = "tcp_check_send", .desc = "tcp-check send rule" },
+ { .mask = CHK_EV_TCPCHK_EXP, .name = "tcp_check_expect", .desc = "tcp-check expect rule" },
+ { .mask = CHK_EV_TCPCHK_ACT, .name = "tcp_check_action", .desc = "tcp-check action rule" },
+
+ { .mask = CHK_EV_RX_DATA, .name = "rx_data", .desc = "receipt of data" },
+ { .mask = CHK_EV_RX_BLK, .name = "rx_blk", .desc = "receipt blocked" },
+ { .mask = CHK_EV_RX_ERR, .name = "rx_err", .desc = "receipt error" },
+
+ { .mask = CHK_EV_TX_DATA, .name = "tx_data", .desc = "transmission of data" },
+ { .mask = CHK_EV_TX_BLK, .name = "tx_blk", .desc = "transmission blocked" },
+ { .mask = CHK_EV_TX_ERR, .name = "tx_err", .desc = "transmission error" },
+
+ {}
+};
+
+static const struct name_desc check_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the check */ },
+ /* arg2 */ { },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+/* verbosity levels, from the least to the most detailed */
+static const struct name_desc check_trace_decoding[] = {
+#define CHK_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define CHK_VERB_MINIMAL 2
+ { .name="minimal", .desc="report info on streams and connectors" },
+#define CHK_VERB_SIMPLE 3
+ { .name="simple", .desc="add info on request and response channels" },
+#define CHK_VERB_ADVANCED 4
+ { .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
+#define CHK_VERB_COMPLETE 5
+ { .name="complete", .desc="add info on channel's buffer" },
+ { /* end */ }
+};
+
+struct trace_source trace_check = {
+ .name = IST("check"),
+ .desc = "Health-check",
+ .arg_def = TRC_ARG1_CHK, // TRACE()'s first argument is always a check
+ .default_cb = check_trace,
+ .known_events = check_trace_events,
+ .lockon_args = check_trace_lockon_args,
+ .decoding = check_trace_decoding,
+ .report_events = ~0, // report everything by default
+};
+
+#define TRACE_SOURCE &trace_check
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+
+/* Dummy frontend used to create all checks sessions. */
+struct proxy checks_fe;
+
+
+/* Dumps to the trace buffer the contents of <buf> starting at offset <ofs>
+ * for <len> bytes, as up to two contiguous chunks when the data wrap past the
+ * end of the storage area.
+ */
+static inline void check_trace_buf(const struct buffer *buf, size_t ofs, size_t len)
+{
+ size_t block1, block2;
+ int line, ptr, newptr;
+
+ block1 = b_contig_data(buf, ofs);
+ block2 = 0;
+ if (block1 > len)
+ block1 = len;
+ block2 = len - block1;
+
+ ofs = b_peek_ofs(buf, ofs);
+
+ line = 0;
+ ptr = ofs;
+ while (ptr < ofs + block1) {
+ newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), ofs + block1, &line, ptr);
+ if (newptr == ptr)
+ break;
+ ptr = newptr;
+ }
+
+ /* the second (wrapped) block starts back at the buffer origin */
+ line = ptr = 0;
+ while (ptr < block2) {
+ newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), block2, &line, ptr);
+ if (newptr == ptr)
+ break;
+ ptr = newptr;
+ }
+}
+
+/* trace callback for checks: appends a human-readable dump of the check's
+ * state to the trace buffer. <a1> is the check, <a4> an optional value to
+ * report. The amount of detail depends on <src>'s level and verbosity.
+ */
+static void check_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ const struct check *check = a1;
+ const struct server *srv = (check ? check->server : NULL);
+ const size_t *val = a4;
+ const char *res;
+
+ if (!check || src->verbosity < CHK_VERB_CLEAN)
+ return;
+
+ if (srv) {
+ /* [E]xternal, [A]gent or [H]ealth check, plus the server's state */
+ chunk_appendf(&trace_buf, " : [%c] SRV=%s",
+ ((check->type == PR_O2_EXT_CHK) ? 'E' : (check->state & CHK_ST_AGENT ? 'A' : 'H')),
+ srv->id);
+
+ chunk_appendf(&trace_buf, " status=%d/%d %s",
+ (check->health >= check->rise) ? check->health - check->rise + 1 : check->health,
+ (check->health >= check->rise) ? check->fall : check->rise,
+ (check->health >= check->rise) ? (srv->uweight ? "UP" : "DRAIN") : "DOWN");
+ }
+ else
+ chunk_appendf(&trace_buf, " : [EMAIL]");
+
+ switch (check->result) {
+ case CHK_RES_NEUTRAL: res = "-"; break;
+ case CHK_RES_FAILED: res = "FAIL"; break;
+ case CHK_RES_PASSED: res = "PASS"; break;
+ case CHK_RES_CONDPASS: res = "COND"; break;
+ default: res = "UNK"; break;
+ }
+
+ if (src->verbosity == CHK_VERB_CLEAN)
+ return;
+
+ chunk_appendf(&trace_buf, " - last=%s(%d)/%s(%d)",
+ get_check_status_info(check->status), check->status,
+ res, check->result);
+
+ /* Display the value of the 4th argument (level > STATE) */
+ /* NOTE(review): "%lu" is paired with a (long) cast -- confirm the intended signedness */
+ if (src->level > TRACE_LEVEL_STATE && val)
+ chunk_appendf(&trace_buf, " - VAL=%lu", (long)*val);
+
+ chunk_appendf(&trace_buf, " check=%p(0x%08x)", check, check->state);
+
+ if (src->verbosity == CHK_VERB_MINIMAL)
+ return;
+
+
+ if (check->sc) {
+ struct connection *conn = sc_conn(check->sc);
+
+ chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn ? conn->flags : 0);
+ chunk_appendf(&trace_buf, " sc=%p(0x%08x)", check->sc, check->sc->flags);
+ }
+
+ if (mask & CHK_EV_TCPCHK) {
+ const char *type;
+
+ switch (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) {
+ case TCPCHK_RULES_PGSQL_CHK: type = "PGSQL"; break;
+ case TCPCHK_RULES_REDIS_CHK: type = "REDIS"; break;
+ case TCPCHK_RULES_SMTP_CHK: type = "SMTP"; break;
+ case TCPCHK_RULES_HTTP_CHK: type = "HTTP"; break;
+ case TCPCHK_RULES_MYSQL_CHK: type = "MYSQL"; break;
+ case TCPCHK_RULES_LDAP_CHK: type = "LDAP"; break;
+ case TCPCHK_RULES_SSL3_CHK: type = "SSL3"; break;
+ case TCPCHK_RULES_AGENT_CHK: type = "AGENT"; break;
+ case TCPCHK_RULES_SPOP_CHK: type = "SPOP"; break;
+ case TCPCHK_RULES_TCP_CHK: type = "TCP"; break;
+ default: type = "???"; break;
+ }
+ if (check->current_step)
+ chunk_appendf(&trace_buf, " - tcp-check=(%s,%d)", type, tcpcheck_get_step_id(check, NULL));
+ else
+ chunk_appendf(&trace_buf, " - tcp-check=(%s,-)", type);
+ }
+
+ /* Display bi and bo buffer info (level > USER); the buffer contents
+ * themselves are only dumped at verbosity >= ADVANCED below.
+ */
+ if (src->level > TRACE_LEVEL_USER) {
+ const struct buffer *buf = NULL;
+
+ chunk_appendf(&trace_buf, " bi=%u@%p+%u/%u",
+ (unsigned int)b_data(&check->bi), b_orig(&check->bi),
+ (unsigned int)b_head_ofs(&check->bi), (unsigned int)b_size(&check->bi));
+ chunk_appendf(&trace_buf, " bo=%u@%p+%u/%u",
+ (unsigned int)b_data(&check->bo), b_orig(&check->bo),
+ (unsigned int)b_head_ofs(&check->bo), (unsigned int)b_size(&check->bo));
+
+ if (src->verbosity >= CHK_VERB_ADVANCED && (mask & (CHK_EV_RX)))
+ buf = (b_is_null(&check->bi) ? NULL : &check->bi);
+ else if (src->verbosity >= CHK_VERB_ADVANCED && (mask & (CHK_EV_TX)))
+ buf = (b_is_null(&check->bo) ? NULL : &check->bo);
+
+ if (buf) {
+ if ((check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK) {
+ int full = (src->verbosity == CHK_VERB_COMPLETE);
+
+ chunk_memcat(&trace_buf, "\n\t", 2);
+ htx_dump(&trace_buf, htxbuf(buf), full);
+ }
+ else {
+ int max = ((src->verbosity == CHK_VERB_COMPLETE) ? 1024 : 256);
+
+ chunk_memcat(&trace_buf, "\n", 1);
+ if (b_data(buf) > max) {
+ check_trace_buf(buf, 0, max);
+ /* NOTE(review): length 6 also copies the trailing NUL of " ...\n" -- confirm intended */
+ chunk_memcat(&trace_buf, " ...\n", 6);
+ }
+ else
+ check_trace_buf(buf, 0, b_data(buf));
+ }
+
+ }
+ }
+
+}
+
+
+/**************************************************************************/
+/************************ Handle check results ****************************/
+/**************************************************************************/
+/* maps a check status to its result code, short info and long description */
+struct check_status {
+ short result; /* one of SRV_CHK_* */
+ char *info; /* human readable short info */
+ char *desc; /* long description */
+};
+
+struct analyze_status {
+ char *desc; /* description */
+ unsigned char lr[HANA_OBS_SIZE]; /* result for l4/l7: 0 = ignore, 1 - error, 2 - OK */
+};
+
+/* one entry per HCHK_STATUS_* code */
+static const struct check_status check_statuses[HCHK_STATUS_SIZE] = {
+ [HCHK_STATUS_UNKNOWN] = { CHK_RES_UNKNOWN, "UNK", "Unknown" },
+ [HCHK_STATUS_INI] = { CHK_RES_UNKNOWN, "INI", "Initializing" },
+ [HCHK_STATUS_START] = { /* SPECIAL STATUS*/ },
+
+ /* Below we have finished checks */
+ [HCHK_STATUS_CHECKED] = { CHK_RES_NEUTRAL, "CHECKED", "No status change" },
+ [HCHK_STATUS_HANA] = { CHK_RES_FAILED, "HANA", "Health analyze" },
+
+ [HCHK_STATUS_SOCKERR] = { CHK_RES_FAILED, "SOCKERR", "Socket error" },
+
+ [HCHK_STATUS_L4OK] = { CHK_RES_PASSED, "L4OK", "Layer4 check passed" },
+ [HCHK_STATUS_L4TOUT] = { CHK_RES_FAILED, "L4TOUT", "Layer4 timeout" },
+ [HCHK_STATUS_L4CON] = { CHK_RES_FAILED, "L4CON", "Layer4 connection problem" },
+
+ [HCHK_STATUS_L6OK] = { CHK_RES_PASSED, "L6OK", "Layer6 check passed" },
+ [HCHK_STATUS_L6TOUT] = { CHK_RES_FAILED, "L6TOUT", "Layer6 timeout" },
+ [HCHK_STATUS_L6RSP] = { CHK_RES_FAILED, "L6RSP", "Layer6 invalid response" },
+
+ [HCHK_STATUS_L7TOUT] = { CHK_RES_FAILED, "L7TOUT", "Layer7 timeout" },
+ [HCHK_STATUS_L7RSP] = { CHK_RES_FAILED, "L7RSP", "Layer7 invalid response" },
+
+ [HCHK_STATUS_L57DATA] = { /* DUMMY STATUS */ },
+
+ [HCHK_STATUS_L7OKD] = { CHK_RES_PASSED, "L7OK", "Layer7 check passed" },
+ [HCHK_STATUS_L7OKCD] = { CHK_RES_CONDPASS, "L7OKC", "Layer7 check conditionally passed" },
+ [HCHK_STATUS_L7STS] = { CHK_RES_FAILED, "L7STS", "Layer7 wrong status" },
+
+ [HCHK_STATUS_PROCERR] = { CHK_RES_FAILED, "PROCERR", "External check error" },
+ [HCHK_STATUS_PROCTOUT] = { CHK_RES_FAILED, "PROCTOUT", "External check timeout" },
+ [HCHK_STATUS_PROCOK] = { CHK_RES_PASSED, "PROCOK", "External check passed" },
+};
+
+/* one entry per HANA_STATUS_* code */
+static const struct analyze_status analyze_statuses[HANA_STATUS_SIZE] = { /* 0: ignore, 1: error, 2: OK */
+ [HANA_STATUS_UNKNOWN] = { "Unknown", { 0, 0 }},
+
+ [HANA_STATUS_L4_OK] = { "L4 successful connection", { 2, 0 }},
+ [HANA_STATUS_L4_ERR] = { "L4 unsuccessful connection", { 1, 1 }},
+
+ [HANA_STATUS_HTTP_OK] = { "Correct http response", { 0, 2 }},
+ [HANA_STATUS_HTTP_STS] = { "Wrong http response", { 0, 1 }},
+ [HANA_STATUS_HTTP_HDRRSP] = { "Invalid http response (headers)", { 0, 1 }},
+ [HANA_STATUS_HTTP_RSP] = { "Invalid http response", { 0, 1 }},
+
+ [HANA_STATUS_HTTP_READ_ERROR] = { "Read error (http)", { 0, 1 }},
+ [HANA_STATUS_HTTP_READ_TIMEOUT] = { "Read timeout (http)", { 0, 1 }},
+ [HANA_STATUS_HTTP_BROKEN_PIPE] = { "Close from server (http)", { 0, 1 }},
+};
+
+/* checks if <err> is a real error for errno or one that can be ignored, and
+ * return 0 for these ones or <err> for real ones. The ignored ones are those
+ * typically seen on non-blocking sockets while a connection is being set up.
+ */
+static inline int unclean_errno(int err)
+{
+ if (err == EAGAIN || err == EWOULDBLOCK || err == EINPROGRESS ||
+ err == EISCONN || err == EALREADY)
+ return 0;
+ return err;
+}
+
+/* Converts check_status code to result code, falling back to the UNKNOWN
+ * entry for out-of-range codes.
+ * NOTE(review): a negative <check_status> would index before the array --
+ * callers presumably never pass one; confirm.
+ */
+short get_check_status_result(short check_status)
+{
+ if (check_status < HCHK_STATUS_SIZE)
+ return check_statuses[check_status].result;
+ else
+ return check_statuses[HCHK_STATUS_UNKNOWN].result;
+}
+
+/* Converts check_status code to description. Falls back to the UNKNOWN
+ * entry's description for out-of-range codes or empty descriptions.
+ */
+const char *get_check_status_description(short check_status) {
+
+ const char *desc;
+
+ if (check_status < HCHK_STATUS_SIZE)
+ desc = check_statuses[check_status].desc;
+ else
+ desc = NULL;
+
+ if (desc && *desc)
+ return desc;
+ else
+ return check_statuses[HCHK_STATUS_UNKNOWN].desc;
+}
+
+/* Converts check_status code to short info. Falls back to the UNKNOWN
+ * entry's info for out-of-range codes or empty info strings.
+ */
+const char *get_check_status_info(short check_status)
+{
+ const char *info;
+
+ if (check_status < HCHK_STATUS_SIZE)
+ info = check_statuses[check_status].info;
+ else
+ info = NULL;
+
+ if (info && *info)
+ return info;
+ else
+ return check_statuses[HCHK_STATUS_UNKNOWN].info;
+}
+
+/* Convert analyze_status to description. Falls back to the UNKNOWN entry's
+ * description for out-of-range codes or empty descriptions.
+ */
+const char *get_analyze_status(short analyze_status) {
+
+ const char *desc;
+
+ if (analyze_status < HANA_STATUS_SIZE)
+ desc = analyze_statuses[analyze_status].desc;
+ else
+ desc = NULL;
+
+ if (desc && *desc)
+ return desc;
+ else
+ return analyze_statuses[HANA_STATUS_UNKNOWN].desc;
+}
+
+/* append check info to buffer msg: the status reason, the optional layer5-7
+ * code, the escaped description and the check duration. Does nothing when
+ * <check> is NULL.
+ */
+void check_append_info(struct buffer *msg, struct check *check)
+{
+ if (!check)
+ return;
+ chunk_appendf(msg, ", reason: %s", get_check_status_description(check->status));
+
+ if (check->status >= HCHK_STATUS_L57DATA)
+ chunk_appendf(msg, ", code: %d", check->code);
+
+ if (check->desc[0]) {
+ struct buffer src;
+
+ chunk_appendf(msg, ", info: \"");
+
+ /* ASCII-encode the description so embedded quotes can't break the output */
+ chunk_initlen(&src, check->desc, 0, strlen(check->desc));
+ chunk_asciiencode(msg, &src, '"');
+
+ chunk_appendf(msg, "\"");
+ }
+
+ if (check->duration >= 0)
+ chunk_appendf(msg, ", check duration: %ldms", check->duration);
+}
+
/* Sets check->status, update check->duration and fill check->result with an
 * adequate CHK_RES_* value. The new check->health is computed based on the
 * result.
 *
 * Shows information in logs about failed health check if server is UP or
 * succeeded health checks if server is DOWN.
 */
void set_server_check_status(struct check *check, short status, const char *desc)
{
	struct server *s = check->server;
	short prev_status = check->status;
	/* by default only report when the status actually changes */
	int report = (status != prev_status) ? 1 : 0;

	TRACE_POINT(CHK_EV_HCHK_RUN, check);

	if (status == HCHK_STATUS_START) {
		check->result = CHK_RES_UNKNOWN; /* no result yet */
		check->desc[0] = '\0';
		check->start = now_ns;
		return;
	}

	/* NOTE(review): zero status (presumably HCHK_STATUS_UNKNOWN, i.e. no
	 * check was ever started) is ignored here -- confirm the intent.
	 */
	if (!check->status)
		return;

	if (desc && *desc) {
		/* bounded copy, explicitly NUL-terminated */
		strncpy(check->desc, desc, HCHK_DESC_LEN-1);
		check->desc[HCHK_DESC_LEN-1] = '\0';
	} else
		check->desc[0] = '\0';

	check->status = status;
	/* statuses whose table entry has a zero result leave check->result as-is */
	if (check_statuses[status].result)
		check->result = check_statuses[status].result;

	if (status == HCHK_STATUS_HANA)
		/* health-analysis events carry no meaningful duration */
		check->duration = -1;
	else if (check->start) {
		/* set_server_check_status() may be called more than once */
		check->duration = ns_to_ms(now_ns - check->start);
		check->start = 0;
	}

	/* no change is expected if no state change occurred */
	if (check->result == CHK_RES_NEUTRAL)
		return;

	/* If the check was really just sending a mail, it won't have an
	 * associated server, so we're done now.
	 */
	if (!s)
		return;

	switch (check->result) {
	case CHK_RES_FAILED:
		/* Failure to connect to the agent as a secondary check should not
		 * cause the server to be marked down.
		 */
		if ((!(check->state & CHK_ST_AGENT) ||
		    (check->status >= HCHK_STATUS_L57DATA)) &&
		    (check->health > 0)) {
			_HA_ATOMIC_INC(&s->counters.failed_checks);
			report = 1;
			check->health--;
			/* once below <rise>, the health counter drops straight to 0 */
			if (check->health < check->rise)
				check->health = 0;
		}
		break;

	case CHK_RES_PASSED:
	case CHK_RES_CONDPASS:
		if (check->health < check->rise + check->fall - 1) {
			report = 1;
			check->health++;

			/* once <rise> is reached, jump directly to full health */
			if (check->health >= check->rise)
				check->health = check->rise + check->fall - 1; /* OK now */
		}

		/* clear consecutive_errors if observing is enabled */
		if (s->onerror)
			HA_ATOMIC_STORE(&s->consecutive_errors, 0);
		break;

	default:
		break;
	}

	/* notify event subscribers of the status change */
	if (report)
		srv_event_hdl_publish_check(s, check);

	if (s->proxy->options2 & PR_O2_LOGHCHKS && report) {
		/* build a detailed human-readable report for logs and email alerts */
		chunk_printf(&trash,
		             "%s check for %sserver %s/%s %s%s",
		             (check->state & CHK_ST_AGENT) ? "Agent" : "Health",
		             s->flags & SRV_F_BACKUP ? "backup " : "",
		             s->proxy->id, s->id,
		             (check->result == CHK_RES_CONDPASS) ? "conditionally ":"",
		             (check->result >= CHK_RES_PASSED) ? "succeeded" : "failed");

		check_append_info(&trash, check);

		chunk_appendf(&trash, ", status: %d/%d %s",
		              (check->health >= check->rise) ? check->health - check->rise + 1 : check->health,
		              (check->health >= check->rise) ? check->fall : check->rise,
		              (check->health >= check->rise) ? (s->uweight ? "UP" : "DRAIN") : "DOWN");

		ha_warning("%s.\n", trash.area);
		send_log(s->proxy, LOG_NOTICE, "%s.\n", trash.area);
		send_email_alert(s, LOG_INFO, "%s", trash.area);
	}
}
+
+static inline enum srv_op_st_chg_cause check_notify_cause(struct check *check)
+{
+ struct server *s = check->server;
+
+ /* We only report a cause for the check if we did not do so previously */
+ if (!s->track && !(s->proxy->options2 & PR_O2_LOGHCHKS))
+ return (check->state & CHK_ST_AGENT) ? SRV_OP_STCHGC_AGENT : SRV_OP_STCHGC_HEALTH;
+ return SRV_OP_STCHGC_NONE;
+}
+
+/* Marks the check <check>'s server down if the current check is already failed
+ * and the server is not down yet nor in maintenance.
+ */
+void check_notify_failure(struct check *check)
+{
+ struct server *s = check->server;
+
+ /* The agent secondary check should only cause a server to be marked
+ * as down if check->status is HCHK_STATUS_L7STS, which indicates
+ * that the agent returned "fail", "stopped" or "down".
+ * The implication here is that failure to connect to the agent
+ * as a secondary check should not cause the server to be marked
+ * down. */
+ if ((check->state & CHK_ST_AGENT) && check->status != HCHK_STATUS_L7STS)
+ return;
+
+ if (check->health > 0)
+ return;
+
+ TRACE_STATE("health-check failed, set server DOWN", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
+ srv_set_stopped(s, check_notify_cause(check));
+}
+
+/* Marks the check <check> as valid and tries to set its server up, provided
+ * it isn't in maintenance, it is not tracking a down server and other checks
+ * comply. The rule is simple : by default, a server is up, unless any of the
+ * following conditions is true :
+ * - health check failed (check->health < rise)
+ * - agent check failed (agent->health < rise)
+ * - the server tracks a down server (track && track->state == STOPPED)
+ * Note that if the server has a slowstart, it will switch to STARTING instead
+ * of RUNNING. Also, only the health checks support the nolb mode, so the
+ * agent's success may not take the server out of this mode.
+ */
+void check_notify_success(struct check *check)
+{
+ struct server *s = check->server;
+
+ if (s->next_admin & SRV_ADMF_MAINT)
+ return;
+
+ if (s->track && s->track->next_state == SRV_ST_STOPPED)
+ return;
+
+ if ((s->check.state & CHK_ST_ENABLED) && (s->check.health < s->check.rise))
+ return;
+
+ if ((s->agent.state & CHK_ST_ENABLED) && (s->agent.health < s->agent.rise))
+ return;
+
+ if ((check->state & CHK_ST_AGENT) && s->next_state == SRV_ST_STOPPING)
+ return;
+
+ TRACE_STATE("health-check succeeded, set server RUNNING", CHK_EV_HCHK_END|CHK_EV_HCHK_SUCC, check);
+ srv_set_running(s, check_notify_cause(check));
+}
+
+/* Marks the check <check> as valid and tries to set its server into stopping mode
+ * if it was running or starting, and provided it isn't in maintenance and other
+ * checks comply. The conditions for the server to be marked in stopping mode are
+ * the same as for it to be turned up. Also, only the health checks support the
+ * nolb mode.
+ */
+void check_notify_stopping(struct check *check)
+{
+ struct server *s = check->server;
+
+ if (s->next_admin & SRV_ADMF_MAINT)
+ return;
+
+ if (check->state & CHK_ST_AGENT)
+ return;
+
+ if (s->track && s->track->next_state == SRV_ST_STOPPED)
+ return;
+
+ if ((s->check.state & CHK_ST_ENABLED) && (s->check.health < s->check.rise))
+ return;
+
+ if ((s->agent.state & CHK_ST_ENABLED) && (s->agent.health < s->agent.rise))
+ return;
+
+ TRACE_STATE("health-check condionnaly succeeded, set server STOPPING", CHK_EV_HCHK_END|CHK_EV_HCHK_SUCC, check);
+ srv_set_stopping(s, check_notify_cause(check));
+}
+
/* note: use health_adjust() only, which first checks that the observe mode is
 * enabled. This will take the server lock if needed.
 */
void __health_adjust(struct server *s, short status)
{
	int failed;

	/* upper-bound check on the observe mode; the caller guarantees the
	 * mode is enabled (non-zero), see the note above */
	if (s->observe >= HANA_OBS_SIZE)
		return;

	if (status >= HANA_STATUS_SIZE || !analyze_statuses[status].desc)
		return;

	/* lr[observe - 1] tells how this status is interpreted under the
	 * current observe layer: 1 = failure, 2 = success, anything else
	 * means the status is not tracked for this mode.
	 */
	switch (analyze_statuses[status].lr[s->observe - 1]) {
	case 1:
		failed = 1;
		break;

	case 2:
		failed = 0;
		break;

	default:
		return;
	}

	if (!failed) {
		/* good: clear consecutive_errors */
		HA_ATOMIC_STORE(&s->consecutive_errors, 0);
		return;
	}

	/* count this failure; only act once the configured limit is reached */
	if (HA_ATOMIC_ADD_FETCH(&s->consecutive_errors, 1) < s->consecutive_errors_limit)
		return;

	chunk_printf(&trash, "Detected %d consecutive errors, last one was: %s",
	             HA_ATOMIC_LOAD(&s->consecutive_errors), get_analyze_status(status));

	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);

	/* force fastinter for upcoming check
	 * (does nothing if fastinter is not enabled)
	 */
	s->check.state |= CHK_ST_FASTINTER;

	switch (s->onerror) {
	case HANA_ONERR_FASTINTER:
		/* force fastinter - nothing to do here as all modes force it */
		break;

	case HANA_ONERR_SUDDTH:
		/* simulate a pre-fatal failed health check */
		if (s->check.health > s->check.rise)
			s->check.health = s->check.rise + 1;

		__fallthrough;

	case HANA_ONERR_FAILCHK:
		/* simulate a failed health check */
		set_server_check_status(&s->check, HCHK_STATUS_HANA,
					trash.area);
		check_notify_failure(&s->check);
		break;

	case HANA_ONERR_MARKDWN:
		/* mark server down */
		s->check.health = s->check.rise;
		set_server_check_status(&s->check, HCHK_STATUS_HANA,
					trash.area);
		check_notify_failure(&s->check);
		break;

	default:
		/* write a warning? */
		break;
	}

	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);

	/* start a fresh error-counting window */
	HA_ATOMIC_STORE(&s->consecutive_errors, 0);
	_HA_ATOMIC_INC(&s->counters.failed_hana);

	if (s->check.fastinter) {
		/* timer might need to be advanced, it might also already be
		 * running in another thread. Let's just wake the task up, it
		 * will automatically adjust its timer.
		 */
		task_wakeup(s->check.task, TASK_WOKEN_MSG);
	}
}
+
/* Checks the connection. If an error has already been reported or the socket is
 * closed, keep errno intact as it is supposed to contain the valid error code.
 * If no error is reported, check the socket's error queue using getsockopt().
 * Warning, this must be done only once when returning from poll, and never
 * after an I/O error was attempted, otherwise the error queue might contain
 * inconsistent errors. If an error is detected, the CO_FL_ERROR is set on the
 * socket. Returns non-zero if an error was reported, zero if everything is
 * clean (including a properly closed socket).
 */
static int retrieve_errno_from_socket(struct connection *conn)
{
	int skerr;
	socklen_t lskerr = sizeof(skerr);

	/* an error already known with a meaningful errno, or no control layer
	 * at all: keep errno untouched */
	if (conn->flags & CO_FL_ERROR && (unclean_errno(errno) || !conn->ctrl))
		return 1;

	if (!conn_ctrl_ready(conn))
		return 0;

	/* this path requires a real file descriptor */
	BUG_ON(conn->flags & CO_FL_FDLESS);

	/* pull the pending error (if any) out of the socket's error queue */
	if (getsockopt(conn->handle.fd, SOL_SOCKET, SO_ERROR, &skerr, &lskerr) == 0)
		errno = skerr;

	/* normalize transient conditions to "no error" */
	errno = unclean_errno(errno);

	if (!errno) {
		/* we could not retrieve an error, that does not mean there is
		 * none. Just don't change anything and only report the prior
		 * error if any.
		 */
		if (conn->flags & CO_FL_ERROR)
			return 1;
		else
			return 0;
	}

	conn->flags |= CO_FL_ERROR | CO_FL_SOCK_WR_SH | CO_FL_SOCK_RD_SH;
	return 1;
}
+
/* Tries to collect as much information as possible on the connection status,
 * and adjust the server status accordingly. It may make use of <errno_bck>
 * if non-null when the caller is absolutely certain of its validity (eg:
 * checked just after a syscall). If the caller doesn't have a valid errno,
 * it can pass zero, and retrieve_errno_from_socket() will be called to try
 * to extract errno from the socket. If no error is reported, it will consider
 * the <expired> flag. This is intended to be used when a connection error was
 * reported in conn->flags or when a timeout was reported in <expired>. The
 * function takes care of not updating a server status which was already set.
 * All situations where at least one of <expired> or CO_FL_ERROR are set
 * produce a status.
 */
void chk_report_conn_err(struct check *check, int errno_bck, int expired)
{
	struct stconn *sc = check->sc;
	struct connection *conn = sc_conn(sc);
	const char *err_msg;
	struct buffer *chk;
	int step;

	/* a status was already set for this check: do not overwrite it */
	if (check->result != CHK_RES_UNKNOWN) {
		return;
	}

	errno = unclean_errno(errno_bck);
	if (conn && errno)
		retrieve_errno_from_socket(conn);

	/* neither a connection error nor a timeout: nothing to report */
	if (conn && !(conn->flags & CO_FL_ERROR) && !sc_ep_test(sc, SE_FL_ERROR) && !expired)
		return;

	TRACE_ENTER(CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check, 0, 0, (size_t[]){expired});

	/* we'll try to build a meaningful error message depending on the
	 * context of the error possibly present in conn->err_code, and the
	 * socket error possibly collected above. This is useful to know the
	 * exact step of the L6 layer (eg: SSL handshake).
	 */
	chk = get_trash_chunk();

	if (check->type == PR_O2_TCPCHK_CHK &&
	    (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK) {
		step = tcpcheck_get_step_id(check, NULL);
		if (!step) {
			TRACE_DEVEL("initial connection failure", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
			chunk_printf(chk, " at initial connection step of tcp-check");
		}
		else {
			chunk_printf(chk, " at step %d of tcp-check", step);
			/* we were looking for a string */
			if (check->current_step && check->current_step->action == TCPCHK_ACT_CONNECT) {
				if (check->current_step->connect.port)
					chunk_appendf(chk, " (connect port %d)" ,check->current_step->connect.port);
				else
					chunk_appendf(chk, " (connect)");
				TRACE_DEVEL("connection failure", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
			}
			else if (check->current_step && check->current_step->action == TCPCHK_ACT_EXPECT) {
				struct tcpcheck_expect *expect = &check->current_step->expect;

				/* describe the expect rule that was pending when the error occurred */
				switch (expect->type) {
				case TCPCHK_EXPECT_STRING:
					chunk_appendf(chk, " (expect string '%.*s')", (unsigned int)istlen(expect->data), istptr(expect->data));
					break;
				case TCPCHK_EXPECT_BINARY:
					chunk_appendf(chk, " (expect binary '");
					dump_binary(chk, istptr(expect->data), (int)istlen(expect->data));
					chunk_appendf(chk, "')");
					break;
				case TCPCHK_EXPECT_STRING_REGEX:
					chunk_appendf(chk, " (expect regex)");
					break;
				case TCPCHK_EXPECT_BINARY_REGEX:
					chunk_appendf(chk, " (expect binary regex)");
					break;
				case TCPCHK_EXPECT_STRING_LF:
					chunk_appendf(chk, " (expect log-format string)");
					break;
				case TCPCHK_EXPECT_BINARY_LF:
					chunk_appendf(chk, " (expect log-format binary)");
					break;
				case TCPCHK_EXPECT_HTTP_STATUS:
					chunk_appendf(chk, " (expect HTTP status codes)");
					break;
				case TCPCHK_EXPECT_HTTP_STATUS_REGEX:
					chunk_appendf(chk, " (expect HTTP status regex)");
					break;
				case TCPCHK_EXPECT_HTTP_HEADER:
					chunk_appendf(chk, " (expect HTTP header pattern)");
					break;
				case TCPCHK_EXPECT_HTTP_BODY:
					chunk_appendf(chk, " (expect HTTP body content '%.*s')", (unsigned int)istlen(expect->data), istptr(expect->data));
					break;
				case TCPCHK_EXPECT_HTTP_BODY_REGEX:
					chunk_appendf(chk, " (expect HTTP body regex)");
					break;
				case TCPCHK_EXPECT_HTTP_BODY_LF:
					chunk_appendf(chk, " (expect log-format HTTP body)");
					break;
				case TCPCHK_EXPECT_CUSTOM:
					chunk_appendf(chk, " (expect custom function)");
					break;
				case TCPCHK_EXPECT_UNDEF:
					chunk_appendf(chk, " (undefined expect!)");
					break;
				}
				TRACE_DEVEL("expect rule failed", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
			}
			else if (check->current_step && check->current_step->action == TCPCHK_ACT_SEND) {
				chunk_appendf(chk, " (send)");
				TRACE_DEVEL("send rule failed", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
			}

			if (check->current_step && check->current_step->comment)
				chunk_appendf(chk, " comment: '%s'", check->current_step->comment);
		}
	}

	/* assemble the final message: connection error code, socket error and
	 * the step description built above */
	if (conn && conn->err_code) {
		if (unclean_errno(errno))
			chunk_printf(&trash, "%s (%s)%s", conn_err_code_str(conn), strerror(errno),
				     chk->area);
		else
			chunk_printf(&trash, "%s%s", conn_err_code_str(conn),
				     chk->area);
		err_msg = trash.area;
	}
	else {
		if (unclean_errno(errno)) {
			chunk_printf(&trash, "%s%s", strerror(errno),
				     chk->area);
			err_msg = trash.area;
		}
		else {
			err_msg = chk->area;
		}
	}

	if (check->state & CHK_ST_PORT_MISS) {
		/* NOTE: this is reported after <fall> tries */
		set_server_check_status(check, HCHK_STATUS_SOCKERR, err_msg);
	}

	/* classify the failure by the connection stage that was reached */
	if (!conn || !conn->ctrl) {
		/* error before any connection attempt (connection allocation error or no control layer) */
		set_server_check_status(check, HCHK_STATUS_SOCKERR, err_msg);
	}
	else if (conn->flags & CO_FL_WAIT_L4_CONN) {
		/* L4 not established (yet) */
		if (conn->flags & CO_FL_ERROR || sc_ep_test(sc, SE_FL_ERROR))
			set_server_check_status(check, HCHK_STATUS_L4CON, err_msg);
		else if (expired)
			set_server_check_status(check, HCHK_STATUS_L4TOUT, err_msg);

		/*
		 * might be due to a server IP change.
		 * Let's trigger a DNS resolution if none are currently running.
		 */
		if (check->server)
			resolv_trigger_resolution(check->server->resolv_requester);

	}
	else if (conn->flags & CO_FL_WAIT_L6_CONN) {
		/* L6 not established (yet) */
		if (conn->flags & CO_FL_ERROR || sc_ep_test(sc, SE_FL_ERROR))
			set_server_check_status(check, HCHK_STATUS_L6RSP, err_msg);
		else if (expired)
			set_server_check_status(check, HCHK_STATUS_L6TOUT, err_msg);
	}
	else if (conn->flags & CO_FL_ERROR || sc_ep_test(sc, SE_FL_ERROR)) {
		/* I/O error after connection was established and before we could diagnose */
		set_server_check_status(check, HCHK_STATUS_SOCKERR, err_msg);
	}
	else if (expired) {
		enum healthcheck_status tout = HCHK_STATUS_L7TOUT;

		/* connection established but expired check */
		if (check->current_step && check->current_step->action == TCPCHK_ACT_EXPECT &&
		    check->current_step->expect.tout_status != HCHK_STATUS_UNKNOWN)
			tout = check->current_step->expect.tout_status;
		set_server_check_status(check, tout, err_msg);
	}

	TRACE_LEAVE(CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
	return;
}
+
+
/* Builds the server state header used by HTTP health-checks.
 * Appends the state string and server identification/load fields to <buf>
 * and returns the resulting buffer length (b_data()).
 */
int httpchk_build_status_header(struct server *s, struct buffer *buf)
{
	int sv_state;
	int ratio;
	char addr[46];
	char port[6];
	/* indexed by sv_state: 0=DOWN, 1=going up, 2=going down, 3=UP,
	 * 4=NOLB going down, 5=NOLB, 6=no check. Entries 1, 2 and 4 consume
	 * the two extra %d arguments passed to chunk_appendf() below; the
	 * plain-string entries simply ignore them.
	 */
	const char *srv_hlt_st[7] = { "DOWN", "DOWN %d/%d",
				      "UP %d/%d", "UP",
				      "NOLB %d/%d", "NOLB",
				      "no check" };

	if (!(s->check.state & CHK_ST_ENABLED))
		sv_state = 6;
	else if (s->cur_state != SRV_ST_STOPPED) {
		if (s->check.health == s->check.rise + s->check.fall - 1)
			sv_state = 3; /* UP */
		else
			sv_state = 2; /* going down */

		/* shift UP/going-down to their NOLB counterparts (4/5) */
		if (s->cur_state == SRV_ST_STOPPING)
			sv_state += 2;
	} else {
		if (s->check.health)
			sv_state = 1; /* going up */
		else
			sv_state = 0; /* DOWN */
	}

	chunk_appendf(buf, srv_hlt_st[sv_state],
		      (s->cur_state != SRV_ST_STOPPED) ? (s->check.health - s->check.rise + 1) : (s->check.health),
		      (s->cur_state != SRV_ST_STOPPED) ? (s->check.fall) : (s->check.rise));

	addr_to_str(&s->addr, addr, sizeof(addr));
	/* only IP families carry a numeric service port */
	if (s->addr.ss_family == AF_INET || s->addr.ss_family == AF_INET6)
		snprintf(port, sizeof(port), "%u", s->svc_port);
	else
		*port = 0;

	chunk_appendf(buf, "; address=%s; port=%s; name=%s/%s; node=%s; weight=%d/%d; scur=%d/%d; qcur=%d",
		      addr, port, s->proxy->id, s->id,
		      global.node,
		      (s->cur_eweight * s->proxy->lbprm.wmult + s->proxy->lbprm.wdiv - 1) / s->proxy->lbprm.wdiv,
		      (s->proxy->lbprm.tot_weight * s->proxy->lbprm.wmult + s->proxy->lbprm.wdiv - 1) / s->proxy->lbprm.wdiv,
		      s->cur_sess, s->proxy->beconn - s->proxy->queue.length,
		      s->queue.length);

	/* report the slowstart throttle ratio while the window is active */
	if ((s->cur_state == SRV_ST_STARTING) &&
	    ns_to_sec(now_ns) < s->last_change + s->slowstart &&
	    ns_to_sec(now_ns) >= s->last_change) {
		ratio = MAX(1, 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart);
		chunk_appendf(buf, "; throttle=%d%%", ratio);
	}

	return b_data(buf);
}
+
/**************************************************************************/
/***************** Health-checks based on connections *********************/
/**************************************************************************/
/* This function is used only for server health-checks. It handles connection
 * status updates including errors. If necessary, it wakes the check task up.
 * It returns 0 on normal cases, <0 if at least one close() has happened on the
 * connection (eg: reconnect). It relies on tcpcheck_main().
 */
int wake_srv_chk(struct stconn *sc)
{
	struct connection *conn;
	struct check *check = __sc_check(sc);
	/* NOTE(review): <q> is computed unconditionally via container_of but
	 * its lock is only used when check->server is NULL, i.e. for email
	 * alert checks embedded in an email_alertq -- confirm */
	struct email_alertq *q = container_of(check, typeof(*q), check);
	int ret = 0;

	TRACE_ENTER(CHK_EV_HCHK_WAKE, check);
	/* a result is already known: nothing more to do here */
	if (check->result != CHK_RES_UNKNOWN)
		goto end;

	/* serialize with the check's owner: server lock for health checks,
	 * queue lock for email alerts */
	if (check->server)
		HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
	else
		HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);

	/* we may have to make progress on the TCP checks */
	ret = tcpcheck_main(check);

	/* re-read, tcpcheck_main() may have replaced the stconn/connection */
	sc = check->sc;
	conn = sc_conn(sc);

	if (unlikely(!conn || conn->flags & CO_FL_ERROR || sc_ep_test(sc, SE_FL_ERROR))) {
		/* We may get error reports bypassing the I/O handlers, typically
		 * the case when sending a pure TCP check which fails, then the I/O
		 * handlers above are not called. This is completely handled by the
		 * main processing task so let's simply wake it up. If we get here,
		 * we expect errno to still be valid.
		 */
		TRACE_ERROR("report connection error", CHK_EV_HCHK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
		chk_report_conn_err(check, errno, 0);
		task_wakeup(check->task, TASK_WOKEN_IO);
	}

	if (check->result != CHK_RES_UNKNOWN || ret == -1) {
		/* Check complete or aborted. Wake the check task up to be sure
		 * the result is handled ASAP. */
		ret = -1;
		task_wakeup(check->task, TASK_WOKEN_IO);
	}

	if (check->server)
		HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
	else
		HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);

  end:
	TRACE_LEAVE(CHK_EV_HCHK_WAKE, check);
	return ret;
}
+
+/* This function checks if any I/O is wanted, and if so, attempts to do so */
+struct task *srv_chk_io_cb(struct task *t, void *ctx, unsigned int state)
+{
+ struct stconn *sc = ctx;
+
+ wake_srv_chk(sc);
+ return NULL;
+}
+
+/* returns <0, 0, >0 if check thread 1 is respectively less loaded than,
+ * equally as, or more loaded than thread 2. This is made to decide on
+ * migrations so a margin is applied in either direction. For ease of
+ * remembering the direction, consider this returns load1 - load2.
+ */
+static inline int check_thread_cmp_load(int thr1, int thr2)
+{
+ uint t1_load = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr1].rq_total);
+ uint t1_act = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr1].active_checks);
+ uint t2_load = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr2].rq_total);
+ uint t2_act = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr2].active_checks);
+
+ /* twice as more active checks is a significant difference */
+ if (t1_act * 2 < t2_act)
+ return -1;
+
+ if (t2_act * 2 < t1_act)
+ return 1;
+
+ /* twice as more rqload with more checks is also a significant
+ * difference.
+ */
+ if (t1_act <= t2_act && t1_load * 2 < t2_load)
+ return -1;
+
+ if (t2_act <= t1_act && t2_load * 2 < t1_load)
+ return 1;
+
+ /* otherwise they're roughly equal */
+ return 0;
+}
+
+/* returns <0, 0, >0 if check thread 1's active checks count is respectively
+ * higher than, equal, or lower than thread 2's. This is made to decide on
+ * forced migrations upon overload, so only a very little margin is applied
+ * here (~1%). For ease of remembering the direction, consider this returns
+ * active1 - active2.
+ */
+static inline int check_thread_cmp_active(int thr1, int thr2)
+{
+ uint t1_act = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr1].active_checks);
+ uint t2_act = _HA_ATOMIC_LOAD(&ha_thread_ctx[thr2].active_checks);
+
+ if (t1_act * 128 >= t2_act * 129)
+ return 1;
+ if (t2_act * 128 >= t1_act * 129)
+ return -1;
+ return 0;
+}
+
+
+/* manages a server health-check that uses a connection. Returns
+ * the time the task accepts to wait, or TIME_ETERNITY for infinity.
+ *
+ * Please do NOT place any return statement in this function and only leave
+ * via the out_unlock label.
+ */
+struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
+{
+ struct check *check = context;
+ struct proxy *proxy = check->proxy;
+ struct stconn *sc;
+ struct connection *conn;
+ int rv;
+ int expired = tick_is_expired(t->expire, now_ms);
+
+ TRACE_ENTER(CHK_EV_TASK_WAKE, check);
+
+ if (check->state & CHK_ST_SLEEPING) {
+ /* This check just restarted. It's still time to verify if
+ * we're on an overloaded thread or if a more suitable one is
+ * available. This helps spread the load over the available
+ * threads, without migrating too often. For this we'll check
+ * our load, and pick a random thread, check if it has less
+ * than half of the current thread's load, and if so we'll
+ * bounce the task there. It's possible because it's not yet
+ * tied to the current thread. The other thread will not bounce
+ * the task again because we're setting CHK_ST_READY indicating
+ * a migration.
+ */
+ uint run_checks = _HA_ATOMIC_LOAD(&th_ctx->running_checks);
+ uint my_load = HA_ATOMIC_LOAD(&th_ctx->rq_total);
+ uint attempts = MIN(global.nbthread, 3);
+
+ if (check->state & CHK_ST_READY) {
+ /* check was migrated, active already counted */
+ activity[tid].check_adopted++;
+ }
+ else {
+ /* first wakeup, let's check if another thread is less loaded
+ * than this one in order to smooth the load. If the current
+ * thread is not yet overloaded, we attempt an opportunistic
+ * migration to another thread that is not full and that is
+ * significantly less loaded. And if the current thread is
+ * already overloaded, we attempt a forced migration to a
+ * thread with less active checks. We try at most 3 random
+ * other thread.
+ */
+ while (attempts-- > 0 &&
+ (!LIST_ISEMPTY(&th_ctx->queued_checks) || my_load >= 3) &&
+ _HA_ATOMIC_LOAD(&th_ctx->active_checks) >= 3) {
+ uint new_tid = statistical_prng_range(global.nbthread);
+
+ if (new_tid == tid)
+ continue;
+
+ ALREADY_CHECKED(new_tid);
+
+ if (check_thread_cmp_active(tid, new_tid) > 0 &&
+ (run_checks >= global.tune.max_checks_per_thread ||
+ check_thread_cmp_load(tid, new_tid) > 0)) {
+ /* Found one. Let's migrate the task over there. We have to
+ * remove it from the WQ first and kill its expire time
+ * otherwise the scheduler will reinsert it and trigger a
+ * BUG_ON() as we're not allowed to call task_queue() for a
+ * foreign thread. The recipient will restore the expiration.
+ */
+ check->state |= CHK_ST_READY;
+ HA_ATOMIC_INC(&ha_thread_ctx[new_tid].active_checks);
+ task_unlink_wq(t);
+ t->expire = TICK_ETERNITY;
+ task_set_thread(t, new_tid);
+ task_wakeup(t, TASK_WOKEN_MSG);
+ TRACE_LEAVE(CHK_EV_TASK_WAKE, check);
+ return t;
+ }
+ }
+ /* check just woke up, count it as active */
+ _HA_ATOMIC_INC(&th_ctx->active_checks);
+ }
+
+ /* OK we're keeping it so this check is ours now */
+ task_set_thread(t, tid);
+ check->state &= ~CHK_ST_SLEEPING;
+
+ /* if we just woke up and the thread is full of running, or
+ * already has others waiting, we might have to wait in queue
+ * (for health checks only). This means !SLEEPING && !READY.
+ */
+ if (check->server &&
+ (!LIST_ISEMPTY(&th_ctx->queued_checks) ||
+ (global.tune.max_checks_per_thread &&
+ _HA_ATOMIC_LOAD(&th_ctx->running_checks) >= global.tune.max_checks_per_thread))) {
+ TRACE_DEVEL("health-check queued", CHK_EV_TASK_WAKE, check);
+ t->expire = TICK_ETERNITY;
+ LIST_APPEND(&th_ctx->queued_checks, &check->check_queue);
+
+ /* reset fastinter flag (if set) so that srv_getinter()
+ * only returns fastinter if server health is degraded
+ */
+ check->state &= ~CHK_ST_FASTINTER;
+ goto out_leave;
+ }
+
+ /* OK let's run, now we cannot roll back anymore */
+ check->state |= CHK_ST_READY;
+ activity[tid].check_started++;
+ _HA_ATOMIC_INC(&th_ctx->running_checks);
+ }
+
+ /* at this point, CHK_ST_SLEEPING = 0 and CHK_ST_READY = 1*/
+
+ if (check->server)
+ HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+
+ if (!(check->state & (CHK_ST_INPROGRESS|CHK_ST_IN_ALLOC|CHK_ST_OUT_ALLOC))) {
+ /* This task might have bounced from another overloaded thread, it
+ * needs an expiration timer that was supposed to be now, but that
+ * was erased during the bounce.
+ */
+ if (!tick_isset(t->expire)) {
+ t->expire = now_ms;
+ expired = 0;
+ }
+ }
+
+ if (unlikely(check->state & CHK_ST_PURGE)) {
+ TRACE_STATE("health-check state to purge", CHK_EV_TASK_WAKE, check);
+ }
+ else if (!(check->state & (CHK_ST_INPROGRESS))) {
+ /* no check currently running, but we might have been woken up
+ * before the timer's expiration to update it according to a
+ * new state (e.g. fastinter), in which case we'll reprogram
+ * the new timer.
+ */
+ if (!tick_is_expired(t->expire, now_ms)) { /* woke up too early */
+ if (check->server) {
+ int new_exp = tick_add(now_ms, MS_TO_TICKS(srv_getinter(check)));
+
+ if (tick_is_expired(new_exp, t->expire)) {
+ TRACE_STATE("health-check was advanced", CHK_EV_TASK_WAKE, check);
+ goto update_timer;
+ }
+ }
+
+ TRACE_STATE("health-check wake up too early", CHK_EV_TASK_WAKE, check);
+ goto out_unlock;
+ }
+
+ /* we don't send any health-checks when the proxy is
+ * stopped, the server should not be checked or the check
+ * is disabled.
+ */
+ if (((check->state & (CHK_ST_ENABLED | CHK_ST_PAUSED)) != CHK_ST_ENABLED) ||
+ (proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+ TRACE_STATE("health-check paused or disabled", CHK_EV_TASK_WAKE, check);
+ goto reschedule;
+ }
+
+ /* we'll initiate a new check */
+ set_server_check_status(check, HCHK_STATUS_START, NULL);
+
+ check->state |= CHK_ST_INPROGRESS;
+ TRACE_STATE("init new health-check", CHK_EV_TASK_WAKE|CHK_EV_HCHK_START, check);
+
+ check->current_step = NULL;
+
+ check->sc = sc_new_from_check(check, SC_FL_NONE);
+ if (!check->sc) {
+ set_server_check_status(check, HCHK_STATUS_SOCKERR, NULL);
+ goto end;
+ }
+ tcpcheck_main(check);
+ expired = 0;
+ }
+
+ /* there was a test running.
+ * First, let's check whether there was an uncaught error,
+ * which can happen on connect timeout or error.
+ */
+ if (check->result == CHK_RES_UNKNOWN && likely(!(check->state & CHK_ST_PURGE))) {
+ sc = check->sc;
+ conn = sc_conn(sc);
+
+ /* Here the connection must be defined. Otherwise the
+ * error would have already been detected
+ */
+ if ((conn && ((conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR))) || expired) {
+ TRACE_ERROR("report connection error", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
+ chk_report_conn_err(check, 0, expired);
+ }
+ else {
+ if (check->state & CHK_ST_CLOSE_CONN) {
+ TRACE_DEVEL("closing current connection", CHK_EV_TASK_WAKE|CHK_EV_HCHK_RUN, check);
+ check->state &= ~CHK_ST_CLOSE_CONN;
+ if (!sc_reset_endp(check->sc)) {
+ /* error will be handled by tcpcheck_main().
+ * On success, remove all flags except SE_FL_DETACHED
+ */
+ sc_ep_clr(check->sc, ~SE_FL_DETACHED);
+ }
+ tcpcheck_main(check);
+ }
+ if (check->result == CHK_RES_UNKNOWN) {
+ TRACE_DEVEL("health-check not expired", CHK_EV_TASK_WAKE|CHK_EV_HCHK_RUN, check);
+ goto out_unlock; /* timeout not reached, wait again */
+ }
+ }
+ }
+
+ /* check complete or aborted */
+ TRACE_STATE("health-check complete or aborted", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END, check);
+
+ /* check->sc may be NULL when the healthcheck is purged */
+ check->current_step = NULL;
+ sc = check->sc;
+ conn = (sc ? sc_conn(sc) : NULL);
+
+ if (conn && conn->xprt) {
+ /* The check was aborted and the connection was not yet closed.
+ * This can happen upon timeout, or when an external event such
+ * as a failed response coupled with "observe layer7" caused the
+ * server state to be suddenly changed.
+ */
+ sc_conn_drain_and_shut(sc);
+ }
+
+ if (sc) {
+ sc_destroy(sc);
+ check->sc = NULL;
+ }
+
+ if (check->sess != NULL) {
+ vars_prune(&check->vars, check->sess, NULL);
+ session_free(check->sess);
+ check->sess = NULL;
+ }
+
+ end:
+ if (check->server && likely(!(check->state & CHK_ST_PURGE))) {
+ if (check->result == CHK_RES_FAILED) {
+ /* a failure or timeout detected */
+ TRACE_DEVEL("report failure", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
+ check_notify_failure(check);
+ }
+ else if (check->result == CHK_RES_CONDPASS) {
+ /* check is OK but asks for stopping mode */
+ TRACE_DEVEL("report conditional success", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_SUCC, check);
+ check_notify_stopping(check);
+ }
+ else if (check->result == CHK_RES_PASSED) {
+ /* a success was detected */
+ TRACE_DEVEL("report success", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_SUCC, check);
+ check_notify_success(check);
+ }
+ }
+
+ if (LIST_INLIST(&check->buf_wait.list))
+ LIST_DEL_INIT(&check->buf_wait.list);
+
+ check_release_buf(check, &check->bi);
+ check_release_buf(check, &check->bo);
+ _HA_ATOMIC_DEC(&th_ctx->running_checks);
+ _HA_ATOMIC_DEC(&th_ctx->active_checks);
+ check->state &= ~(CHK_ST_INPROGRESS|CHK_ST_IN_ALLOC|CHK_ST_OUT_ALLOC);
+ check->state &= ~CHK_ST_READY;
+ check->state |= CHK_ST_SLEEPING;
+
+ update_timer:
+ /* when going to sleep, we need to check if other checks are waiting
+ * for a slot. If so we pick them out of the queue and wake them up.
+ */
+ if (check->server && (check->state & CHK_ST_SLEEPING)) {
+ if (!LIST_ISEMPTY(&th_ctx->queued_checks) &&
+ _HA_ATOMIC_LOAD(&th_ctx->running_checks) < global.tune.max_checks_per_thread) {
+ struct check *next_chk = LIST_ELEM(th_ctx->queued_checks.n, struct check *, check_queue);
+
+ /* wake up pending task */
+ LIST_DEL_INIT(&next_chk->check_queue);
+
+ activity[tid].check_started++;
+ _HA_ATOMIC_INC(&th_ctx->running_checks);
+ next_chk->state |= CHK_ST_READY;
+ /* now running */
+ task_wakeup(next_chk->task, TASK_WOKEN_RES);
+ }
+ }
+
+ if (check->server) {
+ rv = 0;
+ if (global.spread_checks > 0) {
+ rv = srv_getinter(check) * global.spread_checks / 100;
+ rv -= (int) (2 * rv * (statistical_prng() / 4294967295.0));
+ }
+ t->expire = tick_add(now_ms, MS_TO_TICKS(srv_getinter(check) + rv));
+ /* reset fastinter flag (if set) so that srv_getinter()
+ * only returns fastinter if server health is degraded
+ */
+ check->state &= ~CHK_ST_FASTINTER;
+ }
+
+ reschedule:
+ if (proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+ t->expire = TICK_ETERNITY;
+ else {
+ while (tick_is_expired(t->expire, now_ms))
+ t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
+ }
+
+ out_unlock:
+ if (check->server)
+ HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+
+ out_leave:
+ TRACE_LEAVE(CHK_EV_TASK_WAKE, check);
+
+ /* Free the check if set to PURGE. After this, the check instance may be
+ * freed via the srv_drop invocation, so it must not be accessed after
+ * this point.
+ */
+ if (unlikely(check->state & CHK_ST_PURGE)) {
+ free_check(check);
+ if (check->server)
+ srv_drop(check->server);
+
+ t = NULL;
+ }
+
+ return t;
+}
+
+
+/**************************************************************************/
+/************************** Init/deinit checks ****************************/
+/**************************************************************************/
+/*
+ * Tries to grab a buffer and to re-enable processing on check <target>. The
+ * check flags are used to figure what buffer was requested. It returns 1 if the
+ * allocation succeeds, in which case the I/O tasklet is woken up, or 0 if it's
+ * impossible to wake up and we prefer to be woken up later.
+ */
+int check_buf_available(void *target)
+{
+ struct check *check = target;
+
+ /* a waiting check always has a stream connector attached */
+ BUG_ON(!check->sc);
+
+ /* an input buffer was requested: allocate it and resume receiving */
+ if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
+ TRACE_STATE("unblocking check, input buffer allocated", CHK_EV_TCPCHK_EXP|CHK_EV_RX_BLK, check);
+ check->state &= ~CHK_ST_IN_ALLOC;
+ tasklet_wakeup(check->sc->wait_event.tasklet);
+ return 1;
+ }
+ /* an output buffer was requested: allocate it and resume sending */
+ if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
+ TRACE_STATE("unblocking check, output buffer allocated", CHK_EV_TCPCHK_SND|CHK_EV_TX_BLK, check);
+ check->state &= ~CHK_ST_OUT_ALLOC;
+ tasklet_wakeup(check->sc->wait_event.tasklet);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate a buffer. If it fails, it adds the check in buffer wait queue.
+ * Returns the allocated buffer on success, otherwise NULL after registering
+ * the check in the thread's buffer wait queue so that check_buf_available()
+ * is called back when a buffer can be offered.
+ */
+struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
+{
+ struct buffer *buf = NULL;
+
+ /* only try to allocate (and only queue) when not already waiting */
+ if (likely(!LIST_INLIST(&check->buf_wait.list)) &&
+ unlikely((buf = b_alloc(bptr)) == NULL)) {
+ check->buf_wait.target = check;
+ check->buf_wait.wakeup_cb = check_buf_available;
+ LIST_APPEND(&th_ctx->buffer_wq, &check->buf_wait.list);
+ }
+ return buf;
+}
+
+/*
+ * Release a buffer, if any, and try to wake up entities waiting in the buffer
+ * wait queue. A buffer with a null size was never allocated and is skipped.
+ */
+void check_release_buf(struct check *check, struct buffer *bptr)
+{
+ if (bptr->size) {
+ b_free(bptr);
+ /* one buffer just became available, offer it to one waiter */
+ offer_buffers(check->buf_wait.target, 1);
+ }
+}
+
+/* Performs the minimal initialization of check <check> for check type <type>
+ * (one of the PR_O2_*_CHK values). Buffers are left unallocated (BUF_NULL)
+ * and will be picked from the pool on demand. Returns NULL on success or an
+ * error message; callers must test the result even though no error is
+ * currently produced.
+ */
+const char *init_check(struct check *check, int type)
+{
+ check->type = type;
+
+ check->bi = BUF_NULL;
+ check->bo = BUF_NULL;
+ LIST_INIT(&check->buf_wait.list);
+ LIST_INIT(&check->check_queue);
+ return NULL;
+}
+
+/* Liberates the resources allocated for a check.
+ *
+ * This function must only be run by the thread owning the check. It releases
+ * the check's resources (agent rules/vars, task, buffers, stream connector)
+ * but not the check structure itself, which is embedded in its owner.
+ */
+void free_check(struct check *check)
+{
+ /* For agent-check, free the rules / vars from the server. This is not
+ * done for health-check : the proxy is the owner of the rules / vars
+ * in this case.
+ */
+ if (check->state & CHK_ST_AGENT) {
+ free_tcpcheck_vars(&check->tcpcheck_rules->preset_vars);
+ ha_free(&check->tcpcheck_rules);
+ }
+
+ task_destroy(check->task);
+
+ check_release_buf(check, &check->bi);
+ check_release_buf(check, &check->bo);
+ if (check->sc) {
+ sc_destroy(check->sc);
+ check->sc = NULL;
+ }
+}
+
+/* This function must be used in order to free a started check. The check will
+ * be scheduled for a next execution in order to properly close and free all
+ * check elements.
+ *
+ * Non thread-safe. The actual release is performed by the check's own task,
+ * which is woken up here after the PURGE flag is set.
+ */
+void check_purge(struct check *check)
+{
+ check->state |= CHK_ST_PURGE;
+ task_wakeup(check->task, TASK_WOKEN_OTHER);
+}
+
+/* manages a server health-check. Returns the time the task accepts to wait, or
+ * TIME_ETERNITY for infinity.
+ *
+ * Simple dispatcher: external (process-based) checks go to process_chk_proc(),
+ * everything else to process_chk_conn().
+ */
+struct task *process_chk(struct task *t, void *context, unsigned int state)
+{
+ struct check *check = context;
+
+ if (check->type == PR_O2_EXT_CHK)
+ return process_chk_proc(t, context, state);
+ return process_chk_conn(t, context, state);
+
+}
+
+
+/* Creates and queues the task running check <check>. <mininter> is the
+ * minimal spreading interval, <nbcheck> the total number of configured checks
+ * and <srvpos> this check's rank, used together to spread the checks' first
+ * executions over time. Returns 1 on success, 0 on task allocation failure
+ * (an alert is emitted in that case).
+ */
+int start_check_task(struct check *check, int mininter,
+ int nbcheck, int srvpos)
+{
+ struct task *t;
+
+ /* task for the check. Process-based checks exclusively run on thread 1. */
+ if (check->type == PR_O2_EXT_CHK)
+ t = task_new_on(0);
+ else
+ t = task_new_anywhere();
+
+ if (!t)
+ goto fail_alloc_task;
+
+ check->task = t;
+ t->process = process_chk;
+ t->context = check;
+
+ if (mininter < srv_getinter(check))
+ mininter = srv_getinter(check);
+
+ /* optionally add a random offset of up to +/- spread_checks percent */
+ if (global.spread_checks > 0) {
+ int rnd;
+
+ rnd = srv_getinter(check) * global.spread_checks / 100;
+ rnd -= (int) (2 * rnd * (ha_random32() / 4294967295.0));
+ mininter += rnd;
+ }
+
+ if (global.max_spread_checks && mininter > global.max_spread_checks)
+ mininter = global.max_spread_checks;
+
+ /* spread this check's first run over the interval based on its rank */
+ t->expire = tick_add(now_ms, MS_TO_TICKS(mininter * srvpos / nbcheck));
+ check->start = now_ns;
+ task_queue(t);
+
+ return 1;
+
+ fail_alloc_task:
+ ha_alert("Starting [%s:%s] check: out of memory.\n",
+ check->server->proxy->id, check->server->id);
+ return 0;
+}
+
+/*
+ * Start health-check.
+ * Returns 0 if OK, ERR_FATAL on error, and prints the error in this case.
+ * Registered as a post-check hook; it initializes the dummy checks frontend,
+ * counts configured checks, then starts one task per check, spreading their
+ * first executions.
+ */
+static int start_checks()
+{
+
+ struct proxy *px;
+ struct server *s;
+ int nbcheck=0, mininter=0, srvpos=0;
+
+ /* 0- init the dummy frontend used to create all checks sessions */
+ init_new_proxy(&checks_fe);
+ checks_fe.id = strdup("CHECKS-FE");
+ checks_fe.cap = PR_CAP_FE | PR_CAP_BE;
+ checks_fe.mode = PR_MODE_TCP;
+ checks_fe.maxconn = 0;
+ checks_fe.conn_retries = CONN_RETRIES;
+ checks_fe.options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON | PR_O2_SMARTACC;
+ checks_fe.timeout.client = TICK_ETERNITY;
+
+ /* 1- count the checkers to run simultaneously.
+ * We also determine the minimum interval among all of those which
+ * have an interval larger than SRV_CHK_INTER_THRES. This interval
+ * will be used to spread their start-up date. Those which have
+ * a shorter interval will start independently and will not dictate
+ * too short an interval for all others.
+ */
+ for (px = proxies_list; px; px = px->next) {
+ for (s = px->srv; s; s = s->next) {
+ if (s->check.state & CHK_ST_CONFIGURED) {
+ nbcheck++;
+ if ((srv_getinter(&s->check) >= SRV_CHK_INTER_THRES) &&
+ (!mininter || mininter > srv_getinter(&s->check)))
+ mininter = srv_getinter(&s->check);
+ }
+
+ if (s->agent.state & CHK_ST_CONFIGURED) {
+ nbcheck++;
+ if ((srv_getinter(&s->agent) >= SRV_CHK_INTER_THRES) &&
+ (!mininter || mininter > srv_getinter(&s->agent)))
+ mininter = srv_getinter(&s->agent);
+ }
+ }
+ }
+
+ /* nothing to do: also guarantees nbcheck > 0 for the division below */
+ if (!nbcheck)
+ return ERR_NONE;
+
+ srand((unsigned)time(NULL));
+
+ /* 2- start them as far as possible from each other. For this, we will
+ * start them after their interval is set to the min interval divided
+ * by the number of servers, weighted by the server's position in the
+ * list.
+ */
+ for (px = proxies_list; px; px = px->next) {
+ if ((px->options2 & PR_O2_CHK_ANY) == PR_O2_EXT_CHK) {
+ if (init_pid_list()) {
+ ha_alert("Starting [%s] check: out of memory.\n", px->id);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ for (s = px->srv; s; s = s->next) {
+ /* A task for the main check */
+ if (s->check.state & CHK_ST_CONFIGURED) {
+ if (s->check.type == PR_O2_EXT_CHK) {
+ if (!prepare_external_check(&s->check))
+ return ERR_ALERT | ERR_FATAL;
+ }
+ if (!start_check_task(&s->check, mininter, nbcheck, srvpos))
+ return ERR_ALERT | ERR_FATAL;
+ srvpos++;
+ }
+
+ /* A task for an auxiliary agent check */
+ if (s->agent.state & CHK_ST_CONFIGURED) {
+ if (!start_check_task(&s->agent, mininter, nbcheck, srvpos)) {
+ return ERR_ALERT | ERR_FATAL;
+ }
+ srvpos++;
+ }
+ }
+ }
+ return ERR_NONE;
+}
+
+
+/*
+ * Return value:
+ * the port to be used for the health check
+ * 0 in case no port could be found for the check
+ *
+ * Lookup order: explicit check port, then the port embedded in the check
+ * address, then the server's service port (unless port mapping is in use).
+ */
+static int srv_check_healthcheck_port(struct check *chk)
+{
+ int i = 0;
+ struct server *srv = NULL;
+
+ srv = chk->server;
+
+ /* by default, we use the health check port configured */
+ if (chk->port > 0)
+ return chk->port;
+
+ /* try to get the port from check_core.addr if check.port not set */
+ i = get_host_port(&chk->addr);
+ if (i > 0)
+ return i;
+
+ /* try to get the port from server address */
+ /* prevent MAPPORTS from working at this point, since checks could
+ * not be performed in such case (MAPPORTS impose a relative ports
+ * based on live traffic)
+ */
+ if (srv->flags & SRV_F_MAPPORTS)
+ return 0;
+
+ i = srv->svc_port; /* by default */
+ if (i > 0)
+ return i;
+
+ return 0;
+}
+
+/* Initializes a health-check attached to the server <srv>. Non-zero is returned
+ * if an error occurred. Registered as a post-server-check hook; it selects the
+ * transport/mux for the check, validates that a port can be derived, then
+ * performs the minimal check initialization and marks it configured.
+ */
+int init_srv_check(struct server *srv)
+{
+ const char *err;
+ struct tcpcheck_rule *r;
+ int ret = ERR_NONE;
+ int check_type;
+
+ if (!srv->do_check || !(srv->proxy->cap & PR_CAP_BE))
+ goto out;
+
+ check_type = srv->check.tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK;
+
+ if (!(srv->flags & SRV_F_DYNAMIC)) {
+ /* If neither a port nor an addr was specified and no check
+ * transport layer is forced, then the transport layer used by
+ * the checks is the same as for the production traffic.
+ * Otherwise we use raw_sock by default, unless one is
+ * specified.
+ */
+ if (!srv->check.port && !is_addr(&srv->check.addr)) {
+ if (!srv->check.use_ssl && srv->use_ssl != -1) {
+ srv->check.use_ssl = srv->use_ssl;
+ srv->check.xprt = srv->xprt;
+ }
+ else if (srv->check.use_ssl == 1)
+ srv->check.xprt = xprt_get(XPRT_SSL);
+ srv->check.send_proxy |= (srv->pp_opts);
+ }
+ else if (srv->check.use_ssl == 1)
+ srv->check.xprt = xprt_get(XPRT_SSL);
+ }
+ else {
+ /* For dynamic servers, check-ssl and check-send-proxy must be
+ * explicitly defined even if the check port was not
+ * overridden.
+ */
+ if (srv->check.use_ssl == 1)
+ srv->check.xprt = xprt_get(XPRT_SSL);
+ }
+
+ /* Inherit the mux protocol from the server if not already defined for
+ * the check
+ */
+ if (srv->mux_proto && !srv->check.mux_proto &&
+ ((srv->mux_proto->mode == PROTO_MODE_HTTP && check_type == TCPCHK_RULES_HTTP_CHK) ||
+ (srv->mux_proto->mode == PROTO_MODE_TCP && check_type != TCPCHK_RULES_HTTP_CHK))) {
+ srv->check.mux_proto = srv->mux_proto;
+ }
+ /* test that check proto is valid if explicitly defined */
+ else if (srv->check.mux_proto &&
+ ((srv->check.mux_proto->mode == PROTO_MODE_HTTP && check_type != TCPCHK_RULES_HTTP_CHK) ||
+ (srv->check.mux_proto->mode == PROTO_MODE_TCP && check_type == TCPCHK_RULES_HTTP_CHK))) {
+ ha_alert("config: %s '%s': server '%s' uses an incompatible MUX protocol for the selected check type\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
+ ret |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* validate <srv> server health-check settings */
+
+ /* We need at least a service port, a check port or the first tcp-check
+ * rule must be a 'connect' one when checking an IPv4/IPv6 server.
+ */
+ if ((srv_check_healthcheck_port(&srv->check) != 0) ||
+ (!is_inet_addr(&srv->check.addr) && (is_addr(&srv->check.addr) || !is_inet_addr(&srv->addr))))
+ goto init;
+
+ /* no port could be derived: a tcp-check 'connect' rule with an
+ * explicit port is then mandatory
+ */
+ if (!srv->proxy->tcpcheck_rules.list || LIST_ISEMPTY(srv->proxy->tcpcheck_rules.list)) {
+ ha_alert("config: %s '%s': server '%s' has neither service port nor check port.\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
+ ret |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* search the first action (connect / send / expect) in the list */
+ r = get_first_tcpcheck_rule(&srv->proxy->tcpcheck_rules);
+ if (!r || (r->action != TCPCHK_ACT_CONNECT) || (!r->connect.port && !get_host_port(&r->connect.addr))) {
+ ha_alert("config: %s '%s': server '%s' has neither service port nor check port "
+ "nor tcp_check rule 'connect' with port information.\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
+ ret |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* scan the tcp-check ruleset to ensure a port has been configured */
+ list_for_each_entry(r, srv->proxy->tcpcheck_rules.list, list) {
+ if ((r->action == TCPCHK_ACT_CONNECT) && (!r->connect.port && !get_host_port(&r->connect.addr))) {
+ ha_alert("config: %s '%s': server '%s' has neither service port nor check port, "
+ "and a tcp_check rule 'connect' with no port information.\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
+ ret |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ }
+
+ init:
+ err = init_check(&srv->check, srv->proxy->options2 & PR_O2_CHK_ANY);
+ if (err) {
+ ha_alert("config: %s '%s': unable to init check for server '%s' (%s).\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id, err);
+ ret |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ srv->check.state |= CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_SLEEPING;
+ /* the check holds a reference on the server until deinit */
+ srv_take(srv);
+
+ /* Only increment maxsock for servers from the configuration. Dynamic
+ * servers at the moment are not taken into account for the estimation
+ * of the resources limits.
+ */
+ if (global.mode & MODE_STARTING)
+ global.maxsock++;
+
+ out:
+ return ret;
+}
+
+/* Initializes an agent-check attached to the server <srv>. Non-zero is returned
+ * if an error occurred. Registered as a post-server-check hook; it guarantees
+ * the agent ruleset starts with a 'connect' rule, initializes the check and
+ * marks it configured.
+ */
+int init_srv_agent_check(struct server *srv)
+{
+ struct tcpcheck_rule *chk;
+ const char *err;
+ int ret = ERR_NONE;
+
+ if (!srv->do_agent || !(srv->proxy->cap & PR_CAP_BE))
+ goto out;
+
+ /* If there is no connect rule preceding all send / expect rules, an
+ * implicit one is inserted before all others.
+ */
+ chk = get_first_tcpcheck_rule(srv->agent.tcpcheck_rules);
+ if (!chk || chk->action != TCPCHK_ACT_CONNECT) {
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ ha_alert("%s '%s': unable to add implicit tcp-check connect rule"
+ " to agent-check for server '%s' (out of memory).\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
+ ret |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ chk->action = TCPCHK_ACT_CONNECT;
+ chk->connect.options = (TCPCHK_OPT_DEFAULT_CONNECT|TCPCHK_OPT_IMPLICIT);
+ LIST_INSERT(srv->agent.tcpcheck_rules->list, &chk->list);
+ }
+
+ /* <chk> is always defined here and it is a CONNECT action. If there is
+ * a preset variable, it means there is an agent string defined and data
+ * will be sent after the connect.
+ */
+ if (!LIST_ISEMPTY(&srv->agent.tcpcheck_rules->preset_vars))
+ chk->connect.options |= TCPCHK_OPT_HAS_DATA;
+
+
+ err = init_check(&srv->agent, PR_O2_TCPCHK_CHK);
+ if (err) {
+ ha_alert("config: %s '%s': unable to init agent-check for server '%s' (%s).\n",
+ proxy_type_str(srv->proxy), srv->proxy->id, srv->id, err);
+ ret |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ /* the agent inherits the health-check interval when not set */
+ if (!srv->agent.inter)
+ srv->agent.inter = srv->check.inter;
+
+ srv->agent.state |= CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_SLEEPING | CHK_ST_AGENT;
+ /* the agent check holds a reference on the server until deinit */
+ srv_take(srv);
+
+ /* Only increment maxsock for servers from the configuration. Dynamic
+ * servers at the moment are not taken into account for the estimation
+ * of the resources limits.
+ */
+ if (global.mode & MODE_STARTING)
+ global.maxsock++;
+
+ out:
+ return ret;
+}
+
+/* Releases the health-check of server <srv> if it was configured, and clears
+ * the related flags. Counterpart of init_srv_check(), run at server deinit.
+ */
+static void deinit_srv_check(struct server *srv)
+{
+ if (srv->check.state & CHK_ST_CONFIGURED) {
+ free_check(&srv->check);
+ /* it is safe to drop now since the main server reference is still held by the proxy */
+ srv_drop(srv);
+ }
+ srv->check.state &= ~CHK_ST_CONFIGURED & ~CHK_ST_ENABLED;
+ srv->do_check = 0;
+}
+
+
+/* Releases the agent-check of server <srv> if it was configured, and clears
+ * the related flags. Counterpart of init_srv_agent_check(); also used by the
+ * parsers' error paths before the check was marked configured.
+ */
+static void deinit_srv_agent_check(struct server *srv)
+{
+ if (srv->agent.state & CHK_ST_CONFIGURED) {
+ free_check(&srv->agent);
+ /* it is safe to drop now since the main server reference is still held by the proxy */
+ srv_drop(srv);
+ }
+
+ srv->agent.state &= ~CHK_ST_CONFIGURED & ~CHK_ST_ENABLED & ~CHK_ST_AGENT;
+ srv->do_agent = 0;
+}
+
+/* hook the check init/startup into config post-processing, and the release
+ * functions into server deinitialization
+ */
+REGISTER_POST_SERVER_CHECK(init_srv_check);
+REGISTER_POST_SERVER_CHECK(init_srv_agent_check);
+REGISTER_POST_CHECK(start_checks);
+
+REGISTER_SERVER_DEINIT(deinit_srv_check);
+REGISTER_SERVER_DEINIT(deinit_srv_agent_check);
+
+/* perform minimal initializations: set up each thread's queue of checks
+ * waiting for a run slot
+ */
+static void init_checks()
+{
+ int i;
+
+ for (i = 0; i < MAX_THREADS; i++)
+ LIST_INIT(&ha_thread_ctx[i].queued_checks);
+}
+
+INITCALL0(STG_PREPARE, init_checks);
+
+/**************************************************************************/
+/************************** Check sample fetches **************************/
+/**************************************************************************/
+
+/* empty placeholder: no check-specific sample fetch is registered here yet */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+
+/**************************************************************************/
+/************************ Check's parsing functions ***********************/
+/**************************************************************************/
+/* Parse the "addr" server keyword: address used to perform health checks,
+ * also inherited by the agent check unless "agent-addr" was set.
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_addr(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ struct sockaddr_storage *sk;
+ int port1, port2, err_code = 0;
+
+
+ if (!*args[*cur_arg+1]) {
+ memprintf(errmsg, "'%s' expects <ipv4|ipv6> as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ sk = str2sa_range(args[*cur_arg+1], NULL, &port1, &port2, NULL, NULL, NULL, errmsg, NULL, NULL,
+ PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
+ if (!sk) {
+ memprintf(errmsg, "'%s' : %s", args[*cur_arg], *errmsg);
+ goto error;
+ }
+
+ srv->check.addr = *sk;
+ /* if agentaddr was never set, we can use addr */
+ if (!(srv->flags & SRV_F_AGENTADDR))
+ srv->agent.addr = *sk;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "agent-addr" server keyword: explicit address for the agent
+ * check, taking precedence over the one inherited from "addr".
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_agent_addr(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ struct sockaddr_storage sk;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects an address as argument.", args[*cur_arg]);
+ goto error;
+ }
+ memset(&sk, 0, sizeof(sk));
+ if (str2ip(args[*cur_arg + 1], &sk) == NULL) {
+ memprintf(errmsg, "parsing agent-addr failed. Check if '%s' is correct address.", args[*cur_arg+1]);
+ goto error;
+ }
+ set_srv_agent_addr(srv, &sk);
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "agent-check" server keyword: enables the auxiliary agent check
+ * by attaching the shared "*agent-check" tcp-check ruleset to the server.
+ * Returns 0 on success, ERR_WARN when ignored, ERR_ALERT|ERR_FATAL on error.
+ */
+static int srv_parse_agent_check(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ int err_code = 0;
+
+ /* already enabled: nothing to do */
+ if (srv->do_agent)
+ goto out;
+
+ if (!(curpx->cap & PR_CAP_BE)) {
+ memprintf(errmsg, "'%s' ignored because %s '%s' has no backend capability",
+ args[*cur_arg], proxy_type_str(curpx), curpx->id);
+ return ERR_WARN;
+ }
+
+ /* allocate the rules container on first use (it may already exist if
+ * "agent-send" was parsed first)
+ */
+ if (!rules) {
+ rules = calloc(1, sizeof(*rules));
+ if (!rules) {
+ memprintf(errmsg, "out of memory.");
+ goto error;
+ }
+ LIST_INIT(&rules->preset_vars);
+ srv->agent.tcpcheck_rules = rules;
+ }
+ rules->list = NULL;
+ rules->flags = 0;
+
+ /* the "*agent-check" ruleset is created once and shared by all agents */
+ rs = find_tcpcheck_ruleset("*agent-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*agent-check");
+ if (rs == NULL) {
+ memprintf(errmsg, "out of memory.");
+ goto error;
+ }
+
+ /* rule 0: send the configured agent string, if any */
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-lf", "%[var(check.agent_string)]", ""},
+ 1, curpx, &rs->rules, srv->conf.file, srv->conf.line, errmsg);
+ if (!chk) {
+ memprintf(errmsg, "'%s': %s", args[*cur_arg], *errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ /* rule 1: parse the agent's reply with the dedicated callback */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "custom", ""},
+ 1, curpx, &rs->rules, TCPCHK_RULES_AGENT_CHK,
+ srv->conf.file, srv->conf.line, errmsg);
+ if (!chk) {
+ memprintf(errmsg, "'%s': %s", args[*cur_arg], *errmsg);
+ goto error;
+ }
+ chk->expect.custom = tcpcheck_agent_expect_reply;
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_AGENT_CHK;
+ srv->do_agent = 1;
+
+ out:
+ return err_code;
+
+ error:
+ deinit_srv_agent_check(srv);
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "agent-inter" server keyword: interval between two agent checks,
+ * expressed as a time with optional unit (default ms).
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_agent_inter(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ const char *err = NULL;
+ unsigned int delay;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a delay as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ err = parse_time_err(args[*cur_arg+1], &delay, TIME_UNIT_MS);
+ if (err == PARSE_TIME_OVER) {
+ memprintf(errmsg, "timer overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days).",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ memprintf(errmsg, "timer underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms.",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err) {
+ memprintf(errmsg, "unexpected character '%c' in 'agent-inter' argument of server %s.",
+ *err, srv->id);
+ goto error;
+ }
+ /* <delay> is unsigned, so this only rejects a null delay */
+ if (delay <= 0) {
+ memprintf(errmsg, "invalid value %d for argument '%s' of server %s.",
+ delay, args[*cur_arg], srv->id);
+ goto error;
+ }
+ srv->agent.inter = delay;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "agent-port" server keyword: TCP port used for agent checks.
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ * NOTE(review): the port value comes from atol() without range validation —
+ * presumably checked elsewhere; confirm before relying on it.
+ */
+static int srv_parse_agent_port(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a port number as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ /* Only increment maxsock for servers from the configuration. Dynamic
+ * servers at the moment are not taken into account for the estimation
+ * of the resources limits.
+ */
+ if (global.mode & MODE_STARTING)
+ global.maxsock++;
+
+ set_srv_agent_port(srv, atol(args[*cur_arg + 1]));
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Sets the string <send> to be sent by the agent check of server <srv>, by
+ * replacing any previous "check.agent_string" preset variable. Returns 1 on
+ * success, 0 on allocation failure.
+ * NOTE(review): assumes srv->agent.tcpcheck_rules was already allocated by
+ * the caller ("agent-check"/"agent-send" parsers do so) — confirm for any
+ * new caller.
+ */
+int set_srv_agent_send(struct server *srv, const char *send)
+{
+ struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
+ struct tcpcheck_var *var = NULL;
+ char *str;
+
+ str = strdup(send);
+ var = create_tcpcheck_var(ist("check.agent_string"));
+ if (str == NULL || var == NULL)
+ goto error;
+
+ /* drop any previously configured agent string */
+ free_tcpcheck_vars(&rules->preset_vars);
+
+ var->data.type = SMP_T_STR;
+ var->data.u.str.area = str;
+ var->data.u.str.data = strlen(str);
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+
+ return 1;
+
+ error:
+ free(str);
+ free(var);
+ return 0;
+}
+
+/* Parse the "agent-send" server keyword: string to send to the agent. May be
+ * parsed before "agent-check", so the rules container is allocated on demand.
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_agent_send(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ if (!rules) {
+ rules = calloc(1, sizeof(*rules));
+ if (!rules) {
+ memprintf(errmsg, "out of memory.");
+ goto error;
+ }
+ LIST_INIT(&rules->preset_vars);
+ srv->agent.tcpcheck_rules = rules;
+ }
+
+ if (!set_srv_agent_send(srv, args[*cur_arg+1])) {
+ memprintf(errmsg, "out of memory.");
+ goto error;
+ }
+
+ out:
+ return err_code;
+
+ error:
+ deinit_srv_agent_check(srv);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "no-agent-check" server keyword: disables any configured agent
+ * check. Always succeeds.
+ */
+static int srv_parse_no_agent_check(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ deinit_srv_agent_check(srv);
+ return 0;
+}
+
+/* Parse the "check" server keyword: enables health checks on the server.
+ * Returns 0 on success, ERR_WARN when ignored on a frontend-only proxy.
+ */
+static int srv_parse_check(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ if (!(curpx->cap & PR_CAP_BE)) {
+ memprintf(errmsg, "'%s' ignored because %s '%s' has no backend capability",
+ args[*cur_arg], proxy_type_str(curpx), curpx->id);
+ return ERR_WARN;
+ }
+
+ srv->do_check = 1;
+ return 0;
+}
+
+/* Parse the "check-send-proxy" server keyword: enables the PROXY protocol on
+ * check connections. Always succeeds.
+ */
+static int srv_parse_check_send_proxy(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ srv->check.send_proxy = 1;
+ return 0;
+}
+
+/* Parse the "check-via-socks4" server keyword: routes check connections
+ * through the configured socks4 proxy. Always succeeds.
+ */
+static int srv_parse_check_via_socks4(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ srv->check.via_socks4 = 1;
+ return 0;
+}
+
+/* Parse the "no-check" server keyword: disables any configured health check.
+ * Always succeeds.
+ */
+static int srv_parse_no_check(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ deinit_srv_check(srv);
+ return 0;
+}
+
+/* Parse the "no-check-send-proxy" server keyword: disables the PROXY protocol
+ * on check connections. Always succeeds.
+ */
+static int srv_parse_no_check_send_proxy(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ srv->check.send_proxy = 0;
+ return 0;
+}
+
+/* parse the "check-proto" server keyword: forces the mux protocol used for
+ * health checks. Returns 0 on success, ERR_ALERT|ERR_FATAL on error with
+ * <err> filled.
+ */
+static int srv_parse_check_proto(char **args, int *cur_arg,
+ struct proxy *px, struct server *newsrv, char **err)
+{
+ int err_code = 0;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[*cur_arg]);
+ goto error;
+ }
+ newsrv->check.mux_proto = get_mux_proto(ist(args[*cur_arg + 1]));
+ if (!newsrv->check.mux_proto) {
+ memprintf(err, "'%s' : unknown MUX protocol '%s'", args[*cur_arg], args[*cur_arg+1]);
+ goto error;
+ }
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Parse the "rise" server keyword: number of consecutive successful checks
+ * needed to mark the server up again. Returns 0 on success,
+ * ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_check_rise(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ int err_code = 0;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(errmsg, "'%s' expects an integer argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ srv->check.rise = atol(args[*cur_arg+1]);
+ if (srv->check.rise <= 0) {
+ memprintf(errmsg, "'%s' has to be > 0.", args[*cur_arg]);
+ goto error;
+ }
+
+ /* keep the current health consistent with the new rise value */
+ if (srv->check.health)
+ srv->check.health = srv->check.rise;
+
+ out:
+ return err_code;
+
+ error:
+ /* NOTE(review): tearing down the *agent* check on a "rise" parsing
+ * error looks copy-pasted from the agent parsers — confirm intent.
+ */
+ deinit_srv_agent_check(srv);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "fall" server keyword: number of consecutive failed checks needed
+ * to mark the server down. Returns 0 on success, ERR_ALERT|ERR_FATAL on error
+ * with <errmsg> filled.
+ */
+static int srv_parse_check_fall(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ int err_code = 0;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(errmsg, "'%s' expects an integer argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ srv->check.fall = atol(args[*cur_arg+1]);
+ if (srv->check.fall <= 0) {
+ memprintf(errmsg, "'%s' has to be > 0.", args[*cur_arg]);
+ goto error;
+ }
+
+ out:
+ return err_code;
+
+ error:
+ /* NOTE(review): tearing down the *agent* check on a "fall" parsing
+ * error looks copy-pasted from the agent parsers — confirm intent.
+ */
+ deinit_srv_agent_check(srv);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "inter" server keyword: interval between two health checks,
+ * expressed as a time with optional unit (default ms).
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_check_inter(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ const char *err = NULL;
+ unsigned int delay;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a delay as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ err = parse_time_err(args[*cur_arg+1], &delay, TIME_UNIT_MS);
+ if (err == PARSE_TIME_OVER) {
+ memprintf(errmsg, "timer overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days).",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ memprintf(errmsg, "timer underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms.",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err) {
+ /* report the actual keyword being parsed instead of the
+ * 'agent-inter' string previously copy-pasted here.
+ */
+ memprintf(errmsg, "unexpected character '%c' in '%s' argument of server %s.",
+ *err, args[*cur_arg], srv->id);
+ goto error;
+ }
+ /* <delay> is unsigned, so this only rejects a null delay */
+ if (delay <= 0) {
+ memprintf(errmsg, "invalid value %d for argument '%s' of server %s.",
+ delay, args[*cur_arg], srv->id);
+ goto error;
+ }
+ srv->check.inter = delay;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Parse the "fastinter" server keyword: shortened interval used while a
+ * server's state is transitioning, expressed as a time with optional unit
+ * (default ms). Returns 0 on success, ERR_ALERT|ERR_FATAL on error with
+ * <errmsg> filled.
+ */
+static int srv_parse_check_fastinter(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ const char *err = NULL;
+ unsigned int delay;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a delay as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ err = parse_time_err(args[*cur_arg+1], &delay, TIME_UNIT_MS);
+ if (err == PARSE_TIME_OVER) {
+ memprintf(errmsg, "timer overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days).",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ memprintf(errmsg, "timer underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms.",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err) {
+ /* report the actual keyword being parsed instead of the
+ * 'agent-inter' string previously copy-pasted here.
+ */
+ memprintf(errmsg, "unexpected character '%c' in '%s' argument of server %s.",
+ *err, args[*cur_arg], srv->id);
+ goto error;
+ }
+ /* <delay> is unsigned, so this only rejects a null delay */
+ if (delay <= 0) {
+ memprintf(errmsg, "invalid value %d for argument '%s' of server %s.",
+ delay, args[*cur_arg], srv->id);
+ goto error;
+ }
+ srv->check.fastinter = delay;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Parse the "downinter" server keyword: interval used between checks while
+ * the server is down, expressed as a time with optional unit (default ms).
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_check_downinter(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ const char *err = NULL;
+ unsigned int delay;
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a delay as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ err = parse_time_err(args[*cur_arg+1], &delay, TIME_UNIT_MS);
+ if (err == PARSE_TIME_OVER) {
+ memprintf(errmsg, "timer overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days).",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err == PARSE_TIME_UNDER) {
+ memprintf(errmsg, "timer underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms.",
+ args[*cur_arg+1], args[*cur_arg], srv->id);
+ goto error;
+ }
+ else if (err) {
+ /* report the actual keyword being parsed instead of the
+ * 'agent-inter' string previously copy-pasted here.
+ */
+ memprintf(errmsg, "unexpected character '%c' in '%s' argument of server %s.",
+ *err, args[*cur_arg], srv->id);
+ goto error;
+ }
+ /* <delay> is unsigned, so this only rejects a null delay */
+ if (delay <= 0) {
+ memprintf(errmsg, "invalid value %d for argument '%s' of server %s.",
+ delay, args[*cur_arg], srv->id);
+ goto error;
+ }
+ srv->check.downinter = delay;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parse the "port" server keyword: TCP port used for health checks, also
+ * inherited by the agent check unless "agent-port" was set.
+ * Returns 0 on success, ERR_ALERT|ERR_FATAL on error with <errmsg> filled.
+ */
+static int srv_parse_check_port(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
+ char **errmsg)
+{
+ int err_code = 0;
+
+ if (!*(args[*cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a port number as argument.", args[*cur_arg]);
+ goto error;
+ }
+
+ /* Only increment maxsock for servers from the configuration. Dynamic
+ * servers at the moment are not taken into account for the estimation
+ * of the resources limits.
+ */
+ if (global.mode & MODE_STARTING)
+ global.maxsock++;
+
+ srv->check.port = atol(args[*cur_arg+1]);
+ /* if agentport was never set, we can use port */
+ if (!(srv->flags & SRV_F_AGENTPORT))
+ srv->agent.port = srv->check.port;
+
+ out:
+ return err_code;
+
+ error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* config parser for global "tune.max-checks-per-thread": caps the number of
+ * checks running concurrently on each thread. Returns 0 on success, -1 on
+ * error. NOTE(review): the value comes from atoi() without range validation —
+ * presumably any value is acceptable here; confirm.
+ */
+static int check_parse_global_max_checks(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+ global.tune.max_checks_per_thread = atoi(args[1]);
+ return 0;
+}
+
+/* register "global" section keywords handled by this file */
+static struct cfg_kw_list chk_cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.max-checks-per-thread", check_parse_global_max_checks },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &chk_cfg_kws);
+
+/* register "server" line keywords under the "CHK" scope; each entry maps a
+ * keyword to its parser with its argument count and capability flags
+ */
+static struct srv_kw_list srv_kws = { "CHK", { }, {
+ { "addr", srv_parse_addr, 1, 1, 1 }, /* IP address to send health to or to probe from agent-check */
+ { "agent-addr", srv_parse_agent_addr, 1, 1, 1 }, /* Enable an auxiliary agent check */
+ { "agent-check", srv_parse_agent_check, 0, 1, 1 }, /* Enable agent checks */
+ { "agent-inter", srv_parse_agent_inter, 1, 1, 1 }, /* Set the interval between two agent checks */
+ { "agent-port", srv_parse_agent_port, 1, 1, 1 }, /* Set the TCP port used for agent checks. */
+ { "agent-send", srv_parse_agent_send, 1, 1, 1 }, /* Set string to send to agent. */
+ { "check", srv_parse_check, 0, 1, 1 }, /* Enable health checks */
+ { "check-proto", srv_parse_check_proto, 1, 1, 1 }, /* Set the mux protocol for health checks */
+ { "check-send-proxy", srv_parse_check_send_proxy, 0, 1, 1 }, /* Enable PROXY protocol for health checks */
+ { "check-via-socks4", srv_parse_check_via_socks4, 0, 1, 1 }, /* Enable socks4 proxy for health checks */
+ { "no-agent-check", srv_parse_no_agent_check, 0, 1, 0 }, /* Do not enable any auxiliary agent check */
+ { "no-check", srv_parse_no_check, 0, 1, 0 }, /* Disable health checks */
+ { "no-check-send-proxy", srv_parse_no_check_send_proxy, 0, 1, 0 }, /* Disable PROXY protocol for health checks */
+ { "rise", srv_parse_check_rise, 1, 1, 1 }, /* Set rise value for health checks */
+ { "fall", srv_parse_check_fall, 1, 1, 1 }, /* Set fall value for health checks */
+ { "inter", srv_parse_check_inter, 1, 1, 1 }, /* Set inter value for health checks */
+ { "fastinter", srv_parse_check_fastinter, 1, 1, 1 }, /* Set fastinter value for health checks */
+ { "downinter", srv_parse_check_downinter, 1, 1, 1 }, /* Set downinter value for health checks */
+ { "port", srv_parse_check_port, 1, 1, 1 }, /* Set the TCP port used for health checks. */
+ { NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/chunk.c b/src/chunk.c
new file mode 100644
index 0000000..c5b74fc
--- /dev/null
+++ b/src/chunk.c
@@ -0,0 +1,311 @@
+/*
+ * Chunk management functions.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/chunk.h>
+#include <haproxy/global.h>
+#include <haproxy/tools.h>
+
+/* trash chunks used for various conversions */
+static THREAD_LOCAL struct buffer *trash_chunk;
+static THREAD_LOCAL struct buffer trash_chunk1;
+static THREAD_LOCAL struct buffer trash_chunk2;
+
+/* trash buffers used for various conversions */
+static int trash_size __read_mostly;
+static THREAD_LOCAL char *trash_buf1;
+static THREAD_LOCAL char *trash_buf2;
+
+/* the trash pool for reentrant allocations */
+struct pool_head *pool_head_trash __read_mostly = NULL;
+
+/* this is used to drain data, and as a temporary buffer for sprintf()... */
+THREAD_LOCAL struct buffer trash = { };
+
+/*
+ * Returns a pre-allocated and initialized trash chunk that can be used for any
+ * type of conversion. Two chunks and their respective buffers are alternatively
+ * returned so that it is always possible to iterate data transformations without
+ * losing the data being transformed. The blocks are initialized to the size of
+ * a standard buffer, so they should be enough for everything. For convenience,
+ * a zero is always emitted at the beginning of the string so that it may be
+ * used as an empty string as well.
+ */
+struct buffer *get_trash_chunk(void)
+{
+	char *trash_buf;
+
+	/* alternate between the two thread-local chunks so that the result of
+	 * the previous call stays usable while a new transformation runs
+	 */
+	if (trash_chunk == &trash_chunk1) {
+		trash_chunk = &trash_chunk2;
+		trash_buf = trash_buf2;
+	}
+	else {
+		trash_chunk = &trash_chunk1;
+		trash_buf = trash_buf1;
+	}
+	/* emit the leading zero so the chunk may be used as an empty string */
+	*trash_buf = 0;
+	chunk_init(trash_chunk, trash_buf, trash_size);
+	return trash_chunk;
+}
+
+/* (re)allocates the trash buffers. Returns 0 in case of failure. It is
+ * possible to call this function multiple times if the trash size changes.
+ */
+static int alloc_trash_buffers(int bufsize)
+{
+	/* NOTE(review): assumes my_realloc2() behaves like realloc() but does
+	 * not leak the old area on failure — confirm against its definition.
+	 * Success (non-zero) requires all three buffers to be allocated.
+	 */
+	chunk_init(&trash, my_realloc2(trash.area, bufsize), bufsize);
+	trash_size = bufsize;
+	trash_buf1 = (char *)my_realloc2(trash_buf1, bufsize);
+	trash_buf2 = (char *)my_realloc2(trash_buf2, bufsize);
+	return trash.area && trash_buf1 && trash_buf2;
+}
+
+/* per-thread allocation hook: sizes this thread's trash buffers from the
+ * global bufsize tunable. Returns 0 on allocation failure.
+ */
+static int alloc_trash_buffers_per_thread()
+{
+	return alloc_trash_buffers(global.tune.bufsize);
+}
+
+/* per-thread free hook: releases this thread's trash buffers. Also registered
+ * as a post-deinit handler, so it must tolerate being called on freed/NULL
+ * pointers (ha_free() and chunk_destroy() reset their targets).
+ */
+static void free_trash_buffers_per_thread()
+{
+	chunk_destroy(&trash);
+	ha_free(&trash_buf2);
+	ha_free(&trash_buf1);
+}
+
+/* Initialize the trash buffers. It returns 0 if an error occurred. */
+int init_trash_buffers(int first)
+{
+	/* note: <first> is not used in this code path; kept for API
+	 * compatibility with callers.
+	 */
+	pool_destroy(pool_head_trash);
+	/* the pool entry holds the buffer header immediately followed by its
+	 * storage area, hence the summed size
+	 */
+	pool_head_trash = create_pool("trash",
+				      sizeof(struct buffer) + global.tune.bufsize,
+				      MEM_F_EXACT);
+	if (!pool_head_trash || !alloc_trash_buffers(global.tune.bufsize))
+		return 0;
+	return 1;
+}
+
+/* This is called during STG_POOL to allocate trash buffers early. They will
+ * be reallocated later once their final size is known. It returns 0 if an
+ * error occurred.
+ */
+static int alloc_early_trash(void)
+{
+	/* early pass (STG_POOL): the final bufsize is not yet known, the
+	 * buffers will be reallocated later with the definitive size
+	 */
+	return init_trash_buffers(1);
+}
+
+/*
+ * Does an snprintf() at the beginning of chunk <chk>, respecting the limit of
+ * at most chk->size chars. If the chk->len is over, nothing is added. Returns
+ * the new chunk size, or < 0 in case of failure.
+ */
+int chunk_printf(struct buffer *chk, const char *fmt, ...)
+{
+	va_list argp;
+	int ret;
+
+	/* nothing can be written into an unallocated or zero-sized chunk */
+	if (!chk->area || !chk->size)
+		return 0;
+
+	va_start(argp, fmt);
+	ret = vsnprintf(chk->area, chk->size, fmt, argp);
+	va_end(argp);
+
+	/* vsnprintf() returns the length the output would have required; a
+	 * value >= size means truncation. A negative error value also lands
+	 * here because <ret> is promoted to unsigned for the comparison.
+	 */
+	if (ret >= chk->size)
+		return -1;
+
+	chk->data = ret;
+	return chk->data;
+}
+
+/*
+ * Does an snprintf() at the end of chunk <chk>, respecting the limit of
+ * at most chk->size chars. If the chk->len is over, nothing is added. Returns
+ * the new chunk size.
+ */
+int chunk_appendf(struct buffer *chk, const char *fmt, ...)
+{
+	va_list argp;
+	size_t room;
+	int ret;
+
+	/* nothing can be appended to an unallocated or zero-sized chunk */
+	if (!chk->area || !chk->size)
+		return 0;
+
+	room = chk->size - chk->data;
+	if (!room)
+		return chk->data;
+
+	va_start(argp, fmt);
+	ret = vsnprintf(chk->area + chk->data, room, fmt, argp);
+	/* <room> is size_t so a negative vsnprintf() error also takes the
+	 * truncation branch below, leaving the chunk unchanged
+	 */
+	if (ret >= room)
+		/* do not copy anything in case of truncation */
+		chk->area[chk->data] = 0;
+	else
+		chk->data += ret;
+	va_end(argp);
+	return chk->data;
+}
+
+/*
+ * Encode chunk <src> into chunk <dst>, respecting the limit of at most
+ * chk->size chars. Replace non-printable or special characters with "&#%u;".
+ * If the chk->len is over, nothing is added. Returns the new chunk size.
+ */
+int chunk_htmlencode(struct buffer *dst, struct buffer *src)
+{
+	int i, l;
+	int olen, free;
+	char c;
+
+	/* remember the original length so the output can be rolled back to
+	 * its initial state if the destination overflows mid-way
+	 */
+	olen = dst->data;
+
+	for (i = 0; i < src->data; i++) {
+		free = dst->size - dst->data;
+
+		if (!free) {
+			dst->data = olen;
+			return dst->data;
+		}
+
+		c = src->area[i];
+
+		/* non-ASCII, non-printable and HTML-sensitive chars are
+		 * emitted as a numeric character reference "&#NNN;"
+		 */
+		if (!isascii((unsigned char)c) || !isprint((unsigned char)c) || c == '&' || c == '"' || c == '\'' || c == '<' || c == '>') {
+			l = snprintf(dst->area + dst->data, free, "&#%u;",
+				     (unsigned char)c);
+
+			if (free < l) {
+				dst->data = olen;
+				return dst->data;
+			}
+
+			dst->data += l;
+		} else {
+			dst->area[dst->data] = c;
+			dst->data++;
+		}
+	}
+
+	return dst->data;
+}
+
+/*
+ * Encode chunk <src> into chunk <dst>, respecting the limit of at most
+ * chk->size chars. Replace non-printable or char passed in qc with "<%02X>".
+ * If the chk->len is over, nothing is added. Returns the new chunk size.
+ */
+int chunk_asciiencode(struct buffer *dst, struct buffer *src, char qc)
+{
+	int i, l;
+	int olen, free;
+	char c;
+
+	/* remember the original length so the output can be rolled back to
+	 * its initial state if the destination overflows mid-way
+	 */
+	olen = dst->data;
+
+	for (i = 0; i < src->data; i++) {
+		free = dst->size - dst->data;
+
+		if (!free) {
+			dst->data = olen;
+			return dst->data;
+		}
+
+		c = src->area[i];
+
+		/* non-ASCII, non-printable, angle brackets and the caller's
+		 * quote char <qc> are emitted as a hex escape "<XX>"
+		 */
+		if (!isascii((unsigned char)c) || !isprint((unsigned char)c) || c == '<' || c == '>' || c == qc) {
+			l = snprintf(dst->area + dst->data, free, "<%02X>",
+				     (unsigned char)c);
+
+			if (free < l) {
+				dst->data = olen;
+				return dst->data;
+			}
+
+			dst->data += l;
+		} else {
+			dst->area[dst->data] = c;
+			dst->data++;
+		}
+	}
+
+	return dst->data;
+}
+
+/* Compares the string in chunk <chk> with the string in <str> which must be
+ * zero-terminated. Return is the same as with strcmp(). Neither is allowed
+ * to be null.
+ */
+int chunk_strcmp(const struct buffer *chk, const char *str)
+{
+	const char *s1 = chk->area;
+	int len = chk->data;
+	int diff = 0;
+
+	/* the chunk is not necessarily zero-terminated, so iteration is
+	 * bounded by its length; once exhausted, the chunk compares as if it
+	 * ended with a zero (shorter string sorts first)
+	 */
+	do {
+		if (--len < 0) {
+			diff = (unsigned char)0 - (unsigned char)*str;
+			break;
+		}
+		diff = (unsigned char)*(s1++) - (unsigned char)*(str++);
+	} while (!diff);
+	return diff;
+}
+
+/* Case-insensitively compares the string in chunk <chk> with the string in
+ * <str> which must be zero-terminated. Return is the same as with strcmp().
+ * Neither is allowed to be null.
+ */
+int chunk_strcasecmp(const struct buffer *chk, const char *str)
+{
+	const char *s1 = chk->area;
+	int len = chk->data;
+	int diff = 0;
+
+	do {
+		if (--len < 0) {
+			diff = (unsigned char)0 - (unsigned char)*str;
+			break;
+		}
+		diff = (unsigned char)*s1 - (unsigned char)*str;
+		/* only fold case when the bytes actually differ: subtracting
+		 * 'a' turns lowercase letters into the range [0 .. 'z'-'a'],
+		 * which is then shifted to uppercase. Other bytes fall outside
+		 * that (unsigned) range and are left untouched.
+		 */
+		if (unlikely(diff)) {
+			unsigned int l = (unsigned char)*s1;
+			unsigned int r = (unsigned char)*str;
+
+			l -= 'a';
+			r -= 'a';
+
+			if (likely(l <= (unsigned char)'z' - 'a'))
+				l -= 'a' - 'A';
+			if (likely(r <= (unsigned char)'z' - 'a'))
+				r -= 'a' - 'A';
+			diff = l - r;
+		}
+		s1++; str++;
+	} while (!diff);
+	return diff;
+}
+
+INITCALL0(STG_POOL, alloc_early_trash);
+REGISTER_PER_THREAD_ALLOC(alloc_trash_buffers_per_thread);
+REGISTER_PER_THREAD_FREE(free_trash_buffers_per_thread);
+REGISTER_POST_DEINIT(free_trash_buffers_per_thread);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/cli.c b/src/cli.c
new file mode 100644
index 0000000..d0435f7
--- /dev/null
+++ b/src/cli.c
@@ -0,0 +1,3423 @@
+/*
+ * Functions dedicated to statistics output and the stats socket
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2009 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pwd.h>
+#include <grp.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <net/if.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/base64.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/compression.h>
+#include <haproxy/dns-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/mworker.h>
+#include <haproxy/mworker-t.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/peers.h>
+#include <haproxy/pipe.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/sample-t.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/sock.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+#define PAYLOAD_PATTERN "<<"
+
+static struct applet cli_applet;
+static struct applet mcli_applet;
+
+static const char cli_permission_denied_msg[] =
+ "Permission denied\n"
+ "";
+
+
+static THREAD_LOCAL char *dynamic_usage_msg = NULL;
+
+/* List head of cli keywords */
+static struct cli_kw_list cli_keywords = {
+ .list = LIST_HEAD_INIT(cli_keywords.list)
+};
+
+extern const char *stat_status_codes[];
+
+struct proxy *mworker_proxy; /* CLI proxy of the master */
+struct bind_conf *mcli_reload_bind_conf;
+
+/* CLI context for the "show env" command */
+struct show_env_ctx {
+ char **var; /* first variable to show */
+ int show_one; /* stop after showing the first one */
+};
+
+/* CLI context for the "show fd" command */
+/* flags for show_fd_ctx->show_mask */
+#define CLI_SHOWFD_F_PI 0x00000001 /* pipes */
+#define CLI_SHOWFD_F_LI 0x00000002 /* listeners */
+#define CLI_SHOWFD_F_FE 0x00000004 /* frontend conns */
+#define CLI_SHOWFD_F_SV 0x00000010 /* server-only conns */
+#define CLI_SHOWFD_F_PX 0x00000020 /* proxy-only conns */
+#define CLI_SHOWFD_F_BE 0x00000030 /* backend: srv+px */
+#define CLI_SHOWFD_F_CO 0x00000034 /* conn: be+fe */
+#define CLI_SHOWFD_F_ANY 0x0000003f /* any type */
+
+struct show_fd_ctx {
+ int fd; /* first FD to show */
+ int show_one; /* stop after showing one FD */
+ uint show_mask; /* CLI_SHOWFD_F_xxx */
+};
+
+/* CLI context for the "show cli sockets" command */
+struct show_sock_ctx {
+ struct bind_conf *bind_conf;
+ struct listener *listener;
+};
+
+/* qsort() comparator for cli_kw pointer arrays: orders entries by their usage
+ * string, treating a NULL usage as the empty string so it sorts first.
+ */
+static int cmp_kw_entries(const void *a, const void *b)
+{
+	const struct cli_kw *l = *(const struct cli_kw **)a;
+	const struct cli_kw *r = *(const struct cli_kw **)b;
+
+	return strcmp(l->usage ? l->usage : "", r->usage ? r->usage : "");
+}
+
+/* This will show the help message and list the commands supported at the
+ * current level that match all of the first words of <args> if args is not
+ * NULL, or all args if none matches or if args is null.
+ */
+static char *cli_gen_usage_msg(struct appctx *appctx, char * const *args)
+{
+	struct cli_kw *entries[CLI_MAX_HELP_ENTRIES];
+	struct cli_kw_list *kw_list;
+	struct cli_kw *kw;
+	struct buffer *tmp = get_trash_chunk();
+	struct buffer out;
+	struct { struct cli_kw *kw; int dist; } matches[CLI_MAX_MATCHES], swp;
+	int idx;
+	int ishelp = 0;
+	int length = 0;
+	int help_entries = 0;
+
+	/* the message is rebuilt on each call into the thread-local
+	 * dynamic_usage_msg, freeing the previous one first
+	 */
+	ha_free(&dynamic_usage_msg);
+
+	if (args && *args && strcmp(*args, "help") == 0) {
+		args++;
+		ishelp = 1;
+	}
+
+	/* first, let's measure the longest match */
+	list_for_each_entry(kw_list, &cli_keywords.list, list) {
+		for (kw = &kw_list->kw[0]; kw->str_kw[0]; kw++) {
+			/* skip commands requiring privileges this session lacks */
+			if (kw->level & ~appctx->cli_level & (ACCESS_MASTER_ONLY|ACCESS_EXPERT|ACCESS_EXPERIMENTAL))
+				continue;
+			if (!(appctx->cli_level & ACCESS_MCLI_DEBUG) &&
+			    (appctx->cli_level & ~kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)) ==
+			    (ACCESS_MASTER_ONLY|ACCESS_MASTER))
+				continue;
+
+			/* OK this command is visible */
+			for (idx = 0; idx < CLI_PREFIX_KW_NB; idx++) {
+				if (!kw->str_kw[idx])
+					break; // end of keyword
+				if (!args || !args[idx] || !*args[idx])
+					break; // end of command line
+				if (strcmp(kw->str_kw[idx], args[idx]) != 0)
+					break;
+				if (idx + 1 > length)
+					length = idx + 1;
+			}
+		}
+	}
+
+	/* now <length> equals the number of exactly matching words */
+	chunk_reset(tmp);
+	if (ishelp) // this is the help message.
+		chunk_strcat(tmp, "The following commands are valid at this level:\n");
+	else {
+		chunk_strcat(tmp, "Unknown command: '");
+		if (args && *args)
+			chunk_strcat(tmp, *args);
+		chunk_strcat(tmp, "'");
+
+		if (!length && (!args || !*args || !**args)) // no match
+			chunk_strcat(tmp, ". Please enter one of the following commands only:\n");
+		else // partial match
+			chunk_strcat(tmp, ", but maybe one of the following ones is a better match:\n");
+	}
+
+	for (idx = 0; idx < CLI_MAX_MATCHES; idx++) {
+		matches[idx].kw = NULL;
+		matches[idx].dist = INT_MAX;
+	}
+
+	/* In case of partial match we'll look for the best matching entries
+	 * starting from position <length>
+	 */
+	if (args && args[length] && *args[length]) {
+		list_for_each_entry(kw_list, &cli_keywords.list, list) {
+			for (kw = &kw_list->kw[0]; kw->str_kw[0]; kw++) {
+				if (kw->level & ~appctx->cli_level & (ACCESS_MASTER_ONLY|ACCESS_EXPERT|ACCESS_EXPERIMENTAL))
+					continue;
+				if (!(appctx->cli_level & ACCESS_MCLI_DEBUG) &&
+				    ((appctx->cli_level & ~kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)) ==
+				    (ACCESS_MASTER_ONLY|ACCESS_MASTER)))
+					continue;
+
+				for (idx = 0; idx < length; idx++) {
+					if (!kw->str_kw[idx])
+						break; // end of keyword
+					if (!args || !args[idx] || !*args[idx])
+						break; // end of command line
+					if (strcmp(kw->str_kw[idx], args[idx]) != 0)
+						break;
+				}
+
+				/* extra non-matching words are fuzzy-matched */
+				if (kw->usage && idx == length && args[idx] && *args[idx]) {
+					uint8_t word_sig[1024];
+					uint8_t list_sig[1024];
+					int dist = 0;
+					int totlen = 0;
+					int i;
+
+					/* this one matches, let's compute the distance between the two
+					 * on the remaining words. For this we're computing the signature
+					 * of everything that remains and the cumulated length of the
+					 * strings.
+					 */
+					memset(word_sig, 0, sizeof(word_sig));
+					for (i = idx; i < CLI_PREFIX_KW_NB && args[i] && *args[i]; i++) {
+						update_word_fingerprint(word_sig, args[i]);
+						totlen += strlen(args[i]);
+					}
+
+					memset(list_sig, 0, sizeof(list_sig));
+					for (i = idx; i < CLI_PREFIX_KW_NB && kw->str_kw[i]; i++) {
+						update_word_fingerprint(list_sig, kw->str_kw[i]);
+						totlen += strlen(kw->str_kw[i]);
+					}
+
+					dist = word_fingerprint_distance(word_sig, list_sig);
+
+					/* insert this one at its place if relevant, in order to keep only
+					 * the best matches. The array is kept sorted by ascending
+					 * distance using a single insertion pass from the tail.
+					 */
+					swp.kw = kw; swp.dist = dist;
+					if (dist < 5*totlen/2 && dist < matches[CLI_MAX_MATCHES-1].dist) {
+						matches[CLI_MAX_MATCHES-1] = swp;
+						for (idx = CLI_MAX_MATCHES - 1; --idx >= 0;) {
+							if (matches[idx+1].dist >= matches[idx].dist)
+								break;
+							matches[idx+1] = matches[idx];
+							matches[idx] = swp;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if (matches[0].kw) {
+		/* we have fuzzy matches, let's propose them */
+		for (idx = 0; idx < CLI_MAX_MATCHES; idx++) {
+			kw = matches[idx].kw;
+			if (!kw)
+				break;
+
+			/* stop the dump if some words look very unlikely candidates */
+			if (matches[idx].dist > 5*matches[0].dist/2)
+				break;
+
+			if (help_entries < CLI_MAX_HELP_ENTRIES)
+				entries[help_entries++] = kw;
+		}
+	}
+
+	list_for_each_entry(kw_list, &cli_keywords.list, list) {
+		/* no full dump if we've already found nice candidates */
+		if (matches[0].kw)
+			break;
+
+		for (kw = &kw_list->kw[0]; kw->str_kw[0]; kw++) {
+
+			/* in a worker or normal process, don't display master-only commands
+			 * nor expert/experimental mode commands if not in this mode.
+			 */
+			if (kw->level & ~appctx->cli_level & (ACCESS_MASTER_ONLY|ACCESS_EXPERT|ACCESS_EXPERIMENTAL))
+				continue;
+
+			/* in master, if the CLI don't have the
+			 * ACCESS_MCLI_DEBUG don't display commands that have
+			 * neither the master bit nor the master-only bit.
+			 */
+			if (!(appctx->cli_level & ACCESS_MCLI_DEBUG) &&
+			    ((appctx->cli_level & ~kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)) ==
+			    (ACCESS_MASTER_ONLY|ACCESS_MASTER)))
+				continue;
+
+			for (idx = 0; idx < length; idx++) {
+				if (!kw->str_kw[idx])
+					break; // end of keyword
+				if (!args || !args[idx] || !*args[idx])
+					break; // end of command line
+				if (strcmp(kw->str_kw[idx], args[idx]) != 0)
+					break;
+			}
+
+			if (kw->usage && idx == length && help_entries < CLI_MAX_HELP_ENTRIES)
+				entries[help_entries++] = kw;
+		}
+	}
+
+	qsort(entries, help_entries, sizeof(*entries), cmp_kw_entries);
+
+	for (idx = 0; idx < help_entries; idx++)
+		chunk_appendf(tmp, " %s\n", entries[idx]->usage);
+
+	/* always show the prompt/help/quit commands */
+	chunk_strcat(tmp,
+	             " help [<command>] : list matching or all commands\n"
+	             " prompt [timed] : toggle interactive mode with prompt\n"
+	             " quit : disconnect\n");
+
+	/* keep a private copy: the trash chunk may be reused by the caller */
+	chunk_init(&out, NULL, 0);
+	chunk_dup(&out, tmp);
+	dynamic_usage_msg = out.area;
+
+	cli_msg(appctx, LOG_INFO, dynamic_usage_msg);
+	return dynamic_usage_msg;
+}
+
+/* Looks up the registered keyword matching the first words of <args>. This is
+ * a prefix match: all words of the keyword must equal the corresponding words
+ * of <args>, extra command-line words are ignored. Returns the first matching
+ * cli_kw, or NULL if none matches.
+ */
+struct cli_kw* cli_find_kw(char **args)
+{
+	struct cli_kw_list *kw_list;
+	struct cli_kw *kw;/* current cli_kw */
+	char **tmp_args;
+	const char **tmp_str_kw;
+	int found = 0;
+
+	if (LIST_ISEMPTY(&cli_keywords.list))
+		return NULL;
+
+	list_for_each_entry(kw_list, &cli_keywords.list, list) {
+		kw = &kw_list->kw[0];
+		while (*kw->str_kw) {
+			tmp_args = args;
+			tmp_str_kw = kw->str_kw;
+			while (*tmp_str_kw) {
+				if (strcmp(*tmp_str_kw, *tmp_args) == 0) {
+					found = 1;
+				} else {
+					found = 0;
+					break;
+				}
+				tmp_args++;
+				tmp_str_kw++;
+			}
+			if (found)
+				return (kw);
+			kw++;
+		}
+	}
+	return NULL;
+}
+
+/* Looks up the registered keyword whose word list matches <args> exactly:
+ * both sequences must have the same number of words and every word must be
+ * equal, unlike cli_find_kw() which accepts a prefix match. Returns the
+ * matching cli_kw or NULL.
+ */
+struct cli_kw* cli_find_kw_exact(char **args)
+{
+	struct cli_kw_list *kw_list;
+	int found = 0;
+	int i;
+	int j;
+
+	if (LIST_ISEMPTY(&cli_keywords.list))
+		return NULL;
+
+	list_for_each_entry(kw_list, &cli_keywords.list, list) {
+		for (i = 0; kw_list->kw[i].str_kw[0]; i++) {
+			found = 1;
+			for (j = 0; j < CLI_PREFIX_KW_NB; j++) {
+				/* both end at the same position: exact match */
+				if (args[j] == NULL && kw_list->kw[i].str_kw[j] == NULL) {
+					break;
+				}
+				if (args[j] == NULL || kw_list->kw[i].str_kw[j] == NULL) {
+					found = 0;
+					break;
+				}
+				if (strcmp(args[j], kw_list->kw[i].str_kw[j]) != 0) {
+					found = 0;
+					break;
+				}
+			}
+			if (found)
+				return &kw_list->kw[i];
+		}
+	}
+	return NULL;
+}
+
+/* Appends a keyword list to the global CLI keyword registry. The list is
+ * linked, not copied, so it must remain valid for the process lifetime
+ * (typically a static, registered via INITCALL).
+ */
+void cli_register_kw(struct cli_kw_list *kw_list)
+{
+	LIST_APPEND(&cli_keywords.list, &kw_list->list);
+}
+
+/* list all known keywords on stdout, one per line */
+void cli_list_keywords(void)
+{
+	struct cli_kw_list *kw_list;
+	struct cli_kw *kwp, *kwn, *kw;
+	int idx;
+
+	/* selection-sort style dump: on each outer iteration, scan all lists
+	 * for the smallest entry strictly greater than the previously printed
+	 * one (<kwp>), so the output is sorted without copying anything.
+	 */
+	for (kwn = kwp = NULL;; kwp = kwn) {
+		list_for_each_entry(kw_list, &cli_keywords.list, list) {
+			/* note: we sort based on the usage message when available,
+			 * otherwise we fall back to the first keyword.
+			 */
+			for (kw = &kw_list->kw[0]; kw->str_kw[0]; kw++) {
+				if (strordered(kwp ? kwp->usage ? kwp->usage : kwp->str_kw[0] : NULL,
+					       kw->usage ? kw->usage : kw->str_kw[0],
+					       kwn != kwp ? kwn->usage ? kwn->usage : kwn->str_kw[0] : NULL))
+					kwn = kw;
+			}
+		}
+
+		/* no new candidate found: all entries have been printed */
+		if (kwn == kwp)
+			break;
+
+		for (idx = 0; kwn->str_kw[idx]; idx++) {
+			printf("%s ", kwn->str_kw[idx]);
+		}
+		if (kwn->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER))
+			printf("[MASTER] ");
+		if (!(kwn->level & ACCESS_MASTER_ONLY))
+			printf("[WORKER] ");
+		if (kwn->level & ACCESS_EXPERT)
+			printf("[EXPERT] ");
+		if (kwn->level & ACCESS_EXPERIMENTAL)
+			printf("[EXPERIM] ");
+		printf("\n");
+	}
+}
+
+/* allocate a new stats frontend named <name>, and return it
+ * (or NULL in case of lack of memory).
+ */
+static struct proxy *cli_alloc_fe(const char *name, const char *file, int line)
+{
+	struct proxy *fe;
+
+	fe = calloc(1, sizeof(*fe));
+	if (!fe)
+		return NULL;
+
+	init_new_proxy(fe);
+	/* prepend to the global proxies list */
+	fe->next = proxies_list;
+	proxies_list = fe;
+	fe->last_change = ns_to_sec(now_ns);
+	/* NOTE(review): the <name> argument is not used here, the id is
+	 * hard-coded to "GLOBAL" — confirm this is intentional.
+	 */
+	fe->id = strdup("GLOBAL");
+	fe->cap = PR_CAP_FE|PR_CAP_INT;
+	fe->maxconn = 10;                 /* default to 10 concurrent connections */
+	fe->timeout.client = MS_TO_TICKS(10000); /* default timeout of 10 seconds */
+	fe->conf.file = strdup(file);
+	fe->conf.line = line;
+	fe->accept = frontend_accept;
+	fe->default_target = &cli_applet.obj_type;
+
+	/* the stats frontend is the only one able to assign ID #0 */
+	fe->conf.id.key = fe->uuid = 0;
+	eb32_insert(&used_proxy_id, &fe->conf.id);
+	return fe;
+}
+
+/* This function parses a "stats" statement in the "global" section. It returns
+ * -1 if there is any error, otherwise zero. If it returns -1, it will write an
+ * error message into the <err> buffer which will be preallocated. The trailing
+ * '\n' must not be written. The function must be called with <args> pointing to
+ * the first word after "stats".
+ */
+static int cli_parse_global(char **args, int section_type, struct proxy *curpx,
+                            const struct proxy *defpx, const char *file, int line,
+                            char **err)
+{
+	struct bind_conf *bind_conf;
+	struct listener *l;
+
+	/* "stats socket <addr> [bind options...]" : create the CLI listener */
+	if (strcmp(args[1], "socket") == 0) {
+		int cur_arg;
+
+		if (*args[2] == 0) {
+			memprintf(err, "'%s %s' in global section expects an address or a path to a UNIX socket", args[0], args[1]);
+			return -1;
+		}
+
+		/* the CLI frontend is created lazily on first use */
+		if (!global.cli_fe) {
+			if ((global.cli_fe = cli_alloc_fe("GLOBAL", file, line)) == NULL) {
+				memprintf(err, "'%s %s' : out of memory trying to allocate a frontend", args[0], args[1]);
+				return -1;
+			}
+		}
+
+		bind_conf = bind_conf_alloc(global.cli_fe, file, line, args[2], xprt_get(XPRT_RAW));
+		if (!bind_conf) {
+			memprintf(err, "'%s %s' : out of memory trying to allocate a bind_conf", args[0], args[1]);
+			return -1;
+		}
+		bind_conf->level &= ~ACCESS_LVL_MASK;
+		bind_conf->level |= ACCESS_LVL_OPER; /* default access level */
+
+		if (!str2listener(args[2], global.cli_fe, bind_conf, file, line, err)) {
+			memprintf(err, "parsing [%s:%d] : '%s %s' : %s\n",
+			          file, line, args[0], args[1], err && *err ? *err : "error");
+			return -1;
+		}
+
+		/* parse the remaining words as standard "bind" keywords */
+		cur_arg = 3;
+		while (*args[cur_arg]) {
+			struct bind_kw *kw;
+			const char *best;
+			int code;
+
+			kw = bind_find_kw(args[cur_arg]);
+			if (kw) {
+				if (!kw->parse) {
+					memprintf(err, "'%s %s' : '%s' option is not implemented in this version (check build options).",
+						  args[0], args[1], args[cur_arg]);
+					return -1;
+				}
+
+				code = kw->parse(args, cur_arg, global.cli_fe, bind_conf, err);
+
+				/* FIXME: this is ugly, we don't have a way to collect warnings,
+				 * yet some important bind keywords may report warnings that we
+				 * must display.
+				 */
+				if (((code & (ERR_WARN|ERR_FATAL|ERR_ALERT)) == ERR_WARN) && err && *err) {
+					indent_msg(err, 2);
+					ha_warning("parsing [%s:%d] : '%s %s' : %s\n", file, line, args[0], args[1], *err);
+					ha_free(err);
+				}
+
+				if (code & ~ERR_WARN) {
+					if (err && *err)
+						memprintf(err, "'%s %s' : '%s'", args[0], args[1], *err);
+					else
+						memprintf(err, "'%s %s' : error encountered while processing '%s'",
+						          args[0], args[1], args[cur_arg]);
+					return -1;
+				}
+
+				cur_arg += 1 + kw->skip;
+				continue;
+			}
+
+			/* unknown keyword: suggest the closest known one if any */
+			best = bind_find_best_kw(args[cur_arg]);
+			if (best)
+				memprintf(err, "'%s %s' : unknown keyword '%s'. Did you mean '%s' maybe ?",
+				          args[0], args[1], args[cur_arg], best);
+			else
+				memprintf(err, "'%s %s' : unknown keyword '%s'.",
+				          args[0], args[1], args[cur_arg]);
+			return -1;
+		}
+
+		bind_conf->accept = session_accept_fd;
+		bind_conf->nice = -64;  /* we want to boost priority for local stats */
+		bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
+
+		list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+			global.maxsock++; /* for the listening socket */
+		}
+	}
+	/* "stats timeout <time>" : client timeout on the CLI frontend */
+	else if (strcmp(args[1], "timeout") == 0) {
+		unsigned timeout;
+		const char *res = parse_time_err(args[2], &timeout, TIME_UNIT_MS);
+
+		if (res == PARSE_TIME_OVER) {
+			memprintf(err, "timer overflow in argument '%s' to '%s %s' (maximum value is 2147483647 ms or ~24.8 days)",
+				  args[2], args[0], args[1]);
+			return -1;
+		}
+		else if (res == PARSE_TIME_UNDER) {
+			memprintf(err, "timer underflow in argument '%s' to '%s %s' (minimum non-null value is 1 ms)",
+				  args[2], args[0], args[1]);
+			return -1;
+		}
+		else if (res) {
+			memprintf(err, "'%s %s' : unexpected character '%c'", args[0], args[1], *res);
+			return -1;
+		}
+
+		if (!timeout) {
+			memprintf(err, "'%s %s' expects a positive value", args[0], args[1]);
+			return -1;
+		}
+		if (!global.cli_fe) {
+			if ((global.cli_fe = cli_alloc_fe("GLOBAL", file, line)) == NULL) {
+				memprintf(err, "'%s %s' : out of memory trying to allocate a frontend", args[0], args[1]);
+				return -1;
+			}
+		}
+		global.cli_fe->timeout.client = MS_TO_TICKS(timeout);
+	}
+	/* "stats maxconn <number>" : concurrent connection limit on the CLI */
+	else if (strcmp(args[1], "maxconn") == 0) {
+		int maxconn = atol(args[2]);
+
+		if (maxconn <= 0) {
+			memprintf(err, "'%s %s' expects a positive value", args[0], args[1]);
+			return -1;
+		}
+
+		if (!global.cli_fe) {
+			if ((global.cli_fe = cli_alloc_fe("GLOBAL", file, line)) == NULL) {
+				memprintf(err, "'%s %s' : out of memory trying to allocate a frontend", args[0], args[1]);
+				return -1;
+			}
+		}
+		global.cli_fe->maxconn = maxconn;
+	}
+	else if (strcmp(args[1], "bind-process") == 0) {
+		memprintf(err, "'%s %s' is not supported anymore.", args[0], args[1]);
+		return -1;
+	}
+	else {
+		memprintf(err, "'%s' only supports 'socket', 'maxconn', 'bind-process' and 'timeout' (got '%s')", args[0], args[1]);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * This function exports the bound addresses of a <frontend> in the environment
+ * variable <varname>. Those addresses are separated by semicolons and prefixed
+ * with their type (abns@, unix@, sockpair@ etc)
+ * Return -1 upon error, 0 otherwise
+ */
+int listeners_setenv(struct proxy *frontend, const char *varname)
+{
+	struct buffer *trash = get_trash_chunk();
+	struct bind_conf *bind_conf;
+
+	/* a NULL frontend silently results in the variable not being set */
+	if (frontend) {
+		list_for_each_entry(bind_conf, &frontend->conf.bind, by_fe) {
+			struct listener *l;
+
+			list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+				char addr[46];
+				char port[6];
+
+				/* separate listener by semicolons */
+				if (trash->data)
+					chunk_appendf(trash, ";");
+
+				if (l->rx.addr.ss_family == AF_UNIX) {
+					const struct sockaddr_un *un;
+
+					un = (struct sockaddr_un *)&l->rx.addr;
+					/* a leading NUL byte denotes an abstract
+					 * namespace socket; the name starts after it
+					 */
+					if (un->sun_path[0] == '\0') {
+						chunk_appendf(trash, "abns@%s", un->sun_path+1);
+					} else {
+						chunk_appendf(trash, "unix@%s", un->sun_path);
+					}
+				} else if (l->rx.addr.ss_family == AF_INET) {
+					addr_to_str(&l->rx.addr, addr, sizeof(addr));
+					port_to_str(&l->rx.addr, port, sizeof(port));
+					chunk_appendf(trash, "ipv4@%s:%s", addr, port);
+				} else if (l->rx.addr.ss_family == AF_INET6) {
+					addr_to_str(&l->rx.addr, addr, sizeof(addr));
+					port_to_str(&l->rx.addr, port, sizeof(port));
+					chunk_appendf(trash, "ipv6@[%s]:%s", addr, port);
+				} else if (l->rx.addr.ss_family == AF_CUST_SOCKPAIR) {
+					chunk_appendf(trash, "sockpair@%d", ((struct sockaddr_in *)&l->rx.addr)->sin_addr.s_addr);
+				}
+			}
+		}
+		/* the chunk is not zero-terminated by chunk_appendf(), do it now */
+		trash->area[trash->data++] = '\0';
+		if (setenv(varname, trash->area, 1) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Config post-parser: exports the CLI and master-CLI listener addresses in
+ * the HAPROXY_CLI and HAPROXY_MASTER_CLI environment variables so that child
+ * processes and external tools can find them. Returns -1 on error, 0 on
+ * success.
+ */
+int cli_socket_setenv()
+{
+	if (listeners_setenv(global.cli_fe, "HAPROXY_CLI") < 0)
+		return -1;
+	if (listeners_setenv(mworker_proxy, "HAPROXY_MASTER_CLI") < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_CONFIG_POSTPARSER("cli", cli_socket_setenv);
+
+/* Verifies that the CLI at least has a level at least as high as <level>
+ * (typically ACCESS_LVL_ADMIN). Returns 1 if OK, otherwise 0. In case of
+ * failure, an error message is prepared and the appctx's state is adjusted
+ * to print it so that a return 1 is enough to abort any processing.
+ */
+int cli_has_level(struct appctx *appctx, int level)
+{
+
+	/* only the level bits are compared; other access flags are ignored */
+	if ((appctx->cli_level & ACCESS_LVL_MASK) < level) {
+		/* queue the error so a plain "return 1" aborts processing */
+		cli_err(appctx, cli_permission_denied_msg);
+		return 0;
+	}
+	return 1;
+}
+
+/* same as cli_has_level but for the CLI proxy and without error message */
+/* same as cli_has_level() but reads the level from the CLI proxy stream's
+ * flags and emits no error message. Returns 1 if allowed, otherwise 0.
+ */
+int pcli_has_level(struct stream *s, int level)
+{
+	if ((s->pcli_flags & ACCESS_LVL_MASK) < level) {
+		return 0;
+	}
+	return 1;
+}
+
+/* Returns severity_output for the current session if set, or default for the socket */
+/* Returns severity_output for the current session if set, or default for the socket */
+static int cli_get_severity_output(struct appctx *appctx)
+{
+	/* a per-session setting takes precedence over the bind_conf default */
+	if (appctx->cli_severity_output)
+		return appctx->cli_severity_output;
+	return strm_li(appctx_strm(appctx))->bind_conf->severity_output;
+}
+
+/* Processes the CLI interpreter on the stats socket. This function is called
+ * from the CLI's IO handler running in an appctx context. The function returns
+ * 1 if the request was understood, otherwise zero (in which case an error
+ * message will be displayed). It is called with appctx->st0
+ * set to CLI_ST_GETREQ and presets ->st2 to 0 so that parsers don't have to do
+ * it. It will possibly leave st0 to CLI_ST_CALLBACK if the keyword needs to
+ * have its own I/O handler called again. Most of the time, parsers will only
+ * set st0 to CLI_ST_PRINT and put their message to be displayed into cli.msg.
+ * If a keyword parser is NULL and an I/O handler is declared, the I/O handler
+ * will automatically be used.
+ */
+static int cli_parse_request(struct appctx *appctx)
+{
+	char *args[MAX_CLI_ARGS + 1], *p, *end, *payload = NULL;
+	int i = 0;
+	struct cli_kw *kw;
+
+	p = appctx->chunk->area;
+	end = p + appctx->chunk->data;
+
+	/*
+	 * Get pointers on words.
+	 * One extra slot is reserved to store a pointer on a null byte.
+	 */
+	while (i < MAX_CLI_ARGS && p < end) {
+		int j, k;
+
+		/* skip leading spaces/tabs */
+		p += strspn(p, " \t");
+		if (!*p)
+			break;
+
+		/* first check if the '<<' is present, but this is not enough
+		 * because we don't know if this is the end of the string */
+		if (strncmp(p, PAYLOAD_PATTERN, strlen(PAYLOAD_PATTERN)) == 0) {
+			int pat_len = strlen(appctx->cli_payload_pat);
+
+			/* then if the customized pattern is empty, check if the next character is '\0' */
+			if (pat_len == 0 && p[strlen(PAYLOAD_PATTERN)] == '\0') {
+				payload = p + strlen(PAYLOAD_PATTERN) + 1;
+				break;
+			}
+
+			/* else if we found the customized pattern at the end of the string */
+			if (strcmp(p + strlen(PAYLOAD_PATTERN), appctx->cli_payload_pat) == 0) {
+				payload = p + strlen(PAYLOAD_PATTERN) + pat_len + 1;
+				break;
+			}
+		}
+
+		args[i] = p;
+		/* advance to the next unescaped separator, skipping over
+		 * backslash-escaped characters
+		 */
+		while (1) {
+			p += strcspn(p, " \t\\");
+			/* escaped chars using backlashes (\) */
+			if (*p == '\\') {
+				if (!*++p)
+					break;
+				if (!*++p)
+					break;
+			} else {
+				break;
+			}
+		}
+		*p++ = 0;
+
+		/* unescape backslashes (\) */
+		for (j = 0, k = 0; args[i][k]; k++) {
+			if (args[i][k] == '\\') {
+				if (args[i][k + 1] == '\\')
+					k++;
+				else
+					continue;
+			}
+			args[i][j] = args[i][k];
+			j++;
+		}
+		args[i][j] = 0;
+
+		i++;
+	}
+	/* fill unused slots with a pointer to the trailing null byte so that
+	 * parsers can always dereference args[] safely
+	 */
+	p = appctx->chunk->area + appctx->chunk->data;
+	for (; i < MAX_CLI_ARGS + 1; i++)
+		args[i] = p;
+
+	if (!**args)
+		return 0;
+
+	kw = cli_find_kw(args);
+	if (!kw ||
+	    (kw->level & ~appctx->cli_level & ACCESS_MASTER_ONLY) ||
+	    (!(appctx->cli_level & ACCESS_MCLI_DEBUG) &&
+	     (appctx->cli_level & ~kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)) == (ACCESS_MASTER_ONLY|ACCESS_MASTER))) {
+		/* keyword not found in this mode */
+		cli_gen_usage_msg(appctx, args);
+		return 0;
+	}
+
+	/* don't handle expert mode commands if not in this mode. */
+	if (kw->level & ~appctx->cli_level & ACCESS_EXPERT) {
+		cli_err(appctx, "This command is restricted to expert mode only.\n");
+		return 0;
+	}
+
+	if (kw->level & ~appctx->cli_level & ACCESS_EXPERIMENTAL) {
+		cli_err(appctx, "This command is restricted to experimental mode only.\n");
+		return 0;
+	}
+
+	/* record that a privileged command was used, for "show dev" & co */
+	if (kw->level == ACCESS_EXPERT)
+		mark_tainted(TAINTED_CLI_EXPERT_MODE);
+	else if (kw->level == ACCESS_EXPERIMENTAL)
+		mark_tainted(TAINTED_CLI_EXPERIMENTAL_MODE);
+
+	appctx->io_handler = kw->io_handler;
+	appctx->io_release = kw->io_release;
+
+	if (kw->parse && kw->parse(args, payload, appctx, kw->private) != 0)
+		goto fail;
+
+	/* kw->parse could set its own io_handler or io_release handler */
+	if (!appctx->io_handler)
+		goto fail;
+
+	appctx->st0 = CLI_ST_CALLBACK;
+	return 1;
+fail:
+	appctx->io_handler = NULL;
+	appctx->io_release = NULL;
+	return 1;
+}
+
+/* Prepends a syslog-style severity prefix to <msg> according to
+ * <severity_output> (CLI_SEVERITY_NONE: no prefix, CLI_SEVERITY_NUMBER:
+ * "[N]: ", CLI_SEVERITY_STRING: "[level]: "), normalizes the trailing LF
+ * (strips any existing ones, then appends exactly one), and sends the result
+ * to the applet's output. <severity> is expected to be a syslog level (0..7);
+ * out-of-range values are reported with ha_warning() and dumped numerically.
+ * Returns the result of applet_putchk(): -1 when the output buffer is full
+ * (caller must retry later), non-negative otherwise.
+ */
+static int cli_output_msg(struct appctx *appctx, const char *msg, int severity, int severity_output)
+{
+	struct buffer *tmp;
+	struct ist imsg;
+
+	tmp = get_trash_chunk();
+	chunk_reset(tmp);
+
+	if (likely(severity_output == CLI_SEVERITY_NONE))
+		goto send_it;
+
+	if (severity < 0 || severity > 7) {
+		ha_warning("socket command feedback with invalid severity %d", severity);
+		chunk_printf(tmp, "[%d]: ", severity);
+	}
+	else {
+		switch (severity_output) {
+		case CLI_SEVERITY_NUMBER:
+			chunk_printf(tmp, "[%d]: ", severity);
+			break;
+		case CLI_SEVERITY_STRING:
+			chunk_printf(tmp, "[%s]: ", log_levels[severity]);
+			break;
+		default:
+			ha_warning("Unrecognized severity output %d", severity_output);
+		}
+	}
+ send_it:
+	/* the vast majority of messages have their trailing LF but a few are
+	 * still missing it, and very rare ones might even have two. For this
+	 * reason, we'll first delete the trailing LFs if present, then
+	 * systematically append one.
+	 */
+	for (imsg = ist(msg); imsg.len > 0 && imsg.ptr[imsg.len - 1] == '\n'; imsg.len--)
+		;
+
+	chunk_istcat(tmp, imsg);
+	chunk_istcat(tmp, ist("\n"));
+
+	return applet_putchk(appctx, tmp);
+}
+
+/* This I/O handler runs as an applet embedded in a stream connector. It is
+ * used to process I/O from/to the stats unix socket. The system relies on a
+ * state machine handling requests and various responses. We read a request,
+ * then we process it and send the response, and we possibly display a prompt.
+ * Then we can read again. The state is stored in appctx->st0 and is one of the
+ * CLI_ST_* constants. appctx->st1 is used to indicate whether prompt is enabled
+ * or not.
+ */
+static void cli_io_handler(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct channel *req = sc_oc(sc);
+	struct channel *res = sc_ic(sc);
+	struct bind_conf *bind_conf = strm_li(__sc_strm(sc))->bind_conf;
+	int reql;
+	int len;
+
+	/* peer closed or errored out: flush pending input and leave */
+	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+		co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+		goto out;
+	}
+
+	/* Check if the input buffer is available. */
+	if (!b_size(&res->buf)) {
+		sc_need_room(sc, 0);
+		goto out;
+	}
+
+	while (1) {
+		if (appctx->st0 == CLI_ST_INIT) {
+			/* reset severity to default at init */
+			appctx->cli_severity_output = bind_conf->severity_output;
+			applet_reset_svcctx(appctx);
+			appctx->st0 = CLI_ST_GETREQ;
+			appctx->cli_level = bind_conf->level;
+		}
+		else if (appctx->st0 == CLI_ST_END) {
+			/* terminal state: signal EOS and release the request buffer */
+			se_fl_set(appctx->sedesc, SE_FL_EOS);
+			free_trash_chunk(appctx->chunk);
+			appctx->chunk = NULL;
+			break;
+		}
+		else if (appctx->st0 == CLI_ST_GETREQ) {
+			char *str;
+
+			/* use a trash chunk to store received data */
+			if (!appctx->chunk) {
+				appctx->chunk = alloc_trash_chunk();
+				if (!appctx->chunk) {
+					se_fl_set(appctx->sedesc, SE_FL_ERROR);
+					appctx->st0 = CLI_ST_END;
+					continue;
+				}
+			}
+
+			/* append after any data already accumulated (payload mode) */
+			str = appctx->chunk->area + appctx->chunk->data;
+
+			/* ensure we have some output room left in the event we
+			 * would want to return some info right after parsing.
+			 */
+			if (buffer_almost_full(sc_ib(sc))) {
+				sc_need_room(sc, b_size(&res->buf) / 2);
+				break;
+			}
+
+			/* payload doesn't take escapes nor does it end on semi-colons, so
+			 * we use the regular getline. Normal mode however must stop on
+			 * LFs and semi-colons that are not prefixed by a backslash. Note
+			 * that we reserve one byte at the end to insert a trailing nul byte.
+			 */
+
+			if (appctx->st1 & APPCTX_CLI_ST1_PAYLOAD)
+				reql = co_getline(sc_oc(sc), str,
+						  appctx->chunk->size - appctx->chunk->data - 1);
+			else
+				reql = co_getdelim(sc_oc(sc), str,
+						   appctx->chunk->size - appctx->chunk->data - 1,
+						   "\n;", '\\');
+
+			if (reql <= 0) { /* closed or EOL not found */
+				if (reql == 0)
+					break;
+				se_fl_set(appctx->sedesc, SE_FL_ERROR);
+				appctx->st0 = CLI_ST_END;
+				continue;
+			}
+
+			if (!(appctx->st1 & APPCTX_CLI_ST1_PAYLOAD)) {
+				/* seek for a possible unescaped semi-colon. If we find
+				 * one, we replace it with an LF and skip only this part.
+				 */
+				for (len = 0; len < reql; len++) {
+					if (str[len] == '\\') {
+						len++;
+						continue;
+					}
+					if (str[len] == ';') {
+						str[len] = '\n';
+						reql = len + 1;
+						break;
+					}
+				}
+			}
+
+			/* now it is time to check that we have a full line,
+			 * remove the trailing \n and possibly \r, then cut the
+			 * line.
+			 */
+			len = reql - 1;
+			if (str[len] != '\n') {
+				se_fl_set(appctx->sedesc, SE_FL_ERROR);
+				appctx->st0 = CLI_ST_END;
+				continue;
+			}
+
+			if (len && str[len-1] == '\r')
+				len--;
+
+			str[len] = '\0';
+			appctx->chunk->data += len;
+
+			/* in payload mode, keep the LF as payload line separator */
+			if (appctx->st1 & APPCTX_CLI_ST1_PAYLOAD) {
+				appctx->chunk->area[appctx->chunk->data] = '\n';
+				appctx->chunk->area[appctx->chunk->data + 1] = 0;
+				appctx->chunk->data++;
+			}
+
+			appctx->st0 = CLI_ST_PROMPT;
+
+			if (appctx->st1 & APPCTX_CLI_ST1_PAYLOAD) {
+				/* look for a pattern */
+				if (len == strlen(appctx->cli_payload_pat)) {
+					/* here use 'len' because str still contains the \n */
+					if (strncmp(str, appctx->cli_payload_pat, len) == 0) {
+						/* remove the last two \n */
+						appctx->chunk->data -= strlen(appctx->cli_payload_pat) + 2;
+						appctx->chunk->area[appctx->chunk->data] = 0;
+						cli_parse_request(appctx);
+						chunk_reset(appctx->chunk);
+						/* NB: cli_parse_request() may have put
+						 * another CLI_ST_O_* into appctx->st0.
+						 */
+
+						appctx->st1 &= ~APPCTX_CLI_ST1_PAYLOAD;
+					}
+				}
+			}
+			else {
+				char *last_arg;
+				/*
+				 * Look for the "payload start" pattern at the end of a line
+				 * Its location is not remembered here, this is just to switch
+				 * to a gathering mode.
+				 * The pattern must start by << followed by 0
+				 * to 7 characters, and finished by the end of
+				 * the command (\n or ;).
+				 */
+				/* look for the first space starting by the end of the line */
+				for (last_arg = appctx->chunk->area + appctx->chunk->data; last_arg != appctx->chunk->area; last_arg--) {
+					if (*last_arg == ' ' || *last_arg == '\t') {
+						last_arg++;
+						break;
+					}
+				}
+				if (strncmp(last_arg, PAYLOAD_PATTERN, strlen(PAYLOAD_PATTERN)) == 0) {
+					ssize_t pat_len = strlen(last_arg + strlen(PAYLOAD_PATTERN));
+
+					/* A customized pattern can't be more than 7 characters
+					 * if it's more, don't make it a payload
+					 */
+					if (pat_len < sizeof(appctx->cli_payload_pat)) {
+						appctx->st1 |= APPCTX_CLI_ST1_PAYLOAD;
+						/* copy the customized pattern, don't store the << */
+						strncpy(appctx->cli_payload_pat, last_arg + strlen(PAYLOAD_PATTERN), sizeof(appctx->cli_payload_pat)-1);
+						appctx->cli_payload_pat[sizeof(appctx->cli_payload_pat)-1] = '\0';
+						appctx->chunk->data++; // keep the trailing \0 after the pattern
+					}
+				}
+				else {
+					/* no payload, the command is complete: parse the request */
+					cli_parse_request(appctx);
+					chunk_reset(appctx->chunk);
+				}
+			}
+
+			/* re-adjust req buffer */
+			co_skip(sc_oc(sc), reql);
+			sc_opposite(sc)->flags |= SC_FL_RCV_ONCE; /* we plan to read small requests */
+		}
+		else {	/* output functions */
+			struct cli_print_ctx *ctx;
+			const char *msg;
+			int sev;
+
+			switch (appctx->st0) {
+			case CLI_ST_PROMPT:
+				break;
+			case CLI_ST_PRINT:       /* print const message in msg */
+			case CLI_ST_PRINT_ERR:   /* print const error in msg */
+			case CLI_ST_PRINT_DYN:   /* print dyn message in msg, free */
+			case CLI_ST_PRINT_DYNERR: /* print dyn error in err, free */
+			case CLI_ST_PRINT_UMSG:  /* print usermsgs_ctx and reset it */
+			case CLI_ST_PRINT_UMSGERR: /* print usermsgs_ctx as error and reset it */
+				/* the message is in the svcctx */
+				ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+				if (appctx->st0 == CLI_ST_PRINT || appctx->st0 == CLI_ST_PRINT_ERR) {
+					sev = appctx->st0 == CLI_ST_PRINT_ERR ?
+						LOG_ERR : ctx->severity;
+					msg = ctx->msg;
+				}
+				else if (appctx->st0 == CLI_ST_PRINT_DYN || appctx->st0 == CLI_ST_PRINT_DYNERR) {
+					sev = appctx->st0 == CLI_ST_PRINT_DYNERR ?
+						LOG_ERR : ctx->severity;
+					msg = ctx->err;
+					if (!msg) {
+						sev = LOG_ERR;
+						msg = "Out of memory.\n";
+					}
+				}
+				else if (appctx->st0 == CLI_ST_PRINT_UMSG ||
+					 appctx->st0 == CLI_ST_PRINT_UMSGERR) {
+					sev = appctx->st0 == CLI_ST_PRINT_UMSGERR ?
+						LOG_ERR : ctx->severity;
+					msg = usermsgs_str();
+				}
+				else {
+					sev = LOG_ERR;
+					msg = "Internal error.\n";
+				}
+
+				/* only free/reset the message once it was fully emitted */
+				if (cli_output_msg(appctx, msg, sev, cli_get_severity_output(appctx)) != -1) {
+					if (appctx->st0 == CLI_ST_PRINT_DYN ||
+					    appctx->st0 == CLI_ST_PRINT_DYNERR) {
+						ha_free(&ctx->err);
+					}
+					else if (appctx->st0 == CLI_ST_PRINT_UMSG ||
+						 appctx->st0 == CLI_ST_PRINT_UMSGERR) {
+						usermsgs_clr(NULL);
+					}
+					appctx->st0 = CLI_ST_PROMPT;
+				}
+				break;
+
+			case CLI_ST_CALLBACK: /* use custom pointer */
+				if (appctx->io_handler)
+					if (appctx->io_handler(appctx)) {
+						appctx->st0 = CLI_ST_PROMPT;
+						if (appctx->io_release) {
+							appctx->io_release(appctx);
+							appctx->io_release = NULL;
+						}
+					}
+				break;
+			default: /* abnormal state */
+				se_fl_set(appctx->sedesc, SE_FL_ERROR);
+				break;
+			}
+
+			/* The post-command prompt is either LF alone or LF + '> ' in interactive mode */
+			if (appctx->st0 == CLI_ST_PROMPT) {
+				char prompt_buf[20];
+				const char *prompt = "";
+
+				if (appctx->st1 & APPCTX_CLI_ST1_PROMPT) {
+					/*
+					 * when entering a payload with interactive mode, change the prompt
+					 * to emphasize that more data can still be sent
+					 */
+					if (appctx->chunk->data && appctx->st1 & APPCTX_CLI_ST1_PAYLOAD)
+						prompt = "+ ";
+					else if (appctx->st1 & APPCTX_CLI_ST1_TIMED) {
+						/* timed prompt: show process uptime as [d:hh:mm:ss] */
+						uint up = ns_to_sec(now_ns - start_time_ns);
+						snprintf(prompt_buf, sizeof(prompt_buf),
+							 "\n[%u:%02u:%02u:%02u]> ",
+							 (up / 86400), (up / 3600) % 24, (up / 60) % 60, up % 60);
+						prompt = prompt_buf;
+					}
+					else
+						prompt = "\n> ";
+				}
+				else {
+					if (!(appctx->st1 & (APPCTX_CLI_ST1_PAYLOAD|APPCTX_CLI_ST1_NOLF)))
+						prompt = "\n";
+				}
+
+				if (applet_putstr(appctx, prompt) != -1) {
+					applet_reset_svcctx(appctx);
+					appctx->st0 = CLI_ST_GETREQ;
+				}
+			}
+
+			/* If the output functions are still there, it means they require more room. */
+			if (appctx->st0 >= CLI_ST_OUTPUT) {
+				applet_wont_consume(appctx);
+				break;
+			}
+
+			/* Now we close the output if we're not in interactive
+			 * mode and the request buffer is empty. This still
+			 * allows pipelined requests to be sent in
+			 * non-interactive mode.
+			 */
+			if (!(appctx->st1 & APPCTX_CLI_ST1_PROMPT) && !co_data(req) && (!(appctx->st1 & APPCTX_CLI_ST1_PAYLOAD))) {
+				se_fl_set(appctx->sedesc, SE_FL_EOI);
+				appctx->st0 = CLI_ST_END;
+				continue;
+			}
+
+			/* switch state back to GETREQ to read next requests */
+			applet_reset_svcctx(appctx);
+			appctx->st0 = CLI_ST_GETREQ;
+			applet_will_consume(appctx);
+			applet_expect_data(appctx);
+
+			/* reactivate the \n at the end of the response for the next command */
+			appctx->st1 &= ~APPCTX_CLI_ST1_NOLF;
+
+			/* this forces us to yield between pipelined commands and
+			 * avoid extremely long latencies (e.g. "del map" etc). In
+			 * addition this increases the likelihood that the stream
+			 * refills the buffer with new bytes in non-interactive
+			 * mode, avoiding to close on apparently empty commands.
+			 */
+			if (co_data(sc_oc(sc))) {
+				appctx_wakeup(appctx);
+				goto out;
+			}
+		}
+	}
+
+ out:
+	return;
+}
+
+/* This is called when the stream connector is closed. For instance, upon an
+ * external abort, we won't call the i/o handler anymore so we may need to
+ * remove back references to the stream currently being dumped. It releases
+ * the request chunk, runs the keyword's io_release callback if one is
+ * registered, and otherwise frees any dynamic message still pending in the
+ * print context (mirroring what cli_io_handler would have freed).
+ */
+static void cli_release_handler(struct appctx *appctx)
+{
+	free_trash_chunk(appctx->chunk);
+	appctx->chunk = NULL;
+
+	if (appctx->io_release) {
+		appctx->io_release(appctx);
+		appctx->io_release = NULL;
+	}
+	else if (appctx->st0 == CLI_ST_PRINT_DYN || appctx->st0 == CLI_ST_PRINT_DYNERR) {
+		/* a dynamically allocated message was never emitted: free it now */
+		struct cli_print_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+		ha_free(&ctx->err);
+	}
+	else if (appctx->st0 == CLI_ST_PRINT_UMSG || appctx->st0 == CLI_ST_PRINT_UMSGERR) {
+		usermsgs_clr(NULL);
+	}
+}
+
+/* This function dumps all environment variables to the buffer. It returns 0
+ * if the output buffer is full and it needs to be called again, otherwise
+ * non-zero. It takes its context from the show_env_ctx in svcctx, and will
+ * start from ->var and dump only one variable if ->show_one is set.
+ */
+static int cli_io_handler_show_env(struct appctx *appctx)
+{
+	struct show_env_ctx *ctx = appctx->svcctx;
+	struct stconn *sc = appctx_sc(appctx);
+	char **var = ctx->var;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		return 1;
+
+	chunk_reset(&trash);
+
+	/* we have two inner loops here, one for the proxy, the other one for
+	 * the buffer.
+	 */
+	while (*var) {
+		chunk_printf(&trash, "%s\n", *var);
+
+		/* buffer full: remember progress in ctx->var and come back later */
+		if (applet_putchk(appctx, &trash) == -1)
+			return 0;
+
+		if (ctx->show_one)
+			break;
+		var++;
+		ctx->var = var;
+	}
+
+	/* dump complete */
+	return 1;
+}
+
+/* This function dumps all file descriptors states (or the requested one) to
+ * the buffer. It returns 0 if the output buffer is full and it needs to be
+ * called again, otherwise non-zero. It takes its context from the show_fd_ctx
+ * in svcctx, only dumps one entry if ->show_one is non-zero, and (re)starts
+ * from ->fd. The dump runs under thread isolation so fdtab entries are
+ * consistent while being formatted.
+ */
+static int cli_io_handler_show_fd(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct show_fd_ctx *fdctx = appctx->svcctx;
+	uint match = fdctx->show_mask;
+	int fd = fdctx->fd;
+	int ret = 1;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		goto end;
+
+	chunk_reset(&trash);
+
+	/* isolate the threads once per round. We're limited to a buffer worth
+	 * of output anyway, it cannot last very long.
+	 */
+	thread_isolate();
+
+	/* we have two inner loops here, one for the proxy, the other one for
+	 * the buffer.
+	 */
+	while (fd >= 0 && fd < global.maxsock) {
+		struct fdtab fdt;
+		const struct listener *li = NULL;
+		const struct server *sv = NULL;
+		const struct proxy *px = NULL;
+		const struct connection *conn = NULL;
+		const struct mux_ops *mux = NULL;
+		const struct xprt_ops *xprt = NULL;
+		const void *ctx = NULL;
+		const void *xprt_ctx = NULL;
+		const struct quic_conn *qc = NULL;
+		uint32_t conn_flags = 0;
+		uint8_t conn_err = 0;
+		int is_back = 0;
+		int suspicious = 0;
+
+		/* take a local copy so fields stay coherent while formatting */
+		fdt = fdtab[fd];
+
+		/* When DEBUG_FD is set, we also report closed FDs that have a
+		 * non-null event count to detect stuck ones.
+		 */
+		if (!fdt.owner) {
+#ifdef DEBUG_FD
+			if (!fdt.event_count)
+#endif
+				goto skip; // closed
+		}
+		else if (fdt.iocb == sock_conn_iocb) {
+			/* regular connection FD: resolve its target and layers */
+			conn = (const struct connection *)fdt.owner;
+			conn_flags = conn->flags;
+			conn_err = conn->err_code;
+			mux = conn->mux;
+			ctx = conn->ctx;
+			xprt = conn->xprt;
+			xprt_ctx = conn->xprt_ctx;
+			li = objt_listener(conn->target);
+			sv = objt_server(conn->target);
+			px = objt_proxy(conn->target);
+			is_back = conn_is_back(conn);
+			if (atleast2(fdt.thread_mask))
+				suspicious = 1;
+			if (conn->handle.fd != fd)
+				suspicious = 1;
+		}
+#if defined(USE_QUIC)
+		else if (fdt.iocb == quic_conn_sock_fd_iocb) {
+			qc = fdtab[fd].owner;
+			li = qc ? qc->li : NULL;
+			xprt_ctx = qc ? qc->xprt_ctx : NULL;
+			conn = qc ? qc->conn : NULL;
+			xprt = conn ? conn->xprt : NULL; // in fact it's &ssl_quic
+			mux = conn ? conn->mux : NULL;
+			/* quic_conns don't always have a connection but they
+			 * always have an xprt_ctx.
+			 */
+		}
+		else if (fdt.iocb == quic_lstnr_sock_fd_iocb) {
+			li = objt_listener(fdtab[fd].owner);
+		}
+#endif
+		else if (fdt.iocb == sock_accept_iocb)
+			li = fdt.owner;
+
+		/* apply the user-requested type filter (show_mask) */
+		if (!(((conn || xprt_ctx) &&
+		       ((match & CLI_SHOWFD_F_SV && sv) ||
+			(match & CLI_SHOWFD_F_PX && px) ||
+			(match & CLI_SHOWFD_F_FE && li))) ||
+		      (!conn &&
+		       ((match & CLI_SHOWFD_F_LI && li) ||
+			(match & CLI_SHOWFD_F_PI && !li /* only pipes match this */))))) {
+			/* not a desired type */
+			goto skip;
+		}
+
+		if (!fdt.thread_mask)
+			suspicious = 1;
+
+		chunk_printf(&trash,
+			     " %5d : st=0x%06x(%c%c %c%c%c%c%c W:%c%c%c R:%c%c%c) ref=%#x gid=%d tmask=0x%lx umask=0x%lx prmsk=0x%lx pwmsk=0x%lx owner=%p iocb=%p(",
+			     fd,
+			     fdt.state,
+			     (fdt.state & FD_CLONED) ? 'C' : 'c',
+			     (fdt.state & FD_LINGER_RISK) ? 'L' : 'l',
+			     (fdt.state & FD_POLL_HUP) ? 'H' : 'h',
+			     (fdt.state & FD_POLL_ERR) ? 'E' : 'e',
+			     (fdt.state & FD_POLL_OUT) ? 'O' : 'o',
+			     (fdt.state & FD_POLL_PRI) ? 'P' : 'p',
+			     (fdt.state & FD_POLL_IN) ? 'I' : 'i',
+			     (fdt.state & FD_EV_SHUT_W) ? 'S' : 's',
+			     (fdt.state & FD_EV_READY_W) ? 'R' : 'r',
+			     (fdt.state & FD_EV_ACTIVE_W) ? 'A' : 'a',
+			     (fdt.state & FD_EV_SHUT_R) ? 'S' : 's',
+			     (fdt.state & FD_EV_READY_R) ? 'R' : 'r',
+			     (fdt.state & FD_EV_ACTIVE_R) ? 'A' : 'a',
+			     (fdt.refc_tgid >> 4) & 0xffff,
+			     (fdt.refc_tgid) & 0xffff,
+			     fdt.thread_mask, fdt.update_mask,
+			     polled_mask[fd].poll_recv,
+			     polled_mask[fd].poll_send,
+			     fdt.owner,
+			     fdt.iocb);
+		resolve_sym_name(&trash, NULL, fdt.iocb);
+
+		if (!fdt.owner) {
+			chunk_appendf(&trash, ")");
+		}
+		else if (conn) {
+			chunk_appendf(&trash, ") back=%d cflg=0x%08x cerr=%d", is_back, conn_flags, conn_err);
+
+			if (!(conn->flags & CO_FL_FDLESS) && conn->handle.fd != fd) {
+				chunk_appendf(&trash, " fd=%d(BOGUS)", conn->handle.fd);
+				suspicious = 1;
+			} else if ((conn->flags & CO_FL_FDLESS) && (qc != conn->handle.qc)) {
+				chunk_appendf(&trash, " qc=%p(BOGUS)", conn->handle.qc);
+				suspicious = 1;
+			} else {
+				/* report local and remote addresses when available */
+				struct sockaddr_storage sa;
+				socklen_t salen;
+
+				salen = sizeof(sa);
+				if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) {
+					if (sa.ss_family == AF_INET)
+						chunk_appendf(&trash, " fam=ipv4 lport=%d", ntohs(((const struct sockaddr_in *)&sa)->sin_port));
+					else if (sa.ss_family == AF_INET6)
+						chunk_appendf(&trash, " fam=ipv6 lport=%d", ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port));
+					else if (sa.ss_family == AF_UNIX)
+						chunk_appendf(&trash, " fam=unix");
+				}
+
+				salen = sizeof(sa);
+				if (getpeername(fd, (struct sockaddr *)&sa, &salen) != -1) {
+					if (sa.ss_family == AF_INET)
+						chunk_appendf(&trash, " rport=%d", ntohs(((const struct sockaddr_in *)&sa)->sin_port));
+					else if (sa.ss_family == AF_INET6)
+						chunk_appendf(&trash, " rport=%d", ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port));
+				}
+			}
+
+			if (px)
+				chunk_appendf(&trash, " px=%s", px->id);
+			else if (sv)
+				chunk_appendf(&trash, " sv=%s/%s", sv->proxy->id, sv->id);
+			else if (li)
+				chunk_appendf(&trash, " fe=%s", li->bind_conf->frontend->id);
+
+			if (mux) {
+				chunk_appendf(&trash, " mux=%s ctx=%p", mux->name, ctx);
+				if (!ctx && !qc)
+					suspicious = 1;
+				if (mux->show_fd)
+					suspicious |= mux->show_fd(&trash, fdt.owner);
+			}
+			else
+				chunk_appendf(&trash, " nomux");
+
+			chunk_appendf(&trash, " xprt=%s", xprt ? xprt->name : "");
+			if (xprt) {
+				if (xprt_ctx || xprt->show_fd)
+					chunk_appendf(&trash, " xprt_ctx=%p", xprt_ctx);
+				if (xprt->show_fd)
+					suspicious |= xprt->show_fd(&trash, conn, xprt_ctx);
+			}
+		}
+		else if (li && !xprt_ctx) {
+			struct sockaddr_storage sa;
+			socklen_t salen;
+
+			chunk_appendf(&trash, ") l.st=%s fe=%s",
+				      listener_state_str(li),
+				      li->bind_conf->frontend->id);
+
+			salen = sizeof(sa);
+			if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) {
+				if (sa.ss_family == AF_INET)
+					chunk_appendf(&trash, " fam=ipv4 lport=%d", ntohs(((const struct sockaddr_in *)&sa)->sin_port));
+				else if (sa.ss_family == AF_INET6)
+					chunk_appendf(&trash, " fam=ipv6 lport=%d", ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port));
+				else if (sa.ss_family == AF_UNIX)
+					chunk_appendf(&trash, " fam=unix");
+			}
+		}
+		else
+			chunk_appendf(&trash, ")");
+
+#ifdef DEBUG_FD
+		chunk_appendf(&trash, " evcnt=%u", fdtab[fd].event_count);
+		if (fdtab[fd].event_count >= 1000000)
+			suspicious = 1;
+#endif
+		chunk_appendf(&trash, "%s\n", suspicious ? " !" : "");
+
+		/* buffer full: remember the current fd and come back later */
+		if (applet_putchk(appctx, &trash) == -1) {
+			fdctx->fd = fd;
+			ret = 0;
+			break;
+		}
+	skip:
+		if (fdctx->show_one)
+			break;
+
+		fd++;
+	}
+
+ end:
+	/* dump complete */
+
+	thread_release();
+	return ret;
+}
+
+/*
+ * CLI IO handler for `show cli sockets`.
+ * Uses the svcctx as a show_sock_ctx to store/retrieve the bind_conf and the
+ * listener pointers so the dump can resume where it stopped when the output
+ * buffer fills up. Returns 1 when the dump is complete, 0 to be called again.
+ */
+static int cli_io_handler_show_cli_sock(struct appctx *appctx)
+{
+	struct show_sock_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	struct bind_conf *bind_conf = ctx->bind_conf;
+
+	if (!global.cli_fe)
+		goto done;
+
+	chunk_reset(&trash);
+
+	if (!bind_conf) {
+		/* first call */
+		if (applet_putstr(appctx, "# socket lvl processes\n") == -1)
+			goto full;
+		bind_conf = LIST_ELEM(global.cli_fe->conf.bind.n, typeof(bind_conf), by_fe);
+	}
+
+	list_for_each_entry_from(bind_conf, &global.cli_fe->conf.bind, by_fe) {
+		struct listener *l = ctx->listener;
+
+		if (!l)
+			l = LIST_ELEM(bind_conf->listeners.n, typeof(l), by_bind);
+
+		list_for_each_entry_from(l, &bind_conf->listeners, by_bind) {
+			char addr[46];
+			char port[6];
+
+			if (l->rx.addr.ss_family == AF_UNIX) {
+				const struct sockaddr_un *un;
+
+				un = (struct sockaddr_un *)&l->rx.addr;
+				/* a leading NUL in sun_path denotes an abstract socket */
+				if (un->sun_path[0] == '\0') {
+					chunk_appendf(&trash, "abns@%s ", un->sun_path+1);
+				} else {
+					chunk_appendf(&trash, "unix@%s ", un->sun_path);
+				}
+			} else if (l->rx.addr.ss_family == AF_INET) {
+				addr_to_str(&l->rx.addr, addr, sizeof(addr));
+				port_to_str(&l->rx.addr, port, sizeof(port));
+				chunk_appendf(&trash, "ipv4@%s:%s ", addr, port);
+			} else if (l->rx.addr.ss_family == AF_INET6) {
+				addr_to_str(&l->rx.addr, addr, sizeof(addr));
+				port_to_str(&l->rx.addr, port, sizeof(port));
+				chunk_appendf(&trash, "ipv6@[%s]:%s ", addr, port);
+			} else if (l->rx.addr.ss_family == AF_CUST_SOCKPAIR) {
+				chunk_appendf(&trash, "sockpair@%d ", ((struct sockaddr_in *)&l->rx.addr)->sin_addr.s_addr);
+			} else
+				chunk_appendf(&trash, "unknown ");
+
+			if ((bind_conf->level & ACCESS_LVL_MASK) == ACCESS_LVL_ADMIN)
+				chunk_appendf(&trash, "admin ");
+			else if ((bind_conf->level & ACCESS_LVL_MASK) == ACCESS_LVL_OPER)
+				chunk_appendf(&trash, "operator ");
+			else if ((bind_conf->level & ACCESS_LVL_MASK) == ACCESS_LVL_USER)
+				chunk_appendf(&trash, "user ");
+			else
+				chunk_appendf(&trash, "  ");
+
+			chunk_appendf(&trash, "all\n");
+
+			/* buffer full: save the resume point */
+			if (applet_putchk(appctx, &trash) == -1) {
+				ctx->bind_conf = bind_conf;
+				ctx->listener  = l;
+				goto full;
+			}
+		}
+	}
+ done:
+	return 1;
+ full:
+	return 0;
+}
+
+
+/* parse a "show env" CLI request. Returns 0 if it needs to continue, 1 if it
+ * wants to stop here. It reserves a show_env_ctx where it puts the variable to
+ * be dumped as well as a flag if a single variable is requested, otherwise puts
+ * environ there. Requires at least operator level.
+ */
+static int cli_parse_show_env(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct show_env_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	extern char **environ;
+	char **var;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	var = environ;
+
+	if (*args[2]) {
+		int len = strlen(args[2]);
+
+		/* look up the "NAME=" prefix to dump a single variable */
+		for (; *var; var++) {
+			if (strncmp(*var, args[2], len) == 0 &&
+			    (*var)[len] == '=')
+				break;
+		}
+		if (!*var)
+			return cli_err(appctx, "Variable not found\n");
+
+		ctx->show_one = 1;
+	}
+	ctx->var = var;
+	return 0;
+}
+
+/* parse a "show fd" CLI request. Returns 0 if it needs to continue, 1 if it
+ * wants to stop here. It sets a show_fd_ctx context where, if a specific fd is
+ * requested, it puts the FD number into ->fd and sets ->show_one to 1.
+ * Optional filter words made of the letters [plcfbsd] (possibly negated with
+ * '!' or '-') may precede the FD number and are folded into ->show_mask.
+ */
+static int cli_parse_show_fd(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct show_fd_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	const char *c;
+	int arg;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	arg = 2;
+
+	/* when starting with an inversion we preset every flag */
+	if (*args[arg] == '!' || *args[arg] == '-')
+		ctx->show_mask = CLI_SHOWFD_F_ANY;
+
+	/* consume the optional filter words until the trailing FD number */
+	while (*args[arg] && !isdigit((uchar)*args[arg])) {
+		uint flag = 0, inv = 0;
+		c = args[arg];
+		while (*c) {
+			switch (*c) {
+			case '!': inv = !inv; break;
+			case '-': inv = !inv; break;
+			case 'p': flag = CLI_SHOWFD_F_PI;  break;
+			case 'l': flag = CLI_SHOWFD_F_LI;  break;
+			case 'c': flag = CLI_SHOWFD_F_CO;  break;
+			case 'f': flag = CLI_SHOWFD_F_FE;  break;
+			case 'b': flag = CLI_SHOWFD_F_BE;  break;
+			case 's': flag = CLI_SHOWFD_F_SV;  break;
+			case 'd': flag = CLI_SHOWFD_F_PX;  break;
+			default: return cli_err(appctx, "Invalid FD type\n");
+			}
+			c++;
+			if (!inv)
+				ctx->show_mask |= flag;
+			else
+				ctx->show_mask &= ~flag;
+		}
+		arg++;
+	}
+
+	/* default mask is to show everything */
+	if (!ctx->show_mask)
+		ctx->show_mask = CLI_SHOWFD_F_ANY;
+
+	if (*args[arg]) {
+		/* BUG FIX: read the FD number from args[arg], not args[2]:
+		 * when filter words were consumed above, the number is no
+		 * longer at position 2 and args[2] would wrongly parse the
+		 * filter word (yielding fd 0).
+		 */
+		ctx->fd = atoi(args[arg]);
+		ctx->show_one = 1;
+	}
+
+	return 0;
+}
+
+/* parse a "set timeout" CLI request. It always returns 1. Only the "cli"
+ * timeout is supported; the value is a duration in seconds (parse_time_err)
+ * applied to the current session's frontend stream connector.
+ */
+static int cli_parse_set_timeout(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct stream *s = appctx_strm(appctx);
+
+	if (strcmp(args[2], "cli") == 0) {
+		unsigned timeout;
+		const char *res;
+
+		if (!*args[3])
+			return cli_err(appctx, "Expects an integer value.\n");
+
+		res = parse_time_err(args[3], &timeout, TIME_UNIT_S);
+		if (res || timeout < 1)
+			return cli_err(appctx, "Invalid timeout value.\n");
+
+		/* +1 tick so a freshly set timeout never expires immediately */
+		s->scf->ioto = 1 + MS_TO_TICKS(timeout*1000);
+		task_wakeup(s->task, TASK_WOKEN_MSG); // recompute timeouts
+		return 1;
+	}
+
+	return cli_err(appctx, "'set timeout' only supports 'cli'.\n");
+}
+
+/* parse a "set maxconn global" command. It always returns 1. Requires admin
+ * level. Values above global.hardmaxconn are rejected; zero or negative
+ * values mean "unlimited" and are clamped to hardmaxconn. Listeners blocked
+ * on the connection limit are woken up afterwards.
+ */
+static int cli_parse_set_maxconn_global(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int v;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[3])
+		return cli_err(appctx, "Expects an integer value.\n");
+
+	v = atoi(args[3]);
+	if (v > global.hardmaxconn)
+		return cli_err(appctx, "Value out of range.\n");
+
+	/* check for unlimited values */
+	if (v <= 0)
+		v = global.hardmaxconn;
+
+	global.maxconn = v;
+
+	/* Dequeues all of the listeners waiting for a resource */
+	dequeue_all_listeners();
+
+	return 1;
+}
+
+/* Translates the keyword <argument> ("none", "number" or "string") into the
+ * matching CLI_SEVERITY_* value and stores it into <target>. Returns 1 on a
+ * recognized keyword, 0 otherwise (in which case <target> is left untouched).
+ */
+static int set_severity_output(int *target, char *argument)
+{
+	/* keyword -> severity-output mode mapping */
+	static const struct {
+		const char *name;
+		int value;
+	} modes[] = {
+		{ "none",   CLI_SEVERITY_NONE   },
+		{ "number", CLI_SEVERITY_NUMBER },
+		{ "string", CLI_SEVERITY_STRING },
+	};
+	size_t idx;
+
+	for (idx = 0; idx < sizeof(modes) / sizeof(modes[0]); idx++) {
+		if (strcmp(argument, modes[idx].name) == 0) {
+			*target = modes[idx].value;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/* parse a "set severity-output" command. Returns 0 on success (keyword
+ * accepted and stored in the appctx), or the result of cli_err() otherwise.
+ * A trailing "-" argument suppresses the response's final LF.
+ */
+static int cli_parse_set_severity_output(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	/* this will ask the applet to not output a \n after the command */
+	if (strcmp(args[3], "-") == 0)
+		appctx->st1 |= APPCTX_CLI_ST1_NOLF;
+
+	if (*args[2] && set_severity_output(&appctx->cli_severity_output, args[2]))
+		return 0;
+
+	return cli_err(appctx, "one of 'none', 'number', 'string' is a required argument\n");
+}
+
+
+/* Reports the access level of the current CLI session ("admin", "operator",
+ * "user" or "unknown") as an informational message. Always returns the result
+ * of cli_msg().
+ */
+static int cli_parse_show_lvl(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	const char *lvl_msg;
+
+	switch (appctx->cli_level & ACCESS_LVL_MASK) {
+	case ACCESS_LVL_ADMIN:
+		lvl_msg = "admin\n";
+		break;
+	case ACCESS_LVL_OPER:
+		lvl_msg = "operator\n";
+		break;
+	case ACCESS_LVL_USER:
+		lvl_msg = "user\n";
+		break;
+	default:
+		lvl_msg = "unknown\n";
+		break;
+	}
+	return cli_msg(appctx, LOG_INFO, lvl_msg);
+}
+
+/* parse and set the CLI level dynamically. The level can only be lowered
+ * (cli_has_level() rejects raising it); expert/experimental flags are
+ * dropped in all cases. Always returns 1.
+ */
+static int cli_parse_set_lvl(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	/* this will ask the applet to not output a \n after the command */
+	if (strcmp(args[1], "-") == 0)
+		appctx->st1 |= APPCTX_CLI_ST1_NOLF;
+
+	if (strcmp(args[0], "operator") == 0) {
+		if (!cli_has_level(appctx, ACCESS_LVL_OPER)) {
+			return 1;
+		}
+		appctx->cli_level &= ~ACCESS_LVL_MASK;
+		appctx->cli_level |= ACCESS_LVL_OPER;
+
+	} else if (strcmp(args[0], "user") == 0) {
+		if (!cli_has_level(appctx, ACCESS_LVL_USER)) {
+			return 1;
+		}
+		appctx->cli_level &= ~ACCESS_LVL_MASK;
+		appctx->cli_level |= ACCESS_LVL_USER;
+	}
+	/* lowering the level always strips the extended modes */
+	appctx->cli_level &= ~(ACCESS_EXPERT|ACCESS_EXPERIMENTAL);
+	return 1;
+}
+
+
+/* parse and set the CLI expert/experimental/mcli-debug mode dynamically.
+ * Without an argument the current state ("ON"/"OFF") is reported; with
+ * "on"/anything-else the corresponding ACCESS_* bit is set/cleared on the
+ * session. Requires admin level. Always returns 1 (or the cli_dynmsg result
+ * for the query form).
+ */
+static int cli_parse_expert_experimental_mode(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int level;
+	char *level_str;
+	char *output = NULL;
+
+	/* this will ask the applet to not output a \n after the command */
+	if (*args[1] && *args[2] && strcmp(args[2], "-") == 0)
+		appctx->st1 |= APPCTX_CLI_ST1_NOLF;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (strcmp(args[0], "expert-mode") == 0) {
+		level = ACCESS_EXPERT;
+		level_str = "expert-mode";
+	}
+	else if (strcmp(args[0], "experimental-mode") == 0) {
+		level = ACCESS_EXPERIMENTAL;
+		level_str = "experimental-mode";
+	}
+	else if (strcmp(args[0], "mcli-debug-mode") == 0) {
+		level = ACCESS_MCLI_DEBUG;
+		level_str = "mcli-debug-mode";
+	}
+	else {
+		return 1;
+	}
+
+	if (!*args[1]) {
+		/* query form: report the current state of the mode */
+		memprintf(&output, "%s is %s\n", level_str,
+			  (appctx->cli_level & level) ? "ON" : "OFF");
+		return cli_dynmsg(appctx, LOG_INFO, output);
+	}
+
+	/* anything other than "on" disables the mode */
+	appctx->cli_level &= ~level;
+	if (strcmp(args[1], "on") == 0)
+		appctx->cli_level |= level;
+	return 1;
+}
+
+/* Emits the running HAProxy version string followed by an LF as an
+ * informational dynamic message. Returns the result of cli_dynmsg().
+ */
+static int cli_parse_show_version(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *msg = NULL;
+	char *formatted;
+
+	formatted = memprintf(&msg, "%s\n", haproxy_version);
+	return cli_dynmsg(appctx, LOG_INFO, formatted);
+}
+
+/* Default no-op parser for keywords whose whole work happens in the I/O
+ * handler: always returns 0 so processing continues with the registered
+ * io_handler.
+ */
+int cli_parse_default(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	return 0;
+}
+
+/* enable or disable the anonymized mode, it returns 1 when it works or displays an error message if it doesn't. */
+static int cli_parse_set_anon(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	uint32_t tmp;
+	long long key;
+
+	if (strcmp(args[2], "on") == 0) {
+
+		if (*args[3]) {
+			/* explicit per-session key; must fit in a non-zero uint32 */
+			key = atoll(args[3]);
+			if (key < 1 || key > UINT_MAX)
+				return cli_err(appctx, "Value out of range (1 to 4294967295 expected).\n");
+			appctx->cli_anon_key = key;
+		}
+		else {
+			/* no key given: inherit the global one, or pick a random one */
+			tmp = HA_ATOMIC_LOAD(&global.anon_key);
+			if (tmp != 0)
+				appctx->cli_anon_key = tmp;
+			else
+				appctx->cli_anon_key = ha_random32();
+		}
+	}
+	else if (strcmp(args[2], "off") == 0) {
+
+		if (*args[3]) {
+			return cli_err(appctx, "Key can't be added while disabling anonymized mode\n");
+		}
+		else {
+			appctx->cli_anon_key = 0;
+		}
+	}
+	else {
+		/* NOTE(review): no trailing LF here unlike other messages;
+		 * cli_output_msg() appends one anyway, so this is cosmetic only.
+		 */
+		return cli_err(appctx,
+			"'set anon' only supports :\n"
+			"   - 'on' [key] to enable the anonymized mode\n"
+			"   - 'off' to disable the anonymized mode");
+	}
+	return 1;
+}
+
+/* This function sets the global anonymizing key, restricted to level 'admin'.
+ * Accepts 0 (disable) to UINT_MAX; always returns the cli_err()/1 result.
+ */
+static int cli_parse_set_global_key(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	long long key;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return cli_err(appctx, "Permission denied\n");
+	if (!*args[2])
+		return cli_err(appctx, "Expects an integer value.\n");
+
+	key = atoll(args[2]);
+	if (key < 0 || key > UINT_MAX)
+		return cli_err(appctx, "Value out of range (0 to 4294967295 expected).\n");
+
+	HA_ATOMIC_STORE(&global.anon_key, key);
+	return 1;
+}
+
+/* shows the anonymized mode state to everyone, and the key except for users
+ * (operator level or above is required to see the key). It always returns 1.
+ */
+static int cli_parse_show_anon(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *msg = NULL;
+	char *anon_mode = NULL;
+	uint32_t c_key = appctx->cli_anon_key;
+
+	/* a zero key means the anonymized mode is disabled */
+	if (!c_key)
+		anon_mode = "Anonymized mode disabled";
+	else
+		anon_mode = "Anonymized mode enabled";
+
+	if ( !((appctx->cli_level & ACCESS_LVL_MASK) < ACCESS_LVL_OPER) && c_key != 0) {
+		cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\nKey : %u\n", anon_mode, c_key));
+	}
+	else {
+		cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\n", anon_mode));
+	}
+
+	return 1;
+}
+
+/* parse a "set rate-limit" command. It always returns 1. */
+static int cli_parse_set_ratelimit(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ int v;
+ int *res;
+ int mul = 1;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (strcmp(args[2], "connections") == 0 && strcmp(args[3], "global") == 0)
+ res = &global.cps_lim;
+ else if (strcmp(args[2], "sessions") == 0 && strcmp(args[3], "global") == 0)
+ res = &global.sps_lim;
+#ifdef USE_OPENSSL
+ else if (strcmp(args[2], "ssl-sessions") == 0 && strcmp(args[3], "global") == 0)
+ res = &global.ssl_lim;
+#endif
+ else if (strcmp(args[2], "http-compression") == 0 && strcmp(args[3], "global") == 0) {
+ res = &global.comp_rate_lim;
+ mul = 1024;
+ }
+ else {
+ return cli_err(appctx,
+ "'set rate-limit' only supports :\n"
+ " - 'connections global' to set the per-process maximum connection rate\n"
+ " - 'sessions global' to set the per-process maximum session rate\n"
+#ifdef USE_OPENSSL
+ " - 'ssl-sessions global' to set the per-process maximum SSL session rate\n"
+#endif
+ " - 'http-compression global' to set the per-process maximum compression speed in kB/s\n");
+ }
+
+ if (!*args[4])
+ return cli_err(appctx, "Expects an integer value.\n");
+
+ v = atoi(args[4]);
+ if (v < 0)
+ return cli_err(appctx, "Value out of range.\n");
+
+ *res = v * mul;
+
+ /* Dequeues all of the listeners waiting for a resource */
+ dequeue_all_listeners();
+
+ return 1;
+}
+
+/* parse the "expose-fd" argument on the bind lines */
+static int bind_parse_expose_fd(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing fd type", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ if (strcmp(args[cur_arg + 1], "listeners") == 0) {
+ conf->level |= ACCESS_FD_LISTENERS;
+ } else {
+ memprintf(err, "'%s' only supports 'listeners' (got '%s')",
+ args[cur_arg], args[cur_arg+1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "level" argument on the bind lines */
+static int bind_parse_level(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing level", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[cur_arg + 1], "user") == 0) {
+ conf->level &= ~ACCESS_LVL_MASK;
+ conf->level |= ACCESS_LVL_USER;
+ } else if (strcmp(args[cur_arg + 1], "operator") == 0) {
+ conf->level &= ~ACCESS_LVL_MASK;
+ conf->level |= ACCESS_LVL_OPER;
+ } else if (strcmp(args[cur_arg + 1], "admin") == 0) {
+ conf->level &= ~ACCESS_LVL_MASK;
+ conf->level |= ACCESS_LVL_ADMIN;
+ } else {
+ memprintf(err, "'%s' only supports 'user', 'operator', and 'admin' (got '%s')",
+ args[cur_arg], args[cur_arg+1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+static int bind_parse_severity_output(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing severity format", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (set_severity_output(&conf->severity_output, args[cur_arg+1]))
+ return 0;
+ else {
+ memprintf(err, "'%s' only supports 'none', 'number', and 'string' (got '%s')",
+ args[cur_arg], args[cur_arg+1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+}
+
+/* Send all the bound sockets, always returns 1 */
+static int _getsocks(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ static int already_sent = 0;
+ char *cmsgbuf = NULL;
+ unsigned char *tmpbuf = NULL;
+ struct cmsghdr *cmsg;
+ struct stconn *sc = appctx_sc(appctx);
+ struct stream *s = __sc_strm(sc);
+ struct connection *remote = sc_conn(sc_opposite(sc));
+ struct msghdr msghdr;
+ struct iovec iov;
+ struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+ const char *ns_name, *if_name;
+ unsigned char ns_nlen, if_nlen;
+ int nb_queued;
+ int cur_fd = 0;
+ int *tmpfd;
+ int tot_fd_nb = 0;
+ int fd = -1;
+ int curoff = 0;
+ int old_fcntl = -1;
+ int ret;
+
+ if (!remote) {
+ ha_warning("Only works on real connections\n");
+ goto out;
+ }
+
+ fd = remote->handle.fd;
+
+ /* Temporary set the FD in blocking mode, that will make our life easier */
+ old_fcntl = fcntl(fd, F_GETFL);
+ if (old_fcntl < 0) {
+ ha_warning("Couldn't get the flags for the unix socket\n");
+ goto out;
+ }
+ cmsgbuf = malloc(CMSG_SPACE(sizeof(int) * MAX_SEND_FD));
+ if (!cmsgbuf) {
+ ha_warning("Failed to allocate memory to send sockets\n");
+ goto out;
+ }
+ if (fcntl(fd, F_SETFL, old_fcntl &~ O_NONBLOCK) == -1) {
+ ha_warning("Cannot make the unix socket blocking\n");
+ goto out;
+ }
+ setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv));
+ iov.iov_base = &tot_fd_nb;
+ iov.iov_len = sizeof(tot_fd_nb);
+ if (!(strm_li(s)->bind_conf->level & ACCESS_FD_LISTENERS))
+ goto out;
+ memset(&msghdr, 0, sizeof(msghdr));
+ /*
+ * First, calculates the total number of FD, so that we can let
+ * the caller know how much it should expect.
+ */
+ for (cur_fd = 0;cur_fd < global.maxsock; cur_fd++)
+ tot_fd_nb += !!(fdtab[cur_fd].state & FD_EXPORTED);
+
+ if (tot_fd_nb == 0) {
+ if (already_sent)
+ ha_warning("_getsocks: attempt to get sockets but they were already sent and closed in this process!\n");
+ goto out;
+ }
+
+ /* First send the total number of file descriptors, so that the
+ * receiving end knows what to expect.
+ */
+ msghdr.msg_iov = &iov;
+ msghdr.msg_iovlen = 1;
+ ret = sendmsg(fd, &msghdr, 0);
+ if (ret != sizeof(tot_fd_nb)) {
+ ha_warning("Failed to send the number of sockets to send\n");
+ goto out;
+ }
+
+ /* Now send the fds */
+ msghdr.msg_control = cmsgbuf;
+ msghdr.msg_controllen = CMSG_SPACE(sizeof(int) * MAX_SEND_FD);
+ cmsg = CMSG_FIRSTHDR(&msghdr);
+ cmsg->cmsg_len = CMSG_LEN(MAX_SEND_FD * sizeof(int));
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ tmpfd = (int *)CMSG_DATA(cmsg);
+
+ /* For each socket, e message is sent, containing the following :
+ * Size of the namespace name (or 0 if none), as an unsigned char.
+ * The namespace name, if any
+ * Size of the interface name (or 0 if none), as an unsigned char
+ * The interface name, if any
+ * 32 bits of zeroes (used to be listener options).
+ */
+ /* We will send sockets MAX_SEND_FD per MAX_SEND_FD, allocate a
+ * buffer big enough to store the socket information.
+ */
+ tmpbuf = malloc(MAX_SEND_FD * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int)));
+ if (tmpbuf == NULL) {
+ ha_warning("Failed to allocate memory to transfer socket information\n");
+ goto out;
+ }
+
+ nb_queued = 0;
+ iov.iov_base = tmpbuf;
+ for (cur_fd = 0; cur_fd < global.maxsock; cur_fd++) {
+ if (!(fdtab[cur_fd].state & FD_EXPORTED))
+ continue;
+
+ ns_name = if_name = "";
+ ns_nlen = if_nlen = 0;
+
+ /* for now we can only retrieve namespaces and interfaces from
+ * pure listeners.
+ */
+ if (fdtab[cur_fd].iocb == sock_accept_iocb) {
+ const struct listener *l = fdtab[cur_fd].owner;
+
+ if (l->rx.settings->interface) {
+ if_name = l->rx.settings->interface;
+ if_nlen = strlen(if_name);
+ }
+
+#ifdef USE_NS
+ if (l->rx.settings->netns) {
+ ns_name = l->rx.settings->netns->node.key;
+ ns_nlen = l->rx.settings->netns->name_len;
+ }
+#endif
+ }
+
+ /* put the FD into the CMSG_DATA */
+ tmpfd[nb_queued++] = cur_fd;
+
+ /* first block is <ns_name_len> <ns_name> */
+ tmpbuf[curoff++] = ns_nlen;
+ if (ns_nlen)
+ memcpy(tmpbuf + curoff, ns_name, ns_nlen);
+ curoff += ns_nlen;
+
+ /* second block is <if_name_len> <if_name> */
+ tmpbuf[curoff++] = if_nlen;
+ if (if_nlen)
+ memcpy(tmpbuf + curoff, if_name, if_nlen);
+ curoff += if_nlen;
+
+ /* we used to send the listener options here before 2.3 */
+ memset(tmpbuf + curoff, 0, sizeof(int));
+ curoff += sizeof(int);
+
+ /* there's a limit to how many FDs may be sent at once */
+ if (nb_queued == MAX_SEND_FD) {
+ iov.iov_len = curoff;
+ if (sendmsg(fd, &msghdr, 0) != curoff) {
+ ha_warning("Failed to transfer sockets\n");
+ return -1;
+ }
+
+ /* Wait for an ack */
+ do {
+ ret = recv(fd, &tot_fd_nb, sizeof(tot_fd_nb), 0);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret <= 0) {
+ ha_warning("Unexpected error while transferring sockets\n");
+ return -1;
+ }
+ curoff = 0;
+ nb_queued = 0;
+ }
+ }
+
+ already_sent = 1;
+
+ /* flush pending stuff */
+ if (nb_queued) {
+ iov.iov_len = curoff;
+ cmsg->cmsg_len = CMSG_LEN(nb_queued * sizeof(int));
+ msghdr.msg_controllen = CMSG_SPACE(nb_queued * sizeof(int));
+ if (sendmsg(fd, &msghdr, 0) != curoff) {
+ ha_warning("Failed to transfer sockets\n");
+ goto out;
+ }
+ }
+
+out:
+ if (fd >= 0 && old_fcntl >= 0 && fcntl(fd, F_SETFL, old_fcntl) == -1) {
+ ha_warning("Cannot make the unix socket non-blocking\n");
+ goto out;
+ }
+ se_fl_set(appctx->sedesc, SE_FL_EOI);
+ appctx->st0 = CLI_ST_END;
+ free(cmsgbuf);
+ free(tmpbuf);
+ return 1;
+}
+
+static int cli_parse_simple(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ if (*args[0] == 'h')
+ /* help */
+ cli_gen_usage_msg(appctx, args);
+ else if (*args[0] == 'p')
+ /* prompt */
+ if (strcmp(args[1], "timed") == 0) {
+ appctx->st1 |= APPCTX_CLI_ST1_PROMPT;
+ appctx->st1 ^= APPCTX_CLI_ST1_TIMED;
+ }
+ else
+ appctx->st1 ^= APPCTX_CLI_ST1_PROMPT;
+ else if (*args[0] == 'q') {
+ /* quit */
+ se_fl_set(appctx->sedesc, SE_FL_EOI);
+ appctx->st0 = CLI_ST_END;
+ }
+
+ return 1;
+}
+
/* Writes the master CLI prompt into the output channel of stream <s>, if the
 * prompt is enabled for this session (PCLI_F_PROMPT); otherwise does nothing.
 * In payload mode a "+ " continuation prompt is emitted. Otherwise the prompt
 * names the current default target ("master", possibly "[ReloadFailed]", or a
 * worker's PID), optionally prefixed with the uptime as [d:hh:mm:ss] when
 * "prompt timed" is active, and suffixed with the session's special access
 * modes: (x) experimental, (e) expert, (d) mcli-debug.
 */
void pcli_write_prompt(struct stream *s)
{
	struct buffer *msg = get_trash_chunk();
	struct channel *oc = sc_oc(s->scf);

	if (!(s->pcli_flags & PCLI_F_PROMPT))
		return;

	if (s->pcli_flags & PCLI_F_PAYLOAD) {
		/* in payload mode, just emit a continuation prompt */
		chunk_appendf(msg, "+ ");
	} else {
		if (s->pcli_next_pid == 0) {
			/* master's prompt */
			if (s->pcli_flags & PCLI_F_TIMED) {
				/* master uptime is counted from the process start time */
				uint up = ns_to_sec(now_ns - start_time_ns);
				chunk_appendf(msg, "[%u:%02u:%02u:%02u] ",
					      (up / 86400), (up / 3600) % 24, (up / 60) % 60, up % 60);
			}

			chunk_appendf(msg, "master%s",
				      (proc_self->failedreloads > 0) ? "[ReloadFailed]" : "");
		}
		else {
			/* worker's prompt */
			if (s->pcli_flags & PCLI_F_TIMED) {
				const struct mworker_proc *tmp, *proc;
				uint up;

				/* set proc to the worker corresponding to pcli_next_pid or NULL */
				proc = NULL;
				list_for_each_entry(tmp, &proc_list, list) {
					if (!(tmp->options & PROC_O_TYPE_WORKER))
						continue;
					if (tmp->pid == s->pcli_next_pid) {
						proc = tmp;
						break;
					}
				}

				if (!proc)
					chunk_appendf(msg, "[gone] ");
				else {
					/* worker uptime is counted from its fork timestamp */
					up = date.tv_sec - proc->timestamp;
					if ((int)up < 0) /* must never be negative because of clock drift */
						up = 0;
					chunk_appendf(msg, "[%u:%02u:%02u:%02u] ",
						      (up / 86400), (up / 3600) % 24, (up / 60) % 60, up % 60);
				}
			}
			chunk_appendf(msg, "%d", s->pcli_next_pid);
		}

		/* append single-letter flags for the active access modes */
		if (s->pcli_flags & (ACCESS_EXPERIMENTAL|ACCESS_EXPERT|ACCESS_MCLI_DEBUG)) {
			chunk_appendf(msg, "(");

			if (s->pcli_flags & ACCESS_EXPERIMENTAL)
				chunk_appendf(msg, "x");

			if (s->pcli_flags & ACCESS_EXPERT)
				chunk_appendf(msg, "e");

			if (s->pcli_flags & ACCESS_MCLI_DEBUG)
				chunk_appendf(msg, "d");

			chunk_appendf(msg, ")");
		}

		chunk_appendf(msg, "> ");


	}
	/* push the prompt straight into the output channel */
	co_inject(oc, msg->area, msg->data);
}
+
+/* The pcli_* functions are used for the CLI proxy in the master */
+
+
+/* flush the input buffer and output an error */
+void pcli_error(struct stream *s, const char *msg)
+{
+ struct buffer *buf = get_trash_chunk();
+ struct channel *oc = &s->res;
+ struct channel *ic = &s->req;
+
+ chunk_initstr(buf, msg);
+
+ if (likely(buf && buf->data))
+ co_inject(oc, buf->area, buf->data);
+
+ channel_erase(ic);
+
+}
+
/* flush the input buffer, output the error and close */
void pcli_reply_and_close(struct stream *s, const char *msg)
{
	struct buffer *tmp = get_trash_chunk();

	/* send <msg> as the final response and terminate the stream */
	chunk_initstr(tmp, msg);
	stream_retnclose(s, tmp);
}
+
+static enum obj_type *pcli_pid_to_server(int proc_pid)
+{
+ struct mworker_proc *child;
+
+ /* return the mCLI applet of the master */
+ if (proc_pid == 0)
+ return &mcli_applet.obj_type;
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->pid == proc_pid){
+ return &child->srv->obj_type;
+ }
+ }
+ return NULL;
+}
+
+/* Take a CLI prefix in argument (eg: @!1234 @master @1)
+ * Return:
+ * 0: master
+ * > 0: pid of a worker
+ * < 0: didn't find a worker
+ */
+static int pcli_prefix_to_pid(const char *prefix)
+{
+ int proc_pid;
+ struct mworker_proc *child;
+ char *errtol = NULL;
+
+ if (*prefix != '@') /* not a prefix, should not happen */
+ return -1;
+
+ prefix++;
+ if (!*prefix) /* sent @ alone, return the master */
+ return 0;
+
+ if (strcmp("master", prefix) == 0) {
+ return 0;
+ } else if (*prefix == '!') {
+ prefix++;
+ if (!*prefix)
+ return -1;
+
+ proc_pid = strtol(prefix, &errtol, 10);
+ if (*errtol != '\0')
+ return -1;
+ list_for_each_entry(child, &proc_list, list) {
+ if (!(child->options & PROC_O_TYPE_WORKER))
+ continue;
+ if (child->pid == proc_pid){
+ return child->pid;
+ }
+ }
+ } else {
+ struct mworker_proc *chosen = NULL;
+ /* this is a relative pid */
+
+ proc_pid = strtol(prefix, &errtol, 10);
+ if (*errtol != '\0')
+ return -1;
+
+ if (proc_pid == 0) /* return the master */
+ return 0;
+
+ if (proc_pid != 1) /* only the "@1" relative PID is supported */
+ return -1;
+
+ /* chose the right process, the current one is the one with the
+ least number of reloads */
+ list_for_each_entry(child, &proc_list, list) {
+ if (!(child->options & PROC_O_TYPE_WORKER))
+ continue;
+ if (child->reloads == 0)
+ return child->pid;
+ else if (chosen == NULL || child->reloads < chosen->reloads)
+ chosen = child;
+ }
+ if (chosen)
+ return chosen->pid;
+ }
+ return -1;
+}
+
/* Looks for a master-side keyword at the start of <args> (argl words) and
 * executes it locally in the master instead of forwarding it to a worker.
 * Handles "@..." PID prefixes, "prompt", "quit", the access level keywords
 * ("operator", "user"), the mode toggles ("expert-mode", "experimental-mode",
 * "mcli-debug-mode") and "set severity-output". On success, <next_pid> or
 * the stream's default target may be updated.
 *
 * Return:
 *   >= 0 : number of words consumed (to strip before forwarding)
 *   = -1 : error, with a message allocated into <errmsg>
 */
int pcli_find_and_exec_kw(struct stream *s, char **args, int argl, char **errmsg, int *next_pid)
{
	if (argl < 1)
		return 0;

	/* there is a prefix */
	if (args[0][0] == '@') {
		int target_pid = pcli_prefix_to_pid(args[0]);

		if (target_pid == -1) {
			memprintf(errmsg, "Can't find the target PID matching the prefix '%s'\n", args[0]);
			return -1;
		}

		/* if the prefix is alone, define a default target */
		if (argl == 1)
			s->pcli_next_pid = target_pid;
		else
			*next_pid = target_pid;
		return 1;
	} else if (strcmp("prompt", args[0]) == 0) {
		if (argl >= 2 && strcmp(args[1], "timed") == 0) {
			/* "prompt timed" enables the prompt and toggles the uptime display */
			s->pcli_flags |= PCLI_F_PROMPT;
			s->pcli_flags ^= PCLI_F_TIMED;
		}
		else
			s->pcli_flags ^= PCLI_F_PROMPT;
		return argl; /* return the number of elements in the array */
	} else if (strcmp("quit", args[0]) == 0) {
		sc_schedule_abort(s->scf);
		sc_schedule_shutdown(s->scf);
		return argl; /* return the number of elements in the array */
	} else if (strcmp(args[0], "operator") == 0) {
		/* changing the level requires at least that level already */
		if (!pcli_has_level(s, ACCESS_LVL_OPER)) {
			memprintf(errmsg, "Permission denied!\n");
			return -1;
		}
		s->pcli_flags &= ~ACCESS_LVL_MASK;
		s->pcli_flags |= ACCESS_LVL_OPER;
		return argl;

	} else if (strcmp(args[0], "user") == 0) {
		if (!pcli_has_level(s, ACCESS_LVL_USER)) {
			memprintf(errmsg, "Permission denied!\n");
			return -1;
		}
		s->pcli_flags &= ~ACCESS_LVL_MASK;
		s->pcli_flags |= ACCESS_LVL_USER;
		return argl;

	} else if (strcmp(args[0], "expert-mode") == 0) {
		if (!pcli_has_level(s, ACCESS_LVL_ADMIN)) {
			memprintf(errmsg, "Permission denied!\n");
			return -1;
		}

		/* clear first, re-enable only on an explicit "on" */
		s->pcli_flags &= ~ACCESS_EXPERT;
		if ((argl > 1) && (strcmp(args[1], "on") == 0))
			s->pcli_flags |= ACCESS_EXPERT;
		return argl;

	} else if (strcmp(args[0], "experimental-mode") == 0) {
		if (!pcli_has_level(s, ACCESS_LVL_ADMIN)) {
			memprintf(errmsg, "Permission denied!\n");
			return -1;
		}
		s->pcli_flags &= ~ACCESS_EXPERIMENTAL;
		if ((argl > 1) && (strcmp(args[1], "on") == 0))
			s->pcli_flags |= ACCESS_EXPERIMENTAL;
		return argl;
	} else if (strcmp(args[0], "mcli-debug-mode") == 0) {
		if (!pcli_has_level(s, ACCESS_LVL_ADMIN)) {
			memprintf(errmsg, "Permission denied!\n");
			return -1;
		}
		s->pcli_flags &= ~ACCESS_MCLI_DEBUG;
		if ((argl > 1) && (strcmp(args[1], "on") == 0))
			s->pcli_flags |= ACCESS_MCLI_DEBUG;
		return argl;
	} else if (strcmp(args[0], "set") == 0) {
		if ((argl > 1) && (strcmp(args[1], "severity-output") == 0)) {
			if ((argl > 2) &&strcmp(args[2], "none") == 0) {
				s->pcli_flags &= ~(ACCESS_MCLI_SEVERITY_NB|ACCESS_MCLI_SEVERITY_STR);
			} else if ((argl > 2) && strcmp(args[2], "string") == 0) {
				s->pcli_flags |= ACCESS_MCLI_SEVERITY_STR;
			} else if ((argl > 2) && strcmp(args[2], "number") == 0) {
				s->pcli_flags |= ACCESS_MCLI_SEVERITY_NB;
			} else {
				memprintf(errmsg, "one of 'none', 'number', 'string' is a required argument\n");
				return -1;
			}
			/* only skip argl if we have "set severity-output" not only "set" */
			return argl;
		}
	}

	/* not a master-side keyword: forward the command untouched */
	return 0;
}
+
/*
 * Parse the CLI request:
 * - It does basically the same as the cli_io_handler, but as a proxy
 * - It can exec a command and strip non forwardable commands
 *
 * Return:
 * - the number of characters to forward or
 * - 1 if there is an error or not enough data
 *
 * Note: the parsing is done in place inside the request buffer: the command
 * is temporarily NUL-terminated and split into words, then the separators are
 * restored to spaces and the final '\n' is put back before forwarding.
 */
int pcli_parse_request(struct stream *s, struct channel *req, char **errmsg, int *next_pid)
{
	char *str;
	char *end;
	char *args[MAX_CLI_ARGS + 1]; /* +1 for storing a NULL */
	int argl; /* number of args */
	char *p;
	char *trim = NULL;
	int wtrim = 0; /* number of words to trim */
	int reql = 0;
	int ret;
	int i = 0;

	/* we cannot deal with a wrapping buffer, so let's take care of this
	 * first.
	 */
	if (b_head(&req->buf) + b_data(&req->buf) > b_wrap(&req->buf))
		b_slow_realign(&req->buf, trash.area, co_data(req));

	str = (char *)ci_head(req);
	end = (char *)ci_stop(req);

	p = str;

	if (!(s->pcli_flags & PCLI_F_PAYLOAD)) {

		/* Looks for the end of one command */
		while (p+reql < end) {
			/* handle escaping */
			if (p[reql] == '\\') {
				reql+=2;
				continue;
			}
			if (p[reql] == ';' || p[reql] == '\n') {
				/* found the end of the command */
				p[reql] = '\n';
				reql++;
				break;
			}
			reql++;
		}
	} else {
		/* payload mode: commands end at the newline only */
		while (p+reql < end) {
			if (p[reql] == '\n') {
				/* found the end of the line */
				reql++;
				break;
			}
			reql++;
		}
	}

	/* set end to first byte after the end of the command */
	end = p + reql;

	/* there is no end to this command, need more to parse ! */
	if (!reql || *(end-1) != '\n') {
		ret = -1;
		goto end;
	}

	/* in payload mode, skip the whole parsing/exec and just look for a pattern */
	if (s->pcli_flags & PCLI_F_PAYLOAD) {
		if (reql-1 == strlen(s->pcli_payload_pat)) {
			/* the custom pattern len can be 0 (empty line) */
			if (strncmp(str, s->pcli_payload_pat, strlen(s->pcli_payload_pat)) == 0) {
				s->pcli_flags &= ~PCLI_F_PAYLOAD;
			}
		}
		ret = reql;
		goto end;
	}

	/* temporarily NUL-terminate the command for in-place word splitting */
	*(end-1) = '\0';

	/* splits the command in words */
	while (i < MAX_CLI_ARGS && p < end) {
		/* skip leading spaces/tabs */
		p += strspn(p, " \t");
		if (!*p)
			break;

		args[i] = p;
		while (1) {
			p += strcspn(p, " \t\\");
			/* escaped chars using backlashes (\) */
			if (*p == '\\') {
				if (!*++p)
					break;
				if (!*++p)
					break;
			} else {
				break;
			}
		}
		*p++ = 0;
		i++;
	}
	argl = i;

	/* first look for '<<' at the beginning of the last argument */
	if (argl && strncmp(args[argl-1], PAYLOAD_PATTERN, strlen(PAYLOAD_PATTERN)) == 0) {
		size_t pat_len = strlen(args[argl-1] + strlen(PAYLOAD_PATTERN));

		/*
		 * A customized pattern can't be more than 7 characters
		 * if it's more, don't make it a payload
		 */
		if (pat_len < sizeof(s->pcli_payload_pat)) {
			s->pcli_flags |= PCLI_F_PAYLOAD;
			/* copy the customized pattern, don't store the << */
			strncpy(s->pcli_payload_pat, args[argl-1] + strlen(PAYLOAD_PATTERN), sizeof(s->pcli_payload_pat)-1);
			s->pcli_payload_pat[sizeof(s->pcli_payload_pat)-1] = '\0';
		}
	}

	/* pad the args array with NULLs for the keyword handlers */
	for (; i < MAX_CLI_ARGS + 1; i++)
		args[i] = NULL;

	wtrim = pcli_find_and_exec_kw(s, args, argl, errmsg, next_pid);

	/* End of words are ending by \0, we need to replace the \0s by spaces
	   before forwarding them */
	p = str;
	while (p < end-1) {
		if (*p == '\0')
			*p = ' ';
		p++;
	}

	/* restore the trailing newline that was NUL-ed for parsing */
	*(end-1) = '\n';

	if (wtrim > 0) {
		/* strip the <wtrim> leading words that were executed locally */
		trim = &args[wtrim][0];
		if (trim == NULL) /* if this was the last word in the table */
			trim = end;

		b_del(&req->buf, trim - str);

		ret = end - trim;
	} else if (wtrim < 0) {
		/* parsing error */
		ret = -1;
		goto end;
	} else {
		/* the whole string */
		ret = end - str;
	}

	if (ret > 1) {

		/* prepend the session's sticky modes as extra commands so the
		 * target CLI applies them before the forwarded command; each
		 * insertion accounts for the inserted line plus its "\n" (+2).
		 */
		/* the mcli-debug-mode is only sent to the applet of the master */
		if ((s->pcli_flags & ACCESS_MCLI_DEBUG) && *next_pid <= 0) {
			ci_insert_line2(req, 0, "mcli-debug-mode on -", strlen("mcli-debug-mode on -"));
			ret += strlen("mcli-debug-mode on -") + 2;
		}
		if (s->pcli_flags & ACCESS_EXPERIMENTAL) {
			ci_insert_line2(req, 0, "experimental-mode on -", strlen("experimental-mode on -"));
			ret += strlen("experimental-mode on -") + 2;
		}
		if (s->pcli_flags & ACCESS_EXPERT) {
			ci_insert_line2(req, 0, "expert-mode on -", strlen("expert-mode on -"));
			ret += strlen("expert-mode on -") + 2;
		}
		if (s->pcli_flags & ACCESS_MCLI_SEVERITY_STR) {
			const char *cmd = "set severity-output string -";
			ci_insert_line2(req, 0, cmd, strlen(cmd));
			ret += strlen(cmd) + 2;
		}
		if (s->pcli_flags & ACCESS_MCLI_SEVERITY_NB) {
			const char *cmd = "set severity-output number -";
			ci_insert_line2(req, 0, cmd, strlen(cmd));
			ret += strlen(cmd) + 2;
		}

		/* downgrade the target's level to match this session's level */
		if (pcli_has_level(s, ACCESS_LVL_ADMIN)) {
			goto end;
		} else if (pcli_has_level(s, ACCESS_LVL_OPER)) {
			ci_insert_line2(req, 0, "operator -", strlen("operator -"));
			ret += strlen("operator -") + 2;
		} else if (pcli_has_level(s, ACCESS_LVL_USER)) {
			ci_insert_line2(req, 0, "user -", strlen("user -"));
			ret += strlen("user -") + 2;
		}
	}
end:

	return ret;
}
+
/* Request analyzer for the master CLI proxy: parses one command from <req>,
 * executes master-side keywords, prepends mode/level commands, and forwards
 * the result to the selected target (master mCLI applet or a worker CLI).
 * Returns 0 to wait for more data or for the response, 1 when the analyzer
 * must be removed. Streams accepted on the reload sockpair are short-circuited
 * to a "_loadstatus;quit" sequence.
 */
int pcli_wait_for_request(struct stream *s, struct channel *req, int an_bit)
{
	int next_pid = -1;
	int to_forward;
	char *errmsg = NULL;

	/* Don't read the next command if still processing the response of the
	 * current one. Just wait. At this stage, errors should be handled by
	 * the response analyzer.
	 */
	if (s->res.analysers & AN_RES_WAIT_CLI)
		return 0;

	/* inherit the access level from the bind line on first use */
	if ((s->pcli_flags & ACCESS_LVL_MASK) == ACCESS_LVL_NONE)
		s->pcli_flags |= strm_li(s)->bind_conf->level & ACCESS_LVL_MASK;

	/* stream that comes from the reload listener only responses the reload
	 * status and quits */
	if (!(s->pcli_flags & PCLI_F_RELOAD)
	    && strm_li(s)->bind_conf == mcli_reload_bind_conf)
		goto send_status;


read_again:
	/* if the channel is closed for read, we won't receive any more data
	   from the client, but we don't want to forward this close to the
	   server */
	channel_dont_close(req);

	/* We don't know yet to which server we will connect */
	channel_dont_connect(req);

	s->scf->flags |= SC_FL_RCV_ONCE;

	/* need more data */
	if (!ci_data(req))
		goto missing_data;

	/* If there is data available for analysis, log the end of the idle time. */
	if (c_data(req) && s->logs.t_idle == -1)
		s->logs.t_idle = ns_to_ms(now_ns - s->logs.accept_ts) - s->logs.t_handshake;

	to_forward = pcli_parse_request(s, req, &errmsg, &next_pid);
	if (to_forward > 0) {
		int target_pid;
		/* enough data */

		/* forward only 1 command */
		channel_forward(req, to_forward);

		if (!(s->pcli_flags & PCLI_F_PAYLOAD)) {
			/* we send only 1 command per request, and we write close after it */
			sc_schedule_shutdown(s->scb);
		} else {
			pcli_write_prompt(s);
		}

		s->res.flags |= CF_WAKE_ONCE; /* need to be called again */
		s->res.analysers |= AN_RES_WAIT_CLI;

		if (!(s->flags & SF_ASSIGNED)) {
			/* pick the explicit per-command target if any,
			 * otherwise the session's default target
			 */
			if (next_pid > -1)
				target_pid = next_pid;
			else
				target_pid = s->pcli_next_pid;
			/* we can connect now */
			s->target = pcli_pid_to_server(target_pid);

			if (!s->target)
				goto server_disconnect;

			s->flags |= (SF_DIRECT | SF_ASSIGNED);
			channel_auto_connect(req);
		}

	} else if (to_forward == 0) {
		/* we trimmed things but we might have other commands to consume */
		pcli_write_prompt(s);
		goto read_again;
	} else if (to_forward == -1) {
		if (!errmsg) /* no error means missing data */
			goto missing_data;

		/* there was an error during the parsing */
		pcli_error(s, errmsg);
		pcli_write_prompt(s);
	}

	return 0;

send_help:
	/* replace the unparsable request by "help" to give the user a hint */
	b_reset(&req->buf);
	b_putblk(&req->buf, "help\n", 5);
	goto read_again;

send_status:
	s->pcli_flags |= PCLI_F_RELOAD;
	/* don't use ci_putblk here because SHUT_DONE could have been sent */
	b_reset(&req->buf);
	b_putblk(&req->buf, "_loadstatus;quit\n", 17);
	goto read_again;

missing_data:
	if (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
		/* There is no more request or a only a partial one and we
		 * receive a close from the client, we can leave */
		sc_schedule_shutdown(s->scf);
		s->req.analysers &= ~AN_REQ_WAIT_CLI;
		return 1;
	}
	else if (channel_full(req, global.tune.maxrewrite)) {
		/* buffer is full and we didn't catch the end of a command */
		goto send_help;
	}
	return 0;

server_disconnect:
	pcli_reply_and_close(s, "Can't connect to the target CLI!\n");
	return 0;
}
+
/* Response analyzer for the master CLI proxy: forwards the target CLI's
 * response to the client, and when the server side closes, recycles the
 * stream so the client connection can be reused for the next command with a
 * possibly different target (same logic as http_end_txn_clean_session()).
 * Returns 0 to be called again, 1 once the stream has been reset for reuse
 * or must be aborted.
 */
int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
{
	struct proxy *fe = strm_fe(s);
	struct proxy *be = s->be;

	/* connection to the target failed or timed out: report it and stop */
	if ((s->scb->flags & SC_FL_ERROR) || (rep->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) ||
	    ((s->scf->flags & SC_FL_SHUT_DONE) && (rep->to_forward || co_data(rep)))) {
		pcli_reply_and_close(s, "Can't connect to the target CLI!\n");
		s->req.analysers &= ~AN_REQ_WAIT_CLI;
		s->res.analysers &= ~AN_RES_WAIT_CLI;
		return 0;
	}
	s->scb->flags |= SC_FL_RCV_ONCE; /* try to get back here ASAP */
	s->scf->flags |= SC_FL_SND_NEVERWAIT;

	/* don't forward the close */
	channel_dont_close(&s->res);
	channel_dont_close(&s->req);

	if (s->pcli_flags & PCLI_F_PAYLOAD) {
		/* in payload mode, hand control back to the request analyzer */
		s->res.analysers &= ~AN_RES_WAIT_CLI;
		s->req.flags |= CF_WAKE_ONCE; /* need to be called again if there is some command left in the request */
		return 0;
	}

	/* forward the data */
	if (ci_data(rep)) {
		c_adv(rep, ci_data(rep));
		return 0;
	}

	if (s->scb->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
		/* stream cleanup */

		pcli_write_prompt(s);

		s->scb->flags |= SC_FL_NOLINGER | SC_FL_NOHALF;
		sc_abort(s->scb);
		sc_shutdown(s->scb);

		/*
		 * starting from there this the same code as
		 * http_end_txn_clean_session().
		 *
		 * It allows to do frontend keepalive while reconnecting to a
		 * new server for each request.
		 */

		if (s->flags & SF_BE_ASSIGNED) {
			HA_ATOMIC_DEC(&be->beconn);
			if (unlikely(s->srv_conn))
				sess_change_server(s, NULL);
		}

		s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
		stream_process_counters(s);

		/* don't count other requests' data */
		s->logs.bytes_in -= ci_data(&s->req);
		s->logs.bytes_out -= ci_data(&s->res);

		/* we may need to know the position in the queue */
		pendconn_free(s);

		/* let's do a final log if we need it */
		if (!LIST_ISEMPTY(&fe->logformat) && s->logs.logwait &&
		    !(s->flags & SF_MONITOR) &&
		    (!(fe->options & PR_O_NULLNOLOG) || s->req.total)) {
			s->do_log(s);
		}

		/* stop tracking content-based counters */
		stream_stop_content_counters(s);
		stream_update_time_stats(s);

		/* reset all the timing/accounting fields for the next command */
		s->logs.accept_date = date; /* user-visible date for logging */
		s->logs.accept_ts = now_ns; /* corrected date for internal use */
		s->logs.t_handshake = 0; /* There are no handshake in keep alive connection. */
		s->logs.t_idle = -1;
		s->logs.request_ts = 0;
		s->logs.t_queue = -1;
		s->logs.t_connect = -1;
		s->logs.t_data = -1;
		s->logs.t_close = 0;
		s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
		s->logs.srv_queue_pos = 0; /* we will get this number soon */

		s->logs.bytes_in = s->req.total = ci_data(&s->req);
		s->logs.bytes_out = s->res.total = ci_data(&s->res);

		stream_del_srv_conn(s);
		if (objt_server(s->target)) {
			if (s->flags & SF_CURR_SESS) {
				s->flags &= ~SF_CURR_SESS;
				HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
			}
			if (may_dequeue_tasks(__objt_server(s->target), be))
				process_srv_queue(__objt_server(s->target));
		}

		s->target = NULL;

		/* only release our endpoint if we don't intend to reuse the
		 * connection.
		 */
		if (!sc_conn_ready(s->scb)) {
			s->srv_conn = NULL;
			if (sc_reset_endp(s->scb) < 0) {
				if (!s->conn_err_type)
					s->conn_err_type = STRM_ET_CONN_OTHER;
				if (s->srv_error)
					s->srv_error(s, s->scb);
				return 1;
			}
			se_fl_clr(s->scb->sedesc, ~SE_FL_DETACHED);
		}

		sockaddr_free(&s->scb->dst);

		/* reset the backend-side stconn to its initial state */
		sc_set_state(s->scb, SC_ST_INI);
		s->scb->flags &= ~(SC_FL_ERROR|SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED);
		s->scb->flags &= SC_FL_ISBACK | SC_FL_DONT_WAKE; /* we're in the context of process_stream */

		s->req.flags &= ~(CF_AUTO_CONNECT|CF_STREAMER|CF_STREAMER_FAST|CF_WROTE_DATA);
		s->res.flags &= ~(CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_EVENT|CF_WROTE_DATA|CF_READ_EVENT);
		s->flags &= ~(SF_DIRECT|SF_ASSIGNED|SF_BE_ASSIGNED|SF_FORCE_PRST|SF_IGNORE_PRST);
		s->flags &= ~(SF_CURR_SESS|SF_REDIRECTABLE|SF_SRV_REUSED);
		s->flags &= ~(SF_ERR_MASK|SF_FINST_MASK|SF_REDISP);
		s->conn_retries = 0;  /* used for logging too */
		s->conn_exp = TICK_ETERNITY;
		s->conn_err_type = STRM_ET_NONE;
		/* reinitialise the current rule list pointer to NULL. We are sure that
		 * any rulelist match the NULL pointer.
		 */
		s->current_rule_list = NULL;

		s->be = strm_fe(s);
		s->logs.logwait = strm_fe(s)->to_log;
		s->logs.level = 0;
		stream_del_srv_conn(s);
		s->target = NULL;
		/* re-init store persistence */
		s->store_count = 0;
		s->uniq_id = global.req_count++;

		s->scf->flags &= ~(SC_FL_EOS|SC_FL_ERROR|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
		s->scf->flags &= ~SC_FL_SND_NEVERWAIT;
		s->scf->flags |= SC_FL_RCV_ONCE; /* one read is usually enough */

		s->req.flags |= CF_WAKE_ONCE; /* need to be called again if there is some command left in the request */

		s->res.analysers &= ~AN_RES_WAIT_CLI;

		/* We must trim any excess data from the response buffer, because we
		 * may have blocked an invalid response from a server that we don't
		 * want to accidentally forward once we disable the analysers, nor do
		 * we want those data to come along with next response. A typical
		 * example of such data would be from a buggy server responding to
		 * a HEAD with some data, or sending more than the advertised
		 * content-length.
		 */
		if (unlikely(ci_data(&s->res)))
			b_set_data(&s->res.buf, co_data(&s->res));

		/* Now we can realign the response buffer */
		c_realign_if_empty(&s->res);

		s->scf->ioto = strm_fe(s)->timeout.client;
		s->scb->ioto = TICK_ETERNITY;

		s->req.analyse_exp = TICK_ETERNITY;
		s->res.analyse_exp = TICK_ETERNITY;

		/* we're removing the analysers, we MUST re-enable events detection.
		 * We don't enable close on the response channel since it's either
		 * already closed, or in keep-alive with an idle connection handler.
		 */
		channel_auto_read(&s->req);
		channel_auto_close(&s->req);
		channel_auto_read(&s->res);


		return 1;
	}
	return 0;
}
+
+/*
+ * The mworker functions are used to initialize the CLI in the master process
+ */
+
+ /*
+ * Stop the mworker proxy
+ */
+void mworker_cli_proxy_stop()
+{
+ if (mworker_proxy)
+ stop_proxy(mworker_proxy);
+}
+
+/*
+ * Create the mworker CLI proxy
+ */
+int mworker_cli_proxy_create()
+{
+ struct mworker_proc *child;
+ char *msg = NULL;
+ char *errmsg = NULL;
+
+ mworker_proxy = alloc_new_proxy("MASTER", PR_CAP_LISTEN|PR_CAP_INT, &errmsg);
+ if (!mworker_proxy)
+ goto error_proxy;
+
+ mworker_proxy->mode = PR_MODE_CLI;
+ mworker_proxy->maxconn = 10; /* default to 10 concurrent connections */
+ mworker_proxy->timeout.client = 0; /* no timeout */
+ mworker_proxy->conf.file = strdup("MASTER");
+ mworker_proxy->conf.line = 0;
+ mworker_proxy->accept = frontend_accept;
+ mworker_proxy-> lbprm.algo = BE_LB_ALGO_NONE;
+
+ /* Does not init the default target the CLI applet, but must be done in
+ * the request parsing code */
+ mworker_proxy->default_target = NULL;
+
+ /* create all servers using the mworker_proc list */
+ list_for_each_entry(child, &proc_list, list) {
+ struct server *newsrv = NULL;
+ struct sockaddr_storage *sk;
+ int port1, port2, port;
+ struct protocol *proto;
+
+ /* only the workers support the master CLI */
+ if (!(child->options & PROC_O_TYPE_WORKER))
+ continue;
+
+ newsrv = new_server(mworker_proxy);
+ if (!newsrv)
+ goto error;
+
+ /* we don't know the new pid yet */
+ if (child->pid == -1)
+ memprintf(&msg, "cur-%d", 1);
+ else
+ memprintf(&msg, "old-%d", child->pid);
+
+ newsrv->next = mworker_proxy->srv;
+ mworker_proxy->srv = newsrv;
+ newsrv->conf.file = strdup(msg);
+ newsrv->id = strdup(msg);
+ newsrv->conf.line = 0;
+
+ memprintf(&msg, "sockpair@%d", child->ipc_fd[0]);
+ if ((sk = str2sa_range(msg, &port, &port1, &port2, NULL, &proto, NULL,
+ &errmsg, NULL, NULL, PA_O_STREAM)) == 0) {
+ goto error;
+ }
+ ha_free(&msg);
+
+ if (!proto->connect) {
+ goto error;
+ }
+
+ /* no port specified */
+ newsrv->flags |= SRV_F_MAPPORTS;
+ newsrv->addr = *sk;
+ /* don't let the server participate to load balancing */
+ newsrv->iweight = 0;
+ newsrv->uweight = 0;
+ srv_lb_commit_status(newsrv);
+
+ child->srv = newsrv;
+ }
+
+ mworker_proxy->next = proxies_list;
+ proxies_list = mworker_proxy;
+
+ return 0;
+
+error:
+
+ list_for_each_entry(child, &proc_list, list) {
+ free((char *)child->srv->conf.file); /* cast because of const char * */
+ free(child->srv->id);
+ ha_free(&child->srv);
+ }
+ free_proxy(mworker_proxy);
+ free(msg);
+
+error_proxy:
+ ha_alert("%s\n", errmsg);
+ free(errmsg);
+
+ return -1;
+}
+
+/*
+ * Create a new listener for the master CLI proxy
+ */
+struct bind_conf *mworker_cli_proxy_new_listener(char *line)
+{
+ struct bind_conf *bind_conf;
+ struct listener *l;
+ char *err = NULL;
+ char *args[MAX_LINE_ARGS + 1];
+ int arg;
+ int cur_arg;
+
+ arg = 1;
+ args[0] = line;
+
+ /* args is a bind configuration with spaces replaced by commas */
+ while (*line && arg < MAX_LINE_ARGS) {
+
+ if (*line == ',') {
+ *line++ = '\0';
+ while (*line == ',')
+ line++;
+ args[arg++] = line;
+ }
+ line++;
+ }
+
+ args[arg] = "\0";
+
+ bind_conf = bind_conf_alloc(mworker_proxy, "master-socket", 0, "", xprt_get(XPRT_RAW));
+ if (!bind_conf)
+ goto err;
+
+ bind_conf->level &= ~ACCESS_LVL_MASK;
+ bind_conf->level |= ACCESS_LVL_ADMIN;
+ bind_conf->level |= ACCESS_MASTER | ACCESS_MASTER_ONLY;
+
+ if (!str2listener(args[0], mworker_proxy, bind_conf, "master-socket", 0, &err)) {
+ ha_alert("Cannot create the listener of the master CLI\n");
+ goto err;
+ }
+
+ cur_arg = 1;
+
+ while (*args[cur_arg]) {
+ struct bind_kw *kw;
+ const char *best;
+
+ kw = bind_find_kw(args[cur_arg]);
+ if (kw) {
+ if (!kw->parse) {
+ memprintf(&err, "'%s %s' : '%s' option is not implemented in this version (check build options).",
+ args[0], args[1], args[cur_arg]);
+ goto err;
+ }
+
+ if (kw->parse(args, cur_arg, global.cli_fe, bind_conf, &err) != 0) {
+ if (err)
+ memprintf(&err, "'%s %s' : '%s'", args[0], args[1], err);
+ else
+ memprintf(&err, "'%s %s' : error encountered while processing '%s'",
+ args[0], args[1], args[cur_arg]);
+ goto err;
+ }
+
+ cur_arg += 1 + kw->skip;
+ continue;
+ }
+
+ best = bind_find_best_kw(args[cur_arg]);
+ if (best)
+ memprintf(&err, "'%s %s' : unknown keyword '%s'. Did you mean '%s' maybe ?",
+ args[0], args[1], args[cur_arg], best);
+ else
+ memprintf(&err, "'%s %s' : unknown keyword '%s'.",
+ args[0], args[1], args[cur_arg]);
+ goto err;
+ }
+
+
+ bind_conf->accept = session_accept_fd;
+ bind_conf->nice = -64; /* we want to boost priority for local stats */
+ bind_conf->options |= BC_O_UNLIMITED; /* don't make the peers subject to global limits */
+
+ /* Pin master CLI on the first thread of the first group only */
+ thread_set_pin_grp1(&bind_conf->thread_set, 1);
+
+ list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+ l->rx.flags |= RX_F_MWORKER; /* we are keeping this FD in the master */
+ global.maxsock++; /* for the listening socket */
+ }
+ global.maxsock += mworker_proxy->maxconn;
+
+ return bind_conf;
+
+err:
+ ha_alert("%s\n", err);
+ free(err);
+ free(bind_conf);
+ return NULL;
+
+}
+
+/*
+ * Create a new CLI socket using a socketpair for a worker process
+ * <mworker_proc> is the process structure, and <proc> is the process number
+ */
+int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc)
+{
+ struct bind_conf *bind_conf;
+ struct listener *l;
+ char *path = NULL;
+ char *err = NULL;
+
+ /* master pipe to ensure the master is still alive */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, mworker_proc->ipc_fd) < 0) {
+ ha_alert("Cannot create worker socketpair.\n");
+ return -1;
+ }
+
+ /* XXX: we might want to use a separate frontend at some point */
+ if (!global.cli_fe) {
+ if ((global.cli_fe = cli_alloc_fe("GLOBAL", "master-socket", 0)) == NULL) {
+ ha_alert("out of memory trying to allocate the stats frontend");
+ goto error;
+ }
+ }
+
+ bind_conf = bind_conf_alloc(global.cli_fe, "master-socket", 0, "", xprt_get(XPRT_RAW));
+ if (!bind_conf)
+ goto error;
+
+ bind_conf->level &= ~ACCESS_LVL_MASK;
+ bind_conf->level |= ACCESS_LVL_ADMIN; /* TODO: need to lower the rights with a CLI keyword*/
+ bind_conf->level |= ACCESS_FD_LISTENERS;
+
+ if (!memprintf(&path, "sockpair@%d", mworker_proc->ipc_fd[1])) {
+ ha_alert("Cannot allocate listener.\n");
+ goto error;
+ }
+
+ if (!str2listener(path, global.cli_fe, bind_conf, "master-socket", 0, &err)) {
+ free(path);
+ ha_alert("Cannot create a CLI sockpair listener for process #%d\n", proc);
+ goto error;
+ }
+ ha_free(&path);
+
+ bind_conf->accept = session_accept_fd;
+ bind_conf->nice = -64; /* we want to boost priority for local stats */
+ bind_conf->options |= BC_O_UNLIMITED | BC_O_NOSTOP;
+
+ /* Pin master CLI on the first thread of the first group only */
+ thread_set_pin_grp1(&bind_conf->thread_set, 1);
+
+ list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+ HA_ATOMIC_INC(&unstoppable_jobs);
+ /* it's a sockpair but we don't want to keep the fd in the master */
+ l->rx.flags &= ~RX_F_INHERITED;
+ global.maxsock++; /* for the listening socket */
+ }
+
+ return 0;
+
+error:
+ close(mworker_proc->ipc_fd[0]);
+ close(mworker_proc->ipc_fd[1]);
+ free(err);
+
+ return -1;
+}
+
/* regular CLI applet, attached to "stats socket" sessions in the workers */
static struct applet cli_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<CLI>", /* used for logging */
	.fct = cli_io_handler,
	.release = cli_release_handler,
};
+
/* master CLI applet: same handlers as the regular CLI, distinct name so
 * master-side sessions can be told apart in the logs
 */
static struct applet mcli_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<MCLI>", /* used for logging */
	.fct = cli_io_handler,
	.release = cli_release_handler,
};
+
/* register cli keywords. Entries flagged ACCESS_MASTER are usable from the
 * master CLI; ACCESS_MASTER_ONLY ones only there. Entries with a NULL usage
 * string are hidden from "help" output.
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "help", NULL }, NULL, cli_parse_simple, NULL, NULL, NULL, ACCESS_MASTER },
	{ { "prompt", NULL }, NULL, cli_parse_simple, NULL, NULL, NULL, ACCESS_MASTER },
	{ { "quit", NULL }, NULL, cli_parse_simple, NULL, NULL, NULL, ACCESS_MASTER },
	{ { "_getsocks", NULL }, NULL, _getsocks, NULL },
	{ { "expert-mode", NULL }, NULL, cli_parse_expert_experimental_mode, NULL, NULL, NULL, ACCESS_MASTER }, // not listed
	{ { "experimental-mode", NULL }, NULL, cli_parse_expert_experimental_mode, NULL, NULL, NULL, ACCESS_MASTER }, // not listed
	{ { "mcli-debug-mode", NULL }, NULL, cli_parse_expert_experimental_mode, NULL, NULL, NULL, ACCESS_MASTER_ONLY }, // not listed
	{ { "set", "anon", "on" }, "set anon on [value] : activate the anonymized mode", cli_parse_set_anon, NULL, NULL },
	{ { "set", "anon", "off" }, "set anon off : deactivate the anonymized mode", cli_parse_set_anon, NULL, NULL },
	{ { "set", "anon", "global-key", NULL }, "set anon global-key <value> : change the global anonymizing key", cli_parse_set_global_key, NULL, NULL },
	{ { "set", "maxconn", "global", NULL }, "set maxconn global <value> : change the per-process maxconn setting", cli_parse_set_maxconn_global, NULL },
	{ { "set", "rate-limit", NULL }, "set rate-limit <setting> <value> : change a rate limiting value", cli_parse_set_ratelimit, NULL },
	{ { "set", "severity-output", NULL }, "set severity-output [none|number|string]: set presence of severity level in feedback information", cli_parse_set_severity_output, NULL, NULL },
	{ { "set", "timeout", NULL }, "set timeout [cli] <delay> : change a timeout setting", cli_parse_set_timeout, NULL, NULL },
	{ { "show", "anon", NULL }, "show anon : display the current state of anonymized mode", cli_parse_show_anon, NULL },
	{ { "show", "env", NULL }, "show env [var] : dump environment variables known to the process", cli_parse_show_env, cli_io_handler_show_env, NULL },
	{ { "show", "cli", "sockets", NULL }, "show cli sockets : dump list of cli sockets", cli_parse_default, cli_io_handler_show_cli_sock, NULL, NULL, ACCESS_MASTER },
	{ { "show", "cli", "level", NULL }, "show cli level : display the level of the current CLI session", cli_parse_show_lvl, NULL, NULL, NULL, ACCESS_MASTER},
	{ { "show", "fd", NULL }, "show fd [-!plcfbsd]* [num] : dump list of file descriptors in use or a specific one", cli_parse_show_fd, cli_io_handler_show_fd, NULL },
	{ { "show", "version", NULL }, "show version : show version of the current process", cli_parse_show_version, NULL, NULL, NULL, ACCESS_MASTER },
	{ { "operator", NULL }, "operator : lower the level of the current CLI session to operator", cli_parse_set_lvl, NULL, NULL, NULL, ACCESS_MASTER},
	{ { "user", NULL }, "user : lower the level of the current CLI session to user", cli_parse_set_lvl, NULL, NULL, NULL, ACCESS_MASTER},
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
/* register the global "stats" configuration keyword */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "stats", cli_parse_global },
	{ 0, NULL, NULL },
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
/* register "bind" keywords specific to the stats/CLI sockets */
static struct bind_kw_list bind_kws = { "STAT", { }, {
	{ "level",     bind_parse_level,    1 }, /* set the unix socket admin level */
	{ "expose-fd", bind_parse_expose_fd, 1 }, /* set the unix socket expose fd rights */
	{ "severity-output", bind_parse_severity_output, 1 }, /* set the severity output format */
	{ NULL, NULL, 0 },
}};

INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/clock.c b/src/clock.c
new file mode 100644
index 0000000..ec2133c
--- /dev/null
+++ b/src/clock.c
@@ -0,0 +1,460 @@
+/*
+ * General time-keeping code and variables
+ *
+ * Copyright 2000-2021 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/time.h>
+#include <signal.h>
+#include <time.h>
+
+#ifdef USE_THREAD
+#include <pthread.h>
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/clock.h>
+#include <haproxy/signal-t.h>
+#include <haproxy/time.h>
+#include <haproxy/tinfo-t.h>
+#include <haproxy/tools.h>
+
+struct timeval start_date; /* the process's start date in wall-clock time */
+struct timeval ready_date; /* date when the process was considered ready */
+ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
+volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
+volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
+
+THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
+
+THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
+THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
+THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
+
+static THREAD_LOCAL struct timeval before_poll; /* system date before calling poll() */
+static THREAD_LOCAL struct timeval after_poll; /* system date after leaving poll() */
+static THREAD_LOCAL unsigned int samp_time; /* total elapsed time over current sample */
+static THREAD_LOCAL unsigned int idle_time; /* total idle time over current sample */
+static THREAD_LOCAL unsigned int iso_time_sec; /* last iso time value for this thread */
+static THREAD_LOCAL char iso_time_str[34]; /* ISO time representation of gettimeofday() */
+
+#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
+static clockid_t per_thread_clock_id[MAX_THREADS];
+#endif
+
+/* returns the system's monotonic time in nanoseconds if supported, otherwise zero */
+uint64_t now_mono_time(void)
+{
+ uint64_t ret = 0;
+#if defined(_POSIX_TIMERS) && defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_MONOTONIC_CLOCK)
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+#endif
+ return ret;
+}
+
/* Returns the system's monotonic time in nanoseconds.
 * Uses the coarse clock source if supported (for fast but
 * less precise queries with limited resource usage).
 * Fallback to now_mono_time() if coarse source is not supported,
 * which may itself return 0 if not supported either.
 */
uint64_t now_mono_time_fast(void)
{
#if defined(CLOCK_MONOTONIC_COARSE)
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
	return (ts.tv_sec * 1000000000ULL + ts.tv_nsec);
#else
	/* fallback to regular mono time,
	 * returns 0 if not supported
	 */
	return now_mono_time();
#endif
}
+
+/* returns the current thread's cumulated CPU time in nanoseconds if supported, otherwise zero */
+uint64_t now_cpu_time(void)
+{
+ uint64_t ret = 0;
+#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
+ struct timespec ts;
+ clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
+ ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+#endif
+ return ret;
+}
+
+/* Returns the current thread's cumulated CPU time in nanoseconds.
+ *
+ * thread_local timer is cached so that call is less precise but also less
+ * expensive if heavily used.
+ * We use the mono time as a cache expiration hint since now_cpu_time() is
+ * known to be much more expensive than now_mono_time_fast() on systems
+ * supporting the COARSE clock source.
+ *
+ * Returns 0 if either now_mono_time_fast() or now_cpu_time() are not
+ * supported.
+ */
+uint64_t now_cpu_time_fast(void)
+{
+ static THREAD_LOCAL uint64_t mono_cache = 0;
+ static THREAD_LOCAL uint64_t cpu_cache = 0;
+ uint64_t mono_cur;
+
+ mono_cur = now_mono_time_fast();
+ if (unlikely(mono_cur != mono_cache)) {
+ /* global mono clock was updated: local cache is outdated */
+ cpu_cache = now_cpu_time();
+ mono_cache = mono_cur;
+ }
+ return cpu_cache;
+}
+
/* returns another thread's cumulated CPU time in nanoseconds if supported, otherwise zero */
uint64_t now_cpu_time_thread(int thr)
{
	uint64_t ret = 0;
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
	struct timespec ts;
	/* uses the clock id published by clock_set_local_source() for thread
	 * <thr>; assumes 0 <= thr < MAX_THREADS (no bounds check here)
	 */
	clock_gettime(per_thread_clock_id[thr], &ts);
	ret = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
#endif
	return ret;
}
+
/* set the clock source for the local thread: resolves this thread's CPU-time
 * clock id and publishes it in per_thread_clock_id[tid] so that other threads
 * can later sample it through now_cpu_time_thread()
 */
void clock_set_local_source(void)
{
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
#ifdef USE_THREAD
	pthread_getcpuclockid(pthread_self(), &per_thread_clock_id[tid]);
#else
	/* single-threaded build: the thread clock is the process's own */
	per_thread_clock_id[tid] = CLOCK_THREAD_CPUTIME_ID;
#endif
#endif
}
+
/* registers a timer <tmr> of type timer_t delivering signal <sig> with value
 * <val>. It tries on the current thread's clock ID first and falls back to
 * CLOCK_REALTIME. Returns 1 on success, 0 on failure (the code below only
 * ever sets ret to 1 on success; it also returns 0 when timers are not
 * compiled in).
 */
int clock_setup_signal_timer(void *tmr, int sig, int val)
{
	int ret = 0;

#if defined(USE_RT) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
	struct sigevent sev = { };
	timer_t *timer = tmr;
	sigset_t set;

	/* unblock the WDTSIG signal we intend to use */
	sigemptyset(&set);
	sigaddset(&set, WDTSIG);
	ha_sigmask(SIG_UNBLOCK, &set, NULL);

	/* this timer will signal WDTSIG when it fires, with tid in the si_int
	 * field (important since any thread will receive the signal).
	 */
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = sig;
	sev.sigev_value.sival_int = val;
	if (timer_create(per_thread_clock_id[tid], &sev, timer) != -1 ||
	    timer_create(CLOCK_REALTIME, &sev, timer) != -1)
		ret = 1;
#endif
	return ret;
}
+
/* clock_update_local_date: sets <date> to system time, and sets <now_ns> to
 * something as close as possible to real time, following a monotonic function.
 * The main principle consists in detecting backwards and forwards time jumps
 * and adjust an offset to correct them. This function should be called once
 * after each poll, and never farther apart than MAX_DELAY_MS*2. The poll's
 * timeout should be passed in <max_wait>, and the return value in
 * <interrupted> (a non-zero value means that we have not expired the timeout).
 *
 * clock_init_process_date() must have been called once first, and
 * clock_init_thread_date() must also have been called once for each thread.
 *
 * An offset is used to adjust the current time (date), to figure a monotonic
 * local time (now_ns). The offset is not critical, as it is only updated after
 * a clock jump is detected. From this point all threads will apply it to their
 * locally measured time, and will then agree around a common monotonic
 * global_now_ns value that serves to further refine their local time. Both
 * now_ns and global_now_ns are 64-bit integers counting nanoseconds since a
 * vague reference (it starts roughly 20s before the next wrap-around of the
 * millisecond counter after boot). The offset is also an integral number of
 * nanoseconds, but it's signed so that the clock can be adjusted in the two
 * directions.
 */
void clock_update_local_date(int max_wait, int interrupted)
{
	struct timeval min_deadline, max_deadline;

	gettimeofday(&date, NULL);

	/* compute the minimum and maximum local date we may have reached based
	 * on our past date and the associated timeout. There are three possible
	 * extremities:
	 *    - the new date cannot be older than before_poll
	 *    - if not interrupted, the new date cannot be older than
	 *      before_poll+max_wait
	 *    - in any case the new date cannot be newer than
	 *      before_poll+max_wait+some margin (100ms used here).
	 * In case of violation, we'll ignore the current date and instead
	 * restart from the last date we knew.
	 */
	_tv_ms_add(&min_deadline, &before_poll, max_wait);
	_tv_ms_add(&max_deadline, &before_poll, max_wait + 100);

	if (unlikely(__tv_islt(&date, &before_poll)                    || // big jump backwards
		     (!interrupted && __tv_islt(&date, &min_deadline)) || // small jump backwards
		     __tv_islt(&max_deadline, &date))) {                  // big jump forwards
		/* system clock jumped: advance the local clock by the time we
		 * are certain elapsed (the full timeout when not interrupted)
		 */
		if (!interrupted)
			now_ns += ms_to_ns(max_wait);
	} else {
		/* The date is still within expectations. Let's apply the
		 * now_offset to the system date. Note: ofs is made of two
		 * independent signed ints.
		 */
		now_ns = tv_to_ns(&date) + HA_ATOMIC_LOAD(&now_offset);
	}
	now_ms = ns_to_ms(now_ns);
}
+
+void clock_update_global_date()
+{
+ ullong old_now_ns;
+ uint old_now_ms;
+
+ /* now that we have bounded the local time, let's check if it's
+ * realistic regarding the global date, which only moves forward,
+ * otherwise catch up.
+ */
+ old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
+ old_now_ms = global_now_ms;
+
+ do {
+ if (now_ns < old_now_ns)
+ now_ns = old_now_ns;
+
+ /* now <now_ns> is expected to be the most accurate date,
+ * equal to <global_now_ns> or newer. Updating the global
+ * date too often causes extreme contention and is not
+ * needed: it's only used to help threads run at the
+ * same date in case of local drift, and the global date,
+ * which changes, is only used by freq counters (a choice
+ * which is debatable by the way since it changes under us).
+ * Tests have seen that the contention can be reduced from
+ * 37% in this function to almost 0% when keeping clocks
+ * synchronized no better than 32 microseconds, so that's
+ * what we're doing here.
+ */
+ now_ms = ns_to_ms(now_ns);
+
+ if (!((now_ns ^ old_now_ns) & ~0x7FFFULL))
+ return;
+
+ /* let's try to update the global_now_ns (both in nanoseconds
+ * and ms forms) or loop again.
+ */
+ } while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
+ (now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
+ __ha_cpu_relax());
+
+ /* <now_ns> and <now_ms> are now updated to the last value of
+ * global_now_ns and global_now_ms, which were also monotonically
+ * updated. We can compute the latest offset, we don't care who writes
+ * it last, the variations will not break the monotonic property.
+ */
+ HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
+}
+
/* must be called once at boot to initialize some global variables: seeds
 * <date>, <now_ns>, <global_now_ns>/<global_now_ms> from the wall clock,
 * then shifts everything via <now_offset> so the millisecond counter wraps
 * shortly after boot (exercising wrap handling early).
 */
void clock_init_process_date(void)
{
	now_offset = 0;
	gettimeofday(&date, NULL);
	after_poll = before_poll = date;
	now_ns = global_now_ns = tv_to_ns(&date);
	global_now_ms = ns_to_ms(now_ns);

	/* force time to wrap 20s after boot: we first compute the time offset
	 * that once applied to the wall-clock date will make the local time
	 * wrap in 5 seconds. This offset is applied to the process-wide time,
	 * and will be used to recompute the local time, both of which will
	 * match and continue from this shifted date.
	 * (NOTE(review): "20s" vs "in 5 seconds" disagree — the actual delay
	 * is BOOT_TIME_WRAP_SEC; confirm which figure is current.)
	 */
	now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
	global_now_ns += now_offset;
	now_ns = global_now_ns;
	now_ms = global_now_ms = ns_to_ms(now_ns);

	/* start fully idle until first measurement */
	th_ctx->idle_pct = 100;
	clock_update_date(0, 1);
}
+
/* recompute and publish <now_offset> as the difference between the current
 * local monotonic time <now_ns> and the current system date <date>
 */
void clock_adjust_now_offset(void)
{
	HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}
+
/* must be called once per thread to initialize their thread-local variables.
 * Note that other threads might also be initializing and running in parallel.
 */
void clock_init_thread_date(void)
{
	gettimeofday(&date, NULL);
	after_poll = before_poll = date;

	/* adopt the process-wide monotonic date as this thread's local date */
	now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
	th_ctx->idle_pct = 100;
	th_ctx->prev_cpu_time = now_cpu_time();
	clock_update_date(0, 1);
}
+
+/* report the average CPU idle percentage over all running threads, between 0 and 100 */
+uint clock_report_idle(void)
+{
+ uint total = 0;
+ uint rthr = 0;
+ uint thr;
+
+ for (thr = 0; thr < MAX_THREADS; thr++) {
+ if (!ha_thread_info[thr].tg ||
+ !(ha_thread_info[thr].tg->threads_enabled & ha_thread_info[thr].ltid_bit))
+ continue;
+ total += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].idle_pct);
+ rthr++;
+ }
+ return rthr ? total / rthr : 0;
+}
+
/* Update the idle time value twice a second, to be called after
 * clock_update_date() when called after poll(), and currently called only by
 * clock_leaving_poll() below. It relies on <before_poll> to be updated to
 * the system time before calling poll().
 */
static inline void clock_measure_idle(void)
{
	/* Let's compute the idle to work ratio. We worked between after_poll
	 * and before_poll, and slept between before_poll and date. The idle_pct
	 * is updated at most twice every second. Note that the current second
	 * rarely changes so we avoid a multiply when not needed.
	 */
	int delta;

	/* seconds part only multiplied when it actually changed */
	if ((delta = date.tv_sec - before_poll.tv_sec))
		delta *= 1000000;
	idle_time += delta + (date.tv_usec - before_poll.tv_usec);

	if ((delta = date.tv_sec - after_poll.tv_sec))
		delta *= 1000000;
	samp_time += delta + (date.tv_usec - after_poll.tv_usec);

	after_poll.tv_sec = date.tv_sec; after_poll.tv_usec = date.tv_usec;
	/* only publish a new ratio once at least half a second was sampled */
	if (samp_time < 500000)
		return;

	/* rounded percentage: (100 * idle + samp/2) / samp */
	HA_ATOMIC_STORE(&th_ctx->idle_pct, (100ULL * idle_time + samp_time / 2) / samp_time);
	idle_time = samp_time = 0;
}
+
/* Collect date and time information after leaving poll(). <timeout> must be
 * set to the maximum sleep time passed to poll (in milliseconds), and
 * <interrupted> must be zero if the poller reached the timeout or non-zero
 * otherwise, which generally is provided by the poller's return value.
 * (Both arguments are currently unused here: the idle measurement relies on
 * <before_poll>/<after_poll>/<date> instead — kept for API symmetry.)
 */
void clock_leaving_poll(int timeout, int interrupted)
{
	clock_measure_idle();
	/* snapshot CPU and mono time to measure the next activity period */
	th_ctx->prev_cpu_time  = now_cpu_time();
	th_ctx->prev_mono_time = now_mono_time();
}
+
/* Collect date and time information before calling poll(). This will be used
 * to count the run time of the past loop and the sleep time of the next poll.
 * It also compares the elapsed and cpu times during the activity period to
 * estimate the amount of stolen time, which is reported if higher than half
 * a millisecond.
 */
void clock_entering_poll(void)
{
	uint64_t new_mono_time;
	uint64_t new_cpu_time;
	uint32_t run_time;
	int64_t stolen;

	gettimeofday(&before_poll, NULL);

	/* time spent between leaving poll() and re-entering it */
	run_time = (before_poll.tv_sec - after_poll.tv_sec) * 1000000U + (before_poll.tv_usec - after_poll.tv_usec);

	new_cpu_time  = now_cpu_time();
	new_mono_time = now_mono_time();

	/* prev_* are zero until clock_leaving_poll() ran once */
	if (th_ctx->prev_cpu_time && th_ctx->prev_mono_time) {
		new_cpu_time  -= th_ctx->prev_cpu_time;
		new_mono_time -= th_ctx->prev_mono_time;
		/* wall-clock time not accounted as CPU time => stolen by the host/scheduler */
		stolen = new_mono_time - new_cpu_time;
		if (unlikely(stolen >= 500000)) {
			stolen /= 500000;
			/* more than half a millisecond difference might
			 * indicate an undesired preemption.
			 */
			report_stolen_time(stolen);
		}
	}

	/* update the average runtime */
	activity_count_runtime(run_time);
}
+
/* returns the current date as returned by gettimeofday() in ISO+microsecond
 * format. It uses a thread-local static variable that the reader can consume
 * for as long as it wants until next call. Thus, do not call it from a signal
 * handler. If <pad> is non-0, a trailing space will be added. It will always
 * return exactly 32 or 33 characters (depending on padding) and will always be
 * zero-terminated, thus it will always fit into a 34 bytes buffer.
 * This also always include the local timezone (in +/-HH:mm format) .
 */
char *timeofday_as_iso_us(int pad)
{
	struct timeval new_date;
	struct tm tm;
	const char *offset;
	char c;

	gettimeofday(&new_date, NULL);
	/* the seconds part is cached: only rebuild the string when it changed */
	if (new_date.tv_sec != iso_time_sec || !new_date.tv_sec) {
		get_localtime(new_date.tv_sec, &tm);
		offset = get_gmt_offset(new_date.tv_sec, &tm);
		if (unlikely(strftime(iso_time_str, sizeof(iso_time_str), "%Y-%m-%dT%H:%M:%S.000000+00:00", &tm) != 32))
			strlcpy2(iso_time_str, "YYYY-mm-ddTHH:MM:SS.000000-00:00", sizeof(iso_time_str)); // make the failure visible but respect format.
		/* patch the "+00:00" placeholder (chars 26..31, ':' at 29 kept)
		 * with the real GMT offset returned as "+HHmm"/"-HHmm"
		 */
		iso_time_str[26] = offset[0];
		iso_time_str[27] = offset[1];
		iso_time_str[28] = offset[2];
		iso_time_str[30] = offset[3];
		iso_time_str[31] = offset[4];
		iso_time_sec = new_date.tv_sec;
	}

	/* utoa_pad adds a trailing 0 so we save the char for restore */
	c = iso_time_str[26];
	/* overwrite the 6 microsecond digits at offset 20 ("SS." ends at 19) */
	utoa_pad(new_date.tv_usec, iso_time_str + 20, 7);
	iso_time_str[26] = c;
	if (pad) {
		iso_time_str[32] = ' ';
		iso_time_str[33] = 0;
	}
	return iso_time_str;
}
diff --git a/src/compression.c b/src/compression.c
new file mode 100644
index 0000000..7b75461
--- /dev/null
+++ b/src/compression.c
@@ -0,0 +1,742 @@
+/*
+ * HTTP compression.
+ *
+ * Copyright 2012 Exceliance, David Du Colombier <dducolombier@exceliance.fr>
+ * William Lallemand <wlallemand@exceliance.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdio.h>
+
+#if defined(USE_ZLIB)
+/* Note: the crappy zlib and openssl libs both define the "free_func" type.
+ * That's a very clever idea to use such a generic name in general purpose
+ * libraries, really... The zlib one is easier to redefine than openssl's,
+ * so let's only fix this one.
+ */
+#define free_func zlib_free_func
+#include <zlib.h>
+#undef free_func
+#endif /* USE_ZLIB */
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/compression-t.h>
+#include <haproxy/compression.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/global.h>
+#include <haproxy/pool.h>
+#include <haproxy/stream.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+
+
+#if defined(USE_ZLIB)
+/* serializes the lazy creation of the shared zlib memory pools below */
+__decl_spinlock(comp_pool_lock);
+#endif
+
+#ifdef USE_ZLIB
+
+static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size);
+static void free_zlib(void *opaque, void *ptr);
+
+/* zlib allocation */
+static struct pool_head *zlib_pool_deflate_state __read_mostly = NULL;
+static struct pool_head *zlib_pool_window __read_mostly = NULL;
+static struct pool_head *zlib_pool_prev __read_mostly = NULL;
+static struct pool_head *zlib_pool_head __read_mostly = NULL;
+static struct pool_head *zlib_pool_pending_buf __read_mostly = NULL;
+
+/* bytes currently accounted for zlib contexts and pool allocations; it is
+ * compared against global.maxzlibmem before any new allocation.
+ */
+long zlib_used_memory = 0;
+
+static int global_tune_zlibmemlevel = 8; /* zlib memlevel */
+static int global_tune_zlibwindowsize = MAX_WBITS; /* zlib window size */
+
+#endif
+
+/* CPU idle percentage below which the compression level gets lowered */
+unsigned int compress_min_idle = 0;
+
+static int identity_init(struct comp_ctx **comp_ctx, int level);
+static int identity_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
+static int identity_flush(struct comp_ctx *comp_ctx, struct buffer *out);
+static int identity_finish(struct comp_ctx *comp_ctx, struct buffer *out);
+static int identity_end(struct comp_ctx **comp_ctx);
+
+#if defined(USE_SLZ)
+
+static int rfc1950_init(struct comp_ctx **comp_ctx, int level);
+static int rfc1951_init(struct comp_ctx **comp_ctx, int level);
+static int rfc1952_init(struct comp_ctx **comp_ctx, int level);
+static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
+static int rfc195x_flush(struct comp_ctx *comp_ctx, struct buffer *out);
+static int rfc195x_finish(struct comp_ctx *comp_ctx, struct buffer *out);
+static int rfc195x_end(struct comp_ctx **comp_ctx);
+
+#elif defined(USE_ZLIB)
+
+static int gzip_init(struct comp_ctx **comp_ctx, int level);
+static int raw_def_init(struct comp_ctx **comp_ctx, int level);
+static int deflate_init(struct comp_ctx **comp_ctx, int level);
+static int deflate_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
+static int deflate_flush(struct comp_ctx *comp_ctx, struct buffer *out);
+static int deflate_finish(struct comp_ctx *comp_ctx, struct buffer *out);
+static int deflate_end(struct comp_ctx **comp_ctx);
+
+#endif /* USE_ZLIB */
+
+
+/* Registry of supported compression algorithms, NULL-terminated. Each entry
+ * maps a configuration name (with its length) and the token advertised in
+ * Content-Encoding to the init/add_data/flush/finish/end callbacks of the
+ * compiled-in backend (SLZ or zlib). "identity" is always available.
+ */
+const struct comp_algo comp_algos[] =
+{
+	{ "identity",     8, "identity", 8, identity_init, identity_add_data, identity_flush, identity_finish, identity_end },
+#if defined(USE_SLZ)
+	{ "deflate",      7, "deflate",  7, rfc1950_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish, rfc195x_end },
+	{ "raw-deflate", 11, "deflate",  7, rfc1951_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish, rfc195x_end },
+	{ "gzip",         4, "gzip",     4, rfc1952_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish, rfc195x_end },
+#elif defined(USE_ZLIB)
+	{ "deflate",      7, "deflate",  7, deflate_init,  deflate_add_data,  deflate_flush,  deflate_finish, deflate_end },
+	{ "raw-deflate", 11, "deflate",  7, raw_def_init,  deflate_add_data,  deflate_flush,  deflate_finish, deflate_end },
+	{ "gzip",         4, "gzip",     4, gzip_init,     deflate_add_data,  deflate_flush,  deflate_finish, deflate_end },
+#endif /* USE_ZLIB */
+	{ NULL,       0, NULL,          0, NULL ,         NULL,              NULL,           NULL,           NULL }
+};
+
+/*
+ * Add a content-type in the configuration
+ * Returns 0 in case of success, 1 in case of allocation failure.
+ */
+int comp_append_type(struct comp_type **types, const char *type)
+{
+	struct comp_type *comp_type;
+
+	comp_type = calloc(1, sizeof(*comp_type));
+	if (!comp_type)
+		return 1;
+
+	comp_type->name_len = strlen(type);
+	comp_type->name = strdup(type);
+	if (!comp_type->name) {
+		/* strdup() may fail too; without this check a NULL <name>
+		 * would silently be reported as a success.
+		 */
+		free(comp_type);
+		return 1;
+	}
+
+	/* link the new entry at the head of the list */
+	comp_type->next = *types;
+	*types = comp_type;
+	return 0;
+}
+
+/*
+ * Add an algorithm in the configuration
+ * Returns 0 in case of success, -1 if the <algo> is unmanaged, 1 in case of
+ * allocation failure.
+ */
+int comp_append_algo(struct comp_algo **algos, const char *algo)
+{
+	const struct comp_algo *known;
+	struct comp_algo *node;
+
+	/* look up <algo> in the static registry */
+	for (known = comp_algos; known->cfg_name; known++) {
+		if (strcmp(algo, known->cfg_name) != 0)
+			continue;
+
+		node = calloc(1, sizeof(*node));
+		if (!node)
+			return 1;
+
+		/* duplicate the static descriptor and link it at the head */
+		memcpy(node, known, sizeof(*node));
+		node->next = *algos;
+		*algos = node;
+		return 0;
+	}
+	return -1;
+}
+
+#if defined(USE_ZLIB) || defined(USE_SLZ)
+DECLARE_STATIC_POOL(pool_comp_ctx, "comp_ctx", sizeof(struct comp_ctx));
+
+/*
+ * Alloc the comp_ctx. Returns 0 on success, -1 on allocation failure or when
+ * the configured zlib memory budget would be exceeded.
+ */
+static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
+{
+#ifdef USE_ZLIB
+	z_stream *strm;
+
+	/* refuse to allocate past the configured zlib memory limit */
+	if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < sizeof(struct comp_ctx))
+		return -1;
+#endif
+
+	*comp_ctx = pool_alloc(pool_comp_ctx);
+	if (*comp_ctx == NULL)
+		return -1;
+#if defined(USE_SLZ)
+	/* SLZ keeps a zero-copy reference to input data, reset it */
+	(*comp_ctx)->direct_ptr = NULL;
+	(*comp_ctx)->direct_len = 0;
+	(*comp_ctx)->queued = BUF_NULL;
+#elif defined(USE_ZLIB)
+	/* account the context itself in the zlib memory usage */
+	_HA_ATOMIC_ADD(&zlib_used_memory, sizeof(struct comp_ctx));
+	__ha_barrier_atomic_store();
+
+	/* plug our pool-backed allocator into the zlib stream */
+	strm = &(*comp_ctx)->strm;
+	strm->zalloc = alloc_zlib;
+	strm->zfree = free_zlib;
+	strm->opaque = *comp_ctx;
+#endif
+	return 0;
+}
+
+/*
+ * Dealloc the comp_ctx and update the zlib memory accounting. Safe to call
+ * with a NULL context. Always returns 0, and resets *comp_ctx to NULL.
+ */
+static inline int deinit_comp_ctx(struct comp_ctx **comp_ctx)
+{
+	if (!*comp_ctx)
+		return 0;
+
+	pool_free(pool_comp_ctx, *comp_ctx);
+	*comp_ctx = NULL;
+
+#ifdef USE_ZLIB
+	/* mirror the accounting done in init_comp_ctx() */
+	_HA_ATOMIC_SUB(&zlib_used_memory, sizeof(struct comp_ctx));
+	__ha_barrier_atomic_store();
+#endif
+	return 0;
+}
+#endif
+
+
+/****************************
+ **** Identity algorithm ****
+ ****************************/
+
+/*
+ * Init the identity algorithm. No context is needed since the data is
+ * forwarded unmodified, so this is a no-op returning success.
+ */
+static int identity_init(struct comp_ctx **comp_ctx, int level)
+{
+	return 0;
+}
+
+/*
+ * Process data
+ * Return size of consumed data or -1 on error
+ */
+static int identity_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
+{
+	/* "identity" performs no transformation: the input is copied verbatim
+	 * into <out>, provided it entirely fits in the available room.
+	 */
+	if (b_room(out) < in_len)
+		return -1;
+
+	memcpy(b_tail(out), in_data, in_len);
+	b_add(out, in_len);
+	return in_len;
+}
+
+/* The identity algorithm buffers nothing, so flushing emits no bytes */
+static int identity_flush(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return 0;
+}
+
+/* No trailer to emit for identity: finishing produces no bytes */
+static int identity_finish(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return 0;
+}
+
+/*
+ * Deinit the algorithm. Nothing was allocated by identity_init(), so there
+ * is nothing to release.
+ */
+static int identity_end(struct comp_ctx **comp_ctx)
+{
+	return 0;
+}
+
+
+#ifdef USE_SLZ
+
+/* SLZ's gzip format (RFC1952). Returns < 0 on error. Note that only the
+ * boolean value of <level> is kept, as SLZ is driven with on/off levels.
+ */
+static int rfc1952_init(struct comp_ctx **comp_ctx, int level)
+{
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	(*comp_ctx)->cur_lvl = !!level;
+	return slz_rfc1952_init(&(*comp_ctx)->strm, !!level);
+}
+
+/* SLZ's raw deflate format (RFC1951). Returns < 0 on error. Only the boolean
+ * value of <level> is kept (SLZ levels are on/off).
+ */
+static int rfc1951_init(struct comp_ctx **comp_ctx, int level)
+{
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	(*comp_ctx)->cur_lvl = !!level;
+	return slz_rfc1951_init(&(*comp_ctx)->strm, !!level);
+}
+
+/* SLZ's zlib format (RFC1950). Returns < 0 on error. Only the boolean value
+ * of <level> is kept (SLZ levels are on/off).
+ */
+static int rfc1950_init(struct comp_ctx **comp_ctx, int level)
+{
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	(*comp_ctx)->cur_lvl = !!level;
+	return slz_rfc1950_init(&(*comp_ctx)->strm, !!level);
+}
+
+/* Return the size of consumed data or -1. The output buffer is unused at this
+ * point, we only keep a reference to the input data or a copy of them if the
+ * reference is already used.
+ */
+static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
+{
+	/* per-thread scratch buffer, reused across calls within the scope of
+	 * one add_data()* / end() sequence.
+	 */
+	static THREAD_LOCAL struct buffer tmpbuf = BUF_NULL;
+
+	if (in_len <= 0)
+		return 0;
+
+	if (comp_ctx->direct_ptr && b_is_null(&comp_ctx->queued)) {
+		/* data already being pointed to, we're in front of fragmented
+		 * data and need a buffer now. We reuse the same buffer, as it's
+		 * not used out of the scope of a series of add_data()*, end().
+		 */
+		if (b_alloc(&tmpbuf) == NULL)
+			return -1; /* no memory */
+		b_reset(&tmpbuf);
+		/* move the previously referenced fragment into the buffer */
+		memcpy(b_tail(&tmpbuf), comp_ctx->direct_ptr, comp_ctx->direct_len);
+		b_add(&tmpbuf, comp_ctx->direct_len);
+		comp_ctx->direct_ptr = NULL;
+		comp_ctx->direct_len = 0;
+		comp_ctx->queued = tmpbuf;
+		/* fall through buffer copy */
+	}
+
+	if (!b_is_null(&comp_ctx->queued)) {
+		/* data already pending. NOTE(review): no room check is made
+		 * here; presumably callers bound the total input to the
+		 * buffer size — confirm against the HTTP compression filter.
+		 */
+		memcpy(b_tail(&comp_ctx->queued), in_data, in_len);
+		b_add(&comp_ctx->queued, in_len);
+		return in_len;
+	}
+
+	/* first fragment: keep a zero-copy reference to the caller's data */
+	comp_ctx->direct_ptr = in_data;
+	comp_ctx->direct_len = in_len;
+	return in_len;
+}
+
+/* Compresses the data accumulated using add_data(), and optionally sends the
+ * format-specific trailer if <finish> is non-null. <out> is expected to have a
+ * large enough free non-wrapping space as verified by http_comp_buffer_init().
+ * The number of bytes emitted is reported.
+ */
+static int rfc195x_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out, int finish)
+{
+	struct slz_stream *strm = &comp_ctx->strm;
+	const char *in_ptr;
+	int in_len;
+	int out_len;
+
+	/* default to the zero-copy reference; queued data supersedes it */
+	in_ptr = comp_ctx->direct_ptr;
+	in_len = comp_ctx->direct_len;
+
+	if (!b_is_null(&comp_ctx->queued)) {
+		in_ptr = b_head(&comp_ctx->queued);
+		in_len = b_data(&comp_ctx->queued);
+	}
+
+	out_len = b_data(out);
+
+	if (in_ptr)
+		b_add(out, slz_encode(strm, b_tail(out), in_ptr, in_len, !finish));
+
+	if (finish)
+		b_add(out, slz_finish(strm, b_tail(out)));
+	else
+		b_add(out, slz_flush(strm, b_tail(out)));
+
+	/* report only the bytes this call appended */
+	out_len = b_data(out) - out_len;
+
+	/* very important, we must wipe the data we've just flushed */
+	comp_ctx->direct_len = 0;
+	comp_ctx->direct_ptr = NULL;
+	comp_ctx->queued = BUF_NULL;
+
+	/* Verify compression rate limiting and CPU usage: dynamically switch
+	 * between level 0 and 1 depending on the global bandwidth limit and
+	 * the thread's idle percentage.
+	 */
+	if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) ||    /* rate */
+	    (th_ctx->idle_pct < compress_min_idle)) {                                                        /* idle */
+		if (comp_ctx->cur_lvl > 0)
+			strm->level = --comp_ctx->cur_lvl;
+	}
+	else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel && comp_ctx->cur_lvl < 1) {
+		strm->level = ++comp_ctx->cur_lvl;
+	}
+
+	/* and that's all */
+	return out_len;
+}
+
+/* flush pending data without emitting the stream trailer */
+static int rfc195x_flush(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return rfc195x_flush_or_finish(comp_ctx, out, 0);
+}
+
+/* flush pending data and emit the format-specific trailer */
+static int rfc195x_finish(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return rfc195x_flush_or_finish(comp_ctx, out, 1);
+}
+
+/* we just need to free the comp_ctx here, nothing was allocated */
+static int rfc195x_end(struct comp_ctx **comp_ctx)
+{
+	deinit_comp_ctx(comp_ctx);
+	return 0;
+}
+
+#elif defined(USE_ZLIB) /* ! USE_SLZ */
+
+/*
+ * This is a tricky allocation function using the zlib.
+ * This is based on the allocation order in deflateInit2: the thread-local
+ * <round> counter tells which of the 5 areas zlib is currently requesting,
+ * and each area gets its own lazily-created shared pool. Pools are created
+ * under a double-checked lock on <comp_pool_lock>. Returns NULL when the
+ * global zlib memory budget would be exceeded or the pool is exhausted.
+ */
+static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
+{
+	struct comp_ctx *ctx = opaque;
+	static THREAD_LOCAL char round = 0; /* order in deflateInit2 */
+	void *buf = NULL;
+	struct pool_head *pool = NULL;
+
+	/* enforce the configured zlib memory limit before allocating */
+	if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < (long)(items * size))
+		goto end;
+
+	switch (round) {
+	case 0:
+		/* create the pool on first use (double-checked locking) */
+		if (zlib_pool_deflate_state == NULL) {
+			HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+			if (zlib_pool_deflate_state == NULL)
+				zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
+			HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		}
+		pool = zlib_pool_deflate_state;
+		ctx->zlib_deflate_state = buf = pool_alloc(pool);
+		break;
+
+	case 1:
+		if (zlib_pool_window == NULL) {
+			HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+			if (zlib_pool_window == NULL)
+				zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
+			HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		}
+		pool = zlib_pool_window;
+		ctx->zlib_window = buf = pool_alloc(pool);
+		break;
+
+	case 2:
+		if (zlib_pool_prev == NULL) {
+			HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+			if (zlib_pool_prev == NULL)
+				zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
+			HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		}
+		pool = zlib_pool_prev;
+		ctx->zlib_prev = buf = pool_alloc(pool);
+		break;
+
+	case 3:
+		if (zlib_pool_head == NULL) {
+			HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+			if (zlib_pool_head == NULL)
+				zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
+			HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		}
+		pool = zlib_pool_head;
+		ctx->zlib_head = buf = pool_alloc(pool);
+		break;
+
+	case 4:
+		if (zlib_pool_pending_buf == NULL) {
+			HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
+			if (zlib_pool_pending_buf == NULL)
+				zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
+			HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
+		}
+		pool = zlib_pool_pending_buf;
+		ctx->zlib_pending_buf = buf = pool_alloc(pool);
+		break;
+	}
+	if (buf != NULL) {
+		/* account the allocation against the global zlib budget */
+		_HA_ATOMIC_ADD(&zlib_used_memory, pool->size);
+		__ha_barrier_atomic_store();
+	}
+
+end:
+
+	/* deflateInit2() first allocates and checks the deflate_state, then if
+	 * it succeeds, it allocates all other 4 areas at ones and checks them
+	 * at the end. So we want to correctly count the rounds depending on when
+	 * zlib is supposed to abort.
+	 */
+	if (buf || round)
+		round = (round + 1) % 5;
+	return buf;
+}
+
+/* zlib free callback: identify which of the 5 areas <ptr> belongs to by
+ * comparing against the pointers saved in the context, then return it to
+ * the matching pool and undo the memory accounting.
+ */
+static void free_zlib(void *opaque, void *ptr)
+{
+	struct comp_ctx *ctx = opaque;
+	struct pool_head *pool = NULL;
+
+	if (ptr == ctx->zlib_window)
+		pool = zlib_pool_window;
+	else if (ptr == ctx->zlib_deflate_state)
+		pool = zlib_pool_deflate_state;
+	else if (ptr == ctx->zlib_prev)
+		pool = zlib_pool_prev;
+	else if (ptr == ctx->zlib_head)
+		pool = zlib_pool_head;
+	else if (ptr == ctx->zlib_pending_buf)
+		pool = zlib_pool_pending_buf;
+	else {
+		// never matched, just to silence gcc
+		ABORT_NOW();
+		return;
+	}
+
+	pool_free(pool, ptr);
+	_HA_ATOMIC_SUB(&zlib_used_memory, pool->size);
+	__ha_barrier_atomic_store();
+}
+
+/**************************
+**** gzip algorithm ****
+***************************/
+/* initialize a zlib stream emitting the gzip format; adding 16 to windowBits
+ * asks zlib for a gzip wrapper (see deflateInit2()). Returns 0 on success,
+ * -1 on error.
+ */
+static int gzip_init(struct comp_ctx **comp_ctx, int level)
+{
+	z_stream *strm;
+
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	strm = &(*comp_ctx)->strm;
+
+	if (deflateInit2(strm, level, Z_DEFLATED, global_tune_zlibwindowsize + 16, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
+		deinit_comp_ctx(comp_ctx);
+		return -1;
+	}
+
+	(*comp_ctx)->cur_lvl = level;
+
+	return 0;
+}
+
+/* Raw deflate algorithm: a negative windowBits asks zlib for raw deflate
+ * output without any wrapper (see deflateInit2()). Returns 0 on success,
+ * -1 on error.
+ */
+static int raw_def_init(struct comp_ctx **comp_ctx, int level)
+{
+	z_stream *strm;
+
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	strm = &(*comp_ctx)->strm;
+
+	if (deflateInit2(strm, level, Z_DEFLATED, -global_tune_zlibwindowsize, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
+		deinit_comp_ctx(comp_ctx);
+		return -1;
+	}
+
+	(*comp_ctx)->cur_lvl = level;
+	return 0;
+}
+
+/**************************
+**** Deflate algorithm ****
+***************************/
+
+/* initialize a zlib stream emitting the zlib (RFC1950) wrapper, using the
+ * plain positive windowBits form of deflateInit2(). Returns 0 on success,
+ * -1 on error.
+ */
+static int deflate_init(struct comp_ctx **comp_ctx, int level)
+{
+	z_stream *strm;
+
+	if (init_comp_ctx(comp_ctx) < 0)
+		return -1;
+
+	strm = &(*comp_ctx)->strm;
+
+	if (deflateInit2(strm, level, Z_DEFLATED, global_tune_zlibwindowsize, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
+		deinit_comp_ctx(comp_ctx);
+		return -1;
+	}
+
+	(*comp_ctx)->cur_lvl = level;
+
+	return 0;
+}
+
+/* Feed <in_len> bytes from <in_data> into the zlib stream, compressing as
+ * much as fits into <out>. Return the size of consumed data or -1.
+ */
+static int deflate_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
+{
+	int ret;
+	z_stream *strm = &comp_ctx->strm;
+	char *out_data = b_tail(out);
+	int out_len = b_room(out);
+
+	if (in_len <= 0)
+		return 0;
+
+
+	if (out_len <= 0)
+		return -1;
+
+	strm->next_in = (unsigned char *)in_data;
+	strm->avail_in = in_len;
+	strm->next_out = (unsigned char *)out_data;
+	strm->avail_out = out_len;
+
+	ret = deflate(strm, Z_NO_FLUSH);
+	if (ret != Z_OK)
+		return -1;
+
+	/* deflate update the available data out */
+	b_add(out, out_len - strm->avail_out);
+
+	/* report how much input zlib actually consumed */
+	return in_len - strm->avail_in;
+}
+
+/* Flush the zlib stream into <out> using <flag> (Z_SYNC_FLUSH or Z_FINISH),
+ * then adapt the compression level to the global rate limit and CPU idle.
+ * Returns the number of bytes emitted or -1 on zlib error.
+ */
+static int deflate_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out, int flag)
+{
+	int ret;
+	int out_len = 0;
+	z_stream *strm = &comp_ctx->strm;
+
+	/* no more input: only drain what zlib has buffered */
+	strm->next_in = NULL;
+	strm->avail_in = 0;
+	strm->next_out = (unsigned char *)b_tail(out);
+	strm->avail_out = b_room(out);
+
+	ret = deflate(strm, flag);
+	if (ret != Z_OK && ret != Z_STREAM_END)
+		return -1;
+
+	out_len = b_room(out) - strm->avail_out;
+	b_add(out, out_len);
+
+	/* compression limit */
+	if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) ||    /* rate */
+	    (th_ctx->idle_pct < compress_min_idle)) {                                                        /* idle */
+		/* decrease level */
+		if (comp_ctx->cur_lvl > 0) {
+			comp_ctx->cur_lvl--;
+			deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY);
+		}
+
+	} else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel) {
+		/* increase level */
+		comp_ctx->cur_lvl++ ;
+		deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY);
+	}
+
+	return out_len;
+}
+
+/* flush pending compressed data without ending the stream */
+static int deflate_flush(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return deflate_flush_or_finish(comp_ctx, out, Z_SYNC_FLUSH);
+}
+
+/* flush pending compressed data and terminate the stream */
+static int deflate_finish(struct comp_ctx *comp_ctx, struct buffer *out)
+{
+	return deflate_flush_or_finish(comp_ctx, out, Z_FINISH);
+}
+
+/* Release the zlib stream then the context. Returns the deflateEnd() status
+ * so the caller can detect an inconsistent stream.
+ */
+static int deflate_end(struct comp_ctx **comp_ctx)
+{
+	z_stream *strm = &(*comp_ctx)->strm;
+	int ret;
+
+	ret = deflateEnd(strm);
+
+	deinit_comp_ctx(comp_ctx);
+
+	return ret;
+}
+
+/* config parser for global "tune.zlibmemlevel". Expects one numeric argument
+ * in [1..9]. Returns 0 on success, -1 on error (message left in <err>).
+ */
+static int zlib_parse_global_memlevel(char **args, int section_type, struct proxy *curpx,
+                                      const struct proxy *defpx, const char *file, int line,
+                                      char **err)
+{
+	int value;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "'%s' expects a numeric value between 1 and 9.", args[0]);
+		return -1;
+	}
+
+	/* validate into a local first so the global is never left holding an
+	 * out-of-range value when the directive is rejected.
+	 */
+	value = atoi(args[1]);
+	if (value < 1 || value > 9) {
+		memprintf(err, "'%s' expects a numeric value between 1 and 9.", args[0]);
+		return -1;
+	}
+	global_tune_zlibmemlevel = value;
+	return 0;
+}
+
+
+/* config parser for global "tune.zlibwindowsize". Expects one numeric
+ * argument in [8..15]. Returns 0 on success, -1 on error (message left in
+ * <err>).
+ */
+static int zlib_parse_global_windowsize(char **args, int section_type, struct proxy *curpx,
+                                        const struct proxy *defpx, const char *file, int line,
+                                        char **err)
+{
+	int value;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "'%s' expects a numeric value between 8 and 15.", args[0]);
+		return -1;
+	}
+
+	/* validate into a local first so the global is never left holding an
+	 * out-of-range value when the directive is rejected.
+	 */
+	value = atoi(args[1]);
+	if (value < 8 || value > 15) {
+		memprintf(err, "'%s' expects a numeric value between 8 and 15.", args[0]);
+		return -1;
+	}
+	global_tune_zlibwindowsize = value;
+	return 0;
+}
+
+#endif /* USE_ZLIB */
+
+
+/* config keyword parsers, registered at startup via INITCALL. The zlib
+ * tunables only exist when built with USE_ZLIB.
+ */
+static struct cfg_kw_list cfg_kws = {ILH, {
+#ifdef USE_ZLIB
+	{ CFG_GLOBAL, "tune.zlib.memlevel",   zlib_parse_global_memlevel },
+	{ CFG_GLOBAL, "tune.zlib.windowsize", zlib_parse_global_windowsize },
+#endif
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* Build the "haproxy -vv" string describing the compression backend (zlib
+ * build/runtime versions, SLZ, or none) and the list of supported algorithm
+ * names, then register it among the build options.
+ */
+static void comp_register_build_opts(void)
+{
+	char *ptr = NULL;
+	int i;
+
+#ifdef USE_ZLIB
+	memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
+	memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
+#elif defined(USE_SLZ)
+	memprintf(&ptr, "Built with libslz for stateless compression.");
+#else
+	memprintf(&ptr, "Built without compression support (neither USE_ZLIB nor USE_SLZ are set).");
+#endif
+	memprintf(&ptr, "%s\nCompression algorithms supported :", ptr);
+
+	/* list every entry of comp_algos[], comma-separated */
+	for (i = 0; comp_algos[i].cfg_name; i++)
+		memprintf(&ptr, "%s%s %s(\"%s\")", ptr, (i == 0 ? "" : ","), comp_algos[i].cfg_name, comp_algos[i].ua_name);
+
+	if (i == 0)
+		memprintf(&ptr, "%s none", ptr);
+
+	hap_register_build_opts(ptr, 1);
+}
+
+INITCALL0(STG_REGISTER, comp_register_build_opts);
diff --git a/src/connection.c b/src/connection.c
new file mode 100644
index 0000000..7930cc4
--- /dev/null
+++ b/src/connection.c
@@ -0,0 +1,2748 @@
+/*
+ * Connection management functions
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/connection.h>
+#include <haproxy/fd.h>
+#include <haproxy/frontend.h>
+#include <haproxy/hash.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/proto_rhttp.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+
+/* memory pools for connections, idle-conn hash nodes, addresses and
+ * proxy-protocol v2 TLVs (two sizes of TLV payload).
+ */
+DECLARE_POOL(pool_head_connection,     "connection",     sizeof(struct connection));
+DECLARE_POOL(pool_head_conn_hash_node, "conn_hash_node", sizeof(struct conn_hash_node));
+DECLARE_POOL(pool_head_sockaddr,       "sockaddr",       sizeof(struct sockaddr_storage));
+DECLARE_POOL(pool_head_pp_tlv_128,     "pp_tlv_128",     sizeof(struct conn_tlv_list) + HA_PP2_TLV_VALUE_128);
+DECLARE_POOL(pool_head_pp_tlv_256,     "pp_tlv_256",     sizeof(struct conn_tlv_list) + HA_PP2_TLV_VALUE_256);
+
+/* per-thread idle connection storage and the registered transport layers */
+struct idle_conns idle_conns[MAX_THREADS] = { };
+struct xprt_ops *registered_xprt[XPRT_ENTRIES] = { NULL, };
+
+/* List head of all known muxes for PROTO */
+struct mux_proto_list mux_proto_list = {
+	.list = LIST_HEAD_INIT(mux_proto_list.list)
+};
+
+/* per-thread mux state — presumably used during stopping; confirm usage from callers */
+struct mux_stopping_data mux_stopping_data[MAX_THREADS];
+
+/* disables sending of proxy-protocol-v2's LOCAL command */
+static int pp2_never_send_local;
+
+/* find the value of a received TLV for a given type */
+struct conn_tlv_list *conn_get_tlv(struct connection *conn, int type)
+{
+	struct conn_tlv_list *node;
+
+	if (!conn)
+		return NULL;
+
+	/* linear scan of the connection's TLV list for the first match */
+	list_for_each_entry(node, &conn->tlv_list, list) {
+		if (node->type == type)
+			return node;
+	}
+
+	return NULL;
+}
+
+/* Remove <conn> idle connection from its attached tree (idle, safe or avail).
+ * If also present in the secondary server idle list, conn is removed from it.
+ *
+ * Must be called with idle_conns_lock held.
+ */
+void conn_delete_from_tree(struct connection *conn)
+{
+	/* LIST_DEL_INIT leaves the node self-linked so a later delete is safe */
+	LIST_DEL_INIT(&conn->idle_list);
+	eb64_delete(&conn->hash_node->node);
+}
+
+/* Install the mux on <conn> once it is usable. On the backend side, the
+ * proper mux (check or regular) is installed and the connection may be added
+ * to the server's available list or to the session's private list. On the
+ * frontend side, this completes the session. Returns 0 on success (or the
+ * result of conn_complete_session() for frontends), -1 on failure after
+ * waking the upper layer or the rhttp listener task.
+ */
+int conn_create_mux(struct connection *conn)
+{
+	if (conn_is_back(conn)) {
+		struct server *srv;
+		struct stconn *sc = conn->ctx;
+		struct session *sess = conn->owner;
+
+		if (conn->flags & CO_FL_ERROR)
+			goto fail;
+
+		/* health checks use their own mux installer */
+		if (sess && obj_type(sess->origin) == OBJ_TYPE_CHECK) {
+			if (conn_install_mux_chk(conn, conn->ctx, sess) < 0)
+				goto fail;
+		}
+		else if (conn_install_mux_be(conn, conn->ctx, sess, NULL) < 0)
+			goto fail;
+		srv = objt_server(conn->target);
+
+		/* If we're doing http-reuse always, and the connection is not
+		 * private with available streams (an http2 connection), add it
+		 * to the available list, so that others can use it right
+		 * away. If the connection is private, add it in the session
+		 * server list.
+		 */
+		if (srv && ((srv->proxy->options & PR_O_REUSE_MASK) == PR_O_REUSE_ALWS) &&
+		    !(conn->flags & CO_FL_PRIVATE) && conn->mux->avail_streams(conn) > 0) {
+			srv_add_to_avail_list(srv, conn);
+		}
+		else if (conn->flags & CO_FL_PRIVATE) {
+			/* If it fail now, the same will be done in mux->detach() callback */
+			session_add_conn(sess, conn, conn->target);
+		}
+		return 0;
+fail:
+		/* let the upper layer know the connection failed */
+		if (sc) {
+			sc->app_ops->wake(sc);
+		}
+		else if (conn_reverse_in_preconnect(conn)) {
+			struct listener *l = conn_active_reverse_listener(conn);
+
+			/* If mux init failed, consider connection on error.
+			 * This is necessary to ensure connection is freed by
+			 * proto-rhttp receiver task.
+			 */
+			if (!conn->mux)
+				conn->flags |= CO_FL_ERROR;
+
+			/* If connection is interrupted without CO_FL_ERROR, receiver task won't free it. */
+			BUG_ON(!(conn->flags & CO_FL_ERROR));
+
+			task_wakeup(l->rx.rhttp.task, TASK_WOKEN_ANY);
+		}
+		return -1;
+	} else
+		return conn_complete_session(conn);
+
+}
+
+/* This is used at the end of the socket IOCB to possibly create the mux if it
+ * was not done yet, or wake it up if flags changed compared to old_flags or if
+ * need_wake insists on this. It returns <0 if the connection was destroyed and
+ * must not be used, >=0 otherwise.
+ */
+int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake)
+{
+	int ret = 0;
+
+	/* If we don't yet have a mux, that means we were waiting for
+	 * information to create one, typically from the ALPN. If we're
+	 * done with the handshake, attempt to create one.
+	 */
+	if (unlikely(!conn->mux) && !(conn->flags & CO_FL_WAIT_XPRT)) {
+		ret = conn_create_mux(conn);
+		if (ret < 0)
+			goto done;
+	}
+
+	/* The wake callback is normally used to notify the data layer about
+	 * data layer activity (successful send/recv), connection establishment,
+	 * shutdown and fatal errors. We need to consider the following
+	 * situations to wake up the data layer :
+	 *  - change among the CO_FL_NOTIFY_DONE flags :
+	 *    SOCK_{RD,WR}_SH, ERROR,
+	 *  - absence of any of {L4,L6}_CONN and CONNECTED, indicating the
+	 *    end of handshake and transition to CONNECTED
+	 *  - raise of CONNECTED with HANDSHAKE down
+	 *  - end of HANDSHAKE with CONNECTED set
+	 *  - regular data layer activity
+	 *
+	 * One tricky case is the wake up on read0 or error on an idle
+	 * backend connection, that can happen on a connection that is still
+	 * polled while at the same moment another thread is about to perform a
+	 * takeover. The solution against this is to remove the connection from
+	 * the idle list if it was in it, and possibly reinsert it at the end
+	 * if the connection remains valid. The cost is non-null (locked tree
+	 * removal) but remains low given that this is extremely rarely called.
+	 * In any case it's guaranteed by the FD's thread_mask that we're
+	 * called from the same thread the connection is queued in.
+	 *
+	 * Note that the wake callback is allowed to release the connection and
+	 * the fd (and return < 0 in this case).
+	 */
+	if ((forced_wake ||
+	     ((conn->flags ^ old_flags) & CO_FL_NOTIFY_DONE) ||
+	     ((old_flags & CO_FL_WAIT_XPRT) && !(conn->flags & CO_FL_WAIT_XPRT))) &&
+	    conn->mux && conn->mux->wake) {
+		/* remember which idle list the conn was in so it can be
+		 * reinserted in the same one after the wake callback.
+		 */
+		uint conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		struct server *srv = objt_server(conn->target);
+
+		if (conn_in_list) {
+			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			conn_delete_from_tree(conn);
+			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+		}
+
+		ret = conn->mux->wake(conn);
+		if (ret < 0)
+			goto done;
+
+		if (conn_in_list) {
+			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
+			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+		}
+	}
+ done:
+	return ret;
+}
+
+/* Change the mux for the connection.
+ * The caller should make sure he's not subscribed to the underlying XPRT.
+ * If <mux_proto> is empty, the ALPN token negotiated on the connection is
+ * used to select the new mux. Returns 0 on success (including when the mux
+ * is already the right one), -1 when no mux matches or its init fails (in
+ * which case the previous mux is restored).
+ */
+int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
+                        struct ist mux_proto, int mode)
+{
+	struct bind_conf     *bind_conf = __objt_listener(conn->target)->bind_conf;
+	const struct mux_ops *old_mux, *new_mux;
+	void *old_mux_ctx;
+	const char *alpn_str = NULL;
+	int alpn_len = 0;
+
+	if (!mux_proto.len) {
+		conn_get_alpn(conn, &alpn_str, &alpn_len);
+		mux_proto = ist2(alpn_str, alpn_len);
+	}
+	new_mux = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_FE, mode);
+	old_mux = conn->mux;
+
+	/* No mux found */
+	if (!new_mux)
+		return -1;
+
+	/* Same mux, nothing to do */
+	if (old_mux == new_mux)
+		return 0;
+
+	old_mux_ctx = conn->ctx;
+	conn->mux   = new_mux;
+	conn->ctx   = ctx;
+	if (new_mux->init(conn, bind_conf->frontend, conn->owner, buf) == -1) {
+		/* The mux upgrade failed, so restore the old mux */
+		conn->ctx = old_mux_ctx;
+		conn->mux = old_mux;
+		return -1;
+	}
+
+	/* The mux was upgraded, destroy the old one */
+	*buf = BUF_NULL;
+	old_mux->destroy(old_mux_ctx);
+	return 0;
+}
+
+/* installs the best mux for incoming connection <conn> using the upper context
+ * <ctx>. If the mux protocol is forced, we use it to find the best
+ * mux. Otherwise we use the ALPN name, if any. Returns < 0 on error.
+ */
+int conn_install_mux_fe(struct connection *conn, void *ctx)
+{
+	struct bind_conf     *bind_conf = __objt_listener(conn->target)->bind_conf;
+	const struct mux_ops *mux_ops;
+
+	if (bind_conf->mux_proto)
+		mux_ops = bind_conf->mux_proto->mux;
+	else {
+		struct ist mux_proto;
+		const char *alpn_str = NULL;
+		int alpn_len = 0;
+		int mode;
+
+		/* the proto mode follows the frontend's configured mode */
+		if (bind_conf->frontend->mode == PR_MODE_HTTP)
+			mode = PROTO_MODE_HTTP;
+		else
+			mode = PROTO_MODE_TCP;
+
+		conn_get_alpn(conn, &alpn_str, &alpn_len);
+		mux_proto = ist2(alpn_str, alpn_len);
+		mux_ops = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_FE, mode);
+		if (!mux_ops)
+			return -1;
+	}
+
+	/* Ensure a valid protocol is selected if connection is targeted by a
+	 * tcp-request session attach-srv rule.
+	 */
+	if (conn->reverse.target && !(mux_ops->flags & MX_FL_REVERSABLE)) {
+		conn->err_code = CO_ER_REVERSE;
+		return -1;
+	}
+
+	return conn_install_mux(conn, mux_ops, ctx, bind_conf->frontend, conn->owner);
+}
+
+/* installs the best mux for outgoing connection <conn> using the upper context
+ * <ctx>. If the server mux protocol is forced, we use it to find the best mux.
+ * It's also possible to specify an alternative mux protocol <force_mux_ops>,
+ * in which case it will be used instead of the default server mux protocol.
+ *
+ * Returns < 0 on error.
+ */
+int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess,
+                        const struct mux_ops *force_mux_ops)
+{
+	struct server *srv = objt_server(conn->target);
+	struct proxy  *prx = objt_proxy(conn->target);
+	const struct mux_ops *mux_ops;
+
+	if (srv)
+		prx = srv->proxy;
+
+	if (!prx) // target must be either proxy or server
+		return -1;
+
+	if (srv && srv->mux_proto && likely(!force_mux_ops)) {
+		mux_ops = srv->mux_proto->mux;
+	}
+	else if (srv && unlikely(force_mux_ops)) {
+		mux_ops = force_mux_ops;
+	}
+	else {
+		struct ist mux_proto;
+		const char *alpn_str = NULL;
+		int alpn_len = 0;
+		int mode;
+
+		/* the proto mode follows the backend's configured mode */
+		if (prx->mode == PR_MODE_HTTP)
+			mode = PROTO_MODE_HTTP;
+		else
+			mode = PROTO_MODE_TCP;
+
+		conn_get_alpn(conn, &alpn_str, &alpn_len);
+		mux_proto = ist2(alpn_str, alpn_len);
+
+		mux_ops = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_BE, mode);
+		if (!mux_ops)
+			return -1;
+	}
+	return conn_install_mux(conn, mux_ops, ctx, prx, sess);
+}
+
+/* installs the best mux for outgoing connection <conn> for a check using the
+ * upper context <ctx>. If the mux protocol is forced by the check, we use it to
+ * find the best mux. Returns < 0 on error.
+ */
+int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *sess)
+{
+	struct check  *check = objt_check(sess->origin);
+	struct server *srv = objt_server(conn->target);
+	struct proxy  *prx = objt_proxy(conn->target);
+	const struct mux_ops *mux_ops;
+
+	if (!check) // Check must be defined
+		return -1;
+
+	if (srv)
+		prx = srv->proxy;
+
+	if (!prx) // target must be either proxy or server
+		return -1;
+
+	if (check->mux_proto)
+		mux_ops = check->mux_proto->mux;
+	else {
+		struct ist mux_proto;
+		const char *alpn_str = NULL;
+		int alpn_len = 0;
+		int mode;
+
+		/* the proto mode follows the kind of tcp-check ruleset */
+		if ((check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK)
+			mode = PROTO_MODE_HTTP;
+		else
+			mode = PROTO_MODE_TCP;
+
+		conn_get_alpn(conn, &alpn_str, &alpn_len);
+		mux_proto = ist2(alpn_str, alpn_len);
+
+		mux_ops = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_BE, mode);
+		if (!mux_ops)
+			return -1;
+	}
+	return conn_install_mux(conn, mux_ops, ctx, prx, sess);
+}
+
+/* Set the ALPN of connection <conn> to <alpn>. If force is false, <alpn> must
+ * be a subset or identical to the registered protos for the parent SSL_CTX.
+ * In this case <alpn> must be a single protocol value, not a list. Note that
+ * <alpn> is expected in wire format, i.e. a one-byte length prefix followed
+ * by the protocol name (the comparison below includes the length byte).
+ *
+ * Returns 0 if ALPN is updated else -1.
+ */
+int conn_update_alpn(struct connection *conn, const struct ist alpn, int force)
+{
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+	size_t alpn_len = istlen(alpn);
+	char *ctx_alpn_str = NULL;
+	int ctx_alpn_len = 0, found = 0;
+
+	/* if not force, first search if alpn is a subset or identical to the
+	 * parent SSL_CTX.
+	 */
+	if (!force) {
+		/* retrieve the SSL_CTX according to the connection side. */
+		if (conn_is_back(conn)) {
+			if (obj_type(conn->target) == OBJ_TYPE_SERVER) {
+				struct server *srv = __objt_server(conn->target);
+				ctx_alpn_str = srv->ssl_ctx.alpn_str;
+				ctx_alpn_len = srv->ssl_ctx.alpn_len;
+			}
+		}
+		else {
+			struct session *sess = conn->owner;
+			struct listener *li = sess->listener;
+
+			if (li->bind_conf && li->bind_conf->options & BC_O_USE_SSL) {
+				ctx_alpn_str = li->bind_conf->ssl_conf.alpn_str;
+				ctx_alpn_len = li->bind_conf->ssl_conf.alpn_len;
+			}
+		}
+
+		if (ctx_alpn_str) {
+			/* search if ALPN is present in SSL_CTX ALPN before
+			 * using it.
+			 */
+			while (ctx_alpn_len) {
+				/* skip an entry whose length byte does not
+				 * match <alpn>'s data length.
+				 */
+				if (*ctx_alpn_str != alpn_len - 1) {
+					ctx_alpn_len -= *ctx_alpn_str + 1;
+				}
+				else {
+					/* lengths match: compare <len><proto>
+					 * as a whole, case-insensitively.
+					 */
+					if (isteqi(ist2(ctx_alpn_str, alpn_len), alpn)) {
+						found = 1;
+						break;
+					}
+				}
+				ctx_alpn_str += *ctx_alpn_str + 1;
+
+				/* This indicates an invalid ALPN formatted
+				 * string and should never happen. */
+				BUG_ON(ctx_alpn_len < 0);
+			}
+		}
+	}
+
+	if (found || force) {
+		ssl_sock_set_alpn(conn, (const uchar *)istptr(alpn), istlen(alpn));
+		return 0;
+	}
+
+#endif
+	return -1;
+}
+
+/* Initializes all required fields for a new connection. Note that it does the
+ * minimum acceptable initialization for a connection that already exists and
+ * is about to be reused. It also leaves the addresses untouched, which makes
+ * it usable across connection retries to reset a connection to a known state.
+ * <target> determines the side of the connection: conn_is_back() relies on it
+ * right below to pick which list member to initialize.
+ */
+void conn_init(struct connection *conn, void *target)
+{
+	conn->obj_type = OBJ_TYPE_CONN;
+	conn->flags = CO_FL_NONE;
+	conn->mux = NULL;
+	conn->ctx = NULL;
+	conn->owner = NULL;
+	conn->send_proxy_ofs = 0;
+	conn->handle.fd = DEAD_FD_MAGIC;
+	conn->err_code = CO_ER_NONE;
+	conn->target = target;
+	conn->destroy_cb = NULL;
+	conn->proxy_netns = NULL;
+	MT_LIST_INIT(&conn->toremove_list);
+	/* session_list and stopping_list share storage: only the one matching
+	 * the connection's side is valid.
+	 */
+	if (conn_is_back(conn))
+		LIST_INIT(&conn->session_list);
+	else
+		LIST_INIT(&conn->stopping_list);
+	LIST_INIT(&conn->tlv_list);
+	conn->subs = NULL;
+	conn->src = NULL;
+	conn->dst = NULL;
+	conn->hash_node = NULL;
+	conn->xprt = NULL;
+	conn->reverse.target = NULL;
+	conn->reverse.name = BUF_NULL;
+}
+
+/* Initialize members used for backend connections: pre-allocates the
+ * destination address and the idle-conn hash node.
+ *
+ * Returns 0 on success else non-zero. On failure the partially allocated
+ * members are left as-is; the caller is expected to release them via
+ * conn_free().
+ */
+static int conn_backend_init(struct connection *conn)
+{
+	if (!sockaddr_alloc(&conn->dst, 0, 0))
+		return 1;
+
+	conn->hash_node = conn_alloc_hash_node(conn);
+	if (unlikely(!conn->hash_node))
+		return 1;
+
+	return 0;
+}
+
+/* Release connection elements reserved for backend side usage. It also takes
+ * care to detach it if linked to a session or a server instance.
+ *
+ * This function is useful when freeing a connection or reversing it to the
+ * frontend side.
+ */
+static void conn_backend_deinit(struct connection *conn)
+{
+	/* If the connection is owned by the session, remove it from its list
+	 */
+	if (conn_is_back(conn) && LIST_INLIST(&conn->session_list)) {
+		session_unown_conn(conn->owner, conn);
+	}
+	else if (!(conn->flags & CO_FL_PRIVATE)) {
+		/* non-private server connections may still be accounted on the
+		 * server; release that reference.
+		 */
+		if (obj_type(conn->target) == OBJ_TYPE_SERVER)
+			srv_release_conn(__objt_server(conn->target), conn);
+	}
+
+	/* Make sure the connection is not left in the idle connection tree */
+	if (conn->hash_node != NULL)
+		BUG_ON(conn->hash_node->node.node.leaf_p != NULL);
+
+	/* pool_free() accepts NULL, so no guard is needed here */
+	pool_free(pool_head_conn_hash_node, conn->hash_node);
+	conn->hash_node = NULL;
+
+}
+
+/* Tries to allocate a new connection and initialized its main fields. The
+ * connection is returned on success, NULL on failure. The connection must
+ * be released using pool_free() or conn_free(). For backend connections the
+ * extra members (dst address, hash node) are allocated too, and the server
+ * use-count is taken before so that conn_free() can undo it on failure.
+ */
+struct connection *conn_new(void *target)
+{
+	struct connection *conn;
+
+	conn = pool_alloc(pool_head_connection);
+	if (unlikely(!conn))
+		return NULL;
+
+	conn_init(conn, target);
+
+	if (conn_is_back(conn)) {
+		if (obj_type(target) == OBJ_TYPE_SERVER)
+			srv_use_conn(__objt_server(target), conn);
+
+		if (conn_backend_init(conn)) {
+			/* conn_free() releases whatever conn_backend_init()
+			 * managed to allocate and detaches from the server.
+			 */
+			conn_free(conn);
+			return NULL;
+		}
+	}
+
+	return conn;
+}
+
+/* Releases a connection previously allocated by conn_new(). Also releases the
+ * addresses, the attached TLVs, the reversed-connection name buffer, and any
+ * pending subscription, then returns the connection to its pool.
+ */
+void conn_free(struct connection *conn)
+{
+	struct conn_tlv_list *tlv, *tlv_back = NULL;
+
+	if (conn_is_back(conn))
+		conn_backend_deinit(conn);
+
+	/* Remove the conn from toremove_list.
+	 *
+	 * This is needed to prevent a double-free in case the connection was
+	 * already scheduled from cleaning but is freed before via another
+	 * call.
+	 */
+	MT_LIST_DELETE(&conn->toremove_list);
+
+	sockaddr_free(&conn->src);
+	sockaddr_free(&conn->dst);
+
+	/* Free all previously allocated TLVs. The storage origin depends on
+	 * the length, mirroring the allocation strategy in conn_recv_proxy().
+	 */
+	list_for_each_entry_safe(tlv, tlv_back, &conn->tlv_list, list) {
+		LIST_DELETE(&tlv->list);
+		if (tlv->len > HA_PP2_TLV_VALUE_256)
+			free(tlv);
+		else if (tlv->len <= HA_PP2_TLV_VALUE_128)
+			pool_free(pool_head_pp_tlv_128, tlv);
+		else
+			pool_free(pool_head_pp_tlv_256, tlv);
+	}
+
+	ha_free(&conn->reverse.name.area);
+
+	/* keep the reverse-HTTP accounting consistent when the connection
+	 * disappears before or after being reversed.
+	 */
+	if (conn_reverse_in_preconnect(conn)) {
+		struct listener *l = conn_active_reverse_listener(conn);
+		rhttp_notify_preconn_err(l);
+		HA_ATOMIC_DEC(&th_ctx->nb_rhttp_conns);
+	}
+	else if (conn->flags & CO_FL_REVERSED) {
+		HA_ATOMIC_DEC(&th_ctx->nb_rhttp_conns);
+	}
+
+
+	conn_force_unsubscribe(conn);
+	pool_free(pool_head_connection, conn);
+}
+
+/* Allocates a zero-initialized hash node for connection <conn> and links it
+ * back to the connection. Returns the node, or NULL on allocation failure.
+ */
+struct conn_hash_node *conn_alloc_hash_node(struct connection *conn)
+{
+	struct conn_hash_node *node = pool_zalloc(pool_head_conn_hash_node);
+
+	if (!node)
+		return NULL;
+
+	node->conn = conn;
+	return node;
+}
+
+/* Allocates a struct sockaddr from the pool if needed, assigns it to *sap and
+ * returns it. If <sap> is NULL, the address is always allocated and returned.
+ * If <sap> is non-null, an address will only be allocated if it points to a
+ * null pointer; otherwise the existing address is returned untouched. In the
+ * allocation case the new pointer is assigned there. If <orig> is non-null
+ * and <len> positive, the address in <orig> will be copied into the allocated
+ * address. In both situations the resulting pointer is returned (NULL on
+ * allocation failure).
+ */
+struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap, const struct sockaddr_storage *orig, socklen_t len)
+{
+	struct sockaddr_storage *sa;
+
+	if (sap && *sap)
+		return *sap;
+
+	sa = pool_alloc(pool_head_sockaddr);
+	if (sa && orig && len > 0)
+		memcpy(sa, orig, len);
+	if (sap)
+		*sap = sa;
+	return sa;
+}
+
+/* Releases the struct sockaddr potentially pointed to by <sap> to the pool.
+ * <sap> may be NULL or may point to NULL. If <sap> is not NULL, a NULL is
+ * placed there.
+ */
+void sockaddr_free(struct sockaddr_storage **sap)
+{
+	if (sap) {
+		/* pool_free() tolerates a NULL pointer */
+		pool_free(pool_head_sockaddr, *sap);
+		*sap = NULL;
+	}
+}
+
+/* Try to add a handshake pseudo-XPRT. If the connection's first XPRT is
+ * raw_sock, then just use the new XPRT as the connection XPRT, otherwise
+ * call the xprt's add_xprt() method.
+ * Returns 0 on success, or non-zero on failure.
+ */
+int xprt_add_hs(struct connection *conn)
+{
+	void *xprt_ctx = NULL;
+	const struct xprt_ops *ops = xprt_get(XPRT_HANDSHAKE);
+	void *nextxprt_ctx = NULL;
+	const struct xprt_ops *nextxprt_ops = NULL;
+
+	if (conn->flags & CO_FL_ERROR)
+		return -1;
+	if (ops->init(conn, &xprt_ctx) < 0)
+		return -1;
+	if (conn->xprt == xprt_get(XPRT_RAW)) {
+		/* raw socket: the handshake xprt simply takes over as the
+		 * connection's top-level xprt, with the raw xprt below it.
+		 */
+		nextxprt_ctx = conn->xprt_ctx;
+		nextxprt_ops = conn->xprt;
+		conn->xprt_ctx = xprt_ctx;
+		conn->xprt = ops;
+	} else {
+		/* otherwise let the current xprt insert the handshake layer
+		 * at the right place in the stack; it returns what will sit
+		 * below the handshake layer in nextxprt_{ctx,ops}.
+		 */
+		if (conn->xprt->add_xprt(conn, conn->xprt_ctx, xprt_ctx, ops,
+		                         &nextxprt_ctx, &nextxprt_ops) != 0) {
+			ops->close(conn, xprt_ctx);
+			return -1;
+		}
+	}
+	/* finally tell the handshake layer what to forward to */
+	if (ops->add_xprt(conn, xprt_ctx, nextxprt_ctx, nextxprt_ops, NULL, NULL) != 0) {
+		ops->close(conn, xprt_ctx);
+		return -1;
+	}
+	return 0;
+}
+
+/* returns a human-readable error code for conn->err_code, or NULL if the code
+ * is unknown. The strings are static constants and must not be freed.
+ */
+const char *conn_err_code_str(struct connection *c)
+{
+	switch (c->err_code) {
+	case CO_ER_NONE:             return "Success";
+
+	case CO_ER_CONF_FDLIM:       return "Reached configured maxconn value";
+	case CO_ER_PROC_FDLIM:       return "Too many sockets on the process";
+	case CO_ER_SYS_FDLIM:        return "Too many sockets on the system";
+	case CO_ER_SYS_MEMLIM:       return "Out of system buffers";
+	case CO_ER_NOPROTO:          return "Protocol or address family not supported";
+	case CO_ER_SOCK_ERR:         return "General socket error";
+	case CO_ER_PORT_RANGE:       return "Source port range exhausted";
+	case CO_ER_CANT_BIND:        return "Can't bind to source address";
+	case CO_ER_FREE_PORTS:       return "Out of local source ports on the system";
+	case CO_ER_ADDR_INUSE:       return "Local source address already in use";
+
+	case CO_ER_PRX_EMPTY:        return "Connection closed while waiting for PROXY protocol header";
+	case CO_ER_PRX_ABORT:        return "Connection error while waiting for PROXY protocol header";
+	case CO_ER_PRX_TIMEOUT:      return "Timeout while waiting for PROXY protocol header";
+	case CO_ER_PRX_TRUNCATED:    return "Truncated PROXY protocol header received";
+	case CO_ER_PRX_NOT_HDR:      return "Received something which does not look like a PROXY protocol header";
+	case CO_ER_PRX_BAD_HDR:      return "Received an invalid PROXY protocol header";
+	case CO_ER_PRX_BAD_PROTO:    return "Received an unhandled protocol in the PROXY protocol header";
+
+	case CO_ER_CIP_EMPTY:        return "Connection closed while waiting for NetScaler Client IP header";
+	case CO_ER_CIP_ABORT:        return "Connection error while waiting for NetScaler Client IP header";
+	case CO_ER_CIP_TIMEOUT:      return "Timeout while waiting for a NetScaler Client IP header";
+	case CO_ER_CIP_TRUNCATED:    return "Truncated NetScaler Client IP header received";
+	case CO_ER_CIP_BAD_MAGIC:    return "Received an invalid NetScaler Client IP magic number";
+	case CO_ER_CIP_BAD_PROTO:    return "Received an unhandled protocol in the NetScaler Client IP header";
+
+	case CO_ER_SSL_EMPTY:        return "Connection closed during SSL handshake";
+	case CO_ER_SSL_ABORT:        return "Connection error during SSL handshake";
+	case CO_ER_SSL_TIMEOUT:      return "Timeout during SSL handshake";
+	case CO_ER_SSL_TOO_MANY:     return "Too many SSL connections";
+	case CO_ER_SSL_NO_MEM:       return "Out of memory when initializing an SSL connection";
+	case CO_ER_SSL_RENEG:        return "Rejected a client-initiated SSL renegotiation attempt";
+	case CO_ER_SSL_CA_FAIL:      return "SSL client CA chain cannot be verified";
+	case CO_ER_SSL_CRT_FAIL:     return "SSL client certificate not trusted";
+	case CO_ER_SSL_MISMATCH:     return "Server presented an SSL certificate different from the configured one";
+	case CO_ER_SSL_MISMATCH_SNI: return "Server presented an SSL certificate different from the expected one";
+	case CO_ER_SSL_HANDSHAKE:    return "SSL handshake failure";
+	case CO_ER_SSL_HANDSHAKE_HB: return "SSL handshake failure after heartbeat";
+	case CO_ER_SSL_KILLED_HB:    return "Stopped a TLSv1 heartbeat attack (CVE-2014-0160)";
+	case CO_ER_SSL_NO_TARGET:    return "Attempt to use SSL on an unknown target (internal error)";
+	case CO_ER_SSL_EARLY_FAILED: return "Server refused early data";
+
+	case CO_ER_SOCKS4_SEND:      return "SOCKS4 Proxy write error during handshake";
+	case CO_ER_SOCKS4_RECV:      return "SOCKS4 Proxy read error during handshake";
+	case CO_ER_SOCKS4_DENY:      return "SOCKS4 Proxy deny the request";
+	case CO_ER_SOCKS4_ABORT:     return "SOCKS4 Proxy handshake aborted by server";
+
+	case CO_ERR_SSL_FATAL:       return "SSL fatal error";
+
+	case CO_ER_REVERSE:          return "Reverse connect failure";
+	}
+	return NULL;
+}
+
+/* Send a message over an established connection. It makes use of send() and
+ * returns the same return code and errno. If the socket layer is not ready yet
+ * then -1 is returned and ENOTSOCK is set into errno. If the fd is not marked
+ * as ready, or if EAGAIN or ENOTCONN is returned, then we return 0. If called
+ * with a zero length message, -1 is returned with errno set to EMSGSIZE. The
+ * purpose is to simplify some rare attempts to directly write on the socket
+ * from above the connection (typically send_proxy). In case of EAGAIN, the fd
+ * is marked as "cant_send". It automatically retries on EINTR. Other errors
+ * cause the connection to be marked as in error state. It takes similar
+ * arguments as send() except the first one which is the connection instead of
+ * the file descriptor. <flags> only support CO_SFL_MSG_MORE.
+ */
+int conn_ctrl_send(struct connection *conn, const void *buf, int len, int flags)
+{
+	/* wrap <buf> into a pre-filled buffer so snd_buf() can consume it */
+	const struct buffer buffer = b_make((char*)buf, len, 0, len);
+	const struct xprt_ops *xprt = xprt_get(XPRT_RAW);
+	int ret;
+
+	ret = -1;
+	errno = ENOTSOCK;
+
+	if (conn->flags & CO_FL_SOCK_WR_SH)
+		goto fail;
+
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	errno = EMSGSIZE;
+	if (!len)
+		goto fail;
+
+	/* snd_buf() already takes care of updating conn->flags and handling
+	 * the FD polling status.
+	 */
+	ret = xprt->snd_buf(conn, NULL, &buffer, buffer.data, flags);
+	if (conn->flags & CO_FL_ERROR)
+		ret = -1;
+	return ret;
+ fail:
+	conn->flags |= CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH | CO_FL_ERROR;
+	return ret;
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The event subscriber <es> is not allowed to change from a previous call as
+ * long as at least one event is still subscribed. The <event_type> must only
+ * be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+	BUG_ON(conn->subs && conn->subs != es);
+
+	es->events &= ~event_type;
+	/* drop the subscriber once no event remains subscribed */
+	if (!es->events)
+		conn->subs = NULL;
+
+	/* let the control layer know these events are no longer watched */
+	if (conn_ctrl_ready(conn) && conn->ctrl->ignore_events)
+		conn->ctrl->ignore_events(conn, event_type);
+
+	return 0;
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>.
+ * The <es> struct is not allowed to differ from the one passed during a
+ * previous call to subscribe(). If the connection's ctrl layer is ready,
+ * the wait_event is immediately woken up and the subscription is cancelled.
+ * It always returns zero.
+ */
+int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+	int ret = 0;
+
+	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+	BUG_ON(conn->subs && conn->subs != es);
+
+	/* already subscribed to all requested events: nothing to do */
+	if (conn->subs && (conn->subs->events & event_type) == event_type)
+		return 0;
+
+	if (conn_ctrl_ready(conn) && conn->ctrl->check_events) {
+		/* <ret> holds the subset of events already ready; wake the
+		 * tasklet immediately for those instead of subscribing.
+		 */
+		ret = conn->ctrl->check_events(conn, event_type);
+		if (ret)
+			tasklet_wakeup(es->tasklet);
+	}
+
+	/* keep only the events that are not already ready */
+	es->events = (es->events | event_type) & ~ret;
+	conn->subs = es->events ? es : NULL;
+	return 0;
+}
+
+/* Drains possibly pending incoming data on the connection and update the flags
+ * accordingly. This is used to know whether we need to disable lingering on
+ * close. Returns non-zero if it is safe to close without disabling lingering,
+ * otherwise zero. The CO_FL_SOCK_RD_SH flag may also be updated if the incoming
+ * shutdown was reported by the ->drain() function.
+ */
+int conn_ctrl_drain(struct connection *conn)
+{
+	int ret = 0;
+
+	/* already errored or read-shut connections need no draining */
+	if (!conn_ctrl_ready(conn) || conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH))
+		ret = 1;
+	else if (conn->ctrl->drain) {
+		ret = conn->ctrl->drain(conn);
+		if (ret)
+			conn->flags |= CO_FL_SOCK_RD_SH;
+	}
+	return ret;
+}
+
+/* Returns the value length of TLV <src>, decoded from its two
+ * big-endian length bytes.
+ */
+static inline size_t get_tlv_length(const struct tlv *src)
+{
+	size_t len = src->length_hi;
+
+	return (len << 8) | src->length_lo;
+}
+
+/* This handshake handler waits a PROXY protocol header at the beginning of the
+ * raw data stream. The header looks like this :
+ *
+ *   "PROXY" <SP> PROTO <SP> SRC3 <SP> DST3 <SP> SRC4 <SP> <DST4> "\r\n"
+ *
+ * There must be exactly one space between each field. Fields are :
+ *  - PROTO : layer 4 protocol, which must be "TCP4" or "TCP6".
+ *  - SRC3  : layer 3 (eg: IP) source address in standard text form
+ *  - DST3  : layer 3 (eg: IP) destination address in standard text form
+ *  - SRC4  : layer 4 (eg: TCP port) source address in standard text form
+ *  - DST4  : layer 4 (eg: TCP port) destination address in standard text form
+ *
+ * This line MUST be at the beginning of the buffer and MUST NOT wrap.
+ *
+ * The header line is small and in all cases smaller than the smallest normal
+ * TCP MSS. So it MUST always be delivered as one segment, which ensures we
+ * can safely use MSG_PEEK and avoid buffering.
+ *
+ * Once the data is fetched, the values are set in the connection's address
+ * fields, and data are removed from the socket's buffer. The function returns
+ * zero if it needs to wait for more data or if it fails, or 1 if it completed
+ * and removed itself.
+ */
+int conn_recv_proxy(struct connection *conn, int flag)
+{
+	struct session *sess = conn->owner;
+	char *line, *end;
+	struct proxy_hdr_v2 *hdr_v2;
+	const char v2sig[] = PP2_SIGNATURE;
+	size_t total_v2_len;
+	size_t tlv_offset = 0;
+	int ret;
+
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	if (!fd_recv_ready(conn->handle.fd))
+		goto not_ready;
+
+	/* peek at the pending data without consuming it; retry on EINTR */
+	while (1) {
+		ret = recv(conn->handle.fd, trash.area, trash.size, MSG_PEEK);
+		if (ret < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				fd_cant_recv(conn->handle.fd);
+				goto not_ready;
+			}
+			goto recv_abort;
+		}
+		trash.data = ret;
+		break;
+	}
+
+	if (!trash.data) {
+		/* client shutdown */
+		conn->err_code = CO_ER_PRX_EMPTY;
+		goto fail;
+	}
+
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+	/* need at least "PROXY " to decide between v1 and v2 */
+	if (trash.data < 6)
+		goto missing;
+
+	line = trash.area;
+	end = trash.area + trash.data;
+
+	/* Decode a possible proxy request, fail early if it does not match */
+	if (strncmp(line, "PROXY ", 6) != 0)
+		goto not_v1;
+
+	line += 6;
+	if (trash.data < 9) /* shortest possible line */
+		goto missing;
+
+	if (memcmp(line, "TCP4 ", 5) == 0) {
+		u32 src3, dst3, sport, dport;
+
+		line += 5;
+
+		src3 = inetaddr_host_lim_ret(line, end, &line);
+		if (line == end)
+			goto missing;
+		if (*line++ != ' ')
+			goto bad_header;
+
+		dst3 = inetaddr_host_lim_ret(line, end, &line);
+		if (line == end)
+			goto missing;
+		if (*line++ != ' ')
+			goto bad_header;
+
+		sport = read_uint((const char **)&line, end);
+		if (line == end)
+			goto missing;
+		if (*line++ != ' ')
+			goto bad_header;
+
+		dport = read_uint((const char **)&line, end);
+		/* need room for the trailing CRLF */
+		if (line > end - 2)
+			goto missing;
+		if (*line++ != '\r')
+			goto bad_header;
+		if (*line++ != '\n')
+			goto bad_header;
+
+		if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+			goto fail;
+
+		/* update the session's addresses and mark them set */
+		((struct sockaddr_in *)sess->src)->sin_family      = AF_INET;
+		((struct sockaddr_in *)sess->src)->sin_addr.s_addr = htonl(src3);
+		((struct sockaddr_in *)sess->src)->sin_port        = htons(sport);
+
+		((struct sockaddr_in *)sess->dst)->sin_family      = AF_INET;
+		((struct sockaddr_in *)sess->dst)->sin_addr.s_addr = htonl(dst3);
+		((struct sockaddr_in *)sess->dst)->sin_port        = htons(dport);
+	}
+	else if (memcmp(line, "TCP6 ", 5) == 0) {
+		u32 sport, dport;
+		char *src_s;
+		char *dst_s, *sport_s, *dport_s;
+		struct in6_addr src3, dst3;
+
+		line += 5;
+
+		/* split the line in place on spaces; each field gets
+		 * NUL-terminated so inet_pton()/read_uint() can parse it.
+		 */
+		src_s = line;
+		dst_s = sport_s = dport_s = NULL;
+		while (1) {
+			if (line > end - 2) {
+				goto missing;
+			}
+			else if (*line == '\r') {
+				*line = 0;
+				line++;
+				if (*line++ != '\n')
+					goto bad_header;
+				break;
+			}
+
+			if (*line == ' ') {
+				*line = 0;
+				if (!dst_s)
+					dst_s = line + 1;
+				else if (!sport_s)
+					sport_s = line + 1;
+				else if (!dport_s)
+					dport_s = line + 1;
+			}
+			line++;
+		}
+
+		if (!dst_s || !sport_s || !dport_s)
+			goto bad_header;
+
+		sport = read_uint((const char **)&sport_s,dport_s - 1);
+		if (*sport_s != 0)
+			goto bad_header;
+
+		dport = read_uint((const char **)&dport_s,line - 2);
+		if (*dport_s != 0)
+			goto bad_header;
+
+		if (inet_pton(AF_INET6, src_s, (void *)&src3) != 1)
+			goto bad_header;
+
+		if (inet_pton(AF_INET6, dst_s, (void *)&dst3) != 1)
+			goto bad_header;
+
+		if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+			goto fail;
+
+		/* update the session's addresses and mark them set */
+		((struct sockaddr_in6 *)sess->src)->sin6_family = AF_INET6;
+		memcpy(&((struct sockaddr_in6 *)sess->src)->sin6_addr, &src3, sizeof(struct in6_addr));
+		((struct sockaddr_in6 *)sess->src)->sin6_port   = htons(sport);
+
+		((struct sockaddr_in6 *)sess->dst)->sin6_family = AF_INET6;
+		memcpy(&((struct sockaddr_in6 *)sess->dst)->sin6_addr, &dst3, sizeof(struct in6_addr));
+		((struct sockaddr_in6 *)sess->dst)->sin6_port   = htons(dport);
+	}
+	else if (memcmp(line, "UNKNOWN\r\n", 9) == 0) {
+		/* This can be a UNIX socket forwarded by an haproxy upstream */
+		line += 9;
+	}
+	else {
+		/* The protocol does not match something known (TCP4/TCP6/UNKNOWN) */
+		conn->err_code = CO_ER_PRX_BAD_PROTO;
+		goto fail;
+	}
+
+	trash.data = line - trash.area;
+	goto eat_header;
+
+ not_v1:
+	/* try PPv2 */
+	if (trash.data < PP2_HEADER_LEN)
+		goto missing;
+
+	hdr_v2 = (struct proxy_hdr_v2 *) trash.area;
+
+	if (memcmp(hdr_v2->sig, v2sig, PP2_SIGNATURE_LEN) != 0 ||
+	    (hdr_v2->ver_cmd & PP2_VERSION_MASK) != PP2_VERSION) {
+		conn->err_code = CO_ER_PRX_NOT_HDR;
+		goto fail;
+	}
+
+	total_v2_len = PP2_HEADER_LEN + ntohs(hdr_v2->len);
+	if (trash.data < total_v2_len)
+		goto missing;
+
+	switch (hdr_v2->ver_cmd & PP2_CMD_MASK) {
+	case 0x01: /* PROXY command */
+		switch (hdr_v2->fam) {
+		case 0x11:  /* TCPv4 */
+			if (ntohs(hdr_v2->len) < PP2_ADDR_LEN_INET)
+				goto bad_header;
+
+			if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+				goto fail;
+
+			((struct sockaddr_in *)sess->src)->sin_family      = AF_INET;
+			((struct sockaddr_in *)sess->src)->sin_addr.s_addr = hdr_v2->addr.ip4.src_addr;
+			((struct sockaddr_in *)sess->src)->sin_port        = hdr_v2->addr.ip4.src_port;
+			((struct sockaddr_in *)sess->dst)->sin_family      = AF_INET;
+			((struct sockaddr_in *)sess->dst)->sin_addr.s_addr = hdr_v2->addr.ip4.dst_addr;
+			((struct sockaddr_in *)sess->dst)->sin_port        = hdr_v2->addr.ip4.dst_port;
+			tlv_offset = PP2_HEADER_LEN + PP2_ADDR_LEN_INET;
+			break;
+		case 0x21:  /* TCPv6 */
+			if (ntohs(hdr_v2->len) < PP2_ADDR_LEN_INET6)
+				goto bad_header;
+
+			if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+				goto fail;
+
+			((struct sockaddr_in6 *)sess->src)->sin6_family = AF_INET6;
+			memcpy(&((struct sockaddr_in6 *)sess->src)->sin6_addr, hdr_v2->addr.ip6.src_addr, 16);
+			((struct sockaddr_in6 *)sess->src)->sin6_port   = hdr_v2->addr.ip6.src_port;
+			((struct sockaddr_in6 *)sess->dst)->sin6_family = AF_INET6;
+			memcpy(&((struct sockaddr_in6 *)sess->dst)->sin6_addr, hdr_v2->addr.ip6.dst_addr, 16);
+			((struct sockaddr_in6 *)sess->dst)->sin6_port   = hdr_v2->addr.ip6.dst_port;
+			tlv_offset = PP2_HEADER_LEN + PP2_ADDR_LEN_INET6;
+			break;
+		}
+
+		/* TLV parsing */
+		while (tlv_offset < total_v2_len) {
+			struct ist tlv;
+			struct tlv *tlv_packet = NULL;
+			struct conn_tlv_list *new_tlv = NULL;
+			size_t data_len = 0;
+
+			/* Verify that we have at least TLV_HEADER_SIZE bytes left */
+			if (tlv_offset + TLV_HEADER_SIZE > total_v2_len)
+				goto bad_header;
+
+			tlv_packet = (struct tlv *) &trash.area[tlv_offset];
+			tlv = ist2((const char *)tlv_packet->value, get_tlv_length(tlv_packet));
+			tlv_offset += istlen(tlv) + TLV_HEADER_SIZE;
+
+			/* Verify that the TLV length does not exceed the total PROXYv2 length */
+			if (tlv_offset > total_v2_len)
+				goto bad_header;
+
+			/* Prepare known TLV types */
+			switch (tlv_packet->type) {
+			case PP2_TYPE_CRC32C: {
+				uint32_t n_crc32c;
+
+				/* Verify that this TLV is exactly 4 bytes long */
+				if (istlen(tlv) != PP2_CRC32C_LEN)
+					goto bad_header;
+
+				n_crc32c = read_n32(istptr(tlv));
+				write_n32(istptr(tlv), 0); // compute with CRC==0
+
+				if (hash_crc32c(trash.area, total_v2_len) != n_crc32c)
+					goto bad_header;
+				break;
+			}
+#ifdef USE_NS
+			case PP2_TYPE_NETNS: {
+				const struct netns_entry *ns;
+
+				ns = netns_store_lookup(istptr(tlv), istlen(tlv));
+				if (ns)
+					conn->proxy_netns = ns;
+				break;
+			}
+#endif
+			case PP2_TYPE_AUTHORITY: {
+				/* For now, keep the length restriction by HAProxy */
+				if (istlen(tlv) > HA_PP2_AUTHORITY_MAX)
+					goto bad_header;
+
+				break;
+			}
+			case PP2_TYPE_UNIQUE_ID: {
+				if (istlen(tlv) > UNIQUEID_LEN)
+					goto bad_header;
+				break;
+			}
+			default:
+				break;
+			}
+
+			/* If we did not find a known TLV type that we can optimize for, we generically allocate it */
+			data_len = get_tlv_length(tlv_packet);
+
+			/* Prevent attackers from allocating too much memory */
+			if (unlikely(data_len > HA_PP2_MAX_ALLOC))
+				goto fail;
+
+			/* Alloc memory based on data_len: pools for the common
+			 * small sizes, malloc beyond 256 bytes. conn_free()
+			 * mirrors this choice when releasing.
+			 */
+			if (data_len > HA_PP2_TLV_VALUE_256)
+				new_tlv = malloc(get_tlv_length(tlv_packet) + sizeof(struct conn_tlv_list));
+			else if (data_len <= HA_PP2_TLV_VALUE_128)
+				new_tlv = pool_alloc(pool_head_pp_tlv_128);
+			else
+				new_tlv = pool_alloc(pool_head_pp_tlv_256);
+
+			if (unlikely(!new_tlv))
+				goto fail;
+
+			new_tlv->type = tlv_packet->type;
+
+			/* Save TLV to make it accessible via sample fetch */
+			memcpy(new_tlv->value, tlv.ptr, data_len);
+			new_tlv->len = data_len;
+
+			LIST_APPEND(&conn->tlv_list, &new_tlv->list);
+		}
+
+
+		/* Verify that the PROXYv2 header ends at a TLV boundary.
+		 * This cannot be false, because the TLV parsing already
+		 * verifies that a TLV does not exceed the total length and
+		 * also that there is space for a TLV header.
+		 */
+		BUG_ON(tlv_offset != total_v2_len);
+
+		/* unsupported protocol, keep local connection address */
+		break;
+	case 0x00: /* LOCAL command */
+		/* keep local connection address for LOCAL */
+		break;
+	default:
+		goto bad_header; /* not a supported command */
+	}
+
+	trash.data = total_v2_len;
+	goto eat_header;
+
+ eat_header:
+	/* remove the PROXY line from the request. For this we re-read the
+	 * exact line at once. If we don't get the exact same result, we
+	 * fail.
+	 */
+	while (1) {
+		ssize_t len2 = recv(conn->handle.fd, trash.area, trash.data, 0);
+
+		if (len2 < 0 && errno == EINTR)
+			continue;
+		if (len2 != trash.data)
+			goto recv_abort;
+		break;
+	}
+
+	conn->flags &= ~flag;
+	conn->flags |= CO_FL_RCVD_PROXY;
+	return 1;
+
+ not_ready:
+	return 0;
+
+ missing:
+	/* Missing data. Since we're using MSG_PEEK, we can only poll again if
+	 * we have not read anything. Otherwise we need to fail because we won't
+	 * be able to poll anymore.
+	 */
+	conn->err_code = CO_ER_PRX_TRUNCATED;
+	goto fail;
+
+ bad_header:
+	/* This is not a valid proxy protocol header */
+	conn->err_code = CO_ER_PRX_BAD_HDR;
+	goto fail;
+
+ recv_abort:
+	conn->err_code = CO_ER_PRX_ABORT;
+	conn->flags |= CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+	goto fail;
+
+ fail:
+	conn->flags |= CO_FL_ERROR;
+	return 0;
+}
+
+/* This callback is used to send a valid PROXY protocol line to a socket being
+ * established. It returns 0 if it fails in a fatal way or needs to poll to go
+ * further, otherwise it returns non-zero and removes itself from the connection's
+ * flags (the bit is provided in <flag> by the caller). It is designed to be
+ * called by the connection handler and relies on it to commit polling changes.
+ * Note that it can emit a PROXY line by relying on the other end's address
+ * when the connection is attached to a stream connector, or by resolving the
+ * local address otherwise (also called a LOCAL line).
+ */
+int conn_send_proxy(struct connection *conn, unsigned int flag)
+{
+	if (!conn_ctrl_ready(conn))
+		goto out_error;
+
+	/* If we have a PROXY line to send, we'll use this to validate the
+	 * connection, in which case the connection is validated only once
+	 * we've sent the whole proxy line. Otherwise we use connect().
+	 */
+	if (conn->send_proxy_ofs) {
+		struct stconn *sc;
+		int ret;
+
+		/* If there is no mux attached to the connection, it means the
+		 * connection context is a stream connector.
+		 */
+		sc = conn->mux ? conn_get_first_sc(conn) : conn->ctx;
+
+		/* The target server expects a PROXY line to be sent first.
+		 * If the send_proxy_ofs is negative, it corresponds to the
+		 * offset to start sending from then end of the proxy string
+		 * (which is recomputed every time since it's constant). If
+		 * it is positive, it means we have to send from the start.
+		 * We can only send a "normal" PROXY line when the connection
+		 * is attached to a stream connector. Otherwise we can only
+		 * send a LOCAL line (eg: for use with health checks).
+		 */
+
+		if (sc && sc_strm(sc)) {
+			/* full PROXY line built from the opposite side's
+			 * connection and the stream.
+			 */
+			ret = make_proxy_line(trash.area, trash.size,
+			                      objt_server(conn->target),
+			                      sc_conn(sc_opposite(sc)),
+			                      __sc_strm(sc));
+		}
+		else {
+			/* The target server expects a LOCAL line to be sent first. Retrieving
+			 * local or remote addresses may fail until the connection is established.
+			 */
+			if (!conn_get_src(conn) || !conn_get_dst(conn))
+				goto out_wait;
+
+			ret = make_proxy_line(trash.area, trash.size,
+			                      objt_server(conn->target), conn,
+			                      NULL);
+		}
+
+		if (!ret)
+			goto out_error;
+
+		if (conn->send_proxy_ofs > 0)
+			conn->send_proxy_ofs = -ret; /* first call */
+
+		/* we have to send trash from (ret+sp for -sp bytes). If the
+		 * data layer has a pending write, we'll also set MSG_MORE.
+		 */
+		ret = conn_ctrl_send(conn,
+		                     trash.area + ret + conn->send_proxy_ofs,
+		                     -conn->send_proxy_ofs,
+		                     (conn->subs && conn->subs->events & SUB_RETRY_SEND) ? CO_SFL_MSG_MORE : 0);
+
+		if (ret < 0)
+			goto out_error;
+
+		conn->send_proxy_ofs += ret; /* becomes zero once complete */
+		if (conn->send_proxy_ofs != 0)
+			goto out_wait;
+
+		/* OK we've sent the whole line, we're connected */
+	}
+
+	/* The connection is ready now, simply return and let the connection
+	 * handler notify upper layers if needed.
+	 */
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+	conn->flags &= ~flag;
+	return 1;
+
+ out_error:
+	/* Write error on the file descriptor */
+	conn->flags |= CO_FL_ERROR;
+	return 0;
+
+ out_wait:
+	return 0;
+}
+
+/* This handshake handler waits a NetScaler Client IP insertion header
+ * at the beginning of the raw data stream. The header format is
+ * described in doc/netscaler-client-ip-insertion-protocol.txt
+ *
+ * This line MUST be at the beginning of the buffer and MUST NOT be
+ * fragmented.
+ *
+ * The header line is small and in all cases smaller than the smallest normal
+ * TCP MSS. So it MUST always be delivered as one segment, which ensures we
+ * can safely use MSG_PEEK and avoid buffering.
+ *
+ * Once the data is fetched, the values are set in the connection's address
+ * fields, and data are removed from the socket's buffer. The function returns
+ * zero if it needs to wait for more data or if it fails, or 1 if it completed
+ * and removed itself.
+ */
+int conn_recv_netscaler_cip(struct connection *conn, int flag)
+{
+	struct session *sess = conn->owner;
+	char *line;
+	uint32_t hdr_len;
+	uint8_t ip_ver;
+	int ret;
+
+	/* this handshake requires an FD-based control layer */
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	if (!fd_recv_ready(conn->handle.fd))
+		goto not_ready;
+
+	/* peek at the pending data without consuming it, retrying on EINTR,
+	 * so that a not-yet-complete header can simply be waited for.
+	 */
+	while (1) {
+		ret = recv(conn->handle.fd, trash.area, trash.size, MSG_PEEK);
+		if (ret < 0) {
+			if (errno == EINTR)
+				continue;
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				fd_cant_recv(conn->handle.fd);
+				goto not_ready;
+			}
+			goto recv_abort;
+		}
+		trash.data = ret;
+		break;
+	}
+
+	/* we received data, so the L4 connection is obviously established */
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+	if (!trash.data) {
+		/* client shutdown */
+		conn->err_code = CO_ER_CIP_EMPTY;
+		goto fail;
+	}
+
+	/* Fail if buffer length is not large enough to contain
+	 * CIP magic, header length or
+	 * CIP magic, CIP length, CIP type, header length */
+	if (trash.data < 12)
+		goto missing;
+
+	line = trash.area;
+
+	/* Decode a possible NetScaler Client IP request, fail early if
+	 * it does not match */
+	if (ntohl(read_u32(line)) != __objt_listener(conn->target)->bind_conf->ns_cip_magic)
+		goto bad_magic;
+
+	/* Legacy CIP protocol: byte 8 tags the variant; header length at +4 */
+	if ((trash.area[8] & 0xD0) == 0x40) {
+		hdr_len = ntohl(read_u32((line+4)));
+		line += 8;
+	}
+	/* Standard CIP protocol: 16-bit header length at +10 */
+	else if (trash.area[8] == 0x00) {
+		hdr_len = ntohs(read_u32((line+10)));
+		line += 12;
+	}
+	/* Unknown CIP protocol */
+	else {
+		conn->err_code = CO_ER_CIP_BAD_PROTO;
+		goto fail;
+	}
+
+	/* Fail if buffer length is not large enough to contain
+	 * a minimal IP header */
+	if (trash.data < 20)
+		goto missing;
+
+	/* Get IP version from the first four bits */
+	ip_ver = (*line & 0xf0) >> 4;
+
+	if (ip_ver == 4) {
+		struct ip *hdr_ip4;
+		struct my_tcphdr *hdr_tcp;
+
+		hdr_ip4 = (struct ip *)line;
+
+		if (trash.data < 40 || trash.data < hdr_len) {
+			/* Fail if buffer length is not large enough to contain
+			 * IPv4 header, TCP header */
+			goto missing;
+		}
+		else if (hdr_ip4->ip_p != IPPROTO_TCP) {
+			/* The protocol does not include a TCP header */
+			conn->err_code = CO_ER_CIP_BAD_PROTO;
+			goto fail;
+		}
+
+		/* the TCP header follows the IPv4 header (ip_hl is in 32-bit words) */
+		hdr_tcp = (struct my_tcphdr *)(line + (hdr_ip4->ip_hl * 4));
+
+		if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+			goto fail;
+
+		/* update the session's addresses and mark them set; ports are
+		 * copied as-is since both sides are in network byte order.
+		 */
+		((struct sockaddr_in *)sess->src)->sin_family = AF_INET;
+		((struct sockaddr_in *)sess->src)->sin_addr.s_addr = hdr_ip4->ip_src.s_addr;
+		((struct sockaddr_in *)sess->src)->sin_port = hdr_tcp->source;
+
+		((struct sockaddr_in *)sess->dst)->sin_family = AF_INET;
+		((struct sockaddr_in *)sess->dst)->sin_addr.s_addr = hdr_ip4->ip_dst.s_addr;
+		((struct sockaddr_in *)sess->dst)->sin_port = hdr_tcp->dest;
+	}
+	else if (ip_ver == 6) {
+		struct ip6_hdr *hdr_ip6;
+		struct my_tcphdr *hdr_tcp;
+
+		hdr_ip6 = (struct ip6_hdr *)line;
+
+		if (trash.data < 60 || trash.data < hdr_len) {
+			/* Fail if buffer length is not large enough to contain
+			 * IPv6 header, TCP header */
+			goto missing;
+		}
+		else if (hdr_ip6->ip6_nxt != IPPROTO_TCP) {
+			/* The protocol does not include a TCP header */
+			conn->err_code = CO_ER_CIP_BAD_PROTO;
+			goto fail;
+		}
+
+		hdr_tcp = (struct my_tcphdr *)(line + sizeof(struct ip6_hdr));
+
+		if (!sess || !sockaddr_alloc(&sess->src, NULL, 0) || !sockaddr_alloc(&sess->dst, NULL, 0))
+			goto fail;
+
+		/* update the session's addresses and mark them set */
+		((struct sockaddr_in6 *)sess->src)->sin6_family = AF_INET6;
+		((struct sockaddr_in6 *)sess->src)->sin6_addr = hdr_ip6->ip6_src;
+		((struct sockaddr_in6 *)sess->src)->sin6_port = hdr_tcp->source;
+
+		((struct sockaddr_in6 *)sess->dst)->sin6_family = AF_INET6;
+		((struct sockaddr_in6 *)sess->dst)->sin6_addr = hdr_ip6->ip6_dst;
+		((struct sockaddr_in6 *)sess->dst)->sin6_port = hdr_tcp->dest;
+	}
+	else {
+		/* The protocol does not match something known (IPv4/IPv6) */
+		conn->err_code = CO_ER_CIP_BAD_PROTO;
+		goto fail;
+	}
+
+	line += hdr_len;
+	trash.data = line - trash.area;
+
+	/* remove the NetScaler Client IP header from the request. For this
+	 * we re-read the exact line at once. If we don't get the exact same
+	 * result, we fail.
+	 */
+	while (1) {
+		int len2 = recv(conn->handle.fd, trash.area, trash.data, 0);
+		if (len2 < 0 && errno == EINTR)
+			continue;
+		if (len2 != trash.data)
+			goto recv_abort;
+		break;
+	}
+
+	conn->flags &= ~flag;
+	return 1;
+
+ not_ready:
+	return 0;
+
+ missing:
+	/* Missing data. Since we're using MSG_PEEK, we can only poll again if
+	 * we have not read anything. Otherwise we need to fail because we won't
+	 * be able to poll anymore.
+	 */
+	conn->err_code = CO_ER_CIP_TRUNCATED;
+	goto fail;
+
+ bad_magic:
+	conn->err_code = CO_ER_CIP_BAD_MAGIC;
+	goto fail;
+
+ recv_abort:
+	conn->err_code = CO_ER_CIP_ABORT;
+	conn->flags |= CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+	goto fail;
+
+ fail:
+	conn->flags |= CO_FL_ERROR;
+	return 0;
+}
+
+
+/* Sends a SOCKS4 CONNECT request for <conn>'s destination to the proxy.
+ * conn->send_proxy_ofs tracks progress: positive on the first call, then set
+ * to the negative number of bytes remaining, so the function may be called
+ * again until the whole request is sent. Returns 1 once fully sent, 0 when
+ * it must wait or on error (in which case CO_FL_ERROR is set).
+ */
+int conn_send_socks4_proxy_request(struct connection *conn)
+{
+	struct socks4_request req_line;
+
+	if (!conn_ctrl_ready(conn))
+		goto out_error;
+
+	/* the request carries the destination address, which must be known */
+	if (!conn_get_dst(conn))
+		goto out_error;
+
+	/* SOCKS4: version 4, command 1 (CONNECT), fixed user-id */
+	req_line.version = 0x04;
+	req_line.command = 0x01;
+	req_line.port    = get_net_port(conn->dst);
+	req_line.ip      = is_inet_addr(conn->dst);
+	memcpy(req_line.user_id, "HAProxy\0", 8);
+
+	if (conn->send_proxy_ofs > 0) {
+		/*
+		 * This is the first call to send the request
+		 */
+		conn->send_proxy_ofs = -(int)sizeof(req_line);
+	}
+
+	if (conn->send_proxy_ofs < 0) {
+		int ret = 0;
+
+		/* we are sending the socks4_req_line here. If the data layer
+		 * has a pending write, we'll also set MSG_MORE.
+		 */
+		ret = conn_ctrl_send(
+			conn,
+			((char *)(&req_line)) + (sizeof(req_line)+conn->send_proxy_ofs),
+			-conn->send_proxy_ofs,
+			(conn->subs && conn->subs->events & SUB_RETRY_SEND) ? CO_SFL_MSG_MORE : 0);
+
+		DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: Before send remain is [%d], sent [%d]\n",
+		        conn_fd(conn), -conn->send_proxy_ofs, ret);
+
+		if (ret < 0) {
+			goto out_error;
+		}
+
+		conn->send_proxy_ofs += ret; /* becomes zero once complete */
+		if (conn->send_proxy_ofs != 0) {
+			goto out_wait;
+		}
+	}
+
+	/* OK we've the whole request sent */
+	conn->flags &= ~CO_FL_SOCKS4_SEND;
+
+	/* The connection is ready now, simply return and let the connection
+	 * handler notify upper layers if needed.
+	 */
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+	if (conn->flags & CO_FL_SEND_PROXY) {
+		/*
+		 * Get the send_proxy_ofs ready for the send_proxy because we are
+		 * reusing the "send_proxy_ofs", and the SOCKS4 handshake must be done
+		 * before sending the PROXY protocol header.
+		 */
+		conn->send_proxy_ofs = 1;
+	}
+	return 1;
+
+ out_error:
+	/* Write error on the file descriptor */
+	conn->flags |= CO_FL_ERROR;
+	if (conn->err_code == CO_ER_NONE) {
+		conn->err_code = CO_ER_SOCKS4_SEND;
+	}
+	return 0;
+
+ out_wait:
+	return 0;
+}
+
+/* Waits for and validates the 8-byte SOCKS4 proxy response. The response is
+ * first inspected with MSG_PEEK so that a short read can simply be retried
+ * later, then consumed from the socket once complete and accepted.
+ * Returns 1 when the handshake succeeded, 0 when more data is needed or on
+ * failure (CO_FL_ERROR set, err_code updated).
+ */
+int conn_recv_socks4_proxy_response(struct connection *conn)
+{
+	char line[SOCKS4_HS_RSP_LEN];
+	int ret;
+
+	if (!conn_ctrl_ready(conn))
+		goto fail;
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	if (!fd_recv_ready(conn->handle.fd))
+		goto not_ready;
+
+	while (1) {
+		/* SOCKS4 Proxy will respond with 8 bytes, 0x00 | 0x5A | 0x00 0x00 | 0x00 0x00 0x00 0x00
+		 * Try to peek into it, before all 8 bytes ready.
+		 */
+		ret = recv(conn->handle.fd, line, SOCKS4_HS_RSP_LEN, MSG_PEEK);
+
+		if (ret == 0) {
+			/* the socket has been closed or shutdown for send */
+			DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: Received ret[%d], errno[%d], looks like the socket has been closed or shutdown for send\n",
+			        conn->handle.fd, ret, errno);
+			if (conn->err_code == CO_ER_NONE) {
+				conn->err_code = CO_ER_SOCKS4_RECV;
+			}
+			goto fail;
+		}
+
+		if (ret > 0) {
+			if (ret == SOCKS4_HS_RSP_LEN) {
+				DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: Received 8 bytes, the response is [%02X|%02X|%02X %02X|%02X %02X %02X %02X]\n",
+				        conn->handle.fd, line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7]);
+			}else{
+				DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: Received ret[%d], first byte is [%02X], last bye is [%02X]\n", conn->handle.fd, ret, line[0], line[ret-1]);
+			}
+		} else {
+			DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: Received ret[%d], errno[%d]\n", conn->handle.fd, ret, errno);
+		}
+
+		if (ret < 0) {
+			if (errno == EINTR) {
+				continue;
+			}
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				fd_cant_recv(conn->handle.fd);
+				goto not_ready;
+			}
+			goto recv_abort;
+		}
+		break;
+	}
+
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+	if (ret < SOCKS4_HS_RSP_LEN) {
+		/* Missing data. Since we're using MSG_PEEK, we can only poll again if
+		 * we are not able to read enough data.
+		 */
+		goto not_ready;
+	}
+
+	/*
+	 * Based on the SOCKS4 protocol:
+	 *
+	 * +----+----+----+----+----+----+----+----+
+	 * | VN | CD | DSTPORT |      DSTIP        |
+	 * +----+----+----+----+----+----+----+----+
+	 * # of bytes:	   1    1      2              4
+	 * VN is the version of the reply code and should be 0. CD is the result
+	 * code with one of the following values:
+	 * 90: request granted
+	 * 91: request rejected or failed
+	 * 92: request rejected because SOCKS server cannot connect to identd on the client
+	 * 93: request rejected because the client program and identd report different user-ids
+	 * The remaining fields are ignored.
+	 */
+	if (line[1] != 90) {
+		conn->flags &= ~CO_FL_SOCKS4_RECV;
+
+		DPRINTF(stderr, "SOCKS PROXY HS FD[%04X]: FAIL, the response is [%02X|%02X|%02X %02X|%02X %02X %02X %02X]\n",
+		        conn->handle.fd, line[0], line[1], line[2], line[3], line[4], line[5], line[6], line[7]);
+		if (conn->err_code == CO_ER_NONE) {
+			conn->err_code = CO_ER_SOCKS4_DENY;
+		}
+		goto fail;
+	}
+
+	/* remove the 8 bytes response from the stream */
+	while (1) {
+		ret = recv(conn->handle.fd, line, SOCKS4_HS_RSP_LEN, 0);
+		if (ret < 0 && errno == EINTR) {
+			continue;
+		}
+		if (ret != SOCKS4_HS_RSP_LEN) {
+			if (conn->err_code == CO_ER_NONE) {
+				conn->err_code = CO_ER_SOCKS4_RECV;
+			}
+			goto fail;
+		}
+		break;
+	}
+
+	conn->flags &= ~CO_FL_SOCKS4_RECV;
+	return 1;
+
+ not_ready:
+	return 0;
+
+ recv_abort:
+	if (conn->err_code == CO_ER_NONE) {
+		conn->err_code = CO_ER_SOCKS4_ABORT;
+	}
+	conn->flags |= (CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH);
+	goto fail;
+
+ fail:
+	conn->flags |= CO_FL_ERROR;
+	return 0;
+}
+
+/* registers proto mux list <list>. Modifies the list element! The element is
+ * linked into the global mux_proto_list, so it must remain valid for the
+ * whole process life (typically a static descriptor).
+ */
+void register_mux_proto(struct mux_proto_list *list)
+{
+	LIST_APPEND(&mux_proto_list.list, &list->list);
+}
+
+/* Lists the known proto mux on <out>. This function is used by "haproxy -vv"
+ * and is suitable for early boot just after the "REGISTER" stage because it
+ * doesn't depend on anything to be already allocated.
+ */
+void list_mux_proto(FILE *out)
+{
+	struct mux_proto_list *item;
+	struct ist proto;
+	char *mode, *side;
+	int done;
+
+	fprintf(out, "Available multiplexer protocols :\n"
+		"(protocols marked as <default> cannot be specified using 'proto' keyword)\n");
+	list_for_each_entry(item, &mux_proto_list.list, list) {
+		proto = item->token;
+
+		if (item->mode == PROTO_MODE_ANY)
+			mode = "TCP|HTTP";
+		else if (item->mode == PROTO_MODE_TCP)
+			mode = "TCP";
+		else if (item->mode == PROTO_MODE_HTTP)
+			mode = "HTTP";
+		else
+			mode = "NONE";
+
+		if (item->side == PROTO_SIDE_BOTH)
+			side = "FE|BE";
+		else if (item->side == PROTO_SIDE_FE)
+			side = "FE";
+		else if (item->side == PROTO_SIDE_BE)
+			side = "BE";
+		else
+			side = "NONE";
+
+		fprintf(out, "  %10s : mode=%-5s side=%-6s  mux=%-5s flags=",
+			(proto.len ? proto.ptr : "<default>"), mode, side, item->mux->name);
+
+		/* <done> becomes non-zero once a flag has been printed, which
+		 * tells the next one to emit a '|' separator first (fprintf
+		 * returns the number of characters written).
+		 */
+		done = 0;
+
+		/* note: the block below could be simplified using macros but for only
+		 * 4 flags it's not worth it.
+		 */
+		if (item->mux->flags & MX_FL_HTX)
+			done |= fprintf(out, "%sHTX", done ? "|" : "");
+
+		if (item->mux->flags & MX_FL_HOL_RISK)
+			done |= fprintf(out, "%sHOL_RISK", done ? "|" : "");
+
+		if (item->mux->flags & MX_FL_NO_UPG)
+			done |= fprintf(out, "%sNO_UPG", done ? "|" : "");
+
+		if (item->mux->flags & MX_FL_FRAMED)
+			done |= fprintf(out, "%sFRAMED", done ? "|" : "");
+
+		fprintf(out, "\n");
+	}
+}
+
+/* Makes a PROXY protocol line from the two addresses. The output is sent to
+ * buffer <buf> for a maximum size of <buf_len> (including the trailing zero).
+ * It returns the number of bytes composing this line (including the trailing
+ * LF), or zero in case of failure (eg: not enough space). It supports TCP4,
+ * TCP6 and "UNKNOWN" formats. If any of <src> or <dst> is null, UNKNOWN is
+ * emitted as well.
+ */
+static int make_proxy_line_v1(char *buf, int buf_len, const struct sockaddr_storage *src, const struct sockaddr_storage *dst)
+{
+	int ret = 0;
+	char * protocol;
+	char src_str[MAX(INET_ADDRSTRLEN, INET6_ADDRSTRLEN)];
+	char dst_str[MAX(INET_ADDRSTRLEN, INET6_ADDRSTRLEN)];
+	in_port_t src_port;
+	in_port_t dst_port;
+
+	if (   !src
+	    || !dst
+	    || (src->ss_family != AF_INET && src->ss_family != AF_INET6)
+	    || (dst->ss_family != AF_INET && dst->ss_family != AF_INET6)) {
+		/* unknown family combination */
+		ret = snprintf(buf, buf_len, "PROXY UNKNOWN\r\n");
+		if (ret >= buf_len)
+			return 0;
+
+		return ret;
+	}
+
+	/* IPv4 for both src and dst */
+	if (src->ss_family == AF_INET && dst->ss_family == AF_INET) {
+		protocol = "TCP4";
+		if (!inet_ntop(AF_INET, &((struct sockaddr_in *)src)->sin_addr, src_str, sizeof(src_str)))
+			return 0;
+		src_port = ((struct sockaddr_in *)src)->sin_port;
+		if (!inet_ntop(AF_INET, &((struct sockaddr_in *)dst)->sin_addr, dst_str, sizeof(dst_str)))
+			return 0;
+		dst_port = ((struct sockaddr_in *)dst)->sin_port;
+	}
+	/* IPv6 for at least one of src and dst: a v4 side is mapped to v6 so
+	 * both addresses can be emitted with the single TCP6 protocol tag.
+	 */
+	else {
+		struct in6_addr tmp;
+
+		protocol = "TCP6";
+
+		if (src->ss_family == AF_INET) {
+			/* Convert src to IPv6 */
+			v4tov6(&tmp, &((struct sockaddr_in *)src)->sin_addr);
+			src_port = ((struct sockaddr_in *)src)->sin_port;
+		}
+		else {
+			tmp = ((struct sockaddr_in6 *)src)->sin6_addr;
+			src_port = ((struct sockaddr_in6 *)src)->sin6_port;
+		}
+
+		if (!inet_ntop(AF_INET6, &tmp, src_str, sizeof(src_str)))
+			return 0;
+
+		if (dst->ss_family == AF_INET) {
+			/* Convert dst to IPv6 */
+			v4tov6(&tmp, &((struct sockaddr_in *)dst)->sin_addr);
+			dst_port = ((struct sockaddr_in *)dst)->sin_port;
+		}
+		else {
+			tmp = ((struct sockaddr_in6 *)dst)->sin6_addr;
+			dst_port = ((struct sockaddr_in6 *)dst)->sin6_port;
+		}
+
+		if (!inet_ntop(AF_INET6, &tmp, dst_str, sizeof(dst_str)))
+			return 0;
+	}
+
+	/* ports are kept in network order above and only converted here */
+	ret = snprintf(buf, buf_len, "PROXY %s %s %s %u %u\r\n", protocol, src_str, dst_str, ntohs(src_port), ntohs(dst_port));
+	if (ret >= buf_len)
+		return 0;
+
+	return ret;
+}
+
+/* Emits a PROXY protocol v2 TLV of <type> carrying <length> bytes of <value>
+ * at <dest>, bounded by <dest_len>. Returns the total number of bytes written
+ * (TLV header plus value), or zero when <dest> is NULL or the TLV does not
+ * fit.
+ * NOTE(review): <dest_len> is signed while the left-hand side of the bound
+ * check is size_t, so a negative <dest_len> would wrap to a huge value and
+ * pass the check — callers must never pass a negative length.
+ */
+static int make_tlv(char *dest, int dest_len, char type, uint16_t length, const char *value)
+{
+	struct tlv *tlv;
+
+	if (!dest || (length + sizeof(*tlv) > dest_len))
+		return 0;
+
+	tlv = (struct tlv *)dest;
+
+	tlv->type = type;
+	/* 16-bit length stored big-endian as two bytes */
+	tlv->length_hi = length >> 8;
+	tlv->length_lo = length & 0x00ff;
+	memcpy(tlv->value, value, length);
+	return length + sizeof(*tlv);
+}
+
+/* Builds a PROXY protocol v2 header for server <srv> into <buf> (limited to
+ * <buf_len> bytes) from the addresses of stream <strm> or connection <remote>,
+ * followed by the TLVs configured on the server ("set-proxy-v2-tlv-fmt",
+ * CRC32C, ALPN, AUTHORITY, UNIQUE_ID, SSL, NETNS). Returns the total number
+ * of bytes written, or zero when the buffer is too small.
+ * Note: <remote> is explicitly allowed to be NULL */
+static int make_proxy_line_v2(char *buf, int buf_len, struct server *srv, struct connection *remote, struct stream *strm)
+{
+	const char pp2_signature[] = PP2_SIGNATURE;
+	void *tlv_crc32c_p = NULL;
+	int ret = 0;
+	struct proxy_hdr_v2 *hdr = (struct proxy_hdr_v2 *)buf;
+	struct sockaddr_storage null_addr = { .ss_family = 0 };
+	struct srv_pp_tlv_list *srv_tlv = NULL;
+	const struct sockaddr_storage *src = &null_addr;
+	const struct sockaddr_storage *dst = &null_addr;
+	const char *value = "";
+	int value_len = 0;
+
+	if (buf_len < PP2_HEADER_LEN)
+		return 0;
+	memcpy(hdr->sig, pp2_signature, PP2_SIGNATURE_LEN);
+
+	/* addresses come from the stream when available, otherwise from the
+	 * remote connection; they remain &null_addr (family 0) otherwise.
+	 */
+	if (strm) {
+		src = sc_src(strm->scf);
+		dst = sc_dst(strm->scf);
+	}
+	else if (remote && conn_get_src(remote) && conn_get_dst(remote)) {
+		src = conn_src(remote);
+		dst = conn_dst(remote);
+	}
+
+	/* At least one of src or dst is not of AF_INET or AF_INET6 */
+	if (   !src
+	    || !dst
+	    || (!pp2_never_send_local && conn_is_back(remote)) // locally initiated connection
+	    || (src->ss_family != AF_INET && src->ss_family != AF_INET6)
+	    || (dst->ss_family != AF_INET && dst->ss_family != AF_INET6)) {
+		if (buf_len < PP2_HDR_LEN_UNSPEC)
+			return 0;
+		hdr->ver_cmd = PP2_VERSION | PP2_CMD_LOCAL;
+		hdr->fam = PP2_FAM_UNSPEC | PP2_TRANS_UNSPEC;
+		ret = PP2_HDR_LEN_UNSPEC;
+	}
+	else {
+		hdr->ver_cmd = PP2_VERSION | PP2_CMD_PROXY;
+		/* IPv4 for both src and dst */
+		if (src->ss_family == AF_INET && dst->ss_family == AF_INET) {
+			if (buf_len < PP2_HDR_LEN_INET)
+				return 0;
+			hdr->fam = PP2_FAM_INET | PP2_TRANS_STREAM;
+			hdr->addr.ip4.src_addr = ((struct sockaddr_in *)src)->sin_addr.s_addr;
+			hdr->addr.ip4.src_port = ((struct sockaddr_in *)src)->sin_port;
+			hdr->addr.ip4.dst_addr = ((struct sockaddr_in *)dst)->sin_addr.s_addr;
+			hdr->addr.ip4.dst_port = ((struct sockaddr_in *)dst)->sin_port;
+			ret = PP2_HDR_LEN_INET;
+		}
+		/* IPv6 for at least one of src and dst: v4 addresses are mapped
+		 * to v6 so a single INET6 family can be announced.
+		 */
+		else {
+			struct in6_addr tmp;
+
+			if (buf_len < PP2_HDR_LEN_INET6)
+				return 0;
+			hdr->fam = PP2_FAM_INET6 | PP2_TRANS_STREAM;
+			if (src->ss_family == AF_INET) {
+				v4tov6(&tmp, &((struct sockaddr_in *)src)->sin_addr);
+				memcpy(hdr->addr.ip6.src_addr, &tmp, 16);
+				hdr->addr.ip6.src_port = ((struct sockaddr_in *)src)->sin_port;
+			}
+			else {
+				memcpy(hdr->addr.ip6.src_addr, &((struct sockaddr_in6 *)src)->sin6_addr, 16);
+				hdr->addr.ip6.src_port = ((struct sockaddr_in6 *)src)->sin6_port;
+			}
+			if (dst->ss_family == AF_INET) {
+				v4tov6(&tmp, &((struct sockaddr_in *)dst)->sin_addr);
+				memcpy(hdr->addr.ip6.dst_addr, &tmp, 16);
+				hdr->addr.ip6.dst_port = ((struct sockaddr_in *)dst)->sin_port;
+			}
+			else {
+				memcpy(hdr->addr.ip6.dst_addr, &((struct sockaddr_in6 *)dst)->sin6_addr, 16);
+				hdr->addr.ip6.dst_port = ((struct sockaddr_in6 *)dst)->sin6_port;
+			}
+
+			ret = PP2_HDR_LEN_INET6;
+		}
+	}
+
+	/* user-configured TLVs, formatted against the stream when a log-format
+	 * expression was supplied, empty otherwise.
+	 */
+	if (strm) {
+		struct buffer *replace = NULL;
+
+		list_for_each_entry(srv_tlv, &srv->pp_tlvs, list) {
+			replace = NULL;
+
+			/* Users will always need to provide a value, in case of forwarding, they should use fc_pp_tlv.
+			 * for generic types. Otherwise, we will send an empty TLV.
+			 */
+			if (!LIST_ISEMPTY(&srv_tlv->fmt)) {
+				replace = alloc_trash_chunk();
+				if (unlikely(!replace))
+					return 0;
+
+				replace->data = build_logline(strm, replace->area, replace->size, &srv_tlv->fmt);
+
+				if (unlikely((buf_len - ret) < sizeof(struct tlv))) {
+					free_trash_chunk(replace);
+					return 0;
+				}
+				/* NOTE(review): make_tlv() returns 0 when the value
+				 * itself does not fit, silently dropping the TLV.
+				 */
+				ret += make_tlv(&buf[ret], (buf_len - ret), srv_tlv->type, replace->data, replace->area);
+				free_trash_chunk(replace);
+			}
+			else {
+				/* Create empty TLV as no value was specified */
+				ret += make_tlv(&buf[ret], (buf_len - ret), srv_tlv->type, 0, NULL);
+			}
+		}
+	}
+
+	/* Handle predefined TLVs as usual */
+	if (srv->pp_opts & SRV_PP_V2_CRC32C) {
+		uint32_t zero_crc32c = 0;
+
+		if ((buf_len - ret) < sizeof(struct tlv))
+			return 0;
+		/* keep a pointer to the zeroed placeholder; the real checksum is
+		 * computed over the complete header at the end of this function.
+		 */
+		tlv_crc32c_p = (void *)((struct tlv *)&buf[ret])->value;
+		ret += make_tlv(&buf[ret], (buf_len - ret), PP2_TYPE_CRC32C, sizeof(zero_crc32c), (const char *)&zero_crc32c);
+	}
+
+	if (remote && conn_get_alpn(remote, &value, &value_len)) {
+		if ((buf_len - ret) < sizeof(struct tlv))
+			return 0;
+		ret += make_tlv(&buf[ret], (buf_len - ret), PP2_TYPE_ALPN, value_len, value);
+	}
+
+	if (srv->pp_opts & SRV_PP_V2_AUTHORITY) {
+		/* forward an AUTHORITY TLV received on the remote connection,
+		 * falling back to the SNI when built with OpenSSL.
+		 */
+		struct conn_tlv_list *tlv = conn_get_tlv(remote, PP2_TYPE_AUTHORITY);
+
+		value = NULL;
+		if (tlv) {
+			value_len = tlv->len;
+			value = tlv->value;
+		}
+#ifdef USE_OPENSSL
+		else {
+			if ((value = ssl_sock_get_sni(remote)))
+				value_len = strlen(value);
+		}
+#endif
+		if (value) {
+			if ((buf_len - ret) < sizeof(struct tlv))
+				return 0;
+			ret += make_tlv(&buf[ret], (buf_len - ret), PP2_TYPE_AUTHORITY, value_len, value);
+		}
+	}
+
+	if (strm && (srv->pp_opts & SRV_PP_V2_UNIQUE_ID)) {
+		struct session* sess = strm_sess(strm);
+		struct ist unique_id = stream_generate_unique_id(strm, &sess->fe->format_unique_id);
+
+		value = unique_id.ptr;
+		value_len = unique_id.len;
+
+		if (value_len >= 0) {
+			if ((buf_len - ret) < sizeof(struct tlv))
+				return 0;
+			ret += make_tlv(&buf[ret], (buf_len - ret), PP2_TYPE_UNIQUE_ID, value_len, value);
+		}
+	}
+
+#ifdef USE_OPENSSL
+	if (srv->pp_opts & SRV_PP_V2_SSL) {
+		struct tlv_ssl *tlv;
+		int ssl_tlv_len = 0;
+
+		if ((buf_len - ret) < sizeof(struct tlv_ssl))
+			return 0;
+		tlv = (struct tlv_ssl *)&buf[ret];
+		memset(tlv, 0, sizeof(struct tlv_ssl));
+		ssl_tlv_len += sizeof(struct tlv_ssl);
+		tlv->tlv.type = PP2_TYPE_SSL;
+		if (conn_is_ssl(remote)) {
+			tlv->client |= PP2_CLIENT_SSL;
+			value = ssl_sock_get_proto_version(remote);
+			if (value) {
+				ssl_tlv_len += make_tlv(&buf[ret+ssl_tlv_len], (buf_len-ret-ssl_tlv_len), PP2_SUBTYPE_SSL_VERSION, strlen(value), value);
+			}
+			if (ssl_sock_get_cert_used_sess(remote)) {
+				tlv->client |= PP2_CLIENT_CERT_SESS;
+				tlv->verify = htonl(ssl_sock_get_verify_result(remote));
+				if (ssl_sock_get_cert_used_conn(remote))
+					tlv->client |= PP2_CLIENT_CERT_CONN;
+			}
+			if (srv->pp_opts & SRV_PP_V2_SSL_CN) {
+				struct buffer *cn_trash = get_trash_chunk();
+				if (ssl_sock_get_remote_common_name(remote, cn_trash) > 0) {
+					ssl_tlv_len += make_tlv(&buf[ret+ssl_tlv_len], (buf_len - ret - ssl_tlv_len), PP2_SUBTYPE_SSL_CN,
+					                        cn_trash->data,
+					                        cn_trash->area);
+				}
+			}
+			if (srv->pp_opts & SRV_PP_V2_SSL_KEY_ALG) {
+				struct buffer *pkey_trash = get_trash_chunk();
+				if (ssl_sock_get_pkey_algo(remote, pkey_trash) > 0) {
+					ssl_tlv_len += make_tlv(&buf[ret+ssl_tlv_len], (buf_len - ret - ssl_tlv_len), PP2_SUBTYPE_SSL_KEY_ALG,
+					                        pkey_trash->data,
+					                        pkey_trash->area);
+				}
+			}
+			if (srv->pp_opts & SRV_PP_V2_SSL_SIG_ALG) {
+				value = ssl_sock_get_cert_sig(remote);
+				if (value) {
+					ssl_tlv_len += make_tlv(&buf[ret+ssl_tlv_len], (buf_len - ret - ssl_tlv_len), PP2_SUBTYPE_SSL_SIG_ALG, strlen(value), value);
+				}
+			}
+			if (srv->pp_opts & SRV_PP_V2_SSL_CIPHER) {
+				value = ssl_sock_get_cipher_name(remote);
+				if (value) {
+					ssl_tlv_len += make_tlv(&buf[ret+ssl_tlv_len], (buf_len - ret - ssl_tlv_len), PP2_SUBTYPE_SSL_CIPHER, strlen(value), value);
+				}
+			}
+		}
+		/* patch the outer SSL TLV's length now that all sub-TLVs are in */
+		tlv->tlv.length_hi = (uint16_t)(ssl_tlv_len - sizeof(struct tlv)) >> 8;
+		tlv->tlv.length_lo = (uint16_t)(ssl_tlv_len - sizeof(struct tlv)) & 0x00ff;
+		ret += ssl_tlv_len;
+	}
+#endif
+
+#ifdef USE_NS
+	if (remote && (remote->proxy_netns)) {
+		if ((buf_len - ret) < sizeof(struct tlv))
+			return 0;
+		ret += make_tlv(&buf[ret], (buf_len - ret), PP2_TYPE_NETNS, remote->proxy_netns->name_len, remote->proxy_netns->node.key);
+	}
+#endif
+
+	/* total length excludes the fixed v2 header itself */
+	hdr->len = htons((uint16_t)(ret - PP2_HEADER_LEN));
+
+	if (tlv_crc32c_p) {
+		/* checksum computed over the whole header with the CRC field zeroed */
+		write_u32(tlv_crc32c_p, htonl(hash_crc32c(buf, ret)));
+	}
+
+	return ret;
+}
+
+/* Builds a PROXY protocol header into <buf> (up to <buf_len> bytes), using
+ * version 2 when the server requests it (SRV_PP_V2) and version 1 otherwise.
+ * Addresses are taken from the stream when available, else from the remote
+ * connection; v1 falls back to "PROXY UNKNOWN" when none are usable. Returns
+ * the number of bytes written, or zero on failure.
+ * Note: <remote> is explicitly allowed to be NULL */
+int make_proxy_line(char *buf, int buf_len, struct server *srv, struct connection *remote, struct stream *strm)
+{
+	int ret = 0;
+
+	if (srv && (srv->pp_opts & SRV_PP_V2)) {
+		ret = make_proxy_line_v2(buf, buf_len, srv, remote, strm);
+	}
+	else {
+		const struct sockaddr_storage *src = NULL;
+		const struct sockaddr_storage *dst = NULL;
+
+		if (strm) {
+			src = sc_src(strm->scf);
+			dst = sc_dst(strm->scf);
+		}
+		else if (remote && conn_get_src(remote) && conn_get_dst(remote)) {
+			src = conn_src(remote);
+			dst = conn_dst(remote);
+		}
+
+		if (src && dst)
+			ret = make_proxy_line_v1(buf, buf_len, src, dst);
+		else
+			ret = make_proxy_line_v1(buf, buf_len, NULL, NULL);
+	}
+
+	return ret;
+}
+
+/* Config parser for the global "pp2-never-send-local" keyword: takes no
+ * argument and simply raises the global pp2_never_send_local flag.
+ * Returns 0 on success, -1 when extra arguments are present.
+ */
+static int cfg_parse_pp2_never_send_local(char **args, int section_type, struct proxy *curpx,
+                                          const struct proxy *defpx, const char *file, int line,
+                                          char **err)
+{
+	if (too_many_args(0, args, err, NULL))
+		return -1;
+	pp2_never_send_local = 1;
+	return 0;
+}
+
+/* extracts some info from the connection and appends them to buffer <buf>. The
+ * connection's pointer, its direction, target (fe/be/srv), xprt/ctrl, source
+ * when set, destination when set, are printed in a compact human-readable format
+ * fitting on a single line. This is handy to complete traces or debug output.
+ * It is permitted to pass a NULL conn pointer. The number of characters emitted
+ * is returned. A prefix <pfx> might be prepended before the first field if not
+ * NULL.
+ */
+int conn_append_debug_info(struct buffer *buf, const struct connection *conn, const char *pfx)
+{
+	const struct listener *li;
+	const struct server *sv;
+	const struct proxy *px;
+	char addr[40];
+	int old_len = buf->data;
+
+	if (!conn)
+		return 0;
+
+	chunk_appendf(buf, "%sconn=%p(%s)", pfx ? pfx : "", conn, conn_is_back(conn) ? "OUT" : "IN");
+
+	/* the target type tells whether this is a frontend, server or backend side */
+	if ((li = objt_listener(conn->target)))
+		chunk_appendf(buf, " fe=%s", li->bind_conf->frontend->id);
+	else if ((sv = objt_server(conn->target)))
+		chunk_appendf(buf, " sv=%s/%s", sv->proxy->id, sv->id);
+	else if ((px = objt_proxy(conn->target)))
+		chunk_appendf(buf, " be=%s", px->id);
+
+	chunk_appendf(buf, " %s/%s", conn_get_xprt_name(conn), conn_get_ctrl_name(conn));
+
+	if (conn->src && addr_to_str(conn->src, addr, sizeof(addr)))
+		chunk_appendf(buf, " src=%s:%d", addr, get_host_port(conn->src));
+
+	if (conn->dst && addr_to_str(conn->dst, addr, sizeof(addr)))
+		chunk_appendf(buf, " dst=%s:%d", addr, get_host_port(conn->dst));
+
+	/* number of characters this call appended */
+	return buf->data - old_len;
+}
+
+/* return the major HTTP version as 1 or 2 depending on how the request arrived
+ * before being processed, or 3 for QUIC. Serves both "fc_http_major" (frontend
+ * side) and "bc_http_major" (backend side), distinguished by kw[0] == 'b'.
+ *
+ * WARNING: Should be updated if a new major HTTP version is added.
+ */
+static int
+smp_fetch_fc_http_major(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn = NULL;
+	const char *mux_name = NULL;
+
+	/* pick the relevant connection: for health checks only the backend
+	 * side exists; otherwise frontend = session origin, backend = stream's
+	 * server-side stream connector.
+	 */
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[0] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[0] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	/* No connection or a connection with a raw (non-HTX) mux */
+	if (!conn || (conn->mux && !(conn->mux->flags & MX_FL_HTX)))
+		return 0;
+
+	/* No mux installed yet, this may change */
+	if (!conn->mux) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	mux_name = conn_get_mux_name(conn);
+
+	smp->data.type = SMP_T_SINT;
+	if (strcmp(mux_name, "QUIC") == 0)
+		smp->data.u.sint = 3;
+	else if (strcmp(mux_name, "H2") == 0)
+		smp->data.u.sint = 2;
+	else
+		smp->data.u.sint = 1;
+
+	return 1;
+}
+
+/* fetch if the received connection used a PROXY protocol header; returns a
+ * boolean sample, or 0 (with SMP_F_MAY_CHANGE) while the transport layer is
+ * still waiting for the handshake to complete.
+ */
+int smp_fetch_fc_rcvd_proxy(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+
+	conn = objt_conn(smp->sess->origin);
+	if (!conn)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		/* handshake not finished yet: the flag is not reliable */
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	smp->flags = 0;
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = (conn->flags & CO_FL_RCVD_PROXY) ? 1 : 0;
+
+	return 1;
+}
+
+/*
+ * This function checks the TLV type converter configuration.
+ * It expects the corresponding TLV type as a string representing the number
+ * or a constant. args[0] will be turned into the numerical value of the
+ * TLV type string and stored back as an ARGT_SINT argument.
+ * Returns 1 on success, 0 with <err> set otherwise.
+ */
+static int smp_check_tlv_type(struct arg *args, char **err)
+{
+	int type;
+	char *endp;
+	struct ist input = ist2(args[0].data.str.area, args[0].data.str.data);
+
+	/* well-known symbolic names, matched case-insensitively */
+	if (isteqi(input, ist("ALPN")) != 0)
+		type = PP2_TYPE_ALPN;
+	else if (isteqi(input, ist("AUTHORITY")) != 0)
+		type = PP2_TYPE_AUTHORITY;
+	else if (isteqi(input, ist("CRC32C")) != 0)
+		type = PP2_TYPE_CRC32C;
+	else if (isteqi(input, ist("NOOP")) != 0)
+		type = PP2_TYPE_NOOP;
+	else if (isteqi(input, ist("UNIQUE_ID")) != 0)
+		type = PP2_TYPE_UNIQUE_ID;
+	else if (isteqi(input, ist("SSL")) != 0)
+		type = PP2_TYPE_SSL;
+	else if (isteqi(input, ist("SSL_VERSION")) != 0)
+		type = PP2_SUBTYPE_SSL_VERSION;
+	else if (isteqi(input, ist("SSL_CN")) != 0)
+		type = PP2_SUBTYPE_SSL_CN;
+	else if (isteqi(input, ist("SSL_CIPHER")) != 0)
+		type = PP2_SUBTYPE_SSL_CIPHER;
+	else if (isteqi(input, ist("SSL_SIG_ALG")) != 0)
+		type = PP2_SUBTYPE_SSL_SIG_ALG;
+	else if (isteqi(input, ist("SSL_KEY_ALG")) != 0)
+		type = PP2_SUBTYPE_SSL_KEY_ALG;
+	else if (isteqi(input, ist("NETNS")) != 0)
+		type = PP2_TYPE_NETNS;
+	else {
+		/* otherwise a number; base 0 accepts decimal, 0x hex and octal */
+		type = strtoul(input.ptr, &endp, 0);
+		if (endp && *endp != '\0') {
+			memprintf(err, "Could not convert type '%s'", input.ptr);
+			return 0;
+		}
+	}
+
+	/* TLV types are a single byte on the wire */
+	if (type < 0 || type > 255) {
+		memprintf(err, "Invalid TLV Type '%s'", input.ptr);
+		return 0;
+	}
+
+	/* replace the string argument with its resolved integer value */
+	chunk_destroy(&args[0].data.str);
+	args[0].type = ARGT_SINT;
+	args[0].data.sint = type;
+
+	return 1;
+}
+
+/* fetch an arbitrary TLV from a PROXY protocol v2 header. The TLV type is
+ * expected in args[0] as an integer (resolved by smp_check_tlv_type). The
+ * fetch is resumable: smp->ctx.p remembers the last returned list element so
+ * repeated calls iterate over multiple occurrences of the same type, with
+ * SMP_F_NOT_LAST set while more may follow.
+ */
+int smp_fetch_fc_pp_tlv(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int idx;
+	struct connection *conn = NULL;
+	struct conn_tlv_list *conn_tlv = NULL;
+
+	conn = objt_conn(smp->sess->origin);
+	if (!conn)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		/* TLVs are not all collected before the handshake completes */
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (args[0].type != ARGT_SINT)
+		return 0;
+
+	idx = args[0].data.sint;
+	/* resume from the previous hit if any, else start at the list head */
+	conn_tlv = smp->ctx.p ? smp->ctx.p : LIST_ELEM(conn->tlv_list.n, struct conn_tlv_list *, list);
+	list_for_each_entry_from(conn_tlv, &conn->tlv_list, list) {
+		if (conn_tlv->type == idx) {
+			smp->flags |= SMP_F_NOT_LAST;
+			smp->data.type = SMP_T_STR;
+			smp->data.u.str.area = conn_tlv->value;
+			smp->data.u.str.data = conn_tlv->len;
+			smp->ctx.p = conn_tlv;
+
+			return 1;
+		}
+	}
+
+	/* no (more) occurrence of this type */
+	smp->flags &= ~SMP_F_NOT_LAST;
+
+	return 0;
+}
+
+/* fetch the authority TLV from a PROXY protocol header. Thin wrapper around
+ * smp_fetch_fc_pp_tlv() with a fixed PP2_TYPE_AUTHORITY argument.
+ */
+int smp_fetch_fc_pp_authority(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct arg tlv_arg;
+	int ret;
+
+	set_tlv_arg(PP2_TYPE_AUTHORITY, &tlv_arg);
+	ret = smp_fetch_fc_pp_tlv(&tlv_arg, smp, kw, private);
+	smp->flags &= ~SMP_F_NOT_LAST; // return only the first authority
+	return ret;
+}
+
+/* fetch the unique ID TLV from a PROXY protocol header. Thin wrapper around
+ * smp_fetch_fc_pp_tlv() with a fixed PP2_TYPE_UNIQUE_ID argument.
+ */
+int smp_fetch_fc_pp_unique_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct arg tlv_arg;
+	int ret;
+
+	set_tlv_arg(PP2_TYPE_UNIQUE_ID, &tlv_arg);
+	ret = smp_fetch_fc_pp_tlv(&tlv_arg, smp, kw, private);
+	smp->flags &= ~SMP_F_NOT_LAST; // return only the first unique ID
+	return ret;
+}
+
+/* fetch the error code of a connection as an integer sample. Serves both
+ * "fc_err" (frontend) and "bc_err" (backend), distinguished by kw[0] == 'b'.
+ */
+int smp_fetch_fc_err(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[0] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[0] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	if (!conn)
+		return 0;
+
+	/* no error yet and handshake still in progress: value may still change */
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	smp->flags = 0;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = (unsigned long long int)conn->err_code;
+
+	return 1;
+}
+
+/* fetch a string representation of the error code of a connection. Serves
+ * both "fc_err_str" (frontend) and "bc_err_str" (backend), distinguished by
+ * kw[0] == 'b'. Returns 0 when the code has no known string form.
+ */
+int smp_fetch_fc_err_str(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	const char *err_code_str;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[0] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[0] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	if (!conn)
+		return 0;
+
+	/* no error yet and handshake still in progress: value may still change */
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	err_code_str = conn_err_code_str(conn);
+
+	if (!err_code_str)
+		return 0;
+
+	smp->flags = 0;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.area = (char*)err_code_str;
+	smp->data.u.str.data = strlen(err_code_str);
+
+	return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: fetches that may return multiple types should be declared using the
+ * appropriate pseudo-type. If not available it must be declared as the lowest
+ * common denominator, the type that can be casted into all other ones.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+	{ "bc_err",          smp_fetch_fc_err,          0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+	{ "bc_err_str",      smp_fetch_fc_err_str,      0, NULL, SMP_T_STR,  SMP_USE_L4SRV },
+	{ "bc_http_major",   smp_fetch_fc_http_major,   0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+	{ "fc_err",          smp_fetch_fc_err,          0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+	{ "fc_err_str",      smp_fetch_fc_err_str,      0, NULL, SMP_T_STR,  SMP_USE_L4CLI },
+	{ "fc_http_major",   smp_fetch_fc_http_major,   0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+	{ "fc_rcvd_proxy",   smp_fetch_fc_rcvd_proxy,   0, NULL, SMP_T_BOOL, SMP_USE_L4CLI },
+	{ "fc_pp_authority", smp_fetch_fc_pp_authority, 0, NULL, SMP_T_STR,  SMP_USE_L4CLI },
+	{ "fc_pp_unique_id", smp_fetch_fc_pp_unique_id, 0, NULL, SMP_T_STR,  SMP_USE_L4CLI },
+	{ "fc_pp_tlv",       smp_fetch_fc_pp_tlv, ARG1(1, STR), smp_check_tlv_type, SMP_T_STR, SMP_USE_L4CLI },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+/* global configuration keywords registered by this file */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "pp2-never-send-local", cfg_parse_pp2_never_send_local },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* private function to handle sockaddr as input for connection hash */
+static void conn_calculate_hash_sockaddr(const struct sockaddr_storage *ss,
+ char *buf, size_t *idx,
+ enum conn_hash_params_t *hash_flags,
+ enum conn_hash_params_t param_type_addr,
+ enum conn_hash_params_t param_type_port)
+{
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ addr = (struct sockaddr_in *)ss;
+
+ conn_hash_update(buf, idx,
+ &addr->sin_addr, sizeof(addr->sin_addr),
+ hash_flags, param_type_addr);
+
+ if (addr->sin_port) {
+ conn_hash_update(buf, idx,
+ &addr->sin_port, sizeof(addr->sin_port),
+ hash_flags, param_type_port);
+ }
+
+ break;
+
+ case AF_INET6:
+ addr6 = (struct sockaddr_in6 *)ss;
+
+ conn_hash_update(buf, idx,
+ &addr6->sin6_addr, sizeof(addr6->sin6_addr),
+ hash_flags, param_type_addr);
+
+ if (addr6->sin6_port) {
+ conn_hash_update(buf, idx,
+ &addr6->sin6_port, sizeof(addr6->sin6_port),
+ hash_flags, param_type_port);
+ }
+
+ break;
+ }
+}
+
/* Compute a raw 64-bit xxHash over the first <size> bytes of <buf>. This is
 * used to pre-hash variable-length inputs (e.g. a name or PROXY data) into a
 * fixed-size value before mixing it into a connection hash. (The previous
 * comment here described conn_calculate_hash(), not this function.)
 */
uint64_t conn_hash_prehash(char *buf, size_t size)
{
	return XXH64(buf, size, 0);
}
+
+/* Append <data> into <buf> at <idx> offset in preparation for connection hash
+ * calcul. <idx> is incremented beyond data <size>. In the same time, <flags>
+ * are updated with <type> for the hash header.
+ */
+void conn_hash_update(char *buf, size_t *idx,
+ const void *data, size_t size,
+ enum conn_hash_params_t *flags,
+ enum conn_hash_params_t type)
+{
+ memcpy(&buf[*idx], data, size);
+ *idx += size;
+ *flags |= type;
+}
+
/* Finalize a connection hash: computes the xxHash of the <bufsize> bytes
 * accumulated in <buf>, then packs the parameter-type <flags> into the upper
 * bits and the truncated hash payload into the low CONN_HASH_PAYLOAD_LEN
 * bits. By construction, hashes built from different parameter sets differ
 * at least in the flags part.
 */
uint64_t conn_hash_digest(char *buf, size_t bufsize,
                          enum conn_hash_params_t flags)
{
	const uint64_t flags_u64 = (uint64_t)flags;
	const uint64_t hash = XXH64(buf, bufsize, 0);

	return (flags_u64 << CONN_HASH_PAYLOAD_LEN) | CONN_HASH_GET_PAYLOAD(hash);
}
+
/* Generate the hash of a connection with params as input.
 * Each non-null field of params is taken into account for the hash calcul.
 * The serialized input is staged in the global <trash> buffer; the order of
 * serialization matters and must stay stable so identical params always
 * produce the same key.
 * NOTE(review): no bound check is performed against trash.size; the
 * accumulated parameters are assumed to always fit — confirm sizing.
 */
uint64_t conn_calculate_hash(const struct conn_hash_params *params)
{
	char *buf;
	size_t idx = 0;
	uint64_t hash = 0;
	enum conn_hash_params_t hash_flags = 0;

	buf = trash.area;

	/* the target pointer is always hashed, with no type flag (0) */
	conn_hash_update(buf, &idx, &params->target, sizeof(params->target), &hash_flags, 0);

	if (params->sni_prehash) {
		conn_hash_update(buf, &idx,
		                 &params->sni_prehash, sizeof(params->sni_prehash),
		                 &hash_flags, CONN_HASH_PARAMS_TYPE_SNI);
	}

	if (params->dst_addr) {
		conn_calculate_hash_sockaddr(params->dst_addr,
		                             buf, &idx, &hash_flags,
		                             CONN_HASH_PARAMS_TYPE_DST_ADDR,
		                             CONN_HASH_PARAMS_TYPE_DST_PORT);
	}

	if (params->src_addr) {
		conn_calculate_hash_sockaddr(params->src_addr,
		                             buf, &idx, &hash_flags,
		                             CONN_HASH_PARAMS_TYPE_SRC_ADDR,
		                             CONN_HASH_PARAMS_TYPE_SRC_PORT);
	}

	if (params->proxy_prehash) {
		conn_hash_update(buf, &idx,
		                 &params->proxy_prehash, sizeof(params->proxy_prehash),
		                 &hash_flags, CONN_HASH_PARAMS_TYPE_PROXY);
	}

	hash = conn_hash_digest(buf, idx, hash_flags);
	return hash;
}
+
/* Reverse a <conn> connection instance. This effectively moves the connection
 * from frontend to backend side or vice-versa depending on its initial status.
 *
 * For active reversal, 'reverse' member points to the listener used as the new
 * connection target. Once transition is completed, the connection needs to be
 * accepted on the listener to instantiate its parent session before using
 * streams.
 *
 * For passive reversal, 'reverse' member points to the server used as the new
 * connection target. Once transition is completed, the connection appears as a
 * normal backend connection.
 *
 * Returns 0 on success else non-zero.
 */
int conn_reverse(struct connection *conn)
{
	struct conn_hash_params hash_params;
	/* NOTE(review): <hash> is int64_t while conn_calculate_hash() returns
	 * uint64_t; values with the top bit set undergo an implementation-
	 * defined conversion — confirm this is intended.
	 */
	int64_t hash = 0;
	struct session *sess = conn->owner;

	if (!conn_is_back(conn)) {
		/* frontend -> backend (passive reversal) */
		/* srv must have been set by a previous 'attach-srv' rule. */
		struct server *srv = objt_server(conn->reverse.target);
		BUG_ON(!srv);

		if (conn_backend_init(conn))
			return 1;

		/* Initialize hash value for usage as idle conns. */
		memset(&hash_params, 0, sizeof(hash_params));
		hash_params.target = srv;

		if (b_data(&conn->reverse.name)) {
			/* data cannot wrap else prehash usage is incorrect */
			BUG_ON(b_data(&conn->reverse.name) != b_contig_data(&conn->reverse.name, 0));

			hash_params.sni_prehash =
				conn_hash_prehash(b_head(&conn->reverse.name),
				                  b_data(&conn->reverse.name));
		}

		hash = conn_calculate_hash(&hash_params);
		conn->hash_node->node.key = hash;

		conn->target = &srv->obj_type;
		srv_use_conn(srv, conn);

		/* Free the session after detaching the connection from it. */
		session_unown_conn(sess, conn);
		sess->origin = NULL;
		session_free(sess);
		conn_set_owner(conn, NULL, NULL);

		conn->flags |= CO_FL_REVERSED;
	}
	else {
		/* backend -> frontend (active reversal) */
		/* Wake up receiver to proceed to connection accept. */
		struct listener *l = __objt_listener(conn->reverse.target);

		conn_backend_deinit(conn);

		conn->target = &l->obj_type;
		conn->flags |= CO_FL_ACT_REVERSING;
		task_wakeup(l->rx.rhttp.task, TASK_WOKEN_ANY);
	}

	/* Invert source and destination addresses if already set. */
	SWAP(conn->src, conn->dst);

	/* the reversal is done: drop the reversal context */
	conn->reverse.target = NULL;
	ha_free(&conn->reverse.name.area);
	conn->reverse.name = BUF_NULL;

	return 0;
}
+
+/* Handler of the task of mux_stopping_data.
+ * Called on soft-stop.
+ */
+static struct task *mux_stopping_process(struct task *t, void *ctx, unsigned int state)
+{
+ struct connection *conn, *back;
+
+ list_for_each_entry_safe(conn, back, &mux_stopping_data[tid].list, stopping_list) {
+ if (conn->mux && conn->mux->wake)
+ conn->mux->wake(conn);
+ }
+
+ return t;
+}
+
+static int allocate_mux_cleanup(void)
+{
+ /* allocates the thread bound mux_stopping_data task */
+ mux_stopping_data[tid].task = task_new_here();
+ if (!mux_stopping_data[tid].task) {
+ ha_alert("Failed to allocate the task for connection cleanup on thread %d.\n", tid);
+ return 0;
+ }
+
+ mux_stopping_data[tid].task->process = mux_stopping_process;
+ LIST_INIT(&mux_stopping_data[tid].list);
+
+ return 1;
+}
+REGISTER_PER_THREAD_ALLOC(allocate_mux_cleanup);
+
/* Per-thread deinit hook: releases the task allocated by
 * allocate_mux_cleanup(). Presumably task_destroy() tolerates a NULL task
 * (allocation failure path) — confirm.
 */
static int deallocate_mux_cleanup(void)
{
	task_destroy(mux_stopping_data[tid].task);
	return 1;
}
REGISTER_PER_THREAD_FREE(deallocate_mux_cleanup);
+
+static void deinit_idle_conns(void)
+{
+ int i;
+
+ for (i = 0; i < global.nbthread; i++) {
+ task_destroy(idle_conns[i].cleanup_task);
+ }
+}
+REGISTER_POST_DEINIT(deinit_idle_conns);
diff --git a/src/cpuset.c b/src/cpuset.c
new file mode 100644
index 0000000..82e350f
--- /dev/null
+++ b/src/cpuset.c
@@ -0,0 +1,296 @@
+#define _GNU_SOURCE
+#include <sched.h>
+#include <ctype.h>
+
+#include <haproxy/compat.h>
+#include <haproxy/cpuset.h>
+#include <haproxy/intops.h>
+#include <haproxy/tools.h>
+
+struct cpu_map *cpu_map;
+
/* Empties set <set> so that no CPU is present, whatever the backing
 * representation (cpu_set_t or plain unsigned long mask).
 */
void ha_cpuset_zero(struct hap_cpuset *set)
{
#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_ZERO(&set->cpuset);

#elif defined(CPUSET_USE_ULONG)
	set->cpuset = 0;
#endif
}
+
/* Adds CPU <cpu> to set <set>. Returns 0 on success, non-zero if <cpu> is
 * beyond the capacity of the set's representation.
 */
int ha_cpuset_set(struct hap_cpuset *set, int cpu)
{
	if (cpu >= ha_cpuset_size())
		return 1;

#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_SET(cpu, &set->cpuset);
	return 0;

#elif defined(CPUSET_USE_ULONG)
	/* the shifted constant must be unsigned long: with a plain int "0x1",
	 * shifting by >= 32 (cpu may reach LONGBITS-1 == 63 on LP64) is
	 * undefined behavior (C11 6.5.7).
	 */
	set->cpuset |= (0x1UL << cpu);
	return 0;
#endif
}
+
/* Removes CPU <cpu> from set <set>. Returns 0 on success, non-zero if <cpu>
 * is beyond the capacity of the set's representation.
 */
int ha_cpuset_clr(struct hap_cpuset *set, int cpu)
{
	if (cpu >= ha_cpuset_size())
		return 1;

#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_CLR(cpu, &set->cpuset);
	return 0;

#elif defined(CPUSET_USE_ULONG)
	/* unsigned long constant: an int "0x1" shifted by >= 32 is undefined
	 * behavior while cpu may reach LONGBITS-1 (63 on LP64) here.
	 */
	set->cpuset &= ~(0x1UL << cpu);
	return 0;
#endif
}
+
/* Intersects <dst> with <src>, storing the result into <dst>.
 * Note the two call forms: glibc's CPU_AND takes (dest, src1, src2) while
 * the FreeBSD variant takes (dest, src).
 */
void ha_cpuset_and(struct hap_cpuset *dst, struct hap_cpuset *src)
{
#if defined(CPUSET_USE_CPUSET)
	CPU_AND(&dst->cpuset, &dst->cpuset, &src->cpuset);

#elif defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_AND(&dst->cpuset, &src->cpuset);

#elif defined(CPUSET_USE_ULONG)
	dst->cpuset &= src->cpuset;
#endif
}
+
/* Unions <dst> with <src>, storing the result into <dst>.
 * Note the two call forms: glibc's CPU_OR takes (dest, src1, src2) while
 * the FreeBSD variant takes (dest, src).
 */
void ha_cpuset_or(struct hap_cpuset *dst, struct hap_cpuset *src)
{
#if defined(CPUSET_USE_CPUSET)
	CPU_OR(&dst->cpuset, &dst->cpuset, &src->cpuset);

#elif defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_OR(&dst->cpuset, &src->cpuset);

#elif defined(CPUSET_USE_ULONG)
	dst->cpuset |= src->cpuset;
#endif
}
+
/* Returns non-zero if CPU <cpu> is present in set <set>, zero otherwise
 * (including when <cpu> exceeds the set's capacity).
 */
int ha_cpuset_isset(const struct hap_cpuset *set, int cpu)
{
	if (cpu >= ha_cpuset_size())
		return 0;

#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	return CPU_ISSET(cpu, &set->cpuset);

#elif defined(CPUSET_USE_ULONG)
	/* unsigned long constant: an int "0x1" shifted by >= 32 is undefined
	 * behavior while cpu may reach LONGBITS-1 (63 on LP64) here.
	 */
	return !!(set->cpuset & (0x1UL << cpu));
#else
	return 0;
#endif
}
+
/* Returns the number of CPUs present in set <set>.
 * NOTE(review): unlike ha_cpuset_isset(), there is no #else fallback here;
 * a build defining none of the CPUSET_USE_* macros would fall off the end —
 * presumably the build system always defines exactly one; confirm.
 */
int ha_cpuset_count(const struct hap_cpuset *set)
{
#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	return CPU_COUNT(&set->cpuset);

#elif defined(CPUSET_USE_ULONG)
	return my_popcountl(set->cpuset);
#endif
}
+
/* Returns the position of the first CPU present in <set>, starting at 1
 * (ffs() semantics), or 0 if the set is empty.
 */
int ha_cpuset_ffs(const struct hap_cpuset *set)
{
#if defined(CPUSET_USE_CPUSET)
	int n;

	/* glibc has no CPU_FFS: scan linearly after ruling out an empty set
	 * so the loop below is guaranteed to terminate
	 */
	if (!CPU_COUNT(&set->cpuset))
		return 0;

	for (n = 0; !CPU_ISSET(n, &set->cpuset); ++n)
		;

	return n + 1;

#elif defined(CPUSET_USE_FREEBSD_CPUSET)
	return CPU_FFS(&set->cpuset);

#elif defined(CPUSET_USE_ULONG)
	if (!set->cpuset)
		return 0;

	return my_ffsl(set->cpuset);
#endif
}
+
/* Copies set <src> into <dst>. glibc's cpu_set_t has no CPU_COPY, so the
 * copy is emulated with a zero followed by an OR.
 */
void ha_cpuset_assign(struct hap_cpuset *dst, struct hap_cpuset *src)
{
#if defined(CPUSET_USE_CPUSET)
	CPU_ZERO(&dst->cpuset);
	CPU_OR(&dst->cpuset, &dst->cpuset, &src->cpuset);

#elif defined(CPUSET_USE_FREEBSD_CPUSET)
	CPU_COPY(&src->cpuset, &dst->cpuset);

#elif defined(CPUSET_USE_ULONG)
	dst->cpuset = src->cpuset;
#endif
}
+
/* Returns the maximum number of CPUs the set representation can hold:
 * CPU_SETSIZE for cpu_set_t based sets, the number of bits of an unsigned
 * long otherwise.
 */
int ha_cpuset_size(void)
{
	/* "(void)" instead of the old-style "()" declarator so the definition
	 * is a proper prototype (required in C23, cleaner before).
	 */
#if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
	return CPU_SETSIZE;

#elif defined(CPUSET_USE_ULONG)
	return LONGBITS;

#endif
}
+
/* Detects CPUs that are bound to the current process. Returns the number of
 * CPUs detected or 0 if the detection failed. On platforms with neither
 * sched_getaffinity() nor cpuset_getaffinity(), detection always fails.
 */
int ha_cpuset_detect_bound(struct hap_cpuset *set)
{
	ha_cpuset_zero(set);

	/* detect bound CPUs depending on the OS's API; the "0 || ..." form
	 * lets each platform branch contribute a single condition
	 */
	if (0
#if defined(__linux__)
	    || sched_getaffinity(0, sizeof(set->cpuset), &set->cpuset) != 0
#elif defined(__FreeBSD__)
	    || cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(set->cpuset), &set->cpuset) != 0
#else
	    || 1 // unhandled platform
#endif
	    ) {
		/* detection failed */
		return 0;
	}

	return ha_cpuset_count(set);
}
+
/* Parse cpu sets. Each CPU set is either a unique number between 0 and
 * ha_cpuset_size() - 1 or a range with two such numbers delimited by a dash
 * ('-'). Each CPU set can be a list of unique numbers or ranges separated by
 * a comma. It is also possible to specify multiple cpu numbers or ranges in
 * distinct argument in <args>. On success, it returns 0, otherwise it returns
 * 1, optionally with an error message in <err> if <err> is not NULL.
 */
int parse_cpu_set(const char **args, struct hap_cpuset *cpu_set, char **err)
{
	int cur_arg = 0;
	const char *arg;

	ha_cpuset_zero(cpu_set);

	arg = args[cur_arg];
	while (*arg) {
		const char *dash, *comma;
		unsigned int low, high;

		/* validate the current parsing position <arg>, not the start
		 * of the argument: after a comma, <arg> points into the middle
		 * of args[cur_arg], and testing *args[cur_arg] here would let
		 * invalid elements such as "0,x" pass the digit check.
		 */
		if (!isdigit((unsigned char)*arg)) {
			memprintf(err, "'%s' is not a CPU range.", arg);
			return 1;
		}

		low = high = str2uic(arg);

		comma = strchr(arg, ',');
		dash = strchr(arg, '-');

		/* an open-ended range "N-" extends to the last supported CPU */
		if (dash && (!comma || dash < comma))
			high = *(dash+1) ? str2uic(dash + 1) : ha_cpuset_size() - 1;

		/* accept ranges given in reverse order */
		if (high < low) {
			unsigned int swap = low;
			low = high;
			high = swap;
		}

		if (high >= ha_cpuset_size()) {
			memprintf(err, "supports CPU numbers from 0 to %d.",
			          ha_cpuset_size() - 1);
			return 1;
		}

		while (low <= high)
			ha_cpuset_set(cpu_set, low++);

		/* if a comma is present, parse the rest of the arg, else
		 * skip to the next arg */
		arg = comma ? comma + 1 : args[++cur_arg];
	}
	return 0;
}
+
/* Parse a linux cpu map string representing to a numeric cpu mask map
 * The cpu map string is a list of 4-byte hex strings separated by commas, with
 * most-significant byte first, one bit per cpu number. Note that the input
 * string is modified: it is truncated at each comma while walking backwards.
 */
void parse_cpumap(char *cpumap_str, struct hap_cpuset *cpu_set)
{
	unsigned long mask;
	char *chunk, *comma, *endptr;
	int word, bit;

	ha_cpuset_zero(cpu_set);

	/* words are written most-significant first, so process them from the
	 * rightmost one by locating the last comma at each step
	 */
	word = 0;
	while (1) {
		comma = strrchr(cpumap_str, ',');
		chunk = comma ? comma + 1 : cpumap_str;

		mask = strtoul(chunk, &endptr, 16);
		for (bit = 0; mask; mask >>= 1, ++bit) {
			if (mask & 0x1)
				ha_cpuset_set(cpu_set, bit + word * 32);
		}

		if (!comma)
			break;
		*comma = '\0';
		++word;
	}
}
+
+/* Returns true if at least one cpu-map directive was configured, otherwise
+ * false.
+ */
+int cpu_map_configured(void)
+{
+ int grp, thr;
+
+ for (grp = 0; grp < MAX_TGROUPS; grp++) {
+ for (thr = 0; thr < MAX_THREADS_PER_GROUP; thr++)
+ if (ha_cpuset_count(&cpu_map[grp].thread[thr]))
+ return 1;
+ }
+ return 0;
+}
+
+/* Allocates everything needed to store CPU information at boot.
+ * Returns non-zero on success, zero on failure.
+ */
+static int cpuset_alloc(void)
+{
+ /* allocate the structures used to store CPU topology info */
+ cpu_map = (struct cpu_map*)calloc(MAX_TGROUPS, sizeof(*cpu_map));
+ if (!cpu_map)
+ return 0;
+
+ return 1;
+}
+
/* Releases the CPU topology structures allocated by cpuset_alloc() and
 * resets the pointer (ha_free() nullifies its argument).
 */
static void cpuset_deinit(void)
{
	ha_free(&cpu_map);
}

INITCALL0(STG_ALLOC, cpuset_alloc);
REGISTER_POST_DEINIT(cpuset_deinit);
diff --git a/src/debug.c b/src/debug.c
new file mode 100644
index 0000000..fbaad80
--- /dev/null
+++ b/src/debug.c
@@ -0,0 +1,2301 @@
+/*
+ * Process debugging functions.
+ *
+ * Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <time.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#ifdef USE_EPOLL
+#include <sys/epoll.h>
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/buf.h>
+#include <haproxy/cli.h>
+#include <haproxy/clock.h>
+#include <haproxy/debug.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/hlua.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/task.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <import/ist.h>
+
+
+/* The dump state is made of:
+ * - num_thread on the lowest 15 bits
+ * - a SYNC flag on bit 15 (waiting for sync start)
+ * - number of participating threads on bits 16-30
+ * Initiating a dump consists in setting it to SYNC and incrementing the
+ * num_thread part when entering the function. The first thread periodically
+ * recounts active threads and compares it to the ready ones, and clears SYNC
+ * and sets the number of participants to the value found, which serves as a
+ * start signal. A thread finished dumping looks up the TID of the next active
+ * thread after it and writes it in the lowest part. If there's none, it sets
+ * the thread counter to the number of participants and resets that part,
+ * which serves as an end-of-dump signal. All threads decrement the num_thread
+ * part. Then all threads wait for the value to reach zero. Only used when
+ * USE_THREAD_DUMP is set.
+ */
+#define THREAD_DUMP_TMASK 0x00007FFFU
+#define THREAD_DUMP_FSYNC 0x00008000U
+#define THREAD_DUMP_PMASK 0x7FFF0000U
+
/* Description of a component with name, version, path, build options etc. E.g.
 * one of them is haproxy. Others might be some clearly identified shared libs.
 * They're intentionally self-contained and to be placed into an array to make
 * it easier to find them in a core. The important fields (name and version)
 * are locally allocated, other ones are dynamic.
 */
struct post_mortem_component {
	char name[32];          // symbolic short name (fixed array: readable in a raw core)
	char version[32];       // exact version (fixed array: readable in a raw core)
	char *toolchain;        // compiler and version (e.g. gcc-11.4.0)
	char *toolchain_opts;   // optims, arch-specific options (e.g. CFLAGS)
	char *build_settings;   // build options (e.g. USE_*, TARGET, etc)
	char *path;             // path if known.
};
+
/* This is a collection of information that are centralized to help with core
 * dump analysis. It must be used with a public variable and gather elements
 * as much as possible without dereferences so that even when identified in a
 * core dump it's possible to get the most out of it even if the core file is
 * not much exploitable. It's aligned to 256 so that it's easy to spot, given
 * that being that large it will not change its size much.
 */
struct post_mortem {
	/* platform-specific information */
	struct {
		struct utsname utsname; // OS name+ver+arch+hostname
		char hw_vendor[64];  // hardware/hypervisor vendor when known
		char hw_family[64];  // hardware/hypervisor product family when known
		char hw_model[64];   // hardware/hypervisor product/model when known
		char brd_vendor[64]; // mainboard vendor when known
		char brd_model[64];  // mainboard model when known
		char soc_vendor[64]; // SoC/CPU vendor from cpuinfo
		char soc_model[64];  // SoC model when known and relevant
		char cpu_model[64];  // CPU model when different from SoC
		char virt_techno[16]; // when provided by cpuid
		char cont_techno[16]; // empty, "no", "yes", "docker" or others
	} platform;

	/* process-specific information */
	struct {
		pid_t pid;
		uid_t boot_uid;
		gid_t boot_gid;
		struct rlimit limit_fd;  // RLIMIT_NOFILE
		struct rlimit limit_ram; // RLIMIT_AS or RLIMIT_DATA

#if defined(USE_THREAD)
		/* per-thread identity; presumably only the first
		 * global.nbthread entries are filled — confirm at init site
		 */
		struct {
			ullong pth_id;   // pthread_t cast to a ullong
			void *stack_top; // top of the stack
		} thread_info[MAX_THREADS];
#endif
	} process;

#if defined(HA_HAVE_DUMP_LIBS)
	/* information about dynamic shared libraries involved */
	char *libs;                      // dump of one addr / path per line, or NULL
#endif

	/* info about identified distinct components (executable, shared libs, etc).
	 * These can be all listed at once in gdb using:
	 *    p *post_mortem.components@post_mortem.nb_components
	 */
	uint nb_components;              // # of components below
	struct post_mortem_component *components; // NULL or array
} post_mortem ALIGNED(256) = { };
+
+/* Points to a copy of the buffer where the dump functions should write, when
+ * non-null. It's only used by debuggers for core dump analysis.
+ */
+struct buffer *thread_dump_buffer = NULL;
+unsigned int debug_commands_issued = 0;
+
+/* dumps a backtrace of the current thread that is appended to buffer <buf>.
+ * Lines are prefixed with the string <prefix> which may be empty (used for
+ * indenting). It is recommended to use this at a function's tail so that
+ * the function does not appear in the call stack. The <dump> argument
+ * indicates what dump state to start from, and should usually be zero. It
+ * may be among the following values:
+ * - 0: search usual callers before step 1, or directly jump to 2
+ * - 1: skip usual callers before step 2
+ * - 2: dump until polling loop, scheduler, or main() (excluded)
+ * - 3: end
+ * - 4-7: like 0 but stops *after* main.
+ */
+void ha_dump_backtrace(struct buffer *buf, const char *prefix, int dump)
+{
+ struct buffer bak;
+ char pfx2[100];
+ void *callers[100];
+ int j, nptrs;
+ const void *addr;
+
+ nptrs = my_backtrace(callers, sizeof(callers)/sizeof(*callers));
+ if (!nptrs)
+ return;
+
+ if (snprintf(pfx2, sizeof(pfx2), "%s| ", prefix) > sizeof(pfx2))
+ pfx2[0] = 0;
+
+ /* The call backtrace_symbols_fd(callers, nptrs, STDOUT_FILENO would
+ * produce similar output to the following:
+ */
+ chunk_appendf(buf, "%scall trace(%d):\n", prefix, nptrs);
+ for (j = 0; (j < nptrs || (dump & 3) < 2); j++) {
+ if (j == nptrs && !(dump & 3)) {
+ /* we failed to spot the starting point of the
+ * dump, let's start over dumping everything we
+ * have.
+ */
+ dump += 2;
+ j = 0;
+ }
+ bak = *buf;
+ dump_addr_and_bytes(buf, pfx2, callers[j], 8);
+ addr = resolve_sym_name(buf, ": ", callers[j]);
+ if ((dump & 3) == 0) {
+ /* dump not started, will start *after* ha_thread_dump_one(),
+ * ha_panic and ha_backtrace_to_stderr
+ */
+ if (addr == ha_panic ||
+ addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one)
+ dump++;
+ *buf = bak;
+ continue;
+ }
+
+ if ((dump & 3) == 1) {
+ /* starting */
+ if (addr == ha_panic ||
+ addr == ha_backtrace_to_stderr || addr == ha_thread_dump_one) {
+ *buf = bak;
+ continue;
+ }
+ dump++;
+ }
+
+ if ((dump & 3) == 2) {
+ /* still dumping */
+ if (dump == 6) {
+ /* we only stop *after* main and we must send the LF */
+ if (addr == main) {
+ j = nptrs;
+ dump++;
+ }
+ }
+ else if (addr == run_poll_loop || addr == main || addr == run_tasks_from_lists) {
+ dump++;
+ *buf = bak;
+ break;
+ }
+ }
+ /* OK, line dumped */
+ chunk_appendf(buf, "\n");
+ }
+}
+
+/* dump a backtrace of current thread's stack to stderr. */
+void ha_backtrace_to_stderr(void)
+{
+ char area[2048];
+ struct buffer b = b_make(area, sizeof(area), 0, 0);
+
+ ha_dump_backtrace(&b, " ", 4);
+ if (b.data)
+ DISGUISE(write(2, b.area, b.data));
+}
+
/* Dumps to the thread's buffer some known information for the desired thread,
 * and optionally extra info when it's safe to do so (current thread or
 * isolated). The dump will be appended to the buffer, so the caller is
 * responsible for preliminary initializing it. The <from_signal> argument will
 * indicate if the function is called from the debug signal handler, indicating
 * the thread was dumped upon request from another one, otherwise if the thread
 * it the current one, a star ('*') will be displayed in front of the thread to
 * indicate the requesting one. Any stuck thread is also prefixed with a '>'.
 * The caller is responsible for atomically setting up the thread's dump buffer
 * to point to a valid buffer with enough room. Output will be truncated if it
 * does not fit. When the dump is complete, the dump buffer will be switched to
 * (void*)0x1 that the caller must turn to 0x0 once the contents are collected.
 */
void ha_thread_dump_one(int thr, int from_signal)
{
	/* the dump buffer was installed by the requester (see ha_thread_dump()) */
	struct buffer *buf = HA_ATOMIC_LOAD(&ha_thread_ctx[thr].thread_dump_buffer);
	unsigned long __maybe_unused thr_bit = ha_thread_info[thr].ltid_bit;
	int __maybe_unused tgrp = ha_thread_info[thr].tgid;
	unsigned long long p = ha_thread_ctx[thr].prev_cpu_time;
	unsigned long long n = now_cpu_time_thread(thr);
	int stuck = !!(ha_thread_ctx[thr].flags & TH_FL_STUCK);

	/* first part: scheduler state, readable from any thread */
	chunk_appendf(buf,
	              "%c%cThread %-2u: id=0x%llx act=%d glob=%d wq=%d rq=%d tl=%d tlsz=%d rqsz=%d\n"
	              "     %2u/%-2u   stuck=%d prof=%d",
	              (thr == tid && !from_signal) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
	              ha_get_pthread_id(thr),
	              thread_has_tasks(),
	              !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared),
	              !eb_is_empty(&ha_thread_ctx[thr].timers),
	              !eb_is_empty(&ha_thread_ctx[thr].rqueue),
	              !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) &&
	                LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_NORMAL]) &&
	                LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_BULK]) &&
	                MT_LIST_ISEMPTY(&ha_thread_ctx[thr].shared_tasklet_list)),
	              ha_thread_ctx[thr].tasks_in_list,
	              ha_thread_ctx[thr].rq_total,
	              ha_thread_info[thr].tgid, ha_thread_info[thr].ltid + 1,
	              stuck,
	              !!(ha_thread_ctx[thr].flags & TH_FL_TASK_PROFILING));

#if defined(USE_THREAD)
	chunk_appendf(buf,
	              " harmless=%d isolated=%d",
	              !!(_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) & thr_bit),
	              isolated_thread == thr);
#endif

	chunk_appendf(buf, "\n");
	chunk_appendf(buf, "             cpu_ns: poll=%llu now=%llu diff=%llu\n", p, n, n-p);

	/* this is the end of what we can dump from outside the current thread */

	if (thr != tid && !thread_isolated())
		goto leave;

	/* second part: current task, only safe from the thread itself or
	 * under full isolation
	 */
	chunk_appendf(buf, "             curr_task=");
	ha_task_dump(buf, th_ctx->current, "             ");

	if (stuck && thr == tid) {
#ifdef USE_LUA
		/* flag a wedged Lua interpreter so post-mortem analysis knows */
		if (th_ctx->current &&
		    th_ctx->current->process == process_stream && th_ctx->current->context) {
			const struct stream *s = (const struct stream *)th_ctx->current->context;
			struct hlua *hlua = s ? s->hlua : NULL;

			if (hlua && hlua->T) {
				mark_tainted(TAINTED_LUA_STUCK);
				if (hlua->state_id == 0)
					mark_tainted(TAINTED_LUA_STUCK_SHARED);
			}
		}
#endif

		if (HA_ATOMIC_LOAD(&pool_trim_in_progress))
			mark_tainted(TAINTED_MEM_TRIMMING_STUCK);

		/* We only emit the backtrace for stuck threads in order not to
		 * waste precious output buffer space with non-interesting data.
		 * Please leave this as the last instruction in this function
		 * so that the compiler uses tail merging and the current
		 * function does not appear in the stack.
		 */
		ha_dump_backtrace(buf, "             ", 0);
	}
 leave:
	/* end of dump, setting the buffer to 0x1 will tell the caller we're done */
	HA_ATOMIC_STORE(&ha_thread_ctx[thr].thread_dump_buffer, (void*)0x1UL);
}
+
/* Triggers a thread dump from thread <thr>, either directly if it's the
 * current thread or if thread dump signals are not implemented, or by sending
 * a signal if it's a remote one and the feature is supported. The buffer <buf>
 * will get the dump appended, and the caller is responsible for making sure
 * there is enough room otherwise some contents will be truncated.
 */
void ha_thread_dump(struct buffer *buf, int thr)
{
	struct buffer *old = NULL;

	/* try to impose our dump buffer and to reserve the target thread's
	 * next dump for us: CAS from NULL to <buf>, spinning while another
	 * requester holds the slot.
	 */
	do {
		if (old)
			ha_thread_relax();
		old = NULL;
	} while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, buf));

#ifdef USE_THREAD_DUMP
	/* asking the remote thread to dump itself allows to get more details
	 * including a backtrace.
	 */
	if (thr != tid)
		ha_tkill(thr, DEBUGSIG);
	else
#endif
		ha_thread_dump_one(thr, thr != tid);

	/* now wait for the dump to be done, and release it: the dumper stores
	 * 0x1 in the slot when finished (see ha_thread_dump_one()), which we
	 * CAS back to 0 to free the slot for the next requester.
	 */
	do {
		if (old)
			ha_thread_relax();
		old = (void*)0x01;
	} while (!HA_ATOMIC_CAS(&ha_thread_ctx[thr].thread_dump_buffer, &old, 0));
}
+
/* dumps into the buffer some information related to task <task> (which may
 * either be a task or a tasklet, and prepend each line except the first one
 * with <pfx>. The buffer is only appended and the first output starts by the
 * pointer itself. The caller is responsible for making sure the task is not
 * going to vanish during the dump.
 */
void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
{
	const struct stream *s = NULL;
	const struct appctx __maybe_unused *appctx = NULL;
	struct hlua __maybe_unused *hlua = NULL;
	const struct stconn *sc;

	if (!task) {
		chunk_appendf(buf, "0\n");
		return;
	}

	/* tasklets have no wake date; tasks also report the time since their
	 * last wakeup when known
	 */
	if (TASK_IS_TASKLET(task))
		chunk_appendf(buf,
		              "%p (tasklet) calls=%u\n",
		              task,
		              task->calls);
	else
		chunk_appendf(buf,
		              "%p (task) calls=%u last=%llu%s\n",
		              task,
		              task->calls,
		              task->wake_date ? (unsigned long long)(now_mono_time() - task->wake_date) : 0,
		              task->wake_date ? " ns ago" : "");

	chunk_appendf(buf, "%s  fct=%p(", pfx, task->process);
	resolve_sym_name(buf, NULL, task->process);
	chunk_appendf(buf,") ctx=%p", task->context);

	if (task->process == task_run_applet && (appctx = task->context))
		chunk_appendf(buf, "(%s)\n", appctx->applet->name);
	else
		chunk_appendf(buf, "\n");

	/* try to recover the stream behind the task, depending on the
	 * handler: stream task, applet task, or stream-connector I/O tasklet
	 */
	if (task->process == process_stream && task->context)
		s = (struct stream *)task->context;
	else if (task->process == task_run_applet && task->context && (sc = appctx_sc((struct appctx *)task->context)))
		s = sc_strm(sc);
	else if (task->process == sc_conn_io_cb && task->context)
		s = sc_strm(((struct stconn *)task->context));

	if (s) {
		chunk_appendf(buf, "%sstream=", pfx);
		strm_dump_to_buffer(buf, s, pfx, HA_ATOMIC_LOAD(&global.anon_key));
	}

#ifdef USE_LUA
	hlua = NULL;
	if (s && (hlua = s->hlua)) {
		chunk_appendf(buf, "%sCurrent executing Lua from a stream analyser -- ", pfx);
	}
	else if (task->process == hlua_process_task && (hlua = task->context)) {
		chunk_appendf(buf, "%sCurrent executing a Lua task -- ", pfx);
	}
	else if (task->process == task_run_applet && (appctx = task->context) &&
	         (appctx->applet->fct == hlua_applet_tcp_fct)) {
		chunk_appendf(buf, "%sCurrent executing a Lua TCP service -- ", pfx);
	}
	else if (task->process == task_run_applet && (appctx = task->context) &&
	         (appctx->applet->fct == hlua_applet_http_fct)) {
		chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx);
	}

	if (hlua && hlua->T) {
		chunk_appendf(buf, "stack traceback:\n    ");
		append_prefixed_str(buf, hlua_traceback(hlua->T, "\n    "), pfx, '\n', 0);
	}

	/* we may need to terminate the current line.
	 * NOTE(review): b_peek(buf, b_data(buf)-1) assumes the buffer is
	 * non-empty at this point — true given the unconditional output
	 * above, but fragile if that changes.
	 */
	if (*b_peek(buf, b_data(buf)-1) != '\n')
		b_putchr(buf, '\n');
#endif
}
+
/* This function dumps all profiling settings. It returns 0 if the output
 * buffer is full and it needs to be called again, otherwise non-zero.
 * State: st0 non-zero means a previous call was interrupted and st1 holds
 * the thread number to resume from.
 */
static int cli_io_handler_show_threads(struct appctx *appctx)
{
	struct stconn *sc = appctx_sc(appctx);
	int thr;

	/* FIXME: Don't watch the other side !*/
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		return 1;

	/* resume from the saved thread when re-entered after a full buffer */
	if (appctx->st0)
		thr = appctx->st1;
	else
		thr = 0;

	do {
		chunk_reset(&trash);
		ha_thread_dump(&trash, thr);

		if (applet_putchk(appctx, &trash) == -1) {
			/* failed, try again */
			appctx->st1 = thr;
			return 0;
		}
		thr++;
	} while (thr < global.nbthread);

	return 1;
}
+
#if defined(HA_HAVE_DUMP_LIBS)
/* parse a "show libs" command. It returns 1 if it emits anything otherwise zero. */
static int debug_parse_cli_show_libs(char **args, char *payload, struct appctx *appctx, void *private)
{
	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	chunk_reset(&trash);
	if (!dump_libs(&trash, 1))
		return 0;

	return cli_msg(appctx, LOG_INFO, trash.area);
}
#endif
+
+/* parse a "show dev" command: dumps build features/options, platform
+ * information collected in post_mortem, and a few process attributes
+ * (pid, boot uid/gid, rlimits). It returns 1 if it emits anything
+ * otherwise zero.
+ */
+static int debug_parse_cli_show_dev(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	const char **build_opt;
+
+	if (*args[2])
+		return cli_err(appctx, "This command takes no argument.\n");
+
+	chunk_reset(&trash);
+
+	chunk_appendf(&trash, "Features\n %s\n", build_features);
+
+	chunk_appendf(&trash, "Build options\n");
+	for (build_opt = NULL; (build_opt = hap_get_next_build_opt(build_opt)); )
+		if (append_prefixed_str(&trash, *build_opt, " ", '\n', 0) == 0)
+			chunk_strcat(&trash, "\n");
+
+	/* platform fields are only printed when non-empty */
+	chunk_appendf(&trash, "Platform info\n");
+	if (*post_mortem.platform.hw_vendor)
+		chunk_appendf(&trash, "  machine vendor: %s\n", post_mortem.platform.hw_vendor);
+	if (*post_mortem.platform.hw_family)
+		chunk_appendf(&trash, "  machine family: %s\n", post_mortem.platform.hw_family);
+	if (*post_mortem.platform.hw_model)
+		chunk_appendf(&trash, "  machine model: %s\n", post_mortem.platform.hw_model);
+	if (*post_mortem.platform.brd_vendor)
+		chunk_appendf(&trash, "  board vendor: %s\n", post_mortem.platform.brd_vendor);
+	if (*post_mortem.platform.brd_model)
+		chunk_appendf(&trash, "  board model: %s\n", post_mortem.platform.brd_model);
+	if (*post_mortem.platform.soc_vendor)
+		chunk_appendf(&trash, "  soc vendor: %s\n", post_mortem.platform.soc_vendor);
+	if (*post_mortem.platform.soc_model)
+		chunk_appendf(&trash, "  soc model: %s\n", post_mortem.platform.soc_model);
+	if (*post_mortem.platform.cpu_model)
+		chunk_appendf(&trash, "  cpu model: %s\n", post_mortem.platform.cpu_model);
+	if (*post_mortem.platform.virt_techno)
+		chunk_appendf(&trash, "  virtual machine: %s\n", post_mortem.platform.virt_techno);
+	if (*post_mortem.platform.cont_techno)
+		chunk_appendf(&trash, "  container: %s\n", post_mortem.platform.cont_techno);
+	if (*post_mortem.platform.utsname.sysname)
+		chunk_appendf(&trash, "  OS name: %s\n", post_mortem.platform.utsname.sysname);
+	if (*post_mortem.platform.utsname.release)
+		chunk_appendf(&trash, "  OS release: %s\n", post_mortem.platform.utsname.release);
+	if (*post_mortem.platform.utsname.version)
+		chunk_appendf(&trash, "  OS version: %s\n", post_mortem.platform.utsname.version);
+	if (*post_mortem.platform.utsname.machine)
+		chunk_appendf(&trash, "  OS architecture: %s\n", post_mortem.platform.utsname.machine);
+	if (*post_mortem.platform.utsname.nodename)
+		chunk_appendf(&trash, "  node name: %s\n", HA_ANON_CLI(post_mortem.platform.utsname.nodename));
+
+	chunk_appendf(&trash, "Process info\n");
+	chunk_appendf(&trash, "  pid: %d\n", post_mortem.process.pid);
+	chunk_appendf(&trash, "  boot uid: %d\n", post_mortem.process.boot_uid);
+	chunk_appendf(&trash, "  boot gid: %d\n", post_mortem.process.boot_gid);
+
+	/* rlimits are only printed when an actual (non-infinite) limit is set */
+	if ((ulong)post_mortem.process.limit_fd.rlim_cur != RLIM_INFINITY)
+		chunk_appendf(&trash, "  fd limit (soft): %lu\n", (ulong)post_mortem.process.limit_fd.rlim_cur);
+	if ((ulong)post_mortem.process.limit_fd.rlim_max != RLIM_INFINITY)
+		chunk_appendf(&trash, "  fd limit (hard): %lu\n", (ulong)post_mortem.process.limit_fd.rlim_max);
+	if ((ulong)post_mortem.process.limit_ram.rlim_cur != RLIM_INFINITY)
+		chunk_appendf(&trash, "  ram limit (soft): %lu\n", (ulong)post_mortem.process.limit_ram.rlim_cur);
+	if ((ulong)post_mortem.process.limit_ram.rlim_max != RLIM_INFINITY)
+		chunk_appendf(&trash, "  ram limit (hard): %lu\n", (ulong)post_mortem.process.limit_ram.rlim_max);
+
+	return cli_msg(appctx, LOG_INFO, trash.area);
+}
+
+/* Dumps a state of all threads into the trash and on fd #2, then aborts.
+ * A copy will be put into a trash chunk that's assigned to thread_dump_buffer
+ * so that the debugger can easily find it. This buffer might be truncated if
+ * too many threads are being dumped, but at least we'll dump them all on stderr.
+ * If thread_dump_buffer is set, it means that a panic has already begun.
+ * May be called from a signal handler (e.g. the watchdog).
+ */
+void ha_panic()
+{
+	struct buffer *old;
+	unsigned int thr;
+
+	mark_tainted(TAINTED_PANIC);
+
+	/* atomically claim the dump buffer; only the first caller proceeds */
+	old = NULL;
+	if (!HA_ATOMIC_CAS(&thread_dump_buffer, &old, get_trash_chunk())) {
+		/* a panic dump is already in progress, let's not disturb it,
+		 * we'll be called via signal DEBUGSIG. By returning we may be
+		 * able to leave a current signal handler (e.g. WDT) so that
+		 * this will ensure more reliable signal delivery.
+		 */
+		return;
+	}
+
+	chunk_reset(&trash);
+	chunk_appendf(&trash, "Thread %u is about to kill the process.\n", tid + 1);
+
+	/* dump every thread to stderr, and keep a (possibly truncated) copy in
+	 * thread_dump_buffer for post-mortem inspection by a debugger.
+	 */
+	for (thr = 0; thr < global.nbthread; thr++) {
+		ha_thread_dump(&trash, thr);
+		DISGUISE(write(2, trash.area, trash.data));
+		b_force_xfer(thread_dump_buffer, &trash, b_room(thread_dump_buffer));
+		chunk_reset(&trash);
+	}
+
+#ifdef USE_LUA
+	/* append a hint about the most likely cause when Lua is involved */
+	if (get_tainted() & TAINTED_LUA_STUCK_SHARED && global.nbthread > 1) {
+		chunk_printf(&trash,
+			     "### Note: at least one thread was stuck in a Lua context loaded using the\n"
+			     "          'lua-load' directive, which is known for causing heavy contention\n"
+			     "          when used with threads. Please consider using 'lua-load-per-thread'\n"
+			     "          instead if your code is safe to run in parallel on multiple threads.\n");
+		DISGUISE(write(2, trash.area, trash.data));
+	}
+	else if (get_tainted() & TAINTED_LUA_STUCK) {
+		chunk_printf(&trash,
+			     "### Note: at least one thread was stuck in a Lua context in a way that suggests\n"
+			     "          heavy processing inside a dependency or a long loop that can't yield.\n"
+			     "          Please make sure any external code you may rely on is safe for use in\n"
+			     "          an event-driven engine.\n");
+		DISGUISE(write(2, trash.area, trash.data));
+	}
+#endif
+	if (get_tainted() & TAINTED_MEM_TRIMMING_STUCK) {
+		chunk_printf(&trash,
+			     "### Note: one thread was found stuck under malloc_trim(), which can run for a\n"
+			     "          very long time on large memory systems. You way want to disable this\n"
+			     "          memory reclaiming feature by setting 'no-memory-trimming' in the\n"
+			     "          'global' section of your configuration to avoid this in the future.\n");
+		DISGUISE(write(2, trash.area, trash.data));
+	}
+
+	/* loop in case a signal handler returns over the abort() */
+	for (;;)
+		abort();
+}
+
+/* Complain with message <msg> on stderr. If <counter> is not NULL, it is
+ * atomically incremented, and the message is only printed when the counter
+ * was zero, so that the message is only printed once. <taint> is only checked
+ * on bit 1, and will taint the process either for a bug (2) or warn (0).
+ */
+void complain(int *counter, const char *msg, int taint)
+{
+	/* fetch-add returns the previous value: non-zero means already printed */
+	if (counter && _HA_ATOMIC_FETCH_ADD(counter, 1))
+		return;
+	DISGUISE(write(2, msg, strlen(msg)));
+	if (taint & 2)
+		mark_tainted(TAINTED_BUG);
+	else
+		mark_tainted(TAINTED_WARN);
+}
+
+/* parse a "debug dev exit" command: immediately exits the process with the
+ * status code given in args[3]. It always returns 1, though it should never
+ * return.
+ */
+static int debug_parse_cli_exit(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int code = atoi(args[3]);
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	exit(code);
+	return 1;
+}
+
+/* parse a "debug dev bug" command: triggers a BUG_ON() on purpose to test the
+ * crash path. It always returns 1, though it should never return.
+ * Note: we make sure not to make the function static so that it appears in the trace.
+ */
+int debug_parse_cli_bug(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	BUG_ON(one > zero);
+	return 1;
+}
+
+/* parse a "debug dev warn" command: triggers a WARN_ON() on purpose to test
+ * the warning path. It always returns 1.
+ * Note: we make sure not to make the function static so that it appears in the trace.
+ */
+int debug_parse_cli_warn(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	WARN_ON(one > zero);
+	return 1;
+}
+
+/* parse a "debug dev check" command: triggers a CHECK_IF() on purpose to test
+ * the condition-check path. It always returns 1.
+ * Note: we make sure not to make the function static so that it appears in the trace.
+ */
+int debug_parse_cli_check(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	CHECK_IF(one > zero);
+	return 1;
+}
+
+/* parse a "debug dev close" command: forcefully deletes the file descriptor
+ * whose number is passed in args[3], provided it is known to fdtab. It always
+ * returns 1.
+ */
+static int debug_parse_cli_close(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int fd;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[3])
+		return cli_err(appctx, "Missing file descriptor number.\n");
+
+	fd = atoi(args[3]);
+	if (fd < 0 || fd >= global.maxsock)
+		return cli_err(appctx, "File descriptor out of range.\n");
+
+	/* only FDs registered in fdtab may be deleted through fd_delete() */
+	if (!fdtab[fd].owner)
+		return cli_msg(appctx, LOG_INFO, "File descriptor was already closed.\n");
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	fd_delete(fd);
+	return 1;
+}
+
+/* this is meant to cause a deadlock when more than one task is running it or
+ * when run twice: the static spinlock is taken and intentionally never
+ * released, so the second caller blocks forever (watchdog test helper).
+ */
+static struct task *debug_run_cli_deadlock(struct task *task, void *ctx, unsigned int state)
+{
+	static HA_SPINLOCK_T lock __maybe_unused;
+
+	HA_SPIN_LOCK(OTHER_LOCK, &lock);
+	return NULL;
+}
+
+/* parse a "debug dev deadlock" command: spawns args[3] tasks spread over the
+ * threads, all running debug_run_cli_deadlock() so that at least two of them
+ * deadlock on the same spinlock. It always returns 1.
+ */
+static int debug_parse_cli_deadlock(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int tasks;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	for (tasks = atoi(args[3]); tasks > 0; tasks--) {
+		/* allocation failures are simply skipped, best effort */
+		struct task *t = task_new_on(tasks % global.nbthread);
+		if (!t)
+			continue;
+		t->process = debug_run_cli_deadlock;
+		t->context = NULL;
+		task_wakeup(t, TASK_WOKEN_INIT);
+	}
+
+	return 1;
+}
+
+/* parse a "debug dev delay" command: sleeps for args[3] milliseconds on the
+ * current thread. It always returns 1.
+ */
+static int debug_parse_cli_delay(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int delay = atoi(args[3]);
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	usleep((long)delay * 1000);
+	return 1;
+}
+
+/* parse a "debug dev log" command: joins all remaining arguments with spaces
+ * and emits them as a LOG_INFO message through the global loggers. It always
+ * returns 1.
+ */
+static int debug_parse_cli_log(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int arg;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	chunk_reset(&trash);
+	for (arg = 3; *args[arg]; arg++) {
+		if (arg > 3)
+			chunk_strcat(&trash, " ");
+		chunk_strcat(&trash, args[arg]);
+	}
+
+	send_log(NULL, LOG_INFO, "%s\n", trash.area);
+	return 1;
+}
+
+/* parse a "debug dev loop" command: busy-loops for args[3] milliseconds,
+ * optionally under thread isolation when args[4] is "isolated". It always
+ * returns 1.
+ */
+static int debug_parse_cli_loop(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct timeval deadline, curr;
+	int loop = atoi(args[3]);
+	int isolate;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	isolate = strcmp(args[4], "isolated") == 0;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	gettimeofday(&curr, NULL);
+	tv_ms_add(&deadline, &curr, loop);
+
+	if (isolate)
+		thread_isolate();
+
+	/* spin without yielding until the deadline is reached */
+	while (tv_ms_cmp(&curr, &deadline) < 0)
+		gettimeofday(&curr, NULL);
+
+	if (isolate)
+		thread_release();
+
+	return 1;
+}
+
+/* parse a "debug dev panic" command: invokes ha_panic() to dump all threads
+ * and abort. It always returns 1, though it should never return.
+ */
+static int debug_parse_cli_panic(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	ha_panic();
+	return 1;
+}
+
+/* parse a "debug dev exec" command: joins the remaining arguments into a
+ * shell command line, forks under thread isolation, runs it via /bin/sh -c
+ * with stdout/stderr redirected to a pipe, and reports the (possibly
+ * truncated) output to the CLI. It always returns 1.
+ */
+#if defined(DEBUG_DEV)
+static int debug_parse_cli_exec(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int pipefd[2];
+	int arg;
+	int pid;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	chunk_reset(&trash);
+	for (arg = 3; *args[arg]; arg++) {
+		if (arg > 3)
+			chunk_strcat(&trash, " ");
+		chunk_strcat(&trash, args[arg]);
+	}
+
+	thread_isolate();
+	if (pipe(pipefd) < 0)
+		goto fail_pipe;
+
+	if (fd_set_cloexec(pipefd[0]) == -1)
+		goto fail_fcntl;
+
+	if (fd_set_cloexec(pipefd[1]) == -1)
+		goto fail_fcntl;
+
+	pid = fork();
+
+	if (pid < 0)
+		goto fail_fork;
+	else if (pid == 0) {
+		/* child: close stdin, send stdout/stderr to the pipe */
+		char *cmd[4] = { "/bin/sh", "-c", 0, 0 };
+
+		close(0);
+		dup2(pipefd[1], 1);
+		dup2(pipefd[1], 2);
+
+		cmd[2] = trash.area;
+		execvp(cmd[0], cmd);
+		printf("execvp() failed\n");
+		exit(1);
+	}
+
+	/* parent */
+	thread_release();
+	close(pipefd[1]);
+	chunk_reset(&trash);
+	while (1) {
+		/* must be signed: read() returns -1 on error, which a size_t
+		 * would wrap to SIZE_MAX and corrupt trash.data.
+		 */
+		ssize_t ret = read(pipefd[0], trash.area + trash.data, trash.size - 20 - trash.data);
+		if (ret <= 0)
+			break;
+		trash.data += ret;
+		/* keep 20 bytes of headroom for the truncation marker and NUL */
+		if (trash.data + 20 == trash.size) {
+			chunk_strcat(&trash, "\n[[[TRUNCATED]]]\n");
+			break;
+		}
+	}
+	close(pipefd[0]);
+	waitpid(pid, NULL, WNOHANG);
+	trash.area[trash.data] = 0;
+	return cli_msg(appctx, LOG_INFO, trash.area);
+
+ fail_fork:
+ fail_fcntl:
+	close(pipefd[0]);
+	close(pipefd[1]);
+ fail_pipe:
+	thread_release();
+	return cli_err(appctx, "Failed to execute command.\n");
+}
+
+/* handles SIGRTMAX to inject random delays on the receiving thread in order
+ * to try to increase the likelihood to reproduce inter-thread races. The
+ * signal is periodically sent by a task initiated by "debug dev delay-inj".
+ */
+void debug_delay_inj_sighandler(int sig, siginfo_t *si, void *arg)
+{
+	/* volatile prevents the busy-wait loop from being optimized away */
+	volatile int i = statistical_prng_range(10000);
+
+	while (i--)
+		__ha_cpu_relax();
+}
+#endif
+
+/* parse a "debug dev hex" command: hex-dumps <len> (args[4], default ~128
+ * rounded to the next 16-byte block) bytes of memory starting at the address
+ * given in args[3]. It always returns 1.
+ */
+static int debug_parse_cli_hex(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	unsigned long start, len;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[3])
+		return cli_err(appctx, "Missing memory address to dump from.\n");
+
+	start = strtoul(args[3], NULL, 0);
+	if (!start)
+		return cli_err(appctx, "Will not dump from NULL address.\n");
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+
+	/* by default, dump ~128 till next block of 16 */
+	len = strtoul(args[4], NULL, 0);
+	if (!len)
+		len = ((start + 128) & -16) - start;
+
+	chunk_reset(&trash);
+	dump_hex(&trash, "  ", (const void *)start, len, 1);
+	trash.area[trash.data] = 0;
+	return cli_msg(appctx, LOG_INFO, trash.area);
+}
+
+/* parse a "debug dev sym <addr>" command: resolves the address in args[3] to
+ * the nearest known symbol name. It always returns 1.
+ */
+static int debug_parse_cli_sym(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	unsigned long addr;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[3])
+		return cli_err(appctx, "Missing memory address to be resolved.\n");
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+
+	addr = strtoul(args[3], NULL, 0);
+	chunk_printf(&trash, "%#lx resolves to ", addr);
+	resolve_sym_name(&trash, NULL, (const void *)addr);
+	chunk_appendf(&trash, "\n");
+
+	return cli_msg(appctx, LOG_INFO, trash.area);
+}
+
+/* parse a "debug dev tkill" command: delivers signal args[4] (default
+ * SIGABRT) to thread number args[3], where thread numbers are 1-based and 0
+ * (the default) means the current thread. It always returns 1.
+ */
+static int debug_parse_cli_tkill(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int thr = 0;
+	int sig = SIGABRT;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (*args[3])
+		thr = atoi(args[3]);
+
+	if (thr < 0 || thr > global.nbthread)
+		return cli_err(appctx, "Thread number out of range (use 0 for current).\n");
+
+	if (*args[4])
+		sig = atoi(args[4]);
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	/* thr is 1-based here; 0 targets the calling thread via raise() */
+	if (thr)
+		ha_tkill(thr - 1, sig);
+	else
+		raise(sig);
+	return 1;
+}
+
+/* hashes 'word' in "debug dev hash 'word' ": returns the anonymized (hashed)
+ * form of args[3] as produced by HA_ANON_CLI(), useful to correlate
+ * anonymized dumps with known strings. Always returns 1.
+ */
+static int debug_parse_cli_hash(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *msg = NULL;
+
+	cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "%s\n", HA_ANON_CLI(args[3])));
+	return 1;
+}
+
+/* parse a "debug dev write" command: emits args[3] bytes of filler output
+ * ('.' with a '\n' every 64 bytes) to exercise the CLI output path. It always
+ * returns 1.
+ */
+static int debug_parse_cli_write(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	unsigned long len;
+
+	/* like all other "debug dev" handlers, require admin level */
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[3])
+		return cli_err(appctx, "Missing output size.\n");
+
+	len = strtoul(args[3], NULL, 0);
+	if (len >= trash.size)
+		return cli_err(appctx, "Output too large, must be <tune.bufsize.\n");
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+
+	chunk_reset(&trash);
+	trash.data = len;
+	memset(trash.area, '.', trash.data);
+	trash.area[trash.data] = 0;
+	/* break the output into 64-char lines for readability */
+	for (len = 64; len < trash.data; len += 64)
+		trash.area[len] = '\n';
+	return cli_msg(appctx, LOG_INFO, trash.area);
+}
+
+/* parse a "debug dev stream" command */
+/*
+ *  debug dev stream [strm=<ptr>] [strm.f[{+-=}<flags>]] [txn.f[{+-=}<flags>]] \
+ *                   [req.f[{+-=}<flags>]] [res.f[{+-=}<flags>]]               \
+ *                   [sif.f[{+-=<flags>]] [sib.f[{+-=<flags>]]                 \
+ *                   [sif.s[=<state>]] [sib.s[=<state>]]
+ *
+ * Shows or modifies fields of the current (or designated) stream. Each
+ * argument names a field, optionally followed by an operator and a value.
+ * All memory accesses are guarded by may_access() since the pointer may be
+ * stale or forged. Always returns 1.
+ */
+static int debug_parse_cli_stream(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct stream *s = appctx_strm(appctx);
+	int arg;
+	void *ptr;
+	int size;
+	const char *word, *end;
+	struct ist name;
+	char *msg = NULL;
+	char *endarg;
+	unsigned long long old, new;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	ptr = NULL; size = 0;
+
+	if (!*args[3]) {
+		return cli_err(appctx,
+			       "Usage: debug dev stream [ strm=<ptr> ] { <obj> <op> <value> | wake }*\n"
+			       "     <obj>   = {strm.f | strm.x | scf.s | scb.s | txn.f | req.f | res.f}\n"
+			       "     <op>    = {'' (show) | '=' (assign) | '^' (xor) | '+' (or) | '-' (andnot)}\n"
+			       "     <value> = 'now' | 64-bit dec/hex integer (0x prefix supported)\n"
+			       "     'wake' wakes the stream assigned to 'strm' (default: current)\n"
+			       );
+	}
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	for (arg = 3; *args[arg]; arg++) {
+		old = 0;
+		end = word = args[arg];
+		while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
+			end++;
+		name = ist2(word, end - word);
+		if (isteq(name, ist("strm"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s; size = sizeof(s);
+		} else if (isteq(name, ist("strm.f"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->flags; size = sizeof(s->flags);
+		} else if (isteq(name, ist("strm.x"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->conn_exp; size = sizeof(s->conn_exp);
+		} else if (isteq(name, ist("txn.f"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->txn->flags; size = sizeof(s->txn->flags);
+		} else if (isteq(name, ist("req.f"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->req.flags; size = sizeof(s->req.flags);
+		} else if (isteq(name, ist("res.f"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->res.flags; size = sizeof(s->res.flags);
+		} else if (isteq(name, ist("scf.s"))) {
+			ptr = (!s || !may_access(s)) ? NULL : &s->scf->state; size = sizeof(s->scf->state);
+		} else if (isteq(name, ist("scb.s"))) {
+			/* BUG FIX: this used to point at scf->state while sizing
+			 * scb->state, making "scb.s" act on the wrong stconn.
+			 */
+			ptr = (!s || !may_access(s)) ? NULL : &s->scb->state; size = sizeof(s->scb->state);
+		} else if (isteq(name, ist("wake"))) {
+			if (s && may_access(s) && may_access((void *)s + sizeof(*s) - 1))
+				task_wakeup(s->task, TASK_WOKEN_TIMER|TASK_WOKEN_IO|TASK_WOKEN_MSG);
+			continue;
+		} else
+			return cli_dynerr(appctx, memprintf(&msg, "Unsupported field name: '%s'.\n", word));
+
+		/* read previous value */
+		if ((s || ptr == &s) && ptr && may_access(ptr) && may_access(ptr + size - 1)) {
+			if (size == 8)
+				old = read_u64(ptr);
+			else if (size == 4)
+				old = read_u32(ptr);
+			else if (size == 2)
+				old = read_u16(ptr);
+			else
+				old = *(const uint8_t *)ptr;
+		} else {
+			memprintf(&msg,
+				  "%sSkipping inaccessible pointer %p for field '%.*s'.\n",
+				  msg ? msg : "", ptr, (int)(end - word), word);
+			continue;
+		}
+
+		/* parse the new value . */
+		new = strtoll(end + 1, &endarg, 0);
+		if (end[1] && *endarg) {
+			if (strcmp(end + 1, "now") == 0)
+				new = now_ms;
+			else {
+				memprintf(&msg,
+					  "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
+					  msg ? msg : "", end + 1, (int)(end - word), word);
+				continue;
+			}
+		}
+
+		switch (*end) {
+		case '\0': /* show */
+			memprintf(&msg, "%s%.*s=%#llx ", msg ? msg : "", (int)(end - word), word, old);
+			new = old; // do not change the value
+			break;
+
+		case '=': /* set */
+			break;
+
+		case '^': /* XOR */
+			new = old ^ new;
+			break;
+
+		case '+': /* OR */
+			new = old | new;
+			break;
+
+		case '-': /* AND NOT */
+			new = old & ~new;
+			break;
+
+		default:
+			break;
+		}
+
+		/* write the new value */
+		if (new != old) {
+			if (size == 8)
+				write_u64(ptr, new);
+			else if (size == 4)
+				write_u32(ptr, new);
+			else if (size == 2)
+				write_u16(ptr, new);
+			else
+				*(uint8_t *)ptr = new;
+		}
+	}
+
+	if (msg && *msg)
+		return cli_dynmsg(appctx, LOG_INFO, msg);
+	return 1;
+}
+
+/* parse a "debug dev task" command */
+/*
+ *  debug dev task <ptr> [ "wake" | "expire" | "kill" ]
+ *  Show/change status of a task/tasklet. All accesses go through may_access()
+ *  and run under thread isolation because <ptr> may designate a freed or
+ *  arbitrary object.
+ */
+static int debug_parse_cli_task(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	const struct ha_caller *caller;
+	struct task *t;
+	char *endarg;
+	char *msg;
+	void *ptr;
+	int ret = 1;
+	int task_ok;
+	int arg;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	/* parse the pointer value */
+	ptr = (void *)strtoul(args[3], &endarg, 0);
+	if (!*args[3] || *endarg)
+		goto usage;
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+
+	/* everything below must run under thread isolation till reaching label "leave" */
+	thread_isolate();
+
+	/* struct tasklet is smaller than struct task and is sufficient to check
+	 * the TASK_COMMON part.
+	 */
+	if (!may_access(ptr) || !may_access(ptr + sizeof(struct tasklet) - 1) ||
+	    ((const struct tasklet *)ptr)->tid < -1 ||
+	    ((const struct tasklet *)ptr)->tid >= (int)MAX_THREADS) {
+		ret = cli_err(appctx, "The designated memory area doesn't look like a valid task/tasklet\n");
+		goto leave;
+	}
+
+	t = ptr;
+	caller = t->caller;
+	msg = NULL;
+	/* task_ok tells whether the full struct task (not just the tasklet
+	 * part) is readable.
+	 */
+	task_ok = may_access(ptr + sizeof(*t) - 1);
+
+	chunk_reset(&trash);
+	resolve_sym_name(&trash, NULL, (const void *)t->process);
+
+	/* we need to be careful here because we may dump a freed task that's
+	 * still in the pool cache, containing garbage in pointers.
+	 */
+	if (!*args[4]) {
+		memprintf(&msg, "%s%p: %s state=%#x tid=%d process=%s ctx=%p calls=%d last=%s:%d intl=%d",
+			  msg ? msg : "", t, (t->state & TASK_F_TASKLET) ? "tasklet" : "task",
+			  t->state, t->tid, trash.area, t->context, t->calls,
+			  caller && may_access(caller) && may_access(caller->func) && isalnum((uchar)*caller->func) ? caller->func : "0",
+			  caller ? t->caller->line : 0,
+			  (t->state & TASK_F_TASKLET) ? LIST_INLIST(&((const struct tasklet *)t)->list) : 0);
+
+		if (task_ok && !(t->state & TASK_F_TASKLET))
+			memprintf(&msg, "%s inrq=%d inwq=%d exp=%d nice=%d",
+				  msg ? msg : "", task_in_rq(t), task_in_wq(t), t->expire, t->nice);
+
+		memprintf(&msg, "%s\n", msg ? msg : "");
+	}
+
+	for (arg = 4; *args[arg]; arg++) {
+		if (strcmp(args[arg], "expire") == 0) {
+			if (t->state & TASK_F_TASKLET) {
+				/* do nothing for tasklets */
+			}
+			else if (task_ok) {
+				/* unlink task and wake with timer flag */
+				__task_unlink_wq(t);
+				t->expire = now_ms;
+				task_wakeup(t, TASK_WOKEN_TIMER);
+			}
+		} else if (strcmp(args[arg], "wake") == 0) {
+			/* wake with all flags but init / timer */
+			if (t->state & TASK_F_TASKLET)
+				tasklet_wakeup((struct tasklet *)t);
+			else if (task_ok)
+				task_wakeup(t, TASK_WOKEN_ANY & ~(TASK_WOKEN_INIT|TASK_WOKEN_TIMER));
+		} else if (strcmp(args[arg], "kill") == 0) {
+			/* Kill the task. This is not idempotent! */
+			if (!(t->state & TASK_KILLED)) {
+				if (t->state & TASK_F_TASKLET)
+					tasklet_kill((struct tasklet *)t);
+				else if (task_ok)
+					task_kill(t);
+			}
+		} else {
+			thread_release();
+			goto usage;
+		}
+	}
+
+	if (msg && *msg)
+		ret = cli_dynmsg(appctx, LOG_INFO, msg);
+ leave:
+	thread_release();
+	return ret;
+ usage:
+	return cli_err(appctx,
+		       "Usage: debug dev task <ptr> [ wake | expire | kill ]\n"
+		       "  By default, dumps some info on task/tasklet <ptr>. 'wake' will wake it up\n"
+		       "  with all conditions flags but init/exp. 'expire' will expire the entry, and\n"
+		       "  'kill' will kill it (warning: may crash since later not idempotent!). All\n"
+		       "  changes may crash the process if performed on a wrong object!\n"
+		       );
+}
+
+#if defined(DEBUG_DEV)
+/* Task for "debug dev delay-inj": periodically sends SIGRTMAX to <count>
+ * randomly chosen threads so that debug_delay_inj_sighandler() injects a
+ * random busy-wait there. Re-arms itself every <inter> ms, or immediately
+ * when <inter> is zero.
+ */
+static struct task *debug_delay_inj_task(struct task *t, void *ctx, unsigned int state)
+{
+	unsigned long *tctx = ctx; // [0] = interval, [1] = nbwakeups
+	unsigned long inter = tctx[0];
+	unsigned long count = tctx[1];
+	unsigned long rnd;
+
+	if (inter)
+		t->expire = tick_add(now_ms, inter);
+	else
+		task_wakeup(t, TASK_WOKEN_MSG);
+
+	/* wake a random thread */
+	while (count--) {
+		rnd = statistical_prng_range(global.nbthread);
+		ha_tkill(rnd, SIGRTMAX);
+	}
+	return t;
+}
+
+/* parse a "debug dev delay-inj" command
+ *   debug dev delay-inj <inter> <count>
+ * Allocates the shared context and starts the delay-injection task. It always
+ * returns 1.
+ */
+static int debug_parse_delay_inj(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	unsigned long *tctx; // [0] = inter, [1] = count
+	struct task *task;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	if (!*args[4])
+		return cli_err(appctx, "Usage: debug dev delay-inj <inter_ms> <count>*\n");
+
+	_HA_ATOMIC_INC(&debug_commands_issued);
+
+	tctx = calloc(2, sizeof(*tctx));
+	if (!tctx)
+		goto fail;
+
+	tctx[0] = atoi(args[3]);
+	tctx[1] = atoi(args[4]);
+
+	task = task_new_here/*anywhere*/();
+	if (!task)
+		goto fail;
+
+	task->process = debug_delay_inj_task;
+	task->context = tctx;
+	task_wakeup(task, TASK_WOKEN_INIT);
+	return 1;
+
+ fail:
+	/* free(NULL) is a no-op when the task allocation failed first */
+	free(tctx);
+	return cli_err(appctx, "Not enough memory");
+}
+#endif // DEBUG_DEV
+
+/* Handler for the periodic tasks created by "debug dev sched": re-arms its
+ * own expiration, and with probability 1/2 wakes another randomly chosen
+ * entry from the shared context (task pointers are tagged with bit 0 set,
+ * tasklet pointers are untagged).
+ */
+static struct task *debug_task_handler(struct task *t, void *ctx, unsigned int state)
+{
+	unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
+	unsigned long inter = tctx[1];
+	unsigned long rnd;
+
+	t->expire = tick_add(now_ms, inter);
+
+	/* half of the calls will wake up another entry */
+	rnd = statistical_prng();
+	if (rnd & 1) {
+		rnd >>= 1;
+		rnd %= tctx[0];
+		rnd = tctx[rnd + 2];
+
+		/* bit 0 discriminates tasks (tagged +1) from tasklets */
+		if (rnd & 1)
+			task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
+		else
+			tasklet_wakeup((struct tasklet *)rnd);
+	}
+	return t;
+}
+
+/* Handler for the tasklets created by "debug dev sched": each run wakes up
+ * two randomly chosen entries from the shared context (task pointers are
+ * tagged with bit 0 set, tasklet pointers are untagged).
+ */
+static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned int state)
+{
+	unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
+	unsigned long rnd;
+	int i;
+
+	/* wake up two random entries */
+	for (i = 0; i < 2; i++) {
+		rnd = statistical_prng() % tctx[0];
+		rnd = tctx[rnd + 2];
+
+		/* bit 0 discriminates tasks (tagged +1) from tasklets */
+		if (rnd & 1)
+			task_wakeup((struct task *)(rnd - 1), TASK_WOKEN_MSG);
+		else
+			tasklet_wakeup((struct tasklet *)rnd);
+	}
+	return t;
+}
+
+/* parse a "debug dev sched" command
+ *   debug dev sched {task|tasklet} [count=<count>] [mask=<mask>] [single=<single>] [inter=<inter>]
+ * Creates <count> tasks or tasklets bound to thread <tid> that keep waking
+ * each other up (see debug_task_handler()/debug_tasklet_handler()) to stress
+ * the scheduler. It always returns 1.
+ */
+static int debug_parse_cli_sched(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	int arg;
+	void *ptr;
+	int size;
+	const char *word, *end;
+	struct ist name;
+	char *msg = NULL;
+	char *endarg;
+	unsigned long long new;
+	unsigned long count = 0;
+	unsigned long thrid = tid;
+	unsigned int inter = 0;
+	unsigned long i;
+	int mode = 0; // 0 = tasklet; 1 = task
+	unsigned long *tctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	ptr = NULL; size = 0;
+
+	if (strcmp(args[3], "task") != 0 && strcmp(args[3], "tasklet") != 0) {
+		return cli_err(appctx,
+			       "Usage: debug dev sched {task|tasklet} { <obj> = <value> }*\n"
+			       "     <obj>   = {count | tid | inter }\n"
+			       "     <value> = 64-bit dec/hex integer (0x prefix supported)\n"
+			       );
+	}
+
+	mode = strcmp(args[3], "task") == 0;
+
+	/* parse "name=value" settings; <ptr>/<size> designate the local
+	 * variable that the parsed value will be written into.
+	 */
+	_HA_ATOMIC_INC(&debug_commands_issued);
+	for (arg = 4; *args[arg]; arg++) {
+		end = word = args[arg];
+		while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')
+			end++;
+		name = ist2(word, end - word);
+		if (isteq(name, ist("count"))) {
+			ptr = &count; size = sizeof(count);
+		} else if (isteq(name, ist("tid"))) {
+			ptr = &thrid; size = sizeof(thrid);
+		} else if (isteq(name, ist("inter"))) {
+			ptr = &inter; size = sizeof(inter);
+		} else
+			return cli_dynerr(appctx, memprintf(&msg, "Unsupported setting: '%s'.\n", word));
+
+		/* parse the new value . */
+		new = strtoll(end + 1, &endarg, 0);
+		if (end[1] && *endarg) {
+			memprintf(&msg,
+				  "%sIgnoring unparsable value '%s' for field '%.*s'.\n",
+				  msg ? msg : "", end + 1, (int)(end - word), word);
+			continue;
+		}
+
+		/* write the new value */
+		if (size == 8)
+			write_u64(ptr, new);
+		else if (size == 4)
+			write_u32(ptr, new);
+		else if (size == 2)
+			write_u16(ptr, new);
+		else
+			*(uint8_t *)ptr = new;
+	}
+
+	tctx = calloc(count + 2, sizeof(*tctx));
+	if (!tctx)
+		goto fail;
+
+	tctx[0] = (unsigned long)count;
+	tctx[1] = (unsigned long)inter;
+
+	if (thrid >= global.nbthread)
+		thrid = tid;
+
+	for (i = 0; i < count; i++) {
+		/* now, if poly or mask was set, tmask corresponds to the
+		 * valid thread mask to use, otherwise it remains zero.
+		 */
+		//printf("%lu: mode=%d mask=%#lx\n", i, mode, tmask);
+		if (mode == 0) {
+			struct tasklet *tl = tasklet_new();
+
+			if (!tl)
+				goto fail;
+
+			tl->tid = thrid;
+			tl->process = debug_tasklet_handler;
+			tl->context = tctx;
+			tctx[i + 2] = (unsigned long)tl;
+		} else {
+			struct task *task = task_new_on(thrid);
+
+			if (!task)
+				goto fail;
+
+			task->process = debug_task_handler;
+			task->context = tctx;
+			/* tasks are tagged with bit 0 to tell them apart from tasklets */
+			tctx[i + 2] = (unsigned long)task + 1;
+		}
+	}
+
+	/* start the tasks and tasklets */
+	for (i = 0; i < count; i++) {
+		unsigned long ctx = tctx[i + 2];
+
+		if (ctx & 1)
+			task_wakeup((struct task *)(ctx - 1), TASK_WOKEN_INIT);
+		else
+			tasklet_wakeup((struct tasklet *)ctx);
+	}
+
+	if (msg && *msg)
+		return cli_dynmsg(appctx, LOG_INFO, msg);
+	return 1;
+
+ fail:
+	/* free partially allocated entries */
+	for (i = 0; tctx && i < count; i++) {
+		unsigned long ctx = tctx[i + 2];
+
+		if (!ctx)
+			break;
+
+		if (ctx & 1)
+			task_destroy((struct task *)(ctx - 1));
+		else
+			tasklet_free((struct tasklet *)ctx);
+	}
+
+	free(tctx);
+	return cli_err(appctx, "Not enough memory");
+}
+
+/* CLI state for "debug dev fd" */
+struct dev_fd_ctx {
+	int start_fd;    /* first FD to scan on the next I/O handler call */
+};
+
+/* CLI parser for the "debug dev fd" command. The current FD to restart from is
+ * stored in a struct dev_fd_ctx pointed to by svcctx. Returns 0 to let the
+ * I/O handler (debug_iohandler_fd) run, or 1 on permission failure.
+ */
+static int debug_parse_cli_fd(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct dev_fd_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	/* start at fd #0 */
+	ctx->start_fd = 0;
+	return 0;
+}
+
+/* CLI I/O handler for the "debug dev fd" command. Dumps all FDs that are
+ * accessible from the process but not known from fdtab. The FD number to
+ * restart from is stored in a struct dev_fd_ctx pointed to by svcctx.
+ */
+static int debug_iohandler_fd(struct appctx *appctx)
+{
+ struct dev_fd_ctx *ctx = appctx->svcctx;
+ struct stconn *sc = appctx_sc(appctx);
+ struct sockaddr_storage sa;
+ struct stat statbuf;
+ socklen_t salen, vlen;
+ int ret1, ret2, port;
+ char *addrstr;
+ int ret = 1;
+ int i, fd;
+
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+ goto end;
+
+ chunk_reset(&trash);
+
+ thread_isolate();
+
+ /* we have two inner loops here, one for the proxy, the other one for
+ * the buffer.
+ */
+ for (fd = ctx->start_fd; fd < global.maxsock; fd++) {
+ /* check for FD's existence */
+ ret1 = fcntl(fd, F_GETFD, 0);
+ if (ret1 == -1)
+ continue; // not known to the process
+ if (fdtab[fd].owner)
+ continue; // well-known
+
+ /* OK we're seeing an orphan let's try to retrieve as much
+ * information as possible about it.
+ */
+ chunk_printf(&trash, "%5d", fd);
+
+ if (fstat(fd, &statbuf) != -1) {
+ chunk_appendf(&trash, " type=%s mod=%04o dev=%#llx siz=%#llx uid=%lld gid=%lld fs=%#llx ino=%#llx",
+ isatty(fd) ? "tty.":
+ S_ISREG(statbuf.st_mode) ? "file":
+ S_ISDIR(statbuf.st_mode) ? "dir.":
+ S_ISCHR(statbuf.st_mode) ? "chr.":
+ S_ISBLK(statbuf.st_mode) ? "blk.":
+ S_ISFIFO(statbuf.st_mode) ? "pipe":
+ S_ISLNK(statbuf.st_mode) ? "link":
+ S_ISSOCK(statbuf.st_mode) ? "sock":
+#ifdef USE_EPOLL
+ /* trick: epoll_ctl() will return -ENOENT when trying
+ * to remove from a valid epoll FD an FD that was not
+ * registered against it. But we don't want to risk
+ * disabling a random FD. Instead we'll create a new
+ * one by duplicating 0 (it should be valid since
+ * pointing to a terminal or /dev/null), and try to
+ * remove it.
+ */
+ ({
+ int fd2 = dup(0);
+ int ret = fd2;
+ if (ret >= 0) {
+ ret = epoll_ctl(fd, EPOLL_CTL_DEL, fd2, NULL);
+ if (ret == -1 && errno == ENOENT)
+ ret = 0; // that's a real epoll
+ else
+ ret = -1; // it's something else
+ close(fd2);
+ }
+ ret;
+ }) == 0 ? "epol" :
+#endif
+ "????",
+ (uint)statbuf.st_mode & 07777,
+
+ (ullong)statbuf.st_rdev,
+ (ullong)statbuf.st_size,
+ (ullong)statbuf.st_uid,
+ (ullong)statbuf.st_gid,
+
+ (ullong)statbuf.st_dev,
+ (ullong)statbuf.st_ino);
+ }
+
+ chunk_appendf(&trash, " getfd=%s+%#x",
+ (ret1 & FD_CLOEXEC) ? "cloex" : "",
+ ret1 &~ FD_CLOEXEC);
+
+ /* FD options */
+ ret2 = fcntl(fd, F_GETFL, 0);
+ if (ret2) {
+ chunk_appendf(&trash, " getfl=%s",
+ (ret1 & 3) >= 2 ? "O_RDWR" :
+ (ret1 & 1) ? "O_WRONLY" : "O_RDONLY");
+
+ for (i = 2; i < 32; i++) {
+ if (!(ret2 & (1UL << i)))
+ continue;
+ switch (1UL << i) {
+ case O_CREAT: chunk_appendf(&trash, ",O_CREAT"); break;
+ case O_EXCL: chunk_appendf(&trash, ",O_EXCL"); break;
+ case O_NOCTTY: chunk_appendf(&trash, ",O_NOCTTY"); break;
+ case O_TRUNC: chunk_appendf(&trash, ",O_TRUNC"); break;
+ case O_APPEND: chunk_appendf(&trash, ",O_APPEND"); break;
+#ifdef O_ASYNC
+ case O_ASYNC: chunk_appendf(&trash, ",O_ASYNC"); break;
+#endif
+#ifdef O_DIRECT
+ case O_DIRECT: chunk_appendf(&trash, ",O_DIRECT"); break;
+#endif
+#ifdef O_NOATIME
+ case O_NOATIME: chunk_appendf(&trash, ",O_NOATIME"); break;
+#endif
+ }
+ }
+ }
+
+ vlen = sizeof(ret2);
+ ret1 = getsockopt(fd, SOL_SOCKET, SO_TYPE, &ret2, &vlen);
+ if (ret1 != -1)
+ chunk_appendf(&trash, " so_type=%d", ret2);
+
+ vlen = sizeof(ret2);
+ ret1 = getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &ret2, &vlen);
+ if (ret1 != -1)
+ chunk_appendf(&trash, " so_accept=%d", ret2);
+
+ vlen = sizeof(ret2);
+ ret1 = getsockopt(fd, SOL_SOCKET, SO_ERROR, &ret2, &vlen);
+ if (ret1 != -1)
+ chunk_appendf(&trash, " so_error=%d", ret2);
+
+ salen = sizeof(sa);
+ if (getsockname(fd, (struct sockaddr *)&sa, &salen) != -1) {
+ if (sa.ss_family == AF_INET)
+ port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
+ else if (sa.ss_family == AF_INET6)
+ port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
+ else
+ port = 0;
+ addrstr = sa2str(&sa, port, 0);
+ chunk_appendf(&trash, " laddr=%s", addrstr);
+ free(addrstr);
+ }
+
+ salen = sizeof(sa);
+ if (getpeername(fd, (struct sockaddr *)&sa, &salen) != -1) {
+ if (sa.ss_family == AF_INET)
+ port = ntohs(((const struct sockaddr_in *)&sa)->sin_port);
+ else if (sa.ss_family == AF_INET6)
+ port = ntohs(((const struct sockaddr_in6 *)&sa)->sin6_port);
+ else
+ port = 0;
+ addrstr = sa2str(&sa, port, 0);
+ chunk_appendf(&trash, " raddr=%s", addrstr);
+ free(addrstr);
+ }
+
+ chunk_appendf(&trash, "\n");
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ ctx->start_fd = fd;
+ ret = 0;
+ break;
+ }
+ }
+
+ thread_release();
+ end:
+ return ret;
+}
+
+#if defined(DEBUG_MEM_STATS)
+
+/* CLI state for "debug dev memstats" */
+struct dev_mem_ctx {
+	struct mem_stats *start, *stop; /* begin/end of dump, start is updated to resume interrupted dumps */
+	char *match; /* non-null if a name prefix is specified ("match <pfx>"), freed on release */
+	int show_all; /* show all entries if non-null (including never-called ones) */
+	int width; /* 1st column width, computed on first dump pass (0 = not yet known) */
+	long tot_size; /* sum of alloc-free, may be negative, excludes realloc */
+	ulong tot_calls; /* sum of calls (both alloc and free sides) */
+};
+
+/* CLI parser for the "debug dev memstats" command. Sets up a dev_mem_ctx
+ * (shown above) in appctx->svcctx. Supported arguments: "reset" (admin only,
+ * zeroes all counters and returns immediately), "all" (also dump entries with
+ * zero calls), "match <pfx>" (only dump pool entries whose name starts with
+ * <pfx>). Returns 1 when fully handled, 0 to proceed with the I/O handler.
+ */
+static int debug_parse_cli_memstats(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct dev_mem_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	int arg;
+
+	/* weak linker-section symbols: if no mem_stats were emitted, the
+	 * section is empty and start == stop, making the loops no-ops.
+	 */
+	extern __attribute__((__weak__)) struct mem_stats __start_mem_stats;
+	extern __attribute__((__weak__)) struct mem_stats __stop_mem_stats;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	for (arg = 3; *args[arg]; arg++) {
+		if (strcmp(args[arg], "reset") == 0) {
+			struct mem_stats *ptr;
+
+			/* resetting counters is destructive, require admin */
+			if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+				return 1;
+
+			for (ptr = &__start_mem_stats; ptr < &__stop_mem_stats; ptr++) {
+				_HA_ATOMIC_STORE(&ptr->calls, 0);
+				_HA_ATOMIC_STORE(&ptr->size, 0);
+			}
+			return 1;
+		}
+		else if (strcmp(args[arg], "all") == 0) {
+			ctx->show_all = 1;
+			continue;
+		}
+		else if (strcmp(args[arg], "match") == 0 && *args[arg + 1]) {
+			/* NOTE(review): strdup() is not checked here; a failed
+			 * allocation silently disables the filter.
+			 */
+			ha_free(&ctx->match);
+			ctx->match = strdup(args[arg + 1]);
+			arg++;
+			continue;
+		}
+		else
+			return cli_err(appctx, "Expects either 'reset', 'all', or 'match <pfx>'.\n");
+	}
+
+	/* otherwise proceed with the dump over [start, stop); the column width
+	 * is left at 0 and computed on the first I/O handler invocation.
+	 */
+	ctx->start = &__start_mem_stats;
+	ctx->stop = &__stop_mem_stats;
+	ctx->width = 0;
+	return 0;
+}
+
+/* CLI I/O handler for the "debug dev memstats" command using a dev_mem_ctx
+ * found in appctx->svcctx. Dumps all mem_stats structs referenced by pointers
+ * located between ->start and ->stop. Dumps all entries if ->show_all != 0,
+ * otherwise only non-zero calls. Returns 1 once the dump (including the final
+ * summary line) is complete, or 0 when interrupted by a full output buffer,
+ * in which case the handler will be called again and resumes at ->start.
+ */
+static int debug_iohandler_memstats(struct appctx *appctx)
+{
+	struct dev_mem_ctx *ctx = appctx->svcctx;
+	struct stconn *sc = appctx_sc(appctx);
+	struct mem_stats *ptr;
+	const char *pfx = ctx->match;
+	int ret = 1;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		goto end;
+
+	if (!ctx->width) {
+		/* we don't know the first column's width, let's compute it
+		 * now based on a first pass on printable entries and their
+		 * expected width (approximated).
+		 */
+		for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
+			const char *p, *name;
+			int w = 0;
+			char tmp;
+
+			if (!ptr->size && !ptr->calls && !ctx->show_all)
+				continue;
+
+			/* keep only the basename of the source file */
+			for (p = name = ptr->caller.file; *p; p++) {
+				if (*p == '/')
+					name = p + 1;
+			}
+
+			/* snprintf() with a zero size writes nothing and only
+			 * returns the width the output would have needed.
+			 */
+			if (ctx->show_all)
+				w = snprintf(&tmp, 0, "%s(%s:%d) ", ptr->caller.func, name, ptr->caller.line);
+			else
+				w = snprintf(&tmp, 0, "%s:%d ", name, ptr->caller.line);
+
+			if (w > ctx->width)
+				ctx->width = w;
+		}
+	}
+
+	/* now dump the entries themselves, resuming from ctx->start which is
+	 * updated each time the output buffer fills up.
+	 */
+	for (ptr = ctx->start; ptr != ctx->stop; ptr++) {
+		const char *type;
+		const char *name;
+		const char *p;
+		const char *info = NULL;
+		const char *func = NULL;
+		int direction = 0; // neither alloc nor free (e.g. realloc)
+
+		if (!ptr->size && !ptr->calls && !ctx->show_all)
+			continue;
+
+		/* basename only */
+		for (p = name = ptr->caller.file; *p; p++) {
+			if (*p == '/')
+				name = p + 1;
+		}
+
+		func = ptr->caller.func;
+
+		switch (ptr->caller.what) {
+		case MEM_STATS_TYPE_CALLOC: type = "CALLOC"; direction = 1; break;
+		case MEM_STATS_TYPE_FREE: type = "FREE"; direction = -1; break;
+		case MEM_STATS_TYPE_MALLOC: type = "MALLOC"; direction = 1; break;
+		case MEM_STATS_TYPE_REALLOC: type = "REALLOC"; break;
+		case MEM_STATS_TYPE_STRDUP: type = "STRDUP"; direction = 1; break;
+		case MEM_STATS_TYPE_P_ALLOC: type = "P_ALLOC"; direction = 1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
+		case MEM_STATS_TYPE_P_FREE: type = "P_FREE"; direction = -1; if (ptr->extra) info = ((const struct pool_head *)ptr->extra)->name; break;
+		default: type = "UNSET"; break;
+		}
+
+		//chunk_printf(&trash,
+		//             "%20s:%-5d %7s size: %12lu calls: %9lu size/call: %6lu\n",
+		//             name, ptr->line, type,
+		//             (unsigned long)ptr->size, (unsigned long)ptr->calls,
+		//             (unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0));
+
+		/* only match requested prefixes (i.e. pool names for the
+		 * P_ALLOC/P_FREE entries which carry an <info> field).
+		 */
+		if (pfx && (!info || strncmp(info, pfx, strlen(pfx)) != 0))
+			continue;
+
+		chunk_reset(&trash);
+		if (ctx->show_all)
+			chunk_appendf(&trash, "%s(", func);
+
+		chunk_appendf(&trash, "%s:%d", name, ptr->caller.line);
+
+		if (ctx->show_all)
+			chunk_appendf(&trash, ")");
+
+		/* pad the first column with spaces up to the precomputed width.
+		 * NOTE(review): assumes trash.size >= ctx->width — presumably
+		 * guaranteed by the buffer size, confirm.
+		 */
+		while (trash.data < ctx->width)
+			trash.area[trash.data++] = ' ';
+
+		chunk_appendf(&trash, "%7s size: %12lu calls: %9lu size/call: %6lu %s\n",
+			type,
+			(unsigned long)ptr->size, (unsigned long)ptr->calls,
+			(unsigned long)(ptr->calls ? (ptr->size / ptr->calls) : 0),
+			info ? info : "");
+
+		if (applet_putchk(appctx, &trash) == -1) {
+			/* output buffer full: resume from this entry next time */
+			ctx->start = ptr;
+			ret = 0;
+			goto end;
+		}
+		/* fold allocations and releases into the running balance;
+		 * reallocs (direction == 0) are excluded from the totals.
+		 */
+		if (direction > 0) {
+			ctx->tot_size += (ulong)ptr->size;
+			ctx->tot_calls += (ulong)ptr->calls;
+		}
+		else if (direction < 0) {
+			ctx->tot_size -= (ulong)ptr->size;
+			ctx->tot_calls += (ulong)ptr->calls;
+		}
+	}
+
+	/* now dump a summary */
+	chunk_reset(&trash);
+	chunk_appendf(&trash, "Total");
+	while (trash.data < ctx->width)
+		trash.area[trash.data++] = ' ';
+
+	chunk_appendf(&trash, "%7s size: %12ld calls: %9lu size/call: %6ld %s\n",
+		"BALANCE",
+		ctx->tot_size, ctx->tot_calls,
+		(long)(ctx->tot_calls ? (ctx->tot_size / ctx->tot_calls) : 0),
+		"(excl. realloc)");
+
+	if (applet_putchk(appctx, &trash) == -1) {
+		/* here ptr == ctx->stop, so on the next call the dump loop is
+		 * skipped and only this summary is retried.
+		 */
+		ctx->start = ptr;
+		ret = 0;
+		goto end;
+	}
+ end:
+	return ret;
+}
+
+/* release the "debug dev memstats" context: frees the optional name-prefix
+ * filter allocated by the "match" argument parser.
+ */
+static void debug_release_memstats(struct appctx *appctx)
+{
+	struct dev_mem_ctx *ctx = appctx->svcctx;
+
+	ha_free(&ctx->match);
+}
+#endif
+
+#ifdef USE_THREAD_DUMP
+
+/* handles DEBUGSIG to dump the state of the thread it's working on. This is
+ * appended at the end of thread_dump_buffer which must be protected against
+ * reentrance from different threads (a thread-local buffer works fine).
+ */
+void debug_handler(int sig, siginfo_t *si, void *arg)
+{
+	struct buffer *buf = HA_ATOMIC_LOAD(&th_ctx->thread_dump_buffer);
+	int harmless = is_thread_harmless();
+
+	/* first, let's check it's really for us and that we didn't just get
+	 * a spurious DEBUGSIG. A NULL buffer or the 0x1 marker means no dump
+	 * was requested for this thread.
+	 */
+	if (!buf || buf == (void*)(0x1UL))
+		return;
+
+	/* now dump the current state into the designated buffer, and indicate
+	 * we come from a sig handler.
+	 */
+	ha_thread_dump_one(tid, 1);
+
+	/* mark the current thread as stuck to detect it upon next invocation
+	 * if it didn't move. Harmless or sleeping threads are exempt since
+	 * they are expected to be waiting.
+	 */
+	if (!harmless &&
+	    !(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_SLEEPING))
+		_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
+}
+
+/* per-thread initialization: make sure this thread can receive the debug
+ * signal used for thread dumps (and, in DEBUG_DEV builds, the delay-injection
+ * signal). Always returns 1 (success) as expected by the per-thread init API.
+ */
+static int init_debug_per_thread()
+{
+	sigset_t unblock_set;
+
+	sigemptyset(&unblock_set);
+	sigaddset(&unblock_set, DEBUGSIG);
+#if defined(DEBUG_DEV)
+	sigaddset(&unblock_set, SIGRTMAX);
+#endif
+	ha_sigmask(SIG_UNBLOCK, &unblock_set, NULL);
+	return 1;
+}
+
+/* boot-time setup of the debug machinery: warms up the backtrace support and
+ * installs the DEBUGSIG handler (plus the delay-injection handler in DEBUG_DEV
+ * builds). Registered as a post-check callback; always returns ERR_NONE.
+ */
+static int init_debug()
+{
+	struct sigaction sa;
+	void *callers[1];
+
+	/* calling backtrace() will access libgcc at runtime. We don't want to
+	 * do it after the chroot, so let's perform a first call to have it
+	 * ready in memory for later use.
+	 */
+	my_backtrace(callers, sizeof(callers)/sizeof(*callers));
+	/* NOTE(review): <sa> is not zero-initialized; only the fields below
+	 * are set, which works on common platforms — confirm for new targets.
+	 */
+	sa.sa_handler = NULL;
+	sa.sa_sigaction = debug_handler;
+	sigemptyset(&sa.sa_mask);
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(DEBUGSIG, &sa, NULL);
+
+#if defined(DEBUG_DEV)
+	/* SIGRTMAX is used to inject random delays into running threads */
+	sa.sa_handler = NULL;
+	sa.sa_sigaction = debug_delay_inj_sighandler;
+	sigemptyset(&sa.sa_mask);
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGRTMAX, &sa, NULL);
+#endif
+	return ERR_NONE;
+}
+
+REGISTER_POST_CHECK(init_debug);
+REGISTER_PER_THREAD_INIT(init_debug_per_thread);
+
+#endif /* USE_THREAD_DUMP */
+
+
+/* Linux-specific part of the post-mortem data collection: platform vendor and
+ * model from DMI or the device-tree, container and virtualization detection,
+ * and CPU identification from /proc/cpuinfo (ARM, x86 and MIPS fields). All
+ * results are stored into the global post_mortem.platform structure.
+ */
+static void feed_post_mortem_linux()
+{
+#if defined(__linux__)
+	struct stat statbuf;
+	FILE *file;
+
+	/* DMI reports either HW or hypervisor, this allows to detect most VMs.
+	 * On ARM the device-tree is often more precise for the model. Since many
+	 * boards present "to be filled by OEM" or so in many fields, we dedup
+	 * them as much as possible.
+	 */
+	if (read_line_to_trash("/sys/class/dmi/id/sys_vendor") > 0)
+		strlcpy2(post_mortem.platform.hw_vendor, trash.area, sizeof(post_mortem.platform.hw_vendor));
+
+	if (read_line_to_trash("/sys/class/dmi/id/product_family") > 0 &&
+	    strcmp(trash.area, post_mortem.platform.hw_vendor) != 0)
+		strlcpy2(post_mortem.platform.hw_family, trash.area, sizeof(post_mortem.platform.hw_family));
+
+	if ((read_line_to_trash("/sys/class/dmi/id/product_name") > 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_family) != 0))
+		strlcpy2(post_mortem.platform.hw_model, trash.area, sizeof(post_mortem.platform.hw_model));
+
+	if ((read_line_to_trash("/sys/class/dmi/id/board_vendor") > 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0))
+		strlcpy2(post_mortem.platform.brd_vendor, trash.area, sizeof(post_mortem.platform.brd_vendor));
+
+	/* the board model preferably comes from the device-tree (ARM), falling
+	 * back to DMI, still deduplicating against the fields filled above.
+	 */
+	if ((read_line_to_trash("/sys/firmware/devicetree/base/model") > 0 &&
+	     strcmp(trash.area, post_mortem.platform.brd_vendor) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_family) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_model) != 0) ||
+	    (read_line_to_trash("/sys/class/dmi/id/board_name") > 0 &&
+	     strcmp(trash.area, post_mortem.platform.brd_vendor) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_vendor) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_family) != 0 &&
+	     strcmp(trash.area, post_mortem.platform.hw_model) != 0))
+		strlcpy2(post_mortem.platform.brd_model, trash.area, sizeof(post_mortem.platform.brd_model));
+
+	/* Check for containers. In a container on linux we don't see keventd (2.4) kthreadd (2.6+) on pid 2 */
+	if (read_line_to_trash("/proc/2/status") <= 0 ||
+	    (strcmp(trash.area, "Name:\tkthreadd") != 0 &&
+	     strcmp(trash.area, "Name:\tkeventd") != 0)) {
+		/* OK we're in a container. Docker often has /.dockerenv */
+		const char *tech = "yes";
+
+		if (stat("/.dockerenv", &statbuf) == 0)
+			tech = "docker";
+		strlcpy2(post_mortem.platform.cont_techno, tech, sizeof(post_mortem.platform.cont_techno));
+	}
+	else {
+		strlcpy2(post_mortem.platform.cont_techno, "no", sizeof(post_mortem.platform.cont_techno));
+	}
+
+	file = fopen("/proc/cpuinfo", "r");
+	if (file) {
+		uint cpu_implem = 0, cpu_arch = 0, cpu_variant = 0, cpu_part = 0, cpu_rev = 0; // arm
+		uint cpu_family = 0, model = 0, stepping = 0; // x86
+		char vendor_id[64] = "", model_name[64] = ""; // x86
+		char machine[64] = "", system_type[64] = "", cpu_model[64] = ""; // mips
+		const char *virt = "no";
+		char *p, *e, *v, *lf;
+
+		/* let's figure what CPU we're working with */
+		while ((p = fgets(trash.area, trash.size, file)) != NULL) {
+			lf = strchr(p, '\n');
+			if (lf)
+				*lf = 0;
+
+			/* stop at first line break */
+			if (!*p)
+				break;
+
+			/* skip colon and spaces and trim spaces after name */
+			v = e = strchr(p, ':');
+			if (!e)
+				continue;
+
+			/* cut right before the colon and strip trailing spaces
+			 * and tabs, leaving only the field name in <p>.
+			 */
+			do { *e-- = 0; } while (e >= p && (*e == ' ' || *e == '\t'));
+
+			/* locate value after colon */
+			do { v++; } while (*v == ' ' || *v == '\t');
+
+			/* ARM */
+			if (strcmp(p, "CPU implementer") == 0)
+				cpu_implem = strtoul(v, NULL, 0);
+			else if (strcmp(p, "CPU architecture") == 0)
+				cpu_arch = strtoul(v, NULL, 0);
+			else if (strcmp(p, "CPU variant") == 0)
+				cpu_variant = strtoul(v, NULL, 0);
+			else if (strcmp(p, "CPU part") == 0)
+				cpu_part = strtoul(v, NULL, 0);
+			else if (strcmp(p, "CPU revision") == 0)
+				cpu_rev = strtoul(v, NULL, 0);
+
+			/* x86 */
+			else if (strcmp(p, "cpu family") == 0)
+				cpu_family = strtoul(v, NULL, 0);
+			else if (strcmp(p, "model") == 0)
+				model = strtoul(v, NULL, 0);
+			else if (strcmp(p, "stepping") == 0)
+				stepping = strtoul(v, NULL, 0);
+			else if (strcmp(p, "vendor_id") == 0)
+				strlcpy2(vendor_id, v, sizeof(vendor_id));
+			else if (strcmp(p, "model name") == 0)
+				strlcpy2(model_name, v, sizeof(model_name));
+			else if (strcmp(p, "flags") == 0) {
+				/* the "hypervisor" CPU flag indicates we run in
+				 * a VM; refine using the DMI vendor when known.
+				 */
+				if (strstr(v, "hypervisor")) {
+					if (strncmp(post_mortem.platform.hw_vendor, "QEMU", 4) == 0)
+						virt = "qemu";
+					else if (strncmp(post_mortem.platform.hw_vendor, "VMware", 6) == 0)
+						virt = "vmware";
+					else
+						virt = "yes";
+				}
+			}
+
+			/* MIPS */
+			else if (strcmp(p, "system type") == 0)
+				strlcpy2(system_type, v, sizeof(system_type));
+			else if (strcmp(p, "machine") == 0)
+				strlcpy2(machine, v, sizeof(machine));
+			else if (strcmp(p, "cpu model") == 0)
+				strlcpy2(cpu_model, v, sizeof(cpu_model));
+		}
+		fclose(file);
+
+		/* Machine may replace hw_product on MIPS */
+		if (!*post_mortem.platform.hw_model)
+			strlcpy2(post_mortem.platform.hw_model, machine, sizeof(post_mortem.platform.hw_model));
+
+		/* SoC vendor */
+		strlcpy2(post_mortem.platform.soc_vendor, vendor_id, sizeof(post_mortem.platform.soc_vendor));
+
+		/* SoC model */
+		if (*system_type) {
+			/* MIPS */
+			strlcpy2(post_mortem.platform.soc_model, system_type, sizeof(post_mortem.platform.soc_model));
+			*system_type = 0;
+		} else if (*model_name) {
+			/* x86 */
+			strlcpy2(post_mortem.platform.soc_model, model_name, sizeof(post_mortem.platform.soc_model));
+			*model_name = 0;
+		}
+
+		/* Create a CPU model name based on available IDs */
+		if (cpu_implem) // arm
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sImpl %#02x", *cpu_model ? " " : "", cpu_implem);
+
+		if (cpu_family) // x86
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sFam %u", *cpu_model ? " " : "", cpu_family);
+
+		if (model) // x86
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sModel %u", *cpu_model ? " " : "", model);
+
+		if (stepping) // x86
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sStep %u", *cpu_model ? " " : "", stepping);
+
+		if (cpu_arch) // arm
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sArch %u", *cpu_model ? " " : "", cpu_arch);
+
+		if (cpu_part) // arm
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sPart %#03x", *cpu_model ? " " : "", cpu_part);
+
+		if (cpu_variant || cpu_rev) // arm
+			snprintf(cpu_model + strlen(cpu_model),
+				 sizeof(cpu_model) - strlen(cpu_model),
+				 "%sr%up%u", *cpu_model ? " " : "", cpu_variant, cpu_rev);
+
+		strlcpy2(post_mortem.platform.cpu_model, cpu_model, sizeof(post_mortem.platform.cpu_model));
+
+		/* NOTE(review): <virt> is always non-empty here ("no" by
+		 * default), so this test is always true — presumably on
+		 * purpose so that "no" is recorded as well.
+		 */
+		if (*virt)
+			strlcpy2(post_mortem.platform.virt_techno, virt, sizeof(post_mortem.platform.virt_techno));
+	}
+#endif // __linux__
+}
+
+/* fills the boot-time part of the global post_mortem structure: process
+ * identity, resource limits, platform description and (when supported) the
+ * list of loaded libraries. Registered as a post-check callback; always
+ * returns ERR_NONE.
+ */
+static int feed_post_mortem()
+{
+	/* kernel type, version and arch */
+	uname(&post_mortem.platform.utsname);
+
+	/* some boot-time info related to the process */
+	post_mortem.process.pid = getpid();
+	post_mortem.process.boot_uid = geteuid();
+	post_mortem.process.boot_gid = getegid();
+
+	getrlimit(RLIMIT_NOFILE, &post_mortem.process.limit_fd);
+#if defined(RLIMIT_AS)
+	getrlimit(RLIMIT_AS, &post_mortem.process.limit_ram);
+#elif defined(RLIMIT_DATA)
+	getrlimit(RLIMIT_DATA, &post_mortem.process.limit_ram);
+#endif
+
+	/* the detailed platform probing only makes sense on Linux */
+	if (strcmp(post_mortem.platform.utsname.sysname, "Linux") == 0)
+		feed_post_mortem_linux();
+
+#if defined(HA_HAVE_DUMP_LIBS)
+	/* snapshot the list of loaded libraries once for all */
+	chunk_reset(&trash);
+	if (dump_libs(&trash, 1))
+		post_mortem.libs = strdup(trash.area);
+#endif
+
+	return ERR_NONE;
+}
+
+REGISTER_POST_CHECK(feed_post_mortem);
+
+/* releases everything allocated for the post_mortem info at deinit time:
+ * the optional libs dump, each component's strings, then the component array.
+ */
+static void deinit_post_mortem(void)
+{
+	int idx;
+
+#if defined(HA_HAVE_DUMP_LIBS)
+	ha_free(&post_mortem.libs);
+#endif
+	for (idx = 0; idx < post_mortem.nb_components; idx++) {
+		struct post_mortem_component *pmc = &post_mortem.components[idx];
+
+		free(pmc->toolchain);
+		free(pmc->toolchain_opts);
+		free(pmc->build_settings);
+		free(pmc->path);
+	}
+	ha_free(&post_mortem.components);
+}
+
+REGISTER_POST_DEINIT(deinit_post_mortem);
+
+/* Appends one component entry (name/version/toolchain/options/settings/path)
+ * to the post_mortem info. May silently fail on allocation errors but we
+ * don't care since the goal is only to provide best-effort debugging info.
+ */
+void post_mortem_add_component(const char *name, const char *version,
+                               const char *toolchain, const char *toolchain_opts,
+                               const char *build_settings, const char *path)
+{
+	struct post_mortem_component *tab;
+	int n = post_mortem.nb_components;
+
+	/* grow the array by one entry; on failure the old array is untouched */
+	tab = realloc(post_mortem.components, (n + 1) * sizeof(*tab));
+	if (!tab)
+		return;
+
+	memset(&tab[n], 0, sizeof(*tab));
+	strlcpy2(tab[n].name, name, sizeof(tab[n].name));
+	strlcpy2(tab[n].version, version, sizeof(tab[n].version));
+	tab[n].toolchain = strdup(toolchain);
+	tab[n].toolchain_opts = strdup(toolchain_opts);
+	tab[n].build_settings = strdup(build_settings);
+	tab[n].path = strdup(path);
+
+	post_mortem.components = tab;
+	post_mortem.nb_components++;
+}
+
+#ifdef USE_THREAD
+/* init code is called one at a time so let's collect all per-thread info on
+ * the last starting thread. These info are not critical anyway and there's no
+ * problem if we get them slightly late.
+ */
+static int feed_post_mortem_late()
+{
+	static int per_thread_info_collected;
+
+	/* only the last thread to start performs the collection, so that all
+	 * ha_thread_info[] entries are fully populated by then.
+	 */
+	if (HA_ATOMIC_ADD_FETCH(&per_thread_info_collected, 1) == global.nbthread) {
+		int i;
+		for (i = 0; i < global.nbthread; i++) {
+			post_mortem.process.thread_info[i].pth_id = ha_thread_info[i].pth_id;
+			post_mortem.process.thread_info[i].stack_top = ha_thread_info[i].stack_top;
+		}
+	}
+	return 1;
+}
+
+REGISTER_PER_THREAD_INIT(feed_post_mortem_late);
+#endif
+
+/* register cli keywords: the "debug dev *" expert/developer commands plus a
+ * few "show" commands. Entry fields: command words, usage string, parse
+ * callback, I/O handler, release callback, private data, access level.
+ */
+static struct cli_kw_list cli_kws = {{ },{
+	{{ "debug", "dev", "bug", NULL }, "debug dev bug : call BUG_ON() and crash", debug_parse_cli_bug, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "check", NULL }, "debug dev check : call CHECK_IF() and possibly crash", debug_parse_cli_check, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "close", NULL }, "debug dev close <fd> : close this file descriptor", debug_parse_cli_close, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "deadlock", NULL }, "debug dev deadlock [nbtask] : deadlock between this number of tasks", debug_parse_cli_deadlock, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "delay", NULL }, "debug dev delay [ms] : sleep this long", debug_parse_cli_delay, NULL, NULL, NULL, ACCESS_EXPERT },
+#if defined(DEBUG_DEV)
+	{{ "debug", "dev", "delay-inj", NULL },"debug dev delay-inj <inter> <count> : inject random delays into threads", debug_parse_delay_inj, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "exec", NULL }, "debug dev exec [cmd] ... : show this command's output", debug_parse_cli_exec, NULL, NULL, NULL, ACCESS_EXPERT },
+#endif
+	{{ "debug", "dev", "fd", NULL }, "debug dev fd : scan for rogue/unhandled FDs", debug_parse_cli_fd, debug_iohandler_fd, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "exit", NULL }, "debug dev exit [code] : immediately exit the process", debug_parse_cli_exit, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "hash", NULL }, "debug dev hash [msg] : return msg hashed if anon is set", debug_parse_cli_hash, NULL, NULL, NULL, 0 },
+	{{ "debug", "dev", "hex", NULL }, "debug dev hex <addr> [len] : dump a memory area", debug_parse_cli_hex, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "log", NULL }, "debug dev log [msg] ... : send this msg to global logs", debug_parse_cli_log, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "loop", NULL }, "debug dev loop <ms> [isolated] : loop this long, possibly isolated", debug_parse_cli_loop, NULL, NULL, NULL, ACCESS_EXPERT },
+#if defined(DEBUG_MEM_STATS)
+	{{ "debug", "dev", "memstats", NULL }, "debug dev memstats [reset|all|match ...]: dump/reset memory statistics", debug_parse_cli_memstats, debug_iohandler_memstats, debug_release_memstats, NULL, 0 },
+#endif
+	{{ "debug", "dev", "panic", NULL }, "debug dev panic : immediately trigger a panic", debug_parse_cli_panic, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "sched", NULL }, "debug dev sched {task|tasklet} [k=v]* : stress the scheduler", debug_parse_cli_sched, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "stream",NULL }, "debug dev stream [k=v]* : show/manipulate stream flags", debug_parse_cli_stream,NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "sym", NULL }, "debug dev sym <addr> : resolve symbol address", debug_parse_cli_sym, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "task", NULL }, "debug dev task <ptr> [wake|expire|kill] : show/wake/expire/kill task/tasklet", debug_parse_cli_task, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "tkill", NULL }, "debug dev tkill [thr] [sig] : send signal to thread", debug_parse_cli_tkill, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "warn", NULL }, "debug dev warn : call WARN_ON() and possibly crash", debug_parse_cli_warn, NULL, NULL, NULL, ACCESS_EXPERT },
+	{{ "debug", "dev", "write", NULL }, "debug dev write [size] : write that many bytes in return", debug_parse_cli_write, NULL, NULL, NULL, ACCESS_EXPERT },
+
+	{{ "show", "dev", NULL, NULL }, "show dev : show debug info for developers", debug_parse_cli_show_dev, NULL, NULL },
+#if defined(HA_HAVE_DUMP_LIBS)
+	{{ "show", "libs", NULL, NULL }, "show libs : show loaded object files and libraries", debug_parse_cli_show_libs, NULL, NULL },
+#endif
+	{{ "show", "threads", NULL, NULL }, "show threads : show some threads debugging information", NULL, cli_io_handler_show_threads, NULL },
+	{{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
diff --git a/src/dgram.c b/src/dgram.c
new file mode 100644
index 0000000..c983c03
--- /dev/null
+++ b/src/dgram.c
@@ -0,0 +1,79 @@
+/*
+ * Datagram processing functions
+ *
+ * Copyright 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/fd.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/dgram.h>
+#include <haproxy/errors.h>
+#include <haproxy/tools.h>
+
+/* I/O callback installed on datagram FDs: retrieves the dgram_conn attached
+ * to the FD and forwards read/write readiness to its data layer callbacks.
+ */
+void dgram_fd_handler(int fd)
+{
+	struct dgram_conn *conn = fdtab[fd].owner;
+
+	if (unlikely(!conn))
+		return;
+
+	if (fd_recv_ready(fd))
+		conn->data->recv(conn);
+
+	if (fd_send_ready(fd))
+		conn->data->send(conn);
+}
+
+/* config parser for global "tune.{rcv,snd}buf.{frontend,backend}": stores a
+ * strictly positive integer into the matching global tunable. Returns 0 on
+ * success, 1 when the value was already set (warning), -1 on error.
+ */
+static int dgram_parse_tune_bufs(char **args, int section_type, struct proxy *curpx,
+                                 const struct proxy *defpx, const char *file, int line,
+                                 char **err)
+{
+	int *valptr;
+	int val;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* the keyword is one of "tune.rcvbuf.frontend", "tune.rcvbuf.backend",
+	 * "tune.sndbuf.frontend", "tune.sndbuf.backend": characters [5]
+	 * ('r'/'s') and [12] ('f'/'b') are enough to tell them apart.
+	 */
+	if (args[0][5] == 'r' && args[0][12] == 'f')
+		valptr = &global.tune.frontend_rcvbuf;
+	else if (args[0][5] == 'r' && args[0][12] == 'b')
+		valptr = &global.tune.backend_rcvbuf;
+	else if (args[0][5] == 's' && args[0][12] == 'f')
+		valptr = &global.tune.frontend_sndbuf;
+	else
+		valptr = &global.tune.backend_sndbuf;
+
+	if (*valptr != 0) {
+		memprintf(err, "parsing [%s:%d] : ignoring '%s' which was already specified.\n", file, line, args[0]);
+		return 1;
+	}
+
+	val = atoi(args[1]);
+
+	if (*(args[1]) == 0 || val <= 0) {
+		memprintf(err, "parsing [%s:%d] : '%s' expects a strictly positive integer argument.\n", file, line, args[0]);
+		return -1;
+	}
+
+	*valptr = val;
+	return 0;
+}
+
+/* register "global" section keywords; all four buffer-size tunables share the
+ * same parser which discriminates on the keyword spelling.
+ */
+static struct cfg_kw_list dgram_cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.rcvbuf.backend", dgram_parse_tune_bufs },
+	{ CFG_GLOBAL, "tune.rcvbuf.frontend", dgram_parse_tune_bufs },
+	{ CFG_GLOBAL, "tune.sndbuf.backend", dgram_parse_tune_bufs },
+	{ CFG_GLOBAL, "tune.sndbuf.frontend", dgram_parse_tune_bufs },
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &dgram_cfg_kws);
diff --git a/src/dict.c b/src/dict.c
new file mode 100644
index 0000000..a225081
--- /dev/null
+++ b/src/dict.c
@@ -0,0 +1,127 @@
+#include <string.h>
+
+#include <import/eb32tree.h>
+#include <import/ebistree.h>
+#include <haproxy/dict.h>
+#include <haproxy/thread.h>
+
+/* Allocate and initialize a new dictionary named <name> (the string itself is
+ * referenced, not copied). Returns the dict, or NULL on allocation failure.
+ */
+struct dict *new_dict(const char *name)
+{
+	struct dict *d;
+
+	d = malloc(sizeof(*d));
+	if (!d)
+		return NULL;
+
+	d->name = name;
+	d->values = EB_ROOT_UNIQUE;
+	HA_RWLOCK_INIT(&d->rwlock);
+	return d;
+}
+
+/*
+ * Allocate a new dictionary entry with <s> as string value which is
+ * strdup()'ed; <len> is cached and the refcount starts at 1 for the caller.
+ * Returns the new allocated entry if succeeded, NULL if not.
+ */
+static struct dict_entry *new_dict_entry(char *s)
+{
+	struct dict_entry *de;
+
+	de = calloc(1, sizeof *de);
+	if (!de)
+		return NULL;
+
+	de->value.key = strdup(s);
+	if (!de->value.key) {
+		/* <de> is still fully zeroed here (calloc), so there is
+		 * nothing else to release than the entry itself.
+		 */
+		free(de);
+		return NULL;
+	}
+
+	de->len = strlen(s);
+	de->refcount = 1;
+
+	return de;
+}
+
+/*
+ * Release the memory allocated for <de> dictionary entry. The refcount is
+ * cleared first so that a stale reader sees the entry as dead.
+ */
+static void free_dict_entry(struct dict_entry *de)
+{
+	de->refcount = 0;
+	ha_free(&de->value.key);
+	free(de);
+}
+
+/*
+ * Look up the entry whose string value is <s> in dictionary <d>. The caller
+ * is expected to hold the dict lock. Returns NULL when not found.
+ */
+static struct dict_entry *__dict_lookup(struct dict *d, const char *s)
+{
+	struct ebpt_node *node;
+
+	node = ebis_lookup(&d->values, s);
+	if (!node)
+		return NULL;
+
+	return container_of(node, struct dict_entry, value);
+}
+
+/*
+ * Insert an entry in <d> dictionary with <s> as value, or take a reference
+ * on an already existing one. Returns the entry, or NULL on allocation
+ * failure.
+ */
+struct dict_entry *dict_insert(struct dict *d, char *s)
+{
+	struct dict_entry *de;
+	struct ebpt_node *n;
+
+	/* fast path: look the value up under the read lock only */
+	HA_RWLOCK_RDLOCK(DICT_LOCK, &d->rwlock);
+	de = __dict_lookup(d, s);
+	HA_RWLOCK_RDUNLOCK(DICT_LOCK, &d->rwlock);
+	if (de) {
+		/* NOTE(review): the refcount is taken after the lock was
+		 * released; a concurrent dict_entry_unref() dropping the last
+		 * reference in that window would free <de> — confirm callers
+		 * always hold their own reference when re-inserting.
+		 */
+		HA_ATOMIC_INC(&de->refcount);
+		return de;
+	}
+
+	de = new_dict_entry(s);
+	if (!de)
+		return NULL;
+
+	HA_RWLOCK_WRLOCK(DICT_LOCK, &d->rwlock);
+	n = ebis_insert(&d->values, &de->value);
+	HA_RWLOCK_WRUNLOCK(DICT_LOCK, &d->rwlock);
+	if (n != &de->value) {
+		/* lost the race against a concurrent insert of the same
+		 * value: drop ours and return the winner's entry.
+		 * NOTE(review): the winner's refcount is not incremented
+		 * here — verify this is the intended accounting.
+		 */
+		free_dict_entry(de);
+		de = container_of(n, struct dict_entry, value);
+	}
+
+	return de;
+}
+
+
+/*
+ * Unreference a dict entry previously acquired with <dict_insert>.
+ * If this is the last live reference to the entry, it is
+ * removed from the dictionary.
+ */
+void dict_entry_unref(struct dict *d, struct dict_entry *de)
+{
+	if (!de)
+		return;
+
+	/* drop our reference; keep the entry while other holders remain */
+	if (HA_ATOMIC_SUB_FETCH(&de->refcount, 1) != 0)
+		return;
+
+	/* last reference: unlink from the tree under the write lock then
+	 * release. NOTE(review): a concurrent dict_insert() could in theory
+	 * find the entry between the refcount reaching zero and the deletion
+	 * below — confirm usage patterns make this impossible.
+	 */
+	HA_RWLOCK_WRLOCK(DICT_LOCK, &d->rwlock);
+	ebpt_delete(&de->value);
+	HA_RWLOCK_WRUNLOCK(DICT_LOCK, &d->rwlock);
+
+	free_dict_entry(de);
+}
diff --git a/src/dns.c b/src/dns.c
new file mode 100644
index 0000000..23e9d9d
--- /dev/null
+++ b/src/dns.c
@@ -0,0 +1,1330 @@
+/*
+ * Name server resolution
+ *
+ * Copyright 2020 HAProxy Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/types.h>
+
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/dgram.h>
+#include <haproxy/dns.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/log.h>
+#include <haproxy/ring.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+
+/* per-thread scratch buffer used to copy DNS messages in and out of rings */
+static THREAD_LOCAL char *dns_msg_trash;
+
+/* pools for TCP DNS sessions, in-flight queries, and rx/tx message buffers */
+DECLARE_STATIC_POOL(dns_session_pool, "dns_session", sizeof(struct dns_session));
+DECLARE_STATIC_POOL(dns_query_pool, "dns_query", sizeof(struct dns_query));
+DECLARE_STATIC_POOL(dns_msg_buf, "dns_msg_buf", DNS_TCP_MSG_RING_MAX_SIZE);
+
+/* Opens an UDP socket on the nameserver's IP/Port, if required. Returns 0 on
+ * success, -1 otherwise. ns->dgram must be defined, and the caller is
+ * expected to hold the dgram connection spinlock.
+ */
+static int dns_connect_nameserver(struct dns_nameserver *ns)
+{
+	struct dgram_conn *dgram = &ns->dgram->conn;
+	int fd;
+
+	/* Already connected */
+	if (dgram->t.sock.fd != -1)
+		return 0;
+
+	/* Create an UDP socket and connect it on the nameserver's IP/Port */
+	if ((fd = socket(dgram->addr.to.ss_family, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
+		send_log(NULL, LOG_WARNING,
+			 "DNS : section '%s': can't create socket for nameserver '%s'.\n",
+			 ns->counters->pid, ns->id);
+		return -1;
+	}
+	if (connect(fd, (struct sockaddr*)&dgram->addr.to, get_addr_len(&dgram->addr.to)) == -1) {
+		/* use counters->pid (the section name) like the message above:
+		 * the format string expects the section, not the counters id */
+		send_log(NULL, LOG_WARNING,
+			 "DNS : section '%s': can't connect socket for nameserver '%s'.\n",
+			 ns->counters->pid, ns->id);
+		close(fd);
+		return -1;
+	}
+
+	/* Make the socket non blocking */
+	fd_set_nonblock(fd);
+
+	/* Add the fd in the fd list and update its parameters */
+	dgram->t.sock.fd = fd;
+	fd_insert(fd, dgram, dgram_fd_handler, tgid, tg->threads_enabled);
+	fd_want_recv(fd);
+	return 0;
+}
+
+/* Sends a message to a name server.
+ * It returns the message length on success, or -1 in error case;
+ * 0 is returned in case the output ring buffer is full.
+ * For UDP nameservers the datagram is sent directly (connecting lazily
+ * on first use); on EAGAIN the message is queued in the request ring so
+ * dns_resolve_send() can retry it. For TCP nameservers the message is
+ * always queued and the request task is woken up.
+ */
+int dns_send_nameserver(struct dns_nameserver *ns, void *buf, size_t len)
+{
+	int ret = -1;
+
+	if (ns->dgram) {
+		struct dgram_conn *dgram = &ns->dgram->conn;
+		int fd;
+
+		HA_SPIN_LOCK(DNS_LOCK, &dgram->lock);
+		fd = dgram->t.sock.fd;
+		if (fd == -1) {
+			/* lazily (re)open the UDP socket */
+			if (dns_connect_nameserver(ns) == -1) {
+				HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+				return -1;
+			}
+			fd = dgram->t.sock.fd;
+		}
+
+		ret = send(fd, buf, len, 0);
+		if (ret < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				/* socket not writable: park the message in the
+				 * ring, it will be flushed by dns_resolve_send()
+				 * once the fd becomes writable again */
+				struct ist myist;
+
+				myist = ist2(buf, len);
+				ret = ring_write(ns->dgram->ring_req, DNS_TCP_MSG_MAX_SIZE, NULL, 0, &myist, 1);
+				if (!ret) {
+					ns->counters->snd_error++;
+					HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+					return -1;
+				}
+				fd_cant_send(fd);
+				HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+				return ret;
+			}
+			/* hard error: close the socket, it will be reopened
+			 * on the next send attempt */
+			ns->counters->snd_error++;
+			fd_delete(fd);
+			dgram->t.sock.fd = -1;
+			HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+			return -1;
+		}
+		ns->counters->sent++;
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+	}
+	else if (ns->stream) {
+		struct ist myist;
+
+		myist = ist2(buf, len);
+		ret = ring_write(ns->stream->ring_req, DNS_TCP_MSG_MAX_SIZE, NULL, 0, &myist, 1);
+		if (!ret) {
+			ns->counters->snd_error++;
+			return -1;
+		}
+		task_wakeup(ns->stream->task_req, TASK_WOKEN_MSG);
+		return ret;
+	}
+
+	return ret;
+}
+
+void dns_session_free(struct dns_session *);
+
+/* Receives a dns message.
+ * Returns the message length;
+ * 0 is returned if no more message is available,
+ * -1 in error case.
+ * For UDP nameservers this is a plain recv() on the datagram socket.
+ * For TCP nameservers, complete responses are pulled from the sessions
+ * queued on the stream server's wait_sess list.
+ */
+ssize_t dns_recv_nameserver(struct dns_nameserver *ns, void *data, size_t size)
+{
+	ssize_t ret = -1;
+
+	if (ns->dgram) {
+		struct dgram_conn *dgram = &ns->dgram->conn;
+		int fd;
+
+		HA_SPIN_LOCK(DNS_LOCK, &dgram->lock);
+		fd = dgram->t.sock.fd;
+		if (fd == -1) {
+			HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+			return -1;
+		}
+
+		if ((ret = recv(fd, data, size, 0)) < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				fd_cant_recv(fd);
+				HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+				return 0;
+			}
+			/* hard error: drop the socket, it will be reopened
+			 * by the next send attempt */
+			fd_delete(fd);
+			dgram->t.sock.fd = -1;
+			HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+			return -1;
+		}
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+	}
+	else if (ns->stream) {
+		struct dns_stream_server *dss = ns->stream;
+		struct dns_session *ds;
+
+		HA_SPIN_LOCK(DNS_LOCK, &dss->lock);
+
+		if (!LIST_ISEMPTY(&dss->wait_sess)) {
+			/* pick the first session holding a complete response,
+			 * copy at most <size> bytes of it to the caller */
+			ds = LIST_NEXT(&dss->wait_sess, struct dns_session *, waiter);
+			ret = ds->rx_msg.len < size ? ds->rx_msg.len : size;
+			memcpy(data, ds->rx_msg.area, ret);
+
+			ds->rx_msg.len = 0;
+
+			/* This barrier is here to ensure that all data is
+			 * stored if the appctx detect the elem is out of the
+			 * list.
+			 */
+			__ha_barrier_store();
+
+			LIST_DEL_INIT(&ds->waiter);
+
+			if (ds->appctx) {
+				/* This second barrier is here to ensure that
+				 * the waked up appctx won't miss that the elem
+				 * is removed from the list.
+				 */
+				__ha_barrier_store();
+
+				/* awake appctx because it may have other
+				 * message to receive
+				 */
+				appctx_wakeup(ds->appctx);
+
+				/* dns_session could already be in the free_sess
+				 * list so we first remove it */
+				LIST_DEL_INIT(&ds->list);
+
+				/* decrease nb_queries to free a slot for a new query on that sess */
+				ds->nb_queries--;
+				if (ds->nb_queries) {
+					/* pipelined unanswered requests remain
+					 * in this session but we just decreased
+					 * the counter so the session
+					 * can not be full of pipelined requests;
+					 * we can add it to the free_sess list
+					 * to receive a new request
+					 */
+					LIST_INSERT(&ds->dss->free_sess, &ds->list);
+				}
+				else {
+					/* there is no more pipelined requests
+					 * into this session, so we move it
+					 * to idle_sess list */
+					LIST_INSERT(&ds->dss->idle_sess, &ds->list);
+
+					/* update the counter of idle sessions */
+					ds->dss->idle_conns++;
+
+					/* Note: this is useless there to update
+					 * the max_active_conns since we increase
+					 * the idle count */
+				}
+			}
+			else {
+				/* there is no more appctx for this session
+				 * it means it is ready to die
+				 */
+				dns_session_free(ds);
+			}
+
+
+		}
+
+		HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+	}
+
+	return ret;
+}
+
+/* Called when a dns network socket is ready to receive data: validates
+ * the fd under the dgram lock, then delegates actual parsing to the
+ * nameserver's process_responses() callback (called unlocked).
+ */
+static void dns_resolve_recv(struct dgram_conn *dgram)
+{
+	struct dns_nameserver *ns;
+	int fd;
+
+	HA_SPIN_LOCK(DNS_LOCK, &dgram->lock);
+
+	fd = dgram->t.sock.fd;
+
+	/* check if ready for reading */
+	if ((fd == -1) || !fd_recv_ready(fd)) {
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+		return;
+	}
+
+	/* no need to go further if we can't retrieve the nameserver */
+	if ((ns = dgram->owner) == NULL) {
+		/* clear pending poll events and stop polling this orphan fd */
+		_HA_ATOMIC_AND(&fdtab[fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
+		fd_stop_recv(fd);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+		return;
+	}
+
+	HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+
+	ns->process_responses(ns);
+}
+
+/* Called when a dns network socket is ready to send data: drains the
+ * nameserver's request ring (messages parked there by
+ * dns_send_nameserver() on EAGAIN) onto the UDP socket. Uses the ring's
+ * per-offset reader counter protocol to keep its position between calls.
+ */
+static void dns_resolve_send(struct dgram_conn *dgram)
+{
+	int fd;
+	struct dns_nameserver *ns;
+	struct ring *ring;
+	struct buffer *buf;
+	uint64_t msg_len;
+	size_t len, cnt, ofs;
+
+	HA_SPIN_LOCK(DNS_LOCK, &dgram->lock);
+
+	fd = dgram->t.sock.fd;
+
+	/* check if ready for sending */
+	if ((fd == -1) || !fd_send_ready(fd)) {
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+		return;
+	}
+
+	/* no need to go further if we can't retrieve the nameserver */
+	if ((ns = dgram->owner) == NULL) {
+		_HA_ATOMIC_AND(&fdtab[fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
+		fd_stop_send(fd);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+		return;
+	}
+
+	ring = ns->dgram->ring_req;
+	buf = &ring->buf;
+
+	HA_RWLOCK_RDLOCK(DNS_LOCK, &ring->lock);
+
+	/* explanation for the initialization below: it would be better to do
+	 * this in the parsing function but this would occasionally result in
+	 * dropped events because we'd take a reference on the oldest message
+	 * and keep it while being scheduled. Thus instead let's take it the
+	 * first time we enter here so that we have a chance to pass many
+	 * existing messages before grabbing a reference to a location. This
+	 * value cannot be produced after initialization.
+	 */
+	if (unlikely(ns->dgram->ofs_req == ~0)) {
+		ns->dgram->ofs_req = b_peek_ofs(buf, 0);
+		HA_ATOMIC_INC(b_orig(buf) + ns->dgram->ofs_req);
+	}
+
+	/* we were already there, adjust the offset to be relative to
+	 * the buffer's head and remove us from the counter.
+	 */
+	ofs = ns->dgram->ofs_req - b_head_ofs(buf);
+	if (ns->dgram->ofs_req < b_head_ofs(buf))
+		ofs += b_size(buf);
+	BUG_ON(ofs >= buf->size);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+	/* messages are stored as <varint length><payload>; ofs points to
+	 * the reader-counter byte preceding the current message */
+	while (ofs + 1 < b_data(buf)) {
+		int ret;
+
+		cnt = 1;
+		len = b_peek_varint(buf, ofs + cnt, &msg_len);
+		if (!len)
+			break;
+		cnt += len;
+		BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+		if (unlikely(msg_len > DNS_TCP_MSG_MAX_SIZE)) {
+			/* too large a message to ever fit, let's skip it */
+			ofs += cnt + msg_len;
+			continue;
+		}
+
+		len = b_getblk(buf, dns_msg_trash, msg_len, ofs + cnt);
+
+		ret = send(fd, dns_msg_trash, len, 0);
+		if (ret < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				/* still not writable: keep our position and
+				 * wait for the next send-ready event */
+				fd_cant_send(fd);
+				goto out;
+			}
+			ns->counters->snd_error++;
+			fd_delete(fd);
+			fd = dgram->t.sock.fd = -1;
+			goto out;
+		}
+		ns->counters->sent++;
+
+		ofs += cnt + len;
+	}
+
+	/* we don't want/need to be waked up any more for sending
+	 * because all ring content is sent */
+	fd_stop_send(fd);
+
+out:
+	/* take a reference on our new position for the next pass */
+	HA_ATOMIC_INC(b_peek(buf, ofs));
+	ns->dgram->ofs_req = b_peek_ofs(buf, ofs);
+	HA_RWLOCK_RDUNLOCK(DNS_LOCK, &ring->lock);
+	HA_SPIN_UNLOCK(DNS_LOCK, &dgram->lock);
+}
+
+/* proto_udp callback functions for a DNS resolution: wired into the
+ * dgram connection by dns_dgram_init() below */
+struct dgram_data_cb dns_dgram_cb = {
+	.recv = dns_resolve_recv,
+	.send = dns_resolve_send,
+};
+
+/* Initializes the UDP (datagram) side of nameserver <ns> targeting
+ * address <sk>: allocates the dgram server, its request ring, and
+ * attaches us as a reader on that ring. The socket itself is opened
+ * lazily by dns_connect_nameserver(). Returns 0 on success, -1 on
+ * failure (in which case ns->dgram is left NULL).
+ */
+int dns_dgram_init(struct dns_nameserver *ns, struct sockaddr_storage *sk)
+{
+	struct dns_dgram_server *dgram;
+
+	if ((dgram = calloc(1, sizeof(*dgram))) == NULL)
+		return -1;
+
+	/* Leave dgram partially initialized, no FD attached for
+	 * now. */
+	dgram->conn.owner = ns;
+	dgram->conn.data = &dns_dgram_cb;
+	dgram->conn.t.sock.fd = -1;
+	dgram->conn.addr.to = *sk;
+	HA_SPIN_INIT(&dgram->conn.lock);
+	ns->dgram = dgram;
+
+	dgram->ofs_req = ~0; /* init ring offset */
+	dgram->ring_req = ring_new(2*DNS_TCP_MSG_RING_MAX_SIZE);
+	if (!dgram->ring_req) {
+		ha_alert("memory allocation error initializing the ring for nameserver.\n");
+		goto out;
+	}
+
+	/* attach the task as reader */
+	if (!ring_attach(dgram->ring_req)) {
+		/* mark server attached to the ring */
+		ha_alert("nameserver sets too many watchers > 255 on ring. This is a bug and should not happen.\n");
+		goto out;
+	}
+	return 0;
+out:
+	ring_free(dgram->ring_req);
+
+	free(dgram);
+
+	/* don't leave a dangling pointer to the freed dgram behind us */
+	ns->dgram = NULL;
+
+	return -1;
+}
+
+/*
+ * IO Handler to handle message push to dns tcp server.
+ * It takes its context from appctx->svcctx.
+ * Forward path: drains the session's tx ring into the stream, rewriting
+ * each query id with a session-local one (kept in ds->query_ids for the
+ * reverse mapping). Return path: reassembles length-prefixed responses
+ * into ds->rx_msg, restores the original query id, and queues the
+ * session on dss->wait_sess for the response task.
+ */
+static void dns_session_io_handler(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct dns_session *ds = appctx->svcctx;
+	struct ring *ring = &ds->ring;
+	struct buffer *buf = &ring->buf;
+	uint64_t msg_len;
+	int available_room;
+	size_t len, cnt, ofs;
+	int ret = 0;
+
+	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+		co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+		goto out;
+	}
+
+	/* if stopping was requested, close immediately */
+	if (unlikely(stopping))
+		goto close;
+
+	/* we want to be sure to not miss that we have been awaked for a shutdown */
+	__ha_barrier_load();
+
+	/* that means the connection was requested to shutdown
+	 * for instance idle expire */
+	if (ds->shutdown)
+		goto close;
+
+	/* if the connection is not established, inform the stream that we want
+	 * to be notified whenever the connection completes.
+	 */
+	if (sc_opposite(sc)->state < SC_ST_EST) {
+		applet_need_more_data(appctx);
+		se_need_remote_conn(appctx->sedesc);
+		applet_have_more_data(appctx);
+		goto out;
+	}
+
+	/* take ourselves off the ring's waiters list before reading */
+	HA_RWLOCK_WRLOCK(DNS_LOCK, &ring->lock);
+	LIST_DEL_INIT(&appctx->wait_entry);
+	HA_RWLOCK_WRUNLOCK(DNS_LOCK, &ring->lock);
+
+	HA_RWLOCK_RDLOCK(DNS_LOCK, &ring->lock);
+
+	/* explanation for the initialization below: it would be better to do
+	 * this in the parsing function but this would occasionally result in
+	 * dropped events because we'd take a reference on the oldest message
+	 * and keep it while being scheduled. Thus instead let's take it the
+	 * first time we enter here so that we have a chance to pass many
+	 * existing messages before grabbing a reference to a location. This
+	 * value cannot be produced after initialization.
+	 */
+	if (unlikely(ds->ofs == ~0)) {
+		ds->ofs = b_peek_ofs(buf, 0);
+		HA_ATOMIC_INC(b_orig(buf) + ds->ofs);
+	}
+
+	/* we were already there, adjust the offset to be relative to
+	 * the buffer's head and remove us from the counter.
+	 */
+	ofs = ds->ofs - b_head_ofs(buf);
+	if (ds->ofs < b_head_ofs(buf))
+		ofs += b_size(buf);
+
+	BUG_ON(ofs >= buf->size);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+	/* in following loop, ofs always points to the counter byte that
+	 * precedes the message so that we can take our reference there if we
+	 * have to stop before the end (ret=0).
+	 */
+	ret = 1;
+	while (ofs + 1 < b_data(buf)) {
+		struct dns_query *query;
+		uint16_t original_qid;
+		uint16_t new_qid;
+
+		cnt = 1;
+		len = b_peek_varint(buf, ofs + cnt, &msg_len);
+		if (!len)
+			break;
+		cnt += len;
+		BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+
+		/* retrieve available room on output channel */
+		available_room = channel_recv_max(sc_ic(sc));
+
+		/* tx_msg_offset null means we are at the start of a new message */
+		if (!ds->tx_msg_offset) {
+			uint16_t slen;
+
+			/* check if there is enough room to put message len and query id */
+			if (available_room < sizeof(slen) + sizeof(new_qid)) {
+				sc_need_room(sc, sizeof(slen) + sizeof(new_qid));
+				ret = 0;
+				break;
+			}
+
+			/* put msg len into the channel (TCP DNS messages are
+			 * prefixed with their length in network order) */
+			slen = (uint16_t)msg_len;
+			slen = htons(slen);
+			applet_putblk(appctx, (char *)&slen, sizeof(slen));
+			available_room -= sizeof(slen);
+
+			/* backup original query id */
+			len = b_getblk(buf, (char *)&original_qid, sizeof(original_qid), ofs + cnt);
+			if (!len) {
+				/* should never happen since messages are atomically
+				 * written into ring
+				 */
+				ret = 0;
+				break;
+			}
+
+			/* generates new query id */
+			new_qid = ++ds->query_counter;
+			new_qid = htons(new_qid);
+
+			/* put new query id into the channel */
+			applet_putblk(appctx, (char *)&new_qid, sizeof(new_qid));
+			available_room -= sizeof(new_qid);
+
+			/* keep query id mapping */
+
+			query = pool_alloc(dns_query_pool);
+			if (query) {
+				query->qid.key = new_qid;
+				query->original_qid = original_qid;
+				query->expire = tick_add(now_ms, 5000);
+				LIST_INIT(&query->list);
+				if (LIST_ISEMPTY(&ds->queries)) {
+					/* enable task to handle expire */
+					ds->task_exp->expire = query->expire;
+					/* ensure this will be executed by the same
+					 * thread as dns_session_release
+					 * to ensure session_release is free
+					 * to destroy the task */
+					task_queue(ds->task_exp);
+				}
+				LIST_APPEND(&ds->queries, &query->list);
+				eb32_insert(&ds->query_ids, &query->qid);
+				ds->onfly_queries++;
+			}
+			/* NOTE(review): on pool_alloc failure the query is
+			 * still forwarded but the id mapping is dropped, so
+			 * the response will be discarded as unknown — confirm
+			 * this best-effort behavior is intended */
+
+			/* update the tx_offset to handle output in 16k streams */
+			ds->tx_msg_offset = sizeof(original_qid);
+
+		}
+
+		/* check if it remains available room on output chan */
+		if (unlikely(!available_room)) {
+			sc_need_room(sc, 1);
+			ret = 0;
+			break;
+		}
+
+		chunk_reset(&trash);
+		if ((msg_len - ds->tx_msg_offset) > available_room) {
+			/* remaining msg data is too large to be written in output channel at one time */
+
+			len = b_getblk(buf, trash.area, available_room, ofs + cnt + ds->tx_msg_offset);
+
+			/* update offset to complete message forwarding later */
+			ds->tx_msg_offset += len;
+		}
+		else {
+			/* remaining msg data can be written in output channel at one time */
+			len = b_getblk(buf, trash.area, msg_len - ds->tx_msg_offset, ofs + cnt + ds->tx_msg_offset);
+
+			/* reset tx_msg_offset to mark forward fully processed */
+			ds->tx_msg_offset = 0;
+		}
+		trash.data += len;
+
+		if (applet_putchk(appctx, &trash) == -1) {
+			/* should never happen since we
+			 * check available_room is large
+			 * enough here.
+			 */
+			ret = 0;
+			break;
+		}
+
+		if (ds->tx_msg_offset) {
+			/* msg was not fully processed, we must be awake to drain pending data */
+			sc_need_room(sc, 0);
+			ret = 0;
+			break;
+		}
+		/* switch to next message */
+		ofs += cnt + msg_len;
+	}
+
+	/* take a reference on our new ring position for the next pass */
+	HA_ATOMIC_INC(b_peek(buf, ofs));
+	ds->ofs = b_peek_ofs(buf, ofs);
+
+	HA_RWLOCK_RDUNLOCK(DNS_LOCK, &ring->lock);
+
+	if (ret) {
+		/* let's be woken up once new request to write arrived */
+		HA_RWLOCK_WRLOCK(DNS_LOCK, &ring->lock);
+		BUG_ON(LIST_INLIST(&appctx->wait_entry));
+		LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+		HA_RWLOCK_WRUNLOCK(DNS_LOCK, &ring->lock);
+		applet_have_no_more_data(appctx);
+	}
+
+	/* if session is not a waiter it means there is no committed
+	 * message into rx_buf and we are free to use it
+	 * Note: we need a load barrier here to not miss the
+	 * delete from the list
+	 */
+	__ha_barrier_load();
+	if (!LIST_INLIST_ATOMIC(&ds->waiter)) {
+		while (1) {
+			uint16_t query_id;
+			struct eb32_node *eb;
+			struct dns_query *query;
+
+			if (!ds->rx_msg.len) {
+				/* retrieve message len */
+				ret = co_getblk(sc_oc(sc), (char *)&msg_len, 2, 0);
+				if (ret <= 0) {
+					if (ret == -1)
+						goto error;
+					applet_need_more_data(appctx);
+					break;
+				}
+
+				/* mark as consumed */
+				co_skip(sc_oc(sc), 2);
+
+				/* store message len */
+				ds->rx_msg.len = ntohs(msg_len);
+				if (!ds->rx_msg.len)
+					continue;
+			}
+
+			if (co_data(sc_oc(sc)) + ds->rx_msg.offset < ds->rx_msg.len) {
+				/* message only partially available */
+
+				/* read available data */
+				ret = co_getblk(sc_oc(sc), ds->rx_msg.area + ds->rx_msg.offset, co_data(sc_oc(sc)), 0);
+				if (ret <= 0) {
+					if (ret == -1)
+						goto error;
+					applet_need_more_data(appctx);
+					break;
+				}
+
+				/* update message offset */
+				ds->rx_msg.offset += co_data(sc_oc(sc));
+
+				/* consume all pending data from the channel */
+				co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+
+				/* we need to wait for more data */
+				applet_need_more_data(appctx);
+				break;
+			}
+
+			/* enough data is available into the channel to read the message until the end */
+
+			/* read from the channel until the end of the message */
+			ret = co_getblk(sc_oc(sc), ds->rx_msg.area + ds->rx_msg.offset, ds->rx_msg.len - ds->rx_msg.offset, 0);
+			if (ret <= 0) {
+				if (ret == -1)
+					goto error;
+				applet_need_more_data(appctx);
+				break;
+			}
+
+			/* consume all data until the end of the message from the channel */
+			co_skip(sc_oc(sc), ds->rx_msg.len - ds->rx_msg.offset);
+
+			/* reset reader offset to 0 for next message read */
+			ds->rx_msg.offset = 0;
+
+			/* try remap query id to original */
+			memcpy(&query_id, ds->rx_msg.area, sizeof(query_id));
+			eb = eb32_lookup(&ds->query_ids, query_id);
+			if (!eb) {
+				/* query id not found means we have an unknown corresponding
+				 * request, perhaps server's bug or the query reached
+				 * timeout
+				 */
+				ds->rx_msg.len = 0;
+				continue;
+			}
+
+			/* re-map the original query id set by the requester */
+			query = eb32_entry(eb, struct dns_query, qid);
+			memcpy(ds->rx_msg.area, &query->original_qid, sizeof(query->original_qid));
+
+			/* remove query ids mapping from pending queries list/tree */
+			eb32_delete(&query->qid);
+			LIST_DELETE(&query->list);
+			pool_free(dns_query_pool, query);
+			ds->onfly_queries--;
+
+			/* the dns_session is also added in queue of the
+			 * wait_sess list where the task processing
+			 * response will pop available responses
+			 */
+			HA_SPIN_LOCK(DNS_LOCK, &ds->dss->lock);
+
+			BUG_ON(LIST_INLIST(&ds->waiter));
+			LIST_APPEND(&ds->dss->wait_sess, &ds->waiter);
+
+			HA_SPIN_UNLOCK(DNS_LOCK, &ds->dss->lock);
+
+			/* awake the task processing the responses */
+			task_wakeup(ds->dss->task_rsp, TASK_WOKEN_INIT);
+
+			break;
+		}
+	}
+
+out:
+	return;
+
+close:
+	se_fl_set(appctx->sedesc, SE_FL_EOS|SE_FL_EOI);
+	goto out;
+
+error:
+	se_fl_set(appctx->sedesc, SE_FL_ERROR);
+	goto out;
+}
+
+/*
+ * Drop every still-pending query attached to session <ds>: each one is
+ * unlinked from the qid lookup tree and the session's query list, then
+ * returned to its pool.
+ */
+void dns_queries_flush(struct dns_session *ds)
+{
+	struct dns_query *q, *qback;
+
+	list_for_each_entry_safe(q, qback, &ds->queries, list) {
+		eb32_delete(&q->qid);
+		LIST_DELETE(&q->list);
+		pool_free(dns_query_pool, q);
+	}
+}
+
+/* Releases all resources attached to session <ds> and the session
+ * itself. The caller must hold the owning dns_stream_server lock since
+ * this unlinks the session from lists whose heads belong to it.
+ */
+void dns_session_free(struct dns_session *ds)
+{
+	pool_free(dns_msg_buf, ds->rx_msg.area);
+	pool_free(dns_msg_buf, ds->tx_ring_area);
+	task_destroy(ds->task_exp);
+
+	/* drop any query still waiting for a response */
+	dns_queries_flush(ds);
+
+	/* Ensure to remove this session from external lists
+	 * Note: we are under the lock of dns_stream_server
+	 * which own the heads of those lists.
+	 */
+	LIST_DEL_INIT(&ds->waiter);
+	LIST_DEL_INIT(&ds->list);
+
+	ds->dss->cur_conns--;
+	/* Note: this is useless to update
+	 * max_active_conns here because
+	 * we decrease the value
+	 */
+
+	/* sanity checks: nothing may still reference this session */
+	BUG_ON(!LIST_ISEMPTY(&ds->list));
+	BUG_ON(!LIST_ISEMPTY(&ds->waiter));
+	BUG_ON(!LIST_ISEMPTY(&ds->queries));
+	BUG_ON(!LIST_ISEMPTY(&ds->ring.waiters));
+	BUG_ON(!eb_is_empty(&ds->query_ids));
+	pool_free(dns_session_pool, ds);
+}
+
+static struct appctx *dns_session_create(struct dns_session *ds);
+
+/* Applet .init callback for a DNS TCP session: finalizes the startup of
+ * the backing stream and points it at the nameserver's address/server.
+ * Returns 0 on success, -1 on error.
+ */
+static int dns_session_init(struct appctx *appctx)
+{
+	struct dns_session *ds = appctx->svcctx;
+	struct stream *s;
+	struct sockaddr_storage *addr = NULL;
+
+	if (!sockaddr_alloc(&addr, &ds->dss->srv->addr, sizeof(ds->dss->srv->addr)))
+		goto error;
+
+	if (appctx_finalize_startup(appctx, ds->dss->srv->proxy, &BUF_NULL) == -1)
+		goto error;
+
+	s = appctx_strm(appctx);
+	/* ownership of <addr> is transferred to the stream here */
+	s->scb->dst = addr;
+	s->scb->flags |= (SC_FL_RCV_ONCE|SC_FL_NOLINGER);
+	s->target = &ds->dss->srv->obj_type;
+	s->flags = SF_ASSIGNED;
+
+	s->do_log = NULL;
+	s->uniq_id = 0;
+
+	applet_expect_no_data(appctx);
+	ds->appctx = appctx;
+	return 0;
+
+ error:
+	/* <addr> was not handed over to the stream yet: release it to
+	 * avoid leaking it when appctx_finalize_startup() fails */
+	sockaddr_free(&addr);
+	return -1;
+}
+
+/*
+ * Function to release a DNS tcp session.
+ * Depending on the session's state it is either destroyed (idle, or all
+ * pending queries already forwarded), or re-armed with a brand new
+ * appctx so that the queries still parked in its tx ring can be replayed
+ * on a fresh connection.
+ */
+static void dns_session_release(struct appctx *appctx)
+{
+	struct dns_session *ds = appctx->svcctx;
+	struct dns_stream_server *dss __maybe_unused;
+
+	if (!ds)
+		return;
+
+	/* We do not call ring_appctx_detach here
+	 * because we want to keep readers counters
+	 * to retry a conn with a different appctx.
+	 */
+	HA_RWLOCK_WRLOCK(DNS_LOCK, &ds->ring.lock);
+	LIST_DEL_INIT(&appctx->wait_entry);
+	HA_RWLOCK_WRUNLOCK(DNS_LOCK, &ds->ring.lock);
+
+	dss = ds->dss;
+
+	HA_SPIN_LOCK(DNS_LOCK, &dss->lock);
+	LIST_DEL_INIT(&ds->list);
+
+	/* on shutdown, no point retrying: just free the session */
+	if (stopping) {
+		dns_session_free(ds);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+		return;
+	}
+
+	if (!ds->nb_queries) {
+		/* this is an idle session */
+		/* Note: this is useless to update max_active_sess
+		 * here because we decrease idle_conns but
+		 * dns_session_free decrease curconns
+		 */
+
+		ds->dss->idle_conns--;
+		dns_session_free(ds);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+		return;
+	}
+
+	if (ds->onfly_queries == ds->nb_queries) {
+		/* the session can be released because
+		 * it means that all queries AND
+		 * responses are in fly */
+		dns_session_free(ds);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+		return;
+	}
+
+	/* if there is no pending complete response
+	 * message, ensure to reset
+	 * message offsets if the session
+	 * was closed with an incomplete pending response
+	 */
+	if (!LIST_INLIST(&ds->waiter))
+		ds->rx_msg.len = ds->rx_msg.offset = 0;
+
+	/* we flush pending sent queries because we never
+	 * have responses
+	 */
+	ds->nb_queries -= ds->onfly_queries;
+	dns_queries_flush(ds);
+
+	/* reset offset to be sure to start from message start */
+	ds->tx_msg_offset = 0;
+
+	/* here the ofs and the attached counter
+	 * are kept unchanged
+	 */
+
+	/* Create a new appctx, We hope we can
+	 * create from the release callback! */
+	ds->appctx = dns_session_create(ds);
+	if (!ds->appctx) {
+		dns_session_free(ds);
+		HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+		return;
+	}
+
+	/* the session still has room for more pipelined queries: make it
+	 * available again for new requests */
+	if (ds->nb_queries < DNS_STREAM_MAX_PIPELINED_REQ)
+		LIST_INSERT(&ds->dss->free_sess, &ds->list);
+
+	HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+}
+
+/* DNS tcp session applet: drives one pipelined TCP connection to a
+ * nameserver (see dns_session_io_handler above) */
+static struct applet dns_session_applet = {
+	.obj_type = OBJ_TYPE_APPLET,
+	.name = "<STRMDNS>", /* used for logging */
+	.fct = dns_session_io_handler,
+	.init = dns_session_init,
+	.release = dns_session_release,
+};
+
+/*
+ * Function used to create an appctx for a DNS session.
+ * The session is stored into appctx->svcctx. Returns the new appctx,
+ * or NULL on allocation/initialization failure.
+ */
+static struct appctx *dns_session_create(struct dns_session *ds)
+{
+	struct appctx *appctx;
+
+	appctx = appctx_new_here(&dns_session_applet, NULL);
+	if (!appctx)
+		return NULL;
+
+	appctx->svcctx = (void *)ds;
+	if (appctx_init(appctx) == -1) {
+		ha_alert("out of memory in dns_session_create().\n");
+		appctx_free_on_early_error(appctx);
+		return NULL;
+	}
+
+	return appctx;
+}
+
+/* Task processing expiration of unresponded queries; this one is supposed
+ * to be stuck on the same thread as the appctx handler.
+ * Expired queries are purged from the head of the list; the first query
+ * still alive (the list is in submission order) provides the next wakeup
+ * date, otherwise the task sleeps forever.
+ */
+static struct task *dns_process_query_exp(struct task *t, void *context, unsigned int state)
+{
+	struct dns_session *ds = (struct dns_session *)context;
+	struct dns_query *q, *qb;
+
+	t->expire = TICK_ETERNITY;
+
+	list_for_each_entry_safe(q, qb, &ds->queries, list) {
+		if (!tick_is_expired(q->expire, now_ms)) {
+			/* first live query gives the next wakeup date */
+			t->expire = q->expire;
+			break;
+		}
+		eb32_delete(&q->qid);
+		LIST_DELETE(&q->list);
+		pool_free(dns_query_pool, q);
+		ds->onfly_queries--;
+	}
+
+	return t;
+}
+
+/* Task processing expiration of idle sessions: every 5s, shuts down half
+ * of the idle surplus (all idle sessions when stopping) and re-bases
+ * max_active_conns on the current activity level.
+ */
+static struct task *dns_process_idle_exp(struct task *t, void *context, unsigned int state)
+{
+	struct dns_stream_server *dss = (struct dns_stream_server *)context;
+	struct dns_session *ds, *dsb;
+	int target = 0;
+	int cur_active_conns;
+
+	HA_SPIN_LOCK(DNS_LOCK, &dss->lock);
+
+
+	cur_active_conns = dss->cur_conns - dss->idle_conns;
+	if (cur_active_conns > dss->max_active_conns)
+		dss->max_active_conns = cur_active_conns;
+
+	/* close half of the idle surplus per pass; without decrementing
+	 * <target> below, any positive surplus would wipe out ALL idle
+	 * sessions instead of half of it */
+	target = (dss->max_active_conns - cur_active_conns) / 2;
+	list_for_each_entry_safe(ds, dsb, &dss->idle_sess, list) {
+		if (!stopping && !target)
+			break;
+		target--;
+
+		/* remove conn to pending list to ensure it won't be reused */
+		LIST_DEL_INIT(&ds->list);
+
+		/* force session shutdown */
+		ds->shutdown = 1;
+
+		/* to be sure that the appctx won't miss shutdown */
+		__ha_barrier_store();
+
+		/* wake appctx to perform the shutdown */
+		appctx_wakeup(ds->appctx);
+	}
+
+	/* reset max to current active conns */
+	dss->max_active_conns = cur_active_conns;
+
+	HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+
+	t->expire = tick_add(now_ms, 5000);
+
+	return t;
+}
+
+/* Allocates and initializes a new DNS TCP session on stream server
+ * <dss>: rx/tx buffers, tx ring (with ourselves attached as its first
+ * reader), expiration task and appctx. Returns NULL when maxconn is
+ * reached or on allocation failure.
+ */
+struct dns_session *dns_session_new(struct dns_stream_server *dss)
+{
+	struct dns_session *ds;
+
+	if (dss->maxconn && (dss->maxconn <= dss->cur_conns))
+		return NULL;
+
+	ds = pool_zalloc(dns_session_pool);
+	if (!ds)
+		return NULL;
+
+	ds->ofs = ~0;	/* ring offset not taken yet (see io handler) */
+	ds->dss = dss;
+	LIST_INIT(&ds->list);
+	LIST_INIT(&ds->queries);
+	LIST_INIT(&ds->waiter);
+	ds->rx_msg.offset = ds->rx_msg.len = 0;
+	ds->rx_msg.area = NULL;
+	ds->tx_ring_area = NULL;
+	ds->task_exp = NULL;
+	ds->appctx = NULL;
+	ds->shutdown = 0;
+	ds->nb_queries = 0;
+	ds->query_ids = EB_ROOT_UNIQUE;
+	ds->rx_msg.area = pool_alloc(dns_msg_buf);
+	if (!ds->rx_msg.area)
+		goto error;
+
+	ds->tx_ring_area = pool_alloc(dns_msg_buf);
+	if (!ds->tx_ring_area)
+		goto error;
+
+	ring_init(&ds->ring, ds->tx_ring_area, DNS_TCP_MSG_RING_MAX_SIZE);
+	/* never fail because it is the first watcher attached to the ring */
+	DISGUISE(ring_attach(&ds->ring));
+
+	if ((ds->task_exp = task_new_here()) == NULL)
+		goto error;
+
+	ds->task_exp->process = dns_process_query_exp;
+	ds->task_exp->context = ds;
+
+	ds->appctx = dns_session_create(ds);
+	if (!ds->appctx)
+		goto error;
+
+	dss->cur_conns++;
+
+	return ds;
+
+error:
+	/* task_destroy() and pool_free() are NULL-safe */
+	task_destroy(ds->task_exp);
+	pool_free(dns_msg_buf, ds->rx_msg.area);
+	pool_free(dns_msg_buf, ds->tx_ring_area);
+
+	pool_free(dns_session_pool, ds);
+
+	return NULL;
+}
+
+/*
+ * Task used to consume pending messages from nameserver ring
+ * and forward them to dns_session ring.
+ * Sessions are tried in order: an active session with a free pipeline
+ * slot, then an idle session, and finally a brand new session.
+ * Note: If no slot found a new dns_session is allocated
+ */
+static struct task *dns_process_req(struct task *t, void *context, unsigned int state)
+{
+	struct dns_nameserver *ns = (struct dns_nameserver *)context;
+	struct dns_stream_server *dss = ns->stream;
+	struct ring *ring = dss->ring_req;
+	struct buffer *buf = &ring->buf;
+	uint64_t msg_len;
+	size_t len, cnt, ofs;
+	struct dns_session *ds, *ads;
+	HA_SPIN_LOCK(DNS_LOCK, &dss->lock);
+
+	HA_RWLOCK_RDLOCK(DNS_LOCK, &ring->lock);
+
+	/* explanation for the initialization below: it would be better to do
+	 * this in the parsing function but this would occasionally result in
+	 * dropped events because we'd take a reference on the oldest message
+	 * and keep it while being scheduled. Thus instead let's take it the
+	 * first time we enter here so that we have a chance to pass many
+	 * existing messages before grabbing a reference to a location. This
+	 * value cannot be produced after initialization.
+	 */
+	if (unlikely(dss->ofs_req == ~0)) {
+		dss->ofs_req = b_peek_ofs(buf, 0);
+		HA_ATOMIC_INC(b_orig(buf) + dss->ofs_req);
+	}
+
+	/* we were already there, adjust the offset to be relative to
+	 * the buffer's head and remove us from the counter.
+	 */
+	ofs = dss->ofs_req - b_head_ofs(buf);
+	if (dss->ofs_req < b_head_ofs(buf))
+		ofs += b_size(buf);
+
+	BUG_ON(ofs >= buf->size);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+	/* messages are stored as <varint length><payload> */
+	while (ofs + 1 < b_data(buf)) {
+		struct ist myist;
+
+		cnt = 1;
+		len = b_peek_varint(buf, ofs + cnt, &msg_len);
+		if (!len)
+			break;
+		cnt += len;
+		BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+		if (unlikely(msg_len > DNS_TCP_MSG_MAX_SIZE)) {
+			/* too large a message to ever fit, let's skip it */
+			ofs += cnt + msg_len;
+			continue;
+		}
+
+		len = b_getblk(buf, dns_msg_trash, msg_len, ofs + cnt);
+
+		myist = ist2(dns_msg_trash, len);
+
+		ads = NULL;
+		/* try to push request into active sess with free slot */
+		if (!LIST_ISEMPTY(&dss->free_sess)) {
+			ds = LIST_NEXT(&dss->free_sess, struct dns_session *, list);
+
+			if (ring_write(&ds->ring, DNS_TCP_MSG_MAX_SIZE, NULL, 0, &myist, 1) > 0) {
+				ds->nb_queries++;
+				/* pipeline is full: remove from free list */
+				if (ds->nb_queries >= DNS_STREAM_MAX_PIPELINED_REQ)
+					LIST_DEL_INIT(&ds->list);
+				ads = ds;
+			}
+			else {
+				/* it means we were unable to put a request in this slot,
+				 * it may be close to be full so we put it at the end
+				 * of free conn list */
+				LIST_DEL_INIT(&ds->list);
+				LIST_APPEND(&dss->free_sess, &ds->list);
+			}
+		}
+
+		if (!ads) {
+			/* try to push request into idle, this one should have enough free space */
+			if (!LIST_ISEMPTY(&dss->idle_sess)) {
+				ds = LIST_NEXT(&dss->idle_sess, struct dns_session *, list);
+
+				/* ring is empty so this ring_write should never fail */
+				ring_write(&ds->ring, DNS_TCP_MSG_MAX_SIZE, NULL, 0, &myist, 1);
+				ds->nb_queries++;
+				LIST_DEL_INIT(&ds->list);
+
+				ds->dss->idle_conns--;
+
+				/* we may have to update the max_active_conns */
+				if (ds->dss->max_active_conns < ds->dss->cur_conns - ds->dss->idle_conns)
+					ds->dss->max_active_conns = ds->dss->cur_conns - ds->dss->idle_conns;
+
+				/* since we may unable to find a free list to handle
+				 * this request, this request may be large and fill
+				 * the ring buffer so we prefer to put at the end of free
+				 * list. */
+				LIST_APPEND(&dss->free_sess, &ds->list);
+				ads = ds;
+			}
+		}
+
+		/* we didn't find a session available with large enough room */
+		if (!ads) {
+			/* allocate a new session */
+			ads = dns_session_new(dss);
+			if (ads) {
+				/* ring is empty so this ring_write should never fail */
+				ring_write(&ads->ring, DNS_TCP_MSG_MAX_SIZE, NULL, 0, &myist, 1);
+				ads->nb_queries++;
+				LIST_INSERT(&dss->free_sess, &ads->list);
+			}
+			else
+				ns->counters->snd_error++;
+		}
+
+		if (ads)
+			ns->counters->sent++;
+
+		ofs += cnt + len;
+	}
+
+	/* take a reference on our new position for the next pass */
+	HA_ATOMIC_INC(b_peek(buf, ofs));
+	dss->ofs_req = b_peek_ofs(buf, ofs);
+	HA_RWLOCK_RDUNLOCK(DNS_LOCK, &ring->lock);
+
+
+	HA_SPIN_UNLOCK(DNS_LOCK, &dss->lock);
+	return t;
+}
+
+/*
+ * Task used to consume responses queued by the session io handlers.
+ * It simply hands control to the upper layer's process_responses()
+ * callback, which pops the available responses.
+ */
+static struct task *dns_process_rsp(struct task *t, void *context, unsigned int state)
+{
+	struct dns_nameserver *ns = context;
+
+	ns->process_responses(ns);
+	return t;
+}
+
+/* Function used to initialize a TCP nameserver: allocates the stream
+ * server, its request ring (with us attached as reader) and the three
+ * tasks handling requests, responses and idle-session expiration.
+ * Returns 0 on success, -1 on error (everything allocated so far is
+ * released).
+ */
+int dns_stream_init(struct dns_nameserver *ns, struct server *srv)
+{
+	struct dns_stream_server *dss = NULL;
+
+	dss = calloc(1, sizeof(*dss));
+	if (!dss) {
+		ha_alert("memory allocation error initializing dns tcp server '%s'.\n", srv->id);
+		goto out;
+	}
+
+	dss->srv = srv;
+	dss->maxconn = srv->maxconn;
+
+	dss->ofs_req = ~0; /* init ring offset */
+	dss->ring_req = ring_new(2*DNS_TCP_MSG_RING_MAX_SIZE);
+	if (!dss->ring_req) {
+		ha_alert("memory allocation error initializing the ring for dns tcp server '%s'.\n", srv->id);
+		goto out;
+	}
+	/* Create the task associated to the resolver target handling conns */
+	if ((dss->task_req = task_new_anywhere()) == NULL) {
+		ha_alert("memory allocation error initializing the ring for dns tcp server '%s'.\n", srv->id);
+		goto out;
+	}
+
+	/* Update task's parameters */
+	dss->task_req->process = dns_process_req;
+	dss->task_req->context = ns;
+
+	/* attach the task as reader */
+	if (!ring_attach(dss->ring_req)) {
+		/* mark server attached to the ring */
+		ha_alert("server '%s': too many watchers for ring. this should never happen.\n", srv->id);
+		goto out;
+	}
+
+	/* Create the task handling the responses */
+	if ((dss->task_rsp = task_new_anywhere()) == NULL) {
+		ha_alert("memory allocation error initializing the ring for dns tcp server '%s'.\n", srv->id);
+		goto out;
+	}
+
+	/* Update task's parameters */
+	dss->task_rsp->process = dns_process_rsp;
+	dss->task_rsp->context = ns;
+
+	/* Create the task expiring idle sessions */
+	if ((dss->task_idle = task_new_anywhere()) == NULL) {
+		ha_alert("memory allocation error initializing the ring for dns tcp server '%s'.\n", srv->id);
+		goto out;
+	}
+
+	/* Update task's parameters */
+	dss->task_idle->process = dns_process_idle_exp;
+	dss->task_idle->context = dss;
+	dss->task_idle->expire = tick_add(now_ms, 5000);
+
+	/* let start the task to free idle conns immediately */
+	task_queue(dss->task_idle);
+
+	LIST_INIT(&dss->free_sess);
+	LIST_INIT(&dss->idle_sess);
+	LIST_INIT(&dss->wait_sess);
+	HA_SPIN_INIT(&dss->lock);
+	ns->stream = dss;
+	return 0;
+out:
+	/* release whatever was allocated before the failure */
+	if (dss && dss->task_rsp)
+		task_destroy(dss->task_rsp);
+	if (dss && dss->task_req)
+		task_destroy(dss->task_req);
+	if (dss && dss->ring_req)
+		ring_free(dss->ring_req);
+
+	free(dss);
+	return -1;
+}
+
+/* Per-thread allocation of the DNS scratch buffer.
+ * Returns 1 on success, 0 on allocation failure.
+ */
+int init_dns_buffers()
+{
+	dns_msg_trash = malloc(DNS_TCP_MSG_MAX_SIZE);
+	return dns_msg_trash ? 1 : 0;
+}
+
+/* Per-thread release of the DNS scratch buffer (pointer is reset to
+ * NULL by ha_free).
+ */
+void deinit_dns_buffers()
+{
+	ha_free(&dns_msg_trash);
+}
+
+/* allocate/release the per-thread scratch buffer at thread start/stop */
+REGISTER_PER_THREAD_ALLOC(init_dns_buffers);
+REGISTER_PER_THREAD_FREE(deinit_dns_buffers);
diff --git a/src/dynbuf.c b/src/dynbuf.c
new file mode 100644
index 0000000..712e334
--- /dev/null
+++ b/src/dynbuf.c
@@ -0,0 +1,129 @@
+/*
+ * Buffer management functions.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+
+struct pool_head *pool_head_buffer __read_mostly;
+
+/* perform minimal initializations, report 0 in case of error, 1 if OK. */
+int init_buffer()
+{
+ void *buffer;
+ int thr;
+ int done;
+
+ pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
+ if (!pool_head_buffer)
+ return 0;
+
+ for (thr = 0; thr < MAX_THREADS; thr++)
+ LIST_INIT(&ha_thread_ctx[thr].buffer_wq);
+
+
+ /* The reserved buffer is what we leave behind us. Thus we always need
+ * at least one extra buffer in minavail otherwise we'll end up waking
+ * up tasks with no memory available, causing a lot of useless wakeups.
+ * That means that we always want to have at least 3 buffers available
+ * (2 for current session, one for next session that might be needed to
+ * release a server connection).
+ */
+ pool_head_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
+ if (global.tune.buf_limit)
+ pool_head_buffer->limit = global.tune.buf_limit;
+
+ for (done = 0; done < pool_head_buffer->minavail - 1; done++) {
+ buffer = pool_alloc_nocache(pool_head_buffer, init_buffer);
+ if (!buffer)
+ return 0;
+ pool_free(pool_head_buffer, buffer);
+ }
+ return 1;
+}
+
+/*
+ * Dumps part or all of a buffer.
+ */
+void buffer_dump(FILE *o, struct buffer *b, int from, int to)
+{
+ fprintf(o, "Dumping buffer %p\n", b);
+ fprintf(o, " orig=%p size=%u head=%u tail=%u data=%u\n",
+ b_orig(b), (unsigned int)b_size(b), (unsigned int)b_head_ofs(b), (unsigned int)b_tail_ofs(b), (unsigned int)b_data(b));
+
+ fprintf(o, "Dumping contents from byte %d to byte %d\n", from, to);
+ fprintf(o, " 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
+ /* dump hexa */
+ while (from < to) {
+ int i;
+
+ fprintf(o, " %04x: ", from);
+ for (i = 0; ((from + i) < to) && (i < 16) ; i++) {
+ fprintf(o, "%02x ", (unsigned char)b_orig(b)[from + i]);
+ if (i == 7)
+ fprintf(o, "- ");
+ }
+ if (to - from < 16) {
+ int j = 0;
+
+ for (j = 0; j < from + 16 - to; j++)
+ fprintf(o, " ");
+ if (j > 8)
+ fprintf(o, " ");
+ }
+ fprintf(o, " ");
+ for (i = 0; (from + i < to) && (i < 16) ; i++) {
+ fprintf(o, "%c", isprint((unsigned char)b_orig(b)[from + i]) ? b_orig(b)[from + i] : '.') ;
+ if ((i == 15) && ((from + i) != to-1))
+ fprintf(o, "\n");
+ }
+ from += i;
+ }
+ fprintf(o, "\n--\n");
+ fflush(o);
+}
+
+/* see offer_buffers() for details */
+void __offer_buffers(void *from, unsigned int count)
+{
+ struct buffer_wait *wait, *wait_back;
+
+ /* For now, we consider that all objects need 1 buffer, so we can stop
+ * waking up them once we have enough of them to eat all the available
+ * buffers. Note that we don't really know if they are streams or just
+ * other tasks, but that's a rough estimate. Similarly, for each cached
+ * event we'll need 1 buffer.
+ */
+ list_for_each_entry_safe(wait, wait_back, &th_ctx->buffer_wq, list) {
+ if (!count)
+ break;
+
+ if (wait->target == from || !wait->wakeup_cb(wait->target))
+ continue;
+
+ LIST_DEL_INIT(&wait->list);
+ count--;
+ }
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/eb32sctree.c b/src/eb32sctree.c
new file mode 100644
index 0000000..af6a539
--- /dev/null
+++ b/src/eb32sctree.c
@@ -0,0 +1,472 @@
+/*
+ * Elastic Binary Trees - exported functions for operations on 32bit nodes.
+ * Version 6.0.6 with backports from v7-dev
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult eb32sctree.h for more details about those functions */
+
+#include <import/eb32sctree.h>
+
+
+/* This function is used to build a tree of duplicates by adding a new node to
+ * a subtree of at least 2 entries.
+ */
+struct eb32sc_node *eb32sc_insert_dup(struct eb_node *sub, struct eb_node *new, unsigned long scope)
+{
+ struct eb32sc_node *eb32;
+ struct eb_node *head = sub;
+ eb_troot_t *new_left = eb_dotag(&new->branches, EB_LEFT);
+ eb_troot_t *new_rght = eb_dotag(&new->branches, EB_RGHT);
+ eb_troot_t *new_leaf = eb_dotag(&new->branches, EB_LEAF);
+
+ /* first, identify the deepest hole on the right branch */
+ while (eb_gettag(head->branches.b[EB_RGHT]) != EB_LEAF) {
+ struct eb_node *last = head;
+
+ head = container_of(eb_untag(head->branches.b[EB_RGHT], EB_NODE),
+ struct eb_node, branches);
+
+ if (unlikely(head->bit > last->bit + 1)) {
+ /* there's a hole here, we must assign the top of the
+ * following sub-tree to <sub> and mark all intermediate
+ * nodes with the scope mask.
+ */
+ do {
+ eb32 = container_of(sub, struct eb32sc_node, node);
+ if (!(eb32->node_s & scope))
+ eb32->node_s |= scope;
+
+ sub = container_of(eb_untag(sub->branches.b[EB_RGHT], EB_NODE),
+ struct eb_node, branches);
+ } while (sub != head);
+ }
+
+ eb32 = container_of(head, struct eb32sc_node, node);
+ if (!(eb32->node_s & scope))
+ eb32->node_s |= scope;
+ }
+
+ /* Here we have a leaf attached to (head)->b[EB_RGHT] */
+ if (head->bit < -1) {
+ /* A hole exists just before the leaf, we insert there */
+ new->bit = -1;
+ sub = container_of(eb_untag(head->branches.b[EB_RGHT], EB_LEAF),
+ struct eb_node, branches);
+ head->branches.b[EB_RGHT] = eb_dotag(&new->branches, EB_NODE);
+
+ new->node_p = sub->leaf_p;
+ new->leaf_p = new_rght;
+ sub->leaf_p = new_left;
+ new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_LEAF);
+ new->branches.b[EB_RGHT] = new_leaf;
+ eb32 = container_of(new, struct eb32sc_node, node);
+ eb32->node_s = container_of(sub, struct eb32sc_node, node)->leaf_s | scope;
+ return eb32;
+ } else {
+ int side;
+ /* No hole was found before a leaf. We have to insert above
+ * <sub>. Note that we cannot be certain that <sub> is attached
+ * to the right of its parent, as this is only true if <sub>
+ * is inside the dup tree, not at the head.
+ */
+ new->bit = sub->bit - 1; /* install at the lowest level */
+ side = eb_gettag(sub->node_p);
+ head = container_of(eb_untag(sub->node_p, side), struct eb_node, branches);
+ head->branches.b[side] = eb_dotag(&new->branches, EB_NODE);
+
+ new->node_p = sub->node_p;
+ new->leaf_p = new_rght;
+ sub->node_p = new_left;
+ new->branches.b[EB_LEFT] = eb_dotag(&sub->branches, EB_NODE);
+ new->branches.b[EB_RGHT] = new_leaf;
+ eb32 = container_of(new, struct eb32sc_node, node);
+ eb32->node_s = container_of(sub, struct eb32sc_node, node)->node_s | scope;
+ return eb32;
+ }
+}
+
+/* Insert eb32sc_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the key. The eb32sc_node is returned. This
+ * implementation does NOT support unique trees.
+ */
+struct eb32sc_node *eb32sc_insert(struct eb_root *root, struct eb32sc_node *new, unsigned long scope)
+{
+ struct eb32sc_node *old;
+ unsigned int side;
+ eb_troot_t *troot, **up_ptr;
+ u32 newkey; /* caching the key saves approximately one cycle */
+ eb_troot_t *new_left, *new_rght;
+ eb_troot_t *new_leaf;
+ int old_node_bit;
+ unsigned long old_scope;
+
+ side = EB_LEFT;
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL)) {
+ /* Tree is empty, insert the leaf part below the left branch */
+ root->b[EB_LEFT] = eb_dotag(&new->node.branches, EB_LEAF);
+ new->node.leaf_p = eb_dotag(root, EB_LEFT);
+ new->node.node_p = NULL; /* node part unused */
+ new->node_s = scope;
+ new->leaf_s = scope;
+ return new;
+ }
+
+ /* The tree descent is fairly easy :
+ * - first, check if we have reached a leaf node
+ * - second, check if we have gone too far
+ * - third, reiterate
+ * Everywhere, we use <new> for the node we are inserting, <root>
+ * for the node we attach it to, and <old> for the node we are
+ * displacing below <new>. <troot> will always point to the future node
+ * (tagged with its type). <side> carries the side the node <new> is
+ * attached to below its parent, which is also where previous node
+ * was attached. <newkey> carries the key being inserted.
+ */
+ newkey = new->key;
+
+ while (1) {
+ if (eb_gettag(troot) == EB_LEAF) {
+ /* insert above a leaf */
+ old = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32sc_node, node.branches);
+ new->node.node_p = old->node.leaf_p;
+ up_ptr = &old->node.leaf_p;
+ old_scope = old->leaf_s;
+ break;
+ }
+
+ /* OK we're walking down this link */
+ old = container_of(eb_untag(troot, EB_NODE),
+ struct eb32sc_node, node.branches);
+ old_node_bit = old->node.bit;
+
+ /* our new node will be found through this one, we must mark it */
+ if ((old->node_s | scope) != old->node_s)
+ old->node_s |= scope;
+
+ /* Stop going down when we don't have common bits anymore. We
+ * also stop in front of a duplicates tree because it means we
+ * have to insert above.
+ */
+
+ if ((old_node_bit < 0) || /* we're above a duplicate tree, stop here */
+ (((new->key ^ old->key) >> old_node_bit) >= EB_NODE_BRANCHES)) {
+ /* The tree did not contain the key, so we insert <new> before the node
+ * <old>, and set ->bit to designate the lowest bit position in <new>
+ * which applies to ->branches.b[].
+ */
+ new->node.node_p = old->node.node_p;
+ up_ptr = &old->node.node_p;
+ old_scope = old->node_s;
+ break;
+ }
+
+ /* walk down */
+ root = &old->node.branches;
+ side = (newkey >> old_node_bit) & EB_NODE_BRANCH_MASK;
+ troot = root->b[side];
+ }
+
+ new_left = eb_dotag(&new->node.branches, EB_LEFT);
+ new_rght = eb_dotag(&new->node.branches, EB_RGHT);
+ new_leaf = eb_dotag(&new->node.branches, EB_LEAF);
+
+ /* We need the common higher bits between new->key and old->key.
+ * What differences are there between new->key and the node here ?
+ * NOTE that bit(new) is always < bit(root) because highest
+ * bit of new->key and old->key are identical here (otherwise they
+ * would sit on different branches).
+ */
+
+ // note that if EB_NODE_BITS > 1, we should check that it's still >= 0
+ new->node.bit = flsnz(new->key ^ old->key) - EB_NODE_BITS;
+ new->leaf_s = scope;
+ new->node_s = old_scope | scope;
+
+ if (new->key == old->key) {
+ new->node.bit = -1; /* mark as new dup tree, just in case */
+
+ if (eb_gettag(troot) != EB_LEAF) {
+ /* there was already a dup tree below */
+ return eb32sc_insert_dup(&old->node, &new->node, scope);
+ }
+ /* otherwise fall through */
+ }
+
+ if (new->key >= old->key) {
+ new->node.branches.b[EB_LEFT] = troot;
+ new->node.branches.b[EB_RGHT] = new_leaf;
+ new->node.leaf_p = new_rght;
+ *up_ptr = new_left;
+ }
+ else {
+ new->node.branches.b[EB_LEFT] = new_leaf;
+ new->node.branches.b[EB_RGHT] = troot;
+ new->node.leaf_p = new_left;
+ *up_ptr = new_rght;
+ }
+
+ /* Ok, now we are inserting <new> between <root> and <old>. <old>'s
+ * parent is already set to <new>, and the <root>'s branch is still in
+ * <side>. Update the root's leaf till we have it. Note that we can also
+ * find the side by checking the side of new->node.node_p.
+ */
+
+ root->b[side] = eb_dotag(&new->node.branches, EB_NODE);
+ return new;
+}
+
+/*
+ * Find the first occurrence of the lowest key in the tree <root>, which is
+ * equal to or greater than <x>. NULL is returned if no key matches.
+ */
+struct eb32sc_node *eb32sc_lookup_ge(struct eb_root *root, u32 x, unsigned long scope)
+{
+ struct eb32sc_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its next one if the former is too small.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32sc_node, node.branches);
+ if ((node->leaf_s & scope) && node->key >= x)
+ return node;
+ /* return next */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32sc_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the leftmost node, or
+ * we don't and we skip the whole subtree to return the
+ * next node after the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * next node without first trying to escape from the
+ * tree.
+ */
+ if ((node->node_s & scope) && node->key >= x)
+ troot = eb_dotag(&node->node.branches, EB_LEFT);
+ else
+ troot = node->node.node_p;
+ break;
+ }
+
+ if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * large and we need to get its lowest value, or it is too
+ * small, and we need to get the next value.
+ */
+ if ((node->node_s & scope) && (node->key >> node->node.bit) > (x >> node->node.bit))
+ troot = eb_dotag(&node->node.branches, EB_LEFT);
+ else
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report next node after the
+ * current one which is not below. <troot> is already initialised
+ * to the parent's branches.
+ */
+ return eb32sc_next_with_parent(troot, scope);
+}
+
+/*
+ * Find the first occurrence of the lowest key in the tree <root> which is
+ * equal to or greater than <x>, matching scope <scope>. If not found, it loops
+ * back to the beginning of the tree. NULL is returned if no key matches.
+ */
+struct eb32sc_node *eb32sc_lookup_ge_or_first(struct eb_root *root, u32 x, unsigned long scope)
+{
+ struct eb32sc_node *eb32;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its next one if the former is too small.
+ */
+ eb32 = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32sc_node, node.branches);
+ if ((eb32->leaf_s & scope) && eb32->key >= x)
+ return eb32;
+ /* return next */
+ troot = eb32->node.leaf_p;
+ break;
+ }
+ eb32 = container_of(eb_untag(troot, EB_NODE),
+ struct eb32sc_node, node.branches);
+
+ if (eb32->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the leftmost node, or
+ * we don't and we skip the whole subtree to return the
+ * next node after the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * next node without first trying to escape from the
+ * tree.
+ */
+ if ((eb32->node_s & scope) && eb32->key >= x)
+ troot = eb_dotag(&eb32->node.branches, EB_LEFT);
+ else
+ troot = eb32->node.node_p;
+ break;
+ }
+
+ if (((x ^ eb32->key) >> eb32->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * large and we need to get its lowest value, or it is too
+ * small, and we need to get the next value.
+ */
+ if ((eb32->node_s & scope) && (eb32->key >> eb32->node.bit) > (x >> eb32->node.bit))
+ troot = eb_dotag(&eb32->node.branches, EB_LEFT);
+ else
+ troot = eb32->node.node_p;
+ break;
+ }
+ troot = eb32->node.branches.b[(x >> eb32->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report next node after the
+ * current one which is not below. <troot> is already initialised
+ * to the parent's branches.
+ */
+ eb32 = eb32sc_next_with_parent(troot, scope);
+ if (!eb32)
+ eb32 = eb32sc_walk_down_left(root->b[EB_LEFT], scope);
+
+ return eb32;
+}
+
+/* Removes a leaf node from the tree if it was still in it. Marks the node
+ * as unlinked.
+ */
+void eb32sc_delete(struct eb32sc_node *eb32)
+{
+ struct eb_node *node = &eb32->node;
+ unsigned int pside, gpside, sibtype;
+ struct eb_node *parent;
+ struct eb_root *gparent;
+ unsigned long scope;
+
+ if (!node->leaf_p)
+ return;
+
+ /* we need the parent, our side, and the grand parent */
+ pside = eb_gettag(node->leaf_p);
+ parent = eb_root_to_node(eb_untag(node->leaf_p, pside));
+
+ /* We likely have to release the parent link, unless it's the root,
+ * in which case we only set our branch to NULL. Note that we can
+ * only be attached to the root by its left branch.
+ */
+
+ if (eb_clrtag(parent->branches.b[EB_RGHT]) == NULL) {
+ /* we're just below the root, it's trivial. */
+ parent->branches.b[EB_LEFT] = NULL;
+ goto delete_unlink;
+ }
+
+ /* To release our parent, we have to identify our sibling, and reparent
+ * it directly to/from the grand parent. Note that the sibling can
+ * either be a link or a leaf.
+ */
+
+ gpside = eb_gettag(parent->node_p);
+ gparent = eb_untag(parent->node_p, gpside);
+
+ gparent->b[gpside] = parent->branches.b[!pside];
+ sibtype = eb_gettag(gparent->b[gpside]);
+
+ if (sibtype == EB_LEAF) {
+ eb_root_to_node(eb_untag(gparent->b[gpside], EB_LEAF))->leaf_p =
+ eb_dotag(gparent, gpside);
+ } else {
+ eb_root_to_node(eb_untag(gparent->b[gpside], EB_NODE))->node_p =
+ eb_dotag(gparent, gpside);
+ }
+ /* Mark the parent unused. Note that we do not check if the parent is
+ * our own node, but that's not a problem because if it is, it will be
+ * marked unused at the same time, which we'll use below to know we can
+ * safely remove it.
+ */
+ parent->node_p = NULL;
+
+ /* The parent node has been detached, and is currently unused. It may
+ * belong to another node, so we cannot remove it that way. Also, our
+ * own node part might still be used. so we can use this spare node
+ * to replace ours if needed.
+ */
+
+ /* If our link part is unused, we can safely exit now */
+ if (!node->node_p)
+ goto delete_unlink;
+
+ /* From now on, <node> and <parent> are necessarily different, and the
+ * <node>'s node part is in use. By definition, <parent> is at least
+ * below <node>, so keeping its key for the bit string is OK. However
+ * its scope must be enlarged to cover the new branch it absorbs.
+ */
+
+ parent->node_p = node->node_p;
+ parent->branches = node->branches;
+ parent->bit = node->bit;
+
+ /* We must now update the new node's parent... */
+ gpside = eb_gettag(parent->node_p);
+ gparent = eb_untag(parent->node_p, gpside);
+ gparent->b[gpside] = eb_dotag(&parent->branches, EB_NODE);
+
+ /* ... and its branches */
+ scope = 0;
+ for (pside = 0; pside <= 1; pside++) {
+ if (eb_gettag(parent->branches.b[pside]) == EB_NODE) {
+ eb_root_to_node(eb_untag(parent->branches.b[pside], EB_NODE))->node_p =
+ eb_dotag(&parent->branches, pside);
+ scope |= container_of(eb_untag(parent->branches.b[pside], EB_NODE), struct eb32sc_node, node.branches)->node_s;
+ } else {
+ eb_root_to_node(eb_untag(parent->branches.b[pside], EB_LEAF))->leaf_p =
+ eb_dotag(&parent->branches, pside);
+ scope |= container_of(eb_untag(parent->branches.b[pside], EB_LEAF), struct eb32sc_node, node.branches)->leaf_s;
+ }
+ }
+ container_of(parent, struct eb32sc_node, node)->node_s = scope;
+
+ delete_unlink:
+ /* Now the node has been completely unlinked */
+ node->leaf_p = NULL;
+ return; /* tree is not empty yet */
+}
diff --git a/src/eb32tree.c b/src/eb32tree.c
new file mode 100644
index 0000000..38ddab0
--- /dev/null
+++ b/src/eb32tree.c
@@ -0,0 +1,218 @@
+/*
+ * Elastic Binary Trees - exported functions for operations on 32bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult eb32tree.h for more details about those functions */
+
+#include <import/eb32tree.h>
+
+struct eb32_node *eb32_insert(struct eb_root *root, struct eb32_node *new)
+{
+ return __eb32_insert(root, new);
+}
+
+struct eb32_node *eb32i_insert(struct eb_root *root, struct eb32_node *new)
+{
+ return __eb32i_insert(root, new);
+}
+
+struct eb32_node *eb32_lookup(struct eb_root *root, u32 x)
+{
+ return __eb32_lookup(root, x);
+}
+
+struct eb32_node *eb32i_lookup(struct eb_root *root, s32 x)
+{
+ return __eb32i_lookup(root, x);
+}
+
+/*
+ * Find the last occurrence of the highest key in the tree <root>, which is
+ * equal to or less than <x>. NULL is returned if no key matches.
+ */
+struct eb32_node *eb32_lookup_le(struct eb_root *root, u32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its prev one if the former is too large.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key <= x)
+ return node;
+ /* return prev */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the rightmost node, or
+ * we don't and we skip the whole subtree to return the
+ * prev node before the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * prev node without first trying to escape from the
+ * tree.
+ */
+ if (node->key <= x) {
+ troot = node->node.branches.b[EB_RGHT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_RGHT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ /* return prev */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * small and we need to get its highest value, or it is
+ * too large, and we need to get the prev value.
+ */
+ if ((node->key >> node->node.bit) < (x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_RGHT];
+ return eb32_entry(eb_walk_down(troot, EB_RGHT), struct eb32_node, node);
+ }
+
+ /* Further values will be too high here, so return the prev
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report previous node before the
+ * current one which is not above. <troot> is already initialised to
+ * the parent's branches.
+ */
+ while (eb_gettag(troot) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(troot, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ troot = (eb_root_to_node(eb_untag(troot, EB_LEFT)))->node_p;
+ }
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_RGHT))->b[EB_LEFT];
+ node = eb32_entry(eb_walk_down(troot, EB_RGHT), struct eb32_node, node);
+ return node;
+}
+
+/*
+ * Find the first occurrence of the lowest key in the tree <root>, which is
+ * equal to or greater than <x>. NULL is returned if no key matches.
+ */
+struct eb32_node *eb32_lookup_ge(struct eb_root *root, u32 x)
+{
+ struct eb32_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its next one if the former is too small.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ if (node->key >= x)
+ return node;
+ /* return next */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb32_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the leftmost node, or
+ * we don't and we skip the whole subtree to return the
+ * next node after the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * next node without first trying to escape from the
+ * tree.
+ */
+ if (node->key >= x) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct eb32_node, node.branches);
+ }
+ /* return next */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * large and we need to get its lowest value, or it is too
+ * small, and we need to get the next value.
+ */
+ if ((node->key >> node->node.bit) > (x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_LEFT];
+ return eb32_entry(eb_walk_down(troot, EB_LEFT), struct eb32_node, node);
+ }
+
+ /* Further values will be too low here, so return the next
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report next node after the
+ * current one which is not below. <troot> is already initialised
+ * to the parent's branches.
+ */
+ while (eb_gettag(troot) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ troot = (eb_root_to_node(eb_untag(troot, EB_RGHT)))->node_p;
+
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(troot) == NULL)
+ return NULL;
+
+ node = eb32_entry(eb_walk_down(troot, EB_LEFT), struct eb32_node, node);
+ return node;
+}
diff --git a/src/eb64tree.c b/src/eb64tree.c
new file mode 100644
index 0000000..b908d4d
--- /dev/null
+++ b/src/eb64tree.c
@@ -0,0 +1,218 @@
+/*
+ * Elastic Binary Trees - exported functions for operations on 64bit nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult eb64tree.h for more details about those functions */
+
+#include <import/eb64tree.h>
+
+struct eb64_node *eb64_insert(struct eb_root *root, struct eb64_node *new)
+{
+ return __eb64_insert(root, new);
+}
+
+struct eb64_node *eb64i_insert(struct eb_root *root, struct eb64_node *new)
+{
+ return __eb64i_insert(root, new);
+}
+
+struct eb64_node *eb64_lookup(struct eb_root *root, u64 x)
+{
+ return __eb64_lookup(root, x);
+}
+
+struct eb64_node *eb64i_lookup(struct eb_root *root, s64 x)
+{
+ return __eb64i_lookup(root, x);
+}
+
+/*
+ * Find the last occurrence of the highest key in the tree <root>, which is
+ * equal to or less than <x>. NULL is returned if no key matches.
+ */
+struct eb64_node *eb64_lookup_le(struct eb_root *root, u64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its prev one if the former is too large.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key <= x)
+ return node;
+ /* return prev */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the rightmost node, or
+ * we don't and we skip the whole subtree to return the
+ * prev node before the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * prev node without first trying to escape from the
+ * tree.
+ */
+ if (node->key <= x) {
+ troot = node->node.branches.b[EB_RGHT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_RGHT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ /* return prev */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * small and we need to get its highest value, or it is
+ * too large, and we need to get the prev value.
+ */
+ if ((node->key >> node->node.bit) < (x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_RGHT];
+ return eb64_entry(eb_walk_down(troot, EB_RGHT), struct eb64_node, node);
+ }
+
+ /* Further values will be too high here, so return the prev
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report previous node before the
+ * current one which is not above. <troot> is already initialised to
+ * the parent's branches.
+ */
+ while (eb_gettag(troot) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(troot, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ troot = (eb_root_to_node(eb_untag(troot, EB_LEFT)))->node_p;
+ }
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_RGHT))->b[EB_LEFT];
+ node = eb64_entry(eb_walk_down(troot, EB_RGHT), struct eb64_node, node);
+ return node;
+}
+
+/*
+ * Find the first occurrence of the lowest key in the tree <root>, which is
+ * equal to or greater than <x>. NULL is returned if no key matches.
+ */
+struct eb64_node *eb64_lookup_ge(struct eb_root *root, u64 x)
+{
+ struct eb64_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its next one if the former is too small.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ if (node->key >= x)
+ return node;
+ /* return next */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct eb64_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the leftmost node, or
+ * we don't and we skip the whole subtree to return the
+ * next node after the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * next node without first trying to escape from the
+ * tree.
+ */
+ if (node->key >= x) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct eb64_node, node.branches);
+ }
+ /* return next */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if (((x ^ node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * large and we need to get its lowest value, or it is too
+ * small, and we need to get the next value.
+ */
+ if ((node->key >> node->node.bit) > (x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_LEFT];
+ return eb64_entry(eb_walk_down(troot, EB_LEFT), struct eb64_node, node);
+ }
+
+ /* Further values will be too low here, so return the next
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[(x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report next node after the
+ * current one which is not below. <troot> is already initialised
+ * to the parent's branches.
+ */
+ while (eb_gettag(troot) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ troot = (eb_root_to_node(eb_untag(troot, EB_RGHT)))->node_p;
+
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(troot) == NULL)
+ return NULL;
+
+ node = eb64_entry(eb_walk_down(troot, EB_LEFT), struct eb64_node, node);
+ return node;
+}
diff --git a/src/ebimtree.c b/src/ebimtree.c
new file mode 100644
index 0000000..1ac444a
--- /dev/null
+++ b/src/ebimtree.c
@@ -0,0 +1,44 @@
+/*
+ * Elastic Binary Trees - exported functions for Indirect Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult ebimtree.h for more details about those functions */
+
+#include <import/ebimtree.h>
+#include <import/ebpttree.h>
+
+/* Find the first occurrence of a key of <len> bytes in the tree <root>.
+ * If none can be found, return NULL.
+ */
+struct ebpt_node *
+ebim_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ return __ebim_lookup(root, x, len);
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebpt_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+struct ebpt_node *
+ebim_insert(struct eb_root *root, struct ebpt_node *new, unsigned int len)
+{
+ return __ebim_insert(root, new, len);
+}
diff --git a/src/ebistree.c b/src/ebistree.c
new file mode 100644
index 0000000..193950d
--- /dev/null
+++ b/src/ebistree.c
@@ -0,0 +1,42 @@
+/*
+ * Elastic Binary Trees - exported functions for Indirect String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult ebistree.h for more details about those functions */
+
+#include <import/ebistree.h>
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+struct ebpt_node *ebis_lookup(struct eb_root *root, const char *x)
+{
+ return __ebis_lookup(root, x);
+}
+
+/* Insert ebpt_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebpt_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+struct ebpt_node *ebis_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ return __ebis_insert(root, new);
+}
diff --git a/src/ebmbtree.c b/src/ebmbtree.c
new file mode 100644
index 0000000..a3de9a1
--- /dev/null
+++ b/src/ebmbtree.c
@@ -0,0 +1,77 @@
+/*
+ * Elastic Binary Trees - exported functions for Multi-Byte data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult ebmbtree.h for more details about those functions */
+
+#include <import/ebmbtree.h>
+
+/* Find the first occurrence of a key of <len> bytes in the tree <root>.
+ * If none can be found, return NULL.
+ */
+struct ebmb_node *
+ebmb_lookup(struct eb_root *root, const void *x, unsigned int len)
+{
+ return __ebmb_lookup(root, x, len);
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>.
+ * Only new->key needs be set with the key. The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+struct ebmb_node *
+ebmb_insert(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ return __ebmb_insert(root, new, len);
+}
+
+/* Find the first occurrence of the longest prefix matching a key <x> in the
+ * tree <root>. It's the caller's responsibility to ensure that key <x> is at
+ * least as long as the keys in the tree. If none can be found, return NULL.
+ */
+struct ebmb_node *
+ebmb_lookup_longest(struct eb_root *root, const void *x)
+{
+ return __ebmb_lookup_longest(root, x);
+}
+
+/* Find the first occurrence of a prefix matching a key <x> of <pfx> BITS in the
+ * tree <root>. If none can be found, return NULL.
+ */
+struct ebmb_node *
+ebmb_lookup_prefix(struct eb_root *root, const void *x, unsigned int pfx)
+{
+ return __ebmb_lookup_prefix(root, x, pfx);
+}
+
+/* Insert ebmb_node <new> into a prefix subtree starting at node root <root>.
+ * Only new->key and new->pfx need be set with the key and its prefix length.
+ * Note that bits between <pfx> and <len> are theoretically ignored and should be
+ * zero, as it is not certain yet that they will always be ignored everywhere
+ * (eg in bit compare functions).
+ * The ebmb_node is returned.
+ * If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * len is specified in bytes.
+ */
+struct ebmb_node *
+ebmb_insert_prefix(struct eb_root *root, struct ebmb_node *new, unsigned int len)
+{
+ return __ebmb_insert_prefix(root, new, len);
+}
diff --git a/src/ebpttree.c b/src/ebpttree.c
new file mode 100644
index 0000000..558d334
--- /dev/null
+++ b/src/ebpttree.c
@@ -0,0 +1,208 @@
+/*
+ * Elastic Binary Trees - exported functions for operations on pointer nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult ebpttree.h for more details about those functions */
+
+#include <import/ebpttree.h>
+
+struct ebpt_node *ebpt_insert(struct eb_root *root, struct ebpt_node *new)
+{
+ return __ebpt_insert(root, new);
+}
+
+struct ebpt_node *ebpt_lookup(struct eb_root *root, void *x)
+{
+ return __ebpt_lookup(root, x);
+}
+
+/*
+ * Find the last occurrence of the highest key in the tree <root>, which is
+ * equal to or less than <x>. NULL is returned if no key matches.
+ */
+struct ebpt_node *ebpt_lookup_le(struct eb_root *root, void *x)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+			 * node or its prev one if the former is too large.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (node->key <= x)
+ return node;
+ /* return prev */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the rightmost node, or
+ * we don't and we skip the whole subtree to return the
+ * prev node before the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * prev node without first trying to escape from the
+ * tree.
+ */
+ if (node->key <= x) {
+ troot = node->node.branches.b[EB_RGHT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_RGHT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ }
+ /* return prev */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if ((((ptr_t)x ^ (ptr_t)node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * small and we need to get its highest value, or it is
+ * too large, and we need to get the prev value.
+ */
+ if (((ptr_t)node->key >> node->node.bit) < ((ptr_t)x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_RGHT];
+ return ebpt_entry(eb_walk_down(troot, EB_RGHT), struct ebpt_node, node);
+ }
+
+ /* Further values will be too high here, so return the prev
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[((ptr_t)x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report previous node before the
+ * current one which is not above. <troot> is already initialised to
+ * the parent's branches.
+ */
+ while (eb_gettag(troot) == EB_LEFT) {
+ /* Walking up from left branch. We must ensure that we never
+ * walk beyond root.
+ */
+ if (unlikely(eb_clrtag((eb_untag(troot, EB_LEFT))->b[EB_RGHT]) == NULL))
+ return NULL;
+ troot = (eb_root_to_node(eb_untag(troot, EB_LEFT)))->node_p;
+ }
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_RGHT))->b[EB_LEFT];
+ node = ebpt_entry(eb_walk_down(troot, EB_RGHT), struct ebpt_node, node);
+ return node;
+}
+
+/*
+ * Find the first occurrence of the lowest key in the tree <root>, which is
+ * equal to or greater than <x>. NULL is returned if no key matches.
+ */
+struct ebpt_node *ebpt_lookup_ge(struct eb_root *root, void *x)
+{
+ struct ebpt_node *node;
+ eb_troot_t *troot;
+
+ troot = root->b[EB_LEFT];
+ if (unlikely(troot == NULL))
+ return NULL;
+
+ while (1) {
+ if ((eb_gettag(troot) == EB_LEAF)) {
+ /* We reached a leaf, which means that the whole upper
+ * parts were common. We will return either the current
+ * node or its next one if the former is too small.
+ */
+ node = container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ if (node->key >= x)
+ return node;
+ /* return next */
+ troot = node->node.leaf_p;
+ break;
+ }
+ node = container_of(eb_untag(troot, EB_NODE),
+ struct ebpt_node, node.branches);
+
+ if (node->node.bit < 0) {
+ /* We're at the top of a dup tree. Either we got a
+ * matching value and we return the leftmost node, or
+ * we don't and we skip the whole subtree to return the
+ * next node after the subtree. Note that since we're
+ * at the top of the dup tree, we can simply return the
+ * next node without first trying to escape from the
+ * tree.
+ */
+ if (node->key >= x) {
+ troot = node->node.branches.b[EB_LEFT];
+ while (eb_gettag(troot) != EB_LEAF)
+ troot = (eb_untag(troot, EB_NODE))->b[EB_LEFT];
+ return container_of(eb_untag(troot, EB_LEAF),
+ struct ebpt_node, node.branches);
+ }
+ /* return next */
+ troot = node->node.node_p;
+ break;
+ }
+
+ if ((((ptr_t)x ^ (ptr_t)node->key) >> node->node.bit) >= EB_NODE_BRANCHES) {
+ /* No more common bits at all. Either this node is too
+ * large and we need to get its lowest value, or it is too
+ * small, and we need to get the next value.
+ */
+ if (((ptr_t)node->key >> node->node.bit) > ((ptr_t)x >> node->node.bit)) {
+ troot = node->node.branches.b[EB_LEFT];
+ return ebpt_entry(eb_walk_down(troot, EB_LEFT), struct ebpt_node, node);
+ }
+
+ /* Further values will be too low here, so return the next
+ * unique node (if it exists).
+ */
+ troot = node->node.node_p;
+ break;
+ }
+ troot = node->node.branches.b[((ptr_t)x >> node->node.bit) & EB_NODE_BRANCH_MASK];
+ }
+
+ /* If we get here, it means we want to report next node after the
+ * current one which is not below. <troot> is already initialised
+ * to the parent's branches.
+ */
+ while (eb_gettag(troot) != EB_LEFT)
+ /* Walking up from right branch, so we cannot be below root */
+ troot = (eb_root_to_node(eb_untag(troot, EB_RGHT)))->node_p;
+
+ /* Note that <troot> cannot be NULL at this stage */
+ troot = (eb_untag(troot, EB_LEFT))->b[EB_RGHT];
+ if (eb_clrtag(troot) == NULL)
+ return NULL;
+
+ node = ebpt_entry(eb_walk_down(troot, EB_LEFT), struct ebpt_node, node);
+ return node;
+}
diff --git a/src/ebsttree.c b/src/ebsttree.c
new file mode 100644
index 0000000..a4fbe33
--- /dev/null
+++ b/src/ebsttree.c
@@ -0,0 +1,42 @@
+/*
+ * Elastic Binary Trees - exported functions for String data nodes.
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Consult ebsttree.h for more details about those functions */
+
+#include <import/ebsttree.h>
+
+/* Find the first occurrence of a zero-terminated string <x> in the tree <root>.
+ * It's the caller's responsibility to use this function only on trees which
+ * only contain zero-terminated strings. If none can be found, return NULL.
+ */
+struct ebmb_node *ebst_lookup(struct eb_root *root, const char *x)
+{
+ return __ebst_lookup(root, x);
+}
+
+/* Insert ebmb_node <new> into subtree starting at node root <root>. Only
+ * new->key needs be set with the zero-terminated string key. The ebmb_node is
+ * returned. If root->b[EB_RGHT]==1, the tree may only contain unique keys. The
+ * caller is responsible for properly terminating the key with a zero.
+ */
+struct ebmb_node *ebst_insert(struct eb_root *root, struct ebmb_node *new)
+{
+ return __ebst_insert(root, new);
+}
diff --git a/src/ebtree.c b/src/ebtree.c
new file mode 100644
index 0000000..db27875
--- /dev/null
+++ b/src/ebtree.c
@@ -0,0 +1,50 @@
+/*
+ * Elastic Binary Trees - exported generic functions
+ * Version 6.0.6
+ * (C) 2002-2011 - Willy Tarreau <w@1wt.eu>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <import/ebtree.h>
+
+void eb_delete(struct eb_node *node)
+{
+ __eb_delete(node);
+}
+
+/* used by insertion primitives */
+struct eb_node *eb_insert_dup(struct eb_node *sub, struct eb_node *new)
+{
+ return __eb_insert_dup(sub, new);
+}
+
+/* compares memory blocks m1 and m2 for up to <len> bytes. Immediately stops at
+ * the first non-matching byte. It returns 0 on full match, non-zero otherwise.
+ * One byte will always be checked so this must not be called with len==0. It
+ * takes 2+5cy/B on x86_64 and is ~29 bytes long.
+ */
+int eb_memcmp(const void *m1, const void *m2, size_t len)
+{
+ const char *p1 = (const char *)m1 + len;
+ const char *p2 = (const char *)m2 + len;
+ ssize_t ofs = -len;
+ char diff;
+
+ do {
+ diff = p1[ofs] - p2[ofs];
+ } while (!diff && ++ofs);
+ return diff;
+}
diff --git a/src/errors.c b/src/errors.c
new file mode 100644
index 0000000..7a2d14a
--- /dev/null
+++ b/src/errors.c
@@ -0,0 +1,567 @@
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <syslog.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/buf.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/ring.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+/* A global buffer used to store all startup alerts/warnings. It will then be
+ * retrieved via the CLI. */
+struct ring *startup_logs = NULL;
+uint tot_warnings = 0;
+#ifdef USE_SHM_OPEN
+static struct ring *shm_startup_logs = NULL;
+#endif
+
+/* A thread local buffer used to store all alerts/warnings. It can be used to
+ * retrieve them for CLI commands after startup.
+ */
+#define USER_MESSAGES_BUFSIZE 1024
+static THREAD_LOCAL struct buffer usermsgs_buf = BUF_NULL;
+
+/* A thread local context used for stderr output via ha_alert/warning/notice/diag.
+ */
+#define USERMSGS_CTX_BUFSIZE PATH_MAX
+static THREAD_LOCAL struct usermsgs_ctx usermsgs_ctx = { .str = BUF_NULL, };
+
+#ifdef USE_SHM_OPEN
+
+/* initialise an SHM for the startup logs and return its fd */
+static int startup_logs_new_shm()
+{
+ char *path = NULL;
+ int fd = -1;
+ int flags;
+
+ /* create a unique path per PID so we don't collide with another
+ process */
+ memprintf(&path, "/haproxy_startup_logs_%d", getpid());
+ fd = shm_open(path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
+ if (fd == -1)
+ goto error;
+ shm_unlink(path);
+ ha_free(&path);
+
+ if (ftruncate(fd, STARTUP_LOG_SIZE) == -1)
+ goto error;
+
+ flags = fcntl(fd, F_GETFD);
+ if (flags == -1)
+ goto error;
+ flags &= ~FD_CLOEXEC;
+ flags = fcntl(fd, F_SETFD, flags);
+ if (flags == -1)
+ goto error;
+
+ return fd;
+error:
+ if (fd != -1) {
+ close(fd);
+ fd = -1;
+ }
+ return fd;
+}
+
+/* mmap the startup logs from the given <fd>.
+ * if <new> is set to one, initialize the buffer.
+ * Returns the ring.
+ */
+static struct ring *startup_logs_from_fd(int fd, int new)
+{
+ char *area;
+ struct ring *r = NULL;
+
+ if (fd == -1)
+ goto error;
+
+ area = mmap(NULL, STARTUP_LOG_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (area == MAP_FAILED || area == NULL)
+ goto error;
+
+ if (new)
+ r = ring_make_from_area(area, STARTUP_LOG_SIZE);
+ else
+ r = ring_cast_from_area(area);
+
+ if (r == NULL)
+ goto error;
+
+ shm_startup_logs = r; /* save the ptr so we can unmap later */
+
+ return r;
+error:
+ return NULL;
+}
+
+/*
+ * Use a shm across reexec of the master.
+ *
+ * During the startup of the master, a shm_open must be done and the FD saved
+ * into the HAPROXY_STARTUPLOGS_FD environment variable.
+ *
+ * When forking workers, the child must use a copy of the shm, not the shm itself.
+ *
+ * Once in wait mode, the shm must be copied and closed.
+ *
+ */
+void startup_logs_init()
+{
+ struct ring *r = NULL;
+ char *str_fd, *endptr;
+ int fd = -1;
+
+ str_fd = getenv("HAPROXY_STARTUPLOGS_FD");
+ if (str_fd) {
+ fd = strtol(str_fd, &endptr, 10);
+ if (*endptr != '\0')
+ goto error;
+ unsetenv("HAPROXY_STARTUPLOGS_FD");
+ }
+
+ /* during startup, or just after a reload.
+ * Note: the WAIT_ONLY env variable must be
+	 * checked in case of an early call */
+ if (!(global.mode & MODE_MWORKER_WAIT) &&
+ getenv("HAPROXY_MWORKER_WAIT_ONLY") == NULL) {
+ if (fd != -1)
+ close(fd);
+
+ fd = startup_logs_new_shm();
+ if (fd == -1)
+ goto error;
+
+ r = startup_logs_from_fd(fd, 1);
+ if (!r)
+ goto error;
+
+ str_fd = NULL;
+ memprintf(&str_fd, "%d", fd);
+ setenv("HAPROXY_STARTUPLOGS_FD", str_fd, 1);
+ ha_free(&str_fd);
+
+ } else {
+ /* in wait mode, copy the shm to an allocated buffer */
+ struct ring *prev = NULL;
+
+ if (fd == -1)
+ goto error;
+
+ prev = startup_logs_from_fd(fd, 0);
+ if (!prev)
+ goto error;
+
+ r = startup_logs_dup(prev);
+ if (!r)
+ goto error;
+ startup_logs_free(prev);
+ close(fd);
+ }
+
+ startup_logs = r;
+
+ return;
+error:
+ if (fd != -1)
+ close(fd);
+ /* couldn't get a mmap to work */
+ startup_logs = ring_new(STARTUP_LOG_SIZE);
+
+}
+
+#else /* ! USE_SHM_OPEN */
+
+void startup_logs_init()
+{
+ startup_logs = ring_new(STARTUP_LOG_SIZE);
+}
+
+#endif
+
+/* free the startup logs, unmap if it was an shm */
+void startup_logs_free(struct ring *r)
+{
+#ifdef USE_SHM_OPEN
+ if (r == shm_startup_logs)
+ munmap(r, STARTUP_LOG_SIZE);
+ else
+#endif /* ! USE_SHM_OPEN */
+ ring_free(r);
+}
+
+/* duplicate a startup logs which was previously allocated in a shm */
+struct ring *startup_logs_dup(struct ring *src)
+{
+ struct ring *dst = NULL;
+
+ /* must use the size of the previous buffer */
+ dst = ring_new(b_size(&src->buf));
+ if (!dst)
+ goto error;
+
+ b_reset(&dst->buf);
+ b_ncat(&dst->buf, &src->buf, b_data(&src->buf));
+error:
+ return dst;
+}
+
+/* Put msg in usermsgs_buf.
+ *
+ * The message should not be terminated by a newline because this function
+ * manually inserts it.
+ *
+ * If there is not enough room in the buffer, the message is silently discarded.
+ * Do not forget to frequently clear the buffer.
+ */
+static void usermsgs_put(const struct ist *msg)
+{
+ /* Allocate the buffer if not already done. */
+ if (unlikely(b_is_null(&usermsgs_buf))) {
+ usermsgs_buf.area = malloc(USER_MESSAGES_BUFSIZE * sizeof(char));
+ if (usermsgs_buf.area)
+ usermsgs_buf.size = USER_MESSAGES_BUFSIZE;
+ }
+
+ if (likely(!b_is_null(&usermsgs_buf))) {
+ if (b_room(&usermsgs_buf) >= msg->len + 2) {
+ /* Insert the message + newline. */
+ b_putblk(&usermsgs_buf, msg->ptr, msg->len);
+ b_putchr(&usermsgs_buf, '\n');
+ /* Insert NUL outside of the buffer. */
+ *b_tail(&usermsgs_buf) = '\0';
+ }
+ }
+}
+
+/* Clear the user messages log buffer.
+ *
+ * <prefix> will set the local-thread context appended to every output
+ * following this call. It can be NULL if not necessary.
+ */
+void usermsgs_clr(const char *prefix)
+{
+ if (likely(!b_is_null(&usermsgs_buf))) {
+ b_reset(&usermsgs_buf);
+ usermsgs_buf.area[0] = '\0';
+ }
+
+ usermsgs_ctx.prefix = prefix;
+}
+
+/* Check if the user messages buffer is empty. */
+int usermsgs_empty(void)
+{
+ return !!(b_is_null(&usermsgs_buf) || !b_data(&usermsgs_buf));
+}
+
+/* Return the messages log buffer content. */
+const char *usermsgs_str(void)
+{
+ if (unlikely(b_is_null(&usermsgs_buf)))
+ return "";
+
+ return b_head(&usermsgs_buf);
+}
+
+/* Set thread-local context infos to prefix forthcoming stderr output during
+ * configuration parsing.
+ *
+ * <file> and <line> specify the location of the parsed configuration.
+ *
+ * <obj> can be of various types. If not NULL, the string prefix generated will
+ * depend on its type.
+ */
+void set_usermsgs_ctx(const char *file, int line, enum obj_type *obj)
+{
+ usermsgs_ctx.file = file;
+ usermsgs_ctx.line = line;
+ usermsgs_ctx.obj = obj;
+}
+
+/* Set thread-local context infos to prefix forthcoming stderr output. It will
+ * be set as a complement to possibly already defined file/line.
+ *
+ * <obj> can be of various types. If not NULL, the string prefix generated will
+ * depend on its type.
+ */
+void register_parsing_obj(enum obj_type *obj)
+{
+ usermsgs_ctx.obj = obj;
+}
+
+/* Reset thread-local context infos for stderr output. */
+void reset_usermsgs_ctx(void)
+{
+ usermsgs_ctx.file = NULL;
+ usermsgs_ctx.line = 0;
+ usermsgs_ctx.obj = NULL;
+}
+
+static void generate_usermsgs_ctx_str(void)
+{
+ struct usermsgs_ctx *ctx = &usermsgs_ctx;
+ void *area;
+ int ret;
+
+ if (unlikely(b_is_null(&ctx->str))) {
+ area = calloc(USERMSGS_CTX_BUFSIZE, sizeof(*area));
+ if (area)
+ ctx->str = b_make(area, USERMSGS_CTX_BUFSIZE, 0, 0);
+ }
+
+ if (likely(!b_is_null(&ctx->str))) {
+ b_reset(&ctx->str);
+
+ if (ctx->prefix) {
+ ret = snprintf(b_tail(&ctx->str), b_room(&ctx->str),
+ "%s : ", ctx->prefix);
+ b_add(&ctx->str, MIN(ret, b_room(&ctx->str)));
+ }
+
+ if (ctx->file) {
+ ret = snprintf(b_tail(&ctx->str), b_room(&ctx->str),
+ "[%s:%d] : ", ctx->file, ctx->line);
+ b_add(&ctx->str, MIN(ret, b_room(&ctx->str)));
+ }
+
+ switch (obj_type(ctx->obj)) {
+ case OBJ_TYPE_SERVER:
+ ret = snprintf(b_tail(&ctx->str), b_room(&ctx->str),
+ "'server %s/%s' : ",
+ __objt_server(ctx->obj)->proxy->id,
+ __objt_server(ctx->obj)->id);
+ b_add(&ctx->str, MIN(ret, b_room(&ctx->str)));
+ break;
+
+ case OBJ_TYPE_NONE:
+ default:
+ break;
+ }
+
+ if (!b_data(&ctx->str))
+ snprintf(b_tail(&ctx->str), b_room(&ctx->str), "%s", "");
+ }
+}
+
+/* Generic function to display messages prefixed by a label */
+static void print_message(int use_usermsgs_ctx, const char *label, const char *fmt, va_list argp)
+{
+ struct ist msg_ist = IST_NULL;
+ char *head, *parsing_str, *msg;
+ char prefix[11]; // '[' + 8 chars + ']' + 0.
+
+ *prefix = '[';
+ strncpy(prefix + 1, label, sizeof(prefix) - 2);
+ msg = prefix + strlen(prefix);
+ *msg++ = ']';
+ while (msg < prefix + sizeof(prefix) - 1)
+ *msg++ = ' ';
+ *msg = 0;
+
+ head = parsing_str = msg = NULL;
+ memprintf(&head, "%s (%u) : ", prefix, (uint)getpid());
+ memvprintf(&msg, fmt, argp);
+
+ /* trim the trailing '\n' */
+ msg_ist = ist(msg);
+ if (msg_ist.len > 0 && msg_ist.ptr[msg_ist.len - 1] == '\n')
+ msg_ist.len--;
+
+ if (use_usermsgs_ctx) {
+ generate_usermsgs_ctx_str();
+ parsing_str = b_head(&usermsgs_ctx.str);
+ reset_usermsgs_ctx();
+ }
+ else {
+ parsing_str = "";
+ }
+
+ if (global.mode & MODE_STARTING) {
+ if (unlikely(!startup_logs))
+ startup_logs_init();
+
+ if (likely(startup_logs)) {
+ struct ist m[3];
+
+ m[0] = ist(head);
+ m[1] = ist(parsing_str);
+ m[2] = msg_ist;
+
+ ring_write(startup_logs, ~0, 0, 0, m, 3);
+ }
+ }
+ else {
+ usermsgs_put(&msg_ist);
+ }
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)) {
+ fprintf(stderr, "%s%s%s", head, parsing_str, msg);
+ fflush(stderr);
+ }
+
+ free(head);
+ free(msg);
+}
+
+static void print_message_args(int use_usermsgs_ctx, const char *label, const char *fmt, ...)
+{
+ va_list argp;
+ va_start(argp, fmt);
+ print_message(use_usermsgs_ctx, label, fmt, argp);
+ va_end(argp);
+}
+
+/*
+ * Display a notice with the haproxy version and executable path when the
+ * first message is emitted in starting mode.
+ */
+static void warn_exec_path()
+{
+ if (!(warned & WARN_EXEC_PATH) && (global.mode & MODE_STARTING)) {
+ const char *path = get_exec_path();
+
+ warned |= WARN_EXEC_PATH;
+ print_message_args(0, "NOTICE", "haproxy version is %s\n", haproxy_version);
+ if (path)
+ print_message_args(0, "NOTICE", "path to executable is %s\n", path);
+ }
+}
+
+/*
+ * Displays the message on stderr with the pid.
+ */
+void ha_alert(const char *fmt, ...)
+{
+ va_list argp;
+
+ warn_exec_path();
+ va_start(argp, fmt);
+ print_message(1, "ALERT", fmt, argp);
+ va_end(argp);
+}
+
+/*
+ * Displays the message on stderr with the pid.
+ */
+void ha_warning(const char *fmt, ...)
+{
+ va_list argp;
+
+ warned |= WARN_ANY;
+ HA_ATOMIC_INC(&tot_warnings);
+
+ warn_exec_path();
+ va_start(argp, fmt);
+ print_message(1, "WARNING", fmt, argp);
+ va_end(argp);
+}
+
+/*
+ * Variant of _ha_diag_warning with va_list.
+ * Use it only if MODE_DIAG has been previously checked.
+ */
+void _ha_vdiag_warning(const char *fmt, va_list argp)
+{
+ warned |= WARN_ANY;
+ HA_ATOMIC_INC(&tot_warnings);
+
+ warn_exec_path();
+ print_message(1, "DIAG", fmt, argp);
+}
+
+/*
+ * Output a diagnostic warning.
+ * Use it only if MODE_DIAG has been previously checked.
+ */
+void _ha_diag_warning(const char *fmt, ...)
+{
+ va_list argp;
+
+ va_start(argp, fmt);
+ _ha_vdiag_warning(fmt, argp);
+ va_end(argp);
+}
+
+/*
+ * Output a diagnostic warning. Do nothing if MODE_DIAG is not on.
+ */
+void ha_diag_warning(const char *fmt, ...)
+{
+ va_list argp;
+
+ if (global.mode & MODE_DIAG) {
+ va_start(argp, fmt);
+ _ha_vdiag_warning(fmt, argp);
+ va_end(argp);
+ }
+}
+
+/*
+ * Displays the message on stderr with the pid.
+ */
+void ha_notice(const char *fmt, ...)
+{
+ va_list argp;
+
+ va_start(argp, fmt);
+ print_message(1, "NOTICE", fmt, argp);
+ va_end(argp);
+}
+
+/*
+ * Displays the message on <out> only if quiet mode is not set.
+ */
+void qfprintf(FILE *out, const char *fmt, ...)
+{
+ va_list argp;
+
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)) {
+ va_start(argp, fmt);
+ vfprintf(out, fmt, argp);
+ fflush(out);
+ va_end(argp);
+ }
+}
+
+
+/* parse the "show startup-logs" command, returns 1 if a message is returned, otherwise zero */
+static int cli_parse_show_startup_logs(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ if (!startup_logs)
+ return cli_msg(appctx, LOG_INFO, "\n"); // nothing to print
+
+ return ring_attach_cli(startup_logs, appctx, 0);
+}
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "show", "startup-logs", NULL }, "show startup-logs : report logs emitted during HAProxy startup", cli_parse_show_startup_logs, NULL, NULL, NULL, ACCESS_MASTER },
+ {{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+
+static void deinit_errors_buffers()
+{
+ ring_free(_HA_ATOMIC_XCHG(&startup_logs, NULL));
+ ha_free(&usermsgs_buf.area);
+ ha_free(&usermsgs_ctx.str.area);
+}
+
+/* errors might be used in threads and even before forking, thus 2 deinit */
+REGISTER_PER_THREAD_FREE(deinit_errors_buffers);
+REGISTER_POST_DEINIT(deinit_errors_buffers);
diff --git a/src/ev_epoll.c b/src/ev_epoll.c
new file mode 100644
index 0000000..c42cf2e
--- /dev/null
+++ b/src/ev_epoll.c
@@ -0,0 +1,413 @@
+/*
+ * FD polling functions for Linux epoll
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <unistd.h>
+#include <sys/epoll.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/signal.h>
+#include <haproxy/ticks.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+
+
+/* private data */
+static THREAD_LOCAL struct epoll_event *epoll_events = NULL;
+static int epoll_fd[MAX_THREADS] __read_mostly; // per-thread epoll_fd
+
+#ifndef EPOLLRDHUP
+/* EPOLLRDHUP was defined late in libc, and it appeared in kernel 2.6.17 */
+#define EPOLLRDHUP 0x2000
+#endif
+
+/*
+ * Immediately remove file descriptor from epoll set upon close.
+ * Since we forked, some fds share inodes with the other process, and epoll may
+ * send us events even though this process closed the fd (see man 7 epoll,
+ * "Questions and answers", Q 6).
+ */
+static void __fd_clo(int fd)
+{
+ if (unlikely(fdtab[fd].state & FD_CLONED)) {
+ unsigned long m = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv) | _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+ int tgrp = fd_tgid(fd);
+ struct epoll_event ev;
+ int i;
+
+ /* not polled by anyone: nothing to delete */
+ if (!m)
+ return;
+
+ /* since FDs may only be shared per group and are only closed
+ * once entirely reset, it should never happen that we have to
+ * close an FD for another group, unless we're stopping from the
+ * wrong thread or during startup, which is what we're checking
+ * for. Regardless, it is not a problem to do so.
+ */
+ if (unlikely(!(global.mode & MODE_STARTING))) {
+ CHECK_IF(tgid != tgrp && !thread_isolated());
+ }
+
+ /* delete the fd from every group thread that still polls it;
+ * <ev> may stay uninitialized: the event argument is ignored
+ * for EPOLL_CTL_DEL on kernels >= 2.6.9 (see epoll_ctl(2)).
+ */
+ for (i = ha_tgroup_info[tgrp-1].base; i < ha_tgroup_info[tgrp-1].base + ha_tgroup_info[tgrp-1].count; i++)
+ if (m & ha_thread_info[i].ltid_bit)
+ epoll_ctl(epoll_fd[i], EPOLL_CTL_DEL, fd, &ev);
+ }
+}
+
+/* Resync the epoll registration of <fd> for the current thread with its
+ * desired state in fdtab[], updating polled_mask accordingly. Chooses
+ * between EPOLL_CTL_ADD/MOD/DEL, or returns without any syscall when
+ * nothing changed.
+ */
+static void _update_fd(int fd)
+{
+ int en, opcode;
+ struct epoll_event ev = { };
+ ulong pr, ps;
+
+ en = fdtab[fd].state;
+ pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
+ ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+
+ /* Try to force EPOLLET on FDs that support it */
+ if (fdtab[fd].state & FD_ET_POSSIBLE) {
+ /* already done ? */
+ if (pr & ps & ti->ltid_bit)
+ return;
+
+ /* enable ET polling in both directions */
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ opcode = EPOLL_CTL_ADD;
+ ev.events = EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLET;
+ goto done;
+ }
+
+ /* if we're already polling or are going to poll for this FD and it's
+ * neither active nor ready, force it to be active so that we don't
+ * needlessly unsubscribe then re-subscribe it.
+ */
+ if (!(en & (FD_EV_READY_R | FD_EV_SHUT_R | FD_EV_ERR_RW | FD_POLL_ERR)) &&
+ ((en & FD_EV_ACTIVE_W) || ((ps | pr) & ti->ltid_bit)))
+ en |= FD_EV_ACTIVE_R;
+
+ if ((ps | pr) & ti->ltid_bit) {
+ /* this thread is already polling the fd in at least one direction */
+ if (!(fdtab[fd].thread_mask & ti->ltid_bit) || !(en & FD_EV_ACTIVE_RW)) {
+ /* fd removed from poll list */
+ opcode = EPOLL_CTL_DEL;
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+ else {
+ /* no change in either direction: avoid the syscall */
+ if (((en & FD_EV_ACTIVE_R) != 0) == ((pr & ti->ltid_bit) != 0) &&
+ ((en & FD_EV_ACTIVE_W) != 0) == ((ps & ti->ltid_bit) != 0))
+ return;
+ if (en & FD_EV_ACTIVE_R) {
+ if (!(pr & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ } else {
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ }
+ if (en & FD_EV_ACTIVE_W) {
+ if (!(ps & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ } else {
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+ /* fd status changed */
+ opcode = EPOLL_CTL_MOD;
+ }
+ }
+ else if ((fdtab[fd].thread_mask & ti->ltid_bit) && (en & FD_EV_ACTIVE_RW)) {
+ /* new fd in the poll list */
+ opcode = EPOLL_CTL_ADD;
+ if (en & FD_EV_ACTIVE_R)
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ if (en & FD_EV_ACTIVE_W)
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ }
+ else {
+ /* not watched and not to be watched: nothing to do */
+ return;
+ }
+
+ /* construct the epoll events based on new state */
+ if (en & FD_EV_ACTIVE_R)
+ ev.events |= EPOLLIN | EPOLLRDHUP;
+
+ if (en & FD_EV_ACTIVE_W)
+ ev.events |= EPOLLOUT;
+
+ done:
+ ev.data.fd = fd;
+ epoll_ctl(epoll_fd[tid], opcode, fd, &ev);
+}
+
+/*
+ * Linux epoll() poller. First applies pending per-thread and shared FD
+ * updates, then waits for events (unless <wake> or busy polling forces a
+ * zero timeout), and finally reports the received events to fd_update_events().
+ */
+static void _do_poll(struct poller *p, int exp, int wake)
+{
+ int status;
+ int fd;
+ int count;
+ int updt_idx;
+ int wait_time;
+ int old_fd;
+
+ /* first, scan the update list to find polling changes */
+ for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
+ fd = fd_updt[updt_idx];
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);
+
+ if (fdtab[fd].owner)
+ _update_fd(fd);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+ fd_nbupdt = 0;
+
+ /* Scan the shared update list.
+ * NOTE(review): negative values encode list state: -2 appears to mark
+ * an entry being modified (restart from <old_fd>), values <= -3 encode
+ * the next fd as -fd-4 — confirm against the list management in fd.c.
+ */
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
+ if (fd == -2) {
+ fd = old_fd;
+ continue;
+ }
+ else if (fd <= -3)
+ fd = -fd -4;
+ if (fd == -1)
+ break;
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ if (!(fdtab[fd].update_mask & ti->ltid_bit)) {
+ /* not for this thread */
+ fd_drop_tgid(fd);
+ continue;
+ }
+
+ done_update_polling(fd);
+
+ if (fdtab[fd].owner)
+ _update_fd(fd);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+
+ thread_idle_now();
+ thread_harmless_now();
+
+ /* Now let's wait for polled events. */
+ wait_time = wake ? 0 : compute_poll_timeout(exp);
+ clock_entering_poll();
+
+ do {
+ int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
+
+ status = epoll_wait(epoll_fd[tid], epoll_events, global.tune.maxpollevents, timeout);
+ clock_update_local_date(timeout, status);
+
+ if (status) {
+ activity[tid].poll_io++;
+ break;
+ }
+ if (timeout || !wait_time)
+ break;
+ if (tick_isset(exp) && tick_is_expired(exp, now_ms))
+ break;
+ } while (1);
+
+ clock_update_global_date();
+ fd_leaving_poll(wait_time, status);
+
+ /* process polled events */
+
+ for (count = 0; count < status; count++) {
+ unsigned int n, e;
+
+ e = epoll_events[count].events;
+ fd = epoll_events[count].data.fd;
+
+ /* remember that the kernel supports EPOLLRDHUP once seen */
+ if ((e & EPOLLRDHUP) && !(cur_poller.flags & HAP_POLL_F_RDHUP))
+ _HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
+
+#ifdef DEBUG_FD
+ _HA_ATOMIC_INC(&fdtab[fd].event_count);
+#endif
+ /* translate epoll bits into the internal FD event flags */
+ n = ((e & EPOLLIN) ? FD_EV_READY_R : 0) |
+ ((e & EPOLLOUT) ? FD_EV_READY_W : 0) |
+ ((e & EPOLLRDHUP) ? FD_EV_SHUT_R : 0) |
+ ((e & EPOLLHUP) ? FD_EV_SHUT_RW : 0) |
+ ((e & EPOLLERR) ? FD_EV_ERR_RW : 0);
+
+ fd_update_events(fd, n);
+ }
+ /* the caller will take care of cached events */
+}
+
+/* Per-thread initialization: allocates this thread's event buffer and,
+ * for all threads except the first one (which reuses the fd created in
+ * _do_init), a dedicated epoll fd. Returns 1 on success, 0 on failure.
+ */
+static int init_epoll_per_thread()
+{
+ epoll_events = calloc(1, sizeof(struct epoll_event) * global.tune.maxpollevents);
+ if (epoll_events == NULL)
+ goto fail_alloc;
+
+ if (MAX_THREADS > 1 && tid) {
+ epoll_fd[tid] = epoll_create(global.maxsock + 1);
+ if (epoll_fd[tid] < 0)
+ goto fail_fd;
+ }
+
+ /* we may have to unregister some events initially registered on the
+ * original fd when it was alone, and/or to register events on the new
+ * fd for this thread. Let's just mark them as updated, the poller will
+ * do the rest.
+ */
+ fd_reregister_all(tgid, ti->ltid_bit);
+
+ return 1;
+ fail_fd:
+ free(epoll_events);
+ fail_alloc:
+ return 0;
+}
+
+/* Per-thread cleanup: closes the thread's epoll fd (thread 0's fd is
+ * closed by _do_term instead) and releases the event buffer.
+ */
+static void deinit_epoll_per_thread()
+{
+ if (MAX_THREADS > 1 && tid)
+ close(epoll_fd[tid]);
+
+ ha_free(&epoll_events);
+}
+
+/*
+ * Initialization of the epoll() poller.
+ * Returns 0 in case of failure, non-zero in case of success. If it fails, it
+ * disables the poller by setting its pref to 0. The size argument passed to
+ * epoll_create() is only a historical hint, ignored by modern kernels.
+ */
+static int _do_init(struct poller *p)
+{
+ p->private = NULL;
+
+ epoll_fd[tid] = epoll_create(global.maxsock + 1);
+ if (epoll_fd[tid] < 0)
+ goto fail_fd;
+
+ hap_register_per_thread_init(init_epoll_per_thread);
+ hap_register_per_thread_deinit(deinit_epoll_per_thread);
+
+ return 1;
+
+ fail_fd:
+ p->pref = 0;
+ return 0;
+}
+
+/*
+ * Termination of the epoll() poller.
+ * Memory is released and the poller is marked as unselectable (pref = 0).
+ */
+static void _do_term(struct poller *p)
+{
+ if (epoll_fd[tid] >= 0) {
+ close(epoll_fd[tid]);
+ epoll_fd[tid] = -1;
+ }
+
+ p->private = NULL;
+ p->pref = 0;
+}
+
+/*
+ * Check that the poller works: tries to create (and immediately close)
+ * an epoll fd. Returns 1 if OK, otherwise 0.
+ */
+static int _do_test(struct poller *p)
+{
+ int fd;
+
+ fd = epoll_create(global.maxsock + 1);
+ if (fd < 0)
+ return 0;
+ close(fd);
+ return 1;
+}
+
+/*
+ * Recreate the epoll file descriptor after a fork(). Returns 1 if OK,
+ * otherwise 0. It will ensure that all processes will not share their
+ * epoll_fd. Some side effects were encountered because of this, such
+ * as epoll_wait() returning an FD which was previously deleted.
+ */
+static int _do_fork(struct poller *p)
+{
+ if (epoll_fd[tid] >= 0)
+ close(epoll_fd[tid]);
+ epoll_fd[tid] = epoll_create(global.maxsock + 1);
+ if (epoll_fd[tid] < 0)
+ return 0;
+ return 1;
+}
+
+/*
+ * Registers the poller in the global pollers[] table. Called via INITCALL
+ * before the configuration is parsed; only fills the descriptor, no fd is
+ * created here.
+ */
+static void _do_register(void)
+{
+ struct poller *p;
+ int i;
+
+ if (nbpollers >= MAX_POLLERS)
+ return;
+
+ /* mark all per-thread fds as unused until created */
+ for (i = 0; i < MAX_THREADS; i++)
+ epoll_fd[i] = -1;
+
+ p = &pollers[nbpollers++];
+
+ p->name = "epoll";
+ p->pref = 300;
+ p->flags = HAP_POLL_F_ERRHUP; // note: RDHUP might be dynamically added
+ p->private = NULL;
+
+ p->clo = __fd_clo;
+ p->test = _do_test;
+ p->init = _do_init;
+ p->term = _do_term;
+ p->poll = _do_poll;
+ p->fork = _do_fork;
+}
+
+INITCALL0(STG_REGISTER, _do_register);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/ev_evports.c b/src/ev_evports.c
new file mode 100644
index 0000000..07676e6
--- /dev/null
+++ b/src/ev_evports.c
@@ -0,0 +1,441 @@
+/*
+ * FD polling functions for SunOS event ports.
+ *
+ * Copyright 2018 Joyent, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <poll.h>
+#include <port.h>
+#include <errno.h>
+#include <syslog.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/signal.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+
+/*
+ * Private data:
+ */
+static int evports_fd[MAX_THREADS]; // per-thread evports_fd
+static THREAD_LOCAL port_event_t *evports_evlist = NULL;
+static THREAD_LOCAL int evports_evlist_max = 0;
+
+/*
+ * Convert the "state" member of "fdtab" into an event ports event mask:
+ * FD_EV_ACTIVE_R maps to POLLIN and FD_EV_ACTIVE_W to POLLOUT.
+ */
+static inline int evports_state_to_events(int state)
+{
+ int events = 0;
+
+ if (state & FD_EV_ACTIVE_W)
+ events |= POLLOUT;
+ if (state & FD_EV_ACTIVE_R)
+ events |= POLLIN;
+
+ return (events);
+}
+
+/*
+ * Associate or dissociate this file descriptor with the event port, using the
+ * specified event mask. An empty mask dissociates the fd entirely, since
+ * port_associate() does not accept a zero event set.
+ */
+static inline void evports_resync_fd(int fd, int events)
+{
+ if (events == 0)
+ port_dissociate(evports_fd[tid], PORT_SOURCE_FD, fd);
+ else
+ port_associate(evports_fd[tid], PORT_SOURCE_FD, fd, events, NULL);
+}
+
+/* Resync the event-port association of <fd> with its desired state in
+ * fdtab[], updating this thread's bits in polled_mask accordingly.
+ */
+static void _update_fd(int fd)
+{
+ int en;
+ int events;
+ ulong pr, ps;
+
+ en = fdtab[fd].state;
+ pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
+ ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+
+ if (!(fdtab[fd].thread_mask & ti->ltid_bit) || !(en & FD_EV_ACTIVE_RW)) {
+ if (!((pr | ps) & ti->ltid_bit)) {
+ /* fd was not watched, it's still not */
+ return;
+ }
+ /* fd totally removed from poll list */
+ events = 0;
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+ else {
+ /* OK fd has to be monitored, it was either added or changed */
+ events = evports_state_to_events(en);
+ if (en & FD_EV_ACTIVE_R) {
+ if (!(pr & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ } else {
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ }
+ if (en & FD_EV_ACTIVE_W) {
+ if (!(ps & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ } else {
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+
+ }
+ /* apply the computed mask (0 dissociates the fd) */
+ evports_resync_fd(fd, events);
+}
+
+/*
+ * Event Ports poller. This routine interacts with the file descriptor
+ * management data structures and routines; see the large block comment in
+ * "src/fd.c" for more information.
+ */
+
+static void _do_poll(struct poller *p, int exp, int wake)
+{
+ int i;
+ int wait_time;
+ struct timespec timeout_ts;
+ unsigned int nevlist;
+ int fd, old_fd;
+ int status;
+
+ /*
+ * Scan the list of file descriptors with an updated status:
+ */
+ for (i = 0; i < fd_nbupdt; i++) {
+ fd = fd_updt[i];
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);
+
+ if (fdtab[fd].owner)
+ _update_fd(fd);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+ fd_nbupdt = 0;
+
+ /* Scan the shared update list.
+ * NOTE(review): negative values encode list state (-2 = locked entry,
+ * <= -3 = next fd encoded as -fd-4) — confirm against fd.c.
+ */
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
+ if (fd == -2) {
+ fd = old_fd;
+ continue;
+ }
+ else if (fd <= -3)
+ fd = -fd -4;
+ if (fd == -1)
+ break;
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ if (!(fdtab[fd].update_mask & ti->ltid_bit)) {
+ fd_drop_tgid(fd);
+ continue;
+ }
+
+ done_update_polling(fd);
+
+ if (fdtab[fd].owner)
+ _update_fd(fd);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+
+ thread_idle_now();
+ thread_harmless_now();
+
+ /* Now let's wait for polled events. */
+ wait_time = wake ? 0 : compute_poll_timeout(exp);
+ clock_entering_poll();
+
+ do {
+ int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
+ int interrupted = 0;
+ nevlist = 1; /* desired number of events to be retrieved */
+ timeout_ts.tv_sec = (timeout / 1000);
+ timeout_ts.tv_nsec = (timeout % 1000) * 1000000;
+
+ status = port_getn(evports_fd[tid],
+ evports_evlist,
+ evports_evlist_max,
+ &nevlist, /* updated to the number of events retrieved */
+ &timeout_ts);
+ if (status != 0) {
+ int e = errno;
+ switch (e) {
+ case ETIME:
+ /*
+ * Though the manual page has not historically made it
+ * clear, port_getn() can return -1 with an errno of
+ * ETIME and still have returned some number of events.
+ */
+ /* nevlist >= 0 */
+ break;
+ default:
+ nevlist = 0;
+ interrupted = 1;
+ break;
+ }
+ }
+ clock_update_local_date(timeout, nevlist);
+
+ if (nevlist || interrupted)
+ break;
+ if (timeout || !wait_time)
+ break;
+ if (tick_isset(exp) && tick_is_expired(exp, now_ms))
+ break;
+ } while(1);
+
+ clock_update_global_date();
+ fd_leaving_poll(wait_time, nevlist);
+
+ if (nevlist > 0)
+ activity[tid].poll_io++;
+
+ /* NOTE(review): <i> is signed while <nevlist> is unsigned; fine here
+ * since nevlist is bounded by evports_evlist_max, but worth confirming.
+ */
+ for (i = 0; i < nevlist; i++) {
+ unsigned int n = 0;
+ int events, rebind_events;
+ int ret;
+
+ fd = evports_evlist[i].portev_object;
+ events = evports_evlist[i].portev_events;
+
+#ifdef DEBUG_FD
+ _HA_ATOMIC_INC(&fdtab[fd].event_count);
+#endif
+ /*
+ * By virtue of receiving an event for this file descriptor, it
+ * is no longer associated with the port in question. Store
+ * the previous event mask so that we may reassociate after
+ * processing is complete.
+ */
+ rebind_events = evports_state_to_events(fdtab[fd].state);
+ /* rebind_events != 0 */
+
+ /*
+ * Set bits based on the events we received from the port:
+ */
+ n = ((events & POLLIN) ? FD_EV_READY_R : 0) |
+ ((events & POLLOUT) ? FD_EV_READY_W : 0) |
+ ((events & POLLHUP) ? FD_EV_SHUT_RW : 0) |
+ ((events & POLLERR) ? FD_EV_ERR_RW : 0);
+
+ /*
+ * Call connection processing callbacks. Note that it's
+ * possible for this processing to alter the required event
+ * port association; i.e., the "state" member of the "fdtab"
+ * entry. If it changes, the fd will be placed on the updated
+ * list for processing the next time we are called.
+ */
+ ret = fd_update_events(fd, n);
+
+ /* polling will be on this instance if the FD was migrated */
+ if (ret == FD_UPDT_MIGRATED)
+ continue;
+
+ /*
+ * This file descriptor was closed during the processing of
+ * polled events. No need to reassociate.
+ */
+ if (ret == FD_UPDT_CLOSED)
+ continue;
+
+ /*
+ * Reassociate with the port, using the same event mask as
+ * before. This call will not result in a dissociation as we
+ * asserted that _some_ events needed to be rebound above.
+ *
+ * Reassociating with the same mask allows us to mimic the
+ * level-triggered behaviour of poll(2). In the event that we
+ * are interested in the same events on the next turn of the
+ * loop, this represents no extra work.
+ *
+ * If this additional port_associate(3C) call becomes a
+ * performance problem, we would need to verify that we can
+ * correctly interact with the file descriptor cache and update
+ * list (see "src/fd.c") to avoid reassociating here, or to use
+ * a different events mask.
+ */
+ evports_resync_fd(fd, rebind_events);
+ }
+}
+
+/* Per-thread initialization: allocates this thread's event list and, for
+ * all threads except the first (which reuses the port created in _do_init),
+ * a dedicated event port. Returns 1 on success, 0 on failure.
+ */
+static int init_evports_per_thread()
+{
+ evports_evlist_max = global.tune.maxpollevents;
+ evports_evlist = calloc(evports_evlist_max, sizeof(*evports_evlist));
+ if (evports_evlist == NULL) {
+ goto fail_alloc;
+ }
+
+ if (MAX_THREADS > 1 && tid) {
+ if ((evports_fd[tid] = port_create()) == -1) {
+ goto fail_fd;
+ }
+ }
+
+ /* we may have to unregister some events initially registered on the
+ * original fd when it was alone, and/or to register events on the new
+ * fd for this thread. Let's just mark them as updated, the poller will
+ * do the rest.
+ */
+ fd_reregister_all(tgid, ti->ltid_bit);
+
+ return 1;
+
+ fail_fd:
+ ha_free(&evports_evlist);
+ evports_evlist_max = 0;
+ fail_alloc:
+ return 0;
+}
+
+/* Per-thread cleanup: closes the thread's event port (thread 0's port is
+ * closed by _do_term instead) and releases the event list.
+ */
+static void deinit_evports_per_thread()
+{
+ if (MAX_THREADS > 1 && tid)
+ close(evports_fd[tid]);
+
+ ha_free(&evports_evlist);
+ evports_evlist_max = 0;
+}
+
+/*
+ * Initialisation of the event ports poller.
+ * Returns 0 in case of failure, non-zero in case of success.
+ * On failure the poller is disabled by setting its pref to 0.
+ */
+static int _do_init(struct poller *p)
+{
+ p->private = NULL;
+
+ if ((evports_fd[tid] = port_create()) == -1) {
+ goto fail;
+ }
+
+ hap_register_per_thread_init(init_evports_per_thread);
+ hap_register_per_thread_deinit(deinit_evports_per_thread);
+
+ return 1;
+
+fail:
+ p->pref = 0;
+ return 0;
+}
+
+/*
+ * Termination of the event ports poller.
+ * All resources are released and the poller is marked as inoperative
+ * (pref = 0).
+ */
+static void _do_term(struct poller *p)
+{
+ if (evports_fd[tid] != -1) {
+ close(evports_fd[tid]);
+ evports_fd[tid] = -1;
+ }
+
+ p->private = NULL;
+ p->pref = 0;
+
+ ha_free(&evports_evlist);
+ evports_evlist_max = 0;
+}
+
+/*
+ * Run-time check to make sure we can allocate the resources needed for
+ * the poller to function correctly: tries to create (and immediately
+ * close) an event port. Returns 1 on success, otherwise 0.
+ */
+static int _do_test(struct poller *p)
+{
+ int fd;
+
+ if ((fd = port_create()) == -1) {
+ return 0;
+ }
+
+ close(fd);
+ return 1;
+}
+
+/*
+ * Close and recreate the event port after fork(). Returns 1 on success,
+ * otherwise 0. If this function fails, "_do_term()" must be called to
+ * clean up the poller.
+ */
+static int _do_fork(struct poller *p)
+{
+ if (evports_fd[tid] != -1) {
+ close(evports_fd[tid]);
+ }
+
+ if ((evports_fd[tid] = port_create()) == -1) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Registers the poller in the global pollers[] table. Called via INITCALL
+ * before configuration parsing; no port is created here.
+ */
+static void _do_register(void)
+{
+ struct poller *p;
+ int i;
+
+ if (nbpollers >= MAX_POLLERS)
+ return;
+
+ /* mark all per-thread ports as unused until created */
+ for (i = 0; i < MAX_THREADS; i++)
+ evports_fd[i] = -1;
+
+ p = &pollers[nbpollers++];
+
+ p->name = "evports";
+ p->pref = 300;
+ p->flags = HAP_POLL_F_ERRHUP;
+ p->private = NULL;
+
+ p->clo = NULL;
+ p->test = _do_test;
+ p->init = _do_init;
+ p->term = _do_term;
+ p->poll = _do_poll;
+ p->fork = _do_fork;
+}
+
+INITCALL0(STG_REGISTER, _do_register);
diff --git a/src/ev_kqueue.c b/src/ev_kqueue.c
new file mode 100644
index 0000000..f123e7b
--- /dev/null
+++ b/src/ev_kqueue.c
@@ -0,0 +1,380 @@
+/*
+ * FD polling functions for FreeBSD kqueue()
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <sys/event.h>
+#include <sys/time.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/signal.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+
+
+/* private data */
+static int kqueue_fd[MAX_THREADS] __read_mostly; // per-thread kqueue_fd
+static THREAD_LOCAL struct kevent *kev = NULL;
+static struct kevent *kev_out = NULL; // Trash buffer for kevent() to write the eventlist in
+
+/* Append to kev[], starting at index <start>, the kevent changes needed to
+ * resync <fd> with its desired state in fdtab[], updating this thread's
+ * bits in polled_mask accordingly. Returns the new number of queued
+ * changes; the caller submits them in one kevent() call.
+ */
+static int _update_fd(int fd, int start)
+{
+ int en;
+ int changes = start;
+ ulong pr, ps;
+
+ en = fdtab[fd].state;
+ pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
+ ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+
+ if (!(fdtab[fd].thread_mask & ti->ltid_bit) || !(en & FD_EV_ACTIVE_RW)) {
+ if (!((pr | ps) & ti->ltid_bit)) {
+ /* fd was not watched, it's still not */
+ return changes;
+ }
+ /* fd totally removed from poll list */
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+ else {
+ /* OK fd has to be monitored, it was either added or changed */
+
+ if (en & FD_EV_ACTIVE_R) {
+ if (!(pr & ti->ltid_bit)) {
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ }
+ }
+ else if (pr & ti->ltid_bit) {
+ EV_SET(&kev[changes++], fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
+ /* use the barrier-less form like all other mask updates here */
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ }
+
+ if (en & FD_EV_ACTIVE_W) {
+ if (!(ps & ti->ltid_bit)) {
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ }
+ }
+ else if (ps & ti->ltid_bit) {
+ EV_SET(&kev[changes++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ }
+
+ }
+ return changes;
+}
+
+/*
+ * kqueue() poller: batches all pending FD registration changes into kev[],
+ * submits them in a single kevent() call, then waits for and reports events.
+ */
+static void _do_poll(struct poller *p, int exp, int wake)
+{
+ int status;
+ int count, fd, wait_time;
+ struct timespec timeout_ts;
+ int updt_idx;
+ int changes = 0;
+ int old_fd;
+
+ timeout_ts.tv_sec = 0;
+ timeout_ts.tv_nsec = 0;
+ /* first, scan the update list to find changes */
+ for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
+ fd = fd_updt[updt_idx];
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);
+
+ if (fdtab[fd].owner)
+ changes = _update_fd(fd, changes);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+ /* Scan the global update list.
+ * NOTE(review): negative values encode list state (-2 = locked entry,
+ * <= -3 = next fd encoded as -fd-4) — confirm against fd.c.
+ */
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
+ if (fd == -2) {
+ fd = old_fd;
+ continue;
+ }
+ else if (fd <= -3)
+ fd = -fd -4;
+ if (fd == -1)
+ break;
+
+ if (!fd_grab_tgid(fd, tgid)) {
+ /* was reassigned */
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+
+ if (!(fdtab[fd].update_mask & ti->ltid_bit)) {
+ fd_drop_tgid(fd);
+ continue;
+ }
+
+ done_update_polling(fd);
+
+ if (fdtab[fd].owner)
+ changes = _update_fd(fd, changes);
+ else
+ activity[tid].poll_drop_fd++;
+
+ fd_drop_tgid(fd);
+ }
+
+ thread_idle_now();
+ thread_harmless_now();
+
+ if (changes) {
+#ifdef EV_RECEIPT
+ kev[0].flags |= EV_RECEIPT;
+#else
+ /* If EV_RECEIPT isn't defined, just add an invalid entry,
+ * so that we get an error and kevent() stops before scanning
+ * the kqueue.
+ */
+ EV_SET(&kev[changes++], -1, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
+#endif
+ kevent(kqueue_fd[tid], kev, changes, kev_out, changes, &timeout_ts);
+ }
+ fd_nbupdt = 0;
+
+ /* Now let's wait for polled events.
+ * <fd> is reused below as the maximum number of events to fetch.
+ */
+ wait_time = wake ? 0 : compute_poll_timeout(exp);
+ fd = global.tune.maxpollevents;
+ clock_entering_poll();
+
+ do {
+ int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
+
+ timeout_ts.tv_sec = (timeout / 1000);
+ timeout_ts.tv_nsec = (timeout % 1000) * 1000000;
+
+ status = kevent(kqueue_fd[tid], // int kq
+ NULL, // const struct kevent *changelist
+ 0, // int nchanges
+ kev, // struct kevent *eventlist
+ fd, // int nevents
+ &timeout_ts); // const struct timespec *timeout
+ clock_update_local_date(timeout, status);
+
+ if (status) {
+ activity[tid].poll_io++;
+ break;
+ }
+ if (timeout || !wait_time)
+ break;
+ if (tick_isset(exp) && tick_is_expired(exp, now_ms))
+ break;
+ } while (1);
+
+ clock_update_global_date();
+ fd_leaving_poll(wait_time, status);
+
+ for (count = 0; count < status; count++) {
+ unsigned int n = 0;
+
+ fd = kev[count].ident;
+
+#ifdef DEBUG_FD
+ _HA_ATOMIC_INC(&fdtab[fd].event_count);
+#endif
+ /* translate kevent filters/flags into internal FD event flags:
+ * EV_EOF on a read filter is a shutdown, on a write filter an error.
+ */
+ if (kev[count].filter == EVFILT_READ) {
+ if (kev[count].data || !(kev[count].flags & EV_EOF))
+ n |= FD_EV_READY_R;
+ if (kev[count].flags & EV_EOF)
+ n |= FD_EV_SHUT_R;
+ }
+ else if (kev[count].filter == EVFILT_WRITE) {
+ n |= FD_EV_READY_W;
+ if (kev[count].flags & EV_EOF)
+ n |= FD_EV_ERR_RW;
+ }
+
+ fd_update_events(fd, n);
+ }
+}
+
+
+/* Per-thread initialization: allocates this thread's kevent buffer and,
+ * for all threads except the first (which reuses the queue created in
+ * _do_init), a dedicated kqueue fd. Returns 1 on success, 0 on failure.
+ */
+static int init_kqueue_per_thread()
+{
+ /* we can have up to two events per fd, so allocate enough to store
+ * 2*fd event, and an extra one, in case EV_RECEIPT isn't defined,
+ * so that we can add an invalid entry and get an error, to avoid
+ * scanning the kqueue uselessly.
+ */
+ kev = calloc(1, sizeof(struct kevent) * (2 * global.maxsock + 1));
+ if (kev == NULL)
+ goto fail_alloc;
+
+ if (MAX_THREADS > 1 && tid) {
+ kqueue_fd[tid] = kqueue();
+ if (kqueue_fd[tid] < 0)
+ goto fail_fd;
+ }
+
+ /* we may have to unregister some events initially registered on the
+ * original fd when it was alone, and/or to register events on the new
+ * fd for this thread. Let's just mark them as updated, the poller will
+ * do the rest.
+ */
+ fd_reregister_all(tgid, ti->ltid_bit);
+
+ return 1;
+ fail_fd:
+ free(kev);
+ fail_alloc:
+ return 0;
+}
+
+/* Per-thread cleanup: closes the thread's kqueue fd (thread 0's fd is
+ * closed by _do_term instead) and releases the kevent buffer.
+ */
+static void deinit_kqueue_per_thread()
+{
+ if (MAX_THREADS > 1 && tid)
+ close(kqueue_fd[tid]);
+
+ ha_free(&kev);
+}
+
+/*
+ * Initialization of the kqueue() poller.
+ * Returns 0 in case of failure, non-zero in case of success. If it fails, it
+ * disables the poller by setting its pref to 0.
+ */
+static int _do_init(struct poller *p)
+{
+ p->private = NULL;
+
+ /* we can have up to two events per fd, so allocate enough to store
+ * 2*fd event, and an extra one, in case EV_RECEIPT isn't defined,
+ * so that we can add an invalid entry and get an error, to avoid
+ * scanning the kqueue uselessly.
+ */
+ kev_out = calloc(1, sizeof(struct kevent) * (2 * global.maxsock + 1));
+ if (!kev_out)
+ goto fail_alloc;
+
+ kqueue_fd[tid] = kqueue();
+ if (kqueue_fd[tid] < 0)
+ goto fail_fd;
+
+ hap_register_per_thread_init(init_kqueue_per_thread);
+ hap_register_per_thread_deinit(deinit_kqueue_per_thread);
+ return 1;
+
+ fail_fd:
+ ha_free(&kev_out);
+fail_alloc:
+ p->pref = 0;
+ return 0;
+}
+
+/*
+ * Termination of the kqueue() poller.
+ * Memory is released and the poller is marked as unselectable (pref = 0).
+ */
+static void _do_term(struct poller *p)
+{
+ if (kqueue_fd[tid] >= 0) {
+ close(kqueue_fd[tid]);
+ kqueue_fd[tid] = -1;
+ }
+
+ p->private = NULL;
+ p->pref = 0;
+ if (kev_out) {
+ ha_free(&kev_out);
+ }
+}
+
+/*
+ * Check that the poller works: tries to create (and immediately close)
+ * a kqueue. Returns 1 if OK, otherwise 0.
+ */
+static int _do_test(struct poller *p)
+{
+ int fd;
+
+ fd = kqueue();
+ if (fd < 0)
+ return 0;
+ close(fd);
+ return 1;
+}
+
+/*
+ * Recreate the kqueue file descriptor after a fork(). Returns 1 if OK,
+ * otherwise 0. Note that some pollers need to be reopened after a fork()
+ * (such as kqueue), and some others may fail to do so in a chroot.
+ */
+static int _do_fork(struct poller *p)
+{
+ kqueue_fd[tid] = kqueue();
+ if (kqueue_fd[tid] < 0)
+ return 0;
+ return 1;
+}
+
+/*
+ * Registers the poller in the global pollers[] table. Called via INITCALL
+ * before configuration parsing; no kqueue is created here.
+ */
+static void _do_register(void)
+{
+ struct poller *p;
+ int i;
+
+ if (nbpollers >= MAX_POLLERS)
+ return;
+
+ /* mark all per-thread fds as unused until created */
+ for (i = 0; i < MAX_THREADS; i++)
+ kqueue_fd[i] = -1;
+
+ p = &pollers[nbpollers++];
+
+ p->name = "kqueue";
+ p->pref = 300;
+ p->flags = HAP_POLL_F_RDHUP | HAP_POLL_F_ERRHUP;
+ p->private = NULL;
+
+ p->clo = NULL;
+ p->test = _do_test;
+ p->init = _do_init;
+ p->term = _do_term;
+ p->poll = _do_poll;
+ p->fork = _do_fork;
+}
+
+INITCALL0(STG_REGISTER, _do_register);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/ev_poll.c b/src/ev_poll.c
new file mode 100644
index 0000000..e98630c
--- /dev/null
+++ b/src/ev_poll.c
@@ -0,0 +1,348 @@
+/*
+ * FD polling functions for generic poll()
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE // for POLLRDHUP on Linux
+
+#include <unistd.h>
+#include <poll.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/signal.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+
+
+#ifndef POLLRDHUP
+/* POLLRDHUP was defined late in libc, and it appeared in kernel 2.6.17 */
+#define POLLRDHUP 0
+#endif
+
+static int maxfd; /* # of the highest fd + 1 */
+static unsigned int *fd_evts[2];
+
+/* private data */
+static THREAD_LOCAL int nbfd = 0;
+static THREAD_LOCAL struct pollfd *poll_events = NULL;
+
+static void __fd_clo(int fd)
+{
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+}
+
+static void _update_fd(int fd, int *max_add_fd)
+{
+ int en;
+ ulong pr, ps;
+
+ en = fdtab[fd].state;
+ pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
+ ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+
+ /* we have a single state for all threads, which is why we
+ * don't check the tid_bit. First thread to see the update
+ * takes it for every other one.
+ */
+ if (!(en & FD_EV_ACTIVE_RW)) {
+ if (!(pr | ps)) {
+ /* fd was not watched, it's still not */
+ return;
+ }
+ /* fd totally removed from poll list */
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
+ }
+ else {
+ /* OK fd has to be monitored, it was either added or changed */
+ if (!(en & FD_EV_ACTIVE_R)) {
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ } else {
+ hap_fd_set(fd, fd_evts[DIR_RD]);
+ if (!(pr & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ }
+
+ if (!(en & FD_EV_ACTIVE_W)) {
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ } else {
+ hap_fd_set(fd, fd_evts[DIR_WR]);
+ if (!(ps & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ }
+
+ if (fd > *max_add_fd)
+ *max_add_fd = fd;
+ }
+}
+
+/*
+ * Poll() poller
+ */
+static void _do_poll(struct poller *p, int exp, int wake)
+{
+ int status;
+ int fd;
+ int wait_time;
+ int updt_idx;
+ int fds, count;
+ int sr, sw;
+ int old_maxfd, new_maxfd, max_add_fd;
+ unsigned rn, wn; /* read new, write new */
+ int old_fd;
+
+ max_add_fd = -1;
+
+ /* first, scan the update list to find changes */
+ for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
+ fd = fd_updt[updt_idx];
+
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);
+ if (!fdtab[fd].owner) {
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+ _update_fd(fd, &max_add_fd);
+ }
+
+ /* Now scan the global update list */
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
+ if (fd == -2) {
+ fd = old_fd;
+ continue;
+ }
+ else if (fd <= -3)
+ fd = -fd -4;
+ if (fd == -1)
+ break;
+ if (fdtab[fd].update_mask & ti->ltid_bit) {
+ /* Cheat a bit, as the state is global to all pollers
+ * we don't need every thread to take care of the
+ * update.
+ */
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tg->threads_enabled);
+ done_update_polling(fd);
+ } else
+ continue;
+ if (!fdtab[fd].owner)
+ continue;
+ _update_fd(fd, &max_add_fd);
+ }
+
+ /* maybe we added at least one fd larger than maxfd */
+ for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
+ if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
+ break;
+ }
+
+ /* maxfd doesn't need to be precise but it needs to cover *all* active
+ * FDs. Thus we only shrink it if we have such an opportunity. The algo
+ * is simple : look for the previous used place, try to update maxfd to
+ * point to it, abort if maxfd changed in the mean time.
+ */
+ old_maxfd = maxfd;
+ do {
+ new_maxfd = old_maxfd;
+ while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
+ new_maxfd--;
+ if (new_maxfd >= old_maxfd)
+ break;
+ } while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+
+ thread_idle_now();
+ thread_harmless_now();
+
+ fd_nbupdt = 0;
+
+ nbfd = 0;
+ for (fds = 0; (fds * 8*sizeof(**fd_evts)) < maxfd; fds++) {
+ rn = fd_evts[DIR_RD][fds];
+ wn = fd_evts[DIR_WR][fds];
+
+ if (!(rn|wn))
+ continue;
+
+ for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {
+ sr = (rn >> count) & 1;
+ sw = (wn >> count) & 1;
+ if ((sr|sw)) {
+ if (!fdtab[fd].owner) {
+ /* should normally not happen here except
+ * due to rare thread concurrency
+ */
+ continue;
+ }
+
+ if (!(fdtab[fd].thread_mask & ti->ltid_bit)) {
+ continue;
+ }
+
+ poll_events[nbfd].fd = fd;
+ poll_events[nbfd].events = (sr ? (POLLIN | POLLRDHUP) : 0) | (sw ? POLLOUT : 0);
+ nbfd++;
+ }
+ }
+ }
+
+ /* Now let's wait for polled events. */
+ wait_time = wake ? 0 : compute_poll_timeout(exp);
+ clock_entering_poll();
+ status = poll(poll_events, nbfd, wait_time);
+ clock_update_date(wait_time, status);
+
+ fd_leaving_poll(wait_time, status);
+
+ if (status > 0)
+ activity[tid].poll_io++;
+
+ for (count = 0; status > 0 && count < nbfd; count++) {
+ unsigned int n;
+ int e = poll_events[count].revents;
+
+ fd = poll_events[count].fd;
+
+ if ((e & POLLRDHUP) && !(cur_poller.flags & HAP_POLL_F_RDHUP))
+ _HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
+
+#ifdef DEBUG_FD
+ _HA_ATOMIC_INC(&fdtab[fd].event_count);
+#endif
+ if (!(e & ( POLLOUT | POLLIN | POLLERR | POLLHUP | POLLRDHUP )))
+ continue;
+
+ /* ok, we found one active fd */
+ status--;
+
+ n = ((e & POLLIN) ? FD_EV_READY_R : 0) |
+ ((e & POLLOUT) ? FD_EV_READY_W : 0) |
+ ((e & POLLRDHUP) ? FD_EV_SHUT_R : 0) |
+ ((e & POLLHUP) ? FD_EV_SHUT_RW : 0) |
+ ((e & POLLERR) ? FD_EV_ERR_RW : 0);
+
+ fd_update_events(fd, n);
+ }
+}
+
+
+static int init_poll_per_thread()
+{
+ poll_events = calloc(1, sizeof(struct pollfd) * global.maxsock);
+ if (poll_events == NULL)
+ return 0;
+ return 1;
+}
+
+static void deinit_poll_per_thread()
+{
+ ha_free(&poll_events);
+}
+
+/*
+ * Initialization of the poll() poller.
+ * Returns 0 in case of failure, non-zero in case of success. If it fails, it
+ * disables the poller by setting its pref to 0.
+ */
+static int _do_init(struct poller *p)
+{
+ __label__ fail_swevt, fail_srevt;
+ int fd_evts_bytes;
+
+ p->private = NULL;
+
+ /* this old poller uses a process-wide FD list that cannot work with
+ * groups.
+ */
+ if (global.nbtgroups > 1)
+ goto fail_srevt;
+
+ fd_evts_bytes = (global.maxsock + sizeof(**fd_evts) * 8 - 1) / (sizeof(**fd_evts) * 8) * sizeof(**fd_evts);
+
+ if ((fd_evts[DIR_RD] = calloc(1, fd_evts_bytes)) == NULL)
+ goto fail_srevt;
+ if ((fd_evts[DIR_WR] = calloc(1, fd_evts_bytes)) == NULL)
+ goto fail_swevt;
+
+ hap_register_per_thread_init(init_poll_per_thread);
+ hap_register_per_thread_deinit(deinit_poll_per_thread);
+
+ return 1;
+
+ fail_swevt:
+ free(fd_evts[DIR_RD]);
+ fail_srevt:
+ p->pref = 0;
+ return 0;
+}
+
+/*
+ * Termination of the poll() poller.
+ * Memory is released and the poller is marked as unselectable.
+ */
+static void _do_term(struct poller *p)
+{
+ free(fd_evts[DIR_WR]);
+ free(fd_evts[DIR_RD]);
+ p->private = NULL;
+ p->pref = 0;
+}
+
+/*
+ * Check that the poller works.
+ * Returns 1 if OK, otherwise 0.
+ */
+static int _do_test(struct poller *p)
+{
+ return 1;
+}
+
+/*
+ * Registers the poller.
+ */
+static void _do_register(void)
+{
+ struct poller *p;
+
+ if (nbpollers >= MAX_POLLERS)
+ return;
+ p = &pollers[nbpollers++];
+
+ p->name = "poll";
+ p->pref = 200;
+ p->flags = HAP_POLL_F_ERRHUP;
+ p->private = NULL;
+
+ p->clo = __fd_clo;
+ p->test = _do_test;
+ p->init = _do_init;
+ p->term = _do_term;
+ p->poll = _do_poll;
+}
+
+INITCALL0(STG_REGISTER, _do_register);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/ev_select.c b/src/ev_select.c
new file mode 100644
index 0000000..eadd588
--- /dev/null
+++ b/src/ev_select.c
@@ -0,0 +1,335 @@
+/*
+ * FD polling functions for generic select()
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+
+
+/* private data */
+static int maxfd; /* # of the highest fd + 1 */
+static unsigned int *fd_evts[2];
+static THREAD_LOCAL fd_set *tmp_evts[2];
+
+/* Immediately remove the entry upon close() */
+static void __fd_clo(int fd)
+{
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+}
+
+static void _update_fd(int fd, int *max_add_fd)
+{
+ int en;
+ ulong pr, ps;
+
+ en = fdtab[fd].state;
+ pr = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_recv);
+ ps = _HA_ATOMIC_LOAD(&polled_mask[fd].poll_send);
+
+ /* we have a single state for all threads, which is why we
+ * don't check the tid_bit. First thread to see the update
+ * takes it for every other one.
+ */
+ if (!(en & FD_EV_ACTIVE_RW)) {
+ if (!(pr | ps)) {
+ /* fd was not watched, it's still not */
+ return;
+ }
+ /* fd totally removed from poll list */
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, 0);
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, 0);
+ }
+ else {
+ /* OK fd has to be monitored, it was either added or changed */
+ if (!(en & FD_EV_ACTIVE_R)) {
+ hap_fd_clr(fd, fd_evts[DIR_RD]);
+ if (pr & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_recv, ~ti->ltid_bit);
+ } else {
+ hap_fd_set(fd, fd_evts[DIR_RD]);
+ if (!(pr & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_recv, ti->ltid_bit);
+ }
+
+ if (!(en & FD_EV_ACTIVE_W)) {
+ hap_fd_clr(fd, fd_evts[DIR_WR]);
+ if (ps & ti->ltid_bit)
+ _HA_ATOMIC_AND(&polled_mask[fd].poll_send, ~ti->ltid_bit);
+ } else {
+ hap_fd_set(fd, fd_evts[DIR_WR]);
+ if (!(ps & ti->ltid_bit))
+ _HA_ATOMIC_OR(&polled_mask[fd].poll_send, ti->ltid_bit);
+ }
+
+ if (fd > *max_add_fd)
+ *max_add_fd = fd;
+ }
+}
+
+/*
+ * Select() poller
+ */
+static void _do_poll(struct poller *p, int exp, int wake)
+{
+ int status;
+ int fd, i;
+ struct timeval delta;
+ int delta_ms;
+ int fds;
+ int updt_idx;
+ char count;
+ int readnotnull, writenotnull;
+ int old_maxfd, new_maxfd, max_add_fd;
+ int old_fd;
+
+ max_add_fd = -1;
+
+ /* first, scan the update list to find changes */
+ for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
+ fd = fd_updt[updt_idx];
+
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~ti->ltid_bit);
+ if (!fdtab[fd].owner) {
+ activity[tid].poll_drop_fd++;
+ continue;
+ }
+ _update_fd(fd, &max_add_fd);
+ }
+ /* Now scan the global update list */
+ for (old_fd = fd = update_list[tgid - 1].first; fd != -1; fd = fdtab[fd].update.next) {
+ if (fd == -2) {
+ fd = old_fd;
+ continue;
+ }
+ else if (fd <= -3)
+ fd = -fd -4;
+ if (fd == -1)
+ break;
+ if (fdtab[fd].update_mask & ti->ltid_bit) {
+ /* Cheat a bit, as the state is global to all pollers
+ * we don't need every thread to take care of the
+ * update.
+ */
+ _HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tg->threads_enabled);
+ done_update_polling(fd);
+ } else
+ continue;
+ if (!fdtab[fd].owner)
+ continue;
+ _update_fd(fd, &max_add_fd);
+ }
+
+
+ /* maybe we added at least one fd larger than maxfd */
+ for (old_maxfd = maxfd; old_maxfd <= max_add_fd; ) {
+ if (_HA_ATOMIC_CAS(&maxfd, &old_maxfd, max_add_fd + 1))
+ break;
+ }
+
+ /* maxfd doesn't need to be precise but it needs to cover *all* active
+ * FDs. Thus we only shrink it if we have such an opportunity. The algo
+ * is simple : look for the previous used place, try to update maxfd to
+ * point to it, abort if maxfd changed in the mean time.
+ */
+ old_maxfd = maxfd;
+ do {
+ new_maxfd = old_maxfd;
+ while (new_maxfd - 1 >= 0 && !fdtab[new_maxfd - 1].owner)
+ new_maxfd--;
+ if (new_maxfd >= old_maxfd)
+ break;
+ } while (!_HA_ATOMIC_CAS(&maxfd, &old_maxfd, new_maxfd));
+
+ thread_idle_now();
+ thread_harmless_now();
+
+ fd_nbupdt = 0;
+
+ /* let's restore fdset state */
+ readnotnull = 0; writenotnull = 0;
+ for (i = 0; i < (maxfd + FD_SETSIZE - 1)/(8*sizeof(int)); i++) {
+ readnotnull |= (*(((int*)tmp_evts[DIR_RD])+i) = *(((int*)fd_evts[DIR_RD])+i)) != 0;
+ writenotnull |= (*(((int*)tmp_evts[DIR_WR])+i) = *(((int*)fd_evts[DIR_WR])+i)) != 0;
+ }
+
+ /* now let's wait for events */
+ delta_ms = wake ? 0 : compute_poll_timeout(exp);
+ delta.tv_sec = (delta_ms / 1000);
+ delta.tv_usec = (delta_ms % 1000) * 1000;
+ clock_entering_poll();
+ status = select(maxfd,
+ readnotnull ? tmp_evts[DIR_RD] : NULL,
+ writenotnull ? tmp_evts[DIR_WR] : NULL,
+ NULL,
+ &delta);
+ clock_update_date(delta_ms, status);
+ fd_leaving_poll(delta_ms, status);
+
+ if (status <= 0)
+ return;
+
+ activity[tid].poll_io++;
+
+ for (fds = 0; (fds * BITS_PER_INT) < maxfd; fds++) {
+ if ((((int *)(tmp_evts[DIR_RD]))[fds] | ((int *)(tmp_evts[DIR_WR]))[fds]) == 0)
+ continue;
+
+ for (count = BITS_PER_INT, fd = fds * BITS_PER_INT; count && fd < maxfd; count--, fd++) {
+ unsigned int n = 0;
+
+ if (FD_ISSET(fd, tmp_evts[DIR_RD]))
+ n |= FD_EV_READY_R;
+
+ if (FD_ISSET(fd, tmp_evts[DIR_WR]))
+ n |= FD_EV_READY_W;
+
+ if (!n)
+ continue;
+
+#ifdef DEBUG_FD
+ _HA_ATOMIC_INC(&fdtab[fd].event_count);
+#endif
+
+ fd_update_events(fd, n);
+ }
+ }
+}
+
+static int init_select_per_thread()
+{
+ int fd_set_bytes;
+
+ fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
+ tmp_evts[DIR_RD] = calloc(1, fd_set_bytes);
+ if (tmp_evts[DIR_RD] == NULL)
+ goto fail;
+ tmp_evts[DIR_WR] = calloc(1, fd_set_bytes);
+ if (tmp_evts[DIR_WR] == NULL)
+ goto fail;
+ return 1;
+ fail:
+ free(tmp_evts[DIR_RD]);
+ free(tmp_evts[DIR_WR]);
+ return 0;
+}
+
+static void deinit_select_per_thread()
+{
+ ha_free(&tmp_evts[DIR_WR]);
+ ha_free(&tmp_evts[DIR_RD]);
+}
+
+/*
+ * Initialization of the select() poller.
+ * Returns 0 in case of failure, non-zero in case of success. If it fails, it
+ * disables the poller by setting its pref to 0.
+ */
+static int _do_init(struct poller *p)
+{
+ int fd_set_bytes;
+
+ p->private = NULL;
+
+ /* this old poller uses a process-wide FD list that cannot work with
+ * groups.
+ */
+ if (global.nbtgroups > 1)
+ goto fail_srevt;
+
+ if (global.maxsock > FD_SETSIZE)
+ goto fail_srevt;
+
+ fd_set_bytes = sizeof(fd_set) * (global.maxsock + FD_SETSIZE - 1) / FD_SETSIZE;
+
+ if ((fd_evts[DIR_RD] = calloc(1, fd_set_bytes)) == NULL)
+ goto fail_srevt;
+ if ((fd_evts[DIR_WR] = calloc(1, fd_set_bytes)) == NULL)
+ goto fail_swevt;
+
+ hap_register_per_thread_init(init_select_per_thread);
+ hap_register_per_thread_deinit(deinit_select_per_thread);
+
+ return 1;
+
+ fail_swevt:
+ free(fd_evts[DIR_RD]);
+ fail_srevt:
+ p->pref = 0;
+ return 0;
+}
+
+/*
+ * Termination of the select() poller.
+ * Memory is released and the poller is marked as unselectable.
+ */
+static void _do_term(struct poller *p)
+{
+ free(fd_evts[DIR_WR]);
+ free(fd_evts[DIR_RD]);
+ p->private = NULL;
+ p->pref = 0;
+}
+
+/*
+ * Check that the poller works.
+ * Returns 1 if OK, otherwise 0.
+ */
+static int _do_test(struct poller *p)
+{
+ if (global.maxsock > FD_SETSIZE)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Registers the poller.
+ */
+static void _do_register(void)
+{
+ struct poller *p;
+
+ if (nbpollers >= MAX_POLLERS)
+ return;
+ p = &pollers[nbpollers++];
+
+ p->name = "select";
+ p->pref = 150;
+ p->flags = 0;
+ p->private = NULL;
+
+ p->clo = __fd_clo;
+ p->test = _do_test;
+ p->init = _do_init;
+ p->term = _do_term;
+ p->poll = _do_poll;
+}
+
+INITCALL0(STG_REGISTER, _do_register);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/event_hdl.c b/src/event_hdl.c
new file mode 100644
index 0000000..aeb4d24
--- /dev/null
+++ b/src/event_hdl.c
@@ -0,0 +1,999 @@
+/*
+ * general purpose event handlers management
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <string.h>
+#include <haproxy/event_hdl.h>
+#include <haproxy/compiler.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+#include <haproxy/errors.h>
+#include <haproxy/signal.h>
+#include <haproxy/xxhash.h>
+#include <haproxy/cfgparse.h>
+
+/* event types changes in event_hdl-t.h file should be reflected in the
+ * map below to allow string to type and type to string conversions
+ */
+static struct event_hdl_sub_type_map event_hdl_sub_type_map[] = {
+ {"NONE", EVENT_HDL_SUB_NONE},
+ {"SERVER", EVENT_HDL_SUB_SERVER},
+ {"SERVER_ADD", EVENT_HDL_SUB_SERVER_ADD},
+ {"SERVER_DEL", EVENT_HDL_SUB_SERVER_DEL},
+ {"SERVER_UP", EVENT_HDL_SUB_SERVER_UP},
+ {"SERVER_DOWN", EVENT_HDL_SUB_SERVER_DOWN},
+ {"SERVER_STATE", EVENT_HDL_SUB_SERVER_STATE},
+ {"SERVER_ADMIN", EVENT_HDL_SUB_SERVER_ADMIN},
+ {"SERVER_CHECK", EVENT_HDL_SUB_SERVER_CHECK},
+ {"SERVER_INETADDR", EVENT_HDL_SUB_SERVER_INETADDR},
+};
+
+/* internal types (only used in this file) */
+struct event_hdl_async_task_default_ctx
+{
+ event_hdl_async_equeue e_queue; /* event queue list */
+ event_hdl_cb_async func; /* event handling func */
+};
+
+/* memory pools declarations */
+DECLARE_STATIC_POOL(pool_head_sub, "ehdl_sub", sizeof(struct event_hdl_sub));
+DECLARE_STATIC_POOL(pool_head_sub_event, "ehdl_sub_e", sizeof(struct event_hdl_async_event));
+DECLARE_STATIC_POOL(pool_head_sub_event_data, "ehdl_sub_ed", sizeof(struct event_hdl_async_event_data));
+DECLARE_STATIC_POOL(pool_head_sub_taskctx, "ehdl_sub_tctx", sizeof(struct event_hdl_async_task_default_ctx));
+
+/* global event_hdl tunables (public variable) */
+struct event_hdl_tune event_hdl_tune;
+
+/* global subscription list (implicit where NULL is used as sublist argument) */
+static event_hdl_sub_list global_event_hdl_sub_list;
+
+/* every known subscription lists are tracked in this list (including the global one) */
+static struct mt_list known_event_hdl_sub_list = MT_LIST_HEAD_INIT(known_event_hdl_sub_list);
+
+static void _event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list);
+
+static void event_hdl_deinit(struct sig_handler *sh)
+{
+ event_hdl_sub_list *cur_list;
+ struct mt_list *elt1, elt2;
+
+ /* destroy all known subscription lists */
+ mt_list_for_each_entry_safe(cur_list, &known_event_hdl_sub_list, known, elt1, elt2) {
+ /* remove cur elem from list */
+ MT_LIST_DELETE_SAFE(elt1);
+ /* then destroy it */
+ _event_hdl_sub_list_destroy(cur_list);
+ }
+}
+
+static void event_hdl_init(void)
+{
+ /* initialize global subscription list */
+ event_hdl_sub_list_init(&global_event_hdl_sub_list);
+ /* register the deinit function, will be called on soft-stop */
+ signal_register_fct(0, event_hdl_deinit, 0);
+
+ /* set some default values */
+ event_hdl_tune.max_events_at_once = EVENT_HDL_MAX_AT_ONCE;
+}
+
+/* general purpose hashing function when you want to compute
+ * an ID based on <scope> x <name>
+ * It is your responsibility to make sure <scope> is not used
+ * elsewhere in the code (or that you are fine with sharing
+ * the scope).
+ */
+inline uint64_t event_hdl_id(const char *scope, const char *name)
+{
+ XXH64_state_t state;
+
+ XXH64_reset(&state, 0);
+ XXH64_update(&state, scope, strlen(scope));
+ XXH64_update(&state, name, strlen(name));
+ return XXH64_digest(&state);
+}
+
+/* takes a sub_type as input, returns corresponding sub_type
+ * printable string or "N/A" if not found.
+ * If not found, an error will be reported to stderr so the developers
+ * know that a sub_type is missing its associated string in event_hdl-t.h
+ */
+const char *event_hdl_sub_type_to_string(struct event_hdl_sub_type sub_type)
+{
+ int it;
+
+ for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
+ if (sub_type.family == event_hdl_sub_type_map[it].type.family &&
+ sub_type.subtype == event_hdl_sub_type_map[it].type.subtype)
+ return event_hdl_sub_type_map[it].name;
+ }
+ ha_alert("event_hdl-t.h: missing sub_type string representation.\n"
+ "Please reflect any changes in event_hdl_sub_type_map.\n");
+ return "N/A";
+}
+
+/* returns the internal sub_type corresponding
+ * to the printable representation <name>
+ * or EVENT_HDL_SUB_NONE if no such event exists
+ * (see event_hdl-t.h for the complete list of supported types)
+ */
+struct event_hdl_sub_type event_hdl_string_to_sub_type(const char *name)
+{
+ int it;
+
+ for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
+ if (!strcmp(name, event_hdl_sub_type_map[it].name))
+ return event_hdl_sub_type_map[it].type;
+ }
+ return EVENT_HDL_SUB_NONE;
+}
+
+/* Takes <subscriptions> sub list as input, returns a printable string
+ * containing every sub_types contained in <subscriptions>
+ * separated by '|' char.
+ * Returns NULL if no sub_types are found in <subscriptions>
+ * This functions leverages memprintf, thus it is up to the
+ * caller to free the returned value (if != NULL) when he no longer
+ * uses it.
+ */
+char *event_hdl_sub_type_print(struct event_hdl_sub_type subscriptions)
+{
+ char *out = NULL;
+ int it;
+ uint8_t first = 1;
+
+ for (it = 0; it < (int)(sizeof(event_hdl_sub_type_map) / sizeof(event_hdl_sub_type_map[0])); it++) {
+ if (subscriptions.family == event_hdl_sub_type_map[it].type.family &&
+ ((subscriptions.subtype & event_hdl_sub_type_map[it].type.subtype) ==
+ event_hdl_sub_type_map[it].type.subtype)) {
+ if (first) {
+ memprintf(&out, "%s", event_hdl_sub_type_map[it].name);
+ first--;
+ }
+ else
+ memprintf(&out, "%s%s%s", out, "|", event_hdl_sub_type_map[it].name);
+ }
+ }
+
+ return out;
+}
+
+/* event_hdl debug/reporting function */
+typedef void (*event_hdl_report_hdl_state_func)(const char *fmt, ...);
+static void event_hdl_report_hdl_state(event_hdl_report_hdl_state_func report_func,
+ const struct event_hdl *hdl, const char *what, const char *state)
+{
+ report_func("[event_hdl]:%s (%s)'#%llu@%s': %s\n",
+ what,
+ (hdl->async) ? "ASYNC" : "SYNC",
+ (long long unsigned int)hdl->id,
+ hdl->dorigin,
+ state);
+}
+
+static inline void _event_hdl_async_data_drop(struct event_hdl_async_event_data *data)
+{
+ if (HA_ATOMIC_SUB_FETCH(&data->refcount, 1) == 0) {
+ /* we were the last one holding a reference to event data - free required */
+ if (data->mfree) {
+ /* Some event data members are dynamically allocated and thus
+ * require specific cleanup using user-provided function.
+ * We directly pass a pointer to internal data storage but
+ * we only expect the cleanup function to typecast it in the
+ * relevant data type to give enough context to the function to
+ * perform the cleanup on data members, and not actually freeing
+ * data pointer since it is our internal buffer :)
+ */
+ data->mfree(&data->data);
+ }
+ pool_free(pool_head_sub_event_data, data);
+ }
+}
+
+void event_hdl_async_free_event(struct event_hdl_async_event *e)
+{
+ if (unlikely(event_hdl_sub_type_equal(e->type, EVENT_HDL_SUB_END))) {
+ /* last event for hdl, special case */
+ /* free subscription entry as we're the last one still using it
+ * (it is already removed from mt_list, no race can occur)
+ */
+ event_hdl_drop(e->sub_mgmt.this);
+ HA_ATOMIC_DEC(&jobs);
+ }
+ else if (e->_data)
+ _event_hdl_async_data_drop(e->_data); /* data wrapper */
+ pool_free(pool_head_sub_event, e);
+}
+
+/* wakeup the task depending on its type:
+ * normal async mode internally uses tasklets but advanced async mode
+ * allows both tasks and tasklets.
+ * While tasks and tasklets may be easily casted, we need to use the proper
+ * API to wake them up (the waiting queues are exclusive).
+ */
+static void event_hdl_task_wakeup(struct tasklet *task)
+{
+ if (TASK_IS_TASKLET(task))
+ tasklet_wakeup(task);
+ else
+ task_wakeup((struct task *)task, TASK_WOKEN_OTHER); /* TODO: switch to TASK_WOKEN_EVENT? */
+}
+
+/* task handler used for normal async subscription mode
+ * if you use advanced async subscription mode, you can use this
+ * as an example to implement your own task wrapper
+ */
+static struct task *event_hdl_async_task_default(struct task *task, void *ctx, unsigned int state)
+{
+ struct tasklet *tl = (struct tasklet *)task;
+ struct event_hdl_async_task_default_ctx *task_ctx = ctx;
+ struct event_hdl_async_event *event;
+ int max_notif_at_once_it = 0;
+ uint8_t done = 0;
+
+ /* run through e_queue, and call func() for each event
+ * if we read END event, it indicates we must stop:
+ * no more events to come (handler is unregistered)
+ * so we must free task_ctx and stop task
+ */
+ while (max_notif_at_once_it < event_hdl_tune.max_events_at_once &&
+ (event = event_hdl_async_equeue_pop(&task_ctx->e_queue)))
+ {
+ if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
+ done = 1;
+ event_hdl_async_free_event(event);
+ /* break is normally not even required, EVENT_HDL_SUB_END
+ * is guaranteed to be last event of e_queue
+ * (because in normal mode one sub == one e_queue)
+ */
+ break;
+ }
+ else {
+ struct event_hdl_cb cb;
+
+ cb.e_type = event->type;
+ cb.e_data = event->data;
+ cb.sub_mgmt = &event->sub_mgmt;
+ cb._sync = 0;
+
+ /* call user function */
+ task_ctx->func(&cb, event->private);
+ max_notif_at_once_it++;
+ }
+ event_hdl_async_free_event(event);
+ }
+
+ if (done) {
+ /* our job is done, subscription is over: no more events to come */
+ pool_free(pool_head_sub_taskctx, task_ctx);
+ tasklet_free(tl);
+ return NULL;
+ }
+ return task;
+}
+
+/* internal subscription mgmt functions */
+static inline struct event_hdl_sub_type _event_hdl_getsub(struct event_hdl_sub *cur_sub)
+{
+ return cur_sub->sub;
+}
+
+static inline struct event_hdl_sub_type _event_hdl_getsub_async(struct event_hdl_sub *cur_sub)
+{
+ struct mt_list lock;
+ struct event_hdl_sub_type type = EVENT_HDL_SUB_NONE;
+
+ lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
+ if (lock.next != &cur_sub->mt_list)
+ type = _event_hdl_getsub(cur_sub);
+ // else already removed
+ MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
+ return type;
+}
+
+static inline int _event_hdl_resub(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
+{
+ if (!event_hdl_sub_family_equal(cur_sub->sub, type))
+ return 0; /* family types differ, do nothing */
+ cur_sub->sub.subtype = type.subtype; /* new subtype assignment */
+ return 1;
+}
+
+static inline int _event_hdl_resub_async(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
+{
+ int status = 0;
+ struct mt_list lock;
+
+ lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
+ if (lock.next != &cur_sub->mt_list)
+ status = _event_hdl_resub(cur_sub, type);
+ // else already removed
+ MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
+ return status;
+}
+
+static inline void _event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
+{
+ struct mt_list lock;
+
+ if (del_sub->hdl.async) {
+ /* ASYNC SUB MODE */
+ /* push EVENT_HDL_SUB_END (to notify the task that the subscription is dead) */
+
+ /* push END EVENT in busy state so we can safely wakeup
+ * the task before releasing it.
+ * Not doing that would expose us to a race where the task could've already
+ * consumed the END event before the wakeup, and some tasks
+ * kill themselves (ie: normal async mode) when they receive such event
+ */
+ HA_ATOMIC_INC(&del_sub->hdl.async_equeue->size);
+ lock = MT_LIST_APPEND_LOCKED(&del_sub->hdl.async_equeue->head, &del_sub->async_end->mt_list);
+
+ /* wake up the task */
+ event_hdl_task_wakeup(del_sub->hdl.async_task);
+
+ /* unlock END EVENT (we're done, the task is now free to consume it) */
+ MT_LIST_UNLOCK_ELT(&del_sub->async_end->mt_list, lock);
+
+ /* we don't free sub here
+ * freeing will be performed by async task so it can safely rely
+ * on the pointer until it notices it
+ */
+ } else {
+ /* SYNC SUB MODE */
+
+ /* we can directly free the subscription:
+ * no other thread can access it since we successfully
+ * removed it from the list
+ */
+ event_hdl_drop(del_sub);
+ }
+}
+
+static inline void _event_hdl_unsubscribe_async(struct event_hdl_sub *del_sub)
+{
+ if (!MT_LIST_DELETE(&del_sub->mt_list))
+ return; /* already removed (but may be pending in e_queues) */
+ _event_hdl_unsubscribe(del_sub);
+}
+
+/* sub_mgmt function pointers (for handlers) */
+static struct event_hdl_sub_type event_hdl_getsub_sync(const struct event_hdl_sub_mgmt *mgmt)
+{
+ if (!mgmt)
+ return EVENT_HDL_SUB_NONE;
+
+ if (!mgmt->this)
+ return EVENT_HDL_SUB_NONE; /* already removed from sync ctx */
+ return _event_hdl_getsub(mgmt->this);
+}
+
+static struct event_hdl_sub_type event_hdl_getsub_async(const struct event_hdl_sub_mgmt *mgmt)
+{
+ if (!mgmt)
+ return EVENT_HDL_SUB_NONE;
+
+ return _event_hdl_getsub_async(mgmt->this);
+}
+
+static int event_hdl_resub_sync(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
+{
+ if (!mgmt)
+ return 0;
+
+ if (!mgmt->this)
+ return 0; /* already removed from sync ctx */
+ return _event_hdl_resub(mgmt->this, type);
+}
+
+static int event_hdl_resub_async(const struct event_hdl_sub_mgmt *mgmt, struct event_hdl_sub_type type)
+{
+ if (!mgmt)
+ return 0;
+
+ return _event_hdl_resub_async(mgmt->this, type);
+}
+
+static void event_hdl_unsubscribe_sync(const struct event_hdl_sub_mgmt *mgmt)
+{
+ if (!mgmt)
+ return;
+
+ if (!mgmt->this)
+ return; /* already removed from sync ctx */
+
+ /* assuming that publish sync code will notice that mgmt->this is NULL
+ * and will perform the list removal using MT_LIST_DELETE_SAFE and
+ * _event_hdl_unsubscribe()
+ * while still owning the lock
+ */
+ ((struct event_hdl_sub_mgmt *)mgmt)->this = NULL;
+}
+
+static void event_hdl_unsubscribe_async(const struct event_hdl_sub_mgmt *mgmt)
+{
+ if (!mgmt)
+ return;
+
+ _event_hdl_unsubscribe_async(mgmt->this);
+}
+
+#define EVENT_HDL_SUB_MGMT_ASYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub, \
+ .getsub = event_hdl_getsub_async, \
+ .resub = event_hdl_resub_async, \
+ .unsub = event_hdl_unsubscribe_async}
+#define EVENT_HDL_SUB_MGMT_SYNC(_sub) (struct event_hdl_sub_mgmt){ .this = _sub, \
+ .getsub = event_hdl_getsub_sync, \
+ .resub = event_hdl_resub_sync, \
+ .unsub = event_hdl_unsubscribe_sync}
+
+struct event_hdl_sub *event_hdl_subscribe_ptr(event_hdl_sub_list *sub_list,
+ struct event_hdl_sub_type e_type, struct event_hdl hdl)
+{
+ struct event_hdl_sub *new_sub = NULL;
+ struct mt_list *elt1, elt2;
+ struct event_hdl_async_task_default_ctx *task_ctx = NULL;
+ struct mt_list lock;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ /* hdl API consistency check */
+ /*FIXME: do we need to ensure that if private is set, private_free should be set as well? */
+ BUG_ON((!hdl.async && !hdl.sync_ptr) ||
+ (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL && !hdl.async_ptr) ||
+ (hdl.async == EVENT_HDL_ASYNC_MODE_ADVANCED &&
+ (!hdl.async_equeue || !hdl.async_task)));
+
+ new_sub = pool_alloc(pool_head_sub);
+ if (new_sub == NULL) {
+ goto memory_error;
+ }
+
+ /* assignments */
+ new_sub->sub.family = e_type.family;
+ new_sub->sub.subtype = e_type.subtype;
+ new_sub->flags = 0;
+ new_sub->hdl = hdl;
+
+ if (hdl.async) {
+ /* async END event pre-allocation */
+ new_sub->async_end = pool_alloc(pool_head_sub_event);
+ if (!new_sub->async_end) {
+ /* memory error */
+ goto memory_error;
+ }
+ if (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL) {
+ /* normal mode: no task provided, we must initialize it */
+
+ /* initialize task context */
+ task_ctx = pool_alloc(pool_head_sub_taskctx);
+
+ if (!task_ctx) {
+ /* memory error */
+ goto memory_error;
+ }
+ event_hdl_async_equeue_init(&task_ctx->e_queue);
+ task_ctx->func = new_sub->hdl.async_ptr;
+
+ new_sub->hdl.async_equeue = &task_ctx->e_queue;
+ new_sub->hdl.async_task = tasklet_new();
+
+ if (!new_sub->hdl.async_task) {
+ /* memory error */
+ goto memory_error;
+ }
+ new_sub->hdl.async_task->context = task_ctx;
+ new_sub->hdl.async_task->process = event_hdl_async_task_default;
+ }
+ /* initialize END event (used to notify about subscription ending)
+ * used by both normal and advanced mode:
+ * - to safely terminate the task in normal mode
+ * - to safely free subscription and
+ * keep track of active subscriptions in advanced mode
+ */
+ new_sub->async_end->type = EVENT_HDL_SUB_END;
+ new_sub->async_end->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(new_sub);
+ new_sub->async_end->private = new_sub->hdl.private;
+ new_sub->async_end->_data = NULL;
+ MT_LIST_INIT(&new_sub->async_end->mt_list);
+ }
+ /* set refcount to 2:
+ * 1 for handler (because handler can manage the subscription itself)
+ * 1 for caller (will be dropped automatically if caller use the non-ptr version)
+ */
+ new_sub->refcount = 2;
+
+ /* ready for registration */
+ MT_LIST_INIT(&new_sub->mt_list);
+
+ lock = MT_LIST_LOCK_ELT(&sub_list->known);
+
+ /* check if such identified hdl is not already registered */
+ if (hdl.id) {
+ struct event_hdl_sub *cur_sub;
+ uint8_t found = 0;
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (hdl.id == cur_sub->hdl.id) {
+ /* we found matching registered hdl */
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ /* error already registered */
+ MT_LIST_UNLOCK_ELT(&sub_list->known, lock);
+ event_hdl_report_hdl_state(ha_alert, &hdl, "SUB", "could not subscribe: subscription with this id already exists");
+ goto cleanup;
+ }
+ }
+
+ if (lock.next == &sub_list->known) {
+ /* this is an expected corner case on de-init path, a subscribe attempt
+ * was made but the subscription list is already destroyed, we pretend
+ * it is a memory/IO error since it should not be long before haproxy
+ * enters the deinit() function anyway
+ */
+ MT_LIST_UNLOCK_ELT(&sub_list->known, lock);
+ goto cleanup;
+ }
+
+ /* Append in list (global or user specified list).
+ * For now, append when sync mode, and insert when async mode
+ * so that async handlers are executed first
+ */
+ if (hdl.async) {
+ /* Prevent the task from being aborted on soft-stop: let's wait
+ * until the END event is acknowledged by the task.
+ * (decrease is performed in event_hdl_async_free_event())
+ *
+ * If we don't do this, event_hdl API will leak and we won't give
+ * a chance to the event-handling task to perform cleanup
+ */
+ HA_ATOMIC_INC(&jobs);
+ /* async mode, insert at the beginning of the list */
+ MT_LIST_INSERT(&sub_list->head, &new_sub->mt_list);
+ } else {
+ /* sync mode, append at the end of the list */
+ MT_LIST_APPEND(&sub_list->head, &new_sub->mt_list);
+ }
+
+ MT_LIST_UNLOCK_ELT(&sub_list->known, lock);
+
+ return new_sub;
+
+ cleanup:
+ if (new_sub) {
+ if (hdl.async == EVENT_HDL_ASYNC_MODE_NORMAL) {
+ tasklet_free(new_sub->hdl.async_task);
+ pool_free(pool_head_sub_taskctx, task_ctx);
+ }
+ if (hdl.async)
+ pool_free(pool_head_sub_event, new_sub->async_end);
+ pool_free(pool_head_sub, new_sub);
+ }
+
+ return NULL;
+
+ memory_error:
+ event_hdl_report_hdl_state(ha_warning, &hdl, "SUB", "could not register subscription due to memory error");
+ goto cleanup;
+}
+
+void event_hdl_take(struct event_hdl_sub *sub)
+{
+ HA_ATOMIC_INC(&sub->refcount);
+}
+
+void event_hdl_drop(struct event_hdl_sub *sub)
+{
+ if (HA_ATOMIC_SUB_FETCH(&sub->refcount, 1) != 0)
+ return;
+
+ /* we were the last one holding a reference to event sub - free required */
+ if (sub->hdl.private_free) {
+ /* free private data if specified upon registration */
+ sub->hdl.private_free(sub->hdl.private);
+ }
+ pool_free(pool_head_sub, sub);
+}
+
+int event_hdl_resubscribe(struct event_hdl_sub *cur_sub, struct event_hdl_sub_type type)
+{
+ return _event_hdl_resub_async(cur_sub, type);
+}
+
+void _event_hdl_pause(struct event_hdl_sub *cur_sub)
+{
+ cur_sub->flags |= EHDL_SUB_F_PAUSED;
+}
+
+void event_hdl_pause(struct event_hdl_sub *cur_sub)
+{
+ struct mt_list lock;
+
+ lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
+ if (lock.next != &cur_sub->mt_list)
+ _event_hdl_pause(cur_sub);
+ // else already removed
+ MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
+}
+
+void _event_hdl_resume(struct event_hdl_sub *cur_sub)
+{
+ cur_sub->flags &= ~EHDL_SUB_F_PAUSED;
+}
+
+void event_hdl_resume(struct event_hdl_sub *cur_sub)
+{
+ struct mt_list lock;
+
+ lock = MT_LIST_LOCK_ELT(&cur_sub->mt_list);
+ if (lock.next != &cur_sub->mt_list)
+ _event_hdl_resume(cur_sub);
+ // else already removed
+ MT_LIST_UNLOCK_ELT(&cur_sub->mt_list, lock);
+}
+
+void event_hdl_unsubscribe(struct event_hdl_sub *del_sub)
+{
+ _event_hdl_unsubscribe_async(del_sub);
+ /* drop refcount, assuming caller no longer use ptr */
+ event_hdl_drop(del_sub);
+}
+
+int event_hdl_subscribe(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type, struct event_hdl hdl)
+{
+ struct event_hdl_sub *sub;
+
+ sub = event_hdl_subscribe_ptr(sub_list, e_type, hdl);
+ if (sub) {
+ /* drop refcount because the user is not willing to hold a reference */
+ event_hdl_drop(sub);
+ return 1;
+ }
+ return 0;
+}
+
+/* Subscription external lookup functions
+ */
+int event_hdl_lookup_unsubscribe(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id)
+{
+ struct event_hdl_sub *del_sub = NULL;
+ struct mt_list *elt1, elt2;
+ int found = 0;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ mt_list_for_each_entry_safe(del_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (lookup_id == del_sub->hdl.id) {
+ /* we found matching registered hdl */
+ MT_LIST_DELETE_SAFE(elt1);
+ _event_hdl_unsubscribe(del_sub);
+ found = 1;
+ break; /* id is unique, stop searching */
+ }
+ }
+ return found;
+}
+
+int event_hdl_lookup_resubscribe(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id, struct event_hdl_sub_type type)
+{
+ struct event_hdl_sub *cur_sub = NULL;
+ struct mt_list *elt1, elt2;
+ int status = 0;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (lookup_id == cur_sub->hdl.id) {
+ /* we found matching registered hdl */
+ status = _event_hdl_resub(cur_sub, type);
+ break; /* id is unique, stop searching */
+ }
+ }
+ return status;
+}
+
+int event_hdl_lookup_pause(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id)
+{
+ struct event_hdl_sub *cur_sub = NULL;
+ struct mt_list *elt1, elt2;
+ int found = 0;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (lookup_id == cur_sub->hdl.id) {
+ /* we found matching registered hdl */
+ _event_hdl_pause(cur_sub);
+ found = 1;
+ break; /* id is unique, stop searching */
+ }
+ }
+ return found;
+}
+
+int event_hdl_lookup_resume(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id)
+{
+ struct event_hdl_sub *cur_sub = NULL;
+ struct mt_list *elt1, elt2;
+ int found = 0;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (lookup_id == cur_sub->hdl.id) {
+ /* we found matching registered hdl */
+ _event_hdl_resume(cur_sub);
+ found = 1;
+ break; /* id is unique, stop searching */
+ }
+ }
+ return found;
+}
+
+struct event_hdl_sub *event_hdl_lookup_take(event_hdl_sub_list *sub_list,
+ uint64_t lookup_id)
+{
+ struct event_hdl_sub *cur_sub = NULL;
+ struct mt_list *elt1, elt2;
+ uint8_t found = 0;
+
+ if (!sub_list)
+ sub_list = &global_event_hdl_sub_list; /* fall back to global list */
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ if (lookup_id == cur_sub->hdl.id) {
+ /* we found matching registered hdl */
+ event_hdl_take(cur_sub);
+ found = 1;
+ break; /* id is unique, stop searching */
+ }
+ }
+ if (found)
+ return cur_sub;
+ return NULL;
+}
+
+/* event publishing functions
+ */
+static int _event_hdl_publish(event_hdl_sub_list *sub_list, struct event_hdl_sub_type e_type,
+ const struct event_hdl_cb_data *data)
+{
+ struct event_hdl_sub *cur_sub;
+ struct mt_list *elt1, elt2;
+ struct event_hdl_async_event_data *async_data = NULL; /* reuse async data for multiple async hdls */
+ int error = 0;
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ /* notify each function that has subscribed to sub_family.type, unless paused */
+ if ((cur_sub->sub.family == e_type.family) &&
+ ((cur_sub->sub.subtype & e_type.subtype) == e_type.subtype) &&
+ !(cur_sub->flags & EHDL_SUB_F_PAUSED)) {
+ /* hdl should be notified */
+ if (!cur_sub->hdl.async) {
+ /* sync mode: simply call cb pointer
+ * it is up to the callee to schedule a task if needed or
+ * take specific precautions in order to return as fast as possible
+ * and not use locks that are already held by the caller
+ */
+ struct event_hdl_cb cb;
+ struct event_hdl_sub_mgmt sub_mgmt;
+
+ sub_mgmt = EVENT_HDL_SUB_MGMT_SYNC(cur_sub);
+ cb.e_type = e_type;
+ if (data)
+ cb.e_data = data->_ptr;
+ else
+ cb.e_data = NULL;
+ cb.sub_mgmt = &sub_mgmt;
+ cb._sync = 1;
+
+ /* call user function */
+ cur_sub->hdl.sync_ptr(&cb, cur_sub->hdl.private);
+
+ if (!sub_mgmt.this) {
+ /* user has performed hdl unsub
+ * we must remove it from the list
+ */
+ MT_LIST_DELETE_SAFE(elt1);
+ /* then free it */
+ _event_hdl_unsubscribe(cur_sub);
+ }
+ } else {
+ /* async mode: here we need to prepare event data
+ * and push it to the event_queue of the task(s)
+ * responsible for consuming the events of current
+ * subscription.
+ * Once the event is pushed, we wake up the associated task.
+ * This feature depends on <haproxy/task> that also
+ * depends on <haproxy/pool>:
+ * If STG_PREPARE+STG_POOL is not performed prior to publishing to
+ * async handler, program may crash.
+ * Hopefully, STG_PREPARE+STG_POOL should be done early in
+ * HAProxy startup sequence.
+ */
+ struct event_hdl_async_event *new_event;
+
+ new_event = pool_alloc(pool_head_sub_event);
+ if (!new_event) {
+ error = 1;
+ break; /* stop on error */
+ }
+ new_event->type = e_type;
+ new_event->private = cur_sub->hdl.private;
+ new_event->when = date;
+ new_event->sub_mgmt = EVENT_HDL_SUB_MGMT_ASYNC(cur_sub);
+ if (data) {
+ /* if this fails, please adjust EVENT_HDL_ASYNC_EVENT_DATA in
+ * event_hdl-t.h file or consider providing dynamic struct members
+ * to reduce overall struct size
+ */
+ BUG_ON(data->_size > sizeof(async_data->data));
+ if (!async_data) {
+ /* first async hdl reached - preparing async_data cache */
+ async_data = pool_alloc(pool_head_sub_event_data);
+ if (!async_data) {
+ error = 1;
+ pool_free(pool_head_sub_event, new_event);
+ break; /* stop on error */
+ }
+
+ /* async data assignment */
+ memcpy(async_data->data, data->_ptr, data->_size);
+ async_data->mfree = data->_mfree;
+ /* Initialize refcount, we start at 1 to prevent async
+ * data from being freed by an async handler while we
+ * still use it. We will drop the reference when the
+ * publish is over.
+ *
+ * (first use, atomic operation not required)
+ */
+ async_data->refcount = 1;
+ }
+ new_event->_data = async_data;
+ new_event->data = async_data->data;
+ /* increment refcount because multiple hdls could
+ * use the same async_data
+ */
+ HA_ATOMIC_INC(&async_data->refcount);
+ } else
+ new_event->data = NULL;
+
+ /* appending new event to event hdl queue */
+ MT_LIST_INIT(&new_event->mt_list);
+ HA_ATOMIC_INC(&cur_sub->hdl.async_equeue->size);
+ MT_LIST_APPEND(&cur_sub->hdl.async_equeue->head, &new_event->mt_list);
+
+ /* wake up the task */
+ event_hdl_task_wakeup(cur_sub->hdl.async_task);
+ } /* end async mode */
+ } /* end hdl should be notified */
+ } /* end mt_list */
+ if (async_data) {
+ /* we finished publishing, drop the reference on async data */
+ _event_hdl_async_data_drop(async_data);
+ } else {
+ /* no async subscribers, we are responsible for calling the data
+ * member freeing function if it was provided
+ */
+ if (data && data->_mfree)
+ data->_mfree(data->_ptr);
+ }
+ if (error) {
+ event_hdl_report_hdl_state(ha_warning, &cur_sub->hdl, "PUBLISH", "memory error");
+ return 0;
+ }
+ return 1;
+}
+
+/* Publish function should not be used from high calling rate or time sensitive
+ * places for now, because list lookup based on e_type is not optimized at
+ * all!
+ * Returns 1 in case of SUCCESS:
+ * Subscribed handlers were notified successfully
+ * Returns 0 in case of FAILURE:
+ * FAILURE means memory error while handling the very first async handler from
+ * the subscription list.
+ * As async handlers are executed first within the list, when such failure occurs
+ * you can safely assume that no events were published for the current call
+ */
+int event_hdl_publish(event_hdl_sub_list *sub_list,
+ struct event_hdl_sub_type e_type, const struct event_hdl_cb_data *data)
+{
+ if (!e_type.family) {
+ /* do nothing, these types are reserved for internal use only
+ * (ie: unregistering) */
+ return 0;
+ }
+ if (sub_list) {
+ /* if sublist is provided, first publish event to list subscribers */
+ return _event_hdl_publish(sub_list, e_type, data);
+ } else {
+ /* publish to global list */
+ return _event_hdl_publish(&global_event_hdl_sub_list, e_type, data);
+ }
+}
+
+void event_hdl_sub_list_init(event_hdl_sub_list *sub_list)
+{
+ BUG_ON(!sub_list); /* unexpected, global sublist is managed internally */
+ MT_LIST_INIT(&sub_list->head);
+ MT_LIST_APPEND(&known_event_hdl_sub_list, &sub_list->known);
+}
+
+/* internal function, assumes that sub_list ptr is always valid */
+static void _event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list)
+{
+ struct event_hdl_sub *cur_sub;
+ struct mt_list *elt1, elt2;
+
+ mt_list_for_each_entry_safe(cur_sub, &sub_list->head, mt_list, elt1, elt2) {
+ /* remove cur elem from list */
+ MT_LIST_DELETE_SAFE(elt1);
+ /* then free it */
+ _event_hdl_unsubscribe(cur_sub);
+ }
+}
+
+/* when a subscription list is no longer used, call this
+ * to do the cleanup and make sure all related subscriptions are
+ * safely ended according to their types
+ */
+void event_hdl_sub_list_destroy(event_hdl_sub_list *sub_list)
+{
+ BUG_ON(!sub_list); /* unexpected, global sublist is managed internally */
+ if (!MT_LIST_DELETE(&sub_list->known))
+ return; /* already destroyed */
+ _event_hdl_sub_list_destroy(sub_list);
+}
+
+/* config parser for global "tune.events.max-events-at-once" */
+static int event_hdl_parse_max_events_at_once(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int arg = -1;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) != 0)
+ arg = atoi(args[1]);
+
+ if (arg < 1 || arg > 10000) {
+ memprintf(err, "'%s' expects an integer argument between 1 and 10000.", args[0]);
+ return -1;
+ }
+
+ event_hdl_tune.max_events_at_once = arg;
+ return 0;
+}
+
+/* config keyword parsers */
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.events.max-events-at-once", event_hdl_parse_max_events_at_once },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+INITCALL0(STG_INIT, event_hdl_init);
diff --git a/src/extcheck.c b/src/extcheck.c
new file mode 100644
index 0000000..c667b16
--- /dev/null
+++ b/src/extcheck.c
@@ -0,0 +1,694 @@
+/*
+ * External health-checks functions.
+ *
+ * Copyright 2000-2009,2020 Willy Tarreau <w@1wt.eu>
+ * Copyright 2014 Horms Solutions Ltd, Simon Horman <horms@verge.net.au>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/proxy.h>
+#include <haproxy/server.h>
+#include <haproxy/signal.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+
+
+static struct list pid_list = LIST_HEAD_INIT(pid_list);
+static struct pool_head *pool_head_pid_list __read_mostly;
+__decl_spinlock(pid_list_lock);
+
+struct extcheck_env {
+ char *name; /* environment variable name */
+ int vmaxlen; /* value maximum length, used to determine the required memory allocation */
+};
+
+/* environment variables memory requirement for different types of data */
+#define EXTCHK_SIZE_EVAL_INIT 0 /* size determined during the init phase,
+ * such environment variables are not updatable. */
+#define EXTCHK_SIZE_ULONG 20 /* max string length for an unsigned long value */
+#define EXTCHK_SIZE_UINT 11 /* max string length for an unsigned int value */
+#define EXTCHK_SIZE_ADDR 256 /* max string length for an IPv4/IPv6/UNIX address */
+
+/* external checks environment variables */
+enum {
+ EXTCHK_PATH = 0,
+
+ /* Proxy specific environment variables */
+ EXTCHK_HAPROXY_PROXY_NAME, /* the backend name */
+ EXTCHK_HAPROXY_PROXY_ID, /* the backend id */
+ EXTCHK_HAPROXY_PROXY_ADDR, /* the first bind address if available (or empty) */
+ EXTCHK_HAPROXY_PROXY_PORT, /* the first bind port if available (or empty) */
+
+ /* Server specific environment variables */
+ EXTCHK_HAPROXY_SERVER_NAME, /* the server name */
+ EXTCHK_HAPROXY_SERVER_ID, /* the server id */
+ EXTCHK_HAPROXY_SERVER_ADDR, /* the server address */
+ EXTCHK_HAPROXY_SERVER_PORT, /* the server port if available (or empty) */
+ EXTCHK_HAPROXY_SERVER_MAXCONN, /* the server max connections */
+ EXTCHK_HAPROXY_SERVER_CURCONN, /* the current number of connections on the server */
+ EXTCHK_HAPROXY_SERVER_SSL, /* "1" if the server supports SSL, otherwise zero */
+ EXTCHK_HAPROXY_SERVER_PROTO, /* the server's configured proto, if any */
+
+ EXTCHK_SIZE
+};
+
+const struct extcheck_env extcheck_envs[EXTCHK_SIZE] = {
+ [EXTCHK_PATH] = { "PATH", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_PROXY_NAME] = { "HAPROXY_PROXY_NAME", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_PROXY_ID] = { "HAPROXY_PROXY_ID", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_PROXY_ADDR] = { "HAPROXY_PROXY_ADDR", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_PROXY_PORT] = { "HAPROXY_PROXY_PORT", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_SERVER_NAME] = { "HAPROXY_SERVER_NAME", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_SERVER_ID] = { "HAPROXY_SERVER_ID", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_SERVER_ADDR] = { "HAPROXY_SERVER_ADDR", EXTCHK_SIZE_ADDR },
+ [EXTCHK_HAPROXY_SERVER_PORT] = { "HAPROXY_SERVER_PORT", EXTCHK_SIZE_UINT },
+ [EXTCHK_HAPROXY_SERVER_MAXCONN] = { "HAPROXY_SERVER_MAXCONN", EXTCHK_SIZE_EVAL_INIT },
+ [EXTCHK_HAPROXY_SERVER_CURCONN] = { "HAPROXY_SERVER_CURCONN", EXTCHK_SIZE_ULONG },
+ [EXTCHK_HAPROXY_SERVER_SSL] = { "HAPROXY_SERVER_SSL", EXTCHK_SIZE_UINT },
+ [EXTCHK_HAPROXY_SERVER_PROTO] = { "HAPROXY_SERVER_PROTO", EXTCHK_SIZE_EVAL_INIT },
+};
+
+void block_sigchld(void)
+{
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGCHLD);
+ assert(ha_sigmask(SIG_BLOCK, &set, NULL) == 0);
+}
+
+void unblock_sigchld(void)
+{
+ sigset_t set;
+ sigemptyset(&set);
+ sigaddset(&set, SIGCHLD);
+ assert(ha_sigmask(SIG_UNBLOCK, &set, NULL) == 0);
+}
+
+static struct pid_list *pid_list_add(pid_t pid, struct task *t)
+{
+ struct pid_list *elem;
+ struct check *check = t->context;
+
+ elem = pool_alloc(pool_head_pid_list);
+ if (!elem)
+ return NULL;
+ elem->pid = pid;
+ elem->t = t;
+ elem->exited = 0;
+ check->curpid = elem;
+ LIST_INIT(&elem->list);
+
+ HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+ LIST_INSERT(&pid_list, &elem->list);
+ HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+
+ return elem;
+}
+
+static void pid_list_del(struct pid_list *elem)
+{
+ struct check *check;
+
+ if (!elem)
+ return;
+
+ HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+ LIST_DELETE(&elem->list);
+ HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+
+ if (!elem->exited)
+ kill(elem->pid, SIGTERM);
+
+ check = elem->t->context;
+ check->curpid = NULL;
+ pool_free(pool_head_pid_list, elem);
+}
+
+/* Called from inside SIGCHLD handler, SIGCHLD is blocked */
+static void pid_list_expire(pid_t pid, int status)
+{
+ struct pid_list *elem;
+
+ HA_SPIN_LOCK(PID_LIST_LOCK, &pid_list_lock);
+ list_for_each_entry(elem, &pid_list, list) {
+ if (elem->pid == pid) {
+ elem->t->expire = now_ms;
+ elem->status = status;
+ elem->exited = 1;
+ task_wakeup(elem->t, TASK_WOKEN_IO);
+ break;
+ }
+ }
+ HA_SPIN_UNLOCK(PID_LIST_LOCK, &pid_list_lock);
+}
+
+static void sigchld_handler(struct sig_handler *sh)
+{
+ pid_t pid;
+ int status;
+
+ while ((pid = waitpid(0, &status, WNOHANG)) > 0)
+ pid_list_expire(pid, status);
+}
+
+int init_pid_list(void)
+{
+ if (pool_head_pid_list != NULL)
+ /* Nothing to do */
+ return 0;
+
+ if (!signal_register_fct(SIGCHLD, sigchld_handler, SIGCHLD)) {
+ ha_alert("Failed to set signal handler for external health checks: %s. Aborting.\n",
+ strerror(errno));
+ return 1;
+ }
+
+ pool_head_pid_list = create_pool("pid_list", sizeof(struct pid_list), MEM_F_SHARED);
+ if (pool_head_pid_list == NULL) {
+ ha_alert("Failed to allocate memory pool for external health checks: %s. Aborting.\n",
+ strerror(errno));
+ return 1;
+ }
+
+ return 0;
+}
+
+/* helper macro to set an environment variable and jump to a specific label on failure. */
+#define EXTCHK_SETENV(check, envidx, value, fail) { if (extchk_setenv(check, envidx, value)) goto fail; }
+
+/*
+ * helper function to allocate enough memory to store an environment variable.
+ * It will also check that the environment variable is updatable, and silently
+ * fail if not.
+ */
+static int extchk_setenv(struct check *check, int idx, const char *value)
+{
+ int len, ret;
+ char *envname;
+ int vmaxlen;
+
+ if (idx < 0 || idx >= EXTCHK_SIZE) {
+ ha_alert("Illegal environment variable index %d. Aborting.\n", idx);
+ return 1;
+ }
+
+ envname = extcheck_envs[idx].name;
+ vmaxlen = extcheck_envs[idx].vmaxlen;
+
+ /* Check if the environment variable is already set, and silently reject
+ * the update if this one is not updatable. */
+ if ((vmaxlen == EXTCHK_SIZE_EVAL_INIT) && (check->envp[idx]))
+ return 0;
+
+ /* Instead of sending NOT_USED, sending an empty value is preferable */
+ if (strcmp(value, "NOT_USED") == 0) {
+ value = "";
+ }
+
+ len = strlen(envname) + 1;
+ if (vmaxlen == EXTCHK_SIZE_EVAL_INIT)
+ len += strlen(value);
+ else
+ len += vmaxlen;
+
+ if (!check->envp[idx])
+ check->envp[idx] = malloc(len + 1);
+
+ if (!check->envp[idx]) {
+ ha_alert("Failed to allocate memory for the environment variable '%s'. Aborting.\n", envname);
+ return 1;
+ }
+ ret = snprintf(check->envp[idx], len + 1, "%s=%s", envname, value);
+ if (ret < 0) {
+ ha_alert("Failed to store the environment variable '%s'. Reason : %s. Aborting.\n", envname, strerror(errno));
+ return 1;
+ }
+ else if (ret > len) {
+ ha_alert("Environment variable '%s' was truncated. Aborting.\n", envname);
+ return 1;
+ }
+ return 0;
+}
+
+int prepare_external_check(struct check *check)
+{
+ struct server *s = check->server;
+ struct proxy *px = s->proxy;
+ struct listener *listener = NULL, *l;
+ int i;
+ const char *path = px->check_path ? px->check_path : DEF_CHECK_PATH;
+ char buf[256];
+ const char *svmode = NULL;
+
+ list_for_each_entry(l, &px->conf.listeners, by_fe)
+ /* Use the first INET, INET6 or UNIX listener */
+ if (l->rx.addr.ss_family == AF_INET ||
+ l->rx.addr.ss_family == AF_INET6 ||
+ l->rx.addr.ss_family == AF_UNIX) {
+ listener = l;
+ break;
+ }
+
+ check->curpid = NULL;
+ check->envp = calloc((EXTCHK_SIZE + 1), sizeof(*check->envp));
+ if (!check->envp) {
+ ha_alert("Failed to allocate memory for environment variables. Aborting\n");
+ goto err;
+ }
+
+ check->argv = calloc(6, sizeof(*check->argv));
+ if (!check->argv) {
+ ha_alert("Starting [%s:%s] check: out of memory.\n", px->id, s->id);
+ goto err;
+ }
+
+ check->argv[0] = px->check_command;
+
+ if (!listener) {
+ check->argv[1] = strdup("NOT_USED");
+ check->argv[2] = strdup("NOT_USED");
+ }
+ else if (listener->rx.addr.ss_family == AF_INET ||
+ listener->rx.addr.ss_family == AF_INET6) {
+ addr_to_str(&listener->rx.addr, buf, sizeof(buf));
+ check->argv[1] = strdup(buf);
+ port_to_str(&listener->rx.addr, buf, sizeof(buf));
+ check->argv[2] = strdup(buf);
+ }
+ else if (listener->rx.addr.ss_family == AF_UNIX) {
+ const struct sockaddr_un *un;
+
+ un = (struct sockaddr_un *)&listener->rx.addr;
+ check->argv[1] = strdup(un->sun_path);
+ check->argv[2] = strdup("NOT_USED");
+ }
+ else {
+ ha_alert("Starting [%s:%s] check: unsupported address family.\n", px->id, s->id);
+ goto err;
+ }
+
+ /* args 3 and 4 are the address, they're replaced on each check */
+ check->argv[3] = calloc(EXTCHK_SIZE_ADDR, sizeof(*check->argv[3]));
+ check->argv[4] = calloc(EXTCHK_SIZE_UINT, sizeof(*check->argv[4]));
+
+ for (i = 0; i < 5; i++) {
+ if (!check->argv[i]) {
+ ha_alert("Starting [%s:%s] check: out of memory.\n", px->id, s->id);
+ goto err;
+ }
+ }
+
+ EXTCHK_SETENV(check, EXTCHK_PATH, path, err);
+ /* Add proxy environment variables */
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_PROXY_NAME, px->id, err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_PROXY_ID, ultoa_r(px->uuid, buf, sizeof(buf)), err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_PROXY_ADDR, check->argv[1], err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_PROXY_PORT, check->argv[2], err);
+ /* Add server environment variables */
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_NAME, s->id, err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_ID, ultoa_r(s->puid, buf, sizeof(buf)), err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_ADDR, check->argv[3], err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_PORT, check->argv[4], err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_MAXCONN, ultoa_r(s->maxconn, buf, sizeof(buf)), err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_CURCONN, ultoa_r(s->cur_sess, buf, sizeof(buf)), err);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_SSL, s->use_ssl ? "1" : "0", err);
+
+ switch (px->mode) {
+ case PR_MODE_CLI: svmode = "cli"; break;
+ case PR_MODE_SYSLOG: svmode = "syslog"; break;
+ case PR_MODE_PEERS: svmode = "peers"; break;
+ case PR_MODE_HTTP: svmode = (s->mux_proto) ? s->mux_proto->token.ptr : "h1"; break;
+ case PR_MODE_TCP: svmode = "tcp"; break;
+ /* all valid cases must be enumerated above, below is to avoid a warning */
+ case PR_MODES: svmode = "?"; break;
+ }
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_PROTO, svmode, err);
+
+ /* Ensure that we don't leave any hole in check->envp */
+ for (i = 0; i < EXTCHK_SIZE; i++)
+ if (!check->envp[i])
+ EXTCHK_SETENV(check, i, "", err);
+
+ return 1;
+err:
+ if (check->envp) {
+ for (i = 0; i < EXTCHK_SIZE; i++)
+ free(check->envp[i]);
+ ha_free(&check->envp);
+ }
+
+ if (check->argv) {
+ for (i = 1; i < 5; i++)
+ free(check->argv[i]);
+ ha_free(&check->argv);
+ }
+ return 0;
+}
+
+/*
+ * establish a server health-check that makes use of a process.
+ *
+ * It can return one of :
+ * - SF_ERR_NONE if everything's OK
+ * - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+ * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+ *
+ * Blocks and then unblocks SIGCHLD
+ */
+static int connect_proc_chk(struct task *t)
+{
+ char buf[256];
+ struct check *check = t->context;
+ struct server *s = check->server;
+ struct proxy *px = s->proxy;
+ int status;
+ pid_t pid;
+
+ status = SF_ERR_RESOURCE;
+
+ block_sigchld();
+
+ pid = fork();
+ if (pid < 0) {
+ ha_alert("Failed to fork process for external health check%s: %s. Aborting.\n",
+ (global.tune.options & GTUNE_INSECURE_FORK) ?
+ "" : " (likely caused by missing 'insecure-fork-wanted')",
+ strerror(errno));
+ set_server_check_status(check, HCHK_STATUS_SOCKERR, strerror(errno));
+ goto out;
+ }
+ if (pid == 0) {
+ /* Child */
+ extern char **environ;
+ struct rlimit limit;
+ int fd;
+
+ /* close all FDs. Keep stdin/stdout/stderr in verbose mode */
+ fd = (global.mode & (MODE_QUIET|MODE_VERBOSE)) == MODE_QUIET ? 0 : 3;
+
+ my_closefrom(fd);
+
+ /* restore the initial FD limits */
+ limit.rlim_cur = rlim_fd_cur_at_boot;
+ limit.rlim_max = rlim_fd_max_at_boot;
+ if (raise_rlim_nofile(NULL, &limit) != 0) {
+ getrlimit(RLIMIT_NOFILE, &limit);
+ ha_warning("External check: failed to restore initial FD limits (cur=%u max=%u), using cur=%u max=%u\n",
+ rlim_fd_cur_at_boot, rlim_fd_max_at_boot,
+ (unsigned int)limit.rlim_cur, (unsigned int)limit.rlim_max);
+ }
+
+ if (global.external_check < 2) {
+ /* fresh new env for each check */
+ environ = check->envp;
+ }
+
+ /* Update some environment variables and command args: curconn, server addr and server port */
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_CURCONN, ultoa_r(s->cur_sess, buf, sizeof(buf)), fail);
+
+ if (s->addr.ss_family == AF_UNIX) {
+ const struct sockaddr_un *un = (struct sockaddr_un *)&s->addr;
+ strlcpy2(check->argv[3], un->sun_path, EXTCHK_SIZE_ADDR);
+ memcpy(check->argv[4], "NOT_USED", 9);
+ } else {
+ addr_to_str(&s->addr, check->argv[3], EXTCHK_SIZE_ADDR);
+ *check->argv[4] = 0; // just in case the address family changed
+ if (s->addr.ss_family == AF_INET || s->addr.ss_family == AF_INET6)
+ snprintf(check->argv[4], EXTCHK_SIZE_UINT, "%u", s->svc_port);
+ }
+
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_ADDR, check->argv[3], fail);
+ EXTCHK_SETENV(check, EXTCHK_HAPROXY_SERVER_PORT, check->argv[4], fail);
+
+ if (global.external_check >= 2) {
+ /* environment is preserved, let's merge new vars */
+ int i;
+
+ for (i = 0; check->envp[i] && *check->envp[i]; i++) {
+ char *delim = strchr(check->envp[i], '=');
+ if (!delim)
+ continue;
+ *(delim++) = 0;
+ if (setenv(check->envp[i], delim, 1) != 0)
+ goto fail;
+ }
+ }
+ haproxy_unblock_signals();
+ execvp(px->check_command, check->argv);
+ ha_alert("Failed to exec process for external health check: %s. Aborting.\n",
+ strerror(errno));
+ fail:
+ exit(-1);
+ }
+
+ /* Parent */
+ if (check->result == CHK_RES_UNKNOWN) {
+ if (pid_list_add(pid, t) != NULL) {
+ t->expire = tick_add(now_ms, MS_TO_TICKS(check->inter));
+
+ if (px->timeout.check && px->timeout.connect) {
+ int t_con = tick_add(now_ms, px->timeout.connect);
+ t->expire = tick_first(t->expire, t_con);
+ }
+ status = SF_ERR_NONE;
+ goto out;
+ }
+ else {
+ set_server_check_status(check, HCHK_STATUS_SOCKERR, strerror(errno));
+ }
+ kill(pid, SIGTERM); /* process creation error */
+ }
+ else
+ set_server_check_status(check, HCHK_STATUS_SOCKERR, strerror(errno));
+
+out:
+ unblock_sigchld();
+ return status;
+}
+
+/*
+ * manages a server health-check that uses an external process. Returns
+ * the time the task accepts to wait, or TIME_ETERNITY for infinity.
+ *
+ * Please do NOT place any return statement in this function and only leave
+ * via the out_unlock label.
+ */
+struct task *process_chk_proc(struct task *t, void *context, unsigned int state)
+{
+	struct check *check = context;
+	struct server *s = check->server;
+	int rv;
+	int ret;
+	int expired = tick_is_expired(t->expire, now_ms);
+
+	/* the check state and its pid list entry are protected by the server lock */
+	HA_SPIN_LOCK(SERVER_LOCK, &check->server->lock);
+	if (!(check->state & CHK_ST_INPROGRESS)) {
+		/* no check currently running */
+		if (!expired) /* woke up too early */
+			goto out_unlock;
+
+		/* we don't send any health-checks when the proxy is
+		 * stopped, the server should not be checked or the check
+		 * is disabled.
+		 */
+		if (((check->state & (CHK_ST_ENABLED | CHK_ST_PAUSED)) != CHK_ST_ENABLED) ||
+		    (s->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)))
+			goto reschedule;
+
+		/* we'll initiate a new check */
+		set_server_check_status(check, HCHK_STATUS_START, NULL);
+
+		check->state |= CHK_ST_INPROGRESS;
+
+		ret = connect_proc_chk(t);
+		if (ret == SF_ERR_NONE) {
+			/* the process was forked, we allow up to min(inter,
+			 * timeout.connect) for it to report its status, but
+			 * only when timeout.check is set as it may be too short
+			 * for a full check otherwise.
+			 */
+			t->expire = tick_add(now_ms, MS_TO_TICKS(check->inter));
+
+			if (s->proxy->timeout.check && s->proxy->timeout.connect) {
+				int t_con = tick_add(now_ms, s->proxy->timeout.connect);
+				t->expire = tick_first(t->expire, t_con);
+			}
+			/* bind the task to the current thread while the child runs */
+			task_set_thread(t, tid);
+			goto reschedule;
+		}
+
+		/* here, we failed to start the check */
+
+		check->state &= ~CHK_ST_INPROGRESS;
+		check_notify_failure(check);
+
+		/* we allow up to min(inter, timeout.connect) for a connection
+		 * to establish but only when timeout.check is set
+		 * as it may be too short for a full check otherwise
+		 */
+		while (tick_is_expired(t->expire, now_ms)) {
+			int t_con;
+
+			t_con = tick_add(t->expire, s->proxy->timeout.connect);
+			t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
+
+			if (s->proxy->timeout.check)
+				t->expire = tick_first(t->expire, t_con);
+		}
+	}
+	else {
+		/* there was a test running.
+		 * First, let's check whether there was an uncaught error,
+		 * which can happen on connect timeout or error.
+		 */
+		if (check->result == CHK_RES_UNKNOWN) {
+			/* good connection is enough for pure TCP check */
+			struct pid_list *elem = check->curpid;
+			int status = HCHK_STATUS_UNKNOWN;
+
+			if (elem->exited) {
+				status = elem->status; /* Save in case the process exits between use below */
+				if (!WIFEXITED(status))
+					check->code = -1;
+				else
+					check->code = WEXITSTATUS(status);
+				if (!WIFEXITED(status) || WEXITSTATUS(status))
+					status = HCHK_STATUS_PROCERR;
+				else
+					status = HCHK_STATUS_PROCOK;
+			} else if (expired) {
+				/* the child did not report back in time: kill it */
+				status = HCHK_STATUS_PROCTOUT;
+				ha_warning("kill %d\n", (int)elem->pid);
+				kill(elem->pid, SIGTERM);
+			}
+			set_server_check_status(check, status, NULL);
+		}
+
+		if (check->result == CHK_RES_FAILED) {
+			/* a failure or timeout detected */
+			check_notify_failure(check);
+		}
+		else if (check->result == CHK_RES_CONDPASS) {
+			/* check is OK but asks for stopping mode */
+			check_notify_stopping(check);
+		}
+		else if (check->result == CHK_RES_PASSED) {
+			/* a success was detected */
+			check_notify_success(check);
+		}
+		task_set_thread(t, 0);
+		check->state &= ~CHK_ST_INPROGRESS;
+
+		pid_list_del(check->curpid);
+
+		/* apply a random jitter of +/- spread_checks percent around inter */
+		rv = 0;
+		if (global.spread_checks > 0) {
+			rv = srv_getinter(check) * global.spread_checks / 100;
+			rv -= (int) (2 * rv * (statistical_prng() / 4294967295.0));
+		}
+		t->expire = tick_add(now_ms, MS_TO_TICKS(srv_getinter(check) + rv));
+	}
+
+ reschedule:
+	while (tick_is_expired(t->expire, now_ms))
+		t->expire = tick_add(t->expire, MS_TO_TICKS(check->inter));
+
+ out_unlock:
+	HA_SPIN_UNLOCK(SERVER_LOCK, &check->server->lock);
+	return t;
+}
+
+/* Parses the "external-check" proxy keyword: "external-check command <cmd>"
+ * or "external-check path <path>". Returns 0 on success, a positive value
+ * when a non-fatal warning message was produced, or -1 on fatal error with
+ * <errmsg> filled.
+ */
+int proxy_parse_extcheck(char **args, int section, struct proxy *curpx,
+                         const struct proxy *defpx, const char *file, int line,
+                         char **errmsg)
+{
+	int cur_arg, ret = 0;
+
+	cur_arg = 1;
+	if (!*(args[cur_arg])) {
+		/* no trailing '\n': memprintf() messages are formatted by the caller */
+		memprintf(errmsg, "missing argument after '%s'.", args[0]);
+		goto error;
+	}
+
+	if (strcmp(args[cur_arg], "command") == 0) {
+		if (too_many_args(2, args, errmsg, NULL))
+			goto error;
+		if (!*(args[cur_arg+1])) {
+			memprintf(errmsg, "missing argument after '%s'.", args[cur_arg]);
+			goto error;
+		}
+		/* replace any previous value */
+		free(curpx->check_command);
+		curpx->check_command = strdup(args[cur_arg+1]);
+	}
+	else if (strcmp(args[cur_arg], "path") == 0) {
+		if (too_many_args(2, args, errmsg, NULL))
+			goto error;
+		if (!*(args[cur_arg+1])) {
+			memprintf(errmsg, "missing argument after '%s'.", args[cur_arg]);
+			goto error;
+		}
+		/* replace any previous value */
+		free(curpx->check_path);
+		curpx->check_path = strdup(args[cur_arg+1]);
+	}
+	else {
+		memprintf(errmsg, "'%s' only supports 'command' and 'path', but got '%s'.",
+			  args[0], args[1]);
+		goto error;
+	}
+
+	ret = (*errmsg != NULL); /* Handle warning */
+	return ret;
+
+error:
+	return -1;
+}
+
+/* Enables external process checks on <curpx> ("option external-check").
+ * Returns the error code accumulated by the argument-count check (0 if OK).
+ */
+int proxy_parse_external_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+                                   const char *file, int line)
+{
+	int err_code = 0;
+
+	/* switch the proxy's check type to "external" exclusively */
+	curpx->options2 = (curpx->options2 & ~PR_O2_CHK_ANY) | PR_O2_EXT_CHK;
+	alertif_too_many_args_idx(0, 1, file, line, args, &err_code);
+	return err_code;
+}
+
+/* Registers the "external-check" keyword for listen/frontend/backend
+ * sections at startup.
+ */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_LISTEN, "external-check", proxy_parse_extcheck },
+	{ 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/fcgi-app.c b/src/fcgi-app.c
new file mode 100644
index 0000000..00562f8
--- /dev/null
+++ b/src/fcgi-app.c
@@ -0,0 +1,1133 @@
+/*
+ * Functions about FCGI applications and filters.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/errors.h>
+#include <haproxy/fcgi-app.h>
+#include <haproxy/filters.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/server-t.h>
+#include <haproxy/session.h>
+#include <haproxy/sink.h>
+#include <haproxy/tools.h>
+
+
+/* Global list of all FCGI applications */
+static struct fcgi_app *fcgi_apps = NULL;
+
+/* Filter operations table, filled at the bottom of this file */
+struct flt_ops fcgi_flt_ops;
+/* Unique address used as the fcgi-app filter config identifier */
+const char *fcgi_flt_id = "FCGI filter";
+
+/* Pools for the per-stream filter context and the per-request rule tree nodes */
+DECLARE_STATIC_POOL(pool_head_fcgi_flt_ctx, "fcgi_flt_ctx", sizeof(struct fcgi_flt_ctx));
+DECLARE_STATIC_POOL(pool_head_fcgi_param_rule, "fcgi_param_rule", sizeof(struct fcgi_param_rule));
+DECLARE_STATIC_POOL(pool_head_fcgi_hdr_rule, "fcgi_hdr_rule", sizeof(struct fcgi_hdr_rule));
+
+/**************************************************************************/
+/***************************** Utils **************************************/
+/**************************************************************************/
+/* Makes a fcgi parameter name (prefixed by ':fcgi-') from <name>, in
+ * lowercase. Every non-alphanumeric character is replaced by an underscore
+ * ('_'). The result is written into <dst> and the corresponding ist is
+ * returned.
+ */
+static struct ist fcgi_param_name(char *dst, const struct ist name)
+{
+	size_t pos, i;
+
+	memcpy(dst, ":fcgi-", 6);
+	pos = 6;
+	for (i = 0; i < name.len; i++) {
+		unsigned char c = (unsigned char)name.ptr[i];
+
+		dst[pos++] = isalnum(c) ? ist_lc[c] : '_';
+	}
+	return ist2(dst, pos);
+}
+
+/* Looks up the FCGI application named <name> in the global list. Returns a
+ * pointer to it, or NULL when no application matches.
+ */
+struct fcgi_app *fcgi_app_find_by_name(const char *name)
+{
+	struct fcgi_app *curr = fcgi_apps;
+
+	while (curr) {
+		if (strcmp(curr->name, name) == 0)
+			break;
+		curr = curr->next;
+	}
+	return curr;
+}
+
+/* Returns the fcgi-app filter config attached to proxy <px>, or NULL when
+ * the proxy has no fcgi-app filter.
+ */
+struct fcgi_flt_conf *find_px_fcgi_conf(struct proxy *px)
+{
+	struct flt_conf *fconf;
+
+	list_for_each_entry(fconf, &px->filter_configs, list) {
+		/* identity is tested by pointer on the shared fcgi_flt_id string */
+		if (fconf->id == fcgi_flt_id)
+			return fconf->conf;
+	}
+	return NULL;
+}
+
+/* Returns the FCGI filter context of stream <s>, or NULL when the stream is
+ * NULL or carries no fcgi-app filter.
+ *
+ * NOTE(review): this returns FLT_CONF(filter) (the filter config) rather
+ * than filter->ctx; the only caller here dereferences ->app, which both
+ * structures expose — confirm this is intentional.
+ */
+struct fcgi_flt_ctx *find_strm_fcgi_ctx(struct stream *s)
+{
+	struct filter *filter;
+
+	if (!s)
+		return NULL;
+
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_ID(filter) == fcgi_flt_id)
+			return FLT_CONF(filter);
+	}
+	return NULL;
+}
+
+/* Returns the FCGI application configured on proxy <px>, or NULL if none. */
+struct fcgi_app *get_px_fcgi_app(struct proxy *px)
+{
+	struct fcgi_flt_conf *conf = find_px_fcgi_conf(px);
+
+	return conf ? conf->app : NULL;
+}
+
+/* Returns the FCGI application used by stream <s>, or NULL if none. */
+struct fcgi_app *get_strm_fcgi_app(struct stream *s)
+{
+	struct fcgi_flt_ctx *ctx = find_strm_fcgi_ctx(s);
+
+	return ctx ? ctx->app : NULL;
+}
+
+/* Releases a parsed rule config <rule> and everything it owns (name, value,
+ * ACL condition). Safe to call with a NULL pointer.
+ */
+static void fcgi_release_rule_conf(struct fcgi_rule_conf *rule)
+{
+	if (!rule)
+		return;
+	free(rule->name);
+	free(rule->value);
+	free_acl_cond(rule->cond);
+	free(rule);
+}
+
+/* Releases a runtime rule <rule>, including the logformat nodes of its value
+ * list. Safe to call with a NULL pointer.
+ */
+static void fcgi_release_rule(struct fcgi_rule *rule)
+{
+	if (!rule)
+		return;
+
+	if (!LIST_ISEMPTY(&rule->value)) {
+		struct logformat_node *lf, *lfb;
+
+		list_for_each_entry_safe(lf, lfb, &rule->value, list) {
+			LIST_DELETE(&lf->list);
+			release_sample_expr(lf->expr);
+			free(lf->arg);
+			free(lf);
+		}
+	}
+	/* ->cond and ->name are not owned by the rule */
+	free(rule);
+}
+
+/**************************************************************************/
+/*********************** FCGI Sample fetches ******************************/
+/**************************************************************************/
+
+/* Sample fetch "fcgi.docroot": returns the docroot of the FCGI application
+ * attached to the current stream. Fails when there is no application.
+ */
+static int smp_fetch_fcgi_docroot(const struct arg *args, struct sample *smp,
+                                  const char *kw, void *private)
+{
+	struct fcgi_app *app = get_strm_fcgi_app(smp->strm);
+
+	if (!app)
+		return 0;
+
+	/* the string belongs to the app config, hence SMP_F_CONST */
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.area = app->docroot.ptr;
+	smp->data.u.str.data = app->docroot.len;
+	smp->flags = SMP_F_CONST;
+	return 1;
+}
+
+/* Sample fetch "fcgi.index": returns the index file name of the FCGI
+ * application attached to the current stream. Fails when there is no
+ * application or no index was configured.
+ */
+static int smp_fetch_fcgi_index(const struct arg *args, struct sample *smp,
+                                const char *kw, void *private)
+{
+	struct fcgi_app *app = get_strm_fcgi_app(smp->strm);
+
+	if (!app || !istlen(app->index))
+		return 0;
+
+	/* the string belongs to the app config, hence SMP_F_CONST */
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.area = app->index.ptr;
+	smp->data.u.str.data = app->index.len;
+	smp->flags = SMP_F_CONST;
+	return 1;
+}
+
+/**************************************************************************/
+/************************** FCGI filter ***********************************/
+/**************************************************************************/
+/* Filter init callback: advertises HTX support for the fcgi-app filter. */
+static int fcgi_flt_init(struct proxy *px, struct flt_conf *fconf)
+{
+	fconf->flags |= FLT_CFG_FL_HTX;
+	return 0;
+}
+
+/* Filter deinit callback: releases the fcgi-app filter config and all the
+ * parameter/header runtime rules built by fcgi_flt_check().
+ */
+static void fcgi_flt_deinit(struct proxy *px, struct flt_conf *fconf)
+{
+	struct fcgi_flt_conf *fcgi_conf = fconf->conf;
+	struct fcgi_rule *rule, *back;
+
+	if (!fcgi_conf)
+		return;
+
+	free(fcgi_conf->name);
+
+	list_for_each_entry_safe(rule, back, &fcgi_conf->param_rules, list) {
+		LIST_DELETE(&rule->list);
+		fcgi_release_rule(rule);
+	}
+
+	list_for_each_entry_safe(rule, back, &fcgi_conf->hdr_rules, list) {
+		LIST_DELETE(&rule->list);
+		fcgi_release_rule(rule);
+	}
+
+	free(fcgi_conf);
+}
+
+/* Filter check callback: resolves the named fcgi-app, ensures it is the only
+ * fcgi-app filter on the proxy and was explicitly declared when other
+ * filters are present, then compiles the app's rule configs into runtime
+ * rules (logformat values are parsed here). Returns 0 on success, non-zero
+ * on fatal error.
+ */
+static int fcgi_flt_check(struct proxy *px, struct flt_conf *fconf)
+{
+	struct fcgi_flt_conf *fcgi_conf = fconf->conf;
+	struct fcgi_rule_conf *crule, *back;
+	struct fcgi_rule *rule = NULL;
+	struct flt_conf *f;
+	char *errmsg = NULL;
+
+	fcgi_conf->app = fcgi_app_find_by_name(fcgi_conf->name);
+	if (!fcgi_conf->app) {
+		ha_alert("proxy '%s' : fcgi-app '%s' not found.\n",
+			 px->id, fcgi_conf->name);
+		goto err;
+	}
+
+	list_for_each_entry(f, &px->filter_configs, list) {
+		if (f->id == http_comp_flt_id || f->id == cache_store_flt_id)
+			continue;
+		else if ((f->id == fconf->id) && f->conf != fcgi_conf) {
+			ha_alert("proxy '%s' : only one fcgi-app supported per backend.\n",
+				 px->id);
+			goto err;
+		}
+		else if (f->id != fconf->id) {
+			/* Implicit declaration is only allowed with the
+			 * compression and cache. For other filters, an explicit
+			 * declaration is required. */
+			ha_alert("config: proxy '%s': require an explicit filter declaration "
+				 "to use the fcgi-app '%s'.\n", px->id, fcgi_conf->name);
+			goto err;
+		}
+	}
+
+	list_for_each_entry_safe(crule, back, &fcgi_conf->app->conf.rules, list) {
+		rule = calloc(1, sizeof(*rule));
+		if (!rule) {
+			ha_alert("proxy '%s' : out of memory.\n", px->id);
+			goto err;
+		}
+		rule->type = crule->type;
+		/* the name storage stays owned by the rule config */
+		rule->name = ist(crule->name);
+		rule->cond = crule->cond;
+		LIST_INIT(&rule->value);
+
+		if (crule->value) {
+			if (!parse_logformat_string(crule->value, px, &rule->value, LOG_OPT_HTTP,
+						    SMP_VAL_BE_HRQ_HDR, &errmsg)) {
+				ha_alert("proxy '%s' : %s.\n", px->id, errmsg);
+				goto err;
+			}
+		}
+
+		if (rule->type == FCGI_RULE_SET_PARAM || rule->type == FCGI_RULE_UNSET_PARAM)
+			LIST_APPEND(&fcgi_conf->param_rules, &rule->list);
+		else /* FCGI_RULE_PASS_HDR/FCGI_RULE_HIDE_HDR */
+			LIST_APPEND(&fcgi_conf->hdr_rules, &rule->list);
+	}
+	return 0;
+
+  err:
+	free(errmsg);
+	/* NOTE(review): logformat nodes already appended to rule->value are not
+	 * released here on failure — verify they are freed elsewhere */
+	free(rule);
+	return 1;
+}
+
+/* Stream attach callback: allocates the per-stream FCGI context and enables
+ * request body analysis. Returns 1 on success, 0 on allocation failure (the
+ * filter is then skipped for this stream).
+ */
+static int fcgi_flt_start(struct stream *s, struct filter *filter)
+{
+	struct fcgi_flt_conf *fcgi_conf = FLT_CONF(filter);
+	struct fcgi_flt_ctx *fcgi_ctx;
+
+	fcgi_ctx = pool_alloc(pool_head_fcgi_flt_ctx);
+	if (fcgi_ctx == NULL) {
+		// FIXME: send a warning
+		return 0;
+	}
+	fcgi_ctx->filter = filter;
+	fcgi_ctx->app = fcgi_conf->app;
+	filter->ctx = fcgi_ctx;
+
+	/* make sure the request body is waited for before being forwarded */
+	s->req.analysers |= AN_REQ_HTTP_BODY;
+	return 1;
+}
+
+/* Stream detach callback: releases the per-stream FCGI context allocated by
+ * fcgi_flt_start().
+ */
+static void fcgi_flt_stop(struct stream *s, struct filter *filter)
+{
+	/* fix: the local was declared with the nonexistent tag
+	 * "struct flt_fcgi_ctx" (it only compiled as an opaque pointer);
+	 * use the real type matching pool_head_fcgi_flt_ctx */
+	struct fcgi_flt_ctx *fcgi_ctx = filter->ctx;
+
+	if (!fcgi_ctx)
+		return;
+	pool_free(pool_head_fcgi_flt_ctx, fcgi_ctx);
+	filter->ctx = NULL;
+}
+
+/* http_headers callback. On the response side it drops the "Status" header,
+ * adds "Date" when missing and recomputes "Content-Length" when the full
+ * message length is known. On the request side it evaluates the app's
+ * param/header rules (for a given name the last matching rule wins, thanks
+ * to the ebtree de-duplication) and adds or strips the matching headers.
+ * Always returns 1.
+ */
+static int fcgi_flt_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+	struct session *sess = strm_sess(s);
+	struct buffer *value;
+	struct fcgi_flt_conf *fcgi_conf = FLT_CONF(filter);
+	struct fcgi_rule *rule;
+	struct fcgi_param_rule *param_rule;
+	struct fcgi_hdr_rule *hdr_rule;
+	struct ebpt_node *node, *next;
+	struct eb_root param_rules = EB_ROOT;
+	struct eb_root hdr_rules = EB_ROOT;
+	struct htx *htx;
+	struct http_hdr_ctx ctx;
+	int ret;
+
+	htx = htxbuf(&msg->chn->buf);
+
+	if (msg->chn->flags & CF_ISRESP) {
+		struct htx_sl *sl;
+
+		/* Remove the header "Status:" from the response */
+		ctx.blk = NULL;
+		while (http_find_header(htx, ist("status"), &ctx, 1))
+			http_remove_header(htx, &ctx);
+
+		/* Add the header "Date:" if not found */
+		ctx.blk = NULL;
+		if (!http_find_header(htx, ist("date"), &ctx, 1)) {
+			struct tm tm;
+
+			get_gmtime(date.tv_sec, &tm);
+			trash.data = strftime(trash.area, trash.size, "%a, %d %b %Y %T %Z", &tm);
+			if (trash.data)
+				http_add_header(htx, ist("date"), ist2(trash.area, trash.data));
+		}
+
+		/* Add the header "Content-Length:" if possible */
+		sl = http_get_stline(htx);
+		if (s->txn->meth != HTTP_METH_HEAD && sl &&
+		    (msg->flags & (HTTP_MSGF_XFER_LEN|HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK)) == HTTP_MSGF_XFER_LEN &&
+		    (htx->flags & HTX_FL_EOM)) {
+			struct htx_blk * blk;
+			char *end;
+			size_t len = 0;
+
+			/* sum all DATA blocks, stopping at the trailers */
+			for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+				enum htx_blk_type type = htx_get_blk_type(blk);
+
+				if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+					break;
+				if (type == HTX_BLK_DATA)
+					len += htx_get_blksz(blk);
+			}
+			end = ultoa_o(len, trash.area, trash.size);
+			if (http_add_header(htx, ist("content-length"), ist2(trash.area, end-trash.area))) {
+				sl->flags |= HTX_SL_F_CLEN;
+				msg->flags |= HTTP_MSGF_CNT_LEN;
+			}
+		}
+
+		return 1;
+	}
+
+	/* Analyze the request's headers */
+
+	value = alloc_trash_chunk();
+	if (!value)
+		goto end;
+
+	/* collect the matching param rules; a later rule on the same name
+	 * evicts the earlier node from the tree */
+	list_for_each_entry(rule, &fcgi_conf->param_rules, list) {
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+
+			/* the rule does not match */
+			if (!ret)
+				continue;
+		}
+
+		param_rule = NULL;
+		node = ebis_lookup_len(&param_rules, rule->name.ptr, rule->name.len);
+		if (node) {
+			param_rule = container_of(node, struct fcgi_param_rule, node);
+			ebpt_delete(node);
+		}
+		else {
+			param_rule = pool_alloc(pool_head_fcgi_param_rule);
+			if (param_rule == NULL)
+				goto param_rule_err;
+		}
+
+		param_rule->node.key = rule->name.ptr;
+		param_rule->name = rule->name;
+		param_rule->value = &rule->value;
+		ebis_insert(&param_rules, &param_rule->node);
+	}
+
+	/* same collection for header rules */
+	list_for_each_entry(rule, &fcgi_conf->hdr_rules, list) {
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+
+			/* the rule does not match */
+			if (!ret)
+				continue;
+		}
+
+		hdr_rule = NULL;
+		node = ebis_lookup_len(&hdr_rules, rule->name.ptr, rule->name.len);
+		if (node) {
+			hdr_rule = container_of(node, struct fcgi_hdr_rule, node);
+			ebpt_delete(node);
+		}
+		else {
+			hdr_rule = pool_alloc(pool_head_fcgi_hdr_rule);
+			if (hdr_rule == NULL)
+				goto hdr_rule_err;
+		}
+
+		hdr_rule->node.key = rule->name.ptr;
+		hdr_rule->name = rule->name;
+		hdr_rule->pass = (rule->type == FCGI_RULE_PASS_HDR);
+		ebis_insert(&hdr_rules, &hdr_rule->node);
+	}
+
+	/* materialize the retained param rules as request headers */
+	node = ebpt_first(&param_rules);
+	while (node) {
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		param_rule = container_of(node, struct fcgi_param_rule, node);
+		node = next;
+
+		b_reset(value);
+		value->data = build_logline(s, value->area, value->size, param_rule->value);
+		if (!value->data) {
+			/* empty logformat result: skip this param */
+			pool_free(pool_head_fcgi_param_rule, param_rule);
+			continue;
+		}
+		if (!http_add_header(htx, param_rule->name, ist2(value->area, value->data)))
+			goto rewrite_err;
+		pool_free(pool_head_fcgi_param_rule, param_rule);
+	}
+
+	/* strip the headers matched by non-pass rules */
+	node = ebpt_first(&hdr_rules);
+	while (node) {
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		hdr_rule = container_of(node, struct fcgi_hdr_rule, node);
+		node = next;
+
+		if (!hdr_rule->pass) {
+			ctx.blk = NULL;
+			while (http_find_header(htx, hdr_rule->name, &ctx, 1))
+				http_remove_header(htx, &ctx);
+		}
+		pool_free(pool_head_fcgi_hdr_rule, hdr_rule);
+	}
+
+	goto end;
+
+	/* error paths: account the failed rewrite, then drain both trees */
+  rewrite_err:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+  hdr_rule_err:
+	node = ebpt_first(&hdr_rules);
+	while (node) {
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		hdr_rule = container_of(node, struct fcgi_hdr_rule, node);
+		node = next;
+		pool_free(pool_head_fcgi_hdr_rule, hdr_rule);
+	}
+  param_rule_err:
+	node = ebpt_first(&param_rules);
+	while (node) {
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		param_rule = container_of(node, struct fcgi_param_rule, node);
+		node = next;
+		pool_free(pool_head_fcgi_param_rule, param_rule);
+	}
+  end:
+	free_trash_chunk(value);
+	return 1;
+}
+
+/* fcgi-app filter operations. Note that the per-stream context is managed
+ * from the attach/detach hooks (fcgi_flt_start/fcgi_flt_stop).
+ */
+struct flt_ops fcgi_flt_ops = {
+	.init   = fcgi_flt_init,
+	.check  = fcgi_flt_check,
+	.deinit = fcgi_flt_deinit,
+
+	.attach = fcgi_flt_start,
+	.detach = fcgi_flt_stop,
+
+	.http_headers = fcgi_flt_http_headers,
+};
+
+/**************************************************************************/
+/*********************** FCGI Config parsing ******************************/
+/**************************************************************************/
+/* Parses the "fcgi-app" filter keyword on a "filter" line. When a config for
+ * the same app name was already implicitly created by "use-fcgi-app", it is
+ * recycled at this explicit position. Returns 0 on success, -1 on error with
+ * <err> filled.
+ */
+static int
+parse_fcgi_flt(char **args, int *cur_arg, struct proxy *px,
+               struct flt_conf *fconf, char **err, void *private)
+{
+	struct flt_conf *f, *back;
+	struct fcgi_flt_conf *fcgi_conf = NULL;
+	char *name = NULL;
+	int pos = *cur_arg;
+
+	/* Get the fcgi-app name */
+	if (!*args[pos + 1]) {
+		memprintf(err, "%s : expects a <name> argument", args[pos]);
+		goto err;
+	}
+	name = strdup(args[pos + 1]);
+	if (!name) {
+		memprintf(err, "%s '%s' : out of memory", args[pos], args[pos + 1]);
+		goto err;
+	}
+	pos += 2;
+
+	/* Check if an fcgi-app filter with the same name already exists */
+	list_for_each_entry_safe(f, back, &px->filter_configs, list) {
+		if (f->id != fcgi_flt_id)
+			continue;
+		fcgi_conf = f->conf;
+		if (strcmp(name, fcgi_conf->name) != 0) {
+			fcgi_conf = NULL;
+			continue;
+		}
+
+		/* Place the filter at its right position: drop the implicit
+		 * entry, its fcgi_flt_conf is recycled below */
+		LIST_DELETE(&f->list);
+		free(f);
+		ha_free(&name);
+		break;
+	}
+
+	/* No other fcgi-app filter found, create configuration for the explicit one */
+	if (!fcgi_conf) {
+		fcgi_conf = calloc(1, sizeof(*fcgi_conf));
+		if (!fcgi_conf) {
+			memprintf(err, "%s: out of memory", args[*cur_arg]);
+			goto err;
+		}
+		/* ownership of <name> is transferred to the config */
+		fcgi_conf->name = name;
+		LIST_INIT(&fcgi_conf->param_rules);
+		LIST_INIT(&fcgi_conf->hdr_rules);
+	}
+
+	fconf->id   = fcgi_flt_id;
+	fconf->conf = fcgi_conf;
+	fconf->ops  = &fcgi_flt_ops;
+
+	*cur_arg = pos;
+	return 0;
+  err:
+	free(name);
+	return -1;
+}
+
+/* Parses the "use-fcgi-app" proxy keyword: attaches the named fcgi-app to a
+ * backend/listen proxy by registering an implicit fcgi-app filter config.
+ * Returns 0 on success, -1 on error with <err> filled.
+ */
+static int proxy_parse_use_fcgi_app(char **args, int section, struct proxy *curpx,
+                                    const struct proxy *defpx, const char *file, int line,
+                                    char **err)
+{
+	struct flt_conf *fconf = NULL;
+	struct fcgi_flt_conf *fcgi_conf = NULL;
+	int retval = 0;
+
+	if ((curpx->cap & PR_CAP_DEF) || !(curpx->cap & PR_CAP_BE)) {
+		memprintf(err, "'%s' only available in backend or listen section", args[0]);
+		retval = -1;
+		goto end;
+	}
+
+	if (!*(args[1])) {
+		memprintf(err, "'%s' expects <name> as argument", args[0]);
+		retval = -1;
+		goto end;
+	}
+
+	/* check if a fcgi filter was already registered with this name,
+	 * if that's the case, must use it. */
+	list_for_each_entry(fconf, &curpx->filter_configs, list) {
+		if (fconf->id == fcgi_flt_id) {
+			fcgi_conf = fconf->conf;
+			if (fcgi_conf && strcmp((char *)fcgi_conf->name, args[1]) == 0)
+				goto end;
+			memprintf(err, "'%s' : only one fcgi-app supported per backend", args[0]);
+			retval = -1;
+			goto end;
+		}
+	}
+
+	/* Create the FCGI filter config */
+	fcgi_conf = calloc(1, sizeof(*fcgi_conf));
+	if (!fcgi_conf)
+		goto err;
+	/* NOTE(review): strdup() result is not checked; a failed copy leaves
+	 * ->name NULL — verify downstream users tolerate it */
+	fcgi_conf->name = strdup(args[1]);
+	LIST_INIT(&fcgi_conf->param_rules);
+	LIST_INIT(&fcgi_conf->hdr_rules);
+
+	/* Register the filter */
+	fconf = calloc(1, sizeof(*fconf));
+	if (!fconf)
+		goto err;
+	fconf->id = fcgi_flt_id;
+	fconf->conf = fcgi_conf;
+	fconf->ops = &fcgi_flt_ops;
+	LIST_APPEND(&curpx->filter_configs, &fconf->list);
+
+  end:
+	return retval;
+  err:
+	if (fcgi_conf) {
+		free(fcgi_conf->name);
+		free(fcgi_conf);
+	}
+	memprintf(err, "out of memory");
+	retval = -1;
+	goto end;
+}
+
+/* Finishes the parsing of FCGI applications: cross-checks each application
+ * against the proxies and servers referencing it (HTTP mode only, app/server
+ * consistency, mandatory docroot, multiplexing support) and resolves the app
+ * loggers. Returns a combination of ERR_* codes (0 when everything is OK).
+ */
+static int cfg_fcgi_apps_postparser(void)
+{
+	struct fcgi_app *curapp;
+	struct proxy *px;
+	struct server *srv;
+	int err_code = 0;
+
+	for (px = proxies_list; px; px = px->next) {
+		struct fcgi_flt_conf *fcgi_conf = find_px_fcgi_conf(px);
+		int nb_fcgi_srv = 0;
+
+		if (px->mode == PR_MODE_TCP && fcgi_conf) {
+			ha_alert("proxy '%s': FCGI application cannot be used in non-HTTP mode.\n",
+				 px->id);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+
+		/* By default, for FCGI-ready backend, HTTP request header names
+		 * are restricted and the "delete" policy is set
+		 */
+		if (fcgi_conf && !(px->options2 & PR_O2_RSTRICT_REQ_HDR_NAMES_MASK))
+			px->options2 |= PR_O2_RSTRICT_REQ_HDR_NAMES_DEL;
+
+		for (srv = px->srv; srv; srv = srv->next) {
+			if (srv->mux_proto && isteq(srv->mux_proto->token, ist("fcgi"))) {
+				nb_fcgi_srv++;
+				if (fcgi_conf)
+					continue;
+				ha_alert("proxy '%s': FCGI server '%s' has no FCGI app configured.\n",
+					 px->id, srv->id);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto end;
+			}
+		}
+		if (fcgi_conf && !nb_fcgi_srv) {
+			ha_alert("proxy '%s': FCGI app configured but no FCGI server found.\n",
+				 px->id);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+	}
+
+	for (curapp = fcgi_apps; curapp != NULL; curapp = curapp->next) {
+		if (!istlen(curapp->docroot)) {
+			ha_alert("fcgi-app '%s': no docroot configured.\n",
+				 curapp->name);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+		if (!(curapp->flags & (FCGI_APP_FL_MPXS_CONNS|FCGI_APP_FL_GET_VALUES))) {
+			/* without mpxs-conns or get-values the mux cannot
+			 * multiplex: clamp maxreqs to 1, warning if it was raised
+			 */
+			if (curapp->maxreqs > 1) {
+				ha_warning("fcgi-app '%s': multiplexing not supported, "
+					   "ignore the option 'max-reqs'.\n",
+					   curapp->name);
+				err_code |= ERR_WARN;
+			}
+			curapp->maxreqs = 1;
+		}
+
+		err_code |= postresolve_logger_list(&curapp->loggers, "fcgi-app", curapp->name);
+	}
+
+  end:
+	return err_code;
+}
+
+/* Allocates a rule config of type <type> and appends it to <curapp>'s rule
+ * list. The <name> is normalized through the shared trash buffer (":fcgi-"
+ * prefix + lowercase for param rules, plain lowercase for header rules).
+ * Ownership of <cond> is transferred (it is released on failure). Returns 1
+ * on success, 0 on allocation error with <err> set.
+ */
+static int fcgi_app_add_rule(struct fcgi_app *curapp, enum fcgi_rule_type type, char *name, char *value,
+                             struct acl_cond *cond, char **err)
+{
+	struct fcgi_rule_conf *rule;
+
+	/* Param not found, add a new one */
+	rule = calloc(1, sizeof(*rule));
+	if (!rule)
+		goto err;
+	LIST_INIT(&rule->list);
+	rule->type = type;
+	if (type == FCGI_RULE_SET_PARAM || type == FCGI_RULE_UNSET_PARAM) {
+		struct ist fname = fcgi_param_name(trash.area, ist(name));
+		rule->name = my_strndup(fname.ptr, fname.len);
+	}
+	else { /* FCGI_RULE_PASS_HDR/FCGI_RULE_HIDE_HDR */
+		struct ist fname = ist2bin_lc(trash.area, ist(name));
+		rule->name = my_strndup(fname.ptr, fname.len);
+	}
+	if (!rule->name)
+		goto err;
+
+	if (value) {
+		rule->value = strdup(value);
+		if (!rule->value)
+			goto err;
+	}
+	rule->cond = cond;
+	LIST_APPEND(&curapp->conf.rules, &rule->list);
+	return 1;
+
+  err:
+	if (rule) {
+		free(rule->name);
+		free(rule->value);
+		free(rule);
+	}
+	free_acl_cond(cond);
+	memprintf(err, "out of memory");
+	return 0;
+}
+
+/* Parses "fcgi-app" section */
+static int cfg_parse_fcgi_app(const char *file, int linenum, char **args, int kwm)
+{
+ static struct fcgi_app *curapp = NULL;
+ struct acl_cond *cond = NULL;
+ char *name, *value = NULL;
+ enum fcgi_rule_type type;
+ int err_code = 0;
+ const char *err;
+ char *errmsg = NULL;
+
+ if (strcmp(args[0], "fcgi-app") == 0) { /* new fcgi-app */
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d]: '%s' expects <name> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto out;
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d]: character '%c' is not permitted in '%s' name '%s'.\n",
+ file, linenum, *err, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ for (curapp = fcgi_apps; curapp != NULL; curapp = curapp->next) {
+ if (strcmp(curapp->name, args[1]) == 0) {
+ ha_alert("Parsing [%s:%d]: fcgi-app section '%s' has the same name as another one declared at %s:%d.\n",
+ file, linenum, args[1], curapp->conf.file, curapp->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ curapp = calloc(1, sizeof(*curapp));
+ if (!curapp) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ curapp->next = fcgi_apps;
+ fcgi_apps = curapp;
+ curapp->flags = FCGI_APP_FL_KEEP_CONN;
+ curapp->docroot = ist(NULL);
+ curapp->index = ist(NULL);
+ curapp->pathinfo_re = NULL;
+ curapp->name = strdup(args[1]);
+ curapp->maxreqs = 1;
+ curapp->conf.file = strdup(file);
+ curapp->conf.line = linenum;
+ LIST_INIT(&curapp->acls);
+ LIST_INIT(&curapp->loggers);
+ LIST_INIT(&curapp->conf.args.list);
+ LIST_INIT(&curapp->conf.rules);
+
+ /* Set info about authentication */
+ if (!fcgi_app_add_rule(curapp, FCGI_RULE_SET_PARAM, "REMOTE_USER", "%[http_auth_user]", NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_SET_PARAM, "AUTH_TYPE", "%[http_auth_type]", NULL, &errmsg)) {
+ ha_alert("parsing [%s:%d] : '%s' : %s.\n", file, linenum,
+ args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ /* Hide hop-by-hop headers by default */
+ if (!fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "connection", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "keep-alive", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "authorization", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "proxy", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "proxy-authorization", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "proxy-authenticate", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "te", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "trailers", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "transfer-encoding", NULL, NULL, &errmsg) ||
+ !fcgi_app_add_rule(curapp, FCGI_RULE_HIDE_HDR, "upgrade", NULL, NULL, &errmsg)) {
+ ha_alert("parsing [%s:%d] : '%s' : %s.\n", file, linenum,
+ args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else if (strcmp(args[0], "docroot") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <path> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ istfree(&curapp->docroot);
+ curapp->docroot = ist(strdup(args[1]));
+ if (!isttest(curapp->docroot)) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ }
+ else if (strcmp(args[0], "path-info") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <regex> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ regex_free(curapp->pathinfo_re);
+ curapp->pathinfo_re = regex_comp(args[1], 1, 1, &errmsg);
+ if (!curapp->pathinfo_re) {
+ ha_alert("parsing [%s:%d] : '%s' : %s.\n", file, linenum,
+ args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else if (strcmp(args[0], "index") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <filename> as argument.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ istfree(&curapp->index);
+ curapp->index = ist(strdup(args[1]));
+ if (!isttest(curapp->index)) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ }
+ }
+ else if (strcmp(args[0], "acl") == 0) {
+ const char *err;
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in acl name '%s'.\n",
+ file, linenum, *err, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (strcasecmp(args[1], "or") == 0) {
+ ha_alert("parsing [%s:%d] : acl name '%s' will never match. 'or' is used to express a "
+ "logical disjunction within a condition.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (parse_acl((const char **)args+1, &curapp->acls, &errmsg, &curapp->conf.args, file, linenum) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing ACL '%s' : %s.\n",
+ file, linenum, args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "set-param") == 0) {
+ if (!*(args[1]) || !*(args[2])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <name> and <value> as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ type = FCGI_RULE_SET_PARAM;
+ name = args[1];
+ value = args[2];
+ cond = NULL;
+ args += 3;
+
+ parse_cond_rule:
+ if (!*(args[0])) /* No condition */
+ goto add_rule;
+
+ if (strcmp(args[0], "if") == 0)
+ cond = parse_acl_cond((const char **)args+1, &curapp->acls, ACL_COND_IF, &errmsg, &curapp->conf.args,
+ file, linenum);
+ else if (strcmp(args[0], "unless") == 0)
+ cond = parse_acl_cond((const char **)args+1, &curapp->acls, ACL_COND_UNLESS, &errmsg, &curapp->conf.args,
+ file, linenum);
+ if (!cond) {
+ ha_alert("parsing [%s:%d] : '%s' : %s.\n", file, linenum,
+ name, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ add_rule:
+ if (!fcgi_app_add_rule(curapp, type, name, value, cond, &errmsg)) {
+ ha_alert("parsing [%s:%d] : '%s' : %s.\n", file, linenum,
+ name, errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+#if 0 /* Disabled for now */
+ else if (!strcmp(args[0], "unset-param")) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <name> as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ type = FCGI_RULE_UNSET_PARAM;
+ name = args[1];
+ value = NULL;
+ cond = NULL;
+ args += 2;
+ goto parse_cond_rule;
+ }
+#endif
+ else if (strcmp(args[0], "pass-header") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <name> as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ type = FCGI_RULE_PASS_HDR;
+ name = args[1];
+ value = NULL;
+ cond = NULL;
+ args += 2;
+ goto parse_cond_rule;
+ }
+#if 0 /* Disabled for now */
+ else if (!strcmp(args[0], "hide-header")) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d] : '%s' expects <name> as arguments.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ type = FCGI_RULE_HIDE_HDR;
+ name = args[1];
+ value = NULL;
+ cond = NULL;
+ args += 2;
+ goto parse_cond_rule;
+ }
+#endif
+ else if (strcmp(args[0], "option") == 0) {
+ if (!*(args[1])) {
+ ha_alert("parsing [%s:%d]: '%s' expects an option name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (strcmp(args[1], "keep-conn") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD)
+ curapp->flags |= FCGI_APP_FL_KEEP_CONN;
+ else if (kwm == KWM_NO)
+ curapp->flags &= ~FCGI_APP_FL_KEEP_CONN;
+ }
+ else if (strcmp(args[1], "get-values") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD)
+ curapp->flags |= FCGI_APP_FL_GET_VALUES;
+ else if (kwm == KWM_NO)
+ curapp->flags &= ~FCGI_APP_FL_GET_VALUES;
+ }
+ else if (strcmp(args[1], "mpxs-conns") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto out;
+ if (kwm == KWM_STD)
+ curapp->flags |= FCGI_APP_FL_MPXS_CONNS;
+ else if (kwm == KWM_NO)
+ curapp->flags &= ~FCGI_APP_FL_MPXS_CONNS;
+ }
+ else if (strcmp(args[1], "max-reqs") == 0) {
+ if (kwm != KWM_STD) {
+ ha_alert("parsing [%s:%d]: negation/default is not supported for option '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (!*(args[2])) {
+ ha_alert("parsing [%s:%d]: option '%s' expects an integer argument.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (alertif_too_many_args_idx(1, 1, file, linenum, args, &err_code))
+ goto out;
+
+ curapp->maxreqs = atol(args[2]);
+ if (!curapp->maxreqs) {
+ ha_alert("parsing [%s:%d]: option '%s' expects a strictly positive integer argument.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else {
+ ha_alert("parsing [%s:%d] : unknown option '%s'.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else if (strcmp(args[0], "log-stderr") == 0) {
+ if (!parse_logger(args, &curapp->loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ else {
+ ha_alert("parsing [%s:%d]: unknown keyword '%s' in '%s' section\n", file, linenum, args[0], "fcgi-app");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+out:
+ free(errmsg);
+ return err_code;
+}
+
+
+/**************************************************************************/
+/*********************** FCGI Deinit functions ****************************/
+/**************************************************************************/
+void fcgi_apps_deinit()
+{
+	struct fcgi_app *app = fcgi_apps;
+
+	/* Walk the singly-linked list of FCGI applications and release
+	 * everything each node owns: attached loggers, configured rules,
+	 * name/docroot/index strings, the path-info regex, the config file
+	 * name, and finally the node itself.
+	 */
+	while (app != NULL) {
+		struct fcgi_app *next = app->next;
+		struct logger *lg, *lgb;
+		struct fcgi_rule_conf *rc, *rcb;
+
+		/* release the loggers attached to this application */
+		list_for_each_entry_safe(lg, lgb, &app->loggers, list) {
+			LIST_DELETE(&lg->list);
+			free(lg);
+		}
+
+		/* release the configured rules */
+		list_for_each_entry_safe(rc, rcb, &app->conf.rules, list) {
+			LIST_DELETE(&rc->list);
+			fcgi_release_rule_conf(rc);
+		}
+
+		free(app->name);
+		istfree(&app->docroot);
+		istfree(&app->index);
+		regex_free(app->pathinfo_re);
+		free(app->conf.file);
+		free(app);
+		app = next;
+	}
+}
+
+
+/**************************************************************************/
+/*************** Keywords definition and registration *********************/
+/**************************************************************************/
+/* "use-fcgi-app" keyword for listen/frontend/backend sections */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_LISTEN, "use-fcgi-app", proxy_parse_use_fcgi_app },
+	{ 0, NULL, NULL },
+}};
+
+// FIXME: Add rep.fcgi smp_fetch
+/* sample fetches exposing the selected application's docroot and index */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+	{ "fcgi.docroot", smp_fetch_fcgi_docroot, 0, NULL, SMP_T_STR, SMP_USE_HRQHV },
+	{ "fcgi.index", smp_fetch_fcgi_index, 0, NULL, SMP_T_STR, SMP_USE_HRQHV },
+	{ /* END */ }
+}};
+
+/* Declare the filter parser for "fcgi-app" keyword */
+static struct flt_kw_list filter_kws = { "FCGI", { }, {
+		{ "fcgi-app", parse_fcgi_flt, NULL },
+		{ NULL, NULL, NULL },
+	}
+};
+
+/* register the keyword tables and the deinit hook at boot time */
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);
+
+INITCALL1(STG_REGISTER, hap_register_post_deinit, fcgi_apps_deinit);
+
+/* "fcgi-app" config section parser and post-parsing consistency check */
+REGISTER_CONFIG_SECTION("fcgi-app", cfg_parse_fcgi_app, NULL);
+REGISTER_CONFIG_POSTPARSER("fcgi-apps", cfg_fcgi_apps_postparser);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/fcgi.c b/src/fcgi.c
new file mode 100644
index 0000000..1d1a82b
--- /dev/null
+++ b/src/fcgi.c
@@ -0,0 +1,294 @@
+/*
+ * FastCGI protocol processing
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <haproxy/buf.h>
+#include <haproxy/fcgi.h>
+#include <haproxy/istbuf.h>
+
+/* Encodes header of a FCGI record into the chunk <out>. It returns non-zero on
+ * success and 0 on failure (buffer full). <out> is a chunk, so the wrapping is
+ * not handled by this function. It is the caller responsibility to ensure
+ * enough contiguous space is available
+ */
+int fcgi_encode_record_hdr(struct buffer *out, const struct fcgi_header *h)
+{
+	char *p;
+
+	/* the record header is a fixed 8-byte block; require contiguous room */
+	if (out->data + 8 >= b_size(out))
+		return 0;
+
+	p = out->area + out->data;
+	*p++ = h->vsn;
+	*p++ = h->type;
+	*p++ = (h->id >> 8) & 0xff;   /* request id, network order */
+	*p++ = h->id & 0xff;
+	*p++ = (h->len >> 8) & 0xff;  /* content length, network order */
+	*p++ = h->len & 0xff;
+	*p++ = h->padding;
+	*p++ = 0; /* reserved byte */
+
+	out->data += 8;
+	return 1;
+}
+
+/* Decodes a FCGI record header from offset <o> of buffer <in> into descriptor
+ * <h>. The buffer may wrap so each byte read must be checked. The header is
+ * formed like this :
+ *
+ * b0 b1 b2 b3 b4 b5 b6 b7
+ * +-----+------+-----+-----+------+------+--------+-----+
+ * | vsn | type | id1 | id0 | len1 | len0 | padlen | rsv |
+ * +-----+------+-----+-----+------+------+--------+-----+
+ *
+ * Returns zero if some bytes are missing, otherwise the number of read bytes.
+ */
+size_t fcgi_decode_record_hdr(const struct buffer *in, size_t o, struct fcgi_header *h)
+{
+	uint8_t id1, id0, len1, len0;
+
+	/* the full 8-byte header must be present starting at offset <o> */
+	if (b_data(in) < o + 8)
+		return 0;
+
+	h->vsn  = (uint8_t)(*b_peek(in, o));
+	h->type = (uint8_t)(*b_peek(in, o + 1));
+	id1  = (uint8_t)(*b_peek(in, o + 2));
+	id0  = (uint8_t)(*b_peek(in, o + 3));
+	len1 = (uint8_t)(*b_peek(in, o + 4));
+	len0 = (uint8_t)(*b_peek(in, o + 5));
+	h->id  = (id1 << 8) + id0;    /* request id, network order */
+	h->len = (len1 << 8) + len0;  /* content length, network order */
+	h->padding = (uint8_t)(*b_peek(in, o + 6));
+	/* byte 7 is reserved and deliberately skipped */
+
+	return 8;
+}
+
+/* Encodes the payload part of a BEGIN_REQUEST record into the chunk <out>. It
+ * returns non-zero on success and 0 on failure (buffer full). <out> is a chunk,
+ * so the wrapping is not handled by this function. It is the caller
+ * responsibility to ensure enough contiguous space is available
+ */
+int fcgi_encode_begin_request(struct buffer *out, const struct fcgi_begin_request *r)
+{
+	char *p;
+	int i;
+
+	/* the BEGIN_REQUEST body is a fixed 8-byte block */
+	if (out->data + 8 >= b_size(out))
+		return 0;
+
+	p = out->area + out->data;
+	*p++ = (r->role >> 8) & 0xff; /* role, network order */
+	*p++ = r->role & 0xff;
+	*p++ = r->flags;
+	for (i = 0; i < 5; i++)
+		*p++ = 0; /* 5 reserved bytes */
+
+	out->data += 8;
+	return 1;
+}
+
+/* Encodes a parameter, part of the payload of a PARAM record, into the chunk
+ * <out>. It returns non-zero on success and 0 on failure (buffer full). <out>
+ * is a chunk, so the wrapping is not handled by this function. It is the caller
+ * responsibility to ensure enough contiguous space is available. The
+ * parameter's name is converted to upper case and non-alphanumeric character
+ * are replaced by an underscore.
+ */
+int fcgi_encode_param(struct buffer *out, const struct fcgi_param *p)
+{
+	size_t off, len = out->data;
+	int nbytes, vbytes;
+
+	/* FCGI name-value pair encoding: lengths below 128 take 1 byte,
+	 * larger ones take 4 bytes with the first byte's high bit set.
+	 */
+	nbytes = (!(p->n.len >> 7) ? 1 : 4);
+	vbytes = (!(p->v.len >> 7) ? 1 : 4);
+	if ((len + nbytes + p->n.len + vbytes + p->v.len) >= b_size(out))
+		return 0;
+
+	/* emit the name length */
+	if (nbytes == 1)
+		out->area[len++] = (p->n.len & 0xff);
+	else {
+		out->area[len++] = (((p->n.len >> 24) & 0xff) | 0x80);
+		out->area[len++] = ((p->n.len >> 16) & 0xff);
+		out->area[len++] = ((p->n.len >> 8) & 0xff);
+		out->area[len++] = (p->n.len & 0xff);
+	}
+
+	/* emit the value length, same encoding */
+	if (vbytes == 1)
+		out->area[len++] = (p->v.len & 0xff);
+	else {
+		out->area[len++] = (((p->v.len >> 24) & 0xff) | 0x80);
+		out->area[len++] = ((p->v.len >> 16) & 0xff);
+		out->area[len++] = ((p->v.len >> 8) & 0xff);
+		out->area[len++] = (p->v.len & 0xff);
+	}
+
+	/* copy the name upcased, with non-alphanumeric characters replaced
+	 * by underscores (CGI variable naming convention)
+	 */
+	for (off = 0; off < p->n.len; off++) {
+		if (isalnum((unsigned char)p->n.ptr[off]))
+			out->area[len++] = ist_uc[(unsigned char)p->n.ptr[off]];
+		else
+			out->area[len++] = '_';
+	}
+	/* the value is copied verbatim */
+	if (p->v.len) {
+		ist2bin(out->area + len, p->v);
+		len += p->v.len;
+	}
+
+	out->data = len;
+	return 1;
+}
+
+/* Decodes a parameter of a PARAM record from offset <o> of buffer <in> into the
+ * FCGI param <p>. The buffer may wrap so each byte read must be checked.
+ * Returns zero if some bytes are missing, otherwise the number of read bytes.
+ */
+size_t fcgi_decode_param(const struct buffer *in, size_t o, struct fcgi_param *p)
+{
+	size_t data = b_data(in);
+	size_t nlen, vlen, len = 0;
+	uint8_t b0, b1, b2, b3;
+
+	/* name length: 1 byte if < 128, else 4 bytes with the first byte's
+	 * high bit set (FCGI name-value pair encoding)
+	 */
+	if (data < o + 1)
+		return 0;
+	b0 = *b_peek(in, o++);
+	if (!(b0 >> 7)) {
+		nlen = b0;
+		len++;
+	}
+	else {
+		if (data < o + 3)
+			return 0;
+		b1 = *b_peek(in, o++);
+		b2 = *b_peek(in, o++);
+		b3 = *b_peek(in, o++);
+		nlen = ((b0 & 0x7f) << 24) + (b1 << 16) + (b2 << 8) + b3;
+		len += 4;
+	}
+
+	/* value length, same encoding */
+	if (data < o + 1)
+		return 0;
+	b0 = *b_peek(in, o++);
+	if (!(b0 >> 7)) {
+		vlen = b0;
+		len++;
+	}
+	else {
+		if (data < o + 3)
+			return 0;
+		b1 = *b_peek(in, o++);
+		b2 = *b_peek(in, o++);
+		b3 = *b_peek(in, o++);
+		vlen = ((b0 & 0x7f) << 24) + (b1 << 16) + (b2 << 8) + b3;
+		len += 4;
+	}
+
+	/* the name and value must be fully present after the length bytes.
+	 * The check must be relative to <o>: comparing against the whole
+	 * buffer size would wrongly count bytes located before the
+	 * parameter as available and let us read past valid data.
+	 */
+	if (data - o < nlen + vlen)
+		return 0;
+
+	p->n = ist2(b_peek(in, o), nlen);
+	p->v = ist2(b_peek(in, o + nlen), vlen);
+	len += nlen + vlen;
+
+	return len;
+}
+
+
+/* Decodes a parameter of a PARAM record from offset <o> of buffer <in> into the
+ * FCGI param <p>. To call this function, the buffer must not wrap. Returns zero
+ * if some bytes are missing, otherwise the number of read bytes.
+ */
+size_t fcgi_aligned_decode_param(const struct buffer *in, size_t o, struct fcgi_param *p)
+{
+	size_t data = b_data(in);
+	size_t nlen, vlen, len = 0;
+	uint8_t b0, b1, b2, b3;
+
+	/* name length: 1 byte if < 128, else 4 bytes with the first byte's
+	 * high bit set (FCGI name-value pair encoding)
+	 */
+	if (data < o + 1)
+		return 0;
+	b0 = in->area[o++];
+	if (!(b0 >> 7)) {
+		nlen = b0;
+		len++;
+	}
+	else {
+		if (data < o + 3)
+			return 0;
+		b1 = in->area[o++];
+		b2 = in->area[o++];
+		b3 = in->area[o++];
+		nlen = ((b0 & 0x7f) << 24) + (b1 << 16) + (b2 << 8) + b3;
+		len += 4;
+	}
+
+	/* value length, same encoding */
+	if (data < o + 1)
+		return 0;
+	b0 = in->area[o++];
+	if (!(b0 >> 7)) {
+		vlen = b0;
+		len++;
+	}
+	else {
+		if (data < o + 3)
+			return 0;
+		b1 = in->area[o++];
+		b2 = in->area[o++];
+		b3 = in->area[o++];
+		vlen = ((b0 & 0x7f) << 24) + (b1 << 16) + (b2 << 8) + b3;
+		len += 4;
+	}
+
+	/* the name and value must be fully present after the length bytes.
+	 * The check must be relative to <o>: comparing against the whole
+	 * buffer size would wrongly count bytes located before the
+	 * parameter as available and let us read past valid data.
+	 */
+	if (data - o < nlen + vlen)
+		return 0;
+
+	p->n = ist2(in->area + o, nlen);
+	p->v = ist2(in->area + o + nlen, vlen);
+	len += nlen + vlen;
+
+	return len;
+}
+
+/* Decodes payload of a END_REQUEST record from offset <o> of buffer <in> into
+ * the FCGI param <p>. The buffer may wrap so each byte read must be
+ * checked. Returns zero if some bytes are missing, otherwise the number of read
+ * bytes.
+ */
+size_t fcgi_decode_end_request(const struct buffer *in, size_t o, struct fcgi_end_request *rec)
+{
+	uint8_t b0, b1, b2, b3;
+
+	/* the END_REQUEST body is a fixed 8-byte block */
+	if (b_data(in) < o + 8)
+		return 0;
+
+	b0 = *b_peek(in, o++);
+	b1 = *b_peek(in, o++);
+	b2 = *b_peek(in, o++);
+	b3 = *b_peek(in, o++);
+	/* application status, network order; the top bit is masked off so
+	 * the value stays positive (NOTE(review): presumably intentional,
+	 * confirm against the struct's field type)
+	 */
+	rec->status = ((b0 & 0x7f) << 24) + (b1 << 16) + (b2 << 8) + b3;
+	rec->errcode = *b_peek(in, o++);
+	o += 3; /* ignore rsv */
+
+	return 8;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/fd.c b/src/fd.c
new file mode 100644
index 0000000..9d34315
--- /dev/null
+++ b/src/fd.c
@@ -0,0 +1,1348 @@
+/*
+ * File descriptors management functions.
+ *
+ * Copyright 2000-2014 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * There is no direct link between the FD and the updates list. There is only a
+ * bit in the fdtab[] to indicate than a file descriptor is already present in
+ * the updates list. Once an fd is present in the updates list, it will have to
+ * be considered even if its changes are reverted in the middle or if the fd is
+ * replaced.
+ *
+ * The event state for an FD, as found in fdtab[].state, is maintained for each
+ * direction. The state field is built this way, with R bits in the low nibble
+ * and W bits in the high nibble for ease of access and debugging :
+ *
+ * 7 6 5 4 3 2 1 0
+ * [ 0 | 0 | RW | AW | 0 | 0 | RR | AR ]
+ *
+ * A* = active *R = read
+ * R* = ready *W = write
+ *
+ * An FD is marked "active" when there is a desire to use it.
+ * An FD is marked "ready" when it has not faced a new EAGAIN since last wake-up
+ * (it is a cache of the last EAGAIN regardless of polling changes). Each poller
+ * has its own "polled" state for the same fd, as stored in the polled_mask.
+ *
+ * We have 4 possible states for each direction based on these 2 flags :
+ *
+ * +---+---+----------+---------------------------------------------+
+ * | R | A | State | Description |
+ * +---+---+----------+---------------------------------------------+
+ * | 0 | 0 | DISABLED | No activity desired, not ready. |
+ * | 0 | 1 | ACTIVE | Activity desired. |
+ * | 1 | 0 | STOPPED | End of activity. |
+ * | 1 | 1 | READY | Activity desired and reported. |
+ * +---+---+----------+---------------------------------------------+
+ *
+ * The transitions are pretty simple :
+ * - fd_want_*() : set flag A
+ * - fd_stop_*() : clear flag A
+ * - fd_cant_*() : clear flag R (when facing EAGAIN)
+ * - fd_may_*() : set flag R (upon return from poll())
+ *
+ * Each poller then computes its own polled state :
+ * if (A) { if (!R) P := 1 } else { P := 0 }
+ *
+ * The state transitions look like the diagram below.
+ *
+ * may +----------+
+ * ,----| DISABLED | (READY=0, ACTIVE=0)
+ * | +----------+
+ * | want | ^
+ * | | |
+ * | v | stop
+ * | +----------+
+ * | | ACTIVE | (READY=0, ACTIVE=1)
+ * | +----------+
+ * | | ^
+ * | may | |
+ * | v | EAGAIN (can't)
+ * | +--------+
+ * | | READY | (READY=1, ACTIVE=1)
+ * | +--------+
+ * | stop | ^
+ * | | |
+ * | v | want
+ * | +---------+
+ * `--->| STOPPED | (READY=1, ACTIVE=0)
+ * +---------+
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/uio.h>
+
+#if defined(USE_POLL)
+#include <poll.h>
+#include <errno.h>
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/port_range.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+
+
+struct fdtab *fdtab __read_mostly = NULL; /* array of all the file descriptors */
+struct polled_mask *polled_mask __read_mostly = NULL; /* Array for the polled_mask of each fd */
+struct fdinfo *fdinfo __read_mostly = NULL; /* less-often used infos for file descriptors */
+int totalconn; /* total # of terminated sessions */
+int actconn; /* # of active sessions */
+
+struct poller pollers[MAX_POLLERS] __read_mostly; /* all registered pollers */
+struct poller cur_poller __read_mostly;           /* the poller currently in use */
+int nbpollers = 0;
+
+volatile struct fdlist update_list[MAX_TGROUPS]; // global per-tgroup update lists (indexed by tgid - 1)
+
+THREAD_LOCAL int *fd_updt = NULL; // FD updates list (per-thread)
+THREAD_LOCAL int fd_nbupdt = 0; // number of updates in the list
+THREAD_LOCAL int poller_rd_pipe = -1; // Pipe to wake the thread
+int poller_wr_pipe[MAX_THREADS] __read_mostly; // Pipe to wake the threads
+
+volatile int ha_used_fds = 0; // Number of FD we're currently using
+static struct fdtab *fdtab_addr; /* address of the allocated area containing fdtab */
+
+/* adds fd <fd> to fd list <list> if it was not yet in it */
+void fd_add_to_fd_list(volatile struct fdlist *list, int fd)
+{
+	int next;
+	int new;
+	int old;
+	int last;
+
+	/* The update.next field encodes the entry's state:
+	 *   >= -1 : already linked in a list (-1 meaning "no next")
+	 *   == -2 : entry locked by another thread, retry
+	 *   <= -3 : not in any list, may be locked and inserted
+	 */
+redo_next:
+	next = HA_ATOMIC_LOAD(&fdtab[fd].update.next);
+	/* Check that we're not already in the cache, and if not, lock us. */
+	if (next > -2)
+		goto done;
+	if (next == -2)
+		goto redo_next;
+	if (!_HA_ATOMIC_CAS(&fdtab[fd].update.next, &next, -2))
+		goto redo_next;
+	__ha_barrier_atomic_store();
+
+	new = fd;
+redo_last:
+	/* First, insert in the linked list */
+	last = list->last;
+	old = -1;
+
+	/* mark our prev as locked (-2) while the linking is in progress */
+	fdtab[fd].update.prev = -2;
+	/* Make sure the "prev" store is visible before we update the last entry */
+	__ha_barrier_store();
+
+	if (unlikely(last == -1)) {
+		/* list is empty, try to add ourselves alone so that list->last=fd */
+		if (unlikely(!_HA_ATOMIC_CAS(&list->last, &old, new)))
+			goto redo_last;
+
+		/* list->first was necessary -1, we're guaranteed to be alone here */
+		list->first = fd;
+	} else {
+		/* adding ourselves past the last element
+		 * The CAS will only succeed if its next is -1,
+		 * which means it's in the cache, and the last element.
+		 */
+		if (unlikely(!_HA_ATOMIC_CAS(&fdtab[last].update.next, &old, new)))
+			goto redo_last;
+
+		/* Then, update the last entry */
+		list->last = fd;
+	}
+	__ha_barrier_store();
+	/* since we're alone at the end of the list and still locked(-2),
+	 * we know no one tried to add past us. Mark the end of list.
+	 */
+	fdtab[fd].update.prev = last;
+	fdtab[fd].update.next = -1;
+	__ha_barrier_store();
+done:
+	return;
+}
+
+/* removes fd <fd> from fd list <list> */
+void fd_rm_from_fd_list(volatile struct fdlist *list, int fd)
+{
+#if defined(HA_HAVE_CAS_DW) || defined(HA_CAS_IS_8B)
+	volatile union {
+		struct fdlist_entry ent;
+		uint64_t u64;
+		uint32_t u32[2];
+	} cur_list, next_list;
+#endif
+	int old;
+	int new = -2;
+	int prev;
+	int next;
+	int last;
+	/* Removal protocol: first lock our own entry (prev/next set to the
+	 * -2 sentinel), then lock both neighbours' links, then unlink and
+	 * finally mark ourselves as detached (next <= -3). Any conflict
+	 * with a concurrent locker restores our links and restarts.
+	 */
+lock_self:
+#if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
+	/* lock prev+next at once with a double-word CAS when available */
+	next_list.ent.next = next_list.ent.prev = -2;
+	cur_list.ent = *(volatile typeof(fdtab->update)*)&fdtab[fd].update;
+	/* First, attempt to lock our own entries */
+	do {
+		/* The FD is not in the FD cache, give up */
+		if (unlikely(cur_list.ent.next <= -3))
+			return;
+		if (unlikely(cur_list.ent.prev == -2 || cur_list.ent.next == -2))
+			goto lock_self;
+	} while (
+#ifdef HA_CAS_IS_8B
+		unlikely(!_HA_ATOMIC_CAS(((uint64_t *)&fdtab[fd].update), (uint64_t *)&cur_list.u64, next_list.u64))
+#else
+		unlikely(!_HA_ATOMIC_DWCAS(((long *)&fdtab[fd].update), (uint32_t *)&cur_list.u32, (const uint32_t *)&next_list.u32))
+#endif
+	    );
+	next = cur_list.ent.next;
+	prev = cur_list.ent.prev;
+
+#else
+	/* fallback: lock next then prev with two separate CAS */
+lock_self_next:
+	next = HA_ATOMIC_LOAD(&fdtab[fd].update.next);
+	if (next == -2)
+		goto lock_self_next;
+	if (next <= -3)
+		goto done;
+	if (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].update.next, &next, -2)))
+		goto lock_self_next;
+lock_self_prev:
+	prev = HA_ATOMIC_LOAD(&fdtab[fd].update.prev);
+	if (prev == -2)
+		goto lock_self_prev;
+	if (unlikely(!_HA_ATOMIC_CAS(&fdtab[fd].update.prev, &prev, -2)))
+		goto lock_self_prev;
+#endif
+	__ha_barrier_atomic_store();
+
+	/* Now, lock the entries of our neighbours */
+	if (likely(prev != -1)) {
+redo_prev:
+		old = fd;
+
+		if (unlikely(!_HA_ATOMIC_CAS(&fdtab[prev].update.next, &old, new))) {
+			if (unlikely(old == -2)) {
+				/* Neighbour already locked, give up and
+				 * retry again once he's done
+				 */
+				fdtab[fd].update.prev = prev;
+				__ha_barrier_store();
+				fdtab[fd].update.next = next;
+				__ha_barrier_store();
+				goto lock_self;
+			}
+			goto redo_prev;
+		}
+	}
+	if (likely(next != -1)) {
+redo_next:
+		old = fd;
+		if (unlikely(!_HA_ATOMIC_CAS(&fdtab[next].update.prev, &old, new))) {
+			if (unlikely(old == -2)) {
+				/* Neighbour already locked, give up and
+				 * retry again once he's done
+				 */
+				if (prev != -1) {
+					fdtab[prev].update.next = fd;
+					__ha_barrier_store();
+				}
+				fdtab[fd].update.prev = prev;
+				__ha_barrier_store();
+				fdtab[fd].update.next = next;
+				__ha_barrier_store();
+				goto lock_self;
+			}
+			goto redo_next;
+		}
+	}
+	/* both neighbours are locked: fix up the list's head and tail */
+	if (list->first == fd)
+		list->first = next;
+	__ha_barrier_store();
+	last = list->last;
+	while (unlikely(last == fd && (!_HA_ATOMIC_CAS(&list->last, &last, prev))))
+		__ha_compiler_barrier();
+	/* Make sure we let other threads know we're no longer in cache,
+	 * before releasing our neighbours.
+	 */
+	__ha_barrier_store();
+	if (likely(prev != -1))
+		fdtab[prev].update.next = next;
+	__ha_barrier_store();
+	if (likely(next != -1))
+		fdtab[next].update.prev = prev;
+	__ha_barrier_store();
+	/* Ok, now we're out of the fd cache */
+	/* -(next + 4) always yields a value <= -3 (next >= -1 here), which
+	 * is the "not in any list" encoding checked at the top.
+	 */
+	fdtab[fd].update.next = -(next + 4);
+	__ha_barrier_store();
+done:
+	return;
+}
+
+/* deletes the FD once nobody uses it anymore, as detected by the caller by its
+ * thread_mask being zero and its running mask turning to zero. There is no
+ * protection against concurrent accesses, it's up to the caller to make sure
+ * only the last thread will call it. If called under isolation, it is safe to
+ * call this from another group than the FD's. This is only for internal use,
+ * please use fd_delete() instead.
+ */
+void _fd_delete_orphan(int fd)
+{
+	int tgrp = fd_tgid(fd);
+	uint fd_disown;
+
+	/* FD_DISOWN means the fd itself must not be closed here: only the
+	 * fdtab state is released and the close is left to the new owner.
+	 */
+	fd_disown = fdtab[fd].state & FD_DISOWN;
+	if (fdtab[fd].state & FD_LINGER_RISK) {
+		/* this is generally set when connecting to servers */
+		DISGUISE(setsockopt(fd, SOL_SOCKET, SO_LINGER,
+			   (struct linger *) &nolinger, sizeof(struct linger)));
+	}
+
+	/* It's expected that a close() will result in the FD disappearing from
+	 * pollers, but some pollers may have some internal bookkeeping to be
+	 * done prior to the call (e.g. remove references from internal tables).
+	 */
+	if (cur_poller.clo)
+		cur_poller.clo(fd);
+
+	/* now we're about to reset some of this FD's fields. We don't want
+	 * anyone to grab it anymore and we need to make sure those which could
+	 * possibly have stumbled upon it right now are leaving before we
+	 * proceed. This is done in two steps. First we reset the tgid so that
+	 * fd_take_tgid() and fd_grab_tgid() fail, then we wait for existing
+	 * ref counts to drop. Past this point we're alone dealing with the
+	 * FD's thead/running/update/polled masks.
+	 */
+	fd_reset_tgid(fd);
+
+	while (_HA_ATOMIC_LOAD(&fdtab[fd].refc_tgid) != 0) // refc==0 ?
+		__ha_cpu_relax();
+
+	/* we don't want this FD anymore in the global list */
+	fd_rm_from_fd_list(&update_list[tgrp - 1], fd);
+
+	/* no more updates on this FD are relevant anymore */
+	HA_ATOMIC_STORE(&fdtab[fd].update_mask, 0);
+	/* if the fd was the last one queued in our local update list, drop it */
+	if (fd_nbupdt > 0 && fd_updt[fd_nbupdt - 1] == fd)
+		fd_nbupdt--;
+
+	port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+	polled_mask[fd].poll_recv = polled_mask[fd].poll_send = 0;
+
+	fdtab[fd].state = 0;
+
+#ifdef DEBUG_FD
+	fdtab[fd].event_count = 0;
+#endif
+	fdinfo[fd].port_range = NULL;
+	fdtab[fd].owner = NULL;
+
+	/* perform the close() call last as it's what unlocks the instant reuse
+	 * of this FD by any other thread.
+	 */
+	if (!fd_disown)
+		close(fd);
+	_HA_ATOMIC_DEC(&ha_used_fds);
+}
+
+/* Deletes an FD from the fdsets. The file descriptor is also closed, possibly
+ * asynchronously. It is safe to call it from another thread from the same
+ * group as the FD's or from a thread from a different group. However if called
+ * from a thread from another group, there is an extra cost involved because
+ * the operation is performed under thread isolation, so doing so must be
+ * reserved for ultra-rare cases (e.g. stopping a listener).
+ */
+void fd_delete(int fd)
+{
+	/* This must never happen and would definitely indicate a bug, in
+	 * addition to overwriting some unexpected memory areas.
+	 */
+	BUG_ON(fd < 0 || fd >= global.maxsock);
+
+	/* NOTE: The master when going into reexec mode re-closes all FDs after
+	 * they were already dispatched. But we know we didn't start the polling
+	 * threads so we can still close them. The masks will probably not match
+	 * however so we force the value and erase the refcount if any.
+	 */
+	if (unlikely(global.mode & MODE_STARTING))
+		fdtab[fd].refc_tgid = ti->tgid;
+
+	/* the tgid cannot change before a complete close so we should never
+	 * face the situation where we try to close an fd that was reassigned.
+	 * However there is one corner case where this happens, it's when an
+	 * attempt to pause a listener fails (e.g. abns), leaving the listener
+	 * in fault state and it is forcefully stopped. This needs to be done
+	 * under isolation, and it's quite rare (i.e. once per such FD per
+	 * process). Since we'll be isolated we can clear the thread mask and
+	 * close the FD ourselves.
+	 */
+	if (unlikely(fd_tgid(fd) != ti->tgid)) {
+		int must_isolate = !thread_isolated() && !(global.mode & MODE_STOPPING);
+
+		if (must_isolate)
+			thread_isolate();
+
+		HA_ATOMIC_STORE(&fdtab[fd].thread_mask, 0);
+		HA_ATOMIC_STORE(&fdtab[fd].running_mask, 0);
+		_fd_delete_orphan(fd);
+
+		if (must_isolate)
+			thread_release();
+		return;
+	}
+
+	/* we must postpone removal of an FD that may currently be in use
+	 * by another thread. This can happen in the following two situations:
+	 *   - after a takeover, the owning thread closes the connection but
+	 *     the previous one just woke up from the poller and entered
+	 *     the FD handler iocb. That thread holds an entry in running_mask
+	 *     and requires removal protection.
+	 *   - multiple threads are accepting connections on a listener, and
+	 *     one of them (or even an separate one) decides to unbind the
+	 *     listener under the listener's lock while other ones still hold
+	 *     the running bit.
+	 * In both situations the FD is marked as unused (thread_mask = 0) and
+	 * will not take new bits in its running_mask so we have the guarantee
+	 * that the last thread eliminating running_mask is the one allowed to
+	 * safely delete the FD. Most of the time it will be the current thread.
+	 * We still need to set and check the one-shot flag FD_MUST_CLOSE
+	 * to take care of the rare cases where a thread wakes up on late I/O
+	 * before the thread_mask is zero, and sets its bit in the running_mask
+	 * just after the current thread finishes clearing its own bit, hence
+	 * the two threads see themselves as last ones (which they really are).
+	 */
+
+	HA_ATOMIC_OR(&fdtab[fd].running_mask, ti->ltid_bit);
+	HA_ATOMIC_OR(&fdtab[fd].state, FD_MUST_CLOSE);
+	HA_ATOMIC_STORE(&fdtab[fd].thread_mask, 0);
+	/* we were the last running thread and won the FD_MUST_CLOSE flag:
+	 * we're the one performing the actual deletion.
+	 */
+	if (fd_clr_running(fd) == ti->ltid_bit) {
+		if (HA_ATOMIC_BTR(&fdtab[fd].state, FD_MUST_CLOSE_BIT)) {
+			_fd_delete_orphan(fd);
+		}
+	}
+}
+
+/* makes the new fd non-blocking and clears all other O_* flags; this is meant
+ * to be used on new FDs. Returns -1 on failure. The result is disguised at the
+ * end because some callers need to be able to ignore it regardless of the libc
+ * attributes.
+ */
+int fd_set_nonblock(int fd)
+{
+	/* set O_NONBLOCK alone: any other O_* status flag is intentionally
+	 * cleared since this is meant to be used on freshly created FDs.
+	 */
+	return DISGUISE(fcntl(fd, F_SETFL, O_NONBLOCK));
+}
+
+/* sets the close-on-exec flag on fd; returns -1 on failure. The result is
+ * disguised at the end because some callers need to be able to ignore it
+ * regardless of the libc attributes.
+ */
+int fd_set_cloexec(int fd)
+{
+	int flags, ret;
+
+	flags = fcntl(fd, F_GETFD);
+	/* if F_GETFD fails, report the error instead of OR-ing FD_CLOEXEC
+	 * into -1 and calling F_SETFD with all flag bits set.
+	 */
+	if (flags == -1)
+		return DISGUISE(-1);
+	flags |= FD_CLOEXEC;
+	ret = fcntl(fd, F_SETFD, flags);
+	return DISGUISE(ret);
+}
+
+/* Migrate a FD to a new thread <new_tid>. It is explicitly permitted to
+ * migrate to another thread group, the function takes the necessary locking
+ * for this. It is even permitted to migrate from a foreign group to another,
+ * but the calling thread must be certain that the FD is not about to close
+ * when doing so, reason why it is highly recommended that only one of the
+ * FD's owners performs this operation. The polling is completely disabled.
+ * The operation never fails.
+ */
+void fd_migrate_on(int fd, uint new_tid)
+{
+	struct thread_info *new_ti = &ha_thread_info[new_tid];
+
+	/* we must be alone to work on this idle FD. If not, it means that its
+	 * poller is currently waking up and is about to use it, likely to
+	 * close it on shut/error, but maybe also to process any unexpectedly
+	 * pending data. It's also possible that the FD was closed and
+	 * reassigned to another thread group, so let's be careful.
+	 */
+	/* NOTE(review): fd_lock_tgid() presumably locks the FD's tgid and
+	 * assigns the target group atomically — confirm in fd.h.
+	 */
+	fd_lock_tgid(fd, new_ti->tgid);
+
+	/* now we have exclusive access to it. From now FD belongs to tid_bit
+	 * for this tgid.
+	 */
+	HA_ATOMIC_STORE(&fdtab[fd].thread_mask, new_ti->ltid_bit);
+
+	/* Make sure the FD doesn't have the active bit. It is possible that
+	 * the fd is polled by the thread that used to own it, the new thread
+	 * is supposed to call subscribe() later, to activate polling.
+	 */
+	fd_stop_both(fd);
+
+	/* we're done with it. As soon as we unlock it, other threads from the
+	 * target group can manipulate it. However it may only disappear once
+	 * we drop the reference.
+	 */
+	fd_unlock_tgid(fd);
+	fd_drop_tgid(fd);
+}
+
+/*
+ * Take over a FD belonging to another thread.
+ * unexpected_conn is the expected owner of the fd.
+ * Returns 0 on success, and -1 on failure.
+ */
+int fd_takeover(int fd, void *expected_owner)
+{
+	unsigned long old;
+
+	/* protect ourself against a delete then an insert for the same fd,
+	 * if it happens, then the owner will no longer be the expected
+	 * connection.
+	 */
+	if (fdtab[fd].owner != expected_owner)
+		return -1;
+
+	/* we must be alone to work on this idle FD. If not, it means that its
+	 * poller is currently waking up and is about to use it, likely to
+	 * close it on shut/error, but maybe also to process any unexpectedly
+	 * pending data. It's also possible that the FD was closed and
+	 * reassigned to another thread group, so let's be careful.
+	 */
+	if (unlikely(!fd_grab_tgid(fd, ti->tgid)))
+		return -1;
+
+	/* a successful CAS from 0 to our bit proves no other thread is
+	 * currently running on this FD, so the takeover is safe.
+	 */
+	old = 0;
+	if (!HA_ATOMIC_CAS(&fdtab[fd].running_mask, &old, ti->ltid_bit)) {
+		fd_drop_tgid(fd);
+		return -1;
+	}
+
+	/* success, from now on it's ours */
+	HA_ATOMIC_STORE(&fdtab[fd].thread_mask, ti->ltid_bit);
+
+	/* Make sure the FD doesn't have the active bit. It is possible that
+	 * the fd is polled by the thread that used to own it, the new thread
+	 * is supposed to call subscribe() later, to activate polling.
+	 */
+	fd_stop_recv(fd);
+
+	/* we're done with it */
+	HA_ATOMIC_AND(&fdtab[fd].running_mask, ~ti->ltid_bit);
+
+	/* no more changes planned */
+	fd_drop_tgid(fd);
+	return 0;
+}
+
/* Propagates a polling-state update for FD <fd>. Depending on which threads
 * own the FD, the update is either queued into the calling thread's local
 * fd_updt[] list, or pushed onto the shared per-group update list, in which
 * case one thread of the owning group may be woken up to handle it. Does
 * nothing if the FD was already closed (fd_take_tgid() returned 0).
 */
void updt_fd_polling(const int fd)
{
        uint tgrp = fd_take_tgid(fd);

        /* closed ? may happen */
        if (!tgrp)
                return;

        if (unlikely(tgrp != tgid && tgrp <= MAX_TGROUPS)) {
                /* Hmmm delivered an update for another group... That may
                 * happen on suspend/resume of a listener for example when
                 * the FD was not even marked for running. Let's broadcast
                 * the update.
                 */
                unsigned long update_mask = fdtab[fd].update_mask;
                int thr;

                /* mark all enabled threads of the owning group as having a
                 * pending update for this FD.
                 */
                while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask,
                                       _HA_ATOMIC_LOAD(&ha_tgroup_info[tgrp - 1].threads_enabled)))
                        __ha_cpu_relax();

                fd_add_to_fd_list(&update_list[tgrp - 1], fd);

                /* wake one random thread of the owning group to pick it up */
                thr = one_among_mask(fdtab[fd].thread_mask & ha_tgroup_info[tgrp - 1].threads_enabled,
                                     statistical_prng_range(ha_tgroup_info[tgrp - 1].count));
                thr += ha_tgroup_info[tgrp - 1].base;
                wake_thread(thr);

                fd_drop_tgid(fd);
                return;
        }

        fd_drop_tgid(fd);

        /* local group: if a single thread runs, or the FD belongs to this
         * thread alone, a local update entry is sufficient.
         */
        if (tg->threads_enabled == 1UL || (fdtab[fd].thread_mask & tg->threads_enabled) == ti->ltid_bit) {
                if (HA_ATOMIC_BTS(&fdtab[fd].update_mask, ti->ltid))
                        return;

                fd_updt[fd_nbupdt++] = fd;
        } else {
                unsigned long update_mask = fdtab[fd].update_mask;
                do {
                        if (update_mask == fdtab[fd].thread_mask) // FIXME: this works only on thread-groups 1
                                return;
                } while (!_HA_ATOMIC_CAS(&fdtab[fd].update_mask, &update_mask, fdtab[fd].thread_mask));

                fd_add_to_fd_list(&update_list[tgid - 1], fd);

                if (fd_active(fd) && !(fdtab[fd].thread_mask & ti->ltid_bit)) {
                        /* we need to wake up another thread to handle it immediately, any will fit,
                         * so let's pick a random one so that it doesn't always end up on the same.
                         */
                        int thr = one_among_mask(fdtab[fd].thread_mask & tg->threads_enabled,
                                                 statistical_prng_range(tg->count));
                        thr += tg->base;
                        wake_thread(thr);
                }
        }
}
+
/* Update events seen for FD <fd> and its state if needed. This should be
 * called by the poller, passing FD_EV_*_{R,W,RW} in <evts>. FD_EV_ERR_*
 * doesn't need to also pass FD_EV_SHUT_*, it's implied. ERR and SHUT are
 * allowed to be reported regardless of R/W readiness. Returns one of
 * FD_UPDT_*.
 */
int fd_update_events(int fd, uint evts)
{
        unsigned long locked;
        uint old, new;
        uint new_flags, must_stop;
        ulong rmask, tmask;

        _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running

        if (unlikely(!fd_grab_tgid(fd, ti->tgid))) {
                /* the FD changed to another tgid, we can't safely
                 * check it anymore. The bits in the masks are not
                 * ours anymore and we're not allowed to touch them.
                 * Ours have already been cleared and the FD was
                 * closed in between so we can safely leave now.
                 */
                activity[tid].poll_drop_fd++;
                return FD_UPDT_CLOSED;
        }

        /* Do not take running_mask if not strictly needed (will trigger a
         * cosmetic BUG_ON() in fd_insert() anyway if done).
         */
        tmask = _HA_ATOMIC_LOAD(&fdtab[fd].thread_mask);
        if (!(tmask & ti->ltid_bit))
                goto do_update;

        HA_ATOMIC_OR(&fdtab[fd].running_mask, ti->ltid_bit);

        /* From this point, our bit may possibly be in thread_mask, but it may
         * still vanish, either because a takeover completed just before taking
         * the bit above with the new owner deleting the FD, or because a
         * takeover started just before taking the bit. In order to make sure a
         * started takeover is complete, we need to verify that all bits of
         * running_mask are present in thread_mask, since takeover first takes
         * running then atomically replaces thread_mask. Once it's stable, if
         * our bit remains there, no further takeover may happen because we
         * hold running, but if our bit is not there it means we've lost the
         * takeover race and have to decline touching the FD. Regarding the
         * risk of deletion, our bit in running_mask prevents fd_delete() from
         * finalizing the close, and the caller will leave the FD with a zero
         * thread_mask and the FD_MUST_CLOSE flag set. It will then be our
         * responsibility to close it.
         */
        do {
                rmask = _HA_ATOMIC_LOAD(&fdtab[fd].running_mask);
                tmask = _HA_ATOMIC_LOAD(&fdtab[fd].thread_mask);
                rmask &= ~ti->ltid_bit;
        } while ((rmask & ~tmask) && (tmask & ti->ltid_bit));

        /* Now tmask is stable. Do nothing if the FD was taken over under us */

        if (!(tmask & ti->ltid_bit)) {
                /* a takeover has started */
                activity[tid].poll_skip_fd++;

                /* if we were the last holder of running_mask, the close (or
                 * migration) now falls on us.
                 */
                if (fd_clr_running(fd) == ti->ltid_bit)
                        goto closed_or_migrated;

                goto do_update;
        }

        /* with running we're safe now, we can drop the reference */
        fd_drop_tgid(fd);

        locked = (tmask != ti->ltid_bit);

        /* OK now we are guaranteed that our thread_mask was present and
         * that we're allowed to update the FD.
         */

        new_flags =
              ((evts & FD_EV_READY_R) ? FD_POLL_IN  : 0) |
              ((evts & FD_EV_READY_W) ? FD_POLL_OUT : 0) |
              ((evts & FD_EV_SHUT_R)  ? FD_POLL_HUP : 0) |
              ((evts & FD_EV_ERR_RW)  ? FD_POLL_ERR : 0);

        /* SHUTW reported while FD was active for writes is an error */
        if ((fdtab[fd].state & FD_EV_ACTIVE_W) && (evts & FD_EV_SHUT_W))
                new_flags |= FD_POLL_ERR;

        /* compute the inactive events reported late that must be stopped */
        must_stop = 0;
        if (unlikely(!fd_active(fd))) {
                /* both sides stopped */
                must_stop = FD_POLL_IN | FD_POLL_OUT;
        }
        else if (unlikely(!fd_recv_active(fd) && (evts & (FD_EV_READY_R | FD_EV_SHUT_R | FD_EV_ERR_RW)))) {
                /* only send remains */
                must_stop = FD_POLL_IN;
        }
        else if (unlikely(!fd_send_active(fd) && (evts & (FD_EV_READY_W | FD_EV_SHUT_W | FD_EV_ERR_RW)))) {
                /* only recv remains */
                must_stop = FD_POLL_OUT;
        }

        if (new_flags & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
                new_flags |= FD_EV_READY_R;

        if (new_flags & (FD_POLL_OUT | FD_POLL_ERR))
                new_flags |= FD_EV_READY_W;

        old = fdtab[fd].state;
        new = (old & ~FD_POLL_UPDT_MASK) | new_flags;

        if (unlikely(locked)) {
                /* Locked FDs (those with more than 2 threads) are atomically updated */
                while (unlikely(new != old && !_HA_ATOMIC_CAS(&fdtab[fd].state, &old, new)))
                        new = (old & ~FD_POLL_UPDT_MASK) | new_flags;
        } else {
                if (new != old)
                        fdtab[fd].state = new;
        }

        if (fdtab[fd].iocb && fd_active(fd)) {
                fdtab[fd].iocb(fd);
        }

        /*
         * We entered iocb with running set and with the valid tgid.
         * Since then, this is what could have happened:
         *   - another thread tried to close the FD (e.g. timeout task from
         *     another one that owns it). We still have running set, but not
         *     tmask. We must call fd_clr_running() then _fd_delete_orphan()
         *     if we were the last one.
         *
         *   - the iocb tried to close the FD => bit no more present in running,
         *     nothing to do. If it managed to close it, the poller's ->clo()
         *     has already been called.
         *
         *   - after we closed, the FD was reassigned to another thread in
         *     another group => running not present, tgid differs, nothing to
         *     do because if it got reassigned it indicates it was already
         *     closed.
         *
         * There's no risk of takeover of the valid FD here during this period.
         * Also if we still have running, immediately after we release it, the
         * events above might instantly happen due to another thread taking
         * over.
         *
         * As such, the only cases where the FD is still relevant are:
         *   - tgid still set and running still set (most common)
         *   - tgid still valid but running cleared due to fd_delete(): we may
         *     still need to stop polling otherwise we may keep it enabled
         *     while waiting for other threads to close it.
         * And given that we may need to program a tentative update in case we
         * don't immediately close, it's easier to grab the tgid during the
         * whole check.
         */

        if (!fd_grab_tgid(fd, tgid))
                return FD_UPDT_CLOSED;

        tmask = _HA_ATOMIC_LOAD(&fdtab[fd].thread_mask);

        /* another thread might have attempted to close this FD in the mean
         * time (e.g. timeout task) striking on a previous thread and closing.
         * This is detected by us being the last owners of a running_mask bit,
         * and the thread_mask being zero. At the moment we release the running
         * bit, a takeover may also happen, so in practice we check for our loss
         * of the thread_mask bit: both thread_mask and running_mask being 0
         * after we remove ourselves last. There is no risk the FD gets
         * reassigned to a different group since it's not released until the
         * real close() in _fd_delete_orphan().
         */
        if (fd_clr_running(fd) == ti->ltid_bit && !(tmask & ti->ltid_bit))
                goto closed_or_migrated;

        /* we had to stop this FD and it still must be stopped after the I/O
         * cb's changes, so let's program an update for this.
         */
        if (must_stop && !(fdtab[fd].update_mask & ti->ltid_bit)) {
                if (((must_stop & FD_POLL_IN)  && !fd_recv_active(fd)) ||
                    ((must_stop & FD_POLL_OUT) && !fd_send_active(fd)))
                        if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, ti->ltid))
                                fd_updt[fd_nbupdt++] = fd;
        }

        fd_drop_tgid(fd);
        return FD_UPDT_DONE;

 closed_or_migrated:
        /* We only come here once we've last dropped running and the FD is
         * not for us as per !(tmask & ltid_bit). It may imply we're
         * responsible for closing it. Otherwise it's just a migration.
         */
        if (HA_ATOMIC_BTR(&fdtab[fd].state, FD_MUST_CLOSE_BIT)) {
                fd_drop_tgid(fd);
                _fd_delete_orphan(fd);
                return FD_UPDT_CLOSED;
        }

        /* So we were alone, no close bit, at best the FD was migrated, at
         * worst it's in the process of being closed by another thread. We must
         * be ultra-careful as it can be re-inserted by yet another thread as
         * the result of socket() or accept(). Let's just tell the poller the
         * FD was lost. If it was closed it was already removed and this will
         * only cost an update for nothing.
         */

 do_update:
        /* The FD is not closed but we don't want the poller to wake up for
         * it anymore.
         */
        if (!HA_ATOMIC_BTS(&fdtab[fd].update_mask, ti->ltid))
                fd_updt[fd_nbupdt++] = fd;

        fd_drop_tgid(fd);
        return FD_UPDT_MIGRATED;
}
+
+/* This is used by pollers at boot time to re-register desired events for
+ * all FDs after new pollers have been created. It doesn't do much, it checks
+ * that their thread group matches the one in argument, and that the thread
+ * mask matches at least one of the bits in the mask, and if so, marks the FD
+ * as updated.
+ */
+void fd_reregister_all(int tgrp, ulong mask)
+{
+ int fd;
+
+ for (fd = 0; fd < global.maxsock; fd++) {
+ if (!fdtab[fd].owner)
+ continue;
+
+ /* make sure we don't register other tgroups' FDs. We just
+ * avoid needlessly taking the lock if not needed.
+ */
+ if (!(_HA_ATOMIC_LOAD(&fdtab[fd].thread_mask) & mask) ||
+ !fd_grab_tgid(fd, tgrp))
+ continue; // was not for us anyway
+
+ if (_HA_ATOMIC_LOAD(&fdtab[fd].thread_mask) & mask)
+ updt_fd_polling(fd);
+ fd_drop_tgid(fd);
+ }
+}
+
+/* Tries to send <npfx> parts from <prefix> followed by <nmsg> parts from <msg>
+ * optionally followed by a newline if <nl> is non-null, to file descriptor
+ * <fd>. The message is sent atomically using writev(). It may be truncated to
+ * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
+ * two lists, it's just a convenience to help the caller prepend some prefixes
+ * when necessary. It takes the fd's lock to make sure no other thread will
+ * write to the same fd in parallel. Returns the number of bytes sent, or <=0
+ * on failure. A limit to 31 total non-empty segments is enforced. The caller
+ * is responsible for taking care of making the fd non-blocking.
+ */
+ssize_t fd_write_frag_line(int fd, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg, int nl)
+{
+ struct iovec iovec[32];
+ size_t sent = 0;
+ int vec = 0;
+ int attempts = 0;
+
+ if (!maxlen)
+ maxlen = ~0;
+
+ /* keep one char for a possible trailing '\n' in any case */
+ maxlen--;
+
+ /* make an iovec from the concatenation of all parts of the original
+ * message. Skip empty fields and truncate the whole message to maxlen,
+ * leaving one spare iovec for the '\n'.
+ */
+ while (vec < (sizeof(iovec) / sizeof(iovec[0]) - 1)) {
+ if (!npfx) {
+ pfx = msg;
+ npfx = nmsg;
+ nmsg = 0;
+ if (!npfx)
+ break;
+ }
+
+ iovec[vec].iov_base = pfx->ptr;
+ iovec[vec].iov_len = MIN(maxlen, pfx->len);
+ maxlen -= iovec[vec].iov_len;
+ if (iovec[vec].iov_len)
+ vec++;
+ pfx++; npfx--;
+ };
+
+ if (nl) {
+ iovec[vec].iov_base = "\n";
+ iovec[vec].iov_len = 1;
+ vec++;
+ }
+
+ /* make sure we never interleave writes and we never block. This means
+ * we prefer to fail on collision than to block. But we don't want to
+ * lose too many logs so we just perform a few lock attempts then give
+ * up.
+ */
+
+ while (HA_ATOMIC_BTS(&fdtab[fd].state, FD_EXCL_SYSCALL_BIT)) {
+ if (++attempts >= 200) {
+ /* so that the caller knows the message couldn't be delivered */
+ sent = -1;
+ errno = EAGAIN;
+ goto leave;
+ }
+ ha_thread_relax();
+ }
+
+ if (unlikely(!(fdtab[fd].state & FD_INITIALIZED))) {
+ HA_ATOMIC_OR(&fdtab[fd].state, FD_INITIALIZED);
+ if (!isatty(fd))
+ fd_set_nonblock(fd);
+ }
+ sent = writev(fd, iovec, vec);
+ HA_ATOMIC_BTR(&fdtab[fd].state, FD_EXCL_SYSCALL_BIT);
+
+ leave:
+ /* sent > 0 if the message was delivered */
+ return sent;
+}
+
#if defined(USE_CLOSEFROM)
/* The operating system provides closefrom(): simply delegate to it to close
 * all open file descriptors at or above <start>.
 */
void my_closefrom(int start)
{
        closefrom(start);
}
+
#elif defined(USE_POLL)
/* This is a portable implementation of closefrom(). It closes all open file
 * descriptors starting at <start> and above. It relies on the fact that poll()
 * will return POLLNVAL for each invalid (hence close) file descriptor passed
 * in argument in order to skip them. It acts with batches of FDs and will
 * typically perform one poll() call per 1024 FDs so the overhead is low in
 * case all FDs have to be closed.
 */
void my_closefrom(int start)
{
        struct pollfd poll_events[1024];
        struct rlimit limit;
        int nbfds, fd, ret, idx;
        int step, next;

        /* the soft RLIMIT_NOFILE is the upper bound for open FDs */
        if (getrlimit(RLIMIT_NOFILE, &limit) == 0)
                step = nbfds = limit.rlim_cur;
        else
                step = nbfds = 0;

        if (nbfds <= 0) {
                /* set safe limit */
                nbfds = 1024;
                step = 256;
        }

        /* never probe more FDs per round than poll_events can hold */
        if (step > sizeof(poll_events) / sizeof(poll_events[0]))
                step = sizeof(poll_events) / sizeof(poll_events[0]);

        while (start < nbfds) {
                next = (start / step + 1) * step;

                /* prepare one entry per FD in the current batch; events==0 so
                 * only POLLNVAL/POLLERR/POLLHUP may ever be reported.
                 */
                for (fd = start; fd < next && fd < nbfds; fd++) {
                        poll_events[fd - start].fd = fd;
                        poll_events[fd - start].events = 0;
                }

                do {
                        ret = poll(poll_events, fd - start, 0);
                        if (ret >= 0)
                                break;
                } while (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR || errno == ENOMEM);

                /* any event (or a persistent poll() failure) makes us scan
                 * the whole batch. NOTE(review): when poll() returns 0, the
                 * batch is skipped entirely; verify that open FDs can't yield
                 * ret==0 here, otherwise they would be left open.
                 */
                if (ret)
                        ret = fd - start;

                for (idx = 0; idx < ret; idx++) {
                        if (poll_events[idx].revents & POLLNVAL)
                                continue; /* already closed */

                        fd = poll_events[idx].fd;
                        close(fd);
                }
                start = next;
        }
}
+
+#else // defined(USE_POLL)
+
+/* This is a portable implementation of closefrom(). It closes all open file
+ * descriptors starting at <start> and above. This is a naive version for use
+ * when the operating system provides no alternative.
+ */
+void my_closefrom(int start)
+{
+ struct rlimit limit;
+ int nbfds;
+
+ if (getrlimit(RLIMIT_NOFILE, &limit) == 0)
+ nbfds = limit.rlim_cur;
+ else
+ nbfds = 0;
+
+ if (nbfds <= 0)
+ nbfds = 1024; /* safe limit */
+
+ while (start < nbfds)
+ close(start++);
+}
+#endif // defined(USE_POLL)
+
/* Sets the RLIMIT_NOFILE setting to <new_limit> and returns the previous one
 * in <old_limit> if the pointer is not NULL, even if setrlimit() fails. The
 * two pointers may point to the same variable as the copy happens after
 * setting the new value. The value is only changed if at least one of the new
 * limits is strictly higher than the current one, otherwise returns 0 without
 * changing anything. The getrlimit() or setrlimit() syscall return value is
 * returned and errno is preserved.
 */
int raise_rlim_nofile(struct rlimit *old_limit, struct rlimit *new_limit)
{
        struct rlimit prev = { };
        int ret;

        /* fetch the current limits; on failure <prev> stays zeroed and the
         * getrlimit() error is what gets reported.
         */
        ret = getrlimit(RLIMIT_NOFILE, &prev);

        /* only touch the limits when at least one of the new values is
         * strictly higher than what's currently in place.
         */
        if (ret == 0 &&
            (prev.rlim_max < new_limit->rlim_max ||
             prev.rlim_cur < new_limit->rlim_cur))
                ret = setrlimit(RLIMIT_NOFILE, new_limit);

        /* copy after the change so old/new may alias the same variable */
        if (old_limit)
                *old_limit = prev;

        return ret;
}
+
+/* Computes the bounded poll() timeout based on the next expiration timer <next>
+ * by bounding it to MAX_DELAY_MS. <next> may equal TICK_ETERNITY. The pollers
+ * just needs to call this function right before polling to get their timeout
+ * value. Timeouts that are already expired (possibly due to a pending event)
+ * are accounted for in activity.poll_exp.
+ */
+int compute_poll_timeout(int next)
+{
+ int wait_time;
+
+ if (!tick_isset(next))
+ wait_time = MAX_DELAY_MS;
+ else if (tick_is_expired(next, now_ms)) {
+ activity[tid].poll_exp++;
+ wait_time = 0;
+ }
+ else {
+ wait_time = TICKS_TO_MS(tick_remain(now_ms, next)) + 1;
+ if (wait_time > MAX_DELAY_MS)
+ wait_time = MAX_DELAY_MS;
+ }
+ return wait_time;
+}
+
/* Handle the return of the poller, which consists in calculating the idle
 * time, saving a few clocks, marking the thread harmful again etc. All that
 * is some boring stuff that all pollers have to do anyway.
 */
void fd_leaving_poll(int wait_time, int status)
{
        /* account for the time spent sleeping in the poller */
        clock_leaving_poll(wait_time, status);

        /* leave the harmless/idle states entered before polling */
        thread_harmless_end();
        thread_idle_end();

        _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
}
+
+/* disable the specified poller */
+void disable_poller(const char *poller_name)
+{
+ int p;
+
+ for (p = 0; p < nbpollers; p++)
+ if (strcmp(pollers[p].name, poller_name) == 0)
+ pollers[p].pref = 0;
+}
+
/* I/O handler for the poller's internal pipe: drains all pending bytes then
 * marks the FD as unable to receive until new data arrives.
 */
void poller_pipe_io_handler(int fd)
{
        char drain[1024];

        /* consume everything currently queued in the pipe */
        for (;;) {
                if (read(fd, drain, sizeof(drain)) <= 0)
                        break;
        }
        fd_cant_recv(fd);
}
+
+/* allocate the per-thread fd_updt thus needs to be called early after
+ * thread creation.
+ */
+static int alloc_pollers_per_thread()
+{
+ fd_updt = calloc(global.maxsock, sizeof(*fd_updt));
+ return fd_updt != NULL;
+}
+
/* Initialize the pollers per thread: creates the per-thread pipe (presumably
 * used to wake the poller up — see poller_pipe_io_handler()) and registers
 * both ends in the fd table for the current thread only. Returns 0 on
 * failure (pipe creation), 1 on success.
 */
static int init_pollers_per_thread()
{
        int mypipe[2];

        if (pipe(mypipe) < 0)
                return 0;

        poller_rd_pipe = mypipe[0];
        poller_wr_pipe[tid] = mypipe[1];
        fd_set_nonblock(poller_rd_pipe);
        /* both ends are owned by the current thread only (ti->ltid_bit)
         * within the current group (tgid).
         */
        fd_insert(poller_rd_pipe, poller_pipe_io_handler, poller_pipe_io_handler, tgid, ti->ltid_bit);
        fd_insert(poller_wr_pipe[tid], poller_pipe_io_handler, poller_pipe_io_handler, tgid, ti->ltid_bit);
        /* only the read side is polled; the write side stays passive */
        fd_want_recv(poller_rd_pipe);
        fd_stop_both(poller_wr_pipe[tid]);
        return 1;
}
+
/* Deinitialize the pollers per thread */
static void deinit_pollers_per_thread()
{
        /* rd and wr are init at the same place, but only rd is init to -1,
         * so we rely on rd to know whether the pair must be closed.
         */
        if (poller_rd_pipe > -1) {
                fd_delete(poller_rd_pipe);
                poller_rd_pipe = -1;
                fd_delete(poller_wr_pipe[tid]);
                poller_wr_pipe[tid] = -1;
        }
}
+
/* Release the pollers per thread, to be called late */
static void free_pollers_per_thread()
{
        /* reset the pending-update count before releasing the list */
        fd_nbupdt = 0;
        ha_free(&fd_updt);
}
+
/*
 * Initialize the pollers till the best one is found.
 * If none works, returns 0, otherwise 1.
 */
int init_pollers()
{
        int p;
        struct poller *bp;

        /* extra room is allocated so that the usable fdtab base can be
         * rounded up to a 64-byte boundary below.
         */
        if ((fdtab_addr = calloc(global.maxsock, sizeof(*fdtab) + 64)) == NULL) {
                ha_alert("Not enough memory to allocate %d entries for fdtab!\n", global.maxsock);
                goto fail_tab;
        }

        /* always provide an aligned fdtab */
        fdtab = (struct fdtab*)((((size_t)fdtab_addr) + 63) & -(size_t)64);

        if ((polled_mask = calloc(global.maxsock, sizeof(*polled_mask))) == NULL) {
                ha_alert("Not enough memory to allocate %d entries for polled_mask!\n", global.maxsock);
                goto fail_polledmask;
        }

        if ((fdinfo = calloc(global.maxsock, sizeof(*fdinfo))) == NULL) {
                ha_alert("Not enough memory to allocate %d entries for fdinfo!\n", global.maxsock);
                goto fail_info;
        }

        for (p = 0; p < MAX_TGROUPS; p++)
                update_list[p].first = update_list[p].last = -1;

        for (p = 0; p < global.maxsock; p++) {
                /* Mark the fd as out of the fd cache */
                fdtab[p].update.next = -3;
        }

        /* pick the poller with the highest preference and try to initialize
         * it. NOTE(review): if the preferred poller's init() fails, the loop
         * condition below terminates without trying the remaining pollers —
         * confirm this early exit is intended.
         */
        do {
                bp = NULL;
                for (p = 0; p < nbpollers; p++)
                        if (!bp || (pollers[p].pref > bp->pref))
                                bp = &pollers[p];

                if (!bp || bp->pref == 0)
                        break;

                if (bp->init(bp)) {
                        memcpy(&cur_poller, bp, sizeof(*bp));
                        return 1;
                }
        } while (!bp || bp->pref == 0);

        /* no poller could be initialized: release everything */
        free(fdinfo);
 fail_info:
        free(polled_mask);
 fail_polledmask:
        free(fdtab_addr);
 fail_tab:
        return 0;
}
+
+/*
+ * Deinitialize the pollers.
+ */
+void deinit_pollers() {
+
+ struct poller *bp;
+ int p;
+
+ for (p = 0; p < nbpollers; p++) {
+ bp = &pollers[p];
+
+ if (bp && bp->pref)
+ bp->term(bp);
+ }
+
+ ha_free(&fdinfo);
+ ha_free(&fdtab_addr);
+ ha_free(&polled_mask);
+}
+
/*
 * Lists the known pollers on <out>.
 * Should be performed only before initialization.
 */
int list_pollers(FILE *out)
{
        int p;
        int last, next;
        int usable;
        struct poller *bp;

        fprintf(out, "Available polling systems :\n");

        usable = 0;
        bp = NULL;
        last = next = -1;
        /* enumerate pollers by strictly decreasing preference level: <last>
         * is the previously printed preference, <next> the highest one below
         * it; <bp> tracks the best usable poller seen so far.
         */
        while (1) {
                for (p = 0; p < nbpollers; p++) {
                        if ((next < 0 || pollers[p].pref > next)
                            && (last < 0 || pollers[p].pref < last)) {
                                next = pollers[p].pref;
                                if (!bp || (pollers[p].pref > bp->pref))
                                        bp = &pollers[p];
                        }
                }

                if (next == -1)
                        break;

                /* print all pollers sharing the current preference level */
                for (p = 0; p < nbpollers; p++) {
                        if (pollers[p].pref == next) {
                                fprintf(out, " %10s : ", pollers[p].name);
                                if (pollers[p].pref == 0)
                                        fprintf(out, "disabled, ");
                                else
                                        fprintf(out, "pref=%3d, ", pollers[p].pref);
                                if (pollers[p].test(&pollers[p])) {
                                        fprintf(out, " test result OK");
                                        if (next > 0)
                                                usable++;
                                } else {
                                        fprintf(out, " test result FAILED");
                                        /* a failing poller may not be selected */
                                        if (bp == &pollers[p])
                                                bp = NULL;
                                }
                                fprintf(out, "\n");
                        }
                }
                last = next;
                next = -1;
        };
        fprintf(out, "Total: %d (%d usable), will use %s.\n", nbpollers, usable, bp ? bp->name : "none");
        return 0;
}
+
/*
 * Some pollers may lose their connection after a fork(). It may be necessary
 * to create initialize part of them again. Returns 0 in case of failure,
 * otherwise 1. The fork() function may be NULL if unused. In case of error,
 * the current poller is destroyed and the caller is responsible for trying
 * another one by calling init_pollers() again.
 */
int fork_poller()
{
        int fd;

        /* flag every registered FD as cloned — presumably so that the poller
         * treats FDs shared across the fork specially; see the pollers'
         * handling of FD_CLONED.
         */
        for (fd = 0; fd < global.maxsock; fd++) {
                if (fdtab[fd].owner) {
                        HA_ATOMIC_OR(&fdtab[fd].state, FD_CLONED);
                }
        }

        if (cur_poller.fork) {
                if (cur_poller.fork(&cur_poller))
                        return 1;
                /* fork() callback failed: tear the poller down */
                cur_poller.term(&cur_poller);
                return 0;
        }
        return 1;
}
+
+/* config parser for global "tune.fd.edge-triggered", accepts "on" or "off" */
+static int cfg_parse_tune_fd_edge_triggered(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global.tune.options |= GTUNE_FD_ET;
+ else if (strcmp(args[1], "off") == 0)
+ global.tune.options &= ~GTUNE_FD_ET;
+ else {
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
        { CFG_GLOBAL, "tune.fd.edge-triggered", cfg_parse_tune_fd_edge_triggered, KWF_EXPERIMENTAL },
        { 0, NULL, NULL }
}};

/* register the config keywords above at startup */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

/* per-thread lifecycle hooks for the poller structures */
REGISTER_PER_THREAD_ALLOC(alloc_pollers_per_thread);
REGISTER_PER_THREAD_INIT(init_pollers_per_thread);
REGISTER_PER_THREAD_DEINIT(deinit_pollers_per_thread);
REGISTER_PER_THREAD_FREE(free_pollers_per_thread);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/filters.c b/src/filters.c
new file mode 100644
index 0000000..e55adee
--- /dev/null
+++ b/src/filters.c
@@ -0,0 +1,1125 @@
+/*
+ * Stream filters related variables and functions.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/compression.h>
+#include <haproxy/errors.h>
+#include <haproxy/filters.h>
+#include <haproxy/flt_http_comp.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/namespace.h>
+#include <haproxy/proxy.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+
+
+#define TRACE_SOURCE &trace_strm
+
+/* Pool used to allocate filters */
+DECLARE_STATIC_POOL(pool_head_filter, "filter", sizeof(struct filter));
+
+static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);
+
/* - RESUME_FILTER_LOOP and RESUME_FILTER_END must always be used together.
 *   The first one begins a loop and the seconds one ends it.
 *
 * - BREAK_EXECUTION must be used to break the loop and set the filter from
 *   which to resume the next time.
 *
 *  Here is an example:
 *
 *    RESUME_FILTER_LOOP(stream, channel) {
 *        ...
 *        if (cond)
 *             BREAK_EXECUTION(stream, channel, label);
 *        ...
 *    } RESUME_FILTER_END;
 *    ...
 *     label:
 *    ...
 *
 */
/* Note: the iteration below now uses strm_flt(strm) — the macro argument.
 * It previously read strm_flt(s), silently capturing a variable named 's'
 * from every call site instead of using the macro's own parameter.
 */
#define RESUME_FILTER_LOOP(strm, chn)					\
	do {								\
		struct filter *filter;					\
									\
		if (strm_flt(strm)->current[CHN_IDX(chn)]) {		\
			filter = strm_flt(strm)->current[CHN_IDX(chn)]; \
			strm_flt(strm)->current[CHN_IDX(chn)] = NULL;	\
			goto resume_execution;				\
		}							\
									\
		list_for_each_entry(filter, &strm_flt(strm)->filters, list) { \
		resume_execution:

#define RESUME_FILTER_END					\
		}						\
	} while(0)

#define BREAK_EXECUTION(strm, chn, label)				\
	do {								\
		strm_flt(strm)->current[CHN_IDX(chn)] = filter;	        \
		goto label;						\
	} while (0)
+
+
+/* List head of all known filter keywords */
+static struct flt_kw_list flt_keywords = {
+ .list = LIST_HEAD_INIT(flt_keywords.list)
+};
+
/*
 * Registers the filter keyword list <kwl> as a list of valid keywords for next
 * parsing sessions.
 */
void
flt_register_keywords(struct flt_kw_list *kwl)
{
        /* simple append to the global list; no duplicate check is performed */
        LIST_APPEND(&flt_keywords.list, &kwl->list);
}
+
+/*
+ * Returns a pointer to the filter keyword <kw>, or NULL if not found. If the
+ * keyword is found with a NULL ->parse() function, then an attempt is made to
+ * find one with a valid ->parse() function. This way it is possible to declare
+ * platform-dependant, known keywords as NULL, then only declare them as valid
+ * if some options are met. Note that if the requested keyword contains an
+ * opening parenthesis, everything from this point is ignored.
+ */
+struct flt_kw *
+flt_find_kw(const char *kw)
+{
+ int index;
+ const char *kwend;
+ struct flt_kw_list *kwl;
+ struct flt_kw *ret = NULL;
+
+ kwend = strchr(kw, '(');
+ if (!kwend)
+ kwend = kw + strlen(kw);
+
+ list_for_each_entry(kwl, &flt_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
+ kwl->kw[index].kw[kwend-kw] == 0) {
+ if (kwl->kw[index].parse)
+ return &kwl->kw[index]; /* found it !*/
+ else
+ ret = &kwl->kw[index]; /* may be OK */
+ }
+ }
+ }
+ return ret;
+}
+
/*
 * Dumps all registered "filter" keywords to the <out> string pointer. The
 * unsupported keywords are only dumped if their supported form was not found.
 * If <out> is NULL, the output is emitted using a more compact format on stdout.
 */
void
flt_dump_kws(char **out)
{
        struct flt_kw_list *kwl;
        const struct flt_kw *kwp, *kw;
        const char *scope = NULL;
        int index;

        if (out)
                *out = NULL;

        /* each outer iteration selects and prints the next keyword in order
         * after <kwp>, presumably relying on strordered() for the ordering —
         * effectively a selection-sort walk over all registered lists.
         */
        for (kw = kwp = NULL;; kwp = kw) {
                list_for_each_entry(kwl, &flt_keywords.list, list) {
                        for (index = 0; kwl->kw[index].kw != NULL; index++) {
                                if ((kwl->kw[index].parse ||
                                     flt_find_kw(kwl->kw[index].kw) == &kwl->kw[index])
                                    && strordered(kwp ? kwp->kw : NULL,
                                                  kwl->kw[index].kw,
                                                  kw != kwp ? kw->kw : NULL)) {
                                        kw = &kwl->kw[index];
                                        scope = kwl->scope;
                                }
                        }
                }

                /* no new keyword was selected: everything has been dumped */
                if (kw == kwp)
                        break;

                if (out)
                        memprintf(out, "%s[%4s] %s%s\n", *out ? *out : "",
                                  scope,
                                  kw->kw,
                                  kw->parse ? "" : " (not supported)");
                else
                        printf("%s [%s]\n",
                               kw->kw, scope);
        }
}
+
/*
 * Lists the known filters on <out>
 */
void
list_filters(FILE *out)
{
        char *filters, *p, *f;

        fprintf(out, "Available filters :\n");
        flt_dump_kws(&filters);

        /* flt_dump_kws() leaves <filters> NULL when no keyword is registered;
         * guard the loop so we never hand a NULL string to strtok_r(), which
         * would crash.
         */
        for (p = filters; p && (f = strtok_r(p, "\n", &p));)
                fprintf(out, "\t%s\n", f);

        free(filters); /* free(NULL) is a no-op */
}
+
/*
 * Parses the "filter" keyword. All keywords must be handled by filters
 * themselves
 */
static int
parse_filter(char **args, int section_type, struct proxy *curpx,
             const struct proxy *defpx, const char *file, int line, char **err)
{
        struct flt_conf *fconf = NULL;

        /* Filter cannot be defined on a default proxy */
        if (curpx == defpx) {
                memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
                          file, line, args[0]);
                return -1;
        }
        if (strcmp(args[0], "filter") == 0) {
                struct flt_kw *kw;
                int cur_arg;

                /* a filter keyword is mandatory right after "filter" */
                if (!*args[1]) {
                        memprintf(err,
                                  "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
                                  file, line, args[0], proxy_type_str(curpx), curpx->id);
                        goto error;
                }
                fconf = calloc(1, sizeof(*fconf));
                if (!fconf) {
                        memprintf(err, "'%s' : out of memory", args[0]);
                        goto error;
                }

                cur_arg = 1;
                kw = flt_find_kw(args[cur_arg]);
                if (kw) {
                        if (!kw->parse) {
                                /* known keyword but compiled out */
                                memprintf(err, "parsing [%s:%d] : '%s' : "
                                          "'%s' option is not implemented in this version (check build options).",
                                          file, line, args[0], args[cur_arg]);
                                goto error;
                        }
                        /* the keyword's own parser consumes the remaining
                         * arguments and fills <fconf>; non-zero means failure.
                         */
                        if (kw->parse(args, &cur_arg, curpx, fconf, err, kw->private) != 0) {
                                if (err && *err)
                                        memprintf(err, "'%s' : '%s'",
                                                  args[0], *err);
                                else
                                        memprintf(err, "'%s' : error encountered while processing '%s'",
                                                  args[0], args[cur_arg]);
                                goto error;
                        }
                }
                else {
                        /* unknown keyword: dump the registered ones as a hint */
                        flt_dump_kws(err);
                        indent_msg(err, 4);
                        memprintf(err, "'%s' : unknown keyword '%s'.%s%s",
                                  args[0], args[cur_arg],
                                  err && *err ? " Registered keywords :" : "", err && *err ? *err : "");
                        goto error;
                }
                /* the keyword parser must have consumed all arguments */
                if (*args[cur_arg]) {
                        memprintf(err, "'%s %s' : unknown keyword '%s'.",
                                  args[0], args[1], args[cur_arg]);
                        goto error;
                }
                if (fconf->ops == NULL) {
                        memprintf(err, "'%s %s' : no callbacks defined.",
                                  args[0], args[1]);
                        goto error;
                }

                LIST_APPEND(&curpx->filter_configs, &fconf->list);
        }
        return 0;

 error:
        free(fconf);
        return -1;


}
+
+/*
+ * Calls 'init' callback for all filters attached to a proxy. This happens after
+ * the configuration parsing. Filters can finish to fill their config. Returns
+ * (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
+ */
+static int
+flt_init(struct proxy *proxy)
+{
+ struct flt_conf *fconf;
+
+ list_for_each_entry(fconf, &proxy->filter_configs, list) {
+ if (fconf->ops->init && fconf->ops->init(proxy, fconf) < 0)
+ return ERR_ALERT|ERR_FATAL;
+ }
+ return 0;
+}
+
+/*
+ * Calls 'init_per_thread' callback for all filters attached to a proxy for each
+ * threads. This happens after the thread creation. Filters can finish to fill
+ * their config. Returns (ERR_ALERT|ERR_FATAL) if an error occurs, 0 otherwise.
+ */
+static int
+flt_init_per_thread(struct proxy *proxy)
+{
+ struct flt_conf *fconf;
+
+ list_for_each_entry(fconf, &proxy->filter_configs, list) {
+ if (fconf->ops->init_per_thread && fconf->ops->init_per_thread(proxy, fconf) < 0)
+ return ERR_ALERT|ERR_FATAL;
+ }
+ return 0;
+}
+
+/* Calls flt_init() for all proxies, see above. Disabled and stopped proxies
+ * are skipped. Returns the cumulated error code, 0 on success. */
+static int
+flt_init_all()
+{
+	struct proxy *px;
+	int err_code = ERR_NONE;
+
+	for (px = proxies_list; px; px = px->next) {
+		/* no need to initialize filters for proxies that will never run */
+		if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+			continue;
+
+		err_code |= flt_init(px);
+		if (err_code & (ERR_ABORT|ERR_FATAL)) {
+			ha_alert("Failed to initialize filters for proxy '%s'.\n",
+				 px->id);
+			return err_code;
+		}
+	}
+	return 0;
+}
+
+/* Calls flt_init_per_thread() for all proxies, see above. Be careful here, it
+ * follows the per-thread init callback convention, which is the opposite of
+ * flt_init_all(): it returns 0 if an error occurred and 1 on success. */
+static int
+flt_init_all_per_thread()
+{
+	struct proxy *px;
+	int err_code = 0;
+
+	for (px = proxies_list; px; px = px->next) {
+		/* no need to initialize filters for proxies that will never run */
+		if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+			continue;
+
+		err_code = flt_init_per_thread(px);
+		if (err_code & (ERR_ABORT|ERR_FATAL)) {
+			ha_alert("Failed to initialize filters for proxy '%s' for thread %u.\n",
+				 px->id, tid);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/*
+ * Calls the 'check' callback for all filters attached to a proxy. This happens
+ * after the configuration parsing but before filters initialization. Also
+ * handles the implicit HTTP compression filter, if needed. Returns the number
+ * of encountered errors.
+ */
+int
+flt_check(struct proxy *proxy)
+{
+	struct flt_conf *fconf;
+	int err = 0;
+
+	/* add an implicit http compression filter when "compression" keywords
+	 * were used without an explicit "filter compression" line */
+	err += check_implicit_http_comp_flt(proxy);
+	list_for_each_entry(fconf, &proxy->filter_configs, list) {
+		if (fconf->ops->check)
+			err += fconf->ops->check(proxy, fconf);
+	}
+	return err;
+}
+
+/*
+ * Calls the 'deinit' callback for all filters attached to a proxy and releases
+ * their configuration. This happens when HAProxy is stopped.
+ */
+void
+flt_deinit(struct proxy *proxy)
+{
+	struct flt_conf *fconf, *back;
+
+	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
+		if (fconf->ops->deinit)
+			fconf->ops->deinit(proxy, fconf);
+		LIST_DELETE(&fconf->list);
+		free(fconf);
+	}
+}
+
+/*
+ * Calls the 'deinit_per_thread' callback for all filters attached to a proxy,
+ * for each thread. This happens before exiting a thread. Unlike flt_deinit(),
+ * the filter configurations are not released here, only per-thread resources.
+ */
+void
+flt_deinit_per_thread(struct proxy *proxy)
+{
+	struct flt_conf *fconf, *back;
+
+	list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
+		if (fconf->ops->deinit_per_thread)
+			fconf->ops->deinit_per_thread(proxy, fconf);
+	}
+}
+
+
+/* Calls flt_deinit_per_thread() for all proxies, see above */
+static void
+flt_deinit_all_per_thread()
+{
+	struct proxy *px;
+
+	for (px = proxies_list; px; px = px->next)
+		flt_deinit_per_thread(px);
+}
+
+/* Attaches a filter to a stream. A filter declared without HTX support is
+ * silently skipped for HTX streams. <flags> is merged into the new filter's
+ * flags (e.g. FLT_FL_IS_BACKEND_FILTER). Returns -1 if an error occurs
+ * (memory allocation failure or 'attach' callback error), 0 otherwise. */
+static int
+flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
+{
+	struct filter *f;
+
+	/* ignore filters without HTX support on HTX streams */
+	if (IS_HTX_STRM(s) && !(fconf->flags & FLT_CFG_FL_HTX))
+		return 0;
+
+	f = pool_zalloc(pool_head_filter);
+	if (!f) /* not enough memory */
+		return -1;
+	f->config = fconf;
+	f->flags |= flags;
+
+	/* the 'attach' callback may refuse the filter for this stream: 0 skips
+	 * it silently, a negative value is an error */
+	if (FLT_OPS(f)->attach) {
+		int ret = FLT_OPS(f)->attach(s, f);
+		if (ret <= 0) {
+			pool_free(pool_head_filter, f);
+			return ret;
+		}
+	}
+
+	LIST_APPEND(&strm_flt(s)->filters, &f->list);
+	strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
+	return 0;
+}
+
+/*
+ * Called when a stream is created. It resets the stream's filter context and
+ * attaches all frontend filters to the stream. Returns -1 if an error occurs,
+ * 0 otherwise.
+ */
+int
+flt_stream_init(struct stream *s)
+{
+	struct flt_conf *fconf;
+
+	memset(strm_flt(s), 0, sizeof(*strm_flt(s)));
+	LIST_INIT(&strm_flt(s)->filters);
+	list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
+		if (flt_stream_add_filter(s, fconf, 0) < 0)
+			return -1;
+	}
+	return 0;
+}
+
+/*
+ * Called when a stream is closed or when analysis ends (for an HTTP stream,
+ * this happens after each request/response exchange). When analysis ends
+ * (<only_backend> is non-zero), only backend filters are removed. When the
+ * stream is closed, all filters attached to the stream are removed. The
+ * 'detach' callback is called for each removed filter.
+ */
+void
+flt_stream_release(struct stream *s, int only_backend)
+{
+	struct filter *filter, *back;
+
+	list_for_each_entry_safe(filter, back, &strm_flt(s)->filters, list) {
+		if (!only_backend || (filter->flags & FLT_FL_IS_BACKEND_FILTER)) {
+			if (FLT_OPS(filter)->detach)
+				FLT_OPS(filter)->detach(s, filter);
+			LIST_DELETE(&filter->list);
+			pool_free(pool_head_filter, filter);
+		}
+	}
+	/* drop the flag if the last filter was just removed */
+	if (LIST_ISEMPTY(&strm_flt(s)->filters))
+		strm_flt(s)->flags &= ~STRM_FLT_FL_HAS_FILTERS;
+}
+
+/*
+ * Calls 'stream_start' for all filters attached to a stream. This happens when
+ * the stream is created, just after calling the flt_stream_init()
+ * function. Returns -1 if an error occurs, 0 otherwise.
+ */
+int
+flt_stream_start(struct stream *s)
+{
+	struct filter *filter;
+
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->stream_start && FLT_OPS(filter)->stream_start(s, filter) < 0)
+			return -1;
+	}
+	/* if the frontend requests request filtering, enable the FLT_END
+	 * analyzer on the request channel so filters are properly released */
+	if (strm_li(s) && (strm_li(s)->bind_conf->analysers & AN_REQ_FLT_START_FE)) {
+		s->req.flags |= CF_FLT_ANALYZE;
+		s->req.analysers |= AN_REQ_FLT_END;
+	}
+	return 0;
+}
+
+/*
+ * Calls 'stream_stop' for all filters attached to a stream. This happens when
+ * the stream is stopped, just before calling the flt_stream_release() function.
+ */
+void
+flt_stream_stop(struct stream *s)
+{
+	struct filter *filter;
+
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->stream_stop)
+			FLT_OPS(filter)->stream_stop(s, filter);
+	}
+}
+
+/*
+ * Calls 'check_timeouts' for all filters attached to a stream. This happens
+ * when the stream is woken up because of an expired timer.
+ */
+void
+flt_stream_check_timeouts(struct stream *s)
+{
+	struct filter *filter;
+
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->check_timeouts)
+			FLT_OPS(filter)->check_timeouts(s, filter);
+	}
+}
+
+/*
+ * Called when a backend is set for a stream. If the frontend and the backend
+ * are not the same, this function attaches all backend filters to the
+ * stream. In all cases, the 'stream_set_backend' callback is then called for
+ * every filter attached to the stream. Returns -1 if an error occurs, 0
+ * otherwise.
+ */
+int
+flt_set_stream_backend(struct stream *s, struct proxy *be)
+{
+	struct flt_conf *fconf;
+	struct filter *filter;
+
+	/* same proxy on both sides: frontend filters are already attached */
+	if (strm_fe(s) == be)
+		goto end;
+
+	list_for_each_entry(fconf, &be->filter_configs, list) {
+		if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
+			return -1;
+	}
+
+  end:
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->stream_set_backend &&
+		    FLT_OPS(filter)->stream_set_backend(s, filter, be) < 0)
+			return -1;
+	}
+	/* enable the FLT_END analyzers on the channels that will be filtered,
+	 * so filters are properly synchronized and released */
+	if (be->be_req_ana & AN_REQ_FLT_START_BE) {
+		s->req.flags |= CF_FLT_ANALYZE;
+		s->req.analysers |= AN_REQ_FLT_END;
+	}
+	if ((strm_fe(s)->fe_rsp_ana | be->be_rsp_ana) & (AN_RES_FLT_START_FE|AN_RES_FLT_START_BE)) {
+		s->res.flags |= CF_FLT_ANALYZE;
+		s->res.analysers |= AN_RES_FLT_END;
+	}
+
+	return 0;
+}
+
+
+/*
+ * Calls the 'http_end' callback for all filters attached to a stream. All
+ * filters are called here, but only if there is at least one "data" filter.
+ * This function is called when all data were parsed and forwarded. The
+ * 'http_end' callback is resumable, so this function returns a negative value
+ * if an error occurs, 0 if it needs to wait for some reason, any other value
+ * otherwise.
+ */
+int
+flt_http_end(struct stream *s, struct http_msg *msg)
+{
+	unsigned long long *strm_off = &FLT_STRM_OFF(s, msg->chn);
+	unsigned int offset = 0;
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg);
+	RESUME_FILTER_LOOP(s, msg->chn) {
+		unsigned long long flt_off = FLT_OFF(filter, msg->chn);
+		offset = flt_off - *strm_off;
+
+		/* Call http_end for data filters only. But the filter offset
+		 * is still valid for all filters. */
+		if (!IS_DATA_FILTER(filter, msg->chn))
+			continue;
+
+		if (FLT_OPS(filter)->http_end) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->http_end(s, filter, msg);
+			if (ret <= 0)
+				BREAK_EXECUTION(s, msg->chn, end);
+		}
+	} RESUME_FILTER_END;
+
+	/* forward any data the filters have released */
+	c_adv(msg->chn, offset);
+	*strm_off += offset;
+
+end:
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Calls the 'http_reset' callback for all filters attached to a stream. This
+ * happens when a 100-continue response is received.
+ */
+void
+flt_http_reset(struct stream *s, struct http_msg *msg)
+{
+	struct filter *filter;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg);
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->http_reset) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+			FLT_OPS(filter)->http_reset(s, filter, msg);
+		}
+	}
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+}
+
+/*
+ * Calls the 'http_reply' callback for all filters attached to a stream when
+ * HAProxy decides to stop the HTTP message processing and to return an
+ * internal response (<status> and optional body <msg>).
+ */
+void
+flt_http_reply(struct stream *s, short status, const struct buffer *msg)
+{
+	struct filter *filter;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg);
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->http_reply) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+			FLT_OPS(filter)->http_reply(s, filter, status, msg);
+		}
+	}
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+}
+
+/*
+ * Calls the 'http_payload' callback for all "data" filters attached to a
+ * stream. This function is called when some data can be forwarded in the
+ * AN_REQ_HTTP_XFER_BODY and AN_RES_HTTP_XFER_BODY analyzers. It takes care to
+ * update the filters and the stream offset to be sure that a filter cannot
+ * forward more data than its predecessors. A filter can choose to not forward
+ * all data. Returns a negative value if an error occurs, else the number of
+ * forwarded bytes.
+ */
+int
+flt_http_payload(struct stream *s, struct http_msg *msg, unsigned int len)
+{
+	struct filter *filter;
+	struct htx *htx;
+	unsigned long long *strm_off = &FLT_STRM_OFF(s, msg->chn);
+	unsigned int out = co_data(msg->chn);
+	int ret, data;
+
+	/* filters willing to hold the headers must re-set this flag on each
+	 * http_payload loop (see the comment below) */
+	strm_flt(s)->flags &= ~STRM_FLT_FL_HOLD_HTTP_HDRS;
+
+	/* <data> is the uncommitted input, i.e. what may be filtered */
+	ret = data = len - out;
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg);
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		unsigned long long *flt_off = &FLT_OFF(filter, msg->chn);
+		unsigned int offset = *flt_off - *strm_off;
+
+		/* Call http_payload for data filters only. Forward all data for
+		 * others and update the filter offset
+		 */
+		if (!IS_DATA_FILTER(filter, msg->chn)) {
+			*flt_off += data - offset;
+			continue;
+		}
+
+		if (FLT_OPS(filter)->http_payload) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->http_payload(s, filter, msg, out + offset, data - offset);
+			if (ret < 0)
+				goto end;
+			/* clamp what the next filters may forward to what this
+			 * one accepted */
+			data = ret + *flt_off - *strm_off;
+			*flt_off += ret;
+		}
+	}
+
+	/* If nothing was forwarded yet, we take care to hold the headers if
+	 * following conditions are met :
+	 *
+	 *  - *strm_off == 0 (nothing forwarded yet)
+	 *  - ret == 0 (no data forwarded at all on this turn)
+	 *  - STRM_FLT_FL_HOLD_HTTP_HDRS flag set (at least one filter want to hold the headers)
+	 *
+	 * Be careful, STRM_FLT_FL_HOLD_HTTP_HDRS is removed before each http_payload loop.
+	 * Thus, it must explicitly be set when necessary. We must do that to hold the headers
+	 * when there is no payload.
+	 */
+	if (!ret && !*strm_off && (strm_flt(s)->flags & STRM_FLT_FL_HOLD_HTTP_HDRS))
+		goto end;
+
+	ret = data;
+	*strm_off += ret;
+ end:
+	/* prevent the mux from pre-committing data not yet seen by filters */
+	htx = htxbuf(&msg->chn->buf);
+	if (msg->flags & HTTP_MSGF_XFER_LEN)
+		htx->extra = 0;
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Calls the 'channel_start_analyze' callback for all filters attached to a
+ * stream. This function is called when we start to analyze a request or a
+ * response. For frontend filters, it is called before all other analyzers. For
+ * backend ones, it is called before all backend
+ * analyzers. The 'channel_start_analyze' callback is resumable, so this
+ * function returns 0 if an error occurs or if it needs to wait, any other
+ * value otherwise.
+ */
+int
+flt_start_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+
+	/* If this function is called, this means there is at least one filter,
+	 * so we do not need to check the filter list's emptiness. */
+
+	/* Set flag on channel to tell that the channel is filtered */
+	chn->flags |= CF_FLT_ANALYZE;
+	chn->analysers |= ((chn->flags & CF_ISRESP) ? AN_RES_FLT_END : AN_REQ_FLT_END);
+
+	RESUME_FILTER_LOOP(s, chn) {
+		/* on the backend start point, only wake up backend filters;
+		 * frontend filters were already started on the frontend point */
+		if (!(chn->flags & CF_ISRESP)) {
+			if (an_bit == AN_REQ_FLT_START_BE &&
+			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
+				continue;
+		}
+		else {
+			if (an_bit == AN_RES_FLT_START_BE &&
+			    !(filter->flags & FLT_FL_IS_BACKEND_FILTER))
+				continue;
+		}
+
+		FLT_OFF(filter, chn) = 0;
+		if (FLT_OPS(filter)->channel_start_analyze) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->channel_start_analyze(s, filter, chn);
+			if (ret <= 0)
+				BREAK_EXECUTION(s, chn, end);
+		}
+	} RESUME_FILTER_END;
+
+ end:
+	ret = handle_analyzer_result(s, chn, an_bit, ret);
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Calls the 'channel_pre_analyze' callback for all filters attached to a
+ * stream. This function is called BEFORE each analyzer attached to a channel,
+ * except analyzers responsible for data sending. The 'channel_pre_analyze'
+ * callback is resumable, so this function returns 0 if an error occurs or if
+ * it needs to wait, any other value otherwise.
+ *
+ * Note this function can be called many times for the same analyzer. In fact,
+ * it is called until the analyzer finishes its processing.
+ */
+int
+flt_pre_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+
+	RESUME_FILTER_LOOP(s, chn) {
+		if (FLT_OPS(filter)->channel_pre_analyze && (filter->pre_analyzers & an_bit)) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->channel_pre_analyze(s, filter, chn, an_bit);
+			if (ret <= 0)
+				BREAK_EXECUTION(s, chn, check_result);
+			/* this filter is done with this analyzer */
+			filter->pre_analyzers &= ~an_bit;
+		}
+	} RESUME_FILTER_END;
+
+ check_result:
+	ret = handle_analyzer_result(s, chn, 0, ret);
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Calls the 'channel_post_analyze' callback for all filters attached to a
+ * stream. This function is called AFTER each analyzer attached to a channel,
+ * except analyzers responsible for data sending. The 'channel_post_analyze'
+ * callback is NOT resumable, so this function returns 0 if an error occurs,
+ * any other value otherwise.
+ *
+ * Here, AFTER means when the analyzer finishes its processing.
+ */
+int
+flt_post_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	struct filter *filter;
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		if (FLT_OPS(filter)->channel_post_analyze && (filter->post_analyzers & an_bit)) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->channel_post_analyze(s, filter, chn, an_bit);
+			if (ret < 0)
+				break;
+			/* this filter is done with this analyzer */
+			filter->post_analyzers &= ~an_bit;
+		}
+	}
+	ret = handle_analyzer_result(s, chn, 0, ret);
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * This function is the AN_REQ/RES_FLT_HTTP_HDRS analyzer, used to filter the
+ * HTTP headers of a request or a response. Returns 0 if an error occurs or if
+ * it needs to wait, any other value otherwise.
+ */
+int
+flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	struct http_msg *msg;
+	int ret = 1;
+
+	msg = ((chn->flags & CF_ISRESP) ? &s->txn->rsp : &s->txn->req);
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg);
+
+	RESUME_FILTER_LOOP(s, chn) {
+		if (FLT_OPS(filter)->http_headers) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+			ret  = FLT_OPS(filter)->http_headers(s, filter, msg);
+			if (ret <= 0)
+				BREAK_EXECUTION(s, chn, check_result);
+		}
+	} RESUME_FILTER_END;
+
+	/* with data filters, seed every filter's offset with the headers size
+	 * so payload filtering starts after the headers block */
+	if (HAS_DATA_FILTERS(s, chn)) {
+		size_t data = http_get_hdrs_size(htxbuf(&chn->buf));
+		struct filter *f;
+
+		list_for_each_entry(f, &strm_flt(s)->filters, list)
+			FLT_OFF(f, chn) = data;
+	}
+
+ check_result:
+	ret = handle_analyzer_result(s, chn, an_bit, ret);
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Calls the 'channel_end_analyze' callback for all filters attached to a
+ * stream. This function is called when we stop to analyze a request or a
+ * response. It is called after all other analyzers. The 'channel_end_analyze'
+ * callback is resumable, so this function returns 0 if an error occurs or if
+ * it needs to wait, any other value otherwise.
+ */
+int
+flt_end_analyze(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+
+	/* Check if all filters attached on the stream have finished their
+	 * processing on this channel. */
+	if (!(chn->flags & CF_FLT_ANALYZE))
+		goto sync;
+
+	RESUME_FILTER_LOOP(s, chn) {
+		FLT_OFF(filter, chn) = 0;
+		unregister_data_filter(s, chn, filter);
+
+		if (FLT_OPS(filter)->channel_end_analyze) {
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->channel_end_analyze(s, filter, chn);
+			if (ret <= 0)
+				BREAK_EXECUTION(s, chn, end);
+		}
+	} RESUME_FILTER_END;
+
+ end:
+	/* We don't remove yet this analyzer because we need to synchronize the
+	 * both channels. So here, we just remove the flag CF_FLT_ANALYZE. */
+	ret = handle_analyzer_result(s, chn, 0, ret);
+	if (ret) {
+		chn->flags &= ~CF_FLT_ANALYZE;
+
+		/* Pretend there is an activity on both channels. Flag on the
+		 * current one will be automatically removed, so only the other
+		 * one will remain. This is a way to be sure that
+		 * 'channel_end_analyze' callback will have a chance to be
+		 * called at least once for the other side to finish the current
+		 * processing. Of course, this is the filter responsibility to
+		 * wakeup the stream if it choose to loop on this callback. */
+		s->req.flags |= CF_WAKE_ONCE;
+		s->res.flags |= CF_WAKE_ONCE;
+	}
+
+
+ sync:
+	/* Now we can check if filters have finished their work on the both
+	 * channels */
+	if (!(s->req.flags & CF_FLT_ANALYZE) && !(s->res.flags & CF_FLT_ANALYZE)) {
+		/* Sync channels by removing this analyzer for the both channels */
+		s->req.analysers &= ~AN_REQ_FLT_END;
+		s->res.analysers &= ~AN_RES_FLT_END;
+
+		/* Remove backend filters from the list */
+		flt_stream_release(s, 1);
+		DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+	}
+	else {
+		DBG_TRACE_DEVEL("waiting for sync", STRM_EV_STRM_ANA|STRM_EV_FLT_ANA, s);
+	}
+	return ret;
+}
+
+
+/*
+ * Calls the 'tcp_payload' callback for all "data" filters attached to a
+ * stream. This function is called when some data can be forwarded in the
+ * AN_REQ_FLT_XFER_BODY and AN_RES_FLT_XFER_BODY analyzers. It takes care to
+ * update the filters and the stream offset to be sure that a filter cannot
+ * forward more data than its predecessors. A filter can choose to not forward
+ * all data. Returns a negative value if an error occurs, else the number of
+ * forwarded bytes.
+ */
+int
+flt_tcp_payload(struct stream *s, struct channel *chn, unsigned int len)
+{
+	struct filter *filter;
+	unsigned long long *strm_off = &FLT_STRM_OFF(s, chn);
+	unsigned int out = co_data(chn);
+	int ret, data;
+
+	/* <data> is the uncommitted input, i.e. what may be filtered */
+	ret = data = len - out;
+	DBG_TRACE_ENTER(STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+	list_for_each_entry(filter, &strm_flt(s)->filters, list) {
+		unsigned long long *flt_off = &FLT_OFF(filter, chn);
+		unsigned int offset = *flt_off - *strm_off;
+
+		/* Call tcp_payload for data filters only. Forward all data for
+		 * others and update the filter offset
+		 */
+		if (!IS_DATA_FILTER(filter, chn)) {
+			*flt_off += data - offset;
+			continue;
+		}
+
+		if (FLT_OPS(filter)->tcp_payload) {
+
+			DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+			ret = FLT_OPS(filter)->tcp_payload(s, filter, chn, out + offset, data - offset);
+			if (ret < 0)
+				goto end;
+			/* clamp what the next filters may forward to what this
+			 * one accepted */
+			data = ret + *flt_off - *strm_off;
+			*flt_off += ret;
+		}
+	}
+
+	/* Only forward data if the last filter decides to forward something */
+	if (ret > 0) {
+		ret = data;
+		*strm_off += ret;
+	}
+ end:
+	DBG_TRACE_LEAVE(STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Called when TCP data must be filtered on a channel. This function is the
+ * AN_REQ/RES_FLT_XFER_DATA analyzer. When called, it is responsible to forward
+ * data when the proxy is not in http mode. Behind the scene, it calls
+ * consecutively 'tcp_data' and 'tcp_forward_data' callbacks for all "data"
+ * filters attached to a stream. Returns 0 if an error occurs or if it needs to
+ * wait, any other value otherwise.
+ */
+int
+flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
+{
+	unsigned int len;
+	int ret = 1;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+
+	/* If there is no "data" filters, we do nothing */
+	if (!HAS_DATA_FILTERS(s, chn))
+		goto end;
+
+	/* amount of buffered data depends on the stream mode (HTX or raw) */
+	if (s->flags & SF_HTX) {
+		struct htx *htx = htxbuf(&chn->buf);
+		len = htx->data;
+	}
+	else
+		len = c_data(chn);
+
+	ret = flt_tcp_payload(s, chn, len);
+	if (ret < 0)
+		goto end;
+	c_adv(chn, ret);
+
+	/* Stop waiting data if:
+	 *  - the output is closed
+	 *  - the input is closed and no data is pending
+	 *  - there is a READ/WRITE timeout
+	 */
+	if (chn_cons(chn)->flags & SC_FL_SHUT_DONE) {
+		ret = 1;
+		goto end;
+	}
+	if (chn_prod(chn)->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
+		if (((s->flags & SF_HTX) && htx_is_empty(htxbuf(&chn->buf))) || c_empty(chn)) {
+			ret = 1;
+			goto end;
+		}
+	}
+	if (chn->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
+		ret = 1;
+		goto end;
+	}
+
+	/* Wait for data */
+	DBG_TRACE_DEVEL("waiting for more data", STRM_EV_STRM_ANA|STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+	return 0;
+ end:
+	/* Terminate the data filtering. If <ret> is negative, an error was
+	 * encountered during the filtering. */
+	ret = handle_analyzer_result(s, chn, an_bit, ret);
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s);
+	return ret;
+}
+
+/*
+ * Handles the result of a filter's analyzer callback: <ret> < 0 means an
+ * error, <ret> == 0 means the filter needs to wait, any other value means
+ * the analysis is finished on this channel. Returns 0 if an error occurs or
+ * if it needs to wait, any other value otherwise.
+ */
+static int
+handle_analyzer_result(struct stream *s, struct channel *chn,
+		       unsigned int an_bit, int ret)
+{
+	if (ret < 0)
+		goto return_bad_req;
+	else if (!ret)
+		goto wait;
+
+	/* End of job, return OK */
+	if (an_bit) {
+		chn->analysers &= ~an_bit;
+		chn->analyse_exp = TICK_ETERNITY;
+	}
+	return 1;
+
+ return_bad_req:
+	/* An error occurs: return a 400/502 for HTTP streams, or just close
+	 * the connection for raw TCP ones */
+	if (IS_HTX_STRM(s)) {
+		http_set_term_flags(s);
+
+		if (s->txn->status > 0)
+			http_reply_and_close(s, s->txn->status, NULL);
+		else {
+			s->txn->status = (!(chn->flags & CF_ISRESP)) ? 400 : 502;
+			http_reply_and_close(s, s->txn->status, http_error_message(s));
+		}
+	}
+	else {
+		sess_set_term_flags(s);
+		stream_retnclose(s, NULL);
+	}
+
+	/* only keep the FLT_END analyzer so filters are properly released */
+	if (!(chn->flags & CF_ISRESP))
+		s->req.analysers &= AN_REQ_FLT_END;
+	else
+		s->res.analysers &= AN_RES_FLT_END;
+
+
+	DBG_TRACE_DEVEL("leaving on error", STRM_EV_FLT_ANA|STRM_EV_FLT_ERR, s);
+	return 0;
+
+ wait:
+	if (!(chn->flags & CF_ISRESP))
+		channel_dont_connect(chn);
+	DBG_TRACE_DEVEL("waiting for more data", STRM_EV_FLT_ANA, s);
+	return 0;
+}
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled. */
+static struct cfg_kw_list cfg_kws = {ILH, {
+		{ CFG_LISTEN, "filter", parse_filter },
+		{ 0, NULL, NULL },
+	}
+};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* run filter init/deinit hooks at the right lifecycle stages */
+REGISTER_POST_CHECK(flt_init_all);
+REGISTER_PER_THREAD_INIT(flt_init_all_per_thread);
+REGISTER_PER_THREAD_DEINIT(flt_deinit_all_per_thread);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/fix.c b/src/fix.c
new file mode 100644
index 0000000..abf3119
--- /dev/null
+++ b/src/fix.c
@@ -0,0 +1,264 @@
+/*
+ * Financial Information eXchange Protocol
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/intops.h>
+#include <haproxy/fix.h>
+/*
+ * Return the corresponding numerical tag id if <str> looks like a valid FIX
+ * protocol tag ID. Otherwise, 0 is returned (0 is an invalid id).
+ *
+ * If <version> is given, it must be one of a defined FIX version string (see
+ * FIX_X_Y macros). In this case, the function will also check tag ID ranges. If
+ * no <version> is provided, any strictly positive integer is valid.
+ *
+ * tag ID range depends on FIX protocol version:
+ *     - FIX.4.0:    1-140
+ *     - FIX.4.1:    1-211
+ *     - FIX.4.2:    1-446
+ *     - FIX.4.3:    1-659
+ *     - FIX.4.4:    1-956
+ *     - FIX.5.0:    1-1139
+ *     - FIX.5.0SP1: 1-1426
+ *     - FIX.5.0SP2: 1-1621
+ *  range 10000 to 19999 is for "user defined tags"
+ */
+unsigned int fix_check_id(const struct ist str, const struct ist version) {
+	const char *s, *end;
+	unsigned int ret;
+
+	s = istptr(str);
+	end = istend(str);
+	ret = read_uint(&s, end);
+
+	/* we did not consume all characters from <str>, this is an error */
+	if (s != end)
+		return 0;
+
+	/* field ID can't be 0 */
+	if (ret == 0)
+		return 0;
+
+	/* we can leave now if version was not provided */
+	if (!isttest(version))
+		return ret;
+
+	/* we can leave now if this is a "user defined tag id" */
+	if (ret >= 10000 && ret <= 19999)
+		return ret;
+
+	/* now perform checking per FIX version */
+	if (istissame(FIX_4_0, version) && (ret <= 140))
+		return ret;
+	else if (istissame(FIX_4_1, version) && (ret <= 211))
+		return ret;
+	else if (istissame(FIX_4_2, version) && (ret <= 446))
+		return ret;
+	else if (istissame(FIX_4_3, version) && (ret <= 659))
+		return ret;
+	else if (istissame(FIX_4_4, version) && (ret <= 956))
+		return ret;
+	/* version string is the same for all 5.0 versions, so we can only take
+	 * into consideration the biggest range
+	 */
+	else if (istissame(FIX_5_0, version) && (ret <= 1621))
+		return ret;
+
+	/* tag id out of range for the given version */
+	return 0;
+}
+
+/*
+ * Parse a FIX message <msg> and performs following sanity checks:
+ *
+ *  - checks tag ids and values are not empty
+ *  - checks tag ids are numerical value
+ *  - checks the first tag is BeginString with a valid version
+ *  - checks the second tag is BodyLength with the right body length
+ *  - checks the third tag is MsgType
+ *  - checks the last tag is CheckSum with a valid checksum
+ *
+ * Returns:
+ *  FIX_INVALID_MESSAGE if the message is invalid
+ *  FIX_NEED_MORE_DATA  if we need more data to fully validate the message
+ *  FIX_VALID_MESSAGE   if the message looks valid
+ */
+int fix_validate_message(const struct ist msg)
+{
+	struct ist parser, version;
+	unsigned int tagnum, bodylen;
+	unsigned char checksum;
+	char *body;
+	int ret = FIX_INVALID_MESSAGE;
+
+	if (istlen(msg) < FIX_MSG_MINSIZE) {
+		ret = FIX_NEED_MORE_DATA;
+		goto end;
+	}
+
+	/* parsing the whole message to compute the checksum and check all tag
+	 * ids are properly set. Here we are sure to have the 2 first tags. Thus
+	 * the version and the body length can be checked.
+	 */
+	parser = msg;
+	version = IST_NULL;
+	checksum = tagnum = bodylen = 0;
+	body = NULL;
+	while (istlen(parser) > 0) {
+		struct ist tag, value;
+		unsigned int tagid;
+		const char *p, *end;
+
+		/* parse the tag ID and its value and perform first sanity checks */
+		value = iststop(istfind(parser, '='), FIX_DELIMITER);
+
+		/* end of value not found */
+		if (istend(value) == istend(parser)) {
+			ret = FIX_NEED_MORE_DATA;
+			goto end;
+		}
+		/* empty tag or empty value are forbidden */
+		if (istptr(parser) == istptr(value) ||!istlen(value))
+			goto end;
+
+		/* value points on '='. get the tag and skip '=' */
+		tag = ist2(istptr(parser), istptr(value) - istptr(parser));
+		value = istnext(value);
+
+		/* Check the tag id */
+		tagid = fix_check_id(tag, version);
+		if (!tagid)
+			goto end;
+		tagnum++;
+
+		if (tagnum == 1) {
+			/* the first tag must be BeginString */
+			if (tagid != FIX_TAG_BeginString)
+				goto end;
+
+			version = fix_version(value);
+			if (!isttest(version))
+				goto end;
+		}
+		else if (tagnum == 2) {
+			/* the second tag must be bodyLength */
+			if (tagid != FIX_TAG_BodyLength)
+				goto end;
+
+			p = istptr(value);
+			end = istend(value);
+			bodylen = read_uint(&p, end);
+
+			/* we did not consume all characters from <value> or no body,
+			 * this is an error. There is at least the message type in
+			 * the body.
+			 */
+			if (p != end || !bodylen)
+				goto end;
+
+			/* the body starts right after the BodyLength delimiter */
+			body = istend(value) + 1;
+		}
+		else if (tagnum == 3) {
+			/* the third tag must be MsgType */
+			if (tagid != FIX_TAG_MsgType)
+				goto end;
+		}
+		else if (tagnum > 3 && tagid == FIX_TAG_CheckSum) {
+			/* CheckSum tag should be the last one and is not taken into account
+			 * to compute the checksum itself and the body length. The value is
+			 * a three-octet representation of the checksum decimal value.
+			 */
+			if (bodylen != istptr(parser) - body)
+				goto end;
+
+			if (istlen(value) != 3)
+				goto end;
+			if (checksum != strl2ui(istptr(value), istlen(value)))
+				goto end;
+
+			/* End of the message, exit from the loop */
+			ret = FIX_VALID_MESSAGE;
+			goto end;
+		}
+
+		/* compute checksum of tag=value<delim> */
+		for (p = istptr(tag) ; p < istend(tag) ; ++p)
+			checksum += *p;
+		checksum += '=';
+		for (p = istptr(value) ; p < istend(value) ; ++p)
+			checksum += *p;
+		checksum += FIX_DELIMITER;
+
+		/* move the parser after the value and its delimiter */
+		parser = istadv(parser, istlen(tag) + istlen(value) + 2);
+	}
+
+	if (body) {
+		/* we started to read the body but did not reach the checksum tag */
+		ret = FIX_NEED_MORE_DATA;
+	}
+
+  end:
+	return ret;
+}
+
+
+/*
+ * Iter on a FIX message <msg> and return the value of <tagid>.
+ *
+ * Returns the corresponding value if <tagid> is found. If <tagid> is not found
+ * because more data are required, the message with a length set to 0 is
+ * returned. If <tagid> is not found in the message or if the message is
+ * invalid, IST_NULL is returned.
+ *
+ * Note: Only simple sanity checks are performed on tags and values (not empty).
+ *
+ * the tag looks like
+ *   <tagid>=<value>FIX_DELIMITER with <tag> and <value> not empty
+ */
+struct ist fix_tag_value(const struct ist msg, unsigned int tagid)
+{
+	struct ist parser, t, v;
+	unsigned int id;
+
+	parser = msg;
+	while (istlen(parser) > 0) {
+		v   = iststop(istfind(parser, '='), FIX_DELIMITER);
+
+		/* delimiter not found, need more data */
+		if (istend(v) == istend(parser))
+			break;
+
+		/* empty tag or empty value, invalid */
+		if (istptr(parser) == istptr(v) || !istlen(v))
+			goto not_found_or_invalid;
+
+		/* <v> points on '='. get the tag and skip '=' */
+		t = ist2(istptr(parser), istptr(v) - istptr(parser));
+		v = istnext(v);
+
+		/* tag ids are unbounded here, so no version check (IST_NULL) */
+		id = fix_check_id(t, IST_NULL);
+		if (!id)
+			goto not_found_or_invalid;
+		if (id == tagid) {
+			/* <tagId> found, return the corresponding value */
+			return v;
+		}
+
+		/* CheckSum tag is the last one, no <tagid> found */
+		if (id == FIX_TAG_CheckSum)
+			goto not_found_or_invalid;
+
+		/* move the parser after the value and its delimiter */
+		parser = istadv(parser, istlen(t) + istlen(v) + 2);
+	}
+	/* not enough data to find <tagid> */
+	return ist2(istptr(msg), 0);
+
+  not_found_or_invalid:
+	return IST_NULL;
+}
diff --git a/src/flt_bwlim.c b/src/flt_bwlim.c
new file mode 100644
index 0000000..66c2883
--- /dev/null
+++ b/src/flt_bwlim.c
@@ -0,0 +1,976 @@
+/*
+ * Bandwidth limitation filter.
+ *
+ * Copyright 2022 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+
+#include <haproxy/api.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/filters.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/stream.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+const char *bwlim_flt_id = "bandwidth limitation filter";
+
+struct flt_ops bwlim_ops;
+
+#define BWLIM_FL_NONE 0x00000000 /* For init purpose */
+#define BWLIM_FL_IN 0x00000001 /* Limit clients uploads */
+#define BWLIM_FL_OUT 0x00000002 /* Limit clients downloads */
+#define BWLIM_FL_SHARED 0x00000004 /* Limit shared between clients (using stick-tables) */
+
+/* rule->action flags for "set-bandwidth-limit": whether the custom limit and
+ * period were given as a sample expression or as a constant */
+#define BWLIM_ACT_LIMIT_EXPR 0x00000001
+#define BWLIM_ACT_LIMIT_CONST 0x00000002
+#define BWLIM_ACT_PERIOD_EXPR 0x00000004
+#define BWLIM_ACT_PERIOD_CONST 0x00000008
+
+/* Static configuration of a bwlim filter instance, built at parsing time */
+struct bwlim_config {
+	struct proxy *proxy;        /* the proxy the filter is declared on */
+	char *name;                 /* filter name, referenced by set-bandwidth-limit rules */
+	unsigned int flags;         /* BWLIM_FL_* */
+	struct sample_expr *expr;   /* stick-table key expression (shared mode only) */
+	union {
+		char *n;            /* table name, only valid during parsing */
+		struct stktable *t; /* resolved table, set by bwlim_check() */
+	} table;
+	unsigned int period;        /* rate measurement period in ms (per-stream default) */
+	unsigned int limit;         /* allowed bytes per period */
+	unsigned int min_size;      /* minimum amount forwarded per wakeup */
+};
+
+/* Per-stream state of a bwlim filter instance */
+struct bwlim_state {
+	struct freq_ctr bytes_rate; /* private rate counter (per-stream mode only) */
+	struct stksess *ts;         /* stick-table session (shared mode only) */
+	struct act_rule *rule;      /* the rule that enabled the limitation */
+	unsigned int limit;         /* custom limit, 0 if unset */
+	unsigned int period;        /* custom period, 0 if unset */
+	unsigned int exp;           /* next date this stream may forward data again */
+};
+
+
+/* Pools used to allocate comp_state structs */
+DECLARE_STATIC_POOL(pool_head_bwlim_state, "bwlim_state", sizeof(struct bwlim_state));
+
+
+/* Apply the bandwidth limitation of the filter <filter>. <len> is the maximum
+ * amount of data that the filter can forward. This function applies the
+ * limitation and returns what the stream is authorized to forward. Several
+ * limitation can be stacked.
+ */
+static int bwlim_apply_limit(struct filter *filter, struct channel *chn, unsigned int len)
+{
+	struct bwlim_config *conf = FLT_CONF(filter);
+	struct bwlim_state *st = filter->ctx;
+	struct freq_ctr *bytes_rate;
+	unsigned int period, limit, remain, tokens, users;
+	unsigned int wait = 0;
+	int overshoot, ret = 0;
+
+	/* Don't forward anything if there is nothing to forward or the waiting
+	 * time is not expired
+	 */
+	if (!len || (tick_isset(st->exp) && !tick_is_expired(st->exp, now_ms)))
+		goto end;
+
+	st->exp = TICK_ETERNITY;
+	ret = len;
+	if (conf->flags & BWLIM_FL_SHARED) {
+		void *ptr;
+		unsigned int type = ((conf->flags & BWLIM_FL_IN) ? STKTABLE_DT_BYTES_IN_RATE : STKTABLE_DT_BYTES_OUT_RATE);
+
+		/* In shared mode, get a pointer on the stick table entry. it
+		 * will be used to get the freq-counter. It is also used to get
+		 * The number of users.
+		 */
+		ptr = stktable_data_ptr(conf->table.t, st->ts, type);
+		if (!ptr)
+			goto end;
+
+		/* NOTE: the entry lock is held from here until either the
+		 * overshoot early-exit below or the common unlock at the
+		 * bottom of the function */
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &st->ts->lock);
+		bytes_rate = &stktable_data_cast(ptr, std_t_frqp);
+		period = conf->table.t->data_arg[type].u;
+		limit = conf->limit;
+		users = st->ts->ref_cnt;
+	}
+	else {
+		/* On per-stream mode, the freq-counter is private to the
+		 * stream. Get it from the filter state. Rely on the custom
+		 * limit/period if defined or use the default ones. In this mode,
+		 * there is only one user.
+		 */
+		bytes_rate = &st->bytes_rate;
+		period = (st->period ? st->period : conf->period);
+		limit = (st->limit ? st->limit : conf->limit);
+		users = 1;
+	}
+
+	/* Be sure the current rate does not exceed the limit over the current
+	 * period. In this case, nothing is forwarded and the waiting time is
+	 * computed to be sure to not retry too early.
+	 *
+	 * The test is used to avoid the initial burst. Otherwise, streams will
+	 * consume the limit as fast as possible and will then be paused for
+	 * long time.
+	 */
+	overshoot = freq_ctr_overshoot_period(bytes_rate, period, limit);
+	if (overshoot > 0) {
+		if (conf->flags & BWLIM_FL_SHARED)
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &st->ts->lock);
+		/* wait long enough for the excess plus one min_size chunk to
+		 * drain, scaled by the number of users sharing the counter */
+		wait = div64_32((uint64_t)(conf->min_size + overshoot) * period * users,
+				limit);
+		st->exp = tick_add(now_ms, (wait ? wait : 1));
+		ret = 0;
+		goto end;
+	}
+
+	/* Get the allowed quota per user. */
+	remain = freq_ctr_remain_period(bytes_rate, period, limit, 0);
+	tokens = div64_32((uint64_t)(remain + users - 1), users);
+
+	if (tokens < len) {
+		/* The stream cannot forward all its data. But we will check if
+		 * it can perform a small burst if the global quota is large
+		 * enough. But, in this case, its waiting time will be
+		 * increased accordingly.
+		 */
+		ret = tokens;
+		if (tokens < conf->min_size) {
+			/* at the end of the message, don't request more than
+			 * what remains to be forwarded */
+			ret = (chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE))
+				? MIN(len, conf->min_size)
+				: conf->min_size;
+
+			if (ret <= remain)
+				wait = div64_32((uint64_t)(ret - tokens) * period * users + limit - 1, limit);
+			else
+				ret = (limit < ret) ? remain : 0;
+		}
+	}
+
+	/* At the end, update the freq-counter and compute the waiting time if
+	 * the stream is limited
+	 */
+	update_freq_ctr_period(bytes_rate, period, ret);
+	if (ret < len) {
+		wait += next_event_delay_period(bytes_rate, period, limit, MIN(len - ret, conf->min_size * users));
+		st->exp = tick_add(now_ms, (wait ? wait : 1));
+	}
+
+	if (conf->flags & BWLIM_FL_SHARED)
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &st->ts->lock);
+
+  end:
+	/* arm the channel analyser timer so the stream wakes up when the
+	 * waiting time expires, clearing any already-expired date first */
+	chn->analyse_exp = tick_first((tick_is_expired(chn->analyse_exp, now_ms) ? TICK_ETERNITY : chn->analyse_exp),
+				      st->exp);
+	return ret;
+}
+
+/***************************************************************************
+ * Hooks that manage the filter lifecycle (init/check/deinit)
+ **************************************************************************/
+/* Initialize the filter. Declares HTX support so the filter can be used on
+ * HTTP streams. Returns -1 on error, else 0. */
+static int bwlim_init(struct proxy *px, struct flt_conf *fconf)
+{
+	fconf->flags |= FLT_CFG_FL_HTX;
+	return 0;
+}
+
+/* Free resources allocated by the bwlim filter: the filter name, the key
+ * sample expression and the config itself. */
+static void bwlim_deinit(struct proxy *px, struct flt_conf *fconf)
+{
+	struct bwlim_config *conf = fconf->conf;
+
+	if (conf) {
+		ha_free(&conf->name);
+		release_sample_expr(conf->expr);
+		conf->expr = NULL;
+		ha_free(&fconf->conf);
+	}
+}
+
+/* Check configuration of a bwlim filter for a specified proxy. For a shared
+ * limitation, resolve the stick-table (explicit "table" argument or the
+ * proxy's own table) and verify it stores the required bytes-rate data type
+ * and a key type compatible with the filter's key expression.
+ * Return 1 on error, else 0. */
+static int bwlim_check(struct proxy *px, struct flt_conf *fconf)
+{
+	struct bwlim_config *conf = fconf->conf;
+	struct stktable *target;
+
+	/* nothing to resolve for per-stream limitations */
+	if (!(conf->flags & BWLIM_FL_SHARED))
+		return 0;
+
+	if (conf->table.n)
+		target = stktable_find_by_name(conf->table.n);
+	else
+		target = px->table;
+
+	if (!target) {
+		ha_alert("Proxy %s : unable to find table '%s' referenced by bwlim filter '%s'",
+			 px->id, conf->table.n ? conf->table.n : px->id, conf->name);
+		return 1;
+	}
+
+	if ((conf->flags & BWLIM_FL_IN) && !target->data_ofs[STKTABLE_DT_BYTES_IN_RATE]) {
+		ha_alert("Proxy %s : stick-table '%s' uses a data type incompatible with bwlim filter '%s'."
+			 " It must be 'bytes_in_rate'",
+			 px->id, conf->table.n ? conf->table.n : px->id, conf->name);
+		return 1;
+	}
+	else if ((conf->flags & BWLIM_FL_OUT) && !target->data_ofs[STKTABLE_DT_BYTES_OUT_RATE]) {
+		ha_alert("Proxy %s : stick-table '%s' uses a data type incompatible with bwlim filter '%s'."
+			 " It must be 'bytes_out_rate'",
+			 px->id, conf->table.n ? conf->table.n : px->id, conf->name);
+		return 1;
+	}
+
+	if (!stktable_compatible_sample(conf->expr, target->type)) {
+		ha_alert("Proxy %s : stick-table '%s' uses a key type incompatible with bwlim filter '%s'",
+			 px->id, conf->table.n ? conf->table.n : px->id, conf->name);
+		return 1;
+	}
+	else {
+		/* register the proxy on the table's users list, then replace
+		 * the table name by the resolved pointer (union members) */
+		if (!in_proxies_list(target->proxies_list, px)) {
+			px->next_stkt_ref = target->proxies_list;
+			target->proxies_list = px;
+		}
+		ha_free(&conf->table.n);
+		conf->table.t = target;
+	}
+
+	return 0;
+}
+
+/**************************************************************************
+ * Hooks to handle start/stop of streams
+ *************************************************************************/
+/* Called when a filter instance is created and attach to a stream. Allocates
+ * the zero-initialized per-stream state. Returns -1 on allocation failure
+ * (the filter is then not attached), else 1. */
+static int bwlim_attach(struct stream *s, struct filter *filter)
+{
+	struct bwlim_state *st;
+
+	st = pool_zalloc(pool_head_bwlim_state);
+	if (!st)
+		return -1;
+	filter->ctx = st;
+	return 1;
+}
+
+/* Called when a filter instance is detach from a stream, just before its
+ * destruction */
+static void bwlim_detach(struct stream *s, struct filter *filter)
+{
+	struct bwlim_config *conf = FLT_CONF(filter);
+	struct bwlim_state *st = filter->ctx;
+	struct stktable *t = conf->table.t;
+
+	if (!st)
+		return;
+
+	/* drop our reference on the stick-table entry, if any */
+	if (st->ts)
+		stktable_touch_local(t, st->ts, 1);
+
+	/* release the per-stream bwlim state */
+	pool_free(pool_head_bwlim_state, st);
+	filter->ctx = NULL;
+}
+
+/**************************************************************************
+ * Hooks to handle channels activity
+ *************************************************************************/
+
+/* Called when analyze ends for a given channel. Resets the analyser timer
+ * armed by bwlim_apply_limit() so it cannot wake the stream up anymore. */
+static int bwlim_chn_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
+{
+	chn->analyse_exp = TICK_ETERNITY;
+	return 1;
+}
+
+
+/**************************************************************************
+ * Hooks to filter HTTP messages
+ *************************************************************************/
+/* Called when HTTP headers are fully parsed: reset any pending analyser
+ * timer before the payload filtering starts */
+static int bwlim_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+	msg->chn->analyse_exp = TICK_ETERNITY;
+	return 1;
+}
+
+/* HTTP payload hook: cap the forwarded amount to what the limitation allows */
+static int bwlim_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
+			      unsigned int offset, unsigned int len)
+{
+	return bwlim_apply_limit(filter, msg->chn, len);
+}
+
+/**************************************************************************
+ * Hooks to filter TCP data
+ *************************************************************************/
+/* TCP payload hook: cap the forwarded amount to what the limitation allows */
+static int bwlim_tcp_payload(struct stream *s, struct filter *filter, struct channel *chn,
+			     unsigned int offset, unsigned int len)
+{
+	return bwlim_apply_limit(filter, chn, len);
+}
+
+/********************************************************************
+ * Functions that manage the filter initialization
+ ********************************************************************/
+/* Callbacks of the bandwidth limitation filter. Unset hooks default to the
+ * generic filter behavior (everything is forwarded). */
+struct flt_ops bwlim_ops = {
+	/* Manage bwlim filter, called for each filter declaration */
+	.init = bwlim_init,
+	.deinit = bwlim_deinit,
+	.check = bwlim_check,
+
+	/* Handle start/stop of streams */
+	.attach = bwlim_attach,
+	.detach = bwlim_detach,
+
+	/* Handle channels activity */
+	.channel_end_analyze = bwlim_chn_end_analyze,
+
+	/* Filter HTTP requests and responses */
+	.http_headers = bwlim_http_headers,
+	.http_payload = bwlim_http_payload,
+
+	/* Filter TCP data */
+	.tcp_payload = bwlim_tcp_payload,
+};
+
+/* Set a bandwidth limitation. It always return ACT_RET_CONT. On error, the rule
+ * is ignored. First of all, it looks for the corresponding filter. Then, for a
+ * shared limitation, the stick-table entry is retrieved. For a per-stream
+ * limitation, the custom limit and period are computed, if necessary. At the
+ * end, the filter is registered on the data filtering for the right channel
+ * (bwlim-in = request, bwlim-out = response).
+ */
+static enum act_return bwlim_set_limit(struct act_rule *rule, struct proxy *px,
+				       struct session *sess, struct stream *s, int flags)
+{
+	struct bwlim_config *conf = rule->arg.act.p[3];
+	struct filter *filter;
+	struct bwlim_state *st = NULL;
+	struct stktable *t;
+	struct stktable_key *key;
+	struct stksess *ts;
+	int opt;
+
+	/* Find the filter instance attached to this stream that matches the
+	 * config resolved at check time (arg.act.p[3]) */
+	list_for_each_entry(filter, &s->strm_flt.filters, list) {
+		if (FLT_ID(filter) == bwlim_flt_id && FLT_CONF(filter) == conf) {
+			st = filter->ctx;
+			break;
+		}
+	}
+
+	if (!st)
+		goto end;
+
+	/* derive the sample-fetch direction from the ruleset the action
+	 * was declared in */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CNT: opt = SMP_OPT_DIR_REQ | SMP_OPT_FINAL; break;
+	case ACT_F_TCP_RES_CNT: opt = SMP_OPT_DIR_RES | SMP_OPT_FINAL; break;
+	case ACT_F_HTTP_REQ:    opt = SMP_OPT_DIR_REQ | SMP_OPT_FINAL; break;
+	case ACT_F_HTTP_RES:    opt = SMP_OPT_DIR_RES | SMP_OPT_FINAL; break;
+	default:
+		goto end;
+	}
+
+	if (conf->flags & BWLIM_FL_SHARED) {
+		/* shared mode: look up (or create) the stick-table entry and
+		 * keep a reference on it for the stream's lifetime */
+		t = conf->table.t;
+		key = stktable_fetch_key(t, px, sess, s, opt, conf->expr, NULL);
+		if (!key)
+			goto end;
+
+		ts = stktable_get_entry(t, key);
+		if (!ts)
+			goto end;
+
+		st->ts = ts;
+		st->rule = rule;
+	}
+	else {
+		/* per-stream mode: evaluate the optional custom limit/period,
+		 * leaving 0 (i.e. "use the filter defaults") on failure */
+		struct sample *smp;
+
+		st->limit = 0;
+		st->period = 0;
+		if (rule->action & BWLIM_ACT_LIMIT_EXPR) {
+			smp = sample_fetch_as_type(px, sess, s, opt, rule->arg.act.p[1], SMP_T_SINT);
+			if (smp && smp->data.u.sint > 0)
+				st->limit = smp->data.u.sint;
+		}
+		else if (rule->action & BWLIM_ACT_LIMIT_CONST)
+			st->limit = (uintptr_t)rule->arg.act.p[1];
+
+		if (rule->action & BWLIM_ACT_PERIOD_EXPR) {
+			smp = sample_fetch_as_type(px, sess, s, opt, rule->arg.act.p[2], SMP_T_SINT);
+			if (smp && smp->data.u.sint > 0)
+				st->period = smp->data.u.sint;
+		}
+		else if (rule->action & BWLIM_ACT_PERIOD_CONST)
+			st->period = (uintptr_t)rule->arg.act.p[2];
+	}
+
+	/* activate data filtering on the relevant channel */
+	st->exp = TICK_ETERNITY;
+	if (conf->flags & BWLIM_FL_IN)
+		register_data_filter(s, &s->req, filter);
+	else
+		register_data_filter(s, &s->res, filter);
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/* Emit a warning when a sample expression used by a "set-bandwidth-limit"
+ * rule depends on request or response contents while no inspect-delay is set
+ * on the corresponding "tcp-request"/"tcp-response" content ruleset, in which
+ * case the expression would be evaluated on whatever data happens to be
+ * buffered at that time.
+ */
+static void bwlim_warn_no_inspect_delay(struct act_rule *rule, struct proxy *px,
+					struct sample_expr *expr)
+{
+	if (rule->from == ACT_F_TCP_REQ_CNT && (px->cap & PR_CAP_FE)) {
+		if (!px->tcp_req.inspect_delay && !(expr->fetch->val & SMP_VAL_FE_SES_ACC)) {
+			ha_warning("%s '%s' : a 'tcp-request content set-bandwidth-limit*' rule explicitly depending on request"
+				   " contents without any 'tcp-request inspect-delay' setting."
+				   " This means that this rule will randomly find its contents. This can be fixed by"
+				   " setting the tcp-request inspect-delay.\n",
+				   proxy_type_str(px), px->id);
+		}
+	}
+	if (rule->from == ACT_F_TCP_RES_CNT && (px->cap & PR_CAP_BE)) {
+		if (!px->tcp_rep.inspect_delay && !(expr->fetch->val & SMP_VAL_BE_SRV_CON)) {
+			ha_warning("%s '%s' : a 'tcp-response content set-bandwidth-limit*' rule explicitly depending on response"
+				   " contents without any 'tcp-response inspect-delay' setting."
+				   " This means that this rule will randomly find its contents. This can be fixed by"
+				   " setting the tcp-response inspect-delay.\n",
+				   proxy_type_str(px), px->id);
+		}
+	}
+}
+
+/* Check function for "set-bandwidth-limit" action. It resolves the bwlim
+ * filter the rule refers to (by name, arg.act.p[0]) and validates that the
+ * optional limit/period sample expressions, as well as the filter's key
+ * expression, can be evaluated at the point where the rule is executed. On
+ * success, arg.act.p[3] is set to the filter's config and 1 is returned.
+ * Otherwise, it returns 0 and <err> is filled.
+ */
+int check_bwlim_action(struct act_rule *rule, struct proxy *px, char **err)
+{
+	struct flt_conf *fconf;
+	struct bwlim_config *conf = NULL;
+	unsigned int where;
+
+	/* find the bwlim filter config matching the rule's filter name */
+	list_for_each_entry(fconf, &px->filter_configs, list) {
+		conf = NULL;
+		if (fconf->id == bwlim_flt_id) {
+			conf = fconf->conf;
+			if (strcmp(rule->arg.act.p[0], conf->name) == 0)
+				break;
+		}
+	}
+	if (!conf) {
+		memprintf(err, "unable to find bwlim filter '%s' referenced by set-bandwidth-limit rule",
+			  (char *)rule->arg.act.p[0]);
+		return 0;
+	}
+
+	if ((conf->flags & BWLIM_FL_SHARED) && rule->arg.act.p[1]) {
+		memprintf(err, "set-bandwidth-limit rule cannot define a limit for a shared bwlim filter");
+		return 0;
+	}
+
+	if ((conf->flags & BWLIM_FL_SHARED) && rule->arg.act.p[2]) {
+		memprintf(err, "set-bandwidth-limit rule cannot define a period for a shared bwlim filter");
+		return 0;
+	}
+
+	/* compute the sample availability bits for the rule's evaluation
+	 * point, depending on the proxy's capabilities */
+	where = 0;
+	if (px->cap & PR_CAP_FE) {
+		if (rule->from == ACT_F_TCP_REQ_CNT)
+			where |= SMP_VAL_FE_REQ_CNT;
+		else if (rule->from == ACT_F_HTTP_REQ)
+			where |= SMP_VAL_FE_HRQ_HDR;
+		else if (rule->from == ACT_F_TCP_RES_CNT)
+			where |= SMP_VAL_FE_RES_CNT;
+		else if (rule->from == ACT_F_HTTP_RES)
+			where |= SMP_VAL_FE_HRS_HDR;
+	}
+	if (px->cap & PR_CAP_BE) {
+		if (rule->from == ACT_F_TCP_REQ_CNT)
+			where |= SMP_VAL_BE_REQ_CNT;
+		else if (rule->from == ACT_F_HTTP_REQ)
+			where |= SMP_VAL_BE_HRQ_HDR;
+		else if (rule->from == ACT_F_TCP_RES_CNT)
+			where |= SMP_VAL_BE_RES_CNT;
+		else if (rule->from == ACT_F_HTTP_RES)
+			where |= SMP_VAL_BE_HRS_HDR;
+	}
+
+	/* validate the custom limit expression, if any */
+	if ((rule->action & BWLIM_ACT_LIMIT_EXPR) && rule->arg.act.p[1]) {
+		struct sample_expr *expr = rule->arg.act.p[1];
+
+		if (!(expr->fetch->val & where)) {
+			memprintf(err, "set-bandwidth-limit rule uses a limit extracting information from '%s', none of which is available here",
+				  sample_src_names(expr->fetch->use));
+			return 0;
+		}
+		bwlim_warn_no_inspect_delay(rule, px, expr);
+	}
+
+	/* validate the custom period expression, if any */
+	if ((rule->action & BWLIM_ACT_PERIOD_EXPR) && rule->arg.act.p[2]) {
+		struct sample_expr *expr = rule->arg.act.p[2];
+
+		if (!(expr->fetch->val & where)) {
+			memprintf(err, "set-bandwidth-limit rule uses a period extracting information from '%s', none of which is available here",
+				  sample_src_names(expr->fetch->use));
+			return 0;
+		}
+		bwlim_warn_no_inspect_delay(rule, px, expr);
+	}
+
+	/* validate the filter's own key expression (shared mode) */
+	if (conf->expr) {
+		if (!(conf->expr->fetch->val & where)) {
+			memprintf(err, "bwlim filter '%s' uses a key extracting information from '%s', none of which is available here",
+				  conf->name, sample_src_names(conf->expr->fetch->use));
+			return 0;
+		}
+		bwlim_warn_no_inspect_delay(rule, px, conf->expr);
+	}
+
+	rule->arg.act.p[3] = conf;
+	return 1;
+}
+
+/* Release memory allocated by "set-bandwidth-limit" action: the filter name
+ * and the limit/period expressions when they were parsed as expressions
+ * (constants are stored in the pointer itself and need no freeing). */
+static void release_bwlim_action(struct act_rule *rule)
+{
+	ha_free(&rule->arg.act.p[0]);
+	if ((rule->action & BWLIM_ACT_LIMIT_EXPR) && rule->arg.act.p[1]) {
+		release_sample_expr(rule->arg.act.p[1]);
+		rule->arg.act.p[1] = NULL;
+	}
+	if ((rule->action & BWLIM_ACT_PERIOD_EXPR) && rule->arg.act.p[2]) {
+		release_sample_expr(rule->arg.act.p[2]);
+		rule->arg.act.p[2] = NULL;
+	}
+	rule->arg.act.p[3] = NULL; /* points on the filter's config */
+}
+
+/* Parse "set-bandwidth-limit" action. The filter name must be specified. For
+ * shared limitations, there is no other supported parameter. For per-stream
+ * limitations, a custom limit and period may be specified. In both case, it
+ * must be an expression. On success:
+ *
+ *   arg.act.p[0] will be the filter name (mandatory)
+ *   arg.act.p[1] will be an expression for the custom limit (optional, may be NULL)
+ *   arg.act.p[2] will be an expression for the custom period (optional, may be NULL)
+ *
+ * "limit" and "period" values may be given either as constants (size/time
+ * units) or as sample expressions; BWLIM_ACT_*_CONST/EXPR flags on
+ * rule->action record which form was used.
+ *
+ * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_bandwidth_limit(const char **args, int *orig_arg, struct proxy *px,
+						struct act_rule *rule, char **err)
+{
+	struct sample_expr *expr;
+	int cur_arg;
+
+	cur_arg = *orig_arg;
+
+	if (!*args[cur_arg]) {
+		memprintf(err, "missing bwlim filter name");
+		return ACT_RET_PRS_ERR;
+	}
+
+	rule->arg.act.p[0] = strdup(args[cur_arg]);
+	if (!rule->arg.act.p[0]) {
+		memprintf(err, "out of memory");
+		return ACT_RET_PRS_ERR;
+	}
+	cur_arg++;
+
+	while (1) {
+		if (strcmp(args[cur_arg], "limit") == 0) {
+			const char *res;
+			unsigned int limit;
+
+			cur_arg++;
+			/* args[] entries are never NULL while parsing: an
+			 * empty string marks the end of the argument list,
+			 * so test the content, not the pointer */
+			if (!*args[cur_arg]) {
+				memprintf(err, "missing limit value or expression");
+				goto error;
+			}
+
+			/* first try a constant size, then fall back to a
+			 * sample expression */
+			res = parse_size_err(args[cur_arg], &limit);
+			if (!res) {
+				rule->action |= BWLIM_ACT_LIMIT_CONST;
+				rule->arg.act.p[1] = (void *)(uintptr_t)limit;
+				cur_arg++;
+				continue;
+			}
+
+			expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, NULL, &px->conf.args, NULL);
+			if (!expr) {
+				memprintf(err, "'%s': invalid size value or unknown fetch method '%s'", args[cur_arg-1], args[cur_arg]);
+				goto error;
+			}
+			rule->action |= BWLIM_ACT_LIMIT_EXPR;
+			rule->arg.act.p[1] = expr;
+		}
+		else if (strcmp(args[cur_arg], "period") == 0) {
+			const char *res;
+			unsigned int period;
+
+			cur_arg++;
+			/* see "limit" above: empty string, not NULL, ends the list */
+			if (!*args[cur_arg]) {
+				memprintf(err, "missing period value or expression");
+				goto error;
+			}
+
+			/* first try a constant time, then fall back to a
+			 * sample expression */
+			res = parse_time_err(args[cur_arg], &period, TIME_UNIT_MS);
+			if (!res) {
+				rule->action |= BWLIM_ACT_PERIOD_CONST;
+				rule->arg.act.p[2] = (void *)(uintptr_t)period;
+				cur_arg++;
+				continue;
+			}
+
+			expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, NULL, &px->conf.args, NULL);
+			if (!expr) {
+				memprintf(err, "'%s': invalid time value or unknown fetch method '%s'", args[cur_arg-1], args[cur_arg]);
+				goto error;
+			}
+			rule->action |= BWLIM_ACT_PERIOD_EXPR;
+			rule->arg.act.p[2] = expr;
+		}
+		else
+			break;
+	}
+
+	rule->action_ptr = bwlim_set_limit;
+	rule->check_ptr = check_bwlim_action;
+	rule->release_ptr = release_bwlim_action;
+
+	*orig_arg = cur_arg;
+	return ACT_RET_PRS_OK;
+
+error:
+	release_bwlim_action(rule);
+	return ACT_RET_PRS_ERR;
+}
+
+
+/* "set-bandwidth-limit" is registered in all four content rulesets:
+ * tcp-request/tcp-response content and http-request/http-response */
+static struct action_kw_list tcp_req_cont_actions = {
+	.kw = {
+		{ "set-bandwidth-limit", parse_bandwidth_limit, 0 },
+		{ NULL, NULL }
+	}
+};
+
+static struct action_kw_list tcp_res_cont_actions = {
+	.kw = {
+		{ "set-bandwidth-limit", parse_bandwidth_limit, 0 },
+		{ NULL, NULL }
+	}
+};
+
+static struct action_kw_list http_req_actions = {
+	.kw = {
+		{ "set-bandwidth-limit", parse_bandwidth_limit, 0 },
+		{ NULL, NULL }
+	}
+};
+
+static struct action_kw_list http_res_actions = {
+	.kw = {
+		{ "set-bandwidth-limit", parse_bandwidth_limit, 0 },
+		{ NULL, NULL }
+	}
+};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_cont_actions);
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_cont_actions);
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);
+
+
+/* Generic function to parse bandwidth limitation filter configuration. It
+ * returns -1 on error and 0 on success. It handles configuration for per-stream
+ * and shared limitations.
+ */
+static int parse_bwlim_flt(char **args, int *cur_arg, struct proxy *px, struct flt_conf *fconf,
+			   char **err, void *private)
+{
+	struct flt_conf *fc;
+	struct bwlim_config *conf;
+	int shared, per_stream;
+	int pos = *cur_arg + 1;
+
+	conf = calloc(1, sizeof(*conf));
+	if (!conf) {
+		memprintf(err, "%s: out of memory", args[*cur_arg]);
+		return -1;
+	}
+	conf->proxy = px;
+
+	/* a unique (per proxy) filter name is mandatory */
+	if (!*args[pos]) {
+		memprintf(err, "'%s' : a name is expected as first argument ", args[*cur_arg]);
+		goto error;
+	}
+	conf->flags = BWLIM_FL_NONE;
+	conf->name = strdup(args[pos]);
+	if (!conf->name) {
+		memprintf(err, "%s: out of memory", args[*cur_arg]);
+		goto error;
+	}
+
+	/* reject duplicate filter names on the same proxy */
+	list_for_each_entry(fc, &px->filter_configs, list) {
+		if (fc->id == bwlim_flt_id) {
+			struct bwlim_config *c = fc->conf;
+
+			if (strcmp(conf->name, c->name) == 0) {
+				memprintf(err, "bwlim filter '%s' already declared for proxy '%s'\n",
+					  conf->name, px->id);
+				goto error;
+			}
+		}
+	}
+
+	/* Parse options. "key", "table" and "limit" select the shared mode
+	 * while "default-period" and "default-limit" select the per-stream
+	 * mode; mixing both is rejected. "min-size" applies to both modes.
+	 */
+	shared = per_stream = 0;
+	pos++;
+	while (*args[pos]) {
+		if (strcmp(args[pos], "key") == 0) {
+			if (per_stream) {
+				memprintf(err, "'%s' : cannot mix per-stream and shared parameter",
+					  args[*cur_arg]);
+				goto error;
+			}
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the sample expression is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			shared = 1;
+			pos++;
+			conf->expr = sample_parse_expr((char **)args, &pos, px->conf.args.file, px->conf.args.line,
+						       err, &px->conf.args, NULL);
+			if (!conf->expr)
+				goto error;
+		}
+		else if (strcmp(args[pos], "table") == 0) {
+			if (per_stream) {
+				memprintf(err, "'%s' : cannot mix per-stream and shared parameter",
+					  args[*cur_arg]);
+				goto error;
+			}
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the table name is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			shared = 1;
+			/* only the name is stored here; it is resolved to a
+			 * table pointer by bwlim_check() */
+			conf->table.n = strdup(args[pos + 1]);
+			if (!conf->table.n) {
+				memprintf(err, "%s: out of memory", args[*cur_arg]);
+				goto error;
+			}
+			pos += 2;
+		}
+		else if (strcmp(args[pos], "default-period") == 0) {
+			const char *res;
+
+			if (shared) {
+				memprintf(err, "'%s' : cannot mix per-stream and shared parameter",
+					  args[*cur_arg]);
+				goto error;
+			}
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the value is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			per_stream = 1;
+			res = parse_time_err(args[pos + 1], &conf->period, TIME_UNIT_MS);
+			if (res) {
+				memprintf(err, "'%s' : invalid value for option '%s' (unexpected character '%c')",
+					  args[*cur_arg], args[pos], *res);
+				goto error;
+			}
+			pos += 2;
+		}
+		else if (strcmp(args[pos], "limit") == 0) {
+			const char *res;
+
+			if (per_stream) {
+				memprintf(err, "'%s' : cannot mix per-stream and shared parameter",
+					  args[*cur_arg]);
+				goto error;
+			}
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the value is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			shared = 1;
+			res = parse_size_err(args[pos + 1], &conf->limit);
+			if (res) {
+				memprintf(err, "'%s' : invalid value for option '%s' (unexpected character '%c')",
+					  args[*cur_arg], args[pos], *res);
+				goto error;
+			}
+			pos += 2;
+		}
+		else if (strcmp(args[pos], "default-limit") == 0) {
+			const char *res;
+
+			if (shared) {
+				memprintf(err, "'%s' : cannot mix per-stream and shared parameter",
+					  args[*cur_arg]);
+				goto error;
+			}
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the value is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			per_stream = 1;
+			res = parse_size_err(args[pos + 1], &conf->limit);
+			if (res) {
+				memprintf(err, "'%s' : invalid value for option '%s' (unexpected character '%c')",
+					  args[*cur_arg], args[pos], *res);
+				goto error;
+			}
+			pos += 2;
+		}
+		else if (strcmp(args[pos], "min-size") == 0) {
+			const char *res;
+
+			if (!*args[pos + 1]) {
+				memprintf(err, "'%s' : the value is missing for '%s' option",
+					  args[*cur_arg], args[pos]);
+				goto error;
+			}
+			res = parse_size_err(args[pos + 1], &conf->min_size);
+			if (res) {
+				memprintf(err, "'%s' : invalid value for option '%s' (unexpected character '%c')",
+					  args[*cur_arg], args[pos], *res);
+				goto error;
+			}
+			pos += 2;
+		}
+		else
+			break;
+	}
+
+	/* validate that each mode got its mandatory options */
+	if (shared) {
+		conf->flags |= BWLIM_FL_SHARED;
+		if (!conf->expr) {
+			memprintf(err, "'%s' : <key> option is missing", args[*cur_arg]);
+			goto error;
+		}
+		if (!conf->limit) {
+			memprintf(err, "'%s' : <limit> option is missing", args[*cur_arg]);
+			goto error;
+		}
+	}
+	else {
+		/* Per-stream: limit downloads only for now */
+		conf->flags |= BWLIM_FL_OUT;
+		if (!conf->period) {
+			memprintf(err, "'%s' : <default-period> option is missing", args[*cur_arg]);
+			goto error;
+		}
+		if (!conf->limit) {
+			memprintf(err, "'%s' : <default-limit> option is missing", args[*cur_arg]);
+			goto error;
+		}
+	}
+
+	*cur_arg = pos;
+	fconf->id = bwlim_flt_id;
+	fconf->ops = &bwlim_ops;
+	fconf->conf = conf;
+	return 0;
+
+  error:
+	if (conf->name)
+		ha_free(&conf->name);
+	if (conf->expr) {
+		release_sample_expr(conf->expr);
+		conf->expr = NULL;
+	}
+	if (conf->table.n)
+		ha_free(&conf->table.n);
+	free(conf);
+	return -1;
+}
+
+
+/* Parse a "bwlim-in" filter declaration: generic parsing plus the upload
+ * (request channel) direction flag. Returns -1 on error and 0 on success. */
+static int parse_bwlim_in_flt(char **args, int *cur_arg, struct proxy *px, struct flt_conf *fconf,
+			      char **err, void *private)
+{
+	int ret;
+
+	ret = parse_bwlim_flt(args, cur_arg, px, fconf, err, private);
+	if (!ret) {
+		struct bwlim_config *conf = fconf->conf;
+
+		conf->flags |= BWLIM_FL_IN;
+	}
+
+	return ret;
+}
+
+/* Parse a "bwlim-out" filter declaration: generic parsing plus the download
+ * (response channel) direction flag. Returns -1 on error and 0 on success. */
+static int parse_bwlim_out_flt(char **args, int *cur_arg, struct proxy *px, struct flt_conf *fconf,
+			       char **err, void *private)
+{
+	int ret;
+
+	ret = parse_bwlim_flt(args, cur_arg, px, fconf, err, private);
+	if (!ret) {
+		struct bwlim_config *conf = fconf->conf;
+
+		conf->flags |= BWLIM_FL_OUT;
+	}
+	return ret;
+}
+
+/* Declare the filter parsers for the "bwlim-in" and "bwlim-out" keywords */
+static struct flt_kw_list flt_kws = { "BWLIM", { }, {
+		{ "bwlim-in", parse_bwlim_in_flt, NULL },
+		{ "bwlim-out", parse_bwlim_out_flt, NULL },
+		{ NULL, NULL, NULL },
+	}
+};
+
+INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c
new file mode 100644
index 0000000..30f9d2a
--- /dev/null
+++ b/src/flt_http_comp.c
@@ -0,0 +1,1076 @@
+/*
+ * Stream filters related variables and functions.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/compression.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/filters.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/list.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+
+/* Set while the filter is actively compressing payload data in a direction */
+#define COMP_STATE_PROCESSING 0x01
+
+/* Identifier of the compression filter, used to match flt_conf entries */
+const char *http_comp_flt_id = "compression filter";
+
+struct flt_ops comp_ops;
+
+/* Per-stream compression state, attached to the filter's <ctx> */
+struct comp_state {
+ /*
+ * For both comp_ctx and comp_algo, COMP_DIR_REQ is the index
+ * for requests, and COMP_DIR_RES for responses
+ */
+ struct comp_ctx *comp_ctx[2]; /* compression context */
+ struct comp_algo *comp_algo[2]; /* compression algorithm if not NULL */
+ unsigned int flags; /* COMP_STATE_* */
+};
+
+/* Pools used to allocate comp_state structs */
+DECLARE_STATIC_POOL(pool_head_comp_state, "comp_state", sizeof(struct comp_state));
+
+/* Per-thread work buffers, allocated in comp_flt_init_per_thread */
+static THREAD_LOCAL struct buffer tmpbuf;
+static THREAD_LOCAL struct buffer zbuf;
+
+static int select_compression_request_header(struct comp_state *st,
+ struct stream *s,
+ struct http_msg *msg);
+static int select_compression_response_header(struct comp_state *st,
+ struct stream *s,
+ struct http_msg *msg);
+static int set_compression_header(struct comp_state *st,
+ struct stream *s,
+ struct http_msg *msg);
+
+static int htx_compression_buffer_init(struct htx *htx, struct buffer *out);
+static int htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
+ struct buffer *out, int dir);
+static int htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end, int dir);
+
+/***********************************************************************/
+/* Filter init callback: the compression filter only works on HTX streams */
+static int
+comp_flt_init(struct proxy *px, struct flt_conf *fconf)
+{
+ fconf->flags |= FLT_CFG_FL_HTX;
+ return 0;
+}
+
+/* Per-thread init: allocate the two thread-local work buffers.
+ * Returns 0 on success, -1 on allocation failure. */
+static int
+comp_flt_init_per_thread(struct proxy *px, struct flt_conf *fconf)
+{
+ if (b_alloc(&tmpbuf) == NULL)
+ return -1;
+ if (b_alloc(&zbuf) == NULL)
+ return -1;
+ return 0;
+}
+
+/* Per-thread deinit: release the work buffers if they were allocated */
+static void
+comp_flt_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
+{
+ if (tmpbuf.size)
+ b_free(&tmpbuf);
+ if (zbuf.size)
+ b_free(&zbuf);
+}
+
+/* Stream attach callback: allocate and reset the per-stream compression
+ * state. Returns 1 on success, -1 on allocation failure (the filter is then
+ * not attached to this stream). */
+static int
+comp_strm_init(struct stream *s, struct filter *filter)
+{
+ struct comp_state *st;
+
+ st = pool_alloc(pool_head_comp_state);
+ if (st == NULL)
+ return -1;
+
+ /* no algorithm nor context selected yet in either direction */
+ st->comp_algo[COMP_DIR_REQ] = NULL;
+ st->comp_algo[COMP_DIR_RES] = NULL;
+ st->comp_ctx[COMP_DIR_REQ] = NULL;
+ st->comp_ctx[COMP_DIR_RES] = NULL;
+ st->flags = 0;
+ filter->ctx = st;
+
+ /* Register post-analyzer on AN_RES_WAIT_HTTP because we need to
+ * analyze response headers before http-response rules execution
+ * to be sure we can use res.comp and res.comp_algo sample
+ * fetches */
+ filter->post_analyzers |= AN_RES_WAIT_HTTP;
+ return 1;
+}
+
+/* Stream detach callback: terminate any compression context still open in
+ * either direction, then release the per-stream state back to its pool.
+ */
+static void
+comp_strm_deinit(struct stream *s, struct filter *filter)
+{
+	struct comp_state *st = filter->ctx;
+
+	if (st == NULL)
+		return;
+
+	/* end the per-direction compression contexts, if any */
+	if (st->comp_algo[COMP_DIR_REQ] != NULL)
+		st->comp_algo[COMP_DIR_REQ]->end(&st->comp_ctx[COMP_DIR_REQ]);
+	if (st->comp_algo[COMP_DIR_RES] != NULL)
+		st->comp_algo[COMP_DIR_RES]->end(&st->comp_ctx[COMP_DIR_RES]);
+
+	pool_free(pool_head_comp_state, st);
+	filter->ctx = NULL;
+}
+
+/* Check whether the client request may be compressed and, if so, select the
+ * request compression algorithm (backend configuration has priority over the
+ * frontend one) and initialize its compression context. On any failure the
+ * function leaves st->comp_algo[COMP_DIR_REQ] set to NULL, which disables
+ * request compression for this stream.
+ *
+ * Note: the original code performed the rate-limit and CPU-idle checks twice
+ * (once before and once after the algo selection); the redundant first pair
+ * was removed, the outcome is identical on every path.
+ */
+static void
+comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct http_msg *msg)
+{
+	struct htx *htx = htxbuf(&msg->chn->buf);
+	struct http_txn *txn = s->txn;
+	struct http_hdr_ctx ctx;
+	struct comp_type *comp_type;
+
+	ctx.blk = NULL;
+	/* Already compressed, don't bother */
+	if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
+		return;
+	/* HTTP < 1.1 should not be compressed */
+	if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
+		return;
+	comp_type = NULL;
+
+	/* We don't want to compress content-types not listed in the
+	 * "compression type" directive if any. If no content-type was found
+	 * but configuration requires one, we don't compress either. Backend
+	 * has the priority.
+	 */
+	ctx.blk = NULL;
+	if (http_find_header(htx, ist("Content-Type"), &ctx, 1)) {
+		if ((s->be->comp && (comp_type = s->be->comp->types_req)) ||
+		    (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types_req))) {
+			for (; comp_type; comp_type = comp_type->next) {
+				if (ctx.value.len >= comp_type->name_len &&
+				    strncasecmp(ctx.value.ptr, comp_type->name, comp_type->name_len) == 0)
+					/* this Content-Type should be compressed */
+					break;
+			}
+			/* this Content-Type should not be compressed */
+			if (comp_type == NULL)
+				goto fail;
+		}
+	}
+	else { /* no content-type header */
+		if ((s->be->comp && s->be->comp->types_req) ||
+		    (strm_fe(s)->comp && strm_fe(s)->comp->types_req))
+			goto fail; /* a content-type was required */
+	}
+
+	/* a HEAD request carries no body to compress */
+	if (txn->meth == HTTP_METH_HEAD)
+		return;
+	if (s->be->comp && s->be->comp->algo_req != NULL)
+		st->comp_algo[COMP_DIR_REQ] = s->be->comp->algo_req;
+	else if (strm_fe(s)->comp && strm_fe(s)->comp->algo_req != NULL)
+		st->comp_algo[COMP_DIR_REQ] = strm_fe(s)->comp->algo_req;
+	else
+		goto fail; /* no algo selected: nothing to do */
+
+	/* limit compression rate */
+	if (global.comp_rate_lim > 0)
+		if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
+			goto fail;
+
+	/* limit cpu usage */
+	if (th_ctx->idle_pct < compress_min_idle)
+		goto fail;
+
+	/* initialize compression */
+	if (st->comp_algo[COMP_DIR_REQ]->init(&st->comp_ctx[COMP_DIR_REQ], global.tune.comp_maxlevel) < 0)
+		goto fail;
+
+	return;
+  fail:
+	st->comp_algo[COMP_DIR_REQ] = NULL;
+}
+
+/* http_headers callback: decide, per direction, whether the message payload
+ * must be compressed. When compression is enabled for a direction, the
+ * message headers are updated and the filter registers itself on the
+ * channel's data so comp_http_payload() gets called. Always returns 1 to let
+ * the HTTP analysis continue. */
+static int
+comp_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+ struct comp_state *st = filter->ctx;
+ int comp_flags = 0;
+
+ if (!strm_fe(s)->comp && !s->be->comp)
+ goto end;
+ if (strm_fe(s)->comp)
+ comp_flags |= strm_fe(s)->comp->flags;
+ if (s->be->comp)
+ comp_flags |= s->be->comp->flags;
+
+ if (!(msg->chn->flags & CF_ISRESP)) {
+ /* request channel: maybe compress the request body, and parse the
+ * client's Accept-Encoding to prepare response compression */
+ if (comp_flags & COMP_FL_DIR_REQ) {
+ comp_prepare_compress_request(st, s, msg);
+ if (st->comp_algo[COMP_DIR_REQ]) {
+ if (!set_compression_header(st, s, msg))
+ goto end;
+ register_data_filter(s, msg->chn, filter);
+ st->flags |= COMP_STATE_PROCESSING;
+ }
+ }
+ if (comp_flags & COMP_FL_DIR_RES)
+ select_compression_request_header(st, s, msg);
+ } else if (comp_flags & COMP_FL_DIR_RES) {
+ /* Response headers have already been checked in
+ * comp_http_post_analyze callback. */
+ if (st->comp_algo[COMP_DIR_RES]) {
+ if (!set_compression_header(st, s, msg))
+ goto end;
+ register_data_filter(s, msg->chn, filter);
+ st->flags |= COMP_STATE_PROCESSING;
+ }
+ }
+
+ end:
+ return 1;
+}
+
+/* Post-analyzer callback, registered on AN_RES_WAIT_HTTP: once the response
+ * headers are available, select the response compression algorithm so that
+ * the res.comp and res.comp_algo sample fetches work from http-response
+ * rules. Always returns 1 to continue the analysis.
+ */
+static int
+comp_http_post_analyze(struct stream *s, struct filter *filter,
+		       struct channel *chn, unsigned an_bit)
+{
+	struct comp_state *st = filter->ctx;
+
+	if (an_bit == AN_RES_WAIT_HTTP &&
+	    (strm_fe(s)->comp || s->be->comp))
+		select_compression_response_header(st, s, &s->txn->rsp);
+
+	return 1;
+}
+
+/* http_payload callback: compress up to <len> bytes of payload starting at
+ * <offset> in the message, walking the HTX blocks in place. DATA blocks are
+ * replaced by their compressed form; trailers/EOT flush and close the
+ * compression stream. Returns the number of bytes that may be forwarded, or
+ * -1 on error. */
+static int
+comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
+ unsigned int offset, unsigned int len)
+{
+ struct comp_state *st = filter->ctx;
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_ret htxret = htx_find_offset(htx, offset);
+ struct htx_blk *blk, *next;
+ int ret, consumed = 0, to_forward = 0, last = 0;
+ int dir;
+
+ if (msg->chn->flags & CF_ISRESP)
+ dir = COMP_DIR_RES;
+ else
+ dir = COMP_DIR_REQ;
+
+ blk = htxret.blk;
+ offset = htxret.ret;
+ for (next = NULL; blk && len; blk = next) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+ struct ist v;
+
+ /* pre-compute the next block, skipping unused ones, because the
+ * current block may be removed or replaced below */
+ next = htx_get_next_blk(htx, blk);
+ while (next && htx_get_blk_type(next) == HTX_BLK_UNUSED)
+ next = htx_get_next_blk(htx, next);
+
+ if (!(st->flags & COMP_STATE_PROCESSING))
+ goto consume;
+
+ /* not enough room in the HTX to store the compressed output:
+ * wake up later instead of failing */
+ if (htx_compression_buffer_init(htx, &trash) < 0) {
+ msg->chn->flags |= CF_WAKE_WRITE;
+ goto end;
+ }
+
+ switch (type) {
+ case HTX_BLK_DATA:
+ /* it is the last data block */
+ last = ((!next && (htx->flags & HTX_FL_EOM)) || (next && htx_get_blk_type(next) != HTX_BLK_DATA));
+ v = htx_get_blk_value(htx, blk);
+ v = istadv(v, offset);
+ if (v.len > len) {
+ last = 0;
+ v.len = len;
+ }
+
+ ret = htx_compression_buffer_add_data(st, v.ptr, v.len, &trash, dir);
+ if (ret < 0 || htx_compression_buffer_end(st, &trash, last, dir) < 0)
+ goto error;
+ BUG_ON(v.len != ret);
+
+ /* whole block consumed with no output yet: drop it,
+ * otherwise replace its value by the compressed data */
+ if (ret == sz && !b_data(&trash))
+ next = htx_remove_blk(htx, blk);
+ else {
+ blk = htx_replace_blk_value(htx, blk, v, ist2(b_head(&trash), b_data(&trash)));
+ next = htx_get_next_blk(htx, blk);
+ }
+
+ len -= ret;
+ consumed += ret;
+ to_forward += b_data(&trash);
+ if (last)
+ st->flags &= ~COMP_STATE_PROCESSING;
+ break;
+
+ case HTX_BLK_TLR:
+ case HTX_BLK_EOT:
+ /* end of payload: finish the compression stream and emit
+ * any remaining output before the trailers */
+ if (htx_compression_buffer_end(st, &trash, 1, dir) < 0)
+ goto error;
+ if (b_data(&trash)) {
+ struct htx_blk *last = htx_add_last_data(htx, ist2(b_head(&trash), b_data(&trash)));
+ if (!last)
+ goto error;
+ blk = htx_get_next_blk(htx, last);
+ if (!blk)
+ goto error;
+ next = htx_get_next_blk(htx, blk);
+ to_forward += b_data(&trash);
+ }
+ st->flags &= ~COMP_STATE_PROCESSING;
+ __fallthrough;
+
+ default:
+ consume:
+ /* forward the block untouched */
+ sz -= offset;
+ if (sz > len)
+ sz = len;
+ consumed += sz;
+ to_forward += sz;
+ len -= sz;
+ break;
+ }
+
+ offset = 0;
+ }
+
+ end:
+ if (to_forward != consumed)
+ flt_update_offsets(filter, msg->chn, to_forward - consumed);
+
+ /* account compressed vs bypassed bytes in global and per-proxy counters */
+ if (st->comp_ctx[dir] && st->comp_ctx[dir]->cur_lvl > 0) {
+ update_freq_ctr(&global.comp_bps_in, consumed);
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_in[dir], consumed);
+ _HA_ATOMIC_ADD(&s->be->be_counters.comp_in[dir], consumed);
+ update_freq_ctr(&global.comp_bps_out, to_forward);
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_out[dir], to_forward);
+ _HA_ATOMIC_ADD(&s->be->be_counters.comp_out[dir], to_forward);
+ } else {
+ _HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.comp_byp[dir], consumed);
+ _HA_ATOMIC_ADD(&s->be->be_counters.comp_byp[dir], consumed);
+ }
+ return to_forward;
+
+ error:
+ return -1;
+}
+
+
+/* http_end callback: when a response was compressed, bump the per-proxy
+ * "compressed responses" counters. Always returns 1. */
+static int
+comp_http_end(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+{
+ struct comp_state *st = filter->ctx;
+
+ /* only responses effectively compressed are accounted for */
+ if (!(msg->chn->flags & CF_ISRESP) || !st || !st->comp_algo[COMP_DIR_RES])
+ goto end;
+
+ if (strm_fe(s)->mode == PR_MODE_HTTP)
+ _HA_ATOMIC_INC(&strm_fe(s)->fe_counters.p.http.comp_rsp);
+ if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
+ _HA_ATOMIC_INC(&s->be->be_counters.p.http.comp_rsp);
+ end:
+ return 1;
+}
+
+/***********************************************************************/
+/* Rewrite the message headers for compression: switch to chunked
+ * transfer-encoding, drop Content-Length, weaken a strong ETag, add
+ * "Vary: Accept-Encoding" if missing, and advertise the selected algorithm
+ * via Content-Encoding (unless it is "identity"). Returns 1 on success. On
+ * failure, the direction's compression context is released, its algo reset
+ * to NULL, and 0 is returned. */
+static int
+set_compression_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
+{
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_sl *sl;
+ struct http_hdr_ctx ctx, last_vary;
+ struct comp_algo *comp_algo;
+ int comp_index;
+
+ if (msg->chn->flags & CF_ISRESP)
+ comp_index = COMP_DIR_RES;
+ else
+ comp_index = COMP_DIR_REQ;
+
+ sl = http_get_stline(htx);
+ if (!sl)
+ goto error;
+
+ comp_algo = st->comp_algo[comp_index];
+
+ /* add "Transfer-Encoding: chunked" header */
+ if (!(msg->flags & HTTP_MSGF_TE_CHNK)) {
+ if (!http_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
+ goto error;
+ msg->flags |= HTTP_MSGF_TE_CHNK;
+ sl->flags |= (HTX_SL_F_XFER_ENC|HTX_SL_F_CHNK);
+ }
+
+ /* remove Content-Length header: the compressed length is unknown */
+ if (msg->flags & HTTP_MSGF_CNT_LEN) {
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("Content-Length"), &ctx, 1))
+ http_remove_header(htx, &ctx);
+ msg->flags &= ~HTTP_MSGF_CNT_LEN;
+ sl->flags &= ~HTX_SL_F_CLEN;
+ }
+
+ /* convert "ETag" header to a weak ETag (body bytes change, entity does not) */
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("ETag"), &ctx, 1)) {
+ if (ctx.value.ptr[0] == '"') {
+ /* This a strong ETag. Convert it to a weak one. */
+ struct ist v = ist2(trash.area, 0);
+ if (istcat(&v, ist("W/"), trash.size) == -1 || istcat(&v, ctx.value, trash.size) == -1)
+ goto error;
+
+ if (!http_replace_header_value(htx, &ctx, v))
+ goto error;
+ }
+ }
+
+ /* Add "Vary: Accept-Encoding" header but only if it is not found. */
+ ctx.blk = NULL;
+ last_vary.blk = NULL;
+ while (http_find_header(htx, ist("Vary"), &ctx, 0)) {
+ if (isteqi(ctx.value, ist("Accept-Encoding")))
+ break;
+ last_vary = ctx;
+ }
+ /* No "Accept-Encoding" value found. */
+ if (ctx.blk == NULL) {
+ if (last_vary.blk == NULL) {
+ /* No Vary header found at all. Add our header */
+ if (!http_add_header(htx, ist("Vary"), ist("Accept-Encoding")))
+ goto error;
+ }
+ else {
+ /* At least one Vary header found. Append the value to
+ * the last one.
+ */
+ if (!http_append_header_value(htx, &last_vary, ist("Accept-Encoding")))
+ goto error;
+ }
+ }
+
+ /*
+ * Add Content-Encoding header when it's not identity encoding.
+ * RFC 2616 : Identity encoding: This content-coding is used only in the
+ * Accept-Encoding header, and SHOULD NOT be used in the Content-Encoding
+ * header.
+ */
+ if (comp_algo->cfg_name_len != 8 || memcmp(comp_algo->cfg_name, "identity", 8) != 0) {
+ struct ist v = ist2(comp_algo->ua_name, comp_algo->ua_name_len);
+
+ if (!http_add_header(htx, ist("Content-Encoding"), v))
+ goto error;
+ }
+
+ return 1;
+
+ error:
+ st->comp_algo[comp_index]->end(&st->comp_ctx[comp_index]);
+ st->comp_algo[comp_index] = NULL;
+ return 0;
+}
+
+/*
+ * Selects a compression algorithm for the response depending on the client
+ * request: parses Accept-Encoding (with q-values) against the configured
+ * algorithms (backend has priority over frontend), with a workaround for
+ * broken "Mozilla/4" user agents. When "compression offload" is set, the
+ * Accept-Encoding header is removed from the request. Returns 1 when an
+ * algorithm was selected, 0 otherwise.
+ */
+static int
+select_compression_request_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
+{
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct http_hdr_ctx ctx;
+ struct comp_algo *comp_algo = NULL;
+ struct comp_algo *comp_algo_back = NULL;
+
+ /* Disable compression for older user agents announcing themselves as "Mozilla/4"
+ * unless they are known good (MSIE 6 with XP SP2, or MSIE 7 and later).
+ * See http://zoompf.com/2012/02/lose-the-wait-http-compression for more details.
+ */
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("User-Agent"), &ctx, 1) &&
+ ctx.value.len >= 9 &&
+ memcmp(ctx.value.ptr, "Mozilla/4", 9) == 0 &&
+ (ctx.value.len < 31 ||
+ memcmp(ctx.value.ptr + 25, "MSIE ", 5) != 0 ||
+ *(ctx.value.ptr + 30) < '6' ||
+ (*(ctx.value.ptr + 30) == '6' &&
+ (ctx.value.len < 54 || memcmp(ctx.value.ptr + 51, "SV1", 3) != 0)))) {
+ st->comp_algo[COMP_DIR_RES] = NULL;
+ return 0;
+ }
+
+ /* search for the algo in the backend in priority or the frontend */
+ if ((s->be->comp && (comp_algo_back = s->be->comp->algos_res)) ||
+ (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos_res))) {
+ int best_q = 0;
+
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 0)) {
+ const char *qval;
+ int q;
+ int toklen;
+
+ /* try to isolate the token from the optional q-value */
+ toklen = 0;
+ while (toklen < ctx.value.len && HTTP_IS_TOKEN(*(ctx.value.ptr + toklen)))
+ toklen++;
+
+ qval = ctx.value.ptr + toklen;
+ while (1) {
+ while (qval < istend(ctx.value) && HTTP_IS_LWS(*qval))
+ qval++;
+
+ if (qval >= istend(ctx.value) || *qval != ';') {
+ qval = NULL;
+ break;
+ }
+ qval++;
+
+ while (qval < istend(ctx.value) && HTTP_IS_LWS(*qval))
+ qval++;
+
+ if (qval >= istend(ctx.value)) {
+ qval = NULL;
+ break;
+ }
+ if (strncmp(qval, "q=", MIN(istend(ctx.value) - qval, 2)) == 0)
+ break;
+
+ while (qval < istend(ctx.value) && *qval != ';')
+ qval++;
+ }
+
+ /* here we have qval pointing to the first "q=" attribute or NULL if not found */
+ q = qval ? http_parse_qvalue(qval + 2, NULL) : 1000;
+
+ if (q <= best_q)
+ continue;
+
+ /* pick the configured algorithm matching this token ("*" matches any) */
+ for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
+ if (*(ctx.value.ptr) == '*' ||
+ word_match(ctx.value.ptr, toklen, comp_algo->ua_name, comp_algo->ua_name_len)) {
+ st->comp_algo[COMP_DIR_RES] = comp_algo;
+ best_q = q;
+ break;
+ }
+ }
+ }
+ }
+
+ /* remove all occurrences of the header when "compression offload" is set */
+ if (st->comp_algo[COMP_DIR_RES]) {
+ if ((s->be->comp && (s->be->comp->flags & COMP_FL_OFFLOAD)) ||
+ (strm_fe(s)->comp && (strm_fe(s)->comp->flags & COMP_FL_OFFLOAD))) {
+ http_remove_header(htx, &ctx);
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("Accept-Encoding"), &ctx, 1))
+ http_remove_header(htx, &ctx);
+ }
+ return 1;
+ }
+
+ /* identity is implicit does not require headers */
+ if ((s->be->comp && (comp_algo_back = s->be->comp->algos_res)) ||
+ (strm_fe(s)->comp && (comp_algo_back = strm_fe(s)->comp->algos_res))) {
+ for (comp_algo = comp_algo_back; comp_algo; comp_algo = comp_algo->next) {
+ if (comp_algo->cfg_name_len == 8 && memcmp(comp_algo->cfg_name, "identity", 8) == 0) {
+ st->comp_algo[COMP_DIR_RES] = comp_algo;
+ return 1;
+ }
+ }
+ }
+
+ st->comp_algo[COMP_DIR_RES] = NULL;
+ return 0;
+}
+
+/*
+ * Selects a compression algorithm depending of the server response: checks
+ * that the response is actually compressible (status, version, flags,
+ * headers, content-type) before initializing the compression context chosen
+ * earlier from the request. On success HTTP_MSGF_COMPRESSING is set and 1 is
+ * returned; otherwise the response algo is reset to NULL and 0 is returned.
+ */
+static int
+select_compression_response_header(struct comp_state *st, struct stream *s, struct http_msg *msg)
+{
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct http_txn *txn = s->txn;
+ struct http_hdr_ctx ctx;
+ struct comp_type *comp_type;
+
+ /* no common compression algorithm was found in request header */
+ if (st->comp_algo[COMP_DIR_RES] == NULL)
+ goto fail;
+
+ /* compression already in progress */
+ if (msg->flags & HTTP_MSGF_COMPRESSING)
+ goto fail;
+
+ /* HTTP < 1.1 should not be compressed */
+ if (!(msg->flags & HTTP_MSGF_VER_11) || !(txn->req.flags & HTTP_MSGF_VER_11))
+ goto fail;
+
+ if (txn->meth == HTTP_METH_HEAD)
+ goto fail;
+
+ /* compress 200,201,202,203 responses only */
+ if ((txn->status != 200) &&
+ (txn->status != 201) &&
+ (txn->status != 202) &&
+ (txn->status != 203))
+ goto fail;
+
+ if (!(msg->flags & HTTP_MSGF_XFER_LEN) || msg->flags & HTTP_MSGF_BODYLESS)
+ goto fail;
+
+ /* content is already compressed */
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
+ goto fail;
+
+ /* no compression when Cache-Control: no-transform is present in the message */
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("Cache-Control"), &ctx, 0)) {
+ if (word_match(ctx.value.ptr, ctx.value.len, "no-transform", 12))
+ goto fail;
+ }
+
+ /* no compression when ETag is malformed */
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("ETag"), &ctx, 1)) {
+ if (http_get_etag_type(ctx.value) == ETAG_INVALID)
+ goto fail;
+ }
+ /* no compression when multiple ETags are present
+ * Note: Do not reset ctx.blk!
+ */
+ if (http_find_header(htx, ist("ETag"), &ctx, 1))
+ goto fail;
+
+ comp_type = NULL;
+
+ /* we don't want to compress multipart content-types, nor content-types that are
+ * not listed in the "compression type" directive if any. If no content-type was
+ * found but configuration requires one, we don't compress either. Backend has
+ * the priority.
+ */
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("Content-Type"), &ctx, 1)) {
+ if (ctx.value.len >= 9 && strncasecmp("multipart", ctx.value.ptr, 9) == 0)
+ goto fail;
+
+ if ((s->be->comp && (comp_type = s->be->comp->types_res)) ||
+ (strm_fe(s)->comp && (comp_type = strm_fe(s)->comp->types_res))) {
+ for (; comp_type; comp_type = comp_type->next) {
+ if (ctx.value.len >= comp_type->name_len &&
+ strncasecmp(ctx.value.ptr, comp_type->name, comp_type->name_len) == 0)
+ /* this Content-Type should be compressed */
+ break;
+ }
+ /* this Content-Type should not be compressed */
+ if (comp_type == NULL)
+ goto fail;
+ }
+ }
+ else { /* no content-type header */
+ if ((s->be->comp && s->be->comp->types_res) ||
+ (strm_fe(s)->comp && strm_fe(s)->comp->types_res))
+ goto fail; /* a content-type was required */
+ }
+
+ /* limit compression rate */
+ if (global.comp_rate_lim > 0)
+ if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
+ goto fail;
+
+ /* limit cpu usage */
+ if (th_ctx->idle_pct < compress_min_idle)
+ goto fail;
+
+ /* initialize compression */
+ if (st->comp_algo[COMP_DIR_RES]->init(&st->comp_ctx[COMP_DIR_RES], global.tune.comp_maxlevel) < 0)
+ goto fail;
+ msg->flags |= HTTP_MSGF_COMPRESSING;
+ return 1;
+
+ fail:
+ st->comp_algo[COMP_DIR_RES] = NULL;
+ return 0;
+}
+
+/***********************************************************************/
+/* Prepare <out> to receive compressed data for <htx>. Returns -1 when the
+ * HTX lacks room for the worst-case compressed output, 0 otherwise. */
+static int
+htx_compression_buffer_init(struct htx *htx, struct buffer *out)
+{
+ /* output stream requires at least 10 bytes for the gzip header, plus
+ * at least 8 bytes for the gzip trailer (crc+len), plus a possible
+ * plus at most 5 bytes per 32kB block and 2 bytes to close the stream.
+ */
+ if (htx_free_space(htx) < 20 + 5 * ((htx->data + 32767) >> 15))
+ return -1;
+ b_reset(out);
+ return 0;
+}
+
+/* Feed <len> bytes from <data> into direction <dir>'s compressor, appending
+ * output to <out>. Returns the algorithm's add_data() result. */
+static int
+htx_compression_buffer_add_data(struct comp_state *st, const char *data, size_t len,
+ struct buffer *out, int dir)
+{
+
+ return st->comp_algo[dir]->add_data(st->comp_ctx[dir], data, len, out);
+}
+
+/* Flush direction <dir>'s compressor into <out>; when <end> is set, finish
+ * (close) the compression stream instead of just flushing it. */
+static int
+htx_compression_buffer_end(struct comp_state *st, struct buffer *out, int end, int dir)
+{
+
+ if (end)
+ return st->comp_algo[dir]->finish(st->comp_ctx[dir], out);
+ else
+ return st->comp_algo[dir]->flush(st->comp_ctx[dir], out);
+}
+
+
+/***********************************************************************/
+/* Callbacks of the compression filter */
+struct flt_ops comp_ops = {
+ .init = comp_flt_init,
+ .init_per_thread = comp_flt_init_per_thread,
+ .deinit_per_thread = comp_flt_deinit_per_thread,
+
+ .attach = comp_strm_init,
+ .detach = comp_strm_deinit,
+
+ .channel_post_analyze = comp_http_post_analyze,
+
+ .http_headers = comp_http_headers,
+ .http_payload = comp_http_payload,
+ .http_end = comp_http_end,
+};
+
+/* Parse the "compression" config keyword and its sub-keywords ("algo[-res]",
+ * "algo-req", "offload", "type[-res]", "type-req", "direction"), filling the
+ * proxy's comp settings. Returns 0 on success, 1 on a non-fatal warning
+ * (with <err> filled), -1 on error (with <err> filled).
+ *
+ * Fixes vs the previous version:
+ *  - the calloc() result is now checked before use (NULL deref on OOM);
+ *  - "direction response" used "comp->flags &= COMP_FL_DIR_REQ" which
+ *    cleared every other flag (e.g. COMP_FL_OFFLOAD) instead of only
+ *    clearing the request bit; it now uses "&= ~COMP_FL_DIR_REQ";
+ *  - a missing "direction" argument is detected with !*args[2], matching
+ *    the other branches (config parser args are empty strings, not NULL).
+ */
+static int
+parse_compression_options(char **args, int section, struct proxy *proxy,
+			  const struct proxy *defpx, const char *file, int line,
+			  char **err)
+{
+	struct comp *comp;
+	int ret = 0;
+
+	if (proxy->comp == NULL) {
+		comp = calloc(1, sizeof(*comp));
+		if (comp == NULL) {
+			memprintf(err, "'%s' : out of memory.", args[0]);
+			ret = -1;
+			goto end;
+		}
+		/* Always default to compress responses */
+		comp->flags = COMP_FL_DIR_RES;
+		proxy->comp = comp;
+	}
+	else
+		comp = proxy->comp;
+
+	if (strcmp(args[1], "algo") == 0 || strcmp(args[1], "algo-res") == 0) {
+		struct comp_ctx *ctx;
+		int cur_arg = 2;
+
+		if (!*args[cur_arg]) {
+			memprintf(err, "parsing [%s:%d] : '%s' expects <algorithm>.",
+				  file, line, args[0]);
+			ret = -1;
+			goto end;
+		}
+		while (*(args[cur_arg])) {
+			int retval = comp_append_algo(&comp->algos_res, args[cur_arg]);
+			if (retval) {
+				if (retval < 0)
+					memprintf(err, "'%s' : '%s' is not a supported algorithm.",
+						  args[0], args[cur_arg]);
+				else
+					memprintf(err, "'%s' : out of memory while parsing algo '%s'.",
+						  args[0], args[cur_arg]);
+				ret = -1;
+				goto end;
+			}
+
+			/* sanity check: make sure the algo can actually be initialized */
+			if (proxy->comp->algos_res->init(&ctx, 9) == 0)
+				proxy->comp->algos_res->end(&ctx);
+			else {
+				memprintf(err, "'%s' : Can't init '%s' algorithm.",
+					  args[0], args[cur_arg]);
+				ret = -1;
+				goto end;
+			}
+			cur_arg++;
+			continue;
+		}
+	}
+	else if (strcmp(args[1], "algo-req") == 0) {
+		struct comp_ctx *ctx;
+		int retval = comp_append_algo(&comp->algo_req, args[2]);
+
+		if (retval) {
+			if (retval < 0)
+				memprintf(err, "'%s' : '%s' is not a supported algorithm.",
+					  args[0], args[2]);
+			else
+				memprintf(err, "'%s' : out of memory while parsing algo '%s'.",
+					  args[0], args[2]);
+			ret = -1;
+			goto end;
+		}
+
+		/* sanity check: make sure the algo can actually be initialized */
+		if (proxy->comp->algo_req->init(&ctx, 9) == 0)
+			proxy->comp->algo_req->end(&ctx);
+		else {
+			memprintf(err, "'%s' : Can't init '%s' algorithm.",
+				  args[0], args[2]);
+			ret = -1;
+			goto end;
+		}
+	}
+	else if (strcmp(args[1], "offload") == 0) {
+		if (proxy->cap & PR_CAP_DEF) {
+			memprintf(err, "'%s' : '%s' ignored in 'defaults' section.",
+				  args[0], args[1]);
+			ret = 1;
+		}
+		comp->flags |= COMP_FL_OFFLOAD;
+	}
+	else if (strcmp(args[1], "type") == 0 || strcmp(args[1], "type-res") == 0) {
+		int cur_arg = 2;
+
+		if (!*args[cur_arg]) {
+			memprintf(err, "'%s' expects <type>.", args[0]);
+			ret = -1;
+			goto end;
+		}
+		while (*(args[cur_arg])) {
+			if (comp_append_type(&comp->types_res, args[cur_arg])) {
+				memprintf(err, "'%s': out of memory.", args[0]);
+				ret = -1;
+				goto end;
+			}
+			cur_arg++;
+			continue;
+		}
+	}
+	else if (strcmp(args[1], "type-req") == 0) {
+		int cur_arg = 2;
+
+		if (!*args[cur_arg]) {
+			memprintf(err, "'%s' expects <type>.", args[0]);
+			ret = -1;
+			goto end;
+		}
+		while (*(args[cur_arg])) {
+			if (comp_append_type(&comp->types_req, args[cur_arg])) {
+				memprintf(err, "'%s': out of memory.", args[0]);
+				ret = -1;
+				goto end;
+			}
+			cur_arg++;
+			continue;
+		}
+	}
+	else if (strcmp(args[1], "direction") == 0) {
+		if (!*args[2]) {
+			memprintf(err, "'%s' expects 'request', 'response', or 'both'.", args[0]);
+			ret = -1;
+			goto end;
+		}
+		if (strcmp(args[2], "request") == 0) {
+			comp->flags &= ~COMP_FL_DIR_RES;
+			comp->flags |= COMP_FL_DIR_REQ;
+		} else if (strcmp(args[2], "response") == 0) {
+			comp->flags &= ~COMP_FL_DIR_REQ;
+			comp->flags |= COMP_FL_DIR_RES;
+		} else if (strcmp(args[2], "both") == 0)
+			comp->flags |= COMP_FL_DIR_REQ | COMP_FL_DIR_RES;
+		else {
+			memprintf(err, "'%s' expects 'request', 'response', or 'both'.", args[0]);
+			ret = -1;
+			goto end;
+		}
+	}
+	else {
+		memprintf(err, "'%s' expects 'algo', 'type' 'direction' or 'offload'",
+			  args[0]);
+		ret = -1;
+		goto end;
+	}
+
+  end:
+	return ret;
+}
+
+/* Parse the "compression" filter keyword. A proxy supports at most one
+ * compression filter, so reject a duplicate declaration, then fill <fconf>.
+ * Returns 0 on success, -1 on error (with <err> filled).
+ */
+static int
+parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
+		    struct flt_conf *fconf, char **err, void *private)
+{
+	struct flt_conf *fc, *back;
+
+	list_for_each_entry_safe(fc, back, &px->filter_configs, list) {
+		if (fc->id != http_comp_flt_id)
+			continue;
+		memprintf(err, "%s: Proxy supports only one compression filter\n", px->id);
+		return -1;
+	}
+
+	fconf->id   = http_comp_flt_id;
+	fconf->conf = NULL;
+	fconf->ops  = &comp_ops;
+	(*cur_arg)++;
+	return 0;
+}
+
+
+/* When "compression" settings exist on <proxy> but no compression filter was
+ * explicitly declared, append an implicit one at the end of the filter list.
+ * Fails when the compression filter would end up before a cache filter, or
+ * when other explicit filters exist (an explicit declaration is then
+ * required). Returns the number of errors (0 on success). */
+int
+check_implicit_http_comp_flt(struct proxy *proxy)
+{
+ struct flt_conf *fconf;
+ int explicit = 0;
+ int comp = 0;
+ int err = 0;
+
+ if (proxy->comp == NULL)
+ goto end;
+ if (!LIST_ISEMPTY(&proxy->filter_configs)) {
+ list_for_each_entry(fconf, &proxy->filter_configs, list) {
+ if (fconf->id == http_comp_flt_id)
+ comp = 1;
+ else if (fconf->id == cache_store_flt_id) {
+ /* compression must not be declared before a cache filter */
+ if (comp) {
+ ha_alert("config: %s '%s': unable to enable the compression filter "
+ "before any cache filter.\n",
+ proxy_type_str(proxy), proxy->id);
+ err++;
+ goto end;
+ }
+ }
+ else if (fconf->id == fcgi_flt_id)
+ continue;
+ else
+ explicit = 1;
+ }
+ }
+ if (comp)
+ goto end;
+ else if (explicit) {
+ ha_alert("config: %s '%s': require an explicit filter declaration to use "
+ "HTTP compression\n", proxy_type_str(proxy), proxy->id);
+ err++;
+ goto end;
+ }
+
+ /* Implicit declaration of the compression filter is always the last
+ * one */
+ fconf = calloc(1, sizeof(*fconf));
+ if (!fconf) {
+ ha_alert("config: %s '%s': out of memory\n",
+ proxy_type_str(proxy), proxy->id);
+ err++;
+ goto end;
+ }
+ fconf->id = http_comp_flt_id;
+ fconf->conf = NULL;
+ fconf->ops = &comp_ops;
+ LIST_APPEND(&proxy->filter_configs, &fconf->list);
+ end:
+ return err;
+}
+
+/*
+ * boolean, returns true if compression is used (either gzip or deflate) in
+ * the response.
+ */
+static int
+smp_fetch_res_comp(const struct arg *args, struct sample *smp, const char *kw,
+		   void *private)
+{
+	struct http_txn *txn = (smp->strm ? smp->strm->txn : NULL);
+	int compressing = (txn != NULL) && (txn->rsp.flags & HTTP_MSGF_COMPRESSING);
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = compressing;
+	return 1;
+}
+
+/*
+ * string, returns the configured name of the response compression algorithm
+ * in use, by looking up this stream's compression filter instance. Returns 0
+ * (no sample) when the response is not being compressed or the filter state
+ * is missing.
+ */
+static int
+smp_fetch_res_comp_algo(const struct arg *args, struct sample *smp,
+ const char *kw, void *private)
+{
+ struct http_txn *txn = smp->strm ? smp->strm->txn : NULL;
+ struct filter *filter;
+ struct comp_state *st;
+
+ if (!txn || !(txn->rsp.flags & HTTP_MSGF_COMPRESSING))
+ return 0;
+
+ list_for_each_entry(filter, &strm_flt(smp->strm)->filters, list) {
+ if (FLT_ID(filter) != http_comp_flt_id)
+ continue;
+
+ if (!(st = filter->ctx))
+ break;
+
+ /* SMP_F_CONST: the string points into the algo config, not a chunk */
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ smp->data.u.str.area = st->comp_algo[COMP_DIR_RES]->cfg_name;
+ smp->data.u.str.data = st->comp_algo[COMP_DIR_RES]->cfg_name_len;
+ return 1;
+ }
+ return 0;
+}
+
+/* Declare the config parser for "compression" keyword */
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_LISTEN, "compression", parse_compression_options },
+ { 0, NULL, NULL },
+ }
+};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* Declare the filter parser for "compression" keyword */
+static struct flt_kw_list filter_kws = { "COMP", { }, {
+ { "compression", parse_http_comp_flt, NULL },
+ { NULL, NULL, NULL },
+ }
+};
+
+INITCALL1(STG_REGISTER, flt_register_keywords, &filter_kws);
+
+/* Declare the res.comp / res.comp_algo sample fetches.
+ * Note: must not be declared <const> as its list will be overwritten */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ { "res.comp", smp_fetch_res_comp, 0, NULL, SMP_T_BOOL, SMP_USE_HRSHP },
+ { "res.comp_algo", smp_fetch_res_comp_algo, 0, NULL, SMP_T_STR, SMP_USE_HRSHP },
+ { /* END */ },
+ }
+};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
diff --git a/src/flt_spoe.c b/src/flt_spoe.c
new file mode 100644
index 0000000..70ea2ba
--- /dev/null
+++ b/src/flt_spoe.c
@@ -0,0 +1,4739 @@
+/*
+ * Stream processing offload engine management.
+ *
+ * Copyright 2016 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <ctype.h>
+#include <errno.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/applet.h>
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/filters.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/session.h>
+#include <haproxy/signal.h>
+#include <haproxy/sink.h>
+#include <haproxy/spoe.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+
+
/* Debug helpers: compiled out unless SPOE or full debugging is enabled */
#if defined(DEBUG_SPOE) || defined(DEBUG_FULL)
#define SPOE_PRINTF(x...) fprintf(x)
#define SPOE_DEBUG_STMT(statement) statement
#else
#define SPOE_PRINTF(x...)
#define SPOE_DEBUG_STMT(statement)
#endif

/* Reserved 4 bytes to the frame size. So a frame and its size can be written
 * together in a buffer. The expansion is parenthesized so the macro stays
 * correct inside larger expressions (e.g. "2*MAX_FRAME_SIZE" would otherwise
 * expand to "2*global.tune.bufsize - 4"). */
#define MAX_FRAME_SIZE (global.tune.bufsize - 4)

/* The minimum size for a frame */
#define MIN_FRAME_SIZE 256

/* Reserved for the metadata and the frame type.
 * So <MAX_FRAME_SIZE> - <FRAME_HDR_SIZE> is the maximum payload size */
#define FRAME_HDR_SIZE 32

/* Helper to get the SPOE ctx stored in an appctx's service context */
#define SPOE_APPCTX(appctx) ((struct spoe_appctx *)((appctx)->svcctx))
+
/* SPOE filter id. Used to identify SPOE filters */
const char *spoe_filter_id = "SPOE filter";

/* Set if the handle on SIGUSR1 is registered */
static int sighandler_registered = 0;

/* The name of the SPOE engine, used during the parsing */
char *curengine = NULL;

/* SPOE agent/group/message currently being parsed */
struct spoe_agent *curagent = NULL;
struct spoe_group *curgrp = NULL;
struct spoe_message *curmsg = NULL;

/* list of SPOE messages and placeholders used during the parsing */
struct list curmsgs;
struct list curgrps;
struct list curmphs;
struct list curgphs;
struct list curvars;

/* list of log servers used during the parsing */
struct list curloggers;

/* agent's proxy flags (PR_O_* and PR_O2_*) used during parsing */
int curpxopts;
int curpxopts2;

/* Pools used to allocate SPOE structs */
DECLARE_STATIC_POOL(pool_head_spoe_ctx, "spoe_ctx", sizeof(struct spoe_context));
DECLARE_STATIC_POOL(pool_head_spoe_appctx, "spoe_appctx", sizeof(struct spoe_appctx));

/* SPOE filter operations (filled elsewhere in this file) */
struct flt_ops spoe_ops;

/* Forward declarations for helpers defined later in this file */
static int spoe_queue_context(struct spoe_context *ctx);
static int spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait);
static void spoe_release_buffer(struct buffer *buf, struct buffer_wait *buffer_wait);
static struct appctx *spoe_create_appctx(struct spoe_config *conf);
+
+/********************************************************************
+ * helper functions/globals
+ ********************************************************************/
+static void
+spoe_release_placeholder(struct spoe_placeholder *ph)
+{
+ if (!ph)
+ return;
+ free(ph->id);
+ free(ph);
+}
+
+static void
+spoe_release_message(struct spoe_message *msg)
+{
+ struct spoe_arg *arg, *argback;
+ struct acl *acl, *aclback;
+
+ if (!msg)
+ return;
+ free(msg->id);
+ free(msg->conf.file);
+ list_for_each_entry_safe(arg, argback, &msg->args, list) {
+ release_sample_expr(arg->expr);
+ free(arg->name);
+ LIST_DELETE(&arg->list);
+ free(arg);
+ }
+ list_for_each_entry_safe(acl, aclback, &msg->acls, list) {
+ LIST_DELETE(&acl->list);
+ prune_acl(acl);
+ free(acl);
+ }
+ free_acl_cond(msg->cond);
+ free(msg);
+}
+
+static void
+spoe_release_group(struct spoe_group *grp)
+{
+ if (!grp)
+ return;
+ free(grp->id);
+ free(grp->conf.file);
+ free(grp);
+}
+
+static void
+spoe_release_agent(struct spoe_agent *agent)
+{
+ struct spoe_message *msg, *msgback;
+ struct spoe_group *grp, *grpback;
+ int i;
+
+ if (!agent)
+ return;
+ free(agent->id);
+ free(agent->conf.file);
+ free(agent->var_pfx);
+ free(agent->var_on_error);
+ free(agent->var_t_process);
+ free(agent->var_t_total);
+ list_for_each_entry_safe(msg, msgback, &agent->messages, list) {
+ LIST_DELETE(&msg->list);
+ spoe_release_message(msg);
+ }
+ list_for_each_entry_safe(grp, grpback, &agent->groups, list) {
+ LIST_DELETE(&grp->list);
+ spoe_release_group(grp);
+ }
+ if (agent->rt) {
+ for (i = 0; i < global.nbthread; ++i) {
+ free(agent->rt[i].engine_id);
+ HA_SPIN_DESTROY(&agent->rt[i].lock);
+ }
+ }
+ free(agent->rt);
+ free(agent);
+}
+
/* Human-readable reasons for each SPOE_FRM_ERR_* status code. These strings
 * are sent in DISCONNECT frames and copied into healthcheck error messages. */
static const char *spoe_frm_err_reasons[SPOE_FRM_ERRS] = {
	[SPOE_FRM_ERR_NONE]               = "normal",
	[SPOE_FRM_ERR_IO]                 = "I/O error",
	[SPOE_FRM_ERR_TOUT]               = "a timeout occurred",
	[SPOE_FRM_ERR_TOO_BIG]            = "frame is too big",
	[SPOE_FRM_ERR_INVALID]            = "invalid frame received",
	[SPOE_FRM_ERR_NO_VSN]             = "version value not found",
	[SPOE_FRM_ERR_NO_FRAME_SIZE]      = "max-frame-size value not found",
	[SPOE_FRM_ERR_NO_CAP]             = "capabilities value not found",
	[SPOE_FRM_ERR_BAD_VSN]            = "unsupported version",
	[SPOE_FRM_ERR_BAD_FRAME_SIZE]     = "max-frame-size too big or too small",
	[SPOE_FRM_ERR_FRAG_NOT_SUPPORTED] = "fragmentation not supported",
	[SPOE_FRM_ERR_INTERLACED_FRAMES]  = "invalid interlaced frames",
	[SPOE_FRM_ERR_FRAMEID_NOTFOUND]   = "frame-id not found",
	[SPOE_FRM_ERR_RES]                = "resource allocation error",
	[SPOE_FRM_ERR_UNKNOWN]            = "an unknown error occurred",
};

/* Configuration keyword for each SPOE event, used while parsing the engine
 * configuration */
static const char *spoe_event_str[SPOE_EV_EVENTS] = {
	[SPOE_EV_ON_CLIENT_SESS] = "on-client-session",
	[SPOE_EV_ON_TCP_REQ_FE]  = "on-frontend-tcp-request",
	[SPOE_EV_ON_TCP_REQ_BE]  = "on-backend-tcp-request",
	[SPOE_EV_ON_HTTP_REQ_FE] = "on-frontend-http-request",
	[SPOE_EV_ON_HTTP_REQ_BE] = "on-backend-http-request",

	[SPOE_EV_ON_SERVER_SESS] = "on-server-session",
	[SPOE_EV_ON_TCP_RSP]     = "on-tcp-response",
	[SPOE_EV_ON_HTTP_RSP]    = "on-http-response",
};
+
+
#if defined(DEBUG_SPOE) || defined(DEBUG_FULL)

/* Debug-only string names for SPOE context states */
static const char *spoe_ctx_state_str[SPOE_CTX_ST_ERROR+1] = {
	[SPOE_CTX_ST_NONE]          = "NONE",
	[SPOE_CTX_ST_READY]         = "READY",
	[SPOE_CTX_ST_ENCODING_MSGS] = "ENCODING_MSGS",
	[SPOE_CTX_ST_SENDING_MSGS]  = "SENDING_MSGS",
	[SPOE_CTX_ST_WAITING_ACK]   = "WAITING_ACK",
	[SPOE_CTX_ST_DONE]          = "DONE",
	[SPOE_CTX_ST_ERROR]         = "ERROR",
};

/* Debug-only string names for SPOE applet states */
static const char *spoe_appctx_state_str[SPOE_APPCTX_ST_END+1] = {
	[SPOE_APPCTX_ST_CONNECT]             = "CONNECT",
	[SPOE_APPCTX_ST_CONNECTING]          = "CONNECTING",
	[SPOE_APPCTX_ST_IDLE]                = "IDLE",
	[SPOE_APPCTX_ST_PROCESSING]          = "PROCESSING",
	[SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY] = "SENDING_FRAG_NOTIFY",
	[SPOE_APPCTX_ST_WAITING_SYNC_ACK]    = "WAITING_SYNC_ACK",
	[SPOE_APPCTX_ST_DISCONNECT]          = "DISCONNECT",
	[SPOE_APPCTX_ST_DISCONNECTING]       = "DISCONNECTING",
	[SPOE_APPCTX_ST_EXIT]                = "EXIT",
	[SPOE_APPCTX_ST_END]                 = "END",
};

#endif
+
+/* Used to generates a unique id for an engine. On success, it returns a
+ * allocated string. So it is the caller's responsibility to release it. If the
+ * allocation failed, it returns NULL. */
+static char *
+generate_pseudo_uuid()
+{
+ ha_generate_uuid(&trash);
+ return my_strndup(trash.area, trash.data);
+}
+
+/* set/add to <t> the elapsed time since <since> and now */
+static inline void
+spoe_update_stat_time(ullong *since, long *t)
+{
+ if (*t == -1)
+ *t = ns_to_ms(now_ns - *since);
+ else
+ *t += ns_to_ms(now_ns - *since);
+ *since = 0;
+}
+
/********************************************************************
 * Functions that encode/decode SPOE frames
 ********************************************************************/
/* Helper to get static string length, excluding the terminating null byte */
#define SLEN(str) (sizeof(str)-1)

/* Predefined key used in HELLO/DISCONNECT frames */
#define SUPPORTED_VERSIONS_KEY     "supported-versions"
#define VERSION_KEY                "version"
#define MAX_FRAME_SIZE_KEY         "max-frame-size"
#define CAPABILITIES_KEY           "capabilities"
#define ENGINE_ID_KEY              "engine-id"
#define HEALTHCHECK_KEY            "healthcheck"
#define STATUS_CODE_KEY            "status-code"
#define MSG_KEY                    "message"

/* A supported protocol version range. <min>/<max> are encoded as
 * 1000 * MAJOR + MINOR (see spoe_str_to_vsn()). */
struct spoe_version {
	char *str;
	int   min;
	int   max;
};

/* All supported versions */
static struct spoe_version supported_versions[] = {
	/* 1.0 is now unsupported because of a bug about frame's flags*/
	{"2.0", 2000, 2000},
	{NULL,  0, 0}
};

/* Comma-separated list of supported versions, advertised in HELLO frames.
 * Must be kept in sync with <supported_versions> above. */
#define SUPPORTED_VERSIONS_VAL  "2.0"
+
/* Convert a string to a SPOE version value. The string must follow the format
 * "MAJOR.MINOR", with optional surrounding spaces. It is converted into the
 * integer (1000 * MAJOR + MINOR). If an error occurred, -1 is returned. */
static int
spoe_str_to_vsn(const char *str, size_t len)
{
	const char *p, *end;
	int maj, min, vsn;

	p = str;
	end = str+len;
	maj = min = 0;
	vsn = -1;

	/* skip leading spaces */
	while (p < end && isspace((unsigned char)*p))
		p++;

	/* parse Major number, until the '.'. The bounds check comes first:
	 * the previous code dereferenced <p> in the loop condition before
	 * checking <end>, reading one byte past the buffer when no '.' was
	 * present. */
	while (p < end && *p != '.') {
		if (*p < '0' || *p > '9')
			goto out;
		maj *= 10;
		maj += (*p - '0');
		p++;
	}

	/* check Major version and require the '.' separator */
	if (!maj || p == end)
		goto out;

	p++; /* skip the '.' */
	if (p >= end || *p < '0' || *p > '9') /* Minor number is missing */
		goto out;

	/* Parse Minor number */
	while (p < end) {
		if (*p < '0' || *p > '9')
			break;
		min *= 10;
		min += (*p - '0');
		p++;
	}

	/* check Minor number */
	if (min > 999)
		goto out;

	/* skip trailing spaces */
	while (p < end && isspace((unsigned char)*p))
		p++;
	if (p != end)
		goto out;

	vsn = maj * 1000 + min;
  out:
	return vsn;
}
+
/* Encode the HELLO frame sent by HAProxy to an agent. It returns the number of
 * encoded bytes in the frame on success, 0 if an encoding error occurred and -1
 * if a fatal error occurred. On encoding error, the applet status code is set
 * to SPOE_FRM_ERR_TOO_BIG. */
static int
spoe_prepare_hahello_frame(struct appctx *appctx, char *frame, size_t size)
{
	struct buffer *chk;
	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
	char *p, *end;
	unsigned int flags = SPOE_FRM_FL_FIN;
	size_t sz;

	p = frame;
	end = frame+size;

	/* Set Frame type */
	*p++ = SPOE_FRM_T_HAPROXY_HELLO;

	/* Set flags (network byte order) */
	flags = htonl(flags);
	memcpy(p, (char *)&flags, 4);
	p += 4;

	/* No stream-id and frame-id for HELLO frames */
	*p++ = 0; *p++ = 0;

	/* There are 3 mandatory items: "supported-versions", "max-frame-size"
	 * and "capabilities" */

	/* "supported-versions" K/V item */
	sz = SLEN(SUPPORTED_VERSIONS_KEY);
	if (spoe_encode_buffer(SUPPORTED_VERSIONS_KEY, sz, &p, end) == -1)
		goto too_big;

	*p++ = SPOE_DATA_T_STR;
	sz = SLEN(SUPPORTED_VERSIONS_VAL);
	if (spoe_encode_buffer(SUPPORTED_VERSIONS_VAL, sz, &p, end) == -1)
		goto too_big;

	/* "max-frame-size" K/V item */
	sz = SLEN(MAX_FRAME_SIZE_KEY);
	if (spoe_encode_buffer(MAX_FRAME_SIZE_KEY, sz, &p, end) == -1)
		goto too_big;

	*p++ = SPOE_DATA_T_UINT32;
	if (encode_varint(SPOE_APPCTX(appctx)->max_frame_size, &p, end) == -1)
		goto too_big;

	/* "capabilities" K/V item: a comma-separated list built in a trash
	 * chunk from the agent flags. <agent> is NULL when the frame is built
	 * for a healthcheck (see spoe_prepare_healthcheck_request()); no
	 * capability is advertised in that case. */
	sz = SLEN(CAPABILITIES_KEY);
	if (spoe_encode_buffer(CAPABILITIES_KEY, sz, &p, end) == -1)
		goto too_big;

	*p++ = SPOE_DATA_T_STR;
	chk = get_trash_chunk();
	if (agent != NULL && (agent->flags & SPOE_FL_PIPELINING)) {
		memcpy(chk->area, "pipelining", 10);
		chk->data += 10;
	}
	if (agent != NULL && (agent->flags & SPOE_FL_ASYNC)) {
		if (chk->data) chk->area[chk->data++] = ',';
		memcpy(chk->area+chk->data, "async", 5);
		chk->data += 5;
	}
	if (agent != NULL && (agent->flags & SPOE_FL_RCV_FRAGMENTATION)) {
		if (chk->data) chk->area[chk->data++] = ',';
		memcpy(chk->area+chk->data, "fragmentation", 13);
		chk->data += 13;
	}
	if (spoe_encode_buffer(chk->area, chk->data, &p, end) == -1)
		goto too_big;

	/* (optional) "engine-id" K/V item, if present for the current thread */
	if (agent != NULL && agent->rt[tid].engine_id != NULL) {
		sz = SLEN(ENGINE_ID_KEY);
		if (spoe_encode_buffer(ENGINE_ID_KEY, sz, &p, end) == -1)
			goto too_big;

		*p++ = SPOE_DATA_T_STR;
		sz = strlen(agent->rt[tid].engine_id);
		if (spoe_encode_buffer(agent->rt[tid].engine_id, sz, &p, end) == -1)
			goto too_big;
	}

	return (p - frame);

  too_big:
	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOO_BIG;
	return 0;
}
+
/* Encode DISCONNECT frame sent by HAProxy to an agent. It returns the number of
 * encoded bytes in the frame on success, 0 if an encoding error occurred and -1
 * if a fatal error occurred. The frame carries the current applet status code
 * and its human-readable reason. */
static int
spoe_prepare_hadiscon_frame(struct appctx *appctx, char *frame, size_t size)
{
	const char *reason;
	char *p, *end;
	unsigned int flags = SPOE_FRM_FL_FIN;
	size_t sz;

	p = frame;
	end = frame+size;

	/* Set Frame type */
	*p++ = SPOE_FRM_T_HAPROXY_DISCON;

	/* Set flags (network byte order) */
	flags = htonl(flags);
	memcpy(p, (char *)&flags, 4);
	p += 4;

	/* No stream-id and frame-id for DISCONNECT frames */
	*p++ = 0; *p++ = 0;

	/* Clamp unknown status codes so the reason lookup below stays in
	 * bounds */
	if (SPOE_APPCTX(appctx)->status_code >= SPOE_FRM_ERRS)
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_UNKNOWN;

	/* There are 2 mandatory items: "status-code" and "message" */

	/* "status-code" K/V item */
	sz = SLEN(STATUS_CODE_KEY);
	if (spoe_encode_buffer(STATUS_CODE_KEY, sz, &p, end) == -1)
		goto too_big;

	*p++ = SPOE_DATA_T_UINT32;
	if (encode_varint(SPOE_APPCTX(appctx)->status_code, &p, end) == -1)
		goto too_big;

	/* "message" K/V item */
	sz = SLEN(MSG_KEY);
	if (spoe_encode_buffer(MSG_KEY, sz, &p, end) == -1)
		goto too_big;

	/* Get the message corresponding to the status code */
	reason = spoe_frm_err_reasons[SPOE_APPCTX(appctx)->status_code];

	*p++ = SPOE_DATA_T_STR;
	sz = strlen(reason);
	if (spoe_encode_buffer(reason, sz, &p, end) == -1)
		goto too_big;

	return (p - frame);

  too_big:
	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOO_BIG;
	return 0;
}
+
/* Encode the NOTIFY frame sent by HAProxy to an agent. It returns the number of
 * encoded bytes in the frame on success, 0 if an encoding error occurred and -1
 * if a fatal error occurred. The payload (messages already encoded in
 * <ctx->buffer>) is copied after the frame metadata. */
static int
spoe_prepare_hanotify_frame(struct appctx *appctx, struct spoe_context *ctx,
			    char *frame, size_t size)
{
	char *p, *end;
	unsigned int stream_id, frame_id;
	unsigned int flags = SPOE_FRM_FL_FIN;
	size_t sz;

	p = frame;
	end = frame+size;

	stream_id = ctx->stream_id;
	frame_id  = ctx->frame_id;

	if (ctx->flags & SPOE_CTX_FL_FRAGMENTED) {
		/* The fragmentation is not supported by the applet: this is a
		 * fatal error */
		if (!(SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_FRAGMENTATION)) {
			SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAG_NOT_SUPPORTED;
			return -1;
		}
		/* use the fragmentation flags instead of FIN */
		flags = ctx->frag_ctx.flags;
	}

	/* Set Frame type */
	*p++ = SPOE_FRM_T_HAPROXY_NOTIFY;

	/* Set flags (network byte order) */
	flags = htonl(flags);
	memcpy(p, (char *)&flags, 4);
	p += 4;

	/* Set stream-id and frame-id */
	if (encode_varint(stream_id, &p, end) == -1)
		goto too_big;
	if (encode_varint(frame_id, &p, end) == -1)
		goto too_big;

	/* Copy encoded messages, if possible */
	sz = b_data(&ctx->buffer);
	if (p + sz >= end)
		goto too_big;
	memcpy(p, b_head(&ctx->buffer), sz);
	p += sz;

	return (p - frame);

  too_big:
	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOO_BIG;
	return 0;
}
+
/* Encode next part of a fragmented frame sent by HAProxy to an agent. It
 * returns the number of encoded bytes in the frame on success, 0 if an encoding
 * error occurred and -1 if a fatal error occurred. */
static int
spoe_prepare_hafrag_frame(struct appctx *appctx, struct spoe_context *ctx,
			  char *frame, size_t size)
{
	char *p, *end;
	unsigned int stream_id, frame_id;
	unsigned int flags;
	size_t sz;

	p = frame;
	end = frame+size;

	/* <ctx> is null when the stream has aborted the processing of a
	 * fragmented frame. In this case, we must notify the corresponding
	 * agent using ids stored in <frag_ctx>. */
	if (ctx == NULL) {
		flags = (SPOE_FRM_FL_FIN|SPOE_FRM_FL_ABRT);
		stream_id = SPOE_APPCTX(appctx)->frag_ctx.cursid;
		frame_id  = SPOE_APPCTX(appctx)->frag_ctx.curfid;
	}
	else {
		flags = ctx->frag_ctx.flags;
		stream_id = ctx->stream_id;
		frame_id  = ctx->frame_id;
	}

	/* Set Frame type: continuation fragments use the UNSET type */
	*p++ = SPOE_FRM_T_UNSET;

	/* Set flags (network byte order) */
	flags = htonl(flags);
	memcpy(p, (char *)&flags, 4);
	p += 4;

	/* Set stream-id and frame-id */
	if (encode_varint(stream_id, &p, end) == -1)
		goto too_big;
	if (encode_varint(frame_id, &p, end) == -1)
		goto too_big;

	/* An abort frame carries no payload */
	if (ctx == NULL)
		goto end;

	/* Copy encoded messages, if possible */
	sz = b_data(&ctx->buffer);
	if (p + sz >= end)
		goto too_big;
	memcpy(p, b_head(&ctx->buffer), sz);
	p += sz;

  end:
	return (p - frame);

  too_big:
	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOO_BIG;
	return 0;
}
+
/* Decode and process the HELLO frame sent by an agent. It returns the number of
 * read bytes on success, 0 if a decoding error occurred, and -1 if a fatal
 * error occurred. On success, the negotiated version, max-frame-size and
 * capability flags are stored in the SPOE applet context. */
static int
spoe_handle_agenthello_frame(struct appctx *appctx, char *frame, size_t size)
{
	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
	char *p, *end;
	int vsn, max_frame_size;
	unsigned int flags;

	p = frame;
	end = frame + size;

	/* Check frame type */
	if (*p++ != SPOE_FRM_T_AGENT_HELLO) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	if (size < 7 /* TYPE + METADATA */) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	/* Retrieve flags (network byte order) */
	memcpy((char *)&flags, p, 4);
	flags = ntohl(flags);
	p += 4;

	/* Fragmentation is not supported for HELLO frame */
	if (!(flags & SPOE_FRM_FL_FIN)) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAG_NOT_SUPPORTED;
		return -1;
	}

	/* stream-id and frame-id must be cleared */
	if (*p != 0 || *(p+1) != 0) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}
	p += 2;

	/* There are 3 mandatory items: "version", "max-frame-size" and
	 * "capabilities" */

	/* Loop on K/V items. Note: from here on, <flags> is reused to
	 * accumulate SPOE_APPCTX_FL_* capability flags, not frame flags. */
	vsn = max_frame_size = flags = 0;
	while (p < end) {
		char *str;
		uint64_t sz;
		int ret;

		/* Decode the item key. Keys are matched below on prefix
		 * (sz >= strlen(KEY) + memcmp). */
		ret = spoe_decode_buffer(&p, end, &str, &sz);
		if (ret == -1 || !sz) {
			SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
			return 0;
		}

		/* Check "version" K/V item */
		if (sz >= strlen(VERSION_KEY) && !memcmp(str, VERSION_KEY, strlen(VERSION_KEY))) {
			int i, type = *p++;

			/* The value must be a string */
			if ((type & SPOE_DATA_T_MASK) != SPOE_DATA_T_STR) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			if (spoe_decode_buffer(&p, end, &str, &sz) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}

			vsn = spoe_str_to_vsn(str, sz);
			if (vsn == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_BAD_VSN;
				return -1;
			}
			/* the announced version must fall in one of our
			 * supported ranges */
			for (i = 0; supported_versions[i].str != NULL; ++i) {
				if (vsn >= supported_versions[i].min &&
				    vsn <= supported_versions[i].max)
					break;
			}
			if (supported_versions[i].str == NULL) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_BAD_VSN;
				return -1;
			}
		}
		/* Check "max-frame-size" K/V item */
		else if (sz >= strlen(MAX_FRAME_SIZE_KEY) && !memcmp(str, MAX_FRAME_SIZE_KEY, strlen(MAX_FRAME_SIZE_KEY))) {
			int type = *p++;

			/* The value must be integer */
			if ((type & SPOE_DATA_T_MASK) != SPOE_DATA_T_INT32 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_INT64 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_UINT32 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_UINT64) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			if (decode_varint(&p, end, &sz) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			/* The agent may only lower our advertised size, within
			 * bounds */
			if (sz < MIN_FRAME_SIZE ||
			    sz > SPOE_APPCTX(appctx)->max_frame_size) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_BAD_FRAME_SIZE;
				return -1;
			}
			max_frame_size = sz;
		}
		/* Check "capabilities" K/V item */
		else if (sz >= strlen(CAPABILITIES_KEY) && !memcmp(str, CAPABILITIES_KEY, strlen(CAPABILITIES_KEY))) {
			int type = *p++;

			/* The value must be a string */
			if ((type & SPOE_DATA_T_MASK) != SPOE_DATA_T_STR) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			if (spoe_decode_buffer(&p, end, &str, &sz) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}

			/* Parse the comma-separated capability list. A token
			 * is only accepted when followed by end-of-string, a
			 * space or a comma. */
			while (sz) {
				char *delim;

				/* Skip leading spaces */
				for (; isspace((unsigned char)*str) && sz; str++, sz--);

				if (sz >= 10 && !strncmp(str, "pipelining", 10)) {
					str += 10; sz -= 10;
					if (!sz || isspace((unsigned char)*str) || *str == ',')
						flags |= SPOE_APPCTX_FL_PIPELINING;
				}
				else if (sz >= 5 && !strncmp(str, "async", 5)) {
					str += 5; sz -= 5;
					if (!sz || isspace((unsigned char)*str) || *str == ',')
						flags |= SPOE_APPCTX_FL_ASYNC;
				}
				else if (sz >= 13 && !strncmp(str, "fragmentation", 13)) {
					str += 13; sz -= 13;
					if (!sz || isspace((unsigned char)*str) || *str == ',')
						flags |= SPOE_APPCTX_FL_FRAGMENTATION;
				}

				/* Get the next comma or break */
				if (!sz || (delim = memchr(str, ',', sz)) == NULL)
					break;
				delim++;
				sz -= (delim - str);
				str = delim;
			}
		}
		else {
			/* Silently ignore unknown item */
			if (spoe_skip_data(&p, end) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
		}
	}

	/* Final checks: both mandatory values must have been seen */
	if (!vsn) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_NO_VSN;
		return -1;
	}
	if (!max_frame_size) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_NO_FRAME_SIZE;
		return -1;
	}
	/* Only keep capabilities both sides support. <agent> is NULL on the
	 * healthcheck path (see spoe_handle_healthcheck_response()). */
	if (!agent)
		flags &= ~(SPOE_APPCTX_FL_PIPELINING|SPOE_APPCTX_FL_ASYNC);
	else {
		if ((flags & SPOE_APPCTX_FL_PIPELINING) && !(agent->flags & SPOE_FL_PIPELINING))
			flags &= ~SPOE_APPCTX_FL_PIPELINING;
		if ((flags & SPOE_APPCTX_FL_ASYNC) && !(agent->flags & SPOE_FL_ASYNC))
			flags &= ~SPOE_APPCTX_FL_ASYNC;
	}

	SPOE_APPCTX(appctx)->version        = (unsigned int)vsn;
	SPOE_APPCTX(appctx)->max_frame_size = (unsigned int)max_frame_size;
	SPOE_APPCTX(appctx)->flags         |= flags;

	return (p - frame);
}
+
/* Decode DISCONNECT frame sent by an agent. It returns the number of read
 * bytes on success, 0 if the frame can be ignored and -1 if an error
 * occurred. On success the agent's status code is stored in the applet
 * context. */
static int
spoe_handle_agentdiscon_frame(struct appctx *appctx, char *frame, size_t size)
{
	char *p, *end;
	unsigned int flags;

	p = frame;
	end = frame + size;

	/* Check frame type */
	if (*p++ != SPOE_FRM_T_AGENT_DISCON) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	if (size < 7 /* TYPE + METADATA */) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	/* Retrieve flags (network byte order) */
	memcpy((char *)&flags, p, 4);
	flags = ntohl(flags);
	p += 4;

	/* Fragmentation is not supported for DISCONNECT frame */
	if (!(flags & SPOE_FRM_FL_FIN)) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAG_NOT_SUPPORTED;
		return -1;
	}

	/* stream-id and frame-id must be cleared */
	if (*p != 0 || *(p+1) != 0) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}
	p += 2;

	/* There are 2 mandatory items: "status-code" and "message" */

	/* Loop on K/V items. Keys are matched on prefix (sz >= strlen(KEY) +
	 * memcmp). */
	while (p < end) {
		char *str;
		uint64_t sz;
		int ret;

		/* Decode the item key */
		ret = spoe_decode_buffer(&p, end, &str, &sz);
		if (ret == -1 || !sz) {
			SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
			return 0;
		}

		/* Check "status-code" K/V item */
		if (sz >= strlen(STATUS_CODE_KEY) && !memcmp(str, STATUS_CODE_KEY, strlen(STATUS_CODE_KEY))) {
			int type = *p++;

			/* The value must be an integer */
			if ((type & SPOE_DATA_T_MASK) != SPOE_DATA_T_INT32 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_INT64 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_UINT32 &&
			    (type & SPOE_DATA_T_MASK) != SPOE_DATA_T_UINT64) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			if (decode_varint(&p, end, &sz) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			SPOE_APPCTX(appctx)->status_code = sz;
		}

		/* Check "message" K/V item */
		else if (sz >= strlen(MSG_KEY) && !memcmp(str, MSG_KEY, strlen(MSG_KEY))) {
			int type = *p++;

			/* The value must be a string, at most 255 chars */
			if ((type & SPOE_DATA_T_MASK) != SPOE_DATA_T_STR) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			ret = spoe_decode_buffer(&p, end, &str, &sz);
			if (ret == -1 || sz > 255) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
			/* The reason is only kept for debug traces; note it
			 * points into the frame buffer, it is not copied. */
#if defined(DEBUG_SPOE) || defined(DEBUG_FULL)
			SPOE_APPCTX(appctx)->reason = str;
			SPOE_APPCTX(appctx)->rlen  = sz;
#endif
		}
		else {
			/* Silently ignore unknown item */
			if (spoe_skip_data(&p, end) == -1) {
				SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
				return 0;
			}
		}
	}

	return (p - frame);
}
+
+
/* Decode ACK frame sent by an agent. It returns the number of read bytes on
 * success, 0 if the frame can be ignored, 1 to retry later (no buffer
 * available) and -1 if an error occurred. On success, *ctx points to the SPOE
 * context that was waiting for this ACK and the encoded actions are handed
 * over to it. */
static int
spoe_handle_agentack_frame(struct appctx *appctx, struct spoe_context **ctx,
			   char *frame, size_t size)
{
	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
	char *p, *end;
	uint64_t stream_id, frame_id;
	int len;
	unsigned int flags;

	p = frame;
	end = frame + size;
	*ctx = NULL;

	/* Check frame type */
	if (*p++ != SPOE_FRM_T_AGENT_ACK) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	if (size < 7 /* TYPE + METADATA */) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	/* Retrieve flags (network byte order) */
	memcpy((char *)&flags, p, 4);
	flags = ntohl(flags);
	p += 4;

	/* Fragmentation is not supported for now */
	if (!(flags & SPOE_FRM_FL_FIN)) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAG_NOT_SUPPORTED;
		return -1;
	}

	/* Get the stream-id and the frame-id */
	if (decode_varint(&p, end, &stream_id) == -1) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}
	if (decode_varint(&p, end, &frame_id) == -1) {
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	/* Try to find the corresponding SPOE context: in the per-thread agent
	 * queue in async mode, in this applet's queue otherwise */
	if (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_ASYNC) {
		list_for_each_entry((*ctx), &agent->rt[tid].waiting_queue, list) {
			if ((*ctx)->stream_id == (unsigned int)stream_id &&
			    (*ctx)->frame_id == (unsigned int)frame_id)
				goto found;
		}
	}
	else {
		list_for_each_entry((*ctx), &SPOE_APPCTX(appctx)->waiting_queue, list) {
			if ((*ctx)->stream_id == (unsigned int)stream_id &&
			    (*ctx)->frame_id == (unsigned int)frame_id)
				goto found;
		}
	}

	/* Maybe it acknowledges the fragmented frame currently being sent */
	if (SPOE_APPCTX(appctx)->frag_ctx.ctx &&
	    SPOE_APPCTX(appctx)->frag_ctx.cursid == (unsigned int)stream_id &&
	    SPOE_APPCTX(appctx)->frag_ctx.curfid == (unsigned int)frame_id) {

		/* ABRT bit is set for an unfinished fragmented frame */
		if (flags & SPOE_FRM_FL_ABRT) {
			*ctx = SPOE_APPCTX(appctx)->frag_ctx.ctx;
			(*ctx)->state = SPOE_CTX_ST_ERROR;
			(*ctx)->status_code = SPOE_CTX_ERR_FRAG_FRAME_ABRT;
			/* Ignore the payload */
			goto end;
		}
		/* TODO: Handle more flags for fragmented frames: RESUME, FINISH... */
		/* For now, we ignore the ack */
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_INVALID;
		return 0;
	}

	/* No Stream found, ignore the frame */
	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
		    " - Ignore ACK frame"
		    " - stream-id=%u - frame-id=%u\n",
		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
		    __FUNCTION__, appctx,
		    (unsigned int)stream_id, (unsigned int)frame_id);

	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAMEID_NOTFOUND;
	if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK) {
		/* Report an error if we are waiting the ack for another frame,
		 * but not if there is no longer frame waiting for a ack
		 * (timeout)
		 */
		if (!LIST_ISEMPTY(&SPOE_APPCTX(appctx)->waiting_queue) ||
		    SPOE_APPCTX(appctx)->frag_ctx.ctx)
			return -1;
		appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
		SPOE_APPCTX(appctx)->cur_fpa = 0;
	}
	return 0;

  found:
	if (!spoe_acquire_buffer(&SPOE_APPCTX(appctx)->buffer,
				 &SPOE_APPCTX(appctx)->buffer_wait)) {
		*ctx = NULL;
		return 1; /* Retry later */
	}

	/* Copy encoded actions */
	len = (end - p);
	memcpy(b_head(&SPOE_APPCTX(appctx)->buffer), p, len);
	b_set_data(&SPOE_APPCTX(appctx)->buffer, len);
	p += len;

	/* Transfer the buffer ownership to the SPOE context */
	(*ctx)->buffer = SPOE_APPCTX(appctx)->buffer;
	SPOE_APPCTX(appctx)->buffer = BUF_NULL;

	(*ctx)->state = SPOE_CTX_ST_DONE;

  end:
	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
		    " - ACK frame received"
		    " - ctx=%p - stream-id=%u - frame-id=%u - flags=0x%08x\n",
		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
		    __FUNCTION__, appctx, *ctx, (*ctx)->stream_id,
		    (*ctx)->frame_id, flags);
	return (p - frame);
}
+
/* This function is used in cfgparse.c and declared in proto/checks.h. It
 * prepares the request to send to agents during a healthcheck: a HELLO frame
 * with an extra "healthcheck" boolean item, preceded by its 4-byte length.
 * On success, *req points to a malloc'd buffer (owned by the caller) and *len
 * holds its size. It returns 0 on success and -1 if an error occurred. */
int
spoe_prepare_healthcheck_request(char **req, int *len)
{
	struct appctx appctx;
	struct spoe_appctx spoe_appctx;
	/* NOTE(review): MAX_FRAME_SIZE expands to global.tune.bufsize-4, so
	 * <buf> is a runtime-sized VLA of bufsize bytes on the stack —
	 * confirm stack headroom is sufficient on this call path. */
	char *frame, *end, buf[MAX_FRAME_SIZE+4];
	size_t sz;
	int ret;

	/* Build a throwaway appctx so the frame encoders can be reused; a
	 * NULL agent makes them skip agent-specific items. */
	memset(&appctx, 0, sizeof(appctx));
	memset(&spoe_appctx, 0, sizeof(spoe_appctx));
	memset(buf, 0, sizeof(buf));

	appctx.svcctx = &spoe_appctx;
	SPOE_APPCTX(&appctx)->max_frame_size = MAX_FRAME_SIZE;

	frame = buf+4; /* Reserved the 4 first bytes for the frame size */
	end = frame + MAX_FRAME_SIZE;

	ret = spoe_prepare_hahello_frame(&appctx, frame, MAX_FRAME_SIZE);
	if (ret <= 0)
		return -1;
	frame += ret;

	/* Add "healthcheck" K/V item */
	sz = SLEN(HEALTHCHECK_KEY);
	if (spoe_encode_buffer(HEALTHCHECK_KEY, sz, &frame, end) == -1)
		return -1;
	*frame++ = (SPOE_DATA_T_BOOL | SPOE_DATA_FL_TRUE);

	/* Prepend the frame length (network byte order, excluding itself) */
	*len = frame - buf;
	sz = htonl(*len - 4);
	memcpy(buf, (char *)&sz, 4);

	if ((*req = malloc(*len)) == NULL)
		return -1;
	memcpy(*req, buf, *len);
	return 0;
}
+
+/* This function is used in checks.c and declared in proto/checks.h. It decode
+ * the response received from an agent during a healthcheck. It returns 0 on
+ * success and -1 if an error occurred. */
+int
+spoe_handle_healthcheck_response(char *frame, size_t size, char *err, int errlen)
+{
+ struct appctx appctx;
+ struct spoe_appctx spoe_appctx;
+
+ memset(&appctx, 0, sizeof(appctx));
+ memset(&spoe_appctx, 0, sizeof(spoe_appctx));
+
+ appctx.svcctx = &spoe_appctx;
+ SPOE_APPCTX(&appctx)->max_frame_size = MAX_FRAME_SIZE;
+
+ if (*frame == SPOE_FRM_T_AGENT_DISCON) {
+ spoe_handle_agentdiscon_frame(&appctx, frame, size);
+ goto error;
+ }
+ if (spoe_handle_agenthello_frame(&appctx, frame, size) <= 0)
+ goto error;
+
+ return 0;
+
+ error:
+ if (SPOE_APPCTX(&appctx)->status_code >= SPOE_FRM_ERRS)
+ SPOE_APPCTX(&appctx)->status_code = SPOE_FRM_ERR_UNKNOWN;
+ strncpy(err, spoe_frm_err_reasons[SPOE_APPCTX(&appctx)->status_code], errlen);
+ return -1;
+}
+
/* Send a SPOE frame to an agent. It returns -1 when an error occurred, 0 when
 * the frame can be ignored, 1 to retry later, and the frame length on
 * success. <buf> must hold the frame at offset 4, the first 4 bytes being
 * reserved for the length prefix written here. */
static int
spoe_send_frame(struct appctx *appctx, char *buf, size_t framesz)
{
	struct stconn *sc = appctx_sc(appctx);
	int ret;
	uint32_t netint;

	/* 4 bytes are reserved at the beginning of <buf> to store the frame
	 * length (network byte order). */
	netint = htonl(framesz);
	memcpy(buf, (char *)&netint, 4);
	ret = applet_putblk(appctx, buf, framesz+4);
	if (ret <= 0) {
		/* -3 means the input buffer is not allocated yet: ask for
		 * room and retry later */
		if (ret == -3 && b_is_null(&sc_ic(sc)->buf)) {
			/* WT: is this still needed for the case ret==-3 ? */
			sc_need_room(sc, 0);
			return 1; /* retry */
		}
		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_IO;
		return -1; /* error */
	}
	return framesz;
}
+
+/* Receive a SPOE frame from an agent. It returns -1 when an error occurred, 0
+ * when the frame can be ignored, 1 to retry later and the frame length on
+ * success. */
+static int
+spoe_recv_frame(struct appctx *appctx, char *buf, size_t framesz)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	int ret;
+	uint32_t netint;
+
+	/* Peek the 4-byte network-order length header first. Note that the
+	 * <framesz> parameter is then overwritten with the announced length. */
+	ret = co_getblk(sc_oc(sc), (char *)&netint, 4, 0);
+	if (ret > 0) {
+		framesz = ntohl(netint);
+		if (framesz > SPOE_APPCTX(appctx)->max_frame_size) {
+			SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOO_BIG;
+			return -1;
+		}
+		/* Copy the payload, skipping the 4-byte length header */
+		ret = co_getblk(sc_oc(sc), buf, framesz, 4);
+	}
+	if (ret <= 0) {
+		if (ret == 0) {
+			/* not enough data yet */
+			return 1; /* retry */
+		}
+		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_IO;
+		return -1; /* error */
+	}
+	return framesz;
+}
+
+/********************************************************************
+ * Functions that manage the SPOE applet
+ ********************************************************************/
+static int
+spoe_wakeup_appctx(struct appctx *appctx)
+{
+ applet_will_consume(appctx);
+ applet_have_more_data(appctx);
+ appctx_wakeup(appctx);
+ return 1;
+}
+
+/* Callback function that catches applet timeouts. If a timeout occurred, we set
+ * <appctx->st1> flag and the SPOE applet is woken up. */
+static struct task *
+spoe_process_appctx(struct task * task, void *context, unsigned int state)
+{
+	struct appctx *appctx = context;
+	int expired = tick_is_expired(task->expire, now_ms);
+
+	/* Report the timeout, if any, through <st1> and disarm the timer */
+	appctx->st1 = expired ? SPOE_APPCTX_ERR_TOUT : SPOE_APPCTX_ERR_NONE;
+	if (expired)
+		task->expire = TICK_ETERNITY;
+
+	spoe_wakeup_appctx(appctx);
+	return task;
+}
+
+/* Applet init callback: allocate the timeout task, finalize the appctx
+ * startup, register the applet in the agent's per-thread applet list and
+ * switch it in CONNECT state. Returns 0 on success, -1 on error. */
+static int
+spoe_init_appctx(struct appctx *appctx)
+{
+	struct spoe_appctx *spoe_appctx = SPOE_APPCTX(appctx);
+	struct spoe_agent *agent = spoe_appctx->agent;
+	struct task *task;
+	struct stream *s;
+
+	if ((task = task_new_here()) == NULL)
+		goto out_error;
+	task->process = spoe_process_appctx;
+	task->context = appctx;
+
+	if (appctx_finalize_startup(appctx, &agent->spoe_conf->agent_fe, &BUF_NULL) == -1)
+		goto out_free_task;
+
+	spoe_appctx->owner = appctx;
+	spoe_appctx->task = task;
+
+	/* Register the wakeup callback used when waiting for a buffer */
+	LIST_INIT(&spoe_appctx->buffer_wait.list);
+	spoe_appctx->buffer_wait.target = appctx;
+	spoe_appctx->buffer_wait.wakeup_cb = (int (*)(void *))spoe_wakeup_appctx;
+
+	s = appctx_strm(appctx);
+	stream_set_backend(s, agent->b.be);
+
+	/* applet is waiting for data */
+	applet_need_more_data(appctx);
+
+	s->do_log = NULL;
+	s->scb->flags |= SC_FL_RCV_ONCE;
+
+	/* Publish the applet in the agent's per-thread list, under its lock */
+	HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+	LIST_APPEND(&agent->rt[tid].applets, &spoe_appctx->list);
+	HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+	_HA_ATOMIC_INC(&agent->counters.applets);
+
+	appctx->st0 = SPOE_APPCTX_ST_CONNECT;
+	task_wakeup(spoe_appctx->task, TASK_WOKEN_INIT);
+	return 0;
+ out_free_task:
+	task_destroy(task);
+ out_error:
+	return -1;
+}
+
+/* Callback function that releases a SPOE applet. This happens when the
+ * connection with the agent is closed. */
+static void
+spoe_release_appctx(struct appctx *appctx)
+{
+	struct spoe_appctx *spoe_appctx = SPOE_APPCTX(appctx);
+	struct spoe_agent *agent;
+	struct spoe_context *ctx, *back;
+
+	if (spoe_appctx == NULL)
+		return;
+
+	appctx->svcctx = NULL;
+	agent = spoe_appctx->agent;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, appctx);
+
+	/* Remove applet from the list of running applets */
+	_HA_ATOMIC_DEC(&agent->counters.applets);
+	HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+	if (!LIST_ISEMPTY(&spoe_appctx->list)) {
+		LIST_DELETE(&spoe_appctx->list);
+		LIST_INIT(&spoe_appctx->list);
+	}
+	HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
+
+	/* Shutdown the server connection, if needed */
+	if (appctx->st0 != SPOE_APPCTX_ST_END) {
+		if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
+			/* Still accounted as idle: remove it from the idle
+			 * tree and fix the idle counters. */
+			eb32_delete(&spoe_appctx->node);
+			_HA_ATOMIC_DEC(&agent->counters.idles);
+			agent->rt[tid].idles--;
+		}
+
+		appctx->st0 = SPOE_APPCTX_ST_END;
+		if (spoe_appctx->status_code == SPOE_FRM_ERR_NONE)
+			spoe_appctx->status_code = SPOE_FRM_ERR_IO;
+	}
+
+	/* Destroy the task attached to this applet */
+	task_destroy(spoe_appctx->task);
+
+	/* Report an error to all streams in the appctx waiting queue */
+	list_for_each_entry_safe(ctx, back, &spoe_appctx->waiting_queue, list) {
+		LIST_DELETE(&ctx->list);
+		LIST_INIT(&ctx->list);
+		_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
+		spoe_update_stat_time(&ctx->stats.wait_ts, &ctx->stats.t_waiting);
+		ctx->spoe_appctx = NULL;
+		ctx->state = SPOE_CTX_ST_ERROR;
+		ctx->status_code = (spoe_appctx->status_code + 0x100);
+		task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+	}
+
+	/* If the applet was processing a fragmented frame, report an error to
+	 * the corresponding stream. */
+	if (spoe_appctx->frag_ctx.ctx) {
+		ctx = spoe_appctx->frag_ctx.ctx;
+		ctx->spoe_appctx = NULL;
+		ctx->state = SPOE_CTX_ST_ERROR;
+		ctx->status_code = (spoe_appctx->status_code + 0x100);
+		task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+	}
+
+	if (!LIST_ISEMPTY(&agent->rt[tid].applets)) {
+		/* If there are still some running applets, remove reference on
+		 * the current one from streams in the async waiting queue. In
+		 * async mode, the ACK may be received from another appctx.
+		 */
+		list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
+			if (ctx->spoe_appctx == spoe_appctx)
+				ctx->spoe_appctx = NULL;
+		}
+		goto end;
+	}
+	else {
+		/* It is the last running applet and the sending and async
+		 * waiting queues are not empty. So try to start a new applet if
+		 * HAproxy is not stopping. On success, we remove reference on
+		 * the current appctx from streams in the async waiting queue.
+		 * In async mode, the ACK may be received from another appctx.
+		 */
+		if (!stopping &&
+		    (!LIST_ISEMPTY(&agent->rt[tid].sending_queue) || !LIST_ISEMPTY(&agent->rt[tid].waiting_queue)) &&
+		    spoe_create_appctx(agent->spoe_conf)) {
+			list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
+				if (ctx->spoe_appctx == spoe_appctx)
+					ctx->spoe_appctx = NULL;
+			}
+			goto end;
+		}
+
+		/* Otherwise, report an error to all streams in the sending and
+		 * async waiting queues.
+		 */
+		list_for_each_entry_safe(ctx, back, &agent->rt[tid].sending_queue, list) {
+			LIST_DELETE(&ctx->list);
+			LIST_INIT(&ctx->list);
+			_HA_ATOMIC_DEC(&agent->counters.nb_sending);
+			spoe_update_stat_time(&ctx->stats.queue_ts, &ctx->stats.t_queue);
+			ctx->spoe_appctx = NULL;
+			ctx->state = SPOE_CTX_ST_ERROR;
+			ctx->status_code = (spoe_appctx->status_code + 0x100);
+			task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+		}
+		list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
+			LIST_DELETE(&ctx->list);
+			LIST_INIT(&ctx->list);
+			_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
+			spoe_update_stat_time(&ctx->stats.wait_ts, &ctx->stats.t_waiting);
+			ctx->spoe_appctx = NULL;
+			ctx->state = SPOE_CTX_ST_ERROR;
+			ctx->status_code = (spoe_appctx->status_code + 0x100);
+			task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+		}
+	}
+
+  end:
+	/* Release allocated memory */
+	spoe_release_buffer(&spoe_appctx->buffer,
+			    &spoe_appctx->buffer_wait);
+	pool_free(pool_head_spoe_appctx, spoe_appctx);
+
+	/* Update runtime agent info */
+	agent->rt[tid].frame_size = agent->max_frame_size;
+	list_for_each_entry(spoe_appctx, &agent->rt[tid].applets, list)
+		HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, spoe_appctx->max_frame_size);
+}
+
+/* CONNECT state handler: wait for the connection to the agent to be
+ * established, then send the HAPROXY-HELLO frame. Returns 1 to yield (stop
+ * processing) and 0 to let the applet state machine run again. */
+static int
+spoe_handle_connect_appctx(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
+	char *frame, *buf;
+	int ret;
+
+	/* if the connection is not established, inform the stream that we want
+	 * to be notified whenever the connection completes.
+	 */
+	if (sc_opposite(sc)->state < SC_ST_EST) {
+		applet_need_more_data(appctx);
+		se_need_remote_conn(appctx->sedesc);
+		applet_have_more_data(appctx);
+		goto stop;
+	}
+
+	if (appctx->st1 == SPOE_APPCTX_ERR_TOUT) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+			    " - Connection timed out\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+			    __FUNCTION__, appctx);
+		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOUT;
+		goto exit;
+	}
+
+	/* Arm the hello timeout if not already set */
+	if (SPOE_APPCTX(appctx)->task->expire == TICK_ETERNITY)
+		SPOE_APPCTX(appctx)->task->expire =
+			tick_add_ifset(now_ms, agent->timeout.hello);
+
+	/* 4 bytes are reserved at the beginning of <buf> to store the frame
+	 * length. */
+	buf = trash.area; frame = buf+4;
+	ret = spoe_prepare_hahello_frame(appctx, frame,
+					 SPOE_APPCTX(appctx)->max_frame_size);
+	if (ret > 1)
+		ret = spoe_send_frame(appctx, buf, ret);
+
+	switch (ret) {
+		case -1: /* error */
+		case  0: /* ignore => an error, cannot be ignored */
+			goto exit;
+
+		case  1: /* retry later */
+			goto stop;
+
+		default:
+			/* HELLO frame successfully sent, now wait for the
+			 * reply. */
+			appctx->st0 = SPOE_APPCTX_ST_CONNECTING;
+			goto next;
+	}
+
+  next:
+	return 0;
+  stop:
+	return 1;
+  exit:
+	appctx->st0 = SPOE_APPCTX_ST_EXIT;
+	return 0;
+}
+
+/* CONNECTING state handler: wait for and process the agent's AGENT-HELLO
+ * reply. On success the applet becomes IDLE and is inserted in the idle
+ * applets tree. Returns 1 to yield and 0 to run the state machine again. */
+static int
+spoe_handle_connecting_appctx(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct spoe_agent  *agent = SPOE_APPCTX(appctx)->agent;
+	char *frame;
+	int ret;
+
+	if (appctx->st1 == SPOE_APPCTX_ERR_TOUT) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+			    " - Connection timed out\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+			    __FUNCTION__, appctx);
+		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOUT;
+		goto exit;
+	}
+
+	frame = trash.area; trash.data = 0;
+	ret = spoe_recv_frame(appctx, frame,
+			      SPOE_APPCTX(appctx)->max_frame_size);
+	if (ret > 1) {
+		if (*frame == SPOE_FRM_T_AGENT_DISCON) {
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECTING;
+			goto next;
+		}
+		/* Remember how much to skip from the output buffer (frame +
+		 * 4-byte length header) once handled */
+		trash.data = ret + 4;
+		ret = spoe_handle_agenthello_frame(appctx, frame, ret);
+	}
+
+	switch (ret) {
+		case -1: /* error */
+		case  0: /* ignore => an error, cannot be ignored */
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+			goto next;
+
+		case  1: /* retry later */
+			goto stop;
+
+		default:
+			/* HELLO handshake is done, set the applet idle */
+			_HA_ATOMIC_INC(&agent->counters.idles);
+			agent->rt[tid].idles++;
+			appctx->st0 = SPOE_APPCTX_ST_IDLE;
+			SPOE_APPCTX(appctx)->node.key = 0;
+			eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
+
+			/* Update runtime agent info */
+			HA_ATOMIC_UPDATE_MIN(&agent->rt[tid].frame_size, SPOE_APPCTX(appctx)->max_frame_size);
+			goto next;
+	}
+
+  next:
+	/* Do not forget to remove processed frame from the output buffer */
+	if (trash.data)
+		co_skip(sc_oc(sc), trash.data);
+
+	SPOE_APPCTX(appctx)->task->expire =
+		tick_add_ifset(now_ms, agent->timeout.idle);
+	return 0;
+  stop:
+	return 1;
+  exit:
+	appctx->st0 = SPOE_APPCTX_ST_EXIT;
+	return 0;
+}
+
+
+/* Encode and send one NOTIFY (or fragment continuation) frame to the agent.
+ * <*skip> is set to 1 when the caller's sending loop must stop (nothing to
+ * send, retry needed, sync ACK awaited, ...). Returns the spoe_send_frame()
+ * codes: -1 on error, 0 ignore, 1 retry, frame length on success. */
+static int
+spoe_handle_sending_frame_appctx(struct appctx *appctx, int *skip)
+{
+	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
+	struct spoe_context *ctx = NULL;
+	char *frame, *buf;
+	int ret;
+
+	/* 4 bytes are reserved at the beginning of <buf> to store the frame
+	 * length. */
+	buf = trash.area; frame = buf+4;
+
+	if (appctx->st0 == SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY) {
+		/* Resume an in-progress fragmented NOTIFY */
+		ctx = SPOE_APPCTX(appctx)->frag_ctx.ctx;
+		ret = spoe_prepare_hafrag_frame(appctx, ctx, frame,
+						SPOE_APPCTX(appctx)->max_frame_size);
+	}
+	else if (LIST_ISEMPTY(&agent->rt[tid].sending_queue)) {
+		/* nothing queued for this thread */
+		*skip = 1;
+		ret   = 1;
+		goto end;
+	}
+	else {
+		/* Pick the first queued stream context and encode its NOTIFY */
+		ctx = LIST_NEXT(&agent->rt[tid].sending_queue, typeof(ctx), list);
+		ret = spoe_prepare_hanotify_frame(appctx, ctx, frame,
+						  SPOE_APPCTX(appctx)->max_frame_size);
+
+	}
+
+	if (ret > 1)
+		ret = spoe_send_frame(appctx, buf, ret);
+
+	switch (ret) {
+		case -1: /* error */
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+			goto end;
+
+		case 0: /* ignore */
+			if (ctx == NULL)
+				goto abort_frag_frame;
+
+			/* Dequeue the stream and report the error to it */
+			spoe_release_buffer(&ctx->buffer, &ctx->buffer_wait);
+			LIST_DELETE(&ctx->list);
+			LIST_INIT(&ctx->list);
+			_HA_ATOMIC_DEC(&agent->counters.nb_sending);
+			spoe_update_stat_time(&ctx->stats.queue_ts, &ctx->stats.t_queue);
+			ctx->spoe_appctx = NULL;
+			ctx->state = SPOE_CTX_ST_ERROR;
+			ctx->status_code = (SPOE_APPCTX(appctx)->status_code + 0x100);
+			task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+			*skip = 1;
+			break;
+
+		case 1: /* retry */
+			*skip = 1;
+			break;
+
+		default:
+			if (ctx == NULL)
+				goto abort_frag_frame;
+
+			/* Frame sent: move the stream out of the sending queue */
+			spoe_release_buffer(&ctx->buffer, &ctx->buffer_wait);
+			LIST_DELETE(&ctx->list);
+			LIST_INIT(&ctx->list);
+			_HA_ATOMIC_DEC(&agent->counters.nb_sending);
+			spoe_update_stat_time(&ctx->stats.queue_ts, &ctx->stats.t_queue);
+			ctx->spoe_appctx = SPOE_APPCTX(appctx);
+			if (!(ctx->flags & SPOE_CTX_FL_FRAGMENTED) ||
+			    (ctx->frag_ctx.flags & SPOE_FRM_FL_FIN))
+				goto no_frag_frame_sent;
+			else
+				goto frag_frame_sent;
+	}
+	goto end;
+
+  frag_frame_sent:
+	/* More fragments to come: remember the current stream/frame ids and
+	 * let the stream encode the next fragment */
+	appctx->st0 = SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY;
+	*skip = 1;
+	SPOE_APPCTX(appctx)->frag_ctx.ctx    = ctx;
+	SPOE_APPCTX(appctx)->frag_ctx.cursid = ctx->stream_id;
+	SPOE_APPCTX(appctx)->frag_ctx.curfid = ctx->frame_id;
+	ctx->state = SPOE_CTX_ST_ENCODING_MSGS;
+	task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+	goto end;
+
+  no_frag_frame_sent:
+	/* Whole NOTIFY sent: queue the stream for its ACK, per mode */
+	if (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_ASYNC) {
+		appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+		LIST_APPEND(&agent->rt[tid].waiting_queue, &ctx->list);
+	}
+	else if (SPOE_APPCTX(appctx)->flags & SPOE_APPCTX_FL_PIPELINING) {
+		appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+		LIST_APPEND(&SPOE_APPCTX(appctx)->waiting_queue, &ctx->list);
+	}
+	else {
+		appctx->st0 = SPOE_APPCTX_ST_WAITING_SYNC_ACK;
+		*skip = 1;
+		LIST_APPEND(&SPOE_APPCTX(appctx)->waiting_queue, &ctx->list);
+	}
+	_HA_ATOMIC_INC(&agent->counters.nb_waiting);
+	ctx->stats.wait_ts = now_ns;
+	SPOE_APPCTX(appctx)->frag_ctx.ctx    = NULL;
+	SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
+	SPOE_APPCTX(appctx)->frag_ctx.curfid = 0;
+	SPOE_APPCTX(appctx)->cur_fpa++;
+
+	ctx->state = SPOE_CTX_ST_WAITING_ACK;
+	goto end;
+
+  abort_frag_frame:
+	/* The fragmented frame's stream vanished: reset the frag state */
+	appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+	SPOE_APPCTX(appctx)->frag_ctx.ctx    = NULL;
+	SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
+	SPOE_APPCTX(appctx)->frag_ctx.curfid = 0;
+	goto end;
+
+  end:
+	return ret;
+}
+
+/* Receive and process one ACK (or DISCON) frame from the agent. <*skip> is
+ * set to 1 when the caller's receiving loop must stop (retry). Returns the
+ * spoe_recv_frame()/spoe_handle_agentack_frame() codes: -1 on error, 0
+ * ignore, 1 retry, frame length on success. */
+static int
+spoe_handle_receiving_frame_appctx(struct appctx *appctx, int *skip)
+{
+	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
+	struct spoe_context *ctx = NULL;
+	char *frame;
+	int ret;
+
+	frame = trash.area; trash.data = 0;
+	ret = spoe_recv_frame(appctx, frame,
+			      SPOE_APPCTX(appctx)->max_frame_size);
+	if (ret > 1) {
+		if (*frame == SPOE_FRM_T_AGENT_DISCON) {
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECTING;
+			ret = -1;
+			goto end;
+		}
+		/* Remember how much to skip from the output buffer (frame +
+		 * 4-byte length header) once handled */
+		trash.data = ret + 4;
+		ret = spoe_handle_agentack_frame(appctx, &ctx, frame, ret);
+	}
+	switch (ret) {
+		case -1: /* error */
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+			break;
+
+		case 0: /* ignore */
+			break;
+
+		case 1: /* retry */
+			*skip = 1;
+			break;
+
+		default:
+			/* ACK matched a waiting stream (<ctx> was set by
+			 * spoe_handle_agentack_frame): dequeue and wake it */
+			LIST_DELETE(&ctx->list);
+			LIST_INIT(&ctx->list);
+			_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
+			spoe_update_stat_time(&ctx->stats.wait_ts, &ctx->stats.t_waiting);
+			ctx->stats.response_ts = now_ns;
+			if (ctx->spoe_appctx) {
+				ctx->spoe_appctx->cur_fpa--;
+				ctx->spoe_appctx = NULL;
+			}
+			if (appctx->st0 == SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY &&
+			    ctx == SPOE_APPCTX(appctx)->frag_ctx.ctx) {
+				appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+				SPOE_APPCTX(appctx)->frag_ctx.ctx    = NULL;
+				SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
+				SPOE_APPCTX(appctx)->frag_ctx.curfid = 0;
+			}
+			else if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK)
+				appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+			task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+			break;
+	}
+
+	/* Do not forget to remove processed frame from the output buffer */
+	if (trash.data)
+		co_skip(sc_oc(appctx_sc(appctx)), trash.data);
+  end:
+	return ret;
+}
+
+/* PROCESSING/SENDING_FRAG_NOTIFY/WAITING_SYNC_ACK state handler: drain
+ * incoming ACK frames, then send as many pending NOTIFY frames as the
+ * frames-per-applet budget allows, and finally switch the applet back to
+ * IDLE (or close it) when appropriate. Returns 1 to yield, 0 to run the
+ * state machine again. */
+static int
+spoe_handle_processing_appctx(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct server *srv = objt_server(__sc_strm(sc)->target);
+	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
+	int ret, skip_sending = 0, skip_receiving = 0, active_s = 0, active_r = 0, close_asap = 0;
+
+	if (appctx->st1 == SPOE_APPCTX_ERR_TOUT) {
+		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOUT;
+		appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+		appctx->st1 = SPOE_APPCTX_ERR_NONE;
+		goto next;
+	}
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+		    " - process: fpa=%u/%u - appctx-state=%s - weight=%u - flags=0x%08x\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, appctx, SPOE_APPCTX(appctx)->cur_fpa,
+		    agent->max_fpa, spoe_appctx_state_str[appctx->st0],
+		    SPOE_APPCTX(appctx)->node.key, SPOE_APPCTX(appctx)->flags);
+
+
+	/* Close the applet ASAP because some sessions are waiting for a free
+	 * connection slot. It is only an issue in multithreaded mode.
+	 */
+	close_asap = (global.nbthread > 1 &&
+		      (agent->b.be->queue.length ||
+		       (srv && (srv->queue.length || (srv->maxconn && srv->served >= srv_dynamic_maxconn(srv))))));
+
+	/* receiving_frame loop */
+	while (!skip_receiving) {
+		ret = spoe_handle_receiving_frame_appctx(appctx, &skip_receiving);
+		switch (ret) {
+			case -1: /* error */
+				goto next;
+
+			case 0: /* ignore */
+				active_r = 1;
+				break;
+
+			case 1: /* retry */
+				break;
+
+			default:
+				active_r = 1;
+				break;
+		}
+	}
+
+	/* Don't try to send a new frame while we are waiting for at least an
+	 * ACK, in sync mode, or if the applet must be closed ASAP
+	 */
+	if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK || (close_asap && SPOE_APPCTX(appctx)->cur_fpa))
+		skip_sending = 1;
+
+	/* send_frame loop */
+	while (!skip_sending && SPOE_APPCTX(appctx)->cur_fpa < agent->max_fpa) {
+		ret = spoe_handle_sending_frame_appctx(appctx, &skip_sending);
+		switch (ret) {
+			case -1: /* error */
+				goto next;
+
+			case 0: /* ignore */
+				if (SPOE_APPCTX(appctx)->node.key)
+					SPOE_APPCTX(appctx)->node.key--;
+				active_s++;
+				break;
+
+			case 1: /* retry */
+				break;
+
+			default:
+				if (SPOE_APPCTX(appctx)->node.key)
+					SPOE_APPCTX(appctx)->node.key--;
+				active_s++;
+				break;
+		}
+
+		/* if applet must be close ASAP, don't send more than a frame */
+		if (close_asap)
+			break;
+	}
+
+	if (active_s || active_r) {
+		update_freq_ctr(&agent->rt[tid].processing_per_sec, active_s);
+		SPOE_APPCTX(appctx)->task->expire = tick_add_ifset(now_ms, agent->timeout.idle);
+	}
+
+	if (appctx->st0 == SPOE_APPCTX_ST_PROCESSING && SPOE_APPCTX(appctx)->cur_fpa < agent->max_fpa) {
+		/* If applet must be closed, don't switch it in IDLE state and
+		 * close it when the last waiting frame is acknowledged.
+		 */
+		if (close_asap) {
+			if (SPOE_APPCTX(appctx)->cur_fpa)
+				goto out;
+			SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_NONE;
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+			appctx->st1 = SPOE_APPCTX_ERR_NONE;
+			goto next;
+		}
+		/* Budget available again: put the applet back in the idle tree */
+		_HA_ATOMIC_INC(&agent->counters.idles);
+		agent->rt[tid].idles++;
+		appctx->st0 = SPOE_APPCTX_ST_IDLE;
+		eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
+	}
+
+  out:
+	return 1;
+
+  next:
+	SPOE_APPCTX(appctx)->task->expire = tick_add_ifset(now_ms, agent->timeout.idle);
+	return 0;
+}
+
+/* DISCONNECT state handler: send a HAPROXY-DISCONNECT frame to the agent and
+ * switch to DISCONNECTING to wait for its reply. Returns 1 to yield, 0 to run
+ * the state machine again. */
+static int
+spoe_handle_disconnect_appctx(struct appctx *appctx)
+{
+	struct spoe_agent *agent = SPOE_APPCTX(appctx)->agent;
+	char *frame, *buf;
+	int ret;
+
+	if (appctx->st1 == SPOE_APPCTX_ERR_TOUT)
+		goto exit;
+
+	/* 4 bytes are reserved at the beginning of <buf> to store the frame
+	 * length. */
+	buf = trash.area; frame = buf+4;
+	ret = spoe_prepare_hadiscon_frame(appctx, frame,
+					  SPOE_APPCTX(appctx)->max_frame_size);
+	if (ret > 1)
+		ret = spoe_send_frame(appctx, buf, ret);
+
+	switch (ret) {
+		case -1: /* error */
+		case  0: /* ignore  => an error, cannot be ignored */
+			goto exit;
+
+		case 1: /* retry */
+			goto stop;
+
+		default:
+			SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+				    " - disconnected by HAProxy (%d): %s\n",
+				    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+				    __FUNCTION__, appctx,
+				    SPOE_APPCTX(appctx)->status_code,
+				    spoe_frm_err_reasons[SPOE_APPCTX(appctx)->status_code]);
+
+			appctx->st0 = SPOE_APPCTX_ST_DISCONNECTING;
+			goto next;
+	}
+
+  next:
+	SPOE_APPCTX(appctx)->task->expire =
+		tick_add_ifset(now_ms, agent->timeout.idle);
+	return 0;
+  stop:
+	return 1;
+  exit:
+	appctx->st0 = SPOE_APPCTX_ST_EXIT;
+	return 0;
+}
+
+/* DISCONNECTING state handler: wait for the agent's AGENT-DISCONNECT reply
+ * before closing. Returns 1 to yield, 0 to run the state machine again. */
+static int
+spoe_handle_disconnecting_appctx(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	char  *frame;
+	int    ret;
+
+	if (appctx->st1 == SPOE_APPCTX_ERR_TOUT) {
+		SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_TOUT;
+		goto exit;
+	}
+
+	frame = trash.area; trash.data = 0;
+	ret = spoe_recv_frame(appctx, frame,
+			      SPOE_APPCTX(appctx)->max_frame_size);
+	if (ret > 1) {
+		/* Remember how much to skip from the output buffer (frame +
+		 * 4-byte length header) once handled */
+		trash.data = ret + 4;
+		ret = spoe_handle_agentdiscon_frame(appctx, frame, ret);
+	}
+
+	switch (ret) {
+		case -1: /* error  */
+			SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+				    " - error on frame (%s)\n",
+				    (int)date.tv_sec, (int)date.tv_usec,
+				    ((struct spoe_agent *)SPOE_APPCTX(appctx)->agent)->id,
+				    __FUNCTION__, appctx,
+				    spoe_frm_err_reasons[SPOE_APPCTX(appctx)->status_code]);
+			goto exit;
+
+		case  0: /* ignore */
+			goto next;
+
+		case  1: /* retry */
+			goto stop;
+
+		default:
+			SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+				    " - disconnected by peer (%d): %.*s\n",
+				    (int)date.tv_sec, (int)date.tv_usec,
+				    ((struct spoe_agent *)SPOE_APPCTX(appctx)->agent)->id,
+				    __FUNCTION__, appctx, SPOE_APPCTX(appctx)->status_code,
+				    SPOE_APPCTX(appctx)->rlen, SPOE_APPCTX(appctx)->reason);
+			goto exit;
+	}
+
+  next:
+	/* Do not forget to remove processed frame from the output buffer */
+	if (trash.data)
+		co_skip(sc_oc(sc), trash.data);
+
+	return 0;
+  stop:
+	return 1;
+  exit:
+	appctx->st0 = SPOE_APPCTX_ST_EXIT;
+	return 0;
+}
+
+/* I/O Handler processing messages exchanged with the agent. Drives the
+ * applet's state machine (<appctx->st0>) until a state handler yields or the
+ * applet reaches the END state. */
+static void
+spoe_handle_appctx(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct spoe_agent *agent;
+
+	if (SPOE_APPCTX(appctx) == NULL)
+		return;
+
+	/* On a closed/broken connection, just drain pending output */
+	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+		co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+		goto out;
+	}
+
+	SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_NONE;
+	agent = SPOE_APPCTX(appctx)->agent;
+
+  switchstate:
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: appctx=%p"
+		    " - appctx-state=%s\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, appctx, spoe_appctx_state_str[appctx->st0]);
+
+	switch (appctx->st0) {
+		case SPOE_APPCTX_ST_CONNECT:
+			if (spoe_handle_connect_appctx(appctx))
+				goto out;
+			goto switchstate;
+
+		case SPOE_APPCTX_ST_CONNECTING:
+			if (spoe_handle_connecting_appctx(appctx))
+				goto out;
+			goto switchstate;
+
+		case SPOE_APPCTX_ST_IDLE:
+			/* Leaving IDLE: fix the idle accounting, then either
+			 * disconnect (on reload with nothing pending) or
+			 * start processing */
+			_HA_ATOMIC_DEC(&agent->counters.idles);
+			agent->rt[tid].idles--;
+			eb32_delete(&SPOE_APPCTX(appctx)->node);
+			if (stopping &&
+			    LIST_ISEMPTY(&agent->rt[tid].sending_queue) &&
+			    LIST_ISEMPTY(&SPOE_APPCTX(appctx)->waiting_queue)) {
+				SPOE_APPCTX(appctx)->task->expire =
+					tick_add_ifset(now_ms, agent->timeout.idle);
+				appctx->st0 = SPOE_APPCTX_ST_DISCONNECT;
+				goto switchstate;
+			}
+			appctx->st0 = SPOE_APPCTX_ST_PROCESSING;
+			__fallthrough;
+
+		case SPOE_APPCTX_ST_PROCESSING:
+		case SPOE_APPCTX_ST_SENDING_FRAG_NOTIFY:
+		case SPOE_APPCTX_ST_WAITING_SYNC_ACK:
+			if (spoe_handle_processing_appctx(appctx))
+				goto out;
+			goto switchstate;
+
+		case SPOE_APPCTX_ST_DISCONNECT:
+			if (spoe_handle_disconnect_appctx(appctx))
+				goto out;
+			goto switchstate;
+
+		case SPOE_APPCTX_ST_DISCONNECTING:
+			if (spoe_handle_disconnecting_appctx(appctx))
+				goto out;
+			goto switchstate;
+
+		case SPOE_APPCTX_ST_EXIT:
+			/* Terminal transition: report EOS, plus ERROR or EOI
+			 * depending on the recorded status code */
+			appctx->st0 = SPOE_APPCTX_ST_END;
+			SPOE_APPCTX(appctx)->task->expire = TICK_ETERNITY;
+			se_fl_set(appctx->sedesc, SE_FL_EOS);
+			if (SPOE_APPCTX(appctx)->status_code != SPOE_FRM_ERR_NONE)
+				se_fl_set(appctx->sedesc, SE_FL_ERROR);
+			else
+				se_fl_set(appctx->sedesc, SE_FL_EOI);
+			__fallthrough;
+
+		case SPOE_APPCTX_ST_END:
+			return;
+	}
+  out:
+	if (SPOE_APPCTX(appctx)->task->expire != TICK_ETERNITY)
+		task_queue(SPOE_APPCTX(appctx)->task);
+}
+
+/* Applet descriptor registering the SPOE I/O handler and its
+ * init/release callbacks */
+struct applet spoe_applet = {
+	.obj_type = OBJ_TYPE_APPLET,
+	.name = "<SPOE>", /* used for logging */
+	.fct = spoe_handle_appctx,
+	.init = spoe_init_appctx,
+	.release = spoe_release_appctx,
+};
+
+/* Create a SPOE applet. On success, the created applet is returned, else
+ * NULL. */
+static struct appctx *
+spoe_create_appctx(struct spoe_config *conf)
+{
+	struct spoe_agent *agent = conf->agent;
+	struct spoe_appctx *spoe_appctx;
+	struct appctx *appctx;
+
+	/* Do not try to create a new applet if there is no server up for the
+	 * agent's backend. */
+	if (!agent->b.be->srv_act && !agent->b.be->srv_bck) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: don't create SPOE appctx: no server up\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id, __FUNCTION__);
+		goto out;
+	}
+
+	/* Do not try to create a new applet if we have reached the maximum of
+	 * connection per seconds */
+	if (agent->cps_max > 0) {
+		if (!freq_ctr_remain(&agent->rt[tid].conn_per_sec, agent->cps_max, 0)) {
+			SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: don't create SPOE appctx: max CPS reached\n",
+				    (int)date.tv_sec, (int)date.tv_usec, agent->id, __FUNCTION__);
+			goto out;
+		}
+	}
+
+	/* Allocate and pre-initialize the SPOE applet context */
+	spoe_appctx = pool_zalloc(pool_head_spoe_appctx);
+	if (spoe_appctx == NULL)
+		goto out_error;
+
+	spoe_appctx->agent = agent;
+	spoe_appctx->version = 0;
+	spoe_appctx->max_frame_size = agent->max_frame_size;
+	spoe_appctx->flags = 0;
+	spoe_appctx->status_code = SPOE_FRM_ERR_NONE;
+	spoe_appctx->buffer = BUF_NULL;
+	spoe_appctx->cur_fpa = 0;
+	LIST_INIT(&spoe_appctx->list);
+	LIST_INIT(&spoe_appctx->waiting_queue);
+
+
+	if ((appctx = appctx_new_here(&spoe_applet, NULL)) == NULL)
+		goto out_free_spoe_appctx;
+
+	appctx->svcctx = spoe_appctx;
+	if (appctx_init(appctx) == -1)
+		goto out_free_appctx;
+
+	/* Increase the per-process number of cumulated connections */
+	if (agent->cps_max > 0)
+		update_freq_ctr(&agent->rt[tid].conn_per_sec, 1);
+
+	appctx_wakeup(appctx);
+	return appctx;
+
+	/* Error unrolling */
+ out_free_appctx:
+	appctx_free_on_early_error(appctx);
+ out_free_spoe_appctx:
+	pool_free(pool_head_spoe_appctx, spoe_appctx);
+ out_error:
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: failed to create SPOE appctx\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id, __FUNCTION__);
+	send_log(&conf->agent_fe, LOG_EMERG, "SPOE: [%s] failed to create SPOE applet\n", agent->id);
+ out:
+
+	return NULL;
+}
+
+/* Queue a stream's SPOE context for processing: optionally spawn a new
+ * applet, add the context to the per-thread sending queue and wake an idle
+ * applet up. Returns 1 on success, -1 when no applet exists at all. */
+static int
+spoe_queue_context(struct spoe_context *ctx)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	struct spoe_appctx *spoe_appctx;
+
+	/* Check if we need to create a new SPOE applet or not. */
+	if (agent->rt[tid].processing < agent->rt[tid].idles ||
+	    agent->rt[tid].processing < read_freq_ctr(&agent->rt[tid].processing_per_sec))
+		goto end;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - try to create new SPOE appctx\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id, __FUNCTION__,
+		    ctx->strm);
+
+	spoe_create_appctx(conf);
+
+  end:
+	/* The only reason to return an error is when there is no applet */
+	if (LIST_ISEMPTY(&agent->rt[tid].applets)) {
+		ctx->status_code = SPOE_CTX_ERR_RES;
+		return -1;
+	}
+
+	/* Add the SPOE context in the sending queue if the stream has no applet
+	 * already assigned and wakeup all idle applets. Otherwise, don't queue
+	 * it. */
+	_HA_ATOMIC_INC(&agent->counters.nb_sending);
+	spoe_update_stat_time(&ctx->stats.request_ts, &ctx->stats.t_request);
+	ctx->stats.queue_ts = now_ns;
+	if (ctx->spoe_appctx)
+		return 1;
+	LIST_APPEND(&agent->rt[tid].sending_queue, &ctx->list);
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - Add stream in sending queue"
+		    " - applets=%u - idles=%u - processing=%u\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id, __FUNCTION__,
+		    ctx->strm, agent->counters.applets, agent->counters.idles,
+		    agent->rt[tid].processing);
+
+	/* Finally try to wakeup an IDLE applet. */
+	if (!eb_is_empty(&agent->rt[tid].idle_applets)) {
+		struct eb32_node *node;
+
+		/* NOTE(review): eb32_entry() is applied before the NULL check
+		 * on <node>; it is pure pointer arithmetic (no dereference),
+		 * and <node> cannot be NULL here since the tree is non-empty,
+		 * but moving the check earlier would be cleaner — confirm. */
+		node = eb32_first(&agent->rt[tid].idle_applets);
+		spoe_appctx = eb32_entry(node, struct spoe_appctx, node);
+		if (node && spoe_appctx) {
+			/* Bump the key (used as a weight) so idle applets are
+			 * picked round-robin-ish */
+			eb32_delete(&spoe_appctx->node);
+			spoe_appctx->node.key++;
+			eb32_insert(&agent->rt[tid].idle_applets, &spoe_appctx->node);
+			spoe_wakeup_appctx(spoe_appctx->owner);
+		}
+	}
+	return 1;
+}
+
+/***************************************************************************
+ * Functions that encode SPOE messages
+ **************************************************************************/
+/* Encode a SPOE message. Info in <ctx->frag_ctx>, if any, are used to handle
+ * fragmented_content. If the next message can be processed, it returns 0. If
+ * the message is too big, it returns -1.*/
+static int
+spoe_encode_message(struct stream *s, struct spoe_context *ctx,
+ struct spoe_message *msg, int dir,
+ char **buf, char *end)
+{
+ struct sample *smp;
+ struct spoe_arg *arg;
+ int ret;
+
+ if (msg->cond) {
+ ret = acl_exec_cond(msg->cond, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+ ret = acl_pass(ret);
+ if (msg->cond->pol == ACL_COND_UNLESS)
+ ret = !ret;
+
+ /* the rule does not match */
+ if (!ret)
+ goto next;
+ }
+
+ /* Resume encoding of a SPOE argument */
+ if (ctx->frag_ctx.curarg != NULL) {
+ arg = ctx->frag_ctx.curarg;
+ goto encode_argument;
+ }
+
+ if (ctx->frag_ctx.curoff != UINT_MAX)
+ goto encode_msg_payload;
+
+ /* Check if there is enough space for the message name and the
+ * number of arguments. It implies <msg->id_len> is encoded on 2
+ * bytes, at most (< 2288). */
+ if (*buf + 2 + msg->id_len + 1 > end)
+ goto too_big;
+
+ /* Encode the message name */
+ if (spoe_encode_buffer(msg->id, msg->id_len, buf, end) == -1)
+ goto too_big;
+
+ /* Set the number of arguments for this message */
+ **buf = msg->nargs;
+ (*buf)++;
+
+ ctx->frag_ctx.curoff = 0;
+ encode_msg_payload:
+
+ /* Loop on arguments */
+ list_for_each_entry(arg, &msg->args, list) {
+ ctx->frag_ctx.curarg = arg;
+ ctx->frag_ctx.curoff = UINT_MAX;
+ ctx->frag_ctx.curlen = 0;
+
+ encode_argument:
+ if (ctx->frag_ctx.curoff != UINT_MAX)
+ goto encode_arg_value;
+
+ /* Encode the argument name as a string. It can by NULL */
+ if (spoe_encode_buffer(arg->name, arg->name_len, buf, end) == -1)
+ goto too_big;
+
+ ctx->frag_ctx.curoff = 0;
+ encode_arg_value:
+
+ /* Fetch the argument value */
+ smp = sample_process(s->be, s->sess, s, dir|SMP_OPT_FINAL, arg->expr, NULL);
+ if (smp) {
+ smp->ctx.a[0] = &ctx->frag_ctx.curlen;
+ smp->ctx.a[1] = &ctx->frag_ctx.curoff;
+ }
+ ret = spoe_encode_data(smp, buf, end);
+ if (ret == -1 || ctx->frag_ctx.curoff)
+ goto too_big;
+ }
+
+ next:
+ return 0;
+
+ too_big:
+ return -1;
+}
+
+/* Encode list of SPOE messages. Info in <ctx->frag_ctx>, if any, are used to
+ * handle fragmented content. On success it returns 1. If an error occurred, -1
+ * is returned. If nothing has been encoded, it returns 0 (this is only possible
+ * for unfragmented payload).
+ *
+ * <type> selects the list linkage to walk: SPOE_MSGS_BY_EVENT uses the
+ * <by_evt> element of spoe_message, SPOE_MSGS_BY_GROUP the <by_grp> one. When
+ * a previous call stopped in the middle of a message because the frame was
+ * full (fragmentation), <ctx->frag_ctx.curmsg> is non-NULL and encoding is
+ * resumed from that message. */
+static int
+spoe_encode_messages(struct stream *s, struct spoe_context *ctx,
+		     struct list *messages, int dir, int type)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	struct spoe_message *msg;
+	char *p, *end;
+
+	/* the payload is encoded in the context buffer, limited by the
+	 * per-thread negotiated frame size minus the frame header */
+	p = b_head(&ctx->buffer);
+	end = p + agent->rt[tid].frame_size - FRAME_HDR_SIZE;
+
+	if (type == SPOE_MSGS_BY_EVENT) { /* Loop on messages by event */
+		/* Resume encoding of a SPOE message */
+		if (ctx->frag_ctx.curmsg != NULL) {
+			msg = ctx->frag_ctx.curmsg;
+			goto encode_evt_message;
+		}
+
+		list_for_each_entry(msg, messages, by_evt) {
+			ctx->frag_ctx.curmsg = msg;
+			ctx->frag_ctx.curarg = NULL;
+			ctx->frag_ctx.curoff = UINT_MAX;
+
+		  encode_evt_message:
+			if (spoe_encode_message(s, ctx, msg, dir, &p, end) == -1)
+				goto too_big;
+		}
+	}
+	else if (type == SPOE_MSGS_BY_GROUP) { /* Loop on messages by group */
+		/* Resume encoding of a SPOE message */
+		if (ctx->frag_ctx.curmsg != NULL) {
+			msg = ctx->frag_ctx.curmsg;
+			goto encode_grp_message;
+		}
+
+		list_for_each_entry(msg, messages, by_grp) {
+			ctx->frag_ctx.curmsg = msg;
+			ctx->frag_ctx.curarg = NULL;
+			ctx->frag_ctx.curoff = UINT_MAX;
+
+		  encode_grp_message:
+			if (spoe_encode_message(s, ctx, msg, dir, &p, end) == -1)
+				goto too_big;
+		}
+	}
+	else
+		goto skip;
+
+
+	/* nothing has been encoded for an unfragmented payload */
+	if (!(ctx->flags & SPOE_CTX_FL_FRAGMENTED) && p == b_head(&ctx->buffer))
+		goto skip;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - encode %s messages - spoe_appctx=%p"
+		    "- max_size=%u - encoded=%ld\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    agent->id, __FUNCTION__, s,
+		    ((ctx->flags & SPOE_CTX_FL_FRAGMENTED) ? "last fragment of" : "unfragmented"),
+		    ctx->spoe_appctx, (agent->rt[tid].frame_size - FRAME_HDR_SIZE),
+		    p - b_head(&ctx->buffer));
+
+	/* everything fitted: commit the payload length and mark the frame as
+	 * final (FIN) */
+	b_set_data(&ctx->buffer, p - b_head(&ctx->buffer));
+	ctx->frag_ctx.curmsg = NULL;
+	ctx->frag_ctx.curarg = NULL;
+	ctx->frag_ctx.curoff = 0;
+	ctx->frag_ctx.flags  = SPOE_FRM_FL_FIN;
+
+	return 1;
+
+  too_big:
+	/* Return an error if fragmentation is unsupported or if nothing has
+	 * been encoded because its too big and not splittable. */
+	if (!(agent->flags & SPOE_FL_SND_FRAGMENTATION) || p == b_head(&ctx->buffer)) {
+		ctx->status_code = SPOE_CTX_ERR_TOO_BIG;
+		return -1;
+	}
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - encode fragmented messages - spoe_appctx=%p"
+		    " - curmsg=%p - curarg=%p - curoff=%u"
+		    " - max_size=%u - encoded=%ld\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    agent->id, __FUNCTION__, s, ctx->spoe_appctx,
+		    ctx->frag_ctx.curmsg, ctx->frag_ctx.curarg, ctx->frag_ctx.curoff,
+		    (agent->rt[tid].frame_size - FRAME_HDR_SIZE), p - b_head(&ctx->buffer));
+
+	/* a partial frame was encoded: send it without FIN, the next call will
+	 * resume from frag_ctx */
+	b_set_data(&ctx->buffer, p - b_head(&ctx->buffer));
+	ctx->flags |= SPOE_CTX_FL_FRAGMENTED;
+	ctx->frag_ctx.flags &= ~SPOE_FRM_FL_FIN;
+	return 1;
+
+  skip:
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - skip the frame because nothing has been encoded\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    agent->id, __FUNCTION__, s);
+	return 0;
+}
+
+
+/***************************************************************************
+ * Functions that handle SPOE actions
+ **************************************************************************/
+/* Helper function to set the variable "<scope>.<agent prefix>.<name>" to the
+ * sample <smp>. <len> is the length of <name>. Depending on the agent flag
+ * SPOE_FL_FORCE_SET_VAR, the variable is created if it does not exist yet or
+ * only updated when it already exists. If the composed variable name does not
+ * fit in the local buffer, the variable is silently ignored instead of using
+ * a length beyond the buffer (snprintf returns the untruncated length). */
+static void
+spoe_set_var(struct spoe_context *ctx, char *scope, char *name, int len,
+	     struct sample *smp)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	char varname[64];
+
+	memset(varname, 0, sizeof(varname));
+	len = snprintf(varname, sizeof(varname), "%s.%s.%.*s",
+		      scope, agent->var_pfx, len, name);
+	if (len < 0 || (size_t)len >= sizeof(varname)) {
+		/* encoding error or truncated variable name: skip it */
+		return;
+	}
+	if (agent->flags & SPOE_FL_FORCE_SET_VAR)
+		vars_set_by_name(varname, len, smp);
+	else
+		vars_set_by_name_ifexist(varname, len, smp);
+}
+
+/* Helper function to unset the variable "<scope>.<agent prefix>.<name>".
+ * <len> is the length of <name>. <smp> only carries the owner (session,
+ * stream, direction) needed by the vars API. If the composed variable name
+ * does not fit in the local buffer, the variable is silently ignored instead
+ * of using a length beyond the buffer. */
+static void
+spoe_unset_var(struct spoe_context *ctx, char *scope, char *name, int len,
+	       struct sample *smp)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	char varname[64];
+
+	memset(varname, 0, sizeof(varname));
+	len = snprintf(varname, sizeof(varname), "%s.%s.%.*s",
+		      scope, agent->var_pfx, len, name);
+	if (len < 0 || (size_t)len >= sizeof(varname)) {
+		/* encoding error or truncated variable name: skip it */
+		return;
+	}
+	vars_unset_by_name_ifexist(varname, len, smp);
+}
+
+
+/* Decode a SET-VAR action found in an ACK frame payload. <*buf> points just
+ * past the action type byte; <end> is the end of the payload. On success the
+ * number of consumed bytes is returned and <*buf> is advanced; on any decoding
+ * problem 0 is returned and <*buf> is left untouched (the caller aborts the
+ * whole ACK processing). Wire format: NB-ARGS(1, must be 3), SCOPE(1),
+ * NAME(string), VALUE(typed data). */
+static inline int
+spoe_decode_action_set_var(struct stream *s, struct spoe_context *ctx,
+			   char **buf, char *end, int dir)
+{
+	char *str, *scope, *p = *buf;
+	struct sample smp;
+	uint64_t sz;
+	int ret;
+
+	/* need at least the argument count and the scope bytes */
+	if (p + 2 >= end)
+		goto skip;
+
+	/* SET-VAR requires 3 arguments */
+	if (*p++ != 3)
+		goto skip;
+
+	/* map the wire scope byte onto the variable scope prefix */
+	switch (*p++) {
+		case SPOE_SCOPE_PROC: scope = "proc"; break;
+		case SPOE_SCOPE_SESS: scope = "sess"; break;
+		case SPOE_SCOPE_TXN : scope = "txn";  break;
+		case SPOE_SCOPE_REQ : scope = "req";  break;
+		case SPOE_SCOPE_RES : scope = "res";  break;
+		default: goto skip;
+	}
+
+	/* variable name (length-prefixed string) */
+	if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+		goto skip;
+	memset(&smp, 0, sizeof(smp));
+	smp_set_owner(&smp, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+
+	/* variable value (typed data) */
+	if (spoe_decode_data(&p, end, &smp) == -1)
+		goto skip;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - set-var '%s.%s.%.*s'\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(ctx->filter))->agent->id,
+		    __FUNCTION__, s, scope,
+		    ((struct spoe_config *)FLT_CONF(ctx->filter))->agent->var_pfx,
+		    (int)sz, str);
+
+	/* a NULL-typed value means the agent wants the variable removed */
+	if (smp.data.type == SMP_T_ANY)
+		spoe_unset_var(ctx, scope, str, sz, &smp);
+	else
+		spoe_set_var(ctx, scope, str, sz, &smp);
+
+	ret  = (p - *buf);
+	*buf = p;
+	return ret;
+  skip:
+	return 0;
+}
+
+/* Decode an UNSET-VAR action found in an ACK frame payload. <*buf> points
+ * just past the action type byte; <end> is the end of the payload. On success
+ * the number of consumed bytes is returned and <*buf> is advanced; on any
+ * decoding problem 0 is returned and <*buf> is left untouched. Wire format:
+ * NB-ARGS(1, must be 2), SCOPE(1), NAME(string). */
+static inline int
+spoe_decode_action_unset_var(struct stream *s, struct spoe_context *ctx,
+			     char **buf, char *end, int dir)
+{
+	char *str, *scope, *p = *buf;
+	struct sample smp;
+	uint64_t sz;
+	int ret;
+
+	/* need at least the argument count and the scope bytes */
+	if (p + 2 >= end)
+		goto skip;
+
+	/* UNSET-VAR requires 2 arguments */
+	if (*p++ != 2)
+		goto skip;
+
+	/* map the wire scope byte onto the variable scope prefix */
+	switch (*p++) {
+		case SPOE_SCOPE_PROC: scope = "proc"; break;
+		case SPOE_SCOPE_SESS: scope = "sess"; break;
+		case SPOE_SCOPE_TXN : scope = "txn";  break;
+		case SPOE_SCOPE_REQ : scope = "req";  break;
+		case SPOE_SCOPE_RES : scope = "res";  break;
+		default: goto skip;
+	}
+
+	/* variable name (length-prefixed string) */
+	if (spoe_decode_buffer(&p, end, &str, &sz) == -1)
+		goto skip;
+	memset(&smp, 0, sizeof(smp));
+	smp_set_owner(&smp, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - unset-var '%s.%s.%.*s'\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(ctx->filter))->agent->id,
+		    __FUNCTION__, s, scope,
+		    ((struct spoe_config *)FLT_CONF(ctx->filter))->agent->var_pfx,
+		    (int)sz, str);
+
+	spoe_unset_var(ctx, scope, str, sz, &smp);
+
+	ret  = (p - *buf);
+	*buf = p;
+	return ret;
+  skip:
+	return 0;
+}
+
+/* Process SPOE actions for a specific event. The context buffer holds a
+ * sequence of actions, each starting with its type on one byte. It returns 1
+ * when all actions were decoded and applied. If an action fails to decode or
+ * an unknown action type is met, 0 is returned. */
+static int
+spoe_process_actions(struct stream *s, struct spoe_context *ctx, int dir)
+{
+	char *cur = b_head(&ctx->buffer);
+	char *end = cur + b_data(&ctx->buffer);
+
+	while (cur < end) {
+		switch ((enum spoe_action_type)*cur++) {
+		case SPOE_ACT_T_SET_VAR:
+			if (!spoe_decode_action_set_var(s, ctx, &cur, end, dir))
+				return 0;
+			break;
+
+		case SPOE_ACT_T_UNSET_VAR:
+			if (!spoe_decode_action_unset_var(s, ctx, &cur, end, dir))
+				return 0;
+			break;
+
+		default:
+			/* unknown action: abort, the payload cannot be
+			 * resynchronized */
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/***************************************************************************
+ * Functions that process SPOE events
+ **************************************************************************/
+/* Finalize the processing time of the current round (t_process), accumulate
+ * it into t_total and, if the agent is configured to do so, export both
+ * values into per-transaction variables. */
+static void
+spoe_update_stats(struct stream *s, struct spoe_agent *agent,
+		  struct spoe_context *ctx, int dir)
+{
+	struct sample tsmp;
+
+	if (ctx->stats.start_ts != 0) {
+		/* processing really started: compute its duration and reset
+		 * all intermediate timestamps */
+		spoe_update_stat_time(&ctx->stats.start_ts, &ctx->stats.t_process);
+		ctx->stats.t_total    += ctx->stats.t_process;
+		ctx->stats.request_ts  = 0;
+		ctx->stats.queue_ts    = 0;
+		ctx->stats.wait_ts     = 0;
+		ctx->stats.response_ts = 0;
+	}
+
+	if (agent->var_t_process) {
+		memset(&tsmp, 0, sizeof(tsmp));
+		smp_set_owner(&tsmp, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+		tsmp.data.u.sint = ctx->stats.t_process;
+		tsmp.data.type   = SMP_T_SINT;
+		spoe_set_var(ctx, "txn", agent->var_t_process,
+			     strlen(agent->var_t_process), &tsmp);
+	}
+
+	if (agent->var_t_total) {
+		memset(&tsmp, 0, sizeof(tsmp));
+		smp_set_owner(&tsmp, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+		tsmp.data.u.sint = ctx->stats.t_total;
+		tsmp.data.type   = SMP_T_SINT;
+		spoe_set_var(ctx, "txn", agent->var_t_total,
+			     strlen(agent->var_t_total), &tsmp);
+	}
+}
+
+static void
+spoe_handle_processing_error(struct stream *s, struct spoe_agent *agent,
+ struct spoe_context *ctx, int dir)
+{
+ if (agent->eps_max > 0)
+ update_freq_ctr(&agent->rt[tid].err_per_sec, 1);
+
+ if (agent->var_on_error) {
+ struct sample smp;
+
+ memset(&smp, 0, sizeof(smp));
+ smp_set_owner(&smp, s->be, s->sess, s, dir|SMP_OPT_FINAL);
+ smp.data.u.sint = ctx->status_code;
+ smp.data.type = SMP_T_BOOL;
+
+ spoe_set_var(ctx, "txn", agent->var_on_error,
+ strlen(agent->var_on_error), &smp);
+ }
+
+ ctx->state = ((agent->flags & SPOE_FL_CONT_ON_ERR)
+ ? SPOE_CTX_ST_READY
+ : SPOE_CTX_ST_NONE);
+}
+
+/* Begin a processing round for <ctx>. Returns 0 without doing anything when a
+ * round is already in progress for this context (the caller must retry
+ * later). Otherwise the per-thread processing counter is bumped, all stats
+ * timestamps are (re)initialized and the direction flag is set to serialize
+ * request and response processing; 1 is returned. */
+static inline int
+spoe_start_processing(struct spoe_agent *agent, struct spoe_context *ctx, int dir)
+{
+	if (ctx->flags & SPOE_CTX_FL_PROCESS)
+		return 0;
+
+	agent->rt[tid].processing++;
+
+	/* reset all timers and durations for this round */
+	ctx->stats.start_ts   = now_ns;
+	ctx->stats.request_ts = now_ns;
+	ctx->stats.t_request  = -1;
+	ctx->stats.t_queue    = -1;
+	ctx->stats.t_waiting  = -1;
+	ctx->stats.t_response = -1;
+	ctx->stats.t_process  = -1;
+
+	ctx->status_code = 0;
+
+	/* Set the right flag to prevent request and response processing
+	 * in same time. */
+	if (dir == SMP_OPT_DIR_REQ)
+		ctx->flags |= SPOE_CTX_FL_REQ_PROCESS;
+	else
+		ctx->flags |= SPOE_CTX_FL_RSP_PROCESS;
+	return 1;
+}
+
+/* End the processing round for <ctx>: detach it from the applet that was
+ * serving it, release the encoding buffer and dequeue it if still queued.
+ * This is a no-op when no processing is in progress. */
+static inline void
+spoe_stop_processing(struct spoe_agent *agent, struct spoe_context *ctx)
+{
+	struct spoe_appctx *sa = ctx->spoe_appctx;
+
+	if (!(ctx->flags & SPOE_CTX_FL_PROCESS))
+		return;
+	_HA_ATOMIC_INC(&agent->counters.nb_processed);
+	if (sa) {
+		if (sa->frag_ctx.ctx == ctx) {
+			/* the applet was dedicated to our fragmented frame:
+			 * detach it and wake it up to serve other streams */
+			sa->frag_ctx.ctx = NULL;
+			spoe_wakeup_appctx(sa->owner);
+		}
+		else
+			/* release one "frame per applet" slot */
+			sa->cur_fpa--;
+	}
+
+	/* Reset the flag to allow next processing */
+	agent->rt[tid].processing--;
+	ctx->flags &= ~(SPOE_CTX_FL_PROCESS|SPOE_CTX_FL_FRAGMENTED);
+
+	/* Reset processing timer */
+	ctx->process_exp = TICK_ETERNITY;
+
+	spoe_release_buffer(&ctx->buffer, &ctx->buffer_wait);
+
+	ctx->spoe_appctx = NULL;
+	ctx->frag_ctx.curmsg = NULL;
+	ctx->frag_ctx.curarg = NULL;
+	ctx->frag_ctx.curoff = 0;
+	ctx->frag_ctx.flags = 0;
+
+	if (!LIST_ISEMPTY(&ctx->list)) {
+		/* still queued: decrement the counter matching the queue it
+		 * sits in, then unlink it */
+		if (ctx->state == SPOE_CTX_ST_SENDING_MSGS)
+			_HA_ATOMIC_DEC(&agent->counters.nb_sending);
+		else
+			_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
+
+		LIST_DELETE(&ctx->list);
+		LIST_INIT(&ctx->list);
+	}
+}
+
+/* Process a list of SPOE messages. First, this functions will process messages
+ * and send them to an agent in a NOTIFY frame. Then, it will wait a ACK frame
+ * to process corresponding actions. During all the processing, it returns 0
+ * and it returns 1 when the processing is finished. If an error occurred, -1
+ * is returned.
+ *
+ * This is the state machine driving one processing round:
+ *   READY -> ENCODING_MSGS -> SENDING_MSGS -> WAITING_ACK -> DONE -> READY
+ * The function is re-entered on each stream wakeup until it returns non-zero. */
+static int
+spoe_process_messages(struct stream *s, struct spoe_context *ctx,
+		      struct list *messages, int dir, int type)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	int ret = 1;
+
+	if (ctx->state == SPOE_CTX_ST_ERROR)
+		goto end;
+
+	/* give up on processing timeout, unless the ACK was already received */
+	if (tick_is_expired(ctx->process_exp, now_ms) && ctx->state != SPOE_CTX_ST_DONE) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+			    " - failed to process messages: timeout\n",
+			    (int)date.tv_sec, (int)date.tv_usec,
+			    agent->id, __FUNCTION__, s);
+		ctx->status_code = SPOE_CTX_ERR_TOUT;
+		goto end;
+	}
+
+	if (ctx->state == SPOE_CTX_ST_READY) {
+		/* error rate limiting: silently skip processing when the
+		 * maximum errors per second was reached */
+		if (agent->eps_max > 0) {
+			if (!freq_ctr_remain(&agent->rt[tid].err_per_sec, agent->eps_max, 0)) {
+				SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+					    " - skip processing of messages: max EPS reached\n",
+					    (int)date.tv_sec, (int)date.tv_usec,
+					    agent->id, __FUNCTION__, s);
+				goto skip;
+			}
+		}
+
+		/* arm the processing timeout and make sure the stream task
+		 * expires no later than it */
+		if (!tick_isset(ctx->process_exp)) {
+			ctx->process_exp = tick_add_ifset(now_ms, agent->timeout.processing);
+			s->task->expire  = tick_first((tick_is_expired(s->task->expire, now_ms) ? 0 : s->task->expire),
+						      ctx->process_exp);
+		}
+		ret = spoe_start_processing(agent, ctx, dir);
+		if (!ret)
+			goto out;
+
+		ctx->state = SPOE_CTX_ST_ENCODING_MSGS;
+		/* fall through */
+	}
+
+	if (ctx->state == SPOE_CTX_ST_ENCODING_MSGS) {
+		if (ctx->stats.request_ts == 0)
+			ctx->stats.request_ts = now_ns;
+		/* wait for a free buffer if none is available yet */
+		if (!spoe_acquire_buffer(&ctx->buffer, &ctx->buffer_wait))
+			goto out;
+		ret = spoe_encode_messages(s, ctx, messages, dir, type);
+		if (ret < 0)
+			goto end;
+		if (!ret)
+			goto skip;
+		if (spoe_queue_context(ctx) < 0)
+			goto end;
+		ctx->state = SPOE_CTX_ST_SENDING_MSGS;
+	}
+
+	if (ctx->state == SPOE_CTX_ST_SENDING_MSGS) {
+		/* kick the applet so it sends the NOTIFY frame */
+		if (ctx->spoe_appctx)
+			spoe_wakeup_appctx(ctx->spoe_appctx->owner);
+		ret = 0;
+		goto out;
+	}
+
+	if (ctx->state == SPOE_CTX_ST_WAITING_ACK) {
+		ret = 0;
+		goto out;
+	}
+
+	if (ctx->state == SPOE_CTX_ST_DONE) {
+		/* ACK received: apply the actions and restart the cycle */
+		spoe_process_actions(s, ctx, dir);
+		ret = 1;
+		ctx->frame_id++;
+		ctx->state = SPOE_CTX_ST_READY;
+		spoe_update_stat_time(&ctx->stats.response_ts, &ctx->stats.t_response);
+		goto end;
+	}
+
+  out:
+	return ret;
+
+  skip:
+	ctx->stats.start_ts = 0;
+	ctx->state = SPOE_CTX_ST_READY;
+	spoe_stop_processing(agent, ctx);
+	return 1;
+
+  end:
+	spoe_update_stats(s, agent, ctx, dir);
+	spoe_stop_processing(agent, ctx);
+	if (ctx->status_code) {
+		_HA_ATOMIC_INC(&agent->counters.nb_errors);
+		spoe_handle_processing_error(s, agent, ctx, dir);
+		ret = 1;
+	}
+	return ret;
+}
+
+/* Process a SPOE group, ie the list of messages attached to the group <grp>.
+ * See spoe_process_messages for details on the return value. On completion
+ * (non-zero return with a computed t_process), the per-round timings and
+ * counters are traced and optionally sent to the agent frontend loggers. */
+static int
+spoe_process_group(struct stream *s, struct spoe_context *ctx,
+		   struct spoe_group *group, int dir)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	int ret;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - ctx-state=%s - Process messages for group=%s\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, s, spoe_ctx_state_str[ctx->state],
+		    group->id);
+
+	/* nothing to do for an empty group */
+	if (LIST_ISEMPTY(&group->messages))
+		return 1;
+
+	ret = spoe_process_messages(s, ctx, &group->messages, dir, SPOE_MSGS_BY_GROUP);
+	if (ret && ctx->stats.t_process != -1) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+			    " - <GROUP:%s> sid=%u st=%u %ld/%ld/%ld/%ld/%ld %u/%u %u/%u %llu/%llu %u/%u\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+			    __FUNCTION__, s, group->id, s->uniq_id, ctx->status_code,
+			    ctx->stats.t_request, ctx->stats.t_queue, ctx->stats.t_waiting,
+			    ctx->stats.t_response, ctx->stats.t_process,
+			    agent->counters.idles, agent->counters.applets,
+			    agent->counters.nb_sending, agent->counters.nb_waiting,
+			    agent->counters.nb_errors, agent->counters.nb_processed,
+			    agent->rt[tid].processing, read_freq_ctr(&agent->rt[tid].processing_per_sec));
+		/* errors are always logged; successes only unless dontlognormal */
+		if (ctx->status_code || !(conf->agent_fe.options2 & PR_O2_NOLOGNORM))
+			send_log(&conf->agent_fe, (!ctx->status_code ? LOG_NOTICE : LOG_WARNING),
+				 "SPOE: [%s] <GROUP:%s> sid=%u st=%u %ld/%ld/%ld/%ld/%ld %u/%u %u/%u %llu/%llu\n",
+				 agent->id, group->id, s->uniq_id, ctx->status_code,
+				 ctx->stats.t_request, ctx->stats.t_queue, ctx->stats.t_waiting,
+				 ctx->stats.t_response, ctx->stats.t_process,
+				 agent->counters.idles, agent->counters.applets,
+				 agent->counters.nb_sending, agent->counters.nb_waiting,
+				 agent->counters.nb_errors, agent->counters.nb_processed);
+	}
+	return ret;
+}
+
+/* Process a SPOE event, ie the list of messages attached to the event <ev>.
+ * See spoe_process_messages for details on the return value. The sample
+ * direction is derived from the event: events before SPOE_EV_ON_SERVER_SESS
+ * happen on the request channel, the others on the response channel. */
+static int
+spoe_process_event(struct stream *s, struct spoe_context *ctx,
+		   enum spoe_event ev)
+{
+	struct spoe_config *conf = FLT_CONF(ctx->filter);
+	struct spoe_agent *agent = conf->agent;
+	int dir, ret;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+		    " - ctx-state=%s - Process messages for event=%s\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, s, spoe_ctx_state_str[ctx->state],
+		    spoe_event_str[ev]);
+
+	dir = ((ev < SPOE_EV_ON_SERVER_SESS) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES);
+
+	/* nothing to do when no message is attached to this event */
+	if (LIST_ISEMPTY(&(ctx->events[ev])))
+		return 1;
+
+	ret = spoe_process_messages(s, ctx, &(ctx->events[ev]), dir, SPOE_MSGS_BY_EVENT);
+	if (ret && ctx->stats.t_process != -1) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+			    " - <EVENT:%s> sid=%u st=%u %ld/%ld/%ld/%ld/%ld %u/%u %u/%u %llu/%llu %u/%u\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+			    __FUNCTION__, s, spoe_event_str[ev], s->uniq_id, ctx->status_code,
+			    ctx->stats.t_request, ctx->stats.t_queue, ctx->stats.t_waiting,
+			    ctx->stats.t_response, ctx->stats.t_process,
+			    agent->counters.idles, agent->counters.applets,
+			    agent->counters.nb_sending, agent->counters.nb_waiting,
+			    agent->counters.nb_errors, agent->counters.nb_processed,
+			    agent->rt[tid].processing, read_freq_ctr(&agent->rt[tid].processing_per_sec));
+		/* errors are always logged; successes only unless dontlognormal */
+		if (ctx->status_code || !(conf->agent_fe.options2 & PR_O2_NOLOGNORM))
+			send_log(&conf->agent_fe, (!ctx->status_code ? LOG_NOTICE : LOG_WARNING),
+				 "SPOE: [%s] <EVENT:%s> sid=%u st=%u %ld/%ld/%ld/%ld/%ld %u/%u %u/%u %llu/%llu\n",
+				 agent->id, spoe_event_str[ev], s->uniq_id, ctx->status_code,
+				 ctx->stats.t_request, ctx->stats.t_queue, ctx->stats.t_waiting,
+				 ctx->stats.t_response, ctx->stats.t_process,
+				 agent->counters.idles, agent->counters.applets,
+				 agent->counters.nb_sending, agent->counters.nb_waiting,
+				 agent->counters.nb_errors, agent->counters.nb_processed);
+	}
+	return ret;
+}
+
+/***************************************************************************
+ * Functions that create/destroy SPOE contexts
+ **************************************************************************/
+/* Try to get the processing buffer <buf>. Returns 1 when a buffer is (or was
+ * already) available. Otherwise <buffer_wait> is queued on the thread's
+ * buffer wait queue and 0 is returned: the caller will be woken up once a
+ * buffer can be allocated. */
+static int
+spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
+{
+	if (buf->size)
+		return 1;
+
+	if (LIST_INLIST(&buffer_wait->list))
+		LIST_DEL_INIT(&buffer_wait->list);
+
+	if (!b_alloc(buf)) {
+		/* no buffer for now: subscribe for a wakeup */
+		LIST_APPEND(&th_ctx->buffer_wq, &buffer_wait->list);
+		return 0;
+	}
+	return 1;
+}
+
+/* Release the processing buffer <buf> if it was allocated and offer it to
+ * other waiters. <buffer_wait> is also removed from the thread's buffer wait
+ * queue if it was still queued. */
+static void
+spoe_release_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
+{
+	if (LIST_INLIST(&buffer_wait->list))
+		LIST_DEL_INIT(&buffer_wait->list);
+
+	if (!buf->size)
+		return;
+
+	b_free(buf);
+	offer_buffers(buffer_wait->target, 1);
+}
+
+/* Buffer-wait callback: wake up the stream owning <ctx> so it retries its
+ * buffer allocation. Always returns 1 (one waiter served). */
+static int
+spoe_wakeup_context(struct spoe_context *ctx)
+{
+	task_wakeup(ctx->strm->task, TASK_WOKEN_MSG);
+	return 1;
+}
+
+/* Allocate and initialize a new SPOE context for stream <s> and attach it to
+ * <filter>. Returns the context or NULL on allocation failure. */
+static struct spoe_context *
+spoe_create_context(struct stream *s, struct filter *filter)
+{
+	struct spoe_config *conf = FLT_CONF(filter);
+	struct spoe_context *ctx;
+
+	ctx = pool_zalloc(pool_head_spoe_ctx);
+	if (ctx == NULL) {
+		return NULL;
+	}
+	ctx->filter      = filter;
+	ctx->state       = SPOE_CTX_ST_NONE;
+	ctx->status_code = SPOE_CTX_ERR_NONE;
+	ctx->flags       = 0;
+	/* events/groups are shared with the agent configuration */
+	ctx->events      = conf->agent->events;
+	ctx->groups      = &conf->agent->groups;
+	/* the processing buffer is allocated lazily, on first use */
+	ctx->buffer      = BUF_NULL;
+	LIST_INIT(&ctx->buffer_wait.list);
+	ctx->buffer_wait.target = ctx;
+	ctx->buffer_wait.wakeup_cb = (int (*)(void *))spoe_wakeup_context;
+	LIST_INIT(&ctx->list);
+
+	ctx->stream_id   = 0;
+	ctx->frame_id    = 1;
+	ctx->process_exp = TICK_ETERNITY;
+
+	ctx->stats.start_ts   = 0;
+	ctx->stats.request_ts = 0;
+	ctx->stats.queue_ts   = 0;
+	ctx->stats.wait_ts    = 0;
+	ctx->stats.response_ts= 0;
+	ctx->stats.t_request  = -1;
+	ctx->stats.t_queue    = -1;
+	ctx->stats.t_waiting  = -1;
+	ctx->stats.t_response = -1;
+	ctx->stats.t_process  = -1;
+	ctx->stats.t_total    = 0;
+
+	ctx->strm   = s;
+	ctx->state  = SPOE_CTX_ST_READY;
+	filter->ctx = ctx;
+
+	return ctx;
+}
+
+/* Release the SPOE context attached to <filter>, stopping any processing in
+ * progress beforehand. It is a no-op when no context is attached. */
+static void
+spoe_destroy_context(struct filter *filter)
+{
+	struct spoe_config *conf = FLT_CONF(filter);
+	struct spoe_context *ctx = filter->ctx;
+
+	if (ctx) {
+		spoe_stop_processing(conf->agent, ctx);
+		pool_free(pool_head_spoe_ctx, ctx);
+		filter->ctx = NULL;
+	}
+}
+
+/* Reset <ctx> so the next event can be processed: back to the READY state,
+ * processing flags cleared and all stats timestamps/durations reinitialized
+ * (t_total is part of the reset since it is a per-processing accumulator). */
+static void
+spoe_reset_context(struct spoe_context *ctx)
+{
+	ctx->state  = SPOE_CTX_ST_READY;
+	ctx->flags &= ~(SPOE_CTX_FL_PROCESS|SPOE_CTX_FL_FRAGMENTED);
+
+	ctx->stats.start_ts   = 0;
+	ctx->stats.request_ts = 0;
+	ctx->stats.queue_ts   = 0;
+	ctx->stats.wait_ts    = 0;
+	ctx->stats.response_ts= 0;
+	ctx->stats.t_request  = -1;
+	ctx->stats.t_queue    = -1;
+	ctx->stats.t_waiting  = -1;
+	ctx->stats.t_response = -1;
+	ctx->stats.t_process  = -1;
+	ctx->stats.t_total    = 0;
+}
+
+
+
+/***************************************************************************
+ * Hooks that manage the filter lifecycle (init/check/deinit)
+ **************************************************************************/
+/* Signal handler: Do a soft stop, wakeup SPOE applet. Walks every proxy and
+ * every SPOE filter configuration to wake all SPOE applets on all threads so
+ * they can notice the stopping condition. */
+static void
+spoe_sig_stop(struct sig_handler *sh)
+{
+	struct proxy *p;
+
+	p = proxies_list;
+	while (p) {
+		struct flt_conf *fconf;
+
+		/* SPOE filter are not initialized for disabled proxies. Move to
+		 * the next one
+		 */
+		if (p->flags & PR_FL_DISABLED) {
+			p = p->next;
+			continue;
+		}
+
+		list_for_each_entry(fconf, &p->filter_configs, list) {
+			struct spoe_config *conf;
+			struct spoe_agent *agent;
+			struct spoe_appctx *spoe_appctx;
+			int i;
+
+			/* skip non-SPOE filters */
+			if (fconf->id != spoe_filter_id)
+				continue;
+
+			conf  = fconf->conf;
+			agent = conf->agent;
+
+			/* wake the applets of every thread, under each
+			 * thread's applet lock */
+			for (i = 0; i < global.nbthread; ++i) {
+				HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
+				list_for_each_entry(spoe_appctx, &agent->rt[i].applets, list)
+					spoe_wakeup_appctx(spoe_appctx->owner);
+				HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &agent->rt[i].lock);
+			}
+		}
+		p = p->next;
+	}
+}
+
+
+
+/* Initialize the SPOE filter. Returns -1 on error, else 0. Finishes the setup
+ * of the internal frontend used to reach SPOE agents and registers the soft
+ * stop signal handler once for the whole process. */
+static int
+spoe_init(struct proxy *px, struct flt_conf *fconf)
+{
+	struct spoe_config *conf = fconf->conf;
+
+        /* conf->agent_fe was already initialized during the config
+         * parsing. Finish initialization. */
+	conf->agent_fe.last_change = ns_to_sec(now_ns);
+	conf->agent_fe.cap = PR_CAP_FE;
+	conf->agent_fe.mode = PR_MODE_TCP;
+	conf->agent_fe.maxconn = 0;
+	conf->agent_fe.options2 |= PR_O2_INDEPSTR;
+	conf->agent_fe.conn_retries = CONN_RETRIES;
+	conf->agent_fe.accept = frontend_accept;
+	conf->agent_fe.srv = NULL;
+	conf->agent_fe.timeout.client = TICK_ETERNITY;
+	conf->agent_fe.fe_req_ana = AN_REQ_SWITCHING_RULES;
+
+	/* the signal handler is global: register it only once */
+	if (!sighandler_registered) {
+		signal_register_fct(0, spoe_sig_stop, 0);
+		sighandler_registered = 1;
+	}
+
+	fconf->flags |= FLT_CFG_FL_HTX;
+	return 0;
+}
+
+/* Free resources allocated by the SPOE filter: the agent and its runtime
+ * structures, the engine id and the configuration itself. */
+static void
+spoe_deinit(struct proxy *px, struct flt_conf *fconf)
+{
+	struct spoe_config *conf = fconf->conf;
+
+	if (conf != NULL) {
+		spoe_release_agent(conf->agent);
+		free(conf->id);
+		free(conf);
+	}
+	fconf->conf = NULL;
+}
+
+/* Check configuration of a SPOE filter for a specified proxy: unique engine
+ * names, resolvable TCP backend, per-thread runtime allocation and logger
+ * resolution. Return 1 on error, else 0. */
+static int
+spoe_check(struct proxy *px, struct flt_conf *fconf)
+{
+	struct flt_conf *f;
+	struct spoe_config *conf = fconf->conf;
+	struct proxy *target;
+	int i;
+
+	/* Check all SPOE filters for proxy <px> to be sure all SPOE agent names
+	 * are unique */
+	list_for_each_entry(f, &px->filter_configs, list) {
+		struct spoe_config *c = f->conf;
+
+		/* This is not an SPOE filter */
+		if (f->id != spoe_filter_id)
+			continue;
+		/* This is the current SPOE filter */
+		if (f == fconf)
+			continue;
+
+		/* Check engine Id. It should be unique */
+		if (strcmp(conf->id, c->id) == 0) {
+			ha_alert("Proxy %s : duplicated name for SPOE engine '%s'.\n",
+				 px->id, conf->id);
+			return 1;
+		}
+	}
+
+	/* resolve the backend used to reach the SPOE agents */
+	target = proxy_be_by_name(conf->agent->b.name);
+	if (target == NULL) {
+		ha_alert("Proxy %s : unknown backend '%s' used by SPOE agent '%s'"
+			 " declared at %s:%d.\n",
+			 px->id, conf->agent->b.name, conf->agent->id,
+			 conf->agent->conf.file, conf->agent->conf.line);
+		return 1;
+	}
+	if (target->mode != PR_MODE_TCP) {
+		/* any non-TCP mode is rejected, so say so instead of the
+		 * misleading "does not support HTTP mode" */
+		ha_alert("Proxy %s : backend '%s' used by SPOE agent '%s' declared"
+			 " at %s:%d must be in tcp mode.\n",
+			 px->id, target->id, conf->agent->id,
+			 conf->agent->conf.file, conf->agent->conf.line);
+		return 1;
+	}
+
+	/* allocate and initialize the per-thread agent runtime contexts */
+	if ((conf->agent->rt = calloc(global.nbthread, sizeof(*conf->agent->rt))) == NULL) {
+		ha_alert("Proxy %s : out of memory initializing SPOE agent '%s' declared at %s:%d.\n",
+			 px->id, conf->agent->id, conf->agent->conf.file, conf->agent->conf.line);
+		return 1;
+	}
+	for (i = 0; i < global.nbthread; ++i) {
+		conf->agent->rt[i].engine_id  = NULL;
+		conf->agent->rt[i].frame_size = conf->agent->max_frame_size;
+		conf->agent->rt[i].processing = 0;
+		conf->agent->rt[i].idles      = 0;
+		LIST_INIT(&conf->agent->rt[i].applets);
+		LIST_INIT(&conf->agent->rt[i].sending_queue);
+		LIST_INIT(&conf->agent->rt[i].waiting_queue);
+		HA_SPIN_INIT(&conf->agent->rt[i].lock);
+	}
+
+	if (postresolve_logger_list(&conf->agent_fe.loggers, "SPOE agent", conf->agent->id) & ERR_CODE)
+		return 1;
+
+	/* the backend is resolved: keep the pointer and drop the name */
+	ha_free(&conf->agent->b.name);
+	conf->agent->b.be = target;
+	return 0;
+}
+
+/* Initializes the SPOE filter for a proxy for a specific thread: generates
+ * the per-thread engine identifier. Returns a negative value if an error
+ * occurs, else 0. */
+static int
+spoe_init_per_thread(struct proxy *p, struct flt_conf *fconf)
+{
+	struct spoe_config *conf = fconf->conf;
+	struct spoe_agent *agent = conf->agent;
+
+	agent->rt[tid].engine_id = generate_pseudo_uuid();
+	return (agent->rt[tid].engine_id != NULL) ? 0 : -1;
+}
+
+/**************************************************************************
+ * Hooks attached to a stream
+ *************************************************************************/
+/* Called when a filter instance is created and attach to a stream. It creates
+ * the context that will be used to process this stream. Pre-analyzers are
+ * registered only for the events that actually have messages attached.
+ * Returns 0 on failure (the filter is then disabled for this stream), 1 on
+ * success. */
+static int
+spoe_start(struct stream *s, struct filter *filter)
+{
+	struct spoe_config  *conf  = FLT_CONF(filter);
+	struct spoe_agent   *agent = conf->agent;
+	struct spoe_context *ctx;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p\n",
+		    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+		    __FUNCTION__, s);
+
+	if ((ctx = spoe_create_context(s, filter)) == NULL) {
+		SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+			    " - failed to create SPOE context\n",
+			    (int)date.tv_sec, (int)date.tv_usec, agent->id,
+			    __FUNCTION__, s);
+		send_log(&conf->agent_fe, LOG_EMERG,
+			 "SPOE: [%s] failed to create SPOE context\n",
+			 agent->id);
+		return 0;
+	}
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_TCP_REQ_FE]))
+		filter->pre_analyzers |= AN_REQ_INSPECT_FE;
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_TCP_REQ_BE]))
+		filter->pre_analyzers |= AN_REQ_INSPECT_BE;
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_TCP_RSP]))
+		filter->pre_analyzers |= AN_RES_INSPECT;
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_HTTP_REQ_FE]))
+		filter->pre_analyzers |= AN_REQ_HTTP_PROCESS_FE;
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_HTTP_REQ_BE]))
+		filter->pre_analyzers |= AN_REQ_HTTP_PROCESS_BE;
+
+	if (!LIST_ISEMPTY(&ctx->events[SPOE_EV_ON_HTTP_RSP]))
+		filter->pre_analyzers |= AN_RES_HTTP_PROCESS_FE;
+
+	return 1;
+}
+
+/* Called when a filter instance is detached from a stream. It release the
+ * attached SPOE context. */
+static void
+spoe_stop(struct stream *s, struct filter *filter)
+{
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(filter))->agent->id,
+		    __FUNCTION__, s);
+	spoe_destroy_context(filter);
+}
+
+
+/*
+ * Called when the stream is woken up because of expired timer. If the SPOE
+ * processing timeout expired, raise TASK_WOKEN_MSG so the analyzers run and
+ * spoe_process_messages() can report the timeout.
+ */
+static void
+spoe_check_timeouts(struct stream *s, struct filter *filter)
+{
+	struct spoe_context *ctx = filter->ctx;
+
+	if (tick_is_expired(ctx->process_exp, now_ms))
+		s->pending_events |= TASK_WOKEN_MSG;
+}
+
+/* Called when we are ready to filter data on a channel. Registers the
+ * channel analyzers needed by the pre-analyzer hooks and triggers the
+ * client-session (request side) or server-session (response side) event once
+ * per stream. Returns 0 to pause the analysis, 1 to let it continue. */
+static int
+spoe_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
+{
+	struct spoe_context *ctx = filter->ctx;
+	int ret = 1;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p - ctx-state=%s"
+		    " - ctx-flags=0x%08x\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(filter))->agent->id,
+		    __FUNCTION__, s, spoe_ctx_state_str[ctx->state], ctx->flags);
+
+	/* the filter was disabled for this stream */
+	if (ctx->state == SPOE_CTX_ST_NONE)
+		goto out;
+
+	if (!(chn->flags & CF_ISRESP)) {
+		if (filter->pre_analyzers & AN_REQ_INSPECT_FE)
+			chn->analysers |= AN_REQ_INSPECT_FE;
+		if (filter->pre_analyzers & AN_REQ_INSPECT_BE)
+			chn->analysers |= AN_REQ_INSPECT_BE;
+
+		/* run the client-session event only once */
+		if (ctx->flags & SPOE_CTX_FL_CLI_CONNECTED)
+			goto out;
+
+		ctx->stream_id = s->uniq_id;
+		ret = spoe_process_event(s, ctx, SPOE_EV_ON_CLIENT_SESS);
+		if (!ret)
+			goto out;
+		ctx->flags |= SPOE_CTX_FL_CLI_CONNECTED;
+	}
+	else {
+		if (filter->pre_analyzers & AN_RES_INSPECT)
+			chn->analysers |= AN_RES_INSPECT;
+
+		/* run the server-session event only once */
+		if (ctx->flags & SPOE_CTX_FL_SRV_CONNECTED)
+			goto out;
+
+		ret = spoe_process_event(s, ctx, SPOE_EV_ON_SERVER_SESS);
+		if (!ret) {
+			/* still in progress: keep the response channel open
+			 * and quiet until the ACK arrives */
+			channel_dont_read(chn);
+			channel_dont_close(chn);
+			goto out;
+		}
+		ctx->flags |= SPOE_CTX_FL_SRV_CONNECTED;
+	}
+
+  out:
+	return ret;
+}
+
+/* Called before a processing happens on a given channel. Maps the analyzer
+ * bit onto the corresponding SPOE event and processes its messages. Returns 0
+ * to pause the analysis (processing in progress), 1 to let it continue. */
+static int
+spoe_chn_pre_analyze(struct stream *s, struct filter *filter,
+		     struct channel *chn, unsigned an_bit)
+{
+	struct spoe_context *ctx = filter->ctx;
+	int ret = 1;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p - ctx-state=%s"
+		    " - ctx-flags=0x%08x - ana=0x%08x\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(filter))->agent->id,
+		    __FUNCTION__, s, spoe_ctx_state_str[ctx->state],
+		    ctx->flags, an_bit);
+
+	/* the filter was disabled for this stream */
+	if (ctx->state == SPOE_CTX_ST_NONE)
+		goto out;
+
+	switch (an_bit) {
+		case AN_REQ_INSPECT_FE:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_TCP_REQ_FE);
+			break;
+		case AN_REQ_INSPECT_BE:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_TCP_REQ_BE);
+			break;
+		case AN_RES_INSPECT:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_TCP_RSP);
+			break;
+		case AN_REQ_HTTP_PROCESS_FE:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_HTTP_REQ_FE);
+			break;
+		case AN_REQ_HTTP_PROCESS_BE:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_HTTP_REQ_BE);
+			break;
+		case AN_RES_HTTP_PROCESS_FE:
+			ret = spoe_process_event(s, ctx, SPOE_EV_ON_HTTP_RSP);
+			break;
+	}
+
+  out:
+	/* while waiting on the response side, keep the channel open and do
+	 * not read more data */
+	if (!ret && (chn->flags & CF_ISRESP)) {
+		channel_dont_read(chn);
+		channel_dont_close(chn);
+	}
+	return ret;
+}
+
+/* Called when the filtering on the channel ends. If no processing is in
+ * progress, the context is reset so it is ready for the next event. Always
+ * returns 1 to let the analysis continue. */
+static int
+spoe_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
+{
+	struct spoe_context *ctx = filter->ctx;
+
+	SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p - ctx-state=%s"
+		    " - ctx-flags=0x%08x\n",
+		    (int)date.tv_sec, (int)date.tv_usec,
+		    ((struct spoe_config *)FLT_CONF(filter))->agent->id,
+		    __FUNCTION__, s, spoe_ctx_state_str[ctx->state], ctx->flags);
+
+	if (!(ctx->flags & SPOE_CTX_FL_PROCESS))
+		spoe_reset_context(ctx);
+
+	return 1;
+}
+
+/********************************************************************
+ * Functions that manage the filter initialization
+ ********************************************************************/
+/* Filter operations registered for the SPOE filter. */
+struct flt_ops spoe_ops = {
+	/* Manage SPOE filter, called for each filter declaration */
+	.init   = spoe_init,
+	.deinit = spoe_deinit,
+	.check  = spoe_check,
+	.init_per_thread = spoe_init_per_thread,
+
+	/* Handle start/stop of SPOE */
+	.attach         = spoe_start,
+	.detach         = spoe_stop,
+	.check_timeouts = spoe_check_timeouts,
+
+	/* Handle channels activity */
+	.channel_start_analyze = spoe_start_analyze,
+	.channel_pre_analyze   = spoe_chn_pre_analyze,
+	.channel_end_analyze   = spoe_end_analyze,
+};
+
+
/* Parse one line of a "spoe-agent" section. <args> holds the pre-split words
 * of the line and <kwm> is the keyword modifier (1 when the keyword was
 * negated with "no"). Lines are ignored when the current config scope does
 * not match the engine name being parsed. Returns a combination of ERR_*
 * flags, 0 on success. On "spoe-agent", the global <curagent> is allocated
 * and filled with defaults; subsequent keywords update it.
 */
static int
cfg_parse_spoe_agent(const char *file, int linenum, char **args, int kwm)
{
	const char *err;
	int i, err_code = 0;

	/* Skip lines that are outside of the engine scope being parsed */
	if ((cfg_scope == NULL && curengine != NULL) ||
	    (cfg_scope != NULL && curengine == NULL) ||
	    (curengine != NULL && cfg_scope != NULL && strcmp(curengine, cfg_scope) != 0))
		goto out;

	if (strcmp(args[0], "spoe-agent") == 0) { /* new spoe-agent section */
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : missing name for spoe-agent section.\n",
				 file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		err = invalid_char(args[1]);
		if (err) {
			ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
				 file, linenum, *err, args[0], args[1]);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		/* Only one spoe-agent section is allowed per SPOE config file */
		if (curagent != NULL) {
			ha_alert("parsing [%s:%d] : another spoe-agent section previously defined.\n",
				 file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}
		if ((curagent = calloc(1, sizeof(*curagent))) == NULL) {
			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		curagent->id = strdup(args[1]);

		curagent->conf.file = strdup(file);
		curagent->conf.line = linenum;

		/* Timeouts default to "never expire"; parse_spoe_flt() warns
		 * later if they are left unset */
		curagent->timeout.hello = TICK_ETERNITY;
		curagent->timeout.idle = TICK_ETERNITY;
		curagent->timeout.processing = TICK_ETERNITY;

		curagent->var_pfx = NULL;
		curagent->var_on_error = NULL;
		curagent->var_t_process = NULL;
		curagent->var_t_total = NULL;
		/* async, pipelining and fragmentation are enabled by default
		 * and can be disabled with "no option ..." */
		curagent->flags = (SPOE_FL_ASYNC | SPOE_FL_PIPELINING | SPOE_FL_SND_FRAGMENTATION);
		curagent->cps_max = 0;
		curagent->eps_max = 0;
		curagent->max_frame_size = MAX_FRAME_SIZE;
		curagent->max_fpa = 20;

		for (i = 0; i < SPOE_EV_EVENTS; ++i)
			LIST_INIT(&curagent->events[i]);
		LIST_INIT(&curagent->groups);
		LIST_INIT(&curagent->messages);
	}
	else if (strcmp(args[0], "use-backend") == 0) {
		/* Backend the agent connections are routed to */
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects a backend name.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code))
			goto out;
		free(curagent->b.name);
		curagent->b.name = strdup(args[1]);
	}
	else if (strcmp(args[0], "messages") == 0) {
		/* Record message names as placeholders in <curmphs>; they are
		 * resolved against spoe-message sections in parse_spoe_flt() */
		int cur_arg = 1;
		while (*args[cur_arg]) {
			struct spoe_placeholder *ph = NULL;

			list_for_each_entry(ph, &curmphs, list) {
				if (strcmp(ph->id, args[cur_arg]) == 0) {
					ha_alert("parsing [%s:%d]: spoe-message '%s' already used.\n",
						 file, linenum, args[cur_arg]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
			}

			if ((ph = calloc(1, sizeof(*ph))) == NULL) {
				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}
			ph->id = strdup(args[cur_arg]);
			LIST_APPEND(&curmphs, &ph->list);
			cur_arg++;
		}
	}
	else if (strcmp(args[0], "groups") == 0) {
		/* Same as "messages" but for group placeholders (<curgphs>) */
		int cur_arg = 1;
		while (*args[cur_arg]) {
			struct spoe_placeholder *ph = NULL;

			list_for_each_entry(ph, &curgphs, list) {
				if (strcmp(ph->id, args[cur_arg]) == 0) {
					ha_alert("parsing [%s:%d]: spoe-group '%s' already used.\n",
						 file, linenum, args[cur_arg]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
			}

			if ((ph = calloc(1, sizeof(*ph))) == NULL) {
				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}
			ph->id = strdup(args[cur_arg]);
			LIST_APPEND(&curgphs, &ph->list);
			cur_arg++;
		}
	}
	else if (strcmp(args[0], "timeout") == 0) {
		/* "timeout hello|idle|processing <time>" */
		unsigned int *tv = NULL;
		const char *res;
		unsigned timeout;

		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : 'timeout' expects 'hello', 'idle' and 'processing'.\n",
				 file, linenum);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(2, file, linenum, args, &err_code))
			goto out;
		if (strcmp(args[1], "hello") == 0)
			tv = &curagent->timeout.hello;
		else if (strcmp(args[1], "idle") == 0)
			tv = &curagent->timeout.idle;
		else if (strcmp(args[1], "processing") == 0)
			tv = &curagent->timeout.processing;
		else {
			ha_alert("parsing [%s:%d] : 'timeout' supports 'hello', 'idle' or 'processing' (got %s).\n",
				 file, linenum, args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (!*args[2]) {
			ha_alert("parsing [%s:%d] : 'timeout %s' expects an integer value (in milliseconds).\n",
				 file, linenum, args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		res = parse_time_err(args[2], &timeout, TIME_UNIT_MS);
		if (res == PARSE_TIME_OVER) {
			ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s %s>, maximum value is 2147483647 ms (~24.8 days).\n",
				 file, linenum, args[2], args[0], args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		else if (res == PARSE_TIME_UNDER) {
			ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s %s>, minimum non-null value is 1 ms.\n",
				 file, linenum, args[2], args[0], args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		else if (res) {
			ha_alert("parsing [%s:%d] : unexpected character '%c' in 'timeout %s'.\n",
				 file, linenum, *res, args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		*tv = MS_TO_TICKS(timeout);
	}
	else if (strcmp(args[0], "option") == 0) {
		if (!*args[1]) {
			ha_alert("parsing [%s:%d]: '%s' expects an option name.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}

		/* Options below this point support negation via "no option" */
		if (strcmp(args[1], "pipelining") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			if (kwm == 1)
				curagent->flags &= ~SPOE_FL_PIPELINING;
			else
				curagent->flags |= SPOE_FL_PIPELINING;
			goto out;
		}
		else if (strcmp(args[1], "async") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			if (kwm == 1)
				curagent->flags &= ~SPOE_FL_ASYNC;
			else
				curagent->flags |= SPOE_FL_ASYNC;
			goto out;
		}
		else if (strcmp(args[1], "send-frag-payload") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			if (kwm == 1)
				curagent->flags &= ~SPOE_FL_SND_FRAGMENTATION;
			else
				curagent->flags |= SPOE_FL_SND_FRAGMENTATION;
			goto out;
		}
		else if (strcmp(args[1], "dontlog-normal") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			if (kwm == 1)
				curpxopts2 &= ~PR_O2_NOLOGNORM;
			else
				curpxopts2 |= PR_O2_NOLOGNORM;
			goto out;
		}

		/* Following options does not support negation */
		if (kwm == 1) {
			ha_alert("parsing [%s:%d]: negation is not supported for option '%s'.\n",
				 file, linenum, args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}

		if (strcmp(args[1], "var-prefix") == 0) {
			/* Prefix used for all variables set by the agent;
			 * restricted to [a-zA-Z0-9_.] */
			char *tmp;

			if (!*args[2]) {
				ha_alert("parsing [%s:%d]: '%s %s' expects a value.\n",
					 file, linenum, args[0],
					 args[1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			if (alertif_too_many_args(2, file, linenum, args, &err_code))
				goto out;
			tmp = args[2];
			while (*tmp) {
				if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
					ha_alert("parsing [%s:%d]: '%s %s' only supports [a-zA-Z0-9_.] chars.\n",
						 file, linenum, args[0], args[1]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
				tmp++;
			}
			curagent->var_pfx = strdup(args[2]);
		}
		else if (strcmp(args[1], "force-set-var") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			curagent->flags |= SPOE_FL_FORCE_SET_VAR;
		}
		else if (strcmp(args[1], "continue-on-error") == 0) {
			if (alertif_too_many_args(1, file, linenum, args, &err_code))
				goto out;
			curagent->flags |= SPOE_FL_CONT_ON_ERR;
		}
		else if (strcmp(args[1], "set-on-error") == 0) {
			/* Variable name set when an error occurs */
			char *tmp;

			if (!*args[2]) {
				ha_alert("parsing [%s:%d]: '%s %s' expects a value.\n",
					 file, linenum, args[0],
					 args[1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			if (alertif_too_many_args(2, file, linenum, args, &err_code))
				goto out;
			tmp = args[2];
			while (*tmp) {
				if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
					ha_alert("parsing [%s:%d]: '%s %s' only supports [a-zA-Z0-9_.] chars.\n",
						 file, linenum, args[0], args[1]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
				tmp++;
			}
			curagent->var_on_error = strdup(args[2]);
		}
		else if (strcmp(args[1], "set-process-time") == 0) {
			/* Variable name receiving the per-event processing time */
			char *tmp;

			if (!*args[2]) {
				ha_alert("parsing [%s:%d]: '%s %s' expects a value.\n",
					 file, linenum, args[0],
					 args[1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			if (alertif_too_many_args(2, file, linenum, args, &err_code))
				goto out;
			tmp = args[2];
			while (*tmp) {
				if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
					ha_alert("parsing [%s:%d]: '%s %s' only supports [a-zA-Z0-9_.] chars.\n",
						 file, linenum, args[0], args[1]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
				tmp++;
			}
			curagent->var_t_process = strdup(args[2]);
		}
		else if (strcmp(args[1], "set-total-time") == 0) {
			/* Variable name receiving the total processing time */
			char *tmp;

			if (!*args[2]) {
				ha_alert("parsing [%s:%d]: '%s %s' expects a value.\n",
					 file, linenum, args[0],
					 args[1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			if (alertif_too_many_args(2, file, linenum, args, &err_code))
				goto out;
			tmp = args[2];
			while (*tmp) {
				if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
					ha_alert("parsing [%s:%d]: '%s %s' only supports [a-zA-Z0-9_.] chars.\n",
						 file, linenum, args[0], args[1]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
				tmp++;
			}
			curagent->var_t_total = strdup(args[2]);
		}
		else {
			ha_alert("parsing [%s:%d]: option '%s' is not supported.\n",
				 file, linenum, args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}
	else if (strcmp(args[0], "maxconnrate") == 0) {
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code))
			goto out;
		curagent->cps_max = atol(args[1]);
	}
	else if (strcmp(args[0], "maxerrrate") == 0) {
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code))
			goto out;
		curagent->eps_max = atol(args[1]);
	}
	else if (strcmp(args[0], "max-frame-size") == 0) {
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code))
			goto out;
		curagent->max_frame_size = atol(args[1]);
		if (curagent->max_frame_size < MIN_FRAME_SIZE ||
		    curagent->max_frame_size > MAX_FRAME_SIZE) {
			ha_alert("parsing [%s:%d] : '%s' expects a positive integer argument in the range [%d, %d].\n",
				 file, linenum, args[0], MIN_FRAME_SIZE, MAX_FRAME_SIZE);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}
	else if (strcmp(args[0], "max-waiting-frames") == 0) {
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code))
			goto out;
		curagent->max_fpa = atol(args[1]);
		if (curagent->max_fpa < 1) {
			ha_alert("parsing [%s:%d] : '%s' expects a positive integer argument.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}
	else if (strcmp(args[0], "register-var-names") == 0) {
		/* Variable names to pre-register; resolved and checked later
		 * in parse_spoe_flt() */
		int cur_arg;

		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : '%s' expects one or more variable names.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		cur_arg = 1;
		while (*args[cur_arg]) {
			struct spoe_var_placeholder *vph;

			if ((vph = calloc(1, sizeof(*vph))) == NULL) {
				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}
			if ((vph->name = strdup(args[cur_arg])) == NULL) {
				free(vph);
				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}
			LIST_APPEND(&curvars, &vph->list);
			cur_arg++;
		}
	}
	else if (strcmp(args[0], "log") == 0) {
		char *errmsg = NULL;

		if (!parse_logger(args, &curloggers, (kwm == 1), file, linenum, &errmsg)) {
			ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}
	else if (*args[0]) {
		ha_alert("parsing [%s:%d] : unknown keyword '%s' in spoe-agent section.\n",
			 file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}
 out:
	return err_code;
}
/* Parse one line of a "spoe-group" section. <args> holds the pre-split words
 * of the line; <kwm> (the keyword modifier) is unused here. Lines outside of
 * the engine scope being parsed are ignored. On "spoe-group", a new group is
 * allocated into the global <curgrp> and appended to <curgrps>; "messages"
 * records message-name placeholders resolved later in parse_spoe_flt().
 * Returns a combination of ERR_* flags, 0 on success.
 */
static int
cfg_parse_spoe_group(const char *file, int linenum, char **args, int kwm)
{
	struct spoe_group *grp;
	const char *err;
	int err_code = 0;

	/* Skip lines that are outside of the engine scope being parsed */
	if ((cfg_scope == NULL && curengine != NULL) ||
	    (cfg_scope != NULL && curengine == NULL) ||
	    (curengine != NULL && cfg_scope != NULL && strcmp(curengine, cfg_scope) != 0))
		goto out;

	if (strcmp(args[0], "spoe-group") == 0) { /* new spoe-group section */
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : missing name for spoe-group section.\n",
				 file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}
		if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
			err_code |= ERR_ABORT;
			goto out;
		}

		err = invalid_char(args[1]);
		if (err) {
			ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
				 file, linenum, *err, args[0], args[1]);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		/* Group names must be unique within the SPOE config file */
		list_for_each_entry(grp, &curgrps, list) {
			if (strcmp(grp->id, args[1]) == 0) {
				ha_alert("parsing [%s:%d]: spoe-group section '%s' has the same"
					 " name as another one declared at %s:%d.\n",
					 file, linenum, args[1], grp->conf.file, grp->conf.line);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
		}

		if ((curgrp = calloc(1, sizeof(*curgrp))) == NULL) {
			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		curgrp->id = strdup(args[1]);
		curgrp->conf.file = strdup(file);
		curgrp->conf.line = linenum;
		LIST_INIT(&curgrp->phs);
		LIST_INIT(&curgrp->messages);
		LIST_APPEND(&curgrps, &curgrp->list);
	}
	else if (strcmp(args[0], "messages") == 0) {
		/* Record message-name placeholders on the current group */
		int cur_arg = 1;
		while (*args[cur_arg]) {
			struct spoe_placeholder *ph = NULL;

			list_for_each_entry(ph, &curgrp->phs, list) {
				if (strcmp(ph->id, args[cur_arg]) == 0) {
					ha_alert("parsing [%s:%d]: spoe-message '%s' already used.\n",
						 file, linenum, args[cur_arg]);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
			}

			if ((ph = calloc(1, sizeof(*ph))) == NULL) {
				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
				err_code |= ERR_ALERT | ERR_ABORT;
				goto out;
			}
			ph->id = strdup(args[cur_arg]);
			LIST_APPEND(&curgrp->phs, &ph->list);
			cur_arg++;
		}
	}
	else if (*args[0]) {
		ha_alert("parsing [%s:%d] : unknown keyword '%s' in spoe-group section.\n",
			 file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}
 out:
	return err_code;
}
+
+static int
+cfg_parse_spoe_message(const char *file, int linenum, char **args, int kwm)
+{
+ struct spoe_message *msg;
+ struct spoe_arg *arg;
+ const char *err;
+ char *errmsg = NULL;
+ int err_code = 0;
+
+ if ((cfg_scope == NULL && curengine != NULL) ||
+ (cfg_scope != NULL && curengine == NULL) ||
+ (curengine != NULL && cfg_scope != NULL && strcmp(curengine, cfg_scope) != 0))
+ goto out;
+
+ if (strcmp(args[0], "spoe-message") == 0) { /* new spoe-message section */
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : missing name for spoe-message section.\n",
+ file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+ if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+ err_code |= ERR_ABORT;
+ goto out;
+ }
+
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+ file, linenum, *err, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ list_for_each_entry(msg, &curmsgs, list) {
+ if (strcmp(msg->id, args[1]) == 0) {
+ ha_alert("parsing [%s:%d]: spoe-message section '%s' has the same"
+ " name as another one declared at %s:%d.\n",
+ file, linenum, args[1], msg->conf.file, msg->conf.line);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+
+ if ((curmsg = calloc(1, sizeof(*curmsg))) == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ curmsg->id = strdup(args[1]);
+ curmsg->id_len = strlen(curmsg->id);
+ curmsg->event = SPOE_EV_NONE;
+ curmsg->conf.file = strdup(file);
+ curmsg->conf.line = linenum;
+ curmsg->nargs = 0;
+ LIST_INIT(&curmsg->args);
+ LIST_INIT(&curmsg->acls);
+ LIST_INIT(&curmsg->by_evt);
+ LIST_INIT(&curmsg->by_grp);
+ LIST_APPEND(&curmsgs, &curmsg->list);
+ }
+ else if (strcmp(args[0], "args") == 0) {
+ int cur_arg = 1;
+
+ curproxy->conf.args.ctx = ARGC_SPOE;
+ curproxy->conf.args.file = file;
+ curproxy->conf.args.line = linenum;
+ while (*args[cur_arg]) {
+ char *delim = strchr(args[cur_arg], '=');
+ int idx = 0;
+
+ if ((arg = calloc(1, sizeof(*arg))) == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto out;
+ }
+
+ if (!delim) {
+ arg->name = NULL;
+ arg->name_len = 0;
+ delim = args[cur_arg];
+ }
+ else {
+ arg->name = my_strndup(args[cur_arg], delim - args[cur_arg]);
+ arg->name_len = delim - args[cur_arg];
+ delim++;
+ }
+ arg->expr = sample_parse_expr((char*[]){delim, NULL},
+ &idx, file, linenum, &errmsg,
+ &curproxy->conf.args, NULL);
+ if (arg->expr == NULL) {
+ ha_alert("parsing [%s:%d] : '%s': %s.\n", file, linenum, args[0], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(arg->name);
+ free(arg);
+ goto out;
+ }
+ curmsg->nargs++;
+ LIST_APPEND(&curmsg->args, &arg->list);
+ cur_arg++;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+ else if (strcmp(args[0], "acl") == 0) {
+ err = invalid_char(args[1]);
+ if (err) {
+ ha_alert("parsing [%s:%d] : character '%c' is not permitted in acl name '%s'.\n",
+ file, linenum, *err, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (strcasecmp(args[1], "or") == 0) {
+ ha_alert("parsing [%s:%d] : acl name '%s' will never match. 'or' is used to express a "
+ "logical disjunction within a condition.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ if (parse_acl((const char **)args + 1, &curmsg->acls, &errmsg, &curproxy->conf.args, file, linenum) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing ACL '%s' : %s.\n",
+ file, linenum, args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (strcmp(args[0], "event") == 0) {
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : missing event name.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ /* if (alertif_too_many_args(1, file, linenum, args, &err_code)) */
+ /* goto out; */
+
+ if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_CLIENT_SESS]) == 0)
+ curmsg->event = SPOE_EV_ON_CLIENT_SESS;
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_SERVER_SESS]) == 0)
+ curmsg->event = SPOE_EV_ON_SERVER_SESS;
+
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_TCP_REQ_FE]) == 0)
+ curmsg->event = SPOE_EV_ON_TCP_REQ_FE;
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_TCP_REQ_BE]) == 0)
+ curmsg->event = SPOE_EV_ON_TCP_REQ_BE;
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_TCP_RSP]) == 0)
+ curmsg->event = SPOE_EV_ON_TCP_RSP;
+
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_HTTP_REQ_FE]) == 0)
+ curmsg->event = SPOE_EV_ON_HTTP_REQ_FE;
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_HTTP_REQ_BE]) == 0)
+ curmsg->event = SPOE_EV_ON_HTTP_REQ_BE;
+ else if (strcmp(args[1], spoe_event_str[SPOE_EV_ON_HTTP_RSP]) == 0)
+ curmsg->event = SPOE_EV_ON_HTTP_RSP;
+ else {
+ ha_alert("parsing [%s:%d] : unknown event '%s'.\n",
+ file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ if (strcmp(args[2], "if") == 0 || strcmp(args[2], "unless") == 0) {
+ struct acl_cond *cond;
+
+ cond = build_acl_cond(file, linenum, &curmsg->acls,
+ curproxy, (const char **)args+2,
+ &errmsg);
+ if (cond == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while "
+ "parsing an 'event %s' condition : %s.\n",
+ file, linenum, args[1], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ curmsg->cond = cond;
+ }
+ else if (*args[2]) {
+ ha_alert("parsing [%s:%d]: 'event %s' expects either 'if' "
+ "or 'unless' followed by a condition but found '%s'.\n",
+ file, linenum, args[1], args[2]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ }
+ else if (!*args[0]) {
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in spoe-message section.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ out:
+ free(errmsg);
+ return err_code;
+}
+
+/* Return -1 on error, else 0 */
+static int
+parse_spoe_flt(char **args, int *cur_arg, struct proxy *px,
+ struct flt_conf *fconf, char **err, void *private)
+{
+ struct list backup_sections;
+ struct spoe_config *conf;
+ struct spoe_message *msg, *msgback;
+ struct spoe_group *grp, *grpback;
+ struct spoe_placeholder *ph, *phback;
+ struct spoe_var_placeholder *vph, *vphback;
+ struct logger *logger, *loggerback;
+ char *file = NULL, *engine = NULL;
+ int ret, pos = *cur_arg + 1;
+
+ LIST_INIT(&curmsgs);
+ LIST_INIT(&curgrps);
+ LIST_INIT(&curmphs);
+ LIST_INIT(&curgphs);
+ LIST_INIT(&curvars);
+ LIST_INIT(&curloggers);
+ curpxopts = 0;
+ curpxopts2 = 0;
+
+ conf = calloc(1, sizeof(*conf));
+ if (conf == NULL) {
+ memprintf(err, "%s: out of memory", args[*cur_arg]);
+ goto error;
+ }
+ conf->proxy = px;
+
+ while (*args[pos]) {
+ if (strcmp(args[pos], "config") == 0) {
+ if (!*args[pos+1]) {
+ memprintf(err, "'%s' : '%s' option without value",
+ args[*cur_arg], args[pos]);
+ goto error;
+ }
+ file = args[pos+1];
+ pos += 2;
+ }
+ else if (strcmp(args[pos], "engine") == 0) {
+ if (!*args[pos+1]) {
+ memprintf(err, "'%s' : '%s' option without value",
+ args[*cur_arg], args[pos]);
+ goto error;
+ }
+ engine = args[pos+1];
+ pos += 2;
+ }
+ else {
+ memprintf(err, "unknown keyword '%s'", args[pos]);
+ goto error;
+ }
+ }
+ if (file == NULL) {
+ memprintf(err, "'%s' : missing config file", args[*cur_arg]);
+ goto error;
+ }
+
+ /* backup sections and register SPOE sections */
+ LIST_INIT(&backup_sections);
+ cfg_backup_sections(&backup_sections);
+ cfg_register_section("spoe-agent", cfg_parse_spoe_agent, NULL);
+ cfg_register_section("spoe-group", cfg_parse_spoe_group, NULL);
+ cfg_register_section("spoe-message", cfg_parse_spoe_message, NULL);
+
+ /* Parse SPOE filter configuration file */
+ BUG_ON(px != curproxy);
+ curengine = engine;
+ curagent = NULL;
+ curmsg = NULL;
+ ret = readcfgfile(file);
+
+ /* unregister SPOE sections and restore previous sections */
+ cfg_unregister_sections();
+ cfg_restore_sections(&backup_sections);
+
+ if (ret == -1) {
+ memprintf(err, "Could not open configuration file %s : %s",
+ file, strerror(errno));
+ goto error;
+ }
+ if (ret & (ERR_ABORT|ERR_FATAL)) {
+ memprintf(err, "Error(s) found in configuration file %s", file);
+ goto error;
+ }
+
+ /* Check SPOE agent */
+ if (curagent == NULL) {
+ memprintf(err, "No SPOE agent found in file %s", file);
+ goto error;
+ }
+ if (curagent->b.name == NULL) {
+ memprintf(err, "No backend declared for SPOE agent '%s' declared at %s:%d",
+ curagent->id, curagent->conf.file, curagent->conf.line);
+ goto error;
+ }
+ if (curagent->timeout.hello == TICK_ETERNITY ||
+ curagent->timeout.idle == TICK_ETERNITY ||
+ curagent->timeout.processing == TICK_ETERNITY) {
+ ha_warning("Proxy '%s': missing timeouts for SPOE agent '%s' declare at %s:%d.\n"
+ " | While not properly invalid, you will certainly encounter various problems\n"
+ " | with such a configuration. To fix this, please ensure that all following\n"
+ " | timeouts are set to a non-zero value: 'hello', 'idle', 'processing'.\n",
+ px->id, curagent->id, curagent->conf.file, curagent->conf.line);
+ }
+ if (curagent->var_pfx == NULL) {
+ char *tmp = curagent->id;
+
+ while (*tmp) {
+ if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
+ memprintf(err, "Invalid variable prefix '%s' for SPOE agent '%s' declared at %s:%d. "
+ "Use 'option var-prefix' to set it. Only [a-zA-Z0-9_.] chars are supported.\n",
+ curagent->id, curagent->id, curagent->conf.file, curagent->conf.line);
+ goto error;
+ }
+ tmp++;
+ }
+ curagent->var_pfx = strdup(curagent->id);
+ }
+
+ if (curagent->var_on_error) {
+ struct arg arg;
+
+ trash.data = snprintf(trash.area, trash.size, "txn.%s.%s",
+ curagent->var_pfx, curagent->var_on_error);
+
+ arg.type = ARGT_STR;
+ arg.data.str.area = trash.area;
+ arg.data.str.data = trash.data;
+ arg.data.str.size = 0; /* Set it to 0 to not release it in vars_check_arg() */
+ if (!vars_check_arg(&arg, err)) {
+ memprintf(err, "SPOE agent '%s': failed to register variable %s.%s (%s)",
+ curagent->id, curagent->var_pfx, curagent->var_on_error, *err);
+ goto error;
+ }
+ }
+
+ if (curagent->var_t_process) {
+ struct arg arg;
+
+ trash.data = snprintf(trash.area, trash.size, "txn.%s.%s",
+ curagent->var_pfx, curagent->var_t_process);
+
+ arg.type = ARGT_STR;
+ arg.data.str.area = trash.area;
+ arg.data.str.data = trash.data;
+ arg.data.str.size = 0; /* Set it to 0 to not release it in vars_check_arg() */
+ if (!vars_check_arg(&arg, err)) {
+ memprintf(err, "SPOE agent '%s': failed to register variable %s.%s (%s)",
+ curagent->id, curagent->var_pfx, curagent->var_t_process, *err);
+ goto error;
+ }
+ }
+
+ if (curagent->var_t_total) {
+ struct arg arg;
+
+ trash.data = snprintf(trash.area, trash.size, "txn.%s.%s",
+ curagent->var_pfx, curagent->var_t_total);
+
+ arg.type = ARGT_STR;
+ arg.data.str.area = trash.area;
+ arg.data.str.data = trash.data;
+ arg.data.str.size = 0; /* Set it to 0 to not release it in vars_check_arg() */
+ if (!vars_check_arg(&arg, err)) {
+ memprintf(err, "SPOE agent '%s': failed to register variable %s.%s (%s)",
+ curagent->id, curagent->var_pfx, curagent->var_t_process, *err);
+ goto error;
+ }
+ }
+
+ if (LIST_ISEMPTY(&curmphs) && LIST_ISEMPTY(&curgphs)) {
+ ha_warning("Proxy '%s': No message/group used by SPOE agent '%s' declared at %s:%d.\n",
+ px->id, curagent->id, curagent->conf.file, curagent->conf.line);
+ goto finish;
+ }
+
+ /* Replace placeholders by the corresponding messages for the SPOE
+ * agent */
+ list_for_each_entry(ph, &curmphs, list) {
+ list_for_each_entry(msg, &curmsgs, list) {
+ struct spoe_arg *arg;
+ unsigned int where;
+
+ if (strcmp(msg->id, ph->id) == 0) {
+ if ((px->cap & (PR_CAP_FE|PR_CAP_BE)) == (PR_CAP_FE|PR_CAP_BE)) {
+ if (msg->event == SPOE_EV_ON_TCP_REQ_BE)
+ msg->event = SPOE_EV_ON_TCP_REQ_FE;
+ if (msg->event == SPOE_EV_ON_HTTP_REQ_BE)
+ msg->event = SPOE_EV_ON_HTTP_REQ_FE;
+ }
+ if (!(px->cap & PR_CAP_FE) && (msg->event == SPOE_EV_ON_CLIENT_SESS ||
+ msg->event == SPOE_EV_ON_TCP_REQ_FE ||
+ msg->event == SPOE_EV_ON_HTTP_REQ_FE)) {
+ ha_warning("Proxy '%s': frontend event used on a backend proxy at %s:%d.\n",
+ px->id, msg->conf.file, msg->conf.line);
+ goto next_mph;
+ }
+ if (msg->event == SPOE_EV_NONE) {
+ ha_warning("Proxy '%s': Ignore SPOE message '%s' without event at %s:%d.\n",
+ px->id, msg->id, msg->conf.file, msg->conf.line);
+ goto next_mph;
+ }
+
+ where = 0;
+ switch (msg->event) {
+ case SPOE_EV_ON_CLIENT_SESS:
+ where |= SMP_VAL_FE_CON_ACC;
+ break;
+
+ case SPOE_EV_ON_TCP_REQ_FE:
+ where |= SMP_VAL_FE_REQ_CNT;
+ break;
+
+ case SPOE_EV_ON_HTTP_REQ_FE:
+ where |= SMP_VAL_FE_HRQ_HDR;
+ break;
+
+ case SPOE_EV_ON_TCP_REQ_BE:
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_REQ_CNT;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_REQ_CNT;
+ break;
+
+ case SPOE_EV_ON_HTTP_REQ_BE:
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+ break;
+
+ case SPOE_EV_ON_SERVER_SESS:
+ where |= SMP_VAL_BE_SRV_CON;
+ break;
+
+ case SPOE_EV_ON_TCP_RSP:
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_RES_CNT;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_RES_CNT;
+ break;
+
+ case SPOE_EV_ON_HTTP_RSP:
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRS_HDR;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRS_HDR;
+ break;
+
+ default:
+ break;
+ }
+
+ list_for_each_entry(arg, &msg->args, list) {
+ if (!(arg->expr->fetch->val & where)) {
+ memprintf(err, "Ignore SPOE message '%s' at %s:%d: "
+ "some args extract information from '%s', "
+ "none of which is available here ('%s')",
+ msg->id, msg->conf.file, msg->conf.line,
+ sample_ckp_names(arg->expr->fetch->use),
+ sample_ckp_names(where));
+ goto error;
+ }
+ }
+
+ msg->agent = curagent;
+ LIST_APPEND(&curagent->events[msg->event], &msg->by_evt);
+ goto next_mph;
+ }
+ }
+ memprintf(err, "SPOE agent '%s' try to use undefined SPOE message '%s' at %s:%d",
+ curagent->id, ph->id, curagent->conf.file, curagent->conf.line);
+ goto error;
+ next_mph:
+ continue;
+ }
+
+ /* Replace placeholders by the corresponding groups for the SPOE
+ * agent */
+ list_for_each_entry(ph, &curgphs, list) {
+ list_for_each_entry_safe(grp, grpback, &curgrps, list) {
+ if (strcmp(grp->id, ph->id) == 0) {
+ grp->agent = curagent;
+ LIST_DELETE(&grp->list);
+ LIST_APPEND(&curagent->groups, &grp->list);
+ goto next_aph;
+ }
+ }
+ memprintf(err, "SPOE agent '%s' try to use undefined SPOE group '%s' at %s:%d",
+ curagent->id, ph->id, curagent->conf.file, curagent->conf.line);
+ goto error;
+ next_aph:
+ continue;
+ }
+
+ /* Replace placeholders by the corresponding message for each SPOE
+ * group of the SPOE agent */
+ list_for_each_entry(grp, &curagent->groups, list) {
+ list_for_each_entry_safe(ph, phback, &grp->phs, list) {
+ list_for_each_entry(msg, &curmsgs, list) {
+ if (strcmp(msg->id, ph->id) == 0) {
+ if (msg->group != NULL) {
+ memprintf(err, "SPOE message '%s' already belongs to "
+ "the SPOE group '%s' declare at %s:%d",
+ msg->id, msg->group->id,
+ msg->group->conf.file,
+ msg->group->conf.line);
+ goto error;
+ }
+
+ /* Scope for arguments are not checked for now. We will check
+ * them only if a rule use the corresponding SPOE group. */
+ msg->agent = curagent;
+ msg->group = grp;
+ LIST_DELETE(&ph->list);
+ LIST_APPEND(&grp->messages, &msg->by_grp);
+ goto next_mph_grp;
+ }
+ }
+ memprintf(err, "SPOE group '%s' try to use undefined SPOE message '%s' at %s:%d",
+ grp->id, ph->id, curagent->conf.file, curagent->conf.line);
+ goto error;
+ next_mph_grp:
+ continue;
+ }
+ }
+
+ finish:
+ /* move curmsgs to the agent message list */
+ curmsgs.n->p = &curagent->messages;
+ curmsgs.p->n = &curagent->messages;
+ curagent->messages = curmsgs;
+ LIST_INIT(&curmsgs);
+
+ conf->id = strdup(engine ? engine : curagent->id);
+ conf->agent = curagent;
+ curagent->spoe_conf = conf;
+
+ /* Start agent's proxy initialization here. It will be finished during
+ * the filter init. */
+ memset(&conf->agent_fe, 0, sizeof(conf->agent_fe));
+ init_new_proxy(&conf->agent_fe);
+ conf->agent_fe.id = conf->agent->id;
+ conf->agent_fe.parent = conf->agent;
+ conf->agent_fe.options |= curpxopts;
+ conf->agent_fe.options2 |= curpxopts2;
+
+ list_for_each_entry_safe(logger, loggerback, &curloggers, list) {
+ LIST_DELETE(&logger->list);
+ LIST_APPEND(&conf->agent_fe.loggers, &logger->list);
+ }
+
+ list_for_each_entry_safe(ph, phback, &curmphs, list) {
+ LIST_DELETE(&ph->list);
+ spoe_release_placeholder(ph);
+ }
+ list_for_each_entry_safe(ph, phback, &curgphs, list) {
+ LIST_DELETE(&ph->list);
+ spoe_release_placeholder(ph);
+ }
+ list_for_each_entry_safe(vph, vphback, &curvars, list) {
+ struct arg arg;
+
+ trash.data = snprintf(trash.area, trash.size, "proc.%s.%s",
+ curagent->var_pfx, vph->name);
+
+ arg.type = ARGT_STR;
+ arg.data.str.area = trash.area;
+ arg.data.str.data = trash.data;
+ arg.data.str.size = 0; /* Set it to 0 to not release it in vars_check_arg() */
+ if (!vars_check_arg(&arg, err)) {
+ memprintf(err, "SPOE agent '%s': failed to register variable %s.%s (%s)",
+ curagent->id, curagent->var_pfx, vph->name, *err);
+ goto error;
+ }
+
+ LIST_DELETE(&vph->list);
+ free(vph->name);
+ free(vph);
+ }
+ list_for_each_entry_safe(grp, grpback, &curgrps, list) {
+ LIST_DELETE(&grp->list);
+ spoe_release_group(grp);
+ }
+ *cur_arg = pos;
+ fconf->id = spoe_filter_id;
+ fconf->ops = &spoe_ops;
+ fconf->conf = conf;
+ return 0;
+
+ error:
+ spoe_release_agent(curagent);
+ list_for_each_entry_safe(ph, phback, &curmphs, list) {
+ LIST_DELETE(&ph->list);
+ spoe_release_placeholder(ph);
+ }
+ list_for_each_entry_safe(ph, phback, &curgphs, list) {
+ LIST_DELETE(&ph->list);
+ spoe_release_placeholder(ph);
+ }
+ list_for_each_entry_safe(vph, vphback, &curvars, list) {
+ LIST_DELETE(&vph->list);
+ free(vph->name);
+ free(vph);
+ }
+ list_for_each_entry_safe(grp, grpback, &curgrps, list) {
+ LIST_DELETE(&grp->list);
+ spoe_release_group(grp);
+ }
+ list_for_each_entry_safe(msg, msgback, &curmsgs, list) {
+ LIST_DELETE(&msg->list);
+ spoe_release_message(msg);
+ }
+ list_for_each_entry_safe(logger, loggerback, &curloggers, list) {
+ LIST_DELETE(&logger->list);
+ free(logger);
+ }
+ free(conf);
+ return -1;
+}
+
+/* Send message of a SPOE group. This is the action_ptr callback of a rule
+ * associated to a "send-spoe-group" action.
+ *
+ * It returns ACT_RET_CONT if processing is finished (with error or not), it returns
+ * ACT_RET_YIELD if the action is in progress. */
+static enum act_return
+spoe_send_group(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct filter *filter;
+ struct spoe_agent *agent = NULL;
+ struct spoe_group *group = NULL;
+ struct spoe_context *ctx = NULL;
+ int ret, dir;
+
+ /* Find the filter instance attached to this stream whose config matches
+ * the one resolved at check time. rule->arg.act.p[0..3] were rebound by
+ * check_send_spoe_group() to <fconf, conf, agent, group>. */
+ list_for_each_entry(filter, &s->strm_flt.filters, list) {
+ if (filter->config == rule->arg.act.p[0]) {
+ agent = rule->arg.act.p[2];
+ group = rule->arg.act.p[3];
+ ctx = filter->ctx;
+ break;
+ }
+ }
+ /* No matching SPOE filter on this stream, or its context was never
+ * started: silently skip the action. */
+ if (agent == NULL || group == NULL || ctx == NULL)
+ return ACT_RET_CONT;
+ if (ctx->state == SPOE_CTX_ST_NONE)
+ return ACT_RET_CONT;
+
+ /* Deduce the sample-fetch direction from the rule location. */
+ switch (rule->from) {
+ case ACT_F_TCP_REQ_SES: dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_REQ_CNT: dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_RES_CNT: dir = SMP_OPT_DIR_RES; break;
+ case ACT_F_HTTP_REQ: dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_HTTP_RES: dir = SMP_OPT_DIR_RES; break;
+ default:
+ SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+ " - internal error while execute spoe-send-group\n",
+ (int)date.tv_sec, (int)date.tv_usec, agent->id,
+ __FUNCTION__, s);
+ send_log(px, LOG_ERR, "SPOE: [%s] internal error while execute spoe-send-group\n",
+ agent->id);
+ return ACT_RET_CONT;
+ }
+
+ /* ret: 1 = done, 0 = in progress (yield), <0 = error (give up). */
+ ret = spoe_process_group(s, ctx, group, dir);
+ if (ret == 1)
+ return ACT_RET_CONT;
+ else if (ret == 0) {
+ /* Still in progress; if the caller cannot wait any longer,
+ * abort the processing and report the error on the context. */
+ if (flags & ACT_OPT_FINAL) {
+ SPOE_PRINTF(stderr, "%d.%06d [SPOE/%-15s] %s: stream=%p"
+ " - failed to process group '%s': interrupted by caller\n",
+ (int)date.tv_sec, (int)date.tv_usec,
+ agent->id, __FUNCTION__, s, group->id);
+ ctx->status_code = SPOE_CTX_ERR_INTERRUPT;
+ spoe_stop_processing(agent, ctx);
+ spoe_handle_processing_error(s, agent, ctx, dir);
+ return ACT_RET_CONT;
+ }
+ return ACT_RET_YIELD;
+ }
+ else
+ return ACT_RET_CONT;
+}
+
+/* Check an "send-spoe-group" action. Here, we'll try to find the real SPOE
+ * group associated to <rule>. The format of an rule using 'send-spoe-group'
+ * action should be:
+ *
+ * (http|tcp)-(request|response) send-spoe-group <engine-id> <group-id>
+ *
+ * So, we'll loop on each configured SPOE filter for the proxy <px> to find the
+ * SPOE engine matching <engine-id>. And then, we'll try to find the good group
+ * matching <group-id>. Finally, we'll check all messages referenced by the SPOE
+ * group.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+static int
+check_send_spoe_group(struct act_rule *rule, struct proxy *px, char **err)
+{
+ struct flt_conf *fconf;
+ struct spoe_config *conf;
+ struct spoe_agent *agent = NULL;
+ struct spoe_group *group;
+ struct spoe_message *msg;
+ char *engine_id = rule->arg.act.p[0];
+ char *group_id = rule->arg.act.p[1];
+ unsigned int where = 0;
+
+ /* Map the rule location to the sample availability bit used below to
+ * validate each message argument's fetch. */
+ switch (rule->from) {
+ case ACT_F_TCP_REQ_SES: where = SMP_VAL_FE_SES_ACC; break;
+ case ACT_F_TCP_REQ_CNT: where = SMP_VAL_FE_REQ_CNT; break;
+ case ACT_F_TCP_RES_CNT: where = SMP_VAL_BE_RES_CNT; break;
+ case ACT_F_HTTP_REQ: where = SMP_VAL_FE_HRQ_HDR; break;
+ case ACT_F_HTTP_RES: where = SMP_VAL_BE_HRS_HDR; break;
+ default:
+ memprintf(err,
+ "internal error, unexpected rule->from=%d, please report this bug!",
+ rule->from);
+ goto error;
+ }
+
+ /* Try to find the SPOE engine by checking all SPOE filters for proxy
+ * <px> */
+ list_for_each_entry(fconf, &px->filter_configs, list) {
+ conf = fconf->conf;
+
+ /* This is not an SPOE filter */
+ if (fconf->id != spoe_filter_id)
+ continue;
+
+ /* This is the good engine */
+ if (strcmp(conf->id, engine_id) == 0) {
+ agent = conf->agent;
+ break;
+ }
+ }
+ if (agent == NULL) {
+ memprintf(err, "unable to find SPOE engine '%s' used by the send-spoe-group '%s'",
+ engine_id, group_id);
+ goto error;
+ }
+
+ /* Try to find the right group */
+ list_for_each_entry(group, &agent->groups, list) {
+ /* This is the good group */
+ if (strcmp(group->id, group_id) == 0)
+ break;
+ }
+ /* Reached the list head without a match: no such group. */
+ if (&group->list == &agent->groups) {
+ memprintf(err, "unable to find SPOE group '%s' into SPOE engine '%s' configuration",
+ group_id, engine_id);
+ goto error;
+ }
+
+ /* Ok, we found the group, we need to check messages and their
+ * arguments */
+ list_for_each_entry(msg, &group->messages, by_grp) {
+ struct spoe_arg *arg;
+
+ list_for_each_entry(arg, &msg->args, list) {
+ if (!(arg->expr->fetch->val & where)) {
+ memprintf(err, "Invalid SPOE message '%s' used by SPOE group '%s' at %s:%d: "
+ "some args extract information from '%s',"
+ "none of which is available here ('%s')",
+ msg->id, group->id, msg->conf.file, msg->conf.line,
+ sample_ckp_names(arg->expr->fetch->use),
+ sample_ckp_names(where));
+ goto error;
+ }
+ }
+ }
+
+ /* Rebind the rule arguments: the id strings duplicated at parse time
+ * are released and replaced by the resolved internal pointers that
+ * spoe_send_group() will use at run time. */
+ free(engine_id);
+ free(group_id);
+ rule->arg.act.p[0] = fconf; /* Associate filter config with the rule */
+ rule->arg.act.p[1] = conf; /* Associate SPOE config with the rule */
+ rule->arg.act.p[2] = agent; /* Associate SPOE agent with the rule */
+ rule->arg.act.p[3] = group; /* Associate SPOE group with the rule */
+ return 1;
+
+ error:
+ free(engine_id);
+ free(group_id);
+ return 0;
+}
+
+/* Parse 'send-spoe-group' action following the format:
+ *
+ * ... send-spoe-group <engine-id> <group-id>
+ *
+ * It returns ACT_RET_PRS_ERR if fails and <err> is filled with an error
+ * message. Otherwise, it returns ACT_RET_PRS_OK and parsing engine and group
+ * ids are saved and used later, when the rule will be checked.
+ */
+static enum act_parse_ret
+parse_send_spoe_group(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ /* Exactly two arguments are expected; anything after them must be an
+ * "if"/"unless" condition keyword. */
+ if (!*args[*orig_arg] || !*args[*orig_arg+1] ||
+ (*args[*orig_arg+2] && strcmp(args[*orig_arg+2], "if") != 0 && strcmp(args[*orig_arg+2], "unless") != 0)) {
+ memprintf(err, "expects 2 arguments: <engine-id> <group-id>");
+ return ACT_RET_PRS_ERR;
+ }
+ /* NOTE(review): strdup() results are not checked here; an allocation
+ * failure would only surface in the check callback — confirm intended. */
+ rule->arg.act.p[0] = strdup(args[*orig_arg]); /* Copy the SPOE engine id */
+ rule->arg.act.p[1] = strdup(args[*orig_arg+1]); /* Copy the SPOE group id */
+
+ (*orig_arg) += 2;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = spoe_send_group;
+ rule->check_ptr = check_send_spoe_group;
+ return ACT_RET_PRS_OK;
+}
+
+
+/* Declare the filter parser for "spoe" keyword */
+static struct flt_kw_list flt_kws = { "SPOE", { }, {
+ { "spoe", parse_spoe_flt, NULL },
+ { NULL, NULL, NULL },
+ }
+};
+
+INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
+
+/* Declare the action parsers for the "send-spoe-group" action keyword */
+static struct action_kw_list tcp_req_action_kws = { { }, {
+ { "send-spoe-group", parse_send_spoe_group },
+ { /* END */ },
+ }
+};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_action_kws);
+
+static struct action_kw_list tcp_res_action_kws = { { }, {
+ { "send-spoe-group", parse_send_spoe_group },
+ { /* END */ },
+ }
+};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_action_kws);
+
+static struct action_kw_list http_req_action_kws = { { }, {
+ { "send-spoe-group", parse_send_spoe_group },
+ { /* END */ },
+ }
+};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_action_kws);
+
+static struct action_kw_list http_res_action_kws = { { }, {
+ { "send-spoe-group", parse_send_spoe_group },
+ { /* END */ },
+ }
+};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_action_kws);
diff --git a/src/flt_trace.c b/src/flt_trace.c
new file mode 100644
index 0000000..bbadfe2
--- /dev/null
+++ b/src/flt_trace.c
@@ -0,0 +1,675 @@
+/*
+ * Stream filters related variables and functions.
+ *
+ * Copyright (C) 2015 Qualys Inc., Christopher Faulet <cfaulet@qualys.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+
+#include <haproxy/api.h>
+#include <haproxy/channel-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/filters.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/stream.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+const char *trace_flt_id = "trace filter";
+
+struct flt_ops trace_ops;
+
+#define TRACE_F_QUIET 0x00000001
+#define TRACE_F_RAND_FWD 0x00000002
+#define TRACE_F_HEXDUMP 0x00000004
+
+struct trace_config {
+ struct proxy *proxy;
+ char *name;
+ unsigned int flags;
+};
+
+#define FLT_TRACE(conf, fmt, ...) \
+ do { \
+ if (!(conf->flags & TRACE_F_QUIET)) \
+ fprintf(stderr, "%d.%06d [%-20s] " fmt "\n", \
+ (int)date.tv_sec, (int)date.tv_usec, (conf)->name,\
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define FLT_STRM_TRACE(conf, strm, fmt, ...) \
+ do { \
+ if (!(conf->flags & TRACE_F_QUIET)) \
+ fprintf(stderr, "%d.%06d [%-20s] [strm %p(%x) 0x%08x 0x%08x] " fmt "\n", \
+ (int)date.tv_sec, (int)date.tv_usec, (conf)->name, \
+ strm, (strm ? ((struct stream *)strm)->uniq_id : ~0U), \
+ (strm ? strm->req.analysers : 0), (strm ? strm->res.analysers : 0), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+
+static const char *
+channel_label(const struct channel *chn)
+{
+ return (chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST";
+}
+
+static const char *
+proxy_mode(const struct stream *s)
+{
+ struct proxy *px = (s->flags & SF_BE_ASSIGNED ? s->be : strm_fe(s));
+
+ return ((px->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
+}
+
+static const char *
+stream_pos(const struct stream *s)
+{
+ return (s->flags & SF_BE_ASSIGNED) ? "backend" : "frontend";
+}
+
+static const char *
+filter_type(const struct filter *f)
+{
+ return (f->flags & FLT_FL_IS_BACKEND_FILTER) ? "backend" : "frontend";
+}
+
+/* Dump <ist> on stderr as a classic hex+ASCII dump, 16 bytes per line with a
+ * mid-line gap every 8 bytes. The last line is padded with blanks so the
+ * ASCII column stays aligned. */
+static void
+trace_hexdump(struct ist ist)
+{
+ int i, j, padding;
+
+ /* number of filler positions needed to complete the last 16-byte row */
+ padding = ((ist.len % 16) ? (16 - ist.len % 16) : 0);
+ for (i = 0; i < ist.len + padding; i++) {
+ if (!(i % 16))
+ fprintf(stderr, "\t0x%06x: ", i);
+ else if (!(i % 8))
+ fprintf(stderr, " ");
+
+ if (i < ist.len)
+ fprintf(stderr, "%02x ", (unsigned char)*(ist.ptr+i));
+ else
+ fprintf(stderr, " ");
+
+ /* print ASCII dump */
+ if (i % 16 == 15) {
+ fprintf(stderr, " |");
+ for(j = i - 15; j <= i && j < ist.len; j++)
+ fprintf(stderr, "%c", (isprint((unsigned char)*(ist.ptr+j)) ? *(ist.ptr+j) : '.'));
+ fprintf(stderr, "|\n");
+ }
+ }
+}
+
+/* Hex-dump <len> bytes starting at <offset> in circular buffer <buf>. The
+ * data may wrap at the end of the buffer's storage, so it is first copied
+ * into a linear temporary (block1 = contiguous tail part, block2 = wrapped
+ * head part) before being passed to trace_hexdump().
+ * NOTE(review): <p> is a VLA sized by caller-supplied <len>; large lengths
+ * would grow the stack — confirm callers keep it bounded. */
+static void
+trace_raw_hexdump(struct buffer *buf, unsigned int offset, unsigned int len)
+{
+ unsigned char p[len];
+ int block1, block2;
+
+ block1 = len;
+ if (block1 > b_contig_data(buf, offset))
+ block1 = b_contig_data(buf, offset);
+ block2 = len - block1;
+
+ memcpy(p, b_peek(buf, offset), block1);
+ memcpy(p+block1, b_orig(buf), block2);
+ trace_hexdump(ist2(p, len));
+}
+
+/* Hex-dump up to <len> bytes of DATA-block payload from HTX message <htx>,
+ * skipping the first <offset> bytes. Non-DATA blocks consume offset/len but
+ * are not printed. */
+static void
+trace_htx_hexdump(struct htx *htx, unsigned int offset, unsigned int len)
+{
+ struct htx_blk *blk;
+
+ for (blk = htx_get_first_blk(htx); blk && len; blk = htx_get_next_blk(htx, blk)) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+ struct ist v;
+
+ /* whole block is before the requested offset: skip it */
+ if (offset >= sz) {
+ offset -= sz;
+ continue;
+ }
+
+ v = htx_get_blk_value(htx, blk);
+ v = istadv(v, offset);
+ offset = 0;
+
+ /* clamp to the remaining requested length */
+ v = isttrim(v, len);
+ len -= v.len;
+ if (type == HTX_BLK_DATA)
+ trace_hexdump(v);
+ }
+}
+
+/* Return the number of bytes held in the contiguous run of DATA blocks
+ * starting at <offset> in <htx>. UNUSED blocks are skipped; any other block
+ * type ends the count. <len> is currently unused by the computation. */
+static unsigned int
+trace_get_htx_datalen(struct htx *htx, unsigned int offset, unsigned int len)
+{
+ struct htx_blk *blk;
+ struct htx_ret htxret = htx_find_offset(htx, offset);
+ uint32_t data = 0;
+
+ /* account for the partial DATA block the offset falls into, if any */
+ blk = htxret.blk;
+ if (blk && htxret.ret && htx_get_blk_type(blk) == HTX_BLK_DATA) {
+ data += htxret.ret;
+ blk = htx_get_next_blk(htx, blk);
+ }
+ while (blk) {
+ if (htx_get_blk_type(blk) == HTX_BLK_UNUSED)
+ goto next;
+ else if (htx_get_blk_type(blk) != HTX_BLK_DATA)
+ break;
+ data += htx_get_blksz(blk);
+ next:
+ blk = htx_get_next_blk(htx, blk);
+ }
+ return data;
+}
+
+/***************************************************************************
+ * Hooks that manage the filter lifecycle (init/check/deinit)
+ **************************************************************************/
+/* Initialize the filter. Returns -1 on error, else 0. */
+static int
+trace_init(struct proxy *px, struct flt_conf *fconf)
+{
+ struct trace_config *conf = fconf->conf;
+
+ /* Build the final trace name "<name>/<proxy>" (or "TRACE/<proxy>" when
+ * no name was configured). memprintf() frees the previous conf->name. */
+ if (conf->name)
+ memprintf(&conf->name, "%s/%s", conf->name, px->id);
+ else
+ memprintf(&conf->name, "TRACE/%s", px->id);
+
+ fconf->flags |= FLT_CFG_FL_HTX;
+ fconf->conf = conf;
+
+ FLT_TRACE(conf, "filter initialized [quiet=%s - fwd random=%s - hexdump=%s]",
+ ((conf->flags & TRACE_F_QUIET) ? "true" : "false"),
+ ((conf->flags & TRACE_F_RAND_FWD) ? "true" : "false"),
+ ((conf->flags & TRACE_F_HEXDUMP) ? "true" : "false"));
+ return 0;
+}
+
+/* Free resources allocated by the trace filter. */
+static void
+trace_deinit(struct proxy *px, struct flt_conf *fconf)
+{
+ struct trace_config *conf = fconf->conf;
+
+ if (conf) {
+ FLT_TRACE(conf, "filter deinitialized");
+ free(conf->name);
+ free(conf);
+ }
+ fconf->conf = NULL;
+}
+
+/* Check configuration of a trace filter for a specified proxy.
+ * Return 1 on error, else 0. */
+static int
+trace_check(struct proxy *px, struct flt_conf *fconf)
+{
+ return 0;
+}
+
+/* Initialize the filter for each thread. Return -1 on error, else 0. */
+static int
+trace_init_per_thread(struct proxy *px, struct flt_conf *fconf)
+{
+ struct trace_config *conf = fconf->conf;
+
+ FLT_TRACE(conf, "filter initialized for thread tid %u", tid);
+ return 0;
+}
+
+/* Free resources allocated by the trace filter for each thread. */
+static void
+trace_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
+{
+ struct trace_config *conf = fconf->conf;
+
+ if (conf)
+ FLT_TRACE(conf, "filter deinitialized for thread tid %u", tid);
+}
+
+/**************************************************************************
+ * Hooks to handle start/stop of streams
+ *************************************************************************/
+/* Called when a filter instance is created and attach to a stream */
+static int
+trace_attach(struct stream *s, struct filter *filter)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: filter-type=%s",
+ __FUNCTION__, filter_type(filter));
+
+ return 1;
+}
+
+/* Called when a filter instance is detached from a stream, just before its
+ * destruction */
+static void
+trace_detach(struct stream *s, struct filter *filter)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: filter-type=%s",
+ __FUNCTION__, filter_type(filter));
+}
+
+/* Called when a stream is created */
+static int
+trace_stream_start(struct stream *s, struct filter *filter)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s",
+ __FUNCTION__);
+ return 0;
+}
+
+
+/* Called when a backend is set for a stream */
+static int
+trace_stream_set_backend(struct stream *s, struct filter *filter,
+ struct proxy *be)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: backend=%s",
+ __FUNCTION__, be->id);
+ return 0;
+}
+
+/* Called when a stream is destroyed */
+static void
+trace_stream_stop(struct stream *s, struct filter *filter)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s",
+ __FUNCTION__);
+}
+
+/* Called when the stream is woken up because of an expired timer */
+static void
+trace_check_timeouts(struct stream *s, struct filter *filter)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s",
+ __FUNCTION__);
+}
+
+/**************************************************************************
+ * Hooks to handle channels activity
+ *************************************************************************/
+/* Called when analyze starts for a given channel */
+static int
+trace_chn_start_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)",
+ __FUNCTION__,
+ channel_label(chn), proxy_mode(s), stream_pos(s));
+ filter->pre_analyzers |= (AN_REQ_ALL | AN_RES_ALL);
+ filter->post_analyzers |= (AN_REQ_ALL | AN_RES_ALL);
+ register_data_filter(s, chn, filter);
+ return 1;
+}
+
+/* Called before a processing happens on a given channel */
+static int
+trace_chn_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn, unsigned an_bit)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+ char *ana;
+
+ switch (an_bit) {
+ case AN_REQ_INSPECT_FE:
+ ana = "AN_REQ_INSPECT_FE";
+ break;
+ case AN_REQ_WAIT_HTTP:
+ ana = "AN_REQ_WAIT_HTTP";
+ break;
+ case AN_REQ_HTTP_BODY:
+ ana = "AN_REQ_HTTP_BODY";
+ break;
+ case AN_REQ_HTTP_PROCESS_FE:
+ ana = "AN_REQ_HTTP_PROCESS_FE";
+ break;
+ case AN_REQ_SWITCHING_RULES:
+ ana = "AN_REQ_SWITCHING_RULES";
+ break;
+ case AN_REQ_INSPECT_BE:
+ ana = "AN_REQ_INSPECT_BE";
+ break;
+ case AN_REQ_HTTP_PROCESS_BE:
+ ana = "AN_REQ_HTTP_PROCESS_BE";
+ break;
+ case AN_REQ_SRV_RULES:
+ ana = "AN_REQ_SRV_RULES";
+ break;
+ case AN_REQ_HTTP_INNER:
+ ana = "AN_REQ_HTTP_INNER";
+ break;
+ case AN_REQ_HTTP_TARPIT:
+ ana = "AN_REQ_HTTP_TARPIT";
+ break;
+ case AN_REQ_STICKING_RULES:
+ ana = "AN_REQ_STICKING_RULES";
+ break;
+ case AN_REQ_PRST_RDP_COOKIE:
+ ana = "AN_REQ_PRST_RDP_COOKIE";
+ break;
+ case AN_REQ_HTTP_XFER_BODY:
+ ana = "AN_REQ_HTTP_XFER_BODY";
+ break;
+ case AN_RES_INSPECT:
+ ana = "AN_RES_INSPECT";
+ break;
+ case AN_RES_WAIT_HTTP:
+ ana = "AN_RES_WAIT_HTTP";
+ break;
+ case AN_RES_HTTP_PROCESS_FE: // AN_RES_HTTP_PROCESS_BE
+ ana = "AN_RES_HTTP_PROCESS_FE/BE";
+ break;
+ case AN_RES_STORE_RULES:
+ ana = "AN_RES_STORE_RULES";
+ break;
+ case AN_RES_HTTP_XFER_BODY:
+ ana = "AN_RES_HTTP_XFER_BODY";
+ break;
+ default:
+ ana = "unknown";
+ }
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s) - "
+ "analyzer=%s - step=%s",
+ __FUNCTION__,
+ channel_label(chn), proxy_mode(s), stream_pos(s),
+ ana, ((chn->analysers & an_bit) ? "PRE" : "POST"));
+ return 1;
+}
+
+/* Called when analyze ends for a given channel */
+static int
+trace_chn_end_analyze(struct stream *s, struct filter *filter,
+ struct channel *chn)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)",
+ __FUNCTION__,
+ channel_label(chn), proxy_mode(s), stream_pos(s));
+ return 1;
+}
+
+/**************************************************************************
+ * Hooks to filter HTTP messages
+ *************************************************************************/
+static int
+trace_http_headers(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_sl *sl = http_get_stline(htx);
+ int32_t pos;
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)\t%.*s %.*s %.*s",
+ __FUNCTION__,
+ channel_label(msg->chn), proxy_mode(s), stream_pos(s),
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist n, v;
+
+ if (type == HTX_BLK_EOH)
+ break;
+ if (type != HTX_BLK_HDR)
+ continue;
+
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+ FLT_STRM_TRACE(conf, s, "\t%.*s: %.*s",
+ (int)n.len, n.ptr, (int)v.len, v.ptr);
+ }
+ return 1;
+}
+
+/* http_payload hook: decide how many of the <len> available bytes at <offset>
+ * may be forwarded. Returns the forwarded byte count. With the
+ * "random-forwarding" option, a random amount is forwarded to exercise
+ * partial-forward paths; the stream task is then woken up so the remainder
+ * is processed on the next call. */
+static int
+trace_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
+ unsigned int offset, unsigned int len)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+ int ret = len;
+
+ if (ret && (conf->flags & TRACE_F_RAND_FWD)) {
+ unsigned int data = trace_get_htx_datalen(htxbuf(&msg->chn->buf), offset, len);
+
+ if (data) {
+ /* pick a random forward size; 0 or anything covering all
+ * the DATA bytes degenerates to forwarding everything */
+ ret = ha_random() % (ret+1);
+ if (!ret || ret >= data)
+ ret = len;
+ }
+ }
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s) - "
+ "offset=%u - len=%u - forward=%d",
+ __FUNCTION__,
+ channel_label(msg->chn), proxy_mode(s), stream_pos(s),
+ offset, len, ret);
+
+ if (conf->flags & TRACE_F_HEXDUMP)
+ trace_htx_hexdump(htxbuf(&msg->chn->buf), offset, ret);
+
+ /* partial forward: reschedule the stream to push the rest later */
+ if (ret != len)
+ task_wakeup(s->task, TASK_WOKEN_MSG);
+ return ret;
+}
+
+static int
+trace_http_end(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)",
+ __FUNCTION__,
+ channel_label(msg->chn), proxy_mode(s), stream_pos(s));
+ return 1;
+}
+
+static void
+trace_http_reset(struct stream *s, struct filter *filter,
+ struct http_msg *msg)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)",
+ __FUNCTION__,
+ channel_label(msg->chn), proxy_mode(s), stream_pos(s));
+}
+
+static void
+trace_http_reply(struct stream *s, struct filter *filter, short status,
+ const struct buffer *msg)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s)",
+ __FUNCTION__, "-", proxy_mode(s), stream_pos(s));
+}
+
+/**************************************************************************
+ * Hooks to filter TCP data
+ *************************************************************************/
+/* tcp_payload hook: same contract as trace_http_payload() but for raw TCP
+ * channels. When the stream carries HTX (SF_HTX), the buffer is inspected as
+ * an HTX message; otherwise it is treated as raw bytes. Returns the number
+ * of bytes allowed to be forwarded. */
+static int
+trace_tcp_payload(struct stream *s, struct filter *filter, struct channel *chn,
+ unsigned int offset, unsigned int len)
+{
+ struct trace_config *conf = FLT_CONF(filter);
+ int ret = len;
+
+ if (s->flags & SF_HTX) {
+ /* HTX path: randomize only across the contiguous DATA bytes */
+ if (ret && (conf->flags & TRACE_F_RAND_FWD)) {
+ unsigned int data = trace_get_htx_datalen(htxbuf(&chn->buf), offset, len);
+
+ if (data) {
+ ret = ha_random() % (ret+1);
+ if (!ret || ret >= data)
+ ret = len;
+ }
+ }
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s) - "
+ "offset=%u - len=%u - forward=%d",
+ __FUNCTION__,
+ channel_label(chn), proxy_mode(s), stream_pos(s),
+ offset, len, ret);
+
+ if (conf->flags & TRACE_F_HEXDUMP)
+ trace_htx_hexdump(htxbuf(&chn->buf), offset, ret);
+ }
+ else {
+
+ /* raw path: any random amount up to <len> may be forwarded */
+ if (ret && (conf->flags & TRACE_F_RAND_FWD))
+ ret = ha_random() % (ret+1);
+
+ FLT_STRM_TRACE(conf, s, "%-25s: channel=%-10s - mode=%-5s (%s) - "
+ "offset=%u - len=%u - forward=%d",
+ __FUNCTION__,
+ channel_label(chn), proxy_mode(s), stream_pos(s),
+ offset, len, ret);
+
+ if (conf->flags & TRACE_F_HEXDUMP)
+ trace_raw_hexdump(&chn->buf, offset, ret);
+ }
+
+ /* partial forward: reschedule the stream to push the rest later */
+ if (ret != len)
+ task_wakeup(s->task, TASK_WOKEN_MSG);
+ return ret;
+}
+/********************************************************************
+ * Functions that manage the filter initialization
+ ********************************************************************/
+struct flt_ops trace_ops = {
+ /* Manage trace filter, called for each filter declaration */
+ .init = trace_init,
+ .deinit = trace_deinit,
+ .check = trace_check,
+ .init_per_thread = trace_init_per_thread,
+ .deinit_per_thread = trace_deinit_per_thread,
+
+ /* Handle start/stop of streams */
+ .attach = trace_attach,
+ .detach = trace_detach,
+ .stream_start = trace_stream_start,
+ .stream_set_backend = trace_stream_set_backend,
+ .stream_stop = trace_stream_stop,
+ .check_timeouts = trace_check_timeouts,
+
+ /* Handle channels activity */
+ .channel_start_analyze = trace_chn_start_analyze,
+ .channel_pre_analyze = trace_chn_analyze,
+ .channel_post_analyze = trace_chn_analyze,
+ .channel_end_analyze = trace_chn_end_analyze,
+
+ /* Filter HTTP requests and responses */
+ .http_headers = trace_http_headers,
+ .http_payload = trace_http_payload,
+ .http_end = trace_http_end,
+ .http_reset = trace_http_reset,
+ .http_reply = trace_http_reply,
+
+ /* Filter TCP data */
+ .tcp_payload = trace_tcp_payload,
+};
+
+/* Parse the "trace" filter keyword and its options (name <id>, quiet,
+ * random-parsing (ignored), random-forwarding, hexdump), filling <fconf>.
+ * Return -1 on error, else 0 */
+static int
+parse_trace_flt(char **args, int *cur_arg, struct proxy *px,
+ struct flt_conf *fconf, char **err, void *private)
+{
+ struct trace_config *conf;
+ int pos = *cur_arg;
+
+ conf = calloc(1, sizeof(*conf));
+ if (!conf) {
+ memprintf(err, "%s: out of memory", args[*cur_arg]);
+ return -1;
+ }
+ conf->proxy = px;
+ conf->flags = 0;
+ if (strcmp(args[pos], "trace") == 0) {
+ pos++;
+
+ /* consume known options until an unknown token or end of args */
+ while (*args[pos]) {
+ if (strcmp(args[pos], "name") == 0) {
+ if (!*args[pos + 1]) {
+ memprintf(err, "'%s' : '%s' option without value",
+ args[*cur_arg], args[pos]);
+ goto error;
+ }
+ conf->name = strdup(args[pos + 1]);
+ if (!conf->name) {
+ memprintf(err, "%s: out of memory", args[*cur_arg]);
+ goto error;
+ }
+ /* extra increment to also skip the option's value */
+ pos++;
+ }
+ else if (strcmp(args[pos], "quiet") == 0)
+ conf->flags |= TRACE_F_QUIET;
+ else if (strcmp(args[pos], "random-parsing") == 0)
+ ; // ignore
+ else if (strcmp(args[pos], "random-forwarding") == 0)
+ conf->flags |= TRACE_F_RAND_FWD;
+ else if (strcmp(args[pos], "hexdump") == 0)
+ conf->flags |= TRACE_F_HEXDUMP;
+ else
+ break;
+ pos++;
+ }
+ *cur_arg = pos;
+ fconf->id = trace_flt_id;
+ fconf->ops = &trace_ops;
+ }
+
+ fconf->conf = conf;
+ return 0;
+
+ error:
+ /* NOTE(review): the NULL guard before free() is redundant —
+ * free(NULL) is a no-op. */
+ if (conf->name)
+ free(conf->name);
+ free(conf);
+ return -1;
+}
+
+/* Declare the filter parser for "trace" keyword */
+static struct flt_kw_list flt_kws = { "TRACE", { }, {
+ { "trace", parse_trace_flt, NULL },
+ { NULL, NULL, NULL },
+ }
+};
+
+INITCALL1(STG_REGISTER, flt_register_keywords, &flt_kws);
diff --git a/src/freq_ctr.c b/src/freq_ctr.c
new file mode 100644
index 0000000..1361333
--- /dev/null
+++ b/src/freq_ctr.c
@@ -0,0 +1,218 @@
+/*
+ * Event rate calculation functions.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/tools.h>
+
+/* Update a frequency counter by <inc> incremental units. It is automatically
+ * rotated if the period is over. It is important that it correctly initializes
+ * a null area. This one works on frequency counters which have a period
+ * different from one second. It relies on the process-wide clock that is
+ * guaranteed to be monotonic. It's important to avoid forced rotates between
+ * threads. A faster wrapper (update_freq_ctr_period) should be used instead,
+ * which uses the thread's local time whenever possible and falls back to this
+ * one when needed (less than 0.003% of the time).
+ */
+uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
+{
+ uint curr_tick;
+ uint32_t now_ms_tmp;
+
+ /* atomically update the counter if still within the period, even if
+ * a rotation is in progress (no big deal).
+ */
+ for (;; __ha_cpu_relax()) {
+ curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
+
+ if (now_ms_tmp - curr_tick < period)
+ return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
+
+ /* a rotation is needed. While extremely rare, contention may
+ * happen because it will be triggered on time, and all threads
+ * see the time change simultaneously.
+ */
+ /* bit 0 of curr_tick serves as a spinlock: an odd value means
+ * another thread is currently rotating the counter. */
+ if (!(curr_tick & 1) &&
+ HA_ATOMIC_CAS(&ctr->curr_tick, &curr_tick, curr_tick | 0x1))
+ break;
+ }
+
+ /* atomically switch the new period into the old one without losing any
+ * potential concurrent update. We're the only one performing the rotate
+ * (locked above), others are only adding positive values to curr_ctr.
+ */
+ HA_ATOMIC_STORE(&ctr->prev_ctr, HA_ATOMIC_XCHG(&ctr->curr_ctr, inc));
+ curr_tick += period;
+ if (likely(now_ms_tmp - curr_tick >= period)) {
+ /* we missed at least two periods */
+ HA_ATOMIC_STORE(&ctr->prev_ctr, 0);
+ curr_tick = now_ms_tmp;
+ }
+
+ /* release the lock and update the time in case of rotate. */
+ HA_ATOMIC_STORE(&ctr->curr_tick, curr_tick & ~1);
+ return inc;
+}
+
+/* Returns the total number of events over the current + last period, including
+ * a number of already pending events <pend>. The average frequency will be
+ * obtained by dividing the output by <period>. This is essentially made to
+ * ease implementation of higher-level read functions.
+ *
+ * As a special case, if pend < 0, it's assumed there are no pending
+ * events and a flapping correction must be applied at the end. This is used by
+ * read_freq_ctr_period() to avoid reporting ups and downs on low-frequency
+ * events when the past value is <= 1.
+ */
+ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend)
+{
+ ullong curr, past, old_curr, old_past;
+ uint tick, old_tick;
+ int remain;
+
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+ past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
+
+ /* Lock-free stable read: the three fields are re-read until two
+ * consecutive snapshots match and the rotation lock bit (bit 0 of
+ * curr_tick) is clear. The redoN labels re-load only what is needed
+ * after the first mismatching field. */
+ while (1) {
+ if (tick & 0x1) // change in progress
+ goto redo0;
+
+ old_tick = tick;
+ old_curr = curr;
+ old_past = past;
+
+ /* now let's load the values a second time and make sure they
+ * did not change, which will indicate it was a stable reading.
+ */
+
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ if (tick & 0x1) // change in progress
+ goto redo0;
+
+ if (tick != old_tick)
+ goto redo1;
+
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+ if (curr != old_curr)
+ goto redo2;
+
+ past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
+ if (past != old_past)
+ goto redo3;
+
+ /* all values match between two loads, they're stable, let's
+ * quit now.
+ */
+ break;
+ redo0:
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ redo1:
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+ redo2:
+ past = HA_ATOMIC_LOAD(&ctr->prev_ctr);
+ redo3:
+ __ha_cpu_relax();
+ };
+
+ /* <remain> is the time still left in the current period */
+ remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
+ if (unlikely(remain < 0)) {
+ /* We're past the first period, check if we can still report a
+ * part of last period or if we're too far away.
+ */
+ remain += period;
+ past = (remain >= 0) ? curr : 0;
+ curr = 0;
+ }
+
+ if (pend < 0) {
+ /* enable flapping correction at very low rates */
+ pend = 0;
+ if (!curr && past <= 1)
+ return past * period;
+ }
+
+ /* compute the total number of confirmed events over the period */
+ return past * remain + (curr + pend) * period;
+}
+
+/* Returns the excess of events (may be negative) over the current period for
+ * target frequency <freq>. It returns 0 if the counter is in the future or if
+ * the counter is empty. The result considers the position of the current time
+ * within the current period.
+ *
+ * The caller may safely add new events if result is negative or null.
+ */
+int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq)
+{
+ ullong curr, old_curr;
+ uint tick, old_tick;
+ int elapsed;
+
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+
+ /* Lock-free stable read of <tick, curr>, same scheme as in
+ * freq_ctr_total(): retry until two consecutive snapshots match and
+ * the rotation lock bit (bit 0 of curr_tick) is clear. */
+ while (1) {
+ if (tick & 0x1) // change in progress
+ goto redo0;
+
+ old_tick = tick;
+ old_curr = curr;
+
+ /* now let's load the values a second time and make sure they
+ * did not change, which will indicate it was a stable reading.
+ */
+
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ if (tick & 0x1) // change in progress
+ goto redo0;
+
+ if (tick != old_tick)
+ goto redo1;
+
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+ if (curr != old_curr)
+ goto redo2;
+
+ /* all values match between two loads, they're stable, let's
+ * quit now.
+ */
+ break;
+ redo0:
+ tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
+ redo1:
+ curr = HA_ATOMIC_LOAD(&ctr->curr_ctr);
+ redo2:
+ __ha_cpu_relax();
+ };
+
+ if (!curr && !tick) {
+ /* The counter is empty, there is no overshoot */
+ return 0;
+ }
+
+ /* NOTE(review): in "elapsed > period" the signed <elapsed> is compared
+ * to the unsigned <period>; safe here only because elapsed < 0 is
+ * tested first — keep the ordering. */
+ elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
+ if (unlikely(elapsed < 0 || elapsed > period)) {
+ /* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
+ return 0;
+ }
+
+ /* overshoot = measured events minus the pro-rated allowance
+ * (freq * elapsed / period) for the elapsed fraction of the period */
+ return curr - div64_32((uint64_t)elapsed * freq, period);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/frontend.c b/src/frontend.c
new file mode 100644
index 0000000..ad2e39e
--- /dev/null
+++ b/src/frontend.c
@@ -0,0 +1,339 @@
+/*
+ * Frontend variables and functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/chunk.h>
+#include <haproxy/connection.h>
+#include <haproxy/fd.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/log.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+
+
+/* Finish a stream accept() for a proxy (TCP or HTTP). It returns a negative
+ * value in case of a critical failure which must cause the listener to be
+ * disabled, a positive or null value in case of success.
+ */
+int frontend_accept(struct stream *s)
+{
+	const struct sockaddr_storage *src, *dst;
+	struct session *sess = s->sess;
+	struct connection *conn = objt_conn(sess->origin);
+	struct listener *l = sess->listener;
+	struct proxy *fe = sess->fe;
+
+	/* connection logging: only for TCP/HTTP frontends with loggers */
+	if ((fe->mode == PR_MODE_TCP || fe->mode == PR_MODE_HTTP)
+	    && (!LIST_ISEMPTY(&fe->loggers))) {
+		if (likely(!LIST_ISEMPTY(&fe->logformat))) {
+			/* we have the client ip: a log-format is configured,
+			 * so just release the client-IP wait condition and
+			 * emit the log once nothing else is pending.
+			 */
+			if (s->logs.logwait & LW_CLIP)
+				if (!(s->logs.logwait &= ~(LW_CLIP|LW_INIT)))
+					s->do_log(s);
+		}
+		else if (conn) {
+			/* no log-format: emit a legacy "Connect from" line */
+			src = sc_src(s->scf);
+			if (!src)
+				send_log(fe, LOG_INFO, "Connect from unknown source to listener %d (%s/%s)\n",
+					 l->luid, fe->id, (fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
+			else {
+				char pn[INET6_ADDRSTRLEN], sn[INET6_ADDRSTRLEN];
+				int port;
+
+				switch (addr_to_str(src, pn, sizeof(pn))) {
+				case AF_INET:
+				case AF_INET6:
+					dst = sc_dst(s->scf);
+					if (dst) {
+						addr_to_str(dst, sn, sizeof(sn));
+						port = get_host_port(dst);
+					} else {
+						strlcpy2(sn, "undetermined address", sizeof(sn));
+						port = 0;
+					}
+					send_log(fe, LOG_INFO, "Connect from %s:%d to %s:%d (%s/%s)\n",
+						 pn, get_host_port(src),
+						 sn, port,
+						 fe->id, (fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
+					break;
+				case AF_UNIX:
+					/* UNIX socket, only the destination is known */
+					send_log(fe, LOG_INFO, "Connect to unix:%d (%s/%s)\n",
+						 l->luid,
+						 fe->id, (fe->mode == PR_MODE_HTTP) ? "HTTP" : "TCP");
+					break;
+				}
+			}
+		}
+	}
+
+	/* in debug mode (unless quiet and not verbose), dump a one-line
+	 * summary of the accepted connection to stdout (fd #1).
+	 */
+	if (unlikely((global.mode & MODE_DEBUG) && conn &&
+		     (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
+		char pn[INET6_ADDRSTRLEN];
+		char alpn[16] = "<none>";
+		const char *alpn_str = NULL;
+		int alpn_len;
+
+		/* try to report the ALPN value when available (also works for NPN) */
+		if (conn == sc_conn(s->scf)) {
+			if (conn_get_alpn(conn, &alpn_str, &alpn_len) && alpn_str) {
+				/* truncate to the local buffer's size */
+				int len = MIN(alpn_len, sizeof(alpn) - 1);
+				memcpy(alpn, alpn_str, len);
+				alpn[len] = 0;
+			}
+		}
+
+		src = sc_src(s->scf);
+		if (!src) {
+			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [listener:%d] ALPN=%s\n",
+				     s->uniq_id, fe->id, (unsigned short)l->rx.fd, (unsigned short)conn->handle.fd,
+				     l->luid, alpn);
+		}
+		else switch (addr_to_str(src, pn, sizeof(pn))) {
+		case AF_INET:
+		case AF_INET6:
+			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [%s:%d] ALPN=%s\n",
+				     s->uniq_id, fe->id, (unsigned short)l->rx.fd, (unsigned short)conn->handle.fd,
+				     pn, get_host_port(src), alpn);
+			break;
+		case AF_UNIX:
+			/* UNIX socket, only the destination is known */
+			chunk_printf(&trash, "%08x:%s.accept(%04x)=%04x from [unix:%d] ALPN=%s\n",
+				     s->uniq_id, fe->id, (unsigned short)l->rx.fd, (unsigned short)conn->handle.fd,
+				     l->luid, alpn);
+			break;
+		}
+
+		DISGUISE(write(1, trash.area, trash.data));
+	}
+
+	if (fe->mode == PR_MODE_HTTP)
+		s->scf->flags |= SC_FL_RCV_ONCE; /* one read is usually enough */
+
+	/* allocate the request/response header capture slots if the frontend
+	 * declares any; freed in the unrolling path below on later failure.
+	 */
+	if (unlikely(fe->nb_req_cap > 0)) {
+		if ((s->req_cap = pool_zalloc(fe->req_cap_pool)) == NULL)
+			goto out_return;	/* no memory */
+	}
+
+	if (unlikely(fe->nb_rsp_cap > 0)) {
+		if ((s->res_cap = pool_zalloc(fe->rsp_cap_pool)) == NULL)
+			goto out_free_reqcap;	/* no memory */
+	}
+
+	/* create the HTTP transaction when the frontend requires one */
+	if ((fe->http_needed || IS_HTX_STRM(s)) && !http_create_txn(s))
+		goto out_free_rspcap;
+
+	/* everything's OK, let's go on */
+	return 1;
+
+	/* Error unrolling */
+ out_free_rspcap:
+	pool_free(fe->rsp_cap_pool, s->res_cap);
+ out_free_reqcap:
+	pool_free(fe->req_cap_pool, s->req_cap);
+ out_return:
+	return -1;
+}
+
+/* Increment current active connection counter. This ensures that global
+ * maxconn is not reached or exceeded. This must be done for every new frontend
+ * connection allocation.
+ *
+ * Returns the new actconn global value. If maxconn reached or exceeded, 0 is
+ * returned : the connection allocation should be cancelled.
+ */
+int increment_actconn()
+{
+	unsigned int count, next_actconn;
+
+	/* CAS loop: on failure, _HA_ATOMIC_CAS refreshes <count> with the
+	 * current value of actconn, so the maxconn bound is re-checked on
+	 * every retry.
+	 * NOTE(review): <count> is passed through an (int *) cast; this
+	 * assumes actconn's declared type has the same size/representation
+	 * as int — confirm against its declaration.
+	 */
+	do {
+		count = actconn;
+		if (unlikely(count >= global.maxconn)) {
+			/* maxconn reached */
+			next_actconn = 0;
+			goto end;
+		}
+
+		/* try to increment actconn */
+		next_actconn = count + 1;
+	} while (!_HA_ATOMIC_CAS(&actconn, (int *)(&count), next_actconn) && __ha_cpu_relax());
+
+  end:
+	return next_actconn;
+}
+
+/************************************************************************/
+/* All supported sample and ACL keywords must be declared here. */
+/************************************************************************/
+
+/* fill the sample with the numeric unique id (uuid) of the session's
+ * frontend. Always succeeds.
+ */
+static int
+smp_fetch_fe_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const struct proxy *fe = smp->sess->fe;
+
+	smp->data.u.sint = fe->uuid;
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_SESS;
+	return 1;
+}
+
+/* return the name of the session's frontend as a constant string sample.
+ * Fails if the frontend has no name.
+ */
+static int
+smp_fetch_fe_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const char *name = smp->sess->fe->id;
+
+	smp->data.u.str.area = (char *)name;
+	if (!name)
+		return 0;
+
+	smp->flags = SMP_F_CONST;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.data = strlen(name);
+	return 1;
+}
+
+/* return the name of the frontend's default backend as a constant string
+ * sample. Fails when no default backend is configured or it has no name.
+ */
+static int
+smp_fetch_fe_defbe(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const struct proxy *be = smp->sess->fe->defbe.be;
+
+	if (!be)
+		return 0;
+
+	smp->data.u.str.area = (char *)be->id;
+	if (!smp->data.u.str.area)
+		return 0;
+
+	smp->flags = SMP_F_CONST;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+	return 1;
+}
+
+/* set temp integer to the number of HTTP requests per second reaching the
+ * frontend. Accepts exactly 1 argument. Argument is a frontend, other types
+ * will cause an undefined behaviour.
+ */
+static int
+smp_fetch_fe_req_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (!px)
+		return 0;
+
+	/* a "defaults" section argument designates the current proxy */
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->data.u.sint = read_freq_ctr(&px->fe_req_per_sec);
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TEST;
+	return 1;
+}
+
+/* set temp integer to the number of connections per second reaching the
+ * frontend. Accepts exactly 1 argument. Argument is a frontend, other types
+ * will cause an undefined behaviour.
+ */
+static int
+smp_fetch_fe_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (!px)
+		return 0;
+
+	/* a "defaults" section argument designates the current proxy */
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->data.u.sint = read_freq_ctr(&px->fe_sess_per_sec);
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TEST;
+	return 1;
+}
+
+/* set temp integer to the number of concurrent connections on the frontend.
+ * Accepts exactly 1 argument. Argument is a frontend, other types will cause
+ * an undefined behaviour.
+ */
+static int
+smp_fetch_fe_conn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct proxy *px = args->data.prx;
+
+	if (!px)
+		return 0;
+
+	/* a "defaults" section argument designates the current proxy */
+	if (px->cap & PR_CAP_DEF)
+		px = smp->px;
+
+	smp->data.u.sint = px->feconn;
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TEST;
+	return 1;
+}
+
+/* set temp integer to the configured client timeout of the session's
+ * frontend, converted to milliseconds.
+ */
+static int
+smp_fetch_fe_client_timeout(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	/* parameter renamed km -> kw for consistency with the other
+	 * sample fetch functions in this file.
+	 */
+	smp->flags = SMP_F_VOL_TXN;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = TICKS_TO_MS(smp->sess->fe->timeout.client);
+	return 1;
+}
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ *
+ * Columns: keyword, fetch function, argument mask, argument validation
+ * function, output sample type, allowed fetch location.
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "fe_client_timeout", smp_fetch_fe_client_timeout, 0, NULL, SMP_T_SINT, SMP_USE_FTEND, },
+	{ "fe_conn", smp_fetch_fe_conn, ARG1(1,FE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "fe_defbe", smp_fetch_fe_defbe, 0, NULL, SMP_T_STR, SMP_USE_FTEND, },
+	{ "fe_id", smp_fetch_fe_id, 0, NULL, SMP_T_SINT, SMP_USE_FTEND, },
+	{ "fe_name", smp_fetch_fe_name, 0, NULL, SMP_T_STR, SMP_USE_FTEND, },
+	{ "fe_req_rate", smp_fetch_fe_req_rate, ARG1(1,FE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "fe_sess_rate", smp_fetch_fe_sess_rate, ARG1(1,FE), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ /* END */ },
+}};
+
+/* register the sample fetch keywords above at boot time */
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * (Currently empty: no frontend-specific ACL keywords are declared; the
+ * list is kept as a registration placeholder.)
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/h1.c b/src/h1.c
new file mode 100644
index 0000000..e251e74
--- /dev/null
+++ b/src/h1.c
@@ -0,0 +1,1319 @@
+/*
+ * HTTP/1 protocol analyzer
+ *
+ * Copyright 2000-2017 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+
+#include <import/sha1.h>
+
+#include <haproxy/api.h>
+#include <haproxy/base64.h>
+#include <haproxy/h1.h>
+#include <haproxy/http-hdr.h>
+#include <haproxy/tools.h>
+
+/* Parse the Content-Length header field of an HTTP/1 request. The function
+ * checks all possible occurrences of a comma-delimited value, and verifies
+ * if any of them doesn't match a previous value. It returns <0 if a value
+ * differs, 0 if the whole header can be dropped (i.e. already known), or >0
+ * if the value can be indexed (first one). In the last case, the value might
+ * be adjusted and the caller must only add the updated value.
+ */
+int h1_parse_cont_len_header(struct h1m *h1m, struct ist *value)
+{
+	char *e, *n;
+	/* unsigned: the overflow guards below rely on well-defined modular
+	 * arithmetic; with a signed type, a 19-20 digit attacker-supplied
+	 * value could make cl*10 exceed LLONG_MAX before the guards fire,
+	 * triggering implementation-defined conversion / signed overflow.
+	 */
+	unsigned long long cl;
+	int not_first = !!(h1m->flags & H1_MF_CLEN);
+	struct ist word;
+
+	word.ptr = value->ptr;
+	e = value->ptr + value->len;
+
+	while (1) {
+		if (word.ptr >= e) {
+			/* empty header or empty value */
+			goto fail;
+		}
+
+		/* skip leading delimiter and blanks */
+		if (unlikely(HTTP_IS_LWS(*word.ptr))) {
+			word.ptr++;
+			continue;
+		}
+
+		/* digits only now */
+		for (cl = 0, n = word.ptr; n < e; n++) {
+			unsigned int c = *n - '0';
+			if (unlikely(c > 9)) {
+				/* non-digit */
+				if (unlikely(n == word.ptr)) // spaces only
+					goto fail;
+				break;
+			}
+
+			if (unlikely(!cl && n > word.ptr)) {
+				/* There was a leading zero before this digit,
+				 * let's trim it.
+				 */
+				word.ptr = n;
+			}
+
+			if (unlikely(cl > ULLONG_MAX / 10ULL))
+				goto fail; /* multiply overflow */
+			cl = cl * 10ULL;
+			if (unlikely(cl + c < cl))
+				goto fail; /* addition overflow */
+			cl = cl + c;
+		}
+
+		/* keep a copy of the exact cleaned value */
+		word.len = n - word.ptr;
+
+		/* skip trailing LWS till next comma or EOL */
+		for (; n < e; n++) {
+			if (!HTTP_IS_LWS(*n)) {
+				if (unlikely(*n != ','))
+					goto fail;
+				break;
+			}
+		}
+
+		/* if duplicate, must be equal */
+		if (h1m->flags & H1_MF_CLEN && cl != h1m->body_len)
+			goto fail;
+
+		/* OK, store this result as the one to be indexed */
+		h1m->flags |= H1_MF_CLEN;
+		h1m->curr_len = h1m->body_len = cl;
+		*value = word;
+
+		/* Now either n==e and we're done, or n points to the comma,
+		 * and we skip it and continue.
+		 */
+		if (n++ == e)
+			break;
+
+		word.ptr = n;
+	}
+	/* here we've reached the end with a single value or a series of
+	 * identical values, all matching previous series if any. The last
+	 * parsed value was sent back into <value>. We just have to decide
+	 * if this occurrence has to be indexed (it's the first one) or
+	 * silently skipped (it's not the first one)
+	 */
+	return !not_first;
+ fail:
+	return -1;
+}
+
+/* Parse the Transfer-Encoding: header field of an HTTP/1 request, looking for
+ * "chunked" encoding to perform some checks (it must be the last encoding for
+ * the request and must not be performed twice for any message). The
+ * H1_MF_TE_CHUNKED is set if a valid "chunked" encoding is found. The
+ * H1_MF_TE_OTHER flag is set if any other encoding is found. The H1_MF_XFER_ENC
+ * flag is always set. The H1_MF_CHNK is set when "chunked" encoding is the last
+ * one. Note that transfer codings are case-insensitive (cf RFC7230#4). This
+ * function returns <0 if a error is found, 0 if the whole header can be dropped
+ * (not used yet), or >0 if the value can be indexed.
+ */
+int h1_parse_xfer_enc_header(struct h1m *h1m, struct ist value)
+{
+	char *e, *n;
+	struct ist word;
+
+	h1m->flags |= H1_MF_XFER_ENC;
+
+	word.ptr = value.ptr - 1; // -1 for next loop's pre-increment
+	e = istend(value);
+
+	while (++word.ptr < e) {
+		/* skip leading delimiter and blanks */
+		if (HTTP_IS_LWS(*word.ptr))
+			continue;
+
+		n = http_find_hdr_value_end(word.ptr, e); // next comma or end of line
+		word.len = n - word.ptr;
+
+		/* trim trailing blanks */
+		while (word.len && HTTP_IS_LWS(word.ptr[word.len-1]))
+			word.len--;
+
+		/* clear CHNK for each token so it only remains set when
+		 * "chunked" is the LAST encoding of the list.
+		 */
+		h1m->flags &= ~H1_MF_CHNK;
+		if (isteqi(word, ist("chunked"))) {
+			if (h1m->flags & H1_MF_TE_CHUNKED) {
+				/* cf RFC7230#3.3.1 : A sender MUST NOT apply
+				 * chunked more than once to a message body
+				 * (i.e., chunking an already chunked message is
+				 * not allowed)
+				 */
+				goto fail;
+			}
+			h1m->flags |= (H1_MF_TE_CHUNKED|H1_MF_CHNK);
+		}
+		else {
+			/* only reject for requests (H1_MF_RESP not set) */
+			if ((h1m->flags & (H1_MF_RESP|H1_MF_TE_CHUNKED)) == H1_MF_TE_CHUNKED) {
+				/* cf RFC7230#3.3.1 : If any transfer coding
+				 * other than chunked is applied to a request
+				 * payload body, the sender MUST apply chunked
+				 * as the final transfer coding to ensure that
+				 * the message is properly framed.
+				 */
+				goto fail;
+			}
+			h1m->flags |= H1_MF_TE_OTHER;
+		}
+
+		word.ptr = n;
+	}
+
+	return 1;
+ fail:
+	return -1;
+}
+
+/* Validate the authority and the host header value for CONNECT method. If
+ * there is a host header, its value is normalized. 0 is returned on success,
+ * -1 if the authority is invalid and -2 if the host is invalid.
+ */
+static int h1_validate_connect_authority(struct ist authority, struct ist *host_hdr)
+{
+	struct ist uri_host, uri_port, host, host_port;
+
+	/* CONNECT requires an authority of the form "host:port" */
+	if (!isttest(authority))
+		goto invalid_authority;
+	uri_host = authority;
+	uri_port = http_get_host_port(authority);
+	if (!istlen(uri_port))
+		goto invalid_authority;
+	/* strip ":port" to keep only the host part */
+	uri_host.len -= (istlen(uri_port) + 1);
+
+	/* no host header to cross-check: the authority alone is enough */
+	if (!host_hdr || !isttest(*host_hdr))
+		goto end;
+
+	/* Get the port of the host header value, if any */
+	host = *host_hdr;
+	host_port = http_get_host_port(*host_hdr);
+	if (isttest(host_port))
+		host.len -= (istlen(host_port) + 1);
+
+	if (istlen(host_port)) {
+		/* host header carries a port: both host and port must match
+		 * the authority
+		 */
+		if (!isteqi(host, uri_host) || !isteq(host_port, uri_port))
+			goto invalid_host;
+		if (http_is_default_port(IST_NULL, uri_port))
+			*host_hdr = host; /* normalize */
+	}
+	else {
+		/* portless host header: only valid for a default port */
+		if (!http_is_default_port(IST_NULL, uri_port) || !isteqi(host, uri_host))
+			goto invalid_host;
+	}
+
+  end:
+	return 0;
+
+  invalid_authority:
+	return -1;
+
+  invalid_host:
+	return -2;
+}
+
+
+/* Validate the authority and the host header value for non-CONNECT method,
+ * when an absolute-URI is detected but when it does not exactly match the host
+ * value. The idea is to detect default port (http or https). authority and host
+ * are defined here. 0 is returned on success, -1 if the host does not match
+ * the authority.
+ */
+static int h1_validate_mismatch_authority(struct ist scheme, struct ist authority, struct ist host_hdr)
+{
+	struct ist uri_host, uri_port, host, host_port;
+
+	/* without a scheme the default port cannot be determined */
+	if (!isttest(scheme))
+		goto mismatch;
+
+	/* split authority into host and optional port */
+	uri_host = authority;
+	uri_port = http_get_host_port(authority);
+	if (isttest(uri_port))
+		uri_host.len -= (istlen(uri_port) + 1);
+
+	/* split host header value into host and optional port */
+	host = host_hdr;
+	host_port = http_get_host_port(host_hdr);
+	if (isttest(host_port))
+		host.len -= (istlen(host_port) + 1);
+
+	if (!isttest(uri_port) && !isttest(host_port)) {
+		/* No port on both: we already know the authority does not match
+		 * the host value
+		 */
+		goto mismatch;
+	}
+	else if (isttest(uri_port) && !http_is_default_port(scheme, uri_port)) {
+		/* here there is no port for the host value and the port for the
+		 * authority is not the default one
+		 */
+		goto mismatch;
+	}
+	else if (isttest(host_port) && !http_is_default_port(scheme, host_port)) {
+		/* here there is no port for the authority and the port for the
+		 * host value is not the default one
+		 */
+		goto mismatch;
+	}
+	else {
+		/* the authority or the host value contain a default port and
+		 * there is no port on the other value
+		 */
+		if (!isteqi(uri_host, host))
+			goto mismatch;
+	}
+
+	return 0;
+
+  mismatch:
+	return -1;
+}
+
+
+/* Parse the Connection: header of an HTTP/1 request, looking for "close",
+ * "keep-alive", and "upgrade" values, and updating h1m->flags according to
+ * what was found there. Note that flags are only added, not removed, so the
+ * function is safe for being called multiple times if multiple occurrences
+ * are found. If the flag H1_MF_CLEAN_CONN_HDR is set, the header value is
+ * cleaned up from "keep-alive" and "close" values. To do so, the header value
+ * is rewritten in place and its length is updated.
+ */
+void h1_parse_connection_header(struct h1m *h1m, struct ist *value)
+{
+	char *e, *n, *p;
+	struct ist word;
+
+	word.ptr = value->ptr - 1; // -1 for next loop's pre-increment
+	p = value->ptr;
+	e = value->ptr + value->len;
+	if (h1m->flags & H1_MF_CLEAN_CONN_HDR)
+		value->len = 0; /* the value is rebuilt from the kept tokens */
+
+	while (++word.ptr < e) {
+		/* skip leading delimiter and blanks */
+		if (HTTP_IS_LWS(*word.ptr))
+			continue;
+
+		n = http_find_hdr_value_end(word.ptr, e); // next comma or end of line
+		word.len = n - word.ptr;
+
+		/* trim trailing blanks */
+		while (word.len && HTTP_IS_LWS(word.ptr[word.len-1]))
+			word.len--;
+
+		if (isteqi(word, ist("keep-alive"))) {
+			h1m->flags |= H1_MF_CONN_KAL;
+			if (h1m->flags & H1_MF_CLEAN_CONN_HDR)
+				goto skip_val; /* dropped from the rebuilt value */
+		}
+		else if (isteqi(word, ist("close"))) {
+			h1m->flags |= H1_MF_CONN_CLO;
+			if (h1m->flags & H1_MF_CLEAN_CONN_HDR)
+				goto skip_val; /* dropped from the rebuilt value */
+		}
+		else if (isteqi(word, ist("upgrade")))
+			h1m->flags |= H1_MF_CONN_UPG;
+
+		if (h1m->flags & H1_MF_CLEAN_CONN_HDR) {
+			/* <p> points right after the last token that was kept:
+			 * while the output still ends at <p>, nothing has been
+			 * dropped yet, so extending the length in place is
+			 * enough; otherwise the token must be copied back.
+			 */
+			if (value->ptr + value->len == p) {
+				/* no rewrite done till now */
+				value->len = n - value->ptr;
+			}
+			else {
+				if (value->len)
+					value->ptr[value->len++] = ',';
+				istcat(value, word, e - value->ptr);
+			}
+		}
+
+	  skip_val:
+		word.ptr = p = n;
+	}
+}
+
+/* Scan the comma-delimited tokens of an Upgrade: header value and raise
+ * H1_MF_UPG_WEBSOCKET on <h1m> when a "websocket" token (case-insensitive)
+ * is found. The flag is cleared first so that only the current value counts.
+ */
+void h1_parse_upgrade_header(struct h1m *h1m, struct ist value)
+{
+	char *pos = value.ptr;
+	char *stop = istend(value);
+
+	h1m->flags &= ~H1_MF_UPG_WEBSOCKET;
+
+	while (pos < stop) {
+		char *tok_end;
+		struct ist tok;
+
+		/* skip blanks between tokens */
+		if (HTTP_IS_LWS(*pos)) {
+			pos++;
+			continue;
+		}
+
+		tok_end = http_find_hdr_value_end(pos, stop); // next comma or end of line
+		tok = ist2(pos, tok_end - pos);
+
+		/* drop trailing blanks from the token */
+		while (tok.len && HTTP_IS_LWS(tok.ptr[tok.len - 1]))
+			tok.len--;
+
+		if (isteqi(tok, ist("websocket")))
+			h1m->flags |= H1_MF_UPG_WEBSOCKET;
+
+		/* resume right after the delimiting comma (or past the end) */
+		pos = tok_end + 1;
+	}
+}
+
+/* Macros used in the HTTP/1 parser, to check for the expected presence of
+ * certain bytes (e.g.: LF) or to skip to next byte and yield in case of
+ * failure.
+ */
+
+/* Expects to find an LF at <ptr>. If not, set <state> to <where> and jump to
+ * <bad>.
+ */
+#define EXPECT_LF_HERE(ptr, bad, state, where) \
+	do { \
+		if (unlikely(*(ptr) != '\n')) { \
+			state = (where); \
+			goto bad; \
+		} \
+	} while (0)
+
+/* Increments pointer <ptr>, continues to label <more> if it's still below
+ * pointer <end>, or goes to <stop> and sets <state> to <where> if the end
+ * of buffer was reached.
+ */
+#define EAT_AND_JUMP_OR_RETURN(ptr, end, more, stop, state, where) \
+	do { \
+		if (likely(++(ptr) < (end))) \
+			goto more; \
+		else { \
+			state = (where); \
+			goto stop; \
+		} \
+	} while (0)
+
+/* This function parses a contiguous HTTP/1 headers block starting at <start>
+ * and ending before <stop>, at once, and converts it a list of (name,value)
+ * pairs representing header fields into the array <hdr> of size <hdr_num>,
+ * whose last entry will have an empty name and an empty value. If <hdr_num> is
+ * too small to represent the whole message, an error is returned. Some
+ * protocol elements such as content-length and transfer-encoding will be
+ * parsed and stored into h1m as well. <hdr> may be null, in which case only
+ * the parsing state will be updated. This may be used to restart the parsing
+ * where it stopped for example.
+ *
+ * For now it's limited to the response. If the header block is incomplete,
+ * 0 is returned, waiting to be called again with more data to try it again.
+ * The caller is responsible for initializing h1m->state to H1_MSG_RPBEFORE,
+ * and h1m->next to zero on the first call, the parser will do the rest. If
+ * an incomplete message is seen, the caller only needs to present h1m->state
+ * and h1m->next again, with an empty header list so that the parser can start
+ * again. In this case, it will detect that it interrupted a previous session
+ * and will first look for the end of the message before reparsing it again and
+ * indexing it at the same time. This ensures that incomplete messages fed 1
+ * character at a time are never processed entirely more than exactly twice,
+ * and that there is no need to store all the internal state and pre-parsed
+ * headers or start line between calls.
+ *
+ * A pointer to a start line descriptor may be passed in <slp>, in which case
+ * the parser will fill it with whatever it found.
+ *
+ * The code derived from the main HTTP/1 parser above but was simplified and
+ * optimized to process responses produced or forwarded by haproxy. The caller
+ * is responsible for ensuring that the message doesn't wrap, and should ensure
+ * it is complete to avoid having to retry the operation after a failed
+ * attempt. The message is not supposed to be invalid, which is why a few
+ * properties such as the character set used in the header field names are not
+ * checked. In case of an unparsable response message, a negative value will be
+ * returned with h1m->err_pos and h1m->err_state matching the location and
+ * state where the error was met. Leading blank likes are tolerated but not
+ * recommended. If flag H1_MF_HDRS_ONLY is set in h1m->flags, only headers are
+ * parsed and the start line is skipped. It is not required to set h1m->state
+ * nor h1m->next in this case.
+ *
+ * This function returns :
+ * -1 in case of error. In this case, h1m->err_state is filled (if h1m is
+ * set) with the state the error occurred in and h1m->err_pos with the
+ * the position relative to <start>
+ * -2 if the output is full (hdr_num reached). err_state and err_pos also
+ * indicate where it failed.
+ * 0 in case of missing data.
+ * > 0 on success, it then corresponds to the number of bytes read since
+ * <start> so that the caller can go on with the payload.
+ */
+int h1_headers_to_hdr_list(char *start, const char *stop,
+ struct http_hdr *hdr, unsigned int hdr_num,
+ struct h1m *h1m, union h1_sl *slp)
+{
+ enum h1m_state state;
+ register char *ptr;
+ register const char *end;
+ unsigned int hdr_count;
+ unsigned int skip; /* number of bytes skipped at the beginning */
+ unsigned int sol; /* start of line */
+ unsigned int col; /* position of the colon */
+ unsigned int eol; /* end of line */
+ unsigned int sov; /* start of value */
+ union h1_sl sl;
+ int skip_update;
+ int restarting;
+ int host_idx;
+ struct ist n, v; /* header name and value during parsing */
+
+ skip = 0; // do it only once to keep track of the leading CRLF.
+
+ try_again:
+ hdr_count = sol = col = eol = sov = 0;
+ sl.st.status = 0;
+ skip_update = restarting = 0;
+ host_idx = -1;
+
+ if (h1m->flags & H1_MF_HDRS_ONLY) {
+ state = H1_MSG_HDR_FIRST;
+ h1m->next = 0;
+ }
+ else {
+ state = h1m->state;
+ if (h1m->state != H1_MSG_RQBEFORE && h1m->state != H1_MSG_RPBEFORE)
+ restarting = 1;
+ }
+
+ ptr = start + h1m->next;
+ end = stop;
+
+ if (unlikely(ptr >= end))
+ goto http_msg_ood;
+
+ /* don't update output if hdr is NULL or if we're restarting */
+ if (!hdr || restarting)
+ skip_update = 1;
+
+ switch (state) {
+ case H1_MSG_RQBEFORE:
+ http_msg_rqbefore:
+ if (likely(HTTP_IS_TOKEN(*ptr))) {
+ /* we have a start of message, we may have skipped some
+ * heading CRLF. Skip them now.
+ */
+ skip += ptr - start;
+ start = ptr;
+
+ sol = 0;
+ sl.rq.m.ptr = ptr;
+ hdr_count = 0;
+ state = H1_MSG_RQMETH;
+ goto http_msg_rqmeth;
+ }
+
+ if (unlikely(!HTTP_IS_CRLF(*ptr))) {
+ state = H1_MSG_RQBEFORE;
+ goto http_msg_invalid;
+ }
+
+ if (unlikely(*ptr == '\n'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqbefore, http_msg_ood, state, H1_MSG_RQBEFORE);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqbefore_cr, http_msg_ood, state, H1_MSG_RQBEFORE_CR);
+ /* stop here */
+
+ case H1_MSG_RQBEFORE_CR:
+ http_msg_rqbefore_cr:
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_RQBEFORE_CR);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqbefore, http_msg_ood, state, H1_MSG_RQBEFORE);
+ /* stop here */
+
+ case H1_MSG_RQMETH:
+ http_msg_rqmeth:
+ if (likely(HTTP_IS_TOKEN(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqmeth, http_msg_ood, state, H1_MSG_RQMETH);
+
+ if (likely(HTTP_IS_SPHT(*ptr))) {
+ sl.rq.m.len = ptr - sl.rq.m.ptr;
+ sl.rq.meth = find_http_meth(start, sl.rq.m.len);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqmeth_sp, http_msg_ood, state, H1_MSG_RQMETH_SP);
+ }
+
+ if (likely(HTTP_IS_CRLF(*ptr))) {
+ /* HTTP 0.9 request */
+ sl.rq.m.len = ptr - sl.rq.m.ptr;
+ sl.rq.meth = find_http_meth(sl.rq.m.ptr, sl.rq.m.len);
+ http_msg_req09_uri:
+ sl.rq.u.ptr = ptr;
+ http_msg_req09_uri_e:
+ sl.rq.u.len = ptr - sl.rq.u.ptr;
+ http_msg_req09_ver:
+ sl.rq.v = ist2(ptr, 0);
+ goto http_msg_rqline_eol;
+ }
+ state = H1_MSG_RQMETH;
+ goto http_msg_invalid;
+
+ case H1_MSG_RQMETH_SP:
+ http_msg_rqmeth_sp:
+ if (likely(!HTTP_IS_LWS(*ptr))) {
+ sl.rq.u.ptr = ptr;
+ goto http_msg_rquri;
+ }
+ if (likely(HTTP_IS_SPHT(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqmeth_sp, http_msg_ood, state, H1_MSG_RQMETH_SP);
+ /* so it's a CR/LF, meaning an HTTP 0.9 request */
+ goto http_msg_req09_uri;
+
+ case H1_MSG_RQURI:
+ http_msg_rquri:
+#ifdef HA_UNALIGNED_LE
+ /* speedup: skip bytes not between 0x24 and 0x7e inclusive */
+ while (ptr <= end - sizeof(int)) {
+ int x = *(int *)ptr - 0x24242424;
+ if (x & 0x80808080)
+ break;
+
+ x -= 0x5b5b5b5b;
+ if (!(x & 0x80808080))
+ break;
+
+ ptr += sizeof(int);
+ }
+#endif
+ if (ptr >= end) {
+ state = H1_MSG_RQURI;
+ goto http_msg_ood;
+ }
+ http_msg_rquri2:
+ if (likely((unsigned char)(*ptr - 33) <= 93)) { /* 33 to 126 included */
+ if (*ptr == '#') {
+ if (h1m->err_pos < -1) /* PR_O2_REQBUG_OK not set */
+ goto invalid_char;
+ if (h1m->err_pos == -1) /* PR_O2_REQBUG_OK set: just log */
+ h1m->err_pos = ptr - start + skip;
+ }
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rquri2, http_msg_ood, state, H1_MSG_RQURI);
+ }
+
+ if (likely(HTTP_IS_SPHT(*ptr))) {
+ sl.rq.u.len = ptr - sl.rq.u.ptr;
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rquri_sp, http_msg_ood, state, H1_MSG_RQURI_SP);
+ }
+ if (likely((unsigned char)*ptr >= 128)) {
+ /* non-ASCII chars are forbidden unless option
+ * accept-invalid-http-request is enabled in the frontend.
+ * In any case, we capture the faulty char.
+ */
+ if (h1m->err_pos < -1)
+ goto invalid_char;
+ if (h1m->err_pos == -1)
+ h1m->err_pos = ptr - start + skip;
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rquri, http_msg_ood, state, H1_MSG_RQURI);
+ }
+
+ if (likely(HTTP_IS_CRLF(*ptr))) {
+ /* so it's a CR/LF, meaning an HTTP 0.9 request */
+ goto http_msg_req09_uri_e;
+ }
+
+ /* OK forbidden chars, 0..31 or 127 */
+ invalid_char:
+ state = H1_MSG_RQURI;
+ goto http_msg_invalid;
+
+ case H1_MSG_RQURI_SP:
+ http_msg_rquri_sp:
+ if (likely(!HTTP_IS_LWS(*ptr))) {
+ sl.rq.v.ptr = ptr;
+ goto http_msg_rqver;
+ }
+ if (likely(HTTP_IS_SPHT(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rquri_sp, http_msg_ood, state, H1_MSG_RQURI_SP);
+ /* so it's a CR/LF, meaning an HTTP 0.9 request */
+ goto http_msg_req09_ver;
+
+
+ case H1_MSG_RQVER:
+ http_msg_rqver:
+ if (likely(HTTP_IS_VER_TOKEN(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqver, http_msg_ood, state, H1_MSG_RQVER);
+
+ if (likely(HTTP_IS_CRLF(*ptr))) {
+ sl.rq.v.len = ptr - sl.rq.v.ptr;
+ http_msg_rqline_eol:
+ /* We have seen the end of line. Note that we do not
+ * necessarily have the \n yet, but at least we know that we
+ * have EITHER \r OR \n, otherwise the request would not be
+ * complete. We can then record the request length and return
+ * to the caller which will be able to register it.
+ */
+
+ if (likely(!skip_update)) {
+ if ((sl.rq.v.len == 8) &&
+ (*(sl.rq.v.ptr + 5) > '1' ||
+ (*(sl.rq.v.ptr + 5) == '1' && *(sl.rq.v.ptr + 7) >= '1')))
+ h1m->flags |= H1_MF_VER_11;
+
+ if (unlikely(hdr_count >= hdr_num)) {
+ state = H1_MSG_RQVER;
+ goto http_output_full;
+ }
+ if (!(h1m->flags & H1_MF_NO_PHDR))
+ http_set_hdr(&hdr[hdr_count++], ist(":method"), sl.rq.m);
+
+ if (unlikely(hdr_count >= hdr_num)) {
+ state = H1_MSG_RQVER;
+ goto http_output_full;
+ }
+ if (!(h1m->flags & H1_MF_NO_PHDR))
+ http_set_hdr(&hdr[hdr_count++], ist(":path"), sl.rq.u);
+ }
+
+ sol = ptr - start;
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rqline_end, http_msg_ood, state, H1_MSG_RQLINE_END);
+ goto http_msg_rqline_end;
+ }
+
+ /* neither an HTTP_VER token nor a CRLF */
+ state = H1_MSG_RQVER;
+ goto http_msg_invalid;
+
+ case H1_MSG_RQLINE_END:
+ http_msg_rqline_end:
+ /* check for HTTP/0.9 request : no version information
+ * available. sol must point to the first of CR or LF. However
+ * since we don't save these elements between calls, if we come
+ * here from a restart, we don't necessarily know. Thus in this
+ * case we simply start over.
+ */
+ if (restarting)
+ goto restart;
+
+ if (unlikely(sl.rq.v.len == 0))
+ goto http_msg_last_lf;
+
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_RQLINE_END);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_first, http_msg_ood, state, H1_MSG_HDR_FIRST);
+ /* stop here */
+
+ /*
+ * Common states below
+ */
+ case H1_MSG_RPBEFORE:
+ http_msg_rpbefore:
+ if (likely(HTTP_IS_TOKEN(*ptr))) {
+ /* we have a start of message, we may have skipped some
+ * heading CRLF. Skip them now.
+ */
+ skip += ptr - start;
+ start = ptr;
+
+ sol = 0;
+ sl.st.v.ptr = ptr;
+ hdr_count = 0;
+ state = H1_MSG_RPVER;
+ goto http_msg_rpver;
+ }
+
+ if (unlikely(!HTTP_IS_CRLF(*ptr))) {
+ state = H1_MSG_RPBEFORE;
+ goto http_msg_invalid;
+ }
+
+ if (unlikely(*ptr == '\n'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpbefore, http_msg_ood, state, H1_MSG_RPBEFORE);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpbefore_cr, http_msg_ood, state, H1_MSG_RPBEFORE_CR);
+ /* stop here */
+
+ case H1_MSG_RPBEFORE_CR:
+ http_msg_rpbefore_cr:
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_RPBEFORE_CR);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpbefore, http_msg_ood, state, H1_MSG_RPBEFORE);
+ /* stop here */
+
+ case H1_MSG_RPVER:
+ http_msg_rpver:
+ if (likely(HTTP_IS_VER_TOKEN(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpver, http_msg_ood, state, H1_MSG_RPVER);
+
+ if (likely(HTTP_IS_SPHT(*ptr))) {
+ sl.st.v.len = ptr - sl.st.v.ptr;
+
+ if ((sl.st.v.len == 8) &&
+ (*(sl.st.v.ptr + 5) > '1' ||
+ (*(sl.st.v.ptr + 5) == '1' && *(sl.st.v.ptr + 7) >= '1')))
+ h1m->flags |= H1_MF_VER_11;
+
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpver_sp, http_msg_ood, state, H1_MSG_RPVER_SP);
+ }
+ state = H1_MSG_RPVER;
+ goto http_msg_invalid;
+
+ case H1_MSG_RPVER_SP:
+ http_msg_rpver_sp:
+ if (likely(!HTTP_IS_LWS(*ptr))) {
+ sl.st.status = 0;
+ sl.st.c.ptr = ptr;
+ goto http_msg_rpcode;
+ }
+ if (likely(HTTP_IS_SPHT(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpver_sp, http_msg_ood, state, H1_MSG_RPVER_SP);
+ /* so it's a CR/LF, this is invalid */
+ state = H1_MSG_RPVER_SP;
+ goto http_msg_invalid;
+
+ case H1_MSG_RPCODE:
+ http_msg_rpcode:
+ if (likely(HTTP_IS_DIGIT(*ptr))) {
+ sl.st.status = sl.st.status * 10 + *ptr - '0';
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpcode, http_msg_ood, state, H1_MSG_RPCODE);
+ }
+
+ if (unlikely(!HTTP_IS_LWS(*ptr))) {
+ state = H1_MSG_RPCODE;
+ goto http_msg_invalid;
+ }
+
+ if (likely(HTTP_IS_SPHT(*ptr))) {
+ sl.st.c.len = ptr - sl.st.c.ptr;
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpcode_sp, http_msg_ood, state, H1_MSG_RPCODE_SP);
+ }
+
+ /* so it's a CR/LF, so there is no reason phrase */
+ sl.st.c.len = ptr - sl.st.c.ptr;
+
+ http_msg_rsp_reason:
+ sl.st.r = ist2(ptr, 0);
+ goto http_msg_rpline_eol;
+
+ case H1_MSG_RPCODE_SP:
+ http_msg_rpcode_sp:
+ if (likely(!HTTP_IS_LWS(*ptr))) {
+ sl.st.r.ptr = ptr;
+ goto http_msg_rpreason;
+ }
+ if (likely(HTTP_IS_SPHT(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpcode_sp, http_msg_ood, state, H1_MSG_RPCODE_SP);
+ /* so it's a CR/LF, so there is no reason phrase */
+ goto http_msg_rsp_reason;
+
+ case H1_MSG_RPREASON:
+ http_msg_rpreason:
+ if (likely(!HTTP_IS_CRLF(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpreason, http_msg_ood, state, H1_MSG_RPREASON);
+ sl.st.r.len = ptr - sl.st.r.ptr;
+ http_msg_rpline_eol:
+ /* We have seen the end of line. Note that we do not
+ * necessarily have the \n yet, but at least we know that we
+ * have EITHER \r OR \n, otherwise the response would not be
+ * complete. We can then record the response length and return
+ * to the caller which will be able to register it.
+ */
+
+ if (likely(!skip_update)) {
+ if (unlikely(hdr_count >= hdr_num)) {
+ state = H1_MSG_RPREASON;
+ goto http_output_full;
+ }
+ if (!(h1m->flags & H1_MF_NO_PHDR))
+ http_set_hdr(&hdr[hdr_count++], ist(":status"), sl.st.c);
+ }
+
+ sol = ptr - start;
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_rpline_end, http_msg_ood, state, H1_MSG_RPLINE_END);
+ goto http_msg_rpline_end;
+
+ case H1_MSG_RPLINE_END:
+ http_msg_rpline_end:
+ /* sol must point to the first of CR or LF. */
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_RPLINE_END);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_first, http_msg_ood, state, H1_MSG_HDR_FIRST);
+ /* stop here */
+
+ case H1_MSG_HDR_FIRST:
+ http_msg_hdr_first:
+ sol = ptr - start;
+ if (likely(!HTTP_IS_CRLF(*ptr))) {
+ goto http_msg_hdr_name;
+ }
+
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_last_lf, http_msg_ood, state, H1_MSG_LAST_LF);
+ goto http_msg_last_lf;
+
+ case H1_MSG_HDR_NAME:
+ http_msg_hdr_name:
+ /* assumes sol points to the first char */
+ if (likely(HTTP_IS_TOKEN(*ptr))) {
+ if (!skip_update) {
+ /* turn it to lower case if needed */
+ if (isupper((unsigned char)*ptr) && h1m->flags & H1_MF_TOLOWER)
+ *ptr = tolower((unsigned char)*ptr);
+ }
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_name, http_msg_ood, state, H1_MSG_HDR_NAME);
+ }
+
+ if (likely(*ptr == ':')) {
+ col = ptr - start;
+ if (col <= sol) {
+ state = H1_MSG_HDR_NAME;
+ goto http_msg_invalid;
+ }
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l1_sp, http_msg_ood, state, H1_MSG_HDR_L1_SP);
+ }
+
+ if (likely(h1m->err_pos < -1) || *ptr == '\n') {
+ state = H1_MSG_HDR_NAME;
+ goto http_msg_invalid;
+ }
+
+ if (h1m->err_pos == -1) /* capture the error pointer */
+ h1m->err_pos = ptr - start + skip; /* >= 0 now */
+
+ /* and we still accept this non-token character */
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_name, http_msg_ood, state, H1_MSG_HDR_NAME);
+
+ case H1_MSG_HDR_L1_SP:
+ http_msg_hdr_l1_sp:
+ /* assumes sol points to the first char */
+ if (likely(HTTP_IS_SPHT(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l1_sp, http_msg_ood, state, H1_MSG_HDR_L1_SP);
+
+ /* header value can be basically anything except CR/LF */
+ sov = ptr - start;
+
+ if (likely(!HTTP_IS_CRLF(*ptr))) {
+ goto http_msg_hdr_val;
+ }
+
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l1_lf, http_msg_ood, state, H1_MSG_HDR_L1_LF);
+ goto http_msg_hdr_l1_lf;
+
+ case H1_MSG_HDR_L1_LF:
+ http_msg_hdr_l1_lf:
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_HDR_L1_LF);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l1_lws, http_msg_ood, state, H1_MSG_HDR_L1_LWS);
+
+ case H1_MSG_HDR_L1_LWS:
+ http_msg_hdr_l1_lws:
+ if (likely(HTTP_IS_SPHT(*ptr))) {
+ if (!skip_update) {
+ /* replace HT,CR,LF with spaces */
+ for (; start + sov < ptr; sov++)
+ start[sov] = ' ';
+ }
+ goto http_msg_hdr_l1_sp;
+ }
+ /* we had a header consisting only in spaces ! */
+ eol = sov;
+ goto http_msg_complete_header;
+
+ case H1_MSG_HDR_VAL:
+ http_msg_hdr_val:
+ /* assumes sol points to the first char, and sov
+ * points to the first character of the value.
+ */
+
+ /* speedup: we'll skip packs of 4 or 8 bytes not containing bytes 0x0D
+ * and lower. In fact since most of the time is spent in the loop, we
+ * also remove the sign bit test so that bytes 0x8e..0x0d break the
+ * loop, but we don't care since they're very rare in header values.
+ */
+#ifdef HA_UNALIGNED_LE64
+ while (ptr <= end - sizeof(long)) {
+ if ((*(long *)ptr - 0x0e0e0e0e0e0e0e0eULL) & 0x8080808080808080ULL)
+ goto http_msg_hdr_val2;
+ ptr += sizeof(long);
+ }
+#endif
+#ifdef HA_UNALIGNED_LE
+ while (ptr <= end - sizeof(int)) {
+ if ((*(int*)ptr - 0x0e0e0e0e) & 0x80808080)
+ goto http_msg_hdr_val2;
+ ptr += sizeof(int);
+ }
+#endif
+ if (ptr >= end) {
+ state = H1_MSG_HDR_VAL;
+ goto http_msg_ood;
+ }
+ http_msg_hdr_val2:
+ if (likely(!*ptr)) {
+ /* RFC9110 clarified that NUL is explicitly forbidden in header values
+ * (like CR and LF).
+ */
+ if (h1m->err_pos < -1) { /* PR_O2_REQBUG_OK not set */
+ state = H1_MSG_HDR_VAL;
+ goto http_msg_invalid;
+ }
+ if (h1m->err_pos == -1) /* PR_O2_REQBUG_OK set: just log */
+ h1m->err_pos = ptr - start + skip;
+ }
+ if (likely(!HTTP_IS_CRLF(*ptr)))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_val2, http_msg_ood, state, H1_MSG_HDR_VAL);
+
+ eol = ptr - start;
+ /* Note: we could also copy eol into ->eoh so that we have the
+ * real header end in case it ends with lots of LWS, but is this
+ * really needed ?
+ */
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l2_lf, http_msg_ood, state, H1_MSG_HDR_L2_LF);
+ goto http_msg_hdr_l2_lf;
+
+ case H1_MSG_HDR_L2_LF:
+ http_msg_hdr_l2_lf:
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_HDR_L2_LF);
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_hdr_l2_lws, http_msg_ood, state, H1_MSG_HDR_L2_LWS);
+
+ case H1_MSG_HDR_L2_LWS:
+ http_msg_hdr_l2_lws:
+ if (unlikely(HTTP_IS_SPHT(*ptr))) {
+ if (!skip_update) {
+ /* LWS: replace HT,CR,LF with spaces */
+ for (; start + eol < ptr; eol++)
+ start[eol] = ' ';
+ }
+ goto http_msg_hdr_val;
+ }
+ http_msg_complete_header:
+ /*
+ * It was a new header, so the last one is finished. Assumes
+ * <sol> points to the first char of the name, <col> to the
+ * colon, <sov> points to the first character of the value and
+ * <eol> to the first CR or LF so we know how the line ends. We
+ * will trim spaces around the value. It's possible to do it by
+ * adjusting <eol> and <sov> which are no more used after this.
+ * We can add the header field to the list.
+ */
+ if (likely(!skip_update)) {
+ while (sov < eol && HTTP_IS_LWS(start[sov]))
+ sov++;
+
+ while (eol - 1 > sov && HTTP_IS_LWS(start[eol - 1]))
+ eol--;
+
+
+ n = ist2(start + sol, col - sol);
+ v = ist2(start + sov, eol - sov);
+
+ do {
+ int ret;
+
+ if (unlikely(hdr_count >= hdr_num)) {
+ state = H1_MSG_HDR_L2_LWS;
+ goto http_output_full;
+ }
+
+ if (isteqi(n, ist("transfer-encoding"))) {
+ ret = h1_parse_xfer_enc_header(h1m, v);
+ if (ret < 0) {
+ state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
+ goto http_msg_invalid;
+ }
+ else if (ret == 0) {
+ /* skip it */
+ break;
+ }
+ }
+ else if (isteqi(n, ist("content-length"))) {
+ ret = h1_parse_cont_len_header(h1m, &v);
+
+ if (ret < 0) {
+ state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
+ goto http_msg_invalid;
+ }
+ else if (ret == 0) {
+ /* skip it */
+ break;
+ }
+ }
+ else if (isteqi(n, ist("connection"))) {
+ h1_parse_connection_header(h1m, &v);
+ if (!v.len) {
+ /* skip it */
+ break;
+ }
+ }
+ else if (isteqi(n, ist("upgrade"))) {
+ h1_parse_upgrade_header(h1m, v);
+ }
+ else if (!(h1m->flags & H1_MF_RESP) && isteqi(n, ist("host"))) {
+ if (host_idx == -1)
+ host_idx = hdr_count;
+ else {
+ if (!isteqi(v, hdr[host_idx].v)) {
+ state = H1_MSG_HDR_L2_LWS;
+ ptr = v.ptr; /* Set ptr on the error */
+ goto http_msg_invalid;
+ }
+ /* if the same host, skip it */
+ break;
+ }
+ }
+
+ http_set_hdr(&hdr[hdr_count++], n, v);
+ } while (0);
+ }
+
+ sol = ptr - start;
+
+ if (likely(!HTTP_IS_CRLF(*ptr)))
+ goto http_msg_hdr_name;
+
+ if (likely(*ptr == '\r'))
+ EAT_AND_JUMP_OR_RETURN(ptr, end, http_msg_last_lf, http_msg_ood, state, H1_MSG_LAST_LF);
+ goto http_msg_last_lf;
+
+ case H1_MSG_LAST_LF:
+ http_msg_last_lf:
+ EXPECT_LF_HERE(ptr, http_msg_invalid, state, H1_MSG_LAST_LF);
+ ptr++;
+ /* <ptr> now points to the first byte of payload. If needed sol
+ * still points to the first of either CR or LF of the empty
+ * line ending the headers block.
+ */
+ if (likely(!skip_update)) {
+ if (unlikely(hdr_count >= hdr_num)) {
+ state = H1_MSG_LAST_LF;
+ goto http_output_full;
+ }
+ http_set_hdr(&hdr[hdr_count++], ist2(start+sol, 0), ist(""));
+ }
+
+ /* reaching here we've parsed the whole message. We may detect
+ * that we were already continuing an interrupted parsing pass
+ * so we were silently looking for the end of message not
+ * updating anything before deciding to parse it fully at once.
+ * It's guaranteed that we won't match this test twice in a row
+ * since restarting will turn zero.
+ */
+ if (restarting)
+ goto restart;
+
+
+ if (!(h1m->flags & (H1_MF_HDRS_ONLY|H1_MF_RESP))) {
+ struct http_uri_parser parser = http_uri_parser_init(sl.rq.u);
+ struct ist scheme, authority;
+ int ret;
+
+ scheme = http_parse_scheme(&parser);
+ authority = http_parse_authority(&parser, 1);
+ if (sl.rq.meth == HTTP_METH_CONNECT) {
+ struct ist *host = ((host_idx != -1) ? &hdr[host_idx].v : NULL);
+
+ ret = h1_validate_connect_authority(authority, host);
+ if (ret < 0) {
+ if (h1m->err_pos < -1) {
+ state = H1_MSG_LAST_LF;
+ /* WT: gcc seems to see a path where sl.rq.u.ptr was used
+ * uninitialized, but it doesn't know that the function is
+ * called with initial states making this impossible.
+ */
+ ALREADY_CHECKED(sl.rq.u.ptr);
+ ptr = ((ret == -1) ? sl.rq.u.ptr : host->ptr); /* Set ptr on the error */
+ goto http_msg_invalid;
+ }
+ if (h1m->err_pos == -1) /* capture the error pointer */
+ h1m->err_pos = ((ret == -1) ? sl.rq.u.ptr : host->ptr) - start + skip; /* >= 0 now */
+ }
+ }
+ else if (host_idx != -1 && istlen(authority)) {
+ struct ist host = hdr[host_idx].v;
+
+ /* For non-CONNECT method, the authority must match the host header value */
+ if (!isteqi(authority, host)) {
+ ret = h1_validate_mismatch_authority(scheme, authority, host);
+ if (ret < 0) {
+ if (h1m->err_pos < -1) {
+ state = H1_MSG_LAST_LF;
+ ptr = host.ptr; /* Set ptr on the error */
+ goto http_msg_invalid;
+ }
+ if (h1m->err_pos == -1) /* capture the error pointer */
+ h1m->err_pos = v.ptr - start + skip; /* >= 0 now */
+ }
+ }
+ }
+ }
+
+ state = H1_MSG_DATA;
+ if (h1m->flags & H1_MF_XFER_ENC) {
+ if (h1m->flags & H1_MF_CLEN) {
+ /* T-E + C-L: force close and remove C-L */
+ h1m->flags |= H1_MF_CONN_CLO;
+ h1m->flags &= ~H1_MF_CLEN;
+ h1m->curr_len = h1m->body_len = 0;
+ hdr_count = http_del_hdr(hdr, ist("content-length"));
+ }
+ else if (!(h1m->flags & H1_MF_VER_11)) {
+ /* T-E + HTTP/1.0: force close */
+ h1m->flags |= H1_MF_CONN_CLO;
+ }
+
+ if (h1m->flags & H1_MF_CHNK)
+ state = H1_MSG_CHUNK_SIZE;
+ else if (!(h1m->flags & H1_MF_RESP)) {
+ /* cf RFC7230#3.3.3 : transfer-encoding in
+ * request without chunked encoding is invalid.
+ */
+ goto http_msg_invalid;
+ }
+ }
+
+ break;
+
+ default:
+ /* impossible states */
+ goto http_msg_invalid;
+ }
+
+ /* Now we've left the headers state and are either in H1_MSG_DATA or
+ * H1_MSG_CHUNK_SIZE.
+ */
+
+ if (slp && !skip_update)
+ *slp = sl;
+
+ h1m->state = state;
+ h1m->next = ptr - start + skip;
+ return h1m->next;
+
+ http_msg_ood:
+ /* out of data at <ptr> during state <state> */
+ if (slp && !skip_update)
+ *slp = sl;
+
+ h1m->state = state;
+ h1m->next = ptr - start + skip;
+ return 0;
+
+ http_msg_invalid:
+ /* invalid message, error at <ptr> */
+ if (slp && !skip_update)
+ *slp = sl;
+
+ h1m->err_state = h1m->state = state;
+ h1m->err_pos = h1m->next = ptr - start + skip;
+ return -1;
+
+ http_output_full:
+ /* no more room to store the current header, error at <ptr> */
+ if (slp && !skip_update)
+ *slp = sl;
+
+ h1m->err_state = h1m->state = state;
+ h1m->err_pos = h1m->next = ptr - start + skip;
+ return -2;
+
+ restart:
+ h1m->flags &= H1_MF_RESTART_MASK;
+ h1m->curr_len = h1m->body_len = h1m->next = 0;
+ if (h1m->flags & H1_MF_RESP)
+ h1m->state = H1_MSG_RPBEFORE;
+ else
+ h1m->state = H1_MSG_RQBEFORE;
+ goto try_again;
+}
+
/* This function performs a very minimal parsing of the trailers block present
 * at offset <ofs> in <buf> for up to <max> bytes, and returns the number of
 * bytes to delete to skip the trailers. It may return 0 if it's missing some
 * input data, or < 0 in case of parse error (in which case the caller may have
 * to decide how to proceed, possibly eating everything).
 */
int h1_measure_trailers(const struct buffer *buf, unsigned int ofs, unsigned int max)
{
	const char *stop = b_peek(buf, ofs + max);
	int count = ofs;

	while (1) {
		/* <p1> tracks the first of CR/LF seen on the line, <p2> the LF */
		const char *p1 = NULL, *p2 = NULL;
		const char *start = b_peek(buf, count);
		const char *ptr = start;

		/* scan current line and stop at LF or CRLF */
		while (1) {
			if (ptr == stop)
				return 0; /* ran out of data before end of line */

			if (*ptr == '\n') {
				if (!p1)
					p1 = ptr;
				p2 = ptr;
				break;
			}

			if (*ptr == '\r') {
				if (p1)
					return -1; /* two CRs on the same line: invalid */
				p1 = ptr;
			}

			/* b_next() handles wrapping in the circular buffer */
			ptr = b_next(buf, ptr);
		}

		/* after LF; point to beginning of next line */
		p2 = b_next(buf, p2);
		count += b_dist(buf, start, p2);

		/* LF/CRLF at beginning of line => end of trailers at p2.
		 * Everything was scheduled for forwarding, there's nothing left
		 * from this message. */
		if (p1 == start)
			break;
		/* OK, next line then */
	}
	return count - ofs;
}
+
/* Generate a random key for a WebSocket Handshake in respect with rfc6455
 * The key is 128-bits long encoded as a base64 string in <key_out> parameter
 * (25 bytes long).
 */
void h1_generate_random_ws_input_key(char key_out[25])
{
	char raw[16];
	uint64_t r;

	/* fill 16 bytes (128 bits) with random data */
	r = ha_random64();
	memcpy(raw, &r, 8);
	r = ha_random64();
	memcpy(raw + 8, &r, 8);

	/* base64-encode them: 24 chars plus the trailing NUL */
	a2base64(raw, sizeof(raw), key_out, 25);
}
+
+#define H1_WS_KEY_SUFFIX_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+/*
+ * Calculate the WebSocket handshake response key from <key_in>. Following the
+ * rfc6455, <key_in> must be 24 bytes longs. The result is stored in <key_out>
+ * as a 29 bytes long string.
+ */
+void h1_calculate_ws_output_key(const char *key, char *result)
+{
+ blk_SHA_CTX sha1_ctx;
+ char hash_in[60], hash_out[20];
+
+ /* concatenate the key with a fixed suffix */
+ memcpy(hash_in, key, 24);
+ memcpy(&hash_in[24], H1_WS_KEY_SUFFIX_GUID, 36);
+
+ /* sha1 the result */
+ blk_SHA1_Init(&sha1_ctx);
+ blk_SHA1_Update(&sha1_ctx, hash_in, 60);
+ blk_SHA1_Final((unsigned char *)hash_out, &sha1_ctx);
+
+ /* encode in base64 the hash */
+ a2base64(hash_out, 20, result, 29);
+}
diff --git a/src/h1_htx.c b/src/h1_htx.c
new file mode 100644
index 0000000..f4f13fc
--- /dev/null
+++ b/src/h1_htx.c
@@ -0,0 +1,1074 @@
+/*
+ * Functions to manipulate H1 messages using the internal representation.
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/global.h>
+#include <haproxy/h1.h>
+#include <haproxy/h1_htx.h>
+#include <haproxy/http.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/tools.h>
+
+/* Estimate the size of the HTX headers after the parsing, including the EOH. */
+static size_t h1_eval_htx_hdrs_size(const struct http_hdr *hdrs)
+{
+ size_t sz = 0;
+ int i;
+
+ for (i = 0; hdrs[i].n.len; i++)
+ sz += sizeof(struct htx_blk) + hdrs[i].n.len + hdrs[i].v.len;
+ sz += sizeof(struct htx_blk) + 1;
+ return sz;
+}
+
+/* Estimate the size of the HTX request after the parsing. */
+static size_t h1_eval_htx_size(const struct ist p1, const struct ist p2, const struct ist p3,
+ const struct http_hdr *hdrs)
+{
+ size_t sz;
+
+ /* size of the HTX start-line */
+ sz = sizeof(struct htx_blk) + sizeof(struct htx_sl) + p1.len + p2.len + p3.len;
+ sz += h1_eval_htx_hdrs_size(hdrs);
+ return sz;
+}
+
+/* Check the validity of the request version. If the version is valid, it
+ * returns 1. Otherwise, it returns 0.
+ */
+static int h1_process_req_vsn(struct h1m *h1m, union h1_sl *sl)
+{
+ /* RFC7230#2.6 has enforced the format of the HTTP version string to be
+ * exactly one digit "." one digit. This check may be disabled using
+ * option accept-invalid-http-request.
+ */
+ if (h1m->err_pos == -2) { /* PR_O2_REQBUG_OK not set */
+ if (sl->rq.v.len != 8)
+ return 0;
+
+ if (!istnmatch(sl->rq.v, ist("HTTP/"), 5) ||
+ !isdigit((unsigned char)*(sl->rq.v.ptr + 5)) ||
+ *(sl->rq.v.ptr + 6) != '.' ||
+ !isdigit((unsigned char)*(sl->rq.v.ptr + 7)))
+ return 0;
+ }
+ else if (!sl->rq.v.len) {
+ /* try to convert HTTP/0.9 requests to HTTP/1.0 */
+
+ /* RFC 1945 allows only GET for HTTP/0.9 requests */
+ if (sl->rq.meth != HTTP_METH_GET)
+ return 0;
+
+ /* HTTP/0.9 requests *must* have a request URI, per RFC 1945 */
+ if (!sl->rq.u.len)
+ return 0;
+
+ /* Add HTTP version */
+ sl->rq.v = ist("HTTP/1.0");
+ return 1;
+ }
+
+ if ((sl->rq.v.len == 8) &&
+ ((*(sl->rq.v.ptr + 5) > '1') ||
+ ((*(sl->rq.v.ptr + 5) == '1') && (*(sl->rq.v.ptr + 7) >= '1'))))
+ h1m->flags |= H1_MF_VER_11;
+ return 1;
+}
+
/* Check the validity of the response version. If the version is valid, it
 * returns 1. Otherwise, it returns 0.
 */
static int h1_process_res_vsn(struct h1m *h1m, union h1_sl *sl)
{
	/* RFC7230#2.6 has enforced the format of the HTTP version string to be
	 * exactly one digit "." one digit. This check may be disabled using
	 * option accept-invalid-http-response (this is the response side;
	 * NOTE(review): the err_pos == -2 sentinel presumably comes from
	 * PR_O2_RSPBUG_OK here rather than PR_O2_REQBUG_OK — confirm against
	 * the caller setting h1m->err_pos).
	 */
	if (h1m->err_pos == -2) { /* strict checking: "bug tolerance" option not set */
		if (sl->st.v.len != 8)
			return 0;

		/* expected shape: "????/d.d", only the separator and both
		 * digits are verified here
		 */
		if (*(sl->st.v.ptr + 4) != '/' ||
		    !isdigit((unsigned char)*(sl->st.v.ptr + 5)) ||
		    *(sl->st.v.ptr + 6) != '.' ||
		    !isdigit((unsigned char)*(sl->st.v.ptr + 7)))
			return 0;
	}

	/* version >= 1.1 (major > 1, or 1.minor with minor >= 1) */
	if ((sl->st.v.len == 8) &&
	    ((*(sl->st.v.ptr + 5) > '1') ||
	     ((*(sl->st.v.ptr + 5) == '1') && (*(sl->st.v.ptr + 7) >= '1'))))
		h1m->flags |= H1_MF_VER_11;

	return 1;
}
+
+/* Convert H1M flags to HTX start-line flags. */
+static unsigned int h1m_htx_sl_flags(struct h1m *h1m)
+{
+ unsigned int flags = HTX_SL_F_NONE;
+
+ if (h1m->flags & H1_MF_RESP)
+ flags |= HTX_SL_F_IS_RESP;
+ if (h1m->flags & H1_MF_VER_11)
+ flags |= HTX_SL_F_VER_11;
+ if (h1m->flags & H1_MF_XFER_ENC)
+ flags |= HTX_SL_F_XFER_ENC;
+ if (h1m->flags & H1_MF_XFER_LEN) {
+ flags |= HTX_SL_F_XFER_LEN;
+ if (h1m->flags & H1_MF_CHNK)
+ flags |= HTX_SL_F_CHNK;
+ else if (h1m->flags & H1_MF_CLEN) {
+ flags |= HTX_SL_F_CLEN;
+ if (h1m->body_len == 0)
+ flags |= HTX_SL_F_BODYLESS;
+ }
+ else
+ flags |= HTX_SL_F_BODYLESS;
+ }
+ if (h1m->flags & H1_MF_CONN_UPG)
+ flags |= HTX_SL_F_CONN_UPG;
+ return flags;
+}
+
/* Postprocess the parsed headers for a request and convert them into an htx
 * message. Returns 1 on success. On parsing error, -1 is returned and the
 * error is reported by setting the htx flag HTX_FL_PARSING_ERROR and filling
 * h1m->err_pos and h1m->err_state fields. -2 is returned when the resulting
 * HTX message would exceed <max> while <htx> is not empty; in this case the
 * parser is re-initialized so the headers can be parsed again later.
 */
static int h1_postparse_req_hdrs(struct h1m *h1m, union h1_sl *h1sl, struct htx *htx,
				 struct http_hdr *hdrs, size_t max)
{
	struct htx_sl *sl;
	struct ist meth, uri, vsn;
	unsigned int flags = 0;

	/* <h1sl> is always defined for a request */
	meth = h1sl->rq.m;
	uri = h1sl->rq.u;
	vsn = h1sl->rq.v;

	/* Be sure the message, once converted into HTX, will not exceed the max
	 * size allowed.
	 */
	if (h1_eval_htx_size(meth, uri, vsn, hdrs) > max) {
		if (htx_is_empty(htx))
			goto error;
		goto output_full;
	}

	/* By default, request have always a known length */
	h1m->flags |= H1_MF_XFER_LEN;

	if (h1sl->rq.meth == HTTP_METH_CONNECT) {
		/* CONNECT tunnels carry no message body of their own */
		h1m->flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
		h1m->curr_len = h1m->body_len = 0;
	}
	else if (h1sl->rq.meth == HTTP_METH_HEAD)
		flags |= HTX_SL_F_BODYLESS_RESP;


	flags |= h1m_htx_sl_flags(h1m);
	if ((flags & (HTX_SL_F_CONN_UPG|HTX_SL_F_BODYLESS)) == HTX_SL_F_CONN_UPG) {
		int i;

		/* upgrade requested on a request with a body: drop the
		 * "upgrade" header(s) and the upgrade flags
		 */
		for (i = 0; hdrs[i].n.len; i++) {
			if (isteqi(hdrs[i].n, ist("upgrade")))
				hdrs[i].v = IST_NULL;
		}
		h1m->flags &=~ H1_MF_CONN_UPG;
		flags &= ~HTX_SL_F_CONN_UPG;
	}
	sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, meth, uri, vsn);
	if (!sl || !htx_add_all_headers(htx, hdrs))
		goto error;
	sl->info.req.meth = h1sl->rq.meth;

	/* Check if the uri contains an authority. Also check if it contains an
	 * explicit scheme and if it is "http" or "https". */
	if (h1sl->rq.meth == HTTP_METH_CONNECT)
		sl->flags |= HTX_SL_F_HAS_AUTHORITY;
	else if (uri.len && uri.ptr[0] != '/' && uri.ptr[0] != '*') {
		sl->flags |= (HTX_SL_F_HAS_AUTHORITY|HTX_SL_F_HAS_SCHM);
		/* "http:" has the ':' at offset 4, "https:" does not */
		if (uri.len > 4 && (uri.ptr[0] | 0x20) == 'h')
			sl->flags |= ((uri.ptr[4] == ':') ? HTX_SL_F_SCHM_HTTP : HTX_SL_F_SCHM_HTTPS);

		/* absolute-form target URI present, proceed to scheme-based
		 * normalization */
		http_scheme_based_normalize(htx);
	}

	/* If body length cannot be determined, set htx->extra to
	 * HTX_UNKOWN_PAYLOAD_LENGTH. This value is impossible in other cases.
	 */
	htx->extra = ((h1m->flags & H1_MF_XFER_LEN) ? h1m->curr_len : HTX_UNKOWN_PAYLOAD_LENGTH);

  end:
	return 1;
  output_full:
	h1m_init_req(h1m);
	h1m->flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);
	return -2;
  error:
	h1m->err_pos = h1m->next;
	h1m->err_state = h1m->state;
	htx->flags |= HTX_FL_PARSING_ERROR;
	return -1;
}
+
/* Postprocess the parsed headers for a response and convert them into an htx
 * message. Returns 1 on success. On parsing error, -1 is returned and the
 * error is reported by setting the htx flag HTX_FL_PARSING_ERROR and filling
 * h1m->err_pos and h1m->err_state fields. -2 is returned when the resulting
 * HTX message would exceed <max> while <htx> is not empty; in this case the
 * parser is re-initialized so the headers can be parsed again later.
 */
static int h1_postparse_res_hdrs(struct h1m *h1m, union h1_sl *h1sl, struct htx *htx,
				 struct http_hdr *hdrs, size_t max)
{
	struct htx_sl *sl;
	struct ist vsn, status, reason;
	unsigned int flags = 0;
	uint16_t code = 0;

	if (h1sl) {
		/* For HTTP responses, the start-line was parsed */
		code = h1sl->st.status;
		vsn = h1sl->st.v;
		status = h1sl->st.c;
		reason = h1sl->st.r;
	}
	else {
		/* For FCGI responses, there is no start-line but the "Status"
		 * header must be parsed, if found.
		 */
		int hdr;

		vsn = ((h1m->flags & H1_MF_VER_11) ? ist("HTTP/1.1") : ist("HTTP/1.0"));
		for (hdr = 0; hdrs[hdr].n.len; hdr++) {
			if (isteqi(hdrs[hdr].n, ist("status"))) {
				code = http_parse_status_val(hdrs[hdr].v, &status, &reason);
			}
			else if (isteqi(hdrs[hdr].n, ist("location"))) {
				/* a Location header without a Status implies a redirect */
				code = 302;
				status = ist("302");
				reason = ist("Found");
			}
		}
		if (!code) {
			/* neither Status nor Location found: default to 200 OK */
			code = 200;
			status = ist("200");
			reason = ist("OK");
		}
		/* FIXME: Check the codes 1xx ? */
	}

	/* Be sure the message, once converted into HTX, will not exceed the max
	 * size allowed.
	 */
	if (h1_eval_htx_size(vsn, status, reason, hdrs) > max) {
		if (htx_is_empty(htx))
			goto error;
		goto output_full;
	}

	/* upgrade flags are only meaningful on a 101 response */
	if ((h1m->flags & (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) && code != 101)
		h1m->flags &= ~(H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET);

	if (((h1m->flags & H1_MF_METH_CONNECT) && code >= 200 && code < 300) || code == 101) {
		/* successful CONNECT or protocol switch: tunnel, no body */
		h1m->flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
		h1m->flags |= H1_MF_XFER_LEN;
		h1m->curr_len = h1m->body_len = 0;
		flags |= HTX_SL_F_BODYLESS_RESP;
	}
	else if ((h1m->flags & H1_MF_METH_HEAD) || (code >= 100 && code < 200) ||
		 (code == 204) || (code == 304)) {
		/* Responses known to have no body. */
		h1m->flags |= H1_MF_XFER_LEN;
		h1m->curr_len = h1m->body_len = 0;
		flags |= HTX_SL_F_BODYLESS_RESP;
	}
	else if (h1m->flags & (H1_MF_CLEN|H1_MF_CHNK)) {
		/* Responses with a known body length. */
		h1m->flags |= H1_MF_XFER_LEN;
	}

	flags |= h1m_htx_sl_flags(h1m);
	sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, vsn, status, reason);
	if (!sl || !htx_add_all_headers(htx, hdrs))
		goto error;
	sl->info.res.status = code;

	/* If body length cannot be determined, set htx->extra to
	 * HTX_UNKOWN_PAYLOAD_LENGTH. This value is impossible in other cases.
	 */
	htx->extra = ((h1m->flags & H1_MF_XFER_LEN) ? h1m->curr_len : HTX_UNKOWN_PAYLOAD_LENGTH);

  end:
	return 1;
  output_full:
	h1m_init_res(h1m);
	h1m->flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);
	return -2;
  error:
	h1m->err_pos = h1m->next;
	h1m->err_state = h1m->state;
	htx->flags |= HTX_FL_PARSING_ERROR;
	return -1;
}
+
/* Parse HTTP/1 headers. It returns the number of bytes parsed on success, 0 if
 * headers are incomplete, -1 if an error occurred or -2 if it needs more space
 * to proceed while the output buffer is not empty. Parsing errors are reported
 * by setting the htx flag HTX_FL_PARSING_ERROR and filling h1m->err_pos and
 * h1m->err_state fields. This function is responsible to update the parser
 * state <h1m> and the start-line <h1sl> if not NULL. For requests, <h1sl>
 * must always be provided. For responses, <h1sl> may be NULL and the <h1m>
 * flags H1_MF_METH_CONNECT or H1_MF_METH_HEAD may be set.
 */
int h1_parse_msg_hdrs(struct h1m *h1m, union h1_sl *h1sl, struct htx *dsthtx,
		      struct buffer *srcbuf, size_t ofs, size_t max)
{
	/* NOTE(review): VLA sized by a runtime tunable — assumed bounded by
	 * configuration validation; confirm tune.max_http_hdr limits.
	 */
	struct http_hdr hdrs[global.tune.max_http_hdr];
	int total = 0, ret = 0;

	if (!max || !b_data(srcbuf))
		goto end;

	/* Realign the input buffer if necessary (i.e. the data wrap) */
	if (b_head(srcbuf) + b_data(srcbuf) > b_wrap(srcbuf))
		b_slow_realign_ofs(srcbuf, trash.area, 0);

	if (!h1sl) {
		/* If there no start-line, be sure to only parse the headers */
		h1m->flags |= H1_MF_HDRS_ONLY;
	}
	ret = h1_headers_to_hdr_list(b_peek(srcbuf, ofs), b_tail(srcbuf),
				     hdrs, sizeof(hdrs)/sizeof(hdrs[0]), h1m, h1sl);
	if (ret <= 0) {
		/* Incomplete or invalid message. If the input buffer only
		 * contains headers and is full, which is detected by it being
		 * full and the offset to be zero, it's an error because
		 * headers are too large to be handled by the parser. */
		if (ret < 0 || (!ret && !ofs && !buf_room_for_htx_data(srcbuf)))
			goto error;
		goto end;
	}
	total = ret;

	/* messages headers fully parsed, do some checks to prepare the body
	 * parsing.
	 */

	if (!(h1m->flags & H1_MF_RESP)) {
		/* request side: validate (or upgrade from 0.9) the version */
		if (!h1_process_req_vsn(h1m, h1sl)) {
			h1m->err_pos = h1sl->rq.v.ptr - b_head(srcbuf);
			h1m->err_state = h1m->state;
			goto vsn_error;
		}
		ret = h1_postparse_req_hdrs(h1m, h1sl, dsthtx, hdrs, max);
		if (ret < 0)
			return ret;
	}
	else {
		/* response side: <h1sl> may be NULL (FCGI), skip version check then */
		if (h1sl && !h1_process_res_vsn(h1m, h1sl)) {
			h1m->err_pos = h1sl->st.v.ptr - b_head(srcbuf);
			h1m->err_state = h1m->state;
			goto vsn_error;
		}
		ret = h1_postparse_res_hdrs(h1m, h1sl, dsthtx, hdrs, max);
		if (ret < 0)
			return ret;
	}

	/* Switch messages without any payload to DONE state */
	if (((h1m->flags & H1_MF_CLEN) && h1m->body_len == 0) ||
	    ((h1m->flags & (H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK)) == H1_MF_XFER_LEN)) {
		h1m->state = H1_MSG_DONE;
		dsthtx->flags |= HTX_FL_EOM;
	}

  end:
	return total;
  error:
	h1m->err_pos = h1m->next;
	h1m->err_state = h1m->state;
  vsn_error:
	dsthtx->flags |= HTX_FL_PARSING_ERROR;
	return -1;

}
+
/* Copy data from <srcbuf> into a DATA block in <dsthtx>. If possible, a
 * zero-copy is performed (the source and destination buffer areas are
 * swapped). It returns the number of bytes copied.
 */
static size_t h1_copy_msg_data(struct htx **dsthtx, struct buffer *srcbuf, size_t ofs,
			       size_t count, size_t max, struct buffer *htxbuf)
{
	struct htx *tmp_htx = *dsthtx;
	size_t block1, block2, ret = 0;

	/* Be prepared to create at least one HTX block by reserving its size
	 * and adjust <count> accordingly.
	 */
	if (max <= sizeof(struct htx_blk))
		goto end;
	max -= sizeof(struct htx_blk);
	if (count > max)
		count = max;

	/* very often with large files we'll face the following
	 * situation :
	 *  - htx is empty and points to <htxbuf>
	 *  - count == srcbuf->data
	 *  - srcbuf->head == sizeof(struct htx)
	 *  => we can swap the buffers and place an htx header into
	 *     the target buffer instead
	 */
	if (unlikely(htx_is_empty(tmp_htx) && count == b_data(srcbuf) &&
		     !ofs && b_head_ofs(srcbuf) == sizeof(struct htx))) {
		void *raw_area = srcbuf->area;
		void *htx_area = htxbuf->area;
		struct htx_blk *blk;

		/* swap the storage areas: the raw data stays in place and
		 * becomes the HTX buffer's payload
		 */
		srcbuf->area = htx_area;
		htxbuf->area = raw_area;
		tmp_htx = (struct htx *)htxbuf->area;
		tmp_htx->size = htxbuf->size - sizeof(*tmp_htx);
		htx_reset(tmp_htx);
		b_set_data(htxbuf, b_size(htxbuf));

		blk = htx_add_blk(tmp_htx, HTX_BLK_DATA, count);
		blk->info += count;

		*dsthtx = tmp_htx;
		/* nothing else to do, the old buffer now contains an
		 * empty pre-initialized HTX header
		 */
		return count;
	}

	/* * First block is the copy of contiguous data starting at offset <ofs>
	 *   with <count> as max. <max> is updated accordingly
	 *
	 * * Second block is the remaining (count - block1) if <max> is large
	 *   enough. Another HTX block is reserved.
	 */
	block1 = b_contig_data(srcbuf, ofs);
	block2 = 0;
	if (block1 > count)
		block1 = count;
	max -= block1;

	if (max > sizeof(struct htx_blk)) {
		block2 = count - block1;
		max -= sizeof(struct htx_blk);
		if (block2 > max)
			block2 = max;
	}

	/* copy the first (and possibly only) contiguous part, then the
	 * wrapped remainder from the start of the source buffer area
	 */
	ret = htx_add_data(tmp_htx, ist2(b_peek(srcbuf, ofs), block1));
	if (ret == block1 && block2)
		ret += htx_add_data(tmp_htx, ist2(b_orig(srcbuf), block2));
  end:
	return ret;
}
+
/* Maps ASCII hex digits ('0'-'9', 'a'-'f', 'A'-'F') to their numeric value
 * (0-15); every other byte maps to -1 (invalid). One entry per byte value,
 * 256 entries, 32 per row.
 */
static const char hextable[] = {
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,
	-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
	-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
};
+
+/* Generic function to parse the current HTTP chunk. It may be used to parse
+ * any kind of chunks, including incomplete HTTP chunks or chunks split
+ * because the buffer wraps. This version tries to perform zero-copy on large
+ * chunks if possible (via h1_copy_msg_data()).
+ *
+ * <h1m> is the H1 parser state, <dsthtx> the destination HTX message (the
+ * pointer may be updated by the zero-copy path), <srcbuf>/<ofs> the input
+ * buffer and the parsing offset, <max> the free space in the HTX message
+ * (updated on return) and <htxbuf> the buffer storing the HTX message.
+ * It returns the number of bytes consumed from <srcbuf>, possibly 0 when
+ * the chunk is incomplete. On parsing error, 0 is returned and
+ * HTX_FL_PARSING_ERROR is set on <*dsthtx>.
+ */
+static size_t h1_parse_chunk(struct h1m *h1m, struct htx **dsthtx,
+                             struct buffer *srcbuf, size_t ofs, size_t *max,
+                             struct buffer *htxbuf)
+{
+	uint64_t chksz;
+	size_t sz, used, lmax, total = 0;
+	int ret = 0;
+
+	lmax = *max;
+	switch (h1m->state) {
+	case H1_MSG_DATA:
+	  new_chunk:
+		used = htx_used_space(*dsthtx);
+		/* stop if no input data or no room left for a new HTX block */
+		if (b_data(srcbuf) == ofs || lmax <= sizeof(struct htx_blk))
+			break;
+
+		/* copy at most the remainder of the current chunk */
+		sz = b_data(srcbuf) - ofs;
+		if (unlikely(sz > h1m->curr_len))
+			sz = h1m->curr_len;
+		sz = h1_copy_msg_data(dsthtx, srcbuf, ofs, sz, lmax, htxbuf);
+		lmax -= htx_used_space(*dsthtx) - used;
+		ofs += sz;
+		total += sz;
+		h1m->curr_len -= sz;
+		/* the chunk payload is not finished yet, wait for more data */
+		if (h1m->curr_len)
+			break;
+
+		h1m->state = H1_MSG_CHUNK_CRLF;
+		__fallthrough;
+
+	case H1_MSG_CHUNK_CRLF:
+		ret = h1_skip_chunk_crlf(srcbuf, ofs, b_data(srcbuf));
+		if (ret <= 0)
+			break;
+		ofs += ret;
+		total += ret;
+
+		/* Don't parse next chunk to try to handle contiguous chunks if possible */
+		h1m->state = H1_MSG_CHUNK_SIZE;
+		break;
+
+	case H1_MSG_CHUNK_SIZE:
+		ret = h1_parse_chunk_size(srcbuf, ofs, b_data(srcbuf), &chksz);
+		if (ret <= 0)
+			break;
+		h1m->state = ((!chksz) ? H1_MSG_TRAILERS : H1_MSG_DATA);
+		h1m->curr_len = chksz;
+		h1m->body_len += chksz;
+		ofs += ret;
+		total += ret;
+
+		if (h1m->curr_len) {
+			h1m->state = H1_MSG_DATA;
+			goto new_chunk;
+		}
+		/* a null chunk size announces the trailers */
+		h1m->state = H1_MSG_TRAILERS;
+		break;
+
+	default:
+		/* unexpected */
+		ret = -1;
+		break;
+	}
+
+	if (ret < 0) {
+		(*dsthtx)->flags |= HTX_FL_PARSING_ERROR;
+		h1m->err_state = h1m->state;
+		h1m->err_pos = ofs;
+		total = 0;
+	}
+
+	/* Don't forget to update htx->extra */
+	(*dsthtx)->extra = h1m->curr_len;
+	*max = lmax;
+	return total;
+}
+
+/* Parses full contiguous HTTP chunks. This version is optimized for small
+ * chunks and does not perform zero-copy. It must be called in
+ * H1_MSG_CHUNK_SIZE state. Be careful if you change something in this
+ * function. It is really sensitive, any change may have an impact on
+ * performance.
+ *
+ * It returns the number of bytes consumed from <srcbuf> (0 when nothing could
+ * be parsed) and updates <*max> with the remaining room in the HTX message.
+ * On parsing error, 0 is returned and HTX_FL_PARSING_ERROR is set on
+ * <*dsthtx>.
+ */
+static size_t h1_parse_full_contig_chunks(struct h1m *h1m, struct htx **dsthtx,
+                                          struct buffer *srcbuf, size_t ofs, size_t *max,
+                                          struct buffer *htxbuf)
+{
+	char *start, *end, *dptr;
+	ssize_t dpos, ridx, save;
+	size_t lmax, total = 0;
+	uint64_t chksz;
+	struct htx_ret htxret;
+
+	lmax = *max;
+	if (lmax <= sizeof(struct htx_blk))
+		goto out;
+
+	/* source info :
+	 *  start : pointer at <ofs> position
+	 *  end   : pointer marking the end of data to parse
+	 *  ridx  : the reverse index (negative) marking the parser position (end[ridx])
+	 */
+	ridx = -b_contig_data(srcbuf, ofs);
+	if (!ridx)
+		goto out;
+	start = b_peek(srcbuf, ofs);
+	end = start - ridx;
+
+	/* Reserve the maximum possible size for the data */
+	htxret = htx_reserve_max_data(*dsthtx);
+	if (!htxret.blk)
+		goto out;
+
+	/* destination info :
+	 *  dptr : pointer on the beginning of the data
+	 *  dpos : current position where to copy data
+	 */
+	dptr = htx_get_blk_ptr(*dsthtx, htxret.blk);
+	dpos = htxret.ret;
+
+	/* Empty DATA block is not possible, thus if <dpos> is the beginning of
+	 * the block, it means it is a new block. We can remove the block size
+	 * from <max>. Then we must adjust it if it exceeds the free size in the
+	 * block.
+	 */
+	if (!dpos)
+		lmax -= sizeof(struct htx_blk);
+	if (lmax > htx_get_blksz(htxret.blk) - dpos)
+		lmax = htx_get_blksz(htxret.blk) - dpos;
+
+	while (1) {
+		/* The chunk size is in the following form, though we are only
+		 * interested in the size and CRLF :
+		 *    1*HEXDIGIT *WSP *[ ';' extensions ] CRLF
+		 */
+		chksz = 0;
+		save = ridx; /* Save the parser position to rewind if necessary */
+		while (1) {
+			int c;
+
+			if (!ridx)
+				goto end_parsing;
+
+			/* Convert current character */
+			c = hextable[(unsigned char)end[ridx]];
+
+			/* not a hex digit anymore (hextable returns -1, whose
+			 * high nibble is non-zero) */
+			if (c & 0xF0)
+				break;
+
+			/* Update current chunk size */
+			chksz = (chksz << 4) + c;
+
+			if (unlikely(chksz & 0xF0000000000000ULL)) {
+				/* Don't get more than 13 hexa-digit (2^52 - 1)
+				 * to never feed possibly bogus values from
+				 * languages that use floats for their integers
+				 */
+				goto parsing_error;
+			}
+			++ridx;
+		}
+
+		/* not enough room left in the HTX message for this chunk,
+		 * stop here and let the caller retry later */
+		if (unlikely(chksz > lmax))
+			goto end_parsing;
+
+		if (unlikely(ridx == save)) {
+			/* empty size not allowed */
+			goto parsing_error;
+		}
+
+		/* Skip spaces */
+		while (HTTP_IS_SPHT(end[ridx])) {
+			if (!++ridx)
+				goto end_parsing;
+		}
+
+		/* Up to there, we know that at least one byte is present. Check
+		 * for the end of chunk size.
+		 */
+		while (1) {
+			if (likely(end[ridx] == '\r')) {
+				/* Parse CRLF */
+				if (!++ridx)
+					goto end_parsing;
+				if (unlikely(end[ridx] != '\n')) {
+					/* CR must be followed by LF */
+					goto parsing_error;
+				}
+
+				/* done */
+				++ridx;
+				break;
+			}
+			else if (likely(end[ridx] == ';')) {
+				/* chunk extension, ends at next CRLF */
+				if (!++ridx)
+					goto end_parsing;
+				while (!HTTP_IS_CRLF(end[ridx])) {
+					if (!++ridx)
+						goto end_parsing;
+				}
+				/* we have a CRLF now, loop above */
+				continue;
+			}
+			else {
+				/* all other characters are unexpected, especially LF alone */
+				goto parsing_error;
+			}
+		}
+
+		/* Exit if it is the last chunk */
+		if (unlikely(!chksz)) {
+			h1m->state = H1_MSG_TRAILERS;
+			save = ridx;
+			goto end_parsing;
+		}
+
+		/* Now check if the whole chunk is here (including the CRLF at
+		 * the end), otherwise we switch in H1_MSG_DATA state.
+		 */
+		if (chksz + 2 > -ridx) {
+			h1m->curr_len = chksz;
+			h1m->body_len += chksz;
+			h1m->state = H1_MSG_DATA;
+			(*dsthtx)->extra = h1m->curr_len;
+			save = ridx;
+			goto end_parsing;
+		}
+
+		/* full chunk payload is contiguous: copy it at once */
+		memcpy(dptr + dpos, end + ridx, chksz);
+		h1m->body_len += chksz;
+		lmax -= chksz;
+		dpos += chksz;
+		ridx += chksz;
+
+		/* Parse CRLF */
+		if (unlikely(end[ridx] != '\r')) {
+			h1m->state = H1_MSG_CHUNK_CRLF;
+			goto parsing_error;
+		}
+		++ridx;
+		if (end[ridx] != '\n') {
+			h1m->state = H1_MSG_CHUNK_CRLF;
+			goto parsing_error;
+		}
+		++ridx;
+	}
+
+  end_parsing:
+	ridx = save;
+
+	/* Adjust the HTX block size or remove the block if nothing was copied
+	 * (empty HTX data blocks are not supported).
+	 */
+	if (!dpos)
+		htx_remove_blk(*dsthtx, htxret.blk);
+	else
+		htx_change_blk_value_len(*dsthtx, htxret.blk, dpos);
+	total = end + ridx - start;
+	*max = lmax;
+
+  out:
+	return total;
+
+  parsing_error:
+	(*dsthtx)->flags |= HTX_FL_PARSING_ERROR;
+	h1m->err_state = h1m->state;
+	h1m->err_pos = ofs + end + ridx - start;
+	return 0;
+}
+
+/* Parse HTTP chunks. This function relies on an optimized function to parse
+ * contiguous chunks if possible. Otherwise, when a chunk is incomplete or when
+ * the underlying buffer is wrapping, a generic function is used.
+ *
+ * It returns the total number of bytes consumed from <srcbuf>; 0 is returned
+ * when nothing could be parsed or on parsing error (in which case
+ * HTX_FL_PARSING_ERROR is set on <*dsthtx> by the callees).
+ */
+static size_t h1_parse_msg_chunks(struct h1m *h1m, struct htx **dsthtx,
+                                  struct buffer *srcbuf, size_t ofs, size_t max,
+                                  struct buffer *htxbuf)
+{
+	size_t ret, total = 0;
+
+	while (ofs < b_data(srcbuf)) {
+		ret = 0;
+
+		/* First parse full contiguous chunks. It is only possible if we
+		 * are waiting for the next chunk size.
+		 */
+		if (h1m->state == H1_MSG_CHUNK_SIZE) {
+			ret = h1_parse_full_contig_chunks(h1m, dsthtx, srcbuf, ofs, &max, htxbuf);
+			/* exit on error */
+			if (!ret && (*dsthtx)->flags & HTX_FL_PARSING_ERROR) {
+				total = 0;
+				break;
+			}
+			/* or let a chance to parse remaining data */
+			total += ret;
+			ofs += ret;
+			ret = 0;
+		}
+
+		/* If some data remains, try to parse it using the generic
+		 * function handling incomplete chunks and split chunks
+		 * because of a wrapping buffer.
+		 */
+		if (h1m->state < H1_MSG_TRAILERS && ofs < b_data(srcbuf)) {
+			ret = h1_parse_chunk(h1m, dsthtx, srcbuf, ofs, &max, htxbuf);
+			total += ret;
+			ofs += ret;
+		}
+
+		/* nothing more was parsed or parsing was stopped on incomplete
+		 * chunk, we can exit, handling parsing error if necessary.
+		 */
+		if (!ret || h1m->state != H1_MSG_CHUNK_SIZE) {
+			if ((*dsthtx)->flags & HTX_FL_PARSING_ERROR)
+				total = 0;
+			break;
+		}
+	}
+
+	return total;
+}
+
+/* Parse HTTP/1 body. It returns the number of bytes parsed if > 0, or 0 if it
+ * couldn't proceed. Parsing errors are reported by setting the htx flags
+ * HTX_FL_PARSING_ERROR and filling h1m->err_pos and h1m->err_state fields. This
+ * function is responsible to update the parser state <h1m>.
+ */
+size_t h1_parse_msg_data(struct h1m *h1m, struct htx **dsthtx,
+                         struct buffer *srcbuf, size_t ofs, size_t max,
+                         struct buffer *htxbuf)
+{
+	size_t sz, total = 0;
+
+	/* nothing to parse */
+	if (b_data(srcbuf) == ofs)
+		return 0;
+
+	if (h1m->flags & H1_MF_CLEN) {
+		/* content-length: copy no more than the remaining announced
+		 * body length (h1m->curr_len) */
+		sz = b_data(srcbuf) - ofs;
+		if (unlikely(sz > h1m->curr_len))
+			sz = h1m->curr_len;
+		sz = h1_copy_msg_data(dsthtx, srcbuf, ofs, sz, max, htxbuf);
+		h1m->curr_len -= sz;
+		(*dsthtx)->extra = h1m->curr_len;
+		total += sz;
+		if (!h1m->curr_len) {
+			/* whole announced body was transferred */
+			h1m->state = H1_MSG_DONE;
+			(*dsthtx)->flags |= HTX_FL_EOM;
+		}
+	}
+	else if (h1m->flags & H1_MF_CHNK) {
+		/* te:chunked : parse chunks */
+		total += h1_parse_msg_chunks(h1m, dsthtx, srcbuf, ofs, max, htxbuf);
+	}
+	else if (h1m->flags & H1_MF_XFER_LEN) {
+		/* XFER_LEN is set but not CLEN nor CHNK, it means there is no
+		 * body. Switch the message in DONE state
+		 */
+		h1m->state = H1_MSG_DONE;
+		(*dsthtx)->flags |= HTX_FL_EOM;
+	}
+	else {
+		/* no content length, read till SHUTW */
+		sz = b_data(srcbuf) - ofs;
+		sz = h1_copy_msg_data(dsthtx, srcbuf, ofs, sz, max, htxbuf);
+		total += sz;
+	}
+
+	return total;
+}
+
+/* Parse HTTP/1 trailers. It returns the number of bytes parsed on success, 0 if
+ * trailers are incomplete, -1 if an error occurred or -2 if it needs more space
+ * to proceed while the output buffer is not empty. Parsing errors are reported
+ * by setting the htx flags HTX_FL_PARSING_ERROR and filling h1m->err_pos and
+ * h1m->err_state fields. This function is responsible to update the parser
+ * state <h1m>.
+ */
+int h1_parse_msg_tlrs(struct h1m *h1m, struct htx *dsthtx,
+                      struct buffer *srcbuf, size_t ofs, size_t max)
+{
+	/* VLA sized by the configured per-message header limit */
+	struct http_hdr hdrs[global.tune.max_http_hdr];
+	struct h1m tlr_h1m;
+	int ret = 0;
+
+	if (b_data(srcbuf) == ofs) {
+		/* Nothing to parse */
+		goto end;
+	}
+	if (!max) {
+		/* No more room */
+		goto output_full;
+	}
+
+	/* Realign the input buffer if necessary */
+	if (b_peek(srcbuf, ofs) > b_tail(srcbuf))
+		b_slow_realign_ofs(srcbuf, trash.area, 0);
+
+	/* parse the trailers with a local, headers-only parser state */
+	tlr_h1m.flags = (H1_MF_NO_PHDR|H1_MF_HDRS_ONLY);
+	tlr_h1m.err_pos = h1m->err_pos;
+	ret = h1_headers_to_hdr_list(b_peek(srcbuf, ofs), b_tail(srcbuf),
+	                             hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &tlr_h1m, NULL);
+	if (ret <= 0) {
+		/* Incomplete or invalid trailers. If the input buffer only
+		 * contains trailers and is full, which is detected by it being
+		 * full and the offset to be zero, it's an error because
+		 * trailers are too large to be handled by the parser. */
+		if (ret < 0 || (!ret && !ofs && !buf_room_for_htx_data(srcbuf)))
+			goto error;
+		goto end;
+	}
+
+	/* messages trailers fully parsed. */
+	if (h1_eval_htx_hdrs_size(hdrs) > max) {
+		if (htx_is_empty(dsthtx))
+			goto error;
+		goto output_full;
+	}
+
+	if (!htx_add_all_trailers(dsthtx, hdrs))
+		goto error;
+
+	h1m->state = H1_MSG_DONE;
+	dsthtx->flags |= HTX_FL_EOM;
+
+  end:
+	return ret;
+  output_full:
+	return -2;
+  error:
+	h1m->err_state = h1m->state;
+	h1m->err_pos = h1m->next;
+	dsthtx->flags |= HTX_FL_PARSING_ERROR;
+	return -1;
+}
+
+/* Appends the H1 representation of the request line <sl> to the chunk <chk>.
+ * Returns 1 when everything could be appended, otherwise 0 and <chk> is
+ * restored to its initial length.
+ */
+int h1_format_htx_reqline(const struct htx_sl *sl, struct buffer *chk)
+{
+	size_t orig_len = chk->data;
+	struct ist uri = h1_get_uri(sl);
+	const char *vptr = "HTTP/1.1";
+	size_t vlen = 8;
+
+	/* non-1.1 versions are emitted verbatim from the start line */
+	if (!(sl->flags & HTX_SL_F_VER_11)) {
+		vptr = HTX_SL_REQ_VPTR(sl);
+		vlen = HTX_SL_REQ_VLEN(sl);
+	}
+
+	if (chunk_memcat(chk, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl)) &&
+	    chunk_memcat(chk, " ", 1) &&
+	    chunk_memcat(chk, uri.ptr, uri.len) &&
+	    chunk_memcat(chk, " ", 1) &&
+	    chunk_memcat(chk, vptr, vlen) &&
+	    chunk_memcat(chk, "\r\n", 2))
+		return 1;
+
+	/* not enough room: roll back partial output */
+	chk->data = orig_len;
+	return 0;
+}
+
+/* Appends the H1 representation of the status line <sl> to the chunk <chk>.
+ * Returns 1 when everything could be appended, otherwise 0. On failure <chk>
+ * is restored to its initial length.
+ */
+int h1_format_htx_stline(const struct htx_sl *sl, struct buffer *chk)
+{
+	size_t orig_len = chk->data;
+	const char *vptr = "HTTP/1.1";
+	size_t vlen = 8;
+
+	/* fast rejection when the fully assembled line cannot fit */
+	if (HTX_SL_LEN(sl) + 4 > b_room(chk))
+		return 0;
+
+	/* non-1.1 versions are emitted verbatim from the start line */
+	if (!(sl->flags & HTX_SL_F_VER_11)) {
+		vptr = HTX_SL_RES_VPTR(sl);
+		vlen = HTX_SL_RES_VLEN(sl);
+	}
+
+	if (chunk_memcat(chk, vptr, vlen) &&
+	    chunk_memcat(chk, " ", 1) &&
+	    chunk_memcat(chk, HTX_SL_RES_CPTR(sl), HTX_SL_RES_CLEN(sl)) &&
+	    chunk_memcat(chk, " ", 1) &&
+	    chunk_memcat(chk, HTX_SL_RES_RPTR(sl), HTX_SL_RES_RLEN(sl)) &&
+	    chunk_memcat(chk, "\r\n", 2))
+		return 1;
+
+	chk->data = orig_len;
+	return 0;
+}
+
+/* Appends the H1 representation "<n>: <v>\r\n" of one header to the chunk
+ * <chk>. Returns 1 when everything could be appended, otherwise 0 and <chk>
+ * is restored to its initial length.
+ */
+int h1_format_htx_hdr(const struct ist n, const struct ist v, struct buffer *chk)
+{
+	size_t orig_len = chk->data;
+	int ok;
+
+	/* fast rejection when the assembled header line cannot fit */
+	if (n.len + v.len + 4 > b_room(chk))
+		return 0;
+
+	ok = chunk_memcat(chk, n.ptr, n.len) &&
+	     chunk_memcat(chk, ": ", 2) &&
+	     chunk_memcat(chk, v.ptr, v.len) &&
+	     chunk_memcat(chk, "\r\n", 2);
+	if (!ok) {
+		chk->data = orig_len;
+		return 0;
+	}
+	return 1;
+}
+
+/* Appends the H1 representation of the data <data> to the chunk <chk>. If
+ * <chunked> is non-zero, it emits HTTP/1 chunk-encoded data
+ * ("<hex-size>\r\n<data>\r\n"). Returns 1 when everything could be appended,
+ * otherwise 0; in chunked mode a partial write is rolled back.
+ */
+int h1_format_htx_data(const struct ist data, struct buffer *chk, int chunked)
+{
+	size_t orig_len = chk->data;
+
+	if (!chunked) {
+		/* plain data: a single copy, nothing to roll back */
+		return !!chunk_memcat(chk, data.ptr, data.len);
+	}
+
+	{
+		/* build the hex size backwards, followed by CRLF; a uint32_t
+		 * needs at most 8 hex digits, so 10 bytes are enough */
+		char tmp[10];
+		char *p = tmp + sizeof(tmp);
+		uint32_t len = data.len;
+
+		*--p = '\n';
+		*--p = '\r';
+		do {
+			*--p = hextab[len & 0xF];
+			len >>= 4;
+		} while (len);
+
+		if (chunk_memcat(chk, p, tmp + sizeof(tmp) - p) &&
+		    chunk_memcat(chk, data.ptr, data.len) &&
+		    chunk_memcat(chk, "\r\n", 2))
+			return 1;
+	}
+
+	chk->data = orig_len;
+	return 0;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/h2.c b/src/h2.c
new file mode 100644
index 0000000..9c60cc6
--- /dev/null
+++ b/src/h2.c
@@ -0,0 +1,814 @@
+/*
+ * HTTP/2 protocol processing
+ *
+ * Copyright 2017 Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <haproxy/api.h>
+#include <haproxy/global.h>
+#include <haproxy/h2.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/http.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <import/ist.h>
+
+
+/* Per-frame-type validity limits used when checking received frames: the
+ * valid stream id range (<min_id>/<max_id>) and the valid payload length
+ * range (<min_len>/<max_len>).
+ * NOTE(review): <dir> looks like a direction bit mask (3 = both directions,
+ * 0 = never accepted, as for PUSH_PROMISE) -- confirm against h2.h.
+ */
+struct h2_frame_definition h2_frame_definition[H2_FT_ENTRIES] = {
+	 [H2_FT_DATA         ] = { .dir = 3, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 0, .max_len = H2_MAX_FRAME_LEN, },
+	 [H2_FT_HEADERS      ] = { .dir = 3, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 1, .max_len = H2_MAX_FRAME_LEN, },
+	 [H2_FT_PRIORITY     ] = { .dir = 3, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 5, .max_len = 5,                },
+	 [H2_FT_RST_STREAM   ] = { .dir = 3, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 4, .max_len = 4,                },
+	 [H2_FT_SETTINGS     ] = { .dir = 3, .min_id = 0, .max_id = 0,                .min_len = 0, .max_len = H2_MAX_FRAME_LEN, },
+	 [H2_FT_PUSH_PROMISE ] = { .dir = 0, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 4, .max_len = H2_MAX_FRAME_LEN, },
+	 [H2_FT_PING         ] = { .dir = 3, .min_id = 0, .max_id = 0,                .min_len = 8, .max_len = 8,                },
+	 [H2_FT_GOAWAY       ] = { .dir = 3, .min_id = 0, .max_id = 0,                .min_len = 8, .max_len = H2_MAX_FRAME_LEN, },
+	 [H2_FT_WINDOW_UPDATE] = { .dir = 3, .min_id = 0, .max_id = H2_MAX_STREAM_ID, .min_len = 4, .max_len = 4,                },
+	 [H2_FT_CONTINUATION ] = { .dir = 3, .min_id = 1, .max_id = H2_MAX_STREAM_ID, .min_len = 0, .max_len = H2_MAX_FRAME_LEN, },
+};
+
+/* Prepare the request line into <htx> from pseudo headers stored in <phdr[]>.
+ * <fields> indicates what was found so far. This should be called once at the
+ * detection of the first general header field or at the end of the request if
+ * no general header field was found yet. Returns the created start line on
+ * success, or NULL on failure. Upon success, <msgf> is updated with a few
+ * H2_MSGF_* flags indicating what was found while parsing.
+ *
+ * The rules below deserve a bit of explanation. There tends to be some
+ * confusion regarding H2's authority vs the Host header. They are different
+ * though may sometimes be exchanged. In H2, the request line is broken into :
+ * - :method
+ * - :scheme
+ * - :authority
+ * - :path
+ *
+ * An equivalent HTTP/1.x absolute-form request would then look like :
+ * <:method> <:scheme>://<:authority><:path> HTTP/x.y
+ *
+ * Except for CONNECT which doesn't have scheme nor path and looks like :
+ * <:method> <:authority> HTTP/x.y
+ *
+ * It's worth noting that H2 still supports an encoding to map H1 origin-form
+ * and asterisk-form requests. These ones do not specify the authority. However
+ * in H2 they must still specify the scheme, which is not present in H1. Also,
+ * when encoding an absolute-form H1 request without a path, the path
+ * automatically becomes "/" except for the OPTIONS method where it
+ * becomes "*".
+ *
+ * As such it is explicitly permitted for an H2 client to send a request
+ * featuring a Host header and no :authority, though it's not the recommended
+ * way to use H2 for a client. It is however the only permitted way to encode
+ * an origin-form H1 request over H2. Thus we need to respect such differences
+ * as much as possible when re-encoding the H2 request into HTX.
+ */
+static struct htx_sl *h2_prepare_htx_reqline(uint32_t fields, struct ist *phdr, struct htx *htx, unsigned int *msgf)
+{
+	struct ist uri, meth_sl;
+	unsigned int flags = HTX_SL_F_NONE;
+	struct htx_sl *sl;
+	enum http_meth_t meth;
+	size_t i;
+
+	if ((fields & H2_PHDR_FND_METH) && isteq(phdr[H2_PHDR_IDX_METH], ist("CONNECT"))) {
+		if (fields & H2_PHDR_FND_PROT) {
+			/* rfc 8441 Extended Connect Protocol
+			 * #4 :scheme and :path must be present, as well as
+			 * :authority like all h2 requests
+			 */
+			if (!(fields & H2_PHDR_FND_SCHM)) {
+				/* missing scheme */
+				goto fail;
+			}
+			else if (!(fields & H2_PHDR_FND_PATH)) {
+				/* missing path */
+				goto fail;
+			}
+			else if (!(fields & H2_PHDR_FND_AUTH)) {
+				/* missing authority */
+				goto fail;
+			}
+
+			flags |= HTX_SL_F_HAS_SCHM;
+			if (isteqi(phdr[H2_PHDR_IDX_SCHM], ist("http")))
+				flags |= HTX_SL_F_SCHM_HTTP;
+			else if (isteqi(phdr[H2_PHDR_IDX_SCHM], ist("https")))
+				flags |= HTX_SL_F_SCHM_HTTPS;
+			else if (!http_validate_scheme(phdr[H2_PHDR_IDX_SCHM]))
+				htx->flags |= HTX_FL_PARSING_ERROR;
+
+			/* the Extended CONNECT is encoded on the HTX side as a
+			 * GET; the upgrade headers are added later by
+			 * h2_make_htx_request() */
+			meth_sl = ist("GET");
+
+			*msgf |= H2_MSGF_EXT_CONNECT;
+			/* no ES on the HEADERS frame but no body either for
+			 * Extended CONNECT */
+			*msgf &= ~H2_MSGF_BODY;
+		}
+		else {
+			/* RFC 7540 #8.2.6 regarding CONNECT: ":scheme" and ":path"
+			 * MUST be omitted ; ":authority" contains the host and port
+			 * to connect to.
+			 */
+			if (fields & H2_PHDR_FND_SCHM) {
+				/* scheme not allowed */
+				goto fail;
+			}
+			else if (fields & H2_PHDR_FND_PATH) {
+				/* path not allowed */
+				goto fail;
+			}
+			else if (!(fields & H2_PHDR_FND_AUTH)) {
+				/* missing authority */
+				goto fail;
+			}
+
+			meth_sl = phdr[H2_PHDR_IDX_METH];
+		}
+
+		*msgf |= H2_MSGF_BODY_TUNNEL;
+	}
+	else if ((fields & (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) !=
+	         (H2_PHDR_FND_METH|H2_PHDR_FND_SCHM|H2_PHDR_FND_PATH)) {
+		/* RFC 7540 #8.1.2.3 : all requests MUST include exactly one
+		 * valid value for the ":method", ":scheme" and ":path" phdr
+		 * unless it is a CONNECT request.
+		 */
+		if (!(fields & H2_PHDR_FND_METH)) {
+			/* missing method */
+			goto fail;
+		}
+		else if (!(fields & H2_PHDR_FND_SCHM)) {
+			/* missing scheme */
+			goto fail;
+		}
+		else {
+			/* missing path */
+			goto fail;
+		}
+	}
+	else { /* regular methods */
+		/* RFC3986#6.2.2.1: scheme is case-insensitive. We need to
+		 * classify the scheme as "present/http", "present/https",
+		 * "present/other", "absent" so as to decide whether or not
+		 * we're facing a normalized URI that will have to be encoded
+		 * in origin or absolute form. Indeed, 7540#8.1.2.3 says that
+		 * clients should use the absolute form, thus we cannot infer
+		 * whether or not the client wanted to use a proxy here.
+		 */
+		flags |= HTX_SL_F_HAS_SCHM;
+		if (isteqi(phdr[H2_PHDR_IDX_SCHM], ist("http")))
+			flags |= HTX_SL_F_SCHM_HTTP;
+		else if (isteqi(phdr[H2_PHDR_IDX_SCHM], ist("https")))
+			flags |= HTX_SL_F_SCHM_HTTPS;
+		else if (!http_validate_scheme(phdr[H2_PHDR_IDX_SCHM]))
+			htx->flags |= HTX_FL_PARSING_ERROR;
+
+		meth_sl = phdr[H2_PHDR_IDX_METH];
+	}
+
+	if (fields & H2_PHDR_FND_PATH) {
+		/* 7540#8.1.2.3: :path must not be empty, and must be either
+		 * '*' or an RFC3986 "path-absolute" starting with a "/" but
+		 * not with "//".
+		 * However, this "path-absolute" was a mistake which was
+		 * later fixed in http2bis as "absolute-path" to match
+		 * HTTP/1, thus also allowing "//".
+		 */
+		if (unlikely(!phdr[H2_PHDR_IDX_PATH].len))
+			goto fail;
+		else if (unlikely(phdr[H2_PHDR_IDX_PATH].ptr[0] != '/')) {
+			if (!isteq(phdr[H2_PHDR_IDX_PATH], ist("*")))
+				goto fail;
+		}
+	}
+
+	if (!(flags & HTX_SL_F_HAS_SCHM)) {
+		/* no scheme, use authority only (CONNECT) */
+		uri = phdr[H2_PHDR_IDX_AUTH];
+		flags |= HTX_SL_F_HAS_AUTHORITY;
+	}
+	else if (fields & H2_PHDR_FND_AUTH) {
+		/* authority is present, let's use the absolute form. We simply
+		 * use the trash to concatenate them since all of them MUST fit
+		 * in a bufsize since it's where they come from.
+		 */
+		uri = ist2bin(trash.area, phdr[H2_PHDR_IDX_SCHM]);
+		istcat(&uri, ist("://"), trash.size);
+		istcat(&uri, phdr[H2_PHDR_IDX_AUTH], trash.size);
+		if (!isteq(phdr[H2_PHDR_IDX_PATH], ist("*")))
+			istcat(&uri, phdr[H2_PHDR_IDX_PATH], trash.size);
+		flags |= HTX_SL_F_HAS_AUTHORITY;
+
+		if (flags & (HTX_SL_F_SCHM_HTTP|HTX_SL_F_SCHM_HTTPS)) {
+			/* we don't know if it was originally an absolute or a
+			 * relative request because newer versions of HTTP use
+			 * the absolute URI format by default, which we call
+			 * the normalized URI format internally. This is the
+			 * strongly recommended way of sending a request for
+			 * a regular client, so we cannot distinguish this
+			 * from a request intended for a proxy. For other
+			 * schemes however there is no doubt.
+			 */
+			flags |= HTX_SL_F_NORMALIZED_URI;
+		}
+	}
+	else {
+		/* usual schemes with or without authority, use origin form */
+		uri = phdr[H2_PHDR_IDX_PATH];
+		/* NOTE(review): H2_PHDR_FND_AUTH cannot be set in this branch
+		 * (it is handled by the "else if" above), so the condition
+		 * below is never true; kept as defensive code.
+		 */
+		if (fields & H2_PHDR_FND_AUTH)
+			flags |= HTX_SL_F_HAS_AUTHORITY;
+	}
+
+	/* The method is a non-empty token (RFC7231#4.1) */
+	if (!meth_sl.len)
+		goto fail;
+	for (i = 0; i < meth_sl.len; i++) {
+		if (!HTTP_IS_TOKEN(meth_sl.ptr[i]))
+			htx->flags |= HTX_FL_PARSING_ERROR;
+	}
+
+	/* make sure the final URI isn't empty. Note that 7540#8.1.2.3 states
+	 * that :path must not be empty.
+	 */
+	if (!uri.len)
+		goto fail;
+
+	/* The final URI must not contain LWS nor CTL characters */
+	for (i = 0; i < uri.len; i++) {
+		unsigned char c = uri.ptr[i];
+		if (HTTP_IS_LWS(c) || HTTP_IS_CTL(c))
+			htx->flags |= HTX_FL_PARSING_ERROR;
+	}
+
+	/* Set HTX start-line flags */
+	flags |= HTX_SL_F_VER_11;    // V2 in fact
+	flags |= HTX_SL_F_XFER_LEN;  // xfer len always known with H2
+
+	meth = find_http_meth(meth_sl.ptr, meth_sl.len);
+	if (meth == HTTP_METH_HEAD) {
+		/* responses to HEAD carry no payload */
+		*msgf |= H2_MSGF_BODYLESS_RSP;
+		flags |= HTX_SL_F_BODYLESS_RESP;
+	}
+
+	sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, meth_sl, uri, ist("HTTP/2.0"));
+	if (!sl)
+		goto fail;
+	sl->info.req.meth = meth;
+	return sl;
+ fail:
+	return NULL;
+}
+
+/* Takes an H2 request present in the headers list <list> terminated by a name
+ * being <NULL,0> and emits the equivalent HTX request according to the rules
+ * documented in RFC7540 #8.1.2. The output contents are emitted in <htx>, and
+ * non-zero is returned if some bytes were emitted. In case of error, a
+ * negative error code is returned.
+ *
+ * Upon success, <msgf> is filled with a few H2_MSGF_* flags indicating what
+ * was found while parsing. The caller must set it to zero, or to H2_MSGF_BODY
+ * if a body is detected (!ES).
+ *
+ * The headers list <list> must be composed of :
+ *   - n.name != NULL, n.len > 0 : literal header name
+ *   - n.name == NULL, n.len > 0 : indexed pseudo header name number <n.len>
+ *     among H2_PHDR_IDX_*
+ *   - n.name ignored, n.len == 0 : end of list
+ *   - in all cases except the end of list, v.name and v.len must designate a
+ *     valid value.
+ *
+ * The Cookie header will be reassembled at the end, and for this, the <list>
+ * will be used to create a linked list, so its contents may be destroyed.
+ *
+ * When <relaxed> is non-zero, some non-dangerous checks will be ignored. This
+ * is in order to satisfy "option accept-invalid-http-request" for
+ * interoperability purposes.
+ */
+int h2_make_htx_request(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, int relaxed)
+{
+	struct ist phdr_val[H2_PHDR_NUM_ENTRIES];
+	uint32_t fields; /* bit mask of H2_PHDR_FND_* */
+	uint32_t idx;
+	int ck, lck; /* cookie index and last cookie index */
+	int phdr;
+	int ret;
+	int i;
+	struct htx_sl *sl = NULL;
+	unsigned int sl_flags = 0;
+	const char *ctl;
+
+	lck = ck = -1; // no cookie for now
+	fields = 0;
+	for (idx = 0; list[idx].n.len != 0; idx++) {
+		if (!isttest(list[idx].n)) {
+			/* this is an indexed pseudo-header */
+			phdr = list[idx].n.len;
+		}
+		else {
+			/* this can be any type of header */
+			/* RFC7540#8.1.2: upper case not allowed in header field names.
+			 * #10.3: header names must be valid (i.e. match a token).
+			 * For pseudo-headers we check from 2nd char and for other ones
+			 * from the first char, because HTTP_IS_TOKEN() also excludes
+			 * the colon.
+			 */
+			phdr = h2_str_to_phdr(list[idx].n);
+
+			/* reject any uppercase letter ('A' to 'Z' inclusive;
+			 * the previous "< 'Z' - 'A'" test let 'Z' slip
+			 * through since it is also a valid token char) or any
+			 * non-token character.
+			 */
+			for (i = !!phdr; i < list[idx].n.len; i++)
+				if ((uint8_t)(list[idx].n.ptr[i] - 'A') <= 'Z' - 'A' || !HTTP_IS_TOKEN(list[idx].n.ptr[i]))
+					goto fail;
+		}
+
+		/* RFC7540#10.3: intermediaries forwarding to HTTP/1 must take care of
+		 * rejecting NUL, CR and LF characters. For :path we reject all CTL
+		 * chars, spaces, and '#'.
+		 */
+		if (phdr == H2_PHDR_IDX_PATH && !relaxed) {
+			ctl = ist_find_range(list[idx].v, 0, '#');
+			if (unlikely(ctl) && http_path_has_forbidden_char(list[idx].v, ctl))
+				goto fail;
+		} else {
+			ctl = ist_find_ctl(list[idx].v);
+			if (unlikely(ctl) && http_header_has_forbidden_char(list[idx].v, ctl))
+				goto fail;
+		}
+
+		if (phdr > 0 && phdr < H2_PHDR_NUM_ENTRIES) {
+			/* insert a pseudo header by its index (in phdr) and value (in value) */
+			if (fields & ((1 << phdr) | H2_PHDR_FND_NONE)) {
+				if (fields & H2_PHDR_FND_NONE) {
+					/* pseudo header field after regular headers */
+					goto fail;
+				}
+				else {
+					/* repeated pseudo header field */
+					goto fail;
+				}
+			}
+			fields |= 1 << phdr;
+			phdr_val[phdr] = list[idx].v;
+			continue;
+		}
+		else if (phdr != 0) {
+			/* invalid pseudo header -- should never happen here */
+			goto fail;
+		}
+
+		/* regular header field in (name,value) */
+		if (unlikely(!(fields & H2_PHDR_FND_NONE))) {
+			/* no more pseudo-headers, time to build the request line */
+			sl = h2_prepare_htx_reqline(fields, phdr_val, htx, msgf);
+			if (!sl)
+				goto fail;
+			fields |= H2_PHDR_FND_NONE;
+
+			/* http2bis draft recommends to drop Host in favor of :authority when
+			 * the latter is present. This is required to make sure there is no
+			 * discrepancy between the authority and the host header, especially
+			 * since routing rules usually involve Host. Here we already know if
+			 * :authority was found so we can emit it right now and mark the host
+			 * as filled so that it's skipped later.
+			 */
+			if (fields & H2_PHDR_FND_AUTH) {
+				if (!htx_add_header(htx, ist("host"), phdr_val[H2_PHDR_IDX_AUTH]))
+					goto fail;
+				fields |= H2_PHDR_FND_HOST;
+			}
+		}
+
+		if (isteq(list[idx].n, ist("host"))) {
+			/* Host already emitted from :authority above: skip it */
+			if (fields & H2_PHDR_FND_HOST)
+				continue;
+
+			fields |= H2_PHDR_FND_HOST;
+		}
+
+		if (isteq(list[idx].n, ist("content-length"))) {
+			ret = http_parse_cont_len_header(&list[idx].v, body_len,
+			                                 *msgf & H2_MSGF_BODY_CL);
+			if (ret < 0)
+				goto fail;
+
+			*msgf |= H2_MSGF_BODY_CL;
+			sl_flags |= HTX_SL_F_CLEN;
+			if (ret == 0)
+				continue; // skip this duplicate
+		}
+
+		/* these ones are forbidden in requests (RFC7540#8.1.2.2) */
+		if (isteq(list[idx].n, ist("connection")) ||
+		    isteq(list[idx].n, ist("proxy-connection")) ||
+		    isteq(list[idx].n, ist("keep-alive")) ||
+		    isteq(list[idx].n, ist("upgrade")) ||
+		    isteq(list[idx].n, ist("transfer-encoding")))
+			goto fail;
+
+		if (isteq(list[idx].n, ist("te")) && !isteq(list[idx].v, ist("trailers")))
+			goto fail;
+
+		/* cookie requires special processing at the end */
+		if (isteq(list[idx].n, ist("cookie"))) {
+			http_cookie_register(list, idx, &ck, &lck);
+			continue;
+		}
+
+		if (!htx_add_header(htx, list[idx].n, list[idx].v))
+			goto fail;
+	}
+
+	/* RFC7540#8.1.2.1 mandates to reject response pseudo-headers (:status) */
+	if (fields & H2_PHDR_FND_STAT)
+		goto fail;
+
+	/* Let's dump the request now if not yet emitted. */
+	if (!(fields & H2_PHDR_FND_NONE)) {
+		sl = h2_prepare_htx_reqline(fields, phdr_val, htx, msgf);
+		if (!sl)
+			goto fail;
+	}
+
+	if (*msgf & H2_MSGF_BODY_TUNNEL)
+		*msgf &= ~(H2_MSGF_BODY|H2_MSGF_BODY_CL);
+
+	if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0) ||
+	    (*msgf & H2_MSGF_BODY_TUNNEL)) {
+		/* Request without body or tunnel requested */
+		sl_flags |= HTX_SL_F_BODYLESS;
+		htx->flags |= HTX_FL_EOM;
+	}
+
+	if (*msgf & H2_MSGF_EXT_CONNECT) {
+		/* Extended CONNECT (RFC 8441): re-emit as an HTTP/1-style upgrade */
+		if (!htx_add_header(htx, ist("upgrade"), phdr_val[H2_PHDR_IDX_PROT]))
+			goto fail;
+		if (!htx_add_header(htx, ist("connection"), ist("upgrade")))
+			goto fail;
+		sl_flags |= HTX_SL_F_CONN_UPG;
+	}
+
+	/* update the start line with last detected header info */
+	sl->flags |= sl_flags;
+
+	/* complete with missing Host if needed (we may validate this test if
+	 * no regular header was found).
+	 */
+	if ((fields & (H2_PHDR_FND_HOST|H2_PHDR_FND_AUTH)) == H2_PHDR_FND_AUTH) {
+		/* missing Host field, use :authority instead */
+		if (!htx_add_header(htx, ist("host"), phdr_val[H2_PHDR_IDX_AUTH]))
+			goto fail;
+	}
+
+	/* now we may have to build a cookie list. We'll dump the values of all
+	 * visited headers.
+	 */
+	if (ck >= 0) {
+		if (http_cookie_merge(htx, list, ck))
+			goto fail;
+	}
+
+	/* now send the end of headers marker */
+	if (!htx_add_endof(htx, HTX_BLK_EOH))
+		goto fail;
+
+	/* proceed to scheme-based normalization on target-URI */
+	if (fields & H2_PHDR_FND_SCHM)
+		http_scheme_based_normalize(htx);
+
+	ret = 1;
+	return ret;
+
+ fail:
+	return -1;
+}
+
+/* Prepare the status line into <htx> from pseudo headers stored in <phdr[]>.
+ * <fields> indicates what was found so far. This should be called once at the
+ * detection of the first general header field or at the end of the message if
+ * no general header field was found yet. Returns the created start line on
+ * success, or NULL on failure. Upon success, <msgf> is updated with a few
+ * H2_MSGF_* flags indicating what was found while parsing.
+ */
+static struct htx_sl *h2_prepare_htx_stsline(uint32_t fields, struct ist *phdr, struct htx *htx, unsigned int *msgf)
+{
+ unsigned int status, flags = HTX_SL_F_IS_RESP;
+ struct htx_sl *sl;
+ struct ist stat;
+
+ /* only :status is allowed as a pseudo header */
+ if (!(fields & H2_PHDR_FND_STAT))
+ goto fail;
+
+ if (phdr[H2_PHDR_IDX_STAT].len != 3)
+ goto fail;
+
+ /* if Extended CONNECT is used, convert status code from 200 to htx 101
+ * following rfc 8441 */
+ if (unlikely(*msgf & H2_MSGF_EXT_CONNECT) &&
+ isteq(phdr[H2_PHDR_IDX_STAT], ist("200"))) {
+ stat = ist("101");
+ status = 101;
+ }
+ else {
+ unsigned char h, t, u;
+
+ stat = phdr[H2_PHDR_IDX_STAT];
+
+ h = stat.ptr[0] - '0';
+ t = stat.ptr[1] - '0';
+ u = stat.ptr[2] - '0';
+ if (h > 9 || t > 9 || u > 9)
+ goto fail;
+ status = h * 100 + t * 10 + u;
+ }
+
+ /* 101 responses are not supported in H2, so return an error.
+ * On 1xx responses there is no ES on the HEADERS frame but there is no
+ * body. So remove the flag H2_MSGF_BODY and add H2_MSGF_RSP_1XX to
+ * notify the decoder another HEADERS frame is expected.
+ * 204/304 responses have no body by definition. So remove the flag
+ * H2_MSGF_BODY and set H2_MSGF_BODYLESS_RSP.
+ *
+ * Note however that there is a special condition for Extended CONNECT.
+ * In this case, we explicitly convert it to HTX 101 to mimic
+ * GET+Upgrade HTTP/1.1 mechanism
+ */
+ if (status == 101) {
+ if (!(*msgf & H2_MSGF_EXT_CONNECT))
+ goto fail;
+ }
+ else if (status < 200) {
+ *msgf |= H2_MSGF_RSP_1XX;
+ *msgf &= ~H2_MSGF_BODY;
+ }
+ else if (status == 204 || status == 304) {
+ *msgf &= ~H2_MSGF_BODY;
+ *msgf |= H2_MSGF_BODYLESS_RSP;
+ flags |= HTX_SL_F_BODYLESS_RESP;
+ }
+
+ /* Set HTX start-line flags */
+ flags |= HTX_SL_F_VER_11; // V2 in fact
+ flags |= HTX_SL_F_XFER_LEN; // xfer len always known with H2
+
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/2.0"), stat, ist(""));
+ if (!sl)
+ goto fail;
+ sl->info.res.status = status;
+ return sl;
+ fail:
+ return NULL;
+}
+
+/* Takes an H2 response present in the headers list <list> terminated by a name
+ * being <NULL,0> and emits the equivalent HTX response according to the rules
+ * documented in RFC7540 #8.1.2. The output contents are emitted in <htx>, and
+ * a positive value is returned if some bytes were emitted. In case of error, a
+ * negative error code is returned.
+ *
+ * Upon success, <msgf> is filled with a few H2_MSGF_* flags indicating what
+ * was found while parsing. The caller must set it to zero, or to H2_MSGF_BODY
+ * if a body is detected (!ES).
+ *
+ * The headers list <list> must be composed of :
+ * - n.name != NULL, n.len > 0 : literal header name
+ * - n.name == NULL, n.len > 0 : indexed pseudo header name number <n.len>
+ * among H2_PHDR_IDX_*
+ * - n.name ignored, n.len == 0 : end of list
+ * - in all cases except the end of list, v.name and v.len must designate a
+ * valid value.
+ *
+ * <upgrade_protocol> is only used if the htx status code is 101 indicating a
+ * response to an upgrade or h2-equivalent request.
+ */
+int h2_make_htx_response(struct http_hdr *list, struct htx *htx, unsigned int *msgf, unsigned long long *body_len, char *upgrade_protocol)
+{
+ struct ist phdr_val[H2_PHDR_NUM_ENTRIES];
+ uint32_t fields; /* bit mask of H2_PHDR_FND_* */
+ uint32_t idx;
+ int phdr;
+ int ret;
+ int i;
+ struct htx_sl *sl = NULL;
+ unsigned int sl_flags = 0;
+ const char *ctl;
+
+ fields = 0;
+ for (idx = 0; list[idx].n.len != 0; idx++) {
+ if (!isttest(list[idx].n)) {
+ /* this is an indexed pseudo-header */
+ phdr = list[idx].n.len;
+ }
+ else {
+ /* this can be any type of header */
+ /* RFC7540#8.1.2: upper case not allowed in header field names.
+ * #10.3: header names must be valid (i.e. match a token).
+ * For pseudo-headers we check from 2nd char and for other ones
+ * from the first char, because HTTP_IS_TOKEN() also excludes
+ * the colon.
+ */
+ phdr = h2_str_to_phdr(list[idx].n);
+
+ for (i = !!phdr; i < list[idx].n.len; i++)
+ if ((uint8_t)(list[idx].n.ptr[i] - 'A') < 'Z' - 'A' || !HTTP_IS_TOKEN(list[idx].n.ptr[i]))
+ goto fail;
+ }
+
+ /* RFC7540#10.3: intermediaries forwarding to HTTP/1 must take care of
+ * rejecting NUL, CR and LF characters.
+ */
+ ctl = ist_find_ctl(list[idx].v);
+ if (unlikely(ctl) && http_header_has_forbidden_char(list[idx].v, ctl))
+ goto fail;
+
+ if (phdr > 0 && phdr < H2_PHDR_NUM_ENTRIES) {
+ /* insert a pseudo header by its index (in phdr) and value (in value) */
+ if (fields & ((1 << phdr) | H2_PHDR_FND_NONE)) {
+ if (fields & H2_PHDR_FND_NONE) {
+ /* pseudo header field after regular headers */
+ goto fail;
+ }
+ else {
+ /* repeated pseudo header field */
+ goto fail;
+ }
+ }
+ fields |= 1 << phdr;
+ phdr_val[phdr] = list[idx].v;
+ continue;
+ }
+ else if (phdr != 0) {
+ /* invalid pseudo header -- should never happen here */
+ goto fail;
+ }
+
+ /* regular header field in (name,value) */
+ if (!(fields & H2_PHDR_FND_NONE)) {
+ /* no more pseudo-headers, time to build the status line */
+ sl = h2_prepare_htx_stsline(fields, phdr_val, htx, msgf);
+ if (!sl)
+ goto fail;
+ fields |= H2_PHDR_FND_NONE;
+ }
+
+ if (isteq(list[idx].n, ist("content-length"))) {
+ ret = http_parse_cont_len_header(&list[idx].v, body_len,
+ *msgf & H2_MSGF_BODY_CL);
+ if (ret < 0)
+ goto fail;
+
+ *msgf |= H2_MSGF_BODY_CL;
+ sl_flags |= HTX_SL_F_CLEN;
+ if (ret == 0)
+ continue; // skip this duplicate
+ }
+
+ /* these ones are forbidden in responses (RFC7540#8.1.2.2) */
+ if (isteq(list[idx].n, ist("connection")) ||
+ isteq(list[idx].n, ist("proxy-connection")) ||
+ isteq(list[idx].n, ist("keep-alive")) ||
+ isteq(list[idx].n, ist("upgrade")) ||
+ isteq(list[idx].n, ist("transfer-encoding")))
+ goto fail;
+
+ if (!htx_add_header(htx, list[idx].n, list[idx].v))
+ goto fail;
+ }
+
+ /* RFC7540#8.1.2.1 mandates to reject request pseudo-headers */
+ if (fields & (H2_PHDR_FND_AUTH|H2_PHDR_FND_METH|H2_PHDR_FND_PATH|H2_PHDR_FND_SCHM))
+ goto fail;
+
+ /* Let's dump the request now if not yet emitted. */
+ if (!(fields & H2_PHDR_FND_NONE)) {
+ sl = h2_prepare_htx_stsline(fields, phdr_val, htx, msgf);
+ if (!sl)
+ goto fail;
+ }
+
+ if (sl->info.res.status == 101 && upgrade_protocol) {
+ if (!htx_add_header(htx, ist("connection"), ist("upgrade")))
+ goto fail;
+ if (!htx_add_header(htx, ist("upgrade"), ist(upgrade_protocol)))
+ goto fail;
+ sl_flags |= HTX_SL_F_CONN_UPG;
+ }
+
+ if ((*msgf & H2_MSGF_BODY_TUNNEL) &&
+ ((sl->info.res.status >= 200 && sl->info.res.status < 300) || sl->info.res.status == 101))
+ *msgf &= ~(H2_MSGF_BODY|H2_MSGF_BODY_CL);
+ else
+ *msgf &= ~H2_MSGF_BODY_TUNNEL;
+
+ if (!(*msgf & H2_MSGF_BODY) || ((*msgf & H2_MSGF_BODY_CL) && *body_len == 0) ||
+ (*msgf & H2_MSGF_BODY_TUNNEL)) {
+ /* Response without body or tunnel successfully established */
+ sl_flags |= HTX_SL_F_BODYLESS;
+ htx->flags |= HTX_FL_EOM;
+ }
+
+ /* update the start line with last detected header info */
+ sl->flags |= sl_flags;
+
+ if ((*msgf & (H2_MSGF_BODY|H2_MSGF_BODY_TUNNEL|H2_MSGF_BODY_CL)) == H2_MSGF_BODY) {
+ /* FIXME: Do we need to signal anything when we have a body and
+ * no content-length, to have the equivalent of H1's chunked
+ * encoding?
+ */
+ }
+
+ /* now send the end of headers marker */
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
+ goto fail;
+
+ ret = 1;
+ return ret;
+
+ fail:
+ return -1;
+}
+
+/* Takes an H2 headers list <list> terminated by a name being <NULL,0> and emits
+ * the equivalent HTX trailers blocks. The output contents are emitted in <htx>,
+ * and a positive value is returned if some bytes were emitted. In case of
+ * error, a negative error code is returned. The caller must have verified that
+ * the message in the buffer is compatible with receipt of trailers.
+ *
+ * The headers list <list> must be composed of :
+ * - n.name != NULL, n.len > 0 : literal header name
+ * - n.name == NULL, n.len > 0 : indexed pseudo header name number <n.len>
+ * among H2_PHDR_IDX_* (illegal here)
+ * - n.name ignored, n.len == 0 : end of list
+ * - in all cases except the end of list, v.name and v.len must designate a
+ * valid value.
+ */
+int h2_make_htx_trailers(struct http_hdr *list, struct htx *htx)
+{
+ const char *ctl;
+ uint32_t idx;
+ int i;
+
+ for (idx = 0; list[idx].n.len != 0; idx++) {
+ if (!isttest(list[idx].n)) {
+ /* This is an indexed pseudo-header (RFC7540#8.1.2.1) */
+ goto fail;
+ }
+
+ /* RFC7540#8.1.2: upper case not allowed in header field names.
+ * #10.3: header names must be valid (i.e. match a token). This
+ * also catches pseudo-headers which are forbidden in trailers.
+ */
+ for (i = 0; i < list[idx].n.len; i++)
+ if ((uint8_t)(list[idx].n.ptr[i] - 'A') < 'Z' - 'A' || !HTTP_IS_TOKEN(list[idx].n.ptr[i]))
+ goto fail;
+
+ /* these ones are forbidden in trailers (RFC7540#8.1.2.2) */
+ if (isteq(list[idx].n, ist("host")) ||
+ isteq(list[idx].n, ist("content-length")) ||
+ isteq(list[idx].n, ist("connection")) ||
+ isteq(list[idx].n, ist("proxy-connection")) ||
+ isteq(list[idx].n, ist("keep-alive")) ||
+ isteq(list[idx].n, ist("upgrade")) ||
+ isteq(list[idx].n, ist("te")) ||
+ isteq(list[idx].n, ist("transfer-encoding")))
+ goto fail;
+
+ /* RFC7540#10.3: intermediaries forwarding to HTTP/1 must take care of
+ * rejecting NUL, CR and LF characters.
+ */
+ ctl = ist_find_ctl(list[idx].v);
+ if (unlikely(ctl) && http_header_has_forbidden_char(list[idx].v, ctl))
+ goto fail;
+
+ if (!htx_add_trailer(htx, list[idx].n, list[idx].v))
+ goto fail;
+ }
+
+ if (!htx_add_endof(htx, HTX_BLK_EOT))
+ goto fail;
+
+ return 1;
+
+ fail:
+ return -1;
+}
diff --git a/src/h3.c b/src/h3.c
new file mode 100644
index 0000000..4aa1a52
--- /dev/null
+++ b/src/h3.c
@@ -0,0 +1,2403 @@
+/*
+ * HTTP/3 protocol processing
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <import/ist.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/h3.h>
+#include <haproxy/h3_stats.h>
+#include <haproxy/http.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/intops.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/mux_quic.h>
+#include <haproxy/pool.h>
+#include <haproxy/qmux_http.h>
+#include <haproxy/qpack-dec.h>
+#include <haproxy/qpack-enc.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+
+/* trace source and events */
+static void h3_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+static const struct trace_event h3_trace_events[] = {
+#define H3_EV_RX_FRAME (1ULL << 0)
+ { .mask = H3_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H3 frame" },
+#define H3_EV_RX_DATA (1ULL << 1)
+ { .mask = H3_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H3 DATA frame" },
+#define H3_EV_RX_HDR (1ULL << 2)
+ { .mask = H3_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H3 HEADERS frame" },
+#define H3_EV_RX_SETTINGS (1ULL << 3)
+ { .mask = H3_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H3 SETTINGS frame" },
+#define H3_EV_TX_DATA (1ULL << 4)
+ { .mask = H3_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H3 DATA frame" },
+#define H3_EV_TX_HDR (1ULL << 5)
+ { .mask = H3_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H3 HEADERS frame" },
+#define H3_EV_TX_SETTINGS (1ULL << 6)
+ { .mask = H3_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H3 SETTINGS frame" },
+#define H3_EV_H3S_NEW (1ULL << 7)
+ { .mask = H3_EV_H3S_NEW, .name = "h3s_new", .desc = "new H3 stream" },
+#define H3_EV_H3S_END (1ULL << 8)
+ { .mask = H3_EV_H3S_END, .name = "h3s_end", .desc = "H3 stream terminated" },
+#define H3_EV_H3C_NEW (1ULL << 9)
+ { .mask = H3_EV_H3C_NEW, .name = "h3c_new", .desc = "new H3 connection" },
+#define H3_EV_H3C_END (1ULL << 10)
+ { .mask = H3_EV_H3C_END, .name = "h3c_end", .desc = "H3 connection terminated" },
+#define H3_EV_STRM_SEND (1ULL << 12)
+ { .mask = H3_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
+ { }
+};
+
+static const struct name_desc h3_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="qcs", .desc="QUIC stream" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+static const struct name_desc h3_trace_decoding[] = {
+#define H3_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define H3_VERB_MINIMAL 2
+ { .name="minimal", .desc="report only qcc/qcs state and flags, no real decoding" },
+ { /* end */ }
+};
+
+struct trace_source trace_h3 = {
+ .name = IST("h3"),
+ .desc = "HTTP/3 transcoder",
+ .arg_def = TRC_ARG1_CONN, /* TRACE()'s first argument is always a connection */
+ .default_cb = h3_trace,
+ .known_events = h3_trace_events,
+ .lockon_args = h3_trace_lockon_args,
+ .decoding = h3_trace_decoding,
+ .report_events = ~0, /* report everything by default */
+};
+
+#define TRACE_SOURCE &trace_h3
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+#if defined(DEBUG_H3)
+#define h3_debug_printf fprintf
+#define h3_debug_hexdump debug_hexdump
+#else
+#define h3_debug_printf(...) do { } while (0)
+#define h3_debug_hexdump(...) do { } while (0)
+#endif
+
+#define H3_CF_SETTINGS_SENT 0x00000001 /* SETTINGS frame already sent on local control stream */
+#define H3_CF_SETTINGS_RECV 0x00000002 /* SETTINGS frame already received on remote control stream */
+#define H3_CF_UNI_CTRL_SET 0x00000004 /* Remote H3 Control stream opened */
+#define H3_CF_UNI_QPACK_DEC_SET 0x00000008 /* Remote QPACK decoder stream opened */
+#define H3_CF_UNI_QPACK_ENC_SET 0x00000010 /* Remote QPACK encoder stream opened */
+#define H3_CF_GOAWAY_SENT 0x00000020 /* GOAWAY sent on local control stream */
+
+/* Default settings */
+static uint64_t h3_settings_qpack_max_table_capacity = 0;
+static uint64_t h3_settings_qpack_blocked_streams = 4096;
+static uint64_t h3_settings_max_field_section_size = QUIC_VARINT_8_BYTE_MAX; /* Unlimited */
+
+struct h3c {
+ struct qcc *qcc;
+ struct qcs *ctrl_strm; /* Control stream */
+ enum h3_err err;
+ uint32_t flags;
+
+ /* Settings */
+ uint64_t qpack_max_table_capacity;
+ uint64_t qpack_blocked_streams;
+ uint64_t max_field_section_size;
+
+ uint64_t id_goaway; /* stream ID used for a GOAWAY frame */
+
+ struct buffer_wait buf_wait; /* wait list for buffer allocations */
+ /* Stats counters */
+ struct h3_counters *prx_counters;
+};
+
+DECLARE_STATIC_POOL(pool_head_h3c, "h3c", sizeof(struct h3c));
+
+#define H3_SF_UNI_INIT 0x00000001 /* stream type not parsed for unidirectional stream */
+#define H3_SF_UNI_NO_H3 0x00000002 /* unidirectional stream does not carry H3 frames */
+#define H3_SF_HAVE_CLEN 0x00000004 /* content-length header is present */
+
+struct h3s {
+ struct h3c *h3c;
+
+ enum h3s_t type;
+ enum h3s_st_req st_req; /* only used for request streams */
+ uint64_t demux_frame_len;
+ uint64_t demux_frame_type;
+
+ unsigned long long body_len; /* known request body length from content-length header if present */
+ unsigned long long data_len; /* total length of all parsed DATA */
+
+ int flags;
+ int err; /* used for stream reset */
+};
+
+DECLARE_STATIC_POOL(pool_head_h3s, "h3s", sizeof(struct h3s));
+
+/* Initialize a uni-stream <qcs> by reading its type from <b>.
+ *
+ * Returns the count of consumed bytes or a negative error code.
+ */
+static ssize_t h3_init_uni_stream(struct h3c *h3c, struct qcs *qcs,
+ struct buffer *b)
+{
+ /* decode unidirectional stream type */
+ struct h3s *h3s = qcs->ctx;
+ uint64_t type;
+ size_t len = 0, ret;
+
+ TRACE_ENTER(H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+
+ /* Function reserved to uni streams. Must be called only once per stream instance. */
+ BUG_ON(!quic_stream_is_uni(qcs->id) || h3s->flags & H3_SF_UNI_INIT);
+
+ ret = b_quic_dec_int(&type, b, &len);
+ if (!ret) {
+ /* not enough data to decode uni stream type, retry later */
+ TRACE_DATA("cannot decode uni stream type due to incomplete data", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ goto out;
+ }
+
+ switch (type) {
+ case H3_UNI_S_T_CTRL:
+ if (h3c->flags & H3_CF_UNI_CTRL_SET) {
+ TRACE_ERROR("duplicated control stream", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, H3_STREAM_CREATION_ERROR, 1);
+ goto err;
+ }
+ h3c->flags |= H3_CF_UNI_CTRL_SET;
+ h3s->type = H3S_T_CTRL;
+ break;
+
+ case H3_UNI_S_T_PUSH:
+ /* TODO not supported for the moment */
+ h3s->type = H3S_T_PUSH;
+ break;
+
+ case H3_UNI_S_T_QPACK_DEC:
+ if (h3c->flags & H3_CF_UNI_QPACK_DEC_SET) {
+ TRACE_ERROR("duplicated qpack decoder stream", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, H3_STREAM_CREATION_ERROR, 1);
+ goto err;
+ }
+ h3c->flags |= H3_CF_UNI_QPACK_DEC_SET;
+ h3s->type = H3S_T_QPACK_DEC;
+ h3s->flags |= H3_SF_UNI_NO_H3;
+ break;
+
+ case H3_UNI_S_T_QPACK_ENC:
+ if (h3c->flags & H3_CF_UNI_QPACK_ENC_SET) {
+ TRACE_ERROR("duplicated qpack encoder stream", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, H3_STREAM_CREATION_ERROR, 1);
+ goto err;
+ }
+ h3c->flags |= H3_CF_UNI_QPACK_ENC_SET;
+ h3s->type = H3S_T_QPACK_ENC;
+ h3s->flags |= H3_SF_UNI_NO_H3;
+ break;
+
+ default:
+ /* draft-ietf-quic-http34 9. Extensions to HTTP/3
+ *
+ * Implementations MUST [...] abort reading on unidirectional
+ * streams that have unknown or unsupported types.
+ */
+ TRACE_STATE("abort reading on unknown uni stream type", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ qcc_abort_stream_read(qcs);
+ goto err;
+ }
+
+ h3s->flags |= H3_SF_UNI_INIT;
+
+ out:
+ TRACE_LEAVE(H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ return len;
+
+ err:
+ TRACE_DEVEL("leaving on error", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+ return -1;
+}
+
+/* Parse a buffer <b> for a <qcs> uni-stream which does not contain H3 frames.
+ * This may be used for QPACK encoder/decoder streams for example. <fin> is set
+ * if this is the last frame of the stream.
+ *
+ * Returns the number of consumed bytes or a negative error code.
+ */
+static ssize_t h3_parse_uni_stream_no_h3(struct qcs *qcs, struct buffer *b, int fin)
+{
+ struct h3s *h3s = qcs->ctx;
+
+ /* Function reserved to non-HTTP/3 unidirectional streams. */
+ BUG_ON(!quic_stream_is_uni(qcs->id) || !(h3s->flags & H3_SF_UNI_NO_H3));
+
+ switch (h3s->type) {
+ case H3S_T_QPACK_DEC:
+ if (qpack_decode_dec(b, fin, qcs))
+ return -1;
+ break;
+ case H3S_T_QPACK_ENC:
+ if (qpack_decode_enc(b, fin, qcs))
+ return -1;
+ break;
+ case H3S_T_UNKNOWN:
+ default:
+ /* Unknown stream should be flagged with QC_SF_READ_ABORTED. */
+ ABORT_NOW();
+ }
+
+ /* TODO adjust return code */
+ return 0;
+}
+
+/* Decode a H3 frame header from <b> buffer. The frame type is stored in
+ * <ftype> and length in <flen>.
+ *
+ * Returns the size of the H3 frame header, or 0 if it cannot be fully
+ * decoded yet. Note that the input buffer is not consumed.
+ */
+static inline size_t h3_decode_frm_header(uint64_t *ftype, uint64_t *flen,
+ struct buffer *b)
+{
+ size_t hlen;
+
+ hlen = 0;
+ if (!b_quic_dec_int(ftype, b, &hlen) ||
+ !b_quic_dec_int(flen, b, &hlen)) {
+ return 0;
+ }
+
+ return hlen;
+}
+
+/* Check if H3 frame of type <ftype> is valid when received on stream <qcs>.
+ *
+ * Returns 0 if frame valid, otherwise HTTP/3 error code.
+ */
+static int h3_check_frame_valid(struct h3c *h3c, struct qcs *qcs, uint64_t ftype)
+{
+ struct h3s *h3s = qcs->ctx;
+ int ret = 0;
+
+ /* Stream type must be known to ensure frame is valid for this stream. */
+ BUG_ON(h3s->type == H3S_T_UNKNOWN);
+
+ switch (ftype) {
+ case H3_FT_DATA:
+ /* cf H3_FT_HEADERS case. */
+ if (h3s->type == H3S_T_CTRL ||
+ (h3s->st_req != H3S_ST_REQ_HEADERS && h3s->st_req != H3S_ST_REQ_DATA)) {
+ ret = H3_FRAME_UNEXPECTED;
+ }
+
+ break;
+
+ case H3_FT_HEADERS:
+ /* RFC 9114 4.1. HTTP Message Framing
+ *
+ *
+ * An HTTP message (request or response) consists of:
+ * 1. the header section, including message control data, sent as a
+ * single HEADERS frame,
+ * 2. optionally, the content, if present, sent as a series of DATA
+ * frames, and
+ * 3. optionally, the trailer section, if present, sent as a single
+ * HEADERS frame.
+ *
+ * [...]
+ *
+ * Receipt of an invalid sequence of frames MUST be treated as a
+ * connection error of type H3_FRAME_UNEXPECTED. In particular, a DATA
+ * frame before any HEADERS frame, or a HEADERS or DATA frame after the
+ * trailing HEADERS frame, is considered invalid. Other frame types,
+ * especially unknown frame types, might be permitted subject to their
+ * own rules; see Section 9.
+ */
+ if (h3s->type == H3S_T_CTRL || h3s->st_req == H3S_ST_REQ_TRAILERS)
+ ret = H3_FRAME_UNEXPECTED;
+ break;
+
+ case H3_FT_CANCEL_PUSH:
+ case H3_FT_GOAWAY:
+ case H3_FT_MAX_PUSH_ID:
+ /* RFC 9114 7.2.3. CANCEL_PUSH
+ *
+ * A CANCEL_PUSH frame is sent on the control stream. Receiving a
+ * CANCEL_PUSH frame on a stream other than the control stream MUST be
+ * treated as a connection error of type H3_FRAME_UNEXPECTED.
+ */
+
+ /* RFC 9114 7.2.6. GOAWAY
+ *
+ * A client MUST treat a GOAWAY frame on a stream other than the
+ * control stream as a connection error of type H3_FRAME_UNEXPECTED.
+ */
+
+ /* RFC 9114 7.2.7. MAX_PUSH_ID
+ *
+ * The MAX_PUSH_ID frame is always sent on the control stream. Receipt
+ * of a MAX_PUSH_ID frame on any other stream MUST be treated as a
+ * connection error of type H3_FRAME_UNEXPECTED.
+ */
+
+ if (h3s->type != H3S_T_CTRL)
+ ret = H3_FRAME_UNEXPECTED;
+ else if (!(h3c->flags & H3_CF_SETTINGS_RECV))
+ ret = H3_MISSING_SETTINGS;
+ break;
+
+ case H3_FT_SETTINGS:
+ /* RFC 9114 7.2.4. SETTINGS
+ *
+ * A SETTINGS frame MUST be sent as the first frame of
+ * each control stream (see Section 6.2.1) by each peer, and it MUST NOT
+ * be sent subsequently. If an endpoint receives a second SETTINGS frame
+ * on the control stream, the endpoint MUST respond with a connection
+ * error of type H3_FRAME_UNEXPECTED.
+ *
+ * SETTINGS frames MUST NOT be sent on any stream other than the control
+ * stream. If an endpoint receives a SETTINGS frame on a different
+ * stream, the endpoint MUST respond with a connection error of type
+ * H3_FRAME_UNEXPECTED.
+ */
+ if (h3s->type != H3S_T_CTRL || h3c->flags & H3_CF_SETTINGS_RECV)
+ ret = H3_FRAME_UNEXPECTED;
+ break;
+
+ case H3_FT_PUSH_PROMISE:
+ /* RFC 9114 7.2.5. PUSH_PROMISE
+ *
+ * A client MUST NOT send a PUSH_PROMISE frame. A server MUST treat the
+ * receipt of a PUSH_PROMISE frame as a connection error of type
+ * H3_FRAME_UNEXPECTED.
+ */
+
+ /* TODO server-side only. */
+ ret = H3_FRAME_UNEXPECTED;
+ break;
+
+ default:
+ /* RFC 9114 9. Extensions to HTTP/3
+ *
+ * Implementations MUST ignore unknown or unsupported values in all
+ * extensible protocol elements. [...]
+ * However, where a known frame type is required to be in a
+ * specific location, such as the SETTINGS frame as the first frame of
+ * the control stream (see Section 6.2.1), an unknown frame type does
+ * not satisfy that requirement and SHOULD be treated as an error.
+ */
+ if (h3s->type == H3S_T_CTRL && !(h3c->flags & H3_CF_SETTINGS_RECV))
+ ret = H3_MISSING_SETTINGS;
+ break;
+ }
+
+ return ret;
+}
+
+/* Check from stream <qcs> that length of all DATA frames does not exceed with
+ * a previously parsed content-length header. <fin> must be set for the last
+ * data of the stream so that length of DATA frames must be equal to the
+ * content-length.
+ *
+ * This must only be called for a stream with H3_SF_HAVE_CLEN flag.
+ *
+ * Returns 0 if valid, a negative value otherwise.
+ */
+static int h3_check_body_size(struct qcs *qcs, int fin)
+{
+ struct h3s *h3s = qcs->ctx;
+ int ret = 0;
+ TRACE_ENTER(H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+
+ /* Reserved for streams with a previously parsed content-length header. */
+ BUG_ON(!(h3s->flags & H3_SF_HAVE_CLEN));
+
+ /* RFC 9114 4.1.2. Malformed Requests and Responses
+ *
+ * A request or response that is defined as having content when it
+ * contains a Content-Length header field (Section 8.6 of [HTTP]) is
+ * malformed if the value of the Content-Length header field does not
+ * equal the sum of the DATA frame lengths received.
+ *
+ * TODO for backend support
+ * A response that is
+ * defined as never having content, even when a Content-Length is
+ * present, can have a non-zero Content-Length header field even though
+ * no content is included in DATA frames.
+ */
+ if (h3s->data_len > h3s->body_len ||
+ (fin && h3s->data_len < h3s->body_len)) {
+ TRACE_ERROR("Content-length does not match DATA frame size", H3_EV_RX_FRAME|H3_EV_RX_DATA, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ ret = -1;
+ }
+
+ TRACE_LEAVE(H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ return ret;
+}
+
+/* Set <auth> authority header to the new value <value> for <qcs> stream. This
+ * ensures that value is conformant to the specification. If <auth> is a
+ * non-null length string, it ensures that <value> is identical to it.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int h3_set_authority(struct qcs *qcs, struct ist *auth, const struct ist value)
+{
+ /* RFC 9114 4.3.1. Request Pseudo-Header Fields
+ *
+ * If the :scheme pseudo-header field identifies a scheme that has a
+ * mandatory authority component (including "http" and "https"), the
+ * request MUST contain either an :authority pseudo-header field or a
+ * Host header field. If these fields are present, they MUST NOT be
+ * empty. If both fields are present, they MUST contain the same value.
+ */
+
+ /* Check that if a previous value is set the new value is identical. */
+ if (isttest(*auth) && !isteq(*auth, value)) {
+ TRACE_ERROR("difference between :authority and host headers", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ /* Check that value is not empty. */
+ if (!istlen(value)) {
+ TRACE_ERROR("empty :authority/host header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ *auth = value;
+ return 0;
+}
+
+/* Parse from buffer <buf> a H3 HEADERS frame of length <len>. Data are copied
+ * in a local HTX buffer and transferred to the stream connector layer. <fin> must be
+ * set if this is the last data to transfer from this stream.
+ *
+ * Returns the number of consumed bytes or a negative error code. On error
+ * either the connection should be closed or the stream reset using codes
+ * provided in h3c.err / h3s.err.
+ */
+static ssize_t h3_headers_to_htx(struct qcs *qcs, const struct buffer *buf,
+ uint64_t len, char fin)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ struct buffer htx_buf = BUF_NULL;
+ struct buffer *tmp = get_trash_chunk();
+ struct htx *htx = NULL;
+ struct htx_sl *sl;
+ struct http_hdr list[global.tune.max_http_hdr];
+ unsigned int flags = HTX_SL_F_NONE;
+ struct ist meth = IST_NULL, path = IST_NULL;
+ struct ist scheme = IST_NULL, authority = IST_NULL;
+ int hdr_idx, ret;
+ int cookie = -1, last_cookie = -1, i;
+ const char *ctl;
+ int relaxed = !!(h3c->qcc->proxy->options2 & PR_O2_REQBUG_OK);
+
+ /* RFC 9114 4.1.2. Malformed Requests and Responses
+ *
+ * A malformed request or response is one that is an otherwise valid
+ * sequence of frames but is invalid due to:
+ * - the presence of prohibited fields or pseudo-header fields,
+ * - the absence of mandatory pseudo-header fields,
+ * - invalid values for pseudo-header fields,
+ * - pseudo-header fields after fields,
+ * - an invalid sequence of HTTP messages,
+ * - the inclusion of uppercase field names, or
+ * - the inclusion of invalid characters in field names or values.
+ *
+ * [...]
+ *
+ * Intermediaries that process HTTP requests or responses (i.e., any
+ * intermediary not acting as a tunnel) MUST NOT forward a malformed
+ * request or response. Malformed requests or responses that are
+ * detected MUST be treated as a stream error of type H3_MESSAGE_ERROR.
+ */
+
+ TRACE_ENTER(H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+
+ /* TODO support trailer parsing in this function */
+
+ /* TODO support buffer wrapping */
+ BUG_ON(b_head(buf) + len >= b_wrap(buf));
+ ret = qpack_decode_fs((const unsigned char *)b_head(buf), len, tmp,
+ list, sizeof(list) / sizeof(list[0]));
+ if (ret < 0) {
+ TRACE_ERROR("QPACK decoding error", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = -ret;
+ len = -1;
+ goto out;
+ }
+
+ if (!qcs_get_buf(qcs, &htx_buf)) {
+ TRACE_ERROR("HTX buffer alloc failure", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+ BUG_ON(!b_size(&htx_buf)); /* TODO */
+ htx = htx_from_buf(&htx_buf);
+
+ /* first treat pseudo-header to build the start line */
+ hdr_idx = 0;
+ while (1) {
+ /* RFC 9114 4.3. HTTP Control Data
+ *
+ * Endpoints MUST treat a request or response that contains
+ * undefined or invalid pseudo-header fields as malformed.
+ *
+ * All pseudo-header fields MUST appear in the header section before
+ * regular header fields. Any request or response that contains a
+ * pseudo-header field that appears in a header section after a regular
+ * header field MUST be treated as malformed.
+ */
+
+ /* Stop at first non pseudo-header. */
+ if (!istmatch(list[hdr_idx].n, ist(":")))
+ break;
+
+ /* RFC 9114 10.3 Intermediary-Encapsulation Attacks
+ *
+ * While most values that can be encoded will not alter field
+ * parsing, carriage return (ASCII 0x0d), line feed (ASCII 0x0a),
+ * and the null character (ASCII 0x00) might be exploited by an
+ * attacker if they are translated verbatim. Any request or
+ * response that contains a character not permitted in a field
+ * value MUST be treated as malformed
+ */
+
+ /* look for forbidden control characters in the pseudo-header value */
+ ctl = ist_find_ctl(list[hdr_idx].v);
+ if (unlikely(ctl) && http_header_has_forbidden_char(list[hdr_idx].v, ctl)) {
+ TRACE_ERROR("control character present in pseudo-header value", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ /* pseudo-header. Malformed name with uppercase character or
+ * invalid token will be rejected in the else clause.
+ */
+ if (isteq(list[hdr_idx].n, ist(":method"))) {
+ if (isttest(meth)) {
+ TRACE_ERROR("duplicated method pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ meth = list[hdr_idx].v;
+ }
+ else if (isteq(list[hdr_idx].n, ist(":path"))) {
+ if (isttest(path)) {
+ TRACE_ERROR("duplicated path pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (!relaxed) {
+ /* we need to reject any control chars or '#' from the path,
+ * unless option accept-invalid-http-request is set.
+ */
+ ctl = ist_find_range(list[hdr_idx].v, 0, '#');
+ if (unlikely(ctl) && http_path_has_forbidden_char(list[hdr_idx].v, ctl)) {
+ TRACE_ERROR("forbidden character in ':path' pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+ path = list[hdr_idx].v;
+ }
+ else if (isteq(list[hdr_idx].n, ist(":scheme"))) {
+ if (isttest(scheme)) {
+ /* duplicated pseudo-header */
+ TRACE_ERROR("duplicated scheme pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ scheme = list[hdr_idx].v;
+ }
+ else if (isteq(list[hdr_idx].n, ist(":authority"))) {
+ if (isttest(authority)) {
+ TRACE_ERROR("duplicated authority pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (h3_set_authority(qcs, &authority, list[hdr_idx].v)) {
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+ else {
+ TRACE_ERROR("unknown pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ ++hdr_idx;
+ }
+
+ if (!istmatch(meth, ist("CONNECT"))) {
+ /* RFC 9114 4.3.1. Request Pseudo-Header Fields
+ *
+ * All HTTP/3 requests MUST include exactly one value for the :method,
+ * :scheme, and :path pseudo-header fields, unless the request is a
+ * CONNECT request; see Section 4.4.
+ */
+ if (!isttest(meth) || !isttest(scheme) || !isttest(path)) {
+ TRACE_ERROR("missing mandatory pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+ flags |= HTX_SL_F_VER_11;
+ flags |= HTX_SL_F_XFER_LEN;
+
+ sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, meth, path, ist("HTTP/3.0"));
+ if (!sl) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (fin)
+ sl->flags |= HTX_SL_F_BODYLESS;
+
+ sl->info.req.meth = find_http_meth(meth.ptr, meth.len);
+
+ if (isttest(authority)) {
+ if (!htx_add_header(htx, ist("host"), authority)) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+ /* now treat standard headers */
+ while (1) {
+ if (isteq(list[hdr_idx].n, ist("")))
+ break;
+
+ if (istmatch(list[hdr_idx].n, ist(":"))) {
+ TRACE_ERROR("pseudo-header field after fields", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ for (i = 0; i < list[hdr_idx].n.len; ++i) {
+ const char c = list[hdr_idx].n.ptr[i];
+ if ((uint8_t)(c - 'A') < 'Z' - 'A' || !HTTP_IS_TOKEN(c)) {
+ TRACE_ERROR("invalid characters in field name", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+
+ /* RFC 9114 10.3 Intermediary-Encapsulation Attacks
+ *
+ * While most values that can be encoded will not alter field
+ * parsing, carriage return (ASCII 0x0d), line feed (ASCII 0x0a),
+ * and the null character (ASCII 0x00) might be exploited by an
+ * attacker if they are translated verbatim. Any request or
+ * response that contains a character not permitted in a field
+ * value MUST be treated as malformed
+ */
+
+ /* look for forbidden control characters in the header value */
+ ctl = ist_find_ctl(list[hdr_idx].v);
+ if (unlikely(ctl) && http_header_has_forbidden_char(list[hdr_idx].v, ctl)) {
+ TRACE_ERROR("control character present in header value", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (isteq(list[hdr_idx].n, ist("host"))) {
+ if (h3_set_authority(qcs, &authority, list[hdr_idx].v)) {
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+ else if (isteq(list[hdr_idx].n, ist("cookie"))) {
+ http_cookie_register(list, hdr_idx, &cookie, &last_cookie);
+ ++hdr_idx;
+ continue;
+ }
+ else if (isteq(list[hdr_idx].n, ist("content-length"))) {
+ ret = http_parse_cont_len_header(&list[hdr_idx].v,
+ &h3s->body_len,
+ h3s->flags & H3_SF_HAVE_CLEN);
+ if (ret < 0) {
+ TRACE_ERROR("invalid content-length", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ else if (!ret) {
+ /* Skip duplicated value. */
+ ++hdr_idx;
+ continue;
+ }
+
+ h3s->flags |= H3_SF_HAVE_CLEN;
+ sl->flags |= HTX_SL_F_CLEN;
+ /* This will fail if current frame is the last one and
+ * content-length is not null.
+ */
+ if (h3_check_body_size(qcs, fin)) {
+ len = -1;
+ goto out;
+ }
+ }
+ else if (isteq(list[hdr_idx].n, ist("connection")) ||
+ isteq(list[hdr_idx].n, ist("proxy-connection")) ||
+ isteq(list[hdr_idx].n, ist("keep-alive")) ||
+ isteq(list[hdr_idx].n, ist("transfer-encoding"))) {
+ /* RFC 9114 4.2. HTTP Fields
+ *
+ * HTTP/3 does not use the Connection header field to indicate
+ * connection-specific fields; in this protocol, connection-
+ * specific metadata is conveyed by other means. An endpoint
+ * MUST NOT generate an HTTP/3 field section containing
+ * connection-specific fields; any message containing
+ * connection-specific fields MUST be treated as malformed.
+ */
+ TRACE_ERROR("invalid connection header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ else if (isteq(list[hdr_idx].n, ist("te")) &&
+ !isteq(list[hdr_idx].v, ist("trailers"))) {
+ /* RFC 9114 4.2. HTTP Fields
+ *
+ * The only exception to this is the TE header field, which MAY
+ * be present in an HTTP/3 request header; when it is, it MUST
+ * NOT contain any value other than "trailers".
+ */
+ TRACE_ERROR("invalid te header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (!htx_add_header(htx, list[hdr_idx].n, list[hdr_idx].v)) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+ ++hdr_idx;
+ }
+
+ /* RFC 9114 4.3.1. Request Pseudo-Header Fields
+ *
+ * If the :scheme pseudo-header field identifies a scheme that has a
+ * mandatory authority component (including "http" and "https"), the
+ * request MUST contain either an :authority pseudo-header field or a
+ * Host header field.
+ */
+ if (!isttest(authority)) {
+ TRACE_ERROR("missing mandatory pseudo-header", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (cookie >= 0) {
+ if (http_cookie_merge(htx, list, cookie)) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+ if (!htx_add_endof(htx, HTX_BLK_EOH)) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (fin)
+ htx->flags |= HTX_FL_EOM;
+
+ htx_to_buf(htx, &htx_buf);
+ htx = NULL;
+
+ if (!qcs_attach_sc(qcs, &htx_buf, fin)) {
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ /* RFC 9114 5.2. Connection Shutdown
+ *
+ * The GOAWAY frame contains an identifier that
+ * indicates to the receiver the range of requests or pushes that were
+ * or might be processed in this connection. The server sends a client-
+ * initiated bidirectional stream ID; the client sends a push ID.
+ * Requests or pushes with the indicated identifier or greater are
+ * rejected (Section 4.1.1) by the sender of the GOAWAY. This
+ * identifier MAY be zero if no requests or pushes were processed.
+ */
+ if (qcs->id >= h3c->id_goaway)
+ h3c->id_goaway = qcs->id + 4;
+
+ out:
+ /* HTX may be non NULL if error before previous htx_to_buf(). */
+ if (htx)
+ htx_to_buf(htx, &htx_buf);
+
+ /* buffer is transferred to the stream connector and set to NULL
+ * except on stream creation error.
+ */
+ if (b_size(&htx_buf)) {
+ b_free(&htx_buf);
+ offer_buffers(NULL, 1);
+ }
+
+ TRACE_LEAVE(H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ return len;
+}
+
+/* Parse from buffer <buf> a H3 HEADERS frame of length <len> used as trailers.
+ * Data are copied in a local HTX buffer and transferred to the stream
+ * connector layer. <fin> must be set if this is the last data to transfer
+ * from this stream.
+ *
+ * Returns the number of consumed bytes or a negative error code. On error
+ * either the connection should be closed or the stream reset using codes
+ * provided in h3c.err / h3s.err.
+ */
+static ssize_t h3_trailers_to_htx(struct qcs *qcs, const struct buffer *buf,
+ uint64_t len, char fin)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ struct buffer *tmp = get_trash_chunk(); /* scratch storage for QPACK decoded strings */
+ struct buffer *appbuf = NULL;
+ struct htx *htx = NULL;
+ struct htx_sl *sl;
+ struct http_hdr list[global.tune.max_http_hdr];
+ int hdr_idx, ret;
+ const char *ctl;
+ int i;
+
+ TRACE_ENTER(H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+
+ /* TODO support buffer wrapping */
+ BUG_ON(b_head(buf) + len >= b_wrap(buf));
+ /* Decode the QPACK-encoded field section into <list>, terminated by an
+ * empty name entry.
+ */
+ ret = qpack_decode_fs((const unsigned char *)b_head(buf), len, tmp,
+ list, sizeof(list) / sizeof(list[0]));
+ if (ret < 0) {
+ TRACE_ERROR("QPACK decoding error", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = -ret;
+ len = -1;
+ goto out;
+ }
+
+ if (!(appbuf = qcs_get_buf(qcs, &qcs->rx.app_buf))) {
+ TRACE_ERROR("HTX buffer alloc failure", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+ BUG_ON(!b_size(appbuf)); /* TODO */
+ htx = htx_from_buf(appbuf);
+
+ if (!h3s->data_len) {
+ /* Notify that no body is present. This can only happen if
+ * there is H3 HEADERS as trailers without or empty H3 DATA
+ * frame. So this is probably not realistic?
+ *
+ * TODO if sl is NULL because already consumed there is no way
+ * to notify about missing body.
+ */
+ sl = http_get_stline(htx);
+ if (sl)
+ sl->flags |= HTX_SL_F_BODYLESS;
+ else
+ TRACE_ERROR("cannot notify missing body after trailers", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ }
+
+ /* Validate then copy each decoded trailer into the HTX buffer. */
+ hdr_idx = 0;
+ while (1) {
+ if (isteq(list[hdr_idx].n, ist("")))
+ break;
+
+ /* RFC 9114 4.3. HTTP Control Data
+ *
+ * Pseudo-header
+ * fields MUST NOT appear in trailer sections.
+ */
+ if (istmatch(list[hdr_idx].n, ist(":"))) {
+ TRACE_ERROR("pseudo-header field in trailers", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ /* Reject any uppercase or non-token character in the field name. */
+ for (i = 0; i < list[hdr_idx].n.len; ++i) {
+ const char c = list[hdr_idx].n.ptr[i];
+ if ((uint8_t)(c - 'A') < 'Z' - 'A' || !HTTP_IS_TOKEN(c)) {
+ TRACE_ERROR("invalid characters in field name", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+ }
+
+ /* forbidden HTTP/3 headers, cf h3_headers_to_htx() */
+ if (isteq(list[hdr_idx].n, ist("host")) ||
+ isteq(list[hdr_idx].n, ist("content-length")) ||
+ isteq(list[hdr_idx].n, ist("connection")) ||
+ isteq(list[hdr_idx].n, ist("proxy-connection")) ||
+ isteq(list[hdr_idx].n, ist("keep-alive")) ||
+ isteq(list[hdr_idx].n, ist("te")) ||
+ isteq(list[hdr_idx].n, ist("transfer-encoding"))) {
+ TRACE_ERROR("forbidden HTTP/3 headers", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ /* RFC 9114 10.3 Intermediary-Encapsulation Attacks
+ *
+ * While most values that can be encoded will not alter field
+ * parsing, carriage return (ASCII 0x0d), line feed (ASCII 0x0a),
+ * and the null character (ASCII 0x00) might be exploited by an
+ * attacker if they are translated verbatim. Any request or
+ * response that contains a character not permitted in a field
+ * value MUST be treated as malformed
+ */
+
+ /* look for forbidden control characters in the trailer value */
+ ctl = ist_find_ctl(list[hdr_idx].v);
+ if (unlikely(ctl) && http_header_has_forbidden_char(list[hdr_idx].v, ctl)) {
+ TRACE_ERROR("control character present in trailer value", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3s->err = H3_MESSAGE_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (!htx_add_trailer(htx, list[hdr_idx].n, list[hdr_idx].v)) {
+ TRACE_ERROR("cannot add trailer", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ ++hdr_idx;
+ }
+
+ /* Close the trailer section with an end-of-trailers HTX block. */
+ if (!htx_add_endof(htx, HTX_BLK_EOT)) {
+ TRACE_ERROR("cannot add trailer", H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ if (fin)
+ htx->flags |= HTX_FL_EOM;
+
+ out:
+ /* Commit HTX content back into the application buffer when one was
+ * allocated ; <htx> is non-NULL whenever <appbuf> is.
+ */
+ if (appbuf)
+ htx_to_buf(htx, appbuf);
+
+ TRACE_LEAVE(H3_EV_RX_FRAME|H3_EV_RX_HDR, qcs->qcc->conn, qcs);
+ return len;
+}
+
+/* Copy from buffer <buf> a H3 DATA frame of length <len> in QUIC stream <qcs>
+ * HTX buffer. <fin> must be set if this is the last data to transfer from this
+ * stream.
+ *
+ * Returns the number of consumed bytes or a negative error code.
+ */
+static ssize_t h3_data_to_htx(struct qcs *qcs, const struct buffer *buf,
+ uint64_t len, char fin)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ struct buffer *appbuf;
+ struct htx *htx = NULL;
+ size_t htx_sent = 0;
+ int htx_space;
+ char *head;
+
+ TRACE_ENTER(H3_EV_RX_FRAME|H3_EV_RX_DATA, qcs->qcc->conn, qcs);
+
+ if (!(appbuf = qcs_get_buf(qcs, &qcs->rx.app_buf))) {
+ TRACE_ERROR("data buffer alloc failure", H3_EV_RX_FRAME|H3_EV_RX_DATA, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ len = -1;
+ goto out;
+ }
+
+ htx = htx_from_buf(appbuf);
+
+ /* The DATA frame may not be fully received yet : only copy what is
+ * available and clear <fin> since more payload will follow.
+ */
+ if (len > b_data(buf)) {
+ len = b_data(buf);
+ fin = 0;
+ }
+
+ head = b_head(buf);
+ retry:
+ htx_space = htx_free_data_space(htx);
+ if (!htx_space) {
+ qcs->flags |= QC_SF_DEM_FULL;
+ goto out;
+ }
+
+ /* Clamp the copy to the room left in HTX ; the remainder will be
+ * consumed on a later invocation so <fin> cannot be reported now.
+ */
+ if (len > htx_space) {
+ len = htx_space;
+ fin = 0;
+ }
+
+ /* Input wraps around the circular buffer : copy the contiguous part up
+ * to the wrap point first, then restart from the buffer origin.
+ */
+ if (head + len > b_wrap(buf)) {
+ size_t contig = b_wrap(buf) - head;
+ htx_sent = htx_add_data(htx, ist2(b_head(buf), contig));
+ if (htx_sent < contig) {
+ qcs->flags |= QC_SF_DEM_FULL;
+ goto out;
+ }
+
+ len -= contig;
+ head = b_orig(buf);
+ goto retry;
+ }
+
+ htx_sent += htx_add_data(htx, ist2(head, len));
+ if (htx_sent < len) {
+ /* Partial copy : demux is blocked until HTX content is consumed. */
+ qcs->flags |= QC_SF_DEM_FULL;
+ goto out;
+ }
+
+ /* NOTE(review): after a wrapped copy <htx_sent> includes the first
+ * chunk while <len> only counts the remainder, so <len == htx_sent>
+ * cannot hold and EOM would not be set ; confirm the wrapping case.
+ */
+ if (fin && len == htx_sent)
+ htx->flags |= HTX_FL_EOM;
+
+ out:
+ if (appbuf)
+ htx_to_buf(htx, appbuf);
+
+ TRACE_LEAVE(H3_EV_RX_FRAME|H3_EV_RX_DATA, qcs->qcc->conn, qcs);
+ return htx_sent;
+}
+
+/* Parse a SETTINGS frame of length <len> of payload <buf>.
+ *
+ * Returns the number of consumed bytes or a negative error code. On error
+ * the connection error code to use is stored in h3c->err.
+ */
+static ssize_t h3_parse_settings_frm(struct h3c *h3c, const struct buffer *buf,
+ size_t len)
+{
+ struct buffer b;
+ uint64_t id, value;
+ size_t ret = 0;
+ long mask = 0; /* bitmap used to detect duplicated settings identifier */
+
+ TRACE_ENTER(H3_EV_RX_FRAME|H3_EV_RX_SETTINGS, h3c->qcc->conn);
+
+ /* Work on a copy of <buf>. */
+ b = b_make(b_orig(buf), b_size(buf), b_head_ofs(buf), len);
+
+ while (b_data(&b)) {
+ /* Each setting is a pair of QUIC varints : identifier then value. */
+ if (!b_quic_dec_int(&id, &b, &ret) || !b_quic_dec_int(&value, &b, &ret)) {
+ /* Truncated setting entry. */
+ h3c->err = H3_FRAME_ERROR;
+ return -1;
+ }
+
+ h3_debug_printf(stderr, "%s id: %llu value: %llu\n",
+ __func__, (unsigned long long)id, (unsigned long long)value);
+
+ /* draft-ietf-quic-http34 7.2.4. SETTINGS
+ *
+ * The same setting identifier MUST NOT occur more than once in the
+ * SETTINGS frame. A receiver MAY treat the presence of duplicate
+ * setting identifiers as a connection error of type H3_SETTINGS_ERROR.
+ */
+
+ /* Ignore duplicate check for ID too big used for GREASE.
+ * <mask> is a bitmap indexed by <id>, so the bound is its width
+ * in bits, not its size in bytes : this covers identifiers up
+ * to 63 on 64-bit platforms instead of only 0-7.
+ */
+ if (id < sizeof(mask) * 8) {
+ if (ha_bit_test(id, &mask)) {
+ h3c->err = H3_SETTINGS_ERROR;
+ return -1;
+ }
+ ha_bit_set(id, &mask);
+ }
+
+ switch (id) {
+ case H3_SETTINGS_QPACK_MAX_TABLE_CAPACITY:
+ h3c->qpack_max_table_capacity = value;
+ break;
+ case H3_SETTINGS_MAX_FIELD_SECTION_SIZE:
+ h3c->max_field_section_size = value;
+ break;
+ case H3_SETTINGS_QPACK_BLOCKED_STREAMS:
+ h3c->qpack_blocked_streams = value;
+ break;
+
+ case H3_SETTINGS_RESERVED_0:
+ case H3_SETTINGS_RESERVED_2:
+ case H3_SETTINGS_RESERVED_3:
+ case H3_SETTINGS_RESERVED_4:
+ case H3_SETTINGS_RESERVED_5:
+ /* draft-ietf-quic-http34 7.2.4.1. Defined SETTINGS Parameters
+ *
+ * Setting identifiers which were defined in [HTTP2] where there is no
+ * corresponding HTTP/3 setting have also been reserved
+ * (Section 11.2.2). These reserved settings MUST NOT be sent, and
+ * their receipt MUST be treated as a connection error of type
+ * H3_SETTINGS_ERROR.
+ */
+ h3c->err = H3_SETTINGS_ERROR;
+ return -1;
+ default:
+ /* MUST be ignored */
+ break;
+ }
+ }
+
+ TRACE_LEAVE(H3_EV_RX_FRAME|H3_EV_RX_SETTINGS, h3c->qcc->conn);
+ return ret;
+}
+
+/* Decode <qcs> remotely initiated bidi-stream. <fin> must be set to indicate
+ * that we received the last data of the stream.
+ *
+ * Returns the number of bytes consumed from <b>, or a negative value on a
+ * fatal error.
+ */
+static ssize_t h3_decode_qcs(struct qcs *qcs, struct buffer *b, int fin)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ ssize_t total = 0, ret;
+
+ TRACE_ENTER(H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+
+ /* Uni-directional streams first need their type byte parsed. */
+ if (quic_stream_is_uni(qcs->id) && !(h3s->flags & H3_SF_UNI_INIT)) {
+ ret = h3_init_uni_stream(h3c, qcs, b);
+ if (ret < 0) {
+ TRACE_ERROR("cannot initialize uni stream", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ goto err;
+ }
+ else if (!ret) {
+ /* not enough data to initialize uni stream, retry later */
+ goto done;
+ }
+
+ total += ret;
+ }
+
+ if (quic_stream_is_uni(qcs->id) && (h3s->flags & H3_SF_UNI_NO_H3)) {
+ /* For non-h3 STREAM, parse it and return immediately. */
+ if ((ret = h3_parse_uni_stream_no_h3(qcs, b, fin)) < 0) {
+ TRACE_ERROR("error when parsing non-HTTP3 uni stream", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ goto err;
+ }
+
+ total += ret;
+ goto done;
+ }
+
+ /* RFC 9114 6.2.1. Control Streams
+ *
+ * The sender MUST NOT close the control stream, and the receiver MUST NOT
+ * request that the sender close the control stream. If either control
+ * stream is closed at any point, this MUST be treated as a connection
+ * error of type H3_CLOSED_CRITICAL_STREAM.
+ */
+ if (h3s->type == H3S_T_CTRL && fin) {
+ TRACE_ERROR("control stream closed by remote peer", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, H3_CLOSED_CRITICAL_STREAM, 1);
+ goto err;
+ }
+
+ /* A standalone FIN on a bidi stream only needs EOM to be reported. */
+ if (!b_data(b) && fin && quic_stream_is_bidi(qcs->id)) {
+ struct buffer *appbuf;
+ struct htx *htx;
+
+ TRACE_PROTO("received FIN without data", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ if (!(appbuf = qcs_get_buf(qcs, &qcs->rx.app_buf))) {
+ TRACE_ERROR("data buffer alloc failure", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+
+ htx = htx_from_buf(appbuf);
+ if (!htx_set_eom(htx)) {
+ TRACE_ERROR("cannot set EOM", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ }
+ htx_to_buf(htx, appbuf);
+ goto done;
+ }
+
+ /* Main demux loop : parse a frame header when needed, then dispatch
+ * its payload per frame type. Stops on lack of data, full HTX buffer
+ * or any stream/connection error.
+ */
+ while (b_data(b) && !(qcs->flags & QC_SF_DEM_FULL) && !h3c->err && !h3s->err) {
+ uint64_t ftype, flen;
+ char last_stream_frame = 0;
+
+ if (!h3s->demux_frame_len) {
+ /* Switch to a new frame. */
+ size_t hlen = h3_decode_frm_header(&ftype, &flen, b);
+ if (!hlen) {
+ TRACE_PROTO("pause parsing on incomplete frame header", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ break;
+ }
+
+ h3s->demux_frame_type = ftype;
+ h3s->demux_frame_len = flen;
+ total += hlen;
+ TRACE_PROTO("parsing a new frame", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+
+ /* Check that content-length is not exceeded on a new DATA frame. */
+ if (ftype == H3_FT_DATA) {
+ h3s->data_len += flen;
+ if (h3s->flags & H3_SF_HAVE_CLEN && h3_check_body_size(qcs, (fin && flen == b_data(b))))
+ break;
+ }
+
+ if ((ret = h3_check_frame_valid(h3c, qcs, ftype))) {
+ TRACE_ERROR("received an invalid frame", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, ret, 1);
+ goto err;
+ }
+
+ if (!b_data(b))
+ break;
+ }
+
+ flen = h3s->demux_frame_len;
+ ftype = h3s->demux_frame_type;
+
+ /* Do not demux incomplete frames except H3 DATA which can be
+ * fragmented in multiple HTX blocks.
+ */
+ if (flen > b_data(b) && ftype != H3_FT_DATA) {
+ /* Reject frames bigger than bufsize.
+ *
+ * TODO HEADERS should in complement be limited with H3
+ * SETTINGS_MAX_FIELD_SECTION_SIZE parameter to prevent
+ * excessive decompressed size.
+ */
+ if (flen > QC_S_RX_BUF_SZ) {
+ TRACE_ERROR("received a too big frame", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, H3_EXCESSIVE_LOAD, 1);
+ goto err;
+ }
+ break;
+ }
+
+ last_stream_frame = (fin && flen == b_data(b));
+
+ /* Check content-length equality with DATA frames length on the last frame. */
+ if (last_stream_frame && h3s->flags & H3_SF_HAVE_CLEN && h3_check_body_size(qcs, last_stream_frame))
+ break;
+
+ h3_inc_frame_type_cnt(h3c->prx_counters, ftype);
+ switch (ftype) {
+ case H3_FT_DATA:
+ ret = h3_data_to_htx(qcs, b, flen, last_stream_frame);
+ h3s->st_req = H3S_ST_REQ_DATA;
+ break;
+ case H3_FT_HEADERS:
+ /* A second HEADERS frame on a stream carries trailers. */
+ if (h3s->st_req == H3S_ST_REQ_BEFORE) {
+ ret = h3_headers_to_htx(qcs, b, flen, last_stream_frame);
+ h3s->st_req = H3S_ST_REQ_HEADERS;
+ }
+ else {
+ ret = h3_trailers_to_htx(qcs, b, flen, last_stream_frame);
+ h3s->st_req = H3S_ST_REQ_TRAILERS;
+ }
+ break;
+ case H3_FT_CANCEL_PUSH:
+ case H3_FT_PUSH_PROMISE:
+ case H3_FT_MAX_PUSH_ID:
+ case H3_FT_GOAWAY:
+ /* Not supported */
+ ret = flen;
+ break;
+ case H3_FT_SETTINGS:
+ ret = h3_parse_settings_frm(qcs->qcc->ctx, b, flen);
+ if (ret < 0) {
+ TRACE_ERROR("error on SETTINGS parsing", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ qcc_set_error(qcs->qcc, h3c->err, 1);
+ goto err;
+ }
+ h3c->flags |= H3_CF_SETTINGS_RECV;
+ break;
+ default:
+ /* draft-ietf-quic-http34 9. Extensions to HTTP/3
+ *
+ * Implementations MUST discard frames [...] that have unknown
+ * or unsupported types.
+ */
+ ret = flen;
+ break;
+ }
+
+ /* Account the parsed bytes against the current frame length. */
+ if (ret > 0) {
+ BUG_ON(h3s->demux_frame_len < ret);
+ h3s->demux_frame_len -= ret;
+ b_del(b, ret);
+ total += ret;
+ }
+ }
+
+ /* Reset demux frame type for traces. */
+ if (!h3s->demux_frame_len)
+ h3s->demux_frame_type = H3_FT_UNINIT;
+
+ /* Interrupt decoding on stream/connection error detected. */
+ if (h3s->err) {
+ qcc_abort_stream_read(qcs);
+ qcc_reset_stream(qcs, h3s->err);
+ return b_data(b);
+ }
+ else if (h3c->err) {
+ qcc_set_error(qcs->qcc, h3c->err, 1);
+ return b_data(b);
+ }
+
+ /* TODO may be useful to wakeup the MUX if blocked due to full buffer.
+ * However, currently, io-cb of MUX does not handle Rx.
+ */
+
+ done:
+ TRACE_LEAVE(H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ return total;
+
+ err:
+ TRACE_DEVEL("leaving on error", H3_EV_RX_FRAME, qcs->qcc->conn, qcs);
+ return -1;
+}
+
+/* Returns the Tx buffer of <qcs>, allocating it on first use.
+ * The returned buffer may still be unallocated (null buffer) if the
+ * allocation failed.
+ */
+static struct buffer *mux_get_buf(struct qcs *qcs)
+{
+ struct buffer *out = &qcs->tx.buf;
+
+ /* Allocate storage lazily, only when the buffer has no area yet. */
+ if (!b_size(out))
+ b_alloc(out);
+
+ return out;
+}
+
+/* Function used to emit stream data from <qcs> control uni-stream.
+ *
+ * On success return the number of sent bytes. A negative code is used on
+ * error.
+ */
+static int h3_control_send(struct qcs *qcs, void *ctx)
+{
+ int ret;
+ struct h3c *h3c = ctx;
+ unsigned char data[(2 + 3) * 2 * QUIC_VARINT_MAX_SIZE]; /* enough for 3 settings */
+ struct buffer pos, *res;
+ size_t frm_len;
+
+ TRACE_ENTER(H3_EV_TX_SETTINGS, qcs->qcc->conn, qcs);
+
+ /* The SETTINGS frame must be emitted only once per connection. */
+ BUG_ON_HOT(h3c->flags & H3_CF_SETTINGS_SENT);
+
+ ret = 0;
+ pos = b_make((char *)data, sizeof(data), 0, 0);
+
+ /* Compute the SETTINGS payload length : two mandatory QPACK settings,
+ * plus the optional max field section size when configured.
+ */
+ frm_len = quic_int_getsize(H3_SETTINGS_QPACK_MAX_TABLE_CAPACITY) +
+ quic_int_getsize(h3_settings_qpack_max_table_capacity) +
+ quic_int_getsize(H3_SETTINGS_QPACK_BLOCKED_STREAMS) +
+ quic_int_getsize(h3_settings_qpack_blocked_streams);
+ if (h3_settings_max_field_section_size) {
+ frm_len += quic_int_getsize(H3_SETTINGS_MAX_FIELD_SECTION_SIZE) +
+ quic_int_getsize(h3_settings_max_field_section_size);
+ }
+
+ /* Stream type prefix identifying the control uni-stream. */
+ b_quic_enc_int(&pos, H3_UNI_S_T_CTRL, 0);
+ /* Build a SETTINGS frame */
+ b_quic_enc_int(&pos, H3_FT_SETTINGS, 0);
+ b_quic_enc_int(&pos, frm_len, 0);
+ b_quic_enc_int(&pos, H3_SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0);
+ b_quic_enc_int(&pos, h3_settings_qpack_max_table_capacity, 0);
+ b_quic_enc_int(&pos, H3_SETTINGS_QPACK_BLOCKED_STREAMS, 0);
+ b_quic_enc_int(&pos, h3_settings_qpack_blocked_streams, 0);
+ if (h3_settings_max_field_section_size) {
+ b_quic_enc_int(&pos, H3_SETTINGS_MAX_FIELD_SECTION_SIZE, 0);
+ b_quic_enc_int(&pos, h3_settings_max_field_section_size, 0);
+ }
+
+ res = mux_get_buf(qcs);
+ if (b_is_null(res)) {
+ TRACE_ERROR("cannot allocate Tx buffer", H3_EV_TX_SETTINGS, qcs->qcc->conn, qcs);
+ goto err;
+ }
+
+ if (b_room(res) < b_data(&pos)) {
+ // TODO the mux should be put in blocked state, with
+ // the stream in state waiting for settings to be sent
+ ABORT_NOW();
+ }
+
+ ret = b_force_xfer(res, &pos, b_data(&pos));
+ if (ret > 0) {
+ /* Register qcs for sending before other streams. */
+ qcc_send_stream(qcs, 1);
+ h3c->flags |= H3_CF_SETTINGS_SENT;
+ }
+
+ TRACE_LEAVE(H3_EV_TX_SETTINGS, qcs->qcc->conn, qcs);
+ return ret;
+
+ err:
+ TRACE_DEVEL("leaving on error", H3_EV_TX_SETTINGS, qcs->qcc->conn, qcs);
+ return -1;
+}
+
+/* Convert the HTX response headers of <htx> into a H3 HEADERS frame emitted
+ * in <qcs> Tx buffer. HTX blocks are removed up to and including EOH.
+ *
+ * Returns the total size of removed HTX blocks, or -1 on error.
+ */
+static int h3_resp_headers_send(struct qcs *qcs, struct htx *htx)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ struct buffer outbuf;
+ struct buffer headers_buf = BUF_NULL;
+ struct buffer *res;
+ struct http_hdr list[global.tune.max_http_hdr];
+ struct htx_sl *sl;
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ int frame_length_size; /* size in bytes of frame length varint field */
+ int ret = 0;
+ int hdr;
+ int status = 0;
+
+ TRACE_ENTER(H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+
+ /* Collect the start-line and the header blocks into <list>. */
+ sl = NULL;
+ hdr = 0;
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ if (type == HTX_BLK_EOH)
+ break;
+
+ if (type == HTX_BLK_RES_SL) {
+ /* start-line -> HEADERS h3 frame */
+ BUG_ON(sl);
+ sl = htx_get_blk_ptr(htx, blk);
+ /* TODO should be on h3 layer */
+ status = sl->info.res.status;
+ }
+ else if (type == HTX_BLK_HDR) {
+ if (unlikely(hdr >= sizeof(list) / sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+ hdr++;
+ }
+ else {
+ ABORT_NOW();
+ goto err;
+ }
+ }
+
+ BUG_ON(!sl);
+
+ /* Empty name entry terminates the list. */
+ list[hdr].n = ist("");
+
+ res = mux_get_buf(qcs);
+ if (b_is_null(res)) {
+ TRACE_ERROR("cannot allocate Tx buffer", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+
+ /* At least 5 bytes to store frame type + length as a varint max size */
+ if (b_room(res) < 5)
+ ABORT_NOW();
+
+ /* NOTE(review): b_reset() looks redundant here as <outbuf> is fully
+ * overwritten by b_make() just after.
+ */
+ b_reset(&outbuf);
+ outbuf = b_make(b_tail(res), b_contig_space(res), 0, 0);
+ /* Start the headers after frame type + length */
+ headers_buf = b_make(b_head(res) + 5, b_size(res) - 5, 0, 0);
+
+ if (qpack_encode_field_section_line(&headers_buf))
+ ABORT_NOW();
+ if (qpack_encode_int_status(&headers_buf, status)) {
+ TRACE_ERROR("invalid status code", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+
+ for (hdr = 0; hdr < sizeof(list) / sizeof(list[0]); ++hdr) {
+ if (isteq(list[hdr].n, ist("")))
+ break;
+
+ /* RFC 9114 4.2. HTTP Fields
+ *
+ * An intermediary transforming an HTTP/1.x message to HTTP/3
+ * MUST remove connection-specific header fields as discussed in
+ * Section 7.6.1 of [HTTP], or their messages will be treated by
+ * other HTTP/3 endpoints as malformed.
+ */
+ if (isteq(list[hdr].n, ist("connection")) ||
+ isteq(list[hdr].n, ist("proxy-connection")) ||
+ isteq(list[hdr].n, ist("keep-alive")) ||
+ isteq(list[hdr].n, ist("transfer-encoding"))) {
+ continue;
+ }
+ else if (isteq(list[hdr].n, ist("te"))) {
+ /* "te" may only be sent with "trailers" if this value
+ * is present, otherwise it must be deleted.
+ */
+ const struct ist v = istist(list[hdr].v, ist("trailers"));
+ if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
+ continue;
+ list[hdr].v = ist("trailers");
+ }
+
+ if (qpack_encode_header(&headers_buf, list[hdr].n, list[hdr].v))
+ ABORT_NOW();
+ }
+
+ /* Now that all headers are encoded, we are certain that res buffer is
+ * big enough
+ */
+ /* Headers were written at offset 5 ; shift <head> forward so that the
+ * 1-byte frame type plus the <frame_length_size>-byte length varint
+ * end exactly where the encoded headers begin.
+ */
+ frame_length_size = quic_int_getsize(b_data(&headers_buf));
+ res->head += 4 - frame_length_size;
+ b_putchr(res, 0x01); /* h3 HEADERS frame type */
+ if (!b_quic_enc_int(res, b_data(&headers_buf), 0))
+ ABORT_NOW();
+ b_add(res, b_data(&headers_buf));
+
+ /* Remove consumed HTX blocks up to and including EOH, accounting
+ * their size as the return value.
+ */
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ if (type == HTX_BLK_EOH)
+ break;
+ }
+
+ TRACE_LEAVE(H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ return ret;
+
+ err:
+ TRACE_DEVEL("leaving on error", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ return -1;
+}
+
+/* Convert a series of HTX trailer blocks from <htx> buffer into <qcs> buffer
+ * as a H3 HEADERS frame. H3 forbidden trailers are skipped. HTX trailer blocks
+ * are removed from <htx> until EOT is found and itself removed.
+ *
+ * If only a EOT HTX block is present without trailer, no H3 frame is produced.
+ * Caller is responsible to emit an empty QUIC STREAM frame to signal the end
+ * of the stream.
+ *
+ * Returns the size of HTX blocks removed.
+ */
+static int h3_resp_trailers_send(struct qcs *qcs, struct htx *htx)
+{
+ struct h3s *h3s = qcs->ctx;
+ struct h3c *h3c = h3s->h3c;
+ struct buffer headers_buf = BUF_NULL;
+ struct buffer *res;
+ struct http_hdr list[global.tune.max_http_hdr];
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ char *tail;
+ int ret = 0;
+ int hdr;
+
+ TRACE_ENTER(H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+
+ /* Collect the trailer blocks into <list> up to EOT. */
+ hdr = 0;
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ if (type == HTX_BLK_EOT)
+ break;
+
+ if (type == HTX_BLK_TLR) {
+ if (unlikely(hdr >= sizeof(list) / sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+ hdr++;
+ }
+ else {
+ TRACE_ERROR("unexpected HTX block", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+ }
+
+ if (!hdr) {
+ /* No headers encoded here so no need to generate a H3 HEADERS
+ * frame. Mux will send an empty QUIC STREAM frame with FIN.
+ */
+ TRACE_DATA("skipping trailer", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ goto end;
+ }
+ /* Empty name entry terminates the list. */
+ list[hdr].n = ist("");
+
+ res = mux_get_buf(qcs);
+ if (b_is_null(res)) {
+ TRACE_ERROR("cannot allocate Tx buffer", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ h3c->err = H3_INTERNAL_ERROR;
+ goto err;
+ }
+
+ /* At least 9 bytes to store frame type + length as a varint max size */
+ if (b_room(res) < 9) {
+ qcs->flags |= QC_SF_BLK_MROOM;
+ goto err;
+ }
+
+ /* Force buffer realignment as size required to encode headers is unknown. */
+ if (b_space_wraps(res))
+ b_slow_realign(res, trash.area, b_data(res));
+ /* Start the headers after frame type + length */
+ headers_buf = b_make(b_peek(res, b_data(res) + 9), b_contig_space(res) - 9, 0, 0);
+
+ if (qpack_encode_field_section_line(&headers_buf)) {
+ qcs->flags |= QC_SF_BLK_MROOM;
+ goto err;
+ }
+
+ /* Remember the tail position to detect later whether any trailer was
+ * really encoded beyond the field section line.
+ */
+ tail = b_tail(&headers_buf);
+ for (hdr = 0; hdr < sizeof(list) / sizeof(list[0]); ++hdr) {
+ if (isteq(list[hdr].n, ist("")))
+ break;
+
+ /* forbidden HTTP/3 headers, cf h3_resp_headers_send() */
+ if (isteq(list[hdr].n, ist("host")) ||
+ isteq(list[hdr].n, ist("content-length")) ||
+ isteq(list[hdr].n, ist("connection")) ||
+ isteq(list[hdr].n, ist("proxy-connection")) ||
+ isteq(list[hdr].n, ist("keep-alive")) ||
+ isteq(list[hdr].n, ist("te")) ||
+ isteq(list[hdr].n, ist("transfer-encoding"))) {
+ continue;
+ }
+
+ if (qpack_encode_header(&headers_buf, list[hdr].n, list[hdr].v)) {
+ qcs->flags |= QC_SF_BLK_MROOM;
+ goto err;
+ }
+ }
+
+ /* Check that at least one header was encoded in buffer. */
+ if (b_tail(&headers_buf) == tail) {
+ /* No headers encoded here so no need to generate a H3 HEADERS
+ * frame. Mux will send an empty QUIC STREAM frame with FIN.
+ */
+ TRACE_DATA("skipping trailer", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ goto end;
+ }
+
+ /* Now that all headers are encoded, we are certain that res buffer is
+ * big enough.
+ */
+ b_putchr(res, 0x01); /* h3 HEADERS frame type */
+ /* Length is forced on 8 bytes to exactly fill the 9-byte reservation. */
+ if (!b_quic_enc_int(res, b_data(&headers_buf), 8))
+ ABORT_NOW();
+ b_add(res, b_data(&headers_buf));
+
+ end:
+ /* Remove consumed HTX blocks up to and including EOT, accounting
+ * their size as the return value.
+ */
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ if (type == HTX_BLK_EOT)
+ break;
+ }
+
+ TRACE_LEAVE(H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ return ret;
+
+ err:
+ TRACE_DEVEL("leaving on error", H3_EV_TX_HDR, qcs->qcc->conn, qcs);
+ return -1;
+}
+
+/* Transcode HTX DATA blocks from <buf> into HTTP/3 DATA frames on <qcs>,
+ * limited to <count> bytes. A zero-copy area swap is attempted when a single
+ * DATA block covers the whole transfer and the stream buffer is empty.
+ *
+ * Returns the total of bytes sent. This corresponds to the
+ * total bytes of HTX block removed. A negative error code is returned in case
+ * of a fatal error which should cause a connection closure.
+ */
+static int h3_resp_data_send(struct qcs *qcs, struct buffer *buf, size_t count)
+{
+	struct htx *htx;
+	struct h3s *h3s = qcs->ctx;
+	struct h3c *h3c = h3s->h3c;
+	struct buffer outbuf;
+	struct buffer *res;
+	size_t total = 0;
+	int bsize, fsize, hsize;
+	struct htx_blk *blk;
+	enum htx_blk_type type;
+
+	TRACE_ENTER(H3_EV_TX_DATA, qcs->qcc->conn, qcs);
+
+	htx = htx_from_buf(buf);
+
+ new_frame:
+	if (!count || htx_is_empty(htx))
+		goto end;
+
+	blk = htx_get_head_blk(htx);
+	type = htx_get_blk_type(blk);
+	fsize = bsize = htx_get_blksz(blk);
+
+	/* h3 DATA headers : 1-byte frame type + varint frame length */
+	hsize = 1 + QUIC_VARINT_MAX_SIZE;
+
+	/* Only DATA blocks are handled here; any other type ends the loop. */
+	if (type != HTX_BLK_DATA)
+		goto end;
+
+	res = mux_get_buf(qcs);
+	if (b_is_null(res)) {
+		TRACE_ERROR("cannot allocate Tx buffer", H3_EV_TX_DATA, qcs->qcc->conn, qcs);
+		h3c->err = H3_INTERNAL_ERROR;
+		goto err;
+	}
+
+	/* Zero-copy path : swap the HTX buffer area into the stream buffer
+	 * when the lone DATA block covers the whole requested transfer.
+	 */
+	if (unlikely(fsize == count &&
+	             !b_data(res) &&
+	             htx_nbblks(htx) == 1 && type == HTX_BLK_DATA)) {
+		void *old_area = res->area;
+
+		/* map an h3 frame to the HTX block so that we can put the
+		 * frame header there.
+		 */
+		*res = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - hsize, fsize + hsize);
+		outbuf = b_make(b_head(res), hsize, 0, 0);
+		b_putchr(&outbuf, 0x00); /* h3 frame type = DATA */
+		b_quic_enc_int(&outbuf, fsize, QUIC_VARINT_MAX_SIZE); /* h3 frame length */
+
+		/* and exchange with our old area */
+		buf->area = old_area;
+		buf->data = buf->head = 0;
+		total += fsize;
+		fsize = 0;
+		goto end;
+	}
+
+	if (fsize > count)
+		fsize = count;
+
+	/* Look for contiguous room for the frame header, realigning the
+	 * buffer when needed.
+	 * NOTE(review): on the first pass b_reset() operates on the not yet
+	 * initialized <outbuf>, immediately overwritten by b_make() below —
+	 * redundant but harmless.
+	 */
+	while (1) {
+		b_reset(&outbuf);
+		outbuf = b_make(b_tail(res), b_contig_space(res), 0, 0);
+		if (b_size(&outbuf) > hsize || !b_space_wraps(res))
+			break;
+		b_slow_realign(res, trash.area, b_data(res));
+	}
+
+	/* Not enough room for headers and at least one data byte, block the
+	 * stream. It is expected that the stream connector layer will subscribe
+	 * on SEND.
+	 */
+	if (b_size(&outbuf) <= hsize) {
+		TRACE_STATE("not enough room for data frame", H3_EV_TX_DATA, qcs->qcc->conn, qcs);
+		qcs->flags |= QC_SF_BLK_MROOM;
+		goto end;
+	}
+
+	/* Clamp the frame payload to the available room. */
+	if (b_size(&outbuf) < hsize + fsize)
+		fsize = b_size(&outbuf) - hsize;
+	BUG_ON(fsize <= 0);
+
+	b_putchr(&outbuf, 0x00); /* h3 frame type = DATA */
+	b_quic_enc_int(&outbuf, fsize, 0); /* h3 frame length */
+
+	b_putblk(&outbuf, htx_get_blk_ptr(htx, blk), fsize);
+	total += fsize;
+	count -= fsize;
+
+	/* Remove the block if fully consumed, else strip only the sent part. */
+	if (fsize == bsize)
+		htx_remove_blk(htx, blk);
+	else
+		htx_cut_data_blk(htx, blk, fsize);
+
+	/* commit the buffer */
+	b_add(res, b_data(&outbuf));
+	goto new_frame;
+
+ end:
+	TRACE_LEAVE(H3_EV_TX_DATA, qcs->qcc->conn, qcs);
+	return total;
+
+ err:
+	BUG_ON(total); /* Must return HTX removed size if at least one frame encoded. */
+	TRACE_DEVEL("leaving on error", H3_EV_TX_DATA, qcs->qcc->conn, qcs);
+	return -1;
+}
+
+/* Convert up to <count> bytes of HTX content from <buf> into HTTP/3 frames
+ * emitted on stream <qcs>. Start-line, data and trailers blocks are handed to
+ * the dedicated h3_resp_*_send() helpers; other block types are consumed
+ * silently. The loop stops on buffer-room blocking or connection error.
+ *
+ * Returns the number of HTX bytes consumed.
+ */
+static size_t h3_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count)
+{
+	struct h3s *h3s = qcs->ctx;
+	struct h3c *h3c = h3s->h3c;
+	size_t total = 0;
+	enum htx_blk_type btype;
+	struct htx *htx;
+	struct htx_blk *blk;
+	uint32_t bsize;
+	int32_t idx;
+	int ret = 0;
+
+	TRACE_ENTER(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	htx = htx_from_buf(buf);
+
+	/* Flag the stream when the payload length cannot be announced. */
+	if (htx->extra && htx->extra == HTX_UNKOWN_PAYLOAD_LENGTH)
+		qcs->flags |= QC_SF_UNKNOWN_PL_LENGTH;
+
+	while (count && !htx_is_empty(htx) &&
+	       !(qcs->flags & QC_SF_BLK_MROOM) && !h3c->err) {
+
+		idx = htx_get_head(htx);
+		blk = htx_get_blk(htx, idx);
+		btype = htx_get_blk_type(blk);
+		bsize = htx_get_blksz(blk);
+
+		/* Not implemented : QUIC on backend side */
+		BUG_ON(btype == HTX_BLK_REQ_SL);
+
+		switch (btype) {
+		case HTX_BLK_RES_SL:
+			/* start-line -> HEADERS h3 frame */
+			ret = h3_resp_headers_send(qcs, htx);
+			if (ret > 0) {
+				total += ret;
+				count -= ret;
+				if (ret < bsize)
+					goto out;
+			}
+			break;
+
+		case HTX_BLK_DATA:
+			ret = h3_resp_data_send(qcs, buf, count);
+			if (ret > 0) {
+				/* buf area may have been swapped by the
+				 * zero-copy path : refresh the htx pointer.
+				 */
+				htx = htx_from_buf(buf);
+				total += ret;
+				count -= ret;
+				if (ret < bsize)
+					goto out;
+			}
+			break;
+
+		case HTX_BLK_TLR:
+		case HTX_BLK_EOT:
+			ret = h3_resp_trailers_send(qcs, htx);
+			if (ret > 0) {
+				total += ret;
+				count -= ret;
+				if (ret < bsize)
+					goto out;
+			}
+			break;
+
+		default:
+			htx_remove_blk(htx, blk);
+			total += bsize;
+			count -= bsize;
+			break;
+		}
+
+		/* If an error occurred, either buffer space or connection error
+		 * must be set to break current loop.
+		 */
+		BUG_ON(ret < 0 && !(qcs->flags & QC_SF_BLK_MROOM) && !h3c->err);
+	}
+
+	/* Interrupt sending on connection error. */
+	if (unlikely(h3c->err)) {
+		qcc_set_error(qcs->qcc, h3c->err, 1);
+		goto out;
+	}
+
+	/* RFC 9114 4.1. HTTP Message Framing
+	 *
+	 * A server can send a complete response prior to the client sending an
+	 * entire request if the response does not depend on any portion of the
+	 * request that has not been sent and received. When the server does not
+	 * need to receive the remainder of the request, it MAY abort reading
+	 * the request stream, send a complete response, and cleanly close the
+	 * sending part of the stream. The error code H3_NO_ERROR SHOULD be used
+	 * when requesting that the client stop sending on the request stream.
+	 * Clients MUST NOT discard complete responses as a result of having
+	 * their request terminated abruptly, though clients can always discard
+	 * responses at their discretion for other reasons. If the server sends
+	 * a partial or complete response but does not abort reading the
+	 * request, clients SHOULD continue sending the content of the request
+	 * and close the stream normally.
+	 */
+	if (unlikely((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) &&
+	    !qcs_is_close_remote(qcs)) {
+		/* Generate a STOP_SENDING if full response transferred before
+		 * receiving the full request.
+		 */
+		qcs->err = H3_NO_ERROR;
+		qcc_abort_stream_read(qcs);
+	}
+
+ out:
+	htx_to_buf(htx, buf);
+
+	TRACE_LEAVE(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+	return total;
+}
+
+/* Prepare the output buffer of <qcs> for a zero-copy ("fast-forward")
+ * transfer of at most <count> payload bytes. Contiguous room for the H3 DATA
+ * frame header is reserved in front of the payload; the header itself is
+ * written later by h3_done_ff(). On blocking, an IOBUF_FL_* flag is set so
+ * the stream layer falls back or retries.
+ *
+ * Returns the number of bytes which may be forwarded (possibly 0).
+ */
+static size_t h3_nego_ff(struct qcs *qcs, size_t count)
+{
+	struct buffer *res;
+	int hsize;
+	size_t sz, ret = 0;
+
+	TRACE_ENTER(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	res = mux_get_buf(qcs);
+	if (b_is_null(res)) {
+		/* Buffer allocation failure : fast-forward cannot be used. */
+		qcs->sd->iobuf.flags |= IOBUF_FL_NO_FF;
+		goto end;
+	}
+
+	/* h3 DATA headers : 1-byte frame type + varint frame length */
+	hsize = 1 + QUIC_VARINT_MAX_SIZE;
+	while (1) {
+		if (b_contig_space(res) >= hsize || !b_space_wraps(res))
+			break;
+		b_slow_realign(res, trash.area, b_data(res));
+	}
+
+	/* Not enough room for headers and at least one data byte, block the
+	 * stream. It is expected that the stream connector layer will subscribe
+	 * on SEND.
+	 */
+	if (b_contig_space(res) <= hsize) {
+		qcs->flags |= QC_SF_BLK_MROOM;
+		qcs->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		goto end;
+	}
+
+	/* Cannot forward more than available room in output buffer */
+	sz = b_contig_space(res) - hsize;
+	if (count > sz)
+		count = sz;
+
+	/* Hand the buffer to the stream layer; payload will be appended after
+	 * the reserved frame header room (<offset>).
+	 */
+	qcs->sd->iobuf.buf = res;
+	qcs->sd->iobuf.offset = hsize;
+	qcs->sd->iobuf.data = 0;
+
+	ret = count;
+ end:
+	TRACE_LEAVE(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+	return ret;
+}
+
+/* Finalize a fast-forward transfer on <qcs> : write the H3 DATA frame header
+ * in the room reserved by h3_nego_ff() in front of the forwarded payload,
+ * then release the iobuf.
+ *
+ * Returns the total number of bytes transferred.
+ */
+static size_t h3_done_ff(struct qcs *qcs)
+{
+	size_t total = qcs->sd->iobuf.data;
+	TRACE_ENTER(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	if (qcs->sd->iobuf.data) {
+		/* Rewind over the committed bytes to write the frame header
+		 * at the reserved offset, then commit them back.
+		 */
+		b_sub(qcs->sd->iobuf.buf, qcs->sd->iobuf.data);
+		b_putchr(qcs->sd->iobuf.buf, 0x00); /* h3 frame type = DATA */
+		b_quic_enc_int(qcs->sd->iobuf.buf, qcs->sd->iobuf.data, QUIC_VARINT_MAX_SIZE); /* h3 frame length */
+		b_add(qcs->sd->iobuf.buf, qcs->sd->iobuf.data);
+	}
+
+	qcs->sd->iobuf.buf = NULL;
+	qcs->sd->iobuf.offset = 0;
+	qcs->sd->iobuf.data = 0;
+
+	TRACE_LEAVE(H3_EV_STRM_SEND, qcs->qcc->conn, qcs);
+	return total;
+}
+
+/* Notify about a closure on <qcs> stream requested by the remote peer.
+ *
+ * Stream channel <side> is explained relative to our endpoint : WR for
+ * STOP_SENDING or RD for RESET_STREAM reception. Callback decode_qcs() is used
+ * instead for closure performed using a STREAM frame with FIN bit.
+ *
+ * The main objective of this function is to check if closure is valid
+ * according to HTTP/3 specification.
+ *
+ * Returns 0 on success else non-zero. A CONNECTION_CLOSE is generated on
+ * error.
+ */
+static int h3_close(struct qcs *qcs, enum qcc_app_ops_close_side side)
+{
+	struct h3s *h3s = qcs->ctx;
+	struct h3c *h3c = h3s->h3c;
+
+	/* RFC 9114 6.2.1. Control Streams
+	 *
+	 * The sender
+	 * MUST NOT close the control stream, and the receiver MUST NOT
+	 * request that the sender close the control stream. If either
+	 * control stream is closed at any point, this MUST be treated
+	 * as a connection error of type H3_CLOSED_CRITICAL_STREAM.
+	 */
+	if (qcs == h3c->ctrl_strm || h3s->type == H3S_T_CTRL) {
+		TRACE_ERROR("closure detected on control stream", H3_EV_H3S_END, qcs->qcc->conn, qcs);
+		qcc_set_error(qcs->qcc, H3_CLOSED_CRITICAL_STREAM, 1);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Allocate and initialize the h3s application context of stream <qcs>.
+ * Requests opened after a GOAWAY emission are rejected without allocating a
+ * context.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int h3_attach(struct qcs *qcs, void *conn_ctx)
+{
+	struct h3c *h3c = conn_ctx;
+	struct h3s *h3s = NULL;
+
+	TRACE_ENTER(H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+
+	/* RFC 9114 5.2. Connection Shutdown
+	 *
+	 * Upon sending
+	 * a GOAWAY frame, the endpoint SHOULD explicitly cancel (see
+	 * Sections 4.1.1 and 7.2.3) any requests or pushes that have
+	 * identifiers greater than or equal to the one indicated, in
+	 * order to clean up transport state for the affected streams.
+	 * The endpoint SHOULD continue to do so as more requests or
+	 * pushes arrive.
+	 */
+	if (h3c->flags & H3_CF_GOAWAY_SENT && qcs->id >= h3c->id_goaway &&
+	    quic_stream_is_bidi(qcs->id)) {
+		/* Reject request and do not allocate a h3s context.
+		 * TODO support push uni-stream rejection.
+		 */
+		TRACE_STATE("reject stream higher than goaway", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+		qcc_abort_stream_read(qcs);
+		qcc_reset_stream(qcs, H3_REQUEST_REJECTED);
+		goto done;
+	}
+
+	h3s = pool_alloc(pool_head_h3s);
+	if (!h3s) {
+		TRACE_ERROR("h3s allocation failure", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+		goto err;
+	}
+
+	qcs->ctx = h3s;
+	h3s->h3c = conn_ctx;
+
+	/* Reset demux and body accounting state. */
+	h3s->demux_frame_len = 0;
+	h3s->demux_frame_type = H3_FT_UNINIT;
+	h3s->body_len = 0;
+	h3s->data_len = 0;
+	h3s->flags = 0;
+	h3s->err = 0;
+
+	if (quic_stream_is_bidi(qcs->id)) {
+		/* Bidirectional streams carry HTTP requests. */
+		h3s->type = H3S_T_REQ;
+		h3s->st_req = H3S_ST_REQ_BEFORE;
+		qcs_wait_http_req(qcs);
+	}
+	else {
+		/* stream type must be decoded for unidirectional streams */
+		h3s->type = H3S_T_UNKNOWN;
+	}
+
+ done:
+	TRACE_LEAVE(H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+	return 0;
+
+ err:
+	TRACE_DEVEL("leaving in error", H3_EV_H3S_NEW, qcs->qcc->conn, qcs);
+	return 1;
+}
+
+/* Release the h3s application context attached to stream <qcs>. */
+static void h3_detach(struct qcs *qcs)
+{
+	TRACE_ENTER(H3_EV_H3S_END, qcs->qcc->conn, qcs);
+
+	pool_free(pool_head_h3s, qcs->ctx);
+	qcs->ctx = NULL;
+
+	TRACE_LEAVE(H3_EV_H3S_END, qcs->qcc->conn, qcs);
+}
+
+/* Initialize H3 control stream and prepare SETTINGS emission.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int h3_finalize(void *ctx)
+{
+	struct h3c *h3c = ctx;
+	struct qcc *qcc = h3c->qcc;
+	struct qcs *qcs;
+
+	TRACE_ENTER(H3_EV_H3C_NEW, qcc->conn);
+
+	/* Open our local control stream. */
+	qcs = qcc_init_stream_local(h3c->qcc, 0);
+	if (!qcs) {
+		TRACE_ERROR("cannot init control stream", H3_EV_H3C_NEW, qcc->conn);
+		goto err;
+	}
+
+	h3c->ctrl_strm = qcs;
+
+	/* Queue the SETTINGS frame on the control stream. */
+	if (h3_control_send(qcs, h3c) < 0)
+		goto err;
+
+	TRACE_LEAVE(H3_EV_H3C_NEW, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_DEVEL("leaving on error", H3_EV_H3C_NEW, qcc->conn);
+	return 1;
+}
+
+/* Generate a GOAWAY frame for <h3c> connection on the control stream.
+ *
+ * Returns 0 on success else non-zero. In any case, H3_CF_GOAWAY_SENT is set
+ * so no stream above <id_goaway> can be opened afterwards.
+ */
+static int h3_send_goaway(struct h3c *h3c)
+{
+	struct qcs *qcs = h3c->ctrl_strm;
+	struct buffer pos, *res;
+	/* Scratch space : frame type + frame length + goaway id varints. */
+	unsigned char data[3 * QUIC_VARINT_MAX_SIZE];
+	size_t frm_len = quic_int_getsize(h3c->id_goaway);
+
+	TRACE_ENTER(H3_EV_H3C_END, h3c->qcc->conn);
+
+	if (!qcs) {
+		TRACE_ERROR("control stream not initialized", H3_EV_H3C_END, h3c->qcc->conn);
+		goto err;
+	}
+
+	/* Encode the whole frame in the local scratch buffer first. */
+	pos = b_make((char *)data, sizeof(data), 0, 0);
+
+	b_quic_enc_int(&pos, H3_FT_GOAWAY, 0);
+	b_quic_enc_int(&pos, frm_len, 0);
+	b_quic_enc_int(&pos, h3c->id_goaway, 0);
+
+	res = mux_get_buf(qcs);
+	if (b_is_null(res) || b_room(res) < b_data(&pos)) {
+		/* Do not try forcefully to emit GOAWAY if no space left. */
+		TRACE_ERROR("cannot send GOAWAY", H3_EV_H3C_END, h3c->qcc->conn, qcs);
+		goto err;
+	}
+
+	b_force_xfer(res, &pos, b_data(&pos));
+	qcc_send_stream(qcs, 1);
+
+	h3c->flags |= H3_CF_GOAWAY_SENT;
+	TRACE_LEAVE(H3_EV_H3C_END, h3c->qcc->conn);
+	return 0;
+
+ err:
+	/* Consider GOAWAY as sent even if not really the case. This will
+	 * block future stream opening using H3_REQUEST_REJECTED reset.
+	 */
+	h3c->flags |= H3_CF_GOAWAY_SENT;
+	TRACE_DEVEL("leaving in error", H3_EV_H3C_END, h3c->qcc->conn);
+	return 1;
+}
+
+/* Initialize the HTTP/3 context for <qcc> mux.
+ * Return 1 if succeeded, 0 if not.
+ */
+static int h3_init(struct qcc *qcc)
+{
+	struct h3c *h3c;
+	struct quic_conn *qc = qcc->conn->handle.qc;
+
+	TRACE_ENTER(H3_EV_H3C_NEW, qcc->conn);
+
+	h3c = pool_alloc(pool_head_h3c);
+	if (!h3c) {
+		TRACE_ERROR("cannot allocate h3c", H3_EV_H3C_NEW, qcc->conn);
+		goto fail_no_h3;
+	}
+
+	h3c->qcc = qcc;
+	h3c->ctrl_strm = NULL;
+	h3c->err = 0;
+	h3c->flags = 0;
+	h3c->id_goaway = 0;
+
+	qcc->ctx = h3c;
+	/* TODO cleanup only ref to quic_conn */
+	/* Retrieve the frontend counters registered by h3_stats_module. */
+	h3c->prx_counters =
+		EXTRA_COUNTERS_GET(qc->li->bind_conf->frontend->extra_counters_fe,
+		                   &h3_stats_module);
+	LIST_INIT(&h3c->buf_wait.list);
+
+	TRACE_LEAVE(H3_EV_H3C_NEW, qcc->conn);
+	return 1;
+
+ fail_no_h3:
+	TRACE_DEVEL("leaving on error", H3_EV_H3C_NEW, qcc->conn);
+	return 0;
+}
+
+/* Send a HTTP/3 GOAWAY followed by a CONNECTION_CLOSE_APP. */
+static void h3_shutdown(void *ctx)
+{
+	struct h3c *h3c = ctx;
+
+	TRACE_ENTER(H3_EV_H3C_END, h3c->qcc->conn);
+
+	/* RFC 9114 5.2. Connection Shutdown
+	 *
+	 * Even when a connection is not idle, either endpoint can decide to
+	 * stop using the connection and initiate a graceful connection close.
+	 * Endpoints initiate the graceful shutdown of an HTTP/3 connection by
+	 * sending a GOAWAY frame.
+	 */
+	/* Best-effort : a GOAWAY emission failure is deliberately ignored. */
+	h3_send_goaway(h3c);
+
+	/* RFC 9114 5.2. Connection Shutdown
+	 *
+	 * An endpoint that completes a
+	 * graceful shutdown SHOULD use the H3_NO_ERROR error code when closing
+	 * the connection.
+	 */
+	h3c->qcc->err = quic_err_app(H3_NO_ERROR);
+
+	TRACE_LEAVE(H3_EV_H3C_END, h3c->qcc->conn);
+}
+
+/* Release the HTTP/3 connection context <ctx>. */
+static void h3_release(void *ctx)
+{
+	pool_free(pool_head_h3c, ctx);
+}
+
+/* Increment the h3 error code counters for <error_code> value */
+static void h3_stats_inc_err_cnt(void *ctx, int err_code)
+{
+ struct h3c *h3c = ctx;
+
+ h3_inc_err_cnt(h3c->prx_counters, err_code);
+}
+
+/* Return a constant string naming the H3 frame type <type>. */
+static inline const char *h3_ft_str(uint64_t type)
+{
+	switch (type) {
+	case H3_FT_DATA:         return "DATA";
+	case H3_FT_HEADERS:      return "HEADERS";
+	case H3_FT_CANCEL_PUSH:  return "CANCEL_PUSH";
+	case H3_FT_SETTINGS:     return "SETTINGS";
+	case H3_FT_PUSH_PROMISE: return "PUSH_PROMISE";
+	case H3_FT_GOAWAY:       return "GOAWAY";
+	case H3_FT_MAX_PUSH_ID:  return "MAX_PUSH_ID";
+	default:                 return "_UNKNOWN_";
+	}
+}
+
+/* h3 trace handler : append extra context (qcc/qc/qcs pointers and the frame
+ * currently demuxed) to <trace_buf> according to the source verbosity.
+ */
+static void h3_trace(enum trace_level level, uint64_t mask,
+                     const struct trace_source *src,
+                     const struct ist where, const struct ist func,
+                     const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct connection *conn = a1;
+	const struct qcc *qcc = conn ? conn->ctx : NULL;
+	const struct qcs *qcs = a2;
+	const struct h3s *h3s = qcs ? qcs->ctx : NULL;
+
+	/* Nothing to print without a connection context. */
+	if (!qcc)
+		return;
+
+	if (src->verbosity > H3_VERB_CLEAN) {
+		chunk_appendf(&trace_buf, " : qcc=%p(F)", qcc);
+		if (qcc->conn->handle.qc)
+			chunk_appendf(&trace_buf, " qc=%p", qcc->conn->handle.qc);
+
+		if (qcs)
+			chunk_appendf(&trace_buf, " qcs=%p(%llu)", qcs, (ull)qcs->id);
+
+		/* Dump the frame type/length currently demuxed, if any. */
+		if (h3s && h3s->demux_frame_type != H3_FT_UNINIT) {
+			chunk_appendf(&trace_buf, " h3s.dem=%s/%llu",
+			              h3_ft_str(h3s->demux_frame_type), (ull)h3s->demux_frame_len);
+		}
+	}
+}
+
+/* HTTP/3 application layer operations
+ *
+ * Callbacks registered on the QUIC mux to implement the HTTP/3 application
+ * protocol on top of it.
+ */
+const struct qcc_app_ops h3_ops = {
+	.init = h3_init,
+	.attach = h3_attach,
+	.decode_qcs = h3_decode_qcs,
+	.snd_buf = h3_snd_buf,
+	.nego_ff = h3_nego_ff,
+	.done_ff = h3_done_ff,
+	.close = h3_close,
+	.detach = h3_detach,
+	.finalize = h3_finalize,
+	.shutdown = h3_shutdown,
+	.inc_err_cnt = h3_stats_inc_err_cnt,
+	.release = h3_release,
+};
diff --git a/src/h3_stats.c b/src/h3_stats.c
new file mode 100644
index 0000000..c96093f
--- /dev/null
+++ b/src/h3_stats.c
@@ -0,0 +1,276 @@
+#include <haproxy/h3.h>
+#include <haproxy/stats.h>
+
+/* Index of each h3 stats field. Each entry is used both as index into the
+ * h3_stats[] descriptions and into the field array filled by h3_fill_stats().
+ */
+enum {
+	/* h3 frame type counters */
+	H3_ST_DATA,
+	H3_ST_HEADERS,
+	H3_ST_CANCEL_PUSH,
+	H3_ST_PUSH_PROMISE,
+	H3_ST_MAX_PUSH_ID,
+	H3_ST_GOAWAY,
+	H3_ST_SETTINGS,
+	/* h3 error counters */
+	H3_ST_H3_NO_ERROR,
+	H3_ST_H3_GENERAL_PROTOCOL_ERROR,
+	H3_ST_H3_INTERNAL_ERROR,
+	H3_ST_H3_STREAM_CREATION_ERROR,
+	H3_ST_H3_CLOSED_CRITICAL_STREAM,
+	H3_ST_H3_FRAME_UNEXPECTED,
+	H3_ST_H3_FRAME_ERROR,
+	H3_ST_H3_EXCESSIVE_LOAD,
+	H3_ST_H3_ID_ERROR,
+	H3_ST_H3_SETTINGS_ERROR,
+	H3_ST_H3_MISSING_SETTINGS,
+	H3_ST_H3_REQUEST_REJECTED,
+	H3_ST_H3_REQUEST_CANCELLED,
+	H3_ST_H3_REQUEST_INCOMPLETE,
+	H3_ST_H3_MESSAGE_ERROR,
+	H3_ST_H3_CONNECT_ERROR,
+	H3_ST_H3_VERSION_FALLBACK,
+	/* QPACK error counters */
+	H3_ST_QPACK_DECOMPRESSION_FAILED,
+	H3_ST_QPACK_ENCODER_STREAM_ERROR,
+	H3_ST_QPACK_DECODER_STREAM_ERROR,
+	H3_STATS_COUNT /* must be the last */
+};
+
+/* Name/description of each h3 stats field, indexed by the H3_ST_* enum. */
+static struct name_desc h3_stats[] = {
+	/* h3 frame type counters */
+	[H3_ST_DATA] = { .name = "h3_data",
+	                 .desc = "Total number of DATA frames received" },
+	[H3_ST_HEADERS] = { .name = "h3_headers",
+	                    .desc = "Total number of HEADERS frames received" },
+	[H3_ST_CANCEL_PUSH] = { .name = "h3_cancel_push",
+	                        .desc = "Total number of CANCEL_PUSH frames received" },
+	[H3_ST_PUSH_PROMISE] = { .name = "h3_push_promise",
+	                         .desc = "Total number of PUSH_PROMISE frames received" },
+	[H3_ST_MAX_PUSH_ID] = { .name = "h3_max_push_id",
+	                        .desc = "Total number of MAX_PUSH_ID frames received" },
+	[H3_ST_GOAWAY] = { .name = "h3_goaway",
+	                   .desc = "Total number of GOAWAY frames received" },
+	[H3_ST_SETTINGS] = { .name = "h3_settings",
+	                     .desc = "Total number of SETTINGS frames received" },
+	/* h3 error counters */
+	[H3_ST_H3_NO_ERROR] = { .name = "h3_no_error",
+	                        .desc = "Total number of H3_NO_ERROR errors received" },
+	[H3_ST_H3_GENERAL_PROTOCOL_ERROR] = { .name = "h3_general_protocol_error",
+	                                      .desc = "Total number of H3_GENERAL_PROTOCOL_ERROR errors received" },
+	[H3_ST_H3_INTERNAL_ERROR] = { .name = "h3_internal_error",
+	                              .desc = "Total number of H3_INTERNAL_ERROR errors received" },
+	[H3_ST_H3_STREAM_CREATION_ERROR] = { .name = "h3_stream_creation_error",
+	                                     .desc = "Total number of H3_STREAM_CREATION_ERROR errors received" },
+	[H3_ST_H3_CLOSED_CRITICAL_STREAM] = { .name = "h3_closed_critical_stream",
+	                                      .desc = "Total number of H3_CLOSED_CRITICAL_STREAM errors received" },
+	[H3_ST_H3_FRAME_UNEXPECTED] = { .name = "h3_frame_unexpected",
+	                                .desc = "Total number of H3_FRAME_UNEXPECTED errors received" },
+	[H3_ST_H3_FRAME_ERROR] = { .name = "h3_frame_error",
+	                           .desc = "Total number of H3_FRAME_ERROR errors received" },
+	[H3_ST_H3_EXCESSIVE_LOAD] = { .name = "h3_excessive_load",
+	                              .desc = "Total number of H3_EXCESSIVE_LOAD errors received" },
+	[H3_ST_H3_ID_ERROR] = { .name = "h3_id_error",
+	                        .desc = "Total number of H3_ID_ERROR errors received" },
+	[H3_ST_H3_SETTINGS_ERROR] = { .name = "h3_settings_error",
+	                              .desc = "Total number of H3_SETTINGS_ERROR errors received" },
+	[H3_ST_H3_MISSING_SETTINGS] = { .name = "h3_missing_settings",
+	                                .desc = "Total number of H3_MISSING_SETTINGS errors received" },
+	[H3_ST_H3_REQUEST_REJECTED] = { .name = "h3_request_rejected",
+	                                .desc = "Total number of H3_REQUEST_REJECTED errors received" },
+	[H3_ST_H3_REQUEST_CANCELLED] = { .name = "h3_request_cancelled",
+	                                 .desc = "Total number of H3_REQUEST_CANCELLED errors received" },
+	[H3_ST_H3_REQUEST_INCOMPLETE] = { .name = "h3_request_incomplete",
+	                                  .desc = "Total number of H3_REQUEST_INCOMPLETE errors received" },
+	[H3_ST_H3_MESSAGE_ERROR] = { .name = "h3_message_error",
+	                             .desc = "Total number of H3_MESSAGE_ERROR errors received" },
+	[H3_ST_H3_CONNECT_ERROR] = { .name = "h3_connect_error",
+	                             .desc = "Total number of H3_CONNECT_ERROR errors received" },
+	[H3_ST_H3_VERSION_FALLBACK] = { .name = "h3_version_fallback",
+	                                .desc = "Total number of H3_VERSION_FALLBACK errors received" },
+	/* QPACK error counters */
+	/* fixed typo : was "pack_decompression_failed", inconsistent with the
+	 * other qpack_* field names below.
+	 */
+	[H3_ST_QPACK_DECOMPRESSION_FAILED] = { .name = "qpack_decompression_failed",
+	                                       .desc = "Total number of QPACK_DECOMPRESSION_FAILED errors received" },
+	[H3_ST_QPACK_ENCODER_STREAM_ERROR] = { .name = "qpack_encoder_stream_error",
+	                                       .desc = "Total number of QPACK_ENCODER_STREAM_ERROR errors received" },
+	[H3_ST_QPACK_DECODER_STREAM_ERROR] = { .name = "qpack_decoder_stream_error",
+	                                       .desc = "Total number of QPACK_DECODER_STREAM_ERROR errors received" },
+};
+
+/* Per-frontend h3 counters instance, registered through h3_stats_module and
+ * retrieved at runtime with EXTRA_COUNTERS_GET() (cf h3_init() in h3.c).
+ */
+static struct h3_counters {
+	/* h3 frame type counters */
+	long long h3_data; /* total number of DATA frames received */
+	long long h3_headers; /* total number of HEADERS frames received */
+	long long h3_cancel_push; /* total number of CANCEL_PUSH frames received */
+	long long h3_push_promise; /* total number of PUSH_PROMISE frames received */
+	long long h3_max_push_id; /* total number of MAX_PUSH_ID frames received */
+	long long h3_goaway; /* total number of GOAWAY frames received */
+	long long h3_settings; /* total number of SETTINGS frames received */
+	/* h3 error counters */
+	long long h3_no_error; /* total number of H3_NO_ERROR errors received */
+	long long h3_general_protocol_error; /* total number of H3_GENERAL_PROTOCOL_ERROR errors received */
+	long long h3_internal_error; /* total number of H3_INTERNAL_ERROR errors received */
+	long long h3_stream_creation_error; /* total number of H3_STREAM_CREATION_ERROR errors received */
+	long long h3_closed_critical_stream; /* total number of H3_CLOSED_CRITICAL_STREAM errors received */
+	long long h3_frame_unexpected; /* total number of H3_FRAME_UNEXPECTED errors received */
+	long long h3_frame_error; /* total number of H3_FRAME_ERROR errors received */
+	long long h3_excessive_load; /* total number of H3_EXCESSIVE_LOAD errors received */
+	long long h3_id_error; /* total number of H3_ID_ERROR errors received */
+	long long h3_settings_error; /* total number of H3_SETTINGS_ERROR errors received */
+	long long h3_missing_settings; /* total number of H3_MISSING_SETTINGS errors received */
+	long long h3_request_rejected; /* total number of H3_REQUEST_REJECTED errors received */
+	long long h3_request_cancelled; /* total number of H3_REQUEST_CANCELLED errors received */
+	long long h3_request_incomplete; /* total number of H3_REQUEST_INCOMPLETE errors received */
+	long long h3_message_error; /* total number of H3_MESSAGE_ERROR errors received */
+	long long h3_connect_error; /* total number of H3_CONNECT_ERROR errors received */
+	long long h3_version_fallback; /* total number of H3_VERSION_FALLBACK errors received */
+	/* QPACK error counters */
+	long long qpack_decompression_failed; /* total number of QPACK_DECOMPRESSION_FAILED errors received */
+	long long qpack_encoder_stream_error; /* total number of QPACK_ENCODER_STREAM_ERROR errors received */
+	long long qpack_decoder_stream_error; /* total number of QPACK_DECODER_STREAM_ERROR errors received */
+} h3_counters;
+
+/* Stats module callback : fill the <stats> field array from the h3 counters
+ * instance pointed to by <data>. Field indexes must match the H3_ST_* enum.
+ */
+static void h3_fill_stats(void *data, struct field *stats)
+{
+	struct h3_counters *counters = data;
+
+	/* h3 frame type counters */
+	stats[H3_ST_DATA] = mkf_u64(FN_COUNTER, counters->h3_data);
+	stats[H3_ST_HEADERS] = mkf_u64(FN_COUNTER, counters->h3_headers);
+	stats[H3_ST_CANCEL_PUSH] = mkf_u64(FN_COUNTER, counters->h3_cancel_push);
+	stats[H3_ST_PUSH_PROMISE] = mkf_u64(FN_COUNTER, counters->h3_push_promise);
+	stats[H3_ST_MAX_PUSH_ID] = mkf_u64(FN_COUNTER, counters->h3_max_push_id);
+	stats[H3_ST_GOAWAY] = mkf_u64(FN_COUNTER, counters->h3_goaway);
+	stats[H3_ST_SETTINGS] = mkf_u64(FN_COUNTER, counters->h3_settings);
+	/* h3 error counters */
+	stats[H3_ST_H3_NO_ERROR] = mkf_u64(FN_COUNTER, counters->h3_no_error);
+	stats[H3_ST_H3_GENERAL_PROTOCOL_ERROR] = mkf_u64(FN_COUNTER, counters->h3_general_protocol_error);
+	stats[H3_ST_H3_INTERNAL_ERROR] = mkf_u64(FN_COUNTER, counters->h3_internal_error);
+	stats[H3_ST_H3_STREAM_CREATION_ERROR] = mkf_u64(FN_COUNTER, counters->h3_stream_creation_error);
+	stats[H3_ST_H3_CLOSED_CRITICAL_STREAM] = mkf_u64(FN_COUNTER, counters->h3_closed_critical_stream);
+	stats[H3_ST_H3_FRAME_UNEXPECTED] = mkf_u64(FN_COUNTER, counters->h3_frame_unexpected);
+	stats[H3_ST_H3_FRAME_ERROR] = mkf_u64(FN_COUNTER, counters->h3_frame_error);
+	stats[H3_ST_H3_EXCESSIVE_LOAD] = mkf_u64(FN_COUNTER, counters->h3_excessive_load);
+	stats[H3_ST_H3_ID_ERROR] = mkf_u64(FN_COUNTER, counters->h3_id_error);
+	stats[H3_ST_H3_SETTINGS_ERROR] = mkf_u64(FN_COUNTER, counters->h3_settings_error);
+	stats[H3_ST_H3_MISSING_SETTINGS] = mkf_u64(FN_COUNTER, counters->h3_missing_settings);
+	stats[H3_ST_H3_REQUEST_REJECTED] = mkf_u64(FN_COUNTER, counters->h3_request_rejected);
+	stats[H3_ST_H3_REQUEST_CANCELLED] = mkf_u64(FN_COUNTER, counters->h3_request_cancelled);
+	stats[H3_ST_H3_REQUEST_INCOMPLETE] = mkf_u64(FN_COUNTER, counters->h3_request_incomplete);
+	stats[H3_ST_H3_MESSAGE_ERROR] = mkf_u64(FN_COUNTER, counters->h3_message_error);
+	stats[H3_ST_H3_CONNECT_ERROR] = mkf_u64(FN_COUNTER, counters->h3_connect_error);
+	stats[H3_ST_H3_VERSION_FALLBACK] = mkf_u64(FN_COUNTER, counters->h3_version_fallback);
+	/* QPACK error counters */
+	stats[H3_ST_QPACK_DECOMPRESSION_FAILED] = mkf_u64(FN_COUNTER, counters->qpack_decompression_failed);
+	stats[H3_ST_QPACK_ENCODER_STREAM_ERROR] = mkf_u64(FN_COUNTER, counters->qpack_encoder_stream_error);
+	stats[H3_ST_QPACK_DECODER_STREAM_ERROR] = mkf_u64(FN_COUNTER, counters->qpack_decoder_stream_error);
+}
+
+/* h3 stats module descriptor : frontend-scoped, clearable counters. */
+struct stats_module h3_stats_module = {
+	.name          = "h3",
+	.fill_stats    = h3_fill_stats,
+	.stats         = h3_stats,
+	.stats_count   = H3_STATS_COUNT,
+	.counters      = &h3_counters,
+	.counters_size = sizeof(h3_counters),
+	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE),
+	.clearable     = 1,
+};
+
+/* Register the module on the stats framework at startup. */
+INITCALL1(STG_REGISTER, stats_register_module, &h3_stats_module);
+
+void h3_inc_err_cnt(struct h3_counters *ctrs, int error_code)
+{
+ switch (error_code) {
+ case H3_NO_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_no_error);
+ break;
+ case H3_GENERAL_PROTOCOL_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_general_protocol_error);
+ break;
+ case H3_INTERNAL_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_internal_error);
+ break;
+ case H3_STREAM_CREATION_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_stream_creation_error);
+ break;
+ case H3_CLOSED_CRITICAL_STREAM:
+ HA_ATOMIC_INC(&ctrs->h3_closed_critical_stream);
+ break;
+ case H3_FRAME_UNEXPECTED:
+ HA_ATOMIC_INC(&ctrs->h3_frame_unexpected);
+ break;
+ case H3_FRAME_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_frame_error);
+ break;
+ case H3_EXCESSIVE_LOAD:
+ HA_ATOMIC_INC(&ctrs->h3_excessive_load);
+ break;
+ case H3_ID_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_id_error);
+ break;
+ case H3_SETTINGS_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_settings_error);
+ break;
+ case H3_MISSING_SETTINGS:
+ HA_ATOMIC_INC(&ctrs->h3_missing_settings);
+ break;
+ case H3_REQUEST_REJECTED:
+ HA_ATOMIC_INC(&ctrs->h3_request_rejected);
+ break;
+ case H3_REQUEST_CANCELLED:
+ HA_ATOMIC_INC(&ctrs->h3_request_cancelled);
+ break;
+ case H3_REQUEST_INCOMPLETE:
+ HA_ATOMIC_INC(&ctrs->h3_request_incomplete);
+ break;
+ case H3_MESSAGE_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_message_error);
+ break;
+ case H3_CONNECT_ERROR:
+ HA_ATOMIC_INC(&ctrs->h3_connect_error);
+ break;
+ case H3_VERSION_FALLBACK:
+ HA_ATOMIC_INC(&ctrs->h3_version_fallback);
+ break;
+ case QPACK_DECOMPRESSION_FAILED:
+ HA_ATOMIC_INC(&ctrs->qpack_decompression_failed);
+ break;
+ case QPACK_ENCODER_STREAM_ERROR:
+ HA_ATOMIC_INC(&ctrs->qpack_encoder_stream_error);
+ break;
+ case QPACK_DECODER_STREAM_ERROR:
+ HA_ATOMIC_INC(&ctrs->qpack_decoder_stream_error);
+ break;
+ default:
+ break;
+
+ }
+}
+
+/* Increment in <counters> the counter matching the h3 frame type <ftype>.
+ * Unknown types are silently ignored.
+ */
+void h3_inc_frame_type_cnt(struct h3_counters *counters, int ftype)
+{
+	switch (ftype) {
+	case H3_FT_DATA:
+		HA_ATOMIC_INC(&counters->h3_data);
+		break;
+	case H3_FT_HEADERS:
+		HA_ATOMIC_INC(&counters->h3_headers);
+		break;
+	case H3_FT_CANCEL_PUSH:
+		HA_ATOMIC_INC(&counters->h3_cancel_push);
+		break;
+	case H3_FT_PUSH_PROMISE:
+		HA_ATOMIC_INC(&counters->h3_push_promise);
+		break;
+	case H3_FT_MAX_PUSH_ID:
+		HA_ATOMIC_INC(&counters->h3_max_push_id);
+		break;
+	case H3_FT_GOAWAY:
+		HA_ATOMIC_INC(&counters->h3_goaway);
+		break;
+	case H3_FT_SETTINGS:
+		HA_ATOMIC_INC(&counters->h3_settings);
+		break;
+	default:
+		break;
+	}
+}
diff --git a/src/haproxy.c b/src/haproxy.c
new file mode 100644
index 0000000..4c739f4
--- /dev/null
+++ b/src/haproxy.c
@@ -0,0 +1,3962 @@
+/*
+ * HAProxy : High Availability-enabled HTTP/TCP proxy
+ * Copyright 2000-2024 Willy Tarreau <willy@haproxy.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <sys/resource.h>
+#include <sys/utsname.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <syslog.h>
+#include <grp.h>
+
+#ifdef USE_THREAD
+#include <pthread.h>
+#endif
+
+#ifdef USE_CPU_AFFINITY
+#include <sched.h>
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+#include <sys/param.h>
+#ifdef __FreeBSD__
+#include <sys/cpuset.h>
+#endif
+#endif
+#endif
+
+#if defined(USE_PRCTL)
+#include <sys/prctl.h>
+#endif
+
+#if defined(USE_PROCCTL)
+#include <sys/procctl.h>
+#endif
+
+#ifdef DEBUG_FULL
+#include <assert.h>
+#endif
+#if defined(USE_SYSTEMD)
+#include <systemd/sd-daemon.h>
+#endif
+
+#include <import/sha1.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/base64.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgcond.h>
+#include <haproxy/cfgdiag.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/cli.h>
+#include <haproxy/clock.h>
+#include <haproxy/connection.h>
+#ifdef USE_CPU_AFFINITY
+#include <haproxy/cpuset.h>
+#endif
+#include <haproxy/debug.h>
+#include <haproxy/dns.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/filters.h>
+#include <haproxy/global.h>
+#include <haproxy/hlua.h>
+#include <haproxy/http_rules.h>
+#if defined(USE_LINUX_CAP)
+#include <haproxy/linuxcap.h>
+#endif
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/mworker.h>
+#include <haproxy/namespace.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/pattern.h>
+#include <haproxy/peers.h>
+#include <haproxy/pool.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/signal.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/vars.h>
+#include <haproxy/version.h>
+
+
+/* array of init calls for older platforms */
+DECLARE_INIT_STAGES;
+
+/* create a read_mostly section to hold variables which are accessed a lot
+ * but which almost never change. The purpose is to isolate them in their
+ * own cache lines where they don't risk to be perturbated by write accesses
+ * to neighbor variables. We need to create an empty aligned variable for
+ * this. The fact that the variable is of size zero means that it will be
+ * eliminated at link time if no other variable uses it, but alignment will
+ * be respected.
+ */
+empty_t __read_mostly_align HA_SECTION("read_mostly") ALIGNED(64);
+
+#ifdef BUILD_FEATURES
+char *build_features = BUILD_FEATURES;
+#else
+char *build_features = "";
+#endif
+
+/* list of config files */
+static struct list cfg_cfgfiles = LIST_HEAD_INIT(cfg_cfgfiles);
+int pid; /* current process id */
+
+static unsigned long stopping_tgroup_mask; /* Thread groups acknowledging stopping */
+
+/* global options: process-wide settings, mostly overridden later by the
+ * "global" section of the configuration file. Fields not listed here are
+ * implicitly zero-initialized (see the trailing comment).
+ */
+struct global global = {
+ .hard_stop_after = TICK_ETERNITY,
+ .close_spread_time = TICK_ETERNITY,
+ .close_spread_end = TICK_ETERNITY,
+ .numa_cpu_mapping = 1,
+ .nbthread = 0,
+ .req_count = 0,
+ .loggers = LIST_HEAD_INIT(global.loggers),
+ .maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U,
+ .comp_rate_lim = 0,
+ .ssl_server_verify = SSL_SERVER_VERIFY_REQUIRED,
+ .unix_bind = {
+ .ux = {
+ /* -1/-1/0 mean "not set": keep default socket ownership/permissions */
+ .uid = -1,
+ .gid = -1,
+ .mode = 0,
+ }
+ },
+ .tune = {
+ .options = GTUNE_LISTENER_MQ_OPT,
+ /* round BUFSIZE up to a multiple of 2*sizeof(void *) */
+ .bufsize = (BUFSIZE + 2*sizeof(void *) - 1) & -(2*sizeof(void *)),
+ .maxrewrite = MAXREWRITE,
+ .reserved_bufs = RESERVED_BUFS,
+ .pattern_cache = DEFAULT_PAT_LRU_SIZE,
+ .pool_low_ratio = 20,
+ .pool_high_ratio = 25,
+ .max_http_hdr = MAX_HTTP_HDR,
+#ifdef USE_OPENSSL
+ .sslcachesize = SSLCACHESIZE,
+#endif
+ .comp_maxlevel = 1,
+#ifdef DEFAULT_IDLE_TIMER
+ .idle_timer = DEFAULT_IDLE_TIMER,
+#else
+ .idle_timer = 1000, /* 1 second */
+#endif
+ .nb_stk_ctr = MAX_SESS_STKCTR,
+ .default_shards = -2, /* by-group */
+#ifdef USE_QUIC
+ .quic_backend_max_idle_timeout = QUIC_TP_DFLT_BACK_MAX_IDLE_TIMEOUT,
+ .quic_frontend_max_idle_timeout = QUIC_TP_DFLT_FRONT_MAX_IDLE_TIMEOUT,
+ .quic_frontend_max_streams_bidi = QUIC_TP_DFLT_FRONT_MAX_STREAMS_BIDI,
+ .quic_reorder_ratio = QUIC_DFLT_REORDER_RATIO,
+ .quic_retry_threshold = QUIC_DFLT_RETRY_THRESHOLD,
+ .quic_max_frame_loss = QUIC_DFLT_MAX_FRAME_LOSS,
+ .quic_streams_buf = 30,
+#endif /* USE_QUIC */
+ },
+#ifdef USE_OPENSSL
+#ifdef DEFAULT_MAXSSLCONN
+ .maxsslconn = DEFAULT_MAXSSLCONN,
+#endif
+#endif
+ /* others NULL OK */
+};
+
+/*********************************************************************/
+
+int stopping; /* non zero means stopping in progress */
+int killed; /* non zero means a hard-stop is triggered */
+int jobs = 0; /* number of active jobs (conns, listeners, active tasks, ...) */
+int unstoppable_jobs = 0; /* number of active jobs that can't be stopped during a soft stop */
+int active_peers = 0; /* number of active peers (connection attempts and connected) */
+int connected_peers = 0; /* number of connected peers (verified ones) */
+int arg_mode = 0; /* MODE_DEBUG etc as passed on command line ... */
+char *change_dir = NULL; /* set when -C is passed */
+char *check_condition = NULL; /* check condition passed to -cc */
+
+/* Here we store information about the pids of the processes we may pause
+ * or kill. We will send them a signal every 10 ms until we can bind to all
+ * our ports. With 200 retries, that's about 2 seconds.
+ */
+#define MAX_START_RETRIES 200
+static int *oldpids = NULL;
+static int oldpids_sig; /* use USR1 or TERM */
+
+/* Path to the unix socket we use to retrieve listener sockets from the old process */
+static const char *old_unixsocket;
+
+int atexit_flag = 0;
+
+int nb_oldpids = 0;
+const int zero = 0;
+const int one = 1;
+const struct linger nolinger = { .l_onoff = 1, .l_linger = 0 };
+
+char hostname[MAX_HOSTNAME_LEN];
+char *localpeer = NULL;
+static char *kwd_dump = NULL; // list of keyword dumps to produce
+
+static char **old_argv = NULL; /* previous argv but cleaned up */
+
+struct list proc_list = LIST_HEAD_INIT(proc_list);
+
+int master = 0; /* 1 if in master, 0 if in child */
+unsigned int rlim_fd_cur_at_boot = 0;
+unsigned int rlim_fd_max_at_boot = 0;
+
+/* per-boot randomness */
+unsigned char boot_seed[20]; /* per-boot random seed (160 bits initially) */
+
+/* takes the thread config in argument or NULL for any thread */
+static void *run_thread_poll_loop(void *data);
+
+/* bitfield of a few warnings to emit just once (WARN_*) */
+unsigned int warned = 0;
+
+/* set if experimental features have been used for the current process */
+unsigned int tainted = 0;
+
+unsigned int experimental_directives_allowed = 0;
+
+/* Checks whether config keyword <kw> is flagged experimental, and if so,
+ * whether experimental directives were enabled ("expose-experimental-directives"
+ * sets <experimental_directives_allowed>). On refusal, fills <errmsg> with a
+ * message referencing <file>:<linenum> and returns 1. Returns 0 when the
+ * keyword is accepted; an accepted experimental keyword marks the process
+ * tainted with TAINTED_CONFIG_EXP_KW_DECLARED.
+ */
+int check_kw_experimental(struct cfg_keyword *kw, const char *file, int linenum,
+ char **errmsg)
+{
+ if (kw->flags & KWF_EXPERIMENTAL) {
+ if (!experimental_directives_allowed) {
+ memprintf(errmsg, "parsing [%s:%d] : '%s' directive is experimental, must be allowed via a global 'expose-experimental-directives'",
+ file, linenum, kw->kw);
+ return 1;
+ }
+ mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+ }
+
+ return 0;
+}
+
+/* master CLI configuration (-S flag) */
+struct list mworker_cli_conf = LIST_HEAD_INIT(mworker_cli_conf);
+
+/* These are strings to be reported in the output of "haproxy -vv". They may
+ * either be constants (in which case must_free must be zero) or dynamically
+ * allocated strings to pass to free() on exit, and in this case must_free
+ * must be non-zero.
+ */
+struct list build_opts_list = LIST_HEAD_INIT(build_opts_list);
+struct build_opts_str {
+ struct list list;
+ const char *str;
+ int must_free;
+};
+
+/*********************************************************************/
+/* general purpose functions ***************************************/
+/*********************************************************************/
+
+/* used to register some build option strings at boot. Set must_free to
+ * non-zero if the string must be freed upon exit.
+ * Note: this runs at init time only; an allocation failure is fatal and
+ * aborts the process with exit(1).
+ */
+void hap_register_build_opts(const char *str, int must_free)
+{
+ struct build_opts_str *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->str = str;
+ b->must_free = must_free;
+ LIST_APPEND(&build_opts_list, &b->list);
+}
+
+/* returns the first build option when <curr> is NULL, or the next one when
+ * <curr> is passed the last returned value. NULL when there is no more entries
+ * in the list. Otherwise the returned pointer is &opt->str so the caller can
+ * print it as *ret.
+ */
+const char **hap_get_next_build_opt(const char **curr)
+{
+ struct build_opts_str *head, *start;
+
+ /* fake element whose ->list is the list head itself */
+ head = container_of(&build_opts_list, struct build_opts_str, list);
+
+ /* recover the entry owning <curr>, or start from the head */
+ if (curr)
+ start = container_of(curr, struct build_opts_str, str);
+ else
+ start = head;
+
+ /* advance to the next list element */
+ start = container_of(start->list.n, struct build_opts_str, list);
+
+ /* wrapped around: end of list */
+ if (start == head)
+ return NULL;
+
+ return &start->str;
+}
+
+/* used to make a new feature appear in the build_features list at boot time.
+ * The feature must be in the format "XXX" without the leading "+" which will
+ * be automatically appended.
+ * On allocation failure the feature is silently not registered. The previous
+ * list is freed only when it was dynamically allocated by a former call
+ * (tracked via <must_free>), never the initial static string.
+ */
+void hap_register_feature(const char *name)
+{
+ static int must_free = 0;
+ int new_len = strlen(build_features) + 2 + strlen(name);
+ char *new_features;
+
+ new_features = malloc(new_len + 1);
+ if (!new_features)
+ return;
+
+ /* snprintf() fully rewrites the buffer from <build_features>, so no
+ * preliminary copy into <new_features> is needed.
+ */
+ snprintf(new_features, new_len + 1, "%s +%s", build_features, name);
+
+ if (must_free)
+ ha_free(&build_features);
+
+ build_features = new_features;
+ must_free = 1;
+}
+
+#define VERSION_MAX_ELTS 7
+
+/* This function splits an haproxy version string into an array of integers.
+ * The syntax of the supported version string is the following:
+ *
+ * <a>[.<b>[.<c>[.<d>]]][-{dev,pre,rc}<f>][-*][-<g>]
+ *
+ * This validates for example:
+ * 1.2.1-pre2, 1.2.1, 1.2.10.1, 1.3.16-rc1, 1.4-dev3, 1.5-dev18, 1.5-dev18-43
+ * 2.4-dev18-f6818d-20
+ *
+ * The result is set in a array of <VERSION_MAX_ELTS> elements. Each letter has
+ * one fixed place in the array. The tags take a numeric value called <e> which
+ * defaults to 3. "dev" is 1, "rc" and "pre" are 2. Numbers not encountered are
+ * considered as zero (hence 1.5 and 1.5.0 are the same).
+ *
+ * The resulting values are:
+ * 1.2.1-pre2 1, 2, 1, 0, 2, 2, 0
+ * 1.2.1 1, 2, 1, 0, 3, 0, 0
+ * 1.2.10.1 1, 2, 10, 1, 3, 0, 0
+ * 1.3.16-rc1 1, 3, 16, 0, 2, 1, 0
+ * 1.4-dev3 1, 4, 0, 0, 1, 3, 0
+ * 1.5-dev18 1, 5, 0, 0, 1, 18, 0
+ * 1.5-dev18-43 1, 5, 0, 0, 1, 18, 43
+ * 2.4-dev18-f6818d-20 2, 4, 0, 0, 1, 18, 20
+ *
+ * The function returns non-zero if the conversion succeeded, or zero if it
+ * failed.
+ */
+/* Parses <version> into the <value> array of VERSION_MAX_ELTS unsigned ints
+ * (see the format description above). Returns non-zero on success, zero on
+ * a malformed version string.
+ */
+int split_version(const char *version, unsigned int *value)
+{
+ const char *p, *s;
+ char *error;
+ int nelts;
+
+ /* Initialize array with zeroes */
+ for (nelts = 0; nelts < VERSION_MAX_ELTS; nelts++)
+ value[nelts] = 0;
+ value[4] = 3; /* default tag value: release (no -dev/-rc/-pre) */
+
+ p = version;
+
+ /* If the version number is empty, return false */
+ if (*p == '\0')
+ return 0;
+
+ /* Convert first number <a> */
+ value[0] = strtol(p, &error, 10);
+ p = error + 1;
+ if (*error == '\0')
+ return 1;
+ if (*error == '-')
+ goto split_version_tag;
+ if (*error != '.')
+ return 0;
+
+ /* Convert second number <b> */
+ value[1] = strtol(p, &error, 10);
+ p = error + 1;
+ if (*error == '\0')
+ return 1;
+ if (*error == '-')
+ goto split_version_tag;
+ if (*error != '.')
+ return 0;
+
+ /* Convert third number <c> */
+ value[2] = strtol(p, &error, 10);
+ p = error + 1;
+ if (*error == '\0')
+ return 1;
+ if (*error == '-')
+ goto split_version_tag;
+ if (*error != '.')
+ return 0;
+
+ /* Convert fourth number <d> */
+ value[3] = strtol(p, &error, 10);
+ p = error + 1;
+ if (*error == '\0')
+ return 1;
+ if (*error != '-')
+ return 0;
+
+ split_version_tag:
+ /* Check for commit number */
+ if (*p >= '0' && *p <= '9')
+ goto split_version_commit;
+
+ /* Read tag */
+ if (strncmp(p, "dev", 3) == 0) { value[4] = 1; p += 3; }
+ else if (strncmp(p, "rc", 2) == 0) { value[4] = 2; p += 2; }
+ else if (strncmp(p, "pre", 3) == 0) { value[4] = 2; p += 3; }
+ else
+ goto split_version_commit;
+
+ /* Convert tag number */
+ value[5] = strtol(p, &error, 10);
+ p = error + 1;
+ if (*error == '\0')
+ return 1;
+ if (*error != '-')
+ return 0;
+
+ split_version_commit:
+ /* Search the last "-", to skip an intermediate hash like "f6818d" */
+ s = strrchr(p, '-');
+ if (s) {
+ s++;
+ if (*s == '\0')
+ return 0;
+ value[6] = strtol(s, &error, 10);
+ if (*error != '\0')
+ value[6] = 0;
+ return 1;
+ }
+
+ /* convert the version */
+ value[6] = strtol(p, &error, 10);
+ if (*error != '\0')
+ value[6] = 0;
+
+ return 1;
+}
+
+/* This function compares the current haproxy version with an arbitrary version
+ * string. It returns:
+ * -1 : the version in argument is older than the current haproxy version
+ * 0 : the version in argument is the same as the current haproxy version
+ * 1 : the version in argument is newer than the current haproxy version
+ *
+ * Or some errors:
+ * -2 : the current haproxy version is not parsable
+ * -3 : the version in argument is not parsable
+ */
+int compare_current_version(const char *version)
+{
+ unsigned int loc[VERSION_MAX_ELTS];
+ unsigned int mod[VERSION_MAX_ELTS];
+ int i;
+
+ /* split versions */
+ if (!split_version(haproxy_version, loc))
+ return -2;
+ if (!split_version(version, mod))
+ return -3;
+
+ /* compare versions element by element, most significant first */
+ for (i = 0; i < VERSION_MAX_ELTS; i++) {
+ if (mod[i] < loc[i])
+ return -1;
+ else if (mod[i] > loc[i])
+ return 1;
+ }
+ return 0;
+}
+
+/* Prints the product version banner on stdout, along with the known-bugs URL
+ * (specialized per base version when the version has at least two dots) and
+ * the runtime OS identification from uname().
+ */
+void display_version()
+{
+ struct utsname utsname;
+
+ printf("HAProxy version %s %s - https://haproxy.org/\n"
+ PRODUCT_STATUS "\n", haproxy_version, haproxy_date);
+
+ if (strlen(PRODUCT_URL_BUGS) > 0) {
+ char base_version[20];
+ int dots = 0;
+ char *del;
+
+ /* only retrieve the base version without distro-specific extensions:
+ * stop at the first character which is neither a digit nor a dot.
+ * NOTE(review): base_version is 20 bytes; assumes the numeric prefix
+ * of haproxy_version always fits -- true for upstream version strings.
+ */
+ for (del = haproxy_version; *del; del++) {
+ if (*del == '.')
+ dots++;
+ else if (*del < '0' || *del > '9')
+ break;
+ }
+
+ strlcpy2(base_version, haproxy_version, del - haproxy_version + 1);
+ if (dots < 2)
+ printf("Known bugs: https://github.com/haproxy/haproxy/issues?q=is:issue+is:open\n");
+ else
+ printf("Known bugs: " PRODUCT_URL_BUGS "\n", base_version);
+ }
+
+ if (uname(&utsname) == 0) {
+ printf("Running on: %s %s %s %s\n", utsname.sysname, utsname.release, utsname.version, utsname.machine);
+ }
+}
+
+/* Prints on stdout the build options (target, compiler, flags), the feature
+ * list, a few default tunables, then the registered build option strings and
+ * the lists of pollers, muxes, services and filters.
+ */
+static void display_build_opts()
+{
+ const char **opt;
+
+ printf("Build options :"
+#ifdef BUILD_TARGET
+ "\n TARGET = " BUILD_TARGET
+#endif
+#ifdef BUILD_CPU
+ "\n CPU = " BUILD_CPU
+#endif
+#ifdef BUILD_CC
+ "\n CC = " BUILD_CC
+#endif
+#ifdef BUILD_CFLAGS
+ "\n CFLAGS = " BUILD_CFLAGS
+#endif
+#ifdef BUILD_OPTIONS
+ "\n OPTIONS = " BUILD_OPTIONS
+#endif
+#ifdef BUILD_DEBUG
+ "\n DEBUG = " BUILD_DEBUG
+#endif
+ "\n\nFeature list : %s"
+ "\n\nDefault settings :"
+ "\n bufsize = %d, maxrewrite = %d, maxpollevents = %d"
+ "\n\n",
+ build_features, BUFSIZE, MAXREWRITE, MAX_POLL_EVENTS);
+
+ /* one registered build option string per line */
+ for (opt = NULL; (opt = hap_get_next_build_opt(opt)); puts(*opt))
+ ;
+
+ putchar('\n');
+
+ list_pollers(stdout);
+ putchar('\n');
+ list_mux_proto(stdout);
+ putchar('\n');
+ list_services(stdout);
+ putchar('\n');
+ list_filters(stdout);
+ putchar('\n');
+}
+
+/*
+ * This function prints the command line usage on stderr (preceded by the
+ * version banner on stdout) and exits with status 1; it never returns.
+ */
+static void usage(char *name)
+{
+ display_version();
+ fprintf(stderr,
+ "Usage : %s [-f <cfgfile|cfgdir>]* [ -vdV"
+ "D ] [ -n <maxconn> ] [ -N <maxpconn> ]\n"
+ " [ -p <pidfile> ] [ -m <max megs> ] [ -C <dir> ] [-- <cfgfile>*]\n"
+ " -v displays version ; -vv shows known build options.\n"
+ " -d enters debug mode ; -db only disables background mode.\n"
+ " -dM[<byte>,help,...] debug memory (default: poison with <byte>/0x50)\n"
+ " -dt activate traces on stderr\n"
+ " -V enters verbose mode (disables quiet mode)\n"
+ " -D goes daemon ; -C changes to <dir> before loading files.\n"
+ " -W master-worker mode.\n"
+#if defined(USE_SYSTEMD)
+ " -Ws master-worker mode with systemd notify support.\n"
+#endif
+ " -q quiet mode : don't display messages\n"
+ " -c check mode : only check config files and exit\n"
+ " -cc check condition : evaluate a condition and exit\n"
+ " -n sets the maximum total # of connections (uses ulimit -n)\n"
+ " -m limits the usable amount of memory (in MB)\n"
+ " -N sets the default, per-proxy maximum # of connections (%d)\n"
+ " -L set local peer name (default to hostname)\n"
+ " -p writes pids of all children to this file\n"
+ " -dC[[key],line] display the configuration file, if there is a key, the file will be anonymised\n"
+#if defined(USE_EPOLL)
+ " -de disables epoll() usage even when available\n"
+#endif
+#if defined(USE_KQUEUE)
+ " -dk disables kqueue() usage even when available\n"
+#endif
+#if defined(USE_EVPORTS)
+ " -dv disables event ports usage even when available\n"
+#endif
+#if defined(USE_POLL)
+ " -dp disables poll() usage even when available\n"
+#endif
+#if defined(USE_LINUX_SPLICE)
+ " -dS disables splice usage (broken on old kernels)\n"
+#endif
+#if defined(USE_GETADDRINFO)
+ " -dG disables getaddrinfo() usage\n"
+#endif
+#if defined(SO_REUSEPORT)
+ " -dR disables SO_REUSEPORT usage\n"
+#endif
+#if defined(HA_HAVE_DUMP_LIBS)
+ " -dL dumps loaded object files after config checks\n"
+#endif
+ " -dK{class[,...]} dump registered keywords (use 'help' for list)\n"
+ " -dr ignores server address resolution failures\n"
+ " -dV disables SSL verify on servers side\n"
+ " -dW fails if any warning is emitted\n"
+ " -dD diagnostic mode : warn about suspicious configuration statements\n"
+ " -dF disable fast-forward\n"
+ " -dZ disable zero-copy forwarding\n"
+ " -sf/-st [pid ]* finishes/terminates old pids.\n"
+ " -x <unix_socket> get listening sockets from a unix socket\n"
+ " -S <bind>[,<bind options>...] new master CLI\n"
+ "\n",
+ name, cfg_maxpconn);
+ exit(1);
+}
+
+
+
+/*********************************************************************/
+/* more specific functions ***************************************/
+/*********************************************************************/
+
+/* sends the signal <sig> to all pids found in <oldpids>. Returns the number of
+ * pids the signal was correctly delivered to.
+ */
+int tell_old_pids(int sig)
+{
+ int idx;
+ int delivered = 0;
+
+ for (idx = 0; idx < nb_oldpids; idx++) {
+ if (kill(oldpids[idx], sig) == 0)
+ delivered++;
+ }
+ return delivered;
+}
+
+/*
+ * remove a pid from the <oldpids> array and decrease <nb_oldpids>.
+ * Returns 1 if the pid was found, otherwise returns 0.
+ */
+
+int delete_oldpid(int pid)
+{
+ int i;
+
+ for (i = 0; i < nb_oldpids; i++) {
+ if (oldpids[i] == pid) {
+ /* compact the array: move the last entry into the hole */
+ oldpids[i] = oldpids[nb_oldpids - 1];
+ oldpids[nb_oldpids - 1] = 0;
+ nb_oldpids--;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * When called, this function reexec haproxy with -sf followed by current
+ * children PIDs and possibly old children PIDs if they didn't leave yet.
+ * With <hardreload> non-zero, -st (terminate) is passed instead of -sf.
+ * On success execvp() does not return; on failure a warning is emitted
+ * and the function returns, leaving the master running.
+ */
+static void mworker_reexec(int hardreload)
+{
+ char **next_argv = NULL;
+ int old_argc = 0; /* previous number of argument */
+ int next_argc = 0;
+ int i = 0;
+ char *msg = NULL;
+ struct rlimit limit;
+ struct mworker_proc *current_child = NULL;
+
+ mworker_block_signals();
+ setenv("HAPROXY_MWORKER_REEXEC", "1", 1);
+
+ mworker_cleanup_proc();
+ mworker_proc_list_to_env(); /* put the children description in the env */
+
+ /* ensure that we close correctly every listeners before reexecuting */
+ mworker_cleanlisteners();
+
+ /* during the reload we must ensure that every FDs that can't be
+ * reuse (ie those that are not referenced in the proc_list)
+ * are closed or they will leak. */
+
+ /* close the listeners FD */
+ mworker_cli_proxy_stop();
+
+ if (fdtab)
+ deinit_pollers();
+
+#ifdef HAVE_SSL_RAND_KEEP_RANDOM_DEVICES_OPEN
+ /* close random device FDs */
+ RAND_keep_random_devices_open(0);
+#endif
+
+ /* restore the initial FD limits */
+ limit.rlim_cur = rlim_fd_cur_at_boot;
+ limit.rlim_max = rlim_fd_max_at_boot;
+ if (raise_rlim_nofile(&limit, &limit) != 0) {
+ ha_warning("Failed to restore initial FD limits (cur=%u max=%u), using cur=%u max=%u\n",
+ rlim_fd_cur_at_boot, rlim_fd_max_at_boot,
+ (unsigned int)limit.rlim_cur, (unsigned int)limit.rlim_max);
+ }
+
+ /* compute length */
+ while (old_argv[old_argc])
+ old_argc++;
+
+ /* 1 for haproxy -sf, 2 for -x /socket */
+ next_argv = calloc(old_argc + 1 + 2 + mworker_child_nb() + 1,
+ sizeof(*next_argv));
+ if (next_argv == NULL)
+ goto alloc_error;
+
+ /* copy the program name */
+ next_argv[next_argc++] = old_argv[0];
+
+ /* insert the new options just after argv[0] in case we have a -- */
+
+ if (getenv("HAPROXY_MWORKER_WAIT_ONLY") == NULL) {
+ /* add -sf <PID>* to argv */
+ if (mworker_child_nb() > 0) {
+ struct mworker_proc *child;
+
+ if (hardreload)
+ next_argv[next_argc++] = "-st";
+ else
+ next_argv[next_argc++] = "-sf";
+
+ list_for_each_entry(child, &proc_list, list) {
+ /* remember the last non-leaving worker: its socketpair
+ * will be passed to the new process with -x below
+ */
+ if (!(child->options & PROC_O_LEAVING) && (child->options & PROC_O_TYPE_WORKER))
+ current_child = child;
+
+ if (!(child->options & (PROC_O_TYPE_WORKER|PROC_O_TYPE_PROG)) || child->pid <= -1)
+ continue;
+ if ((next_argv[next_argc++] = memprintf(&msg, "%d", child->pid)) == NULL)
+ goto alloc_error;
+ msg = NULL;
+ }
+ }
+
+ if (current_child) {
+ /* add the -x option with the socketpair of the current worker */
+ next_argv[next_argc++] = "-x";
+ if ((next_argv[next_argc++] = memprintf(&msg, "sockpair@%d", current_child->ipc_fd[0])) == NULL)
+ goto alloc_error;
+ msg = NULL;
+ }
+ }
+
+ /* copy the previous options */
+ for (i = 1; i < old_argc; i++)
+ next_argv[next_argc++] = old_argv[i];
+
+ signal(SIGPROF, SIG_IGN);
+ execvp(next_argv[0], next_argv);
+ /* only reached when execvp() failed */
+ ha_warning("Failed to reexecute the master process [%d]: %s\n", pid, strerror(errno));
+ ha_free(&next_argv);
+ return;
+
+alloc_error:
+ ha_free(&next_argv);
+ ha_warning("Failed to reexecute the master process [%d]: Cannot allocate memory\n", pid);
+ return;
+}
+
+/* reexec haproxy in waitmode: sets HAPROXY_MWORKER_WAIT_ONLY so the new
+ * master process only waits for its children instead of reloading the
+ * configuration (mworker_reexec() skips -sf/-x when this variable is set).
+ */
+static void mworker_reexec_waitmode()
+{
+ setenv("HAPROXY_MWORKER_WAIT_ONLY", "1", 1);
+ mworker_reexec(0);
+}
+
+/* reload haproxy and emit a warning. <hardreload> selects -st instead of -sf
+ * in the re-executed command line (workers are terminated instead of being
+ * softly stopped).
+ */
+void mworker_reload(int hardreload)
+{
+ struct mworker_proc *child;
+ struct per_thread_deinit_fct *ptdf;
+
+ ha_notice("Reloading HAProxy%s\n", hardreload?" (hard-reload)":"");
+
+ /* close the poller FD and the thread waker pipe FD */
+ list_for_each_entry(ptdf, &per_thread_deinit_list, list)
+ ptdf->fct();
+
+ /* increment the number of reloads */
+ list_for_each_entry(child, &proc_list, list) {
+ child->reloads++;
+ }
+
+#if defined(USE_SYSTEMD)
+ if (global.tune.options & GTUNE_USE_SYSTEMD)
+ sd_notify(0, "RELOADING=1\nSTATUS=Reloading Configuration.\n");
+#endif
+ mworker_reexec(hardreload);
+}
+
+/* Turns the current process into the master: re-routes the signals to the
+ * master-specific handlers, cleans tasks inherited from before the fork, and
+ * enters the poll loop. Does not return.
+ */
+static void mworker_loop()
+{
+
+ /* Busy polling makes no sense in the master :-) */
+ global.tune.options &= ~GTUNE_BUSY_POLLING;
+
+
+ /* drop the worker-oriented handlers before installing ours */
+ signal_unregister(SIGTTIN);
+ signal_unregister(SIGTTOU);
+ signal_unregister(SIGUSR1);
+ signal_unregister(SIGHUP);
+ signal_unregister(SIGQUIT);
+
+ signal_register_fct(SIGTERM, mworker_catch_sigterm, SIGTERM);
+ signal_register_fct(SIGUSR1, mworker_catch_sigterm, SIGUSR1);
+ signal_register_fct(SIGTTIN, mworker_broadcast_signal, SIGTTIN);
+ signal_register_fct(SIGTTOU, mworker_broadcast_signal, SIGTTOU);
+ signal_register_fct(SIGINT, mworker_catch_sigterm, SIGINT);
+ signal_register_fct(SIGHUP, mworker_catch_sighup, SIGHUP);
+ signal_register_fct(SIGUSR2, mworker_catch_sighup, SIGUSR2);
+ signal_register_fct(SIGCHLD, mworker_catch_sigchld, SIGCHLD);
+
+ mworker_unblock_signals();
+ mworker_cleantasks();
+
+ mworker_catch_sigchld(NULL); /* ensure we clean the children in case
+ some SIGCHLD were lost */
+
+ jobs++; /* this is the "master" job, we want to take care of the
+ signals even if there is no listener so the poll loop don't
+ leave */
+
+ fork_poller();
+ run_thread_poll_loop(NULL);
+}
+
+/*
+ * Reexec the process in failure mode, instead of exiting. Registered as an
+ * atexit-style recovery path: it only acts when <atexit_flag> is set (i.e.
+ * we are in the master). The master is re-executed in wait mode so it keeps
+ * supervising the previously running workers.
+ */
+void reexec_on_failure()
+{
+ struct mworker_proc *child;
+
+ if (!atexit_flag)
+ return;
+
+ /* get the info of the children in the env */
+ if (mworker_env_to_proc_list() < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ /* increment the number of failed reloads */
+ list_for_each_entry(child, &proc_list, list) {
+ child->failedreloads++;
+ }
+
+ /* do not keep unused FDs retrieved from the previous process */
+ sock_drop_unused_old_sockets();
+
+ usermsgs_clr(NULL);
+ setenv("HAPROXY_LOAD_SUCCESS", "0", 1);
+ ha_warning("Loading failure!\n");
+#if defined(USE_SYSTEMD)
+ /* the sd_notify API is not able to send a reload failure signal. So
+ * the READY=1 signal still need to be sent */
+ if (global.tune.options & GTUNE_USE_SYSTEMD)
+ sd_notify(0, "READY=1\nSTATUS=Reload failed!\n");
+#endif
+
+ mworker_reexec_waitmode();
+}
+
+/*
+ * Exit with an error message upon a wait-mode failure. Only emits the alert
+ * when <atexit_flag> is set (master process); the actual exit is performed
+ * by the caller's exit path.
+ */
+void exit_on_waitmode_failure()
+{
+ if (!atexit_flag)
+ return;
+
+ ha_alert("Non-recoverable mworker wait-mode error, exiting.\n");
+}
+
+
+/*
+ * upon SIGUSR1, let's have a soft stop. Note that soft_stop() broadcasts
+ * a signal zero to all subscribers. This means that it's as easy as
+ * subscribing to signal 0 to get informed about an imminent shutdown.
+ * The handler unregisters itself so a second SIGUSR1 is not re-processed,
+ * then triggers a pool garbage collection to release memory early.
+ */
+static void sig_soft_stop(struct sig_handler *sh)
+{
+ soft_stop();
+ signal_unregister_handler(sh);
+ pool_gc(NULL);
+}
+
+/*
+ * upon SIGTTOU, we pause everything. If pausing some proxies fails fatally,
+ * fall back to a soft stop so we don't keep half-paused listeners around.
+ */
+static void sig_pause(struct sig_handler *sh)
+{
+ if (protocol_pause_all() & ERR_FATAL) {
+ const char *msg = "Some proxies refused to pause, performing soft stop now.\n";
+ ha_warning("%s", msg);
+ send_log(NULL, LOG_WARNING, "%s", msg);
+ soft_stop();
+ }
+ pool_gc(NULL);
+}
+
+/*
+ * upon SIGTTIN, let's resume all paused listeners. A fatal failure is only
+ * reported (typically a listening port conflict); nothing is stopped.
+ */
+static void sig_listen(struct sig_handler *sh)
+{
+ if (protocol_resume_all() & ERR_FATAL) {
+ const char *msg = "Some proxies refused to resume, probably due to a conflict on a listening port. You may want to try again after the conflicting application is stopped, otherwise a restart might be needed to resume safe operations.\n";
+ ha_warning("%s", msg);
+ send_log(NULL, LOG_WARNING, "%s", msg);
+ }
+}
+
+/*
+ * this function dumps every server's state when the process receives SIGHUP.
+ * The dump goes both to stderr (ha_warning) and to the proxies' logs.
+ */
+static void sig_dump_state(struct sig_handler *sh)
+{
+ struct proxy *p = proxies_list;
+
+ ha_warning("SIGHUP received, dumping servers states.\n");
+ while (p) {
+ struct server *s = p->srv;
+
+ send_log(p, LOG_NOTICE, "SIGHUP received, dumping servers states for proxy %s.\n", p->id);
+ while (s) {
+ chunk_printf(&trash,
+ "SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %lld tot.",
+ p->id, s->id,
+ (s->cur_state != SRV_ST_STOPPED) ? "UP" : "DOWN",
+ s->cur_sess, s->queue.length, s->counters.cum_sess);
+ ha_warning("%s\n", trash.area);
+ send_log(p, LOG_NOTICE, "%s\n", trash.area);
+ s = s->next;
+ }
+
+ /* FIXME: those info are a bit outdated. We should be able to distinguish between FE and BE. */
+ if (!p->srv) {
+ chunk_printf(&trash,
+ "SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id,
+ p->feconn, p->beconn, p->totpend, p->queue.length, p->fe_counters.cum_conn, p->be_counters.cum_conn);
+ } else if (p->srv_act == 0) {
+ chunk_printf(&trash,
+ "SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id,
+ (p->srv_bck) ? "is running on backup servers" : "has no server available",
+ p->feconn, p->beconn, p->totpend, p->queue.length, p->fe_counters.cum_conn, p->be_counters.cum_conn);
+ } else {
+ chunk_printf(&trash,
+ "SIGHUP: Proxy %s has %d active servers and %d backup servers available."
+ " Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %lld+%lld.",
+ p->id, p->srv_act, p->srv_bck,
+ p->feconn, p->beconn, p->totpend, p->queue.length, p->fe_counters.cum_conn, p->be_counters.cum_conn);
+ }
+ ha_warning("%s\n", trash.area);
+ send_log(p, LOG_NOTICE, "%s\n", trash.area);
+
+ p = p->next;
+ }
+}
+
+/* SIGQUIT handler: dumps the pool usage statistics, then triggers a pool
+ * garbage collection to free whatever can be released.
+ */
+static void dump(struct sig_handler *sh)
+{
+ /* dump memory usage then free everything possible */
+ dump_pools();
+ pool_gc(NULL);
+}
+
+/*
+ * This function dup2 the stdio FDs (0,1,2) with <fd>, then closes <fd>
+ * If <fd> < 0, it opens /dev/null and use it to dup
+ *
+ * In the case of chrooting, you have to open /dev/null before the chroot, and
+ * pass the <fd> to this function
+ *
+ * If /dev/null cannot be opened the process exits with an alert.
+ */
+static void stdio_quiet(int fd)
+{
+ if (fd < 0)
+ fd = open("/dev/null", O_RDWR, 0);
+
+ if (fd > -1) {
+ /* close the stdio streams first so their FDs 0/1/2 become free,
+ * then re-point those FDs to <fd>
+ */
+ fclose(stdin);
+ fclose(stdout);
+ fclose(stderr);
+
+ dup2(fd, 0);
+ dup2(fd, 1);
+ dup2(fd, 2);
+ if (fd > 2)
+ close(fd);
+ return;
+ }
+
+ ha_alert("Cannot open /dev/null\n");
+ exit(EXIT_FAILURE);
+}
+
+
+/* This function checks if cfg_cfgfiles contains directories.
+ * If it finds one, it adds all the files (and only files) it contains
+ * in cfg_cfgfiles in place of the directory (and removes the directory).
+ * It adds the files in lexical order.
+ * It adds only files with .cfg extension.
+ * It doesn't add files with name starting with '.'
+ * Any filesystem or allocation error is fatal (exit(1)).
+ */
+static void cfgfiles_expand_directories(void)
+{
+ struct wordlist *wl, *wlb;
+ char *err = NULL;
+
+ list_for_each_entry_safe(wl, wlb, &cfg_cfgfiles, list) {
+ struct stat file_stat;
+ struct dirent **dir_entries = NULL;
+ int dir_entries_nb;
+ int dir_entries_it;
+
+ if (stat(wl->s, &file_stat)) {
+ ha_alert("Cannot open configuration file/directory %s : %s\n",
+ wl->s,
+ strerror(errno));
+ exit(1);
+ }
+
+ if (!S_ISDIR(file_stat.st_mode))
+ continue;
+
+ /* from this point wl->s is a directory */
+
+ dir_entries_nb = scandir(wl->s, &dir_entries, NULL, alphasort);
+ if (dir_entries_nb < 0) {
+ ha_alert("Cannot open configuration directory %s : %s\n",
+ wl->s,
+ strerror(errno));
+ exit(1);
+ }
+
+ /* for each element in the directory wl->s */
+ for (dir_entries_it = 0; dir_entries_it < dir_entries_nb; dir_entries_it++) {
+ struct dirent *dir_entry = dir_entries[dir_entries_it];
+ char *filename = NULL;
+ char *d_name_cfgext = strstr(dir_entry->d_name, ".cfg");
+
+ /* don't add filename that begin with .
+ * only add filename with .cfg extension
+ * NOTE(review): strstr() finds the first ".cfg"; a name like
+ * "a.cfg.bak" is correctly rejected but "a.cfgx.cfg" is too.
+ */
+ if (dir_entry->d_name[0] == '.' ||
+ !(d_name_cfgext && d_name_cfgext[4] == '\0'))
+ goto next_dir_entry;
+
+ if (!memprintf(&filename, "%s/%s", wl->s, dir_entry->d_name)) {
+ /* <filename> is still NULL here, so report the entry name */
+ ha_alert("Cannot load configuration files %s : out of memory.\n",
+ dir_entry->d_name);
+ exit(1);
+ }
+
+ if (stat(filename, &file_stat)) {
+ /* report the full path of the failing file, not the directory */
+ ha_alert("Cannot open configuration file %s : %s\n",
+ filename,
+ strerror(errno));
+ exit(1);
+ }
+
+ /* don't add anything else than regular file in cfg_cfgfiles
+ * this way we avoid loops
+ */
+ if (!S_ISREG(file_stat.st_mode))
+ goto next_dir_entry;
+
+ if (!list_append_word(&wl->list, filename, &err)) {
+ ha_alert("Cannot load configuration files %s : %s\n",
+ filename,
+ err);
+ exit(1);
+ }
+
+next_dir_entry:
+ free(filename);
+ free(dir_entry);
+ }
+
+ free(dir_entries);
+
+ /* remove the current directory (wl) from cfg_cfgfiles */
+ free(wl->s);
+ LIST_DELETE(&wl->list);
+ free(wl);
+ }
+
+ free(err);
+}
+
/*
 * Copy and clean up the current argv.
 * The -sf / -st / -x options and their parameters are removed; everything
 * else is kept (including all arguments following "--").
 * Returns an allocated, NULL-terminated copy of argv (the argument strings
 * themselves are NOT duplicated), or NULL on allocation failure or when a
 * two-argument option is missing its parameter. The caller owns and must
 * free the returned array.
 */

static char **copy_argv(int argc, char **argv)
{
	char **newargv, **retargv;

	/* +2 leaves room for the terminating NULL entry */
	newargv = calloc(argc + 2, sizeof(*newargv));
	if (newargv == NULL) {
		ha_warning("Cannot allocate memory\n");
		return NULL;
	}
	retargv = newargv;

	/* first copy argv[0] */
	*newargv++ = *argv++;
	argc--;

	while (argc > 0) {
		if (**argv != '-') {
			/* non options are copied but will fail in the argument parser */
			*newargv++ = *argv++;
			argc--;

		} else {
			char *flag;

			flag = *argv + 1;

			if (flag[0] == '-' && flag[1] == 0) {
				/* "--\0" copy every arguments till the end of argv */
				*newargv++ = *argv++;
				argc--;

				while (argc > 0) {
					*newargv++ = *argv++;
					argc--;
				}
			} else {
				switch (*flag) {
				case 's':
					/* -sf / -st and their parameters are ignored */
					if (flag[1] == 'f' || flag[1] == 't') {
						argc--;
						argv++;
						/* The list can't contain a negative value since the only
						   way to know the end of this list is by looking for the
						   next option or the end of the options */
						while (argc > 0 && argv[0][0] != '-') {
							argc--;
							argv++;
						}
					} else {
						/* other -s* options are dropped without a parameter */
						argc--;
						argv++;

					}
					break;

				case 'x':
					/* this option and its parameter are ignored */
					argc--;
					argv++;
					if (argc > 0) {
						argc--;
						argv++;
					}
					break;

				case 'C':
				case 'n':
				case 'm':
				case 'N':
				case 'L':
				case 'f':
				case 'p':
				case 'S':
					/* these options have only 1 parameter which must be copied and can start with a '-' */
					*newargv++ = *argv++;
					argc--;
					if (argc == 0)
						goto error;
					*newargv++ = *argv++;
					argc--;
					break;
				default:
					/* for other options just copy them without parameters, this is also done
					 * for options like "--foo", but this will fail in the argument parser.
					 * */
					*newargv++ = *argv++;
					argc--;
					break;
				}
			}
		}
	}

	return retargv;

error:
	free(retargv);
	return NULL;
}
+
+
/* Performs basic random seed initialization. The main issue with this is that
 * srandom_r() only takes 32 bits and purposely provides a reproducible sequence,
 * which means that there will only be 4 billion possible random sequences once
 * srandom() is called, regardless of the internal state. Not calling it is
 * even worse as we'll always produce the same randoms sequences. What we do
 * here is to create an initial sequence from various entropy sources, hash it
 * using SHA1 and keep the resulting 160 bits available globally.
 *
 * We initialize the current process with the first 32 bits before starting the
 * polling loop, where all this will be changed to have process specific and
 * thread specific sequences.
 *
 * Before starting threads, it's still possible to call random() as srandom()
 * is initialized from this, but after threads and/or processes are started,
 * only ha_random() is expected to be used to guarantee distinct sequences.
 */
static void ha_random_boot(char *const *argv)
{
	unsigned char message[256];
	unsigned char *m = message; /* write cursor into <message> */
	struct timeval tv;
	blk_SHA_CTX ctx;
	unsigned long l;
	int fd;
	int i;

	/* start with current time as pseudo-random seed */
	gettimeofday(&tv, NULL);
	write_u32(m, tv.tv_sec); m += 4;
	write_u32(m, tv.tv_usec); m += 4;

	/* PID and PPID add some OS-based randomness */
	write_u16(m, getpid()); m += 2;
	write_u16(m, getppid()); m += 2;

	/* take up to 20 bytes (160 bits) from /dev/urandom if available (non-blocking) */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		i = read(fd, m, 20);
		if (i > 0)
			m += i;
		close(fd);
	}

	/* take up to 20 bytes (160 bits) from openssl (non-blocking) */
#ifdef USE_OPENSSL
	if (RAND_bytes(m, 20) == 1)
		m += 20;
#endif

	/* take 160 bits from existing random in case it was already initialized */
	for (i = 0; i < 5; i++) {
		write_u32(m, random());
		m += 4;
	}

	/* stack address (benefit from operating system's ASLR) */
	l = (unsigned long)&m;
	memcpy(m, &l, sizeof(l)); m += sizeof(l);

	/* argv address (benefit from operating system's ASLR) */
	l = (unsigned long)&argv;
	memcpy(m, &l, sizeof(l)); m += sizeof(l);

	/* use tv_usec again after all the operations above */
	gettimeofday(&tv, NULL);
	write_u32(m, tv.tv_usec); m += 4;

	/*
	 * At this point, ~84-92 bytes have been used
	 */

	/* finish with the hostname; the bound keeps the copy inside <message>
	 * (NOTE(review): assumes hostname fits in the remaining space so that
	 * the m += strlen() advance stays in bounds — holds since the global
	 * hostname buffer is much smaller than the ~164 bytes left)
	 */
	strncpy((char *)m, hostname, message + sizeof(message) - m);
	m += strlen(hostname);

	/* total message length */
	l = m - message;

	memset(&ctx, 0, sizeof(ctx));
	blk_SHA1_Init(&ctx);
	blk_SHA1_Update(&ctx, message, l);
	blk_SHA1_Final(boot_seed, &ctx);

	/* seed libc's PRNG with the first 32 bits, and publish the full seed */
	srandom(read_u32(boot_seed));
	ha_random_seed(boot_seed, sizeof(boot_seed));
}
+
+/* considers splicing proxies' maxconn, computes the ideal global.maxpipes
+ * setting, and returns it. It may return -1 meaning "unlimited" if some
+ * unlimited proxies have been found and the global.maxconn value is not yet
+ * set. It may also return a value greater than maxconn if it's not yet set.
+ * Note that a value of zero means there is no need for pipes. -1 is never
+ * returned if global.maxconn is valid.
+ */
+static int compute_ideal_maxpipes()
+{
+ struct proxy *cur;
+ int nbfe = 0, nbbe = 0;
+ int unlimited = 0;
+ int pipes;
+ int max;
+
+ for (cur = proxies_list; cur; cur = cur->next) {
+ if (cur->options2 & (PR_O2_SPLIC_ANY)) {
+ if (cur->cap & PR_CAP_FE) {
+ max = cur->maxconn;
+ nbfe += max;
+ if (!max) {
+ unlimited = 1;
+ break;
+ }
+ }
+ if (cur->cap & PR_CAP_BE) {
+ max = cur->fullconn ? cur->fullconn : global.maxconn;
+ nbbe += max;
+ if (!max) {
+ unlimited = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ pipes = MAX(nbfe, nbbe);
+ if (global.maxconn) {
+ if (pipes > global.maxconn || unlimited)
+ pipes = global.maxconn;
+ } else if (unlimited) {
+ pipes = -1;
+ }
+
+ return pipes >= 4 ? pipes / 4 : pipes;
+}
+
+/* considers global.maxsocks, global.maxpipes, async engines, SSL frontends and
+ * rlimits and computes an ideal maxconn. It's meant to be called only when
+ * maxsock contains the sum of listening FDs, before it is updated based on
+ * maxconn and pipes. If there are not enough FDs left, DEFAULT_MAXCONN (by
+ * default 100) is returned as it is expected that it will even run on tight
+ * environments, and will maintain compatibility with previous packages that
+ * used to rely on this value as the default one. The system will emit a
+ * warning indicating how many FDs are missing anyway if needed.
+ */
+static int compute_ideal_maxconn()
+{
+ int ssl_sides = !!global.ssl_used_frontend + !!global.ssl_used_backend;
+ int engine_fds = global.ssl_used_async_engines * ssl_sides;
+ int pipes = compute_ideal_maxpipes();
+ int remain = MAX(rlim_fd_cur_at_boot, rlim_fd_max_at_boot);
+ int maxconn;
+
+ /* we have to take into account these elements :
+ * - number of engine_fds, which inflates the number of FD needed per
+ * connection by this number.
+ * - number of pipes per connection on average : for the unlimited
+ * case, this is 0.5 pipe FDs per connection, otherwise it's a
+ * fixed value of 2*pipes.
+ * - two FDs per connection
+ */
+
+ if (global.fd_hard_limit && remain > global.fd_hard_limit)
+ remain = global.fd_hard_limit;
+
+ /* subtract listeners and checks */
+ remain -= global.maxsock;
+
+ /* one epoll_fd/kqueue_fd per thread */
+ remain -= global.nbthread;
+
+ /* one wake-up pipe (2 fd) per thread */
+ remain -= 2 * global.nbthread;
+
+ /* Fixed pipes values : we only subtract them if they're not larger
+ * than the remaining FDs because pipes are optional.
+ */
+ if (pipes >= 0 && pipes * 2 < remain)
+ remain -= pipes * 2;
+
+ if (pipes < 0) {
+ /* maxsock = maxconn * 2 + maxconn/4 * 2 + maxconn * engine_fds.
+ * = maxconn * (2 + 0.5 + engine_fds)
+ * = maxconn * (4 + 1 + 2*engine_fds) / 2
+ */
+ maxconn = 2 * remain / (5 + 2 * engine_fds);
+ } else {
+ /* maxsock = maxconn * 2 + maxconn * engine_fds.
+ * = maxconn * (2 + engine_fds)
+ */
+ maxconn = remain / (2 + engine_fds);
+ }
+
+ return MAX(maxconn, DEFAULT_MAXCONN);
+}
+
+/* computes the estimated maxsock value for the given maxconn based on the
+ * possibly set global.maxpipes and existing partial global.maxsock. It may
+ * temporarily change global.maxconn for the time needed to propagate the
+ * computations, and will reset it.
+ */
+static int compute_ideal_maxsock(int maxconn)
+{
+ int maxpipes = global.maxpipes;
+ int maxsock = global.maxsock;
+
+
+ if (!maxpipes) {
+ int old_maxconn = global.maxconn;
+
+ global.maxconn = maxconn;
+ maxpipes = compute_ideal_maxpipes();
+ global.maxconn = old_maxconn;
+ }
+
+ maxsock += maxconn * 2; /* each connection needs two sockets */
+ maxsock += maxpipes * 2; /* each pipe needs two FDs */
+ maxsock += global.nbthread; /* one epoll_fd/kqueue_fd per thread */
+ maxsock += 2 * global.nbthread; /* one wake-up pipe (2 fd) per thread */
+
+ /* compute fd used by async engines */
+ if (global.ssl_used_async_engines) {
+ int sides = !!global.ssl_used_frontend + !!global.ssl_used_backend;
+
+ maxsock += maxconn * sides * global.ssl_used_async_engines;
+ }
+ return maxsock;
+}
+
/* Tests if it is possible to set the current process's RLIMIT_NOFILE to
 * <maxsock>, then sets it back to the previous value. Returns non-zero if the
 * value is accepted, zero otherwise. This is used to determine if an
 * automatic limit may be applied or not. When it is not, the caller knows that
 * the highest we can do is the rlim_max at boot. In case of error, we return
 * that the setting is possible, so that we defer the error processing to the
 * final stage in charge of enforcing this.
 */
static int check_if_maxsock_permitted(int maxsock)
{
	struct rlimit orig_limit, test_limit;
	int ret;

	/* an explicit fd-hard-limit always wins */
	if (global.fd_hard_limit && maxsock > global.fd_hard_limit)
		return 0;

	if (getrlimit(RLIMIT_NOFILE, &orig_limit) != 0)
		return 1;

	/* don't go further if we can't even set to what we have */
	if (raise_rlim_nofile(NULL, &orig_limit) != 0)
		return 1;

	test_limit.rlim_max = MAX(maxsock, orig_limit.rlim_max);
	test_limit.rlim_cur = test_limit.rlim_max;
	ret = raise_rlim_nofile(NULL, &test_limit);

	/* restore the original limit before reporting the result */
	if (raise_rlim_nofile(NULL, &orig_limit) != 0)
		return 1;

	return ret == 0;
}
+
/* This performs the very basic early initialization at the end of the PREPARE
 * init stage. It may only assume that list heads are initialized, but not that
 * anything else is correct. It will initialize a number of variables that
 * depend on command line and will pre-parse the command line. If it fails, it
 * directly exits. (argc is currently unused here.)
 */
static void init_early(int argc, char **argv)
{
	char *progname;
	char *tmp;
	int len;

	setenv("HAPROXY_STARTUP_VERSION", HAPROXY_VERSION, 0);

	/* First, let's initialize most global variables */
	totalconn = actconn = listeners = stopping = 0;
	killed = pid = 0;

	global.maxsock = 10; /* reserve 10 fds ; will be incremented by socket eaters */
	global.rlimit_memmax_all = HAPROXY_MEMMAX;
	global.mode = MODE_STARTING;

	/* if we were in mworker mode, we should restart in mworker mode */
	if (getenv("HAPROXY_MWORKER_REEXEC") != NULL)
		global.mode |= MODE_MWORKER;

	/* initialize date, time, and pid */
	tzset();
	clock_init_process_date();
	start_date = date;
	start_time_ns = now_ns;
	pid = getpid();

	/* Set local host name and adjust some environment variables.
	 * NB: POSIX does not make it mandatory for gethostname() to
	 * NULL-terminate the string in case of truncation, and at least
	 * FreeBSD appears not to do it.
	 */
	memset(hostname, 0, sizeof(hostname));
	gethostname(hostname, sizeof(hostname) - 1);

	/* preset some environment variables */
	localpeer = strdup(hostname);
	if (!localpeer || setenv("HAPROXY_LOCALPEER", localpeer, 1) < 0) {
		ha_alert("Cannot allocate memory for local peer.\n");
		exit(EXIT_FAILURE);
	}

	/* extract the program name from argv[0], it will be used for the logs
	 * and error messages.
	 */
	progname = *argv;
	while ((tmp = strchr(progname, '/')) != NULL)
		progname = tmp + 1;

	/* keep a private copy: argv[0] must not be referenced afterwards */
	len = strlen(progname);
	progname = strdup(progname);
	if (!progname) {
		ha_alert("Cannot allocate memory for log_tag.\n");
		exit(EXIT_FAILURE);
	}

	chunk_initlen(&global.log_tag, progname, len, len);
}
+
/* handles program arguments. Very minimal parsing is performed, variables are
 * fed with some values, and lists are completed with other ones. In case of
 * error, it will exit.
 */
static void init_args(int argc, char **argv)
{
	char *progname = global.log_tag.area;
	char *err_msg = NULL;

	/* pre-fill in the global tuning options before we let the cmdline
	 * change them.
	 */
	global.tune.options |= GTUNE_USE_SELECT;  /* select() is always available */
#if defined(USE_POLL)
	global.tune.options |= GTUNE_USE_POLL;
#endif
#if defined(USE_EPOLL)
	global.tune.options |= GTUNE_USE_EPOLL;
#endif
#if defined(USE_KQUEUE)
	global.tune.options |= GTUNE_USE_KQUEUE;
#endif
#if defined(USE_EVPORTS)
	global.tune.options |= GTUNE_USE_EVPORTS;
#endif
#if defined(USE_LINUX_SPLICE)
	global.tune.options |= GTUNE_USE_SPLICE;
#endif
#if defined(USE_GETADDRINFO)
	global.tune.options |= GTUNE_USE_GAI;
#endif
#ifdef USE_THREAD
	global.tune.options |= GTUNE_IDLE_POOL_SHARED;
#endif
#ifdef USE_QUIC
	global.tune.options |= GTUNE_QUIC_SOCK_PER_CONN;
#endif
	global.tune.options |= GTUNE_STRICT_LIMITS;

	global.tune.options |= GTUNE_USE_FAST_FWD; /* Use fast-forward by default */

	/* Use zero-copy forwarding by default */
	global.tune.no_zero_copy_fwd = NO_ZERO_COPY_FWD_QUIC_SND;

	/* keep a copy of original arguments for the master process */
	old_argv = copy_argv(argc, argv);
	if (!old_argv) {
		ha_alert("failed to copy argv.\n");
		exit(EXIT_FAILURE);
	}

	/* skip program name and start */
	argc--; argv++;
	while (argc > 0) {
		char *flag;

		if (**argv == '-') {
			flag = *argv+1;

			/* single-argument flags first */
			if (*flag == 'v') {
				display_version();
				if (flag[1] == 'v')  /* -vv */
					display_build_opts();
				deinit_and_exit(0);
			}
#if defined(USE_EPOLL)
			else if (*flag == 'd' && flag[1] == 'e')
				global.tune.options &= ~GTUNE_USE_EPOLL;
#endif
#if defined(USE_POLL)
			else if (*flag == 'd' && flag[1] == 'p')
				global.tune.options &= ~GTUNE_USE_POLL;
#endif
#if defined(USE_KQUEUE)
			else if (*flag == 'd' && flag[1] == 'k')
				global.tune.options &= ~GTUNE_USE_KQUEUE;
#endif
#if defined(USE_EVPORTS)
			else if (*flag == 'd' && flag[1] == 'v')
				global.tune.options &= ~GTUNE_USE_EVPORTS;
#endif
#if defined(USE_LINUX_SPLICE)
			else if (*flag == 'd' && flag[1] == 'S')
				global.tune.options &= ~GTUNE_USE_SPLICE;
#endif
#if defined(USE_GETADDRINFO)
			else if (*flag == 'd' && flag[1] == 'G')
				global.tune.options &= ~GTUNE_USE_GAI;
#endif
#if defined(SO_REUSEPORT)
			else if (*flag == 'd' && flag[1] == 'R')
				protocol_clrf_all(PROTO_F_REUSEPORT_SUPPORTED);
#endif
			else if (*flag == 'd' && flag[1] == 'F')
				global.tune.options &= ~GTUNE_USE_FAST_FWD;
			else if (*flag == 'd' && flag[1] == 'V')
				global.ssl_server_verify = SSL_SERVER_VERIFY_NONE;
			else if (*flag == 'd' && flag[1] == 'Z')
				global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD;
			else if (*flag == 'V')
				arg_mode |= MODE_VERBOSE;
			else if (*flag == 'd' && flag[1] == 'C') {
				/* -dC[key[,line]]: anonymized config dump; the comma
				 * splitting writes NULs in place, so atoll() below only
				 * sees the leading key part.
				 */
				char *end;
				char *key;

				key = flag + 2;
				for (;key && *key; key = end) {
					end = strchr(key, ',');
					if (end)
						*(end++) = 0;

					if (strcmp(key, "line") == 0)
						arg_mode |= MODE_DUMP_NB_L;

				}
				arg_mode |= MODE_DUMP_CFG;
				HA_ATOMIC_STORE(&global.anon_key, atoll(flag + 2));
			}
			else if (*flag == 'd' && flag[1] == 'b')
				arg_mode |= MODE_FOREGROUND;
			else if (*flag == 'd' && flag[1] == 'D')
				arg_mode |= MODE_DIAG;
			else if (*flag == 'd' && flag[1] == 'W')
				arg_mode |= MODE_ZERO_WARNING;
			else if (*flag == 'd' && flag[1] == 'M') {
				/* -dM[opts]: memory pool debugging; <0 is fatal, 0 warns */
				int ret = pool_parse_debugging(flag + 2, &err_msg);

				if (ret <= -1) {
					if (ret < -1)
						ha_alert("-dM: %s\n", err_msg);
					else
						printf("%s\n", err_msg);
					ha_free(&err_msg);
					exit(ret < -1 ? EXIT_FAILURE : 0);
				} else if (ret == 0) {
					ha_warning("-dM: %s\n", err_msg);
					ha_free(&err_msg);
				}
			}
			else if (*flag == 'd' && flag[1] == 'r')
				global.tune.options |= GTUNE_RESOLVE_DONTFAIL;
#if defined(HA_HAVE_DUMP_LIBS)
			else if (*flag == 'd' && flag[1] == 'L')
				arg_mode |= MODE_DUMP_LIBS;
#endif
			else if (*flag == 'd' && flag[1] == 'K') {
				arg_mode |= MODE_DUMP_KWD;
				kwd_dump = flag + 2;
			}
			else if (*flag == 'd' && flag[1] == 't') {
				/* -dt [spec]: optional trace specification follows */
				if (argc > 1 && argv[1][0] != '-') {
					if (trace_parse_cmd(argv[1], &err_msg)) {
						ha_alert("-dt: %s.\n", err_msg);
						ha_free(&err_msg);
						exit(EXIT_FAILURE);
					}
					argc--; argv++;
				}
				else {
					trace_parse_cmd(NULL, NULL);
				}
			}
			else if (*flag == 'd')
				arg_mode |= MODE_DEBUG;
			else if (*flag == 'c' && flag[1] == 'c') {
				/* -cc "<cond>": check a configuration condition */
				arg_mode |= MODE_CHECK_CONDITION;
				argv++;
				argc--;
				check_condition = *argv;
			}
			else if (*flag == 'c')
				arg_mode |= MODE_CHECK;
			else if (*flag == 'D')
				arg_mode |= MODE_DAEMON;
			else if (*flag == 'W' && flag[1] == 's') {
				arg_mode |= MODE_MWORKER | MODE_FOREGROUND;
#if defined(USE_SYSTEMD)
				global.tune.options |= GTUNE_USE_SYSTEMD;
#else
				ha_alert("master-worker mode with systemd support (-Ws) requested, but not compiled. Use master-worker mode (-W) if you are not using Type=notify in your unit file or recompile with USE_SYSTEMD=1.\n\n");
				usage(progname);
#endif
			}
			else if (*flag == 'W')
				arg_mode |= MODE_MWORKER;
			else if (*flag == 'q')
				arg_mode |= MODE_QUIET;
			else if (*flag == 'x') {
				/* -x <socket>: retrieve listening FDs from old process */
				if (argc <= 1) {
					ha_alert("Unix socket path expected with the -x flag\n\n");
					usage(progname);
				}
				if (old_unixsocket)
					ha_warning("-x option already set, overwriting the value\n");
				old_unixsocket = argv[1];

				argv++;
				argc--;
			}
			else if (*flag == 'S') {
				/* -S <socket>[,opts]: master CLI socket definition */
				struct wordlist *c;

				if (argc <= 1) {
					ha_alert("Socket and optional bind parameters expected with the -S flag\n");
					usage(progname);
				}
				if ((c = malloc(sizeof(*c))) == NULL || (c->s = strdup(argv[1])) == NULL) {
					ha_alert("Cannot allocate memory\n");
					exit(EXIT_FAILURE);
				}
				LIST_INSERT(&mworker_cli_conf, &c->list);

				argv++;
				argc--;
			}
			else if (*flag == 's' && (flag[1] == 'f' || flag[1] == 't')) {
				/* list of pids to finish ('f') or terminate ('t') */

				if (flag[1] == 'f')
					oldpids_sig = SIGUSR1; /* finish then exit */
				else
					oldpids_sig = SIGTERM; /* terminate immediately */
				while (argc > 1 && argv[1][0] != '-') {
					char * endptr = NULL;
					oldpids = realloc(oldpids, (nb_oldpids + 1) * sizeof(int));
					if (!oldpids) {
						ha_alert("Cannot allocate old pid : out of memory.\n");
						exit(1);
					}
					argc--; argv++;
					errno = 0;
					oldpids[nb_oldpids] = strtol(*argv, &endptr, 10);
					if (errno) {
						ha_alert("-%2s option: failed to parse {%s}: %s\n",
							 flag,
							 *argv, strerror(errno));
						exit(1);
					} else if (endptr && strlen(endptr)) {
						/* tolerate trailing whitespace, reject anything else */
						while (isspace((unsigned char)*endptr)) endptr++;
						if (*endptr != 0) {
							ha_alert("-%2s option: some bytes unconsumed in PID list {%s}\n",
								 flag, endptr);
							exit(1);
						}
					}
					if (oldpids[nb_oldpids] <= 0)
						usage(progname);
					nb_oldpids++;
				}
			}
			else if (flag[0] == '-' && flag[1] == 0) { /* "--" */
				/* now that's a cfgfile list */
				argv++; argc--;
				while (argc > 0) {
					if (!list_append_word(&cfg_cfgfiles, *argv, &err_msg)) {
						ha_alert("Cannot load configuration file/directory %s : %s\n",
							 *argv,
							 err_msg);
						exit(1);
					}
					argv++; argc--;
				}
				break;
			}
			else { /* >=2 args */
				argv++; argc--;
				if (argc == 0)
					usage(progname);

				switch (*flag) {
				case 'C' : change_dir = *argv; break;
				case 'n' : cfg_maxconn = atol(*argv); break;
				case 'm' : global.rlimit_memmax_all = atol(*argv); break;
				case 'N' : cfg_maxpconn = atol(*argv); break;
				case 'L' :
					free(localpeer);
					if ((localpeer = strdup(*argv)) == NULL) {
						ha_alert("Cannot allocate memory for local peer.\n");
						exit(EXIT_FAILURE);
					}
					setenv("HAPROXY_LOCALPEER", localpeer, 1);
					global.localpeer_cmdline = 1;
					break;
				case 'f' :
					if (!list_append_word(&cfg_cfgfiles, *argv, &err_msg)) {
						ha_alert("Cannot load configuration file/directory %s : %s\n",
							 *argv,
							 err_msg);
						exit(1);
					}
					break;
				case 'p' :
					free(global.pidfile);
					if ((global.pidfile = strdup(*argv)) == NULL) {
						ha_alert("Cannot allocate memory for pidfile.\n");
						exit(EXIT_FAILURE);
					}
					break;
				default: usage(progname);
				}
			}
		}
		else
			usage(progname);
		argv++; argc--;
	}
	free(err_msg);
}
+
+/* call the various keyword dump functions based on the comma-delimited list of
+ * classes in kwd_dump.
+ */
+static void dump_registered_keywords(void)
+{
+ char *end;
+ int all __maybe_unused = 0;
+
+ for (; kwd_dump && *kwd_dump; kwd_dump = end) {
+ end = strchr(kwd_dump, ',');
+ if (end)
+ *(end++) = 0;
+
+ if (strcmp(kwd_dump, "help") == 0) {
+ printf("# List of supported keyword classes:\n");
+ printf("all: list all keywords\n");
+ printf("acl: ACL keywords\n");
+ printf("cfg: configuration keywords\n");
+ printf("cli: CLI keywords\n");
+ printf("cnv: sample converter keywords\n");
+ printf("flt: filter names\n");
+ printf("smp: sample fetch functions\n");
+ printf("svc: service names\n");
+ continue;
+ }
+ else if (strcmp(kwd_dump, "all") == 0) {
+ all = 1;
+ }
+
+ if (all || strcmp(kwd_dump, "acl") == 0) {
+ printf("# List of registered ACL keywords:\n");
+ acl_dump_kwd();
+ }
+
+ if (all || strcmp(kwd_dump, "cfg") == 0) {
+ printf("# List of registered configuration keywords:\n");
+ cfg_dump_registered_keywords();
+ }
+
+ if (all || strcmp(kwd_dump, "cli") == 0) {
+ printf("# List of registered CLI keywords:\n");
+ cli_list_keywords();
+ }
+
+ if (all || strcmp(kwd_dump, "cnv") == 0) {
+ printf("# List of registered sample converter functions:\n");
+ smp_dump_conv_kw();
+ }
+
+ if (all || strcmp(kwd_dump, "flt") == 0) {
+ printf("# List of registered filter names:\n");
+ flt_dump_kws(NULL);
+ }
+
+ if (all || strcmp(kwd_dump, "smp") == 0) {
+ printf("# List of registered sample fetch functions:\n");
+ smp_dump_fetch_kw();
+ }
+
+ if (all || strcmp(kwd_dump, "svc") == 0) {
+ printf("# List of registered service names:\n");
+ list_services(NULL);
+ }
+ }
+}
+
+/* Generate a random cluster-secret in case the setting is not provided in the
+ * configuration. This allows to use features which rely on it albeit with some
+ * limitations.
+ */
+static void generate_random_cluster_secret()
+{
+ /* used as a default random cluster-secret if none defined. */
+ uint64_t rand;
+
+ /* The caller must not overwrite an already defined secret. */
+ BUG_ON(cluster_secret_isset);
+
+ rand = ha_random64();
+ memcpy(global.cluster_secret, &rand, sizeof(rand));
+ rand = ha_random64();
+ memcpy(global.cluster_secret + sizeof(rand), &rand, sizeof(rand));
+ cluster_secret_isset = 1;
+}
+
+/*
+ * This function initializes all the necessary variables. It only returns
+ * if everything is OK. If something fails, it exits.
+ */
+static void init(int argc, char **argv)
+{
+ char *progname = global.log_tag.area;
+ int err_code = 0;
+ struct wordlist *wl;
+ struct proxy *px;
+ struct post_check_fct *pcf;
+ struct pre_check_fct *prcf;
+ int ideal_maxconn;
+ const char *cc, *cflags, *opts;
+
+#ifdef USE_OPENSSL
+#ifdef USE_OPENSSL_WOLFSSL
+ wolfSSL_Init();
+ wolfSSL_Debugging_ON();
+#endif
+
+#ifdef USE_OPENSSL_AWSLC
+ const char *version_str = OpenSSL_version(OPENSSL_VERSION);
+ if (strncmp(version_str, "AWS-LC", 6) != 0) {
+ ha_alert("HAPRoxy built with AWS-LC but running with %s.\n", version_str);
+ exit(1);
+ }
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+ /* Initialize the error strings of OpenSSL
+ * It only needs to be done explicitly with older versions of the SSL
+ * library. On newer versions, errors strings are loaded during start
+ * up. */
+ SSL_load_error_strings();
+#endif
+#endif
+
+ startup_logs_init();
+
+ if (init_acl() != 0)
+ exit(1);
+
+ /* Initialise lua. */
+ hlua_init();
+
+ global.mode |= (arg_mode & (MODE_DAEMON | MODE_MWORKER | MODE_FOREGROUND | MODE_VERBOSE
+ | MODE_QUIET | MODE_CHECK | MODE_DEBUG | MODE_ZERO_WARNING
+ | MODE_DIAG | MODE_CHECK_CONDITION | MODE_DUMP_LIBS | MODE_DUMP_KWD
+ | MODE_DUMP_CFG | MODE_DUMP_NB_L));
+
+ if (getenv("HAPROXY_MWORKER_WAIT_ONLY")) {
+ unsetenv("HAPROXY_MWORKER_WAIT_ONLY");
+ global.mode |= MODE_MWORKER_WAIT;
+ global.mode &= ~MODE_MWORKER;
+ }
+
+ /* set the atexit functions when not doing configuration check */
+ if (!(global.mode & (MODE_CHECK | MODE_CHECK_CONDITION))
+ && (getenv("HAPROXY_MWORKER_REEXEC") != NULL)) {
+
+ if (global.mode & MODE_MWORKER) {
+ atexit_flag = 1;
+ atexit(reexec_on_failure);
+ } else if (global.mode & MODE_MWORKER_WAIT) {
+ atexit_flag = 1;
+ atexit(exit_on_waitmode_failure);
+ }
+ }
+
+ if (change_dir && chdir(change_dir) < 0) {
+ ha_alert("Could not change to directory %s : %s\n", change_dir, strerror(errno));
+ exit(1);
+ }
+
+ usermsgs_clr("config");
+
+ if (global.mode & MODE_CHECK_CONDITION) {
+ int result;
+
+ uint32_t err;
+ const char *errptr;
+ char *errmsg = NULL;
+
+ char *args[MAX_LINE_ARGS+1];
+ int arg = sizeof(args) / sizeof(*args);
+ size_t outlen;
+ char *w;
+
+ if (!check_condition)
+ usage(progname);
+
+ outlen = strlen(check_condition) + 1;
+ err = parse_line(check_condition, check_condition, &outlen, args, &arg,
+ PARSE_OPT_ENV | PARSE_OPT_WORD_EXPAND | PARSE_OPT_DQUOTE | PARSE_OPT_SQUOTE | PARSE_OPT_BKSLASH,
+ &errptr);
+
+ if (err & PARSE_ERR_QUOTE) {
+ ha_alert("Syntax Error in condition: Unmatched quote.\n");
+ exit(2);
+ }
+
+ if (err & PARSE_ERR_HEX) {
+ ha_alert("Syntax Error in condition: Truncated or invalid hexadecimal sequence.\n");
+ exit(2);
+ }
+
+ if (err & (PARSE_ERR_TOOLARGE|PARSE_ERR_OVERLAP)) {
+ ha_alert("Error in condition: Line too long.\n");
+ exit(2);
+ }
+
+ if (err & PARSE_ERR_TOOMANY) {
+ ha_alert("Error in condition: Too many words.\n");
+ exit(2);
+ }
+
+ if (err) {
+ ha_alert("Unhandled error in condition, please report this to the developers.\n");
+ exit(2);
+ }
+
+ /* remerge all words into a single expression */
+ for (w = *args; (w += strlen(w)) < check_condition + outlen - 1; *w = ' ')
+ ;
+
+ result = cfg_eval_condition(args, &errmsg, &errptr);
+
+ if (result < 0) {
+ if (errmsg)
+ ha_alert("Failed to evaluate condition: %s\n", errmsg);
+
+ exit(2);
+ }
+
+ exit(result ? 0 : 1);
+ }
+
+ /* in wait mode, we don't try to read the configuration files */
+ if (!(global.mode & MODE_MWORKER_WAIT)) {
+ char *env_cfgfiles = NULL;
+ int env_err = 0;
+
+ /* handle cfgfiles that are actually directories */
+ cfgfiles_expand_directories();
+
+ if (LIST_ISEMPTY(&cfg_cfgfiles))
+ usage(progname);
+
+ /* temporary create environment variables with default
+ * values to ease user configuration. Do not forget to
+ * unset them after the list_for_each_entry loop.
+ */
+ setenv("HAPROXY_HTTP_LOG_FMT", default_http_log_format, 1);
+ setenv("HAPROXY_HTTPS_LOG_FMT", default_https_log_format, 1);
+ setenv("HAPROXY_TCP_LOG_FMT", default_tcp_log_format, 1);
+ setenv("HAPROXY_BRANCH", PRODUCT_BRANCH, 1);
+ list_for_each_entry(wl, &cfg_cfgfiles, list) {
+ int ret;
+
+ if (env_err == 0) {
+ if (!memprintf(&env_cfgfiles, "%s%s%s",
+ (env_cfgfiles ? env_cfgfiles : ""),
+ (env_cfgfiles ? ";" : ""), wl->s))
+ env_err = 1;
+ }
+
+ ret = readcfgfile(wl->s);
+ if (ret == -1) {
+ ha_alert("Could not open configuration file %s : %s\n",
+ wl->s, strerror(errno));
+ free(env_cfgfiles);
+ exit(1);
+ }
+ if (ret & (ERR_ABORT|ERR_FATAL))
+ ha_alert("Error(s) found in configuration file : %s\n", wl->s);
+ err_code |= ret;
+ if (err_code & ERR_ABORT) {
+ free(env_cfgfiles);
+ exit(1);
+ }
+ }
+ /* remove temporary environment variables. */
+ unsetenv("HAPROXY_BRANCH");
+ unsetenv("HAPROXY_HTTP_LOG_FMT");
+ unsetenv("HAPROXY_HTTPS_LOG_FMT");
+ unsetenv("HAPROXY_TCP_LOG_FMT");
+
+ /* do not try to resolve arguments nor to spot inconsistencies when
+ * the configuration contains fatal errors caused by files not found
+ * or failed memory allocations.
+ */
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Fatal errors found in configuration.\n");
+ free(env_cfgfiles);
+ exit(1);
+ }
+ if (env_err) {
+ ha_alert("Could not allocate memory for HAPROXY_CFGFILES env variable\n");
+ exit(1);
+ }
+ setenv("HAPROXY_CFGFILES", env_cfgfiles, 1);
+ free(env_cfgfiles);
+
+ }
+ if (global.mode & MODE_MWORKER) {
+ struct mworker_proc *tmproc;
+
+ setenv("HAPROXY_MWORKER", "1", 1);
+
+ if (getenv("HAPROXY_MWORKER_REEXEC") == NULL) {
+
+ tmproc = mworker_proc_new();
+ if (!tmproc) {
+ ha_alert("Cannot allocate process structures.\n");
+ exit(EXIT_FAILURE);
+ }
+ tmproc->options |= PROC_O_TYPE_MASTER; /* master */
+ tmproc->pid = pid;
+ tmproc->timestamp = start_date.tv_sec;
+ proc_self = tmproc;
+
+ LIST_APPEND(&proc_list, &tmproc->list);
+ }
+
+ tmproc = mworker_proc_new();
+ if (!tmproc) {
+ ha_alert("Cannot allocate process structures.\n");
+ exit(EXIT_FAILURE);
+ }
+ tmproc->options |= PROC_O_TYPE_WORKER; /* worker */
+
+ if (mworker_cli_sockpair_new(tmproc, 0) < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ LIST_APPEND(&proc_list, &tmproc->list);
+ }
+
+ if (global.mode & MODE_MWORKER_WAIT) {
+ /* in exec mode, there's always exactly one thread. Failure to
+ * set these ones now will result in nbthread being detected
+ * automatically.
+ */
+ global.nbtgroups = 1;
+ global.nbthread = 1;
+ }
+
+ if (global.mode & (MODE_MWORKER|MODE_MWORKER_WAIT)) {
+ struct wordlist *it, *c;
+
+ master = 1;
+ /* get the info of the children in the env */
+ if (mworker_env_to_proc_list() < 0) {
+ exit(EXIT_FAILURE);
+ }
+
+ if (!LIST_ISEMPTY(&mworker_cli_conf)) {
+ char *path = NULL;
+
+ if (mworker_cli_proxy_create() < 0) {
+ ha_alert("Can't create the master's CLI.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ list_for_each_entry_safe(c, it, &mworker_cli_conf, list) {
+
+ if (mworker_cli_proxy_new_listener(c->s) == NULL) {
+ ha_alert("Can't create the master's CLI.\n");
+ exit(EXIT_FAILURE);
+ }
+ LIST_DELETE(&c->list);
+ free(c->s);
+ free(c);
+ }
+ /* Creates the mcli_reload listener, which is the listener used
+ * to retrieve the master CLI session which asked for the reload.
+ *
+ * ipc_fd[1] will be used as a listener, and ipc_fd[0]
+ * will be used to send the FD of the session.
+ *
+ * Both FDs will be kept in the master. The sockets are
+ * created only if they weren't inherited.
+ */
+ if ((proc_self->ipc_fd[1] == -1) &&
+ socketpair(AF_UNIX, SOCK_STREAM, 0, proc_self->ipc_fd) < 0) {
+ ha_alert("cannot create the mcli_reload socketpair.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Create the mcli_reload listener from the proc_self struct */
+ memprintf(&path, "sockpair@%d", proc_self->ipc_fd[1]);
+ mcli_reload_bind_conf = mworker_cli_proxy_new_listener(path);
+ if (mcli_reload_bind_conf == NULL) {
+ ha_alert("Cannot create the mcli_reload listener.\n");
+ exit(EXIT_FAILURE);
+ }
+ ha_free(&path);
+ }
+ }
+
+ if (!LIST_ISEMPTY(&mworker_cli_conf) && !(arg_mode & MODE_MWORKER)) {
+ ha_alert("a master CLI socket was defined, but master-worker mode (-W) is not enabled.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* destroy unreferenced defaults proxies */
+ proxy_destroy_all_unref_defaults();
+
+ list_for_each_entry(prcf, &pre_check_list, list)
+ err_code |= prcf->fct();
+
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Fatal errors found in configuration.\n");
+ exit(1);
+ }
+
+ /* update the ready date that will be used to count the startup time
+ * during config checks (e.g. to schedule certain tasks if needed)
+ */
+ clock_update_date(0, 1);
+ clock_adjust_now_offset();
+ ready_date = date;
+
+
+ /* Note: global.nbthread will be initialized as part of this call */
+ err_code |= check_config_validity();
+
+ /* update the ready date to also account for the check time */
+ clock_update_date(0, 1);
+ clock_adjust_now_offset();
+ ready_date = date;
+
+ for (px = proxies_list; px; px = px->next) {
+ struct server *srv;
+ struct post_proxy_check_fct *ppcf;
+ struct post_server_check_fct *pscf;
+
+ if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+ continue;
+
+ list_for_each_entry(pscf, &post_server_check_list, list) {
+ for (srv = px->srv; srv; srv = srv->next)
+ err_code |= pscf->fct(srv);
+ }
+ list_for_each_entry(ppcf, &post_proxy_check_list, list)
+ err_code |= ppcf->fct(px);
+ }
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Fatal errors found in configuration.\n");
+ exit(1);
+ }
+
+ err_code |= pattern_finalize_config();
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Failed to finalize pattern config.\n");
+ exit(1);
+ }
+
+ if (global.rlimit_memmax_all)
+ global.rlimit_memmax = global.rlimit_memmax_all;
+
+#ifdef USE_NS
+ err_code |= netns_init();
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Failed to initialize namespace support.\n");
+ exit(1);
+ }
+#endif
+
+ thread_detect_binding_discrepancies();
+ thread_detect_more_than_cpus();
+
+ /* Apply server states */
+ apply_server_state();
+
+ for (px = proxies_list; px; px = px->next)
+ srv_compute_all_admin_states(px);
+
+ /* Apply servers' configured address */
+ err_code |= srv_init_addr();
+ if (err_code & (ERR_ABORT|ERR_FATAL)) {
+ ha_alert("Failed to initialize server(s) addr.\n");
+ exit(1);
+ }
+
+ if (warned & WARN_ANY && global.mode & MODE_ZERO_WARNING) {
+ ha_alert("Some warnings were found and 'zero-warning' is set. Aborting.\n");
+ exit(1);
+ }
+
+#if defined(HA_HAVE_DUMP_LIBS)
+ if (global.mode & MODE_DUMP_LIBS) {
+ qfprintf(stdout, "List of loaded object files:\n");
+ chunk_reset(&trash);
+ if (dump_libs(&trash, ((arg_mode & (MODE_QUIET|MODE_VERBOSE)) == MODE_VERBOSE)))
+ printf("%s", trash.area);
+ }
+#endif
+
+ if (global.mode & MODE_DUMP_KWD)
+ dump_registered_keywords();
+
+ if (global.mode & MODE_DIAG) {
+ cfg_run_diagnostics();
+ }
+
+ if (global.mode & MODE_CHECK) {
+ struct peers *pr;
+ struct proxy *px;
+
+ if (warned & WARN_ANY)
+ qfprintf(stdout, "Warnings were found.\n");
+
+ for (pr = cfg_peers; pr; pr = pr->next)
+ if (pr->peers_fe)
+ break;
+
+ for (px = proxies_list; px; px = px->next)
+ if (!(px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && px->li_all)
+ break;
+
+ if (!px) {
+ /* We may only have log-forward section */
+ for (px = cfg_log_forward; px; px = px->next)
+ if (!(px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && px->li_all)
+ break;
+ }
+
+ if (pr || px) {
+ /* At least one peer or one listener has been found */
+ if (global.mode & MODE_VERBOSE)
+ qfprintf(stdout, "Configuration file is valid\n");
+ deinit_and_exit(0);
+ }
+ qfprintf(stdout, "Configuration file has no error but will not start (no listener) => exit(2).\n");
+ exit(2);
+ }
+
+ if (global.mode & MODE_DUMP_CFG)
+ deinit_and_exit(0);
+
+#ifdef USE_OPENSSL
+
+ /* Initialize SSL random generator. Must be called before chroot for
+ * access to /dev/urandom, and before ha_random_boot() which may use
+ * RAND_bytes().
+ */
+ if (!ssl_initialize_random()) {
+ ha_alert("OpenSSL random data generator initialization failed.\n");
+ exit(EXIT_FAILURE);
+ }
+#endif
+ ha_random_boot(argv); // the argv pointer brings some kernel-fed entropy
+
+ /* now we know the buffer size, we can initialize the channels and buffers */
+ init_buffer();
+
+ list_for_each_entry(pcf, &post_check_list, list) {
+ err_code |= pcf->fct();
+ if (err_code & (ERR_ABORT|ERR_FATAL))
+ exit(1);
+ }
+
+ /* set the default maxconn in the master, but let it be rewritable with -n */
+ if (global.mode & MODE_MWORKER_WAIT)
+ global.maxconn = MASTER_MAXCONN;
+
+ if (cfg_maxconn > 0)
+ global.maxconn = cfg_maxconn;
+
+ if (global.cli_fe)
+ global.maxsock += global.cli_fe->maxconn;
+
+ if (cfg_peers) {
+ /* peers also need to bypass global maxconn */
+ struct peers *p = cfg_peers;
+
+ for (p = cfg_peers; p; p = p->next)
+ if (p->peers_fe)
+ global.maxsock += p->peers_fe->maxconn;
+ }
+
+ /* Now we want to compute the maxconn and possibly maxsslconn values.
+ * It's a bit tricky. Maxconn defaults to the pre-computed value based
+ * on rlim_fd_cur and the number of FDs in use due to the configuration,
+ * and maxsslconn defaults to DEFAULT_MAXSSLCONN. On top of that we can
+ * enforce a lower limit based on memmax.
+ *
+ * If memmax is set, then it depends on which values are set. If
+ * maxsslconn is set, we use memmax to determine how many cleartext
+ * connections may be added, and set maxconn to the sum of the two.
+ * If maxconn is set and not maxsslconn, maxsslconn is computed from
+ * the remaining amount of memory between memmax and the cleartext
+ * connections. If neither are set, then it is considered that all
+ * connections are SSL-capable, and maxconn is computed based on this,
+ * then maxsslconn accordingly. We need to know if SSL is used on the
+ * frontends, backends, or both, because when it's used on both sides,
+ * we need twice the value for maxsslconn, but we only count the
+ * handshake once since it is not performed on the two sides at the
+ * same time (frontend-side is terminated before backend-side begins).
+ * The SSL stack is supposed to have filled ssl_session_cost and
+ * ssl_handshake_cost during its initialization. In any case, if
+ * SYSTEM_MAXCONN is set, we still enforce it as an upper limit for
+ * maxconn in order to protect the system.
+ */
+ ideal_maxconn = compute_ideal_maxconn();
+
+ if (!global.rlimit_memmax) {
+ if (global.maxconn == 0) {
+ global.maxconn = ideal_maxconn;
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG))
+ fprintf(stderr, "Note: setting global.maxconn to %d.\n", global.maxconn);
+ }
+ }
+#ifdef USE_OPENSSL
+ else if (!global.maxconn && !global.maxsslconn &&
+ (global.ssl_used_frontend || global.ssl_used_backend)) {
+ /* memmax is set, compute everything automatically. Here we want
+ * to ensure that all SSL connections will be served. We take
+ * care of the number of sides where SSL is used, and consider
+ * the worst case : SSL used on both sides and doing a handshake
+ * simultaneously. Note that we can't have more than maxconn
+ * handshakes at a time by definition, so for the worst case of
+ * two SSL conns per connection, we count a single handshake.
+ */
+ int sides = !!global.ssl_used_frontend + !!global.ssl_used_backend;
+ int64_t mem = global.rlimit_memmax * 1048576ULL;
+ int retried = 0;
+
+ mem -= global.tune.sslcachesize * 200ULL; // about 200 bytes per SSL cache entry
+ mem -= global.maxzlibmem;
+ mem = mem * MEM_USABLE_RATIO;
+
+ /* Principle: we test once to set maxconn according to the free
+ * memory. If it results in values the system rejects, we try a
+ * second time by respecting rlim_fd_max. If it fails again, we
+ * go back to the initial value and will let the final code
+ * dealing with rlimit report the error. That's up to 3 attempts.
+ */
+ do {
+ global.maxconn = mem /
+ ((STREAM_MAX_COST + 2 * global.tune.bufsize) + // stream + 2 buffers per stream
+ sides * global.ssl_session_max_cost + // SSL buffers, one per side
+ global.ssl_handshake_max_cost); // 1 handshake per connection max
+
+ if (retried == 1)
+ global.maxconn = MIN(global.maxconn, ideal_maxconn);
+ global.maxconn = round_2dig(global.maxconn);
+#ifdef SYSTEM_MAXCONN
+ if (global.maxconn > SYSTEM_MAXCONN)
+ global.maxconn = SYSTEM_MAXCONN;
+#endif /* SYSTEM_MAXCONN */
+ global.maxsslconn = sides * global.maxconn;
+
+ if (check_if_maxsock_permitted(compute_ideal_maxsock(global.maxconn)))
+ break;
+ } while (retried++ < 2);
+
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG))
+ fprintf(stderr, "Note: setting global.maxconn to %d and global.maxsslconn to %d.\n",
+ global.maxconn, global.maxsslconn);
+ }
+ else if (!global.maxsslconn &&
+ (global.ssl_used_frontend || global.ssl_used_backend)) {
+ /* memmax and maxconn are known, compute maxsslconn automatically.
+ * maxsslconn being forced, we don't know how many of it will be
+ * on each side if both sides are being used. The worst case is
+ * when all connections use only one SSL instance because
+ * handshakes may be on two sides at the same time.
+ */
+ int sides = !!global.ssl_used_frontend + !!global.ssl_used_backend;
+ int64_t mem = global.rlimit_memmax * 1048576ULL;
+ int64_t sslmem;
+
+ mem -= global.tune.sslcachesize * 200ULL; // about 200 bytes per SSL cache entry
+ mem -= global.maxzlibmem;
+ mem = mem * MEM_USABLE_RATIO;
+
+ sslmem = mem - global.maxconn * (int64_t)(STREAM_MAX_COST + 2 * global.tune.bufsize);
+ global.maxsslconn = sslmem / (global.ssl_session_max_cost + global.ssl_handshake_max_cost);
+ global.maxsslconn = round_2dig(global.maxsslconn);
+
+ if (sslmem <= 0 || global.maxsslconn < sides) {
+ ha_alert("Cannot compute the automatic maxsslconn because global.maxconn is already too "
+ "high for the global.memmax value (%d MB). The absolute maximum possible value "
+ "without SSL is %d, but %d was found and SSL is in use.\n",
+ global.rlimit_memmax,
+ (int)(mem / (STREAM_MAX_COST + 2 * global.tune.bufsize)),
+ global.maxconn);
+ exit(1);
+ }
+
+ if (global.maxsslconn > sides * global.maxconn)
+ global.maxsslconn = sides * global.maxconn;
+
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG))
+ fprintf(stderr, "Note: setting global.maxsslconn to %d\n", global.maxsslconn);
+ }
+#endif
+ else if (!global.maxconn) {
+ /* memmax and maxsslconn are known/unused, compute maxconn automatically */
+ int sides = !!global.ssl_used_frontend + !!global.ssl_used_backend;
+ int64_t mem = global.rlimit_memmax * 1048576ULL;
+ int64_t clearmem;
+ int retried = 0;
+
+ if (global.ssl_used_frontend || global.ssl_used_backend)
+ mem -= global.tune.sslcachesize * 200ULL; // about 200 bytes per SSL cache entry
+
+ mem -= global.maxzlibmem;
+ mem = mem * MEM_USABLE_RATIO;
+
+ clearmem = mem;
+ if (sides)
+ clearmem -= (global.ssl_session_max_cost + global.ssl_handshake_max_cost) * (int64_t)global.maxsslconn;
+
+ /* Principle: we test once to set maxconn according to the free
+ * memory. If it results in values the system rejects, we try a
+ * second time by respecting rlim_fd_max. If it fails again, we
+ * go back to the initial value and will let the final code
+ * dealing with rlimit report the error. That's up to 3 attempts.
+ */
+ do {
+ global.maxconn = clearmem / (STREAM_MAX_COST + 2 * global.tune.bufsize);
+ if (retried == 1)
+ global.maxconn = MIN(global.maxconn, ideal_maxconn);
+ global.maxconn = round_2dig(global.maxconn);
+#ifdef SYSTEM_MAXCONN
+ if (global.maxconn > SYSTEM_MAXCONN)
+ global.maxconn = SYSTEM_MAXCONN;
+#endif /* SYSTEM_MAXCONN */
+
+ if (clearmem <= 0 || !global.maxconn) {
+ ha_alert("Cannot compute the automatic maxconn because global.maxsslconn is already too "
+ "high for the global.memmax value (%d MB). The absolute maximum possible value "
+ "is %d, but %d was found.\n",
+ global.rlimit_memmax,
+ (int)(mem / (global.ssl_session_max_cost + global.ssl_handshake_max_cost)),
+ global.maxsslconn);
+ exit(1);
+ }
+
+ if (check_if_maxsock_permitted(compute_ideal_maxsock(global.maxconn)))
+ break;
+ } while (retried++ < 2);
+
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG)) {
+ if (sides && global.maxsslconn > sides * global.maxconn) {
+ fprintf(stderr, "Note: global.maxsslconn is forced to %d which causes global.maxconn "
+ "to be limited to %d. Better reduce global.maxsslconn to get more "
+ "room for extra connections.\n", global.maxsslconn, global.maxconn);
+ }
+ fprintf(stderr, "Note: setting global.maxconn to %d\n", global.maxconn);
+ }
+ }
+
+ global.maxsock = compute_ideal_maxsock(global.maxconn);
+ global.hardmaxconn = global.maxconn;
+ if (!global.maxpipes)
+ global.maxpipes = compute_ideal_maxpipes();
+
+ /* update connection pool thresholds */
+ global.tune.pool_low_count = ((long long)global.maxsock * global.tune.pool_low_ratio + 99) / 100;
+ global.tune.pool_high_count = ((long long)global.maxsock * global.tune.pool_high_ratio + 99) / 100;
+
+ proxy_adjust_all_maxconn();
+
+ if (global.tune.maxpollevents <= 0)
+ global.tune.maxpollevents = MAX_POLL_EVENTS;
+
+ if (global.tune.runqueue_depth <= 0) {
+ /* tests on various thread counts from 1 to 64 have shown an
+ * optimal queue depth following roughly 1/sqrt(threads).
+ */
+ int s = my_flsl(global.nbthread);
+ s += (global.nbthread / s); // roughly twice the sqrt.
+ global.tune.runqueue_depth = RUNQUEUE_DEPTH * 2 / s;
+ }
+
+ if (global.tune.recv_enough == 0)
+ global.tune.recv_enough = MIN_RECV_AT_ONCE_ENOUGH;
+
+ if (global.tune.maxrewrite >= global.tune.bufsize / 2)
+ global.tune.maxrewrite = global.tune.bufsize / 2;
+
+ usermsgs_clr(NULL);
+
+ if (arg_mode & (MODE_DEBUG | MODE_FOREGROUND)) {
+ /* command line debug mode inhibits configuration mode */
+ global.mode &= ~(MODE_DAEMON | MODE_QUIET);
+ global.mode |= (arg_mode & (MODE_DEBUG | MODE_FOREGROUND));
+ }
+
+ if (arg_mode & MODE_DAEMON) {
+ /* command line daemon mode inhibits foreground and debug modes mode */
+ global.mode &= ~(MODE_DEBUG | MODE_FOREGROUND);
+ global.mode |= arg_mode & MODE_DAEMON;
+ }
+
+ global.mode |= (arg_mode & (MODE_QUIET | MODE_VERBOSE));
+
+ if ((global.mode & MODE_DEBUG) && (global.mode & (MODE_DAEMON | MODE_QUIET))) {
+ ha_warning("<debug> mode incompatible with <quiet> and <daemon>. Keeping <debug> only.\n");
+ global.mode &= ~(MODE_DAEMON | MODE_QUIET);
+ }
+
+ /* Realloc trash buffers because global.tune.bufsize may have changed */
+ if (!init_trash_buffers(0)) {
+ ha_alert("failed to initialize trash buffers.\n");
+ exit(1);
+ }
+
+ if (!init_log_buffers()) {
+ ha_alert("failed to initialize log buffers.\n");
+ exit(1);
+ }
+
+ if (!cluster_secret_isset)
+ generate_random_cluster_secret();
+
+ /*
+ * Note: we could register external pollers here.
+ * Built-in pollers have been registered before main().
+ */
+
+ if (!(global.tune.options & GTUNE_USE_KQUEUE))
+ disable_poller("kqueue");
+
+ if (!(global.tune.options & GTUNE_USE_EVPORTS))
+ disable_poller("evports");
+
+ if (!(global.tune.options & GTUNE_USE_EPOLL))
+ disable_poller("epoll");
+
+ if (!(global.tune.options & GTUNE_USE_POLL))
+ disable_poller("poll");
+
+ if (!(global.tune.options & GTUNE_USE_SELECT))
+ disable_poller("select");
+
+ /* Note: we could disable any poller by name here */
+
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG)) {
+ list_pollers(stderr);
+ fprintf(stderr, "\n");
+ list_filters(stderr);
+ }
+
+ if (!init_pollers()) {
+ ha_alert("No polling mechanism available.\n"
+ " This may happen when using thread-groups with old pollers (poll/select), or\n"
+ " it is possible that haproxy was built with TARGET=generic and that FD_SETSIZE\n"
+ " is too low on this platform to support maxconn and the number of listeners\n"
+ " and servers. You should rebuild haproxy specifying your system using TARGET=\n"
+ " in order to support other polling systems (poll, epoll, kqueue) or reduce the\n"
+ " global maxconn setting to accommodate the system's limitation. For reference,\n"
+ " FD_SETSIZE=%d on this system, global.maxconn=%d resulting in a maximum of\n"
+ " %d file descriptors. You should thus reduce global.maxconn by %d. Also,\n"
+ " check build settings using 'haproxy -vv'.\n\n",
+ FD_SETSIZE, global.maxconn, global.maxsock, (global.maxsock + 1 - FD_SETSIZE) / 2);
+ exit(1);
+ }
+ if (global.mode & (MODE_VERBOSE|MODE_DEBUG)) {
+ printf("Using %s() as the polling mechanism.\n", cur_poller.name);
+ }
+
+ if (!global.node)
+ global.node = strdup(hostname);
+
+ /* stop disabled proxies */
+ for (px = proxies_list; px; px = px->next) {
+ if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+ stop_proxy(px);
+ }
+
+ if (!hlua_post_init())
+ exit(1);
+
+ /* Set the per-thread pool cache size to the default value if not set.
+ * This is the right place to decide to automatically adjust it (e.g.
+ * check L2 cache size, thread counts or take into account certain
+ * expensive pools).
+ */
+ if (!global.tune.pool_cache_size)
+ global.tune.pool_cache_size = CONFIG_HAP_POOL_CACHE_SIZE;
+
+ /* fill in a few info about our version and build options */
+ chunk_reset(&trash);
+
+ /* toolchain */
+ cc = chunk_newstr(&trash);
+#if defined(__clang_version__)
+ chunk_appendf(&trash, "clang-" __clang_version__);
+#elif defined(__VERSION__)
+ chunk_appendf(&trash, "gcc-" __VERSION__);
+#endif
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+ chunk_appendf(&trash, "+asan");
+#endif
+ /* toolchain opts */
+ cflags = chunk_newstr(&trash);
+#ifdef BUILD_CC
+ chunk_appendf(&trash, "%s", BUILD_CC);
+#endif
+#ifdef BUILD_CFLAGS
+ chunk_appendf(&trash, " %s", BUILD_CFLAGS);
+#endif
+#ifdef BUILD_DEBUG
+ chunk_appendf(&trash, " %s", BUILD_DEBUG);
+#endif
+ /* settings */
+ opts = chunk_newstr(&trash);
+#ifdef BUILD_TARGET
+ chunk_appendf(&trash, "TARGET='%s'", BUILD_TARGET);
+#endif
+#ifdef BUILD_CPU
+ chunk_appendf(&trash, " CPU='%s'", BUILD_CPU);
+#endif
+#ifdef BUILD_OPTIONS
+ chunk_appendf(&trash, " %s", BUILD_OPTIONS);
+#endif
+
+ post_mortem_add_component("haproxy", haproxy_version, cc, cflags, opts, argv[0]);
+}
+
+/* Releases, in a single pass, everything that was allocated during startup
+ * and configuration parsing: listeners, proxies, uri_auth chains, loggers,
+ * config file names, all registered init/check/deinit callback descriptors,
+ * process-wide variables, pools and pollers. It is meant to be called once
+ * on the clean stopping path (see deinit_and_exit()). When the user enabled
+ * the quick-exit tunable, the whole phase is skipped since the OS will
+ * reclaim all resources anyway.
+ */
+void deinit(void)
+{
+	struct proxy *p = proxies_list, *p0;
+	struct wordlist *wl, *wlb;
+	struct uri_auth *uap, *ua = NULL;
+	struct logger *log, *logb;
+	struct build_opts_str *bol, *bolb;
+	struct post_deinit_fct *pdf, *pdfb;
+	struct proxy_deinit_fct *pxdf, *pxdfb;
+	struct server_deinit_fct *srvdf, *srvdfb;
+	struct per_thread_init_fct *tif, *tifb;
+	struct per_thread_deinit_fct *tdf, *tdfb;
+	struct per_thread_alloc_fct *taf, *tafb;
+	struct per_thread_free_fct *tff, *tffb;
+	struct post_server_check_fct *pscf, *pscfb;
+	struct post_check_fct *pcf, *pcfb;
+	struct post_proxy_check_fct *ppcf, *ppcfb;
+	struct pre_check_fct *prcf, *prcfb;
+	struct cfg_postparser *pprs, *pprsb;
+	int cur_fd;
+
+	/* the user may want to skip this phase */
+	if (global.tune.options & GTUNE_QUICK_EXIT)
+		return;
+
+	/* At this point the listeners state is weird:
+	 *  - most listeners are still bound and referenced in their protocol
+	 *  - some might be zombies that are not in their proto anymore, but
+	 *    still appear in their proxy's listeners with a valid FD.
+	 *  - some might be stopped and still appear in their proxy as FD #-1
+	 *  - among all of them, some might be inherited hence shared and we're
+	 *    not allowed to pause them or whatever, we must just close them.
+	 *  - finally some are not listeners (pipes, logs, stdout, etc) and
+	 *    must be left intact.
+	 *
+	 * The safe way to proceed is to unbind (and close) whatever is not yet
+	 * unbound so that no more receiver/listener remains alive. Then close
+	 * remaining listener FDs, which correspond to zombie listeners (those
+	 * belonging to disabled proxies that were in another process).
+	 * objt_listener() would be cleaner here but not converted yet.
+	 */
+	protocol_unbind_all();
+
+	/* sweep the whole FD table for leftover accept FDs (zombie listeners) */
+	for (cur_fd = 0; cur_fd < global.maxsock; cur_fd++) {
+		if (!fdtab || !fdtab[cur_fd].owner)
+			continue;
+
+		if (fdtab[cur_fd].iocb == &sock_accept_iocb) {
+			struct listener *l = fdtab[cur_fd].owner;
+
+			BUG_ON(l->state != LI_INIT);
+			unbind_listener(l);
+		}
+	}
+
+	deinit_signals();
+	while (p) {
+		/* build a list of unique uri_auths */
+		if (!ua)
+			ua = p->uri_auth;
+		else {
+			/* check if p->uri_auth is unique */
+			for (uap = ua; uap; uap=uap->next)
+				if (uap == p->uri_auth)
+					break;
+
+			if (!uap && p->uri_auth) {
+				/* add it, if it is */
+				p->uri_auth->next = ua;
+				ua = p->uri_auth;
+			}
+		}
+
+		/* detach the proxy from the list before freeing it */
+		p0 = p;
+		p = p->next;
+		free_proxy(p0);
+	}/* end while(p) */
+
+	/* we don't need to free sink_proxies_list nor cfg_log_forward proxies since
+	 * they are respectively cleaned up in sink_deinit() and deinit_log_forward()
+	 */
+
+	/* destroy all referenced defaults proxies  */
+	proxy_destroy_all_unref_defaults();
+
+	/* release the unique uri_auth chain collected above, including each
+	 * entry's rules, userlist and stat scopes
+	 */
+	while (ua) {
+		struct stat_scope *scope, *scopep;
+
+		uap = ua;
+		ua = ua->next;
+
+		free(uap->uri_prefix);
+		free(uap->auth_realm);
+		free(uap->node);
+		free(uap->desc);
+
+		userlist_free(uap->userlist);
+		free_act_rules(&uap->http_req_rules);
+
+		scope = uap->scope;
+		while (scope) {
+			scopep = scope;
+			scope = scope->next;
+
+			free(scopep->px_id);
+			free(scopep);
+		}
+
+		free(uap);
+	}
+
+	/* the global userlist (distinct from per-uri_auth ones freed above) */
+	userlist_free(userlist);
+
+	cfg_unregister_sections();
+
+	deinit_log_buffers();
+
+	/* run all registered post-deinit hooks before their descriptors are
+	 * freed further below
+	 */
+	list_for_each_entry(pdf, &post_deinit_list, list)
+		pdf->fct();
+
+	ha_free(&global.log_send_hostname);
+	chunk_destroy(&global.log_tag);
+	ha_free(&global.chroot);
+	ha_free(&global.pidfile);
+	ha_free(&global.node);
+	ha_free(&global.desc);
+	ha_free(&oldpids);
+	ha_free(&old_argv);
+	ha_free(&localpeer);
+	ha_free(&global.server_state_base);
+	ha_free(&global.server_state_file);
+	task_destroy(idle_conn_task);
+	idle_conn_task = NULL;
+
+	list_for_each_entry_safe(log, logb, &global.loggers, list) {
+		LIST_DEL_INIT(&log->list);
+		free_logger(log);
+	}
+
+	/* config file names recorded at startup */
+	list_for_each_entry_safe(wl, wlb, &cfg_cfgfiles, list) {
+		free(wl->s);
+		LIST_DELETE(&wl->list);
+		free(wl);
+	}
+
+	list_for_each_entry_safe(bol, bolb, &build_opts_list, list) {
+		if (bol->must_free)
+			free((void *)bol->str);
+		LIST_DELETE(&bol->list);
+		free(bol);
+	}
+
+	/* now free the descriptors of all registered init/check/deinit
+	 * callbacks; the callbacks themselves were already executed where
+	 * appropriate (the post-deinit ones just above).
+	 */
+	list_for_each_entry_safe(pxdf, pxdfb, &proxy_deinit_list, list) {
+		LIST_DELETE(&pxdf->list);
+		free(pxdf);
+	}
+
+	list_for_each_entry_safe(pdf, pdfb, &post_deinit_list, list) {
+		LIST_DELETE(&pdf->list);
+		free(pdf);
+	}
+
+	list_for_each_entry_safe(srvdf, srvdfb, &server_deinit_list, list) {
+		LIST_DELETE(&srvdf->list);
+		free(srvdf);
+	}
+
+	list_for_each_entry_safe(pcf, pcfb, &post_check_list, list) {
+		LIST_DELETE(&pcf->list);
+		free(pcf);
+	}
+
+	list_for_each_entry_safe(pscf, pscfb, &post_server_check_list, list) {
+		LIST_DELETE(&pscf->list);
+		free(pscf);
+	}
+
+	list_for_each_entry_safe(ppcf, ppcfb, &post_proxy_check_list, list) {
+		LIST_DELETE(&ppcf->list);
+		free(ppcf);
+	}
+
+	list_for_each_entry_safe(prcf, prcfb, &pre_check_list, list) {
+		LIST_DELETE(&prcf->list);
+		free(prcf);
+	}
+
+	list_for_each_entry_safe(tif, tifb, &per_thread_init_list, list) {
+		LIST_DELETE(&tif->list);
+		free(tif);
+	}
+
+	list_for_each_entry_safe(tdf, tdfb, &per_thread_deinit_list, list) {
+		LIST_DELETE(&tdf->list);
+		free(tdf);
+	}
+
+	list_for_each_entry_safe(taf, tafb, &per_thread_alloc_list, list) {
+		LIST_DELETE(&taf->list);
+		free(taf);
+	}
+
+	list_for_each_entry_safe(tff, tffb, &per_thread_free_list, list) {
+		LIST_DELETE(&tff->list);
+		free(tff);
+	}
+
+	list_for_each_entry_safe(pprs, pprsb, &postparsers, list) {
+		LIST_DELETE(&pprs->list);
+		free(pprs);
+	}
+
+	/* last: process-wide variables, then all pools, then the pollers */
+	vars_prune(&proc_vars, NULL, NULL);
+	pool_destroy_all();
+	deinit_pollers();
+} /* end deinit() */
+
+/* Marks the process as stopping, performs the full deinit() cleanup and
+ * exits with <status>. Never returns. This is the normal clean-exit path,
+ * as opposed to the bare exit() calls used on fatal startup errors.
+ */
+__attribute__((noreturn)) void deinit_and_exit(int status)
+{
+	global.mode |= MODE_STOPPING;
+	deinit();
+	exit(status);
+}
+
+/* Runs the per-thread polling loop: wakes expired tasks, processes signals
+ * (thread 0 only), runs runnable tasks, then either polls for I/O events or
+ * loops immediately when work is pending. The loop exits when all stoppable
+ * jobs are gone and every thread group has acknowledged stopping, or when
+ * the process was killed more than once (forced stop). The TH_FL_SLEEPING /
+ * TH_FL_NOTIFIED dance below is order-sensitive: SLEEPING must be set and
+ * re-checked against pending tasks/signals before deciding to sleep, to
+ * close the race with wake_thread()/wake_threads().
+ */
+void run_poll_loop()
+{
+	int next, wake;
+
+	/* advertise that this thread is actively running the loop */
+	_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_IN_LOOP);
+
+	clock_update_date(0,1);
+	while (1) {
+		wake_expired_tasks();
+
+		/* check if we caught some signals and process them in the
+		 first thread */
+		if (signal_queue_len && tid == 0) {
+			activity[tid].wake_signal++;
+			signal_process_queue();
+		}
+
+		/* Process a few tasks */
+		process_runnable_tasks();
+
+		/* also stop if we failed to cleanly stop all tasks */
+		if (killed > 1)
+			break;
+
+		/* expire immediately if events or signals are pending */
+		wake = 1;
+		if (thread_has_tasks())
+			activity[tid].wake_tasks++;
+		else {
+			_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_SLEEPING);
+			_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_NOTIFIED);
+			__ha_barrier_atomic_store();
+			if (thread_has_tasks()) {
+				activity[tid].wake_tasks++;
+				_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
+			} else if (signal_queue_len) {
+				/* this check is required after setting TH_FL_SLEEPING to avoid
+				 * a race with wakeup on signals using wake_threads() */
+				_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_SLEEPING);
+			} else
+				wake = 0;
+		}
+
+		if (!wake) {
+			int i;
+
+			if (stopping) {
+				/* stop muxes/quic-conns before acknowledging stopping */
+				if (!(tg_ctx->stopping_threads & ti->ltid_bit)) {
+					task_wakeup(mux_stopping_data[tid].task, TASK_WOKEN_OTHER);
+					wake = 1;
+				}
+
+				if (_HA_ATOMIC_OR_FETCH(&tg_ctx->stopping_threads, ti->ltid_bit) == ti->ltid_bit &&
+				    _HA_ATOMIC_OR_FETCH(&stopping_tgroup_mask, tg->tgid_bit) == tg->tgid_bit) {
+					/* first one to detect it, notify all threads that stopping was just set */
+					for (i = 0; i < global.nbthread; i++) {
+						if (_HA_ATOMIC_LOAD(&ha_thread_info[i].tg->threads_enabled) &
+						    ha_thread_info[i].ltid_bit &
+						    ~_HA_ATOMIC_LOAD(&ha_thread_info[i].tg_ctx->stopping_threads))
+							wake_thread(i);
+					}
+				}
+			}
+
+			/* stop when there's nothing left to do */
+			if ((jobs - unstoppable_jobs) == 0 &&
+			    (_HA_ATOMIC_LOAD(&stopping_tgroup_mask) & all_tgroups_mask) == all_tgroups_mask) {
+				/* check that all threads are aware of the stopping status */
+				for (i = 0; i < global.nbtgroups; i++)
+					if ((_HA_ATOMIC_LOAD(&ha_tgroup_ctx[i].stopping_threads) &
+					     _HA_ATOMIC_LOAD(&ha_tgroup_info[i].threads_enabled)) !=
+					    _HA_ATOMIC_LOAD(&ha_tgroup_info[i].threads_enabled))
+						break;
+#ifdef USE_THREAD
+				if (i == global.nbtgroups) {
+					/* all are OK, let's wake them all and stop */
+					for (i = 0; i < global.nbthread; i++)
+						if (i != tid && _HA_ATOMIC_LOAD(&ha_thread_info[i].tg->threads_enabled) & ha_thread_info[i].ltid_bit)
+							wake_thread(i);
+					break;
+				}
+#endif
+			}
+		}
+
+		/* If we have to sleep, measure how long */
+		next = wake ? TICK_ETERNITY : next_timer_expiry();
+
+		/* The poller will ensure it returns around <next> */
+		cur_poller.poll(&cur_poller, next, wake);
+
+		activity[tid].loops++;
+	}
+
+	/* no longer inside the loop; used by watchers/debuggers of thread state */
+	_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_IN_LOOP);
+}
+
+/* Per-thread entry point (pthread start routine; also called directly for
+ * the first/only thread). <data> is the thread's ha_thread_info pointer.
+ * Binds the thread, performs the per-thread allocation and init callbacks
+ * one thread at a time under a mutex (some init code releases global
+ * resources after reallocating them locally, and FD allocation must not
+ * race), enables protocols once the last thread is initialized, applies
+ * optional hardening (no-new-privs, fork disabling) in workers, then runs
+ * the polling loop until stopping. Returns NULL; non-zero tids exit via
+ * pthread_exit().
+ */
+static void *run_thread_poll_loop(void *data)
+{
+	struct per_thread_alloc_fct  *ptaf;
+	struct per_thread_init_fct   *ptif;
+	struct per_thread_deinit_fct *ptdf;
+	struct per_thread_free_fct   *ptff;
+	static int init_left = 0;
+	__decl_thread(static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER);
+	__decl_thread(static pthread_cond_t  init_cond  = PTHREAD_COND_INITIALIZER);
+
+	ha_set_thread(data);
+	set_thread_cpu_affinity();
+	clock_set_local_source();
+
+#ifdef USE_THREAD
+	ha_thread_info[tid].pth_id = ha_get_pthread_id(tid);
+#endif
+	ha_thread_info[tid].stack_top = __builtin_frame_address(0);
+
+	/* thread is started, from now on it is not idle nor harmless */
+	thread_harmless_end();
+	thread_idle_end();
+	_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STARTED);
+
+	/* Now, initialize one thread init at a time. This is better since
+	 * some init code is a bit tricky and may release global resources
+	 * after reallocating them locally. This will also ensure there is
+	 * no race on file descriptors allocation.
+	 */
+#ifdef USE_THREAD
+	pthread_mutex_lock(&init_mutex);
+#endif
+	/* The first thread must set the number of threads left */
+	if (!init_left)
+		init_left = global.nbthread;
+	init_left--;
+
+	clock_init_thread_date();
+
+	/* per-thread alloc calls performed here are not allowed to snoop on
+	 * other threads, so they are free to initialize at their own rhythm
+	 * as long as they act as if they were alone. None of them may rely
+	 * on resources initialized by the other ones.
+	 */
+	list_for_each_entry(ptaf, &per_thread_alloc_list, list) {
+		if (!ptaf->fct()) {
+			ha_alert("failed to allocate resources for thread %u.\n", tid);
+#ifdef USE_THREAD
+			pthread_mutex_unlock(&init_mutex);
+#endif
+			exit(1);
+		}
+	}
+
+	/* per-thread init calls performed here are not allowed to snoop on
+	 * other threads, so they are free to initialize at their own rhythm
+	 * as long as they act as if they were alone.
+	 */
+	list_for_each_entry(ptif, &per_thread_init_list, list) {
+		if (!ptif->fct()) {
+			ha_alert("failed to initialize thread %u.\n", tid);
+#ifdef USE_THREAD
+			pthread_mutex_unlock(&init_mutex);
+#endif
+			exit(1);
+		}
+	}
+
+	/* enabling protocols will result in fd_insert() calls to be performed,
+	 * we want all threads to have already allocated their local fd tables
+	 * before doing so, thus only the last thread does it.
+	 */
+	if (init_left == 0)
+		protocol_enable_all();
+
+#ifdef USE_THREAD
+	pthread_cond_broadcast(&init_cond);
+	pthread_mutex_unlock(&init_mutex);
+
+	/* now wait for other threads to finish starting */
+	pthread_mutex_lock(&init_mutex);
+	while (init_left)
+		pthread_cond_wait(&init_cond, &init_mutex);
+	pthread_mutex_unlock(&init_mutex);
+#endif
+
+#if defined(PR_SET_NO_NEW_PRIVS) && defined(USE_PRCTL)
+	/* Let's refrain from using setuid executables. This way the impact of
+	 * an eventual vulnerability in a library remains limited. It may
+	 * impact external checks but who cares about them anyway ? In the
+	 * worst case it's possible to disable the option. Obviously we do this
+	 * in workers only. We can't hard-fail on this one as it really is
+	 * implementation dependent though we're interested in feedback, hence
+	 * the warning.
+	 */
+	if (!(global.tune.options & GTUNE_INSECURE_SETUID) && !master) {
+		static int warn_fail;
+		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1 && !_HA_ATOMIC_FETCH_ADD(&warn_fail, 1)) {
+			ha_warning("Failed to disable setuid, please report to developers with detailed "
+				   "information about your operating system. You can silence this warning "
+				   "by adding 'insecure-setuid-wanted' in the 'global' section.\n");
+		}
+	}
+#endif
+
+#if defined(RLIMIT_NPROC)
+	/* all threads have started, it's now time to prevent any new thread
+	 * or process from starting. Obviously we do this in workers only. We
+	 * can't hard-fail on this one as it really is implementation dependent
+	 * though we're interested in feedback, hence the warning.
+	 */
+	if (!(global.tune.options & GTUNE_INSECURE_FORK) && !master) {
+		struct rlimit limit = { .rlim_cur = 0, .rlim_max = 0 };
+		static int warn_fail;
+
+		if (setrlimit(RLIMIT_NPROC, &limit) == -1 && !_HA_ATOMIC_FETCH_ADD(&warn_fail, 1)) {
+			ha_warning("Failed to disable forks, please report to developers with detailed "
+				   "information about your operating system. You can silence this warning "
+				   "by adding 'insecure-fork-wanted' in the 'global' section.\n");
+		}
+	}
+#endif
+	run_poll_loop();
+
+	/* back from the loop: run per-thread deinit then free callbacks */
+	list_for_each_entry(ptdf, &per_thread_deinit_list, list)
+		ptdf->fct();
+
+	list_for_each_entry(ptff, &per_thread_free_list, list)
+		ptff->fct();
+
+#ifdef USE_THREAD
+	/* clear this thread's bit from its group; the last thread of a group
+	 * also clears the group's bit from the global masks
+	 */
+	if (!_HA_ATOMIC_AND_FETCH(&ha_tgroup_info[ti->tgid-1].threads_enabled, ~ti->ltid_bit))
+		_HA_ATOMIC_AND(&all_tgroups_mask, ~tg->tgid_bit);
+	if (!_HA_ATOMIC_AND_FETCH(&tg_ctx->stopping_threads, ~ti->ltid_bit))
+		_HA_ATOMIC_AND(&stopping_tgroup_mask, ~tg->tgid_bit);
+	if (tid > 0)
+		pthread_exit(NULL);
+#endif
+	return NULL;
+}
+
+/* Drops privileges according to the global "uid"/"gid" settings, in the
+ * only safe order: supplementary groups, then gid, then uid (the gid can no
+ * longer be changed once the uid is dropped). With USE_LINUX_CAP, the
+ * configured capabilities are prepared before and re-applied after the
+ * setuid() call so they survive the uid change. On any failure, listeners
+ * are unbound and the process exits. <program_name> is only used to prefix
+ * the error messages.
+ */
+static void set_identity(const char *program_name)
+{
+	int from_uid __maybe_unused = geteuid();
+
+	if (global.gid) {
+		if (getgroups(0, NULL) > 0 && setgroups(0, NULL) == -1)
+			ha_warning("[%s.main()] Failed to drop supplementary groups. Using 'gid'/'group'"
+				   " without 'uid'/'user' is generally useless.\n", program_name);
+
+		if (setgid(global.gid) == -1) {
+			ha_alert("[%s.main()] Cannot set gid %d.\n", program_name, global.gid);
+			protocol_unbind_all();
+			exit(1);
+		}
+	}
+
+#if defined(USE_LINUX_CAP)
+	if (prepare_caps_for_setuid(from_uid, global.uid) < 0) {
+		ha_alert("[%s.main()] Cannot switch uid to %d.\n", program_name, global.uid);
+		protocol_unbind_all();
+		exit(1);
+	}
+#endif
+
+	if (global.uid && setuid(global.uid) == -1) {
+		ha_alert("[%s.main()] Cannot set uid %d.\n", program_name, global.uid);
+		protocol_unbind_all();
+		exit(1);
+	}
+
+#if defined(USE_LINUX_CAP)
+	if (finalize_caps_after_setuid(from_uid, global.uid) < 0) {
+		ha_alert("[%s.main()] Cannot switch uid to %d.\n", program_name, global.uid);
+		protocol_unbind_all();
+		exit(1);
+	}
+#endif
+}
+
/* Program entry point. Sanity-checks the toolchain, runs all initcalls in
 * dependency order, parses arguments and the configuration (init()), applies
 * rlimits, installs signal handlers, binds all listeners (retrying against a
 * previous process if needed), optionally daemonizes and/or forks the
 * master/worker processes, chroots and drops privileges, then enters the
 * polling loop and only leaves through deinit_and_exit(). Returns 1 or
 * exits non-zero on any fatal startup error.
 */
int main(int argc, char **argv)
{
	int err, retry;
	struct rlimit limit;
	int pidfd = -1;
	int intovf = (unsigned char)argc + 1; /* let the compiler know it's strictly positive */

	/* Catch broken toolchains */
	if (sizeof(long) != sizeof(void *) || (intovf + 0x7FFFFFFF >= intovf)) {
		const char *msg;

		if (sizeof(long) != sizeof(void *))
			/* Apparently MingW64 was not made for us and can also break openssl */
			msg = "The compiler this program was built with uses unsupported integral type sizes.\n"
			      "Most likely it follows the unsupported LLP64 model. Never try to link HAProxy\n"
			      "against libraries built with that compiler either! Please only use a compiler\n"
			      "producing ILP32 or LP64 programs for both programs and libraries.\n";
		else if (intovf + 0x7FFFFFFF >= intovf)
			/* Catch forced CFLAGS that miss 2-complement integer overflow */
			msg = "The source code was miscompiled by the compiler, which usually indicates that\n"
			      "some of the CFLAGS needed to work around overzealous compiler optimizations\n"
			      "were overwritten at build time. Please do not force CFLAGS, and read Makefile\n"
			      "and INSTALL files to decide on the best way to pass your local build options.\n";
		else
			msg = "Bug in the compiler bug detection code, please report it to developers!\n";

		fprintf(stderr,
		        "FATAL ERROR: invalid code detected -- cannot go further, please recompile!\n"
		        "%s"
		        "\nBuild options :"
#ifdef BUILD_TARGET
		        "\n TARGET = " BUILD_TARGET
#endif
#ifdef BUILD_CPU
		        "\n CPU = " BUILD_CPU
#endif
#ifdef BUILD_CC
		        "\n CC = " BUILD_CC
#endif
#ifdef BUILD_CFLAGS
		        "\n CFLAGS = " BUILD_CFLAGS
#endif
#ifdef BUILD_OPTIONS
		        "\n OPTIONS = " BUILD_OPTIONS
#endif
#ifdef BUILD_DEBUG
		        "\n DEBUG = " BUILD_DEBUG
#endif
		        "\n\n", msg);

		return 1;
	}

	setvbuf(stdout, NULL, _IONBF, 0);

	/* take a copy of initial limits before we possibly change them */
	getrlimit(RLIMIT_NOFILE, &limit);

	if (limit.rlim_max == RLIM_INFINITY)
		limit.rlim_max = limit.rlim_cur;
	rlim_fd_cur_at_boot = limit.rlim_cur;
	rlim_fd_max_at_boot = limit.rlim_max;

	/* process all initcalls in order of potential dependency */
	RUN_INITCALLS(STG_PREPARE);
	RUN_INITCALLS(STG_LOCK);
	RUN_INITCALLS(STG_REGISTER);

	/* now's time to initialize early boot variables */
	init_early(argc, argv);

	/* handles argument parsing */
	init_args(argc, argv);

	RUN_INITCALLS(STG_ALLOC);
	RUN_INITCALLS(STG_POOL);

	/* some code really needs to have the trash properly allocated */
	if (!trash.area) {
		ha_alert("failed to initialize trash buffers.\n");
		exit(1);
	}

	RUN_INITCALLS(STG_INIT);

	/* this is the late init where the config is parsed */
	init(argc, argv);

	/* install the runtime signal handlers (state dump, soft stop, ...) */
	signal_register_fct(SIGQUIT, dump, SIGQUIT);
	signal_register_fct(SIGUSR1, sig_soft_stop, SIGUSR1);
	signal_register_fct(SIGHUP, sig_dump_state, SIGHUP);
	signal_register_fct(SIGUSR2, NULL, 0);

	/* Always catch SIGPIPE even on platforms which define MSG_NOSIGNAL.
	 * Some recent FreeBSD setups report broken pipes, and MSG_NOSIGNAL
	 * was defined there, so let's stay on the safe side.
	 */
	signal_register_fct(SIGPIPE, NULL, 0);

	/* ulimits */
	if (!global.rlimit_nofile)
		global.rlimit_nofile = global.maxsock;

	if (global.rlimit_nofile) {
		limit.rlim_cur = global.rlimit_nofile;
		limit.rlim_max = MAX(rlim_fd_max_at_boot, limit.rlim_cur);

		if ((global.fd_hard_limit && limit.rlim_cur > global.fd_hard_limit) ||
		    raise_rlim_nofile(NULL, &limit) != 0) {
			getrlimit(RLIMIT_NOFILE, &limit);
			if (global.fd_hard_limit && limit.rlim_cur > global.fd_hard_limit)
				limit.rlim_cur = global.fd_hard_limit;

			if (global.tune.options & GTUNE_STRICT_LIMITS) {
				ha_alert("[%s.main()] Cannot raise FD limit to %d, limit is %d.\n",
					 argv[0], global.rlimit_nofile, (int)limit.rlim_cur);
				exit(1);
			}
			else {
				/* try to set it to the max possible at least */
				limit.rlim_cur = limit.rlim_max;
				if (global.fd_hard_limit && limit.rlim_cur > global.fd_hard_limit)
					limit.rlim_cur = global.fd_hard_limit;

				if (raise_rlim_nofile(&limit, &limit) == 0)
					getrlimit(RLIMIT_NOFILE, &limit);

				ha_warning("[%s.main()] Cannot raise FD limit to %d, limit is %d.\n",
					   argv[0], global.rlimit_nofile, (int)limit.rlim_cur);
				global.rlimit_nofile = limit.rlim_cur;
			}
		}
	}

	if (global.rlimit_memmax) {
		limit.rlim_cur = limit.rlim_max =
			global.rlimit_memmax * 1048576ULL;
#ifdef RLIMIT_AS
		if (setrlimit(RLIMIT_AS, &limit) == -1) {
			if (global.tune.options & GTUNE_STRICT_LIMITS) {
				ha_alert("[%s.main()] Cannot fix MEM limit to %d megs.\n",
					 argv[0], global.rlimit_memmax);
				exit(1);
			}
			else
				ha_warning("[%s.main()] Cannot fix MEM limit to %d megs.\n",
					   argv[0], global.rlimit_memmax);
		}
#else
		if (setrlimit(RLIMIT_DATA, &limit) == -1) {
			if (global.tune.options & GTUNE_STRICT_LIMITS) {
				ha_alert("[%s.main()] Cannot fix MEM limit to %d megs.\n",
					 argv[0], global.rlimit_memmax);
				exit(1);
			}
			else
				ha_warning("[%s.main()] Cannot fix MEM limit to %d megs.\n",
					   argv[0], global.rlimit_memmax);
		}
#endif
	}

	/* Try to get the listeners FD from the previous process using
	 * _getsocks on the stat socket, it must never been done in wait mode
	 * and check mode
	 */
	if (old_unixsocket &&
	    !(global.mode & (MODE_MWORKER_WAIT|MODE_CHECK|MODE_CHECK_CONDITION))) {
		if (strcmp("/dev/null", old_unixsocket) != 0) {
			if (sock_get_old_sockets(old_unixsocket) != 0) {
				ha_alert("Failed to get the sockets from the old process!\n");
				if (!(global.mode & MODE_MWORKER))
					exit(1);
			}
		}
	}

	/* We will loop at most 100 times with 10 ms delay each time.
	 * That's at most 1 second. We only send a signal to old pids
	 * if we cannot grab at least one port.
	 */
	retry = MAX_START_RETRIES;
	err = ERR_NONE;
	while (retry >= 0) {
		struct timeval w;
		err = protocol_bind_all(retry == 0 || nb_oldpids == 0);
		/* exit the loop on no error or fatal error */
		if ((err & (ERR_RETRYABLE|ERR_FATAL)) != ERR_RETRYABLE)
			break;
		if (nb_oldpids == 0 || retry == 0)
			break;

		/* FIXME-20060514: Solaris and OpenBSD do not support shutdown() on
		 * listening sockets. So on those platforms, it would be wiser to
		 * simply send SIGUSR1, which will not be undoable.
		 */
		if (tell_old_pids(SIGTTOU) == 0) {
			/* no need to wait if we can't contact old pids */
			retry = 0;
			continue;
		}
		/* give some time to old processes to stop listening */
		w.tv_sec = 0;
		w.tv_usec = 10*1000;
		select(0, NULL, NULL, NULL, &w);
		retry--;
	}

	/* Note: protocol_bind_all() sends an alert when it fails. */
	if ((err & ~ERR_WARN) != ERR_NONE) {
		ha_alert("[%s.main()] Some protocols failed to start their listeners! Exiting.\n", argv[0]);
		if (retry != MAX_START_RETRIES && nb_oldpids)
			tell_old_pids(SIGTTIN);
		protocol_unbind_all(); /* cleanup everything we can */
		exit(1);
	}

	if (!(global.mode & MODE_MWORKER_WAIT) && listeners == 0) {
		ha_alert("[%s.main()] No enabled listener found (check for 'bind' directives) ! Exiting.\n", argv[0]);
		/* Note: we don't have to send anything to the old pids because we
		 * never stopped them. */
		exit(1);
	}

	/* Ok, all listeners should now be bound, close any leftover sockets
	 * the previous process gave us, we don't need them anymore
	 */
	sock_drop_unused_old_sockets();

	/* prepare pause/play signals */
	signal_register_fct(SIGTTOU, sig_pause, SIGTTOU);
	signal_register_fct(SIGTTIN, sig_listen, SIGTTIN);

	/* MODE_QUIET can inhibit alerts and warnings below this line */

	if (getenv("HAPROXY_MWORKER_REEXEC") != NULL) {
		/* either stdin/out/err are already closed or should stay as they are. */
		if ((global.mode & MODE_DAEMON)) {
			/* daemon mode re-executing, stdin/stdout/stderr are already closed so keep quiet */
			global.mode &= ~MODE_VERBOSE;
			global.mode |= MODE_QUIET; /* ensure that we won't say anything from now */
		}
	} else {
		if ((global.mode & MODE_QUIET) && !(global.mode & MODE_VERBOSE)) {
			/* detach from the tty */
			stdio_quiet(-1);
		}
	}

	/* open log & pid files before the chroot */
	if ((global.mode & MODE_DAEMON || global.mode & MODE_MWORKER) &&
	    !(global.mode & MODE_MWORKER_WAIT) && global.pidfile != NULL) {
		unlink(global.pidfile);
		pidfd = open(global.pidfile, O_CREAT | O_WRONLY | O_TRUNC, 0644);
		if (pidfd < 0) {
			ha_alert("[%s.main()] Cannot create pidfile %s\n", argv[0], global.pidfile);
			if (nb_oldpids)
				tell_old_pids(SIGTTIN);
			protocol_unbind_all();
			exit(1);
		}
	}

	if ((global.last_checks & LSTCHK_NETADM) && global.uid) {
		ha_alert("[%s.main()] Some configuration options require full privileges, so global.uid cannot be changed.\n"
			 "", argv[0]);
		protocol_unbind_all();
		exit(1);
	}

	/* If the user is not root, we'll still let them try the configuration
	 * but we inform them that unexpected behaviour may occur.
	 */
	if ((global.last_checks & LSTCHK_NETADM) && getuid())
		ha_warning("[%s.main()] Some options which require full privileges"
			   " might not work well.\n"
			   "", argv[0]);

	if ((global.mode & (MODE_MWORKER|MODE_DAEMON)) == 0) {

		/* chroot if needed */
		if (global.chroot != NULL) {
			if (chroot(global.chroot) == -1 || chdir("/") == -1) {
				ha_alert("[%s.main()] Cannot chroot(%s).\n", argv[0], global.chroot);
				if (nb_oldpids)
					tell_old_pids(SIGTTIN);
				protocol_unbind_all();
				exit(1);
			}
		}
	}

	if (nb_oldpids && !(global.mode & MODE_MWORKER_WAIT))
		nb_oldpids = tell_old_pids(oldpids_sig);

	/* send a SIGTERM to workers who have a too high reloads number */
	if ((global.mode & MODE_MWORKER) && !(global.mode & MODE_MWORKER_WAIT))
		mworker_kill_max_reloads(SIGTERM);

	/* Note that any error at this stage will be fatal because we will not
	 * be able to restart the old pids.
	 */

	if ((global.mode & (MODE_MWORKER | MODE_DAEMON)) == 0)
		set_identity(argv[0]);

	/* check ulimits */
	limit.rlim_cur = limit.rlim_max = 0;
	getrlimit(RLIMIT_NOFILE, &limit);
	if (limit.rlim_cur < global.maxsock) {
		if (global.tune.options & GTUNE_STRICT_LIMITS) {
			ha_alert("[%s.main()] FD limit (%d) too low for maxconn=%d/maxsock=%d. "
				 "Please raise 'ulimit-n' to %d or more to avoid any trouble.\n",
				 argv[0], (int)limit.rlim_cur, global.maxconn, global.maxsock,
				 global.maxsock);
			exit(1);
		}
		else
			ha_alert("[%s.main()] FD limit (%d) too low for maxconn=%d/maxsock=%d. "
				 "Please raise 'ulimit-n' to %d or more to avoid any trouble.\n",
				 argv[0], (int)limit.rlim_cur, global.maxconn, global.maxsock,
				 global.maxsock);
	}

	if (global.prealloc_fd && fcntl((int)limit.rlim_cur - 1, F_GETFD) == -1) {
		if (dup2(0, (int)limit.rlim_cur - 1) == -1)
			ha_warning("[%s.main()] Unable to preallocate file descriptor %d : %s",
				   argv[0], (int)limit.rlim_cur - 1, strerror(errno));
		else
			close((int)limit.rlim_cur - 1);
	}

	/* update the ready date a last time to also account for final setup time */
	clock_update_date(0, 1);
	clock_adjust_now_offset();
	ready_date = date;

	if (global.mode & (MODE_DAEMON | MODE_MWORKER | MODE_MWORKER_WAIT)) {
		int ret = 0;
		int in_parent = 0;
		int devnullfd = -1;

		/*
		 * if daemon + mworker: must fork here to let a master
		 * process live in background before forking children
		 */

		if ((getenv("HAPROXY_MWORKER_REEXEC") == NULL)
		    && (global.mode & MODE_MWORKER)
		    && (global.mode & MODE_DAEMON)) {
			ret = fork();
			if (ret < 0) {
				ha_alert("[%s.main()] Cannot fork.\n", argv[0]);
				protocol_unbind_all();
				exit(1); /* there has been an error */
			} else if (ret > 0) { /* parent leave to daemonize */
				exit(0);
			} else /* change the process group ID in the child (master process) */
				setsid();
		}


		/* if in master-worker mode, write the PID of the father */
		if (global.mode & MODE_MWORKER) {
			char pidstr[100];
			snprintf(pidstr, sizeof(pidstr), "%d\n", (int)getpid());
			if (pidfd >= 0)
				DISGUISE(write(pidfd, pidstr, strlen(pidstr)));
		}

		/* the father launches the required number of processes */
		if (!(global.mode & MODE_MWORKER_WAIT)) {
			struct ring *tmp_startup_logs = NULL;

			if (global.mode & MODE_MWORKER)
				mworker_ext_launch_all();

			/* at this point the worker must have his own startup_logs buffer */
			tmp_startup_logs = startup_logs_dup(startup_logs);
			ret = fork();
			if (ret < 0) {
				ha_alert("[%s.main()] Cannot fork.\n", argv[0]);
				protocol_unbind_all();
				exit(1); /* there has been an error */
			}
			else if (ret == 0) { /* child breaks here */
				startup_logs_free(startup_logs);
				startup_logs = tmp_startup_logs;
				/* This one must not be exported, it's internal! */
				unsetenv("HAPROXY_MWORKER_REEXEC");
				ha_random_jump96(1);
			}
			else { /* parent here */
				in_parent = 1;

				if (pidfd >= 0 && !(global.mode & MODE_MWORKER)) {
					char pidstr[100];
					snprintf(pidstr, sizeof(pidstr), "%d\n", ret);
					DISGUISE(write(pidfd, pidstr, strlen(pidstr)));
				}
				if (global.mode & MODE_MWORKER) {
					struct mworker_proc *child;

					ha_notice("New worker (%d) forked\n", ret);
					/* find the right mworker_proc */
					list_for_each_entry(child, &proc_list, list) {
						if (child->reloads == 0 &&
						    child->options & PROC_O_TYPE_WORKER &&
						    child->pid == -1) {
							child->timestamp = date.tv_sec;
							child->pid = ret;
							child->version = strdup(haproxy_version);
							/* at this step the fd is bound for the worker, set it to -1 so
							 * it could be close in case of errors in mworker_cleanup_proc() */
							child->ipc_fd[1] = -1;
							break;
						}
					}
				}
			}

		} else {
			/* wait mode */
			in_parent = 1;
		}

		/* close the pidfile both in children and father */
		if (pidfd >= 0) {
			//lseek(pidfd, 0, SEEK_SET); /* debug: emulate eglibc bug */
			close(pidfd);
		}

		/* We won't ever use this anymore */
		ha_free(&global.pidfile);

		if (in_parent) {
			if (global.mode & (MODE_MWORKER|MODE_MWORKER_WAIT)) {
				master = 1;

				if ((!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)) &&
				    (global.mode & MODE_DAEMON)) {
					/* detach from the tty, this is required to properly daemonize. */
					if ((getenv("HAPROXY_MWORKER_REEXEC") == NULL))
						stdio_quiet(-1);

					global.mode &= ~MODE_VERBOSE;
					global.mode |= MODE_QUIET; /* ensure that we won't say anything from now */
				}

				if (global.mode & MODE_MWORKER_WAIT) {
					/* only the wait mode handles the master CLI */
					mworker_loop();
				} else {

#if defined(USE_SYSTEMD)
					if (global.tune.options & GTUNE_USE_SYSTEMD)
						sd_notifyf(0, "READY=1\nMAINPID=%lu\nSTATUS=Ready.\n", (unsigned long)getpid());
#endif
					/* if not in wait mode, reload in wait mode to free the memory */
					setenv("HAPROXY_LOAD_SUCCESS", "1", 1);
					ha_notice("Loading success.\n");
					proc_self->failedreloads = 0; /* reset the number of failure */
					mworker_reexec_waitmode();
				}
				/* should never get there */
				exit(EXIT_FAILURE);
			}
#if defined(USE_OPENSSL) && !defined(OPENSSL_NO_DH)
			ssl_free_dh();
#endif
			exit(0); /* parent must leave */
		}

		/* child must never use the atexit function */
		atexit_flag = 0;

		/* close useless master sockets */
		if (global.mode & MODE_MWORKER) {
			struct mworker_proc *child, *it;
			master = 0;

			mworker_cli_proxy_stop();

			/* free proc struct of other processes */
			list_for_each_entry_safe(child, it, &proc_list, list) {
				/* close the FD of the master side for all
				 * workers, we don't need to close the worker
				 * side of other workers since it's done with
				 * the bind_proc */
				if (child->ipc_fd[0] >= 0) {
					close(child->ipc_fd[0]);
					child->ipc_fd[0] = -1;
				}
				if (child->options & PROC_O_TYPE_WORKER &&
				    child->reloads == 0 &&
				    child->pid == -1) {
					/* keep this struct if this is our pid */
					proc_self = child;
					continue;
				}
				LIST_DELETE(&child->list);
				mworker_free_child(child);
				child = NULL;
			}
		}

		if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)) {
			devnullfd = open("/dev/null", O_RDWR, 0);
			if (devnullfd < 0) {
				ha_alert("Cannot open /dev/null\n");
				exit(EXIT_FAILURE);
			}
		}

		/* Must chroot and setgid/setuid in the children */
		/* chroot if needed */
		if (global.chroot != NULL) {
			if (chroot(global.chroot) == -1 || chdir("/") == -1) {
				ha_alert("[%s.main()] Cannot chroot(%s).\n", argv[0], global.chroot);
				if (nb_oldpids)
					tell_old_pids(SIGTTIN);
				protocol_unbind_all();
				exit(1);
			}
		}

		ha_free(&global.chroot);
		set_identity(argv[0]);

		/*
		 * This is only done in daemon mode because we might want the
		 * logs on stdout in mworker mode. If we're NOT in QUIET mode,
		 * we should now close the 3 first FDs to ensure that we can
		 * detach from the TTY. We MUST NOT do it in other cases since
		 * it would have already be done, and 0-2 would have been
		 * affected to listening sockets
		 */
		if ((global.mode & MODE_DAEMON) &&
		    (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))) {
			/* detach from the tty */
			stdio_quiet(devnullfd);
			global.mode &= ~MODE_VERBOSE;
			global.mode |= MODE_QUIET; /* ensure that we won't say anything from now */
		}
		pid = getpid(); /* update child's pid */
		if (!(global.mode & MODE_MWORKER)) /* in mworker mode we don't want a new pgid for the children */
			setsid();
		fork_poller();
	}

	/* pass through every cli socket, and check if it's bound to
	 * the current process and if it exposes listeners sockets.
	 * Caution: the GTUNE_SOCKET_TRANSFER is now set after the fork.
	 * */

	if (global.cli_fe) {
		struct bind_conf *bind_conf;

		list_for_each_entry(bind_conf, &global.cli_fe->conf.bind, by_fe) {
			if (bind_conf->level & ACCESS_FD_LISTENERS) {
				global.tune.options |= GTUNE_SOCKET_TRANSFER;
				break;
			}
		}
	}

	/* Note that here we can't be in the parent/master anymore */
#if !defined(USE_THREAD) && defined(USE_CPU_AFFINITY)
	if (ha_cpuset_count(&cpu_map[0].thread[0])) { /* only do this if the process has a CPU map */

#if defined(CPUSET_USE_CPUSET) || defined(__DragonFly__)
		struct hap_cpuset *set = &cpu_map[0].thread[0];
		sched_setaffinity(0, sizeof(set->cpuset), &set->cpuset);
#elif defined(__FreeBSD__)
		struct hap_cpuset *set = &cpu_map[0].thread[0];
		ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set->cpuset), &set->cpuset);
#endif
	}
#endif
	/* try our best to re-enable core dumps depending on system capabilities.
	 * What is addressed here :
	 * - remove file size limits
	 * - remove core size limits
	 * - mark the process dumpable again if it lost it due to user/group
	 */
	if (global.tune.options & GTUNE_SET_DUMPABLE) {
		limit.rlim_cur = limit.rlim_max = RLIM_INFINITY;

#if defined(RLIMIT_FSIZE)
		if (setrlimit(RLIMIT_FSIZE, &limit) == -1) {
			if (global.tune.options & GTUNE_STRICT_LIMITS) {
				ha_alert("[%s.main()] Failed to set the raise the maximum "
					 "file size.\n", argv[0]);
				exit(1);
			}
			else
				ha_warning("[%s.main()] Failed to set the raise the maximum "
					   "file size.\n", argv[0]);
		}
#endif

#if defined(RLIMIT_CORE)
		if (setrlimit(RLIMIT_CORE, &limit) == -1) {
			if (global.tune.options & GTUNE_STRICT_LIMITS) {
				ha_alert("[%s.main()] Failed to set the raise the core "
					 "dump size.\n", argv[0]);
				exit(1);
			}
			else
				ha_warning("[%s.main()] Failed to set the raise the core "
					   "dump size.\n", argv[0]);
		}
#endif

#if defined(USE_PRCTL)
		if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1)
			ha_warning("[%s.main()] Failed to set the dumpable flag, "
				   "no core will be dumped.\n", argv[0]);
#elif defined(USE_PROCCTL)
		{
			int traceable = PROC_TRACE_CTL_ENABLE;
			if (procctl(P_PID, getpid(), PROC_TRACE_CTL, &traceable) == -1)
				ha_warning("[%s.main()] Failed to set the traceable flag, "
					   "no core will be dumped.\n", argv[0]);
		}
#endif
	}

	global.mode &= ~MODE_STARTING;
	reset_usermsgs_ctx();

	/* start threads 2 and above */
	setup_extra_threads(&run_thread_poll_loop);

	/* when multithreading we need to let only the thread 0 handle the signals */
	haproxy_unblock_signals();

	/* Finally, start the poll loop for the first thread */
	run_thread_poll_loop(&ha_thread_info[0]);

	/* wait for all threads to terminate */
	wait_for_threads_completion();

	deinit_and_exit(0);
}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/hash.c b/src/hash.c
new file mode 100644
index 0000000..5c92e94
--- /dev/null
+++ b/src/hash.c
@@ -0,0 +1,190 @@
+/*
+ * Hash function implementation
+ *
+ * See mailing list thread on "Consistent hashing alternative to sdbm"
+ * http://marc.info/?l=haproxy&m=138213693909219
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+
+#include <haproxy/compiler.h>
+#include <haproxy/hash.h>
+
+
/* wt6 hash: mixes each input byte into two accumulators which are then
 * rotated by amounts that are themselves derived from the evolving state.
 * <input> points to <len> bytes; returns the XOR of both accumulators.
 * NOTE(review): the derived rotation amounts may reach 0, in which case the
 * 32-bit right shift below is undefined in strict C; kept exactly as in the
 * original implementation.
 */
unsigned int hash_wt6(const void *input, int len)
{
	const unsigned char *pos = input;
	unsigned acc0 = 0xa53c965aUL;
	unsigned acc1 = 0x5ca6953aUL;
	unsigned rot0 = 6;
	unsigned rot1 = 18;

	while (len > 0) {
		unsigned int byte = *pos++;
		unsigned int tmp;

		acc0 = ~(acc0 ^ byte);
		acc1 = ~(acc1 + byte);

		/* rotate both accumulators by the current dynamic amounts */
		tmp  = (acc1 << rot0) | (acc1 >> (32 - rot0));
		acc1 = (acc0 << rot1) | (acc0 >> (32 - rot1));
		acc0 = tmp;

		/* derive the next rotation amounts from the new state */
		tmp  = ((acc0 >> 16) ^ acc1) & 0xffff;
		rot0 = tmp & 0x1F;
		rot1 = tmp >> 11;

		len--;
	}
	return acc0 ^ acc1;
}
+
/* djb2 hash by Daniel J. Bernstein: hash = hash * 33 + byte, seeded with
 * 5381, the multiplication being expressed as a shift and an add. <input>
 * points to <len> bytes. Functionally identical to the classic eight-way
 * unrolled version: same per-byte recurrence, same result.
 */
unsigned int hash_djb2(const void *input, int len)
{
	const unsigned char *pos = input;
	unsigned int ret = 5381;

	while (len-- > 0)
		ret = ((ret << 5) + ret) + *pos++; /* ret * 33 + byte */

	return ret;
}
+
+unsigned int hash_sdbm(const void *input, int len)
+{
+ const unsigned char *key = input;
+ unsigned int hash = 0;
+ int c;
+
+ while (len--) {
+ c = *key++;
+ hash = c + (hash << 6) + (hash << 16) - hash;
+ }
+
+ return hash;
+}
+
/* Small yet efficient CRC32 calculation loosely inspired from crc32b found
 * here : http://www.hackersdelight.org/hdcodetxt/crc.c.txt
 * The magic value 0xedb88320 is the reflected polynomial with one bit per
 * exponent. Much faster table-based versions exist but are pointless for
 * our usage here, this hash already sustains gigabit speed which is far
 * faster than what we'd ever need. Better preserve the CPU's cache instead.
 * Standard reflected CRC-32: init ~0, process <len> bytes LSB-first, final
 * complement.
 */
unsigned int hash_crc32(const void *input, int len)
{
	const unsigned char *pos = input;
	unsigned int crc = ~0U;

	while (len-- > 0) {
		int bit = 8;

		crc ^= *pos++;
		while (bit--)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return ~crc;
}
+
/* CRC32c poly 0x11EDC6F41 (RFC4960, Appendix B [8].) */
static const uint32_t crctable[256] = {
	0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
	0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
	0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
	0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L,
	0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL,
	0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L,
	0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L,
	0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL,
	0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL,
	0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L,
	0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L,
	0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL,
	0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L,
	0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL,
	0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL,
	0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L,
	0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L,
	0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L,
	0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L,
	0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L,
	0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L,
	0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L,
	0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L,
	0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L,
	0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L,
	0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L,
	0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L,
	0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L,
	0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L,
	0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L,
	0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L,
	0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L,
	0x82F63B78L, 0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL,
	0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L,
	0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L,
	0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL,
	0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L,
	0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL,
	0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL,
	0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L,
	0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L,
	0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL,
	0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL,
	0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L,
	0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL,
	0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L,
	0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L,
	0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL,
	0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L,
	0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL,
	0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL,
	0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L,
	0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL,
	0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L,
	0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L,
	0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL,
	0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL,
	0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L,
	0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L,
	0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL,
	0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L,
	0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL,
	0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL,
	0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L
};

/* Table-driven CRC-32C (Castagnoli) over <len> bytes of <input>, one byte
 * per step. Standard reflected algorithm: init 0xffffffff, final
 * complement. The check value for "123456789" is 0xE3069283.
 */
uint32_t hash_crc32c(const void *input, int len)
{
	const unsigned char *pos = input;
	uint32_t crc = ~(uint32_t)0;

	while (len-- > 0)
		crc = (crc >> 8) ^ crctable[(crc ^ *pos++) & 0xff];

	return ~crc;
}
diff --git a/src/hlua.c b/src/hlua.c
new file mode 100644
index 0000000..d1f5323
--- /dev/null
+++ b/src/hlua.c
@@ -0,0 +1,13961 @@
+/*
+ * Lua unsafe core engine
+ *
+ * Copyright 2015-2016 Thierry Fournier <tfournier@arpalert.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <ctype.h>
+#include <setjmp.h>
+
+#include <lauxlib.h>
+#include <lua.h>
+#include <lualib.h>
+
+#if !defined(LUA_VERSION_NUM) || LUA_VERSION_NUM < 503
+#error "Requires Lua 5.3 or later."
+#endif
+
+#include <import/ebpttree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/clock.h>
+#include <haproxy/connection.h>
+#include <haproxy/filters.h>
+#include <haproxy/h1.h>
+#include <haproxy/hlua.h>
+#include <haproxy/hlua_fcn.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_client.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/map.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/pattern.h>
+#include <haproxy/payload.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+#include <haproxy/xref.h>
+#include <haproxy/event_hdl.h>
+#include <haproxy/check.h>
+#include <haproxy/mailers.h>
+
+/* Global LUA flags */
+
/* Runtime logging options for Lua code. These flags are combined as a
 * bitmask in hlua_log_opts below.
 */
enum hlua_log_opt {
	/* tune.lua.log.loggers */
	HLUA_LOG_LOGGERS_ON = 0x00000001, /* forward logs to current loggers */

	/* tune.lua.log.stderr */
	HLUA_LOG_STDERR_ON = 0x00000010, /* forward logs to stderr */
	HLUA_LOG_STDERR_AUTO = 0x00000020, /* forward logs to stderr if no loggers */
	HLUA_LOG_STDERR_MASK = 0x00000030, /* mask covering both stderr modes */
};
/* default log options, made of flags in hlua_log_opt */
static uint hlua_log_opts = HLUA_LOG_LOGGERS_ON | HLUA_LOG_STDERR_AUTO;
+
+/* Lua uses longjmp to perform yield or throwing errors. This
+ * macro is used only for identifying the function that can
+ * not return because a longjmp is executed.
+ * __LJMP marks a prototype of hlua file that can use longjmp.
+ * WILL_LJMP() marks an lua function that will use longjmp.
+ * MAY_LJMP() marks an lua function that may use longjmp.
+ */
/* Annotation for a prototype of an hlua function that can use longjmp. */
#define __LJMP
/* Wraps a call which will not return (the longjmp is guaranteed to fire). */
#define WILL_LJMP(func) do { func; my_unreachable(); } while(0)
/* Wraps a call which may longjmp (Lua error or yield paths). */
#define MAY_LJMP(func) func
+
+/* This couple of function executes securely some Lua calls outside of
+ * the lua runtime environment. Each Lua call can return a longjmp
+ * if it encounter a memory error.
+ *
+ * Lua documentation extract:
+ *
+ * If an error happens outside any protected environment, Lua calls
+ * a panic function (see lua_atpanic) and then calls abort, thus
+ * exiting the host application. Your panic function can avoid this
+ * exit by never returning (e.g., doing a long jump to your own
+ * recovery point outside Lua).
+ *
+ * The panic function runs as if it were a message handler (see
+ * #2.3); in particular, the error message is at the top of the
+ * stack. However, there is no guarantee about stack space. To push
+ * anything on the stack, the panic function must first check the
+ * available space (see #4.2).
+ *
+ * We must check all the Lua entry point. This includes:
+ * - The include/proto/hlua.h exported functions
+ * - the task wrapper function
+ * - The action wrapper function
+ * - The converters wrapper function
+ * - The sample-fetch wrapper functions
+ *
+ * It is tolerated that the initialisation function returns an abort.
+ * Before each Lua abort, an error message is written on stderr.
+ *
+ * The macro SET_SAFE_LJMP initialise the longjmp. The Macro
+ * RESET_SAFE_LJMP reset the longjmp. These function must be macro
+ * because they must be exists in the program stack when the longjmp
+ * is called.
+ *
+ * Note that the Lua processing is not really thread safe. It provides
+ * heavy system which consists to add our own lock function in the Lua
+ * code and recompile the library. This system will probably not accepted
+ * by maintainers of various distribs.
+ *
+ * Our main execution point of the Lua is the function lua_resume(). A
+ * quick looking on the Lua sources displays a lua_lock() a the start
+ * of function and a lua_unlock() at the end of the function. So I
+ * conclude that the Lua thread safe mode just perform a mutex around
+ * all execution. So I prefer to do this in the HAProxy code, it will be
+ * easier for distro maintainers.
+ *
+ * Note that the HAProxy lua functions rounded by the macro SET_SAFE_LJMP
+ * and RESET_SAFE_LJMP manipulates the Lua stack, so it will be careful
+ * to set mutex around these functions.
+ */
/* Global lock serializing accesses to the shared (state id 0) Lua stack. */
__decl_spinlock(hlua_global_lock);
/* Per-thread recovery point armed by SET_SAFE_LJMP_L() below. */
THREAD_LOCAL jmp_buf safe_ljmp_env;
/* Panic handler installed outside protected sections: returning 0 lets Lua
 * call abort(), as nothing can be recovered at this point.
 */
static int hlua_panic_safe(lua_State *L) { return 0; }
/* Panic handler installed inside protected sections: jumps back to the
 * setjmp() point in SET_SAFE_LJMP_L() instead of aborting the process.
 */
static int hlua_panic_ljmp(lua_State *L) { WILL_LJMP(longjmp(safe_ljmp_env, 1)); return 0; }
+
+/* This is the chained list of struct hlua_function referenced
+ * for haproxy action, sample-fetches, converters, cli and
+ * applet bindings. It is used for a post-initialisation control.
+ */
+static struct list referenced_functions = LIST_HEAD_INIT(referenced_functions);
+
+/* This variable is used only during initialization to identify the Lua state
+ * currently being initialized. 0 is the common lua state, 1 to n are the Lua
+ * states dedicated to each thread (in this case hlua_state_id==tid+1).
+ */
+static int hlua_state_id;
+
+/* This is a NULL-terminated list of lua file which are referenced to load per thread */
+static char ***per_thread_load = NULL;
+
+lua_State *hlua_init_state(int thread_id);
+
/* This function takes the Lua global lock protecting the shared Lua state.
 * NOTE(review): the comment originally asked to keep this function's
 * visibility global so that it can appear in stack dumps and performance
 * profiles, yet it is declared 'static inline' here -- confirm which is
 * intended.
 */
static inline void lua_take_global_lock()
{
	HA_SPIN_LOCK(LUA_LOCK, &hlua_global_lock);
}

/* Releases the Lua global lock taken by lua_take_global_lock(). */
static inline void lua_drop_global_lock()
{
	HA_SPIN_UNLOCK(LUA_LOCK, &hlua_global_lock);
}
+
+/* lua lock helpers: only lock when required
+ *
+ * state_id == 0: we're operating on the main lua stack (shared between
+ * os threads), so we need to acquire the main lock
+ *
+ * If the thread already owns the lock (_hlua_locked != 0), skip the lock
+ * attempt. This could happen if we run under protected lua environment.
+ * Not doing this could result in deadlocks because of nested locking
+ * attempts from the same thread
+ */
/* Per-thread lock recursion counter: non-zero means the current thread
 * already holds the global Lua lock, so a nested hlua_lock() must not try
 * to take it again (it would self-deadlock on the spinlock).
 */
static THREAD_LOCAL int _hlua_locked = 0;
/* Takes the global Lua lock when <hlua> runs on the shared state
 * (state_id == 0); per-thread states need no locking. Recursion-safe
 * thanks to _hlua_locked.
 */
static inline void hlua_lock(struct hlua *hlua)
{
	if (hlua->state_id != 0)
		return;
	if (!_hlua_locked)
		lua_take_global_lock();
	_hlua_locked += 1;
}
/* Releases one level of the lock taken by hlua_lock(); the spinlock itself
 * is only dropped when the recursion counter falls back to zero.
 */
static inline void hlua_unlock(struct hlua *hlua)
{
	if (hlua->state_id != 0)
		return;
	BUG_ON(_hlua_locked <= 0);
	_hlua_locked--;
	/* drop the lock once the lock count reaches 0 */
	if (!_hlua_locked)
		lua_drop_global_lock();
}
+
/* Arms the per-thread longjmp recovery point and installs the ljmp panic
 * handler on <__L> so that a Lua panic jumps back here instead of aborting
 * the process. Takes the lock via hlua_lock(). Evaluates to 1 on success,
 * or to 0 when re-entered through the longjmp (in which case the lock has
 * already been released). Must be a macro: the setjmp() frame has to live
 * on the caller's stack.
 */
#define SET_SAFE_LJMP_L(__L, __HLUA) \
	({ \
		int ret; \
		hlua_lock(__HLUA); \
		if (setjmp(safe_ljmp_env) != 0) { \
			lua_atpanic(__L, hlua_panic_safe); \
			ret = 0; \
			hlua_unlock(__HLUA); \
		} else { \
			lua_atpanic(__L, hlua_panic_ljmp); \
			ret = 1; \
		} \
		ret; \
	})

/* If we are the last function catching Lua errors, we
 * must reset the panic function. Also drops the lock taken by the
 * matching SET_SAFE_LJMP_L().
 */
#define RESET_SAFE_LJMP_L(__L, __HLUA) \
	do { \
		lua_atpanic(__L, hlua_panic_safe); \
		hlua_unlock(__HLUA); \
	} while(0)

/* Convenience wrapper operating on the hlua context's own coroutine. */
#define SET_SAFE_LJMP(__HLUA) \
	SET_SAFE_LJMP_L((__HLUA)->T, __HLUA)

#define RESET_SAFE_LJMP(__HLUA) \
	RESET_SAFE_LJMP_L((__HLUA)->T, __HLUA)

/* Same, but operating on the parent (per-state) lua_State. */
#define SET_SAFE_LJMP_PARENT(__HLUA) \
	SET_SAFE_LJMP_L(hlua_states[(__HLUA)->state_id], __HLUA)

#define RESET_SAFE_LJMP_PARENT(__HLUA) \
	RESET_SAFE_LJMP_L(hlua_states[(__HLUA)->state_id], __HLUA)
+
+/* Applet status flags */
+#define APPLET_DONE 0x01 /* applet processing is done. */
+/* unused: 0x02 */
+#define APPLET_HDR_SENT 0x04 /* Response header sent. */
+/* unused: 0x08, 0x10 */
+#define APPLET_HTTP11 0x20 /* Last chunk sent. */
+#define APPLET_RSP_SENT 0x40 /* The response was fully sent */
+
+/* The main Lua execution context. The 0 index is the
+ * common state shared by all threads.
+ */
+static lua_State *hlua_states[MAX_THREADS + 1];
+
+#define HLUA_FLT_CB_FINAL 0x00000001
+#define HLUA_FLT_CB_RETVAL 0x00000002
+#define HLUA_FLT_CB_ARG_CHN 0x00000004
+#define HLUA_FLT_CB_ARG_HTTP_MSG 0x00000008
+
+#define HLUA_FLT_CTX_FL_PAYLOAD 0x00000001
+
/* Descriptor of a registered Lua filter. Arrays are indexed by Lua state
 * id (0 = shared state, 1..MAX_THREADS = per-thread states); entries are
 * preset to -1 meaning "no reference" (see new_hlua_reg_filter()).
 * NOTE(review): the exact roles of flt_ref vs fun_ref should be confirmed
 * against the registration code (not visible in this chunk).
 */
struct hlua_reg_filter {
	char *name;                   /* filter name */
	int flt_ref[MAX_THREADS + 1]; /* per-state registry reference */
	int fun_ref[MAX_THREADS + 1]; /* per-state registry reference */
	struct list l;                /* link in the referenced_filters list */
};

/* Per-instance configuration of a Lua filter. */
struct hlua_flt_config {
	struct hlua_reg_filter *reg;  /* the registered filter description */
	int ref[MAX_THREADS + 1];     /* per-state registry reference */
	char **args;                  /* filter arguments */
};
+
+struct hlua_flt_ctx {
+ int ref; /* ref to the filter lua object */
+ struct hlua *hlua[2]; /* lua runtime context (0: request, 1: response) */
+ unsigned int cur_off[2]; /* current offset (0: request, 1: response) */
+ unsigned int cur_len[2]; /* current forwardable length (0: request, 1: response) */
+ unsigned int flags; /* HLUA_FLT_CTX_FL_* */
+};
+
+/* appctx context used by the cosockets */
+struct hlua_csk_ctx {
+ int connected;
+ struct xref xref; /* cross reference with the Lua object owner. */
+ struct list wake_on_read;
+ struct list wake_on_write;
+ struct appctx *appctx;
+ struct server *srv;
+ int timeout;
+ int die;
+};
+
+/* appctx context used by TCP services */
+struct hlua_tcp_ctx {
+ struct hlua *hlua;
+ int flags;
+ struct task *task;
+};
+
+/* appctx context used by HTTP services */
+struct hlua_http_ctx {
+ struct hlua *hlua;
+ int left_bytes; /* The max amount of bytes that we can read. */
+ int flags;
+ int status;
+ const char *reason;
+ struct task *task;
+};
+
+/* used by registered CLI keywords */
+struct hlua_cli_ctx {
+ struct hlua *hlua;
+ struct task *task;
+ struct hlua_function *fcn;
+};
+
+DECLARE_STATIC_POOL(pool_head_hlua_flt_ctx, "hlua_flt_ctx", sizeof(struct hlua_flt_ctx));
+
+static int hlua_filter_from_payload(struct filter *filter);
+
+/* This is the chained list of struct hlua_flt referenced
+ * for haproxy filters. It is used for a post-initialisation control.
+ */
+static struct list referenced_filters = LIST_HEAD_INIT(referenced_filters);
+
+
+/* This is the memory pool containing struct lua for applets
+ * (including cli).
+ */
+DECLARE_STATIC_POOL(pool_head_hlua, "hlua", sizeof(struct hlua));
+
+/* Used for Socket connection. */
+static struct proxy *socket_proxy;
+static struct server *socket_tcp;
+#ifdef USE_OPENSSL
+static struct server *socket_ssl;
+#endif
+
+/* List head of the function called at the initialisation time. */
+struct list hlua_init_functions[MAX_THREADS + 1];
+
+/* The following variables contains the reference of the different
+ * Lua classes. These references are useful for identify metadata
+ * associated with an object.
+ */
+static int class_txn_ref;
+static int class_socket_ref;
+static int class_channel_ref;
+static int class_fetches_ref;
+static int class_converters_ref;
+static int class_http_ref;
+static int class_http_msg_ref;
+static int class_httpclient_ref;
+static int class_map_ref;
+static int class_applet_tcp_ref;
+static int class_applet_http_ref;
+static int class_txn_reply_ref;
+
+/* Lua max execution timeouts. By default, stream-related
+ * lua coroutines (e.g.: actions) have a short timeout.
+ * On the other hand tasks coroutines don't have a timeout because
+ * a task may remain alive during all the haproxy execution.
+ *
+ * Timeouts are expressed in milliseconds, they are meant to be used
+ * with hlua timer's API exclusively.
+ * 0 means no timeout
+ */
+static uint32_t hlua_timeout_burst = 1000; /* burst timeout. */
+static uint32_t hlua_timeout_session = 4000; /* session timeout. */
+static uint32_t hlua_timeout_task = 0; /* task timeout. */
+static uint32_t hlua_timeout_applet = 4000; /* applet timeout. */
+
+/* hlua multipurpose timer:
+ * used to compute burst lua time (within a single hlua_ctx_resume())
+ * and cumulative lua time for a given coroutine, and to check
+ * the lua coroutine against the configured timeouts
+ */
+
/* Returns the current thread's cpu time with millisecond precision as a
 * uint32_t (wraps; see note in the body).
 */
static inline uint32_t _hlua_time_ms()
{
	/* now_cpu_time_fast() returns nanoseconds in a uint64_t, which only
	 * wraps every ~585 years, so that wrap can safely be ignored. The
	 * result truncated to a uint32_t of milliseconds wraps after
	 * 4294967295ms (~49.7 days): this is fine as long as every
	 * computation involving this value is performed exclusively on
	 * uint32_t, so that wrapped subtractions still yield correct deltas.
	 */
	uint64_t cpu_ns = now_cpu_time_fast();

	return (uint32_t)(cpu_ns / 1000000ULL);
}
+
+/* computes time spent in a single lua execution (in ms) */
+static inline uint32_t _hlua_time_burst(const struct hlua_timer *timer)
+{
+ uint32_t burst_ms;
+
+ /* wrapping is expected and properly
+ * handled thanks to _hlua_time_ms() and burst_ms
+ * being of the same type
+ */
+ burst_ms = _hlua_time_ms() - timer->start;
+ return burst_ms;
+}
+
+static inline void hlua_timer_init(struct hlua_timer *timer, unsigned int max)
+{
+ timer->cumulative = 0;
+ timer->burst = 0;
+ timer->max = max;
+}
+
/* reset the timer ctx between 2 yields: the time consumed by the finished
 * burst is folded into the cumulative counter and burst accounting
 * restarts from zero
 */
static inline void hlua_timer_reset(struct hlua_timer *timer)
{
	timer->cumulative += timer->burst;
	timer->burst = 0;
}
+
/* start the timer right before a new execution: records the current
 * cpu time as the burst reference point
 */
static inline void hlua_timer_start(struct hlua_timer *timer)
{
	timer->start = _hlua_time_ms();
}

/* update hlua timer when finishing an execution: adds the time elapsed
 * since hlua_timer_start() to the current burst
 */
static inline void hlua_timer_stop(struct hlua_timer *timer)
{
	timer->burst += _hlua_time_burst(timer);
}
+
+/* check the timers for current hlua context:
+ * - first check for burst timeout (max execution time for the current
+ hlua resume, ie: time between effective yields)
+ * - then check for yield cumulative timeout
+ *
+ * Returns 1 if the check succeeded and 0 if it failed
+ * (ie: timeout exceeded)
+ */
+static inline int hlua_timer_check(const struct hlua_timer *timer)
+{
+ uint32_t pburst = _hlua_time_burst(timer); /* pending burst time in ms */
+
+ if (hlua_timeout_burst && (timer->burst + pburst) > hlua_timeout_burst)
+ return 0; /* burst timeout exceeded */
+ if (timer->max && (timer->cumulative + timer->burst + pburst) > timer->max)
+ return 0; /* cumulative timeout exceeded */
+ return 1; /* ok */
+}
+
+/* Interrupts the Lua processing each "hlua_nb_instruction" instructions.
+ * it is used for preventing infinite loops.
+ *
+ * I test the scheer with an infinite loop containing one incrementation
+ * and one test. I run this loop between 10 seconds, I raise a ceil of
+ * 710M loops from one interrupt each 9000 instructions, so I fix the value
+ * to one interrupt each 10 000 instructions.
+ *
+ * configured | Number of
+ * instructions | loops executed
+ * between two | in milions
+ * forced yields |
+ * ---------------+---------------
+ * 10 | 160
+ * 500 | 670
+ * 1000 | 680
+ * 5000 | 700
+ * 7000 | 700
+ * 8000 | 700
+ * 9000 | 710 <- ceil
+ * 10000 | 710
+ * 100000 | 710
+ * 1000000 | 710
+ *
+ */
+static unsigned int hlua_nb_instruction = 10000;
+
+/* Descriptor for the memory allocation state. The limit is pre-initialised to
+ * 0 until it is replaced by "tune.lua.maxmem" during the config parsing, or it
+ * is replaced with ~0 during post_init after everything was loaded. This way
+ * it is guaranteed that if limit is ~0 the boot is complete and that if it's
+ * zero it's not yet limited and proper accounting is required.
+ */
+struct hlua_mem_allocator {
+ size_t allocated;
+ size_t limit;
+};
+
+static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED(64);
+
+/* hlua event subscription */
+struct hlua_event_sub {
+ int fcn_ref;
+ int state_id;
+ struct hlua *hlua;
+ struct task *task;
+ event_hdl_async_equeue equeue;
+ struct event_hdl_sub *sub;
+ uint8_t paused;
+};
+
+/* This is the memory pool containing struct hlua_event_sub
+ * for event subscriptions from lua
+ */
+DECLARE_STATIC_POOL(pool_head_hlua_event_sub, "hlua_esub", sizeof(struct hlua_event_sub));
+
+/* These functions converts types between HAProxy internal args or
+ * sample and LUA types. Another function permits to check if the
+ * LUA stack contains arguments according with an required ARG_T
+ * format.
+ */
+__LJMP static int hlua_arg2lua(lua_State *L, const struct arg *arg);
+static int hlua_lua2arg(lua_State *L, int ud, struct arg *arg);
+__LJMP static int hlua_lua2arg_check(lua_State *L, int first, struct arg *argp,
+ uint64_t mask, struct proxy *p);
+__LJMP static int hlua_smp2lua(lua_State *L, struct sample *smp);
+__LJMP static int hlua_smp2lua_str(lua_State *L, struct sample *smp);
+static int hlua_lua2smp(lua_State *L, int ud, struct sample *smp);
+
+__LJMP static int hlua_http_get_headers(lua_State *L, struct http_msg *msg);
+
+struct prepend_path {
+ struct list l;
+ char *type;
+ char *path;
+};
+
+static struct list prepend_path_list = LIST_HEAD_INIT(prepend_path_list);
+
+#define SEND_ERR(__be, __fmt, __args...) \
+ do { \
+ send_log(__be, LOG_ERR, __fmt, ## __args); \
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)) \
+ ha_alert(__fmt, ## __args); \
+ } while (0)
+
+static inline struct hlua_function *new_hlua_function()
+{
+ struct hlua_function *fcn;
+ int i;
+
+ fcn = calloc(1, sizeof(*fcn));
+ if (!fcn)
+ return NULL;
+ LIST_APPEND(&referenced_functions, &fcn->l);
+ for (i = 0; i < MAX_THREADS + 1; i++)
+ fcn->function_ref[i] = -1;
+ return fcn;
+}
+
+static inline void release_hlua_function(struct hlua_function *fcn)
+{
+ if (!fcn)
+ return;
+ if (fcn->name)
+ ha_free(&fcn->name);
+ LIST_DELETE(&fcn->l);
+ ha_free(&fcn);
+}
+
+/* If the common state is set, the stack id is 0, otherwise it is the tid + 1 */
+static inline int fcn_ref_to_stack_id(struct hlua_function *fcn)
+{
+ if (fcn->function_ref[0] == -1)
+ return tid + 1;
+ return 0;
+}
+
+/* Create a new registered filter. Only its name is filled */
+static inline struct hlua_reg_filter *new_hlua_reg_filter(const char *name)
+{
+ struct hlua_reg_filter *reg_flt;
+ int i;
+
+ reg_flt = calloc(1, sizeof(*reg_flt));
+ if (!reg_flt)
+ return NULL;
+ reg_flt->name = strdup(name);
+ if (!reg_flt->name) {
+ free(reg_flt);
+ return NULL;
+ }
+ LIST_APPEND(&referenced_filters, &reg_flt->l);
+ for (i = 0; i < MAX_THREADS + 1; i++) {
+ reg_flt->flt_ref[i] = -1;
+ reg_flt->fun_ref[i] = -1;
+ }
+ return reg_flt;
+}
+
+/* Release a registered filter */
+static inline void release_hlua_reg_filter(struct hlua_reg_filter *reg_flt)
+{
+ if (!reg_flt)
+ return;
+ if (reg_flt->name)
+ ha_free(&reg_flt->name);
+ LIST_DELETE(&reg_flt->l);
+ ha_free(&reg_flt);
+}
+
+/* If the common state is set, the stack id is 0, otherwise it is the tid + 1 */
+static inline int reg_flt_to_stack_id(struct hlua_reg_filter *reg_flt)
+{
+ if (reg_flt->fun_ref[0] == -1)
+ return tid + 1;
+ return 0;
+}
+
+/* Used to check an Lua function type in the stack. It creates and
+ * returns a reference of the function. This function throws an
+ * error if the argument is not a "function".
+ * When no longer used, the ref must be released with hlua_unref()
+ */
+__LJMP int hlua_checkfunction(lua_State *L, int argno)
+{
+ if (!lua_isfunction(L, argno)) {
+ const char *msg = lua_pushfstring(L, "function expected, got %s", luaL_typename(L, argno));
+ WILL_LJMP(luaL_argerror(L, argno, msg));
+ }
+ lua_pushvalue(L, argno);
+ return luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
+/* Used to check an Lua table type in the stack. It creates and
+ * returns a reference of the table. This function throws an
+ * error if the argument is not a "table".
+ * When no longer used, the ref must be released with hlua_unref()
+ */
+__LJMP int hlua_checktable(lua_State *L, int argno)
+{
+ if (!lua_istable(L, argno)) {
+ const char *msg = lua_pushfstring(L, "table expected, got %s", luaL_typename(L, argno));
+ WILL_LJMP(luaL_argerror(L, argno, msg));
+ }
+ lua_pushvalue(L, argno);
+ return luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
/* Gets a reference to the object that is at the top of the stack.
 * The referenced object is popped from the stack.
 *
 * The function returns the reference to the object, which must
 * be cleared using hlua_unref() when no longer used. May longjmp on
 * memory allocation error.
 */
__LJMP int hlua_ref(lua_State *L)
{
	return MAY_LJMP(luaL_ref(L, LUA_REGISTRYINDEX));
}

/* Pushes a reference previously created using luaL_ref(L, LUA_REGISTRYINDEX)
 * on the <L> stack
 * (ie: hlua_checkfunction(), hlua_checktable() or hlua_ref())
 *
 * When the reference is no longer used, it should be released by calling
 * hlua_unref()
 *
 * <L> can be from any co-routine as long as it belongs to the same lua
 * parent state as the one used to get the reference.
 */
void hlua_pushref(lua_State *L, int ref)
{
	lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
}

/* Releases a reference previously created using luaL_ref(L, LUA_REGISTRYINDEX)
 * (ie: hlua_checkfunction(), hlua_checktable() or hlua_ref())
 *
 * This allows the reference slot to be reused and the referred object
 * to be garbage collected.
 *
 * <L> can be from any co-routine as long as it belongs to the same lua
 * parent state as the one used to get the reference.
 */
void hlua_unref(lua_State *L, int ref)
{
	luaL_unref(L, LUA_REGISTRYINDEX, ref);
}
+
/* Builds a human-readable backtrace of the Lua stack of <L> into the
 * trash chunk, one frame per level, separated by <sep>. Returns a pointer
 * into the trash area: the result is only valid until the trash buffers
 * are reused and must not be freed by the caller.
 */
__LJMP const char *hlua_traceback(lua_State *L, const char* sep)
{
	lua_Debug ar;
	int level = 0;
	struct buffer *msg = get_trash_chunk();

	while (lua_getstack(L, level++, &ar)) {
		/* Fill fields:
		 * 'S': fills in the fields source, short_src, linedefined, lastlinedefined, and what;
		 * 'l': fills in the field currentline;
		 * 'n': fills in the field name and namewhat;
		 * 't': fills in the field istailcall;
		 */
		lua_getinfo(L, "Slnt", &ar);

		/* skip these empty entries, usually they come from deep C functions */
		if (ar.currentline < 0 && *ar.what == 'C' && !*ar.namewhat && !ar.name)
			continue;

		/* Add separator (only between frames, not before the first one) */
		if (b_data(msg))
			chunk_appendf(msg, "%s", sep);

		/* Append code localisation */
		if (ar.currentline > 0)
			chunk_appendf(msg, "%s:%d: ", ar.short_src, ar.currentline);
		else
			chunk_appendf(msg, "%s: ", ar.short_src);

		/*
		 * Get function name
		 *
		 * if namewhat is not empty, name is defined.
		 * what contains "Lua" for Lua function, "C" for C function,
		 * or "main" for main code.
		 */
		if (*ar.namewhat != '\0' && ar.name != NULL) /* is there a name from code? */
			chunk_appendf(msg, "in %s '%s'", ar.namewhat, ar.name); /* use it */

		else if (*ar.what == 'm') /* "main", the code is not executed in a function */
			chunk_appendf(msg, "in main chunk");

		else if (*ar.what != 'C') /* for Lua functions, use <file:line> */
			chunk_appendf(msg, "in function line %d", ar.linedefined);

		else /* nothing left... */
			chunk_appendf(msg, "?");


		/* Display tailed call */
		if (ar.istailcall)
			chunk_appendf(msg, " ...");
	}

	return msg->area;
}
+
+
+/* This function check the number of arguments available in the
+ * stack. If the number of arguments available is not the same
+ * then <nb> an error is thrown.
+ */
+__LJMP static inline void check_args(lua_State *L, int nb, char *fcn)
+{
+ if (lua_gettop(L) == nb)
+ return;
+ WILL_LJMP(luaL_error(L, "'%s' needs %d arguments", fcn, nb));
+}
+
/* This function pushes an error string prefixed by the file name
 * and the line number where the error is encountered (via luaL_where()).
 * Always returns 1, the number of values pushed.
 * NOTE(review): lua_pushvfstring() can raise a Lua memory error, in which
 * case va_end() would be skipped -- presumably harmless on common ABIs,
 * but worth confirming.
 */
static int hlua_pusherror(lua_State *L, const char *fmt, ...)
{
	va_list argp;
	va_start(argp, fmt);
	luaL_where(L, 1);
	lua_pushvfstring(L, fmt, argp);
	va_end(argp);
	lua_concat(L, 2);
	return 1;
}
+
+/* This functions is used with sample fetch and converters. It
+ * converts the HAProxy configuration argument in a lua stack
+ * values.
+ *
+ * It takes an array of "arg", and each entry of the array is
+ * converted and pushed in the LUA stack.
+ */
+__LJMP static int hlua_arg2lua(lua_State *L, const struct arg *arg)
+{
+ switch (arg->type) {
+ case ARGT_SINT:
+ case ARGT_TIME:
+ case ARGT_SIZE:
+ lua_pushinteger(L, arg->data.sint);
+ break;
+
+ case ARGT_STR:
+ lua_pushlstring(L, arg->data.str.area, arg->data.str.data);
+ break;
+
+ case ARGT_IPV4:
+ case ARGT_IPV6:
+ case ARGT_MSK4:
+ case ARGT_MSK6:
+ case ARGT_FE:
+ case ARGT_BE:
+ case ARGT_TAB:
+ case ARGT_SRV:
+ case ARGT_USR:
+ case ARGT_MAP:
+ default:
+ lua_pushnil(L);
+ break;
+ }
+ return 1;
+}
+
+/* This function take one entry in an LUA stack at the index "ud",
+ * and try to convert it in an HAProxy argument entry. This is useful
+ * with sample fetch wrappers. The input arguments are given to the
+ * lua wrapper and converted as arg list by the function.
+ *
+ * Note: although lua_tolstring() may raise a memory error according to
+ * lua documentation, in practise this could only happen when using to
+ * use lua_tolstring() on a number (lua will try to push the number as a
+ * string on the stack, and this may result in memory failure), so here we
+ * assume that hlua_lua2arg() will never raise an exception since it is
+ * exclusively used with lua string inputs.
+ *
+ * Note2: You should be extra careful when using <arg> argument, since
+ * string arguments rely on lua_tolstring() which returns a pointer to lua
+ * object that may be garbage collected at any time when removed from lua
+ * stack, thus you should make sure that <arg> is only used from a local
+ * scope within lua context (and not exported or stored in a lua-independent
+ * ctx) and that related lua object still exists when accessing arg data.
+ * See: https://www.lua.org/manual/5.4/manual.html#4.1.3
+ */
+static int hlua_lua2arg(lua_State *L, int ud, struct arg *arg)
+{
+ switch (lua_type(L, ud)) {
+
+ case LUA_TNUMBER:
+ case LUA_TBOOLEAN:
+ arg->type = ARGT_SINT;
+ arg->data.sint = lua_tointeger(L, ud);
+ break;
+
+ case LUA_TSTRING:
+ arg->type = ARGT_STR;
+ arg->data.str.area = (char *)lua_tolstring(L, ud, &arg->data.str.data);
+ /* We don't know the actual size of the underlying allocation, so be conservative. */
+ arg->data.str.size = arg->data.str.data+1; /* count the terminating null byte */
+ arg->data.str.head = 0;
+ break;
+
+ case LUA_TUSERDATA:
+ case LUA_TNIL:
+ case LUA_TTABLE:
+ case LUA_TFUNCTION:
+ case LUA_TTHREAD:
+ case LUA_TLIGHTUSERDATA:
+ arg->type = ARGT_SINT;
+ arg->data.sint = 0;
+ break;
+ }
+ return 1;
+}
+
/* Converts a struct sample to the richest matching Lua type and pushes it
 * on the stack of <L>. This is useful to convert the return of fetches or
 * converters. Always returns 1, the number of values pushed.
 */
__LJMP static int hlua_smp2lua(lua_State *L, struct sample *smp)
{
	switch (smp->data.type) {
	case SMP_T_SINT:
	case SMP_T_BOOL:
		lua_pushinteger(L, smp->data.u.sint);
		break;

	case SMP_T_BIN:
	case SMP_T_STR:
		lua_pushlstring(L, smp->data.u.str.area, smp->data.u.str.data);
		break;

	case SMP_T_METH:
		/* well-known methods map to constant strings, others keep
		 * their literal representation from the sample
		 */
		switch (smp->data.u.meth.meth) {
		case HTTP_METH_OPTIONS: lua_pushstring(L, "OPTIONS"); break;
		case HTTP_METH_GET:     lua_pushstring(L, "GET");     break;
		case HTTP_METH_HEAD:    lua_pushstring(L, "HEAD");    break;
		case HTTP_METH_POST:    lua_pushstring(L, "POST");    break;
		case HTTP_METH_PUT:     lua_pushstring(L, "PUT");     break;
		case HTTP_METH_DELETE:  lua_pushstring(L, "DELETE");  break;
		case HTTP_METH_TRACE:   lua_pushstring(L, "TRACE");   break;
		case HTTP_METH_CONNECT: lua_pushstring(L, "CONNECT"); break;
		case HTTP_METH_OTHER:
			lua_pushlstring(L, smp->data.u.meth.str.area, smp->data.u.meth.str.data);
			break;
		default:
			lua_pushnil(L);
			break;
		}
		break;

	case SMP_T_IPV4:
	case SMP_T_IPV6:
	case SMP_T_ADDR: /* This type is never used to qualify a sample. */
		/* cast the address to a string in place (this mutates <smp>),
		 * or push nil when no cast is available or it fails
		 */
		if (sample_casts[smp->data.type][SMP_T_STR] &&
		    sample_casts[smp->data.type][SMP_T_STR](smp))
			lua_pushlstring(L, smp->data.u.str.area, smp->data.u.str.data);
		else
			lua_pushnil(L);
		break;
	default:
		lua_pushnil(L);
		break;
	}
	return 1;
}
+
/* Converts a struct sample to a Lua string and pushes it on the stack of
 * <L>. This is useful to convert the return of fetches or converters.
 * Contrary to hlua_smp2lua(), non-convertible values yield an empty
 * string rather than nil. Always returns 1, the number of values pushed.
 */
__LJMP static int hlua_smp2lua_str(lua_State *L, struct sample *smp)
{
	switch (smp->data.type) {

	case SMP_T_BIN:
	case SMP_T_STR:
		lua_pushlstring(L, smp->data.u.str.area, smp->data.u.str.data);
		break;

	case SMP_T_METH:
		/* well-known methods map to constant strings, others keep
		 * their literal representation from the sample
		 */
		switch (smp->data.u.meth.meth) {
		case HTTP_METH_OPTIONS: lua_pushstring(L, "OPTIONS"); break;
		case HTTP_METH_GET:     lua_pushstring(L, "GET");     break;
		case HTTP_METH_HEAD:    lua_pushstring(L, "HEAD");    break;
		case HTTP_METH_POST:    lua_pushstring(L, "POST");    break;
		case HTTP_METH_PUT:     lua_pushstring(L, "PUT");     break;
		case HTTP_METH_DELETE:  lua_pushstring(L, "DELETE");  break;
		case HTTP_METH_TRACE:   lua_pushstring(L, "TRACE");   break;
		case HTTP_METH_CONNECT: lua_pushstring(L, "CONNECT"); break;
		case HTTP_METH_OTHER:
			lua_pushlstring(L, smp->data.u.meth.str.area, smp->data.u.meth.str.data);
			break;
		default:
			lua_pushstring(L, "");
			break;
		}
		break;

	case SMP_T_SINT:
	case SMP_T_BOOL:
	case SMP_T_IPV4:
	case SMP_T_IPV6:
	case SMP_T_ADDR: /* This type is never used to qualify a sample. */
		/* cast the sample to a string in place (this mutates <smp>),
		 * or push an empty string when the cast fails
		 */
		if (sample_casts[smp->data.type][SMP_T_STR] &&
		    sample_casts[smp->data.type][SMP_T_STR](smp))
			lua_pushlstring(L, smp->data.u.str.area, smp->data.u.str.data);
		else
			lua_pushstring(L, "");
		break;
	default:
		lua_pushstring(L, "");
		break;
	}
	return 1;
}
+
+/* The following function is used to convert a Lua type to a
+ * struct sample. This is useful to provide data from LUA code to
+ * a converter.
+ *
+ * Note: although lua_tolstring() may raise a memory error according to
+ * lua documentation, in practise this could only happen when using to
+ * use lua_tolstring() on a number (lua will try to push the number as a
+ * string on the stack, and this may result in memory failure), so here we
+ * assume that hlua_lua2arg() will never raise an exception since it is
+ * exclusively used with lua string inputs.
+ *
+ * Note2: You should be extra careful when using <smp> argument, since
+ * string arguments rely on lua_tolstring() which returns a pointer to lua
+ * object that may be garbage collected at any time when removed from lua
+ * stack, thus you should make sure that <smp> is only used from a local
+ * scope within lua context (not exported or stored in a lua-independent
+ * ctx) and that related lua object still exists when accessing arg data.
+ * See: https://www.lua.org/manual/5.4/manual.html#4.1.3
+ *
+ * If you don't comply with this usage restriction, then you should consider
+ * duplicating the smp using smp_dup() to make it portable (little overhead),
+ * as this will ensure that the smp always points to valid memory block.
+ */
+static int hlua_lua2smp(lua_State *L, int ud, struct sample *smp)
+{
+ switch (lua_type(L, ud)) {
+
+ case LUA_TNUMBER:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = lua_tointeger(L, ud);
+ break;
+
+
+ case LUA_TBOOLEAN:
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = lua_toboolean(L, ud);
+ break;
+
+ case LUA_TSTRING:
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ smp->data.u.str.area = (char *)lua_tolstring(L, ud, &smp->data.u.str.data);
+ /* We don't know the actual size of the underlying allocation, so be conservative. */
+ smp->data.u.str.size = smp->data.u.str.data+1; /* count the terminating null byte */
+ smp->data.u.str.head = 0;
+ break;
+
+ case LUA_TUSERDATA:
+ case LUA_TNIL:
+ case LUA_TTABLE:
+ case LUA_TFUNCTION:
+ case LUA_TTHREAD:
+ case LUA_TLIGHTUSERDATA:
+ case LUA_TNONE:
+ default:
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = 0;
+ break;
+ }
+ return 1;
+}
+
+/* This function check the "argp" built by another conversion function
+ * is in accord with the expected argp defined by the "mask". The function
+ * returns true or false. It can be adjust the types if there compatibles.
+ *
+ * This function assumes that the argp argument contains ARGM_NBARGS + 1
+ * entries and that there is at least one stop at the last position.
+ */
+__LJMP int hlua_lua2arg_check(lua_State *L, int first, struct arg *argp,
+ uint64_t mask, struct proxy *p)
+{
+ int min_arg;
+ int idx;
+ struct proxy *px;
+ struct userlist *ul;
+ struct my_regex *reg;
+ const char *msg = NULL;
+ char *sname, *pname, *err = NULL;
+
+ idx = 0;
+ min_arg = ARGM(mask);
+ mask >>= ARGM_BITS;
+
+ while (1) {
+ struct buffer tmp = BUF_NULL;
+
+ /* Check for mandatory arguments. */
+ if (argp[idx].type == ARGT_STOP) {
+ if (idx < min_arg) {
+
+ /* If miss other argument than the first one, we return an error. */
+ if (idx > 0) {
+ msg = "Mandatory argument expected";
+ goto error;
+ }
+
+ /* If first argument have a certain type, some default values
+ * may be used. See the function smp_resolve_args().
+ */
+ switch (mask & ARGT_MASK) {
+
+ case ARGT_FE:
+ if (!(p->cap & PR_CAP_FE)) {
+ msg = "Mandatory argument expected";
+ goto error;
+ }
+ argp[idx].data.prx = p;
+ argp[idx].type = ARGT_FE;
+ argp[idx+1].type = ARGT_STOP;
+ break;
+
+ case ARGT_BE:
+ if (!(p->cap & PR_CAP_BE)) {
+ msg = "Mandatory argument expected";
+ goto error;
+ }
+ argp[idx].data.prx = p;
+ argp[idx].type = ARGT_BE;
+ argp[idx+1].type = ARGT_STOP;
+ break;
+
+ case ARGT_TAB:
+ if (!p->table) {
+ msg = "Mandatory argument expected";
+ goto error;
+ }
+ argp[idx].data.t = p->table;
+ argp[idx].type = ARGT_TAB;
+ argp[idx+1].type = ARGT_STOP;
+ break;
+
+ default:
+ msg = "Mandatory argument expected";
+ goto error;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Check for exceed the number of required argument. */
+ if ((mask & ARGT_MASK) == ARGT_STOP &&
+ argp[idx].type != ARGT_STOP) {
+ msg = "Last argument expected";
+ goto error;
+ }
+
+ if ((mask & ARGT_MASK) == ARGT_STOP &&
+ argp[idx].type == ARGT_STOP) {
+ break;
+ }
+
+ /* Convert some argument types. All string in argp[] are for not
+ * duplicated yet.
+ */
+ switch (mask & ARGT_MASK) {
+ case ARGT_SINT:
+ if (argp[idx].type != ARGT_SINT) {
+ msg = "integer expected";
+ goto error;
+ }
+ argp[idx].type = ARGT_SINT;
+ break;
+
+ case ARGT_TIME:
+ if (argp[idx].type != ARGT_SINT) {
+ msg = "integer expected";
+ goto error;
+ }
+ argp[idx].type = ARGT_TIME;
+ break;
+
+ case ARGT_SIZE:
+ if (argp[idx].type != ARGT_SINT) {
+ msg = "integer expected";
+ goto error;
+ }
+ argp[idx].type = ARGT_SIZE;
+ break;
+
+ case ARGT_FE:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ argp[idx].data.prx = proxy_fe_by_name(argp[idx].data.str.area);
+ if (!argp[idx].data.prx) {
+ msg = "frontend doesn't exist";
+ goto error;
+ }
+ argp[idx].type = ARGT_FE;
+ break;
+
+ case ARGT_BE:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ argp[idx].data.prx = proxy_be_by_name(argp[idx].data.str.area);
+ if (!argp[idx].data.prx) {
+ msg = "backend doesn't exist";
+ goto error;
+ }
+ argp[idx].type = ARGT_BE;
+ break;
+
+ case ARGT_TAB:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ argp[idx].data.t = stktable_find_by_name(argp[idx].data.str.area);
+ if (!argp[idx].data.t) {
+ msg = "table doesn't exist";
+ goto error;
+ }
+ argp[idx].type = ARGT_TAB;
+ break;
+
+ case ARGT_SRV:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ sname = strrchr(argp[idx].data.str.area, '/');
+ if (sname) {
+ *sname++ = '\0';
+ pname = argp[idx].data.str.area;
+ px = proxy_be_by_name(pname);
+ if (!px) {
+ msg = "backend doesn't exist";
+ goto error;
+ }
+ }
+ else {
+ sname = argp[idx].data.str.area;
+ px = p;
+ }
+ argp[idx].data.srv = findserver(px, sname);
+ if (!argp[idx].data.srv) {
+ msg = "server doesn't exist";
+ goto error;
+ }
+ argp[idx].type = ARGT_SRV;
+ break;
+
+ case ARGT_IPV4:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ if (inet_pton(AF_INET, argp[idx].data.str.area, &argp[idx].data.ipv4)) {
+ msg = "invalid IPv4 address";
+ goto error;
+ }
+ argp[idx].type = ARGT_IPV4;
+ break;
+
+ case ARGT_MSK4:
+ if (argp[idx].type == ARGT_SINT)
+ len2mask4(argp[idx].data.sint, &argp[idx].data.ipv4);
+ else if (argp[idx].type == ARGT_STR) {
+ if (!str2mask(argp[idx].data.str.area, &argp[idx].data.ipv4)) {
+ msg = "invalid IPv4 mask";
+ goto error;
+ }
+ }
+ else {
+ msg = "integer or string expected";
+ goto error;
+ }
+ argp[idx].type = ARGT_MSK4;
+ break;
+
+ case ARGT_IPV6:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ if (inet_pton(AF_INET6, argp[idx].data.str.area, &argp[idx].data.ipv6)) {
+ msg = "invalid IPv6 address";
+ goto error;
+ }
+ argp[idx].type = ARGT_IPV6;
+ break;
+
+ case ARGT_MSK6:
+ if (argp[idx].type == ARGT_SINT)
+ len2mask6(argp[idx].data.sint, &argp[idx].data.ipv6);
+ else if (argp[idx].type == ARGT_STR) {
+ if (!str2mask6(argp[idx].data.str.area, &argp[idx].data.ipv6)) {
+ msg = "invalid IPv6 mask";
+ goto error;
+ }
+ }
+ else {
+ msg = "integer or string expected";
+ goto error;
+ }
+ argp[idx].type = ARGT_MSK6;
+ break;
+
+ case ARGT_REG:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ reg = regex_comp(argp[idx].data.str.area, !(argp[idx].type_flags & ARGF_REG_ICASE), 1, &err);
+ if (!reg) {
+ msg = lua_pushfstring(L, "error compiling regex '%s' : '%s'",
+ argp[idx].data.str.area, err);
+ free(err);
+ goto error;
+ }
+ argp[idx].type = ARGT_REG;
+ argp[idx].data.reg = reg;
+ break;
+
+ case ARGT_USR:
+ if (argp[idx].type != ARGT_STR) {
+ msg = "string expected";
+ goto error;
+ }
+ if (p->uri_auth && p->uri_auth->userlist &&
+ strcmp(p->uri_auth->userlist->name, argp[idx].data.str.area) == 0)
+ ul = p->uri_auth->userlist;
+ else
+ ul = auth_find_userlist(argp[idx].data.str.area);
+
+ if (!ul) {
+ msg = lua_pushfstring(L, "unable to find userlist '%s'", argp[idx].data.str.area);
+ goto error;
+ }
+ argp[idx].type = ARGT_USR;
+ argp[idx].data.usr = ul;
+ break;
+
+ case ARGT_STR:
+ if (!chunk_dup(&tmp, &argp[idx].data.str)) {
+ msg = "unable to duplicate string arg";
+ goto error;
+ }
+ argp[idx].data.str = tmp;
+ break;
+
+ case ARGT_MAP:
+ msg = "type not yet supported";
+ goto error;
+ break;
+
+ }
+
+ /* Check for type of argument. */
+ if ((mask & ARGT_MASK) != argp[idx].type) {
+ msg = lua_pushfstring(L, "'%s' expected, got '%s'",
+ arg_type_names[(mask & ARGT_MASK)],
+ arg_type_names[argp[idx].type & ARGT_MASK]);
+ goto error;
+ }
+
+ /* Next argument. */
+ mask >>= ARGT_BITS;
+ idx++;
+ }
+ return 0;
+
+ error:
+ argp[idx].type = ARGT_STOP;
+ free_args(argp);
+ WILL_LJMP(luaL_argerror(L, first + idx, msg));
+ return 0; /* Never reached */
+}
+
/*
 * The following functions are used to make the correspondence between the
 * executing lua pointer and the "struct hlua *" that contains the context.
 *
 * - hlua_gethlua : return the hlua context associated with an lua_State.
 * - hlua_sethlua : create the association between hlua context and lua_state.
 */
+inline struct hlua *hlua_gethlua(lua_State *L)
+{
+ struct hlua **hlua = lua_getextraspace(L);
+ return *hlua;
+}
+static inline void hlua_sethlua(struct hlua *hlua)
+{
+ struct hlua **hlua_store = lua_getextraspace(hlua->T);
+ *hlua_store = hlua;
+}
+
+/* Will return a non-NULL string indicating the Lua call trace if the caller
+ * currently is executing from within a Lua function. One line per entry will
+ * be emitted, and each extra line will be prefixed with <pfx>. If a current
+ * Lua function is not detected, NULL is returned.
+ */
+const char *hlua_show_current_location(const char *pfx)
+{
+ lua_State *L;
+ lua_Debug ar;
+
+ /* global or per-thread stack initializing ? */
+ if (hlua_state_id != -1 && (L = hlua_states[hlua_state_id]) && lua_getstack(L, 0, &ar))
+ return hlua_traceback(L, pfx);
+
+ /* per-thread stack running ? */
+ if (hlua_states[tid + 1] && (L = hlua_states[tid + 1]) && lua_getstack(L, 0, &ar))
+ return hlua_traceback(L, pfx);
+
+ /* global stack running ? */
+ if (hlua_states[0] && (L = hlua_states[0]) && lua_getstack(L, 0, &ar))
+ return hlua_traceback(L, pfx);
+
+ return NULL;
+}
+
/* This function is used to send logs. It tries to send them to:
 * - the log target applicable in the current context, OR
 * - stderr when no logger is in use for the current context
 */
static inline void hlua_sendlog(struct proxy *px, int level, const char *msg)
{
	struct tm tm;
	char *p;

	/* Cleanup the log message: non-printable characters are replaced
	 * with dots, and a message too long for the trash buffer is cut and
	 * terminated with " ..." (assumes trash.size > 4 — TODO confirm the
	 * minimum trash size guarantee).
	 */
	p = trash.area;
	for (; *msg != '\0'; msg++, p++) {
		if (p >= trash.area + trash.size - 1) {
			/* Break the message if exceed the buffer size. */
			*(p-4) = ' ';
			*(p-3) = '.';
			*(p-2) = '.';
			*(p-1) = '.';
			break;
		}
		if (isprint((unsigned char)*msg))
			*p = *msg;
		else
			*p = '.';
	}
	*p = '\0';

	/* first target: the configured loggers, when enabled */
	if (hlua_log_opts & HLUA_LOG_LOGGERS_ON)
		send_log(px, level, "%s\n", trash.area);

	/* second target: stderr, unless quiet mode (without verbose/starting) */
	if (!(global.mode & MODE_QUIET) || (global.mode & (MODE_VERBOSE | MODE_STARTING))) {
		if (!(hlua_log_opts & HLUA_LOG_STDERR_MASK))
			return;

		/* when logging via stderr is set to 'auto', it behaves like 'off' unless one of:
		 * - logging via loggers is disabled
		 * - this is a non-proxy context and there is no global logger configured
		 * - this is a proxy context and the proxy has no logger configured
		 */
		if ((hlua_log_opts & (HLUA_LOG_STDERR_MASK | HLUA_LOG_LOGGERS_ON)) == (HLUA_LOG_STDERR_AUTO | HLUA_LOG_LOGGERS_ON)) {
			/* AUTO=OFF in non-proxy context only if at least one global logger is defined */
			if ((px == NULL) && (!LIST_ISEMPTY(&global.loggers)))
				return;

			/* AUTO=OFF in proxy context only if at least one logger is configured for the proxy */
			if ((px != NULL) && (!LIST_ISEMPTY(&px->loggers)))
				return;
		}

		/* debug-level messages are only emitted in debug mode */
		if (level == LOG_DEBUG && !(global.mode & MODE_DEBUG))
			return;

		get_localtime(date.tv_sec, &tm);
		fprintf(stderr, "[%s] %03d/%02d%02d%02d (%d) : %s\n",
		        log_levels[level], tm.tm_yday, tm.tm_hour, tm.tm_min, tm.tm_sec,
		        (int)getpid(), trash.area);
		fflush(stderr);
	}
}
+
+/* This function just ensure that the yield will be always
+ * returned with a timeout and permit to set some flags
+ * <timeout> is a tick value
+ */
+__LJMP void hlua_yieldk(lua_State *L, int nresults, lua_KContext ctx,
+ lua_KFunction k, int timeout, unsigned int flags)
+{
+ struct hlua *hlua;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua) {
+ return;
+ }
+
+ /* Set the wake timeout. If timeout is required, we set
+ * the expiration time.
+ */
+ hlua->wake_time = timeout;
+
+ hlua->flags |= flags;
+
+ /* Process the yield. */
+ MAY_LJMP(lua_yieldk(L, nresults, ctx, k));
+}
+
/* This function initialises the Lua environment stored in the stream.
 * It must be called at the start of the stream. This function creates
 * a LUA coroutine. It can not be used to create the main LUA context.
 *
 * This function is particular. it initialises a new Lua thread. If the
 * initialisation fails (example: out of memory error), the lua function
 * throws an error (longjmp).
 *
 * This function manipulates two Lua stacks: the main and the thread. Only
 * the main stack can fail. The thread is not manipulated. This function
 * MUST NOT manipulate the created thread stack state, because it is not
 * protected against errors thrown by the thread stack.
 *
 * Returns 1 on success and 0 on failure (lua->Tref is then LUA_REFNIL).
 */
int hlua_ctx_init(struct hlua *lua, int state_id, struct task *task)
{
	lua->Mref = LUA_REFNIL;
	lua->flags = 0;
	lua->gc_count = 0;
	lua->wake_time = TICK_ETERNITY;
	lua->state_id = state_id;
	hlua_timer_init(&lua->timer, 0); /* default value, no timeout */
	LIST_INIT(&lua->com);
	MT_LIST_INIT(&lua->hc_list);
	/* protect against a longjmp raised by the parent state below */
	if (!SET_SAFE_LJMP_PARENT(lua)) {
		lua->Tref = LUA_REFNIL;
		return 0;
	}
	lua->T = lua_newthread(hlua_states[state_id]);
	if (!lua->T) {
		lua->Tref = LUA_REFNIL;
		RESET_SAFE_LJMP_PARENT(lua);
		return 0;
	}
	hlua_sethlua(lua);
	/* anchor the new coroutine in the registry so it is not collected */
	lua->Tref = luaL_ref(hlua_states[state_id], LUA_REGISTRYINDEX);
	lua->task = task;
	RESET_SAFE_LJMP_PARENT(lua);
	return 1;
}
+
+/* kill all associated httpclient to this hlua task
+ * We must take extra precautions as we're manipulating lua-exposed
+ * objects without the main lua lock.
+ */
+static void hlua_httpclient_destroy_all(struct hlua *hlua)
+{
+ struct hlua_httpclient *hlua_hc;
+
+ /* use thread-safe accessors for hc_list since GC cycle initiated by
+ * another thread sharing the same main lua stack (lua coroutine)
+ * could execute hlua_httpclient_gc() on the hlua->hc_list items
+ * in parallel: Lua GC applies on the main stack, it is not limited to
+ * a single coroutine stack, see Github issue #2037 for reference.
+ * Remember, coroutines created using lua_newthread() are not meant to
+ * be thread safe in Lua. (From lua co-author:
+ * http://lua-users.org/lists/lua-l/2011-07/msg00072.html)
+ *
+ * This security measure is superfluous when 'lua-load-per-thread' is used
+ * since in this case coroutines exclusively run on the same thread
+ * (main stack is not shared between OS threads).
+ */
+ while ((hlua_hc = MT_LIST_POP(&hlua->hc_list, typeof(hlua_hc), by_hlua))) {
+ httpclient_stop_and_destroy(hlua_hc->hc);
+ hlua_hc->hc = NULL;
+ }
+}
+
+
/* Used to destroy the Lua coroutine when the attached stream or task
 * is destroyed. This also destroys the associated memory context and
 * frees the struct "lua" itself.
 */
void hlua_ctx_destroy(struct hlua *lua)
{
	if (!lua)
		return;

	if (!lua->T)
		goto end;

	/* clean all running httpclient */
	hlua_httpclient_destroy_all(lua);

	/* Purge all the pending signals. */
	notification_purge(&lua->com);

	/* NOTE(review): on SET_SAFE_LJMP failure we return without freeing
	 * <lua>; presumably the state is too damaged to touch — confirm
	 * whether this leak is intentional.
	 */
	if (!SET_SAFE_LJMP(lua))
		return;
	luaL_unref(lua->T, LUA_REGISTRYINDEX, lua->Mref);
	RESET_SAFE_LJMP(lua);

	if (!SET_SAFE_LJMP_PARENT(lua))
		return;
	luaL_unref(hlua_states[lua->state_id], LUA_REGISTRYINDEX, lua->Tref);
	RESET_SAFE_LJMP_PARENT(lua);
	/* Force a garbage collection pass. If the Lua program finished
	 * without error, running the GC on the thread pointer frees all
	 * the unused memory.
	 * If the thread finished with an error or is currently yielded,
	 * it seems that the GC applied to the thread doesn't clean anything,
	 * so we run the GC on the main thread instead.
	 * NOTE: this action may lock all the Lua threads until the end of
	 * the garbage collection.
	 */
	if (lua->gc_count) {
		if (!SET_SAFE_LJMP_PARENT(lua))
			return;
		lua_gc(hlua_states[lua->state_id], LUA_GCCOLLECT, 0);
		RESET_SAFE_LJMP_PARENT(lua);
	}

	lua->T = NULL;

end:
	pool_free(pool_head_hlua, lua);
}
+
/* This function is used to restore the Lua context when a coroutine
 * fails. It copies the common memory between the old coroutine and the
 * new one. The old coroutine is destroyed and replaced by the new one.
 * If the flag "keep_msg" is set, the last entry of the old stack is
 * assumed to be a string error message and is copied into the new stack.
 * Returns 1 on success, 0 when the new coroutine could not be created.
 */
static int hlua_ctx_renew(struct hlua *lua, int keep_msg)
{
	lua_State *T;
	int new_ref;

	/* New Lua coroutine. */
	T = lua_newthread(hlua_states[lua->state_id]);
	if (!T)
		return 0;

	/* Copy last error message. */
	if (keep_msg)
		lua_xmove(lua->T, T, 1);

	/* Copy data between the coroutines: move the registry-anchored
	 * memory table from the old stack to the new one and re-anchor it.
	 */
	lua_rawgeti(lua->T, LUA_REGISTRYINDEX, lua->Mref);
	lua_xmove(lua->T, T, 1);
	new_ref = luaL_ref(T, LUA_REGISTRYINDEX); /* Value popped. */

	/* Destroy old data. */
	luaL_unref(lua->T, LUA_REGISTRYINDEX, lua->Mref);

	/* The thread is garbage collected by Lua. */
	luaL_unref(hlua_states[lua->state_id], LUA_REGISTRYINDEX, lua->Tref);

	/* Fill the struct with the new coroutine values. */
	lua->Mref = new_ref;
	lua->T = T;
	lua->Tref = luaL_ref(hlua_states[lua->state_id], LUA_REGISTRYINDEX);

	/* Set context. */
	hlua_sethlua(lua);

	return 1;
}
+
/* Debug hook installed on running coroutines. It is triggered every
 * 'hlua_nb_instruction' instructions (and on function returns once armed
 * with LUA_MASKRET) and is used to enforce yields and execution timeouts
 * so that a misbehaving script cannot monopolize the process.
 */
void hlua_hook(lua_State *L, lua_Debug *ar)
{
	struct hlua *hlua;

	/* Get hlua struct, or NULL if we execute from main lua state */
	hlua = hlua_gethlua(L);
	if (!hlua)
		return;

	if (hlua->T != L) {
		/* We don't want to enforce a yield on a sub coroutine, since
		 * we have no guarantees that the yield will be handled properly.
		 * Indeed, only the hlua->T coroutine is being handled through
		 * hlua_ctx_resume() function.
		 *
		 * Instead, we simply check for timeouts and wait for the sub
		 * coroutine to finish..
		 */
		goto check_timeout;
	}

	/* Lua cannot yield when its returning from a function,
	 * so, we can fix the interrupt hook to 1 instruction,
	 * expecting that the function is finished.
	 */
	if (lua_gethookmask(L) & LUA_MASKRET) {
		lua_sethook(hlua->T, hlua_hook, LUA_MASKCOUNT, 1);
		return;
	}

	/* If we interrupt the Lua processing in yieldable state, we yield.
	 * If the state is not yieldable, trying yield causes an error.
	 */
	if (lua_isyieldable(L)) {
		/* note: for converters/fetches.. where yielding is not allowed
		 * hlua_ctx_resume() will simply perform a goto resume_execution
		 * instead of rescheduling hlua->task.
		 * also: hlua_ctx_resume() will take care of checking execution
		 * timeout and re-applying the hook as needed.
		 */
		MAY_LJMP(hlua_yieldk(L, 0, 0, NULL, TICK_ETERNITY, HLUA_CTRLYIELD));
		/* lua docs says that the hook should return immediately after lua_yieldk
		 *
		 * From: https://www.lua.org/manual/5.3/manual.html#lua_yieldk
		 *
		 * Moreover, it seems that we don't want to continue after the yield
		 * because the end of the function is about handling unyieldable function,
		 * which is not the case here.
		 *
		 * ->if we don't return lua_sethook gets incorrectly set with MASKRET later
		 * in the function.
		 */
		return;
	}

 check_timeout:
	/* If we cannot yield, check the timeout. */
	if (!hlua_timer_check(&hlua->timer)) {
		lua_pushfstring(L, "execution timeout");
		WILL_LJMP(lua_error(L));
	}

	/* Try to interrupt the process at the end of the current
	 * unyieldable function.
	 */
	lua_sethook(hlua->T, hlua_hook, LUA_MASKRET|LUA_MASKCOUNT, hlua_nb_instruction);
}
+
/* This function starts or resumes the Lua stack execution. If the flag
 * "yield_allowed" is not set and the LUA stack execution returns a yield,
 * the function returns an error.
 *
 * The function can return these values:
 * - HLUA_E_OK : The execution is terminated without any errors.
 * - HLUA_E_AGAIN : The execution must continue at the next associated
 *                  task wakeup.
 * - HLUA_E_ERRMSG : An error has occurred, an error message is set in
 *                   the top of the stack.
 * - HLUA_E_ERR : An error has occurred without error message.
 * (plus HLUA_E_ETMOUT, HLUA_E_NOMEM and HLUA_E_YIELD for the specific
 * timeout, out-of-memory and forbidden-yield cases.)
 *
 * If an error occurred, the stack is renewed and it is ready to run new
 * LUA code.
 */
static enum hlua_exec hlua_ctx_resume(struct hlua *lua, int yield_allowed)
{
#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 504
	int nres;
#endif
	int ret;
	const char *msg;
	const char *trace;

	/* Lock the whole Lua execution. This lock must be before the
	 * label "resume_execution".
	 */
	hlua_lock(lua);

	/* reset the timer as we might be re-entering the function to
	 * resume the coroutine after a successful yield
	 * (cumulative time will be updated)
	 */
	hlua_timer_reset(&lua->timer);

resume_execution:

	/* This hook interrupts the Lua processing each 'hlua_nb_instruction'
	 * instructions. it is used for preventing infinite loops.
	 */
	lua_sethook(lua->T, hlua_hook, LUA_MASKCOUNT, hlua_nb_instruction);

	/* Remove all flags except the running flags. */
	HLUA_SET_RUN(lua);
	HLUA_CLR_CTRLYIELD(lua);
	HLUA_CLR_WAKERESWR(lua);
	HLUA_CLR_WAKEREQWR(lua);
	HLUA_CLR_NOYIELD(lua);
	if (!yield_allowed)
		HLUA_SET_NOYIELD(lua);

	/* reset wake_time. */
	lua->wake_time = TICK_ETERNITY;

	/* start the timer as we're about to start lua processing */
	hlua_timer_start(&lua->timer);

	/* Call the function. */
#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 504
	ret = lua_resume(lua->T, hlua_states[lua->state_id], lua->nargs, &nres);
#else
	ret = lua_resume(lua->T, hlua_states[lua->state_id], lua->nargs);
#endif

	/* out of lua processing, stop the timer */
	hlua_timer_stop(&lua->timer);

	/* reset nargs because those possibly passed to the lua_resume() call
	 * were already consumed, and since we may call lua_resume() again
	 * after a successful yield, we don't want to pass stale nargs hint
	 * to the Lua API. As such, nargs should be set explicitly before each
	 * lua_resume() (or hlua_ctx_resume()) invocation if needed.
	 */
	lua->nargs = 0;

	/* first pass: translate the raw lua_resume() status into an hlua
	 * return code, preparing an error message on the stack when relevant.
	 */
	switch (ret) {

	case LUA_OK:
		ret = HLUA_E_OK;
		break;

	case LUA_YIELD:
		/* Check if the execution timeout is expired. If it is the case, we
		 * break the Lua execution.
		 */
		if (!hlua_timer_check(&lua->timer)) {
			lua_settop(lua->T, 0); /* Empty the stack. */
			ret = HLUA_E_ETMOUT;
			break;
		}
		/* Process the forced yield. if the general yield is not allowed or
		 * if no task were associated this the current Lua execution
		 * coroutine, we resume the execution. Else we want to return in the
		 * scheduler and we want to be waked up again, to continue the
		 * current Lua execution. So we schedule our own task.
		 */
		if (HLUA_IS_CTRLYIELDING(lua)) {
			if (!yield_allowed || !lua->task)
				goto resume_execution;
			task_wakeup(lua->task, TASK_WOKEN_MSG);
		}
		if (!yield_allowed) {
			lua_settop(lua->T, 0); /* Empty the stack. */
			ret = HLUA_E_YIELD;
			break;
		}
		ret = HLUA_E_AGAIN;
		break;

	case LUA_ERRRUN:

		/* Special exit case. The traditional exit is returned as an error
		 * because errors are the only way to return immediately from a
		 * lua execution.
		 */
		if (lua->flags & HLUA_EXIT) {
			ret = HLUA_E_OK;
			hlua_ctx_renew(lua, 1);
			break;
		}

		lua->wake_time = TICK_ETERNITY;
		if (!lua_checkstack(lua->T, 1)) {
			ret = HLUA_E_ERR;
			break;
		}
		msg = lua_tostring(lua->T, -1);
		lua_settop(lua->T, 0); /* Empty the stack. */
		trace = hlua_traceback(lua->T, ", ");
		if (msg)
			lua_pushfstring(lua->T, "[state-id %d] runtime error: %s from %s", lua->state_id, msg, trace);
		else
			lua_pushfstring(lua->T, "[state-id %d] unknown runtime error from %s", lua->state_id, trace);
		ret = HLUA_E_ERRMSG;
		break;

	case LUA_ERRMEM:
		lua->wake_time = TICK_ETERNITY;
		lua_settop(lua->T, 0); /* Empty the stack. */
		ret = HLUA_E_NOMEM;
		break;

	case LUA_ERRERR:
		lua->wake_time = TICK_ETERNITY;
		if (!lua_checkstack(lua->T, 1)) {
			ret = HLUA_E_ERR;
			break;
		}
		msg = lua_tostring(lua->T, -1);
		lua_settop(lua->T, 0); /* Empty the stack. */
		if (msg)
			lua_pushfstring(lua->T, "[state-id %d] message handler error: %s", lua->state_id, msg);
		else
			lua_pushfstring(lua->T, "[state-id %d] message handler error", lua->state_id);
		ret = HLUA_E_ERRMSG;
		break;

	default:
		lua->wake_time = TICK_ETERNITY;
		lua_settop(lua->T, 0); /* Empty the stack. */
		ret = HLUA_E_ERR;
		break;
	}

	/* second pass: clean up the context according to the outcome (purge
	 * pending notifications and renew the coroutine on any error path).
	 */
	switch (ret) {
	case HLUA_E_AGAIN:
		break;

	case HLUA_E_ERRMSG:
		notification_purge(&lua->com);
		hlua_ctx_renew(lua, 1);
		HLUA_CLR_RUN(lua);
		break;

	case HLUA_E_ETMOUT:
	case HLUA_E_NOMEM:
	case HLUA_E_YIELD:
	case HLUA_E_ERR:
		HLUA_CLR_RUN(lua);
		notification_purge(&lua->com);
		hlua_ctx_renew(lua, 0);
		break;

	case HLUA_E_OK:
		HLUA_CLR_RUN(lua);
		notification_purge(&lua->com);
		break;
	}

	/* This is the main exit point, remove the Lua lock. */
	hlua_unlock(lua);

	return ret;
}
+
+/* This function exit the current code. */
+__LJMP static int hlua_done(lua_State *L)
+{
+ struct hlua *hlua;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua)
+ return 0;
+
+ hlua->flags |= HLUA_EXIT;
+ WILL_LJMP(lua_error(L));
+
+ return 0;
+}
+
+/* This function is an LUA binding. It provides a function
+ * for deleting ACL from a referenced ACL file.
+ */
+__LJMP static int hlua_del_acl(lua_State *L)
+{
+ const char *name;
+ const char *key;
+ struct pat_ref *ref;
+
+ MAY_LJMP(check_args(L, 2, "del_acl"));
+
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+ key = MAY_LJMP(luaL_checkstring(L, 2));
+
+ ref = pat_ref_lookup(name);
+ if (!ref)
+ WILL_LJMP(luaL_error(L, "'del_acl': unknown acl file '%s'", name));
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ pat_ref_delete(ref, key);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ return 0;
+}
+
+/* This function is an LUA binding. It provides a function
+ * for deleting map entry from a referenced map file.
+ */
+static int hlua_del_map(lua_State *L)
+{
+ const char *name;
+ const char *key;
+ struct pat_ref *ref;
+
+ MAY_LJMP(check_args(L, 2, "del_map"));
+
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+ key = MAY_LJMP(luaL_checkstring(L, 2));
+
+ ref = pat_ref_lookup(name);
+ if (!ref)
+ WILL_LJMP(luaL_error(L, "'del_map': unknown acl file '%s'", name));
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ pat_ref_delete(ref, key);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ return 0;
+}
+
+/* This function is an LUA binding. It provides a function
+ * for adding ACL pattern from a referenced ACL file.
+ */
+static int hlua_add_acl(lua_State *L)
+{
+ const char *name;
+ const char *key;
+ struct pat_ref *ref;
+
+ MAY_LJMP(check_args(L, 2, "add_acl"));
+
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+ key = MAY_LJMP(luaL_checkstring(L, 2));
+
+ ref = pat_ref_lookup(name);
+ if (!ref)
+ WILL_LJMP(luaL_error(L, "'add_acl': unknown acl file '%s'", name));
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ if (pat_ref_find_elt(ref, key) == NULL)
+ pat_ref_add(ref, key, NULL, NULL);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ return 0;
+}
+
+/* This function is an LUA binding. It provides a function
+ * for setting map pattern and sample from a referenced map
+ * file.
+ */
+static int hlua_set_map(lua_State *L)
+{
+ const char *name;
+ const char *key;
+ const char *value;
+ struct pat_ref *ref;
+
+ MAY_LJMP(check_args(L, 3, "set_map"));
+
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+ key = MAY_LJMP(luaL_checkstring(L, 2));
+ value = MAY_LJMP(luaL_checkstring(L, 3));
+
+ ref = pat_ref_lookup(name);
+ if (!ref)
+ WILL_LJMP(luaL_error(L, "'set_map': unknown map file '%s'", name));
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ if (pat_ref_find_elt(ref, key) != NULL)
+ pat_ref_set(ref, key, value, NULL, NULL);
+ else
+ pat_ref_add(ref, key, value, NULL);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ return 0;
+}
+
+/* This function is an LUA binding. It provides a function
+ * for retrieving a var from the proc scope in core.
+ */
+ static int hlua_core_get_var(lua_State *L)
+{
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 1, "get_var"));
+
+ name = MAY_LJMP(luaL_checklstring(L, 1, &len));
+
+ /* We can only retrieve information from the proc. scope */
+ /* FIXME: I didn't want to expose vars_hash_name from vars.c */
+ if (len < 5 || strncmp(name, "proc.", 5) != 0)
+ WILL_LJMP(luaL_error(L, "'get_var': Only 'proc.' scope allowed to be retrieved in 'core.get_var()'."));
+
+ memset(&smp, 0, sizeof(smp));
+ if (!vars_get_by_name(name, len, &smp, NULL)) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ return MAY_LJMP(hlua_smp2lua(L, &smp));
+ return 1;
+}
+
+/* This function disables the sending of email through the
+ * legacy email sending function which is implemented using
+ * checks.
+ *
+ * It may not be used during runtime.
+ */
+__LJMP static int hlua_disable_legacy_mailers(lua_State *L)
+{
+ if (hlua_gethlua(L))
+ WILL_LJMP(luaL_error(L, "disable_legacy_mailers: "
+ "not available outside of init or body context"));
+ send_email_disabled = 1;
+ return 0;
+}
+
+/* A class is a lot of memory that contain data. This data can be a table,
+ * an integer or user data. This data is associated with a metatable. This
+ * metatable have an original version registered in the global context with
+ * the name of the object (_G[<name>] = <metable> ).
+ *
 * A metatable is a table that modifies the standard behavior of a plain
 * access to the associated data. The entries of this new metatable are
 * defined as follows:
+ *
+ * http://lua-users.org/wiki/MetatableEvents
+ *
+ * __index
+ *
 *      When we access an absent field in a table, the result is nil. This is
+ * true, but it is not the whole truth. Actually, such access triggers
+ * the interpreter to look for an __index metamethod: If there is no
+ * such method, as usually happens, then the access results in nil;
+ * otherwise, the metamethod will provide the result.
+ *
+ * Control 'prototype' inheritance. When accessing "myTable[key]" and
+ * the key does not appear in the table, but the metatable has an __index
+ * property:
+ *
+ * - if the value is a function, the function is called, passing in the
+ * table and the key; the return value of that function is returned as
+ * the result.
+ *
+ * - if the value is another table, the value of the key in that table is
+ * asked for and returned (and if it doesn't exist in that table, but that
+ * table's metatable has an __index property, then it continues on up)
+ *
+ * - Use "rawget(myTable,key)" to skip this metamethod.
+ *
+ * http://www.lua.org/pil/13.4.1.html
+ *
+ * __newindex
+ *
+ * Like __index, but control property assignment.
+ *
 * __mode - Control weak references. A string value with one or both
 *          of the characters 'k' and 'v' which specifies that the
 *          keys and/or values in the table are weak references.
+ *
+ * __call - Treat a table like a function. When a table is followed by
+ * parenthesis such as "myTable( 'foo' )" and the metatable has
+ * a __call key pointing to a function, that function is invoked
+ * (passing any specified arguments) and the return value is
+ * returned.
+ *
+ * __metatable - Hide the metatable. When "getmetatable( myTable )" is
+ * called, if the metatable for myTable has a __metatable
+ * key, the value of that key is returned instead of the
+ * actual metatable.
+ *
+ * __tostring - Control string representation. When the builtin
+ * "tostring( myTable )" function is called, if the metatable
+ * for myTable has a __tostring property set to a function,
+ * that function is invoked (passing myTable to it) and the
+ * return value is used as the string representation.
+ *
+ * __len - Control table length. When the table length is requested using
+ * the length operator ( '#' ), if the metatable for myTable has
+ * a __len key pointing to a function, that function is invoked
+ * (passing myTable to it) and the return value used as the value
+ * of "#myTable".
+ *
+ * __gc - Userdata finalizer code. When userdata is set to be garbage
+ * collected, if the metatable has a __gc field pointing to a
+ * function, that function is first invoked, passing the userdata
+ * to it. The __gc metamethod is not called for tables.
+ * (See http://lua-users.org/lists/lua-l/2006-11/msg00508.html)
+ *
+ * Special metamethods for redefining standard operators:
+ * http://www.lua.org/pil/13.1.html
+ *
+ * __add "+"
+ * __sub "-"
+ * __mul "*"
+ * __div "/"
 * __unm "-" (unary minus)
+ * __pow "^"
+ * __concat ".."
+ *
+ * Special methods for redefining standard relations
+ * http://www.lua.org/pil/13.2.html
+ *
+ * __eq "=="
+ * __lt "<"
+ * __le "<="
+ */
+
+/*
+ *
+ *
+ * Class Map
+ *
+ *
+ */
+
/* Returns a struct map_descriptor pointer if the stack entry "ud" is
 * a class Map object, otherwise it throws an error.
 */
__LJMP static struct map_descriptor *hlua_checkmap(lua_State *L, int ud)
{
	return MAY_LJMP(hlua_checkudata(L, ud, class_map_ref));
}
+
+/* This function is the map constructor. It doesn't need
+ * the class Map object. It creates and returns a new Map
+ * object. It must be called only during "body" or "init"
+ * context because it performs some filesystem accesses.
+ *
+ * Lua arguments: 1) the map file name (string),
+ *                2) optional match method (integer, one of PAT_MATCH_*).
+ * Returns the new Map object on the Lua stack, or throws a Lua
+ * error on failure.
+ */
+__LJMP static int hlua_map_new(struct lua_State *L)
+{
+ const char *fn;
+ int match = PAT_MATCH_STR;
+ struct sample_conv conv;
+ const char *file = "";
+ int line = 0;
+ lua_Debug ar;
+ char *err = NULL;
+ struct arg args[2];
+
+ if (lua_gettop(L) < 1 || lua_gettop(L) > 2)
+ WILL_LJMP(luaL_error(L, "'new' needs at least 1 argument."));
+
+ fn = MAY_LJMP(luaL_checkstring(L, 1));
+
+ if (lua_gettop(L) >= 2) {
+ match = MAY_LJMP(luaL_checkinteger(L, 2));
+ if (match < 0 || match >= PAT_MATCH_NUM)
+ WILL_LJMP(luaL_error(L, "'new' needs a valid match method."));
+ }
+
+ /* Get Lua filename and line number, for error reporting by
+ * sample_load_map() below.
+ */
+ if (lua_getstack(L, 1, &ar)) { /* check function at level */
+ lua_getinfo(L, "Sl", &ar); /* get info about it */
+ if (ar.currentline > 0) { /* is there info? */
+ file = ar.short_src;
+ line = ar.currentline;
+ }
+ }
+
+ /* fill fake sample_conv struct. */
+ conv.kw = ""; /* unused. */
+ conv.process = NULL; /* unused. */
+ conv.arg_mask = 0; /* unused. */
+ conv.val_args = NULL; /* unused. */
+ conv.out_type = SMP_T_STR;
+ conv.private = (void *)(long)match;
+ switch (match) {
+ case PAT_MATCH_STR: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_BEG: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_SUB: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_DIR: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_DOM: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_END: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_REG: conv.in_type = SMP_T_STR; break;
+ case PAT_MATCH_INT: conv.in_type = SMP_T_SINT; break;
+ case PAT_MATCH_IP: conv.in_type = SMP_T_ADDR; break;
+ default:
+ WILL_LJMP(luaL_error(L, "'new' doesn't support this match mode."));
+ }
+
+ /* fill fake args: the file name as first arg, then a stop marker. */
+ args[0].type = ARGT_STR;
+ args[0].data.str.area = strdup(fn);
+ args[0].data.str.data = strlen(fn);
+ args[0].data.str.size = args[0].data.str.data+1;
+ args[1].type = ARGT_STOP;
+
+ /* load the map. */
+ if (!sample_load_map(args, &conv, file, line, &err)) {
+ /* error case: we can't use luaL_error because we must
+ * free the err variable.
+ */
+ luaL_where(L, 1);
+ lua_pushfstring(L, "'new': %s.", err);
+ lua_concat(L, 2);
+ free(err);
+ chunk_destroy(&args[0].data.str);
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* create the lua object: a table whose slot [0] holds the
+ * map descriptor as light userdata.
+ */
+ lua_newtable(L);
+ lua_pushlightuserdata(L, args[0].data.map);
+ lua_rawseti(L, -2, 0);
+
+ /* Pop a class Map metatable and affect it to the userdata. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_map_ref);
+ lua_setmetatable(L, -2);
+
+
+ return 1;
+}
+
+/* Common backend for Map:lookup()/Map:slookup(). Performs a lookup in
+ * the map held by the class Map object at stack index 1, using the value
+ * at index 2 (integer or string, depending on the pattern expected type).
+ * When "str" is non-zero, a miss pushes an empty string instead of nil.
+ * Pushes exactly one value and returns 1.
+ */
+__LJMP static inline int _hlua_map_lookup(struct lua_State *L, int str)
+{
+ struct map_descriptor *desc;
+ struct pattern *pat;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "lookup"));
+ desc = MAY_LJMP(hlua_checkmap(L, 1));
+ if (desc->pat.expect_type == SMP_T_SINT) {
+ smp.data.type = SMP_T_SINT;
+ smp.data.u.sint = MAY_LJMP(luaL_checkinteger(L, 2));
+ }
+ else {
+ /* the string is borrowed from the Lua stack, hence SMP_F_CONST */
+ smp.data.type = SMP_T_STR;
+ smp.flags = SMP_F_CONST;
+ smp.data.u.str.area = (char *)MAY_LJMP(luaL_checklstring(L, 2, (size_t *)&smp.data.u.str.data));
+ smp.data.u.str.size = smp.data.u.str.data + 1;
+ }
+
+ pat = pattern_exec_match(&desc->pat, &smp, 1);
+ if (!pat || !pat->data) {
+ if (str)
+ lua_pushstring(L, "");
+ else
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* The Lua pattern must return a string, so we can't check the returned type */
+ lua_pushlstring(L, pat->data->u.str.area, pat->data->u.str.data);
+ return 1;
+}
+
+/* Map:lookup(): returns the matched value, or nil on a miss. */
+__LJMP static int hlua_map_lookup(struct lua_State *L)
+{
+ return _hlua_map_lookup(L, 0);
+}
+
+/* Map:slookup(): returns the matched value, or an empty string on a miss. */
+__LJMP static int hlua_map_slookup(struct lua_State *L)
+{
+ return _hlua_map_lookup(L, 1);
+}
+
+/*
+ *
+ *
+ * Class Socket
+ *
+ *
+ */
+
+/* Returns the struct hlua_socket if the stack entry "ud" is
+ * a class Socket object, otherwise it throws a Lua error.
+ */
+__LJMP static struct hlua_socket *hlua_checksocket(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_socket_ref));
+}
+
+/* This function is the applet handler called for each I/O event on the
+ * established connection. It is used to notify waiting Lua tasks that
+ * space is available to send or that data was received.
+ */
+static void hlua_socket_handler(struct appctx *appctx)
+{
+ struct hlua_csk_ctx *ctx = appctx->svcctx;
+ struct stconn *sc = appctx_sc(appctx);
+
+ /* Endpoint closed or in error: drain pending output and wake every
+ * waiter so they can observe the termination.
+ */
+ if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+ co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+ notification_wake(&ctx->wake_on_read);
+ notification_wake(&ctx->wake_on_write);
+ return;
+ }
+
+ /* The Lua side asked for destruction (see hlua_socket_gc/close). */
+ if (ctx->die) {
+ se_fl_set(appctx->sedesc, SE_FL_EOI|SE_FL_EOS);
+ notification_wake(&ctx->wake_on_read);
+ notification_wake(&ctx->wake_on_write);
+ return;
+ }
+
+ /* If we can't write, wakeup the pending write signals. */
+ if (channel_output_closed(sc_ic(sc)))
+ notification_wake(&ctx->wake_on_write);
+
+ /* If we can't read, wakeup the pending read signals. */
+ if (channel_input_closed(sc_oc(sc)))
+ notification_wake(&ctx->wake_on_read);
+
+ /* if the connection is not established, inform the stream that we want
+ * to be notified whenever the connection completes.
+ */
+ if (sc_opposite(sc)->state < SC_ST_EST) {
+ applet_need_more_data(appctx);
+ se_need_remote_conn(appctx->sedesc);
+ applet_have_more_data(appctx);
+ return;
+ }
+
+ /* This function is called after the connect. */
+ ctx->connected = 1;
+
+ /* Wake the tasks which want to write if the buffer has available space. */
+ if (channel_may_recv(sc_ic(sc)))
+ notification_wake(&ctx->wake_on_write);
+
+ /* Wake the tasks which want to read if the buffer contains data. */
+ if (co_data(sc_oc(sc)))
+ notification_wake(&ctx->wake_on_read);
+
+ /* If write notifications are registered, we consider we want
+ * to write, so we clear the blocking flag.
+ */
+ if (notification_registered(&ctx->wake_on_write))
+ applet_have_more_data(appctx);
+}
+
+/* Applet .init callback: finalizes the applet startup on the socket proxy
+ * and configures the stream to connect directly to the server recorded in
+ * the csk context (set by connect()/connect_ssl()). Also propagates the
+ * timeout configured through settimeout(), if any.
+ * Returns 0 on success, -1 on error.
+ */
+static int hlua_socket_init(struct appctx *appctx)
+{
+ struct hlua_csk_ctx *csk_ctx = appctx->svcctx;
+ struct stream *s;
+
+ if (appctx_finalize_startup(appctx, socket_proxy, &BUF_NULL) == -1)
+ goto error;
+
+ s = appctx_strm(appctx);
+
+ /* Configure "right" stream connector. This stconn is used to connect
+ * and retrieve data from the server. The connection is initialized
+ * with the "struct server".
+ */
+ sc_set_state(s->scb, SC_ST_ASS);
+
+ /* Force destination server. */
+ s->flags |= SF_DIRECT | SF_ASSIGNED | SF_BE_ASSIGNED;
+ s->target = &csk_ctx->srv->obj_type;
+
+ if (csk_ctx->timeout) {
+ s->sess->fe->timeout.connect = csk_ctx->timeout;
+ s->scf->ioto = csk_ctx->timeout;
+ s->scb->ioto = csk_ctx->timeout;
+ }
+
+ return 0;
+
+ error:
+ return -1;
+}
+
+/* Applet .release callback, called when the "struct stream" is destroyed.
+ * Removes the cross-reference link from the Lua object to this stream and
+ * wakes all the pending signals so waiters don't block forever.
+ */
+static void hlua_socket_release(struct appctx *appctx)
+{
+ struct hlua_csk_ctx *ctx = appctx->svcctx;
+ struct xref *peer;
+
+ /* Remove my link in the original objects. */
+ peer = xref_get_peer_and_lock(&ctx->xref);
+ if (peer)
+ xref_disconnect(&ctx->xref, peer);
+
+ /* Wake all the tasks waiting for me. */
+ notification_wake(&ctx->wake_on_read);
+ notification_wake(&ctx->wake_on_write);
+}
+
+/* If the garbage collection of the object is launched, nobody
+ * uses this object anymore. If the stream does not exist, just quit.
+ * Otherwise, send the shutdown signal to the stream. In some cases,
+ * pending signals can remain in the read and write lists; destroy
+ * them.
+ */
+__LJMP static int hlua_socket_gc(lua_State *L)
+{
+ struct hlua_socket *socket;
+ struct hlua_csk_ctx *ctx;
+ struct xref *peer;
+
+ MAY_LJMP(check_args(L, 1, "__gc"));
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer)
+ return 0;
+
+ ctx = container_of(peer, struct hlua_csk_ctx, xref);
+
+ /* Set the flag which destroys the session; the applet handler acts
+ * on it at next wakeup (see hlua_socket_handler).
+ */
+ ctx->die = 1;
+ appctx_wakeup(ctx->appctx);
+
+ /* Remove all references between the Lua stack and the coroutine stream. */
+ xref_disconnect(&socket->xref, peer);
+ return 0;
+}
+
+/* The close function sends the shutdown signal and breaks the
+ * links between the stream and the object.
+ */
+__LJMP static int hlua_socket_close_helper(lua_State *L)
+{
+ struct hlua_socket *socket;
+ struct hlua_csk_ctx *ctx;
+ struct xref *peer;
+ struct hlua *hlua;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua)
+ return 0;
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer)
+ return 0;
+
+ /* one less socket awaiting garbage collection for this Lua context */
+ hlua->gc_count--;
+ ctx = container_of(peer, struct hlua_csk_ctx, xref);
+
+ /* Set the flag which destroys the session. */
+ ctx->die = 1;
+ appctx_wakeup(ctx->appctx);
+
+ /* Remove all references between the Lua stack and the coroutine stream. */
+ xref_disconnect(&socket->xref, peer);
+ return 0;
+}
+
+/* Socket:close(): checks the argument count then delegates to the
+ * close helper above.
+ */
+__LJMP static int hlua_socket_close(lua_State *L)
+{
+ MAY_LJMP(check_args(L, 1, "close"));
+ return hlua_socket_close_helper(L);
+}
+
+/* This Lua function assumes that the stack contains three parameters.
+ * 1 - USERDATA containing a struct socket
+ * 2 - INTEGER with values of the macros defined below
+ * If the integer is -1, we must read at most one line.
+ * If the integer is -2, we must read all the data until the
+ * end of the stream.
+ * If the integer is a positive value, we must read a number of
+ * bytes corresponding to this value.
+ */
+#define HLSR_READ_LINE (-1)
+#define HLSR_READ_ALL (-2)
+__LJMP static int hlua_socket_receive_yield(struct lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_socket *socket = MAY_LJMP(hlua_checksocket(L, 1));
+ int wanted = lua_tointeger(L, 2);
+ struct hlua *hlua;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ size_t len;
+ int nblk;
+ const char *blk1;
+ size_t len1;
+ const char *blk2;
+ size_t len2;
+ int skip_at_end = 0;
+ struct channel *oc;
+ struct stream *s;
+ struct xref *peer;
+ int missing_bytes;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+
+ /* Check if this lua stack is schedulable. */
+ if (!hlua || !hlua->task)
+ WILL_LJMP(luaL_error(L, "The 'receive' function is only allowed in "
+ "'frontend', 'backend' or 'task'"));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break. If some data were read, return it. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer)
+ goto no_peer;
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ if (!csk_ctx->connected)
+ goto connection_closed;
+
+ appctx = csk_ctx->appctx;
+ s = appctx_strm(appctx);
+
+ oc = &s->res;
+ if (wanted == HLSR_READ_LINE) {
+ /* Read line. */
+ nblk = co_getline_nc(oc, &blk1, &len1, &blk2, &len2);
+ if (nblk < 0) /* Connection close. */
+ goto connection_closed;
+ if (nblk == 0) /* No data available. */
+ goto connection_empty;
+
+ /* remove final \r\n. The terminator sits at the end of the
+ * last block returned (blk1 for 1 block, blk2 for 2).
+ */
+ if (nblk == 1) {
+ if (blk1[len1-1] == '\n') {
+ len1--;
+ skip_at_end++;
+ if (blk1[len1-1] == '\r') {
+ len1--;
+ skip_at_end++;
+ }
+ }
+ }
+ else {
+ if (blk2[len2-1] == '\n') {
+ len2--;
+ skip_at_end++;
+ if (blk2[len2-1] == '\r') {
+ len2--;
+ skip_at_end++;
+ }
+ }
+ }
+ }
+
+ else if (wanted == HLSR_READ_ALL) {
+ /* Read all the available data. */
+ nblk = co_getblk_nc(oc, &blk1, &len1, &blk2, &len2);
+ if (nblk < 0) /* Connection close. */
+ goto connection_closed;
+ if (nblk == 0) /* No data available. */
+ goto connection_empty;
+ }
+
+ else {
+ /* Read a block of data. */
+ nblk = co_getblk_nc(oc, &blk1, &len1, &blk2, &len2);
+ if (nblk < 0) /* Connection close. */
+ goto connection_closed;
+ if (nblk == 0) /* No data available. */
+ goto connection_empty;
+
+ /* Clamp what we take to the number of bytes still missing.
+ * NOTE(review): "len1 > missing_bytes" compares size_t vs int;
+ * if the prefix already exceeds "wanted", missing_bytes goes
+ * negative and is promoted to a huge unsigned value — TODO
+ * confirm that case cannot happen or is harmless.
+ * NOTE(review): the "} if" below is not an "else if"; harmless
+ * because nblk was just forced to 1 so the condition is false,
+ * but worth confirming intent.
+ */
+ missing_bytes = wanted - socket->b.n;
+ if (len1 > missing_bytes) {
+ nblk = 1;
+ len1 = missing_bytes;
+ } if (nblk == 2 && len1 + len2 > missing_bytes)
+ len2 = missing_bytes - len1;
+ }
+
+ len = len1;
+
+ luaL_addlstring(&socket->b, blk1, len1);
+ if (nblk == 2) {
+ len += len2;
+ luaL_addlstring(&socket->b, blk2, len2);
+ }
+
+ /* Consume data (including the trimmed \r\n for line reads). */
+ co_skip(oc, len + skip_at_end);
+
+ /* Don't wait for anything. */
+ appctx_wakeup(appctx);
+
+ /* If the pattern requires reading all the data
+ * from the connection, or a byte count not reached yet,
+ * yield again for more.
+ */
+ if (wanted == HLSR_READ_ALL)
+ goto connection_empty;
+ else if (wanted >= 0 && socket->b.n < wanted)
+ goto connection_empty;
+
+ /* Return result. */
+ luaL_pushresult(&socket->b);
+ xref_unlock(&socket->xref, peer);
+ return 1;
+
+connection_closed:
+
+ xref_unlock(&socket->xref, peer);
+
+no_peer:
+
+ /* If the buffer contains data, return what was accumulated so far. */
+ if (socket->b.n > 0) {
+ luaL_pushresult(&socket->b);
+ return 1;
+ }
+ lua_pushnil(L);
+ lua_pushstring(L, "connection closed.");
+ return 2;
+
+connection_empty:
+
+ /* Register for a read notification, then yield until woken. */
+ if (!notification_new(&hlua->com, &csk_ctx->wake_on_read, hlua->task)) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "out of memory"));
+ }
+ xref_unlock(&socket->xref, peer);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_receive_yield, TICK_ETERNITY, 0));
+ return 0;
+}
+
+/* This Lua function gets two parameters. The first one can be a string
+ * or a number. If the string is "*l", the user requires one line. If
+ * the string is "*a", the user requires all the contents of the stream.
+ * If the value is a number, the user requires a number of bytes equal
+ * to the value. The default value is "*l" (a line).
+ *
+ * This parameter with a variable type is converted to an integer. This
+ * integer takes these values:
+ * -1 : read a line
+ * -2 : read all the stream
+ * >0 : amount of bytes.
+ *
+ * The second parameter is optional. It contains a string that must be
+ * concatenated with the read data.
+ */
+__LJMP static int hlua_socket_receive(struct lua_State *L)
+{
+ int wanted = HLSR_READ_LINE;
+ const char *pattern;
+ int lastarg, type;
+ char *error;
+ size_t len;
+ struct hlua_socket *socket;
+
+ if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
+ WILL_LJMP(luaL_error(L, "The 'receive' function requires between 1 and 3 arguments."));
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for pattern: "*a", "*l", or a byte count (as string or number). */
+ if (lua_gettop(L) >= 2) {
+ type = lua_type(L, 2);
+ if (type == LUA_TSTRING) {
+ pattern = lua_tostring(L, 2);
+ if (strcmp(pattern, "*a") == 0)
+ wanted = HLSR_READ_ALL;
+ else if (strcmp(pattern, "*l") == 0)
+ wanted = HLSR_READ_LINE;
+ else {
+ wanted = strtoll(pattern, &error, 10);
+ if (*error != '\0')
+ WILL_LJMP(luaL_error(L, "Unsupported pattern."));
+ }
+ }
+ else if (type == LUA_TNUMBER) {
+ wanted = lua_tointeger(L, 2);
+ if (wanted < 0)
+ WILL_LJMP(luaL_error(L, "Unsupported size."));
+ }
+ }
+
+ /* Set pattern (normalized integer) at stack slot 2 for the yield
+ * continuation to re-read on each resume.
+ */
+ lua_pushinteger(L, wanted);
+
+ /* Check if we would replace the top by itself. */
+ if (lua_gettop(L) != 2)
+ lua_replace(L, 2);
+
+ /* Save index of the top of the stack because since buffers are used, it
+ * may change
+ */
+ lastarg = lua_gettop(L);
+
+ /* init buffer, and fill it with prefix. */
+ luaL_buffinit(L, &socket->b);
+
+ /* Check prefix. */
+ if (lastarg >= 3) {
+ if (lua_type(L, 3) != LUA_TSTRING)
+ WILL_LJMP(luaL_error(L, "Expect a 'string' for the prefix"));
+ pattern = lua_tolstring(L, 3, &len);
+ luaL_addlstring(&socket->b, pattern, len);
+ }
+
+ return __LJMP(hlua_socket_receive_yield(L, 0, 0));
+}
+
+/* Write the Lua input string into the output buffer.
+ * This function yields when no space is available.
+ * Stack layout (set up by hlua_socket_send):
+ *  1 - socket object, 2 - string to send, 3 - bytes already sent.
+ * Returns the total amount sent, or -1 on a broken connection.
+ */
+static int hlua_socket_write_yield(struct lua_State *L,int status, lua_KContext ctx)
+{
+ struct hlua_socket *socket;
+ struct hlua *hlua;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ size_t buf_len;
+ const char *buf;
+ int len;
+ int send_len;
+ int sent;
+ struct xref *peer;
+ struct stream *s;
+ struct stconn *sc;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+
+ /* Check if this lua stack is schedulable. */
+ if (!hlua || !hlua->task)
+ WILL_LJMP(luaL_error(L, "The 'write' function is only allowed in "
+ "'frontend', 'backend' or 'task'"));
+
+ /* Get object */
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+ buf = MAY_LJMP(luaL_checklstring(L, 2, &buf_len));
+ sent = MAY_LJMP(luaL_checkinteger(L, 3));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break; -1 tells the caller it is broken. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ if (!csk_ctx->connected) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+
+ appctx = csk_ctx->appctx;
+ sc = appctx_sc(appctx);
+ s = __sc_strm(sc);
+
+ /* Check for connection close. */
+ if (channel_output_closed(&s->req)) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+
+ /* Update the input buffer data. */
+ buf += sent;
+ send_len = buf_len - sent;
+
+ /* All the data is sent.
+ * NOTE(review): "sent >= buf_len" compares int vs size_t; sent starts
+ * at 0 and only grows, so it is presumably non-negative — TODO confirm.
+ */
+ if (sent >= buf_len) {
+ xref_unlock(&socket->xref, peer);
+ return 1; /* Implicitly return the length sent. */
+ }
+
+ /* Check if the buffer is available because HAProxy doesn't allocate
+ * the request buffer if it's not required.
+ */
+ if (s->req.buf.size == 0) {
+ if (!sc_alloc_ibuf(sc, &appctx->buffer_wait))
+ goto hlua_socket_write_yield_return;
+ }
+
+ /* Check for available space. */
+ len = b_room(&s->req.buf);
+ if (len <= 0) {
+ goto hlua_socket_write_yield_return;
+ }
+
+ /* send data */
+ if (len < send_len)
+ send_len = len;
+ len = ci_putblk(&s->req, buf, send_len);
+
+ /* "Not enough space" (-1), "Buffer too little to contain
+ * the data" (-2) are not expected because the available length
+ * is tested.
+ * Other unknown errors are also not expected.
+ */
+ if (len <= 0) {
+ if (len == -1)
+ s->req.flags |= CF_WAKE_WRITE;
+
+ MAY_LJMP(hlua_socket_close_helper(L));
+ lua_pop(L, 1);
+ lua_pushinteger(L, -1);
+ xref_unlock(&socket->xref, peer);
+ return 1;
+ }
+
+ /* update buffers. */
+ appctx_wakeup(appctx);
+
+ /* Update length sent (replaces the counter at the top of the stack). */
+ lua_pop(L, 1);
+ lua_pushinteger(L, sent + len);
+
+ /* Is the whole data buffer sent? */
+ if (sent + len >= buf_len) {
+ xref_unlock(&socket->xref, peer);
+ return 1;
+ }
+
+hlua_socket_write_yield_return:
+ /* Register for a write notification, then yield until woken. */
+ if (!notification_new(&hlua->com, &csk_ctx->wake_on_write, hlua->task)) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "out of memory"));
+ }
+ xref_unlock(&socket->xref, peer);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_write_yield, TICK_ETERNITY, 0));
+ return 0;
+}
+
+/* This function initiates the send of data. It just checks the input
+ * parameters and pushes an integer on the Lua stack that contains the
+ * amount of data written to the buffer. This is used by the function
+ * "hlua_socket_write_yield" that can yield.
+ *
+ * The Lua function gets between 2 and 4 parameters. The first one is
+ * the associated object. The second is a string buffer. The third is
+ * a facultative integer that represents the buffer position of the
+ * start of the data to send. The first byte is position "1". The
+ * default value is "1". The fourth argument is a facultative integer
+ * that represents the buffer position of the end of the data to send.
+ * The default is the last byte.
+ */
+static int hlua_socket_send(struct lua_State *L)
+{
+ int i;
+ int j;
+ const char *buf;
+ size_t buf_len;
+
+ /* Check number of arguments. */
+ if (lua_gettop(L) < 2 || lua_gettop(L) > 4)
+ WILL_LJMP(luaL_error(L, "'send' needs between 2 and 4 arguments"));
+
+ /* Get the string. */
+ buf = MAY_LJMP(luaL_checklstring(L, 2, &buf_len));
+
+ /* Get and check j: negative values count from the end, values past
+ * the end are clamped.
+ */
+ if (lua_gettop(L) == 4) {
+ j = MAY_LJMP(luaL_checkinteger(L, 4));
+ if (j < 0)
+ j = buf_len + j + 1;
+ if (j > buf_len)
+ j = buf_len + 1;
+ lua_pop(L, 1);
+ }
+ else
+ j = buf_len;
+
+ /* Get and check i, with the same normalization as j. */
+ if (lua_gettop(L) == 3) {
+ i = MAY_LJMP(luaL_checkinteger(L, 3));
+ if (i < 0)
+ i = buf_len + i + 1;
+ if (i > buf_len)
+ i = buf_len + 1;
+ lua_pop(L, 1);
+ } else
+ i = 1;
+
+ /* Check both i and j: an empty or inverted range sends nothing. */
+ if (i > j) {
+ lua_pushinteger(L, 0);
+ return 1;
+ }
+ if (i == 0 && j == 0) {
+ lua_pushinteger(L, 0);
+ return 1;
+ }
+ if (i == 0)
+ i = 1;
+ if (j == 0)
+ j = 1;
+
+ /* Pop the string. */
+ lua_pop(L, 1);
+
+ /* Update the buffer length, and push the selected slice back. */
+ buf += i - 1;
+ buf_len = j - i + 1;
+ lua_pushlstring(L, buf, buf_len);
+
+ /* This integer is used to remember the amount of sent data. */
+ lua_pushinteger(L, 0);
+
+ return MAY_LJMP(hlua_socket_write_yield(L, 0, 0));
+}
+
+#define SOCKET_INFO_MAX_LEN sizeof("[0000:0000:0000:0000:0000:0000:0000:0000]:12345")
+/* Formats an address as "<addr>:<port>" (IPv6 gets enclosed in square
+ * brackets, AF_UNIX paths are returned as-is) and pushes the resulting
+ * string, or nil on failure. Always returns 1 (one pushed value).
+ * NOTE(review): the static buffer makes this non-reentrant; presumably
+ * safe under the per-thread Lua execution model — TODO confirm.
+ */
+__LJMP static inline int hlua_socket_info(struct lua_State *L, const struct sockaddr_storage *addr)
+{
+ static char buffer[SOCKET_INFO_MAX_LEN];
+ int ret;
+ int len;
+ char *p;
+
+ /* the address is written at buffer+1 to leave room for a leading '[' */
+ ret = addr_to_str(addr, buffer+1, SOCKET_INFO_MAX_LEN-1);
+ if (ret <= 0) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ if (ret == AF_UNIX) {
+ lua_pushstring(L, buffer+1);
+ return 1;
+ }
+ else if (ret == AF_INET6) {
+ buffer[0] = '[';
+ len = strlen(buffer);
+ buffer[len] = ']';
+ len++;
+ buffer[len] = ':';
+ len++;
+ p = buffer;
+ }
+ else if (ret == AF_INET) {
+ p = buffer + 1;
+ len = strlen(p);
+ p[len] = ':';
+ len++;
+ }
+ else {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ if (port_to_str(addr, p + len, SOCKET_INFO_MAX_LEN-1 - len) <= 0) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushstring(L, p);
+ return 1;
+}
+
+/* Returns information about the peer of the connection, formatted by
+ * hlua_socket_info(), or nil when the connection is not established.
+ */
+__LJMP static int hlua_socket_getpeername(struct lua_State *L)
+{
+ struct hlua_socket *socket;
+ struct xref *peer;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ struct stconn *sc;
+ const struct sockaddr_storage *dst;
+ int ret;
+
+ MAY_LJMP(check_args(L, 1, "getpeername"));
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ if (!csk_ctx->connected) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushnil(L);
+ return 1;
+ }
+
+ appctx = csk_ctx->appctx;
+ sc = appctx_sc(appctx);
+ dst = sc_dst(sc_opposite(sc));
+ if (!dst) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushnil(L);
+ return 1;
+ }
+
+ ret = MAY_LJMP(hlua_socket_info(L, dst));
+ xref_unlock(&socket->xref, peer);
+ return ret;
+}
+
+/* Returns information about my connection side (the local source
+ * address), formatted by hlua_socket_info(), or nil when unavailable.
+ */
+static int hlua_socket_getsockname(struct lua_State *L)
+{
+ struct hlua_socket *socket;
+ struct connection *conn;
+ struct appctx *appctx;
+ struct xref *peer;
+ struct hlua_csk_ctx *csk_ctx;
+ struct stream *s;
+ int ret;
+
+ MAY_LJMP(check_args(L, 1, "getsockname"));
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ if (!csk_ctx->connected) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushnil(L);
+ return 1;
+ }
+
+ appctx = csk_ctx->appctx;
+ s = appctx_strm(appctx);
+
+ conn = sc_conn(s->scb);
+ if (!conn || !conn_get_src(conn)) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushnil(L);
+ return 1;
+ }
+
+ ret = hlua_socket_info(L, conn->src);
+ xref_unlock(&socket->xref, peer);
+ return ret;
+}
+
+/* This struct defines the applet used by class Socket streams:
+ * handler, init and release callbacks are the functions above.
+ */
+static struct applet update_applet = {
+ .obj_type = OBJ_TYPE_APPLET,
+ .name = "<LUA_TCP>",
+ .fct = hlua_socket_handler,
+ .init = hlua_socket_init,
+ .release = hlua_socket_release,
+};
+
+/* Yield continuation for connect(): returns 1 when the connection is
+ * established, or nil plus an error message on failure; otherwise
+ * registers a write notification and yields again.
+ */
+__LJMP static int hlua_socket_connect_yield(struct lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_socket *socket = MAY_LJMP(hlua_checksocket(L, 1));
+ struct hlua *hlua;
+ struct xref *peer;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ struct stream *s;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua)
+ return 0;
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushnil(L);
+ lua_pushstring(L, "Can't connect");
+ return 2;
+ }
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ appctx = csk_ctx->appctx;
+ s = appctx_strm(appctx);
+
+ /* NOTE(review): this tid check duplicates the one above and cannot
+ * trigger here; kept as-is.
+ */
+ if (socket->tid != tid) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+ }
+
+ /* Check for connection close. */
+ if (!hlua || channel_output_closed(&s->req)) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushnil(L);
+ lua_pushstring(L, "Can't connect");
+ return 2;
+ }
+
+ appctx = __sc_appctx(s->scf);
+
+ /* Check for connection established. */
+ if (csk_ctx->connected) {
+ xref_unlock(&socket->xref, peer);
+ lua_pushinteger(L, 1);
+ return 1;
+ }
+
+ if (!notification_new(&hlua->com, &csk_ctx->wake_on_write, hlua->task)) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "out of memory error"));
+ }
+ xref_unlock(&socket->xref, peer);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0));
+ return 0;
+}
+
+/* This function fails or initiates the connection.
+ * Lua arguments: 1) the socket object, 2) the destination address
+ * (string), 3) optional port (integer). Parses the address, binds the
+ * applet to a stream, then yields until the connection completes.
+ */
+__LJMP static int hlua_socket_connect(struct lua_State *L)
+{
+ struct hlua_socket *socket;
+ int port = -1;
+ const char *ip;
+ struct hlua *hlua;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ int low, high;
+ struct sockaddr_storage *addr;
+ struct xref *peer;
+ struct stconn *sc;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua)
+ return 0;
+
+ if (lua_gettop(L) < 2)
+ WILL_LJMP(luaL_error(L, "connect: need at least 2 arguments"));
+
+ /* Get args. */
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ ip = MAY_LJMP(luaL_checkstring(L, 2));
+ if (lua_gettop(L) >= 3) {
+ luaL_Buffer b;
+ port = MAY_LJMP(luaL_checkinteger(L, 3));
+
+ /* Force the ip to end with a colon, to support IPv6 addresses
+ * that are not enclosed within square brackets.
+ */
+ if (port > 0) {
+ luaL_buffinit(L, &b);
+ luaL_addstring(&b, ip);
+ luaL_addchar(&b, ':');
+ luaL_pushresult(&b);
+ ip = lua_tolstring(L, lua_gettop(L), NULL);
+ }
+ }
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* default to the plain TCP server unless connect_ssl() set one */
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ if (!csk_ctx->srv)
+ csk_ctx->srv = socket_tcp;
+
+ /* Parse ip address. */
+ addr = str2sa_range(ip, NULL, &low, &high, NULL, NULL, NULL, NULL, NULL, NULL, PA_O_PORT_OK | PA_O_STREAM);
+ if (!addr) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: cannot parse destination address '%s'", ip));
+ }
+
+ /* Set port when the address string did not carry one. */
+ if (low == 0) {
+ if (addr->ss_family == AF_INET) {
+ if (port == -1) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: port missing"));
+ }
+ ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ } else if (addr->ss_family == AF_INET6) {
+ if (port == -1) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: port missing"));
+ }
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+ }
+ }
+
+ appctx = csk_ctx->appctx;
+ if (appctx_sc(appctx)) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: connect already performed\n"));
+ }
+
+ if (appctx_init(appctx) == -1) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: fail to init applet."));
+ }
+
+ sc = appctx_sc(appctx);
+
+ if (!sockaddr_alloc(&sc_opposite(sc)->dst, addr, sizeof(*addr))) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "connect: internal error"));
+ }
+
+ /* inform the stream that we want to be notified whenever the
+ * connection completes.
+ */
+ applet_need_more_data(appctx);
+ applet_have_more_data(appctx);
+ appctx_wakeup(appctx);
+
+ /* one more socket to garbage-collect on this Lua context */
+ hlua->gc_count++;
+
+ if (!notification_new(&hlua->com, &csk_ctx->wake_on_write, hlua->task)) {
+ xref_unlock(&socket->xref, peer);
+ WILL_LJMP(luaL_error(L, "out of memory"));
+ }
+ xref_unlock(&socket->xref, peer);
+
+ /* Return yield waiting for connection. */
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_socket_connect_yield, TICK_ETERNITY, 0));
+
+ return 0;
+}
+
+#ifdef USE_OPENSSL
+/* Same as connect(), but selects the SSL server before delegating to
+ * hlua_socket_connect(). Only compiled when OpenSSL support is enabled.
+ */
+__LJMP static int hlua_socket_connect_ssl(struct lua_State *L)
+{
+ struct hlua_socket *socket;
+ struct xref *peer;
+
+ MAY_LJMP(check_args(L, 3, "connect_ssl"));
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ container_of(peer, struct hlua_csk_ctx, xref)->srv = socket_ssl;
+
+ xref_unlock(&socket->xref, peer);
+ return MAY_LJMP(hlua_socket_connect(L));
+}
+#endif
+
+/* Socket:setsockopt() stub: options are ignored; returns no value. */
+__LJMP static int hlua_socket_setoption(struct lua_State *L)
+{
+ return 0;
+}
+
+/* Socket:settimeout(): sets the connect and I/O timeouts from a value
+ * in seconds (fractions allowed, converted to milliseconds). Applies it
+ * to the running stream when one exists, otherwise it is stored in the
+ * context and applied by hlua_socket_init(). Returns 1 on success.
+ */
+__LJMP static int hlua_socket_settimeout(struct lua_State *L)
+{
+ struct hlua_socket *socket;
+ int tmout;
+ double dtmout;
+ struct xref *peer;
+ struct hlua_csk_ctx *csk_ctx;
+ struct appctx *appctx;
+ struct stream *s;
+
+ MAY_LJMP(check_args(L, 2, "settimeout"));
+
+ socket = MAY_LJMP(hlua_checksocket(L, 1));
+
+ /* convert the timeout to millis */
+ dtmout = MAY_LJMP(luaL_checknumber(L, 2)) * 1000;
+
+ /* Check for negative values */
+ if (dtmout < 0)
+ WILL_LJMP(luaL_error(L, "settimeout: cannot set negatives values"));
+
+ if (dtmout > INT_MAX) /* overflow check */
+ WILL_LJMP(luaL_error(L, "settimeout: cannot set values larger than %d ms", INT_MAX));
+
+ tmout = MS_TO_TICKS((int)dtmout);
+ if (tmout == 0)
+ tmout++; /* very small timeouts are adjusted to a minimum of 1ms */
+
+ /* Check if we run on the same thread as the creator thread.
+ * We cannot access the socket if the thread is different.
+ */
+ if (socket->tid != tid)
+ WILL_LJMP(luaL_error(L, "connect: cannot use socket on other thread"));
+
+ /* check for connection break. */
+ peer = xref_get_peer_and_lock(&socket->xref);
+ if (!peer) {
+ hlua_pusherror(L, "socket: not yet initialised, you can't set timeouts.");
+ WILL_LJMP(lua_error(L));
+ return 0;
+ }
+
+ csk_ctx = container_of(peer, struct hlua_csk_ctx, xref);
+ csk_ctx->timeout = tmout;
+
+ /* no stream yet: hlua_socket_init() will apply the timeout later */
+ appctx = csk_ctx->appctx;
+ if (!appctx_sc(appctx))
+ goto end;
+
+ s = appctx_strm(csk_ctx->appctx);
+
+ s->sess->fe->timeout.connect = tmout;
+ s->scf->ioto = tmout;
+ s->scb->ioto = tmout;
+
+ /* requeue the stream task with the new expiration date */
+ s->task->expire = (tick_is_expired(s->task->expire, now_ms) ? 0 : s->task->expire);
+ s->task->expire = tick_first(s->task->expire, tick_add_ifset(now_ms, tmout));
+ task_queue(s->task);
+
+ end:
+ xref_unlock(&socket->xref, peer);
+ lua_pushinteger(L, 1);
+ return 1;
+}
+
/* Lua constructor for the Socket class. Creates the Lua object (a table whose
 * slot 0 holds the userdata), allocates the applet context that will drive
 * the connection and links both through a cross reference. Returns 1 with the
 * new object on the stack, or throws a Lua error on failure.
 */
__LJMP static int hlua_socket_new(lua_State *L)
{
        struct hlua_socket *socket;
        struct hlua_csk_ctx *ctx;
        struct appctx *appctx;

        /* Check stack size. */
        if (!lua_checkstack(L, 3)) {
                hlua_pusherror(L, "socket: full stack");
                goto out_fail_conf;
        }

        /* Create the object: obj[0] = userdata. */
        lua_newtable(L);
        socket = MAY_LJMP(lua_newuserdata(L, sizeof(*socket)));
        lua_rawseti(L, -2, 0);
        memset(socket, 0, sizeof(*socket));
        socket->tid = tid; /* the socket may only be used from its creator thread */

        /* Check if the various memory pools are initialized. */
        if (!pool_head_stream || !pool_head_buffer) {
                hlua_pusherror(L, "socket: uninitialized pools.");
                goto out_fail_conf;
        }

        /* Pop a class stream metatable and affect it to the userdata. */
        lua_rawgeti(L, LUA_REGISTRYINDEX, class_socket_ref);
        lua_setmetatable(L, -2);

        /* Create the applet context */
        appctx = appctx_new_here(&update_applet, NULL);
        if (!appctx) {
                hlua_pusherror(L, "socket: out of memory");
                goto out_fail_conf;
        }
        /* Reserve and initialize the per-applet service context */
        ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
        ctx->connected = 0;
        ctx->die = 0;
        ctx->srv = NULL;
        ctx->timeout = 0;
        ctx->appctx = appctx;
        LIST_INIT(&ctx->wake_on_write);
        LIST_INIT(&ctx->wake_on_read);

        /* Initialise cross reference between stream and Lua socket object. */
        xref_create(&socket->xref, &ctx->xref);
        return 1;

  out_fail_conf:
        WILL_LJMP(lua_error(L));
        return 0;
}
+
+/*
+ *
+ *
+ * Class Channel
+ *
+ *
+ */
+
/* Returns the struct channel attached to the class channel object in the
 * stack entry "ud" or throws an argument error if the object is not of the
 * expected class.
 */
__LJMP static struct channel *hlua_checkchannel(lua_State *L, int ud)
{
        return MAY_LJMP(hlua_checkudata(L, ud, class_channel_ref));
}
+
/* Pushes the channel onto the top of the stack. If the stack does not have a
 * free slot, the function fails and returns 0.
 */
static int hlua_channel_new(lua_State *L, struct channel *channel)
{
        /* Check stack size. */
        if (!lua_checkstack(L, 3))
                return 0;

        /* Create the object: obj[0] = lightuserdata pointing to the channel. */
        lua_newtable(L);
        lua_pushlightuserdata(L, channel);
        lua_rawseti(L, -2, 0);

        /* Pop the class channel metatable and assign it to the new object. */
        lua_rawgeti(L, LUA_REGISTRYINDEX, class_channel_ref);
        lua_setmetatable(L, -2);
        return 1;
}
+
/* Helper function returning a filter attached to a channel at the position <ud>
 * in the stack, filling the current offset and length of the filter. If no
 * filter is attached, NULL is returned and <offset> and <len> are not
 * initialized.
 */
static struct filter *hlua_channel_filter(lua_State *L, int ud, struct channel *chn, size_t *offset, size_t *len)
{
        struct filter *filter = NULL;

        /* The channel object stores its owning filter (if any) as a light
         * userdata in the "__filter" field.
         */
        if (lua_getfield(L, ud, "__filter") == LUA_TLIGHTUSERDATA) {
                struct hlua_flt_ctx *flt_ctx;

                filter = lua_touserdata (L, -1);
                flt_ctx = filter->ctx;
                /* only payload filters track a per-channel offset/length */
                if (hlua_filter_from_payload(filter)) {
                        *offset = flt_ctx->cur_off[CHN_IDX(chn)];
                        *len = flt_ctx->cur_len[CHN_IDX(chn)];
                }
        }

        /* pop the value pushed by lua_getfield() */
        lua_pop(L, 1);
        return filter;
}
+
+/* Copies <len> bytes of data present in the channel's buffer, starting at the
+* offset <offset>, and put it in a LUA string variable. It is the caller
+* responsibility to ensure <len> and <offset> are valid. It always return the
+* length of the built string. <len> may be 0, in this case, an empty string is
+* created and 0 is returned.
+*/
+static inline int _hlua_channel_dup(struct channel *chn, lua_State *L, size_t offset, size_t len)
+{
+ size_t block1, block2;
+ luaL_Buffer b;
+
+ block1 = len;
+ if (block1 > b_contig_data(&chn->buf, b_peek_ofs(&chn->buf, offset)))
+ block1 = b_contig_data(&chn->buf, b_peek_ofs(&chn->buf, offset));
+ block2 = len - block1;
+
+ luaL_buffinit(L, &b);
+ luaL_addlstring(&b, b_peek(&chn->buf, offset), block1);
+ if (block2)
+ luaL_addlstring(&b, b_orig(&chn->buf), block2);
+ luaL_pushresult(&b);
+ return len;
+}
+
+/* Inserts the string <str> to the channel's buffer at the offset <offset>. This
+ * function returns -1 if data cannot be copied. Otherwise, it returns the
+ * number of bytes copied.
+ */
+static int _hlua_channel_insert(struct channel *chn, lua_State *L, struct ist str, size_t offset)
+{
+ int ret = 0;
+
+ /* Nothing to do, just return */
+ if (unlikely(istlen(str) == 0))
+ goto end;
+
+ if (istlen(str) > c_room(chn)) {
+ ret = -1;
+ goto end;
+ }
+ ret = b_insert_blk(&chn->buf, offset, istptr(str), istlen(str));
+
+ end:
+ return ret;
+}
+
+/* Removes <len> bytes of data at the absolute position <offset>.
+ */
+static void _hlua_channel_delete(struct channel *chn, size_t offset, size_t len)
+{
+ size_t end = offset + len;
+
+ if (b_peek(&chn->buf, end) != b_tail(&chn->buf))
+ b_move(&chn->buf, b_peek_ofs(&chn->buf, end),
+ b_data(&chn->buf) - end, -len);
+ b_sub(&chn->buf, len);
+}
+
/* Copies input data in the channel's buffer. It is possible to set a specific
 * offset (0 by default) and a length (all remaining input data starting from
 * the offset by default). If there is not enough input data and more data can
 * be received, this function yields.
 *
 * From an action, all input data are considered. For a filter, the offset and
 * the length of input data to consider are retrieved from the filter context.
 */
__LJMP static int hlua_channel_get_data_yield(lua_State *L, int status, lua_KContext ctx)
{
        struct channel *chn;
        struct filter *filter;
        size_t input, output;  /* available input bytes / already-forwarded output bytes */
        int offset, len;

        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        output = co_data(chn);
        input = ci_data(chn);

        /* a filter narrows <output>/<input> to its own working area; payload
         * filtering is mandatory to use this function from a filter
         */
        filter = hlua_channel_filter(L, 1, chn, &output, &input);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* optional 2nd argument: start offset relative to input data; a
         * negative value counts back from the end of input data
         */
        offset = output;
        if (lua_gettop(L) > 1) {
                offset = MAY_LJMP(luaL_checkinteger(L, 2));
                if (offset < 0)
                        offset = MAX(0, (int)input + offset);
                offset += output;
                if (offset < output || offset > input + output) {
                        lua_pushfstring(L, "offset out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* optional 3rd argument: byte count; 0 copies nothing, -1 means "up
         * to the maximum buffer size"
         */
        len = output + input - offset;
        if (lua_gettop(L) == 3) {
                len = MAY_LJMP(luaL_checkinteger(L, 3));
                if (!len)
                        goto dup;
                if (len == -1)
                        len = global.tune.bufsize;
                if (len < 0) {
                        lua_pushfstring(L, "length out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* Wait for more data if possible if no length was specified and there
         * is no data or not enough data was received.
         */
        if (!len || offset + len > output + input) {
                if (!HLUA_CANT_YIELD(hlua_gethlua(L)) && !channel_input_closed(chn) && channel_may_recv(chn)) {
                        /* Yield waiting for more data, as requested */
                        MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_channel_get_data_yield, TICK_ETERNITY, 0));
                }

                /* Return 'nil' if there is no data and the channel can't receive more data */
                if (!len) {
                        lua_pushnil(L);
                        return -1;
                }

                /* Otherwise, return all data */
                len = output + input - offset;
        }

  dup:
        _hlua_channel_dup(chn, L, offset, len);
        return 1;
}
+
/* Copies the first line (including the trailing LF) of input data in the
 * channel's buffer. It is possible to set a specific offset (0 by default) and
 * a length (all remaining input data starting from the offset by default). If
 * there is not enough input data and more data can be received, the function
 * yields. If a length is explicitly specified, no more data are
 * copied. Otherwise, if no LF is found and more data can be received, this
 * function yields.
 *
 * From an action, all input data are considered. For a filter, the offset and
 * the length of input data to consider are retrieved from the filter context.
 */
__LJMP static int hlua_channel_get_line_yield(lua_State *L, int status, lua_KContext ctx)
{
        struct channel *chn;
        struct filter *filter;
        size_t l, input, output;
        int offset, len;

        chn = MAY_LJMP(hlua_checkchannel(L, 1));
        output = co_data(chn);
        input = ci_data(chn);

        /* a filter narrows <output>/<input> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &output, &input);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* optional 2nd argument: start offset relative to input data; a
         * negative value counts back from the end of input data
         */
        offset = output;
        if (lua_gettop(L) > 1) {
                offset = MAY_LJMP(luaL_checkinteger(L, 2));
                if (offset < 0)
                        offset = MAX(0, (int)input + offset);
                offset += output;
                if (offset < output || offset > input + output) {
                        lua_pushfstring(L, "offset out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* optional 3rd argument: maximum byte count; 0 copies nothing, -1
         * means "up to the maximum buffer size"
         */
        len = output + input - offset;
        if (lua_gettop(L) == 3) {
                len = MAY_LJMP(luaL_checkinteger(L, 3));
                if (!len)
                        goto dup;
                if (len == -1)
                        len = global.tune.bufsize;
                if (len < 0) {
                        lua_pushfstring(L, "length out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* look for an LF inside the available window; on success shrink <len>
         * so it covers the line including the trailing '\n'
         */
        for (l = 0; l < len; l++) {
                if (l + offset >= output + input)
                        break;
                if (*(b_peek(&chn->buf, offset + l)) == '\n') {
                        len = l+1;
                        goto dup;
                }
        }

        /* Wait for more data if possible if no line is found and no length was
         * specified or not enough data was received.
         */
        if (lua_gettop(L) != 3 || offset + len > output + input) {
                if (!HLUA_CANT_YIELD(hlua_gethlua(L)) && !channel_input_closed(chn) && channel_may_recv(chn)) {
                        /* Yield waiting for more data */
                        MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_channel_get_line_yield, TICK_ETERNITY, 0));
                }

                /* Return 'nil' if there is no data and the channel can't receive more data */
                if (!len) {
                        lua_pushnil(L);
                        return -1;
                }

                /* Otherwise, return all data */
                len = output + input - offset;
        }

  dup:
        _hlua_channel_dup(chn, L, offset, len);
        return 1;
}
+
/* [ DEPRECATED ]
 *
 * Duplicates all input data found in the channel's buffer. The data are not
 * removed from the buffer. This function relies on _hlua_channel_dup().
 *
 * From an action, all input data are considered. For a filter, the offset and
 * the length of input data to consider are retrieved from the filter context.
 */
__LJMP static int hlua_channel_dup(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        size_t offset, len;

        MAY_LJMP(check_args(L, 1, "dup"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        offset = co_data(chn);
        len = ci_data(chn);

        filter = hlua_channel_filter(L, 1, chn, &offset, &len);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* nothing buffered and no more data expected: return nil */
        if (!ci_data(chn) && channel_input_closed(chn)) {
                lua_pushnil(L);
                return 1;
        }

        _hlua_channel_dup(chn, L, offset, len);
        return 1;
}
+
/* [ DEPRECATED ]
 *
 * Gets all input data found in the channel's buffer. The data are removed from
 * the buffer after the copy. This function relies on _hlua_channel_dup() and
 * _hlua_channel_delete().
 *
 * From an action, all input data are considered. For a filter, the offset and
 * the length of input data to consider are retrieved from the filter context.
 */
__LJMP static int hlua_channel_get(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        size_t offset, len;
        int ret;

        MAY_LJMP(check_args(L, 1, "get"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        offset = co_data(chn);
        len = ci_data(chn);

        filter = hlua_channel_filter(L, 1, chn, &offset, &len);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* nothing buffered and no more data expected: return nil */
        if (!ci_data(chn) && channel_input_closed(chn)) {
                lua_pushnil(L);
                return 1;
        }

        /* copy to a Lua string, then consume the copied bytes */
        ret = _hlua_channel_dup(chn, L, offset, len);
        _hlua_channel_delete(chn, offset, ret);
        return 1;
}
+
/* This function consumes and returns one line. If the channel is closed and
 * the last data does not contain a final '\n', the data are returned without
 * the final '\n'. When no more data are available, it returns a nil value.
 *
 * From an action, all input data are considered. For a filter, the offset and
 * the length of input data to consider are retrieved from the filter context.
 */
__LJMP static int hlua_channel_getline_yield(lua_State *L, int status, lua_KContext ctx)
{
        struct channel *chn;
        struct filter *filter;
        size_t l, offset, len;
        int ret;

        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        offset = co_data(chn);
        len = ci_data(chn);

        /* a filter narrows <offset>/<len> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &offset, &len);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* nothing buffered and no more data expected: return nil */
        if (!ci_data(chn) && channel_input_closed(chn)) {
                lua_pushnil(L);
                return 1;
        }

        /* look for an LF; shrink <len> to cover the line including '\n' */
        for (l = 0; l < len; l++) {
                if (*(b_peek(&chn->buf, offset+l)) == '\n') {
                        len = l+1;
                        goto dup;
                }
        }

        /* no LF yet: wait for more data when the channel may still receive */
        if (!HLUA_CANT_YIELD(hlua_gethlua(L)) && !channel_input_closed(chn) && channel_may_recv(chn)) {
                /* Yield waiting for more data */
                MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_channel_getline_yield, TICK_ETERNITY, 0));
        }

  dup:
        /* copy the line to a Lua string, then consume the copied bytes */
        ret = _hlua_channel_dup(chn, L, offset, len);
        _hlua_channel_delete(chn, offset, ret);
        return 1;
}
+
+/* [ DEPRECATED ]
+ *
+ * Check arguments for the function "hlua_channel_getline_yield".
+ */
+__LJMP static int hlua_channel_getline(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 1, "getline"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+ return MAY_LJMP(hlua_channel_getline_yield(L, 0, 0));
+}
+
+/* Retrieves a given amount of input data at the given offset. By default all
+ * available input data are returned. The offset may be negactive to start from
+ * the end of input data. The length may be -1 to set it to the maximum buffer
+ * size.
+ */
+__LJMP static int hlua_channel_get_data(lua_State *L)
+{
+ struct channel *chn;
+
+ if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
+ WILL_LJMP(luaL_error(L, "'data' expects at most 2 arguments"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+ return MAY_LJMP(hlua_channel_get_data_yield(L, 0, 0));
+}
+
+/* Retrieves a given amount of input data at the given offset. By default all
+ * available input data are returned. The offset may be negactive to start from
+ * the end of input data. The length may be -1 to set it to the maximum buffer
+ * size.
+ */
+__LJMP static int hlua_channel_get_line(lua_State *L)
+{
+ struct channel *chn;
+
+ if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
+ WILL_LJMP(luaL_error(L, "'line' expects at most 2 arguments"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+ return MAY_LJMP(hlua_channel_get_line_yield(L, 0, 0));
+}
+
+/* Appends a string into the input side of channel. It returns the length of the
+ * written string, or -1 if the channel is closed or if the buffer size is too
+ * little for the data. 0 may be returned if nothing is copied. This function
+ * does not yield.
+ *
+ * For a filter, the context is updated on success.
+ */
+__LJMP static int hlua_channel_append(lua_State *L)
+{
+ struct channel *chn;
+ struct filter *filter;
+ const char *str;
+ size_t sz, offset, len;
+ int ret;
+
+ MAY_LJMP(check_args(L, 2, "append"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+
+ offset = co_data(chn);
+ len = ci_data(chn);
+
+ filter = hlua_channel_filter(L, 1, chn, &offset, &len);
+ if (filter && !hlua_filter_from_payload(filter))
+ WILL_LJMP(lua_error(L));
+
+ ret = _hlua_channel_insert(chn, L, ist2(str, sz), offset);
+ if (ret > 0 && filter) {
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ flt_update_offsets(filter, chn, ret);
+ flt_ctx->cur_len[CHN_IDX(chn)] += ret;
+ }
+ lua_pushinteger(L, ret);
+ return 1;
+}
+
/* Prepends a string in front of the input side of the channel. It returns the
 * length of the written string, or -1 if the channel is closed or if the
 * buffer size is too little for the data. 0 may be returned if nothing is
 * copied. This function does not yield.
 *
 * For a filter, the context is updated on success.
 */
__LJMP static int hlua_channel_prepend(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        const char *str;
        size_t sz, offset, len;
        int ret;

        MAY_LJMP(check_args(L, 2, "prepend"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));
        str = MAY_LJMP(luaL_checklstring(L, 2, &sz));

        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        offset = co_data(chn);
        len = ci_data(chn);

        filter = hlua_channel_filter(L, 1, chn, &offset, &len);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* insert at <offset>, i.e. right before the first byte of input data */
        ret = _hlua_channel_insert(chn, L, ist2(str, sz), offset);
        if (ret > 0 && filter) {
                struct hlua_flt_ctx *flt_ctx = filter->ctx;

                flt_update_offsets(filter, chn, ret);
                flt_ctx->cur_len[CHN_IDX(chn)] += ret;
        }

        lua_pushinteger(L, ret);
        return 1;
}
+
/* Inserts a string at a given offset inside the channel's input data. By
 * default the string is inserted in front of input data. It returns the
 * length of the written string, or -1 if the channel is closed or if the
 * buffer size is too little for the data.
 *
 * For a filter, the context is updated on success.
 */
__LJMP static int hlua_channel_insert_data(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        const char *str;
        size_t sz, input, output;
        int ret, offset;

        if (lua_gettop(L) < 2 || lua_gettop(L) > 3)
                WILL_LJMP(luaL_error(L, "'insert' expects at least 1 argument and at most 2 arguments"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));
        str = MAY_LJMP(luaL_checklstring(L, 2, &sz));

        output = co_data(chn);
        input = ci_data(chn);

        /* a filter narrows <output>/<input> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &output, &input);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* optional 3rd argument: insertion offset relative to input data; a
         * negative value counts back from the end of input data
         */
        offset = output;
        if (lua_gettop(L) > 2) {
                offset = MAY_LJMP(luaL_checkinteger(L, 3));
                if (offset < 0)
                        offset = MAX(0, (int)input + offset);
                offset += output;
                if (offset > output + input) {
                        lua_pushfstring(L, "offset out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }
        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        ret = _hlua_channel_insert(chn, L, ist2(str, sz), offset);
        if (ret > 0 && filter) {
                struct hlua_flt_ctx *flt_ctx = filter->ctx;

                flt_update_offsets(filter, chn, ret);
                flt_ctx->cur_len[CHN_IDX(chn)] += ret;
        }

        lua_pushinteger(L, ret);
        return 1;
}
/* Replaces a given amount of input data at the given offset by a string
 * content. By default all remaining data are removed (offset = 0 and len =
 * -1). It returns the length of the written string, or -1 if the channel is
 * closed or if the buffer size is too little for the data.
 *
 * For a filter, the context is updated on success.
 */
__LJMP static int hlua_channel_set_data(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        const char *str;
        size_t sz, input, output;
        int ret, offset, len;

        if (lua_gettop(L) < 2 || lua_gettop(L) > 4)
                WILL_LJMP(luaL_error(L, "'set' expects at least 1 argument and at most 3 arguments"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));
        str = MAY_LJMP(luaL_checklstring(L, 2, &sz));

        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        output = co_data(chn);
        input = ci_data(chn);

        /* a filter narrows <output>/<input> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &output, &input);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* optional 3rd argument: replacement offset relative to input data; a
         * negative value counts back from the end of input data
         */
        offset = output;
        if (lua_gettop(L) > 2) {
                offset = MAY_LJMP(luaL_checkinteger(L, 3));
                if (offset < 0)
                        offset = MAX(0, (int)input + offset);
                offset += output;
                if (offset < output || offset > input + output) {
                        lua_pushfstring(L, "offset out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* optional 4th argument: number of bytes to replace; 0 replaces
         * nothing (pure insert), -1 replaces everything up to the end
         */
        len = output + input - offset;
        if (lua_gettop(L) == 4) {
                len = MAY_LJMP(luaL_checkinteger(L, 4));
                if (!len)
                        goto set;
                if (len == -1)
                        len = output + input - offset;
                if (len < 0 || offset + len > output + input) {
                        lua_pushfstring(L, "length out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

  set:
        /* Make sure the string will fit once <len> bytes have been removed. */
        if (sz > c_room(chn) + len)
                lua_pushinteger(L, -1);
        else {
                /* delete the replaced span, then insert the new content */
                _hlua_channel_delete(chn, offset, len);
                ret = _hlua_channel_insert(chn, L, ist2(str, sz), offset);
                if (filter) {
                        struct hlua_flt_ctx *flt_ctx = filter->ctx;

                        /* account for the net size change in the filter view */
                        len -= (ret > 0 ? ret : 0);
                        flt_update_offsets(filter, chn, -len);
                        flt_ctx->cur_len[CHN_IDX(chn)] -= len;
                }

                lua_pushinteger(L, ret);
        }
        return 1;
}
+
/* Removes a given amount of input data at the given offset. By default all
 * input data are removed (offset = 0 and len = -1). It returns the amount of
 * the removed data.
 *
 * For a filter, the context is updated on success.
 */
__LJMP static int hlua_channel_del_data(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        size_t input, output;
        int offset, len;

        if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
                WILL_LJMP(luaL_error(L, "'remove' expects at most 2 arguments"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        /* raw channel access is only allowed on TCP (non-HTX) streams */
        if (IS_HTX_STRM(chn_strm(chn))) {
                lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
                WILL_LJMP(lua_error(L));
        }

        output = co_data(chn);
        input = ci_data(chn);

        /* a filter narrows <output>/<input> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &output, &input);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* optional 2nd argument: removal offset relative to input data; a
         * negative value counts back from the end of input data
         */
        offset = output;
        if (lua_gettop(L) > 1) {
                offset = MAY_LJMP(luaL_checkinteger(L, 2));
                if (offset < 0)
                        offset = MAX(0, (int)input + offset);
                offset += output;
                if (offset < output || offset > input + output) {
                        lua_pushfstring(L, "offset out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        /* optional 3rd argument: number of bytes to remove; 0 removes
         * nothing, -1 removes everything up to the end of input data
         */
        len = output + input - offset;
        if (lua_gettop(L) == 3) {
                len = MAY_LJMP(luaL_checkinteger(L, 3));
                if (!len)
                        goto end;
                if (len == -1)
                        len = output + input - offset;
                if (len < 0 || offset + len > output + input) {
                        lua_pushfstring(L, "length out of range.");
                        WILL_LJMP(lua_error(L));
                }
        }

        _hlua_channel_delete(chn, offset, len);
        if (filter) {
                struct hlua_flt_ctx *flt_ctx = filter->ctx;

                flt_update_offsets(filter, chn, -len);
                flt_ctx->cur_len[CHN_IDX(chn)] -= len;
        }

  end:
        lua_pushinteger(L, len);
        return 1;
}
+
+/* Append data in the output side of the buffer. This data is immediately
+ * sent. The function returns the amount of data written. If the buffer
+ * cannot contain the data, the function yields. The function returns -1
+ * if the channel is closed.
+ */
+__LJMP static int hlua_channel_send_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct channel *chn;
+ struct filter *filter;
+ const char *str;
+ size_t offset, len, sz;
+ int l, ret;
+ struct hlua *hlua;
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
+ l = MAY_LJMP(luaL_checkinteger(L, 3));
+
+ offset = co_data(chn);
+ len = ci_data(chn);
+
+ filter = hlua_channel_filter(L, 1, chn, &offset, &len);
+ if (filter && !hlua_filter_from_payload(filter))
+ WILL_LJMP(lua_error(L));
+
+
+ if (unlikely(channel_output_closed(chn))) {
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+
+ len = c_room(chn);
+ if (len > sz -l) {
+ if (filter) {
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+ len = sz - l;
+ }
+
+ ret = _hlua_channel_insert(chn, L, ist2(str, len), offset);
+ if (ret == -1) {
+ lua_pop(L, 1);
+ lua_pushinteger(L, -1);
+ return 1;
+ }
+ if (ret) {
+ if (filter) {
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+
+ flt_update_offsets(filter, chn, ret);
+ FLT_OFF(filter, chn) += ret;
+ flt_ctx->cur_off[CHN_IDX(chn)] += ret;
+ }
+ else
+ c_adv(chn, ret);
+
+ l += ret;
+ lua_pop(L, 1);
+ lua_pushinteger(L, l);
+ }
+
+ if (l < sz) {
+ /* Yield only if the channel's output is not empty.
+ * Otherwise it means we cannot add more data. */
+ if (co_data(chn) == 0 || HLUA_CANT_YIELD(hlua_gethlua(L)))
+ return 1;
+
+ /* If we are waiting for space in the response buffer, we
+ * must set the flag WAKERESWR. This flag required the task
+ * wake up if any activity is detected on the response buffer.
+ */
+ if (chn->flags & CF_ISRESP)
+ HLUA_SET_WAKERESWR(hlua);
+ else
+ HLUA_SET_WAKEREQWR(hlua);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_channel_send_yield, TICK_ETERNITY, 0));
+ }
+
+ return 1;
+}
+
+/* Just a wrapper of "_hlua_channel_send". This wrapper permits
+ * yield the LUA process, and resume it without checking the
+ * input arguments.
+ *
+ * This function cannot be called from a filter.
+ */
+__LJMP static int hlua_channel_send(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 2, "send"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+ lua_pushinteger(L, 0);
+ return MAY_LJMP(hlua_channel_send_yield(L, 0, 0));
+}
+
/* This function forwards an amount of bytes. The data pass from the input
 * side of the buffer to the output side, and can be forwarded. This function
 * never fails.
 *
 * The Lua function takes an amount of bytes to be forwarded in input. It
 * returns the number of bytes forwarded. The Lua stack holds (channel,
 * requested amount, already-forwarded count).
 */
__LJMP static int hlua_channel_forward_yield(lua_State *L, int status, lua_KContext ctx)
{
        struct channel *chn;
        struct filter *filter;
        size_t offset, len, fwd;
        int l, max;
        struct hlua *hlua;

        /* Get hlua struct, or NULL if we execute from main lua state */
        hlua = hlua_gethlua(L);
        if (!hlua) {
                lua_pushnil(L);
                return 1;
        }

        chn = MAY_LJMP(hlua_checkchannel(L, 1));
        fwd = MAY_LJMP(luaL_checkinteger(L, 2));
        l = MAY_LJMP(luaL_checkinteger(L, -1));

        offset = co_data(chn);
        len = ci_data(chn);

        /* a filter narrows <offset>/<len> to its own working area */
        filter = hlua_channel_filter(L, 1, chn, &offset, &len);
        if (filter && !hlua_filter_from_payload(filter))
                WILL_LJMP(lua_error(L));

        /* forward at most the available input data this round */
        max = fwd - l;
        if (max > len)
                max = len;

        if (filter) {
                struct hlua_flt_ctx *flt_ctx = filter->ctx;

                FLT_OFF(filter, chn) += max;
                flt_ctx->cur_off[CHN_IDX(chn)] += max;
                flt_ctx->cur_len[CHN_IDX(chn)] -= max;
        }
        else
                channel_forward(chn, max);

        /* update the forwarded counter kept on the Lua stack */
        l += max;
        lua_pop(L, 1);
        lua_pushinteger(L, l);

        /* Check if bytes remain to be forwarded. */
        if (l < fwd) {
                /* If the input channel or the output channel is closed, we
                 * must return the amount of data forwarded.
                 */
                if (channel_input_closed(chn) || channel_output_closed(chn) || HLUA_CANT_YIELD(hlua_gethlua(L)))
                        return 1;

                /* If we are waiting for space data in the response buffer, we
                 * must set the flag WAKERESWR. This flag required the task
                 * wake up if any activity is detected on the response buffer.
                 */
                if (chn->flags & CF_ISRESP)
                        HLUA_SET_WAKERESWR(hlua);
                else
                        HLUA_SET_WAKEREQWR(hlua);

                /* Otherwise, we can yield waiting for new data in the input side. */
                MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_channel_forward_yield, TICK_ETERNITY, 0));
        }

        return 1;
}
+
+/* Just check the input and prepare the stack for the previous
+ * function "hlua_channel_forward_yield"
+ *
+ * This function cannot be called from a filter.
+ */
+__LJMP static int hlua_channel_forward(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 2, "forward"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ if (IS_HTX_STRM(chn_strm(chn))) {
+ lua_pushfstring(L, "Cannot manipulate HAProxy channels in HTTP mode.");
+ WILL_LJMP(lua_error(L));
+ }
+ lua_pushinteger(L, 0);
+ return MAY_LJMP(hlua_channel_forward_yield(L, 0, 0));
+}
+
/* Lua method Channel.input(): returns the number of bytes available in the
 * input side of the buffer. This function never fails.
 */
__LJMP static int hlua_channel_get_in_len(lua_State *L)
{
        struct channel *chn;
        struct filter *filter;
        size_t output, input;

        MAY_LJMP(check_args(L, 1, "input"));
        chn = MAY_LJMP(hlua_checkchannel(L, 1));

        output = co_data(chn);
        input = ci_data(chn);
        filter = hlua_channel_filter(L, 1, chn, &output, &input);

        /* from a filter or on a raw TCP stream, report the (possibly
         * filter-scoped) input length; on an HTX stream, report the amount of
         * not-yet-forwarded HTX data instead
         */
        if (filter || !IS_HTX_STRM(chn_strm(chn)))
                lua_pushinteger(L, input);
        else {
                struct htx *htx = htxbuf(&chn->buf);

                lua_pushinteger(L, htx->data - co_data(chn));
        }
        return 1;
}
+
+/* Returns true if the channel is full. */
+__LJMP static int hlua_channel_is_full(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 1, "is_full"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ /* ignore the reserve, we are not on a producer side (ie in an
+ * applet).
+ */
+ lua_pushboolean(L, channel_full(chn, 0));
+ return 1;
+}
+
+/* Returns true if the channel may still receive data. */
+__LJMP static int hlua_channel_may_recv(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 1, "may_recv"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+ lua_pushboolean(L, (!channel_input_closed(chn) && channel_may_recv(chn)));
+ return 1;
+}
+
+/* Returns true if the channel is the response channel. */
+__LJMP static int hlua_channel_is_resp(lua_State *L)
+{
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 1, "is_resp"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+
+ lua_pushboolean(L, !!(chn->flags & CF_ISRESP));
+ return 1;
+}
+
+/* Just returns the number of bytes available in the output
+ * side of the buffer. This function never fails.
+ */
+__LJMP static int hlua_channel_get_out_len(lua_State *L)
+{
+ struct channel *chn;
+ size_t output, input;
+
+ MAY_LJMP(check_args(L, 1, "output"));
+ chn = MAY_LJMP(hlua_checkchannel(L, 1));
+
+ output = co_data(chn);
+ input = ci_data(chn);
+ hlua_channel_filter(L, 1, chn, &output, &input);
+
+ lua_pushinteger(L, output);
+ return 1;
+}
+
+/*
+ *
+ *
+ * Class Fetches
+ *
+ *
+ */
+
/* Returns the struct hlua_smp associated with the class Fetches object at
 * stack index "ud", or throws an argument error if the object is not of the
 * expected class.
 */
__LJMP static struct hlua_smp *hlua_checkfetches(lua_State *L, int ud)
{
        return MAY_LJMP(hlua_checkudata(L, ud, class_fetches_ref));
}
+
/* This function creates and pushes on the stack a Fetches object bound to the
 * current TXN. <flags> carries the HLUA_F_* execution flags copied into the
 * object. Returns 1 on success, 0 if the Lua stack cannot grow.
 */
static int hlua_fetches_new(lua_State *L, struct hlua_txn *txn, unsigned int flags)
{
        struct hlua_smp *hsmp;

        /* Check stack size. */
        if (!lua_checkstack(L, 3))
                return 0;

        /* Create the object: obj[0] = userdata.
         * Note that the base of the Fetches object is the
         * transaction object.
         */
        lua_newtable(L);
        hsmp = lua_newuserdata(L, sizeof(*hsmp));
        lua_rawseti(L, -2, 0);

        /* copy the stream/proxy/direction and flags from the transaction */
        hsmp->s = txn->s;
        hsmp->p = txn->p;
        hsmp->dir = txn->dir;
        hsmp->flags = flags;

        /* Pop the class Fetches metatable and assign it to the new object. */
        lua_rawgeti(L, LUA_REGISTRYINDEX, class_fetches_ref);
        lua_setmetatable(L, -2);

        return 1;
}
+
/* This function is an LUA binding. It is called with each sample-fetch.
 * It uses closure argument to store the associated sample-fetch. It
 * returns only one argument or throws an error. An error is thrown
 * only if an error is encountered during the argument parsing. If
 * the "sample-fetch" function fails, nil is returned (or an empty string
 * when the object was created with HLUA_F_AS_STRING).
 */
__LJMP static int hlua_run_sample_fetch(lua_State *L)
{
        struct hlua_smp *hsmp;
        struct sample_fetch *f;
        struct arg args[ARGM_NBARGS + 1] = {{0}};
        int i;
        struct sample smp;

        /* Get closure arguments. */
        f = lua_touserdata(L, lua_upvalueindex(1));

        /* Get traditional arguments. */
        hsmp = MAY_LJMP(hlua_checkfetches(L, 1));

        /* Check execution authorization: HTTP sample fetches need an HTTP
         * parser, which Lua services do not provide.
         */
        if (f->use & SMP_USE_HTTP_ANY &&
            !(hsmp->flags & HLUA_F_MAY_USE_HTTP)) {
                lua_pushfstring(L, "the sample-fetch '%s' needs an HTTP parser which "
                                   "is not available in Lua services", f->kw);
                WILL_LJMP(lua_error(L));
        }

        /* Get extra arguments (at most ARGM_NBARGS), then terminate the list. */
        for (i = 0; i < lua_gettop(L) - 1; i++) {
                if (i >= ARGM_NBARGS)
                        break;
                hlua_lua2arg(L, i + 2, &args[i]);
        }
        args[i].type = ARGT_STOP;
        args[i].data.str.area = NULL;

        /* Check arguments. */
        MAY_LJMP(hlua_lua2arg_check(L, 2, args, f->arg_mask, hsmp->p));

        /* Run the special args checker. */
        if (f->val_args && !f->val_args(args, NULL)) {
                lua_pushfstring(L, "error in arguments");
                goto error;
        }

        /* Initialise the sample. */
        memset(&smp, 0, sizeof(smp));

        /* Run the sample fetch process. */
        smp_set_owner(&smp, hsmp->p, hsmp->s->sess, hsmp->s, hsmp->dir & SMP_OPT_DIR);
        if (!f->process(args, &smp, f->kw, f->private)) {
                /* fetch failed: "" or nil depending on the string flag */
                if (hsmp->flags & HLUA_F_AS_STRING)
                        lua_pushstring(L, "");
                else
                        lua_pushnil(L);
                goto end;
        }

        /* Convert the returned sample in lua value. */
        if (hsmp->flags & HLUA_F_AS_STRING)
                MAY_LJMP(hlua_smp2lua_str(L, &smp));
        else
                MAY_LJMP(hlua_smp2lua(L, &smp));

  end:
        free_args(args);
        return 1;

  error:
        free_args(args);
        WILL_LJMP(lua_error(L));
        return 0; /* Never reached */
}
+
+/*
+ *
+ *
+ * Class Converters
+ *
+ *
+ */
+
+/* Returns a struct hlua_smp if the stack entry "ud" is
+ * a class Converters object, otherwise it throws an error.
+ */
+__LJMP static struct hlua_smp *hlua_checkconverters(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_converters_ref));
+}
+
+/* This function creates and push in the stack a Converters object
+ * according with a current TXN. Returns 1 on success (object left on
+ * top of the Lua stack) and 0 if the stack could not be grown.
+ */
+static int hlua_converters_new(lua_State *L, struct hlua_txn *txn, unsigned int flags)
+{
+ struct hlua_smp *hsmp;
+
+ /* Check stack size. */
+ if (!lua_checkstack(L, 3))
+ return 0;
+
+ /* Create the object: obj[0] = userdata.
+ * Note that the base of the Converters object is the
+ * same than the TXN object.
+ */
+ lua_newtable(L);
+ hsmp = lua_newuserdata(L, sizeof(*hsmp));
+ lua_rawseti(L, -2, 0);
+
+ /* Copy the stream/proxy/direction context from the TXN. */
+ hsmp->s = txn->s;
+ hsmp->p = txn->p;
+ hsmp->dir = txn->dir;
+ hsmp->flags = flags;
+
+ /* Pop the class Converters metatable and affect it to the table. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_converters_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+/* This function is an LUA binding. It is called with each converter.
+ * It uses closure argument to store the associated converter. It
+ * returns only one argument or throws an error. An error is thrown
+ * only if an error is encountered during the argument parsing. If
+ * the converter function fails, nil is returned (or an empty string
+ * when the Converters object was created with HLUA_F_AS_STRING).
+ */
+__LJMP static int hlua_run_sample_conv(lua_State *L)
+{
+ struct hlua_smp *hsmp;
+ struct sample_conv *conv;
+ struct arg args[ARGM_NBARGS + 1] = {{0}};
+ int i;
+ struct sample smp;
+
+ /* Get closure arguments: the sample_conv descriptor was stored
+ * as an upvalue when the closure was registered.
+ */
+ conv = lua_touserdata(L, lua_upvalueindex(1));
+
+ /* Get traditional arguments. */
+ hsmp = MAY_LJMP(hlua_checkconverters(L, 1));
+
+ /* Get extra arguments: slot 2 is the input sample, so converter
+ * arguments start at Lua stack slot 3.
+ */
+ for (i = 0; i < lua_gettop(L) - 2; i++) {
+ if (i >= ARGM_NBARGS)
+ break;
+ hlua_lua2arg(L, i + 3, &args[i]);
+ }
+ /* Terminate the argument list. */
+ args[i].type = ARGT_STOP;
+ args[i].data.str.area = NULL;
+
+ /* Check arguments. */
+ MAY_LJMP(hlua_lua2arg_check(L, 3, args, conv->arg_mask, hsmp->p));
+
+ /* Run the special args checker. */
+ if (conv->val_args && !conv->val_args(args, conv, "", 0, NULL)) {
+ hlua_pusherror(L, "error in arguments");
+ goto error;
+ }
+
+ /* Initialise the sample with the Lua input value (stack slot 2). */
+ memset(&smp, 0, sizeof(smp));
+ if (!hlua_lua2smp(L, 2, &smp)) {
+ hlua_pusherror(L, "error in the input argument");
+ goto error;
+ }
+
+ smp_set_owner(&smp, hsmp->p, hsmp->s->sess, hsmp->s, hsmp->dir & SMP_OPT_DIR);
+
+ /* Apply expected cast so the sample type matches the converter's
+ * declared input type.
+ */
+ if (!sample_casts[smp.data.type][conv->in_type]) {
+ hlua_pusherror(L, "invalid input argument: cannot cast '%s' to '%s'",
+ smp_to_type[smp.data.type], smp_to_type[conv->in_type]);
+ goto error;
+ }
+ if (sample_casts[smp.data.type][conv->in_type] != c_none &&
+ !sample_casts[smp.data.type][conv->in_type](&smp)) {
+ hlua_pusherror(L, "error during the input argument casting");
+ goto error;
+ }
+
+ /* Run the sample conversion process. */
+ if (!conv->process(args, &smp, conv->private)) {
+ if (hsmp->flags & HLUA_F_AS_STRING)
+ lua_pushstring(L, "");
+ else
+ lua_pushnil(L);
+ goto end;
+ }
+
+ /* Convert the returned sample in lua value. */
+ if (hsmp->flags & HLUA_F_AS_STRING)
+ MAY_LJMP(hlua_smp2lua_str(L, &smp));
+ else
+ MAY_LJMP(hlua_smp2lua(L, &smp));
+ end:
+ free_args(args);
+ return 1;
+
+ error:
+ free_args(args);
+ WILL_LJMP(lua_error(L));
+ return 0; /* Never reached */
+}
+
+/*
+ *
+ *
+ * Class AppletTCP
+ *
+ *
+ */
+
+/* Returns a struct hlua_appctx if the stack entry "ud" is
+ * a class AppletTCP object, otherwise it throws an error.
+ */
+__LJMP static struct hlua_appctx *hlua_checkapplet_tcp(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_applet_tcp_ref));
+}
+
+/* This function creates and push in the stack an AppletTCP object
+ * according with a current TXN. Returns 1 on success (object left on
+ * top of the Lua stack) and 0 if the stack could not be grown or a
+ * sub-object could not be created.
+ */
+static int hlua_applet_tcp_new(lua_State *L, struct appctx *ctx)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s = appctx_strm(ctx);
+ struct proxy *p;
+
+ ALREADY_CHECKED(s);
+ p = s->be;
+
+ /* Check stack size. */
+ if (!lua_checkstack(L, 3))
+ return 0;
+
+ /* Create the object: obj[0] = userdata.
+ * Note that the base of the Converters object is the
+ * same than the TXN object.
+ */
+ lua_newtable(L);
+ luactx = lua_newuserdata(L, sizeof(*luactx));
+ lua_rawseti(L, -2, 0);
+ luactx->appctx = ctx;
+ luactx->htxn.s = s;
+ luactx->htxn.p = p;
+
+ /* Create the "f" field that contains a list of fetches. */
+ lua_pushstring(L, "f");
+ if (!hlua_fetches_new(L, &luactx->htxn, 0))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "sf" field that contains a list of stringsafe fetches. */
+ lua_pushstring(L, "sf");
+ if (!hlua_fetches_new(L, &luactx->htxn, HLUA_F_AS_STRING))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "c" field that contains a list of converters. */
+ lua_pushstring(L, "c");
+ if (!hlua_converters_new(L, &luactx->htxn, 0))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "sc" field that contains a list of stringsafe converters. */
+ lua_pushstring(L, "sc");
+ if (!hlua_converters_new(L, &luactx->htxn, HLUA_F_AS_STRING))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Pop the class AppletTCP metatable and affect it to the table. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_applet_tcp_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+/* Lua usage: AppletTCP:set_var(name, value[, ifexist]). Stores a Lua
+ * value into an HAProxy variable. Returns a boolean reporting whether
+ * the variable could be set.
+ */
+__LJMP static int hlua_applet_tcp_set_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ if (lua_gettop(L) < 3 || lua_gettop(L) > 4)
+ WILL_LJMP(luaL_error(L, "'set_var' needs between 3 and 4 arguments"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ /* Converts the third argument in a sample. */
+ memset(&smp, 0, sizeof(smp));
+ hlua_lua2smp(L, 3, &smp);
+
+ /* Store the sample in a variable. We don't need to dup the smp, vars API
+ * already takes care of duplicating dynamic var data.
+ */
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+
+ /* Optional 4th argument: only set the variable if it already exists. */
+ if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
+ lua_pushboolean(L, vars_set_by_name_ifexist(name, len, &smp) != 0);
+ else
+ lua_pushboolean(L, vars_set_by_name(name, len, &smp) != 0);
+
+ return 1;
+}
+
+/* Lua usage: AppletTCP:unset_var(name). Unsets an HAProxy variable and
+ * returns a boolean reporting whether it existed.
+ */
+__LJMP static int hlua_applet_tcp_unset_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "unset_var"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ /* Unset the variable. */
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+ lua_pushboolean(L, vars_unset_by_name_ifexist(name, len, &smp) != 0);
+ return 1;
+}
+
+/* Lua usage: AppletTCP:get_var(name). Returns the variable content
+ * converted to a Lua value, or nil when the variable is not found.
+ */
+__LJMP static int hlua_applet_tcp_get_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "get_var"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+ if (!vars_get_by_name(name, len, &smp, NULL)) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ return MAY_LJMP(hlua_smp2lua(L, &smp));
+}
+
+/* Lua usage: AppletTCP:set_priv(value). Stores an arbitrary Lua value
+ * in the stream's hlua registry slot (Mref), replacing any previous
+ * value. The value is shared with get_priv() on the same stream.
+ */
+__LJMP static int hlua_applet_tcp_set_priv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ struct stream *s = luactx->htxn.s;
+ struct hlua *hlua;
+
+ /* Note that this hlua struct is from the session and not from the applet. */
+ if (!s->hlua)
+ return 0;
+ hlua = s->hlua;
+
+ MAY_LJMP(check_args(L, 2, "set_priv"));
+
+ /* Remove previous value. */
+ luaL_unref(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ /* Get and store new value. */
+ lua_pushvalue(L, 2); /* Copy the element 2 at the top of the stack. */
+ hlua->Mref = luaL_ref(L, LUA_REGISTRYINDEX); /* pop the previously pushed value. */
+
+ return 0;
+}
+
+/* Lua usage: AppletTCP:get_priv(). Returns the value previously stored
+ * with set_priv() on the same stream, or nil when none was stored.
+ */
+__LJMP static int hlua_applet_tcp_get_priv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ struct stream *s = luactx->htxn.s;
+ struct hlua *hlua;
+
+ /* Note that this hlua struct is from the session and not from the applet. */
+ if (!s->hlua) {
+ lua_pushnil(L);
+ return 1;
+ }
+ hlua = s->hlua;
+
+ /* Push configuration index in the stack. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ return 1;
+}
+
+/* If expected data not yet available, it returns a yield. This function
+ * consumes the data in the buffer. It returns a string containing the
+ * data. This string can be empty.
+ *
+ * Continuation of AppletTCP:getline(): co_getline_nc() returns 0 when
+ * no line is available yet (yield), < 0 when the channel is closed
+ * (flush what was accumulated), and 1 or 2 for the number of usable
+ * contiguous blocks.
+ */
+__LJMP static int hlua_applet_tcp_getline_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ int ret;
+ const char *blk1;
+ size_t len1;
+ const char *blk2;
+ size_t len2;
+
+ /* Read the maximum amount of data available. */
+ ret = co_getline_nc(sc_oc(sc), &blk1, &len1, &blk2, &len2);
+
+ /* Data not yet available. return yield. */
+ if (ret == 0) {
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_getline_yield, TICK_ETERNITY, 0));
+ }
+
+ /* End of data: commit the total strings and return. */
+ if (ret < 0) {
+ luaL_pushresult(&luactx->b);
+ return 1;
+ }
+
+ /* Ensure that the block 2 length is usable. */
+ if (ret == 1)
+ len2 = 0;
+
+ /* Append both blocks; no maximum line length is enforced here. */
+ luaL_addlstring(&luactx->b, blk1, len1);
+ luaL_addlstring(&luactx->b, blk2, len2);
+
+ /* Consume input channel output buffer data. */
+ co_skip(sc_oc(sc), len1 + len2);
+ luaL_pushresult(&luactx->b);
+ return 1;
+}
+
+/* Entry point for AppletTCP:getline(): initialises the string buffer
+ * then delegates to hlua_applet_tcp_getline_yield() which may yield.
+ */
+__LJMP static int hlua_applet_tcp_getline(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+
+ /* Initialise the string catenation. */
+ luaL_buffinit(L, &luactx->b);
+
+ return MAY_LJMP(hlua_applet_tcp_getline_yield(L, 0, 0));
+}
+
+/* If expected data not yet available, it returns a yield. This function
+ * consumes the data in the buffer. It returns a string containing the
+ * data. This string can be empty.
+ *
+ * Continuation of AppletTCP:receive(): Lua stack slot 2 carries the
+ * remaining byte count and is refreshed before each yield.
+ */
+__LJMP static int hlua_applet_tcp_recv_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ size_t len = MAY_LJMP(luaL_checkinteger(L, 2));
+ int ret;
+ const char *blk1;
+ size_t len1;
+ const char *blk2;
+ size_t len2;
+
+ /* Read the maximum amount of data available. */
+ ret = co_getblk_nc(sc_oc(sc), &blk1, &len1, &blk2, &len2);
+
+ /* Data not yet available. return yield. */
+ if (ret == 0) {
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_recv_yield, TICK_ETERNITY, 0));
+ }
+
+ /* End of data: commit the total strings and return. */
+ if (ret < 0) {
+ luaL_pushresult(&luactx->b);
+ return 1;
+ }
+
+ /* Ensure that the block 2 length is usable. */
+ if (ret == 1)
+ len2 = 0;
+
+ /* NOTE(review): len is a size_t, so -1 is compared after implicit
+ * conversion to SIZE_MAX; this works but is subtle.
+ */
+ if (len == -1) {
+
+ /* If len == -1, catenate all the data available and
+ * yield because we want to get all the data until
+ * the end of data stream.
+ */
+ luaL_addlstring(&luactx->b, blk1, len1);
+ luaL_addlstring(&luactx->b, blk2, len2);
+ co_skip(sc_oc(sc), len1 + len2);
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_recv_yield, TICK_ETERNITY, 0));
+
+ } else {
+
+ /* Copy the first block capping to the length required. */
+ if (len1 > len)
+ len1 = len;
+ luaL_addlstring(&luactx->b, blk1, len1);
+ len -= len1;
+
+ /* Copy the second block. */
+ if (len2 > len)
+ len2 = len;
+ luaL_addlstring(&luactx->b, blk2, len2);
+ len -= len2;
+
+ /* Consume input channel output buffer data. */
+ co_skip(sc_oc(sc), len1 + len2);
+
+ /* If there is no other data available, yield waiting for new data. */
+ if (len > 0) {
+ lua_pushinteger(L, len);
+ lua_replace(L, 2);
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_recv_yield, TICK_ETERNITY, 0));
+ }
+
+ /* return the result. */
+ luaL_pushresult(&luactx->b);
+ return 1;
+ }
+
+ /* we never execute this */
+ hlua_pusherror(L, "Lua: internal error");
+ WILL_LJMP(lua_error(L));
+ return 0;
+}
+
+/* Entry point for AppletTCP:receive([len]): normalises the optional
+ * length argument (-1 means "read until end of stream"), initialises
+ * the string buffer, then delegates to the yield-capable continuation.
+ */
+__LJMP static int hlua_applet_tcp_recv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ int len = -1;
+
+ if (lua_gettop(L) > 2)
+ WILL_LJMP(luaL_error(L, "The 'recv' function requires between 1 and 2 arguments."));
+ if (lua_gettop(L) >= 2) {
+ len = MAY_LJMP(luaL_checkinteger(L, 2));
+ lua_pop(L, 1);
+ }
+
+ /* Confirm or set the required length */
+ lua_pushinteger(L, len);
+
+ /* Initialise the string catenation. */
+ luaL_buffinit(L, &luactx->b);
+
+ return MAY_LJMP(hlua_applet_tcp_recv_yield(L, 0, 0));
+}
+
+/* Append data in the output side of the buffer. This data is immediately
+ * sent. The function returns the amount of data written. If the buffer
+ * cannot contain the data, the function yields. The function returns -1
+ * if the channel is closed.
+ *
+ * Lua stack: slot 2 is the string to send, slot 3 is the running count
+ * of bytes already sent (updated before each yield).
+ */
+__LJMP static int hlua_applet_tcp_send_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ size_t len;
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_tcp(L, 1));
+ const char *str = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ int l = MAY_LJMP(luaL_checkinteger(L, 3));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *chn = sc_ic(sc);
+ int max;
+
+ /* Get the max amount of data which can write as input in the channel. */
+ max = channel_recv_max(chn);
+ if (max > (len - l))
+ max = len - l;
+
+ /* Copy data. */
+ ci_putblk(chn, str + l, max);
+
+ /* update counters. */
+ l += max;
+ lua_pop(L, 1);
+ lua_pushinteger(L, l);
+
+ /* If some data is not send, declares the situation to the
+ * applet, and returns a yield.
+ */
+ if (l < len) {
+ sc_need_room(sc, channel_recv_max(chn) + 1);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_tcp_send_yield, TICK_ETERNITY, 0));
+ }
+
+ return 1;
+}
+
+/* Just a wrapper of "hlua_applet_tcp_send_yield". This wrapper permits
+ * yield the LUA process, and resume it without checking the
+ * input arguments. It seeds the sent-bytes counter (stack slot 3)
+ * with zero before delegating.
+ */
+__LJMP static int hlua_applet_tcp_send(lua_State *L)
+{
+ MAY_LJMP(check_args(L, 2, "send"));
+ lua_pushinteger(L, 0);
+
+ return MAY_LJMP(hlua_applet_tcp_send_yield(L, 0, 0));
+}
+
+/*
+ *
+ *
+ * Class AppletHTTP
+ *
+ *
+ */
+
+/* Returns a struct hlua_appctx if the stack entry "ud" is
+ * a class AppletHTTP object, otherwise it throws an error.
+ */
+__LJMP static struct hlua_appctx *hlua_checkapplet_http(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_applet_http_ref));
+}
+
+/* This function creates and push in the stack an AppletHTTP object
+ * according with a current TXN. It also fills the object with the
+ * request method, version, headers, path, query-string, body length
+ * and an empty "response" headers table.
+ * It relies on the caller to have already reserved the room in ctx->svcctx
+ * for the local storage of hlua_http_ctx.
+ * Returns 1 on success (object left on top of the Lua stack) and 0 on
+ * failure.
+ */
+static int hlua_applet_http_new(lua_State *L, struct appctx *ctx)
+{
+ struct hlua_http_ctx *http_ctx = ctx->svcctx;
+ struct hlua_appctx *luactx;
+ struct hlua_txn htxn;
+ struct stream *s = appctx_strm(ctx);
+ struct proxy *px = s->be;
+ struct htx *htx;
+ struct htx_blk *blk;
+ struct htx_sl *sl;
+ struct ist path;
+ unsigned long long len = 0;
+ int32_t pos;
+ struct http_uri_parser parser;
+
+ /* Check stack size. */
+ if (!lua_checkstack(L, 3))
+ return 0;
+
+ /* Create the object: obj[0] = userdata.
+ * Note that the base of the Converters object is the
+ * same than the TXN object.
+ */
+ lua_newtable(L);
+ luactx = lua_newuserdata(L, sizeof(*luactx));
+ lua_rawseti(L, -2, 0);
+ luactx->appctx = ctx;
+ http_ctx->status = 200; /* Default status code returned. */
+ http_ctx->reason = NULL; /* Use default reason based on status */
+ luactx->htxn.s = s;
+ luactx->htxn.p = px;
+
+ /* Create the "f" field that contains a list of fetches. */
+ lua_pushstring(L, "f");
+ if (!hlua_fetches_new(L, &luactx->htxn, 0))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "sf" field that contains a list of stringsafe fetches. */
+ lua_pushstring(L, "sf");
+ if (!hlua_fetches_new(L, &luactx->htxn, HLUA_F_AS_STRING))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "c" field that contains a list of converters. */
+ lua_pushstring(L, "c");
+ if (!hlua_converters_new(L, &luactx->htxn, 0))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Create the "sc" field that contains a list of stringsafe converters. */
+ lua_pushstring(L, "sc");
+ if (!hlua_converters_new(L, &luactx->htxn, HLUA_F_AS_STRING))
+ return 0;
+ lua_settable(L, -3);
+
+ /* The first HTX block of the request must be the start-line. */
+ htx = htxbuf(&s->req.buf);
+ blk = htx_get_first_blk(htx);
+ BUG_ON(!blk || htx_get_blk_type(blk) != HTX_BLK_REQ_SL);
+ sl = htx_get_blk_ptr(htx, blk);
+
+ /* Stores the request method. */
+ lua_pushstring(L, "method");
+ lua_pushlstring(L, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl));
+ lua_settable(L, -3);
+
+ /* Stores the http version. */
+ lua_pushstring(L, "version");
+ lua_pushlstring(L, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl));
+ lua_settable(L, -3);
+
+ /* creates an array of headers. hlua_http_get_headers() creates and push
+ * the array on the top of the stack.
+ */
+ lua_pushstring(L, "headers");
+ htxn.s = s;
+ htxn.p = px;
+ htxn.dir = SMP_OPT_DIR_REQ;
+ if (!hlua_http_get_headers(L, &htxn.s->txn->req))
+ return 0;
+ lua_settable(L, -3);
+
+ /* Split the URI into "path" (up to the first '?') and "qs". */
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+ path = http_parse_path(&parser);
+ if (isttest(path)) {
+ char *p, *q, *end;
+
+ p = path.ptr;
+ end = istend(path);
+ q = p;
+ while (q < end && *q != '?')
+ q++;
+
+ /* Stores the request path. */
+ lua_pushstring(L, "path");
+ lua_pushlstring(L, p, q - p);
+ lua_settable(L, -3);
+
+ /* Stores the query string. */
+ lua_pushstring(L, "qs");
+ if (*q == '?')
+ q++;
+ lua_pushlstring(L, q, end - q);
+ lua_settable(L, -3);
+ }
+
+ /* Sum the DATA block sizes already present in the HTX message. */
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA)
+ len += htx_get_blksz(blk);
+ }
+ /* Add the announced remaining payload when it is known. */
+ if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+ len += htx->extra;
+
+ /* Stores the request body length. */
+ lua_pushstring(L, "length");
+ lua_pushinteger(L, len);
+ lua_settable(L, -3);
+
+ /* Create an empty array of HTTP request headers. */
+ lua_pushstring(L, "response");
+ lua_newtable(L);
+ lua_settable(L, -3);
+
+ /* Pop the class AppletHTTP metatable and affect it to the table. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_applet_http_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+/* Lua usage: AppletHTTP:set_var(name, value[, ifexist]). Stores a Lua
+ * value into an HAProxy variable. Returns a boolean reporting whether
+ * the variable could be set.
+ */
+__LJMP static int hlua_applet_http_set_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ if (lua_gettop(L) < 3 || lua_gettop(L) > 4)
+ WILL_LJMP(luaL_error(L, "'set_var' needs between 3 and 4 arguments"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ /* Converts the third argument in a sample. */
+ memset(&smp, 0, sizeof(smp));
+ hlua_lua2smp(L, 3, &smp);
+
+ /* Store the sample in a variable. We don't need to dup the smp, vars API
+ * already takes care of duplicating dynamic var data.
+ */
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+
+ /* Optional 4th argument: only set the variable if it already exists. */
+ if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
+ lua_pushboolean(L, vars_set_by_name_ifexist(name, len, &smp) != 0);
+ else
+ lua_pushboolean(L, vars_set_by_name(name, len, &smp) != 0);
+
+ return 1;
+}
+
+/* Lua usage: AppletHTTP:unset_var(name). Unsets an HAProxy variable and
+ * returns a boolean reporting whether it existed.
+ */
+__LJMP static int hlua_applet_http_unset_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "unset_var"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ /* Unset the variable. */
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+ lua_pushboolean(L, vars_unset_by_name_ifexist(name, len, &smp) != 0);
+ return 1;
+}
+
+/* Lua usage: AppletHTTP:get_var(name). Returns the variable content
+ * converted to a Lua value, or nil when the variable is not found.
+ */
+__LJMP static int hlua_applet_http_get_var(lua_State *L)
+{
+ struct hlua_appctx *luactx;
+ struct stream *s;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "get_var"));
+
+ /* It is useless to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ s = luactx->htxn.s;
+
+ smp_set_owner(&smp, s->be, s->sess, s, 0);
+ if (!vars_get_by_name(name, len, &smp, NULL)) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ return MAY_LJMP(hlua_smp2lua(L, &smp));
+}
+
+/* Lua usage: AppletHTTP:set_priv(value). Stores an arbitrary Lua value
+ * in the stream's hlua registry slot (Mref), replacing any previous
+ * value. The value is shared with get_priv() on the same stream.
+ */
+__LJMP static int hlua_applet_http_set_priv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stream *s = luactx->htxn.s;
+ struct hlua *hlua;
+
+ /* Note that this hlua struct is from the session and not from the applet. */
+ if (!s->hlua)
+ return 0;
+ hlua = s->hlua;
+
+ MAY_LJMP(check_args(L, 2, "set_priv"));
+
+ /* Remove previous value. */
+ luaL_unref(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ /* Get and store new value. */
+ lua_pushvalue(L, 2); /* Copy the element 2 at the top of the stack. */
+ hlua->Mref = luaL_ref(L, LUA_REGISTRYINDEX); /* pop the previously pushed value. */
+
+ return 0;
+}
+
+/* Lua usage: AppletHTTP:get_priv(). Returns the value previously stored
+ * with set_priv() on the same stream, or nil when none was stored.
+ */
+__LJMP static int hlua_applet_http_get_priv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stream *s = luactx->htxn.s;
+ struct hlua *hlua;
+
+ /* Note that this hlua struct is from the session and not from the applet. */
+ if (!s->hlua) {
+ lua_pushnil(L);
+ return 1;
+ }
+ hlua = s->hlua;
+
+ /* Push configuration index in the stack. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ return 1;
+}
+
+/* If expected data not yet available, it returns a yield. This function
+ * consumes the data in the buffer. It returns a string containing the
+ * data. This string can be empty.
+ *
+ * Continuation of AppletHTTP:getline(): walks the request HTX blocks,
+ * appending DATA payload up to and including the first '\n', consuming
+ * (and removing) what was read from the message as it goes.
+ */
+__LJMP static int hlua_applet_http_getline_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *req = sc_oc(sc);
+ struct htx *htx;
+ struct htx_blk *blk;
+ size_t count;
+ int stop = 0;
+
+ htx = htx_from_buf(&req->buf);
+ count = co_data(req);
+ blk = htx_get_first_blk(htx);
+
+ while (count && !stop && blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+ struct ist v;
+ uint32_t vlen;
+ char *nl;
+
+ /* Only the part already scheduled for output is readable;
+ * a partially-forwarded block may only be a DATA block.
+ */
+ vlen = sz;
+ if (vlen > count) {
+ if (type != HTX_BLK_DATA)
+ break;
+ vlen = count;
+ }
+
+ switch (type) {
+ case HTX_BLK_UNUSED:
+ break;
+
+ case HTX_BLK_DATA:
+ v = htx_get_blk_value(htx, blk);
+ v.len = vlen;
+ /* Stop after the first newline, including it in the result. */
+ nl = istchr(v, '\n');
+ if (nl != NULL) {
+ stop = 1;
+ vlen = nl - v.ptr + 1;
+ }
+ luaL_addlstring(&luactx->b, v.ptr, vlen);
+ break;
+
+ case HTX_BLK_TLR:
+ case HTX_BLK_EOT:
+ stop = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Account the consumed bytes and drop them from the message. */
+ c_rew(req, vlen);
+ count -= vlen;
+ if (sz == vlen)
+ blk = htx_remove_blk(htx, blk);
+ else {
+ htx_cut_data_blk(htx, blk, vlen);
+ break;
+ }
+ }
+
+ /* The message was fully consumed and no more data are expected
+ * (EOM flag set).
+ */
+ if (htx_is_empty(htx) && (sc_opposite(sc)->flags & SC_FL_EOI))
+ stop = 1;
+
+ htx_to_buf(htx, &req->buf);
+ if (!stop) {
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_getline_yield, TICK_ETERNITY, 0));
+ }
+
+ /* return the result. */
+ luaL_pushresult(&luactx->b);
+ return 1;
+}
+
+
+/* Entry point for AppletHTTP:getline(): initialises the string buffer
+ * then delegates to hlua_applet_http_getline_yield() which may yield.
+ */
+__LJMP static int hlua_applet_http_getline(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+
+ /* Initialise the string catenation. */
+ luaL_buffinit(L, &luactx->b);
+
+ return MAY_LJMP(hlua_applet_http_getline_yield(L, 0, 0));
+}
+
+/* If expected data not yet available, it returns a yield. This function
+ * consumes the data in the buffer. It returns a string containing the
+ * data. This string can be empty.
+ *
+ * Continuation of AppletHTTP:receive(): Lua stack slot 2 carries the
+ * remaining byte count (-1 means "until end of message") and is
+ * refreshed before each yield.
+ */
+__LJMP static int hlua_applet_http_recv_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *req = sc_oc(sc);
+ struct htx *htx;
+ struct htx_blk *blk;
+ size_t count;
+ int len;
+
+ htx = htx_from_buf(&req->buf);
+ len = MAY_LJMP(luaL_checkinteger(L, 2));
+ count = co_data(req);
+ blk = htx_get_head_blk(htx);
+ while (count && len && blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+ struct ist v;
+ uint32_t vlen;
+
+ /* Cap the readable part to the requested length and to the
+ * amount scheduled for output; a partially-forwarded block
+ * may only be a DATA block.
+ */
+ vlen = sz;
+ if (len > 0 && vlen > len)
+ vlen = len;
+ if (vlen > count) {
+ if (type != HTX_BLK_DATA)
+ break;
+ vlen = count;
+ }
+
+ switch (type) {
+ case HTX_BLK_UNUSED:
+ break;
+
+ case HTX_BLK_DATA:
+ v = htx_get_blk_value(htx, blk);
+ luaL_addlstring(&luactx->b, v.ptr, vlen);
+ break;
+
+ case HTX_BLK_TLR:
+ case HTX_BLK_EOT:
+ /* End of payload: stop reading. */
+ len = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Account the consumed bytes and drop them from the message. */
+ c_rew(req, vlen);
+ count -= vlen;
+ if (len > 0)
+ len -= vlen;
+ if (sz == vlen)
+ blk = htx_remove_blk(htx, blk);
+ else {
+ htx_cut_data_blk(htx, blk, vlen);
+ break;
+ }
+ }
+
+ /* The message was fully consumed and no more data are expected
+ * (EOM flag set).
+ */
+ if (htx_is_empty(htx) && (sc_opposite(sc)->flags & SC_FL_EOI))
+ len = 0;
+
+ htx_to_buf(htx, &req->buf);
+
+ /* If we are no other data available, yield waiting for new data. */
+ if (len) {
+ if (len > 0) {
+ lua_pushinteger(L, len);
+ lua_replace(L, 2);
+ }
+ applet_need_more_data(luactx->appctx);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_recv_yield, TICK_ETERNITY, 0));
+ }
+
+ /* return the result. */
+ luaL_pushresult(&luactx->b);
+ return 1;
+}
+
+/* Entry point for AppletHTTP:receive([len]): normalises the optional
+ * length argument (-1 means "read until end of message"), initialises
+ * the string buffer, then delegates to the yield-capable continuation.
+ */
+__LJMP static int hlua_applet_http_recv(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ int len = -1;
+
+ /* Check arguments. */
+ if (lua_gettop(L) > 2)
+ WILL_LJMP(luaL_error(L, "The 'recv' function requires between 1 and 2 arguments."));
+ if (lua_gettop(L) >= 2) {
+ len = MAY_LJMP(luaL_checkinteger(L, 2));
+ lua_pop(L, 1);
+ }
+
+ lua_pushinteger(L, len);
+
+ /* Initialise the string catenation. */
+ luaL_buffinit(L, &luactx->b);
+
+ return MAY_LJMP(hlua_applet_http_recv_yield(L, 0, 0));
+}
+
+/* Append data in the output side of the buffer. This data is immediately
+ * sent. The function returns the amount of data written. If the buffer
+ * cannot contain the data, the function yields. The function returns -1
+ * if the channel is closed.
+ *
+ * Lua stack: slot 2 is the string to send, slot 3 is the running count
+ * of bytes already sent (updated before each yield).
+ */
+__LJMP static int hlua_applet_http_send_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *res = sc_ic(sc);
+ struct htx *htx = htx_from_buf(&res->buf);
+ const char *data;
+ size_t len;
+ int l = MAY_LJMP(luaL_checkinteger(L, 3));
+ int max;
+
+ /* No room at all in the response HTX message: yield right away. */
+ max = htx_get_max_blksz(htx, channel_htx_recv_max(res, htx));
+ if (!max)
+ goto snd_yield;
+
+ data = MAY_LJMP(luaL_checklstring(L, 2, &len));
+
+ /* Get the max amount of data which can write as input in the channel. */
+ if (max > (len - l))
+ max = len - l;
+
+ /* Copy data. */
+ max = htx_add_data(htx, ist2(data + l, max));
+ channel_add_input(res, max);
+
+ /* update counters. */
+ l += max;
+ lua_pop(L, 1);
+ lua_pushinteger(L, l);
+
+ /* If some data is not send, declares the situation to the
+ * applet, and returns a yield.
+ */
+ if (l < len) {
+ snd_yield:
+ htx_to_buf(htx, &res->buf);
+ sc_need_room(sc, channel_recv_max(res) + 1);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_send_yield, TICK_ETERNITY, 0));
+ }
+
+ htx_to_buf(htx, &res->buf);
+ return 1;
+}
+
+/* Just a wrapper of "hlua_applet_send_yield". This wrapper permits
+ * yield the LUA process, and resume it without checking the
+ * input arguments. It refuses to send payload before the response
+ * headers were emitted by start_response().
+ */
+__LJMP static int hlua_applet_http_send(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct hlua_http_ctx *http_ctx = luactx->appctx->svcctx;
+
+ /* We want to send some data. Headers must be sent. */
+ if (!(http_ctx->flags & APPLET_HDR_SENT)) {
+ hlua_pusherror(L, "Lua: 'send' you must call start_response() before sending data.");
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* This integer is used for following the amount of data sent. */
+ lua_pushinteger(L, 0);
+
+ return MAY_LJMP(hlua_applet_http_send_yield(L, 0, 0));
+}
+
+/* Lua usage: AppletHTTP:add_header(name, value). Appends a value to
+ * AppletHTTP['response'][name], creating the per-header array on first
+ * use. Returns true, or throws when the 'response' structure is
+ * corrupted.
+ */
+__LJMP static int hlua_applet_http_addheader(lua_State *L)
+{
+ const char *name;
+ int ret;
+
+ MAY_LJMP(hlua_checkapplet_http(L, 1));
+ name = MAY_LJMP(luaL_checkstring(L, 2));
+ MAY_LJMP(luaL_checkstring(L, 3));
+
+ /* Push in the stack the "response" entry. */
+ ret = lua_getfield(L, 1, "response");
+ if (ret != LUA_TTABLE) {
+ hlua_pusherror(L, "Lua: 'add_header' internal error: AppletHTTP['response'] "
+ "is expected as an array. %s found", lua_typename(L, ret));
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* check if the header is already registered if it is not
+ * the case, register it.
+ */
+ ret = lua_getfield(L, -1, name);
+ if (ret == LUA_TNIL) {
+
+ /* Entry not found. */
+ lua_pop(L, 1); /* remove the nil. The "response" table is the top of the stack. */
+
+ /* Insert the new header name in the array in the top of the stack.
+ * It left the new array in the top of the stack.
+ */
+ lua_newtable(L);
+ lua_pushvalue(L, 2);
+ lua_pushvalue(L, -2);
+ lua_settable(L, -4);
+
+ } else if (ret != LUA_TTABLE) {
+
+ /* corruption error. */
+ hlua_pusherror(L, "Lua: 'add_header' internal error: AppletHTTP['response']['%s'] "
+ "is expected as an array. %s found", name, lua_typename(L, ret));
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* Now the top of the stack is an array of values. We push
+ * the header value as new entry.
+ */
+ lua_pushvalue(L, 3);
+ ret = lua_rawlen(L, -2);
+ lua_rawseti(L, -2, ret + 1);
+ lua_pushboolean(L, 1);
+ return 1;
+}
+
+/* Lua: AppletHTTP:set_status(code[, reason]). Records the status code (and
+ * optional reason string) to use for the applet response. Returns true on
+ * success, false when the code is outside the 100..599 range.
+ */
+__LJMP static int hlua_applet_http_status(lua_State *L)
+{
+ struct hlua_appctx *actx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ int code = MAY_LJMP(luaL_checkinteger(L, 2));
+ const char *why = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
+ struct hlua_http_ctx *ctx = actx->appctx->svcctx;
+ int valid = (code >= 100 && code <= 599);
+
+ if (valid) {
+ ctx->status = code;
+ ctx->reason = why;
+ }
+ lua_pushboolean(L, valid);
+ return 1;
+}
+
+
+/* Builds the response status-line and all the headers registered in
+ * AppletHTTP['response'], then commits the block into the response channel
+ * and sets the APPLET_HDR_SENT flag. Transfer-encoding / content-length
+ * headers are validated on the fly; a missing length on an HTTP/1.1
+ * response with a body forces "transfer-encoding: chunked" for keepalive
+ * compliance. Any formatting error raises a Lua error.
+ */
+__LJMP static int hlua_applet_http_send_response(lua_State *L)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct hlua_http_ctx *http_ctx = luactx->appctx->svcctx;
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *res = sc_ic(sc);
+ struct htx *htx;
+ struct htx_sl *sl;
+ struct h1m h1m;
+ const char *status, *reason;
+ const char *name, *value;
+ size_t nlen, vlen;
+ unsigned int flags;
+
+ /* Send the message at once. */
+ htx = htx_from_buf(&res->buf);
+ h1m_init_res(&h1m);
+
+ /* Use the same HTTP version as the request. */
+ status = ultoa_r(http_ctx->status, trash.area, trash.size);
+ reason = http_ctx->reason;
+ if (reason == NULL)
+ reason = http_get_reason(http_ctx->status);
+ if (http_ctx->flags & APPLET_HTTP11) {
+ flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11);
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), ist(status), ist(reason));
+ }
+ else {
+ flags = HTX_SL_F_IS_RESP;
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.0"), ist(status), ist(reason));
+ }
+ if (!sl) {
+ hlua_pusherror(L, "Lua applet http '%s': Failed to create response.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name);
+ WILL_LJMP(lua_error(L));
+ }
+ sl->info.res.status = http_ctx->status;
+
+ /* Get the array associated to the field "response" in the object AppletHTTP. */
+ if (lua_getfield(L, 1, "response") != LUA_TTABLE) {
+ hlua_pusherror(L, "Lua applet http '%s': AppletHTTP['response'] missing.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name);
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* Browse the list of headers. */
+ lua_pushnil(L);
+ while(lua_next(L, -2) != 0) {
+ /* We expect a string as -2 (the header name). */
+ if (lua_type(L, -2) != LUA_TSTRING) {
+ hlua_pusherror(L, "Lua applet http '%s': AppletHTTP['response'][] element must be a string. got %s.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ lua_typename(L, lua_type(L, -2)));
+ WILL_LJMP(lua_error(L));
+ }
+ name = lua_tolstring(L, -2, &nlen);
+
+ /* We expect an array as -1 (the list of values). */
+ if (lua_type(L, -1) != LUA_TTABLE) {
+ hlua_pusherror(L, "Lua applet http '%s': AppletHTTP['response']['%s'] element must be an table. got %s.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name,
+ lua_typename(L, lua_type(L, -1)));
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* Browse the table which is on the top of the stack. */
+ lua_pushnil(L);
+ while(lua_next(L, -2) != 0) {
+ int id;
+
+ /* We expect a number as -2 (the value index). */
+ if (lua_type(L, -2) != LUA_TNUMBER) {
+ hlua_pusherror(L, "Lua applet http '%s': AppletHTTP['response']['%s'][] element must be a number. got %s.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name,
+ lua_typename(L, lua_type(L, -2)));
+ WILL_LJMP(lua_error(L));
+ }
+ id = lua_tointeger(L, -2);
+
+ /* We expect a string as -1 (the header value). */
+ if (lua_type(L, -1) != LUA_TSTRING) {
+ hlua_pusherror(L, "Lua applet http '%s': AppletHTTP['response']['%s'][%d] element must be a string. got %s.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name, id,
+ lua_typename(L, lua_type(L, -1)));
+ WILL_LJMP(lua_error(L));
+ }
+ value = lua_tolstring(L, -1, &vlen);
+
+ /* Simple Protocol checks. */
+ if (isteqi(ist2(name, nlen), ist("transfer-encoding"))) {
+ int ret;
+
+ ret = h1_parse_xfer_enc_header(&h1m, ist2(value, vlen));
+ if (ret < 0) {
+ hlua_pusherror(L, "Lua applet http '%s': Invalid '%s' header.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name);
+ WILL_LJMP(lua_error(L));
+ }
+ else if (ret == 0)
+ goto next; /* Skip it */
+ }
+ else if (isteqi(ist2(name, nlen), ist("content-length"))) {
+ struct ist v = ist2(value, vlen);
+ int ret;
+
+ ret = h1_parse_cont_len_header(&h1m, &v);
+ if (ret < 0) {
+ hlua_pusherror(L, "Lua applet http '%s': Invalid '%s' header.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name);
+ WILL_LJMP(lua_error(L));
+ }
+ else if (ret == 0)
+ goto next; /* Skip it */
+ }
+
+ /* Add a new header */
+ if (!htx_add_header(htx, ist2(name, nlen), ist2(value, vlen))) {
+ hlua_pusherror(L, "Lua applet http '%s': Failed to add header '%s' in the response.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name,
+ name);
+ WILL_LJMP(lua_error(L));
+ }
+ next:
+ /* Remove the value from the stack, and get next element with a remaining key. */
+ lua_pop(L, 1);
+ }
+
+ /* Remove the array from the stack, and get next element with a remaining string. */
+ lua_pop(L, 1);
+ }
+
+ if (h1m.flags & H1_MF_CHNK)
+ h1m.flags &= ~H1_MF_CLEN;
+ if (h1m.flags & (H1_MF_CLEN|H1_MF_CHNK))
+ h1m.flags |= H1_MF_XFER_LEN;
+
+ /* Set HTX start-line flags */
+ if (h1m.flags & H1_MF_XFER_ENC)
+ flags |= HTX_SL_F_XFER_ENC;
+ if (h1m.flags & H1_MF_XFER_LEN) {
+ flags |= HTX_SL_F_XFER_LEN;
+ if (h1m.flags & H1_MF_CHNK)
+ flags |= HTX_SL_F_CHNK;
+ else if (h1m.flags & H1_MF_CLEN)
+ flags |= HTX_SL_F_CLEN;
+ if (h1m.body_len == 0)
+ flags |= HTX_SL_F_BODYLESS;
+ }
+ sl->flags |= flags;
+
+ /* If we don't have a content-length set, and the HTTP version is 1.1
+ * and the status code implies the presence of a message body, we must
+ * announce a transfer encoding chunked. This is required by haproxy
+ * for the keepalive compliance. If the applet announces a transfer-encoding
+ * chunked itself, don't do anything.
+ */
+ if ((flags & (HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN)) == HTX_SL_F_VER_11 &&
+ http_ctx->status >= 200 && http_ctx->status != 204 && http_ctx->status != 304) {
+ /* Add a new header. BUG FIX: the start-line flags live in the
+ * HTX_SL_F_* namespace; the previous code OR'ed H1_MF_CHNK and
+ * H1_MF_XFER_LEN (h1m flag values) into sl->flags, setting
+ * unrelated bits. Use the HTX_SL_F_* equivalents as done above.
+ */
+ sl->flags |= (HTX_SL_F_XFER_ENC|HTX_SL_F_XFER_LEN|HTX_SL_F_CHNK);
+ if (!htx_add_header(htx, ist("transfer-encoding"), ist("chunked"))) {
+ hlua_pusherror(L, "Lua applet http '%s': Failed to add header 'transfer-encoding' in the response.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name);
+ WILL_LJMP(lua_error(L));
+ }
+ }
+
+ /* Finalize headers. */
+ if (!htx_add_endof(htx, HTX_BLK_EOH)) {
+ hlua_pusherror(L, "Lua applet http '%s': Failed create the response.\n",
+ luactx->appctx->rule->arg.hlua_rule->fcn->name);
+ WILL_LJMP(lua_error(L));
+ }
+
+ /* Refuse header blocks that would not leave room for rewrites. */
+ if (htx_used_space(htx) > b_size(&res->buf) - global.tune.maxrewrite) {
+ b_reset(&res->buf);
+ hlua_pusherror(L, "Lua: 'start_response': response header block too big");
+ WILL_LJMP(lua_error(L));
+ }
+
+ htx_to_buf(htx, &res->buf);
+ channel_add_input(res, htx->data);
+
+ /* Headers sent, set the flag. */
+ http_ctx->flags |= APPLET_HDR_SENT;
+ return 0;
+
+}
+/* We will build the status line and the headers of the HTTP response.
+ * We will try to send it at once. If it is not possible, we give back
+ * the hand, waiting for more room.
+ */
+__LJMP static int hlua_applet_http_start_response_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_appctx *luactx = MAY_LJMP(hlua_checkapplet_http(L, 1));
+ struct stconn *sc = appctx_sc(luactx->appctx);
+ struct channel *res = sc_ic(sc);
+
+ /* Pending output data: ask for room and yield until it is flushed. */
+ if (co_data(res)) {
+ sc_need_room(sc, -1);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_applet_http_start_response_yield, TICK_ETERNITY, 0));
+ }
+ return MAY_LJMP(hlua_applet_http_send_response(L));
+}
+
+
+/* Lua: AppletHTTP:start_response(). Formats and sends the response
+ * status-line and headers, yielding as long as the output channel
+ * has no room.
+ */
+__LJMP static int hlua_applet_http_start_response(lua_State *L)
+{
+ return MAY_LJMP(hlua_applet_http_start_response_yield(L, 0, 0));
+}
+
+/*
+ *
+ *
+ * Class HTTP
+ *
+ *
+ */
+
+/* Returns a struct hlua_txn if the stack entry "ud" is
+ * a class HTTP object, otherwise it throws an error.
+ */
+__LJMP static struct hlua_txn *hlua_checkhttp(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_http_ref));
+}
+
+/* This function creates and pushes on the stack an HTTP object
+ * based on the current TXN. Returns 1 on success, 0 when the Lua
+ * stack cannot be grown.
+ */
+static int hlua_http_new(lua_State *L, struct hlua_txn *txn)
+{
+ struct hlua_txn *htxn;
+
+ /* Check stack size. */
+ if (!lua_checkstack(L, 3))
+ return 0;
+
+ /* Create the object: obj[0] = userdata.
+ * Note that the base of the HTTP object is the
+ * same as the TXN object.
+ */
+ lua_newtable(L);
+ htxn = lua_newuserdata(L, sizeof(*htxn));
+ lua_rawseti(L, -2, 0);
+
+ /* Copy the TXN context into the new userdata. */
+ htxn->s = txn->s;
+ htxn->p = txn->p;
+ htxn->dir = txn->dir;
+ htxn->flags = txn->flags;
+
+ /* Push the class HTTP metatable and affect it to the table. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_http_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+/* This function creates and returns an array containing the status-line
+ * elements: version/code/reason for a response, method/uri/version for a
+ * request. This function does not fail.
+ */
+__LJMP static int hlua_http_get_stline(lua_State *L, struct htx_sl *sl)
+{
+ /* Create the table. */
+ lua_newtable(L);
+
+ if (sl->flags & HTX_SL_F_IS_RESP) {
+ lua_pushstring(L, "version");
+ lua_pushlstring(L, HTX_SL_RES_VPTR(sl), HTX_SL_RES_VLEN(sl));
+ lua_settable(L, -3);
+ lua_pushstring(L, "code");
+ lua_pushlstring(L, HTX_SL_RES_CPTR(sl), HTX_SL_RES_CLEN(sl));
+ lua_settable(L, -3);
+ lua_pushstring(L, "reason");
+ lua_pushlstring(L, HTX_SL_RES_RPTR(sl), HTX_SL_RES_RLEN(sl));
+ lua_settable(L, -3);
+ }
+ else {
+ lua_pushstring(L, "method");
+ lua_pushlstring(L, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl));
+ lua_settable(L, -3);
+ lua_pushstring(L, "uri");
+ lua_pushlstring(L, HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl));
+ lua_settable(L, -3);
+ lua_pushstring(L, "version");
+ lua_pushlstring(L, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl));
+ lua_settable(L, -3);
+ }
+ return 1;
+}
+
+/* This function creates and returns an array of HTTP headers, mapping each
+ * header name to an array of its values. This function does not fail. It is
+ * used as a wrapper by the 2 following functions.
+ */
+__LJMP static int hlua_http_get_headers(lua_State *L, struct http_msg *msg)
+{
+ struct htx *htx;
+ int32_t pos;
+
+ /* Create the table. */
+ lua_newtable(L);
+
+
+ htx = htxbuf(&msg->chn->buf);
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist n, v;
+ int len;
+
+ /* Only HDR blocks are collected; EOH ends the scan. */
+ if (type == HTX_BLK_HDR) {
+ n = htx_get_blk_name(htx,blk);
+ v = htx_get_blk_value(htx, blk);
+ }
+ else if (type == HTX_BLK_EOH)
+ break;
+ else
+ continue;
+
+ /* Check for existing entry:
+ * assume that the table is on the top of the stack, and
+ * push the key in the stack, the function lua_gettable()
+ * performs the lookup.
+ */
+ lua_pushlstring(L, n.ptr, n.len);
+ lua_gettable(L, -2);
+
+ switch (lua_type(L, -1)) {
+ case LUA_TNIL:
+ /* Table not found, create it. */
+ lua_pop(L, 1); /* remove the nil value. */
+ lua_pushlstring(L, n.ptr, n.len); /* push the header name as key. */
+ lua_newtable(L); /* create and push empty table. */
+ lua_pushlstring(L, v.ptr, v.len); /* push header value. */
+ lua_rawseti(L, -2, 0); /* index header value (pop it). */
+ lua_rawset(L, -3); /* index new table with header name (pop the values). */
+ break;
+
+ case LUA_TTABLE:
+ /* Entry found: push the value in the table. */
+ len = lua_rawlen(L, -1);
+ lua_pushlstring(L, v.ptr, v.len); /* push header value. */
+ lua_rawseti(L, -2, len+1); /* index header value (pop it). */
+ lua_pop(L, 1); /* remove the table (it is stored in the main table). */
+ break;
+
+ default:
+ /* Other cases are errors. */
+ hlua_pusherror(L, "internal error during the parsing of headers.");
+ WILL_LJMP(lua_error(L));
+ }
+ }
+ return 1;
+}
+
+/* Lua: HTTP:req_get_headers() -- return the request headers as a table. */
+__LJMP static int hlua_http_req_get_headers(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 1, "req_get_headers"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_get_headers(L, &t->s->txn->req);
+}
+
+/* Lua: HTTP:res_get_headers() -- return the response headers as a table. */
+__LJMP static int hlua_http_res_get_headers(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 1, "res_get_headers"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_get_headers(L, &t->s->txn->rsp);
+}
+
+/* This function replaces a full header, or just a value in
+ * the request or in the response. It is a wrapper for the
+ * 4 following functions. The regex is compiled per call and
+ * freed before returning.
+ */
+__LJMP static inline int hlua_http_rep_hdr(lua_State *L, struct http_msg *msg, int full)
+{
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ const char *reg = MAY_LJMP(luaL_checkstring(L, 3));
+ const char *value = MAY_LJMP(luaL_checkstring(L, 4));
+ struct htx *htx;
+ struct my_regex *re;
+
+ if (!(re = regex_comp(reg, 1, 1, NULL)))
+ WILL_LJMP(luaL_argerror(L, 3, "invalid regex"));
+
+ htx = htxbuf(&msg->chn->buf);
+ http_replace_hdrs(chn_strm(msg->chn), htx, ist2(name, name_len), value, re, full);
+ regex_free(re);
+ return 0;
+}
+
+/* Lua: HTTP:req_rep_hdr(name, regex, replace) -- replace full matching header
+ * lines in the request. */
+__LJMP static int hlua_http_req_rep_hdr(lua_State *L)
+{
+ struct hlua_txn *htxn;
+
+ MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
+ htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ return MAY_LJMP(hlua_http_rep_hdr(L, &htxn->s->txn->req, 1));
+}
+
+/* Lua: HTTP:res_rep_hdr(name, regex, replace) -- replace full matching header
+ * lines in the response. */
+__LJMP static int hlua_http_res_rep_hdr(lua_State *L)
+{
+ struct hlua_txn *htxn;
+
+ MAY_LJMP(check_args(L, 4, "res_rep_hdr"));
+ htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+
+ if (htxn->dir != SMP_OPT_DIR_RES || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ return MAY_LJMP(hlua_http_rep_hdr(L, &htxn->s->txn->rsp, 1));
+}
+
+/* Lua: HTTP:req_rep_val(name, regex, replace) -- replace matching
+ * comma-separated values in request headers. BUG FIX: the arg-count
+ * error message previously reported "req_rep_hdr" (copy-paste).
+ */
+__LJMP static int hlua_http_req_rep_val(lua_State *L)
+{
+ struct hlua_txn *htxn;
+
+ MAY_LJMP(check_args(L, 4, "req_rep_val"));
+ htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ return MAY_LJMP(hlua_http_rep_hdr(L, &htxn->s->txn->req, 0));
+}
+
+/* Lua: HTTP:res_rep_val(name, regex, replace) -- replace matching
+ * comma-separated values in response headers. */
+__LJMP static int hlua_http_res_rep_val(lua_State *L)
+{
+ struct hlua_txn *htxn;
+
+ MAY_LJMP(check_args(L, 4, "res_rep_val"));
+ htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+
+ if (htxn->dir != SMP_OPT_DIR_RES || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ return MAY_LJMP(hlua_http_rep_hdr(L, &htxn->s->txn->rsp, 0));
+}
+
+/* This function deletes all the occurrences of a header.
+ * It is a wrapper for the 2 following functions.
+ */
+__LJMP static inline int hlua_http_del_hdr(lua_State *L, struct http_msg *msg)
+{
+ size_t len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct http_hdr_ctx ctx;
+
+ /* Restart the lookup from the beginning and remove every match. */
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist2(name, len), &ctx, 1))
+ http_remove_header(htx, &ctx);
+ return 0;
+}
+
+/* Lua: HTTP:req_del_hdr(name) -- remove all <name> headers from the request. */
+__LJMP static int hlua_http_req_del_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 2, "req_del_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_del_hdr(L, &t->s->txn->req);
+}
+
+/* Lua: HTTP:res_del_hdr(name) -- remove all <name> headers from the response. */
+__LJMP static int hlua_http_res_del_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 2, "res_del_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_del_hdr(L, &t->s->txn->rsp);
+}
+
+/* This function adds a header. It is a wrapper used by
+ * the 2 following functions.
+ *
+ * NOTE(review): a boolean reporting the htx_add_header() result is pushed
+ * on the stack but the function returns 0, so the status never reaches
+ * the Lua caller through the wrappers that return this value -- confirm
+ * whether this is intended.
+ */
+__LJMP static inline int hlua_http_add_hdr(lua_State *L, struct http_msg *msg)
+{
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ size_t value_len;
+ const char *value = MAY_LJMP(luaL_checklstring(L, 3, &value_len));
+ struct htx *htx = htxbuf(&msg->chn->buf);
+
+ lua_pushboolean(L, http_add_header(htx, ist2(name, name_len),
+ ist2(value, value_len)));
+ return 0;
+}
+
+/* Lua: HTTP:req_add_hdr(name, value) -- append a header to the request. */
+__LJMP static int hlua_http_req_add_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 3, "req_add_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_add_hdr(L, &t->s->txn->req);
+}
+
+/* Lua: HTTP:res_add_hdr(name, value) -- append a header to the response. */
+__LJMP static int hlua_http_res_add_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 3, "res_add_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_add_hdr(L, &t->s->txn->rsp);
+}
+
+/* Lua: HTTP:req_set_hdr(name, value) -- drop all <name> request headers,
+ * then add a single one with <value>. */
+static int hlua_http_req_set_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 3, "req_set_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+ hlua_http_del_hdr(L, &t->s->txn->req);
+ return hlua_http_add_hdr(L, &t->s->txn->req);
+}
+
+/* Lua: HTTP:res_set_hdr(name, value) -- drop all <name> response headers,
+ * then add a single one with <value>. */
+static int hlua_http_res_set_hdr(lua_State *L)
+{
+ struct hlua_txn *t;
+
+ MAY_LJMP(check_args(L, 3, "res_set_hdr"));
+ t = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (!IS_HTX_STRM(t->s) || t->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+ hlua_http_del_hdr(L, &t->s->txn->rsp);
+ return hlua_http_add_hdr(L, &t->s->txn->rsp);
+}
+
+/* This function sets the request method. Returns a boolean reporting
+ * whether the start-line rewrite succeeded. */
+static int hlua_http_req_set_meth(lua_State *L)
+{
+ struct hlua_txn *htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ lua_pushboolean(L, http_req_replace_stline(0, name, name_len, htxn->p, htxn->s) != -1);
+ return 1;
+}
+
+/* This function sets the request path. Returns a boolean reporting
+ * whether the start-line rewrite succeeded. */
+static int hlua_http_req_set_path(lua_State *L)
+{
+ struct hlua_txn *htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ lua_pushboolean(L, http_req_replace_stline(1, name, name_len, htxn->p, htxn->s) != -1);
+ return 1;
+}
+
+/* This function sets the request query-string. The value is copied into
+ * the trash chunk with a '?' prepended before rewriting the start-line.
+ */
+static int hlua_http_req_set_query(lua_State *L)
+{
+ struct hlua_txn *htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ /* Check length: one byte is reserved for the '?' prefix. */
+ if (name_len > trash.size - 1) {
+ lua_pushboolean(L, 0);
+ return 1;
+ }
+
+ /* Prepend the question mark as prefix. */
+ chunk_reset(&trash);
+ trash.area[trash.data++] = '?';
+ memcpy(trash.area + trash.data, name, name_len);
+ trash.data += name_len;
+
+ lua_pushboolean(L,
+ http_req_replace_stline(2, trash.area, trash.data, htxn->p, htxn->s) != -1);
+ return 1;
+}
+
+/* This function sets the request URI. Returns a boolean reporting
+ * whether the start-line rewrite succeeded. */
+static int hlua_http_req_set_uri(lua_State *L)
+{
+ struct hlua_txn *htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ size_t name_len;
+ const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if (htxn->dir != SMP_OPT_DIR_REQ || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ lua_pushboolean(L, http_req_replace_stline(3, name, name_len, htxn->p, htxn->s) != -1);
+ return 1;
+}
+
+/* This function sets the response status code and, optionally, the reason. */
+static int hlua_http_res_set_status(lua_State *L)
+{
+ struct hlua_txn *htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2));
+ const char *str = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
+ const struct ist reason = ist2(str, (str ? strlen(str) : 0));
+
+ if (htxn->dir != SMP_OPT_DIR_RES || !IS_HTX_STRM(htxn->s))
+ WILL_LJMP(lua_error(L));
+
+ http_res_set_status(code, reason, htxn->s);
+ return 0;
+}
+
+/*
+ *
+ *
+ * Class HTTPMessage
+ *
+ *
+ */
+
+/* Returns a struct http_msg if the stack entry "ud" is a class HTTPMessage,
+ * otherwise it throws a Lua error.
+ */
+__LJMP static struct http_msg *hlua_checkhttpmsg(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_http_msg_ref));
+}
+
+/* Creates and pushes on the stack an HTTPMessage object wrapping <msg>.
+ * Returns 1 on success, 0 when the Lua stack cannot be grown or when the
+ * nested channel object cannot be created.
+ */
+static int hlua_http_msg_new(lua_State *L, struct http_msg *msg)
+{
+ /* Check stack size. */
+ if (!lua_checkstack(L, 3))
+ return 0;
+
+ lua_newtable(L);
+ lua_pushlightuserdata(L, msg);
+ lua_rawseti(L, -2, 0);
+
+ /* Create the "channel" field that contains the request channel object. */
+ lua_pushstring(L, "channel");
+ if (!hlua_channel_new(L, msg->chn))
+ return 0;
+ lua_rawset(L, -3);
+
+ /* Push the class HTTPMessage metatable and affect it to the table. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_http_msg_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+/* Helper function returning a filter attached to the HTTP message at the
+ * position <ud> in the stack, filling the current offset and length of the
+ * filter. If no filter is attached, NULL is returned and <offset> and <len>
+ * are filled with output and input length respectively.
+ */
+static struct filter *hlua_http_msg_filter(lua_State *L, int ud, struct http_msg *msg, size_t *offset, size_t *len)
+{
+ struct channel *chn = msg->chn;
+ struct htx *htx = htxbuf(&chn->buf);
+ struct filter *filter = NULL;
+
+ /* Defaults: output = bytes already scheduled, input = the rest. */
+ *offset = co_data(msg->chn);
+ *len = htx->data - co_data(msg->chn);
+
+ if (lua_getfield(L, ud, "__filter") == LUA_TLIGHTUSERDATA) {
+ filter = lua_touserdata (L, -1);
+ /* Once the payload is being filtered, use the filter's own view. */
+ if (msg->msg_state >= HTTP_MSG_DATA) {
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ *offset = flt_ctx->cur_off[CHN_IDX(chn)];
+ *len = flt_ctx->cur_len[CHN_IDX(chn)];
+ }
+ }
+
+ /* Pop the "__filter" field pushed by lua_getfield() above. */
+ lua_pop(L, 1);
+ return filter;
+}
+
+/* Lua: HTTPMessage:is_resp() -- true when the message belongs to the
+ * response channel. */
+__LJMP static int hlua_http_msg_is_resp(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 1, "is_resp"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ lua_pushboolean(L, (m->chn->flags & CF_ISRESP) ? 1 : 0);
+ return 1;
+}
+
+/* Returns an array containing the status-line elements of the HTTP
+ * message. It relies on hlua_http_get_stline(). Only valid while the
+ * message state has not gone past HTTP_MSG_BODY.
+ */
+__LJMP static int hlua_http_msg_get_stline(lua_State *L)
+{
+ struct http_msg *msg;
+ struct htx *htx;
+ struct htx_sl *sl;
+
+ MAY_LJMP(check_args(L, 1, "get_stline"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+
+ if (msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ htx = htxbuf(&msg->chn->buf);
+ sl = http_get_stline(htx);
+ if (!sl)
+ return 0;
+ return hlua_http_get_stline(L, sl);
+}
+
+/* Lua: HTTPMessage:get_headers() -- table of all headers; only valid while
+ * the message state has not gone past HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_get_headers(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 1, "get_headers"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_get_headers(L, m);
+}
+
+/* Lua: HTTPMessage:del_header(name) -- remove every occurrence of the
+ * named header; only valid until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_del_hdr(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 2, "del_header"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_del_hdr(L, m);
+}
+
+/* Lua: HTTPMessage:rep_header(name, regex, replace) -- regex-replace whole
+ * header value lines; only valid until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_rep_hdr(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 4, "rep_header"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_rep_hdr(L, m, 1);
+}
+
+/* Lua: HTTPMessage:rep_value(name, regex, replace) -- regex-replace each
+ * comma-separated header value; only valid until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_rep_val(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 4, "rep_value"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_rep_hdr(L, m, 0);
+}
+
+/* Lua: HTTPMessage:add_header(name, value) -- append a header; only valid
+ * until HTTP_MSG_BODY. Relies on hlua_http_add_hdr(). */
+__LJMP static int hlua_http_msg_add_hdr(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 3, "add_header"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ return hlua_http_add_hdr(L, m);
+}
+
+/* Lua: HTTPMessage:set_header(name, value) -- drop every existing <name>
+ * header then add a single one; only valid until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_set_hdr(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 3, "set_header"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ if (m->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+ hlua_http_del_hdr(L, m);
+ return hlua_http_add_hdr(L, m);
+}
+
+/* Rewrites the request method. It relies on http_req_replace_stline().
+ * Only valid on the request channel, until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_set_meth(lua_State *L)
+{
+ struct stream *s;
+ struct http_msg *msg;
+ const char *name;
+ size_t name_len;
+
+ MAY_LJMP(check_args(L, 2, "set_method"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if ((msg->chn->flags & CF_ISRESP) || msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ s = chn_strm(msg->chn);
+ lua_pushboolean(L, http_req_replace_stline(0, name, name_len, s->be, s) != -1);
+ return 1;
+}
+
+/* Rewrites the request path. It relies on http_req_replace_stline().
+ * Only valid on the request channel, until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_set_path(lua_State *L)
+{
+ struct stream *s;
+ struct http_msg *msg;
+ const char *name;
+ size_t name_len;
+
+ MAY_LJMP(check_args(L, 2, "set_path"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if ((msg->chn->flags & CF_ISRESP) || msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ s = chn_strm(msg->chn);
+ lua_pushboolean(L, http_req_replace_stline(1, name, name_len, s->be, s) != -1);
+ return 1;
+}
+
+/* Rewrites the request query-string. It relies on http_req_replace_stline().
+ * The value is copied into the trash chunk with a '?' prepended. */
+__LJMP static int hlua_http_msg_set_query(lua_State *L)
+{
+ struct stream *s;
+ struct http_msg *msg;
+ const char *name;
+ size_t name_len;
+
+ MAY_LJMP(check_args(L, 2, "set_query"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if ((msg->chn->flags & CF_ISRESP) || msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ /* Check length: one byte is reserved for the '?' prefix. */
+ if (name_len > trash.size - 1) {
+ lua_pushboolean(L, 0);
+ return 1;
+ }
+
+ /* Prepend the question mark as prefix. */
+ chunk_reset(&trash);
+ trash.area[trash.data++] = '?';
+ memcpy(trash.area + trash.data, name, name_len);
+ trash.data += name_len;
+
+ s = chn_strm(msg->chn);
+ lua_pushboolean(L, http_req_replace_stline(2, trash.area, trash.data, s->be, s) != -1);
+ return 1;
+}
+
+/* Rewrites the request URI. It relies on http_req_replace_stline().
+ * Only valid on the request channel, until HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_set_uri(lua_State *L)
+{
+ struct stream *s;
+ struct http_msg *msg;
+ const char *name;
+ size_t name_len;
+
+ MAY_LJMP(check_args(L, 2, "set_uri"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+
+ if ((msg->chn->flags & CF_ISRESP) || msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ s = chn_strm(msg->chn);
+ lua_pushboolean(L, http_req_replace_stline(3, name, name_len, s->be, s) != -1);
+ return 1;
+}
+
+/* Rewrites the response status code (and optional reason). It relies on
+ * http_res_set_status(). Only valid on the response channel, until
+ * HTTP_MSG_BODY. */
+__LJMP static int hlua_http_msg_set_status(lua_State *L)
+{
+ struct http_msg *msg;
+ unsigned int code;
+ const char *reason;
+ size_t reason_len;
+
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ code = MAY_LJMP(luaL_checkinteger(L, 2));
+ reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, &reason_len));
+
+ if (!(msg->chn->flags & CF_ISRESP) || msg->msg_state > HTTP_MSG_BODY)
+ WILL_LJMP(lua_error(L));
+
+ lua_pushboolean(L, http_res_set_status(code, ist2(reason, reason_len), chn_strm(msg->chn)) != -1);
+ return 1;
+}
+
+/* Lua: HTTPMessage:is_full() -- true when the channel buffer cannot accept
+ * more data. */
+__LJMP static int hlua_http_msg_is_full(lua_State *L)
+{
+ struct http_msg *m;
+
+ MAY_LJMP(check_args(L, 1, "is_full"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ lua_pushboolean(L, channel_full(m->chn, 0) ? 1 : 0);
+ return 1;
+}
+
+/* Returns true if the HTTP message may still receive data: more payload is
+ * expected, the input is not closed and the channel may still receive. */
+__LJMP static int hlua_http_msg_may_recv(lua_State *L)
+{
+ struct http_msg *msg;
+ struct htx *htx;
+
+ MAY_LJMP(check_args(L, 1, "may_recv"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ htx = htxbuf(&msg->chn->buf);
+ lua_pushboolean(L, (htx_expect_more(htx) && !channel_input_closed(msg->chn) && channel_may_recv(msg->chn)));
+ return 1;
+}
+
+/* Returns true if the HTTP message EOM was received (no more payload is
+ * expected). BUG FIX: the arg-count error message previously reported
+ * "may_recv" (copy-paste from hlua_http_msg_may_recv()).
+ */
+__LJMP static int hlua_http_msg_is_eom(lua_State *L)
+{
+ struct http_msg *msg;
+ struct htx *htx;
+
+ MAY_LJMP(check_args(L, 1, "is_eom"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ htx = htxbuf(&msg->chn->buf);
+ lua_pushboolean(L, !htx_expect_more(htx));
+ return 1;
+}
+
+/* Lua: HTTPMessage:input() -- number of bytes available on the input side
+ * of the message (filter view when a filter is attached). Never fails. */
+__LJMP static int hlua_http_msg_get_in_len(lua_State *L)
+{
+ struct http_msg *m;
+ size_t out, in;
+
+ MAY_LJMP(check_args(L, 1, "input"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ hlua_http_msg_filter(L, 1, m, &out, &in);
+ lua_pushinteger(L, in);
+ return 1;
+}
+
+/* Lua: HTTPMessage:output() -- number of bytes available on the output side
+ * of the message (filter view when a filter is attached). Never fails. */
+__LJMP static int hlua_http_msg_get_out_len(lua_State *L)
+{
+ struct http_msg *m;
+ size_t out, in;
+
+ MAY_LJMP(check_args(L, 1, "output"));
+ m = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ hlua_http_msg_filter(L, 1, m, &out, &in);
+ lua_pushinteger(L, out);
+ return 1;
+}
+
+/* Copies at most <len> bytes of DATA blocks from the HTTP message <msg>
+ * starting at the offset <offset> and puts it in a string LUA variable. It
+ * returns the built string length. It stops on the first non-DATA HTX
+ * block. This function is called during the payload filtering, so the headers
+ * are already scheduled for output (from the filter point of view). When no
+ * data can be copied, nil is pushed instead of a string and 0 is returned.
+ */
+static int _hlua_http_msg_dup(struct http_msg *msg, lua_State *L, size_t offset, size_t len)
+{
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct htx_blk *blk;
+ struct htx_ret htxret;
+ luaL_Buffer b;
+ int ret = 0;
+
+ luaL_buffinit(L, &b);
+ /* Locate the block containing <offset>; htxret.ret is the relative
+ * offset inside that block. */
+ htxret = htx_find_offset(htx, offset);
+ for (blk = htxret.blk, offset = htxret.ret; blk && len; blk = htx_get_next_blk(htx, blk)) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist v;
+
+ switch (type) {
+ case HTX_BLK_UNUSED:
+ break;
+
+ case HTX_BLK_DATA:
+ v = htx_get_blk_value(htx, blk);
+ v = istadv(v, offset);
+ v = isttrim(v, len);
+
+ luaL_addlstring(&b, v.ptr, v.len);
+ ret += v.len;
+ break;
+
+ default:
+ /* First block is not DATA: report "no data". */
+ if (!ret)
+ goto no_data;
+ goto end;
+ }
+ /* The relative offset only applies to the first block. */
+ offset = 0;
+ }
+
+end:
+ if (!ret && (htx->flags & HTX_FL_EOM))
+ goto no_data;
+ luaL_pushresult(&b);
+ return ret;
+
+ no_data:
+ /* Remove the empty string and push nil on the stack */
+ lua_pop(L, 1);
+ lua_pushnil(L);
+ return 0;
+}
+
+/* Copies the string <str> to the HTTP message <msg> at the offset
+ * <offset>. This function returns -1 if data cannot be copied. Otherwise, it
+ * returns the amount of data written. This function is responsible to update
+ * the filter context.
+ */
+static int _hlua_http_msg_insert(struct http_msg *msg, struct filter *filter, struct ist str, size_t offset)
+{
+ struct htx *htx = htx_from_buf(&msg->chn->buf);
+ struct htx_ret htxret;
+ int ret = 0;
+
+ /* Nothing to do, just return */
+ if (unlikely(istlen(str) == 0))
+ goto end;
+
+ /* Not enough free space in the HTX buffer. */
+ if (istlen(str) > htx_free_data_space(htx)) {
+ ret = -1;
+ goto end;
+ }
+
+ htxret = htx_find_offset(htx, offset);
+ if (!htxret.blk || htx_get_blk_type(htxret.blk) != HTX_BLK_DATA) {
+ /* Offset is past the data blocks: append at the end. */
+ if (!htx_add_last_data(htx, str))
+ goto end;
+ }
+ else {
+ /* Insert inside an existing DATA block: replace an empty span
+ * at the relative offset with <str>. */
+ struct ist v = htx_get_blk_value(htx, htxret.blk);
+ v.ptr += htxret.ret;
+ v.len = 0;
+ if (!htx_replace_blk_value(htx, htxret.blk, v, str))
+ goto end;
+ }
+ ret = str.len;
+ if (ret) {
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+ flt_update_offsets(filter, msg->chn, ret);
+ flt_ctx->cur_len[CHN_IDX(msg->chn)] += ret;
+ }
+
+ end:
+ htx_to_buf(htx, &msg->chn->buf);
+ return ret;
+}
+
/* Helper function removing at most <len> bytes of DATA blocks at the absolute
 * position <offset>. It stops on the first non-DATA HTX block. This function is
 * called during the payload filtering, so the headers are already scheduled for
 * output (from the filter point of view). This function is responsible to
 * update the filter context (offsets and current length).
 */
static void _hlua_http_msg_delete(struct http_msg *msg, struct filter *filter, size_t offset, size_t len)
{
	struct hlua_flt_ctx *flt_ctx = filter->ctx;
	struct htx *htx = htx_from_buf(&msg->chn->buf);
	struct htx_blk *blk;
	struct htx_ret htxret;
	size_t ret = 0;	/* number of bytes actually removed */

	/* Be sure <len> is always the amount of DATA to remove */
	if (htx->data == offset+len && htx_get_tail_type(htx) == HTX_BLK_DATA) {
		/* When htx tail type == HTX_BLK_DATA, no need to take care
		 * of special blocks like HTX_BLK_EOT.
		 * We simply truncate after offset
		 * (truncate targeted blk and discard the following ones)
		 */
		htx_truncate(htx, offset);
		ret = len;
		goto end;
	}

	htxret = htx_find_offset(htx, offset);
	blk = htxret.blk;
	if (htxret.ret) {
		/* dealing with offset: we need to trim targeted blk.
		 * htxret.ret is the relative offset inside that block.
		 */
		struct ist v;

		if (htx_get_blk_type(blk) != HTX_BLK_DATA)
			goto end;

		v = htx_get_blk_value(htx, blk);
		v = istadv(v, htxret.ret);

		v = isttrim(v, len);
		/* trimming data in blk: discard everything after the offset
		 * (replace 'v' with 'IST_NULL')
		 */
		blk = htx_replace_blk_value(htx, blk, v, IST_NULL);
		if (blk && v.len < len) {
			/* In this case, caller wants to keep removing data,
			 * but we need to spare current blk
			 * because it was already trimmed
			 */
			blk = htx_get_next_blk(htx, blk);
		}
		len -= v.len;
		ret += v.len;
	}


	while (blk && len) {
		/* there is more data that needs to be discarded */
		enum htx_blk_type type = htx_get_blk_type(blk);
		uint32_t sz = htx_get_blksz(blk);

		switch (type) {
		case HTX_BLK_UNUSED:
			/* hole left by a previously removed block: drop it */
			break;

		case HTX_BLK_DATA:
			if (len < sz) {
				/* don't discard whole blk, only part of it
				 * (from the beginning)
				 */
				htx_cut_data_blk(htx, blk, len);
				ret += len;
				goto end;
			}
			break;

		default:
			/* HTX_BLK_EOT blk won't be removed */
			goto end;
		}

		/* Remove all the data block */
		len -= sz;
		ret += sz;
		blk = htx_remove_blk(htx, blk);
	}

end:
	/* NOTE(review): <ret> is size_t; -ret relies on the implicit
	 * conversion to the parameter type of flt_update_offsets() —
	 * confirm that signature takes a signed length.
	 */
	flt_update_offsets(filter, msg->chn, -ret);
	flt_ctx->cur_len[CHN_IDX(msg->chn)] -= ret;
	/* WARNING: we don't call htx_to_buf() on purpose, because we don't want
	 * to loose the EOM flag if the message is empty.
	 */
}
+
/* Copies input data found in an HTTP message. Unlike the channel function used
 * to duplicate raw data, this one can only be called inside a filter, from
 * http_payload callback. So it cannot yield. An exception is returned if it is
 * called from another callback. If nothing was copied, a nil value is pushed on
 * the stack.
 *
 * Lua arguments: msg [, offset [, length]]. A negative offset counts from the
 * end of the input data; length -1 means "up to one buffer".
 */
__LJMP static int hlua_http_msg_get_body(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	size_t output, input;
	int offset, len;

	if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
		WILL_LJMP(luaL_error(L, "'data' expects at most 2 arguments"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	filter = hlua_http_msg_filter(L, 1, msg, &output, &input);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* no buffered data and the input side is closed: nothing to read */
	if (!ci_data(msg->chn) && channel_input_closed(msg->chn)) {
		lua_pushnil(L);
		return 1;
	}

	/* Optional 2nd argument: relative offset, converted here to an
	 * absolute offset in [output, output+input].
	 * NOTE(review): <offset> is int while <output>/<input> are size_t,
	 * so these comparisons promote to unsigned — confirm the intended
	 * range checks hold for large values.
	 */
	offset = output;
	if (lua_gettop(L) > 1) {
		offset = MAY_LJMP(luaL_checkinteger(L, 2));
		if (offset < 0)
			offset = MAX(0, (int)input + offset);
		offset += output;
		if (offset < output || offset > input + output) {
			lua_pushfstring(L, "offset out of range.");
			WILL_LJMP(lua_error(L));
		}
	}
	/* Optional 3rd argument: length (default: everything after <offset>) */
	len = output + input - offset;
	if (lua_gettop(L) == 3) {
		len = MAY_LJMP(luaL_checkinteger(L, 3));
		if (!len)
			goto dup;
		if (len == -1)
			len = global.tune.bufsize;
		if (len < 0) {
			lua_pushfstring(L, "length out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

  dup:
	/* pushes the copied string (or nil) on the stack */
	_hlua_http_msg_dup(msg, L, offset, len);
	return 1;
}
+
+/* Appends a string to the HTTP message, after all existing DATA blocks but
+ * before the trailers, if any. It returns the amount of data written or -1 if
+ * nothing was copied. Unlike the channel function used to append data, this one
+ * can only be called inside a filter, from http_payload callback. So it cannot
+ * yield. An exception is returned if it is called from another callback.
+ */
+__LJMP static int hlua_http_msg_append(lua_State *L)
+{
+ struct http_msg *msg;
+ struct filter *filter;
+ const char *str;
+ size_t offset, len, sz;
+ int ret;
+
+ MAY_LJMP(check_args(L, 2, "append"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+
+ if (msg->msg_state < HTTP_MSG_DATA)
+ WILL_LJMP(lua_error(L));
+
+ str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
+ filter = hlua_http_msg_filter(L, 1, msg, &offset, &len);
+ if (!filter || !hlua_filter_from_payload(filter))
+ WILL_LJMP(lua_error(L));
+
+ ret = _hlua_http_msg_insert(msg, filter, ist2(str, sz), offset+len);
+ lua_pushinteger(L, ret);
+ return 1;
+}
+
+/* Prepends a string to the HTTP message, before all existing DATA blocks. It
+ * returns the amount of data written or -1 if nothing was copied. Unlike the
+ * channel function used to prepend data, this one can only be called inside a
+ * filter, from http_payload callback. So it cannot yield. An exception is
+ * returned if it is called from another callback.
+ */
+__LJMP static int hlua_http_msg_prepend(lua_State *L)
+{
+ struct http_msg *msg;
+ struct filter *filter;
+ const char *str;
+ size_t offset, len, sz;
+ int ret;
+
+ MAY_LJMP(check_args(L, 2, "prepend"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+
+ if (msg->msg_state < HTTP_MSG_DATA)
+ WILL_LJMP(lua_error(L));
+
+ str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
+ filter = hlua_http_msg_filter(L, 1, msg, &offset, &len);
+ if (!filter || !hlua_filter_from_payload(filter))
+ WILL_LJMP(lua_error(L));
+
+ ret = _hlua_http_msg_insert(msg, filter, ist2(str, sz), offset);
+ lua_pushinteger(L, ret);
+ return 1;
+}
+
/* Inserts a string to the HTTP message at a given offset. By default the string
 * is appended at the end of DATA blocks. It returns the amount of data written
 * or -1 if nothing was copied. Unlike the channel function used to insert data,
 * this one can only be called inside a filter, from http_payload callback. So
 * it cannot yield. An exception is returned if it is called from another
 * callback.
 *
 * Lua arguments: msg, string [, offset]. A negative offset counts from the
 * end of the input data.
 */
__LJMP static int hlua_http_msg_insert_data(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	const char *str;
	size_t input, output, sz;
	int offset;
	int ret;

	if (lua_gettop(L) < 2 || lua_gettop(L) > 3)
		WILL_LJMP(luaL_error(L, "'insert' expects at least 1 argument and at most 2 arguments"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
	filter = hlua_http_msg_filter(L, 1, msg, &output, &input);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* Optional 3rd argument: relative offset, converted to an absolute
	 * offset bounded by output+input.
	 */
	offset = output;
	if (lua_gettop(L) > 2) {
		offset = MAY_LJMP(luaL_checkinteger(L, 3));
		if (offset < 0)
			offset = MAX(0, (int)input + offset);
		offset += output;
		if (offset > output + input) {
			lua_pushfstring(L, "offset out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

	/* pushes the number of bytes written (or -1) */
	ret = _hlua_http_msg_insert(msg, filter, ist2(str, sz), offset);
	lua_pushinteger(L, ret);
	return 1;
}
+
/* Removes a given amount of data from the HTTP message at a given offset. By
 * default all DATA blocks are removed. It returns the amount of data
 * removed. Unlike the channel function used to remove data, this one can only
 * be called inside a filter, from http_payload callback. So it cannot yield. An
 * exception is returned if it is called from another callback.
 *
 * Lua arguments: msg [, offset [, length]]. A negative offset counts from the
 * end of the input data; length -1 means "everything after <offset>".
 */
__LJMP static int hlua_http_msg_del_data(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	size_t input, output;
	int offset, len;

	if (lua_gettop(L) < 1 || lua_gettop(L) > 3)
		WILL_LJMP(luaL_error(L, "'remove' expects at most 2 arguments"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	filter = hlua_http_msg_filter(L, 1, msg, &output, &input);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* Optional 2nd argument: relative offset, converted to an absolute
	 * offset bounded by output+input.
	 */
	offset = output;
	if (lua_gettop(L) > 1) {
		offset = MAY_LJMP(luaL_checkinteger(L, 2));
		if (offset < 0)
			offset = MAX(0, (int)input + offset);
		offset += output;
		if (offset > output + input) {
			lua_pushfstring(L, "offset out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

	/* Optional 3rd argument: length (default: everything after <offset>).
	 * A zero length is a no-op and directly returns 0.
	 */
	len = output + input - offset;
	if (lua_gettop(L) == 3) {
		len = MAY_LJMP(luaL_checkinteger(L, 3));
		if (!len)
			goto end;
		if (len == -1)
			len = output + input - offset;
		if (len < 0 || offset + len > output + input) {
			lua_pushfstring(L, "length out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

	_hlua_http_msg_delete(msg, filter, offset, len);

  end:
	lua_pushinteger(L, len);
	return 1;
}
+
/* Replaces a given amount of data at the given offset by a string. By default,
 * all remaining data are removed, accordingly to the filter context. It returns
 * the amount of data written or -1 if nothing was copied. Unlike the channel
 * function used to replace data, this one can only be called inside a filter,
 * from http_payload callback. So it cannot yield. An exception is returned if
 * it is called from another callback.
 *
 * Lua arguments: msg, string [, offset [, length]]. Implemented as a delete
 * followed by an insert at the same offset.
 */
__LJMP static int hlua_http_msg_set_data(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	struct htx *htx;
	const char *str;
	size_t input, output, sz;
	int offset, len;
	int ret;

	if (lua_gettop(L) < 2 || lua_gettop(L) > 4)
		WILL_LJMP(luaL_error(L, "'set' expects at least 1 argument and at most 3 arguments"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
	filter = hlua_http_msg_filter(L, 1, msg, &output, &input);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* Optional 3rd argument: relative offset, converted to an absolute
	 * offset in [output, output+input].
	 */
	offset = output;
	if (lua_gettop(L) > 2) {
		offset = MAY_LJMP(luaL_checkinteger(L, 3));
		if (offset < 0)
			offset = MAX(0, (int)input + offset);
		offset += output;
		if (offset < output || offset > input + output) {
			lua_pushfstring(L, "offset out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

	/* Optional 4th argument: length of the data to replace (default:
	 * everything after <offset>).
	 */
	len = output + input - offset;
	if (lua_gettop(L) == 4) {
		len = MAY_LJMP(luaL_checkinteger(L, 4));
		if (!len)
			goto set;
		if (len == -1)
			len = output + input - offset;
		if (len < 0 || offset + len > output + input) {
			lua_pushfstring(L, "length out of range.");
			WILL_LJMP(lua_error(L));
		}
	}

  set:
	/* Be sure we can copied the string once input data will be removed,
	 * i.e. current free space plus the <len> bytes about to be deleted.
	 */
	htx = htx_from_buf(&msg->chn->buf);
	if (sz > htx_free_data_space(htx) + len)
		lua_pushinteger(L, -1);
	else {
		_hlua_http_msg_delete(msg, filter, offset, len);
		ret = _hlua_http_msg_insert(msg, filter, ist2(str, sz), offset);
		lua_pushinteger(L, ret);
	}
	return 1;
}
+
/* Prepends data into an HTTP message and forward it, from the filter point of
 * view. It returns the amount of data written or -1 if nothing was sent. Unlike
 * the channel function used to send data, this one can only be called inside a
 * filter, from http_payload callback. So it cannot yield. An exception is
 * returned if it is called from another callback.
 */
__LJMP static int hlua_http_msg_send(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	struct htx *htx;
	const char *str;
	size_t offset, len, sz;
	int ret;

	MAY_LJMP(check_args(L, 2, "send"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	str = MAY_LJMP(luaL_checklstring(L, 2, &sz));
	filter = hlua_http_msg_filter(L, 1, msg, &offset, &len);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* Return an error if the channel's output is closed */
	if (unlikely(channel_output_closed(msg->chn))) {
		lua_pushinteger(L, -1);
		return 1;
	}

	/* refuse data that would not fit in the HTX message */
	htx = htx_from_buf(&msg->chn->buf);
	if (sz > htx_free_data_space(htx)) {
		lua_pushinteger(L, -1);
		return 1;
	}

	/* insert at the filter's current output position... */
	ret = _hlua_http_msg_insert(msg, filter, ist2(str, sz), offset);
	if (ret > 0) {
		struct hlua_flt_ctx *flt_ctx = filter->ctx;

		/* ...and immediately mark the inserted bytes as forwarded:
		 * advance the filter offset, shrink the pending length.
		 */
		FLT_OFF(filter, msg->chn) += ret;
		flt_ctx->cur_len[CHN_IDX(msg->chn)] -= ret;
		flt_ctx->cur_off[CHN_IDX(msg->chn)] += ret;
	}

	lua_pushinteger(L, ret);
	return 1;
}
+
/* Forwards a given amount of bytes. It return -1 if the channel's output is
 * closed. Otherwise, it returns the number of bytes forwarded. Unlike the
 * channel function used to forward data, this one can only be called inside a
 * filter, from http_payload callback. So it cannot yield. An exception is
 * returned if it is called from another callback. All other functions deal with
 * DATA block, this one not.
*/
__LJMP static int hlua_http_msg_forward(lua_State *L)
{
	struct http_msg *msg;
	struct filter *filter;
	size_t offset, len;
	int fwd, ret = 0;

	MAY_LJMP(check_args(L, 2, "forward"));
	msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));

	/* only available once the payload filtering stage is reached */
	if (msg->msg_state < HTTP_MSG_DATA)
		WILL_LJMP(lua_error(L));

	fwd = MAY_LJMP(luaL_checkinteger(L, 2));
	filter = hlua_http_msg_filter(L, 1, msg, &offset, &len);
	if (!filter || !hlua_filter_from_payload(filter))
		WILL_LJMP(lua_error(L));

	/* Nothing to do, just return */
	if (!fwd)
		goto end;

	/* Return an error if the channel's output is closed */
	if (unlikely(channel_output_closed(msg->chn))) {
		ret = -1;
		goto end;
	}

	/* clamp to the amount of data not yet forwarded by the filter.
	 * NOTE(review): <ret> is int and <len> size_t, so a negative <fwd>
	 * promotes to a huge unsigned value here and gets clamped to <len>
	 * — confirm negative arguments are not expected from Lua.
	 */
	ret = fwd;
	if (ret > len)
		ret = len;

	if (ret) {
		struct hlua_flt_ctx *flt_ctx = filter->ctx;

		/* advance the filter offset and shrink the pending length */
		FLT_OFF(filter, msg->chn) += ret;
		flt_ctx->cur_off[CHN_IDX(msg->chn)] += ret;
		flt_ctx->cur_len[CHN_IDX(msg->chn)] -= ret;
	}

  end:
	lua_pushinteger(L, ret);
	return 1;
}
+
+/* Set EOM flag on the HTX message.
+ *
+ * NOTE: Not sure it is a good idea to manipulate this flag but for now I don't
+ * really know how to do without this feature.
+ */
+__LJMP static int hlua_http_msg_set_eom(lua_State *L)
+{
+ struct http_msg *msg;
+ struct htx *htx;
+
+ MAY_LJMP(check_args(L, 1, "set_eom"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ htx = htxbuf(&msg->chn->buf);
+ htx->flags |= HTX_FL_EOM;
+ return 0;
+}
+
+/* Unset EOM flag on the HTX message.
+ *
+ * NOTE: Not sure it is a good idea to manipulate this flag but for now I don't
+ * really know how to do without this feature.
+ */
+__LJMP static int hlua_http_msg_unset_eom(lua_State *L)
+{
+ struct http_msg *msg;
+ struct htx *htx;
+
+ MAY_LJMP(check_args(L, 1, "set_eom"));
+ msg = MAY_LJMP(hlua_checkhttpmsg(L, 1));
+ htx = htxbuf(&msg->chn->buf);
+ htx->flags &= ~HTX_FL_EOM;
+ return 0;
+}
+
+/*
+ *
+ *
+ * Class HTTPClient
+ *
+ *
+ */
+__LJMP static struct hlua_httpclient *hlua_checkhttpclient(lua_State *L, int ud)
+{
+ return MAY_LJMP(hlua_checkudata(L, ud, class_httpclient_ref));
+}
+
+
+/* stops the httpclient and ask it to kill itself */
+__LJMP static int hlua_httpclient_gc(lua_State *L)
+{
+ struct hlua_httpclient *hlua_hc;
+
+ MAY_LJMP(check_args(L, 1, "__gc"));
+
+ hlua_hc = MAY_LJMP(hlua_checkhttpclient(L, 1));
+
+ if (MT_LIST_DELETE(&hlua_hc->by_hlua)) {
+ /* we won the race against hlua_httpclient_destroy_all() */
+ httpclient_stop_and_destroy(hlua_hc->hc);
+ hlua_hc->hc = NULL;
+ }
+
+ return 0;
+}
+
+
/* Creates a new HTTPClient object: a Lua table whose index 0 holds the
 * userdata and whose metatable is the HTTPClient class. Returns 1 on success
 * (object on the stack), 0 when executed from the main Lua state, and throws
 * on stack or allocation failure.
 */
__LJMP static int hlua_httpclient_new(lua_State *L)
{
	struct hlua_httpclient *hlua_hc;
	struct hlua *hlua;

	/* Get hlua struct, or NULL if we execute from main lua state */
	hlua = hlua_gethlua(L);
	if (!hlua)
		return 0;

	/* Check stack size. */
	if (!lua_checkstack(L, 3)) {
		hlua_pusherror(L, "httpclient: full stack");
		goto err;
	}
	/* Create the object: obj[0] = userdata. */
	lua_newtable(L);
	hlua_hc = MAY_LJMP(lua_newuserdata(L, sizeof(*hlua_hc)));
	lua_rawseti(L, -2, 0);
	memset(hlua_hc, 0, sizeof(*hlua_hc));

	hlua_hc->hc = httpclient_new(hlua, 0, IST_NULL);
	if (!hlua_hc->hc)
		goto err;

	/* track the client so hlua_httpclient_destroy_all() can reach it */
	MT_LIST_APPEND(&hlua->hc_list, &hlua_hc->by_hlua);

	/* Pop a class stream metatable and affect it to the userdata. */
	lua_rawgeti(L, LUA_REGISTRYINDEX, class_httpclient_ref);
	lua_setmetatable(L, -2);

	return 1;

  err:
	/* the error message was pushed above; never returns */
	WILL_LJMP(lua_error(L));
	return 0;
}
+
+
+/*
+ * Callback of the httpclient, this callback wakes the lua task up, once the
+ * httpclient receives some data
+ *
+ */
+
+static void hlua_httpclient_cb(struct httpclient *hc)
+{
+ struct hlua *hlua = hc->caller;
+
+ if (!hlua || !hlua->task)
+ return;
+
+ task_wakeup(hlua->task, TASK_WOKEN_MSG);
+}
+
/*
 * Fill the lua stack with headers from the httpclient response.
 * This works the same way as the hlua_http_get_headers() function: it pushes
 * a table mapping each header name to a table of its values. Returns 1 (the
 * table is left on top of the stack); throws on unexpected stack content.
 */
__LJMP static int hlua_httpclient_get_headers(lua_State *L, struct hlua_httpclient *hlua_hc)
{
	struct http_hdr *hdr;

	lua_newtable(L);

	/* iterate over the IST_NULL-terminated response header array */
	for (hdr = hlua_hc->hc->res.hdrs; hdr && isttest(hdr->n); hdr++) {
		struct ist n, v;
		int len;

		n = hdr->n;
		v = hdr->v;

		/* Check for existing entry:
		 * assume that the table is on the top of the stack, and
		 * push the key in the stack, the function lua_gettable()
		 * perform the lookup.
		 */

		lua_pushlstring(L, n.ptr, n.len);
		lua_gettable(L, -2);

		switch (lua_type(L, -1)) {
		case LUA_TNIL:
			/* Table not found, create it. */
			lua_pop(L, 1); /* remove the nil value. */
			lua_pushlstring(L, n.ptr, n.len); /* push the header name as key. */
			lua_newtable(L); /* create and push empty table. */
			lua_pushlstring(L, v.ptr, v.len); /* push header value. */
			lua_rawseti(L, -2, 0); /* index header value (pop it). */
			lua_rawset(L, -3); /* index new table with header name (pop the values). */
			break;

		case LUA_TTABLE:
			/* Entry found: push the value in the table. */
			len = lua_rawlen(L, -1);
			lua_pushlstring(L, v.ptr, v.len); /* push header value. */
			lua_rawseti(L, -2, len+1); /* index header value (pop it). */
			lua_pop(L, 1); /* remove the table (it is stored in the main table). */
			break;

		default:
			/* Other cases are errors. */
			hlua_pusherror(L, "internal error during the parsing of headers.");
			WILL_LJMP(lua_error(L));
		}
	}
	return 1;
}
+
+/*
+ * Allocate and return an array of http_hdr ist extracted from the <headers> lua table
+ *
+ * Caller must free the result
+ */
+struct http_hdr *hlua_httpclient_table_to_hdrs(lua_State *L)
+{
+ struct http_hdr hdrs[global.tune.max_http_hdr];
+ struct http_hdr *result = NULL;
+ uint32_t hdr_num = 0;
+
+ lua_pushnil(L);
+ while (lua_next(L, -2) != 0) {
+ struct ist name, value;
+ const char *n, *v;
+ size_t nlen, vlen;
+
+ if (!lua_isstring(L, -2) || !lua_istable(L, -1)) {
+ /* Skip element if the key is not a string or if the value is not a table */
+ goto next_hdr;
+ }
+
+ n = lua_tolstring(L, -2, &nlen);
+ name = ist2(n, nlen);
+
+ /* Loop on header's values */
+ lua_pushnil(L);
+ while (lua_next(L, -2)) {
+ if (!lua_isstring(L, -1)) {
+ /* Skip the value if it is not a string */
+ goto next_value;
+ }
+
+ v = lua_tolstring(L, -1, &vlen);
+ value = ist2(v, vlen);
+ name = ist2(n, nlen);
+
+ hdrs[hdr_num].n = istdup(name);
+ hdrs[hdr_num].v = istdup(value);
+
+ hdr_num++;
+
+ next_value:
+ lua_pop(L, 1);
+ }
+
+ next_hdr:
+ lua_pop(L, 1);
+
+ }
+
+ if (hdr_num) {
+ /* alloc and copy the headers in the httpclient struct */
+ result = calloc((hdr_num + 1), sizeof(*result));
+ if (!result)
+ goto skip_headers;
+ memcpy(result, hdrs, sizeof(struct http_hdr) * (hdr_num + 1));
+
+ result[hdr_num].n = IST_NULL;
+ result[hdr_num].v = IST_NULL;
+ }
+
+skip_headers:
+
+ return result;
+}
+
+
/*
 * For each yield, checks if there is some data in the httpclient and push them
 * in the lua buffer, once the httpclient finished its job, push the result on
 * the stack.
 *
 * Expects the "res" table and the "body" key to be on the Lua stack already
 * (pushed by hlua_httpclient_snd_yield() before the first yield), and the
 * luaL_Buffer in hlua_hc->b to be initialized.
 */
__LJMP static int hlua_httpclient_rcv_yield(lua_State *L, int status, lua_KContext ctx)
{
	struct buffer *tr;
	int res;
	struct hlua *hlua = hlua_gethlua(L);
	struct hlua_httpclient *hlua_hc = hlua_checkhttpclient(L, 1);


	/* drain whatever response data is available into the Lua buffer */
	tr = get_trash_chunk();

	res = httpclient_res_xfer(hlua_hc->hc, tr);
	luaL_addlstring(&hlua_hc->b, b_orig(tr), res);

	if (!httpclient_data(hlua_hc->hc) && httpclient_ended(hlua_hc->hc)) {
		/* response complete: finalize "body" and fill the remaining
		 * fields of the "res" table.
		 */
		luaL_pushresult(&hlua_hc->b);
		lua_settable(L, -3);

		lua_pushstring(L, "status");
		lua_pushinteger(L, hlua_hc->hc->res.status);
		lua_settable(L, -3);


		lua_pushstring(L, "reason");
		lua_pushlstring(L, hlua_hc->hc->res.reason.ptr, hlua_hc->hc->res.reason.len);
		lua_settable(L, -3);

		lua_pushstring(L, "headers");
		hlua_httpclient_get_headers(L, hlua_hc);
		lua_settable(L, -3);

		return 1;
	}

	/* more data already pending: reschedule ourselves immediately */
	if (httpclient_data(hlua_hc->hc))
		task_wakeup(hlua->task, TASK_WOKEN_MSG);

	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_httpclient_rcv_yield, TICK_ETERNITY, 0));

	return 0;
}
+
/*
 * Call this when trying to stream a body during a request: transfers the
 * remaining part of the "body" string to the httpclient, yielding until it is
 * fully sent, then prepares the "res" table and hands control over to
 * hlua_httpclient_rcv_yield() to collect the response.
 */
__LJMP static int hlua_httpclient_snd_yield(lua_State *L, int status, lua_KContext ctx)
{
	struct hlua *hlua;
	struct hlua_httpclient *hlua_hc = hlua_checkhttpclient(L, 1);
	const char *body_str = NULL;
	int ret;
	int end = 0;
	size_t buf_len;
	size_t to_send = 0;

	hlua = hlua_gethlua(L);

	if (!hlua || !hlua->task)
		WILL_LJMP(luaL_error(L, "The 'get' function is only allowed in "
				     "'frontend', 'backend' or 'task'"));

	/* no (string) body in the argument table: go straight to receiving */
	ret = lua_getfield(L, -1, "body");
	if (ret != LUA_TSTRING)
		goto rcv;

	body_str = lua_tolstring(L, -1, &buf_len);
	lua_pop(L, 1);

	/* hlua_hc->sent tracks how much of the body was already transferred
	 * across yields.
	 */
	to_send = buf_len - hlua_hc->sent;

	if ((hlua_hc->sent + to_send) >= buf_len)
		end = 1;

	/* the end flag is always set since we are using the whole remaining size */
	hlua_hc->sent += httpclient_req_xfer(hlua_hc->hc, ist2(body_str + hlua_hc->sent, to_send), end);

	if (buf_len > hlua_hc->sent) {
		/* still need to process the buffer */
		MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_httpclient_snd_yield, TICK_ETERNITY, 0));
	} else {
		goto rcv;
		/* we sent the whole request buffer we can recv */
	}
	return 0;

rcv:

	/* we return a "res" object */
	lua_newtable(L);

	/* push the "body" key and init the string buffer; the matching value
	 * will be pushed by hlua_httpclient_rcv_yield() once complete.
	 */
	lua_pushstring(L, "body");
	luaL_buffinit(L, &hlua_hc->b);

	task_wakeup(hlua->task, TASK_WOKEN_MSG);
	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_httpclient_rcv_yield, TICK_ETERNITY, 0));

	return 1;
}
+
/*
 * Send an HTTP request and wait for a response. Common backend of the
 * head/get/put/post/delete methods: parses the argument table (accepted keys:
 * "dst", "url" (mandatory), "timeout", "headers", "body"), generates the
 * request and yields into hlua_httpclient_snd_yield().
 *
 * NOTE(review): the error messages hard-code 'get' whatever <meth> is.
 */

__LJMP static int hlua_httpclient_send(lua_State *L, enum http_meth_t meth)
{
	struct hlua_httpclient *hlua_hc;
	struct http_hdr *hdrs = NULL;
	struct http_hdr *hdrs_i = NULL;
	struct hlua *hlua;
	const char *url_str = NULL;
	const char *body_str = NULL;
	size_t buf_len = 0;
	int ret;

	hlua = hlua_gethlua(L);

	if (!hlua || !hlua->task)
		WILL_LJMP(luaL_error(L, "The 'get' function is only allowed in "
				     "'frontend', 'backend' or 'task'"));

	if (lua_gettop(L) != 2 || lua_type(L, -1) != LUA_TTABLE)
		WILL_LJMP(luaL_error(L, "'get' needs a table as argument"));

	hlua_hc = hlua_checkhttpclient(L, 1);

	/* iterate over the argument table */
	lua_pushnil(L); /* first key */
	while (lua_next(L, 2)) {
		if (strcmp(lua_tostring(L, -2), "dst") == 0) {
			if (httpclient_set_dst(hlua_hc->hc, lua_tostring(L, -1)) < 0)
				WILL_LJMP(luaL_error(L, "Can't use the 'dst' argument"));

		} else if (strcmp(lua_tostring(L, -2), "url") == 0) {
			if (lua_type(L, -1) != LUA_TSTRING)
				WILL_LJMP(luaL_error(L, "invalid parameter in 'url', must be a string"));
			url_str = lua_tostring(L, -1);

		} else if (strcmp(lua_tostring(L, -2), "timeout") == 0) {
			if (lua_type(L, -1) != LUA_TNUMBER)
				WILL_LJMP(luaL_error(L, "invalid parameter in 'timeout', must be a number"));
			httpclient_set_timeout(hlua_hc->hc, lua_tointeger(L, -1));

		} else if (strcmp(lua_tostring(L, -2), "headers") == 0) {
			if (lua_type(L, -1) != LUA_TTABLE)
				WILL_LJMP(luaL_error(L, "invalid parameter in 'headers', must be a table"));
			hdrs = hlua_httpclient_table_to_hdrs(L);

		} else if (strcmp(lua_tostring(L, -2), "body") == 0) {
			if (lua_type(L, -1) != LUA_TSTRING)
				WILL_LJMP(luaL_error(L, "invalid parameter in 'body', must be a string"));
			body_str = lua_tolstring(L, -1, &buf_len);

		} else {
			WILL_LJMP(luaL_error(L, "'%s' invalid parameter name", lua_tostring(L, -2)));
		}
		/* removes 'value'; keeps 'key' for next iteration */
		lua_pop(L, 1);
	}

	if (!url_str) {
		WILL_LJMP(luaL_error(L, "'get' need a 'url' argument"));
		return 0;
	}

	/* reset the body-streaming progress used by snd_yield */
	hlua_hc->sent = 0;

	istfree(&hlua_hc->hc->req.url);
	hlua_hc->hc->req.url = istdup(ist(url_str));
	hlua_hc->hc->req.meth = meth;

	/* update the httpclient callbacks */
	hlua_hc->hc->ops.res_stline = hlua_httpclient_cb;
	hlua_hc->hc->ops.res_headers = hlua_httpclient_cb;
	hlua_hc->hc->ops.res_payload = hlua_httpclient_cb;
	hlua_hc->hc->ops.res_end = hlua_httpclient_cb;

	/* a body is available, it will use the request callback */
	if (body_str && buf_len) {
		hlua_hc->hc->ops.req_payload = hlua_httpclient_cb;
	}

	/* the body itself is streamed later by hlua_httpclient_snd_yield() */
	ret = httpclient_req_gen(hlua_hc->hc, hlua_hc->hc->req.url, meth, hdrs, IST_NULL);

	/* free the temporary headers array: httpclient_req_gen() consumed
	 * copies, so the istdup'ed strings can be released now.
	 */
	hdrs_i = hdrs;
	while (hdrs_i && isttest(hdrs_i->n)) {
		istfree(&hdrs_i->n);
		istfree(&hdrs_i->v);
		hdrs_i++;
	}
	ha_free(&hdrs);


	if (ret != ERR_NONE) {
		WILL_LJMP(luaL_error(L, "Can't generate the HTTP request"));
		return 0;
	}

	if (!httpclient_start(hlua_hc->hc))
		WILL_LJMP(luaL_error(L, "couldn't start the httpclient"));

	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_httpclient_snd_yield, TICK_ETERNITY, 0));

	return 0;
}
+
/*
 * Sends an HTTP HEAD request and wait for a response
 *
 * Lua usage: httpclient:head{url=..., [dst=...], [timeout=...],
 * [headers=...], [body=...]} — see hlua_httpclient_send() for details.
 */
__LJMP static int hlua_httpclient_head(lua_State *L)
{
	return hlua_httpclient_send(L, HTTP_METH_HEAD);
}
+
/*
 * Send an HTTP GET request and wait for a response
 *
 * Lua usage: httpclient:get{url=..., [dst=...], [timeout=...],
 * [headers=...], [body=...]} — see hlua_httpclient_send() for details.
 */
__LJMP static int hlua_httpclient_get(lua_State *L)
{
	return hlua_httpclient_send(L, HTTP_METH_GET);

}
+
/*
 * Sends an HTTP PUT request and wait for a response
 *
 * Lua usage: httpclient:put{url=..., [dst=...], [timeout=...],
 * [headers=...], [body=...]} — see hlua_httpclient_send() for details.
 */
__LJMP static int hlua_httpclient_put(lua_State *L)
{
	return hlua_httpclient_send(L, HTTP_METH_PUT);
}
+
/*
 * Send an HTTP POST request and wait for a response
 *
 * Lua usage: httpclient:post{url=..., [dst=...], [timeout=...],
 * [headers=...], [body=...]} — see hlua_httpclient_send() for details.
 */
__LJMP static int hlua_httpclient_post(lua_State *L)
{
	return hlua_httpclient_send(L, HTTP_METH_POST);
}
+
+
/*
 * Sends an HTTP DELETE request and wait for a response
 *
 * Lua usage: httpclient:delete{url=..., [dst=...], [timeout=...],
 * [headers=...], [body=...]} — see hlua_httpclient_send() for details.
 */
__LJMP static int hlua_httpclient_delete(lua_State *L)
{
	return hlua_httpclient_send(L, HTTP_METH_DELETE);
}
+
+/*
+ *
+ *
+ * Class TXN
+ *
+ *
+ */
+
/* Returns a struct hlua_txn if the stack entry "ud" is
 * a class TXN, otherwise it throws an error.
 */
__LJMP static struct hlua_txn *hlua_checktxn(lua_State *L, int ud)
{
	return MAY_LJMP(hlua_checkudata(L, ud, class_txn_ref));
}
+
/* txn:set_var(name, value [, ifexist]): converts the Lua value to a sample
 * and stores it in the named variable. With a truthy 4th argument, the
 * variable is only set if it already exists. Pushes a boolean reporting
 * whether the store succeeded.
 */
__LJMP static int hlua_set_var(lua_State *L)
{
	struct hlua_txn *htxn;
	const char *name;
	size_t len;
	struct sample smp;

	if (lua_gettop(L) < 3 || lua_gettop(L) > 4)
		WILL_LJMP(luaL_error(L, "'set_var' needs between 3 and 4 arguments"));

	/* It is useles to retrieve the stream, but this function
	 * runs only in a stream context.
	 */
	htxn = MAY_LJMP(hlua_checktxn(L, 1));
	name = MAY_LJMP(luaL_checklstring(L, 2, &len));

	/* Converts the third argument in a sample. */
	memset(&smp, 0, sizeof(smp));
	hlua_lua2smp(L, 3, &smp);

	/* Store the sample in a variable. We don't need to dup the smp, vars API
	 * already takes care of duplicating dynamic var data.
	 */
	smp_set_owner(&smp, htxn->p, htxn->s->sess, htxn->s, htxn->dir & SMP_OPT_DIR);

	if (lua_gettop(L) == 4 && lua_toboolean(L, 4))
		lua_pushboolean(L, vars_set_by_name_ifexist(name, len, &smp) != 0);
	else
		lua_pushboolean(L, vars_set_by_name(name, len, &smp) != 0);

	return 1;
}
+
+__LJMP static int hlua_unset_var(lua_State *L)
+{
+ struct hlua_txn *htxn;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "unset_var"));
+
+ /* It is useles to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ htxn = MAY_LJMP(hlua_checktxn(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+
+ /* Unset the variable. */
+ smp_set_owner(&smp, htxn->p, htxn->s->sess, htxn->s, htxn->dir & SMP_OPT_DIR);
+ lua_pushboolean(L, vars_unset_by_name_ifexist(name, len, &smp) != 0);
+ return 1;
+}
+
+__LJMP static int hlua_get_var(lua_State *L)
+{
+ struct hlua_txn *htxn;
+ const char *name;
+ size_t len;
+ struct sample smp;
+
+ MAY_LJMP(check_args(L, 2, "get_var"));
+
+ /* It is useles to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ htxn = MAY_LJMP(hlua_checktxn(L, 1));
+ name = MAY_LJMP(luaL_checklstring(L, 2, &len));
+
+ smp_set_owner(&smp, htxn->p, htxn->s->sess, htxn->s, htxn->dir & SMP_OPT_DIR);
+ if (!vars_get_by_name(name, len, &smp, NULL)) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ return MAY_LJMP(hlua_smp2lua(L, &smp));
+}
+
+__LJMP static int hlua_set_priv(lua_State *L)
+{
+ struct hlua *hlua;
+
+ MAY_LJMP(check_args(L, 2, "set_priv"));
+
+ /* It is useles to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ MAY_LJMP(hlua_checktxn(L, 1));
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua)
+ return 0;
+
+ /* Remove previous value. */
+ luaL_unref(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ /* Get and store new value. */
+ lua_pushvalue(L, 2); /* Copy the element 2 at the top of the stack. */
+ hlua->Mref = luaL_ref(L, LUA_REGISTRYINDEX); /* pop the previously pushed value. */
+
+ return 0;
+}
+
+__LJMP static int hlua_get_priv(lua_State *L)
+{
+ struct hlua *hlua;
+
+ MAY_LJMP(check_args(L, 1, "get_priv"));
+
+ /* It is useles to retrieve the stream, but this function
+ * runs only in a stream context.
+ */
+ MAY_LJMP(hlua_checktxn(L, 1));
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* Push configuration index in the stack. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, hlua->Mref);
+
+ return 1;
+}
+
+/* Create a stack entry containing a class TXN object for stream <s>,
+ * proxy <p>, direction <dir> and flags <flags>. The object is a table
+ * whose slot 0 holds the hlua_txn userdata, plus the "f"/"sf"/"c"/"sc"
+ * fetch and converter accessors, the "req"/"res" channels, and the HTTP
+ * objects when applicable. This function returns 0 if the stack does not
+ * contain free slots, otherwise it returns 1.
+ */
+static int hlua_txn_new(lua_State *L, struct stream *s, struct proxy *p, int dir, int flags)
+{
+	struct hlua_txn *htxn;
+
+	/* Check stack size. */
+	if (!lua_checkstack(L, 3))
+		return 0;
+
+	/* NOTE: The allocation never fails. A failure
+	 * throws an error, and the function never returns.
+	 * If the throw is not available, the process is aborted.
+	 */
+	/* Create the object: obj[0] = userdata. */
+	lua_newtable(L);
+	htxn = lua_newuserdata(L, sizeof(*htxn));
+	lua_rawseti(L, -2, 0);
+
+	htxn->s = s;
+	htxn->p = p;
+	htxn->dir = dir;
+	htxn->flags = flags;
+
+	/* Create the "f" field that contains a list of fetches. */
+	lua_pushstring(L, "f");
+	if (!hlua_fetches_new(L, htxn, HLUA_F_MAY_USE_HTTP))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Create the "sf" field that contains a list of stringsafe fetches. */
+	lua_pushstring(L, "sf");
+	if (!hlua_fetches_new(L, htxn, HLUA_F_MAY_USE_HTTP | HLUA_F_AS_STRING))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Create the "c" field that contains a list of converters. */
+	lua_pushstring(L, "c");
+	if (!hlua_converters_new(L, htxn, 0))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Create the "sc" field that contains a list of stringsafe converters. */
+	lua_pushstring(L, "sc");
+	if (!hlua_converters_new(L, htxn, HLUA_F_AS_STRING))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Create the "req" field that contains the request channel object. */
+	lua_pushstring(L, "req");
+	if (!hlua_channel_new(L, &s->req))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Create the "res" field that contains the response channel object. */
+	lua_pushstring(L, "res");
+	if (!hlua_channel_new(L, &s->res))
+		return 0;
+	lua_rawset(L, -3);
+
+	/* Creates the HTTP object if the current stream is an HTX stream,
+	 * otherwise the "http" field is set to nil. */
+	lua_pushstring(L, "http");
+	if (IS_HTX_STRM(s)) {
+		if (!hlua_http_new(L, htxn))
+			return 0;
+	}
+	else
+		lua_pushnil(L);
+	lua_rawset(L, -3);
+
+	if ((htxn->flags & HLUA_TXN_CTX_MASK) == HLUA_TXN_FLT_CTX) {
+		/* HTTPMessage object are created when a lua TXN is created from
+		 * a filter context only
+		 */
+
+		/* Creates the HTTP-Request object if the current proxy allows http. */
+		lua_pushstring(L, "http_req");
+		if (p->mode == PR_MODE_HTTP) {
+			if (!hlua_http_msg_new(L, &s->txn->req))
+				return 0;
+		}
+		else
+			lua_pushnil(L);
+		lua_rawset(L, -3);
+
+		/* Creates the HTTP-Response object if the current proxy allows http. */
+		lua_pushstring(L, "http_res");
+		if (p->mode == PR_MODE_HTTP) {
+			if (!hlua_http_msg_new(L, &s->txn->rsp))
+				return 0;
+		}
+		else
+			lua_pushnil(L);
+		lua_rawset(L, -3);
+	}
+
+	/* Pop a class session metatable and affect it to the userdata. */
+	lua_rawgeti(L, LUA_REGISTRYINDEX, class_txn_ref);
+	lua_setmetatable(L, -2);
+
+	return 1;
+}
+
+/* Lua method txn:deflog(msg): logs <msg> on the stream's backend logger
+ * using the stream's current default log level (s->logs.level).
+ */
+__LJMP static int hlua_txn_deflog(lua_State *L)
+{
+	const char *msg;
+	struct hlua_txn *htxn;
+
+	MAY_LJMP(check_args(L, 2, "deflog"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	msg = MAY_LJMP(luaL_checkstring(L, 2));
+
+	hlua_sendlog(htxn->s->be, htxn->s->logs.level, msg);
+	return 0;
+}
+
+/* Lua method txn:log(level, msg): emits <msg> on the stream's backend
+ * logger at integer level <level> (0 .. NB_LOG_LEVELS-1). Throws a Lua
+ * argument error when the level is out of range.
+ */
+__LJMP static int hlua_txn_log(lua_State *L)
+{
+	int level;
+	const char *msg;
+	struct hlua_txn *htxn;
+
+	MAY_LJMP(check_args(L, 3, "log"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	level = MAY_LJMP(luaL_checkinteger(L, 2));
+	msg = MAY_LJMP(luaL_checkstring(L, 3));
+
+	/* The level is the 2nd Lua argument: report the error on index 2
+	 * (it was wrongly reported on index 1, the txn object, before),
+	 * consistent with hlua_txn_set_loglevel().
+	 */
+	if (level < 0 || level >= NB_LOG_LEVELS)
+		WILL_LJMP(luaL_argerror(L, 2, "Invalid loglevel."));
+
+	hlua_sendlog(htxn->s->be, level, msg);
+	return 0;
+}
+
+/* Lua method txn:Debug(msg): shortcut logging <msg> at LOG_DEBUG level
+ * on the stream's backend logger.
+ */
+__LJMP static int hlua_txn_log_debug(lua_State *L)
+{
+	struct hlua_txn *txn;
+	const char *message;
+
+	MAY_LJMP(check_args(L, 2, "Debug"));
+	txn = MAY_LJMP(hlua_checktxn(L, 1));
+	message = MAY_LJMP(luaL_checkstring(L, 2));
+	hlua_sendlog(txn->s->be, LOG_DEBUG, message);
+	return 0;
+}
+
+/* Lua method txn:Info(msg): shortcut logging <msg> at LOG_INFO level
+ * on the stream's backend logger.
+ */
+__LJMP static int hlua_txn_log_info(lua_State *L)
+{
+	struct hlua_txn *txn;
+	const char *message;
+
+	MAY_LJMP(check_args(L, 2, "Info"));
+	txn = MAY_LJMP(hlua_checktxn(L, 1));
+	message = MAY_LJMP(luaL_checkstring(L, 2));
+	hlua_sendlog(txn->s->be, LOG_INFO, message);
+	return 0;
+}
+
+/* Lua method txn:Warning(msg): shortcut logging <msg> at LOG_WARNING
+ * level on the stream's backend logger.
+ */
+__LJMP static int hlua_txn_log_warning(lua_State *L)
+{
+	struct hlua_txn *txn;
+	const char *message;
+
+	MAY_LJMP(check_args(L, 2, "Warning"));
+	txn = MAY_LJMP(hlua_checktxn(L, 1));
+	message = MAY_LJMP(luaL_checkstring(L, 2));
+	hlua_sendlog(txn->s->be, LOG_WARNING, message);
+	return 0;
+}
+
+/* Lua method txn:Alert(msg): shortcut logging <msg> at LOG_ALERT level
+ * on the stream's backend logger.
+ */
+__LJMP static int hlua_txn_log_alert(lua_State *L)
+{
+	struct hlua_txn *txn;
+	const char *message;
+
+	MAY_LJMP(check_args(L, 2, "Alert"));
+	txn = MAY_LJMP(hlua_checktxn(L, 1));
+	message = MAY_LJMP(luaL_checkstring(L, 2));
+	hlua_sendlog(txn->s->be, LOG_ALERT, message);
+	return 0;
+}
+
+/* Lua method txn:set_loglevel(level): sets the stream's default log
+ * level used by txn:deflog(). Throws when <level> is outside 0..7.
+ */
+__LJMP static int hlua_txn_set_loglevel(lua_State *L)
+{
+	struct hlua_txn *htxn;
+	int ll;
+
+	MAY_LJMP(check_args(L, 2, "set_loglevel"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	ll = MAY_LJMP(luaL_checkinteger(L, 2));
+
+	if (ll < 0 || ll > 7)
+		WILL_LJMP(luaL_argerror(L, 2, "Bad log level. It must be between 0 and 7"));
+
+	htxn->s->logs.level = ll;
+	return 0;
+}
+
+/* Lua method txn:set_tos(tos): sets the IP TOS/DSCP field on the client
+ * connection (the session's origin), when that origin is a connection.
+ * The value is not range-checked here; conn_set_tos() applies it as-is.
+ */
+__LJMP static int hlua_txn_set_tos(lua_State *L)
+{
+	struct hlua_txn *htxn;
+	int tos;
+
+	MAY_LJMP(check_args(L, 2, "set_tos"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	tos = MAY_LJMP(luaL_checkinteger(L, 2));
+
+	conn_set_tos(objt_conn(htxn->s->sess->origin), tos);
+	return 0;
+}
+
+/* Lua method txn:set_mark(mark): sets the netfilter mark (SO_MARK) on
+ * the client connection (the session's origin), when that origin is a
+ * connection.
+ */
+__LJMP static int hlua_txn_set_mark(lua_State *L)
+{
+	struct hlua_txn *htxn;
+	int mark;
+
+	MAY_LJMP(check_args(L, 2, "set_mark"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	mark = MAY_LJMP(luaL_checkinteger(L, 2));
+
+	conn_set_mark(objt_conn(htxn->s->sess->origin), mark);
+	return 0;
+}
+
+/* Lua method txn:set_priority_class(prio): sets the stream's queue
+ * priority class, clamped to the valid range by queue_limit_class().
+ */
+__LJMP static int hlua_txn_set_priority_class(lua_State *L)
+{
+	struct hlua_txn *htxn;
+
+	MAY_LJMP(check_args(L, 2, "set_priority_class"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	htxn->s->priority_class = queue_limit_class(MAY_LJMP(luaL_checkinteger(L, 2)));
+	return 0;
+}
+
+/* Lua method txn:set_priority_offset(offset): sets the stream's queue
+ * priority offset, clamped to the valid range by queue_limit_offset().
+ */
+__LJMP static int hlua_txn_set_priority_offset(lua_State *L)
+{
+	struct hlua_txn *htxn;
+
+	MAY_LJMP(check_args(L, 2, "set_priority_offset"));
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	htxn->s->priority_offset = queue_limit_offset(MAY_LJMP(luaL_checkinteger(L, 2)));
+	return 0;
+}
+
+/* Forward the Reply object to the client. This function converts the reply
+ * into HTX and pushes it into the response channel. It is then responsible
+ * for forwarding the message and terminating the transaction. It returns 1
+ * on success and 0 on error. The Reply table must be on top of the stack.
+ */
+__LJMP static int hlua_txn_forward_reply(lua_State *L, struct stream *s)
+{
+	struct htx *htx;
+	struct htx_sl *sl;
+	struct h1m h1m;
+	const char *status, *reason, *body;
+	size_t status_len, reason_len, body_len;
+	int ret, code, flags;
+
+	/* Extract "status" from the Reply table (default: 200). */
+	code = 200;
+	status = "200";
+	status_len = 3;
+	ret = lua_getfield(L, -1, "status");
+	if (ret == LUA_TNUMBER) {
+		code = lua_tointeger(L, -1);
+		/* NOTE(review): lua_tolstring() converts the stack copy of the
+		 * number to a string, and that string is popped just below while
+		 * the pointer is still used later; presumably safe because no
+		 * collection can happen before use -- TODO confirm. */
+		status = lua_tolstring(L, -1, &status_len);
+	}
+	lua_pop(L, 1);
+
+	/* Extract "reason" (default: the canonical reason for <code>). */
+	reason = http_get_reason(code);
+	reason_len = strlen(reason);
+	ret = lua_getfield(L, -1, "reason");
+	if (ret == LUA_TSTRING)
+		reason = lua_tolstring(L, -1, &reason_len);
+	lua_pop(L, 1);
+
+	/* Extract optional "body". */
+	body = NULL;
+	body_len = 0;
+	ret = lua_getfield(L, -1, "body");
+	if (ret == LUA_TSTRING)
+		body = lua_tolstring(L, -1, &body_len);
+	lua_pop(L, 1);
+
+	/* Prepare the response before inserting the headers */
+	h1m_init_res(&h1m);
+	htx = htx_from_buf(&s->res.buf);
+	channel_htx_truncate(&s->res, htx);
+	/* mirror the request's HTTP version in the response start-line */
+	if (s->txn->req.flags & HTTP_MSGF_VER_11) {
+		flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11);
+		sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"),
+				    ist2(status, status_len), ist2(reason, reason_len));
+	}
+	else {
+		flags = HTX_SL_F_IS_RESP;
+		sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.0"),
+				    ist2(status, status_len), ist2(reason, reason_len));
+	}
+	if (!sl)
+		goto fail;
+	sl->info.res.status = code;
+
+	/* Push in the stack the "headers" entry. */
+	ret = lua_getfield(L, -1, "headers");
+	if (ret != LUA_TTABLE)
+		goto skip_headers;
+
+	/* iterate over { name => { value, ... } } entries */
+	lua_pushnil(L);
+	while (lua_next(L, -2) != 0) {
+		struct ist name, value;
+		const char *n, *v;
+		size_t nlen, vlen;
+
+		if (!lua_isstring(L, -2) || !lua_istable(L, -1)) {
+			/* Skip element if the key is not a string or if the value is not a table */
+			goto next_hdr;
+		}
+
+		n = lua_tolstring(L, -2, &nlen);
+		name = ist2(n, nlen);
+		if (isteqi(name, ist("content-length"))) {
+			/* Always skip content-length header. It will be added
+			 * later with the correct len
+			 */
+			goto next_hdr;
+		}
+
+		/* Loop on header's values */
+		lua_pushnil(L);
+		while (lua_next(L, -2)) {
+			if (!lua_isstring(L, -1)) {
+				/* Skip the value if it is not a string */
+				goto next_value;
+			}
+
+			v = lua_tolstring(L, -1, &vlen);
+			value = ist2(v, vlen);
+
+			/* track chunked encoding so the framing flags below are right */
+			if (isteqi(name, ist("transfer-encoding")))
+				h1_parse_xfer_enc_header(&h1m, value);
+			if (!htx_add_header(htx, ist2(n, nlen), ist2(v, vlen)))
+				goto fail;
+
+		  next_value:
+			lua_pop(L, 1);
+		}
+
+	  next_hdr:
+		lua_pop(L, 1);
+	}
+  skip_headers:
+	lua_pop(L, 1);
+
+	/* Update h1m flags: CLEN is set if CHNK is not present */
+	if (!(h1m.flags & H1_MF_CHNK)) {
+		const char *clen = ultoa(body_len);
+
+		h1m.flags |= H1_MF_CLEN;
+		if (!htx_add_header(htx, ist("content-length"), ist(clen)))
+			goto fail;
+	}
+	if (h1m.flags & (H1_MF_CLEN|H1_MF_CHNK))
+		h1m.flags |= H1_MF_XFER_LEN;
+
+	/* Update HTX start-line flags */
+	if (h1m.flags & H1_MF_XFER_ENC)
+		flags |= HTX_SL_F_XFER_ENC;
+	if (h1m.flags & H1_MF_XFER_LEN) {
+		flags |= HTX_SL_F_XFER_LEN;
+		if (h1m.flags & H1_MF_CHNK)
+			flags |= HTX_SL_F_CHNK;
+		else if (h1m.flags & H1_MF_CLEN)
+			flags |= HTX_SL_F_CLEN;
+		if (h1m.body_len == 0)
+			flags |= HTX_SL_F_BODYLESS;
+	}
+	sl->flags |= flags;
+
+
+	if (!htx_add_endof(htx, HTX_BLK_EOH) ||
+	    (body_len && !htx_add_data_atonce(htx, ist2(body, body_len))))
+		goto fail;
+
+	htx->flags |= HTX_FL_EOM;
+
+	/* Now, forward the response and terminate the transaction */
+	s->txn->status = code;
+	htx_to_buf(htx, &s->res.buf);
+	if (!http_forward_proxy_resp(s, 1))
+		goto fail;
+
+	return 1;
+
+  fail:
+	/* on error, wipe whatever was queued so nothing partial is sent */
+	channel_htx_truncate(&s->res, htx);
+	return 0;
+}
+
+/* Terminate a transaction if called from a lua action. For TCP streams,
+ * processing is just aborted. Nothing is returned to the client and all
+ * arguments are ignored. For HTTP streams, if a reply is passed as argument, it
+ * is forwarded to the client before terminating the transaction. On success,
+ * the function exits with ACT_RET_DONE code. If an error occurred, it exits
+ * with ACT_RET_ERR code. If this function is not called from a lua action, it
+ * just exits without any processing.
+ */
+__LJMP static int hlua_txn_done(lua_State *L)
+{
+	struct hlua_txn *htxn;
+	struct stream *s;
+	int finst;
+
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+
+	/* If the flags NOTERM is set, we cannot terminate the session, so we
+	 * just end the execution of the current lua code. */
+	if (htxn->flags & HLUA_TXN_NOTERM)
+		WILL_LJMP(hlua_done(L));
+
+	s = htxn->s;
+	if (!IS_HTX_STRM(htxn->s)) {
+		/* TCP mode: silently abort both channels */
+		struct channel *req = &s->req;
+		struct channel *res = &s->res;
+
+		channel_auto_read(req);
+		channel_abort(req);
+		channel_erase(req);
+
+		channel_auto_read(res);
+		channel_auto_close(res);
+		sc_schedule_abort(s->scb);
+
+		finst = ((htxn->dir == SMP_OPT_DIR_REQ) ? SF_FINST_R : SF_FINST_D);
+		goto done;
+	}
+
+	if (lua_gettop(L) == 1 || !lua_istable(L, 2)) {
+		/* No reply or invalid reply */
+		s->txn->status = 0;
+		http_reply_and_close(s, 0, NULL);
+	}
+	else {
+		/* Remove extra args to have the reply on top of the stack */
+		if (lua_gettop(L) > 2)
+			lua_pop(L, lua_gettop(L) - 2);
+
+		if (!hlua_txn_forward_reply(L, s)) {
+			if (!(s->flags & SF_ERR_MASK))
+				s->flags |= SF_ERR_PRXCOND;
+			lua_pushinteger(L, ACT_RET_ERR);
+			WILL_LJMP(hlua_done(L));
+			return 0; /* Never reached */
+		}
+	}
+
+	finst = ((htxn->dir == SMP_OPT_DIR_REQ) ? SF_FINST_R : SF_FINST_H);
+	if (htxn->dir == SMP_OPT_DIR_REQ) {
+		/* let's log the request time */
+		s->logs.request_ts = now_ns;
+		if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+			_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
+	}
+
+  done:
+	/* record termination cause/state unless already set */
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_LOCAL;
+	if (!(s->flags & SF_FINST_MASK))
+		s->flags |= finst;
+
+	/* from a filter context the return value is ignored, hence -1 */
+	if ((htxn->flags & HLUA_TXN_CTX_MASK) == HLUA_TXN_FLT_CTX)
+		lua_pushinteger(L, -1);
+	else
+		lua_pushinteger(L, ACT_RET_ABRT);
+	WILL_LJMP(hlua_done(L));
+	return 0;
+}
+
+/*
+ *
+ *
+ * Class REPLY
+ *
+ *
+ */
+
+/* Pushes the TXN reply onto the top of the stack. If the stack does not have
+ * free slots, the function fails and returns 0. The optional argument table
+ * (index 2) may provide "status" (number), "reason" (string), "body" (string)
+ * and "headers" ({ name => value | {values} }) entries.
+ */
+static int hlua_txn_reply_new(lua_State *L)
+{
+	struct hlua_txn *htxn;
+	const char *reason, *body = NULL;
+	int ret, status;
+
+	htxn = MAY_LJMP(hlua_checktxn(L, 1));
+	if (!IS_HTX_STRM(htxn->s)) {
+		hlua_pusherror(L, "txn object is not an HTTP transaction.");
+		WILL_LJMP(lua_error(L));
+	}
+
+	/* Default value. <reason> stays NULL until a custom one is found in
+	 * the argument table; it is derived from the final status otherwise.
+	 */
+	status = 200;
+	reason = NULL;
+
+	if (lua_istable(L, 2)) {
+		/* load status and reason from the table argument at index 2 */
+		ret = lua_getfield(L, 2, "status");
+		if (ret == LUA_TNIL)
+			goto reason;
+		else if (ret != LUA_TNUMBER) {
+			/* invalid status: ignore the reason */
+			goto body;
+		}
+		status = lua_tointeger(L, -1);
+
+	  reason:
+		lua_pop(L, 1); /* restore the stack: remove status */
+		ret = lua_getfield(L, 2, "reason");
+		if (ret == LUA_TSTRING)
+			reason = lua_tostring(L, -1);
+
+	  body:
+		lua_pop(L, 1); /* restore the stack: remove invalid status or reason */
+		ret = lua_getfield(L, 2, "body");
+		if (ret == LUA_TSTRING)
+			body = lua_tostring(L, -1);
+		lua_pop(L, 1); /* restore the stack: remove body */
+	}
+
+	/* Create the Reply table */
+	lua_newtable(L);
+
+	/* Add status element */
+	lua_pushstring(L, "status");
+	lua_pushinteger(L, status);
+	lua_settable(L, -3);
+
+	/* Add reason element. Use the custom reason when one was provided,
+	 * otherwise derive it from the final status code. (The previous code
+	 * unconditionally recomputed it here, silently discarding any
+	 * user-supplied "reason" field.)
+	 */
+	if (!reason)
+		reason = http_get_reason(status);
+	lua_pushstring(L, "reason");
+	lua_pushstring(L, reason);
+	lua_settable(L, -3);
+
+	/* Add body element, nil if undefined */
+	lua_pushstring(L, "body");
+	if (body)
+		lua_pushstring(L, body);
+	else
+		lua_pushnil(L);
+	lua_settable(L, -3);
+
+	/* Add headers element */
+	lua_pushstring(L, "headers");
+	lua_newtable(L);
+
+	/* stack: [ txn, <Arg:table>, <Reply:table>, "headers", <headers:table> ] */
+	if (lua_istable(L, 2)) {
+		/* load headers from the table argument at index 2. If it is a table, copy it. */
+		ret = lua_getfield(L, 2, "headers");
+		if (ret == LUA_TTABLE) {
+			/* stack: [ ... <headers:table>, <table> ] */
+			lua_pushnil(L);
+			while (lua_next(L, -2) != 0) {
+				/* stack: [ ... <headers:table>, <table>, k, v] */
+				if (!lua_isstring(L, -1) && !lua_istable(L, -1)) {
+					/* invalid value type, skip it */
+					lua_pop(L, 1);
+					continue;
+				}
+
+
+				/* Duplicate the key and swap it with the value. */
+				lua_pushvalue(L, -2);
+				lua_insert(L, -2);
+				/* stack: [ ... <headers:table>, <table>, k, k, v ] */
+
+				lua_newtable(L);
+				lua_insert(L, -2);
+				/* stack: [ ... <headers:table>, <table>, k, k, <inner:table>, v ] */
+
+				if (lua_isstring(L, -1)) {
+					/* push the value in the inner table */
+					lua_rawseti(L, -2, 1);
+				}
+				else { /* table: copy every string element */
+					lua_pushnil(L);
+					while (lua_next(L, -2) != 0) {
+						/* stack: [ ... <headers:table>, <table>, k, k, <inner:table>, <v:table>, k2, v2 ] */
+						if (!lua_isstring(L, -1)) {
+							/* invalid value type, skip it*/
+							lua_pop(L, 1);
+							continue;
+						}
+						/* push the value in the inner table */
+						lua_rawseti(L, -4, lua_rawlen(L, -4) + 1);
+						/* stack: [ ... <headers:table>, <table>, k, k, <inner:table>, <v:table>, k2 ] */
+					}
+					lua_pop(L, 1);
+					/* stack: [ ... <headers:table>, <table>, k, k, <inner:table> ] */
+				}
+
+				/* push (k,v) on the stack in the headers table:
+				 * stack: [ ... <headers:table>, <table>, k, k, v ]
+				 */
+				lua_settable(L, -5);
+				/* stack: [ ... <headers:table>, <table>, k ] */
+			}
+		}
+		lua_pop(L, 1);
+	}
+	/* stack: [ txn, <Arg:table>, <Reply:table>, "headers", <headers:table> ] */
+	lua_settable(L, -3);
+	/* stack: [ txn, <Arg:table>, <Reply:table> ] */
+
+	/* Pop a class session metatable and affect it to the userdata. */
+	lua_rawgeti(L, LUA_REGISTRYINDEX, class_txn_reply_ref);
+	lua_setmetatable(L, -2);
+	return 1;
+}
+
+/* Set the reply status code, and optionally the reason. If no reason is
+ * provided, the default one corresponding to the status code is used.
+ * Returns a boolean to Lua: false when the status is outside 100..599,
+ * true on success.
+ */
+__LJMP static int hlua_txn_reply_set_status(lua_State *L)
+{
+	int status = MAY_LJMP(luaL_checkinteger(L, 2));
+	const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
+
+	/* First argument (self) must be a table */
+	MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+
+	if (status < 100 || status > 599) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+	if (!reason)
+		reason = http_get_reason(status);
+
+	lua_pushinteger(L, status);
+	lua_setfield(L, 1, "status");
+
+	lua_pushstring(L, reason);
+	lua_setfield(L, 1, "reason");
+
+	lua_pushboolean(L, 1);
+	return 1;
+}
+
+/* Add a header into the reply object. Each header name is associated to an
+ * array of values in the "headers" table. If the header name is not found, a
+ * new entry is created. Returns true to Lua on success; throws when the
+ * "headers" field (or an existing entry for <name>) is not a table.
+ */
+__LJMP static int hlua_txn_reply_add_header(lua_State *L)
+{
+	const char *name = MAY_LJMP(luaL_checkstring(L, 2));
+	const char *value = MAY_LJMP(luaL_checkstring(L, 3));
+	int ret;
+
+	/* First argument (self) must be a table */
+	MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+
+	/* Push in the stack the "headers" entry. */
+	ret = lua_getfield(L, 1, "headers");
+	if (ret != LUA_TTABLE) {
+		/* error message fixed ("to a an array" -> "to be an array"),
+		 * consistent with hlua_txn_reply_del_header() */
+		hlua_pusherror(L, "Reply['headers'] is expected to be an array. %s found", lua_typename(L, ret));
+		WILL_LJMP(lua_error(L));
+	}
+
+	/* check if the header is already registered. If not, register it. */
+	ret = lua_getfield(L, -1, name);
+	if (ret == LUA_TNIL) {
+		/* Entry not found. */
+		lua_pop(L, 1); /* remove the nil. The "headers" table is the top of the stack. */
+
+		/* Insert the new header name in the array in the top of the stack.
+		 * It left the new array in the top of the stack.
+		 */
+		lua_newtable(L);
+		lua_pushstring(L, name);
+		lua_pushvalue(L, -2);
+		lua_settable(L, -4);
+	}
+	else if (ret != LUA_TTABLE) {
+		hlua_pusherror(L, "Reply['headers']['%s'] is expected to be an array. %s found", name, lua_typename(L, ret));
+		WILL_LJMP(lua_error(L));
+	}
+
+	/* Now the top of the stack is an array of values. We push
+	 * the header value as new entry.
+	 */
+	lua_pushstring(L, value);
+	ret = lua_rawlen(L, -2);
+	lua_rawseti(L, -2, ret + 1);
+
+	lua_pushboolean(L, 1);
+	return 1;
+}
+
+/* Remove all occurrences of a given header name by setting the entry to
+ * nil in the "headers" table. Returns true to Lua; throws when the
+ * "headers" field is not a table.
+ */
+__LJMP static int hlua_txn_reply_del_header(lua_State *L)
+{
+	const char *name = MAY_LJMP(luaL_checkstring(L, 2));
+	int ret;
+
+	/* First argument (self) must be a table */
+	MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+
+	/* Push in the stack the "headers" entry. */
+	ret = lua_getfield(L, 1, "headers");
+	if (ret != LUA_TTABLE) {
+		hlua_pusherror(L, "Reply['headers'] is expected to be an array. %s found", lua_typename(L, ret));
+		WILL_LJMP(lua_error(L));
+	}
+
+	lua_pushstring(L, name);
+	lua_pushnil(L);
+	lua_settable(L, -3);
+
+	lua_pushboolean(L, 1);
+	return 1;
+}
+
+/* Set the reply's body. Overwrite any existing entry. Returns true to Lua. */
+__LJMP static int hlua_txn_reply_set_body(lua_State *L)
+{
+	const char *payload = MAY_LJMP(luaL_checkstring(L, 2));
+
+	/* First argument (self) must be a table */
+	MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+
+	lua_pushstring(L, payload);
+	lua_setfield(L, 1, "body");
+
+	lua_pushboolean(L, 1);
+	return 1;
+}
+
+/* Lua function core.log(level, msg): emits <msg> to the default loggers
+ * at integer level <level> (0 .. NB_LOG_LEVELS-1). Here the level really
+ * is the first Lua argument, so argerror index 1 is correct.
+ */
+__LJMP static int hlua_log(lua_State *L)
+{
+	int level;
+	const char *msg;
+
+	MAY_LJMP(check_args(L, 2, "log"));
+	level = MAY_LJMP(luaL_checkinteger(L, 1));
+	msg = MAY_LJMP(luaL_checkstring(L, 2));
+
+	if (level < 0 || level >= NB_LOG_LEVELS)
+		WILL_LJMP(luaL_argerror(L, 1, "Invalid loglevel."));
+
+	hlua_sendlog(NULL, level, msg);
+	return 0;
+}
+
+/* Lua function core.Debug(msg): logs <msg> at LOG_DEBUG level on the
+ * default loggers.
+ */
+__LJMP static int hlua_log_debug(lua_State *L)
+{
+	const char *message;
+
+	MAY_LJMP(check_args(L, 1, "debug"));
+	message = MAY_LJMP(luaL_checkstring(L, 1));
+	hlua_sendlog(NULL, LOG_DEBUG, message);
+	return 0;
+}
+
+/* Lua function core.Info(msg): logs <msg> at LOG_INFO level on the
+ * default loggers.
+ */
+__LJMP static int hlua_log_info(lua_State *L)
+{
+	const char *message;
+
+	MAY_LJMP(check_args(L, 1, "info"));
+	message = MAY_LJMP(luaL_checkstring(L, 1));
+	hlua_sendlog(NULL, LOG_INFO, message);
+	return 0;
+}
+
+/* Lua function core.Warning(msg): logs <msg> at LOG_WARNING level on the
+ * default loggers.
+ */
+__LJMP static int hlua_log_warning(lua_State *L)
+{
+	const char *message;
+
+	MAY_LJMP(check_args(L, 1, "warning"));
+	message = MAY_LJMP(luaL_checkstring(L, 1));
+	hlua_sendlog(NULL, LOG_WARNING, message);
+	return 0;
+}
+
+/* Lua function core.Alert(msg): logs <msg> at LOG_ALERT level on the
+ * default loggers.
+ */
+__LJMP static int hlua_log_alert(lua_State *L)
+{
+	const char *message;
+
+	MAY_LJMP(check_args(L, 1, "alert"));
+	message = MAY_LJMP(luaL_checkstring(L, 1));
+	hlua_sendlog(NULL, LOG_ALERT, message);
+	return 0;
+}
+
+/* Continuation of core.sleep()/core.msleep(): the wakeup tick was pushed
+ * on the stack before yielding. Re-yield until that tick is expired, then
+ * let the coroutine resume.
+ */
+__LJMP static int hlua_sleep_yield(lua_State *L, int status, lua_KContext ctx)
+{
+	int wakeup_ms = lua_tointeger(L, -1);
+	if (!tick_is_expired(wakeup_ms, now_ms))
+		MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_sleep_yield, wakeup_ms, 0));
+	return 0;
+}
+
+/* Lua function core.sleep(seconds): suspends the current Lua coroutine
+ * for <seconds> seconds by yielding with a wakeup tick.
+ * NOTE(review): <delay> is the user integer multiplied by 1000 in an
+ * unsigned int; a very large argument would wrap -- presumably bounded
+ * by reasonable configs, TODO confirm.
+ */
+__LJMP static int hlua_sleep(lua_State *L)
+{
+	unsigned int delay;
+	int wakeup_ms; // tick value
+
+	MAY_LJMP(check_args(L, 1, "sleep"));
+
+	delay = MAY_LJMP(luaL_checkinteger(L, 1)) * 1000;
+	wakeup_ms = tick_add(now_ms, delay);
+	/* keep the wakeup tick on the stack for hlua_sleep_yield() */
+	lua_pushinteger(L, wakeup_ms);
+
+	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_sleep_yield, wakeup_ms, 0));
+	return 0;
+}
+
+/* Lua function core.msleep(milliseconds): same as core.sleep() but the
+ * delay is expressed in milliseconds.
+ */
+__LJMP static int hlua_msleep(lua_State *L)
+{
+	unsigned int delay;
+	int wakeup_ms; // tick value
+
+	MAY_LJMP(check_args(L, 1, "msleep"));
+
+	delay = MAY_LJMP(luaL_checkinteger(L, 1));
+	wakeup_ms = tick_add(now_ms, delay);
+	/* keep the wakeup tick on the stack for hlua_sleep_yield() */
+	lua_pushinteger(L, wakeup_ms);
+
+	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_sleep_yield, wakeup_ms, 0));
+	return 0;
+}
+
+/* This function is an LUA binding. It permits to give back
+ * the hand at the HAProxy scheduler. It is used when the
+ * LUA processing consumes a lot of time.
+ */
+__LJMP static int hlua_yield_yield(lua_State *L, int status, lua_KContext ctx)
+{
+	/* continuation: nothing to do on resume */
+	return 0;
+}
+
+/* Lua function core.yield(): yields immediately with HLUA_CTRLYIELD so
+ * the scheduler may run other tasks before resuming this coroutine.
+ */
+__LJMP static int hlua_yield(lua_State *L)
+{
+	MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_yield_yield, TICK_ETERNITY, HLUA_CTRLYIELD));
+	return 0;
+}
+
+/* This function changes the nice value of the currently executed
+ * task. It is used to set a low or high priority on the current
+ * task. The value is clamped to [-1024, 1024].
+ */
+__LJMP static int hlua_set_nice(lua_State *L)
+{
+	struct hlua *hlua;
+	int nice;
+
+	MAY_LJMP(check_args(L, 1, "set_nice"));
+	nice = MAY_LJMP(luaL_checkinteger(L, 1));
+
+	/* Get hlua struct, or NULL if we execute from main lua state */
+	hlua = hlua_gethlua(L);
+
+	/* If the task is not set, I'm in a start mode. */
+	if (!hlua || !hlua->task)
+		return 0;
+
+	if (nice < -1024)
+		nice = -1024;
+	else if (nice > 1024)
+		nice = 1024;
+
+	hlua->task->nice = nice;
+	return 0;
+}
+
+/* safe lua coroutine.create() function:
+ *
+ * This is a simple wrapper for coroutine.create() that
+ * ensures the current hlua state ctx is available from
+ * the new subroutine state
+ */
+__LJMP static int hlua_coroutine_create(lua_State *L)
+{
+	lua_State *new; /* new coroutine state */
+	struct hlua **hlua_store;
+	struct hlua *hlua = hlua_gethlua(L);
+
+	new = lua_newthread(L);
+	if (!new)
+		return 0;
+
+	hlua_store = lua_getextraspace(new);
+	/* Expose current hlua ctx on new lua thread
+	 * (hlua_gethlua() will properly return the last "known"
+	 * hlua ctx instead of NULL when it is called from such coroutines)
+	 */
+	*hlua_store = hlua;
+
+	/* new lua thread is on the top of the stack, we
+	 * need to duplicate first stack argument (<f> from coroutine.create(<f>))
+	 * on the top of the stack to be able to use xmove() to move it on the new
+	 * stack
+	 */
+	lua_pushvalue(L, 1);
+	/* move <f> function to the new stack */
+	lua_xmove(L, new, 1);
+	/* new lua thread is back at the top of the stack */
+	return 1;
+}
+
+/* This function is used as a callback of a task. It is called by the
+ * HAProxy task subsystem when the task is woken up. The LUA runtime can
+ * return an E_AGAIN signal, the emitter of this signal must set a
+ * signal to wake the task.
+ *
+ * Task wrappers are longjmp safe because the only Lua code
+ * executed is the safe hlua_ctx_resume();
+ */
+struct task *hlua_process_task(struct task *task, void *context, unsigned int state)
+{
+	struct hlua *hlua = context;
+	enum hlua_exec status;
+
+	/* bind the task to the current thread on first run */
+	if (task->tid < 0)
+		task->tid = tid;
+
+	/* If it is the first call to the task, we must initialize the
+	 * execution timeouts.
+	 */
+	if (!HLUA_IS_RUNNING(hlua))
+		hlua_timer_init(&hlua->timer, hlua_timeout_task);
+
+	/* Execute the Lua code. */
+	status = hlua_ctx_resume(hlua, 1);
+
+	switch (status) {
+	/* finished or yield */
+	case HLUA_E_OK:
+		/* normal completion: release both the lua ctx and the task */
+		hlua_ctx_destroy(hlua);
+		task_destroy(task);
+		task = NULL;
+		break;
+
+	case HLUA_E_AGAIN: /* co process or timeout wake me later. */
+		notification_gc(&hlua->com);
+		task->expire = hlua->wake_time;
+		break;
+
+	/* finished with error. */
+	case HLUA_E_ETMOUT:
+		SEND_ERR(NULL, "Lua task: execution timeout.\n");
+		goto err_task_abort;
+	case HLUA_E_ERRMSG:
+		SEND_ERR(NULL, "Lua task: %s.\n", lua_tostring(hlua->T, -1));
+		goto err_task_abort;
+	case HLUA_E_ERR:
+	default:
+		SEND_ERR(NULL, "Lua task: unknown error.\n");
+	 err_task_abort:
+		/* all error paths release the ctx and the task */
+		hlua_ctx_destroy(hlua);
+		task_destroy(task);
+		task = NULL;
+		break;
+	}
+	return task;
+}
+
+/* Helper function to prepare the lua ctx for a given stream
+ *
+ * ctx will be enforced in <state_id> parent stack on initial creation.
+ * If s->hlua->state_id differs from <state_id>, which may happen at
+ * runtime since existing stream hlua ctx will be reused for other
+ * "independent" (but stream-related) lua executions, hlua will be
+ * recreated with the expected state id.
+ *
+ * Returns 1 for success and 0 for failure
+ */
+static int hlua_stream_ctx_prepare(struct stream *s, int state_id)
+{
+	/* In the execution wrappers linked with a stream, the
+	 * Lua context can be not initialized. This behavior
+	 * permits to save performances because a systematic
+	 * Lua initialization causes a 5% performance loss.
+	 */
+  ctx_renew:
+	if (!s->hlua) {
+		struct hlua *hlua;
+
+		hlua = pool_alloc(pool_head_hlua);
+		if (!hlua)
+			return 0;
+		HLUA_INIT(hlua);
+		if (!hlua_ctx_init(hlua, state_id, s->task)) {
+			/* ctx init failed: release the bare struct */
+			pool_free(pool_head_hlua, hlua);
+			return 0;
+		}
+		s->hlua = hlua;
+	}
+	else if (s->hlua->state_id != state_id) {
+		/* ctx already created, but not in proper state.
+		 * It should only happen after the previous execution is
+		 * finished, otherwise it's probably a bug since we don't
+		 * want to abort unfinished job..
+		 */
+		BUG_ON(HLUA_IS_RUNNING(s->hlua));
+		hlua_ctx_destroy(s->hlua);
+		s->hlua = NULL;
+		goto ctx_renew;
+	}
+	return 1;
+}
+
+/* This function is an LUA binding that registers a LUA function to be
+ * executed after the HAProxy configuration parsing and before the
+ * HAProxy scheduler starts. This function expects only one LUA
+ * argument that is a function. This function returns nothing, but
+ * throws if an error is encountered.
+ */
+__LJMP static int hlua_register_init(lua_State *L)
+{
+	struct hlua_init_function *init;
+	int ref;
+
+	MAY_LJMP(check_args(L, 1, "register_init"));
+
+	if (hlua_gethlua(L)) {
+		/* runtime processing: only allowed from the body context */
+		WILL_LJMP(luaL_error(L, "register_init: not available outside of body context"));
+	}
+
+	ref = MAY_LJMP(hlua_checkfunction(L, 1));
+
+	init = calloc(1, sizeof(*init));
+	if (!init) {
+		/* release the function ref before throwing */
+		hlua_unref(L, ref);
+		WILL_LJMP(luaL_error(L, "Lua out of memory error."));
+	}
+
+	init->function_ref = ref;
+	LIST_APPEND(&hlua_init_functions[hlua_state_id], &init->l);
+	return 0;
+}
+
+/* This function is an LUA binding. It permits to register a task
+ * executed in parallel of the main HAProxy activity. The task is
+ * created and it is set in the HAProxy scheduler. It can be called
+ * from the "init" section, "post init" or during the runtime.
+ *
+ * Lua prototype:
+ *
+ *   <none> core.register_task(<function>[, <arg1>[, <arg2>[, ...[, <arg4>]]]])
+ *
+ * <arg1..4> are optional arguments that will be provided to <function>
+ */
+__LJMP static int hlua_register_task(lua_State *L)
+{
+	struct hlua *hlua = NULL;
+	struct task *task = NULL;
+	int ref;
+	int nb_arg;
+	int it;
+	int arg_ref[4]; /* optional arguments */
+	int state_id;
+
+	nb_arg = lua_gettop(L);
+	if (nb_arg < 1)
+		WILL_LJMP(luaL_error(L, "register_task: <func> argument is required"));
+	else if (nb_arg > 5)
+		WILL_LJMP(luaL_error(L, "register_task: no more that 4 optional arguments may be provided"));
+
+	/* first arg: function ref */
+	ref = MAY_LJMP(hlua_checkfunction(L, 1));
+
+	/* extract optional args (if any) */
+	it = 0;
+	while (--nb_arg) {
+		lua_pushvalue(L, 2 + it);
+		arg_ref[it] = hlua_ref(L); /* get arg reference */
+		it += 1;
+	}
+	nb_arg = it;
+
+	/* Get the reference state. If the reference is NULL, L is the master
+	 * state, otherwise hlua->T is.
+	 */
+	hlua = hlua_gethlua(L);
+	if (hlua)
+		/* we are in runtime processing */
+		state_id = hlua->state_id;
+	else
+		/* we are in initialization mode */
+		state_id = hlua_state_id;
+
+	hlua = pool_alloc(pool_head_hlua);
+	if (!hlua)
+		goto alloc_error;
+	HLUA_INIT(hlua);
+
+	/* We are in the common lua state, execute the task anywhere,
+	 * otherwise, inherit the current thread identifier
+	 */
+	if (state_id == 0)
+		task = task_new_anywhere();
+	else
+		task = task_new_here();
+	if (!task)
+		goto alloc_error;
+
+	task->context = hlua;
+	task->process = hlua_process_task;
+
+	if (!hlua_ctx_init(hlua, state_id, task))
+		goto alloc_error;
+
+	/* Ensure there is enough space on the stack for the function
+	 * plus optional arguments
+	 */
+	if (!lua_checkstack(hlua->T, (1 + nb_arg)))
+		goto alloc_error;
+
+	/* Restore the function in the stack. */
+	hlua_pushref(hlua->T, ref);
+	/* function ref not needed anymore since it was pushed to the substack */
+	hlua_unref(L, ref);
+
+	hlua->nargs = nb_arg;
+
+	/* push optional arguments to the function */
+	for (it = 0; it < nb_arg; it++) {
+		/* push arg to the stack */
+		hlua_pushref(hlua->T, arg_ref[it]);
+		/* arg ref not needed anymore since it was pushed to the substack */
+		hlua_unref(L, arg_ref[it]);
+	}
+
+	/* Schedule task. */
+	task_wakeup(task, TASK_WOKEN_INIT);
+
+	return 0;
+
+  alloc_error:
+	/* release everything acquired so far; task_destroy(NULL) and
+	 * hlua_ctx_destroy(NULL) are assumed safe on partially-built state */
+	task_destroy(task);
+	hlua_unref(L, ref);
+	for (it = 0; it < nb_arg; it++) {
+		hlua_unref(L, arg_ref[it]);
+	}
+	hlua_ctx_destroy(hlua);
+	WILL_LJMP(luaL_error(L, "Lua out of memory error."));
+	return 0; /* Never reached */
+}
+
+/* called from unsafe location: releases the lua function reference held
+ * by the subscription, destroys its dedicated hlua ctx, then frees the
+ * subscription itself.
+ */
+static void hlua_event_subscription_destroy(struct hlua_event_sub *hlua_sub)
+{
+	/* hlua cleanup */
+
+	hlua_lock(hlua_sub->hlua);
+	/* registry is shared between coroutines */
+	hlua_unref(hlua_sub->hlua->T, hlua_sub->fcn_ref);
+	hlua_unlock(hlua_sub->hlua);
+
+	hlua_ctx_destroy(hlua_sub->hlua);
+
+	/* free */
+	pool_free(pool_head_hlua_event_sub, hlua_sub);
+}
+
+/* single event handler: hlua ctx is shared between multiple events handlers
+ * issued from the same subscription. Thus, it is not destroyed when the event
+ * is processed: it is destroyed when no more events are expected for the
+ * subscription (ie: when the subscription ends).
+ *
+ * Moreover, events are processed sequentially within the subscription:
+ * one event must be fully processed before another one may be processed.
+ * This ensures proper consistency for lua event handling from an ordering
+ * point of view. This is especially useful with server events for example
+ * where ADD/DEL/UP/DOWN events ordering really matters to trigger specific
+ * actions from lua (e.g.: sending emails or making API calls).
+ *
+ * Due to this design, each lua event handler is pleased to process the event
+ * as fast as possible to prevent the event queue from growing up.
+ * Strictly speaking, there is no runtime limit for the callback function
+ * (timeout set to default task timeout), but if the event queue goes past
+ * the limit of unconsumed events an error will be reported and the
+ * subscription will pause itself for as long as it takes for the handler to
+ * catch up (events will be lost as a result).
+ * If the event handler does not need the sequential ordering and wants to
+ * process multiple events at a time, it may spawn a new side-task using
+ * 'core.register_task' to delegate the event handling and make parallel event
+ * processing within the same subscription set.
+ */
+static void hlua_event_handler(struct hlua *hlua)
+{
+	enum hlua_exec status;
+
+	/* If it is the first call to the task, we must initialize the
+	 * execution timeouts.
+	 */
+	if (!HLUA_IS_RUNNING(hlua))
+		hlua_timer_init(&hlua->timer, hlua_timeout_task);
+
+	/* make sure to reset the task expiry before each hlua_ctx_resume()
+	 * since the task is re-used for multiple cb function calls
+	 * We couldn't risk to have t->expire pointing to a past date because
+	 * it was set during last function invocation but was never reset since
+	 * (ie: E_AGAIN)
+	 */
+	hlua->task->expire = TICK_ETERNITY;
+
+	/* Execute the Lua code. */
+	status = hlua_ctx_resume(hlua, 1);
+
+	switch (status) {
+	/* finished or yield */
+	case HLUA_E_OK:
+		break;
+
+	case HLUA_E_AGAIN: /* co process or timeout wake me later. */
+		notification_gc(&hlua->com);
+		hlua->task->expire = hlua->wake_time;
+		break;
+
+	/* finished with error: report it but keep the ctx alive for the
+	 * next events of the same subscription. */
+	case HLUA_E_ETMOUT:
+		SEND_ERR(NULL, "Lua event_hdl: execution timeout.\n");
+		break;
+
+	case HLUA_E_ERRMSG:
+		SEND_ERR(NULL, "Lua event_hdl: %s.\n", lua_tostring(hlua->T, -1));
+		break;
+
+	case HLUA_E_ERR:
+	default:
+		SEND_ERR(NULL, "Lua event_hdl: unknown error.\n");
+		break;
+	}
+}
+
+/* Fills the Lua table currently at the top of <L>'s stack with the fields of
+ * the server check result snapshot <check>: "agent" (boolean), "result"
+ * ("FAILED"/"PASSED"/"CONDPASS" or nil for other values), "duration", plus a
+ * nested "reason" table ("short", "desc" and, once the check reached the data
+ * analysis stage, "code") and a nested "health" table ("cur", "rise", "fall").
+ *
+ * May raise a Lua error (__LJMP): the caller is responsible for reserving
+ * enough Lua stack room (callers below use lua_checkstack(_, 20)) and for
+ * running under a protected environment.
+ */
+__LJMP static void hlua_event_hdl_cb_push_event_checkres(lua_State *L,
+ struct event_hdl_cb_data_server_checkres *check)
+{
+ lua_pushstring(L, "agent");
+ lua_pushboolean(L, check->agent);
+ lua_settable(L, -3);
+ lua_pushstring(L, "result");
+ switch (check->result) {
+ case CHK_RES_FAILED:
+ lua_pushstring(L, "FAILED");
+ break;
+ case CHK_RES_PASSED:
+ lua_pushstring(L, "PASSED");
+ break;
+ case CHK_RES_CONDPASS:
+ lua_pushstring(L, "CONDPASS");
+ break;
+ default:
+ lua_pushnil(L);
+ break;
+ }
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "duration");
+ lua_pushinteger(L, check->duration);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "reason");
+ lua_newtable(L);
+
+ lua_pushstring(L, "short");
+ lua_pushstring(L, get_check_status_info(check->reason.status));
+ lua_settable(L, -3);
+ lua_pushstring(L, "desc");
+ lua_pushstring(L, get_check_status_description(check->reason.status));
+ lua_settable(L, -3);
+ if (check->reason.status >= HCHK_STATUS_L57DATA) {
+ /* code only available when the check reached data analysis stage */
+ lua_pushstring(L, "code");
+ lua_pushinteger(L, check->reason.code);
+ lua_settable(L, -3);
+ }
+
+ lua_settable(L, -3); /* reason table */
+
+ lua_pushstring(L, "health");
+ lua_newtable(L);
+
+ lua_pushstring(L, "cur");
+ lua_pushinteger(L, check->health.cur);
+ lua_settable(L, -3);
+ lua_pushstring(L, "rise");
+ lua_pushinteger(L, check->health.rise);
+ lua_settable(L, -3);
+ lua_pushstring(L, "fall");
+ lua_pushinteger(L, check->health.fall);
+ lua_settable(L, -3);
+
+ lua_settable(L, -3); /* health table */
+}
+
+/* This function pushes various arguments such as event type and event data to
+ * the lua function that will be called to consume the event.
+ */
+__LJMP static void hlua_event_hdl_cb_push_args(struct hlua_event_sub *hlua_sub,
+ struct event_hdl_async_event *e)
+{
+ struct hlua *hlua = hlua_sub->hlua;
+ struct event_hdl_sub_type event = e->type;
+ void *data = e->data;
+
+ /* push event type */
+ hlua->nargs = 1;
+ lua_pushstring(hlua->T, event_hdl_sub_type_to_string(event));
+
+ /* push event data (according to event type) */
+ if (event_hdl_sub_family_equal(EVENT_HDL_SUB_SERVER, event)) {
+ struct event_hdl_cb_data_server *e_server = data;
+ struct proxy *px;
+ struct server *server;
+
+ hlua->nargs += 1;
+ lua_newtable(hlua->T);
+ /* Add server name */
+ lua_pushstring(hlua->T, "name");
+ lua_pushstring(hlua->T, e_server->safe.name);
+ lua_settable(hlua->T, -3);
+ /* Add server puid */
+ lua_pushstring(hlua->T, "puid");
+ lua_pushinteger(hlua->T, e_server->safe.puid);
+ lua_settable(hlua->T, -3);
+ /* Add server rid */
+ lua_pushstring(hlua->T, "rid");
+ lua_pushinteger(hlua->T, e_server->safe.rid);
+ lua_settable(hlua->T, -3);
+ /* Add server proxy name */
+ lua_pushstring(hlua->T, "proxy_name");
+ lua_pushstring(hlua->T, e_server->safe.proxy_name);
+ lua_settable(hlua->T, -3);
+ /* Add server proxy uuid */
+ lua_pushstring(hlua->T, "proxy_uuid");
+ lua_pushinteger(hlua->T, e_server->safe.proxy_uuid);
+ lua_settable(hlua->T, -3);
+
+ /* special events, fetch additional info with explicit type casting */
+ if (event_hdl_sub_type_equal(EVENT_HDL_SUB_SERVER_STATE, event)) {
+ struct event_hdl_cb_data_server_state *state = data;
+ int it;
+
+ /* reserve Lua stack room for the nested tables pushed below */
+ if (!lua_checkstack(hlua->T, 20))
+ WILL_LJMP(luaL_error(hlua->T, "Lua out of memory error."));
+
+ /* state subclass */
+ lua_pushstring(hlua->T, "state");
+ lua_newtable(hlua->T);
+
+ /* non-zero type means an administrative state change (see "cause") */
+ lua_pushstring(hlua->T, "admin");
+ lua_pushboolean(hlua->T, state->safe.type);
+ lua_settable(hlua->T, -3);
+
+ /* is it because of a check ? */
+ if (!state->safe.type &&
+ (state->safe.op_st_chg.cause == SRV_OP_STCHGC_HEALTH ||
+ state->safe.op_st_chg.cause == SRV_OP_STCHGC_AGENT)) {
+ /* yes, provide check result */
+ lua_pushstring(hlua->T, "check");
+ lua_newtable(hlua->T);
+ hlua_event_hdl_cb_push_event_checkres(hlua->T, &state->safe.op_st_chg.check);
+ lua_settable(hlua->T, -3); /* check table */
+ }
+
+ lua_pushstring(hlua->T, "cause");
+ if (state->safe.type)
+ lua_pushstring(hlua->T, srv_adm_st_chg_cause(state->safe.adm_st_chg.cause));
+ else
+ lua_pushstring(hlua->T, srv_op_st_chg_cause(state->safe.op_st_chg.cause));
+ lua_settable(hlua->T, -3);
+
+ /* old_state, new_state */
+ for (it = 0; it < 2; it++) {
+ enum srv_state srv_state = (!it) ? state->safe.old_state : state->safe.new_state;
+
+ lua_pushstring(hlua->T, (!it) ? "old_state" : "new_state");
+ switch (srv_state) {
+ case SRV_ST_STOPPED:
+ lua_pushstring(hlua->T, "STOPPED");
+ break;
+ case SRV_ST_STOPPING:
+ lua_pushstring(hlua->T, "STOPPING");
+ break;
+ case SRV_ST_STARTING:
+ lua_pushstring(hlua->T, "STARTING");
+ break;
+ case SRV_ST_RUNNING:
+ lua_pushstring(hlua->T, "RUNNING");
+ break;
+ default:
+ lua_pushnil(hlua->T);
+ break;
+ }
+ lua_settable(hlua->T, -3);
+ }
+
+ /* requeued */
+ lua_pushstring(hlua->T, "requeued");
+ lua_pushinteger(hlua->T, state->safe.requeued);
+ lua_settable(hlua->T, -3);
+
+ lua_settable(hlua->T, -3); /* state table */
+ }
+ else if (event_hdl_sub_type_equal(EVENT_HDL_SUB_SERVER_ADMIN, event)) {
+ struct event_hdl_cb_data_server_admin *admin = data;
+ int it;
+
+ if (!lua_checkstack(hlua->T, 20))
+ WILL_LJMP(luaL_error(hlua->T, "Lua out of memory error."));
+
+ /* admin subclass */
+ lua_pushstring(hlua->T, "admin");
+ lua_newtable(hlua->T);
+
+ lua_pushstring(hlua->T, "cause");
+ lua_pushstring(hlua->T, srv_adm_st_chg_cause(admin->safe.cause));
+ lua_settable(hlua->T, -3);
+
+ /* old_admin, new_admin */
+ for (it = 0; it < 2; it++) {
+ enum srv_admin srv_admin = (!it) ? admin->safe.old_admin : admin->safe.new_admin;
+
+ lua_pushstring(hlua->T, (!it) ? "old_admin" : "new_admin");
+
+ /* admin state matrix: one boolean per SRV_ADMF_* flag */
+ lua_newtable(hlua->T);
+
+ lua_pushstring(hlua->T, "MAINT");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_MAINT);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "FMAINT");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_FMAINT);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "IMAINT");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_IMAINT);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "RMAINT");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_RMAINT);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "CMAINT");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_CMAINT);
+ lua_settable(hlua->T, -3);
+
+ lua_pushstring(hlua->T, "DRAIN");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_DRAIN);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "FDRAIN");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_FDRAIN);
+ lua_settable(hlua->T, -3);
+ lua_pushstring(hlua->T, "IDRAIN");
+ lua_pushboolean(hlua->T, srv_admin & SRV_ADMF_IDRAIN);
+ lua_settable(hlua->T, -3);
+
+ lua_settable(hlua->T, -3); /* matrix table */
+ }
+ /* requeued */
+ lua_pushstring(hlua->T, "requeued");
+ lua_pushinteger(hlua->T, admin->safe.requeued);
+ lua_settable(hlua->T, -3);
+
+ lua_settable(hlua->T, -3); /* admin table */
+ }
+ else if (event_hdl_sub_type_equal(EVENT_HDL_SUB_SERVER_CHECK, event)) {
+ struct event_hdl_cb_data_server_check *check = data;
+
+ if (!lua_checkstack(hlua->T, 20))
+ WILL_LJMP(luaL_error(hlua->T, "Lua out of memory error."));
+
+ /* check subclass */
+ lua_pushstring(hlua->T, "check");
+ lua_newtable(hlua->T);
+
+ /* check result snapshot */
+ hlua_event_hdl_cb_push_event_checkres(hlua->T, &check->safe.res);
+
+ lua_settable(hlua->T, -3); /* check table */
+ }
+
+ /* attempt to provide reference server object
+ * (if it wasn't removed yet, SERVER_DEL will never succeed here)
+ */
+ px = proxy_find_by_id(e_server->safe.proxy_uuid, PR_CAP_BE, 0);
+ BUG_ON(!px);
+ server = findserver_unique_id(px, e_server->safe.puid, e_server->safe.rid);
+ if (server) {
+ lua_pushstring(hlua->T, "reference");
+ hlua_fcn_new_server(hlua->T, server);
+ lua_settable(hlua->T, -3);
+ }
+ }
+ /* sub mgmt */
+ hlua->nargs += 1;
+ hlua_fcn_new_event_sub(hlua->T, hlua_sub->sub);
+
+ /* when? */
+ hlua->nargs += 1;
+ lua_pushinteger(hlua->T, e->when.tv_sec);
+}
+
+/* events runner: if there's an ongoing hlua event handling process, finish it
+ * then, check if there are new events waiting to be processed
+ * (events are processed sequentially)
+ *
+ * We have a safety measure to warn/guard if the event queue is growing up
+ * too much due to many events being generated and lua handler is unable to
+ * keep up the pace (e.g.: when the event queue grows past 100 unconsumed events).
+ * TODO: make it tunable
+ */
+static struct task *hlua_event_runner(struct task *task, void *context, unsigned int state)
+{
+ struct hlua_event_sub *hlua_sub = context;
+ struct event_hdl_async_event *event;
+ const char *error = NULL;
+
+ if (!hlua_sub->paused && event_hdl_async_equeue_size(&hlua_sub->equeue) > 100) {
+ const char *trace = NULL;
+
+ /* We reached the limit of pending events in the queue: we should
+ * warn the user, and temporarily pause the subscription to give a chance
+ * to the handler to catch up? (it also prevents resource shortage since
+ * the queue could grow indefinitely otherwise)
+ * TODO: find a way to inform the handler that it missed some events
+ * (example: stats within the subscription in event_hdl api exposed via lua api?)
+ *
+ * Nonetheless, reaching this limit means that the handler is not fast enough
+ * and/or that it subscribed to events that happen too frequently and did not
+ * expect it. This could come from an inadequate design in the user's script.
+ */
+ event_hdl_pause(hlua_sub->sub);
+ hlua_sub->paused = 1;
+
+ if (SET_SAFE_LJMP(hlua_sub->hlua)) {
+ /* The following Lua call may fail. */
+ trace = hlua_traceback(hlua_sub->hlua->T, ", ");
+ /* At this point the execution is safe. */
+ RESET_SAFE_LJMP(hlua_sub->hlua);
+ } else {
+ /* Lua error was raised while fetching lua trace from current ctx */
+ SEND_ERR(NULL, "Lua event_hdl: unexpected error (memory failure?).\n");
+ }
+ ha_warning("Lua event_hdl: pausing the subscription because the handler fails "
+ "to keep up the pace (%u unconsumed events) from %s.\n",
+ event_hdl_async_equeue_size(&hlua_sub->equeue),
+ (trace) ? trace : "[unknown]");
+ }
+
+ if (HLUA_IS_RUNNING(hlua_sub->hlua)) {
+ /* ongoing hlua event handler, resume it */
+ hlua_event_handler(hlua_sub->hlua);
+ } else if ((event = event_hdl_async_equeue_pop(&hlua_sub->equeue))) { /* check for new events */
+ if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
+ /* ending event: no more events to come */
+ event_hdl_async_free_event(event);
+ task_destroy(task);
+ hlua_event_subscription_destroy(hlua_sub);
+ return NULL;
+ }
+ /* new event: start processing it */
+
+ /* The following Lua calls can fail. */
+ if (!SET_SAFE_LJMP(hlua_sub->hlua)) {
+ if (lua_type(hlua_sub->hlua->T, -1) == LUA_TSTRING)
+ error = lua_tostring(hlua_sub->hlua->T, -1);
+ else
+ error = "critical error";
+ ha_alert("Lua event_hdl: %s.\n", error);
+ goto skip_event;
+ }
+
+ /* Check stack available size. */
+ if (!lua_checkstack(hlua_sub->hlua->T, 5)) {
+ ha_alert("Lua event_hdl: full stack.\n");
+ RESET_SAFE_LJMP(hlua_sub->hlua);
+ goto skip_event;
+ }
+
+ /* Restore the function in the stack. */
+ hlua_pushref(hlua_sub->hlua->T, hlua_sub->fcn_ref);
+
+ /* push args */
+ hlua_sub->hlua->nargs = 0;
+ MAY_LJMP(hlua_event_hdl_cb_push_args(hlua_sub, event));
+
+ /* At this point the execution is safe. */
+ RESET_SAFE_LJMP(hlua_sub->hlua);
+
+ /* At this point the event was successfully translated into hlua ctx,
+ * or hlua error occurred, so we can safely discard it
+ */
+ event_hdl_async_free_event(event);
+ event = NULL;
+
+ hlua_event_handler(hlua_sub->hlua);
+ skip_event:
+ /* the event was never handed over to Lua: free it here to avoid a leak */
+ if (event)
+ event_hdl_async_free_event(event);
+
+ }
+
+ if (!HLUA_IS_RUNNING(hlua_sub->hlua)) {
+ /* we just finished the processing of one event..
+ * check for new events before becoming idle
+ */
+ if (!event_hdl_async_equeue_isempty(&hlua_sub->equeue)) {
+ /* more events to process, make sure the task
+ * will be resumed ASAP to process pending events
+ */
+ task_wakeup(task, TASK_WOKEN_OTHER);
+ }
+ else if (hlua_sub->paused) {
+ /* empty queue, the handler caught up: resume the subscription */
+ event_hdl_resume(hlua_sub->sub);
+ hlua_sub->paused = 0;
+ }
+ }
+
+ return task;
+}
+
+/* Must be called directly under lua protected/safe environment
+ * (not from external callback)
+ * <fcn_ref> should NOT be dropped after the function successfully returns:
+ * it will be done automatically in hlua_event_subscription_destroy() when the
+ * subscription ends.
+ *
+ * Returns the new subscription on success and NULL on failure (memory error)
+ */
+static struct event_hdl_sub *hlua_event_subscribe(event_hdl_sub_list *list, struct event_hdl_sub_type e_type,
+ int state_id, int fcn_ref)
+{
+ struct hlua_event_sub *hlua_sub;
+ struct task *task = NULL;
+
+ hlua_sub = pool_alloc(pool_head_hlua_event_sub);
+ if (!hlua_sub)
+ goto mem_error;
+ /* pre-clear the fields inspected by the mem_error cleanup path */
+ hlua_sub->task = NULL;
+ hlua_sub->hlua = NULL;
+ hlua_sub->paused = 0;
+ if ((task = task_new_here()) == NULL) {
+ /* terminate the message with '\n' like every other ha_alert() in this file */
+ ha_alert("out of memory while allocating hlua event task.\n");
+ goto mem_error;
+ }
+ task->process = hlua_event_runner;
+ task->context = hlua_sub;
+ event_hdl_async_equeue_init(&hlua_sub->equeue);
+ hlua_sub->task = task;
+ hlua_sub->fcn_ref = fcn_ref;
+ hlua_sub->state_id = state_id;
+ hlua_sub->hlua = pool_alloc(pool_head_hlua);
+ if (!hlua_sub->hlua)
+ goto mem_error;
+ HLUA_INIT(hlua_sub->hlua);
+ if (!hlua_ctx_init(hlua_sub->hlua, hlua_sub->state_id, task))
+ goto mem_error;
+
+ hlua_sub->sub = event_hdl_subscribe_ptr(list, e_type,
+ EVENT_HDL_ASYNC_TASK(&hlua_sub->equeue,
+ task,
+ hlua_sub,
+ NULL));
+ if (!hlua_sub->sub)
+ goto mem_error;
+
+ return hlua_sub->sub; /* returns pointer to event_hdl_sub struct */
+
+ mem_error:
+ /* unified cleanup: each resource is released only if it was acquired */
+ if (hlua_sub) {
+ task_destroy(hlua_sub->task);
+ if (hlua_sub->hlua)
+ hlua_ctx_destroy(hlua_sub->hlua);
+ pool_free(pool_head_hlua_event_sub, hlua_sub);
+ }
+
+ return NULL;
+}
+
+/* looks for an array of strings referring to a composition of event_hdl subscription
+ * types at <index> in <L> stack
+ */
+__LJMP static struct event_hdl_sub_type hlua_check_event_sub_types(lua_State *L, int index)
+{
+ struct event_hdl_sub_type subscriptions;
+ const char *msg;
+
+ if (lua_type(L, index) != LUA_TTABLE) {
+ msg = lua_pushfstring(L, "table of strings expected, got %s", luaL_typename(L, index));
+ /* luaL_argerror() raises a Lua error and does not return */
+ luaL_argerror(L, index, msg);
+ }
+
+ subscriptions = EVENT_HDL_SUB_NONE;
+
+ /* browse the argument as an array. */
+ lua_pushnil(L);
+ while (lua_next(L, index) != 0) {
+ if (lua_type(L, -1) != LUA_TSTRING) {
+ msg = lua_pushfstring(L, "table of strings expected, got %s", luaL_typename(L, index));
+ luaL_argerror(L, index, msg);
+ }
+
+ if (event_hdl_sub_type_equal(EVENT_HDL_SUB_NONE, event_hdl_string_to_sub_type(lua_tostring(L, -1)))) {
+ msg = lua_pushfstring(L, "'%s' event type is unknown", lua_tostring(L, -1));
+ luaL_argerror(L, index, msg);
+ }
+
+ /* perform subscriptions |= current sub */
+ subscriptions = event_hdl_sub_type_add(subscriptions, event_hdl_string_to_sub_type(lua_tostring(L, -1)));
+
+ /* pop the current value. */
+ lua_pop(L, 1);
+ }
+
+ return subscriptions;
+}
+
+/* Wrapper for hlua_fcn_new_event_sub(): catch errors raised by
+ * the function to prevent LJMP
+ *
+ * If no error occurred, the function returns 1, else it returns 0 and
+ * the error message is pushed at the top of the stack
+ */
+__LJMP static int _hlua_new_event_sub_safe(lua_State *L)
+{
+ /* argument #1: light userdata carrying the event_hdl_sub pointer */
+ struct event_hdl_sub *sub = lua_touserdata(L, 1);
+
+ /* this function may raise errors */
+ return MAY_LJMP(hlua_fcn_new_event_sub(L, sub));
+}
+/* Pushes the subscription object for <sub> on the stack of <L> without ever
+ * raising: the actual push is performed by _hlua_new_event_sub_safe() run
+ * under lua_pcall().
+ * Returns 1 on success. Returns 0 on failure, in which case lua_pcall() left
+ * the error message at the top of the stack.
+ */
+static int hlua_new_event_sub_safe(lua_State *L, struct event_hdl_sub *sub)
+{
+ if (!lua_checkstack(L, 2))
+ return 0;
+ lua_pushcfunction(L, _hlua_new_event_sub_safe);
+ lua_pushlightuserdata(L, sub);
+ switch (lua_pcall(L, 1, 1, 0)) {
+ case LUA_OK:
+ return 1;
+ default:
+ /* error was caught */
+ return 0;
+ }
+}
+
+/* This function is a LUA helper used for registering lua event callbacks.
+ * It expects an event subscription array and the function to be executed
+ * when subscribed events occur (stack arguments).
+ * It can be called from the "init" section, "post init" or during the runtime.
+ *
+ * <sub_list> is the subscription list where the subscription will be attempted
+ *
+ * Pushes the newly allocated subscription on the stack on success
+ */
+__LJMP int hlua_event_sub(lua_State *L, event_hdl_sub_list *sub_list)
+{
+ struct hlua *hlua;
+ struct event_hdl_sub *sub;
+ struct event_hdl_sub_type subscriptions;
+ int fcn_ref;
+ int state_id;
+
+ MAY_LJMP(check_args(L, 2, "event_sub"));
+
+ /* Get the reference state */
+ hlua = hlua_gethlua(L);
+ if (hlua)
+ /* we are in runtime processing, any thread may subscribe to events:
+ * subscription events will be handled by the thread who performed
+ * the registration.
+ */
+ state_id = hlua->state_id;
+ else {
+ /* we are in initialization mode, only thread 0 (actual calling thread)
+ * may subscribe to events to prevent the same handler (from different lua
+ * stacks) from being registered multiple times
+ *
+ * hlua_state_id == 0: monostack (lua-load)
+ * hlua_state_id > 0: hlua_state_id=tid+1, multi-stack (lua-load-per-thread)
+ * (thus if hlua_state_id > 1, it means we are not in primary thread ctx)
+ */
+ if (hlua_state_id > 1)
+ return 0; /* skip registration */
+ state_id = hlua_state_id;
+ }
+
+ /* First argument : event subscriptions. */
+ subscriptions = MAY_LJMP(hlua_check_event_sub_types(L, 1));
+
+ if (event_hdl_sub_type_equal(subscriptions, EVENT_HDL_SUB_NONE)) {
+ WILL_LJMP(luaL_error(L, "event_sub: no valid event types were provided"));
+ return 0; /* Never reached */
+ }
+
+ /* Second argument : lua function. */
+ fcn_ref = MAY_LJMP(hlua_checkfunction(L, 2));
+
+ /* try to subscribe: on success, ownership of <fcn_ref> is transferred to
+ * the subscription (released in hlua_event_subscription_destroy())
+ */
+ sub = hlua_event_subscribe(sub_list, subscriptions, state_id, fcn_ref);
+ if (!sub) {
+ hlua_unref(L, fcn_ref);
+ WILL_LJMP(luaL_error(L, "event_sub: lua out of memory error"));
+ return 0; /* Never reached */
+ }
+
+ /* push the subscription to the stack
+ *
+ * Here we use the safe function so that lua errors will be
+ * handled explicitly to prevent 'sub' from being lost
+ */
+ if (!hlua_new_event_sub_safe(L, sub)) {
+ /* Some events could already be pending in the handler's queue.
+ * However it is wiser to cancel the subscription since we are unable to
+ * provide a valid reference to it.
+ * Pending events will be delivered (unless lua keeps raising errors).
+ */
+ event_hdl_unsubscribe(sub); /* cancel the subscription */
+ WILL_LJMP(luaL_error(L, "event_sub: cannot push the subscription (%s)", lua_tostring(L, -1)));
+ return 0; /* Never reached */
+ }
+ event_hdl_drop(sub); /* sub has been duplicated, discard old ref */
+
+ return 1;
+}
+
+/* This function is a LUA wrapper used for registering global lua event callbacks
+ * The new subscription is pushed onto the stack on success
+ * Returns the number of arguments pushed to the stack (1 for success)
+ */
+__LJMP static int hlua_event_global_sub(lua_State *L)
+{
+ /* NULL <sub_list> = subscribe onto the process-wide (global) subscription list */
+ return MAY_LJMP(hlua_event_sub(L, NULL));
+}
+
+/* Wrapper called by HAProxy to execute an LUA converter. This wrapper
+ * doesn't allow "yield" functions because the HAProxy engine cannot
+ * resume converters.
+ */
+static int hlua_sample_conv_wrapper(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct hlua_function *fcn = private;
+ struct stream *stream = smp->strm;
+ const char *error;
+
+ if (!stream)
+ return 0;
+
+ if (!hlua_stream_ctx_prepare(stream, fcn_ref_to_stack_id(fcn))) {
+ SEND_ERR(stream->be, "Lua converter '%s': can't initialize Lua context.\n", fcn->name);
+ return 0;
+ }
+
+ /* If it is the first run, initialize the data for the call. */
+ if (!HLUA_IS_RUNNING(stream->hlua)) {
+
+ /* The following Lua calls can fail. */
+ if (!SET_SAFE_LJMP(stream->hlua)) {
+ if (lua_type(stream->hlua->T, -1) == LUA_TSTRING)
+ error = lua_tostring(stream->hlua->T, -1);
+ else
+ error = "critical error";
+ SEND_ERR(stream->be, "Lua converter '%s': %s.\n", fcn->name, error);
+ return 0;
+ }
+
+ /* Check stack available size. */
+ if (!lua_checkstack(stream->hlua->T, 1)) {
+ SEND_ERR(stream->be, "Lua converter '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+
+ /* Restore the function in the stack. */
+ hlua_pushref(stream->hlua->T, fcn->function_ref[stream->hlua->state_id]);
+
+ /* convert the input sample and push it onto the stack. */
+ if (!lua_checkstack(stream->hlua->T, 1)) {
+ SEND_ERR(stream->be, "Lua converter '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+ MAY_LJMP(hlua_smp2lua(stream->hlua->T, smp));
+ stream->hlua->nargs = 1;
+
+ /* push keywords in the stack. */
+ if (arg_p) {
+ for (; arg_p->type != ARGT_STOP; arg_p++) {
+ if (!lua_checkstack(stream->hlua->T, 1)) {
+ SEND_ERR(stream->be, "Lua converter '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+ MAY_LJMP(hlua_arg2lua(stream->hlua->T, arg_p));
+ stream->hlua->nargs++;
+ }
+ }
+
+ /* We must initialize the execution timeouts. */
+ hlua_timer_init(&stream->hlua->timer, hlua_timeout_session);
+
+ /* At this point the execution is safe. */
+ RESET_SAFE_LJMP(stream->hlua);
+ }
+
+ /* Execute the function; yielding is forbidden (second argument is 0). */
+ switch (hlua_ctx_resume(stream->hlua, 0)) {
+ /* finished. */
+ case HLUA_E_OK:
+ /* If the stack is empty, the function fails. */
+ if (lua_gettop(stream->hlua->T) <= 0)
+ return 0;
+
+ /* Convert the returned value in sample. */
+ hlua_lua2smp(stream->hlua->T, -1, smp);
+ /* dup the smp before popping the related lua value and
+ * returning it to haproxy
+ */
+ smp_dup(smp);
+ lua_pop(stream->hlua->T, 1);
+ return 1;
+
+ /* yield. */
+ case HLUA_E_AGAIN:
+ SEND_ERR(stream->be, "Lua converter '%s': cannot use yielded functions.\n", fcn->name);
+ return 0;
+
+ /* finished with error. */
+ case HLUA_E_ERRMSG:
+ /* Display log. */
+ SEND_ERR(stream->be, "Lua converter '%s': %s.\n",
+ fcn->name, lua_tostring(stream->hlua->T, -1));
+ lua_pop(stream->hlua->T, 1);
+ return 0;
+
+ case HLUA_E_ETMOUT:
+ SEND_ERR(stream->be, "Lua converter '%s': execution timeout.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_NOMEM:
+ SEND_ERR(stream->be, "Lua converter '%s': out of memory error.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_YIELD:
+ SEND_ERR(stream->be, "Lua converter '%s': yield functions like core.tcp() or core.sleep() are not allowed.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_ERR:
+ /* Display log. */
+ SEND_ERR(stream->be, "Lua converter '%s' returns an unknown error.\n", fcn->name);
+ __fallthrough;
+
+ default:
+ return 0;
+ }
+}
+
+/* Wrapper called by HAProxy to execute a sample-fetch. this wrapper
+ * doesn't allow "yield" functions because the HAProxy engine cannot
+ * resume sample-fetches. This function will be called by the sample
+ * fetch engine to call lua-based fetch operations.
+ */
+static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp,
+ const char *kw, void *private)
+{
+ struct hlua_function *fcn = private;
+ struct stream *stream = smp->strm;
+ const char *error;
+ unsigned int hflags = HLUA_TXN_NOTERM | HLUA_TXN_SMP_CTX;
+
+ if (!stream)
+ return 0;
+
+ if (!hlua_stream_ctx_prepare(stream, fcn_ref_to_stack_id(fcn))) {
+ /* NOTE(review): this error is reported on stream->be while every other
+ * error below targets smp->px — verify which proxy should receive it.
+ */
+ SEND_ERR(stream->be, "Lua sample-fetch '%s': can't initialize Lua context.\n", fcn->name);
+ return 0;
+ }
+
+ /* If it is the first run, initialize the data for the call. */
+ if (!HLUA_IS_RUNNING(stream->hlua)) {
+
+ /* The following Lua calls can fail. */
+ if (!SET_SAFE_LJMP(stream->hlua)) {
+ if (lua_type(stream->hlua->T, -1) == LUA_TSTRING)
+ error = lua_tostring(stream->hlua->T, -1);
+ else
+ error = "critical error";
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': %s.\n", fcn->name, error);
+ return 0;
+ }
+
+ /* Check stack available size. */
+ if (!lua_checkstack(stream->hlua->T, 2)) {
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+
+ /* Restore the function in the stack. */
+ hlua_pushref(stream->hlua->T, fcn->function_ref[stream->hlua->state_id]);
+
+ /* push arguments in the stack. */
+ if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR, hflags)) {
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+ stream->hlua->nargs = 1;
+
+ /* push keywords in the stack. */
+ for (; arg_p && arg_p->type != ARGT_STOP; arg_p++) {
+ /* Check stack available size. */
+ if (!lua_checkstack(stream->hlua->T, 1)) {
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name);
+ RESET_SAFE_LJMP(stream->hlua);
+ return 0;
+ }
+ MAY_LJMP(hlua_arg2lua(stream->hlua->T, arg_p));
+ stream->hlua->nargs++;
+ }
+
+ /* We must initialize the execution timeouts. */
+ hlua_timer_init(&stream->hlua->timer, hlua_timeout_session);
+
+ /* At this point the execution is safe. */
+ RESET_SAFE_LJMP(stream->hlua);
+ }
+
+ /* Execute the function; yielding is forbidden (second argument is 0). */
+ switch (hlua_ctx_resume(stream->hlua, 0)) {
+ /* finished. */
+ case HLUA_E_OK:
+ /* If the stack is empty, the function fails. */
+ if (lua_gettop(stream->hlua->T) <= 0)
+ return 0;
+
+ /* Convert the returned value in sample. */
+ hlua_lua2smp(stream->hlua->T, -1, smp);
+ /* dup the smp before popping the related lua value and
+ * returning it to haproxy
+ */
+ smp_dup(smp);
+ lua_pop(stream->hlua->T, 1);
+
+ /* Set the end of execution flag. */
+ smp->flags &= ~SMP_F_MAY_CHANGE;
+ return 1;
+
+ /* yield. */
+ case HLUA_E_AGAIN:
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': cannot use yielded functions.\n", fcn->name);
+ return 0;
+
+ /* finished with error. */
+ case HLUA_E_ERRMSG:
+ /* Display log. */
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': %s.\n",
+ fcn->name, lua_tostring(stream->hlua->T, -1));
+ lua_pop(stream->hlua->T, 1);
+ return 0;
+
+ case HLUA_E_ETMOUT:
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': execution timeout.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_NOMEM:
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': out of memory error.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_YIELD:
+ SEND_ERR(smp->px, "Lua sample-fetch '%s': yield not allowed.\n", fcn->name);
+ return 0;
+
+ case HLUA_E_ERR:
+ /* Display log. */
+ SEND_ERR(smp->px, "Lua sample-fetch '%s' returns an unknown error.\n", fcn->name);
+ __fallthrough;
+
+ default:
+ return 0;
+ }
+}
+
+/* This function is an LUA binding used for registering
+ * "sample-conv" functions. It expects a converter name used
+ * in the haproxy configuration file, and an LUA function.
+ */
+__LJMP static int hlua_register_converters(lua_State *L)
+{
+ struct sample_conv_kw_list *sck;
+ const char *name;
+ int ref;
+ int len;
+ struct hlua_function *fcn = NULL;
+ struct sample_conv *sc;
+ struct buffer *trash;
+
+ MAY_LJMP(check_args(L, 2, "register_converters"));
+
+ if (hlua_gethlua(L)) {
+ /* runtime processing */
+ WILL_LJMP(luaL_error(L, "register_converters: not available outside of body context"));
+ }
+
+ /* First argument : converter name. */
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+
+ /* Second argument : lua function. */
+ ref = MAY_LJMP(hlua_checkfunction(L, 2));
+
+ /* Check if the converter is already registered */
+ trash = get_trash_chunk();
+ chunk_printf(trash, "lua.%s", name);
+ sc = find_sample_conv(trash->area, trash->data);
+ if (sc != NULL) {
+ /* already known: drop the previous reference (if any) for this lua
+ * state and replace it with the new function
+ */
+ fcn = sc->private;
+ if (fcn->function_ref[hlua_state_id] != -1) {
+ ha_warning("Trying to register converter 'lua.%s' more than once. "
+ "This will become a hard error in version 2.5.\n", name);
+ hlua_unref(L, fcn->function_ref[hlua_state_id]);
+ }
+ fcn->function_ref[hlua_state_id] = ref;
+ return 0;
+ }
+
+ /* Allocate and fill the sample fetch keyword struct.
+ * NOTE(review): room for two entries — presumably the second, zeroed by
+ * calloc(), terminates the keyword array; confirm against sample.c.
+ */
+ sck = calloc(1, sizeof(*sck) + sizeof(struct sample_conv) * 2);
+ if (!sck)
+ goto alloc_error;
+ fcn = new_hlua_function();
+ if (!fcn)
+ goto alloc_error;
+
+ /* Fill fcn. */
+ fcn->name = strdup(name);
+ if (!fcn->name)
+ goto alloc_error;
+ fcn->function_ref[hlua_state_id] = ref;
+
+ /* List head */
+ sck->list.n = sck->list.p = NULL;
+
+ /* converter keyword. */
+ len = strlen("lua.") + strlen(name) + 1;
+ sck->kw[0].kw = calloc(1, len);
+ if (!sck->kw[0].kw)
+ goto alloc_error;
+
+ snprintf((char *)sck->kw[0].kw, len, "lua.%s", name);
+ sck->kw[0].process = hlua_sample_conv_wrapper;
+ sck->kw[0].arg_mask = ARG12(0,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR);
+ sck->kw[0].val_args = NULL;
+ sck->kw[0].in_type = SMP_T_STR;
+ sck->kw[0].out_type = SMP_T_STR;
+ sck->kw[0].private = fcn;
+
+ /* Register this new converter */
+ sample_register_convs(sck);
+
+ return 0;
+
+ alloc_error:
+ release_hlua_function(fcn);
+ hlua_unref(L, ref);
+ ha_free(&sck);
+ WILL_LJMP(luaL_error(L, "Lua out of memory error."));
+ return 0; /* Never reached */
+}
+
+/* This function is an LUA binding used for registering
+ * "sample-fetch" functions. It expects a converter name used
+ * in the haproxy configuration file, and an LUA function.
+ */
+__LJMP static int hlua_register_fetches(lua_State *L)
+{
+ const char *name;
+ int ref;
+ int len;
+ struct sample_fetch_kw_list *sfk;
+ struct hlua_function *fcn = NULL;
+ struct sample_fetch *sf;
+ struct buffer *trash;
+
+ MAY_LJMP(check_args(L, 2, "register_fetches"));
+
+ if (hlua_gethlua(L)) {
+ /* runtime processing */
+ WILL_LJMP(luaL_error(L, "register_fetches: not available outside of body context"));
+ }
+
+ /* First argument : sample-fetch name. */
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+
+ /* Second argument : lua function. */
+ ref = MAY_LJMP(hlua_checkfunction(L, 2));
+
+ /* Check if the sample-fetch is already registered */
+ trash = get_trash_chunk();
+ chunk_printf(trash, "lua.%s", name);
+ sf = find_sample_fetch(trash->area, trash->data);
+ if (sf != NULL) {
+ /* already known: drop the previous reference (if any) for this lua
+ * state and replace it with the new function
+ */
+ fcn = sf->private;
+ if (fcn->function_ref[hlua_state_id] != -1) {
+ ha_warning("Trying to register sample-fetch 'lua.%s' more than once. "
+ "This will become a hard error in version 2.5.\n", name);
+ hlua_unref(L, fcn->function_ref[hlua_state_id]);
+ }
+ fcn->function_ref[hlua_state_id] = ref;
+ return 0;
+ }
+
+ /* Allocate and fill the sample fetch keyword struct.
+ * NOTE(review): room for two entries — presumably the second, zeroed by
+ * calloc(), terminates the keyword array; confirm against sample.c.
+ */
+ sfk = calloc(1, sizeof(*sfk) + sizeof(struct sample_fetch) * 2);
+ if (!sfk)
+ goto alloc_error;
+ fcn = new_hlua_function();
+ if (!fcn)
+ goto alloc_error;
+
+ /* Fill fcn. */
+ fcn->name = strdup(name);
+ if (!fcn->name)
+ goto alloc_error;
+ fcn->function_ref[hlua_state_id] = ref;
+
+ /* List head */
+ sfk->list.n = sfk->list.p = NULL;
+
+ /* sample-fetch keyword. */
+ len = strlen("lua.") + strlen(name) + 1;
+ sfk->kw[0].kw = calloc(1, len);
+ if (!sfk->kw[0].kw)
+ goto alloc_error;
+
+ snprintf((char *)sfk->kw[0].kw, len, "lua.%s", name);
+ sfk->kw[0].process = hlua_sample_fetch_wrapper;
+ sfk->kw[0].arg_mask = ARG12(0,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR,STR);
+ sfk->kw[0].val_args = NULL;
+ sfk->kw[0].out_type = SMP_T_STR;
+ sfk->kw[0].use = SMP_USE_HTTP_ANY;
+ sfk->kw[0].val = 0;
+ sfk->kw[0].private = fcn;
+
+ /* Register this new fetch. */
+ sample_register_fetches(sfk);
+
+ return 0;
+
+ alloc_error:
+ release_hlua_function(fcn);
+ hlua_unref(L, ref);
+ ha_free(&sfk);
+ WILL_LJMP(luaL_error(L, "Lua out of memory error."));
+ return 0; /* Never reached */
+}
+
+/* This function is a lua binding to set the wake_time.
+ */
+__LJMP static int hlua_set_wake_time(lua_State *L)
+{
+ struct hlua *hlua;
+ unsigned int delay;
+ int wakeup_ms; // tick value
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+ if (!hlua) {
+ return 0;
+ }
+
+ MAY_LJMP(check_args(L, 1, "wake_time"));
+
+ delay = MAY_LJMP(luaL_checkinteger(L, 1));
+ wakeup_ms = tick_add(now_ms, delay);
+ hlua->wake_time = wakeup_ms;
+ return 0;
+}
+
/* This function is a wrapper to execute each LUA function declared as an action
 * wrapper during the initialisation period. This function may return any
 * ACT_RET_* value. On error ACT_RET_CONT is returned and the action is
 * ignored. If the lua action yields, ACT_RET_YIELD is returned. On success, the
 * return value is the first element on the stack.
 */
static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
                                   struct session *sess, struct stream *s, int flags)
{
	char **arg;
	unsigned int hflags = HLUA_TXN_ACT_CTX;
	int dir, act_ret = ACT_RET_CONT;
	const char *error;

	/* Deduce the sample direction from the ruleset this action is
	 * attached to; any other origin is an internal error.
	 */
	switch (rule->from) {
	case ACT_F_TCP_REQ_CNT: dir = SMP_OPT_DIR_REQ; break;
	case ACT_F_TCP_RES_CNT: dir = SMP_OPT_DIR_RES; break;
	case ACT_F_HTTP_REQ:    dir = SMP_OPT_DIR_REQ; break;
	case ACT_F_HTTP_RES:    dir = SMP_OPT_DIR_RES; break;
	default:
		SEND_ERR(px, "Lua: internal error while execute action.\n");
		goto end;
	}

	/* Attach (or reuse) the stream's Lua context for this function. */
	if (!hlua_stream_ctx_prepare(s, fcn_ref_to_stack_id(rule->arg.hlua_rule->fcn))) {
		SEND_ERR(px, "Lua action '%s': can't initialize Lua context.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto end;
	}

	/* If it is the first run, initialize the data for the call. */
	if (!HLUA_IS_RUNNING(s->hlua)) {

		/* The following Lua calls can fail. */
		if (!SET_SAFE_LJMP(s->hlua)) {
			if (lua_type(s->hlua->T, -1) == LUA_TSTRING)
				error = lua_tostring(s->hlua->T, -1);
			else
				error = "critical error";
			SEND_ERR(px, "Lua function '%s': %s.\n",
			         rule->arg.hlua_rule->fcn->name, error);
			goto end;
		}

		/* Check stack available size. */
		if (!lua_checkstack(s->hlua->T, 1)) {
			SEND_ERR(px, "Lua function '%s': full stack.\n",
			         rule->arg.hlua_rule->fcn->name);
			RESET_SAFE_LJMP(s->hlua);
			goto end;
		}

		/* Restore the function in the stack. */
		hlua_pushref(s->hlua->T, rule->arg.hlua_rule->fcn->function_ref[s->hlua->state_id]);

		/* Create and push object stream in the stack. */
		if (!hlua_txn_new(s->hlua->T, s, px, dir, hflags)) {
			SEND_ERR(px, "Lua function '%s': full stack.\n",
			         rule->arg.hlua_rule->fcn->name);
			RESET_SAFE_LJMP(s->hlua);
			goto end;
		}
		s->hlua->nargs = 1;

		/* push keywords in the stack. */
		for (arg = rule->arg.hlua_rule->args; arg && *arg; arg++) {
			if (!lua_checkstack(s->hlua->T, 1)) {
				SEND_ERR(px, "Lua function '%s': full stack.\n",
				         rule->arg.hlua_rule->fcn->name);
				RESET_SAFE_LJMP(s->hlua);
				goto end;
			}
			lua_pushstring(s->hlua->T, *arg);
			s->hlua->nargs++;
		}

		/* Now the execution is safe. */
		RESET_SAFE_LJMP(s->hlua);

		/* We must initialize the execution timeouts. */
		hlua_timer_init(&s->hlua->timer, hlua_timeout_session);
	}

	/* Execute the function. */
	switch (hlua_ctx_resume(s->hlua, !(flags & ACT_OPT_FINAL))) {
	/* finished. */
	case HLUA_E_OK:
		/* Catch the return value */
		if (lua_gettop(s->hlua->T) > 0)
			act_ret = lua_tointeger(s->hlua->T, -1);

		/* Set timeout in the required channel. tick_first() keeps the
		 * earliest of the current analyser expiration and the script's
		 * wake date; an already-expired date is treated as unset (0).
		 */
		if (act_ret == ACT_RET_YIELD) {
			if (flags & ACT_OPT_FINAL)
				goto err_yield;

			if (dir == SMP_OPT_DIR_REQ)
				s->req.analyse_exp = tick_first((tick_is_expired(s->req.analyse_exp, now_ms) ? 0 : s->req.analyse_exp),
				                                s->hlua->wake_time);
			else
				s->res.analyse_exp = tick_first((tick_is_expired(s->res.analyse_exp, now_ms) ? 0 : s->res.analyse_exp),
				                                s->hlua->wake_time);
		}
		goto end;

	/* yield. */
	case HLUA_E_AGAIN:
		/* Set timeout in the required channel. */
		if (dir == SMP_OPT_DIR_REQ)
			s->req.analyse_exp = tick_first((tick_is_expired(s->req.analyse_exp, now_ms) ? 0 : s->req.analyse_exp),
			                                s->hlua->wake_time);
		else
			s->res.analyse_exp = tick_first((tick_is_expired(s->res.analyse_exp, now_ms) ? 0 : s->res.analyse_exp),
			                                s->hlua->wake_time);

		/* Some actions can be wake up when a "write" event
		 * is detected on a response channel. This is useful
		 * only for actions targeted on the requests.
		 */
		if (HLUA_IS_WAKERESWR(s->hlua))
			s->res.flags |= CF_WAKE_WRITE;
		if (HLUA_IS_WAKEREQWR(s->hlua))
			s->req.flags |= CF_WAKE_WRITE;
		act_ret = ACT_RET_YIELD;
		goto end;

	/* finished with error. */
	case HLUA_E_ERRMSG:
		/* Display log. */
		SEND_ERR(px, "Lua function '%s': %s.\n",
		         rule->arg.hlua_rule->fcn->name, lua_tostring(s->hlua->T, -1));
		lua_pop(s->hlua->T, 1);
		goto end;

	case HLUA_E_ETMOUT:
		SEND_ERR(px, "Lua function '%s': execution timeout.\n", rule->arg.hlua_rule->fcn->name);
		goto end;

	case HLUA_E_NOMEM:
		SEND_ERR(px, "Lua function '%s': out of memory error.\n", rule->arg.hlua_rule->fcn->name);
		goto end;

	case HLUA_E_YIELD:
	err_yield:
		act_ret = ACT_RET_CONT;
		SEND_ERR(px, "Lua function '%s': yield not allowed.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto end;

	case HLUA_E_ERR:
		/* Display log. */
		SEND_ERR(px, "Lua function '%s' return an unknown error.\n",
		         rule->arg.hlua_rule->fcn->name);

	default:
		goto end;
	}

  end:
	/* The wake date only applies to the yield that just happened:
	 * reset it unless we are returning ACT_RET_YIELD again.
	 */
	if (act_ret != ACT_RET_YIELD && s->hlua)
		s->hlua->wake_time = TICK_ETERNITY;
	return act_ret;
}
+
+struct task *hlua_applet_wakeup(struct task *t, void *context, unsigned int state)
+{
+ struct appctx *ctx = context;
+
+ appctx_wakeup(ctx);
+ t->expire = TICK_ETERNITY;
+ return t;
+}
+
/* Applet .init callback for Lua TCP services. It allocates the Lua context
 * and the wakeup task, loads the service function, the applet object and the
 * configured arguments on the Lua stack, then requests an immediate wakeup.
 * Returns 0 on success, -1 on error.
 */
static int hlua_applet_tcp_init(struct appctx *ctx)
{
	struct hlua_tcp_ctx *tcp_ctx = applet_reserve_svcctx(ctx, sizeof(*tcp_ctx));
	struct stconn *sc = appctx_sc(ctx);
	struct stream *strm = __sc_strm(sc);
	struct hlua *hlua;
	struct task *task;
	char **arg;
	const char *error;

	hlua = pool_alloc(pool_head_hlua);
	if (!hlua) {
		SEND_ERR(strm->be, "Lua applet tcp '%s': out of memory.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}
	HLUA_INIT(hlua);
	tcp_ctx->hlua = hlua;
	tcp_ctx->flags = 0;

	/* Create task used by signal to wakeup applets.
	 * NOTE(review): on failure below, <hlua> was already stored in
	 * tcp_ctx->hlua; presumably the applet release callback frees it
	 * when the appctx is torn down -- confirm against appctx teardown.
	 */
	task = task_new_here();
	if (!task) {
		SEND_ERR(strm->be, "Lua applet tcp '%s': out of memory.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}
	task->nice = 0;
	task->context = ctx;
	task->process = hlua_applet_wakeup;
	tcp_ctx->task = task;

	/* In the execution wrappers linked with a stream, the
	 * Lua context can be not initialized. This behavior
	 * permits to save performances because a systematic
	 * Lua initialization cause 5% performances loss.
	 */
	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task)) {
		SEND_ERR(strm->be, "Lua applet tcp '%s': can't initialize Lua context.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}

	/* Set timeout according with the applet configuration. */
	hlua_timer_init(&hlua->timer, ctx->applet->timeout);

	/* The following Lua calls can fail. */
	if (!SET_SAFE_LJMP(hlua)) {
		if (lua_type(hlua->T, -1) == LUA_TSTRING)
			error = lua_tostring(hlua->T, -1);
		else
			error = "critical error";
		SEND_ERR(strm->be, "Lua applet tcp '%s': %s.\n",
		         ctx->rule->arg.hlua_rule->fcn->name, error);
		return -1;
	}

	/* Check stack available size. */
	if (!lua_checkstack(hlua->T, 1)) {
		SEND_ERR(strm->be, "Lua applet tcp '%s': full stack.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		RESET_SAFE_LJMP(hlua);
		return -1;
	}

	/* Restore the function in the stack. */
	hlua_pushref(hlua->T, ctx->rule->arg.hlua_rule->fcn->function_ref[hlua->state_id]);

	/* Create and push object stream in the stack. */
	if (!hlua_applet_tcp_new(hlua->T, ctx)) {
		SEND_ERR(strm->be, "Lua applet tcp '%s': full stack.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		RESET_SAFE_LJMP(hlua);
		return -1;
	}
	hlua->nargs = 1;

	/* push keywords in the stack. */
	for (arg = ctx->rule->arg.hlua_rule->args; arg && *arg; arg++) {
		if (!lua_checkstack(hlua->T, 1)) {
			SEND_ERR(strm->be, "Lua applet tcp '%s': full stack.\n",
			         ctx->rule->arg.hlua_rule->fcn->name);
			RESET_SAFE_LJMP(hlua);
			return -1;
		}
		lua_pushstring(hlua->T, *arg);
		hlua->nargs++;
	}

	RESET_SAFE_LJMP(hlua);

	/* Wakeup the applet ASAP. */
	applet_need_more_data(ctx);
	applet_have_more_data(ctx);

	return 0;
}
+
/* The applet I/O handler for Lua TCP services. It resumes the Lua function
 * and translates its completion status into applet state: success or error
 * terminate the applet, a yield re-schedules the wakeup task at the requested
 * date. Pending output data is always consumed before returning.
 */
void hlua_applet_tcp_fct(struct appctx *ctx)
{
	struct hlua_tcp_ctx *tcp_ctx = ctx->svcctx;
	struct stconn *sc = appctx_sc(ctx);
	struct stream *strm = __sc_strm(sc);
	struct act_rule *rule = ctx->rule;
	struct proxy *px = strm->be;
	struct hlua *hlua = tcp_ctx->hlua;

	/* Nothing to do if the endpoint is already closed or in error. */
	if (unlikely(se_fl_test(ctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
		goto out;

	/* The applet execution is already done. */
	if (tcp_ctx->flags & APPLET_DONE)
		goto out;

	/* Execute the function. */
	switch (hlua_ctx_resume(hlua, 1)) {
	/* finished. */
	case HLUA_E_OK:
		tcp_ctx->flags |= APPLET_DONE;
		se_fl_set(ctx->sedesc, SE_FL_EOI|SE_FL_EOS);
		break;

	/* yield. */
	case HLUA_E_AGAIN:
		/* Re-arm the wakeup task if the script asked for a wake date. */
		if (hlua->wake_time != TICK_ETERNITY)
			task_schedule(tcp_ctx->task, hlua->wake_time);
		break;

	/* finished with error. */
	case HLUA_E_ERRMSG:
		/* Display log. */
		SEND_ERR(px, "Lua applet tcp '%s': %s.\n",
		         rule->arg.hlua_rule->fcn->name, lua_tostring(hlua->T, -1));
		lua_pop(hlua->T, 1);
		goto error;

	case HLUA_E_ETMOUT:
		SEND_ERR(px, "Lua applet tcp '%s': execution timeout.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_NOMEM:
		SEND_ERR(px, "Lua applet tcp '%s': out of memory error.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_YIELD: /* unexpected */
		SEND_ERR(px, "Lua applet tcp '%s': yield not allowed.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_ERR:
		/* Display log. */
		SEND_ERR(px, "Lua applet tcp '%s' return an unknown error.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	default:
		goto error;
	}

out:
	/* eat the whole request */
	co_skip(sc_oc(sc), co_data(sc_oc(sc)));
	return;

error:
	/* Report the error to the endpoint and mark the applet as done so
	 * it is not resumed again.
	 */
	se_fl_set(ctx->sedesc, SE_FL_ERROR);
	tcp_ctx->flags |= APPLET_DONE;
	goto out;
}
+
+static void hlua_applet_tcp_release(struct appctx *ctx)
+{
+ struct hlua_tcp_ctx *tcp_ctx = ctx->svcctx;
+
+ task_destroy(tcp_ctx->task);
+ tcp_ctx->task = NULL;
+ hlua_ctx_destroy(tcp_ctx->hlua);
+ tcp_ctx->hlua = NULL;
+}
+
/* The function returns 0 if the initialisation is complete or -1 if
 * an errors occurs. It also reserves the appctx for an hlua_http_ctx.
 * It mirrors hlua_applet_tcp_init(): allocate the Lua context and the
 * wakeup task, then load the function, the applet object and the
 * configured arguments on the Lua stack.
 */
static int hlua_applet_http_init(struct appctx *ctx)
{
	struct hlua_http_ctx *http_ctx = applet_reserve_svcctx(ctx, sizeof(*http_ctx));
	struct stconn *sc = appctx_sc(ctx);
	struct stream *strm = __sc_strm(sc);
	struct http_txn *txn;
	struct hlua *hlua;
	char **arg;
	struct task *task;
	const char *error;

	txn = strm->txn;
	hlua = pool_alloc(pool_head_hlua);
	if (!hlua) {
		SEND_ERR(strm->be, "Lua applet http '%s': out of memory.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}
	HLUA_INIT(hlua);
	http_ctx->hlua = hlua;
	/* left_bytes == -1: no byte count set yet */
	http_ctx->left_bytes = -1;
	http_ctx->flags = 0;

	/* Remember the request HTTP version so the response can match it. */
	if (txn->req.flags & HTTP_MSGF_VER_11)
		http_ctx->flags |= APPLET_HTTP11;

	/* Create task used by signal to wakeup applets. */
	task = task_new_here();
	if (!task) {
		SEND_ERR(strm->be, "Lua applet http '%s': out of memory.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}
	task->nice = 0;
	task->context = ctx;
	task->process = hlua_applet_wakeup;
	http_ctx->task = task;

	/* In the execution wrappers linked with a stream, the
	 * Lua context can be not initialized. This behavior
	 * permits to save performances because a systematic
	 * Lua initialization cause 5% performances loss.
	 */
	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(ctx->rule->arg.hlua_rule->fcn), task)) {
		SEND_ERR(strm->be, "Lua applet http '%s': can't initialize Lua context.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		return -1;
	}

	/* Set timeout according with the applet configuration. */
	hlua_timer_init(&hlua->timer, ctx->applet->timeout);

	/* The following Lua calls can fail. */
	if (!SET_SAFE_LJMP(hlua)) {
		if (lua_type(hlua->T, -1) == LUA_TSTRING)
			error = lua_tostring(hlua->T, -1);
		else
			error = "critical error";
		SEND_ERR(strm->be, "Lua applet http '%s': %s.\n",
		         ctx->rule->arg.hlua_rule->fcn->name, error);
		return -1;
	}

	/* Check stack available size. */
	if (!lua_checkstack(hlua->T, 1)) {
		SEND_ERR(strm->be, "Lua applet http '%s': full stack.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		RESET_SAFE_LJMP(hlua);
		return -1;
	}

	/* Restore the function in the stack. */
	hlua_pushref(hlua->T, ctx->rule->arg.hlua_rule->fcn->function_ref[hlua->state_id]);

	/* Create and push object stream in the stack. */
	if (!hlua_applet_http_new(hlua->T, ctx)) {
		SEND_ERR(strm->be, "Lua applet http '%s': full stack.\n",
		         ctx->rule->arg.hlua_rule->fcn->name);
		RESET_SAFE_LJMP(hlua);
		return -1;
	}
	hlua->nargs = 1;

	/* push keywords in the stack. */
	for (arg = ctx->rule->arg.hlua_rule->args; arg && *arg; arg++) {
		if (!lua_checkstack(hlua->T, 1)) {
			SEND_ERR(strm->be, "Lua applet http '%s': full stack.\n",
			         ctx->rule->arg.hlua_rule->fcn->name);
			RESET_SAFE_LJMP(hlua);
			return -1;
		}
		lua_pushstring(hlua->T, *arg);
		hlua->nargs++;
	}

	RESET_SAFE_LJMP(hlua);

	/* Wakeup the applet when data is ready for read. */
	applet_need_more_data(ctx);

	return 0;
}
+
/* The applet I/O handler for Lua HTTP services. It resumes the Lua function
 * and, once done, completes the HTX response (adding an EOT block for chunked
 * messages when needed, then setting EOM). On error, a 500 response is
 * emitted in best effort when no header was sent yet.
 */
void hlua_applet_http_fct(struct appctx *ctx)
{
	struct hlua_http_ctx *http_ctx = ctx->svcctx;
	struct stconn *sc = appctx_sc(ctx);
	struct stream *strm = __sc_strm(sc);
	struct channel *req = sc_oc(sc);
	struct channel *res = sc_ic(sc);
	struct act_rule *rule = ctx->rule;
	struct proxy *px = strm->be;
	struct hlua *hlua = http_ctx->hlua;
	struct htx *req_htx, *res_htx;

	res_htx = htx_from_buf(&res->buf);

	/* Nothing to do if the endpoint is already closed or in error. */
	if (unlikely(se_fl_test(ctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
		goto out;

	/* The applet execution is already done. */
	if (http_ctx->flags & APPLET_DONE)
		goto out;

	/* Check if the input buffer is available. */
	if (!b_size(&res->buf)) {
		sc_need_room(sc, 0);
		goto out;
	}

	/* Set the currently running flag. */
	if (!HLUA_IS_RUNNING(hlua) &&
	    !(http_ctx->flags & APPLET_DONE)) {
		/* Do not start the script before some request data arrived. */
		if (!co_data(req)) {
			applet_need_more_data(ctx);
			goto out;
		}
	}

	/* Execute the function. */
	switch (hlua_ctx_resume(hlua, 1)) {
	/* finished. */
	case HLUA_E_OK:
		http_ctx->flags |= APPLET_DONE;
		break;

	/* yield. */
	case HLUA_E_AGAIN:
		/* Re-arm the wakeup task if the script asked for a wake date. */
		if (hlua->wake_time != TICK_ETERNITY)
			task_schedule(http_ctx->task, hlua->wake_time);
		goto out;

	/* finished with error. */
	case HLUA_E_ERRMSG:
		/* Display log. */
		SEND_ERR(px, "Lua applet http '%s': %s.\n",
		         rule->arg.hlua_rule->fcn->name, lua_tostring(hlua->T, -1));
		lua_pop(hlua->T, 1);
		goto error;

	case HLUA_E_ETMOUT:
		SEND_ERR(px, "Lua applet http '%s': execution timeout.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_NOMEM:
		SEND_ERR(px, "Lua applet http '%s': out of memory error.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_YIELD: /* unexpected */
		SEND_ERR(px, "Lua applet http '%s': yield not allowed.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	case HLUA_E_ERR:
		/* Display log. */
		SEND_ERR(px, "Lua applet http '%s' return an unknown error.\n",
		         rule->arg.hlua_rule->fcn->name);
		goto error;

	default:
		goto error;
	}

	if (http_ctx->flags & APPLET_DONE) {
		if (http_ctx->flags & APPLET_RSP_SENT)
			goto out;

		/* The script finished without sending any header: error out. */
		if (!(http_ctx->flags & APPLET_HDR_SENT))
			goto error;

		/* no more data are expected. If the response buffer is empty
		 * for a chunked message, be sure to add something (EOT block in
		 * this case) to have something to send. It is important to be
		 * sure the EOM flags will be handled by the endpoint.
		 */
		if (htx_is_empty(res_htx) && (strm->txn->rsp.flags & (HTTP_MSGF_XFER_LEN|HTTP_MSGF_CNT_LEN)) == HTTP_MSGF_XFER_LEN) {
			if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
				sc_need_room(sc, sizeof(struct htx_blk)+1);
				goto out;
			}
			channel_add_input(res, 1);
		}

		res_htx->flags |= HTX_FL_EOM;
		se_fl_set(ctx->sedesc, SE_FL_EOI|SE_FL_EOS);
		strm->txn->status = http_ctx->status;
		http_ctx->flags |= APPLET_RSP_SENT;
	}

 out:
	htx_to_buf(res_htx, &res->buf);
	/* eat the whole request */
	if (co_data(req)) {
		req_htx = htx_from_buf(&req->buf);
		co_htx_skip(req, req_htx, co_data(req));
		htx_to_buf(req_htx, &req->buf);
	}
	return;

 error:

	/* If we are in HTTP mode, and we are not send any
	 * data, return a 500 server error in best effort:
	 * if there is no room available in the buffer,
	 * just close the connection.
	 */
	if (!(http_ctx->flags & APPLET_HDR_SENT)) {
		struct buffer *err = &http_err_chunks[HTTP_ERR_500];

		channel_erase(res);
		res->buf.data = b_data(err);
		memcpy(res->buf.area, b_head(err), b_data(err));
		res_htx = htx_from_buf(&res->buf);
		channel_add_input(res, res_htx->data);
		se_fl_set(ctx->sedesc, SE_FL_EOI|SE_FL_EOS);
	}
	else
		se_fl_set(ctx->sedesc, SE_FL_ERROR);

	if (!(strm->flags & SF_ERR_MASK))
		strm->flags |= SF_ERR_RESOURCE;
	http_ctx->flags |= APPLET_DONE;
	goto out;
}
+
+static void hlua_applet_http_release(struct appctx *ctx)
+{
+ struct hlua_http_ctx *http_ctx = ctx->svcctx;
+
+ task_destroy(http_ctx->task);
+ http_ctx->task = NULL;
+ hlua_ctx_destroy(http_ctx->hlua);
+ http_ctx->hlua = NULL;
+}
+
+/* global {tcp|http}-request parser. Return ACT_RET_PRS_OK in
+ * success case, else return ACT_RET_PRS_ERR.
+ *
+ * This function can fail with an abort() due to an Lua critical error.
+ * We are in the configuration parsing process of HAProxy, this abort() is
+ * tolerated.
+ */
+static enum act_parse_ret action_register_lua(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct hlua_function *fcn = rule->kw->private;
+ int i;
+
+ /* Memory for the rule. */
+ rule->arg.hlua_rule = calloc(1, sizeof(*rule->arg.hlua_rule));
+ if (!rule->arg.hlua_rule) {
+ memprintf(err, "out of memory error");
+ goto error;
+ }
+
+ /* Memory for arguments. */
+ rule->arg.hlua_rule->args = calloc(fcn->nargs + 1,
+ sizeof(*rule->arg.hlua_rule->args));
+ if (!rule->arg.hlua_rule->args) {
+ memprintf(err, "out of memory error");
+ goto error;
+ }
+
+ /* Reference the Lua function and store the reference. */
+ rule->arg.hlua_rule->fcn = fcn;
+
+ /* Expect some arguments */
+ for (i = 0; i < fcn->nargs; i++) {
+ if (*args[*cur_arg] == '\0') {
+ memprintf(err, "expect %d arguments", fcn->nargs);
+ goto error;
+ }
+ rule->arg.hlua_rule->args[i] = strdup(args[*cur_arg]);
+ if (!rule->arg.hlua_rule->args[i]) {
+ memprintf(err, "out of memory error");
+ goto error;
+ }
+ (*cur_arg)++;
+ }
+ rule->arg.hlua_rule->args[i] = NULL;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = hlua_action;
+ return ACT_RET_PRS_OK;
+
+ error:
+ if (rule->arg.hlua_rule) {
+ if (rule->arg.hlua_rule->args) {
+ for (i = 0; i < fcn->nargs; i++)
+ ha_free(&rule->arg.hlua_rule->args[i]);
+ ha_free(&rule->arg.hlua_rule->args);
+ }
+ ha_free(&rule->arg.hlua_rule);
+ }
+ return ACT_RET_PRS_ERR;
+}
+
+static enum act_parse_ret action_register_service_http(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct hlua_function *fcn = rule->kw->private;
+
+ /* HTTP applets are forbidden in tcp-request rules.
+ * HTTP applet request requires everything initialized by
+ * "http_process_request" (analyzer flag AN_REQ_HTTP_INNER).
+ * The applet will be immediately initialized, but its before
+ * the call of this analyzer.
+ */
+ if (rule->from != ACT_F_HTTP_REQ) {
+ memprintf(err, "HTTP applets are forbidden from 'tcp-request' rulesets");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* Memory for the rule. */
+ rule->arg.hlua_rule = calloc(1, sizeof(*rule->arg.hlua_rule));
+ if (!rule->arg.hlua_rule) {
+ memprintf(err, "out of memory error");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* Reference the Lua function and store the reference. */
+ rule->arg.hlua_rule->fcn = fcn;
+
+ /* TODO: later accept arguments. */
+ rule->arg.hlua_rule->args = NULL;
+
+ /* Add applet pointer in the rule. */
+ rule->applet.obj_type = OBJ_TYPE_APPLET;
+ rule->applet.name = fcn->name;
+ rule->applet.init = hlua_applet_http_init;
+ rule->applet.fct = hlua_applet_http_fct;
+ rule->applet.release = hlua_applet_http_release;
+ rule->applet.timeout = hlua_timeout_applet;
+
+ return ACT_RET_PRS_OK;
+}
+
+/* This function is an LUA binding used for registering
+ * "sample-conv" functions. It expects a converter name used
+ * in the haproxy configuration file, and an LUA function.
+ */
+__LJMP static int hlua_register_action(lua_State *L)
+{
+ struct action_kw_list *akl = NULL;
+ const char *name;
+ int ref;
+ int len;
+ struct hlua_function *fcn = NULL;
+ int nargs;
+ struct buffer *trash;
+ struct action_kw *akw;
+
+ /* Initialise the number of expected arguments at 0. */
+ nargs = 0;
+
+ if (lua_gettop(L) < 3 || lua_gettop(L) > 4)
+ WILL_LJMP(luaL_error(L, "'register_action' needs between 3 and 4 arguments"));
+
+ if (hlua_gethlua(L)) {
+ /* runtime processing */
+ WILL_LJMP(luaL_error(L, "register_action: not available outside of body context"));
+ }
+
+ /* First argument : converter name. */
+ name = MAY_LJMP(luaL_checkstring(L, 1));
+
+ /* Second argument : environment. */
+ if (lua_type(L, 2) != LUA_TTABLE)
+ WILL_LJMP(luaL_error(L, "register_action: second argument must be a table of strings"));
+
+ /* Third argument : lua function. */
+ ref = MAY_LJMP(hlua_checkfunction(L, 3));
+
+ /* Fourth argument : number of mandatory arguments expected on the configuration line. */
+ if (lua_gettop(L) >= 4)
+ nargs = MAY_LJMP(luaL_checkinteger(L, 4));
+
+ /* browse the second argument as an array. */
+ lua_pushnil(L);
+ while (lua_next(L, 2) != 0) {
+ if (lua_type(L, -1) != LUA_TSTRING) {
+ hlua_unref(L, ref);
+ WILL_LJMP(luaL_error(L, "register_action: second argument must be a table of strings"));
+ }
+
+ /* Check if action exists */
+ trash = get_trash_chunk();
+ chunk_printf(trash, "lua.%s", name);
+ if (strcmp(lua_tostring(L, -1), "tcp-req") == 0) {
+ akw = tcp_req_cont_action(trash->area);
+ } else if (strcmp(lua_tostring(L, -1), "tcp-res") == 0) {
+ akw = tcp_res_cont_action(trash->area);
+ } else if (strcmp(lua_tostring(L, -1), "http-req") == 0) {
+ akw = action_http_req_custom(trash->area);
+ } else if (strcmp(lua_tostring(L, -1), "http-res") == 0) {
+ akw = action_http_res_custom(trash->area);
+ } else if (strcmp(lua_tostring(L, -1), "http-after-res") == 0) {
+ akw = action_http_after_res_custom(trash->area);
+ } else {
+ akw = NULL;
+ }
+ if (akw != NULL) {
+ fcn = akw->private;
+ if (fcn->function_ref[hlua_state_id] != -1) {
+ ha_warning("Trying to register action 'lua.%s' more than once. "
+ "This will become a hard error in version 2.5.\n", name);
+ hlua_unref(L, fcn->function_ref[hlua_state_id]);
+ }
+ fcn->function_ref[hlua_state_id] = ref;
+
+ /* pop the environment string. */
+ lua_pop(L, 1);
+ continue;
+ }
+
+ /* Check required environment. Only accepted "http" or "tcp". */
+ /* Allocate and fill the sample fetch keyword struct. */
+ akl = calloc(1, sizeof(*akl) + sizeof(struct action_kw) * 2);
+ if (!akl)
+ goto alloc_error;;
+ fcn = new_hlua_function();
+ if (!fcn)
+ goto alloc_error;
+
+ /* Fill fcn. */
+ fcn->name = strdup(name);
+ if (!fcn->name)
+ goto alloc_error;
+ fcn->function_ref[hlua_state_id] = ref;
+
+ /* Set the expected number of arguments. */
+ fcn->nargs = nargs;
+
+ /* List head */
+ akl->list.n = akl->list.p = NULL;
+
+ /* action keyword. */
+ len = strlen("lua.") + strlen(name) + 1;
+ akl->kw[0].kw = calloc(1, len);
+ if (!akl->kw[0].kw)
+ goto alloc_error;
+
+ snprintf((char *)akl->kw[0].kw, len, "lua.%s", name);
+
+ akl->kw[0].flags = 0;
+ akl->kw[0].private = fcn;
+ akl->kw[0].parse = action_register_lua;
+
+ /* select the action registering point. */
+ if (strcmp(lua_tostring(L, -1), "tcp-req") == 0)
+ tcp_req_cont_keywords_register(akl);
+ else if (strcmp(lua_tostring(L, -1), "tcp-res") == 0)
+ tcp_res_cont_keywords_register(akl);
+ else if (strcmp(lua_tostring(L, -1), "http-req") == 0)
+ http_req_keywords_register(akl);
+ else if (strcmp(lua_tostring(L, -1), "http-res") == 0)
+ http_res_keywords_register(akl);
+ else if (strcmp(lua_tostring(L, -1), "http-after-res") == 0)
+ http_after_res_keywords_register(akl);
+ else {
+ release_hlua_function(fcn);
+ hlua_unref(L, ref);
+ if (akl)
+ ha_free((char **)&(akl->kw[0].kw));
+ ha_free(&akl);
+ WILL_LJMP(luaL_error(L, "Lua action environment '%s' is unknown. "
+ "'tcp-req', 'tcp-res', 'http-req', 'http-res' "
+ "or 'http-after-res' "
+ "are expected.", lua_tostring(L, -1)));
+ }
+
+ /* pop the environment string. */
+ lua_pop(L, 1);
+
+ /* reset for next loop */
+ akl = NULL;
+ fcn = NULL;
+ }
+ return ACT_RET_PRS_OK;
+
+ alloc_error:
+ release_hlua_function(fcn);
+ hlua_unref(L, ref);
+ ha_free(&akl);
+ WILL_LJMP(luaL_error(L, "Lua out of memory error."));
+ return 0; /* Never reached */
+}
+
+static enum act_parse_ret action_register_service_tcp(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct hlua_function *fcn = rule->kw->private;
+
+ if (px->mode == PR_MODE_HTTP) {
+ memprintf(err, "Lua TCP services cannot be used on HTTP proxies");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* Memory for the rule. */
+ rule->arg.hlua_rule = calloc(1, sizeof(*rule->arg.hlua_rule));
+ if (!rule->arg.hlua_rule) {
+ memprintf(err, "out of memory error");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* Reference the Lua function and store the reference. */
+ rule->arg.hlua_rule->fcn = fcn;
+
+ /* TODO: later accept arguments. */
+ rule->arg.hlua_rule->args = NULL;
+
+ /* Add applet pointer in the rule. */
+ rule->applet.obj_type = OBJ_TYPE_APPLET;
+ rule->applet.name = fcn->name;
+ rule->applet.init = hlua_applet_tcp_init;
+ rule->applet.fct = hlua_applet_tcp_fct;
+ rule->applet.release = hlua_applet_tcp_release;
+ rule->applet.timeout = hlua_timeout_applet;
+
+ return 0;
+}
+
/* This function is an LUA binding used for registering a service
 * ("register_service"). It expects a service name used in the haproxy
 * configuration file, the service environment ("tcp" or "http") and an
 * LUA function implementing the service.
 */
__LJMP static int hlua_register_service(lua_State *L)
{
	struct action_kw_list *akl;
	const char *name;
	const char *env;
	int ref;
	int len;
	struct hlua_function *fcn = NULL;
	struct buffer *trash;
	struct action_kw *akw;

	MAY_LJMP(check_args(L, 3, "register_service"));

	if (hlua_gethlua(L)) {
		/* runtime processing */
		WILL_LJMP(luaL_error(L, "register_service: not available outside of body context"));
	}

	/* First argument : service name. */
	name = MAY_LJMP(luaL_checkstring(L, 1));

	/* Second argument : environment. */
	env = MAY_LJMP(luaL_checkstring(L, 2));

	/* Third argument : lua function. */
	ref = MAY_LJMP(hlua_checkfunction(L, 3));

	/* Check for service already registered */
	trash = get_trash_chunk();
	chunk_printf(trash, "lua.%s", name);
	akw = service_find(trash->area);
	if (akw != NULL) {
		/* Already known: just refresh the function reference for the
		 * current Lua state.
		 */
		fcn = akw->private;
		if (fcn->function_ref[hlua_state_id] != -1) {
			ha_warning("Trying to register service 'lua.%s' more than once. "
			           "This will become a hard error in version 2.5.\n", name);
			hlua_unref(L, fcn->function_ref[hlua_state_id]);
		}
		fcn->function_ref[hlua_state_id] = ref;
		return 0;
	}

	/* Allocate and fill the service keyword struct (one keyword plus
	 * the NULL terminator).
	 */
	akl = calloc(1, sizeof(*akl) + sizeof(struct action_kw) * 2);
	if (!akl)
		goto alloc_error;
	fcn = new_hlua_function();
	if (!fcn)
		goto alloc_error;

	/* Fill fcn. */
	len = strlen("<lua.>") + strlen(name) + 1;
	fcn->name = calloc(1, len);
	if (!fcn->name)
		goto alloc_error;
	snprintf((char *)fcn->name, len, "<lua.%s>", name);
	fcn->function_ref[hlua_state_id] = ref;

	/* List head */
	akl->list.n = akl->list.p = NULL;

	/* service keyword. */
	len = strlen("lua.") + strlen(name) + 1;
	akl->kw[0].kw = calloc(1, len);
	if (!akl->kw[0].kw)
		goto alloc_error;

	snprintf((char *)akl->kw[0].kw, len, "lua.%s", name);

	/* Check required environment. Only accepted "http" or "tcp". */
	if (strcmp(env, "tcp") == 0)
		akl->kw[0].parse = action_register_service_tcp;
	else if (strcmp(env, "http") == 0)
		akl->kw[0].parse = action_register_service_http;
	else {
		release_hlua_function(fcn);
		hlua_unref(L, ref);
		if (akl)
			ha_free((char **)&(akl->kw[0].kw));
		ha_free(&akl);
		WILL_LJMP(luaL_error(L, "Lua service environment '%s' is unknown. "
		                        "'tcp' or 'http' are expected.", env));
	}

	akl->kw[0].flags = 0;
	akl->kw[0].private = fcn;

	/* End of array. */
	memset(&akl->kw[1], 0, sizeof(*akl->kw));

	/* Register this new service keyword */
	service_keywords_register(akl);

	return 0;

 alloc_error:
	release_hlua_function(fcn);
	hlua_unref(L, ref);
	ha_free(&akl);
	WILL_LJMP(luaL_error(L, "Lua out of memory error."));
	return 0; /* Never reached */
}
+
/* This function initialises Lua cli handler. It copies the
 * arguments in the Lua stack and create channel IO objects.
 * Returns 0 when the Lua context is ready (the I/O handler will then
 * run), or 1 when initialisation failed.
 */
static int hlua_cli_parse_fct(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct hlua_cli_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	struct hlua *hlua;
	struct hlua_function *fcn;
	int i;
	const char *error;

	fcn = private;
	ctx->fcn = private;

	hlua = pool_alloc(pool_head_hlua);
	if (!hlua) {
		SEND_ERR(NULL, "Lua cli '%s': out of memory.\n", fcn->name);
		return 1;
	}
	HLUA_INIT(hlua);
	ctx->hlua = hlua;

	/* Create task used by signal to wakeup applets.
	 * We use the same wakeup function than the Lua applet_tcp and
	 * applet_http. It is absolutely compatible.
	 */
	ctx->task = task_new_here();
	if (!ctx->task) {
		SEND_ERR(NULL, "Lua cli '%s': out of memory.\n", fcn->name);
		goto error;
	}
	ctx->task->nice = 0;
	ctx->task->context = appctx;
	ctx->task->process = hlua_applet_wakeup;

	/* Initialises the Lua context */
	if (!hlua_ctx_init(hlua, fcn_ref_to_stack_id(fcn), ctx->task)) {
		SEND_ERR(NULL, "Lua cli '%s': can't initialize Lua context.\n", fcn->name);
		goto error;
	}

	/* The following Lua calls can fail. */
	if (!SET_SAFE_LJMP(hlua)) {
		if (lua_type(hlua->T, -1) == LUA_TSTRING)
			error = lua_tostring(hlua->T, -1);
		else
			error = "critical error";
		SEND_ERR(NULL, "Lua cli '%s': %s.\n", fcn->name, error);
		goto error;
	}

	/* Check stack available size. */
	if (!lua_checkstack(hlua->T, 2)) {
		SEND_ERR(NULL, "Lua cli '%s': full stack.\n", fcn->name);
		goto error;
	}

	/* Restore the function in the stack. */
	hlua_pushref(hlua->T, fcn->function_ref[hlua->state_id]);

	/* Once the arguments parsed, the CLI is like an AppletTCP,
	 * so push AppletTCP in the stack.
	 */
	if (!hlua_applet_tcp_new(hlua->T, appctx)) {
		SEND_ERR(NULL, "Lua cli '%s': full stack.\n", fcn->name);
		goto error;
	}
	hlua->nargs = 1;

	/* push keywords in the stack. */
	for (i = 0; *args[i]; i++) {
		/* Check stack available size. */
		if (!lua_checkstack(hlua->T, 1)) {
			SEND_ERR(NULL, "Lua cli '%s': full stack.\n", fcn->name);
			goto error;
		}
		lua_pushstring(hlua->T, args[i]);
		hlua->nargs++;
	}

	/* We must initialize the execution timeouts. */
	hlua_timer_init(&hlua->timer, hlua_timeout_session);

	/* At this point the execution is safe. */
	RESET_SAFE_LJMP(hlua);

	/* It's ok */
	return 0;

	/* It's not ok. */
error:
	/* NOTE(review): this path also runs when SET_SAFE_LJMP() was never
	 * entered (task or context init failures); confirm RESET_SAFE_LJMP()
	 * is harmless in that case.
	 */
	RESET_SAFE_LJMP(hlua);
	hlua_ctx_destroy(hlua);
	ctx->hlua = NULL;
	return 1;
}
+
+static int hlua_cli_io_handler_fct(struct appctx *appctx)
+{
+ struct hlua_cli_ctx *ctx = appctx->svcctx;
+ struct hlua *hlua;
+ struct stconn *sc;
+ struct hlua_function *fcn;
+
+ hlua = ctx->hlua;
+ sc = appctx_sc(appctx);
+ fcn = ctx->fcn;
+
+ /* Execute the function. */
+ switch (hlua_ctx_resume(hlua, 1)) {
+
+ /* finished. */
+ case HLUA_E_OK:
+ return 1;
+
+ /* yield. */
+ case HLUA_E_AGAIN:
+ /* We want write. */
+ if (HLUA_IS_WAKERESWR(hlua))
+ sc_need_room(sc, -1);
+ /* Set the timeout. */
+ if (hlua->wake_time != TICK_ETERNITY)
+ task_schedule(hlua->task, hlua->wake_time);
+ return 0;
+
+ /* finished with error. */
+ case HLUA_E_ERRMSG:
+ /* Display log. */
+ SEND_ERR(NULL, "Lua cli '%s': %s.\n",
+ fcn->name, lua_tostring(hlua->T, -1));
+ lua_pop(hlua->T, 1);
+ return 1;
+
+ case HLUA_E_ETMOUT:
+ SEND_ERR(NULL, "Lua converter '%s': execution timeout.\n",
+ fcn->name);
+ return 1;
+
+ case HLUA_E_NOMEM:
+ SEND_ERR(NULL, "Lua converter '%s': out of memory error.\n",
+ fcn->name);
+ return 1;
+
+ case HLUA_E_YIELD: /* unexpected */
+ SEND_ERR(NULL, "Lua converter '%s': yield not allowed.\n",
+ fcn->name);
+ return 1;
+
+ case HLUA_E_ERR:
+ /* Display log. */
+ SEND_ERR(NULL, "Lua cli '%s' return an unknown error.\n",
+ fcn->name);
+ return 1;
+
+ default:
+ return 1;
+ }
+
+ return 1;
+}
+
+static void hlua_cli_io_release_fct(struct appctx *appctx)
+{
+ struct hlua_cli_ctx *ctx = appctx->svcctx;
+
+ hlua_ctx_destroy(ctx->hlua);
+ ctx->hlua = NULL;
+}
+
+/* This function is an LUA binding used for registering
+ * new keywords in the cli. It expects a list of keywords
+ * which are the "path". It is limited to 5 keywords. A
+ * description of the command, a function to be executed
+ * for the parsing and a function for io handlers.
+ */
+__LJMP static int hlua_register_cli(lua_State *L)
+{
+ struct cli_kw_list *cli_kws;
+ const char *message;
+ int ref_io;
+ int len;
+ struct hlua_function *fcn = NULL;
+ int index;
+ int i;
+ struct buffer *trash;
+ const char *kw[5];
+ struct cli_kw *cli_kw;
+ const char *errmsg;
+ char *end;
+
+ MAY_LJMP(check_args(L, 3, "register_cli"));
+
+ if (hlua_gethlua(L)) {
+ /* runtime processing */
+ WILL_LJMP(luaL_error(L, "register_cli: not available outside of body context"));
+ }
+
+ /* First argument : an array of maximum 5 keywords. */
+ if (!lua_istable(L, 1))
+ WILL_LJMP(luaL_argerror(L, 1, "1st argument must be a table"));
+
+ /* Second argument : string with contextual message. */
+ message = MAY_LJMP(luaL_checkstring(L, 2));
+
+ /* Third and fourth argument : lua function. */
+ ref_io = MAY_LJMP(hlua_checkfunction(L, 3));
+
+ /* Check for CLI service already registered */
+ trash = get_trash_chunk();
+ index = 0;
+ lua_pushnil(L);
+ memset(kw, 0, sizeof(kw));
+ while (lua_next(L, 1) != 0) {
+ if (index >= CLI_PREFIX_KW_NB) {
+ hlua_unref(L, ref_io);
+ WILL_LJMP(luaL_argerror(L, 1, "1st argument must be a table with a maximum of 5 entries"));
+ }
+ if (lua_type(L, -1) != LUA_TSTRING) {
+ hlua_unref(L, ref_io);
+ WILL_LJMP(luaL_argerror(L, 1, "1st argument must be a table filled with strings"));
+ }
+ kw[index] = lua_tostring(L, -1);
+ if (index == 0)
+ chunk_printf(trash, "%s", kw[index]);
+ else
+ chunk_appendf(trash, " %s", kw[index]);
+ index++;
+ lua_pop(L, 1);
+ }
+ cli_kw = cli_find_kw_exact((char **)kw);
+ if (cli_kw != NULL) {
+ fcn = cli_kw->private;
+ if (fcn->function_ref[hlua_state_id] != -1) {
+ ha_warning("Trying to register CLI keyword 'lua.%s' more than once. "
+ "This will become a hard error in version 2.5.\n", trash->area);
+ hlua_unref(L, fcn->function_ref[hlua_state_id]);
+ }
+ fcn->function_ref[hlua_state_id] = ref_io;
+ return 0;
+ }
+
+ /* Allocate and fill the sample fetch keyword struct. */
+ cli_kws = calloc(1, sizeof(*cli_kws) + sizeof(struct cli_kw) * 2);
+ if (!cli_kws) {
+ errmsg = "Lua out of memory error.";
+ goto error;
+ }
+ fcn = new_hlua_function();
+ if (!fcn) {
+ errmsg = "Lua out of memory error.";
+ goto error;
+ }
+
+ /* Fill path. */
+ index = 0;
+ lua_pushnil(L);
+ while(lua_next(L, 1) != 0) {
+ if (index >= 5) {
+ errmsg = "1st argument must be a table with a maximum of 5 entries";
+ goto error;
+ }
+ if (lua_type(L, -1) != LUA_TSTRING) {
+ errmsg = "1st argument must be a table filled with strings";
+ goto error;
+ }
+ cli_kws->kw[0].str_kw[index] = strdup(lua_tostring(L, -1));
+ if (!cli_kws->kw[0].str_kw[index]) {
+ errmsg = "Lua out of memory error.";
+ goto error;
+ }
+ index++;
+ lua_pop(L, 1);
+ }
+
+ /* Copy help message. */
+ cli_kws->kw[0].usage = strdup(message);
+ if (!cli_kws->kw[0].usage) {
+ errmsg = "Lua out of memory error.";
+ goto error;
+ }
+
+ /* Fill fcn io handler. */
+ len = strlen("<lua.cli>") + 1;
+ for (i = 0; i < index; i++)
+ len += strlen(cli_kws->kw[0].str_kw[i]) + 1;
+ fcn->name = calloc(1, len);
+ if (!fcn->name) {
+ errmsg = "Lua out of memory error.";
+ goto error;
+ }
+
+ end = fcn->name;
+ len = 8;
+ memcpy(end, "<lua.cli", len);
+ end += len;
+
+ for (i = 0; i < index; i++) {
+ *(end++) = '.';
+ len = strlen(cli_kws->kw[0].str_kw[i]);
+ memcpy(end, cli_kws->kw[0].str_kw[i], len);
+ end += len;
+ }
+ *(end++) = '>';
+ *(end++) = 0;
+
+ fcn->function_ref[hlua_state_id] = ref_io;
+
+ /* Fill last entries. */
+ cli_kws->kw[0].private = fcn;
+ cli_kws->kw[0].parse = hlua_cli_parse_fct;
+ cli_kws->kw[0].io_handler = hlua_cli_io_handler_fct;
+ cli_kws->kw[0].io_release = hlua_cli_io_release_fct;
+
+ /* Register this new converter */
+ cli_register_kw(cli_kws);
+
+ return 0;
+
+ error:
+ release_hlua_function(fcn);
+ hlua_unref(L, ref_io);
+ if (cli_kws) {
+ for (i = 0; i < index; i++)
+ ha_free((char **)&(cli_kws->kw[0].str_kw[i]));
+ ha_free((char **)&(cli_kws->kw[0].usage));
+ }
+ ha_free(&cli_kws);
+ WILL_LJMP(luaL_error(L, errmsg));
+ return 0; /* Never reached */
+}
+
/* Per-thread init of a Lua filter declaration: duplicates the registered
 * filter class for this Lua state, then calls the user's parsing function
 * with (class_copy, args_table). Returns 0 on success, -1 on error.
 */
static int hlua_filter_init_per_thread(struct proxy *px, struct flt_conf *fconf)
{
	struct hlua_flt_config *conf = fconf->conf;
	lua_State *L;
	int error, pos, state_id, flt_ref;

	state_id = reg_flt_to_stack_id(conf->reg);
	L = hlua_states[state_id];
	pos = lua_gettop(L);

	/* The filter parsing function */
	hlua_pushref(L, conf->reg->fun_ref[state_id]);

	/* Push the filter class on the stack and resolve all callbacks */
	hlua_pushref(L, conf->reg->flt_ref[state_id]);

	/* Duplicate the filter class so each filter will have its own copy:
	 * shallow-copy every key/value pair of the class table (at pos+2)
	 * into a fresh table. */
	lua_newtable(L);
	lua_pushnil(L);

	while (lua_next(L, pos+2)) {
		/* stack: ... copy key value. Duplicate the key before
		 * lua_settable pops key/value so lua_next can keep iterating. */
		lua_pushvalue(L, -2);
		lua_insert(L, -2);
		lua_settable(L, -4);
	}
	/* Take a reference on the copy (pops it from the stack) */
	flt_ref = hlua_ref(L);

	/* Remove the original lua filter class from the stack */
	lua_pop(L, 1);

	/* Push the copy on the stack */
	hlua_pushref(L, flt_ref);

	/* extra args are pushed in a table */
	lua_newtable(L);
	for (pos = 0; conf->args[pos]; pos++) {
		/* Check stack available size. */
		if (!lua_checkstack(L, 1)) {
			ha_alert("Lua filter '%s' : Lua error : full stack.", conf->reg->name);
			goto error;
		}
		lua_pushstring(L, conf->args[pos]);
		/* append at the next integer index of the args table */
		lua_rawseti(L, -2, lua_rawlen(L, -2) + 1);
	}

	/* Call fun_ref(class_copy, args); on success keep the copy's ref. */
	error = lua_pcall(L, 2, LUA_MULTRET, 0);
	switch (error) {
	case LUA_OK:
		/* replace the filter ref */
		conf->ref[state_id] = flt_ref;
		break;
	case LUA_ERRRUN:
		ha_alert("Lua filter '%s' : runtime error : %s", conf->reg->name, lua_tostring(L, -1));
		goto error;
	case LUA_ERRMEM:
		ha_alert("Lua filter '%s' : out of memory error", conf->reg->name);
		goto error;
	case LUA_ERRERR:
		ha_alert("Lua filter '%s' : message handler error : %s", conf->reg->name, lua_tostring(L, -1));
		goto error;
#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM <= 503
	case LUA_ERRGCMM:
		ha_alert("Lua filter '%s' : garbage collector error : %s", conf->reg->name, lua_tostring(L, -1));
		goto error;
#endif
	default:
		ha_alert("Lua filter '%s' : unknown error : %s", conf->reg->name, lua_tostring(L, -1));
		goto error;
	}

	lua_settop(L, 0);
	return 0;

  error:
	lua_settop(L, 0);
	return -1;
}
+
+static void hlua_filter_deinit_per_thread(struct proxy *px, struct flt_conf *fconf)
+{
+ struct hlua_flt_config *conf = fconf->conf;
+ lua_State *L;
+ int state_id;
+
+ if (!conf)
+ return;
+
+ state_id = reg_flt_to_stack_id(conf->reg);
+ L = hlua_states[state_id];
+ hlua_unref(L, conf->ref[state_id]);
+}
+
+static int hlua_filter_init(struct proxy *px, struct flt_conf *fconf)
+{
+ struct hlua_flt_config *conf = fconf->conf;
+ int state_id = reg_flt_to_stack_id(conf->reg);
+
+ /* Rely on per-thread init for global scripts */
+ if (!state_id)
+ return hlua_filter_init_per_thread(px, fconf);
+ return 0;
+}
+
+static void hlua_filter_deinit(struct proxy *px, struct flt_conf *fconf)
+{
+
+ if (fconf->conf) {
+ struct hlua_flt_config *conf = fconf->conf;
+ int state_id = reg_flt_to_stack_id(conf->reg);
+ int pos;
+
+ /* Rely on per-thread deinit for global scripts */
+ if (!state_id)
+ hlua_filter_deinit_per_thread(px, fconf);
+
+ for (pos = 0; conf->args[pos]; pos++)
+ free(conf->args[pos]);
+ free(conf->args);
+ }
+ ha_free(&fconf->conf);
+ ha_free((char **)&fconf->id);
+ ha_free(&fconf->ops);
+}
+
/* Filter attach callback: instantiates the Lua filter for stream <s>.
 * Allocates a per-stream filter context with one Lua context per direction,
 * then calls the filter class' 'new' method. Returns 1 on success, 0 to
 * silently ignore the filter for this stream, -1 on error.
 */
static int hlua_filter_new(struct stream *s, struct filter *filter)
{
	struct hlua_flt_config *conf = FLT_CONF(filter);
	struct hlua_flt_ctx *flt_ctx = NULL;
	int ret = 1;

	/* Make sure the stream has a Lua context on the right state */
	if (!hlua_stream_ctx_prepare(s, reg_flt_to_stack_id(conf->reg))) {
		SEND_ERR(s->be, "Lua filter '%s': can't initialize filter Lua context.\n",
			 conf->reg->name);
		ret = 0;
		goto end;
	}

	/* One Lua context per direction (request/response) */
	flt_ctx = pool_zalloc(pool_head_hlua_flt_ctx);
	if (!flt_ctx) {
		SEND_ERR(s->be, "Lua filter '%s': can't initialize filter Lua context.\n",
			 conf->reg->name);
		ret = 0;
		goto end;
	}
	flt_ctx->hlua[0] = pool_alloc(pool_head_hlua);
	flt_ctx->hlua[1] = pool_alloc(pool_head_hlua);
	if (!flt_ctx->hlua[0] || !flt_ctx->hlua[1]) {
		SEND_ERR(s->be, "Lua filter '%s': can't initialize filter Lua context.\n",
			 conf->reg->name);
		ret = 0;
		goto end;
	}
	HLUA_INIT(flt_ctx->hlua[0]);
	HLUA_INIT(flt_ctx->hlua[1]);
	if (!hlua_ctx_init(flt_ctx->hlua[0], reg_flt_to_stack_id(conf->reg), s->task) ||
	    !hlua_ctx_init(flt_ctx->hlua[1], reg_flt_to_stack_id(conf->reg), s->task)) {
		SEND_ERR(s->be, "Lua filter '%s': can't initialize filter Lua context.\n",
			 conf->reg->name);
		ret = 0;
		goto end;
	}

	/* Set up the call to <class>.new() only when the coroutine is not
	 * already running (i.e. not resuming after a previous yield). */
	if (!HLUA_IS_RUNNING(s->hlua)) {
		/* The following Lua calls can fail. */
		if (!SET_SAFE_LJMP(s->hlua)) {
			const char *error;

			if (lua_type(s->hlua->T, -1) == LUA_TSTRING)
				error = lua_tostring(s->hlua->T, -1);
			else
				error = "critical error";
			SEND_ERR(s->be, "Lua filter '%s': %s.\n", conf->reg->name, error);
			ret = 0;
			goto end;
		}

		/* Check stack size. */
		if (!lua_checkstack(s->hlua->T, 1)) {
			SEND_ERR(s->be, "Lua filter '%s': full stack.\n", conf->reg->name);
			RESET_SAFE_LJMP(s->hlua);
			ret = 0;
			goto end;
		}

		/* Push the class copy, fetch its 'new' method and swap them so
		 * the function sits below its argument. */
		hlua_pushref(s->hlua->T, conf->ref[s->hlua->state_id]);
		if (lua_getfield(s->hlua->T, -1, "new") != LUA_TFUNCTION) {
			SEND_ERR(s->be, "Lua filter '%s': 'new' field is not a function.\n",
				 conf->reg->name);
			RESET_SAFE_LJMP(s->hlua);
			ret = 0;
			goto end;
		}
		lua_insert(s->hlua->T, -2);

		/* Push the copy on the stack */
		s->hlua->nargs = 1;

		/* We must initialize the execution timeouts. */
		hlua_timer_init(&s->hlua->timer, hlua_timeout_session);

		/* At this point the execution is safe. */
		RESET_SAFE_LJMP(s->hlua);
	}

	/* Run (or resume) the 'new' method; 'yield allowed' is 0. */
	switch (hlua_ctx_resume(s->hlua, 0)) {
	case HLUA_E_OK:
		/* Nothing returned or not a table, ignore the filter for current stream */
		if (!lua_gettop(s->hlua->T) || !lua_istable(s->hlua->T, 1)) {
			ret = 0;
			goto end;
		}

		/* Attached the filter pointer to the ctx */
		lua_pushstring(s->hlua->T, "__filter");
		lua_pushlightuserdata(s->hlua->T, filter);
		lua_settable(s->hlua->T, -3);

		/* Save a ref on the filter ctx */
		lua_pushvalue(s->hlua->T, 1);
		flt_ctx->ref = hlua_ref(s->hlua->T);
		filter->ctx = flt_ctx;
		break;
	case HLUA_E_ERRMSG:
		SEND_ERR(s->be, "Lua filter '%s' : %s.\n", conf->reg->name, lua_tostring(s->hlua->T, -1));
		ret = -1;
		goto end;
	case HLUA_E_ETMOUT:
		SEND_ERR(s->be, "Lua filter '%s' : 'new' execution timeout.\n", conf->reg->name);
		ret = 0;
		goto end;
	case HLUA_E_NOMEM:
		SEND_ERR(s->be, "Lua filter '%s' : out of memory error.\n", conf->reg->name);
		ret = 0;
		goto end;
	case HLUA_E_AGAIN:
	case HLUA_E_YIELD:
		SEND_ERR(s->be, "Lua filter '%s': yield functions like core.tcp() or core.sleep()"
			 " are not allowed from 'new' function.\n", conf->reg->name);
		ret = 0;
		goto end;
	case HLUA_E_ERR:
		SEND_ERR(s->be, "Lua filter '%s': 'new' returns an unknown error.\n", conf->reg->name);
		ret = 0;
		goto end;
	default:
		ret = 0;
		goto end;
	}

  end:
	if (s->hlua)
		lua_settop(s->hlua->T, 0);
	/* On failure or skip, release the partially initialized context */
	if (ret <= 0) {
		if (flt_ctx) {
			hlua_ctx_destroy(flt_ctx->hlua[0]);
			hlua_ctx_destroy(flt_ctx->hlua[1]);
			pool_free(pool_head_hlua_flt_ctx, flt_ctx);
		}
	}
	return ret;
}
+
+static void hlua_filter_delete(struct stream *s, struct filter *filter)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ hlua_unref(s->hlua->T, flt_ctx->ref);
+ hlua_ctx_destroy(flt_ctx->hlua[0]);
+ hlua_ctx_destroy(flt_ctx->hlua[1]);
+ pool_free(pool_head_hlua_flt_ctx, flt_ctx);
+ filter->ctx = NULL;
+}
+
+static int hlua_filter_from_payload(struct filter *filter)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ return (flt_ctx && !!(flt_ctx->flags & HLUA_FLT_CTX_FL_PAYLOAD));
+}
+
/* Common entry point for all Lua filter callbacks ('start_analyze',
 * 'http_headers', 'tcp_payload', ...). On first call it sets up the
 * coroutine stack (method, TXN object, optional channel/HTTP-message
 * argument), then resumes it and maps the resume status onto the filter
 * API conventions: 1 (or the Lua return value) to continue, 0 to wait,
 * -1 on error. <flags> is a combination of HLUA_FLT_CB_* bits controlling
 * argument passing, yieldability and return-value handling.
 */
static int hlua_filter_callback(struct stream *s, struct filter *filter, const char *fun,
				int dir, unsigned int flags)
{
	struct hlua *flt_hlua;
	struct hlua_flt_config *conf = FLT_CONF(filter);
	struct hlua_flt_ctx *flt_ctx = filter->ctx;
	unsigned int hflags = HLUA_TXN_FLT_CTX;
	int ret = 1;

	/* One Lua context per direction: 0 = request side, 1 = response side */
	flt_hlua = flt_ctx->hlua[(dir == SMP_OPT_DIR_REQ ? 0 : 1)];
	if (!flt_hlua)
		goto end;

	/* Set up the call only when not resuming after a yield */
	if (!HLUA_IS_RUNNING(flt_hlua)) {
		/* values already sitting on the stack (left by the caller, e.g.
		 * payload wrappers) will be forwarded as extra arguments below */
		int extra_idx = lua_gettop(flt_hlua->T);

		/* The following Lua calls can fail. */
		if (!SET_SAFE_LJMP(flt_hlua)) {
			const char *error;

			if (lua_type(flt_hlua->T, -1) == LUA_TSTRING)
				error = lua_tostring(flt_hlua->T, -1);
			else
				error = "critical error";
			SEND_ERR(s->be, "Lua filter '%s': %s.\n", conf->reg->name, error);
			goto end;
		}

		/* Check stack size. */
		if (!lua_checkstack(flt_hlua->T, 3)) {
			SEND_ERR(s->be, "Lua filter '%s': full stack.\n", conf->reg->name);
			RESET_SAFE_LJMP(flt_hlua);
			goto end;
		}

		/* Push the filter instance, fetch the callback method and swap
		 * them so the function sits below its first argument. A missing
		 * callback is not an error: just skip silently. */
		hlua_pushref(flt_hlua->T, flt_ctx->ref);
		if (lua_getfield(flt_hlua->T, -1, fun) != LUA_TFUNCTION) {
			RESET_SAFE_LJMP(flt_hlua);
			goto end;
		}
		lua_insert(flt_hlua->T, -2);

		/* Second argument: the TXN object for this stream/direction */
		if (!hlua_txn_new(flt_hlua->T, s, s->be, dir, hflags)) {
			SEND_ERR(s->be, "Lua filter '%s': full stack.\n", conf->reg->name);
			RESET_SAFE_LJMP(flt_hlua);
			goto end;
		}
		flt_hlua->nargs = 2;

		/* Optional third argument: the channel object, tagged with the
		 * filter pointer under the "__filter" key */
		if (flags & HLUA_FLT_CB_ARG_CHN) {
			if (dir == SMP_OPT_DIR_REQ)
				lua_getfield(flt_hlua->T, -1, "req");
			else
				lua_getfield(flt_hlua->T, -1, "res");
			if (lua_type(flt_hlua->T, -1) == LUA_TTABLE) {
				lua_pushstring(flt_hlua->T, "__filter");
				lua_pushlightuserdata(flt_hlua->T, filter);
				lua_settable(flt_hlua->T, -3);
			}
			flt_hlua->nargs++;
		}
		/* ... or the HTTP message object, tagged the same way */
		else if (flags & HLUA_FLT_CB_ARG_HTTP_MSG) {
			if (dir == SMP_OPT_DIR_REQ)
				lua_getfield(flt_hlua->T, -1, "http_req");
			else
				lua_getfield(flt_hlua->T, -1, "http_res");
			if (lua_type(flt_hlua->T, -1) == LUA_TTABLE) {
				lua_pushstring(flt_hlua->T, "__filter");
				lua_pushlightuserdata(flt_hlua->T, filter);
				lua_settable(flt_hlua->T, -3);
			}
			flt_hlua->nargs++;
		}

		/* Check stack size. */
		if (!lua_checkstack(flt_hlua->T, 1)) {
			SEND_ERR(s->be, "Lua filter '%s': full stack.\n", conf->reg->name);
			RESET_SAFE_LJMP(flt_hlua);
			goto end;
		}

		/* Move the <extra_idx> pre-existing bottom values to the top so
		 * they become trailing arguments of the call */
		while (extra_idx--) {
			lua_pushvalue(flt_hlua->T, 1);
			lua_remove(flt_hlua->T, 1);
			flt_hlua->nargs++;
		}

		/* We must initialize the execution timeouts. */
		hlua_timer_init(&flt_hlua->timer, hlua_timeout_session);

		/* At this point the execution is safe. */
		RESET_SAFE_LJMP(flt_hlua);
	}

	/* Resume; yields are allowed unless this is a "final" callback */
	switch (hlua_ctx_resume(flt_hlua, !(flags & HLUA_FLT_CB_FINAL))) {
	case HLUA_E_OK:
		/* Catch the return value if it required */
		if ((flags & HLUA_FLT_CB_RETVAL) && lua_gettop(flt_hlua->T) > 0) {
			ret = lua_tointeger(flt_hlua->T, -1);
			lua_settop(flt_hlua->T, 0); /* Empty the stack. */
		}

		/* Set timeout in the required channel. */
		if (flt_hlua->wake_time != TICK_ETERNITY) {
			if (dir == SMP_OPT_DIR_REQ)
				s->req.analyse_exp = flt_hlua->wake_time;
			else
				s->res.analyse_exp = flt_hlua->wake_time;
		}
		break;
	case HLUA_E_AGAIN:
		/* Set timeout in the required channel. */
		if (flt_hlua->wake_time != TICK_ETERNITY) {
			if (dir == SMP_OPT_DIR_REQ)
				s->req.analyse_exp = flt_hlua->wake_time;
			else
				s->res.analyse_exp = flt_hlua->wake_time;
		}
		/* Some actions can be wake up when a "write" event
		 * is detected on a response channel. This is useful
		 * only for actions targeted on the requests.
		 */
		if (HLUA_IS_WAKERESWR(flt_hlua))
			s->res.flags |= CF_WAKE_WRITE;
		if (HLUA_IS_WAKEREQWR(flt_hlua))
			s->req.flags |= CF_WAKE_WRITE;
		ret = 0;
		goto end;
	case HLUA_E_ERRMSG:
		SEND_ERR(s->be, "Lua filter '%s' : %s.\n", conf->reg->name, lua_tostring(flt_hlua->T, -1));
		ret = -1;
		goto end;
	case HLUA_E_ETMOUT:
		SEND_ERR(s->be, "Lua filter '%s' : '%s' callback execution timeout.\n", conf->reg->name, fun);
		goto end;
	case HLUA_E_NOMEM:
		SEND_ERR(s->be, "Lua filter '%s' : out of memory error.\n", conf->reg->name);
		goto end;
	case HLUA_E_YIELD:
		SEND_ERR(s->be, "Lua filter '%s': yield functions like core.tcp() or core.sleep()"
			 " are not allowed from '%s' callback.\n", conf->reg->name, fun);
		goto end;
	case HLUA_E_ERR:
		SEND_ERR(s->be, "Lua filter '%s': '%s' returns an unknown error.\n", conf->reg->name, fun);
		goto end;
	default:
		goto end;
	}


  end:
	return ret;
}
+
+static int hlua_filter_start_analyze(struct stream *s, struct filter *filter, struct channel *chn)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ flt_ctx->flags = 0;
+ return hlua_filter_callback(s, filter, "start_analyze",
+ (!(chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES),
+ (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_RETVAL | HLUA_FLT_CB_ARG_CHN));
+}
+
+static int hlua_filter_end_analyze(struct stream *s, struct filter *filter, struct channel *chn)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ flt_ctx->flags &= ~HLUA_FLT_CTX_FL_PAYLOAD;
+ return hlua_filter_callback(s, filter, "end_analyze",
+ (!(chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES),
+ (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_RETVAL | HLUA_FLT_CB_ARG_CHN));
+}
+
+static int hlua_filter_http_headers(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ flt_ctx->flags &= ~HLUA_FLT_CTX_FL_PAYLOAD;
+ return hlua_filter_callback(s, filter, "http_headers",
+ (!(msg->chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES),
+ (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_RETVAL | HLUA_FLT_CB_ARG_HTTP_MSG));
+}
+
+static int hlua_filter_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
+ unsigned int offset, unsigned int len)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+ struct hlua *flt_hlua;
+ int dir = (!(msg->chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES);
+ int idx = (dir == SMP_OPT_DIR_REQ ? 0 : 1);
+ int ret;
+
+ flt_hlua = flt_ctx->hlua[idx];
+ flt_ctx->cur_off[idx] = offset;
+ flt_ctx->cur_len[idx] = len;
+ flt_ctx->flags |= HLUA_FLT_CTX_FL_PAYLOAD;
+ ret = hlua_filter_callback(s, filter, "http_payload", dir, (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_ARG_HTTP_MSG));
+ if (ret != -1) {
+ ret = flt_ctx->cur_len[idx];
+ if (lua_gettop(flt_hlua->T) > 0) {
+ ret = lua_tointeger(flt_hlua->T, -1);
+ if (ret > flt_ctx->cur_len[idx])
+ ret = flt_ctx->cur_len[idx];
+ lua_settop(flt_hlua->T, 0); /* Empty the stack. */
+ }
+ }
+ return ret;
+}
+
+static int hlua_filter_http_end(struct stream *s, struct filter *filter, struct http_msg *msg)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+
+ flt_ctx->flags &= ~HLUA_FLT_CTX_FL_PAYLOAD;
+ return hlua_filter_callback(s, filter, "http_end",
+ (!(msg->chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES),
+ (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_RETVAL | HLUA_FLT_CB_ARG_HTTP_MSG));
+}
+
+static int hlua_filter_tcp_payload(struct stream *s, struct filter *filter, struct channel *chn,
+ unsigned int offset, unsigned int len)
+{
+ struct hlua_flt_ctx *flt_ctx = filter->ctx;
+ struct hlua *flt_hlua;
+ int dir = (!(chn->flags & CF_ISRESP) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES);
+ int idx = (dir == SMP_OPT_DIR_REQ ? 0 : 1);
+ int ret;
+
+ flt_hlua = flt_ctx->hlua[idx];
+ flt_ctx->cur_off[idx] = offset;
+ flt_ctx->cur_len[idx] = len;
+ flt_ctx->flags |= HLUA_FLT_CTX_FL_PAYLOAD;
+ ret = hlua_filter_callback(s, filter, "tcp_payload", dir, (HLUA_FLT_CB_FINAL | HLUA_FLT_CB_ARG_CHN));
+ if (ret != -1) {
+ ret = flt_ctx->cur_len[idx];
+ if (lua_gettop(flt_hlua->T) > 0) {
+ ret = lua_tointeger(flt_hlua->T, -1);
+ if (ret > flt_ctx->cur_len[idx])
+ ret = flt_ctx->cur_len[idx];
+ lua_settop(flt_hlua->T, 0); /* Empty the stack. */
+ }
+ }
+ return ret;
+}
+
/* Configuration parser for a "lua.<name>" filter keyword. Builds the
 * flt_ops table (resolving which optional callbacks the Lua class defines),
 * reads the class' 'id' and 'flags' fields, duplicates the remaining
 * configuration arguments, and fills <fconf>. Returns 0 on success, -1 on
 * error with a message in <err>.
 */
static int hlua_filter_parse_fct(char **args, int *cur_arg, struct proxy *px,
				 struct flt_conf *fconf, char **err, void *private)
{
	struct hlua_reg_filter *reg_flt = private;
	lua_State *L;
	struct hlua_flt_config *conf = NULL;
	const char *flt_id = NULL;
	int state_id, pos, flt_flags = 0;
	struct flt_ops *hlua_flt_ops = NULL;

	state_id = reg_flt_to_stack_id(reg_flt);
	L = hlua_states[state_id];

	/* Initialize the filter ops with default callbacks */
	hlua_flt_ops = calloc(1, sizeof(*hlua_flt_ops));
	if (!hlua_flt_ops)
		goto error;
	hlua_flt_ops->init = hlua_filter_init;
	hlua_flt_ops->deinit = hlua_filter_deinit;
	if (state_id) {
		/* Set per-thread callback if script is loaded per-thread */
		hlua_flt_ops->init_per_thread = hlua_filter_init_per_thread;
		hlua_flt_ops->deinit_per_thread = hlua_filter_deinit_per_thread;
	}
	hlua_flt_ops->attach = hlua_filter_new;
	hlua_flt_ops->detach = hlua_filter_delete;

	/* Push the filter class on the stack and resolve all callbacks */
	hlua_pushref(L, reg_flt->flt_ref[state_id]);

	/* Each hook is installed only when the class defines the matching
	 * function; lua_getfield pushes the field, lua_pop drops it again. */
	if (lua_getfield(L, -1, "start_analyze") == LUA_TFUNCTION)
		hlua_flt_ops->channel_start_analyze = hlua_filter_start_analyze;
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "end_analyze") == LUA_TFUNCTION)
		hlua_flt_ops->channel_end_analyze = hlua_filter_end_analyze;
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "http_headers") == LUA_TFUNCTION)
		hlua_flt_ops->http_headers = hlua_filter_http_headers;
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "http_payload") == LUA_TFUNCTION)
		hlua_flt_ops->http_payload = hlua_filter_http_payload;
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "http_end") == LUA_TFUNCTION)
		hlua_flt_ops->http_end = hlua_filter_http_end;
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "tcp_payload") == LUA_TFUNCTION)
		hlua_flt_ops->tcp_payload = hlua_filter_tcp_payload;
	lua_pop(L, 1);

	/* Get id and flags of the filter class */
	if (lua_getfield(L, -1, "id") == LUA_TSTRING)
		flt_id = lua_tostring(L, -1);
	lua_pop(L, 1);
	if (lua_getfield(L, -1, "flags") == LUA_TNUMBER)
		flt_flags = lua_tointeger(L, -1);
	lua_pop(L, 1);

	/* Create the filter config */
	conf = calloc(1, sizeof(*conf));
	if (!conf)
		goto error;
	conf->reg = reg_flt;

	/* duplicate args: first count them, then copy each string */
	for (pos = 0; *args[*cur_arg + 1 + pos]; pos++);
	conf->args = calloc(pos + 1, sizeof(*conf->args));
	if (!conf->args)
		goto error;
	for (pos = 0; *args[*cur_arg + 1 + pos]; pos++) {
		conf->args[pos] = strdup(args[*cur_arg + 1 + pos]);
		if (!conf->args[pos])
			goto error;
	}
	conf->args[pos] = NULL;
	*cur_arg += pos + 1;

	/* <flt_id> points into the Lua stack, so it must be duplicated
	 * before the stack is reset */
	if (flt_id) {
		fconf->id = strdup(flt_id);
		if (!fconf->id)
			goto error;
	}
	fconf->flags = flt_flags;
	fconf->conf = conf;
	fconf->ops = hlua_flt_ops;

	lua_settop(L, 0);
	return 0;

  error:
	memprintf(err, "Lua filter '%s' : Lua out of memory error", reg_flt->name);
	free(hlua_flt_ops);
	if (conf && conf->args) {
		for (pos = 0; conf->args[pos]; pos++)
			free(conf->args[pos]);
		free(conf->args);
	}
	free(conf);
	free((char *)fconf->id);
	lua_settop(L, 0);
	return -1;
}
+
+__LJMP static int hlua_register_data_filter(lua_State *L)
+{
+ struct filter *filter;
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 2, "register_data_filter"));
+ MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+ chn = MAY_LJMP(hlua_checkchannel(L, 2));
+
+ lua_getfield(L, 1, "__filter");
+ MAY_LJMP(luaL_checktype(L, -1, LUA_TLIGHTUSERDATA));
+ filter = lua_touserdata (L, -1);
+ lua_pop(L, 1);
+
+ register_data_filter(chn_strm(chn), chn, filter);
+ return 1;
+}
+
+__LJMP static int hlua_unregister_data_filter(lua_State *L)
+{
+ struct filter *filter;
+ struct channel *chn;
+
+ MAY_LJMP(check_args(L, 2, "unregister_data_filter"));
+ MAY_LJMP(luaL_checktype(L, 1, LUA_TTABLE));
+ chn = MAY_LJMP(hlua_checkchannel(L, 2));
+
+ lua_getfield(L, 1, "__filter");
+ MAY_LJMP(luaL_checktype(L, -1, LUA_TLIGHTUSERDATA));
+ filter = lua_touserdata (L, -1);
+ lua_pop(L, 1);
+
+ unregister_data_filter(chn_strm(chn), chn, filter);
+ return 1;
+}
+
/* This function is an LUA binding used for registering a filter. It expects a
 * filter name used in the haproxy configuration file and a LUA function to
 * parse configuration arguments. The resulting config keyword is "lua.<name>".
 * Returns 0 on success, raises a Lua error on allocation failure.
 */
__LJMP static int hlua_register_filter(lua_State *L)
{
	struct buffer *trash;
	struct flt_kw_list *fkl;
	struct flt_kw *fkw;
	const char *name;
	struct hlua_reg_filter *reg_flt= NULL;
	int flt_ref, fun_ref;
	int len;

	MAY_LJMP(check_args(L, 3, "register_filter"));

	if (hlua_gethlua(L)) {
		/* runtime processing */
		WILL_LJMP(luaL_error(L, "register_filter: not available outside of body context"));
	}

	/* First argument : filter name. */
	name = MAY_LJMP(luaL_checkstring(L, 1));

	/* Second argument : The filter class */
	flt_ref = MAY_LJMP(hlua_checktable(L, 2));

	/* Third argument : lua function. */
	fun_ref = MAY_LJMP(hlua_checkfunction(L, 3));

	/* If the keyword already exists, just replace the class/function refs
	 * for the current Lua state (re-registration), releasing any previous
	 * references first. */
	trash = get_trash_chunk();
	chunk_printf(trash, "lua.%s", name);
	fkw = flt_find_kw(trash->area);
	if (fkw != NULL) {
		reg_flt = fkw->private;
		if (reg_flt->flt_ref[hlua_state_id] != -1 || reg_flt->fun_ref[hlua_state_id] != -1) {
			ha_warning("Trying to register filter 'lua.%s' more than once. "
				   "This will become a hard error in version 2.5.\n", name);
			if (reg_flt->flt_ref[hlua_state_id] != -1)
				hlua_unref(L, reg_flt->flt_ref[hlua_state_id]);
			if (reg_flt->fun_ref[hlua_state_id] != -1)
				hlua_unref(L, reg_flt->fun_ref[hlua_state_id]);
		}
		reg_flt->flt_ref[hlua_state_id] = flt_ref;
		reg_flt->fun_ref[hlua_state_id] = fun_ref;
		return 0;
	}

	/* One entry + null terminator */
	fkl = calloc(1, sizeof(*fkl) + sizeof(struct flt_kw) * 2);
	if (!fkl)
		goto alloc_error;
	fkl->scope = "HLUA";

	reg_flt = new_hlua_reg_filter(name);
	if (!reg_flt)
		goto alloc_error;

	reg_flt->flt_ref[hlua_state_id] = flt_ref;
	reg_flt->fun_ref[hlua_state_id] = fun_ref;

	/* The filter keyword: "lua." + name */
	len = strlen("lua.") + strlen(name) + 1;
	fkl->kw[0].kw = calloc(1, len);
	if (!fkl->kw[0].kw)
		goto alloc_error;

	snprintf((char *)fkl->kw[0].kw, len, "lua.%s", name);

	fkl->kw[0].parse = hlua_filter_parse_fct;
	fkl->kw[0].private = reg_flt;
	memset(&fkl->kw[1], 0, sizeof(*fkl->kw));

	/* Register this new filter */
	flt_register_keywords(fkl);

	return 0;

  alloc_error:
	/* release everything allocated so far, including the Lua refs */
	release_hlua_reg_filter(reg_flt);
	hlua_unref(L, flt_ref);
	hlua_unref(L, fun_ref);
	ha_free(&fkl);
	WILL_LJMP(luaL_error(L, "Lua out of memory error."));
	return 0; /* Never reached */
}
+
+static int hlua_read_timeout(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err, unsigned int *timeout)
+{
+ const char *error;
+
+ error = parse_time_err(args[1], timeout, TIME_UNIT_MS);
+ if (error == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument <%s> to <%s> (maximum value is 2147483647 ms or ~24.8 days)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (error == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument <%s> to <%s> (minimum non-null value is 1 ms)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (error) {
+ memprintf(err, "%s: invalid timeout", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
/* Config parser for "tune.lua.burst-timeout": forwards to the common
 * timeout parser, storing into hlua_timeout_burst. */
static int hlua_burst_timeout(char **args, int section_type, struct proxy *curpx,
                              const struct proxy *defpx, const char *file, int line,
                              char **err)
{
	return hlua_read_timeout(args, section_type, curpx, defpx,
	                         file, line, err, &hlua_timeout_burst);
}
+
/* Config parser for "tune.lua.session-timeout": forwards to the common
 * timeout parser, storing into hlua_timeout_session. */
static int hlua_session_timeout(char **args, int section_type, struct proxy *curpx,
                                const struct proxy *defpx, const char *file, int line,
                                char **err)
{
	return hlua_read_timeout(args, section_type, curpx, defpx,
	                         file, line, err, &hlua_timeout_session);
}
+
/* Config parser for "tune.lua.task-timeout": forwards to the common
 * timeout parser, storing into hlua_timeout_task. */
static int hlua_task_timeout(char **args, int section_type, struct proxy *curpx,
                             const struct proxy *defpx, const char *file, int line,
                             char **err)
{
	return hlua_read_timeout(args, section_type, curpx, defpx,
	                         file, line, err, &hlua_timeout_task);
}
+
/* Config parser for "tune.lua.service-timeout": forwards to the common
 * timeout parser, storing into hlua_timeout_applet. */
static int hlua_applet_timeout(char **args, int section_type, struct proxy *curpx,
                               const struct proxy *defpx, const char *file, int line,
                               char **err)
{
	return hlua_read_timeout(args, section_type, curpx, defpx,
	                         file, line, err, &hlua_timeout_applet);
}
+
+static int hlua_forced_yield(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char *error;
+
+ hlua_nb_instruction = strtoll(args[1], &error, 10);
+ if (*error != '\0') {
+ memprintf(err, "%s: invalid number", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+static int hlua_parse_maxmem(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char *error;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects an integer argument (Lua memory size in MB).", args[0]);
+ return -1;
+ }
+ hlua_global_allocator.limit = strtoll(args[1], &error, 10) * 1024L * 1024L;
+ if (*error != '\0') {
+ memprintf(err, "%s: invalid number %s (error at '%c')", args[0], args[1], *error);
+ return -1;
+ }
+ return 0;
+}
+
+static int hlua_cfg_parse_log_loggers(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ hlua_log_opts |= HLUA_LOG_LOGGERS_ON;
+ else if (strcmp(args[1], "off") == 0)
+ hlua_log_opts &= ~HLUA_LOG_LOGGERS_ON;
+ else {
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+static int hlua_cfg_parse_log_stderr(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ hlua_log_opts = (hlua_log_opts & ~HLUA_LOG_STDERR_MASK) | HLUA_LOG_STDERR_ON;
+ else if (strcmp(args[1], "auto") == 0)
+ hlua_log_opts = (hlua_log_opts & ~HLUA_LOG_STDERR_MASK) | HLUA_LOG_STDERR_AUTO;
+ else if (strcmp(args[1], "off") == 0)
+ hlua_log_opts &= ~HLUA_LOG_STDERR_MASK;
+ else {
+ memprintf(err, "'%s' expects either 'on', 'auto', or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
/* Loads, compiles and executes one Lua file in state <L>. This is the
 * common helper behind the "lua-load" and "lua-load-per-thread"
 * configuration keywords. args[0] is the file name; args[1..] are optional
 * extra arguments pushed on the chunk's stack before execution.
 *
 * This function runs with the HAProxy keywords API. It returns -1 if an
 * error occurs, otherwise it returns 0. When Lua left an error message on
 * top of the stack, it is reported through <err> and popped.
 *
 * This function can fail with an abort() due to an Lua critical error.
 * We are in the configuration parsing process of HAProxy, this abort() is
 * tolerated.
 */
static int hlua_load_state(char **args, lua_State *L, char **err)
{
	int error;
	int nargs;

	/* Just load and compile the file. */
	error = luaL_loadfile(L, args[0]);
	if (error) {
		memprintf(err, "error in Lua file '%s': %s", args[0], lua_tostring(L, -1));
		lua_pop(L, 1);
		return -1;
	}

	/* Push args in the Lua stack, except the first one which is the filename */
	for (nargs = 1; *(args[nargs]) != 0; nargs++) {
		/* Check stack size. */
		if (!lua_checkstack(L, 1)) {
			/* NOTE(review): the loaded chunk and already-pushed args
			 * stay on the stack on this path. */
			memprintf(err, "Lua runtime error while loading arguments: stack is full.");
			return -1;
		}
		lua_pushstring(L, args[nargs]);
	}
	/* nargs now equals the number of extra arguments pushed */
	nargs--;

	/* If no syntax error where detected, execute the code. */
	error = lua_pcall(L, nargs, LUA_MULTRET, 0);
	switch (error) {
	case LUA_OK:
		break;
	case LUA_ERRRUN:
		memprintf(err, "Lua runtime error: %s", lua_tostring(L, -1));
		lua_pop(L, 1);
		return -1;
	case LUA_ERRMEM:
		memprintf(err, "Lua out of memory error");
		return -1;
	case LUA_ERRERR:
		memprintf(err, "Lua message handler error: %s", lua_tostring(L, -1));
		lua_pop(L, 1);
		return -1;
#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM <= 503
	case LUA_ERRGCMM:
		memprintf(err, "Lua garbage collector error: %s", lua_tostring(L, -1));
		lua_pop(L, 1);
		return -1;
#endif
	default:
		memprintf(err, "Lua unknown error: %s", lua_tostring(L, -1));
		lua_pop(L, 1);
		return -1;
	}

	return 0;
}
+
+static int hlua_load(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects a file name as parameter.", args[0]);
+ return -1;
+ }
+
+ /* loading for global state */
+ hlua_state_id = 0;
+ ha_set_thread(NULL);
+ return hlua_load_state(&args[1], hlua_states[0], err);
+}
+
+static int hlua_load_per_thread(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int len;
+ int i;
+
+ if (*(args[1]) == 0) {
+ memprintf(err, "'%s' expects a file as parameter.", args[0]);
+ return -1;
+ }
+
+ if (per_thread_load == NULL) {
+ /* allocate the first entry large enough to store the final NULL */
+ per_thread_load = calloc(1, sizeof(*per_thread_load));
+ if (per_thread_load == NULL) {
+ memprintf(err, "out of memory error");
+ return -1;
+ }
+ }
+
+ /* count used entries */
+ for (len = 0; per_thread_load[len] != NULL; len++)
+ ;
+
+ per_thread_load = realloc(per_thread_load, (len + 2) * sizeof(*per_thread_load));
+ if (per_thread_load == NULL) {
+ memprintf(err, "out of memory error");
+ return -1;
+ }
+ per_thread_load[len + 1] = NULL;
+
+ /* count args excepting the first, allocate array and copy args */
+ for (i = 0; *(args[i + 1]) != 0; i++);
+ per_thread_load[len] = calloc(i + 1, sizeof(*per_thread_load[len]));
+ if (per_thread_load[len] == NULL) {
+ memprintf(err, "out of memory error");
+ return -1;
+ }
+ for (i = 1; *(args[i]) != 0; i++) {
+ per_thread_load[len][i - 1] = strdup(args[i]);
+ if (per_thread_load[len][i - 1] == NULL) {
+ memprintf(err, "out of memory error");
+ return -1;
+ }
+ }
+ per_thread_load[len][i - 1] = strdup("");
+ if (per_thread_load[len][i - 1] == NULL) {
+ memprintf(err, "out of memory error");
+ return -1;
+ }
+
+ /* loading for thread 1 only */
+ hlua_state_id = 1;
+ ha_set_thread(NULL);
+ return hlua_load_state(per_thread_load[len], hlua_states[1], err);
+}
+
/* Prepend the given <path> followed by a semicolon to the `package.<type>`
 * variable (<type> is "path" or "cpath") in the Lua state <L>.
 * The stack operations below are order-sensitive; the stack is left balanced.
 * Always returns 0.
 */
static int hlua_prepend_path(lua_State *L, char *type, char *path)
{
	lua_getglobal(L, "package"); /* push package variable */
	lua_pushstring(L, path); /* push given path */
	lua_pushstring(L, ";"); /* push semicolon */
	lua_getfield(L, -3, type); /* push old path (package.<type>) */
	lua_concat(L, 3); /* concatenate the 3 strings above into the new path */
	lua_setfield(L, -2, type); /* store new path into package.<type> */
	lua_pop(L, 1); /* pop package variable */

	return 0;
}
+
/* Parser for the "lua-prepend-path" global keyword: records the path in
 * prepend_path_list (for the per-thread states created later) and applies it
 * immediately to the global state and the first per-thread state.
 * Syntax: lua-prepend-path <path> [path|cpath]. Returns 0 on success, -1 with
 * <err> set on failure; exits the process on a Lua panic.
 */
static int hlua_config_prepend_path(char **args, int section_type, struct proxy *curpx,
                                    const struct proxy *defpx, const char *file, int line,
                                    char **err)
{
	char *path;
	char *type = "path"; /* default when the optional 2nd argument is absent */
	struct prepend_path *p = NULL;
	size_t i;

	if (too_many_args(2, args, err, NULL)) {
		goto err;
	}

	if (!(*args[1])) {
		memprintf(err, "'%s' expects to receive a <path> as argument", args[0]);
		goto err;
	}
	path = args[1];

	if (*args[2]) {
		if (strcmp(args[2], "path") != 0 && strcmp(args[2], "cpath") != 0) {
			memprintf(err, "'%s' expects <type> to either be 'path' or 'cpath'", args[0]);
			goto err;
		}
		type = args[2];
	}

	/* store a copy for the states initialized later from prepend_path_list */
	p = calloc(1, sizeof(*p));
	if (p == NULL) {
		memprintf(err, "memory allocation failed");
		goto err;
	}
	p->path = strdup(path);
	if (p->path == NULL) {
		memprintf(err, "memory allocation failed");
		goto err2;
	}
	p->type = strdup(type);
	if (p->type == NULL) {
		memprintf(err, "memory allocation failed");
		goto err2;
	}
	LIST_APPEND(&prepend_path_list, &p->l);

	/* Handle the global state and the per-thread state for the first
	 * thread. The remaining threads will be initialized based on
	 * prepend_path_list.
	 */
	for (i = 0; i < 2; i++) {
		lua_State *L = hlua_states[i];
		const char *error;

		/* run hlua_prepend_path() under a setjmp-based panic handler:
		 * a Lua panic longjmps back here instead of calling abort()
		 */
		if (setjmp(safe_ljmp_env) != 0) {
			lua_atpanic(L, hlua_panic_safe);
			if (lua_type(L, -1) == LUA_TSTRING)
				error = lua_tostring(L, -1);
			else
				error = "critical error";
			fprintf(stderr, "lua-prepend-path: %s.\n", error);
			exit(1);
		} else {
			lua_atpanic(L, hlua_panic_ljmp);
		}

		hlua_prepend_path(L, type, path);

		/* restore the non-jumping panic handler */
		lua_atpanic(L, hlua_panic_safe);
	}

	return 0;

err2:
	/* p was calloc()ed, so unset members are NULL and free(NULL) is a no-op */
	free(p->type);
	free(p->path);
err:
	free(p);
	return -1;
}
+
/* configuration keywords declaration: all Lua-related "global" section
 * keywords and their parser callbacks, registered at startup by the
 * INITCALL1 below.
 */
static struct cfg_kw_list cfg_kws = {{ },{
	{ CFG_GLOBAL, "lua-prepend-path",         hlua_config_prepend_path },
	{ CFG_GLOBAL, "lua-load",                 hlua_load },
	{ CFG_GLOBAL, "lua-load-per-thread",      hlua_load_per_thread },
	{ CFG_GLOBAL, "tune.lua.session-timeout", hlua_session_timeout },
	{ CFG_GLOBAL, "tune.lua.task-timeout",    hlua_task_timeout },
	{ CFG_GLOBAL, "tune.lua.service-timeout", hlua_applet_timeout },
	{ CFG_GLOBAL, "tune.lua.burst-timeout",   hlua_burst_timeout },
	{ CFG_GLOBAL, "tune.lua.forced-yield",    hlua_forced_yield },
	{ CFG_GLOBAL, "tune.lua.maxmem",          hlua_parse_maxmem },
	{ CFG_GLOBAL, "tune.lua.log.loggers",     hlua_cfg_parse_log_loggers },
	{ CFG_GLOBAL, "tune.lua.log.stderr",      hlua_cfg_parse_log_stderr },
	{ 0, NULL, NULL },
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+#ifdef USE_OPENSSL
+
+/*
+ * This function replace a ckch_store by another one, and rebuild the ckch_inst and all its dependencies.
+ * It does the sam as "cli_io_handler_commit_cert" but for lua, the major
+ * difference is that the yield in lua and for the CLI is not handled the same
+ * way.
+ */
+__LJMP static int hlua_ckch_commit_yield(lua_State *L, int status, lua_KContext ctx)
+{
+ struct ckch_inst **lua_ckchi = lua_touserdata(L, -1);
+ struct ckch_store **lua_ckchs = lua_touserdata(L, -2);
+ struct ckch_inst *ckchi = *lua_ckchi;
+ struct ckch_store *old_ckchs = lua_ckchs[0];
+ struct ckch_store *new_ckchs = lua_ckchs[1];
+ struct hlua *hlua;
+ char *err = NULL;
+ int y = 1;
+
+ hlua = hlua_gethlua(L);
+
+ /* get the first ckchi to copy */
+ if (ckchi == NULL)
+ ckchi = LIST_ELEM(old_ckchs->ckch_inst.n, typeof(ckchi), by_ckchs);
+
+ /* walk through the old ckch_inst and creates new ckch_inst using the updated ckchs */
+ list_for_each_entry_from(ckchi, &old_ckchs->ckch_inst, by_ckchs) {
+ struct ckch_inst *new_inst;
+
+ /* it takes a lot of CPU to creates SSL_CTXs, so we yield every 10 CKCH instances */
+ if (y % 10 == 0) {
+
+ *lua_ckchi = ckchi;
+
+ task_wakeup(hlua->task, TASK_WOKEN_MSG);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_ckch_commit_yield, TICK_ETERNITY, 0));
+ }
+
+ if (ckch_inst_rebuild(new_ckchs, ckchi, &new_inst, &err))
+ goto error;
+
+ /* link the new ckch_inst to the duplicate */
+ LIST_APPEND(&new_ckchs->ckch_inst, &new_inst->by_ckchs);
+ y++;
+ }
+
+ /* The generation is finished, we can insert everything */
+ ckch_store_replace(old_ckchs, new_ckchs);
+
+ lua_pop(L, 2); /* pop the lua_ckchs and ckchi */
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ return 0;
+
+error:
+ ckch_store_free(new_ckchs);
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ WILL_LJMP(luaL_error(L, "%s", err));
+ free(err);
+
+ return 0;
+}
+
+/*
+ * Replace a ckch_store <filename> in the ckchs_tree with a ckch_store created
+ * from the table in parameter.
+ *
+ * This is equivalent to "set ssl cert" + "commit ssl cert" over the CLI, which
+ * means it does not need to have a transaction since everything is done in the
+ * same function.
+ *
+ * CertCache.set{filename="", crt="", key="", sctl="", ocsp="", issuer=""}
+ *
+ */
+__LJMP static int hlua_ckch_set(lua_State *L)
+{
+ struct hlua *hlua;
+ struct ckch_inst **lua_ckchi;
+ struct ckch_store **lua_ckchs;
+ struct ckch_store *old_ckchs = NULL;
+ struct ckch_store *new_ckchs = NULL;
+ int errcode = 0;
+ char *err = NULL;
+ struct cert_exts *cert_ext = NULL;
+ char *filename;
+ struct ckch_data *data;
+ int ret;
+
+ if (lua_type(L, -1) != LUA_TTABLE)
+ WILL_LJMP(luaL_error(L, "'CertCache.set' needs a table as argument"));
+
+ hlua = hlua_gethlua(L);
+
+ /* FIXME: this should not return an error but should come back later */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ WILL_LJMP(luaL_error(L, "CertCache already under lock"));
+
+ ret = lua_getfield(L, -1, "filename");
+ if (ret != LUA_TSTRING) {
+ memprintf(&err, "%sNo filename specified!", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+ filename = (char *)lua_tostring(L, -1);
+
+
+ /* look for the filename in the tree */
+ old_ckchs = ckchs_lookup(filename);
+ if (!old_ckchs) {
+ memprintf(&err, "%sCan't replace a certificate which is not referenced by the configuration!", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+ /* TODO: handle extra_files_noext */
+
+ new_ckchs = ckchs_dup(old_ckchs);
+ if (!new_ckchs) {
+ memprintf(&err, "%sCannot allocate memory!", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ data = new_ckchs->data;
+
+ /* loop on the field in the table, which have the same name as the
+ * possible extensions of files */
+ lua_pushnil(L);
+ while (lua_next(L, 1)) {
+ int i;
+ const char *field = lua_tostring(L, -2);
+ char *payload = (char *)lua_tostring(L, -1);
+
+ if (!field || strcmp(field, "filename") == 0) {
+ lua_pop(L, 1);
+ continue;
+ }
+
+ for (i = 0; field && cert_exts[i].ext != NULL; i++) {
+ if (strcmp(field, cert_exts[i].ext) == 0) {
+ cert_ext = &cert_exts[i];
+ break;
+ }
+ }
+
+ /* this is the default type, the field is not supported */
+ if (cert_ext == NULL) {
+ memprintf(&err, "%sUnsupported field '%s'", err ? err : "", field);
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ /* Reset the OCSP CID */
+ if (cert_ext->type == CERT_TYPE_PEM || cert_ext->type == CERT_TYPE_KEY ||
+ cert_ext->type == CERT_TYPE_ISSUER) {
+ OCSP_CERTID_free(new_ckchs->data->ocsp_cid);
+ new_ckchs->data->ocsp_cid = NULL;
+ }
+
+ /* apply the change on the duplicate */
+ if (cert_ext->load(filename, payload, data, &err) != 0) {
+ memprintf(&err, "%sCan't load the payload for '%s'", err ? err : "", cert_ext->ext);
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+ lua_pop(L, 1);
+ }
+
+ /* store the pointers on the lua stack */
+ lua_ckchs = lua_newuserdata(L, sizeof(struct ckch_store *) * 2);
+ lua_ckchs[0] = old_ckchs;
+ lua_ckchs[1] = new_ckchs;
+ lua_ckchi = lua_newuserdata(L, sizeof(struct ckch_inst *));
+ *lua_ckchi = NULL;
+
+ task_wakeup(hlua->task, TASK_WOKEN_MSG);
+ MAY_LJMP(hlua_yieldk(L, 0, 0, hlua_ckch_commit_yield, TICK_ETERNITY, 0));
+
+end:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ if (errcode & ERR_CODE) {
+ ckch_store_free(new_ckchs);
+ WILL_LJMP(luaL_error(L, "%s", err));
+ }
+ free(err);
+
+ return 0;
+}
+
+#else
+
/* Stub used when HAProxy is built without OpenSSL: always raises a Lua error. */
__LJMP static int hlua_ckch_set(lua_State *L)
{
	WILL_LJMP(luaL_error(L, "'CertCache.set' needs an HAProxy built with OpenSSL"));

	return 0;
}
+#endif /* ! USE_OPENSSL */
+
+
+
+/* This function can fail with an abort() due to an Lua critical error.
+ * We are in the initialisation process of HAProxy, this abort() is
+ * tolerated.
+ */
/* Runs all registered Lua init functions of the current state id in state <L>
 * under a setjmp-based panic handler. Returns 1 on success, 0 if any init
 * function failed (reported via ha_alert()); exits the process on a Lua panic.
 */
int hlua_post_init_state(lua_State *L)
{
	struct hlua_init_function *init;
	const char *msg;
	enum hlua_exec ret;
	const char *error;
	const char *kind;
	const char *trace;
	int return_status = 1;
#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 504
	/* lua_resume() reports the number of results separately since 5.4 */
	int nres;
#endif

	/* disable memory limit checks if limit is not set */
	if (!hlua_global_allocator.limit)
		hlua_global_allocator.limit = ~hlua_global_allocator.limit;

	/* Call post initialisation function in safe environment. */
	if (setjmp(safe_ljmp_env) != 0) {
		/* longjmp target: a Lua panic occurred; report it and abort */
		lua_atpanic(L, hlua_panic_safe);
		if (lua_type(L, -1) == LUA_TSTRING)
			error = lua_tostring(L, -1);
		else
			error = "critical error";
		fprintf(stderr, "Lua post-init: %s.\n", error);
		exit(1);
	} else {
		lua_atpanic(L, hlua_panic_ljmp);
	}

	list_for_each_entry(init, &hlua_init_functions[hlua_state_id], l) {
		hlua_pushref(L, init->function_ref);
		/* function ref should be released right away since it was pushed
		 * on the stack and will not be used anymore
		 */
		hlua_unref(L, init->function_ref);

#if defined(LUA_VERSION_NUM) && LUA_VERSION_NUM >= 504
		ret = lua_resume(L, NULL, 0, &nres);
#else
		ret = lua_resume(L, NULL, 0);
#endif
		kind = NULL;
		switch (ret) {

		case LUA_OK:
			/* lua_pop(L, -1) expands to lua_settop(L, 0): clears any
			 * values the init function returned
			 */
			lua_pop(L, -1);
			break;

		case LUA_ERRERR:
			kind = "message handler error";
			__fallthrough;
		case LUA_ERRRUN:
			if (!kind)
				kind = "runtime error";
			msg = lua_tostring(L, -1);
			lua_settop(L, 0); /* Empty the stack. */
			trace = hlua_traceback(L, ", ");
			if (msg)
				ha_alert("Lua init: %s: '%s' from %s\n", kind, msg, trace);
			else
				ha_alert("Lua init: unknown %s from %s\n", kind, trace);
			return_status = 0;
			break;

		default:
			/* Unknown error */
			kind = "Unknown error";
			__fallthrough;
		case LUA_YIELD:
			/* yield is not configured at this step, this state doesn't happen */
			if (!kind)
				kind = "yield not allowed";
			__fallthrough;
		case LUA_ERRMEM:
			if (!kind)
				kind = "out of memory error";
			lua_settop(L, 0); /* Empty the stack. */
			trace = hlua_traceback(L, ", ");
			ha_alert("Lua init: %s: %s\n", kind, trace);
			return_status = 0;
			break;
		}
		/* stop at the first failing init function */
		if (!return_status)
			break;
	}

	/* restore the non-jumping panic handler before returning */
	lua_atpanic(L, hlua_panic_safe);
	return return_status;
}
+
+int hlua_post_init()
+{
+ int ret;
+ int i;
+ int errors;
+ char *err = NULL;
+ struct hlua_function *fcn;
+ struct hlua_reg_filter *reg_flt;
+
+#if defined(USE_OPENSSL)
+ /* Initialize SSL server. */
+ if (socket_ssl->xprt->prepare_srv) {
+ int saved_used_backed = global.ssl_used_backend;
+ // don't affect maxconn automatic computation
+ socket_ssl->xprt->prepare_srv(socket_ssl);
+ global.ssl_used_backend = saved_used_backed;
+ }
+#endif
+
+ /* Perform post init of common thread */
+ hlua_state_id = 0;
+ ha_set_thread(&ha_thread_info[0]);
+ ret = hlua_post_init_state(hlua_states[hlua_state_id]);
+ if (ret == 0)
+ return 0;
+
+ /* init remaining lua states and load files */
+ for (hlua_state_id = 2; hlua_state_id < global.nbthread + 1; hlua_state_id++) {
+
+ /* set thread context */
+ ha_set_thread(&ha_thread_info[hlua_state_id - 1]);
+
+ /* Init lua state */
+ hlua_states[hlua_state_id] = hlua_init_state(hlua_state_id);
+
+ /* Load lua files */
+ for (i = 0; per_thread_load && per_thread_load[i]; i++) {
+ ret = hlua_load_state(per_thread_load[i], hlua_states[hlua_state_id], &err);
+ if (ret != 0) {
+ ha_alert("Lua init: %s\n", err);
+ return 0;
+ }
+ }
+ }
+
+ /* Reset thread context */
+ ha_set_thread(NULL);
+
+ /* Execute post init for all states */
+ for (hlua_state_id = 1; hlua_state_id < global.nbthread + 1; hlua_state_id++) {
+
+ /* set thread context */
+ ha_set_thread(&ha_thread_info[hlua_state_id - 1]);
+
+ /* run post init */
+ ret = hlua_post_init_state(hlua_states[hlua_state_id]);
+ if (ret == 0)
+ return 0;
+ }
+
+ /* Reset thread context */
+ ha_set_thread(NULL);
+
+ /* control functions registering. Each function must have:
+ * - only the function_ref[0] set positive and all other to -1
+ * - only the function_ref[0] set to -1 and all other positive
+ * This ensure a same reference is not used both in shared
+ * lua state and thread dedicated lua state. Note: is the case
+ * reach, the shared state is priority, but the bug will be
+ * complicated to found for the end user.
+ */
+ errors = 0;
+ list_for_each_entry(fcn, &referenced_functions, l) {
+ ret = 0;
+ for (i = 1; i < global.nbthread + 1; i++) {
+ if (fcn->function_ref[i] == -1)
+ ret--;
+ else
+ ret++;
+ }
+ if (abs(ret) != global.nbthread) {
+ ha_alert("Lua function '%s' is not referenced in all thread. "
+ "Expect function in all thread or in none thread.\n", fcn->name);
+ errors++;
+ continue;
+ }
+
+ if ((fcn->function_ref[0] == -1) == (ret < 0)) {
+ ha_alert("Lua function '%s' is referenced both ins shared Lua context (through lua-load) "
+ "and per-thread Lua context (through lua-load-per-thread). these two context "
+ "exclusive.\n", fcn->name);
+ errors++;
+ }
+ }
+
+ /* Do the same with registered filters */
+ list_for_each_entry(reg_flt, &referenced_filters, l) {
+ ret = 0;
+ for (i = 1; i < global.nbthread + 1; i++) {
+ if (reg_flt->flt_ref[i] == -1)
+ ret--;
+ else
+ ret++;
+ }
+ if (abs(ret) != global.nbthread) {
+ ha_alert("Lua filter '%s' is not referenced in all thread. "
+ "Expect function in all thread or in none thread.\n", reg_flt->name);
+ errors++;
+ continue;
+ }
+
+ if ((reg_flt->flt_ref[0] == -1) == (ret < 0)) {
+ ha_alert("Lua filter '%s' is referenced both ins shared Lua context (through lua-load) "
+ "and per-thread Lua context (through lua-load-per-thread). these two context "
+ "exclusive.\n", fcn->name);
+ errors++;
+ }
+ }
+
+
+ if (errors > 0)
+ return 0;
+
+ /* after this point, this global will no longer be used, so set to
+ * -1 in order to have probably a segfault if someone use it
+ */
+ hlua_state_id = -1;
+
+ return 1;
+}
+
+/* The memory allocator used by the Lua stack. <ud> is a pointer to the
+ * allocator's context. <ptr> is the pointer to alloc/free/realloc. <osize>
+ * is the previously allocated size or the kind of object in case of a new
+ * allocation. <nsize> is the requested new size. A new allocation is
+ * indicated by <ptr> being NULL. A free is indicated by <nsize> being
+ * zero. This one verifies that the limits are respected but is optimized
+ * for the fast case where limits are not used, hence stats are not updated.
+ *
 * Warning: while this API resembles glibc's realloc() a lot, glibc surpasses
+ * POSIX by making realloc(ptr,0) an effective free(), but others do not do
+ * that and will simply allocate zero as if it were the result of malloc(0),
+ * so mapping this onto realloc() will lead to memory leaks on non-glibc
+ * systems.
+ */
static void *hlua_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
{
	struct hlua_mem_allocator *zone = ud;
	size_t limit, old, new;

	/* a limit of ~0 means unlimited and boot complete, so there's no need
	 * for accounting anymore.
	 */
	if (likely(~zone->limit == 0)) {
		/* Lua requires realloc(ptr, 0) to behave as free(); do it
		 * explicitly since not all libcs guarantee that (see the
		 * header comment above)
		 */
		if (!nsize)
			ha_free(&ptr);
		else
			ptr = realloc(ptr, nsize);
		return ptr;
	}

	/* for a fresh allocation, Lua passes the object kind in <osize>;
	 * nothing was really allocated before, so account from zero
	 */
	if (!ptr)
		osize = 0;

	/* enforce strict limits across all threads: CAS-reserve the size
	 * delta in zone->allocated before touching memory
	 */
	limit = zone->limit;
	old = _HA_ATOMIC_LOAD(&zone->allocated);
	do {
		new = old + nsize - osize;
		if (unlikely(nsize && limit && new > limit))
			return NULL;
	} while (!_HA_ATOMIC_CAS(&zone->allocated, &old, new));

	if (!nsize)
		ha_free(&ptr);
	else
		ptr = realloc(ptr, nsize);

	/* roll back the reservation made above */
	if (unlikely(!ptr && nsize)) // failed
		_HA_ATOMIC_SUB(&zone->allocated, nsize - osize);

	__ha_barrier_atomic_store();
	return ptr;
}
+
+/* This function can fail with an abort() due to a Lua critical error.
+ * We are in the initialisation process of HAProxy, this abort() is
+ * tolerated.
+ */
+lua_State *hlua_init_state(int thread_num)
+{
+ int i;
+ int idx;
+ struct sample_fetch *sf;
+ struct sample_conv *sc;
+ char *p;
+ const char *error_msg;
+ void **context;
+ lua_State *L;
+ struct prepend_path *pp;
+
+ /* Init main lua stack. */
+ L = lua_newstate(hlua_alloc, &hlua_global_allocator);
+
+ if (!L) {
+ fprintf(stderr,
+ "Lua init: critical error: lua_newstate() returned NULL."
+ " This may possibly be caused by a memory allocation error.\n");
+ exit(1);
+ }
+
+ /* Initialise Lua context to NULL */
+ context = lua_getextraspace(L);
+ *context = NULL;
+
+ /* From this point, until the end of the initialisation function,
+ * the Lua function can fail with an abort. We are in the initialisation
+ * process of HAProxy, this abort() is tolerated.
+ */
+
+ /* Call post initialisation function in safe environment. */
+ if (setjmp(safe_ljmp_env) != 0) {
+ lua_atpanic(L, hlua_panic_safe);
+ if (lua_type(L, -1) == LUA_TSTRING)
+ error_msg = lua_tostring(L, -1);
+ else
+ error_msg = "critical error";
+ fprintf(stderr, "Lua init: %s.\n", error_msg);
+ exit(1);
+ } else {
+ lua_atpanic(L, hlua_panic_ljmp);
+ }
+
+ /* Initialise lua. */
+ luaL_openlibs(L);
+#define HLUA_PREPEND_PATH_TOSTRING1(x) #x
+#define HLUA_PREPEND_PATH_TOSTRING(x) HLUA_PREPEND_PATH_TOSTRING1(x)
+#ifdef HLUA_PREPEND_PATH
+ hlua_prepend_path(L, "path", HLUA_PREPEND_PATH_TOSTRING(HLUA_PREPEND_PATH));
+#endif
+#ifdef HLUA_PREPEND_CPATH
+ hlua_prepend_path(L, "cpath", HLUA_PREPEND_PATH_TOSTRING(HLUA_PREPEND_CPATH));
+#endif
+#undef HLUA_PREPEND_PATH_TOSTRING
+#undef HLUA_PREPEND_PATH_TOSTRING1
+
+ /* Apply configured prepend path */
+ list_for_each_entry(pp, &prepend_path_list, l)
+ hlua_prepend_path(L, pp->type, pp->path);
+
+ /*
+ * Override some lua functions.
+ *
+ */
+
+ /* push our "safe" coroutine.create() function */
+ lua_getglobal(L, "coroutine");
+ lua_pushcclosure(L, hlua_coroutine_create, 0);
+ lua_setfield(L, -2, "create");
+
+ /*
+ *
+ * Create "core" object.
+ *
+ */
+
+ /* This table entry is the object "core" base. */
+ lua_newtable(L);
+
+ /* set the thread id */
+ hlua_class_const_int(L, "thread", thread_num);
+
+ /* Push the loglevel constants. */
+ for (i = 0; i < NB_LOG_LEVELS; i++)
+ hlua_class_const_int(L, log_levels[i], i);
+
+ /* Register special functions. */
+ hlua_class_function(L, "register_init", hlua_register_init);
+ hlua_class_function(L, "register_task", hlua_register_task);
+ hlua_class_function(L, "register_fetches", hlua_register_fetches);
+ hlua_class_function(L, "register_converters", hlua_register_converters);
+ hlua_class_function(L, "register_action", hlua_register_action);
+ hlua_class_function(L, "register_service", hlua_register_service);
+ hlua_class_function(L, "register_cli", hlua_register_cli);
+ hlua_class_function(L, "register_filter", hlua_register_filter);
+ hlua_class_function(L, "yield", hlua_yield);
+ hlua_class_function(L, "set_nice", hlua_set_nice);
+ hlua_class_function(L, "sleep", hlua_sleep);
+ hlua_class_function(L, "msleep", hlua_msleep);
+ hlua_class_function(L, "add_acl", hlua_add_acl);
+ hlua_class_function(L, "del_acl", hlua_del_acl);
+ hlua_class_function(L, "set_map", hlua_set_map);
+ hlua_class_function(L, "del_map", hlua_del_map);
+ hlua_class_function(L, "get_var", hlua_core_get_var);
+ hlua_class_function(L, "tcp", hlua_socket_new);
+ hlua_class_function(L, "httpclient", hlua_httpclient_new);
+ hlua_class_function(L, "event_sub", hlua_event_global_sub);
+ hlua_class_function(L, "log", hlua_log);
+ hlua_class_function(L, "Debug", hlua_log_debug);
+ hlua_class_function(L, "Info", hlua_log_info);
+ hlua_class_function(L, "Warning", hlua_log_warning);
+ hlua_class_function(L, "Alert", hlua_log_alert);
+ hlua_class_function(L, "done", hlua_done);
+ hlua_class_function(L, "disable_legacy_mailers", hlua_disable_legacy_mailers);
+ hlua_fcn_reg_core_fcn(L);
+
+ lua_setglobal(L, "core");
+
+ /*
+ *
+ * Create "act" object.
+ *
+ */
+
+ /* This table entry is the object "act" base. */
+ lua_newtable(L);
+
+ /* push action return constants */
+ hlua_class_const_int(L, "CONTINUE", ACT_RET_CONT);
+ hlua_class_const_int(L, "STOP", ACT_RET_STOP);
+ hlua_class_const_int(L, "YIELD", ACT_RET_YIELD);
+ hlua_class_const_int(L, "ERROR", ACT_RET_ERR);
+ hlua_class_const_int(L, "DONE", ACT_RET_DONE);
+ hlua_class_const_int(L, "DENY", ACT_RET_DENY);
+ hlua_class_const_int(L, "ABORT", ACT_RET_ABRT);
+ hlua_class_const_int(L, "INVALID", ACT_RET_INV);
+
+ hlua_class_function(L, "wake_time", hlua_set_wake_time);
+
+ lua_setglobal(L, "act");
+
+ /*
+ *
+ * Create "Filter" object.
+ *
+ */
+
+ /* This table entry is the object "filter" base. */
+ lua_newtable(L);
+
+ /* push flags and constants */
+ hlua_class_const_int(L, "CONTINUE", 1);
+ hlua_class_const_int(L, "WAIT", 0);
+ hlua_class_const_int(L, "ERROR", -1);
+
+ hlua_class_const_int(L, "FLT_CFG_FL_HTX", FLT_CFG_FL_HTX);
+
+ hlua_class_function(L, "wake_time", hlua_set_wake_time);
+ hlua_class_function(L, "register_data_filter", hlua_register_data_filter);
+ hlua_class_function(L, "unregister_data_filter", hlua_unregister_data_filter);
+
+ lua_setglobal(L, "filter");
+
+ /*
+ *
+ * Register class Map
+ *
+ */
+
+ /* This table entry is the object "Map" base. */
+ lua_newtable(L);
+
+ /* register pattern types. */
+ for (i=0; i<PAT_MATCH_NUM; i++)
+ hlua_class_const_int(L, pat_match_names[i], i);
+ for (i=0; i<PAT_MATCH_NUM; i++) {
+ snprintf(trash.area, trash.size, "_%s", pat_match_names[i]);
+ hlua_class_const_int(L, trash.area, i);
+ }
+
+ /* register constructor. */
+ hlua_class_function(L, "new", hlua_map_new);
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register . */
+ hlua_class_function(L, "lookup", hlua_map_lookup);
+ hlua_class_function(L, "slookup", hlua_map_slookup);
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry.
+ * The function hlua_register_metatable() pops the stack, so we
+ * previously create a copy of the table.
+ */
+ lua_pushvalue(L, -1); /* Copy the -1 entry and push it on the stack. */
+ class_map_ref = hlua_register_metatable(L, CLASS_MAP);
+
+ /* Assign the metatable to the mai Map object. */
+ lua_setmetatable(L, -2);
+
+ /* Set a name to the table. */
+ lua_setglobal(L, "Map");
+
+ /*
+ *
+ * Register "CertCache" class
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+ /* Register */
+ hlua_class_function(L, "set", hlua_ckch_set);
+ lua_setglobal(L, CLASS_CERTCACHE); /* Create global object called Regex */
+
+ /*
+ *
+ * Register class Channel
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register . */
+ hlua_class_function(L, "data", hlua_channel_get_data);
+ hlua_class_function(L, "line", hlua_channel_get_line);
+ hlua_class_function(L, "set", hlua_channel_set_data);
+ hlua_class_function(L, "remove", hlua_channel_del_data);
+ hlua_class_function(L, "append", hlua_channel_append);
+ hlua_class_function(L, "prepend", hlua_channel_prepend);
+ hlua_class_function(L, "insert", hlua_channel_insert_data);
+ hlua_class_function(L, "send", hlua_channel_send);
+ hlua_class_function(L, "forward", hlua_channel_forward);
+ hlua_class_function(L, "input", hlua_channel_get_in_len);
+ hlua_class_function(L, "output", hlua_channel_get_out_len);
+ hlua_class_function(L, "may_recv", hlua_channel_may_recv);
+ hlua_class_function(L, "is_full", hlua_channel_is_full);
+ hlua_class_function(L, "is_resp", hlua_channel_is_resp);
+
+ /* Deprecated API */
+ hlua_class_function(L, "get", hlua_channel_get);
+ hlua_class_function(L, "dup", hlua_channel_dup);
+ hlua_class_function(L, "getline", hlua_channel_getline);
+ hlua_class_function(L, "get_in_len", hlua_channel_get_in_len);
+ hlua_class_function(L, "get_out_len", hlua_channel_get_out_len);
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_channel_ref = hlua_register_metatable(L, CLASS_CHANNEL);
+
+ /*
+ *
+ * Register class Fetches
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Browse existing fetches and create the associated
+ * object method.
+ */
+ sf = NULL;
+ while ((sf = sample_fetch_getnext(sf, &idx)) != NULL) {
+ /* gL.Tua doesn't support '.' and '-' in the function names, replace it
+ * by an underscore.
+ */
+ strlcpy2(trash.area, sf->kw, trash.size);
+ for (p = trash.area; *p; p++)
+ if (*p == '.' || *p == '-' || *p == '+')
+ *p = '_';
+
+ /* Register the function. */
+ lua_pushstring(L, trash.area);
+ lua_pushlightuserdata(L, sf);
+ lua_pushcclosure(L, hlua_run_sample_fetch, 1);
+ lua_rawset(L, -3);
+ }
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_fetches_ref = hlua_register_metatable(L, CLASS_FETCHES);
+
+ /*
+ *
+ * Register class Converters
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Browse existing converters and create the associated
+ * object method.
+ */
+ sc = NULL;
+ while ((sc = sample_conv_getnext(sc, &idx)) != NULL) {
+ /* gL.Tua doesn't support '.' and '-' in the function names, replace it
+ * by an underscore.
+ */
+ strlcpy2(trash.area, sc->kw, trash.size);
+ for (p = trash.area; *p; p++)
+ if (*p == '.' || *p == '-' || *p == '+')
+ *p = '_';
+
+ /* Register the function. */
+ lua_pushstring(L, trash.area);
+ lua_pushlightuserdata(L, sc);
+ lua_pushcclosure(L, hlua_run_sample_conv, 1);
+ lua_rawset(L, -3);
+ }
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_converters_ref = hlua_register_metatable(L, CLASS_CONVERTERS);
+
+ /*
+ *
+ * Register class HTTP
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register Lua functions. */
+ hlua_class_function(L, "req_get_headers",hlua_http_req_get_headers);
+ hlua_class_function(L, "req_del_header", hlua_http_req_del_hdr);
+ hlua_class_function(L, "req_rep_header", hlua_http_req_rep_hdr);
+ hlua_class_function(L, "req_rep_value", hlua_http_req_rep_val);
+ hlua_class_function(L, "req_add_header", hlua_http_req_add_hdr);
+ hlua_class_function(L, "req_set_header", hlua_http_req_set_hdr);
+ hlua_class_function(L, "req_set_method", hlua_http_req_set_meth);
+ hlua_class_function(L, "req_set_path", hlua_http_req_set_path);
+ hlua_class_function(L, "req_set_query", hlua_http_req_set_query);
+ hlua_class_function(L, "req_set_uri", hlua_http_req_set_uri);
+
+ hlua_class_function(L, "res_get_headers",hlua_http_res_get_headers);
+ hlua_class_function(L, "res_del_header", hlua_http_res_del_hdr);
+ hlua_class_function(L, "res_rep_header", hlua_http_res_rep_hdr);
+ hlua_class_function(L, "res_rep_value", hlua_http_res_rep_val);
+ hlua_class_function(L, "res_add_header", hlua_http_res_add_hdr);
+ hlua_class_function(L, "res_set_header", hlua_http_res_set_hdr);
+ hlua_class_function(L, "res_set_status", hlua_http_res_set_status);
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_http_ref = hlua_register_metatable(L, CLASS_HTTP);
+
+ /*
+ *
+ * Register class HTTPMessage
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register Lua functions. */
+ hlua_class_function(L, "is_resp", hlua_http_msg_is_resp);
+ hlua_class_function(L, "get_stline", hlua_http_msg_get_stline);
+ hlua_class_function(L, "get_headers", hlua_http_msg_get_headers);
+ hlua_class_function(L, "del_header", hlua_http_msg_del_hdr);
+ hlua_class_function(L, "rep_header", hlua_http_msg_rep_hdr);
+ hlua_class_function(L, "rep_value", hlua_http_msg_rep_val);
+ hlua_class_function(L, "add_header", hlua_http_msg_add_hdr);
+ hlua_class_function(L, "set_header", hlua_http_msg_set_hdr);
+ hlua_class_function(L, "set_method", hlua_http_msg_set_meth);
+ hlua_class_function(L, "set_path", hlua_http_msg_set_path);
+ hlua_class_function(L, "set_query", hlua_http_msg_set_query);
+ hlua_class_function(L, "set_uri", hlua_http_msg_set_uri);
+ hlua_class_function(L, "set_status", hlua_http_msg_set_status);
+ hlua_class_function(L, "is_full", hlua_http_msg_is_full);
+ hlua_class_function(L, "may_recv", hlua_http_msg_may_recv);
+ hlua_class_function(L, "eom", hlua_http_msg_is_eom);
+ hlua_class_function(L, "input", hlua_http_msg_get_in_len);
+ hlua_class_function(L, "output", hlua_http_msg_get_out_len);
+
+ hlua_class_function(L, "body", hlua_http_msg_get_body);
+ hlua_class_function(L, "set", hlua_http_msg_set_data);
+ hlua_class_function(L, "remove", hlua_http_msg_del_data);
+ hlua_class_function(L, "append", hlua_http_msg_append);
+ hlua_class_function(L, "prepend", hlua_http_msg_prepend);
+ hlua_class_function(L, "insert", hlua_http_msg_insert_data);
+ hlua_class_function(L, "set_eom", hlua_http_msg_set_eom);
+ hlua_class_function(L, "unset_eom", hlua_http_msg_unset_eom);
+
+ hlua_class_function(L, "send", hlua_http_msg_send);
+ hlua_class_function(L, "forward", hlua_http_msg_forward);
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_http_msg_ref = hlua_register_metatable(L, CLASS_HTTP_MSG);
+
+ /*
+ *
+ * Register class HTTPClient
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+ hlua_class_function(L, "get", hlua_httpclient_get);
+ hlua_class_function(L, "head", hlua_httpclient_head);
+ hlua_class_function(L, "put", hlua_httpclient_put);
+ hlua_class_function(L, "post", hlua_httpclient_post);
+ hlua_class_function(L, "delete", hlua_httpclient_delete);
+ lua_settable(L, -3); /* Sets the __index entry. */
+ /* Register the garbage collector entry. */
+ lua_pushstring(L, "__gc");
+ lua_pushcclosure(L, hlua_httpclient_gc, 0);
+ lua_settable(L, -3); /* Push the last 2 entries in the table at index -3 */
+
+
+
+ class_httpclient_ref = hlua_register_metatable(L, CLASS_HTTPCLIENT);
+ /*
+ *
+ * Register class AppletTCP
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register Lua functions. */
+ hlua_class_function(L, "getline", hlua_applet_tcp_getline);
+ hlua_class_function(L, "receive", hlua_applet_tcp_recv);
+ hlua_class_function(L, "send", hlua_applet_tcp_send);
+ hlua_class_function(L, "set_priv", hlua_applet_tcp_set_priv);
+ hlua_class_function(L, "get_priv", hlua_applet_tcp_get_priv);
+ hlua_class_function(L, "set_var", hlua_applet_tcp_set_var);
+ hlua_class_function(L, "unset_var", hlua_applet_tcp_unset_var);
+ hlua_class_function(L, "get_var", hlua_applet_tcp_get_var);
+
+ lua_settable(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_applet_tcp_ref = hlua_register_metatable(L, CLASS_APPLET_TCP);
+
+ /*
+ *
+ * Register class AppletHTTP
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register Lua functions. */
+ hlua_class_function(L, "set_priv", hlua_applet_http_set_priv);
+ hlua_class_function(L, "get_priv", hlua_applet_http_get_priv);
+ hlua_class_function(L, "set_var", hlua_applet_http_set_var);
+ hlua_class_function(L, "unset_var", hlua_applet_http_unset_var);
+ hlua_class_function(L, "get_var", hlua_applet_http_get_var);
+ hlua_class_function(L, "getline", hlua_applet_http_getline);
+ hlua_class_function(L, "receive", hlua_applet_http_recv);
+ hlua_class_function(L, "send", hlua_applet_http_send);
+ hlua_class_function(L, "add_header", hlua_applet_http_addheader);
+ hlua_class_function(L, "set_status", hlua_applet_http_status);
+ hlua_class_function(L, "start_response", hlua_applet_http_start_response);
+
+ lua_settable(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_applet_http_ref = hlua_register_metatable(L, CLASS_APPLET_HTTP);
+
+ /*
+ *
+ * Register class TXN
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+ /* Register Lua functions. */
+ hlua_class_function(L, "set_priv", hlua_set_priv);
+ hlua_class_function(L, "get_priv", hlua_get_priv);
+ hlua_class_function(L, "set_var", hlua_set_var);
+ hlua_class_function(L, "unset_var", hlua_unset_var);
+ hlua_class_function(L, "get_var", hlua_get_var);
+ hlua_class_function(L, "done", hlua_txn_done);
+ hlua_class_function(L, "reply", hlua_txn_reply_new);
+ hlua_class_function(L, "set_loglevel", hlua_txn_set_loglevel);
+ hlua_class_function(L, "set_tos", hlua_txn_set_tos);
+ hlua_class_function(L, "set_mark", hlua_txn_set_mark);
+ hlua_class_function(L, "set_priority_class", hlua_txn_set_priority_class);
+ hlua_class_function(L, "set_priority_offset", hlua_txn_set_priority_offset);
+ hlua_class_function(L, "deflog", hlua_txn_deflog);
+ hlua_class_function(L, "log", hlua_txn_log);
+ hlua_class_function(L, "Debug", hlua_txn_log_debug);
+ hlua_class_function(L, "Info", hlua_txn_log_info);
+ hlua_class_function(L, "Warning", hlua_txn_log_warning);
+ hlua_class_function(L, "Alert", hlua_txn_log_alert);
+
+ lua_rawset(L, -3);
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_txn_ref = hlua_register_metatable(L, CLASS_TXN);
+
+ /*
+ *
+ * Register class reply
+ *
+ */
+ lua_newtable(L);
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+ hlua_class_function(L, "set_status", hlua_txn_reply_set_status);
+ hlua_class_function(L, "add_header", hlua_txn_reply_add_header);
+ hlua_class_function(L, "del_header", hlua_txn_reply_del_header);
+ hlua_class_function(L, "set_body", hlua_txn_reply_set_body);
+ lua_settable(L, -3); /* Sets the __index entry. */
+ class_txn_reply_ref = luaL_ref(L, LUA_REGISTRYINDEX);
+
+
+ /*
+ *
+ * Register class Socket
+ *
+ */
+
+ /* Create and fill the metatable. */
+ lua_newtable(L);
+
+ /* Create and fill the __index entry. */
+ lua_pushstring(L, "__index");
+ lua_newtable(L);
+
+#ifdef USE_OPENSSL
+ hlua_class_function(L, "connect_ssl", hlua_socket_connect_ssl);
+#endif
+ hlua_class_function(L, "connect", hlua_socket_connect);
+ hlua_class_function(L, "send", hlua_socket_send);
+ hlua_class_function(L, "receive", hlua_socket_receive);
+ hlua_class_function(L, "close", hlua_socket_close);
+ hlua_class_function(L, "getpeername", hlua_socket_getpeername);
+ hlua_class_function(L, "getsockname", hlua_socket_getsockname);
+ hlua_class_function(L, "setoption", hlua_socket_setoption);
+ hlua_class_function(L, "settimeout", hlua_socket_settimeout);
+
+ lua_rawset(L, -3); /* Push the last 2 entries in the table at index -3 */
+
+ /* Register the garbage collector entry. */
+ lua_pushstring(L, "__gc");
+ lua_pushcclosure(L, hlua_socket_gc, 0);
+ lua_rawset(L, -3); /* Push the last 2 entries in the table at index -3 */
+
+ /* Register previous table in the registry with reference and named entry. */
+ class_socket_ref = hlua_register_metatable(L, CLASS_SOCKET);
+
+ lua_atpanic(L, hlua_panic_safe);
+
+ return L;
+}
+
+void hlua_init(void) {
+ int i;
+ char *errmsg;
+#ifdef USE_OPENSSL
+ struct srv_kw *kw;
+ int tmp_error;
+ char *error;
+ char *args[] = { /* SSL client configuration. */
+ "ssl",
+ "verify",
+ "none",
+ NULL
+ };
+#endif
+
+ /* Init post init function list head */
+ for (i = 0; i < MAX_THREADS + 1; i++)
+ LIST_INIT(&hlua_init_functions[i]);
+
+ /* Init state for common/shared lua parts */
+ hlua_state_id = 0;
+ ha_set_thread(NULL);
+ hlua_states[0] = hlua_init_state(0);
+
+ /* Init state 1 for thread 0. We have at least one thread. */
+ hlua_state_id = 1;
+ ha_set_thread(NULL);
+ hlua_states[1] = hlua_init_state(1);
+
+ /* Proxy and server configuration initialisation. */
+ socket_proxy = alloc_new_proxy("LUA-SOCKET", PR_CAP_FE|PR_CAP_BE|PR_CAP_INT, &errmsg);
+ if (!socket_proxy) {
+ fprintf(stderr, "Lua init: %s\n", errmsg);
+ exit(1);
+ }
+
+ /* Init TCP server: unchanged parameters */
+ socket_tcp = new_server(socket_proxy);
+ if (!socket_tcp) {
+ fprintf(stderr, "Lua init: failed to allocate tcp server socket\n");
+ exit(1);
+ }
+
+#ifdef USE_OPENSSL
+ /* Init TCP server: unchanged parameters */
+ socket_ssl = new_server(socket_proxy);
+ if (!socket_ssl) {
+ fprintf(stderr, "Lua init: failed to allocate ssl server socket\n");
+ exit(1);
+ }
+
+ socket_ssl->use_ssl = 1;
+ socket_ssl->xprt = xprt_get(XPRT_SSL);
+
+ for (i = 0; args[i] != NULL; i++) {
+ if ((kw = srv_find_kw(args[i])) != NULL) { /* Maybe it's registered server keyword */
+ /*
+ *
+ * If the keyword is not known, we can search in the registered
+ * server keywords. This is useful to configure special SSL
+ * features like client certificates and ssl_verify.
+ *
+ */
+ tmp_error = kw->parse(args, &i, socket_proxy, socket_ssl, &error);
+ if (tmp_error != 0) {
+ fprintf(stderr, "INTERNAL ERROR: %s\n", error);
+				abort(); /* This must never happen because the command line
+				            is not editable by the user. */
+ }
+ i += kw->skip;
+ }
+ }
+#endif
+
+}
+
+static void hlua_deinit()
+{
+ int thr;
+ struct hlua_reg_filter *reg_flt, *reg_flt_bck;
+
+ list_for_each_entry_safe(reg_flt, reg_flt_bck, &referenced_filters, l)
+ release_hlua_reg_filter(reg_flt);
+
+ for (thr = 0; thr < MAX_THREADS+1; thr++) {
+ if (hlua_states[thr])
+ lua_close(hlua_states[thr]);
+ }
+
+ srv_drop(socket_tcp);
+
+#ifdef USE_OPENSSL
+ srv_drop(socket_ssl);
+#endif
+
+ free_proxy(socket_proxy);
+}
+
+REGISTER_POST_DEINIT(hlua_deinit);
+
+static void hlua_register_build_options(void)
+{
+ char *ptr = NULL;
+
+ memprintf(&ptr, "Built with Lua version : %s", LUA_RELEASE);
+ hap_register_build_opts(ptr, 1);
+}
+
+INITCALL0(STG_REGISTER, hlua_register_build_options);
diff --git a/src/hlua_fcn.c b/src/hlua_fcn.c
new file mode 100644
index 0000000..d8dcdfd
--- /dev/null
+++ b/src/hlua_fcn.c
@@ -0,0 +1,2721 @@
+/*
+ * Lua safe functions
+ *
+ * Copyright 2015-2016 Thierry Fournier <tfournier@arpalert.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * All the functions in this file runs with a Lua stack, and can
+ * return with a longjmp. All of these function must be launched
+ * in an environment able to catch a longjmp, otherwise a
+ * critical error can be raised.
+ */
+
+#define _GNU_SOURCE
+
+#include <lauxlib.h>
+#include <lua.h>
+#include <lualib.h>
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/cli-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/hlua.h>
+#include <haproxy/hlua_fcn.h>
+#include <haproxy/http.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/server.h>
+#include <haproxy/stats.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/event_hdl.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/mailers.h>
+
+/* Contains the class reference of the concat object. */
+static int class_concat_ref;
+static int class_queue_ref;
+static int class_proxy_ref;
+static int class_server_ref;
+static int class_listener_ref;
+static int class_event_sub_ref;
+static int class_regex_ref;
+static int class_stktable_ref;
+static int class_proxy_list_ref;
+static int class_server_list_ref;
+
+#define STATS_LEN (MAX((int)ST_F_TOTAL_FIELDS, (int)INF_TOTAL_FIELDS))
+
+static THREAD_LOCAL struct field stats[STATS_LEN];
+
+int hlua_checkboolean(lua_State *L, int index)
+{
+ if (!lua_isboolean(L, index))
+ luaL_argerror(L, index, "boolean expected");
+ return lua_toboolean(L, index);
+}
+
+/* Helper to push unsigned integers to Lua stack, respecting Lua limitations */
+static int hlua_fcn_pushunsigned(lua_State *L, unsigned int val)
+{
+#if (LUA_MAXINTEGER == LLONG_MAX || ((LUA_MAXINTEGER == LONG_MAX) && (__WORDSIZE == 64)))
+ lua_pushinteger(L, val);
+#else
+ if (val > INT_MAX)
+ lua_pushnumber(L, (lua_Number)val);
+ else
+ lua_pushinteger(L, (int)val);
+#endif
+ return 1;
+}
+
+/* Helper to push unsigned long long to Lua stack, respecting Lua limitations */
+static int hlua_fcn_pushunsigned_ll(lua_State *L, unsigned long long val) {
+#if (LUA_MAXINTEGER == LLONG_MAX || ((LUA_MAXINTEGER == LONG_MAX) && (__WORDSIZE == 64)))
+ /* 64 bits case, U64 is supported until LLONG_MAX */
+ if (val > LLONG_MAX)
+ lua_pushnumber(L, (lua_Number)val);
+ else
+ lua_pushinteger(L, val);
+#else
+ /* 32 bits case, U64 is supported until INT_MAX */
+ if (val > INT_MAX)
+ lua_pushnumber(L, (lua_Number)val);
+ else
+ lua_pushinteger(L, (int)val);
+#endif
+ return 1;
+}
+
+/* This function gets a struct field and converts it in Lua
+ * variable. The variable is pushed at the top of the stack.
+ */
+int hlua_fcn_pushfield(lua_State *L, struct field *field)
+{
+ /* The lua_Integer is always signed. Its length depends on
+ * compilation options, so the following code is conditioned
+	 * by some macros. Windows macros are not supported.
+ * If the number cannot be represented as integer, we try to
+ * convert to float.
+ */
+ switch (field_format(field, 0)) {
+
+ case FF_EMPTY:
+ lua_pushnil(L);
+ return 1;
+
+ case FF_S32:
+ /* S32 is always supported. */
+ lua_pushinteger(L, field->u.s32);
+ return 1;
+
+ case FF_U32:
+#if (LUA_MAXINTEGER == LLONG_MAX || ((LUA_MAXINTEGER == LONG_MAX) && (__WORDSIZE == 64)))
+ /* 64 bits case, U32 is always supported */
+ lua_pushinteger(L, field->u.u32);
+#else
+ /* 32 bits case, U32 is supported until INT_MAX. */
+ if (field->u.u32 > INT_MAX)
+ lua_pushnumber(L, (lua_Number)field->u.u32);
+ else
+ lua_pushinteger(L, field->u.u32);
+#endif
+ return 1;
+
+ case FF_S64:
+#if (LUA_MAXINTEGER == LLONG_MAX || ((LUA_MAXINTEGER == LONG_MAX) && (__WORDSIZE == 64)))
+ /* 64 bits case, S64 is always supported */
+ lua_pushinteger(L, field->u.s64);
+#else
+		/* 32 bits case, S64 is supported between INT_MIN and INT_MAX */
+ if (field->u.s64 < INT_MIN || field->u.s64 > INT_MAX)
+ lua_pushnumber(L, (lua_Number)field->u.s64);
+ else
+ lua_pushinteger(L, (int)field->u.s64);
+#endif
+ return 1;
+
+ case FF_U64:
+#if (LUA_MAXINTEGER == LLONG_MAX || ((LUA_MAXINTEGER == LONG_MAX) && (__WORDSIZE == 64)))
+ /* 64 bits case, U64 is supported until LLONG_MAX */
+ if (field->u.u64 > LLONG_MAX)
+ lua_pushnumber(L, (lua_Number)field->u.u64);
+ else
+ lua_pushinteger(L, field->u.u64);
+#else
+		/* 32 bits case, U64 is supported until INT_MAX */
+ if (field->u.u64 > INT_MAX)
+ lua_pushnumber(L, (lua_Number)field->u.u64);
+ else
+ lua_pushinteger(L, (int)field->u.u64);
+#endif
+ return 1;
+
+ case FF_STR:
+ lua_pushstring(L, field->u.str);
+ return 1;
+
+ default:
+ break;
+ }
+
+ /* Default case, never reached. */
+ lua_pushnil(L);
+ return 1;
+}
+
+/* Some strings are started or terminated by blank chars,
+ * this function removes the spaces, tabs, \r and
+ * \n at the begin and at the end of the string "str", and
+ * push the result in the lua stack.
+ * Returns a pointer to the Lua internal copy of the string.
+ */
+const char *hlua_pushstrippedstring(lua_State *L, const char *str)
+{
+ const char *p;
+ int l;
+
+ for (p = str; HTTP_IS_LWS(*p); p++);
+
+ for (l = strlen(p); l && HTTP_IS_LWS(p[l-1]); l--);
+
+ return lua_pushlstring(L, p, l);
+}
+
+/* The three following functions are useful for adding entries
+ * in a table. These functions takes a string and respectively an
+ * integer, a string or a function and add it to the table in the
+ * top of the stack.
+ *
+ * These functions throws an error if no more stack size is
+ * available.
+ */
+void hlua_class_const_int(lua_State *L, const char *name, int value)
+{
+ lua_pushstring(L, name);
+ lua_pushinteger(L, value);
+ lua_rawset(L, -3);
+}
+void hlua_class_const_str(lua_State *L, const char *name, const char *value)
+{
+ lua_pushstring(L, name);
+ lua_pushstring(L, value);
+ lua_rawset(L, -3);
+}
+void hlua_class_function(lua_State *L, const char *name, int (*function)(lua_State *L))
+{
+ lua_pushstring(L, name);
+ lua_pushcclosure(L, function, 0);
+ lua_rawset(L, -3);
+}
+
+/* This function returns a string containing the HAProxy object name. */
+int hlua_dump_object(struct lua_State *L)
+{
+ const char *name = (const char *)lua_tostring(L, lua_upvalueindex(1));
+ lua_pushfstring(L, "HAProxy class %s", name);
+ return 1;
+}
+
+/* This function register a table as metatable and. It names
+ * the metatable, and returns the associated reference.
+ * The original table is popped from the top of the stack.
+ * "name" is the referenced class name.
+ */
+int hlua_register_metatable(struct lua_State *L, char *name)
+{
+ /* Check the type of the top element. it must be
+ * a table.
+ */
+ if (lua_type(L, -1) != LUA_TTABLE)
+ luaL_error(L, "hlua_register_metatable() requires a type Table "
+ "in the top of the stack");
+
+ /* Add the __tostring function which identify the
+ * created object.
+ */
+ lua_pushstring(L, "__tostring");
+ lua_pushstring(L, name);
+ lua_pushcclosure(L, hlua_dump_object, 1);
+ lua_rawset(L, -3);
+
+ /* Register a named entry for the table. The table
+ * reference is copied first because the function
+ * lua_setfield() pop the entry.
+ */
+ lua_pushvalue(L, -1);
+ lua_setfield(L, LUA_REGISTRYINDEX, name);
+
+ /* Creates the reference of the object. The
+ * function luaL_ref pop the top of the stack.
+ */
+ return luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
+/* Return an object of the expected type, or throws an error. */
+void *hlua_checkudata(lua_State *L, int ud, int class_ref)
+{
+ void *p;
+ int ret;
+
+ /* Check if the stack entry is an array. */
+ if (!lua_istable(L, ud))
+ luaL_argerror(L, ud, NULL);
+
+	/* pop the metatable of the referenced object. */
+ if (!lua_getmetatable(L, ud))
+ luaL_argerror(L, ud, NULL);
+
+ /* pop the expected metatable. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_ref);
+
+ /* Check if the metadata have the expected type. */
+ ret = lua_rawequal(L, -1, -2);
+ lua_pop(L, 2);
+ if (!ret)
+ luaL_argerror(L, ud, NULL);
+
+ /* Push on the stack at the entry [0] of the table. */
+ lua_rawgeti(L, ud, 0);
+
+ /* Check if this entry is userdata. */
+ p = lua_touserdata(L, -1);
+ if (!p)
+ luaL_argerror(L, ud, NULL);
+
+ /* Remove the entry returned by lua_rawgeti(). */
+ lua_pop(L, 1);
+
+ /* Return the associated struct. */
+ return p;
+}
+
+/* This function return the current date at epoch format in milliseconds. */
+int hlua_now(lua_State *L)
+{
+ /* WT: the doc says "returns the current time" and later says that it's
+ * monotonic. So the best fit is to use start_date+(now-start_time).
+ */
+ struct timeval tv;
+
+ tv = NS_TO_TV(now_ns - start_time_ns);
+ tv_add(&tv, &tv, &start_date);
+
+ lua_newtable(L);
+ lua_pushstring(L, "sec");
+ lua_pushinteger(L, tv.tv_sec);
+ lua_rawset(L, -3);
+ lua_pushstring(L, "usec");
+ lua_pushinteger(L, tv.tv_usec);
+ lua_rawset(L, -3);
+ return 1;
+}
+
+/* This functions expects a Lua string as HTTP date, parse it and
+ * returns an integer containing the epoch format of the date, or
+ * nil if the parsing fails.
+ */
+static int hlua_parse_date(lua_State *L, int (*fcn)(const char *, int, struct tm*))
+{
+ const char *str;
+ size_t len;
+ struct tm tm;
+ time_t time;
+
+ str = luaL_checklstring(L, 1, &len);
+
+ if (!fcn(str, len, &tm)) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* This function considers the content of the broken-down time
+	 * is expressed in the UTC timezone. timegm doesn't care about
+ * the gnu variable tm_gmtoff. If gmtoff is set, or if you know
+ * the timezone from the broken-down time, it must be fixed
+ * after the conversion.
+ */
+ time = my_timegm(&tm);
+ if (time == -1) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushinteger(L, (int)time);
+ return 1;
+}
+static int hlua_http_date(lua_State *L)
+{
+ return hlua_parse_date(L, parse_http_date);
+}
+static int hlua_imf_date(lua_State *L)
+{
+ return hlua_parse_date(L, parse_imf_date);
+}
+static int hlua_rfc850_date(lua_State *L)
+{
+ return hlua_parse_date(L, parse_rfc850_date);
+}
+static int hlua_asctime_date(lua_State *L)
+{
+ return hlua_parse_date(L, parse_asctime_date);
+}
+
+static int hlua_get_info(lua_State *L)
+{
+ int i;
+
+ stats_fill_info(stats, STATS_LEN, 0);
+
+ lua_newtable(L);
+ for (i=0; i<INF_TOTAL_FIELDS; i++) {
+ lua_pushstring(L, info_fields[i].name);
+ hlua_fcn_pushfield(L, &stats[i]);
+ lua_settable(L, -3);
+ }
+ return 1;
+}
+
+static struct hlua_concat *hlua_check_concat(lua_State *L, int ud)
+{
+ return (hlua_checkudata(L, ud, class_concat_ref));
+}
+
+static int hlua_concat_add(lua_State *L)
+{
+ struct hlua_concat *b;
+ char *buffer;
+ char *new;
+ const char *str;
+ size_t l;
+
+ /* First arg must be a concat object. */
+ b = hlua_check_concat(L, 1);
+
+ /* Second arg must be a string. */
+ str = luaL_checklstring(L, 2, &l);
+
+ /* Get the buffer. */
+ lua_rawgeti(L, 1, 1);
+ buffer = lua_touserdata(L, -1);
+ lua_pop(L, 1);
+
+ /* Update the buffer size if it s required. The old buffer
+ * is crushed by the new in the object array, so it will
+ * be deleted by the GC.
+ * Note that in the first loop, the "new" variable is only
+ * used as a flag.
+ */
+ new = NULL;
+ while (b->size - b->len < l) {
+ b->size += HLUA_CONCAT_BLOCSZ;
+ new = buffer;
+ }
+ if (new) {
+ new = lua_newuserdata(L, b->size);
+ memcpy(new, buffer, b->len);
+ lua_rawseti(L, 1, 1);
+ buffer = new;
+ }
+
+ /* Copy string, and update metadata. */
+ memcpy(buffer + b->len, str, l);
+ b->len += l;
+ return 0;
+}
+
+static int hlua_concat_dump(lua_State *L)
+{
+ struct hlua_concat *b;
+ char *buffer;
+
+ /* First arg must be a concat object. */
+ b = hlua_check_concat(L, 1);
+
+ /* Get the buffer. */
+ lua_rawgeti(L, 1, 1);
+ buffer = lua_touserdata(L, -1);
+ lua_pop(L, 1);
+
+	/* Push the concatenated string on the stack. */
+ lua_pushlstring(L, buffer, b->len);
+ return 1;
+}
+
+int hlua_concat_new(lua_State *L)
+{
+ struct hlua_concat *b;
+
+ lua_newtable(L);
+ b = lua_newuserdata(L, sizeof(*b));
+ b->size = HLUA_CONCAT_BLOCSZ;
+ b->len = 0;
+ lua_rawseti(L, -2, 0);
+ lua_newuserdata(L, HLUA_CONCAT_BLOCSZ);
+ lua_rawseti(L, -2, 1);
+
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_concat_ref);
+ lua_setmetatable(L, -2);
+
+ return 1;
+}
+
+static int concat_tostring(lua_State *L)
+{
+ const void *ptr = lua_topointer(L, 1);
+ lua_pushfstring(L, "Concat object: %p", ptr);
+ return 1;
+}
+
+static void hlua_concat_init(lua_State *L)
+{
+ /* Creates the buffered concat object. */
+ lua_newtable(L);
+
+ lua_pushstring(L, "__tostring");
+ lua_pushcclosure(L, concat_tostring, 0);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "__index"); /* Creates the index entry. */
+ lua_newtable(L); /* The "__index" content. */
+
+ lua_pushstring(L, "add");
+ lua_pushcclosure(L, hlua_concat_add, 0);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "dump");
+ lua_pushcclosure(L, hlua_concat_dump, 0);
+ lua_settable(L, -3);
+
+ lua_settable(L, -3); /* Sets the __index entry. */
+ class_concat_ref = luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
+/* C backing storage for lua Queue class */
+struct hlua_queue {
+ uint32_t size;
+ struct mt_list list;
+ struct mt_list wait_tasks;
+};
+
+/* used to store lua objects in queue->list */
+struct hlua_queue_item {
+ int ref; /* lua object reference id */
+ struct mt_list list;
+};
+
+/* used to store wait entries in queue->wait_tasks */
+struct hlua_queue_wait
+{
+ struct task *task;
+ struct mt_list entry;
+};
+
+/* This is the memory pool containing struct hlua_queue_item (queue items)
+ */
+DECLARE_STATIC_POOL(pool_head_hlua_queue, "hlua_queue", sizeof(struct hlua_queue_item));
+
+/* This is the memory pool containing struct hlua_queue_wait
+ * (queue waiting tasks)
+ */
+DECLARE_STATIC_POOL(pool_head_hlua_queuew, "hlua_queuew", sizeof(struct hlua_queue_wait));
+
+static struct hlua_queue *hlua_check_queue(lua_State *L, int ud)
+{
+ return hlua_checkudata(L, ud, class_queue_ref);
+}
+
+/* queue:size(): returns an integer containing the current number of queued
+ * items.
+ */
+static int hlua_queue_size(lua_State *L)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+
+ BUG_ON(!queue);
+ lua_pushinteger(L, HA_ATOMIC_LOAD(&queue->size));
+
+ return 1;
+}
+
+/* queue:push(): push an item (any type, except nil) at the end of the queue
+ *
+ * Returns boolean:true for success and boolean:false on error
+ */
+static int hlua_queue_push(lua_State *L)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+ struct hlua_queue_item *item;
+ struct mt_list *elt1, elt2;
+ struct hlua_queue_wait *waiter;
+
+ if (lua_gettop(L) != 2 || lua_isnoneornil(L, 2)) {
+ luaL_error(L, "unexpected argument");
+ /* not reached */
+ return 0;
+ }
+ BUG_ON(!queue);
+
+ item = pool_alloc(pool_head_hlua_queue);
+ if (!item) {
+ /* memory error */
+ lua_pushboolean(L, 0);
+ return 1;
+ }
+
+ /* get a reference from lua object at the top of the stack */
+ item->ref = hlua_ref(L);
+
+ /* push new entry to the queue */
+ MT_LIST_INIT(&item->list);
+ HA_ATOMIC_INC(&queue->size);
+ MT_LIST_APPEND(&queue->list, &item->list);
+
+ /* notify tasks waiting on queue:pop_wait() (if any) */
+ mt_list_for_each_entry_safe(waiter, &queue->wait_tasks, entry, elt1, elt2) {
+ task_wakeup(waiter->task, TASK_WOKEN_MSG);
+ }
+
+ lua_pushboolean(L, 1);
+ return 1;
+}
+
+/* internal queue pop helper, returns 1 if it successfully popped an item
+ * from the queue and pushed it on lua stack.
+ *
+ * Else it returns 0 (nothing is pushed on the stack)
+ */
+static int _hlua_queue_pop(lua_State *L, struct hlua_queue *queue)
+{
+ struct hlua_queue_item *item;
+
+ item = MT_LIST_POP(&queue->list, typeof(item), list);
+ if (!item)
+ return 0; /* nothing in queue */
+
+ HA_ATOMIC_DEC(&queue->size);
+ /* push lua obj on the stack */
+ hlua_pushref(L, item->ref);
+
+ /* obj ref should be released right away since it was pushed
+ * on the stack and will not be used anymore
+ */
+ hlua_unref(L, item->ref);
+
+ /* free the queue item */
+ pool_free(pool_head_hlua_queue, item);
+
+ return 1;
+}
+
+/* queue:pop(): returns the first item at the top of the queue or nil if
+ * the queue is empty.
+ */
+static int hlua_queue_pop(lua_State *L)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+
+ BUG_ON(!queue);
+ if (!_hlua_queue_pop(L, queue)) {
+ /* nothing in queue, push nil */
+ lua_pushnil(L);
+ }
+ return 1; /* either item or nil is at the top of the stack */
+}
+
+/* queue:pop_wait(): same as queue:pop() but doesn't return on empty queue.
+ *
+ * Aborts if used incorrectly and returns nil in case of memory error.
+ */
+static int _hlua_queue_pop_wait(lua_State *L, int status, lua_KContext ctx)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+ struct hlua_queue_wait *wait = lua_touserdata(L, 2);
+
+ /* new pop attempt */
+ if (!_hlua_queue_pop(L, queue)) {
+ hlua_yieldk(L, 0, 0, _hlua_queue_pop_wait, TICK_ETERNITY, 0); // wait retry
+ return 0; // never reached, yieldk won't return
+ }
+
+ /* remove task from waiting list */
+ MT_LIST_DELETE(&wait->entry);
+ pool_free(pool_head_hlua_queuew, wait);
+
+ return 1; // success
+}
+static int hlua_queue_pop_wait(lua_State *L)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+ struct hlua_queue_wait *wait;
+ struct hlua *hlua;
+
+ BUG_ON(!queue);
+
+ /* Get hlua struct, or NULL if we execute from main lua state */
+ hlua = hlua_gethlua(L);
+
+ if (!hlua || HLUA_CANT_YIELD(hlua)) {
+ luaL_error(L, "pop_wait() may only be used within task context "
+ "(requires yielding)");
+ return 0; /* not reached */
+ }
+
+ /* try opportunistic pop (there could already be pending items) */
+ if (_hlua_queue_pop(L, queue))
+ return 1; // success
+
+ /* no pending items, waiting required */
+
+ wait = pool_alloc(pool_head_hlua_queuew);
+ if (!wait) {
+ lua_pushnil(L);
+ return 1; /* memory error, return nil */
+ }
+
+ wait->task = hlua->task;
+ MT_LIST_INIT(&wait->entry);
+
+ /* add task to queue's wait list */
+ MT_LIST_TRY_APPEND(&queue->wait_tasks, &wait->entry);
+
+ /* push wait entry at index 2 on the stack (queue is already there) */
+ lua_pushlightuserdata(L, wait);
+
+ /* Go to waiting loop which immediately performs a new attempt to make
+ * sure we didn't miss a push during the wait entry initialization.
+ *
+ * _hlua_queue_pop_wait() won't return to us if it has to yield, which
+ * is the most likely scenario. What happens in this case is that yieldk
+ * call never returns, and instead Lua will call the continuation
+ * function after a successful resume, so the calling function will
+ * no longer be us, but Lua instead. And when the continuation function
+ * eventually returns (because it successfully popped an item), Lua will
+ * directly give the hand back to the Lua function that called us.
+ *
+ * More info here: https://www.lua.org/manual/5.4/manual.html#4.7
+ */
+ return _hlua_queue_pop_wait(L, LUA_OK, 0);
+}
+
+static int hlua_queue_new(lua_State *L)
+{
+ struct hlua_queue *q;
+
+ lua_newtable(L);
+
+ /* set class metatable */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_queue_ref);
+ lua_setmetatable(L, -2);
+
+ /* index:0 is queue userdata (c data) */
+ q = lua_newuserdata(L, sizeof(*q));
+ MT_LIST_INIT(&q->list);
+ MT_LIST_INIT(&q->wait_tasks);
+ q->size = 0;
+ lua_rawseti(L, -2, 0);
+
+ /* class methods */
+ hlua_class_function(L, "size", hlua_queue_size);
+ hlua_class_function(L, "pop", hlua_queue_pop);
+ hlua_class_function(L, "pop_wait", hlua_queue_pop_wait);
+ hlua_class_function(L, "push", hlua_queue_push);
+
+ return 1;
+}
+
+static int hlua_queue_gc(struct lua_State *L)
+{
+ struct hlua_queue *queue = hlua_check_queue(L, 1);
+ struct hlua_queue_wait *wait;
+ struct hlua_queue_item *item;
+
+ /* Purge waiting tasks (if any)
+ *
+ * It is normally not expected to have waiting tasks, except if such
+ * task has been aborted while in the middle of a queue:pop_wait()
+ * function call.
+ */
+ while ((wait = MT_LIST_POP(&queue->wait_tasks, typeof(wait), entry))) {
+ /* free the wait entry */
+ pool_free(pool_head_hlua_queuew, wait);
+ }
+
+ /* purge remaining (unconsumed) items in the queue */
+ while ((item = MT_LIST_POP(&queue->list, typeof(item), list))) {
+ /* free the queue item */
+ pool_free(pool_head_hlua_queue, item);
+ }
+
+ /* queue (userdata) will automatically be freed by lua gc */
+
+ return 0;
+}
+
+static void hlua_queue_init(lua_State *L)
+{
+ /* Creates the queue object. */
+ lua_newtable(L);
+
+ hlua_class_function(L, "__gc", hlua_queue_gc);
+
+ class_queue_ref = luaL_ref(L, LUA_REGISTRYINDEX);
+}
+
+int hlua_fcn_new_stktable(lua_State *L, struct stktable *tbl)
+{
+ lua_newtable(L);
+
+ /* Pop a class stktbl metatable and affect it to the userdata. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_stktable_ref);
+ lua_setmetatable(L, -2);
+
+ lua_pushlightuserdata(L, tbl);
+ lua_rawseti(L, -2, 0);
+ return 1;
+}
+
+static struct stktable *hlua_check_stktable(lua_State *L, int ud)
+{
+ return hlua_checkudata(L, ud, class_stktable_ref);
+}
+
+/* Extract stick table attributes into Lua table.
+ * Pushes nil when the table has no id, otherwise pushes a table with
+ * fields: type, length (key size), size, used, nopurge, expire, and a
+ * nested 'data' table mapping each configured data type to its period
+ * (ARG_T_DELAY types) or -1.
+ */
+int hlua_stktable_info(lua_State *L)
+{
+ struct stktable *tbl;
+ int dt;
+
+ tbl = hlua_check_stktable(L, 1);
+
+ if (!tbl->id) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_newtable(L);
+
+ lua_pushstring(L, "type");
+ lua_pushstring(L, stktable_types[tbl->type].kw);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "length");
+ lua_pushinteger(L, tbl->key_size);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "size");
+ hlua_fcn_pushunsigned(L, tbl->size);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "used");
+ hlua_fcn_pushunsigned(L, tbl->current);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "nopurge");
+ lua_pushboolean(L, tbl->nopurge > 0);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "expire");
+ lua_pushinteger(L, tbl->expire);
+ lua_settable(L, -3);
+
+ /* Save data types periods (if applicable) in 'data' table */
+ lua_pushstring(L, "data");
+ lua_newtable(L);
+
+ for (dt = 0; dt < STKTABLE_DATA_TYPES; dt++) {
+ /* data_ofs == 0 means this data type is not stored in the table */
+ if (tbl->data_ofs[dt] == 0)
+ continue;
+
+ lua_pushstring(L, stktable_data_types[dt].name);
+
+ if (stktable_data_types[dt].arg_type == ARG_T_DELAY)
+ lua_pushinteger(L, tbl->data_arg[dt].u);
+ else
+ lua_pushinteger(L, -1);
+
+ lua_settable(L, -3);
+ }
+
+ lua_settable(L, -3);
+
+ return 1;
+}
+
+/* Helper to extract a stick table entry into the Lua table currently on
+ * top of the stack: each data type stored for session <ts> of table <t>
+ * is added as a name -> value pair. The caller must already hold a
+ * reference on <ts> (ref_cnt) to keep it alive while reading.
+ */
+static void hlua_stktable_entry(lua_State *L, struct stktable *t, struct stksess *ts)
+{
+ int dt;
+ void *ptr;
+
+ for (dt = 0; dt < STKTABLE_DATA_TYPES; dt++) {
+
+ ptr = stktable_data_ptr(t, ts, dt);
+ /* NULL means this data type is not stored for this table */
+ if (!ptr)
+ continue;
+
+ lua_pushstring(L, stktable_data_types[dt].name);
+
+ switch (stktable_data_types[dt].std_type) {
+ case STD_T_SINT:
+ lua_pushinteger(L, stktable_data_cast(ptr, std_t_sint));
+ break;
+ case STD_T_UINT:
+ hlua_fcn_pushunsigned(L, stktable_data_cast(ptr, std_t_uint));
+ break;
+ case STD_T_ULL:
+ hlua_fcn_pushunsigned_ll(L, stktable_data_cast(ptr, std_t_ull));
+ break;
+ case STD_T_FRQP:
+ /* freq counters are rendered as events over their period */
+ lua_pushinteger(L, read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[dt].u));
+ break;
+ case STD_T_DICT: {
+ struct dict_entry *de;
+ de = stktable_data_cast(ptr, std_t_dict);
+ lua_pushstring(L, de ? (char *)de->value.key : "-");
+ break;
+ }
+ }
+
+ lua_settable(L, -3);
+ }
+}
+
+/* Looks in table <t> for a sticky session matching key <key>
+ * Returns table with session data or nil
+ *
+ * The returned table always contains 'use' and 'expire' (integer) fields.
+ * For frequency/rate counters, each data entry is returned as table with
+ * 'value' and 'period' fields.
+ */
+int hlua_stktable_lookup(lua_State *L)
+{
+ struct stktable *t;
+ struct sample smp;
+ struct stktable_key *skey;
+ struct stksess *ts;
+
+ t = hlua_check_stktable(L, 1);
+ /* build a constant string sample from the Lua argument so it can be
+ * converted to a stick table key
+ */
+ smp.data.type = SMP_T_STR;
+ smp.flags = SMP_F_CONST;
+ smp.data.u.str.area = (char *)lua_tolstring(L, 2, &smp.data.u.str.data);
+
+ skey = smp_to_stkey(&smp, t);
+ if (!skey) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* on success, stktable_lookup_key() takes a reference on the entry
+ * (ref_cnt), released below once we are done reading it
+ */
+ ts = stktable_lookup_key(t, skey);
+ if (!ts) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_newtable(L);
+ lua_pushstring(L, "use");
+ /* subtract our own reference from the reported usage count */
+ lua_pushinteger(L, HA_ATOMIC_LOAD(&ts->ref_cnt) - 1);
+ lua_settable(L, -3);
+
+ lua_pushstring(L, "expire");
+ lua_pushinteger(L, tick_remain(now_ms, ts->expire));
+ lua_settable(L, -3);
+
+ hlua_stktable_entry(L, t, ts);
+ HA_ATOMIC_DEC(&ts->ref_cnt);
+
+ return 1;
+}
+
+struct stk_filter {
+ long long val;
+ int type;
+ int op;
+};
+
+
+/* Helper for returning errors to callers using Lua convention (nil, err) */
+static int hlua_error(lua_State *L, const char *fmt, ...) {
+ char buf[256];
+ int len;
+ va_list args;
+ va_start(args, fmt);
+ len = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (len < 0) {
+ ha_alert("hlua_error(): Could not write error message.\n");
+ lua_pushnil(L);
+ return 1;
+ } else if (len >= sizeof(buf))
+ ha_alert("hlua_error(): Error message was truncated.\n");
+
+ lua_pushnil(L);
+ lua_pushstring(L, buf);
+
+ return 2;
+}
+
+/* Dump the contents of stick table <t>*/
+/* Dumps the entries of stick table <t> into a Lua table.
+ *
+ * Optional second argument: a Lua table of up to STKTABLE_FILTER_LEN
+ * triplets {"data_col", "op", val}; entries whose data fail any triplet
+ * comparison are skipped.
+ *
+ * Returns a table keyed by stick table key with one data sub-table per
+ * entry (see hlua_stktable_entry()), or (nil, errmsg) on bad arguments.
+ */
+int hlua_stktable_dump(lua_State *L)
+{
+	struct stktable *t;
+	struct ebmb_node *eb;
+	struct ebmb_node *n;
+	struct stksess *ts;
+	int type;
+	int op;
+	int dt;
+	long long val;
+	struct stk_filter filter[STKTABLE_FILTER_LEN];
+	int filter_count = 0;
+	int i;
+	int skip_entry;
+	void *ptr;
+
+	t = hlua_check_stktable(L, 1);
+	type = lua_type(L, 2);
+
+	/* parse the optional filter argument into <filter> */
+	switch (type) {
+	case LUA_TNONE:
+	case LUA_TNIL:
+		break;
+	case LUA_TTABLE:
+		lua_pushnil(L);
+		while (lua_next(L, 2) != 0) {
+			int entry_idx = 0;
+
+			if (filter_count >= STKTABLE_FILTER_LEN)
+				return hlua_error(L, "Filter table too large (len > %d)", STKTABLE_FILTER_LEN);
+
+			if (lua_type(L, -1) != LUA_TTABLE || lua_rawlen(L, -1) != 3)
+				return hlua_error(L, "Filter table entry must be a triplet: {\"data_col\", \"op\", val} (entry #%d)", filter_count + 1);
+
+			lua_pushnil(L);
+			while (lua_next(L, -2) != 0) {
+				switch (entry_idx) {
+				case 0:
+					if (lua_type(L, -1) != LUA_TSTRING)
+						return hlua_error(L, "Filter table data column must be string (entry #%d)", filter_count + 1);
+
+					dt = stktable_get_data_type((char *)lua_tostring(L, -1));
+					if (dt < 0 || t->data_ofs[dt] == 0)
+						return hlua_error(L, "Filter table data column not present in stick table (entry #%d)", filter_count + 1);
+					filter[filter_count].type = dt;
+					break;
+				case 1:
+					if (lua_type(L, -1) != LUA_TSTRING)
+						return hlua_error(L, "Filter table operator must be string (entry #%d)", filter_count + 1);
+
+					op = get_std_op(lua_tostring(L, -1));
+					if (op < 0)
+						return hlua_error(L, "Unknown operator in filter table (entry #%d)", filter_count + 1);
+					filter[filter_count].op = op;
+					break;
+				case 2:
+					val = lua_tointeger(L, -1);
+					filter[filter_count].val = val;
+					filter_count++;
+					break;
+				default:
+					break;
+				}
+				entry_idx++;
+				lua_pop(L, 1);
+			}
+			lua_pop(L, 1);
+		}
+		break;
+	default:
+		return hlua_error(L, "filter table expected");
+	}
+
+	lua_newtable(L);
+
+	/* walk the key tree; the table lock is only held while moving from
+	 * one node to the next, each visited entry is pinned via ref_cnt
+	 * while it is being read without the lock
+	 */
+	HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+	eb = ebmb_first(&t->keys);
+	for (n = eb; n; n = ebmb_next(n)) {
+		ts = ebmb_entry(n, struct stksess, key);
+		if (!ts) {
+			HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+			return 1;
+		}
+		HA_ATOMIC_INC(&ts->ref_cnt);
+		HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+		/* multi condition/value filter */
+		skip_entry = 0;
+		for (i = 0; i < filter_count; i++) {
+			ptr = stktable_data_ptr(t, ts, filter[i].type);
+			if (!ptr)
+				continue;
+
+			switch (stktable_data_types[filter[i].type].std_type) {
+			case STD_T_SINT:
+				val = stktable_data_cast(ptr, std_t_sint);
+				break;
+			case STD_T_UINT:
+				val = stktable_data_cast(ptr, std_t_uint);
+				break;
+			case STD_T_ULL:
+				val = stktable_data_cast(ptr, std_t_ull);
+				break;
+			case STD_T_FRQP:
+				val = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+							   t->data_arg[filter[i].type].u);
+				break;
+			default:
+				continue;
+			}
+
+			op = filter[i].op;
+
+			if ((val < filter[i].val && (op == STD_OP_EQ || op == STD_OP_GT || op == STD_OP_GE)) ||
+			    (val == filter[i].val && (op == STD_OP_NE || op == STD_OP_GT || op == STD_OP_LT)) ||
+			    (val > filter[i].val && (op == STD_OP_EQ || op == STD_OP_LT || op == STD_OP_LE))) {
+				skip_entry = 1;
+				break;
+			}
+		}
+
+		if (skip_entry) {
+			HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+			HA_ATOMIC_DEC(&ts->ref_cnt);
+			continue;
+		}
+
+		if (t->type == SMP_T_IPV4) {
+			char addr[INET_ADDRSTRLEN];
+			inet_ntop(AF_INET, (const void *)&ts->key.key, addr, sizeof(addr));
+			lua_pushstring(L, addr);
+		} else if (t->type == SMP_T_IPV6) {
+			char addr[INET6_ADDRSTRLEN];
+			inet_ntop(AF_INET6, (const void *)&ts->key.key, addr, sizeof(addr));
+			lua_pushstring(L, addr);
+		} else if (t->type == SMP_T_SINT) {
+			/* NOTE(review): only the first byte of the key is read
+			 * here — confirm against how SINT keys are stored
+			 */
+			lua_pushinteger(L, *ts->key.key);
+		} else if (t->type == SMP_T_STR) {
+			lua_pushstring(L, (const char *)ts->key.key);
+		} else {
+			/* BUG fix: release the reference taken above before
+			 * bailing out, otherwise this entry could never be
+			 * purged
+			 */
+			HA_ATOMIC_DEC(&ts->ref_cnt);
+			return hlua_error(L, "Unsupported stick table key type");
+		}
+
+		lua_newtable(L);
+		hlua_stktable_entry(L, t, ts);
+		lua_settable(L, -3);
+		HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+		HA_ATOMIC_DEC(&ts->ref_cnt);
+	}
+	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+	return 1;
+}
+
+/* Instantiates a Lua object of class Listener wrapping <lst>.
+ * The raw pointer is stashed at array index 0 and the class metatable
+ * attached. Leaves the new object on the stack and returns 1.
+ */
+int hlua_fcn_new_listener(lua_State *L, struct listener *lst)
+{
+ lua_newtable(L);
+
+ /* Pop a class session metatable and affect it to the userdata. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_listener_ref);
+ lua_setmetatable(L, -2);
+
+ lua_pushlightuserdata(L, lst);
+ lua_rawseti(L, -2, 0);
+ return 1;
+}
+
+static struct listener *hlua_check_listener(lua_State *L, int ud)
+{
+ return hlua_checkudata(L, ud, class_listener_ref);
+}
+
+/* Returns a Lua table mapping stat field names to the listener's
+ * current statistics, or nil when the listener has no frontend.
+ * NOTE(review): fills the file-scope 'stats' array declared elsewhere
+ * in this file — presumably per-thread or otherwise serialized; verify
+ * against its declaration.
+ */
+int hlua_listener_get_stats(lua_State *L)
+{
+ struct listener *li;
+ int i;
+
+ li = hlua_check_listener(L, 1);
+
+ if (!li->bind_conf->frontend) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ stats_fill_li_stats(li->bind_conf->frontend, li, STAT_SHLGNDS, stats,
+ STATS_LEN, NULL);
+
+ lua_newtable(L);
+ for (i=0; i<ST_F_TOTAL_FIELDS; i++) {
+ lua_pushstring(L, stat_fields[i].name);
+ hlua_fcn_pushfield(L, &stats[i]);
+ lua_settable(L, -3);
+ }
+ return 1;
+
+}
+
+int hlua_server_gc(lua_State *L)
+{
+ struct server *srv = hlua_checkudata(L, 1, class_server_ref);
+
+ srv_drop(srv); /* srv_drop allows NULL srv */
+ return 0;
+}
+
+static struct server *hlua_check_server(lua_State *L, int ud)
+{
+ struct server *srv = hlua_checkudata(L, ud, class_server_ref);
+ if (srv->flags & SRV_F_DELETED) {
+ return NULL;
+ }
+ return srv;
+}
+
+int hlua_server_get_stats(lua_State *L)
+{
+ struct server *srv;
+ int i;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ if (!srv->proxy) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ stats_fill_sv_stats(srv->proxy, srv, STAT_SHLGNDS, stats,
+ STATS_LEN, NULL);
+
+ lua_newtable(L);
+ for (i=0; i<ST_F_TOTAL_FIELDS; i++) {
+ lua_pushstring(L, stat_fields[i].name);
+ hlua_fcn_pushfield(L, &stats[i]);
+ lua_settable(L, -3);
+ }
+ return 1;
+
+}
+
+int hlua_server_get_proxy(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ if (!srv->proxy) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ hlua_fcn_new_proxy(L, srv->proxy);
+ return 1;
+}
+
+/* Pushes a string representation of the server's address:
+ * "addr:port" for IPv4/IPv6, the socket path for AF_UNIX, or
+ * "<unknown>" for any other family. Pushes nil if the server was
+ * deleted. Returns 1.
+ */
+int hlua_server_get_addr(lua_State *L)
+{
+	struct server *srv;
+	char addr[INET6_ADDRSTRLEN];
+	luaL_Buffer b;
+
+	srv = hlua_check_server(L, 1);
+	if (srv == NULL) {
+		lua_pushnil(L);
+		return 1;
+	}
+
+	luaL_buffinit(L, &b);
+
+	/* use sizeof(addr) consistently instead of mixing INET_ADDRSTRLEN
+	 * and INET6_ADDRSTRLEN bounds on the same buffer
+	 */
+	switch (srv->addr.ss_family) {
+	case AF_INET:
+		inet_ntop(AF_INET, &((struct sockaddr_in *)&srv->addr)->sin_addr,
+		          addr, sizeof(addr));
+		luaL_addstring(&b, addr);
+		luaL_addstring(&b, ":");
+		/* the buffer is reused to format the port number */
+		snprintf(addr, sizeof(addr), "%d", srv->svc_port);
+		luaL_addstring(&b, addr);
+		break;
+	case AF_INET6:
+		inet_ntop(AF_INET6, &((struct sockaddr_in6 *)&srv->addr)->sin6_addr,
+		          addr, sizeof(addr));
+		luaL_addstring(&b, addr);
+		luaL_addstring(&b, ":");
+		snprintf(addr, sizeof(addr), "%d", srv->svc_port);
+		luaL_addstring(&b, addr);
+		break;
+	case AF_UNIX:
+		luaL_addstring(&b, (char *)((struct sockaddr_un *)&srv->addr)->sun_path);
+		break;
+	default:
+		luaL_addstring(&b, "<unknown>");
+		break;
+	}
+
+	luaL_pushresult(&b);
+	return 1;
+}
+
+int hlua_server_get_puid(lua_State *L)
+{
+ struct server *srv;
+ char buffer[12];
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ snprintf(buffer, sizeof(buffer), "%d", srv->puid);
+ lua_pushstring(L, buffer);
+ return 1;
+}
+
+int hlua_server_get_rid(lua_State *L)
+{
+ struct server *srv;
+ char buffer[12];
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ snprintf(buffer, sizeof(buffer), "%d", srv->rid);
+ lua_pushstring(L, buffer);
+ return 1;
+}
+
+int hlua_server_get_name(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushstring(L, srv->id);
+ return 1;
+}
+
+/* __index metamethod for server class
+ * support for additional keys that are missing from the main table
+ * stack:1 = table (server class), stack:2 = requested key
+ * Returns 1 if key is supported
+ * else returns 0 to make lua return NIL value to the caller
+ */
+static int hlua_server_index(struct lua_State *L)
+{
+ const char *key = lua_tostring(L, 2);
+
+ /* legacy 'name' attribute: emit a one-shot deprecation warning and
+ * delegate to get_name()
+ */
+ if (!strcmp(key, "name")) {
+ if (ONLY_ONCE())
+ ha_warning("hlua: use of server 'name' attribute is deprecated and will eventually be removed, please use get_name() function instead: %s\n", hlua_traceback(L, ", "));
+ lua_pushvalue(L, 1);
+ hlua_server_get_name(L);
+ return 1;
+ }
+ /* legacy 'puid' attribute: same deprecation path via get_puid() */
+ if (!strcmp(key, "puid")) {
+ if (ONLY_ONCE())
+ ha_warning("hlua: use of server 'puid' attribute is deprecated and will eventually be removed, please use get_puid() function instead: %s\n", hlua_traceback(L, ", "));
+ lua_pushvalue(L, 1);
+ hlua_server_get_puid(L);
+ return 1;
+ }
+ /* unknown attribute */
+ return 0;
+}
+
+/* Pushes a boolean telling whether the server is currently draining,
+ * or nil if the server was deleted. Returns 1.
+ */
+int hlua_server_is_draining(lua_State *L)
+{
+	struct server *srv = hlua_check_server(L, 1);
+
+	if (!srv) {
+		lua_pushnil(L);
+		return 1;
+	}
+	lua_pushboolean(L, server_is_draining(srv));
+	return 1;
+}
+
+int hlua_server_is_backup(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushboolean(L, (srv->flags & SRV_F_BACKUP));
+ return 1;
+}
+
+int hlua_server_is_dynamic(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushboolean(L, (srv->flags & SRV_F_DYNAMIC));
+ return 1;
+}
+
+int hlua_server_get_cur_sess(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushinteger(L, srv->cur_sess);
+ return 1;
+}
+
+int hlua_server_get_pend_conn(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushinteger(L, srv->queue.length);
+ return 1;
+}
+
+int hlua_server_set_maxconn(lua_State *L)
+{
+ struct server *srv;
+ const char *maxconn;
+ const char *err;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ maxconn = luaL_checkstring(L, 2);
+
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ err = server_parse_maxconn_change_request(srv, maxconn);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ if (!err)
+ lua_pushnil(L);
+ else
+ hlua_pushstrippedstring(L, err);
+ return 1;
+}
+
+int hlua_server_get_maxconn(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushinteger(L, srv->maxconn);
+ return 1;
+}
+
+int hlua_server_set_weight(lua_State *L)
+{
+ struct server *srv;
+ const char *weight;
+ const char *err;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ weight = luaL_checkstring(L, 2);
+
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ err = server_parse_weight_change_request(srv, weight);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ if (!err)
+ lua_pushnil(L);
+ else
+ hlua_pushstrippedstring(L, err);
+ return 1;
+}
+
+int hlua_server_get_weight(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ lua_pushinteger(L, srv->uweight);
+ return 1;
+}
+
+int hlua_server_set_addr(lua_State *L)
+{
+ struct server *srv;
+ const char *addr;
+ const char *port;
+ const char *err;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ addr = luaL_checkstring(L, 2);
+ if (lua_gettop(L) >= 3)
+ port = luaL_checkstring(L, 3);
+ else
+ port = NULL;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ err = srv_update_addr_port(srv, addr, port, "Lua script");
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ if (!err)
+ lua_pushnil(L);
+ else
+ hlua_pushstrippedstring(L, err);
+ return 1;
+}
+
+int hlua_server_shut_sess(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ srv_shutdown_streams(srv, SF_ERR_KILLED);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ return 0;
+}
+
+int hlua_server_set_drain(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ srv_adm_set_drain(srv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ return 0;
+}
+
+int hlua_server_set_maint(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ srv_adm_set_maint(srv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ return 0;
+}
+
+int hlua_server_set_ready(lua_State *L)
+{
+ struct server *srv;
+
+ srv = hlua_check_server(L, 1);
+ if (srv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ srv_adm_set_ready(srv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ return 0;
+}
+
+int hlua_server_check_enable(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->check.state & CHK_ST_CONFIGURED) {
+ sv->check.state |= CHK_ST_ENABLED;
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_check_disable(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->check.state & CHK_ST_CONFIGURED) {
+ sv->check.state &= ~CHK_ST_ENABLED;
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_check_force_up(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (!(sv->track)) {
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_LUA);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_check_force_nolb(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (!(sv->track)) {
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_stopping(sv, SRV_OP_STCHGC_LUA);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_check_force_down(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (!(sv->track)) {
+ sv->check.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_LUA);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_agent_enable(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->agent.state & CHK_ST_CONFIGURED) {
+ sv->agent.state |= CHK_ST_ENABLED;
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_agent_disable(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->agent.state & CHK_ST_CONFIGURED) {
+ sv->agent.state &= ~CHK_ST_ENABLED;
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_agent_force_up(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->agent.state & CHK_ST_ENABLED) {
+ sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_LUA);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+int hlua_server_agent_force_down(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->agent.state & CHK_ST_ENABLED) {
+ sv->agent.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_LUA);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 0;
+}
+
+/* returns the tracked server, if any */
+int hlua_server_tracking(lua_State *L)
+{
+ struct server *sv;
+ struct server *tracked;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+
+ tracked = sv->track;
+ if (tracked == NULL)
+ lua_pushnil(L);
+ else
+ hlua_fcn_new_server(L, tracked);
+
+ return 1;
+}
+
+/* returns an array of servers tracking the current server */
+int hlua_server_get_trackers(lua_State *L)
+{
+ struct server *sv;
+ struct server *cur_tracker;
+ int index;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+
+ lua_newtable(L);
+ cur_tracker = sv->trackers;
+ for (index = 1; cur_tracker; cur_tracker = cur_tracker->tracknext, index++) {
+ if (!lua_checkstack(L, 5))
+ luaL_error(L, "Lua out of memory error.");
+ hlua_fcn_new_server(L, cur_tracker);
+ /* array index starts at 1 in Lua */
+ lua_rawseti(L, -2, index);
+ }
+ return 1;
+}
+
+/* hlua_event_sub wrapper for per-server subscription:
+ *
+ * hlua_event_sub() is called with sv->e_subs subscription list and
+ * lua arguments are passed as-is (skipping the first argument which
+ * is the server ctx)
+ */
+int hlua_server_event_sub(lua_State *L)
+{
+ struct server *sv;
+
+ sv = hlua_check_server(L, 1);
+ if (sv == NULL) {
+ return 0;
+ }
+ /* remove first argument from the stack (server) */
+ lua_remove(L, 1);
+
+ /* try to subscribe within server's subscription list */
+ return hlua_event_sub(L, &sv->e_subs);
+}
+
+/* Instantiates a Lua object of class Server wrapping <srv>.
+ * Takes a reference on the server (srv_take()) which is released by the
+ * class __gc handler (hlua_server_gc()), stores the raw pointer at
+ * array index 0, attaches the class metatable and registers the public
+ * methods. Leaves the new object on the stack and returns 1.
+ */
+int hlua_fcn_new_server(lua_State *L, struct server *srv)
+{
+ lua_newtable(L);
+
+ /* Pop a class server metatable and affect it to the userdata. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_server_ref);
+ lua_setmetatable(L, -2);
+
+ lua_pushlightuserdata(L, srv);
+ lua_rawseti(L, -2, 0);
+
+ /* userdata is affected: increment server refcount */
+ srv_take(srv);
+
+ /* set public methods */
+ hlua_class_function(L, "get_name", hlua_server_get_name);
+ hlua_class_function(L, "get_puid", hlua_server_get_puid);
+ hlua_class_function(L, "get_rid", hlua_server_get_rid);
+ hlua_class_function(L, "is_draining", hlua_server_is_draining);
+ hlua_class_function(L, "is_backup", hlua_server_is_backup);
+ hlua_class_function(L, "is_dynamic", hlua_server_is_dynamic);
+ hlua_class_function(L, "get_cur_sess", hlua_server_get_cur_sess);
+ hlua_class_function(L, "get_pend_conn", hlua_server_get_pend_conn);
+ hlua_class_function(L, "set_maxconn", hlua_server_set_maxconn);
+ hlua_class_function(L, "get_maxconn", hlua_server_get_maxconn);
+ hlua_class_function(L, "set_weight", hlua_server_set_weight);
+ hlua_class_function(L, "get_weight", hlua_server_get_weight);
+ hlua_class_function(L, "set_addr", hlua_server_set_addr);
+ hlua_class_function(L, "get_addr", hlua_server_get_addr);
+ hlua_class_function(L, "get_stats", hlua_server_get_stats);
+ hlua_class_function(L, "get_proxy", hlua_server_get_proxy);
+ hlua_class_function(L, "shut_sess", hlua_server_shut_sess);
+ hlua_class_function(L, "set_drain", hlua_server_set_drain);
+ hlua_class_function(L, "set_maint", hlua_server_set_maint);
+ hlua_class_function(L, "set_ready", hlua_server_set_ready);
+ hlua_class_function(L, "check_enable", hlua_server_check_enable);
+ hlua_class_function(L, "check_disable", hlua_server_check_disable);
+ hlua_class_function(L, "check_force_up", hlua_server_check_force_up);
+ hlua_class_function(L, "check_force_nolb", hlua_server_check_force_nolb);
+ hlua_class_function(L, "check_force_down", hlua_server_check_force_down);
+ hlua_class_function(L, "agent_enable", hlua_server_agent_enable);
+ hlua_class_function(L, "agent_disable", hlua_server_agent_disable);
+ hlua_class_function(L, "agent_force_up", hlua_server_agent_force_up);
+ hlua_class_function(L, "agent_force_down", hlua_server_agent_force_down);
+ hlua_class_function(L, "tracking", hlua_server_tracking);
+ hlua_class_function(L, "get_trackers", hlua_server_get_trackers);
+ hlua_class_function(L, "event_sub", hlua_server_event_sub);
+
+ return 1;
+}
+
+static struct hlua_server_list *hlua_check_server_list(lua_State *L, int ud)
+{
+ return hlua_checkudata(L, ud, class_server_list_ref);
+}
+
+/* does nothing and returns 0, only prevents insertions in the
+ * table which represents the list of servers
+ */
+int hlua_listable_servers_newindex(lua_State *L) {
+ return 0;
+}
+
+/* first arg is the table (struct hlua_server_list * in metadata)
+ * second arg is the required index
+ */
+int hlua_listable_servers_index(lua_State *L)
+{
+ struct hlua_server_list *hlua_srv;
+ const char *name;
+ struct server *srv;
+
+ hlua_srv = hlua_check_server_list(L, 1);
+ name = luaL_checkstring(L, 2);
+
+ /* Perform a server lookup in px list */
+ srv = server_find_by_name(hlua_srv->px, name);
+ if (srv == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ hlua_fcn_new_server(L, srv);
+ return 1;
+}
+
+/* iterator must return key as string and value as server
+ * object, if we reach end of list, it returns nil.
+ * The context knows the last returned server. if the
+ * context contains srv == NULL, we start enumeration.
+ * Then, use 'srv->next' ptr to iterate through the list
+ */
+int hlua_listable_servers_pairs_iterator(lua_State *L)
+{
+ int context_index;
+ struct hlua_server_list_iterator_context *ctx;
+
+ /* the iteration context travels as the closure's first upvalue */
+ context_index = lua_upvalueindex(1);
+ ctx = lua_touserdata(L, context_index);
+
+ if (ctx->cur == NULL) {
+ /* First iteration, initialize list on the first server */
+ ctx->cur = ctx->px->srv;
+ } else {
+
+ /* Next server (next ptr is always valid, even if current
+ * server has the SRV_F_DELETED flag set)
+ */
+ ctx->cur = ctx->cur->next;
+ }
+
+ /* next server is null, end of iteration */
+ if (ctx->cur == NULL) {
+ lua_pushnil(L);
+ return 1;
+ }
+
+ /* return (id, Server object) pair */
+ lua_pushstring(L, ctx->cur->id);
+ hlua_fcn_new_server(L, ctx->cur);
+ return 2;
+}
+
+/* init the iterator context, return iterator function
+ * with context as closure. The only argument is a
+ * server list object.
+ */
+int hlua_listable_servers_pairs(lua_State *L)
+{
+ struct hlua_server_list_iterator_context *ctx;
+ struct hlua_server_list *hlua_srv_list;
+
+ hlua_srv_list = hlua_check_server_list(L, 1);
+
+ ctx = lua_newuserdata(L, sizeof(*ctx));
+ ctx->px = hlua_srv_list->px;
+ ctx->cur = NULL;
+
+ lua_pushcclosure(L, hlua_listable_servers_pairs_iterator, 1);
+ return 1;
+}
+
+void hlua_listable_servers(lua_State *L, struct proxy *px)
+{
+ struct hlua_server_list *list;
+
+ lua_newtable(L);
+ list = lua_newuserdata(L, sizeof(*list));
+ list->px = px;
+ lua_rawseti(L, -2, 0);
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_server_list_ref);
+ lua_setmetatable(L, -2);
+}
+
+static struct proxy *hlua_check_proxy(lua_State *L, int ud)
+{
+ return hlua_checkudata(L, ud, class_proxy_ref);
+}
+
+int hlua_proxy_get_name(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ lua_pushstring(L, px->id);
+ return 1;
+}
+
+/* Pushes the proxy's unique numeric id rendered as a string.
+ * Returns 1.
+ */
+int hlua_proxy_get_uuid(lua_State *L)
+{
+	struct proxy *px = hlua_check_proxy(L, 1);
+	char buf[17];
+
+	snprintf(buf, sizeof(buf), "%d", px->uuid);
+	lua_pushstring(L, buf);
+	return 1;
+}
+
+/* __index metamethod for proxy class
+ * support for additional keys that are missing from the main table
+ * stack:1 = table (proxy class), stack:2 = requested key
+ * Returns 1 if key is supported
+ * else returns 0 to make lua return NIL value to the caller
+ */
+static int hlua_proxy_index(struct lua_State *L)
+{
+ const char *key = lua_tostring(L, 2);
+
+ if (!strcmp(key, "name")) {
+ if (ONLY_ONCE())
+ ha_warning("hlua: use of proxy 'name' attribute is deprecated and will eventually be removed, please use get_name() function instead: %s\n", hlua_traceback(L, ", "));
+ lua_pushvalue(L, 1);
+ hlua_proxy_get_name(L);
+ return 1;
+ }
+ if (!strcmp(key, "uuid")) {
+ if (ONLY_ONCE())
+ ha_warning("hlua: use of proxy 'uuid' attribute is deprecated and will eventually be removed, please use get_uuid() function instead: %s\n", hlua_traceback(L, ", "));
+ lua_pushvalue(L, 1);
+ hlua_proxy_get_uuid(L);
+ return 1;
+ }
+ /* unknown attribute */
+ return 0;
+}
+
+int hlua_proxy_pause(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ /* safe to call without PROXY_LOCK - pause_proxy takes it */
+ pause_proxy(px);
+ return 0;
+}
+
+int hlua_proxy_resume(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ /* safe to call without PROXY_LOCK - resume_proxy takes it */
+ resume_proxy(px);
+ return 0;
+}
+
+int hlua_proxy_stop(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ /* safe to call without PROXY_LOCK - stop_proxy takes it */
+ stop_proxy(px);
+ return 0;
+}
+
+int hlua_proxy_get_cap(lua_State *L)
+{
+ struct proxy *px;
+ const char *str;
+
+ px = hlua_check_proxy(L, 1);
+ str = proxy_cap_str(px->cap);
+ lua_pushstring(L, str);
+ return 1;
+}
+
+int hlua_proxy_get_stats(lua_State *L)
+{
+ struct proxy *px;
+ int i;
+
+ px = hlua_check_proxy(L, 1);
+ if (px->cap & PR_CAP_BE)
+ stats_fill_be_stats(px, STAT_SHLGNDS, stats, STATS_LEN, NULL);
+ else
+ stats_fill_fe_stats(px, stats, STATS_LEN, NULL);
+ lua_newtable(L);
+ for (i=0; i<ST_F_TOTAL_FIELDS; i++) {
+ lua_pushstring(L, stat_fields[i].name);
+ hlua_fcn_pushfield(L, &stats[i]);
+ lua_settable(L, -3);
+ }
+ return 1;
+}
+
+int hlua_proxy_get_mode(lua_State *L)
+{
+ struct proxy *px;
+ const char *str;
+
+ px = hlua_check_proxy(L, 1);
+ str = proxy_mode_str(px->mode);
+ lua_pushstring(L, str);
+ return 1;
+}
+
+int hlua_proxy_shut_bcksess(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ srv_shutdown_backup_streams(px, SF_ERR_KILLED);
+ return 0;
+}
+
+int hlua_proxy_get_srv_act(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ lua_pushinteger(L, px->srv_act);
+ return 1;
+}
+
+int hlua_proxy_get_srv_bck(lua_State *L)
+{
+ struct proxy *px;
+
+ px = hlua_check_proxy(L, 1);
+ lua_pushinteger(L, px->srv_bck);
+ return 1;
+}
+
+/* Get mailers config info, used to implement email alert sending
+ * according to mailers config from lua.
+ */
+/* Get mailers config info, used to implement email alert sending
+ * according to mailers config from lua.
+ *
+ * Returns 0 (nothing pushed) when no email-alert mailers are configured
+ * on the proxy, else pushes a table describing the mailers settings
+ * (track_server_health, log_level, mailservers, mailservers_timeout,
+ * smtp_hostname, smtp_from, smtp_to) and returns 1.
+ */
+int hlua_proxy_get_mailers(lua_State *L)
+{
+	struct proxy *px;
+	int it;
+	struct mailer *mailer;
+
+	px = hlua_check_proxy(L, 1);
+
+	if (!px->email_alert.mailers.m)
+		return 0; /* email-alert mailers not found on proxy */
+
+	lua_newtable(L);
+
+	/* option log-health-checks */
+	lua_pushstring(L, "track_server_health");
+	lua_pushboolean(L, (px->options2 & PR_O2_LOGHCHKS));
+	lua_settable(L, -3);
+
+	/* email-alert level */
+	lua_pushstring(L, "log_level");
+	lua_pushinteger(L, px->email_alert.level);
+	lua_settable(L, -3);
+
+	/* email-alert mailers */
+	lua_pushstring(L, "mailservers");
+	lua_newtable(L);
+	for (it = 0, mailer = px->email_alert.mailers.m->mailer_list;
+	     it < px->email_alert.mailers.m->count; it++, mailer = mailer->next) {
+		char *srv_address;
+
+		lua_pushstring(L, mailer->id);
+
+		/* For now, we depend on mailer->addr to restore mailer's address which
+		 * was converted using str2sa_range() on startup.
+		 *
+		 * FIXME?:
+		 * It could be a good idea to pass the raw address (unparsed) to allow fqdn
+		 * to be resolved at runtime, unless we consider this as a pure legacy mode
+		 * and mailers config support is going to be removed in the future?
+		 */
+		srv_address = sa2str(&mailer->addr, get_host_port(&mailer->addr), 0);
+		if (srv_address) {
+			lua_pushstring(L, srv_address);
+			ha_free(&srv_address);
+			lua_settable(L, -3);
+		}
+		else {
+			/* BUG fix: sa2str() failed, drop the key pushed above
+			 * so the Lua stack stays balanced, otherwise the
+			 * following lua_settable() calls would pair the wrong
+			 * keys and values
+			 */
+			lua_pop(L, 1);
+		}
+	}
+	lua_settable(L, -3);
+
+	/* mailers timeout (from mailers section) */
+	lua_pushstring(L, "mailservers_timeout");
+	lua_pushinteger(L, px->email_alert.mailers.m->timeout.mail);
+	lua_settable(L, -3);
+
+	/* email-alert myhostname */
+	lua_pushstring(L, "smtp_hostname");
+	lua_pushstring(L, px->email_alert.myhostname);
+	lua_settable(L, -3);
+
+	/* email-alert from */
+	lua_pushstring(L, "smtp_from");
+	lua_pushstring(L, px->email_alert.from);
+	lua_settable(L, -3);
+
+	/* email-alert to */
+	lua_pushstring(L, "smtp_to");
+	lua_pushstring(L, px->email_alert.to);
+	lua_settable(L, -3);
+
+	return 1;
+}
+
+/* Instantiates a Lua object of class Proxy wrapping <px>.
+ * Stores the raw pointer at array index 0, attaches the class
+ * metatable, registers the public methods and populates the 'servers',
+ * 'listeners' and (when present) 'stktable' sub-objects. Leaves the new
+ * object on the stack and returns 1.
+ */
+int hlua_fcn_new_proxy(lua_State *L, struct proxy *px)
+{
+ struct listener *lst;
+ int lid;
+ char buffer[17];
+
+ lua_newtable(L);
+
+ /* Pop a class proxy metatable and affect it to the userdata. */
+ lua_rawgeti(L, LUA_REGISTRYINDEX, class_proxy_ref);
+ lua_setmetatable(L, -2);
+
+ lua_pushlightuserdata(L, px);
+ lua_rawseti(L, -2, 0);
+
+ /* set public methods */
+ hlua_class_function(L, "get_name", hlua_proxy_get_name);
+ hlua_class_function(L, "get_uuid", hlua_proxy_get_uuid);
+ hlua_class_function(L, "pause", hlua_proxy_pause);
+ hlua_class_function(L, "resume", hlua_proxy_resume);
+ hlua_class_function(L, "stop", hlua_proxy_stop);
+ hlua_class_function(L, "shut_bcksess", hlua_proxy_shut_bcksess);
+ hlua_class_function(L, "get_cap", hlua_proxy_get_cap);
+ hlua_class_function(L, "get_mode", hlua_proxy_get_mode);
+ hlua_class_function(L, "get_srv_act", hlua_proxy_get_srv_act);
+ hlua_class_function(L, "get_srv_bck", hlua_proxy_get_srv_bck);
+ hlua_class_function(L, "get_stats", hlua_proxy_get_stats);
+ hlua_class_function(L, "get_mailers", hlua_proxy_get_mailers);
+
+ /* Browse and register servers. */
+ lua_pushstring(L, "servers");
+ hlua_listable_servers(L, px);
+ lua_settable(L, -3);
+
+ /* Browse and register listeners. */
+ lua_pushstring(L, "listeners");
+ lua_newtable(L);
+ lid = 1;
+ list_for_each_entry(lst, &px->conf.listeners, by_fe) {
+ if (lst->name)
+ lua_pushstring(L, lst->name);
+ else {
+ /* unnamed listeners get a synthetic "sock-<n>" key */
+ snprintf(buffer, sizeof(buffer), "sock-%d", lid);
+ lid++;
+ lua_pushstring(L, buffer);
+ }
+ hlua_fcn_new_listener(L, lst);
+ lua_settable(L, -3);
+ }
+ lua_settable(L, -3);
+
+ if (px->table && px->table->id) {
+ lua_pushstring(L, "stktable");
+ hlua_fcn_new_stktable(L, px->table);
+ lua_settable(L, -3);
+ }
+
+ return 1;
+}
+
+/* Checks that the value at stack index <ud> is a proxy list object
+ * (class_proxy_list_ref metatable) and returns its hlua_proxy_list pointer.
+ */
+static struct hlua_proxy_list *hlua_check_proxy_list(lua_State *L, int ud)
+{
+	return hlua_checkudata(L, ud, class_proxy_list_ref);
+}
+
+/* __newindex metamethod: does nothing and returns 0, only prevents
+ * insertions into the table which represents the list of proxies
+ * (the list is resolved at runtime and must stay read-only).
+ */
+int hlua_listable_proxies_newindex(lua_State *L) {
+	return 0;
+}
+
+/* __index metamethod: first arg is the table (struct hlua_proxy_list * in
+ * metadata), second arg is the required index (the proxy name).
+ * When the list allows both capabilities, the frontend lookup is tried
+ * first, then the backend one. Returns a new Proxy object, or nil when
+ * no proxy of that name matches the list's capabilities.
+ */
+int hlua_listable_proxies_index(lua_State *L)
+{
+	struct hlua_proxy_list *hlua_px;
+	const char *name;
+	struct proxy *px;
+
+	hlua_px = hlua_check_proxy_list(L, 1);
+	name = luaL_checkstring(L, 2);
+
+	px = NULL;
+	if (hlua_px->capabilities & PR_CAP_FE) {
+		px = proxy_find_by_name(name, PR_CAP_FE, 0);
+	}
+	if (!px && hlua_px->capabilities & PR_CAP_BE) {
+		px = proxy_find_by_name(name, PR_CAP_BE, 0);
+	}
+	if (px == NULL) {
+		lua_pushnil(L);
+		return 1;
+	}
+
+	hlua_fcn_new_proxy(L, px);
+	return 1;
+}
+
+/* Returns non-zero when proxy <px> has at least one of the capabilities in
+ * <cap> and is neither a defaults section nor an internal proxy.
+ */
+static inline int hlua_listable_proxies_match(struct proxy *px, char cap) {
+	return ((px->cap & cap) && !(px->cap & (PR_CAP_DEF | PR_CAP_INT)));
+}
+
+/* pairs() iterator: must return the key as a string (proxy id) and the
+ * value as a Proxy object; when we reach the end of the list it returns
+ * nil. The iteration context is stored as a closure upvalue and its
+ * <next> member is advanced to the next matching proxy on each call.
+ */
+int hlua_listable_proxies_pairs_iterator(lua_State *L)
+{
+	int context_index;
+	struct hlua_proxy_list_iterator_context *ctx;
+
+	context_index = lua_upvalueindex(1);
+	ctx = lua_touserdata(L, context_index);
+
+	if (ctx->next == NULL) {
+		lua_pushnil(L);
+		return 1;
+	}
+
+	lua_pushstring(L, ctx->next->id);
+	hlua_fcn_new_proxy(L, ctx->next);
+
+	/* pre-position <next> on the next proxy matching our capabilities
+	 * so that the termination test above stays trivial
+	 */
+	for (ctx->next = ctx->next->next;
+	     ctx->next && !hlua_listable_proxies_match(ctx->next, ctx->capabilities);
+	     ctx->next = ctx->next->next);
+
+	return 2;
+}
+
+/* __pairs metamethod: inits the iterator context, returns the iterator
+ * function with the context as closure upvalue. The only argument is a
+ * proxy list object (not a single proxy). The context starts on the
+ * first proxy of the global proxies_list matching the list capabilities.
+ */
+int hlua_listable_proxies_pairs(lua_State *L)
+{
+	struct hlua_proxy_list_iterator_context *ctx;
+	struct hlua_proxy_list *hlua_px;
+
+	hlua_px = hlua_check_proxy_list(L, 1);
+
+	ctx = lua_newuserdata(L, sizeof(*ctx));
+
+	ctx->capabilities = hlua_px->capabilities;
+	for (ctx->next = proxies_list;
+	     ctx->next && !hlua_listable_proxies_match(ctx->next, ctx->capabilities);
+	     ctx->next = ctx->next->next);
+	lua_pushcclosure(L, hlua_listable_proxies_pairs_iterator, 1);
+	return 1;
+}
+
+/* Pushes on top of the stack <L> a new proxy list object restricted to the
+ * proxies matching <capabilities>. The hlua_proxy_list userdata is stored at
+ * array index 0 of the table and the class_proxy_list metatable (providing
+ * __index/__newindex/__pairs) is attached.
+ */
+void hlua_listable_proxies(lua_State *L, char capabilities)
+{
+	struct hlua_proxy_list *list;
+
+	lua_newtable(L);
+	list = lua_newuserdata(L, sizeof(*list));
+	list->capabilities = capabilities;
+	lua_rawseti(L, -2, 0);
+	lua_rawgeti(L, LUA_REGISTRYINDEX, class_proxy_list_ref);
+	lua_setmetatable(L, -2);
+}
+
+/* Lua method "unsub" on an event_sub object: cancels the event
+ * subscription. An extra reference is taken first so the subscription
+ * storage survives until the Lua object is garbage-collected.
+ */
+int hlua_event_sub_unsub(lua_State *L)
+{
+	struct event_hdl_sub *sub = hlua_checkudata(L, 1, class_event_sub_ref);
+
+	BUG_ON(!sub);
+	event_hdl_take(sub); /* keep a reference on sub until the item is GCed */
+	event_hdl_unsubscribe(sub); /* will automatically call event_hdl_drop() */
+	return 0;
+}
+
+/* __gc metamethod of the event_sub class: releases the reference taken
+ * when the Lua object was created in hlua_fcn_new_event_sub().
+ */
+int hlua_event_sub_gc(lua_State *L)
+{
+	struct event_hdl_sub *sub = hlua_checkudata(L, 1, class_event_sub_ref);
+
+	BUG_ON(!sub);
+	event_hdl_drop(sub); /* final drop of the reference */
+	return 0;
+}
+
+/* Creates a Lua object wrapping the event subscription <sub> and pushes it
+ * on top of the stack <L>. A reference is taken on <sub>; it is released
+ * by hlua_event_sub_gc() when the object is collected. Always returns 1.
+ */
+int hlua_fcn_new_event_sub(lua_State *L, struct event_hdl_sub *sub)
+{
+	lua_newtable(L);
+
+	/* Pop a class event_sub metatable and affect it to the userdata. */
+	lua_rawgeti(L, LUA_REGISTRYINDEX, class_event_sub_ref);
+	lua_setmetatable(L, -2);
+
+	lua_pushlightuserdata(L, sub);
+	lua_rawseti(L, -2, 0);
+
+	/* userdata is affected: increment sub refcount */
+	event_hdl_take(sub);
+
+	/* set public methods */
+	hlua_class_function(L, "unsub", hlua_event_sub_unsub);
+
+	return 1;
+}
+
+/* This Lua function takes a string and a list of separators.
+ * It tokenizes the input string using each character of the
+ * separators string as a separator. An optional third boolean
+ * argument requests that empty tokens be skipped.
+ *
+ * The function returns a table filled with tokens.
+ */
+int hlua_tokenize(lua_State *L)
+{
+	const char *str;
+	const char *sep;
+	int index;
+	const char *token;
+	const char *p;
+	const char *c;
+	int ignore_empty;
+
+	ignore_empty = 0;
+
+	str = luaL_checkstring(L, 1);
+	sep = luaL_checkstring(L, 2);
+	if (lua_gettop(L) == 3)
+		ignore_empty = hlua_checkboolean(L, 3);
+
+	lua_newtable(L);
+	index = 1;
+	token = str;
+	p = str;
+	while(1) {
+		/* is *p one of the separator characters? Note that when *p is
+		 * the trailing NUL, the scan leaves *c == '\0' too, so the
+		 * final token is flushed by the same branch below.
+		 */
+		for (c = sep; *c != '\0'; c++)
+			if (*p == *c)
+				break;
+		if (*p == *c) {
+			if ((!ignore_empty) || (p - token > 0)) {
+				lua_pushlstring(L, token, p - token);
+				lua_rawseti(L, -2, index);
+				index++;
+			}
+			token = p + 1;
+		}
+		if (*p == '\0')
+			break;
+		p++;
+	}
+
+	return 1;
+}
+
+/* Parses the string argument as an IPv4 or IPv6 address/network
+ * ("addr[/mask]") and returns a net_addr userdata on success, or nil
+ * when the string cannot be parsed. DNS resolution is never performed
+ * for the IPv4 form (PAT_MF_NO_DNS).
+ */
+int hlua_parse_addr(lua_State *L)
+{
+	struct net_addr *addr;
+	const char *str = luaL_checkstring(L, 1);
+	unsigned char mask;
+
+	addr = lua_newuserdata(L, sizeof(struct net_addr));
+	if (!addr) {
+		/* defensive only: lua_newuserdata normally raises on OOM */
+		lua_pushnil(L);
+		return 1;
+	}
+
+	if (str2net(str, PAT_MF_NO_DNS, &addr->addr.v4.ip, &addr->addr.v4.mask)) {
+		addr->family = AF_INET;
+		return 1;
+	}
+
+	if (str62net(str, &addr->addr.v6.ip, &mask)) {
+		/* expand the IPv6 prefix length into a full 128-bit mask */
+		len2mask6(mask, &addr->addr.v6.mask);
+		addr->family = AF_INET6;
+		return 1;
+	}
+
+	/* neither family parsed: drop the userdata and return nil instead */
+	lua_pop(L, 1);
+	lua_pushnil(L);
+	return 1;
+}
+
+/* Takes two net_addr userdata (as built by hlua_parse_addr) and returns
+ * true when they are of the same family and their networks overlap, i.e.
+ * each address masked with the other one's mask gives the same result.
+ * Returns false otherwise, or when either argument is not a userdata.
+ */
+int hlua_match_addr(lua_State *L)
+{
+	struct net_addr *addr1;
+	struct net_addr *addr2;
+
+	if (!lua_isuserdata(L, 1) ||
+	    !lua_isuserdata(L, 2)) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+
+	addr1 = lua_touserdata(L, 1);
+	addr2 = lua_touserdata(L, 2);
+
+	if (addr1->family != addr2->family) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+
+	if (addr1->family == AF_INET) {
+		/* cross-mask comparison: each side is masked by the other's mask */
+		if ((addr1->addr.v4.ip.s_addr & addr2->addr.v4.mask.s_addr) ==
+		    (addr2->addr.v4.ip.s_addr & addr1->addr.v4.mask.s_addr)) {
+			lua_pushboolean(L, 1);
+			return 1;
+		}
+	} else {
+		int i;
+
+		/* IPv6: compare the 128 bits as four 32-bit words */
+		for (i = 0; i < 16; i += 4) {
+			if ((read_u32(&addr1->addr.v6.ip.s6_addr[i]) &
+			     read_u32(&addr2->addr.v6.mask.s6_addr[i])) !=
+			    (read_u32(&addr2->addr.v6.ip.s6_addr[i]) &
+			     read_u32(&addr1->addr.v6.mask.s6_addr[i])))
+				break;
+		}
+		if (i == 16) {
+			lua_pushboolean(L, 1);
+			return 1;
+		}
+	}
+
+	lua_pushboolean(L, 0);
+	return 1;
+}
+
+/* Checks that the value at stack index <ud> is a Regex object and returns
+ * the address of its my_regex pointer (stored as full userdata).
+ */
+static struct my_regex **hlua_check_regex(lua_State *L, int ud)
+{
+	return (hlua_checkudata(L, ud, class_regex_ref));
+}
+
+/* Lua: Regex.new(pattern, case_sensitive). Compiles <pattern> and returns
+ * two values: a boolean status, then either a Regex object on success or
+ * the error message string on failure.
+ */
+static int hlua_regex_comp(struct lua_State *L)
+{
+	struct my_regex **regex;
+	const char *str;
+	int cs;
+	char *err;
+
+	str = luaL_checkstring(L, 1);
+	luaL_argcheck(L, lua_isboolean(L, 2), 2, NULL);
+	cs = lua_toboolean(L, 2);
+
+	regex = lua_newuserdata(L, sizeof(*regex));
+
+	err = NULL;
+	if (!(*regex = regex_comp(str, cs, 1, &err))) {
+		lua_pushboolean(L, 0); /* status error */
+		lua_pushstring(L, err); /* Reason */
+		free(err);
+		return 2;
+	}
+
+	lua_pushboolean(L, 1); /* Status ok */
+
+	/* Create object: a table holding the userdata at index 0, with the
+	 * class Regex metatable attached.
+	 */
+	lua_newtable(L);
+	lua_pushvalue(L, -3); /* Get the userdata pointer. */
+	lua_rawseti(L, -2, 0);
+	lua_rawgeti(L, LUA_REGISTRYINDEX, class_regex_ref);
+	lua_setmetatable(L, -2);
+	return 2;
+}
+
+/* Lua: regex:exec(str). Returns a boolean telling whether <str> matches
+ * the compiled regex. Also returns false when the regex was already freed
+ * or the input does not fit in a trash chunk.
+ */
+static int hlua_regex_exec(struct lua_State *L)
+{
+	struct my_regex **regex;
+	const char *str;
+	size_t len;
+	struct buffer *tmp;
+
+	regex = hlua_check_regex(L, 1);
+	str = luaL_checklstring(L, 2, &len);
+
+	if (!*regex) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+
+	/* Copy the string because regex_exec2 require a 'char *'
+	 * and not a 'const char *'.
+	 */
+	tmp = get_trash_chunk();
+	if (len >= tmp->size) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+	memcpy(tmp->area, str, len);
+
+	lua_pushboolean(L, regex_exec2(*regex, tmp->area, len));
+
+	return 1;
+}
+
+/* Lua: regex:match(str). Returns two values: a boolean match status and a
+ * table containing the matched groups (up to 20, the whole match first),
+ * empty when there is no match.
+ */
+static int hlua_regex_match(struct lua_State *L)
+{
+	struct my_regex **regex;
+	const char *str;
+	size_t len;
+	regmatch_t pmatch[20];
+	int ret;
+	int i;
+	struct buffer *tmp;
+
+	regex = hlua_check_regex(L, 1);
+	str = luaL_checklstring(L, 2, &len);
+
+	if (!*regex) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+
+	/* Copy the string because regex_exec2 require a 'char *'
+	 * and not a 'const char *'.
+	 */
+	tmp = get_trash_chunk();
+	if (len >= tmp->size) {
+		lua_pushboolean(L, 0);
+		return 1;
+	}
+	memcpy(tmp->area, str, len);
+
+	ret = regex_exec_match2(*regex, tmp->area, len, 20, pmatch, 0);
+	lua_pushboolean(L, ret);
+	lua_newtable(L);
+	if (ret) {
+		/* groups are pushed from the original string using the match offsets */
+		for (i = 0; i < 20 && pmatch[i].rm_so != -1; i++) {
+			lua_pushlstring(L, str + pmatch[i].rm_so, pmatch[i].rm_eo - pmatch[i].rm_so);
+			lua_rawseti(L, -2, i + 1);
+		}
+	}
+	return 2;
+}
+
+/* __gc metamethod of the Regex class: releases the compiled regex and
+ * clears the pointer so later method calls fail gracefully.
+ */
+static int hlua_regex_free(struct lua_State *L)
+{
+	struct my_regex **regex;
+
+	regex = hlua_check_regex(L, 1);
+	regex_free(*regex);
+	*regex = NULL;
+	return 0;
+}
+
+/* Registers into the Lua state <L> the core helper functions (date
+ * formatting, parse_addr/match_addr, tokenize, ...) and all the class
+ * metatables (Regex, StkTable, Listener, EventSub, Server, Proxy and the
+ * proxy/server list classes), plus the runtime-resolved "proxies",
+ * "frontends" and "backends" entries. The functions are registered into
+ * the table currently on top of the stack (presumably the "core" table
+ * prepared by the caller — confirm against the call site).
+ */
+void hlua_fcn_reg_core_fcn(lua_State *L)
+{
+	hlua_concat_init(L);
+	hlua_queue_init(L);
+
+	hlua_class_function(L, "now", hlua_now);
+	hlua_class_function(L, "http_date", hlua_http_date);
+	hlua_class_function(L, "imf_date", hlua_imf_date);
+	hlua_class_function(L, "rfc850_date", hlua_rfc850_date);
+	hlua_class_function(L, "asctime_date", hlua_asctime_date);
+	hlua_class_function(L, "concat", hlua_concat_new);
+	hlua_class_function(L, "queue", hlua_queue_new);
+	hlua_class_function(L, "get_info", hlua_get_info);
+	hlua_class_function(L, "parse_addr", hlua_parse_addr);
+	hlua_class_function(L, "match_addr", hlua_match_addr);
+	hlua_class_function(L, "tokenize", hlua_tokenize);
+
+	/* Create regex object. */
+	lua_newtable(L);
+	hlua_class_function(L, "new", hlua_regex_comp);
+
+	lua_newtable(L); /* The metatable. */
+	lua_pushstring(L, "__index");
+	lua_newtable(L);
+	hlua_class_function(L, "exec", hlua_regex_exec);
+	hlua_class_function(L, "match", hlua_regex_match);
+	lua_rawset(L, -3); /* -> META["__index"] = TABLE */
+	hlua_class_function(L, "__gc", hlua_regex_free);
+
+	lua_pushvalue(L, -1); /* Duplicate the metatable reference. */
+	class_regex_ref = hlua_register_metatable(L, CLASS_REGEX);
+
+	lua_setmetatable(L, -2);
+	lua_setglobal(L, CLASS_REGEX); /* Create global object called Regex */
+
+	/* Create stktable object. */
+	lua_newtable(L);
+	lua_pushstring(L, "__index");
+	lua_newtable(L);
+	hlua_class_function(L, "info", hlua_stktable_info);
+	hlua_class_function(L, "lookup", hlua_stktable_lookup);
+	hlua_class_function(L, "dump", hlua_stktable_dump);
+	lua_settable(L, -3); /* -> META["__index"] = TABLE */
+	class_stktable_ref = hlua_register_metatable(L, CLASS_STKTABLE);
+
+	/* Create listener object. */
+	lua_newtable(L);
+	lua_pushstring(L, "__index");
+	lua_newtable(L);
+	hlua_class_function(L, "get_stats", hlua_listener_get_stats);
+	lua_settable(L, -3); /* -> META["__index"] = TABLE */
+	class_listener_ref = hlua_register_metatable(L, CLASS_LISTENER);
+
+	/* Create event_sub object. */
+	lua_newtable(L);
+	hlua_class_function(L, "__gc", hlua_event_sub_gc);
+	class_event_sub_ref = hlua_register_metatable(L, CLASS_EVENT_SUB);
+
+	/* Create server object. */
+	lua_newtable(L);
+	hlua_class_function(L, "__gc", hlua_server_gc);
+	hlua_class_function(L, "__index", hlua_server_index);
+	class_server_ref = hlua_register_metatable(L, CLASS_SERVER);
+
+	/* Create proxy object. */
+	lua_newtable(L);
+	hlua_class_function(L, "__index", hlua_proxy_index);
+	class_proxy_ref = hlua_register_metatable(L, CLASS_PROXY);
+
+	/* list of proxy objects. Instead of having a static array
+	 * of proxies, we use special metamethods that rely on internal
+	 * proxies list so that the array is resolved at runtime.
+	 *
+	 * To emulate the same behavior than Lua array, we implement some
+	 * metatable functions:
+	 *  - __newindex : prevent the insertion of a new item in the array
+	 *  - __index : find a proxy in the list using "name" index
+	 *  - __pairs : iterate through available proxies in the list
+	 */
+	lua_newtable(L);
+	hlua_class_function(L, "__index", hlua_listable_proxies_index);
+	hlua_class_function(L, "__newindex", hlua_listable_proxies_newindex);
+	hlua_class_function(L, "__pairs", hlua_listable_proxies_pairs);
+	class_proxy_list_ref = hlua_register_metatable(L, CLASS_PROXY_LIST);
+
+	/* Create proxies entry. */
+	lua_pushstring(L, "proxies");
+	hlua_listable_proxies(L, PR_CAP_LISTEN);
+	lua_settable(L, -3);
+
+	/* Create frontends entry. */
+	lua_pushstring(L, "frontends");
+	hlua_listable_proxies(L, PR_CAP_FE);
+	lua_settable(L, -3);
+
+	/* Create backends entry. */
+	lua_pushstring(L, "backends");
+	hlua_listable_proxies(L, PR_CAP_BE);
+	lua_settable(L, -3);
+
+	/* list of server. This object is similar to
+	 * CLASS_PROXY_LIST
+	 */
+	lua_newtable(L);
+	hlua_class_function(L, "__index", hlua_listable_servers_index);
+	hlua_class_function(L, "__newindex", hlua_listable_servers_newindex);
+	hlua_class_function(L, "__pairs", hlua_listable_servers_pairs);
+	class_server_list_ref = hlua_register_metatable(L, CLASS_SERVER_LIST);
+}
diff --git a/src/hpack-dec.c b/src/hpack-dec.c
new file mode 100644
index 0000000..052a7c3
--- /dev/null
+++ b/src/hpack-dec.c
@@ -0,0 +1,475 @@
+/*
+ * HPACK decompressor (RFC7541)
+ *
+ * Copyright (C) 2014-2017 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/chunk.h>
+#include <haproxy/global.h>
+#include <haproxy/h2.h>
+#include <haproxy/hpack-dec.h>
+#include <haproxy/hpack-huff.h>
+#include <haproxy/hpack-tbl.h>
+#include <haproxy/tools.h>
+
+
+#if defined(DEBUG_HPACK)
+#define hpack_debug_printf printf
+#define hpack_debug_hexdump debug_hexdump
+#else
+#define hpack_debug_printf(...) do { } while (0)
+#define hpack_debug_hexdump(...) do { } while (0)
+#endif
+
+/* reads a varint from <raw>'s lowest <b> bits and <len> bytes max (raw included).
+ * returns the 32-bit value on success after updating raw_in and len_in. Forces
+ * len_in to (uint32_t)-1 on truncated input. This implements the HPACK
+ * integer representation of RFC7541 #5.1.
+ */
+static uint32_t get_var_int(const uint8_t **raw_in, uint32_t *len_in, int b)
+{
+	uint32_t ret = 0;
+	int len = *len_in;
+	const uint8_t *raw = *raw_in;
+	uint8_t shift = 0;
+
+	/* the first byte's lowest <b> bits carry either the whole value, or
+	 * the all-ones prefix announcing a multi-byte encoding
+	 */
+	len--;
+	ret = *(raw++) & ((1 << b) - 1);
+	if (ret != (uint32_t)((1 << b) - 1))
+		goto end;
+
+	/* continuation bytes: 7 value bits each, MSB set means "more follows" */
+	while (len && (*raw & 128)) {
+		ret += ((uint32_t)(*raw++) & 127) << shift;
+		shift += 7;
+		len--;
+	}
+
+	/* last 7 bits */
+	if (!len)
+		goto too_short;
+	len--;
+	ret += ((uint32_t)(*raw++) & 127) << shift;
+
+ end:
+	*raw_in = raw;
+	*len_in = len;
+	return ret;
+
+ too_short:
+	*len_in = (uint32_t)-1;
+	return 0;
+}
+
+/* returns the pseudo-header <idx> corresponds to among the following values :
+ *   - 0 = unknown, the header's string needs to be used instead
+ *   - 1 = ":authority"
+ *   - 2 = ":method"
+ *   - 3 = ":path"
+ *   - 4 = ":scheme"
+ *   - 5 = ":status"
+ */
+static inline int hpack_idx_to_phdr(uint32_t idx)
+{
+	if (idx > 14)
+		return 0;
+
+	/* pairs of static-table indexes share one nibble of the lookup word:
+	 * 0-1 -> 1 (:authority), 2-3 -> 2 (:method), 4-5 -> 3 (:path),
+	 * 6-7 -> 4 (:scheme), 8-14 -> 5 (:status)
+	 */
+	idx >>= 1;
+	idx <<= 2;
+	return (0x55554321U >> idx) & 0xF;
+}
+
+/* If <idx> designates a static header, returns <in> as-is (no copy needed).
+ * Otherwise allocates some room from chunk <store> to duplicate <in> into it
+ * and returns the string allocated there. In case of allocation failure,
+ * returns a string whose pointer is NULL.
+ */
+static inline struct ist hpack_alloc_string(struct buffer *store, uint32_t idx,
+                                            struct ist in)
+{
+	struct ist out;
+
+	if (idx < HPACK_SHT_SIZE)
+		return in;
+
+	out.len = in.len;
+	out.ptr = chunk_newstr(store);
+	if (unlikely(!isttest(out)))
+		return out;
+
+	/* make sure the copy fits in the remaining room of the chunk */
+	if (unlikely(store->data + out.len > store->size)) {
+		out.ptr = NULL;
+		return out;
+	}
+
+	store->data += out.len;
+	memcpy(out.ptr, in.ptr, out.len);
+	return out;
+}
+
+/* decode an HPACK frame starting at <raw> for <len> bytes, using the dynamic
+ * headers table <dht>, produces the output into list <list> of <list_size>
+ * entries max, and uses pre-allocated buffer <tmp> for temporary storage (some
+ * list elements will point to it). Some <list> name entries may be made of a
+ * NULL pointer and a len, in which case they will designate a pseudo header
+ * index according to the values returned by hpack_idx_to_phdr() above. The
+ * number of <list> entries used is returned on success, or <0 on failure, with
+ * the opposite one of the HPACK_ERR_* codes. A last element is always zeroed
+ * and is not counted in the number of returned entries. This way the caller
+ * can use list[].n.len == 0 as a marker for the end of list.
+ */
+int hpack_decode_frame(struct hpack_dht *dht, const uint8_t *raw, uint32_t len,
+                       struct http_hdr *list, int list_size,
+                       struct buffer *tmp)
+{
+	uint32_t idx;
+	uint32_t nlen;
+	uint32_t vlen;
+	uint8_t huff;
+	struct ist name;
+	struct ist value;
+	int must_index;
+	int ret;
+
+	hpack_debug_hexdump(stderr, "[HPACK-DEC] ", (const char *)raw, 0, len);
+
+	chunk_reset(tmp);
+	/* <ret> counts the produced entries and doubles as the (negative)
+	 * error code on failure
+	 */
+	ret = 0;
+	while (len) {
+		int __maybe_unused code = *raw; /* first byte, only for debugging */
+
+		must_index = 0;
+		if (*raw >= 0x80) {
+			/* indexed header field */
+			if (*raw == 0x80) {
+				/* RFC7541#6.1 : index value 0 is not used */
+				hpack_debug_printf("unhandled code 0x%02x (raw=%p, len=%u)\n", *raw, raw, len);
+				ret = -HPACK_ERR_UNKNOWN_OPCODE;
+				goto leave;
+			}
+
+			hpack_debug_printf("%02x: p14: indexed header field : ", code);
+
+			idx = get_var_int(&raw, &len, 7);
+			if (len == (uint32_t)-1) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			hpack_debug_printf(" idx=%u ", idx);
+
+			if (!hpack_valid_idx(dht, idx)) {
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TOO_LARGE;
+				goto leave;
+			}
+
+			value = hpack_alloc_string(tmp, idx, hpack_idx_to_value(dht, idx));
+			if (!isttest(value)) {
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TOO_LARGE;
+				goto leave;
+			}
+
+			/* here we don't index so we can always keep the pseudo header number */
+			name = ist2(NULL, hpack_idx_to_phdr(idx));
+
+			if (!name.len) {
+				name = hpack_alloc_string(tmp, idx, hpack_idx_to_name(dht, idx));
+				if (!isttest(name)) {
+					hpack_debug_printf("##ERR@%d##\n", __LINE__);
+					ret = -HPACK_ERR_TOO_LARGE;
+					goto leave;
+				}
+			}
+			/* <name> and <value> are now set and point to stable values */
+		}
+		else if (*raw >= 0x20 && *raw <= 0x3f) {
+			/* max dyn table size change */
+			hpack_debug_printf("%02x: p18: dynamic table size update : ", code);
+
+			if (ret) {
+				/* 7541#4.2.1 : DHT size update must only be at the beginning */
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TOO_LARGE;
+				goto leave;
+			}
+
+			idx = get_var_int(&raw, &len, 5);
+			if (len == (uint32_t)-1) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+			hpack_debug_printf(" new len=%u\n", idx);
+
+			if (idx > dht->size) {
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_INVALID_ARGUMENT;
+				goto leave;
+			}
+			continue;
+		}
+		else if (!(*raw & (*raw - 0x10))) {
+			/* 0x00, 0x10, and 0x40 (0x20 and 0x80 were already handled above) */
+
+			/* literal header field without/never/with incremental indexing -- literal name */
+			if (*raw == 0x00)
+				hpack_debug_printf("%02x: p17: literal without indexing : ", code);
+			else if (*raw == 0x10)
+				hpack_debug_printf("%02x: p18: literal never indexed : ", code);
+			else if (*raw == 0x40)
+				hpack_debug_printf("%02x: p16: literal with indexing : ", code);
+
+			if (*raw == 0x40)
+				must_index = 1;
+
+			raw++; len--;
+
+			/* retrieve name */
+			if (!len) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			huff = *raw & 0x80;
+			nlen = get_var_int(&raw, &len, 7);
+			if (len == (uint32_t)-1 || len < nlen) { // truncated
+				hpack_debug_printf("##ERR@%d## (truncated): nlen=%d len=%d\n",
+				                   __LINE__, (int)nlen, (int)len);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			name = ist2(raw, nlen);
+
+			raw += nlen;
+			len -= nlen;
+
+			if (huff) {
+				/* huffman-decode the name into the temporary chunk */
+				char *ntrash = chunk_newstr(tmp);
+				if (!ntrash) {
+					hpack_debug_printf("##ERR@%d##\n", __LINE__);
+					ret = -HPACK_ERR_TOO_LARGE;
+					goto leave;
+				}
+
+				nlen = huff_dec((const uint8_t *)name.ptr, name.len, ntrash,
+				                tmp->size - tmp->data);
+				if (nlen == (uint32_t)-1) {
+					hpack_debug_printf("2: can't decode huffman.\n");
+					ret = -HPACK_ERR_HUFFMAN;
+					goto leave;
+				}
+				hpack_debug_printf(" [name huff %d->%d] ", (int)name.len, (int)nlen);
+
+				tmp->data += nlen; // make room for the value
+				name = ist2(ntrash, nlen);
+			}
+
+			/* retrieve value */
+			if (!len) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			huff = *raw & 0x80;
+			vlen = get_var_int(&raw, &len, 7);
+			if (len == (uint32_t)-1 || len < vlen) { // truncated
+				hpack_debug_printf("##ERR@%d## : vlen=%d len=%d\n",
+				                   __LINE__, (int)vlen, (int)len);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			value = ist2(raw, vlen);
+			raw += vlen;
+			len -= vlen;
+
+			if (huff) {
+				/* huffman-decode the value into the temporary chunk */
+				char *vtrash = chunk_newstr(tmp);
+				if (!vtrash) {
+					hpack_debug_printf("##ERR@%d##\n", __LINE__);
+					ret = -HPACK_ERR_TOO_LARGE;
+					goto leave;
+				}
+
+				vlen = huff_dec((const uint8_t *)value.ptr, value.len, vtrash,
+				                tmp->size - tmp->data);
+				if (vlen == (uint32_t)-1) {
+					hpack_debug_printf("3: can't decode huffman.\n");
+					ret = -HPACK_ERR_HUFFMAN;
+					goto leave;
+				}
+				hpack_debug_printf(" [value huff %d->%d] ", (int)value.len, (int)vlen);
+
+				tmp->data += vlen; // make room for the value
+				value = ist2(vtrash, vlen);
+			}
+
+			/* <name> and <value> are correctly filled here */
+		}
+		else {
+			/* 0x01..0x0f : literal header field without indexing -- indexed name */
+			/* 0x11..0x1f : literal header field never indexed -- indexed name */
+			/* 0x41..0x7f : literal header field with incremental indexing -- indexed name */
+
+			if (*raw <= 0x0f)
+				hpack_debug_printf("%02x: p16: literal without indexing -- indexed name : ", code);
+			else if (*raw >= 0x41)
+				hpack_debug_printf("%02x: p15: literal with indexing -- indexed name : ", code);
+			else
+				hpack_debug_printf("%02x: p16: literal never indexed -- indexed name : ", code);
+
+			/* retrieve name index */
+			if (*raw >= 0x41) {
+				must_index = 1;
+				idx = get_var_int(&raw, &len, 6);
+			}
+			else
+				idx = get_var_int(&raw, &len, 4);
+
+			hpack_debug_printf(" idx=%u ", idx);
+
+			if (len == (uint32_t)-1 || !len) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			if (!hpack_valid_idx(dht, idx)) {
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TOO_LARGE;
+				goto leave;
+			}
+
+			/* retrieve value */
+			huff = *raw & 0x80;
+			vlen = get_var_int(&raw, &len, 7);
+			if (len == (uint32_t)-1 || len < vlen) { // truncated
+				hpack_debug_printf("##ERR@%d##\n", __LINE__);
+				ret = -HPACK_ERR_TRUNCATED;
+				goto leave;
+			}
+
+			value = ist2(raw, vlen);
+			raw += vlen;
+			len -= vlen;
+
+			if (huff) {
+				/* huffman-decode the value into the temporary chunk */
+				char *vtrash = chunk_newstr(tmp);
+				if (!vtrash) {
+					hpack_debug_printf("##ERR@%d##\n", __LINE__);
+					ret = -HPACK_ERR_TOO_LARGE;
+					goto leave;
+				}
+
+				vlen = huff_dec((const uint8_t *)value.ptr, value.len, vtrash,
+				                tmp->size - tmp->data);
+				if (vlen == (uint32_t)-1) {
+					hpack_debug_printf("##ERR@%d## can't decode huffman : ilen=%d osize=%d\n",
+					                   __LINE__, (int)value.len,
+					                   (int)(tmp->size - tmp->data));
+					hpack_debug_hexdump(stderr, "[HUFFMAN] ", value.ptr, 0, value.len);
+					ret = -HPACK_ERR_HUFFMAN;
+					goto leave;
+				}
+				tmp->data += vlen; // make room for the value
+				value = ist2(vtrash, vlen);
+			}
+
+			name = IST_NULL;
+			if (!must_index)
+				name.len = hpack_idx_to_phdr(idx);
+
+			if (!name.len) {
+				name = hpack_alloc_string(tmp, idx, hpack_idx_to_name(dht, idx));
+				if (!isttest(name)) {
+					hpack_debug_printf("##ERR@%d##\n", __LINE__);
+					ret = -HPACK_ERR_TOO_LARGE;
+					goto leave;
+				}
+			}
+			/* <name> and <value> are correctly filled here */
+		}
+
+		/* We must not accept empty header names (forbidden by the spec and used
+		 * as a list termination).
+		 */
+		if (!name.len) {
+			hpack_debug_printf("##ERR@%d##\n", __LINE__);
+			ret = -HPACK_ERR_INVALID_ARGUMENT;
+			goto leave;
+		}
+
+		/* here's what we have here :
+		 *   - name.len > 0
+		 *   - value is filled with either const data or data allocated from tmp
+		 *   - name.ptr == NULL && !must_index : known pseudo-header #name.len
+		 *   - name.ptr != NULL || must_index : general header, unknown pseudo-header or index needed
+		 */
+		if (ret >= list_size) {
+			hpack_debug_printf("##ERR@%d##\n", __LINE__);
+			ret = -HPACK_ERR_TOO_LARGE;
+			goto leave;
+		}
+
+		list[ret].n = name;
+		list[ret].v = value;
+		ret++;
+
+		if (must_index && hpack_dht_insert(dht, name, value) < 0) {
+			hpack_debug_printf("failed to find some room in the dynamic table\n");
+			ret = -HPACK_ERR_DHT_INSERT_FAIL;
+			goto leave;
+		}
+
+		hpack_debug_printf("\e[1;34m%s\e[0m: ",
+		                   isttest(name) ? istpad(trash.area, name).ptr : h2_phdr_to_str(name.len));
+
+		hpack_debug_printf("\e[1;35m%s\e[0m [mustidx=%d, used=%d] [n=(%p,%d) v=(%p,%d)]\n",
+		                   istpad(trash.area, value).ptr, must_index,
+		                   dht->used,
+		                   name.ptr, (int)name.len, value.ptr, (int)value.len);
+	}
+
+	if (ret >= list_size) {
+		ret = -HPACK_ERR_TOO_LARGE;
+		goto leave;
+	}
+
+	/* put an end marker */
+	list[ret].n = list[ret].v = IST_NULL;
+	ret++;
+
+ leave:
+	hpack_debug_printf("-- done: ret=%d list_size=%d --\n", (int)ret, (int)list_size);
+	return ret;
+}
diff --git a/src/hpack-enc.c b/src/hpack-enc.c
new file mode 100644
index 0000000..3ab21bc
--- /dev/null
+++ b/src/hpack-enc.c
@@ -0,0 +1,210 @@
+/*
+ * HPACK encoder (RFC7541)
+ *
+ * Copyright (C) 2014-2017 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/hpack-enc.h>
+#include <haproxy/http-hdr-t.h>
+
+/*
+ * HPACK encoding: these tables were generated using gen-enc.c
+ */
+
+/* encoding of stream of compressed headers. This stream is composed of series
+ * of <len:8b> <index:8b> <name:<len>*8b>, where <len> is the length of the
+ * name, <index> is the static table index byte to emit for that name, and
+ * <name> is the name itself (lower-case, length-prefixed, not NUL-terminated).
+ * Records are sorted by increasing name length so that all names of a given
+ * length are consecutive, and the stream ends with a zero length byte which
+ * serves as the terminator for the lookup loop in hpack_encode_header().
+ */
+const char hpack_enc_stream[666] = {
+	/*   0: */ 0x03, 0x15, 0x61, 0x67, 0x65, 0x03, 0x3c, 0x76,
+	/*   8: */ 0x69, 0x61, 0x04, 0x21, 0x64, 0x61, 0x74, 0x65,
+	/*  16: */ 0x04, 0x26, 0x68, 0x6f, 0x73, 0x74, 0x04, 0x22,
+	/*  24: */ 0x65, 0x74, 0x61, 0x67, 0x04, 0x25, 0x66, 0x72,
+	/*  32: */ 0x6f, 0x6d, 0x04, 0x2d, 0x6c, 0x69, 0x6e, 0x6b,
+	/*  40: */ 0x04, 0x3b, 0x76, 0x61, 0x72, 0x79, 0x05, 0x04,
+	/*  48: */ 0x3a, 0x70, 0x61, 0x74, 0x68, 0x05, 0x16, 0x61,
+	/*  56: */ 0x6c, 0x6c, 0x6f, 0x77, 0x05, 0x32, 0x72, 0x61,
+	/*  64: */ 0x6e, 0x67, 0x65, 0x06, 0x13, 0x61, 0x63, 0x63,
+	/*  72: */ 0x65, 0x70, 0x74, 0x06, 0x36, 0x73, 0x65, 0x72,
+	/*  80: */ 0x76, 0x65, 0x72, 0x06, 0x20, 0x63, 0x6f, 0x6f,
+	/*  88: */ 0x6b, 0x69, 0x65, 0x06, 0x23, 0x65, 0x78, 0x70,
+	/*  96: */ 0x65, 0x63, 0x74, 0x07, 0x33, 0x72, 0x65, 0x66,
+	/* 104: */ 0x65, 0x72, 0x65, 0x72, 0x07, 0x24, 0x65, 0x78,
+	/* 112: */ 0x70, 0x69, 0x72, 0x65, 0x73, 0x07, 0x02, 0x3a,
+	/* 120: */ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x07, 0x06,
+	/* 128: */ 0x3a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x07,
+	/* 136: */ 0x08, 0x3a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	/* 144: */ 0x07, 0x34, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73,
+	/* 152: */ 0x68, 0x08, 0x2e, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+	/* 160: */ 0x69, 0x6f, 0x6e, 0x08, 0x27, 0x69, 0x66, 0x2d,
+	/* 168: */ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x08, 0x2a, 0x69,
+	/* 176: */ 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x0a,
+	/* 184: */ 0x3a, 0x75, 0x73, 0x65, 0x72, 0x2d, 0x61, 0x67,
+	/* 192: */ 0x65, 0x6e, 0x74, 0x0a, 0x37, 0x73, 0x65, 0x74,
+	/* 200: */ 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x0a,
+	/* 208: */ 0x01, 0x3a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	/* 216: */ 0x69, 0x74, 0x79, 0x0b, 0x35, 0x72, 0x65, 0x74,
+	/* 224: */ 0x72, 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72,
+	/* 232: */ 0x0c, 0x1f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	/* 240: */ 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x0c, 0x2f,
+	/* 248: */ 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, 0x77,
+	/* 256: */ 0x61, 0x72, 0x64, 0x73, 0x0d, 0x18, 0x63, 0x61,
+	/* 264: */ 0x63, 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74,
+	/* 272: */ 0x72, 0x6f, 0x6c, 0x0d, 0x2c, 0x6c, 0x61, 0x73,
+	/* 280: */ 0x74, 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+	/* 288: */ 0x65, 0x64, 0x0d, 0x12, 0x61, 0x63, 0x63, 0x65,
+	/* 296: */ 0x70, 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65,
+	/* 304: */ 0x73, 0x0d, 0x29, 0x69, 0x66, 0x2d, 0x6e, 0x6f,
+	/* 312: */ 0x6e, 0x65, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68,
+	/* 320: */ 0x0d, 0x17, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	/* 328: */ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0d,
+	/* 336: */ 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+	/* 344: */ 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x0e, 0x1c,
+	/* 352: */ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+	/* 360: */ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x0e, 0x0f,
+	/* 368: */ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x63,
+	/* 376: */ 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x0f, 0x10,
+	/* 384: */ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x65,
+	/* 392: */ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x0f,
+	/* 400: */ 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d,
+	/* 408: */ 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+	/* 416: */ 0x10, 0x1a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	/* 424: */ 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69,
+	/* 432: */ 0x6e, 0x67, 0x10, 0x1b, 0x63, 0x6f, 0x6e, 0x74,
+	/* 440: */ 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x61, 0x6e, 0x67,
+	/* 448: */ 0x75, 0x61, 0x67, 0x65, 0x10, 0x1d, 0x63, 0x6f,
+	/* 456: */ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+	/* 464: */ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, 0x3d,
+	/* 472: */ 0x77, 0x77, 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68,
+	/* 480: */ 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
+	/* 488: */ 0x11, 0x39, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66,
+	/* 496: */ 0x65, 0x72, 0x2d, 0x65, 0x6e, 0x63, 0x6f, 0x64,
+	/* 504: */ 0x69, 0x6e, 0x67, 0x11, 0x28, 0x69, 0x66, 0x2d,
+	/* 512: */ 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64,
+	/* 520: */ 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30,
+	/* 528: */ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, 0x75,
+	/* 536: */ 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+	/* 544: */ 0x74, 0x65, 0x13, 0x19, 0x63, 0x6f, 0x6e, 0x74,
+	/* 552: */ 0x65, 0x6e, 0x74, 0x2d, 0x64, 0x69, 0x73, 0x70,
+	/* 560: */ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x13,
+	/* 568: */ 0x2b, 0x69, 0x66, 0x2d, 0x75, 0x6e, 0x6d, 0x6f,
+	/* 576: */ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+	/* 584: */ 0x69, 0x6e, 0x63, 0x65, 0x13, 0x31, 0x70, 0x72,
+	/* 592: */ 0x6f, 0x78, 0x79, 0x2d, 0x61, 0x75, 0x74, 0x68,
+	/* 600: */ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+	/* 608: */ 0x6e, 0x19, 0x38, 0x73, 0x74, 0x72, 0x69, 0x63,
+	/* 616: */ 0x74, 0x2d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70,
+	/* 624: */ 0x6f, 0x72, 0x74, 0x2d, 0x73, 0x65, 0x63, 0x75,
+	/* 632: */ 0x72, 0x69, 0x74, 0x79, 0x1b, 0x14, 0x61, 0x63,
+	/* 640: */ 0x63, 0x65, 0x73, 0x73, 0x2d, 0x63, 0x6f, 0x6e,
+	/* 648: */ 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x61, 0x6c, 0x6c,
+	/* 656: */ 0x6f, 0x77, 0x2d, 0x6f, 0x72, 0x69, 0x67, 0x69,
+	/* 664: */ 0x6e, 0x00,
+};
+
+/* This points to the first position in table hpack_enc_stream[] of a header
+ * field whose name has the length used as the index here. -1 means the static
+ * table contains no name of that length. Since records of equal name length
+ * are consecutive in hpack_enc_stream[], a lookup starts at this position and
+ * stops as soon as the length byte of the next record differs.
+ */
+const signed short hpack_pos_len[32] = {
+	/*  0: */  -1,  -1,  -1,   0,  10,  46,  67,  99,
+	/*  8: */ 153,  -1, 183, 219, 232, 260, 350, 382,
+	/* 16: */ 416, 488, 526, 546,  -1,  -1,  -1,  -1,
+	/* 24: */  -1, 609,  -1, 636,  -1,  -1,  -1,  -1,
+};
+
+/* Tries to encode header whose name is <n> and value <v> into the chunk <out>.
+ * When the name matches a static table entry, only that entry's one-byte index
+ * is sent for the name (literal with incremental indexing, 7541#6.2.1),
+ * otherwise the name is sent as a literal (7541#6.2.2). The value is always
+ * sent as a literal. <out>->data is only committed on success, so the output
+ * buffer is left untouched on failure.
+ * Returns non-zero on success, 0 on failure (buffer full).
+ */
+int hpack_encode_header(struct buffer *out, const struct ist n,
+                        const struct ist v)
+{
+	int len = out->data;  /* current write position in <out> */
+	int size = out->size;
+	int pos;              /* read position in hpack_enc_stream[] */
+
+	if (len >= size)
+		return 0;
+
+	/* look for the header field <n> in the static table */
+	if (n.len >= sizeof(hpack_pos_len) / sizeof(hpack_pos_len[0]))
+		goto make_literal;
+
+	pos = hpack_pos_len[n.len];
+	if (pos >= 0) {
+		/* At least one header field of this length exists. Records of
+		 * equal length are consecutive, so walk the <len> <index>
+		 * <name> records until the name matches or the next record's
+		 * length byte differs (the stream's trailing zero length byte
+		 * guarantees termination).
+		 */
+		do {
+			char idx;
+
+			pos++;                          /* skip the length byte */
+			idx = hpack_enc_stream[pos++];  /* static table index */
+			pos += n.len;                   /* skip the name */
+			if (isteq(ist2(&hpack_enc_stream[pos - n.len], n.len), n)) {
+				/* emit literal with indexing (7541#6.2.1) :
+				 * [ 0 | 1 | Index (6+) ]
+				 */
+				out->area[len++] = idx | 0x40;
+				goto emit_value;
+			}
+		} while ((unsigned char)hpack_enc_stream[pos] == n.len);
+	}
+
+ make_literal:
+	/* short names (< 127) fit a single-byte length prefix; longer ones go
+	 * through the generic multi-byte length encoder.
+	 */
+	if (likely(n.len < 127 && len + 2 + n.len <= size)) {
+		out->area[len++] = 0x00; /* literal without indexing -- new name */
+		out->area[len++] = n.len; /* single-byte length encoding */
+		ist2bin(out->area + len, n);
+		len += n.len;
+	}
+	else if (hpack_len_to_bytes(n.len) &&
+		 len + 1 + hpack_len_to_bytes(n.len) + n.len <= size) {
+		out->area[len++] = 0x00; /* literal without indexing -- new name */
+		len = hpack_encode_len(out->area, len, n.len);
+		ist2bin(out->area + len, n);
+		len += n.len;
+	}
+	else {
+		/* header field name too large for the buffer */
+		return 0;
+	}
+
+ emit_value:
+	/* copy literal header field value */
+	if (!hpack_len_to_bytes(v.len) ||
+	    len + hpack_len_to_bytes(v.len) + v.len > size) {
+		/* header value too large for the buffer */
+		return 0;
+	}
+
+	len = hpack_encode_len(out->area, len, v.len);
+	memcpy(out->area + len, v.ptr, v.len);
+	len += v.len;
+
+	out->data = len; /* commit only once everything fitted */
+	return 1;
+}
diff --git a/src/hpack-huff.c b/src/hpack-huff.c
new file mode 100644
index 0000000..77743be
--- /dev/null
+++ b/src/hpack-huff.c
@@ -0,0 +1,861 @@
+/*
+ * Huffman decoding and encoding for HPACK (RFC7541)
+ *
+ * Copyright (C) 2014-2017 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/hpack-huff.h>
+#include <haproxy/net_helper.h>
+
+/* one entry of the static huffman table below: the <b>-bit code for a symbol
+ * is stored right-aligned (in the low-order bits) in <c>, unlike the reversed
+ * lookup tables further down which are aligned on the MSB.
+ */
+struct huff {
+	uint32_t c; /* code point */
+	int b; /* bits */
+};
+
+/* huffman table as per RFC7541 appendix B, indexed by the symbol to encode
+ * (0..255, plus 256 for the EOS marker). Each <b>-bit code is stored
+ * right-aligned in <c>.
+ */
+static const struct huff ht[257] = {
+	[  0] = { .c = 0x00001ff8, .b = 13 },
+	[  1] = { .c = 0x007fffd8, .b = 23 },
+	[  2] = { .c = 0x0fffffe2, .b = 28 },
+	[  3] = { .c = 0x0fffffe3, .b = 28 },
+	[  4] = { .c = 0x0fffffe4, .b = 28 },
+	[  5] = { .c = 0x0fffffe5, .b = 28 },
+	[  6] = { .c = 0x0fffffe6, .b = 28 },
+	[  7] = { .c = 0x0fffffe7, .b = 28 },
+	[  8] = { .c = 0x0fffffe8, .b = 28 },
+	[  9] = { .c = 0x00ffffea, .b = 24 },
+	[ 10] = { .c = 0x3ffffffc, .b = 30 },
+	[ 11] = { .c = 0x0fffffe9, .b = 28 },
+	[ 12] = { .c = 0x0fffffea, .b = 28 },
+	[ 13] = { .c = 0x3ffffffd, .b = 30 },
+	[ 14] = { .c = 0x0fffffeb, .b = 28 },
+	[ 15] = { .c = 0x0fffffec, .b = 28 },
+	[ 16] = { .c = 0x0fffffed, .b = 28 },
+	[ 17] = { .c = 0x0fffffee, .b = 28 },
+	[ 18] = { .c = 0x0fffffef, .b = 28 },
+	[ 19] = { .c = 0x0ffffff0, .b = 28 },
+	[ 20] = { .c = 0x0ffffff1, .b = 28 },
+	[ 21] = { .c = 0x0ffffff2, .b = 28 },
+	[ 22] = { .c = 0x3ffffffe, .b = 30 },
+	[ 23] = { .c = 0x0ffffff3, .b = 28 },
+	[ 24] = { .c = 0x0ffffff4, .b = 28 },
+	[ 25] = { .c = 0x0ffffff5, .b = 28 },
+	[ 26] = { .c = 0x0ffffff6, .b = 28 },
+	[ 27] = { .c = 0x0ffffff7, .b = 28 },
+	[ 28] = { .c = 0x0ffffff8, .b = 28 },
+	[ 29] = { .c = 0x0ffffff9, .b = 28 },
+	[ 30] = { .c = 0x0ffffffa, .b = 28 },
+	[ 31] = { .c = 0x0ffffffb, .b = 28 },
+	[ 32] = { .c = 0x00000014, .b =  6 },
+	[ 33] = { .c = 0x000003f8, .b = 10 },
+	[ 34] = { .c = 0x000003f9, .b = 10 },
+	[ 35] = { .c = 0x00000ffa, .b = 12 },
+	[ 36] = { .c = 0x00001ff9, .b = 13 },
+	[ 37] = { .c = 0x00000015, .b =  6 },
+	[ 38] = { .c = 0x000000f8, .b =  8 },
+	[ 39] = { .c = 0x000007fa, .b = 11 },
+	[ 40] = { .c = 0x000003fa, .b = 10 },
+	[ 41] = { .c = 0x000003fb, .b = 10 },
+	[ 42] = { .c = 0x000000f9, .b =  8 },
+	[ 43] = { .c = 0x000007fb, .b = 11 },
+	[ 44] = { .c = 0x000000fa, .b =  8 },
+	[ 45] = { .c = 0x00000016, .b =  6 },
+	[ 46] = { .c = 0x00000017, .b =  6 },
+	[ 47] = { .c = 0x00000018, .b =  6 },
+	[ 48] = { .c = 0x00000000, .b =  5 },
+	[ 49] = { .c = 0x00000001, .b =  5 },
+	[ 50] = { .c = 0x00000002, .b =  5 },
+	[ 51] = { .c = 0x00000019, .b =  6 },
+	[ 52] = { .c = 0x0000001a, .b =  6 },
+	[ 53] = { .c = 0x0000001b, .b =  6 },
+	[ 54] = { .c = 0x0000001c, .b =  6 },
+	[ 55] = { .c = 0x0000001d, .b =  6 },
+	[ 56] = { .c = 0x0000001e, .b =  6 },
+	[ 57] = { .c = 0x0000001f, .b =  6 },
+	[ 58] = { .c = 0x0000005c, .b =  7 },
+	[ 59] = { .c = 0x000000fb, .b =  8 },
+	[ 60] = { .c = 0x00007ffc, .b = 15 },
+	[ 61] = { .c = 0x00000020, .b =  6 },
+	[ 62] = { .c = 0x00000ffb, .b = 12 },
+	[ 63] = { .c = 0x000003fc, .b = 10 },
+	[ 64] = { .c = 0x00001ffa, .b = 13 },
+	[ 65] = { .c = 0x00000021, .b =  6 },
+	[ 66] = { .c = 0x0000005d, .b =  7 },
+	[ 67] = { .c = 0x0000005e, .b =  7 },
+	[ 68] = { .c = 0x0000005f, .b =  7 },
+	[ 69] = { .c = 0x00000060, .b =  7 },
+	[ 70] = { .c = 0x00000061, .b =  7 },
+	[ 71] = { .c = 0x00000062, .b =  7 },
+	[ 72] = { .c = 0x00000063, .b =  7 },
+	[ 73] = { .c = 0x00000064, .b =  7 },
+	[ 74] = { .c = 0x00000065, .b =  7 },
+	[ 75] = { .c = 0x00000066, .b =  7 },
+	[ 76] = { .c = 0x00000067, .b =  7 },
+	[ 77] = { .c = 0x00000068, .b =  7 },
+	[ 78] = { .c = 0x00000069, .b =  7 },
+	[ 79] = { .c = 0x0000006a, .b =  7 },
+	[ 80] = { .c = 0x0000006b, .b =  7 },
+	[ 81] = { .c = 0x0000006c, .b =  7 },
+	[ 82] = { .c = 0x0000006d, .b =  7 },
+	[ 83] = { .c = 0x0000006e, .b =  7 },
+	[ 84] = { .c = 0x0000006f, .b =  7 },
+	[ 85] = { .c = 0x00000070, .b =  7 },
+	[ 86] = { .c = 0x00000071, .b =  7 },
+	[ 87] = { .c = 0x00000072, .b =  7 },
+	[ 88] = { .c = 0x000000fc, .b =  8 },
+	[ 89] = { .c = 0x00000073, .b =  7 },
+	[ 90] = { .c = 0x000000fd, .b =  8 },
+	[ 91] = { .c = 0x00001ffb, .b = 13 },
+	[ 92] = { .c = 0x0007fff0, .b = 19 },
+	[ 93] = { .c = 0x00001ffc, .b = 13 },
+	[ 94] = { .c = 0x00003ffc, .b = 14 },
+	[ 95] = { .c = 0x00000022, .b =  6 },
+	[ 96] = { .c = 0x00007ffd, .b = 15 },
+	[ 97] = { .c = 0x00000003, .b =  5 },
+	[ 98] = { .c = 0x00000023, .b =  6 },
+	[ 99] = { .c = 0x00000004, .b =  5 },
+	[100] = { .c = 0x00000024, .b =  6 },
+	[101] = { .c = 0x00000005, .b =  5 },
+	[102] = { .c = 0x00000025, .b =  6 },
+	[103] = { .c = 0x00000026, .b =  6 },
+	[104] = { .c = 0x00000027, .b =  6 },
+	[105] = { .c = 0x00000006, .b =  5 },
+	[106] = { .c = 0x00000074, .b =  7 },
+	[107] = { .c = 0x00000075, .b =  7 },
+	[108] = { .c = 0x00000028, .b =  6 },
+	[109] = { .c = 0x00000029, .b =  6 },
+	[110] = { .c = 0x0000002a, .b =  6 },
+	[111] = { .c = 0x00000007, .b =  5 },
+	[112] = { .c = 0x0000002b, .b =  6 },
+	[113] = { .c = 0x00000076, .b =  7 },
+	[114] = { .c = 0x0000002c, .b =  6 },
+	[115] = { .c = 0x00000008, .b =  5 },
+	[116] = { .c = 0x00000009, .b =  5 },
+	[117] = { .c = 0x0000002d, .b =  6 },
+	[118] = { .c = 0x00000077, .b =  7 },
+	[119] = { .c = 0x00000078, .b =  7 },
+	[120] = { .c = 0x00000079, .b =  7 },
+	[121] = { .c = 0x0000007a, .b =  7 },
+	[122] = { .c = 0x0000007b, .b =  7 },
+	[123] = { .c = 0x00007ffe, .b = 15 },
+	[124] = { .c = 0x000007fc, .b = 11 },
+	[125] = { .c = 0x00003ffd, .b = 14 },
+	[126] = { .c = 0x00001ffd, .b = 13 },
+	[127] = { .c = 0x0ffffffc, .b = 28 },
+	[128] = { .c = 0x000fffe6, .b = 20 },
+	[129] = { .c = 0x003fffd2, .b = 22 },
+	[130] = { .c = 0x000fffe7, .b = 20 },
+	[131] = { .c = 0x000fffe8, .b = 20 },
+	[132] = { .c = 0x003fffd3, .b = 22 },
+	[133] = { .c = 0x003fffd4, .b = 22 },
+	[134] = { .c = 0x003fffd5, .b = 22 },
+	[135] = { .c = 0x007fffd9, .b = 23 },
+	[136] = { .c = 0x003fffd6, .b = 22 },
+	[137] = { .c = 0x007fffda, .b = 23 },
+	[138] = { .c = 0x007fffdb, .b = 23 },
+	[139] = { .c = 0x007fffdc, .b = 23 },
+	[140] = { .c = 0x007fffdd, .b = 23 },
+	[141] = { .c = 0x007fffde, .b = 23 },
+	[142] = { .c = 0x00ffffeb, .b = 24 },
+	[143] = { .c = 0x007fffdf, .b = 23 },
+	[144] = { .c = 0x00ffffec, .b = 24 },
+	[145] = { .c = 0x00ffffed, .b = 24 },
+	[146] = { .c = 0x003fffd7, .b = 22 },
+	[147] = { .c = 0x007fffe0, .b = 23 },
+	[148] = { .c = 0x00ffffee, .b = 24 },
+	[149] = { .c = 0x007fffe1, .b = 23 },
+	[150] = { .c = 0x007fffe2, .b = 23 },
+	[151] = { .c = 0x007fffe3, .b = 23 },
+	[152] = { .c = 0x007fffe4, .b = 23 },
+	[153] = { .c = 0x001fffdc, .b = 21 },
+	[154] = { .c = 0x003fffd8, .b = 22 },
+	[155] = { .c = 0x007fffe5, .b = 23 },
+	[156] = { .c = 0x003fffd9, .b = 22 },
+	[157] = { .c = 0x007fffe6, .b = 23 },
+	[158] = { .c = 0x007fffe7, .b = 23 },
+	[159] = { .c = 0x00ffffef, .b = 24 },
+	[160] = { .c = 0x003fffda, .b = 22 },
+	[161] = { .c = 0x001fffdd, .b = 21 },
+	[162] = { .c = 0x000fffe9, .b = 20 },
+	[163] = { .c = 0x003fffdb, .b = 22 },
+	[164] = { .c = 0x003fffdc, .b = 22 },
+	[165] = { .c = 0x007fffe8, .b = 23 },
+	[166] = { .c = 0x007fffe9, .b = 23 },
+	[167] = { .c = 0x001fffde, .b = 21 },
+	[168] = { .c = 0x007fffea, .b = 23 },
+	[169] = { .c = 0x003fffdd, .b = 22 },
+	[170] = { .c = 0x003fffde, .b = 22 },
+	[171] = { .c = 0x00fffff0, .b = 24 },
+	[172] = { .c = 0x001fffdf, .b = 21 },
+	[173] = { .c = 0x003fffdf, .b = 22 },
+	[174] = { .c = 0x007fffeb, .b = 23 },
+	[175] = { .c = 0x007fffec, .b = 23 },
+	[176] = { .c = 0x001fffe0, .b = 21 },
+	[177] = { .c = 0x001fffe1, .b = 21 },
+	[178] = { .c = 0x003fffe0, .b = 22 },
+	[179] = { .c = 0x001fffe2, .b = 21 },
+	[180] = { .c = 0x007fffed, .b = 23 },
+	[181] = { .c = 0x003fffe1, .b = 22 },
+	[182] = { .c = 0x007fffee, .b = 23 },
+	[183] = { .c = 0x007fffef, .b = 23 },
+	[184] = { .c = 0x000fffea, .b = 20 },
+	[185] = { .c = 0x003fffe2, .b = 22 },
+	[186] = { .c = 0x003fffe3, .b = 22 },
+	[187] = { .c = 0x003fffe4, .b = 22 },
+	[188] = { .c = 0x007ffff0, .b = 23 },
+	[189] = { .c = 0x003fffe5, .b = 22 },
+	[190] = { .c = 0x003fffe6, .b = 22 },
+	[191] = { .c = 0x007ffff1, .b = 23 },
+	[192] = { .c = 0x03ffffe0, .b = 26 },
+	[193] = { .c = 0x03ffffe1, .b = 26 },
+	[194] = { .c = 0x000fffeb, .b = 20 },
+	[195] = { .c = 0x0007fff1, .b = 19 },
+	[196] = { .c = 0x003fffe7, .b = 22 },
+	[197] = { .c = 0x007ffff2, .b = 23 },
+	[198] = { .c = 0x003fffe8, .b = 22 },
+	[199] = { .c = 0x01ffffec, .b = 25 },
+	[200] = { .c = 0x03ffffe2, .b = 26 },
+	[201] = { .c = 0x03ffffe3, .b = 26 },
+	[202] = { .c = 0x03ffffe4, .b = 26 },
+	[203] = { .c = 0x07ffffde, .b = 27 },
+	[204] = { .c = 0x07ffffdf, .b = 27 },
+	[205] = { .c = 0x03ffffe5, .b = 26 },
+	[206] = { .c = 0x00fffff1, .b = 24 },
+	[207] = { .c = 0x01ffffed, .b = 25 },
+	[208] = { .c = 0x0007fff2, .b = 19 },
+	[209] = { .c = 0x001fffe3, .b = 21 },
+	[210] = { .c = 0x03ffffe6, .b = 26 },
+	[211] = { .c = 0x07ffffe0, .b = 27 },
+	[212] = { .c = 0x07ffffe1, .b = 27 },
+	[213] = { .c = 0x03ffffe7, .b = 26 },
+	[214] = { .c = 0x07ffffe2, .b = 27 },
+	[215] = { .c = 0x00fffff2, .b = 24 },
+	[216] = { .c = 0x001fffe4, .b = 21 },
+	[217] = { .c = 0x001fffe5, .b = 21 },
+	[218] = { .c = 0x03ffffe8, .b = 26 },
+	[219] = { .c = 0x03ffffe9, .b = 26 },
+	[220] = { .c = 0x0ffffffd, .b = 28 },
+	[221] = { .c = 0x07ffffe3, .b = 27 },
+	[222] = { .c = 0x07ffffe4, .b = 27 },
+	[223] = { .c = 0x07ffffe5, .b = 27 },
+	[224] = { .c = 0x000fffec, .b = 20 },
+	[225] = { .c = 0x00fffff3, .b = 24 },
+	[226] = { .c = 0x000fffed, .b = 20 },
+	[227] = { .c = 0x001fffe6, .b = 21 },
+	[228] = { .c = 0x003fffe9, .b = 22 },
+	[229] = { .c = 0x001fffe7, .b = 21 },
+	[230] = { .c = 0x001fffe8, .b = 21 },
+	[231] = { .c = 0x007ffff3, .b = 23 },
+	[232] = { .c = 0x003fffea, .b = 22 },
+	[233] = { .c = 0x003fffeb, .b = 22 },
+	[234] = { .c = 0x01ffffee, .b = 25 },
+	[235] = { .c = 0x01ffffef, .b = 25 },
+	[236] = { .c = 0x00fffff4, .b = 24 },
+	[237] = { .c = 0x00fffff5, .b = 24 },
+	[238] = { .c = 0x03ffffea, .b = 26 },
+	[239] = { .c = 0x007ffff4, .b = 23 },
+	[240] = { .c = 0x03ffffeb, .b = 26 },
+	[241] = { .c = 0x07ffffe6, .b = 27 },
+	[242] = { .c = 0x03ffffec, .b = 26 },
+	[243] = { .c = 0x03ffffed, .b = 26 },
+	[244] = { .c = 0x07ffffe7, .b = 27 },
+	[245] = { .c = 0x07ffffe8, .b = 27 },
+	[246] = { .c = 0x07ffffe9, .b = 27 },
+	[247] = { .c = 0x07ffffea, .b = 27 },
+	[248] = { .c = 0x07ffffeb, .b = 27 },
+	[249] = { .c = 0x0ffffffe, .b = 28 },
+	[250] = { .c = 0x07ffffec, .b = 27 },
+	[251] = { .c = 0x07ffffed, .b = 27 },
+	[252] = { .c = 0x07ffffee, .b = 27 },
+	[253] = { .c = 0x07ffffef, .b = 27 },
+	[254] = { .c = 0x07fffff0, .b = 27 },
+	[255] = { .c = 0x03ffffee, .b = 26 },
+	[256] = { .c = 0x3fffffff, .b = 30 }, /* EOS */
+};
+
+
+/* Reversed huffman codes, generated by dev/hpack/gen-rht.c from the table
+ * above, then simplified by hand by extracting the few different length
+ * values and writing code to produce them instead.
+ *
+ * The codes are aligned on the MSB since that's how they appear in the stream.
+ *
+ * Quick summary below of the way the tables work. They're based on how the
+ * prefixes are organized, starting from the MSB.
+ *
+ * These codes fit in a single octet (5 to 8 bits) :
+ * 00/5 08/5 10/5 18/5 20/5 28/5 30/5 38/5
+ * 40/5 48/5
+ *
+ * 50/6 54/6 58/6 5c/6 60/6 64/6 68/6 6c/6
+ * 70/6 74/6 78/6 7c/6 80/6 84/6 88/6 8c/6
+ * 90/6 94/6 98/6 9c/6 a0/6 a4/6 a8/6 ac/6
+ * b0/6 b4/6
+ *
+ * b8/7 ba/7 bc/7 be/7 c0/7 c2/7 c4/7 c6/7
+ * c8/7 ca/7 cc/7 ce/7 d0/7 d2/7 d4/7 d6/7
+ * d8/7 da/7 dc/7 de/7 e0/7 e2/7 e4/7 e6/7
+ * e8/7 ea/7 ec/7 ee/7 f0/7 f2/7 f4/7 f6/7
+ *
+ * f8/8 f9/8 fa/8 fb/8 fc/8 fd/8
+ *
+ * ==> a single 256-symbol table based on the full byte provides a direct
+ * access and the bit count
+ *
+ * These codes fit in two octets (10 to 15 bits, neither 9 nor 16 bits code) :
+ *
+ * fe + 2 bits:
+ * 00/2 40/2 80/2 c0/2
+ *
+ * ff + 2..7 bits :
+ * 00/2
+ * 40/3 60/3 80/3
+ * a0/4 b0/4
+ * c0/5 c8/5 d0/5 d8/5 e0/5 e8/5
+ * f0/6 f4/6
+ * f8/7 fa/7 fc/7
+ *
+ * ==> a single 256-symbol table made of b0.0 and b1.7-1 provides a direct
+ * access and the bit count after a miss on the first one above.
+ *
+ * These ones fit in three octets :
+ * ff fe + 3..5 bits :
+ * 00/3 20/3 40/3 60/4 70/4 80/4 90/4 a0/4
+ * b0/4 c0/4 d0/4
+ * e0/5 e8/5 f0/5 f8/5
+ *
+ * ff ff + 5..8 bits :
+ * 00/5 08/5 10/5 18/5 20/5 28/5 30/5 38/5
+ * 40/5
+ * 48/6 4c/6 50/6 54/6 58/6 5c/6 60/6 64/6
+ * 68/6 6c/6 70/6 74/6 78/6 7c/6 80/6 84/6
+ * 88/6 8c/6 90/6 94/6 98/6 9c/6 a0/6 a4/6
+ * a8/6 ac/6
+ * b0/7 b2/7 b4/7 b6/7 b8/7 ba/7 bc/7 be/7
+ * c0/7 c2/7 c4/7 c6/7 c8/7 ca/7 cc/7 ce/7
+ * d0/7 d2/7 d4/7 d6/7 d8/7 da/7 dc/7 de/7
+ * e0/7 e2/7 e4/7 e6/7 e8/7
+ * ea/8 eb/8 ec/8 ed/8 ee/8 ef/8 f0/8 f1/8
+ * f2/8 f3/8 f4/8 f5/8
+ *
+ * ==> a 32-symbol table has to be applied to 0xfffe
+ * ==> a 256-symbol table has to be applied to 0xffff
+ *
+ * The other ones fit in four octets with 1 to 6 bits in the last one :
+ * ff ff f6 : 00/1 80/1
+ * ff ff f7 : 00/1 80/1
+ * ff ff f8 : 00/2 40/2 80/2 c0/2
+ * ff ff f9 : 00/2 40/2 80/2 c0/2
+ * ff ff fa : 00/2 40/2 80/2 c0/2
+ * ff ff fb : 00/2 40/2 80/2
+ * ff ff fb : c0/3 e0/3
+ * ff ff fc : 00/3 20/3 40/3 60/3 80/3 a0/3 c0/3 e0/3
+ * ff ff fd : 00/3 20/3 40/3 60/3 80/3 a0/3 c0/3 e0/3
+ * ff ff fe : 00/3
+ * ff ff fe : 20/4 30/4 40/4 50/4 60/4 70/4 80/4 90/4 a0/4 b0/4 c0/4 d0/4 e0/4 f0/4
+ * ff ff ff : 00/4 10/4 20/4 30/4 40/4 50/4 60/4 70/4 80/4 90/4 a0/4 b0/4 c0/4 d0/4 e0/4
+ * ff ff ff : f0/6 f4/6 f8/6 fc/6
+ *
+ * ==> a 256-symbol table with b2.0-3,b3.7-4 gives all of them except the
+ * distinction between ffffff{f0,f4,f8,fc} which is rare enough
+ * and can be done by hand when bit count == 30.
+ *
+ *
+ * Code lengths :
+ * 5..8 : 0x00..0xfe
+ * 10..15 : 0xfe
+ * 0xff 0x00..0xfe
+ * 19..20 : 0xff 0xfe 0x00..0xdf
+ * 21 : 0xff 0xfe 0xe0..0xff
+ * 21 : 0xff 0xff 0x00..0x40
+ * 22..24 : 0xff 0xff 0x00..0xf5
+ * 24..28 : 0xff 0xff 0xf5..0xff
+ * 30 : 0xff 0xff 0xff 0xf0..0xff
+ *
+ *
+ * if b0 < 0xfe ==> 5..8 bits (74 codes)
+ * if b0 == 0xfe or 0xff : 10..15
+ * => if b0 == 0xfe || b1 < 0xfe : lookup (b0:0|b1:7..1) (21 codes)
+ *
+ * -- b0 = 0xff --
+ * if b1 == 0xfe : 19..21 bits
+ * => lookup b2:7..3 (15 codes)
+ *
+ * -- b0 = 0xff, b1 = 0xff : 147 codes --
+ * if b2 < 0xf6 : 21..24 bits (76 codes)
+ * if b2 >= 0xf6 : 25..30 bits (71 codes)
+ *
+ * Algorithm:
+ * - if > 24 and < 32, read missing bits.
+ * - if less than 24 bits, read 1 byte. If past end, insert 0xff instead.
+ * - if b0 < 0xfe lookup b0 in table0[0..255]
+ * - else if b0 == 0xfe, manual lookup
+ * - else if b0 == 0xff, lookup b1 in table1[0..255]
+ * ...
+ */
+
+/* Reverse lookup for the codes fitting in a single octet (5 to 8 bits):
+ * directly indexed by the first byte of the compressed stream (bits 31..24
+ * of the aligned word), returns the decoded symbol. Each code is repeated
+ * 2^(8-bits) times so that the trailing don't-care bits still resolve to
+ * the right symbol (see the map in the large comment above).
+ */
+uint8_t rht_bit31_24[256] = {
+	/* 0x00 */ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+	/* 0x08 */ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+	/* 0x10 */ 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32, 0x32,
+	/* 0x18 */ 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61,
+	/* 0x20 */ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	/* 0x28 */ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+	/* 0x30 */ 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69,
+	/* 0x38 */ 0x6f, 0x6f, 0x6f, 0x6f, 0x6f, 0x6f, 0x6f, 0x6f,
+	/* 0x40 */ 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73,
+	/* 0x48 */ 0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74,
+	/* 0x50 */ 0x20, 0x20, 0x20, 0x20,
+	/* 0x54 */ 0x25, 0x25, 0x25, 0x25,
+	/* 0x58 */ 0x2d, 0x2d, 0x2d, 0x2d,
+	/* 0x5c */ 0x2e, 0x2e, 0x2e, 0x2e,
+	/* 0x60 */ 0x2f, 0x2f, 0x2f, 0x2f,
+	/* 0x64 */ 0x33, 0x33, 0x33, 0x33,
+	/* 0x68 */ 0x34, 0x34, 0x34, 0x34,
+	/* 0x6c */ 0x35, 0x35, 0x35, 0x35,
+	/* 0x70 */ 0x36, 0x36, 0x36, 0x36,
+	/* 0x74 */ 0x37, 0x37, 0x37, 0x37,
+	/* 0x78 */ 0x38, 0x38, 0x38, 0x38,
+	/* 0x7c */ 0x39, 0x39, 0x39, 0x39,
+	/* 0x80 */ 0x3d, 0x3d, 0x3d, 0x3d,
+	/* 0x84 */ 0x41, 0x41, 0x41, 0x41,
+	/* 0x88 */ 0x5f, 0x5f, 0x5f, 0x5f,
+	/* 0x8c */ 0x62, 0x62, 0x62, 0x62,
+	/* 0x90 */ 0x64, 0x64, 0x64, 0x64,
+	/* 0x94 */ 0x66, 0x66, 0x66, 0x66,
+	/* 0x98 */ 0x67, 0x67, 0x67, 0x67,
+	/* 0x9c */ 0x68, 0x68, 0x68, 0x68,
+	/* 0xa0 */ 0x6c, 0x6c, 0x6c, 0x6c,
+	/* 0xa4 */ 0x6d, 0x6d, 0x6d, 0x6d,
+	/* 0xa8 */ 0x6e, 0x6e, 0x6e, 0x6e,
+	/* 0xac */ 0x70, 0x70, 0x70, 0x70,
+	/* 0xb0 */ 0x72, 0x72, 0x72, 0x72,
+	/* 0xb4 */ 0x75, 0x75, 0x75, 0x75,
+	/* 0xb8 */ 0x3a, 0x3a,
+	/* 0xba */ 0x42, 0x42,
+	/* 0xbc */ 0x43, 0x43,
+	/* 0xbe */ 0x44, 0x44,
+	/* 0xc0 */ 0x45, 0x45,
+	/* 0xc2 */ 0x46, 0x46,
+	/* 0xc4 */ 0x47, 0x47,
+	/* 0xc6 */ 0x48, 0x48,
+	/* 0xc8 */ 0x49, 0x49,
+	/* 0xca */ 0x4a, 0x4a,
+	/* 0xcc */ 0x4b, 0x4b,
+	/* 0xce */ 0x4c, 0x4c,
+	/* 0xd0 */ 0x4d, 0x4d,
+	/* 0xd2 */ 0x4e, 0x4e,
+	/* 0xd4 */ 0x4f, 0x4f,
+	/* 0xd6 */ 0x50, 0x50,
+	/* 0xd8 */ 0x51, 0x51,
+	/* 0xda */ 0x52, 0x52,
+	/* 0xdc */ 0x53, 0x53,
+	/* 0xde */ 0x54, 0x54,
+	/* 0xe0 */ 0x55, 0x55,
+	/* 0xe2 */ 0x56, 0x56,
+	/* 0xe4 */ 0x57, 0x57,
+	/* 0xe6 */ 0x59, 0x59,
+	/* 0xe8 */ 0x6a, 0x6a,
+	/* 0xea */ 0x6b, 0x6b,
+	/* 0xec */ 0x71, 0x71,
+	/* 0xee */ 0x76, 0x76,
+	/* 0xf0 */ 0x77, 0x77,
+	/* 0xf2 */ 0x78, 0x78,
+	/* 0xf4 */ 0x79, 0x79,
+	/* 0xf6 */ 0x7a, 0x7a,
+	/* 0xf8 */ 0x26,
+	/* 0xf9 */ 0x2a,
+	/* 0xfa */ 0x2c,
+	/* 0xfb */ 0x3b,
+	/* 0xfc */ 0x58,
+	/* 0xfd */ 0x5a,
+};
+
+/* Reverse lookup for the two-octet codes (10 to 15 bits, first byte 0xfe or
+ * 0xff): per the map in the large comment above, indexed by the last bit of
+ * the first byte and the 7 MSB of the second one (bits 24..17 of the aligned
+ * word), returns the decoded symbol.
+ */
+uint8_t rht_bit24_17[256] = {
+	/* 0x00 */ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+	/* 0x10 */ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+	/* 0x20 */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+	/* 0x30 */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+	/* 0x40 */ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
+	/* 0x50 */ 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28,
+	/* 0x60 */ 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29,
+	/* 0x70 */ 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29,
+	/* 0x80 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
+	/* 0x90 */ 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
+	/* 0xa0 */ 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27,
+	/* 0xb0 */ 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b, 0x2b,
+	/* 0xc0 */ 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c,
+	/* 0xd0 */ 0x23, 0x23, 0x23, 0x23, 0x23, 0x23, 0x23, 0x23,
+	/* 0xd8 */ 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e,
+	/* 0xe0 */ 0x00, 0x00, 0x00, 0x00,
+	/* 0xe4 */ 0x24, 0x24, 0x24, 0x24,
+	/* 0xe8 */ 0x40, 0x40, 0x40, 0x40,
+	/* 0xec */ 0x5b, 0x5b, 0x5b, 0x5b,
+	/* 0xf0 */ 0x5d, 0x5d, 0x5d, 0x5d,
+	/* 0xf4 */ 0x7e, 0x7e, 0x7e, 0x7e,
+	/* 0xf8 */ 0x5e, 0x5e,
+	/* 0xfa */ 0x7d, 0x7d,
+	/* 0xfc */ 0x3c,
+	/* 0xfd */ 0x60,
+	/* 0xfe */ 0x7b,
+};
+
+/* Reverse lookup for the codes starting with 0xff 0xff (21 to 24 bits):
+ * indexed by the third byte of the stream (bits 15..8 of the aligned word),
+ * returns the decoded symbol. Indexes 0xf6 and above are left out since, as
+ * noted in the large comment above, a third byte >= 0xf6 indicates a longer
+ * (25 to 30 bit) code handled elsewhere.
+ */
+uint8_t rht_bit15_8[256] = {
+	/* 0x00 */ 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0,
+	/* 0x08 */ 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1, 0xb1,
+	/* 0x10 */ 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3, 0xb3,
+	/* 0x18 */ 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1,
+	/* 0x20 */ 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd8,
+	/* 0x28 */ 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9,
+	/* 0x30 */ 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3,
+	/* 0x38 */ 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5, 0xe5,
+	/* 0x40 */ 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+	/* 0x48 */ 0x81, 0x81, 0x81, 0x81,
+	/* 0x4c */ 0x84, 0x84, 0x84, 0x84,
+	/* 0x50 */ 0x85, 0x85, 0x85, 0x85,
+	/* 0x54 */ 0x86, 0x86, 0x86, 0x86,
+	/* 0x58 */ 0x88, 0x88, 0x88, 0x88,
+	/* 0x5c */ 0x92, 0x92, 0x92, 0x92,
+	/* 0x60 */ 0x9a, 0x9a, 0x9a, 0x9a,
+	/* 0x64 */ 0x9c, 0x9c, 0x9c, 0x9c,
+	/* 0x68 */ 0xa0, 0xa0, 0xa0, 0xa0,
+	/* 0x6c */ 0xa3, 0xa3, 0xa3, 0xa3,
+	/* 0x70 */ 0xa4, 0xa4, 0xa4, 0xa4,
+	/* 0x74 */ 0xa9, 0xa9, 0xa9, 0xa9,
+	/* 0x78 */ 0xaa, 0xaa, 0xaa, 0xaa,
+	/* 0x7c */ 0xad, 0xad, 0xad, 0xad,
+	/* 0x80 */ 0xb2, 0xb2, 0xb2, 0xb2,
+	/* 0x84 */ 0xb5, 0xb5, 0xb5, 0xb5,
+	/* 0x88 */ 0xb9, 0xb9, 0xb9, 0xb9,
+	/* 0x8c */ 0xba, 0xba, 0xba, 0xba,
+	/* 0x90 */ 0xbb, 0xbb, 0xbb, 0xbb,
+	/* 0x94 */ 0xbd, 0xbd, 0xbd, 0xbd,
+	/* 0x98 */ 0xbe, 0xbe, 0xbe, 0xbe,
+	/* 0x9c */ 0xc4, 0xc4, 0xc4, 0xc4,
+	/* 0xa0 */ 0xc6, 0xc6, 0xc6, 0xc6,
+	/* 0xa4 */ 0xe4, 0xe4, 0xe4, 0xe4,
+	/* 0xa8 */ 0xe8, 0xe8, 0xe8, 0xe8,
+	/* 0xac */ 0xe9, 0xe9, 0xe9, 0xe9,
+	/* 0xb0 */ 0x01, 0x01,
+	/* 0xb2 */ 0x87, 0x87,
+	/* 0xb4 */ 0x89, 0x89,
+	/* 0xb6 */ 0x8a, 0x8a,
+	/* 0xb8 */ 0x8b, 0x8b,
+	/* 0xba */ 0x8c, 0x8c,
+	/* 0xbc */ 0x8d, 0x8d,
+	/* 0xbe */ 0x8f, 0x8f,
+	/* 0xc0 */ 0x93, 0x93,
+	/* 0xc2 */ 0x95, 0x95,
+	/* 0xc4 */ 0x96, 0x96,
+	/* 0xc6 */ 0x97, 0x97,
+	/* 0xc8 */ 0x98, 0x98,
+	/* 0xca */ 0x9b, 0x9b,
+	/* 0xcc */ 0x9d, 0x9d,
+	/* 0xce */ 0x9e, 0x9e,
+	/* 0xd0 */ 0xa5, 0xa5,
+	/* 0xd2 */ 0xa6, 0xa6,
+	/* 0xd4 */ 0xa8, 0xa8,
+	/* 0xd6 */ 0xae, 0xae,
+	/* 0xd8 */ 0xaf, 0xaf,
+	/* 0xda */ 0xb4, 0xb4,
+	/* 0xdc */ 0xb6, 0xb6,
+	/* 0xde */ 0xb7, 0xb7,
+	/* 0xe0 */ 0xbc, 0xbc,
+	/* 0xe2 */ 0xbf, 0xbf,
+	/* 0xe4 */ 0xc5, 0xc5,
+	/* 0xe6 */ 0xe7, 0xe7,
+	/* 0xe8 */ 0xef, 0xef,
+	/* 0xea */ 0x09,
+	/* 0xeb */ 0x8e,
+	/* 0xec */ 0x90,
+	/* 0xed */ 0x91,
+	/* 0xee */ 0x94,
+	/* 0xef */ 0x9f,
+	/* 0xf0 */ 0xab,
+	/* 0xf1 */ 0xce,
+	/* 0xf2 */ 0xd7,
+	/* 0xf3 */ 0xe1,
+	/* 0xf4 */ 0xec,
+	/* 0xf5 */ 0xed,
+};
+
+/* below two non-overlapping tables are merged in order to save on L1D:
+ * - bits 15-11 for values 0x00-0x1f
+ * - bits 11-4 for values 0x60-0xff
+ * Note that there's no data between 0x20 and 0x5f, the caller must
+ * adjust its offsets by subtracting 0x40 for values 0x60 and above.
+ * Each cell holds the decoded output byte; a symbol appears once per
+ * possible value of its trailing "don't care" bits, so that a direct
+ * lookup by the extracted code bits always lands on the right entry
+ * (see the two call places in huff_dec()).
+ */
+uint8_t rht_bit15_11_11_4[192] = {
+	/* part used for bits 15-11 (0x00-0x1f) */
+	/* 0x00 */ 0x5c, 0x5c, 0x5c, 0x5c,
+	/* 0x04 */ 0xc3, 0xc3, 0xc3, 0xc3,
+	/* 0x08 */ 0xd0, 0xd0, 0xd0, 0xd0,
+	/* 0x0c */ 0x80, 0x80,
+	/* 0x0e */ 0x82, 0x82,
+	/* 0x10 */ 0x83, 0x83,
+	/* 0x12 */ 0xa2, 0xa2,
+	/* 0x14 */ 0xb8, 0xb8,
+	/* 0x16 */ 0xc2, 0xc2,
+	/* 0x18 */ 0xe0, 0xe0,
+	/* 0x1a */ 0xe2, 0xe2,
+	/* 0x1c */ 0x99,
+	/* 0x1d */ 0xa1,
+	/* 0x1e */ 0xa7,
+	/* 0x1f */ 0xac,
+
+	/* part used for bits 11-4 for 0xf600 (0x60-0xff), starting @0x20 */
+	/* 0x60 */ 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7, 0xc7,
+	/* 0x68 */ 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf, 0xcf,
+	/* 0x70 */ 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea, 0xea,
+	/* 0x78 */ 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb, 0xeb,
+	/* 0x80 */ 0xc0, 0xc0, 0xc0, 0xc0,
+	/* 0x84 */ 0xc1, 0xc1, 0xc1, 0xc1,
+	/* 0x88 */ 0xc8, 0xc8, 0xc8, 0xc8,
+	/* 0x8c */ 0xc9, 0xc9, 0xc9, 0xc9,
+	/* 0x90 */ 0xca, 0xca, 0xca, 0xca,
+	/* 0x94 */ 0xcd, 0xcd, 0xcd, 0xcd,
+	/* 0x98 */ 0xd2, 0xd2, 0xd2, 0xd2,
+	/* 0x9c */ 0xd5, 0xd5, 0xd5, 0xd5,
+	/* 0xa0 */ 0xda, 0xda, 0xda, 0xda,
+	/* 0xa4 */ 0xdb, 0xdb, 0xdb, 0xdb,
+	/* 0xa8 */ 0xee, 0xee, 0xee, 0xee,
+	/* 0xac */ 0xf0, 0xf0, 0xf0, 0xf0,
+	/* 0xb0 */ 0xf2, 0xf2, 0xf2, 0xf2,
+	/* 0xb4 */ 0xf3, 0xf3, 0xf3, 0xf3,
+	/* 0xb8 */ 0xff, 0xff, 0xff, 0xff,
+	/* 0xbc */ 0xcb, 0xcb,
+	/* 0xbe */ 0xcc, 0xcc,
+	/* 0xc0 */ 0xd3, 0xd3,
+	/* 0xc2 */ 0xd4, 0xd4,
+	/* 0xc4 */ 0xd6, 0xd6,
+	/* 0xc6 */ 0xdd, 0xdd,
+	/* 0xc8 */ 0xde, 0xde,
+	/* 0xca */ 0xdf, 0xdf,
+	/* 0xcc */ 0xf1, 0xf1,
+	/* 0xce */ 0xf4, 0xf4,
+	/* 0xd0 */ 0xf5, 0xf5,
+	/* 0xd2 */ 0xf6, 0xf6,
+	/* 0xd4 */ 0xf7, 0xf7,
+	/* 0xd6 */ 0xf8, 0xf8,
+	/* 0xd8 */ 0xfa, 0xfa,
+	/* 0xda */ 0xfb, 0xfb,
+	/* 0xdc */ 0xfc, 0xfc,
+	/* 0xde */ 0xfd, 0xfd,
+	/* 0xe0 */ 0xfe, 0xfe,
+	/* 0xe2 */ 0x02,
+	/* 0xe3 */ 0x03,
+	/* 0xe4 */ 0x04,
+	/* 0xe5 */ 0x05,
+	/* 0xe6 */ 0x06,
+	/* 0xe7 */ 0x07,
+	/* 0xe8 */ 0x08,
+	/* 0xe9 */ 0x0b,
+	/* 0xea */ 0x0c,
+	/* 0xeb */ 0x0e,
+	/* 0xec */ 0x0f,
+	/* 0xed */ 0x10,
+	/* 0xee */ 0x11,
+	/* 0xef */ 0x12,
+	/* 0xf0 */ 0x13,
+	/* 0xf1 */ 0x14,
+	/* 0xf2 */ 0x15,
+	/* 0xf3 */ 0x17,
+	/* 0xf4 */ 0x18,
+	/* 0xf5 */ 0x19,
+	/* 0xf6 */ 0x1a,
+	/* 0xf7 */ 0x1b,
+	/* 0xf8 */ 0x1c,
+	/* 0xf9 */ 0x1d,
+	/* 0xfa */ 0x1e,
+	/* 0xfb */ 0x1f,
+	/* 0xfc */ 0x7f,
+	/* 0xfd */ 0xdc,
+	/* 0xfe */ 0xf9,
+	/* 0xff */ 0x0a,
+	/* Note, for [0xff], l==30 and bits 2..3 give 00:0x0a, 01:0x0d, 10:0x16, 11:EOS */
+};
+
+/* huffman-encode string <s> into the huff_tmp buffer and returns the amount
+ * of output bytes. The caller must ensure the output is large enough (ie at
+ * least 4 times as long as s).
+ *
+ * FIXME: bits are only counted for now, no code is emitted!
+ */
+int huff_enc(const char *s, char *out)
+{
+	int bits = 0;
+
+	/* sum the per-symbol huffman code lengths from the static table */
+	while (*s) {
+		bits += ht[(uint8_t)*s].b;
+		s++;
+	}
+	/* "+7" rounds the bit count up so that the division below yields
+	 * the number of whole output bytes (the last byte being padded,
+	 * per RFC7541 #5.2).
+	 */
+	bits += 7;
+
+	/* FIXME: huffman code is not emitted yet. */
+	//memset(out, 'H', bits / 8);
+	return bits / 8;
+}
+
+/* pass a huffman string, it will decode it and return the new output size or
+ * -1 in case of error.
+ *
+ * The principle of the decoder is to lookup full bytes in reverse-huffman
+ * tables. Since we may need up to 30 bits and the word positions are not
+ * always multiples of 8, we build the code word by shifting the "current"
+ * 32-bit word and the "next" one of the appropriate amount of bits. Once
+ * the shift goes beyond 32, words are swapped and the "next" one is refilled
+ * with new bytes. Shift operations are cheap when done a single time like this.
+ * On 64-bit platforms it is possible to further improve this by storing both
+ * of them in a single word.
+ */
+int huff_dec(const uint8_t *huff, int hlen, char *out, int olen)
+{
+	char *out_start = out;
+	char *out_end = out + olen;
+	const uint8_t *huff_end = huff + hlen;
+	uint32_t curr = 0;
+	uint32_t next = 0;
+	uint32_t shift;
+	uint32_t code; /* The 30-bit code being looked up, MSB-aligned */
+	uint8_t sym;
+	int bleft; /* bits left */
+	int l;
+
+	code = 0;
+	shift = 64; // start with an empty buffer: forces two refills below,
+	            // loading both <curr> and <next> on the first iteration
+	bleft = hlen << 3;
+	while (bleft > 0 && out != out_end) {
+		while (shift >= 32) {
+			curr = next;
+
+			/* read up to 4 bytes into next */
+			next = 0;
+
+			if (huff + 4 <= huff_end) {
+				next = read_n32(huff);
+				huff += 4;
+			}
+			else {
+				/* note: we append 0 and not 0xff so that we can
+				 * distinguish shifted bits from a really inserted
+				 * EOS.
+				 */
+				next = (((huff + 0 < huff_end) ? (uint32_t)huff[0] : 0x00) << 24) +
+				       (((huff + 1 < huff_end) ? (uint32_t)huff[1] : 0x00) << 16) +
+				       (((huff + 2 < huff_end) ? (uint32_t)huff[2] : 0x00) << 8) +
+				        ((huff + 3 < huff_end) ? (uint32_t)huff[3] : 0x00);
+				huff = huff_end;
+			}
+
+			shift -= 32;
+		}
+
+		/* curr:next contain 64 bit of huffman code */
+		code = curr;
+		if (shift)
+			code = (code << shift) + (next >> (32 - shift));
+
+		/* now we necessarily have 32 bits available. The thresholds
+		 * below split the code space by prefix so that the code length
+		 * <l> and the symbol can both be derived from a few compares
+		 * plus one table lookup.
+		 */
+		if (code < 0xfe000000) {
+			/* single byte */
+			sym = code >> 24;
+			l = sym < 0xb8 ?
+				sym < 0x50 ? 5 : 6 :
+				sym < 0xf8 ? 7 : 8;
+			sym = rht_bit31_24[code >> 24];
+		}
+		else if (code < 0xfffe0000) {
+			/* two bytes, 0xfe + 2 bits or 0xff + 2..7 bits */
+			sym = code >> 17;
+			l = sym < 0xe0 ?
+				sym < 0xa0 ? 10 : sym < 0xd0 ? 11 : 12 :
+				sym < 0xf8 ? 13 : sym < 0xfc ? 14 : 15;
+
+			sym = rht_bit24_17[(code >> 17) & 0xff];
+		}
+		else if (code < 0xffff0000) { /* 3..5 bits */
+			/* 0xff + 0xfe + 3..5 bits or
+			 * 0xff + 0xff + 5..8 bits for values till 0xf5
+			 */
+			sym = (code >> 11) & 0x1f;
+			l = sym < 0x0c ? 19 : sym < 0x1c ? 20 : 21;
+			sym = rht_bit15_11_11_4[(code >> 11) & 0x1f];
+		}
+		else if (code < 0xfffff600) { /* 5..8 bits */
+			/* that's 0xff + 0xff */
+			sym = code >> 8;
+
+			l = sym < 0xb0 ?
+				sym < 0x48 ? 21 : 22 :
+				sym < 0xea ? 23 : 24;
+			sym = rht_bit15_8[(code >> 8) & 0xff];
+		}
+		else {
+			/* 0xff 0xff 0xf6..0xff */
+			sym = code >> 4; /* sym = 0x60..0xff */
+			l = sym < 0xbc ?
+				sym < 0x80 ? 25 : 26 :
+				sym < 0xe2 ? 27 : sym < 0xff ? 28 : 30;
+			/* 0x40 is subtracted because the merged table has no
+			 * cells for 0x20-0x5f (see rht_bit15_11_11_4 above).
+			 */
+			if (sym < 0xff)
+				sym = rht_bit15_11_11_4[((code >> 4) & 0xff) - 0x40L];
+			else if ((code & 0xff) == 0xf0)
+				sym = 10;
+			else if ((code & 0xff) == 0xf4)
+				sym = 13;
+			else if ((code & 0xff) == 0xf8)
+				sym = 22;
+			else { // 0xfc : EOS
+				break;
+			}
+		}
+
+		/* refuse to consume more bits than the input provides */
+		if (!l || bleft - l < 0)
+			break;
+
+		bleft -= l;
+		shift += l;
+		*out++ = sym;
+	}
+
+	if (bleft > 0) {
+		/* some bits were not consumed after the last code, they must
+		 * match EOS (ie: all ones) and there must be 7 bits or less.
+		 * (7541#5.2).
+		 */
+		if (bleft > 7)
+			return -1;
+
+		if ((code & -(1 << (32 - bleft))) != (uint32_t)-(1 << (32 - bleft)))
+			return -1;
+	}
+
+	if (out < out_end)
+		*out = 0; // end of string whenever possible
+	return out - out_start;
+}
diff --git a/src/hpack-tbl.c b/src/hpack-tbl.c
new file mode 100644
index 0000000..990d2f7
--- /dev/null
+++ b/src/hpack-tbl.c
@@ -0,0 +1,372 @@
+/*
+ * HPACK header table management (RFC7541)
+ *
+ * Copyright (C) 2014-2017 Willy Tarreau <willy@haproxy.org>
+ * Copyright (C) 2017 HAProxy Technologies
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/hpack-huff.h>
+#include <haproxy/hpack-tbl.h>
+
+/* static header table as in RFC7541 Appendix A. [0] unused. Dynamic table
+ * entries are addressed starting at index HPACK_SHT_SIZE (see the debug
+ * helpers below).
+ */
+const struct http_hdr hpack_sht[HPACK_SHT_SIZE] = {
+	[ 1] = { .n = IST(":authority"),                  .v = IST("")              },
+	[ 2] = { .n = IST(":method"),                     .v = IST("GET")           },
+	[ 3] = { .n = IST(":method"),                     .v = IST("POST")          },
+	[ 4] = { .n = IST(":path"),                       .v = IST("/")             },
+	[ 5] = { .n = IST(":path"),                       .v = IST("/index.html")   },
+	[ 6] = { .n = IST(":scheme"),                     .v = IST("http")          },
+	[ 7] = { .n = IST(":scheme"),                     .v = IST("https")         },
+	[ 8] = { .n = IST(":status"),                     .v = IST("200")           },
+	[ 9] = { .n = IST(":status"),                     .v = IST("204")           },
+	[10] = { .n = IST(":status"),                     .v = IST("206")           },
+	[11] = { .n = IST(":status"),                     .v = IST("304")           },
+	[12] = { .n = IST(":status"),                     .v = IST("400")           },
+	[13] = { .n = IST(":status"),                     .v = IST("404")           },
+	[14] = { .n = IST(":status"),                     .v = IST("500")           },
+	[15] = { .n = IST("accept-charset"),              .v = IST("")              },
+	[16] = { .n = IST("accept-encoding"),             .v = IST("gzip, deflate") },
+	[17] = { .n = IST("accept-language"),             .v = IST("")              },
+	[18] = { .n = IST("accept-ranges"),               .v = IST("")              },
+	[19] = { .n = IST("accept"),                      .v = IST("")              },
+	[20] = { .n = IST("access-control-allow-origin"), .v = IST("")              },
+	[21] = { .n = IST("age"),                         .v = IST("")              },
+	[22] = { .n = IST("allow"),                       .v = IST("")              },
+	[23] = { .n = IST("authorization"),               .v = IST("")              },
+	[24] = { .n = IST("cache-control"),               .v = IST("")              },
+	[25] = { .n = IST("content-disposition"),         .v = IST("")              },
+	[26] = { .n = IST("content-encoding"),            .v = IST("")              },
+	[27] = { .n = IST("content-language"),            .v = IST("")              },
+	[28] = { .n = IST("content-length"),              .v = IST("")              },
+	[29] = { .n = IST("content-location"),            .v = IST("")              },
+	[30] = { .n = IST("content-range"),               .v = IST("")              },
+	[31] = { .n = IST("content-type") ,               .v = IST("")              },
+	[32] = { .n = IST("cookie"),                      .v = IST("")              },
+	[33] = { .n = IST("date"),                        .v = IST("")              },
+	[34] = { .n = IST("etag"),                        .v = IST("")              },
+	[35] = { .n = IST("expect"),                      .v = IST("")              },
+	[36] = { .n = IST("expires"),                     .v = IST("")              },
+	[37] = { .n = IST("from"),                        .v = IST("")              },
+	[38] = { .n = IST("host"),                        .v = IST("")              },
+	[39] = { .n = IST("if-match"),                    .v = IST("")              },
+	[40] = { .n = IST("if-modified-since"),           .v = IST("")              },
+	[41] = { .n = IST("if-none-match"),               .v = IST("")              },
+	[42] = { .n = IST("if-range"),                    .v = IST("")              },
+	[43] = { .n = IST("if-unmodified-since"),         .v = IST("")              },
+	[44] = { .n = IST("last-modified"),               .v = IST("")              },
+	[45] = { .n = IST("link"),                        .v = IST("")              },
+	[46] = { .n = IST("location"),                    .v = IST("")              },
+	[47] = { .n = IST("max-forwards"),                .v = IST("")              },
+	[48] = { .n = IST("proxy-authenticate"),          .v = IST("")              },
+	[49] = { .n = IST("proxy-authorization"),         .v = IST("")              },
+	[50] = { .n = IST("range"),                       .v = IST("")              },
+	[51] = { .n = IST("referer"),                     .v = IST("")              },
+	[52] = { .n = IST("refresh"),                     .v = IST("")              },
+	[53] = { .n = IST("retry-after"),                 .v = IST("")              },
+	[54] = { .n = IST("server"),                      .v = IST("")              },
+	[55] = { .n = IST("set-cookie"),                  .v = IST("")              },
+	[56] = { .n = IST("strict-transport-security"),   .v = IST("")              },
+	[57] = { .n = IST("transfer-encoding"),           .v = IST("")              },
+	[58] = { .n = IST("user-agent"),                  .v = IST("")              },
+	[59] = { .n = IST("vary"),                        .v = IST("")              },
+	[60] = { .n = IST("via"),                         .v = IST("")              },
+	[61] = { .n = IST("www-authenticate"),            .v = IST("")              },
+};
+
+struct pool_head *pool_head_hpack_tbl __read_mostly = NULL;
+
+#ifdef DEBUG_HPACK
+/* dump the whole dynamic header table to <out>, one line per entry with its
+ * global index, the slot in the dte[] array, the name/value strings and the
+ * storage address range. Debug-only helper (DEBUG_HPACK).
+ */
+void hpack_dht_dump(FILE *out, const struct hpack_dht *dht)
+{
+	unsigned int i;
+	unsigned int slot;
+	char name[4096], value[4096];
+
+	/* dynamic entries live after the static table indexes */
+	for (i = HPACK_SHT_SIZE; i < HPACK_SHT_SIZE + dht->used; i++) {
+		slot = (hpack_get_dte(dht, i - HPACK_SHT_SIZE + 1) - dht->dte);
+		fprintf(out, "idx=%u slot=%u name=<%s> value=<%s> addr=%u-%u\n",
+			i, slot,
+			istpad(name, hpack_idx_to_name(dht, i)).ptr,
+			istpad(value, hpack_idx_to_value(dht, i)).ptr,
+			dht->dte[slot].addr, dht->dte[slot].addr+dht->dte[slot].nlen+dht->dte[slot].vlen-1);
+	}
+}
+
+/* check for the whole dynamic header table consistency, abort on failures.
+ * Walks all used entries starting from the tail and verifies that the sum
+ * of their name+value lengths equals dht->total. Debug-only (DEBUG_HPACK).
+ */
+void hpack_dht_check_consistency(const struct hpack_dht *dht)
+{
+	unsigned slot = hpack_dht_get_tail(dht);
+	unsigned used2 = dht->used;
+	unsigned total = 0;
+
+	if (!dht->used)
+		return;
+
+	/* front, used and head must all stay below the wrapping point */
+	if (dht->front >= dht->wrap)
+		abort();
+
+	if (dht->used > dht->wrap)
+		abort();
+
+	if (dht->head >= dht->wrap)
+		abort();
+
+	while (used2--) {
+		total += dht->dte[slot].nlen + dht->dte[slot].vlen;
+		slot++;
+		if (slot >= dht->wrap)
+			slot = 0;
+	}
+
+	if (total != dht->total) {
+		fprintf(stderr, "%d: total=%u dht=%u\n", __LINE__, total, dht->total);
+		abort();
+	}
+}
+#endif // DEBUG_HPACK
+
+/* rebuild a new dynamic header table from <dht> with an unwrapped index and
+ * contents at the end. The new table is returned, the caller must not use the
+ * previous one anymore. NULL may be returned if no table could be allocated.
+ *
+ * Note that the rebuilt table is copied back over <dht> at the end, so on
+ * success the returned pointer is <dht> itself and existing references to
+ * the table remain valid.
+ */
+static struct hpack_dht *hpack_dht_defrag(struct hpack_dht *dht)
+{
+	struct hpack_dht *alt_dht;
+	uint16_t old, new;
+	uint32_t addr;
+
+	/* Note: for small tables we could use alloca() instead but
+	 * portability especially for large tables can be problematic.
+	 */
+	alt_dht = hpack_dht_alloc();
+	if (!alt_dht)
+		return NULL;
+
+	alt_dht->total = dht->total;
+	alt_dht->used = dht->used;
+	alt_dht->wrap = dht->used;
+
+	new = 0;
+	addr = alt_dht->size;
+
+	if (dht->used) {
+		/* start from the tail (oldest entry) so that slot 0 of the
+		 * new table holds the oldest entry and indexes no longer wrap.
+		 */
+		old = hpack_dht_get_tail(dht);
+		do {
+			alt_dht->dte[new].nlen = dht->dte[old].nlen;
+			alt_dht->dte[new].vlen = dht->dte[old].vlen;
+			/* data is packed downwards from the end of the area */
+			addr -= dht->dte[old].nlen + dht->dte[old].vlen;
+			alt_dht->dte[new].addr = addr;
+
+			memcpy((void *)alt_dht + alt_dht->dte[new].addr,
+			       (void *)dht + dht->dte[old].addr,
+			       dht->dte[old].nlen + dht->dte[old].vlen);
+
+			old++;
+			if (old >= dht->wrap)
+				old = 0;
+			new++;
+		} while (new < dht->used);
+	}
+
+	alt_dht->front = alt_dht->head = new - 1;
+
+	/* copy the rebuilt table over the original one and release the spare */
+	memcpy(dht, alt_dht, dht->size);
+	hpack_dht_free(alt_dht);
+
+	return dht;
+}
+
+/* Purges table dht until a header field of <needed> bytes fits according to
+ * the protocol (adding 32 bytes overhead). Returns non-zero on success, zero
+ * on failure (ie: table empty but still not sufficient). It must only be
+ * called when the table is not large enough to suit the new entry and there
+ * are some entries left. In case of doubt, use dht_make_room() instead.
+ */
+int __hpack_dht_make_room(struct hpack_dht *dht, unsigned int needed)
+{
+	unsigned int used = dht->used;
+	unsigned int wrap = dht->wrap;
+	unsigned int tail;
+
+	do {
+		/* tail = head + 1 - used, modulo the wrapping point */
+		tail = ((dht->head + 1U < used) ? wrap : 0) + dht->head + 1U - used;
+		dht->total -= dht->dte[tail].nlen + dht->dte[tail].vlen;
+		/* evicting the front cell moves the front back to the head */
+		if (tail == dht->front)
+			dht->front = dht->head;
+		used--;
+	} while (used && used * 32 + dht->total + needed + 32 > dht->size);
+
+	dht->used = used;
+
+	/* realign if empty */
+	if (!used)
+		dht->front = dht->head = 0;
+
+	/* pack the table if it doesn't wrap anymore */
+	if (dht->head + 1U >= used)
+		dht->wrap = dht->head + 1;
+
+	/* no need to check for 'used' here as if it doesn't fit, used==0 */
+	return needed + 32 <= dht->size;
+}
+
+/* tries to insert a new header <name>:<value> in front of the current head. A
+ * negative value is returned on error. Zero is returned when the entry was
+ * inserted, or when it was too large to ever fit and the table was simply
+ * flushed (which is not a protocol error).
+ */
+int hpack_dht_insert(struct hpack_dht *dht, struct ist name, struct ist value)
+{
+	unsigned int used;
+	unsigned int head;
+	unsigned int prev;
+	unsigned int wrap;
+	unsigned int tail;
+	uint32_t headroom, tailroom;
+
+	if (!hpack_dht_make_room(dht, name.len + value.len))
+		return 0;
+
+	/* Now there is enough room in the table, that's guaranteed by the
+	 * protocol, but not necessarily where we need it.
+	 */
+
+	used = dht->used;
+	if (!used) {
+		/* easy, the table was empty */
+		dht->front = dht->head = 0;
+		dht->wrap = dht->used = 1;
+		dht->total = 0;
+		head = 0;
+		dht->dte[head].addr = dht->size - (name.len + value.len);
+		goto copy;
+	}
+
+	/* compute the new head, used and wrap position */
+	prev = head = dht->head;
+	wrap = dht->wrap;
+	tail = hpack_dht_get_tail(dht);
+
+	used++;
+	head++;
+
+	if (head >= wrap) {
+		/* head is leading the entries, we either need to push the
+		 * table further or to loop back to released entries. We could
+		 * force to loop back when at least half of the allocatable
+		 * entries are free but in practice it never happens.
+		 */
+		if ((sizeof(*dht) + (wrap + 1) * sizeof(dht->dte[0]) <= dht->dte[dht->front].addr))
+			wrap++;
+		else if (head >= used) /* there's a hole at the beginning */
+			head = 0;
+		else {
+			/* no more room, head hits tail and the index cannot be
+			 * extended, we have to realign the whole table.
+			 */
+			if (!hpack_dht_defrag(dht))
+				return -1;
+
+			wrap = dht->wrap + 1;
+			head = dht->head + 1;
+			prev = head - 1;
+			tail = 0;
+		}
+	}
+	else if (used >= wrap) {
+		/* we've hit the tail, we need to reorganize the index so that
+		 * the head is at the end (but not necessarily move the data).
+		 */
+		if (!hpack_dht_defrag(dht))
+			return -1;
+
+		wrap = dht->wrap + 1;
+		head = dht->head + 1;
+		prev = head - 1;
+		tail = 0;
+	}
+
+	/* Now we have updated head, used and wrap, we know that there is some
+	 * available room at least from the protocol's perspective. This space
+	 * is split in two areas :
+	 *
+	 *   1: if the previous head was the front cell, the space between the
+	 *      end of the index table and the front cell's address.
+	 *   2: if the previous head was the front cell, the space between the
+	 *      end of the tail and the end of the table ; or if the previous
+	 *      head was not the front cell, the space between the end of the
+	 *      tail and the head's address.
+	 */
+	if (prev == dht->front) {
+		/* the area was contiguous */
+		headroom = dht->dte[dht->front].addr - (sizeof(*dht) + wrap * sizeof(dht->dte[0]));
+		tailroom = dht->size - dht->dte[tail].addr - dht->dte[tail].nlen - dht->dte[tail].vlen;
+	}
+	else {
+		/* it's already wrapped so we can't store anything in the headroom */
+		headroom = 0;
+		tailroom = dht->dte[prev].addr - dht->dte[tail].addr - dht->dte[tail].nlen - dht->dte[tail].vlen;
+	}
+
+	/* We can decide to stop filling the headroom as soon as there's enough
+	 * room left in the tail to suit the protocol, but tests show that in
+	 * practice it almost never happens in other situations so the extra
+	 * test is useless and we simply fill the headroom as long as it's
+	 * available and we don't wrap.
+	 */
+	if (prev == dht->front && headroom >= name.len + value.len) {
+		/* install upfront and update ->front */
+		dht->dte[head].addr = dht->dte[dht->front].addr - (name.len + value.len);
+		dht->front = head;
+	}
+	else if (tailroom >= name.len + value.len) {
+		dht->dte[head].addr = dht->dte[tail].addr + dht->dte[tail].nlen + dht->dte[tail].vlen + tailroom - (name.len + value.len);
+	}
+	else {
+		/* need to defragment the table before inserting upfront. Just
+		 * like at the two call places above, a defragmentation failure
+		 * (allocation error) must abort the insertion, otherwise we'd
+		 * dereference a NULL table pointer.
+		 */
+		if (!hpack_dht_defrag(dht))
+			return -1;
+
+		wrap = dht->wrap + 1;
+		head = dht->head + 1;
+		dht->dte[head].addr = dht->dte[dht->front].addr - (name.len + value.len);
+		dht->front = head;
+	}
+
+	dht->wrap = wrap;
+	dht->head = head;
+	dht->used = used;
+
+ copy:
+	dht->total         += name.len + value.len;
+	dht->dte[head].nlen = name.len;
+	dht->dte[head].vlen = value.len;
+
+	memcpy((void *)dht + dht->dte[head].addr, name.ptr, name.len);
+	memcpy((void *)dht + dht->dte[head].addr + name.len, value.ptr, value.len);
+	return 0;
+}
diff --git a/src/hq_interop.c b/src/hq_interop.c
new file mode 100644
index 0000000..31c2101
--- /dev/null
+++ b/src/hq_interop.c
@@ -0,0 +1,174 @@
+#include <haproxy/hq_interop.h>
+
+#include <import/ist.h>
+#include <haproxy/buf.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/htx.h>
+#include <haproxy/http.h>
+#include <haproxy/mux_quic.h>
+#include <haproxy/qmux_http.h>
+
+/* Parse a complete HTTP/0.9 ("hq-interop") request from buffer <b> attached
+ * to stream <qcs>, build the equivalent HTX start-line and attach the stream
+ * to a stream connector. Returns the number of consumed bytes, 0 if the
+ * message is not yet complete (fin not seen), or -1 on parsing error.
+ */
+static ssize_t hq_interop_decode_qcs(struct qcs *qcs, struct buffer *b, int fin)
+{
+	struct htx *htx;
+	struct htx_sl *sl;
+	struct buffer htx_buf = BUF_NULL;
+	struct ist path;
+	char *ptr = b_head(b);
+	size_t data = b_data(b);
+
+	/* hq-interop parser does not support buffer wrapping. */
+	BUG_ON(b_data(b) != b_contig_data(b, 0));
+
+	/* hq-interop parser is only done once full message is received. */
+	if (!fin)
+		return 0;
+
+	/* NOTE(review): b_alloc() result is not checked here — presumably
+	 * allocation cannot fail in this context; verify against the dynbuf
+	 * API. Also, the error paths below return -1 without releasing
+	 * <htx_buf>; confirm this does not leak the allocated buffer.
+	 */
+	b_alloc(&htx_buf);
+	htx = htx_from_buf(&htx_buf);
+
+	/* skip method */
+	while (data && HTTP_IS_TOKEN(*ptr)) {
+		ptr++;
+		data--;
+	}
+
+	if (!data || !HTTP_IS_SPHT(*ptr)) {
+		fprintf(stderr, "truncated stream\n");
+		return -1;
+	}
+
+	ptr++;
+	if (!--data) {
+		fprintf(stderr, "truncated stream\n");
+		return -1;
+	}
+
+	if (HTTP_IS_LWS(*ptr)) {
+		fprintf(stderr, "malformed stream\n");
+		return -1;
+	}
+
+	/* extract path */
+	path.ptr = ptr;
+	while (data && !HTTP_IS_LWS(*ptr)) {
+		ptr++;
+		data--;
+	}
+
+	if (!data) {
+		fprintf(stderr, "truncated stream\n");
+		return -1;
+	}
+
+	path.len = ptr - path.ptr;
+
+	/* hq-interop requests are mapped to a bodyless HTTP/1.0 GET */
+	sl = htx_add_stline(htx, HTX_BLK_REQ_SL, 0, ist("GET"), path, ist("HTTP/1.0"));
+	if (!sl)
+		return -1;
+
+	sl->flags |= HTX_SL_F_BODYLESS;
+	sl->info.req.meth = find_http_meth("GET", 3);
+
+	htx_add_endof(htx, HTX_BLK_EOH);
+	htx->flags |= HTX_FL_EOM;
+	htx_to_buf(htx, &htx_buf);
+
+	if (!qcs_attach_sc(qcs, &htx_buf, fin))
+		return -1;
+
+	b_free(&htx_buf);
+
+	return b_data(b);
+}
+
+/* Return the stream's tx buffer, lazily allocating it on first use. Note
+ * that the b_alloc() result is not checked here: on allocation failure the
+ * returned buffer simply keeps a zero size, which the caller observes via
+ * b_contig_space()/b_room().
+ */
+static struct buffer *mux_get_buf(struct qcs *qcs)
+{
+	if (!b_size(&qcs->tx.buf))
+		b_alloc(&qcs->tx.buf);
+
+	return &qcs->tx.buf;
+}
+
+/* Transfer up to <count> bytes of the HTX message in <buf> into the tx
+ * buffer of stream <qcs>, as a raw HTTP/0.9 payload (only DATA blocks are
+ * emitted, all other block types are consumed and dropped). Returns the
+ * number of bytes taken from the HTX message. Sets QC_SF_BLK_MROOM and
+ * stops early when the output buffer is full.
+ */
+static size_t hq_interop_snd_buf(struct qcs *qcs, struct buffer *buf,
+                                 size_t count)
+{
+	enum htx_blk_type btype;
+	struct htx *htx;
+	struct htx_blk *blk;
+	int32_t idx;
+	uint32_t bsize, fsize;
+	struct buffer *res, outbuf;
+	size_t total = 0;
+
+	/* work on a temporary view of the free space of the tx buffer */
+	res = mux_get_buf(qcs);
+	outbuf = b_make(b_tail(res), b_contig_space(res), 0, 0);
+
+	htx = htx_from_buf(buf);
+
+	if (htx->extra && htx->extra == HTX_UNKOWN_PAYLOAD_LENGTH)
+		qcs->flags |= QC_SF_UNKNOWN_PL_LENGTH;
+
+	while (count && !htx_is_empty(htx) && !(qcs->flags & QC_SF_BLK_MROOM)) {
+		/* Not implemented : QUIC on backend side */
+		idx = htx_get_head(htx);
+		blk = htx_get_blk(htx, idx);
+		btype = htx_get_blk_type(blk);
+		fsize = bsize = htx_get_blksz(blk);
+
+		BUG_ON(btype == HTX_BLK_REQ_SL);
+
+		switch (btype) {
+		case HTX_BLK_DATA:
+			/* clamp to the requested count then to the room left */
+			if (fsize > count)
+				fsize = count;
+
+			if (b_room(&outbuf) < fsize)
+				fsize = b_room(&outbuf);
+
+			if (!fsize) {
+				qcs->flags |= QC_SF_BLK_MROOM;
+				goto end;
+			}
+
+			b_putblk(&outbuf, htx_get_blk_ptr(htx, blk), fsize);
+			total += fsize;
+			count -= fsize;
+
+			/* partially-copied DATA blocks are shrunk, not removed */
+			if (fsize == bsize)
+				htx_remove_blk(htx, blk);
+			else
+				htx_cut_data_blk(htx, blk, fsize);
+			break;
+
+			/* only body is transferred on HTTP/0.9 */
+		case HTX_BLK_RES_SL:
+		case HTX_BLK_TLR:
+		case HTX_BLK_EOT:
+		default:
+			htx_remove_blk(htx, blk);
+			total += bsize;
+			count -= bsize;
+			break;
+		}
+	}
+
+ end:
+	/* commit the bytes written into the temporary view */
+	b_add(res, b_data(&outbuf));
+	htx_to_buf(htx, buf);
+
+	return total;
+}
+
+/* App-layer attach callback: arm the wait for a complete HTTP request on
+ * the stream. Always succeeds (returns 0).
+ */
+static int hq_interop_attach(struct qcs *qcs, void *conn_ctx)
+{
+	qcs_wait_http_req(qcs);
+	return 0;
+}
+
+/* hq-interop (HTTP/0.9 over QUIC) application layer operations */
+const struct qcc_app_ops hq_interop_ops = {
+	.decode_qcs = hq_interop_decode_qcs,
+	.snd_buf    = hq_interop_snd_buf,
+	.attach     = hq_interop_attach,
+};
diff --git a/src/http.c b/src/http.c
new file mode 100644
index 0000000..9599e0e
--- /dev/null
+++ b/src/http.c
@@ -0,0 +1,1433 @@
+/*
+ * HTTP semantics
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <haproxy/api.h>
+#include <haproxy/http.h>
+#include <haproxy/tools.h>
+
+/* It is about twice as fast on recent architectures to lookup a byte in a
+ * table than to perform a boolean AND or OR between two tests. Refer to
+ * RFC2616/RFC5234/RFC7230 for those chars. A token is any ASCII char that is
+ * neither a separator nor a CTL char. An http ver_token is any ASCII which can
+ * be found in an HTTP version, which includes 'H', 'T', 'P', '/', '.' and any
+ * digit. Note: please do not overwrite values in assignment since gcc-2.95
+ * will not handle them correctly. It's worth noting that chars 128..255 are
+ * nothing, not even control chars.
+ */
+const unsigned char http_char_classes[256] = {
+	[  0] = HTTP_FLG_CTL,
+	[  1] = HTTP_FLG_CTL,
+	[  2] = HTTP_FLG_CTL,
+	[  3] = HTTP_FLG_CTL,
+	[  4] = HTTP_FLG_CTL,
+	[  5] = HTTP_FLG_CTL,
+	[  6] = HTTP_FLG_CTL,
+	[  7] = HTTP_FLG_CTL,
+	[  8] = HTTP_FLG_CTL,
+	[  9] = HTTP_FLG_SPHT | HTTP_FLG_LWS | HTTP_FLG_SEP | HTTP_FLG_CTL,
+	[ 10] = HTTP_FLG_CRLF | HTTP_FLG_LWS | HTTP_FLG_CTL,
+	[ 11] = HTTP_FLG_CTL,
+	[ 12] = HTTP_FLG_CTL,
+	[ 13] = HTTP_FLG_CRLF | HTTP_FLG_LWS | HTTP_FLG_CTL,
+	[ 14] = HTTP_FLG_CTL,
+	[ 15] = HTTP_FLG_CTL,
+	[ 16] = HTTP_FLG_CTL,
+	[ 17] = HTTP_FLG_CTL,
+	[ 18] = HTTP_FLG_CTL,
+	[ 19] = HTTP_FLG_CTL,
+	[ 20] = HTTP_FLG_CTL,
+	[ 21] = HTTP_FLG_CTL,
+	[ 22] = HTTP_FLG_CTL,
+	[ 23] = HTTP_FLG_CTL,
+	[ 24] = HTTP_FLG_CTL,
+	[ 25] = HTTP_FLG_CTL,
+	[ 26] = HTTP_FLG_CTL,
+	[ 27] = HTTP_FLG_CTL,
+	[ 28] = HTTP_FLG_CTL,
+	[ 29] = HTTP_FLG_CTL,
+	[ 30] = HTTP_FLG_CTL,
+	[ 31] = HTTP_FLG_CTL,
+	[' '] = HTTP_FLG_SPHT | HTTP_FLG_LWS | HTTP_FLG_SEP,
+	['!'] = HTTP_FLG_TOK,
+	['"'] = HTTP_FLG_SEP,
+	['#'] = HTTP_FLG_TOK,
+	['$'] = HTTP_FLG_TOK,
+	['%'] = HTTP_FLG_TOK,
+	['&'] = HTTP_FLG_TOK,
+	[ 39] = HTTP_FLG_TOK, /* 39 = '\'' (single quote) */
+	['('] = HTTP_FLG_SEP,
+	[')'] = HTTP_FLG_SEP,
+	['*'] = HTTP_FLG_TOK,
+	['+'] = HTTP_FLG_TOK,
+	[','] = HTTP_FLG_SEP,
+	['-'] = HTTP_FLG_TOK,
+	['.'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['/'] = HTTP_FLG_SEP | HTTP_FLG_VER,
+	['0'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['1'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['2'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['3'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['4'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['5'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['6'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['7'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['8'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	['9'] = HTTP_FLG_TOK | HTTP_FLG_VER | HTTP_FLG_DIG,
+	[':'] = HTTP_FLG_SEP,
+	[';'] = HTTP_FLG_SEP,
+	['<'] = HTTP_FLG_SEP,
+	['='] = HTTP_FLG_SEP,
+	['>'] = HTTP_FLG_SEP,
+	['?'] = HTTP_FLG_SEP,
+	['@'] = HTTP_FLG_SEP,
+	['A'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['B'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['C'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['D'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['E'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['F'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['G'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['H'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['I'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['J'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['K'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['L'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['M'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['N'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['O'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['P'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['Q'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['R'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['S'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['T'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['U'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['V'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['W'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['X'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['Y'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['Z'] = HTTP_FLG_TOK | HTTP_FLG_VER,
+	['['] = HTTP_FLG_SEP,
+	[ 92] = HTTP_FLG_SEP, /* 92 = '\\' (backslash) */
+	[']'] = HTTP_FLG_SEP,
+	['^'] = HTTP_FLG_TOK,
+	['_'] = HTTP_FLG_TOK,
+	['`'] = HTTP_FLG_TOK,
+	['a'] = HTTP_FLG_TOK,
+	['b'] = HTTP_FLG_TOK,
+	['c'] = HTTP_FLG_TOK,
+	['d'] = HTTP_FLG_TOK,
+	['e'] = HTTP_FLG_TOK,
+	['f'] = HTTP_FLG_TOK,
+	['g'] = HTTP_FLG_TOK,
+	['h'] = HTTP_FLG_TOK,
+	['i'] = HTTP_FLG_TOK,
+	['j'] = HTTP_FLG_TOK,
+	['k'] = HTTP_FLG_TOK,
+	['l'] = HTTP_FLG_TOK,
+	['m'] = HTTP_FLG_TOK,
+	['n'] = HTTP_FLG_TOK,
+	['o'] = HTTP_FLG_TOK,
+	['p'] = HTTP_FLG_TOK,
+	['q'] = HTTP_FLG_TOK,
+	['r'] = HTTP_FLG_TOK,
+	['s'] = HTTP_FLG_TOK,
+	['t'] = HTTP_FLG_TOK,
+	['u'] = HTTP_FLG_TOK,
+	['v'] = HTTP_FLG_TOK,
+	['w'] = HTTP_FLG_TOK,
+	['x'] = HTTP_FLG_TOK,
+	['y'] = HTTP_FLG_TOK,
+	['z'] = HTTP_FLG_TOK,
+	['{'] = HTTP_FLG_SEP,
+	['|'] = HTTP_FLG_TOK,
+	['}'] = HTTP_FLG_SEP,
+	['~'] = HTTP_FLG_TOK,
+	[127] = HTTP_FLG_CTL,
+};
+
+/* numeric HTTP status code for each internal HTTP_ERR_* index; the messages
+ * served for these codes are in http_err_msgs[] below, indexed identically.
+ */
+const int http_err_codes[HTTP_ERR_SIZE] = {
+	[HTTP_ERR_200] = 200,  /* used by "monitor-uri" */
+	[HTTP_ERR_400] = 400,
+	[HTTP_ERR_401] = 401,
+	[HTTP_ERR_403] = 403,
+	[HTTP_ERR_404] = 404,
+	[HTTP_ERR_405] = 405,
+	[HTTP_ERR_407] = 407,
+	[HTTP_ERR_408] = 408,
+	[HTTP_ERR_410] = 410,
+	[HTTP_ERR_413] = 413,
+	[HTTP_ERR_421] = 421,
+	[HTTP_ERR_422] = 422,
+	[HTTP_ERR_425] = 425,
+	[HTTP_ERR_429] = 429,
+	[HTTP_ERR_500] = 500,
+	[HTTP_ERR_501] = 501,
+	[HTTP_ERR_502] = 502,
+	[HTTP_ERR_503] = 503,
+	[HTTP_ERR_504] = 504,
+};
+
+const char *http_err_msgs[HTTP_ERR_SIZE] = {
+ [HTTP_ERR_200] =
+ "HTTP/1.1 200 OK\r\n"
+ "Content-length: 58\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>200 OK</h1>\nService ready.\n</body></html>\n",
+
+ [HTTP_ERR_400] =
+ "HTTP/1.1 400 Bad request\r\n"
+ "Content-length: 90\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Connection: close\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>400 Bad request</h1>\nYour browser sent an invalid request.\n</body></html>\n",
+
+ [HTTP_ERR_401] =
+ "HTTP/1.1 401 Unauthorized\r\n"
+ "Content-length: 112\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>401 Unauthorized</h1>\nYou need a valid user and password to access this content.\n</body></html>\n",
+
+ [HTTP_ERR_403] =
+ "HTTP/1.1 403 Forbidden\r\n"
+ "Content-length: 93\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>403 Forbidden</h1>\nRequest forbidden by administrative rules.\n</body></html>\n",
+
+ [HTTP_ERR_404] =
+ "HTTP/1.1 404 Not Found\r\n"
+ "Content-length: 83\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>404 Not Found</h1>\nThe resource could not be found.\n</body></html>\n",
+
+ [HTTP_ERR_405] =
+ "HTTP/1.1 405 Method Not Allowed\r\n"
+ "Content-length: 146\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>405 Method Not Allowed</h1>\nA request was made of a resource using a request method not supported by that resource\n</body></html>\n",
+
+ [HTTP_ERR_407] =
+ "HTTP/1.1 407 Unauthorized\r\n"
+ "Content-length: 112\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>407 Unauthorized</h1>\nYou need a valid user and password to access this content.\n</body></html>\n",
+
+ [HTTP_ERR_408] =
+ "HTTP/1.1 408 Request Time-out\r\n"
+ "Content-length: 110\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Connection: close\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>408 Request Time-out</h1>\nYour browser didn't send a complete request in time.\n</body></html>\n",
+
+ [HTTP_ERR_410] =
+ "HTTP/1.1 410 Gone\r\n"
+ "Content-length: 114\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>410 Gone</h1>\nThe resource is no longer available and will not be available again.\n</body></html>\n",
+
+ [HTTP_ERR_413] =
+ "HTTP/1.1 413 Payload Too Large\r\n"
+ "Content-length: 106\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>413 Payload Too Large</h1>\nThe request entity exceeds the maximum allowed.\n</body></html>\n",
+
+ [HTTP_ERR_421] =
+ "HTTP/1.1 421 Misdirected Request\r\n"
+ "Content-length: 104\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>421 Misdirected Request</h1>\nRequest sent to a non-authoritative server.\n</body></html>\n",
+
+ [HTTP_ERR_422] =
+ "HTTP/1.1 422 Unprocessable Content\r\n"
+ "Content-length: 116\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>422 Unprocessable Content</h1>\nThe server cannot process the contained instructions.\n</body></html>\n",
+
+ [HTTP_ERR_425] =
+ "HTTP/1.1 425 Too Early\r\n"
+ "Content-length: 80\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>425 Too Early</h1>\nYour browser sent early data.\n</body></html>\n",
+
+ [HTTP_ERR_429] =
+ "HTTP/1.1 429 Too Many Requests\r\n"
+ "Content-length: 117\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>429 Too Many Requests</h1>\nYou have sent too many requests in a given amount of time.\n</body></html>\n",
+
+ [HTTP_ERR_500] =
+ "HTTP/1.1 500 Internal Server Error\r\n"
+ "Content-length: 97\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>500 Internal Server Error</h1>\nAn internal server error occurred.\n</body></html>\n",
+
+ [HTTP_ERR_501] =
+ "HTTP/1.1 501 Not Implemented\r\n"
+ "Content-length: 136\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>501 Not Implemented</h1>\n.The server does not support the functionality required to fulfill the request.\n</body></html>\n",
+
+ [HTTP_ERR_502] =
+ "HTTP/1.1 502 Bad Gateway\r\n"
+ "Content-length: 107\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>502 Bad Gateway</h1>\nThe server returned an invalid or incomplete response.\n</body></html>\n",
+
+ [HTTP_ERR_503] =
+ "HTTP/1.1 503 Service Unavailable\r\n"
+ "Content-length: 107\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>503 Service Unavailable</h1>\nNo server is available to handle this request.\n</body></html>\n",
+
+ [HTTP_ERR_504] =
+ "HTTP/1.1 504 Gateway Time-out\r\n"
+ "Content-length: 92\r\n"
+ "Cache-Control: no-cache\r\n"
+ "Content-Type: text/html\r\n"
+ "\r\n"
+ "<html><body><h1>504 Gateway Time-out</h1>\nThe server didn't respond in time.\n</body></html>\n",
+};
+
+const struct ist http_known_methods[HTTP_METH_OTHER] = {
+ [HTTP_METH_OPTIONS] = IST("OPTIONS"),
+ [HTTP_METH_GET] = IST("GET"),
+ [HTTP_METH_HEAD] = IST("HEAD"),
+ [HTTP_METH_POST] = IST("POST"),
+ [HTTP_METH_PUT] = IST("PUT"),
+ [HTTP_METH_DELETE] = IST("DELETE"),
+ [HTTP_METH_TRACE] = IST("TRACE"),
+ [HTTP_METH_CONNECT] = IST("CONNECT"),
+};
+
+/*
+ * returns a known method among HTTP_METH_* or HTTP_METH_OTHER for all unknown
+ * ones.
+ */
+enum http_meth_t find_http_meth(const char *str, const int len)
+{
+ const struct ist m = ist2(str, len);
+
+ if (isteq(m, ist("GET"))) return HTTP_METH_GET;
+ else if (isteq(m, ist("HEAD"))) return HTTP_METH_HEAD;
+ else if (isteq(m, ist("POST"))) return HTTP_METH_POST;
+ else if (isteq(m, ist("CONNECT"))) return HTTP_METH_CONNECT;
+ else if (isteq(m, ist("PUT"))) return HTTP_METH_PUT;
+ else if (isteq(m, ist("OPTIONS"))) return HTTP_METH_OPTIONS;
+ else if (isteq(m, ist("DELETE"))) return HTTP_METH_DELETE;
+ else if (isteq(m, ist("TRACE"))) return HTTP_METH_TRACE;
+ else return HTTP_METH_OTHER;
+}
+
+/* This function returns HTTP_ERR_<num> (enum) matching http status code.
+ * Returned value should match codes from http_err_codes.
+ */
+int http_get_status_idx(unsigned int status)
+{
+ switch (status) {
+ case 200: return HTTP_ERR_200;
+ case 400: return HTTP_ERR_400;
+ case 401: return HTTP_ERR_401;
+ case 403: return HTTP_ERR_403;
+ case 404: return HTTP_ERR_404;
+ case 405: return HTTP_ERR_405;
+ case 407: return HTTP_ERR_407;
+ case 408: return HTTP_ERR_408;
+ case 410: return HTTP_ERR_410;
+ case 413: return HTTP_ERR_413;
+ case 421: return HTTP_ERR_421;
+ case 422: return HTTP_ERR_422;
+ case 425: return HTTP_ERR_425;
+ case 429: return HTTP_ERR_429;
+ case 500: return HTTP_ERR_500;
+ case 501: return HTTP_ERR_501;
+ case 502: return HTTP_ERR_502;
+ case 503: return HTTP_ERR_503;
+ case 504: return HTTP_ERR_504;
+ default: return HTTP_ERR_500;
+ }
+}
+
/* This function returns a reason associated with the HTTP status.
 * This function never fails, a message is always returned.
 * Note: the table mixes standard codes with a few vendor/legacy ones
 * (e.g. 310, 449, 450, 499, 509, 520); any other code falls back to a
 * generic description of its status class.
 */
const char *http_get_reason(unsigned int status)
{
	switch (status) {
	/* 1xx: informational */
	case 100: return "Continue";
	case 101: return "Switching Protocols";
	case 102: return "Processing";
	/* 2xx: success */
	case 200: return "OK";
	case 201: return "Created";
	case 202: return "Accepted";
	case 203: return "Non-Authoritative Information";
	case 204: return "No Content";
	case 205: return "Reset Content";
	case 206: return "Partial Content";
	case 207: return "Multi-Status";
	case 210: return "Content Different";
	case 226: return "IM Used";
	/* 3xx: redirection */
	case 300: return "Multiple Choices";
	case 301: return "Moved Permanently";
	case 302: return "Found";
	case 303: return "See Other";
	case 304: return "Not Modified";
	case 305: return "Use Proxy";
	case 307: return "Temporary Redirect";
	case 308: return "Permanent Redirect";
	case 310: return "Too many Redirects";
	/* 4xx: client errors */
	case 400: return "Bad Request";
	case 401: return "Unauthorized";
	case 402: return "Payment Required";
	case 403: return "Forbidden";
	case 404: return "Not Found";
	case 405: return "Method Not Allowed";
	case 406: return "Not Acceptable";
	case 407: return "Proxy Authentication Required";
	case 408: return "Request Time-out";
	case 409: return "Conflict";
	case 410: return "Gone";
	case 411: return "Length Required";
	case 412: return "Precondition Failed";
	case 413: return "Request Entity Too Large";
	case 414: return "Request-URI Too Long";
	case 415: return "Unsupported Media Type";
	case 416: return "Requested range unsatisfiable";
	case 417: return "Expectation failed";
	case 418: return "I'm a teapot";
	case 421: return "Misdirected Request";
	case 422: return "Unprocessable Content";
	case 423: return "Locked";
	case 424: return "Method failure";
	case 425: return "Too Early";
	case 426: return "Upgrade Required";
	case 428: return "Precondition Required";
	case 429: return "Too Many Requests";
	case 431: return "Request Header Fields Too Large";
	case 449: return "Retry With";
	case 450: return "Blocked by Windows Parental Controls";
	case 451: return "Unavailable For Legal Reasons";
	case 456: return "Unrecoverable Error";
	case 499: return "client has closed connection";
	/* 5xx: server errors */
	case 500: return "Internal Server Error";
	case 501: return "Not Implemented";
	case 502: return "Bad Gateway or Proxy Error";
	case 503: return "Service Unavailable";
	case 504: return "Gateway Time-out";
	case 505: return "HTTP Version not supported";
	case 506: return "Variant also negotiate";
	case 507: return "Insufficient storage";
	case 508: return "Loop detected";
	case 509: return "Bandwidth Limit Exceeded";
	case 510: return "Not extended";
	case 511: return "Network authentication required";
	case 520: return "Web server is returning an unknown error";
	default:
		/* unknown code: describe the class it belongs to */
		switch (status) {
		case 100 ... 199: return "Informational";
		case 200 ... 299: return "Success";
		case 300 ... 399: return "Redirection";
		case 400 ... 499: return "Client Error";
		case 500 ... 599: return "Server Error";
		default:          return "Other";
		}
	}
}
+
+/* Returns the ist string corresponding to port part (without ':') in the host
+ * <host>, IST_NULL if no ':' is found or an empty IST if there is no digit. In
+ * the last case, the result is the original ist trimmed to 0. So be sure to test
+ * the result length before doing any pointer arithmetic.
+*/
+struct ist http_get_host_port(const struct ist host)
+{
+ char *start, *end, *ptr;
+
+ start = istptr(host);
+ end = istend(host);
+ for (ptr = end; ptr > start && isdigit((unsigned char)*--ptr););
+
+ /* no port found */
+ if (likely(*ptr != ':'))
+ return IST_NULL;
+ if (ptr+1 == end)
+ return isttrim(host, 0);
+
+ return istnext(ist2(ptr, end - ptr));
+}
+
+
+/* Return non-zero if the port <port> is a default port. If the scheme <schm> is
+ * set, it is used to detect default ports (HTTP => 80 and HTTPS => 443)
+ * port. Otherwise, both are considered as default ports.
+ */
+int http_is_default_port(const struct ist schm, const struct ist port)
+{
+ if (!istlen(port))
+ return 1;
+
+ if (!isttest(schm))
+ return (isteq(port, ist("443")) || isteq(port, ist("80")));
+ else
+ return (isteq(port, ist("443")) && isteqi(schm, ist("https://"))) ||
+ (isteq(port, ist("80")) && isteqi(schm, ist("http://")));
+}
+
+/* Returns non-zero if the scheme <schm> is syntactically correct according to
+ * RFC3986#3.1, otherwise zero. It expects only the scheme and nothing else
+ * (particularly not the following "://").
+ * Scheme = alpha *(alpha|digit|'+'|'-'|'.')
+ */
+int http_validate_scheme(const struct ist schm)
+{
+ size_t i;
+
+ for (i = 0; i < schm.len; i++) {
+ if (likely((schm.ptr[i] >= 'a' && schm.ptr[i] <= 'z') ||
+ (schm.ptr[i] >= 'A' && schm.ptr[i] <= 'Z')))
+ continue;
+ if (unlikely(!i)) // first char must be alpha
+ return 0;
+ if ((schm.ptr[i] >= '0' && schm.ptr[i] <= '9') ||
+ schm.ptr[i] == '+' || schm.ptr[i] == '-' || schm.ptr[i] == '.')
+ continue;
+ return 0;
+ }
+ return !!i;
+}
+
+/* Parse the uri and looks for the scheme. If not found, an empty ist is
+ * returned. Otherwise, the ist pointing to the scheme is returned.
+ *
+ * <parser> must have been initialized via http_uri_parser_init. See the
+ * related http_uri_parser documentation for the specific API usage.
+ */
+struct ist http_parse_scheme(struct http_uri_parser *parser)
+{
+ const char *ptr, *start, *end;
+
+ if (parser->state >= URI_PARSER_STATE_SCHEME_DONE)
+ goto not_found;
+
+ if (parser->format != URI_PARSER_FORMAT_ABSURI_OR_AUTHORITY)
+ goto not_found;
+
+ ptr = start = istptr(parser->uri);
+ end = istend(parser->uri);
+
+ if (isalpha((unsigned char)*ptr)) {
+ /* this is a scheme as described by RFC3986, par. 3.1, or only
+ * an authority (in case of a CONNECT method).
+ */
+ ptr++;
+ /* retrieve the scheme up to the suffix '://'. If the suffix is
+ * not found, this means there is no scheme and it is an
+ * authority-only uri.
+ */
+ while (ptr < end &&
+ (isalnum((unsigned char)*ptr) || *ptr == '+' || *ptr == '-' || *ptr == '.'))
+ ptr++;
+ if (ptr == end || *ptr++ != ':')
+ goto not_found;
+ if (ptr == end || *ptr++ != '/')
+ goto not_found;
+ if (ptr == end || *ptr++ != '/')
+ goto not_found;
+ }
+ else {
+ goto not_found;
+ }
+
+ parser->uri = ist2(ptr, end - ptr);
+ parser->state = URI_PARSER_STATE_SCHEME_DONE;
+ return ist2(start, ptr - start);
+
+ not_found:
+ parser->state = URI_PARSER_STATE_SCHEME_DONE;
+ return IST_NULL;
+}
+
+/* Parse the uri and looks for the authority, between the scheme and the
+ * path. if no_userinfo is not zero, the part before the '@' (including it) is
+ * skipped. If not found, an empty ist is returned. Otherwise, the ist pointing
+ * on the authority is returned.
+ *
+ * <parser> must have been initialized via http_uri_parser_init. See the
+ * related http_uri_parser documentation for the specific API usage.
+ */
+struct ist http_parse_authority(struct http_uri_parser *parser, int no_userinfo)
+{
+ const char *ptr, *start, *end;
+
+ if (parser->state >= URI_PARSER_STATE_AUTHORITY_DONE)
+ goto not_found;
+
+ if (parser->format != URI_PARSER_FORMAT_ABSURI_OR_AUTHORITY)
+ goto not_found;
+
+ if (parser->state < URI_PARSER_STATE_SCHEME_DONE)
+ http_parse_scheme(parser);
+
+ ptr = start = istptr(parser->uri);
+ end = istend(parser->uri);
+
+ while (ptr < end && *ptr != '/') {
+ if (*ptr++ == '@' && no_userinfo)
+ start = ptr;
+ }
+
+ /* OK, ptr point on the '/' or the end */
+
+ authority:
+ parser->uri = ist2(ptr, end - ptr);
+ parser->state = URI_PARSER_STATE_AUTHORITY_DONE;
+ return ist2(start, ptr - start);
+
+ not_found:
+ parser->state = URI_PARSER_STATE_AUTHORITY_DONE;
+ return IST_NULL;
+}
+
+/* Parse the URI from the given transaction (which is assumed to be in request
+ * phase) and look for the "/" beginning the PATH. If not found, ist2(0,0) is
+ * returned. Otherwise the pointer and length are returned.
+ *
+ * <parser> must have been initialized via http_uri_parser_init. See the
+ * related http_uri_parser documentation for the specific API usage.
+ */
+struct ist http_parse_path(struct http_uri_parser *parser)
+{
+ const char *ptr, *end;
+
+ if (parser->state >= URI_PARSER_STATE_PATH_DONE)
+ goto not_found;
+
+ if (parser->format == URI_PARSER_FORMAT_EMPTY ||
+ parser->format == URI_PARSER_FORMAT_ASTERISK) {
+ goto not_found;
+ }
+
+ ptr = istptr(parser->uri);
+ end = istend(parser->uri);
+
+ /* If the uri is in absolute-path format, first skip the scheme and
+ * authority parts. No scheme will be found if the uri is in authority
+ * format, which indicates that the path won't be present.
+ */
+ if (parser->format == URI_PARSER_FORMAT_ABSURI_OR_AUTHORITY) {
+ if (parser->state < URI_PARSER_STATE_SCHEME_DONE) {
+ /* If no scheme found, uri is in authority format. No
+ * path is present.
+ */
+ if (!isttest(http_parse_scheme(parser)))
+ goto not_found;
+ }
+
+ if (parser->state < URI_PARSER_STATE_AUTHORITY_DONE)
+ http_parse_authority(parser, 1);
+
+ ptr = istptr(parser->uri);
+
+ if (ptr == end)
+ goto not_found;
+ }
+
+ parser->state = URI_PARSER_STATE_PATH_DONE;
+ return ist2(ptr, end - ptr);
+
+ not_found:
+ parser->state = URI_PARSER_STATE_PATH_DONE;
+ return IST_NULL;
+}
+
/* Parse <value> Content-Length header field of an HTTP request. The function
 * checks all possible occurrences of a comma-delimited value, and verifies if
 * any of them doesn't match a previous value. <value> is sanitized on return
 * to contain a single value if several identical values were found.
 *
 * <body_len> must be a valid pointer and is used to return the parsed length
 * unless values differ. Also if <not_first> is true, <body_len> is assumed to
 * point to previously parsed value and which must be equal to the new length.
 * This is useful if an HTTP message contains several Content-Length headers.
 *
 * Returns <0 if a value differs, 0 if the whole header can be dropped (i.e.
 * already known), or >0 if the value can be indexed (first one). In the last
 * case, the value might be adjusted and the caller must only add the updated
 * value.
 */
int http_parse_cont_len_header(struct ist *value, unsigned long long *body_len,
                               int not_first)
{
	char *e, *n;
	unsigned long long cl;
	struct ist word;
	int check_prev = not_first;

	word.ptr = value->ptr;
	e = value->ptr + value->len;

	/* one iteration per comma-delimited element of the header value */
	while (1) {
		if (word.ptr >= e) {
			/* empty header or empty value */
			goto fail;
		}

		/* skip leading delimiter and blanks */
		if (unlikely(HTTP_IS_LWS(*word.ptr))) {
			word.ptr++;
			continue;
		}

		/* digits only now; parse the number while checking overflow */
		for (cl = 0, n = word.ptr; n < e; n++) {
			unsigned int c = *n - '0';
			if (unlikely(c > 9)) {
				/* non-digit */
				if (unlikely(n == word.ptr)) // spaces only
					goto fail;
				break;
			}

			if (unlikely(!cl && n > word.ptr)) {
				/* There was a leading zero before this digit,
				 * let's trim it.
				 */
				word.ptr = n;
			}

			if (unlikely(cl > ULLONG_MAX / 10ULL))
				goto fail; /* multiply overflow */
			cl = cl * 10ULL;
			if (unlikely(cl + c < cl))
				goto fail; /* addition overflow */
			cl = cl + c;
		}

		/* keep a copy of the exact cleaned value (leading zeroes
		 * trimmed); this is what the caller is expected to index.
		 */
		word.len = n - word.ptr;

		/* skip trailing LWS till next comma or EOL; any other
		 * character makes the whole header invalid.
		 */
		for (; n < e; n++) {
			if (!HTTP_IS_LWS(*n)) {
				if (unlikely(*n != ','))
					goto fail;
				break;
			}
		}

		/* if duplicate, must be equal */
		if (check_prev && cl != *body_len)
			goto fail;

		/* OK, store this result as the one to be indexed */
		*body_len = cl;
		*value = word;

		/* Now either n==e and we're done, or n points to the comma,
		 * and we skip it and continue.
		 */
		if (n++ == e)
			break;

		word.ptr = n;
		check_prev = 1; /* subsequent elements must match this one */
	}

	/* here we've reached the end with a single value or a series of
	 * identical values, all matching previous series if any. The last
	 * parsed value was sent back into <value>. We just have to decide
	 * if this occurrence has to be indexed (it's the first one) or
	 * silently skipped (it's not the first one)
	 */
	return !not_first;
 fail:
	return -1;
}
+
/*
 * Checks if <hdr> is exactly <name> for <len> chars, and ends with a colon.
 * If so, returns the position of the first non-space character relative to
 * <hdr>, or <end>-<hdr> if not found before. If no value is found, it tries
 * to return a pointer to the place after the first space. Returns 0 if the
 * header name does not match. Checks are case-insensitive.
 */
int http_header_match2(const char *hdr, const char *end,
                       const char *name, int len)
{
	const char *p;

	/* need the name, a colon and at least one more char, and the name
	 * itself must match (case-insensitively).
	 */
	if (hdr + len >= end || hdr[len] != ':' ||
	    strncasecmp(hdr, name, len) != 0)
		return 0;

	p = hdr + len + 1;
	while (p < end && HTTP_IS_SPHT(*p))
		p++;

	/* only spaces after the colon: point after the first space so a
	 * replacement can start there, provided it fits in the header.
	 */
	if (p >= end && len + 2 <= end - hdr)
		return len + 2;

	return p - hdr;
}
+
/* Find the end of the header value contained between <s> and <e>. See RFC7230,
 * par 3.2 for more information. Note that it requires a valid header to return
 * a valid result. This works for headers defined as comma-separated lists.
 */
char *http_find_hdr_value_end(char *s, const char *e)
{
	int quoted, qdpair;

	quoted = qdpair = 0;

#ifdef HA_UNALIGNED_LE
	/* speedup: skip everything not a comma nor a double quote.
	 * This is the classic word-at-a-time "has-zero-byte" trick: XORing
	 * the word with a repeated byte pattern turns matching bytes into
	 * zero, then (x - 0x01010101) & ~x sets bit 7 of every byte that was
	 * zero. It requires little-endian unaligned loads, hence the guard.
	 * NOTE(review): the (int *) cast technically breaks strict aliasing;
	 * presumably the build uses -fno-strict-aliasing — confirm before
	 * reusing this pattern elsewhere.
	 */
	for (; s <= e - sizeof(int); s += sizeof(int)) {
		unsigned int c = *(int *)s; // comma
		unsigned int q = c;         // quote

		c ^= 0x2c2c2c2c; // contains one zero on a comma
		q ^= 0x22222222; // contains one zero on a quote

		c = (c - 0x01010101) & ~c; // contains 0x80 below a comma
		q = (q - 0x01010101) & ~q; // contains 0x80 below a quote

		if ((c | q) & 0x80808080)
			break; // found a comma or a quote
	}
#endif
	/* byte-wise scan: track quoted-string state and backslash pairs so
	 * that commas inside quotes never terminate the value.
	 */
	for (; s < e; s++) {
		if (qdpair) qdpair = 0;              /* char escaped by '\' */
		else if (quoted) {
			if (*s == '\\') qdpair = 1;  /* start of quoted-pair */
			else if (*s == '"') quoted = 0;
		}
		else if (*s == '"') quoted = 1;
		else if (*s == ',') return s;        /* unquoted list separator */
	}
	return s;
}
+
/* Find the end of a cookie value contained between <s> and <e>. It works the
 * same way as with headers above except that the semi-colon also ends a token.
 * See RFC2965 for more information. Note that it requires a valid header to
 * return a valid result.
 */
char *http_find_cookie_value_end(char *s, const char *e)
{
	int in_quotes = 0;
	int escaped = 0;

	while (s < e) {
		if (escaped)
			escaped = 0;           /* char escaped by '\' */
		else if (in_quotes) {
			if (*s == '\\')
				escaped = 1;   /* start of quoted-pair */
			else if (*s == '"')
				in_quotes = 0;
		}
		else if (*s == '"')
			in_quotes = 1;
		else if (*s == ',' || *s == ';')
			return s;              /* unquoted delimiter found */
		s++;
	}
	return s;
}
+
/* Try to find the next occurrence of a cookie name in a cookie header value.
 * To match on any cookie name, <cookie_name_l> must be set to 0.
 * The lookup begins at <hdr>. The pointer and size of the next occurrence of
 * the cookie value is returned into *value and *value_l, and the function
 * returns a pointer to the next pointer to search from if the value was found.
 * Otherwise if the cookie was not found, NULL is returned and neither value
 * nor value_l are touched. The input <hdr> string should first point to the
 * header's value, and the <hdr_end> pointer must point to the first character
 * not part of the value. <list> must be non-zero if value may represent a list
 * of values (cookie headers). This makes it faster to abort parsing when no
 * list is expected.
 */
char *http_extract_cookie_value(char *hdr, const char *hdr_end,
                                char *cookie_name, size_t cookie_name_l,
                                int list, char **value, size_t *value_l)
{
	char *equal, *att_end, *att_beg, *val_beg, *val_end;
	char *next;

	/* we search at least a cookie name followed by an equal, and more
	 * generally something like this :
	 * Cookie: NAME1 = VALUE 1 ; NAME2 = VALUE2 ; NAME3 = VALUE3\r\n
	 */
	for (att_beg = hdr; att_beg + cookie_name_l + 1 < hdr_end; att_beg = next + 1) {
		/* Iterate through all cookies on this line */

		/* skip leading spaces before the attribute name */
		while (att_beg < hdr_end && HTTP_IS_SPHT(*att_beg))
			att_beg++;

		/* find att_end : this is the first character after the last non
		 * space before the equal. It may be equal to hdr_end.
		 */
		equal = att_end = att_beg;

		while (equal < hdr_end) {
			if (*equal == '=' || *equal == ';' || (list && *equal == ','))
				break;
			if (HTTP_IS_SPHT(*equal++))
				continue;
			att_end = equal;
		}

		/* here, <equal> points to '=', a delimiter or the end. <att_end>
		 * is between <att_beg> and <equal>, both may be identical.
		 */

		/* look for end of cookie if there is an equal sign */
		if (equal < hdr_end && *equal == '=') {
			/* look for the beginning of the value */
			val_beg = equal + 1;
			while (val_beg < hdr_end && HTTP_IS_SPHT(*val_beg))
				val_beg++;

			/* find the end of the value, respecting quotes */
			next = http_find_cookie_value_end(val_beg, hdr_end);

			/* make val_end point to the first white space or delimiter after the value */
			val_end = next;
			while (val_end > val_beg && HTTP_IS_SPHT(*(val_end - 1)))
				val_end--;
		} else {
			/* no '=' found: attribute has no value */
			val_beg = val_end = next = equal;
		}

		/* We have nothing to do with attributes beginning with '$'. However,
		 * they will automatically be removed if a header before them is removed,
		 * since they're supposed to be linked together.
		 */
		if (*att_beg == '$')
			continue;

		/* Ignore cookies with no equal sign */
		if (equal == next)
			continue;

		/* Now we have the cookie name between att_beg and att_end, and
		 * its value between val_beg and val_end.
		 */

		if (cookie_name_l == 0 || (att_end - att_beg == cookie_name_l &&
		                           memcmp(att_beg, cookie_name, cookie_name_l) == 0)) {
			/* let's return this value and indicate where to go on from;
			 * next+1 skips the delimiter that ended the value.
			 */
			*value = val_beg;
			*value_l = val_end - val_beg;
			return next + 1;
		}

		/* Set-Cookie headers only have the name in the first attr=value part */
		if (!list)
			break;
	}

	return NULL;
}
+
/* Try to find the next cookie name in a cookie header given a pointer
 * <hdr_beg> to the starting position, a pointer <hdr_end> to the ending
 * position to search in the cookie and a boolean <is_req> of type int that
 * indicates if the stream direction is for request or response.
 * The lookup begins at <hdr_beg>, which is assumed to be in
 * Cookie / Set-Cookie header, and the function returns a pointer to the next
 * position to search from if a valid cookie k-v pair is found for Cookie
 * request header (<is_req> is non-zero) and <hdr_end> for Set-Cookie response
 * header (<is_req> is zero). When the next cookie name is found, <ptr> will
 * be pointing to the start of the cookie name, and <len> will be the length
 * of the cookie name.
 * Otherwise if there is no valid cookie k-v pair, NULL is returned.
 * The <hdr_end> pointer must point to the first character
 * not part of the Cookie / Set-Cookie header.
 */
char *http_extract_next_cookie_name(char *hdr_beg, char *hdr_end, int is_req,
                                    char **ptr, size_t *len)
{
	char *equal, *att_end, *att_beg, *val_beg;
	char *next;

	/* We search a valid cookie name between hdr_beg and hdr_end,
	 * followed by an equal. For example for the following cookie:
	 * Cookie: NAME1 = VALUE 1 ; NAME2 = VALUE2 ; NAME3 = VALUE3\r\n
	 * We want to find NAME1, NAME2, or NAME3 depending on where we start our search
	 * according to <hdr_beg>
	 */
	for (att_beg = hdr_beg; att_beg + 1 < hdr_end; att_beg = next + 1) {
		/* skip leading spaces before the attribute name */
		while (att_beg < hdr_end && HTTP_IS_SPHT(*att_beg))
			att_beg++;

		/* find <att_end> : this is the first character after the last non
		 * space before the equal. It may be equal to <hdr_end>.
		 */
		equal = att_end = att_beg;

		while (equal < hdr_end) {
			if (*equal == '=' || *equal == ';')
				break;
			if (HTTP_IS_SPHT(*equal++))
				continue;
			att_end = equal;
		}

		/* Here, <equal> points to '=', a delimiter or the end. <att_end>
		 * is between <att_beg> and <equal>, both may be identical.
		 */

		/* Look for end of cookie if there is an equal sign */
		if (equal < hdr_end && *equal == '=') {
			/* Look for the beginning of the value */
			val_beg = equal + 1;
			while (val_beg < hdr_end && HTTP_IS_SPHT(*val_beg))
				val_beg++;

			/* Find the end of the value, respecting quotes */
			next = http_find_cookie_value_end(val_beg, hdr_end);
		} else {
			/* no '=' found: attribute has no value */
			next = equal;
		}

		/* We have nothing to do with attributes beginning with '$'. However,
		 * they will automatically be removed if a header before them is removed,
		 * since they're supposed to be linked together.
		 */
		if (*att_beg == '$')
			continue;

		/* Ignore cookies with no equal sign */
		if (equal == next)
			continue;

		/* Now we have the cookie name between <att_beg> and <att_end>, and
		 * <next> points to the end of cookie value
		 */
		*ptr = att_beg;
		*len = att_end - att_beg;

		/* Return next position for Cookie request header and <hdr_end> for
		 * Set-Cookie response header as each Set-Cookie header is assumed to
		 * contain only 1 cookie
		 */
		if (is_req)
			return next + 1;
		return hdr_end;
	}

	return NULL;
}
+
/* Parses a qvalue and returns it multiplied by 1000, from 0 to 1000. If the
 * value is larger than 1000, it is bound to 1000. The parser consumes up to
 * 1 digit, one dot and 3 digits and stops on the first invalid character.
 * Unparsable qvalues return 1000 as "q=1.000".
 * NOTE(review): like the original, the character tested against '.' is
 * consumed even when the test fails, so <end> may point one char past the
 * first invalid character in that specific case.
 */
int http_parse_qvalue(const char *qvalue, const char **end)
{
	static const int weight[3] = { 100, 10, 1 };
	int q = 1000;
	int i;

	if (isdigit((unsigned char)*qvalue)) {
		/* integer part, weight 1000 */
		q = (*qvalue++ - '0') * 1000;

		if (*qvalue++ == '.') {
			/* up to three decimal digits */
			for (i = 0; i < 3; i++) {
				if (!isdigit((unsigned char)*qvalue))
					break;
				q += (*qvalue++ - '0') * weight[i];
			}
		}
	}

	if (q > 1000)
		q = 1000;
	if (end)
		*end = qvalue;
	return q;
}
+
/*
 * Given a url parameter, find the starting position of the first occurrence,
 * or NULL if the parameter is not found.
 *
 * Example: if query_string is "yo=mama;ye=daddy" and url_param_name is "ye",
 * the function will return query_string+8.
 *
 * Warning: this function returns a pointer that can point to the first chunk
 * or the second chunk. The caller must be check the position before using the
 * result.
 *
 * NOTE(review): <chunks> appears to be a 4-entry array describing up to two
 * contiguous buffers: [0]/[1] = start/end of the first chunk, [2]/[3] =
 * start/end of the optional second one ([2] NULL when absent) — confirm
 * against fix_pointer_if_wrap()'s contract.
 */
const char *http_find_url_param_pos(const char **chunks,
                                    const char* url_param_name, size_t url_param_name_l,
                                    char delim, char insensitive)
{
	const char *pos, *last, *equal;
	const char **bufs = chunks;
	int l1, l2;


	pos = bufs[0];
	last = bufs[1];
	while (pos < last) {
		/* Check the equal. */
		equal = pos + url_param_name_l;
		if (fix_pointer_if_wrap(chunks, &equal)) {
			if (equal >= chunks[3])
				return NULL;
		} else {
			if (equal >= chunks[1])
				return NULL;
		}
		if (*equal == '=') {
			if (pos + url_param_name_l > last) {
				/* process wrap case, we detect a wrap. In this case, the
				 * comparison is performed in two parts.
				 */

				/* This is the end, we don't have any other chunk. */
				if (bufs != chunks || !bufs[2])
					return NULL;

				/* Compute the length of each part of the comparison. */
				l1 = last - pos;
				l2 = url_param_name_l - l1;

				/* The second buffer is too short to contain the compared string. */
				if (bufs[2] + l2 > bufs[3])
					return NULL;

				if (insensitive) {
					if (strncasecmp(pos, url_param_name, l1) == 0 &&
					    strncasecmp(bufs[2], url_param_name+l1, l2) == 0)
						return pos;
				}
				else {
					if (memcmp(pos, url_param_name, l1) == 0 &&
					    memcmp(bufs[2], url_param_name+l1, l2) == 0)
						return pos;
				}

				/* Perform wrapping and jump the string who fail the comparison. */
				bufs += 2;
				pos = bufs[0] + l2;
				last = bufs[1];

			} else {
				/* process a simple comparison.*/
				if (insensitive) {
					if (strncasecmp(pos, url_param_name, url_param_name_l) == 0)
						return pos;
				} else {
					if (memcmp(pos, url_param_name, url_param_name_l) == 0)
						return pos;
				}
				/* name did not match: jump past "name=" */
				pos += url_param_name_l + 1;
				if (fix_pointer_if_wrap(chunks, &pos))
					last = bufs[2];
			}
		}

		/* skip to the start of the next parameter */
		while (1) {
			/* Look for the next delimiter. */
			while (pos < last && !http_is_param_delimiter(*pos, delim))
				pos++;
			if (pos < last)
				break;
			/* process buffer wrapping. */
			if (bufs != chunks || !bufs[2])
				return NULL;
			bufs += 2;
			pos = bufs[0];
			last = bufs[1];
		}
		pos++;
	}
	return NULL;
}
+
/*
 * Given a url parameter name and a query string, find the next value.
 * An empty url_param_name matches the first available parameter.
 * If the parameter is found, 1 is returned and *vstart / *vend are updated to
 * respectively provide a pointer to the value and its end.
 * Otherwise, 0 is returned and vstart/vend are not modified.
 */
int http_find_next_url_param(const char **chunks,
                             const char* url_param_name, size_t url_param_name_l,
                             const char **vstart, const char **vend, char delim, char insensitive)
{
	const char *arg_start, *qs_end;
	const char *value_start, *value_end;

	arg_start = chunks[0];
	qs_end = chunks[1];
	if (url_param_name_l) {
		/* Looks for an argument name. */
		arg_start = http_find_url_param_pos(chunks,
		                                    url_param_name, url_param_name_l,
		                                    delim, insensitive);
		/* Check for NULL *before* the relational comparison below:
		 * ordering a null pointer against qs_end is undefined
		 * behavior in C.
		 */
		if (!arg_start)
			return 0;

		/* Check for wrapping. */
		if (arg_start >= qs_end)
			qs_end = chunks[3];
	}
	if (!arg_start)
		return 0;

	if (!url_param_name_l) {
		while (1) {
			/* looks for the first argument. */
			value_start = memchr(arg_start, '=', qs_end - arg_start);
			if (!value_start) {
				/* Check for wrapping. */
				if (arg_start >= chunks[0] &&
				    arg_start < chunks[1] &&
				    chunks[2]) {
					arg_start = chunks[2];
					qs_end = chunks[3];
					continue;
				}
				return 0;
			}
			break;
		}
		value_start++;
	}
	else {
		/* Jump the argument length. */
		value_start = arg_start + url_param_name_l + 1;

		/* Check for pointer wrapping. */
		if (fix_pointer_if_wrap(chunks, &value_start)) {
			/* Update the end pointer. */
			qs_end = chunks[3];

			/* Check for overflow. */
			if (value_start >= qs_end)
				return 0;
		}
	}

	value_end = value_start;

	/* scan up to the next delimiter, handling the chunk boundary */
	while (1) {
		while ((value_end < qs_end) && !http_is_param_delimiter(*value_end, delim))
			value_end++;
		if (value_end < qs_end)
			break;
		/* process buffer wrapping. */
		if (value_end >= chunks[0] &&
		    value_end < chunks[1] &&
		    chunks[2]) {
			value_end = chunks[2];
			qs_end = chunks[3];
			continue;
		}
		break;
	}

	*vstart = value_start;
	*vend = value_end;
	return 1;
}
+
+/* Parses a single header line (without the CRLF) and splits it into its name
+ * and its value. The parsing is pretty naive and just skip spaces.
+ */
+int http_parse_header(const struct ist hdr, struct ist *name, struct ist *value)
+{
+ char *p = hdr.ptr;
+ char *end = p + hdr.len;
+
+ name->len = value->len = 0;
+
+ /* Skip leading spaces */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* Set the header name */
+ name->ptr = p;
+ for (; p < end && HTTP_IS_TOKEN(*p); p++);
+ name->len = p - name->ptr;
+
+ /* Skip the ':' and spaces before and after it */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+ if (p < end && *p == ':') p++;
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* Set the header value */
+ value->ptr = p;
+ value->len = end - p;
+
+ return 1;
+}
+
+/* Parses a single start line (without the CRLF) and splits it into 3 parts. The
+ * parsing is pretty naive and just skip spaces.
+ */
+int http_parse_stline(const struct ist line, struct ist *p1, struct ist *p2, struct ist *p3)
+{
+ char *p = line.ptr;
+ char *end = p + line.len;
+
+ p1->len = p2->len = p3->len = 0;
+
+ /* Skip leading spaces */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* Set the first part */
+ p1->ptr = p;
+ for (; p < end && HTTP_IS_TOKEN(*p); p++);
+ p1->len = p - p1->ptr;
+
+ /* Skip spaces between p1 and p2 */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* Set the second part */
+ p2->ptr = p;
+ for (; p < end && !HTTP_IS_SPHT(*p); p++);
+ p2->len = p - p2->ptr;
+
+ /* Skip spaces between p2 and p3 */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* The remaining is the third value */
+ p3->ptr = p;
+ p3->len = end - p;
+
+ return 1;
+}
+
+/* Parses value of a Status header with the following format: "Status: Code[
+ * Reason]". The parsing is pretty naive and just skip spaces. It return the
+ * numeric value of the status code.
+ */
+int http_parse_status_val(const struct ist value, struct ist *status, struct ist *reason)
+{
+ char *p = value.ptr;
+ char *end = p + value.len;
+ uint16_t code;
+
+ status->len = reason->len = 0;
+
+ /* Skip leading spaces */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* Set the status part */
+ status->ptr = p;
+ for (; p < end && HTTP_IS_TOKEN(*p); p++);
+ status->len = p - status->ptr;
+
+ /* Skip spaces between status and reason */
+ for (; p < end && HTTP_IS_SPHT(*p); p++);
+
+ /* the remaining is the reason */
+ reason->ptr = p;
+ reason->len = end - p;
+
+ code = strl2ui(status->ptr, status->len);
+ return code;
+}
+
+
+/* Returns non-zero if the two ETags are comparable (see RFC 7232#2.3.2).
+ * If any of them is a weak ETag, we discard the weakness prefix and perform
+ * a strict string comparison.
+ * Returns 0 otherwise.
+ */
+int http_compare_etags(struct ist etag1, struct ist etag2)
+{
+ enum http_etag_type etag_type1;
+ enum http_etag_type etag_type2;
+
+ etag_type1 = http_get_etag_type(etag1);
+ etag_type2 = http_get_etag_type(etag2);
+
+ if (etag_type1 == ETAG_INVALID || etag_type2 == ETAG_INVALID)
+ return 0;
+
+ /* Discard the 'W/' prefix an ETag is a weak one. */
+ if (etag_type1 == ETAG_WEAK)
+ etag1 = istadv(etag1, 2);
+ if (etag_type2 == ETAG_WEAK)
+ etag2 = istadv(etag2, 2);
+
+ return isteq(etag1, etag2);
+}
+
+
+/*
+ * Trim leading space or horizontal tab characters from <value> string.
+ * Returns the trimmed string.
+ */
+struct ist http_trim_leading_spht(struct ist value)
+{
+ struct ist ret = value;
+
+ while (ret.len && HTTP_IS_SPHT(ret.ptr[0])) {
+ ++ret.ptr;
+ --ret.len;
+ }
+
+ return ret;
+}
+
+/*
+ * Trim trailing space or horizontal tab characters from <value> string.
+ * Returns the trimmed string.
+ */
+struct ist http_trim_trailing_spht(struct ist value)
+{
+ struct ist ret = value;
+
+ while (ret.len && HTTP_IS_SPHT(ret.ptr[-1]))
+ --ret.len;
+
+ return ret;
+}
diff --git a/src/http_acl.c b/src/http_acl.c
new file mode 100644
index 0000000..bf29fc3
--- /dev/null
+++ b/src/http_acl.c
@@ -0,0 +1,185 @@
+/*
+ * HTTP ACLs declaration
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/chunk.h>
+#include <haproxy/http.h>
+#include <haproxy/pattern.h>
+#include <haproxy/pool.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+/* We use the pre-parsed method if it is known, and store its number as an
+ * integer. If it is unknown, we use the pointer and the length.
+ * <text> is the configured method string; <pattern> is filled in place.
+ * Always returns 1 (parsing cannot fail); <mflags> and <err> are unused.
+ */
+static int pat_parse_meth(const char *text, struct pattern *pattern, int mflags, char **err)
+{
+ int len, meth;
+
+ len = strlen(text);
+ meth = find_http_meth(text, len);
+
+ pattern->val.i = meth;
+ if (meth == HTTP_METH_OTHER) {
+ /* unknown method: keep the raw string for later string comparison */
+ pattern->ptr.str = (char *)text;
+ pattern->len = len;
+ }
+ else {
+ pattern->ptr.str = NULL;
+ pattern->len = 0;
+ }
+ return 1;
+}
+
+/* See above how the method is stored in the global pattern.
+ * Walks the pattern list and returns the first pattern matching the sample's
+ * method (by enum value for well-known methods, by string otherwise), or
+ * NULL when nothing matches.
+ */
+static struct pattern *pat_match_meth(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ int icase;
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* well-known method: only the enum values need to match */
+ if (pattern->val.i != HTTP_METH_OTHER) {
+ if (smp->data.u.meth.meth == pattern->val.i)
+ return pattern;
+ else
+ continue;
+ }
+
+ /* Other method, we must compare the strings */
+ if (pattern->len != smp->data.u.meth.str.data)
+ continue;
+
+ /* case sensitivity is driven by the expression's match flags */
+ icase = expr->mflags & PAT_MF_IGNORE_CASE;
+ if ((icase && strncasecmp(pattern->ptr.str, smp->data.u.meth.str.area, smp->data.u.meth.str.data) == 0) ||
+ (!icase && strncmp(pattern->ptr.str, smp->data.u.meth.str.area, smp->data.u.meth.str.data) == 0))
+ return pattern;
+ }
+ return NULL;
+}
+
+/************************************************************************/
+/* All supported ACL keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * Each entry maps an ACL keyword to the sample fetch keyword it relies on
+ * and the default pattern matching method, unless explicit parse/index/
+ * match callbacks are provided (see "http_auth_group" and "method" below).
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+ { "base", "base", PAT_MATCH_STR },
+ { "base_beg", "base", PAT_MATCH_BEG },
+ { "base_dir", "base", PAT_MATCH_DIR },
+ { "base_dom", "base", PAT_MATCH_DOM },
+ { "base_end", "base", PAT_MATCH_END },
+ { "base_len", "base", PAT_MATCH_LEN },
+ { "base_reg", "base", PAT_MATCH_REG },
+ { "base_sub", "base", PAT_MATCH_SUB },
+
+ { "cook", "req.cook", PAT_MATCH_STR },
+ { "cook_beg", "req.cook", PAT_MATCH_BEG },
+ { "cook_dir", "req.cook", PAT_MATCH_DIR },
+ { "cook_dom", "req.cook", PAT_MATCH_DOM },
+ { "cook_end", "req.cook", PAT_MATCH_END },
+ { "cook_len", "req.cook", PAT_MATCH_LEN },
+ { "cook_reg", "req.cook", PAT_MATCH_REG },
+ { "cook_sub", "req.cook", PAT_MATCH_SUB },
+
+ { "hdr", "req.hdr", PAT_MATCH_STR },
+ { "hdr_beg", "req.hdr", PAT_MATCH_BEG },
+ { "hdr_dir", "req.hdr", PAT_MATCH_DIR },
+ { "hdr_dom", "req.hdr", PAT_MATCH_DOM },
+ { "hdr_end", "req.hdr", PAT_MATCH_END },
+ { "hdr_len", "req.hdr", PAT_MATCH_LEN },
+ { "hdr_reg", "req.hdr", PAT_MATCH_REG },
+ { "hdr_sub", "req.hdr", PAT_MATCH_SUB },
+
+ /* these two declarations use strings with list storage (in place
+ * of tree storage). The basic match is PAT_MATCH_STR, but the indexation
+ * and delete functions are relative to the list management. The parse
+ * and match method are related to the corresponding fetch methods. This
+ * is a very particular ACL declaration mode.
+ */
+ { "http_auth_group", NULL, PAT_MATCH_STR, NULL, pat_idx_list_str, NULL, NULL, pat_match_auth },
+ { "method", NULL, PAT_MATCH_STR, pat_parse_meth, pat_idx_list_str, NULL, NULL, pat_match_meth },
+
+ { "path", "path", PAT_MATCH_STR },
+ { "path_beg", "path", PAT_MATCH_BEG },
+ { "path_dir", "path", PAT_MATCH_DIR },
+ { "path_dom", "path", PAT_MATCH_DOM },
+ { "path_end", "path", PAT_MATCH_END },
+ { "path_len", "path", PAT_MATCH_LEN },
+ { "path_reg", "path", PAT_MATCH_REG },
+ { "path_sub", "path", PAT_MATCH_SUB },
+
+ { "req_ver", "req.ver", PAT_MATCH_STR },
+ { "resp_ver", "res.ver", PAT_MATCH_STR },
+
+ { "scook", "res.cook", PAT_MATCH_STR },
+ { "scook_beg", "res.cook", PAT_MATCH_BEG },
+ { "scook_dir", "res.cook", PAT_MATCH_DIR },
+ { "scook_dom", "res.cook", PAT_MATCH_DOM },
+ { "scook_end", "res.cook", PAT_MATCH_END },
+ { "scook_len", "res.cook", PAT_MATCH_LEN },
+ { "scook_reg", "res.cook", PAT_MATCH_REG },
+ { "scook_sub", "res.cook", PAT_MATCH_SUB },
+
+ { "shdr", "res.hdr", PAT_MATCH_STR },
+ { "shdr_beg", "res.hdr", PAT_MATCH_BEG },
+ { "shdr_dir", "res.hdr", PAT_MATCH_DIR },
+ { "shdr_dom", "res.hdr", PAT_MATCH_DOM },
+ { "shdr_end", "res.hdr", PAT_MATCH_END },
+ { "shdr_len", "res.hdr", PAT_MATCH_LEN },
+ { "shdr_reg", "res.hdr", PAT_MATCH_REG },
+ { "shdr_sub", "res.hdr", PAT_MATCH_SUB },
+
+ { "url", "url", PAT_MATCH_STR },
+ { "url_beg", "url", PAT_MATCH_BEG },
+ { "url_dir", "url", PAT_MATCH_DIR },
+ { "url_dom", "url", PAT_MATCH_DOM },
+ { "url_end", "url", PAT_MATCH_END },
+ { "url_len", "url", PAT_MATCH_LEN },
+ { "url_reg", "url", PAT_MATCH_REG },
+ { "url_sub", "url", PAT_MATCH_SUB },
+
+ { "urlp", "urlp", PAT_MATCH_STR },
+ { "urlp_beg", "urlp", PAT_MATCH_BEG },
+ { "urlp_dir", "urlp", PAT_MATCH_DIR },
+ { "urlp_dom", "urlp", PAT_MATCH_DOM },
+ { "urlp_end", "urlp", PAT_MATCH_END },
+ { "urlp_len", "urlp", PAT_MATCH_LEN },
+ { "urlp_reg", "urlp", PAT_MATCH_REG },
+ { "urlp_sub", "urlp", PAT_MATCH_SUB },
+
+ { /* END */ },
+}};
+
+/* registers the ACL keywords above during startup */
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/http_act.c b/src/http_act.c
new file mode 100644
index 0000000..7d45780
--- /dev/null
+++ b/src/http_act.c
@@ -0,0 +1,2501 @@
+/*
+ * HTTP actions
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/pattern.h>
+#include <haproxy/pool.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/uri_normalizer.h>
+#include <haproxy/version.h>
+
+
+/* Release memory allocated by most of HTTP actions. Concretely, it releases
+ * <arg.http>: the stored string, the optional compiled regex, and every
+ * node of the log-format list.
+ */
+static void release_http_action(struct act_rule *rule)
+{
+ struct logformat_node *lf, *lfb;
+
+ istfree(&rule->arg.http.str);
+ if (rule->arg.http.re)
+ regex_free(rule->arg.http.re);
+ list_for_each_entry_safe(lf, lfb, &rule->arg.http.fmt, list) {
+ LIST_DELETE(&lf->list);
+ release_sample_expr(lf->expr);
+ free(lf->arg);
+ free(lf);
+ }
+}
+
+/* Release memory allocated by HTTP actions relying on an http reply.
+ * Concretely, it releases <.arg.http_reply> and resets the pointer so a
+ * later release cannot free it twice.
+ */
+static void release_act_http_reply(struct act_rule *rule)
+{
+ release_http_reply(rule->arg.http_reply);
+ rule->arg.http_reply = NULL;
+}
+
+
+/* Check function for HTTP actions relying on an http reply. The function
+ * returns 1 in success case, otherwise, it returns 0 and err is filled.
+ * On failure the reply is released so the rule keeps no dangling pointer.
+ */
+static int check_act_http_reply(struct act_rule *rule, struct proxy *px, char **err)
+{
+ struct http_reply *reply = rule->arg.http_reply;
+
+ if (!http_check_http_reply(reply, px, err)) {
+ release_act_http_reply(rule);
+ return 0;
+ }
+ return 1;
+}
+
+
+/* This function executes one of the set-{method,path,query,uri} actions. It
+ * builds a string in the trash from the specified format string. It finds
+ * the action to be performed in <.action>, previously filled by function
+ * parse_set_req_line(). The replacement action is executed by the function
+ * http_action_set_req_line(). On success, it returns ACT_RET_CONT. If an error
+ * occurs while soft rewrites are enabled, the action is canceled, but the rule
+ * processing continues. Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_set_req_line(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct buffer *replace;
+ enum act_return ret = ACT_RET_CONT;
+
+ replace = alloc_trash_chunk();
+ if (!replace)
+ goto fail_alloc;
+
+ /* If we have to create a query string, prepare a '?'. */
+ if (rule->action == 2) // set-query
+ replace->area[replace->data++] = '?';
+ replace->data += build_logline(s, replace->area + replace->data,
+ replace->size - replace->data,
+ &rule->arg.http.fmt);
+
+ if (http_req_replace_stline(rule->action, replace->area, replace->data, px, s) == -1)
+ goto fail_rewrite;
+
+ leave:
+ free_trash_chunk(replace);
+ return ret;
+
+ fail_alloc:
+ /* out of memory: report a resource error and abort the rule */
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+
+ fail_rewrite:
+ /* account the failed rewrite on every involved counter */
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ /* only abort when soft rewrites are disabled */
+ if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
+ ret = ACT_RET_ERR;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ }
+ goto leave;
+}
+
+/* parse an http-request action among :
+ * set-method
+ * set-path
+ * set-pathq
+ * set-query
+ * set-uri
+ *
+ * All of them accept a single argument of type string representing a log-format.
+ * The resulting rule makes use of <http.fmt> to store the log-format list head,
+ * and <.action> to store the action type as an int (0=method, 1=path, 2=query,
+ * 3=uri, 4=pathq). It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_set_req_line(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg = *orig_arg;
+ int cap = 0;
+
+ /* the action is discriminated by the 5th character of the keyword
+  * ("set-Xxxx"), then by the 9th one for "set-path" vs "set-pathq".
+  */
+ switch (args[0][4]) {
+ case 'm' :
+ rule->action = 0; // set-method
+ break;
+ case 'p' :
+ if (args[0][8] == 'q')
+ rule->action = 4; // set-pathq
+ else
+ rule->action = 1; // set-path
+ break;
+ case 'q' :
+ rule->action = 2; // set-query
+ break;
+ case 'u' :
+ rule->action = 3; // set-uri
+ break;
+ default:
+ memprintf(err, "internal error: unhandled action '%s'", args[0]);
+ return ACT_RET_PRS_ERR;
+ }
+ rule->action_ptr = http_action_set_req_line;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ /* exactly one <format> argument, optionally followed by if/unless */
+ if (!*args[cur_arg] ||
+ (*args[cur_arg + 1] && strcmp(args[cur_arg + 1], "if") != 0 && strcmp(args[cur_arg + 1], "unless") != 0)) {
+ memprintf(err, "expects exactly 1 argument <format>");
+ return ACT_RET_PRS_ERR;
+ }
+
+ px->conf.args.ctx = ARGC_HRQ;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRQ_HDR;
+ if (!parse_logformat_string(args[cur_arg], px, &rule->arg.http.fmt, LOG_OPT_HTTP, cap, err)) {
+ return ACT_RET_PRS_ERR;
+ }
+
+ (*orig_arg)++;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes the http-request normalize-uri action.
+ * `rule->action` is expected to be a value from `enum act_normalize_uri`.
+ *
+ * On success, it returns ACT_RET_CONT. If an error
+ * occurs while soft rewrites are enabled, the action is canceled, but the rule
+ * processing continues. Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_normalize_uri(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ enum act_return ret = ACT_RET_CONT;
+ struct htx *htx = htxbuf(&s->req.buf);
+ const struct ist uri = htx_sl_req_uri(http_get_stline(htx));
+ struct buffer *replace = alloc_trash_chunk();
+ enum uri_normalizer_err err = URI_NORMALIZER_ERR_INTERNAL_ERROR;
+
+ if (!replace)
+ goto fail_alloc;
+
+ /* each case parses the path, runs one normalizer into <replace>, and
+  * installs the result; <err> carries the normalizer's status out of
+  * the switch.
+  */
+ switch ((enum act_normalize_uri) rule->action) {
+ case ACT_NORMALIZE_URI_PATH_MERGE_SLASHES: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_path_merge_slashes(iststop(path, '?'), &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 0))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_PATH_STRIP_DOT: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_path_dot(iststop(path, '?'), &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 0))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT:
+ case ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT_FULL: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_path_dotdot(iststop(path, '?'), rule->action == ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT_FULL, &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 0))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_QUERY_SORT_BY_NAME: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newquery = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_query_sort(istfind(path, '?'), '&', &newquery);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_query(htx, newquery))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE:
+ case ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE_STRICT: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_percent_upper(path, rule->action == ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE_STRICT, &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 1))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED:
+ case ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED_STRICT: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_percent_decode_unreserved(path, rule->action == ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED_STRICT, &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 1))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_FRAGMENT_STRIP: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_fragment_strip(path, &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 1))
+ goto fail_rewrite;
+
+ break;
+ }
+ case ACT_NORMALIZE_URI_FRAGMENT_ENCODE: {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ const struct ist path = http_parse_path(&parser);
+ struct ist newpath = ist2(replace->area, replace->size);
+
+ if (!isttest(path))
+ goto leave;
+
+ err = uri_normalizer_fragment_encode(path, &newpath);
+
+ if (err != URI_NORMALIZER_ERR_NONE)
+ break;
+
+ if (!http_replace_req_path(htx, newpath, 1))
+ goto fail_rewrite;
+
+ break;
+ }
+ }
+
+ /* map the normalizer status to the action's return value */
+ switch (err) {
+ case URI_NORMALIZER_ERR_NONE:
+ break;
+ case URI_NORMALIZER_ERR_INTERNAL_ERROR:
+ ret = ACT_RET_ERR;
+ break;
+ case URI_NORMALIZER_ERR_INVALID_INPUT:
+ ret = ACT_RET_INV;
+ break;
+ case URI_NORMALIZER_ERR_ALLOC:
+ goto fail_alloc;
+ }
+
+ leave:
+ free_trash_chunk(replace);
+ return ret;
+
+ fail_alloc:
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+
+ fail_rewrite:
+ /* use _HA_ATOMIC_INC for consistency with the other actions above */
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
+ ret = ACT_RET_ERR;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ }
+ goto leave;
+}
+
+/* Parses the http-request normalize-uri action. It expects a single <normalizer>
+ * argument, corresponding to a value in `enum act_normalize_uri`. Some
+ * normalizers accept an optional extra flag ("full", "strict").
+ *
+ * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_normalize_uri(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg = *orig_arg;
+
+ rule->action_ptr = http_action_normalize_uri;
+ rule->release_ptr = NULL;
+
+ if (!*args[cur_arg]) {
+ memprintf(err, "missing argument <normalizer>");
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (strcmp(args[cur_arg], "path-merge-slashes") == 0) {
+ cur_arg++;
+
+ rule->action = ACT_NORMALIZE_URI_PATH_MERGE_SLASHES;
+ }
+ else if (strcmp(args[cur_arg], "path-strip-dot") == 0) {
+ cur_arg++;
+
+ rule->action = ACT_NORMALIZE_URI_PATH_STRIP_DOT;
+ }
+ else if (strcmp(args[cur_arg], "path-strip-dotdot") == 0) {
+ cur_arg++;
+
+ /* optional "full" flag; "if"/"unless" starts the condition */
+ if (strcmp(args[cur_arg], "full") == 0) {
+ cur_arg++;
+ rule->action = ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT_FULL;
+ }
+ else if (!*args[cur_arg]) {
+ rule->action = ACT_NORMALIZE_URI_PATH_STRIP_DOTDOT;
+ }
+ else if (strcmp(args[cur_arg], "if") != 0 && strcmp(args[cur_arg], "unless") != 0) {
+ memprintf(err, "unknown argument '%s' for 'path-strip-dotdot' normalizer", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ else if (strcmp(args[cur_arg], "query-sort-by-name") == 0) {
+ cur_arg++;
+
+ rule->action = ACT_NORMALIZE_URI_QUERY_SORT_BY_NAME;
+ }
+ else if (strcmp(args[cur_arg], "percent-to-uppercase") == 0) {
+ cur_arg++;
+
+ /* optional "strict" flag */
+ if (strcmp(args[cur_arg], "strict") == 0) {
+ cur_arg++;
+ rule->action = ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE_STRICT;
+ }
+ else if (!*args[cur_arg]) {
+ rule->action = ACT_NORMALIZE_URI_PERCENT_TO_UPPERCASE;
+ }
+ else if (strcmp(args[cur_arg], "if") != 0 && strcmp(args[cur_arg], "unless") != 0) {
+ memprintf(err, "unknown argument '%s' for 'percent-to-uppercase' normalizer", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ else if (strcmp(args[cur_arg], "percent-decode-unreserved") == 0) {
+ cur_arg++;
+
+ /* optional "strict" flag */
+ if (strcmp(args[cur_arg], "strict") == 0) {
+ cur_arg++;
+ rule->action = ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED_STRICT;
+ }
+ else if (!*args[cur_arg]) {
+ rule->action = ACT_NORMALIZE_URI_PERCENT_DECODE_UNRESERVED;
+ }
+ else if (strcmp(args[cur_arg], "if") != 0 && strcmp(args[cur_arg], "unless") != 0) {
+ memprintf(err, "unknown argument '%s' for 'percent-decode-unreserved' normalizer", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ else if (strcmp(args[cur_arg], "fragment-strip") == 0) {
+ cur_arg++;
+
+ rule->action = ACT_NORMALIZE_URI_FRAGMENT_STRIP;
+ }
+ else if (strcmp(args[cur_arg], "fragment-encode") == 0) {
+ cur_arg++;
+
+ rule->action = ACT_NORMALIZE_URI_FRAGMENT_ENCODE;
+ }
+ else {
+ memprintf(err, "unknown normalizer '%s'", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a replace-uri action. It finds its arguments in
+ * <rule>.arg.http. It builds a string in the trash from the format string
+ * previously filled by function parse_replace_uri() and will execute the regex
+ * in <http.re> to replace the URI. It uses the format string present in
+ * <http.fmt>. The component to act on (path/pathq/uri) is taken from <.action>
+ * which contains 1 for the path, 3 for the URI or 4 for the path+query
+ * (values used by http_req_replace_stline()). On success, it returns
+ * ACT_RET_CONT. If an error occurs while soft rewrites are enabled, the
+ * action is canceled, but the rule processing continues. Otherwise
+ * ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_replace_uri(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ enum act_return ret = ACT_RET_CONT;
+ struct buffer *replace, *output;
+ struct ist uri;
+ int len;
+
+ replace = alloc_trash_chunk();
+ output = alloc_trash_chunk();
+ if (!replace || !output)
+ goto fail_alloc;
+ uri = htx_sl_req_uri(http_get_stline(htxbuf(&s->req.buf)));
+
+ /* narrow the subject to the component selected by the action */
+ if (rule->action == 1) { // replace-path
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ uri = iststop(http_parse_path(&parser), '?');
+ }
+ else if (rule->action == 4) { // replace-pathq
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ uri = http_parse_path(&parser);
+ }
+
+ /* empty subject or no regex match: leave the request line untouched */
+ if (!istlen(uri))
+ goto leave;
+
+ if (!regex_exec_match2(rule->arg.http.re, uri.ptr, uri.len, MAX_MATCH, pmatch, 0))
+ goto leave;
+
+ replace->data = build_logline(s, replace->area, replace->size, &rule->arg.http.fmt);
+
+ /* note: uri.ptr doesn't need to be zero-terminated because it will
+ * only be used to pick pmatch references.
+ */
+ len = exp_replace(output->area, output->size, uri.ptr, replace->area, pmatch);
+ if (len == -1)
+ goto fail_rewrite;
+
+ if (http_req_replace_stline(rule->action, output->area, len, px, s) == -1)
+ goto fail_rewrite;
+
+ leave:
+ free_trash_chunk(output);
+ free_trash_chunk(replace);
+ return ret;
+
+ fail_alloc:
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+
+ fail_rewrite:
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
+ ret = ACT_RET_ERR;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ }
+ goto leave;
+}
+
+/* parse a "replace-uri", "replace-path" or "replace-pathq"
+ * http-request action.
+ * This action takes 2 arguments (a regex and a replacement format string).
+ * The resulting rule makes use of <.action> to store the action (1=path,
+ * 3=uri, 4=pathq), <http.re> to store the compiled regex, and <http.fmt>
+ * to store the log-format list head. It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_replace_uri(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg = *orig_arg;
+ int cap = 0;
+ char *error = NULL;
+
+ /* discriminate by the 9th character ("replace-Xxxx"), then the 13th
+  * one for "replace-path" vs "replace-pathq".
+  */
+ switch (args[0][8]) {
+ case 'p':
+ if (args[0][12] == 'q')
+ rule->action = 4; // replace-pathq, same as set-pathq
+ else
+ rule->action = 1; // replace-path, same as set-path
+ break;
+ case 'u':
+ rule->action = 3; // replace-uri, same as set-uri
+ break;
+ default:
+ memprintf(err, "internal error: unhandled action '%s'", args[0]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ rule->action_ptr = http_action_replace_uri;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ if (!*args[cur_arg] || !*args[cur_arg+1] ||
+ (*args[cur_arg+2] && strcmp(args[cur_arg+2], "if") != 0 && strcmp(args[cur_arg+2], "unless") != 0)) {
+ memprintf(err, "expects exactly 2 arguments <match-regex> and <replace-format>");
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (!(rule->arg.http.re = regex_comp(args[cur_arg], 1, 1, &error))) {
+ memprintf(err, "failed to parse the regex : %s", error);
+ free(error);
+ return ACT_RET_PRS_ERR;
+ }
+
+ px->conf.args.ctx = ARGC_HRQ;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRQ_HDR;
+ if (!parse_logformat_string(args[cur_arg + 1], px, &rule->arg.http.fmt, LOG_OPT_HTTP, cap, err)) {
+ /* release the regex compiled above to avoid a leak on error */
+ regex_free(rule->arg.http.re);
+ return ACT_RET_PRS_ERR;
+ }
+
+ (*orig_arg) += 2;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function is just a compliant action wrapper for "set-status".
+ * It sets the response status (and optional custom reason) stored in
+ * <arg.http>. Returns ACT_RET_CONT on success; on rewrite failure the
+ * counters are updated and ACT_RET_ERR is returned unless soft rewrites
+ * are enabled.
+ */
+static enum act_return action_http_set_status(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ if (http_res_set_status(rule->arg.http.i, rule->arg.http.str, s) == -1) {
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ return ACT_RET_ERR;
+ }
+ }
+
+ return ACT_RET_CONT;
+}
+
+/* parse set-status action:
+ * This action accepts a single argument of type int representing
+ * an http status code (100..999), optionally followed by "reason <fmt>"
+ * to override the default reason string. It returns ACT_RET_PRS_OK on
+ * success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_set_status(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ char *error;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = action_http_set_status;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ /* Check if an argument is available */
+ if (!*args[*orig_arg]) {
+ memprintf(err, "expects 1 argument: <status>; or 3 arguments: <status> reason <fmt>");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* convert status code as integer */
+ rule->arg.http.i = strtol(args[*orig_arg], &error, 10);
+ if (*error != '\0' || rule->arg.http.i < 100 || rule->arg.http.i > 999) {
+ memprintf(err, "expects an integer status code between 100 and 999");
+ return ACT_RET_PRS_ERR;
+ }
+
+ (*orig_arg)++;
+
+ /* set custom reason string */
+ rule->arg.http.str = ist(NULL); // If null, we use the default reason for the status code.
+ if (*args[*orig_arg] && strcmp(args[*orig_arg], "reason") == 0 &&
+ (*args[*orig_arg + 1] && strcmp(args[*orig_arg + 1], "if") != 0 && strcmp(args[*orig_arg + 1], "unless") != 0)) {
+ (*orig_arg)++;
+ /* NOTE(review): strdup() result is not checked; on OOM the reason
+  * silently falls back to NULL (default reason) — confirm intended.
+  */
+ rule->arg.http.str = ist(strdup(args[*orig_arg]));
+ (*orig_arg)++;
+ }
+
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes the "reject" HTTP action. It clears the request and
+ * response buffer without sending any response. It can be useful as an HTTP
+ * alternative to the silent-drop action to defend against DoS attacks, and may
+ * also be used with HTTP/2 to close a connection instead of just a stream.
+ * The txn status is unchanged, indicating no response was sent. The termination
+ * flags will indicate "PR". It always returns ACT_RET_ABRT.
+ */
+static enum act_return http_action_reject(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ /* kill the whole connection, not just the stream */
+ sc_must_kill_conn(s->scf);
+ stream_abort(s);
+ s->req.analysers &= AN_REQ_FLT_END;
+ s->res.analysers &= AN_RES_FLT_END;
+
+ /* account the denied request on frontend, backend and listener */
+ _HA_ATOMIC_INC(&s->be->be_counters.denied_req);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->denied_req);
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= SF_FINST_R;
+
+ return ACT_RET_ABRT;
+}
+
+/* parse the "reject" action:
+ * This action takes no argument and returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_action_reject(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_reject;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes the "disable-l7-retry" HTTP action.
+ * It disables L7 retries (all retry except for a connection failure). This
+ * can be useful for example to avoid retrying on POST requests.
+ * It just removes the L7 retry flag on the HTTP transaction, and always
+ * returns ACT_RET_CONT.
+ */
+static enum act_return http_req_disable_l7_retry(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ /* In theory, the TX_L7_RETRY flags isn't set at this point, but
+ * let's be future-proof and remove it anyway.
+ */
+ s->txn->flags &= ~TX_L7_RETRY;
+ s->txn->flags |= TX_D_L7_RETRY;
+ return ACT_RET_CONT;
+}
+
+/* parse the "disable-l7-retry" action:
+ * This action takes no argument and returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_req_disable_l7_retry(const char **args,
+ int *orig_args, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_req_disable_l7_retry;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes the "capture" action. It executes a fetch expression,
+ * turns the result into a string and puts it in a capture slot. It always
+ * returns ACT_RET_CONT. If an error occurs the action is cancelled, but the
+ * rule processing continues.
+ */
+static enum act_return http_action_req_capture(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct sample *key;
+ struct cap_hdr *h = rule->arg.cap.hdr;
+ char **cap = s->req_cap;
+ int len;
+
+ key = sample_fetch_as_type(s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.cap.expr, SMP_T_STR);
+ if (!key)
+ return ACT_RET_CONT;
+
+ /* allocate the slot lazily on first use */
+ if (cap[h->index] == NULL)
+ cap[h->index] = pool_alloc(h->pool);
+
+ if (cap[h->index] == NULL) /* no more capture memory */
+ return ACT_RET_CONT;
+
+ /* truncate to the slot's configured length */
+ len = key->data.u.str.data;
+ if (len > h->len)
+ len = h->len;
+
+ memcpy(cap[h->index], key->data.u.str.area, len);
+ cap[h->index][len] = 0;
+ return ACT_RET_CONT;
+}
+
+/* This function executes the "capture" action and stores the result in a
+ * capture slot if it exists. It executes a fetch expression, turns the result
+ * into a string and puts it in a capture slot. It always returns ACT_RET_CONT.
+ * If an error occurs the action is cancelled, but the rule processing
+ * continues.
+ */
+static enum act_return http_action_req_capture_by_id(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct sample *key;
+ struct cap_hdr *h;
+ char **cap = s->req_cap;
+ struct proxy *fe = strm_fe(s);
+ int len;
+ int i;
+
+ /* Look for the original configuration: slots are stored in a list,
+  * most recently declared first, so walk it down to index <idx>.
+  */
+ for (h = fe->req_cap, i = fe->nb_req_cap - 1;
+ h != NULL && i != rule->arg.capid.idx ;
+ i--, h = h->next);
+ if (!h)
+ return ACT_RET_CONT;
+
+ key = sample_fetch_as_type(s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.capid.expr, SMP_T_STR);
+ if (!key)
+ return ACT_RET_CONT;
+
+ /* allocate the slot lazily on first use */
+ if (cap[h->index] == NULL)
+ cap[h->index] = pool_alloc(h->pool);
+
+ if (cap[h->index] == NULL) /* no more capture memory */
+ return ACT_RET_CONT;
+
+ /* truncate to the slot's configured length */
+ len = key->data.u.str.data;
+ if (len > h->len)
+ len = h->len;
+
+ memcpy(cap[h->index], key->data.u.str.area, len);
+ cap[h->index][len] = 0;
+ return ACT_RET_CONT;
+}
+
+/* Check an "http-request capture" action.
+ *
+ * Only rules using the by-id variant need checking: the referenced slot must
+ * exist when the proxy is a frontend (slots can only be declared there, so
+ * nothing can be verified for backends at parsing time). Returns 1 on
+ * success, otherwise 0 with <err> filled.
+ */
+static int check_http_req_capture(struct act_rule *rule, struct proxy *px, char **err)
+{
+ if (rule->action_ptr == http_action_req_capture_by_id &&
+ (px->cap & PR_CAP_FE) && rule->arg.capid.idx >= px->nb_req_cap) {
+ memprintf(err, "unable to find capture id '%d' referenced by http-request capture rule",
+ rule->arg.capid.idx);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Release memory allocated by an http capture action: both the "len" and the
+ * "id" variants only own their sample expression, stored in a different
+ * member of the rule's arg union depending on the action pointer.
+ */
+static void release_http_capture(struct act_rule *rule)
+{
+ struct sample_expr *expr;
+
+ expr = (rule->action_ptr == http_action_req_capture)
+ ? rule->arg.cap.expr
+ : rule->arg.capid.expr;
+ release_sample_expr(expr);
+}
+
+/* parse an "http-request capture" action. It takes a single argument which is
+ * a sample fetch expression, followed by either 'len' <length> (declaring a
+ * new capture slot) or 'id' <idx> (reusing a declared slot). The expression
+ * and slot descriptor are stored in rule->arg.cap (len) or rule->arg.capid
+ * (id). It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_req_capture(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct sample_expr *expr;
+ struct cap_hdr *hdr;
+ int cur_arg;
+ int len = 0;
+
+ /* at least 3 words must be present before any 'if'/'unless' keyword:
+ * <expression> ('len'|'id') <value>
+ */
+ for (cur_arg = *orig_arg; cur_arg < *orig_arg + 3 && *args[cur_arg]; cur_arg++)
+ if (strcmp(args[cur_arg], "if") == 0 ||
+ strcmp(args[cur_arg], "unless") == 0)
+ break;
+
+ if (cur_arg < *orig_arg + 3) {
+ memprintf(err, "expects <expression> [ 'len' <length> | id <idx> ]");
+ return ACT_RET_PRS_ERR;
+ }
+
+ cur_arg = *orig_arg;
+ expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, err, &px->conf.args, NULL);
+ if (!expr)
+ return ACT_RET_PRS_ERR;
+
+ /* the fetch must be usable in a frontend request context */
+ if (!(expr->fetch->val & SMP_VAL_FE_HRQ_HDR)) {
+ memprintf(err,
+ "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[cur_arg-1], sample_src_names(expr->fetch->use));
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (!args[cur_arg] || !*args[cur_arg]) {
+ memprintf(err, "expects 'len' or 'id'");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (strcmp(args[cur_arg], "len") == 0) {
+ cur_arg++;
+
+ /* new capture slots may only be declared on frontends */
+ if (!(px->cap & PR_CAP_FE)) {
+ memprintf(err, "proxy '%s' has no frontend capability", px->id);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ px->conf.args.ctx = ARGC_CAP;
+
+ if (!args[cur_arg]) {
+ memprintf(err, "missing length value");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ /* parse the maximum length allowed for this capture */
+ len = atoi(args[cur_arg]);
+ if (len <= 0) {
+ memprintf(err, "length must be > 0");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ cur_arg++;
+
+ hdr = calloc(1, sizeof(*hdr));
+ if (!hdr) {
+ memprintf(err, "out of memory");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ hdr->next = px->req_cap;
+ hdr->name = NULL; /* not a header capture */
+ hdr->namelen = 0;
+ hdr->len = len;
+ hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
+ if (!hdr->pool) {
+ /* without a pool, pool_alloc() at run time would crash */
+ memprintf(err, "out of memory");
+ free(hdr);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ hdr->index = px->nb_req_cap++;
+
+ px->req_cap = hdr;
+ px->to_log |= LW_REQHDR;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_req_capture;
+ rule->release_ptr = release_http_capture;
+ rule->arg.cap.expr = expr;
+ rule->arg.cap.hdr = hdr;
+ }
+
+ else if (strcmp(args[cur_arg], "id") == 0) {
+ int id;
+ char *error;
+
+ cur_arg++;
+
+ if (!args[cur_arg]) {
+ memprintf(err, "missing id value");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ id = strtol(args[cur_arg], &error, 10);
+ if (*error != '\0') {
+ memprintf(err, "cannot parse id '%s'", args[cur_arg]);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ cur_arg++;
+
+ px->conf.args.ctx = ARGC_CAP;
+
+ /* the slot's existence is verified later by check_http_req_capture() */
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_req_capture_by_id;
+ rule->check_ptr = check_http_req_capture;
+ rule->release_ptr = release_http_capture;
+ rule->arg.capid.expr = expr;
+ rule->arg.capid.idx = id;
+ }
+
+ else {
+ memprintf(err, "expects 'len' or 'id', found '%s'", args[cur_arg]);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes the "capture" action and stores the result in a
+ * response capture slot referenced by id, if that slot exists. It executes a
+ * fetch expression, turns the result into a string and puts it in the slot.
+ * If an error occurs the action is cancelled, but the rule processing
+ * continues (ACT_RET_CONT is returned in every case).
+ */
+static enum act_return http_action_res_capture_by_id(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct sample *key;
+ struct cap_hdr *h;
+ char **cap = s->res_cap;
+ struct proxy *fe = strm_fe(s);
+ int len;
+ int i;
+
+ /* Look for the original configuration: walk the frontend's response
+ * capture list while counting down from nb_rsp_cap - 1 until the rule's
+ * index is found.
+ */
+ for (h = fe->rsp_cap, i = fe->nb_rsp_cap - 1;
+ h != NULL && i != rule->arg.capid.idx ;
+ i--, h = h->next);
+ if (!h)
+ return ACT_RET_CONT;
+
+ key = sample_fetch_as_type(s->be, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->arg.capid.expr, SMP_T_STR);
+ if (!key)
+ return ACT_RET_CONT;
+
+ /* lazily allocate the slot's storage from the capture pool */
+ if (cap[h->index] == NULL)
+ cap[h->index] = pool_alloc(h->pool);
+
+ if (cap[h->index] == NULL) /* no more capture memory */
+ return ACT_RET_CONT;
+
+ /* truncate to the slot's declared length and NUL-terminate */
+ len = key->data.u.str.data;
+ if (len > h->len)
+ len = h->len;
+
+ memcpy(cap[h->index], key->data.u.str.area, len);
+ cap[h->index][len] = 0;
+ return ACT_RET_CONT;
+}
+
+/* Check an "http-response capture" action.
+ *
+ * Only rules using the by-id variant need checking: the referenced slot must
+ * exist when the proxy is a frontend (slots can only be declared there, so
+ * nothing can be verified for backends at parsing time). Returns 1 on
+ * success, otherwise 0 with <err> filled.
+ */
+static int check_http_res_capture(struct act_rule *rule, struct proxy *px, char **err)
+{
+ if (rule->action_ptr == http_action_res_capture_by_id &&
+ (px->cap & PR_CAP_FE) && rule->arg.capid.idx >= px->nb_rsp_cap) {
+ memprintf(err, "unable to find capture id '%d' referenced by http-response capture rule",
+ rule->arg.capid.idx);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* parse an "http-response capture" action. It takes a sample fetch expression
+ * followed by 'id' <idx> referencing a previously declared response capture
+ * slot. The expression and the slot index are stored in rule->arg.capid.
+ * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_res_capture(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct sample_expr *expr;
+ int cur_arg;
+ int id;
+ char *error;
+
+ /* at least 3 words must be present before any 'if'/'unless' keyword:
+ * <expression> 'id' <idx>
+ */
+ for (cur_arg = *orig_arg; cur_arg < *orig_arg + 3 && *args[cur_arg]; cur_arg++)
+ if (strcmp(args[cur_arg], "if") == 0 ||
+ strcmp(args[cur_arg], "unless") == 0)
+ break;
+
+ if (cur_arg < *orig_arg + 3) {
+ memprintf(err, "expects <expression> id <idx>");
+ return ACT_RET_PRS_ERR;
+ }
+
+ cur_arg = *orig_arg;
+ expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, err, &px->conf.args, NULL);
+ if (!expr)
+ return ACT_RET_PRS_ERR;
+
+ /* the fetch must be usable in a frontend response context */
+ if (!(expr->fetch->val & SMP_VAL_FE_HRS_HDR)) {
+ memprintf(err,
+ "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[cur_arg-1], sample_src_names(expr->fetch->use));
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (!args[cur_arg] || !*args[cur_arg]) {
+ memprintf(err, "expects 'id'");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (strcmp(args[cur_arg], "id") != 0) {
+ memprintf(err, "expects 'id', found '%s'", args[cur_arg]);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ cur_arg++;
+
+ if (!args[cur_arg]) {
+ memprintf(err, "missing id value");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ id = strtol(args[cur_arg], &error, 10);
+ if (*error != '\0') {
+ memprintf(err, "cannot parse id '%s'", args[cur_arg]);
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ cur_arg++;
+
+ px->conf.args.ctx = ARGC_CAP;
+
+ /* the slot's existence is verified later by check_http_res_capture() */
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_res_capture_by_id;
+ rule->check_ptr = check_http_res_capture;
+ rule->release_ptr = release_http_capture;
+ rule->arg.capid.expr = expr;
+ rule->arg.capid.idx = id;
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* Parse an "allow" action for a request or a response rule. It takes no
+ * argument. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_allow(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ /* "allow" terminates evaluation of the current ruleset */
+ rule->flags |= ACT_FLAG_FINAL;
+ rule->action = ACT_ACTION_ALLOW;
+ return ACT_RET_PRS_OK;
+}
+
+/* Parse "deny" or "tarpit" actions for a request rule or "deny" action for a
+ * response rule. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on
+ * error. It relies on http_parse_http_reply() to set
+ * <.arg.http_reply>.
+ */
+static enum act_parse_ret parse_http_deny(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int default_status;
+ int cur_arg, arg = 0;
+
+ cur_arg = *orig_arg;
+ /* args[cur_arg - 1] is the action keyword itself ("deny" or "tarpit") */
+ if (rule->from == ACT_F_HTTP_REQ) {
+ if (strcmp(args[cur_arg - 1], "tarpit") == 0) {
+ rule->action = ACT_HTTP_REQ_TARPIT;
+ default_status = 500;
+ }
+ else {
+ rule->action = ACT_ACTION_DENY;
+ default_status = 403;
+ }
+ }
+ else {
+ rule->action = ACT_ACTION_DENY;
+ default_status = 502;
+ }
+
+ /* If no args or only a deny_status specified, fallback on the legacy
+ * mode and use default error files despite the fact that
+ * default-errorfiles is not used. Otherwise, parse an http reply.
+ */
+
+ /* Prepare parsing of log-format strings */
+ px->conf.args.ctx = ((rule->from == ACT_F_HTTP_REQ) ? ARGC_HRQ : ARGC_HRS);
+
+ /* legacy mode: synthesize a "default-errorfiles" reply argument list */
+ if (!*(args[cur_arg]) || strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
+ rule->arg.http_reply = http_parse_http_reply((const char *[]){"default-errorfiles", ""}, &arg, px, default_status, err);
+ goto end;
+ }
+
+ if (strcmp(args[cur_arg], "deny_status") == 0) {
+ /* legacy mode with only "deny_status <code>" before the condition */
+ if (!*(args[cur_arg+2]) || strcmp(args[cur_arg+2], "if") == 0 || strcmp(args[cur_arg+2], "unless") == 0) {
+ rule->arg.http_reply = http_parse_http_reply((const char *[]){"status", args[cur_arg+1], "default-errorfiles", ""},
+ &arg, px, default_status, err);
+ *orig_arg += 2;
+ goto end;
+ }
+ /* NOTE: this advances the args entry in place so that the reply
+ * parser sees "status" instead of "deny_status"
+ */
+ args[cur_arg] += 5; /* skip "deny_" for the parsing */
+ }
+
+ rule->arg.http_reply = http_parse_http_reply(args, orig_arg, px, default_status, err);
+
+ end:
+ if (!rule->arg.http_reply)
+ return ACT_RET_PRS_ERR;
+
+ /* deny/tarpit terminate rule evaluation */
+ rule->flags |= ACT_FLAG_FINAL;
+ rule->check_ptr = check_act_http_reply;
+ rule->release_ptr = release_act_http_reply;
+ return ACT_RET_PRS_OK;
+}
+
+
+/* This function executes an auth action. It builds a 401/407 HTX message using
+ * the corresponding proxy's error message. On success, it returns
+ * ACT_RET_ABRT. If an error occurs ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_auth(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct channel *req = &s->req;
+ struct channel *res = &s->res;
+ struct htx *htx = htx_from_buf(&res->buf);
+ struct http_reply *reply;
+ const char *auth_realm;
+ struct http_hdr_ctx ctx;
+ struct ist hdr;
+
+ /* Auth might be performed on regular http-req rules as well as on stats */
+ auth_realm = rule->arg.http.str.ptr;
+ if (!auth_realm) {
+ /* no explicit realm: use the stats default realm on the stats
+ * ruleset, otherwise the proxy's name
+ */
+ if (px->uri_auth && s->current_rule_list == &px->uri_auth->http_req_rules)
+ auth_realm = STATS_DEFAULT_REALM;
+ else
+ auth_realm = px->id;
+ }
+
+ /* 401 WWW-Authenticate for a regular request, 407 Proxy-Authenticate
+ * when the transaction uses proxy-style connections
+ */
+ if (!(s->txn->flags & TX_USE_PX_CONN)) {
+ s->txn->status = 401;
+ hdr = ist("WWW-Authenticate");
+ }
+ else {
+ s->txn->status = 407;
+ hdr = ist("Proxy-Authenticate");
+ }
+ reply = http_error_message(s);
+ channel_htx_truncate(res, htx);
+
+ if (chunk_printf(&trash, "Basic realm=\"%s\"", auth_realm) == -1)
+ goto fail;
+
+ /* Write the generic 40x message */
+ if (http_reply_to_htx(s, htx, reply) == -1)
+ goto fail;
+
+ /* Remove all existing occurrences of the XXX-Authenticate header */
+ ctx.blk = NULL;
+ while (http_find_header(htx, hdr, &ctx, 1))
+ http_remove_header(htx, &ctx);
+
+ /* Now add the right XXX-Authenticate header */
+ if (!http_add_header(htx, hdr, ist2(b_orig(&trash), b_data(&trash))))
+ goto fail;
+
+ /* Finally forward the reply */
+ htx_to_buf(htx, &res->buf);
+ if (!http_forward_proxy_resp(s, 1))
+ goto fail;
+
+ /* Note: Only eval on the request */
+ s->logs.request_ts = now_ns;
+ req->analysers &= AN_REQ_FLT_END;
+
+ if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_LOCAL;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= SF_FINST_R;
+
+ stream_inc_http_err_ctr(s);
+ return ACT_RET_ABRT;
+
+ fail:
+ /* If an error occurred, remove the incomplete HTTP response from the
+ * buffer */
+ channel_htx_truncate(res, htx);
+ return ACT_RET_ERR;
+}
+
+/* Parse an "auth" action. It may take 2 optional arguments to define a "realm"
+ * parameter. It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_auth(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg;
+
+ /* auth always terminates rule evaluation (it sends a 401/407 reply) */
+ rule->action = ACT_CUSTOM;
+ rule->flags |= ACT_FLAG_FINAL;
+ rule->action_ptr = http_action_auth;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ cur_arg = *orig_arg;
+ if (strcmp(args[cur_arg], "realm") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ /* no trailing "." nor "\n": memprintf() messages are
+ * reported inline, like every other message in this file
+ */
+ memprintf(err, "missing realm value");
+ return ACT_RET_PRS_ERR;
+ }
+ /* NOTE(review): strdup() result is not checked here; on OOM the
+ * realm stays NULL and http_action_auth() falls back to the
+ * default realm -- confirm this is the intended degradation.
+ */
+ rule->arg.http.str = ist(strdup(args[cur_arg]));
+ cur_arg++;
+ }
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes an early-hint action. It adds an HTTP Early Hint HTTP
+ * 103 response header with <.arg.http.str> name and with a value built
+ * according to <.arg.http.fmt> log line format. If it is the first early-hint
+ * rule of the series, the 103 response start-line is added first. At the end,
+ * if the next rule is not an early-hint rule or if it is the last rule, the
+ * EOH block is added to terminate the response. On success, it returns
+ * ACT_RET_CONT. If an error occurs while soft rewrites are enabled, the
+ * action is canceled, but the rule processing continues. Otherwise
+ * ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_early_hint(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct act_rule *next_rule;
+ struct channel *res = &s->res;
+ struct htx *htx = htx_from_buf(&res->buf);
+ struct buffer *value = alloc_trash_chunk();
+ enum act_return ret = ACT_RET_CONT;
+
+ /* early hints are only defined for HTTP/1.1 and above */
+ if (!(s->txn->req.flags & HTTP_MSGF_VER_11))
+ goto leave;
+
+ if (!value) {
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ goto error;
+ }
+
+ /* if there is no pending 103 response, start a new response. Otherwise,
+ * continue to add headers to a previously started response (the txn
+ * status is used as the "103 in progress" marker)
+ */
+ if (s->txn->status != 103) {
+ struct htx_sl *sl;
+ unsigned int flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|
+ HTX_SL_F_XFER_LEN|HTX_SL_F_BODYLESS);
+
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags,
+ ist("HTTP/1.1"), ist("103"), ist("Early Hints"));
+ if (!sl)
+ goto error;
+ sl->info.res.status = 103;
+ s->txn->status = 103;
+ }
+
+ /* Add the HTTP Early Hint HTTP 103 response header */
+ value->data = build_logline(s, b_tail(value), b_room(value), &rule->arg.http.fmt);
+ if (!htx_add_header(htx, rule->arg.http.str, ist2(b_head(value), b_data(value))))
+ goto error;
+
+ /* if it is the last rule or the next one is not an early-hint or a
+ * conditional early-hint, terminate the current response.
+ */
+ next_rule = LIST_NEXT(&rule->list, typeof(rule), list);
+ if (&next_rule->list == s->current_rule_list || next_rule->action_ptr != http_action_early_hint || next_rule->cond) {
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
+ goto error;
+ if (!http_forward_proxy_resp(s, 0))
+ goto error;
+ /* reset the "103 in progress" marker */
+ s->txn->status = 0;
+ }
+
+ leave:
+ free_trash_chunk(value);
+ return ret;
+
+ error:
+ /* If an error occurred during an Early-hint rule, remove the incomplete
+ * HTTP 103 response from the buffer */
+ channel_htx_truncate(res, htx);
+ ret = ACT_RET_ERR;
+ s->txn->status = 0;
+ goto leave;
+}
+
+/* This function executes a set-header or add-header action. It builds the
+ * header value in a trash chunk from the rule's log-format string. The action
+ * to perform is found in <.action> (0 = set-header, 1 = add-header), as
+ * filled by parse_http_set_header(). On success, it returns ACT_RET_CONT. If
+ * an error occurs while soft rewrites are enabled, the action is canceled,
+ * but the rule processing continues. Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_set_header(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ enum act_return ret = ACT_RET_CONT;
+ struct buffer *replace;
+ struct http_hdr_ctx ctx;
+ struct ist n, v;
+
+ replace = alloc_trash_chunk();
+ if (!replace)
+ goto fail_alloc;
+
+ /* build the header value from the log-format string */
+ replace->data = build_logline(s, replace->area, replace->size, &rule->arg.http.fmt);
+ n = rule->arg.http.str;
+ v = ist2(replace->area, replace->data);
+
+ if (rule->action == 0) { // set-header
+ /* remove all occurrences of the header */
+ ctx.blk = NULL;
+ while (http_find_header(htx, n, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ }
+
+ /* Now add header */
+ if (!http_add_header(htx, n, v))
+ goto fail_rewrite;
+
+ leave:
+ free_trash_chunk(replace);
+ return ret;
+
+ fail_alloc:
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+
+ fail_rewrite:
+ /* account the rewrite failure on every involved side */
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ /* only report a hard error when soft rewrites are disabled */
+ if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
+ ret = ACT_RET_ERR;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ }
+ goto leave;
+}
+
+/* Parse a "set-header", "add-header" or "early-hint" actions. It takes an
+ * header name and a log-format string as arguments. It returns ACT_RET_PRS_OK
+ * on success, ACT_RET_PRS_ERR on error.
+ *
+ * Note: same function is used for the request and the response. However
+ * "early-hint" rules are only supported for request rules.
+ */
+static enum act_parse_ret parse_http_set_header(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cap = 0, cur_arg;
+ const char *p;
+
+ /* the keyword's first letter selects the action:
+ * 'e' = early-hint, 's' = set-header, otherwise add-header
+ */
+ if (args[*orig_arg-1][0] == 'e') {
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_early_hint;
+ }
+ else {
+ if (args[*orig_arg-1][0] == 's')
+ rule->action = 0; // set-header
+ else
+ rule->action = 1; // add-header
+ rule->action_ptr = http_action_set_header;
+ }
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg] || !*args[cur_arg+1]) {
+ memprintf(err, "expects exactly 2 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+
+
+ rule->arg.http.str = ist(strdup(args[cur_arg]));
+
+ /* restrict the sample-fetch capabilities to what the proxy can see */
+ if (rule->from == ACT_F_HTTP_REQ) {
+ px->conf.args.ctx = ARGC_HRQ;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRQ_HDR;
+ }
+ else{
+ px->conf.args.ctx = ARGC_HRS;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRS_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRS_HDR;
+ }
+
+ cur_arg++;
+ if (!parse_logformat_string(args[cur_arg], px, &rule->arg.http.fmt, LOG_OPT_HTTP, cap, err)) {
+ istfree(&rule->arg.http.str);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* remember where the last log-format string was parsed for reporting */
+ free(px->conf.lfs_file);
+ px->conf.lfs_file = strdup(px->conf.args.file);
+ px->conf.lfs_line = px->conf.args.line;
+
+ /* some characters are totally forbidden in header names and
+ * may happen by accident when writing configs, causing strange
+ * failures in field. Better catch these ones early, nobody will
+ * miss them. In particular, a colon at the end (or anywhere
+ * after the first char) or a space/cr anywhere due to misplaced
+ * quotes are hard to spot.
+ */
+ for (p = istptr(rule->arg.http.str); p < istend(rule->arg.http.str); p++) {
+ if (HTTP_IS_TOKEN(*p))
+ continue;
+ /* a colon is tolerated as the very first character only */
+ if (p == istptr(rule->arg.http.str) && *p == ':')
+ continue;
+ /* we only report this as-is but it will not cause an error */
+ memprintf(err, "header name '%s' contains forbidden character '%c'", istptr(rule->arg.http.str), *p);
+ break;
+ }
+
+ *orig_arg = cur_arg + 1;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a replace-header or replace-value action. It builds
+ * the replacement string in a trash chunk from the rule's log-format string.
+ * The action to perform is found in <.action> (0 = replace-header,
+ * 1 = replace-value), as filled by parse_http_replace_header(). On success,
+ * it returns ACT_RET_CONT. If an error occurs while soft rewrites are
+ * enabled, the action is canceled, but the rule processing continues.
+ * Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_replace_header(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ enum act_return ret = ACT_RET_CONT;
+ struct buffer *replace;
+ int r;
+
+ replace = alloc_trash_chunk();
+ if (!replace)
+ goto fail_alloc;
+
+ /* build the replacement value from the log-format string */
+ replace->data = build_logline(s, replace->area, replace->size, &rule->arg.http.fmt);
+
+ /* last argument: 1 for replace-header (whole value), 0 for replace-value */
+ r = http_replace_hdrs(s, htx, rule->arg.http.str, replace->area, rule->arg.http.re, (rule->action == 0));
+ if (r == -1)
+ goto fail_rewrite;
+
+ leave:
+ free_trash_chunk(replace);
+ return ret;
+
+ fail_alloc:
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+
+ fail_rewrite:
+ /* account the rewrite failure on every involved side */
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+
+ /* only report a hard error when soft rewrites are disabled */
+ if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
+ ret = ACT_RET_ERR;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ }
+ goto leave;
+}
+
+/* Parse a "replace-header" or "replace-value" actions. It takes an header name,
+ * a regex and replacement string as arguments. It returns ACT_RET_PRS_OK on
+ * success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_replace_header(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cap = 0, cur_arg;
+
+ /* the 9th character distinguishes "replace-header" from "replace-value" */
+ if (args[*orig_arg-1][8] == 'h')
+ rule->action = 0; // replace-header
+ else
+ rule->action = 1; // replace-value
+ rule->action_ptr = http_action_replace_header;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg] || !*args[cur_arg+1] || !*args[cur_arg+2]) {
+ memprintf(err, "expects exactly 3 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+
+ rule->arg.http.str = ist(strdup(args[cur_arg]));
+
+ /* second argument is the regex matched against the header value */
+ cur_arg++;
+ if (!(rule->arg.http.re = regex_comp(args[cur_arg], 1, 1, err))) {
+ istfree(&rule->arg.http.str);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* restrict the sample-fetch capabilities to what the proxy can see */
+ if (rule->from == ACT_F_HTTP_REQ) {
+ px->conf.args.ctx = ARGC_HRQ;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRQ_HDR;
+ }
+ else{
+ px->conf.args.ctx = ARGC_HRS;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRS_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRS_HDR;
+ }
+
+ /* third argument is the log-format replacement string */
+ cur_arg++;
+ if (!parse_logformat_string(args[cur_arg], px, &rule->arg.http.fmt, LOG_OPT_HTTP, cap, err)) {
+ istfree(&rule->arg.http.str);
+ regex_free(rule->arg.http.re);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* remember where the last log-format string was parsed for reporting */
+ free(px->conf.lfs_file);
+ px->conf.lfs_file = strdup(px->conf.args.file);
+ px->conf.lfs_line = px->conf.args.line;
+
+ *orig_arg = cur_arg + 1;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a del-header action, removing every header whose
+ * name matches <.arg.http.str> according to the matching method stored in
+ * <.action> by parse_http_del_header(). It returns ACT_RET_CONT on success
+ * and ACT_RET_ERR when the matching method is unknown.
+ */
+static enum act_return http_action_del_header(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
+ struct htx *htx = htxbuf(&msg->chn->buf);
+ struct http_hdr_ctx ctx;
+
+ /* remove all occurrences of the header */
+ ctx.blk = NULL;
+ switch (rule->action) {
+ case PAT_MATCH_STR: /* exact name match */
+ while (http_find_header(htx, rule->arg.http.str, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ break;
+ case PAT_MATCH_BEG: /* name prefix match */
+ while (http_find_pfx_header(htx, rule->arg.http.str, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ break;
+ case PAT_MATCH_END: /* name suffix match */
+ while (http_find_sfx_header(htx, rule->arg.http.str, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ break;
+ case PAT_MATCH_SUB: /* name substring match */
+ while (http_find_sub_header(htx, rule->arg.http.str, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ break;
+ case PAT_MATCH_REG: /* name regex match */
+ while (http_match_header(htx, rule->arg.http.re, &ctx, 1))
+ http_remove_header(htx, &ctx);
+ break;
+ default:
+ return ACT_RET_ERR;
+ }
+ return ACT_RET_CONT;
+}
+
+/* Parse a "del-header" action. It takes a header name as required argument,
+ * optionally followed by "-m" and a matching method applied to the header
+ * name ("str", "beg", "end", "sub" or "reg"). Default matching method is
+ * exact match (-m str). It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_del_header(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg;
+ int pat_idx;
+
+ /* set exact matching (-m str) as default */
+ rule->action = PAT_MATCH_STR;
+ rule->action_ptr = http_action_del_header;
+ rule->release_ptr = release_http_action;
+ LIST_INIT(&rule->arg.http.fmt);
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg]) {
+ memprintf(err, "expects at least 1 argument");
+ return ACT_RET_PRS_ERR;
+ }
+
+ rule->arg.http.str = ist(strdup(args[cur_arg]));
+ px->conf.args.ctx = (rule->from == ACT_F_HTTP_REQ ? ARGC_HRQ : ARGC_HRS);
+
+ if (strcmp(args[cur_arg+1], "-m") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg+1]) {
+ memprintf(err, "-m flag expects exactly 1 argument");
+ /* free the duplicated header name, as the other parsers
+ * in this file do on their error paths
+ */
+ istfree(&rule->arg.http.str);
+ return ACT_RET_PRS_ERR;
+ }
+
+ cur_arg++;
+ pat_idx = pat_find_match_name(args[cur_arg]);
+ switch (pat_idx) {
+ case PAT_MATCH_REG:
+ /* for regex matching, <str> holds the pattern to compile */
+ if (!(rule->arg.http.re = regex_comp(rule->arg.http.str.ptr, 1, 1, err))) {
+ istfree(&rule->arg.http.str);
+ return ACT_RET_PRS_ERR;
+ }
+ __fallthrough;
+ case PAT_MATCH_STR:
+ case PAT_MATCH_BEG:
+ case PAT_MATCH_END:
+ case PAT_MATCH_SUB:
+ rule->action = pat_idx;
+ break;
+ default:
+ memprintf(err, "-m with unsupported matching method '%s'", args[cur_arg]);
+ istfree(&rule->arg.http.str);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+
+ *orig_arg = cur_arg + 1;
+ return ACT_RET_PRS_OK;
+}
+
+/* Release memory allocated by an http redirect action: unlink the redirect
+ * rule from its list and free it. A NULL redirect pointer is a no-op.
+ */
+static void release_http_redir(struct act_rule *rule)
+{
+ struct redirect_rule *redir = rule->arg.redir;
+
+ if (redir) {
+ LIST_DELETE(&redir->list);
+ http_free_redirect_rule(redir);
+ }
+}
+
+/* Parse a "redirect" action. It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_redirect(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct redirect_rule *redir;
+ int dir, cur_arg;
+
+ rule->action = ACT_HTTP_REDIR;
+ rule->release_ptr = release_http_redir;
+
+ cur_arg = *orig_arg;
+
+ /* direction: 0 for request rules, 1 for response rules */
+ dir = (rule->from == ACT_F_HTTP_REQ ? 0 : 1);
+ if ((redir = http_parse_redirect_rule(px->conf.args.file, px->conf.args.line, px, &args[cur_arg], err, 1, dir)) == NULL)
+ return ACT_RET_PRS_ERR;
+
+ /* unless "ignore-empty" was set, a redirect terminates rule evaluation */
+ if (!(redir->flags & REDIRECT_FLAG_IGNORE_EMPTY))
+ rule->flags |= ACT_FLAG_FINAL;
+
+ rule->arg.redir = redir;
+ /* the condition parsed by the redirect rule becomes the action rule's */
+ rule->cond = redir->cond;
+ redir->cond = NULL;
+
+ /* skip all arguments: they were consumed by http_parse_redirect_rule() */
+ while (*args[cur_arg])
+ cur_arg++;
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a add-acl, del-acl, set-map or del-map action
+ * (selected by <.action>: 0=add-acl, 1=set-map, 2=del-acl, 3=del-map). On
+ * success, it returns ACT_RET_CONT. Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_set_map(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct pat_ref *ref;
+ struct buffer *key = NULL, *value = NULL;
+ enum act_return ret = ACT_RET_CONT;
+
+ /* collect reference; an unknown reference silently skips the action */
+ ref = pat_ref_lookup(rule->arg.map.ref);
+ if (!ref)
+ goto leave;
+
+ /* allocate key */
+ key = alloc_trash_chunk();
+ if (!key)
+ goto fail_alloc;
+
+ /* collect key: built from the rule's key log-format string */
+ key->data = build_logline(s, key->area, key->size, &rule->arg.map.key);
+ key->area[key->data] = '\0';
+
+ /* all reference mutations below are serialized by the write lock */
+ switch (rule->action) {
+ case 0: // add-acl
+ /* add entry only if it does not already exist */
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ if (pat_ref_find_elt(ref, key->area) == NULL)
+ pat_ref_add(ref, key->area, NULL, NULL);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ break;
+
+ case 1: // set-map
+ {
+ struct pat_ref_elt *elt;
+
+ /* allocate value */
+ value = alloc_trash_chunk();
+ if (!value)
+ goto fail_alloc;
+
+ /* collect value: built from the rule's value log-format string */
+ value->data = build_logline(s, value->area, value->size, &rule->arg.map.value);
+ value->area[value->data] = '\0';
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ elt = pat_ref_find_elt(ref, key->area);
+ if (elt) {
+ /* update entry if it exists */
+ pat_ref_set(ref, key->area, value->area, NULL, elt);
+ }
+ else {
+ /* insert a new entry */
+ pat_ref_add(ref, key->area, value->area, NULL);
+ }
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ break;
+ }
+
+ case 2: // del-acl
+ case 3: // del-map
+ /* returned code: 1=ok, 0=ko */
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ref->lock);
+ pat_ref_delete(ref, key->area);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ref->lock);
+ break;
+
+ default:
+ ret = ACT_RET_ERR;
+ }
+
+
+ leave:
+ free_trash_chunk(key);
+ free_trash_chunk(value);
+ return ret;
+
+ fail_alloc:
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ ret = ACT_RET_ERR;
+ goto leave;
+}
+
+/* Release memory allocated by an http map/acl action: the pattern reference
+ * name, the key log-format list and, for set-map (action == 1), the value
+ * log-format list.
+ */
+static void release_http_map(struct act_rule *rule)
+{
+ struct logformat_node *node, *back;
+
+ free(rule->arg.map.ref);
+
+ list_for_each_entry_safe(node, back, &rule->arg.map.key, list) {
+ LIST_DELETE(&node->list);
+ release_sample_expr(node->expr);
+ free(node->arg);
+ free(node);
+ }
+
+ /* only set-map carries a value format list */
+ if (rule->action == 1) {
+ list_for_each_entry_safe(node, back, &rule->arg.map.value, list) {
+ LIST_DELETE(&node->list);
+ release_sample_expr(node->expr);
+ free(node->arg);
+ free(node);
+ }
+ }
+}
+
+/* Parse an "add-acl", "del-acl", "set-map" or "del-map" action. It takes one or
+ * two log-format strings as argument depending on the action. The action is
+ * stored in <.action> as an int (0=add-acl, 1=set-map, 2=del-acl,
+ * 3=del-map). It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_set_map(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cap = 0, cur_arg;
+
+ /* identify the action from distinguishing characters of the keyword
+ * (args[*orig_arg-1]): first char separates add-acl/set-map, the char
+ * at offset 4 separates del-acl from del-map.
+ */
+ if (args[*orig_arg-1][0] == 'a') // add-acl
+ rule->action = 0;
+ else if (args[*orig_arg-1][0] == 's') // set-map
+ rule->action = 1;
+ else if (args[*orig_arg-1][4] == 'a') // del-acl
+ rule->action = 2;
+ else if (args[*orig_arg-1][4] == 'm') // del-map
+ rule->action = 3;
+ else {
+ /* NOTE(review): this reports args[0] while the keyword being
+ * parsed is args[*orig_arg-1]; the message may show the wrong
+ * token — verify against how callers fill <args>.
+ */
+ memprintf(err, "internal error: unhandled action '%s'", args[0]);
+ return ACT_RET_PRS_ERR;
+ }
+ rule->action_ptr = http_action_set_map;
+ rule->release_ptr = release_http_map;
+
+ cur_arg = *orig_arg;
+ if (rule->action == 1 && (!*args[cur_arg] || !*args[cur_arg+1])) {
+ /* 2 args for set-map */
+ memprintf(err, "expects exactly 2 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+ else if (!*args[cur_arg]) {
+ /* only one arg for other actions */
+ memprintf(err, "expects exactly 1 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /*
+ * '+ 8' for 'set-map(' (same for del-map)
+ * '- 9' for 'set-map(' + trailing ')' (same for del-map)
+ * (also holds for 'add-acl(' and 'del-acl(', both 8 chars as well)
+ */
+ rule->arg.map.ref = my_strndup(args[cur_arg-1] + 8, strlen(args[cur_arg-1]) - 9);
+
+ /* select the log-format parsing context and the sample validity mask
+ * depending on the rule direction and the proxy capabilities
+ */
+ if (rule->from == ACT_F_HTTP_REQ) {
+ px->conf.args.ctx = ARGC_HRQ;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRQ_HDR;
+ }
+ else{
+ px->conf.args.ctx = ARGC_HRS;
+ if (px->cap & PR_CAP_FE)
+ cap |= SMP_VAL_FE_HRS_HDR;
+ if (px->cap & PR_CAP_BE)
+ cap |= SMP_VAL_BE_HRS_HDR;
+ }
+
+ /* key pattern */
+ LIST_INIT(&rule->arg.map.key);
+ if (!parse_logformat_string(args[cur_arg], px, &rule->arg.map.key, LOG_OPT_HTTP, cap, err)) {
+ free(rule->arg.map.ref);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (rule->action == 1) {
+ /* value pattern for set-map only */
+ cur_arg++;
+ LIST_INIT(&rule->arg.map.value);
+ if (!parse_logformat_string(args[cur_arg], px, &rule->arg.map.value, LOG_OPT_HTTP, cap, err)) {
+ free(rule->arg.map.ref);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+
+ /* remember where the log-format strings were declared, for later error reporting */
+ free(px->conf.lfs_file);
+ px->conf.lfs_file = strdup(px->conf.args.file);
+ px->conf.lfs_line = px->conf.args.line;
+
+ *orig_arg = cur_arg + 1;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a track-sc* actions. On success, it returns
+ * ACT_RET_CONT. Otherwise ACT_RET_ERR is returned.
+ */
+static enum act_return http_action_track_sc(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct stktable *t;
+ struct stksess *ts;
+ struct stktable_key *key;
+ void *ptr1, *ptr2, *ptr3, *ptr4, *ptr5, *ptr6;
+ int opt;
+
+ ptr1 = ptr2 = ptr3 = ptr4 = ptr5 = ptr6 = NULL;
+ opt = ((rule->from == ACT_F_HTTP_REQ) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES) | SMP_OPT_FINAL;
+
+ t = rule->arg.trk_ctr.table.t;
+
+ /* rule->action holds the sc number (set by parse_http_track_sc);
+ * do nothing if this counter is already tracking an entry
+ */
+ if (stkctr_entry(&s->stkctr[rule->action]))
+ goto end;
+
+ key = stktable_fetch_key(t, s->be, sess, s, opt, rule->arg.trk_ctr.expr, NULL);
+
+ /* key not fetchable or entry not creatable: silently continue */
+ if (!key)
+ goto end;
+ ts = stktable_get_entry(t, key);
+ if (!ts)
+ goto end;
+
+ stream_track_stkctr(&s->stkctr[rule->action], t, ts);
+
+ /* let's count a new HTTP request as it's the first time we do it */
+ ptr1 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
+ ptr2 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
+
+ /* When the client triggers a 4xx from the server, it's most often due
+ * to a missing object or permission. These events should be tracked
+ * because if they happen often, it may indicate a brute force or a
+ * vulnerability scan. Normally this is done when receiving the response
+ * but here we're tracking after this ought to have been done so we have
+ * to do it on purpose.
+ */
+ if (rule->from == ACT_F_HTTP_RES && (unsigned)(s->txn->status - 400) < 100) {
+ ptr3 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_CNT);
+ ptr4 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_RATE);
+ }
+
+ /* same for server failures (5xx), excluding 501 and 505 which reflect
+ * capabilities rather than failures
+ */
+ if (rule->from == ACT_F_HTTP_RES && (unsigned)(s->txn->status - 500) < 100 &&
+ s->txn->status != 501 && s->txn->status != 505) {
+ ptr5 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_FAIL_CNT);
+ ptr6 = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_FAIL_RATE);
+ }
+
+ /* update all counters the table stores, under the entry's write lock */
+ if (ptr1 || ptr2 || ptr3 || ptr4 || ptr5 || ptr6) {
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ if (ptr1)
+ stktable_data_cast(ptr1, std_t_uint)++;
+ if (ptr2)
+ update_freq_ctr_period(&stktable_data_cast(ptr2, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
+ if (ptr3)
+ stktable_data_cast(ptr3, std_t_uint)++;
+ if (ptr4)
+ update_freq_ctr_period(&stktable_data_cast(ptr4, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
+ if (ptr5)
+ stktable_data_cast(ptr5, std_t_uint)++;
+ if (ptr6)
+ update_freq_ctr_period(&stktable_data_cast(ptr6, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_FAIL_RATE].u, 1);
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(t, ts, 0);
+ }
+
+ stkctr_set_flags(&s->stkctr[rule->action], STKCTR_TRACK_CONTENT);
+ if (sess->fe != s->be)
+ stkctr_set_flags(&s->stkctr[rule->action], STKCTR_TRACK_BACKEND);
+
+ end:
+ return ACT_RET_CONT;
+}
+
+/* Release the sample expression attached to a track-sc* action. */
+static void release_http_track_sc(struct act_rule *rule)
+{
+ release_sample_expr(rule->arg.trk_ctr.expr);
+}
+
+/* Parse a "track-sc*" actions. It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_track_sc(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ struct sample_expr *expr;
+ unsigned int where;
+ unsigned int tsc_num;
+ const char *tsc_num_str;
+ int cur_arg;
+
+ /* the counter number follows the "track-sc" prefix (8 chars) in the keyword */
+ tsc_num_str = &args[*orig_arg-1][8];
+ if (cfg_parse_track_sc_num(&tsc_num, tsc_num_str, tsc_num_str + strlen(tsc_num_str), err) == -1)
+ return ACT_RET_PRS_ERR;
+
+ /* parse the sample expression used as the tracking key */
+ cur_arg = *orig_arg;
+ expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line,
+ err, &px->conf.args, NULL);
+ if (!expr)
+ return ACT_RET_PRS_ERR;
+
+ /* build the validity mask for the current direction/capabilities and
+ * make sure the fetch method can be used there
+ */
+ where = 0;
+ if (px->cap & PR_CAP_FE)
+ where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_FE_HRQ_HDR : SMP_VAL_FE_HRS_HDR);
+ if (px->cap & PR_CAP_BE)
+ where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_BE_HRQ_HDR : SMP_VAL_BE_HRS_HDR);
+
+ if (!(expr->fetch->val & where)) {
+ memprintf(err, "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[cur_arg-1], sample_src_names(expr->fetch->use));
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* optional "table <name>" argument */
+ if (strcmp(args[cur_arg], "table") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(err, "missing table name");
+ release_sample_expr(expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* we copy the table name for now, it will be resolved later */
+ rule->arg.trk_ctr.table.n = strdup(args[cur_arg]);
+ cur_arg++;
+ }
+
+ /* <.action> stores the sc number, used as an index by http_action_track_sc() */
+ rule->action = tsc_num;
+ rule->arg.trk_ctr.expr = expr;
+ rule->action_ptr = http_action_track_sc;
+ rule->release_ptr = release_http_track_sc;
+ rule->check_ptr = check_trk_action;
+
+ *orig_arg = cur_arg;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a "set-timeout" action. The timeout value comes
+ * either from a sample expression (<.arg.timeout.expr>) evaluated as a signed
+ * integer, or from the static value <.arg.timeout.value>. It always returns
+ * ACT_RET_CONT; if the expression cannot be fetched the timeout is left
+ * unchanged.
+ */
+static enum act_return action_timeout_set_stream_timeout(struct act_rule *rule,
+ struct proxy *px,
+ struct session *sess,
+ struct stream *s,
+ int flags)
+{
+ struct sample *key;
+
+ if (rule->arg.timeout.expr) {
+ key = sample_fetch_as_type(px, sess, s, SMP_OPT_FINAL, rule->arg.timeout.expr, SMP_T_SINT);
+ if (!key)
+ return ACT_RET_CONT;
+
+ stream_set_timeout(s, rule->arg.timeout.type, MS_TO_TICKS(key->data.u.sint));
+ }
+ else {
+ stream_set_timeout(s, rule->arg.timeout.type, MS_TO_TICKS(rule->arg.timeout.value));
+ }
+
+ return ACT_RET_CONT;
+}
+
+/* Parse a "set-timeout" action. It expects two arguments (the timeout name
+ * and its value or expression), delegated to cfg_parse_rule_set_timeout().
+ * Returns ACT_RET_PRS_ERR if parsing error.
+ */
+static enum act_parse_ret parse_http_set_timeout(const char **args,
+ int *orig_arg,
+ struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = action_timeout_set_stream_timeout;
+ rule->release_ptr = release_timeout_action;
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg] || !*args[cur_arg + 1]) {
+ memprintf(err, "expects exactly 2 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (cfg_parse_rule_set_timeout(args, cur_arg, rule, px, err) == -1) {
+ return ACT_RET_PRS_ERR;
+ }
+
+ *orig_arg = cur_arg + 2;
+
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a strict-mode actions. On success, it always returns
+ * ACT_RET_CONT. Strict mode is implemented by clearing HTTP_MSGF_SOFT_RW on
+ * the message of the rule's direction (request or response).
+ */
+static enum act_return http_action_strict_mode(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
+
+ if (rule->action == 0) // strict-mode on
+ msg->flags &= ~HTTP_MSGF_SOFT_RW;
+ else // strict-mode off
+ msg->flags |= HTTP_MSGF_SOFT_RW;
+ return ACT_RET_CONT;
+}
+
+/* Parse a "strict-mode" action. It takes one argument, "on" (action 0) or
+ * "off" (action 1). It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_strict_mode(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg;
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg]) {
+ memprintf(err, "expects exactly 1 arguments");
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (strcasecmp(args[cur_arg], "on") == 0)
+ rule->action = 0; // strict-mode on
+ else if (strcasecmp(args[cur_arg], "off") == 0)
+ rule->action = 1; // strict-mode off
+ else {
+ memprintf(err, "Unexpected value '%s'. Only 'on' and 'off' are supported", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ rule->action_ptr = http_action_strict_mode;
+
+ *orig_arg = cur_arg + 1;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function executes a return action. It builds an HTX message from an
+ * errorfile, a raw file or a log-format string, depending on <.action>
+ * value. On success, it returns ACT_RET_ABRT. If an error occurs ACT_RET_ERR is
+ * returned.
+ */
+static enum act_return http_action_return(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct channel *req = &s->req;
+
+ s->txn->status = rule->arg.http_reply->status;
+
+ /* flag a local (non-server) termination unless something already failed */
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_LOCAL;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= ((rule->from == ACT_F_HTTP_REQ) ? SF_FINST_R : SF_FINST_H);
+
+ if (http_reply_message(s, rule->arg.http_reply) == -1)
+ return ACT_RET_ERR;
+
+ if (rule->from == ACT_F_HTTP_REQ) {
+ /* let's log the request time */
+ s->logs.request_ts = now_ns;
+ req->analysers &= AN_REQ_FLT_END;
+
+ if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
+ }
+
+ return ACT_RET_ABRT;
+}
+
+/* Parse a "return" action. It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error. It relies on http_parse_http_reply() to set
+ * <.arg.http_reply> (default status 200).
+ */
+static enum act_parse_ret parse_http_return(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ /* Prepare parsing of log-format strings */
+ px->conf.args.ctx = ((rule->from == ACT_F_HTTP_REQ) ? ARGC_HRQ : ARGC_HRS);
+ rule->arg.http_reply = http_parse_http_reply(args, orig_arg, px, 200, err);
+ if (!rule->arg.http_reply)
+ return ACT_RET_PRS_ERR;
+
+ /* the action terminates rule evaluation */
+ rule->flags |= ACT_FLAG_FINAL;
+ rule->action = ACT_CUSTOM;
+ rule->check_ptr = check_act_http_reply;
+ rule->action_ptr = http_action_return;
+ rule->release_ptr = release_act_http_reply;
+ return ACT_RET_PRS_OK;
+}
+
+
+
+/* This function executes a wait-for-body action. It waits for the message
+ * payload for a max configured time (.arg.p[0]) and eventually for only first
+ * <arg.p[1]> bytes (0 means no limit). It relies on http_wait_for_msg_body()
+ * function. it returns ACT_RET_CONT when conditions are met to stop to wait.
+ * Otherwise ACT_RET_YIELD is returned to wait for more data. ACT_RET_INV is
+ * returned if a parsing error is raised by lower level and ACT_RET_ERR if an
+ * internal error occurred. Finally ACT_RET_ABRT is returned when a timeout
+ * occurred.
+ */
+static enum act_return http_action_wait_for_body(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct channel *chn = ((rule->from == ACT_F_HTTP_REQ) ? &s->req : &s->res);
+ /* the parser packed both values as pointer-sized integers in arg.act.p[] */
+ unsigned int time = (uintptr_t)rule->arg.act.p[0];
+ unsigned int bytes = (uintptr_t)rule->arg.act.p[1];
+
+ /* map the HTTP-rule result space onto the action result space */
+ switch (http_wait_for_msg_body(s, chn, time, bytes)) {
+ case HTTP_RULE_RES_CONT:
+ return ACT_RET_CONT;
+ case HTTP_RULE_RES_YIELD:
+ return ACT_RET_YIELD;
+ case HTTP_RULE_RES_BADREQ:
+ return ACT_RET_INV;
+ case HTTP_RULE_RES_ERROR:
+ return ACT_RET_ERR;
+ case HTTP_RULE_RES_ABRT:
+ return ACT_RET_ABRT;
+ default:
+ return ACT_RET_ERR;
+ }
+}
+
+/* Parse a "wait-for-body" action. Syntax: "time <time> [ at-least <bytes> ]",
+ * in any order; "time" is mandatory. It returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret parse_http_wait_for_body(const char **args, int *orig_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int cur_arg;
+ unsigned int time, bytes;
+ const char *res;
+
+ cur_arg = *orig_arg;
+ if (!*args[cur_arg]) {
+ memprintf(err, "expects time <time> [ at-least <bytes> ]");
+ return ACT_RET_PRS_ERR;
+ }
+
+ time = UINT_MAX; /* To be sure it is set */
+ bytes = 0; /* Default value, wait all the body */
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "time") == 0) {
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "missing argument for '%s'", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ res = parse_time_err(args[cur_arg+1], &time, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "time overflow (maximum value is 2147483647 ms or ~24.8 days)");
+ return ACT_RET_PRS_ERR;
+ }
+ if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "time underflow (minimum non-null value is 1 ms)");
+ return ACT_RET_PRS_ERR;
+ }
+ if (res) {
+ memprintf(err, "unexpected character '%c'", *res);
+ return ACT_RET_PRS_ERR;
+ }
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "at-least") == 0) {
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "missing argument for '%s'", args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+ }
+ res = parse_size_err(args[cur_arg+1], &bytes);
+ if (res) {
+ memprintf(err, "unexpected character '%c'", *res);
+ return ACT_RET_PRS_ERR;
+ }
+ cur_arg++;
+ }
+ else
+ break;
+ cur_arg++;
+ }
+
+ /* UINT_MAX sentinel still present: mandatory "time" keyword was missing */
+ if (time == UINT_MAX) {
+ memprintf(err, "expects time <time> [ at-least <bytes> ]");
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* pack both values as pointer-sized integers for http_action_wait_for_body() */
+ rule->arg.act.p[0] = (void *)(uintptr_t)time;
+ rule->arg.act.p[1] = (void *)(uintptr_t)bytes;
+
+ *orig_arg = cur_arg;
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = http_action_wait_for_body;
+ return ACT_RET_PRS_OK;
+}
+
+/************************************************************************/
+/* All supported http-request action keywords must be declared here. */
+/************************************************************************/
+
+/* Action keywords available in "http-request" rules. Entries flagged
+ * KWF_MATCH_PREFIX accept a suffix after the keyword (e.g. "set-map(<file>)",
+ * "track-sc0"), which their parser inspects via args[*orig_arg-1].
+ */
+static struct action_kw_list http_req_actions = {
+ .kw = {
+ { "add-acl", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "add-header", parse_http_set_header, 0 },
+ { "allow", parse_http_allow, 0 },
+ { "auth", parse_http_auth, 0 },
+ { "capture", parse_http_req_capture, 0 },
+ { "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "del-header", parse_http_del_header, 0 },
+ { "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "deny", parse_http_deny, 0 },
+ { "disable-l7-retry", parse_http_req_disable_l7_retry, 0 },
+ { "early-hint", parse_http_set_header, 0 },
+ { "normalize-uri", parse_http_normalize_uri, KWF_EXPERIMENTAL },
+ { "redirect", parse_http_redirect, 0 },
+ { "reject", parse_http_action_reject, 0 },
+ { "replace-header", parse_http_replace_header, 0 },
+ { "replace-path", parse_replace_uri, 0 },
+ { "replace-pathq", parse_replace_uri, 0 },
+ { "replace-uri", parse_replace_uri, 0 },
+ { "replace-value", parse_http_replace_header, 0 },
+ { "return", parse_http_return, 0 },
+ { "set-header", parse_http_set_header, 0 },
+ { "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "set-method", parse_set_req_line, 0 },
+ { "set-path", parse_set_req_line, 0 },
+ { "set-pathq", parse_set_req_line, 0 },
+ { "set-query", parse_set_req_line, 0 },
+ { "set-uri", parse_set_req_line, 0 },
+ { "strict-mode", parse_http_strict_mode, 0 },
+ { "tarpit", parse_http_deny, 0 },
+ { "track-sc", parse_http_track_sc, KWF_MATCH_PREFIX },
+ { "set-timeout", parse_http_set_timeout, 0 },
+ { "wait-for-body", parse_http_wait_for_body, 0 },
+ { NULL, NULL }
+ }
+};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);
+
+/* Action keywords available in "http-response" rules. */
+static struct action_kw_list http_res_actions = {
+ .kw = {
+ { "add-acl", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "add-header", parse_http_set_header, 0 },
+ { "allow", parse_http_allow, 0 },
+ { "capture", parse_http_res_capture, 0 },
+ { "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "del-header", parse_http_del_header, 0 },
+ { "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "deny", parse_http_deny, 0 },
+ { "redirect", parse_http_redirect, 0 },
+ { "replace-header", parse_http_replace_header, 0 },
+ { "replace-value", parse_http_replace_header, 0 },
+ { "return", parse_http_return, 0 },
+ { "set-header", parse_http_set_header, 0 },
+ { "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "set-status", parse_http_set_status, 0 },
+ { "strict-mode", parse_http_strict_mode, 0 },
+ { "track-sc", parse_http_track_sc, KWF_MATCH_PREFIX },
+ { "set-timeout", parse_http_set_timeout, 0 },
+ { "wait-for-body", parse_http_wait_for_body, 0 },
+ { NULL, NULL }
+ }
+};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);
+
+/* Action keywords available in "http-after-response" rules. */
+static struct action_kw_list http_after_res_actions = {
+ .kw = {
+ { "add-header", parse_http_set_header, 0 },
+ { "allow", parse_http_allow, 0 },
+ { "capture", parse_http_res_capture, 0 },
+ { "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "del-header", parse_http_del_header, 0 },
+ { "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "replace-header", parse_http_replace_header, 0 },
+ { "replace-value", parse_http_replace_header, 0 },
+ { "set-header", parse_http_set_header, 0 },
+ { "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
+ { "set-status", parse_http_set_status, 0 },
+ { "strict-mode", parse_http_strict_mode, 0 },
+ { NULL, NULL }
+ }
+};
+
+INITCALL1(STG_REGISTER, http_after_res_keywords_register, &http_after_res_actions);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/http_ana.c b/src/http_ana.c
new file mode 100644
index 0000000..178f874
--- /dev/null
+++ b/src/http_ana.c
@@ -0,0 +1,5153 @@
+/*
+ * HTTP protocol analyzer
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/acl.h>
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/backend.h>
+#include <haproxy/base64.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/filters.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_ext.h>
+#include <haproxy/htx.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/trace.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/vars.h>
+
+
+#define TRACE_SOURCE &trace_strm
+
+extern const char *stat_status_codes[];
+
+struct pool_head *pool_head_requri __read_mostly = NULL;
+struct pool_head *pool_head_capture __read_mostly = NULL;
+
+
+static void http_end_request(struct stream *s);
+static void http_end_response(struct stream *s);
+
+static void http_capture_headers(struct htx *htx, char **cap, struct cap_hdr *cap_hdr);
+static int http_del_hdr_value(char *start, char *end, char **from, char *next);
+static size_t http_fmt_req_line(const struct htx_sl *sl, char *str, size_t len);
+static void http_debug_stline(const char *dir, struct stream *s, const struct htx_sl *sl);
+static void http_debug_hdr(const char *dir, struct stream *s, const struct ist n, const struct ist v);
+
+static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct list *def_rules, struct list *rules, struct stream *s);
+static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct list *def_rules, struct list *rules, struct stream *s, uint8_t final);
+static enum rule_result http_req_restrict_header_names(struct stream *s, struct htx *htx, struct proxy *px);
+
+static void http_manage_client_side_cookies(struct stream *s, struct channel *req);
+static void http_manage_server_side_cookies(struct stream *s, struct channel *res);
+
+static int http_stats_check_uri(struct stream *s, struct http_txn *txn, struct proxy *px);
+static int http_handle_stats(struct stream *s, struct channel *req, struct proxy *px);
+
+static int http_handle_expect_hdr(struct stream *s, struct htx *htx, struct http_msg *msg);
+static int http_reply_100_continue(struct stream *s);
+
+/* This stream analyser waits for a complete HTTP request. It returns 1 if the
+ * processing can continue on next analysers, or zero if it either needs more
+ * data or wants to immediately abort the request (eg: timeout, error, ...). It
+ * is tied to AN_REQ_WAIT_HTTP and may remove itself from s->req.analysers
+ * when it has nothing left to do, and may remove any analyser when it wants to
+ * abort.
+ */
+int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
+{
+
+ /*
+ * We will analyze a complete HTTP request to check its syntax.
+ *
+ * Once the start line and all headers are received, we may perform a
+ * capture of the error (if any), and we will set a few fields. We also
+ * check for monitor-uri, logging and finally headers capture.
+ */
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->req;
+ struct htx *htx;
+ struct htx_sl *sl;
+ char http_ver;
+ int len;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+ if (unlikely(!IS_HTX_STRM(s))) {
+ /* It is only possible when a TCP stream is upgrade to HTTP.
+ * There is a transition period during which there is no
+ * data. The stream is still in raw mode and SF_IGNORE flag is
+ * still set. When this happens, the new mux is responsible to
+ * handle all errors. Thus we may leave immediately.
+ */
+ BUG_ON(!(s->flags & SF_IGNORE) || !c_empty(&s->req));
+
+ /* Don't connect for now */
+ channel_dont_connect(req);
+
+ /* An abort at this stage means we are performing a "destructive"
+ * HTTP upgrade (TCP>H2). In this case, we can leave.
+ */
+ if (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
+ s->logs.logwait = 0;
+ s->logs.level = 0;
+ stream_abort(s);
+ req->analysers &= AN_REQ_FLT_END;
+ req->analyse_exp = TICK_ETERNITY;
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+ return 1;
+ }
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+ return 0;
+ }
+
+ /* extract the HTTP version digit from "HTTP/<x>..." (offset 5 of the
+ * version string); 0 if the string is too short.
+ * NOTE(review): <sl> is dereferenced here without a NULL check; this
+ * assumes a start line is always present at this point (also implied
+ * by the BUG_ON() on the first HTX block below) — confirm upstream.
+ */
+ htx = htxbuf(&req->buf);
+ sl = http_get_stline(htx);
+ len = HTX_SL_REQ_VLEN(sl);
+ if (len < 6) {
+ http_ver = 0;
+ }
+ else {
+ char *ptr;
+
+ ptr = HTX_SL_REQ_VPTR(sl);
+ http_ver = ptr[5] - '0';
+ }
+
+ /* Parsing errors are caught here */
+ if (htx->flags & (HTX_FL_PARSING_ERROR|HTX_FL_PROCESSING_ERROR)) {
+ stream_inc_http_req_ctr(s);
+ proxy_inc_fe_req_ctr(sess->listener, sess->fe, http_ver);
+ if (htx->flags & HTX_FL_PARSING_ERROR) {
+ stream_inc_http_err_ctr(s);
+ goto return_bad_req;
+ }
+ else
+ goto return_int_err;
+ }
+
+ /* we're speaking HTTP here, so let's speak HTTP to the client */
+ s->srv_error = http_return_srv_error;
+
+ msg->msg_state = HTTP_MSG_BODY;
+ stream_inc_http_req_ctr(s);
+ proxy_inc_fe_req_ctr(sess->listener, sess->fe, http_ver); /* one more valid request for this FE */
+
+ /* kill the pending keep-alive timeout */
+ req->analyse_exp = TICK_ETERNITY;
+
+ BUG_ON(htx_get_first_type(htx) != HTX_BLK_REQ_SL);
+
+ /* 0: we might have to print this header in debug mode */
+ if (unlikely((global.mode & MODE_DEBUG) &&
+ (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
+ int32_t pos;
+
+ http_debug_stline("clireq", s, sl);
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_EOH)
+ break;
+ if (type != HTX_BLK_HDR)
+ continue;
+
+ http_debug_hdr("clihdr", s,
+ htx_get_blk_name(htx, blk),
+ htx_get_blk_value(htx, blk));
+ }
+ }
+
+ /*
+ * 1: identify the method and the version. Also set HTTP flags
+ */
+ txn->meth = sl->info.req.meth;
+ if (sl->flags & HTX_SL_F_VER_11)
+ msg->flags |= HTTP_MSGF_VER_11;
+ msg->flags |= HTTP_MSGF_XFER_LEN;
+ if (sl->flags & HTX_SL_F_CLEN)
+ msg->flags |= HTTP_MSGF_CNT_LEN;
+ else if (sl->flags & HTX_SL_F_CHNK)
+ msg->flags |= HTTP_MSGF_TE_CHNK;
+ if (sl->flags & HTX_SL_F_BODYLESS)
+ msg->flags |= HTTP_MSGF_BODYLESS;
+ if (sl->flags & HTX_SL_F_CONN_UPG)
+ msg->flags |= HTTP_MSGF_CONN_UPG;
+
+ /* we can make use of server redirect on GET and HEAD */
+ if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
+ s->flags |= SF_REDIRECTABLE;
+ else if (txn->meth == HTTP_METH_OTHER && isteqi(htx_sl_req_meth(sl), ist("PRI"))) {
+ /* PRI is reserved for the HTTP/2 preface */
+ goto return_bad_req;
+ }
+
+ /*
+ * 2: check if the URI matches the monitor_uri. We have to do this for
+ * every request which gets in, because the monitor-uri is defined by
+ * the frontend. If the monitor-uri starts with a '/', the matching is
+ * done against the request's path. Otherwise, the request's uri is
+ * used. It is a workaround to let HTTP/2 health-checks work as
+ * expected.
+ */
+ if (unlikely(isttest(sess->fe->monitor_uri))) {
+ const struct ist monitor_uri = sess->fe->monitor_uri;
+ struct http_uri_parser parser = http_uri_parser_init(htx_sl_req_uri(sl));
+
+ if ((istptr(monitor_uri)[0] == '/' &&
+ isteq(http_parse_path(&parser), monitor_uri)) ||
+ isteq(htx_sl_req_uri(sl), monitor_uri)) {
+ /*
+ * We have found the monitor URI
+ */
+ struct acl_cond *cond;
+
+ s->flags |= SF_MONITOR;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
+
+ /* Check if we want to fail this monitor request or not */
+ list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
+ int ret = acl_exec_cond(cond, sess->fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+
+ ret = acl_pass(ret);
+ if (cond->pol == ACL_COND_UNLESS)
+ ret = !ret;
+
+ if (ret) {
+ /* we fail this request, let's return 503 service unavail */
+ txn->status = 503;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_LOCAL; /* we don't want a real error here */
+ goto return_prx_cond;
+ }
+ }
+
+ /* nothing to fail, let's reply normally */
+ txn->status = 200;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_LOCAL; /* we don't want a real error here */
+ goto return_prx_cond;
+ }
+ }
+
+ /*
+ * 3: Maybe we have to copy the original REQURI for the logs ?
+ * Note: we cannot log anymore if the request has been
+ * classified as invalid.
+ */
+ if (unlikely(s->logs.logwait & LW_REQ)) {
+ /* we have a complete HTTP request that we must log */
+ if ((txn->uri = pool_alloc(pool_head_requri)) != NULL) {
+ size_t len;
+
+ len = http_fmt_req_line(sl, txn->uri, global.tune.requri_len - 1);
+ txn->uri[len] = 0;
+
+ if (!(s->logs.logwait &= ~(LW_REQ|LW_INIT)))
+ s->do_log(s);
+ } else {
+ ha_alert("HTTP logging : out of memory.\n");
+ }
+ }
+
+ /* if the frontend has "option http-use-proxy-header", we'll check if
+ * we have what looks like a proxied connection instead of a connection,
+ * and in this case set the TX_USE_PX_CONN flag to use Proxy-connection.
+ * Note that this is *not* RFC-compliant, however browsers and proxies
+ * happen to do that despite being non-standard :-(
+ * We consider that a request not beginning with either '/' or '*' is
+ * a proxied connection, which covers both "scheme://location" and
+ * CONNECT ip:port.
+ */
+ if ((sess->fe->options2 & PR_O2_USE_PXHDR) &&
+ *HTX_SL_REQ_UPTR(sl) != '/' && *HTX_SL_REQ_UPTR(sl) != '*')
+ txn->flags |= TX_USE_PX_CONN;
+
+ /* 5: we may need to capture headers */
+ if (unlikely((s->logs.logwait & LW_REQHDR) && s->req_cap))
+ http_capture_headers(htx, s->req_cap, sess->fe->req_cap);
+
+ /* we may have to wait for the request's body */
+ if (s->be->options & PR_O_WREQ_BODY)
+ req->analysers |= AN_REQ_HTTP_BODY;
+
+ /*
+ * RFC7234#4:
+ * A cache MUST write through requests with methods
+ * that are unsafe (Section 4.2.1 of [RFC7231]) to
+ * the origin server; i.e., a cache is not allowed
+ * to generate a reply to such a request before
+ * having forwarded the request and having received
+ * a corresponding response.
+ *
+ * RFC7231#4.2.1:
+ * Of the request methods defined by this
+ * specification, the GET, HEAD, OPTIONS, and TRACE
+ * methods are defined to be safe.
+ */
+ if (likely(txn->meth == HTTP_METH_GET ||
+ txn->meth == HTTP_METH_HEAD ||
+ txn->meth == HTTP_METH_OPTIONS ||
+ txn->meth == HTTP_METH_TRACE))
+ txn->flags |= TX_CACHEABLE | TX_CACHE_COOK;
+
+ /* end of job, return OK */
+ req->analysers &= ~an_bit;
+ req->analyse_exp = TICK_ETERNITY;
+
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+
+ return_int_err:
+ txn->status = 500;
+ s->flags |= SF_ERR_INTERNAL;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ goto return_prx_cond;
+
+ return_bad_req:
+ txn->status = 400;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+ /* fall through */
+
+ return_prx_cond:
+ http_set_term_flags(s);
+ http_reply_and_close(s, txn->status, http_error_message(s));
+
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+}
+
+
+/* This stream analyser runs all HTTP request processing which is common to
+ * frontends and backends, which means blocking ACLs, filters, connection-close,
+ * reqadd, stats and redirects. This is performed for the designated proxy.
+ * It returns 1 if the processing can continue on next analysers, or zero if it
+ * either needs more data or wants to immediately abort the request (eg: deny,
+ * error, ...).
+ *
+ * <s>      is the stream to analyse
+ * <req>    is the request channel, its buffer holding the HTX message
+ * <an_bit> is this analyser's bit, cleared from req->analysers on completion
+ * <px>     is the proxy (frontend or backend) whose rulesets are evaluated
+ */
+int http_process_req_common(struct stream *s, struct channel *req, int an_bit, struct proxy *px)
+{
+ struct list *def_rules, *rules;
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->req;
+ struct htx *htx;
+ struct redirect_rule *rule;
+ enum rule_result verdict;
+ struct connection *conn = objt_conn(sess->origin);
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+ htx = htxbuf(&req->buf);
+
+ /* just in case we have some per-backend tracking. Only called the first
+ * execution of the analyser. */
+ if (!s->current_rule && !s->current_rule_list)
+ stream_inc_be_http_req_ctr(s);
+
+ /* Also consider the rules inherited from the defaults section, except
+ * when <px> is the frontend evaluated again at the backend stage, so
+ * that the same defaults rules are not evaluated twice.
+ */
+ def_rules = ((px->defpx && (an_bit == AN_REQ_HTTP_PROCESS_FE || px != sess->fe)) ? &px->defpx->http_req_rules : NULL);
+ rules = &px->http_req_rules;
+
+ /* evaluate http-request rules */
+ if ((def_rules && !LIST_ISEMPTY(def_rules)) || !LIST_ISEMPTY(rules)) {
+ verdict = http_req_get_intercept_rule(px, def_rules, rules, s);
+
+ switch (verdict) {
+ case HTTP_RULE_RES_YIELD: /* some data miss, call the function later. */
+ goto return_prx_yield;
+
+ case HTTP_RULE_RES_CONT:
+ case HTTP_RULE_RES_STOP: /* nothing to do */
+ break;
+
+ case HTTP_RULE_RES_DENY: /* deny or tarpit */
+ if (txn->flags & TX_CLTARPIT)
+ goto tarpit;
+ goto deny;
+
+ case HTTP_RULE_RES_ABRT: /* abort request, response already sent. Eg: auth */
+ goto return_prx_cond;
+
+ case HTTP_RULE_RES_DONE: /* OK, but terminate request processing (eg: redirect) */
+ goto done;
+
+ case HTTP_RULE_RES_BADREQ: /* failed with a bad request */
+ goto return_bad_req;
+
+ case HTTP_RULE_RES_ERROR: /* failed with an internal error */
+ goto return_int_err;
+ }
+ }
+
+ /* optionally block or sanitize requests carrying restricted characters
+ * in header names, depending on the configured option.
+ */
+ if (px->options2 & (PR_O2_RSTRICT_REQ_HDR_NAMES_BLK|PR_O2_RSTRICT_REQ_HDR_NAMES_DEL)) {
+ verdict = http_req_restrict_header_names(s, htx, px);
+ if (verdict == HTTP_RULE_RES_DENY)
+ goto deny;
+ }
+
+ /* request received as TLS early data while the handshake is not yet
+ * confirmed: advertise it with an "Early-Data: 1" header (RFC 8470)
+ * unless one is already present.
+ */
+ if (conn && (conn->flags & CO_FL_EARLY_DATA) &&
+ (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS))) {
+ struct http_hdr_ctx ctx;
+
+ ctx.blk = NULL;
+ if (!http_find_header(htx, ist("Early-Data"), &ctx, 0)) {
+ if (unlikely(!http_add_header(htx, ist("Early-Data"), ist("1"))))
+ goto return_fail_rewrite;
+ }
+ }
+
+ /* OK at this stage, we know that the request was accepted according to
+ * the http-request rules, we can check for the stats. Note that the
+ * URI is detected *before* the req* rules in order not to be affected
+ * by a possible reqrep, while they are processed *after* so that a
+ * reqdeny can still block them. This clearly needs to change in 1.6!
+ */
+ if (!s->target && http_stats_check_uri(s, txn, px)) {
+ s->target = &http_stats_applet.obj_type;
+ if (unlikely(!sc_applet_create(s->scb, objt_applet(s->target)))) {
+ s->logs.request_ts = now_ns;
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ goto return_int_err;
+ }
+
+ /* parse the whole stats request and extract the relevant information */
+ http_handle_stats(s, req, px);
+ verdict = http_req_get_intercept_rule(px, NULL, &px->uri_auth->http_req_rules, s);
+ /* not all actions implemented: deny, allow, auth */
+
+ if (verdict == HTTP_RULE_RES_DENY) /* stats http-request deny */
+ goto deny;
+
+ if (verdict == HTTP_RULE_RES_ABRT) /* stats auth / stats http-request auth */
+ goto return_prx_cond;
+
+ if (verdict == HTTP_RULE_RES_BADREQ) /* failed with a bad request */
+ goto return_bad_req;
+
+ if (verdict == HTTP_RULE_RES_ERROR) /* failed with an internal error */
+ goto return_int_err;
+ }
+
+ /* Proceed with the applets now. */
+ if (unlikely(objt_applet(s->target))) {
+ if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
+
+ if (http_handle_expect_hdr(s, htx, msg) == -1)
+ goto return_int_err;
+
+ if (!(s->flags & SF_ERR_MASK)) // this is not really an error but it is
+ s->flags |= SF_ERR_LOCAL; // to mark that it comes from the proxy
+ http_set_term_flags(s);
+
+ if (HAS_FILTERS(s))
+ req->analysers |= AN_REQ_FLT_HTTP_HDRS;
+
+ /* enable the minimally required analyzers to handle keep-alive and compression on the HTTP response */
+ req->analysers &= (AN_REQ_HTTP_BODY | AN_REQ_FLT_HTTP_HDRS | AN_REQ_FLT_END);
+ req->analysers &= ~AN_REQ_FLT_XFER_DATA;
+ req->analysers |= AN_REQ_HTTP_XFER_BODY;
+
+ s->scb->flags |= SC_FL_SND_ASAP;
+ s->flags |= SF_ASSIGNED;
+ goto done;
+ }
+
+ /* check whether we have some ACLs set to redirect this request */
+ list_for_each_entry(rule, &px->redirect_rules, list) {
+ if (rule->cond) {
+ int ret;
+
+ ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+ ret = acl_pass(ret);
+ if (rule->cond->pol == ACL_COND_UNLESS)
+ ret = !ret;
+ if (!ret)
+ continue;
+ }
+ /* first matching rule wins: apply it and stop evaluating */
+ if (!http_apply_redirect_rule(rule, s, txn))
+ goto return_int_err;
+ goto done;
+ }
+
+ /* POST requests may be accompanied with an "Expect: 100-Continue" header.
+ * If this happens, then the data will not come immediately, so we must
+ * send all what we have without waiting. Note that due to the small gain
+ * in waiting for the body of the request, it's easier to simply put the
+ * SC_FL_SND_ASAP flag on the back SC any time. It's a one-shot flag so it
+ * will remove itself once used.
+ */
+ s->scb->flags |= SC_FL_SND_ASAP;
+
+ done: /* done with this analyser, continue with next ones that the calling
+ * points will have set, if any.
+ */
+ req->analyse_exp = TICK_ETERNITY;
+ done_without_exp: /* done with this analyser, but don't reset the analyse_exp. */
+ req->analysers &= ~an_bit;
+ s->current_rule = s->current_rule_list = NULL;
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+
+ tarpit:
+ /* Allow cookie logging
+ */
+ if (s->be->cookie_name || sess->fe->capture_name)
+ http_manage_client_side_cookies(s, req);
+
+ /* When a connection is tarpitted, we use the tarpit timeout,
+ * which may be the same as the connect timeout if unspecified.
+ * If unset, then set it to zero because we really want it to
+ * eventually expire. We build the tarpit as an analyser.
+ */
+ channel_htx_erase(&s->req, htx);
+
+ /* wipe the request out so that we can drop the connection early
+ * if the client closes first.
+ */
+ channel_dont_connect(req);
+
+ req->analysers &= AN_REQ_FLT_END; /* remove switching rules etc... */
+ req->analysers |= AN_REQ_HTTP_TARPIT;
+ req->analyse_exp = tick_add_ifset(now_ms, s->be->timeout.tarpit);
+ if (!req->analyse_exp)
+ req->analyse_exp = tick_add(now_ms, 0);
+ stream_inc_http_err_ctr(s);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.denied_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->denied_req);
+ goto done_without_exp;
+
+ deny: /* this request was blocked (denied) */
+
+ /* Allow cookie logging
+ */
+ if (s->be->cookie_name || sess->fe->capture_name)
+ http_manage_client_side_cookies(s, req);
+
+ s->logs.request_ts = now_ns;
+ stream_inc_http_err_ctr(s);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.denied_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->denied_req);
+ goto return_prx_err;
+
+ return_fail_rewrite:
+ /* a mandatory header rewrite failed: account it, then degrade to an
+ * internal error below.
+ */
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+ /* fall through */
+
+ return_int_err:
+ txn->status = 500;
+ s->flags |= SF_ERR_INTERNAL;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ goto return_prx_err;
+
+ return_bad_req:
+ txn->status = 400;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+ /* fall through */
+
+ return_prx_err:
+ http_set_term_flags(s);
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ /* fall through */
+
+ return_prx_cond:
+ /* NOTE(review): when reached via return_prx_err, http_set_term_flags()
+ * runs a second time here; presumably it only fills in flags that are
+ * still unset, so this is harmless — confirm against its definition.
+ */
+ http_set_term_flags(s);
+
+ req->analysers &= AN_REQ_FLT_END;
+ req->analyse_exp = TICK_ETERNITY;
+ s->current_rule = s->current_rule_list = NULL;
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+
+ return_prx_yield:
+ channel_dont_connect(req);
+ DBG_TRACE_DEVEL("waiting for more data",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+}
+
+/* This function performs all the processing enabled for the current request.
+ * It returns 1 if the processing can continue on next analysers, or zero if it
+ * needs more data, encounters an error, or wants to immediately abort the
+ * request. It relies on buffers flags, and updates s->req.analysers.
+ *
+ * <s>      is the stream to process
+ * <req>    is the request channel
+ * <an_bit> is this analyser's bit, cleared from req->analysers on success
+ */
+int http_process_request(struct stream *s, struct channel *req, int an_bit)
+{
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct htx *htx;
+ struct connection *cli_conn = objt_conn(strm_sess(s)->origin);
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+
+ /*
+ * Right now, we know that we have processed the entire headers
+ * and that unwanted requests have been filtered out. We can do
+ * whatever we want with the remaining request. Also, now we
+ * may have separate values for ->fe, ->be.
+ */
+ htx = htxbuf(&req->buf);
+
+ /*
+ * 7: Now we can work with the cookies.
+ * Note that doing so might move headers in the request, but
+ * the fields will stay coherent and the URI will not move.
+ * This should only be performed in the backend.
+ */
+ if (s->be->cookie_name || sess->fe->capture_name)
+ http_manage_client_side_cookies(s, req);
+
+ /* 8: Generate unique ID if a "unique-id-format" is defined.
+ *
+ * A unique ID is generated even when it is not sent to ensure that the ID can make use of
+ * fetches only available in the HTTP request processing stage.
+ */
+ if (!LIST_ISEMPTY(&sess->fe->format_unique_id)) {
+ struct ist unique_id = stream_generate_unique_id(s, &sess->fe->format_unique_id);
+
+ if (!isttest(unique_id)) {
+ /* ID generation failed: report a resource shortage */
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ goto return_int_err;
+ }
+
+ /* send unique ID if a "unique-id-header" is defined */
+ if (isttest(sess->fe->header_unique_id) &&
+ unlikely(!http_add_header(htx, sess->fe->header_unique_id, unique_id)))
+ goto return_fail_rewrite;
+ }
+
+ /* handle http extensions (if configured); a failed header insertion
+ * is accounted as a rewrite failure.
+ */
+ if (unlikely(!http_handle_7239_header(s, req)))
+ goto return_fail_rewrite;
+ if (unlikely(!http_handle_xff_header(s, req)))
+ goto return_fail_rewrite;
+ if (unlikely(!http_handle_xot_header(s, req)))
+ goto return_fail_rewrite;
+
+ /* Filter the request headers if there are filters attached to the
+ * stream.
+ */
+ if (HAS_FILTERS(s))
+ req->analysers |= AN_REQ_FLT_HTTP_HDRS;
+
+ /* If we have no server assigned yet and we're balancing on url_param
+ * with a POST request, we may be interested in checking the body for
+ * that parameter. This will be done in another analyser.
+ */
+ if (!(s->flags & (SF_ASSIGNED|SF_DIRECT)) &&
+ s->txn->meth == HTTP_METH_POST &&
+ (s->be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_PH) {
+ channel_dont_connect(req);
+ req->analysers |= AN_REQ_HTTP_BODY;
+ }
+
+ req->analysers &= ~AN_REQ_FLT_XFER_DATA;
+ req->analysers |= AN_REQ_HTTP_XFER_BODY;
+
+ /* We expect some data from the client. Unless we know for sure
+ * we already have a full request, we have to re-enable quick-ack
+ * in case we previously disabled it, otherwise we might cause
+ * the client to delay further data.
+ */
+ if ((sess->listener && (sess->listener->bind_conf->options & BC_O_NOQUICKACK)) && !(htx->flags & HTX_FL_EOM))
+ conn_set_quickack(cli_conn, 1);
+
+ /*************************************************************
+ * OK, that's finished for the headers. We have done what we *
+ * could. Let's switch to the DATA state. *
+ ************************************************************/
+ req->analyse_exp = TICK_ETERNITY;
+ req->analysers &= ~an_bit;
+
+ s->logs.request_ts = now_ns;
+ /* OK let's go on with the BODY now */
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+
+ return_fail_rewrite:
+ /* a mandatory header rewrite failed: account it, then degrade to an
+ * internal error below.
+ */
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_PRXCOND;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+ /* fall through */
+
+ return_int_err:
+ txn->status = 500;
+ s->flags |= SF_ERR_INTERNAL;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+
+ http_set_term_flags(s);
+ http_reply_and_close(s, txn->status, http_error_message(s));
+
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+}
+
+/* This function is an analyser which processes the HTTP tarpit. It always
+ * returns zero, at the beginning because it prevents any other processing
+ * from occurring, and at the end because it terminates the request.
+ * Note: <an_bit> is not consumed here; this analyser never removes itself,
+ * it ends the request once the tarpit timeout expires or the client leaves.
+ */
+int http_process_tarpit(struct stream *s, struct channel *req, int an_bit)
+{
+ struct http_txn *txn = s->txn;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, &txn->req);
+ /* This connection is being tarpitted. The CLIENT side has
+ * already set the connect expiration date to the right
+ * timeout. We just have to check that the client is still
+ * there and that the timeout has not expired.
+ */
+ channel_dont_connect(req);
+ if (!(s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) &&
+ !tick_is_expired(req->analyse_exp, now_ms)) {
+ /* Be sure to drain all data from the request channel */
+ channel_htx_erase(req, htxbuf(&req->buf));
+ DBG_TRACE_DEVEL("waiting for tarpit timeout expiry",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+
+
+ /* We will set the queue timer to the time spent, just for
+ * logging purposes. We fake a 500 server error, so that the
+ * attacker will not suspect his connection has been tarpitted.
+ * It will not cause trouble to the logs because we can exclude
+ * the tarpitted connections by filtering on the 'PT' status flags.
+ */
+ s->logs.t_queue = ns_to_ms(now_ns - s->logs.accept_ts);
+
+ http_set_term_flags(s);
+ /* skip the error message if the client side already reported an error */
+ http_reply_and_close(s, txn->status, (!(s->scf->flags & SC_FL_ERROR) ? http_error_message(s) : NULL));
+
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+}
+
+/* This function is an analyser which waits for the HTTP request body. It waits
+ * for either the buffer to be full, or the full advertised contents to have
+ * reached the buffer. It must only be called after the standard HTTP request
+ * processing has occurred, because it expects the request to be parsed and will
+ * look for the Expect header. It may send a 100-Continue interim response. It
+ * returns zero if it needs to read more data, or 1 once it has completed its
+ * analysis.
+ */
+int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit)
+{
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, &s->txn->req);
+
+
+ /* the heavy lifting is delegated to http_wait_for_msg_body(); only its
+ * verdict is mapped onto this analyser's exit paths here.
+ */
+ switch (http_wait_for_msg_body(s, req, s->be->timeout.httpreq, 0)) {
+ case HTTP_RULE_RES_CONT:
+ goto http_end;
+ case HTTP_RULE_RES_YIELD:
+ goto missing_data_or_waiting;
+ case HTTP_RULE_RES_BADREQ:
+ goto return_bad_req;
+ case HTTP_RULE_RES_ERROR:
+ goto return_int_err;
+ case HTTP_RULE_RES_ABRT:
+ goto return_prx_cond;
+ default:
+ /* any unexpected verdict is treated as an internal error */
+ goto return_int_err;
+ }
+
+ http_end:
+ /* The situation will not evolve, so let's give up on the analysis. */
+ s->logs.request_ts = now_ns; /* update the request timer to reflect full request */
+ req->analysers &= ~an_bit;
+ req->analyse_exp = TICK_ETERNITY;
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+
+ missing_data_or_waiting:
+ channel_dont_connect(req);
+ DBG_TRACE_DEVEL("waiting for more data",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+
+ return_int_err:
+ txn->status = 500;
+ s->flags |= SF_ERR_INTERNAL;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ goto return_prx_err;
+
+ return_bad_req: /* let's centralize all bad requests */
+ txn->status = 400;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+ /* fall through */
+
+ return_prx_err:
+ http_set_term_flags(s);
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ /* fall through */
+
+ return_prx_cond:
+ /* NOTE(review): when reached via return_prx_err, http_set_term_flags()
+ * runs a second time here; presumably it only fills in flags that are
+ * still unset, so this is harmless — confirm against its definition.
+ */
+ http_set_term_flags(s);
+
+ req->analysers &= AN_REQ_FLT_END;
+ req->analyse_exp = TICK_ETERNITY;
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+}
+
+/* This function is an analyser which forwards request body (including chunk
+ * sizes if any). It is called as soon as we must forward, even if we forward
+ * zero byte. The only situation where it must not be called is when we're in
+ * tunnel mode and we want to forward till the close. It's used both to forward
+ * remaining data and to resync after end of body. It expects the msg_state to
+ * be between MSG_BODY and MSG_DONE (inclusive). It returns zero if it needs to
+ * read more data, or 1 once we can go on with next request or end the stream.
+ * When in MSG_DATA or MSG_TRAILERS, it will automatically forward chunk_len
+ * bytes of pending data + the headers if not already done.
+ */
+int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
+{
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->req;
+ struct htx *htx;
+ short status = 0;
+ int ret;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+ htx = htxbuf(&req->buf);
+
+ /* parsing errors yield a 400, processing errors a 500 */
+ if (htx->flags & HTX_FL_PARSING_ERROR)
+ goto return_bad_req;
+ if (htx->flags & HTX_FL_PROCESSING_ERROR)
+ goto return_int_err;
+
+ /* Note that we don't have to send 100-continue back because we don't
+ * need the data to complete our job, and it's up to the server to
+ * decide whether to return 100, 417 or anything else in return of
+ * an "Expect: 100-continue" header.
+ */
+ if (msg->msg_state == HTTP_MSG_BODY)
+ msg->msg_state = HTTP_MSG_DATA;
+
+ /* in most states, we should abort in case of early close */
+ channel_auto_close(req);
+
+ if (req->to_forward) {
+ if (req->to_forward == CHN_INFINITE_FORWARD) {
+ if (s->scf->flags & SC_FL_EOI)
+ msg->msg_state = HTTP_MSG_ENDING;
+ }
+ else {
+ /* We can't process the buffer's contents yet */
+ req->flags |= CF_WAKE_WRITE;
+ goto missing_data_or_waiting;
+ }
+ }
+
+ if (msg->msg_state >= HTTP_MSG_ENDING)
+ goto ending;
+
+ if (txn->meth == HTTP_METH_CONNECT) {
+ msg->msg_state = HTTP_MSG_ENDING;
+ goto ending;
+ }
+
+ /* Forward input data. We get it by removing all outgoing data not
+ * forwarded yet from HTX data size. If there are some data filters, we
+ * let them decide the amount of data to forward.
+ */
+ if (HAS_REQ_DATA_FILTERS(s)) {
+ ret = flt_http_payload(s, msg, htx->data);
+ if (ret < 0)
+ goto return_bad_req;
+ c_adv(req, ret);
+ }
+ else {
+ c_adv(req, htx->data - co_data(req));
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) && (msg->flags & HTTP_MSGF_XFER_LEN))
+ channel_htx_forward_forever(req, htx);
+ }
+
+ /* some input data remains not scheduled for output yet */
+ if (htx->data != co_data(req))
+ goto missing_data_or_waiting;
+
+ /* Check if the end-of-message is reached and if so, switch the message
+ * in HTTP_MSG_ENDING state. Then if all data was marked to be
+ * forwarded, set the state to HTTP_MSG_DONE.
+ */
+ if (!(htx->flags & HTX_FL_EOM))
+ goto missing_data_or_waiting;
+
+ msg->msg_state = HTTP_MSG_ENDING;
+
+ ending:
+ s->scb->flags &= ~SC_FL_SND_EXP_MORE; /* no more data are expected to be send */
+
+ /* other states, ENDING...TUNNEL */
+ if (msg->msg_state >= HTTP_MSG_DONE)
+ goto done;
+
+ if (HAS_REQ_DATA_FILTERS(s)) {
+ ret = flt_http_end(s, msg);
+ if (ret <= 0) {
+ if (!ret)
+ goto missing_data_or_waiting;
+ goto return_bad_req;
+ }
+ }
+
+ if (txn->meth == HTTP_METH_CONNECT)
+ msg->msg_state = HTTP_MSG_TUNNEL;
+ else {
+ msg->msg_state = HTTP_MSG_DONE;
+ req->to_forward = 0;
+ }
+
+ done:
+ /* we don't want to forward closes on DONE except in tunnel mode. */
+ if (!(txn->flags & TX_CON_WANT_TUN))
+ channel_dont_close(req);
+
+ if ((s->scb->flags & SC_FL_SHUT_DONE) && co_data(req)) {
+ /* request errors are most likely due to the server aborting the
+ * transfer. But handle server aborts only if there is no
+ * response. Otherwise, let a chance to forward the response
+ * first.
+ */
+ if (htx_is_empty(htxbuf(&s->res.buf)))
+ goto return_srv_abort;
+ }
+
+ http_end_request(s);
+ if (!(req->analysers & an_bit)) {
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+ }
+
+ /* If "option abortonclose" is set on the backend, we want to monitor
+ * the client's connection and forward any shutdown notification to the
+ * server, which will decide whether to close or to go on processing the
+ * request. We only do that in tunnel mode, and not in other modes since
+ * it can be abused to exhaust source ports. */
+ if (s->be->options & PR_O_ABRT_CLOSE) {
+ channel_auto_read(req);
+ if ((s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) && !(txn->flags & TX_CON_WANT_TUN))
+ s->scb->flags |= SC_FL_NOLINGER;
+ channel_auto_close(req);
+ }
+ else if (s->txn->meth == HTTP_METH_POST) {
+ /* POST requests may require to read extra CRLF sent by broken
+ * browsers and which could cause an RST to be sent upon close
+ * on some systems (eg: Linux). */
+ channel_auto_read(req);
+ }
+ DBG_TRACE_DEVEL("waiting for the end of the HTTP txn",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+
+ missing_data_or_waiting:
+ /* stop waiting for data if the input is closed before the end */
+ if (msg->msg_state < HTTP_MSG_ENDING && (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)))
+ goto return_cli_abort;
+
+ waiting:
+ /* waiting for the last bits to leave the buffer */
+ if (s->scb->flags & SC_FL_SHUT_DONE) {
+ /* Handle server aborts only if there is no response. Otherwise,
+ * let a chance to forward the response first.
+ */
+ if (htx_is_empty(htxbuf(&s->res.buf)))
+ goto return_srv_abort;
+ }
+
+ /* When TE: chunked is used, we need to get there again to parse remaining
+ * chunks even if the client has closed, so we don't want to set CF_DONTCLOSE.
+ * And when content-length is used, we never want to let the possible
+ * shutdown be forwarded to the other side, as the state machine will
+ * take care of it once the client responds. It's also important to
+ * prevent TIME_WAITs from accumulating on the backend side, and for
+ * HTTP/2 where the last frame comes with a shutdown.
+ */
+ if (msg->flags & HTTP_MSGF_XFER_LEN)
+ channel_dont_close(req);
+
+ /* We know that more data are expected, but we couldn't send more that
+ * what we did. So we always set the SC_FL_SND_EXP_MORE flag so that the
+ * system knows it must not set a PUSH on this first part. Interactive
+ * modes are already handled by the stream sock layer. We must not do
+ * this in content-length mode because it could present the MSG_MORE
+ * flag with the last block of forwarded data, which would cause an
+ * additional delay to be observed by the receiver.
+ */
+ if (HAS_REQ_DATA_FILTERS(s))
+ s->scb->flags |= SC_FL_SND_EXP_MORE;
+
+ DBG_TRACE_DEVEL("waiting for more data to forward",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+
+ return_cli_abort:
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= ((req->flags & CF_READ_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
+ status = 400;
+ goto return_prx_cond;
+
+ return_srv_abort:
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.srv_aborts);
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= ((req->flags & CF_WRITE_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
+ status = 502;
+ goto return_prx_cond;
+
+ return_int_err:
+ s->flags |= SF_ERR_INTERNAL;
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
+ status = 500;
+ goto return_prx_cond;
+
+ return_bad_req:
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+ status = 400;
+ /* fall through */
+
+ return_prx_cond:
+ http_set_term_flags(s);
+ if (txn->status > 0) {
+ /* Note: we don't send any error if some data were already sent */
+ http_reply_and_close(s, txn->status, NULL);
+ } else {
+ txn->status = status;
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ }
+ DBG_TRACE_DEVEL("leaving on error ",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+}
+
+/* Reset the stream and the backend stream connector to a situation suitable
+ * for attempting a new connection (L7 retry). Note: <sc> is currently unused
+ * here; the function works on s->scf / s->scb directly. */
+/* Returns 0 if we can attempt to retry, -1 otherwise */
+static __inline int do_l7_retry(struct stream *s, struct stconn *sc)
+{
+ struct channel *req, *res;
+ int co_data;
+
+ /* give up once the configured number of retries is exhausted */
+ if (s->conn_retries >= s->be->conn_retries)
+ return -1;
+ s->conn_retries++;
+ if (objt_server(s->target)) {
+ /* release the current server session before retrying */
+ if (s->flags & SF_CURR_SESS) {
+ s->flags &= ~SF_CURR_SESS;
+ _HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
+ }
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.retries);
+ }
+ _HA_ATOMIC_INC(&s->be->be_counters.retries);
+
+ req = &s->req;
+ res = &s->res;
+
+ /* Remove any write error from the request, and read error from the response */
+ s->scf->flags &= ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
+ req->flags &= ~CF_WRITE_TIMEOUT;
+ res->flags &= ~(CF_READ_TIMEOUT | CF_READ_EVENT);
+ res->analysers &= AN_RES_FLT_END;
+ s->conn_err_type = STRM_ET_NONE;
+ s->flags &= ~(SF_CONN_EXP | SF_ERR_MASK | SF_FINST_MASK);
+ s->conn_exp = TICK_ETERNITY;
+ stream_choose_redispatch(s);
+ res->to_forward = 0;
+ res->analyse_exp = TICK_ETERNITY;
+ res->total = 0;
+
+ /* detach the backend endpoint so a fresh connection may be set up */
+ s->scb->flags &= ~(SC_FL_ERROR|SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED);
+ if (sc_reset_endp(s->scb) < 0) {
+ s->flags |= SF_ERR_INTERNAL;
+ return -1;
+ }
+
+ b_free(&req->buf);
+ /* Swap the L7 buffer with the channel buffer */
+ /* We know we stored the co_data as b_data, so get it there */
+ co_data = b_data(&s->txn->l7_buffer);
+ b_set_data(&s->txn->l7_buffer, b_size(&s->txn->l7_buffer));
+ b_xfer(&req->buf, &s->txn->l7_buffer, b_data(&s->txn->l7_buffer));
+ co_set_data(req, co_data);
+
+ DBG_TRACE_DEVEL("perform a L7 retry", STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, s->txn);
+
+ /* drop any partial response; the new attempt will rebuild it */
+ b_reset(&res->buf);
+ co_set_data(res, 0);
+ return 0;
+}
+
+/* This stream analyser waits for a complete HTTP response. It returns 1 if the
+ * processing can continue on next analysers, or zero if it either needs more
+ * data or wants to immediately abort the response (eg: timeout, error, ...). It
+ * is tied to AN_RES_WAIT_HTTP and may may remove itself from s->res.analysers
+ * when it has nothing left to do, and may remove any analyser when it wants to
+ * abort.
+ */
+int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
+{
+ /*
+ * We will analyze a complete HTTP response to check the its syntax.
+ *
+ * Once the start line and all headers are received, we may perform a
+ * capture of the error (if any), and we will set a few fields. We also
+ * logging and finally headers capture.
+ */
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct http_msg *msg = &txn->rsp;
+ struct htx *htx;
+ struct connection *srv_conn;
+ struct htx_sl *sl;
+ int n;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+ htx = htxbuf(&rep->buf);
+
+ /* Parsing errors are caught here */
+ if (htx->flags & HTX_FL_PARSING_ERROR)
+ goto return_bad_res;
+ if (htx->flags & HTX_FL_PROCESSING_ERROR)
+ goto return_int_err;
+
+ /*
+ * Now we quickly check if we have found a full valid response.
+ * If not so, we check the FD and buffer states before leaving.
+ * A full response is indicated by the fact that we have seen
+ * the double LF/CRLF, so the state is >= HTTP_MSG_BODY. Invalid
+ * responses are checked first.
+ *
+ * Depending on whether the client is still there or not, we
+ * may send an error response back or not. Note that normally
+ * we should only check for HTTP status there, and check I/O
+ * errors somewhere else.
+ */
+ next_one:
+ if (unlikely(htx_is_empty(htx) || htx->first == -1)) {
+ /* 1: have we encountered a read error ? */
+ if (s->scb->flags & SC_FL_ERROR) {
+ struct connection *conn = sc_conn(s->scb);
+
+
+ if ((txn->flags & TX_L7_RETRY) &&
+ (s->be->retry_type & PR_RE_DISCONNECTED) &&
+ (!conn || conn->err_code != CO_ER_SSL_EARLY_FAILED)) {
+ if (co_data(rep) || do_l7_retry(s, s->scb) == 0)
+ return 0;
+ }
+
+ /* Perform a L7 retry on empty response or because server refuses the early data. */
+ if ((txn->flags & TX_L7_RETRY) &&
+ (s->be->retry_type & PR_RE_EARLY_ERROR) &&
+ conn && conn->err_code == CO_ER_SSL_EARLY_FAILED &&
+ do_l7_retry(s, s->scb) == 0) {
+ DBG_TRACE_DEVEL("leaving on L7 retry",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+
+ if (txn->flags & TX_NOT_FIRST)
+ goto abort_keep_alive;
+
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (objt_server(s->target)) {
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_ERROR);
+ }
+
+ /* if the server refused the early data, just send a 425 */
+ if (conn && conn->err_code == CO_ER_SSL_EARLY_FAILED)
+ txn->status = 425;
+ else {
+ txn->status = 502;
+ stream_inc_http_fail_ctr(s);
+ }
+
+ s->scb->flags |= SC_FL_NOLINGER;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_SRVCL;
+ http_set_term_flags(s);
+
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+ }
+
+ /* 2: read timeout : return a 504 to the client. */
+ else if (rep->flags & CF_READ_TIMEOUT) {
+ if ((txn->flags & TX_L7_RETRY) &&
+ (s->be->retry_type & PR_RE_TIMEOUT)) {
+ if (co_data(rep) || do_l7_retry(s, s->scb) == 0) {
+ DBG_TRACE_DEVEL("leaving on L7 retry",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+ }
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (objt_server(s->target)) {
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
+ }
+
+ txn->status = 504;
+ stream_inc_http_fail_ctr(s);
+ s->scb->flags |= SC_FL_NOLINGER;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_SRVTO;
+ http_set_term_flags(s);
+
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+ }
+
+ /* 3: client abort with an abortonclose */
+ else if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->scb->flags & SC_FL_SHUT_DONE) &&
+ (s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
+
+ txn->status = 400;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_CLICL;
+ http_set_term_flags(s);
+
+ http_reply_and_close(s, txn->status, http_error_message(s));
+
+ /* process_stream() will take care of the error */
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+ }
+
+ /* 4: close from server, capture the response if the server has started to respond */
+ else if (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) {
+ if ((txn->flags & TX_L7_RETRY) &&
+ (s->be->retry_type & PR_RE_DISCONNECTED)) {
+ if (co_data(rep) || do_l7_retry(s, s->scb) == 0) {
+ DBG_TRACE_DEVEL("leaving on L7 retry",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+ }
+
+ if (txn->flags & TX_NOT_FIRST)
+ goto abort_keep_alive;
+
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (objt_server(s->target)) {
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
+ }
+
+ txn->status = 502;
+ stream_inc_http_fail_ctr(s);
+ s->scb->flags |= SC_FL_NOLINGER;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_SRVCL;
+ http_set_term_flags(s);
+
+ http_reply_and_close(s, txn->status, http_error_message(s));
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+ }
+
+ /* 5: write error to client (we don't send any message then) */
+ else if (sc_ep_test(s->scf, SE_FL_ERR_PENDING)) {
+ if (txn->flags & TX_NOT_FIRST)
+ goto abort_keep_alive;
+
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+ rep->analysers &= AN_RES_FLT_END;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_CLICL;
+ http_set_term_flags(s);
+
+ /* process_stream() will take care of the error */
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+ }
+
+ channel_dont_close(rep);
+ s->scb->flags |= SC_FL_RCV_ONCE; /* try to get back here ASAP */
+ DBG_TRACE_DEVEL("waiting for more data",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+
+ /* More interesting part now : we know that we have a complete
+ * response which at least looks like HTTP. We have an indicator
+ * of each header's length, so we can parse them quickly.
+ */
+ BUG_ON(htx_get_first_type(htx) != HTX_BLK_RES_SL);
+ sl = http_get_stline(htx);
+
+ /* Perform a L7 retry because of the status code */
+ if ((txn->flags & TX_L7_RETRY) &&
+ l7_status_match(s->be, sl->info.res.status) &&
+ do_l7_retry(s, s->scb) == 0) {
+ DBG_TRACE_DEVEL("leaving on L7 retry", STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+
+ /* Now, L7 buffer is useless, it can be released */
+ b_free(&txn->l7_buffer);
+
+ msg->msg_state = HTTP_MSG_BODY;
+
+
+ /* 0: we might have to print this header in debug mode */
+ if (unlikely((global.mode & MODE_DEBUG) &&
+ (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
+ int32_t pos;
+
+ http_debug_stline("srvrep", s, sl);
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_EOH)
+ break;
+ if (type != HTX_BLK_HDR)
+ continue;
+
+ http_debug_hdr("srvhdr", s,
+ htx_get_blk_name(htx, blk),
+ htx_get_blk_value(htx, blk));
+ }
+ }
+
+ /* 1: get the status code and the version. Also set HTTP flags */
+ txn->server_status = txn->status = sl->info.res.status;
+ if (sl->flags & HTX_SL_F_VER_11)
+ msg->flags |= HTTP_MSGF_VER_11;
+ if (sl->flags & HTX_SL_F_XFER_LEN) {
+ msg->flags |= HTTP_MSGF_XFER_LEN;
+ if (sl->flags & HTX_SL_F_CLEN)
+ msg->flags |= HTTP_MSGF_CNT_LEN;
+ else if (sl->flags & HTX_SL_F_CHNK)
+ msg->flags |= HTTP_MSGF_TE_CHNK;
+ }
+ if (sl->flags & HTX_SL_F_BODYLESS)
+ msg->flags |= HTTP_MSGF_BODYLESS;
+ if (sl->flags & HTX_SL_F_CONN_UPG)
+ msg->flags |= HTTP_MSGF_CONN_UPG;
+
+ n = txn->status / 100;
+ if (n < 1 || n > 5)
+ n = 0;
+
+ /* when the client triggers a 4xx from the server, it's most often due
+ * to a missing object or permission. These events should be tracked
+ * because if they happen often, it may indicate a brute force or a
+ * vulnerability scan.
+ */
+ if (n == 4)
+ stream_inc_http_err_ctr(s);
+
+ if (n == 5 && txn->status != 501 && txn->status != 505)
+ stream_inc_http_fail_ctr(s);
+
+ if (objt_server(s->target)) {
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.p.http.rsp[n]);
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.p.http.cum_req);
+ }
+
+ /* Adjust server's health based on status code. Note: status codes 501
+ * and 505 are triggered on demand by client request, so we must not
+ * count them as server failures.
+ */
+ if (objt_server(s->target)) {
+ if (txn->status >= 100 && (txn->status < 500 || txn->status == 501 || txn->status == 505))
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_OK);
+ else
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_STS);
+ }
+
+ /*
+ * We may be facing a 100-continue response, or any other informational
+ * 1xx response which is non-final, in which case this is not the right
+ * response, and we're waiting for the next one. Let's allow this response
+ * to go to the client and wait for the next one. There's an exception for
+ * 101 which is used later in the code to switch protocols.
+ */
+ if (txn->status < 200 &&
+ (txn->status == 100 || txn->status >= 102)) {
+ FLT_STRM_CB(s, flt_http_reset(s, msg));
+ htx->first = channel_htx_fwd_headers(rep, htx);
+ msg->msg_state = HTTP_MSG_RPBEFORE;
+ msg->flags = 0;
+ txn->server_status = txn->status = 0;
+ s->logs.t_data = -1; /* was not a response yet */
+ s->scf->flags |= SC_FL_SND_ASAP; /* Send ASAP informational messages */
+ goto next_one;
+ }
+
+ /* A 101-switching-protocols response must contain a Connection header with the
+ * "upgrade" option, and the request too. It means both sides agree to
+ * upgrade. It is not so strict because there is no test on the Upgrade
+ * header content. But it is probably strong enough for now.
+ */
+ if (txn->status == 101 &&
+ (!(txn->req.flags & HTTP_MSGF_CONN_UPG) || !(txn->rsp.flags & HTTP_MSGF_CONN_UPG)))
+ goto return_bad_res;
+
+ /*
+ * 2: check for cacheability.
+ */
+
+ switch (txn->status) {
+ case 200:
+ case 203:
+ case 204:
+ case 206:
+ case 300:
+ case 301:
+ case 404:
+ case 405:
+ case 410:
+ case 414:
+ case 501:
+ break;
+ default:
+ /* RFC7231#6.1:
+ * Responses with status codes that are defined as
+ * cacheable by default (e.g., 200, 203, 204, 206,
+ * 300, 301, 404, 405, 410, 414, and 501 in this
+ * specification) can be reused by a cache with
+ * heuristic expiration unless otherwise indicated
+ * by the method definition or explicit cache
+ * controls [RFC7234]; all other status codes are
+ * not cacheable by default.
+ */
+ txn->flags &= ~(TX_CACHEABLE | TX_CACHE_COOK);
+ break;
+ }
+
+ /*
+ * 3: we may need to capture headers
+ */
+ s->logs.logwait &= ~LW_RESP;
+ if (unlikely((s->logs.logwait & LW_RSPHDR) && s->res_cap))
+ http_capture_headers(htx, s->res_cap, sess->fe->rsp_cap);
+
+ /* Skip parsing if no content length is possible. */
+ if (unlikely((txn->meth == HTTP_METH_CONNECT && txn->status >= 200 && txn->status < 300) ||
+ txn->status == 101)) {
+ /* Either we've established an explicit tunnel, or we're
+ * switching the protocol. In both cases, we're very unlikely
+ * to understand the next protocols. We have to switch to tunnel
+ * mode, so that we transfer the request and responses then let
+ * this protocol pass unmodified. When we later implement specific
+ * parsers for such protocols, we'll want to check the Upgrade
+ * header which contains information about that protocol for
+ * responses with status 101 (eg: see RFC2817 about TLS).
+ */
+ txn->flags |= TX_CON_WANT_TUN;
+ }
+
+ /* check for NTLM authentication headers in 401 (WWW-Authenticate) and
+ * 407 (Proxy-Authenticate) responses and set the connection to private
+ */
+ srv_conn = sc_conn(s->scb);
+ if (srv_conn) {
+ struct ist hdr;
+ struct http_hdr_ctx ctx;
+
+ if (txn->status == 401)
+ hdr = ist("WWW-Authenticate");
+ else if (txn->status == 407)
+ hdr = ist("Proxy-Authenticate");
+ else
+ goto end;
+
+ ctx.blk = NULL;
+ while (http_find_header(htx, hdr, &ctx, 0)) {
+ /* If www-authenticate contains "Negotiate", "Nego2", or "NTLM",
+ * possibly followed by blanks and a base64 string, the connection
+ * is private. Since it's a mess to deal with, we only check for
+ * values starting with "NTLM" or "Nego". Note that often multiple
+ * headers are sent by the server there.
+ */
+ if ((ctx.value.len >= 4 && strncasecmp(ctx.value.ptr, "Nego", 4) == 0) ||
+ (ctx.value.len >= 4 && strncasecmp(ctx.value.ptr, "NTLM", 4) == 0)) {
+ sess->flags |= SESS_FL_PREFER_LAST;
+ conn_set_owner(srv_conn, sess, NULL);
+ conn_set_private(srv_conn);
+ /* If it fail now, the same will be done in mux->detach() callback */
+ session_add_conn(srv_conn->owner, srv_conn, srv_conn->target);
+ break;
+ }
+ }
+ }
+
+ end:
+ /* we want to have the response time before we start processing it */
+ s->logs.t_data = ns_to_ms(now_ns - s->logs.accept_ts);
+
+ /* end of job, return OK */
+ rep->analysers &= ~an_bit;
+ rep->analyse_exp = TICK_ETERNITY;
+ channel_auto_close(rep);
+ DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 1;
+
+ return_int_err:
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+ _HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+ if (objt_server(s->target))
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
+ txn->status = 500;
+ s->flags |= SF_ERR_INTERNAL;
+ goto return_prx_cond;
+
+ return_bad_res:
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (objt_server(s->target)) {
+ _HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+ health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
+ }
+ if ((s->be->retry_type & PR_RE_JUNK_REQUEST) &&
+ (txn->flags & TX_L7_RETRY) &&
+ do_l7_retry(s, s->scb) == 0) {
+ DBG_TRACE_DEVEL("leaving on L7 retry",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+ }
+ txn->status = 502;
+ stream_inc_http_fail_ctr(s);
+ /* fall through */
+
+ return_prx_cond:
+ http_set_term_flags(s);
+ http_reply_and_close(s, txn->status, http_error_message(s));
+
+ s->scb->flags |= SC_FL_NOLINGER;
+ DBG_TRACE_DEVEL("leaving on error",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+ return 0;
+
+ abort_keep_alive:
+ /* A keep-alive request to the server failed on a network error.
+ * The client is required to retry. We need to close without returning
+ * any other information so that the client retries.
+ */
+ txn->status = 0;
+ s->logs.logwait = 0;
+ s->logs.level = 0;
+ s->scf->flags &= ~SC_FL_SND_EXP_MORE; /* speed up sending a previous response */
+ http_reply_and_close(s, txn->status, NULL);
+ DBG_TRACE_DEVEL("leaving by closing K/A connection",
+ STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+ return 0;
+}
+
+/* This function performs all the processing enabled for the current response.
+ * It relies on buffers flags, and updates s->res.analysers. It might make
+ * sense to explode it into several other functions. It works like
+ * process_request (see indications above). It evaluates http-response rules,
+ * manages server-side cookies and cacheability checks, then hands off to the
+ * body analyser. It returns 1 when the analysis is done, and 0 when it must
+ * be called again (more data needed or rule yield) or when an error aborted
+ * the processing.
+ */
+int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, struct proxy *px)
+{
+	struct session *sess = s->sess;
+	struct http_txn *txn = s->txn;
+	struct http_msg *msg = &txn->rsp;
+	struct htx *htx;
+	struct proxy *cur_proxy;
+	enum rule_result ret = HTTP_RULE_RES_CONT;
+
+	if (unlikely(msg->msg_state < HTTP_MSG_BODY)) /* we need more data */
+		return 0;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+	htx = htxbuf(&rep->buf);
+
+	/* The stats applet needs to adjust the Connection header but we don't
+	 * apply any filter there.
+	 */
+	if (unlikely(objt_applet(s->target) == &http_stats_applet)) {
+		rep->analysers &= ~an_bit;
+		rep->analyse_exp = TICK_ETERNITY;
+		goto end;
+	}
+
+	/*
+	 * We will have to evaluate the filters.
+	 * As opposed to version 1.2, now they will be evaluated in the
+	 * filters order and not in the header order. This means that
+	 * each filter has to be validated among all headers.
+	 *
+	 * Filters are tried with ->be first, then with ->fe if it is
+	 * different from ->be.
+	 *
+	 * Maybe we are in a resume condition. In this case I choose the
+	 * "struct proxy" which contains the rule list matching the resume
+	 * pointer. If none of these "struct proxy" match, I initialise
+	 * the process with the first one.
+	 *
+	 * In fact, I check only correspondence between the current list
+	 * pointer and the ->fe rule list. If it doesn't match, I initialize
+	 * the loop with the ->be.
+	 */
+	if (s->current_rule_list == &sess->fe->http_res_rules ||
+	    (sess->fe->defpx && s->current_rule_list == &sess->fe->defpx->http_res_rules))
+		cur_proxy = sess->fe;
+	else
+		cur_proxy = s->be;
+
+	/* evaluate rules on the backend first, then on the frontend if it
+	 * differs. The loop exits after the frontend has been processed.
+	 */
+	while (1) {
+		/* evaluate http-response rules */
+		if (ret == HTTP_RULE_RES_CONT || ret == HTTP_RULE_RES_STOP) {
+			struct list *def_rules, *rules;
+
+			def_rules = ((cur_proxy->defpx && (cur_proxy == s->be || cur_proxy->defpx != s->be->defpx)) ? &cur_proxy->defpx->http_res_rules : NULL);
+			rules = &cur_proxy->http_res_rules;
+
+			ret = http_res_get_intercept_rule(cur_proxy, def_rules, rules, s, 0);
+
+			switch (ret) {
+			case HTTP_RULE_RES_YIELD: /* some data miss, call the function later. */
+				goto return_prx_yield;
+
+			case HTTP_RULE_RES_CONT:
+			case HTTP_RULE_RES_STOP: /* nothing to do */
+				break;
+
+			case HTTP_RULE_RES_DENY: /* deny or tarpit */
+				goto deny;
+
+			case HTTP_RULE_RES_ABRT: /* abort request, response already sent */
+				goto return_prx_cond;
+
+			case HTTP_RULE_RES_DONE: /* OK, but terminate request processing (eg: redirect) */
+				goto done;
+
+			case HTTP_RULE_RES_BADREQ: /* failed with a bad request */
+				goto return_bad_res;
+
+			case HTTP_RULE_RES_ERROR: /* failed with an internal error */
+				goto return_int_err;
+			}
+
+		}
+
+		/* check whether we're already working on the frontend */
+		if (cur_proxy == sess->fe)
+			break;
+		cur_proxy = sess->fe;
+	}
+
+	/* OK that's all we can do for 1xx responses */
+	if (unlikely(txn->status < 200 && txn->status != 101))
+		goto end;
+
+	/*
+	 * Now check for a server cookie.
+	 */
+	if (s->be->cookie_name || sess->fe->capture_name || (s->be->options & PR_O_CHK_CACHE))
+		http_manage_server_side_cookies(s, rep);
+
+	/*
+	 * Check for cache-control or pragma headers if required.
+	 */
+	if ((s->be->options & PR_O_CHK_CACHE) || (s->be->ck_opts & PR_CK_NOC))
+		http_check_response_for_cacheability(s, rep);
+
+	/*
+	 * Add server cookie in the response if needed
+	 */
+	if (objt_server(s->target) && (s->be->ck_opts & PR_CK_INS) &&
+	    !((txn->flags & TX_SCK_FOUND) && (s->be->ck_opts & PR_CK_PSV)) &&
+	    (!(s->flags & SF_DIRECT) ||
+	     ((s->be->cookie_maxidle || txn->cookie_last_date) &&
+	      (!txn->cookie_last_date || (txn->cookie_last_date - date.tv_sec) < 0)) ||
+	     (s->be->cookie_maxlife && !txn->cookie_first_date) ||  // set the first_date
+	     (!s->be->cookie_maxlife && txn->cookie_first_date)) && // remove the first_date
+	    (!(s->be->ck_opts & PR_CK_POST) || (txn->meth == HTTP_METH_POST)) &&
+	    !(s->flags & SF_IGNORE_PRST)) {
+		/* the server is known, it's not the one the client requested, or the
+		 * cookie's last seen date needs to be refreshed. We have to
+		 * insert a set-cookie here, except if we want to insert only on POST
+		 * requests and this one isn't. Note that servers which don't have cookies
+		 * (eg: some backup servers) will return a full cookie removal request.
+		 */
+		if (!__objt_server(s->target)->cookie) {
+			chunk_printf(&trash,
+				     "%s=; Expires=Thu, 01-Jan-1970 00:00:01 GMT; path=/",
+				     s->be->cookie_name);
+		}
+		else {
+			chunk_printf(&trash, "%s=%s", s->be->cookie_name, __objt_server(s->target)->cookie);
+
+			if (s->be->cookie_maxidle || s->be->cookie_maxlife) {
+				/* emit last_date, which is mandatory */
+				trash.area[trash.data++] = COOKIE_DELIM_DATE;
+				s30tob64((date.tv_sec+3) >> 2,
+					 trash.area + trash.data);
+				trash.data += 5;
+
+				if (s->be->cookie_maxlife) {
+					/* emit first_date, which is either the original one or
+					 * the current date.
+					 */
+					trash.area[trash.data++] = COOKIE_DELIM_DATE;
+					s30tob64(txn->cookie_first_date ?
+						 txn->cookie_first_date >> 2 :
+						 (date.tv_sec+3) >> 2,
+						 trash.area + trash.data);
+					trash.data += 5;
+				}
+			}
+			chunk_appendf(&trash, "; path=/");
+		}
+
+		if (s->be->cookie_domain)
+			chunk_appendf(&trash, "; domain=%s", s->be->cookie_domain);
+
+		if (s->be->ck_opts & PR_CK_HTTPONLY)
+			chunk_appendf(&trash, "; HttpOnly");
+
+		if (s->be->ck_opts & PR_CK_SECURE)
+			chunk_appendf(&trash, "; Secure");
+
+		if (s->be->cookie_attrs)
+			chunk_appendf(&trash, "; %s", s->be->cookie_attrs);
+
+		if (unlikely(!http_add_header(htx, ist("Set-Cookie"), ist2(trash.area, trash.data))))
+			goto return_fail_rewrite;
+
+		txn->flags &= ~TX_SCK_MASK;
+		if (__objt_server(s->target)->cookie && (s->flags & SF_DIRECT))
+			/* the server did not change, only the date was updated */
+			txn->flags |= TX_SCK_UPDATED;
+		else
+			txn->flags |= TX_SCK_INSERTED;
+
+		/* Here, we will tell an eventual cache on the client side that we don't
+		 * want it to cache this reply because HTTP/1.0 caches also cache cookies !
+		 * Some caches understand the correct form: 'no-cache="set-cookie"', but
+		 * others don't (eg: apache <= 1.3.26). So we use 'private' instead.
+		 */
+		if ((s->be->ck_opts & PR_CK_NOC) && (txn->flags & TX_CACHEABLE)) {
+
+			txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+
+			if (unlikely(!http_add_header(htx, ist("Cache-control"), ist("private"))))
+				goto return_fail_rewrite;
+		}
+	}
+
+	/*
+	 * Check if result will be cacheable with a cookie.
+	 * We'll block the response if security checks have caught
+	 * nasty things such as a cacheable cookie.
+	 */
+	if (((txn->flags & (TX_CACHEABLE | TX_CACHE_COOK | TX_SCK_PRESENT)) ==
+	     (TX_CACHEABLE | TX_CACHE_COOK | TX_SCK_PRESENT)) &&
+	    (s->be->options & PR_O_CHK_CACHE)) {
+		/* we're in presence of a cacheable response containing
+		 * a set-cookie header. We'll block it as requested by
+		 * the 'checkcache' option, and send an alert.
+		 */
+		ha_alert("Blocking cacheable cookie in response from instance %s, server %s.\n",
+			 s->be->id, objt_server(s->target) ? __objt_server(s->target)->id : "<dispatch>");
+		send_log(s->be, LOG_ALERT,
+			 "Blocking cacheable cookie in response from instance %s, server %s.\n",
+			 s->be->id, objt_server(s->target) ? __objt_server(s->target)->id : "<dispatch>");
+		goto deny;
+	}
+
+ end:
+	/*
+	 * Evaluate after-response rules before forwarding the response. Rules
+	 * from the backend are evaluated first, then one from the frontend if
+	 * it differs.
+	 */
+	if (!http_eval_after_res_rules(s))
+		goto return_int_err;
+
+	/* Filter the response headers if there are filters attached to the
+	 * stream.
+	 */
+	if (HAS_FILTERS(s))
+		rep->analysers |= AN_RES_FLT_HTTP_HDRS;
+
+	/* Always enter in the body analyzer */
+	rep->analysers &= ~AN_RES_FLT_XFER_DATA;
+	rep->analysers |= AN_RES_HTTP_XFER_BODY;
+
+	/* if the user wants to log as soon as possible, without counting
+	 * bytes from the server, then this is the right moment. We have
+	 * to temporarily assign bytes_out to log what we currently have.
+	 */
+	if (!LIST_ISEMPTY(&sess->fe->logformat) && !(s->logs.logwait & LW_BYTES)) {
+		s->logs.t_close = s->logs.t_data; /* to get a valid end date */
+		s->logs.bytes_out = htx->data;
+		s->do_log(s);
+		s->logs.bytes_out = 0;
+	}
+
+ done:
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+	rep->analysers &= ~an_bit;
+	rep->analyse_exp = TICK_ETERNITY;
+	s->current_rule = s->current_rule_list = NULL;
+	return 1;
+
+ deny:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_resp);
+	_HA_ATOMIC_INC(&s->be->be_counters.denied_resp);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->denied_resp);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.denied_resp);
+	goto return_prx_err;
+
+ return_fail_rewrite:
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
+	/* fall through */
+
+ return_int_err:
+	txn->status = 500;
+	s->flags |= SF_ERR_INTERNAL;
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+	_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
+	goto return_prx_err;
+
+ return_bad_res:
+	txn->status = 502;
+	stream_inc_http_fail_ctr(s);
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+	if (objt_server(s->target)) {
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
+	}
+	/* fall through */
+
+ return_prx_err:
+	http_set_term_flags(s);
+	http_reply_and_close(s, txn->status, http_error_message(s));
+	/* fall through */
+
+ return_prx_cond:
+	s->logs.t_data = -1; /* was not a valid response */
+	s->scb->flags |= SC_FL_NOLINGER;
+
+	http_set_term_flags(s);
+
+	rep->analysers &= AN_RES_FLT_END;
+	s->req.analysers &= AN_REQ_FLT_END;
+	rep->analyse_exp = TICK_ETERNITY;
+	s->current_rule = s->current_rule_list = NULL;
+	DBG_TRACE_DEVEL("leaving on error",
+			STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+	return 0;
+
+ return_prx_yield:
+	channel_dont_close(rep);
+	DBG_TRACE_DEVEL("waiting for more data",
+			STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+	return 0;
+}
+
+/* This function is an analyser which forwards response body (including chunk
+ * sizes if any). It is called as soon as we must forward, even if we forward
+ * zero byte. The only situation where it must not be called is when we're in
+ * tunnel mode and we want to forward till the close. It's used both to forward
+ * remaining data and to resync after end of body. It expects the msg_state to
+ * be between MSG_BODY and MSG_DONE (inclusive). It returns zero if it needs to
+ * read more data, or 1 once we can go on with next request or end the stream.
+ *
+ * It is capable of compressing response data both in content-length mode and
+ * in chunked mode. The state machines follows different flows depending on
+ * whether content-length and chunked modes are used, since there are no
+ * trailers in content-length :
+ *
+ *       chk-mode        cl-mode
+ *          ,----- BODY -----.
+ *         /                  \
+ *        V     size > 0       V    chk-mode
+ *  .--> SIZE -------------> DATA -------------> CRLF
+ *  |     | size == 0        | last byte         |
+ *  |     v      final crlf  v inspected         |
+ *  |  TRAILERS -----------> DONE                |
+ *  |                                            |
+ *  `----------------------------------------------'
+ *
+ * Compression only happens in the DATA state, and must be flushed in final
+ * states (TRAILERS/DONE) or when leaving on missing data. Normal forwarding
+ * is performed at once on final states for all bytes parsed, or when leaving
+ * on missing data.
+ */
+int http_response_forward_body(struct stream *s, struct channel *res, int an_bit)
+{
+	struct session *sess = s->sess;
+	struct http_txn *txn = s->txn;
+	struct http_msg *msg = &s->txn->rsp;
+	struct htx *htx;
+	int ret;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn, msg);
+
+	htx = htxbuf(&res->buf);
+
+	/* a parsing error is a 502 while a processing error is internal */
+	if (htx->flags & HTX_FL_PARSING_ERROR)
+		goto return_bad_res;
+	if (htx->flags & HTX_FL_PROCESSING_ERROR)
+		goto return_int_err;
+
+	if (msg->msg_state == HTTP_MSG_BODY)
+		msg->msg_state = HTTP_MSG_DATA;
+
+	/* in most states, we should abort in case of early close */
+	channel_auto_close(res);
+
+	if (res->to_forward) {
+		if (res->to_forward == CHN_INFINITE_FORWARD) {
+			if (s->scb->flags & SC_FL_EOI)
+				msg->msg_state = HTTP_MSG_ENDING;
+		}
+		else {
+			/* We can't process the buffer's contents yet */
+			res->flags |= CF_WAKE_WRITE;
+			goto missing_data_or_waiting;
+		}
+	}
+
+	if (msg->msg_state >= HTTP_MSG_ENDING)
+		goto ending;
+
+	/* tunnels (CONNECT 2xx, 101) and unframed bodies without data filters
+	 * go straight to the ENDING state
+	 */
+	if ((txn->meth == HTTP_METH_CONNECT && txn->status >= 200 && txn->status < 300) || txn->status == 101 ||
+	    (!(msg->flags & HTTP_MSGF_XFER_LEN) && !HAS_RSP_DATA_FILTERS(s))) {
+		msg->msg_state = HTTP_MSG_ENDING;
+		goto ending;
+	}
+
+	/* Forward input data. We get it by removing all outgoing data not
+	 * forwarded yet from HTX data size. If there are some data filters, we
+	 * let them decide the amount of data to forward.
+	 */
+	if (HAS_RSP_DATA_FILTERS(s)) {
+		ret = flt_http_payload(s, msg, htx->data);
+		if (ret < 0)
+			goto return_bad_res;
+		c_adv(res, ret);
+	}
+	else {
+		c_adv(res, htx->data - co_data(res));
+		if ((global.tune.options & GTUNE_USE_FAST_FWD) && (msg->flags & HTTP_MSGF_XFER_LEN))
+			channel_htx_forward_forever(res, htx);
+	}
+
+	if (htx->data != co_data(res))
+		goto missing_data_or_waiting;
+
+	if (!(msg->flags & HTTP_MSGF_XFER_LEN) && (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
+		msg->msg_state = HTTP_MSG_ENDING;
+		goto ending;
+	}
+
+	/* Check if the end-of-message is reached and if so, switch the message
+	 * in HTTP_MSG_ENDING state. Then if all data was marked to be
+	 * forwarded, set the state to HTTP_MSG_DONE.
+	 */
+	if (!(htx->flags & HTX_FL_EOM))
+		goto missing_data_or_waiting;
+
+	msg->msg_state = HTTP_MSG_ENDING;
+
+ ending:
+	s->scf->flags &= ~SC_FL_SND_EXP_MORE; /* no more data are expected to be sent */
+
+	/* other states, ENDING...TUNNEL */
+	if (msg->msg_state >= HTTP_MSG_DONE)
+		goto done;
+
+	if (HAS_RSP_DATA_FILTERS(s)) {
+		ret = flt_http_end(s, msg);
+		if (ret <= 0) {
+			if (!ret)
+				goto missing_data_or_waiting;
+			goto return_bad_res;
+		}
+	}
+
+	if (!(txn->flags & TX_CON_WANT_TUN) && !(msg->flags & HTTP_MSGF_XFER_LEN)) {
+		/* One-side tunnel */
+		msg->msg_state = HTTP_MSG_TUNNEL;
+	}
+	else {
+		msg->msg_state = HTTP_MSG_DONE;
+		res->to_forward = 0;
+	}
+
+ done:
+
+	channel_dont_close(res);
+
+	if ((s->scf->flags & SC_FL_SHUT_DONE) && co_data(res)) {
+		/* response errors are most likely due to the client aborting
+		 * the transfer. */
+		goto return_cli_abort;
+	}
+
+	http_end_response(s);
+	if (!(res->analysers & an_bit)) {
+		DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+		return 1;
+	}
+	DBG_TRACE_DEVEL("waiting for the end of the HTTP txn",
+			STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+	return 0;
+
+ missing_data_or_waiting:
+	if (s->scf->flags & SC_FL_SHUT_DONE)
+		goto return_cli_abort;
+
+	/* stop waiting for data if the input is closed before the end. If the
+	 * client side was already closed, it means that the client has aborted,
+	 * so we don't want to count this as a server abort. Otherwise it's a
+	 * server abort.
+	 */
+	if (msg->msg_state < HTTP_MSG_ENDING && (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
+		if ((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
+		    (s->scb->flags & SC_FL_SHUT_DONE))
+			goto return_cli_abort;
+		/* If we have some pending data, we continue the processing */
+		if (htx_is_empty(htx))
+			goto return_srv_abort;
+	}
+
+	/* When TE: chunked is used, we need to get there again to parse
+	 * remaining chunks even if the server has closed, so we don't want to
+	 * set CF_DONTCLOSE. Similarly when there is a content-length or if there
+	 * are filters registered on the stream, we don't want to forward a
+	 * close
+	 */
+	if ((msg->flags & HTTP_MSGF_XFER_LEN) || HAS_RSP_DATA_FILTERS(s))
+		channel_dont_close(res);
+
+	/* We know that more data are expected, but we couldn't send more that
+	 * what we did. So we always set the SC_FL_SND_EXP_MORE flag so that the
+	 * system knows it must not set a PUSH on this first part. Interactive
+	 * modes are already handled by the stream sock layer. We must not do
+	 * this in content-length mode because it could present the MSG_MORE
+	 * flag with the last block of forwarded data, which would cause an
+	 * additional delay to be observed by the receiver.
+	 */
+	if (HAS_RSP_DATA_FILTERS(s))
+		s->scf->flags |= SC_FL_SND_EXP_MORE;
+
+	/* the stream handler will take care of timeouts and errors */
+	DBG_TRACE_DEVEL("waiting for more data to forward",
+			STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA, s, txn);
+	return 0;
+
+ return_srv_abort:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+	_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.srv_aborts);
+	stream_inc_http_fail_ctr(s);
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= ((res->flags & CF_READ_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
+	goto return_error;
+
+ return_cli_abort:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+	_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= ((res->flags & CF_WRITE_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
+	goto return_error;
+
+ return_int_err:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+	_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
+	s->flags |= SF_ERR_INTERNAL;
+	goto return_error;
+
+ return_bad_res:
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+	if (objt_server(s->target)) {
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
+	}
+	stream_inc_http_fail_ctr(s);
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_SRVCL;
+	/* fall through */
+
+ return_error:
+	/* don't send any error message as we're in the body */
+	http_set_term_flags(s);
+	http_reply_and_close(s, txn->status, NULL);
+	/* NOTE(review): the srv_abort and bad_res paths above already called
+	 * stream_inc_http_fail_ctr(), so those paths increment the fail
+	 * counter twice here — confirm this double count is intended.
+	 */
+	stream_inc_http_fail_ctr(s);
+	DBG_TRACE_DEVEL("leaving on error",
+			STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_HTTP_ERR, s, txn);
+	return 0;
+}
+
+/* Perform an HTTP redirect based on the information in <rule>. The function
+ * returns zero in case of an irrecoverable error such as too large a request
+ * to build a valid response, 1 in case of successful redirect (hence the rule
+ * is final), or 2 if the rule has to be silently skipped.
+ */
+int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struct http_txn *txn)
+{
+ struct channel *req = &s->req;
+ struct channel *res = &s->res;
+ struct htx *htx;
+ struct htx_sl *sl;
+ struct buffer *chunk;
+ struct ist status, reason, location;
+ unsigned int flags;
+ int ret = 1, close = 0; /* Try to keep the connection alive by default */
+
+ chunk = alloc_trash_chunk();
+ if (!chunk) {
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ goto fail;
+ }
+
+ /*
+ * Create the location
+ */
+ htx = htxbuf(&req->buf);
+ switch(rule->type) {
+ case REDIRECT_TYPE_SCHEME: {
+ struct http_hdr_ctx ctx;
+ struct ist path, host;
+ struct http_uri_parser parser;
+
+ host = ist("");
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("Host"), &ctx, 0))
+ host = ctx.value;
+
+ sl = http_get_stline(htx);
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+ path = http_parse_path(&parser);
+ /* build message using path */
+ if (isttest(path)) {
+ if (rule->flags & REDIRECT_FLAG_DROP_QS) {
+ int qs = 0;
+ while (qs < path.len) {
+ if (*(path.ptr + qs) == '?') {
+ path.len = qs;
+ break;
+ }
+ qs++;
+ }
+ }
+ }
+ else
+ path = ist("/");
+
+ if (rule->rdr_str) { /* this is an old "redirect" rule */
+ /* add scheme */
+ if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
+ goto fail;
+ }
+ else {
+ /* add scheme with executing log format */
+ chunk->data += build_logline(s, chunk->area + chunk->data,
+ chunk->size - chunk->data,
+ &rule->rdr_fmt);
+ }
+ /* add "://" + host + path */
+ if (!chunk_memcat(chunk, "://", 3) ||
+ !chunk_memcat(chunk, host.ptr, host.len) ||
+ !chunk_memcat(chunk, path.ptr, path.len))
+ goto fail;
+
+ /* append a slash at the end of the location if needed and missing */
+ if (chunk->data && chunk->area[chunk->data - 1] != '/' &&
+ (rule->flags & REDIRECT_FLAG_APPEND_SLASH)) {
+ if (chunk->data + 1 >= chunk->size)
+ goto fail;
+ chunk->area[chunk->data++] = '/';
+ }
+ break;
+ }
+
+ case REDIRECT_TYPE_PREFIX: {
+ struct ist path;
+ struct http_uri_parser parser;
+
+ sl = http_get_stline(htx);
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+ path = http_parse_path(&parser);
+ /* build message using path */
+ if (isttest(path)) {
+ if (rule->flags & REDIRECT_FLAG_DROP_QS) {
+ int qs = 0;
+ while (qs < path.len) {
+ if (*(path.ptr + qs) == '?') {
+ path.len = qs;
+ break;
+ }
+ qs++;
+ }
+ }
+ }
+ else
+ path = ist("/");
+
+ if (rule->rdr_str) { /* this is an old "redirect" rule */
+ /* add prefix. Note that if prefix == "/", we don't want to
+ * add anything, otherwise it makes it hard for the user to
+ * configure a self-redirection.
+ */
+ if (rule->rdr_len != 1 || *rule->rdr_str != '/') {
+ if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
+ goto fail;
+ }
+ }
+ else {
+ /* add prefix with executing log format */
+ chunk->data += build_logline(s, chunk->area + chunk->data,
+ chunk->size - chunk->data,
+ &rule->rdr_fmt);
+ }
+
+ /* add path */
+ if (!chunk_memcat(chunk, path.ptr, path.len))
+ goto fail;
+
+ /* append a slash at the end of the location if needed and missing */
+ if (chunk->data && chunk->area[chunk->data - 1] != '/' &&
+ (rule->flags & REDIRECT_FLAG_APPEND_SLASH)) {
+ if (chunk->data + 1 >= chunk->size)
+ goto fail;
+ chunk->area[chunk->data++] = '/';
+ }
+ break;
+ }
+ case REDIRECT_TYPE_LOCATION:
+ default:
+ if (rule->rdr_str) { /* this is an old "redirect" rule */
+ /* add location */
+ if (!chunk_memcat(chunk, rule->rdr_str, rule->rdr_len))
+ goto fail;
+ }
+ else {
+ /* add location with executing log format */
+ int len = build_logline(s, chunk->area + chunk->data,
+ chunk->size - chunk->data,
+ &rule->rdr_fmt);
+ if (!len && rule->flags & REDIRECT_FLAG_IGNORE_EMPTY) {
+ ret = 2;
+ goto out;
+ }
+
+ chunk->data += len;
+ }
+ break;
+ }
+ location = ist2(chunk->area, chunk->data);
+
+ /*
+ * Create the 30x response
+ */
+ switch (rule->code) {
+ case 308:
+ status = ist("308");
+ reason = ist("Permanent Redirect");
+ break;
+ case 307:
+ status = ist("307");
+ reason = ist("Temporary Redirect");
+ break;
+ case 303:
+ status = ist("303");
+ reason = ist("See Other");
+ break;
+ case 301:
+ status = ist("301");
+ reason = ist("Moved Permanently");
+ break;
+ case 302:
+ default:
+ status = ist("302");
+ reason = ist("Found");
+ break;
+ }
+
+ if (!(txn->req.flags & HTTP_MSGF_BODYLESS) && txn->req.msg_state != HTTP_MSG_DONE)
+ close = 1;
+
+ htx = htx_from_buf(&res->buf);
+ /* Trim any possible response */
+ channel_htx_truncate(&s->res, htx);
+ flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN|HTX_SL_F_BODYLESS);
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), status, reason);
+ if (!sl)
+ goto fail;
+ sl->info.res.status = rule->code;
+ s->txn->status = rule->code;
+
+ if (close && !htx_add_header(htx, ist("Connection"), ist("close")))
+ goto fail;
+
+ if (!htx_add_header(htx, ist("Content-length"), ist("0")) ||
+ !htx_add_header(htx, ist("Location"), location))
+ goto fail;
+
+ if (rule->code == 302 || rule->code == 303 || rule->code == 307) {
+ if (!htx_add_header(htx, ist("Cache-Control"), ist("no-cache")))
+ goto fail;
+ }
+
+ if (rule->cookie_len) {
+ if (!htx_add_header(htx, ist("Set-Cookie"), ist2(rule->cookie_str, rule->cookie_len)))
+ goto fail;
+ }
+
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
+ goto fail;
+
+ htx->flags |= HTX_FL_EOM;
+ htx_to_buf(htx, &res->buf);
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_LOCAL;
+ http_set_term_flags(s);
+
+ if (!http_forward_proxy_resp(s, 1))
+ goto fail;
+
+ if (rule->flags & REDIRECT_FLAG_FROM_REQ) {
+ /* let's log the request time */
+ s->logs.request_ts = now_ns;
+ req->analysers &= AN_REQ_FLT_END;
+
+ if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+ _HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
+ }
+
+ out:
+ free_trash_chunk(chunk);
+ return ret;
+
+ fail:
+ /* If an error occurred, remove the incomplete HTTP response from the
+ * buffer */
+ channel_htx_truncate(res, htxbuf(&res->buf));
+ ret = 0;
+ goto out;
+}
+
+/* This function filters the request header names to only allow [0-9a-zA-Z-]
+ * characters. Depending on the proxy configuration, headers with a name not
+ * matching this charset are removed or the request is rejected with a
+ * 403-Forbidden response if such name are found. It returns HTTP_RULE_RES_CONT
+ * to continue the request processing or HTTP_RULE_RES_DENY if the request is
+ * rejected.
+ */
+static enum rule_result http_req_restrict_header_names(struct stream *s, struct htx *htx, struct proxy *px)
+{
+ struct htx_blk *blk;
+ enum rule_result rule_ret = HTTP_RULE_RES_CONT;
+
+ blk = htx_get_first_blk(htx);
+ while (blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_HDR) {
+ struct ist n = htx_get_blk_name(htx, blk);
+ int i, end = istlen(n);
+
+ for (i = 0; i < end; i++) {
+ if (!isalnum((unsigned char)n.ptr[i]) && n.ptr[i] != '-') {
+ break;
+ }
+ }
+
+ if (i < end) {
+ /* Disallowed character found - block the request or remove the header */
+ if (px->options2 & PR_O2_RSTRICT_REQ_HDR_NAMES_BLK)
+ goto block;
+ blk = htx_remove_blk(htx, blk);
+ continue;
+ }
+ }
+ if (type == HTX_BLK_EOH)
+ break;
+
+ blk = htx_get_next_blk(htx, blk);
+ }
+ out:
+ return rule_ret;
+ block:
+ /* Block the request returning a 403-Forbidden response */
+ s->txn->status = 403;
+ rule_ret = HTTP_RULE_RES_DENY;
+ goto out;
+}
+
+/* Replace all headers matching the name <name>. The header value is replaced if
+ * it matches the regex <re>. <str> is used for the replacement. If <full> is
+ * set to 1, the full-line is matched and replaced. Otherwise, comma-separated
+ * values are evaluated one by one. It returns 0 on success and -1 on error.
+ */
+int http_replace_hdrs(struct stream* s, struct htx *htx, struct ist name,
+ const char *str, struct my_regex *re, int full)
+{
+ struct http_hdr_ctx ctx;
+
+ ctx.blk = NULL;
+ while (http_find_header(htx, name, &ctx, full)) {
+ struct buffer *output = get_trash_chunk();
+
+ if (!regex_exec_match2(re, ctx.value.ptr, ctx.value.len, MAX_MATCH, pmatch, 0))
+ continue;
+
+ output->data = exp_replace(output->area, output->size, ctx.value.ptr, str, pmatch);
+ if (output->data == -1)
+ return -1;
+ if (!http_replace_header_value(htx, &ctx, ist2(output->area, output->data)))
+ return -1;
+ }
+ return 0;
+}
+
+/* This function executes one of the set-{method,path,query,uri} actions. It
+ * takes the string from the variable 'replace' with length 'len', then modifies
+ * the relevant part of the request line accordingly. Then it updates various
+ * pointers to the next elements which were moved, and the total buffer length.
+ * It finds the action to be performed in p[2], previously filled by function
+ * parse_set_req_line(). It returns 0 in case of success, -1 in case of internal
+ * error, though this can be revisited when this code is finally exploited.
+ *
+ * 'action' can be '0' to replace method, '1' to replace path, '2' to replace
+ * query string, 3 to replace uri or 4 to replace the path+query.
+ *
+ * In query string case, the mark question '?' must be set at the start of the
+ * string by the caller, event if the replacement query string is empty.
+ */
+int http_req_replace_stline(int action, const char *replace, int len,
+ struct proxy *px, struct stream *s)
+{
+ struct htx *htx = htxbuf(&s->req.buf);
+
+ switch (action) {
+ case 0: // method
+ if (!http_replace_req_meth(htx, ist2(replace, len)))
+ return -1;
+ break;
+
+ case 1: // path
+ if (!http_replace_req_path(htx, ist2(replace, len), 0))
+ return -1;
+ break;
+
+ case 2: // query
+ if (!http_replace_req_query(htx, ist2(replace, len)))
+ return -1;
+ break;
+
+ case 3: // uri
+ if (!http_replace_req_uri(htx, ist2(replace, len)))
+ return -1;
+ break;
+
+ case 4: // path + query
+ if (!http_replace_req_path(htx, ist2(replace, len), 1))
+ return -1;
+ break;
+
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/* This function replace the HTTP status code and the associated message. The
+ * variable <status> contains the new status code. This function never fails. It
+ * returns 0 in case of success, -1 in case of internal error.
+ */
+int http_res_set_status(unsigned int status, struct ist reason, struct stream *s)
+{
+ struct htx *htx = htxbuf(&s->res.buf);
+ char *res;
+
+ chunk_reset(&trash);
+ res = ultoa_o(status, trash.area, trash.size);
+ trash.data = res - trash.area;
+
+ /* Do we have a custom reason format string? */
+ if (!isttest(reason)) {
+ const char *str = http_get_reason(status);
+ reason = ist(str);
+ }
+
+ if (!http_replace_res_status(htx, ist2(trash.area, trash.data), reason))
+ return -1;
+ s->txn->status = status;
+ return 0;
+}
+
/* Executes the http-request rules <rules> (and the defaults-section rules
 * <def_rules> first, when set) for stream <s> and proxy <px>. Returns the
 * verdict of the first rule that prevents further processing of the request
 * (auth, deny, ...), and defaults to HTTP_RULE_RES_STOP if it executed all
 * rules or stopped on an allow, or HTTP_RULE_RES_CONT if the last rule was
 * reached. It may set the TX_CLTARPIT flag on txn->flags if it encounters a
 * tarpit rule. A rule returning ACT_RET_YIELD makes the function return
 * HTTP_RULE_RES_YIELD and record the rule in s->current_rule so that the
 * evaluation resumes at the same point on the next call.
 */
static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct list *def_rules,
						    struct list *rules, struct stream *s)
{
	struct session *sess = strm_sess(s);
	struct http_txn *txn = s->txn;
	struct act_rule *rule;
	enum rule_result rule_ret = HTTP_RULE_RES_CONT;
	int act_opts = 0;

	/* If "the current_rule_list" match the executed rule list, we are in
	 * resume condition. If a resume is needed it is always in the action
	 * and never in the ACL or converters. In this case, we initialise the
	 * current rule, and go to the action execution point.
	 */
	if (s->current_rule) {
		rule = s->current_rule;
		s->current_rule = NULL;
		if (s->current_rule_list == rules || (def_rules && s->current_rule_list == def_rules))
			goto resume_execution;
	}
	s->current_rule_list = ((!def_rules || s->current_rule_list == def_rules) ? rules : def_rules);

  restart:
	/* start the ruleset evaluation in strict mode */
	txn->req.flags &= ~HTTP_MSGF_SOFT_RW;

	list_for_each_entry(rule, s->current_rule_list, list) {
		/* check optional condition */
		if (rule->cond) {
			int ret;

			ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
			ret = acl_pass(ret);

			if (rule->cond->pol == ACL_COND_UNLESS)
				ret = !ret;

			if (!ret) /* condition not matched */
				continue;
		}

		act_opts |= ACT_OPT_FIRST;
  resume_execution:
		if (rule->kw->flags & KWF_EXPERIMENTAL)
			mark_tainted(TAINTED_ACTION_EXP_EXECUTED);

		/* Always call the action function if defined */
		if (rule->action_ptr) {
			/* tell the action this is its last chance to complete
			 * if the client aborted or errored out.
			 */
			if ((s->scf->flags & SC_FL_ERROR) ||
			    ((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
			     (px->options & PR_O_ABRT_CLOSE)))
				act_opts |= ACT_OPT_FINAL;

			switch (rule->action_ptr(rule, px, sess, s, act_opts)) {
			case ACT_RET_CONT:
				break;
			case ACT_RET_STOP:
				rule_ret = HTTP_RULE_RES_STOP;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_YIELD:
				/* save the resume point; the caller must call again */
				s->current_rule = rule;
				rule_ret = HTTP_RULE_RES_YIELD;
				goto end;
			case ACT_RET_ERR:
				rule_ret = HTTP_RULE_RES_ERROR;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_DONE:
				rule_ret = HTTP_RULE_RES_DONE;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_DENY:
				if (txn->status == -1)
					txn->status = 403;
				rule_ret = HTTP_RULE_RES_DENY;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_ABRT:
				rule_ret = HTTP_RULE_RES_ABRT;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_INV:
				rule_ret = HTTP_RULE_RES_BADREQ;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			}
			continue; /* eval the next rule */
		}

		/* If not action function defined, check for known actions */
		switch (rule->action) {
		case ACT_ACTION_ALLOW:
			rule_ret = HTTP_RULE_RES_STOP;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;

		case ACT_ACTION_DENY:
			txn->status = rule->arg.http_reply->status;
			txn->http_reply = rule->arg.http_reply;
			rule_ret = HTTP_RULE_RES_DENY;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;

		case ACT_HTTP_REQ_TARPIT:
			txn->flags |= TX_CLTARPIT;
			txn->status = rule->arg.http_reply->status;
			txn->http_reply = rule->arg.http_reply;
			rule_ret = HTTP_RULE_RES_DENY;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;

		case ACT_HTTP_REDIR: {
			int ret = http_apply_redirect_rule(rule->arg.redir, s, txn);

			if (ret == 2) // 2 == skip
				break;

			rule_ret = ret ? HTTP_RULE_RES_ABRT : HTTP_RULE_RES_ERROR;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;
		}

		/* other flags exists, but normally, they never be matched. */
		default:
			break;
		}
	}

	/* defaults-section rules done: evaluate the proxy's own ruleset */
	if (def_rules && s->current_rule_list == def_rules) {
		s->current_rule_list = rules;
		goto restart;
	}

  end:
	/* if the ruleset evaluation is finished reset the strict mode */
	if (rule_ret != HTTP_RULE_RES_YIELD)
		txn->req.flags &= ~HTTP_MSGF_SOFT_RW;

	/* we reached the end of the rules, nothing to report */
	return rule_ret;
}
+
/* Executes the http-response rules <rules> (and the defaults-section rules
 * <def_rules> first, when set) for stream <s> and proxy <px>. It returns one
 * of the rule_result statuses: HTTP_RULE_RES_CONT, HTTP_RULE_RES_STOP,
 * HTTP_RULE_RES_DONE, HTTP_RULE_RES_YIELD, HTTP_RULE_RES_DENY,
 * HTTP_RULE_RES_ABRT, HTTP_RULE_RES_ERROR or HTTP_RULE_RES_BADREQ. If *CONT
 * is returned, the process can continue the evaluation of next rule list. If
 * *STOP or *DONE is returned, the process must stop the evaluation. If *BADREQ
 * is returned, it means the operation could not be processed and a server error
 * must be returned. If *YIELD is returned, the caller must call again the
 * function with the same context. <final> forces ACT_OPT_FINAL on all actions
 * (no further yielding allowed).
 */
static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct list *def_rules,
						    struct list *rules, struct stream *s, uint8_t final)
{
	struct session *sess = strm_sess(s);
	struct http_txn *txn = s->txn;
	struct act_rule *rule;
	enum rule_result rule_ret = HTTP_RULE_RES_CONT;
	int act_opts = 0;

	if (final)
		act_opts |= ACT_OPT_FINAL;
	/* If "the current_rule_list" match the executed rule list, we are in
	 * resume condition. If a resume is needed it is always in the action
	 * and never in the ACL or converters. In this case, we initialise the
	 * current rule, and go to the action execution point.
	 */
	if (s->current_rule) {
		rule = s->current_rule;
		s->current_rule = NULL;
		if (s->current_rule_list == rules || (def_rules && s->current_rule_list == def_rules))
			goto resume_execution;
	}
	s->current_rule_list = ((!def_rules || s->current_rule_list == def_rules) ? rules : def_rules);

  restart:

	/* start the ruleset evaluation in strict mode */
	txn->rsp.flags &= ~HTTP_MSGF_SOFT_RW;

	list_for_each_entry(rule, s->current_rule_list, list) {
		/* check optional condition */
		if (rule->cond) {
			int ret;

			ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
			ret = acl_pass(ret);

			if (rule->cond->pol == ACL_COND_UNLESS)
				ret = !ret;

			if (!ret) /* condition not matched */
				continue;
		}

		act_opts |= ACT_OPT_FIRST;
resume_execution:
		if (rule->kw->flags & KWF_EXPERIMENTAL)
			mark_tainted(TAINTED_ACTION_EXP_EXECUTED);

		/* Always call the action function if defined */
		if (rule->action_ptr) {
			/* tell the action this is its last chance to complete
			 * if the server aborted or errored out.
			 */
			if ((s->scf->flags & SC_FL_ERROR) ||
			    ((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
			     (px->options & PR_O_ABRT_CLOSE)))
				act_opts |= ACT_OPT_FINAL;

			switch (rule->action_ptr(rule, px, sess, s, act_opts)) {
			case ACT_RET_CONT:
				break;
			case ACT_RET_STOP:
				rule_ret = HTTP_RULE_RES_STOP;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_YIELD:
				/* save the resume point; the caller must call again */
				s->current_rule = rule;
				rule_ret = HTTP_RULE_RES_YIELD;
				goto end;
			case ACT_RET_ERR:
				rule_ret = HTTP_RULE_RES_ERROR;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_DONE:
				rule_ret = HTTP_RULE_RES_DONE;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_DENY:
				if (txn->status == -1)
					txn->status = 502;
				rule_ret = HTTP_RULE_RES_DENY;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_ABRT:
				rule_ret = HTTP_RULE_RES_ABRT;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			case ACT_RET_INV:
				rule_ret = HTTP_RULE_RES_BADREQ;
				s->last_rule_file = rule->conf.file;
				s->last_rule_line = rule->conf.line;
				goto end;
			}
			continue; /* eval the next rule */
		}

		/* If not action function defined, check for known actions */
		switch (rule->action) {
		case ACT_ACTION_ALLOW:
			rule_ret = HTTP_RULE_RES_STOP; /* "allow" rules are OK */
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;

		case ACT_ACTION_DENY:
			txn->status = rule->arg.http_reply->status;
			txn->http_reply = rule->arg.http_reply;
			rule_ret = HTTP_RULE_RES_DENY;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;

		case ACT_HTTP_REDIR: {
			int ret = http_apply_redirect_rule(rule->arg.redir, s, txn);

			if (ret == 2) // 2 == skip
				break;

			rule_ret = ret ? HTTP_RULE_RES_ABRT : HTTP_RULE_RES_ERROR;
			s->last_rule_file = rule->conf.file;
			s->last_rule_line = rule->conf.line;
			goto end;
		}
		/* other flags exists, but normally, they never be matched. */
		default:
			break;
		}
	}

	/* defaults-section rules done: evaluate the proxy's own ruleset */
	if (def_rules && s->current_rule_list == def_rules) {
		s->current_rule_list = rules;
		goto restart;
	}

  end:
	/* if the ruleset evaluation is finished reset the strict mode */
	if (rule_ret != HTTP_RULE_RES_YIELD)
		txn->rsp.flags &= ~HTTP_MSGF_SOFT_RW;

	/* we reached the end of the rules, nothing to report */
	return rule_ret;
}
+
+/* Executes backend and frontend http-after-response rules for the stream <s>,
+ * in that order. it return 1 on success and 0 on error. It is the caller
+ * responsibility to catch error or ignore it. If it catches it, this function
+ * may be called a second time, for the internal error.
+ */
+int http_eval_after_res_rules(struct stream *s)
+{
+ struct list *def_rules, *rules;
+ struct session *sess = s->sess;
+ enum rule_result ret = HTTP_RULE_RES_CONT;
+
+ /* Eval after-response ruleset only if the reply is not const */
+ if (s->txn->flags & TX_CONST_REPLY)
+ goto end;
+
+ /* prune the request variables if not already done and swap to the response variables. */
+ if (s->vars_reqres.scope != SCOPE_RES) {
+ if (!LIST_ISEMPTY(&s->vars_reqres.head))
+ vars_prune(&s->vars_reqres, s->sess, s);
+ vars_init_head(&s->vars_reqres, SCOPE_RES);
+ }
+
+ def_rules = (s->be->defpx ? &s->be->defpx->http_after_res_rules : NULL);
+ rules = &s->be->http_after_res_rules;
+
+ ret = http_res_get_intercept_rule(s->be, def_rules, rules, s, 1);
+ if ((ret == HTTP_RULE_RES_CONT || ret == HTTP_RULE_RES_STOP) && sess->fe != s->be) {
+ def_rules = ((sess->fe->defpx && sess->fe->defpx != s->be->defpx) ? &sess->fe->defpx->http_after_res_rules : NULL);
+ rules = &sess->fe->http_after_res_rules;
+ ret = http_res_get_intercept_rule(sess->fe, def_rules, rules, s, 1);
+ }
+
+ end:
+ /* All other codes than CONTINUE, STOP or DONE are forbidden */
+ return (ret == HTTP_RULE_RES_CONT || ret == HTTP_RULE_RES_STOP || ret == HTTP_RULE_RES_DONE);
+}
+
+/*
+ * Manage client-side cookie. It can impact performance by about 2% so it is
+ * desirable to call it only when needed. This code is quite complex because
+ * of the multiple very crappy and ambiguous syntaxes we have to support. it
+ * highly recommended not to touch this part without a good reason !
+ */
+static void http_manage_client_side_cookies(struct stream *s, struct channel *req)
+{
+ struct session *sess = s->sess;
+ struct http_txn *txn = s->txn;
+ struct htx *htx;
+ struct http_hdr_ctx ctx;
+ char *hdr_beg, *hdr_end, *del_from;
+ char *prev, *att_beg, *att_end, *equal, *val_beg, *val_end, *next;
+ int preserve_hdr;
+
+ htx = htxbuf(&req->buf);
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("Cookie"), &ctx, 1)) {
+ int is_first = 1;
+ del_from = NULL; /* nothing to be deleted */
+ preserve_hdr = 0; /* assume we may kill the whole header */
+
+ /* Now look for cookies. Conforming to RFC2109, we have to support
+ * attributes whose name begin with a '$', and associate them with
+ * the right cookie, if we want to delete this cookie.
+ * So there are 3 cases for each cookie read :
+ * 1) it's a special attribute, beginning with a '$' : ignore it.
+ * 2) it's a server id cookie that we *MAY* want to delete : save
+ * some pointers on it (last semi-colon, beginning of cookie...)
+ * 3) it's an application cookie : we *MAY* have to delete a previous
+ * "special" cookie.
+ * At the end of loop, if a "special" cookie remains, we may have to
+ * remove it. If no application cookie persists in the header, we
+ * *MUST* delete it.
+ *
+ * Note: RFC2965 is unclear about the processing of spaces around
+ * the equal sign in the ATTR=VALUE form. A careful inspection of
+ * the RFC explicitly allows spaces before it, and not within the
+ * tokens (attrs or values). An inspection of RFC2109 allows that
+ * too but section 10.1.3 lets one think that spaces may be allowed
+ * after the equal sign too, resulting in some (rare) buggy
+ * implementations trying to do that. So let's do what servers do.
+ * Latest ietf draft forbids spaces all around. Also, earlier RFCs
+ * allowed quoted strings in values, with any possible character
+ * after a backslash, including control chars and delimiters, which
+ * causes parsing to become ambiguous. Browsers also allow spaces
+ * within values even without quotes.
+ *
+ * We have to keep multiple pointers in order to support cookie
+ * removal at the beginning, middle or end of header without
+ * corrupting the header. All of these headers are valid :
+ *
+ * hdr_beg hdr_end
+ * | |
+ * v |
+ * NAME1=VALUE1;NAME2=VALUE2;NAME3=VALUE3 |
+ * NAME1=VALUE1;NAME2_ONLY ;NAME3=VALUE3 v
+ * NAME1 = VALUE 1 ; NAME2 = VALUE2 ; NAME3 = VALUE3
+ * | | | | | | |
+ * | | | | | | |
+ * | | | | | | +--> next
+ * | | | | | +----> val_end
+ * | | | | +-----------> val_beg
+ * | | | +--------------> equal
+ * | | +----------------> att_end
+ * | +---------------------> att_beg
+ * +--------------------------> prev
+ *
+ */
+ hdr_beg = ctx.value.ptr;
+ hdr_end = hdr_beg + ctx.value.len;
+ for (prev = hdr_beg; prev < hdr_end; prev = next) {
+ /* Iterate through all cookies on this line */
+
+ /* find att_beg */
+ att_beg = prev;
+ if (!is_first)
+ att_beg++;
+ is_first = 0;
+
+ while (att_beg < hdr_end && HTTP_IS_SPHT(*att_beg))
+ att_beg++;
+
+ /* find att_end : this is the first character after the last non
+ * space before the equal. It may be equal to hdr_end.
+ */
+ equal = att_end = att_beg;
+ while (equal < hdr_end) {
+ if (*equal == '=' || *equal == ',' || *equal == ';')
+ break;
+ if (HTTP_IS_SPHT(*equal++))
+ continue;
+ att_end = equal;
+ }
+
+ /* here, <equal> points to '=', a delimiter or the end. <att_end>
+ * is between <att_beg> and <equal>, both may be identical.
+ */
+ /* look for end of cookie if there is an equal sign */
+ if (equal < hdr_end && *equal == '=') {
+ /* look for the beginning of the value */
+ val_beg = equal + 1;
+ while (val_beg < hdr_end && HTTP_IS_SPHT(*val_beg))
+ val_beg++;
+
+ /* find the end of the value, respecting quotes */
+ next = http_find_cookie_value_end(val_beg, hdr_end);
+
+ /* make val_end point to the first white space or delimiter after the value */
+ val_end = next;
+ while (val_end > val_beg && HTTP_IS_SPHT(*(val_end - 1)))
+ val_end--;
+ }
+ else
+ val_beg = val_end = next = equal;
+
+ /* We have nothing to do with attributes beginning with
+ * '$'. However, they will automatically be removed if a
+ * header before them is removed, since they're supposed
+ * to be linked together.
+ */
+ if (*att_beg == '$')
+ continue;
+
+ /* Ignore cookies with no equal sign */
+ if (equal == next) {
+ /* This is not our cookie, so we must preserve it. But if we already
+ * scheduled another cookie for removal, we cannot remove the
+ * complete header, but we can remove the previous block itself.
+ */
+ preserve_hdr = 1;
+ if (del_from != NULL) {
+ int delta = http_del_hdr_value(hdr_beg, hdr_end, &del_from, prev);
+ val_end += delta;
+ next += delta;
+ hdr_end += delta;
+ prev = del_from;
+ del_from = NULL;
+ }
+ continue;
+ }
+
+ /* if there are spaces around the equal sign, we need to
+ * strip them otherwise we'll get trouble for cookie captures,
+ * or even for rewrites. Since this happens extremely rarely,
+ * it does not hurt performance.
+ */
+ if (unlikely(att_end != equal || val_beg > equal + 1)) {
+ int stripped_before = 0;
+ int stripped_after = 0;
+
+ if (att_end != equal) {
+ memmove(att_end, equal, hdr_end - equal);
+ stripped_before = (att_end - equal);
+ equal += stripped_before;
+ val_beg += stripped_before;
+ }
+
+ if (val_beg > equal + 1) {
+ memmove(equal + 1, val_beg, hdr_end + stripped_before - val_beg);
+ stripped_after = (equal + 1) - val_beg;
+ val_beg += stripped_after;
+ stripped_before += stripped_after;
+ }
+
+ val_end += stripped_before;
+ next += stripped_before;
+ hdr_end += stripped_before;
+ }
+ /* now everything is as on the diagram above */
+
+ /* First, let's see if we want to capture this cookie. We check
+ * that we don't already have a client side cookie, because we
+ * can only capture one. Also as an optimisation, we ignore
+ * cookies shorter than the declared name.
+ */
+ if (sess->fe->capture_name != NULL && txn->cli_cookie == NULL &&
+ (val_end - att_beg >= sess->fe->capture_namelen) &&
+ memcmp(att_beg, sess->fe->capture_name, sess->fe->capture_namelen) == 0) {
+ int log_len = val_end - att_beg;
+
+ if ((txn->cli_cookie = pool_alloc(pool_head_capture)) == NULL) {
+ ha_alert("HTTP logging : out of memory.\n");
+ } else {
+ if (log_len > sess->fe->capture_len)
+ log_len = sess->fe->capture_len;
+ memcpy(txn->cli_cookie, att_beg, log_len);
+ txn->cli_cookie[log_len] = 0;
+ }
+ }
+
+ /* Persistence cookies in passive, rewrite or insert mode have the
+ * following form :
+ *
+ * Cookie: NAME=SRV[|<lastseen>[|<firstseen>]]
+ *
+ * For cookies in prefix mode, the form is :
+ *
+ * Cookie: NAME=SRV~VALUE
+ */
+ if ((att_end - att_beg == s->be->cookie_len) && (s->be->cookie_name != NULL) &&
+ (memcmp(att_beg, s->be->cookie_name, att_end - att_beg) == 0)) {
+ struct server *srv = s->be->srv;
+ char *delim;
+
+ /* if we're in cookie prefix mode, we'll search the delimiter so that we
+ * have the server ID between val_beg and delim, and the original cookie between
+ * delim+1 and val_end. Otherwise, delim==val_end :
+ *
+ * hdr_beg
+ * |
+ * v
+ * NAME=SRV; # in all but prefix modes
+ * NAME=SRV~OPAQUE ; # in prefix mode
+ * || || | |+-> next
+ * || || | +--> val_end
+ * || || +---------> delim
+ * || |+------------> val_beg
+ * || +-------------> att_end = equal
+ * |+-----------------> att_beg
+ * +------------------> prev
+ *
+ */
+ if (s->be->ck_opts & PR_CK_PFX) {
+ for (delim = val_beg; delim < val_end; delim++)
+ if (*delim == COOKIE_DELIM)
+ break;
+ }
+ else {
+ char *vbar1;
+ delim = val_end;
+ /* Now check if the cookie contains a date field, which would
+ * appear after a vertical bar ('|') just after the server name
+ * and before the delimiter.
+ */
+ vbar1 = memchr(val_beg, COOKIE_DELIM_DATE, val_end - val_beg);
+ if (vbar1) {
+ /* OK, so left of the bar is the server's cookie and
+ * right is the last seen date. It is a base64 encoded
+ * 30-bit value representing the UNIX date since the
+ * epoch in 4-second quantities.
+ */
+ int val;
+ delim = vbar1++;
+ if (val_end - vbar1 >= 5) {
+ val = b64tos30(vbar1);
+ if (val > 0)
+ txn->cookie_last_date = val << 2;
+ }
+ /* look for a second vertical bar */
+ vbar1 = memchr(vbar1, COOKIE_DELIM_DATE, val_end - vbar1);
+ if (vbar1 && (val_end - vbar1 > 5)) {
+ val = b64tos30(vbar1 + 1);
+ if (val > 0)
+ txn->cookie_first_date = val << 2;
+ }
+ }
+ }
+
+ /* if the cookie has an expiration date and the proxy wants to check
+ * it, then we do that now. We first check if the cookie is too old,
+ * then only if it has expired. We detect strict overflow because the
+ * time resolution here is not great (4 seconds). Cookies with dates
+ * in the future are ignored if their offset is beyond one day. This
+ * allows an admin to fix timezone issues without expiring everyone
+ * and at the same time avoids keeping unwanted side effects for too
+ * long.
+ */
+ if (txn->cookie_first_date && s->be->cookie_maxlife &&
+ (((signed)(date.tv_sec - txn->cookie_first_date) > (signed)s->be->cookie_maxlife) ||
+ ((signed)(txn->cookie_first_date - date.tv_sec) > 86400))) {
+ txn->flags &= ~TX_CK_MASK;
+ txn->flags |= TX_CK_OLD;
+ delim = val_beg; // let's pretend we have not found the cookie
+ txn->cookie_first_date = 0;
+ txn->cookie_last_date = 0;
+ }
+ else if (txn->cookie_last_date && s->be->cookie_maxidle &&
+ (((signed)(date.tv_sec - txn->cookie_last_date) > (signed)s->be->cookie_maxidle) ||
+ ((signed)(txn->cookie_last_date - date.tv_sec) > 86400))) {
+ txn->flags &= ~TX_CK_MASK;
+ txn->flags |= TX_CK_EXPIRED;
+ delim = val_beg; // let's pretend we have not found the cookie
+ txn->cookie_first_date = 0;
+ txn->cookie_last_date = 0;
+ }
+
+ /* Here, we'll look for the first running server which supports the cookie.
+ * This allows to share a same cookie between several servers, for example
+ * to dedicate backup servers to specific servers only.
+ * However, to prevent clients from sticking to cookie-less backup server
+ * when they have incidentely learned an empty cookie, we simply ignore
+ * empty cookies and mark them as invalid.
+ * The same behaviour is applied when persistence must be ignored.
+ */
+ if ((delim == val_beg) || (s->flags & (SF_IGNORE_PRST | SF_ASSIGNED)))
+ srv = NULL;
+
+ while (srv) {
+ if (srv->cookie && (srv->cklen == delim - val_beg) &&
+ !memcmp(val_beg, srv->cookie, delim - val_beg)) {
+ if ((srv->cur_state != SRV_ST_STOPPED) ||
+ (s->be->options & PR_O_PERSIST) ||
+ (s->flags & SF_FORCE_PRST)) {
+ /* we found the server and we can use it */
+ txn->flags &= ~TX_CK_MASK;
+ txn->flags |= (srv->cur_state != SRV_ST_STOPPED) ? TX_CK_VALID : TX_CK_DOWN;
+ s->flags |= SF_DIRECT | SF_ASSIGNED;
+ s->target = &srv->obj_type;
+ break;
+ } else {
+ /* we found a server, but it's down,
+ * mark it as such and go on in case
+ * another one is available.
+ */
+ txn->flags &= ~TX_CK_MASK;
+ txn->flags |= TX_CK_DOWN;
+ }
+ }
+ srv = srv->next;
+ }
+
+ if (!srv && !(txn->flags & (TX_CK_DOWN|TX_CK_EXPIRED|TX_CK_OLD))) {
+ /* no server matched this cookie or we deliberately skipped it */
+ txn->flags &= ~TX_CK_MASK;
+ if ((s->flags & (SF_IGNORE_PRST | SF_ASSIGNED)))
+ txn->flags |= TX_CK_UNUSED;
+ else
+ txn->flags |= TX_CK_INVALID;
+ }
+
+ /* depending on the cookie mode, we may have to either :
+ * - delete the complete cookie if we're in insert+indirect mode, so that
+ * the server never sees it ;
+ * - remove the server id from the cookie value, and tag the cookie as an
+ * application cookie so that it does not get accidentally removed later,
+ * if we're in cookie prefix mode
+ */
+ if ((s->be->ck_opts & PR_CK_PFX) && (delim != val_end)) {
+ int delta; /* negative */
+
+ memmove(val_beg, delim + 1, hdr_end - (delim + 1));
+ delta = val_beg - (delim + 1);
+ val_end += delta;
+ next += delta;
+ hdr_end += delta;
+ del_from = NULL;
+ preserve_hdr = 1; /* we want to keep this cookie */
+ }
+ else if (del_from == NULL &&
+ (s->be->ck_opts & (PR_CK_INS | PR_CK_IND)) == (PR_CK_INS | PR_CK_IND)) {
+ del_from = prev;
+ }
+ }
+ else {
+ /* This is not our cookie, so we must preserve it. But if we already
+ * scheduled another cookie for removal, we cannot remove the
+ * complete header, but we can remove the previous block itself.
+ */
+ preserve_hdr = 1;
+
+ if (del_from != NULL) {
+ int delta = http_del_hdr_value(hdr_beg, hdr_end, &del_from, prev);
+ if (att_beg >= del_from)
+ att_beg += delta;
+ if (att_end >= del_from)
+ att_end += delta;
+ val_beg += delta;
+ val_end += delta;
+ next += delta;
+ hdr_end += delta;
+ prev = del_from;
+ del_from = NULL;
+ }
+ }
+
+ } /* for each cookie */
+
+
+ /* There are no more cookies on this line.
+ * We may still have one (or several) marked for deletion at the
+ * end of the line. We must do this now in two ways :
+ * - if some cookies must be preserved, we only delete from the
+ * mark to the end of line ;
+ * - if nothing needs to be preserved, simply delete the whole header
+ */
+ if (del_from) {
+ hdr_end = (preserve_hdr ? del_from : hdr_beg);
+ }
+ if ((hdr_end - hdr_beg) != ctx.value.len) {
+ if (hdr_beg != hdr_end)
+ htx_change_blk_value_len(htx, ctx.blk, hdr_end - hdr_beg);
+ else
+ http_remove_header(htx, &ctx);
+ }
+ } /* for each "Cookie header */
+}
+
/*
 * Manage server-side cookies. It can impact performance by about 2% so it is
 * desirable to call it only when needed. This function is also used when we
 * just need to know if there is a cookie (eg: for check-cache).
 *
 * It parses every Set-Cookie header of the response in <res>, optionally
 * captures the first matching cookie for logging, and depending on the
 * backend's cookie options (insert/indirect/rewrite/prefix/preserve) it may
 * delete the cookie, replace its value, or prepend the server's cookie name.
 * All edits are performed in place on the HTX header value, so every pointer
 * into the header must be re-adjusted after each mutation.
 */
static void http_manage_server_side_cookies(struct stream *s, struct channel *res)
{
    struct session *sess = s->sess;
    struct http_txn *txn = s->txn;
    struct htx *htx;
    struct http_hdr_ctx ctx;
    struct server *srv;
    char *hdr_beg, *hdr_end;
    char *prev, *att_beg, *att_end, *equal, *val_beg, *val_end, *next;

    htx = htxbuf(&res->buf);

    ctx.blk = NULL;
    while (http_find_header(htx, ist("Set-Cookie"), &ctx, 1)) {
        int is_first = 1;

        /* OK, right now we know we have a Set-Cookie* at hdr_beg, and
         * <prev> points to the colon.
         */
        txn->flags |= TX_SCK_PRESENT;

        /* Maybe we only wanted to see if there was a Set-Cookie (eg:
         * check-cache is enabled) and we are not interested in checking
         * them. Warning, the cookie capture is declared in the frontend.
         */
        if (s->be->cookie_name == NULL && sess->fe->capture_name == NULL)
            break;

        /* OK so now we know we have to process this response cookie.
         * The format of the Set-Cookie header is slightly different
         * from the format of the Cookie header in that it does not
         * support the comma as a cookie delimiter (thus the header
         * cannot be folded) because the Expires attribute described in
         * the original Netscape's spec may contain an unquoted date
         * with a comma inside. We have to live with this because
         * many browsers don't support Max-Age and some browsers don't
         * support quoted strings. However the Set-Cookie2 header is
         * clean but basically nobody supports it.
         *
         * We have to keep multiple pointers in order to support cookie
         * removal at the beginning, middle or end of header without
         * corrupting the header (in case of set-cookie2). A special
         * pointer, <scav> points to the beginning of the set-cookie-av
         * fields after the first semi-colon. The <next> pointer points
         * either to the end of line (set-cookie) or next unquoted comma
         * (set-cookie2). All of these headers are valid :
         *
         * hdr_beg                                                  hdr_end
         * |                                                           |
         * v                                                           |
         * NAME1  =  VALUE 1  ; Secure; Path="/"                       |
         * NAME=VALUE; Secure; Expires=Thu, 01-Jan-1970 00:00:01 GMT   v
         *   NAME = VALUE ; Secure; Expires=Thu, 01-Jan-1970 00:00:01 GMT
         * NAME1  =  VALUE 1  ; Max-Age=0, NAME2=VALUE2; Discard
         * |    |  |          |  |       |             |             |
         * |    |  |          |  |       |             |             +-> next
         * |    |  |          |  |       |             +------------> scav
         * |    |  |          |  |       +--------------> val_end
         * |    |  |          |  +--------------------> val_beg
         * |    |  |          +----------------------> equal
         * |    |  +------------------------> att_end
         * |    +----------------------------> att_beg
         * +------------------------------> prev
         * -------------------------------> hdr_beg
         */
        hdr_beg = ctx.value.ptr;
        hdr_end = hdr_beg + ctx.value.len;
        for (prev = hdr_beg; prev < hdr_end; prev = next) {

            /* Iterate through all cookies on this line */

            /* find att_beg */
            att_beg = prev;
            if (!is_first)
                att_beg++;
            is_first = 0;

            while (att_beg < hdr_end && HTTP_IS_SPHT(*att_beg))
                att_beg++;

            /* find att_end : this is the first character after the last non
             * space before the equal. It may be equal to hdr_end.
             */
            equal = att_end = att_beg;

            while (equal < hdr_end) {
                if (*equal == '=' || *equal == ';')
                    break;
                if (HTTP_IS_SPHT(*equal++))
                    continue;
                att_end = equal;
            }

            /* here, <equal> points to '=', a delimiter or the end. <att_end>
             * is between <att_beg> and <equal>, both may be identical.
             */

            /* look for end of cookie if there is an equal sign */
            if (equal < hdr_end && *equal == '=') {
                /* look for the beginning of the value */
                val_beg = equal + 1;
                while (val_beg < hdr_end && HTTP_IS_SPHT(*val_beg))
                    val_beg++;

                /* find the end of the value, respecting quotes */
                next = http_find_cookie_value_end(val_beg, hdr_end);

                /* make val_end point to the first white space or delimiter after the value */
                val_end = next;
                while (val_end > val_beg && HTTP_IS_SPHT(*(val_end - 1)))
                    val_end--;
            }
            else {
                /* <equal> points to next comma, semi-colon or EOL */
                val_beg = val_end = next = equal;
            }

            if (next < hdr_end) {
                /* For Set-Cookie, since commas are permitted
                 * in values, skip to the end.
                 */
                next = hdr_end;
            }

            /* Now everything is as on the diagram above */

            /* Ignore cookies with no equal sign */
            if (equal == val_end)
                continue;

            /* If there are spaces around the equal sign, we need to
             * strip them otherwise we'll get trouble for cookie captures,
             * or even for rewrites. Since this happens extremely rarely,
             * it does not hurt performance.
             */
            if (unlikely(att_end != equal || val_beg > equal + 1)) {
                int stripped_before = 0;
                int stripped_after = 0;

                /* first collapse the gap between the attribute name and '=' */
                if (att_end != equal) {
                    memmove(att_end, equal, hdr_end - equal);
                    stripped_before = (att_end - equal);
                    equal += stripped_before;
                    val_beg += stripped_before;
                }

                /* then collapse the gap between '=' and the value */
                if (val_beg > equal + 1) {
                    memmove(equal + 1, val_beg, hdr_end + stripped_before - val_beg);
                    stripped_after = (equal + 1) - val_beg;
                    val_beg += stripped_after;
                    stripped_before += stripped_after;
                }

                /* all pointers past the stripped region shift left */
                val_end += stripped_before;
                next += stripped_before;
                hdr_end += stripped_before;

                htx_change_blk_value_len(htx, ctx.blk, hdr_end - hdr_beg);
                ctx.value.len = hdr_end - hdr_beg;
            }

            /* First, let's see if we want to capture this cookie. We check
             * that we don't already have a server side cookie, because we
             * can only capture one. Also as an optimisation, we ignore
             * cookies shorter than the declared name.
             */
            if (sess->fe->capture_name != NULL &&
                txn->srv_cookie == NULL &&
                (val_end - att_beg >= sess->fe->capture_namelen) &&
                memcmp(att_beg, sess->fe->capture_name, sess->fe->capture_namelen) == 0) {
                int log_len = val_end - att_beg;
                if ((txn->srv_cookie = pool_alloc(pool_head_capture)) == NULL) {
                    ha_alert("HTTP logging : out of memory.\n");
                }
                else {
                    /* truncate the capture to the configured length */
                    if (log_len > sess->fe->capture_len)
                        log_len = sess->fe->capture_len;
                    memcpy(txn->srv_cookie, att_beg, log_len);
                    txn->srv_cookie[log_len] = 0;
                }
            }

            srv = objt_server(s->target);
            /* now check if we need to process it for persistence */
            if (!(s->flags & SF_IGNORE_PRST) &&
                (att_end - att_beg == s->be->cookie_len) && (s->be->cookie_name != NULL) &&
                (memcmp(att_beg, s->be->cookie_name, att_end - att_beg) == 0)) {
                /* assume passive cookie by default */
                txn->flags &= ~TX_SCK_MASK;
                txn->flags |= TX_SCK_FOUND;

                /* If the cookie is in insert mode on a known server, we'll delete
                 * this occurrence because we'll insert another one later.
                 * We'll delete it too if the "indirect" option is set and we're in
                 * a direct access.
                 */
                if (s->be->ck_opts & PR_CK_PSV) {
                    /* The "preserve" flag was set, we don't want to touch the
                     * server's cookie.
                     */
                }
                else if ((srv && (s->be->ck_opts & PR_CK_INS)) ||
                         ((s->flags & SF_DIRECT) && (s->be->ck_opts & PR_CK_IND))) {
                    /* this cookie must be deleted */
                    if (prev == hdr_beg && next == hdr_end) {
                        /* whole header */
                        http_remove_header(htx, &ctx);
                        /* note: while both invalid now, <next> and <hdr_end>
                         * are still equal, so the for() will stop as expected.
                         */
                    } else {
                        /* just remove the value */
                        int delta = http_del_hdr_value(hdr_beg, hdr_end, &prev, next);
                        next = prev;
                        hdr_end += delta;
                    }
                    txn->flags &= ~TX_SCK_MASK;
                    txn->flags |= TX_SCK_DELETED;
                    /* and go on with next cookie */
                }
                else if (srv && srv->cookie && (s->be->ck_opts & PR_CK_RW)) {
                    /* replace bytes val_beg->val_end with the cookie name associated
                     * with this server since we know it.
                     */
                    int sliding, delta;

                    ctx.value = ist2(val_beg, val_end - val_beg);
                    ctx.lws_before = ctx.lws_after = 0;
                    http_replace_header_value(htx, &ctx, ist2(srv->cookie, srv->cklen));
                    delta = srv->cklen - (val_end - val_beg);
                    /* the HTX block may have been reallocated/moved: <sliding>
                     * is the displacement of the whole header in memory.
                     */
                    sliding = (ctx.value.ptr - val_beg);
                    hdr_beg += sliding;
                    val_beg += sliding;
                    next += sliding + delta;
                    hdr_end += sliding + delta;

                    txn->flags &= ~TX_SCK_MASK;
                    txn->flags |= TX_SCK_REPLACED;
                }
                else if (srv && srv->cookie && (s->be->ck_opts & PR_CK_PFX)) {
                    /* insert the cookie name associated with this server
                     * before existing cookie, and insert a delimiter between them..
                     */
                    int sliding, delta;
                    ctx.value = ist2(val_beg, 0);
                    ctx.lws_before = ctx.lws_after = 0;
                    /* cklen + 1 reserves one byte for the COOKIE_DELIM below */
                    http_replace_header_value(htx, &ctx, ist2(srv->cookie, srv->cklen + 1));
                    delta = srv->cklen + 1;
                    sliding = (ctx.value.ptr - val_beg);
                    hdr_beg += sliding;
                    val_beg += sliding;
                    next += sliding + delta;
                    hdr_end += sliding + delta;

                    val_beg[srv->cklen] = COOKIE_DELIM;
                    txn->flags &= ~TX_SCK_MASK;
                    txn->flags |= TX_SCK_REPLACED;
                }
            }
            /* that's done for this cookie, check the next one on the same
             * line when next != hdr_end (which should normally not happen
             * with set-cookie2 support removed).
             */
        }
    }
}
+
+/*
+ * Parses the Cache-Control and Pragma request header fields to determine if
+ * the request may be served from the cache and/or if it is cacheable. Updates
+ * s->txn->flags.
+ */
+void http_check_request_for_cacheability(struct stream *s, struct channel *req)
+{
+ struct http_txn *txn = s->txn;
+ struct htx *htx;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+ int pragma_found, cc_found;
+
+ if ((txn->flags & (TX_CACHEABLE|TX_CACHE_IGNORE)) == TX_CACHE_IGNORE)
+ return; /* nothing more to do here */
+
+ htx = htxbuf(&req->buf);
+ pragma_found = cc_found = 0;
+
+ /* Check "pragma" header for HTTP/1.0 compatibility. */
+ if (http_find_header(htx, ist("pragma"), &ctx, 1)) {
+ if (isteqi(ctx.value, ist("no-cache"))) {
+ pragma_found = 1;
+ }
+ }
+
+ ctx.blk = NULL;
+ /* Don't use the cache and don't try to store if we found the
+ * Authorization header */
+ if (http_find_header(htx, ist("authorization"), &ctx, 1)) {
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ txn->flags |= TX_CACHE_IGNORE;
+ }
+
+
+ /* Look for "cache-control" header and iterate over all the values
+ * until we find one that specifies that caching is possible or not. */
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("cache-control"), &ctx, 0)) {
+ cc_found = 1;
+ /* We don't check the values after max-age, max-stale nor min-fresh,
+ * we simply don't use the cache when they're specified. */
+ if (istmatchi(ctx.value, ist("max-age")) ||
+ istmatchi(ctx.value, ist("no-cache")) ||
+ istmatchi(ctx.value, ist("max-stale")) ||
+ istmatchi(ctx.value, ist("min-fresh"))) {
+ txn->flags |= TX_CACHE_IGNORE;
+ continue;
+ }
+ if (istmatchi(ctx.value, ist("no-store"))) {
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ continue;
+ }
+ }
+
+ /* RFC7234#5.4:
+ * When the Cache-Control header field is also present and
+ * understood in a request, Pragma is ignored.
+ * When the Cache-Control header field is not present in a
+ * request, caches MUST consider the no-cache request
+ * pragma-directive as having the same effect as if
+ * "Cache-Control: no-cache" were present.
+ */
+ if (!cc_found && pragma_found)
+ txn->flags |= TX_CACHE_IGNORE;
+}
+
+/*
+ * Check if response is cacheable or not. Updates s->txn->flags.
+ */
+void http_check_response_for_cacheability(struct stream *s, struct channel *res)
+{
+ struct http_txn *txn = s->txn;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+ struct htx *htx;
+ int has_freshness_info = 0;
+ int has_validator = 0;
+ int has_null_maxage = 0;
+
+ if (txn->status < 200) {
+ /* do not try to cache interim responses! */
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ return;
+ }
+
+ htx = htxbuf(&res->buf);
+ /* Check "pragma" header for HTTP/1.0 compatibility. */
+ if (http_find_header(htx, ist("pragma"), &ctx, 1)) {
+ if (isteqi(ctx.value, ist("no-cache"))) {
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ return;
+ }
+ }
+
+ /* Look for "cache-control" header and iterate over all the values
+ * until we find one that specifies that caching is possible or not. */
+ ctx.blk = NULL;
+ while (http_find_header(htx, ist("cache-control"), &ctx, 0)) {
+ if (isteqi(ctx.value, ist("public"))) {
+ txn->flags |= TX_CACHEABLE | TX_CACHE_COOK;
+ continue;
+ }
+ /* This max-age might be overridden by a s-maxage directive, do
+ * not unset the TX_CACHEABLE yet. */
+ if (isteqi(ctx.value, ist("max-age=0"))) {
+ has_null_maxage = 1;
+ continue;
+ }
+
+ if (isteqi(ctx.value, ist("private")) ||
+ isteqi(ctx.value, ist("no-cache")) ||
+ isteqi(ctx.value, ist("no-store")) ||
+ isteqi(ctx.value, ist("s-maxage=0"))) {
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ continue;
+ }
+ /* We might have a no-cache="set-cookie" form. */
+ if (istmatchi(ctx.value, ist("no-cache=\"set-cookie"))) {
+ txn->flags &= ~TX_CACHE_COOK;
+ continue;
+ }
+
+ if (istmatchi(ctx.value, ist("s-maxage"))) {
+ has_freshness_info = 1;
+ has_null_maxage = 0; /* The null max-age is overridden, ignore it */
+ continue;
+ }
+ if (istmatchi(ctx.value, ist("max-age"))) {
+ has_freshness_info = 1;
+ continue;
+ }
+ }
+
+ /* We had a 'max-age=0' directive but no extra s-maxage, do not cache
+ * the response. */
+ if (has_null_maxage) {
+ txn->flags &= ~TX_CACHEABLE & ~TX_CACHE_COOK;
+ }
+
+ /* If no freshness information could be found in Cache-Control values,
+ * look for an Expires header. */
+ if (!has_freshness_info) {
+ ctx.blk = NULL;
+ has_freshness_info = http_find_header(htx, ist("expires"), &ctx, 0);
+ }
+
+ /* If no freshness information could be found in Cache-Control or Expires
+ * values, look for an explicit validator. */
+ if (!has_freshness_info) {
+ ctx.blk = NULL;
+ has_validator = 1;
+ if (!http_find_header(htx, ist("etag"), &ctx, 0)) {
+ ctx.blk = NULL;
+ if (!http_find_header(htx, ist("last-modified"), &ctx, 0))
+ has_validator = 0;
+ }
+ }
+
+ /* We won't store an entry that has neither a cache validator nor an
+ * explicit expiration time, as suggested in RFC 7234#3. */
+ if (!has_freshness_info && !has_validator)
+ txn->flags &= ~TX_CACHEABLE;
+}
+
+/*
+ * In a GET, HEAD or POST request, check if the requested URI matches the stats uri
+ * for the current proxy.
+ *
+ * It is assumed that the request is either a HEAD, GET, or POST and that the
+ * uri_auth field is valid.
+ *
+ * Returns 1 if stats should be provided, otherwise 0.
+ */
+static int http_stats_check_uri(struct stream *s, struct http_txn *txn, struct proxy *px)
+{
+ struct uri_auth *uri_auth = px->uri_auth;
+ struct htx *htx;
+ struct htx_sl *sl;
+ struct ist uri;
+
+ if (!uri_auth)
+ return 0;
+
+ if (txn->meth != HTTP_METH_GET && txn->meth != HTTP_METH_HEAD && txn->meth != HTTP_METH_POST)
+ return 0;
+
+ htx = htxbuf(&s->req.buf);
+ sl = http_get_stline(htx);
+ uri = htx_sl_req_uri(sl);
+ if (*uri_auth->uri_prefix == '/') {
+ struct http_uri_parser parser = http_uri_parser_init(uri);
+ uri = http_parse_path(&parser);
+ }
+
+ /* check URI size */
+ if (uri_auth->uri_len > uri.len)
+ return 0;
+
+ if (memcmp(uri.ptr, uri_auth->uri_prefix, uri_auth->uri_len) != 0)
+ return 0;
+
+ return 1;
+}
+
/* This function prepares an applet to handle the stats. It can deal with the
 * "100-continue" expectation, check that admin rules are met for POST requests,
 * and program a response message if something was unexpected. It cannot fail
 * and always relies on the stats applet to complete the job. It does not touch
 * analysers nor counters, which are left to the caller. It does not touch
 * s->target which is supposed to already point to the stats applet. The caller
 * is expected to have already assigned an appctx to the stream.
 *
 * The request URI is scanned for ";option" keywords (";csv", ";json", ";up",
 * ";st=<code>", "scope=<name>", ...) which configure the applet context.
 * Returns 1 in all cases.
 */
static int http_handle_stats(struct stream *s, struct channel *req, struct proxy *px)
{
    struct stats_admin_rule *stats_admin_rule;
    struct session *sess = s->sess;
    struct http_txn *txn = s->txn;
    struct http_msg *msg = &txn->req;
    struct uri_auth *uri_auth = px->uri_auth;
    const char *h, *lookup, *end;
    struct appctx *appctx = __sc_appctx(s->scb);
    struct show_stat_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
    struct htx *htx;
    struct htx_sl *sl;

    appctx->st1 = 0;
    ctx->state = STAT_STATE_INIT;
    ctx->st_code = STAT_STATUS_INIT;
    ctx->http_px = px;
    ctx->flags |= uri_auth->flags;
    ctx->flags |= STAT_FMT_HTML; /* assume HTML mode by default */
    if ((msg->flags & HTTP_MSGF_VER_11) && (txn->meth != HTTP_METH_HEAD))
        ctx->flags |= STAT_CHUNKED;

    /* scan the part of the URI that follows the stats prefix for options.
     * NOTE(review): each loop bound below ("end - 3", "end - 9", ...) is the
     * keyword length; <end - N> may point before <lookup> when the trailing
     * part is shorter, in which case the loop body never runs.
     */
    htx = htxbuf(&req->buf);
    sl = http_get_stline(htx);
    lookup = HTX_SL_REQ_UPTR(sl) + uri_auth->uri_len;
    end = HTX_SL_REQ_UPTR(sl) + HTX_SL_REQ_ULEN(sl);

    /* ";up" : hide servers which are down */
    for (h = lookup; h <= end - 3; h++) {
        if (memcmp(h, ";up", 3) == 0) {
            ctx->flags |= STAT_HIDE_DOWN;
            break;
        }
    }

    /* ";no-maint" : hide servers in maintenance */
    for (h = lookup; h <= end - 9; h++) {
        if (memcmp(h, ";no-maint", 9) == 0) {
            ctx->flags |= STAT_HIDE_MAINT;
            break;
        }
    }

    /* ";norefresh" : disable the automatic refresh, only relevant when a
     * refresh interval was configured.
     */
    if (uri_auth->refresh) {
        for (h = lookup; h <= end - 10; h++) {
            if (memcmp(h, ";norefresh", 10) == 0) {
                ctx->flags |= STAT_NO_REFRESH;
                break;
            }
        }
    }

    /* ";csv" : plain CSV output instead of HTML */
    for (h = lookup; h <= end - 4; h++) {
        if (memcmp(h, ";csv", 4) == 0) {
            ctx->flags &= ~(STAT_FMT_MASK|STAT_JSON_SCHM);
            break;
        }
    }

    /* ";typed" : typed output format */
    for (h = lookup; h <= end - 6; h++) {
        if (memcmp(h, ";typed", 6) == 0) {
            ctx->flags &= ~(STAT_FMT_MASK|STAT_JSON_SCHM);
            ctx->flags |= STAT_FMT_TYPED;
            break;
        }
    }

    /* ";json" : JSON output format */
    for (h = lookup; h <= end - 5; h++) {
        if (memcmp(h, ";json", 5) == 0) {
            ctx->flags &= ~(STAT_FMT_MASK|STAT_JSON_SCHM);
            ctx->flags |= STAT_FMT_JSON;
            break;
        }
    }

    /* ";json-schema" : dump the JSON schema instead of the stats */
    for (h = lookup; h <= end - 12; h++) {
        if (memcmp(h, ";json-schema", 12) == 0) {
            ctx->flags &= ~STAT_FMT_MASK;
            ctx->flags |= STAT_JSON_SCHM;
            break;
        }
    }

    /* ";st=XXXX" : status code resulting from a previous POST; the bound is
     * 8 because we match 4 bytes of ";st=" then read 4 bytes of status code.
     */
    for (h = lookup; h <= end - 8; h++) {
        if (memcmp(h, ";st=", 4) == 0) {
            int i;
            h += 4;
            ctx->st_code = STAT_STATUS_UNKN;
            for (i = STAT_STATUS_INIT + 1; i < STAT_STATUS_SIZE; i++) {
                if (strncmp(stat_status_codes[i], h, 4) == 0) {
                    ctx->st_code = i;
                    break;
                }
            }
            break;
        }
    }

    /* "scope=<name>" : restrict the dump to proxies matching <name>.
     * NOTE(review): the "end - 8" bound here presumably corresponds to
     * strlen(STAT_SCOPE_INPUT_NAME "=") + at least one character — confirm
     * against the macro definition.
     */
    ctx->scope_str = 0;
    ctx->scope_len = 0;
    for (h = lookup; h <= end - 8; h++) {
        if (memcmp(h, STAT_SCOPE_INPUT_NAME "=", strlen(STAT_SCOPE_INPUT_NAME) + 1) == 0) {
            int itx = 0;
            const char *h2;
            char scope_txt[STAT_SCOPE_TXT_MAXLEN + 1];
            const char *err;

            h += strlen(STAT_SCOPE_INPUT_NAME) + 1;
            h2 = h;
            /* store an offset rather than a pointer: the URI buffer may move */
            ctx->scope_str = h2 - HTX_SL_REQ_UPTR(sl);
            while (h < end) {
                if (*h == ';' || *h == '&' || *h == ' ')
                    break;
                itx++;
                h++;
            }

            if (itx > STAT_SCOPE_TXT_MAXLEN)
                itx = STAT_SCOPE_TXT_MAXLEN;
            ctx->scope_len = itx;

            /* scope_txt = search query, ctx->scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
            memcpy(scope_txt, h2, itx);
            scope_txt[itx] = '\0';
            err = invalid_char(scope_txt);
            if (err) {
                /* bad char in search text => clear scope */
                ctx->scope_str = 0;
                ctx->scope_len = 0;
            }
            break;
        }
    }

    /* now check whether we have some admin rules for this request */
    list_for_each_entry(stats_admin_rule, &uri_auth->admin_rules, list) {
        int ret = 1;

        if (stats_admin_rule->cond) {
            ret = acl_exec_cond(stats_admin_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
            ret = acl_pass(ret);
            if (stats_admin_rule->cond->pol == ACL_COND_UNLESS)
                ret = !ret;
        }

        if (ret) {
            /* no rule, or the rule matches */
            ctx->flags |= STAT_ADMIN;
            break;
        }
    }

    /* select the applet's initial state from the method and admin level */
    if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
        appctx->st0 = STAT_HTTP_HEAD;
    else if (txn->meth == HTTP_METH_POST) {
        if (ctx->flags & STAT_ADMIN) {
            appctx->st0 = STAT_HTTP_POST;
            /* the POST body still needs to be received */
            if (msg->msg_state < HTTP_MSG_DATA)
                req->analysers |= AN_REQ_HTTP_BODY;
        }
        else {
            /* POST without admin level */
            ctx->flags &= ~STAT_CHUNKED;
            ctx->st_code = STAT_STATUS_DENY;
            appctx->st0 = STAT_HTTP_LAST;
        }
    }
    else {
        /* Unsupported method */
        ctx->flags &= ~STAT_CHUNKED;
        ctx->st_code = STAT_STATUS_IVAL;
        appctx->st0 = STAT_HTTP_LAST;
    }

    s->task->nice = -32; /* small boost for HTTP statistics */
    return 1;
}
+
/* This function waits for the message payload at most <time> milliseconds (may
 * be set to TICK_ETERNITY). It stops to wait if at least <bytes> bytes of the
 * payload are received (0 means no limit). It returns HTTP_RULE_* depending on
 * the result:
 *
 *   - HTTP_RULE_RES_CONT when conditions are met to stop waiting
 *   - HTTP_RULE_RES_YIELD to wait for more data
 *   - HTTP_RULE_RES_ABRT when a timeout occurred.
 *   - HTTP_RULE_RES_BADREQ if a parsing error is raised by lower level
 *   - HTTP_RULE_RES_ERROR if an internal error occurred
 *
 * If a timeout occurred, this function is responsible to emit the right response
 * to the client, depending on the channel (408 on request side, 504 on response
 * side). All other errors must be handled by the caller.
 */
enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
                                        unsigned int time, unsigned int bytes)
{
    struct session *sess = s->sess;
    struct http_txn *txn = s->txn;
    /* the direction of <chn> selects whether we watch the request or response */
    struct http_msg *msg = ((chn->flags & CF_ISRESP) ? &txn->rsp : &txn->req);
    struct htx *htx;
    enum rule_result ret = HTTP_RULE_RES_CONT;

    htx = htxbuf(&chn->buf);

    if (htx->flags & HTX_FL_PARSING_ERROR) {
        ret = HTTP_RULE_RES_BADREQ;
        goto end;
    }
    if (htx->flags & HTX_FL_PROCESSING_ERROR) {
        ret = HTTP_RULE_RES_ERROR;
        goto end;
    }

    /* Do nothing for bodyless and CONNECT requests */
    if (txn->meth == HTTP_METH_CONNECT || (msg->flags & HTTP_MSGF_BODYLESS))
        goto end;

    /* on the request side, answer a pending "Expect: 100-continue" first
     * so the client actually starts sending the body.
     */
    if (!(chn->flags & CF_ISRESP)) {
        if (http_handle_expect_hdr(s, htx, msg) == -1) {
            ret = HTTP_RULE_RES_ERROR;
            goto end;
        }
    }

    /* Now we're are waiting for the payload. We just need to know if all
     * data have been received or if the buffer is full.
     */
    if ((htx->flags & HTX_FL_EOM) ||
        htx_get_tail_type(htx) > HTX_BLK_DATA ||
        channel_htx_full(chn, htx, global.tune.maxrewrite) ||
        sc_waiting_room(chn_prod(chn)))
        goto end;

    /* a byte threshold was given: sum the DATA blocks already buffered and
     * stop waiting as soon as it is reached.
     */
    if (bytes) {
        struct htx_blk *blk;
        unsigned int len = 0;

        for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
            if (htx_get_blk_type(blk) != HTX_BLK_DATA)
                continue;
            len += htx_get_blksz(blk);
            if (len >= bytes)
                goto end;
        }
    }

    /* timeout: pick the status code matching the channel's direction */
    if ((chn->flags & CF_READ_TIMEOUT) || tick_is_expired(chn->analyse_exp, now_ms)) {
        if (!(chn->flags & CF_ISRESP))
            goto abort_req;
        goto abort_res;
    }

    /* we get here if we need to wait for more data */
    if (!(chn_prod(chn)->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
        /* arm the analyser timeout on the first yield only */
        if (!tick_isset(chn->analyse_exp))
            chn->analyse_exp = tick_add_ifset(now_ms, time);
        ret = HTTP_RULE_RES_YIELD;
    }

  end:
    return ret;

    /* common tail for both abort paths: txn->status and stream flags must
     * already be set when jumping here.
     */
  abort:
    http_set_term_flags(s);
    http_reply_and_close(s, txn->status, http_error_message(s));
    ret = HTTP_RULE_RES_ABRT;
    goto end;

  abort_req:
    txn->status = 408;
    if (!(s->flags & SF_ERR_MASK))
        s->flags |= SF_ERR_CLITO;
    _HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
    if (sess->listener && sess->listener->counters)
        _HA_ATOMIC_INC(&sess->listener->counters->failed_req);
    goto abort;

  abort_res:
    txn->status = 504;
    if (!(s->flags & SF_ERR_MASK))
        s->flags |= SF_ERR_SRVTO;
    stream_inc_http_fail_ctr(s);
    goto abort;
}
+
/* Performs a redirection to the server's configured redirect prefix
 * (srv->rdr_pfx): builds a Location value from the prefix plus the request's
 * path, replaces the response with a generated "302 Found", forwards it to
 * the client and closes the stream connector <sc>. On failure the partial
 * response is removed from the buffer and nothing is sent.
 */
void http_perform_server_redirect(struct stream *s, struct stconn *sc)
{
    struct channel *req = &s->req;
    struct channel *res = &s->res;
    struct server *srv;
    struct htx *htx;
    struct htx_sl *sl;
    struct ist path, location;
    unsigned int flags;
    struct http_uri_parser parser;

    /*
     * Create the location
     */
    chunk_reset(&trash);

    /* 1: add the server's prefix */
    /* special prefix "/" means don't change URL */
    srv = __objt_server(s->target);
    if (srv->rdr_len != 1 || *srv->rdr_pfx != '/') {
        if (!chunk_memcat(&trash, srv->rdr_pfx, srv->rdr_len))
            return;
    }

    /* 2: add the request Path */
    htx = htxbuf(&req->buf);
    sl = http_get_stline(htx);
    parser = http_uri_parser_init(htx_sl_req_uri(sl));
    path = http_parse_path(&parser);
    /* no usable path in the request URI: give up silently */
    if (!isttest(path))
        return;

    if (!chunk_memcat(&trash, path.ptr, path.len))
        return;
    location = ist2(trash.area, trash.data);

    /*
     * Create the 302 response
     */
    htx = htx_from_buf(&res->buf);
    flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN|HTX_SL_F_BODYLESS);
    sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags,
                        ist("HTTP/1.1"), ist("302"), ist("Found"));
    if (!sl)
        goto fail;
    sl->info.res.status = 302;
    s->txn->status = 302;

    if (!htx_add_header(htx, ist("Cache-Control"), ist("no-cache")) ||
        !htx_add_header(htx, ist("Connection"), ist("close")) ||
        !htx_add_header(htx, ist("Content-length"), ist("0")) ||
        !htx_add_header(htx, ist("Location"), location))
        goto fail;

    if (!htx_add_endof(htx, HTX_BLK_EOH))
        goto fail;

    htx->flags |= HTX_FL_EOM;
    htx_to_buf(htx, &res->buf);

    if (!(s->flags & SF_ERR_MASK))
        s->flags |= SF_ERR_LOCAL;
    if (!(s->flags & SF_FINST_MASK))
        s->flags |= SF_FINST_C;

    if (!http_forward_proxy_resp(s, 1))
        goto fail;

    /* return without error. */
    sc_abort(sc);
    sc_shutdown(sc);
    s->conn_err_type = STRM_ET_NONE;
    sc->state = SC_ST_CLO;


    /* FIXME: we should increase a counter of redirects per server and per backend. */
    srv_inc_sess_ctr(srv);
    srv_set_sess_last(srv);
    return;

  fail:
    /* If an error occurred, remove the incomplete HTTP response from the
     * buffer */
    channel_htx_truncate(res, htx);
}
+
/* This function terminates the request because it was completely analyzed or
 * because an error was triggered during the body forwarding.
 *
 * It drives the request-side state machine from HTTP_MSG_DONE onwards
 * (DONE -> CLOSING -> CLOSED, or DONE -> TUNNEL), adjusting the channel and
 * stream-connector flags accordingly. Note that it deliberately uses goto
 * targets placed inside the state-test "if" blocks so a state transition can
 * fall straight into the next state's handling within the same call.
 */
static void http_end_request(struct stream *s)
{
    struct channel *chn = &s->req;
    struct http_txn *txn = s->txn;

    DBG_TRACE_ENTER(STRM_EV_HTTP_ANA, s, txn);

    if (unlikely(txn->req.msg_state < HTTP_MSG_DONE)) {
        DBG_TRACE_DEVEL("waiting end of the request", STRM_EV_HTTP_ANA, s, txn);
        return;
    }

    if (txn->req.msg_state == HTTP_MSG_DONE) {
        /* No need to read anymore, the request was completely parsed.
         * We can shut the read side unless we want to abort_on_close,
         * or we have a POST request. The issue with POST requests is
         * that some browsers still send a CRLF after the request, and
         * this CRLF must be read so that it does not remain in the kernel
         * buffers, otherwise a close could cause an RST on some systems
         * (eg: Linux).
         */
        if (!(s->be->options & PR_O_ABRT_CLOSE) && txn->meth != HTTP_METH_POST)
            channel_dont_read(chn);

        /* if the server closes the connection, we want to immediately react
         * and close the socket to save packets and syscalls.
         */
        s->scb->flags |= SC_FL_NOHALF;

        /* In any case we've finished parsing the request so we must
         * disable Nagle when sending data because 1) we're not going
         * to shut this side, and 2) the server is waiting for us to
         * send pending data.
         */
        s->scb->flags |= SC_FL_SND_NEVERWAIT;

        if (txn->rsp.msg_state < HTTP_MSG_BODY ||
            (txn->rsp.msg_state < HTTP_MSG_DONE && s->scb->state != SC_ST_CLO)) {
            /* The server has not finished to respond and the
             * backend SC is not closed, so we don't want to move in
             * order not to upset it.
             */
            DBG_TRACE_DEVEL("waiting end of the response", STRM_EV_HTTP_ANA, s, txn);
            return;
        }

        /* When we get here, it means that both the request and the
         * response have finished receiving. Depending on the connection
         * mode, we'll have to wait for the last bytes to leave in either
         * direction, and sometimes for a close to be effective.
         */
        if (txn->flags & TX_CON_WANT_TUN) {
            /* Tunnel mode will not have any analyser so it needs to
             * poll for reads.
             */
            channel_auto_read(&s->req);
            txn->req.msg_state = HTTP_MSG_TUNNEL;
            /* wake the other side up if it's not tunneling yet */
            if (txn->rsp.msg_state != HTTP_MSG_TUNNEL)
                s->res.flags |= CF_WAKE_ONCE;
        }
        else {
            /* we're not expecting any new data to come for this
             * transaction, so we can close it.
             *
             *   However, there is an exception if the response
             *   length is undefined. In this case, we need to wait
             *   the close from the server. The response will be
             *   switched in TUNNEL mode until the end.
             */
            if (!(txn->rsp.flags & HTTP_MSGF_XFER_LEN) &&
                txn->rsp.msg_state != HTTP_MSG_CLOSED)
                goto check_channel_flags;

            if (!(s->scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))) {
                sc_schedule_abort(s->scf);
                sc_schedule_shutdown(s->scb);
            }
        }
        goto check_channel_flags;
    }

    if (txn->req.msg_state == HTTP_MSG_CLOSING) {
        /* label reachable from check_channel_flags below once the output
         * side has been shut.
         */
      http_msg_closing:
        /* nothing else to forward, just waiting for the output buffer
         * to be empty and for the shut_wanted to take effect.
         */
        if (!co_data(chn)) {
            txn->req.msg_state = HTTP_MSG_CLOSED;
            goto http_msg_closed;
        }
        DBG_TRACE_LEAVE(STRM_EV_HTTP_ANA, s, txn);
        return;
    }

    if (txn->req.msg_state == HTTP_MSG_CLOSED) {
      http_msg_closed:
        /* if we don't know whether the server will close, we need to hard close */
        if (txn->rsp.flags & HTTP_MSGF_XFER_LEN)
            s->scb->flags |= SC_FL_NOLINGER; /* we want to close ASAP */
        /* see above in MSG_DONE why we only do this in these states */
        if (!(s->be->options & PR_O_ABRT_CLOSE))
            channel_dont_read(chn);
        goto end;
    }

  check_channel_flags:
    /* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
    if (s->scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) {
        /* if we've just closed an output, let's switch */
        txn->req.msg_state = HTTP_MSG_CLOSING;
        goto http_msg_closing;
    }

  end:
    chn->analysers &= AN_REQ_FLT_END;
    if (txn->req.msg_state == HTTP_MSG_TUNNEL) {
        s->scb->flags |= SC_FL_SND_NEVERWAIT;
        if (HAS_REQ_DATA_FILTERS(s))
            chn->analysers |= AN_REQ_FLT_XFER_DATA;
        else
            c_adv(chn, htxbuf(&chn->buf)->data - co_data(chn));
    }
    channel_auto_close(chn);
    channel_auto_read(chn);
    DBG_TRACE_LEAVE(STRM_EV_HTTP_ANA, s, txn);
}
+
+
/* This function terminates the response because it was completely analyzed or
 * because an error was triggered during the body forwarding. It is the
 * response-side counterpart of the request termination function: it moves
 * txn->rsp through DONE -> CLOSING -> CLOSED (or TUNNEL) and schedules the
 * shutdowns needed to finish the transaction.
 */
static void http_end_response(struct stream *s)
{
	struct channel *chn = &s->res;
	struct http_txn *txn = s->txn;

	DBG_TRACE_ENTER(STRM_EV_HTTP_ANA, s, txn);

	/* the response is not fully analyzed yet: nothing to terminate */
	if (unlikely(txn->rsp.msg_state < HTTP_MSG_DONE)) {
		DBG_TRACE_DEVEL("waiting end of the response", STRM_EV_HTTP_ANA, s, txn);
		return;
	}

	if (txn->rsp.msg_state == HTTP_MSG_DONE) {
		/* In theory, we don't need to read anymore, but we must
		 * still monitor the server connection for a possible close
		 * while the request is being uploaded, so we don't disable
		 * reading.
		 */
		/* channel_dont_read(chn); */

		if (txn->req.msg_state < HTTP_MSG_DONE && s->scf->state != SC_ST_CLO) {
			/* The client seems to still be sending data, probably
			 * because we got an error response during an upload.
			 * We have the choice of either breaking the connection
			 * or letting it pass through. Let's do the latter.
			 */
			DBG_TRACE_DEVEL("waiting end of the request", STRM_EV_HTTP_ANA, s, txn);
			return;
		}

		/* When we get here, it means that both the request and the
		 * response have finished receiving. Depending on the connection
		 * mode, we'll have to wait for the last bytes to leave in either
		 * direction, and sometimes for a close to be effective.
		 */
		if (txn->flags & TX_CON_WANT_TUN) {
			channel_auto_read(&s->res);
			txn->rsp.msg_state = HTTP_MSG_TUNNEL;
			/* wake the request side up once so it notices the switch */
			if (txn->req.msg_state != HTTP_MSG_TUNNEL)
				s->req.flags |= CF_WAKE_ONCE;
		}
		else {
			/* we're not expecting any new data to come for this
			 * transaction, so we can close it.
			 */
			if (!(s->scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))) {
				sc_schedule_abort(s->scb);
				sc_schedule_shutdown(s->scf);
			}
		}
		goto check_channel_flags;
	}

	if (txn->rsp.msg_state == HTTP_MSG_CLOSING) {
	  http_msg_closing:
		/* nothing else to forward, just waiting for the output buffer
		 * to be empty and for the shut_wanted to take effect.
		 */
		if (!co_data(chn)) {
			txn->rsp.msg_state = HTTP_MSG_CLOSED;
			goto http_msg_closed;
		}
		DBG_TRACE_LEAVE(STRM_EV_HTTP_ANA, s, txn);
		return;
	}

	if (txn->rsp.msg_state == HTTP_MSG_CLOSED) {
	  http_msg_closed:
		/* drop any pending data */
		channel_htx_truncate(&s->req, htxbuf(&s->req.buf));
		channel_abort(&s->req);
		goto end;
	}

  check_channel_flags:
	/* Here, we are in HTTP_MSG_DONE or HTTP_MSG_TUNNEL */
	if (s->scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) {
		/* if we've just closed an output, let's switch */
		txn->rsp.msg_state = HTTP_MSG_CLOSING;
		goto http_msg_closing;
	}

  end:
	/* keep only the filters' end analyser; in tunnel mode, either hand
	 * data off to the xfer-data analyser (filters present) or forward
	 * everything pending directly
	 */
	chn->analysers &= AN_RES_FLT_END;
	if (txn->rsp.msg_state == HTTP_MSG_TUNNEL) {
		s->scf->flags |= SC_FL_SND_NEVERWAIT;
		if (HAS_RSP_DATA_FILTERS(s))
			chn->analysers |= AN_RES_FLT_XFER_DATA;
		else
			c_adv(chn, htxbuf(&chn->buf)->data - co_data(chn));
	}
	channel_auto_close(chn);
	channel_auto_read(chn);
	DBG_TRACE_LEAVE(STRM_EV_HTTP_ANA, s, txn);
}
+
/* Forward a response generated by HAProxy (error/redirect/return). This
 * function forwards all pending incoming data. If <final> is set to 0, nothing
 * more is performed. It is used for 1xx informational messages. Otherwise, the
 * transaction is terminated and the request is emptied. On success 1 is
 * returned. If an error occurred, 0 is returned. If it fails, this function
 * only exits. It is the caller responsibility to do the cleanup.
 */
int http_forward_proxy_resp(struct stream *s, int final)
{
	struct channel *req = &s->req;
	struct channel *res = &s->res;
	struct htx *htx = htxbuf(&res->buf);
	size_t data;

	if (final) {
		/* mark the message as generated by HAProxy so that
		 * http-after-response processing can identify it
		 */
		htx->flags |= HTX_FL_PROXY_RESP;

		if (!htx_is_empty(htx) && !http_eval_after_res_rules(s))
			return 0;

		/* a HEAD response must not carry a payload */
		if (s->txn->meth == HTTP_METH_HEAD)
			htx_skip_msg_payload(htx);

		/* abort and drain whatever remains of the request side */
		channel_auto_read(req);
		channel_abort(req);
		channel_htx_erase(req, htxbuf(&req->buf));

		channel_auto_read(res);
		channel_auto_close(res);
		sc_schedule_abort(s->scb);
		s->scb->flags |= SC_FL_EOI; /* The response is terminated, add EOI */
		htxbuf(&res->buf)->flags |= HTX_FL_EOM; /* no more data are expected */
	}
	else {
		/* Send ASAP informational messages. Rely on SC_FL_EOI for final
		 * response.
		 */
		s->scf->flags |= SC_FL_SND_ASAP;
	}

	/* schedule all data present in the response buffer for forwarding */
	data = htx->data - co_data(res);
	c_adv(res, data);
	htx->first = -1;
	res->total += data;
	return 1;
}
+
+void http_server_error(struct stream *s, struct stconn *sc, int err,
+ int finst, struct http_reply *msg)
+{
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= err;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= finst;
+
+ http_reply_and_close(s, s->txn->status, msg);
+}
+
/* Send the reply <msg> with status <status> to the client (or just truncate
 * the response channel when <msg> is NULL), then abort the request side and
 * terminate the HTTP analysis on both channels. On formatting failure it
 * falls back to the generic 500 error, unless that one is a "const" reply
 * that must not be rewritten. Note: <status> is currently unused; the txn
 * status is used instead.
 */
void http_reply_and_close(struct stream *s, short status, struct http_reply *msg)
{
	if (!msg) {
		/* nothing to send: just discard any pending response data */
		channel_htx_truncate(&s->res, htxbuf(&s->res.buf));
		goto end;
	}

	if (http_reply_message(s, msg) == -1) {
		/* On error, return a 500 error message, but don't rewrite it if
		 * it is already an internal error. If it was already a "const"
		 * 500 error, just fail.
		 */
		if (s->txn->status == 500) {
			if (s->txn->flags & TX_CONST_REPLY)
				goto end;
			s->txn->flags |= TX_CONST_REPLY;
		}
		s->txn->status = 500;
		s->txn->http_reply = NULL;
		/* bounded recursion: the TX_CONST_REPLY check above stops it */
		return http_reply_and_close(s, s->txn->status, http_error_message(s));
	}

end:
	/* At this stage, HTTP analysis is finished */
	s->req.analysers &= AN_REQ_FLT_END;
	s->req.analyse_exp = TICK_ETERNITY;

	s->res.analysers &= AN_RES_FLT_END;
	s->res.analyse_exp = TICK_ETERNITY;

	channel_auto_read(&s->req);
	channel_abort(&s->req);
	channel_htx_erase(&s->req, htxbuf(&s->req.buf));
	channel_auto_read(&s->res);
	channel_auto_close(&s->res);
	sc_schedule_abort(s->scb);
}
+
+struct http_reply *http_error_message(struct stream *s)
+{
+ const int msgnum = http_get_status_idx(s->txn->status);
+
+ if (s->txn->http_reply)
+ return s->txn->http_reply;
+ else if (s->be->replies[msgnum])
+ return s->be->replies[msgnum];
+ else if (strm_fe(s)->replies[msgnum])
+ return strm_fe(s)->replies[msgnum];
+ else
+ return &http_err_replies[msgnum];
+}
+
/* Produces an HTX message from an http reply. Depending on the http reply type,
 * an errorfile, a raw file or a log-format string is used. On success, it
 * returns 0. If an error occurs -1 is returned. If it fails, this function only
 * exits. It is the caller responsibility to do the cleanup.
 */
int http_reply_to_htx(struct stream *s, struct htx *htx, struct http_reply *reply)
{
	struct buffer *errmsg;
	struct htx_sl *sl;
	struct buffer *body = NULL;
	const char *status, *reason, *clen, *ctype;
	unsigned int slflags;
	int ret = 0;

	/*
	 * - HTTP_REPLY_ERRFILES unexpected here. handled as no payload if so
	 *
	 * - HTTP_REPLY_INDIRECT: switch on another reply if defined or handled
	 *   as no payload if NULL. the TXN status code is set with the status
	 *   of the original reply.
	 */

	if (reply->type == HTTP_REPLY_INDIRECT) {
		if (reply->body.reply)
			reply = reply->body.reply;
	}
	if (reply->type == HTTP_REPLY_ERRMSG && !reply->body.errmsg) {
		/* get default error message */
		if (reply == s->txn->http_reply)
			s->txn->http_reply = NULL;
		reply = http_error_message(s);
		/* the default message may itself be an indirect reply */
		if (reply->type == HTTP_REPLY_INDIRECT) {
			if (reply->body.reply)
				reply = reply->body.reply;
		}
	}

	if (reply->type == HTTP_REPLY_ERRMSG) {
		/* implicit or explicit error message: the errmsg buffer already
		 * holds a full HTTP message, copy it verbatim
		 */
		errmsg = reply->body.errmsg;
		if (errmsg && !b_is_null(errmsg)) {
			if (!htx_copy_msg(htx, errmsg))
				goto fail;
		}
	}
	else {
		/* no payload, file or log-format string: the message must be
		 * built from scratch (start line, headers, then the body)
		 */
		if (reply->type == HTTP_REPLY_RAW) {
			/* file */
			body = &reply->body.obj;
		}
		else if (reply->type == HTTP_REPLY_LOGFMT) {
			/* log-format string, evaluated into a trash chunk that
			 * is released at the <leave> label below
			 */
			body = alloc_trash_chunk();
			if (!body)
				goto fail_alloc;
			body->data = build_logline(s, body->area, body->size, &reply->body.fmt);
		}
		/* else no payload */

		status = ultoa(reply->status);
		reason = http_get_reason(reply->status);
		slflags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN);
		if (!body || !b_data(body))
			slflags |= HTX_SL_F_BODYLESS;
		sl = htx_add_stline(htx, HTX_BLK_RES_SL, slflags, ist("HTTP/1.1"), ist(status), ist(reason));
		if (!sl)
			goto fail;
		sl->info.res.status = reply->status;

		clen = (body ? ultoa(b_data(body)) : "0");
		ctype = reply->ctype;

		if (!LIST_ISEMPTY(&reply->hdrs)) {
			struct http_reply_hdr *hdr;
			struct buffer *value = alloc_trash_chunk();

			if (!value)
				goto fail;

			/* add the configured headers; a header whose evaluated
			 * value is empty is simply skipped
			 */
			list_for_each_entry(hdr, &reply->hdrs, list) {
				chunk_reset(value);
				value->data = build_logline(s, value->area, value->size, &hdr->value);
				if (b_data(value) && !htx_add_header(htx, hdr->name, ist2(b_head(value), b_data(value)))) {
					free_trash_chunk(value);
					goto fail;
				}
				chunk_reset(value);
			}
			free_trash_chunk(value);
		}

		if (!htx_add_header(htx, ist("content-length"), ist(clen)) ||
		    (body && b_data(body) && ctype && !htx_add_header(htx, ist("content-type"), ist(ctype))) ||
		    !htx_add_endof(htx, HTX_BLK_EOH) ||
		    (body && b_data(body) && !htx_add_data_atonce(htx, ist2(b_head(body), b_data(body)))))
			goto fail;

		htx->flags |= HTX_FL_EOM;
	}

  leave:
	/* only the log-format body was allocated by this function */
	if (reply->type == HTTP_REPLY_LOGFMT)
		free_trash_chunk(body);
	return ret;

  fail_alloc:
	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_RESOURCE;
	/* fall through */
  fail:
	ret = -1;
	goto leave;
}
+
/* Send an http reply to the client. On success, it returns 0. If an error
 * occurs -1 is returned and the response channel is truncated, removing this
 * way the faulty reply. This function may fail when the reply is formatted
 * (http_reply_to_htx) or when the reply is forwarded
 * (http_forward_proxy_resp). On the last case, it is because a
 * http-after-response rule fails.
 */
int http_reply_message(struct stream *s, struct http_reply *reply)
{
	struct channel *res = &s->res;
	struct htx *htx = htx_from_buf(&res->buf);

	/* if no status was set on the txn yet, inherit the reply's one */
	if (s->txn->status == -1)
		s->txn->status = reply->status;
	/* discard any previously built response before formatting this one */
	channel_htx_truncate(res, htx);

	if (http_reply_to_htx(s, htx, reply) == -1)
		goto fail;

	htx_to_buf(htx, &s->res.buf);
	if (!http_forward_proxy_resp(s, 1))
		goto fail;
	return 0;

  fail:
	/* remove the faulty, possibly partial, reply */
	channel_htx_truncate(res, htx);
	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;
	return -1;
}
+
/* Return the error message corresponding to s->conn_err_type. It is assumed
 * that the server side is closed. Note that err_type is actually a
 * bitmask, where almost only aborts may be cumulated with other
 * values. We consider that aborted operations are more important
 * than timeouts or errors due to the fact that nobody else in the
 * logs might explain incomplete retries. All others should avoid
 * being cumulated. It should normally not be possible to have multiple
 * aborts at once, but just in case, the first one in sequence is reported.
 * Note that connection errors appearing on the second request of a keep-alive
 * connection are not reported since this allows the client to retry.
 */
void http_return_srv_error(struct stream *s, struct stconn *sc)
{
	int err_type = s->conn_err_type;

	/* set s->txn->status for http_error_message(s) */
	if (err_type & STRM_ET_QUEUE_ABRT) {
		/* client abort while queued: no message is sent */
		s->txn->status = -1;
		http_server_error(s, sc, SF_ERR_CLICL, SF_FINST_Q, NULL);
	}
	else if (err_type & STRM_ET_CONN_ABRT) {
		/* client abort during the connection attempt: no message */
		s->txn->status = -1;
		http_server_error(s, sc, SF_ERR_CLICL, SF_FINST_C, NULL);
	}
	else if (err_type & STRM_ET_QUEUE_TO) {
		/* timeout while waiting in the server queue */
		s->txn->status = 503;
		http_server_error(s, sc, SF_ERR_SRVTO, SF_FINST_Q,
				  http_error_message(s));
	}
	else if (err_type & STRM_ET_QUEUE_ERR) {
		/* error while waiting in the server queue */
		s->txn->status = 503;
		http_server_error(s, sc, SF_ERR_SRVCL, SF_FINST_Q,
				  http_error_message(s));
	}
	else if (err_type & STRM_ET_CONN_TO) {
		/* connect timeout; no message on non-first keep-alive requests */
		s->txn->status = 503;
		http_server_error(s, sc, SF_ERR_SRVTO, SF_FINST_C,
				  (s->txn->flags & TX_NOT_FIRST) ? NULL :
				  http_error_message(s));
	}
	else if (err_type & STRM_ET_CONN_ERR) {
		/* connect error; no message when the server conn was reused */
		s->txn->status = 503;
		http_server_error(s, sc, SF_ERR_SRVCL, SF_FINST_C,
				  (s->flags & SF_SRV_REUSED) ? NULL :
				  http_error_message(s));
	}
	else if (err_type & STRM_ET_CONN_RES) {
		/* resource shortage during the connection attempt */
		s->txn->status = 503;
		http_server_error(s, sc, SF_ERR_RESOURCE, SF_FINST_C,
				  (s->txn->flags & TX_NOT_FIRST) ? NULL :
				  http_error_message(s));
	}
	else { /* STRM_ET_CONN_OTHER and others */
		s->txn->status = 500;
		http_server_error(s, sc, SF_ERR_INTERNAL, SF_FINST_C,
				  http_error_message(s));
	}
}
+
+
+/* Handle Expect: 100-continue for HTTP/1.1 messages if necessary. It returns 0
+ * on success and -1 on error.
+ */
+static int http_handle_expect_hdr(struct stream *s, struct htx *htx, struct http_msg *msg)
+{
+ /* If we have HTTP/1.1 message with a body and Expect: 100-continue,
+ * then we must send an HTTP/1.1 100 Continue intermediate response.
+ */
+ if (!(msg->flags & HTTP_MSGF_EXPECT_CHECKED) &&
+ (msg->flags & HTTP_MSGF_VER_11) &&
+ (msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
+ struct ist hdr = { .ptr = "Expect", .len = 6 };
+ struct http_hdr_ctx ctx;
+
+ ctx.blk = NULL;
+ /* Expect is allowed in 1.1, look for it */
+ if (http_find_header(htx, hdr, &ctx, 0) &&
+ unlikely(isteqi(ctx.value, ist2("100-continue", 12)))) {
+ if (http_reply_100_continue(s) == -1)
+ return -1;
+ http_remove_header(htx, &ctx);
+ }
+ }
+ msg->flags |= HTTP_MSGF_EXPECT_CHECKED;
+ return 0;
+}
+
/* Send a 100-Continue response to the client. It returns 0 on success and -1
 * on error. The response channel is updated accordingly.
 */
static int http_reply_100_continue(struct stream *s)
{
	struct channel *res = &s->res;
	struct htx *htx = htx_from_buf(&res->buf);
	struct htx_sl *sl;
	unsigned int flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|
			      HTX_SL_F_XFER_LEN|HTX_SL_F_BODYLESS);

	/* interim response: a start line and an end of headers, no body */
	sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags,
			    ist("HTTP/1.1"), ist("100"), ist("Continue"));
	if (!sl)
		goto fail;
	sl->info.res.status = 100;

	if (!htx_add_endof(htx, HTX_BLK_EOH))
		goto fail;

	/* non-final forward (<final>=0): the transaction stays open for the
	 * real response
	 */
	if (!http_forward_proxy_resp(s, 0))
		goto fail;
	return 0;

  fail:
	/* If an error occurred, remove the incomplete HTTP response from the
	 * buffer */
	channel_htx_truncate(res, htx);
	return -1;
}
+
+
/*
 * Capture headers from message <htx> according to header list <cap_hdr>, and
 * fill the <cap> pointers appropriately. Each captured value is truncated to
 * the capture's configured length and NUL-terminated; the storage is taken
 * from the capture's dedicated pool on first use.
 */
static void http_capture_headers(struct htx *htx, char **cap, struct cap_hdr *cap_hdr)
{
	struct cap_hdr *h;
	int32_t pos;

	/* walk all header blocks until the end of headers */
	for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
		struct htx_blk *blk = htx_get_blk(htx, pos);
		enum htx_blk_type type = htx_get_blk_type(blk);
		struct ist n, v;

		if (type == HTX_BLK_EOH)
			break;
		if (type != HTX_BLK_HDR)
			continue;

		n = htx_get_blk_name(htx, blk);

		/* a header may match several configured captures */
		for (h = cap_hdr; h; h = h->next) {
			if (h->namelen && (h->namelen == n.len) &&
			    (strncasecmp(n.ptr, h->name, h->namelen) == 0)) {
				if (cap[h->index] == NULL)
					cap[h->index] =
						pool_alloc(h->pool);

				if (cap[h->index] == NULL) {
					ha_alert("HTTP capture : out of memory.\n");
					break;
				}

				/* keep at most h->len bytes of the value; the
				 * pool entry is assumed to hold h->len+1 bytes
				 * for the trailing NUL — TODO confirm
				 */
				v = htx_get_blk_value(htx, blk);
				v = isttrim(v, h->len);

				memcpy(cap[h->index], v.ptr, v.len);
				cap[h->index][v.len]=0;
			}
		}
	}
}
+
/* Delete a value in a header between delimiters <from> and <next>. The header
 * itself is delimited by <start> and <end> pointers. The number of characters
 * displaced is returned, and the pointer to the first delimiter is updated if
 * required. The function tries as much as possible to respect the following
 * principles :
 * - replace <from> delimiter by the <next> one unless <from> points to <start>,
 *   in which case <next> is simply removed
 * - set exactly one space character after the new first delimiter, unless there
 *   are not enough characters in the block being moved to do so.
 * - remove unneeded spaces before the previous delimiter and after the new
 *   one.
 *
 * It is the caller's responsibility to ensure that :
 * - <from> points to a valid delimiter or <start> ;
 * - <next> points to a valid delimiter or <end> ;
 * - there are non-space chars before <from>.
 */
static int http_del_hdr_value(char *start, char *end, char **from, char *next)
{
	char *prev = *from;

	if (prev == start) {
		/* We're removing the first value. eat the semicolon, if <next>
		 * is lower than <end> */
		if (next < end)
			next++;

		/* and any spaces that follow it */
		while (next < end && HTTP_IS_SPHT(*next))
			next++;
	}
	else {
		/* Remove useless spaces before the old delimiter. */
		while (HTTP_IS_SPHT(*(prev-1)))
			prev--;
		*from = prev;

		/* copy the delimiter and if possible a space if we're
		 * not at the end of the line.
		 */
		if (next < end) {
			*prev++ = *next++;
			if (prev + 1 < next)
				*prev++ = ' ';
			while (next < end && HTTP_IS_SPHT(*next))
				next++;
		}
	}
	/* shift the tail of the header over the deleted value; the (negative)
	 * return value is the number of characters removed
	 */
	memmove(prev, next, end - next);
	return (prev - next);
}
+
+
/* Formats the start line of the request (without CRLF) and puts it in <str> and
 * return the written length. The line can be truncated if it exceeds <len>.
 * The output format is "<method> <uri> <version>".
 */
static size_t http_fmt_req_line(const struct htx_sl *sl, char *str, size_t len)
{
	struct ist dst = ist2(str, 0);

	if (istcat(&dst, htx_sl_req_meth(sl), len) == -1)
		goto end;
	/* make sure the separating space still fits */
	if (dst.len + 1 > len)
		goto end;
	dst.ptr[dst.len++] = ' ';

	if (istcat(&dst, htx_sl_req_uri(sl), len) == -1)
		goto end;
	if (dst.len + 1 > len)
		goto end;
	dst.ptr[dst.len++] = ' ';

	/* the version may be truncated; the resulting length is returned anyway */
	istcat(&dst, htx_sl_req_vsn(sl), len);
  end:
	return dst.len;
}
+
/*
 * Print a debug line with a start line. The line is written directly to file
 * descriptor 1 (stdout), prefixed with the stream's unique id, backend name,
 * direction <dir> and the client/server connection fds.
 */
static void http_debug_stline(const char *dir, struct stream *s, const struct htx_sl *sl)
{
	struct session *sess = strm_sess(s);
	int max;

	chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
		     dir,
		     objt_conn(sess->origin) ? (unsigned short)__objt_conn(sess->origin)->handle.fd : -1,
		     sc_conn(s->scb) ? (unsigned short)(__sc_conn(s->scb))->handle.fd : -1);

	/* each part is bounded so that the remaining separators and the
	 * trailing newline always fit in the trash buffer
	 */
	max = HTX_SL_P1_LEN(sl);
	UBOUND(max, trash.size - trash.data - 3);
	chunk_memcat(&trash, HTX_SL_P1_PTR(sl), max);
	trash.area[trash.data++] = ' ';

	max = HTX_SL_P2_LEN(sl);
	UBOUND(max, trash.size - trash.data - 2);
	chunk_memcat(&trash, HTX_SL_P2_PTR(sl), max);
	trash.area[trash.data++] = ' ';

	max = HTX_SL_P3_LEN(sl);
	UBOUND(max, trash.size - trash.data - 1);
	chunk_memcat(&trash, HTX_SL_P3_PTR(sl), max);
	trash.area[trash.data++] = '\n';

	DISGUISE(write(1, trash.area, trash.data));
}
+
/*
 * Print a debug line with a header. The "name: value" pair is written
 * directly to file descriptor 1 (stdout), with the same prefix as
 * http_debug_stline().
 */
static void http_debug_hdr(const char *dir, struct stream *s, const struct ist n, const struct ist v)
{
	struct session *sess = strm_sess(s);
	int max;

	chunk_printf(&trash, "%08x:%s.%s[%04x:%04x]: ", s->uniq_id, s->be->id,
		     dir,
		     objt_conn(sess->origin) ? (unsigned short)__objt_conn(sess->origin)->handle.fd : -1,
		     sc_conn(s->scb) ? (unsigned short)(__sc_conn(s->scb))->handle.fd : -1);

	/* bound the name so that ": " and the newline always fit */
	max = n.len;
	UBOUND(max, trash.size - trash.data - 3);
	chunk_memcat(&trash, n.ptr, max);
	trash.area[trash.data++] = ':';
	trash.area[trash.data++] = ' ';

	max = v.len;
	UBOUND(max, trash.size - trash.data - 1);
	chunk_memcat(&trash, v.ptr, max);
	trash.area[trash.data++] = '\n';

	DISGUISE(write(1, trash.area, trash.data));
}
+
/* Reset the request message of transaction <txn> to its initial, unparsed
 * state so a new request can be processed.
 */
void http_txn_reset_req(struct http_txn *txn)
{
	txn->req.flags = 0;
	txn->req.msg_state = HTTP_MSG_RQBEFORE; /* at the very beginning of the request */
}
+
/* Reset the response message of transaction <txn> to its initial, unparsed
 * state so a new response can be processed.
 */
void http_txn_reset_res(struct http_txn *txn)
{
	txn->rsp.flags = 0;
	txn->rsp.msg_state = HTTP_MSG_RPBEFORE; /* at the very beginning of the response */
}
+
/*
 * Create and initialize a new HTTP transaction for stream <s>. This should be
 * used before processing any new request. It returns the transaction or NULL
 * on error.
 */
struct http_txn *http_create_txn(struct stream *s)
{
	struct http_txn *txn;
	struct stconn *sc = s->scf;

	txn = pool_alloc(pool_head_http_txn);
	if (!txn)
		return NULL;
	s->txn = txn;

	txn->meth = HTTP_METH_OTHER;
	/* TX_NOT_FIRST marks subsequent requests on a reused connection */
	txn->flags = ((sc && sc_ep_test(sc, SE_FL_NOT_FIRST)) ? TX_NOT_FIRST : 0);
	txn->status = -1;        /* no status assigned yet */
	txn->server_status = -1;
	txn->http_reply = NULL;
	txn->l7_buffer = BUF_NULL;
	write_u32(txn->cache_hash, 0);

	txn->cookie_first_date = 0;
	txn->cookie_last_date = 0;

	txn->srv_cookie = NULL;
	txn->cli_cookie = NULL;
	txn->uri = NULL;

	http_txn_reset_req(txn);
	http_txn_reset_res(txn);

	txn->req.chn = &s->req;
	txn->rsp.chn = &s->res;

	txn->auth.method = HTTP_AUTH_UNKNOWN;

	/* here we don't want to re-initialize s->vars_txn and s->vars_reqres
	 * variable lists, because they were already initialized upon stream
	 * creation in stream_new(), and thus may already contain some variables
	 */

	return txn;
}
+
/* to be used at the end of a transaction: releases all dynamically allocated
 * parts of the txn (captured URI and cookies, unique ID, L7 buffer and
 * variables), then the txn itself, and detaches it from the stream.
 */
void http_destroy_txn(struct stream *s)
{
	struct http_txn *txn = s->txn;

	/* these ones will have been dynamically allocated */
	pool_free(pool_head_requri, txn->uri);
	pool_free(pool_head_capture, txn->cli_cookie);
	pool_free(pool_head_capture, txn->srv_cookie);
	pool_free(pool_head_uniqueid, s->unique_id.ptr);

	s->unique_id = IST_NULL;
	txn->uri = NULL;
	txn->srv_cookie = NULL;
	txn->cli_cookie = NULL;

	/* prune the variable lists only when non-empty */
	if (!LIST_ISEMPTY(&s->vars_txn.head))
		vars_prune(&s->vars_txn, s->sess, s);
	if (!LIST_ISEMPTY(&s->vars_reqres.head))
		vars_prune(&s->vars_reqres, s->sess, s);

	b_free(&txn->l7_buffer);

	pool_free(pool_head_http_txn, txn);
	s->txn = NULL;
}
+
+
/* Set the stream termination flags (SF_ERR_* and SF_FINST_*) on <s> if not
 * already set, deducing the final state from how far the transaction
 * progressed on the server side.
 */
void http_set_term_flags(struct stream *s)
{
	if (!(s->flags & SF_ERR_MASK))
		s->flags |= SF_ERR_PRXCOND;

	if (!(s->flags & SF_FINST_MASK)) {
		if (s->scb->state == SC_ST_INI) {
			/* Before any connection attempt on the server side, we
			 * are still in the request analysis. Just take care to
			 * detect tarpit error
			 */
			if (s->req.analysers & AN_REQ_HTTP_TARPIT)
				s->flags |= SF_FINST_T;
			else
				s->flags |= SF_FINST_R;
		}
		else if (s->scb->state == SC_ST_QUE)
			s->flags |= SF_FINST_Q;
		else if (sc_state_in(s->scb->state, SC_SB_REQ|SC_SB_TAR|SC_SB_ASS|SC_SB_CON|SC_SB_CER|SC_SB_RDY)) {
			if (unlikely(objt_applet(s->target))) {
				/* applets are accounted as request processing */
				s->flags |= SF_FINST_R;
			}
			else
				s->flags |= SF_FINST_C;
		}
		else {
			if (s->txn->rsp.msg_state < HTTP_MSG_DATA) {
				/* We are still processing the response headers */
				s->flags |= SF_FINST_H;
			}
			// (res == (done|closing|closed)) & (res->flags & shutw)
			else if (s->txn->rsp.msg_state >= HTTP_MSG_DONE && s->txn->rsp.msg_state < HTTP_MSG_TUNNEL &&
				 (s->flags & (SF_ERR_CLITO|SF_ERR_CLICL))) {
				/* A client error was reported and we are
				 * transmitting the last block of data
				 */
				s->flags |= SF_FINST_L;
			}
			else {
				/* Otherwise we are in DATA phase on both sides */
				s->flags |= SF_FINST_D;
			}
		}
	}
}
+
+
+DECLARE_POOL(pool_head_http_txn, "http_txn", sizeof(struct http_txn));
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/http_client.c b/src/http_client.c
new file mode 100644
index 0000000..d7e50c0
--- /dev/null
+++ b/src/http_client.c
@@ -0,0 +1,1598 @@
+/*
+ * HTTP Client
+ *
+ * Copyright (C) 2021 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file implements an HTTP Client API.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cli.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/global.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/h1_htx.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/http_client.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/htx.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+#include <string.h>
+
/* the internal proxy shared by all httpclient instances */
static struct proxy *httpclient_proxy;

#ifdef USE_OPENSSL
/* if the httpclient is not configured, errors are ignored and features are limited */
static int hard_error_ssl = 0;
static int httpclient_ssl_verify = SSL_SOCK_VERIFY_REQUIRED;
static char *httpclient_ssl_ca_file = NULL;
#endif
static struct applet httpclient_applet;

/* if the httpclient is not configured, errors are ignored and features are limited */
static int hard_error_resolvers = 0;
static char *resolvers_id = NULL;
static char *resolvers_prefer = NULL;
static int resolvers_disabled = 0;

/* connection settings used by the httpclient */
static int httpclient_retries = CONN_RETRIES;
static int httpclient_timeout_connect = MS_TO_TICKS(5000);

/* --- This part of the file implements an HTTP client over the CLI ---
 * The function names start with "hc_cli" for "httpclient cli"
 */

/* the CLI context for the httpclient command */
struct hcli_svc_ctx {
	struct httpclient *hc; /* the httpclient instance */
	uint flags;            /* HC_F_* flags set by the response callbacks */
};
+
+/* These are the callback used by the HTTP Client when it needs to notify new
+ * data, we only sets a flag in the IO handler via the svcctx.
+ */
+void hc_cli_res_stline_cb(struct httpclient *hc)
+{
+ struct appctx *appctx = hc->caller;
+ struct hcli_svc_ctx *ctx;
+
+ if (!appctx)
+ return;
+
+ ctx = appctx->svcctx;
+ ctx->flags |= HC_F_RES_STLINE;
+ appctx_wakeup(appctx);
+}
+
+void hc_cli_res_headers_cb(struct httpclient *hc)
+{
+ struct appctx *appctx = hc->caller;
+ struct hcli_svc_ctx *ctx;
+
+ if (!appctx)
+ return;
+
+ ctx = appctx->svcctx;
+ ctx->flags |= HC_F_RES_HDR;
+ appctx_wakeup(appctx);
+}
+
+void hc_cli_res_body_cb(struct httpclient *hc)
+{
+ struct appctx *appctx = hc->caller;
+ struct hcli_svc_ctx *ctx;
+
+ if (!appctx)
+ return;
+
+ ctx = appctx->svcctx;
+ ctx->flags |= HC_F_RES_BODY;
+ appctx_wakeup(appctx);
+}
+
+void hc_cli_res_end_cb(struct httpclient *hc)
+{
+ struct appctx *appctx = hc->caller;
+ struct hcli_svc_ctx *ctx;
+
+ if (!appctx)
+ return;
+
+ ctx = appctx->svcctx;
+ ctx->flags |= HC_F_RES_END;
+ appctx_wakeup(appctx);
+}
+
/*
 * Parse an httpclient keyword on the cli:
 *   httpclient <method> <URI>
 * An optional CLI payload is used as the request body. Returns 0 on success
 * (the request was started), 1 when the caller lacks the required level, or
 * the cli_err() result on failure.
 */
static int hc_cli_parse(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct hcli_svc_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	struct httpclient *hc;
	char *err = NULL;
	enum http_meth_t meth;
	char *meth_str;
	struct ist uri;
	struct ist body = IST_NULL;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	if (!*args[1] || !*args[2]) {
		memprintf(&err, ": not enough parameters");
		goto err;
	}

	meth_str = args[1];
	uri = ist(args[2]);

	if (payload)
		body = ist(payload);

	meth = find_http_meth(meth_str, strlen(meth_str));

	hc = httpclient_new(appctx, meth, uri);
	if (!hc) {
		goto err;
	}

	/* update the httpclient callbacks */
	hc->ops.res_stline = hc_cli_res_stline_cb;
	hc->ops.res_headers = hc_cli_res_headers_cb;
	hc->ops.res_payload = hc_cli_res_body_cb;
	hc->ops.res_end = hc_cli_res_end_cb;

	ctx->hc = hc; /* store the httpclient ptr in the applet */
	ctx->flags = 0;

	/* NOTE(review): on the error paths below, <hc> is not released here —
	 * confirm whether it is owned/cleaned elsewhere (e.g. the applet
	 * release path) or leaked.
	 */
	if (httpclient_req_gen(hc, hc->req.url, hc->req.meth, NULL, body) != ERR_NONE)
		goto err;


	if (!httpclient_start(hc))
		goto err;

	return 0;

err:
	/* <err> is both destination and argument here; this relies on
	 * memprintf() building the new string before releasing the old one
	 */
	memprintf(&err, "Can't start the HTTP client%s.\n", err ? err : "");
	return cli_err(appctx, err);
}
+
/* This function dumps the content of the httpclient receive buffer
 * on the CLI output
 *
 * Return 1 when the processing is finished
 * return 0 if it needs to be called again
 */
static int hc_cli_io_handler(struct appctx *appctx)
{
	struct hcli_svc_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct httpclient *hc = ctx->hc;
	struct http_hdr *hdrs, *hdr;

	/* each HC_F_RES_* flag set by the callbacks corresponds to one dump
	 * step below; a flag is only cleared once its data was fully emitted
	 */
	if (ctx->flags & HC_F_RES_STLINE) {
		chunk_printf(&trash, "%.*s %d %.*s\n", (unsigned int)istlen(hc->res.vsn), istptr(hc->res.vsn),
			     hc->res.status, (unsigned int)istlen(hc->res.reason), istptr(hc->res.reason));
		if (applet_putchk(appctx, &trash) == -1)
			goto more;
		ctx->flags &= ~HC_F_RES_STLINE;
	}

	if (ctx->flags & HC_F_RES_HDR) {
		chunk_reset(&trash);
		hdrs = hc->res.hdrs;
		/* the header array is terminated by an entry with a null value */
		for (hdr = hdrs; isttest(hdr->v); hdr++) {
			if (!h1_format_htx_hdr(hdr->n, hdr->v, &trash))
				goto too_many_hdrs;
		}
		if (!chunk_memcat(&trash, "\r\n", 2))
			goto too_many_hdrs;
		if (applet_putchk(appctx, &trash) == -1)
			goto more;
		ctx->flags &= ~HC_F_RES_HDR;
	}

	if (ctx->flags & HC_F_RES_BODY) {
		int ret;

		ret = httpclient_res_xfer(hc, sc_ib(sc));
		channel_add_input(sc_ic(sc), ret); /* forward what we put in the buffer channel */

		/* remove the flag if the buffer was emptied */
		if (httpclient_data(hc))
			goto more;
		ctx->flags &= ~HC_F_RES_BODY;
	}

	/* we must close only if F_END is the last flag */
	if (ctx->flags == HC_F_RES_END) {
		ctx->flags &= ~HC_F_RES_END;
		goto end;
	}

more:
	if (!ctx->flags)
		applet_have_no_more_data(appctx);
	return 0;
end:
	return 1;

too_many_hdrs:
	return cli_err(appctx, "Too many headers.\n");
}
+
+static void hc_cli_release(struct appctx *appctx)
+{
+ struct hcli_svc_ctx *ctx = appctx->svcctx;
+ struct httpclient *hc = ctx->hc;
+
+ /* Everything possible was printed on the CLI, we can destroy the client */
+ httpclient_stop_and_destroy(hc);
+
+ return;
+}
+
/* register cli keywords: the list is terminated by the all-NULL entry */
static struct cli_kw_list cli_kws = {{ },{
	{ { "httpclient", NULL }, "httpclient <method> <URI>              : launch an HTTP request", hc_cli_parse, hc_cli_io_handler, hc_cli_release, NULL, ACCESS_EXPERT},
	{ { NULL }, NULL, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+
+/* --- This part of the file implements the actual HTTP client API --- */
+
/*
 * Generate a simple request and fill the httpclient request buffer with it.
 * The request contains a request line generated from the absolute <url> and
 * <meth> as well as list of headers <hdrs>.
 *
 * If the buffer was filled correctly the function returns 0, if not it returns
 * an error_code but there is no guarantee that the buffer wasn't modified.
 */
int httpclient_req_gen(struct httpclient *hc, const struct ist url, enum http_meth_t meth, const struct http_hdr *hdrs, const struct ist payload)
{
	struct htx_sl *sl;
	struct htx *htx;
	int err_code = 0;
	struct ist meth_ist, vsn;
	unsigned int flags = HTX_SL_F_VER_11 | HTX_SL_F_NORMALIZED_URI | HTX_SL_F_HAS_SCHM;
	int i;
	int foundhost = 0, foundaccept = 0, foundua = 0;

	if (!b_alloc(&hc->req.buf))
		goto error;

	/* only known methods are supported */
	if (meth >= HTTP_METH_OTHER)
		goto error;

	meth_ist = http_known_methods[meth];

	vsn = ist("HTTP/1.1");

	htx = htx_from_buf(&hc->req.buf);
	if (!htx)
		goto error;

	/* no payload callback and no inline payload: mark it bodyless */
	if (!hc->ops.req_payload && !isttest(payload))
		flags |= HTX_SL_F_BODYLESS;

	sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, meth_ist, url, vsn);
	if (!sl) {
		goto error;
	}
	sl->info.req.meth = meth;

	/* copy the caller's headers, remembering whether Host, Accept and
	 * User-Agent were provided so defaults can be added below
	 */
	for (i = 0; hdrs && hdrs[i].n.len; i++) {
		/* Don't check the value length because a header value may be empty */
		if (isttest(hdrs[i].v) == 0)
			continue;

		if (isteqi(hdrs[i].n, ist("host")))
			foundhost = 1;
		else if (isteqi(hdrs[i].n, ist("accept")))
			foundaccept = 1;
		else if (isteqi(hdrs[i].n, ist("user-agent")))
			foundua = 1;

		if (!htx_add_header(htx, hdrs[i].n, hdrs[i].v))
			goto error;
	}

	if (!foundhost) {
		/* Add Host Header from URL: a placeholder value is inserted
		 * first, then rewritten from the URL by http_update_host()
		 */
		if (!htx_add_header(htx, ist("Host"), ist("h")))
			goto error;
		if (!http_update_host(htx, sl, url))
			goto error;
	}

	if (!foundaccept) {
		if (!htx_add_header(htx, ist("Accept"), ist("*/*")))
			goto error;
	}

	if (!foundua) {
		if (!htx_add_header(htx, ist("User-Agent"), ist(HTTPCLIENT_USERAGENT)))
			goto error;
	}


	if (!htx_add_endof(htx, HTX_BLK_EOH))
		goto error;

	if (isttest(payload) && istlen(payload)) {
		/* add the payload if it can fit in the buffer, no need to set
		 * the Content-Length, the data will be sent chunked */
		if (!htx_add_data_atonce(htx, payload))
			goto error;
	}

	/* If req.payload was set, does not set the end of stream which *MUST*
	 * be set in the callback */
	if (!hc->ops.req_payload)
		htx->flags |= HTX_FL_EOM;

	htx_to_buf(htx, &hc->req.buf);

	return 0;
error:
	err_code |= ERR_ALERT | ERR_ABORT;
	return err_code;
}
+
+/*
+ * transfer the response to the destination buffer and wakeup the HTTP client
+ * applet so it could fill again its buffer.
+ *
+ * Return the number of bytes transferred.
+ */
+int httpclient_res_xfer(struct httpclient *hc, struct buffer *dst)
+{
+ size_t room = b_room(dst);
+ int ret;
+
+ ret = b_force_xfer(dst, &hc->res.buf, MIN(room, b_data(&hc->res.buf)));
+ /* call the client once we consumed all data */
+ if (!b_data(&hc->res.buf)) {
+ b_free(&hc->res.buf);
+ if (hc->appctx)
+ appctx_wakeup(hc->appctx);
+ }
+ return ret;
+}
+
+/*
+ * Transfer raw HTTP payload from src, and insert it into HTX format in the
+ * httpclient.
+ *
+ * Must be used to transfer the request body.
+ * Then wakeup the httpclient so it can transfer it.
+ *
+ * <end> tries to add the ending data flag if it succeeds in copying all data.
+ *
+ * Return the number of bytes copied from src.
+ */
+int httpclient_req_xfer(struct httpclient *hc, struct ist src, int end)
+{
+ int ret = 0;
+ struct htx *htx;
+
+ if (!b_alloc(&hc->req.buf))
+ goto error;
+
+ htx = htx_from_buf(&hc->req.buf);
+ if (!htx)
+ goto error;
+
+ if (hc->appctx)
+ appctx_wakeup(hc->appctx);
+
+ ret += htx_add_data(htx, src);
+
+
+ /* if we copied all the data and the end flag is set */
+ if ((istlen(src) == ret) && end) {
+ /* no more data are expected. If the HTX buffer is empty, be
+ * sure to add something (EOT block in this case) to have
+ * something to send. It is important to be sure the EOM flags
+ * will be handled by the endpoint. Because the message is
+ * empty, this should not fail. Otherwise it is an error
+ */
+ if (htx_is_empty(htx)) {
+ if (!htx_add_endof(htx, HTX_BLK_EOT))
+ goto error;
+ }
+ htx->flags |= HTX_FL_EOM;
+ }
+ htx_to_buf(htx, &hc->req.buf);
+
+error:
+
+ return ret;
+}
+
+/* Set the 'timeout server' in ms for the next httpclient request */
+void httpclient_set_timeout(struct httpclient *hc, int timeout)
+{
+ hc->timeout_server = timeout;
+}
+
+/*
+ * Sets a destination for the httpclient from an HAProxy addr format
+ * This will prevent the destination from being determined from the URL.
+ * Return 0 in case of success or -1 otherwise.
+ */
+int httpclient_set_dst(struct httpclient *hc, const char *dst)
+{
+ struct sockaddr_storage *sk;
+ char *errmsg = NULL;
+
+ sockaddr_free(&hc->dst);
+ /* 'sk' is statically allocated (no need to be freed). */
+ sk = str2sa_range(dst, NULL, NULL, NULL, NULL, NULL, NULL,
+ &errmsg, NULL, NULL,
+ PA_O_PORT_OK | PA_O_STREAM | PA_O_XPRT | PA_O_CONNECT);
+ if (!sk) {
+ ha_alert("httpclient: Failed to parse destination address in %s\n", errmsg);
+ free(errmsg);
+ return -1;
+ }
+
+ if (!sockaddr_alloc(&hc->dst, sk, sizeof(*sk))) {
+ ha_alert("httpclient: Failed to allocate sockaddr in %s:%d.\n", __FUNCTION__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Split <url> in <scheme>, <host>, <port>
+ */
+static int httpclient_spliturl(struct ist url, enum http_scheme *scheme,
+ struct ist *host, int *port)
+{
+ enum http_scheme scheme_tmp = SCH_HTTP;
+ int port_tmp = 0;
+ struct ist scheme_ist, authority_ist, host_ist, port_ist;
+ char *p, *end;
+ struct http_uri_parser parser;
+
+ parser = http_uri_parser_init(url);
+ scheme_ist = http_parse_scheme(&parser);
+ if (!isttest(scheme_ist)) {
+ return 0;
+ }
+
+ if (isteqi(scheme_ist, ist("http://"))){
+ scheme_tmp = SCH_HTTP;
+ port_tmp = 80;
+ } else if (isteqi(scheme_ist, ist("https://"))) {
+ scheme_tmp = SCH_HTTPS;
+ port_tmp = 443;
+ }
+
+ authority_ist = http_parse_authority(&parser, 1);
+ if (!isttest(authority_ist)) {
+ return 0;
+ }
+ p = end = istend(authority_ist);
+
+ /* look for a port at the end of the authority */
+ while (p > istptr(authority_ist) && isdigit((unsigned char)*--p))
+ ;
+
+ if (*p == ':') {
+ host_ist = ist2(istptr(authority_ist), p - istptr(authority_ist));
+ port_ist = istnext(ist2(p, end - p));
+ ist2str(trash.area, port_ist);
+ port_tmp = atoi(trash.area);
+ } else {
+ host_ist = authority_ist;
+ }
+
+ if (scheme)
+ *scheme = scheme_tmp;
+ if (host)
+ *host = host_ist;
+ if (port)
+ *port = port_tmp;
+
+ return 1;
+}
+
+/*
+ * Start the HTTP client
+ * Create the appctx, session, stream and wakeup the applet
+ *
+ * Return the <appctx> or NULL if it failed
+ */
+struct appctx *httpclient_start(struct httpclient *hc)
+{
+ struct applet *applet = &httpclient_applet;
+ struct appctx *appctx;
+
+ /* if the client was started and not ended, an applet is already
+ * running, we shouldn't try anything */
+ if (httpclient_started(hc) && !httpclient_ended(hc))
+ return NULL;
+
+ /* The HTTP client will be created in the same thread as the caller,
+ * avoiding threading issues */
+ appctx = appctx_new_here(applet, NULL);
+ if (!appctx)
+ goto out;
+ appctx->svcctx = hc;
+ hc->flags = 0;
+
+ if (appctx_init(appctx) == -1) {
+ ha_alert("httpclient: Failed to initialize appctx %s:%d.\n", __FUNCTION__, __LINE__);
+ goto out_free_appctx;
+ }
+
+ return appctx;
+
+out_free_appctx:
+ appctx_free_on_early_error(appctx);
+out:
+
+ return NULL;
+}
+
+/*
+ * This function tries to destroy the httpclient if it wasn't running.
+ * If it was running, stop the client and ask it to autodestroy itself.
+ *
+ * Once this function is used, all pointers to the client must be removed
+ *
+ */
+void httpclient_stop_and_destroy(struct httpclient *hc)
+{
+
+ /* The httpclient was already stopped or never started, we can safely destroy it */
+ if (hc->flags & HTTPCLIENT_FS_ENDED || !(hc->flags & HTTPCLIENT_FS_STARTED)) {
+ httpclient_destroy(hc);
+ } else {
+ /* if the client wasn't stopped, ask for a stop and destroy */
+ hc->flags |= (HTTPCLIENT_FA_AUTOKILL | HTTPCLIENT_FA_STOP);
+ /* the calling applet doesn't exist anymore */
+ hc->caller = NULL;
+ if (hc->appctx)
+ appctx_wakeup(hc->appctx);
+ }
+}
+
+/* Free the httpclient */
+void httpclient_destroy(struct httpclient *hc)
+{
+ struct http_hdr *hdrs;
+
+
+ if (!hc)
+ return;
+
+ /* we should never destroy a client which was started but not stopped */
+ BUG_ON(httpclient_started(hc) && !httpclient_ended(hc));
+
+ /* request */
+ istfree(&hc->req.url);
+ b_free(&hc->req.buf);
+ /* response */
+ istfree(&hc->res.vsn);
+ istfree(&hc->res.reason);
+ hdrs = hc->res.hdrs;
+ while (hdrs && isttest(hdrs->n)) {
+ istfree(&hdrs->n);
+ istfree(&hdrs->v);
+ hdrs++;
+ }
+ ha_free(&hc->res.hdrs);
+ b_free(&hc->res.buf);
+ sockaddr_free(&hc->dst);
+
+ free(hc);
+
+ return;
+}
+
+/* Allocate an httpclient and its buffers
+ * Use the default httpclient_proxy
+ *
+ * Return NULL on failure */
+struct httpclient *httpclient_new(void *caller, enum http_meth_t meth, struct ist url)
+{
+ struct httpclient *hc;
+
+ hc = calloc(1, sizeof(*hc));
+ if (!hc)
+ goto err;
+
+ hc->req.buf = BUF_NULL;
+ hc->res.buf = BUF_NULL;
+ hc->caller = caller;
+ hc->req.url = istdup(url);
+ hc->req.meth = meth;
+ httpclient_set_proxy(hc, httpclient_proxy);
+
+ return hc;
+
+err:
+ httpclient_destroy(hc);
+ return NULL;
+}
+
+/* Allocate an httpclient and its buffers,
+ * Use the proxy <px>
+ *
+ * Return an httpclient or NULL.
+ */
+struct httpclient *httpclient_new_from_proxy(struct proxy *px, void *caller, enum http_meth_t meth, struct ist url)
+{
+ struct httpclient *hc;
+
+ hc = httpclient_new(caller, meth, url);
+ if (!hc)
+ return NULL;
+
+ httpclient_set_proxy(hc, px);
+
+ return hc;
+}
+
+/*
+ * Configure an httpclient with a specific proxy <px>
+ *
+ * The proxy <px> must contain 2 servers, one configured for clear connections, the other for SSL.
+ *
+ */
+int httpclient_set_proxy(struct httpclient *hc, struct proxy *px)
+{
+ struct server *srv;
+
+ hc->px = px;
+
+ for (srv = px->srv; srv != NULL; srv = srv->next) {
+ if (srv->xprt == xprt_get(XPRT_RAW)) {
+ hc->srv_raw = srv;
+#ifdef USE_OPENSSL
+ } else if (srv->xprt == xprt_get(XPRT_SSL)) {
+ hc->srv_ssl = srv;
+#endif
+ }
+ }
+
+ return 0;
+}
+
+void httpclient_applet_io_handler(struct appctx *appctx)
+{
+ struct httpclient *hc = appctx->svcctx;
+ struct stconn *sc = appctx_sc(appctx);
+ struct stream *s = __sc_strm(sc);
+ struct channel *req = &s->req;
+ struct channel *res = &s->res;
+ struct htx_blk *blk = NULL;
+ struct htx *htx;
+ struct htx_sl *sl = NULL;
+ uint32_t hdr_num;
+ uint32_t sz;
+ int ret;
+
+ if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+ if (co_data(res)) {
+ htx = htx_from_buf(&res->buf);
+ co_htx_skip(res, htx, co_data(res));
+ htx_to_buf(htx, &res->buf);
+ }
+ goto out;
+ }
+ /* The IO handler could be called after the release, so we need to
+ * check if hc is still there to run the IO handler */
+ if (!hc)
+ goto out;
+
+ while (1) {
+
+ /* required to stop */
+ if (hc->flags & HTTPCLIENT_FA_STOP)
+ goto error;
+
+ switch(appctx->st0) {
+
+ case HTTPCLIENT_S_REQ:
+ /* we know that the buffer is empty here, since
+ * it's the first call, we can freely copy the
+ * request from the httpclient buffer */
+ ret = b_xfer(&req->buf, &hc->req.buf, b_data(&hc->req.buf));
+ if (!ret) {
+ sc_need_room(sc, 0);
+ goto out;
+ }
+
+ if (!b_data(&hc->req.buf))
+ b_free(&hc->req.buf);
+
+ htx = htx_from_buf(&req->buf);
+ if (!htx) {
+ sc_need_room(sc, 0);
+ goto out;
+ }
+
+ channel_add_input(req, htx->data);
+
+ if (htx->flags & HTX_FL_EOM) /* check if a body need to be added */
+ appctx->st0 = HTTPCLIENT_S_RES_STLINE;
+ else
+ appctx->st0 = HTTPCLIENT_S_REQ_BODY;
+
+ goto out; /* we need to leave the IO handler once we wrote the request */
+ break;
+
+ case HTTPCLIENT_S_REQ_BODY:
+ /* call the payload callback */
+ {
+ if (hc->ops.req_payload) {
+ struct htx *hc_htx;
+
+ /* call the request callback */
+ hc->ops.req_payload(hc);
+
+ hc_htx = htxbuf(&hc->req.buf);
+ if (htx_is_empty(hc_htx))
+ goto out;
+
+ htx = htx_from_buf(&req->buf);
+ if (htx_is_empty(htx)) {
+ size_t data = hc_htx->data;
+
+ /* Here htx_to_buf() will set buffer data to 0 because
+ * the HTX is empty, and allow us to do an xfer.
+ */
+ htx_to_buf(hc_htx, &hc->req.buf);
+ htx_to_buf(htx, &req->buf);
+ b_xfer(&req->buf, &hc->req.buf, b_data(&hc->req.buf));
+ channel_add_input(req, data);
+ } else {
+ struct htx_ret ret;
+
+ ret = htx_xfer_blks(htx, hc_htx, htx_used_space(hc_htx), HTX_BLK_UNUSED);
+ channel_add_input(req, ret.ret);
+
+ /* we must copy the EOM if we empty the buffer */
+ if (htx_is_empty(hc_htx)) {
+ htx->flags |= (hc_htx->flags & HTX_FL_EOM);
+ }
+ htx_to_buf(htx, &req->buf);
+ htx_to_buf(hc_htx, &hc->req.buf);
+ }
+
+
+ if (!b_data(&hc->req.buf))
+ b_free(&hc->req.buf);
+ }
+
+ htx = htxbuf(&req->buf);
+
+ /* if the request contains the HTX_FL_EOM, we finished the request part. */
+ if (htx->flags & HTX_FL_EOM)
+ appctx->st0 = HTTPCLIENT_S_RES_STLINE;
+
+ goto process_data; /* we need to leave the IO handler once we wrote the request */
+ }
+ break;
+
+ case HTTPCLIENT_S_RES_STLINE:
+ /* Request is finished, report EOI */
+ se_fl_set(appctx->sedesc, SE_FL_EOI);
+
+			/* copy the start line in the hc structure, then remove the htx block */
+ if (!co_data(res))
+ goto out;
+ htx = htxbuf(&res->buf);
+ if (htx_is_empty(htx))
+ goto out;
+ blk = htx_get_head_blk(htx);
+ if (blk && (htx_get_blk_type(blk) == HTX_BLK_RES_SL))
+ sl = htx_get_blk_ptr(htx, blk);
+ if (!sl || (!(sl->flags & HTX_SL_F_IS_RESP)))
+ goto out;
+
+ /* copy the status line in the httpclient */
+ hc->res.status = sl->info.res.status;
+ hc->res.vsn = istdup(htx_sl_res_vsn(sl));
+ hc->res.reason = istdup(htx_sl_res_reason(sl));
+ sz = htx_get_blksz(blk);
+ c_rew(res, sz);
+ htx_remove_blk(htx, blk);
+ /* caller callback */
+ if (hc->ops.res_stline)
+ hc->ops.res_stline(hc);
+
+ htx_to_buf(htx, &res->buf);
+
+ /* if there is no HTX data anymore and the EOM flag is
+ * set, leave (no body) */
+ if (htx_is_empty(htx) && htx->flags & HTX_FL_EOM)
+ appctx->st0 = HTTPCLIENT_S_RES_END;
+ else
+ appctx->st0 = HTTPCLIENT_S_RES_HDR;
+
+ break;
+
+ case HTTPCLIENT_S_RES_HDR:
+ /* first copy the headers in a local hdrs
+			 * structure; once we know the total number of
+			 * headers we allocate the right size and copy
+ * them. The htx block of the headers are
+ * removed each time one is read */
+ {
+ struct http_hdr hdrs[global.tune.max_http_hdr];
+
+ if (!co_data(res))
+ goto out;
+ htx = htxbuf(&res->buf);
+ if (htx_is_empty(htx))
+ goto out;
+
+ hdr_num = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t sz = htx_get_blksz(blk);
+
+ c_rew(res, sz);
+
+ if (type == HTX_BLK_HDR) {
+ hdrs[hdr_num].n = istdup(htx_get_blk_name(htx, blk));
+ hdrs[hdr_num].v = istdup(htx_get_blk_value(htx, blk));
+ hdr_num++;
+ }
+ else if (type == HTX_BLK_EOH) {
+ /* create a NULL end of array and leave the loop */
+ hdrs[hdr_num].n = IST_NULL;
+ hdrs[hdr_num].v = IST_NULL;
+ htx_remove_blk(htx, blk);
+ break;
+ }
+ blk = htx_remove_blk(htx, blk);
+ }
+ htx_to_buf(htx, &res->buf);
+
+ if (hdr_num) {
+ /* alloc and copy the headers in the httpclient struct */
+ hc->res.hdrs = calloc((hdr_num + 1), sizeof(*hc->res.hdrs));
+ if (!hc->res.hdrs)
+ goto error;
+ memcpy(hc->res.hdrs, hdrs, sizeof(struct http_hdr) * (hdr_num + 1));
+
+ /* caller callback */
+ if (hc->ops.res_headers)
+ hc->ops.res_headers(hc);
+ }
+
+ /* if there is no HTX data anymore and the EOM flag is
+ * set, leave (no body) */
+ if (htx_is_empty(htx) && htx->flags & HTX_FL_EOM) {
+ appctx->st0 = HTTPCLIENT_S_RES_END;
+ } else {
+ appctx->st0 = HTTPCLIENT_S_RES_BODY;
+ }
+ }
+ break;
+
+ case HTTPCLIENT_S_RES_BODY:
+ /*
+ * The IO handler removes the htx blocks in the response buffer and
+ * push them in the hc->res.buf buffer in a raw format.
+ */
+ if (!co_data(res))
+ goto out;
+
+ htx = htxbuf(&res->buf);
+ if (htx_is_empty(htx))
+ goto out;
+
+ if (!b_alloc(&hc->res.buf))
+ goto out;
+
+ if (b_full(&hc->res.buf))
+ goto process_data;
+
+			/* decapsulate the htx data to raw data */
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ size_t count = co_data(res);
+ uint32_t blksz = htx_get_blksz(blk);
+ uint32_t room = b_room(&hc->res.buf);
+ uint32_t vlen;
+
+				/* we should try to copy the maximum output data in a block, which fits
+ * the destination buffer */
+ vlen = MIN(count, blksz);
+ vlen = MIN(vlen, room);
+
+ if (vlen == 0) {
+ htx_to_buf(htx, &res->buf);
+ goto process_data;
+ }
+
+ if (type == HTX_BLK_DATA) {
+ struct ist v = htx_get_blk_value(htx, blk);
+
+ __b_putblk(&hc->res.buf, v.ptr, vlen);
+ c_rew(res, vlen);
+
+ if (vlen == blksz)
+ blk = htx_remove_blk(htx, blk);
+ else
+ htx_cut_data_blk(htx, blk, vlen);
+
+ /* the data must be processed by the caller in the receive phase */
+ if (hc->ops.res_payload)
+ hc->ops.res_payload(hc);
+
+ /* cannot copy everything, need to process */
+ if (vlen != blksz) {
+ htx_to_buf(htx, &res->buf);
+ goto process_data;
+ }
+ } else {
+ if (vlen != blksz) {
+ htx_to_buf(htx, &res->buf);
+ goto process_data;
+ }
+
+ /* remove any block which is not a data block */
+ c_rew(res, blksz);
+ blk = htx_remove_blk(htx, blk);
+ }
+ }
+
+ htx_to_buf(htx, &res->buf);
+
+ /* if not finished, should be called again */
+ if (!(htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)))
+ goto out;
+
+
+ /* end of message, we should quit */
+ appctx->st0 = HTTPCLIENT_S_RES_END;
+ break;
+
+ case HTTPCLIENT_S_RES_END:
+ se_fl_set(appctx->sedesc, SE_FL_EOS);
+ goto out;
+ break;
+ }
+ }
+
+out:
+ return;
+
+process_data:
+ sc_will_read(sc);
+ goto out;
+
+error:
+ se_fl_set(appctx->sedesc, SE_FL_ERROR);
+ goto out;
+}
+
+int httpclient_applet_init(struct appctx *appctx)
+{
+ struct httpclient *hc = appctx->svcctx;
+ struct stream *s;
+ struct sockaddr_storage *addr = NULL;
+ struct sockaddr_storage ss_url = {};
+ struct sockaddr_storage *ss_dst;
+ enum obj_type *target = NULL;
+ struct ist host = IST_NULL;
+ enum http_scheme scheme;
+ int port;
+ int doresolve = 0;
+
+
+ /* parse the URL and */
+ if (!httpclient_spliturl(hc->req.url, &scheme, &host, &port))
+ goto out_error;
+
+ if (hc->dst) {
+ /* if httpclient_set_dst() was used, sets the alternative address */
+ ss_dst = hc->dst;
+ } else {
+ /* set the dst using the host, or 0.0.0.0 to resolve */
+ ist2str(trash.area, host);
+ ss_dst = str2ip2(trash.area, &ss_url, 0);
+ if (!ss_dst) { /* couldn't get an IP from that, try to resolve */
+ doresolve = 1;
+ ss_dst = str2ip2("0.0.0.0", &ss_url, 0);
+ }
+ sock_inet_set_port(ss_dst, port);
+ }
+
+ if (!sockaddr_alloc(&addr, ss_dst, sizeof(*ss_dst)))
+ goto out_error;
+
+ /* choose the SSL server or not */
+ switch (scheme) {
+ case SCH_HTTP:
+ target = &hc->srv_raw->obj_type;
+ break;
+ case SCH_HTTPS:
+#ifdef USE_OPENSSL
+ if (hc->srv_ssl) {
+ target = &hc->srv_ssl->obj_type;
+ } else {
+ ha_alert("httpclient: SSL was disabled (wrong verify/ca-file)!\n");
+ goto out_free_addr;
+ }
+#else
+ ha_alert("httpclient: OpenSSL is not available %s:%d.\n", __FUNCTION__, __LINE__);
+ goto out_free_addr;
+#endif
+ break;
+ }
+
+ if (appctx_finalize_startup(appctx, hc->px, &hc->req.buf) == -1) {
+ ha_alert("httpclient: Failed to initialize appctx %s:%d.\n", __FUNCTION__, __LINE__);
+ goto out_free_addr;
+ }
+
+ s = appctx_strm(appctx);
+ s->target = target;
+ /* set the "timeout server" */
+ s->scb->ioto = hc->timeout_server;
+
+ if (doresolve) {
+ /* in order to do the set-dst we need to put the address on the front */
+ s->scf->dst = addr;
+ } else {
+ /* in cases we don't use the resolve we already have the address
+ * and must put it on the backend side, some of the cases are
+ * not meant to be used on the frontend (sockpair, unix socket etc.) */
+ s->scb->dst = addr;
+ }
+
+ s->scb->flags |= (SC_FL_RCV_ONCE|SC_FL_NOLINGER);
+ s->flags |= SF_ASSIGNED;
+
+ /* applet is waiting for data */
+ applet_need_more_data(appctx);
+ appctx_wakeup(appctx);
+
+ hc->appctx = appctx;
+ hc->flags |= HTTPCLIENT_FS_STARTED;
+
+ /* The request was transferred when the stream was created. So switch
+ * directly to REQ_BODY or RES_STLINE state
+ */
+ appctx->st0 = (hc->ops.req_payload ? HTTPCLIENT_S_REQ_BODY : HTTPCLIENT_S_RES_STLINE);
+ return 0;
+
+ out_free_addr:
+ sockaddr_free(&addr);
+ out_error:
+ return -1;
+}
+
+void httpclient_applet_release(struct appctx *appctx)
+{
+ struct httpclient *hc = appctx->svcctx;
+
+ /* mark the httpclient as ended */
+ hc->flags |= HTTPCLIENT_FS_ENDED;
+ /* the applet is leaving, remove the ptr so we don't try to call it
+ * again from the caller */
+ hc->appctx = NULL;
+
+ if (hc->ops.res_end)
+ hc->ops.res_end(hc);
+
+	/* destroy the httpclient when set to autokill */
+ if (hc->flags & HTTPCLIENT_FA_AUTOKILL) {
+ httpclient_destroy(hc);
+ }
+
+ /* be sure not to use this ptr anymore if the IO handler is called a
+ * last time */
+ appctx->svcctx = NULL;
+
+ return;
+}
+
+/* HTTP client applet */
+static struct applet httpclient_applet = {
+ .obj_type = OBJ_TYPE_APPLET,
+ .name = "<HTTPCLIENT>",
+ .fct = httpclient_applet_io_handler,
+ .init = httpclient_applet_init,
+ .release = httpclient_applet_release,
+};
+
+
+static int httpclient_resolve_init(struct proxy *px)
+{
+ struct act_rule *rule;
+ int i;
+ char *do_resolve = NULL;
+ char *http_rules[][11] = {
+ { "set-var(txn.hc_ip)", "dst", "" },
+ { do_resolve, "hdr(Host),host_only", "if", "{", "var(txn.hc_ip)", "-m", "ip", "0.0.0.0", "}", "" },
+ { "return", "status", "503", "if", "{", "var(txn.hc_ip)", "-m", "ip", "0.0.0.0", "}", "" },
+ { "capture", "var(txn.hc_ip)", "len", "40", "" },
+ { "set-dst", "var(txn.hc_ip)", "" },
+ { "" }
+ };
+
+
+ if (resolvers_disabled)
+ return 0;
+
+ if (!resolvers_id)
+ resolvers_id = strdup("default");
+
+ memprintf(&do_resolve, "do-resolve(txn.hc_ip,%s%s%s)", resolvers_id, resolvers_prefer ? "," : "", resolvers_prefer ? resolvers_prefer : "");
+ http_rules[1][0] = do_resolve;
+
+ /* Try to create the default resolvers section */
+ resolvers_create_default();
+
+ /* if the resolver does not exist and no hard_error was set, simply ignore resolving */
+ if (!find_resolvers_by_id(resolvers_id) && !hard_error_resolvers) {
+ free(do_resolve);
+ return 0;
+ }
+
+
+ for (i = 0; *http_rules[i][0] != '\0'; i++) {
+ rule = parse_http_req_cond((const char **)http_rules[i], "httpclient", 0, px);
+ if (!rule) {
+ free(do_resolve);
+ ha_alert("Couldn't setup the httpclient resolver.\n");
+ return 1;
+ }
+ LIST_APPEND(&px->http_req_rules, &rule->list);
+ }
+
+ free(do_resolve);
+ return 0;
+}
+
+/*
+ * Creates an internal proxy which will be used for httpclient.
+ * This will allocate 2 servers (raw and ssl) and 1 proxy.
+ *
+ * This function must be called from a precheck callback.
+ *
+ * Return a proxy or NULL.
+ */
+struct proxy *httpclient_create_proxy(const char *id)
+{
+ int err_code = ERR_NONE;
+ char *errmsg = NULL;
+ struct proxy *px = NULL;
+ struct server *srv_raw = NULL;
+#ifdef USE_OPENSSL
+ struct server *srv_ssl = NULL;
+#endif
+
+ if (global.mode & MODE_MWORKER_WAIT)
+ return ERR_NONE;
+
+ px = alloc_new_proxy(id, PR_CAP_LISTEN|PR_CAP_INT|PR_CAP_HTTPCLIENT, &errmsg);
+ if (!px) {
+ memprintf(&errmsg, "couldn't allocate proxy.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+
+ px->options |= PR_O_WREQ_BODY;
+ px->retry_type |= PR_RE_CONN_FAILED | PR_RE_DISCONNECTED | PR_RE_TIMEOUT;
+ px->options2 |= PR_O2_INDEPSTR;
+ px->mode = PR_MODE_HTTP;
+ px->maxconn = 0;
+ px->accept = NULL;
+ px->conn_retries = httpclient_retries;
+ px->timeout.connect = httpclient_timeout_connect;
+ px->timeout.client = TICK_ETERNITY;
+	/* The HTTP Client uses the "option httplog" with the global loggers */
+ px->conf.logformat_string = httpclient_log_format;
+ px->http_needed = 1;
+
+ /* clear HTTP server */
+ srv_raw = new_server(px);
+ if (!srv_raw) {
+ memprintf(&errmsg, "out of memory.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+
+ srv_settings_cpy(srv_raw, &px->defsrv, 0);
+ srv_raw->iweight = 0;
+ srv_raw->uweight = 0;
+ srv_raw->xprt = xprt_get(XPRT_RAW);
+ srv_raw->flags |= SRV_F_MAPPORTS; /* needed to apply the port change with resolving */
+ srv_raw->id = strdup("<HTTPCLIENT>");
+ if (!srv_raw->id) {
+ memprintf(&errmsg, "out of memory.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+
+#ifdef USE_OPENSSL
+ /* SSL HTTP server */
+ srv_ssl = new_server(px);
+ if (!srv_ssl) {
+ memprintf(&errmsg, "out of memory.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+ srv_settings_cpy(srv_ssl, &px->defsrv, 0);
+ srv_ssl->iweight = 0;
+ srv_ssl->uweight = 0;
+ srv_ssl->xprt = xprt_get(XPRT_SSL);
+ srv_ssl->use_ssl = 1;
+ srv_ssl->flags |= SRV_F_MAPPORTS; /* needed to apply the port change with resolving */
+ srv_ssl->id = strdup("<HTTPSCLIENT>");
+ if (!srv_ssl->id) {
+ memprintf(&errmsg, "out of memory.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ if (ssl_sock_parse_alpn("h2,http/1.1", &srv_ssl->ssl_ctx.alpn_str, &srv_ssl->ssl_ctx.alpn_len, &errmsg) != 0) {
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+#endif
+ srv_ssl->ssl_ctx.verify = httpclient_ssl_verify;
+ /* if the verify is required, try to load the system CA */
+ if (httpclient_ssl_verify == SSL_SOCK_VERIFY_REQUIRED) {
+
+ srv_ssl->ssl_ctx.ca_file = strdup(httpclient_ssl_ca_file ? httpclient_ssl_ca_file : "@system-ca");
+ if (!__ssl_store_load_locations_file(srv_ssl->ssl_ctx.ca_file, 1, CAFILE_CERT, !hard_error_ssl)) {
+ /* if we failed to load the ca-file, only quits in
+ * error with hard_error, otherwise just disable the
+ * feature. */
+ if (hard_error_ssl) {
+ memprintf(&errmsg, "cannot initialize SSL verify with 'ca-file \"%s\"'.", srv_ssl->ssl_ctx.ca_file);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ } else {
+ ha_free(&srv_ssl->ssl_ctx.ca_file);
+ srv_drop(srv_ssl);
+ srv_ssl = NULL;
+ }
+ }
+ }
+
+#endif
+
+ /* add the proxy in the proxy list only if everything is successful */
+ px->next = proxies_list;
+ proxies_list = px;
+
+ if (httpclient_resolve_init(px) != 0) {
+ memprintf(&errmsg, "cannot initialize resolvers.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+
+ /* link the 2 servers in the proxy */
+ srv_raw->next = px->srv;
+ px->srv = srv_raw;
+
+#ifdef USE_OPENSSL
+ if (srv_ssl) {
+ srv_ssl->next = px->srv;
+ px->srv = srv_ssl;
+ }
+#endif
+
+
+err:
+ if (err_code & ERR_CODE) {
+ ha_alert("httpclient: cannot initialize: %s\n", errmsg);
+ free(errmsg);
+ srv_drop(srv_raw);
+#ifdef USE_OPENSSL
+ srv_drop(srv_ssl);
+#endif
+ free_proxy(px);
+
+ return NULL;
+ }
+ return px;
+}
+
+/*
+ * Initialize the proxy for the HTTP client with 2 servers, one for raw HTTP,
+ * the other for HTTPS.
+ */
+static int httpclient_precheck()
+{
+ /* initialize the default httpclient_proxy which is used for the CLI and the lua */
+
+ httpclient_proxy = httpclient_create_proxy("<HTTPCLIENT>");
+ if (!httpclient_proxy)
+ return 1;
+
+ return 0;
+}
+
+/* Initialize the logs for every proxy dedicated to the httpclient */
+static int httpclient_postcheck_proxy(struct proxy *curproxy)
+{
+ int err_code = ERR_NONE;
+ struct logger *logger;
+ char *errmsg = NULL;
+#ifdef USE_OPENSSL
+ struct server *srv = NULL;
+ struct server *srv_ssl = NULL;
+#endif
+
+ if (global.mode & MODE_MWORKER_WAIT)
+ return ERR_NONE;
+
+ if (!(curproxy->cap & PR_CAP_HTTPCLIENT))
+ return ERR_NONE; /* nothing to do */
+
+ /* copy logs from "global" log list */
+ list_for_each_entry(logger, &global.loggers, list) {
+ struct logger *node = dup_logger(logger);
+
+ if (!node) {
+ memprintf(&errmsg, "out of memory.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+ LIST_APPEND(&curproxy->loggers, &node->list);
+ }
+ if (curproxy->conf.logformat_string) {
+ curproxy->conf.args.ctx = ARGC_LOG;
+ if (!parse_logformat_string(curproxy->conf.logformat_string, curproxy, &curproxy->logformat,
+ LOG_OPT_MANDATORY|LOG_OPT_MERGE_SPACES,
+ SMP_VAL_FE_LOG_END, &errmsg)) {
+ memprintf(&errmsg, "failed to parse log-format : %s.", errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto err;
+ }
+ curproxy->conf.args.file = NULL;
+ curproxy->conf.args.line = 0;
+ }
+
+#ifdef USE_OPENSSL
+ /* initialize the SNI for the SSL servers */
+
+ for (srv = curproxy->srv; srv != NULL; srv = srv->next) {
+ if (srv->xprt == xprt_get(XPRT_SSL)) {
+ srv_ssl = srv;
+ }
+ }
+ if (srv_ssl && !srv_ssl->sni_expr) {
+ /* init the SNI expression */
+ /* always use the host header as SNI, without the port */
+ srv_ssl->sni_expr = strdup("req.hdr(host),field(1,:)");
+ err_code |= server_parse_sni_expr(srv_ssl, curproxy, &errmsg);
+ if (err_code & ERR_CODE) {
+ memprintf(&errmsg, "failed to configure sni: %s.", errmsg);
+ goto err;
+ }
+ }
+#endif
+
+err:
+ if (err_code & ERR_CODE) {
+ ha_alert("httpclient: failed to initialize: %s\n", errmsg);
+ free(errmsg);
+
+ }
+ return err_code;
+}
+
+/* initialize the proxy and servers for the HTTP client */
+
+REGISTER_PRE_CHECK(httpclient_precheck);
+REGISTER_POST_PROXY_CHECK(httpclient_postcheck_proxy);
+
+static int httpclient_parse_global_resolvers(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ /* any configuration should set the hard_error flag */
+ hard_error_resolvers = 1;
+
+ free(resolvers_id);
+ resolvers_id = strdup(args[1]);
+
+ return 0;
+}
+
+/* config parser for global "httpclient.resolvers.disabled", accepts "on" or "off" */
+static int httpclient_parse_global_resolvers_disabled(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ resolvers_disabled = 1;
+ else if (strcmp(args[1], "off") == 0)
+ resolvers_disabled = 0;
+ else {
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+static int httpclient_parse_global_prefer(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ /* any configuration should set the hard_error flag */
+ hard_error_resolvers = 1;
+
+
+ if (strcmp(args[1],"ipv4") == 0)
+ resolvers_prefer = "ipv4";
+ else if (strcmp(args[1],"ipv6") == 0)
+ resolvers_prefer = "ipv6";
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'ipv4' or 'ipv6' as argument.\n", file, line, args[0]);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+#ifdef USE_OPENSSL
+static int httpclient_parse_global_ca_file(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ /* any configuration should set the hard_error flag */
+ hard_error_ssl = 1;
+
+ free(httpclient_ssl_ca_file);
+ httpclient_ssl_ca_file = strdup(args[1]);
+
+ return 0;
+}
+
+static int httpclient_parse_global_verify(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ /* any configuration should set the hard_error flag */
+ hard_error_ssl = 1;
+
+ if (strcmp(args[1],"none") == 0)
+ httpclient_ssl_verify = SSL_SOCK_VERIFY_NONE;
+ else if (strcmp(args[1],"required") == 0)
+ httpclient_ssl_verify = SSL_SOCK_VERIFY_REQUIRED;
+ else {
+ ha_alert("parsing [%s:%d] : '%s' expects 'none' or 'required' as argument.\n", file, line, args[0]);
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* USE_OPENSSL */
+
+static int httpclient_parse_global_retries(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
+ file, line, args[0]);
+ return -1;
+ }
+ httpclient_retries = atol(args[1]);
+
+ return 0;
+}
+
+static int httpclient_parse_global_timeout_connect(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *res;
+ unsigned timeout;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n",
+ file, line, args[0]);
+ return -1;
+ }
+
+ res = parse_time_err(args[1], &timeout, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in '%s'", *res, args[0]);
+ return -1;
+ }
+
+ if (*args[2] != 0) {
+ memprintf(err, "'%s' : unexpected extra argument '%s' after value '%s'.", args[0], args[2], args[1]);
+ return -1;
+ }
+
+ httpclient_timeout_connect = MS_TO_TICKS(timeout);
+
+ return 0;
+}
+
+
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "httpclient.resolvers.disabled", httpclient_parse_global_resolvers_disabled },
+ { CFG_GLOBAL, "httpclient.resolvers.id", httpclient_parse_global_resolvers },
+ { CFG_GLOBAL, "httpclient.resolvers.prefer", httpclient_parse_global_prefer },
+ { CFG_GLOBAL, "httpclient.retries", httpclient_parse_global_retries },
+ { CFG_GLOBAL, "httpclient.timeout.connect", httpclient_parse_global_timeout_connect },
+#ifdef USE_OPENSSL
+ { CFG_GLOBAL, "httpclient.ssl.verify", httpclient_parse_global_verify },
+ { CFG_GLOBAL, "httpclient.ssl.ca-file", httpclient_parse_global_ca_file },
+#endif
+ { 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/http_conv.c b/src/http_conv.c
new file mode 100644
index 0000000..cf515a8
--- /dev/null
+++ b/src/http_conv.c
@@ -0,0 +1,453 @@
+/*
+ * HTTP sample conversion
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/chunk.h>
+#include <haproxy/http.h>
+#include <haproxy/pool.h>
+#include <haproxy/sample.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+/* argument-check callback for the http_date converter: validates the
+ * optional time-unit argument by delegating to the shared date-unit checker
+ */
+static int smp_check_http_date_unit(struct arg *args, struct sample_conv *conv,
+                                   const char *file, int line, char **err)
+{
+	return smp_check_date_unit(args, err);
+}
+
+/* takes an UINT value on input supposed to represent the time since EPOCH,
+ * adds an optional offset found in args[0] and emits a string representing
+ * the date in RFC-1123/5322 format. If optional unit param in args[1] is
+ * provided, decode timestamp in milliseconds ("ms") or microseconds("us"),
+ * and use relevant output date format (with the fractional second part).
+ */
+static int sample_conv_http_date(const struct arg *args, struct sample *smp, void *private)
+{
+	const char day[7][4] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
+	const char mon[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
+	struct buffer *temp;
+	struct tm tm;
+	int sec_frac = 0;   /* sub-second remainder (ms or us) kept aside for display */
+	time_t curr_date;
+
+	/* add offset */
+	if (args[0].type == ARGT_SINT)
+		smp->data.u.sint += args[0].data.sint;
+
+	/* report in milliseconds: split into seconds + millisecond remainder.
+	 * NOTE(review): sec_frac can be negative for negative input
+	 * timestamps — confirm whether such inputs are expected here.
+	 */
+	if (args[1].type == ARGT_SINT && args[1].data.sint == TIME_UNIT_MS) {
+		sec_frac = smp->data.u.sint % 1000;
+		smp->data.u.sint /= 1000;
+	}
+	/* report in microseconds */
+	else if (args[1].type == ARGT_SINT && args[1].data.sint == TIME_UNIT_US) {
+		sec_frac = smp->data.u.sint % 1000000;
+		smp->data.u.sint /= 1000000;
+	}
+
+	/* With high numbers, the date returned can be negative, the 55 bits mask prevent this. */
+	curr_date = smp->data.u.sint & 0x007fffffffffffffLL;
+
+	get_gmtime(curr_date, &tm);
+
+	temp = get_trash_chunk();
+	/* when a sub-second unit was requested, append the fractional part */
+	if (args[1].type == ARGT_SINT && args[1].data.sint != TIME_UNIT_S) {
+		temp->data = snprintf(temp->area, temp->size - temp->data,
+		                      "%s, %02d %s %04d %02d:%02d:%02d.%d GMT",
+		                      day[tm.tm_wday], tm.tm_mday, mon[tm.tm_mon],
+		                      1900+tm.tm_year,
+		                      tm.tm_hour, tm.tm_min, tm.tm_sec, sec_frac);
+	} else {
+		temp->data = snprintf(temp->area, temp->size - temp->data,
+		                      "%s, %02d %s %04d %02d:%02d:%02d GMT",
+		                      day[tm.tm_wday], tm.tm_mday, mon[tm.tm_mon],
+		                      1900+tm.tm_year,
+		                      tm.tm_hour, tm.tm_min, tm.tm_sec);
+	}
+
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/* Parses an Accept-Language style value carried by the sample and selects,
+ * among the languages configured in args[0] (a ';'-separated list), the one
+ * with the highest q-factor. args[1] optionally provides a default value
+ * used when nothing matches. The output points into the configuration
+ * string, hence the SMP_F_CONST flag. Returns 1 when a language (or the
+ * default) was selected, 0 otherwise.
+ */
+static int sample_conv_q_preferred(const struct arg *args, struct sample *smp, void *private)
+{
+	const char *al = smp->data.u.str.area;        /* parsing cursor in the header value */
+	const char *end = al + smp->data.u.str.data;  /* end of the header value */
+	const char *token;
+	int toklen;
+	int qvalue;
+	const char *str;
+	const char *w;
+	int best_q = 0;
+
+	/* Set the constant to the sample, because the output of the
+	 * function will be peek in the constant configuration string.
+	 */
+	smp->flags |= SMP_F_CONST;
+	smp->data.u.str.size = 0;
+	smp->data.u.str.area = "";
+	smp->data.u.str.data = 0;
+
+	/* Parse the accept language */
+	while (1) {
+
+		/* Jump spaces, quit if the end is detected. */
+		while (al < end && isspace((unsigned char)*al))
+			al++;
+		if (al >= end)
+			break;
+
+		/* Start of the first word. */
+		token = al;
+
+		/* Look for separator: isspace(), ',' or ';'. Next value if 0 length word. */
+		while (al < end && *al != ';' && *al != ',' && !isspace((unsigned char)*al))
+			al++;
+		if (al == token)
+			goto expect_comma;
+
+		/* Length of the token. */
+		toklen = al - token;
+		qvalue = 1000;   /* default q-factor when none is specified */
+
+		/* Check if the token exists in the list. If the token not exists,
+		 * jump to the next token. <w> tracks the start of the current
+		 * configured language, <str> scans up to the next ';' or NUL.
+		 */
+		str = args[0].data.str.area;
+		w = str;
+		while (1) {
+			if (*str == ';' || *str == '\0') {
+				if (http_language_range_match(token, toklen, w, str - w))
+					goto look_for_q;
+				if (*str == '\0')
+					goto expect_comma;
+				w = str + 1;
+			}
+			str++;
+		}
+		/* NOTE(review): the statement below is unreachable — the
+		 * while(1) loop above only exits through the gotos.
+		 */
+		goto expect_comma;
+
+look_for_q:
+
+		/* Jump spaces, quit if the end is detected. */
+		while (al < end && isspace((unsigned char)*al))
+			al++;
+		if (al >= end)
+			goto process_value;
+
+		/* If ',' is found, process the result */
+		if (*al == ',')
+			goto process_value;
+
+		/* If the character is different from ';', look
+		 * for the end of the header part in best effort.
+		 */
+		if (*al != ';')
+			goto expect_comma;
+
+		/* Assumes that the char is ';', now expect "q=". */
+		al++;
+
+		/* Jump spaces, process value if the end is detected. */
+		while (al < end && isspace((unsigned char)*al))
+			al++;
+		if (al >= end)
+			goto process_value;
+
+		/* Expect 'q'. If no 'q', continue in best effort */
+		if (*al != 'q')
+			goto process_value;
+		al++;
+
+		/* Jump spaces, process value if the end is detected. */
+		while (al < end && isspace((unsigned char)*al))
+			al++;
+		if (al >= end)
+			goto process_value;
+
+		/* Expect '='. If no '=', continue in best effort */
+		if (*al != '=')
+			goto process_value;
+		al++;
+
+		/* Jump spaces, process value if the end is detected. */
+		while (al < end && isspace((unsigned char)*al))
+			al++;
+		if (al >= end)
+			goto process_value;
+
+		/* Parse the q value (scaled by 1000, e.g. "0.5" -> 500). */
+		qvalue = http_parse_qvalue(al, &al);
+
+process_value:
+
+		/* If the new q value is the best q value, then store the associated
+		 * language in the response. If qvalue is the biggest value (1000),
+		 * break the process.
+		 */
+		if (qvalue > best_q) {
+			smp->data.u.str.area = (char *)w;
+			smp->data.u.str.data = str - w;
+			if (qvalue >= 1000)
+				break;
+			best_q = qvalue;
+		}
+
+expect_comma:
+
+		/* Expect comma or end. If the end is detected, quit the loop. */
+		while (al < end && *al != ',')
+			al++;
+		if (al >= end)
+			break;
+
+		/* Comma is found, jump it and restart the analyzer. */
+		al++;
+	}
+
+	/* Set default value if required. */
+	if (smp->data.u.str.data == 0 && args[1].type == ARGT_STR) {
+		smp->data.u.str.area = args[1].data.str.area;
+		smp->data.u.str.data = args[1].data.str.data;
+	}
+
+	/* Return true only if a matching language was found. */
+	return smp->data.u.str.data != 0;
+}
+
+/* This converter url-decodes the input string in place. The optional
+ * integer argument args[0], when non-zero, enables form-payload mode
+ * (presumably '+' decoded as space — see url_decode()). Returns 1 on
+ * success with the decoded string in the sample, 0 on decoding error.
+ */
+static int sample_conv_url_dec(const struct arg *args, struct sample *smp, void *private)
+{
+	int in_form = 0;
+	int len;
+
+	/* If the constant flag is set or if not size is available at
+	 * the end of the buffer, copy the string in other buffer
+	 * before decoding.
+	 */
+	if (smp->flags & SMP_F_CONST || smp->data.u.str.size <= smp->data.u.str.data) {
+		struct buffer *str = get_trash_chunk();
+		memcpy(str->area, smp->data.u.str.area, smp->data.u.str.data);
+		smp->data.u.str.area = str->area;
+		smp->data.u.str.size = str->size;
+		smp->flags &= ~SMP_F_CONST;
+	}
+
+	/* Add final \0 required by url_decode(), and convert the input string.
+	 * NOTE(review): assumes data < size here (true after the copy above as
+	 * long as the trash chunk is larger than the sample) — verify.
+	 */
+	smp->data.u.str.area[smp->data.u.str.data] = '\0';
+
+	if (args[0].type == ARGT_SINT)
+		in_form = !!args[0].data.sint;
+
+	len = url_decode(smp->data.u.str.area, in_form);
+	if (len < 0)
+		return 0;
+	smp->data.u.str.data = len;
+	return 1;
+}
+
+/* url-encode types and encode maps */
+enum encode_type {
+	ENC_QUERY = 0,   /* query-string encoding (the only supported type) */
+};
+/* 256-bit map of characters that must be percent-encoded in a query string;
+ * filled at boot by sample_conf_url_enc_init()
+ */
+long query_encode_map[(256 / 8) / sizeof(long)];
+
+/* Argument-check callback for url_enc: validates the optional encode-type
+ * string ("" or "query") and replaces the string argument with the
+ * corresponding ARGT_SINT encode type. Returns 1 on success, 0 on error
+ * with <err> filled.
+ */
+static int sample_conv_url_enc_check(struct arg *arg, struct sample_conv *conv,
+                                     const char *file, int line, char **err)
+{
+	const char *type = arg->data.str.area;
+
+	if (strcmp(type, "") != 0 && strcmp(type, "query") != 0) {
+		memprintf(err, "Unexpected encode type. "
+			  "Allowed value is 'query'");
+		return 0;
+	}
+
+	/* both the empty string and "query" select query-string encoding */
+	chunk_destroy(&arg->data.str);
+	arg->type = ARGT_SINT;
+	arg->data.sint = ENC_QUERY;
+	return 1;
+}
+
+/* Initializes the url-encode character maps at boot */
+static void sample_conf_url_enc_init()
+{
+	int ch;
+
+	memset(query_encode_map, 0, sizeof(query_encode_map));
+	/* per rfc3986, only the unreserved characters ALPHA / DIGIT / "-" /
+	 * "." / "_" / "~" are kept unchanged in a query string; every other
+	 * byte gets flagged for percent-encoding
+	 */
+	for (ch = 0; ch < 256; ch++) {
+		int unreserved = (ch >= 'a' && ch <= 'z') ||
+		                 (ch >= 'A' && ch <= 'Z') ||
+		                 (ch >= '0' && ch <= '9') ||
+		                 ch == '-' || ch == '.' || ch == '_' || ch == '~';
+
+		if (!unreserved)
+			ha_bit_set(ch, query_encode_map);
+	}
+}
+
+INITCALL0(STG_PREPARE, sample_conf_url_enc_init);
+
+/* This converter url-encodes the input string. Only the query-string
+ * encoding type is supported for now. args[0] holds the encode type as an
+ * ARGT_SINT (converted by sample_conv_url_enc_check). Returns 1 on success
+ * with the encoded string in the sample, 0 on error or truncation.
+ */
+static int sample_conv_url_enc(const struct arg *args, struct sample *smp, void
+		*private)
+{
+	enum encode_type enc_type;
+	struct buffer *trash = get_trash_chunk();
+	long *encode_map;
+	char *ret;
+
+	/* the check callback guarantees an ARGT_SINT encode type here
+	 * (previous code pre-assigned ENC_QUERY only to overwrite it: dead store)
+	 */
+	enc_type = args->data.sint;
+
+	if (enc_type == ENC_QUERY)
+		encode_map = query_encode_map;
+	else
+		return 0;
+
+	ret = encode_chunk(trash->area, trash->area + trash->size, '%',
+	                   encode_map, &smp->data.u.str);
+	/* a non-NUL terminating byte means the output did not fit */
+	if (ret == NULL || *ret != '\0')
+		return 0;
+	trash->data = ret - trash->area;
+	smp->data.u.str = *trash;
+	return 1;
+}
+
+/* Stores the input sample into the request capture slot designated by the
+ * mandatory integer argument (a capture slot id). Returns 1 on success,
+ * 0 on failure (bad argument, no stream, unknown slot or allocation error).
+ * The sample itself is passed through unchanged.
+ */
+static int smp_conv_req_capture(const struct arg *args, struct sample *smp, void *private)
+{
+	struct proxy *fe;
+	int idx, i;
+	struct cap_hdr *hdr;
+	int len;
+
+	if (args->type != ARGT_SINT)
+		return 0;
+
+	if (!smp->strm)
+		return 0;
+
+	fe = strm_fe(smp->strm);
+	idx = args->data.sint;
+
+	/* Check the availability of the capture id. */
+	if (idx > fe->nb_req_cap - 1)
+		return 0;
+
+	/* Look for the original configuration. */
+	for (hdr = fe->req_cap, i = fe->nb_req_cap - 1;
+	     hdr != NULL && i != idx ;
+	     i--, hdr = hdr->next);
+	if (!hdr)
+		return 0;
+
+	/* check for the memory allocation */
+	if (smp->strm->req_cap[hdr->index] == NULL)
+		smp->strm->req_cap[hdr->index] = pool_alloc(hdr->pool);
+	if (smp->strm->req_cap[hdr->index] == NULL)
+		return 0;
+
+	/* Truncate to the configured capture length. */
+	len = smp->data.u.str.data;
+	if (len > hdr->len)
+		len = hdr->len;
+
+	/* Capture input data. Use hdr->index — the slot that was allocated
+	 * above — rather than the raw <idx>, so the write always targets the
+	 * allocated slot even if list order and slot index ever diverge.
+	 */
+	memcpy(smp->strm->req_cap[hdr->index], smp->data.u.str.area, len);
+	smp->strm->req_cap[hdr->index][len] = '\0';
+
+	return 1;
+}
+
+/* Stores the input sample into the response capture slot designated by the
+ * mandatory integer argument (a capture slot id). Returns 1 on success,
+ * 0 on failure (bad argument, no stream, unknown slot or allocation error).
+ * The sample itself is passed through unchanged.
+ */
+static int smp_conv_res_capture(const struct arg *args, struct sample *smp, void *private)
+{
+	struct proxy *fe;
+	int idx, i;
+	struct cap_hdr *hdr;
+	int len;
+
+	if (args->type != ARGT_SINT)
+		return 0;
+
+	if (!smp->strm)
+		return 0;
+
+	fe = strm_fe(smp->strm);
+	idx = args->data.sint;
+
+	/* Check the availability of the capture id. */
+	if (idx > fe->nb_rsp_cap - 1)
+		return 0;
+
+	/* Look for the original configuration. */
+	for (hdr = fe->rsp_cap, i = fe->nb_rsp_cap - 1;
+	     hdr != NULL && i != idx ;
+	     i--, hdr = hdr->next);
+	if (!hdr)
+		return 0;
+
+	/* check for the memory allocation */
+	if (smp->strm->res_cap[hdr->index] == NULL)
+		smp->strm->res_cap[hdr->index] = pool_alloc(hdr->pool);
+	if (smp->strm->res_cap[hdr->index] == NULL)
+		return 0;
+
+	/* Truncate to the configured capture length. */
+	len = smp->data.u.str.data;
+	if (len > hdr->len)
+		len = hdr->len;
+
+	/* Capture input data. Use hdr->index — the slot that was allocated
+	 * above — rather than the raw <idx>, so the write always targets the
+	 * allocated slot even if list order and slot index ever diverge.
+	 */
+	memcpy(smp->strm->res_cap[hdr->index], smp->data.u.str.area, len);
+	smp->strm->res_cap[hdr->index][len] = '\0';
+
+	return 1;
+}
+
+/************************************************************************/
+/* All supported converter keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Fields: keyword, converter function, argument mask, argument check
+ * function, input sample type, output sample type.
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+	{ "http_date",      sample_conv_http_date,    ARG2(0,SINT,STR),     smp_check_http_date_unit, SMP_T_SINT, SMP_T_STR},
+	{ "language",       sample_conv_q_preferred,  ARG2(1,STR,STR),      NULL,   SMP_T_STR,  SMP_T_STR},
+	{ "capture-req",    smp_conv_req_capture,     ARG1(1,SINT),         NULL,   SMP_T_STR,  SMP_T_STR},
+	{ "capture-res",    smp_conv_res_capture,     ARG1(1,SINT),         NULL,   SMP_T_STR,  SMP_T_STR},
+	{ "url_dec",        sample_conv_url_dec,      ARG1(0,SINT),         NULL,   SMP_T_STR,  SMP_T_STR},
+	{ "url_enc",        sample_conv_url_enc,      ARG1(1,STR),          sample_conv_url_enc_check, SMP_T_STR, SMP_T_STR},
+	{ NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/http_ext.c b/src/http_ext.c
new file mode 100644
index 0000000..a367519
--- /dev/null
+++ b/src/http_ext.c
@@ -0,0 +1,1881 @@
+/*
+ * HTTP extensions logic and helpers
+ *
+ * Copyright 2022 HAProxy Technologies
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2.1 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/sample.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_ext.h>
+#include <haproxy/chunk.h>
+#include <haproxy/stream.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/arg.h>
+#include <haproxy/initcall.h>
+#include <haproxy/tools.h>
+
+/*
+ * =========== ANALYZE ===========
+ * below are http process/ana helpers
+ */
+
+/* checks if <input> contains rfc7239 compliant port
+ * Returns 1 for success and 0 for failure
+ * if <port> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_port(struct ist *input, uint16_t *port)
+{
+	const char *ptr = istptr(*input);
+	uint32_t value = 0;
+	int pos = 0;
+
+	/* parse the digits ourselves: strtol() would require a
+	 * null-terminated string which <input> does not guarantee
+	 */
+	while (pos < istlen(*input) && isdigit((unsigned char)ptr[pos])) {
+		value = (value * 10) + (ptr[pos] - '0');
+		if (value > 65535)
+			return 0; /* out of the valid port range */
+		pos += 1;
+	}
+	if (!value)
+		return 0; /* no digits, or a null port: invalid */
+	/* valid port: report it and consume the parsed chars */
+	if (port)
+		*port = (uint16_t)value;
+	*input = istadv(*input, pos);
+	return 1;
+}
+
+/* check if char is a valid obfuscated identifier char
+ * (according to 7239 RFC)
+ * Returns non zero value for valid char
+ */
+static inline int http_7239_valid_obfsc(char c)
+{
+	/* rfc7239 obfuscated identifiers: ALPHA / DIGIT / "." / "-" / "_" */
+	if (isalnum((unsigned char)c))
+		return 1;
+	return c == '.' || c == '-' || c == '_';
+}
+
+/* checks if <input> contains rfc7239 compliant obfuscated identifier
+ * Returns 1 for success and 0 for failure
+ * if <obfs> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_obfs(struct ist *input, struct ist *obfs)
+{
+	int it = 0;
+
+	/* remember where the token starts so that <obfs> points into <input> */
+	if (obfs)
+		obfs->ptr = input->ptr;
+
+	/* consume valid obfuscated-identifier chars up to the next ';' */
+	while (it < istlen(*input) && istptr(*input)[it] != ';') {
+		if (!http_7239_valid_obfsc(istptr(*input)[it]))
+			break; /* end of obfs token */
+		it += 1;
+	}
+	if (obfs)
+		obfs->len = it;
+	*input = istadv(*input, it);
+	return !!it; /* at least one char is required for a valid obfs */
+}
+
+/* checks if <input> contains rfc7239 compliant IPV4 address
+ * Returns 1 for success and 0 for failure
+ * if <ip> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_ipv4(struct ist *input, struct in_addr *ip)
+{
+	char ip4[INET_ADDRSTRLEN];
+	unsigned char buf[sizeof(struct in_addr)];
+	int it = 0;
+
+	/* copy digits and dots into a local null-terminated buffer because
+	 * inet_pton() requires a null-terminated string
+	 */
+	while (it < istlen(*input) && it < (sizeof(ip4) - 1)) {
+		if (!isdigit((unsigned char)istptr(*input)[it]) &&
+		    istptr(*input)[it] != '.')
+			break; /* no more ip4 char */
+		ip4[it] = istptr(*input)[it];
+		it += 1;
+	}
+	ip4[it] = 0;
+	/* let inet_pton() do the actual syntax/range validation */
+	if (inet_pton(AF_INET, ip4, buf) != 1)
+		return 0; /* invalid ip4 addr */
+	/* ok */
+	if (ip)
+		memcpy(ip, buf, sizeof(buf));
+	*input = istadv(*input, it);
+	return 1;
+}
+
+/* checks if <input> contains rfc7239 compliant IPV6 address
+ * assuming input.len >= 1 and first char is '['
+ * Returns 1 for success and 0 for failure
+ * if <ip> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_ipv6(struct ist *input, struct in6_addr *ip)
+{
+	char ip6[INET6_ADDRSTRLEN];
+	unsigned char buf[sizeof(struct in6_addr)];
+	int it = 0;
+
+	*input = istnext(*input); /* skip '[' leading char */
+	/* copy hex digits and colons into a local null-terminated buffer
+	 * because inet_pton() requires a null-terminated string
+	 */
+	while (it < istlen(*input) &&
+	       it < (sizeof(ip6) - 1)) {
+		if (!isalnum((unsigned char)istptr(*input)[it]) &&
+		    istptr(*input)[it] != ':')
+			break; /* no more ip6 char */
+		ip6[it] = istptr(*input)[it];
+		it += 1;
+	}
+	ip6[it] = 0;
+	/* the address must be immediately followed by the closing bracket */
+	if ((istlen(*input)-it) < 1 || istptr(*input)[it] != ']')
+		return 0; /* missing ending "]" char */
+	it += 1;
+	if (inet_pton(AF_INET6, ip6, buf) != 1)
+		return 0; /* invalid ip6 addr */
+	/* ok */
+	if (ip)
+		memcpy(ip, buf, sizeof(buf));
+	*input = istadv(*input, it);
+	return 1;
+}
+
+/* checks if <input> contains rfc7239 compliant host
+ * <quoted> is used to determine if the current input is being extracted
+ * from a quoted (non zero) or unquoted (zero) token, as the parsing rules
+ * differ whether the input is quoted or not according to the rfc.
+ * Returns 1 for success and 0 for failure
+ * if <host> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_host(struct ist *input, struct ist *host, int quoted)
+{
+	if (istlen(*input) < 1)
+		return 0; /* invalid input */
+
+	/* remember where the host starts so that <host> points into <input> */
+	if (host)
+		host->ptr = input->ptr;
+
+	if (quoted && *istptr(*input) == '[') {
+		/* raw ipv6 address: only allowed inside a quoted token */
+		if (!http_7239_extract_ipv6(input, NULL))
+			return 0; /* invalid addr */
+	}
+	else {
+		/* ipv4 or dns: consume alphanumerics and dots */
+		while (istlen(*input)) {
+			if (!isalnum((unsigned char)*istptr(*input)) &&
+			    *istptr(*input) != '.')
+				break; /* end of hostname token */
+			*input = istnext(*input);
+		}
+	}
+	if (istlen(*input) < 1 || *istptr(*input) != ':') {
+		goto out; /* no optional port provided */
+	}
+	/* a "host:port" form requires quoting per rfc7239 */
+	if (!quoted)
+		return 0; /* not supported */
+	*input = istnext(*input); /* skip ':' */
+	/* validate port */
+	if (!http_7239_extract_port(input, NULL))
+		return 0; /* invalid port */
+ out:
+	if (host)
+		host->len = (input->ptr - host->ptr);
+	return 1;
+}
+
+/* checks if <input> contains rfc7239 compliant nodename
+ * <quoted> is used to determine if the current input is being extracted
+ * from a quoted (non zero) or unquoted (zero) token, as the parsing rules
+ * differ whether the input is quoted or not according to the rfc.
+ * Returns 1 for success and 0 for failure
+ * if <nodename> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_nodename(struct ist *input, struct forwarded_header_nodename *nodename, int quoted)
+{
+	if (istlen(*input) < 1)
+		return 0; /* invalid input */
+	if (*istptr(*input) == '_') {
+		struct ist *obfs = NULL;
+
+		/* obfuscated nodename: "_" followed by an obfuscated identifier */
+		*input = istnext(*input); /* skip '_' */
+		if (nodename) {
+			nodename->type = FORWARDED_HEADER_OBFS;
+			obfs = &nodename->obfs;
+		}
+		if (!http_7239_extract_obfs(input, obfs))
+			return 0; /* invalid obfs */
+	} else if (*istptr(*input) == 'u') {
+		/* "unknown" nodename? */
+		if (istlen(*input) < 7 ||
+		    strncmp("unknown", istptr(*input), 7))
+			return 0; /* syntax error */
+		*input = istadv(*input, 7); /* skip "unknown" */
+		if (nodename)
+			nodename->type = FORWARDED_HEADER_UNK;
+	} else if (quoted && *istptr(*input) == '[') {
+		struct in6_addr *ip6 = NULL;
+
+		/* bracketed ipv6 address: only allowed inside a quoted token */
+		if (nodename) {
+			struct sockaddr_in6 *addr = (void *)&nodename->ip;
+
+			ip6 = &addr->sin6_addr;
+			addr->sin6_family = AF_INET6;
+			nodename->type = FORWARDED_HEADER_IP;
+		}
+		if (!http_7239_extract_ipv6(input, ip6))
+			return 0; /* invalid ip6 */
+	} else if (*istptr(*input)) {
+		struct in_addr *ip = NULL;
+
+		/* fall back to ipv4 address */
+		if (nodename) {
+			struct sockaddr_in *addr = (void *)&nodename->ip;
+
+			ip = &addr->sin_addr;
+			addr->sin_family = AF_INET;
+			nodename->type = FORWARDED_HEADER_IP;
+		}
+		if (!http_7239_extract_ipv4(input, ip))
+			return 0; /* invalid ip */
+	} else
+		return 0; /* unexpected char */
+
+	/* ok */
+	return 1;
+}
+
+/* checks if <input> contains rfc7239 compliant nodeport
+ * Returns 1 for success and 0 for failure
+ * if <nodeport> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_nodeport(struct ist *input, struct forwarded_header_nodeport *nodeport)
+{
+	/* guard against empty input before dereferencing the first char
+	 * (the caller may hand us an empty ist when the header ends
+	 * right after the ':' separator)
+	 */
+	if (istlen(*input) < 1)
+		return 0; /* invalid input */
+	if (*istptr(*input) == '_') {
+		struct ist *obfs = NULL;
+
+		/* obfuscated nodeport */
+		*input = istnext(*input); /* skip '_' */
+		if (nodeport) {
+			nodeport->type = FORWARDED_HEADER_OBFS;
+			obfs = &nodeport->obfs;
+		}
+		if (!http_7239_extract_obfs(input, obfs))
+			return 0; /* invalid obfs */
+	} else {
+		uint16_t *port = NULL;
+
+		/* normal numeric port */
+		if (nodeport) {
+			nodeport->type = FORWARDED_HEADER_PORT;
+			port = &nodeport->port;
+		}
+		if (!http_7239_extract_port(input, port))
+			return 0; /* invalid port */
+	}
+	/* ok */
+	return 1;
+}
+
+/* checks if <input> contains rfc7239 compliant node (nodename:nodeport token)
+ * <quoted> is used to determine if the current input is being extracted
+ * from a quoted (non zero) or unquoted (zero) token, as the parsing rules
+ * differ whether the input is quoted or not according to the rfc.
+ * Returns 1 for success and 0 for failure
+ * if <node> is not NULL, it will be set to the extracted value contained
+ * in <input>
+ * <input> will be consumed accordingly (parsed/extracted characters are
+ * removed from <input>)
+ */
+static inline int http_7239_extract_node(struct ist *input, struct forwarded_header_node *node, int quoted)
+{
+	struct forwarded_header_nodename *nodename = NULL;
+	struct forwarded_header_nodeport *nodeport = NULL;
+
+	if (node) {
+		nodename = &node->nodename;
+		nodeport = &node->nodeport;
+		node->raw.ptr = input->ptr; /* remember the raw token start */
+	}
+	if (!http_7239_extract_nodename(input, nodename, quoted))
+		return 0; /* invalid nodename */
+	if (istlen(*input) < 1 || *istptr(*input) != ':') {
+		if (node)
+			node->nodeport.type = FORWARDED_HEADER_UNK;
+		goto out; /* no optional port provided */
+	}
+	/* a "node:port" form requires quoting per rfc7239 */
+	if (!quoted)
+		return 0; /* not supported */
+	*input = istnext(*input);
+	/* NOTE(review): <input> may be empty here if the header ends right
+	 * after ':' — verify http_7239_extract_nodeport tolerates an empty
+	 * input before dereferencing its first char.
+	 */
+	if (!http_7239_extract_nodeport(input, nodeport))
+		return 0; /* invalid nodeport */
+ out:
+	/* ok */
+	if (node)
+		node->raw.len = input->ptr - node->raw.ptr;
+	return 1;
+}
+
+/* tells whether the attribute parsed at <current_step> should be stored
+ * into the caller-provided context: a context must exist and the caller
+ * must have requested this step
+ */
+static inline int _forwarded_header_save_ctx(struct forwarded_header_ctx *ctx, int current_step, int required_steps)
+{
+	if (!ctx)
+		return 0;
+	return (current_step & required_steps) != 0;
+}
+
+/* if the current token starts with a double quote, consume it and set
+ * <quoted> so the caller knows a matching ending quote is expected
+ */
+static inline void _forwarded_header_quote_expected(struct ist *hdr, uint8_t *quoted)
+{
+	if (istlen(*hdr) > 0 && *istptr(*hdr) == '"') {
+		*quoted = 1;
+		/* node is quoted, we must find corresponding
+		 * ending quote at the end of the token
+		 */
+		*hdr = istnext(*hdr); /* skip quote */
+	}
+}
+
+/* checks if current header <hdr> is RFC 7239 compliant and can be "trusted".
+ * function will stop parsing as soon as every <required_steps> have
+ * been validated or error is encountered.
+ * Provide FORWARDED_HEADER_ALL for a full header validating spectrum.
+ * You may provide limited scope to perform quick searches on specific attributes
+ * If <ctx> is provided (not NULL), parsed attributes will be stored according to
+ * their types, allowing you to extract some useful information from the header.
+ * Returns 0 on failure and <validated_steps> bitfield on success.
+ */
+int http_validate_7239_header(struct ist hdr, int required_steps, struct forwarded_header_ctx *ctx)
+{
+	int validated_steps = 0; /* bitfield of attributes validated so far */
+	int current_step = 0;    /* attribute being parsed in this iteration */
+	uint8_t first = 1;       /* first parameter needs no ';' separator */
+	uint8_t quoted = 0;      /* current token is a quoted string */
+
+	/* loop over the ';'-separated parameters until all required steps
+	 * are validated or the header is exhausted
+	 */
+	while (istlen(hdr) && (required_steps & ~validated_steps)) {
+		if (!first) {
+			if (*istptr(hdr) == ';')
+				hdr = istnext(hdr); /* skip ';' */
+			else
+				goto not_ok; /* unexpected char */
+		}
+		else
+			first = 0;
+
+		if (!(validated_steps & FORWARDED_HEADER_FOR) && istlen(hdr) > 4 &&
+		    strncmp("for=", istptr(hdr), 4) == 0) {
+			struct forwarded_header_node *node = NULL;
+
+			/* for parameter */
+			current_step = FORWARDED_HEADER_FOR;
+			hdr = istadv(hdr, 4); /* skip "for=" */
+			_forwarded_header_quote_expected(&hdr, &quoted);
+			if (_forwarded_header_save_ctx(ctx, current_step, required_steps))
+				node = &ctx->nfor;
+			/* validate node */
+			if (!http_7239_extract_node(&hdr, node, quoted))
+				goto not_ok; /* invalid node */
+		}
+		else if (!(validated_steps & FORWARDED_HEADER_BY) && istlen(hdr) > 3 &&
+		         strncmp("by=", istptr(hdr), 3) == 0) {
+			struct forwarded_header_node *node = NULL;
+
+			/* by parameter */
+			current_step = FORWARDED_HEADER_BY;
+			hdr = istadv(hdr, 3); /* skip "by=" */
+			_forwarded_header_quote_expected(&hdr, &quoted);
+			if (_forwarded_header_save_ctx(ctx, current_step, required_steps))
+				node = &ctx->nby;
+			/* validate node */
+			if (!http_7239_extract_node(&hdr, node, quoted))
+				goto not_ok; /* invalid node */
+		}
+		else if (!(validated_steps & FORWARDED_HEADER_HOST) && istlen(hdr) > 5 &&
+		         strncmp("host=", istptr(hdr), 5) == 0) {
+			struct ist *host = NULL;
+
+			/* host parameter */
+			current_step = FORWARDED_HEADER_HOST;
+			hdr = istadv(hdr, 5); /* skip "host=" */
+			_forwarded_header_quote_expected(&hdr, &quoted);
+			if (_forwarded_header_save_ctx(ctx, current_step, required_steps))
+				host = &ctx->host;
+			/* validate host */
+			if (!http_7239_extract_host(&hdr, host, quoted))
+				goto not_ok; /* invalid host */
+		}
+		else if (!(validated_steps & FORWARDED_HEADER_PROTO) && istlen(hdr) > 6 &&
+		         strncmp("proto=", istptr(hdr), 6) == 0) {
+			/* proto parameter */
+			current_step = FORWARDED_HEADER_PROTO;
+			hdr = istadv(hdr, 6); /* skip "proto=" */
+			/* validate proto (only common used http|https are supported for now) */
+			if (istlen(hdr) < 4 || strncmp("http", istptr(hdr), 4))
+				goto not_ok;
+			hdr = istadv(hdr, 4); /* skip "http" */
+			/* an optional trailing 's' upgrades "http" to "https" */
+			if (istlen(hdr) && *istptr(hdr) == 's') {
+				hdr = istnext(hdr);
+				if (_forwarded_header_save_ctx(ctx, current_step, required_steps))
+					ctx->proto = FORWARDED_HEADER_HTTPS;
+			} else if (_forwarded_header_save_ctx(ctx, current_step, required_steps))
+				ctx->proto = FORWARDED_HEADER_HTTP;
+			/* rfc allows for potential proto quoting, but we don't support
+			 * it: it is not common usage
+			 */
+		}
+		else {
+			/* not supported
+			 * rfc allows for upcoming extensions
+			 * but obviously, we can't trust them
+			 * as they are not yet standardized
+			 */
+
+			goto not_ok;
+		}
+		/* quote check: a quoted token must end with its matching '"' */
+		if (quoted) {
+			if (istlen(hdr) < 1 || *istptr(hdr) != '"') {
+				/* matching ending quote not found */
+				goto not_ok;
+			}
+			hdr = istnext(hdr); /* skip ending quote */
+			quoted = 0; /* reset */
+		}
+		validated_steps |= current_step;
+	}
+
+	return validated_steps;
+
+ not_ok:
+	return 0;
+}
+
+/* appends a bracketed textual ipv6 address to <out>. When the surrounding
+ * token is not already quoted (<quoted> == 0), an opening quote is emitted
+ * here; the matching ending quote is deliberately left to the caller
+ * (see http_build_7239_header_node), which detects the leading '"'.
+ */
+static inline void _7239_print_ip6(struct buffer *out, struct in6_addr *ip6_addr, int quoted)
+{
+	char pn[INET6_ADDRSTRLEN];
+
+	inet_ntop(AF_INET6,
+	          ip6_addr,
+	          pn, sizeof(pn));
+	if (!quoted)
+		chunk_appendf(out, "\""); /* explicit quoting required for ipv6 */
+	chunk_appendf(out, "[%s]", pn);
+}
+
+/* appends the rfc7239 nodename part to <out>, either from the original
+ * connection address (<addr>) or from a user-configured sample expression,
+ * depending on forby->nn_mode. Appends nothing when the value is
+ * unavailable or not rfc compliant (the caller treats an empty output as
+ * "unknown").
+ */
+static inline void http_build_7239_header_nodename(struct buffer *out,
+                                                   struct stream *s, struct proxy *curproxy,
+                                                   const struct sockaddr_storage *addr,
+                                                   struct http_ext_7239_forby *forby)
+{
+	struct in6_addr *ip6_addr;
+	int quoted = !!forby->np_mode; /* the caller quotes the whole node when a port is emitted */
+
+	if (forby->nn_mode == HTTP_7239_FORBY_ORIG) {
+		if (addr && addr->ss_family == AF_INET) {
+			unsigned char *pn = (unsigned char *)&((struct sockaddr_in *)addr)->sin_addr;
+
+			chunk_appendf(out, "%d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
+		}
+		else if (addr && addr->ss_family == AF_INET6) {
+			ip6_addr = &((struct sockaddr_in6 *)addr)->sin6_addr;
+			_7239_print_ip6(out, ip6_addr, quoted);
+		}
+		/* else: not supported */
+	}
+	else if (forby->nn_mode == HTTP_7239_FORBY_SMP && forby->nn_expr) {
+		struct sample *smp;
+
+		smp = sample_process(curproxy, s->sess, s,
+		                     SMP_OPT_DIR_REQ | SMP_OPT_FINAL, forby->nn_expr, NULL);
+
+		if (smp) {
+			if (smp->data.type == SMP_T_IPV6) {
+				/* smp is valid IP6, print with RFC compliant output */
+				ip6_addr = &smp->data.u.ipv6;
+				_7239_print_ip6(out, ip6_addr, quoted);
+			}
+			else if (sample_casts[smp->data.type][SMP_T_STR] &&
+			         sample_casts[smp->data.type][SMP_T_STR](smp)) {
+				/* two independent parses of the same string: one as
+				 * nodename, one as obfuscated identifier
+				 */
+				struct ist validate_n = ist2(smp->data.u.str.area, smp->data.u.str.data);
+				struct ist validate_o = ist2(smp->data.u.str.area, smp->data.u.str.data);
+				struct forwarded_header_nodename nodename;
+
+				/* validate nodename: fully consumed input means compliant */
+				if (http_7239_extract_nodename(&validate_n, &nodename, 1) &&
+				    !istlen(validate_n)) {
+					if (nodename.type == FORWARDED_HEADER_IP &&
+					    nodename.ip.ss_family == AF_INET6) {
+						/* special care needed for valid ip6 nodename (quoting) */
+						ip6_addr = &((struct sockaddr_in6 *)&nodename.ip)->sin6_addr;
+						_7239_print_ip6(out, ip6_addr, quoted);
+					} else {
+						/* no special care needed, input is already rfc compliant,
+						 * just print as regular non quoted string
+						 */
+						chunk_cat(out, &smp->data.u.str);
+					}
+				}
+				else if (http_7239_extract_obfs(&validate_o, NULL) &&
+				         !istlen(validate_o)) {
+					/* raw user input that should be printed as 7239 obfs */
+					chunk_appendf(out, "_%.*s", (int)smp->data.u.str.data, smp->data.u.str.area);
+				}
+				/* else: not compliant */
+			}
+			/* else: cannot be casted to str */
+		}
+		/* else: smp error */
+	}
+}
+
+/* appends the rfc7239 nodeport part to <out>, either from the original
+ * connection address (<addr>) or from a user-configured sample expression,
+ * depending on forby->np_mode. Appends nothing when the value is
+ * unavailable or not rfc compliant (the caller then removes the ':').
+ */
+static inline void http_build_7239_header_nodeport(struct buffer *out,
+                                                   struct stream *s, struct proxy *curproxy,
+                                                   const struct sockaddr_storage *addr,
+                                                   struct http_ext_7239_forby *forby)
+{
+	if (forby->np_mode == HTTP_7239_FORBY_ORIG) {
+		if (addr && addr->ss_family == AF_INET)
+			chunk_appendf(out, "%d", ntohs(((struct sockaddr_in *)addr)->sin_port));
+		else if (addr && addr->ss_family == AF_INET6)
+			chunk_appendf(out, "%d", ntohs(((struct sockaddr_in6 *)addr)->sin6_port));
+		/* else: not supported */
+	}
+	else if (forby->np_mode == HTTP_7239_FORBY_SMP && forby->np_expr) {
+		struct sample *smp;
+
+		smp = sample_fetch_as_type(curproxy, s->sess, s,
+		                           SMP_OPT_DIR_REQ | SMP_OPT_FINAL, forby->np_expr, SMP_T_STR);
+		if (smp) {
+			/* two independent parses of the same string: one as a
+			 * numeric port, one as an obfuscated identifier
+			 */
+			struct ist validate_n = ist2(smp->data.u.str.area, smp->data.u.str.data);
+			struct ist validate_o = ist2(smp->data.u.str.area, smp->data.u.str.data);
+
+			/* validate nodeport: fully consumed input means compliant */
+			if (http_7239_extract_nodeport(&validate_n, NULL) &&
+			    !istlen(validate_n)) {
+				/* no special care needed, input is already rfc compliant,
+				 * just print as regular non quoted string
+				 */
+				chunk_cat(out, &smp->data.u.str);
+			}
+			else if (http_7239_extract_obfs(&validate_o, NULL) &&
+			         !istlen(validate_o)) {
+				/* raw user input that should be printed as 7239 obfs */
+				chunk_appendf(out, "_%.*s", (int)smp->data.u.str.data, smp->data.u.str.area);
+			}
+			/* else: not compliant */
+		}
+		/* else: smp error */
+	}
+}
+
+/* appends a full rfc7239 node ("nodename[:nodeport]") to <out>, quoting the
+ * whole token when a port is requested (np_mode set). Falls back to
+ * "unknown" when the nodename cannot be built, and silently drops the
+ * ':' when the nodeport cannot be built.
+ */
+static inline void http_build_7239_header_node(struct buffer *out,
+                                               struct stream *s, struct proxy *curproxy,
+                                               const struct sockaddr_storage *addr,
+                                               struct http_ext_7239_forby *forby)
+{
+	size_t offset_start; /* position of the node start (and of the opening quote, if any) */
+	size_t offset_save;  /* position before each sub-part, to detect empty output */
+
+	offset_start = out->data;
+	if (forby->np_mode)
+		chunk_appendf(out, "\"");
+	offset_save = out->data;
+	http_build_7239_header_nodename(out, s, curproxy, addr, forby);
+	if (offset_save == out->data) {
+		/* could not build nodename, either because some
+		 * data is not available or user is providing bad input
+		 */
+		chunk_appendf(out, "unknown");
+	}
+	if (forby->np_mode) {
+		chunk_appendf(out, ":");
+		offset_save = out->data;
+		http_build_7239_header_nodeport(out, s, curproxy, addr, forby);
+		if (offset_save == out->data) {
+			/* could not build nodeport, either because some data is
+			 * not available or user is providing bad input:
+			 * rewind over the ':' appended above
+			 */
+			out->data = offset_save - 1;
+		}
+	}
+	/* a '"' at offset_start means either we or _7239_print_ip6() opened a
+	 * quote: emit the matching ending quote
+	 */
+	if (out->data != offset_start && out->area[offset_start] == '"')
+		chunk_appendf(out, "\""); /* add matching end quote */
+}
+
/* Writes the rfc7239 "host" parameter value to <out> according to <host>
 * configuration:
 * - HTTP_7239_HOST_ORIG: reuse the request's Host header from <htx>
 * - HTTP_7239_HOST_SMP: evaluate <host>->expr on stream <s>
 * In both cases the candidate string is validated with
 * http_7239_extract_host() and only printed when fully rfc compliant
 * (the caller is expected to provide the surrounding quotes).
 */
static inline void http_build_7239_header_host(struct buffer *out,
	struct stream *s, struct proxy *curproxy,
	struct htx *htx, struct http_ext_7239_host *host)
{
	struct http_hdr_ctx ctx = { .blk = NULL };
	char *str = NULL;
	int str_len = 0;

	if (host->mode == HTTP_7239_HOST_ORIG &&
	    http_find_header(htx, ist("host"), &ctx, 0)) {
		str = ctx.value.ptr;
		str_len = ctx.value.len;
 print_host:
		{
			struct ist validate = ist2(str, str_len);
			/* host check, to ensure rfc compliant output
			 * (assuming host is quoted/escaped)
			 */
			if (http_7239_extract_host(&validate, NULL, 1) && !istlen(validate))
				chunk_memcat(out, str, str_len);
			/* else: not compliant or partially compliant */
		}

	}
	else if (host->mode == HTTP_7239_HOST_SMP && host->expr) {
		struct sample *smp;

		smp = sample_fetch_as_type(curproxy, s->sess, s,
			SMP_OPT_DIR_REQ | SMP_OPT_FINAL, host->expr, SMP_T_STR);
		if (smp) {
			/* reuse the validation/print path of the ORIG case */
			str = smp->data.u.str.area;
			str_len = smp->data.u.str.data;
			goto print_host;
		}
		/* else: smp error */
	}
}
+
+/* Tries build 7239 header according to <curproxy> parameters and <s> context
+ * It both depends on <curproxy>->http_ext->fwd for config and <s> for request
+ * context data.
+ * The function will write output to <out> buffer
+ * Returns 1 for success and 0 for error (ie: not enough space in buffer)
+ */
/* Tries build 7239 header according to <curproxy> parameters and <s> context
 * It both depends on <curproxy>->http_ext->fwd for config and <s> for request
 * context data.
 * The function will write output to <out> buffer
 * Returns 1 for success and 0 for error (ie: not enough space in buffer)
 */
static int http_build_7239_header(struct buffer *out,
	struct stream *s, struct proxy *curproxy, struct htx *htx)
{
	struct connection *cli_conn = objt_conn(strm_sess(s)->origin);

	/* each parameter below prefixes itself with ';' when <out> already
	 * holds data, so parameters end up separated per rfc7239 syntax
	 */
	if (curproxy->http_ext->fwd->p_proto) {
		chunk_appendf(out, "%sproto=%s", ((out->data) ? ";" : ""),
			((conn_is_ssl(cli_conn)) ? "https" : "http"));
	}
	if (curproxy->http_ext->fwd->p_host.mode) {
		/* always add quotes for host parameter to make output compliance checks simpler */
		chunk_appendf(out, "%shost=\"", ((out->data) ? ";" : ""));
		/* ignore return value for now, but could be useful some day */
		http_build_7239_header_host(out, s, curproxy, htx, &curproxy->http_ext->fwd->p_host);
		chunk_appendf(out, "\"");
	}

	if (curproxy->http_ext->fwd->p_by.nn_mode) {
		/* "by" identifies us: use the connection's destination address */
		const struct sockaddr_storage *dst = sc_dst(s->scf);

		chunk_appendf(out, "%sby=", ((out->data) ? ";" : ""));
		http_build_7239_header_node(out, s, curproxy, dst, &curproxy->http_ext->fwd->p_by);
	}

	if (curproxy->http_ext->fwd->p_for.nn_mode) {
		/* "for" identifies the client: use the source address */
		const struct sockaddr_storage *src = sc_src(s->scf);

		chunk_appendf(out, "%sfor=", ((out->data) ? ";" : ""));
		http_build_7239_header_node(out, s, curproxy, src, &curproxy->http_ext->fwd->p_for);
	}
	if (unlikely(out->data == out->size)) {
		/* not enough space in buffer, error */
		return 0;
	}
	return 1;
}
+
+/* This function will try to inject RFC 7239 forwarded header if
+ * configured on the backend (ignored for frontends).
+ * Will do nothing if the option is not enabled on the proxy.
+ * Returns 1 for success and 0 for failure
+ */
/* This function will try to inject RFC 7239 forwarded header if
 * configured on the backend (ignored for frontends).
 * Will do nothing if the option is not enabled on the proxy.
 * Existing Forwarded header values are validated first: when they are all
 * rfc compliant, our element is appended to the last one; otherwise a
 * separate (trusted) Forwarded header is added.
 * Returns 1 for success and 0 for failure
 */
int http_handle_7239_header(struct stream *s, struct channel *req)
{
	struct proxy *curproxy = s->be; /* ignore frontend */

	if (curproxy->http_ext && curproxy->http_ext->fwd) {
		struct htx *htx = htxbuf(&req->buf);
		int validate = 1;
		struct http_hdr_ctx find = { .blk = NULL };
		struct http_hdr_ctx last = { .blk = NULL};
		struct ist hdr = ist("forwarded");

		/* ok, let's build forwarded header */
		chunk_reset(&trash);
		if (unlikely(!http_build_7239_header(&trash, s, curproxy, htx)))
			return 0; /* error when building header (bad user conf or memory error) */

		/* validate existing forwarded header (including multiple values),
		 * hard stop if error is encountered
		 */
		while (http_find_header(htx, hdr, &find, 0)) {
			/* validate current header chunk */
			if (!http_validate_7239_header(find.value, FORWARDED_HEADER_ALL, NULL)) {
				/* at least one error, existing forwarded header not OK, add our own
				 * forwarded header, so that it can be trusted
				 */
				validate = 0;
				break;
			}
			last = find;
		}
		/* no errors, append our data at the end of existing header */
		if (last.blk && validate) {
			if (unlikely(!http_append_header_value(htx, &last, ist2(trash.area, trash.data))))
				return 0; /* htx error */
		}
		else {
			if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
				return 0; /* htx error */
		}
	}
	return 1;
}
+
+/*
+ * add X-Forwarded-For if either the frontend or the backend
+ * asks for it.
+ * Returns 1 for success and 0 for failure
+ */
/*
 * add X-Forwarded-For if either the frontend or the backend
 * asks for it.
 * The client source address (IPv4 or IPv6) is appended unless it matches
 * the configured 'except' network, or the mode is 'if-none' on both sides
 * and the header is already present.
 * Returns 1 for success and 0 for failure
 */
int http_handle_xff_header(struct stream *s, struct channel *req)
{
	struct session *sess = s->sess;
	struct http_ext_xff *f_xff = NULL;
	struct http_ext_xff *b_xff = NULL;

	if (sess->fe->http_ext && sess->fe->http_ext->xff) {
		/* frontend */
		f_xff = sess->fe->http_ext->xff;
	}
	if (s->be->http_ext && s->be->http_ext->xff) {
		/* backend */
		b_xff = s->be->http_ext->xff;
	}

	if (f_xff || b_xff) {
		struct htx *htx = htxbuf(&req->buf);
		const struct sockaddr_storage *src = sc_src(s->scf);
		struct http_hdr_ctx ctx = { .blk = NULL };
		/* backend header name takes precedence over the frontend's */
		struct ist hdr = ((b_xff) ? b_xff->hdr_name : f_xff->hdr_name);

		if ((!f_xff || f_xff->mode == HTTP_XFF_IFNONE) &&
		    (!b_xff || b_xff->mode == HTTP_XFF_IFNONE) &&
		    http_find_header(htx, hdr, &ctx, 0)) {
			/* The header is set to be added only if none is present
			 * and we found it, so don't do anything.
			 */
		}
		else if (src && src->ss_family == AF_INET) {
			/* Add an X-Forwarded-For header unless the source IP is
			 * in the 'except' network range.
			 */
			if ((!f_xff || ipcmp2net(src, &f_xff->except_net)) &&
			    (!b_xff || ipcmp2net(src, &b_xff->except_net))) {
				unsigned char *pn = (unsigned char *)&((struct sockaddr_in *)src)->sin_addr;

				/* Note: we rely on the backend to get the header name to be used for
				 * x-forwarded-for, because the header is really meant for the backends.
				 * However, if the backend did not specify any option, we have to rely
				 * on the frontend's header name.
				 */
				chunk_printf(&trash, "%d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
					return 0;
			}
		}
		else if (src && src->ss_family == AF_INET6) {
			/* Add an X-Forwarded-For header unless the source IP is
			 * in the 'except' network range.
			 */
			if ((!f_xff || ipcmp2net(src, &f_xff->except_net)) &&
			    (!b_xff || ipcmp2net(src, &b_xff->except_net))) {
				char pn[INET6_ADDRSTRLEN];

				inet_ntop(AF_INET6,
					  (const void *)&((struct sockaddr_in6 *)(src))->sin6_addr,
					  pn, sizeof(pn));

				/* Note: we rely on the backend to get the header name to be used for
				 * x-forwarded-for, because the header is really meant for the backends.
				 * However, if the backend did not specify any option, we have to rely
				 * on the frontend's header name.
				 */
				chunk_printf(&trash, "%s", pn);
				if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
					return 0;
			}
		}
	}
	return 1;
}
+
+/*
+ * add X-Original-To if either the frontend or the backend
+ * asks for it.
+ * Returns 1 for success and 0 for failure
+ */
+int http_handle_xot_header(struct stream *s, struct channel *req)
+{
+ struct session *sess = s->sess;
+ struct http_ext_xot *f_xot = NULL;
+ struct http_ext_xot *b_xot = NULL;
+
+ if (sess->fe->http_ext && sess->fe->http_ext->xot) {
+ /* frontend */
+ f_xot = sess->fe->http_ext->xot;
+ }
+ if (s->be->http_ext && s->be->http_ext->xot) {
+ /* backend */
+ BUG_ON(!s->be->http_ext);
+ b_xot = s->be->http_ext->xot;
+ }
+
+ if (f_xot || b_xot) {
+ struct htx *htx = htxbuf(&req->buf);
+ const struct sockaddr_storage *dst = sc_dst(s->scf);
+ struct ist hdr = ((b_xot) ? b_xot->hdr_name : f_xot->hdr_name);
+
+ if (dst && dst->ss_family == AF_INET) {
+ /* Add an X-Original-To header unless the destination IP is
+ * in the 'except' network range.
+ */
+ if ((!f_xot || ipcmp2net(dst, &f_xot->except_net)) &&
+ (!b_xot || ipcmp2net(dst, &b_xot->except_net))) {
+ unsigned char *pn = (unsigned char *)&((struct sockaddr_in *)dst)->sin_addr;
+
+ /* Note: we rely on the backend to get the header name to be used for
+ * x-original-to, because the header is really meant for the backends.
+ * However, if the backend did not specify any option, we have to rely
+ * on the frontend's header name.
+ */
+ chunk_printf(&trash, "%d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
+ if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
+ return 0;
+ }
+ }
+ else if (dst && dst->ss_family == AF_INET6) {
+ /* Add an X-Original-To header unless the source IP is
+ * in the 'except' network range.
+ */
+ if ((!f_xot || ipcmp2net(dst, &f_xot->except_net)) &&
+ (!b_xot || ipcmp2net(dst, &b_xot->except_net))) {
+ char pn[INET6_ADDRSTRLEN];
+
+ inet_ntop(AF_INET6,
+ (const void *)&((struct sockaddr_in6 *)dst)->sin6_addr,
+ pn, sizeof(pn));
+
+ /* Note: we rely on the backend to get the header name to be used for
+ * x-forwarded-for, because the header is really meant for the backends.
+ * However, if the backend did not specify any option, we have to rely
+ * on the frontend's header name.
+ */
+ chunk_printf(&trash, "%s", pn);
+ if (unlikely(!http_add_header(htx, hdr, ist2(trash.area, trash.data))))
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+/*
+ * =========== CONFIG ===========
+ * below are helpers to parse http ext options from the config
+ */
+static int proxy_http_parse_oom(const char *file, int linenum)
+{
+ int err_code = 0;
+
+ ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ return err_code;
+}
+
+static inline int _proxy_http_parse_7239_expr(char **args, int *cur_arg,
+ const char *file, int linenum,
+ char **expr_s)
+{
+ int err_code = 0;
+
+ if (!*args[*cur_arg + 1]) {
+ ha_alert("parsing [%s:%d]: '%s' expects <expr> as argument.\n",
+ file, linenum, args[*cur_arg]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ *cur_arg += 1;
+ ha_free(expr_s);
+ *expr_s = strdup(args[*cur_arg]);
+ if (!*expr_s)
+ return proxy_http_parse_oom(file, linenum);
+ *cur_arg += 1;
+ out:
+ return err_code;
+}
+
+/* forwarded/7239 RFC: tries to parse "option forwarded" config keyword
+ * Returns a composition of ERR_ABORT, ERR_ALERT, ERR_FATAL, ERR_WARN
+ */
+int proxy_http_parse_7239(char **args, int cur_arg,
+ struct proxy *curproxy, const struct proxy *defpx,
+ const char *file, int linenum)
+{
+ struct http_ext_7239 *fwd;
+ int err_code = 0;
+
+ if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, "option forwarded", NULL)) {
+ /* option is ignored for frontends */
+ err_code |= ERR_WARN;
+ goto out;
+ }
+
+ if (!http_ext_7239_prepare(curproxy))
+ return proxy_http_parse_oom(file, linenum);
+
+ fwd = curproxy->http_ext->fwd;
+
+ fwd->p_proto = 0;
+ fwd->p_host.mode = 0;
+ fwd->p_for.nn_mode = 0;
+ fwd->p_for.np_mode = 0;
+ fwd->p_by.nn_mode = 0;
+ fwd->p_by.np_mode = 0;
+ ha_free(&fwd->c_file);
+ fwd->c_file = strdup(file);
+ fwd->c_line = linenum;
+
+ /* start at 2, since 0+1 = "option" "forwarded" */
+ cur_arg = 2;
+ if (!*(args[cur_arg])) {
+ /* no optional argument provided, use default settings */
+ fwd->p_for.nn_mode = HTTP_7239_FORBY_ORIG; /* enable for and mimic xff */
+ fwd->p_proto = 1; /* enable proto */
+ goto out;
+ }
+ /* loop to go through optional arguments */
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "proto") == 0) {
+ fwd->p_proto = 1;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "host") == 0) {
+ fwd->p_host.mode = HTTP_7239_HOST_ORIG;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "host-expr") == 0) {
+ fwd->p_host.mode = HTTP_7239_HOST_SMP;
+ err_code |= _proxy_http_parse_7239_expr(args, &cur_arg, file, linenum,
+ &fwd->p_host.expr_s);
+ if (err_code & ERR_CODE)
+ goto out;
+ } else if (strcmp(args[cur_arg], "by") == 0) {
+ fwd->p_by.nn_mode = HTTP_7239_FORBY_ORIG;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "by-expr") == 0) {
+ fwd->p_by.nn_mode = HTTP_7239_FORBY_SMP;
+ err_code |= _proxy_http_parse_7239_expr(args, &cur_arg, file, linenum,
+ &fwd->p_by.nn_expr_s);
+ if (err_code & ERR_CODE)
+ goto out;
+ } else if (strcmp(args[cur_arg], "for") == 0) {
+ fwd->p_for.nn_mode = HTTP_7239_FORBY_ORIG;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "for-expr") == 0) {
+ fwd->p_for.nn_mode = HTTP_7239_FORBY_SMP;
+ err_code |= _proxy_http_parse_7239_expr(args, &cur_arg, file, linenum,
+ &fwd->p_for.nn_expr_s);
+ if (err_code & ERR_CODE)
+ goto out;
+ } else if (strcmp(args[cur_arg], "by_port") == 0) {
+ fwd->p_by.np_mode = HTTP_7239_FORBY_ORIG;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "by_port-expr") == 0) {
+ fwd->p_by.np_mode = HTTP_7239_FORBY_SMP;
+ err_code |= _proxy_http_parse_7239_expr(args, &cur_arg, file, linenum,
+ &fwd->p_by.np_expr_s);
+ if (err_code & ERR_CODE)
+ goto out;
+ } else if (strcmp(args[cur_arg], "for_port") == 0) {
+ fwd->p_for.np_mode = HTTP_7239_FORBY_ORIG;
+ cur_arg += 1;
+ } else if (strcmp(args[cur_arg], "for_port-expr") == 0) {
+ fwd->p_for.np_mode = HTTP_7239_FORBY_SMP;
+ err_code |= _proxy_http_parse_7239_expr(args, &cur_arg, file, linenum,
+ &fwd->p_for.np_expr_s);
+ if (err_code & ERR_CODE)
+ goto out;
+ } else {
+ /* unknown suboption - catchall */
+ ha_alert("parsing [%s:%d] : '%s %s' only supports optional values: 'proto', 'host', "
+ "'host-expr', 'by', 'by-expr', 'by_port', 'by_port-expr', "
+ "'for', 'for-expr', 'for_port' and 'for_port-expr'.\n",
+ file, linenum, args[0], args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ } /* end while loop */
+
+ /* consistency check */
+ if (fwd->p_by.np_mode &&
+ !fwd->p_by.nn_mode) {
+ fwd->p_by.np_mode = 0;
+ ha_free(&fwd->p_by.np_expr_s);
+ ha_warning("parsing [%s:%d] : '%s %s' : '%s' will be ignored because both 'by' "
+ "and 'by-expr' are unset\n",
+ file, linenum, args[0], args[1],
+ ((fwd->p_by.np_mode == HTTP_7239_FORBY_ORIG) ? "by_port" : "by_port-expr"));
+ err_code |= ERR_WARN;
+ }
+ if (fwd->p_for.np_mode &&
+ !fwd->p_for.nn_mode) {
+ fwd->p_for.np_mode = 0;
+ ha_free(&fwd->p_for.np_expr_s);
+ ha_warning("parsing [%s:%d] : '%s %s' : '%s' will be ignored because both 'for' "
+ "and 'for-expr' are unset\n",
+ file, linenum, args[0], args[1],
+ ((fwd->p_for.np_mode == HTTP_7239_FORBY_ORIG) ? "for_port" : "for_port-expr"));
+ err_code |= ERR_WARN;
+ }
+
+ out:
+ return err_code;
+}
+
+/* rfc7239 forwarded option needs a postparsing step
+ * to convert parsing hints into runtime usable sample expressions
+ * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT, ERR_WARN
+ */
/* rfc7239 forwarded option needs a postparsing step
 * to convert parsing hints into runtime usable sample expressions
 * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT, ERR_WARN
 */
int proxy_http_compile_7239(struct proxy *curproxy)
{
	struct http_ext_7239 *fwd;
	int err = ERR_NONE;
	int loop;

	if (!(curproxy->cap & PR_CAP_BE)) {
		/* no backend cap: not supported (ie: frontend) */
		goto out;
	}

	/* should not happen (test should be performed after BE cap test) */
	BUG_ON(!curproxy->http_ext || !curproxy->http_ext->fwd);

	/* provide parsing context (file/line of the option) to the sample
	 * expression parser for error reporting
	 */
	curproxy->conf.args.ctx = ARGC_OPT; /* option */
	curproxy->conf.args.file = curproxy->http_ext->fwd->c_file;
	curproxy->conf.args.line = curproxy->http_ext->fwd->c_line;
	fwd = curproxy->http_ext->fwd;

	/* it is important that we keep iterating on error to make sure
	 * all fwd config fields are in the same state (post-parsing state)
	 */
	for (loop = 0; loop < 5; loop++) {
		char **expr_str = NULL;
		struct sample_expr **expr = NULL;
		struct sample_expr *cur_expr;
		char *err_str = NULL;
		int smp = 0;
		int idx = 0;

		/* select the expression slot to compile for this iteration */
		switch (loop) {
		case 0:
			/* host */
			expr_str = &fwd->p_host.expr_s;
			expr = &fwd->p_host.expr;
			smp = (fwd->p_host.mode == HTTP_7239_HOST_SMP);
			break;
		case 1:
			/* by->node */
			expr_str = &fwd->p_by.nn_expr_s;
			expr = &fwd->p_by.nn_expr;
			smp = (fwd->p_by.nn_mode == HTTP_7239_FORBY_SMP);
			break;
		case 2:
			/* by->nodeport */
			expr_str = &fwd->p_by.np_expr_s;
			expr = &fwd->p_by.np_expr;
			smp = (fwd->p_by.np_mode == HTTP_7239_FORBY_SMP);
			break;
		case 3:
			/* for->node */
			expr_str = &fwd->p_for.nn_expr_s;
			expr = &fwd->p_for.nn_expr;
			smp = (fwd->p_for.nn_mode == HTTP_7239_FORBY_SMP);
			break;
		case 4:
			/* for->nodeport */
			expr_str = &fwd->p_for.np_expr_s;
			expr = &fwd->p_for.np_expr;
			smp = (fwd->p_for.np_mode == HTTP_7239_FORBY_SMP);
			break;
		}
		if (!smp)
			continue; /* no expr */

		/* expr and expr_str cannot be NULL past this point */
		BUG_ON(!expr || !expr_str);

		if (!*expr_str) {
			/* should not happen unless system memory exhaustion */
			ha_alert("%s '%s' [%s:%d]: failed to parse 'option forwarded' expression : %s.\n",
				 proxy_type_str(curproxy), curproxy->id,
				 fwd->c_file, fwd->c_line,
				 "memory error");
			err |= ERR_ALERT | ERR_FATAL;
			continue;
		}

		cur_expr =
			sample_parse_expr((char*[]){*expr_str, NULL}, &idx,
					  fwd->c_file,
					  fwd->c_line,
					  &err_str, &curproxy->conf.args, NULL);

		if (!cur_expr) {
			ha_alert("%s '%s' [%s:%d]: failed to parse 'option forwarded' expression '%s' in : %s.\n",
				 proxy_type_str(curproxy), curproxy->id,
				 fwd->c_file, fwd->c_line,
				 *expr_str, err_str);
			ha_free(&err_str);
			err |= ERR_ALERT | ERR_FATAL;
		}
		else if (!(cur_expr->fetch->val & SMP_VAL_BE_HRQ_HDR)) {
			/* fetch not available in this context: sample expr is resolved
			 * within backend right after headers are processed.
			 * (in http_process_request())
			 * -> we simply warn the user about the misuse
			 */
			ha_warning("%s '%s' [%s:%d]: in 'option forwarded' sample expression '%s' : "
				   "some args extract information from '%s', "
				   "none of which is available here.\n",
				   proxy_type_str(curproxy), curproxy->id,
				   fwd->c_file, fwd->c_line,
				   *expr_str, sample_ckp_names(cur_expr->fetch->use));
			err |= ERR_WARN;
		}
		/* post parsing individual expr cleanup: the string hint and the
		 * compiled expression share a union, so the string must go
		 */
		ha_free(expr_str);

		/* expr assignment */
		*expr = cur_expr;
	}
	curproxy->conf.args.file = NULL;
	curproxy->conf.args.line = 0;

	/* post parsing general cleanup */
	ha_free(&fwd->c_file);
	fwd->c_line = 0;

	fwd->c_mode = 1; /* parsing completed */

 out:
	return err;
}
+
+/* x-forwarded-for: tries to parse "option forwardfor" config keyword
+ * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT
+ */
/* x-forwarded-for: tries to parse "option forwardfor" config keyword
 * Supported sub-keywords: 'except' <net>, 'header' <name>, 'if-none'.
 * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT
 */
int proxy_http_parse_xff(char **args, int cur_arg,
	struct proxy *curproxy, const struct proxy *defpx,
	const char *file, int linenum)
{
	struct http_ext_xff *xff;
	int err_code = 0;

	if (!http_ext_xff_prepare(curproxy))
		return proxy_http_parse_oom(file, linenum);

	xff = curproxy->http_ext->xff;

	/* insert x-forwarded-for field, but not for the IP address listed as an except.
	 * set default options (ie: bitfield, header name, etc)
	 */

	xff->mode = HTTP_XFF_ALWAYS;

	istfree(&xff->hdr_name);
	xff->hdr_name = istdup(ist(DEF_XFORWARDFOR_HDR));
	if (!isttest(xff->hdr_name))
		return proxy_http_parse_oom(file, linenum);
	xff->except_net.family = AF_UNSPEC;

	/* loop to go through arguments - start at 2, since 0+1 = "option" "forwardfor" */
	cur_arg = 2;
	while (*(args[cur_arg])) {
		if (strcmp(args[cur_arg], "except") == 0) {
			unsigned char mask;
			int i;

			/* suboption except - needs additional argument for it */
			if (*(args[cur_arg+1]) &&
			    str2net(args[cur_arg+1], 1, &xff->except_net.addr.v4.ip, &xff->except_net.addr.v4.mask)) {
				xff->except_net.family = AF_INET;
				/* flush useless bits: mask off host bits */
				xff->except_net.addr.v4.ip.s_addr &= xff->except_net.addr.v4.mask.s_addr;
			}
			else if (*(args[cur_arg+1]) &&
				 str62net(args[cur_arg+1], &xff->except_net.addr.v6.ip, &mask)) {
				xff->except_net.family = AF_INET6;
				len2mask6(mask, &xff->except_net.addr.v6.mask);
				/* flush useless bits: mask off host bits */
				for (i = 0; i < 16; i++)
					xff->except_net.addr.v6.ip.s6_addr[i] &= xff->except_net.addr.v6.mask.s6_addr[i];
			}
			else {
				ha_alert("parsing [%s:%d] : '%s %s %s' expects <address>[/mask] as argument.\n",
					 file, linenum, args[0], args[1], args[cur_arg]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			cur_arg += 2;
		} else if (strcmp(args[cur_arg], "header") == 0) {
			/* suboption header - needs additional argument for it */
			if (*(args[cur_arg+1]) == 0) {
				ha_alert("parsing [%s:%d] : '%s %s %s' expects <header_name> as argument.\n",
					 file, linenum, args[0], args[1], args[cur_arg]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			istfree(&xff->hdr_name);
			xff->hdr_name = istdup(ist(args[cur_arg+1]));
			if (!isttest(xff->hdr_name))
				return proxy_http_parse_oom(file, linenum);
			cur_arg += 2;
		} else if (strcmp(args[cur_arg], "if-none") == 0) {
			/* only add the header when the request carries none */
			xff->mode = HTTP_XFF_IFNONE;
			cur_arg += 1;
		} else {
			/* unknown suboption - catchall */
			ha_alert("parsing [%s:%d] : '%s %s' only supports optional values: 'except', 'header' and 'if-none'.\n",
				 file, linenum, args[0], args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	} /* end while loop */
 out:
	return err_code;
}
+
+/* x-original-to: tries to parse "option originalto" config keyword
+ * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT
+ */
/* x-original-to: tries to parse "option originalto" config keyword
 * Supported sub-keywords: 'except' <net>, 'header' <name>.
 * Returns a composition of ERR_NONE, ERR_FATAL, ERR_ALERT
 */
int proxy_http_parse_xot(char **args, int cur_arg,
	struct proxy *curproxy, const struct proxy *defpx,
	const char *file, int linenum)
{
	struct http_ext_xot *xot;
	int err_code = 0;

	if (!http_ext_xot_prepare(curproxy))
		return proxy_http_parse_oom(file, linenum);

	xot = curproxy->http_ext->xot;

	/* insert x-original-to field, but not for the IP address listed as an except.
	 * set default options (ie: bitfield, header name, etc)
	 */

	istfree(&xot->hdr_name);
	xot->hdr_name = istdup(ist(DEF_XORIGINALTO_HDR));
	if (!isttest(xot->hdr_name))
		return proxy_http_parse_oom(file, linenum);
	xot->except_net.family = AF_UNSPEC;

	/* loop to go through arguments - start at 2, since 0+1 = "option" "originalto" */
	cur_arg = 2;
	while (*(args[cur_arg])) {
		if (strcmp(args[cur_arg], "except") == 0) {
			unsigned char mask;
			int i;

			/* suboption except - needs additional argument for it */
			if (*(args[cur_arg+1]) &&
			    str2net(args[cur_arg+1], 1, &xot->except_net.addr.v4.ip, &xot->except_net.addr.v4.mask)) {
				xot->except_net.family = AF_INET;
				/* mask off host bits */
				xot->except_net.addr.v4.ip.s_addr &= xot->except_net.addr.v4.mask.s_addr;
			}
			else if (*(args[cur_arg+1]) &&
				 str62net(args[cur_arg+1], &xot->except_net.addr.v6.ip, &mask)) {
				xot->except_net.family = AF_INET6;
				len2mask6(mask, &xot->except_net.addr.v6.mask);
				/* mask off host bits */
				for (i = 0; i < 16; i++)
					xot->except_net.addr.v6.ip.s6_addr[i] &= xot->except_net.addr.v6.mask.s6_addr[i];
			}
			else {
				ha_alert("parsing [%s:%d] : '%s %s %s' expects <address>[/mask] as argument.\n",
					 file, linenum, args[0], args[1], args[cur_arg]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			cur_arg += 2;
		} else if (strcmp(args[cur_arg], "header") == 0) {
			/* suboption header - needs additional argument for it */
			if (*(args[cur_arg+1]) == 0) {
				ha_alert("parsing [%s:%d] : '%s %s %s' expects <header_name> as argument.\n",
					 file, linenum, args[0], args[1], args[cur_arg]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			istfree(&xot->hdr_name);
			xot->hdr_name = istdup(ist(args[cur_arg+1]));
			if (!isttest(xot->hdr_name))
				return proxy_http_parse_oom(file, linenum);
			cur_arg += 2;
		} else {
			/* unknown suboption - catchall */
			ha_alert("parsing [%s:%d] : '%s %s' only supports optional values: 'except' and 'header'.\n",
				 file, linenum, args[0], args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	} /* end while loop */

 out:
	return err_code;
}
+
+/*
+ * =========== MGMT ===========
+ * below are helpers to manage http ext options
+ */
+
+/* Ensure http_ext->fwd is properly allocated and
+ * initialized for <curproxy>.
+ * The function will leverage http_ext_prepare() to make
+ * sure http_ext is properly allocated and initialized as well.
+ * Returns 1 for success and 0 for failure (memory error)
+ */
+int http_ext_7239_prepare(struct proxy *curproxy)
+{
+ struct http_ext_7239 *fwd;
+
+ if (!http_ext_prepare(curproxy))
+ return 0;
+ if (curproxy->http_ext->fwd)
+ return 1; /* nothing to do */
+
+ fwd = malloc(sizeof(*fwd));
+ if (!fwd)
+ return 0;
+ /* initialize fwd mandatory fields */
+ fwd->c_mode = 0; /* pre-compile (parse) time */
+ fwd->c_file = NULL;
+ fwd->p_host.expr_s = NULL;
+ fwd->p_by.nn_expr_s = NULL;
+ fwd->p_by.np_expr_s = NULL;
+ fwd->p_for.nn_expr_s = NULL;
+ fwd->p_for.np_expr_s = NULL;
+ /* assign */
+ curproxy->http_ext->fwd = fwd;
+ return 1;
+}
+
+/* Ensure http_ext->xff is properly allocated and
+ * initialized for <curproxy>.
+ * The function will leverage http_ext_prepare() to make
+ * sure http_ext is properly allocated and initialized as well.
+ * Returns 1 for success and 0 for failure (memory error)
+ */
+int http_ext_xff_prepare(struct proxy *curproxy)
+{
+ struct http_ext_xff *xff;
+
+ if (!http_ext_prepare(curproxy))
+ return 0;
+ if (curproxy->http_ext->xff)
+ return 1; /* nothing to do */
+
+ xff = malloc(sizeof(*xff));
+ if (!xff)
+ return 0;
+ /* initialize xff mandatory fields */
+ xff->hdr_name = IST_NULL;
+ /* assign */
+ curproxy->http_ext->xff = xff;
+ return 1;
+}
+
+/* Ensure http_ext->xot is properly allocated and
+ * initialized for <curproxy>.
+ * The function will leverage http_ext_prepare() to make
+ * sure http_ext is properly allocated and initialized as well.
+ * Returns 1 for success and 0 for failure (memory error)
+ */
+int http_ext_xot_prepare(struct proxy *curproxy)
+{
+ struct http_ext_xot *xot;
+
+ if (!http_ext_prepare(curproxy))
+ return 0;
+ if (curproxy->http_ext->xot)
+ return 1; /* nothing to do */
+
+ xot = malloc(sizeof(*xot));
+ if (!xot)
+ return 0;
+ /* initialize xot mandatory fields */
+ xot->hdr_name = IST_NULL;
+ /* assign */
+ curproxy->http_ext->xot = xot;
+ return 1;
+}
+
+/* deep clean http_ext->fwd parameter for <curproxy>
+ * http_ext->fwd will be freed
+ * clean behavior will differ depending on http_ext->fwd
+ * state. If fwd is in 'parsed' state, parsing hints will be
+ * cleaned. Else, it means fwd is in 'compiled' state, in this
+ * case we're cleaning compiled results.
+ * This is because parse and compile memory areas are shared in
+ * a single union to optimize struct http_ext_7239 size.
+ */
/* deep clean http_ext->fwd parameter for <curproxy>
 * http_ext->fwd will be freed
 * clean behavior will differ depending on http_ext->fwd
 * state. If fwd is in 'parsed' state, parsing hints will be
 * cleaned. Else, it means fwd is in 'compiled' state, in this
 * case we're cleaning compiled results.
 * This is because parse and compile memory areas are shared in
 * a single union to optimize struct http_ext_7239 size.
 */
void http_ext_7239_clean(struct proxy *curproxy)
{
	struct http_ext_7239 *clean;

	if (!curproxy->http_ext)
		return;
	clean = curproxy->http_ext->fwd;
	if (!clean)
		return; /* nothing to do */
	if (!clean->c_mode) {
		/* parsed state: the union holds raw expression strings */
		ha_free(&clean->c_file);
		ha_free(&clean->p_host.expr_s);
		ha_free(&clean->p_by.nn_expr_s);
		ha_free(&clean->p_by.np_expr_s);
		ha_free(&clean->p_for.nn_expr_s);
		ha_free(&clean->p_for.np_expr_s);
	}
	else {
		/* compiled state: the union holds sample expressions */
		release_sample_expr(clean->p_host.expr);
		clean->p_host.expr = NULL;
		release_sample_expr(clean->p_by.nn_expr);
		clean->p_by.nn_expr = NULL;
		release_sample_expr(clean->p_by.np_expr);
		clean->p_by.np_expr = NULL;
		release_sample_expr(clean->p_for.nn_expr);
		clean->p_for.nn_expr = NULL;
		release_sample_expr(clean->p_for.np_expr);
		clean->p_for.np_expr = NULL;
	}
	/* free fwd */
	ha_free(&curproxy->http_ext->fwd);
}
+
+/* deep clean http_ext->xff parameter for <curproxy>
+ * http_ext->xff will be freed
+ */
+void http_ext_xff_clean(struct proxy *curproxy)
+{
+ struct http_ext_xff *clean;
+
+ if (!curproxy->http_ext)
+ return;
+ clean = curproxy->http_ext->xff;
+ if (!clean)
+ return; /* nothing to do */
+ istfree(&clean->hdr_name);
+ /* free xff */
+ ha_free(&curproxy->http_ext->xff);
+}
+
+/* deep clean http_ext->xot parameter for <curproxy>
+ * http_ext->xot will be freed
+ */
+void http_ext_xot_clean(struct proxy *curproxy)
+{
+ struct http_ext_xot *clean;
+
+ if (!curproxy->http_ext)
+ return;
+ clean = curproxy->http_ext->xot;
+ if (!clean)
+ return; /* nothing to do */
+ istfree(&clean->hdr_name);
+ /* free xot */
+ ha_free(&curproxy->http_ext->xot);
+}
+
+/* duplicate http_ext->fwd parameters from <def> to <cpy>
+ * performs the required memory allocation and initialization
+ */
/* duplicate http_ext->fwd parameters from <def> to <cpy>
 * performs the required memory allocation and initialization
 * Only works while <def> is still in parsed (pre-compile) state.
 * NOTE(review): strdup()/allocation results are not checked here, making
 * the copy best-effort on memory pressure — confirm this is intended.
 */
void http_ext_7239_dup(const struct proxy *def, struct proxy *cpy)
{
	struct http_ext_7239 *dest = NULL;
	struct http_ext_7239 *orig = NULL;

	/* feature requires backend cap */
	if (!(cpy->cap & PR_CAP_BE))
		return;

	if (def->http_ext == NULL || def->http_ext->fwd == NULL)
		return;

	orig = def->http_ext->fwd;

	/* once compiled, the union holds sample expressions, not strings */
	if (orig->c_mode)
		return; /* copy not supported once compiled */

	if (!http_ext_7239_prepare(cpy))
		return;

	dest = cpy->http_ext->fwd;

	/* config location (for later error reporting) */
	if (orig->c_file)
		dest->c_file = strdup(orig->c_file);
	dest->c_line = orig->c_line;
	/* proto */
	dest->p_proto = orig->p_proto;
	/* host */
	dest->p_host.mode = orig->p_host.mode;
	if (orig->p_host.expr_s)
		dest->p_host.expr_s = strdup(orig->p_host.expr_s);
	/* by - nodename */
	dest->p_by.nn_mode = orig->p_by.nn_mode;
	if (orig->p_by.nn_expr_s)
		dest->p_by.nn_expr_s = strdup(orig->p_by.nn_expr_s);
	/* by - nodeport */
	dest->p_by.np_mode = orig->p_by.np_mode;
	if (orig->p_by.np_expr_s)
		dest->p_by.np_expr_s = strdup(orig->p_by.np_expr_s);
	/* for - nodename */
	dest->p_for.nn_mode = orig->p_for.nn_mode;
	if (orig->p_for.nn_expr_s)
		dest->p_for.nn_expr_s = strdup(orig->p_for.nn_expr_s);
	/* for - nodeport */
	dest->p_for.np_mode = orig->p_for.np_mode;
	if (orig->p_for.np_expr_s)
		dest->p_for.np_expr_s = strdup(orig->p_for.np_expr_s);
}
+
+/* duplicate http_ext->xff parameters from <def> to <cpy>
+ * performs the required memory allocation and initialization
+ */
+void http_ext_xff_dup(const struct proxy *def, struct proxy *cpy)
+{
+ struct http_ext_xff *dest = NULL;
+ struct http_ext_xff *orig = NULL;
+
+ if (def->http_ext == NULL || def->http_ext->xff == NULL ||
+ !http_ext_xff_prepare(cpy))
+ return;
+
+ orig = def->http_ext->xff;
+ dest = cpy->http_ext->xff;
+
+ if (isttest(orig->hdr_name))
+ dest->hdr_name = istdup(orig->hdr_name);
+ dest->mode = orig->mode;
+ dest->except_net = orig->except_net;
+}
+
+/* duplicate http_ext->xot parameters from <def> to <cpy>
+ * performs the required memory allocation and initialization
+ */
+void http_ext_xot_dup(const struct proxy *def, struct proxy *cpy)
+{
+ struct http_ext_xot *dest = NULL;
+ struct http_ext_xot *orig = NULL;
+
+ if (def->http_ext == NULL || def->http_ext->xot == NULL ||
+ !http_ext_xot_prepare(cpy))
+ return;
+
+ orig = def->http_ext->xot;
+ dest = cpy->http_ext->xot;
+
+ if (isttest(orig->hdr_name))
+ dest->hdr_name = istdup(orig->hdr_name);
+ dest->except_net = orig->except_net;
+}
+
+/* Allocate new http_ext and initialize it
+ * if needed
+ * Returns 1 for success and 0 for failure
+ */
/* Allocate new http_ext and initialize it
 * if needed
 * Returns 1 for success and 0 for failure
 */
int http_ext_prepare(struct proxy *curproxy)
{
	if (curproxy->http_ext)
		return 1; /* nothing to do */

	curproxy->http_ext = malloc(sizeof(*curproxy->http_ext));
	if (!curproxy->http_ext)
		return 0; /* failure */
	/* first init, set supported ext to NULL so that the *_prepare()
	 * helpers can detect which extensions are allocated
	 */
	curproxy->http_ext->fwd = NULL;
	curproxy->http_ext->xff = NULL;
	curproxy->http_ext->xot = NULL;
	return 1;
}
+
+/* duplicate existing http_ext from <defproxy> to <curproxy>
+ */
+void http_ext_dup(const struct proxy *defproxy, struct proxy *curproxy)
+{
+ /* copy defproxy.http_ext members */
+ http_ext_7239_dup(defproxy, curproxy);
+ http_ext_xff_dup(defproxy, curproxy);
+ http_ext_xot_dup(defproxy, curproxy);
+}
+
+/* deep clean http_ext for <curproxy> (if previously allocated)
+ */
/* deep clean http_ext for <curproxy> (if previously allocated)
 * frees every sub-extension, then the http_ext container itself
 */
void http_ext_clean(struct proxy *curproxy)
{
	if (!curproxy->http_ext)
		return; /* nothing to do */
	/* first, free supported ext */
	http_ext_7239_clean(curproxy);
	http_ext_xff_clean(curproxy);
	http_ext_xot_clean(curproxy);

	/* then, free http_ext */
	ha_free(&curproxy->http_ext);
}
+
+/* soft clean (only clean http_ext if no more options are used) */
+void http_ext_softclean(struct proxy *curproxy)
+{
+ if (!curproxy->http_ext)
+ return; /* nothing to do */
+ /* release the whole struct only once every extension was disabled */
+ if (!curproxy->http_ext->fwd &&
+ !curproxy->http_ext->xff &&
+ !curproxy->http_ext->xot) {
+ /* no more use for http_ext, all options are disabled */
+ http_ext_clean(curproxy);
+ }
+}
+
+/* Perform some consistency checks on px.http_ext after parsing
+ * is completed.
+ * We make sure to perform a softclean in case some options were
+ * to be disabled in this check. This way we can release some memory.
+ * Returns a composition of ERR_NONE, ERR_ALERT, ERR_FATAL, ERR_WARN
+ */
+static int check_http_ext_postconf(struct proxy *px) {
+ int err = ERR_NONE;
+
+ if (px->http_ext) {
+ /* consistency check for http_ext */
+ if (px->mode != PR_MODE_HTTP && !(px->options & PR_O_HTTP_UPG)) {
+ /* http is disabled on px, yet it is required by http_ext:
+ * warn and drop each offending option instead of failing
+ */
+ if (px->http_ext->fwd) {
+ ha_warning("'option %s' ignored for %s '%s' as it requires HTTP mode.\n",
+ "forwarded", proxy_type_str(px), px->id);
+ err |= ERR_WARN;
+ http_ext_7239_clean(px);
+ }
+ if (px->http_ext->xff) {
+ ha_warning("'option %s' ignored for %s '%s' as it requires HTTP mode.\n",
+ "forwardfor", proxy_type_str(px), px->id);
+ err |= ERR_WARN;
+ http_ext_xff_clean(px);
+ }
+ if (px->http_ext->xot) {
+ ha_warning("'option %s' ignored for %s '%s' as it requires HTTP mode.\n",
+ "originalto", proxy_type_str(px), px->id);
+ err |= ERR_WARN;
+ http_ext_xot_clean(px);
+ }
+ } else if (px->http_ext->fwd) {
+ /* option "forwarded" may need to compile its expressions */
+ err |= proxy_http_compile_7239(px);
+ }
+ /* http_ext post init early cleanup: frees http_ext entirely
+ * if all options ended up disabled above
+ */
+ http_ext_softclean(px);
+
+ }
+ return err;
+}
+
+REGISTER_POST_PROXY_CHECK(check_http_ext_postconf);
+/*
+ * =========== CONV ===========
+ * related converters
+ */
+
+/* input: string representing 7239 forwarded header single value
+ * does not take arguments
+ * output: 1 if header is RFC compliant, 0 otherwise
+ * (boolean sample; <args> and <private> are unused)
+ */
+static int sample_conv_7239_valid(const struct arg *args, struct sample *smp, void *private)
+{
+ struct ist input = ist2(smp->data.u.str.area, smp->data.u.str.data);
+
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = !!http_validate_7239_header(input, FORWARDED_HEADER_ALL, NULL);
+ return 1;
+}
+
+/* input: string representing 7239 forwarded header single value
+ * argument: parameter name to look for in the header
+ * ("proto", "host", "for" or "by"; any other name leaves <field>
+ * at 0 so the validate check below fails and the conv returns 0)
+ * output: header parameter raw value, as a string
+ */
+static int sample_conv_7239_field(const struct arg *args, struct sample *smp, void *private)
+{
+ struct ist input = ist2(smp->data.u.str.area, smp->data.u.str.data);
+ struct buffer *output;
+ struct forwarded_header_ctx ctx;
+ int validate;
+ int field = 0;
+
+ if (strcmp(args->data.str.area, "proto") == 0)
+ field = FORWARDED_HEADER_PROTO;
+ else if (strcmp(args->data.str.area, "host") == 0)
+ field = FORWARDED_HEADER_HOST;
+ else if (strcmp(args->data.str.area, "for") == 0)
+ field = FORWARDED_HEADER_FOR;
+ else if (strcmp(args->data.str.area, "by") == 0)
+ field = FORWARDED_HEADER_BY;
+
+ validate = http_validate_7239_header(input, FORWARDED_HEADER_ALL, &ctx);
+ if (!(validate & field))
+ return 0; /* invalid header or header does not contain field */
+ output = get_trash_chunk();
+ switch (field) {
+ case FORWARDED_HEADER_PROTO:
+ if (ctx.proto == FORWARDED_HEADER_HTTP)
+ chunk_appendf(output, "http");
+ else if (ctx.proto == FORWARDED_HEADER_HTTPS)
+ chunk_appendf(output, "https");
+ break;
+ case FORWARDED_HEADER_HOST:
+ chunk_istcat(output, ctx.host);
+ break;
+ case FORWARDED_HEADER_FOR:
+ chunk_istcat(output, ctx.nfor.raw);
+ break;
+ case FORWARDED_HEADER_BY:
+ chunk_istcat(output, ctx.nby.raw);
+ break;
+ default:
+ break;
+ }
+ /* result lives in the trash chunk: not constant, valid until next use */
+ smp->flags &= ~SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *output;
+ return 1;
+}
+
+/* input: substring representing 7239 forwarded header node
+ * output: forwarded header nodename translated to either
+ * ipv4 address, ipv6 address or str
+ * ('_' prefix if obfuscated, or "unknown" if unknown)
+ * Returns 0 when the node cannot be extracted or its family
+ * is not supported.
+ */
+static int sample_conv_7239_n2nn(const struct arg *args, struct sample *smp, void *private)
+{
+ struct ist input = ist2(smp->data.u.str.area, smp->data.u.str.data);
+ struct forwarded_header_node ctx;
+ struct buffer *output;
+
+ if (http_7239_extract_node(&input, &ctx, 1) == 0)
+ return 0; /* could not extract node */
+ switch (ctx.nodename.type) {
+ case FORWARDED_HEADER_UNK:
+ output = get_trash_chunk();
+ chunk_appendf(output, "unknown");
+ smp->flags &= ~SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *output;
+ break;
+ case FORWARDED_HEADER_OBFS:
+ output = get_trash_chunk();
+ chunk_appendf(output, "_"); /* append obfs prefix */
+ chunk_istcat(output, ctx.nodename.obfs);
+ smp->flags &= ~SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *output;
+ break;
+ case FORWARDED_HEADER_IP:
+ /* return a native IP sample so it can feed IP-based matches */
+ if (ctx.nodename.ip.ss_family == AF_INET) {
+ smp->data.type = SMP_T_IPV4;
+ smp->data.u.ipv4 = ((struct sockaddr_in *)&ctx.nodename.ip)->sin_addr;
+ }
+ else if (ctx.nodename.ip.ss_family == AF_INET6) {
+ smp->data.type = SMP_T_IPV6;
+ smp->data.u.ipv6 = ((struct sockaddr_in6 *)&ctx.nodename.ip)->sin6_addr;
+ }
+ else
+ return 0; /* unsupported */
+ break;
+ default:
+ return 0; /* unsupported */
+ }
+ return 1;
+}
+
+/* input: substring representing 7239 forwarded header node
+ * output: forwarded header nodeport translated to either
+ * integer or str for obfuscated ('_' prefix)
+ * Returns 0 when no port was provided or the type is unsupported.
+ */
+static int sample_conv_7239_n2np(const struct arg *args, struct sample *smp, void *private)
+{
+ struct ist input = ist2(smp->data.u.str.area, smp->data.u.str.data);
+ struct forwarded_header_node ctx;
+ struct buffer *output;
+
+ if (http_7239_extract_node(&input, &ctx, 1) == 0)
+ return 0; /* could not extract node */
+
+ switch (ctx.nodeport.type) {
+ case FORWARDED_HEADER_UNK:
+ return 0; /* not provided */
+ case FORWARDED_HEADER_OBFS:
+ output = get_trash_chunk();
+ chunk_appendf(output, "_"); /* append obfs prefix */
+ chunk_istcat(output, ctx.nodeport.obfs);
+ smp->flags &= ~SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *output;
+ break;
+ case FORWARDED_HEADER_PORT:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = ctx.nodeport.port;
+ break;
+ default:
+ return 0; /* unsupported */
+ }
+
+ return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten
+ * Registers the rfc7239_* converters with the sample subsystem.
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+ { "rfc7239_is_valid", sample_conv_7239_valid, 0, NULL, SMP_T_STR, SMP_T_BOOL},
+ { "rfc7239_field", sample_conv_7239_field, ARG1(1,STR), NULL, SMP_T_STR, SMP_T_STR},
+ { "rfc7239_n2nn", sample_conv_7239_n2nn, 0, NULL, SMP_T_STR, SMP_T_ANY},
+ { "rfc7239_n2np", sample_conv_7239_n2np, 0, NULL, SMP_T_STR, SMP_T_ANY},
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
diff --git a/src/http_fetch.c b/src/http_fetch.c
new file mode 100644
index 0000000..1f3e4a0
--- /dev/null
+++ b/src/http_fetch.c
@@ -0,0 +1,2368 @@
+/*
+ * HTTP samples fetching
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/base64.h>
+#include <haproxy/channel.h>
+#include <haproxy/chunk.h>
+#include <haproxy/connection.h>
+#include <haproxy/global.h>
+#include <haproxy/h1.h>
+#include <haproxy/h1_htx.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/pool.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+/* this struct is used between calls to smp_fetch_hdr() or smp_fetch_cookie() */
+static THREAD_LOCAL struct http_hdr_ctx static_http_hdr_ctx;
+/* this is used to convert raw connection buffers to htx */
+static THREAD_LOCAL struct buffer static_raw_htx_chunk;
+static THREAD_LOCAL char *static_raw_htx_buf;
+
+/* shortcuts to the request/response channel of a sample's stream, or NULL
+ * when the sample is not attached to a stream (e.g. health checks) */
+#define SMP_REQ_CHN(smp) (smp->strm ? &smp->strm->req : NULL)
+#define SMP_RES_CHN(smp) (smp->strm ? &smp->strm->res : NULL)
+
+/* This function returns the static htx chunk, where raw connections get
+ * converted to HTX as needed for sampling. The chunk is reset before
+ * being returned, so previous content is discarded.
+ */
+struct buffer *get_raw_htx_chunk(void)
+{
+ chunk_reset(&static_raw_htx_chunk);
+ return &static_raw_htx_chunk;
+}
+
+/* per-thread allocator for the raw->HTX conversion buffer;
+ * returns 1 on success, 0 on allocation failure */
+static int alloc_raw_htx_chunk_per_thread()
+{
+ static_raw_htx_buf = malloc(global.tune.bufsize);
+ if (!static_raw_htx_buf)
+ return 0;
+ chunk_init(&static_raw_htx_chunk, static_raw_htx_buf, global.tune.bufsize);
+ return 1;
+}
+
+/* per-thread counterpart of alloc_raw_htx_chunk_per_thread() */
+static void free_raw_htx_chunk_per_thread()
+{
+ ha_free(&static_raw_htx_buf);
+}
+
+REGISTER_PER_THREAD_ALLOC(alloc_raw_htx_chunk_per_thread);
+REGISTER_PER_THREAD_FREE(free_raw_htx_chunk_per_thread);
+
+/*
+ * Returns the data from Authorization header. Function may be called more
+ * than once so data is stored in txn->auth_data. When no header is found
+ * or auth method is unknown auth_method is set to HTTP_AUTH_WRONG to avoid
+ * searching again for something we are unable to find anyway. However, if
+ * the result is valid, the cache is not reused because we would risk to
+ * have the credentials overwritten by another stream in parallel.
+ * The caller is responsible for passing a sample with a valid stream/txn,
+ * and a valid htx.
+ * Returns 1 when a supported scheme (Basic or Bearer) was decoded, else 0.
+ */
+
+static int get_http_auth(struct sample *smp, struct htx *htx)
+{
+ struct stream *s = smp->strm;
+ struct http_txn *txn = s->txn;
+ struct http_hdr_ctx ctx = { .blk = NULL };
+ struct ist hdr;
+ struct buffer auth_method;
+ char *p;
+ int len;
+
+#ifdef DEBUG_AUTH
+ printf("Auth for stream %p: %d\n", s, txn->auth.method);
+#endif
+ if (txn->auth.method == HTTP_AUTH_WRONG)
+ return 0;
+
+ /* assume failure until a supported scheme is fully decoded */
+ txn->auth.method = HTTP_AUTH_WRONG;
+
+ if (txn->flags & TX_USE_PX_CONN)
+ hdr = ist("Proxy-Authorization");
+ else
+ hdr = ist("Authorization");
+
+ ctx.blk = NULL;
+ if (!http_find_header(htx, hdr, &ctx, 0))
+ return 0;
+
+ /* the scheme name is everything before the first space */
+ p = memchr(ctx.value.ptr, ' ', ctx.value.len);
+ if (!p || p == ctx.value.ptr) /* if no space was found or if the space is the first character */
+ return 0;
+ len = p - ctx.value.ptr;
+
+ if (chunk_initlen(&auth_method, ctx.value.ptr, 0, len) != 1)
+ return 0;
+
+ /* According to RFC7235, there could be multiple spaces between the
+ * scheme and its value, we must skip all of them.
+ */
+ while (p < istend(ctx.value) && *p == ' ')
+ ++p;
+
+ chunk_initlen(&txn->auth.method_data, p, 0, istend(ctx.value) - p);
+
+ if (!strncasecmp("Basic", auth_method.area, auth_method.data)) {
+ struct buffer *http_auth = get_trash_chunk();
+
+ /* decode the base64 "user:password" credentials */
+ len = base64dec(txn->auth.method_data.area,
+ txn->auth.method_data.data,
+ http_auth->area, global.tune.bufsize - 1);
+
+ if (len < 0)
+ return 0;
+
+
+ http_auth->area[len] = '\0';
+
+ p = strchr(http_auth->area, ':');
+
+ if (!p)
+ return 0;
+
+ /* split in-place on the ':' separator */
+ txn->auth.user = http_auth->area;
+ *p = '\0';
+ txn->auth.pass = p+1;
+
+ txn->auth.method = HTTP_AUTH_BASIC;
+ return 1;
+ } else if (!strncasecmp("Bearer", auth_method.area, auth_method.data)) {
+ /* token is kept as-is in txn->auth.method_data */
+ txn->auth.method = HTTP_AUTH_BEARER;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* This function ensures that the prerequisites for an L7 fetch are ready,
+ * which means that a request or response is ready. If some data is missing,
+ * a parsing attempt is made. This is useful in TCP-based ACLs which are able
+ * to extract data from L7. If <vol> is non-null during a prefetch, another
+ * test is made to ensure the required information is not gone.
+ *
+ * The function returns :
+ * NULL with SMP_F_MAY_CHANGE in the sample flags if some data is missing to
+ * decide whether or not an HTTP message is present ;
+ * NULL if the requested data cannot be fetched or if it is certain that
+ * we'll never have any HTTP message there; this includes null strm or chn.
+ * NULL if the sample's direction does not match the channel's (i.e. the
+ * function was asked to work on the wrong channel)
+ * The HTX message if ready
+ */
+struct htx *smp_prefetch_htx(struct sample *smp, struct channel *chn, struct check *check, int vol)
+{
+ struct stream *s = smp->strm;
+ struct http_txn *txn = NULL;
+ struct htx *htx = NULL;
+ struct http_msg *msg;
+ struct htx_sl *sl;
+
+ /* reject direction mismatch between the sample and the channel */
+ if (chn &&
+ (((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ && (chn->flags & CF_ISRESP)) ||
+ ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES && !(chn->flags & CF_ISRESP))))
+ return 0;
+
+ /* Note: it is possible that <s> is NULL when called before stream
+ * initialization (eg: tcp-request connection), so this function is the
+ * one responsible for guarding against this case for all HTTP users.
+ *
+ * In the health check context, the stream and the channel must be NULL
+ * and <check> must be set. In this case, only the input buffer,
+ * corresponding to the response, is considered. It is the caller
+ * responsibility to provide <check>.
+ */
+ BUG_ON(check && (s || chn));
+ if (!s || !chn) {
+ if (check) {
+ htx = htxbuf(&check->bi);
+
+ /* Analyse not yet started */
+ if (htx_is_empty(htx) || htx->first == -1)
+ return NULL;
+
+ sl = http_get_stline(htx);
+ if (vol && !sl) {
+ /* The start-line was already forwarded, it is too late to fetch anything */
+ return NULL;
+ }
+ goto end;
+ }
+
+ return NULL;
+ }
+
+ /* lazily create the transaction on first L7 access */
+ if (!s->txn && !http_create_txn(s))
+ return NULL;
+ txn = s->txn;
+ msg = (!(chn->flags & CF_ISRESP) ? &txn->req : &txn->rsp);
+
+ if (IS_HTX_STRM(s)) {
+ htx = htxbuf(&chn->buf);
+
+ if (htx->flags & HTX_FL_PARSING_ERROR)
+ return NULL;
+
+ if (msg->msg_state < HTTP_MSG_BODY) {
+ /* Analyse not yet started */
+ if (htx_is_empty(htx) || htx->first == -1) {
+ /* Parsing is done by the mux, just wait */
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return NULL;
+ }
+ }
+ sl = http_get_stline(htx);
+ if (vol && !sl) {
+ /* The start-line was already forwarded, it is too late to fetch anything */
+ return NULL;
+ }
+ }
+ else { /* RAW mode: parse the request buffer on the fly into a
+ * temporary HTX message held in the per-thread raw chunk */
+ struct buffer *buf;
+ struct h1m h1m;
+ struct http_hdr hdrs[global.tune.max_http_hdr];
+ union h1_sl h1sl;
+ unsigned int flags = HTX_FL_NONE;
+ int ret;
+
+ /* no HTTP fetch on the response in TCP mode */
+ if (chn->flags & CF_ISRESP)
+ return NULL;
+
+ /* Now we are working on the request only */
+ buf = &chn->buf;
+ if (b_head(buf) + b_data(buf) > b_wrap(buf))
+ b_slow_realign(buf, trash.area, 0);
+
+ h1m_init_req(&h1m);
+ ret = h1_headers_to_hdr_list(b_head(buf), b_stop(buf),
+ hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &h1m, &h1sl);
+ if (ret <= 0) {
+ /* Invalid or too big */
+ if (ret < 0 || channel_full(&s->req, global.tune.maxrewrite))
+ return NULL;
+
+ /* wait for a full request */
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return NULL;
+ }
+
+ /* OK we just got a valid HTTP message. We have to convert it
+ * into an HTX message.
+ */
+ if (unlikely(h1sl.rq.v.len == 0)) {
+ /* try to convert HTTP/0.9 requests to HTTP/1.0 */
+ if (h1sl.rq.meth != HTTP_METH_GET || !h1sl.rq.u.len)
+ return NULL;
+ h1sl.rq.v = ist("HTTP/1.0");
+ }
+
+ /* Set HTX start-line flags */
+ if (h1m.flags & H1_MF_VER_11)
+ flags |= HTX_SL_F_VER_11;
+ if (h1m.flags & H1_MF_XFER_ENC)
+ flags |= HTX_SL_F_XFER_ENC;
+ flags |= HTX_SL_F_XFER_LEN;
+ if (h1m.flags & H1_MF_CHNK)
+ flags |= HTX_SL_F_CHNK;
+ else if (h1m.flags & H1_MF_CLEN)
+ flags |= HTX_SL_F_CLEN;
+
+ htx = htx_from_buf(get_raw_htx_chunk());
+ sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, h1sl.rq.m, h1sl.rq.u, h1sl.rq.v);
+ if (!sl || !htx_add_all_headers(htx, hdrs))
+ return NULL;
+ sl->info.req.meth = h1sl.rq.meth;
+ }
+
+ /* OK we just got a valid HTTP message. If not already done by
+ * HTTP analyzers, we have some minor preparation to perform so
+ * that further checks can rely on HTTP tests.
+ */
+ if (sl && msg->msg_state < HTTP_MSG_BODY) {
+ if (!(chn->flags & CF_ISRESP)) {
+ txn->meth = sl->info.req.meth;
+ if (txn->meth == HTTP_METH_GET || txn->meth == HTTP_METH_HEAD)
+ s->flags |= SF_REDIRECTABLE;
+ }
+ else {
+ if (txn->status == -1)
+ txn->status = sl->info.res.status;
+ if (!(htx->flags & HTX_FL_PROXY_RESP) && txn->server_status == -1)
+ txn->server_status = sl->info.res.status;
+ }
+ if (sl->flags & HTX_SL_F_VER_11)
+ msg->flags |= HTTP_MSGF_VER_11;
+ }
+
+ /* everything's OK */
+ end:
+ return htx;
+}
+
+/* This function fetches the method of current HTTP request and stores
+ * it in the global pattern struct as a chunk. There are two possibilities :
+ * - if the method is known (not HTTP_METH_OTHER), its identifier is stored
+ * in <len> and <ptr> is NULL ;
+ * - if the method is unknown (HTTP_METH_OTHER), <ptr> points to the text and
+ * <len> to its length.
+ * This is intended to be used with pat_match_meth() only.
+ */
+static int smp_fetch_meth(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct http_txn *txn;
+ struct htx *htx = NULL;
+ int meth;
+
+ txn = (smp->strm ? smp->strm->txn : NULL);
+ if (!txn)
+ return 0;
+
+ meth = txn->meth;
+ if (meth == HTTP_METH_OTHER) {
+ /* unknown method: the raw text must be read from the start-line */
+ htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ if (!htx)
+ return 0;
+ meth = txn->meth;
+ }
+
+ smp->data.type = SMP_T_METH;
+ smp->data.u.meth.meth = meth;
+ if (meth == HTTP_METH_OTHER) {
+ struct htx_sl *sl;
+
+ /* point into the HTX start-line; the data is not owned by us */
+ sl = http_get_stline(htx);
+ smp->flags |= SMP_F_CONST;
+ smp->data.u.meth.str.area = HTX_SL_REQ_MPTR(sl);
+ smp->data.u.meth.str.data = HTX_SL_REQ_MLEN(sl);
+ }
+ smp->flags |= SMP_F_VOL_1ST;
+ return 1;
+}
+
+/* returns the request HTTP version as a string, i.e. the part following
+ * the '/' in the start-line version token (e.g. "1.1").
+ */
+static int smp_fetch_rqver(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ char *ptr;
+ int len;
+
+ if (!htx)
+ return 0;
+
+ sl = http_get_stline(htx);
+ len = HTX_SL_REQ_VLEN(sl);
+ ptr = HTX_SL_REQ_VPTR(sl);
+
+ /* skip up to and including the '/' separator */
+ while ((len-- > 0) && (*ptr++ != '/'));
+ if (len <= 0)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = ptr;
+ smp->data.u.str.data = len;
+
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ return 1;
+}
+
+/* returns the response HTTP version as a string (part after the '/');
+ * also usable from health checks through <check>.
+ */
+static int smp_fetch_stver(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_RES_CHN(smp);
+ struct check *check = objt_check(smp->sess->origin);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct htx_sl *sl;
+ char *ptr;
+ int len;
+
+ if (!htx)
+ return 0;
+
+ sl = http_get_stline(htx);
+ len = HTX_SL_RES_VLEN(sl);
+ ptr = HTX_SL_RES_VPTR(sl);
+
+ /* skip up to and including the '/' separator */
+ while ((len-- > 0) && (*ptr++ != '/'));
+ if (len <= 0)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = ptr;
+ smp->data.u.str.data = len;
+
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ return 1;
+}
+
+/* 3. Check on Status Code. We manipulate integers here.
+ * Parses the 3-digit status from the response start-line.
+ */
+static int smp_fetch_stcode(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_RES_CHN(smp);
+ struct check *check = objt_check(smp->sess->origin);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct htx_sl *sl;
+ char *ptr;
+ int len;
+
+ if (!htx)
+ return 0;
+
+ sl = http_get_stline(htx);
+ len = HTX_SL_RES_CLEN(sl);
+ ptr = HTX_SL_RES_CPTR(sl);
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = __strl2ui(ptr, len);
+ smp->flags = SMP_F_VOL_1ST;
+ return 1;
+}
+
+/* It returns the server or the txn status code, depending on the keyword:
+ * keywords starting with 't' select txn->status, others select
+ * txn->server_status. A value of -1 means "not yet known" and triggers a
+ * prefetch of the response.
+ */
+static int smp_fetch_srv_status(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct http_txn *txn;
+ short status;
+
+ txn = (smp->strm ? smp->strm->txn : NULL);
+ if (!txn)
+ return 0;
+
+ status = (kw[0] == 't' ? txn->status : txn->server_status);
+ if (status == -1) {
+ struct channel *chn = SMP_RES_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+
+ if (!htx)
+ return 0;
+
+ /* prefetch may have filled in the status from the start-line */
+ status = (kw[0] == 't' ? txn->status : txn->server_status);
+ }
+
+ if (kw[0] != 't')
+ smp->flags = SMP_F_VOL_1ST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = status;
+ return 1;
+}
+
+/* returns the stream's unique-id, generating it from the frontend's
+ * unique-id-format on first use. Fails when no format is configured
+ * or no stream is attached.
+ */
+static int smp_fetch_uniqueid(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct ist unique_id;
+
+ if (LIST_ISEMPTY(&smp->sess->fe->format_unique_id))
+ return 0;
+
+ if (!smp->strm)
+ return 0;
+
+ unique_id = stream_generate_unique_id(smp->strm, &smp->sess->fe->format_unique_id);
+ if (!isttest(unique_id))
+ return 0;
+
+ smp->data.u.str.area = smp->strm->unique_id.ptr;
+ smp->data.u.str.data = smp->strm->unique_id.len;
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ return 1;
+}
+
+/* Returns a string block containing all headers including the
+ * empty line which separates headers from the body. This is useful
+ * for some headers analysis.
+ */
+static int smp_fetch_hdrs(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.hdrs, res.hdrs */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct buffer *temp;
+ int32_t pos;
+
+ if (!htx)
+ return 0;
+ temp = get_trash_chunk();
+ /* serialize each header back into "name: value\r\n" form */
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_HDR) {
+ struct ist n = htx_get_blk_name(htx, blk);
+ struct ist v = htx_get_blk_value(htx, blk);
+
+ if (!h1_format_htx_hdr(n, v, temp))
+ return 0;
+ }
+ else if (type == HTX_BLK_EOH) {
+ /* end of headers: append the separating empty line */
+ if (!chunk_memcat(temp, "\r\n", 2))
+ return 0;
+ break;
+ }
+ }
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *temp;
+ return 1;
+}
+
+/* Returns the header request in a length/value encoded format.
+ * This is useful for exchanges with the SPOE.
+ *
+ * A "length value" is a multibyte code encoding numbers. It uses the
+ * SPOE format. The encoding is the following:
+ *
+ * Each couple "header name" / "header value" is composed
+ * like this:
+ * "length value" "header name bytes"
+ * "length value" "header value bytes"
+ * When the last header is reached, the header name and the header
+ * value are empty. Their length are 0
+ */
+static int smp_fetch_hdrs_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.hdrs_bin, res.hdrs_bin */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct buffer *temp;
+ char *p, *end;
+ int32_t pos;
+ int ret;
+
+ if (!htx)
+ return 0;
+ temp = get_trash_chunk();
+ p = temp->area;
+ end = temp->area + temp->size;
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist n, v;
+
+ if (type == HTX_BLK_HDR) {
+ n = htx_get_blk_name(htx,blk);
+ v = htx_get_blk_value(htx, blk);
+
+ /* encode the header name. */
+ ret = encode_varint(n.len, &p, end);
+ if (ret == -1)
+ return 0;
+ if (p + n.len > end)
+ return 0;
+ memcpy(p, n.ptr, n.len);
+ p += n.len;
+
+ /* encode the header value. */
+ ret = encode_varint(v.len, &p, end);
+ if (ret == -1)
+ return 0;
+ if (p + v.len > end)
+ return 0;
+ memcpy(p, v.ptr, v.len);
+ p += v.len;
+
+ }
+ else if (type == HTX_BLK_EOH) {
+ /* encode the end of the header list with empty
+ * header name and header value.
+ */
+ ret = encode_varint(0, &p, end);
+ if (ret == -1)
+ return 0;
+ ret = encode_varint(0, &p, end);
+ if (ret == -1)
+ return 0;
+ break;
+ }
+ }
+
+ /* Initialise sample data which will be filled. */
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = temp->area;
+ smp->data.u.str.data = p - temp->area;
+ smp->data.u.str.size = temp->size;
+ return 1;
+}
+
+/* returns the longest available part of the body. This requires that the body
+ * has been waited for using http-buffer-request.
+ * SMP_F_MAY_CHANGE is set when more body data may still arrive.
+ */
+static int smp_fetch_body(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.body, res.body */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct buffer *temp;
+ int32_t pos;
+ int finished = 0;
+
+ if (!htx)
+ return 0;
+
+ temp = get_trash_chunk();
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT) {
+ /* trailers/end-of-trailers mark the end of the body */
+ finished = 1;
+ break;
+ }
+ if (type == HTX_BLK_DATA) {
+ if (!h1_format_htx_data(htx_get_blk_value(htx, blk), temp, 0))
+ return 0;
+ }
+ }
+
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str = *temp;
+ smp->flags = SMP_F_VOL_TEST;
+
+ /* body may still grow unless the channel is full or the producer ended */
+ if (!finished && (check || (chn && !channel_full(chn, global.tune.maxrewrite) &&
+ !(chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)))))
+ smp->flags |= SMP_F_MAY_CHANGE;
+
+ return 1;
+}
+
+
+/* returns the available length of the body. This requires that the body
+ * has been waited for using http-buffer-request.
+ * Counts only DATA blocks currently present in the HTX message.
+ */
+static int smp_fetch_body_len(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.body_len, res.body_len */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ int32_t pos;
+ unsigned long long len = 0;
+
+ if (!htx)
+ return 0;
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA)
+ len += htx_get_blksz(blk);
+ }
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = len;
+ smp->flags = SMP_F_VOL_TEST;
+ return 1;
+}
+
+
+/* returns the advertised length of the body, or the advertised size of the
+ * chunks available in the buffer. This requires that the body has been waited
+ * for using http-buffer-request.
+ * Buffered DATA plus the announced remaining payload (htx->extra) when known.
+ */
+static int smp_fetch_body_size(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.body_size, res.body_size */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ int32_t pos;
+ unsigned long long len = 0;
+
+ if (!htx)
+ return 0;
+
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA)
+ len += htx_get_blksz(blk);
+ }
+ /* add the announced not-yet-received payload when its size is known */
+ if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+ len += htx->extra;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = len;
+ smp->flags = SMP_F_VOL_TEST;
+ return 1;
+}
+
+
+/* 4. Check on URL/URI. A pointer to the URI is stored.
+ * The sample points directly into the start-line, hence SMP_F_CONST.
+ */
+static int smp_fetch_url(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+
+ if (!htx)
+ return 0;
+ sl = http_get_stline(htx);
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = HTX_SL_REQ_UPTR(sl);
+ smp->data.u.str.data = HTX_SL_REQ_ULEN(sl);
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ return 1;
+}
+
+/* returns the IPv4 destination address embedded in an absolute request URI,
+ * when the URI parses to an AF_INET address (IPv4 only).
+ */
+static int smp_fetch_url_ip(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ struct sockaddr_storage addr;
+
+ memset(&addr, 0, sizeof(addr));
+
+ if (!htx)
+ return 0;
+ sl = http_get_stline(htx);
+ if (url2sa(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), &addr, NULL) < 0)
+ return 0;
+
+ if (addr.ss_family != AF_INET)
+ return 0;
+
+ smp->data.type = SMP_T_IPV4;
+ smp->data.u.ipv4 = ((struct sockaddr_in *)&addr)->sin_addr;
+ smp->flags = 0;
+ return 1;
+}
+
+/* returns the port embedded in an absolute request URI, when the URI
+ * parses to an AF_INET address (IPv4 only).
+ */
+static int smp_fetch_url_port(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ struct sockaddr_storage addr;
+
+ memset(&addr, 0, sizeof(addr));
+
+ if (!htx)
+ return 0;
+ sl = http_get_stline(htx);
+ if (url2sa(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), &addr, NULL) < 0)
+ return 0;
+
+ if (addr.ss_family != AF_INET)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = get_host_port(&addr);
+ smp->flags = 0;
+ return 1;
+}
+
+/* Fetch an HTTP header. A pointer to the beginning of the value is returned.
+ * Accepts an optional argument of type string containing the header field name,
+ * and an optional argument of type signed or unsigned integer to request an
+ * explicit occurrence of the header. Note that in the event of a missing name,
+ * headers are considered from the first one. It does not stop on commas and
+ * returns full lines instead (useful for User-Agent or Date for example).
+ */
+static int smp_fetch_fhdr(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.fhdr, res.fhdr */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx *ctx = smp->ctx.a[0];
+ struct ist name;
+ int occ = 0;
+
+ if (!ctx) {
+ /* first call */
+ ctx = &static_http_hdr_ctx;
+ ctx->blk = NULL;
+ smp->ctx.a[0] = ctx;
+ }
+
+ if (args[0].type != ARGT_STR)
+ return 0;
+ name = ist2(args[0].data.str.area, args[0].data.str.data);
+
+ if (args[1].type == ARGT_SINT)
+ occ = args[1].data.sint;
+
+ if (!htx)
+ return 0;
+
+ if (ctx && !(smp->flags & SMP_F_NOT_LAST))
+ /* search for header from the beginning */
+ ctx->blk = NULL;
+
+ if (!occ && !(smp->opt & SMP_OPT_ITERATE))
+ /* no explicit occurrence and single fetch => last header by default */
+ occ = -1;
+
+ if (!occ)
+ /* prepare to report multiple occurrences for ACL fetches */
+ smp->flags |= SMP_F_NOT_LAST;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_VOL_HDR | SMP_F_CONST;
+ if (http_get_htx_fhdr(htx, name, occ, ctx, &smp->data.u.str.area, &smp->data.u.str.data))
+ return 1;
+ /* no (more) occurrence: clear the iteration flag before failing */
+ smp->flags &= ~SMP_F_NOT_LAST;
+ return 0;
+}
+
+/* 6. Check on HTTP header count. The number of occurrences is returned.
+ * Accepts exactly 1 argument of type string. It does not stop on commas and
+ * returns full lines instead (useful for User-Agent or Date for example).
+ * Without a name argument, every header is counted.
+ */
+static int smp_fetch_fhdr_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.fhdr_cnt, res.fhdr_cnt */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx ctx;
+ struct ist name;
+ int cnt;
+
+ if (!htx)
+ return 0;
+
+ if (args->type == ARGT_STR) {
+ name = ist2(args->data.str.area, args->data.str.data);
+ } else {
+ name = IST_NULL;
+ }
+
+ ctx.blk = NULL;
+ cnt = 0;
+ while (http_find_header(htx, name, &ctx, 1))
+ cnt++;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = cnt;
+ smp->flags = SMP_F_VOL_HDR;
+ return 1;
+}
+
+/* Returns the list of all header names of the message, concatenated into the
+ * trash chunk and separated by a delimiter character. The delimiter defaults
+ * to ',' and may be overridden by an optional string argument (first char
+ * only). The returned sample is of type string.
+ */
+static int smp_fetch_hdr_names(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.hdr_names, res.hdr_names */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct buffer *temp;
+ char del = ',';
+
+ int32_t pos;
+
+ if (!htx)
+ return 0;
+
+ /* NOTE(review): an empty string argument would make del = '\0' — presumably
+ * rejected at configuration parsing; confirm.
+ */
+ if (args->type == ARGT_STR)
+ del = *args[0].data.str.area;
+
+ temp = get_trash_chunk();
+ /* walk every HTX block up to end-of-headers, collecting header names */
+ for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *blk = htx_get_blk(htx, pos);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ struct ist n;
+
+ if (type == HTX_BLK_EOH)
+ break;
+ if (type != HTX_BLK_HDR)
+ continue;
+ n = htx_get_blk_name(htx, blk);
+
+ /* prepend the delimiter before every name except the first one */
+ if (temp->data)
+ temp->area[temp->data++] = del;
+ chunk_istcat(temp, n);
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *temp;
+ smp->flags = SMP_F_VOL_HDR;
+ return 1;
+}
+
+/* Fetch an HTTP header. A pointer to the beginning of the value is returned.
+ * Accepts an optional argument of type string containing the header field name,
+ * and an optional argument of type signed or unsigned integer to request an
+ * explicit occurrence of the header. Note that in the event of a missing name,
+ * headers are considered from the first one. A negative occurrence counts from
+ * the last one. Iteration state is kept in smp->ctx.a[0] across calls; the
+ * SMP_F_NOT_LAST flag tells the caller more occurrences may follow.
+ */
+static int smp_fetch_hdr(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.hdr / hdr, res.hdr / shdr */
+ struct channel *chn = ((kw[0] == 'h' || kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[0] == 's' || kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx *ctx = smp->ctx.a[0];
+ struct ist name;
+ int occ = 0;
+
+ if (!ctx) {
+ /* first call */
+ ctx = &static_http_hdr_ctx;
+ ctx->blk = NULL;
+ smp->ctx.a[0] = ctx;
+ }
+
+ if (args[0].type != ARGT_STR)
+ return 0;
+ name = ist2(args[0].data.str.area, args[0].data.str.data);
+
+ if (args[1].type == ARGT_SINT)
+ occ = args[1].data.sint;
+
+ if (!htx)
+ return 0;
+
+ if (ctx && !(smp->flags & SMP_F_NOT_LAST))
+ /* search for header from the beginning */
+ ctx->blk = NULL;
+
+ if (!occ && !(smp->opt & SMP_OPT_ITERATE))
+ /* no explicit occurrence and single fetch => last header by default */
+ occ = -1;
+
+ if (!occ)
+ /* prepare to report multiple occurrences for ACL fetches */
+ smp->flags |= SMP_F_NOT_LAST;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_VOL_HDR | SMP_F_CONST;
+ if (http_get_htx_hdr(htx, name, occ, ctx, &smp->data.u.str.area, &smp->data.u.str.data))
+ return 1;
+
+ /* no more occurrences: clear the iteration flag before failing */
+ smp->flags &= ~SMP_F_NOT_LAST;
+ return 0;
+}
+
+/* Same than smp_fetch_hdr() but only relies on the sample direction to choose
+ * the right channel. So instead of duplicating the code, we just change the
+ * keyword and then fallback on smp_fetch_hdr().
+ */
+static int smp_fetch_chn_hdr(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* rewrite kw so that smp_fetch_hdr()'s kw[] tests select the proper channel */
+ kw = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ ? "req.hdr" : "res.hdr");
+ return smp_fetch_hdr(args, smp, kw, private);
+}
+
+/* Returns the number of comma-separated occurrences of an HTTP header as a
+ * SINT sample. Accepts an optional argument of type string naming the header;
+ * when the name is absent, every header value is counted.
+ */
+static int smp_fetch_hdr_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.hdr_cnt / hdr_cnt, res.hdr_cnt / shdr_cnt */
+ struct channel *chn = ((kw[0] == 'h' || kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[0] == 's' || kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx ctx;
+ struct ist name;
+ int cnt;
+
+ if (!htx)
+ return 0;
+
+ /* missing name => IST_NULL matches any header */
+ if (args->type == ARGT_STR) {
+ name = ist2(args->data.str.area, args->data.str.data);
+ } else {
+ name = IST_NULL;
+ }
+
+ ctx.blk = NULL;
+ cnt = 0;
+ /* last argument "0" splits each line on commas (per-value counting) */
+ while (http_find_header(htx, name, &ctx, 0))
+ cnt++;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = cnt;
+ smp->flags = SMP_F_VOL_HDR;
+ return 1;
+}
+
+/* Fetch an HTTP header's integer value. The integer value is returned. It
+ * takes a mandatory argument of type string and an optional one of type int
+ * to designate a specific occurrence. It returns a signed integer, which
+ * may or may not be appropriate for everything. Relies on smp_fetch_hdr().
+ */
+static int smp_fetch_hdr_val(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ int ret = smp_fetch_hdr(args, smp, kw, private);
+
+ /* convert the string sample in place to a signed integer */
+ if (ret > 0) {
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = strl2ic(smp->data.u.str.area,
+ smp->data.u.str.data);
+ }
+
+ return ret;
+}
+
+/* Fetch an HTTP header's IP value. takes a mandatory argument of type string
+ * and an optional one of type int to designate a specific occurrence.
+ * It returns an IPv4 or IPv6 address. Addresses surrounded by invalid chars
+ * are rejected. However IPv4 addresses may be followed with a colon and a
+ * valid port number. Iterates over occurrences (via smp_fetch_hdr) until one
+ * of them parses as an address.
+ */
+static int smp_fetch_hdr_ip(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *temp = get_trash_chunk();
+ int ret, len;
+ int port;
+
+ while ((ret = smp_fetch_hdr(args, smp, kw, private)) > 0) {
+ /* copy the value into the trash to get a NUL-terminated string;
+ * values too large for the trash are skipped
+ */
+ if (smp->data.u.str.data < temp->size - 1) {
+ memcpy(temp->area, smp->data.u.str.area,
+ smp->data.u.str.data);
+ temp->area[smp->data.u.str.data] = '\0';
+ len = url2ipv4((char *) temp->area, &smp->data.u.ipv4);
+ if (len > 0 && len == smp->data.u.str.data) {
+ /* plain IPv4 address */
+ smp->data.type = SMP_T_IPV4;
+ break;
+ } else if (len > 0 && temp->area[len] == ':' &&
+ strl2irc(temp->area + len + 1, smp->data.u.str.data - len - 1, &port) == 0 &&
+ port >= 0 && port <= 65535) {
+ /* IPv4 address suffixed with ':' followed by a valid port number */
+ smp->data.type = SMP_T_IPV4;
+ break;
+ } else if (temp->area[0] == '[' && temp->area[smp->data.u.str.data-1] == ']') {
+ /* IPv6 address enclosed in square brackets */
+ temp->area[smp->data.u.str.data-1] = '\0';
+ if (inet_pton(AF_INET6, temp->area+1, &smp->data.u.ipv6)) {
+ smp->data.type = SMP_T_IPV6;
+ break;
+ }
+ } else if (inet_pton(AF_INET6, temp->area, &smp->data.u.ipv6)) {
+ /* plain IPv6 address */
+ smp->data.type = SMP_T_IPV6;
+ break;
+ }
+ }
+
+ /* if the header doesn't match an IP address, fetch next one */
+ if (!(smp->flags & SMP_F_NOT_LAST))
+ return 0;
+ }
+ return ret;
+}
+
+/* 8. Check on URI PATH. A pointer to the PATH is stored. The path starts at the
+ * first '/' after the possible hostname. It ends before the possible '?' except
+ * for 'pathq' keyword. The returned sample is a constant string pointing into
+ * the request buffer.
+ */
+static int smp_fetch_path(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ struct ist path;
+ struct http_uri_parser parser;
+
+ if (!htx)
+ return 0;
+
+ sl = http_get_stline(htx);
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+
+ /* "pathq"/"baseq" keep the query string; other keywords cut at '?' */
+ if (kw[4] == 'q' && (kw[0] == 'p' || kw[0] == 'b')) // pathq or baseq
+ path = http_parse_path(&parser);
+ else
+ path = iststop(http_parse_path(&parser), '?');
+
+ if (!isttest(path))
+ return 0;
+
+ /* OK, we got the '/' ! */
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = path.ptr;
+ smp->data.u.str.data = path.len;
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ return 1;
+}
+
+/* This produces a concatenation of the first occurrence of the Host header
+ * followed by the path component if it begins with a slash ('/'). This means
+ * that '*' will not be added, resulting in exactly the first Host entry.
+ * If no Host header is found, then the path is returned as-is. The returned
+ * value is stored in the trash so it does not need to be marked constant.
+ * The returned sample is of type string.
+ */
+static int smp_fetch_base(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ struct buffer *temp;
+ struct http_hdr_ctx ctx;
+ struct ist path;
+ struct http_uri_parser parser;
+
+ if (!htx)
+ return 0;
+
+ /* no (or empty) Host header: degrade to the plain path fetch */
+ ctx.blk = NULL;
+ if (!http_find_header(htx, ist("Host"), &ctx, 0) || !ctx.value.len)
+ return smp_fetch_path(args, smp, kw, private);
+
+ /* OK we have the header value in ctx.value */
+ temp = get_trash_chunk();
+ chunk_istcat(temp, ctx.value);
+
+ /* now retrieve the path */
+ sl = http_get_stline(htx);
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+ path = http_parse_path(&parser);
+ if (isttest(path)) {
+ size_t len;
+
+ /* "baseq" keeps the query string; otherwise stop at '?' */
+ if (kw[4] == 'q' && kw[0] == 'b') { // baseq
+ len = path.len;
+ } else {
+ for (len = 0; len < path.len && *(path.ptr + len) != '?'; len++)
+ ;
+ }
+
+ if (len && *(path.ptr) == '/')
+ chunk_memcat(temp, path.ptr, len);
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *temp;
+ smp->flags = SMP_F_VOL_1ST;
+ return 1;
+}
+
+/* This produces a 32-bit hash of the concatenation of the first occurrence of
+ * the Host header followed by the path component if it begins with a slash ('/').
+ * This means that '*' will not be added, resulting in exactly the first Host
+ * entry. If no Host header is found, then the path is used. The resulting value
+ * is hashed using the path hash followed by a full avalanche hash and provides a
+ * 32-bit integer value. This fetch is useful for tracking per-path activity on
+ * high-traffic sites without having to store whole paths.
+ */
+static int smp_fetch_base32(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ struct http_hdr_ctx ctx;
+ struct ist path;
+ unsigned int hash = 0;
+ struct http_uri_parser parser;
+
+ if (!htx)
+ return 0;
+
+ ctx.blk = NULL;
+ if (http_find_header(htx, ist("Host"), &ctx, 0)) {
+ /* OK we have the header value in ctx.value; fold it into the
+ * hash: h = c + h*65599 (h<<6 + h<<16 - h)
+ */
+ while (ctx.value.len--)
+ hash = *(ctx.value.ptr++) + (hash << 6) + (hash << 16) - hash;
+ }
+
+ /* now retrieve the path */
+ sl = http_get_stline(htx);
+ parser = http_uri_parser_init(htx_sl_req_uri(sl));
+ path = http_parse_path(&parser);
+ if (isttest(path)) {
+ size_t len;
+
+ /* only hash the part of the path before any '?' */
+ for (len = 0; len < path.len && *(path.ptr + len) != '?'; len++)
+ ;
+
+ if (len && *(path.ptr) == '/') {
+ while (len--)
+ hash = *(path.ptr++) + (hash << 6) + (hash << 16) - hash;
+ }
+ }
+
+ /* final avalanche pass to spread the bits */
+ hash = full_hash(hash);
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = hash;
+ smp->flags = SMP_F_VOL_1ST;
+ return 1;
+}
+
+/* This concatenates the source address with the 32-bit hash of the Host and
+ * path as returned by smp_fetch_base32(). The idea is to have per-source and
+ * per-path counters. The result is a binary block from 8 to 20 bytes depending
+ * on the source address length. The path hash is stored before the address so
+ * that in environments where IPv6 is insignificant, truncating the output to
+ * 8 bytes would still work.
+ */
+static int smp_fetch_base32_src(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ const struct sockaddr_storage *src = (smp->strm ? sc_src(smp->strm->scf) : NULL);
+ struct buffer *temp;
+
+ if (!src)
+ return 0;
+
+ if (!smp_fetch_base32(args, smp, kw, private))
+ return 0;
+
+ /* layout: 4-byte network-order hash, then the raw source address */
+ temp = get_trash_chunk();
+ *(unsigned int *) temp->area = htonl(smp->data.u.sint);
+ temp->data += sizeof(unsigned int);
+
+ switch (src->ss_family) {
+ case AF_INET:
+ memcpy(temp->area + temp->data,
+ &((struct sockaddr_in *)src)->sin_addr,
+ 4);
+ temp->data += 4;
+ break;
+ case AF_INET6:
+ memcpy(temp->area + temp->data,
+ &((struct sockaddr_in6 *)src)->sin6_addr,
+ 16);
+ temp->data += 16;
+ break;
+ default:
+ /* unsupported address family: no sample */
+ return 0;
+ }
+
+ smp->data.u.str = *temp;
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+/* Extracts the query string, which comes after the question mark '?'. If no
+ * question mark is found, nothing is returned. Otherwise it returns a sample
+ * of type string carrying the whole query string (constant, pointing into the
+ * request buffer).
+ */
+static int smp_fetch_query(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct htx_sl *sl;
+ char *ptr, *end;
+
+ if (!htx)
+ return 0;
+
+ sl = http_get_stline(htx);
+ ptr = HTX_SL_REQ_UPTR(sl);
+ end = HTX_SL_REQ_UPTR(sl) + HTX_SL_REQ_ULEN(sl);
+
+ /* look up the '?'; on match ptr is left just past it */
+ do {
+ if (ptr == end)
+ return 0;
+ } while (*ptr++ != '?');
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = ptr;
+ smp->data.u.str.data = end - ptr;
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ return 1;
+}
+
+/* Returns a boolean sample set to true when a valid HTTP message is available
+ * on the request channel (the HTX prefetch succeeds without requiring the
+ * start of the message body).
+ */
+static int smp_fetch_proto_http(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 0);
+
+ if (!htx)
+ return 0;
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = 1;
+ return 1;
+}
+
+/* return a valid test if the current request is the first one on the connection,
+ * i.e. the transaction does not carry the TX_NOT_FIRST flag
+ */
+static int smp_fetch_http_first_req(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!smp->strm)
+ return 0;
+
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = !(smp->strm->txn->flags & TX_NOT_FIRST);
+ return 1;
+}
+
+/* Fetch the authentication method if there is an Authorization header. It
+ * relies on get_http_auth(). Returns a constant string among "Basic",
+ * "Digest" and "Bearer", or fails for any other scheme.
+ */
+static int smp_fetch_http_auth_type(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct http_txn *txn;
+
+ if (!htx)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!get_http_auth(smp, htx))
+ return 0;
+
+ switch (txn->auth.method) {
+ case HTTP_AUTH_BASIC:
+ smp->data.u.str.area = "Basic";
+ smp->data.u.str.data = 5;
+ break;
+ case HTTP_AUTH_DIGEST:
+ /* Unexpected because not supported */
+ smp->data.u.str.area = "Digest";
+ smp->data.u.str.data = 6;
+ break;
+ case HTTP_AUTH_BEARER:
+ smp->data.u.str.area = "Bearer";
+ smp->data.u.str.data = 6;
+ break;
+ default:
+ return 0;
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ return 1;
+}
+
+/* Fetch the user supplied if there is an Authorization header. It relies on
+ * get_http_auth(). Only the Basic scheme is supported; the user name is a
+ * constant string stored in the transaction.
+ */
+static int smp_fetch_http_auth_user(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct http_txn *txn;
+
+ if (!htx)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!get_http_auth(smp, htx) || txn->auth.method != HTTP_AUTH_BASIC)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = txn->auth.user;
+ smp->data.u.str.data = strlen(txn->auth.user);
+ smp->flags = SMP_F_CONST;
+ return 1;
+}
+
+/* Fetch the password supplied if there is an Authorization header. It relies on
+ * get_http_auth(). Only the Basic scheme is supported; the password is a
+ * constant string stored in the transaction.
+ */
+static int smp_fetch_http_auth_pass(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct http_txn *txn;
+
+ if (!htx)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!get_http_auth(smp, htx) || txn->auth.method != HTTP_AUTH_BASIC)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = txn->auth.pass;
+ smp->data.u.str.data = strlen(txn->auth.pass);
+ smp->flags = SMP_F_CONST;
+ return 1;
+}
+
+/* Fetch the value of a Bearer authorization scheme. Without an argument, the
+ * token is taken from the Authorization header via get_http_auth(). With a
+ * string argument, the named header is looked up instead and its value is
+ * used when its scheme token matches "Bearer" (case-insensitive).
+ */
+static int smp_fetch_http_auth_bearer(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+ struct http_txn *txn;
+ struct buffer bearer_val = {};
+
+ if (!htx)
+ return 0;
+
+ if (args->type == ARGT_STR) {
+ struct http_hdr_ctx ctx;
+ struct ist hdr_name = ist2(args->data.str.area, args->data.str.data);
+
+ ctx.blk = NULL;
+ if (http_find_header(htx, hdr_name, &ctx, 0)) {
+ /* split "<scheme> <token>" on the first space */
+ struct ist type = istsplit(&ctx.value, ' ');
+
+ /* There must be "at least" one space character between
+ * the scheme and the following value so ctx.value might
+ * still have leading spaces here (see RFC7235).
+ */
+ ctx.value = istskip(ctx.value, ' ');
+
+ if (isteqi(type, ist("Bearer")) && istlen(ctx.value))
+ chunk_initlen(&bearer_val, istptr(ctx.value), 0, istlen(ctx.value));
+ }
+ }
+ else {
+ txn = smp->strm->txn;
+ if (!get_http_auth(smp, htx) || txn->auth.method != HTTP_AUTH_BEARER)
+ return 0;
+
+ bearer_val = txn->auth.method_data;
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = bearer_val;
+ smp->flags = SMP_F_CONST;
+ return 1;
+}
+
+/* Accepts exactly 1 argument of type userlist. Returns a boolean sample which
+ * is true when the Basic credentials of the request match a user of the list.
+ */
+static int smp_fetch_http_auth(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+
+ if (args->type != ARGT_USR)
+ return 0;
+
+ if (!htx)
+ return 0;
+ if (!get_http_auth(smp, htx) || smp->strm->txn->auth.method != HTTP_AUTH_BASIC)
+ return 0;
+
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = check_user(args->data.usr, smp->strm->txn->auth.user,
+ smp->strm->txn->auth.pass);
+ return 1;
+}
+
+/* Accepts exactly 1 argument of type userlist. On success, returns the user
+ * name as a constant string and stores the user list in smp->ctx.a[0] for
+ * later group matching by pat_match_auth().
+ */
+static int smp_fetch_http_auth_grp(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn = SMP_REQ_CHN(smp);
+ struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+
+ if (args->type != ARGT_USR)
+ return 0;
+
+ if (!htx)
+ return 0;
+ if (!get_http_auth(smp, htx) || smp->strm->txn->auth.method != HTTP_AUTH_BASIC)
+ return 0;
+
+ /* if the user does not belong to the userlist or has a wrong password,
+ * report that it unconditionally does not match. Otherwise we return
+ * a string containing the username.
+ */
+ if (!check_user(args->data.usr, smp->strm->txn->auth.user,
+ smp->strm->txn->auth.pass))
+ return 0;
+
+ /* pat_match_auth() will need the user list */
+ smp->ctx.a[0] = args->data.usr;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ smp->data.u.str.area = smp->strm->txn->auth.user;
+ smp->data.u.str.data = strlen(smp->strm->txn->auth.user);
+
+ return 1;
+}
+
+/* Fetch a captured HTTP request header. The index is the position of
+ * the "capture" option in the configuration file
+ */
+static int smp_fetch_capture_req_hdr(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct proxy *fe;
+ int idx;
+
+ if (args->type != ARGT_SINT)
+ return 0;
+
+ if (!smp->strm)
+ return 0;
+
+ fe = strm_fe(smp->strm);
+ idx = args->data.sint;
+
+ /* NOTE(review): a negative idx is not rejected here — presumably the
+ * argument is validated at configuration parsing time; confirm.
+ */
+ if (idx > (fe->nb_req_cap - 1) || smp->strm->req_cap == NULL || smp->strm->req_cap[idx] == NULL)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ smp->data.u.str.area = smp->strm->req_cap[idx];
+ smp->data.u.str.data = strlen(smp->strm->req_cap[idx]);
+
+ return 1;
+}
+
+/* Fetch a captured HTTP response header. The index is the position of
+ * the "capture" option in the configuration file
+ */
+static int smp_fetch_capture_res_hdr(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct proxy *fe;
+ int idx;
+
+ if (args->type != ARGT_SINT)
+ return 0;
+
+ if (!smp->strm)
+ return 0;
+
+ fe = strm_fe(smp->strm);
+ idx = args->data.sint;
+
+ /* NOTE(review): a negative idx is not rejected here — presumably the
+ * argument is validated at configuration parsing time; confirm.
+ */
+ if (idx > (fe->nb_rsp_cap - 1) || smp->strm->res_cap == NULL || smp->strm->res_cap[idx] == NULL)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ smp->data.u.str.area = smp->strm->res_cap[idx];
+ smp->data.u.str.data = strlen(smp->strm->res_cap[idx]);
+
+ return 1;
+}
+
+/* Extracts the METHOD in the HTTP request, the txn->uri should be filled before
+ * the call. The method is the token preceding the first space of txn->uri.
+ */
+static int smp_fetch_capture_req_method(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *temp;
+ struct http_txn *txn;
+ char *ptr;
+
+ if (!smp->strm)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!txn || !txn->uri)
+ return 0;
+
+ ptr = txn->uri;
+
+ while (*ptr != ' ' && *ptr != '\0') /* find first space */
+ ptr++;
+
+ /* no copy is made: the trash chunk is repointed directly into txn->uri,
+ * hence the returned sample is marked SMP_F_CONST
+ */
+ temp = get_trash_chunk();
+ temp->area = txn->uri;
+ temp->data = ptr - txn->uri;
+ smp->data.u.str = *temp;
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+
+ return 1;
+
+}
+
+/* Extracts the path in the HTTP request, the txn->uri should be filled before
+ * the call. The URI is the second space-delimited token of txn->uri; its path
+ * component is then extracted and returned as a constant string.
+ */
+static int smp_fetch_capture_req_uri(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct http_txn *txn;
+ struct ist path;
+ const char *ptr;
+ struct http_uri_parser parser;
+
+ if (!smp->strm)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!txn || !txn->uri)
+ return 0;
+
+ ptr = txn->uri;
+
+ while (*ptr != ' ' && *ptr != '\0') /* find first space */
+ ptr++;
+
+ if (!*ptr)
+ return 0;
+
+ /* skip the first space and find space after URI */
+ path = ist2(++ptr, 0);
+ while (*ptr != ' ' && *ptr != '\0')
+ ptr++;
+ path.len = ptr - path.ptr;
+
+ parser = http_uri_parser_init(path);
+ path = http_parse_path(&parser);
+ if (!isttest(path))
+ return 0;
+
+ smp->data.u.str.area = path.ptr;
+ smp->data.u.str.data = path.len;
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+
+ return 1;
+}
+
+/* Retrieves the HTTP version from the request (either 1.0 or 1.1) and emits it
+ * as a constant string (either "HTTP/1.0" or "HTTP/1.1"). Requires the request
+ * message to have reached at least the BODY state.
+ */
+static int smp_fetch_capture_req_ver(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct http_txn *txn;
+
+ if (!smp->strm)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!txn || txn->req.msg_state < HTTP_MSG_BODY)
+ return 0;
+
+ if (txn->req.flags & HTTP_MSGF_VER_11)
+ smp->data.u.str.area = "HTTP/1.1";
+ else
+ smp->data.u.str.area = "HTTP/1.0";
+
+ smp->data.u.str.data = 8;
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ return 1;
+
+}
+
+/* Retrieves the HTTP version from the response (either 1.0 or 1.1) and emits it
+ * as a constant string (either "HTTP/1.0" or "HTTP/1.1"). Requires the response
+ * message to have reached at least the BODY state.
+ */
+static int smp_fetch_capture_res_ver(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct http_txn *txn;
+
+ if (!smp->strm)
+ return 0;
+
+ txn = smp->strm->txn;
+ if (!txn || txn->rsp.msg_state < HTTP_MSG_BODY)
+ return 0;
+
+ if (txn->rsp.flags & HTTP_MSGF_VER_11)
+ smp->data.u.str.area = "HTTP/1.1";
+ else
+ smp->data.u.str.area = "HTTP/1.0";
+
+ smp->data.u.str.data = 8;
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST;
+ return 1;
+
+}
+
+/* Iterate over all cookies present in a message. The context is stored in
+ * smp->ctx.a[0] for the in-header position, smp->ctx.a[1] for the
+ * end-of-header-value, and smp->ctx.a[2] for the hdr_ctx. Depending on
+ * the direction, multiple cookies may be parsed on the same line or not.
+ * If provided, the searched cookie name is in args, in args->data.str. If
+ * the input options indicate that no iterating is desired, then only last
+ * value is fetched if any. If no cookie name is provided, the first cookie
+ * value found is fetched. The returned sample is of type CSTR. Can be used
+ * to parse cookies in other files.
+ */
+static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.cookie / cookie / cook, res.cookie / scook / set-cookie */
+ struct channel *chn = ((kw[0] == 'c' || kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[0] == 's' || kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx *ctx = smp->ctx.a[2];
+ struct ist hdr;
+ char *cook = NULL;
+ size_t cook_l = 0;
+ int found = 0;
+
+ if (args->type == ARGT_STR) {
+ cook = args->data.str.area;
+ cook_l = args->data.str.data;
+ }
+
+ if (!ctx) {
+ /* first call */
+ ctx = &static_http_hdr_ctx;
+ ctx->blk = NULL;
+ smp->ctx.a[2] = ctx;
+ }
+
+ if (!htx)
+ return 0;
+
+ /* requests carry "Cookie", responses (and checks) carry "Set-Cookie" */
+ hdr = (!(check || (chn && chn->flags & CF_ISRESP)) ? ist("Cookie") : ist("Set-Cookie"));
+
+ /* OK so basically here, either we want only one value or we want to
+ * iterate over all of them and we fetch the next one. In this last case
+ * SMP_OPT_ITERATE option is set.
+ */
+
+ if (!(smp->flags & SMP_F_NOT_LAST)) {
+ /* search for the header from the beginning, we must first initialize
+ * the search parameters.
+ */
+ smp->ctx.a[0] = NULL;
+ ctx->blk = NULL;
+ }
+
+ smp->flags |= SMP_F_VOL_HDR;
+ while (1) {
+ /* Note: smp->ctx.a[0] == NULL every time we need to fetch a new header */
+ if (!smp->ctx.a[0]) {
+ if (!http_find_header(htx, hdr, ctx, 0))
+ goto out;
+
+ /* value too short to even hold "name=": try next header */
+ if (ctx->value.len < cook_l + 1)
+ continue;
+
+ smp->ctx.a[0] = ctx->value.ptr;
+ smp->ctx.a[1] = smp->ctx.a[0] + ctx->value.len;
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ smp->ctx.a[0] = http_extract_cookie_value(smp->ctx.a[0], smp->ctx.a[1],
+ cook, cook_l,
+ (smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ,
+ &smp->data.u.str.area,
+ &smp->data.u.str.data);
+ if (smp->ctx.a[0]) {
+ found = 1;
+ if (smp->opt & SMP_OPT_ITERATE) {
+ /* iterate on cookie value */
+ smp->flags |= SMP_F_NOT_LAST;
+ return 1;
+ }
+ /* NOTE(review): args->data.str.data is read even when
+ * args->type != ARGT_STR; the union field is presumably
+ * zero in that case — confirm.
+ */
+ if (args->data.str.data == 0) {
+ /* No cookie name, first occurrence returned */
+ break;
+ }
+ }
+ /* if we're looking for last occurrence, let's loop */
+ }
+
+ /* all cookie headers and values were scanned. If we're looking for the
+ * last occurrence, we may return it now.
+ */
+ out:
+ smp->flags &= ~SMP_F_NOT_LAST;
+ return found;
+}
+
+/* Same than smp_fetch_cookie() but only relies on the sample direction to
+ * choose the right channel. So instead of duplicating the code, we just change
+ * the keyword and then fallback on smp_fetch_cookie().
+ */
+static int smp_fetch_chn_cookie(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* rewrite kw so that smp_fetch_cookie()'s kw[] tests select the proper channel */
+ kw = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ ? "req.cook" : "res.cook");
+ return smp_fetch_cookie(args, smp, kw, private);
+}
+
+/* Iterate over all cookies present in a request to count how many occurrences
+ * match the name in args and args->data.str.len. If <multi> is non-null, then
+ * multiple cookies may be parsed on the same line. The returned sample is of
+ * type UINT. Accepts exactly 1 argument of type string.
+ */
+static int smp_fetch_cookie_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.cook_cnt / cook_cnt, res.cook_cnt / scook_cnt */
+ struct channel *chn = ((kw[0] == 'c' || kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[0] == 's' || kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx ctx;
+ struct ist hdr;
+ char *val_beg, *val_end;
+ char *cook = NULL;
+ size_t cook_l = 0;
+ int cnt;
+
+ if (args->type == ARGT_STR){
+ cook = args->data.str.area;
+ cook_l = args->data.str.data;
+ }
+
+ if (!htx)
+ return 0;
+
+ /* requests carry "Cookie", responses (and checks) carry "Set-Cookie" */
+ hdr = (!(check || (chn && chn->flags & CF_ISRESP)) ? ist("Cookie") : ist("Set-Cookie"));
+
+ val_end = val_beg = NULL;
+ ctx.blk = NULL;
+ cnt = 0;
+ while (1) {
+ /* Note: val_beg == NULL every time we need to fetch a new header */
+ if (!val_beg) {
+ if (!http_find_header(htx, hdr, &ctx, 0))
+ break;
+
+ /* value too short to even hold "name=": try next header */
+ if (ctx.value.len < cook_l + 1)
+ continue;
+
+ val_beg = ctx.value.ptr;
+ val_end = val_beg + ctx.value.len;
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ /* count every matching cookie value within the current header */
+ while ((val_beg = http_extract_cookie_value(val_beg, val_end,
+ cook, cook_l,
+ (smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ,
+ &smp->data.u.str.area,
+ &smp->data.u.str.data))) {
+ cnt++;
+ }
+ }
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = cnt;
+ smp->flags |= SMP_F_VOL_HDR;
+ return 1;
+}
+
+/* Fetch a cookie's integer value. The integer value is returned. It
+ * takes a mandatory argument of type string. It relies on smp_fetch_cookie().
+ */
+static int smp_fetch_cookie_val(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ int ret = smp_fetch_cookie(args, smp, kw, private);
+
+ /* convert the string sample in place to a signed integer */
+ if (ret > 0) {
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = strl2ic(smp->data.u.str.area,
+ smp->data.u.str.data);
+ }
+
+ return ret;
+}
+
+/* Iterate over all cookies present in a message,
+ * and return the list of cookie names separated by
+ * the input argument character.
+ * If no input argument is provided,
+ * the default delimiter is ','.
+ * The returned sample is of type CSTR, built in the trash chunk.
+ */
+static int smp_fetch_cookie_names(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ /* possible keywords: req.cook_names, res.cook_names */
+ struct channel *chn = ((kw[2] == 'q') ? SMP_REQ_CHN(smp) : SMP_RES_CHN(smp));
+ struct check *check = ((kw[2] == 's') ? objt_check(smp->sess->origin) : NULL);
+ struct htx *htx = smp_prefetch_htx(smp, chn, check, 1);
+ struct http_hdr_ctx ctx;
+ struct ist hdr;
+ struct buffer *temp;
+ char del = ',';
+ char *ptr, *attr_beg, *attr_end;
+ size_t len = 0;
+ int is_req = !(check || (chn && chn->flags & CF_ISRESP));
+
+ if (!htx)
+ return 0;
+
+ /* NOTE(review): an empty string argument would make del = '\0' — presumably
+ * rejected at configuration parsing; confirm.
+ */
+ if (args->type == ARGT_STR)
+ del = *args[0].data.str.area;
+
+ hdr = (is_req ? ist("Cookie") : ist("Set-Cookie"));
+ temp = get_trash_chunk();
+
+ smp->flags |= SMP_F_VOL_HDR;
+ attr_end = attr_beg = NULL;
+ ctx.blk = NULL;
+ /* Scan through all headers and extract all cookie names from
+ * 1. Cookie header(s) for request channel OR
+ * 2. Set-Cookie header(s) for response channel
+ */
+ while (1) {
+ /* Note: attr_beg == NULL every time we need to fetch a new header */
+ if (!attr_beg) {
+ /* For Set-Cookie, we need to fetch the entire header line (set flag to 1) */
+ if (!http_find_header(htx, hdr, &ctx, !is_req))
+ break;
+ attr_beg = ctx.value.ptr;
+ attr_end = attr_beg + ctx.value.len;
+ }
+
+ while (1) {
+ attr_beg = http_extract_next_cookie_name(attr_beg, attr_end, is_req, &ptr, &len);
+ if (!attr_beg)
+ break;
+
+ /* prepend delimiter if this is not the first cookie name found */
+ if (temp->data)
+ temp->area[temp->data++] = del;
+
+ /* At this point ptr should point to the start of the cookie name and len would be the length of the cookie name */
+ if (!chunk_memcat(temp, ptr, len))
+ return 0;
+ }
+ }
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *temp;
+ return 1;
+}
+
+/************************************************************************/
+/* The code below is dedicated to sample fetches */
+/************************************************************************/
+
+/* This scans a URL-encoded query string. It takes an optionally wrapping
+ * string whose first contiguous chunk has its beginning in ctx->a[0] and end
+ * in ctx->a[1], and the optional second part in (ctx->a[2]..ctx->a[3]). The
+ * pointers are updated for next iteration before leaving. The sample is CONST
+ * when the value is contiguous, otherwise copied to the trash; SMP_F_NOT_LAST
+ * is set while more input remains to be scanned.
+ */
+static int smp_fetch_param(char delim, const char *name, int name_len, const struct arg *args, struct sample *smp, const char *kw, void *private, char insensitive)
+{
+ const char *vstart, *vend;
+ struct buffer *temp;
+ const char **chunks = (const char **)smp->ctx.a;
+
+ if (!http_find_next_url_param(chunks, name, name_len,
+ &vstart, &vend, delim, insensitive))
+ return 0;
+
+ /* Create sample. If the value is contiguous, return the pointer as CONST,
+ * if the value is wrapped, copy-it in a buffer.
+ */
+ smp->data.type = SMP_T_STR;
+ if (chunks[2] &&
+ vstart >= chunks[0] && vstart <= chunks[1] &&
+ vend >= chunks[2] && vend <= chunks[3]) {
+ /* Wrapped case. */
+ temp = get_trash_chunk();
+ memcpy(temp->area, vstart, chunks[1] - vstart);
+ memcpy(temp->area + ( chunks[1] - vstart ), chunks[2],
+ vend - chunks[2]);
+ smp->data.u.str.area = temp->area;
+ smp->data.u.str.data = ( chunks[1] - vstart ) + ( vend - chunks[2] );
+ } else {
+ /* Contiguous case. */
+ smp->data.u.str.area = (char *)vstart;
+ smp->data.u.str.data = vend - vstart;
+ smp->flags = SMP_F_VOL_1ST | SMP_F_CONST;
+ }
+
+ /* Update context, check wrapping. If the value ended in the second
+ * chunk, the remaining input collapses into a single chunk.
+ */
+ chunks[0] = vend;
+ if (chunks[2] && vend >= chunks[2] && vend <= chunks[3]) {
+ chunks[1] = chunks[3];
+ chunks[2] = NULL;
+ }
+
+ if (chunks[0] < chunks[1])
+ smp->flags |= SMP_F_NOT_LAST;
+
+ return 1;
+}
+
+/* This function iterates over each parameter of the query string. It uses
+ * ctx->a[0] and ctx->a[1] to store the beginning and end of the current
+ * parameter. Since it uses smp_fetch_param(), ctx->a[2..3] are both NULL.
+ * An optional parameter name is passed in args[0], otherwise any parameter is
+ * considered. It supports an optional delimiter argument for the beginning of
+ * the string in args[1], which defaults to "?". An optional "i" in args[2]
+ * makes the parameter name comparison case-insensitive.
+ * Returns 1 with the value in <smp>, 0 on failure or when nothing is found.
+ */
+static int smp_fetch_url_param(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct channel *chn = SMP_REQ_CHN(smp);
+	char delim = '?';
+	const char *name;
+	int name_len;
+	char insensitive = 0;
+
+	if ((args[0].type && args[0].type != ARGT_STR) ||
+	    (args[1].type && args[1].type != ARGT_STR) ||
+	    (args[2].type && args[2].type != ARGT_STR))
+		return 0;
+
+	name = "";
+	name_len = 0;
+	/* use args[0] explicitly, like smp_fetch_body_param(), for consistency */
+	if (args[0].type == ARGT_STR) {
+		name = args[0].data.str.area;
+		name_len = args[0].data.str.data;
+	}
+
+	if (args[1].type && *args[1].data.str.area)
+		delim = *args[1].data.str.area;
+	if (args[2].type && *args[2].data.str.area == 'i')
+		insensitive = 1;
+
+	if (!smp->ctx.a[0]) { // first call, find the query string
+		struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+		struct htx_sl *sl;
+
+		if (!htx)
+			return 0;
+
+		sl = http_get_stline(htx);
+		smp->ctx.a[0] = http_find_param_list(HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl), delim);
+		if (!smp->ctx.a[0])
+			return 0;
+
+		smp->ctx.a[1] = HTX_SL_REQ_UPTR(sl) + HTX_SL_REQ_ULEN(sl);
+
+		/* Assume that the context is filled with NULL pointer
+		 * before the first call.
+		 * smp->ctx.a[2] = NULL;
+		 * smp->ctx.a[3] = NULL;
+		 */
+	}
+
+	return smp_fetch_param(delim, name, name_len, args, smp, kw, private, insensitive);
+}
+
+/* This function iterates over each parameter of the body. This requires
+ * that the body has been waited for using http-buffer-request. It uses
+ * ctx->a[0] and ctx->a[1] to store the beginning and end of the first
+ * contiguous part of the body, and optionally ctx->a[2..3] to reference the
+ * optional second part if the body wraps at the end of the buffer. An optional
+ * parameter name is passed in args[0], otherwise any parameter is considered.
+ * An optional "i" in args[1] makes the name comparison case-insensitive.
+ */
+static int smp_fetch_body_param(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct channel *chn = SMP_REQ_CHN(smp);
+	const char *name;
+	int name_len;
+	char insensitive = 0;
+
+	if ((args[0].type && args[0].type != ARGT_STR) ||
+	    (args[1].type && args[1].type != ARGT_STR))
+		return 0;
+
+	name = "";
+	name_len = 0;
+	if (args[0].type == ARGT_STR) {
+		name = args[0].data.str.area;
+		name_len = args[0].data.str.data;
+	}
+
+	if (args[1].type && *args[1].data.str.area == 'i')
+		insensitive = 1;
+
+	if (!smp->ctx.a[0]) { // first call, find the query string
+		struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+		struct buffer *temp;
+		int32_t pos;
+
+		if (!htx)
+			return 0;
+
+		/* copy all DATA blocks into one contiguous trash chunk; stop
+		 * at trailers or end of message */
+		temp = get_trash_chunk();
+		for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+			struct htx_blk *blk = htx_get_blk(htx, pos);
+			enum htx_blk_type type = htx_get_blk_type(blk);
+
+			if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+				break;
+			if (type == HTX_BLK_DATA) {
+				if (!h1_format_htx_data(htx_get_blk_value(htx, blk), temp, 0))
+					return 0;
+			}
+		}
+
+		/* the body is contiguous here, so ctx->a[2..3] stay NULL */
+		smp->ctx.a[0] = temp->area;
+		smp->ctx.a[1] = temp->area + temp->data;
+
+		/* Assume that the context is filled with NULL pointer
+		 * before the first call.
+		 * smp->ctx.a[2] = NULL;
+		 * smp->ctx.a[3] = NULL;
+		 */
+
+	}
+
+	return smp_fetch_param('&', name, name_len, args, smp, kw, private, insensitive);
+}
+
+/* Return the signed integer value for the specified url parameter (see
+ * url_param above). The string fetched by smp_fetch_url_param() is converted
+ * in place to a signed integer sample.
+ */
+static int smp_fetch_url_param_val(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int ret;
+
+	ret = smp_fetch_url_param(args, smp, kw, private);
+	if (ret <= 0)
+		return ret;
+
+	/* convert the fetched string sample to a signed integer */
+	smp->data.u.sint = strl2ic(smp->data.u.str.area,
+				   smp->data.u.str.data);
+	smp->data.type = SMP_T_SINT;
+	return ret;
+}
+
+/* This produces a 32-bit hash of the concatenation of the first occurrence of
+ * the Host header followed by the path component if it begins with a slash ('/').
+ * This means that '*' will not be added, resulting in exactly the first Host
+ * entry. If no Host header is found, then the path is used. The resulting value
+ * is hashed using the url hash followed by a full avalanche hash and provides a
+ * 32-bit integer value. This fetch is useful for tracking per-URL activity on
+ * high-traffic sites without having to store whole paths.
+ * this differs from the base32 functions in that it includes the url parameters
+ * as well as the path
+ */
+static int smp_fetch_url32(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct channel *chn = SMP_REQ_CHN(smp);
+	struct htx *htx = smp_prefetch_htx(smp, chn, NULL, 1);
+	struct http_hdr_ctx ctx;
+	struct ist path;
+	unsigned int hash = 0;
+	struct http_uri_parser parser;
+
+	if (!htx)
+		return 0;
+
+	ctx.blk = NULL;
+	if (http_find_header(htx, ist("Host"), &ctx, 1)) {
+		/* OK we have the header value in ctx.value. Note that the
+		 * loop consumes the local copy (ptr/len) only, the message
+		 * itself is left untouched. */
+		while (ctx.value.len--)
+			hash = *(ctx.value.ptr++) + (hash << 6) + (hash << 16) - hash;
+	}
+
+	/* now retrieve the path, including the query string */
+	sl = http_get_stline(htx);
+	parser = http_uri_parser_init(htx_sl_req_uri(sl));
+	path = http_parse_path(&parser);
+	if (path.len && *(path.ptr) == '/') {
+		while (path.len--)
+			hash = *(path.ptr++) + (hash << 6) + (hash << 16) - hash;
+	}
+
+	/* avalanche the accumulated sdbm-style hash into a 32-bit value */
+	hash = full_hash(hash);
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = hash;
+	smp->flags = SMP_F_VOL_1ST;
+	return 1;
+}
+
+/* This concatenates the source address with the 32-bit hash of the Host and
+ * URL as returned by smp_fetch_base32(). The idea is to have per-source and
+ * per-url counters. The result is a binary block from 8 to 20 bytes depending
+ * on the source address length. The URL hash is stored before the address so
+ * that in environments where IPv6 is insignificant, truncating the output to
+ * 8 bytes would still work.
+ */
+static int smp_fetch_url32_src(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const struct sockaddr_storage *src = (smp->strm ? sc_src(smp->strm->scf) : NULL);
+	struct buffer *temp;
+
+	if (!src)
+		return 0;
+
+	/* first compute the URL hash; it fills smp->data.u.sint */
+	if (!smp_fetch_url32(args, smp, kw, private))
+		return 0;
+
+	/* store the hash in network byte order, then append the raw address */
+	temp = get_trash_chunk();
+	*(unsigned int *) temp->area = htonl(smp->data.u.sint);
+	temp->data += sizeof(unsigned int);
+
+	switch (src->ss_family) {
+	case AF_INET:
+		memcpy(temp->area + temp->data,
+		       &((struct sockaddr_in *)src)->sin_addr,
+		       4);
+		temp->data += 4;
+		break;
+	case AF_INET6:
+		memcpy(temp->area + temp->data,
+		       &((struct sockaddr_in6 *)src)->sin6_addr,
+		       16);
+		temp->data += 16;
+		break;
+	default:
+		/* unsupported address family: no sample */
+		return 0;
+	}
+
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_BIN;
+	return 1;
+}
+
+/************************************************************************/
+/* Other utility functions */
+/************************************************************************/
+
+/* This function is used to validate the arguments passed to any "hdr" fetch
+ * keyword. These keywords support an optional positive or negative occurrence
+ * number. We must ensure that the number is greater than -MAX_HDR_HISTORY. It
+ * is assumed that the types are already the correct ones. Returns 0 on error,
+ * non-zero if OK. If <err> is not NULL, it will be filled with a pointer to an
+ * error message in case of error, that the caller is responsible for freeing.
+ * The initial location must either be freeable or NULL.
+ * Note: this function's pointer is checked from Lua.
+ */
+int val_hdr(struct arg *arg, char **err_msg)
+{
+	/* only reject an explicit occurrence count below the history depth */
+	if (!arg || arg[1].type != ARGT_SINT || arg[1].data.sint >= -MAX_HDR_HISTORY)
+		return 1;
+
+	memprintf(err_msg, "header occurrence must be >= %d", -MAX_HDR_HISTORY);
+	return 0;
+}
+
+/************************************************************************/
+/* All supported sample fetch keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Each entry is: keyword, fetch function, argument mask, validation
+ * function, output sample type, and fetch availability/usability flags.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+	{ "base",            smp_fetch_base,           0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "base32",          smp_fetch_base32,         0,                NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "base32+src",      smp_fetch_base32_src,     0,                NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+	{ "baseq",           smp_fetch_base,           0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+
+	/* capture are allocated and are permanent in the stream */
+	{ "capture.req.hdr", smp_fetch_capture_req_hdr, ARG1(1,SINT),    NULL,    SMP_T_STR,  SMP_USE_HRQHP },
+
+	/* retrieve these captures from the HTTP logs */
+	{ "capture.req.method", smp_fetch_capture_req_method, 0,         NULL,    SMP_T_STR,  SMP_USE_HRQHP },
+	{ "capture.req.uri",    smp_fetch_capture_req_uri,    0,         NULL,    SMP_T_STR,  SMP_USE_HRQHP },
+	{ "capture.req.ver",    smp_fetch_capture_req_ver,    0,         NULL,    SMP_T_STR,  SMP_USE_HRQHP },
+
+	{ "capture.res.hdr", smp_fetch_capture_res_hdr, ARG1(1,SINT),    NULL,    SMP_T_STR,  SMP_USE_HRSHP },
+	{ "capture.res.ver", smp_fetch_capture_res_ver, 0,               NULL,    SMP_T_STR,  SMP_USE_HRQHP },
+
+	/* cookie is valid in both directions (eg: for "stick ...") but cook*
+	 * are only here to match the ACL's name, are request-only and are used
+	 * for ACL compatibility only.
+	 */
+	{ "cook",            smp_fetch_cookie,         ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "cookie",          smp_fetch_chn_cookie,     ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV|SMP_USE_HRSHV },
+	{ "cook_cnt",        smp_fetch_cookie_cnt,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "cook_val",        smp_fetch_cookie_val,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+
+	/* hdr is valid in both directions (eg: for "stick ...") but hdr_* are
+	 * only here to match the ACL's name, are request-only and are used for
+	 * ACL compatibility only.
+	 */
+	{ "hdr",             smp_fetch_chn_hdr,        ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRQHV|SMP_USE_HRSHV },
+	{ "hdr_cnt",         smp_fetch_hdr_cnt,        ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "hdr_ip",          smp_fetch_hdr_ip,         ARG2(0,STR,SINT), val_hdr, SMP_T_ADDR, SMP_USE_HRQHV },
+	{ "hdr_val",         smp_fetch_hdr_val,        ARG2(0,STR,SINT), val_hdr, SMP_T_SINT, SMP_USE_HRQHV },
+
+	{ "http_auth_type",  smp_fetch_http_auth_type, 0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "http_auth_user",  smp_fetch_http_auth_user, 0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "http_auth_pass",  smp_fetch_http_auth_pass, 0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "http_auth_bearer", smp_fetch_http_auth_bearer, ARG1(0,STR),   NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "http_auth",       smp_fetch_http_auth,      ARG1(1,USR),      NULL,    SMP_T_BOOL, SMP_USE_HRQHV },
+	{ "http_auth_group", smp_fetch_http_auth_grp,  ARG1(1,USR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "http_first_req",  smp_fetch_http_first_req, 0,                NULL,    SMP_T_BOOL, SMP_USE_HRQHP },
+	{ "method",          smp_fetch_meth,           0,                NULL,    SMP_T_METH, SMP_USE_HRQHP },
+	{ "path",            smp_fetch_path,           0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "pathq",           smp_fetch_path,           0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "query",           smp_fetch_query,          0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+
+	/* HTTP protocol on the request path */
+	{ "req.proto_http",  smp_fetch_proto_http,     0,                NULL,    SMP_T_BOOL, SMP_USE_HRQHP },
+	{ "req_proto_http",  smp_fetch_proto_http,     0,                NULL,    SMP_T_BOOL, SMP_USE_HRQHP },
+
+	/* HTTP version on the request path */
+	{ "req.ver",         smp_fetch_rqver,          0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "req_ver",         smp_fetch_rqver,          0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+
+	{ "req.body",        smp_fetch_body,           0,                NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+	{ "req.body_len",    smp_fetch_body_len,       0,                NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.body_size",   smp_fetch_body_size,      0,                NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.body_param",  smp_fetch_body_param,     ARG2(0,STR,STR),  NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+
+	{ "req.hdrs",        smp_fetch_hdrs,           0,                NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+	{ "req.hdrs_bin",    smp_fetch_hdrs_bin,       0,                NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+
+	/* HTTP version on the response path */
+	{ "res.ver",         smp_fetch_stver,          0,                NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+	{ "resp_ver",        smp_fetch_stver,          0,                NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+
+	{ "res.body",        smp_fetch_body,           0,                NULL,    SMP_T_BIN,  SMP_USE_HRSHV },
+	{ "res.body_len",    smp_fetch_body_len,       0,                NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "res.body_size",   smp_fetch_body_size,      0,                NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+
+	{ "res.hdrs",        smp_fetch_hdrs,           0,                NULL,    SMP_T_BIN,  SMP_USE_HRSHV },
+	{ "res.hdrs_bin",    smp_fetch_hdrs_bin,       0,                NULL,    SMP_T_BIN,  SMP_USE_HRSHV },
+
+	/* explicit req.{cook,hdr} are used to force the fetch direction to be request-only */
+	{ "req.cook",        smp_fetch_cookie,         ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "req.cook_cnt",    smp_fetch_cookie_cnt,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.cook_val",    smp_fetch_cookie_val,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.cook_names",  smp_fetch_cookie_names,   ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+
+	{ "req.fhdr",        smp_fetch_fhdr,           ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRQHV },
+	{ "req.fhdr_cnt",    smp_fetch_fhdr_cnt,       ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.hdr",         smp_fetch_hdr,            ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRQHV },
+	{ "req.hdr_cnt",     smp_fetch_hdr_cnt,        ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "req.hdr_ip",      smp_fetch_hdr_ip,         ARG2(0,STR,SINT), val_hdr, SMP_T_ADDR, SMP_USE_HRQHV },
+	{ "req.hdr_names",   smp_fetch_hdr_names,      ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "req.hdr_val",     smp_fetch_hdr_val,        ARG2(0,STR,SINT), val_hdr, SMP_T_SINT, SMP_USE_HRQHV },
+
+	/* explicit req.{cook,hdr} are used to force the fetch direction to be response-only */
+	{ "res.cook",        smp_fetch_cookie,         ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+	{ "res.cook_cnt",    smp_fetch_cookie_cnt,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "res.cook_val",    smp_fetch_cookie_val,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "res.cook_names",  smp_fetch_cookie_names,   ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+
+	{ "res.fhdr",        smp_fetch_fhdr,           ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRSHV },
+	{ "res.fhdr_cnt",    smp_fetch_fhdr_cnt,       ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "res.hdr",         smp_fetch_hdr,            ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRSHV },
+	{ "res.hdr_cnt",     smp_fetch_hdr_cnt,        ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "res.hdr_ip",      smp_fetch_hdr_ip,         ARG2(0,STR,SINT), val_hdr, SMP_T_ADDR, SMP_USE_HRSHV },
+	{ "res.hdr_names",   smp_fetch_hdr_names,      ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+	{ "res.hdr_val",     smp_fetch_hdr_val,        ARG2(0,STR,SINT), val_hdr, SMP_T_SINT, SMP_USE_HRSHV },
+
+	{ "server_status",   smp_fetch_srv_status,     0,                NULL,    SMP_T_SINT, SMP_USE_HRSHP },
+
+	/* scook is valid only on the response and is used for ACL compatibility */
+	{ "scook",           smp_fetch_cookie,         ARG1(0,STR),      NULL,    SMP_T_STR,  SMP_USE_HRSHV },
+	{ "scook_cnt",       smp_fetch_cookie_cnt,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "scook_val",       smp_fetch_cookie_val,     ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+
+	/* shdr is valid only on the response and is used for ACL compatibility */
+	{ "shdr",            smp_fetch_hdr,            ARG2(0,STR,SINT), val_hdr, SMP_T_STR,  SMP_USE_HRSHV },
+	{ "shdr_cnt",        smp_fetch_hdr_cnt,        ARG1(0,STR),      NULL,    SMP_T_SINT, SMP_USE_HRSHV },
+	{ "shdr_ip",         smp_fetch_hdr_ip,         ARG2(0,STR,SINT), val_hdr, SMP_T_ADDR, SMP_USE_HRSHV },
+	{ "shdr_val",        smp_fetch_hdr_val,        ARG2(0,STR,SINT), val_hdr, SMP_T_SINT, SMP_USE_HRSHV },
+
+	{ "status",          smp_fetch_stcode,         0,                NULL,    SMP_T_SINT, SMP_USE_HRSHP },
+	{ "txn.status",      smp_fetch_srv_status,     0,                NULL,    SMP_T_SINT, SMP_USE_HRSHP },
+	{ "unique-id",       smp_fetch_uniqueid,       0,                NULL,    SMP_T_STR,  SMP_SRC_L4SRV },
+	{ "url",             smp_fetch_url,            0,                NULL,    SMP_T_STR,  SMP_USE_HRQHV },
+	{ "url32",           smp_fetch_url32,          0,                NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "url32+src",       smp_fetch_url32_src,      0,                NULL,    SMP_T_BIN,  SMP_USE_HRQHV },
+	{ "url_ip",          smp_fetch_url_ip,         0,                NULL,    SMP_T_IPV4, SMP_USE_HRQHV },
+	{ "url_port",        smp_fetch_url_port,       0,                NULL,    SMP_T_SINT, SMP_USE_HRQHV },
+	{ "url_param",       smp_fetch_url_param,      ARG3(0,STR,STR,STR), NULL, SMP_T_STR,  SMP_USE_HRQHV },
+	{ "urlp"     ,       smp_fetch_url_param,      ARG3(0,STR,STR,STR), NULL, SMP_T_STR,  SMP_USE_HRQHV },
+	{ "urlp_val",        smp_fetch_url_param_val,  ARG3(0,STR,STR,STR), NULL, SMP_T_SINT, SMP_USE_HRQHV },
+
+	{ /* END */ },
+}};
+
+/* register the keyword list at startup */
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/http_htx.c b/src/http_htx.c
new file mode 100644
index 0000000..004d343
--- /dev/null
+++ b/src/http_htx.c
@@ -0,0 +1,3028 @@
+/*
+ * Functions to manipulate HTTP messages using the internal representation.
+ *
+ * Copyright (C) 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/global.h>
+#include <haproxy/h1.h>
+#include <haproxy/http.h>
+#include <haproxy/http-hdr.h>
+#include <haproxy/http_fetch.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/log.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/tools.h>
+
+
+/* raw error message chunks and their http_reply counterparts, indexed by
+ * the internal HTTP_ERR_* status slots */
+struct buffer http_err_chunks[HTTP_ERR_SIZE];
+struct http_reply http_err_replies[HTTP_ERR_SIZE];
+
+/* tree of error messages loaded from files (keyed by file name, presumably —
+ * TODO confirm against the lookup code) and the lists of configured
+ * http-errors sections and http replies */
+struct eb_root http_error_messages = EB_ROOT;
+struct list http_errors_list = LIST_HEAD_INIT(http_errors_list);
+struct list http_replies_list = LIST_HEAD_INIT(http_replies_list);
+
+/* The declaration of an errorfiles/errorfile directives. Used during config
+ * parsing only. */
+struct conf_errors {
+	char type;                          /* directive type (0: errorfiles, 1: errorfile) */
+	union {
+		struct {
+			int status;         /* the status code associated to this error */
+			struct http_reply *reply; /* the http reply for the errorfile */
+		} errorfile;                /* describe an "errorfile" directive */
+		struct {
+			char *name;         /* the http-errors section name */
+			char status[HTTP_ERR_SIZE]; /* list of status to import (0: ignore, 1: implicit import, 2: explicit import) */
+		} errorfiles;               /* describe an "errorfiles" directive */
+	} info;
+
+	char *file;                         /* file where the directive appears */
+	int line;                           /* line where the directive appears */
+
+	struct list list;                   /* next conf_errors */
+};
+
+/* Returns the next unporocessed start line in the HTX message. It returns NULL
+ * if the start-line is undefined (first == -1). Otherwise, it returns the
+ * pointer on the htx_sl structure.
+ */
+struct htx_sl *http_get_stline(const struct htx *htx)
+{
+ struct htx_blk *blk;
+
+ blk = htx_get_first_blk(htx);
+ if (!blk || (htx_get_blk_type(blk) != HTX_BLK_REQ_SL && htx_get_blk_type(blk) != HTX_BLK_RES_SL))
+ return NULL;
+ return htx_get_blk_ptr(htx, blk);
+}
+
+/* Returns the cumulated size of all blocks composing the headers part of the
+ * HTX message (start-line included, up to and including the EOH block), or 0
+ * when the message does not start with headers.
+ */
+size_t http_get_hdrs_size(struct htx *htx)
+{
+	struct htx_blk *blk = htx_get_first_blk(htx);
+	size_t total = 0;
+
+	if (blk && htx_get_blk_type(blk) <= HTX_BLK_EOH) {
+		while (blk) {
+			total += htx_get_blksz(blk);
+			if (htx_get_blk_type(blk) == HTX_BLK_EOH)
+				break;
+			blk = htx_get_next_blk(htx, blk);
+		}
+	}
+	return total;
+}
+
+/* Finds the first or next occurrence of header matching <pattern> in the HTX
+ * message <htx> using the context <ctx>. This structure holds everything
+ * necessary to use the header and find next occurrence. If its <blk> member is
+ * NULL, the header is searched from the beginning. Otherwise, the next
+ * occurrence is returned. The function returns 1 when it finds a value, and 0
+ * when there is no more. It is designed to work with headers defined as
+ * comma-separated lists. If HTTP_FIND_FL_FULL flag is set, it works on
+ * full-line headers in whose comma is not a delimiter but is part of the
+ * syntax. A special case, if ctx->value is NULL when searching for a new values
+ * of a header, the current header is rescanned. This allows rescanning after a
+ * header deletion.
+ *
+ * The matching method is chosen by checking the flags :
+ *
+ *     * HTTP_FIND_FL_MATCH_REG : <pattern> is a regex. header names matching
+ *       the regex are evaluated.
+ *     * HTTP_FIND_FL_MATCH_STR : <pattern> is a string. The header names equal
+ *       to the string are evaluated.
+ *     * HTTP_FIND_FL_MATCH_PFX : <pattern> is a string. The header names
+ *       starting by the string are evaluated.
+ *     * HTTP_FIND_FL_MATCH_SFX : <pattern> is a string. The header names
+ *       ending by the string are evaluated.
+ *     * HTTP_FIND_FL_MATCH_SUB : <pattern> is a string. The header names
+ *       containing the string are evaluated.
+ */
+
+/* matching methods occupy the low 4 bits, HTTP_FIND_FL_FULL is a modifier */
+#define HTTP_FIND_FL_MATCH_STR 0x0001
+#define HTTP_FIND_FL_MATCH_PFX 0x0002
+#define HTTP_FIND_FL_MATCH_SFX 0x0003
+#define HTTP_FIND_FL_MATCH_SUB 0x0004
+#define HTTP_FIND_FL_MATCH_REG 0x0005
+/* 0x0006..0x000f: for other matching methods */
+#define HTTP_FIND_FL_MATCH_TYPE 0x000F
+#define HTTP_FIND_FL_FULL 0x0010
+
+static int __http_find_header(const struct htx *htx, const void *pattern, struct http_hdr_ctx *ctx, int flags)
+{
+	struct htx_blk *blk = ctx->blk;
+	struct ist n, v;
+	enum htx_blk_type type;
+
+	if (blk) {
+		char *p;
+
+		/* continuation of a previous call: either rescan the current
+		 * header (value was reset), or move past the previous value
+		 * within the same comma-separated header line */
+		if (!isttest(ctx->value))
+			goto rescan_hdr;
+		if (flags & HTTP_FIND_FL_FULL)
+			goto next_blk;
+		v = htx_get_blk_value(htx, blk);
+		p = istend(ctx->value) + ctx->lws_after;
+		v.len -= (p - v.ptr);
+		v.ptr = p;
+		if (!v.len)
+			goto next_blk;
+		/* Skip comma */
+		if (*(v.ptr) == ',') {
+			v = istnext(v);
+		}
+
+		goto return_hdr;
+	}
+
+	if (htx_is_empty(htx))
+		return 0;
+
+	for (blk = htx_get_first_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+	  rescan_hdr:
+		type = htx_get_blk_type(blk);
+		if (type == HTX_BLK_EOH)
+			break;
+		if (type != HTX_BLK_HDR)
+			continue;
+
+		if ((flags & HTTP_FIND_FL_MATCH_TYPE) == HTTP_FIND_FL_MATCH_REG) {
+			const struct my_regex *re = pattern;
+
+			n = htx_get_blk_name(htx, blk);
+			if (!regex_exec2(re, n.ptr, n.len))
+				goto next_blk;
+		}
+		else {
+			const struct ist name = *(const struct ist *)(pattern);
+
+			/* If no name was passed, we want any header. So skip the comparison */
+			if (!istlen(name))
+				goto match;
+
+			n = htx_get_blk_name(htx, blk);
+			switch (flags & HTTP_FIND_FL_MATCH_TYPE) {
+			case HTTP_FIND_FL_MATCH_STR:
+				if (!isteqi(n, name))
+					goto next_blk;
+				break;
+			case HTTP_FIND_FL_MATCH_PFX:
+				if (istlen(n) < istlen(name))
+					goto next_blk;
+
+				/* compare only the leading <name> bytes */
+				n = ist2(istptr(n), istlen(name));
+				if (!isteqi(n, name))
+					goto next_blk;
+				break;
+			case HTTP_FIND_FL_MATCH_SFX:
+				if (istlen(n) < istlen(name))
+					goto next_blk;
+
+				/* compare only the trailing <name> bytes */
+				n = ist2(istend(n) - istlen(name),
+					 istlen(name));
+				if (!isteqi(n, name))
+					goto next_blk;
+				break;
+			case HTTP_FIND_FL_MATCH_SUB:
+				if (!strnistr(n.ptr, n.len, name.ptr, name.len))
+					goto next_blk;
+				break;
+			default:
+				goto next_blk;
+				break;
+			}
+		}
+	  match:
+		v = htx_get_blk_value(htx, blk);
+
+	  return_hdr:
+		/* trim leading/trailing LWS from the value, remembering how
+		 * much was skipped so the next call can resume correctly */
+		ctx->lws_before = 0;
+		ctx->lws_after = 0;
+		while (v.len && HTTP_IS_LWS(*v.ptr)) {
+			v = istnext(v);
+			ctx->lws_before++;
+		}
+		if (!(flags & HTTP_FIND_FL_FULL))
+			v.len = http_find_hdr_value_end(v.ptr, istend(v)) - v.ptr;
+
+		while (v.len && HTTP_IS_LWS(*(istend(v) - 1))) {
+			v.len--;
+			ctx->lws_after++;
+		}
+		ctx->blk   = blk;
+		ctx->value = v;
+		return 1;
+
+	  next_blk:
+		;
+	}
+
+	ctx->blk   = NULL;
+	ctx->value = ist("");
+	ctx->lws_before = ctx->lws_after = 0;
+	return 0;
+}
+
+
+/* Header names must match <name> */
+int http_find_header(const struct htx *htx, const struct ist name, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, &name, ctx, HTTP_FIND_FL_MATCH_STR | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+/* Header names must match <name>. Same as http_find_header() */
+int http_find_str_header(const struct htx *htx, const struct ist name, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, &name, ctx, HTTP_FIND_FL_MATCH_STR | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+
+/* Header names must start with <prefix> */
+int http_find_pfx_header(const struct htx *htx, const struct ist prefix, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, &prefix, ctx, HTTP_FIND_FL_MATCH_PFX | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+/* Header names must end with <suffix> */
+int http_find_sfx_header(const struct htx *htx, const struct ist suffix, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, &suffix, ctx, HTTP_FIND_FL_MATCH_SFX | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+/* Header names must contain <sub> */
+int http_find_sub_header(const struct htx *htx, const struct ist sub, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, &sub, ctx, HTTP_FIND_FL_MATCH_SUB | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+/* Header names must match <re> regex */
+int http_match_header(const struct htx *htx, const struct my_regex *re, struct http_hdr_ctx *ctx, int full)
+{
+	return __http_find_header(htx, re, ctx, HTTP_FIND_FL_MATCH_REG | (full ? HTTP_FIND_FL_FULL : 0));
+}
+
+
+/* Adds a header block in the HTX message <htx>, just before the EOH block. It
+ * returns 1 on success, otherwise it returns 0.
+ */
+int http_add_header(struct htx *htx, const struct ist n, const struct ist v)
+{
+	struct htx_blk *blk;
+	struct htx_sl *sl;
+	enum htx_blk_type type = htx_get_tail_type(htx);
+	int32_t prev;
+
+	/* the new block is appended at the tail first */
+	blk = htx_add_header(htx, n, v);
+	if (!blk)
+		goto fail;
+
+	/* headers-only message so far: the block is already in place */
+	if (unlikely(type < HTX_BLK_EOH))
+		goto end;
+
+	/* <blk> is the head, swap it iteratively with its predecessor to place
+	 * it just before the end-of-header block. So blocks remains ordered. */
+	for (prev = htx_get_prev(htx, htx->tail); prev != htx->first; prev = htx_get_prev(htx, prev)) {
+		struct htx_blk   *pblk = htx_get_blk(htx, prev);
+		enum htx_blk_type type = htx_get_blk_type(pblk);
+
+		/* Swap .addr and .info fields (XOR swap, no temporary) */
+		blk->addr ^= pblk->addr; pblk->addr ^= blk->addr; blk->addr ^= pblk->addr;
+		blk->info ^= pblk->info; pblk->info ^= blk->info; blk->info ^= pblk->info;
+
+		if (blk->addr == pblk->addr)
+			blk->addr += htx_get_blksz(pblk);
+
+		/* Stop when end-of-header is reached */
+		if (type == HTX_BLK_EOH)
+			break;
+
+		blk = pblk;
+	}
+
+  end:
+	/* keep the authority in the start-line in sync with a new Host header */
+	sl = http_get_stline(htx);
+	if (sl && (sl->flags & HTX_SL_F_HAS_AUTHORITY) && isteqi(n, ist("host"))) {
+		if (!http_update_authority(htx, sl, v))
+			goto fail;
+	}
+	return 1;
+
+  fail:
+	return 0;
+}
+
+/* Replaces parts of the start-line of the HTX message <htx> with <p1>, <p2>
+ * and <p3>. It returns 1 on success, otherwise it returns 0.
+ */
+int http_replace_stline(struct htx *htx, const struct ist p1, const struct ist p2, const struct ist p3)
+{
+	struct htx_blk *blk = htx_get_first_blk(htx);
+
+	if (!blk)
+		return 0;
+
+	return htx_replace_stline(htx, blk, p1, p2, p3) ? 1 : 0;
+}
+
+/* Replace the request method in the HTX message <htx> by <meth>. It returns 1
+ * on success, otherwise 0.
+ */
+int http_replace_req_meth(struct htx *htx, const struct ist meth)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist uri, vsn;
+
+	if (!sl)
+		return 0;
+
+	/* Start by copying old uri and version into the trash chunk so they
+	 * survive the start-line replacement */
+	chunk_memcat(temp, HTX_SL_REQ_UPTR(sl), HTX_SL_REQ_ULEN(sl)); /* uri */
+	uri = ist2(temp->area, HTX_SL_REQ_ULEN(sl));
+
+	chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area + uri.len, HTX_SL_REQ_VLEN(sl));
+
+	/* create the new start line; also refresh the parsed method enum */
+	sl->info.req.meth = find_http_meth(meth.ptr, meth.len);
+	return http_replace_stline(htx, meth, uri, vsn);
+}
+
+/* Replace the request uri in the HTX message <htx> by <uri>. It returns 1 on
+ * success, otherwise 0.
+ */
+int http_replace_req_uri(struct htx *htx, const struct ist uri)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist meth, vsn;
+
+	if (!sl)
+		goto fail;
+
+	/* Start by copying old method and version into the trash chunk so
+	 * they survive the start-line replacement */
+	chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl)); /* meth */
+	meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));
+
+	chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
+
+	/* create the new start line */
+	if (!http_replace_stline(htx, meth, uri, vsn))
+		goto fail;
+
+	sl = http_get_stline(htx);
+	ALREADY_CHECKED(sl); /* the stline exists because http_replace_stline() succeeded */
+
+	/* the URI is no longer the normalized one; also resync the Host
+	 * header with the (possibly new) authority part of the URI */
+	sl->flags &= ~HTX_SL_F_NORMALIZED_URI;
+	if (!http_update_host(htx, sl, uri))
+		goto fail;
+
+	return 1;
+  fail:
+	return 0;
+}
+
+/* Replace the request path in the HTX message <htx> by <path>. The host part
+ * is preserved. If <with_qs> is set, the query string is evaluated as part of
+ * the path and replaced. Otherwise, it is preserved too. It returns 1 on
+ * success, otherwise 0.
+ */
+int http_replace_req_path(struct htx *htx, const struct ist path, int with_qs)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist meth, uri, vsn, p;
+	size_t plen = 0;
+	struct http_uri_parser parser;
+
+	if (!sl)
+		return 0;
+
+	uri = htx_sl_req_uri(sl);
+	parser = http_uri_parser_init(uri);
+	p = http_parse_path(&parser);
+	if (!isttest(p))
+		p = uri; /* no path found: treat the whole URI as the path */
+	if (with_qs)
+		plen = p.len;
+	else {
+		/* replace only up to the query string delimiter */
+		while (plen < p.len && *(p.ptr + plen) != '?')
+			plen++;
+	}
+
+	/* Start by copying old method and version and create the new uri */
+	chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl)); /* meth */
+	meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));
+
+	chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
+
+	chunk_memcat(temp, uri.ptr, p.ptr - uri.ptr);         /* uri: host part */
+	chunk_istcat(temp, path);                             /* uri: new path */
+	chunk_memcat(temp, p.ptr + plen, p.len - plen);       /* uri: QS part */
+	uri = ist2(temp->area + meth.len + vsn.len, uri.len - plen + path.len);
+
+	/* create the new start line */
+	return http_replace_stline(htx, meth, uri, vsn);
+}
+
+/* Replace the request query-string in the HTX message <htx> by <query>. The
+ * host part and the path are preserved. It returns 1 on success, otherwise
+ * 0. <query> is expected to start with '?'; when the URI has no '?' yet, the
+ * leading character of <query> is kept (offset 0) so the '?' gets inserted.
+ */
+int http_replace_req_query(struct htx *htx, const struct ist query)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist meth, uri, vsn, q;
+	int offset = 1;
+
+	if (!sl)
+		return 0;
+
+	/* locate the existing query string, if any */
+	uri = htx_sl_req_uri(sl);
+	q = uri;
+	while (q.len > 0 && *(q.ptr) != '?') {
+		q = istnext(q);
+	}
+
+	/* skip the question mark or indicate that we must insert it
+	 * (but only if the format string is not empty then).
+	 */
+	if (q.len) {
+		q = istnext(q);
+	}
+	else if (query.len > 1)
+		offset = 0;
+
+	/* Start by copying old method and version and create the new uri */
+	chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl)); /* meth */
+	meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));
+
+	chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
+
+	chunk_memcat(temp, uri.ptr, q.ptr - uri.ptr);               /* uri: host + path part */
+	chunk_memcat(temp, query.ptr + offset, query.len - offset); /* uri: new QS */
+	uri = ist2(temp->area + meth.len + vsn.len, uri.len - q.len + query.len - offset);
+
+	/* create the new start line */
+	return http_replace_stline(htx, meth, uri, vsn);
+}
+
+/* Replace the response status in the HTX message <htx> by <status>. The reason
+ * <reason> is used if it is valid, otherwise the old reason is preserved. The
+ * cached status of the start-line is updated too. It returns 1 on success,
+ * otherwise 0.
+*/
+int http_replace_res_status(struct htx *htx, const struct ist status, const struct ist reason)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist vsn, r;
+
+	if (!sl)
+		return 0;
+
+	/* Start by copying old version, and the old reason if none is given */
+	chunk_memcat(temp, HTX_SL_RES_VPTR(sl), HTX_SL_RES_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area, HTX_SL_RES_VLEN(sl));
+	r = reason;
+	if (!isttest(r)) {
+		chunk_memcat(temp, HTX_SL_RES_RPTR(sl), HTX_SL_RES_RLEN(sl)); /* reason */
+		r = ist2(temp->area + vsn.len, HTX_SL_RES_RLEN(sl));
+	}
+
+	/* create the new start line */
+	sl->info.res.status = strl2ui(status.ptr, status.len);
+	return http_replace_stline(htx, vsn, status, r);
+}
+
+/* Replace the response reason in the HTX message <htx> by <reason>. It returns
+ * 1 on success, otherwise 0.
+*/
+int http_replace_res_reason(struct htx *htx, const struct ist reason)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct htx_sl *sl = http_get_stline(htx);
+	struct ist vsn, status;
+
+	if (!sl)
+		return 0;
+
+	/* Start by copying old version and status code */
+	chunk_memcat(temp, HTX_SL_RES_VPTR(sl), HTX_SL_RES_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area, HTX_SL_RES_VLEN(sl));
+
+	chunk_memcat(temp, HTX_SL_RES_CPTR(sl), HTX_SL_RES_CLEN(sl)); /* code */
+	status = ist2(temp->area + vsn.len, HTX_SL_RES_CLEN(sl));
+
+	/* create the new start line */
+	return http_replace_stline(htx, vsn, status, reason);
+}
+
+/* Append new value <data> after <ctx> value in header
+ * if header is not empty (at least one value exists):
+ *  - ',' delimiter is added before <data> is appended
+ *  - <ctx> must be valid and must point to an existing value,
+ *    else it is an error and prepend_value should be used instead.
+ *
+ * ctx is updated to point to new value
+ *
+ * Returns 1 on success and 0 on failure.
+ */
+int http_append_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data)
+{
+	char *start;
+	struct htx_blk *blk = ctx->blk;
+	struct ist v;
+	uint32_t off = 0;
+
+	if (!blk)
+		goto fail;
+
+	v = htx_get_blk_value(htx, blk);
+
+	if (!istlen(v)) {
+		start = v.ptr;
+		goto empty; /* header is empty, append without ',' */
+	}
+	if (unlikely(!istlen(ctx->value)))
+		goto fail; /* invalid: value is empty, not supported */
+
+	/* insertion point is right after the current value and its trailing
+	 * spaces; <off> is kept because each replacement below may move the
+	 * block's storage */
+	start = istend(ctx->value) + ctx->lws_after;
+	off = start - v.ptr;
+
+	blk = htx_replace_blk_value(htx, blk, ist2(start, 0), ist(","));
+	if (!blk)
+		goto fail;
+	off += 1; /* add 1 for ',' */
+	v = htx_get_blk_value(htx, blk);
+	start = v.ptr + off;
+
+ empty:
+	blk = htx_replace_blk_value(htx, blk, ist2(start, 0), data);
+	if (!blk)
+		goto fail;
+	v = htx_get_blk_value(htx, blk);
+
+	ctx->blk = blk;
+	ctx->value = ist2(v.ptr + off, data.len);
+	ctx->lws_before = ctx->lws_after = 0;
+
+	return 1;
+ fail:
+	return 0;
+}
+
+/* Prepend new value <data> before <ctx> value in header
+ * if <ctx> is not first value (at least one value exists):
+ *  - ',' delimiter is added after <data> is prepended
+ *  - <ctx> must point to an existing, non-empty value
+ *
+ * ctx is updated to point to new value
+ *
+ * Returns 1 on success and 0 on failure.
+ */
+int http_prepend_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data)
+{
+	char *start;
+	struct htx_blk *blk = ctx->blk;
+	struct ist v;
+	uint32_t off = 0;
+	uint8_t first;
+
+	if (!blk)
+		goto fail;
+
+	v = htx_get_blk_value(htx, blk);
+
+	/* a fully empty header value means <data> becomes the only value */
+	first = !istlen(v);
+	start = first ? v.ptr : istptr(ctx->value) - ctx->lws_before;
+
+	/* an empty <ctx> value is only acceptable when the whole header is
+	 * empty (<first> case), exactly like in http_append_header_value().
+	 * Without the <first> exclusion the empty-header path below would be
+	 * unreachable.
+	 */
+	if (unlikely(!first && !istlen(ctx->value)))
+		goto fail; /* invalid: value is empty, not supported */
+
+	off = start - v.ptr;
+
+	blk = htx_replace_blk_value(htx, blk, ist2(start, 0), data);
+	if (!blk)
+		goto fail;
+	v = htx_get_blk_value(htx, blk);
+
+	if (first)
+		goto end; /* header is empty, don't append ',' */
+
+	start = v.ptr + off + data.len;
+
+	blk = htx_replace_blk_value(htx, blk, ist2(start, 0), ist(","));
+	if (!blk)
+		goto fail;
+	v = htx_get_blk_value(htx, blk);
+
+ end:
+	ctx->blk = blk;
+	ctx->value = ist2(v.ptr + off, data.len);
+	ctx->lws_before = ctx->lws_after = 0;
+
+	return 1;
+ fail:
+	return 0;
+}
+
+/* Replaces a part of a header value referenced in the context <ctx> by
+ * <data>. It returns 1 on success, otherwise it returns 0. The context is
+ * updated if necessary.
+ */
+int http_replace_header_value(struct htx *htx, struct http_hdr_ctx *ctx, const struct ist data)
+{
+	struct htx_blk *blk = ctx->blk;
+	struct htx_sl *sl;
+	char *start;
+	struct ist v;
+	uint32_t len, off;
+
+	if (!blk)
+		goto fail;
+
+	v = htx_get_blk_value(htx, blk);
+	/* the replaced area covers the value plus its surrounding spaces */
+	start = ctx->value.ptr - ctx->lws_before;
+	len = ctx->lws_before + ctx->value.len + ctx->lws_after;
+	off = start - v.ptr;
+
+	blk = htx_replace_blk_value(htx, blk, ist2(start, len), data);
+	if (!blk)
+		goto fail;
+
+	v = htx_get_blk_value(htx, blk);
+
+	/* a change on the host header must be mirrored on the uri authority */
+	sl = http_get_stline(htx);
+	if (sl && (sl->flags & HTX_SL_F_HAS_AUTHORITY)) {
+		struct ist n = htx_get_blk_name(htx, blk);
+
+		if (isteq(n, ist("host"))) {
+			if (!http_update_authority(htx, sl, v))
+				goto fail;
+			/* replacing the start-line may have moved the blocks,
+			 * so refresh the context on the host header. A lookup
+			 * failure here means the message is broken: report an
+			 * error instead of dereferencing a NULL block.
+			 */
+			ctx->blk = NULL;
+			if (!http_find_header(htx, ist("host"), ctx, 1))
+				goto fail;
+			blk = ctx->blk;
+			v = htx_get_blk_value(htx, blk);
+		}
+	}
+
+	ctx->blk = blk;
+	ctx->value = ist2(v.ptr + off, data.len);
+	ctx->lws_before = ctx->lws_after = 0;
+
+	return 1;
+ fail:
+	return 0;
+}
+
+/* Fully replaces a header referenced in the context <ctx> by the name <name>
+ * with the value <value>. It returns 1 on success, otherwise it returns 0. The
+ * context is updated if necessary.
+ */
+int http_replace_header(struct htx *htx, struct http_hdr_ctx *ctx,
+			const struct ist name, const struct ist value)
+{
+	struct htx_blk *blk = ctx->blk;
+	struct htx_sl *sl;
+
+	if (!blk)
+		goto fail;
+
+	blk = htx_replace_header(htx, blk, name, value);
+	if (!blk)
+		goto fail;
+
+	/* a change on the host header must be mirrored on the uri authority */
+	sl = http_get_stline(htx);
+	if (sl && (sl->flags & HTX_SL_F_HAS_AUTHORITY) && isteqi(name, ist("host"))) {
+		if (!http_update_authority(htx, sl, value))
+			goto fail;
+		/* the start-line was rebuilt, refresh the context on the new
+		 * host header. NOTE(review): ctx->blk may be NULL if this
+		 * lookup fails -- confirm callers check it before reuse.
+		 */
+		ctx->blk = NULL;
+		http_find_header(htx, ist("host"), ctx, 1);
+		blk = ctx->blk;
+	}
+
+	ctx->blk = blk;
+	ctx->value = ist(NULL);
+	ctx->lws_before = ctx->lws_after = 0;
+
+	return 1;
+ fail:
+	return 0;
+}
+
+/* Remove one value of a header. This only works on a <ctx> returned by
+ * http_find_header function. The value is removed, as well as surrounding commas
+ * if any. If the removed value was alone, the whole header is removed. The
+ * <ctx> is always updated accordingly, as well as the HTX message <htx>. It
+ * returns 1 on success. Otherwise, it returns 0. The <ctx> is always left in a
+ * form that can be handled by http_find_header() to find next occurrence.
+ */
+int http_remove_header(struct htx *htx, struct http_hdr_ctx *ctx)
+{
+	struct htx_blk *blk = ctx->blk;
+	char *start;
+	struct ist v;
+	uint32_t len;
+
+	if (!blk)
+		return 0;
+
+	/* the removed area covers the value plus its surrounding spaces */
+	start = ctx->value.ptr - ctx->lws_before;
+	len = ctx->lws_before + ctx->value.len + ctx->lws_after;
+
+	v = htx_get_blk_value(htx, blk);
+	if (len == v.len) {
+		/* the value was alone: remove the whole header block */
+		blk = htx_remove_blk(htx, blk);
+		if (blk || htx_is_empty(htx)) {
+			ctx->blk = blk;
+			ctx->value = IST_NULL;
+			ctx->lws_before = ctx->lws_after = 0;
+		}
+		else {
+			/* the removed block was the last one: point the
+			 * context back to the new tail block */
+			ctx->blk = htx_get_blk(htx, htx->tail);
+			ctx->value = htx_get_blk_value(htx, ctx->blk);
+			ctx->lws_before = ctx->lws_after = 0;
+		}
+		return 1;
+	}
+
+	/* This was not the only value of this header. We have to remove the
+	 * part pointed by ctx->value. If it is the last entry of the list, we
+	 * remove the last separator.
+	 */
+	if (start == v.ptr) {
+		/* It's the first header part but not the only one. So remove
+		 * the comma after it. */
+		len++;
+	}
+	else {
+		/* There is at least one header part before the removed one. So
+		 * remove the comma between them. */
+		start--;
+		len++;
+	}
+	/* Update the block content and its len */
+	memmove(start, start+len, v.len-len);
+	htx_change_blk_value_len(htx, blk, v.len-len);
+
+	/* Finally update the ctx */
+	ctx->value = ist2(start, 0);
+	ctx->lws_before = ctx->lws_after = 0;
+
+	return 1;
+}
+
+/* Updates the authority part of the uri with the value <host>. It happens when
+ * the header host is modified. It returns 0 on failure and 1 on success. It is
+ * the caller responsibility to provide the start-line and to be sure the uri
+ * contains an authority. Thus, if no authority is found in the uri, an error is
+ * returned.
+ */
+int http_update_authority(struct htx *htx, struct htx_sl *sl, const struct ist host)
+{
+	struct buffer *temp = get_trash_chunk();
+	struct ist meth, vsn, uri, authority;
+	struct http_uri_parser parser;
+
+	uri = htx_sl_req_uri(sl);
+	parser = http_uri_parser_init(uri);
+	authority = http_parse_authority(&parser, 1);
+	if (!authority.len)
+		return 0;
+
+	/* Don't update the uri if there is no change */
+	if (isteq(host, authority))
+		return 1;
+
+	/* Start by copying old method and version */
+	chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl)); /* meth */
+	meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));
+
+	chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl)); /* vsn */
+	vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));
+
+	/* rebuild the uri: part before the authority, new host, then the
+	 * remaining part after the authority */
+	chunk_memcat(temp, uri.ptr, authority.ptr - uri.ptr);
+	chunk_istcat(temp, host);
+	chunk_memcat(temp, istend(authority), istend(uri) - istend(authority));
+	uri = ist2(temp->area + meth.len + vsn.len, host.len + uri.len - authority.len); /* uri */
+
+	return http_replace_stline(htx, meth, uri, vsn);
+
+}
+
+/* Update the header host by extracting the authority of the uri <uri>. flags of
+ * the start-line are also updated accordingly. For origin-form and asterisk-form
+ * uri, the header host is not changed and the flag HTX_SL_F_HAS_AUTHORITY is
+ * removed from the flags of the start-line. Otherwise, this flag is set and the
+ * authority is used to set the value of the header host. This function returns
+ * 0 on failure and 1 on success.
+*/
+int http_update_host(struct htx *htx, struct htx_sl *sl, const struct ist uri)
+{
+	struct ist authority;
+	struct http_hdr_ctx ctx;
+	struct http_uri_parser parser = http_uri_parser_init(uri);
+
+	if (parser.format == URI_PARSER_FORMAT_EMPTY ||
+	    parser.format == URI_PARSER_FORMAT_ASTERISK ||
+	    parser.format == URI_PARSER_FORMAT_ABSPATH) {
+		sl->flags &= ~HTX_SL_F_HAS_AUTHORITY;
+	}
+	else {
+		sl->flags |= HTX_SL_F_HAS_AUTHORITY;
+		if (sl->info.req.meth != HTTP_METH_CONNECT) {
+			// absolute-form (RFC7230 #5.3.2)
+			sl->flags |= HTX_SL_F_HAS_SCHM;
+			/* "http:" has its ':' at index 4; anything else
+			 * starting with 'h'/'H' is assumed to be "https" */
+			if (uri.len > 4 && (uri.ptr[0] | 0x20) == 'h')
+				sl->flags |= ((uri.ptr[4] == ':') ? HTX_SL_F_SCHM_HTTP : HTX_SL_F_SCHM_HTTPS);
+
+			authority = http_parse_authority(&parser, 1);
+			if (!authority.len)
+				goto fail;
+		}
+		else {
+			// authority-form (RFC7230 #5.3.3)
+			authority = uri;
+		}
+
+		/* Replace header host value */
+		ctx.blk = NULL;
+		while (http_find_header(htx, ist("host"), &ctx, 1)) {
+			if (!http_replace_header_value(htx, &ctx, authority))
+				goto fail;
+		}
+
+	}
+	return 1;
+ fail:
+	return 0;
+}
+
+/* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of
+ * the header whose name is <hdr>. If <ctx> is null, lookup is
+ * performed over the whole headers. Otherwise it must contain a valid header
+ * context, initialised with ctx->blk=NULL for the first lookup in a series. If
+ * <occ> is positive or null, occurrence #occ from the beginning (or last ctx)
+ * is returned. Occ #0 and #1 are equivalent. If <occ> is negative (and no less
+ * than -MAX_HDR_HISTORY), the occurrence is counted from the last one which is
+ * -1. The value fetch stops at commas, so this function is suited for use with
+ * list headers.
+ * The return value is 0 if nothing was found, or non-zero otherwise.
+ */
+unsigned int http_get_htx_hdr(const struct htx *htx, const struct ist hdr,
+			      int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen)
+{
+	struct http_hdr_ctx local_ctx;
+	struct ist val_hist[MAX_HDR_HISTORY];
+	unsigned int hist_idx;
+	int found;
+
+	if (!ctx) {
+		local_ctx.blk = NULL;
+		ctx = &local_ctx;
+	}
+
+	if (occ >= 0) {
+		/* search from the beginning */
+		while (http_find_header(htx, hdr, ctx, 0)) {
+			occ--;
+			if (occ <= 0) {
+				*vptr = ctx->value.ptr;
+				*vlen = ctx->value.len;
+				return 1;
+			}
+		}
+		return 0;
+	}
+
+	/* negative occurrence, we scan all the list then walk back */
+	if (-occ > MAX_HDR_HISTORY)
+		return 0;
+
+	/* keep a circular history of the last MAX_HDR_HISTORY values */
+	found = hist_idx = 0;
+	while (http_find_header(htx, hdr, ctx, 0)) {
+		val_hist[hist_idx] = ctx->value;
+		if (++hist_idx >= MAX_HDR_HISTORY)
+			hist_idx = 0;
+		found++;
+	}
+	if (-occ > found)
+		return 0;
+
+	/* OK now we have the last occurrence in [hist_idx-1], and we need to
+	 * find occurrence -occ. 0 <= hist_idx < MAX_HDR_HISTORY, and we have
+	 * -10 <= occ <= -1. So we have to check [hist_idx%MAX_HDR_HISTORY+occ]
+	 * to remain in the 0..9 range.
+	 */
+	hist_idx += occ + MAX_HDR_HISTORY;
+	if (hist_idx >= MAX_HDR_HISTORY)
+		hist_idx -= MAX_HDR_HISTORY;
+	*vptr = val_hist[hist_idx].ptr;
+	*vlen = val_hist[hist_idx].len;
+	return 1;
+}
+
+/* Return in <vptr> and <vlen> the pointer and length of occurrence <occ> of
+ * the header whose name is <hdr>. If <ctx> is null, lookup is
+ * performed over the whole headers. Otherwise it must contain a valid header
+ * context, initialised with ctx->blk=NULL for the first lookup in a series. If
+ * <occ> is positive or null, occurrence #occ from the beginning (or last ctx)
+ * is returned. Occ #0 and #1 are equivalent. If <occ> is negative (and no less
+ * than -MAX_HDR_HISTORY), the occurrence is counted from the last one which is
+ * -1. This function differs from http_get_hdr() in that it only returns full
+ * line header values and does not stop at commas.
+ * The return value is 0 if nothing was found, or non-zero otherwise.
+ */
+unsigned int http_get_htx_fhdr(const struct htx *htx, const struct ist hdr,
+			       int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen)
+{
+	struct http_hdr_ctx local_ctx;
+	struct ist val_hist[MAX_HDR_HISTORY];
+	unsigned int hist_idx;
+	int found;
+
+	if (!ctx) {
+		local_ctx.blk = NULL;
+		ctx = &local_ctx;
+	}
+
+	if (occ >= 0) {
+		/* search from the beginning */
+		while (http_find_header(htx, hdr, ctx, 1)) {
+			occ--;
+			if (occ <= 0) {
+				*vptr = ctx->value.ptr;
+				*vlen = ctx->value.len;
+				return 1;
+			}
+		}
+		return 0;
+	}
+
+	/* negative occurrence, we scan all the list then walk back */
+	if (-occ > MAX_HDR_HISTORY)
+		return 0;
+
+	/* keep a circular history of the last MAX_HDR_HISTORY values */
+	found = hist_idx = 0;
+	while (http_find_header(htx, hdr, ctx, 1)) {
+		val_hist[hist_idx] = ctx->value;
+		if (++hist_idx >= MAX_HDR_HISTORY)
+			hist_idx = 0;
+		found++;
+	}
+	if (-occ > found)
+		return 0;
+
+	/* OK now we have the last occurrence in [hist_idx-1], and we need to
+	 * find occurrence -occ. 0 <= hist_idx < MAX_HDR_HISTORY, and we have
+	 * -10 <= occ <= -1. So we have to check [hist_idx%MAX_HDR_HISTORY+occ]
+	 * to remain in the 0..9 range.
+	 */
+	hist_idx += occ + MAX_HDR_HISTORY;
+	if (hist_idx >= MAX_HDR_HISTORY)
+		hist_idx -= MAX_HDR_HISTORY;
+	*vptr = val_hist[hist_idx].ptr;
+	*vlen = val_hist[hist_idx].len;
+	return 1;
+}
+
+/* Convert the raw HTTP/1 response message <raw> into an HTX message stored in
+ * <buf>. The buffer is allocated here. If <raw> is empty, <buf> is left
+ * unallocated (area=NULL, size=0) and 1 is returned. On success 1 is
+ * returned. On error, 0 is returned, <buf> is released and reset, and an error
+ * message is written into <errmsg>. Chunk-encoded payloads are rejected and
+ * the actual payload length must match the announced content-length, if any.
+ */
+int http_str_to_htx(struct buffer *buf, struct ist raw, char **errmsg)
+{
+	struct htx *htx;
+	struct htx_sl *sl;
+	struct h1m h1m;
+	struct http_hdr hdrs[global.tune.max_http_hdr];
+	union h1_sl h1sl;
+	unsigned int flags = HTX_SL_F_IS_RESP;
+	int ret = 0;
+
+	b_reset(buf);
+	if (!raw.len) {
+		buf->size = 0;
+		buf->area = NULL;
+		return 1;
+	}
+
+	buf->size = global.tune.bufsize;
+	buf->area = malloc(buf->size);
+	if (!buf->area)
+		goto error;
+
+	/* parse the start-line and the headers of the raw H1 message */
+	h1m_init_res(&h1m);
+	h1m.flags |= H1_MF_NO_PHDR;
+	ret = h1_headers_to_hdr_list(raw.ptr, istend(raw),
+				     hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &h1m, &h1sl);
+	if (ret <= 0) {
+		memprintf(errmsg, "unable to parse headers (error offset: %d)", h1m.err_pos);
+		goto error;
+	}
+
+	/* only "HTTP/x.y" versions (8 chars) are expected here */
+	if (unlikely(h1sl.st.v.len != 8)) {
+		memprintf(errmsg, "invalid http version (%.*s)", (int)h1sl.st.v.len, h1sl.st.v.ptr);
+		goto error;
+	}
+	if ((*(h1sl.st.v.ptr + 5) > '1') ||
+	    ((*(h1sl.st.v.ptr + 5) == '1') && (*(h1sl.st.v.ptr + 7) >= '1')))
+		h1m.flags |= H1_MF_VER_11;
+
+	/* interim (1xx) responses are rejected, except 101 */
+	if (h1sl.st.status < 200 && (h1sl.st.status == 100 || h1sl.st.status >= 102)) {
+		memprintf(errmsg, "invalid http status code for an error message (%u)",
+			  h1sl.st.status);
+		goto error;
+	}
+
+	if (h1sl.st.status == 204 || h1sl.st.status == 304) {
+		/* Responses known to have no body. */
+		h1m.flags &= ~(H1_MF_CLEN|H1_MF_CHNK);
+		h1m.flags |= H1_MF_XFER_LEN;
+		h1m.curr_len = h1m.body_len = 0;
+	}
+	else if (h1m.flags & (H1_MF_CLEN|H1_MF_CHNK))
+		h1m.flags |= H1_MF_XFER_LEN;
+
+	/* propagate the parser flags on the start-line flags */
+	if (h1m.flags & H1_MF_VER_11)
+		flags |= HTX_SL_F_VER_11;
+	if (h1m.flags & H1_MF_XFER_ENC)
+		flags |= HTX_SL_F_XFER_ENC;
+	if (h1m.flags & H1_MF_XFER_LEN) {
+		flags |= HTX_SL_F_XFER_LEN;
+		if (h1m.flags & H1_MF_CHNK) {
+			memprintf(errmsg, "chunk-encoded payload not supported");
+			goto error;
+		}
+		else if (h1m.flags & H1_MF_CLEN) {
+			flags |= HTX_SL_F_CLEN;
+			if (h1m.body_len == 0)
+				flags |= HTX_SL_F_BODYLESS;
+		}
+		else
+			flags |= HTX_SL_F_BODYLESS;
+	}
+
+	/* the actual payload must be consistent with the announced one */
+	if ((flags & HTX_SL_F_BODYLESS) && raw.len > ret) {
+		memprintf(errmsg, "message payload not expected");
+		goto error;
+	}
+	if ((flags & HTX_SL_F_CLEN) && h1m.body_len != (raw.len - ret)) {
+		memprintf(errmsg, "payload size does not match the announced content-length (%lu != %lu)",
+			  (unsigned long)(raw.len - ret), (unsigned long)h1m.body_len);
+		goto error;
+	}
+
+	/* now build the HTX message: start-line, headers, then the payload */
+	htx = htx_from_buf(buf);
+	sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, h1sl.st.v, h1sl.st.c, h1sl.st.r);
+	if (!sl || !htx_add_all_headers(htx, hdrs)) {
+		memprintf(errmsg, "unable to add headers into the HTX message");
+		goto error;
+	}
+	sl->info.res.status = h1sl.st.status;
+
+	while (raw.len > ret) {
+		int sent = htx_add_data(htx, ist2(raw.ptr + ret, raw.len - ret));
+		if (!sent) {
+			memprintf(errmsg, "unable to add payload into the HTX message");
+			goto error;
+		}
+		ret += sent;
+	}
+
+	htx->flags |= HTX_FL_EOM;
+
+	return 1;
+
+error:
+	/* release and reset the buffer so the caller does not keep a dangling
+	 * <area> pointer with a non-null <size>.
+	 */
+	ha_free(&buf->area);
+	buf->size = 0;
+	return 0;
+}
+
+/* Release all the components of the http reply <http_reply>: the content-type
+ * string, the header list (including the log-format expressions composing each
+ * header value) and the type-specific body. <http_reply> itself is freed
+ * too. It is NULL-safe.
+ */
+void release_http_reply(struct http_reply *http_reply)
+{
+	struct logformat_node *lf, *lfb;
+	struct http_reply_hdr *hdr, *hdrb;
+
+	if (!http_reply)
+		return;
+
+	ha_free(&http_reply->ctype);
+	list_for_each_entry_safe(hdr, hdrb, &http_reply->hdrs, list) {
+		LIST_DELETE(&hdr->list);
+		/* release the log-format nodes composing the header value */
+		list_for_each_entry_safe(lf, lfb, &hdr->value, list) {
+			LIST_DELETE(&lf->list);
+			release_sample_expr(lf->expr);
+			free(lf->arg);
+			free(lf);
+		}
+		istfree(&hdr->name);
+		free(hdr);
+	}
+
+	/* the body storage depends on the reply type */
+	if (http_reply->type == HTTP_REPLY_ERRFILES) {
+		ha_free(&http_reply->body.http_errors);
+	}
+	else if (http_reply->type == HTTP_REPLY_RAW)
+		chunk_destroy(&http_reply->body.obj);
+	else if (http_reply->type == HTTP_REPLY_LOGFMT) {
+		list_for_each_entry_safe(lf, lfb, &http_reply->body.fmt, list) {
+			LIST_DELETE(&lf->list);
+			release_sample_expr(lf->expr);
+			free(lf->arg);
+			free(lf);
+		}
+	}
+	free(http_reply);
+}
+
+static int http_htx_init(void)
+{
+ struct buffer chk;
+ struct ist raw;
+ char *errmsg = NULL;
+ int rc;
+ int err_code = 0;
+
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+ if (!http_err_msgs[rc]) {
+ ha_alert("Internal error: no default message defined for HTTP return code %d", rc);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ continue;
+ }
+
+ raw = ist(http_err_msgs[rc]);
+ if (!http_str_to_htx(&chk, raw, &errmsg)) {
+ ha_alert("Internal error: invalid default message for HTTP return code %d: %s.\n",
+ http_err_codes[rc], errmsg);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ else if (errmsg) {
+ ha_warning("invalid default message for HTTP return code %d: %s.\n", http_err_codes[rc], errmsg);
+ err_code |= ERR_WARN;
+ }
+
+ /* Reset errmsg */
+ ha_free(&errmsg);
+
+ http_err_chunks[rc] = chk;
+ http_err_replies[rc].type = HTTP_REPLY_ERRMSG;
+ http_err_replies[rc].status = http_err_codes[rc];
+ http_err_replies[rc].ctype = NULL;
+ LIST_INIT(&http_err_replies[rc].hdrs);
+ http_err_replies[rc].body.errmsg = &http_err_chunks[rc];
+ }
+end:
+ return err_code;
+}
+
+/* Release at deinit all the http-errors resources: the tree of loaded error
+ * messages, the http-errors sections, the declared http replies and the
+ * default error message chunks.
+ */
+static void http_htx_deinit(void)
+{
+	struct http_errors *http_errs, *http_errsb;
+	struct http_reply *http_rep, *http_repb;
+	struct ebpt_node *node, *next;
+	struct http_error_msg *http_errmsg;
+	int rc;
+
+	/* release the tree of loaded error messages */
+	node = ebpt_first(&http_error_messages);
+	while (node) {
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		http_errmsg = container_of(node, typeof(*http_errmsg), node);
+		chunk_destroy(&http_errmsg->msg);
+		free(node->key);
+		free(http_errmsg);
+		node = next;
+	}
+
+	/* release the http-errors sections and their replies */
+	list_for_each_entry_safe(http_errs, http_errsb, &http_errors_list, list) {
+		free(http_errs->conf.file);
+		free(http_errs->id);
+		for (rc = 0; rc < HTTP_ERR_SIZE; rc++)
+			release_http_reply(http_errs->replies[rc]);
+		LIST_DELETE(&http_errs->list);
+		free(http_errs);
+	}
+
+	/* release the declared http replies */
+	list_for_each_entry_safe(http_rep, http_repb, &http_replies_list, list) {
+		LIST_DELETE(&http_rep->list);
+		release_http_reply(http_rep);
+	}
+
+	for (rc = 0; rc < HTTP_ERR_SIZE; rc++)
+		chunk_destroy(&http_err_chunks[rc]);
+}
+
+/* build the default error messages at post-parsing, release everything at deinit */
+REGISTER_CONFIG_POSTPARSER("http_htx", http_htx_init);
+REGISTER_POST_DEINIT(http_htx_deinit);
+
+/* Reads content of the error file <file> and convert it into an HTX message. On
+ * success, the HTX message is returned. On error, NULL is returned and an error
+ * message is written into the <errmsg> buffer. Loaded files are cached in the
+ * <http_error_messages> tree, keyed on the file name, so each file is only
+ * read and converted once.
+ */
+struct buffer *http_load_errorfile(const char *file, char **errmsg)
+{
+	struct buffer *buf = NULL;
+	struct buffer chk;
+	struct ebpt_node *node;
+	struct http_error_msg *http_errmsg;
+	struct stat stat;
+	char *err = NULL;
+	int errnum, errlen;
+	int fd = -1;
+
+	/* already loaded */
+	node = ebis_lookup_len(&http_error_messages, file, strlen(file));
+	if (node) {
+		http_errmsg = container_of(node, typeof(*http_errmsg), node);
+		buf = &http_errmsg->msg;
+		goto out;
+	}
+
+	/* Read the error file content */
+	fd = open(file, O_RDONLY);
+	if ((fd < 0) || (fstat(fd, &stat) < 0)) {
+		memprintf(errmsg, "error opening file '%s'.", file);
+		goto out;
+	}
+
+	/* files larger than a buffer are truncated with a warning */
+	if (stat.st_size <= global.tune.bufsize)
+		errlen = stat.st_size;
+	else {
+		ha_warning("custom error message file '%s' larger than %d bytes. Truncating.\n",
+			   file, global.tune.bufsize);
+		errlen = global.tune.bufsize;
+	}
+
+	err = malloc(errlen);
+	if (!err) {
+		/* NOTE(review): for an empty file, malloc(0) may legitimately
+		 * return NULL and would be reported as an OOM -- confirm */
+		memprintf(errmsg, "out of memory.");
+		goto out;
+	}
+
+	/* the whole file is expected in one read(); a short read is an error */
+	errnum = read(fd, err, errlen);
+	if (errnum != errlen) {
+		memprintf(errmsg, "error reading file '%s'.", file);
+		goto out;
+	}
+
+	/* Create the node corresponding to the error file */
+	http_errmsg = calloc(1, sizeof(*http_errmsg));
+	if (!http_errmsg) {
+		memprintf(errmsg, "out of memory.");
+		goto out;
+	}
+	http_errmsg->node.key = strdup(file);
+	if (!http_errmsg->node.key) {
+		memprintf(errmsg, "out of memory.");
+		free(http_errmsg);
+		goto out;
+	}
+
+	/* Convert the error file into an HTX message */
+	if (!http_str_to_htx(&chk, ist2(err, errlen), errmsg)) {
+		memprintf(errmsg, "'%s': %s", file, *errmsg);
+		free(http_errmsg->node.key);
+		free(http_errmsg);
+		goto out;
+	}
+
+	/* Insert the node in the tree and return the HTX message */
+	http_errmsg->msg = chk;
+	ebis_insert(&http_error_messages, &http_errmsg->node);
+	buf = &http_errmsg->msg;
+
+  out:
+	if (fd >= 0)
+		close(fd);
+	free(err);
+	return buf;
+}
+
+/* Convert the raw http message <msg> into an HTX message. On success, the HTX
+ * message is returned. On error, NULL is returned and an error message is
+ * written into the <errmsg> buffer. Converted messages are cached in the
+ * <http_error_messages> tree, keyed on <key>, so each message is only
+ * converted once.
+ */
+struct buffer *http_load_errormsg(const char *key, const struct ist msg, char **errmsg)
+{
+	struct buffer *buf = NULL;
+	struct buffer chk;
+	struct ebpt_node *node;
+	struct http_error_msg *http_errmsg;
+
+	/* already loaded */
+	node = ebis_lookup_len(&http_error_messages, key, strlen(key));
+	if (node) {
+		http_errmsg = container_of(node, typeof(*http_errmsg), node);
+		buf = &http_errmsg->msg;
+		goto out;
+	}
+	/* Create the node corresponding to the error file */
+	http_errmsg = calloc(1, sizeof(*http_errmsg));
+	if (!http_errmsg) {
+		memprintf(errmsg, "out of memory.");
+		goto out;
+	}
+	http_errmsg->node.key = strdup(key);
+	if (!http_errmsg->node.key) {
+		memprintf(errmsg, "out of memory.");
+		free(http_errmsg);
+		goto out;
+	}
+
+	/* Convert the error file into an HTX message */
+	if (!http_str_to_htx(&chk, msg, errmsg)) {
+		memprintf(errmsg, "invalid error message: %s", *errmsg);
+		free(http_errmsg->node.key);
+		free(http_errmsg);
+		goto out;
+	}
+
+	/* Insert the node in the tree and return the HTX message */
+	http_errmsg->msg = chk;
+	ebis_insert(&http_error_messages, &http_errmsg->node);
+	buf = &http_errmsg->msg;
+ out:
+	return buf;
+}
+
+/* This function parses the raw HTTP error file <file> for the status code
+ * <status>. It returns NULL if there is any error, otherwise it return the
+ * corresponding HTX message.
+ */
+struct buffer *http_parse_errorfile(int status, const char *file, char **errmsg)
+{
+ struct buffer *buf = NULL;
+ int rc;
+
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+ if (http_err_codes[rc] == status) {
+ buf = http_load_errorfile(file, errmsg);
+ break;
+ }
+ }
+
+ if (rc >= HTTP_ERR_SIZE)
+ memprintf(errmsg, "status code '%d' not handled.", status);
+ return buf;
+}
+
+/* This function creates HTX error message corresponding to a redirect message
+ * for the status code <status>. <url> is used as location url for the
+ * redirect. <errloc> is used to know if it is a 302 or a 303 redirect. It
+ * returns NULL if there is any error, otherwise it return the corresponding HTX
+ * message.
+ */
+struct buffer *http_parse_errorloc(int errloc, int status, const char *url, char **errmsg)
+{
+	static const char *HTTP_302 =
+		"HTTP/1.1 302 Found\r\n"
+		"Cache-Control: no-cache\r\n"
+		"Content-length: 0\r\n"
+		"Location: "; /* not terminated since it will be concatenated with the URL */
+	static const char *HTTP_303 =
+		"HTTP/1.1 303 See Other\r\n"
+		"Cache-Control: no-cache\r\n"
+		"Content-length: 0\r\n"
+		"Location: "; /* not terminated since it will be concatenated with the URL */
+
+	struct buffer *buf = NULL;
+	const char *msg;
+	char *key = NULL, *err = NULL;
+	int rc, errlen;
+
+	for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+		if (http_err_codes[rc] == status) {
+			/* Create the error key */
+			if (!memprintf(&key, "errorloc%d %s", errloc, url)) {
+				memprintf(errmsg, "out of memory.");
+				goto out;
+			}
+			/* Create the error message: 4 extra bytes for the
+			 * final "\r\n\r\n" and 1 for the trailing NUL */
+			msg = (errloc == 302 ? HTTP_302 : HTTP_303);
+			errlen = strlen(msg) + strlen(url) + 5;
+			err = malloc(errlen);
+			if (!err) {
+				memprintf(errmsg, "out of memory.");
+				goto out;
+			}
+			errlen = snprintf(err, errlen, "%s%s\r\n\r\n", msg, url);
+
+			/* Load it */
+			buf = http_load_errormsg(key, ist2(err, errlen), errmsg);
+			break;
+		}
+	}
+
+	if (rc >= HTTP_ERR_SIZE)
+		memprintf(errmsg, "status code '%d' not handled.", status);
+out:
+	free(key);
+	free(err);
+	return buf;
+}
+
+/* Check an "http reply" and, for replies referencing an http-errors section,
+ * try to find the right section and the right error message in this section. If
+ * found, the reply is updated. If the http-errors section exists but the error
+ * message is not found, no error message is set to fallback on the default
+ * ones. Otherwise (unknown section) an error is returned.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and errmsg is
+ * filled.
+ */
+int http_check_http_reply(struct http_reply *reply, struct proxy *px, char **errmsg)
+{
+	struct http_errors *http_errs;
+	int ret = 1;
+
+	if (reply->type != HTTP_REPLY_ERRFILES)
+		goto end;
+
+	list_for_each_entry(http_errs, &http_errors_list, list) {
+		if (strcmp(http_errs->id, reply->body.http_errors) == 0) {
+			/* the section exists: resolve the reply now. A missing
+			 * status in the section only emits a warning, so the
+			 * default message is used as fallback */
+			reply->type = HTTP_REPLY_INDIRECT;
+			free(reply->body.http_errors);
+			reply->body.reply = http_errs->replies[http_get_status_idx(reply->status)];
+			if (!reply->body.reply)
+				ha_warning("Proxy '%s': status '%d' referenced by an http reply "
+					   "not declared in http-errors section '%s'.\n",
+					   px->id, reply->status, http_errs->id);
+			break;
+		}
+	}
+
+	/* the iterator reached the list head: no section matched */
+	if (&http_errs->list == &http_errors_list) {
+		memprintf(errmsg, "unknown http-errors section '%s' referenced by an http reply ",
+			  reply->body.http_errors);
+		ret = 0;
+	}
+
+ end:
+	return ret;
+}
+
+/* Parse an "http reply". It returns the reply on success or NULL on error. This
+ * function creates one of the following http replies :
+ *
+ * - HTTP_REPLY_EMPTY : dummy response, no payload
+ * - HTTP_REPLY_ERRMSG : implicit error message depending on the status code or explicit one
+ * - HTTP_REPLY_ERRFILES : points on an http-errors section (resolved during post-parsing)
+ * - HTTP_REPLY_RAW : explicit file object ('file' argument)
+ * - HTTP_REPLY_LOGFMT : explicit log-format string ('content' argument)
+ *
+ * The content-type must be defined for non-empty payload. It is ignored for
+ * error messages (implicit or explicit). When an http-errors section is
+ * referenced (HTTP_REPLY_ERRFILES), the real error message should be resolved
+ * during the configuration validity check or dynamically. It is the caller
+ * responsibility to choose. If no status code is configured, <default_status>
+ * is set.
+ */
+struct http_reply *http_parse_http_reply(const char **args, int *orig_arg, struct proxy *px,
+ int default_status, char **errmsg)
+{
+ struct logformat_node *lf, *lfb;
+ struct http_reply *reply = NULL;
+ struct http_reply_hdr *hdr, *hdrb;
+ struct stat stat;
+ const char *act_arg = NULL;
+ char *obj = NULL;
+ int cur_arg, cap = 0, objlen = 0, fd = -1;
+
+
+ reply = calloc(1, sizeof(*reply));
+ if (!reply) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ LIST_INIT(&reply->hdrs);
+ reply->type = HTTP_REPLY_EMPTY;
+ reply->status = default_status;
+
+ if (px->conf.args.ctx == ARGC_HERR)
+ cap = (SMP_VAL_REQUEST | SMP_VAL_RESPONSE);
+ else {
+ if (px->cap & PR_CAP_FE)
+ cap |= ((px->conf.args.ctx == ARGC_HRQ) ? SMP_VAL_FE_HRQ_HDR : SMP_VAL_FE_HRS_HDR);
+ if (px->cap & PR_CAP_BE)
+ cap |= ((px->conf.args.ctx == ARGC_HRQ) ? SMP_VAL_BE_HRQ_HDR : SMP_VAL_BE_HRS_HDR);
+ }
+
+ cur_arg = *orig_arg;
+ while (*args[cur_arg]) {
+ if (strcmp(args[cur_arg], "status") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <status_code> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ reply->status = atol(args[cur_arg]);
+ if (reply->status < 200 || reply->status > 599) {
+ memprintf(errmsg, "Unexpected status code '%d'", reply->status);
+ goto error;
+ }
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "content-type") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <ctype> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ free(reply->ctype);
+ reply->ctype = strdup(args[cur_arg]);
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "errorfiles") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <name> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ reply->body.http_errors = strdup(args[cur_arg]);
+ if (!reply->body.http_errors) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ reply->type = HTTP_REPLY_ERRFILES;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "default-errorfiles") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ reply->type = HTTP_REPLY_ERRMSG;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "errorfile") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <fmt> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ reply->body.errmsg = http_load_errorfile(args[cur_arg], errmsg);
+ if (!reply->body.errmsg) {
+ goto error;
+ }
+ reply->type = HTTP_REPLY_ERRMSG;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "file") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <file> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ fd = open(args[cur_arg], O_RDONLY);
+ if ((fd < 0) || (fstat(fd, &stat) < 0)) {
+ memprintf(errmsg, "error opening file '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (stat.st_size > global.tune.bufsize) {
+ memprintf(errmsg, "file '%s' exceeds the buffer size (%lld > %d)",
+ args[cur_arg], (long long)stat.st_size, global.tune.bufsize);
+ goto error;
+ }
+ objlen = stat.st_size;
+ obj = malloc(objlen);
+ if (!obj || read(fd, obj, objlen) != objlen) {
+ memprintf(errmsg, "error reading file '%s'", args[cur_arg]);
+ goto error;
+ }
+ close(fd);
+ fd = -1;
+ reply->type = HTTP_REPLY_RAW;
+ chunk_initlen(&reply->body.obj, obj, global.tune.bufsize, objlen);
+ obj = NULL;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "string") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <str> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ obj = strdup(args[cur_arg]);
+ objlen = strlen(args[cur_arg]);
+ if (!obj) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ reply->type = HTTP_REPLY_RAW;
+ chunk_initlen(&reply->body.obj, obj, global.tune.bufsize, objlen);
+ obj = NULL;
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "lf-file") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <file> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ fd = open(args[cur_arg], O_RDONLY);
+ if ((fd < 0) || (fstat(fd, &stat) < 0)) {
+ memprintf(errmsg, "error opening file '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (stat.st_size > global.tune.bufsize) {
+ memprintf(errmsg, "file '%s' exceeds the buffer size (%lld > %d)",
+ args[cur_arg], (long long)stat.st_size, global.tune.bufsize);
+ goto error;
+ }
+ objlen = stat.st_size;
+ obj = malloc(objlen + 1);
+ if (!obj || read(fd, obj, objlen) != objlen) {
+ memprintf(errmsg, "error reading file '%s'", args[cur_arg]);
+ goto error;
+ }
+ close(fd);
+ fd = -1;
+ obj[objlen] = '\0';
+ reply->type = HTTP_REPLY_LOGFMT;
+ LIST_INIT(&reply->body.fmt);
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "lf-string") == 0) {
+ if (reply->type != HTTP_REPLY_EMPTY) {
+ memprintf(errmsg, "unexpected '%s' argument, '%s' already defined", args[cur_arg], act_arg);
+ goto error;
+ }
+ act_arg = args[cur_arg];
+ cur_arg++;
+ if (!*args[cur_arg]) {
+ memprintf(errmsg, "'%s' expects <fmt> as argument", args[cur_arg-1]);
+ goto error;
+ }
+ obj = strdup(args[cur_arg]);
+ objlen = strlen(args[cur_arg]);
+ reply->type = HTTP_REPLY_LOGFMT;
+ LIST_INIT(&reply->body.fmt);
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "hdr") == 0) {
+ cur_arg++;
+ if (!*args[cur_arg] || !*args[cur_arg+1]) {
+ memprintf(errmsg, "'%s' expects <name> and <value> as arguments", args[cur_arg-1]);
+ goto error;
+ }
+ if (strcasecmp(args[cur_arg], "content-length") == 0 ||
+ strcasecmp(args[cur_arg], "transfer-encoding") == 0 ||
+ strcasecmp(args[cur_arg], "content-type") == 0) {
+ ha_warning("parsing [%s:%d] : header '%s' always ignored by the http reply.\n",
+ px->conf.args.file, px->conf.args.line, args[cur_arg]);
+ cur_arg += 2;
+ continue;
+ }
+ hdr = calloc(1, sizeof(*hdr));
+ if (!hdr) {
+ memprintf(errmsg, "'%s' : out of memory", args[cur_arg-1]);
+ goto error;
+ }
+ LIST_APPEND(&reply->hdrs, &hdr->list);
+ LIST_INIT(&hdr->value);
+ hdr->name = ist(strdup(args[cur_arg]));
+ if (!isttest(hdr->name)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ if (!parse_logformat_string(args[cur_arg+1], px, &hdr->value, LOG_OPT_HTTP, cap, errmsg))
+ goto error;
+
+ free(px->conf.lfs_file);
+ px->conf.lfs_file = strdup(px->conf.args.file);
+ px->conf.lfs_line = px->conf.args.line;
+ cur_arg += 2;
+ }
+ else
+ break;
+ }
+
+ if (reply->type == HTTP_REPLY_EMPTY) { /* no payload */
+ if (reply->ctype) {
+ ha_warning("parsing [%s:%d] : content-type '%s' ignored by the http reply because"
+ " neither errorfile nor payload defined.\n",
+ px->conf.args.file, px->conf.args.line, reply->ctype);
+ ha_free(&reply->ctype);
+ }
+ }
+ else if (reply->type == HTTP_REPLY_ERRFILES || reply->type == HTTP_REPLY_ERRMSG) { /* errorfiles or errorfile */
+
+ if (reply->type != HTTP_REPLY_ERRMSG || !reply->body.errmsg) {
+ /* default errorfile or errorfiles: check the status */
+ int rc;
+
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+ if (http_err_codes[rc] == reply->status)
+ break;
+ }
+
+ if (rc >= HTTP_ERR_SIZE) {
+ memprintf(errmsg, "status code '%d' not handled by default with '%s' argument.",
+ reply->status, act_arg);
+ goto error;
+ }
+ }
+
+ if (reply->ctype) {
+ ha_warning("parsing [%s:%d] : content-type '%s' ignored by the http reply when used "
+ "with an erorrfile.\n",
+ px->conf.args.file, px->conf.args.line, reply->ctype);
+ ha_free(&reply->ctype);
+ }
+ if (!LIST_ISEMPTY(&reply->hdrs)) {
+ ha_warning("parsing [%s:%d] : hdr parameters ignored by the http reply when used "
+ "with an erorrfile.\n",
+ px->conf.args.file, px->conf.args.line);
+ list_for_each_entry_safe(hdr, hdrb, &reply->hdrs, list) {
+ LIST_DELETE(&hdr->list);
+ list_for_each_entry_safe(lf, lfb, &hdr->value, list) {
+ LIST_DELETE(&lf->list);
+ release_sample_expr(lf->expr);
+ free(lf->arg);
+ free(lf);
+ }
+ istfree(&hdr->name);
+ free(hdr);
+ }
+ }
+ }
+ else if (reply->type == HTTP_REPLY_RAW) { /* explicit parameter using 'file' parameter*/
+ if ((reply->status == 204 || reply->status == 304) && objlen) {
+ memprintf(errmsg, "No body expected for %d responses", reply->status);
+ goto error;
+ }
+ if (!reply->ctype && objlen) {
+ memprintf(errmsg, "a content type must be defined when non-empty payload is configured");
+ goto error;
+ }
+ if (reply->ctype && !b_data(&reply->body.obj)) {
+ ha_warning("parsing [%s:%d] : content-type '%s' ignored by the http reply when used "
+ "with an empty payload.\n",
+ px->conf.args.file, px->conf.args.line, reply->ctype);
+ ha_free(&reply->ctype);
+ }
+ if (b_room(&reply->body.obj) < global.tune.maxrewrite) {
+ ha_warning("parsing [%s:%d] : http reply payload runs over the buffer space reserved to headers rewriting."
+ " It may lead to internal errors if strict rewriting mode is enabled.\n",
+ px->conf.args.file, px->conf.args.line);
+ }
+ }
+ else if (reply->type == HTTP_REPLY_LOGFMT) { /* log-format payload using 'lf-file' of 'lf-string' parameter */
+ LIST_INIT(&reply->body.fmt);
+ if ((reply->status == 204 || reply->status == 304)) {
+ memprintf(errmsg, "No body expected for %d responses", reply->status);
+ goto error;
+ }
+ if (!reply->ctype) {
+ memprintf(errmsg, "a content type must be defined with a log-format payload");
+ goto error;
+ }
+ if (!parse_logformat_string(obj, px, &reply->body.fmt, LOG_OPT_HTTP, cap, errmsg))
+ goto error;
+
+ free(px->conf.lfs_file);
+ px->conf.lfs_file = strdup(px->conf.args.file);
+ px->conf.lfs_line = px->conf.args.line;
+ }
+
+ free(obj);
+ *orig_arg = cur_arg;
+ return reply;
+
+ error:
+ free(obj);
+ if (fd >= 0)
+ close(fd);
+ release_http_reply(reply);
+ return NULL;
+}
+
/* Apply schemed-based normalization as described on rfc3986 on section 6.3.2.
 * Returns 0 if no error has been found else non-zero.
 *
 * The normalization is processed on the target-uri at the condition that it is
 * in absolute-form. In the case where the target-uri was normalized, every
 * host headers values found are also replaced by the normalized hostname. This
 * assumes that the target-uri and host headers were properly identified as
 * similar before calling this function.
 */
int http_scheme_based_normalize(struct htx *htx)
{
	struct http_hdr_ctx ctx;
	struct htx_sl *sl;
	struct ist uri, scheme, authority, host, port;
	struct http_uri_parser parser;

	sl = http_get_stline(htx);

	/* nothing to do unless the start-line carries both a scheme and an
	 * authority, i.e. the request target is in absolute-form
	 */
	if (!sl || !(sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_HAS_AUTHORITY)))
		return 0;

	uri = htx_sl_req_uri(sl);

	parser = http_uri_parser_init(uri);
	scheme = http_parse_scheme(&parser);
	/* if no scheme found, no normalization to proceed */
	if (!isttest(scheme))
		return 0;

	/* Extract the port if present in authority */
	authority = http_parse_authority(&parser, 1);
	port = http_get_host_port(authority);
	if (!isttest(port)) {
		/* if no port found, no normalization to proceed */
		return 0;
	}
	/* host = authority minus the trailing ":<port>" (the -1 drops the colon) */
	host = isttrim(authority, istlen(authority) - istlen(port) - 1);

	if (http_is_default_port(scheme, port)) {
		/* reconstruct the uri with removal of the port */
		struct buffer *temp = get_trash_chunk();
		struct ist meth, vsn;

		/* meth: copied into the trash so it survives the start-line replacement */
		chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl));
		meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));

		/* vsn */
		chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl));
		vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));

		/* reconstruct uri without port: <prefix up to authority><host><suffix> */
		chunk_memcat(temp, uri.ptr, authority.ptr - uri.ptr);
		chunk_istcat(temp, host);
		chunk_memcat(temp, istend(authority), istend(uri) - istend(authority));
		uri = ist2(temp->area + meth.len + vsn.len, host.len + uri.len - authority.len); /* uri */

		http_replace_stline(htx, meth, uri, vsn);

		/* replace every host headers values by the normalized host */
		ctx.blk = NULL;
		while (http_find_header(htx, ist("host"), &ctx, 0)) {
			if (!http_replace_header_value(htx, &ctx, host))
				goto fail;
		}
	}

	return 0;

  fail:
	return 1;
}
+
+/* First step function to merge multiple cookie headers in a single entry.
+ *
+ * Use it for each cookie header at <idx> index over HTTP headers in <list>.
+ * <first> and <last> are state variables used internally and must be
+ * initialized to -1 before the first invocation.
+ */
+void http_cookie_register(struct http_hdr *list, int idx, int *first, int *last)
+{
+ /* Build a linked list of cookie headers. Use header length to point to
+ * the next one. The last entry will contains -1.
+ */
+
+ /* Caller is responsible to initialize *first and *last to -1 on first
+ * invocation. Both will thus be set to a valid index after it.
+ */
+ BUG_ON(*first > 0 && *last < 0);
+
+ /* Mark the current end of cookie linked list. */
+ list[idx].n.len = -1;
+ if (*first < 0) {
+ /* Save first found cookie for http_cookie_merge call. */
+ *first = idx;
+ }
+ else {
+ /* Update linked list of cookies. */
+ list[*last].n.len = idx;
+ }
+
+ *last = idx;
+}
+
/* Second step to merge multiple cookie headers in a single entry.
 *
 * Use it when looping over HTTP headers is done and <htx> message is built.
 * This will concatenate each cookie headers present from <list> directly into
 * <htx> message. <first> is reused from previous http_cookie_register
 * invocation.
 *
 * Returns 0 on success else non-zero.
 */
int http_cookie_merge(struct htx *htx, struct http_hdr *list, int first)
{
	uint32_t fs; /* free space */
	uint32_t bs; /* block size */
	uint32_t vl; /* value len */
	uint32_t tl; /* total length */
	struct htx_blk *blk;

	/* no cookie header was registered at all */
	if (first < 0)
		return 0;

	/* emit a single "cookie" header holding the first value; every other
	 * value is appended to this block below
	 */
	blk = htx_add_header(htx, ist("cookie"), list[first].v);
	if (!blk)
		return 1;

	tl = list[first].v.len;
	fs = htx_free_data_space(htx);
	bs = htx_get_blksz(blk);

	/* for each extra cookie, we'll extend the cookie's value and insert
	 * "; " before the new value.
	 */
	fs += tl; /* first one is already counted */

	/* Loop over cookies linked list built from http_cookie_register. */
	while ((first = list[first].n.len) >= 0) {
		vl = list[first].v.len;
		tl += vl + 2; /* +2 for the "; " separator */
		if (tl > fs)
			return 1; /* would overflow the free space */

		/* grow the header block and append "; <value>" at its end */
		htx_change_blk_value_len(htx, blk, tl);
		*(char *)(htx_get_blk_ptr(htx, blk) + bs + 0) = ';';
		*(char *)(htx_get_blk_ptr(htx, blk) + bs + 1) = ' ';
		memcpy(htx_get_blk_ptr(htx, blk) + bs + 2,
		       list[first].v.ptr, vl);
		bs += vl + 2;
	}

	return 0;
}
+
+/* Parses the "errorloc[302|303]" proxy keyword */
+static int proxy_parse_errorloc(char **args, int section, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **errmsg)
+{
+ struct conf_errors *conf_err;
+ struct http_reply *reply;
+ struct buffer *msg;
+ int errloc, status;
+ int ret = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_FE | PR_CAP_BE, file, line, args[0], NULL)) {
+ ret = 1;
+ goto out;
+ }
+
+ if (*(args[1]) == 0 || *(args[2]) == 0) {
+ memprintf(errmsg, "%s : expects <status_code> and <url> as arguments.\n", args[0]);
+ ret = -1;
+ goto out;
+ }
+
+ status = atol(args[1]);
+ errloc = (strcmp(args[0], "errorloc303") == 0 ? 303 : 302);
+ msg = http_parse_errorloc(errloc, status, args[2], errmsg);
+ if (!msg) {
+ memprintf(errmsg, "%s : %s", args[0], *errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ reply = calloc(1, sizeof(*reply));
+ if (!reply) {
+ memprintf(errmsg, "%s : out of memory.", args[0]);
+ ret = -1;
+ goto out;
+ }
+ reply->type = HTTP_REPLY_ERRMSG;
+ reply->status = status;
+ reply->ctype = NULL;
+ LIST_INIT(&reply->hdrs);
+ reply->body.errmsg = msg;
+ LIST_APPEND(&http_replies_list, &reply->list);
+
+ conf_err = calloc(1, sizeof(*conf_err));
+ if (!conf_err) {
+ memprintf(errmsg, "%s : out of memory.", args[0]);
+ free(reply);
+ ret = -1;
+ goto out;
+ }
+ conf_err->type = 1;
+ conf_err->info.errorfile.status = status;
+ conf_err->info.errorfile.reply = reply;
+
+ conf_err->file = strdup(file);
+ conf_err->line = line;
+ LIST_APPEND(&curpx->conf.errors, &conf_err->list);
+
+ /* handle warning message */
+ if (*errmsg)
+ ret = 1;
+ out:
+ return ret;
+
+}
+
+/* Parses the "errorfile" proxy keyword */
+static int proxy_parse_errorfile(char **args, int section, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **errmsg)
+{
+ struct conf_errors *conf_err;
+ struct http_reply *reply;
+ struct buffer *msg;
+ int status;
+ int ret = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_FE | PR_CAP_BE, file, line, args[0], NULL)) {
+ ret = 1;
+ goto out;
+ }
+
+ if (*(args[1]) == 0 || *(args[2]) == 0) {
+ memprintf(errmsg, "%s : expects <status_code> and <file> as arguments.\n", args[0]);
+ ret = -1;
+ goto out;
+ }
+
+ status = atol(args[1]);
+ msg = http_parse_errorfile(status, args[2], errmsg);
+ if (!msg) {
+ memprintf(errmsg, "%s : %s", args[0], *errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ reply = calloc(1, sizeof(*reply));
+ if (!reply) {
+ memprintf(errmsg, "%s : out of memory.", args[0]);
+ ret = -1;
+ goto out;
+ }
+ reply->type = HTTP_REPLY_ERRMSG;
+ reply->status = status;
+ reply->ctype = NULL;
+ LIST_INIT(&reply->hdrs);
+ reply->body.errmsg = msg;
+ LIST_APPEND(&http_replies_list, &reply->list);
+
+ conf_err = calloc(1, sizeof(*conf_err));
+ if (!conf_err) {
+ memprintf(errmsg, "%s : out of memory.", args[0]);
+ free(reply);
+ ret = -1;
+ goto out;
+ }
+ conf_err->type = 1;
+ conf_err->info.errorfile.status = status;
+ conf_err->info.errorfile.reply = reply;
+ conf_err->file = strdup(file);
+ conf_err->line = line;
+ LIST_APPEND(&curpx->conf.errors, &conf_err->list);
+
+ /* handle warning message */
+ if (*errmsg)
+ ret = 1;
+ out:
+ return ret;
+
+}
+
+/* Parses the "errorfiles" proxy keyword */
+static int proxy_parse_errorfiles(char **args, int section, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ struct conf_errors *conf_err = NULL;
+ char *name = NULL;
+ int rc, ret = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_FE | PR_CAP_BE, file, line, args[0], NULL)) {
+ ret = 1;
+ goto out;
+ }
+
+ if (!*(args[1])) {
+ memprintf(err, "%s : expects <name> as argument.", args[0]);
+ ret = -1;
+ goto out;
+ }
+
+ name = strdup(args[1]);
+ conf_err = calloc(1, sizeof(*conf_err));
+ if (!name || !conf_err) {
+ memprintf(err, "%s : out of memory.", args[0]);
+ goto error;
+ }
+ conf_err->type = 0;
+
+ conf_err->info.errorfiles.name = name;
+ if (!*(args[2])) {
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++)
+ conf_err->info.errorfiles.status[rc] = 1;
+ }
+ else {
+ int cur_arg, status;
+ for (cur_arg = 2; *(args[cur_arg]); cur_arg++) {
+ status = atol(args[cur_arg]);
+
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+ if (http_err_codes[rc] == status) {
+ conf_err->info.errorfiles.status[rc] = 2;
+ break;
+ }
+ }
+ if (rc >= HTTP_ERR_SIZE) {
+ memprintf(err, "%s : status code '%d' not handled.", args[0], status);
+ goto error;
+ }
+ }
+ }
+ conf_err->file = strdup(file);
+ conf_err->line = line;
+ LIST_APPEND(&curpx->conf.errors, &conf_err->list);
+ out:
+ return ret;
+
+ error:
+ free(name);
+ free(conf_err);
+ ret = -1;
+ goto out;
+}
+
/* Parses the "http-error" proxy keyword. Returns -1 on error, 1 if a warning
 * was emitted, 0 otherwise. On error <errmsg> is filled.
 */
static int proxy_parse_http_error(char **args, int section, struct proxy *curpx,
				  const struct proxy *defpx, const char *file, int line,
				  char **errmsg)
{
	struct conf_errors *conf_err;
	struct http_reply *reply = NULL;
	int rc, cur_arg, ret = 0;

	if (warnifnotcap(curpx, PR_CAP_FE | PR_CAP_BE, file, line, args[0], NULL)) {
		ret = 1;
		goto out;
	}

	/* delegate parsing of the reply parameters (status, payload, hdrs...) */
	cur_arg = 1;
	curpx->conf.args.ctx = ARGC_HERR;
	reply = http_parse_http_reply((const char **)args, &cur_arg, curpx, 0, errmsg);
	if (!reply) {
		memprintf(errmsg, "%s : %s", args[0], *errmsg);
		goto error;
	}
	else if (!reply->status) {
		memprintf(errmsg, "%s : expects at least a <status> as arguments.\n", args[0]);
		goto error;
	}

	/* only status codes from the known errorfile set may be customized */
	for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
		if (http_err_codes[rc] == reply->status)
			break;
	}

	if (rc >= HTTP_ERR_SIZE) {
		memprintf(errmsg, "%s: status code '%d' not handled.", args[0], reply->status);
		goto error;
	}
	if (*args[cur_arg]) {
		memprintf(errmsg, "%s : unknown keyword '%s'.", args[0], args[cur_arg]);
		goto error;
	}

	conf_err = calloc(1, sizeof(*conf_err));
	if (!conf_err) {
		memprintf(errmsg, "%s : out of memory.", args[0]);
		goto error;
	}
	if (reply->type == HTTP_REPLY_ERRFILES) {
		int rc = http_get_status_idx(reply->status);

		/* "errorfiles" variant: keep only the section name (ownership is
		 * transferred to conf_err) and drop the temporary reply object
		 */
		conf_err->type = 2;
		conf_err->info.errorfiles.name = reply->body.http_errors;
		conf_err->info.errorfiles.status[rc] = 2;
		reply->body.http_errors = NULL;
		release_http_reply(reply);
	}
	else {
		/* inline reply: keep it and register it in the global list */
		conf_err->type = 1;
		conf_err->info.errorfile.status = reply->status;
		conf_err->info.errorfile.reply = reply;
		LIST_APPEND(&http_replies_list, &reply->list);
	}
	conf_err->file = strdup(file);
	conf_err->line = line;
	LIST_APPEND(&curpx->conf.errors, &conf_err->list);

	/* handle warning message */
	if (*errmsg)
		ret = 1;
  out:
	return ret;

  error:
	release_http_reply(reply);
	ret = -1;
	goto out;

}
+
+/* Check "errorfiles" proxy keyword */
+static int proxy_check_errors(struct proxy *px)
+{
+ struct conf_errors *conf_err, *conf_err_back;
+ struct http_errors *http_errs;
+ int rc, err = ERR_NONE;
+
+ list_for_each_entry_safe(conf_err, conf_err_back, &px->conf.errors, list) {
+ if (conf_err->type == 1) {
+ /* errorfile */
+ rc = http_get_status_idx(conf_err->info.errorfile.status);
+ px->replies[rc] = conf_err->info.errorfile.reply;
+
+ /* For proxy, to rely on default replies, just don't reference a reply */
+ if (px->replies[rc]->type == HTTP_REPLY_ERRMSG && !px->replies[rc]->body.errmsg)
+ px->replies[rc] = NULL;
+ }
+ else {
+ /* errorfiles */
+ list_for_each_entry(http_errs, &http_errors_list, list) {
+ if (strcmp(http_errs->id, conf_err->info.errorfiles.name) == 0)
+ break;
+ }
+
+ /* unknown http-errors section */
+ if (&http_errs->list == &http_errors_list) {
+ ha_alert("proxy '%s': unknown http-errors section '%s' (at %s:%d).\n",
+ px->id, conf_err->info.errorfiles.name, conf_err->file, conf_err->line);
+ err |= ERR_ALERT | ERR_FATAL;
+ free(conf_err->info.errorfiles.name);
+ goto next;
+ }
+
+ free(conf_err->info.errorfiles.name);
+ for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
+ if (conf_err->info.errorfiles.status[rc] > 0) {
+ if (http_errs->replies[rc])
+ px->replies[rc] = http_errs->replies[rc];
+ else if (conf_err->info.errorfiles.status[rc] == 2)
+ ha_warning("config: proxy '%s' : status '%d' not declared in"
+ " http-errors section '%s' (at %s:%d).\n",
+ px->id, http_err_codes[rc], http_errs->id,
+ conf_err->file, conf_err->line);
+ }
+ }
+ }
+ next:
+ LIST_DELETE(&conf_err->list);
+ free(conf_err->file);
+ free(conf_err);
+ }
+
+ out:
+ return err;
+}
+
+static int post_check_errors()
+{
+ struct ebpt_node *node;
+ struct http_error_msg *http_errmsg;
+ struct htx *htx;
+ int err_code = ERR_NONE;
+
+ node = ebpt_first(&http_error_messages);
+ while (node) {
+ http_errmsg = container_of(node, typeof(*http_errmsg), node);
+ if (b_is_null(&http_errmsg->msg))
+ goto next;
+ htx = htxbuf(&http_errmsg->msg);
+ if (htx_free_data_space(htx) < global.tune.maxrewrite) {
+ ha_warning("config: errorfile '%s' runs over the buffer space"
+ " reserved to headers rewriting. It may lead to internal errors if "
+ " http-after-response rules are evaluated on this message.\n",
+ (char *)node->key);
+ err_code |= ERR_WARN;
+ }
+ next:
+ node = ebpt_next(node);
+ }
+
+ return err_code;
+}
+
/* Duplicates the custom error entries inherited from the defaults section
 * <defpx> into proxy <curpx>. Returns 1 on success, 0 on allocation failure
 * (in which case <errmsg> is filled).
 */
int proxy_dup_default_conf_errors(struct proxy *curpx, const struct proxy *defpx, char **errmsg)
{
	struct conf_errors *conf_err, *new_conf_err = NULL;
	int ret = 0;

	list_for_each_entry(conf_err, &defpx->conf.errors, list) {
		new_conf_err = calloc(1, sizeof(*new_conf_err));
		if (!new_conf_err) {
			memprintf(errmsg, "unable to duplicate default errors (out of memory).");
			goto out;
		}
		new_conf_err->type = conf_err->type;
		if (conf_err->type == 1) {
			/* "errorfile" entry: the reply object is shared, not copied */
			new_conf_err->info.errorfile.status = conf_err->info.errorfile.status;
			new_conf_err->info.errorfile.reply = conf_err->info.errorfile.reply;
		}
		else {
			/* "errorfiles" entry: the section name must be duplicated */
			new_conf_err->info.errorfiles.name = strdup(conf_err->info.errorfiles.name);
			if (!new_conf_err->info.errorfiles.name) {
				memprintf(errmsg, "unable to duplicate default errors (out of memory).");
				goto out;
			}
			memcpy(&new_conf_err->info.errorfiles.status, &conf_err->info.errorfiles.status,
			       sizeof(conf_err->info.errorfiles.status));
		}
		new_conf_err->file = strdup(conf_err->file);
		new_conf_err->line = conf_err->line;
		LIST_APPEND(&curpx->conf.errors, &new_conf_err->list);
		new_conf_err = NULL; /* ownership transferred to <curpx> */
	}
	ret = 1;

  out:
	free(new_conf_err); /* non-NULL only when an error interrupted the loop */
	return ret;
}
+
+void proxy_release_conf_errors(struct proxy *px)
+{
+ struct conf_errors *conf_err, *conf_err_back;
+
+ list_for_each_entry_safe(conf_err, conf_err_back, &px->conf.errors, list) {
+ if (conf_err->type == 0)
+ free(conf_err->info.errorfiles.name);
+ LIST_DELETE(&conf_err->list);
+ free(conf_err->file);
+ free(conf_err);
+ }
+}
+
/*
 * Parse an <http-errors> section.
 * Returns the error code, 0 if OK, or any combination of :
 *  - ERR_ABORT: must abort ASAP
 *  - ERR_FATAL: we can continue parsing but not start the service
 *  - ERR_WARN: a warning has been emitted
 *  - ERR_ALERT: an alert has been emitted
 * Only the two first ones can stop processing, the two others are just
 * indicators.
 */
static int cfg_parse_http_errors(const char *file, int linenum, char **args, int kwm)
{
	static struct http_errors *curr_errs = NULL; /* section being parsed, kept across calls */
	int err_code = 0;
	const char *err;
	char *errmsg = NULL;

	if (strcmp(args[0], "http-errors") == 0) { /* new errors section */
		if (!*args[1]) {
			ha_alert("parsing [%s:%d] : missing name for http-errors section.\n", file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		/* the section name must only contain permitted characters */
		err = invalid_char(args[1]);
		if (err) {
			ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
				 file, linenum, *err, args[0], args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
		}

		list_for_each_entry(curr_errs, &http_errors_list, list) {
			/* Error if two errors section owns the same name */
			if (strcmp(curr_errs->id, args[1]) == 0) {
				ha_alert("parsing [%s:%d]: http-errors section '%s' already exists (declared at %s:%d).\n",
					 file, linenum, args[1], curr_errs->conf.file, curr_errs->conf.line);
				err_code |= ERR_ALERT | ERR_FATAL;
			}
		}

		if ((curr_errs = calloc(1, sizeof(*curr_errs))) == NULL) {
			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}

		LIST_APPEND(&http_errors_list, &curr_errs->list);
		curr_errs->id = strdup(args[1]);
		curr_errs->conf.file = strdup(file);
		curr_errs->conf.line = linenum;
	}
	else if (strcmp(args[0], "errorfile") == 0) { /* error message from a file */
		struct http_reply *reply;
		struct buffer *msg;
		int status, rc;

		if (*(args[1]) == 0 || *(args[2]) == 0) {
			ha_alert("parsing [%s:%d] : %s: expects <status_code> and <file> as arguments.\n",
				 file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}

		status = atol(args[1]);
		msg = http_parse_errorfile(status, args[2], &errmsg);
		if (!msg) {
			ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		if (errmsg) {
			/* non-fatal diagnostic returned by http_parse_errorfile() */
			ha_warning("parsing [%s:%d] : %s: %s\n", file, linenum, args[0], errmsg);
			err_code |= ERR_WARN;
		}

		reply = calloc(1, sizeof(*reply));
		if (!reply) {
			ha_alert("parsing [%s:%d] : %s : out of memory.\n", file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		reply->type = HTTP_REPLY_ERRMSG;
		reply->status = status;
		reply->ctype = NULL;
		LIST_INIT(&reply->hdrs);
		reply->body.errmsg = msg;

		/* store the reply in the section slot matching its status code */
		rc = http_get_status_idx(status);
		curr_errs->replies[rc] = reply;
	}
	else if (*args[0] != 0) {
		ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}

out:
	free(errmsg);
	return err_code;
}
+
/* Configuration keywords accepted in listen/frontend/backend sections */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_LISTEN, "errorloc", proxy_parse_errorloc },
	{ CFG_LISTEN, "errorloc302", proxy_parse_errorloc },
	{ CFG_LISTEN, "errorloc303", proxy_parse_errorloc },
	{ CFG_LISTEN, "errorfile", proxy_parse_errorfile },
	{ CFG_LISTEN, "errorfiles", proxy_parse_errorfiles },
	{ CFG_LISTEN, "http-error", proxy_parse_http_error },
	{ 0, NULL, NULL },
}};
+
/* register the keywords, the post-proxy / post-config checks and the
 * "http-errors" section parser at startup
 */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
REGISTER_POST_PROXY_CHECK(proxy_check_errors);
REGISTER_POST_CHECK(post_check_errors);

REGISTER_CONFIG_SECTION("http-errors", cfg_parse_http_errors, NULL);
+
+/************************************************************************/
+/* HTX sample fetches */
+/************************************************************************/
+
+/* Returns 1 if a stream is an HTX stream. Otherwise, it returns 0. */
+static int
+smp_fetch_is_htx(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ if (!smp->strm)
+ return 0;
+
+ smp->data.u.sint = !!IS_HTX_STRM(smp->strm);
+ smp->data.type = SMP_T_BOOL;
+ return 1;
+}
+
+/* Returns the number of blocks in an HTX message. The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_nbblks(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx_nbblks(htx);
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the size of an HTX message. The channel is chosen depending on the
+ * sample direction. */
+static int
+smp_fetch_htx_size(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx->size;
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the data size of an HTX message. The channel is chosen depending on the
+ * sample direction. */
+static int
+smp_fetch_htx_data(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx->data;
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the used space (data+meta) of an HTX message. The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_used(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx_used_space(htx);
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the free space (size-used) of an HTX message. The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_free(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx_free_space(htx);
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the free space for data (free-sizeof(blk)) of an HTX message. The
+ * channel is chosen depending on the sample direction. */
+static int
+smp_fetch_htx_free_data(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = htx_free_data_space(htx);
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns 1 if the HTX message contains EOM flag. Otherwise it returns 0. The
+ * channel is chosen depending on the sample direction.
+ */
+static int
+smp_fetch_htx_has_eom(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+
+ if (!smp->strm)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ smp->data.u.sint = !!(htx->flags & HTX_FL_EOM);
+ smp->data.type = SMP_T_BOOL;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the type of a specific HTX block, if found in the message. Otherwise
+ * HTX_BLK_UNUSED is returned. Any positive integer (>= 0) is supported or
+ * "head", "tail" or "first". The channel is chosen depending on the sample
+ * direction. */
+static int
+smp_fetch_htx_blk_type(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+ enum htx_blk_type type;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ type = htx_get_head_type(htx);
+ else if (pos == -2)
+ type = htx_get_tail_type(htx);
+ else if (pos == -3)
+ type = htx_get_first_type(htx);
+ else
+ type = ((pos >= htx->head && pos <= htx->tail)
+ ? htx_get_blk_type(htx_get_blk(htx, pos))
+ : HTX_BLK_UNUSED);
+
+ /* the type name is a static string, hence SMP_F_CONST below */
+ chunk_initstr(&smp->data.u.str, htx_blk_type_str(type));
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST | SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the size of a specific HTX block, if found in the message. Otherwise
+ * 0 is returned. Any positive integer (>= 0) is supported or "head", "tail" or
+ * "first". The channel is chosen depending on the sample direction. */
+static int
+smp_fetch_htx_blk_size(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ blk = htx_get_head_blk(htx);
+ else if (pos == -2)
+ blk = htx_get_tail_blk(htx);
+ else if (pos == -3)
+ blk = htx_get_first_blk(htx);
+ else
+ blk = ((pos >= htx->head && pos <= htx->tail) ? htx_get_blk(htx, pos) : NULL);
+
+ smp->data.u.sint = (blk ? htx_get_blksz(blk) : 0);
+ smp->data.type = SMP_T_SINT;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the start-line if the selected HTX block exists and is a
+ * start-line. Otherwise an empty string is returned. Any positive integer
+ * (>= 0) is supported or "head", "tail" or "first". The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_blk_stline(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *temp;
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ struct htx_sl *sl;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ blk = htx_get_head_blk(htx);
+ else if (pos == -2)
+ blk = htx_get_tail_blk(htx);
+ else if (pos == -3)
+ blk = htx_get_first_blk(htx);
+ else
+ blk = ((pos >= htx->head && pos <= htx->tail) ? htx_get_blk(htx, pos) : NULL);
+
+ if (!blk || (htx_get_blk_type(blk) != HTX_BLK_REQ_SL && htx_get_blk_type(blk) != HTX_BLK_RES_SL)) {
+ smp->data.u.str.size = 0;
+ smp->data.u.str.area = "";
+ smp->data.u.str.data = 0;
+ }
+ else {
+ sl = htx_get_blk_ptr(htx, blk);
+
+ /* join the three start-line parts with single spaces into the
+ * trash chunk. NOTE(review): no room check before writing the
+ * separators; relies on the trash chunk being large enough.
+ */
+ temp = get_trash_chunk();
+ chunk_istcat(temp, htx_sl_p1(sl));
+ temp->area[temp->data++] = ' ';
+ chunk_istcat(temp, htx_sl_p2(sl));
+ temp->area[temp->data++] = ' ';
+ chunk_istcat(temp, htx_sl_p3(sl));
+
+ smp->data.u.str = *temp;
+ }
+
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the header name if the selected HTX block exists and is a header or
+ * a trailer. Otherwise an empty string is returned. Any positive integer
+ * (>= 0) is supported or "head", "tail" or "first". The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_blk_hdrname(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ blk = htx_get_head_blk(htx);
+ else if (pos == -2)
+ blk = htx_get_tail_blk(htx);
+ else if (pos == -3)
+ blk = htx_get_first_blk(htx);
+ else
+ blk = ((pos >= htx->head && pos <= htx->tail) ? htx_get_blk(htx, pos) : NULL);
+
+ if (!blk || (htx_get_blk_type(blk) != HTX_BLK_HDR && htx_get_blk_type(blk) != HTX_BLK_TLR)) {
+ smp->data.u.str.size = 0;
+ smp->data.u.str.area = "";
+ smp->data.u.str.data = 0;
+ }
+ else {
+ struct ist name = htx_get_blk_name(htx, blk);
+
+ /* points straight into the HTX buffer, hence SMP_F_CONST below */
+ chunk_initlen(&smp->data.u.str, name.ptr, name.len, name.len);
+ }
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST | SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the header value if the selected HTX block exists and is a header or
+ * a trailer. Otherwise an empty string is returned. Any positive integer
+ * (>= 0) is supported or "head", "tail" or "first". The channel is chosen
+ * depending on the sample direction. */
+static int
+smp_fetch_htx_blk_hdrval(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ blk = htx_get_head_blk(htx);
+ else if (pos == -2)
+ blk = htx_get_tail_blk(htx);
+ else if (pos == -3)
+ blk = htx_get_first_blk(htx);
+ else
+ blk = ((pos >= htx->head && pos <= htx->tail) ? htx_get_blk(htx, pos) : NULL);
+
+ if (!blk || (htx_get_blk_type(blk) != HTX_BLK_HDR && htx_get_blk_type(blk) != HTX_BLK_TLR)) {
+ smp->data.u.str.size = 0;
+ smp->data.u.str.area = "";
+ smp->data.u.str.data = 0;
+ }
+ else {
+ struct ist val = htx_get_blk_value(htx, blk);
+
+ /* points straight into the HTX buffer, hence SMP_F_CONST below */
+ chunk_initlen(&smp->data.u.str, val.ptr, val.len, val.len);
+ }
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_CONST | SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Returns the value if the selected HTX block exists and is a data
+ * block. Otherwise an empty string is returned. Any positive integer (>= 0) is
+ * supported or "head", "tail" or "first". The channel is chosen depending on
+ * the sample direction. */
+static int
+smp_fetch_htx_blk_data(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+ struct channel *chn;
+ struct htx *htx;
+ struct htx_blk *blk;
+ int32_t pos;
+
+ if (!smp->strm || !arg_p)
+ return 0;
+
+ chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+ htx = smp_prefetch_htx(smp, chn, NULL, 0);
+ if (!htx)
+ return 0;
+
+ /* -1/-2/-3 are the "head"/"tail"/"first" sentinels set by val_blk_arg() */
+ pos = arg_p[0].data.sint;
+ if (pos == -1)
+ blk = htx_get_head_blk(htx);
+ else if (pos == -2)
+ blk = htx_get_tail_blk(htx);
+ else if (pos == -3)
+ blk = htx_get_first_blk(htx);
+ else
+ blk = ((pos >= htx->head && pos <= htx->tail) ? htx_get_blk(htx, pos) : NULL);
+
+ if (!blk || htx_get_blk_type(blk) != HTX_BLK_DATA) {
+ smp->data.u.str.size = 0;
+ smp->data.u.str.area = "";
+ smp->data.u.str.data = 0;
+ }
+ else {
+ struct ist val = htx_get_blk_value(htx, blk);
+
+ /* points straight into the HTX buffer, hence SMP_F_CONST below */
+ chunk_initlen(&smp->data.u.str, val.ptr, val.len, val.len);
+ }
+ /* binary sample: DATA blocks may carry arbitrary bytes */
+ smp->data.type = SMP_T_BIN;
+ smp->flags = SMP_F_CONST | SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* This function is used to validate the arguments passed to any "htx_blk" fetch
+ * keywords. An argument is expected by these keywords. It must be a positive
+ * integer or one of the following strings: "head", "tail" or "first". It
+ * returns 0 on error, and a non-zero value if OK. On success the string
+ * argument is converted in place into an ARGT_SINT: "head" -> -1,
+ * "tail" -> -2, "first" -> -3, and a numeric position keeps its value.
+ */
+int val_blk_arg(struct arg *arg, char **err_msg)
+{
+ if (arg[0].type != ARGT_STR || !arg[0].data.str.data) {
+ memprintf(err_msg, "a block position is expected (> 0) or a special block name (head, tail, first)");
+ return 0;
+ }
+ if (arg[0].data.str.data == 4 && !strncmp(arg[0].data.str.area, "head", 4)) {
+ chunk_destroy(&arg[0].data.str);
+ arg[0].type = ARGT_SINT;
+ arg[0].data.sint = -1;
+ }
+ else if (arg[0].data.str.data == 4 && !strncmp(arg[0].data.str.area, "tail", 4)) {
+ chunk_destroy(&arg[0].data.str);
+ arg[0].type = ARGT_SINT;
+ arg[0].data.sint = -2;
+ }
+ else if (arg[0].data.str.data == 5 && !strncmp(arg[0].data.str.area, "first", 5)) {
+ chunk_destroy(&arg[0].data.str);
+ arg[0].type = ARGT_SINT;
+ arg[0].data.sint = -3;
+ }
+ else {
+ int pos;
+
+ /* only digits are allowed: rejects signs and any other character */
+ for (pos = 0; pos < arg[0].data.str.data; pos++) {
+ if (!isdigit((unsigned char)arg[0].data.str.area[pos])) {
+ memprintf(err_msg, "invalid block position");
+ return 0;
+ }
+ }
+
+ /* strl2uic() returns an unsigned value; assigning it to an int
+ * may wrap negative for huge inputs, which is caught below.
+ */
+ pos = strl2uic(arg[0].data.str.area, arg[0].data.str.data);
+ if (pos < 0) {
+ memprintf(err_msg, "block position must not be negative");
+ return 0;
+ }
+ chunk_destroy(&arg[0].data.str);
+ arg[0].type = ARGT_SINT;
+ arg[0].data.sint = pos;
+ }
+
+ return 1;
+}
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: htx sample fetches should only be used for development purposes.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ { "internal.strm.is_htx", smp_fetch_is_htx, 0, NULL, SMP_T_BOOL, SMP_USE_INTRN },
+
+ { "internal.htx.nbblks", smp_fetch_htx_nbblks, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.size", smp_fetch_htx_size, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.data", smp_fetch_htx_data, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.used", smp_fetch_htx_used, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.free", smp_fetch_htx_free, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.free_data", smp_fetch_htx_free_data, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx.has_eom", smp_fetch_htx_has_eom, 0, NULL, SMP_T_BOOL, SMP_USE_HRQHV|SMP_USE_HRSHV},
+
+ { "internal.htx_blk.type", smp_fetch_htx_blk_type, ARG1(1,STR), val_blk_arg, SMP_T_STR, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx_blk.size", smp_fetch_htx_blk_size, ARG1(1,STR), val_blk_arg, SMP_T_SINT, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx_blk.start_line", smp_fetch_htx_blk_stline, ARG1(1,STR), val_blk_arg, SMP_T_STR, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx_blk.hdrname", smp_fetch_htx_blk_hdrname, ARG1(1,STR), val_blk_arg, SMP_T_STR, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx_blk.hdrval", smp_fetch_htx_blk_hdrval, ARG1(1,STR), val_blk_arg, SMP_T_STR, SMP_USE_HRQHV|SMP_USE_HRSHV},
+ { "internal.htx_blk.data", smp_fetch_htx_blk_data, ARG1(1,STR), val_blk_arg, SMP_T_BIN, SMP_USE_HRQHV|SMP_USE_HRSHV},
+
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
diff --git a/src/http_rules.c b/src/http_rules.c
new file mode 100644
index 0000000..192f0c7
--- /dev/null
+++ b/src/http_rules.c
@@ -0,0 +1,530 @@
+/*
+ * HTTP rules parsing and registration
+ *
+ * Copyright 2000-2018 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/chunk.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana-t.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+/* List head of all known action keywords for "http-request" */
+struct action_kw_list http_req_keywords = {
+ .list = LIST_HEAD_INIT(http_req_keywords.list)
+};
+
+/* List head of all known action keywords for "http-response" */
+struct action_kw_list http_res_keywords = {
+ .list = LIST_HEAD_INIT(http_res_keywords.list)
+};
+
+/* List head of all known action keywords for "http-after-response" */
+struct action_kw_list http_after_res_keywords = {
+ .list = LIST_HEAD_INIT(http_after_res_keywords.list)
+};
+
+/* Appends <kw_list> to the list of known "http-request" action keywords */
+void http_req_keywords_register(struct action_kw_list *kw_list)
+{
+ LIST_APPEND(&http_req_keywords.list, &kw_list->list);
+}
+
+/* Appends <kw_list> to the list of known "http-response" action keywords */
+void http_res_keywords_register(struct action_kw_list *kw_list)
+{
+ LIST_APPEND(&http_res_keywords.list, &kw_list->list);
+}
+
+/* Appends <kw_list> to the list of known "http-after-response" action keywords */
+void http_after_res_keywords_register(struct action_kw_list *kw_list)
+{
+ LIST_APPEND(&http_after_res_keywords.list, &kw_list->list);
+}
+
+/*
+ * Return the struct http_req_action_kw associated to a keyword. Callers rely
+ * on a NULL return when the keyword is unknown (see parse_http_req_cond()).
+ */
+struct action_kw *action_http_req_custom(const char *kw)
+{
+ return action_lookup(&http_req_keywords.list, kw);
+}
+
+/*
+ * Return the struct http_res_action_kw associated to a keyword. Callers rely
+ * on a NULL return when the keyword is unknown (see parse_http_res_cond()).
+ */
+struct action_kw *action_http_res_custom(const char *kw)
+{
+ return action_lookup(&http_res_keywords.list, kw);
+}
+
+/*
+ * Return the struct http_after_res_action_kw associated to a keyword. Callers
+ * rely on a NULL return when the keyword is unknown.
+ */
+struct action_kw *action_http_after_res_custom(const char *kw)
+{
+ return action_lookup(&http_after_res_keywords.list, kw);
+}
+
+/* parse an "http-request" rule */
+struct act_rule *parse_http_req_cond(const char **args, const char *file, int linenum, struct proxy *proxy)
+{
+ struct act_rule *rule;
+ const struct action_kw *custom = NULL;
+ int cur_arg;
+
+ rule = new_act_rule(ACT_F_HTTP_REQ, file, linenum);
+ if (!rule) {
+ ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+ goto out;
+ }
+
+ if (((custom = action_http_req_custom(args[0])) != NULL)) {
+ char *errmsg = NULL;
+
+ cur_arg = 1;
+ /* try in the module list */
+ rule->kw = custom;
+
+ if (custom->flags & KWF_EXPERIMENTAL) {
+ if (!experimental_directives_allowed) {
+ ha_alert("parsing [%s:%d] : '%s' action is experimental, must be allowed via a global 'expose-experimental-directives'\n",
+ file, linenum, custom->kw);
+ goto out_err;
+ }
+ mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+ }
+
+ if (custom->parse(args, &cur_arg, proxy, rule, &errmsg) == ACT_RET_PRS_ERR) {
+ ha_alert("parsing [%s:%d] : error detected in %s '%s' while parsing 'http-request %s' rule : %s.\n",
+ file, linenum, proxy_type_str(proxy), proxy->id, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ else if (errmsg) {
+ ha_warning("parsing [%s:%d] : %s.\n", file, linenum, errmsg);
+ free(errmsg);
+ }
+ }
+ else {
+ const char *best = action_suggest(args[0], &http_req_keywords.list, NULL);
+
+ action_build_list(&http_req_keywords.list, &trash);
+ ha_alert("parsing [%s:%d]: 'http-request' expects %s, but got '%s'%s.%s%s%s\n",
+ file, linenum, trash.area,
+ args[0], *args[0] ? "" : " (missing argument)",
+ best ? " Did you mean '" : "",
+ best ? best : "",
+ best ? "' maybe ?" : "");
+ goto out_err;
+ }
+
+ if (strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
+ struct acl_cond *cond;
+ char *errmsg = NULL;
+
+ if ((cond = build_acl_cond(file, linenum, &proxy->acl, proxy, args+cur_arg, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing an 'http-request %s' condition : %s.\n",
+ file, linenum, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ rule->cond = cond;
+ }
+ else if (*args[cur_arg]) {
+ ha_alert("parsing [%s:%d]: 'http-request %s' expects"
+ " either 'if' or 'unless' followed by a condition but found '%s'.\n",
+ file, linenum, args[0], args[cur_arg]);
+ goto out_err;
+ }
+
+ return rule;
+ out_err:
+ free_act_rule(rule);
+ out:
+ return NULL;
+}
+
+/* parse an "http-response" rule */
+struct act_rule *parse_http_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy)
+{
+ struct act_rule *rule;
+ const struct action_kw *custom = NULL;
+ int cur_arg;
+
+ rule = new_act_rule(ACT_F_HTTP_RES, file, linenum);
+ if (!rule) {
+ ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+ goto out;
+ }
+
+ if (((custom = action_http_res_custom(args[0])) != NULL)) {
+ char *errmsg = NULL;
+
+ cur_arg = 1;
+ /* try in the module list */
+ rule->kw = custom;
+
+ if (custom->flags & KWF_EXPERIMENTAL) {
+ if (!experimental_directives_allowed) {
+ ha_alert("parsing [%s:%d] : '%s' action is experimental, must be allowed via a global 'expose-experimental-directives'\n",
+ file, linenum, custom->kw);
+ goto out_err;
+ }
+ mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+ }
+
+ if (custom->parse(args, &cur_arg, proxy, rule, &errmsg) == ACT_RET_PRS_ERR) {
+ ha_alert("parsing [%s:%d] : error detected in %s '%s' while parsing 'http-response %s' rule : %s.\n",
+ file, linenum, proxy_type_str(proxy), proxy->id, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ else if (errmsg) {
+ ha_warning("parsing [%s:%d] : %s.\n", file, linenum, errmsg);
+ free(errmsg);
+ }
+ }
+ else {
+ const char *best = action_suggest(args[0], &http_res_keywords.list, NULL);
+
+ action_build_list(&http_res_keywords.list, &trash);
+ ha_alert("parsing [%s:%d]: 'http-response' expects %s, but got '%s'%s.%s%s%s\n",
+ file, linenum, trash.area,
+ args[0], *args[0] ? "" : " (missing argument)",
+ best ? " Did you mean '" : "",
+ best ? best : "",
+ best ? "' maybe ?" : "");
+ goto out_err;
+ }
+
+ if (strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
+ struct acl_cond *cond;
+ char *errmsg = NULL;
+
+ if ((cond = build_acl_cond(file, linenum, &proxy->acl, proxy, args+cur_arg, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing an 'http-response %s' condition : %s.\n",
+ file, linenum, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ rule->cond = cond;
+ }
+ else if (*args[cur_arg]) {
+ ha_alert("parsing [%s:%d]: 'http-response %s' expects"
+ " either 'if' or 'unless' followed by a condition but found '%s'.\n",
+ file, linenum, args[0], args[cur_arg]);
+ goto out_err;
+ }
+
+ return rule;
+ out_err:
+ free_act_rule(rule);
+ out:
+ return NULL;
+}
+
+
+/* parse an "http-after-response" rule */
+struct act_rule *parse_http_after_res_cond(const char **args, const char *file, int linenum, struct proxy *proxy)
+{
+ struct act_rule *rule;
+ const struct action_kw *custom = NULL;
+ int cur_arg;
+
+ rule = new_act_rule(ACT_F_HTTP_RES, file, linenum);
+ if (!rule) {
+ ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
+ goto out;
+ }
+
+ if (((custom = action_http_after_res_custom(args[0])) != NULL)) {
+ char *errmsg = NULL;
+
+ cur_arg = 1;
+ /* try in the module list */
+ rule->kw = custom;
+ if (custom->parse(args, &cur_arg, proxy, rule, &errmsg) == ACT_RET_PRS_ERR) {
+ ha_alert("parsing [%s:%d] : error detected in %s '%s' while parsing 'http-after-response %s' rule : %s.\n",
+ file, linenum, proxy_type_str(proxy), proxy->id, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ else if (errmsg) {
+ ha_warning("parsing [%s:%d] : %s.\n", file, linenum, errmsg);
+ free(errmsg);
+ }
+ }
+ else {
+ const char *best = action_suggest(args[0], &http_after_res_keywords.list, NULL);
+
+ action_build_list(&http_after_res_keywords.list, &trash);
+ ha_alert("parsing [%s:%d]: 'http-after-response' expects %s, but got '%s'%s.%s%s%s\n",
+ file, linenum, trash.area,
+ args[0], *args[0] ? "" : " (missing argument)",
+ best ? " Did you mean '" : "",
+ best ? best : "",
+ best ? "' maybe ?" : "");
+ goto out_err;
+ }
+
+ if (strcmp(args[cur_arg], "if") == 0 || strcmp(args[cur_arg], "unless") == 0) {
+ struct acl_cond *cond;
+ char *errmsg = NULL;
+
+ if ((cond = build_acl_cond(file, linenum, &proxy->acl, proxy, args+cur_arg, &errmsg)) == NULL) {
+ ha_alert("parsing [%s:%d] : error detected while parsing an 'http-after-response %s' condition : %s.\n",
+ file, linenum, args[0], errmsg);
+ free(errmsg);
+ goto out_err;
+ }
+ rule->cond = cond;
+ }
+ else if (*args[cur_arg]) {
+ ha_alert("parsing [%s:%d]: 'http-after-response %s' expects"
+ " either 'if' or 'unless' followed by a condition but found '%s'.\n",
+ file, linenum, args[0], args[cur_arg]);
+ goto out_err;
+ }
+
+ return rule;
+ out_err:
+ free_act_rule(rule);
+ out:
+ return NULL;
+}
+
+/* Completely frees redirect rule <rdr>: its ACL condition, destination and
+ * cookie strings, all attached log-format nodes, and the rule itself.
+ * <rdr> must be non-NULL. */
+void http_free_redirect_rule(struct redirect_rule *rdr)
+{
+ struct logformat_node *lf, *lfb;
+
+ free_acl_cond(rdr->cond);
+ free(rdr->rdr_str);
+ free(rdr->cookie_str);
+ list_for_each_entry_safe(lf, lfb, &rdr->rdr_fmt, list) {
+ LIST_DELETE(&lf->list);
+ release_sample_expr(lf->expr);
+ free(lf->arg);
+ free(lf);
+ }
+ free(rdr);
+}
+
+/* Parses a redirect rule. Returns the redirect rule on success or NULL on error,
+ * with <err> filled with the error message. If <use_fmt> is not null, builds a
+ * dynamic log-format rule instead of a static string. Parameter <dir> indicates
+ * the direction of the rule, and equals 0 for request, non-zero for responses.
+ */
+struct redirect_rule *http_parse_redirect_rule(const char *file, int linenum, struct proxy *curproxy,
+ const char **args, char **errmsg, int use_fmt, int dir)
+{
+ struct redirect_rule *rule = NULL;
+ int cur_arg;
+ int type = REDIRECT_TYPE_NONE;
+ int code = 302;
+ const char *destination = NULL;
+ const char *cookie = NULL;
+ int cookie_set = 0;
+ unsigned int flags = (!dir ? REDIRECT_FLAG_FROM_REQ : REDIRECT_FLAG_NONE);
+ struct acl_cond *cond = NULL;
+
+ cur_arg = 0;
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "location") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+
+ type = REDIRECT_TYPE_LOCATION;
+ cur_arg++;
+ destination = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "prefix") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+ type = REDIRECT_TYPE_PREFIX;
+ cur_arg++;
+ destination = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "scheme") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+
+ type = REDIRECT_TYPE_SCHEME;
+ cur_arg++;
+ destination = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "set-cookie") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+
+ cur_arg++;
+ cookie = args[cur_arg];
+ cookie_set = 1;
+ }
+ else if (strcmp(args[cur_arg], "clear-cookie") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+
+ cur_arg++;
+ cookie = args[cur_arg];
+ cookie_set = 0;
+ }
+ else if (strcmp(args[cur_arg], "code") == 0) {
+ if (!*args[cur_arg + 1])
+ goto missing_arg;
+
+ cur_arg++;
+ code = atol(args[cur_arg]);
+ if (code < 301 || code > 308 || (code > 303 && code < 307)) {
+ memprintf(errmsg,
+ "'%s': unsupported HTTP code '%s' (must be one of 301, 302, 303, 307 or 308)",
+ args[cur_arg - 1], args[cur_arg]);
+ goto err;
+ }
+ }
+ else if (strcmp(args[cur_arg], "drop-query") == 0) {
+ flags |= REDIRECT_FLAG_DROP_QS;
+ }
+ else if (strcmp(args[cur_arg], "append-slash") == 0) {
+ flags |= REDIRECT_FLAG_APPEND_SLASH;
+ }
+ else if (strcmp(args[cur_arg], "ignore-empty") == 0) {
+ flags |= REDIRECT_FLAG_IGNORE_EMPTY;
+ }
+ else if (strcmp(args[cur_arg], "if") == 0 ||
+ strcmp(args[cur_arg], "unless") == 0) {
+ cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + cur_arg, errmsg);
+ if (!cond) {
+ memprintf(errmsg, "error in condition: %s", *errmsg);
+ goto err;
+ }
+ break;
+ }
+ else {
+ memprintf(errmsg,
+ "expects 'code', 'prefix', 'location', 'scheme', 'set-cookie', 'clear-cookie', 'drop-query', 'ignore-empty' or 'append-slash' (was '%s')",
+ args[cur_arg]);
+ goto err;
+ }
+ cur_arg++;
+ }
+
+ if (type == REDIRECT_TYPE_NONE) {
+ memprintf(errmsg, "redirection type expected ('prefix', 'location', or 'scheme')");
+ goto err;
+ }
+
+ if (dir && type != REDIRECT_TYPE_LOCATION) {
+ memprintf(errmsg, "response only supports redirect type 'location'");
+ goto err;
+ }
+
+ rule = calloc(1, sizeof(*rule));
+ if (!rule)
+ goto out_of_memory;
+ rule->cond = cond;
+ LIST_INIT(&rule->rdr_fmt);
+
+ if (!use_fmt) {
+ /* old-style static redirect rule */
+ rule->rdr_str = strdup(destination);
+ if (!rule->rdr_str)
+ goto out_of_memory;
+ rule->rdr_len = strlen(destination);
+ }
+ else {
+ /* log-format based redirect rule */
+ int cap = 0;
+
+ /* Parse destination. Note that in the REDIRECT_TYPE_PREFIX case,
+ * if prefix == "/", we don't want to add anything, otherwise it
+ * makes it hard for the user to configure a self-redirection.
+ */
+ curproxy->conf.args.ctx = ARGC_RDR;
+ if (curproxy->cap & PR_CAP_FE)
+ cap |= (dir ? SMP_VAL_FE_HRS_HDR : SMP_VAL_FE_HRQ_HDR);
+ if (curproxy->cap & PR_CAP_BE)
+ cap |= (dir ? SMP_VAL_BE_HRS_HDR : SMP_VAL_BE_HRQ_HDR);
+ if (!(type == REDIRECT_TYPE_PREFIX && destination[0] == '/' && destination[1] == '\0')) {
+ if (!parse_logformat_string(destination, curproxy, &rule->rdr_fmt, LOG_OPT_HTTP, cap, errmsg)) {
+ goto err;
+ }
+ free(curproxy->conf.lfs_file);
+ curproxy->conf.lfs_file = strdup(curproxy->conf.args.file);
+ curproxy->conf.lfs_line = curproxy->conf.args.line;
+ }
+ }
+
+ if (cookie) {
+ /* depending on cookie_set, either we want to set the cookie, or to clear it.
+ * a clear consists in appending "; path=/; Max-Age=0;" at the end.
+ */
+ rule->cookie_len = strlen(cookie);
+ if (cookie_set) {
+ rule->cookie_str = malloc(rule->cookie_len + 10);
+ if (!rule->cookie_str)
+ goto out_of_memory;
+ memcpy(rule->cookie_str, cookie, rule->cookie_len);
+ memcpy(rule->cookie_str + rule->cookie_len, "; path=/;", 10);
+ rule->cookie_len += 9;
+ } else {
+ rule->cookie_str = malloc(rule->cookie_len + 21);
+ if (!rule->cookie_str)
+ goto out_of_memory;
+ memcpy(rule->cookie_str, cookie, rule->cookie_len);
+ memcpy(rule->cookie_str + rule->cookie_len, "; path=/; Max-Age=0;", 21);
+ rule->cookie_len += 20;
+ }
+ }
+ rule->type = type;
+ rule->code = code;
+ rule->flags = flags;
+ LIST_INIT(&rule->list);
+ return rule;
+
+ missing_arg:
+ memprintf(errmsg, "missing argument for '%s'", args[cur_arg]);
+ goto err;
+ out_of_memory:
+ memprintf(errmsg, "parsing [%s:%d]: out of memory.", file, linenum);
+ err:
+ if (rule)
+ http_free_redirect_rule(rule);
+ else if (cond) {
+ /* rule not yet allocated, but cond already is */
+ free_acl_cond(cond);
+ }
+
+ return NULL;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/htx.c b/src/htx.c
new file mode 100644
index 0000000..feb7eec
--- /dev/null
+++ b/src/htx.c
@@ -0,0 +1,1099 @@
+/*
+ * internal HTTP message
+ *
+ * Copyright 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/chunk.h>
+#include <haproxy/htx.h>
+#include <haproxy/net_helper.h>
+
+struct htx htx_empty = { .size = 0, .data = 0, .head = -1, .tail = -1, .first = -1 };
+
+/* tests show that 63% of these calls are for 64-bit chunks, so better avoid calling
+ * memcpy() for that!
+ */
+static inline __attribute__((always_inline)) void htx_memcpy(void *dst, void *src, size_t len)
+{
+ /* 8-byte fast path: one u64 load/store instead of a memcpy() call */
+ if (likely(len == 8))
+ write_u64(dst, read_u64(src));
+ else
+ memcpy(dst, src, len);
+}
+
+/* Defragments an HTX message. It removes unused blocks and unwraps the payloads
+ * part. A temporary buffer is used to do so. This function never fails. Most of
+ * the time, we need to keep a ref on a specific HTX block. Thus if <blk> is set, the
+ * pointer on its new position, after defrag, is returned. In addition, if the
+ * size of the block must be altered, <blkinfo> info must be provided (!=
+ * 0). But in this case, it remains the caller responsibility to update the
+ * block content.
+ */
+/* TODO: merge data blocks into one */
+struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t blkinfo)
+{
+ struct buffer *chunk = get_trash_chunk();
+ struct htx *tmp = htxbuf(chunk);
+ struct htx_blk *newblk, *oldblk;
+ uint32_t new, old, blkpos;
+ uint32_t addr, blksz;
+ int32_t first = -1;
+
+ if (htx->head == -1)
+ return NULL;
+
+ blkpos = -1;
+
+ new = 0;
+ addr = 0;
+ tmp->size = htx->size;
+ tmp->data = 0;
+
+ /* start from the head */
+ for (old = htx_get_head(htx); old != -1; old = htx_get_next(htx, old)) {
+ oldblk = htx_get_blk(htx, old);
+ if (htx_get_blk_type(oldblk) == HTX_BLK_UNUSED)
+ continue;
+
+ blksz = htx_get_blksz(oldblk);
+ htx_memcpy((void *)tmp->blocks + addr, htx_get_blk_ptr(htx, oldblk), blksz);
+
+ /* update the start-line position */
+ if (htx->first == old)
+ first = new;
+
+ newblk = htx_get_blk(tmp, new);
+ newblk->addr = addr;
+ newblk->info = oldblk->info;
+
+ /* if <blk> is defined, save its new position */
+ if (blk != NULL && blk == oldblk) {
+ if (blkinfo)
+ newblk->info = blkinfo;
+ blkpos = new;
+ }
+
+ blksz = htx_get_blksz(newblk);
+ addr += blksz;
+ tmp->data += blksz;
+ new++;
+ }
+
+ htx->data = tmp->data;
+ htx->first = first;
+ htx->head = 0;
+ htx->tail = new - 1;
+ htx->head_addr = htx->end_addr = 0;
+ htx->tail_addr = addr;
+ htx->flags &= ~HTX_FL_FRAGMENTED;
+ htx_memcpy((void *)htx->blocks, (void *)tmp->blocks, htx->size);
+
+ return ((blkpos == -1) ? NULL : htx_get_blk(htx, blkpos));
+}
+
+/* Defragments HTX blocks of an HTX message. The payload part is kept untouched
+ * here. This function will move back all blocks starting at the position 0,
+ * removing unused blocks. It must never be called with an empty message.
+ */
+static void htx_defrag_blks(struct htx *htx)
+{
+ int32_t pos, new;
+
+ new = 0;
+ for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+ struct htx_blk *posblk, *newblk;
+
+ if (pos == new) {
+ /* block already at its final position */
+ new++;
+ continue;
+ }
+
+ posblk = htx_get_blk(htx, pos);
+ if (htx_get_blk_type(posblk) == HTX_BLK_UNUSED)
+ continue;
+
+ /* keep track of the new position of the first block */
+ if (htx->first == pos)
+ htx->first = new;
+ newblk = htx_get_blk(htx, new++);
+ newblk->info = posblk->info;
+ newblk->addr = posblk->addr;
+ }
+ BUG_ON(!new); /* must never be called on an empty message */
+ htx->head = 0;
+ htx->tail = new - 1;
+}
+
+/* Reserves a new block in the HTX message <htx> with a content of <blksz>
+ * bytes. If there is not enough space, NULL is returned. Otherwise the reserved
+ * block is returned and the HTX message is updated. Space for this new block is
+ * reserved in the HTX message. But it is the caller responsibility to set right
+ * info in the block to reflect the stored data.
+ */
+static struct htx_blk *htx_reserve_nxblk(struct htx *htx, uint32_t blksz)
+{
+ struct htx_blk *blk;
+ uint32_t tail, headroom, tailroom;
+
+ if (blksz > htx_free_data_space(htx))
+ return NULL; /* full */
+
+ if (htx->head == -1) {
+ /* Empty message */
+ htx->head = htx->tail = htx->first = 0;
+ blk = htx_get_blk(htx, htx->tail);
+ blk->addr = 0;
+ htx->data = blksz;
+ htx->tail_addr = blksz;
+ return blk;
+ }
+
+ /* Find the block's position. First, we try to get the next position in
+ * the message, increasing the tail by one. If this position is not
+ * available with some holes, we try to defrag the blocks without
+ * touching their payload. If it is impossible, we fully defrag the
+ * message.
+ */
+ tail = htx->tail + 1;
+ if (htx_pos_to_addr(htx, tail) >= htx->tail_addr)
+ ;
+ else if (htx->head > 0) {
+ htx_defrag_blks(htx);
+ tail = htx->tail + 1;
+ BUG_ON(htx_pos_to_addr(htx, tail) < htx->tail_addr);
+ }
+ else
+ goto defrag;
+
+ /* Now, we have found the block's position. Try to find where to put its
+ * payload. The free space is split in two areas:
+ *
+ * * The free space in front of the blocks table. This one is used if and
+ * only if the other one was not used yet.
+ *
+ * * The free space at the beginning of the message. Once this one is
+ * used, the other one is never used again, until the next defrag.
+ */
+ headroom = (htx->end_addr - htx->head_addr);
+ tailroom = (!htx->head_addr ? htx_pos_to_addr(htx, tail) - htx->tail_addr : 0);
+ BUG_ON((int32_t)headroom < 0);
+ BUG_ON((int32_t)tailroom < 0);
+
+ if (blksz <= tailroom) {
+ blk = htx_get_blk(htx, tail);
+ blk->addr = htx->tail_addr;
+ htx->tail_addr += blksz;
+ }
+ else if (blksz <= headroom) {
+ blk = htx_get_blk(htx, tail);
+ blk->addr = htx->head_addr;
+ htx->head_addr += blksz;
+ }
+ else {
+ defrag:
+ /* need to defragment the message before inserting upfront */
+ htx_defrag(htx, NULL, 0);
+ tail = htx->tail + 1;
+ blk = htx_get_blk(htx, tail);
+ blk->addr = htx->tail_addr;
+ htx->tail_addr += blksz;
+ }
+
+ htx->tail = tail;
+ htx->data += blksz;
+ /* Set first position if not already set */
+ if (htx->first == -1)
+ htx->first = tail;
+
+ BUG_ON((int32_t)htx->tail_addr < 0);
+ BUG_ON((int32_t)htx->head_addr < 0);
+ BUG_ON(htx->end_addr > htx->tail_addr);
+ BUG_ON(htx->head_addr > htx->end_addr);
+
+ return blk;
+}
+
/* Prepares the block to an expansion of its payload. The payload will be
 * expanded by <delta> bytes and we need find where this expansion will be
 * performed. It can be a compression if <delta> is negative. This function only
 * updates all addresses. The caller have the responsibility to perform the
 * expansion and update the block and the HTX message accordingly. No error must
 * occur. It returns following values:
 *
 *  0: The expansion cannot be performed, there is not enough space.
 *
 *  1: the expansion must be performed in place, there is enough space after
 *     the block's payload to handle it. This is especially true if it is a
 *     compression and not an expansion.
 *
 *  2: the block's payload must be moved at the new block address before doing
 *     the expansion.
 *
 *  3: the HTX message must be defragmented
 */
static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32_t delta)
{
	uint32_t sz, tailroom, headroom;
	int ret = 3; /* by default, a full defrag is required */

	BUG_ON(htx->head == -1);

	/* free space at the buffer's start and in front of the blocks table */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	sz = htx_get_blksz(blk);
	if (delta <= 0) {
		/* It is a compression, it can be performed in place */
		if (blk->addr+sz == htx->tail_addr)
			htx->tail_addr += delta; /* shrink the tail area */
		else if (blk->addr+sz == htx->head_addr)
			htx->head_addr += delta; /* shrink the head area */
		ret = 1;
	}
	else if (delta > htx_free_space(htx)) {
		/* There is not enough space to handle the expansion */
		ret = 0;
	}
	else if (blk->addr+sz == htx->tail_addr) {
		/* The block's payload is just before the tail room */
		if (delta < tailroom) {
			/* Expand the block's payload */
			htx->tail_addr += delta;
			ret = 1;
		}
		else if ((sz + delta) < headroom) {
			uint32_t oldaddr = blk->addr;

			/* Move the block's payload into the headroom */
			blk->addr = htx->head_addr;
			htx->tail_addr -= sz;
			htx->head_addr += sz + delta;
			if (oldaddr == htx->end_addr) {
				/* the moved payload opened the old area: either
				 * collapse both areas when it becomes empty, or
				 * advance its start past the moved payload
				 */
				if (htx->end_addr == htx->tail_addr) {
					htx->tail_addr = htx->head_addr;
					htx->head_addr = htx->end_addr = 0;
				}
				else
					htx->end_addr += sz;
			}
			ret = 2;
		}
	}
	else if (blk->addr+sz == htx->head_addr) {
		/* The block's payload is just before the head room */
		if (delta < headroom) {
			/* Expand the block's payload */
			htx->head_addr += delta;
			ret = 1;
		}
	}
	else {
		/* The block's payload is not at the rooms edge */
		if (!htx->head_addr && sz+delta < tailroom) {
			/* Move the block's payload into the tailroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->tail_addr;
			htx->tail_addr += sz + delta;
			ret = 2;
		}
		else if (sz+delta < headroom) {
			/* Move the block's payload into the headroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->head_addr;
			htx->head_addr += sz + delta;
			ret = 2;
		}
	}
	/* Otherwise defrag the HTX message */

	/* sanity checks on the address-space invariants */
	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return ret;
}
+
+/* Adds a new block of type <type> in the HTX message <htx>. Its content size is
+ * passed but it is the caller responsibility to do the copy.
+ */
+struct htx_blk *htx_add_blk(struct htx *htx, enum htx_blk_type type, uint32_t blksz)
+{
+ struct htx_blk *blk;
+
+ BUG_ON(blksz >= 256 << 20);
+ blk = htx_reserve_nxblk(htx, blksz);
+ if (!blk)
+ return NULL;
+ BUG_ON(blk->addr > htx->size);
+
+ blk->info = (type << 28);
+ return blk;
+}
+
/* Removes the block <blk> from the HTX message <htx>. The function returns the
 * block following <blk> or NULL if <blk> is the last block or the last inserted
 * one.
 */
struct htx_blk *htx_remove_blk(struct htx *htx, struct htx_blk *blk)
{
	enum htx_blk_type type;
	uint32_t pos, addr, sz;

	BUG_ON(!blk || htx->head == -1);

	/* This is the last block in use */
	if (htx->head == htx->tail) {
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		return NULL;
	}

	/* remember the block's characteristics before it is recycled */
	type = htx_get_blk_type(blk);
	pos = htx_get_blk_pos(htx, blk);
	sz = htx_get_blksz(blk);
	addr = blk->addr;
	if (type != HTX_BLK_UNUSED) {
		/* Mark the block as unused, decrement allocated size */
		htx->data -= htx_get_blksz(blk);
		blk->info = ((uint32_t)HTX_BLK_UNUSED << 28);
	}

	/* There is at least 2 blocks, so tail is always > 0 */
	if (pos == htx->head) {
		/* move the head forward */
		htx->head++;
	}
	else if (pos == htx->tail) {
		/* remove the tail. this was the last inserted block so
		 * return NULL. */
		htx->tail--;
		blk = NULL;
		goto end;
	}
	else
		/* removal in the middle leaves a hole in the blocks table */
		htx->flags |= HTX_FL_FRAGMENTED;

	blk = htx_get_blk(htx, pos+1);

  end:
	/* if the first block to analyze was removed, point to its successor */
	if (pos == htx->first)
		htx->first = (blk ? htx_get_blk_pos(htx, blk) : -1);

	if (htx->head == htx->tail) {
		/* If there is just one block in the HTX message, free space can
		 * be adjusted. This operation could save some defrags. */
		struct htx_blk *lastblk = htx_get_blk(htx, htx->tail);

		htx->head_addr = 0;
		htx->end_addr = lastblk->addr;
		htx->tail_addr = lastblk->addr+htx->data;
	}
	else {
		/* reclaim the freed payload area when it sits right at the edge
		 * of the tail or head rooms
		 */
		if (addr+sz == htx->tail_addr)
			htx->tail_addr = addr;
		else if (addr+sz == htx->head_addr)
			htx->head_addr = addr;
		if (addr == htx->end_addr) {
			/* the freed payload opened the old area: collapse both
			 * areas when it becomes empty, or shrink it
			 */
			if (htx->tail_addr == htx->end_addr) {
				htx->tail_addr = htx->head_addr;
				htx->head_addr = htx->end_addr = 0;
			}
			else
				htx->end_addr += sz;
		}
	}

	/* sanity checks on the address-space invariants */
	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
+
+/* Looks for the HTX block containing the offset <offset>, starting at the HTX
+ * message's head. The function returns an htx_ret with the found HTX block and
+ * the position inside this block where the offset is. If the offset <offset> is
+ * outside of the HTX message, htx_ret.blk is set to NULL.
+ */
+struct htx_ret htx_find_offset(struct htx *htx, uint32_t offset)
+{
+ struct htx_blk *blk;
+ struct htx_ret htxret = { .blk = NULL, .ret = 0 };
+
+ if (offset >= htx->data)
+ return htxret;
+
+ for (blk = htx_get_head_blk(htx); blk && offset; blk = htx_get_next_blk(htx, blk)) {
+ uint32_t sz = htx_get_blksz(blk);
+
+ if (offset < sz)
+ break;
+ offset -= sz;
+ }
+ htxret.blk = blk;
+ htxret.ret = offset;
+ return htxret;
+}
+
+/* Removes all blocks after the one containing the offset <offset>. This last
+ * one may be truncated if it is a DATA block.
+ */
+void htx_truncate(struct htx *htx, uint32_t offset)
+{
+ struct htx_blk *blk;
+ struct htx_ret htxret = htx_find_offset(htx, offset);
+
+ blk = htxret.blk;
+ if (blk && htxret.ret && htx_get_blk_type(blk) == HTX_BLK_DATA) {
+ htx_change_blk_value_len(htx, blk, htxret.ret);
+ blk = htx_get_next_blk(htx, blk);
+ }
+ while (blk)
+ blk = htx_remove_blk(htx, blk);
+}
+
/* Drains <count> bytes from the HTX message <htx>. If the last block is a DATA
 * block, it will be cut if necessary. Others blocks will be removed at once if
 * <count> is large enough. The function returns an htx_ret with the first block
 * remaining in the message and the amount of data drained. If everything is
 * removed, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_drain(struct htx *htx, uint32_t count)
{
	struct htx_blk *blk;
	struct htx_ret htxret = { .blk = NULL, .ret = 0 };

	if (count == htx->data) {
		/* everything is drained: a full reset is cheaper than removing
		 * the blocks one by one
		 */
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		htxret.ret = count;
		return htxret;
	}

	blk = htx_get_head_blk(htx);
	while (count && blk) {
		uint32_t sz = htx_get_blksz(blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		/* Ignore unused block */
		if (type == HTX_BLK_UNUSED)
			goto next;

		if (sz > count) {
			/* block larger than what remains to drain: only DATA
			 * blocks may be truncated in place, others stop here
			 */
			if (type == HTX_BLK_DATA) {
				htx_cut_data_blk(htx, blk, count);
				htxret.ret += count;
			}
			break;
		}
		count -= sz;
		htxret.ret += sz;
	  next:
		blk = htx_remove_blk(htx, blk);
	}
	htxret.blk = blk;

	return htxret;
}
+
/* Tries to append data to the last inserted block, if the type matches and if
 * there is enough space to take it all. If the space wraps, the buffer is
 * defragmented and a new block is inserted. If an error occurred, NULL is
 * returned. Otherwise, on success, the updated block (or the new one) is
 * returned. Due to its nature this function can be expensive and should be
 * avoided whenever possible.
 */
struct htx_blk *htx_add_data_atonce(struct htx *htx, struct ist data)
{
	struct htx_blk *blk, *tailblk;
	void *ptr;
	uint32_t len, sz, tailroom, headroom;

	if (htx->head == -1)
		goto add_new_block;

	/* Not enough space to store data */
	if (data.len > htx_free_data_space(htx))
		return NULL;

	/* get the tail block and its size */
	tailblk = htx_get_tail_blk(htx);
	if (tailblk == NULL)
		goto add_new_block;
	sz = htx_get_blksz(tailblk);

	/* Don't try to append data if the last inserted block is not of the
	 * same type */
	if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
		goto add_new_block;

	/*
	 * Same type and enough space: append data
	 */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	len = data.len;
	if (tailblk->addr+sz == htx->tail_addr) {
		/* the tail block's payload ends the tail area */
		if (data.len <= tailroom)
			goto append_data;
		else if (!htx->head_addr) {
			/* partial append: fill the tailroom now, the remainder
			 * goes into a new block below
			 */
			len = tailroom;
			goto append_data;
		}
	}
	else if (tailblk->addr+sz == htx->head_addr && data.len <= headroom)
		/* the tail block's payload ends the head area and fits there */
		goto append_data;

	goto add_new_block;

  append_data:
	/* Append data and update the block itself */
	ptr = htx_get_blk_ptr(htx, tailblk);
	htx_memcpy(ptr+sz, data.ptr, len);
	htx_change_blk_value_len(htx, tailblk, sz+len);

	if (data.len == len) {
		/* everything fitted in place */
		blk = tailblk;
		goto end;
	}
	/* skip the bytes already appended */
	data = istadv(data, len);

  add_new_block:
	blk = htx_add_blk(htx, HTX_BLK_DATA, data.len);
	if (!blk)
		return NULL;

	blk->info += data.len;
	htx_memcpy(htx_get_blk_ptr(htx, blk), data.ptr, data.len);

  end:
	/* sanity checks on the address-space invariants */
	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
+
/* Replaces a value part of a block by a new one. The new part can be smaller or
 * larger than the old one. This function works for any kind of block with
 * attached data. It returns the new block on success, otherwise it returns
 * NULL.
 */
struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
				      const struct ist old, const struct ist new)
{
	struct ist n, v;
	int32_t delta;
	int ret;

	n = htx_get_blk_name(htx, blk);
	v = htx_get_blk_value(htx, blk);
	delta = new.len - old.len;
	/* ask where the resize can take place (see htx_prepare_blk_expansion) */
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	if (ret == 1) { /* Replace in place */
		if (delta <= 0) {
			/* compression: copy new data first then move the end */
			htx_memcpy(old.ptr, new.ptr, new.len);
			memmove(old.ptr + new.len, istend(old),
				istend(v) - istend(old));
		}
		else {
			/* expansion: move the end first then copy new data */
			memmove(old.ptr + new.len, istend(old),
				istend(v) - istend(old));
			htx_memcpy(old.ptr, new.ptr, new.len);
		}

		/* set the new block size and update HTX message */
		htx_set_blk_value_len(blk, v.len + delta);
		htx->data += delta;
	}
	else if (ret == 2) { /* New address but no defrag */
		void *ptr = htx_get_blk_ptr(htx, blk);

		/* Copy the name, if any */
		htx_memcpy(ptr, n.ptr, n.len);
		ptr += n.len;

		/* Copy value before old part, if any */
		htx_memcpy(ptr, v.ptr, old.ptr - v.ptr);
		ptr += old.ptr - v.ptr;

		/* Copy new value */
		htx_memcpy(ptr, new.ptr, new.len);
		ptr += new.len;

		/* Copy value after old part, if any */
		htx_memcpy(ptr, istend(old), istend(v) - istend(old));

		/* set the new block size and update HTX message */
		htx_set_blk_value_len(blk, v.len + delta);
		htx->data += delta;
	}
	else { /* Do a defrag first (it is always an expansion) */
		struct htx_blk tmpblk;
		int32_t offset;

		/* use tmpblk to set new block size before defrag and to compute
		 * the offset after defrag
		 */
		tmpblk.addr = blk->addr;
		tmpblk.info = blk->info;
		htx_set_blk_value_len(&tmpblk, v.len + delta);

		/* htx_defrag() will take care to update the block size and the htx message */
		blk = htx_defrag(htx, blk, tmpblk.info);

		/* <blk> is now the new HTX block. Compute the offset to copy/move payload */
		offset = blk->addr - tmpblk.addr;

		/* move the end first and copy new data
		 */
		memmove(old.ptr + offset + new.len, old.ptr + offset + old.len,
			istend(v) - istend(old));
		htx_memcpy(old.ptr + offset, new.ptr, new.len);
	}
	return blk;
}
+
+/* Transfer HTX blocks from <src> to <dst>, stopping on the first block of the
+ * type <mark> (typically EOH or EOT) or when <count> bytes were moved
+ * (including payload and meta-data). It returns the number of bytes moved and
+ * the last HTX block inserted in <dst>.
+ */
+struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
+ enum htx_blk_type mark)
+{
+ struct htx_blk *blk, *dstblk;
+ struct htx_blk *srcref, *dstref;
+ enum htx_blk_type type;
+ uint32_t info, max, sz, ret;
+
+ ret = htx_used_space(dst);
+ srcref = dstref = dstblk = NULL;
+
+ /* blocks are not removed yet from <src> HTX message to be able to
+ * rollback the transfer if all the headers/trailers are not copied.
+ */
+ for (blk = htx_get_head_blk(src); blk && count; blk = htx_get_next_blk(src, blk)) {
+ type = htx_get_blk_type(blk);
+
+ /* Ignore unused block */
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+
+ max = htx_get_max_blksz(dst, count);
+ if (!max)
+ break;
+
+ sz = htx_get_blksz(blk);
+ info = blk->info;
+ if (sz > max) {
+ /* Only DATA blocks can be partially xferred */
+ if (type != HTX_BLK_DATA)
+ break;
+ sz = max;
+ info = (type << 28) + sz;
+ }
+
+ dstblk = htx_reserve_nxblk(dst, sz);
+ if (!dstblk)
+ break;
+ dstblk->info = info;
+ htx_memcpy(htx_get_blk_ptr(dst, dstblk), htx_get_blk_ptr(src, blk), sz);
+
+ count -= sizeof(dstblk) + sz;
+ if (blk->info != info) {
+ /* Partial xfer: don't remove <blk> from <src> but
+ * resize its content */
+ htx_cut_data_blk(src, blk, sz);
+ break;
+ }
+
+ if (type == mark) {
+ blk = htx_get_next_blk(src, blk);
+ srcref = dstref = NULL;
+ break;
+ }
+
+ /* Save <blk> to <srcref> and <dstblk> to <dstref> when we start
+ * to xfer headers or trailers. When EOH/EOT block is reached,
+ * both are reset. It is mandatory to be able to rollback a
+ * partial transfer.
+ */
+ if (!srcref && !dstref &&
+ (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_TLR)) {
+ srcref = blk;
+ dstref = dstblk;
+ }
+ else if (type == HTX_BLK_EOH || type == HTX_BLK_EOT)
+ srcref = dstref = NULL;
+ }
+
+ if (unlikely(dstref)) {
+ /* Headers or trailers part was partially xferred, so rollback
+ * the copy by removing all block between <dstref> and <dstblk>,
+ * both included. <dstblk> may be NULL.
+ */
+ while (dstref && dstref != dstblk)
+ dstref = htx_remove_blk(dst, dstref);
+ if (dstblk)
+ htx_remove_blk(dst, dstblk);
+
+ /* <dst> HTX message is empty, it means the headers or trailers
+ * part is too big to be copied at once.
+ */
+ if (htx_is_empty(dst))
+ src->flags |= HTX_FL_PARSING_ERROR;
+ }
+
+ /* Now, remove xferred blocks from <src> htx message */
+ if (!blk && !srcref) {
+ /* End of src reached, all blocks were consumed, drain all data */
+ htx_drain(src, src->data);
+ }
+ else {
+ /* Remove all block from the head to <blk>, or <srcref> if defined, excluded */
+ srcref = (srcref ? srcref : blk);
+ for (blk = htx_get_head_blk(src); blk && blk != srcref; blk = htx_remove_blk(src, blk));
+ }
+
+ end:
+ ret = htx_used_space(dst) - ret;
+ return (struct htx_ret){.ret = ret, .blk = dstblk};
+}
+
/* Replaces an header by a new one. The new header can be smaller or larger than
 * the old one. It returns the new block on success, otherwise it returns NULL.
 * The header name is always lower cased.
 */
struct htx_blk *htx_replace_header(struct htx *htx, struct htx_blk *blk,
				   const struct ist name, const struct ist value)
{
	enum htx_blk_type type;
	void *ptr;
	int32_t delta;
	int ret;

	type = htx_get_blk_type(blk);
	if (type != HTX_BLK_HDR)
		return NULL;

	/* <delta> may be negative when the new header is smaller */
	delta = name.len + value.len - htx_get_blksz(blk);
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	/* Replace in place or at a new address is the same. We replace all the
	 * header (name+value). Only take care to defrag the message if
	 * necessary. */
	if (ret == 3)
		/* htx_defrag() updates the block size and htx->data itself */
		blk = htx_defrag(htx, blk, (type << 28) + (value.len << 8) + name.len);
	else {
		/* Set the new block size and update HTX message */
		blk->info = (type << 28) + (value.len << 8) + name.len;
		htx->data += delta;
	}

	/* Finally, copy data. */
	ptr = htx_get_blk_ptr(htx, blk);
	ist2bin_lc(ptr, name);
	htx_memcpy(ptr + name.len, value.ptr, value.len);
	return blk;
}
+
/* Replaces the parts of the start-line. It returns the new start-line on
 * success, otherwise it returns NULL. It is the caller responsibility to update
 * sl->info, if necessary.
 */
struct htx_sl *htx_replace_stline(struct htx *htx, struct htx_blk *blk, const struct ist p1,
				  const struct ist p2, const struct ist p3)
{
	enum htx_blk_type type;
	struct htx_sl *sl;
	struct htx_sl tmp; /* used to save sl->info and sl->flags */
	uint32_t sz;
	int32_t delta;
	int ret;

	type = htx_get_blk_type(blk);
	if (type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL)
		return NULL;

	/* Save start-line info and flags */
	sl = htx_get_blk_ptr(htx, blk);
	tmp.info = sl->info;
	tmp.flags = sl->flags;

	sz = htx_get_blksz(blk);
	/* new size = htx_sl header + the three parts; <delta> may be negative */
	delta = sizeof(*sl) + p1.len + p2.len + p3.len - sz;
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	/* Replace in place or at a new address is the same. We replace all the
	 * start-line. Only take care to defrag the message if necessary. */
	if (ret == 3) {
		/* htx_defrag() updates the block size and htx->data itself */
		blk = htx_defrag(htx, blk, (type << 28) + sz + delta);
	}
	else {
		/* Set the new block size and update HTX message */
		blk->info = (type << 28) + sz + delta;
		htx->data += delta;
	}

	/* Restore start-line info and flags and copy parts of the start-line */
	sl = htx_get_blk_ptr(htx, blk);
	sl->info = tmp.info;
	sl->flags = tmp.flags;

	HTX_SL_P1_LEN(sl) = p1.len;
	HTX_SL_P2_LEN(sl) = p2.len;
	HTX_SL_P3_LEN(sl) = p3.len;

	htx_memcpy(HTX_SL_P1_PTR(sl), p1.ptr, p1.len);
	htx_memcpy(HTX_SL_P2_PTR(sl), p2.ptr, p2.len);
	htx_memcpy(HTX_SL_P3_PTR(sl), p3.ptr, p3.len);

	return sl;
}
+
+/* Reserves the maximum possible size for an HTX data block, by extending an
+ * existing one or by creating a now one. It returns a compound result with the
+ * HTX block and the position where new data must be inserted (0 for a new
+ * block). If an error occurs or if there is no space left, NULL is returned
+ * instead of a pointer on an HTX block.
+ */
+struct htx_ret htx_reserve_max_data(struct htx *htx)
+{
+ struct htx_blk *blk, *tailblk;
+ uint32_t sz, room;
+ int32_t len = htx_free_data_space(htx);
+
+ if (htx->head == -1)
+ goto rsv_new_block;
+
+ if (!len)
+ return (struct htx_ret){.ret = 0, .blk = NULL};
+
+ /* get the tail and head block */
+ tailblk = htx_get_tail_blk(htx);
+ if (tailblk == NULL)
+ goto rsv_new_block;
+ sz = htx_get_blksz(tailblk);
+
+ /* Don't try to append data if the last inserted block is not of the
+ * same type */
+ if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
+ goto rsv_new_block;
+
+ /*
+ * Same type and enough space: append data
+ */
+ if (!htx->head_addr) {
+ if (tailblk->addr+sz != htx->tail_addr)
+ goto rsv_new_block;
+ room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
+ }
+ else {
+ if (tailblk->addr+sz != htx->head_addr)
+ goto rsv_new_block;
+ room = (htx->end_addr - htx->head_addr);
+ }
+ BUG_ON((int32_t)room < 0);
+ if (room < len)
+ len = room;
+
+append_data:
+ htx_change_blk_value_len(htx, tailblk, sz+len);
+
+ BUG_ON((int32_t)htx->tail_addr < 0);
+ BUG_ON((int32_t)htx->head_addr < 0);
+ BUG_ON(htx->end_addr > htx->tail_addr);
+ BUG_ON(htx->head_addr > htx->end_addr);
+ return (struct htx_ret){.ret = sz, .blk = tailblk};
+
+rsv_new_block:
+ blk = htx_add_blk(htx, HTX_BLK_DATA, len);
+ if (!blk)
+ return (struct htx_ret){.ret = 0, .blk = NULL};
+ blk->info += len;
+ return (struct htx_ret){.ret = 0, .blk = blk};
+}
+
+/* Adds an HTX block of type DATA in <htx>. It first tries to append data if
+ * possible. It returns the number of bytes consumed from <data>, which may be
+ * zero if nothing could be copied.
+ */
+size_t htx_add_data(struct htx *htx, const struct ist data)
+{
+ struct htx_blk *blk, *tailblk;
+ void *ptr;
+ uint32_t sz, room;
+ int32_t len = data.len;
+
+ /* Not enough space to store data */
+ if (len > htx_free_data_space(htx))
+ len = htx_free_data_space(htx);
+
+ if (!len)
+ return 0;
+
+ if (htx->head == -1)
+ goto add_new_block;
+
+ /* get the tail and head block */
+ tailblk = htx_get_tail_blk(htx);
+ if (tailblk == NULL)
+ goto add_new_block;
+ sz = htx_get_blksz(tailblk);
+
+ /* Don't try to append data if the last inserted block is not of the
+ * same type */
+ if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
+ goto add_new_block;
+
+ /*
+ * Same type and enough space: append data
+ */
+ if (!htx->head_addr) {
+ if (tailblk->addr+sz != htx->tail_addr)
+ goto add_new_block;
+ room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
+ }
+ else {
+ if (tailblk->addr+sz != htx->head_addr)
+ goto add_new_block;
+ room = (htx->end_addr - htx->head_addr);
+ }
+ BUG_ON((int32_t)room < 0);
+ if (room < len)
+ len = room;
+
+ append_data:
+ /* Append data and update the block itself */
+ ptr = htx_get_blk_ptr(htx, tailblk);
+ htx_memcpy(ptr + sz, data.ptr, len);
+ htx_change_blk_value_len(htx, tailblk, sz+len);
+
+ BUG_ON((int32_t)htx->tail_addr < 0);
+ BUG_ON((int32_t)htx->head_addr < 0);
+ BUG_ON(htx->end_addr > htx->tail_addr);
+ BUG_ON(htx->head_addr > htx->end_addr);
+ return len;
+
+ add_new_block:
+ blk = htx_add_blk(htx, HTX_BLK_DATA, len);
+ if (!blk)
+ return 0;
+
+ blk->info += len;
+ htx_memcpy(htx_get_blk_ptr(htx, blk), data.ptr, len);
+ return len;
+}
+
+
+/* Adds an HTX block of type DATA in <htx> just after all other DATA
+ * blocks. Because it relies on htx_add_data_atonce(), It may be happened to a
+ * DATA block if possible. But, if the function succeeds, it will be the last
+ * DATA block in all cases. If an error occurred, NULL is returned. Otherwise,
+ * on success, the updated block (or the new one) is returned.
+ */
+struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data)
+{
+ struct htx_blk *blk, *pblk;
+
+ blk = htx_add_data_atonce(htx, data);
+ if (!blk)
+ return NULL;
+
+ for (pblk = htx_get_prev_blk(htx, blk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
+ if (htx_get_blk_type(pblk) <= HTX_BLK_DATA)
+ break;
+
+ /* Swap .addr and .info fields */
+ blk->addr ^= pblk->addr; pblk->addr ^= blk->addr; blk->addr ^= pblk->addr;
+ blk->info ^= pblk->info; pblk->info ^= blk->info; blk->info ^= pblk->info;
+
+ if (blk->addr == pblk->addr)
+ blk->addr += htx_get_blksz(pblk);
+ blk = pblk;
+ }
+
+ return blk;
+}
+
+/* Moves the block <blk> just before the block <ref>. Both blocks must be in the
+ * HTX message <htx> and <blk> must be placed after <ref>. pointer to these
+ * blocks are updated to remain valid after the move. */
+void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref)
+{
+ struct htx_blk *cblk, *pblk;
+
+ cblk = *blk;
+ for (pblk = htx_get_prev_blk(htx, cblk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
+ /* Swap .addr and .info fields */
+ cblk->addr ^= pblk->addr; pblk->addr ^= cblk->addr; cblk->addr ^= pblk->addr;
+ cblk->info ^= pblk->info; pblk->info ^= cblk->info; cblk->info ^= pblk->info;
+
+ if (cblk->addr == pblk->addr)
+ cblk->addr += htx_get_blksz(pblk);
+ if (pblk == *ref)
+ break;
+ cblk = pblk;
+ }
+ *blk = cblk;
+ *ref = pblk;
+}
+
+/* Append the HTX message <src> to the HTX message <dst>. It returns 1 on
+ * success and 0 on error. All the message or nothing is copied. If an error
+ * occurred, all blocks from <src> already appended to <dst> are truncated.
+ */
+int htx_append_msg(struct htx *dst, const struct htx *src)
+{
+ struct htx_blk *blk, *newblk;
+ enum htx_blk_type type;
+ uint32_t blksz, offset = dst->data;
+
+ for (blk = htx_get_head_blk(src); blk; blk = htx_get_next_blk(src, blk)) {
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ blksz = htx_get_blksz(blk);
+ newblk = htx_add_blk(dst, type, blksz);
+ if (!newblk)
+ goto error;
+ newblk->info = blk->info;
+ htx_memcpy(htx_get_blk_ptr(dst, newblk), htx_get_blk_ptr(src, blk), blksz);
+ }
+
+ return 1;
+
+ error:
+ htx_truncate(dst, offset);
+ return 0;
+}
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 0000000..6367ac5
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,249 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <haproxy/init.h>
+#include <haproxy/list.h>
+
/* These functions are called just before a config validity check, which means
 * they are suited for use in case we need to generate part of the
 * configuration. It could be used for example to generate a proxy with
 * multiple servers using the configuration parser itself. At this step the
 * trash buffers are allocated.
 * The functions must return 0 on success, or a combination
 * of ERR_* flags (ERR_WARN, ERR_ABORT, ERR_FATAL, ...). The 2 latter cause
 * an immediate exit, so the function must have emitted any useful error.
 */
+struct list pre_check_list = LIST_HEAD_INIT(pre_check_list);
+
/* These functions are called just after the point where the program exits
 * after a config validity check, so they are generally suited for resource
 * allocation and slow initializations that should be skipped during basic
 * config checks. The functions must return 0 on success, or a combination
 * of ERR_* flags (ERR_WARN, ERR_ABORT, ERR_FATAL, ...). The 2 latter cause
 * an immediate exit, so the function must have emitted any useful error.
 */
+struct list post_check_list = LIST_HEAD_INIT(post_check_list);
+
/* These functions are called for each proxy just after the config validity
 * check. The functions must return 0 on success, or a combination of ERR_*
 * flags (ERR_WARN, ERR_ABORT, ERR_FATAL, ...). The 2 latter cause an immediate
 * exit, so the function must have emitted any useful error.
 */
+struct list post_proxy_check_list = LIST_HEAD_INIT(post_proxy_check_list);
+
/* These functions are called for each server just after the config validity
 * check. The functions must return 0 on success, or a combination of ERR_*
 * flags (ERR_WARN, ERR_ABORT, ERR_FATAL, ...). The 2 latter cause an immediate
 * exit, so the function must have emitted any useful error.
 */
+struct list post_server_check_list = LIST_HEAD_INIT(post_server_check_list);
+
/* These functions are called for each thread just after the thread creation
 * and before running the init functions. They should be used to do per-thread
 * (re-)allocations that are needed by subsequent functions. They must return 0
 * if an error occurred. */
+struct list per_thread_alloc_list = LIST_HEAD_INIT(per_thread_alloc_list);
+
+/* These functions are called for each thread just after the thread creation
+ * and before running the scheduler. They should be used to do per-thread
+ * initializations. They must return 0 if an error occurred. */
+struct list per_thread_init_list = LIST_HEAD_INIT(per_thread_init_list);
+
+/* These functions are called when freeing the global sections at the end of
+ * deinit, after everything is stopped. They don't return anything. They should
+ * not release shared resources that are possibly used by other deinit
+ * functions, only close/release what is private. Use the per_thread_free_list
+ * to release shared resources.
+ */
+struct list post_deinit_list = LIST_HEAD_INIT(post_deinit_list);
+
/* These functions are called when freeing a proxy during the deinit, after
 * everything is stopped. They don't return anything. They should not release
 * the proxy itself or any shared resources that are possibly used by other
 * deinit functions, only close/release what is private.
 */
+struct list proxy_deinit_list = LIST_HEAD_INIT(proxy_deinit_list);
+
/* These functions are called when freeing a server during the deinit, after
 * everything is stopped. They don't return anything. They should not release
 * the proxy itself or any shared resources that are possibly used by other
 * deinit functions, only close/release what is private.
 */
+struct list server_deinit_list = LIST_HEAD_INIT(server_deinit_list);
+
+/* These functions are called when freeing the global sections at the end of
+ * deinit, after the thread deinit functions, to release unneeded memory
+ * allocations. They don't return anything, and they work in best effort mode
+ * as their sole goal is to make valgrind mostly happy.
+ */
+struct list per_thread_free_list = LIST_HEAD_INIT(per_thread_free_list);
+
+/* These functions are called for each thread just after the scheduler loop and
+ * before exiting the thread. They don't return anything and, as for post-deinit
+ * functions, they work in best effort mode as their sole goal is to make
+ * valgrind mostly happy. */
+struct list per_thread_deinit_list = LIST_HEAD_INIT(per_thread_deinit_list);
+
+/* used to register some initialization functions to call before the checks. */
+void hap_register_pre_check(int (*fct)())
+{
+ struct pre_check_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&pre_check_list, &b->list);
+}
+
+/* used to register some initialization functions to call after the checks. */
+void hap_register_post_check(int (*fct)())
+{
+ struct post_check_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&post_check_list, &b->list);
+}
+
+/* used to register some initialization functions to call for each proxy after
+ * the checks.
+ */
+void hap_register_post_proxy_check(int (*fct)(struct proxy *))
+{
+ struct post_proxy_check_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&post_proxy_check_list, &b->list);
+}
+
+/* used to register some initialization functions to call for each server after
+ * the checks.
+ */
+void hap_register_post_server_check(int (*fct)(struct server *))
+{
+ struct post_server_check_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&post_server_check_list, &b->list);
+}
+
+/* used to register some de-initialization functions to call after everything
+ * has stopped.
+ */
+void hap_register_post_deinit(void (*fct)())
+{
+ struct post_deinit_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&post_deinit_list, &b->list);
+}
+
+/* used to register some per proxy de-initialization functions to call after
+ * everything has stopped.
+ */
+void hap_register_proxy_deinit(void (*fct)(struct proxy *))
+{
+ struct proxy_deinit_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&proxy_deinit_list, &b->list);
+}
+
+/* used to register some per server de-initialization functions to call after
+ * everything has stopped.
+ */
+void hap_register_server_deinit(void (*fct)(struct server *))
+{
+ struct server_deinit_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&server_deinit_list, &b->list);
+}
+
+/* used to register some allocation functions to call for each thread. */
+void hap_register_per_thread_alloc(int (*fct)())
+{
+ struct per_thread_alloc_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&per_thread_alloc_list, &b->list);
+}
+
+/* used to register some initialization functions to call for each thread. */
+void hap_register_per_thread_init(int (*fct)())
+{
+ struct per_thread_init_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&per_thread_init_list, &b->list);
+}
+
+/* used to register some de-initialization functions to call for each thread. */
+void hap_register_per_thread_deinit(void (*fct)())
+{
+ struct per_thread_deinit_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&per_thread_deinit_list, &b->list);
+}
+
+/* used to register some free functions to call for each thread. */
+void hap_register_per_thread_free(void (*fct)())
+{
+ struct per_thread_free_fct *b;
+
+ b = calloc(1, sizeof(*b));
+ if (!b) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ b->fct = fct;
+ LIST_APPEND(&per_thread_free_list, &b->list);
+}
diff --git a/src/jwt.c b/src/jwt.c
new file mode 100644
index 0000000..6c4cbd3
--- /dev/null
+++ b/src/jwt.c
@@ -0,0 +1,478 @@
+/*
+ * JSON Web Token (JWT) processing
+ *
+ * Copyright 2021 HAProxy Technologies
+ * Remi Tricot-Le Breton <rlebreton@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <import/ebmbtree.h>
+#include <import/ebsttree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/tools.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/base64.h>
+#include <haproxy/jwt.h>
+#include <haproxy/buf.h>
+
+
+#ifdef USE_OPENSSL
/* Tree into which the public certificates used to validate JWTs will be
 * stored, keyed by certificate file path (ebst string keys, see
 * jwt_tree_load_cert()). */
static struct eb_root jwt_cert_tree = EB_ROOT_UNIQUE;
+
+/*
+ * The possible algorithm strings that can be found in a JWS's JOSE header are
+ * defined in section 3.1 of RFC7518.
+ */
+enum jwt_alg jwt_parse_alg(const char *alg_str, unsigned int alg_len)
+{
+ enum jwt_alg alg = JWT_ALG_DEFAULT;
+
+ /* Algorithms are all 5 characters long apart from "none". */
+ if (alg_len < sizeof("HS256")-1) {
+ if (alg_len == sizeof("none")-1 && strcmp("none", alg_str) == 0)
+ alg = JWS_ALG_NONE;
+ return alg;
+ }
+
+ if (alg == JWT_ALG_DEFAULT) {
+ switch(*alg_str++) {
+ case 'H':
+ if (strncmp(alg_str, "S256", alg_len-1) == 0)
+ alg = JWS_ALG_HS256;
+ else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+ alg = JWS_ALG_HS384;
+ else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+ alg = JWS_ALG_HS512;
+ break;
+ case 'R':
+ if (strncmp(alg_str, "S256", alg_len-1) == 0)
+ alg = JWS_ALG_RS256;
+ else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+ alg = JWS_ALG_RS384;
+ else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+ alg = JWS_ALG_RS512;
+ break;
+ case 'E':
+ if (strncmp(alg_str, "S256", alg_len-1) == 0)
+ alg = JWS_ALG_ES256;
+ else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+ alg = JWS_ALG_ES384;
+ else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+ alg = JWS_ALG_ES512;
+ break;
+ case 'P':
+ if (strncmp(alg_str, "S256", alg_len-1) == 0)
+ alg = JWS_ALG_PS256;
+ else if (strncmp(alg_str, "S384", alg_len-1) == 0)
+ alg = JWS_ALG_PS384;
+ else if (strncmp(alg_str, "S512", alg_len-1) == 0)
+ alg = JWS_ALG_PS512;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return alg;
+}
+
/*
 * Split a JWT into its separate dot-separated parts.
 * Since only JWS following the Compact Serialization format are managed for
 * now, we don't need to manage more than three subparts in the tokens.
 * See section 3.1 of RFC7515 for more information about JWS Compact
 * Serialization.
 * <items> is a caller-provided array and <item_num> is an in/out parameter:
 * on input it holds the capacity of <items>, on output the index of the
 * last subpart found plus one.
 * Returns 0 in case of success (the whole token was consumed), -1 if the
 * token contains more dot-separated subparts than <items> can hold, and a
 * non-zero value if tokenizing stopped before the end of the token.
 */
int jwt_tokenize(const struct buffer *jwt, struct jwt_item *items, unsigned int *item_num)
{
	char *ptr = jwt->area;
	char *jwt_end = jwt->area + jwt->data;
	unsigned int index = 0;
	unsigned int length = 0;

	/* The first subpart starts at the very beginning of the token. */
	if (index < *item_num) {
		items[index].start = ptr;
		items[index].length = 0;
	}

	while (index < *item_num && ptr < jwt_end) {
		if (*ptr++ == '.') {
			/* End of the current subpart, record its length. */
			items[index++].length = length;

			/* More dots than the caller can store. */
			if (index == *item_num)
				return -1;
			items[index].start = ptr;
			items[index].length = 0;
			length = 0;
		} else
			++length;
	}

	/* Close the last subpart (which has no trailing dot). */
	if (index < *item_num)
		items[index].length = length;

	*item_num = (index+1);

	return (ptr != jwt_end);
}
+
/*
 * Parse a public certificate and insert it into the jwt_cert_tree.
 * <path> (of length <pathlen>, NUL-terminated since pathlen+1 bytes are
 * copied) is both the lookup key in the tree and the file from which the
 * PEM public key is read. If an entry already exists for this path, nothing
 * is reloaded and 0 is returned.
 * Returns 0 in case of success, -1 otherwise, in which case <err> is filled
 * with an error message (except when BIO_read_filename() itself fails,
 * which leaves <err> untouched).
 */
int jwt_tree_load_cert(char *path, int pathlen, char **err)
{
	int retval = -1;
	struct jwt_cert_tree_entry *entry = NULL;
	EVP_PKEY *pkey = NULL;
	BIO *bio = NULL;

	/* The entry embeds the path (the ebst key) right after the node. */
	entry = calloc(1, sizeof(*entry) + pathlen + 1);
	if (!entry) {
		memprintf(err, "%sunable to allocate memory (jwt_cert_tree_entry).\n", err && *err ? *err : "");
		return -1;
	}
	memcpy(entry->path, path, pathlen + 1);

	if (ebst_insert(&jwt_cert_tree, &entry->node) != &entry->node) {
		free(entry);
		return 0; /* Entry already in the tree */
	}

	bio = BIO_new(BIO_s_file());
	if (!bio) {
		memprintf(err, "%sunable to allocate memory (BIO).\n", err && *err ? *err : "");
		goto end;
	}

	if (BIO_read_filename(bio, path) == 1) {

		pkey = PEM_read_bio_PUBKEY(bio, NULL, NULL, NULL);

		/* NOTE(review): this branch is reached when the file exists
		 * but does not contain a parseable PEM public key, so the
		 * "file not found" wording can be misleading. */
		if (!pkey) {
			memprintf(err, "%sfile not found (%s)\n", err && *err ? *err : "", path);
			goto end;
		}

		entry->pkey = pkey;
		retval = 0;
	}

end:
	if (retval) {
		/* Some error happened during pkey parsing, remove the already
		 * inserted node from the tree and free it.
		 */
		ebmb_delete(&entry->node);
		free(entry);
	}
	BIO_free(bio);
	return retval;
}
+
/*
 * Calculate the HMAC signature of a specific JWT and check that it matches
 * the one included in the token. The digest is computed over
 * "<jose>.<claims>", which is contiguous in the original token buffer
 * (hence jose.length + claims.length + 1 below, the +1 accounting for the
 * separating dot).
 * Returns JWT_VRFY_OK in case of success, JWT_VRFY_KO otherwise.
 */
static enum jwt_vrfy_status
jwt_jwsverify_hmac(const struct jwt_ctx *ctx, const struct buffer *decoded_signature)
{
	const EVP_MD *evp = NULL;
	unsigned char signature[EVP_MAX_MD_SIZE];
	unsigned int signature_length = 0;
	unsigned char *hmac_res = NULL;
	enum jwt_vrfy_status retval = JWT_VRFY_KO;

	/* The caller (jwt_verify()) only dispatches HSxxx algorithms here,
	 * so <evp> is expected to always end up set. */
	switch(ctx->alg) {
	case JWS_ALG_HS256:
		evp = EVP_sha256();
		break;
	case JWS_ALG_HS384:
		evp = EVP_sha384();
		break;
	case JWS_ALG_HS512:
		evp = EVP_sha512();
		break;
	default: break;
	}

	hmac_res = HMAC(evp, ctx->key, ctx->key_length, (const unsigned char*)ctx->jose.start,
	                ctx->jose.length + ctx->claims.length + 1, signature, &signature_length);

	/* Constant-time comparison so that no timing information leaks about
	 * how much of the signature matched. */
	if (hmac_res && signature_length == decoded_signature->data &&
	    (CRYPTO_memcmp(decoded_signature->area, signature, signature_length) == 0))
		retval = JWT_VRFY_OK;

	return retval;
}
+
+/*
+ * Convert a JWT ECDSA signature (R and S parameters concatenatedi, see section
+ * 3.4 of RFC7518) into an ECDSA_SIG that can be fed back into OpenSSL's digest
+ * verification functions.
+ * Returns 0 in case of success.
+ */
+static int convert_ecdsa_sig(const struct jwt_ctx *ctx, EVP_PKEY *pkey, struct buffer *signature)
+{
+ int retval = 0;
+ ECDSA_SIG *ecdsa_sig = NULL;
+ BIGNUM *ec_R = NULL, *ec_S = NULL;
+ unsigned int bignum_len;
+ unsigned char *p;
+
+ ecdsa_sig = ECDSA_SIG_new();
+ if (!ecdsa_sig) {
+ retval = JWT_VRFY_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ if (b_data(signature) % 2) {
+ retval = JWT_VRFY_INVALID_TOKEN;
+ goto end;
+ }
+
+ bignum_len = b_data(signature) / 2;
+
+ ec_R = BN_bin2bn((unsigned char*)b_orig(signature), bignum_len, NULL);
+ ec_S = BN_bin2bn((unsigned char *)(b_orig(signature) + bignum_len), bignum_len, NULL);
+
+ if (!ec_R || !ec_S) {
+ retval = JWT_VRFY_INVALID_TOKEN;
+ goto end;
+ }
+
+ /* Build ecdsa out of R and S values. */
+ ECDSA_SIG_set0(ecdsa_sig, ec_R, ec_S);
+
+ p = (unsigned char*)signature->area;
+
+ signature->data = i2d_ECDSA_SIG(ecdsa_sig, &p);
+ if (signature->data == 0) {
+ retval = JWT_VRFY_INVALID_TOKEN;
+ goto end;
+ }
+
+end:
+ ECDSA_SIG_free(ecdsa_sig);
+ return retval;
+}
+
/*
 * Check that the signature included in a JWT signed via RSA or ECDSA is valid
 * and can be verified thanks to a given public certificate. The public key
 * is looked up in jwt_cert_tree using ctx->key (a certificate path) as key.
 * <decoded_signature> may be rewritten in place for ECDSA algorithms.
 * Returns JWT_VRFY_OK in case of success, another jwt_vrfy_status code
 * describing the failure otherwise.
 */
static enum jwt_vrfy_status
jwt_jwsverify_rsa_ecdsa(const struct jwt_ctx *ctx, struct buffer *decoded_signature)
{
	const EVP_MD *evp = NULL;
	EVP_MD_CTX *evp_md_ctx;
	EVP_PKEY_CTX *pkey_ctx = NULL;
	enum jwt_vrfy_status retval = JWT_VRFY_KO;
	struct ebmb_node *eb;
	struct jwt_cert_tree_entry *entry = NULL;
	int is_ecdsa = 0;
	int padding = RSA_PKCS1_PADDING;

	/* Select digest, padding and signature format from the algorithm.
	 * The caller (jwt_verify()) only dispatches RSxxx/ESxxx/PSxxx
	 * algorithms here, so <evp> is expected to always end up set. */
	switch(ctx->alg) {
	case JWS_ALG_RS256:
		evp = EVP_sha256();
		break;
	case JWS_ALG_RS384:
		evp = EVP_sha384();
		break;
	case JWS_ALG_RS512:
		evp = EVP_sha512();
		break;

	case JWS_ALG_ES256:
		evp = EVP_sha256();
		is_ecdsa = 1;
		break;
	case JWS_ALG_ES384:
		evp = EVP_sha384();
		is_ecdsa = 1;
		break;
	case JWS_ALG_ES512:
		evp = EVP_sha512();
		is_ecdsa = 1;
		break;

	case JWS_ALG_PS256:
		evp = EVP_sha256();
		padding = RSA_PKCS1_PSS_PADDING;
		break;
	case JWS_ALG_PS384:
		evp = EVP_sha384();
		padding = RSA_PKCS1_PSS_PADDING;
		break;
	case JWS_ALG_PS512:
		evp = EVP_sha512();
		padding = RSA_PKCS1_PSS_PADDING;
		break;
	default: break;
	}

	evp_md_ctx = EVP_MD_CTX_new();
	if (!evp_md_ctx)
		return JWT_VRFY_OUT_OF_MEMORY;

	eb = ebst_lookup(&jwt_cert_tree, ctx->key);

	if (!eb) {
		retval = JWT_VRFY_UNKNOWN_CERT;
		goto end;
	}

	entry = ebmb_entry(eb, struct jwt_cert_tree_entry, node);

	if (!entry->pkey) {
		retval = JWT_VRFY_UNKNOWN_CERT;
		goto end;
	}

	/*
	 * ECXXX signatures are a direct concatenation of the (R, S) pair and
	 * need to be converted back to asn.1 in order for verify operations to
	 * work with OpenSSL.
	 */
	if (is_ecdsa) {
		int conv_retval = convert_ecdsa_sig(ctx, entry->pkey, decoded_signature);
		if (conv_retval != 0) {
			retval = conv_retval;
			goto end;
		}
	}

	/* NOTE(review): pkey_ctx is owned by evp_md_ctx after a successful
	 * init and is freed along with it — per OpenSSL's documented
	 * EVP_DigestVerifyInit behavior. The digest covers "<jose>.<claims>"
	 * which is contiguous in the token (+1 for the dot). */
	if (EVP_DigestVerifyInit(evp_md_ctx, &pkey_ctx, evp, NULL, entry->pkey) == 1) {
		if (is_ecdsa || EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding) > 0) {
			if (EVP_DigestVerifyUpdate(evp_md_ctx, (const unsigned char*)ctx->jose.start,
			                           ctx->jose.length + ctx->claims.length + 1) == 1 &&
			    EVP_DigestVerifyFinal(evp_md_ctx, (const unsigned char*)decoded_signature->area, decoded_signature->data) == 1) {
				retval = JWT_VRFY_OK;
			}
		}
	}

end:
	EVP_MD_CTX_free(evp_md_ctx);
	return retval;
}
+
/*
 * Check that the <token> that was signed via algorithm <alg> using the <key>
 * (either an HMAC secret or the path to a public certificate) has a valid
 * signature.
 * Returns JWT_VRFY_OK in case of success, another jwt_vrfy_status code
 * describing the failure otherwise.
 */
enum jwt_vrfy_status jwt_verify(const struct buffer *token, const struct buffer *alg,
                                const struct buffer *key)
{
	struct jwt_item items[JWT_ELT_MAX] = { { 0 } };
	unsigned int item_num = JWT_ELT_MAX;
	struct buffer *decoded_sig = NULL;
	struct jwt_ctx ctx = {};
	enum jwt_vrfy_status retval = JWT_VRFY_KO;
	int ret;

	ctx.alg = jwt_parse_alg(alg->area, alg->data);

	if (ctx.alg == JWT_ALG_DEFAULT)
		return JWT_VRFY_UNKNOWN_ALG;

	if (jwt_tokenize(token, items, &item_num))
		return JWT_VRFY_INVALID_TOKEN;

	/* A complete JWS has JWT_ELT_MAX dot-separated parts
	 * (header.payload.signature); the only accepted shorter form is an
	 * unsigned ("none") token missing its signature part. */
	if (item_num != JWT_ELT_MAX)
		if (ctx.alg != JWS_ALG_NONE || item_num != JWT_ELT_SIG)
			return JWT_VRFY_INVALID_TOKEN;

	ctx.jose = items[JWT_ELT_JOSE];
	ctx.claims = items[JWT_ELT_CLAIMS];
	ctx.signature = items[JWT_ELT_SIG];

	/* "alg" is "none", the signature must be empty for the JWS to be valid. */
	if (ctx.alg == JWS_ALG_NONE) {
		return (ctx.signature.length == 0) ? JWT_VRFY_OK : JWT_VRFY_KO;
	}

	if (ctx.signature.length == 0)
		return JWT_VRFY_INVALID_TOKEN;

	/* The signature is base64url-encoded in the token; decode it into a
	 * temporary trash chunk before verification. */
	decoded_sig = alloc_trash_chunk();
	if (!decoded_sig)
		return JWT_VRFY_OUT_OF_MEMORY;

	ret = base64urldec(ctx.signature.start, ctx.signature.length,
	                   decoded_sig->area, decoded_sig->size);
	if (ret == -1) {
		retval = JWT_VRFY_INVALID_TOKEN;
		goto end;
	}

	decoded_sig->data = ret;
	ctx.key = key->area;
	ctx.key_length = key->data;

	/* We have all three sections, signature calculation can begin. */

	switch(ctx.alg) {

	case JWS_ALG_HS256:
	case JWS_ALG_HS384:
	case JWS_ALG_HS512:
		/* HMAC + SHA-XXX */
		retval = jwt_jwsverify_hmac(&ctx, decoded_sig);
		break;
	case JWS_ALG_RS256:
	case JWS_ALG_RS384:
	case JWS_ALG_RS512:
	case JWS_ALG_ES256:
	case JWS_ALG_ES384:
	case JWS_ALG_ES512:
	case JWS_ALG_PS256:
	case JWS_ALG_PS384:
	case JWS_ALG_PS512:
		/* RSASSA-PKCS1-v1_5 + SHA-XXX */
		/* ECDSA using P-XXX and SHA-XXX */
		/* RSASSA-PSS using SHA-XXX and MGF1 with SHA-XXX */
		retval = jwt_jwsverify_rsa_ecdsa(&ctx, decoded_sig);
		break;
	default:
		/* Not managed yet */
		retval = JWT_VRFY_UNMANAGED_ALG;
		break;
	}

end:
	free_trash_chunk(decoded_sig);

	return retval;
}
+
/* Empty the jwt_cert_tree at deinit time: delete every node and release the
 * associated public key. Always restarts from the first node since deletion
 * invalidates the iteration. */
static void jwt_deinit(void)
{
	struct ebmb_node *node = NULL;
	struct jwt_cert_tree_entry *entry = NULL;

	node = ebmb_first(&jwt_cert_tree);
	while (node) {
		entry = ebmb_entry(node, struct jwt_cert_tree_entry, node);
		ebmb_delete(node);
		EVP_PKEY_free(entry->pkey);
		ha_free(&entry);
		node = ebmb_first(&jwt_cert_tree);
	}
}
REGISTER_POST_DEINIT(jwt_deinit);
+
+
+#endif /* USE_OPENSSL */
diff --git a/src/lb_chash.c b/src/lb_chash.c
new file mode 100644
index 0000000..4e8fb15
--- /dev/null
+++ b/src/lb_chash.c
@@ -0,0 +1,517 @@
+/*
+ * Consistent Hash implementation
+ * Please consult this very well detailed article for more information :
+ * http://www.spiteful.com/2008/03/17/programmers-toolbox-part-3-consistent-hashing/
+ *
+ * Our implementation has to support both weighted hashing and weighted round
+ * robin because we'll use it to replace the previous map-based implementation
+ * which offered both algorithms.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/errors.h>
+#include <haproxy/queue.h>
+#include <haproxy/server-t.h>
+#include <haproxy/tools.h>
+
+/* Return next tree node after <node> which must still be in the tree, or be
+ * NULL. Lookup wraps around the end to the beginning. If the next node is the
+ * same node, return NULL. This is designed to find a valid next node before
+ * deleting one from the tree.
+ */
+static inline struct eb32_node *chash_skip_node(struct eb_root *root, struct eb32_node *node)
+{
+ struct eb32_node *stop = node;
+
+ if (!node)
+ return NULL;
+ node = eb32_next(node);
+ if (!node)
+ node = eb32_first(root);
+ if (node == stop)
+ return NULL;
+ return node;
+}
+
/* Remove all of a server's entries from its tree. This may be used when
 * setting a server down.
 *
 * The lbprm's lock must be held (callers take it in write mode).
 */
static inline void chash_dequeue_srv(struct server *s)
{
	while (s->lb_nodes_now > 0) {
		if (s->lb_nodes_now >= s->lb_nodes_tot) // should always be false anyway
			s->lb_nodes_now = s->lb_nodes_tot;
		s->lb_nodes_now--;
		/* don't leave the cached "last visited" pointer dangling on
		 * the node we're about to delete */
		if (s->proxy->lbprm.chash.last == &s->lb_nodes[s->lb_nodes_now].node)
			s->proxy->lbprm.chash.last = chash_skip_node(s->lb_tree, s->proxy->lbprm.chash.last);
		eb32_delete(&s->lb_nodes[s->lb_nodes_now].node);
	}
}
+
/* Adjust the number of entries of a server in its tree. The server must appear
 * as many times as its weight indicates it. If it's there too often, we remove
 * the last occurrences. If it's not there enough, we add more occurrences. To
 * remove a server from the tree, normally call this with eweight=0.
 *
 * The server's lock and the lbprm's lock must be held.
 */
static inline void chash_queue_dequeue_srv(struct server *s)
{
	/* First trim the excess nodes (weight was lowered). */
	while (s->lb_nodes_now > s->next_eweight) {
		if (s->lb_nodes_now >= s->lb_nodes_tot) // should always be false anyway
			s->lb_nodes_now = s->lb_nodes_tot;
		s->lb_nodes_now--;
		/* don't leave the cached "last visited" pointer dangling on
		 * the node we're about to delete */
		if (s->proxy->lbprm.chash.last == &s->lb_nodes[s->lb_nodes_now].node)
			s->proxy->lbprm.chash.last = chash_skip_node(s->lb_tree, s->proxy->lbprm.chash.last);
		eb32_delete(&s->lb_nodes[s->lb_nodes_now].node);
	}

	/* Attempt to increase the total number of nodes, if the user
	 * increased the weight beyond the original weight
	 */
	if (s->lb_nodes_tot < s->next_eweight) {
		struct tree_occ *new_nodes;

		/* First we need to remove all server's entries from its tree
		 * because the realloc will change all nodes pointers */
		chash_dequeue_srv(s);

		/* NOTE(review): on realloc failure the weight increase is
		 * silently capped at the current lb_nodes_tot. */
		new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
		if (new_nodes) {
			unsigned int j;

			s->lb_nodes = new_nodes;
			/* zero then initialize only the newly added occurrences */
			memset(&s->lb_nodes[s->lb_nodes_tot], 0,
			    (s->next_eweight - s->lb_nodes_tot) * sizeof(*s->lb_nodes));
			for (j = s->lb_nodes_tot; j < s->next_eweight; j++) {
				s->lb_nodes[j].server = s;
				s->lb_nodes[j].node.key = full_hash(s->puid * SRV_EWGHT_RANGE + j);
			}
			s->lb_nodes_tot = s->next_eweight;
		}
	}
	/* Then insert nodes until the current count matches the new weight. */
	while (s->lb_nodes_now < s->next_eweight) {
		if (s->lb_nodes_now >= s->lb_nodes_tot) // should always be false anyway
			break;
		if (s->proxy->lbprm.chash.last == &s->lb_nodes[s->lb_nodes_now].node)
			s->proxy->lbprm.chash.last = chash_skip_node(s->lb_tree, s->proxy->lbprm.chash.last);
		eb32_insert(s->lb_tree, &s->lb_nodes[s->lb_nodes_now].node);
		s->lb_nodes_now++;
	}
}
+
/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to down.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely down (the caller may not
 * know all the variables of a server's state).
 *
 * The server's lock must be held. The lbprm lock will be used.
 */
static void chash_set_server_status_down(struct server *srv)
{
	struct proxy *p = srv->proxy;

	if (!srv_lb_status_changed(srv))
		return;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	/* future state is up: nothing to dequeue, just commit the status */
	if (srv_willbe_usable(srv))
		goto out_update_state;

	if (!srv_currently_usable(srv))
		/* server was already down */
		goto out_update_backend;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck -= srv->cur_eweight;
		p->srv_bck--;

		if (srv == p->lbprm.fbck) {
			/* we lost the first backup server in a single-backup
			 * configuration, we must search another one.
			 */
			struct server *srv2 = p->lbprm.fbck;
			do {
				srv2 = srv2->next;
			} while (srv2 &&
				 !((srv2->flags & SRV_F_BACKUP) &&
				   srv_willbe_usable(srv2)));
			p->lbprm.fbck = srv2;
		}
	} else {
		p->lbprm.tot_wact -= srv->cur_eweight;
		p->srv_act--;
	}

	/* remove all of this server's occurrences from its tree */
	chash_dequeue_srv(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
out_update_state:
	srv_lb_commit_status(srv);

	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
+
/* This function updates the server trees according to server <srv>'s new
 * state. It should be called when server <srv>'s status changes to up.
 * It is not important whether the server was already down or not. It is not
 * important either that the new state is completely UP (the caller may not
 * know all the variables of a server's state). This function will not change
 * the weight of a server which was already up.
 *
 * The server's lock must be held. The lbprm lock will be used.
 */
static void chash_set_server_status_up(struct server *srv)
{
	struct proxy *p = srv->proxy;

	if (!srv_lb_status_changed(srv))
		return;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	/* future state is down: nothing to queue, just commit the status */
	if (!srv_willbe_usable(srv))
		goto out_update_state;

	if (srv_currently_usable(srv))
		/* server was already up */
		goto out_update_backend;

	if (srv->flags & SRV_F_BACKUP) {
		p->lbprm.tot_wbck += srv->next_eweight;
		p->srv_bck++;

		if (!(p->options & PR_O_USE_ALL_BK)) {
			if (!p->lbprm.fbck) {
				/* there was no backup server anymore */
				p->lbprm.fbck = srv;
			} else {
				/* we may have restored a backup server prior to fbck,
				 * in which case it should replace it.
				 */
				struct server *srv2 = srv;
				do {
					srv2 = srv2->next;
				} while (srv2 && (srv2 != p->lbprm.fbck));
				if (srv2)
					p->lbprm.fbck = srv;
			}
		}
	} else {
		p->lbprm.tot_wact += srv->next_eweight;
		p->srv_act++;
	}

	/* note that eweight cannot be 0 here */
	chash_queue_dequeue_srv(srv);

out_update_backend:
	/* check/update tot_used, tot_weight */
	update_backend_weight(p);
out_update_state:
	srv_lb_commit_status(srv);

	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
+
/* This function must be called after an update to server <srv>'s effective
 * weight. It may be called after a state change too.
 *
 * The server's lock must be held. The lbprm lock may be used.
 */
static void chash_update_server_weight(struct server *srv)
{
	int old_state, new_state;
	struct proxy *p = srv->proxy;

	if (!srv_lb_status_changed(srv))
		return;

	/* If changing the server's weight changes its state, we simply apply
	 * the procedures we already have for status change. If the state
	 * remains down, the server is not in any tree, so it's as easy as
	 * updating its values. If the state remains up with different weights,
	 * there are some computations to perform to find a new place and
	 * possibly a new tree for this server.
	 */

	old_state = srv_currently_usable(srv);
	new_state = srv_willbe_usable(srv);

	if (!old_state && !new_state) {
		/* stays down: nothing is queued, just record the new weight */
		srv_lb_commit_status(srv);
		return;
	}
	else if (!old_state && new_state) {
		chash_set_server_status_up(srv);
		return;
	}
	else if (old_state && !new_state) {
		chash_set_server_status_down(srv);
		return;
	}

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);

	/* only adjust the server's presence in the tree */
	chash_queue_dequeue_srv(srv);

	/* apply the weight delta to the proper (active/backup) total */
	if (srv->flags & SRV_F_BACKUP)
		p->lbprm.tot_wbck += srv->next_eweight - srv->cur_eweight;
	else
		p->lbprm.tot_wact += srv->next_eweight - srv->cur_eweight;

	update_backend_weight(p);
	srv_lb_commit_status(srv);

	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
}
+
/*
 * This function implements the "Consistent Hashing with Bounded Loads" algorithm
 * of Mirrokni, Thorup, and Zadimoghaddam (arxiv:1608.01350), adapted for use with
 * unequal server weights.
 * Returns non-zero if server <s> may accept one more connection without
 * exceeding its share of the bounded total load.
 */
int chash_server_is_eligible(struct server *s)
{
	/* The total number of slots to allocate is the total number of outstanding requests
	 * (including the one we're about to make) times the load-balance-factor, rounded up.
	 */
	unsigned tot_slots = ((s->proxy->served + 1) * s->proxy->lbprm.hash_balance_factor + 99) / 100;
	unsigned slots_per_weight = tot_slots / s->proxy->lbprm.tot_weight;
	unsigned remainder = tot_slots % s->proxy->lbprm.tot_weight;

	/* Allocate a whole number of slots per weight unit... */
	unsigned slots = s->cur_eweight * slots_per_weight;

	/* And then distribute the rest among servers proportionally to their weight.
	 * The difference of two cumulative-weight prefix shares yields this
	 * server's share of the remainder without rounding drift. */
	slots += ((s->cumulative_weight + s->cur_eweight) * remainder) / s->proxy->lbprm.tot_weight
	       - (s->cumulative_weight * remainder) / s->proxy->lbprm.tot_weight;

	/* But never leave a server with 0. */
	if (slots == 0)
		slots = 1;

	return s->served < slots;
}
+
/*
 * This function returns the running server from the CHASH tree, which is at
 * the closest distance from the value of <hash>. Doing so ensures that even
 * with a well imbalanced hash, if some servers are close to each other, they
 * will still both receive traffic. If any server is found, it will be returned.
 * It will also skip server <avoid> if the hash result ends on this one.
 * If no valid server is found, NULL is returned.
 *
 * The lbprm's lock will be used in R/O mode. The server's lock is not used.
 */
struct server *chash_get_server_hash(struct proxy *p, unsigned int hash, const struct server *avoid)
{
	struct eb32_node *next, *prev;
	struct server *nsrv, *psrv;
	struct eb_root *root;
	unsigned int dn, dp;
	int loop;

	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &p->lbprm.lock);

	/* pick the active tree, or fall back to the first backup server or
	 * the backup tree when no active server remains */
	if (p->srv_act)
		root = &p->lbprm.chash.act;
	else if (p->lbprm.fbck) {
		nsrv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		root = &p->lbprm.chash.bck;
	else {
		nsrv = NULL;
		goto out;
	}

	/* find the node after and the node before */
	next = eb32_lookup_ge(root, hash);
	if (!next)
		next = eb32_first(root); /* wrap around past the highest key */
	if (!next) {
		nsrv = NULL; /* tree is empty */
		goto out;
	}

	prev = eb32_prev(next);
	if (!prev)
		prev = eb32_last(root);

	nsrv = eb32_entry(next, struct tree_occ, node)->server;
	psrv = eb32_entry(prev, struct tree_occ, node)->server;

	/* OK we're located between two servers, let's
	 * compare distances between hash and the two servers
	 * and select the closest server.
	 */
	dp = hash - prev->key;
	dn = next->key - hash;

	if (dp <= dn) {
		next = prev;
		nsrv = psrv;
	}

	/* walk forward while the candidate is the avoided server or is over
	 * its bounded-load limit; <loop> protects against cycling forever */
	loop = 0;
	while (nsrv == avoid || (p->lbprm.hash_balance_factor && !chash_server_is_eligible(nsrv))) {
		next = eb32_next(next);
		if (!next) {
			next = eb32_first(root);
			if (++loop > 1) // protection against accidental loop
				break;
		}
		nsrv = eb32_entry(next, struct tree_occ, node)->server;
	}

 out:
	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return nsrv;
}
+
/* Return next server from the CHASH tree in backend <p>. If the tree is empty,
 * return NULL. Saturated servers are skipped.
 *
 * The lbprm's lock will be used in R/W mode (the cached <last> pointer is
 * updated). The server's lock is not used.
 */
struct server *chash_get_next_server(struct proxy *p, struct server *srvtoavoid)
{
	struct server *srv, *avoided;
	struct eb32_node *node, *stop, *avoided_node;
	struct eb_root *root;

	srv = avoided = NULL;
	avoided_node = NULL;

	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
	/* pick the active tree, or fall back to the first backup server or
	 * the backup tree when no active server remains */
	if (p->srv_act)
		root = &p->lbprm.chash.act;
	else if (p->lbprm.fbck) {
		srv = p->lbprm.fbck;
		goto out;
	}
	else if (p->srv_bck)
		root = &p->lbprm.chash.bck;
	else {
		srv = NULL;
		goto out;
	}

	/* resume the walk where the previous call stopped */
	stop = node = p->lbprm.chash.last;
	do {
		struct server *s;

		if (node)
			node = eb32_next(node);
		if (!node)
			node = eb32_first(root);

		p->lbprm.chash.last = node;
		if (!node) {
			/* no node is available */
			srv = NULL;
			goto out;
		}

		/* Note: if we came here after a down/up cycle with no last
		 * pointer, and after a redispatch (srvtoavoid is set), we
		 * must set stop to non-null otherwise we can loop forever.
		 */
		if (!stop)
			stop = node;

		/* OK, we have a server. However, it may be saturated, in which
		 * case we don't want to reconsider it for now, so we'll simply
		 * skip it. Same if it's the server we try to avoid, in which
		 * case we simply remember it for later use if needed.
		 */
		s = eb32_entry(node, struct tree_occ, node)->server;
		if (!s->maxconn || (!s->queue.length && s->served < srv_dynamic_maxconn(s))) {
			if (s != srvtoavoid) {
				srv = s;
				break;
			}
			avoided = s;
			avoided_node = node;
		}
	} while (node != stop);

	/* all we found was the avoided server: use it as last resort */
	if (!srv) {
		srv = avoided;
		p->lbprm.chash.last = avoided_node;
	}

 out:
	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
	return srv;
}
+
/* This function is responsible for building the active and backup trees for
 * consistent hashing. The servers receive an array of initialized nodes
 * with their assigned keys. It also sets p->lbprm.wdiv to the eweight to
 * uweight ratio.
 * Return 0 in case of success, -1 in case of allocation failure.
 */
int chash_init_server_tree(struct proxy *p)
{
	struct server *srv;
	struct eb_root init_head = EB_ROOT;
	int node;

	/* install the chash callbacks in the generic lb interface */
	p->lbprm.set_server_status_up = chash_set_server_status_up;
	p->lbprm.set_server_status_down = chash_set_server_status_down;
	p->lbprm.update_server_eweight = chash_update_server_weight;
	p->lbprm.server_take_conn = NULL;
	p->lbprm.server_drop_conn = NULL;

	p->lbprm.wdiv = BE_WEIGHT_SCALE;
	for (srv = p->srv; srv; srv = srv->next) {
		srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
		srv_lb_commit_status(srv);
	}

	recount_servers(p);
	update_backend_weight(p);

	p->lbprm.chash.act = init_head;
	p->lbprm.chash.bck = init_head;
	p->lbprm.chash.last = NULL;

	/* queue active and backup servers in two distinct groups */
	for (srv = p->srv; srv; srv = srv->next) {
		srv->lb_tree = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.chash.bck : &p->lbprm.chash.act;
		/* reserve one occurrence per weight unit at full scale */
		srv->lb_nodes_tot = srv->uweight * BE_WEIGHT_SCALE;
		srv->lb_nodes_now = 0;
		srv->lb_nodes = calloc(srv->lb_nodes_tot,
				       sizeof(*srv->lb_nodes));
		if (!srv->lb_nodes) {
			ha_alert("failed to allocate lb_nodes for server %s.\n", srv->id);
			return -1;
		}
		/* each occurrence gets a distinct hashed key on the ring */
		for (node = 0; node < srv->lb_nodes_tot; node++) {
			srv->lb_nodes[node].server = srv;
			srv->lb_nodes[node].node.key = full_hash(srv->puid * SRV_EWGHT_RANGE + node);
		}

		if (srv_currently_usable(srv))
			chash_queue_dequeue_srv(srv);
	}
	return 0;
}
diff --git a/src/lb_fas.c b/src/lb_fas.c
new file mode 100644
index 0000000..d90388b
--- /dev/null
+++ b/src/lb_fas.c
@@ -0,0 +1,348 @@
+/*
+ * First Available Server load balancing algorithm.
+ *
+ * This file implements an algorithm which emerged during a discussion with
+ * Steen Larsen, initially inspired by Anshul Gandhi et al.'s work now
+ * described as "packing" in section 3.5:
+ *
+ * http://reports-archive.adm.cs.cmu.edu/anon/2012/CMU-CS-12-109.pdf
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/queue.h>
+#include <haproxy/server-t.h>
+
+
+/* Remove a server from a tree. It must have previously been dequeued. This
+ * function is meant to be called when a server is going down or has its
+ * weight disabled.
+ *
+ * The server's lock and the lbprm's lock must be held.
+ */
+static inline void fas_remove_from_tree(struct server *s)
+{
+ s->lb_tree = NULL; /* only forgets the tree; the eb32 node must already be detached */
+}
+
+/* simply removes a server from a tree.
+ *
+ * The lbprm's lock must be held.
+ */
+static inline void fas_dequeue_srv(struct server *s)
+{
+ eb32_delete(&s->lb_node); /* detach lb_node from whatever tree holds it */
+}
+
+/* Queue a server in its associated tree, assuming the weight is >0.
+ * Servers are sorted by unique ID so that we send all connections to the first
+ * available server in declaration order (or ID order) until its maxconn is
+ * reached. It is important to understand that the server weight is not used
+ * here.
+ *
+ * The lbprm's lock must be held.
+ */
+static inline void fas_queue_srv(struct server *s)
+{
+ s->lb_node.key = s->puid; /* sort key is the server's unique ID */
+ eb32_insert(s->lb_tree, &s->lb_node);
+}
+
+/* Re-position the server in the FS tree after it has been assigned one
+ * connection or after it has released one. Note that it is possible that
+ * the server has been moved out of the tree due to failed health-checks.
+ * The lbprm's lock will be used.
+ */
+static void fas_srv_reposition(struct server *s)
+{
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ if (s->lb_tree) { /* skip servers currently out of the tree (e.g. failed checks) */
+ fas_dequeue_srv(s);
+ fas_queue_srv(s);
+ }
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+}
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to down.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely down (the caller may not
+ * know all the variables of a server's state).
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fas_set_server_status_down(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (srv_willbe_usable(srv))
+ goto out_update_state; /* remains usable: nothing to dequeue */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (!srv_currently_usable(srv))
+ /* server was already down */
+ goto out_update_backend;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck -= srv->cur_eweight;
+ p->srv_bck--;
+
+ if (srv == p->lbprm.fbck) {
+ /* we lost the first backup server in a single-backup
+ * configuration, we must search another one.
+ */
+ struct server *srv2 = p->lbprm.fbck;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 &&
+ !((srv2->flags & SRV_F_BACKUP) &&
+ srv_willbe_usable(srv2)));
+ p->lbprm.fbck = srv2; /* may be NULL if no usable backup remains */
+ }
+ } else {
+ p->lbprm.tot_wact -= srv->cur_eweight;
+ p->srv_act--;
+ }
+
+ fas_dequeue_srv(srv);
+ fas_remove_from_tree(srv);
+
+ out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to up.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely UP (the caller may not
+ * know all the variables of a server's state). This function will not change
+ * the weight of a server which was already up.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fas_set_server_status_up(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (!srv_willbe_usable(srv))
+ goto out_update_state; /* still unusable: only commit the state */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (srv_currently_usable(srv))
+ /* server was already up */
+ goto out_update_backend;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ srv->lb_tree = &p->lbprm.fas.bck;
+ p->lbprm.tot_wbck += srv->next_eweight;
+ p->srv_bck++;
+
+ if (!(p->options & PR_O_USE_ALL_BK)) { /* single-backup mode: maintain fbck */
+ if (!p->lbprm.fbck) {
+ /* there was no backup server anymore */
+ p->lbprm.fbck = srv;
+ } else {
+ /* we may have restored a backup server prior to fbck,
+ * in which case it should replace it.
+ */
+ struct server *srv2 = srv;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 && (srv2 != p->lbprm.fbck));
+ if (srv2)
+ p->lbprm.fbck = srv;
+ }
+ }
+ } else {
+ srv->lb_tree = &p->lbprm.fas.act;
+ p->lbprm.tot_wact += srv->next_eweight;
+ p->srv_act++;
+ }
+
+ /* note that eweight cannot be 0 here */
+ fas_queue_srv(srv);
+
+ out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function must be called after an update to server <srv>'s effective
+ * weight. It may be called after a state change too.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fas_update_server_weight(struct server *srv)
+{
+ int old_state, new_state;
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ /* If changing the server's weight changes its state, we simply apply
+ * the procedures we already have for status change. If the state
+ * remains down, the server is not in any tree, so it's as easy as
+ * updating its values. If the state remains up with different weights,
+ * there are some computations to perform to find a new place and
+ * possibly a new tree for this server.
+ */
+
+ old_state = srv_currently_usable(srv);
+ new_state = srv_willbe_usable(srv);
+
+ if (!old_state && !new_state) {
+ srv_lb_commit_status(srv);
+ return;
+ }
+ else if (!old_state && new_state) {
+ fas_set_server_status_up(srv);
+ return;
+ }
+ else if (old_state && !new_state) {
+ fas_set_server_status_down(srv);
+ return;
+ }
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (srv->lb_tree)
+ fas_dequeue_srv(srv); /* remove before re-inserting in the right tree */
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck += srv->next_eweight - srv->cur_eweight;
+ srv->lb_tree = &p->lbprm.fas.bck;
+ } else {
+ p->lbprm.tot_wact += srv->next_eweight - srv->cur_eweight;
+ srv->lb_tree = &p->lbprm.fas.act;
+ }
+
+ fas_queue_srv(srv);
+
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ srv_lb_commit_status(srv);
+}
+
+/* This function is responsible for building the trees for the "first
+ * available server" algorithm. It also sets p->lbprm.wdiv to the eweight to
+ * uweight ratio. Both active and backup groups are initialized.
+ */
+void fas_init_server_tree(struct proxy *p)
+{
+ struct server *srv;
+ struct eb_root init_head = EB_ROOT;
+
+ p->lbprm.set_server_status_up = fas_set_server_status_up;
+ p->lbprm.set_server_status_down = fas_set_server_status_down;
+ p->lbprm.update_server_eweight = fas_update_server_weight;
+ p->lbprm.server_take_conn = fas_srv_reposition;
+ p->lbprm.server_drop_conn = fas_srv_reposition;
+
+ p->lbprm.wdiv = BE_WEIGHT_SCALE; /* eweight = uweight * wdiv / wmult */
+ for (srv = p->srv; srv; srv = srv->next) {
+ srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
+ srv_lb_commit_status(srv); /* commit the initial weight */
+ }
+
+ recount_servers(p);
+ update_backend_weight(p);
+
+ p->lbprm.fas.act = init_head;
+ p->lbprm.fas.bck = init_head;
+
+ /* queue active and backup servers in two distinct groups */
+ for (srv = p->srv; srv; srv = srv->next) {
+ if (!srv_currently_usable(srv))
+ continue;
+ srv->lb_tree = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fas.bck : &p->lbprm.fas.act;
+ fas_queue_srv(srv);
+ }
+}
+
+/* Return next server from the FS tree in backend <p>. If the tree is empty,
+ * return NULL. Saturated servers are skipped.
+ *
+ * The lbprm's lock will be used. The server's lock is not used.
+ */
+struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)
+{
+ struct server *srv, *avoided;
+ struct eb32_node *node;
+
+ srv = avoided = NULL;
+
+ HA_RWLOCK_RDLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ if (p->srv_act) /* active servers have priority over backups */
+ node = eb32_first(&p->lbprm.fas.act);
+ else if (p->lbprm.fbck) { /* single-backup mode: use the designated first backup */
+ srv = p->lbprm.fbck;
+ goto out;
+ }
+ else if (p->srv_bck)
+ node = eb32_first(&p->lbprm.fas.bck);
+ else {
+ srv = NULL;
+ goto out;
+ }
+
+ while (node) {
+ /* OK, we have a server. However, it may be saturated, in which
+ * case we don't want to reconsider it for now, so we'll simply
+ * skip it. Same if it's the server we try to avoid, in which
+ * case we simply remember it for later use if needed.
+ */
+ struct server *s;
+
+ s = eb32_entry(node, struct server, lb_node);
+ if (!s->maxconn || (!s->queue.length && s->served < srv_dynamic_maxconn(s))) { /* not saturated */
+ if (s != srvtoavoid) {
+ srv = s;
+ break;
+ }
+ avoided = s;
+ }
+ node = eb32_next(node);
+ }
+
+ if (!srv)
+ srv = avoided; /* everything else saturated: fall back to the avoided server */
+ out:
+ HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ return srv;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/lb_fwlc.c b/src/lb_fwlc.c
new file mode 100644
index 0000000..8e913d4
--- /dev/null
+++ b/src/lb_fwlc.c
@@ -0,0 +1,375 @@
+/*
+ * Fast Weighted Least Connection load balancing algorithm.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/queue.h>
+#include <haproxy/server-t.h>
+
+
+/* Remove a server from a tree. It must have previously been dequeued. This
+ * function is meant to be called when a server is going down or has its
+ * weight disabled.
+ *
+ * The server's lock and the lbprm's lock must be held.
+ */
+static inline void fwlc_remove_from_tree(struct server *s)
+{
+ s->lb_tree = NULL; /* only forgets the tree; the eb32 node must already be detached */
+}
+
+/* simply removes a server from a tree.
+ *
+ * The lbprm's lock must be held.
+ */
+static inline void fwlc_dequeue_srv(struct server *s)
+{
+ eb32_delete(&s->lb_node); /* detach lb_node from whatever tree holds it */
+}
+
+/* Queue a server in its associated tree, assuming the <eweight> is >0.
+ * Servers are sorted by (#conns+1)/weight. To ensure maximum accuracy,
+ * we use (#conns+1)*SRV_EWGHT_MAX/eweight as the sorting key. The reason
+ * for using #conns+1 is to sort by weights in case the server is picked
+ * and not before it is picked. This provides a better load accuracy for
+ * low connection counts when weights differ and makes sure the round-robin
+ * applies between servers of highest weight first. However servers with no
+ * connection are always picked first so that under low loads, it's not
+ * always the single server with the highest weight that gets picked.
+ *
+ * NOTE: Depending on the calling context, we use s->next_eweight or
+ * s->cur_eweight. The next value is used when the server state is updated
+ * (because the weight changed for instance). During this step, the server
+ * state is not yet committed. The current value is used to reposition the
+ * server in the tree. This happens when the server is used.
+ *
+ * The lbprm's lock must be held.
+ */
+static inline void fwlc_queue_srv(struct server *s, unsigned int eweight)
+{
+ unsigned int inflight = _HA_ATOMIC_LOAD(&s->served) + _HA_ATOMIC_LOAD(&s->queue.length);
+
+ s->lb_node.key = inflight ? (inflight + 1) * SRV_EWGHT_MAX / eweight : 0; /* key 0 when idle so idle servers sort first */
+ eb32_insert(s->lb_tree, &s->lb_node);
+}
+
+/* Re-position the server in the FWLC tree after it has been assigned one
+ * connection or after it has released one. Note that it is possible that
+ * the server has been moved out of the tree due to failed health-checks.
+ * The lbprm's lock will be used.
+ */
+static void fwlc_srv_reposition(struct server *s)
+{
+ unsigned int inflight = _HA_ATOMIC_LOAD(&s->served) + _HA_ATOMIC_LOAD(&s->queue.length);
+ unsigned int eweight = _HA_ATOMIC_LOAD(&s->cur_eweight);
+ unsigned int new_key = inflight ? (inflight + 1) * SRV_EWGHT_MAX / (eweight ? eweight : 1) : 0;
+
+ /* some calls will be made for no change (e.g connect_server() after
+ * assign_server(). Let's check that first.
+ */
+ if (s->lb_node.node.leaf_p && eweight && s->lb_node.key == new_key)
+ return; /* fast path: key unchanged, skip taking the lock */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+ if (s->lb_tree) {
+ /* we might have been waiting for a while on the lock above
+ * so it's worth testing again because other threads are very
+ * likely to have released a connection or taken one leading
+ * to our target value (50% of the case in measurements).
+ */
+ inflight = _HA_ATOMIC_LOAD(&s->served) + _HA_ATOMIC_LOAD(&s->queue.length);
+ eweight = _HA_ATOMIC_LOAD(&s->cur_eweight);
+ new_key = inflight ? (inflight + 1) * SRV_EWGHT_MAX / (eweight ? eweight : 1) : 0;
+ if (!s->lb_node.node.leaf_p || s->lb_node.key != new_key) {
+ eb32_delete(&s->lb_node);
+ s->lb_node.key = new_key;
+ eb32_insert(s->lb_tree, &s->lb_node);
+ }
+ }
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
+}
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to down.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely down (the caller may not
+ * know all the variables of a server's state).
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwlc_set_server_status_down(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (srv_willbe_usable(srv))
+ goto out_update_state; /* remains usable: nothing to dequeue */
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+
+ if (!srv_currently_usable(srv))
+ /* server was already down */
+ goto out_update_backend;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck -= srv->cur_eweight;
+ p->srv_bck--;
+
+ if (srv == p->lbprm.fbck) {
+ /* we lost the first backup server in a single-backup
+ * configuration, we must search another one.
+ */
+ struct server *srv2 = p->lbprm.fbck;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 &&
+ !((srv2->flags & SRV_F_BACKUP) &&
+ srv_willbe_usable(srv2)));
+ p->lbprm.fbck = srv2; /* may be NULL if no usable backup remains */
+ }
+ } else {
+ p->lbprm.tot_wact -= srv->cur_eweight;
+ p->srv_act--;
+ }
+
+ fwlc_dequeue_srv(srv);
+ fwlc_remove_from_tree(srv);
+
+out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to up.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely UP (the caller may not
+ * know all the variables of a server's state). This function will not change
+ * the weight of a server which was already up.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwlc_set_server_status_up(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (!srv_willbe_usable(srv))
+ goto out_update_state; /* still unusable: only commit the state */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (srv_currently_usable(srv))
+ /* server was already up */
+ goto out_update_backend;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ srv->lb_tree = &p->lbprm.fwlc.bck;
+ p->lbprm.tot_wbck += srv->next_eweight;
+ p->srv_bck++;
+
+ if (!(p->options & PR_O_USE_ALL_BK)) { /* single-backup mode: maintain fbck */
+ if (!p->lbprm.fbck) {
+ /* there was no backup server anymore */
+ p->lbprm.fbck = srv;
+ } else {
+ /* we may have restored a backup server prior to fbck,
+ * in which case it should replace it.
+ */
+ struct server *srv2 = srv;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 && (srv2 != p->lbprm.fbck));
+ if (srv2)
+ p->lbprm.fbck = srv;
+ }
+ }
+ } else {
+ srv->lb_tree = &p->lbprm.fwlc.act;
+ p->lbprm.tot_wact += srv->next_eweight;
+ p->srv_act++;
+ }
+
+ /* note that eweight cannot be 0 here */
+ fwlc_queue_srv(srv, srv->next_eweight);
+
+ out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function must be called after an update to server <srv>'s effective
+ * weight. It may be called after a state change too.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwlc_update_server_weight(struct server *srv)
+{
+ int old_state, new_state;
+ struct proxy *p = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ /* If changing the server's weight changes its state, we simply apply
+ * the procedures we already have for status change. If the state
+ * remains down, the server is not in any tree, so it's as easy as
+ * updating its values. If the state remains up with different weights,
+ * there are some computations to perform to find a new place and
+ * possibly a new tree for this server.
+ */
+
+ old_state = srv_currently_usable(srv);
+ new_state = srv_willbe_usable(srv);
+
+ if (!old_state && !new_state) {
+ srv_lb_commit_status(srv);
+ return;
+ }
+ else if (!old_state && new_state) {
+ fwlc_set_server_status_up(srv);
+ return;
+ }
+ else if (old_state && !new_state) {
+ fwlc_set_server_status_down(srv);
+ return;
+ }
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (srv->lb_tree)
+ fwlc_dequeue_srv(srv); /* remove before re-inserting with the new weight */
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck += srv->next_eweight - srv->cur_eweight;
+ srv->lb_tree = &p->lbprm.fwlc.bck;
+ } else {
+ p->lbprm.tot_wact += srv->next_eweight - srv->cur_eweight;
+ srv->lb_tree = &p->lbprm.fwlc.act;
+ }
+
+ fwlc_queue_srv(srv, srv->next_eweight);
+
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ srv_lb_commit_status(srv);
+}
+
+/* This function is responsible for building the trees in case of fast
+ * weighted least-conns. It also sets p->lbprm.wdiv to the eweight to
+ * uweight ratio. Both active and backup groups are initialized.
+ */
+void fwlc_init_server_tree(struct proxy *p)
+{
+ struct server *srv;
+ struct eb_root init_head = EB_ROOT;
+
+ p->lbprm.set_server_status_up = fwlc_set_server_status_up;
+ p->lbprm.set_server_status_down = fwlc_set_server_status_down;
+ p->lbprm.update_server_eweight = fwlc_update_server_weight;
+ p->lbprm.server_take_conn = fwlc_srv_reposition;
+ p->lbprm.server_drop_conn = fwlc_srv_reposition;
+
+ p->lbprm.wdiv = BE_WEIGHT_SCALE; /* eweight = uweight * wdiv / wmult */
+ for (srv = p->srv; srv; srv = srv->next) {
+ srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
+ srv_lb_commit_status(srv); /* commit the initial weight */
+ }
+
+ recount_servers(p);
+ update_backend_weight(p);
+
+ p->lbprm.fwlc.act = init_head;
+ p->lbprm.fwlc.bck = init_head;
+
+ /* queue active and backup servers in two distinct groups */
+ for (srv = p->srv; srv; srv = srv->next) {
+ if (!srv_currently_usable(srv))
+ continue;
+ srv->lb_tree = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwlc.bck : &p->lbprm.fwlc.act;
+ fwlc_queue_srv(srv, srv->next_eweight);
+ }
+}
+
+/* Return next server from the FWLC tree in backend <p>. If the tree is empty,
+ * return NULL. Saturated servers are skipped.
+ *
+ * The lbprm's lock will be used in R/O mode. The server's lock is not used.
+ */
+struct server *fwlc_get_next_server(struct proxy *p, struct server *srvtoavoid)
+{
+ struct server *srv, *avoided;
+ struct eb32_node *node;
+
+ srv = avoided = NULL;
+
+ HA_RWLOCK_RDLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ if (p->srv_act) /* active servers have priority over backups */
+ node = eb32_first(&p->lbprm.fwlc.act);
+ else if (p->lbprm.fbck) { /* single-backup mode: use the designated first backup */
+ srv = p->lbprm.fbck;
+ goto out;
+ }
+ else if (p->srv_bck)
+ node = eb32_first(&p->lbprm.fwlc.bck);
+ else {
+ srv = NULL;
+ goto out;
+ }
+
+ while (node) {
+ /* OK, we have a server. However, it may be saturated, in which
+ * case we don't want to reconsider it for now, so we'll simply
+ * skip it. Same if it's the server we try to avoid, in which
+ * case we simply remember it for later use if needed.
+ */
+ struct server *s;
+
+ s = eb32_entry(node, struct server, lb_node);
+ if (!s->maxconn || s->served + s->queue.length < srv_dynamic_maxconn(s) + s->maxqueue) { /* not saturated (queue capacity included) */
+ if (s != srvtoavoid) {
+ srv = s;
+ break;
+ }
+ avoided = s;
+ }
+ node = eb32_next(node);
+ }
+
+ if (!srv)
+ srv = avoided; /* everything else saturated: fall back to the avoided server */
+ out:
+ HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ return srv;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/lb_fwrr.c b/src/lb_fwrr.c
new file mode 100644
index 0000000..a762623
--- /dev/null
+++ b/src/lb_fwrr.c
@@ -0,0 +1,623 @@
+/*
+ * Fast Weighted Round Robin load balancing algorithm.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/queue.h>
+#include <haproxy/server-t.h>
+
+
+static inline void fwrr_remove_from_tree(struct server *s);
+static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s);
+static inline void fwrr_dequeue_srv(struct server *s);
+static void fwrr_get_srv(struct server *s);
+static void fwrr_queue_srv(struct server *s);
+
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to down.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely down (the caller may not
+ * know all the variables of a server's state).
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwrr_set_server_status_down(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+ struct fwrr_group *grp;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (srv_willbe_usable(srv))
+ goto out_update_state; /* remains usable: nothing to dequeue */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (!srv_currently_usable(srv))
+ /* server was already down */
+ goto out_update_backend;
+
+ grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
+ grp->next_weight -= srv->cur_eweight; /* deduct it from the group's future total weight */
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
+ p->srv_bck--;
+
+ if (srv == p->lbprm.fbck) {
+ /* we lost the first backup server in a single-backup
+ * configuration, we must search another one.
+ */
+ struct server *srv2 = p->lbprm.fbck;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 &&
+ !((srv2->flags & SRV_F_BACKUP) &&
+ srv_willbe_usable(srv2)));
+ p->lbprm.fbck = srv2; /* may be NULL if no usable backup remains */
+ }
+ } else {
+ p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
+ p->srv_act--;
+ }
+
+ fwrr_dequeue_srv(srv);
+ fwrr_remove_from_tree(srv);
+
+out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function updates the server trees according to server <srv>'s new
+ * state. It should be called when server <srv>'s status changes to up.
+ * It is not important whether the server was already down or not. It is not
+ * important either that the new state is completely UP (the caller may not
+ * know all the variables of a server's state). This function will not change
+ * the weight of a server which was already up.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwrr_set_server_status_up(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+ struct fwrr_group *grp;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ if (!srv_willbe_usable(srv))
+ goto out_update_state; /* still unusable: only commit the state */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ if (srv_currently_usable(srv))
+ /* server was already up */
+ goto out_update_backend;
+
+ grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
+ grp->next_weight += srv->next_eweight; /* add it to the group's future total weight */
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
+ p->srv_bck++;
+
+ if (!(p->options & PR_O_USE_ALL_BK)) { /* single-backup mode: maintain fbck */
+ if (!p->lbprm.fbck) {
+ /* there was no backup server anymore */
+ p->lbprm.fbck = srv;
+ } else {
+ /* we may have restored a backup server prior to fbck,
+ * in which case it should replace it.
+ */
+ struct server *srv2 = srv;
+ do {
+ srv2 = srv2->next;
+ } while (srv2 && (srv2 != p->lbprm.fbck));
+ if (srv2)
+ p->lbprm.fbck = srv;
+ }
+ }
+ } else {
+ p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
+ p->srv_act++;
+ }
+
+ /* note that eweight cannot be 0 here */
+ fwrr_get_srv(srv);
+ srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight; /* pick its next position in the round; see fwrr_queue_srv() */
+ fwrr_queue_srv(srv);
+
+out_update_backend:
+ /* check/update tot_used, tot_weight */
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ out_update_state:
+ srv_lb_commit_status(srv);
+}
+
+/* This function must be called after an update to server <srv>'s effective
+ * weight. It may be called after a state change too.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void fwrr_update_server_weight(struct server *srv)
+{
+ int old_state, new_state;
+ struct proxy *p = srv->proxy;
+ struct fwrr_group *grp;
+
+ if (!srv_lb_status_changed(srv))
+ return;
+
+ /* If changing the server's weight changes its state, we simply apply
+ * the procedures we already have for status change. If the state
+ * remains down, the server is not in any tree, so it's as easy as
+ * updating its values. If the state remains up with different weights,
+ * there are some computations to perform to find a new place and
+ * possibly a new tree for this server.
+ */
+
+ old_state = srv_currently_usable(srv);
+ new_state = srv_willbe_usable(srv);
+
+ if (!old_state && !new_state) {
+ srv_lb_commit_status(srv);
+ return;
+ }
+ else if (!old_state && new_state) {
+ fwrr_set_server_status_up(srv);
+ return;
+ }
+ else if (old_state && !new_state) {
+ fwrr_set_server_status_down(srv);
+ return;
+ }
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ grp = (srv->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
+ grp->next_weight = grp->next_weight - srv->cur_eweight + srv->next_eweight; /* swap old weight for new in the group total */
+
+ p->lbprm.tot_wact = p->lbprm.fwrr.act.next_weight;
+ p->lbprm.tot_wbck = p->lbprm.fwrr.bck.next_weight;
+
+ if (srv->lb_tree == grp->init) { /* still waiting in the init tree: re-sort by new weight */
+ fwrr_dequeue_srv(srv);
+ fwrr_queue_by_weight(grp->init, srv);
+ }
+ else if (!srv->lb_tree) {
+ /* FIXME: server was down. This is not possible right now but
+ * may be needed soon for slowstart or graceful shutdown.
+ */
+ fwrr_dequeue_srv(srv);
+ fwrr_get_srv(srv);
+ srv->npos = grp->curr_pos + (grp->next_weight + grp->curr_weight - grp->curr_pos) / srv->next_eweight;
+ fwrr_queue_srv(srv);
+ } else {
+ /* The server is either active or in the next queue. If it's
+ * still in the active queue and it has not consumed all of its
+ * places, let's adjust its next position.
+ */
+ fwrr_get_srv(srv);
+
+ if (srv->next_eweight > 0) {
+ int prev_next = srv->npos;
+ int step = grp->next_weight / srv->next_eweight;
+
+ srv->npos = srv->lpos + step;
+ srv->rweight = 0;
+
+ if (srv->npos > prev_next)
+ srv->npos = prev_next;
+ if (srv->npos < grp->curr_pos + 2)
+ srv->npos = grp->curr_pos + step;
+ } else {
+ /* push it into the next tree */
+ srv->npos = grp->curr_pos + grp->curr_weight;
+ }
+
+ fwrr_dequeue_srv(srv);
+ fwrr_queue_srv(srv);
+ }
+
+ update_backend_weight(p);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+
+ srv_lb_commit_status(srv);
+}
+
+/* Remove a server from a tree. It must have previously been dequeued. This
+ * function is meant to be called when a server is going down or has its
+ * weight disabled.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_remove_from_tree(struct server *s)
+{
+ s->lb_tree = NULL; /* only forgets the tree; the eb32 node must already be detached */
+}
+
+/* Queue a server in the weight tree <root>, assuming the weight is >0.
+ * We want to sort them by inverted weights, because we need to place
+ * heavy servers first in order to get a smooth distribution.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_queue_by_weight(struct eb_root *root, struct server *s)
+{
+ s->lb_node.key = SRV_EWGHT_MAX - s->next_eweight; /* inverted key: heaviest servers first */
+ eb32_insert(root, &s->lb_node);
+ s->lb_tree = root;
+}
+
+/* This function is responsible for building the weight trees in case of fast
+ * weighted round-robin. It also sets p->lbprm.wdiv to the eweight to uweight
+ * ratio. Both active and backup groups are initialized.
+ */
+void fwrr_init_server_groups(struct proxy *p)
+{
+ struct server *srv;
+ struct eb_root init_head = EB_ROOT;
+
+ p->lbprm.set_server_status_up = fwrr_set_server_status_up;
+ p->lbprm.set_server_status_down = fwrr_set_server_status_down;
+ p->lbprm.update_server_eweight = fwrr_update_server_weight;
+
+ p->lbprm.wdiv = BE_WEIGHT_SCALE; /* eweight = uweight * wdiv / wmult */
+ for (srv = p->srv; srv; srv = srv->next) {
+ srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
+ srv_lb_commit_status(srv); /* commit the initial weight */
+ }
+
+ recount_servers(p);
+ update_backend_weight(p);
+
+ /* prepare the active servers group */
+ p->lbprm.fwrr.act.curr_pos = p->lbprm.fwrr.act.curr_weight =
+ p->lbprm.fwrr.act.next_weight = p->lbprm.tot_wact;
+ p->lbprm.fwrr.act.curr = p->lbprm.fwrr.act.t0 =
+ p->lbprm.fwrr.act.t1 = init_head;
+ p->lbprm.fwrr.act.init = &p->lbprm.fwrr.act.t0; /* init and next start on distinct trees (t0/t1) */
+ p->lbprm.fwrr.act.next = &p->lbprm.fwrr.act.t1;
+
+ /* prepare the backup servers group */
+ p->lbprm.fwrr.bck.curr_pos = p->lbprm.fwrr.bck.curr_weight =
+ p->lbprm.fwrr.bck.next_weight = p->lbprm.tot_wbck;
+ p->lbprm.fwrr.bck.curr = p->lbprm.fwrr.bck.t0 =
+ p->lbprm.fwrr.bck.t1 = init_head;
+ p->lbprm.fwrr.bck.init = &p->lbprm.fwrr.bck.t0;
+ p->lbprm.fwrr.bck.next = &p->lbprm.fwrr.bck.t1;
+
+ /* queue active and backup servers in two distinct groups */
+ for (srv = p->srv; srv; srv = srv->next) {
+ if (!srv_currently_usable(srv))
+ continue;
+ fwrr_queue_by_weight((srv->flags & SRV_F_BACKUP) ?
+ p->lbprm.fwrr.bck.init :
+ p->lbprm.fwrr.act.init,
+ srv);
+ }
+}
+
+/* simply removes a server from a weight tree.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_dequeue_srv(struct server *s)
+{
+ eb32_delete(&s->lb_node); /* unlink lb_node from its current tree */
+}
+
+/* queues a server into the appropriate group and tree depending on its
+ * backup status, and ->npos. If the server is disabled, simply assign
+ * it to the NULL tree.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static void fwrr_queue_srv(struct server *s)
+{
+ struct proxy *p = s->proxy;
+ struct fwrr_group *grp;
+
+ grp = (s->flags & SRV_F_BACKUP) ? &p->lbprm.fwrr.bck : &p->lbprm.fwrr.act;
+
+ /* Delay everything which does not fit into the window and everything
+ * which does not fit into the theoretical new window.
+ */
+ if (!srv_willbe_usable(s)) {
+ fwrr_remove_from_tree(s); /* disabled servers belong to no tree */
+ }
+ else if (s->next_eweight <= 0 ||
+ s->npos >= 2 * grp->curr_weight ||
+ s->npos >= grp->curr_weight + grp->next_weight) {
+ /* put into next tree, and readjust npos in case we could
+ * finally take this back to current. */
+ s->npos -= grp->curr_weight;
+ fwrr_queue_by_weight(grp->next, s);
+ }
+ else {
+ /* The sorting key is stored in units of s->npos * user_weight
+ * in order to avoid overflows. As stated in backend.h, the
+ * lower the scale, the rougher the weights modulation, and the
+ * higher the scale, the lower the number of servers without
+ * overflow. With this formula, the result is always positive,
+ * so we can use eb32_insert().
+ */
+ s->lb_node.key = SRV_UWGHT_RANGE * s->npos +
+ (unsigned)(SRV_EWGHT_MAX + s->rweight - s->next_eweight) / BE_WEIGHT_SCALE;
+
+ eb32_insert(&grp->curr, &s->lb_node);
+ s->lb_tree = &grp->curr;
+ }
+}
+
+/* prepares a server when extracting it from the "init" tree: its position
+ * and remainder are both reset since it starts a fresh round.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_get_srv_init(struct server *s)
+{
+        s->npos = s->rweight = 0;
+}
+
+/* prepares a server when extracting it from the "next" tree: its position is
+ * advanced by one full round of the group's current weight.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_get_srv_next(struct server *s)
+{
+        struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
+                &s->proxy->lbprm.fwrr.bck :
+                &s->proxy->lbprm.fwrr.act;
+
+        s->npos += grp->curr_weight;
+}
+
+/* prepares a server when it was marked down: it restarts from the group's
+ * current position so that it does not get an unfair burst on recovery.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_get_srv_down(struct server *s)
+{
+        struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
+                &s->proxy->lbprm.fwrr.bck :
+                &s->proxy->lbprm.fwrr.act;
+
+        s->npos = grp->curr_pos;
+}
+
+/* prepares a server when extracting it from its tree, dispatching to the
+ * relevant helper depending on which tree ("init", "next" or none) the
+ * server currently sits in. A server in the "curr" tree needs no adjustment.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static void fwrr_get_srv(struct server *s)
+{
+        struct proxy *p = s->proxy;
+        struct fwrr_group *grp = (s->flags & SRV_F_BACKUP) ?
+                &p->lbprm.fwrr.bck :
+                &p->lbprm.fwrr.act;
+
+        if (s->lb_tree == grp->init) {
+                fwrr_get_srv_init(s);
+        }
+        else if (s->lb_tree == grp->next) {
+                fwrr_get_srv_next(s);
+        }
+        else if (s->lb_tree == NULL) {
+                fwrr_get_srv_down(s);
+        }
+}
+
+/* Swaps the "init" and "next" trees of FWRR group <grp>. At this point "init"
+ * is expected to be empty and "next" filled with servers sorted by weights.
+ * The next weight then becomes the current one and the position counter is
+ * reset for a full new round.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static inline void fwrr_switch_trees(struct fwrr_group *grp)
+{
+        struct eb_root *tmp = grp->init;
+
+        grp->init = grp->next;
+        grp->next = tmp;
+
+        /* restart a complete round on the freshly promoted tree */
+        grp->curr_weight = grp->next_weight;
+        grp->curr_pos = grp->curr_weight;
+}
+
+/* return next server from the current tree in FWRR group <grp>, or a server
+ * from the "init" tree if appropriate. If both trees are empty, return NULL.
+ *
+ * The lbprm's lock must be held. The server's lock is not used.
+ */
+static struct server *fwrr_get_server_from_group(struct fwrr_group *grp)
+{
+        struct eb32_node *node1;
+        struct eb32_node *node2;
+        struct server *s1 = NULL;
+        struct server *s2 = NULL;
+
+        node1 = eb32_first(&grp->curr);
+        if (node1) {
+                s1 = eb32_entry(node1, struct server, lb_node);
+                /* only pick it if its position fits the current window */
+                if (s1->cur_eweight && s1->npos <= grp->curr_pos)
+                        return s1;
+        }
+
+        /* Either we have no server left, or we have a hole. We'll look into
+         * the init tree for a better proposal. At this point, if <s1> is
+         * non-null, it is guaranteed to remain available as the tree is
+         * locked.
+         */
+        node2 = eb32_first(grp->init);
+        if (node2) {
+                s2 = eb32_entry(node2, struct server, lb_node);
+                if (s2->cur_eweight) {
+                        fwrr_get_srv_init(s2);
+                        return s2;
+                }
+        }
+        return s1;
+}
+
+/* Computes next position of server <s> in the group. Nothing is done if <s>
+ * has a zero weight.
+ *
+ * The lbprm's lock must be held to protect lpos/npos/rweight.
+ */
+static inline void fwrr_update_position(struct fwrr_group *grp, struct server *s)
+{
+        /* volatile read: snapshot the weight once so that a concurrent
+         * weight change cannot make the divisions below inconsistent.
+         */
+        unsigned int eweight = *(volatile unsigned int *)&s->cur_eweight;
+
+        if (!eweight)
+                return;
+
+        if (!s->npos) {
+                /* first time ever for this server */
+                s->npos = grp->curr_pos;
+        }
+
+        s->lpos = s->npos;
+        s->npos += grp->next_weight / eweight;
+        /* accumulate the division remainder; an extra step is taken each
+         * time it reaches a full weight, keeping the distribution exact.
+         */
+        s->rweight += grp->next_weight % eweight;
+
+        if (s->rweight >= eweight) {
+                s->rweight -= eweight;
+                s->npos++;
+        }
+}
+
+/* Return next server from the current tree in backend <p>, or a server from
+ * the init tree if appropriate. If both trees are empty, return NULL.
+ * Saturated servers are skipped and requeued.
+ *
+ * The lbprm's lock will be used in R/W mode. The server's lock is not used.
+ */
+struct server *fwrr_get_next_server(struct proxy *p, struct server *srvtoavoid)
+{
+        struct server *srv, *full, *avoided;
+        struct fwrr_group *grp;
+        int switched;
+
+        HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+        if (p->srv_act)
+                grp = &p->lbprm.fwrr.act;
+        else if (p->lbprm.fbck) {
+                /* no active server but a first-backup (fbck) is set: use it */
+                srv = p->lbprm.fbck;
+                goto out;
+        }
+        else if (p->srv_bck)
+                grp = &p->lbprm.fwrr.bck;
+        else {
+                /* no usable server at all */
+                srv = NULL;
+                goto out;
+        }
+
+        switched = 0;
+        avoided = NULL;
+        full = NULL; /* NULL-terminated list of saturated servers */
+        while (1) {
+                /* if we see an empty group, let's first try to collect weights
+                 * which might have recently changed.
+                 */
+                if (!grp->curr_weight)
+                        grp->curr_pos = grp->curr_weight = grp->next_weight;
+
+                /* get first server from the "current" tree. When the end of
+                 * the tree is reached, we may have to switch, but only once.
+                 */
+                while (1) {
+                        srv = fwrr_get_server_from_group(grp);
+                        if (srv)
+                                break;
+                        if (switched) {
+                                if (avoided) {
+                                        srv = avoided;
+                                        goto take_this_one;
+                                }
+                                goto requeue_servers;
+                        }
+                        switched = 1;
+                        fwrr_switch_trees(grp);
+                }
+
+                /* OK, we have a server. However, it may be saturated, in which
+                 * case we don't want to reconsider it for now. We'll update
+                 * its position and dequeue it anyway, so that we can move it
+                 * to a better place afterwards.
+                 */
+                fwrr_update_position(grp, srv);
+                fwrr_dequeue_srv(srv);
+                grp->curr_pos++;
+                if (!srv->maxconn || (!srv->queue.length && srv->served < srv_dynamic_maxconn(srv))) {
+                        /* make sure it is not the server we are trying to exclude... */
+                        if (srv != srvtoavoid || avoided)
+                                break;
+
+                        avoided = srv; /* ...but remember that it was selected yet avoided */
+                }
+
+                /* the server is saturated or avoided, let's chain it for later reinsertion.
+                 */
+                srv->next_full = full;
+                full = srv;
+        }
+
+ take_this_one:
+        /* OK, we got the best server, let's update it */
+        fwrr_queue_srv(srv);
+
+ requeue_servers:
+        /* Requeue all extracted servers. If full==srv then it was
+         * avoided (unsuccessfully) and chained, omit it now. The
+         * only way to get there is by having <avoided>==NULL or
+         * <avoided>==<srv>.
+         */
+        if (unlikely(full != NULL)) {
+                if (switched) {
+                        /* the tree has switched, requeue all extracted servers
+                         * into "init", because their place was lost, and only
+                         * their weight matters.
+                         */
+                        do {
+                                if (likely(full != srv))
+                                        fwrr_queue_by_weight(grp->init, full);
+                                full = full->next_full;
+                        } while (full);
+                } else {
+                        /* requeue all extracted servers just as if they were consumed
+                         * so that they regain their expected place.
+                         */
+                        do {
+                                if (likely(full != srv))
+                                        fwrr_queue_srv(full);
+                                full = full->next_full;
+                        } while (full);
+                }
+        }
+ out:
+        HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+        return srv;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/lb_map.c b/src/lb_map.c
new file mode 100644
index 0000000..592df91
--- /dev/null
+++ b/src/lb_map.c
@@ -0,0 +1,281 @@
+/*
+ * Map-based load-balancing (RR and HASH)
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/lb_map.h>
+#include <haproxy/queue.h>
+#include <haproxy/server-t.h>
+
+/* this function updates the map according to server <srv>'s new state: it is
+ * called when a server goes down, and rebuilds the whole map unless the
+ * server will actually remain usable.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void map_set_server_status_down(struct server *srv)
+{
+        struct proxy *p = srv->proxy;
+
+        if (!srv_lb_status_changed(srv))
+                return;
+
+        if (srv_willbe_usable(srv))
+                goto out_update_state;
+
+        /* FIXME: could be optimized since we know what changed */
+        HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+        recount_servers(p);
+        update_backend_weight(p);
+        recalc_server_map(p);
+        HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ out_update_state:
+        srv_lb_commit_status(srv);
+}
+
+/* This function updates the map according to server <srv>'s new state: it is
+ * called when a server comes back up, and rebuilds the whole map unless the
+ * server will actually remain unusable.
+ *
+ * The server's lock must be held. The lbprm's lock will be used.
+ */
+static void map_set_server_status_up(struct server *srv)
+{
+        struct proxy *p = srv->proxy;
+
+        if (!srv_lb_status_changed(srv))
+                return;
+
+        if (!srv_willbe_usable(srv))
+                goto out_update_state;
+
+        /* FIXME: could be optimized since we know what changed */
+        HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+        recount_servers(p);
+        update_backend_weight(p);
+        recalc_server_map(p);
+        HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ out_update_state:
+        srv_lb_commit_status(srv);
+}
+
+/* This function recomputes the server map for proxy px. It relies on
+ * px->lbprm.tot_wact, tot_wbck, tot_used, tot_weight, so it must be
+ * called after recount_servers(). It also expects px->lbprm.map.srv
+ * to be allocated with the largest size needed. It updates tot_weight.
+ *
+ * The lbprm's lock must be held.
+ */
+void recalc_server_map(struct proxy *px)
+{
+        int o, tot, flag;
+        struct server *cur, *best;
+
+        switch (px->lbprm.tot_used) {
+        case 0: /* no server */
+                return;
+        default:
+                tot = px->lbprm.tot_weight;
+                break;
+        }
+
+        /* here we *know* that we have some servers */
+        if (px->srv_act)
+                flag = 0;
+        else
+                flag = SRV_F_BACKUP; /* no active server left: map the backups */
+
+        /* this algorithm gives priority to the first server, which means that
+         * it will respect the declaration order for equivalent weights, and
+         * that whatever the weights, the first server called will always be
+         * the first declared. This is an important assumption for the backup
+         * case, where we want the first server only.
+         */
+        for (cur = px->srv; cur; cur = cur->next)
+                cur->wscore = 0;
+
+        for (o = 0; o < tot; o++) {
+                int max = 0;
+                best = NULL;
+                for (cur = px->srv; cur; cur = cur->next) {
+                        if ((cur->flags & SRV_F_BACKUP) == flag &&
+                            srv_willbe_usable(cur)) {
+                                int v;
+
+                                /* If we are forced to return only one server, we don't want to
+                                 * go further, because we would return the wrong one due to
+                                 * divide overflow.
+                                 */
+                                if (tot == 1) {
+                                        best = cur;
+                                        /* note that best->wscore will be wrong but we don't care */
+                                        break;
+                                }
+
+                                _HA_ATOMIC_ADD(&cur->wscore, cur->next_eweight);
+                                v = (cur->wscore + tot) / tot; /* result between 0 and 3 */
+                                if (best == NULL || v > max) {
+                                        max = v;
+                                        best = cur;
+                                }
+                        }
+                }
+                px->lbprm.map.srv[o] = best;
+                if (best)
+                        _HA_ATOMIC_SUB(&best->wscore, tot);
+        }
+}
+
+/* This function is responsible of building the server MAP for map-based LB
+ * algorithms, allocating the map, and setting p->lbprm.wmult to the GCD of the
+ * weights if applicable. It should be called only once per proxy, at config
+ * time.
+ */
+void init_server_map(struct proxy *p)
+{
+        struct server *srv;
+        int pgcd;
+        int act, bck;
+
+        p->lbprm.set_server_status_up = map_set_server_status_up;
+        p->lbprm.set_server_status_down = map_set_server_status_down;
+        p->lbprm.update_server_eweight = NULL;
+
+        if (!p->srv)
+                return;
+
+        /* We will factor the weights to reduce the table,
+         * using Euclide's largest common divisor algorithm.
+         * Since we may have zero weights, we have to first
+         * find a non-zero weight server.
+         */
+        pgcd = 1;
+        srv = p->srv;
+        while (srv && !srv->uweight)
+                srv = srv->next;
+
+        if (srv) {
+                pgcd = srv->uweight; /* note: cannot be zero */
+                while (pgcd > 1 && (srv = srv->next)) {
+                        int w = srv->uweight;
+                        while (w) {
+                                int t = pgcd % w;
+                                pgcd = w;
+                                w = t;
+                        }
+                }
+        }
+
+        /* It is sometimes useful to know what factor to apply
+         * to the backend's effective weight to know its real
+         * weight.
+         */
+        p->lbprm.wmult = pgcd;
+
+        act = bck = 0;
+        for (srv = p->srv; srv; srv = srv->next) {
+                /* effective weight, rounded up after division by the GCD */
+                srv->next_eweight = (srv->uweight * p->lbprm.wdiv + p->lbprm.wmult - 1) / p->lbprm.wmult;
+
+                if (srv->flags & SRV_F_BACKUP)
+                        bck += srv->next_eweight;
+                else
+                        act += srv->next_eweight;
+                srv_lb_commit_status(srv);
+        }
+
+        /* this is the largest map we will ever need for this servers list */
+        if (act < bck)
+                act = bck;
+
+        if (!act)
+                act = 1;
+
+        /* NOTE(review): the calloc() result is not checked here; an OOM at
+         * config time would lead to a NULL map — confirm callers tolerate it.
+         */
+        p->lbprm.map.srv = calloc(act, sizeof(*p->lbprm.map.srv));
+        /* recounts servers and their weights */
+        recount_servers(p);
+        update_backend_weight(p);
+        recalc_server_map(p);
+}
+
+/*
+ * This function tries to find a running server with free connection slots for
+ * the proxy <px> following the round-robin method.
+ * If any server is found, it will be returned and px->lbprm.map.rr_idx will be updated
+ * to point to the next server. If no valid server is found, NULL is returned.
+ *
+ * The lbprm's lock will be used.
+ */
+struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
+{
+        int newidx, avoididx;
+        struct server *srv, *avoided;
+
+        HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
+        if (px->lbprm.tot_weight == 0) {
+                avoided = NULL;
+                goto out;
+        }
+
+        /* resync the index if the map shrank since the last call */
+        if (px->lbprm.map.rr_idx < 0 || px->lbprm.map.rr_idx >= px->lbprm.tot_weight)
+                px->lbprm.map.rr_idx = 0;
+        newidx = px->lbprm.map.rr_idx;
+
+        avoided = NULL;
+        avoididx = 0; /* shut a gcc warning */
+        do {
+                srv = px->lbprm.map.srv[newidx++];
+                if (!srv->maxconn || (!srv->queue.length && srv->served < srv_dynamic_maxconn(srv))) {
+                        /* make sure it is not the server we are trying to exclude... */
+                        /* ...but remember that it was selected yet avoided */
+                        avoided = srv;
+                        avoididx = newidx;
+                        if (srv != srvtoavoid) {
+                                px->lbprm.map.rr_idx = newidx;
+                                goto out;
+                        }
+                }
+                if (newidx == px->lbprm.tot_weight)
+                        newidx = 0;
+        } while (newidx != px->lbprm.map.rr_idx);
+
+        if (avoided)
+                px->lbprm.map.rr_idx = avoididx;
+
+ out:
+        HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+        /* return NULL or srvtoavoid if found */
+        return avoided;
+}
+
+/*
+ * This function returns the running server from the map at the location
+ * pointed to by the result of a modulo operation on <hash>. The server map may
+ * be recomputed if required before being looked up. If any server is found, it
+ * will be returned. If no valid server is found, NULL is returned.
+ *
+ * The lbprm's lock will be used.
+ */
+struct server *map_get_server_hash(struct proxy *px, unsigned int hash)
+{
+        struct server *srv;
+
+        HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
+        /* an empty weight means an empty map: report no server */
+        srv = px->lbprm.tot_weight ?
+              px->lbprm.map.srv[hash % px->lbprm.tot_weight] : NULL;
+        HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+        return srv;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/linuxcap.c b/src/linuxcap.c
new file mode 100644
index 0000000..919086c
--- /dev/null
+++ b/src/linuxcap.c
@@ -0,0 +1,191 @@
+/*
+ * Minimal handling of Linux kernel capabilities
+ *
+ * Copyright 2000-2023 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* Depending on distros, some have capset(), others use the more complicated
+ * libcap. Let's stick to what we need and the kernel documents (capset).
+ * Note that prctl is needed here.
+ */
+#include <linux/capability.h>
+#include <sys/prctl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <syscall.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/errors.h>
+#include <haproxy/tools.h>
+
+/* supported capability names mapped to their kernel constant; the table is
+ * terminated by a zero entry. Each entry is compiled in only if the kernel
+ * headers define the corresponding CAP_* constant.
+ */
+static const struct {
+        int cap;
+        const char *name;
+} known_caps[] = {
+#ifdef CAP_NET_RAW
+        { CAP_NET_RAW, "cap_net_raw" },
+#endif
+#ifdef CAP_NET_ADMIN
+        { CAP_NET_ADMIN, "cap_net_admin" },
+#endif
+#ifdef CAP_NET_BIND_SERVICE
+        { CAP_NET_BIND_SERVICE, "cap_net_bind_service" },
+#endif
+        /* must be last */
+        { 0, 0 }
+};
+
+/* provided by sys/capability.h on some distros; here we go straight to the
+ * raw syscall to avoid a libcap dependency.
+ */
+static inline int capset(cap_user_header_t hdrp, const cap_user_data_t datap)
+{
+        return syscall(SYS_capset, hdrp, datap);
+}
+
+/* defaults to zero, i.e. we don't keep any cap after setuid() */
+static uint32_t caplist;
+
+/* try to apply capabilities before switching UID from <from_uid> to <to_uid>.
+ * In practice we need to do this in 4 steps:
+ *   - set PR_SET_KEEPCAPS to preserve caps across the final setuid()
+ *   - set the effective and permitted caps ;
+ *   - switch euid to non-zero
+ *   - set the effective and permitted caps again
+ *   - then the caller can safely call setuid()
+ * We don't do this if the current euid is not zero or if the target uid
+ * is zero. Returns >=0 on success, negative on failure. Alerts or warnings
+ * may be emitted.
+ */
+int prepare_caps_for_setuid(int from_uid, int to_uid)
+{
+        struct __user_cap_data_struct cap_data = { };
+        struct __user_cap_header_struct cap_hdr = {
+                .pid = 0, /* current process */
+                .version = _LINUX_CAPABILITY_VERSION_1,
+        };
+
+        if (from_uid != 0)
+                return 0;
+
+        if (!to_uid)
+                return 0;
+
+        /* <caplist> is filled by the "setcap" config keyword; nothing to do
+         * when no capability was requested.
+         */
+        if (!caplist)
+                return 0;
+
+        if (prctl(PR_SET_KEEPCAPS, 1) == -1) {
+                ha_alert("Failed to preserve capabilities using prctl(): %s\n", strerror(errno));
+                return -1;
+        }
+
+        /* CAP_SETUID is temporarily added so that the euid switch below and
+         * the caller's final setuid() remain permitted.
+         */
+        cap_data.effective = cap_data.permitted = caplist | (1 << CAP_SETUID);
+        if (capset(&cap_hdr, &cap_data) == -1) {
+                ha_alert("Failed to preset the capabilities to preserve using capset(): %s\n", strerror(errno));
+                return -1;
+        }
+
+        if (seteuid(to_uid) == -1) {
+                ha_alert("Failed to set effective uid to %d: %s\n", to_uid, strerror(errno));
+                return -1;
+        }
+
+        cap_data.effective = cap_data.permitted = caplist | (1 << CAP_SETUID);
+        if (capset(&cap_hdr, &cap_data) == -1) {
+                ha_alert("Failed to set the final capabilities using capset(): %s\n", strerror(errno));
+                return -1;
+        }
+        /* all's good */
+        return 0;
+}
+
+/* finalize the capabilities after setuid(). The most important is to drop the
+ * CAP_SETUID capability, which would otherwise allow to switch back to any
+ * UID and recover everything.
+ */
+int finalize_caps_after_setuid(int from_uid, int to_uid)
+{
+        struct __user_cap_data_struct cap_data = { };
+        struct __user_cap_header_struct cap_hdr = {
+                .pid = 0, /* current process */
+                .version = _LINUX_CAPABILITY_VERSION_1,
+        };
+
+        if (from_uid != 0)
+                return 0;
+
+        if (!to_uid)
+                return 0;
+
+        if (!caplist)
+                return 0;
+
+        /* keep only the configured caps: CAP_SETUID is implicitly dropped */
+        cap_data.effective = cap_data.permitted = caplist;
+        if (capset(&cap_hdr, &cap_data) == -1) {
+                ha_alert("Failed to drop the setuid capability using capset(): %s\n", strerror(errno));
+                return -1;
+        }
+        /* all's good */
+        return 0;
+}
+
+/* parse the "setcap" global keyword. The argument is a comma-delimited list
+ * of capability names which are OR'ed into the global <caplist>. Returns -1
+ * on failure (with <err> filled), 0 on success.
+ */
+static int cfg_parse_global_setcap(char **args, int section_type,
+                                   struct proxy *curpx, const struct proxy *defpx,
+                                   const char *file, int line, char **err)
+{
+        char *name = args[1];
+        char *next;
+        uint32_t caps = 0;
+        int id;
+
+        if (!*name) {
+                memprintf(err, "'%s' : missing capability name(s). ", args[0]);
+                goto dump_caps;
+        }
+
+        while (name && *name) {
+                next = strchr(name, ',');
+                if (next)
+                        *(next++) = '\0';
+
+                for (id = 0; known_caps[id].cap; id++) {
+                        if (strcmp(name, known_caps[id].name) == 0) {
+                                caps |= 1U << known_caps[id].cap;
+                                break;
+                        }
+                }
+
+                if (!known_caps[id].cap) {
+                        /* report the offending token: args[1] was tokenized
+                         * in place above, so it only holds the first name and
+                         * would mis-report which capability was rejected.
+                         */
+                        memprintf(err, "'%s' : unsupported capability '%s'. ", args[0], name);
+                        goto dump_caps;
+                }
+                name = next;
+        }
+
+        caplist |= caps;
+        return 0;
+
+
+ dump_caps:
+        /* append the list of supported names to the error message */
+        memprintf(err, "%s Supported ones are: ", *err);
+
+        for (id = 0; known_caps[id].cap; id++)
+                memprintf(err, "%s%s%s%s", *err,
+                          id ? known_caps[id+1].cap ? ", " : " and " : "",
+                          known_caps[id].name, known_caps[id+1].cap ? "" : ".");
+        return -1;
+}
+
+/* register the "setcap" global config keyword at startup */
+static struct cfg_kw_list cfg_kws = {ILH, {
+        { CFG_GLOBAL, "setcap", cfg_parse_global_setcap },
+        { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/listener.c b/src/listener.c
new file mode 100644
index 0000000..86d0945
--- /dev/null
+++ b/src/listener.c
@@ -0,0 +1,2487 @@
+/*
+ * Listener management functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli-t.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/sample.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+
+
+/* List head of all known bind keywords */
+struct bind_kw_list bind_keywords = {
+ .list = LIST_HEAD_INIT(bind_keywords.list)
+};
+
+/* list of the temporarily limited listeners because of lack of resource */
+static struct mt_list global_listener_queue = MT_LIST_HEAD_INIT(global_listener_queue);
+static struct task *global_listener_queue_task;
+/* number of times an accepted connection resulted in maxconn being reached */
+ullong maxconn_reached = 0;
+__decl_thread(static HA_RWLOCK_T global_listener_rwlock);
+
+/* listener status for stats */
+const char* li_status_st[LI_STATE_COUNT] = {
+ [LI_STATUS_WAITING] = "WAITING",
+ [LI_STATUS_OPEN] = "OPEN",
+ [LI_STATUS_FULL] = "FULL",
+};
+
+#if defined(USE_THREAD)
+
+struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64))) = { };
+
+/* dequeue and process a pending connection from the local accept queue (single
+ * consumer). Returns the accepted connection or NULL if none was found.
+ * The ring index packs the head in the upper 16 bits and the tail in the
+ * lower 16 bits of a single atomic word.
+ */
+struct connection *accept_queue_pop_sc(struct accept_queue_ring *ring)
+{
+        unsigned int pos, next;
+        struct connection *ptr;
+        struct connection **e;
+        uint32_t idx = _HA_ATOMIC_LOAD(&ring->idx); /* (head << 16) + tail */
+
+        pos = idx >> 16;
+        if (pos == (uint16_t)idx)
+                return NULL; /* head == tail: ring is empty */
+
+        next = pos + 1;
+        if (next >= ACCEPT_QUEUE_SIZE)
+                next = 0;
+
+        e = &ring->entry[pos];
+
+        /* wait for the producer to update the listener's pointer */
+        while (1) {
+                ptr = *e;
+                __ha_barrier_load();
+                if (ptr)
+                        break;
+                pl_cpu_relax();
+        }
+
+        /* release the entry */
+        *e = NULL;
+
+        __ha_barrier_store();
+        do {
+                pos = (next << 16) | (idx & 0xffff);
+        } while (unlikely(!HA_ATOMIC_CAS(&ring->idx, &idx, pos) && __ha_cpu_relax()));
+
+        return ptr;
+}
+
+
+/* tries to push a new accepted connection <conn> into ring <ring>. Returns
+ * non-zero if it succeeds, or zero if the ring is full. Supports multiple
+ * producers: the tail slot is reserved with a CAS before the entry is
+ * written, so concurrent pushers never collide on the same slot.
+ */
+int accept_queue_push_mp(struct accept_queue_ring *ring, struct connection *conn)
+{
+        unsigned int pos, next;
+        uint32_t idx = _HA_ATOMIC_LOAD(&ring->idx); /* (head << 16) + tail */
+
+        do {
+                pos = (uint16_t)idx;
+                next = pos + 1;
+                if (next >= ACCEPT_QUEUE_SIZE)
+                        next = 0;
+                if (next == (idx >> 16))
+                        return 0; // ring full
+                next |= (idx & 0xffff0000U);
+        } while (unlikely(!_HA_ATOMIC_CAS(&ring->idx, &idx, next) && __ha_cpu_relax()));
+
+        ring->entry[pos] = conn;
+        __ha_barrier_store();
+        return 1;
+}
+
+/* proceed with accepting new connections. Don't mark it static so that it appears
+ * in task dumps.
+ */
+struct task *accept_queue_process(struct task *t, void *context, unsigned int state)
+{
+        struct accept_queue_ring *ring = context;
+        struct connection *conn;
+        struct listener *li;
+        unsigned int max_accept;
+        int ret;
+
+        /* if global.tune.maxaccept is -1, then max_accept is UINT_MAX. It
+         * is not really unlimited, but it is probably enough.
+         */
+        max_accept = global.tune.maxaccept ? global.tune.maxaccept : MAX_ACCEPT;
+        for (; max_accept; max_accept--) {
+                conn = accept_queue_pop_sc(ring);
+                if (!conn)
+                        break;
+
+                li = __objt_listener(conn->target);
+                _HA_ATOMIC_INC(&li->thr_conn[ti->ltid]);
+                ret = li->bind_conf->accept(conn);
+                if (ret <= 0) {
+                        /* connection was terminated by the application */
+                        continue;
+                }
+
+                /* increase the per-process number of cumulated sessions, this
+                 * may only be done once l->bind_conf->accept() has accepted the
+                 * connection.
+                 */
+                if (!(li->bind_conf->options & BC_O_UNLIMITED)) {
+                        HA_ATOMIC_UPDATE_MAX(&global.sps_max,
+                                             update_freq_ctr(&global.sess_per_sec, 1));
+                        if (li->bind_conf->options & BC_O_USE_SSL) {
+                                HA_ATOMIC_UPDATE_MAX(&global.ssl_max,
+                                                     update_freq_ctr(&global.ssl_per_sec, 1));
+                        }
+                }
+        }
+
+        /* ran out of budget ? Let's come here ASAP */
+        if (!max_accept)
+                tasklet_wakeup(ring->tasklet);
+
+        return NULL;
+}
+
+/* Initializes the accept-queues: one tasklet per thread, each bound to its
+ * own ring. Returns 0 on success, otherwise ERR_* flags.
+ */
+static int accept_queue_init()
+{
+        struct tasklet *t;
+        int i;
+
+        for (i = 0; i < global.nbthread; i++) {
+                t = tasklet_new();
+                if (!t) {
+                        ha_alert("Out of memory while initializing accept queue for thread %d\n", i);
+                        return ERR_FATAL|ERR_ABORT;
+                }
+                t->tid = i;
+                t->process = accept_queue_process;
+                t->context = &accept_queue_rings[i];
+                accept_queue_rings[i].tasklet = t;
+        }
+        return 0;
+}
+
+REGISTER_CONFIG_POSTPARSER("multi-threaded accept queue", accept_queue_init);
+
+/* releases the per-thread accept-queue tasklets at deinit time */
+static void accept_queue_deinit()
+{
+        int i;
+
+        for (i = 0; i < global.nbthread; i++) {
+                tasklet_free(accept_queue_rings[i].tasklet);
+        }
+}
+
+REGISTER_POST_DEINIT(accept_queue_deinit);
+
+#endif // USE_THREAD
+
+/* Memory allocation and initialization of the per_thr field (one entry per
+ * bound thread).
+ * Returns 0 if the field has been successfully initialized, -1 on failure.
+ */
+int li_init_per_thr(struct listener *li)
+{
+        int nbthr = MIN(global.nbthread, MAX_THREADS_PER_GROUP);
+        int i;
+
+        /* allocate per-thread elements for listener */
+        li->per_thr = calloc(nbthr, sizeof(*li->per_thr));
+        if (!li->per_thr)
+                return -1;
+
+        for (i = 0; i < nbthr; ++i) {
+                MT_LIST_INIT(&li->per_thr[i].quic_accept.list);
+                MT_LIST_INIT(&li->per_thr[i].quic_accept.conns);
+
+                /* back-pointer so each per-thread entry can find its listener */
+                li->per_thr[i].li = li;
+        }
+
+        return 0;
+}
+
+/* helper to get listener status for stats */
+enum li_status get_li_status(struct listener *l)
+{
+        /* a maxconn of zero means no limit, hence never FULL */
+        if (l->bind_conf->maxconn && l->nbconn >= l->bind_conf->maxconn)
+                return LI_STATUS_FULL;
+
+        return (l->state == LI_LIMITED) ? LI_STATUS_WAITING : LI_STATUS_OPEN;
+}
+
+/* adjust the listener's state and its proxy's listener counters if needed.
+ * It must be called under the listener's lock, but uses atomic ops to change
+ * the proxy's counters so that the proxy lock is not needed. The first switch
+ * decrements the counter of the state being left, the second increments the
+ * counter of the state being entered.
+ */
+void listener_set_state(struct listener *l, enum li_state st)
+{
+        struct proxy *px = l->bind_conf->frontend;
+
+        if (px) {
+                /* from state */
+                switch (l->state) {
+                case LI_NEW: /* first call */
+                        _HA_ATOMIC_INC(&px->li_all);
+                        break;
+                case LI_INIT:
+                case LI_ASSIGNED:
+                        break;
+                case LI_PAUSED:
+                        _HA_ATOMIC_DEC(&px->li_paused);
+                        break;
+                case LI_LISTEN:
+                        _HA_ATOMIC_DEC(&px->li_bound);
+                        break;
+                case LI_READY:
+                case LI_FULL:
+                case LI_LIMITED:
+                        _HA_ATOMIC_DEC(&px->li_ready);
+                        break;
+                }
+
+                /* to state */
+                switch (st) {
+                case LI_NEW:
+                case LI_INIT:
+                case LI_ASSIGNED:
+                        break;
+                case LI_PAUSED:
+                        BUG_ON(l->rx.fd == -1);
+                        _HA_ATOMIC_INC(&px->li_paused);
+                        break;
+                case LI_LISTEN:
+                        BUG_ON(l->rx.fd == -1 && !l->rx.rhttp.task);
+                        _HA_ATOMIC_INC(&px->li_bound);
+                        break;
+                case LI_READY:
+                case LI_FULL:
+                case LI_LIMITED:
+                        BUG_ON(l->rx.fd == -1 && !l->rx.rhttp.task);
+                        _HA_ATOMIC_INC(&px->li_ready);
+                        l->flags |= LI_F_FINALIZED;
+                        break;
+                }
+        }
+        l->state = st;
+}
+
+/* This function adds the specified listener's file descriptor to the polling
+ * lists if it is in the LI_LISTEN state. The listener enters LI_READY or
+ * LI_FULL state depending on its number of connections. In daemon mode, we
+ * also support binding only the relevant processes to their respective
+ * listeners. We don't do that in debug mode however.
+ */
+void enable_listener(struct listener *listener)
+{
+        HA_RWLOCK_WRLOCK(LISTENER_LOCK, &listener->lock);
+
+        /* If this listener is supposed to be only in the master, close it in
+         * the workers. Conversely, if it's supposed to be only in the workers
+         * close it in the master.
+         */
+        if (!!master != !!(listener->rx.flags & RX_F_MWORKER))
+                do_unbind_listener(listener);
+
+        if (listener->state == LI_LISTEN) {
+                BUG_ON(listener->rx.fd == -1 && !listener->rx.rhttp.task);
+                if ((global.mode & (MODE_DAEMON | MODE_MWORKER)) &&
+                    (!!master != !!(listener->rx.flags & RX_F_MWORKER))) {
+                        /* we don't want to enable this listener and don't
+                         * want any fd event to reach it.
+                         */
+                        do_unbind_listener(listener);
+                }
+                else if (!listener->bind_conf->maxconn || listener->nbconn < listener->bind_conf->maxconn) {
+                        listener->rx.proto->enable(listener);
+                        listener_set_state(listener, LI_READY);
+                }
+                else {
+                        /* connection limit already reached at enable time */
+                        listener_set_state(listener, LI_FULL);
+                }
+        }
+
+        HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &listener->lock);
+}
+
+/*
+ * This function completely stops a listener.
+ * The proxy's listeners count is updated and the proxy is
+ * disabled and woken up after the last one is gone.
+ * It will need to operate under the proxy's lock, the protocol's lock and
+ * the listener's lock. The caller is responsible for indicating in lpx,
+ * lpr, lli whether the respective locks are already held (non-zero) or
+ * not (zero) so that the function picks the missing ones, in this order.
+ */
+void stop_listener(struct listener *l, int lpx, int lpr, int lli)
+{
+        struct proxy *px = l->bind_conf->frontend;
+
+        if (l->bind_conf->options & BC_O_NOSTOP) {
+                /* master-worker sockpairs are never closed but don't count as a
+                 * job.
+                 */
+                return;
+        }
+
+        /* locks are always taken in the same order: proxy, proto, listener */
+        if (!lpx && px)
+                HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+
+        if (!lpr)
+                HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+
+        if (!lli)
+                HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
+
+        if (l->state > LI_INIT) {
+                do_unbind_listener(l);
+
+                if (l->state >= LI_ASSIGNED)
+                        __delete_listener(l);
+
+                if (px)
+                        proxy_cond_disable(px);
+        }
+
+        if (!lli)
+                HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+
+        if (!lpr)
+                HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+
+        if (!lpx && px)
+                HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
+}
+
+/* This function adds the specified <listener> to the protocol <proto>. It
+ * does nothing if the protocol was already added. The listener's state is
+ * automatically updated from LI_INIT to LI_ASSIGNED. The number of listeners
+ * for the protocol is updated. This must be called with the proto lock held.
+ */
+void default_add_listener(struct protocol *proto, struct listener *listener)
+{
+        if (listener->state != LI_INIT)
+                return;
+        listener_set_state(listener, LI_ASSIGNED);
+        listener->rx.proto = proto;
+        LIST_APPEND(&proto->receivers, &listener->rx.proto_list);
+        proto->nb_receivers++;
+}
+
+/* default function called to suspend a listener: it simply passes the call to
+ * the underlying receiver. This is fine for most socket-based protocols. This
+ * must be called under the listener's lock. It will return < 0 in case of
+ * failure, 0 if the listener was totally stopped, or > 0 if correctly paused.
+ * If no receiver-level suspend is provided, the operation is assumed
+ * to succeed.
+ */
+int default_suspend_listener(struct listener *l)
+{
+        if (!l->rx.proto->rx_suspend)
+                return 1;
+
+        return l->rx.proto->rx_suspend(&l->rx);
+}
+
+
/* Tries to resume a suspended listener, and returns non-zero on success or
 * zero on failure. On certain errors, an alert or a warning might be displayed.
 * It must be called with the listener's lock held. Depending on the listener's
 * state and protocol, a listen() call might be used to resume operations, or a
 * call to the receiver's resume() function might be used as well. This is
 * suitable as a default function for TCP and UDP. This must be called with the
 * listener's lock held.
 */
int default_resume_listener(struct listener *l)
{
	int ret = 1;

	if (l->state == LI_ASSIGNED) {
		/* the receiver was fully unbound (a suspend() may have
		 * completely stopped it), so it must be rebound and put
		 * back in listening state before being resumed.
		 */
		char msg[100];
		char *errmsg;
		int err;

		/* first, try to bind the receiver */
		err = l->rx.proto->fam->bind(&l->rx, &errmsg);
		if (err != ERR_NONE) {
			if (err & ERR_WARN)
				ha_warning("Resuming listener: %s\n", errmsg);
			else if (err & ERR_ALERT)
				ha_alert("Resuming listener: %s\n", errmsg);
			ha_free(&errmsg);
			if (err & (ERR_FATAL | ERR_ABORT)) {
				ret = 0;
				goto end;
			}
		}

		/* then, try to listen:
		 * for now there's still always a listening function
		 * (same check performed in protocol_bind_all())
		 */
		BUG_ON(!l->rx.proto->listen);
		err = l->rx.proto->listen(l, msg, sizeof(msg));
		if (err & ERR_ALERT)
			ha_alert("Resuming listener: %s\n", msg);
		else if (err & ERR_WARN)
			ha_warning("Resuming listener: %s\n", msg);

		if (err & (ERR_FATAL | ERR_ABORT)) {
			ret = 0;
			goto end;
		}
	}

	/* anything below LI_PAUSED at this point cannot be resumed */
	if (l->state < LI_PAUSED) {
		ret = 0;
		goto end;
	}

	/* paused listeners may additionally need a receiver-level resume */
	if (l->state == LI_PAUSED && l->rx.proto->rx_resume &&
	    l->rx.proto->rx_resume(&l->rx) <= 0)
		ret = 0;
 end:
	return ret;
}
+
+
/* This function tries to temporarily disable a listener, depending on the OS
 * capabilities. Linux unbinds the listen socket after a SHUT_RD, and ignores
 * SHUT_WR. Solaris refuses either shutdown(). OpenBSD ignores SHUT_RD but
 * closes upon SHUT_WR and refuses to rebind. So a common validation path
 * involves SHUT_WR && listen && SHUT_RD. In case of success, the FD's polling
 * is disabled. It normally returns non-zero, unless an error is reported.
 * suspend() may totally stop a listener if it doesn't support the PAUSED
 * state, in which case state will be set to ASSIGNED.
 * It will need to operate under the proxy's lock and the listener's lock.
 * The caller is responsible for indicating in lpx, lli whether the respective
 * locks are already held (non-zero) or not (zero) so that the function picks
 * the missing ones, in this order.
 */
int suspend_listener(struct listener *l, int lpx, int lli)
{
	struct proxy *px = l->bind_conf->frontend;
	int ret = 1;

	if (!lpx && px)
		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);

	if (!lli)
		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);

	/* nothing to do if not finalized yet or already paused or stopped */
	if (!(l->flags & LI_F_FINALIZED) || l->state <= LI_PAUSED)
		goto end;

	if (l->rx.proto->suspend) {
		ret = l->rx.proto->suspend(l);
		/* if the suspend() fails, we don't want to change the
		 * current listener state
		 */
		if (ret < 0)
			goto end;
	}

	/* no longer waiting for a connection slot or a resource */
	MT_LIST_DELETE(&l->wait_queue);

	/* ret == 0 means that the suspend() has been turned into
	 * an unbind(), meaning the listener is now stopped (ie: ABNS), we need
	 * to report this state change properly
	 */
	listener_set_state(l, ((ret) ? LI_PAUSED : LI_ASSIGNED));

	/* only count the suspension once per listener */
	if (px && !(l->flags & LI_F_SUSPENDED))
		px->li_suspended++;
	l->flags |= LI_F_SUSPENDED;

	/* at this point, everything is under control, no error should be
	 * returned to calling function
	 */
	ret = 1;

	if (px && !(px->flags & PR_FL_PAUSED) && !px->li_ready) {
		/* PROXY_LOCK is required */
		proxy_cond_pause(px);
		ha_warning("Paused %s %s.\n", proxy_cap_str(px->cap), px->id);
		send_log(px, LOG_WARNING, "Paused %s %s.\n", proxy_cap_str(px->cap), px->id);
	}
 end:
	if (!lli)
		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);

	if (!lpx && px)
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	return ret;
}
+
/* This function tries to resume a temporarily disabled listener. Paused, full,
 * limited and disabled listeners are handled, which means that this function
 * may replace enable_listener(). The resulting state will either be LI_READY
 * or LI_FULL. 0 is returned in case of failure to resume (eg: dead socket).
 * Listeners bound to a different process are not woken up unless we're in
 * foreground mode, and are ignored. If the listener was only in the assigned
 * state, it's totally rebound. This can happen if a suspend() has completely
 * stopped it. If the resume fails, 0 is returned and an error might be
 * displayed.
 * It will need to operate under the proxy's lock and the listener's lock.
 * The caller is responsible for indicating in lpx, lli whether the respective
 * locks are already held (non-zero) or not (zero) so that the function picks
 * the missing ones, in this order.
 */
int resume_listener(struct listener *l, int lpx, int lli)
{
	struct proxy *px = l->bind_conf->frontend;
	int ret = 1;

	if (!lpx && px)
		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);

	if (!lli)
		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);

	/* check that another thread didn't do the job in parallel (e.g. at the
	 * end of listener_accept() while we'd come from dequeue_all_listeners().
	 */
	if (MT_LIST_INLIST(&l->wait_queue))
		goto end;

	if (!(l->flags & LI_F_FINALIZED) || l->state == LI_READY)
		goto end;

	if (l->rx.proto->resume) {
		ret = l->rx.proto->resume(l);
		if (!ret)
			goto end; /* failure to resume */
	}

	/* already at the connection limit: keep polling disabled and mark
	 * the listener full instead of ready.
	 */
	if (l->bind_conf->maxconn && l->nbconn >= l->bind_conf->maxconn) {
		l->rx.proto->disable(l);
		listener_set_state(l, LI_FULL);
		goto done;
	}

	l->rx.proto->enable(l);
	listener_set_state(l, LI_READY);

 done:
	/* the listener is no longer suspended, update the proxy's count */
	if (px && (l->flags & LI_F_SUSPENDED))
		px->li_suspended--;
	l->flags &= ~LI_F_SUSPENDED;

	if (px && (px->flags & PR_FL_PAUSED) && !px->li_suspended) {
		/* PROXY_LOCK is required */
		proxy_cond_resume(px);
		ha_warning("Resumed %s %s.\n", proxy_cap_str(px->cap), px->id);
		send_log(px, LOG_WARNING, "Resumed %s %s.\n", proxy_cap_str(px->cap), px->id);
	}
 end:
	if (!lli)
		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);

	if (!lpx && px)
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	return ret;
}
+
/* Same as resume_listener(), but will only work to resume from
 * LI_FULL or LI_LIMITED states because we try to relax listeners that
 * were temporarily restricted and not to resume inactive listeners that
 * may have been paused or completely stopped in the meantime.
 * Returns positive value for success and 0 for failure.
 * It will need to operate under the proxy's lock and the listener's lock.
 * The caller is responsible for indicating in lpx, lli whether the respective
 * locks are already held (non-zero) or not (zero) so that the function picks
 * the missing ones, in this order.
 */
int relax_listener(struct listener *l, int lpx, int lli)
{
	struct proxy *px = l->bind_conf->frontend;
	int ret = 1;

	if (!lpx && px)
		HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);

	if (!lli)
		HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);

	if (l->state != LI_FULL && l->state != LI_LIMITED)
		goto end; /* listener may be suspended or even stopped */
	/* both locks are held at this point, hence lpx=1 and lli=1 below */
	ret = resume_listener(l, 1, 1);

 end:
	if (!lli)
		HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);

	if (!lpx && px)
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	return ret;
}
+
+/* Marks a ready listener as full so that the stream code tries to re-enable
+ * it upon next close() using relax_listener().
+ */
+static void listener_full(struct listener *l)
+{
+ HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
+ if (l->state >= LI_READY) {
+ MT_LIST_DELETE(&l->wait_queue);
+ if (l->state != LI_FULL) {
+ l->rx.proto->disable(l);
+ listener_set_state(l, LI_FULL);
+ }
+ }
+ HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
+}
+
/* Marks a ready listener as limited so that we only try to re-enable it when
 * resources are free again. It will be queued into the specified queue.
 */
static void limit_listener(struct listener *l, struct mt_list *list)
{
	HA_RWLOCK_WRLOCK(LISTENER_LOCK, &l->lock);
	if (l->state == LI_READY) {
		/* NOTE(review): MT_LIST_TRY_APPEND's result is ignored; the
		 * state change below is applied either way — presumably fine
		 * since an already-queued listener is LI_LIMITED, confirm.
		 */
		MT_LIST_TRY_APPEND(list, &l->wait_queue);
		l->rx.proto->disable(l);
		listener_set_state(l, LI_LIMITED);
	}
	HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &l->lock);
}
+
+/* Dequeues all listeners waiting for a resource the global wait queue */
+void dequeue_all_listeners()
+{
+ struct listener *listener;
+
+ while ((listener = MT_LIST_POP(&global_listener_queue, struct listener *, wait_queue))) {
+ /* This cannot fail because the listeners are by definition in
+ * the LI_LIMITED state.
+ */
+ relax_listener(listener, 0, 0);
+ }
+}
+
+/* Dequeues all listeners waiting for a resource in proxy <px>'s queue */
+void dequeue_proxy_listeners(struct proxy *px)
+{
+ struct listener *listener;
+
+ while ((listener = MT_LIST_POP(&px->listener_queue, struct listener *, wait_queue))) {
+ /* This cannot fail because the listeners are by definition in
+ * the LI_LIMITED state.
+ */
+ relax_listener(listener, 0, 0);
+ }
+}
+
+
/* default function used to unbind a listener. This is for use by standard
 * protocols working on top of accepted sockets. The receiver's rx_unbind()
 * will automatically be used after the listener is disabled if the socket is
 * still bound. This must be used under the listener's lock.
 */
void default_unbind_listener(struct listener *listener)
{
	/* not listening yet: just release the receiver if it's bound */
	if (listener->state <= LI_ASSIGNED)
		goto out_close;

	/* the receiver has no FD anymore: downgrade to ASSIGNED */
	if (listener->rx.fd == -1) {
		listener_set_state(listener, LI_ASSIGNED);
		goto out_close;
	}

	/* actively polling: disable it first; it stays in LI_LISTEN only
	 * while the receiver remains bound.
	 */
	if (listener->state >= LI_READY) {
		listener->rx.proto->disable(listener);
		if (listener->rx.flags & RX_F_BOUND)
			listener_set_state(listener, LI_LISTEN);
	}

 out_close:
	if (listener->rx.flags & RX_F_BOUND)
		listener->rx.proto->rx_unbind(&listener->rx);
}
+
/* This function closes the listening socket for the specified listener,
 * provided that it's already in a listening state. The protocol's unbind()
 * is called to put the listener into LI_ASSIGNED or LI_LISTEN and handle
 * the unbinding tasks. The listener enters then the LI_ASSIGNED state if
 * the receiver is unbound. Must be called with the lock held.
 */
void do_unbind_listener(struct listener *listener)
{
	/* make sure we're no longer queued waiting for a resource */
	MT_LIST_DELETE(&listener->wait_queue);

	if (listener->rx.proto->unbind)
		listener->rx.proto->unbind(listener);

	/* we may have to downgrade the listener if the rx was closed */
	if (!(listener->rx.flags & RX_F_BOUND) && listener->state > LI_ASSIGNED)
		listener_set_state(listener, LI_ASSIGNED);
}
+
+/* This function closes the listening socket for the specified listener,
+ * provided that it's already in a listening state. The listener enters the
+ * LI_ASSIGNED state, except if the FD is not closed, in which case it may
+ * remain in LI_LISTEN. This function is intended to be used as a generic
+ * function for standard protocols.
+ */
+void unbind_listener(struct listener *listener)
+{
+ HA_RWLOCK_WRLOCK(LISTENER_LOCK, &listener->lock);
+ do_unbind_listener(listener);
+ HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &listener->lock);
+}
+
/* creates one or multiple listeners for bind_conf <bc> on sockaddr <ss> on port
 * range <portl> to <porth>, and possibly attached to fd <fd> (or -1 for auto
 * allocation). The address family is taken from ss->ss_family, and the protocol
 * passed in <proto> must be usable on this family. The protocol's default iocb
 * is automatically preset as the receivers' iocb. The number of jobs and
 * listeners is automatically increased by the number of listeners created. It
 * returns non-zero on success, zero on error with the error message set in <err>.
 * Note: on allocation failure, listeners created by earlier iterations of the
 * loop are left attached to <bc> and its frontend.
 */
int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
                     int portl, int porth, int fd, struct protocol *proto, char **err)
{
	struct listener *l;
	int port;

	for (port = portl; port <= porth; port++) {
		l = calloc(1, sizeof(*l));
		if (!l) {
			memprintf(err, "out of memory");
			return 0;
		}
		l->obj_type = OBJ_TYPE_LISTENER;
		LIST_APPEND(&bc->frontend->conf.listeners, &l->by_fe);
		LIST_APPEND(&bc->listeners, &l->by_bind);
		l->bind_conf = bc;
		l->rx.settings = &bc->settings;
		l->rx.owner = l;
		l->rx.iocb = proto->default_iocb;
		l->rx.fd = fd;

		l->rx.rhttp.task = NULL;
		l->rx.rhttp.srv = NULL;
		l->rx.rhttp.pend_conn = NULL;

		/* copy the full address, then let the family set the port */
		memcpy(&l->rx.addr, ss, sizeof(*ss));
		if (proto->fam->set_port)
			proto->fam->set_port(&l->rx.addr, port);

		MT_LIST_INIT(&l->wait_queue);
		listener_set_state(l, LI_INIT);

		proto->add(proto, l);

		/* an explicitly provided fd is flagged as inherited */
		if (fd != -1)
			l->rx.flags |= RX_F_INHERITED;

		l->extra_counters = NULL;

		HA_RWLOCK_INIT(&l->lock);
		_HA_ATOMIC_INC(&jobs);
		_HA_ATOMIC_INC(&listeners);
	}
	return 1;
}
+
/* Optionally allocates a new shard info (if si == NULL) for receiver rx and
 * assigns it to it, or attaches to an existing one. If the rx already had a
 * shard_info, it is simply returned. It is illegal to call this function with
 * an rx that's part of a group that is already attached. Attaching means the
 * shard_info's thread count and group count are updated so the rx's group is
 * added to the shard_info's group mask. The rx are added to the members in the
 * attachment order, though it must not matter. It is meant for boot time setup
 * and is not thread safe. NULL is returned on allocation failure.
 */
struct shard_info *shard_info_attach(struct receiver *rx, struct shard_info *si)
{
	if (rx->shard_info)
		return rx->shard_info;

	if (!si) {
		si = calloc(1, sizeof(*si));
		if (!si)
			return NULL;

		/* the first receiver ever attached becomes the reference */
		si->ref = rx;
	}

	rx->shard_info = si;
	/* the rx's group must not already be part of this shard's mask */
	BUG_ON (si->tgroup_mask & 1UL << (rx->bind_tgroup - 1));
	si->tgroup_mask |= 1UL << (rx->bind_tgroup - 1);
	si->nbgroups = my_popcountl(si->tgroup_mask);
	si->nbthreads += my_popcountl(rx->bind_thread);
	si->members[si->nbgroups - 1] = rx;
	return si;
}
+
/* Detaches the rx from an optional shard_info it may be attached to. If so,
 * the thread counts, group masks and refcounts are updated. The members list
 * remains contiguous by replacing the current entry with the last one. The
 * reference continues to point to the first receiver. If the group count
 * reaches zero, the shard_info is automatically released.
 */
void shard_info_detach(struct receiver *rx)
{
	struct shard_info *si = rx->shard_info;
	uint gr;

	if (!si)
		return;

	rx->shard_info = NULL;

	/* find the member slot this rx was attached to */
	for (gr = 0; gr < MAX_TGROUPS && si->members[gr] != rx; gr++)
		;

	/* the rx must always be found in the members array */
	BUG_ON(gr == MAX_TGROUPS);

	si->nbthreads -= my_popcountl(rx->bind_thread);
	si->tgroup_mask &= ~(1UL << (rx->bind_tgroup - 1));
	si->nbgroups = my_popcountl(si->tgroup_mask);

	/* replace the member by the last one. If we removed the reference, we
	 * have to switch to another one. It's always the first entry so we can
	 * simply enforce it upon every removal.
	 */
	si->members[gr] = si->members[si->nbgroups];
	si->members[si->nbgroups] = NULL;
	si->ref = si->members[0];

	if (!si->nbgroups)
		free(si);
}
+
/* clones listener <src> and returns the new one. All dynamically allocated
 * fields are reallocated (name for now). The new listener is inserted before
 * the original one in the bind_conf and frontend lists. This allows it to be
 * duplicated while iterating over the current list. The original listener must
 * only be in the INIT or ASSIGNED states, and the new listener will only be
 * placed into the INIT state. The counters are always set to NULL. Maxsock is
 * updated. Returns NULL on allocation error. The shard_info is never taken so
 * that the caller can decide what to do with it depending on how it intends to
 * clone the listener.
 */
struct listener *clone_listener(struct listener *src)
{
	struct listener *l;

	l = calloc(1, sizeof(*l));
	if (!l)
		goto oom1;
	memcpy(l, src, sizeof(*l));

	/* deep-copy the name, the only dynamically allocated field */
	if (l->name) {
		l->name = strdup(l->name);
		if (!l->name)
			goto oom2;
	}

	l->rx.owner = l;
	l->rx.shard_info = NULL;
	l->state = LI_INIT;
	l->counters = NULL;
	l->extra_counters = NULL;

	LIST_APPEND(&src->by_fe, &l->by_fe);
	LIST_APPEND(&src->by_bind, &l->by_bind);

	MT_LIST_INIT(&l->wait_queue);

	l->rx.proto->add(l->rx.proto, l);

	HA_RWLOCK_INIT(&l->lock);
	_HA_ATOMIC_INC(&jobs);
	_HA_ATOMIC_INC(&listeners);
	global.maxsock++;
	return l;

 oom2:
	free(l);
 oom1:
	return NULL;
}
+
+/* Delete a listener from its protocol's list of listeners. The listener's
+ * state is automatically updated from LI_ASSIGNED to LI_INIT. The protocol's
+ * number of listeners is updated, as well as the global number of listeners
+ * and jobs. Note that the listener must have previously been unbound. This
+ * is a low-level function expected to be called with the proto_lock and the
+ * listener's lock held.
+ */
+void __delete_listener(struct listener *listener)
+{
+ if (listener->state == LI_ASSIGNED) {
+ listener_set_state(listener, LI_INIT);
+ LIST_DELETE(&listener->rx.proto_list);
+ shard_info_detach(&listener->rx);
+ listener->rx.proto->nb_receivers--;
+ _HA_ATOMIC_DEC(&jobs);
+ _HA_ATOMIC_DEC(&listeners);
+ }
+}
+
+/* Delete a listener from its protocol's list of listeners (please check
+ * __delete_listener() above). The proto_lock and the listener's lock will
+ * be grabbed in this order.
+ */
+void delete_listener(struct listener *listener)
+{
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ HA_RWLOCK_WRLOCK(LISTENER_LOCK, &listener->lock);
+ __delete_listener(listener);
+ HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &listener->lock);
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* Returns a suitable value for a listener's backlog. It uses the listener's,
+ * otherwise the frontend's backlog, otherwise the listener's maxconn,
+ * otherwise the frontend's maxconn, otherwise 1024.
+ */
+int listener_backlog(const struct listener *l)
+{
+ if (l->bind_conf->backlog)
+ return l->bind_conf->backlog;
+
+ if (l->bind_conf->frontend->backlog)
+ return l->bind_conf->frontend->backlog;
+
+ if (l->bind_conf->maxconn)
+ return l->bind_conf->maxconn;
+
+ if (l->bind_conf->frontend->maxconn)
+ return l->bind_conf->frontend->maxconn;
+
+ return 1024;
+}
+
+/* Returns true if listener <l> must check maxconn limit prior to accept. */
+static inline int listener_uses_maxconn(const struct listener *l)
+{
+ return !(l->bind_conf->options & (BC_O_UNLIMITED|BC_O_XPRT_MAXCONN));
+}
+
+/* This function is called on a read event from a listening socket, corresponding
+ * to an accept. It tries to accept as many connections as possible, and for each
+ * calls the listener's accept handler (generally the frontend's accept handler).
+ */
+void listener_accept(struct listener *l)
+{
+ struct connection *cli_conn;
+ struct proxy *p;
+ unsigned int max_accept;
+ int next_conn = 0;
+ int next_feconn = 0;
+ int next_actconn = 0;
+ int expire;
+ int ret;
+
+ p = l->bind_conf->frontend;
+
+ /* if l->bind_conf->maxaccept is -1, then max_accept is UINT_MAX. It is
+ * not really illimited, but it is probably enough.
+ */
+ max_accept = l->bind_conf->maxaccept ? l->bind_conf->maxaccept : 1;
+
+ if (!(l->bind_conf->options & BC_O_UNLIMITED) && global.sps_lim) {
+ int max = freq_ctr_remain(&global.sess_per_sec, global.sps_lim, 0);
+
+ if (unlikely(!max)) {
+ /* frontend accept rate limit was reached */
+ expire = tick_add(now_ms, next_event_delay(&global.sess_per_sec, global.sps_lim, 0));
+ goto limit_global;
+ }
+
+ if (max_accept > max)
+ max_accept = max;
+ }
+
+ if (!(l->bind_conf->options & BC_O_UNLIMITED) && global.cps_lim) {
+ int max = freq_ctr_remain(&global.conn_per_sec, global.cps_lim, 0);
+
+ if (unlikely(!max)) {
+ /* frontend accept rate limit was reached */
+ expire = tick_add(now_ms, next_event_delay(&global.conn_per_sec, global.cps_lim, 0));
+ goto limit_global;
+ }
+
+ if (max_accept > max)
+ max_accept = max;
+ }
+#ifdef USE_OPENSSL
+ if (!(l->bind_conf->options & BC_O_UNLIMITED) && global.ssl_lim &&
+ l->bind_conf && l->bind_conf->options & BC_O_USE_SSL) {
+ int max = freq_ctr_remain(&global.ssl_per_sec, global.ssl_lim, 0);
+
+ if (unlikely(!max)) {
+ /* frontend accept rate limit was reached */
+ expire = tick_add(now_ms, next_event_delay(&global.ssl_per_sec, global.ssl_lim, 0));
+ goto limit_global;
+ }
+
+ if (max_accept > max)
+ max_accept = max;
+ }
+#endif
+ if (p && p->fe_sps_lim) {
+ int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0);
+
+ if (unlikely(!max)) {
+ /* frontend accept rate limit was reached */
+ expire = tick_add(now_ms, next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0));
+ goto limit_proxy;
+ }
+
+ if (max_accept > max)
+ max_accept = max;
+ }
+
+ /* Note: if we fail to allocate a connection because of configured
+ * limits, we'll schedule a new attempt worst 1 second later in the
+ * worst case. If we fail due to system limits or temporary resource
+ * shortage, we try again 100ms later in the worst case.
+ */
+ for (; max_accept; next_conn = next_feconn = next_actconn = 0, max_accept--) {
+ unsigned int count;
+ int status;
+ __decl_thread(unsigned long mask);
+
+ /* pre-increase the number of connections without going too far.
+ * We process the listener, then the proxy, then the process.
+ * We know which ones to unroll based on the next_xxx value.
+ */
+ do {
+ count = l->nbconn;
+ if (unlikely(l->bind_conf->maxconn && count >= l->bind_conf->maxconn)) {
+ /* the listener was marked full or another
+ * thread is going to do it.
+ */
+ next_conn = 0;
+ listener_full(l);
+ goto end;
+ }
+ next_conn = count + 1;
+ } while (!_HA_ATOMIC_CAS(&l->nbconn, (int *)(&count), next_conn));
+
+ if (p) {
+ do {
+ count = p->feconn;
+ if (unlikely(count >= p->maxconn)) {
+ /* the frontend was marked full or another
+ * thread is going to do it.
+ */
+ next_feconn = 0;
+ expire = TICK_ETERNITY;
+ goto limit_proxy;
+ }
+ next_feconn = count + 1;
+ } while (!_HA_ATOMIC_CAS(&p->feconn, &count, next_feconn));
+ }
+
+ if (listener_uses_maxconn(l)) {
+ next_actconn = increment_actconn();
+ if (!next_actconn) {
+ /* the process was marked full or another
+ * thread is going to do it.
+ */
+ expire = tick_add(now_ms, 1000); /* try again in 1 second */
+ goto limit_global;
+ }
+ }
+
+ /* be careful below, the listener might be shutting down in
+ * another thread on error and we must not dereference its
+ * FD without a bit of protection.
+ */
+ cli_conn = NULL;
+ status = CO_AC_PERMERR;
+
+ HA_RWLOCK_RDLOCK(LISTENER_LOCK, &l->lock);
+ if (l->rx.flags & RX_F_BOUND)
+ cli_conn = l->rx.proto->accept_conn(l, &status);
+ HA_RWLOCK_RDUNLOCK(LISTENER_LOCK, &l->lock);
+
+ if (!cli_conn) {
+ switch (status) {
+ case CO_AC_DONE:
+ goto end;
+
+ case CO_AC_RETRY: /* likely a signal */
+ _HA_ATOMIC_DEC(&l->nbconn);
+ if (p)
+ _HA_ATOMIC_DEC(&p->feconn);
+ if (listener_uses_maxconn(l))
+ _HA_ATOMIC_DEC(&actconn);
+ continue;
+
+ case CO_AC_YIELD:
+ max_accept = 0;
+ goto end;
+
+ default:
+ goto transient_error;
+ }
+ }
+
+ /* The connection was accepted, it must be counted as such */
+ if (l->counters)
+ HA_ATOMIC_UPDATE_MAX(&l->counters->conn_max, next_conn);
+
+ if (p) {
+ HA_ATOMIC_UPDATE_MAX(&p->fe_counters.conn_max, next_feconn);
+ proxy_inc_fe_conn_ctr(l, p);
+ }
+
+ if (!(l->bind_conf->options & BC_O_UNLIMITED)) {
+ count = update_freq_ctr(&global.conn_per_sec, 1);
+ HA_ATOMIC_UPDATE_MAX(&global.cps_max, count);
+ }
+
+ _HA_ATOMIC_INC(&activity[tid].accepted);
+
+ /* count the number of times an accepted connection resulted in
+ * maxconn being reached.
+ */
+ if (unlikely(_HA_ATOMIC_LOAD(&actconn) + 1 >= global.maxconn))
+ _HA_ATOMIC_INC(&maxconn_reached);
+
+ /* past this point, l->bind_conf->accept() will automatically decrement
+ * l->nbconn, feconn and actconn once done. Setting next_*conn=0
+ * allows the error path not to rollback on nbconn. It's more
+ * convenient than duplicating all exit labels.
+ */
+ next_conn = 0;
+ next_feconn = 0;
+ next_actconn = 0;
+
+
+#if defined(USE_THREAD)
+ if (!(global.tune.options & GTUNE_LISTENER_MQ_ANY) || stopping)
+ goto local_accept;
+
+ /* we want to perform thread rebalancing if the listener is
+ * bound to more than one thread or if it's part of a shard
+ * with more than one listener.
+ */
+ mask = l->rx.bind_thread & _HA_ATOMIC_LOAD(&tg->threads_enabled);
+ if (l->rx.shard_info || atleast2(mask)) {
+ struct accept_queue_ring *ring;
+ struct listener *new_li;
+ uint r1, r2, t, t1, t2;
+ ulong n0, n1;
+ const struct tgroup_info *g1, *g2;
+ ulong m1, m2;
+ ulong *thr_idx_ptr;
+
+ /* The principle is that we have two running indexes,
+ * each visiting in turn all threads bound to this
+ * listener's shard. The connection will be assigned to
+ * the one with the least connections, and the other
+ * one will be updated. This provides a good fairness
+ * on short connections (round robin) and on long ones
+ * (conn count), without ever missing any idle thread.
+ * Each thread number is encoded as a combination of
+ * times the receiver number and its local thread
+ * number from 0 to MAX_THREADS_PER_GROUP - 1. The two
+ * indexes are stored as 10/12 bit numbers in the thr_idx
+ * array, since there are up to LONGBITS threads and
+ * groups that can be represented. They are represented
+ * like this:
+ * 31:20 19:15 14:10 9:5 4:0
+ * 32b: [ counter | r2num | t2num | r1num | t1num ]
+ *
+ * 63:24 23:18 17:12 11:6 5:0
+ * 64b: [ counter | r2num | t2num | r1num | t1num ]
+ *
+ * The change counter is only used to avoid swapping too
+ * old a value when the value loops back.
+ *
+ * In the loop below we have this for each index:
+ * - n is the thread index
+ * - r is the receiver number
+ * - g is the receiver's thread group
+ * - t is the thread number in this receiver
+ * - m is the receiver's thread mask shifted by the thread number
+ */
+
+ /* keep a copy for the final update. thr_idx is composite
+ * and made of (n2<<16) + n1.
+ */
+ thr_idx_ptr = l->rx.shard_info ? &((struct listener *)(l->rx.shard_info->ref->owner))->thr_idx : &l->thr_idx;
+ while (1) {
+ int q0, q1, q2;
+
+ /* calculate r1/g1/t1 first (ascending idx) */
+ n0 = _HA_ATOMIC_LOAD(thr_idx_ptr);
+ new_li = NULL;
+
+ t1 = (uint)n0 & (LONGBITS - 1);
+ r1 = ((uint)n0 / LONGBITS) & (LONGBITS - 1);
+
+ while (1) {
+ if (l->rx.shard_info) {
+ /* multiple listeners, take the group into account */
+ if (r1 >= l->rx.shard_info->nbgroups)
+ r1 = 0;
+
+ g1 = &ha_tgroup_info[l->rx.shard_info->members[r1]->bind_tgroup - 1];
+ m1 = l->rx.shard_info->members[r1]->bind_thread;
+ } else {
+ /* single listener */
+ r1 = 0;
+ g1 = tg;
+ m1 = l->rx.bind_thread;
+ }
+ m1 &= _HA_ATOMIC_LOAD(&g1->threads_enabled);
+ m1 >>= t1;
+
+ /* find first existing thread */
+ if (unlikely(!(m1 & 1))) {
+ m1 &= ~1UL;
+ if (!m1) {
+ /* no more threads here, switch to
+ * first thread of next group.
+ */
+ t1 = 0;
+ if (l->rx.shard_info)
+ r1++;
+ /* loop again */
+ continue;
+ }
+ t1 += my_ffsl(m1) - 1;
+ }
+ /* done: r1 and t1 are OK */
+ break;
+ }
+
+ /* now r2/g2/t2 (descending idx) */
+ t2 = ((uint)n0 / LONGBITS / LONGBITS) & (LONGBITS - 1);
+ r2 = ((uint)n0 / LONGBITS / LONGBITS / LONGBITS) & (LONGBITS - 1);
+
+ /* if running in round-robin mode ("fair"), we don't need
+ * to go further.
+ */
+ if ((global.tune.options & GTUNE_LISTENER_MQ_ANY) == GTUNE_LISTENER_MQ_FAIR) {
+ t = g1->base + t1;
+ if (l->rx.shard_info && t != tid)
+ new_li = l->rx.shard_info->members[r1]->owner;
+ goto updt_t1;
+ }
+
+ while (1) {
+ if (l->rx.shard_info) {
+ /* multiple listeners, take the group into account */
+ if (r2 >= l->rx.shard_info->nbgroups)
+ r2 = l->rx.shard_info->nbgroups - 1;
+
+ g2 = &ha_tgroup_info[l->rx.shard_info->members[r2]->bind_tgroup - 1];
+ m2 = l->rx.shard_info->members[r2]->bind_thread;
+ } else {
+ /* single listener */
+ r2 = 0;
+ g2 = tg;
+ m2 = l->rx.bind_thread;
+ }
+ m2 &= _HA_ATOMIC_LOAD(&g2->threads_enabled);
+ m2 &= nbits(t2 + 1);
+
+ /* find previous existing thread */
+ if (unlikely(!(m2 & (1UL << t2)) || (g1 == g2 && t1 == t2))) {
+ /* highest bit not set or colliding threads, let's check
+ * if we still have other threads available after this
+ * one.
+ */
+ m2 &= ~(1UL << t2);
+ if (!m2) {
+ /* no more threads here, switch to
+ * last thread of previous group.
+ */
+ t2 = MAX_THREADS_PER_GROUP - 1;
+ if (l->rx.shard_info)
+ r2--;
+ /* loop again */
+ continue;
+ }
+ t2 = my_flsl(m2) - 1;
+ }
+ /* done: r2 and t2 are OK */
+ break;
+ }
+
+ /* tests show that it's worth checking that other threads have not
+ * already changed the index to save the rest of the calculation,
+ * or we'd have to redo it anyway.
+ */
+ if (n0 != _HA_ATOMIC_LOAD(thr_idx_ptr))
+ continue;
+
+ /* here we have (r1,g1,t1) that designate the first receiver, its
+ * thread group and local thread, and (r2,g2,t2) that designate
+ * the second receiver, its thread group and local thread. We'll
+ * also consider the local thread with q0.
+ */
+ q0 = accept_queue_ring_len(&accept_queue_rings[tid]);
+ q1 = accept_queue_ring_len(&accept_queue_rings[g1->base + t1]);
+ q2 = accept_queue_ring_len(&accept_queue_rings[g2->base + t2]);
+
+ /* add to this the currently active connections */
+ q0 += _HA_ATOMIC_LOAD(&l->thr_conn[ti->ltid]);
+ if (l->rx.shard_info) {
+ q1 += _HA_ATOMIC_LOAD(&((struct listener *)l->rx.shard_info->members[r1]->owner)->thr_conn[t1]);
+ q2 += _HA_ATOMIC_LOAD(&((struct listener *)l->rx.shard_info->members[r2]->owner)->thr_conn[t2]);
+ } else {
+ q1 += _HA_ATOMIC_LOAD(&l->thr_conn[t1]);
+ q2 += _HA_ATOMIC_LOAD(&l->thr_conn[t2]);
+ }
+
+ /* we have 3 possibilities now :
+ * q1 < q2 : t1 is less loaded than t2, so we pick it
+ * and update t2 (since t1 might still be
+ * lower than another thread)
+ * q1 > q2 : t2 is less loaded than t1, so we pick it
+ * and update t1 (since t2 might still be
+ * lower than another thread)
+ * q1 = q2 : both are equally loaded, thus we pick t1
+ * and update t1 as it will become more loaded
+ * than t2.
+ * On top of that, if in the end the current thread appears
+ * to be as good of a deal, we'll prefer it over a foreign
+ * one as it will improve locality and avoid a migration.
+ */
+
+ if (q1 - q2 < 0) {
+ t = g1->base + t1;
+ if (q0 <= q1)
+ t = tid;
+
+ if (l->rx.shard_info && t != tid)
+ new_li = l->rx.shard_info->members[r1]->owner;
+
+ t2--;
+ if (t2 >= MAX_THREADS_PER_GROUP) {
+ if (l->rx.shard_info)
+ r2--;
+ t2 = MAX_THREADS_PER_GROUP - 1;
+ }
+ }
+ else if (q1 - q2 > 0) {
+ t = g2->base + t2;
+ if (q0 <= q2)
+ t = tid;
+
+ if (l->rx.shard_info && t != tid)
+ new_li = l->rx.shard_info->members[r2]->owner;
+ goto updt_t1;
+ }
+ else { // q1 == q2
+ t = g1->base + t1;
+ if (q0 < q1) // local must be strictly better than both
+ t = tid;
+
+ if (l->rx.shard_info && t != tid)
+ new_li = l->rx.shard_info->members[r1]->owner;
+ updt_t1:
+ t1++;
+ if (t1 >= MAX_THREADS_PER_GROUP) {
+ if (l->rx.shard_info)
+ r1++;
+ t1 = 0;
+ }
+ }
+
+ /* The target thread number is in <t> now. Let's
+ * compute the new index and try to update it.
+ */
+
+ /* take previous counter and increment it */
+ n1 = n0 & -(ulong)(LONGBITS * LONGBITS * LONGBITS * LONGBITS);
+ n1 += LONGBITS * LONGBITS * LONGBITS * LONGBITS;
+ n1 += (((r2 * LONGBITS) + t2) * LONGBITS * LONGBITS);
+ n1 += (r1 * LONGBITS) + t1;
+ if (likely(_HA_ATOMIC_CAS(thr_idx_ptr, &n0, n1)))
+ break;
+
+ /* bah we lost the race, try again */
+ __ha_cpu_relax();
+ } /* end of main while() loop */
+
+ /* we may need to update the listener in the connection
+ * if we switched to another group.
+ */
+ if (new_li)
+ cli_conn->target = &new_li->obj_type;
+
+ /* here we have the target thread number in <t> and we hold a
+ * reservation in the target ring.
+ */
+
+ if (l->rx.proto && l->rx.proto->set_affinity) {
+ if (l->rx.proto->set_affinity(cli_conn, t)) {
+ /* Failed migration, stay on the same thread. */
+ goto local_accept;
+ }
+ }
+
+ /* We successfully selected the best thread "t" for this
+ * connection. We use deferred accepts even if it's the
+ * local thread because tests show that it's the best
+ * performing model, likely due to better cache locality
+ * when processing this loop.
+ */
+ ring = &accept_queue_rings[t];
+ if (accept_queue_push_mp(ring, cli_conn)) {
+ _HA_ATOMIC_INC(&activity[t].accq_pushed);
+ tasklet_wakeup(ring->tasklet);
+ continue;
+ }
+ /* If the ring is full we do a synchronous accept on
+ * the local thread here.
+ */
+ _HA_ATOMIC_INC(&activity[t].accq_full);
+ }
+#endif // USE_THREAD
+
+ local_accept:
+ /* restore the connection's listener in case we failed to migrate above */
+ cli_conn->target = &l->obj_type;
+ _HA_ATOMIC_INC(&l->thr_conn[ti->ltid]);
+ ret = l->bind_conf->accept(cli_conn);
+ if (unlikely(ret <= 0)) {
+ /* The connection was closed by stream_accept(). Either
+ * we just have to ignore it (ret == 0) or it's a critical
+ * error due to a resource shortage, and we must stop the
+ * listener (ret < 0).
+ */
+ if (ret == 0) /* successful termination */
+ continue;
+
+ goto transient_error;
+ }
+
+ /* increase the per-process number of cumulated sessions, this
+ * may only be done once l->bind_conf->accept() has accepted the
+ * connection.
+ */
+ if (!(l->bind_conf->options & BC_O_UNLIMITED)) {
+ count = update_freq_ctr(&global.sess_per_sec, 1);
+ HA_ATOMIC_UPDATE_MAX(&global.sps_max, count);
+ }
+#ifdef USE_OPENSSL
+ if (!(l->bind_conf->options & BC_O_UNLIMITED) &&
+ l->bind_conf && l->bind_conf->options & BC_O_USE_SSL) {
+ count = update_freq_ctr(&global.ssl_per_sec, 1);
+ HA_ATOMIC_UPDATE_MAX(&global.ssl_max, count);
+ }
+#endif
+
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
+ } /* end of for (max_accept--) */
+
+ end:
+ if (next_conn)
+ _HA_ATOMIC_DEC(&l->nbconn);
+
+ if (p && next_feconn)
+ _HA_ATOMIC_DEC(&p->feconn);
+
+ if (next_actconn)
+ _HA_ATOMIC_DEC(&actconn);
+
+ if ((l->state == LI_FULL && (!l->bind_conf->maxconn || l->nbconn < l->bind_conf->maxconn)) ||
+ (l->state == LI_LIMITED &&
+ ((!p || p->feconn < p->maxconn) && (actconn < global.maxconn) &&
+ (!tick_isset(global_listener_queue_task->expire) ||
+ tick_is_expired(global_listener_queue_task->expire, now_ms))))) {
+ /* at least one thread has to this when quitting */
+ relax_listener(l, 0, 0);
+
+ /* Dequeues all of the listeners waiting for a resource */
+ dequeue_all_listeners();
+
+ if (p && !MT_LIST_ISEMPTY(&p->listener_queue) &&
+ (!p->fe_sps_lim || freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0) > 0))
+ dequeue_proxy_listeners(p);
+ }
+ return;
+
+ transient_error:
+ /* pause the listener for up to 100 ms */
+ expire = tick_add(now_ms, 100);
+
+ /* This may be a shared socket that was paused by another process.
+ * Let's put it to pause in this case.
+ */
+ if (l->rx.proto && l->rx.proto->rx_listening(&l->rx) == 0) {
+ suspend_listener(l, 0, 0);
+ goto end;
+ }
+
+ limit_global:
+ /* (re-)queue the listener to the global queue and set it to expire no
+ * later than <expire> ahead. The listener turns to LI_LIMITED.
+ */
+ limit_listener(l, &global_listener_queue);
+ HA_RWLOCK_RDLOCK(LISTENER_LOCK, &global_listener_rwlock);
+ task_schedule(global_listener_queue_task, expire);
+ HA_RWLOCK_RDUNLOCK(LISTENER_LOCK, &global_listener_rwlock);
+ goto end;
+
+ limit_proxy:
+ /* (re-)queue the listener to the proxy's queue and set it to expire no
+ * later than <expire> ahead. The listener turns to LI_LIMITED.
+ */
+ limit_listener(l, &p->listener_queue);
+ if (p->task && tick_isset(expire))
+ task_schedule(p->task, expire);
+ goto end;
+}
+
/* Notify the listener that a connection initiated from it was released. This
 * is used to keep the connection count consistent and to possibly re-open
 * listening when it was limited. Counters are decremented in the reverse
 * order of what listener_accept() incremented: global actconn (only when
 * the listener is subject to the global maxconn), the frontend's feconn,
 * then the listener's own total and per-thread counts.
 */
void listener_release(struct listener *l)
{
	struct proxy *fe = l->bind_conf->frontend;

	if (listener_uses_maxconn(l))
		_HA_ATOMIC_DEC(&actconn);
	if (fe)
		_HA_ATOMIC_DEC(&fe->feconn);
	_HA_ATOMIC_DEC(&l->nbconn);
	_HA_ATOMIC_DEC(&l->thr_conn[ti->ltid]);

	/* a freed slot may allow a saturated or limited listener to resume */
	if (l->state == LI_FULL || l->state == LI_LIMITED)
		relax_listener(l, 0, 0);

	/* Dequeues all of the listeners waiting for a resource */
	dequeue_all_listeners();

	/* also wake the frontend's own queue unless it is rate-limited and
	 * has no budget left in the current period.
	 */
	if (fe && !MT_LIST_ISEMPTY(&fe->listener_queue) &&
	    (!fe->fe_sps_lim || freq_ctr_remain(&fe->fe_sess_per_sec, fe->fe_sps_lim, 0) > 0))
		dequeue_proxy_listeners(fe);
}
+
+/* Initializes the listener queues. Returns 0 on success, otherwise ERR_* flags */
+static int listener_queue_init()
+{
+ global_listener_queue_task = task_new_anywhere();
+ if (!global_listener_queue_task) {
+ ha_alert("Out of memory when initializing global listener queue\n");
+ return ERR_FATAL|ERR_ABORT;
+ }
+ /* very simple initialization, users will queue the task if needed */
+ global_listener_queue_task->context = NULL; /* not even a context! */
+ global_listener_queue_task->process = manage_global_listener_queue;
+ HA_RWLOCK_INIT(&global_listener_rwlock);
+
+ return 0;
+}
+
+static void listener_queue_deinit()
+{
+ task_destroy(global_listener_queue_task);
+ global_listener_queue_task = NULL;
+}
+
/* set up the queue task once the configuration is fully parsed, and release
 * it when the process deinitializes.
 */
REGISTER_CONFIG_POSTPARSER("multi-threaded listener queue", listener_queue_init);
REGISTER_POST_DEINIT(listener_queue_deinit);
+
+
/* This is the global management task for listeners. It enables listeners waiting
 * for global resources when there are enough free resource, or at least once in
 * a while. It is designed to be called as a task. It's exported so that it's easy
 * to spot in "show tasks" or "show profiling".
 */
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state)
{
	/* If there are still too many concurrent connections, let's wait for
	 * some of them to go away. We don't need to re-arm the timer because
	 * each of them will scan the queue anyway.
	 */
	if (unlikely(actconn >= global.maxconn))
		goto out;

	/* We should periodically try to enable listeners waiting for a global
	 * resource here, because it is possible, though very unlikely, that
	 * they have been blocked by a temporary lack of global resource such
	 * as a file descriptor or memory and that the temporary condition has
	 * disappeared.
	 */
	dequeue_all_listeners();

 out:
	/* the expire date is written under the write lock since limit_listener()
	 * re-arms it under the read lock (see limit_global path).
	 */
	HA_RWLOCK_WRLOCK(LISTENER_LOCK, &global_listener_rwlock);
	t->expire = TICK_ETERNITY;
	HA_RWLOCK_WRUNLOCK(LISTENER_LOCK, &global_listener_rwlock);
	return t;
}
+
/* Applies the thread mask, shards etc to the bind_conf. It normally returns 0,
 * otherwise the number of errors. Upon error it may set error codes (ERR_*) in
 * err_code. It is supposed to be called only once very late in the boot process
 * after the bind_conf's thread_set is fixed. The function may emit warnings and
 * alerts. Extra listeners may be created on the fly.
 */
int bind_complete_thread_setup(struct bind_conf *bind_conf, int *err_code)
{
	struct proxy *fe = bind_conf->frontend;
	struct listener *li, *new_li, *ref;
	struct thread_set new_ts;
	int shard, shards, todo, done, grp, dups;
	ulong mask, gmask, bit;
	int cfgerr = 0;
	char *err;

	err = NULL;
	/* resolve the "thread" spec against the process' thread groups first;
	 * on failure <err> contains an allocated message to report.
	 */
	if (thread_resolve_group_mask(&bind_conf->thread_set, 0, &err) < 0) {
		ha_alert("%s '%s': %s in 'bind %s' at [%s:%d].\n",
			 proxy_type_str(fe),
			 fe->id, err, bind_conf->arg, bind_conf->file, bind_conf->line);
		free(err);
		cfgerr++;
		return cfgerr;
	}

	/* apply thread masks and groups to all receivers */
	list_for_each_entry(li, &bind_conf->listeners, by_bind) {
		shards = bind_conf->settings.shards;
		todo = thread_set_count(&bind_conf->thread_set);

		/* special values: -1 = "by-thread", -2 = "by-group" */
		if (shards == -1) {
			if (protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED))
				shards = todo;
			else {
				/* stay silent for the internal CLI frontend */
				if (fe != global.cli_fe)
					ha_diag_warning("[%s:%d]: Disabling per-thread sharding for listener in"
					                " %s '%s' because SO_REUSEPORT is disabled\n",
					                bind_conf->file, bind_conf->line, proxy_type_str(fe), fe->id);
				shards = 1;
			}
		}
		else if (shards == -2)
			shards = protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED) ? my_popcountl(bind_conf->thread_set.grps) : 1;

		/* no more shards than total threads */
		if (shards > todo)
			shards = todo;

		/* We also need to check if an explicit shards count was set and cannot be honored */
		if (shards > 1 && !protocol_supports_flag(li->rx.proto, PROTO_F_REUSEPORT_SUPPORTED)) {
			ha_warning("[%s:%d]: Disabling sharding for listener in %s '%s' because SO_REUSEPORT is disabled\n",
			           bind_conf->file, bind_conf->line, proxy_type_str(fe), fe->id);
			shards = 1;
		}

		shard = done = grp = bit = mask = 0;
		new_li = li;

		while (shard < shards) {
			memset(&new_ts, 0, sizeof(new_ts));
			while (grp < global.nbtgroups && done < todo) {
				/* enlarge mask to cover next bit of bind_thread till we
				 * have enough bits for one shard. We restart from the
				 * current grp+bit.
				 */

				/* first let's find the first non-empty group starting at <mask> */
				if (!(bind_conf->thread_set.rel[grp] & ha_tgroup_info[grp].threads_enabled & ~mask)) {
					grp++;
					mask = 0;
					continue;
				}

				/* take next unassigned bit (isolate lowest set bit of the
				 * remaining mask with the x & -x trick)
				 */
				bit = (bind_conf->thread_set.rel[grp] & ~mask) & -(bind_conf->thread_set.rel[grp] & ~mask);
				new_ts.rel[grp] |= bit;
				mask |= bit;
				new_ts.grps |= 1UL << grp;

				/* <done> advances by <shards> per thread taken so that
				 * todo/shards threads are consumed per shard on average.
				 */
				done += shards;
			};

			BUG_ON(!new_ts.grps); // no more bits left unassigned

			/* Create all required listeners for all bound groups. If more than one group is
			 * needed, the first receiver serves as a reference, and subsequent ones point to
			 * it. We already have a listener available in <new_li> so we only allocate a new
			 * one if we're not on the last one. We count the remaining groups by copying their
			 * mask into <gmask> and dropping the lowest bit at the end of the loop until there
			 * is no more. Ah yes, it's not pretty :-/
			 */
			ref = new_li;
			gmask = new_ts.grps;
			for (dups = 0; gmask; dups++) {
				/* assign the first (and only) thread and group */
				new_li->rx.bind_thread = thread_set_nth_tmask(&new_ts, dups);
				new_li->rx.bind_tgroup = thread_set_nth_group(&new_ts, dups);

				if (dups) {
					/* it has been allocated already in the previous round */
					shard_info_attach(&new_li->rx, ref->rx.shard_info);
					new_li->rx.flags |= RX_F_MUST_DUP;
				}

				gmask &= gmask - 1; // drop lowest bit
				if (gmask) {
					/* yet another listener expected in this shard, let's
					 * chain it.
					 */
					struct listener *tmp_li = clone_listener(new_li);

					if (!tmp_li) {
						ha_alert("Out of memory while trying to allocate extra listener for group %u of shard %d in %s %s\n",
							 new_li->rx.bind_tgroup, shard, proxy_type_str(fe), fe->id);
						cfgerr++;
						*err_code |= ERR_FATAL | ERR_ALERT;
						return cfgerr;
					}

					/* if we're forced to create at least two listeners, we have to
					 * allocate a shared shard_info that's linked to from the reference
					 * and each other listener, so we'll create it here.
					 */
					if (!shard_info_attach(&ref->rx, NULL)) {
						ha_alert("Out of memory while trying to allocate shard_info for listener for group %u of shard %d in %s %s\n",
							 new_li->rx.bind_tgroup, shard, proxy_type_str(fe), fe->id);
						cfgerr++;
						*err_code |= ERR_FATAL | ERR_ALERT;
						return cfgerr;
					}
					new_li = tmp_li;
				}
			}
			/* keep the fractional remainder so the next shard continues
			 * the distribution where this one stopped.
			 */
			done -= todo;

			shard++;
			if (shard >= shards)
				break;

			/* create another listener for new shards */
			new_li = clone_listener(li);
			if (!new_li) {
				ha_alert("Out of memory while trying to allocate extra listener for shard %d in %s %s\n",
					 shard, proxy_type_str(fe), fe->id);
				cfgerr++;
				*err_code |= ERR_FATAL | ERR_ALERT;
				return cfgerr;
			}
		}
	}

	/* success */
	return cfgerr;
}
+
/*
 * Registers the bind keyword list <kwl> as a list of valid keywords for next
 * parsing sessions. The list is simply appended to the global <bind_keywords>
 * chain which bind_find_kw() walks; it must remain valid for the process'
 * lifetime (registrations are normally done from INITCALLs on static lists).
 */
void bind_register_keywords(struct bind_kw_list *kwl)
{
	LIST_APPEND(&bind_keywords.list, &kwl->list);
}
+
+/* Return a pointer to the bind keyword <kw>, or NULL if not found. If the
+ * keyword is found with a NULL ->parse() function, then an attempt is made to
+ * find one with a valid ->parse() function. This way it is possible to declare
+ * platform-dependant, known keywords as NULL, then only declare them as valid
+ * if some options are met. Note that if the requested keyword contains an
+ * opening parenthesis, everything from this point is ignored.
+ */
+struct bind_kw *bind_find_kw(const char *kw)
+{
+ int index;
+ const char *kwend;
+ struct bind_kw_list *kwl;
+ struct bind_kw *ret = NULL;
+
+ kwend = strchr(kw, '(');
+ if (!kwend)
+ kwend = kw + strlen(kw);
+
+ list_for_each_entry(kwl, &bind_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
+ kwl->kw[index].kw[kwend-kw] == 0) {
+ if (kwl->kw[index].parse)
+ return &kwl->kw[index]; /* found it !*/
+ else
+ ret = &kwl->kw[index]; /* may be OK */
+ }
+ }
+ }
+ return ret;
+}
+
+/* Dumps all registered "bind" keywords to the <out> string pointer. The
+ * unsupported keywords are only dumped if their supported form was not
+ * found.
+ */
+void bind_dump_kws(char **out)
+{
+ struct bind_kw_list *kwl;
+ int index;
+
+ if (!out)
+ return;
+
+ *out = NULL;
+ list_for_each_entry(kwl, &bind_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (kwl->kw[index].parse ||
+ bind_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
+ memprintf(out, "%s[%4s] %s%s%s\n", *out ? *out : "",
+ kwl->scope,
+ kwl->kw[index].kw,
+ kwl->kw[index].skip ? " <arg>" : "",
+ kwl->kw[index].parse ? "" : " (not supported)");
+ }
+ }
+ }
+}
+
+/* Try to find in srv_keyword the word that looks closest to <word> by counting
+ * transitions between letters, digits and other characters. Will return the
+ * best matching word if found, otherwise NULL.
+ */
+const char *bind_find_best_kw(const char *word)
+{
+ uint8_t word_sig[1024];
+ uint8_t list_sig[1024];
+ const struct bind_kw_list *kwl;
+ const char *best_ptr = NULL;
+ int dist, best_dist = INT_MAX;
+ int index;
+
+ make_word_fingerprint(word_sig, word);
+ list_for_each_entry(kwl, &bind_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ make_word_fingerprint(list_sig, kwl->kw[index].kw);
+ dist = word_fingerprint_distance(word_sig, list_sig);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_ptr = kwl->kw[index].kw;
+ }
+ }
+ }
+
+ if (best_dist > 2 * strlen(word) || (best_ptr && best_dist > 2 * strlen(best_ptr)))
+ best_ptr = NULL;
+
+ return best_ptr;
+}
+
/* allocate an bind_conf struct for a bind line, and chain it to the frontend <fe>.
 * If <arg> is not NULL, it is duplicated into ->arg to store useful config
 * information for error reporting. NULL is returned on error (allocation
 * failure); in that case nothing is chained to <fe> and all partial
 * allocations are released.
 */
struct bind_conf *bind_conf_alloc(struct proxy *fe, const char *file,
                                  int line, const char *arg, struct xprt_ops *xprt)
{
	struct bind_conf *bind_conf = calloc(1, sizeof(*bind_conf));

	if (!bind_conf)
		goto err;

	/* keep file/line for later error reporting on this bind line */
	bind_conf->file = strdup(file);
	if (!bind_conf->file)
		goto err;
	bind_conf->line = line;
	if (arg) {
		bind_conf->arg = strdup(arg);
		if (!bind_conf->arg)
			goto err;
	}

	LIST_APPEND(&fe->conf.bind, &bind_conf->by_fe);
	/* UNIX socket credentials: -1/-1/0 means "not set" */
	bind_conf->settings.ux.uid = -1;
	bind_conf->settings.ux.gid = -1;
	bind_conf->settings.ux.mode = 0;
	bind_conf->settings.shards = global.tune.default_shards;
	bind_conf->xprt = xprt;
	bind_conf->frontend = fe;
	/* inherit the frontend's request analysers by default */
	bind_conf->analysers = fe->fe_req_ana;
	bind_conf->severity_output = CLI_SEVERITY_NONE;
#ifdef USE_OPENSSL
	HA_RWLOCK_INIT(&bind_conf->sni_lock);
	bind_conf->sni_ctx = EB_ROOT;
	bind_conf->sni_w_ctx = EB_ROOT;
#endif
#ifdef USE_QUIC
	/* Use connection socket for QUIC by default. */
	bind_conf->quic_mode = QUIC_SOCK_MODE_CONN;
	bind_conf->max_cwnd =
		global.tune.bufsize * global.tune.quic_streams_buf;
#endif
	LIST_INIT(&bind_conf->listeners);

	bind_conf->rhttp_srvname = NULL;

	return bind_conf;

 err:
	/* note: ha_free(NULL-deref-safe) handles the !bind_conf case too,
	 * but the members may only be touched when bind_conf was allocated.
	 */
	if (bind_conf) {
		ha_free(&bind_conf->file);
		ha_free(&bind_conf->arg);
	}
	ha_free(&bind_conf);
	return NULL;
}
+
+const char *listener_state_str(const struct listener *l)
+{
+ static const char *states[8] = {
+ "NEW", "INI", "ASS", "PAU", "LIS", "RDY", "FUL", "LIM",
+ };
+ unsigned int st = l->state;
+
+ if (st >= sizeof(states) / sizeof(*states))
+ return "INVALID";
+ return states[st];
+}
+
+/************************************************************************/
+/* All supported sample and ACL keywords must be declared here. */
+/************************************************************************/
+
/* set temp integer to the number of connexions to the same listening socket.
 * Sample fetch: reads the current listener's ->nbconn counter. Always
 * succeeds (returns 1).
 */
static int
smp_fetch_dconn(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = smp->sess->listener->nbconn;
	return 1;
}
+
/* set temp integer to the id of the socket (listener), i.e. its ->luid as
 * set by the "id" bind keyword or auto-assigned. Always succeeds (returns 1).
 */
static int
smp_fetch_so_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->data.type = SMP_T_SINT;
	smp->data.u.sint = smp->sess->listener->luid;
	return 1;
}
/* set temp string to the name of the listening socket as set by the "name"
 * bind keyword. Fails (returns 0) when the listener has no name. The sample
 * is flagged SMP_F_CONST since it points directly to the listener's own
 * string without copying it.
 */
static int
smp_fetch_so_name(const struct arg *args, struct sample *smp, const char *kw, void *private)
{
	smp->data.u.str.area = smp->sess->listener->name;
	if (!smp->data.u.str.area)
		return 0;

	smp->data.type = SMP_T_STR;
	smp->flags = SMP_F_CONST;
	smp->data.u.str.data = strlen(smp->data.u.str.area);
	return 1;
}
+
/* parse the "accept-proxy" bind keyword. Takes no argument; simply flags the
 * bind_conf so incoming connections are expected to start with a PROXY
 * protocol header. Never fails.
 */
static int bind_parse_accept_proxy(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	conf->options |= BC_O_ACC_PROXY;
	return 0;
}
+
+/* parse the "accept-netscaler-cip" bind keyword */
+static int bind_parse_accept_netscaler_cip(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ uint32_t val;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ val = atol(args[cur_arg + 1]);
+ if (val <= 0) {
+ memprintf(err, "'%s' : invalid value %d, must be >= 0", args[cur_arg], val);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->options |= BC_O_ACC_CIP;
+ conf->ns_cip_magic = val;
+ return 0;
+}
+
+/* parse the "backlog" bind keyword */
+static int bind_parse_backlog(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int val;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ val = atol(args[cur_arg + 1]);
+ if (val < 0) {
+ memprintf(err, "'%s' : invalid value %d, must be > 0", args[cur_arg], val);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->backlog = val;
+ return 0;
+}
+
/* parse the "id" bind keyword. Assigns a fixed, strictly positive and
 * process-wide unique identifier to the (single) listener of this bind line
 * and inserts it into the proxy's used_listener_id tree so duplicates are
 * rejected. Returns 0 on success, ERR_ALERT|ERR_FATAL with <err> filled
 * otherwise.
 */
static int bind_parse_id(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	struct eb32_node *node;
	struct listener *l, *new;
	char *error;

	/* "id" is ambiguous on a bind line carrying several addresses: the
	 * list must contain exactly one listener (head's next == prev).
	 */
	if (conf->listeners.n != conf->listeners.p) {
		memprintf(err, "'%s' can only be used with a single socket", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	if (!*args[cur_arg + 1]) {
		memprintf(err, "'%s' : expects an integer argument", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	new = LIST_NEXT(&conf->listeners, struct listener *, by_bind);
	new->luid = strtol(args[cur_arg + 1], &error, 10);
	if (*error != '\0') {
		/* trailing garbage after the number */
		memprintf(err, "'%s' : expects an integer argument, found '%s'", args[cur_arg], args[cur_arg + 1]);
		return ERR_ALERT | ERR_FATAL;
	}
	new->conf.id.key = new->luid;

	if (new->luid <= 0) {
		memprintf(err, "'%s' : custom id has to be > 0", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	/* reject an id already claimed by another listener of this proxy */
	node = eb32_lookup(&px->conf.used_listener_id, new->luid);
	if (node) {
		l = container_of(node, struct listener, conf.id);
		memprintf(err, "'%s' : custom id %d already used at %s:%d ('bind %s')",
			  args[cur_arg], l->luid, l->bind_conf->file, l->bind_conf->line,
			  l->bind_conf->arg);
		return ERR_ALERT | ERR_FATAL;
	}

	eb32_insert(&px->conf.used_listener_id, &new->conf.id);
	return 0;
}
+
/* Complete a bind_conf by parsing the args after the address. <args> is the
 * arguments array, <cur_arg> is the first one to be considered. <section> is
 * the section name to report in error messages, and <file> and <linenum> are
 * the file name and line number respectively. Note that args[0..1] are used
 * in error messages to provide some context. The return value is an error
 * code, zero on success or an OR of ERR_{FATAL,ABORT,ALERT,WARN}.
 */
int bind_parse_args_list(struct bind_conf *bind_conf, char **args, int cur_arg, const char *section, const char *file, int linenum)
{
	int err_code = 0;

	/* walk all remaining words on the line, one keyword (plus its
	 * kw->skip arguments) at a time.
	 */
	while (*(args[cur_arg])) {
		struct bind_kw *kw;
		const char *best;

		kw = bind_find_kw(args[cur_arg]);
		if (kw) {
			char *err = NULL;
			int code;

			/* keyword known but compiled out (NULL parser) */
			if (!kw->parse) {
				ha_alert("parsing [%s:%d] : '%s %s' in section '%s' : '%s' option is not implemented in this version (check build options).\n",
					 file, linenum, args[0], args[1], section, args[cur_arg]);
				cur_arg += 1 + kw->skip;
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}

			/* some keywords are meaningless on reverse-HTTP binds */
			if ((bind_conf->options & BC_O_REVERSE_HTTP) && !kw->rhttp_ok) {
				ha_alert("'%s' option is not accepted for reverse HTTP\n",
					 args[cur_arg]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}

			code = kw->parse(args, cur_arg, bind_conf->frontend, bind_conf, &err);
			err_code |= code;

			if (code) {
				/* the parser may have produced a detailed message in <err>;
				 * report it at the severity it indicated, otherwise emit a
				 * generic alert.
				 */
				if (err && *err) {
					indent_msg(&err, 2);
					if (((code & (ERR_WARN|ERR_ALERT)) == ERR_WARN))
						ha_warning("parsing [%s:%d] : '%s %s' in section '%s' : %s\n", file, linenum, args[0], args[1], section, err);
					else
						ha_alert("parsing [%s:%d] : '%s %s' in section '%s' : %s\n", file, linenum, args[0], args[1], section, err);
				}
				else
					ha_alert("parsing [%s:%d] : '%s %s' in section '%s' : error encountered while processing '%s'.\n",
						 file, linenum, args[0], args[1], section, args[cur_arg]);
				if (code & ERR_FATAL) {
					free(err);
					cur_arg += 1 + kw->skip;
					goto out;
				}
			}
			/* non-fatal (or no) error: release the message and move on
			 * past the keyword and its arguments.
			 */
			free(err);
			cur_arg += 1 + kw->skip;
			continue;
		}

		/* unknown keyword: try to suggest the closest known one */
		best = bind_find_best_kw(args[cur_arg]);
		if (best)
			ha_alert("parsing [%s:%d] : '%s %s' in section '%s': unknown keyword '%s'; did you mean '%s' maybe ?\n",
				 file, linenum, args[0], args[1], section, args[cur_arg], best);
		else
			ha_alert("parsing [%s:%d] : '%s %s' in section '%s': unknown keyword '%s'.\n",
				 file, linenum, args[0], args[1], section, args[cur_arg]);

		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}

	/* a bind line cannot mix datagram and stream at either the socket or
	 * the transport layer.
	 */
	if ((bind_conf->options & (BC_O_USE_SOCK_DGRAM|BC_O_USE_SOCK_STREAM)) == (BC_O_USE_SOCK_DGRAM|BC_O_USE_SOCK_STREAM) ||
	    (bind_conf->options & (BC_O_USE_XPRT_DGRAM|BC_O_USE_XPRT_STREAM)) == (BC_O_USE_XPRT_DGRAM|BC_O_USE_XPRT_STREAM)) {
		ha_alert("parsing [%s:%d] : '%s %s' in section '%s' : cannot mix datagram and stream protocols.\n",
			 file, linenum, args[0], args[1], section);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}

	/* The transport layer automatically switches to QUIC when QUIC is
	 * selected, regardless of bind_conf settings. We then need to
	 * initialize QUIC params.
	 */
	if ((bind_conf->options & (BC_O_USE_SOCK_DGRAM|BC_O_USE_XPRT_STREAM)) == (BC_O_USE_SOCK_DGRAM|BC_O_USE_XPRT_STREAM)) {
#ifdef USE_QUIC
		bind_conf->xprt = xprt_get(XPRT_QUIC);
		if (!(bind_conf->options & BC_O_USE_SSL)) {
			bind_conf->options |= BC_O_USE_SSL;
			ha_warning("parsing [%s:%d] : '%s %s' in section '%s' : QUIC protocol detected, enabling ssl. Use 'ssl' to shut this warning.\n",
				   file, linenum, args[0], args[1], section);
		}
		quic_transport_params_init(&bind_conf->quic_params, 1);
#else
		ha_alert("parsing [%s:%d] : '%s %s' in section '%s' : QUIC protocol selected but support not compiled in (check build options).\n",
			 file, linenum, args[0], args[1], section);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
#endif
	}
	else if (bind_conf->options & BC_O_USE_SSL) {
		bind_conf->xprt = xprt_get(XPRT_SSL);
	}

 out:
	return err_code;
}
+
+/* parse the "maxconn" bind keyword */
+static int bind_parse_maxconn(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int val;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ val = atol(args[cur_arg + 1]);
+ if (val < 0) {
+ memprintf(err, "'%s' : invalid value %d, must be >= 0", args[cur_arg], val);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->maxconn = val;
+ return 0;
+}
+
+/* parse the "name" bind keyword */
+static int bind_parse_name(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ struct listener *l;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing name", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ list_for_each_entry(l, &conf->listeners, by_bind)
+ l->name = strdup(args[cur_arg + 1]);
+
+ return 0;
+}
+
/* parse the "nbconn" bind keyword. Sets the number of connections to
 * pre-establish for a reverse-HTTP bind line. The keyword is experimental
 * and only valid on reverse-HTTP listeners; the value must be strictly
 * positive. Returns 0 on success, ERR_ALERT|ERR_FATAL with <err> filled
 * otherwise.
 */
static int bind_parse_nbconn(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	int val;
	const struct listener *l;

	/* TODO duplicated code from check_kw_experimental() */
	if (!experimental_directives_allowed) {
		memprintf(err, "'%s' is experimental, must be allowed via a global 'expose-experimental-directives'",
			  args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}
	/* flag the config as relying on experimental features */
	mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);

	/* only meaningful on reverse-HTTP ("rhttp@") addresses */
	l = LIST_NEXT(&conf->listeners, struct listener *, by_bind);
	if (l->rx.addr.ss_family != AF_CUST_RHTTP_SRV) {
		memprintf(err, "'%s' : only valid for reverse HTTP listeners.", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	if (!*args[cur_arg + 1]) {
		memprintf(err, "'%s' : missing value.", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	val = atol(args[cur_arg + 1]);
	if (val <= 0) {
		memprintf(err, "'%s' : invalid value %d, must be > 0.", args[cur_arg], val);
		return ERR_ALERT | ERR_FATAL;
	}

	conf->rhttp_nbconn = val;
	return 0;
}
+
+/* parse the "nice" bind keyword */
+static int bind_parse_nice(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int val;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ val = atol(args[cur_arg + 1]);
+ if (val < -1024 || val > 1024) {
+ memprintf(err, "'%s' : invalid value %d, allowed range is -1024..1024", args[cur_arg], val);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ conf->nice = val;
+ return 0;
+}
+
/* parse the "process" bind keyword. The keyword was removed; this stub only
 * exists to emit a helpful migration message pointing to "thread". Always
 * fails with ERR_ALERT|ERR_FATAL.
 */
static int bind_parse_process(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	memprintf(err, "'process %s' on 'bind' lines is not supported anymore, please use 'thread' instead.", args[cur_arg+1]);
	return ERR_ALERT | ERR_FATAL;
}
+
+/* parse the "proto" bind keyword */
+static int bind_parse_proto(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ struct ist proto;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ proto = ist(args[cur_arg + 1]);
+ conf->mux_proto = get_mux_proto(proto);
+ if (!conf->mux_proto) {
+ memprintf(err, "'%s' : unknown MUX protocol '%s'", args[cur_arg], args[cur_arg+1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ return 0;
+}
+
+/* parse the "shards" bind keyword. Takes an integer, "by-thread", or "by-group" */
+static int bind_parse_shards(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
+{
+ int val;
+
+ if (!*args[cur_arg + 1]) {
+ memprintf(err, "'%s' : missing value", args[cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[cur_arg + 1], "by-thread") == 0) {
+ val = -1; /* -1 = "by-thread", will be fixed in check_config_validity() */
+ } else if (strcmp(args[cur_arg + 1], "by-group") == 0) {
+ val = -2; /* -2 = "by-group", will be fixed in check_config_validity() */
+ } else {
+ val = atol(args[cur_arg + 1]);
+ if (val < 1 || val > MAX_THREADS) {
+ memprintf(err, "'%s' : invalid value %d, allowed range is %d..%d or 'by-thread'", args[cur_arg], val, 1, MAX_THREADS);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ conf->settings.shards = val;
+ return 0;
+}
+
/* parse the "thread" bind keyword. This will replace any preset thread_set.
 * The argument is parsed into conf->thread_set; reverse-HTTP bind lines are
 * additionally restricted to a single thread group. Returns 0 on success,
 * ERR_ALERT|ERR_FATAL with <err> filled otherwise.
 */
static int bind_parse_thread(char **args, int cur_arg, struct proxy *px, struct bind_conf *conf, char **err)
{
	const struct listener *l;

	/* note that the thread set is zeroed before first call, and we don't
	 * want to reset it so that it remains possible to chain multiple
	 * "thread" directives.
	 */
	if (parse_thread_set(args[cur_arg+1], &conf->thread_set, err) < 0)
		return ERR_ALERT | ERR_FATAL;

	/* reverse-HTTP listeners cannot span several thread groups */
	l = LIST_NEXT(&conf->listeners, struct listener *, by_bind);
	if (l->rx.addr.ss_family == AF_CUST_RHTTP_SRV &&
	    atleast2(conf->thread_set.grps)) {
		memprintf(err, "'%s' : reverse HTTP bind cannot span multiple thread groups.", args[cur_arg]);
		return ERR_ALERT | ERR_FATAL;
	}

	return 0;
}
+
+/* config parser for global "tune.listener.default-shards" */
+static int cfg_parse_tune_listener_shards(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "by-thread") == 0)
+ global.tune.default_shards = -1;
+ else if (strcmp(args[1], "by-group") == 0)
+ global.tune.default_shards = -2;
+ else if (strcmp(args[1], "by-process") == 0)
+ global.tune.default_shards = 1;
+ else {
+ memprintf(err, "'%s' expects either 'by-process', 'by-group', or 'by-thread' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+/* config parser for global "tune.listener.multi-queue", accepts "on", "fair" or "off" */
+static int cfg_parse_tune_listener_mq(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global.tune.options = (global.tune.options & ~GTUNE_LISTENER_MQ_ANY) | GTUNE_LISTENER_MQ_OPT;
+ else if (strcmp(args[1], "fair") == 0)
+ global.tune.options = (global.tune.options & ~GTUNE_LISTENER_MQ_ANY) | GTUNE_LISTENER_MQ_FAIR;
+ else if (strcmp(args[1], "off") == 0)
+ global.tune.options &= ~GTUNE_LISTENER_MQ_ANY;
+ else {
+ memprintf(err, "'%s' expects either 'on', 'fair', or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 * Sample fetch keywords exposing listener-level information.
 */
static struct sample_fetch_kw_list smp_kws = {ILH, {
	{ "dst_conn", smp_fetch_dconn, 0, NULL, SMP_T_SINT, SMP_USE_FTEND, },
	{ "so_id",    smp_fetch_so_id, 0, NULL, SMP_T_SINT, SMP_USE_FTEND, },
	{ "so_name",  smp_fetch_so_name, 0, NULL, SMP_T_STR, SMP_USE_FTEND, },
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted.
 * No listener-specific ACL keywords for now; the empty list still has to be
 * registered so future additions plug in here.
 */
static struct acl_kw_list acl_kws = {ILH, {
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);

/* Note: must not be declared <const> as its list will be overwritten.
 * Please take care of keeping this list alphabetically sorted, doing so helps
 * all code contributors.
 * Optional keywords are also declared with a NULL ->parse() function so that
 * the config parser can report an appropriate error when a known keyword was
 * not enabled.
 * Fields per entry: keyword, parser, number of extra args to skip, rhttp_ok.
 */
static struct bind_kw_list bind_kws = { "ALL", { }, {
	{ "accept-netscaler-cip", bind_parse_accept_netscaler_cip, 1, 0 }, /* enable NetScaler Client IP insertion protocol */
	{ "accept-proxy", bind_parse_accept_proxy, 0, 0 }, /* enable PROXY protocol */
	{ "backlog", bind_parse_backlog, 1, 0 }, /* set backlog of listening socket */
	{ "id", bind_parse_id, 1, 1 }, /* set id of listening socket */
	{ "maxconn", bind_parse_maxconn, 1, 0 }, /* set maxconn of listening socket */
	{ "name", bind_parse_name, 1, 1 }, /* set name of listening socket */
	{ "nbconn", bind_parse_nbconn, 1, 1 }, /* set number of connection on active preconnect */
	{ "nice", bind_parse_nice, 1, 0 }, /* set nice of listening socket */
	{ "process", bind_parse_process, 1, 0 }, /* set list of allowed process for this socket */
	{ "proto", bind_parse_proto, 1, 0 }, /* set the proto to use for all incoming connections */
	{ "shards", bind_parse_shards, 1, 0 }, /* set number of shards */
	{ "thread", bind_parse_thread, 1, 1 }, /* set list of allowed threads for this socket */
	{ /* END */ },
}};

INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);

/* config keyword parsers for the global section */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.listener.default-shards", cfg_parse_tune_listener_shards },
	{ CFG_GLOBAL, "tune.listener.multi-queue",    cfg_parse_tune_listener_mq },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/log.c b/src/log.c
new file mode 100644
index 0000000..010ace9
--- /dev/null
+++ b/src/log.c
@@ -0,0 +1,4659 @@
+/*
+ * General logging functions.
+ *
+ * Copyright 2000-2008 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <time.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <sys/time.h>
+#include <sys/uio.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/sink.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/action.h>
+#include <haproxy/time.h>
+#include <haproxy/hash.h>
+#include <haproxy/tools.h>
+
+/* global recv logs counter */
+int cum_log_messages;
+
+/* log forward proxy list */
+struct proxy *cfg_log_forward;
+
+/* associates a syslog output format with its configuration-file name */
+struct log_fmt_st {
+ char *name;
+};
+
+/* maps each LOG_FORMAT_* identifier to the name used in the configuration */
+static const struct log_fmt_st log_formats[LOG_FORMATS] = {
+ [LOG_FORMAT_LOCAL] = {
+ .name = "local",
+ },
+ [LOG_FORMAT_RFC3164] = {
+ .name = "rfc3164",
+ },
+ [LOG_FORMAT_RFC5424] = {
+ .name = "rfc5424",
+ },
+ [LOG_FORMAT_PRIO] = {
+ .name = "priority",
+ },
+ [LOG_FORMAT_SHORT] = {
+ .name = "short",
+ },
+ [LOG_FORMAT_TIMED] = {
+ .name = "timed",
+ },
+ [LOG_FORMAT_ISO] = {
+ .name = "iso",
+ },
+ [LOG_FORMAT_RAW] = {
+ .name = "raw",
+ },
+};
+
+/*
+ * This map is used with all the FD_* macros to check whether a particular bit
+ * is set or not. Each bit represents an ASCII code. ha_bit_set() sets those
+ * bytes which should be escaped. When ha_bit_test() returns non-zero, it means
+ * that the byte should be escaped. Be careful to always pass only bytes in the
+ * 0 to 255 range (inclusive) to the macros.
+ */
+long rfc5424_escape_map[(256/8) / sizeof(long)];
+long hdr_encode_map[(256/8) / sizeof(long)];
+long url_encode_map[(256/8) / sizeof(long)];
+long http_encode_map[(256/8) / sizeof(long)];
+
+
+/* human-readable names for the standard syslog facilities, indexed by code */
+const char *log_facilities[NB_LOG_FACILITIES] = {
+ "kern", "user", "mail", "daemon",
+ "auth", "syslog", "lpr", "news",
+ "uucp", "cron", "auth2", "ftp",
+ "ntp", "audit", "alert", "cron2",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"
+};
+
+/* human-readable names for the standard syslog severities, indexed by code */
+const char *log_levels[NB_LOG_LEVELS] = {
+ "emerg", "alert", "crit", "err",
+ "warning", "notice", "info", "debug"
+};
+
+/* NOTE(review): the comment lists 13 meanings for the 16 slots below; the
+ * trailing 'I' entries presumably map unused codes to "Internal" -- confirm. */
+const char sess_term_cond[16] = "-LcCsSPRIDKUIIII"; /* normal, Local, CliTo, CliErr, SrvTo, SrvErr, PxErr, Resource, Internal, Down, Killed, Up, -- */
+const char sess_fin_state[8] = "-RCHDLQT"; /* cliRequest, srvConnect, srvHeader, Data, Last, Queue, Tarpit */
+
+
+/* log_format */
+/* describes one %var keyword of the log-format language */
+struct logformat_type {
+ char *name;
+ int type;
+ int mode;
+ int lw; /* logwait bitsfield */
+ int (*config_callback)(struct logformat_node *node, struct proxy *curproxy);
+};
+
+int prepare_addrsource(struct logformat_node *node, struct proxy *curproxy);
+
+/* log_format variable names */
+static const struct logformat_type logformat_keywords[] = {
+ { "o", LOG_FMT_GLOBAL, PR_MODE_TCP, 0, NULL }, /* global option */
+
+ /* please keep these lines sorted ! */
+ { "B", LOG_FMT_BYTES, PR_MODE_TCP, LW_BYTES, NULL }, /* bytes from server to client */
+ { "CC", LOG_FMT_CCLIENT, PR_MODE_HTTP, LW_REQHDR, NULL }, /* client cookie */
+ { "CS", LOG_FMT_CSERVER, PR_MODE_HTTP, LW_RSPHDR, NULL }, /* server cookie */
+ { "H", LOG_FMT_HOSTNAME, PR_MODE_TCP, LW_INIT, NULL }, /* Hostname */
+ { "ID", LOG_FMT_UNIQUEID, PR_MODE_TCP, LW_BYTES, NULL }, /* Unique ID */
+ { "ST", LOG_FMT_STATUS, PR_MODE_TCP, LW_RESP, NULL }, /* status code */
+ { "T", LOG_FMT_DATEGMT, PR_MODE_TCP, LW_INIT, NULL }, /* date GMT */
+ { "Ta", LOG_FMT_Ta, PR_MODE_HTTP, LW_BYTES, NULL }, /* Time active (tr to end) */
+ { "Tc", LOG_FMT_TC, PR_MODE_TCP, LW_BYTES, NULL }, /* Tc */
+ { "Th", LOG_FMT_Th, PR_MODE_TCP, LW_BYTES, NULL }, /* Time handshake */
+ { "Ti", LOG_FMT_Ti, PR_MODE_HTTP, LW_BYTES, NULL }, /* Time idle */
+ { "Tl", LOG_FMT_DATELOCAL, PR_MODE_TCP, LW_INIT, NULL }, /* date local timezone */
+ { "Tq", LOG_FMT_TQ, PR_MODE_HTTP, LW_BYTES, NULL }, /* Tq=Th+Ti+TR */
+ { "Tr", LOG_FMT_Tr, PR_MODE_HTTP, LW_BYTES, NULL }, /* Tr */
+ { "TR", LOG_FMT_TR, PR_MODE_HTTP, LW_BYTES, NULL }, /* Time to receive a valid request */
+ { "Td", LOG_FMT_TD, PR_MODE_TCP, LW_BYTES, NULL }, /* Td = Tt - (Tq + Tw + Tc + Tr) */
+ { "Ts", LOG_FMT_TS, PR_MODE_TCP, LW_INIT, NULL }, /* timestamp GMT */
+ { "Tt", LOG_FMT_TT, PR_MODE_TCP, LW_BYTES, NULL }, /* Tt */
+ { "Tu", LOG_FMT_TU, PR_MODE_TCP, LW_BYTES, NULL }, /* Tu = Tt -Ti */
+ { "Tw", LOG_FMT_TW, PR_MODE_TCP, LW_BYTES, NULL }, /* Tw */
+ { "U", LOG_FMT_BYTES_UP, PR_MODE_TCP, LW_BYTES, NULL }, /* bytes from client to server */
+ { "ac", LOG_FMT_ACTCONN, PR_MODE_TCP, LW_BYTES, NULL }, /* actconn */
+ { "b", LOG_FMT_BACKEND, PR_MODE_TCP, LW_INIT, NULL }, /* backend */
+ { "bc", LOG_FMT_BECONN, PR_MODE_TCP, LW_BYTES, NULL }, /* beconn */
+ { "bi", LOG_FMT_BACKENDIP, PR_MODE_TCP, LW_BCKIP, prepare_addrsource }, /* backend source ip */
+ { "bp", LOG_FMT_BACKENDPORT, PR_MODE_TCP, LW_BCKIP, prepare_addrsource }, /* backend source port */
+ { "bq", LOG_FMT_BCKQUEUE, PR_MODE_TCP, LW_BYTES, NULL }, /* backend_queue */
+ { "ci", LOG_FMT_CLIENTIP, PR_MODE_TCP, LW_CLIP | LW_XPRT, NULL }, /* client ip */
+ { "cp", LOG_FMT_CLIENTPORT, PR_MODE_TCP, LW_CLIP | LW_XPRT, NULL }, /* client port */
+ { "f", LOG_FMT_FRONTEND, PR_MODE_TCP, LW_INIT, NULL }, /* frontend */
+ { "fc", LOG_FMT_FECONN, PR_MODE_TCP, LW_BYTES, NULL }, /* feconn */
+ { "fi", LOG_FMT_FRONTENDIP, PR_MODE_TCP, LW_FRTIP | LW_XPRT, NULL }, /* frontend ip */
+ { "fp", LOG_FMT_FRONTENDPORT, PR_MODE_TCP, LW_FRTIP | LW_XPRT, NULL }, /* frontend port */
+ { "ft", LOG_FMT_FRONTEND_XPRT, PR_MODE_TCP, LW_INIT, NULL }, /* frontend with transport mode */
+ { "hr", LOG_FMT_HDRREQUEST, PR_MODE_TCP, LW_REQHDR, NULL }, /* header request */
+ { "hrl", LOG_FMT_HDRREQUESTLIST, PR_MODE_TCP, LW_REQHDR, NULL }, /* header request list */
+ { "hs", LOG_FMT_HDRRESPONS, PR_MODE_TCP, LW_RSPHDR, NULL }, /* header response */
+ { "hsl", LOG_FMT_HDRRESPONSLIST, PR_MODE_TCP, LW_RSPHDR, NULL }, /* header response list */
+ { "HM", LOG_FMT_HTTP_METHOD, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP method */
+ { "HP", LOG_FMT_HTTP_PATH, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP relative or absolute path */
+ { "HPO", LOG_FMT_HTTP_PATH_ONLY, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP path only (without host nor query string) */
+ { "HQ", LOG_FMT_HTTP_QUERY, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP query */
+ { "HU", LOG_FMT_HTTP_URI, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP full URI */
+ { "HV", LOG_FMT_HTTP_VERSION, PR_MODE_HTTP, LW_REQ, NULL }, /* HTTP version */
+ { "lc", LOG_FMT_LOGCNT, PR_MODE_TCP, LW_INIT, NULL }, /* log counter */
+ { "ms", LOG_FMT_MS, PR_MODE_TCP, LW_INIT, NULL }, /* accept date millisecond */
+ { "pid", LOG_FMT_PID, PR_MODE_TCP, LW_INIT, NULL }, /* log pid */
+ { "r", LOG_FMT_REQ, PR_MODE_HTTP, LW_REQ, NULL }, /* request */
+ { "rc", LOG_FMT_RETRIES, PR_MODE_TCP, LW_BYTES, NULL }, /* retries */
+ { "rt", LOG_FMT_COUNTER, PR_MODE_TCP, LW_REQ, NULL }, /* request counter (HTTP or TCP session) */
+ { "s", LOG_FMT_SERVER, PR_MODE_TCP, LW_SVID, NULL }, /* server */
+ { "sc", LOG_FMT_SRVCONN, PR_MODE_TCP, LW_BYTES, NULL }, /* srv_conn */
+ { "si", LOG_FMT_SERVERIP, PR_MODE_TCP, LW_SVIP, NULL }, /* server destination ip */
+ { "sp", LOG_FMT_SERVERPORT, PR_MODE_TCP, LW_SVIP, NULL }, /* server destination port */
+ { "sq", LOG_FMT_SRVQUEUE, PR_MODE_TCP, LW_BYTES, NULL }, /* srv_queue */
+ { "sslc", LOG_FMT_SSL_CIPHER, PR_MODE_TCP, LW_XPRT, NULL }, /* client-side SSL ciphers */
+ { "sslv", LOG_FMT_SSL_VERSION, PR_MODE_TCP, LW_XPRT, NULL }, /* client-side SSL protocol version */
+ { "t", LOG_FMT_DATE, PR_MODE_TCP, LW_INIT, NULL }, /* date */
+ { "tr", LOG_FMT_tr, PR_MODE_HTTP, LW_INIT, NULL }, /* date of start of request */
+ { "trg",LOG_FMT_trg, PR_MODE_HTTP, LW_INIT, NULL }, /* date of start of request, GMT */
+ { "trl",LOG_FMT_trl, PR_MODE_HTTP, LW_INIT, NULL }, /* date of start of request, local */
+ { "ts", LOG_FMT_TERMSTATE, PR_MODE_TCP, LW_BYTES, NULL },/* termination state */
+ { "tsc", LOG_FMT_TERMSTATE_CK, PR_MODE_TCP, LW_INIT, NULL },/* termination state */
+ { 0, 0, 0, 0, NULL }
+};
+
+/* built-in log-format strings selected by configuration keywords; they are
+ * parsed by parse_logformat_string() like any user-provided format. */
+char httpclient_log_format[] = "%ci:%cp [%tr] %ft -/- %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r";
+char default_http_log_format[] = "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"; // default format
+char default_https_log_format[] = "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r %[fc_err]/%[ssl_fc_err,hex]/%[ssl_c_err]/%[ssl_c_ca_err]/%[ssl_fc_is_resumed] %[ssl_fc_sni]/%sslv/%sslc";
+char clf_http_log_format[] = "%{+Q}o %{-Q}ci - - [%trg] %r %ST %B \"\" \"\" %cp %ms %ft %b %s %TR %Tw %Tc %Tr %Ta %tsc %ac %fc %bc %sc %rc %sq %bq %CC %CS %hrl %hsl";
+char default_tcp_log_format[] = "%ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts %ac/%fc/%bc/%sc/%rc %sq/%bq";
+char *log_format = NULL;
+
+/* Default string used for structured-data part in RFC5424 formatted
+ * syslog messages.
+ */
+char default_rfc5424_sd_log_format[] = "- ";
+
+/* total number of dropped logs */
+unsigned int dropped_logs = 0;
+
+/* This is a global syslog message buffer, common to all outgoing
+ * messages. It contains only the data part.
+ */
+THREAD_LOCAL char *logline = NULL;
+
+/* A global syslog message buffer, common to all RFC5424 syslog messages.
+ * Currently, it is used for generating the structured-data part.
+ */
+THREAD_LOCAL char *logline_rfc5424 = NULL;
+
+/* associates a single-letter %{...} argument modifier with its option bit */
+struct logformat_var_args {
+ char *name;
+ int mask;
+};
+
+/* modifiers accepted in %{...} arguments; consumed by
+ * parse_logformat_var_args() with a '+' (set) or '-' (clear) prefix. */
+struct logformat_var_args var_args_list[] = {
+// global
+ { "M", LOG_OPT_MANDATORY },
+ { "Q", LOG_OPT_QUOTE },
+ { "X", LOG_OPT_HEXA },
+ { "E", LOG_OPT_ESC },
+ { 0, 0 }
+};
+
+/*
+ * callback used to configure addr source retrieval
+ */
+/* config_callback for the %bi/%bp keywords: flags the proxy so that the
+ * backend source address gets retrieved at runtime. Always returns 0
+ * (success); <node> is unused. */
+int prepare_addrsource(struct logformat_node *node, struct proxy *curproxy)
+{
+ curproxy->options2 |= PR_O2_SRC_ADDR;
+
+ return 0;
+}
+
+
+/*
+ * Parse args in a logformat_var. Returns 0 in error
+ * case, otherwise, it returns 1.
+ *
+ * <args> is the content of a %{...} block, i.e. a comma-separated list of
+ * '+NAME' / '-NAME' modifiers looked up in var_args_list[]. The string is
+ * modified in place (commas are overwritten with NULs). Unknown names and
+ * names without a '+'/'-' prefix are silently ignored.
+ */
+int parse_logformat_var_args(char *args, struct logformat_node *node, char **err)
+{
+ int i = 0;
+ int end = 0;
+ int flags = 0; // 1 = + 2 = -
+ char *sp = NULL; // start pointer
+
+ if (args == NULL) {
+ memprintf(err, "internal error: parse_logformat_var_args() expects non null 'args'");
+ return 0;
+ }
+
+ while (1) {
+ if (*args == '\0')
+ end = 1;
+
+ if (*args == '+') {
+ // add flag
+ sp = args + 1;
+ flags = 1;
+ }
+ if (*args == '-') {
+ // delete flag
+ sp = args + 1;
+ flags = 2;
+ }
+
+ /* a comma or the trailing NUL terminates the current token */
+ if (*args == '\0' || *args == ',') {
+ *args = '\0';
+ for (i = 0; sp && var_args_list[i].name; i++) {
+ if (strcmp(sp, var_args_list[i].name) == 0) {
+ if (flags == 1) {
+ node->options |= var_args_list[i].mask;
+ break;
+ } else if (flags == 2) {
+ node->options &= ~var_args_list[i].mask;
+ break;
+ }
+ }
+ }
+ sp = NULL;
+ if (end)
+ break;
+ }
+ args++;
+ }
+ return 1;
+}
+
+/*
+ * Parse a variable '%varname' or '%{args}varname' in log-format. The caller
+ * must pass the args part in the <arg> pointer with its length in <arg_len>,
+ * and varname with its length in <var> and <var_len> respectively. <arg> is
+ * ignored when arg_len is 0. Neither <var> nor <var_len> may be null.
+ * Returns false in error case and err is filled, otherwise returns true.
+ *
+ * On success the new node is appended to <list_format> and owned by it,
+ * except for the pseudo-variable %o (LOG_FMT_GLOBAL) which only updates
+ * <defoptions> and is freed immediately.
+ */
+int parse_logformat_var(char *arg, int arg_len, char *var, int var_len, struct proxy *curproxy, struct list *list_format, int *defoptions, char **err)
+{
+ int j;
+ struct logformat_node *node = NULL;
+
+ for (j = 0; logformat_keywords[j].name; j++) { // search a log type
+ if (strlen(logformat_keywords[j].name) == var_len &&
+ strncmp(var, logformat_keywords[j].name, var_len) == 0) {
+ if (logformat_keywords[j].mode != PR_MODE_HTTP || curproxy->mode == PR_MODE_HTTP) {
+ node = calloc(1, sizeof(*node));
+ if (!node) {
+ memprintf(err, "out of memory error");
+ goto error_free;
+ }
+ node->type = logformat_keywords[j].type;
+ node->options = *defoptions;
+ if (arg_len) {
+ node->arg = my_strndup(arg, arg_len);
+ if (!parse_logformat_var_args(node->arg, node, err))
+ goto error_free;
+ }
+ if (node->type == LOG_FMT_GLOBAL) {
+ /* %o only changes the default options for the rest of the string */
+ *defoptions = node->options;
+ free(node->arg);
+ free(node);
+ } else {
+ if (logformat_keywords[j].config_callback &&
+ logformat_keywords[j].config_callback(node, curproxy) != 0) {
+ goto error_free;
+ }
+ curproxy->to_log |= logformat_keywords[j].lw;
+ LIST_APPEND(list_format, &node->list);
+ }
+ return 1;
+ } else {
+ memprintf(err, "format variable '%s' is reserved for HTTP mode",
+ logformat_keywords[j].name);
+ goto error_free;
+ }
+ }
+ }
+
+ /* temporarily NUL-terminate <var> to report it, then restore the byte */
+ j = var[var_len];
+ var[var_len] = 0;
+ memprintf(err, "no such format variable '%s'. If you wanted to emit the '%%' character verbatim, you need to use '%%%%'", var);
+ var[var_len] = j;
+
+ error_free:
+ /* node may be NULL when we fall through from the lookup failure above */
+ if (node) {
+ free(node->arg);
+ free(node);
+ }
+ return 0;
+}
+
+/*
+ * push to the logformat linked list
+ *
+ * start: start pointer
+ * end: end text pointer
+ * type: string type
+ * list_format: destination list
+ *
+ * LOG_TEXT: copy chars from start to end excluding end.
+ *
+ * Returns 1 on success, 0 on allocation failure (and fills <err>).
+ *
+*/
+int add_to_logformat_list(char *start, char *end, int type, struct list *list_format, char **err)
+{
+ char *str;
+
+ if (type == LF_TEXT) { /* type text */
+ struct logformat_node *node = calloc(1, sizeof(*node));
+ if (!node) {
+ memprintf(err, "out of memory error");
+ return 0;
+ }
+ /* the string copy must be checked too: the previous code passed an
+ * unchecked calloc() result to strncpy(), which would dereference
+ * NULL on allocation failure and also leak <node>.
+ */
+ str = calloc(1, end - start + 1);
+ if (!str) {
+ free(node);
+ memprintf(err, "out of memory error");
+ return 0;
+ }
+ memcpy(str, start, end - start);
+ str[end - start] = '\0';
+ node->arg = str;
+ node->type = LOG_FMT_TEXT; // type string
+ LIST_APPEND(list_format, &node->list);
+ } else if (type == LF_SEPARATOR) {
+ struct logformat_node *node = calloc(1, sizeof(*node));
+ if (!node) {
+ memprintf(err, "out of memory error");
+ return 0;
+ }
+ node->type = LOG_FMT_SEPARATOR;
+ LIST_APPEND(list_format, &node->list);
+ }
+ return 1;
+}
+
+/*
+ * Parse the sample fetch expression <text> and add a node to <list_format> upon
+ * success. At the moment, sample converters are not yet supported but fetch arguments
+ * should work. The curpx->conf.args.ctx must be set by the caller. If an end pointer
+ * is passed in <endptr>, it will be updated with the pointer to the first character
+ * not part of the sample expression.
+ *
+ * In error case, the function returns 0, otherwise it returns 1.
+ */
+int add_sample_to_logformat_list(char *text, char *arg, int arg_len, struct proxy *curpx, struct list *list_format, int options, int cap, char **err, char **endptr)
+{
+ char *cmd[2];
+ struct sample_expr *expr = NULL;
+ struct logformat_node *node = NULL;
+ int cmd_arg;
+
+ /* build a one-element argv for sample_parse_expr() */
+ cmd[0] = text;
+ cmd[1] = "";
+ cmd_arg = 0;
+
+ expr = sample_parse_expr(cmd, &cmd_arg, curpx->conf.args.file, curpx->conf.args.line, err,
+ &curpx->conf.args, endptr);
+ if (!expr) {
+ memprintf(err, "failed to parse sample expression <%s> : %s", text, *err);
+ goto error_free;
+ }
+
+ node = calloc(1, sizeof(*node));
+ if (!node) {
+ memprintf(err, "out of memory error");
+ goto error_free;
+ }
+ node->type = LOG_FMT_EXPR;
+ node->expr = expr;
+ node->options = options;
+
+ if (arg_len) {
+ node->arg = my_strndup(arg, arg_len);
+ if (!parse_logformat_var_args(node->arg, node, err))
+ goto error_free;
+ }
+ if (expr->fetch->val & cap & SMP_VAL_REQUEST)
+ node->options |= LOG_OPT_REQ_CAP; /* fetch method is request-compatible */
+
+ if (expr->fetch->val & cap & SMP_VAL_RESPONSE)
+ node->options |= LOG_OPT_RES_CAP; /* fetch method is response-compatible */
+
+ if (!(expr->fetch->val & cap)) {
+ memprintf(err, "sample fetch <%s> may not be reliably used here because it needs '%s' which is not available here",
+ text, sample_src_names(expr->fetch->use));
+ goto error_free;
+ }
+
+ /* non-fatal: L6 fetches make no sense once HTTP parsing consumed the data */
+ if ((options & LOG_OPT_HTTP) && (expr->fetch->use & (SMP_USE_L6REQ|SMP_USE_L6RES))) {
+ ha_warning("parsing [%s:%d] : L6 sample fetch <%s> ignored in HTTP log-format string.\n",
+ curpx->conf.args.file, curpx->conf.args.line, text);
+ }
+
+ /* check if we need to allocate an http_txn struct for HTTP parsing */
+ /* Note, we may also need to set curpx->to_log with certain fetches */
+ curpx->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
+
+ /* FIXME: temporary workaround for missing LW_XPRT and LW_REQ flags
+ * needed with some sample fetches (eg: ssl*). We always set it for
+ * now on, but this will leave with sample capabilities soon.
+ */
+ curpx->to_log |= LW_XPRT;
+ if (curpx->http_needed)
+ curpx->to_log |= LW_REQ;
+ LIST_APPEND(list_format, &node->list);
+ return 1;
+
+ error_free:
+ /* release_sample_expr() and free() both tolerate NULL partial state */
+ release_sample_expr(expr);
+ if (node) {
+ free(node->arg);
+ free(node);
+ }
+ return 0;
+}
+
+/*
+ * Parse the log_format string and fill a linked list.
+ * Variable name are preceded by % and composed by characters [a-zA-Z0-9]* : %varname
+ * You can set arguments using { } : %{many arguments}varname.
+ * The curproxy->conf.args.ctx must be set by the caller.
+ *
+ * fmt: the string to parse
+ * curproxy: the proxy affected
+ * list_format: the destination list
+ * options: LOG_OPT_* to force on every node
+ * cap: all SMP_VAL_* flags supported by the consumer
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is filled.
+ *
+ * Note: any pre-existing content of <list_format> is released first, so the
+ * function can safely be used to replace a previously parsed format.
+ */
+int parse_logformat_string(const char *fmt, struct proxy *curproxy, struct list *list_format, int options, int cap, char **err)
+{
+ char *sp, *str, *backfmt; /* start pointer for text parts */
+ char *arg = NULL; /* start pointer for args */
+ char *var = NULL; /* start pointer for vars */
+ int arg_len = 0;
+ int var_len = 0;
+ int cformat; /* current token format */
+ int pformat; /* previous token format */
+ struct logformat_node *tmplf, *back;
+
+ /* work on a private copy: the parser writes NUL bytes into the string */
+ sp = str = backfmt = strdup(fmt);
+ if (!str) {
+ memprintf(err, "out of memory error");
+ return 0;
+ }
+ curproxy->to_log |= LW_INIT;
+
+ /* flush the list first. */
+ list_for_each_entry_safe(tmplf, back, list_format, list) {
+ LIST_DELETE(&tmplf->list);
+ release_sample_expr(tmplf->expr);
+ free(tmplf->arg);
+ free(tmplf);
+ }
+
+ for (cformat = LF_INIT; cformat != LF_END; str++) {
+ pformat = cformat;
+
+ if (!*str)
+ cformat = LF_END; // preset it to save all states from doing this
+
+ /* The principle of the two-step state machine below is to first detect a change, and
+ * second have all common paths processed at one place. The common paths are the ones
+ * encountered in text areas (LF_INIT, LF_TEXT, LF_SEPARATOR) and at the end (LF_END).
+ * We use the common LF_INIT state to dispatch to the different final states.
+ */
+ switch (pformat) {
+ case LF_STARTVAR: // text immediately following a '%'
+ arg = NULL; var = NULL;
+ arg_len = var_len = 0;
+ if (*str == '{') { // optional argument
+ cformat = LF_STARG;
+ arg = str + 1;
+ }
+ else if (*str == '[') {
+ cformat = LF_STEXPR;
+ var = str + 1; // store expr in variable name
+ }
+ else if (isalpha((unsigned char)*str)) { // variable name
+ cformat = LF_VAR;
+ var = str;
+ }
+ else if (*str == '%')
+ cformat = LF_TEXT; // convert this character to a literal (useful for '%')
+ else if (isdigit((unsigned char)*str) || *str == ' ' || *str == '\t') {
+ /* single '%' followed by blank or digit, send them both */
+ cformat = LF_TEXT;
+ pformat = LF_TEXT; /* finally we include the previous char as well */
+ sp = str - 1; /* send both the '%' and the current char */
+ memprintf(err, "unexpected variable name near '%c' at position %d line : '%s'. Maybe you want to write a single '%%', use the syntax '%%%%'",
+ *str, (int)(str - backfmt), fmt);
+ goto fail;
+
+ }
+ else
+ cformat = LF_INIT; // handle other cases of literals
+ break;
+
+ case LF_STARG: // text immediately following '%{'
+ if (*str == '}') { // end of arg
+ cformat = LF_EDARG;
+ arg_len = str - arg;
+ *str = 0; // used for reporting errors
+ }
+ break;
+
+ case LF_EDARG: // text immediately following '%{arg}'
+ if (*str == '[') {
+ cformat = LF_STEXPR;
+ var = str + 1; // store expr in variable name
+ break;
+ }
+ else if (isalnum((unsigned char)*str)) { // variable name
+ cformat = LF_VAR;
+ var = str;
+ break;
+ }
+ memprintf(err, "parse argument modifier without variable name near '%%{%s}'", arg);
+ goto fail;
+
+ case LF_STEXPR: // text immediately following '%['
+ /* the whole sample expression is parsed at once,
+ * returning the pointer to the first character not
+ * part of the expression, which MUST be the trailing
+ * angle bracket.
+ */
+ if (!add_sample_to_logformat_list(var, arg, arg_len, curproxy, list_format, options, cap, err, &str))
+ goto fail;
+
+ if (*str == ']') {
+ // end of arg, go on with next state
+ cformat = pformat = LF_EDEXPR;
+ sp = str;
+ }
+ else {
+ char c = *str;
+ *str = 0;
+ if (isprint((unsigned char)c))
+ memprintf(err, "expected ']' after '%s', but found '%c'", var, c);
+ else
+ memprintf(err, "missing ']' after '%s'", var);
+ goto fail;
+ }
+ break;
+
+ case LF_VAR: // text part of a variable name
+ var_len = str - var;
+ if (!isalnum((unsigned char)*str))
+ cformat = LF_INIT; // not variable name anymore
+ break;
+
+ default: // LF_INIT, LF_TEXT, LF_SEPARATOR, LF_END, LF_EDEXPR
+ cformat = LF_INIT;
+ }
+
+ if (cformat == LF_INIT) { /* resynchronize state to text/sep/startvar */
+ switch (*str) {
+ case '%': cformat = LF_STARTVAR; break;
+ case 0 : cformat = LF_END; break;
+ case ' ':
+ if (options & LOG_OPT_MERGE_SPACES) {
+ cformat = LF_SEPARATOR;
+ break;
+ }
+ __fallthrough;
+ default : cformat = LF_TEXT; break;
+ }
+ }
+
+ /* phase 2: emit the token that just ended (separators are emitted
+ * one by one, hence the extra LF_SEPARATOR condition). */
+ if (cformat != pformat || pformat == LF_SEPARATOR) {
+ switch (pformat) {
+ case LF_VAR:
+ if (!parse_logformat_var(arg, arg_len, var, var_len, curproxy, list_format, &options, err))
+ goto fail;
+ break;
+ case LF_TEXT:
+ case LF_SEPARATOR:
+ if (!add_to_logformat_list(sp, str, pformat, list_format, err))
+ goto fail;
+ break;
+ }
+ sp = str; /* new start of text at every state switch and at every separator */
+ }
+ }
+
+ /* the string must not end in the middle of a '%', '%{arg' or '%[expr' */
+ if (pformat == LF_STARTVAR || pformat == LF_STARG || pformat == LF_STEXPR) {
+ memprintf(err, "truncated line after '%s'", var ? var : arg ? arg : "%");
+ goto fail;
+ }
+ free(backfmt);
+
+ return 1;
+ fail:
+ free(backfmt);
+ return 0;
+}
+
+/*
+ * Parse the first range of indexes from a string made of a list of comma separated
+ * ranges of indexes. Note that an index may be considered as a particular range
+ * with a high limit to the low limit.
+ *
+ * <*arg> is advanced past the parsed range (and past a trailing comma, if
+ * any) so the function can be called in a loop. Accepted forms are "N" and
+ * "N-M" with M > N > 0; returns 1 on success, 0 on error with <err> filled.
+ */
+int get_logger_smp_range(unsigned int *low, unsigned int *high, char **arg, char **err)
+{
+ char *end, *p;
+
+ *low = *high = 0;
+
+ p = *arg;
+ end = strchr(p, ',');
+ if (!end)
+ end = p + strlen(p);
+
+ /* a single index "N" is the range N-N */
+ *high = *low = read_uint((const char **)&p, end);
+ if (!*low || (p != end && *p != '-'))
+ goto err;
+
+ if (p == end)
+ goto done;
+
+ p++;
+ *high = read_uint((const char **)&p, end);
+ if (!*high || *high <= *low || p != end)
+ goto err;
+
+ done:
+ if (*end == ',')
+ end++;
+ *arg = end;
+ return 1;
+
+ err:
+ memprintf(err, "wrong sample range '%s'", *arg);
+ return 0;
+}
+
+/*
+ * Returns 1 if the range defined by <low> and <high> overlaps
+ * one of them in <rgs> array of ranges with <sz> the size of this
+ * array, 0 if not. Fills <err> when an overlap is found.
+ */
+int smp_log_ranges_overlap(struct smp_log_range *rgs, size_t sz,
+ unsigned int low, unsigned int high, char **err)
+{
+ size_t i;
+
+ for (i = 0; i < sz; i++) {
+ /* Two ranges intersect iff each one starts no later than the
+ * other one ends. The previous endpoint-only test missed the
+ * case where [low,high] fully contains an existing range
+ * (low < rgs[i].low && high > rgs[i].high).
+ */
+ if (low <= rgs[i].high && high >= rgs[i].low) {
+ memprintf(err, "ranges are overlapping");
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* qsort()-style comparator ordering two struct smp_log_range by position.
+ * Ranges strictly before/after each other compare as -1/+1; anything else
+ * (i.e. intersecting or touching ranges) compares equal.
+ */
+int smp_log_range_cmp(const void *a, const void *b)
+{
+ const struct smp_log_range *left = a;
+ const struct smp_log_range *right = b;
+
+ if (left->high < right->low)
+ return -1;
+ if (left->low > right->high)
+ return 1;
+ return 0;
+}
+
+/* helper func: reset every field of <target> to its "unset" value so the
+ * structure can safely be passed to deinit_log_target() later. */
+static inline void init_log_target(struct log_target *target)
+{
+ target->addr = NULL;
+ target->resolv_name = NULL;
+ target->flags = LOG_TARGET_FL_NONE;
+ target->type = 0;
+}
+
+/* releases the dynamic members of <target>. The address is always ours to
+ * free; the resolv name is only freed while the target is still unresolved
+ * (once LOG_TARGET_FL_RESOLVED is set, its ownership moved elsewhere).
+ */
+void deinit_log_target(struct log_target *target)
+{
+ ha_free(&target->addr);
+ if (target->flags & LOG_TARGET_FL_RESOLVED)
+ return;
+ ha_free(&target->resolv_name);
+}
+
+/* returns 0 on failure and positive value on success */
+/* Deep-copies <def> into <cpy> (addr and resolv_name are duplicated, not
+ * shared). On failure <cpy> is left deinitialized; the caller keeps ownership
+ * of <def> in all cases. */
+static int dup_log_target(struct log_target *def, struct log_target *cpy)
+{
+ BUG_ON((def->flags & LOG_TARGET_FL_RESOLVED)); /* postparsing already done, invalid use */
+ init_log_target(cpy);
+ if (def->addr) {
+ cpy->addr = malloc(sizeof(*cpy->addr));
+ if (!cpy->addr)
+ goto error;
+ *cpy->addr = *def->addr;
+ }
+ if (def->resolv_name) {
+ cpy->resolv_name = strdup(def->resolv_name);
+ if (!cpy->resolv_name)
+ goto error;
+ }
+ cpy->type = def->type;
+ return 1;
+ error:
+ /* releases whichever members were successfully duplicated */
+ deinit_log_target(cpy);
+ return 0;
+}
+
+/* must be called under the lbprm lock */
+/* Inserts <srv> into its proxy's positional server array and avail list, and
+ * updates the act/bck counters and total weight accordingly. */
+static void _log_backend_srv_queue(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ /* queue the server in the proxy lb array to make it easily searchable by
+ * log-balance algorithms. Here we use the srv array as a general server
+ * pool of in-use servers, lookup is done using a relative positional id
+ * (array is contiguous)
+ *
+ * We use the avail server list to get a quick hand on available servers
+ * (those that are UP)
+ */
+ if (srv->flags & SRV_F_BACKUP) {
+ /* backup servers are only placed in the array while no active
+ * server is usable */
+ if (!p->srv_act)
+ p->lbprm.log.srv[p->srv_bck] = srv;
+ p->srv_bck++;
+ }
+ else {
+ if (!p->srv_act) {
+ /* we will be switching to act tree in LB logic, thus we need to
+ * reset the lastid
+ */
+ HA_ATOMIC_STORE(&p->lbprm.log.lastid, 0);
+ }
+ p->lbprm.log.srv[p->srv_act] = srv;
+ p->srv_act++;
+ }
+ /* append the server to the list of available servers */
+ LIST_APPEND(&p->lbprm.log.avail, &srv->lb_list);
+
+ /* active servers take precedence over backups for the total weight */
+ p->lbprm.tot_weight = (p->srv_act) ? p->srv_act : p->srv_bck;
+}
+
+/* lbprm callback: requeues <srv> when its state transitions to usable.
+ * Filters out spurious notifications before taking the lbprm write lock. */
+static void log_backend_srv_up(struct server *srv)
+{
+ struct proxy *p __maybe_unused = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return; /* nothing to do */
+ if (srv_currently_usable(srv) || !srv_willbe_usable(srv))
+ return; /* false alarm */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ _log_backend_srv_queue(srv);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+}
+
+/* must be called under lbprm lock */
+/* Rebuilds the positional srv[] array from the avail list, keeping only
+ * active servers when any is up, otherwise only backup servers. */
+static void _log_backend_srv_recalc(struct proxy *p)
+{
+ unsigned int it = 0;
+ struct server *cur_srv;
+
+ list_for_each_entry(cur_srv, &p->lbprm.log.avail, lb_list) {
+ uint8_t backup = cur_srv->flags & SRV_F_BACKUP;
+
+ /* keep backups only when no active server remains */
+ if ((!p->srv_act && backup) ||
+ (p->srv_act && !backup))
+ p->lbprm.log.srv[it++] = cur_srv;
+ }
+}
+
+/* must be called under the lbprm lock */
+/* Removes <srv> from the proxy's avail list, rebuilds the positional array
+ * and updates the act/bck counters and total weight. */
+static void _log_backend_srv_dequeue(struct server *srv)
+{
+ struct proxy *p = srv->proxy;
+
+ if (srv->flags & SRV_F_BACKUP) {
+ p->srv_bck--;
+ }
+ else {
+ p->srv_act--;
+ if (!p->srv_act) {
+ /* we will be switching to bck tree in LB logic, thus we need to
+ * reset the lastid
+ */
+ HA_ATOMIC_STORE(&p->lbprm.log.lastid, 0);
+ }
+ }
+
+ /* remove the srv from the list of available (UP) servers */
+ LIST_DELETE(&srv->lb_list);
+
+ /* reconstruct the array of usable servers */
+ _log_backend_srv_recalc(p);
+
+ p->lbprm.tot_weight = (p->srv_act) ? p->srv_act : p->srv_bck;
+}
+
+/* lbprm callback: dequeues <srv> when its state transitions to unusable.
+ * Filters out spurious notifications before taking the lbprm write lock. */
+static void log_backend_srv_down(struct server *srv)
+{
+ struct proxy *p __maybe_unused = srv->proxy;
+
+ if (!srv_lb_status_changed(srv))
+ return; /* nothing to do */
+ if (!srv_currently_usable(srv) || srv_willbe_usable(srv))
+ return; /* false alarm */
+
+ HA_RWLOCK_WRLOCK(LBPRM_LOCK, &p->lbprm.lock);
+ _log_backend_srv_dequeue(srv);
+ HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &p->lbprm.lock);
+}
+
+/* check that current configuration is compatible with "mode log" */
+/* Each incompatible feature found on <be> is reported with a warning and
+ * then released/disabled, so the proxy can still start. Returns ERR_NONE or
+ * an ERR_WARN-flagged code; never a fatal error. */
+static int _postcheck_log_backend_compat(struct proxy *be)
+{
+ int err_code = ERR_NONE;
+
+ if (!LIST_ISEMPTY(&be->tcp_req.inspect_rules) ||
+ !LIST_ISEMPTY(&be->tcp_req.l4_rules) ||
+ !LIST_ISEMPTY(&be->tcp_req.l5_rules)) {
+ ha_warning("Cannot use tcp-request rules with 'mode log' in %s '%s'. They will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ free_act_rules(&be->tcp_req.inspect_rules);
+ free_act_rules(&be->tcp_req.l4_rules);
+ free_act_rules(&be->tcp_req.l5_rules);
+ }
+ if (!LIST_ISEMPTY(&be->tcp_rep.inspect_rules)) {
+ ha_warning("Cannot use tcp-response rules with 'mode log' in %s '%s'. They will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ free_act_rules(&be->tcp_rep.inspect_rules);
+ }
+ if (be->table) {
+ ha_warning("Cannot use stick table with 'mode log' in %s '%s'. It will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ stktable_deinit(be->table);
+ ha_free(&be->table);
+ }
+ if (!LIST_ISEMPTY(&be->storersp_rules) ||
+ !LIST_ISEMPTY(&be->sticking_rules)) {
+ ha_warning("Cannot use sticking rules with 'mode log' in %s '%s'. They will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ free_stick_rules(&be->storersp_rules);
+ free_stick_rules(&be->sticking_rules);
+ }
+ if (isttest(be->server_id_hdr_name)) {
+ ha_warning("Cannot set \"server_id_hdr_name\" with 'mode log' in %s '%s'. It will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ istfree(&be->server_id_hdr_name);
+ }
+ if (be->dyncookie_key) {
+ ha_warning("Cannot set \"dynamic-cookie-key\" with 'mode log' in %s '%s'. It will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ ha_free(&be->dyncookie_key);
+ }
+ if (!LIST_ISEMPTY(&be->server_rules)) {
+ ha_warning("Cannot use \"use-server\" rules with 'mode log' in %s '%s'. They will be ignored.\n",
+ proxy_type_str(be), be->id);
+
+ err_code |= ERR_WARN;
+ free_server_rules(&be->server_rules);
+ }
+ return err_code;
+}
+
/* Performs post-parsing checks and initialization for a "mode log" backend:
 * installs the lbprm status callbacks, allocates the server array used by the
 * log load-balancing algorithms, compiles the "log-balance hash" converter
 * expression when that algorithm is selected, then finishes the
 * initialization of each of the proxy's servers (log target allocation and
 * queueing).
 *
 * Returns ERR_NONE on success (or when there is nothing to do), else a
 * combination of ERR_ALERT|ERR_FATAL; an alert is emitted on failure.
 */
static int postcheck_log_backend(struct proxy *be)
{
        char *msg = NULL;
        struct server *srv;
        int err_code = ERR_NONE;
        int target_type = -1; // -1 is unused in log_tgt enum

        /* only applies to enabled syslog-mode backends */
        if (be->mode != PR_MODE_SYSLOG ||
            (be->flags & (PR_FL_DISABLED|PR_FL_STOPPED)))
                return ERR_NONE; /* nothing to do */

        err_code |= _postcheck_log_backend_compat(be);
        if (err_code & ERR_CODE)
                return err_code;

        /* First time encountering this log backend, perform some init
         */
        be->lbprm.set_server_status_up = log_backend_srv_up;
        be->lbprm.set_server_status_down = log_backend_srv_down;
        be->lbprm.log.lastid = 0; /* initial value */
        LIST_INIT(&be->lbprm.log.avail);

        /* alloc srv array (it will be used for active and backup server lists in turn,
         * so we ensure that the longest list will fit
         */
        be->lbprm.log.srv = calloc(MAX(be->srv_act, be->srv_bck),
                                   sizeof(*be->lbprm.log.srv));

        if (!be->lbprm.log.srv ) {
                memprintf(&msg, "memory error when allocating server array (%d entries)",
                          MAX(be->srv_act, be->srv_bck));
                err_code |= ERR_ALERT | ERR_FATAL;
                goto end;
        }

        /* reinit srv counters, lbprm queueing will recount */
        be->srv_act = 0;
        be->srv_bck = 0;

        /* "log-balance hash" needs to compile its expression */
        if ((be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_LH) {
                struct sample_expr *expr;
                char *expr_str = NULL;
                char *err_str = NULL;
                int idx = 0;

                /* only map-based hash method is supported for now */
                if ((be->lbprm.algo & BE_LB_HASH_TYPE) != BE_LB_HASH_MAP) {
                        memprintf(&msg, "unsupported hash method (from \"hash-type\")");
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }

                /* a little bit of explanation about what we're going to do here:
                 * as the user gave us a list of converters, instead of the fetch+conv list
                 * tuple as we're used to, we need to insert a dummy fetch at the start of
                 * the converter list so that sample_parse_expr() is able to properly parse
                 * the expr. We're explicitly using str() as dummy fetch, since the input
                 * sample that will be passed to the converter list at runtime will be a
                 * string (the log message about to be sent). Doing so allows sample_parse_expr()
                 * to ensure that the provided converters will be compatible with string type.
                 */
                memprintf(&expr_str, "str(dummy),%s", be->lbprm.arg_str);
                if (!expr_str) {
                        memprintf(&msg, "memory error during converter list argument parsing (from \"log-balance hash\")");
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }
                expr = sample_parse_expr((char*[]){expr_str, NULL}, &idx,
                                         be->conf.file,
                                         be->conf.line,
                                         &err_str, NULL, NULL);
                if (!expr) {
                        memprintf(&msg, "%s (from converter list argument in \"log-balance hash\")", err_str);
                        ha_free(&err_str);
                        err_code |= ERR_ALERT | ERR_FATAL;
                        ha_free(&expr_str);
                        goto end;
                }

                /* We expect the log_message->conv_list expr to resolve as a binary-compatible
                 * value because its output will be passed to gen_hash() to compute the hash.
                 *
                 * So we check the last converter's output type to ensure that it can be
                 * converted into the expected type. Invalid output type will result in an
                 * error to prevent unexpected results during runtime.
                 */
                if (sample_casts[smp_expr_output_type(expr)][SMP_T_BIN] == NULL) {
                        memprintf(&msg, "invalid output type at the end of converter list for \"log-balance hash\" directive");
                        err_code |= ERR_ALERT | ERR_FATAL;
                        release_sample_expr(expr);
                        ha_free(&expr_str);
                        goto end;
                }
                ha_free(&expr_str);
                be->lbprm.expr = expr;
        }

        /* finish the initialization of proxy's servers */
        srv = be->srv;
        while (srv) {
                /* postcheck must not run twice on the same server */
                BUG_ON(srv->log_target);
                BUG_ON(srv->addr_type.proto_type != PROTO_TYPE_DGRAM &&
                       srv->addr_type.proto_type != PROTO_TYPE_STREAM);

                srv->log_target = malloc(sizeof(*srv->log_target));
                if (!srv->log_target) {
                        memprintf(&msg, "memory error when allocating log server '%s'\n", srv->id);
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }
                init_log_target(srv->log_target);
                if (srv->addr_type.proto_type == PROTO_TYPE_DGRAM) {
                        srv->log_target->type = LOG_TARGET_DGRAM;
                        /* Try to allocate log target addr (only used in DGRAM mode) */
                        srv->log_target->addr = calloc(1, sizeof(*srv->log_target->addr));
                        if (!srv->log_target->addr) {
                                memprintf(&msg, "memory error when allocating log server '%s'\n", srv->id);
                                err_code |= ERR_ALERT | ERR_FATAL;
                                goto end;
                        }
                        /* We must initialize it with known addr:svc_port, it will then
                         * be updated automatically by the server API for runtime changes
                         */
                        ipcpy(&srv->addr, srv->log_target->addr);
                        set_host_port(srv->log_target->addr, srv->svc_port);
                }
                else {
                        /* for now BUFFER type only supports TCP server to it's almost
                         * explicit
                         */
                        srv->log_target->type = LOG_TARGET_BUFFER;
                        srv->log_target->sink = sink_new_from_srv(srv, "log backend");
                        if (!srv->log_target->sink) {
                                memprintf(&msg, "error when creating sink from '%s' log server", srv->id);
                                err_code |= ERR_ALERT | ERR_FATAL;
                                goto end;
                        }
                }

                /* the first server fixes the network type; all subsequent
                 * servers must match it
                 */
                if (target_type == -1)
                        target_type = srv->log_target->type;

                if (target_type != srv->log_target->type) {
                        memprintf(&msg, "cannot mix server types within a log backend, '%s' srv's network type differs from previous server", srv->id);
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }
                srv->log_target->flags |= LOG_TARGET_FL_RESOLVED;
                srv->cur_eweight = 1; /* ignore weights, all servers have the same weight */
                _log_backend_srv_queue(srv);
                srv = srv->next;
        }
 end:
        if (err_code & ERR_CODE) {
                ha_free(&be->lbprm.log.srv); /* free log servers array */
                ha_alert("log backend '%s': failed to initialize: %s.\n", be->id, msg);
                ha_free(&msg);
        }

        return err_code;
}
+
/* resolves a single logger entry (it is expected to be called
 * at postparsing stage)
 *
 * <logger> is parent logger used for implicit settings
 *
 * Returns err_code which defaults to ERR_NONE and can be set to a combination
 * of ERR_WARN, ERR_ALERT, ERR_FATAL and ERR_ABORT in case of errors.
 * <msg> could be set at any time (it will usually be set on error, but
 * could also be set when no error occurred to report a diag warning), thus is
 * up to the caller to check it and to free it.
 */
int resolve_logger(struct logger *logger, char **msg)
{
        struct log_target *target = &logger->target;
        int err_code = ERR_NONE;

        if (target->type == LOG_TARGET_BUFFER)
                err_code = sink_resolve_logger_buffer(logger, msg);
        else if (target->type == LOG_TARGET_BACKEND) {
                struct proxy *be;

                /* special case: "backend@" targets are resolved to an actual
                 * proxy pointer, which must be a syslog-mode backend
                 */
                be = proxy_find_by_name(target->be_name, PR_CAP_BE, 0);
                if (!be) {
                        memprintf(msg, "uses unknown log backend '%s'", target->be_name);
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }
                else if (be->mode != PR_MODE_SYSLOG) {
                        memprintf(msg, "uses incompatible log backend '%s'", target->be_name);
                        err_code |= ERR_ALERT | ERR_FATAL;
                        goto end;
                }
                ha_free(&target->be_name); /* backend is resolved and will replace name hint */
                target->be = be;
        }

 end:
        /* the target is flagged resolved even on error so it is not
         * re-processed later
         */
        target->flags |= LOG_TARGET_FL_RESOLVED;

        return err_code;
}
+
+/* tries to duplicate <def> logger
+ *
+ * Returns the newly allocated and duplicated logger or NULL
+ * in case of error.
+ */
+struct logger *dup_logger(struct logger *def)
+{
+ struct logger *cpy = malloc(sizeof(*cpy));
+
+ /* copy everything that can be easily copied */
+ memcpy(cpy, def, sizeof(*cpy));
+
+ /* default values */
+ cpy->conf.file = NULL;
+ LIST_INIT(&cpy->list);
+
+ /* special members */
+ if (dup_log_target(&def->target, &cpy->target) == 0)
+ goto error;
+ if (def->conf.file) {
+ cpy->conf.file = strdup(def->conf.file);
+ if (!cpy->conf.file)
+ goto error;
+ }
+
+ /* inherit from original reference if set */
+ cpy->ref = (def->ref) ? def->ref : def;
+
+ return cpy;
+
+ error:
+ free_logger(cpy);
+ return NULL;
+}
+
/* frees <logger> after freeing all of its allocated fields. The
 * logger must not belong to a list anymore. <logger> may be NULL, which is
 * silently ignored.
 */
void free_logger(struct logger *logger)
{
        if (!logger)
                return;

        /* catch callers that forgot to detach the logger from its list first */
        BUG_ON(LIST_INLIST(&logger->list));
        ha_free(&logger->conf.file);
        deinit_log_target(&logger->target);
        free(logger);
}
+
+/* Parse single log target
+ * Returns 0 on failure and positive value on success
+ */
+static int parse_log_target(char *raw, struct log_target *target, char **err)
+{
+ int port1, port2, fd;
+ struct protocol *proto;
+ struct sockaddr_storage *sk;
+
+ init_log_target(target);
+ // target addr is NULL at this point
+
+ if (strncmp(raw, "ring@", 5) == 0) {
+ target->type = LOG_TARGET_BUFFER;
+ target->ring_name = strdup(raw + 5);
+ goto done;
+ }
+ else if (strncmp(raw, "backend@", 8) == 0) {
+ target->type = LOG_TARGET_BACKEND;
+ target->be_name = strdup(raw + 8);
+ goto done;
+ }
+
+ /* try to allocate log target addr */
+ target->addr = malloc(sizeof(*target->addr));
+ if (!target->addr) {
+ memprintf(err, "memory error");
+ goto error;
+ }
+
+ target->type = LOG_TARGET_DGRAM; // default type
+
+ /* parse the target address */
+ sk = str2sa_range(raw, NULL, &port1, &port2, &fd, &proto, NULL,
+ err, NULL, NULL,
+ PA_O_RESOLVE | PA_O_PORT_OK | PA_O_RAW_FD | PA_O_DGRAM | PA_O_STREAM | PA_O_DEFAULT_DGRAM);
+ if (!sk)
+ goto error;
+ if (fd != -1)
+ target->type = LOG_TARGET_FD;
+ *target->addr = *sk;
+
+ if (sk->ss_family == AF_INET || sk->ss_family == AF_INET6) {
+ if (!port1)
+ set_host_port(target->addr, SYSLOG_PORT);
+ }
+
+ if (proto && proto->xprt_type == PROTO_TYPE_STREAM) {
+ static unsigned long ring_ids;
+
+ /* Implicit sink buffer will be initialized in post_check
+ * (target->addr is set in this case)
+ */
+ target->type = LOG_TARGET_BUFFER;
+ /* compute unique name for the ring */
+ memprintf(&target->ring_name, "ring#%lu", ++ring_ids);
+ }
+
+ done:
+ return 1;
+ error:
+ deinit_log_target(target);
+ return 0;
+}
+
+/*
+ * Parse "log" keyword and update <loggers> list accordingly.
+ *
+ * When <do_del> is set, it means the "no log" line was parsed, so all log
+ * servers in <loggers> are released.
+ *
+ * Otherwise, we try to parse the "log" line. First of all, when the list is not
+ * the global one, we look for the parameter "global". If we find it,
+ * global.loggers is copied. Else we parse each arguments.
+ *
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+int parse_logger(char **args, struct list *loggers, int do_del, const char *file, int linenum, char **err)
+{
+ struct smp_log_range *smp_rgs = NULL;
+ struct logger *logger = NULL;
+ int cur_arg;
+
+ /*
+ * "no log": delete previous herited or defined syslog
+ * servers.
+ */
+ if (do_del) {
+ struct logger *back;
+
+ if (*(args[1]) != 0) {
+ memprintf(err, "'no log' does not expect arguments");
+ goto error;
+ }
+
+ list_for_each_entry_safe(logger, back, loggers, list) {
+ LIST_DEL_INIT(&logger->list);
+ free_logger(logger);
+ }
+ return 1;
+ }
+
+ /*
+ * "log global": copy global.loggers linked list to the end of loggers
+ * list. But first, we check (loggers != global.loggers).
+ */
+ if (*(args[1]) && *(args[2]) == 0 && strcmp(args[1], "global") == 0) {
+ if (loggers == &global.loggers) {
+ memprintf(err, "'global' is not supported for a global syslog server");
+ goto error;
+ }
+ list_for_each_entry(logger, &global.loggers, list) {
+ struct logger *node;
+
+ list_for_each_entry(node, loggers, list) {
+ if (node->ref == logger)
+ goto skip_logger;
+ }
+
+ /* duplicate logger from global */
+ node = dup_logger(logger);
+ if (!node) {
+ memprintf(err, "out of memory error");
+ goto error;
+ }
+
+ /* manually override some values */
+ ha_free(&node->conf.file);
+ node->conf.file = strdup(file);
+ node->conf.line = linenum;
+
+ /* add to list */
+ LIST_APPEND(loggers, &node->list);
+
+ skip_logger:
+ continue;
+ }
+ return 1;
+ }
+
+ /*
+ * "log <address> ...: parse a syslog server line
+ */
+ if (*(args[1]) == 0 || *(args[2]) == 0) {
+ memprintf(err, "expects <address> and <facility> %s as arguments",
+ ((loggers == &global.loggers) ? "" : "or global"));
+ goto error;
+ }
+
+ /* take care of "stdout" and "stderr" as regular aliases for fd@1 / fd@2 */
+ if (strcmp(args[1], "stdout") == 0)
+ args[1] = "fd@1";
+ else if (strcmp(args[1], "stderr") == 0)
+ args[1] = "fd@2";
+
+ logger = calloc(1, sizeof(*logger));
+ if (!logger) {
+ memprintf(err, "out of memory");
+ goto error;
+ }
+ LIST_INIT(&logger->list);
+ logger->conf.file = strdup(file);
+ logger->conf.line = linenum;
+
+ /* skip address for now, it will be parsed at the end */
+ cur_arg = 2;
+
+ /* just after the address, a length may be specified */
+ logger->maxlen = MAX_SYSLOG_LEN;
+ if (strcmp(args[cur_arg], "len") == 0) {
+ int len = atoi(args[cur_arg+1]);
+ if (len < 80 || len > 65535) {
+ memprintf(err, "invalid log length '%s', must be between 80 and 65535",
+ args[cur_arg+1]);
+ goto error;
+ }
+ logger->maxlen = len;
+ cur_arg += 2;
+ }
+ if (logger->maxlen > global.max_syslog_len)
+ global.max_syslog_len = logger->maxlen;
+
+ /* after the length, a format may be specified */
+ if (strcmp(args[cur_arg], "format") == 0) {
+ logger->format = get_log_format(args[cur_arg+1]);
+ if (logger->format == LOG_FORMAT_UNSPEC) {
+ memprintf(err, "unknown log format '%s'", args[cur_arg+1]);
+ goto error;
+ }
+ cur_arg += 2;
+ }
+
+ if (strcmp(args[cur_arg], "sample") == 0) {
+ unsigned low, high;
+ char *p, *beg, *end, *smp_sz_str;
+ size_t smp_rgs_sz = 0, smp_sz = 0, new_smp_sz;
+
+ p = args[cur_arg+1];
+ smp_sz_str = strchr(p, ':');
+ if (!smp_sz_str) {
+ memprintf(err, "Missing sample size");
+ goto error;
+ }
+
+ *smp_sz_str++ = '\0';
+
+ end = p + strlen(p);
+
+ while (p != end) {
+ if (!get_logger_smp_range(&low, &high, &p, err))
+ goto error;
+
+ if (smp_rgs && smp_log_ranges_overlap(smp_rgs, smp_rgs_sz, low, high, err))
+ goto error;
+
+ smp_rgs = my_realloc2(smp_rgs, (smp_rgs_sz + 1) * sizeof *smp_rgs);
+ if (!smp_rgs) {
+ memprintf(err, "out of memory error");
+ goto error;
+ }
+
+ smp_rgs[smp_rgs_sz].low = low;
+ smp_rgs[smp_rgs_sz].high = high;
+ smp_rgs[smp_rgs_sz].sz = high - low + 1;
+ if (smp_rgs[smp_rgs_sz].high > smp_sz)
+ smp_sz = smp_rgs[smp_rgs_sz].high;
+ smp_rgs_sz++;
+ }
+
+ if (smp_rgs == NULL) {
+ memprintf(err, "no sampling ranges given");
+ goto error;
+ }
+
+ beg = smp_sz_str;
+ end = beg + strlen(beg);
+ new_smp_sz = read_uint((const char **)&beg, end);
+ if (!new_smp_sz || beg != end) {
+ memprintf(err, "wrong sample size '%s' for sample range '%s'",
+ smp_sz_str, args[cur_arg+1]);
+ goto error;
+ }
+
+ if (new_smp_sz < smp_sz) {
+ memprintf(err, "sample size %zu should be greater or equal to "
+ "%zu the maximum of the high ranges limits",
+ new_smp_sz, smp_sz);
+ goto error;
+ }
+ smp_sz = new_smp_sz;
+
+ /* Let's order <smp_rgs> array. */
+ qsort(smp_rgs, smp_rgs_sz, sizeof(struct smp_log_range), smp_log_range_cmp);
+
+ logger->lb.smp_rgs = smp_rgs;
+ logger->lb.smp_rgs_sz = smp_rgs_sz;
+ logger->lb.smp_sz = smp_sz;
+
+ cur_arg += 2;
+ }
+
+ /* parse the facility */
+ logger->facility = get_log_facility(args[cur_arg]);
+ if (logger->facility < 0) {
+ memprintf(err, "unknown log facility '%s'", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+
+ /* parse the max syslog level (default: debug) */
+ logger->level = 7;
+ if (*(args[cur_arg])) {
+ logger->level = get_log_level(args[cur_arg]);
+ if (logger->level < 0) {
+ memprintf(err, "unknown optional log level '%s'", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ }
+
+ /* parse the limit syslog level (default: emerg) */
+ logger->minlvl = 0;
+ if (*(args[cur_arg])) {
+ logger->minlvl = get_log_level(args[cur_arg]);
+ if (logger->minlvl < 0) {
+ memprintf(err, "unknown optional minimum log level '%s'", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ }
+
+ /* Too many args */
+ if (*(args[cur_arg])) {
+ memprintf(err, "cannot handle unexpected argument '%s'", args[cur_arg]);
+ goto error;
+ }
+
+ /* now, back to the log target */
+ if (!parse_log_target(args[1], &logger->target, err))
+ goto error;
+
+ done:
+ LIST_APPEND(loggers, &logger->list);
+ return 1;
+
+ error:
+ free(smp_rgs);
+ free_logger(logger);
+ return 0;
+}
+
+
+/*
+ * returns log format, LOG_FORMAT_UNSPEC is return if not found.
+ */
+enum log_fmt get_log_format(const char *fmt)
+{
+ enum log_fmt format;
+
+ format = LOG_FORMATS - 1;
+ while (format > 0 && log_formats[format].name
+ && strcmp(log_formats[format].name, fmt) != 0)
+ format--;
+
+ /* Note: 0 is LOG_FORMAT_UNSPEC */
+ return format;
+}
+
+/*
+ * returns log level for <lev> or -1 if not found.
+ */
+int get_log_level(const char *lev)
+{
+ int level;
+
+ level = NB_LOG_LEVELS - 1;
+ while (level >= 0 && strcmp(log_levels[level], lev) != 0)
+ level--;
+
+ return level;
+}
+
+/*
+ * returns log facility for <fac> or -1 if not found.
+ */
+int get_log_facility(const char *fac)
+{
+ int facility;
+
+ facility = NB_LOG_FACILITIES - 1;
+ while (facility >= 0 && strcmp(log_facilities[facility], fac) != 0)
+ facility--;
+
+ return facility;
+}
+
/*
 * Encode the string.
 *
 * When using the +E log format option, it will try to escape '"\]'
 * characters with '\' as prefix. The same prefix should not be used as
 * <escape>.
 *
 * Returns the position right after the last written byte (no trailing '\0'
 * guarantee when <start> >= <stop> on entry, mirroring encode_string()).
 */
static char *lf_encode_string(char *start, char *stop,
                              const char escape, const long *map,
                              const char *string,
                              struct logformat_node *node)
{
        if (node->options & LOG_OPT_ESC) {
                if (start < stop) {
                        stop--; /* reserve one byte for the final '\0' */
                        while (start < stop && *string != '\0') {
                                if (!ha_bit_test((unsigned char)(*string), map)) {
                                        if (!ha_bit_test((unsigned char)(*string), rfc5424_escape_map))
                                                *start++ = *string;
                                        else {
                                                /* need room for '\' + char (plus the reserved '\0') */
                                                if (start + 2 >= stop)
                                                        break;
                                                *start++ = '\\';
                                                *start++ = *string;
                                        }
                                }
                                else {
                                        /* need room for escape char + 2 hex digits */
                                        if (start + 3 >= stop)
                                                break;
                                        *start++ = escape;
                                        *start++ = hextab[(*string >> 4) & 15];
                                        *start++ = hextab[*string & 15];
                                }
                                string++;
                        }
                        *start = '\0';
                }
        }
        else {
                /* no rfc5424 escaping requested: use the generic encoder */
                return encode_string(start, stop, escape, map, string);
        }

        return start;
}
+
/*
 * Encode the chunk.
 *
 * When using the +E log format option, it will try to escape '"\]'
 * characters with '\' as prefix. The same prefix should not be used as
 * <escape>.
 *
 * Same logic as lf_encode_string() but the input is a length-bounded
 * buffer instead of a NUL-terminated string.
 */
static char *lf_encode_chunk(char *start, char *stop,
                             const char escape, const long *map,
                             const struct buffer *chunk,
                             struct logformat_node *node)
{
        char *str, *end;

        if (node->options & LOG_OPT_ESC) {
                if (start < stop) {
                        str = chunk->area;
                        end = chunk->area + chunk->data;

                        stop--; /* reserve one byte for the final '\0' */
                        while (start < stop && str < end) {
                                if (!ha_bit_test((unsigned char)(*str), map)) {
                                        if (!ha_bit_test((unsigned char)(*str), rfc5424_escape_map))
                                                *start++ = *str;
                                        else {
                                                /* need room for '\' + char (plus the reserved '\0') */
                                                if (start + 2 >= stop)
                                                        break;
                                                *start++ = '\\';
                                                *start++ = *str;
                                        }
                                }
                                else {
                                        /* need room for escape char + 2 hex digits */
                                        if (start + 3 >= stop)
                                                break;
                                        *start++ = escape;
                                        *start++ = hextab[(*str >> 4) & 15];
                                        *start++ = hextab[*str & 15];
                                }
                                str++;
                        }
                        *start = '\0';
                }
        }
        else {
                /* no rfc5424 escaping requested: use the generic encoder */
                return encode_chunk(start, stop, escape, map, chunk);
        }

        return start;
}
+
/*
 * Write a string in the log string
 * Take cares of quote and escape options
 *
 * Copies up to <len> bytes of <src> into <dst> which holds <size> bytes,
 * honoring LOG_OPT_QUOTE (surrounding double quotes), LOG_OPT_ESC
 * (rfc5424 escaping) and LOG_OPT_MANDATORY ('-' placeholder for an empty
 * unquoted field).
 *
 * Return the address of the \0 character, or NULL on error
 */
char *lf_text_len(char *dst, const char *src, size_t len, size_t size, const struct logformat_node *node)
{
        if (size < 2)
                return NULL;

        if (node->options & LOG_OPT_QUOTE) {
                *(dst++) = '"';
                size--;
        }

        if (src && len) {
                /* escape_string and strlcpy2 will both try to add terminating NULL-byte
                 * to dst, so we need to make sure that extra byte will fit into dst
                 * before calling them
                 */
                if (node->options & LOG_OPT_ESC) {
                        char *ret;

                        ret = escape_string(dst, (dst + size - 1), '\\', rfc5424_escape_map, src, src + len);
                        if (ret == NULL || *ret != '\0')
                                return NULL;
                        /* recompute the effective copied length from the
                         * returned end pointer
                         */
                        len = ret - dst;
                }
                else {
                        /* +1 accounts for the '\0' strlcpy2() appends; clamp
                         * to the remaining room
                         */
                        if (++len > size)
                                len = size;
                        len = strlcpy2(dst, src, len);
                }

                size -= len;
                dst += len;
        }
        else if ((node->options & (LOG_OPT_QUOTE|LOG_OPT_MANDATORY)) == LOG_OPT_MANDATORY) {
                /* empty mandatory unquoted field: emit a '-' placeholder */
                if (size < 2)
                        return NULL;
                *(dst++) = '-';
                size -= 1;
        }

        if (node->options & LOG_OPT_QUOTE) {
                /* need room for the closing quote plus the final '\0' */
                if (size < 2)
                        return NULL;
                *(dst++) = '"';
        }

        *dst = '\0';
        return dst;
}
+
/* Convenience wrapper around lf_text_len() for NUL-terminated strings:
 * the copy length is bounded by the destination size only.
 */
static inline char *lf_text(char *dst, const char *src, size_t size, const struct logformat_node *node)
{
        return lf_text_len(dst, src, size, size, node);
}
+
+/*
+ * Write a IP address to the log string
+ * +X option write in hexadecimal notation, most significant byte on the left
+ */
+char *lf_ip(char *dst, const struct sockaddr *sockaddr, size_t size, const struct logformat_node *node)
+{
+ char *ret = dst;
+ int iret;
+ char pn[INET6_ADDRSTRLEN];
+
+ if (node->options & LOG_OPT_HEXA) {
+ unsigned char *addr = NULL;
+ switch (sockaddr->sa_family) {
+ case AF_INET:
+ addr = (unsigned char *)&((struct sockaddr_in *)sockaddr)->sin_addr.s_addr;
+ iret = snprintf(dst, size, "%02X%02X%02X%02X", addr[0], addr[1], addr[2], addr[3]);
+ break;
+ case AF_INET6:
+ addr = (unsigned char *)&((struct sockaddr_in6 *)sockaddr)->sin6_addr.s6_addr;
+ iret = snprintf(dst, size, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7],
+ addr[8], addr[9], addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]);
+ break;
+ default:
+ return NULL;
+ }
+ if (iret < 0 || iret > size)
+ return NULL;
+ ret += iret;
+ } else {
+ addr_to_str((struct sockaddr_storage *)sockaddr, pn, sizeof(pn));
+ ret = lf_text(dst, pn, size, node);
+ if (ret == NULL)
+ return NULL;
+ }
+ return ret;
+}
+
+/*
+ * Write a port to the log
+ * +X option write in hexadecimal notation, most significant byte on the left
+ */
+char *lf_port(char *dst, const struct sockaddr *sockaddr, size_t size, const struct logformat_node *node)
+{
+ char *ret = dst;
+ int iret;
+
+ if (node->options & LOG_OPT_HEXA) {
+ const unsigned char *port = (const unsigned char *)&((struct sockaddr_in *)sockaddr)->sin_port;
+ iret = snprintf(dst, size, "%02X%02X", port[0], port[1]);
+ if (iret < 0 || iret > size)
+ return NULL;
+ ret += iret;
+ } else {
+ ret = ltoa_o(get_host_port((struct sockaddr_storage *)sockaddr), dst, size);
+ if (ret == NULL)
+ return NULL;
+ }
+ return ret;
+}
+
+
/*
 * This function sends the syslog message using a printf format string. It
 * expects an LF-terminated message.
 */
void send_log(struct proxy *p, int level, const char *format, ...)
{
        va_list argp;
        int data_len;

        /* nothing to do without a valid level/format, or before the shared
         * logline buffer was allocated
         */
        if (level < 0 || format == NULL || logline == NULL)
                return;

        va_start(argp, format);
        data_len = vsnprintf(logline, global.max_syslog_len, format, argp);
        /* on vsnprintf error or truncation, clamp to the buffer size */
        if (data_len < 0 || data_len > global.max_syslog_len)
                data_len = global.max_syslog_len;
        va_end(argp);

        /* use the proxy's loggers/tag when a proxy is given, otherwise the
         * global ones
         */
        __send_log((p ? &p->loggers : NULL), (p ? &p->log_tag : NULL), level,
                   logline, data_len, default_rfc5424_sd_log_format, 2);
}
/*
 * This function builds a log header according to <hdr> settings.
 *
 * If hdr.format is set to LOG_FORMAT_UNSPEC, it tries to determine
 * format based on hdr.metadata. It is useful for log-forwarding to be
 * able to forward any format without settings.
 *
 * This function returns a struct ist array of elements of the header
 * nbelem is set to the number of available elements.
 * This function returns currently a maximum of NB_LOG_HDR_IST_ELEMENTS
 * elements.
 *
 * Note: the returned vector and the buffers it points to are THREAD_LOCAL
 * statics, so the result is only valid until the next call on the same
 * thread.
 */
struct ist *build_log_header(struct log_header hdr, size_t *nbelem)
{
        static THREAD_LOCAL struct {
                struct ist ist_vector[NB_LOG_HDR_MAX_ELEMENTS];
                char timestamp_buffer[LOG_LEGACYTIME_LEN+1+1];
                time_t cur_legacy_time; /* time cached in timestamp_buffer, 0 if none */
                char priority_buffer[6];
        } hdr_ctx = { .priority_buffer = "<<<<>" };

        struct tm logtime;
        int len;
        int fac_level = 0;
        time_t time = date.tv_sec;
        struct ist *metadata = hdr.metadata;
        enum log_fmt format = hdr.format;
        int facility = hdr.facility;
        int level = hdr.level;

        *nbelem = 0;


        /* no explicit format: guess the most suitable one from the metadata */
        if (format == LOG_FORMAT_UNSPEC) {
                format = LOG_FORMAT_RAW;
                if (metadata) {
                        /* If a hostname is set, it appears we want to perform syslog
                         * because only rfc5427 or rfc3164 support an hostname.
                         */
                        if (metadata[LOG_META_HOST].len) {
                                /* If a rfc5424 compliant timestamp is used we consider
                                 * that output format is rfc5424, else legacy format
                                 * is used as specified default for local logs
                                 * in documentation.
                                 */
                                if ((metadata[LOG_META_TIME].len == 1 && metadata[LOG_META_TIME].ptr[0] == '-')
                                    || (metadata[LOG_META_TIME].len >= LOG_ISOTIME_MINLEN))
                                        format = LOG_FORMAT_RFC5424;
                                else
                                        format = LOG_FORMAT_RFC3164;
                        }
                        else if (metadata[LOG_META_TAG].len) {
                                /* Tag is present but no hostname, we should
                                 * consider we try to emit a local log
                                 * in legacy format (analog to RFC3164 but
                                 * with stripped hostname).
                                 */
                                format = LOG_FORMAT_LOCAL;
                        }
                        else if (metadata[LOG_META_PRIO].len) {
                                /* the source seems a parsed message
                                 * offering a valid level/prio prefix
                                 * so we consider this format.
                                 */
                                format = LOG_FORMAT_PRIO;
                        }
                }
        }

        /* prepare priority, stored into 1 single elem */
        switch (format) {
        case LOG_FORMAT_LOCAL:
        case LOG_FORMAT_RFC3164:
        case LOG_FORMAT_RFC5424:
        case LOG_FORMAT_PRIO:
                fac_level = facility << 3;
                /* further format ignore the facility */
                __fallthrough;
        case LOG_FORMAT_TIMED:
        case LOG_FORMAT_SHORT:
                fac_level += level;
                /* write "<PRIVAL>" backwards into priority_buffer, digits first */
                hdr_ctx.ist_vector[*nbelem].ptr = &hdr_ctx.priority_buffer[3]; /* last digit of the log level */
                do {
                        *hdr_ctx.ist_vector[*nbelem].ptr = '0' + fac_level % 10;
                        fac_level /= 10;
                        hdr_ctx.ist_vector[*nbelem].ptr--;
                } while (fac_level && hdr_ctx.ist_vector[*nbelem].ptr > &hdr_ctx.priority_buffer[0]);
                *hdr_ctx.ist_vector[*nbelem].ptr = '<';
                hdr_ctx.ist_vector[(*nbelem)++].len = &hdr_ctx.priority_buffer[5] - hdr_ctx.ist_vector[0].ptr;
                break;
        case LOG_FORMAT_ISO:
        case LOG_FORMAT_RAW:
                break;
        case LOG_FORMAT_UNSPEC:
        case LOG_FORMATS:
                ABORT_NOW();
        }


        /* prepare timestamp, stored into a max of 4 elems */
        switch (format) {
        case LOG_FORMAT_LOCAL:
        case LOG_FORMAT_RFC3164:
                /* rfc3164 ex: 'Jan  1 00:00:00 ' */
                if (metadata && metadata[LOG_META_TIME].len == LOG_LEGACYTIME_LEN) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_TIME];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                        /* time is set, break immediately */
                        break;
                }
                else if (metadata && metadata[LOG_META_TIME].len >= LOG_ISOTIME_MINLEN) {
                        int month;
                        char *timestamp = metadata[LOG_META_TIME].ptr;

                        /* iso time always begins like this: '1970-01-01T00:00:00' */

                        /* compute month */
                        month = 10*(timestamp[5] - '0') + (timestamp[6] - '0');
                        if (month)
                                month--;
                        if (month <= 11) {
                                /* builds log prefix ex: 'Jan  1 ' */
                                len = snprintf(hdr_ctx.timestamp_buffer, sizeof(hdr_ctx.timestamp_buffer),
                                               "%s %c%c ", monthname[month],
                                               timestamp[8] != '0' ? timestamp[8] : ' ',
                                               timestamp[9]);
                                /* we reused the timestamp_buffer, signal that it does not
                                 * contain local time anymore
                                 */
                                hdr_ctx.cur_legacy_time = 0;
                                if (len == 7) {
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(&hdr_ctx.timestamp_buffer[0], len);
                                        /* adds 'HH:MM:SS' from iso time */
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(&timestamp[11], 8);
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                                        /* we successfully reuse iso time, we can break */
                                        break;
                                }
                        }
                        /* Failed to reuse isotime time, fallback to local legacy time */
                }

                if (unlikely(time != hdr_ctx.cur_legacy_time)) {
                        /* re-builds timestamp from the current local time */
                        get_localtime(time, &logtime);

                        len = snprintf(hdr_ctx.timestamp_buffer, sizeof(hdr_ctx.timestamp_buffer),
                                       "%s %2d %02d:%02d:%02d ",
                                       monthname[logtime.tm_mon],
                                       logtime.tm_mday, logtime.tm_hour, logtime.tm_min, logtime.tm_sec);
                        if (len != LOG_LEGACYTIME_LEN+1)
                                hdr_ctx.cur_legacy_time = 0;
                        else
                                hdr_ctx.cur_legacy_time = time;
                }
                if (likely(hdr_ctx.cur_legacy_time))
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(&hdr_ctx.timestamp_buffer[0], LOG_LEGACYTIME_LEN+1);
                else
                        /* last-resort placeholder when formatting failed */
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("Jan  1 00:00:00 ", LOG_LEGACYTIME_LEN+1);
                break;
        case LOG_FORMAT_RFC5424:
                /* adds rfc5425 version prefix */
                hdr_ctx.ist_vector[(*nbelem)++] = ist2("1 ", 2);
                if (metadata && metadata[LOG_META_TIME].len == 1 && metadata[LOG_META_TIME].ptr[0] == '-') {
                        /* submitted len is NILVALUE, it is a valid timestamp for rfc5425 */
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_TIME];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                        break;
                }
                /* let continue as 'timed' and 'iso' format for usual timestamp */
                __fallthrough;
        case LOG_FORMAT_TIMED:
        case LOG_FORMAT_ISO:
                /* ISO format ex: '1900:01:01T12:00:00.123456Z'
                 *                '1900:01:01T14:00:00+02:00'
                 *                '1900:01:01T10:00:00.123456-02:00'
                 */
                if (metadata && metadata[LOG_META_TIME].len >= LOG_ISOTIME_MINLEN) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_TIME];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                        /* time is set, break immediately */
                        break;
                }
                else if (metadata && metadata[LOG_META_TIME].len == LOG_LEGACYTIME_LEN) {
                        int month;
                        char *timestamp = metadata[LOG_META_TIME].ptr;

                        /* legacy timestamp begins with a 3-letter month name */
                        for (month = 0; month < 12; month++)
                                if (!memcmp(monthname[month], timestamp, 3))
                                        break;

                        if (month < 12) {

                                /* get local time to retrieve year */
                                get_localtime(time, &logtime);

                                /* year seems changed since log */
                                if (logtime.tm_mon < month)
                                        logtime.tm_year--;

                                /* builds rfc5424 prefix ex: '1900-01-01T' */
                                len = snprintf(hdr_ctx.timestamp_buffer, sizeof(hdr_ctx.timestamp_buffer),
                                               "%4d-%02d-%c%cT",
                                               logtime.tm_year+1900, month+1,
                                               timestamp[4] != ' ' ? timestamp[4] : '0',
                                               timestamp[5]);

                                /* we reused the timestamp_buffer, signal that it does not
                                 * contain local time anymore
                                 */
                                hdr_ctx.cur_legacy_time = 0;
                                if (len == 11) {
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(&hdr_ctx.timestamp_buffer[0], len);
                                        /* adds HH:MM:SS from legacy timestamp */
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(&timestamp[7], 8);
                                        /* skip secfraq because it is optional */
                                        /* according to rfc: -00:00 means we don't know the timezone */
                                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("-00:00 ", 7);
                                        /* we successfully reuse legacy time, we can break */
                                        break;
                                }
                        }
                        /* Failed to reuse legacy time, fallback to local iso time */
                }
                hdr_ctx.ist_vector[(*nbelem)++] = ist2(timeofday_as_iso_us(1), LOG_ISOTIME_MAXLEN + 1);
                break;
        case LOG_FORMAT_PRIO:
        case LOG_FORMAT_SHORT:
        case LOG_FORMAT_RAW:
                break;
        case LOG_FORMAT_UNSPEC:
        case LOG_FORMATS:
                ABORT_NOW();
        }

        /* prepare other meta data, stored into a max of 10 elems */
        switch (format) {
        case LOG_FORMAT_RFC3164:
                if (metadata && metadata[LOG_META_HOST].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_HOST];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else /* the caller MUST fill the hostname, this field is mandatory */
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("localhost ", 10);
                __fallthrough;
        case LOG_FORMAT_LOCAL:
                if (!metadata || !metadata[LOG_META_TAG].len)
                        break;

                /* "tag[pid]: " prefix */
                hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_TAG];
                if (metadata[LOG_META_PID].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("[", 1);
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_PID];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("]", 1);
                }
                hdr_ctx.ist_vector[(*nbelem)++] = ist2(": ", 2);
                break;
        case LOG_FORMAT_RFC5424:
                /* rfc5424 fields: each one emits either its value or the "-" NILVALUE */
                if (metadata && metadata[LOG_META_HOST].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_HOST];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("- ", 2);

                if (metadata && metadata[LOG_META_TAG].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_TAG];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("- ", 2);

                if (metadata && metadata[LOG_META_PID].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_PID];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("- ", 2);

                if (metadata && metadata[LOG_META_MSGID].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_MSGID];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("- ", 2);

                if (metadata && metadata[LOG_META_STDATA].len) {
                        hdr_ctx.ist_vector[(*nbelem)++] = metadata[LOG_META_STDATA];
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2(" ", 1);
                }
                else
                        hdr_ctx.ist_vector[(*nbelem)++] = ist2("- ", 2);
                break;
        case LOG_FORMAT_PRIO:
        case LOG_FORMAT_SHORT:
        case LOG_FORMAT_TIMED:
        case LOG_FORMAT_ISO:
        case LOG_FORMAT_RAW:
                break;
        case LOG_FORMAT_UNSPEC:
        case LOG_FORMATS:
                ABORT_NOW();
        }

        return hdr_ctx.ist_vector;
}
+
+/*
+ * This function sends a syslog message to a single log target.
+ * <target> is the actual log target where the log will be sent
+ * (ring buffer, pre-existing fd, AF_UNIX or AF_INET datagram socket).
+ *
+ * The message will be prefixed by a header built according to <hdr>
+ * settings, truncated to <maxlen> bytes, and always terminated with
+ * an LF character. <nblogger> is only used to identify the logger in
+ * error reports.
+ *
+ * Does not return any error: the first socket()/sendmsg() failure is
+ * reported once through ha_alert(), and messages lost to EAGAIN are
+ * accounted in the dropped_logs counter.
+ */
+static inline void __do_send_log(struct log_target *target, struct log_header hdr,
+                                 int nblogger, size_t maxlen,
+                                 char *message, size_t size)
+{
+	static THREAD_LOCAL struct iovec iovec[NB_LOG_HDR_MAX_ELEMENTS+1+1] = { }; /* header elements + message + LF */
+	static THREAD_LOCAL struct msghdr msghdr = {
+		//.msg_iov = iovec,
+		.msg_iovlen = NB_LOG_HDR_MAX_ELEMENTS+2
+	};
+	static THREAD_LOCAL int logfdunix = -1;	/* syslog to AF_UNIX socket */
+	static THREAD_LOCAL int logfdinet = -1;	/* syslog to AF_INET socket */
+	int *plogfd;
+	int sent;
+	size_t nbelem;
+	struct ist *msg_header = NULL;
+
+	/* msg_iov is left out of the static initializer above (commented
+	 * line), presumably because the thread-local iovec address is not a
+	 * constant expression there; it is (re)assigned on each call instead.
+	 */
+	msghdr.msg_iov = iovec;
+
+	/* historically some messages used to already contain the trailing LF
+	 * or Zero. Let's remove all trailing LF or Zero
+	 */
+	while (size && (message[size-1] == '\n' || (message[size-1] == 0)))
+		size--;
+
+	if (target->type == LOG_TARGET_BUFFER) {
+		/* rings need neither an fd nor a pre-built header */
+		plogfd = NULL;
+		goto send;
+	}
+	else if (target->addr->ss_family == AF_CUST_EXISTING_FD) {
+		/* the socket's address is a file descriptor */
+		plogfd = (int *)&((struct sockaddr_in *)target->addr)->sin_addr.s_addr;
+	}
+	else if (target->addr->ss_family == AF_UNIX)
+		plogfd = &logfdunix;
+	else
+		plogfd = &logfdinet;
+
+	if (plogfd && unlikely(*plogfd < 0)) {
+		/* socket not successfully initialized yet */
+		if ((*plogfd = socket(target->addr->ss_family, SOCK_DGRAM,
+		                      (target->addr->ss_family == AF_UNIX) ? 0 : IPPROTO_UDP)) < 0) {
+			static char once;
+
+			if (!once) {
+				once = 1; /* note: no need for atomic ops here */
+				ha_alert("socket() failed in logger #%d: %s (errno=%d)\n",
+					 nblogger, strerror(errno), errno);
+			}
+			return;
+		} else {
+			/* we don't want to receive anything on this socket */
+			setsockopt(*plogfd, SOL_SOCKET, SO_RCVBUF, &zero, sizeof(zero));
+			/* we may want to adjust the output buffer (tune.sndbuf.backend) */
+			if (global.tune.backend_sndbuf)
+				setsockopt(*plogfd, SOL_SOCKET, SO_SNDBUF, &global.tune.backend_sndbuf, sizeof(global.tune.backend_sndbuf));
+			/* does nothing under Linux, maybe needed for others */
+			shutdown(*plogfd, SHUT_RD);
+			fd_set_cloexec(*plogfd);
+		}
+	}
+
+	/* build the array of header fragments matching <hdr>'s format */
+	msg_header = build_log_header(hdr, &nbelem);
+ send:
+	if (target->type == LOG_TARGET_BUFFER) {
+		struct ist msg;
+		size_t e_maxlen = maxlen;
+
+		msg = ist2(message, size);
+
+		/* make room for the final '\n' which may be forcefully inserted
+		 * by tcp forwarder applet (sink_forward_io_handler)
+		 */
+		e_maxlen -= 1;
+
+		sent = sink_write(target->sink, hdr, e_maxlen, &msg, 1);
+	}
+	else if (target->addr->ss_family == AF_CUST_EXISTING_FD) {
+		struct ist msg;
+
+		msg = ist2(message, size);
+
+		sent = fd_write_frag_line(*plogfd, maxlen, msg_header, nbelem, &msg, 1, 1);
+	}
+	else {
+		int i = 0;
+		int totlen = maxlen - 1; /* save space for the final '\n' */
+
+		/* fill the iovec with the header fragments first, truncating
+		 * the last one if the headers alone exceed <totlen>
+		 */
+		for (i = 0 ; i < nbelem ; i++ ) {
+			iovec[i].iov_base = msg_header[i].ptr;
+			iovec[i].iov_len  = msg_header[i].len;
+			if (totlen <= iovec[i].iov_len) {
+				iovec[i].iov_len = totlen;
+				totlen = 0;
+				break;
+			}
+			totlen -= iovec[i].iov_len;
+		}
+		/* then the message itself, truncated to the remaining budget */
+		if (totlen) {
+			iovec[i].iov_base = message;
+			iovec[i].iov_len  = size;
+			if (totlen <= iovec[i].iov_len)
+				iovec[i].iov_len = totlen;
+			i++;
+		}
+		/* NOTE(review): when the header loop above consumed the whole
+		 * budget (totlen == 0 after break), <i> was not advanced, so the
+		 * '\n' below replaces the truncated last header element — confirm
+		 * this edge case is intended (maxlen is normally large enough).
+		 */
+		iovec[i].iov_base = "\n"; /* insert a \n at the end of the message */
+		iovec[i].iov_len  = 1;
+		i++;
+
+		msghdr.msg_iovlen = i;
+		msghdr.msg_name = (struct sockaddr *)target->addr;
+		msghdr.msg_namelen = get_addr_len(target->addr);
+
+		sent = sendmsg(*plogfd, &msghdr, MSG_DONTWAIT | MSG_NOSIGNAL);
+	}
+
+	if (sent < 0) {
+		static char once;
+
+		if (errno == EAGAIN || errno == EWOULDBLOCK)
+			_HA_ATOMIC_INC(&dropped_logs);
+		else if (!once) {
+			once = 1; /* note: no need for atomic ops here */
+			ha_alert("sendmsg()/writev() failed in logger #%d: %s (errno=%d)\n",
+				 nblogger, strerror(errno), errno);
+		}
+	}
+}
+
+/* does the same as __do_send_log() does for a single target, but here the log
+ * will be sent according to the log backend's lb settings. The function will
+ * leverage __do_send_log() function to actually send the log messages.
+ *
+ * <be> is the log backend whose lbprm settings (round-robin, sticky, random
+ * or log-hash) select the destination server; the other arguments are passed
+ * through to __do_send_log() unchanged. If no server can be selected, the
+ * message is dropped and accounted in dropped_logs.
+ */
+static inline void __do_send_log_backend(struct proxy *be, struct log_header hdr,
+                                         int nblogger, size_t maxlen,
+                                         char *message, size_t size)
+{
+	struct server *srv;
+	uint32_t targetid = ~0; /* default value to check if it was explicitly assigned */
+	uint32_t nb_srv;
+
+	/* a read lock suffices: the srv mapping is only consulted here, and
+	 * the round-robin counter is updated atomically below
+	 */
+	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &be->lbprm.lock);
+
+	if (be->srv_act) {
+		nb_srv = be->srv_act;
+	}
+	else if (be->srv_bck) {
+		/* no more active servers but backup ones are, switch to backup farm */
+		nb_srv = be->srv_bck;
+		if (!(be->options & PR_O_USE_ALL_BK)) {
+			/* log balancing disabled on backup farm */
+			targetid = 0; /* use first server */
+			goto skip_lb;
+		}
+	}
+	else {
+		/* no srv available, can't log */
+		goto drop;
+	}
+
+	/* log-balancing logic: */
+
+	if ((be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_RR) {
+		/* Atomically load and update lastid since it's not protected
+		 * by any write lock
+		 *
+		 * Wrapping is expected and could lead to unexpected ID reset in the
+		 * middle of a cycle, but given that this only happens once in every
+		 * 4 billions it is quite negligible
+		 */
+		targetid = HA_ATOMIC_FETCH_ADD(&be->lbprm.log.lastid, 1) % nb_srv;
+	}
+	else if ((be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_LS) {
+		/* sticky mode: use first server in the pool, which will always stay
+		 * first during dequeuing and requeuing, unless it becomes unavailable
+		 * and will be replaced by another one
+		 */
+		targetid = 0;
+	}
+	else if ((be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_RND) {
+		/* random mode */
+		targetid = statistical_prng() % nb_srv;
+	}
+	else if ((be->lbprm.algo & BE_LB_ALGO) == BE_LB_ALGO_LH) {
+		struct sample result;
+
+		/* log-balance hash: evaluate the configured expression against
+		 * the raw message itself and hash the resulting binary value
+		 */
+		memset(&result, 0, sizeof(result));
+		result.data.type = SMP_T_STR;
+		result.flags = SMP_F_CONST;
+		result.data.u.str.area = message;
+		result.data.u.str.data = size;
+		result.data.u.str.size = size + 1; /* with terminating NULL byte */
+		if (sample_process_cnv(be->lbprm.expr, &result)) {
+			/* gen_hash takes binary input, ensure that we provide such value to it */
+			if (result.data.type == SMP_T_BIN || sample_casts[result.data.type][SMP_T_BIN]) {
+				sample_casts[result.data.type][SMP_T_BIN](&result);
+				targetid = gen_hash(be, result.data.u.str.area, result.data.u.str.data) % nb_srv;
+			}
+		}
+	}
+
+ skip_lb:
+
+	if (targetid == ~0) {
+		/* no target assigned (e.g. hash expression could not be
+		 * evaluated or cast), nothing to do
+		 */
+		goto drop;
+	}
+
+	/* find server based on targetid */
+	srv = be->lbprm.log.srv[targetid];
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &be->lbprm.lock);
+
+	/* NOTE(review): srv is dereferenced after releasing the lbprm lock;
+	 * presumably server log targets cannot disappear at runtime — confirm.
+	 */
+	__do_send_log(srv->log_target, hdr, nblogger, maxlen, message, size);
+	return;
+
+ drop:
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &be->lbprm.lock);
+	_HA_ATOMIC_INC(&dropped_logs);
+}
+
+/*
+ * This function sends a syslog message to every logger of the <loggers> list.
+ * It doesn't care about errors nor does it report them.
+ * The argument <metadata> MUST be an array of size
+ * LOG_META_FIELDS*sizeof(struct ist) containing
+ * data to build the header.
+ * <level> is filtered against each logger's configured level, and
+ * <facility> may be -1 to use each logger's own facility instead.
+ */
+void process_send_log(struct list *loggers, int level, int facility,
+	               struct ist *metadata, char *message, size_t size)
+{
+	struct logger *logger;
+	int nblogger;
+
+	/* Send log messages to syslog server. */
+	nblogger = 0;
+	list_for_each_entry(logger, loggers, list) {
+		int in_range = 1;
+
+		/* we can filter the level of the messages that are sent to each logger */
+		if (level > logger->level)
+			continue;
+
+		if (logger->lb.smp_rgs) {
+			struct smp_log_range *smp_rg;
+			uint next_idx, curr_rg;
+			ullong curr_rg_idx, next_rg_idx;
+
+			/* Sampled logging ("log ... sample"): curr_rg_idx packs
+			 * the current range number in its upper 32 bits and the
+			 * current sample index in the lower 32, so that both can
+			 * be advanced together in a single lock-free CAS below.
+			 */
+			curr_rg_idx = _HA_ATOMIC_LOAD(&logger->lb.curr_rg_idx);
+			do {
+				next_idx = (curr_rg_idx & 0xFFFFFFFFU) + 1;
+				curr_rg = curr_rg_idx >> 32;
+				smp_rg = &logger->lb.smp_rgs[curr_rg];
+
+				/* check if the index we're going to take is within range */
+				in_range = smp_rg->low <= next_idx && next_idx <= smp_rg->high;
+				if (in_range) {
+					/* Let's consume this range. */
+					if (next_idx == smp_rg->high) {
+						/* If consumed, let's select the next range. */
+						curr_rg = (curr_rg + 1) % logger->lb.smp_rgs_sz;
+					}
+				}
+
+				/* the sample index wraps at the sampling period */
+				next_idx = next_idx % logger->lb.smp_sz;
+				next_rg_idx = ((ullong)curr_rg << 32) + next_idx;
+			} while (!_HA_ATOMIC_CAS(&logger->lb.curr_rg_idx, &curr_rg_idx, next_rg_idx) &&
+				 __ha_cpu_relax());
+		}
+		if (in_range) {
+			struct log_header hdr;
+
+			/* the emitted level is raised to the logger's minimum one */
+			hdr.level = MAX(level, logger->minlvl);
+			hdr.facility = (facility == -1) ? logger->facility : facility;
+			hdr.format = logger->format;
+			hdr.metadata = metadata;
+
+			nblogger += 1;
+			if (logger->target.type == LOG_TARGET_BACKEND) {
+				/* log backend: let the lb logic pick the server */
+				__do_send_log_backend(logger->target.be, hdr, nblogger, logger->maxlen, message, size);
+			}
+			else {
+				/* normal target */
+				__do_send_log(&logger->target, hdr, nblogger, logger->maxlen, message, size);
+			}
+		}
+	}
+}
+
+/*
+ * This function sends a syslog message.
+ * It doesn't care about errors nor does it report them.
+ * The arguments <sd> and <sd_size> are used for the structured-data part
+ * in RFC5424 formatted syslog messages.
+ * When <loggers> is NULL, the global loggers list is used instead; if no
+ * logger is available at all the message is silently discarded. <tagb>
+ * falls back to the global log tag when NULL or empty.
+ */
+void __send_log(struct list *loggers, struct buffer *tagb, int level,
+		char *message, size_t size, char *sd, size_t sd_size)
+{
+	/* per-thread cache of the metadata fields that rarely change */
+	static THREAD_LOCAL pid_t curr_pid;
+	static THREAD_LOCAL char pidstr[16];
+	static THREAD_LOCAL struct ist metadata[LOG_META_FIELDS];
+
+	if (loggers == NULL) {
+		if (!LIST_ISEMPTY(&global.loggers)) {
+			loggers = &global.loggers;
+		}
+	}
+	if (!loggers || LIST_ISEMPTY(loggers))
+		return;
+
+	/* hostname is only filled once: it can only be set at config time */
+	if (!metadata[LOG_META_HOST].len) {
+		if (global.log_send_hostname)
+			metadata[LOG_META_HOST] = ist(global.log_send_hostname);
+	}
+
+	if (!tagb || !tagb->area)
+		tagb = &global.log_tag;
+
+	if (tagb)
+		metadata[LOG_META_TAG] = ist2(tagb->area, tagb->data);
+
+	/* invalidate the cached pid string if the pid changed (e.g. after
+	 * a fork), so that it gets rebuilt below
+	 */
+	if (unlikely(curr_pid != getpid()))
+		metadata[LOG_META_PID].len = 0;
+
+	if (!metadata[LOG_META_PID].len) {
+		curr_pid = getpid();
+		ltoa_o(curr_pid, pidstr, sizeof(pidstr));
+		metadata[LOG_META_PID] = ist2(pidstr, strlen(pidstr));
+	}
+
+	metadata[LOG_META_STDATA] = ist2(sd, sd_size);
+
+	/* Remove trailing space of structured data */
+	while (metadata[LOG_META_STDATA].len && metadata[LOG_META_STDATA].ptr[metadata[LOG_META_STDATA].len-1] == ' ')
+		metadata[LOG_META_STDATA].len--;
+
+	return process_send_log(loggers, level, -1, metadata, message, size);
+}
+
+/* one-letter codes reported in the termination state for the client-side
+ * cookie status: No cookie, Invalid cookie, cookie for a Down server,
+ * Valid cookie, Expired cookie, Old cookie, Unused, unknown
+ */
+const char sess_cookie[8] = "NIDVEOU7";
+/* one-letter codes reported for the server-side set-cookie status:
+ * No set-cookie, set-cookie found and left unchanged (Passive),
+ * set-cookie Deleted, set-cookie Inserted, set-cookie Rewritten,
+ * set-cookie Updated, unknown, unknown
+ */
+const char sess_set_cookie[8] = "NPDIRU67";
+
+/*
+ * Try to append character <x> at the current output position, or jump to
+ * the "out" label of the enclosing function when fewer than two bytes
+ * remain (one byte is reserved for the trailing zero). Relies on the
+ * local variables <tmplog>, <dst>, <maxsize> and on an "out" label being
+ * declared in the calling function.
+ */
+#define LOGCHAR(x) do { \
+			if (tmplog < dst + maxsize - 1) { \
+				*(tmplog++) = (x);                     \
+			} else {                                       \
+				goto out;                              \
+			}                                              \
+		} while(0)
+
+
+/* Boot-time initialization of the character maps used when emitting logs:
+ * - rfc5424_escape_map : chars needing a '\' escape in RFC5424 PARAM-VALUE
+ * - hdr_encode_map     : chars '#'-encoded in captured header dumps
+ * - url_encode_map     : chars '#'-encoded in logged URLs
+ * - http_encode_map    : chars '%'-encoded in HTTP header values
+ */
+static void init_log()
+{
+	const char *p;
+	int c;
+
+	/* RFC5424 structured-data: '"', '\' and ']' inside a PARAM-VALUE
+	 * must be escaped with a '\' prefix.
+	 * See https://tools.ietf.org/html/rfc5424#section-6.3.3 for details.
+	 */
+	memset(rfc5424_escape_map, 0, sizeof(rfc5424_escape_map));
+	for (p = "\"\\]"; *p; p++)
+		ha_bit_set(*p, rfc5424_escape_map);
+
+	/* Log header encoding: non-printable characters (< 32 or >= 127) are
+	 * '#'-encoded in both maps; headers additionally need '{|}"#' to be
+	 * encoded while URLs only need '"' and '#'.
+	 */
+	memset(hdr_encode_map, 0, sizeof(hdr_encode_map));
+	memset(url_encode_map, 0, sizeof(url_encode_map));
+	for (c = 0; c < 256; c++) {
+		if (c < 32 || c >= 127) {
+			ha_bit_set(c, hdr_encode_map);
+			ha_bit_set(c, url_encode_map);
+		}
+	}
+	for (p = "\"#{|}"; *p; p++)
+		ha_bit_set(*p, hdr_encode_map);
+	for (p = "\"#"; *p; p++)
+		ha_bit_set(*p, url_encode_map);
+
+	/* HTTP header encoding map. The httpbis draft defines field-content
+	 * as field-vchar (VCHAR = %x21-7E / obs-text = %x80-FF) optionally
+	 * separated by SP / HTAB, all of which are left as-is. Every other
+	 * char, i.e. 0x00-0x08, 0x0a-0x1f and 0x7f, gets encoded ("obs-fold"
+	 * is voluntarily forgotten because haproxy removes it).
+	 */
+	memset(http_encode_map, 0, sizeof(http_encode_map));
+	for (c = 0; c < 256; c++) {
+		if (c <= 0x08 || (c >= 0x0a && c <= 0x1f) || c == 0x7f)
+			ha_bit_set(c, http_encode_map);
+	}
+}
+
+INITCALL0(STG_PREPARE, init_log);
+
+/* Allocates (or resizes) the two shared line buffers used to build syslog
+ * messages, each sized to hold global.max_syslog_len bytes plus a
+ * terminating zero. Returns 1 on success, 0 on allocation failure (in
+ * which case the previous buffers, if any, remain valid).
+ */
+int init_log_buffers()
+{
+	logline         = my_realloc2(logline, global.max_syslog_len + 1);
+	logline_rfc5424 = my_realloc2(logline_rfc5424, global.max_syslog_len + 1);
+	return (logline && logline_rfc5424) ? 1 : 0;
+}
+
+/* Releases the shared syslog line buffers and resets their pointers so
+ * that a later init_log_buffers() call starts from a clean state.
+ */
+void deinit_log_buffers()
+{
+	free(logline);
+	logline = NULL;
+	free(logline_rfc5424);
+	logline_rfc5424 = NULL;
+}
+
+/* Releases the log forwarder proxies ("log-forward" sections). These
+ * proxies are chained on the dedicated cfg_log_forward list rather than
+ * on the main proxy list, hence the manual walk here.
+ */
+void deinit_log_forward()
+{
+	struct proxy *next = cfg_log_forward;
+
+	while (next) {
+		struct proxy *curr = next;
+
+		next = curr->next;
+		free_proxy(curr);
+	}
+}
+
+/* Builds a log line in <dst> based on <list_format>, and stops before reaching
+ * <maxsize> characters. Returns the size of the output string in characters,
+ * not counting the trailing zero which is always added if the resulting size
+ * is not zero. It requires a valid session and optionally a stream. If the
+ * stream is NULL, default values will be assumed for the stream part.
+ */
+int sess_build_logline(struct session *sess, struct stream *s, char *dst, size_t maxsize, struct list *list_format)
+{
+ struct proxy *fe = sess->fe;
+ struct proxy *be;
+ struct http_txn *txn;
+ const struct strm_logs *logs;
+ struct connection *fe_conn, *be_conn;
+ unsigned int s_flags;
+ unsigned int uniq_id;
+ struct buffer chunk;
+ char *uri;
+ char *spc;
+ char *qmark;
+ char *end;
+ struct tm tm;
+ int t_request;
+ int hdr;
+ int last_isspace = 1;
+ int nspaces = 0;
+ char *tmplog;
+ char *ret;
+ int iret;
+ int status;
+ struct logformat_node *tmp;
+ struct timeval tv;
+ struct strm_logs tmp_strm_log;
+ struct ist path;
+ struct http_uri_parser parser;
+
+ /* FIXME: let's limit ourselves to frontend logging for now. */
+
+ if (likely(s)) {
+ be = s->be;
+ txn = s->txn;
+ be_conn = sc_conn(s->scb);
+ status = (txn ? txn->status : 0);
+ s_flags = s->flags;
+ uniq_id = s->uniq_id;
+ logs = &s->logs;
+ } else {
+ /* we have no stream so we first need to initialize a few
+ * things that are needed later. We do increment the request
+ * ID so that it's uniquely assigned to this request just as
+ * if the request had reached the point of being processed.
+ * A request error is reported as it's the only element we have
+ * here and which justifies emitting such a log.
+ */
+ be = ((obj_type(sess->origin) == OBJ_TYPE_CHECK) ? __objt_check(sess->origin)->proxy : fe);
+ txn = NULL;
+ fe_conn = objt_conn(sess->origin);
+ be_conn = ((obj_type(sess->origin) == OBJ_TYPE_CHECK) ? sc_conn(__objt_check(sess->origin)->sc) : NULL);
+ status = 0;
+ s_flags = SF_ERR_PRXCOND | SF_FINST_R;
+ uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);
+
+ /* prepare a valid log structure */
+ tmp_strm_log.accept_ts = sess->accept_ts;
+ tmp_strm_log.accept_date = sess->accept_date;
+ tmp_strm_log.t_handshake = sess->t_handshake;
+ tmp_strm_log.t_idle = (sess->t_idle >= 0 ? sess->t_idle : 0);
+ tmp_strm_log.request_ts = 0;
+ tmp_strm_log.t_queue = -1;
+ tmp_strm_log.t_connect = -1;
+ tmp_strm_log.t_data = -1;
+ tmp_strm_log.t_close = ns_to_ms(now_ns - sess->accept_ts);
+ tmp_strm_log.bytes_in = 0;
+ tmp_strm_log.bytes_out = 0;
+ tmp_strm_log.prx_queue_pos = 0;
+ tmp_strm_log.srv_queue_pos = 0;
+
+ logs = &tmp_strm_log;
+
+ if ((fe->mode == PR_MODE_HTTP) && fe_conn && fe_conn->mux && fe_conn->mux->ctl) {
+ enum mux_exit_status es = fe_conn->mux->ctl(fe_conn, MUX_CTL_EXIT_STATUS, &status);
+
+ switch (es) {
+ case MUX_ES_SUCCESS:
+ break;
+ case MUX_ES_INVALID_ERR:
+ status = (status ? status : 400);
+ if ((fe_conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(fe_conn))
+ s_flags = SF_ERR_CLICL | SF_FINST_R;
+ else
+ s_flags = SF_ERR_PRXCOND | SF_FINST_R;
+ break;
+ case MUX_ES_TOUT_ERR:
+ status = (status ? status : 408);
+ s_flags = SF_ERR_CLITO | SF_FINST_R;
+ break;
+ case MUX_ES_NOTIMPL_ERR:
+ status = (status ? status : 501);
+ s_flags = SF_ERR_PRXCOND | SF_FINST_R;
+ break;
+ case MUX_ES_INTERNAL_ERR:
+ status = (status ? status : 500);
+ s_flags = SF_ERR_INTERNAL | SF_FINST_R;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ t_request = -1;
+ if ((llong)(logs->request_ts - logs->accept_ts) >= 0)
+ t_request = ns_to_ms(logs->request_ts - logs->accept_ts);
+
+ tmplog = dst;
+
+ /* fill logbuffer */
+ if (LIST_ISEMPTY(list_format))
+ return 0;
+
+ list_for_each_entry(tmp, list_format, list) {
+#ifdef USE_OPENSSL
+ struct connection *conn;
+#endif
+ const struct sockaddr_storage *addr;
+ const char *src = NULL;
+ struct sample *key;
+ const struct buffer empty = { };
+
+ switch (tmp->type) {
+ case LOG_FMT_SEPARATOR:
+ if (!last_isspace) {
+ LOGCHAR(' ');
+ last_isspace = 1;
+ }
+ break;
+
+ case LOG_FMT_TEXT: // text
+ src = tmp->arg;
+ iret = strlcpy2(tmplog, src, dst + maxsize - tmplog);
+ if (iret == 0)
+ goto out;
+ tmplog += iret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_EXPR: // sample expression, may be request or response
+ key = NULL;
+ if (tmp->options & LOG_OPT_REQ_CAP)
+ key = sample_fetch_as_type(be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, tmp->expr, SMP_T_STR);
+
+ if (!key && (tmp->options & LOG_OPT_RES_CAP))
+ key = sample_fetch_as_type(be, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, tmp->expr, SMP_T_STR);
+
+ if (!key && !(tmp->options & (LOG_OPT_REQ_CAP|LOG_OPT_RES_CAP))) // cfg, cli
+ key = sample_fetch_as_type(be, sess, s, SMP_OPT_FINAL, tmp->expr, SMP_T_STR);
+
+ if (tmp->options & LOG_OPT_HTTP)
+ ret = lf_encode_chunk(tmplog, dst + maxsize,
+ '%', http_encode_map, key ? &key->data.u.str : &empty, tmp);
+ else
+ ret = lf_text_len(tmplog,
+ key ? key->data.u.str.area : NULL,
+ key ? key->data.u.str.data : 0,
+ dst + maxsize - tmplog,
+ tmp);
+ if (ret == 0)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_CLIENTIP: // %ci
+ addr = (s ? sc_src(s->scf) : sess_src(sess));
+ if (addr)
+ ret = lf_ip(tmplog, (struct sockaddr *)addr, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_CLIENTPORT: // %cp
+ addr = (s ? sc_src(s->scf) : sess_src(sess));
+ if (addr) {
+ /* sess->listener is always defined when the session's owner is an inbound connections */
+ if (addr->ss_family == AF_UNIX)
+ ret = ltoa_o(sess->listener->luid, tmplog, dst + maxsize - tmplog);
+ else
+ ret = lf_port(tmplog, (struct sockaddr *)addr, dst + maxsize - tmplog, tmp);
+ }
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_FRONTENDIP: // %fi
+ addr = (s ? sc_dst(s->scf) : sess_dst(sess));
+ if (addr)
+ ret = lf_ip(tmplog, (struct sockaddr *)addr, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_FRONTENDPORT: // %fp
+ addr = (s ? sc_dst(s->scf) : sess_dst(sess));
+ if (addr) {
+ /* sess->listener is always defined when the session's owner is an inbound connections */
+ if (addr->ss_family == AF_UNIX)
+ ret = ltoa_o(sess->listener->luid, tmplog, dst + maxsize - tmplog);
+ else
+ ret = lf_port(tmplog, (struct sockaddr *)addr, dst + maxsize - tmplog, tmp);
+ }
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BACKENDIP: // %bi
+ if (be_conn && conn_get_src(be_conn))
+ ret = lf_ip(tmplog, (const struct sockaddr *)be_conn->src, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BACKENDPORT: // %bp
+ if (be_conn && conn_get_src(be_conn))
+ ret = lf_port(tmplog, (struct sockaddr *)be_conn->src, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SERVERIP: // %si
+ if (be_conn && conn_get_dst(be_conn))
+ ret = lf_ip(tmplog, (struct sockaddr *)be_conn->dst, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SERVERPORT: // %sp
+ if (be_conn && conn_get_dst(be_conn))
+ ret = lf_port(tmplog, (struct sockaddr *)be_conn->dst, dst + maxsize - tmplog, tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, dst + maxsize - tmplog, tmp);
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_DATE: // %t = accept date
+ get_localtime(logs->accept_date.tv_sec, &tm);
+ ret = date2str_log(tmplog, &tm, &logs->accept_date, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_tr: // %tr = start of request date
+ /* Note that the timers are valid if we get here */
+ tv_ms_add(&tv, &logs->accept_date, logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0);
+ get_localtime(tv.tv_sec, &tm);
+ ret = date2str_log(tmplog, &tm, &tv, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_DATEGMT: // %T = accept date, GMT
+ get_gmtime(logs->accept_date.tv_sec, &tm);
+ ret = gmt2str_log(tmplog, &tm, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_trg: // %trg = start of request date, GMT
+ tv_ms_add(&tv, &logs->accept_date, logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0);
+ get_gmtime(tv.tv_sec, &tm);
+ ret = gmt2str_log(tmplog, &tm, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_DATELOCAL: // %Tl = accept date, local
+ get_localtime(logs->accept_date.tv_sec, &tm);
+ ret = localdate2str_log(tmplog, logs->accept_date.tv_sec, &tm, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_trl: // %trl = start of request date, local
+ tv_ms_add(&tv, &logs->accept_date, logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0);
+ get_localtime(tv.tv_sec, &tm);
+ ret = localdate2str_log(tmplog, tv.tv_sec, &tm, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TS: // %Ts
+ if (tmp->options & LOG_OPT_HEXA) {
+ iret = snprintf(tmplog, dst + maxsize - tmplog, "%04X", (unsigned int)logs->accept_date.tv_sec);
+ if (iret < 0 || iret > dst + maxsize - tmplog)
+ goto out;
+ last_isspace = 0;
+ tmplog += iret;
+ } else {
+ ret = ltoa_o(logs->accept_date.tv_sec, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_MS: // %ms
+ if (tmp->options & LOG_OPT_HEXA) {
+ iret = snprintf(tmplog, dst + maxsize - tmplog, "%02X",(unsigned int)logs->accept_date.tv_usec/1000);
+ if (iret < 0 || iret > dst + maxsize - tmplog)
+ goto out;
+ last_isspace = 0;
+ tmplog += iret;
+ } else {
+ if ((dst + maxsize - tmplog) < 4)
+ goto out;
+ ret = utoa_pad((unsigned int)logs->accept_date.tv_usec/1000,
+ tmplog, 4);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_FRONTEND: // %f
+ src = fe->id;
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_FRONTEND_XPRT: // %ft
+ src = fe->id;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ iret = strlcpy2(tmplog, src, dst + maxsize - tmplog);
+ if (iret == 0)
+ goto out;
+ tmplog += iret;
+
+ /* sess->listener may be undefined if the session's owner is a health-check */
+ if (sess->listener && sess->listener->bind_conf->xprt->get_ssl_sock_ctx)
+ LOGCHAR('~');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ last_isspace = 0;
+ break;
+#ifdef USE_OPENSSL
+ case LOG_FMT_SSL_CIPHER: // %sslc
+ src = NULL;
+ conn = objt_conn(sess->origin);
+ if (conn) {
+ src = ssl_sock_get_cipher_name(conn);
+ }
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SSL_VERSION: // %sslv
+ src = NULL;
+ conn = objt_conn(sess->origin);
+ if (conn) {
+ src = ssl_sock_get_proto_version(conn);
+ }
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+#endif
+ case LOG_FMT_BACKEND: // %b
+ src = be->id;
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SERVER: // %s
+ switch (obj_type(s ? s->target : sess->origin)) {
+ case OBJ_TYPE_SERVER:
+ src = __objt_server(s->target)->id;
+ break;
+ case OBJ_TYPE_APPLET:
+ src = __objt_applet(s->target)->name;
+ break;
+ case OBJ_TYPE_CHECK:
+ src = (__objt_check(sess->origin)->server
+ ? __objt_check(sess->origin)->server->id
+ : "<NOSRV>");
+ break;
+ default:
+ src = "<NOSRV>";
+ break;
+ }
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_Th: // %Th = handshake time
+ ret = ltoa_o(logs->t_handshake, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_Ti: // %Ti = HTTP idle time
+ ret = ltoa_o(logs->t_idle, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TR: // %TR = HTTP request time
+ ret = ltoa_o((t_request >= 0) ? t_request - logs->t_idle - logs->t_handshake : -1,
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TQ: // %Tq = Th + Ti + TR
+ ret = ltoa_o(t_request, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TW: // %Tw
+ ret = ltoa_o((logs->t_queue >= 0) ? logs->t_queue - t_request : -1,
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TC: // %Tc
+ ret = ltoa_o((logs->t_connect >= 0) ? logs->t_connect - logs->t_queue : -1,
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_Tr: // %Tr
+ ret = ltoa_o((logs->t_data >= 0) ? logs->t_data - logs->t_connect : -1,
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TD: // %Td
+ if (be->mode == PR_MODE_HTTP)
+ ret = ltoa_o((logs->t_data >= 0) ? logs->t_close - logs->t_data : -1,
+ tmplog, dst + maxsize - tmplog);
+ else
+ ret = ltoa_o((logs->t_connect >= 0) ? logs->t_close - logs->t_connect : -1,
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_Ta: // %Ta = active time = Tt - Th - Ti
+ if (!(fe->to_log & LW_BYTES))
+ LOGCHAR('+');
+ ret = ltoa_o(logs->t_close - (logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0),
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TT: // %Tt = total time
+ if (!(fe->to_log & LW_BYTES))
+ LOGCHAR('+');
+ ret = ltoa_o(logs->t_close, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TU: // %Tu = total time seen by user = Tt - Ti
+ if (!(fe->to_log & LW_BYTES))
+ LOGCHAR('+');
+ ret = ltoa_o(logs->t_close - (logs->t_idle >= 0 ? logs->t_idle : 0),
+ tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_STATUS: // %ST
+ ret = ltoa_o(status, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BYTES: // %B
+ if (!(fe->to_log & LW_BYTES))
+ LOGCHAR('+');
+ ret = lltoa(logs->bytes_out, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BYTES_UP: // %U
+ ret = lltoa(logs->bytes_in, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_CCLIENT: // %CC
+ src = txn ? txn->cli_cookie : NULL;
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_CSERVER: // %CS
+ src = txn ? txn->srv_cookie : NULL;
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TERMSTATE: // %ts
+ LOGCHAR(sess_term_cond[(s_flags & SF_ERR_MASK) >> SF_ERR_SHIFT]);
+ LOGCHAR(sess_fin_state[(s_flags & SF_FINST_MASK) >> SF_FINST_SHIFT]);
+ *tmplog = '\0';
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_TERMSTATE_CK: // %tsc, same as TS with cookie state (for mode HTTP)
+ LOGCHAR(sess_term_cond[(s_flags & SF_ERR_MASK) >> SF_ERR_SHIFT]);
+ LOGCHAR(sess_fin_state[(s_flags & SF_FINST_MASK) >> SF_FINST_SHIFT]);
+ LOGCHAR((txn && (be->ck_opts & PR_CK_ANY)) ? sess_cookie[(txn->flags & TX_CK_MASK) >> TX_CK_SHIFT] : '-');
+ LOGCHAR((txn && (be->ck_opts & PR_CK_ANY)) ? sess_set_cookie[(txn->flags & TX_SCK_MASK) >> TX_SCK_SHIFT] : '-');
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_ACTCONN: // %ac
+ ret = ltoa_o(actconn, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_FECONN: // %fc
+ ret = ltoa_o(fe->feconn, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BECONN: // %bc
+ ret = ltoa_o(be->beconn, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SRVCONN: // %sc
+ switch (obj_type(s ? s->target : sess->origin)) {
+ case OBJ_TYPE_SERVER:
+ ret = ultoa_o(__objt_server(s->target)->cur_sess,
+ tmplog, dst + maxsize - tmplog);
+ break;
+ case OBJ_TYPE_CHECK:
+ ret = ultoa_o(__objt_check(sess->origin)->server
+ ? __objt_check(sess->origin)->server->cur_sess
+ : 0, tmplog, dst + maxsize - tmplog);
+ break;
+ default:
+ ret = ultoa_o(0, tmplog, dst + maxsize - tmplog);
+ break;
+ }
+
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_RETRIES: // %rc
+ if (s_flags & SF_REDISP)
+ LOGCHAR('+');
+ ret = ltoa_o((s ? s->conn_retries : 0), tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_SRVQUEUE: // %sq
+ ret = ltoa_o(logs->srv_queue_pos, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_BCKQUEUE: // %bq
+ ret = ltoa_o(logs->prx_queue_pos, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HDRREQUEST: // %hr
+ /* request header */
+ if (fe->nb_req_cap && s && s->req_cap) {
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ LOGCHAR('{');
+ for (hdr = 0; hdr < fe->nb_req_cap; hdr++) {
+ if (hdr)
+ LOGCHAR('|');
+ if (s->req_cap[hdr] != NULL) {
+ ret = lf_encode_string(tmplog, dst + maxsize,
+ '#', hdr_encode_map, s->req_cap[hdr], tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+ tmplog = ret;
+ }
+ }
+ LOGCHAR('}');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_HDRREQUESTLIST: // %hrl
+ /* request header list */
+ if (fe->nb_req_cap && s && s->req_cap) {
+ for (hdr = 0; hdr < fe->nb_req_cap; hdr++) {
+ if (hdr > 0)
+ LOGCHAR(' ');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ if (s->req_cap[hdr] != NULL) {
+ ret = lf_encode_string(tmplog, dst + maxsize,
+ '#', hdr_encode_map, s->req_cap[hdr], tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+ tmplog = ret;
+ } else if (!(tmp->options & LOG_OPT_QUOTE))
+ LOGCHAR('-');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ last_isspace = 0;
+ }
+ }
+ break;
+
+
+ case LOG_FMT_HDRRESPONS: // %hs
+ /* response header */
+ if (fe->nb_rsp_cap && s && s->res_cap) {
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ LOGCHAR('{');
+ for (hdr = 0; hdr < fe->nb_rsp_cap; hdr++) {
+ if (hdr)
+ LOGCHAR('|');
+ if (s->res_cap[hdr] != NULL) {
+ ret = lf_encode_string(tmplog, dst + maxsize,
+ '#', hdr_encode_map, s->res_cap[hdr], tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+ tmplog = ret;
+ }
+ }
+ LOGCHAR('}');
+ last_isspace = 0;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ }
+ break;
+
+ case LOG_FMT_HDRRESPONSLIST: // %hsl
+ /* response header list */
+ if (fe->nb_rsp_cap && s && s->res_cap) {
+ for (hdr = 0; hdr < fe->nb_rsp_cap; hdr++) {
+ if (hdr > 0)
+ LOGCHAR(' ');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ if (s->res_cap[hdr] != NULL) {
+ ret = lf_encode_string(tmplog, dst + maxsize,
+ '#', hdr_encode_map, s->res_cap[hdr], tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+ tmplog = ret;
+ } else if (!(tmp->options & LOG_OPT_QUOTE))
+ LOGCHAR('-');
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ last_isspace = 0;
+ }
+ }
+ break;
+
+ case LOG_FMT_REQ: // %r
+ /* Request */
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+ ret = lf_encode_string(tmplog, dst + maxsize,
+ '#', url_encode_map, uri, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_PATH: // %HP
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ end = uri + strlen(uri);
+ // look for the first whitespace character
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ // keep advancing past multiple spaces
+ while (uri < end && HTTP_IS_SPHT(*uri)) {
+ uri++; nspaces++;
+ }
+
+ // look for first space or question mark after url
+ spc = uri;
+ while (spc < end && *spc != '?' && !HTTP_IS_SPHT(*spc))
+ spc++;
+
+ if (!txn || !txn->uri || nspaces == 0) {
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else {
+ chunk.area = uri;
+ chunk.data = spc - uri;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_PATH_ONLY: // %HPO
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ end = uri + strlen(uri);
+
+ // look for the first whitespace character
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ // keep advancing past multiple spaces
+ while (uri < end && HTTP_IS_SPHT(*uri)) {
+ uri++; nspaces++;
+ }
+
+ // look for first space after url
+ spc = uri;
+ while (spc < end && !HTTP_IS_SPHT(*spc))
+ spc++;
+
+ path = ist2(uri, spc - uri);
+
+ // extract relative path without query params from url
+ parser = http_uri_parser_init(path);
+ path = iststop(http_parse_path(&parser), '?');
+ if (!txn || !txn->uri || nspaces == 0) {
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else {
+ chunk.area = path.ptr;
+ chunk.data = path.len;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_QUERY: // %HQ
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ if (!txn || !txn->uri) {
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else {
+ uri = txn->uri;
+ end = uri + strlen(uri);
+ // look for the first question mark
+ while (uri < end && *uri != '?')
+ uri++;
+
+ qmark = uri;
+ // look for first space or question mark after url
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ chunk.area = qmark;
+ chunk.data = uri - qmark;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_URI: // %HU
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ end = uri + strlen(uri);
+ // look for the first whitespace character
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ // keep advancing past multiple spaces
+ while (uri < end && HTTP_IS_SPHT(*uri)) {
+ uri++; nspaces++;
+ }
+
+ // look for first space after url
+ spc = uri;
+ while (spc < end && !HTTP_IS_SPHT(*spc))
+ spc++;
+
+ if (!txn || !txn->uri || nspaces == 0) {
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else {
+ chunk.area = uri;
+ chunk.data = spc - uri;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_METHOD: // %HM
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ end = uri + strlen(uri);
+ // look for the first whitespace character
+ spc = uri;
+ while (spc < end && !HTTP_IS_SPHT(*spc))
+ spc++;
+
+ if (spc == end) { // odd case, we have txn->uri, but we only got a verb
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else {
+ chunk.area = uri;
+ chunk.data = spc - uri;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_HTTP_VERSION: // %HV
+ uri = txn && txn->uri ? txn->uri : "<BADREQ>";
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ end = uri + strlen(uri);
+ // look for the first whitespace character
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ // keep advancing past multiple spaces
+ while (uri < end && HTTP_IS_SPHT(*uri)) {
+ uri++; nspaces++;
+ }
+
+ // look for the next whitespace character
+ while (uri < end && !HTTP_IS_SPHT(*uri))
+ uri++;
+
+ // keep advancing past multiple spaces
+ while (uri < end && HTTP_IS_SPHT(*uri))
+ uri++;
+
+ if (!txn || !txn->uri || nspaces == 0) {
+ chunk.area = "<BADREQ>";
+ chunk.data = strlen("<BADREQ>");
+ } else if (uri == end) {
+ chunk.area = "HTTP/0.9";
+ chunk.data = strlen("HTTP/0.9");
+ } else {
+ chunk.area = uri;
+ chunk.data = end - uri;
+ }
+
+ ret = lf_encode_chunk(tmplog, dst + maxsize, '#', url_encode_map, &chunk, tmp);
+ if (ret == NULL || *ret != '\0')
+ goto out;
+
+ tmplog = ret;
+ if (tmp->options & LOG_OPT_QUOTE)
+ LOGCHAR('"');
+
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_COUNTER: // %rt
+ if (tmp->options & LOG_OPT_HEXA) {
+ iret = snprintf(tmplog, dst + maxsize - tmplog, "%04X", uniq_id);
+ if (iret < 0 || iret > dst + maxsize - tmplog)
+ goto out;
+ last_isspace = 0;
+ tmplog += iret;
+ } else {
+ ret = ltoa_o(uniq_id, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_LOGCNT: // %lc
+ if (tmp->options & LOG_OPT_HEXA) {
+ iret = snprintf(tmplog, dst + maxsize - tmplog, "%04X", fe->log_count);
+ if (iret < 0 || iret > dst + maxsize - tmplog)
+ goto out;
+ last_isspace = 0;
+ tmplog += iret;
+ } else {
+ ret = ultoa_o(fe->log_count, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_HOSTNAME: // %H
+ src = hostname;
+ ret = lf_text(tmplog, src, dst + maxsize - tmplog, tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ case LOG_FMT_PID: // %pid
+ if (tmp->options & LOG_OPT_HEXA) {
+ iret = snprintf(tmplog, dst + maxsize - tmplog, "%04X", pid);
+ if (iret < 0 || iret > dst + maxsize - tmplog)
+ goto out;
+ last_isspace = 0;
+ tmplog += iret;
+ } else {
+ ret = ltoa_o(pid, tmplog, dst + maxsize - tmplog);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ }
+ break;
+
+ case LOG_FMT_UNIQUEID: // %ID
+ ret = NULL;
+ if (s)
+ ret = lf_text_len(tmplog, s->unique_id.ptr, s->unique_id.len, maxsize - (tmplog - dst), tmp);
+ else
+ ret = lf_text_len(tmplog, NULL, 0, maxsize - (tmplog - dst), tmp);
+ if (ret == NULL)
+ goto out;
+ tmplog = ret;
+ last_isspace = 0;
+ break;
+
+ }
+ }
+
+out:
+	/* *tmplog is an unused character */
+ *tmplog = '\0';
+ return tmplog - dst;
+
+}
+
+/*
+ * send a log for the stream when we have enough info about it.
+ * Will not log if the frontend has no log defined.
+ */
+void strm_log(struct stream *s)
+{
+	struct session *sess = s->sess;
+	int size, err, level;
+	int sd_size = 0;
+
+	/* if we don't want to log normal traffic, return now */
+	err = (s->flags & SF_REDISP) ||
+	      ((s->flags & SF_ERR_MASK) > SF_ERR_LOCAL) ||
+	      (((s->flags & SF_ERR_MASK) == SF_ERR_NONE) && s->conn_retries) ||
+	      ((sess->fe->mode == PR_MODE_HTTP) && s->txn && s->txn->status >= 500);
+
+	if (!err && (sess->fe->options2 & PR_O2_NOLOGNORM))
+		return;
+
+	/* nothing to emit if the frontend has no logger configured */
+	if (LIST_ISEMPTY(&sess->fe->loggers))
+		return;
+
+	if (s->logs.level) { /* loglevel was overridden */
+		if (s->logs.level == -1) {
+			s->logs.logwait = 0; /* logs disabled */
+			return;
+		}
+		/* stored level is shifted by one so that 0 means "not set" */
+		level = s->logs.level - 1;
+	}
+	else {
+		level = LOG_INFO;
+		if (err && (sess->fe->options2 & PR_O2_LOGERRORS))
+			level = LOG_ERR;
+	}
+
+	/* if unique-id was not generated */
+	if (!isttest(s->unique_id) && !LIST_ISEMPTY(&sess->fe->format_unique_id)) {
+		stream_generate_unique_id(s, &sess->fe->format_unique_id);
+	}
+
+	/* build the optional RFC5424 structured-data part first */
+	if (!LIST_ISEMPTY(&sess->fe->logformat_sd)) {
+		sd_size = build_logline(s, logline_rfc5424, global.max_syslog_len,
+		                        &sess->fe->logformat_sd);
+	}
+
+	size = build_logline(s, logline, global.max_syslog_len, &sess->fe->logformat);
+	if (size > 0) {
+		_HA_ATOMIC_INC(&sess->fe->log_count);
+		/* size + 1 includes the terminating '\0' written by build_logline() */
+		__send_log(&sess->fe->loggers, &sess->fe->log_tag, level,
+			   logline, size + 1, logline_rfc5424, sd_size);
+		s->logs.logwait = 0; /* this stream is now fully logged */
+	}
+}
+
+/*
+ * send a minimalist log for the session. Will not log if the frontend has no
+ * log defined. It is assumed that this is only used to report anomalies that
+ * cannot lead to the creation of a regular stream. Because of this the log
+ * level is LOG_INFO or LOG_ERR depending on the "log-separate-error" setting
+ * in the frontend. The caller must simply know that it should not call this
+ * function to report unimportant events. It is safe to call this function with
+ * sess==NULL (will not do anything).
+ */
+void sess_log(struct session *sess)
+{
+	int size, level;
+	int sd_size = 0;
+
+	if (!sess)
+		return;
+
+	if (LIST_ISEMPTY(&sess->fe->loggers))
+		return;
+
+	/* LOG_ERR when the frontend separates error logs, LOG_INFO otherwise */
+	level = LOG_INFO;
+	if (sess->fe->options2 & PR_O2_LOGERRORS)
+		level = LOG_ERR;
+
+	/* build the optional RFC5424 structured-data part first */
+	if (!LIST_ISEMPTY(&sess->fe->logformat_sd)) {
+		sd_size = sess_build_logline(sess, NULL,
+		                             logline_rfc5424, global.max_syslog_len,
+		                             &sess->fe->logformat_sd);
+	}
+
+	/* prefer the dedicated "error-log-format" when one is configured */
+	if (!LIST_ISEMPTY(&sess->fe->logformat_error))
+		size = sess_build_logline(sess, NULL, logline, global.max_syslog_len, &sess->fe->logformat_error);
+	else
+		size = sess_build_logline(sess, NULL, logline, global.max_syslog_len, &sess->fe->logformat);
+	if (size > 0) {
+		_HA_ATOMIC_INC(&sess->fe->log_count);
+		/* size + 1 includes the terminating '\0' of the built line */
+		__send_log(&sess->fe->loggers, &sess->fe->log_tag, level,
+			   logline, size + 1, logline_rfc5424, sd_size);
+	}
+}
+
+/* Emit a printf-formatted message <format> at priority <level> to all loggers
+ * in <loggers>, tagged with <tag>. The message is rendered into the shared
+ * <logline> buffer and clamped to global.max_syslog_len on truncation or
+ * vsnprintf() error. Nothing is sent when <level> is negative or when no
+ * format/output buffer is available.
+ */
+void app_log(struct list *loggers, struct buffer *tag, int level, const char *format, ...)
+{
+	va_list argp;
+	int data_len;
+
+	if (level < 0 || format == NULL || logline == NULL)
+		return;
+
+	va_start(argp, format);
+	data_len = vsnprintf(logline, global.max_syslog_len, format, argp);
+	/* vsnprintf() returns the untruncated length (or <0 on error): clamp */
+	if (data_len < 0 || data_len > global.max_syslog_len)
+		data_len = global.max_syslog_len;
+	va_end(argp);
+
+	/* sd_size of 2 presumably covers the "- " NILVALUE of the default
+	 * rfc5424 SD format -- TODO confirm against its definition.
+	 */
+	__send_log(loggers, tag, level, logline, data_len, default_rfc5424_sd_log_format, 2);
+}
+/*
+ * This function parses a received log message <buf> of size <buflen>.
+ * It fills <level>, <facility> and <metadata> depending on the detected
+ * header format, and <message> will point to the remaining payload of
+ * size <size>.
+ *
+ * <metadata> must point to a preallocated array of LOG_META_FIELDS*sizeof(struct ist)
+ * struct ist len will be set to 0 if a field is not found
+ * <level> and <facility> will be set to -1 if not found.
+ */
+void parse_log_message(char *buf, size_t buflen, int *level, int *facility,
+                       struct ist *metadata, char **message, size_t *size)
+{
+
+	char *p;
+	int fac_level = 0;
+
+	/* defaults: no priority detected, the message is the whole buffer */
+	*level = *facility = -1;
+
+	*message = buf;
+	*size = buflen;
+
+	memset(metadata, 0, LOG_META_FIELDS*sizeof(struct ist));
+
+	/* parse the leading "<PRIVAL>" syslog priority field */
+	p = buf;
+	if (*size < 2 || *p != '<')
+		return;
+
+	p++;
+	while (*p != '>') {
+		if (*p > '9' || *p < '0')
+			return;
+		fac_level = 10*fac_level + (*p - '0');
+		p++;
+		/* NOTE(review): when p - buf == buflen we only return on the
+		 * next iteration, after *p was read one byte past the end of
+		 * <buf>; '>=' looks safer here -- to be confirmed.
+		 */
+		if ((p - buf) > buflen)
+			return;
+	}
+
+	/* PRIVAL = facility * 8 + severity */
+	*facility = fac_level >> 3;
+	*level = fac_level & 0x7;
+	p++;
+
+	metadata[LOG_META_PRIO] = ist2(buf, p - buf);
+
+	buflen -= p - buf;
+	buf = p;
+
+	*size = buflen;
+	*message = buf;
+
+	/* for rfc5424, prio is always followed by '1' and ' ' */
+	if ((*size > 2) && (p[0] == '1') && (p[1] == ' ')) {
+		/* format is always '1 TIMESTAMP HOSTNAME TAG PID MSGID STDATA '
+		 * followed by message.
+		 * Each header field can present NILVALUE: '-'
+		 */
+
+		p += 2;
+		*size -= 2;
+		/* timestamp is NILVALUE '-' */
+		if (*size > 2 && (p[0] == '-') && p[1] == ' ') {
+			metadata[LOG_META_TIME] = ist2(p, 1);
+			p++;
+		}
+		else if (*size > LOG_ISOTIME_MINLEN) {
+			metadata[LOG_META_TIME].ptr = p;
+
+			/* check if optional secfrac is present
+			 * in timestamp.
+			 * possible format are:
+			 * ex: '1970-01-01T00:00:00.000000Z'
+			 *     '1970-01-01T00:00:00.000000+00:00'
+			 *     '1970-01-01T00:00:00.000000-00:00'
+			 *     '1970-01-01T00:00:00Z'
+			 *     '1970-01-01T00:00:00+00:00'
+			 *     '1970-01-01T00:00:00-00:00'
+			 */
+			p += 19;
+			if (*p == '.') {
+				p++;
+				if ((p - buf) >= buflen)
+					goto bad_format;
+				while (*p != 'Z' && *p != '+' && *p != '-') {
+					if ((unsigned char)(*p - '0') > 9)
+						goto bad_format;
+
+					p++;
+					if ((p - buf) >= buflen)
+						goto bad_format;
+				}
+			}
+
+			if (*p == 'Z')
+				p++;
+			else
+				p += 6; /* case of '+00:00 or '-00:00' */
+
+			if ((p - buf) >= buflen || *p != ' ')
+				goto bad_format;
+			metadata[LOG_META_TIME].len = p - metadata[LOG_META_TIME].ptr;
+		}
+		else
+			goto bad_format;
+
+
+		/* HOSTNAME: mandatory token up to the next space */
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		metadata[LOG_META_HOST].ptr = p;
+		while (*p != ' ') {
+			p++;
+			if ((p - buf) >= buflen)
+				goto bad_format;
+		}
+		metadata[LOG_META_HOST].len = p - metadata[LOG_META_HOST].ptr;
+		if (metadata[LOG_META_HOST].len == 1 && metadata[LOG_META_HOST].ptr[0] == '-')
+			metadata[LOG_META_HOST].len = 0;
+
+		/* TAG (APP-NAME in rfc5424 terms) */
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		metadata[LOG_META_TAG].ptr = p;
+		while (*p != ' ') {
+			p++;
+			if ((p - buf) >= buflen)
+				goto bad_format;
+		}
+		metadata[LOG_META_TAG].len = p - metadata[LOG_META_TAG].ptr;
+		if (metadata[LOG_META_TAG].len == 1 && metadata[LOG_META_TAG].ptr[0] == '-')
+			metadata[LOG_META_TAG].len = 0;
+
+		/* PID (PROCID) */
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		metadata[LOG_META_PID].ptr = p;
+		while (*p != ' ') {
+			p++;
+			if ((p - buf) >= buflen)
+				goto bad_format;
+		}
+		metadata[LOG_META_PID].len = p - metadata[LOG_META_PID].ptr;
+		if (metadata[LOG_META_PID].len == 1 && metadata[LOG_META_PID].ptr[0] == '-')
+			metadata[LOG_META_PID].len = 0;
+
+		/* MSGID */
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		metadata[LOG_META_MSGID].ptr = p;
+		while (*p != ' ') {
+			p++;
+			if ((p - buf) >= buflen)
+				goto bad_format;
+		}
+		metadata[LOG_META_MSGID].len = p - metadata[LOG_META_MSGID].ptr;
+		if (metadata[LOG_META_MSGID].len == 1 && metadata[LOG_META_MSGID].ptr[0] == '-')
+			metadata[LOG_META_MSGID].len = 0;
+
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		/* structured data format is:
+		 * ex:
+		 *    '[key1=value1 key2=value2][key3=value3]'
+		 *
+		 * space is invalid outside [] because
+		 * considered as the end of structured data field
+		 */
+		metadata[LOG_META_STDATA].ptr = p;
+		if (*p == '[') {
+			int elem = 0;
+
+			while (1) {
+				if (elem) {
+					/* according to rfc this char is escaped in param values */
+					if (*p == ']' && *(p-1) != '\\')
+						elem = 0;
+				}
+				else {
+					if (*p == '[')
+						elem = 1;
+					else if (*p == ' ')
+						break;
+					else
+						goto bad_format;
+				}
+				p++;
+				if ((p - buf) >= buflen)
+					goto bad_format;
+			}
+		}
+		else if (*p == '-') {
+			/* case of NILVALUE */
+			p++;
+			if ((p - buf) >= buflen || *p != ' ')
+				goto bad_format;
+		}
+		else
+			goto bad_format;
+
+		metadata[LOG_META_STDATA].len = p - metadata[LOG_META_STDATA].ptr;
+		if (metadata[LOG_META_STDATA].len == 1 && metadata[LOG_META_STDATA].ptr[0] == '-')
+			metadata[LOG_META_STDATA].len = 0;
+
+		/* skip the space following STDATA: the payload starts here */
+		p++;
+
+		buflen -= p - buf;
+		buf = p;
+
+		*size = buflen;
+		*message = p;
+	}
+	else if (*size > LOG_LEGACYTIME_LEN) {
+		int m;
+
+		/* supported header format according to rfc3164.
+		 * ex:
+		 *  'Jan  1 00:00:00 HOSTNAME TAG[PID]: '
+		 *  or 'Jan  1 00:00:00 HOSTNAME TAG: '
+		 *  or 'Jan  1 00:00:00 HOSTNAME '
+		 * Note: HOSTNAME is mandatory, and day
+		 * of month uses a single space prefix if
+		 * less than 10 to ensure hour offset is
+		 * always the same.
+		 */
+
+		/* Check month to see if it correspond to a rfc3164
+		 * header ex 'Jan  1 00:00:00' */
+		for (m = 0; m < 12; m++)
+			if (!memcmp(monthname[m], p, 3))
+				break;
+		/* Month not found */
+		if (m == 12)
+			goto bad_format;
+
+		metadata[LOG_META_TIME] = ist2(p, LOG_LEGACYTIME_LEN);
+
+		p += LOG_LEGACYTIME_LEN;
+		if ((p - buf) >= buflen || *p != ' ')
+			goto bad_format;
+
+		p++;
+		if ((p - buf) >= buflen || *p == ' ')
+			goto bad_format;
+
+		metadata[LOG_META_HOST].ptr = p;
+		while (*p != ' ') {
+			p++;
+			if ((p - buf) >= buflen)
+				goto bad_format;
+		}
+		metadata[LOG_META_HOST].len = p - metadata[LOG_META_HOST].ptr;
+
+		/* TAG does not seem to be mandatory */
+		p++;
+
+		buflen -= p - buf;
+		buf = p;
+
+		*size = buflen;
+		*message = buf;
+
+		if (!buflen)
+			return;
+
+		/* first pass: check that a well-formed 'TAG[PID]: ' follows
+		 * before committing anything, since TAG is optional
+		 */
+		while (((p  - buf) < buflen) && *p != ' ' && *p != ':')
+			p++;
+
+		/* a tag must present a trailing ':' */
+		if (((p - buf) >= buflen) || *p != ':')
+			return;
+		p++;
+		/* followed by a space */
+		if (((p - buf) >= buflen) || *p != ' ')
+			return;
+
+		/* rewind to parse tag and pid */
+		p = buf;
+		metadata[LOG_META_TAG].ptr = p;
+		/* we have the guarantee that ':' will be reach before size limit */
+		while (*p != ':') {
+			if (*p == '[') {
+				metadata[LOG_META_TAG].len = p - metadata[LOG_META_TAG].ptr;
+				metadata[LOG_META_PID].ptr = p + 1;
+			}
+			else if (*p == ']' && isttest(metadata[LOG_META_PID])) {
+				if (p[1] != ':')
+					return;
+				metadata[LOG_META_PID].len = p - metadata[LOG_META_PID].ptr;
+			}
+			p++;
+		}
+		if (!metadata[LOG_META_TAG].len)
+			metadata[LOG_META_TAG].len = p - metadata[LOG_META_TAG].ptr;
+
+		/* let pass ':' and ' ', we still have the guarantee size is large enough */
+		p += 2;
+
+		buflen -= p - buf;
+		buf = p;
+
+		*size = buflen;
+		*message = buf;
+	}
+
+	return;
+
+bad_format:
+	/* bad syslog format, we reset all parsed syslog fields
+	 * but priority is kept because we are able to re-build
+	 * this message using LOG_FORMAT_PRIO.
+	 */
+	metadata[LOG_META_TIME].len = 0;
+	metadata[LOG_META_HOST].len = 0;
+	metadata[LOG_META_TAG].len = 0;
+	metadata[LOG_META_PID].len = 0;
+	metadata[LOG_META_MSGID].len = 0;
+	metadata[LOG_META_STDATA].len = 0;
+
+	return;
+}
+
+/*
+ * UDP syslog fd handler
+ *
+ * Drains up to <maxaccept> datagrams from the listening socket <fd>, parses
+ * each one as a syslog message and forwards it to the frontend's loggers.
+ * Each datagram carries exactly one message.
+ */
+void syslog_fd_handler(int fd)
+{
+	/* per-thread scratch storage for the parsed header fields */
+	static THREAD_LOCAL struct ist metadata[LOG_META_FIELDS];
+	ssize_t ret = 0;
+	struct buffer *buf = get_trash_chunk();
+	size_t size;
+	char *message;
+	int level;
+	int facility;
+	struct listener *l = objt_listener(fdtab[fd].owner);
+	int max_accept;
+
+	BUG_ON(!l);
+
+	if (fdtab[fd].state & FD_POLL_IN) {
+
+		if (!fd_recv_ready(fd))
+			return;
+
+		/* bound the amount of work per wakeup */
+		max_accept = l->bind_conf->maxaccept ? l->bind_conf->maxaccept : 1;
+
+		do {
+			/* Source address */
+			struct sockaddr_storage saddr = {0};
+			socklen_t saddrlen;
+
+			saddrlen = sizeof(saddr);
+
+			ret = recvfrom(fd, buf->area, buf->size, 0, (struct sockaddr *)&saddr, &saddrlen);
+			if (ret < 0) {
+				if (errno == EINTR)
+					continue;
+				if (errno == EAGAIN || errno == EWOULDBLOCK)
+					fd_cant_recv(fd);
+				goto out;
+			}
+			buf->data = ret;
+
+			/* update counters */
+			_HA_ATOMIC_INC(&cum_log_messages);
+			proxy_inc_fe_req_ctr(l, l->bind_conf->frontend, 0);
+
+			parse_log_message(buf->area, buf->data, &level, &facility, metadata, &message, &size);
+
+			/* relay the (possibly re-built) message to the configured loggers */
+			process_send_log(&l->bind_conf->frontend->loggers, level, facility, metadata, message, size);
+
+		} while (--max_accept);
+	}
+
+out:
+	return;
+}
+
+/*
+ * IO Handler to handle message exchange with a syslog tcp client
+ *
+ * Supports both rfc-6587 framings: Non-Transparent-Framing (messages
+ * terminated by LF or CR LF) and Octet-Counting (ASCII length prefix
+ * followed by a space and the message). Each complete message is parsed
+ * and forwarded to the frontend's loggers. The work per invocation is
+ * bounded by the bind line's maxaccept setting.
+ */
+static void syslog_io_handler(struct appctx *appctx)
+{
+	/* per-thread scratch storage for the parsed header fields */
+	static THREAD_LOCAL struct ist metadata[LOG_META_FIELDS];
+	struct stconn *sc = appctx_sc(appctx);
+	struct stream *s = __sc_strm(sc);
+	struct proxy *frontend = strm_fe(s);
+	struct listener *l = strm_li(s);
+	struct buffer *buf = get_trash_chunk();
+	int max_accept;
+	int to_skip;
+	int facility;
+	int level;
+	char *message;
+	size_t size;
+
+	/* connection closed or in error: flush pending output and leave */
+	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
+		co_skip(sc_oc(sc), co_data(sc_oc(sc)));
+		goto out;
+	}
+
+	max_accept = l->bind_conf->maxaccept ? l->bind_conf->maxaccept : 1;
+	while (1) {
+		char c;
+
+		if (max_accept <= 0)
+			goto missing_budget;
+		max_accept--;
+
+		/* peek at the first char to detect the framing in use */
+		to_skip = co_getchar(sc_oc(sc), &c);
+		if (!to_skip)
+			goto missing_data;
+		else if (to_skip < 0)
+			goto cli_abort;
+
+		if (c == '<') {
+			/* rfc-6587, Non-Transparent-Framing: messages separated by
+			 * a trailing LF or CR LF
+			 */
+			to_skip = co_getline(sc_oc(sc), buf->area, buf->size);
+			if (!to_skip)
+				goto missing_data;
+			else if (to_skip < 0)
+				goto cli_abort;
+
+			if (buf->area[to_skip - 1] != '\n')
+				goto parse_error;
+
+			buf->data = to_skip - 1;
+
+			/* according to rfc-6587, some devices adds CR before LF */
+			if (buf->data && buf->area[buf->data - 1] == '\r')
+				buf->data--;
+
+		}
+		else if ((unsigned char)(c - '1') <= 8) {
+			/* rfc-6587, Octet-Counting: message length in ASCII
+			 * (first digit can not be ZERO), followed by a space
+			 * and message length
+			 */
+			char *p = NULL;
+			int msglen;
+
+			to_skip = co_getword(sc_oc(sc), buf->area, buf->size, ' ');
+			if (!to_skip)
+				goto missing_data;
+			else if (to_skip < 0)
+				goto cli_abort;
+
+			if (buf->area[to_skip - 1] != ' ')
+				goto parse_error;
+
+			/* the whole prefix up to the space must be digits */
+			msglen = strtol(buf->area, &p, 10);
+			if (!msglen || p != &buf->area[to_skip - 1])
+				goto parse_error;
+
+			/* message seems too large */
+			if (msglen > buf->size)
+				goto parse_error;
+
+			msglen = co_getblk(sc_oc(sc), buf->area, msglen, to_skip);
+			if (!msglen)
+				goto missing_data;
+			else if (msglen < 0)
+				goto cli_abort;
+
+
+			buf->data = msglen;
+			to_skip += msglen;
+		}
+		else
+			goto parse_error;
+
+		/* consume the framed message from the channel */
+		co_skip(sc_oc(sc), to_skip);
+
+		/* update counters */
+		_HA_ATOMIC_INC(&cum_log_messages);
+		proxy_inc_fe_req_ctr(l, frontend, 0);
+
+		parse_log_message(buf->area, buf->data, &level, &facility, metadata, &message, &size);
+
+		process_send_log(&frontend->loggers, level, facility, metadata, message, size);
+
+	}
+
+missing_data:
+	/* we need more data to read */
+	applet_need_more_data(appctx);
+	return;
+
+missing_budget:
+	/* it may remain some stuff to do, let's retry later */
+	appctx_wakeup(appctx);
+	return;
+
+parse_error:
+	if (l->counters)
+		_HA_ATOMIC_INC(&l->counters->failed_req);
+	_HA_ATOMIC_INC(&frontend->fe_counters.failed_req);
+
+	goto error;
+
+cli_abort:
+	if (l->counters)
+		_HA_ATOMIC_INC(&l->counters->cli_aborts);
+	_HA_ATOMIC_INC(&frontend->fe_counters.cli_aborts);
+
+error:
+	se_fl_set(appctx->sedesc, SE_FL_ERROR);
+
+out:
+	return;
+}
+
+/* applet bound to TCP syslog streams; installed as the log-forward
+ * frontend's default target
+ */
+static struct applet syslog_applet = {
+	.obj_type = OBJ_TYPE_APPLET,
+	.name = "<SYSLOG>", /* used for logging */
+	.fct = syslog_io_handler,
+	.release = NULL,
+};
+
+/*
+ * Parse "log-forward" section and create corresponding sink buffer.
+ *
+ * <file>/<linenum> locate the directive being parsed, <args> holds the
+ * tokenized line (args[0] is the keyword), and <kwm> carries the keyword
+ * modifier (e.g. "no").
+ *
+ * The function returns 0 in success case, otherwise, it returns error
+ * flags.
+ */
+int cfg_parse_log_forward(const char *file, int linenum, char **args, int kwm)
+{
+	int err_code = ERR_NONE;
+	struct proxy *px;
+	char *errmsg = NULL;
+	const char *err = NULL;
+
+	if (strcmp(args[0], "log-forward") == 0) {
+		/* section header: create a new log-forward proxy */
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : missing name for log-forward section.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		if (alertif_too_many_args(1, file, linenum, args, &err_code))
+			goto out;
+
+		err = invalid_char(args[1]);
+		if (err) {
+			ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+			         file, linenum, *err, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		/* the name must not collide with another log-forward section... */
+		px = log_forward_by_name(args[1]);
+		if (px) {
+			ha_alert("Parsing [%s:%d]: log-forward section '%s' has the same name as another log-forward section declared at %s:%d.\n",
+			         file, linenum, args[1], px->conf.file, px->conf.line);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		/* ...nor with any regular proxy */
+		px = proxy_find_by_name(args[1], 0, 0);
+		if (px) {
+			ha_alert("Parsing [%s:%d]: log forward section '%s' has the same name as %s '%s' declared at %s:%d.\n",
+			         file, linenum, args[1], proxy_type_str(px),
+			         px->id, px->conf.file, px->conf.line);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		px = calloc(1, sizeof *px);
+		if (!px) {
+			/* NOTE(review): unlike other error paths this OOM case
+			 * emits no ha_alert() message -- confirm intentional.
+			 */
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		/* chain the new section and seed sane frontend defaults */
+		init_new_proxy(px);
+		px->next = cfg_log_forward;
+		cfg_log_forward = px;
+		px->conf.file = strdup(file);
+		px->conf.line = linenum;
+		px->mode = PR_MODE_SYSLOG;
+		px->last_change = ns_to_sec(now_ns);
+		px->cap = PR_CAP_FE;
+		px->maxconn = 10;
+		px->timeout.client = TICK_ETERNITY;
+		px->accept = frontend_accept;
+		px->default_target = &syslog_applet.obj_type;
+		px->id = strdup(args[1]);
+	}
+	else if (strcmp(args[0], "maxconn") == 0) {  /* maxconn */
+		if (warnifnotcap(cfg_log_forward, PR_CAP_FE, file, linenum, args[0], " Maybe you want 'fullconn' instead ?"))
+			err_code |= ERR_WARN;
+
+		if (*(args[1]) == 0) {
+			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		cfg_log_forward->maxconn = atol(args[1]);
+		if (alertif_too_many_args(1, file, linenum, args, &err_code))
+			goto out;
+	}
+	else if (strcmp(args[0], "backlog") == 0) {  /* backlog */
+		if (warnifnotcap(cfg_log_forward, PR_CAP_FE, file, linenum, args[0], NULL))
+			err_code |= ERR_WARN;
+
+		if (*(args[1]) == 0) {
+			ha_alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		cfg_log_forward->backlog = atol(args[1]);
+		if (alertif_too_many_args(1, file, linenum, args, &err_code))
+			goto out;
+	}
+	else if (strcmp(args[0], "bind") == 0) {
+		/* TCP listener for the current log-forward section */
+		int cur_arg;
+		struct bind_conf *bind_conf;
+		struct listener *l;
+		int ret;
+
+		cur_arg = 1;
+
+		bind_conf = bind_conf_alloc(cfg_log_forward, file, linenum,
+					    NULL, xprt_get(XPRT_RAW));
+		if (!bind_conf) {
+			ha_alert("parsing [%s:%d] : out of memory error.", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		bind_conf->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : MAX_ACCEPT;
+		bind_conf->accept = session_accept_fd;
+
+		if (!str2listener(args[1], cfg_log_forward, bind_conf, file, linenum, &errmsg)) {
+			if (errmsg && *errmsg) {
+				indent_msg(&errmsg, 2);
+				ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+			}
+			else {
+				ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
+					 file, linenum, args[0], args[1], args[2]);
+			}
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+			global.maxsock++;
+		}
+		cur_arg++;
+
+		/* let the generic bind keyword parser handle remaining options */
+		ret = bind_parse_args_list(bind_conf, args, cur_arg, cursection, file, linenum);
+		err_code |= ret;
+		if (ret != 0) {
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+	}
+	else if (strcmp(args[0], "dgram-bind") == 0) {
+		/* UDP (datagram) receiver for the current log-forward section */
+		int cur_arg;
+		struct bind_conf *bind_conf;
+		struct bind_kw *kw;
+		struct listener *l;
+
+		cur_arg = 1;
+
+		bind_conf = bind_conf_alloc(cfg_log_forward, file, linenum,
+					    NULL, xprt_get(XPRT_RAW));
+		if (!bind_conf) {
+			ha_alert("parsing [%s:%d] : out of memory error.", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		bind_conf->maxaccept = global.tune.maxaccept ? global.tune.maxaccept : MAX_ACCEPT;
+
+		if (!str2receiver(args[1], cfg_log_forward, bind_conf, file, linenum, &errmsg)) {
+			if (errmsg && *errmsg) {
+				indent_msg(&errmsg, 2);
+				ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+			}
+			else {
+				ha_alert("parsing [%s:%d] : '%s %s' : error encountered while parsing listening address %s.\n",
+					 file, linenum, args[0], args[1], args[2]);
+			}
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+			/* the fact that the sockets are of type dgram is guaranteed by str2receiver() */
+			l->rx.iocb   = syslog_fd_handler;
+			global.maxsock++;
+		}
+		cur_arg++;
+
+		/* parse remaining keywords through the registered bind keywords */
+		while (*args[cur_arg] && (kw = bind_find_kw(args[cur_arg]))) {
+			int ret;
+
+			ret = kw->parse(args, cur_arg, cfg_log_forward, bind_conf, &errmsg);
+			err_code |= ret;
+			if (ret) {
+				if (errmsg && *errmsg) {
+					indent_msg(&errmsg, 2);
+					ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+				}
+				else
+					ha_alert("parsing [%s:%d]: error encountered while processing '%s'\n",
+					         file, linenum, args[cur_arg]);
+				if (ret & ERR_FATAL)
+					goto out;
+			}
+			cur_arg += 1 + kw->skip;
+		}
+		if (*args[cur_arg] != 0) {
+			const char *best = bind_find_best_kw(args[cur_arg]);
+			if (best)
+				ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section; did you mean '%s' maybe ?\n",
+					 file, linenum, args[cur_arg], cursection, best);
+			else
+				ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section.\n",
+					 file, linenum, args[cur_arg], cursection);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+	}
+	else if (strcmp(args[0], "log") == 0) {
+		/* destination loggers for the forwarded messages */
+		if (!parse_logger(args, &cfg_log_forward->loggers, (kwm == KWM_NO), file, linenum, &errmsg)) {
+			ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+	}
+	else if (strcmp(args[0], "timeout") == 0) {
+		/* only "timeout client" is supported in this section */
+		const char *res;
+		unsigned timeout;
+
+		if (strcmp(args[1], "client") != 0) {
+			ha_alert("parsing [%s:%d] : unknown keyword '%s %s' in log-forward section.\n", file, linenum, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		if (*args[2] == 0) {
+			ha_alert("parsing [%s:%d] : missing timeout client value.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		res = parse_time_err(args[2], &timeout, TIME_UNIT_MS);
+		if (res == PARSE_TIME_OVER) {
+			memprintf(&errmsg, "timer overflow in argument '%s' to 'timeout client' (maximum value is 2147483647 ms or ~24.8 days)", args[2]);
+		}
+		else if (res == PARSE_TIME_UNDER) {
+			memprintf(&errmsg, "timer underflow in argument '%s' to 'timeout client' (minimum non-null value is 1 ms)", args[2]);
+		}
+		else if (res) {
+			memprintf(&errmsg, "unexpected character '%c' in 'timeout client'", *res);
+		}
+
+		if (res) {
+			ha_alert("parsing [%s:%d] : %s : %s\n", file, linenum, args[0], errmsg);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		cfg_log_forward->timeout.client = MS_TO_TICKS(timeout);
+	}
+	else {
+		ha_alert("parsing [%s:%d] : unknown keyword '%s' in log-forward section.\n", file, linenum, args[0]);
+		err_code |= ERR_ALERT | ERR_ABORT;
+		goto out;
+	}
+out:
+	ha_free(&errmsg);
+	return err_code;
+}
+
+/* function: post-resolve a single list of loggers
+ *
+ * <section>/<section_name> identify where the loggers were declared and are
+ * only used to build the diagnostic message; a NULL <section> denotes the
+ * global "log" directives.
+ *
+ * Returns err_code which defaults to ERR_NONE and can be set to a combination
+ * of ERR_WARN, ERR_ALERT, ERR_FATAL and ERR_ABORT in case of errors.
+ */
+int postresolve_logger_list(struct list *loggers, const char *section, const char *section_name)
+{
+	int err_code = ERR_NONE;
+	struct logger *logger;
+
+	list_for_each_entry(logger, loggers, list) {
+		int cur_code;
+		char *msg = NULL;
+
+		cur_code = resolve_logger(logger, &msg);
+		if (msg) {
+			/* pick the reporting function matching the severity
+			 * of the resolution result
+			 */
+			void (*e_func)(const char *fmt, ...) = NULL;
+
+			if (cur_code & ERR_ALERT)
+				e_func = ha_alert;
+			else if (cur_code & ERR_WARN)
+				e_func = ha_warning;
+			else
+				e_func = ha_diag_warning;
+			if (!section)
+				e_func("global log directive declared in file %s at line '%d' %s.\n",
+				       logger->conf.file, logger->conf.line, msg);
+			else
+				e_func("log directive declared in %s section '%s' in file '%s' at line %d %s.\n",
+				       section, section_name, logger->conf.file, logger->conf.line, msg);
+			ha_free(&msg);
+		}
+		err_code |= cur_code;
+	}
+	return err_code;
+}
+
+/* resolve default log directives at end of config. Returns 0 on success
+ * otherwise error flags.
+ *
+ * Walks the global "log" directives, then each proxy's and each
+ * log-forward section's logger lists, and merges every error code
+ * returned by postresolve_logger_list().
+ */
+static int postresolve_loggers(void)
+{
+	struct proxy *px;
+	int err_code = ERR_NONE;
+
+	/* global log directives */
+	err_code |= postresolve_logger_list(&global.loggers, NULL, NULL);
+	/* proxy log directives */
+	for (px = proxies_list; px; px = px->next)
+		err_code |= postresolve_logger_list(&px->loggers, "proxy", px->id);
+	/* log-forward log directives */
+	for (px = cfg_log_forward; px; px = px->next)
+		err_code |= postresolve_logger_list(&px->loggers, "log-forward", px->id);
+
+	return err_code;
+}
+
+
+/* config parsers for this section */
+REGISTER_CONFIG_SECTION("log-forward", cfg_parse_log_forward, NULL);
+/* resolve logger targets once the whole configuration has been read */
+REGISTER_POST_CHECK(postresolve_loggers);
+REGISTER_POST_PROXY_CHECK(postcheck_log_backend);
+
+/* per-thread allocation/release of the log output buffers */
+REGISTER_PER_THREAD_ALLOC(init_log_buffers);
+REGISTER_PER_THREAD_FREE(deinit_log_buffers);
+
+REGISTER_POST_DEINIT(deinit_log_forward);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/lru.c b/src/lru.c
new file mode 100644
index 0000000..07ef50c
--- /dev/null
+++ b/src/lru.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <import/lru.h>
+
+/* Minimal list manipulation macros for lru64_list */
+#define LIST_INSERT(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); })
+#define LIST_DELETE(el) ({ (el)->n->p = (el)->p; (el)->p->n = (el)->n; })
+
+
+/* Lookup key <key> in LRU cache <lru> for use with domain <domain> whose data's
+ * current version is <revision>. It differs from lru64_get as it does not
+ * create missing keys. The function returns NULL if an error or a cache miss
+ * occurs. */
+struct lru64 *lru64_lookup(unsigned long long key, struct lru64_head *lru,
+ void *domain, unsigned long long revision)
+{
+ struct eb64_node *node;
+ struct lru64 *elem;
+
+ node = __eb64_lookup(&lru->keys, key);
+ elem = container_of(node, typeof(*elem), node);
+ if (elem) {
+ /* Existing entry found, check validity then move it at the
+ * head of the LRU list.
+ */
+ if (elem->domain == domain && elem->revision == revision) {
+ LIST_DELETE(&elem->lru);
+ LIST_INSERT(&lru->list, &elem->lru);
+ return elem;
+ }
+ }
+ return NULL;
+}
+
+/* Get key <key> from LRU cache <lru> for use with domain <domain> whose data's
+ * current revision is <revision>. If the key doesn't exist it's first created
+ * with ->domain = NULL. The caller detects this situation by checking ->domain
+ * and must perform the operation to be cached then call lru64_commit() to
+ * complete the operation. A lock (mutex or spinlock) may be added around the
+ * function to permit use in a multi-threaded environment. The function may
+ * return NULL upon memory allocation failure.
+ */
+struct lru64 *lru64_get(unsigned long long key, struct lru64_head *lru,
+ void *domain, unsigned long long revision)
+{
+ struct eb64_node *node;
+ struct lru64 *elem;
+
+ if (!lru->spare) {
+ if (!lru->cache_size)
+ return NULL;
+ lru->spare = malloc(sizeof(*lru->spare));
+ if (!lru->spare)
+ return NULL;
+ lru->spare->domain = NULL;
+ }
+
+ /* Lookup or insert */
+ lru->spare->node.key = key;
+ node = __eb64_insert(&lru->keys, &lru->spare->node);
+ elem = container_of(node, typeof(*elem), node);
+
+ if (elem != lru->spare) {
+ /* Existing entry found, check validity then move it at the
+ * head of the LRU list.
+ */
+ if (elem->domain == domain && elem->revision == revision) {
+ LIST_DELETE(&elem->lru);
+ LIST_INSERT(&lru->list, &elem->lru);
+ return elem;
+ }
+
+ if (!elem->domain)
+ return NULL; // currently locked
+
+ /* recycle this entry */
+ LIST_DELETE(&elem->lru);
+ }
+ else {
+ /* New entry inserted, initialize and move to the head of the
+ * LRU list, and lock it until commit.
+ */
+ lru->cache_usage++;
+ lru->spare = NULL; // used, need a new one next time
+ }
+
+ elem->domain = NULL;
+ LIST_INSERT(&lru->list, &elem->lru);
+
+ if (lru->cache_usage > lru->cache_size) {
+ /* try to kill oldest entry */
+ struct lru64 *old;
+
+ old = container_of(lru->list.p, typeof(*old), lru);
+ if (old->domain) {
+ /* not locked */
+ LIST_DELETE(&old->lru);
+ __eb64_delete(&old->node);
+ if (old->data && old->free)
+ old->free(old->data);
+ if (!lru->spare)
+ lru->spare = old;
+ else {
+ free(old);
+ }
+ lru->cache_usage--;
+ }
+ }
+ return elem;
+}
+
+/* Commit element <elem> with data <data>, domain <domain> and revision
+ * <revision>. <elem> is checked for NULL so that it's possible to call it
+ * with the result from a call to lru64_get(). The caller might lock it using a
+ * spinlock or mutex shared with the one around lru64_get().
+ */
+void lru64_commit(struct lru64 *elem, void *data, void *domain,
+ unsigned long long revision, void (*free)(void *))
+{
+ if (!elem)
+ return;
+
+ elem->data = data;
+ elem->revision = revision;
+ elem->domain = domain;
+ elem->free = free;
+}
+
+/* Create a new LRU cache of <size> entries. Returns the new cache or NULL in
+ * case of allocation failure.
+ */
+struct lru64_head *lru64_new(int size)
+{
+ struct lru64_head *lru;
+
+ lru = malloc(sizeof(*lru));
+ if (lru) {
+ lru->list.p = lru->list.n = &lru->list;
+ lru->keys = EB_ROOT_UNIQUE;
+ lru->spare = NULL;
+ lru->cache_size = size;
+ lru->cache_usage = 0;
+ }
+ return lru;
+}
+
+/* Tries to destroy the LRU cache <lru>. Returns the number of locked entries
+ * that prevent it from being destroyed, or zero meaning everything was done.
+ */
+int lru64_destroy(struct lru64_head *lru)
+{
+ struct lru64 *elem, *next;
+
+ if (!lru)
+ return 0;
+
+ elem = container_of(lru->list.p, typeof(*elem), lru);
+ while (&elem->lru != &lru->list) {
+ next = container_of(elem->lru.p, typeof(*next), lru);
+ if (elem->domain) {
+ /* not locked */
+ LIST_DELETE(&elem->lru);
+ eb64_delete(&elem->node);
+ if (elem->data && elem->free)
+ elem->free(elem->data);
+ free(elem);
+ lru->cache_usage--;
+ lru->cache_size--;
+ }
+ elem = next;
+ }
+
+ if (lru->cache_usage)
+ return lru->cache_usage;
+
+ free(lru);
+ return 0;
+}
+
+/* kill the <nb> least used entries from the <lru> cache */
+void lru64_kill_oldest(struct lru64_head *lru, unsigned long int nb)
+{
+ struct lru64 *elem, *next;
+
+ for (elem = container_of(lru->list.p, typeof(*elem), lru);
+ nb && (&elem->lru != &lru->list);
+ elem = next) {
+ next = container_of(elem->lru.p, typeof(*next), lru);
+ if (!elem->domain)
+ continue; /* locked entry */
+
+ LIST_DELETE(&elem->lru);
+ eb64_delete(&elem->node);
+ if (elem->data && elem->free)
+ elem->free(elem->data);
+ if (!lru->spare)
+ lru->spare = elem;
+ else
+ free(elem);
+ lru->cache_usage--;
+ nb--;
+ }
+}
+
+/* The code below is just for validation and performance testing. It's an
+ * example of a function taking some time to return results that could be
+ * cached.
+ */
+#ifdef STANDALONE
+
+#include <stdio.h>
+
+static unsigned int misses;
+
+static unsigned long long sum(unsigned long long x)
+{
+#ifndef TEST_LRU_FAST_OPERATION
+ if (x < 1)
+ return 0;
+ return x + sum(x * 99 / 100 - 1);
+#else
+ return (x << 16) - (x << 8) - 1;
+#endif
+}
+
+static long get_value(struct lru64_head *lru, long a)
+{
+ struct lru64 *item = NULL;
+
+ if (lru) {
+ item = lru64_get(a, lru, lru, 0);
+ if (item && item->domain)
+ return (long)item->data;
+ }
+ misses++;
+ /* do the painful work here */
+ a = sum(a);
+ if (item)
+ lru64_commit(item, (void *)a, lru, 1, 0);
+ return a;
+}
+
+static inline unsigned int statistical_prng()
+{
+ static unsigned int statistical_prng_state = 0x12345678;
+ unsigned int x = statistical_prng_state;
+
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ return statistical_prng_state = x;
+}
+
+/* pass the number of rounds in argv[1] and optionally the LRU cache size in argv[2] */
+int main(int argc, char **argv)
+{
+ struct lru64_head *lru = NULL;
+ long long ret;
+ int total, loops;
+
+ if (argc < 2) {
+ printf("Need a number of rounds and optionally an LRU cache size (0..65536)\n");
+ exit(1);
+ }
+
+ total = atoi(argv[1]);
+
+ if (argc > 2) /* cache size */
+ lru = lru64_new(atoi(argv[2]));
+
+ ret = 0;
+ for (loops = 0; loops < total; loops++) {
+ ret += get_value(lru, statistical_prng() & 65535);
+ }
+ /* just for accuracy control */
+ printf("ret=%llx, hits=%u, misses=%u (%d %% hits)\n", ret, (unsigned)(total-misses), misses, (int)((float)(total-misses) * 100.0 / total));
+
+ while (lru64_destroy(lru));
+
+ return 0;
+}
+
+#endif
diff --git a/src/mailers.c b/src/mailers.c
new file mode 100644
index 0000000..c09e73c
--- /dev/null
+++ b/src/mailers.c
@@ -0,0 +1,329 @@
+/*
+ * Mailer management.
+ *
+ * Copyright 2015 Horms Solutions Ltd, Simon Horman <horms@verge.net.au>
+ * Copyright 2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdlib.h>
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/check.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/mailers.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/server-t.h>
+#include <haproxy/task.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+
+struct mailers *mailers = NULL;
+
+/* Set to 1 to disable email sending through checks even if the
+ * mailers are configured to do so. (e.g.: disable from lua)
+ */
+int send_email_disabled = 0;
+
+DECLARE_STATIC_POOL(pool_head_email_alert, "email_alert", sizeof(struct email_alert));
+
+/****************************** Email alerts ******************************/
+/* NOTE: It may be pertinent to use an applet to handle email alerts */
+/* instead of a tcp-check ruleset */
+/**************************************************************************/
+void email_alert_free(struct email_alert *alert)
+{
+ struct tcpcheck_rule *rule, *back;
+
+ if (!alert)
+ return;
+
+ if (alert->rules.list) {
+ list_for_each_entry_safe(rule, back, alert->rules.list, list) {
+ LIST_DELETE(&rule->list);
+ free_tcpcheck(rule, 1);
+ }
+ free_tcpcheck_vars(&alert->rules.preset_vars);
+ ha_free(&alert->rules.list);
+ }
+ pool_free(pool_head_email_alert, alert);
+}
+
+static struct task *process_email_alert(struct task *t, void *context, unsigned int state)
+{
+ struct check *check = context;
+ struct email_alertq *q;
+ struct email_alert *alert;
+
+ q = container_of(check, typeof(*q), check);
+
+ HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+ while (1) {
+ if (!(check->state & CHK_ST_ENABLED)) {
+ if (LIST_ISEMPTY(&q->email_alerts)) {
+ /* All alerts processed, queue the task */
+ t->expire = TICK_ETERNITY;
+ task_queue(t);
+ goto end;
+ }
+
+ alert = LIST_NEXT(&q->email_alerts, typeof(alert), list);
+ LIST_DELETE(&alert->list);
+ t->expire = now_ms;
+ check->tcpcheck_rules = &alert->rules;
+ check->status = HCHK_STATUS_INI;
+ check->state |= CHK_ST_ENABLED;
+ }
+
+ process_chk(t, context, state);
+ if (check->state & CHK_ST_INPROGRESS)
+ break;
+
+ alert = container_of(check->tcpcheck_rules, typeof(*alert), rules);
+ email_alert_free(alert);
+ check->tcpcheck_rules = NULL;
+ check->server = NULL;
+ check->state &= ~CHK_ST_ENABLED;
+ }
+ end:
+ HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+ return t;
+}
+
+/* Initializes mailer alerts for the proxy <p> using <mls> parameters.
+ *
+ * The function returns 0 in success case, otherwise, it returns non-zero
+ * and err is filled.
+ */
+int init_email_alert(struct mailers *mls, struct proxy *p, char **err)
+{
+ struct mailer *mailer;
+ struct email_alertq *queues;
+ const char *err_str;
+ int i = 0;
+
+ if ((queues = calloc(mls->count, sizeof(*queues))) == NULL) {
+ memprintf(err, "out of memory while allocating mailer alerts queues");
+ goto fail_no_queue;
+ }
+
+ for (mailer = mls->mailer_list; mailer; i++, mailer = mailer->next) {
+ struct email_alertq *q = &queues[i];
+ struct check *check = &q->check;
+ struct task *t;
+
+ LIST_INIT(&q->email_alerts);
+ HA_SPIN_INIT(&q->lock);
+ check->obj_type = OBJ_TYPE_CHECK;
+ check->inter = mls->timeout.mail;
+ check->rise = DEF_AGENT_RISETIME;
+ check->proxy = p;
+ check->fall = DEF_AGENT_FALLTIME;
+ if ((err_str = init_check(check, PR_O2_TCPCHK_CHK))) {
+ memprintf(err, "%s", err_str);
+ goto error;
+ }
+
+ check->xprt = mailer->xprt;
+ check->addr = mailer->addr;
+ check->port = get_host_port(&mailer->addr);
+
+ if ((t = task_new_anywhere()) == NULL) {
+ memprintf(err, "out of memory while allocating mailer alerts task");
+ goto error;
+ }
+
+ check->task = t;
+ t->process = process_email_alert;
+ t->context = check;
+
+ /* check this in one ms */
+ t->expire = TICK_ETERNITY;
+ check->start = now_ns;
+ task_queue(t);
+ }
+
+ mls->users++;
+ free(p->email_alert.mailers.name);
+ p->email_alert.mailers.m = mls;
+ p->email_alert.queues = queues;
+ return 0;
+
+ error:
+ for (i = 0; i < mls->count; i++) {
+ struct email_alertq *q = &queues[i];
+ struct check *check = &q->check;
+
+ free_check(check);
+ }
+ free(queues);
+ fail_no_queue:
+ return 1;
+}
+
+static int enqueue_one_email_alert(struct proxy *p, struct server *s,
+ struct email_alertq *q, const char *msg)
+{
+ struct email_alert *alert;
+ struct tcpcheck_rule *tcpcheck;
+ struct check *check = &q->check;
+
+ if ((alert = pool_alloc(pool_head_email_alert)) == NULL)
+ goto error;
+ LIST_INIT(&alert->list);
+ alert->rules.flags = TCPCHK_RULES_TCP_CHK;
+ alert->rules.list = calloc(1, sizeof(*alert->rules.list));
+ if (!alert->rules.list)
+ goto error;
+ LIST_INIT(alert->rules.list);
+ LIST_INIT(&alert->rules.preset_vars); /* unused for email alerts */
+ alert->srv = s;
+
+ if ((tcpcheck = pool_zalloc(pool_head_tcpcheck_rule)) == NULL)
+ goto error;
+ tcpcheck->action = TCPCHK_ACT_CONNECT;
+ tcpcheck->comment = NULL;
+
+ LIST_APPEND(alert->rules.list, &tcpcheck->list);
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "220 "))
+ goto error;
+
+ {
+ const char * const strs[4] = { "HELO ", p->email_alert.myhostname, "\r\n" };
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "250 "))
+ goto error;
+
+ {
+ const char * const strs[4] = { "MAIL FROM:<", p->email_alert.from, ">\r\n" };
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "250 "))
+ goto error;
+
+ {
+ const char * const strs[4] = { "RCPT TO:<", p->email_alert.to, ">\r\n" };
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "250 "))
+ goto error;
+
+ {
+ const char * const strs[2] = { "DATA\r\n" };
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "354 "))
+ goto error;
+
+ {
+ struct tm tm;
+ char datestr[48];
+ const char * const strs[18] = {
+ "From: ", p->email_alert.from, "\r\n",
+ "To: ", p->email_alert.to, "\r\n",
+ "Date: ", datestr, "\r\n",
+ "Subject: [HAProxy Alert] ", msg, "\r\n",
+ "\r\n",
+ msg, "\r\n",
+ "\r\n",
+ ".\r\n",
+ NULL
+ };
+
+ get_localtime(date.tv_sec, &tm);
+
+ if (strftime(datestr, sizeof(datestr), "%a, %d %b %Y %T %z (%Z)", &tm) == 0) {
+ goto error;
+ }
+
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "250 "))
+ goto error;
+
+ {
+ const char * const strs[2] = { "QUIT\r\n" };
+ if (!add_tcpcheck_send_strs(&alert->rules, strs))
+ goto error;
+ }
+
+ if (!add_tcpcheck_expect_str(&alert->rules, "221 "))
+ goto error;
+
+ HA_SPIN_LOCK(EMAIL_ALERTS_LOCK, &q->lock);
+ task_wakeup(check->task, TASK_WOKEN_MSG);
+ LIST_APPEND(&q->email_alerts, &alert->list);
+ HA_SPIN_UNLOCK(EMAIL_ALERTS_LOCK, &q->lock);
+ return 1;
+
+error:
+ email_alert_free(alert);
+ return 0;
+}
+
+static void enqueue_email_alert(struct proxy *p, struct server *s, const char *msg)
+{
+ int i;
+ struct mailer *mailer;
+
+ for (i = 0, mailer = p->email_alert.mailers.m->mailer_list;
+ i < p->email_alert.mailers.m->count; i++, mailer = mailer->next) {
+ if (!enqueue_one_email_alert(p, s, &p->email_alert.queues[i], msg)) {
+ ha_alert("Email alert [%s] could not be enqueued: out of memory\n", p->id);
+ return;
+ }
+ }
+
+ return;
+}
+
+/*
+ * Send email alert if configured.
+ */
+void send_email_alert(struct server *s, int level, const char *format, ...)
+{
+ va_list argp;
+ char buf[1024];
+ int len;
+ struct proxy *p = s->proxy;
+
+ if (send_email_disabled)
+ return;
+
+ if (!p->email_alert.mailers.m || level > p->email_alert.level || format == NULL)
+ return;
+
+ va_start(argp, format);
+ len = vsnprintf(buf, sizeof(buf), format, argp);
+ va_end(argp);
+
+ if (len < 0 || len >= sizeof(buf)) {
+ ha_alert("Email alert [%s] could not format message\n", p->id);
+ return;
+ }
+
+ enqueue_email_alert(p, s, buf);
+}
diff --git a/src/map.c b/src/map.c
new file mode 100644
index 0000000..ba7fd81
--- /dev/null
+++ b/src/map.c
@@ -0,0 +1,1232 @@
+/*
+ * MAP management functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdio.h>
+#include <syslog.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/cli.h>
+#include <haproxy/map.h>
+#include <haproxy/pattern.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+
+/* Parse an IPv4 or IPv6 address and store it into the sample.
+ * The output type is IPv4 or IPv6.
+ */
+int map_parse_ip(const char *text, struct sample_data *data)
+{
+ int len = strlen(text);
+
+ if (buf2ip(text, len, &data->u.ipv4)) {
+ data->type = SMP_T_IPV4;
+ return 1;
+ }
+ if (buf2ip6(text, len, &data->u.ipv6)) {
+ data->type = SMP_T_IPV6;
+ return 1;
+ }
+ return 0;
+}
+
+/* Parse a string and store a pointer to it into the sample. The original
+ * string must be left in memory because we return a direct memory reference.
+ * The output type is SMP_T_STR. There is no risk that the data will be
+ * overwritten because sample_conv_map() makes a const sample with this
+ * output.
+ */
+int map_parse_str(const char *text, struct sample_data *data)
+{
+ data->u.str.area = (char *)text;
+ data->u.str.data = strlen(text);
+ data->u.str.size = data->u.str.data + 1;
+ data->type = SMP_T_STR;
+ return 1;
+}
+
+/* Parse an integer and convert it to a sample. The output type is always
+ * SMP_T_SINT. The function returns zero (error) if the string cannot be
+ * parsed entirely as an integer.
+ */
+int map_parse_int(const char *text, struct sample_data *data)
+{
+ data->type = SMP_T_SINT;
+ data->u.sint = read_int64(&text, text + strlen(text));
+ if (*text != '\0')
+ return 0;
+ return 1;
+}
+
+/* This creates and initializes a map descriptor.
+ * Returns NULL in case of an out of memory error.
+ */
+static struct map_descriptor *map_create_descriptor(struct sample_conv *conv)
+{
+ struct map_descriptor *desc;
+
+ desc = calloc(1, sizeof(*desc));
+ if (!desc)
+ return NULL;
+
+ desc->conv = conv;
+
+ return desc;
+}
+
+/* This function loads the map file according to the data type declared in
+ * the "struct sample_conv".
+ *
+ * This function chooses the indexation type (ebtree or list) according to
+ * the type of match needed.
+ */
+int sample_load_map(struct arg *arg, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ struct map_descriptor *desc;
+
+ if (!(global.mode & MODE_STARTING)) {
+ memprintf(err, "map: cannot load map at runtime");
+ return 0;
+ }
+
+ /* create new map descriptor */
+ desc = map_create_descriptor(conv);
+ if (!desc) {
+ memprintf(err, "out of memory");
+ return 0;
+ }
+
+ /* Initialize pattern */
+ pattern_init_head(&desc->pat);
+
+ /* This is original pattern, must free */
+ desc->do_free = 1;
+
+ /* Set the match method. */
+ desc->pat.match = pat_match_fcts[(long)conv->private];
+ desc->pat.parse = pat_parse_fcts[(long)conv->private];
+ desc->pat.index = pat_index_fcts[(long)conv->private];
+ desc->pat.prune = pat_prune_fcts[(long)conv->private];
+ desc->pat.expect_type = pat_match_types[(long)conv->private];
+
+ /* Set the output parse method. */
+ switch (desc->conv->out_type) {
+ case SMP_T_STR: desc->pat.parse_smp = map_parse_str; break;
+ case SMP_T_SINT: desc->pat.parse_smp = map_parse_int; break;
+ case SMP_T_ADDR: desc->pat.parse_smp = map_parse_ip; break;
+ default:
+ memprintf(err, "map: internal haproxy error: no default parse case for the input type <%d>.",
+ conv->out_type);
+ free(desc);
+ return 0;
+ }
+
+ /* Load map. */
+ if (!pattern_read_from_file(&desc->pat, PAT_REF_MAP, arg[0].data.str.area, PAT_MF_NO_DNS,
+ 1, err, file, line))
+ return 0;
+
+ /* the maps of type IP support a string as default value. This
+ * string can be an ipv4 or an ipv6, we must convert it.
+ */
+ if (arg[1].type != ARGT_STOP && desc->conv->out_type == SMP_T_ADDR) {
+ struct sample_data data;
+ if (!map_parse_ip(arg[1].data.str.area, &data)) {
+ memprintf(err, "map: cannot parse default ip <%s>.",
+ arg[1].data.str.area);
+ return 0;
+ }
+ chunk_destroy(&arg[1].data.str);
+ if (data.type == SMP_T_IPV4) {
+ arg[1].type = ARGT_IPV4;
+ arg[1].data.ipv4 = data.u.ipv4;
+ } else {
+ arg[1].type = ARGT_IPV6;
+ arg[1].data.ipv6 = data.u.ipv6;
+ }
+ }
+
+ /* replace the first argument by this definition */
+ chunk_destroy(&arg[0].data.str);
+ arg[0].type = ARGT_MAP;
+ arg[0].data.map = desc;
+
+ return 1;
+}
+
+static int sample_conv_map(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct map_descriptor *desc;
+ struct pattern *pat;
+ struct buffer *str;
+
+ /* get config */
+ desc = arg_p[0].data.map;
+
+ /* Execute the match function. */
+ pat = pattern_exec_match(&desc->pat, smp, 1);
+
+ /* Match case. */
+ if (pat) {
+ if (pat->data) {
+ /* In the regm case, merge the sample with the input. */
+ if ((long)private == PAT_MATCH_REGM) {
+ struct buffer *tmptrash;
+ int len;
+
+ /* Copy the content of the sample because it could
+ be scratched by incoming get_trash_chunk */
+ tmptrash = alloc_trash_chunk();
+ if (!tmptrash)
+ return 0;
+
+ tmptrash->data = smp->data.u.str.data;
+ if (tmptrash->data > (tmptrash->size-1))
+ tmptrash->data = tmptrash->size-1;
+
+ memcpy(tmptrash->area, smp->data.u.str.area, tmptrash->data);
+ tmptrash->area[tmptrash->data] = 0;
+
+ str = get_trash_chunk();
+ len = exp_replace(str->area, str->size,
+ tmptrash->area,
+ pat->data->u.str.area,
+ (regmatch_t *)smp->ctx.a[0]);
+ free_trash_chunk(tmptrash);
+
+ if (len == -1)
+ return 0;
+
+ str->data = len;
+ smp->data.u.str = *str;
+ return 1;
+ }
+ /* Copy sample. */
+ smp->data = *pat->data;
+ smp->flags |= SMP_F_CONST;
+ return 1;
+ }
+
+ /* Return just int sample containing 1. */
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 1;
+ return 1;
+ }
+
+ /* If no default value available, the converter fails. */
+ if (arg_p[1].type == ARGT_STOP)
+ return 0;
+
+ /* Return the default value. */
+ switch (desc->conv->out_type) {
+
+ case SMP_T_STR:
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+ smp->data.u.str = arg_p[1].data.str;
+ break;
+
+ case SMP_T_SINT:
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = arg_p[1].data.sint;
+ break;
+
+ case SMP_T_ADDR:
+ if (arg_p[1].type == ARGT_IPV4) {
+ smp->data.type = SMP_T_IPV4;
+ smp->data.u.ipv4 = arg_p[1].data.ipv4;
+ } else {
+ smp->data.type = SMP_T_IPV6;
+ smp->data.u.ipv6 = arg_p[1].data.ipv6;
+ }
+ break;
+ }
+
+ return 1;
+}
+
+/* This function is used with map and acl management. It permits to browse
+ * each reference. The variable <getnext> must contain the current node,
+ * <end> point to the root node and the <flags> permit to filter required
+ * nodes.
+ */
+static inline
+struct pat_ref *pat_list_get_next(struct pat_ref *getnext, struct list *end,
+ unsigned int flags)
+{
+ struct pat_ref *ref = getnext;
+
+ while (1) {
+
+ /* Get next list entry. */
+ ref = LIST_NEXT(&ref->list, struct pat_ref *, list);
+
+ /* If the entry is the last of the list, return NULL. */
+ if (&ref->list == end)
+ return NULL;
+
+ /* If the entry match the flag, return it. */
+ if (ref->flags & flags)
+ return ref;
+ }
+}
+
+static inline
+struct pat_ref *pat_ref_lookup_ref(const char *reference)
+{
+ int id;
+ char *error;
+
+ /* If the reference starts by a '#', this is numeric id. */
+ if (reference[0] == '#') {
+ /* Try to convert the numeric id. If the conversion fails, the lookup fails. */
+ id = strtol(reference + 1, &error, 10);
+ if (*error != '\0')
+ return NULL;
+
+ /* Perform the unique id lookup. */
+ return pat_ref_lookupid(id);
+ }
+
+ /* Perform the string lookup. */
+ return pat_ref_lookup(reference);
+}
+
+/* This function is used with map and acl management. It permits to browse
+ * each reference.
+ */
+static inline
+struct pattern_expr *pat_expr_get_next(struct pattern_expr *getnext, struct list *end)
+{
+ struct pattern_expr *expr;
+ expr = LIST_NEXT(&getnext->list, struct pattern_expr *, list);
+ if (&expr->list == end)
+ return NULL;
+ return expr;
+}
+
+/* appctx context for the "{show|get|add|del|*} {map|acl}" commands. This is
+ * used even by commands that only have a parser and no I/O handler because
+ * it provides a unified way to manipulate some fields and will allow to
+ * expand some of them more easily later if needed.
+ */
+struct show_map_ctx {
+ struct pat_ref *ref;
+ struct bref bref; /* back-reference from the pat_ref_elt being dumped */
+ struct pattern_expr *expr;
+ struct buffer chunk;
+ unsigned int display_flags;
+ unsigned int curr_gen; /* current/latest generation, for show/clear */
+ unsigned int prev_gen; /* prev generation, for clear */
+ enum {
+ STATE_INIT = 0, /* initialize list and backrefs */
+ STATE_LIST, /* list entries */
+ STATE_DONE, /* finished */
+ } state; /* state of the dump */
+};
+
+/* expects the current generation ID in ctx->curr_gen */
+static int cli_io_handler_pat_list(struct appctx *appctx)
+{
+ struct show_map_ctx *ctx = appctx->svcctx;
+ struct stconn *sc = appctx_sc(appctx);
+ struct pat_ref_elt *elt;
+
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
+ /* If we're forced to shut down, we might have to remove our
+ * reference to the last ref_elt being dumped.
+ */
+ if (!LIST_ISEMPTY(&ctx->bref.users)) {
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
+ LIST_DEL_INIT(&ctx->bref.users);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+ }
+ return 1;
+ }
+
+ switch (ctx->state) {
+ case STATE_INIT:
+ ctx->state = STATE_LIST;
+ __fallthrough;
+
+ case STATE_LIST:
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
+
+ if (!LIST_ISEMPTY(&ctx->bref.users)) {
+ LIST_DELETE(&ctx->bref.users);
+ LIST_INIT(&ctx->bref.users);
+ } else {
+ ctx->bref.ref = ctx->ref->head.n;
+ }
+
+ while (ctx->bref.ref != &ctx->ref->head) {
+ chunk_reset(&trash);
+
+ elt = LIST_ELEM(ctx->bref.ref, struct pat_ref_elt *, list);
+
+ if (elt->gen_id != ctx->curr_gen)
+ goto skip;
+
+ /* build messages */
+ if (elt->sample)
+ chunk_appendf(&trash, "%p %s %s\n",
+ elt, elt->pattern,
+ elt->sample);
+ else
+ chunk_appendf(&trash, "%p %s\n",
+ elt, elt->pattern);
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* let's try again later from this stream. We add ourselves into
+ * this stream's users so that it can remove us upon termination.
+ */
+ LIST_APPEND(&elt->back_refs, &ctx->bref.users);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+ return 0;
+ }
+ skip:
+ /* get next list entry and check the end of the list */
+ ctx->bref.ref = elt->list.n;
+ }
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+ __fallthrough;
+
+ default:
+ ctx->state = STATE_DONE;
+ return 1;
+ }
+}
+
+static int cli_io_handler_pats_list(struct appctx *appctx)
+{
+ struct show_map_ctx *ctx = appctx->svcctx;
+
+ switch (ctx->state) {
+ case STATE_INIT:
+ /* Display the column headers. If the message cannot be sent,
+ * quit the function with returning 0. The function is called
+ * later and restarted at the state "STATE_INIT".
+ */
+ chunk_reset(&trash);
+ chunk_appendf(&trash, "# id (file) description\n");
+ if (applet_putchk(appctx, &trash) == -1)
+ return 0;
+
+ /* Now, we start the browsing of the references lists.
+ * Note that the following call to LIST_ELEM returns a bad pointer. The only
+ * available field of this pointer is <list>. It is used with the function
+ * pat_list_get_next() for returning the first available entry
+ */
+ ctx->ref = LIST_ELEM(&pattern_reference, struct pat_ref *, list);
+ ctx->ref = pat_list_get_next(ctx->ref, &pattern_reference,
+ ctx->display_flags);
+ ctx->state = STATE_LIST;
+ __fallthrough;
+
+ case STATE_LIST:
+ while (ctx->ref) {
+ chunk_reset(&trash);
+
+ /* Build messages. If the reference is used by another category than
+ * the listed categories, display the information in the message.
+ */
+ chunk_appendf(&trash, "%d (%s) %s. curr_ver=%u next_ver=%u entry_cnt=%llu\n", ctx->ref->unique_id,
+ ctx->ref->reference ? ctx->ref->reference : "",
+ ctx->ref->display, ctx->ref->curr_gen, ctx->ref->next_gen,
+ ctx->ref->entry_cnt);
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* let's try again later from this stream. We add ourselves into
+ * this stream's users so that it can remove us upon termination.
+ */
+ return 0;
+ }
+
+ /* get next list entry and check the end of the list */
+ ctx->ref = pat_list_get_next(ctx->ref, &pattern_reference,
+ ctx->display_flags);
+ }
+
+ __fallthrough;
+
+ default:
+ ctx->state = STATE_DONE;
+ return 1;
+ }
+ return 0;
+}
+
+static int cli_io_handler_map_lookup(struct appctx *appctx)
+{
+ struct show_map_ctx *ctx = appctx->svcctx;
+ struct sample sample;
+ struct pattern *pat;
+ int match_method;
+
+ switch (ctx->state) {
+ case STATE_INIT:
+ /* Init to the first entry. The list cannot be change */
+ ctx->expr = LIST_ELEM(&ctx->ref->pat, struct pattern_expr *, list);
+ ctx->expr = pat_expr_get_next(ctx->expr, &ctx->ref->pat);
+ ctx->state = STATE_LIST;
+ __fallthrough;
+
+ case STATE_LIST:
+ HA_RWLOCK_RDLOCK(PATREF_LOCK, &ctx->ref->lock);
+ /* for each lookup type */
+ while (ctx->expr) {
+ /* initialise chunk to build new message */
+ chunk_reset(&trash);
+
+ /* execute pattern matching */
+ sample.data.type = SMP_T_STR;
+ sample.flags = SMP_F_CONST;
+ sample.data.u.str.data = ctx->chunk.data;
+ sample.data.u.str.area = ctx->chunk.area;
+
+ if (ctx->expr->pat_head->match &&
+ sample_convert(&sample, ctx->expr->pat_head->expect_type))
+ pat = ctx->expr->pat_head->match(&sample, ctx->expr, 1);
+ else
+ pat = NULL;
+
+ /* build return message: set type of match */
+ for (match_method=0; match_method<PAT_MATCH_NUM; match_method++)
+ if (ctx->expr->pat_head->match == pat_match_fcts[match_method])
+ break;
+ if (match_method >= PAT_MATCH_NUM)
+ chunk_appendf(&trash, "type=unknown(%p)", ctx->expr->pat_head->match);
+ else
+ chunk_appendf(&trash, "type=%s", pat_match_names[match_method]);
+
+ /* case sensitive */
+ if (ctx->expr->mflags & PAT_MF_IGNORE_CASE)
+ chunk_appendf(&trash, ", case=insensitive");
+ else
+ chunk_appendf(&trash, ", case=sensitive");
+
+ /* Display no match, and set default value */
+ if (!pat) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ chunk_appendf(&trash, ", found=no");
+ else
+ chunk_appendf(&trash, ", match=no");
+ }
+
+ /* Display match and match info */
+ else {
+ /* display match */
+ if (ctx->display_flags == PAT_REF_MAP)
+ chunk_appendf(&trash, ", found=yes");
+ else
+ chunk_appendf(&trash, ", match=yes");
+
+ /* display index mode */
+ if (pat->sflags & PAT_SF_TREE)
+ chunk_appendf(&trash, ", idx=tree");
+ else
+ chunk_appendf(&trash, ", idx=list");
+
+ /* display pattern */
+ if (ctx->display_flags == PAT_REF_MAP) {
+ if (pat->ref)
+ chunk_appendf(&trash, ", key=\"%s\"", pat->ref->pattern);
+ else
+ chunk_appendf(&trash, ", key=unknown");
+ }
+ else {
+ if (pat->ref)
+ chunk_appendf(&trash, ", pattern=\"%s\"", pat->ref->pattern);
+ else
+ chunk_appendf(&trash, ", pattern=unknown");
+ }
+
+ /* display return value */
+ if (ctx->display_flags == PAT_REF_MAP) {
+ if (pat->data && pat->ref && pat->ref->sample)
+ chunk_appendf(&trash, ", value=\"%s\", type=\"%s\"", pat->ref->sample,
+ smp_to_type[pat->data->type]);
+ else
+ chunk_appendf(&trash, ", value=none");
+ }
+ }
+
+ chunk_appendf(&trash, "\n");
+
+ /* display response */
+ if (applet_putchk(appctx, &trash) == -1) {
+ /* let's try again later from this stream. We add ourselves into
+ * this stream's users so that it can remove us upon termination.
+ */
+ HA_RWLOCK_RDUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+ return 0;
+ }
+
+ /* get next entry */
+ ctx->expr = pat_expr_get_next(ctx->expr,
+ &ctx->ref->pat);
+ }
+ HA_RWLOCK_RDUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+ __fallthrough;
+
+ default:
+ ctx->state = STATE_DONE;
+ return 1;
+ }
+}
+
+static void cli_release_mlook(struct appctx *appctx)
+{
+ struct show_map_ctx *ctx = appctx->svcctx;
+
+ ha_free(&ctx->chunk.area);
+}
+
+
+static int cli_parse_get_map(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
+ /* Set flags. */
+ if (args[1][0] == 'm')
+ ctx->display_flags = PAT_REF_MAP;
+ else
+ ctx->display_flags = PAT_REF_ACL;
+
+ /* No parameter. */
+ if (!*args[2] || !*args[3]) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Missing map identifier and/or key.\n");
+ else
+ return cli_err(appctx, "Missing ACL identifier and/or key.\n");
+ }
+
+ /* lookup into the maps */
+ ctx->ref = pat_ref_lookup_ref(args[2]);
+ if (!ctx->ref) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
+ else
+ return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
+ }
+
+ /* copy input string. The string must be allocated because
+ * it may be used over multiple iterations. It's released
+ * at the end and upon abort anyway.
+ */
+ ctx->chunk.data = strlen(args[3]);
+ ctx->chunk.size = ctx->chunk.data + 1;
+ ctx->chunk.area = strdup(args[3]);
+ if (!ctx->chunk.area)
+ return cli_err(appctx, "Out of memory error.\n");
+
+ return 0;
+ }
+ return 1;
+}
+
+static int cli_parse_prepare_map(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (strcmp(args[1], "map") == 0 ||
+ strcmp(args[1], "acl") == 0) {
+ uint next_gen;
+ char *msg = NULL;
+
+ /* Set ACL or MAP flags. */
+ if (args[1][0] == 'm')
+ ctx->display_flags = PAT_REF_MAP;
+ else
+ ctx->display_flags = PAT_REF_ACL;
+
+ /* lookup into the refs and check the map flag */
+ ctx->ref = pat_ref_lookup_ref(args[2]);
+ if (!ctx->ref ||
+ !(ctx->ref->flags & ctx->display_flags)) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
+ else
+ return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
+ }
+ next_gen = pat_ref_newgen(ctx->ref);
+ return cli_dynmsg(appctx, LOG_INFO, memprintf(&msg, "New version created: %u\n", next_gen));
+ }
+
+ return 0;
+}
+
/* Release handler for "show map"/"show acl": if the interrupted dump left
 * this context registered in the pattern reference's back-reference users
 * list, detach it under the write lock so the list stays consistent for
 * other dumpers.
 */
static void cli_release_show_map(struct appctx *appctx)
{
	struct show_map_ctx *ctx = appctx->svcctx;

	if (!LIST_ISEMPTY(&ctx->bref.users)) {
		HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
		LIST_DEL_INIT(&ctx->bref.users);
		HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
	}
}
+
+static int cli_parse_show_map(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (strcmp(args[1], "map") == 0 ||
+ strcmp(args[1], "acl") == 0) {
+ const char *gen = NULL;
+
+ /* Set ACL or MAP flags. */
+ if (args[1][0] == 'm')
+ ctx->display_flags = PAT_REF_MAP;
+ else
+ ctx->display_flags = PAT_REF_ACL;
+
+ /* no parameter: display all map available */
+ if (!*args[2]) {
+ appctx->io_handler = cli_io_handler_pats_list;
+ return 0;
+ }
+
+ /* For both "map" and "acl" we may have an optional generation
+ * number specified using a "@" character before the pattern
+ * file name.
+ */
+ if (*args[2] == '@') {
+ gen = args[2] + 1;
+ args++;
+ }
+
+ /* lookup into the refs and check the map flag */
+ ctx->ref = pat_ref_lookup_ref(args[2]);
+ if (!ctx->ref ||
+ !(ctx->ref->flags & ctx->display_flags)) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
+ else
+ return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
+ }
+
+ /* set the desired generation id in curr_gen */
+ if (gen)
+ ctx->curr_gen = str2uic(gen);
+ else
+ ctx->curr_gen = ctx->ref->curr_gen;
+
+ LIST_INIT(&ctx->bref.users);
+ appctx->io_handler = cli_io_handler_pat_list;
+ appctx->io_release = cli_release_show_map;
+ return 0;
+ }
+
+ return 0;
+}
+
/* Parser for "set map <map> [<key>|#<ref>] <value>": updates the sample
 * associated with an existing map entry. The entry may be designated either
 * by its key string or, when the argument has the form "#0x...", by the
 * address of its pattern reference element. Returns 0 on success (the
 * prompt is displayed), 1 when the keyword is not ours.
 */
static int cli_parse_set_map(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (strcmp(args[1], "map") == 0) {
		char *err;

		/* Set flags. */
		ctx->display_flags = PAT_REF_MAP;

		/* Expect three parameters: map name, key and new value. */
		if (!*args[2] || !*args[3] || !*args[4])
			return cli_err(appctx, "'set map' expects three parameters: map identifier, key and value.\n");

		/* Lookup the reference in the maps. */
		ctx->ref = pat_ref_lookup_ref(args[2]);
		if (!ctx->ref)
			return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");

		/* If the entry identifier start with a '#', it is considered as
		 * pointer id
		 */
		if (args[3][0] == '#' && args[3][1] == '0' && args[3][2] == 'x') {
			struct pat_ref_elt *ref;
			long long int conv;
			char *error;

			/* Convert argument to integer value. */
			conv = strtoll(&args[3][1], &error, 16);
			if (*error != '\0')
				return cli_err(appctx, "Malformed identifier. Please use #<id> or <file>.\n");

			/* Convert and check integer to pointer. The round-trip
			 * cast detects a value that does not fit in a pointer.
			 */
			ref = (struct pat_ref_elt *)(long)conv;
			if ((long long int)(long)ref != conv)
				return cli_err(appctx, "Malformed identifier. Please use #<id> or <file>.\n");

			/* Try to modify the entry under the write lock; the lock
			 * is always released before returning.
			 */
			err = NULL;
			HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
			if (!pat_ref_set_by_id(ctx->ref, ref, args[4], &err)) {
				HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
				if (err)
					return cli_dynerr(appctx, memprintf(&err, "%s.\n", err));
				else
					return cli_err(appctx, "Failed to update an entry.\n");
			}
			HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
		}
		else {
			/* Else, use the entry identifier as pattern
			 * string, and update the value.
			 */
			err = NULL;
			HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
			if (!pat_ref_set(ctx->ref, args[3], args[4], &err, NULL)) {
				HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
				if (err)
					return cli_dynerr(appctx, memprintf(&err, "%s.\n", err));
				else
					return cli_err(appctx, "Failed to update an entry.\n");
			}
			HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
		}

		/* The set is done, send message. */
		appctx->st0 = CLI_ST_PROMPT;
		return 0;
	}
	return 1;
}
+
/* Parser for "add map [@<ver>] <map> <key> <val>" and
 * "add acl [@<ver>] <acl> <pattern>". With a payload, one key/value pair
 * (or one pattern for ACLs) is read per payload line instead of from the
 * command arguments. Entries are loaded into the requested generation (or
 * the current one when no "@<ver>" prefix is given).
 */
static int cli_parse_add_map(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (strcmp(args[1], "map") == 0 ||
	    strcmp(args[1], "acl") == 0) {
		const char *gen = NULL;
		uint genid = 0;
		int ret;
		char *err;

		/* Set flags. */
		if (args[1][0] == 'm')
			ctx->display_flags = PAT_REF_MAP;
		else
			ctx->display_flags = PAT_REF_ACL;

		/* For both "map" and "acl" we may have an optional generation
		 * number specified using a "@" character before the pattern
		 * file name.
		 */
		if (*args[2] == '@') {
			gen = args[2] + 1;
			args++;
		}

		/* If the keyword is "map", we expect:
		 * - three parameters if there is no payload
		 * - one parameter if there is a payload
		 * If it is "acl", we expect only two parameters
		 */
		if (ctx->display_flags == PAT_REF_MAP) {
			if ((!payload && (!*args[2] || !*args[3] || !*args[4])) ||
			    (payload && !*args[2]))
				return cli_err(appctx,
					       "'add map' expects three parameters (map identifier, key and value)"
					       " or one parameter (map identifier) and a payload\n");
		}
		else if (!*args[2] || !*args[3])
			return cli_err(appctx, "'add acl' expects two parameters: ACL identifier and pattern.\n");

		/* Lookup for the reference. */
		ctx->ref = pat_ref_lookup_ref(args[2]);
		if (!ctx->ref) {
			if (ctx->display_flags == PAT_REF_MAP)
				return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
			else
				return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
		}

		/* refuse a generation that was never prepared (unsigned
		 * wrap-safe "genid > next_gen" comparison)
		 */
		if (gen) {
			genid = str2uic(gen);
			if ((int)(genid - ctx->ref->next_gen) > 0) {
				if (ctx->display_flags == PAT_REF_MAP)
					return cli_err(appctx, "Version number in the future, please use 'prepare map' before.\n");
				else
					return cli_err(appctx, "Version number in the future, please use 'prepare acl' before.\n");
			}
		}

		/* The command "add acl" is prohibited if the reference
		 * use samples.
		 */
		if ((ctx->display_flags & PAT_REF_ACL) &&
		    (ctx->ref->flags & PAT_REF_SMP)) {
			return cli_err(appctx,
				       "This ACL is shared with a map containing samples. "
				       "You must use the command 'add map' to add values.\n");
		}

		/* Add value(s). If no payload is used, key and value are read
		 * from the command line and only one key is set. If a payload
		 * is passed, one key/value pair is read per line till the end
		 * of the payload is reached.
		 */
		err = NULL;

		do {
			char *key = args[3];
			char *value = args[4];
			size_t l;

			if (payload) {
				/* key and value passed as payload, one pair per line */
				if (!*payload)
					break;

				/* key runs until the first space or tab */
				key = payload;
				l = strcspn(key, " \t");
				payload += l;

				if (!*payload && ctx->display_flags == PAT_REF_MAP)
					return cli_dynerr(appctx, memprintf(&err, "Missing value for key '%s'.\n", key));

				/* NOTE(review): when an ACL payload ends right after
				 * the key with no trailing separator, the increment
				 * below steps one byte past the terminating NUL before
				 * strspn() runs on it -- looks like a possible
				 * out-of-bounds read; confirm against the CLI payload
				 * buffer allocation.
				 */
				key[l] = 0;
				payload++;

				/* value: skip separators, then take the rest of the line */
				payload += strspn(payload, " \t");
				value = payload;
				l = strcspn(value, "\n");
				payload += l;
				if (*payload)
					payload++;
				value[l] = 0;
			}

			/* ACLs have no associated sample, only a pattern */
			if (ctx->display_flags != PAT_REF_MAP)
				value = NULL;

			HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
			ret = !!pat_ref_load(ctx->ref, gen ? genid : ctx->ref->curr_gen, key, value, -1, &err);
			HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);

			if (!ret) {
				if (err)
					return cli_dynerr(appctx, memprintf(&err, "%s.\n", err));
				else
					return cli_err(appctx, "Failed to add a key.\n");
			}
		} while (payload && *payload);

		/* The add is done, send message. */
		appctx->st0 = CLI_ST_PROMPT;
		return 1;
	}

	return 0;
}
+
/* Parser for "del map <map> [<key>|#<ref>]" / "del acl <acl> [<key>|#<ref>]":
 * removes one entry, designated either by its key string or, when the
 * argument has the form "#0x...", by the address of its pattern reference
 * element. Always returns 1 (the command completes in one call).
 */
static int cli_parse_del_map(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (args[1][0] == 'm')
		ctx->display_flags = PAT_REF_MAP;
	else
		ctx->display_flags = PAT_REF_ACL;

	/* Expect two parameters: map name and key. */
	if (!*args[2] || !*args[3]) {
		if (ctx->display_flags == PAT_REF_MAP)
			return cli_err(appctx, "This command expects two parameters: map identifier and key.\n");
		else
			return cli_err(appctx, "This command expects two parameters: ACL identifier and key.\n");
	}

	/* Lookup the reference in the maps. */
	ctx->ref = pat_ref_lookup_ref(args[2]);
	if (!ctx->ref ||
	    !(ctx->ref->flags & ctx->display_flags))
		return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");

	/* If the entry identifier start with a '#', it is considered as
	 * pointer id
	 */
	if (args[3][0] == '#' && args[3][1] == '0' && args[3][2] == 'x') {
		struct pat_ref_elt *ref;
		long long int conv;
		char *error;

		/* Convert argument to integer value. */
		conv = strtoll(&args[3][1], &error, 16);
		if (*error != '\0')
			return cli_err(appctx, "Malformed identifier. Please use #<id> or <file>.\n");

		/* Convert and check integer to pointer. The round-trip cast
		 * detects a value that does not fit in a pointer.
		 */
		ref = (struct pat_ref_elt *)(long)conv;
		if ((long long int)(long)ref != conv)
			return cli_err(appctx, "Malformed identifier. Please use #<id> or <file>.\n");

		/* Try to delete the entry under the write lock. */
		HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
		if (!pat_ref_delete_by_id(ctx->ref, ref)) {
			HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
			/* The entry is not found, send message. */
			return cli_err(appctx, "Key not found.\n");
		}
		HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
	}
	else {
		/* Else, use the entry identifier as pattern
		 * string and try to delete the entry.
		 */
		HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
		if (!pat_ref_delete(ctx->ref, args[3])) {
			HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
			/* The entry is not found, send message. */
			return cli_err(appctx, "Key not found.\n");
		}
		HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
	}

	/* The deletion is done, send message. */
	appctx->st0 = CLI_ST_PROMPT;
	return 1;
}
+
+/* continue to clear a map which was started in the parser. The range of
+ * generations this applies to is taken from ctx->curr_gen for the oldest
+ * and ctx->prev_gen for the latest.
+ */
+static int cli_io_handler_clear_map(struct appctx *appctx)
+{
+ struct show_map_ctx *ctx = appctx->svcctx;
+ int finished;
+
+ HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
+ finished = pat_ref_purge_range(ctx->ref, ctx->curr_gen, ctx->prev_gen, 100);
+ HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);
+
+ if (!finished) {
+ /* let's come back later */
+ applet_have_more_data(appctx);
+ return 0;
+ }
+
+ trim_all_pools();
+ return 1;
+}
+
+/* note: sets ctx->curr_gen and ctx->prev_gen to the oldest and
+ * latest generations to clear, respectively, and will call the clear_map
+ * handler.
+ */
+static int cli_parse_clear_map(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
+ const char *gen = NULL;
+
+ /* Set ACL or MAP flags. */
+ if (args[1][0] == 'm')
+ ctx->display_flags = PAT_REF_MAP;
+ else
+ ctx->display_flags = PAT_REF_ACL;
+
+ /* For both "map" and "acl" we may have an optional generation
+ * number specified using a "@" character before the pattern
+ * file name.
+ */
+ if (*args[2] == '@') {
+ gen = args[2] + 1;
+ args++;
+ }
+
+ /* no parameter */
+ if (!*args[2]) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Missing map identifier.\n");
+ else
+ return cli_err(appctx, "Missing ACL identifier.\n");
+ }
+
+ /* lookup into the refs and check the map flag */
+ ctx->ref = pat_ref_lookup_ref(args[2]);
+ if (!ctx->ref ||
+ !(ctx->ref->flags & ctx->display_flags)) {
+ if (ctx->display_flags == PAT_REF_MAP)
+ return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
+ else
+ return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
+ }
+
+ /* set the desired generation id in curr_gen/prev_gen */
+ if (gen)
+ ctx->prev_gen = ctx->curr_gen = str2uic(gen);
+ else
+ ctx->prev_gen = ctx->curr_gen = ctx->ref->curr_gen;
+
+ /* delegate the clearing to the I/O handler which can yield */
+ return 0;
+ }
+ return 1;
+}
+
/* Parser for "commit map @<ver> <map>" / "commit acl @<ver> <acl>": commits
 * generation <ver> so it becomes the current one, then sets ctx->curr_gen
 * and ctx->prev_gen to the range of obsolete generations that the
 * cli_io_handler_clear_map() handler will purge.
 */
static int cli_parse_commit_map(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));

	if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
		const char *gen = NULL;
		uint genid;
		uint ret;

		/* Set ACL or MAP flags. */
		if (args[1][0] == 'm')
			ctx->display_flags = PAT_REF_MAP;
		else
			ctx->display_flags = PAT_REF_ACL;

		if (*args[2] != '@')
			return cli_err(appctx, "Missing version number.\n");

		/* The generation number is mandatory for a commit. The range
		 * of generations that get trashed by a commit starts from the
		 * opposite of the current one and ends at the previous one.
		 */
		gen = args[2] + 1;
		genid = str2uic(gen);
		ctx->prev_gen = genid - 1;
		ctx->curr_gen = ctx->prev_gen - ((~0U) >> 1);

		/* no parameter */
		if (!*args[3]) {
			if (ctx->display_flags == PAT_REF_MAP)
				return cli_err(appctx, "Missing map identifier.\n");
			else
				return cli_err(appctx, "Missing ACL identifier.\n");
		}

		/* lookup into the refs and check the map flag */
		ctx->ref = pat_ref_lookup_ref(args[3]);
		if (!ctx->ref ||
		    !(ctx->ref->flags & ctx->display_flags)) {
			if (ctx->display_flags == PAT_REF_MAP)
				return cli_err(appctx, "Unknown map identifier. Please use #<id> or <file>.\n");
			else
				return cli_err(appctx, "Unknown ACL identifier. Please use #<id> or <file>.\n");
		}

		/* only commit when <genid> lies in the window of prepared but
		 * not-yet-committed generations (unsigned wrap-safe check),
		 * otherwise report an out-of-range version.
		 */
		HA_RWLOCK_WRLOCK(PATREF_LOCK, &ctx->ref->lock);
		if (genid - (ctx->ref->curr_gen + 1) <
		    ctx->ref->next_gen - ctx->ref->curr_gen)
			ret = pat_ref_commit(ctx->ref, genid);
		else
			ret = 1;
		HA_RWLOCK_WRUNLOCK(PATREF_LOCK, &ctx->ref->lock);

		if (ret != 0)
			return cli_err(appctx, "Version number out of range.\n");

		/* delegate the clearing to the I/O handler which can yield */
		return 0;
	}
	return 1;
}
+
+/* register cli keywords */
+
+static struct cli_kw_list cli_kws = {{ },{
+ { { "add", "acl", NULL }, "add acl [@<ver>] <acl> <pattern> : add an acl entry", cli_parse_add_map, NULL },
+ { { "clear", "acl", NULL }, "clear acl [@<ver>] <acl> : clear the contents of this acl", cli_parse_clear_map, cli_io_handler_clear_map, NULL },
+ { { "commit","acl", NULL }, "commit acl @<ver> <acl> : commit the ACL at this version", cli_parse_commit_map, cli_io_handler_clear_map, NULL },
+ { { "del", "acl", NULL }, "del acl <acl> [<key>|#<ref>] : delete acl entries matching <key>", cli_parse_del_map, NULL },
+ { { "get", "acl", NULL }, "get acl <acl> <value> : report the patterns matching a sample for an ACL", cli_parse_get_map, cli_io_handler_map_lookup, cli_release_mlook },
+ { { "prepare","acl",NULL }, "prepare acl <acl> : prepare a new version for atomic ACL replacement", cli_parse_prepare_map, NULL },
+ { { "show", "acl", NULL }, "show acl [@<ver>] <acl>] : report available acls or dump an acl's contents", cli_parse_show_map, NULL },
+ { { "add", "map", NULL }, "add map [@<ver>] <map> <key> <val> : add a map entry (payload supported instead of key/val)", cli_parse_add_map, NULL },
+ { { "clear", "map", NULL }, "clear map [@<ver>] <map> : clear the contents of this map", cli_parse_clear_map, cli_io_handler_clear_map, NULL },
+ { { "commit","map", NULL }, "commit map @<ver> <map> : commit the map at this version", cli_parse_commit_map, cli_io_handler_clear_map, NULL },
+ { { "del", "map", NULL }, "del map <map> [<key>|#<ref>] : delete map entries matching <key>", cli_parse_del_map, NULL },
+ { { "get", "map", NULL }, "get map <acl> <value> : report the keys and values matching a sample for a map", cli_parse_get_map, cli_io_handler_map_lookup, cli_release_mlook },
+ { { "prepare","map",NULL }, "prepare map <acl> : prepare a new version for atomic map replacement", cli_parse_prepare_map, NULL },
+ { { "set", "map", NULL }, "set map <map> [<key>|#<ref>] <value> : modify a map entry", cli_parse_set_map, NULL },
+ { { "show", "map", NULL }, "show map [@ver] [map] : report available maps or dump a map's contents", cli_parse_show_map, NULL },
+ { { NULL }, NULL, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
/* Note: must not be declared <const> as its list will be overwritten
 *
 * For the map_*_int keywords, the output is declared as SMP_T_SINT, but the converter function
 * can provide SMP_T_UINT, SMP_T_SINT or SMP_T_BOOL depending on how the patterns found in the
 * file can be parsed.
 *
 * For the map_*_ip keyword, the output is declared as SMP_T_ADDR, but the converter function
 * can provide SMP_T_IPV4 or SMP_T_IPV6 depending on the patterns found in the file.
 *
 * The map_* keywords only emit strings.
 *
 * The output type is only used during the configuration parsing. It is used for detecting
 * compatibility problems.
 *
 * The arguments are: <file>[,<default value>]
 */
static struct sample_conv_kw_list sample_conv_kws = {ILH, {
	{ "map", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_STR },
	{ "map_str", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_STR },
	{ "map_beg", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_BEG },
	{ "map_sub", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_SUB },
	{ "map_dir", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_DIR },
	{ "map_dom", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_DOM },
	{ "map_end", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_END },
	{ "map_reg", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_REG },
	{ "map_regm", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_STR, (void *)PAT_MATCH_REGM},
	{ "map_int", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_SINT, SMP_T_STR, (void *)PAT_MATCH_INT },
	{ "map_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_ADDR, SMP_T_STR, (void *)PAT_MATCH_IP },

	{ "map_str_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_STR },
	{ "map_beg_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_BEG },
	{ "map_sub_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_SUB },
	{ "map_dir_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_DIR },
	{ "map_dom_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_DOM },
	{ "map_end_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_END },
	{ "map_reg_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_STR, SMP_T_SINT, (void *)PAT_MATCH_REG },
	{ "map_int_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_SINT, SMP_T_SINT, (void *)PAT_MATCH_INT },
	{ "map_ip_int", sample_conv_map, ARG2(1,STR,SINT), sample_load_map, SMP_T_ADDR, SMP_T_SINT, (void *)PAT_MATCH_IP },

	{ "map_str_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_STR },
	{ "map_beg_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_BEG },
	{ "map_sub_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_SUB },
	{ "map_dir_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_DIR },
	{ "map_dom_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_DOM },
	{ "map_end_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_END },
	{ "map_reg_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_STR, SMP_T_ADDR, (void *)PAT_MATCH_REG },
	{ "map_int_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_SINT, SMP_T_ADDR, (void *)PAT_MATCH_INT },
	{ "map_ip_ip", sample_conv_map, ARG2(1,STR,STR), sample_load_map, SMP_T_ADDR, SMP_T_ADDR, (void *)PAT_MATCH_IP },

	{ /* END */ },
}};

INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
diff --git a/src/mjson.c b/src/mjson.c
new file mode 100644
index 0000000..73b7a57
--- /dev/null
+++ b/src/mjson.c
@@ -0,0 +1,1048 @@
+// Copyright (c) 2018-2020 Cesanta Software Limited
+// All rights reserved
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#include <float.h>
+#include <math.h>
+
+#include <import/mjson.h>
+
+#if defined(_MSC_VER)
+#define alloca(x) _alloca(x)
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1700
+#define va_copy(x, y) (x) = (y)
+#define isinf(x) !_finite(x)
+#define isnan(x) _isnan(x)
+#endif
+
+static double mystrtod(const char *str, char **end);
+
// Translate between raw JSON-escapable characters and their escape letters.
// With esc != 0, maps a raw char to its letter ('\n' -> 'n'); with esc == 0,
// maps a letter back to the raw char ('n' -> '\n'). Returns 0 when <c> has
// no such mapping.
static int mjson_esc(int c, int esc) {
  static const char raw[] = "\b\f\n\r\t\\\"";
  static const char enc[] = "bfnrt\\\"";
  const char *from = esc ? raw : enc;
  const char *to = esc ? enc : raw;
  const char *p;
  for (p = from; *p != '\0'; p++) {
    if (*p == c) return to[p - from];
  }
  return 0;
}
+
// Map a raw special character to its JSON escape letter (e.g. '\n' -> 'n');
// returns 0 when <c> needs no single-letter escape.
static int mjson_escape(int c) { return mjson_esc(c, 1); }
+
+static int mjson_pass_string(const char *s, int len) {
+ int i;
+ for (i = 0; i < len; i++) {
+ if (s[i] == '\\' && i + 1 < len && mjson_escape(s[i + 1])) {
+ i++;
+ } else if (s[i] == '\0') {
+ return MJSON_ERROR_INVALID_INPUT;
+ } else if (s[i] == '"') {
+ return i;
+ }
+ }
+ return MJSON_ERROR_INVALID_INPUT;
+}
+
// Minimal SAX-style JSON parser: scans <s> of length <len>, invoking <cb>
// with every token (its type, offset and length) as it is recognized.
// Returns the number of bytes consumed (one past the last token of the
// top-level value) on success, or a negative MJSON_ERROR_* code on
// malformed or too deeply nested input. If <cb> returns non-zero the scan
// stops early and the current position is returned.
int mjson(const char *s, int len, mjson_cb_t cb, void *ud) {
  enum { S_VALUE, S_KEY, S_COLON, S_COMMA_OR_EOO } expecting = S_VALUE;
  unsigned char nesting[MJSON_MAX_DEPTH];  // stack of '{' / '[' openers
  int i, depth = 0;
#define MJSONCALL(ev) \
  if (cb != NULL && cb(ev, s, start, i - start + 1, ud)) return i + 1;

// In the ascii table, the distance between `[` and `]` is 2.
// Ditto for `{` and `}`. Hence +2 in the code below.
#define MJSONEOO() \
  do { \
    if (c != nesting[depth - 1] + 2) return MJSON_ERROR_INVALID_INPUT; \
    depth--; \
    if (depth == 0) { \
      MJSONCALL(tok); \
      return i + 1; \
    } \
  } while (0)

  for (i = 0; i < len; i++) {
    int start = i;
    unsigned char c = ((unsigned char *) s)[i];
    int tok = c;
    if (c == ' ' || c == '\t' || c == '\n' || c == '\r') continue;
    // printf("- %c [%.*s] %d %d\n", c, i, s, depth, expecting);
    switch (expecting) {
      case S_VALUE:
        // a value may open a container, or be a literal/number/string
        if (c == '{') {
          if (depth >= (int) sizeof(nesting)) return MJSON_ERROR_TOO_DEEP;
          nesting[depth++] = c;
          expecting = S_KEY;
          break;
        } else if (c == '[') {
          if (depth >= (int) sizeof(nesting)) return MJSON_ERROR_TOO_DEEP;
          nesting[depth++] = c;
          break;
        } else if (c == ']' && depth > 0) { // Empty array
          MJSONEOO();
        } else if (c == 't' && i + 3 < len && memcmp(&s[i], "true", 4) == 0) {
          i += 3;
          tok = MJSON_TOK_TRUE;
        } else if (c == 'n' && i + 3 < len && memcmp(&s[i], "null", 4) == 0) {
          i += 3;
          tok = MJSON_TOK_NULL;
        } else if (c == 'f' && i + 4 < len && memcmp(&s[i], "false", 5) == 0) {
          i += 4;
          tok = MJSON_TOK_FALSE;
        } else if (c == '-' || ((c >= '0' && c <= '9'))) {
          char *end = NULL;
          mystrtod(&s[i], &end);
          if (end != NULL) i += (int) (end - &s[i] - 1);
          tok = MJSON_TOK_NUMBER;
        } else if (c == '"') {
          int n = mjson_pass_string(&s[i + 1], len - i - 1);
          if (n < 0) return n;
          i += n + 1;
          tok = MJSON_TOK_STRING;
        } else {
          return MJSON_ERROR_INVALID_INPUT;
        }
        // a scalar at depth 0 is a complete document
        if (depth == 0) {
          MJSONCALL(tok);
          return i + 1;
        }
        expecting = S_COMMA_OR_EOO;
        break;

      case S_KEY:
        // inside an object: expect a quoted key or the closing brace
        if (c == '"') {
          int n = mjson_pass_string(&s[i + 1], len - i - 1);
          if (n < 0) return n;
          i += n + 1;
          tok = MJSON_TOK_KEY;
          expecting = S_COLON;
        } else if (c == '}') { // Empty object
          MJSONEOO();
          expecting = S_COMMA_OR_EOO;
        } else {
          return MJSON_ERROR_INVALID_INPUT;
        }
        break;

      case S_COLON:
        if (c == ':') {
          expecting = S_VALUE;
        } else {
          return MJSON_ERROR_INVALID_INPUT;
        }
        break;

      case S_COMMA_OR_EOO:
        // after a value: either another element or the container closes
        if (depth <= 0) return MJSON_ERROR_INVALID_INPUT;
        if (c == ',') {
          expecting = (nesting[depth - 1] == '{') ? S_KEY : S_VALUE;
        } else if (c == ']' || c == '}') {
          MJSONEOO();
        } else {
          return MJSON_ERROR_INVALID_INPUT;
        }
        break;
    }
    MJSONCALL(tok);
  }
  return MJSON_ERROR_INVALID_INPUT;
}
+
/* State shared between mjson_find() and its mjson_get_cb() callback while
 * resolving a JSON path expression against the token stream.
 * (Note: the tag keeps the historical "msjon" spelling of the upstream
 * library; renaming it would touch every user.)
 */
struct msjon_get_data {
  const char *path;     // Lookup json path
  int pos;              // Current path index
  int d1;               // Current depth of traversal
  int d2;               // Expected depth of traversal
  int i1;               // Index in an array
  int i2;               // Expected index in an array
  int obj;              // If the value is array/object, offset where it starts
  const char **tokptr;  // Destination
  int *toklen;          // Destination length
  int tok;              // Returned token
};
+
+#include <stdio.h>
+
// Length in *characters* of the first path component of <s>: counts one per
// logical character up to the next '.', '[' or NUL, treating a backslash and
// the byte it escapes as a single character.
static int plen1(const char *s) {
  int count = 0, pos = 0;
  while (s[pos] != '\0' && s[pos] != '.' && s[pos] != '[') {
    pos += (s[pos] == '\\') ? 2 : 1;
    count++;
  }
  return count;
}
+
// Length in *bytes* of the first path component of <s>: the offset of the
// next '.', '[' or NUL, where a backslash escape consumes two bytes.
// Companion of plen1(), which returns the unescaped character count.
// Fix: dropped the dead counter `n` and its non-portable GCC-only
// __attribute__((unused)) annotation — only the byte offset is returned.
static int plen2(const char *s) {
  int i = 0;
  while (s[i] != '\0' && s[i] != '.' && s[i] != '[')
    i += s[i] == '\\' ? 2 : 1;
  return i;
}
+
// Compare key <a> (n plain characters) against path component <b>, in which
// characters may be backslash-escaped. Returns 0 on equality, otherwise the
// difference of the first mismatching pair (strcmp-style sign).
static int kcmp(const char *a, const char *b, int n) {
  int ai = 0, bi = 0, diff = 0;
  while (ai < n) {
    if (b[bi] == '\\') bi++;  // skip the escape, compare the escaped byte
    diff = a[ai] - b[bi];
    if (diff != 0) break;
    ai++;
    bi++;
  }
  return diff;
}
+
// Traversal callback used by mjson_find(): matches the JSON path held in
// <ud> (struct msjon_get_data) against the token stream emitted by mjson().
// d1 tracks the actual nesting depth, d2 the depth matched so far along the
// path; i1/i2 track the current/wanted array index. Returns 1 to stop the
// parser (value found, or path provably absent), 0 to keep scanning.
static int mjson_get_cb(int tok, const char *s, int off, int len, void *ud) {
  struct msjon_get_data *data = (struct msjon_get_data *) ud;
  // printf("--> %2x %2d %2d %2d %2d\t'%s'\t'%.*s'\t\t'%.*s'\n", tok, data->d1,
  // data->d2, data->i1, data->i2, data->path + data->pos, off, s, len,
  // s + off);
  if (data->tok != MJSON_TOK_INVALID) return 1; // Found

  if (tok == '{') {
    // remember where a matched object starts so a container can be returned
    if (!data->path[data->pos] && data->d1 == data->d2) data->obj = off;
    data->d1++;
  } else if (tok == '[') {
    if (data->d1 == data->d2 && data->path[data->pos] == '[') {
      data->i1 = 0;
      data->i2 = (int) mystrtod(&data->path[data->pos + 1], NULL);
      if (data->i1 == data->i2) {
        data->d2++;
        // NOTE(review): advances exactly 3 chars ("[N]"), i.e. a
        // single-digit index on this path; multi-digit indices are only
        // skipped in the ',' branch below -- confirm intended limits.
        data->pos += 3;
      }
    }
    if (!data->path[data->pos] && data->d1 == data->d2) data->obj = off;
    data->d1++;
  } else if (tok == ',') {
    // element separator inside the array currently being indexed
    if (data->d1 == data->d2 + 1) {
      data->i1++;
      if (data->i1 == data->i2) {
        while (data->path[data->pos] != ']') data->pos++;
        data->pos++;
        data->d2++;
      }
    }
  } else if (tok == MJSON_TOK_KEY && data->d1 == data->d2 + 1 &&
             data->path[data->pos] == '.' && s[off] == '"' &&
             s[off + len - 1] == '"' &&
             plen1(&data->path[data->pos + 1]) == len - 2 &&
             kcmp(s + off + 1, &data->path[data->pos + 1], len - 2) == 0) {
    // key matches the next ".component": descend and advance the path cursor
    data->d2++;
    data->pos += plen2(&data->path[data->pos + 1]) + 1;
  } else if (tok == MJSON_TOK_KEY && data->d1 == data->d2) {
    return 1; // Exhausted path, not found
  } else if (tok == '}' || tok == ']') {
    data->d1--;
    // data->d2--;
    if (!data->path[data->pos] && data->d1 == data->d2 && data->obj != -1) {
      // closer of a matched container: report the span from its opener.
      // tok - 2 maps '}' to '{' and ']' to '[' -- presumably the header's
      // MJSON_TOK_OBJECT/ARRAY values, to be confirmed against mjson.h.
      data->tok = tok - 2;
      if (data->tokptr) *data->tokptr = s + data->obj;
      if (data->toklen) *data->toklen = off - data->obj + 1;
      return 1;
    }
  } else if (MJSON_TOK_IS_VALUE(tok)) {
    // printf("TOK --> %d\n", tok);
    if (data->d1 == data->d2 && !data->path[data->pos]) {
      // scalar value at the end of the path: report it directly
      data->tok = tok;
      if (data->tokptr) *data->tokptr = s + off;
      if (data->toklen) *data->toklen = len;
      return 1;
    }
  }
  return 0;
}
+
// Resolve JSONPath <jp> (must start with '$') inside JSON <s>/<len>.
// On success, stores the token's location in *tokptr/*toklen (either may
// be NULL) and returns its type; returns MJSON_TOK_INVALID on a bad path
// or parse error. NOTE: the initializer order must match the field order
// of struct msjon_get_data.
enum mjson_tok mjson_find(const char *s, int len, const char *jp,
                          const char **tokptr, int *toklen) {
  struct msjon_get_data data = {jp, 1, 0, 0, 0,
                                0, -1, tokptr, toklen, MJSON_TOK_INVALID};
  if (jp[0] != '$') return MJSON_TOK_INVALID;
  if (mjson(s, len, mjson_get_cb, &data) < 0) return MJSON_TOK_INVALID;
  return (enum mjson_tok) data.tok;
}
+
+int mjson_get_number(const char *s, int len, const char *path, double *v) {
+ const char *p;
+ int tok, n;
+ if ((tok = mjson_find(s, len, path, &p, &n)) == MJSON_TOK_NUMBER) {
+ if (v != NULL) *v = mystrtod(p, NULL);
+ }
+ return tok == MJSON_TOK_NUMBER ? 1 : 0;
+}
+
+int mjson_get_bool(const char *s, int len, const char *path, int *v) {
+ int tok = mjson_find(s, len, path, NULL, NULL);
+ if (tok == MJSON_TOK_TRUE && v != NULL) *v = 1;
+ if (tok == MJSON_TOK_FALSE && v != NULL) *v = 0;
+ return tok == MJSON_TOK_TRUE || tok == MJSON_TOK_FALSE ? 1 : 0;
+}
+
static unsigned char mjson_unhex_nimble(const char *s) {
  // Decode exactly two hex digits from <s> into a byte. Supports '0'-'9',
  // 'A'-'F' and 'a'-'f'; input is assumed to be valid hex.
  unsigned char v = 0;
  int k;
  for (k = 0; k < 2; k++) {
    int c = s[k];
    v = (unsigned char) (v << 4);
    if (c >= '0' && c <= '9')
      v |= (unsigned char) (c - '0');
    else if (c >= 'A' && c <= 'F')
      v |= (unsigned char) (c - '7');  // 'A'-'F' -> 10..15
    else
      v |= (unsigned char) (c - 'W');  // 'a'-'f' -> 10..15
  }
  return v;
}
+
// Copy string <s>/<len> into <to> (capacity <n>), resolving backslash
// escapes. Only single-byte \u00XX unicode escapes are supported; anything
// outside the ASCII range makes the function give up. Returns the number
// of bytes written (NUL-terminated when n > 0), or -1 on bad escape or
// insufficient room.
static int mjson_unescape(const char *s, int len, char *to, int n) {
  int i, j;
  for (i = 0, j = 0; i < len && j < n; i++, j++) {
    if (s[i] == '\\' && i + 5 < len && s[i + 1] == 'u') {
      // \uXXXX escape. We could process a simple one-byte chars
      // \u00xx from the ASCII range. More complex chars would require
      // dragging in a UTF8 library, which is too much for us
      if (s[i + 2] != '0' || s[i + 3] != '0') return -1;  // Too much, give up
      to[j] = mjson_unhex_nimble(s + i + 4);
      i += 5;  // consumed "\u00XX" minus the loop's own i++
    } else if (s[i] == '\\' && i + 1 < len) {
      // Simple two-character escape like \n, \t, \" ...
      int c = mjson_esc(s[i + 1], 0);
      if (c == 0) return -1;
      to[j] = c;
      i++;
    } else {
      to[j] = s[i];
    }
  }
  if (j >= n) return -1;  // no room left for the terminating NUL
  if (n > 0) to[j] = '\0';
  return j;
}
+
+int mjson_get_string(const char *s, int len, const char *path, char *to,
+ int n) {
+ const char *p;
+ int sz;
+ if (mjson_find(s, len, path, &p, &sz) != MJSON_TOK_STRING) return -1;
+ return mjson_unescape(p + 1, sz - 2, to, n);
+}
+
+int mjson_get_hex(const char *s, int len, const char *x, char *to, int n) {
+ const char *p;
+ int i, j, sz;
+ if (mjson_find(s, len, x, &p, &sz) != MJSON_TOK_STRING) return -1;
+ for (i = j = 0; i < sz - 3 && j < n; i += 2, j++) {
+ ((unsigned char *) to)[j] = mjson_unhex_nimble(p + i + 1);
+ }
+ if (j < n) to[j] = '\0';
+ return j;
+}
+
+#if MJSON_ENABLE_BASE64
static int mjson_base64rev(int c) {
  // Map a base64 alphabet character to its 6-bit value.
  // Any character outside the alphabet (including '=') maps to 64.
  if (c >= 'A' && c <= 'Z') return c - 'A';
  if (c >= 'a' && c <= 'z') return c - 'a' + 26;
  if (c >= '0' && c <= '9') return c - '0' + 52;
  if (c == '+') return 62;
  if (c == '/') return 63;
  return 64;
}
+
int mjson_base64_dec(const char *src, int n, char *dst, int dlen) {
  // Decode base64 input <src>/<n> into <dst>, writing at most <dlen>
  // bytes; trailing '=' padding suppresses the 2nd/3rd byte of a quartet.
  // Returns the decoded length; NUL-terminates when room remains.
  int out = 0, i;
  for (i = 0; i + 4 <= n && out < dlen; i += 4) {
    int a = mjson_base64rev(src[i]), b = mjson_base64rev(src[i + 1]),
        c = mjson_base64rev(src[i + 2]), d = mjson_base64rev(src[i + 3]);
    dst[out++] = (a << 2) | (b >> 4);
    if (src[i + 2] != '=' && out < dlen) {
      dst[out++] = (b << 4) | (c >> 2);
      if (src[i + 3] != '=' && out < dlen) dst[out++] = (c << 6) | d;
    }
  }
  if (out < dlen) dst[out] = '\0';
  return out;
}
+
+int mjson_get_base64(const char *s, int len, const char *path, char *to,
+ int n) {
+ const char *p;
+ int sz;
+ if (mjson_find(s, len, path, &p, &sz) != MJSON_TOK_STRING) return 0;
+ return mjson_base64_dec(p + 1, sz - 2, to, n);
+}
+#endif // MJSON_ENABLE_BASE64
+
+#if MJSON_ENABLE_NEXT
// State shared with next_cb() during one mjson_next() scan.
struct nextdata {
  int off, len, depth, t, vo, arrayindex;  // resume offset, end-of-value
                                           // offset, nesting depth, latched
                                           // value type/offset, array index
  int *koff, *klen, *voff, *vlen, *vtype;  // caller's result slots (nullable)
};
+
// mjson() callback for mjson_next(): scans the top-level container and
// reports the first key/value (or array element) starting past d->off.
// Depth 1 means "directly inside the scanned container"; d->vo/d->t latch
// a nested value's offset/type until its closing brace is seen.
static int next_cb(int tok, const char *s, int off, int len, void *ud) {
  struct nextdata *d = (struct nextdata *) ud;
  switch (tok) {
    case '{':
    case '[':
      // A top-level array enables element counting
      if (d->depth == 0 && tok == '[') d->arrayindex = 0;
      if (d->depth == 1 && off > d->off) {
        // Nested container starting after the resume point: remember it;
        // its full span is reported when the matching closer arrives
        d->vo = off;
        d->t = tok == '{' ? MJSON_TOK_OBJECT : MJSON_TOK_ARRAY;
        if (d->voff) *d->voff = off;
        if (d->vtype) *d->vtype = d->t;
      }
      d->depth++;
      break;
    case '}':
    case ']':
      d->depth--;
      if (d->depth == 1 && d->vo) {
        // Closing the latched nested value: report its length and stop
        d->len = off + len;
        if (d->vlen) *d->vlen = d->len - d->vo;
        if (d->arrayindex >= 0) {
          if (d->koff) *d->koff = d->arrayindex;  // koff holds array index
          if (d->klen) *d->klen = 0;              // klen holds 0
        }
        return 1;
      }
      if (d->depth == 1 && d->arrayindex >= 0) d->arrayindex++;
      break;
    case ',':
    case ':':
      break;
    case MJSON_TOK_KEY:
      if (d->depth == 1 && d->off < off) {
        if (d->koff) *d->koff = off;  // And report back to the user
        if (d->klen) *d->klen = len;  // If we have to
      }
      break;
    default:
      if (d->depth != 1) break;
      // Scalar value directly inside the scanned container
      if (off > d->off) {
        d->len = off + len;
        if (d->vlen) *d->vlen = len;    // value length
        if (d->voff) *d->voff = off;    // value offset
        if (d->vtype) *d->vtype = tok;  // value type
        if (d->arrayindex >= 0) {
          if (d->koff) *d->koff = d->arrayindex;  // koff holds array index
          if (d->klen) *d->klen = 0;              // klen holds 0
        }
        return 1;
      }
      if (d->arrayindex >= 0) d->arrayindex++;
      break;
  }
  (void) s;
  return 0;
}
+
// Fetch the next key/value pair (or array element) of the top-level
// container in <s>/<n>, resuming after offset <off> (pass 0 initially).
// Returns an offset to feed back in, or 0 when iteration is complete.
// For array elements, *koff receives the element index and *klen is 0.
// NOTE: initializer order must match struct nextdata's field order.
int mjson_next(const char *s, int n, int off, int *koff, int *klen, int *voff,
               int *vlen, int *vtype) {
  struct nextdata d = {off, 0, 0, 0, 0, -1, koff, klen, voff, vlen, vtype};
  mjson(s, n, next_cb, &d);
  return d.len;
}
+#endif
+
+#if MJSON_ENABLE_PRINT
+int mjson_print_fixed_buf(const char *ptr, int len, void *fndata) {
+ struct mjson_fixedbuf *fb = (struct mjson_fixedbuf *) fndata;
+ int i, left = fb->size - 1 - fb->len;
+ if (left < len) len = left;
+ for (i = 0; i < len; i++) fb->ptr[fb->len + i] = ptr[i];
+ fb->len += len;
+ fb->ptr[fb->len] = '\0';
+ return len;
+}
+
// This function allocates memory in chunks of size MJSON_DYNBUF_CHUNK
// to decrease memory fragmentation, when many calls are executed to
// print e.g. a base64 string or a hex string.
// <fndata> is a char** holding the (possibly NULL) heap buffer. On success
// the buffer is grown, the bytes appended and the pointer updated; the
// buffer stays NUL-terminated. On allocation failure 0 is returned and the
// caller's original buffer is left intact (still owned by the caller).
int mjson_print_dynamic_buf(const char *ptr, int len, void *fndata) {
  char *s, *buf = *(char **) fndata;
  size_t curlen = buf == NULL ? 0 : strlen(buf);
  // Round the new size up to the next MJSON_DYNBUF_CHUNK boundary
  size_t new_size = curlen + len + 1 + MJSON_DYNBUF_CHUNK;
  new_size -= new_size % MJSON_DYNBUF_CHUNK;

  if ((s = (char *) realloc(buf, new_size)) == NULL) {
    return 0;
  } else {
    memcpy(s + curlen, ptr, len);
    s[curlen + len] = '\0';
    *(char **) fndata = s;
    return len;
  }
}
+
int mjson_print_null(const char *ptr, int len, void *userdata) {
  // Output sink that discards everything; useful to measure the size a
  // print operation would produce. Always reports <len> bytes "written".
  (void) userdata;
  (void) ptr;
  return len;
}
+
// Forward <len> bytes of <buf> to the output callback <fn>; returns
// whatever the callback reports as written.
int mjson_print_buf(mjson_print_fn_t fn, void *fnd, const char *buf, int len) {
  return fn(buf, len, fnd);
}
+
// Print <val> in decimal through <fn>. When <is_signed> is set, <val> is
// treated as signed and negative values get a leading '-'; otherwise the
// bits are printed as an unsigned long. Returns the byte count written.
int mjson_print_long(mjson_print_fn_t fn, void *fnd, long val, int is_signed) {
  unsigned long v = val, s = 0, n, i;
  char buf[20], t;
  if (is_signed && val < 0) {
    buf[s++] = '-', v = -val;
  }
  // Extract digits least-significant first (they come out in reverse
  // reading order), then reverse them back below.
  for (n = 0; v > 0; v /= 10) buf[s + n++] = "0123456789"[v % 10];
  // Reverse the digit string in place (sign char at buf[0] is untouched)
  for (i = 0; i < n / 2; i++)
    t = buf[s + i], buf[s + i] = buf[s + n - i - 1], buf[s + n - i - 1] = t;
  if (val == 0) buf[n++] = '0';  // zero produced no digits above
  return fn(buf, s + n, fnd);
}
+
// Print int <v> through <fn>; <s> selects signed (non-zero) or unsigned
// interpretation. The unsigned cast before widening to long keeps e.g.
// -1 printing as 4294967295 in unsigned mode.
int mjson_print_int(mjson_print_fn_t fn, void *fnd, int v, int s) {
  return mjson_print_long(fn, fnd, s ? (long) v : (unsigned) v, s);
}
+
static int addexp(char *buf, int e, int sign) {
  // Append an exponent suffix such as "e+07" or "e-123" to <buf> and
  // return its length. Exponents above 400 are rejected with a 0 return
  // (the leading "e<sign>" is written but not counted).
  int n = 0;
  buf[n++] = 'e';
  buf[n++] = (char) sign;
  if (e > 400) return 0;
  if (e < 10) buf[n++] = '0';  // always emit at least two digits
  if (e >= 100) {
    buf[n++] = (char) ('0' + e / 100);
    e %= 100;
  }
  if (e >= 10) {
    buf[n++] = (char) ('0' + e / 10);
    e %= 10;
  }
  buf[n++] = (char) ('0' + e);
  return n;
}
+
// Print double <d> through <fn> with up to <width> fractional digits.
// Magnitudes whose exponent leaves the printable range are emitted in
// scientific notation by recursing on the scaled mantissa and appending
// an exponent via addexp(). inf and nan are emitted literally.
int mjson_print_dbl(mjson_print_fn_t fn, void *fnd, double d, int width) {
  char buf[40];
  int i, s = 0, n = 0, e = 0;
  double t, mul, saved;
  if (d == 0.0) return fn("0", 1, fnd);
  if (isinf(d)) return fn(d > 0 ? "inf" : "-inf", d > 0 ? 3 : 4, fnd);
  if (isnan(d)) return fn("nan", 3, fnd);
  if (d < 0.0) d = -d, buf[s++] = '-';

  // Round: add half of the last printed digit's weight
  saved = d;
  mul = 1.0;
  while (d >= 10.0 && d / mul >= 10.0) mul *= 10.0;
  while (d <= 1.0 && d / mul <= 1.0) mul /= 10.0;
  for (i = 0, t = mul * 5; i < width; i++) t /= 10.0;
  d += t;
  // Calculate exponent, and 'mul' for scientific representation
  mul = 1.0;
  while (d >= 10.0 && d / mul >= 10.0) mul *= 10.0, e++;
  while (d < 1.0 && d / mul < 1.0) mul /= 10.0, e--;
  // printf(" --> %g %d %g %g\n", saved, e, t, mul);

  if (e >= width) {
    // Too large: print the mantissa recursively, then "e+XX"
    struct mjson_fixedbuf fb = {buf + s, (int) sizeof(buf) - s, 0};
    n = mjson_print_dbl(mjson_print_fixed_buf, &fb, saved / mul, width);
    n += addexp(buf + s + n, e, '+');
    return fn(buf, s + n, fnd);
  } else if (e <= -width) {
    // Too small: same, with a negative exponent
    struct mjson_fixedbuf fb = {buf + s, (int) sizeof(buf) - s, 0};
    n = mjson_print_dbl(mjson_print_fixed_buf, &fb, saved / mul, width);
    n += addexp(buf + s + n, -e, '-');
    return fn(buf, s + n, fnd);
  } else {
    // Integer part, most significant digit first
    for (i = 0, t = mul; d >= 1.0 && s + n < (int) sizeof(buf); i++) {
      int ch = (int) (d / t);
      if (n > 0 || ch > 0) buf[s + n++] = ch + '0';
      d -= ch * t;
      t /= 10.0;
    }
    if (n == 0) buf[s++] = '0';
    while (t >= 1.0 && n + s < (int) sizeof(buf)) buf[n++] = '0', t /= 10.0;
    if (s + n < (int) sizeof(buf)) buf[n + s++] = '.';
    // Fractional part, at most <width> digits
    for (i = 0, t = 0.1; s + n < (int) sizeof(buf) && n < width; i++) {
      int ch = (int) (d / t);
      buf[s + n++] = ch + '0';
      d -= ch * t;
      t /= 10.0;
    }
  }
  while (n > 0 && buf[s + n - 1] == '0') n--;  // Trim trailing zeros
  if (n > 0 && buf[s + n - 1] == '.') n--;     // Trim trailing dot
  return fn(buf, s + n, fnd);
}
+
+int mjson_print_str(mjson_print_fn_t fn, void *fnd, const char *s, int len) {
+ int i, n = fn("\"", 1, fnd);
+ for (i = 0; i < len; i++) {
+ char c = mjson_escape(s[i]);
+ if (c) {
+ n += fn("\\", 1, fnd);
+ n += fn(&c, 1, fnd);
+ } else {
+ n += fn(&s[i], 1, fnd);
+ }
+ }
+ return n + fn("\"", 1, fnd);
+}
+
+#if MJSON_ENABLE_BASE64
+int mjson_print_b64(mjson_print_fn_t fn, void *fnd, const unsigned char *s,
+ int n) {
+ const char *t =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ int i, len = fn("\"", 1, fnd);
+ for (i = 0; i < n; i += 3) {
+ int a = s[i], b = i + 1 < n ? s[i + 1] : 0, c = i + 2 < n ? s[i + 2] : 0;
+ char buf[4] = {t[a >> 2], t[(a & 3) << 4 | (b >> 4)], '=', '='};
+ if (i + 1 < n) buf[2] = t[(b & 15) << 2 | (c >> 6)];
+ if (i + 2 < n) buf[3] = t[c & 63];
+ len += fn(buf, sizeof(buf), fnd);
+ }
+ return len + fn("\"", 1, fnd);
+}
+#endif /* MJSON_ENABLE_BASE64 */
+
// Minimal printf-like formatter producing JSON fragments through <fn>.
// Supported specifiers: %Q (quoted/escaped string, NULL-safe), %.*Q,
// %d/%u and %ld/%lu (decimal), %B (boolean "true"/"false"), %s (raw
// string), %.*s, %g, %.*g (double), %V (base64, when enabled), %H (hex
// string) and %M (user callback consuming the va_list). Returns the byte
// count written. Unknown specifiers are silently skipped.
// NOTE(review): %s does not guard against a NULL argument the way %Q
// does — a NULL would crash in strlen(); confirm callers never pass one.
// NOTE(review): va_end() is invoked here on the caller-supplied <xap>
// while callers such as mjson_printf() also va_end() it — a double
// va_end, kept as-is to match upstream mjson behavior.
int mjson_vprintf(mjson_print_fn_t fn, void *fnd, const char *fmt,
                  va_list xap) {
  int i = 0, n = 0;
  va_list ap;
  va_copy(ap, xap);
  while (fmt[i] != '\0') {
    if (fmt[i] == '%') {
      char fc = fmt[++i];
      int is_long = 0;
      if (fc == 'l') {
        is_long = 1;
        fc = fmt[i + 1];
      }
      if (fc == 'Q') {
        char *buf = va_arg(ap, char *);
        n += mjson_print_str(fn, fnd, buf ? buf : "",
                             buf ? (int) strlen(buf) : 0);
      } else if (strncmp(&fmt[i], ".*Q", 3) == 0) {
        int len = va_arg(ap, int);
        char *buf = va_arg(ap, char *);
        n += mjson_print_str(fn, fnd, buf, len);
        i += 2;
      } else if (fc == 'd' || fc == 'u') {
        int is_signed = (fc == 'd');
        if (is_long) {
          long val = va_arg(ap, long);
          n += mjson_print_long(fn, fnd, val, is_signed);
          i++;  // consume the 'l'
        } else {
          int val = va_arg(ap, int);
          n += mjson_print_int(fn, fnd, val, is_signed);
        }
      } else if (fc == 'B') {
        const char *s = va_arg(ap, int) ? "true" : "false";
        n += mjson_print_buf(fn, fnd, s, (int) strlen(s));
      } else if (fc == 's') {
        char *buf = va_arg(ap, char *);
        n += mjson_print_buf(fn, fnd, buf, (int) strlen(buf));
      } else if (strncmp(&fmt[i], ".*s", 3) == 0) {
        int len = va_arg(ap, int);
        char *buf = va_arg(ap, char *);
        n += mjson_print_buf(fn, fnd, buf, len);
        i += 2;
      } else if (fc == 'g') {
        n += mjson_print_dbl(fn, fnd, va_arg(ap, double), 6);
      } else if (strncmp(&fmt[i], ".*g", 3) == 0) {
        int width = va_arg(ap, int);
        n += mjson_print_dbl(fn, fnd, va_arg(ap, double), width);
        i += 2;
#if MJSON_ENABLE_BASE64
      } else if (fc == 'V') {
        int len = va_arg(ap, int);
        const char *buf = va_arg(ap, const char *);
        n += mjson_print_b64(fn, fnd, (unsigned char *) buf, len);
#endif
      } else if (fc == 'H') {
        const char *hex = "0123456789abcdef";
        int i, len = va_arg(ap, int);
        const unsigned char *p = va_arg(ap, const unsigned char *);
        n += fn("\"", 1, fnd);
        for (i = 0; i < len; i++) {
          n += fn(&hex[(p[i] >> 4) & 15], 1, fnd);
          n += fn(&hex[p[i] & 15], 1, fnd);
        }
        n += fn("\"", 1, fnd);
      } else if (fc == 'M') {
        mjson_vprint_fn_t vfn = va_arg(ap, mjson_vprint_fn_t);
        n += vfn(fn, fnd, &ap);
      }
      i++;
    } else {
      n += mjson_print_buf(fn, fnd, &fmt[i++], 1);
    }
  }
  va_end(xap);
  va_end(ap);
  return n;
}
+
// Variadic convenience wrapper around mjson_vprintf(); returns the number
// of bytes written through <fn>.
int mjson_printf(mjson_print_fn_t fn, void *fnd, const char *fmt, ...) {
  va_list ap;
  int len;
  va_start(ap, fmt);
  len = mjson_vprintf(fn, fnd, fmt, ap);
  va_end(ap);
  return len;
}
+#endif /* MJSON_ENABLE_PRINT */
+
static int is_digit(int c) {
  // Locale-independent test for an ASCII decimal digit.
  return '0' <= c && c <= '9';
}
+
/* NOTE: strtod() implementation by Yasuhiro Matsumoto. */
// Minimal locale-independent strtod(): parses [+-]digits[.digits] with an
// optional [eE][+-]digits exponent. Returns the parsed value; when <end>
// is non-NULL it receives the address of the first unparsed character
// (or <str> itself when nothing valid was consumed).
static double mystrtod(const char *str, char **end) {
  double d = 0.0;
  int sign = 1, __attribute__((unused)) n = 0;
  const char *p = str, *a = str;

  /* decimal part */
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  if (is_digit(*p)) {
    d = (double) (*p++ - '0');
    while (*p && is_digit(*p)) {
      d = d * 10.0 + (double) (*p - '0');
      ++p;
      ++n;
    }
    a = p;
  } else if (*p != '.') {
    goto done;  // neither a digit nor a dot: no number here
  }
  d *= sign;

  /* fraction part */
  if (*p == '.') {
    double f = 0.0;
    double base = 0.1;
    ++p;

    if (is_digit(*p)) {
      while (*p && is_digit(*p)) {
        f += base * (*p - '0');
        base /= 10.0;
        ++p;
        ++n;
      }
    }
    d += f * sign;
    a = p;
  }

  /* exponential part */
  if ((*p == 'E') || (*p == 'e')) {
    int i, e = 0, neg = 0;
    p++;
    if (*p == '-') p++, neg++;
    if (*p == '+') p++;
    while (is_digit(*p)) e = e * 10 + *p++ - '0';
    if (neg) e = -e;
#if 0
    if (d == 2.2250738585072011 && e == -308) {
      d = 0.0;
      a = p;
      goto done;
    }
    if (d == 2.2250738585072012 && e <= -308) {
      d *= 1.0e-308;
      a = p;
      goto done;
    }
#endif
    // Apply the exponent by repeated multiplication/division
    for (i = 0; i < e; i++) d *= 10;
    for (i = 0; i < -e; i++) d /= 10;
    a = p;
  } else if (p > str && !is_digit(*(p - 1))) {
    // Trailing junk right after the sign/dot: reject the whole parse
    a = str;
    goto done;
  }

done:
  if (end) *end = (char *) a;
  return d;
}
+
+#if MJSON_ENABLE_MERGE
// JSON merge-patch: apply object <s2>/<n2> onto <s>/<n>, emitting the
// merged object through <fn>. A null value in the patch deletes the key;
// when both sides hold an object the merge recurses; otherwise the patch
// value wins. Returns the number of bytes emitted.
// NOTE(review): key names are copied onto the stack with alloca() —
// unbounded key lengths would grow the stack; confirm inputs are
// size-limited upstream.
int mjson_merge(const char *s, int n, const char *s2, int n2,
                mjson_print_fn_t fn, void *userdata) {
  int koff, klen, voff, vlen, t, t2, k, off = 0, len = 0, comma = 0;
  if (n < 2) return len;
  len += fn("{", 1, userdata);
  // First pass: walk the original object, overriding values present in
  // the patch and dropping keys the patch nulls out
  while ((off = mjson_next(s, n, off, &koff, &klen, &voff, &vlen, &t)) != 0) {
    char *path = (char *) alloca(klen + 1);
    const char *val;
    memcpy(path, "$.", 2);
    memcpy(path + 2, s + koff + 1, klen - 2);
    path[klen] = '\0';
    if ((t2 = mjson_find(s2, n2, path, &val, &k)) != MJSON_TOK_INVALID) {
      if (t2 == MJSON_TOK_NULL) continue;  // null deletes the key
    } else {
      val = s + voff;  // Key is not found in the update. Copy the old value.
    }
    if (comma) len += fn(",", 1, userdata);
    len += fn(s + koff, klen, userdata);
    len += fn(":", 1, userdata);
    if (t == MJSON_TOK_OBJECT && t2 == MJSON_TOK_OBJECT) {
      len += mjson_merge(s + voff, vlen, val, k, fn, userdata);
    } else {
      if (t2 != MJSON_TOK_INVALID) vlen = k;
      len += fn(val, vlen, userdata);
    }
    comma = 1;
  }
  // Second pass: add keys present only in the patch
  off = 0;
  while ((off = mjson_next(s2, n2, off, &koff, &klen, &voff, &vlen, &t)) != 0) {
    char *path = (char *) alloca(klen + 1);
    const char *val;
    if (t == MJSON_TOK_NULL) continue;
    memcpy(path, "$.", 2);
    memcpy(path + 2, s2 + koff + 1, klen - 2);
    path[klen] = '\0';
    if (mjson_find(s, n, path, &val, &vlen) != MJSON_TOK_INVALID) continue;
    if (comma) len += fn(",", 1, userdata);
    len += fn(s2 + koff, klen, userdata);
    len += fn(":", 1, userdata);
    len += fn(s2 + voff, vlen, userdata);
    comma = 1;
  }
  len += fn("}", 1, userdata);
  return len;
}
+#endif // MJSON_ENABLE_MERGE
+
+#if MJSON_ENABLE_PRETTY
// State shared with pretty_cb() during one mjson_pretty() run.
struct prettydata {
  int level;            // current nesting depth
  int len;              // total bytes emitted so far
  int prev;             // previous token, controls newline insertion
  const char *pad;      // indentation unit, repeated <level> times
  int padlen;           // strlen(pad); 0 disables all pretty whitespace
  mjson_print_fn_t fn;  // output sink
  void *userdata;       // sink argument
};
+
// mjson() callback for mjson_pretty(): re-emits every token, inserting
// newlines and <pad> indentation around structural tokens. d->prev holds
// the previous token so empty containers stay on a single line; a zero
// padlen suppresses all added whitespace.
static int pretty_cb(int ev, const char *s, int off, int len, void *ud) {
  struct prettydata *d = (struct prettydata *) ud;
  int i;
  switch (ev) {
    case '{':
    case '[':
      d->level++;
      d->len += d->fn(s + off, len, d->userdata);
      break;
    case '}':
    case ']':
      d->level--;
      // No newline before the closer of an empty container
      if (d->prev != '[' && d->prev != '{' && d->padlen > 0) {
        d->len += d->fn("\n", 1, d->userdata);
        for (i = 0; i < d->level; i++)
          d->len += d->fn(d->pad, d->padlen, d->userdata);
      }
      d->len += d->fn(s + off, len, d->userdata);
      break;
    case ',':
      // Each element/member starts on its own indented line
      d->len += d->fn(s + off, len, d->userdata);
      if (d->padlen > 0) {
        d->len += d->fn("\n", 1, d->userdata);
        for (i = 0; i < d->level; i++)
          d->len += d->fn(d->pad, d->padlen, d->userdata);
      }
      break;
    case ':':
      d->len += d->fn(s + off, len, d->userdata);
      if (d->padlen > 0) d->len += d->fn(" ", 1, d->userdata);
      break;
    case MJSON_TOK_KEY:
      // First key of an object opens a new indented line
      if (d->prev == '{' && d->padlen > 0) {
        d->len += d->fn("\n", 1, d->userdata);
        for (i = 0; i < d->level; i++)
          d->len += d->fn(d->pad, d->padlen, d->userdata);
      }
      d->len += d->fn(s + off, len, d->userdata);
      break;
    default:
      // First element of an array opens a new indented line
      if (d->prev == '[' && d->padlen > 0) {
        d->len += d->fn("\n", 1, d->userdata);
        for (i = 0; i < d->level; i++)
          d->len += d->fn(d->pad, d->padlen, d->userdata);
      }
      d->len += d->fn(s + off, len, d->userdata);
      break;
  }
  d->prev = ev;
  return 0;
}
+
+int mjson_pretty(const char *s, int n, const char *pad, mjson_print_fn_t fn,
+ void *userdata) {
+ struct prettydata d = {0, 0, 0, pad, (int) strlen(pad), fn, userdata};
+ if (mjson(s, n, pretty_cb, &d) < 0) return -1;
+ return d.len;
+}
+#endif // MJSON_ENABLE_PRETTY
+
+#if MJSON_ENABLE_RPC
+struct jsonrpc_ctx jsonrpc_default_context;
+
int mjson_globmatch(const char *s1, int n1, const char *s2, int n2) {
  // Match string <s2>/<n2> against glob pattern <s1>/<n1>. '?' matches a
  // single character, '*' matches any run not containing '/', and '#'
  // matches any run including '/'. Returns 1 on a full match, else 0.
  int pi = 0, si = 0, star_pi = 0, star_si = 0;
  while (pi < n1 || si < n2) {
    if (pi < n1 && si < n2 && (s1[pi] == '?' || s1[pi] == s2[si])) {
      // Literal (or '?') match: advance both
      pi++, si++;
    } else if (pi < n1 && (s1[pi] == '*' || s1[pi] == '#')) {
      // Record the wildcard position for backtracking
      star_pi = pi, star_si = si + 1, pi++;
    } else if (star_si > 0 && star_si <= n2 &&
               (s1[pi - 1] == '#' || s2[si] != '/')) {
      // Mismatch after a wildcard: let the wildcard absorb one more char
      pi = star_pi, si = star_si;
    } else {
      return 0;
    }
  }
  return 1;
}
+
// Emit a JSON-RPC error response of the form
// {"id":..,"error":{"code":..,"message":..[,"data":..]}}. Notifications
// (requests with an empty id) get no response at all. <data_fmt>, when
// non-NULL, is an mjson_vprintf() format producing the "data" member.
void jsonrpc_return_errorv(struct jsonrpc_request *r, int code,
                           const char *message, const char *data_fmt,
                           va_list ap) {
  if (r->id_len == 0) return;  // notification: no response expected
  mjson_printf(r->fn, r->fndata,
               "{\"id\":%.*s,\"error\":{\"code\":%d,\"message\":%Q", r->id_len,
               r->id, code, message == NULL ? "" : message);
  if (data_fmt != NULL) {
    mjson_printf(r->fn, r->fndata, ",\"data\":");
    mjson_vprintf(r->fn, r->fndata, data_fmt, ap);
  }
  mjson_printf(r->fn, r->fndata, "}}\n");
}
+
// Variadic convenience wrapper around jsonrpc_return_errorv().
void jsonrpc_return_error(struct jsonrpc_request *r, int code,
                          const char *message, const char *data_fmt, ...) {
  va_list ap;
  va_start(ap, data_fmt);
  jsonrpc_return_errorv(r, code, message, data_fmt, ap);
  va_end(ap);
}
+
// Emit a JSON-RPC success response {"id":..,"result":..}. Notifications
// (empty id) get no response. <result_fmt>, when non-NULL, is an
// mjson_vprintf() format producing the result; otherwise "null" is used.
void jsonrpc_return_successv(struct jsonrpc_request *r, const char *result_fmt,
                             va_list ap) {
  if (r->id_len == 0) return;  // notification: no response expected
  mjson_printf(r->fn, r->fndata, "{\"id\":%.*s,\"result\":", r->id_len, r->id);
  if (result_fmt != NULL) {
    mjson_vprintf(r->fn, r->fndata, result_fmt, ap);
  } else {
    mjson_printf(r->fn, r->fndata, "%s", "null");
  }
  mjson_printf(r->fn, r->fndata, "}\n");
}
+
// Variadic convenience wrapper around jsonrpc_return_successv().
void jsonrpc_return_success(struct jsonrpc_request *r, const char *result_fmt,
                            ...) {
  va_list ap;
  va_start(ap, result_fmt);
  jsonrpc_return_successv(r, result_fmt, ap);
  va_end(ap);
}
+
// Process one JSON-RPC frame from <buf>/<len>. Frames carrying "result"
// or "error" are treated as responses and handed to ctx->response_cb;
// request frames are dispatched to the first registered method whose glob
// pattern matches the (quote-stripped) "method" string. Unparseable
// frames produce a -32700 error; unmatched methods a "not found" error.
// NOTE: the initializer order must match struct jsonrpc_request's fields.
void jsonrpc_ctx_process(struct jsonrpc_ctx *ctx, const char *buf, int len,
                         mjson_print_fn_t fn, void *fndata, void *ud) {
  const char *result = NULL, *error = NULL;
  int result_sz = 0, error_sz = 0;
  struct jsonrpc_method *m = NULL;
  struct jsonrpc_request r = {ctx, buf, len, 0, 0, 0, 0, 0, 0, fn, fndata, ud};

  // Is it a response frame?
  mjson_find(buf, len, "$.result", &result, &result_sz);
  if (result == NULL) mjson_find(buf, len, "$.error", &error, &error_sz);
  if (result_sz > 0 || error_sz > 0) {
    if (ctx->response_cb) ctx->response_cb(buf, len, ctx->response_cb_data);
    return;
  }

  // Method must exist and must be a string
  if (mjson_find(buf, len, "$.method", &r.method, &r.method_len) !=
      MJSON_TOK_STRING) {
    mjson_printf(fn, fndata, "{\"error\":{\"code\":-32700,\"message\":%.*Q}}\n",
                 len, buf);
    return;
  }

  // id and params are optional
  mjson_find(buf, len, "$.id", &r.id, &r.id_len);
  mjson_find(buf, len, "$.params", &r.params, &r.params_len);

  // Dispatch: +1/-2 strip the surrounding quotes from the method token
  for (m = ctx->methods; m != NULL; m = m->next) {
    if (mjson_globmatch(m->method, m->method_sz, r.method + 1,
                        r.method_len - 2) > 0) {
      if (r.params == NULL) r.params = "";
      m->cb(&r);
      break;
    }
  }
  if (m == NULL) {
    jsonrpc_return_error(&r, JSONRPC_ERROR_NOT_FOUND, "method not found", NULL);
  }
}
+
+static int jsonrpc_print_methods(mjson_print_fn_t fn, void *fndata,
+ va_list *ap) {
+ struct jsonrpc_ctx *ctx = va_arg(*ap, struct jsonrpc_ctx *);
+ struct jsonrpc_method *m;
+ int len = 0;
+ for (m = ctx->methods; m != NULL; m = m->next) {
+ if (m != ctx->methods) len += mjson_print_buf(fn, fndata, ",", 1);
+ len += mjson_print_str(fn, fndata, m->method, (int) strlen(m->method));
+ }
+ return len;
+}
+
// Built-in RPC method: replies with the JSON array of registered method
// names (rendered by jsonrpc_print_methods() via the %M specifier).
static void rpclist(struct jsonrpc_request *r) {
  jsonrpc_return_success(r, "[%M]", jsonrpc_print_methods, r->ctx);
}
+
// Initialize <ctx>: record the response callback and its opaque argument,
// then register the built-in method-listing RPC under MJSON_RPC_LIST_NAME.
void jsonrpc_ctx_init(struct jsonrpc_ctx *ctx, mjson_print_fn_t response_cb,
                      void *response_cb_data) {
  ctx->response_cb = response_cb;
  ctx->response_cb_data = response_cb_data;
  jsonrpc_ctx_export(ctx, MJSON_RPC_LIST_NAME, rpclist);
}
+
// Initialize the global default JSON-RPC context.
void jsonrpc_init(mjson_print_fn_t response_cb, void *userdata) {
  jsonrpc_ctx_init(&jsonrpc_default_context, response_cb, userdata);
}
+#endif // MJSON_ENABLE_RPC
diff --git a/src/mqtt.c b/src/mqtt.c
new file mode 100644
index 0000000..5688296
--- /dev/null
+++ b/src/mqtt.c
@@ -0,0 +1,1281 @@
+/*
+ * MQTT Protocol
+ *
+ * Copyright 2020 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/chunk.h>
+#include <haproxy/mqtt.h>
+
/* Expected fixed-header flags value for each MQTT control packet type,
 * indexed by MQTT_CPT_*. Packets whose flags differ are rejected by
 * mqtt_read_fixed_hdr().
 */
uint8_t mqtt_cpt_flags[MQTT_CPT_ENTRIES] = {
	[MQTT_CPT_INVALID]     = 0x00,
	[MQTT_CPT_CONNECT]     = 0x00,
	[MQTT_CPT_CONNACK]     = 0x00,

	/* MQTT_CPT_PUBLISH flags can have different values (DUP, QoS, RETAIN), must be
	 * checked more carefully
	 */
	[MQTT_CPT_PUBLISH]     = 0x0F,

	[MQTT_CPT_PUBACK]      = 0x00,
	[MQTT_CPT_PUBREC]      = 0x00,
	[MQTT_CPT_PUBREL]      = 0x02,
	[MQTT_CPT_PUBCOMP]     = 0x00,
	[MQTT_CPT_SUBSCRIBE]   = 0x02,
	[MQTT_CPT_SUBACK]      = 0x00,
	[MQTT_CPT_UNSUBSCRIBE] = 0x02,
	[MQTT_CPT_UNSUBACK]    = 0x00,
	[MQTT_CPT_PINGREQ]     = 0x00,
	[MQTT_CPT_PINGRESP]    = 0x00,
	[MQTT_CPT_DISCONNECT]  = 0x00,
	[MQTT_CPT_AUTH]        = 0x00,
};
+
/* Identifier used for each capturable MQTT field, indexed by MQTT_FN_*:
 * a symbolic name for fields common to MQTT 3.1/3.1.1, and the numeric
 * MQTT 5.0 property identifier (as a string) for 5.0-only properties.
 */
const struct ist mqtt_fields_string[MQTT_FN_ENTRIES] = {
	[MQTT_FN_INVALID]                           = IST(""),

	/* it's MQTT 3.1, 3.1.1 and 5.0, those fields have no unique id, so we use strings */
	[MQTT_FN_FLAGS]                             = IST("flags"),
	[MQTT_FN_REASON_CODE]                       = IST("reason_code"),      /* MQTT 3.1 and 3.1.1: return_code */
	[MQTT_FN_PROTOCOL_NAME]                     = IST("protocol_name"),
	[MQTT_FN_PROTOCOL_VERSION]                  = IST("protocol_version"), /* MQTT 3.1.1: protocol_level */
	[MQTT_FN_CLIENT_IDENTIFIER]                 = IST("client_identifier"),
	[MQTT_FN_WILL_TOPIC]                        = IST("will_topic"),
	[MQTT_FN_WILL_PAYLOAD]                      = IST("will_payload"),     /* MQTT 3.1 and 3.1.1: will_message */
	[MQTT_FN_USERNAME]                          = IST("username"),
	[MQTT_FN_PASSWORD]                          = IST("password"),
	[MQTT_FN_KEEPALIVE]                         = IST("keepalive"),
	/* from here, it's MQTT 5.0 only */
	[MQTT_FN_PAYLOAD_FORMAT_INDICATOR]          = IST("1"),
	[MQTT_FN_MESSAGE_EXPIRY_INTERVAL]           = IST("2"),
	[MQTT_FN_CONTENT_TYPE]                      = IST("3"),
	[MQTT_FN_RESPONSE_TOPIC]                    = IST("8"),
	[MQTT_FN_CORRELATION_DATA]                  = IST("9"),
	[MQTT_FN_SUBSCRIPTION_IDENTIFIER]           = IST("11"),
	[MQTT_FN_SESSION_EXPIRY_INTERVAL]           = IST("17"),
	[MQTT_FN_ASSIGNED_CLIENT_IDENTIFIER]        = IST("18"),
	[MQTT_FN_SERVER_KEEPALIVE]                  = IST("19"),
	[MQTT_FN_AUTHENTICATION_METHOD]             = IST("21"),
	[MQTT_FN_AUTHENTICATION_DATA]               = IST("22"),
	[MQTT_FN_REQUEST_PROBLEM_INFORMATION]       = IST("23"),
	[MQTT_FN_DELAY_INTERVAL]                    = IST("24"),
	[MQTT_FN_REQUEST_RESPONSE_INFORMATION]      = IST("25"),
	[MQTT_FN_RESPONSE_INFORMATION]              = IST("26"),
	[MQTT_FN_SERVER_REFERENCE]                  = IST("28"),
	[MQTT_FN_REASON_STRING]                     = IST("31"),
	[MQTT_FN_RECEIVE_MAXIMUM]                   = IST("33"),
	[MQTT_FN_TOPIC_ALIAS_MAXIMUM]               = IST("34"),
	[MQTT_FN_TOPIC_ALIAS]                       = IST("35"),
	[MQTT_FN_MAXIMUM_QOS]                       = IST("36"),
	[MQTT_FN_RETAIN_AVAILABLE]                  = IST("37"),
	[MQTT_FN_USER_PROPERTY]                     = IST("38"),
	[MQTT_FN_MAXIMUM_PACKET_SIZE]               = IST("39"),
	[MQTT_FN_WILDCARD_SUBSCRIPTION_AVAILABLE]   = IST("40"),
	[MQTT_FN_SUBSCRIPTION_IDENTIFIERS_AVAILABLE] = IST("41"),
	[MQTT_FN_SHARED_SUBSCRIPTION_AVAILABLE]     = IST("42"),
};
+
/* list of supported capturable field names for each MQTT control packet type:
 * a bitmask of MQTT_FN_BIT_* values, indexed by MQTT_CPT_*.
 */
const uint64_t mqtt_fields_per_packet[MQTT_CPT_ENTRIES] = {
	[MQTT_CPT_INVALID]     = 0,

	[MQTT_CPT_CONNECT]     = MQTT_FN_BIT_PROTOCOL_NAME | MQTT_FN_BIT_PROTOCOL_VERSION |
	                         MQTT_FN_BIT_FLAGS | MQTT_FN_BIT_KEEPALIVE |
	                         MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL | MQTT_FN_BIT_RECEIVE_MAXIMUM |
	                         MQTT_FN_BIT_MAXIMUM_PACKET_SIZE | MQTT_FN_BIT_TOPIC_ALIAS_MAXIMUM |
	                         MQTT_FN_BIT_REQUEST_RESPONSE_INFORMATION | MQTT_FN_BIT_REQUEST_PROBLEM_INFORMATION |
	                         MQTT_FN_BIT_USER_PROPERTY | MQTT_FN_BIT_AUTHENTICATION_METHOD |
	                         MQTT_FN_BIT_AUTHENTICATION_DATA | MQTT_FN_BIT_CLIENT_IDENTIFIER |
	                         MQTT_FN_BIT_DELAY_INTERVAL | MQTT_FN_BIT_PAYLOAD_FORMAT_INDICATOR |
	                         MQTT_FN_BIT_MESSAGE_EXPIRY_INTERVAL | MQTT_FN_BIT_CONTENT_TYPE |
	                         MQTT_FN_BIT_RESPONSE_TOPIC | MQTT_FN_BIT_CORRELATION_DATA |
	                         MQTT_FN_BIT_USER_PROPERTY | MQTT_FN_BIT_WILL_TOPIC |
	                         MQTT_FN_BIT_WILL_PAYLOAD | MQTT_FN_BIT_USERNAME |
	                         MQTT_FN_BIT_PASSWORD,

	[MQTT_CPT_CONNACK]     = MQTT_FN_BIT_FLAGS | MQTT_FN_BIT_PROTOCOL_VERSION |
	                         MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL |
	                         MQTT_FN_BIT_RECEIVE_MAXIMUM | MQTT_FN_BIT_MAXIMUM_QOS |
	                         MQTT_FN_BIT_RETAIN_AVAILABLE | MQTT_FN_BIT_MAXIMUM_PACKET_SIZE |
	                         MQTT_FN_BIT_ASSIGNED_CLIENT_IDENTIFIER | MQTT_FN_BIT_TOPIC_ALIAS_MAXIMUM |
	                         MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_WILDCARD_SUBSCRIPTION_AVAILABLE |
	                         MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIERS_AVAILABLE| MQTT_FN_BIT_SHARED_SUBSCRIPTION_AVAILABLE |
	                         MQTT_FN_BIT_SERVER_KEEPALIVE | MQTT_FN_BIT_RESPONSE_INFORMATION |
	                         MQTT_FN_BIT_SERVER_REFERENCE | MQTT_FN_BIT_USER_PROPERTY |
	                         MQTT_FN_BIT_AUTHENTICATION_METHOD | MQTT_FN_BIT_AUTHENTICATION_DATA,

	[MQTT_CPT_PUBLISH]     = MQTT_FN_BIT_PAYLOAD_FORMAT_INDICATOR | MQTT_FN_BIT_MESSAGE_EXPIRY_INTERVAL |
	                         MQTT_FN_BIT_CONTENT_TYPE | MQTT_FN_BIT_RESPONSE_TOPIC |
	                         MQTT_FN_BIT_CORRELATION_DATA | MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIER |
	                         MQTT_FN_BIT_TOPIC_ALIAS | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_PUBACK]      = MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_PUBREC]      = MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_PUBREL]      = MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_PUBCOMP]     = MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_SUBSCRIBE]   = MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIER | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_SUBACK]      = MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_UNSUBSCRIBE] = MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_UNSUBACK]    = MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_PINGREQ]     = 0,

	[MQTT_CPT_PINGRESP]    = 0,

	[MQTT_CPT_DISCONNECT]  = MQTT_FN_BIT_REASON_CODE | MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL |
	                         MQTT_FN_BIT_SERVER_REFERENCE | MQTT_FN_BIT_REASON_STRING |
	                         MQTT_FN_BIT_USER_PROPERTY,

	[MQTT_CPT_AUTH]        = MQTT_FN_BIT_AUTHENTICATION_METHOD | MQTT_FN_BIT_AUTHENTICATION_DATA |
	                         MQTT_FN_BIT_REASON_STRING | MQTT_FN_BIT_USER_PROPERTY,
};
+
+/* Checks the first byte of a message to read the fixed header and extract the
+ * packet type and flags. <parser> is supposed to point to the fix header byte.
+ *
+ * Fix header looks like:
+ * +-------+-----------+-----------+-----------+---------+----------+----------+---------+------------+
+ * | bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * +-------+-----------+-----------+-----------+---------+----------+----------+---------+------------+
+ * | field | MQTT Control Packet Type | Flags specific to each Control Packet type |
+ * +-------+---------------------------------------------+--------------------------------------------+
+ *
+ * On success, <ptk> is updated with the packet type and flags and the new parser
+ * state is returned. On error, IST_NULL is returned.
+ */
+static inline struct ist mqtt_read_fixed_hdr(struct ist parser, struct mqtt_pkt *pkt)
+{
+ uint8_t type = (uint8_t)*istptr(parser);
+ uint8_t ptype = (type & 0xF0) >> 4;
+ uint8_t flags = type & 0x0F;
+
+ if (ptype == MQTT_CPT_INVALID || ptype >= MQTT_CPT_ENTRIES || flags != mqtt_cpt_flags[ptype])
+ return IST_NULL;
+
+ pkt->fixed_hdr.type = ptype;
+ pkt->fixed_hdr.flags = flags;
+ return istnext(parser);
+}
+
+/* Reads a one byte integer. more information here :
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901007
+ *
+ * <parser> is supposed to point to the first byte of the integer. On success
+ * the integer is stored in <*i>, if provided, and the new parser state is returned. On
+ * error, IST_NULL is returned.
+*/
+static inline struct ist mqtt_read_1byte_int(struct ist parser, uint8_t *i)
+{
+ if (istlen(parser) < 1)
+ return IST_NULL;
+ if (i)
+ *i = (uint8_t)*istptr(parser);
+ parser = istnext(parser);
+ return parser;
+}
+
+/* Reads a two byte integer. more information here :
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901008
+ *
+ * <parser> is supposed to point to the first byte of the integer. On success
+ * the integer is stored in <*i>, if provided, and the new parser state is returned. On
+ * error, IST_NULL is returned.
+*/
+static inline struct ist mqtt_read_2byte_int(struct ist parser, uint16_t *i)
+{
+ if (istlen(parser) < 2)
+ return IST_NULL;
+ if (i) {
+ *i = (uint8_t)*istptr(parser) << 8;
+ *i += (uint8_t)*(istptr(parser) + 1);
+ }
+ parser = istadv(parser, 2);
+ return parser;
+}
+
+/* Reads a four byte integer. more information here :
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901009
+ *
+ * <parser> is supposed to point to the first byte of the integer. On success
+ * the integer is stored in <*i>, if provided, and the new parser state is returned. On
+ * error, IST_NULL is returned.
+*/
+static inline struct ist mqtt_read_4byte_int(struct ist parser, uint32_t *i)
+{
+ if (istlen(parser) < 4)
+ return IST_NULL;
+ if (i) {
+ *i = (uint8_t)*istptr(parser) << 24;
+ *i += (uint8_t)*(istptr(parser) + 1) << 16;
+ *i += (uint8_t)*(istptr(parser) + 2) << 8;
+ *i += (uint8_t)*(istptr(parser) + 3);
+ }
+ parser = istadv(parser, 4);
+ return parser;
+}
+
/* Reads a variable byte integer. More information here:
 * https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718023
 * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901011
 *
 * It is encoded using a variable length encoding scheme which uses a single
 * byte for values up to 127. Larger values are handled as follows. The least
 * significant seven bits of each byte encode the data, and the most significant
 * bit is used to indicate that there are following bytes in the representation.
 * Thus each byte encodes 128 values and a "continuation bit".
 *
 * The maximum number of bytes in the Remaining Length field is four
 * (MQTT_REMAINING_LENGHT_MAX_SIZE).
 *
 * <parser> is supposed to point to the first byte of the integer. On success
 * the integer is stored in <*i> and the new parser state is returned. On
 * error, IST_NULL is returned.
 *
 * NOTE(review): if the buffer ends in the middle of a varint (a continuation
 * bit set on the last available byte), the loop exits on istlen() and the
 * function still returns the advanced parser with a partial value in <*i>.
 * The caller presumably compensates through its own remaining-length /
 * need-more-data checks — confirm before relying on this for other uses.
 */
static inline struct ist mqtt_read_varint(struct ist parser, uint32_t *i)
{
	int off, m;  /* <off> counts consumed bytes, <m> is the bit offset of the next 7-bit group */

	off = m = 0;
	if (i)
		*i = 0;
	/* consume at most 4 bytes, stopping early if the buffer runs out */
	for (off = 0; off < MQTT_REMAINING_LENGHT_MAX_SIZE && istlen(parser); off++) {
		uint8_t byte = (uint8_t)*istptr(parser);

		if (i) {
			/* low 7 bits carry data, least significant group first */
			*i += (byte & 127) << m;
			m += 7; /* preparing <m> for next byte */
		}
		parser = istnext(parser);

		/* we read the latest byte for the remaining length field */
		if (byte <= 127)
			break;
	}

	/* 4 bytes consumed without reaching a final byte (MSB clear): invalid */
	if (off == MQTT_REMAINING_LENGHT_MAX_SIZE)
		return IST_NULL;
	return parser;
}
+
+/* Reads a MQTT string. more information here :
+ * http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718016
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901010
+ *
+ * In MQTT, strings are prefixed by their size, encoded over 2 bytes:
+ * byte 1: length MSB
+ * byte 2: length LSB
+ * byte 3: string
+ * ...
+ *
+ * string size is MSB * 256 + LSB
+ *
+ * <parser> is supposed to point to the first byte of the string. On success the
+ * string is stored in <*str>, if provided, and the new parser state is
+ * returned. On error, IST_NULL is returned.
+ */
+static inline struct ist mqtt_read_string(struct ist parser, struct ist *str)
+{
+ uint16_t len = 0;
+
+ /* read and compute the string length */
+ if (istlen(parser) < 2)
+ goto error;
+
+ parser = mqtt_read_2byte_int(parser, &len);
+ if (!isttest(parser) || istlen(parser) < len)
+ goto error;
+
+ if (str) {
+ str->ptr = istptr(parser);
+ str->len = len;
+ }
+
+ return istadv(parser, len);
+
+ error:
+ return IST_NULL;
+}
+
+/* Helper function to convert a unsigned integer to a string. The result is
+ * written in <buf>. On success, the written size is returned, otherwise, on
+ * error, 0 is returned.
+ */
+static inline size_t mqtt_uint2str(struct buffer *buf, uint32_t i)
+{
+ char *end;
+
+ end = ultoa_o(i, buf->area, buf->size);
+ if (!end)
+ return 0;
+ buf->data = end - buf->area;
+ return buf->data;
+}
+
/* Extracts the value of a <fieldname_id> of type <type> from a given MQTT
 * message <msg>. IST_NULL is returned if an error occurred while parsing or if
 * the field could not be found. If more data are required, the message with a
 * length set to 0 is returned. If the field is found, the response is returned
 * as a struct ist.
 *
 * Numeric fields are rendered as decimal strings into the trash chunk, so for
 * those the returned ist points into the trash area and is only valid until
 * the trash chunk is reused. NOTE(review): callers are presumably expected to
 * consume the result immediately — verify against callers.
 */
struct ist mqtt_field_value(struct ist msg, int type, int fieldname_id)
{
	struct buffer *trash = get_trash_chunk();
	struct mqtt_pkt mpkt;
	struct ist res;

	/* fully parse and validate the message before looking any field up */
	switch (mqtt_validate_message(msg, &mpkt)) {
	case MQTT_VALID_MESSAGE:
		/* the parsed packet type must match the requested one */
		if (mpkt.fixed_hdr.type != type)
			goto not_found_or_invalid;
		break;
	case MQTT_NEED_MORE_DATA:
		goto need_more;
	case MQTT_INVALID_MESSAGE:
		goto not_found_or_invalid;
	}

	switch (type) {
	case MQTT_CPT_CONNECT:
		switch (fieldname_id) {
		case MQTT_FN_FLAGS:
			/* integer field: rendered as a decimal string in <trash> */
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.flags))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_PROTOCOL_NAME:
			/* string fields point straight into the parsed message */
			if (!istlen(mpkt.data.connect.var_hdr.protocol_name))
				goto not_found_or_invalid;
			res = mpkt.data.connect.var_hdr.protocol_name;
			goto end;

		case MQTT_FN_PROTOCOL_VERSION:
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.protocol_version))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_CLIENT_IDENTIFIER:
			if (!istlen(mpkt.data.connect.payload.client_identifier))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.client_identifier;
			goto end;

		case MQTT_FN_WILL_TOPIC:
			if (!istlen(mpkt.data.connect.payload.will_topic))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.will_topic;
			goto end;

		case MQTT_FN_WILL_PAYLOAD:
			if (!istlen(mpkt.data.connect.payload.will_payload))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.will_payload;
			goto end;

		case MQTT_FN_USERNAME:
			if (!istlen(mpkt.data.connect.payload.username))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.username;
			goto end;

		case MQTT_FN_PASSWORD:
			if (!istlen(mpkt.data.connect.payload.password))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.password;
			goto end;

		case MQTT_FN_KEEPALIVE:
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.keepalive))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		/* Will properties: MQTT 5.0 only, and only when a Will is announced */
		case MQTT_FN_PAYLOAD_FORMAT_INDICATOR:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.payload.will_props.payload_format_indicator))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_MESSAGE_EXPIRY_INTERVAL:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.payload.will_props.message_expiry_interval))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_CONTENT_TYPE:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connect.payload.will_props.content_type))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.will_props.content_type;
			goto end;

		case MQTT_FN_RESPONSE_TOPIC:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connect.payload.will_props.response_topic))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.will_props.response_topic;
			goto end;

		case MQTT_FN_CORRELATION_DATA:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connect.payload.will_props.correlation_data))
				goto not_found_or_invalid;
			res = mpkt.data.connect.payload.will_props.correlation_data;
			goto end;

		/* variable-header properties: MQTT 5.0 only */
		case MQTT_FN_SESSION_EXPIRY_INTERVAL:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.session_expiry_interval))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_AUTHENTICATION_METHOD:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connect.var_hdr.props.authentication_method))
				goto not_found_or_invalid;
			res = mpkt.data.connect.var_hdr.props.authentication_method;
			goto end;

		case MQTT_FN_AUTHENTICATION_DATA:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connect.var_hdr.props.authentication_data))
				goto not_found_or_invalid;
			res = mpkt.data.connect.var_hdr.props.authentication_data;
			goto end;

		case MQTT_FN_REQUEST_PROBLEM_INFORMATION:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.request_problem_information))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_DELAY_INTERVAL:
			if ((mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0) ||
			    !(mpkt.data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL))
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.payload.will_props.delay_interval))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_REQUEST_RESPONSE_INFORMATION:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.request_response_information))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_RECEIVE_MAXIMUM:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.receive_maximum))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_TOPIC_ALIAS_MAXIMUM:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.topic_alias_maximum))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_MAXIMUM_PACKET_SIZE:
			if (mpkt.data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connect.var_hdr.props.maximum_packet_size))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		default:
			goto not_found_or_invalid;
		}
		break;

	case MQTT_CPT_CONNACK:
		switch (fieldname_id) {
		case MQTT_FN_FLAGS:
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.flags))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_REASON_CODE:
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.reason_code))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_PROTOCOL_VERSION:
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.protocol_version))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		/* CONNACK properties: MQTT 5.0 only */
		case MQTT_FN_SESSION_EXPIRY_INTERVAL:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.session_expiry_interval))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_ASSIGNED_CLIENT_IDENTIFIER:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.assigned_client_identifier))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.assigned_client_identifier;
			goto end;

		case MQTT_FN_SERVER_KEEPALIVE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.server_keepalive))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_AUTHENTICATION_METHOD:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.authentication_method))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.authentication_method;
			goto end;

		case MQTT_FN_AUTHENTICATION_DATA:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.authentication_data))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.authentication_data;
			goto end;

		case MQTT_FN_RESPONSE_INFORMATION:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.response_information))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.response_information;
			goto end;

		case MQTT_FN_SERVER_REFERENCE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.server_reference))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.server_reference;
			goto end;

		case MQTT_FN_REASON_STRING:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!istlen(mpkt.data.connack.var_hdr.props.reason_string))
				goto not_found_or_invalid;
			res = mpkt.data.connack.var_hdr.props.reason_string;
			goto end;

		case MQTT_FN_RECEIVE_MAXIMUM:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.receive_maximum))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_TOPIC_ALIAS_MAXIMUM:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.topic_alias_maximum))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_MAXIMUM_QOS:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.maximum_qos))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_RETAIN_AVAILABLE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.retain_available))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_MAXIMUM_PACKET_SIZE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.maximum_packet_size))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_WILDCARD_SUBSCRIPTION_AVAILABLE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.wildcard_subscription_available))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_SUBSCRIPTION_IDENTIFIERS_AVAILABLE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.subscription_identifiers_available))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		case MQTT_FN_SHARED_SUBSCRIPTION_AVAILABLE:
			if (mpkt.data.connack.var_hdr.protocol_version != MQTT_VERSION_5_0)
				goto not_found_or_invalid;
			if (!mqtt_uint2str(trash, mpkt.data.connack.var_hdr.props.shared_subsription_available))
				goto not_found_or_invalid;
			res = ist2(trash->area, trash->data);
			goto end;

		default:
			goto not_found_or_invalid;
		}
		break;

	default:
		/* only CONNECT and CONNACK are supported */
		goto not_found_or_invalid;
	}

  end:
	return res;

  need_more:
	/* incomplete message: return the original pointer with a zero length */
	return ist2(istptr(msg), 0);

  not_found_or_invalid:
	return IST_NULL;
}
+
/* Parses a CONNECT packet :
 * https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
 * https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718028
 * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901033
 *
 * <parser> should point right after the MQTT fixed header. The remaining length
 * was already checked, thus missing data is an error. On success, the result of
 * the parsing is stored in <mpkt>.
 *
 * Returns:
 *  MQTT_INVALID_MESSAGE if the CONNECT message is invalid
 *  MQTT_VALID_MESSAGE   if the CONNECT message looks valid
 */
static int mqtt_parse_connect(struct ist parser, struct mqtt_pkt *mpkt)
{
	/* The initial parser length is stored to verify at the end that
	 * exactly the announced remaining length was consumed. */
	size_t orig_len = istlen(parser);
	int ret = MQTT_INVALID_MESSAGE;

	/*
	 * parsing variable header
	 */
	/* read protocol_name: must be "MQTT" (3.1.1/5.0) or "MQIsdp" (3.1) */
	parser = mqtt_read_string(parser, &mpkt->data.connect.var_hdr.protocol_name);
	if (!isttest(parser) || !(isteqi(mpkt->data.connect.var_hdr.protocol_name, ist("MQTT")) || isteqi(mpkt->data.connect.var_hdr.protocol_name, ist("MQIsdp"))))
		goto end;

	/* read protocol_version: only 3.1, 3.1.1 and 5.0 are accepted */
	parser = mqtt_read_1byte_int(parser, &mpkt->data.connect.var_hdr.protocol_version);
	if (!isttest(parser))
		goto end;
	if (mpkt->data.connect.var_hdr.protocol_version != MQTT_VERSION_3_1 &&
	    mpkt->data.connect.var_hdr.protocol_version != MQTT_VERSION_3_1_1 &&
	    mpkt->data.connect.var_hdr.protocol_version != MQTT_VERSION_5_0)
		goto end;

	/* read flags */
	/* bit 0 is 'reserved' and must be set to 0 in CONNECT message flags */
	parser = mqtt_read_1byte_int(parser, &mpkt->data.connect.var_hdr.flags);
	if (!isttest(parser) || (mpkt->data.connect.var_hdr.flags & MQTT_CONNECT_FL_RESERVED))
		goto end;

	/* reject a message whose WILL flag is clear while both WILL_QOS bits
	 * are set. NOTE(review): this looks weaker than the spec requirement
	 * that WILL_QOS and WILL_RETAIN must be zero when WILL is clear —
	 * confirm whether partial QoS/RETAIN combinations are accepted on
	 * purpose.
	 */
	if ((mpkt->data.connect.var_hdr.flags & (MQTT_CONNECT_FL_WILL|MQTT_CONNECT_FL_WILL_QOS|MQTT_CONNECT_FL_WILL_RETAIN)) == MQTT_CONNECT_FL_WILL_QOS)
		goto end;

	/* read keepalive (seconds, 2 bytes) */
	parser = mqtt_read_2byte_int(parser, &mpkt->data.connect.var_hdr.keepalive);
	if (!isttest(parser))
		goto end;

	/* read properties, only available in MQTT_VERSION_5_0 */
	if (mpkt->data.connect.var_hdr.protocol_version == MQTT_VERSION_5_0) {
		struct ist props;
		unsigned int user_prop_idx = 0;
		uint64_t fields = 0;    /* bitmap of properties already seen (duplicates are invalid) */
		uint32_t plen = 0;

		/* properties are prefixed by their total length (varint);
		 * isolate them in <props> and advance <parser> past them */
		parser = mqtt_read_varint(parser, &plen);
		if (!isttest(parser) || istlen(parser) < plen)
			goto end;
		props = ist2(istptr(parser), plen);
		parser = istadv(parser, props.len);

		/* each property is a 1-byte identifier followed by its value */
		while (istlen(props) > 0) {
			switch (*istptr(props)) {
			case MQTT_PROP_SESSION_EXPIRY_INTERVAL:
				if (fields & MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL)
					goto end;
				props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.session_expiry_interval);
				fields |= MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL;
				break;

			case MQTT_PROP_RECEIVE_MAXIMUM:
				if (fields & MQTT_FN_BIT_RECEIVE_MAXIMUM)
					goto end;
				props = mqtt_read_2byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.receive_maximum);
				/* cannot be 0 */
				if (!mpkt->data.connect.var_hdr.props.receive_maximum)
					goto end;
				fields |= MQTT_FN_BIT_RECEIVE_MAXIMUM;
				break;

			case MQTT_PROP_MAXIMUM_PACKET_SIZE:
				if (fields & MQTT_FN_BIT_MAXIMUM_PACKET_SIZE)
					goto end;
				props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.maximum_packet_size);
				/* cannot be 0 */
				if (!mpkt->data.connect.var_hdr.props.maximum_packet_size)
					goto end;
				fields |= MQTT_FN_BIT_MAXIMUM_PACKET_SIZE;
				break;

			case MQTT_PROP_TOPIC_ALIAS_MAXIMUM:
				if (fields & MQTT_FN_BIT_TOPIC_ALIAS)
					goto end;
				props = mqtt_read_2byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.topic_alias_maximum);
				fields |= MQTT_FN_BIT_TOPIC_ALIAS;
				break;

			case MQTT_PROP_REQUEST_RESPONSE_INFORMATION:
				if (fields & MQTT_FN_BIT_REQUEST_RESPONSE_INFORMATION)
					goto end;
				props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.request_response_information);
				/* can have only 2 values: 0 or 1 */
				if (mpkt->data.connect.var_hdr.props.request_response_information > 1)
					goto end;
				fields |= MQTT_FN_BIT_REQUEST_RESPONSE_INFORMATION;
				break;

			case MQTT_PROP_REQUEST_PROBLEM_INFORMATION:
				if (fields & MQTT_FN_BIT_REQUEST_PROBLEM_INFORMATION)
					goto end;
				props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connect.var_hdr.props.request_problem_information);
				/* can have only 2 values: 0 or 1 */
				if (mpkt->data.connect.var_hdr.props.request_problem_information > 1)
					goto end;
				fields |= MQTT_FN_BIT_REQUEST_PROBLEM_INFORMATION;
				break;

			case MQTT_PROP_USER_PROPERTIES:
				/* if we reached MQTT_PROP_USER_PROPERTY_ENTRIES already, then
				 * we start writing over the first property */
				if (user_prop_idx >= MQTT_PROP_USER_PROPERTY_ENTRIES)
					user_prop_idx = 0;

				/* read user property name and value */
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.var_hdr.props.user_props[user_prop_idx].name);
				if (!isttest(props))
					goto end;
				props = mqtt_read_string(props, &mpkt->data.connect.var_hdr.props.user_props[user_prop_idx].value);
				++user_prop_idx;
				break;

			case MQTT_PROP_AUTHENTICATION_METHOD:
				if (fields & MQTT_FN_BIT_AUTHENTICATION_METHOD)
					goto end;
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.var_hdr.props.authentication_method);
				fields |= MQTT_FN_BIT_AUTHENTICATION_METHOD;
				break;

			case MQTT_PROP_AUTHENTICATION_DATA:
				if (fields & MQTT_FN_BIT_AUTHENTICATION_DATA)
					goto end;
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.var_hdr.props.authentication_data);
				fields |= MQTT_FN_BIT_AUTHENTICATION_DATA;
				break;

			default:
				/* unknown property identifier */
				goto end;
			}

			/* a reader above may have returned IST_NULL on a short value */
			if (!isttest(props))
				goto end;
		}
	}

	/* cannot have auth data without auth method */
	if (!istlen(mpkt->data.connect.var_hdr.props.authentication_method) &&
	    istlen(mpkt->data.connect.var_hdr.props.authentication_data))
		goto end;

	/* parsing payload
	 *
	 * Content of payload is related to flags parsed above and the field order is pre-defined:
	 *   Client Identifier, Will Topic, Will Message, User Name, Password
	 */
	/* read client identifier */
	parser = mqtt_read_string(parser, &mpkt->data.connect.payload.client_identifier);
	if (!isttest(parser))
		goto end;

	/* read Will Properties, for MQTT v5 only
	 * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901060
	 */
	if ((mpkt->data.connect.var_hdr.protocol_version == MQTT_VERSION_5_0) &&
	    (mpkt->data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL)) {
		struct ist props;
		unsigned int user_prop_idx = 0;
		uint64_t fields = 0;    /* bitmap of Will properties already seen */
		uint32_t plen = 0;

		parser = mqtt_read_varint(parser, &plen);
		if (!isttest(parser) || istlen(parser) < plen)
			goto end;
		props = ist2(istptr(parser), plen);
		parser = istadv(parser, props.len);

		while (istlen(props) > 0) {
			switch (*istptr(props)) {
			case MQTT_PROP_WILL_DELAY_INTERVAL:
				if (fields & MQTT_FN_BIT_DELAY_INTERVAL)
					goto end;
				props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connect.payload.will_props.delay_interval);
				fields |= MQTT_FN_BIT_DELAY_INTERVAL;
				break;

			case MQTT_PROP_PAYLOAD_FORMAT_INDICATOR:
				if (fields & MQTT_FN_BIT_PAYLOAD_FORMAT_INDICATOR)
					goto end;
				props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connect.payload.will_props.payload_format_indicator);
				/* can have only 2 values: 0 or 1 */
				if (mpkt->data.connect.payload.will_props.payload_format_indicator > 1)
					goto end;
				fields |= MQTT_FN_BIT_PAYLOAD_FORMAT_INDICATOR;
				break;

			case MQTT_PROP_MESSAGE_EXPIRY_INTERVAL:
				if (fields & MQTT_FN_BIT_MESSAGE_EXPIRY_INTERVAL)
					goto end;
				props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connect.payload.will_props.message_expiry_interval);
				fields |= MQTT_FN_BIT_MESSAGE_EXPIRY_INTERVAL;
				break;

			case MQTT_PROP_CONTENT_TYPE:
				if (fields & MQTT_FN_BIT_CONTENT_TYPE)
					goto end;
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.payload.will_props.content_type);
				fields |= MQTT_FN_BIT_CONTENT_TYPE;
				break;

			case MQTT_PROP_RESPONSE_TOPIC:
				if (fields & MQTT_FN_BIT_RESPONSE_TOPIC)
					goto end;
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.payload.will_props.response_topic);
				fields |= MQTT_FN_BIT_RESPONSE_TOPIC;
				break;

			case MQTT_PROP_CORRELATION_DATA:
				if (fields & MQTT_FN_BIT_CORRELATION_DATA)
					goto end;
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.payload.will_props.correlation_data);
				fields |= MQTT_FN_BIT_CORRELATION_DATA;
				break;

			case MQTT_PROP_USER_PROPERTIES:
				/* if we reached MQTT_PROP_USER_PROPERTY_ENTRIES already, then
				 * we start writing over the first property */
				if (user_prop_idx >= MQTT_PROP_USER_PROPERTY_ENTRIES)
					user_prop_idx = 0;

				/* read user property name and value */
				props = mqtt_read_string(istnext(props), &mpkt->data.connect.payload.will_props.user_props[user_prop_idx].name);
				if (!isttest(props))
					goto end;
				props = mqtt_read_string(props, &mpkt->data.connect.payload.will_props.user_props[user_prop_idx].value);
				++user_prop_idx;
				break;

			default:
				/* unknown Will property identifier */
				goto end;
			}

			if (!isttest(props))
				goto end;
		}
	}

	/* read Will Topic and Will Message (MQTT 3.1.1) or Payload (MQTT 5.0) */
	if (mpkt->data.connect.var_hdr.flags & MQTT_CONNECT_FL_WILL) {
		parser = mqtt_read_string(parser, &mpkt->data.connect.payload.will_topic);
		if (!isttest(parser))
			goto end;
		parser = mqtt_read_string(parser, &mpkt->data.connect.payload.will_payload);
		if (!isttest(parser))
			goto end;
	}

	/* read User Name */
	if (mpkt->data.connect.var_hdr.flags & MQTT_CONNECT_FL_USERNAME) {
		parser = mqtt_read_string(parser, &mpkt->data.connect.payload.username);
		if (!isttest(parser))
			goto end;
	}

	/* read Password */
	if (mpkt->data.connect.var_hdr.flags & MQTT_CONNECT_FL_PASSWORD) {
		parser = mqtt_read_string(parser, &mpkt->data.connect.payload.password);
		if (!isttest(parser))
			goto end;
	}

	/* the message is only valid if exactly the announced remaining length
	 * was consumed */
	if ((orig_len - istlen(parser)) == mpkt->fixed_hdr.remaining_length)
		ret = MQTT_VALID_MESSAGE;

  end:
	return ret;
}
+
+/* Parses a CONNACK packet :
+ * https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718033
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901074
+ *
+ * <parser> should point right after the MQTT fixed header. The remaining length
+ * was already checked, thus missing data is an error. On success, the result of
+ * the parsing is stored in <mpkt>.
+ *
+ * Returns:
+ * MQTT_INVALID_MESSAGE if the CONNECT message is invalid
+ * MQTT_VALID_MESSAGE if the CONNECT message looks valid
+ */
+static int mqtt_parse_connack(struct ist parser, struct mqtt_pkt *mpkt)
+{
+ /* The parser length is stored to be sure exactly consumed the announced
+ * remaining length. */
+ size_t orig_len = istlen(parser);
+ int ret = MQTT_INVALID_MESSAGE;
+
+ if (istlen(parser) < 2)
+ goto end;
+ else if (istlen(parser) == 2)
+ mpkt->data.connack.var_hdr.protocol_version = MQTT_VERSION_3_1_1;
+ else
+ mpkt->data.connack.var_hdr.protocol_version = MQTT_VERSION_5_0;
+
+ /*
+ * parsing variable header
+ */
+ /* read flags */
+ /* bits 7 to 1 on flags are reserved and must be 0 */
+ parser = mqtt_read_1byte_int(parser, &mpkt->data.connack.var_hdr.flags);
+ if (!isttest(parser) || (mpkt->data.connack.var_hdr.flags & 0xFE))
+ goto end;
+
+ /* read reason_code */
+ parser = mqtt_read_1byte_int(parser, &mpkt->data.connack.var_hdr.reason_code);
+ if (!isttest(parser))
+ goto end;
+
+ /* we can leave here for MQTT 3.1.1 */
+ if (mpkt->data.connack.var_hdr.protocol_version == MQTT_VERSION_3_1_1) {
+ if ((orig_len - istlen(parser)) == mpkt->fixed_hdr.remaining_length)
+ ret = MQTT_VALID_MESSAGE;
+ goto end;
+ }
+
+ /* read properties, only available in MQTT_VERSION_5_0 */
+ if (mpkt->data.connack.var_hdr.protocol_version == MQTT_VERSION_5_0) {
+ struct ist props;
+ unsigned int user_prop_idx = 0;
+ uint64_t fields = 0;
+ uint32_t plen = 0;
+
+ parser = mqtt_read_varint(parser, &plen);
+ if (!isttest(parser) || istlen(parser) < plen)
+ goto end;
+ props = ist2(istptr(parser), plen);
+ parser = istadv(parser, props.len);
+
+ while (istlen(props) > 0) {
+ switch (*istptr(props)) {
+ case MQTT_PROP_SESSION_EXPIRY_INTERVAL:
+ if (fields & MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL)
+ goto end;
+ props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.session_expiry_interval);
+ fields |= MQTT_FN_BIT_SESSION_EXPIRY_INTERVAL;
+ break;
+
+ case MQTT_PROP_RECEIVE_MAXIMUM:
+ if (fields & MQTT_FN_BIT_RECEIVE_MAXIMUM)
+ goto end;
+ props = mqtt_read_2byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.receive_maximum);
+ /* cannot be 0 */
+ if (!mpkt->data.connack.var_hdr.props.receive_maximum)
+ goto end;
+ fields |= MQTT_FN_BIT_RECEIVE_MAXIMUM;
+ break;
+
+ case MQTT_PROP_MAXIMUM_QOS:
+ if (fields & MQTT_FN_BIT_MAXIMUM_QOS)
+ goto end;
+ props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.maximum_qos);
+ /* can have only 2 values: 0 or 1 */
+ if (mpkt->data.connack.var_hdr.props.maximum_qos > 1)
+ goto end;
+ fields |= MQTT_FN_BIT_MAXIMUM_QOS;
+ break;
+
+ case MQTT_PROP_RETAIN_AVAILABLE:
+ if (fields & MQTT_FN_BIT_RETAIN_AVAILABLE)
+ goto end;
+ props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.retain_available);
+ /* can have only 2 values: 0 or 1 */
+ if (mpkt->data.connack.var_hdr.props.retain_available > 1)
+ goto end;
+ fields |= MQTT_FN_BIT_RETAIN_AVAILABLE;
+ break;
+
+ case MQTT_PROP_MAXIMUM_PACKET_SIZE:
+ if (fields & MQTT_FN_BIT_MAXIMUM_PACKET_SIZE)
+ goto end;
+ props = mqtt_read_4byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.maximum_packet_size);
+ /* cannot be 0 */
+ if (!mpkt->data.connack.var_hdr.props.maximum_packet_size)
+ goto end;
+ fields |= MQTT_FN_BIT_MAXIMUM_PACKET_SIZE;
+ break;
+
+ case MQTT_PROP_ASSIGNED_CLIENT_IDENTIFIER:
+ if (fields & MQTT_FN_BIT_ASSIGNED_CLIENT_IDENTIFIER)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.assigned_client_identifier);
+ if (!istlen(mpkt->data.connack.var_hdr.props.assigned_client_identifier))
+ goto end;
+ fields |= MQTT_FN_BIT_ASSIGNED_CLIENT_IDENTIFIER;
+ break;
+
+ case MQTT_PROP_TOPIC_ALIAS_MAXIMUM:
+ if (fields & MQTT_FN_BIT_TOPIC_ALIAS_MAXIMUM)
+ goto end;
+ props = mqtt_read_2byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.topic_alias_maximum);
+ fields |= MQTT_FN_BIT_TOPIC_ALIAS_MAXIMUM;
+ break;
+
+ case MQTT_PROP_REASON_STRING:
+ if (fields & MQTT_FN_BIT_REASON_STRING)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.reason_string);
+ fields |= MQTT_FN_BIT_REASON_STRING;
+ break;
+
+ case MQTT_PROP_WILDCARD_SUBSCRIPTION_AVAILABLE:
+ if (fields & MQTT_FN_BIT_WILDCARD_SUBSCRIPTION_AVAILABLE)
+ goto end;
+ props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.wildcard_subscription_available);
+ /* can have only 2 values: 0 or 1 */
+ if (mpkt->data.connack.var_hdr.props.wildcard_subscription_available > 1)
+ goto end;
+ fields |= MQTT_FN_BIT_WILDCARD_SUBSCRIPTION_AVAILABLE;
+ break;
+
+ case MQTT_PROP_SUBSCRIPTION_IDENTIFIERS_AVAILABLE:
+ if (fields & MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIER)
+ goto end;
+ props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.subscription_identifiers_available);
+ /* can have only 2 values: 0 or 1 */
+ if (mpkt->data.connack.var_hdr.props.subscription_identifiers_available > 1)
+ goto end;
+ fields |= MQTT_FN_BIT_SUBSCRIPTION_IDENTIFIER;
+ break;
+
+ case MQTT_PROP_SHARED_SUBSRIPTION_AVAILABLE:
+ if (fields & MQTT_FN_BIT_SHARED_SUBSCRIPTION_AVAILABLE)
+ goto end;
+ props = mqtt_read_1byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.shared_subsription_available);
+ /* can have only 2 values: 0 or 1 */
+ if (mpkt->data.connack.var_hdr.props.shared_subsription_available > 1)
+ goto end;
+ fields |= MQTT_FN_BIT_SHARED_SUBSCRIPTION_AVAILABLE;
+ break;
+
+ case MQTT_PROP_SERVER_KEEPALIVE:
+ if (fields & MQTT_FN_BIT_SERVER_KEEPALIVE)
+ goto end;
+ props = mqtt_read_2byte_int(istnext(props), &mpkt->data.connack.var_hdr.props.server_keepalive);
+ fields |= MQTT_FN_BIT_SERVER_KEEPALIVE;
+ break;
+
+ case MQTT_PROP_RESPONSE_INFORMATION:
+ if (fields & MQTT_FN_BIT_RESPONSE_INFORMATION)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.response_information);
+ fields |= MQTT_FN_BIT_RESPONSE_INFORMATION;
+ break;
+
+ case MQTT_PROP_SERVER_REFERENCE:
+ if (fields & MQTT_FN_BIT_SERVER_REFERENCE)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.server_reference);
+ fields |= MQTT_FN_BIT_SERVER_REFERENCE;
+ break;
+
+ case MQTT_PROP_USER_PROPERTIES:
+ /* if we reached MQTT_PROP_USER_PROPERTY_ENTRIES already, then
+ * we start writing over the first property */
+ if (user_prop_idx >= MQTT_PROP_USER_PROPERTY_ENTRIES)
+ user_prop_idx = 0;
+
+ /* read user property name and value */
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.user_props[user_prop_idx].name);
+ if (!isttest(props))
+ goto end;
+ props = mqtt_read_string(props, &mpkt->data.connack.var_hdr.props.user_props[user_prop_idx].value);
+ ++user_prop_idx;
+ break;
+
+ case MQTT_PROP_AUTHENTICATION_METHOD:
+ if (fields & MQTT_FN_BIT_AUTHENTICATION_METHOD)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.authentication_method);
+ fields |= MQTT_FN_BIT_AUTHENTICATION_METHOD;
+ break;
+
+ case MQTT_PROP_AUTHENTICATION_DATA:
+ if (fields & MQTT_FN_BIT_AUTHENTICATION_DATA)
+ goto end;
+ props = mqtt_read_string(istnext(props), &mpkt->data.connack.var_hdr.props.authentication_data);
+ fields |= MQTT_FN_BIT_AUTHENTICATION_DATA;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (!isttest(props))
+ goto end;
+ }
+ }
+
+ if ((orig_len - istlen(parser)) == mpkt->fixed_hdr.remaining_length)
+ ret = MQTT_VALID_MESSAGE;
+ end:
+ return ret;
+}
+
+
+/* Parses and validates a MQTT packet
+ * https://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718028
+ *
+ * For now, due to HAProxy limitation, only validation of CONNECT and CONNACK packets
+ * are supported.
+ *
+ * - check FIXED_HDR
+ * - check remaining length
+ * - check variable headers and payload
+ *
+ * if <mpkt> is not NULL, then this structure will be filled up as well. An
+ * unsupported packet type is considered as invalid. It is not a problem for now
+ * because only the first packet on each side can be parsed (CONNECT for the
+ * client and CONNACK for the server).
+ *
+ * Returns:
+ * MQTT_INVALID_MESSAGE if the message is invalid
+ * MQTT_NEED_MORE_DATA if we need more data to fully validate the message
+ * MQTT_VALID_MESSAGE if the message looks valid
+ */
+int mqtt_validate_message(const struct ist msg, struct mqtt_pkt *mpkt)
+{
+ struct ist parser;
+ struct mqtt_pkt tmp_mpkt;
+ int ret = MQTT_INVALID_MESSAGE;
+
+ if (!mpkt)
+ mpkt = &tmp_mpkt;
+ memset(mpkt, 0, sizeof(*mpkt));
+
+ parser = msg;
+ if (istlen(msg) < MQTT_MIN_PKT_SIZE) {
+ ret = MQTT_NEED_MORE_DATA;
+ goto end;
+ }
+
+ /* parse the MQTT fixed header */
+ parser = mqtt_read_fixed_hdr(parser, mpkt);
+ if (!isttest(parser)) {
+ ret = MQTT_INVALID_MESSAGE;
+ goto end;
+ }
+
+ /* Now parsing "remaining length" field */
+ parser = mqtt_read_varint(parser, &mpkt->fixed_hdr.remaining_length);
+ if (!isttest(parser)) {
+ ret = MQTT_INVALID_MESSAGE;
+ goto end;
+ }
+
+ if (istlen(parser) < mpkt->fixed_hdr.remaining_length)
+ return MQTT_NEED_MORE_DATA;
+
+ /* Now parsing the variable header and payload, which is based on the packet type */
+ switch (mpkt->fixed_hdr.type) {
+ case MQTT_CPT_CONNECT:
+ ret = mqtt_parse_connect(parser, mpkt);
+ break;
+ case MQTT_CPT_CONNACK:
+ ret = mqtt_parse_connack(parser, mpkt);
+ break;
+ default:
+ break;
+ }
+
+ end:
+ return ret;
+}
diff --git a/src/mux_fcgi.c b/src/mux_fcgi.c
new file mode 100644
index 0000000..0230e6b
--- /dev/null
+++ b/src/mux_fcgi.c
@@ -0,0 +1,4268 @@
+/*
+ * FastCGI mux-demux for connections
+ *
+ * Copyright (C) 2019 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/ist.h>
+#include <import/eb32tree.h>
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/errors.h>
+#include <haproxy/fcgi-app.h>
+#include <haproxy/fcgi.h>
+#include <haproxy/h1.h>
+#include <haproxy/h1_htx.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/mux_fcgi-t.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/trace.h>
+#include <haproxy/version.h>
+
+/* 32 buffers: one for the ring's root, rest for the mbuf itself */
+#define FCGI_C_MBUF_CNT 32
+
+/* Size for a record header (also size of empty record) */
+#define FCGI_RECORD_HEADER_SZ 8
+
+/* FCGI connection descriptor */
+struct fcgi_conn {
+ struct connection *conn;
+
+ enum fcgi_conn_st state; /* FCGI connection state */
+ int16_t max_id; /* highest ID known on this connection, <0 before mgmt records */
+ uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
+ uint32_t flags; /* Connection flags: FCGI_CF_* */
+
+ int16_t dsi; /* dmux stream ID (<0 = idle ) */
+ uint16_t drl; /* demux record length (if dsi >= 0) */
+ uint8_t drt; /* demux record type (if dsi >= 0) */
+ uint8_t drp; /* demux record padding (if dsi >= 0) */
+
+ struct buffer dbuf; /* demux buffer */
+ struct buffer mbuf[FCGI_C_MBUF_CNT]; /* mux buffers (ring) */
+
+ int timeout; /* idle timeout duration in ticks */
+ int shut_timeout; /* idle timeout duration in ticks after shutdown */
+ unsigned int nb_streams; /* number of streams in the tree */
+ unsigned int nb_sc; /* number of attached stream connectors */
+ unsigned int nb_reserved; /* number of reserved streams */
+ unsigned int stream_cnt; /* total number of streams seen */
+
+ struct proxy *proxy; /* the proxy this connection was created for */
+ struct fcgi_app *app; /* FCGI application used by this mux */
+ struct task *task; /* timeout management task */
+ struct eb_root streams_by_id; /* all active streams by their ID */
+
+ struct list send_list; /* list of blocked streams requesting to send */
+
+ struct buffer_wait buf_wait; /* Wait list for buffer allocation */
+ struct wait_event wait_event; /* To be used if we're waiting for I/Os */
+};
+
+
+/* FCGI stream descriptor */
+struct fcgi_strm {
+ struct sedesc *sd;
+ struct session *sess;
+ struct fcgi_conn *fconn;
+
+ int32_t id; /* stream ID */
+
+ uint32_t flags; /* Connection flags: FCGI_SF_* */
+ enum fcgi_strm_st state; /* FCGI stream state */
+ int proto_status; /* FCGI_PS_* */
+
+ struct h1m h1m; /* response parser state for H1 */
+
+ struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
+
+ struct eb32_node by_id; /* place in fcgi_conn's streams_by_id */
+ struct wait_event *subs; /* Address of the wait_event the stream connector associated is waiting on */
+ struct list send_list; /* To be used when adding in fcgi_conn->send_list */
+ struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to close after we failed to by lack of space */
+};
+
+/* Flags representing all default FCGI parameters */
+#define FCGI_SP_CGI_GATEWAY 0x00000001
+#define FCGI_SP_DOC_ROOT 0x00000002
+#define FCGI_SP_SCRIPT_NAME 0x00000004
+#define FCGI_SP_PATH_INFO 0x00000008
+#define FCGI_SP_REQ_URI 0x00000010
+#define FCGI_SP_REQ_METH 0x00000020
+#define FCGI_SP_REQ_QS 0x00000040
+#define FCGI_SP_SRV_PORT 0x00000080
+#define FCGI_SP_SRV_PROTO 0x00000100
+#define FCGI_SP_SRV_NAME 0x00000200
+#define FCGI_SP_REM_ADDR 0x00000400
+#define FCGI_SP_REM_PORT 0x00000800
+#define FCGI_SP_SCRIPT_FILE 0x00001000
+#define FCGI_SP_PATH_TRANS 0x00002000
+#define FCGI_SP_CONT_LEN 0x00004000
+#define FCGI_SP_HTTPS 0x00008000
+#define FCGI_SP_SRV_SOFT 0x00010000
+#define FCGI_SP_MASK 0x0001FFFF
+#define FCGI_SP_URI_MASK (FCGI_SP_SCRIPT_NAME|FCGI_SP_PATH_INFO|FCGI_SP_REQ_QS)
+
+/* FCGI parameters used when PARAMS record is sent */
+struct fcgi_strm_params {
+ uint32_t mask;
+ struct ist docroot;
+ struct ist scriptname;
+ struct ist pathinfo;
+ struct ist meth;
+ struct ist uri;
+ struct ist vsn;
+ struct ist qs;
+ struct ist srv_name;
+ struct ist srv_port;
+ struct ist rem_addr;
+ struct ist rem_port;
+ struct ist cont_len;
+ struct ist srv_soft;
+ int https;
+ struct buffer *p;
+};
+
+/* Maximum amount of data we're OK with re-aligning for buffer optimizations */
+#define MAX_DATA_REALIGN 1024
+
+/* trace source and events */
+static void fcgi_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ * fconn - internal FCGI connection
+ * fstrm - internal FCGI stream
+ * strm - application layer
+ * rx - data receipt
+ * tx - data transmission
+ * rsp - response parsing
+ */
+static const struct trace_event fcgi_trace_events[] = {
+#define FCGI_EV_FCONN_NEW (1ULL << 0)
+ { .mask = FCGI_EV_FCONN_NEW, .name = "fconn_new", .desc = "new FCGI connection" },
+#define FCGI_EV_FCONN_RECV (1ULL << 1)
+ { .mask = FCGI_EV_FCONN_RECV, .name = "fconn_recv", .desc = "Rx on FCGI connection" },
+#define FCGI_EV_FCONN_SEND (1ULL << 2)
+ { .mask = FCGI_EV_FCONN_SEND, .name = "fconn_send", .desc = "Tx on FCGI connection" },
+#define FCGI_EV_FCONN_BLK (1ULL << 3)
+ { .mask = FCGI_EV_FCONN_BLK, .name = "fconn_blk", .desc = "FCGI connection blocked" },
+#define FCGI_EV_FCONN_WAKE (1ULL << 4)
+ { .mask = FCGI_EV_FCONN_WAKE, .name = "fconn_wake", .desc = "FCGI connection woken up" },
+#define FCGI_EV_FCONN_END (1ULL << 5)
+ { .mask = FCGI_EV_FCONN_END, .name = "fconn_end", .desc = "FCGI connection terminated" },
+#define FCGI_EV_FCONN_ERR (1ULL << 6)
+ { .mask = FCGI_EV_FCONN_ERR, .name = "fconn_err", .desc = "error on FCGI connection" },
+
+#define FCGI_EV_RX_FHDR (1ULL << 7)
+ { .mask = FCGI_EV_RX_FHDR, .name = "rx_fhdr", .desc = "FCGI record header received" },
+#define FCGI_EV_RX_RECORD (1ULL << 8)
+ { .mask = FCGI_EV_RX_RECORD, .name = "rx_record", .desc = "receipt of any FCGI record" },
+#define FCGI_EV_RX_EOI (1ULL << 9)
+ { .mask = FCGI_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of FCGI input" },
+#define FCGI_EV_RX_GETVAL (1ULL << 10)
+ { .mask = FCGI_EV_RX_GETVAL, .name = "rx_get_values", .desc = "receipt of FCGI GET_VALUES_RESULT record" },
+#define FCGI_EV_RX_STDOUT (1ULL << 11)
+ { .mask = FCGI_EV_RX_STDOUT, .name = "rx_stdout", .desc = "receipt of FCGI STDOUT record" },
+#define FCGI_EV_RX_STDERR (1ULL << 12)
+ { .mask = FCGI_EV_RX_STDERR, .name = "rx_stderr", .desc = "receipt of FCGI STDERR record" },
+#define FCGI_EV_RX_ENDREQ (1ULL << 13)
+ { .mask = FCGI_EV_RX_ENDREQ, .name = "rx_end_req", .desc = "receipt of FCGI END_REQUEST record" },
+
+#define FCGI_EV_TX_RECORD (1ULL << 14)
+ { .mask = FCGI_EV_TX_RECORD, .name = "tx_record", .desc = "transmission of any FCGI record" },
+#define FCGI_EV_TX_EOI (1ULL << 15)
+ { .mask = FCGI_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of FCGI end of input" },
+#define FCGI_EV_TX_BEGREQ (1ULL << 16)
+ { .mask = FCGI_EV_TX_BEGREQ, .name = "tx_begin_request", .desc = "transmission of FCGI BEGIN_REQUEST record" },
+#define FCGI_EV_TX_GETVAL (1ULL << 17)
+ { .mask = FCGI_EV_TX_GETVAL, .name = "tx_get_values", .desc = "transmission of FCGI GET_VALUES record" },
+#define FCGI_EV_TX_PARAMS (1ULL << 18)
+ { .mask = FCGI_EV_TX_PARAMS, .name = "tx_params", .desc = "transmission of FCGI PARAMS record" },
+#define FCGI_EV_TX_STDIN (1ULL << 19)
+ { .mask = FCGI_EV_TX_STDIN, .name = "tx_stding", .desc = "transmission of FCGI STDIN record" },
+#define FCGI_EV_TX_ABORT (1ULL << 20)
+ { .mask = FCGI_EV_TX_ABORT, .name = "tx_abort", .desc = "transmission of FCGI ABORT record" },
+
+#define FCGI_EV_RSP_DATA (1ULL << 21)
+ { .mask = FCGI_EV_RSP_DATA, .name = "rsp_data", .desc = "parse any data of H1 response" },
+#define FCGI_EV_RSP_EOM (1ULL << 22)
+ { .mask = FCGI_EV_RSP_EOM, .name = "rsp_eom", .desc = "reach the end of message of H1 response" },
+#define FCGI_EV_RSP_HDRS (1ULL << 23)
+ { .mask = FCGI_EV_RSP_HDRS, .name = "rsp_headers", .desc = "parse headers of H1 response" },
+#define FCGI_EV_RSP_BODY (1ULL << 24)
+ { .mask = FCGI_EV_RSP_BODY, .name = "rsp_body", .desc = "parse body part of H1 response" },
+#define FCGI_EV_RSP_TLRS (1ULL << 25)
+ { .mask = FCGI_EV_RSP_TLRS, .name = "rsp_trailerus", .desc = "parse trailers of H1 response" },
+
+#define FCGI_EV_FSTRM_NEW (1ULL << 26)
+ { .mask = FCGI_EV_FSTRM_NEW, .name = "fstrm_new", .desc = "new FCGI stream" },
+#define FCGI_EV_FSTRM_BLK (1ULL << 27)
+ { .mask = FCGI_EV_FSTRM_BLK, .name = "fstrm_blk", .desc = "FCGI stream blocked" },
+#define FCGI_EV_FSTRM_END (1ULL << 28)
+ { .mask = FCGI_EV_FSTRM_END, .name = "fstrm_end", .desc = "FCGI stream terminated" },
+#define FCGI_EV_FSTRM_ERR (1ULL << 29)
+ { .mask = FCGI_EV_FSTRM_ERR, .name = "fstrm_err", .desc = "error on FCGI stream" },
+
+#define FCGI_EV_STRM_NEW (1ULL << 30)
+ { .mask = FCGI_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
+#define FCGI_EV_STRM_RECV (1ULL << 31)
+ { .mask = FCGI_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
+#define FCGI_EV_STRM_SEND (1ULL << 32)
+ { .mask = FCGI_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
+#define FCGI_EV_STRM_FULL (1ULL << 33)
+ { .mask = FCGI_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
+#define FCGI_EV_STRM_WAKE (1ULL << 34)
+ { .mask = FCGI_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
+#define FCGI_EV_STRM_SHUT (1ULL << 35)
+ { .mask = FCGI_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
+#define FCGI_EV_STRM_END (1ULL << 36)
+ { .mask = FCGI_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
+#define FCGI_EV_STRM_ERR (1ULL << 37)
+ { .mask = FCGI_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
+
+ { }
+};
+
+static const struct name_desc fcgi_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="fstrm", .desc="FCGI stream" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+
+static const struct name_desc fcgi_trace_decoding[] = {
+#define FCGI_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define FCGI_VERB_MINIMAL 2
+ { .name="minimal", .desc="report only fconn/fstrm state and flags, no real decoding" },
+#define FCGI_VERB_SIMPLE 3
+ { .name="simple", .desc="add request/response status line or htx info when available" },
+#define FCGI_VERB_ADVANCED 4
+ { .name="advanced", .desc="add header fields or record decoding when available" },
+#define FCGI_VERB_COMPLETE 5
+ { .name="complete", .desc="add full data dump when available" },
+ { /* end */ }
+};
+
+static struct trace_source trace_fcgi __read_mostly = {
+ .name = IST("fcgi"),
+ .desc = "FastCGI multiplexer",
+ .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
+ .default_cb = fcgi_trace,
+ .known_events = fcgi_trace_events,
+ .lockon_args = fcgi_trace_lockon_args,
+ .decoding = fcgi_trace_decoding,
+ .report_events = ~0, // report everything by default
+};
+
+#define TRACE_SOURCE &trace_fcgi
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* FCGI connection and stream pools */
+DECLARE_STATIC_POOL(pool_head_fcgi_conn, "fcgi_conn", sizeof(struct fcgi_conn));
+DECLARE_STATIC_POOL(pool_head_fcgi_strm, "fcgi_strm", sizeof(struct fcgi_strm));
+
+struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state);
+static int fcgi_process(struct fcgi_conn *fconn);
+/* fcgi_io_cb is exported to see it resolved in "show fd" */
+struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state);
+static inline struct fcgi_strm *fcgi_conn_st_by_id(struct fcgi_conn *fconn, int id);
+struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned int state);
+static struct fcgi_strm *fcgi_stconn_new(struct fcgi_conn *fconn, struct stconn *sc, struct session *sess);
+static void fcgi_strm_notify_recv(struct fcgi_strm *fstrm);
+static void fcgi_strm_notify_send(struct fcgi_strm *fstrm);
+static void fcgi_strm_alert(struct fcgi_strm *fstrm);
+static int fcgi_strm_send_abort(struct fcgi_conn *fconn, struct fcgi_strm *fstrm);
+
+/* a dummy closed endpoint */
+static const struct sedesc closed_ep = {
+ .sc = NULL,
+ .flags = SE_FL_DETACHED,
+};
+
+/* a dummy management stream */
+static const struct fcgi_strm *fcgi_mgmt_stream = &(const struct fcgi_strm){
+ .sd = (struct sedesc*)&closed_ep,
+ .fconn = NULL,
+ .state = FCGI_SS_CLOSED,
+ .flags = FCGI_SF_NONE,
+ .id = 0,
+};
+
+/* and a dummy idle stream for use with any unknown stream */
+static const struct fcgi_strm *fcgi_unknown_stream = &(const struct fcgi_strm){
+ .sd = (struct sedesc*)&closed_ep,
+ .fconn = NULL,
+ .state = FCGI_SS_IDLE,
+ .flags = FCGI_SF_NONE,
+ .id = 0,
+};
+
+/* returns the stconn associated to the FCGI stream */
+static forceinline struct stconn *fcgi_strm_sc(const struct fcgi_strm *fstrm)
+{
+ /* may be NULL, e.g. for the dummy closed/idle streams whose sedesc
+  * points to the closed endpoint above
+  */
+ return fstrm->sd->sc;
+}
+
+
+/* the FCGI traces always expect that arg1, if non-null, is of type connection
+ * (from which we can derive fconn), that arg2, if non-null, is of type fstrm,
+ * and that arg3, if non-null, is a htx for rx/tx headers.
+ */
+static void fcgi_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ const struct connection *conn = a1;
+ struct fcgi_conn *fconn = conn ? conn->ctx : NULL;
+ const struct fcgi_strm *fstrm = a2;
+ const struct htx *htx = a3;
+ const size_t *val = a4;
+
+ /* derive the connection from the stream when only arg2 was passed */
+ if (!fconn)
+ fconn = (fstrm ? fstrm->fconn : NULL);
+
+ if (!fconn || src->verbosity < FCGI_VERB_CLEAN)
+ return;
+
+ /* Display the response state if fstrm is defined */
+ if (fstrm)
+ chunk_appendf(&trace_buf, " [rsp:%s]", h1m_state_str(fstrm->h1m.state));
+
+ if (src->verbosity == FCGI_VERB_CLEAN)
+ return;
+
+ /* Display the value to the 4th argument (level > STATE).
+  * Note: cast to unsigned long to match the %lu conversion (a signed
+  * cast here would be a format/argument mismatch).
+  */
+ if (src->level > TRACE_LEVEL_STATE && val)
+ chunk_appendf(&trace_buf, " - VAL=%lu", (unsigned long)*val);
+
+ /* Display status-line if possible (verbosity > MINIMAL) */
+ if (src->verbosity > FCGI_VERB_MINIMAL && htx && htx_nbblks(htx)) {
+ const struct htx_blk *blk = __htx_get_head_blk(htx);
+ const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
+ chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+ }
+
+ /* Display fconn info and, if defined, fstrm info */
+ chunk_appendf(&trace_buf, " - fconn=%p(%s,0x%08x)", fconn, fconn_st_to_str(fconn->state), fconn->flags);
+ if (fstrm)
+ chunk_appendf(&trace_buf, " fstrm=%p(%d,%s,0x%08x)", fstrm, fstrm->id, fstrm_st_to_str(fstrm->state), fstrm->flags);
+
+ if (!fstrm || fstrm->id <= 0)
+ chunk_appendf(&trace_buf, " dsi=%d", fconn->dsi);
+ if (fconn->dsi >= 0 && (mask & FCGI_EV_RX_FHDR))
+ chunk_appendf(&trace_buf, " drt=%s", fcgi_rt_str(fconn->drt));
+
+ if (src->verbosity == FCGI_VERB_MINIMAL)
+ return;
+
+ /* Display mbuf and dbuf info (level > USER & verbosity > SIMPLE) */
+ if (src->level > TRACE_LEVEL_USER) {
+ if (src->verbosity == FCGI_VERB_COMPLETE ||
+ (src->verbosity == FCGI_VERB_ADVANCED && (mask & (FCGI_EV_FCONN_RECV|FCGI_EV_RX_RECORD))))
+ chunk_appendf(&trace_buf, " dbuf=%u@%p+%u/%u",
+ (unsigned int)b_data(&fconn->dbuf), b_orig(&fconn->dbuf),
+ (unsigned int)b_head_ofs(&fconn->dbuf), (unsigned int)b_size(&fconn->dbuf));
+ if (src->verbosity == FCGI_VERB_COMPLETE ||
+ (src->verbosity == FCGI_VERB_ADVANCED && (mask & (FCGI_EV_FCONN_SEND|FCGI_EV_TX_RECORD)))) {
+ struct buffer *hmbuf = br_head(fconn->mbuf);
+ struct buffer *tmbuf = br_tail(fconn->mbuf);
+
+ chunk_appendf(&trace_buf, " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
+ br_head_idx(fconn->mbuf), br_tail_idx(fconn->mbuf), br_size(fconn->mbuf),
+ (unsigned int)b_data(hmbuf), b_orig(hmbuf),
+ (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
+ (unsigned int)b_data(tmbuf), b_orig(tmbuf),
+ (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
+ }
+
+ if (fstrm && (src->verbosity == FCGI_VERB_COMPLETE ||
+ (src->verbosity == FCGI_VERB_ADVANCED && (mask & (FCGI_EV_STRM_RECV|FCGI_EV_RSP_DATA)))))
+ chunk_appendf(&trace_buf, " rxbuf=%u@%p+%u/%u",
+ (unsigned int)b_data(&fstrm->rxbuf), b_orig(&fstrm->rxbuf),
+ (unsigned int)b_head_ofs(&fstrm->rxbuf), (unsigned int)b_size(&fstrm->rxbuf));
+ }
+
+ /* Display htx info if defined (level > USER) */
+ if (src->level > TRACE_LEVEL_USER && htx) {
+ int full = 0;
+
+ /* Full htx info (level > STATE && verbosity > SIMPLE) */
+ if (src->level > TRACE_LEVEL_STATE) {
+ if (src->verbosity == FCGI_VERB_COMPLETE)
+ full = 1;
+ else if (src->verbosity == FCGI_VERB_ADVANCED && (mask & (FCGI_EV_RSP_HDRS|FCGI_EV_TX_PARAMS)))
+ full = 1;
+ }
+
+ chunk_memcat(&trace_buf, "\n\t", 2);
+ htx_dump(&trace_buf, htx, full);
+ }
+}
+
+/*****************************************************/
+/* functions below are for dynamic buffer management */
+/*****************************************************/
+
+/* Indicates whether or not we may call the fcgi_recv() function to attempt
+ * to receive data into the buffer and/or demux pending data. The condition is
+ * a bit complex due to some API limits for now. The rules are the following :
+ * - if an error or a shutdown was detected on the connection and the buffer
+ * is empty, we must not attempt to receive
+ * - if the demux buf failed to be allocated, we must not try to receive and
+ * we know there is nothing pending
+ * - if no flag indicates a blocking condition, we may attempt to receive,
+ * regardless of whether the demux buffer is full or not, so that only
+ * the demux part decides whether or not to block. This is needed because
+ * the connection API indeed prevents us from re-enabling receipt that is
+ * already enabled in a polled state, so we must always immediately stop
+ * as soon as the demux can't proceed so as never to hit an end of read
+ * with data pending in the buffers.
+ * - otherwise we must not attempt to receive
+ */
+static inline int fcgi_recv_allowed(const struct fcgi_conn *fconn)
+{
+ /* an error or a shutdown was already reported: never receive again */
+ if (fconn->flags & (FCGI_CF_EOS|FCGI_CF_ERROR))
+ return 0;
+
+ /* connection closed and nothing left to demux */
+ if (b_data(&fconn->dbuf) == 0 && fconn->state == FCGI_CS_CLOSED)
+ return 0;
+
+ /* OK as long as neither a dbuf allocation failure nor any demux
+  * blocking condition is currently reported
+  */
+ if (!(fconn->flags & FCGI_CF_DEM_DALLOC) &&
+ !(fconn->flags & FCGI_CF_DEM_BLOCK_ANY))
+ return 1;
+
+ return 0;
+}
+
+/* Restarts reading on the connection if it was not enabled */
+static inline void fcgi_conn_restart_reading(const struct fcgi_conn *fconn, int consider_buffer)
+{
+ if (!fcgi_recv_allowed(fconn))
+ return;
+ /* nothing to do when a recv subscription is already pending and there
+  * is no buffered data to demux (or the caller asked to ignore it)
+  */
+ if ((!consider_buffer || !b_data(&fconn->dbuf)) &&
+ (fconn->wait_event.events & SUB_RETRY_RECV))
+ return;
+ tasklet_wakeup(fconn->wait_event.tasklet);
+}
+
+
+/* Tries to grab a buffer and to re-enable processing on mux <target>. The
+ * fcgi_conn flags are used to figure what buffer was requested. It returns 1 if
+ * the allocation succeeds, in which case the connection is woken up, or 0 if
+ * it's impossible to wake up and we prefer to be woken up later.
+ */
+static int fcgi_buf_available(void *target)
+{
+ struct fcgi_conn *fconn = target;
+ struct fcgi_strm *fstrm;
+
+ /* demux buffer first: unblocks receipt on the connection */
+ if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
+ TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
+ fconn->flags &= ~FCGI_CF_DEM_DALLOC;
+ fcgi_conn_restart_reading(fconn, 1);
+ return 1;
+ }
+
+ /* mux buffer next ; it may also unblock the demux when the demux was
+  * waiting for room in the mux buffer (FCGI_CF_DEM_MROOM)
+  */
+ if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
+ TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
+ fconn->flags &= ~FCGI_CF_MUX_MALLOC;
+ if (fconn->flags & FCGI_CF_DEM_MROOM) {
+ fconn->flags &= ~FCGI_CF_DEM_MROOM;
+ fcgi_conn_restart_reading(fconn, 1);
+ }
+ return 1;
+ }
+
+ /* finally the rx buffer of the stream currently being demuxed (dsi) */
+ if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
+ (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fcgi_strm_sc(fstrm) &&
+ b_alloc(&fstrm->rxbuf)) {
+ TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
+ fconn->flags &= ~FCGI_CF_DEM_SALLOC;
+ fcgi_conn_restart_reading(fconn, 1);
+ fcgi_strm_notify_recv(fstrm);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer *bptr)
+{
+ struct buffer *buf = NULL;
+
+ /* try a direct allocation ; on failure, queue the connection in the
+  * thread's buffer wait list so fcgi_buf_available() is called back
+  * once a buffer can be offered. Returns NULL in that case.
+  */
+ if (likely(!LIST_INLIST(&fconn->buf_wait.list)) &&
+ unlikely((buf = b_alloc(bptr)) == NULL)) {
+ fconn->buf_wait.target = fconn;
+ fconn->buf_wait.wakeup_cb = fcgi_buf_available;
+ LIST_APPEND(&th_ctx->buffer_wq, &fconn->buf_wait.list);
+ }
+ return buf;
+}
+
+static inline void fcgi_release_buf(struct fcgi_conn *fconn, struct buffer *bptr)
+{
+ /* <fconn> is unused here; the parameter is kept for symmetry with the
+  * other buffer helpers. Only allocated (non-zero size) buffers are
+  * freed, and one buffer is offered back to waiters.
+  */
+ if (bptr->size) {
+ b_free(bptr);
+ offer_buffers(NULL, 1);
+ }
+}
+
+static inline void fcgi_release_mbuf(struct fcgi_conn *fconn)
+{
+ struct buffer *buf;
+ unsigned int count = 0;
+
+ /* pick and free each allocated buffer of the mux ring until an
+  * unallocated (zero-sized) one is found, then offer them back
+  */
+ while (b_size(buf = br_head_pick(fconn->mbuf))) {
+ b_free(buf);
+ count++;
+ }
+ if (count)
+ offer_buffers(NULL, count);
+}
+
+/* Returns the number of allocatable outgoing streams for the connection taking
+ * the number reserved streams into account.
+ */
+static inline int fcgi_streams_left(const struct fcgi_conn *fconn)
+{
+ int ret;
+
+ ret = (unsigned int)(0x7FFF - fconn->max_id) - fconn->nb_reserved - 1;
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+/* Returns the number of streams in use on a connection to figure if it's
+ * idle or not. We check nb_sc and not nb_streams as the caller will want
+ * to know if it was the last one after a detach().
+ */
+static int fcgi_used_streams(struct connection *conn)
+{
+ struct fcgi_conn *fconn = conn->ctx;
+
+ /* count attached stream connectors, not streams (see comment above) */
+ return fconn->nb_sc;
+}
+
+/* Returns the number of concurrent streams available on the connection */
+static int fcgi_avail_streams(struct connection *conn)
+{
+ struct server *srv = objt_server(conn->target);
+ struct fcgi_conn *fconn = conn->ctx;
+ int ret1, ret2;
+
+ /* Don't open new stream if the connection is closed */
+ if (fconn->state == FCGI_CS_CLOSED)
+ return 0;
+
+ /* May be negative if this setting has changed */
+ ret1 = (fconn->streams_limit - fconn->nb_streams);
+
+ /* we must also consider the limit imposed by stream IDs */
+ ret2 = fcgi_streams_left(fconn);
+ ret1 = MIN(ret1, ret2);
+ /* finally honor the server's max-reuse setting when set (>= 0) */
+ if (ret1 > 0 && srv && srv->max_reuse >= 0) {
+ ret2 = ((fconn->stream_cnt <= srv->max_reuse) ? srv->max_reuse - fconn->stream_cnt + 1: 0);
+ ret1 = MIN(ret1, ret2);
+ }
+ return ret1;
+}
+
+/*****************************************************************/
+/* functions below are dedicated to the mux setup and management */
+/*****************************************************************/
+
+/* Initializes the mux once it's attached. Only outgoing connections are
+ * supported. So the context is already initialized before installing the
+ * mux. <input> is always used as Input buffer and may contain data. It is the
+ * caller's responsibility to not reuse it anymore. Returns < 0 on error.
+ */
+static int fcgi_init(struct connection *conn, struct proxy *px, struct session *sess,
+ struct buffer *input)
+{
+ struct fcgi_conn *fconn;
+ struct fcgi_strm *fstrm;
+ struct fcgi_app *app = get_px_fcgi_app(px);
+ struct task *t = NULL;
+ void *conn_ctx = conn->ctx;
+
+ /* NOTE(review): enter event is FCGI_EV_FSTRM_NEW while the matching
+  * TRACE_LEAVE below uses FCGI_EV_FCONN_NEW ; looks like a copy-paste
+  * slip, to be confirmed
+  */
+ TRACE_ENTER(FCGI_EV_FSTRM_NEW);
+
+ if (!app) {
+ TRACE_ERROR("No FCGI app found, don't create fconn", FCGI_EV_FCONN_NEW|FCGI_EV_FCONN_END|FCGI_EV_FCONN_ERR);
+ goto fail_conn;
+ }
+
+ fconn = pool_alloc(pool_head_fcgi_conn);
+ if (!fconn) {
+ TRACE_ERROR("fconn allocation failure", FCGI_EV_FCONN_NEW|FCGI_EV_FCONN_END|FCGI_EV_FCONN_ERR);
+ goto fail_conn;
+ }
+
+ /* Initialize the tasklet pointer right away: pool memory is not
+  * zeroed, and the <fail> path below calls tasklet_free() on it, which
+  * would otherwise free an indeterminate pointer if the task
+  * allocation below fails first.
+  */
+ fconn->wait_event.tasklet = NULL;
+
+ /* outgoing side only, so server-side timeouts apply */
+ fconn->shut_timeout = fconn->timeout = px->timeout.server;
+ if (tick_isset(px->timeout.serverfin))
+ fconn->shut_timeout = px->timeout.serverfin;
+
+ fconn->flags = FCGI_CF_NONE;
+
+ /* Retrieve useful info from the FCGI app */
+ if (app->flags & FCGI_APP_FL_KEEP_CONN)
+ fconn->flags |= FCGI_CF_KEEP_CONN;
+ if (app->flags & FCGI_APP_FL_GET_VALUES)
+ fconn->flags |= FCGI_CF_GET_VALUES;
+ if (app->flags & FCGI_APP_FL_MPXS_CONNS)
+ fconn->flags |= FCGI_CF_MPXS_CONNS;
+
+ fconn->proxy = px;
+ fconn->app = app;
+ fconn->task = NULL;
+ if (tick_isset(fconn->timeout)) {
+ t = task_new_here();
+ if (!t) {
+ TRACE_ERROR("fconn task allocation failure", FCGI_EV_FCONN_NEW|FCGI_EV_FCONN_END|FCGI_EV_FCONN_ERR);
+ goto fail;
+ }
+
+ fconn->task = t;
+ t->process = fcgi_timeout_task;
+ t->context = fconn;
+ t->expire = tick_add(now_ms, fconn->timeout);
+ }
+
+ fconn->wait_event.tasklet = tasklet_new();
+ if (!fconn->wait_event.tasklet)
+ goto fail;
+ fconn->wait_event.tasklet->process = fcgi_io_cb;
+ fconn->wait_event.tasklet->context = fconn;
+ fconn->wait_event.events = 0;
+
+ /* Initialise the context. */
+ fconn->state = FCGI_CS_INIT;
+ fconn->conn = conn;
+ fconn->streams_limit = app->maxreqs;
+ fconn->max_id = -1;
+ fconn->nb_streams = 0;
+ fconn->nb_sc = 0;
+ fconn->nb_reserved = 0;
+ fconn->stream_cnt = 0;
+
+ /* ownership of <input> is transferred to the demux buffer */
+ fconn->dbuf = *input;
+ fconn->dsi = -1;
+
+ br_init(fconn->mbuf, sizeof(fconn->mbuf) / sizeof(fconn->mbuf[0]));
+ fconn->streams_by_id = EB_ROOT;
+ LIST_INIT(&fconn->send_list);
+ LIST_INIT(&fconn->buf_wait.list);
+
+ conn->ctx = fconn;
+
+ if (t)
+ task_queue(t);
+
+ /* FIXME: this is temporary, for outgoing connections we need to
+ * immediately allocate a stream until the code is modified so that the
+ * caller calls ->attach(). For now the outgoing sc is stored as
+ * conn->ctx by the caller and saved in conn_ctx.
+ */
+ fstrm = fcgi_stconn_new(fconn, conn_ctx, sess);
+ if (!fstrm)
+ goto fail;
+
+
+ /* Prepare to read something */
+ fcgi_conn_restart_reading(fconn, 1);
+ TRACE_LEAVE(FCGI_EV_FCONN_NEW, conn);
+ return 0;
+
+ fail:
+ task_destroy(t);
+ tasklet_free(fconn->wait_event.tasklet);
+ pool_free(pool_head_fcgi_conn, fconn);
+ fail_conn:
+ conn->ctx = conn_ctx; // restore saved ctx
+ TRACE_DEVEL("leaving in error", FCGI_EV_FCONN_NEW|FCGI_EV_FCONN_END|FCGI_EV_FCONN_ERR);
+ return -1;
+}
+
+/* Returns the next allocatable outgoing stream ID for the FCGI connection, or
+ * -1 if no more is allocatable.
+ */
+static inline int32_t fcgi_conn_get_next_sid(const struct fcgi_conn *fconn)
+{
+ /* the "| 1" forces odd stream IDs ; NOTE(review): this mirrors the H2
+  * mux where the client uses odd IDs, confirm it is intended for FCGI
+  */
+ int32_t id = (fconn->max_id + 1) | 1;
+
+ /* NOTE(review): max_id is an int16_t so this 2^31 overflow check can
+  * never trigger ; the effective cap is enforced by fcgi_streams_left()
+  */
+ if ((id & 0x80000000U))
+ id = -1;
+ return id;
+}
+
+/* Returns the stream associated with id <id> or NULL if not found */
+static inline struct fcgi_strm *fcgi_conn_st_by_id(struct fcgi_conn *fconn, int id)
+{
+ struct eb32_node *node;
+
+ /* id 0 is reserved for connection-level management records */
+ if (id == 0)
+ return (struct fcgi_strm *)fcgi_mgmt_stream;
+
+ /* IDs above the highest known one cannot match an existing stream */
+ if (id > fconn->max_id)
+ return (struct fcgi_strm *)fcgi_unknown_stream;
+
+ /* already closed/removed streams resolve to the dummy idle stream */
+ node = eb32_lookup(&fconn->streams_by_id, id);
+ if (!node)
+ return (struct fcgi_strm *)fcgi_unknown_stream;
+ return container_of(node, struct fcgi_strm, by_id);
+}
+
+
+/* Release function. This one should be called to free all resources allocated
+ * to the mux.
+ */
+static void fcgi_release(struct fcgi_conn *fconn)
+{
+ /* <conn> may be NULL ; every use below is guarded accordingly */
+ struct connection *conn = fconn->conn;
+
+ TRACE_POINT(FCGI_EV_FCONN_END);
+
+ /* drop any pending registration in the buffer wait queue */
+ if (LIST_INLIST(&fconn->buf_wait.list))
+ LIST_DEL_INIT(&fconn->buf_wait.list);
+
+ fcgi_release_buf(fconn, &fconn->dbuf);
+ fcgi_release_mbuf(fconn);
+
+ /* the timeout task is not destroyed here: it is detached (context set
+  * to NULL) and woken up so it can release itself
+  */
+ if (fconn->task) {
+ fconn->task->context = NULL;
+ task_wakeup(fconn->task, TASK_WOKEN_OTHER);
+ fconn->task = NULL;
+ }
+ tasklet_free(fconn->wait_event.tasklet);
+ if (conn && fconn->wait_event.events != 0)
+ conn->xprt->unsubscribe(conn, conn->xprt_ctx, fconn->wait_event.events,
+ &fconn->wait_event);
+
+ pool_free(pool_head_fcgi_conn, fconn);
+
+ if (conn) {
+ conn->mux = NULL;
+ conn->ctx = NULL;
+ TRACE_DEVEL("freeing conn", FCGI_EV_FCONN_END, conn);
+
+ conn_stop_tracking(conn);
+ conn_full_close(conn);
+ if (conn->destroy_cb)
+ conn->destroy_cb(conn);
+ conn_free(conn);
+ }
+}
+
+/* Detect a pending read0 for a FCGI connection. It happens if a read0 is
+ * pending on the connection AND if there is no more data in the demux
+ * buffer. The function returns 1 to report a read0 or 0 otherwise.
+ */
+static int fcgi_conn_read0_pending(struct fcgi_conn *fconn)
+{
+ /* only report the read0 once all buffered input was demuxed */
+ if ((fconn->flags & FCGI_CF_EOS) && !b_data(&fconn->dbuf))
+ return 1;
+ return 0;
+}
+
+
+/* Returns true if the FCGI connection must be released */
+static inline int fcgi_conn_is_dead(struct fcgi_conn *fconn)
+{
+ /* never release the connection while streams are still attached ;
+  * past that, any single condition below is enough to kill it
+  */
+ if (eb_is_empty(&fconn->streams_by_id) && /* don't close if streams exist */
+ (!(fconn->flags & FCGI_CF_KEEP_CONN) || /* don't keep the connection alive */
+ (fconn->flags & FCGI_CF_ERROR) || /* errors close immediately */
+ (fconn->state == FCGI_CS_CLOSED && !fconn->task) ||/* a timeout stroke earlier */
+ (!(fconn->conn->owner)) || /* Nobody's left to take care of the connection, drop it now */
+ (!br_data(fconn->mbuf) && /* mux buffer empty, also process clean events below */
+ (fconn->flags & FCGI_CF_EOS))))
+ return 1;
+ return 0;
+}
+
+
+/********************************************************/
+/* functions below are for the FCGI protocol processing */
+/********************************************************/
+
+/* Marks an error on the stream. */
+static inline void fcgi_strm_error(struct fcgi_strm *fstrm)
+{
+ /* id 0 is the management stream: never flag it in error */
+ if (fstrm->id && fstrm->state != FCGI_SS_ERROR) {
+ TRACE_POINT(FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+ if (fstrm->state < FCGI_SS_ERROR) {
+ fstrm->state = FCGI_SS_ERROR;
+ TRACE_STATE("switching to ERROR", FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+ }
+ /* always propagate the error to the endpoint descriptor */
+ se_fl_set_error(fstrm->sd);
+ }
+}
+
+/* Attempts to notify the data layer of recv availability */
+static void fcgi_strm_notify_recv(struct fcgi_strm *fstrm)
+{
+ if (fstrm->subs && (fstrm->subs->events & SUB_RETRY_RECV)) {
+ TRACE_POINT(FCGI_EV_STRM_WAKE, fstrm->fconn->conn, fstrm);
+ tasklet_wakeup(fstrm->subs->tasklet);
+ /* clear the event, and drop the subscription once empty */
+ fstrm->subs->events &= ~SUB_RETRY_RECV;
+ if (!fstrm->subs->events)
+ fstrm->subs = NULL;
+ }
+}
+
+/* Attempts to notify the data layer of send availability */
+static void fcgi_strm_notify_send(struct fcgi_strm *fstrm)
+{
+ if (fstrm->subs && (fstrm->subs->events & SUB_RETRY_SEND)) {
+ TRACE_POINT(FCGI_EV_STRM_WAKE, fstrm->fconn->conn, fstrm);
+ fstrm->flags |= FCGI_SF_NOTIFIED;
+ tasklet_wakeup(fstrm->subs->tasklet);
+ /* clear the event, and drop the subscription once empty */
+ fstrm->subs->events &= ~SUB_RETRY_SEND;
+ if (!fstrm->subs->events)
+ fstrm->subs = NULL;
+ }
+ /* no subscriber but a shutdown is pending: retry it via its tasklet */
+ else if (fstrm->flags & (FCGI_SF_WANT_SHUTR | FCGI_SF_WANT_SHUTW)) {
+ TRACE_POINT(FCGI_EV_STRM_WAKE, fstrm->fconn->conn, fstrm);
+ tasklet_wakeup(fstrm->shut_tl);
+ }
+}
+
+/* Alerts the data layer, trying to wake it up by all means, following
+ * this sequence :
+ * - if the fcgi stream' data layer is subscribed to recv, then it's woken up
+ * for recv
+ * - if its subscribed to send, then it's woken up for send
+ * - if it was subscribed to neither, its ->wake() callback is called
+ * It is safe to call this function with a closed stream which doesn't have a
+ * stream connector anymore.
+ */
+static void fcgi_strm_alert(struct fcgi_strm *fstrm)
+{
+ TRACE_POINT(FCGI_EV_STRM_WAKE, fstrm->fconn->conn, fstrm);
+ /* prefer the recv/send subscriptions (or the pending shutdown path
+  * handled by notify_send) ; only fall back to ->wake() when neither
+  * applies
+  */
+ if (fstrm->subs ||
+ (fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW))) {
+ fcgi_strm_notify_recv(fstrm);
+ fcgi_strm_notify_send(fstrm);
+ }
+ else if (fcgi_strm_sc(fstrm) && fcgi_strm_sc(fstrm)->app_ops->wake != NULL) {
+ TRACE_POINT(FCGI_EV_STRM_WAKE, fstrm->fconn->conn, fstrm);
+ fcgi_strm_sc(fstrm)->app_ops->wake(fcgi_strm_sc(fstrm));
+ }
+}
+
/* Writes the 16-bit record size <len> at address <record>. The size is stored
 * in network byte order (big endian) in the 5th and 6th bytes of the record
 * header, as mandated by the FastCGI record layout (contentLengthB1/B0).
 * Note: arithmetic is performed on a uint8_t pointer because pointer
 * arithmetic on void* is a GNU extension, not ISO C.
 */
static inline void fcgi_set_record_size(void *record, uint16_t len)
{
	uint8_t *out = (uint8_t *)record + 4;

	out[0] = (len >> 8);
	out[1] = (len & 0xff);
}
+
/* Writes the 16-bit stream id <id> at address <record>. The id is stored in
 * network byte order (big endian) in the 3rd and 4th bytes of the record
 * header, as mandated by the FastCGI record layout (requestIdB1/B0).
 * Note: arithmetic is performed on a uint8_t pointer because pointer
 * arithmetic on void* is a GNU extension, not ISO C.
 */
static inline void fcgi_set_record_id(void *record, uint16_t id)
{
	uint8_t *out = (uint8_t *)record + 2;

	out[0] = (id >> 8);
	out[1] = (id & 0xff);
}
+
/* Marks a FCGI stream as CLOSED and decrements the number of active streams
 * for its connection if the stream was not yet closed. Please use this
 * exclusively before closing a stream to ensure stream count is well
 * maintained.
 */
static inline void fcgi_strm_close(struct fcgi_strm *fstrm)
{
	if (fstrm->state != FCGI_SS_CLOSED) {
		TRACE_ENTER(FCGI_EV_FSTRM_END, fstrm->fconn->conn, fstrm);
		fstrm->fconn->nb_streams--;
		/* streams with no id yet are accounted among the reserved ones */
		if (!fstrm->id)
			fstrm->fconn->nb_reserved--;
		if (fcgi_strm_sc(fstrm)) {
			/* wake the data layer for recv when no EOS was reported
			 * and nothing remains to be consumed in the rx buffer
			 */
			if (!se_fl_test(fstrm->sd, SE_FL_EOS) && !b_data(&fstrm->rxbuf))
				fcgi_strm_notify_recv(fstrm);
		}
		fstrm->state = FCGI_SS_CLOSED;
		TRACE_STATE("switching to CLOSED", FCGI_EV_FSTRM_END, fstrm->fconn->conn, fstrm);
		TRACE_LEAVE(FCGI_EV_FSTRM_END, fstrm->fconn->conn, fstrm);
	}
}
+
/* Detaches a FCGI stream from its FCGI connection and releases it to the
 * fcgi_strm pool. The stream must already be orphaned (no stream connector
 * attached to its descriptor, see the BUG_ON below).
 */
static void fcgi_strm_destroy(struct fcgi_strm *fstrm)
{
	/* keep a copy of the connection for tracing after the stream is freed */
	struct connection *conn = fstrm->fconn->conn;

	TRACE_ENTER(FCGI_EV_FSTRM_END, conn, fstrm);

	/* make sure the stream is accounted as closed before releasing it */
	fcgi_strm_close(fstrm);
	eb32_delete(&fstrm->by_id);
	if (b_size(&fstrm->rxbuf)) {
		b_free(&fstrm->rxbuf);
		/* a buffer was released, let waiters know one is available */
		offer_buffers(NULL, 1);
	}
	if (fstrm->subs)
		fstrm->subs->events = 0;
	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the fconn send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	LIST_DEL_INIT(&fstrm->send_list);
	tasklet_free(fstrm->shut_tl);
	BUG_ON(fstrm->sd && !se_fl_test(fstrm->sd, SE_FL_ORPHAN));
	sedesc_free(fstrm->sd);
	pool_free(pool_head_fcgi_strm, fstrm);

	TRACE_LEAVE(FCGI_EV_FSTRM_END, conn);
}
+
/* Allocates a new stream <id> for connection <fconn> and adds it into fconn's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream prior
 * to calling this function.
 */
static struct fcgi_strm *fcgi_strm_new(struct fcgi_conn *fconn, int id)
{
	struct fcgi_strm *fstrm;

	TRACE_ENTER(FCGI_EV_FSTRM_NEW, fconn->conn);

	fstrm = pool_alloc(pool_head_fcgi_strm);
	if (!fstrm) {
		TRACE_ERROR("fstrm allocation failure", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_ERR|FCGI_EV_FSTRM_END, fconn->conn);
		goto out;
	}

	/* dedicated tasklet used to process deferred shutdowns */
	fstrm->shut_tl = tasklet_new();
	if (!fstrm->shut_tl) {
		TRACE_ERROR("fstrm shut tasklet allocation failure", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_ERR|FCGI_EV_FSTRM_END, fconn->conn);
		pool_free(pool_head_fcgi_strm, fstrm);
		goto out;
	}
	fstrm->subs = NULL;
	fstrm->shut_tl->process = fcgi_deferred_shut;
	fstrm->shut_tl->context = fstrm;
	LIST_INIT(&fstrm->send_list);
	fstrm->fconn = fconn;
	fstrm->sd = NULL;
	fstrm->flags = FCGI_SF_NONE;
	fstrm->proto_status = 0;
	fstrm->state = FCGI_SS_IDLE;
	fstrm->rxbuf = BUF_NULL;

	/* HTTP/1 response parser initialization (h1m_init_res) */
	h1m_init_res(&fstrm->h1m);
	fstrm->h1m.err_pos = -1; // don't care about errors on the request path
	fstrm->h1m.flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);

	/* a null id means the id is not assigned yet; such streams are
	 * accounted as reserved
	 */
	fstrm->by_id.key = fstrm->id = id;
	if (id > 0)
		fconn->max_id = id;
	else
		fconn->nb_reserved++;

	eb32_insert(&fconn->streams_by_id, &fstrm->by_id);
	fconn->nb_streams++;
	fconn->stream_cnt++;

	TRACE_LEAVE(FCGI_EV_FSTRM_NEW, fconn->conn, fstrm);
	return fstrm;

  out:
	TRACE_DEVEL("leaving in error", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_ERR|FCGI_EV_FSTRM_END, fconn->conn);
	return NULL;
}
+
+/* Allocates a new stream associated to stream connector <sc> on the FCGI connection
+ * <fconn> and returns it, or NULL in case of memory allocation error or if the
+ * highest possible stream ID was reached.
+ */
+static struct fcgi_strm *fcgi_stconn_new(struct fcgi_conn *fconn, struct stconn *sc,
+ struct session *sess)
+{
+ struct fcgi_strm *fstrm = NULL;
+
+ TRACE_ENTER(FCGI_EV_FSTRM_NEW, fconn->conn);
+ if (fconn->nb_streams >= fconn->streams_limit) {
+ TRACE_ERROR("streams_limit reached", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_END|FCGI_EV_FSTRM_ERR, fconn->conn);
+ goto out;
+ }
+
+ if (fcgi_streams_left(fconn) < 1) {
+ TRACE_ERROR("!streams_left", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_END|FCGI_EV_FSTRM_ERR, fconn->conn);
+ goto out;
+ }
+
+ /* Defer choosing the ID until we send the first message to create the stream */
+ fstrm = fcgi_strm_new(fconn, 0);
+ if (!fstrm) {
+ TRACE_ERROR("fstream allocation failure", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_END|FCGI_EV_FSTRM_ERR, fconn->conn);
+ goto out;
+ }
+ if (sc_attach_mux(sc, fstrm, fconn->conn) < 0)
+ goto out;
+ fstrm->sd = sc->sedesc;
+ fstrm->sess = sess;
+ fconn->nb_sc++;
+
+ TRACE_LEAVE(FCGI_EV_FSTRM_NEW, fconn->conn, fstrm);
+ return fstrm;
+
+ out:
+ TRACE_DEVEL("leaving on error", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_END|FCGI_EV_FSTRM_ERR, fconn->conn);
+ fcgi_strm_destroy(fstrm);
+ return NULL;
+}
+
/* Wakes a specific stream and assigns its stream connector some SE_FL_* flags
 * among SE_FL_ERR_PENDING and SE_FL_ERROR if needed. The stream's state is
 * automatically updated accordingly. If the stream is orphaned, it is
 * destroyed.
 */
static void fcgi_strm_wake_one_stream(struct fcgi_strm *fstrm)
{
	struct fcgi_conn *fconn = fstrm->fconn;

	TRACE_ENTER(FCGI_EV_STRM_WAKE, fconn->conn, fstrm);

	if (!fcgi_strm_sc(fstrm)) {
		/* this stream was already orphaned */
		fcgi_strm_destroy(fstrm);
		TRACE_DEVEL("leaving with no fstrm", FCGI_EV_STRM_WAKE, fconn->conn);
		return;
	}

	/* a pending read0 on the connection closes the remote side of the
	 * stream: OPEN goes half-closed (HREM), and an already half-closed
	 * local stream (HLOC) becomes fully closed
	 */
	if (fcgi_conn_read0_pending(fconn)) {
		if (fstrm->state == FCGI_SS_OPEN) {
			fstrm->state = FCGI_SS_HREM;
			TRACE_STATE("switching to HREM", FCGI_EV_STRM_WAKE|FCGI_EV_FSTRM_END, fconn->conn, fstrm);
		}
		else if (fstrm->state == FCGI_SS_HLOC)
			fcgi_strm_close(fstrm);
	}

	/* report the error on the stream descriptor when the connection is
	 * dead or failing
	 */
	if (fconn->state == FCGI_CS_CLOSED || (fconn->flags & (FCGI_CF_ERR_PENDING|FCGI_CF_ERROR))) {
		se_fl_set_error(fstrm->sd);

		if (fstrm->state < FCGI_SS_ERROR) {
			fstrm->state = FCGI_SS_ERROR;
			TRACE_STATE("switching to ERROR", FCGI_EV_STRM_WAKE|FCGI_EV_FSTRM_END, fconn->conn, fstrm);
		}
	}

	fcgi_strm_alert(fstrm);

	TRACE_LEAVE(FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
}
+
/* Wakes unassigned streams (ID == 0) attached to the connection. */
static void fcgi_wake_unassigned_streams(struct fcgi_conn *fconn)
{
	struct eb32_node *node;
	struct fcgi_strm *fstrm;

	/* unassigned streams are all inserted with key 0, so they sit at the
	 * beginning of the tree; stop as soon as an assigned id is met
	 */
	node = eb32_lookup(&fconn->streams_by_id, 0);
	while (node) {
		fstrm = container_of(node, struct fcgi_strm, by_id);
		if (fstrm->id > 0)
			break;
		/* advance before waking: the wakeup may destroy <fstrm> and
		 * remove it from the tree
		 */
		node = eb32_next(node);
		fcgi_strm_wake_one_stream(fstrm);
	}
}
+
/* Wakes the streams attached to the connection, whose id is greater than
 * <last> or unassigned.
 */
static void fcgi_wake_some_streams(struct fcgi_conn *fconn, int last)
{
	struct eb32_node *node;
	struct fcgi_strm *fstrm;

	TRACE_ENTER(FCGI_EV_STRM_WAKE, fconn->conn);

	/* Wake all streams with ID > last */
	node = eb32_lookup_ge(&fconn->streams_by_id, last + 1);
	while (node) {
		fstrm = container_of(node, struct fcgi_strm, by_id);
		/* advance before waking: the wakeup may destroy <fstrm> and
		 * remove it from the tree
		 */
		node = eb32_next(node);
		fcgi_strm_wake_one_stream(fstrm);
	}
	/* streams with no id yet (key 0) are not covered by the walk above */
	fcgi_wake_unassigned_streams(fconn);

	TRACE_LEAVE(FCGI_EV_STRM_WAKE, fconn->conn);
}
+
/* Computes the default FCGI parameters for the stream <fstrm> and stores them
 * into <params>. Only the params whose bit is not already set in params->mask
 * are filled. String values are appended to the chunk params->p. <sl> is the
 * HTX start-line of the request and <htx> the request message. Returns 1 on
 * success and 0 on error.
 */
static int fcgi_set_default_param(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
				  struct htx *htx, struct htx_sl *sl,
				  struct fcgi_strm_params *params)
{
	struct connection *cli_conn = objt_conn(fstrm->sess->origin);
	/* addresses come from the connection itself for health-checks
	 * (sc_check), from the opposite stream connector otherwise
	 */
	const struct sockaddr_storage *src = (sc_check(fcgi_strm_sc(fstrm)) ? conn_src(fconn->conn) : sc_src(sc_opposite(fcgi_strm_sc(fstrm))));
	const struct sockaddr_storage *dst = (sc_check(fcgi_strm_sc(fstrm)) ? conn_dst(fconn->conn) : sc_dst(sc_opposite(fcgi_strm_sc(fstrm))));
	struct ist p;

	if (!sl)
		goto error;

	if (!(params->mask & FCGI_SP_DOC_ROOT))
		params->docroot = fconn->app->docroot;

	if (!(params->mask & FCGI_SP_REQ_METH)) {
		p = htx_sl_req_meth(sl);
		params->meth = ist2(b_tail(params->p), p.len);
		chunk_istcat(params->p, p);
	}
	if (!(params->mask & FCGI_SP_REQ_URI)) {
		p = h1_get_uri(sl);
		params->uri = ist2(b_tail(params->p), p.len);
		chunk_istcat(params->p, p);
	}
	if (!(params->mask & FCGI_SP_SRV_PROTO)) {
		p = htx_sl_req_vsn(sl);
		params->vsn = ist2(b_tail(params->p), p.len);
		chunk_istcat(params->p, p);
	}
	if (!(params->mask & FCGI_SP_SRV_PORT)) {
		char *end;
		int port = 0;
		if (dst)
			port = get_host_port(dst);
		end = ultoa_o(port, b_tail(params->p), b_room(params->p));
		if (!end)
			goto error;
		params->srv_port = ist2(b_tail(params->p), end - b_tail(params->p));
		params->p->data += params->srv_port.len;
	}
	if (!(params->mask & FCGI_SP_SRV_NAME)) {
		/* If no Host header found, use the server address to fill
		 * srv_name */
		if (!istlen(params->srv_name)) {
			char *ptr = NULL;

			if (dst)
				if (addr_to_str(dst, b_tail(params->p), b_room(params->p)) != -1)
					ptr = b_tail(params->p);
			if (ptr) {
				params->srv_name = ist(ptr);
				params->p->data += params->srv_name.len;
			}
		}
	}
	if (!(params->mask & FCGI_SP_REM_ADDR)) {
		char *ptr = NULL;

		if (src)
			if (addr_to_str(src, b_tail(params->p), b_room(params->p)) != -1)
				ptr = b_tail(params->p);
		if (ptr) {
			params->rem_addr = ist(ptr);
			params->p->data += params->rem_addr.len;
		}
	}
	if (!(params->mask & FCGI_SP_REM_PORT)) {
		char *end;
		int port = 0;
		if (src)
			port = get_host_port(src);
		end = ultoa_o(port, b_tail(params->p), b_room(params->p));
		if (!end)
			goto error;
		params->rem_port = ist2(b_tail(params->p), end - b_tail(params->p));
		params->p->data += params->rem_port.len;
	}
	if (!(params->mask & FCGI_SP_CONT_LEN)) {
		struct htx_blk *blk;
		enum htx_blk_type type;
		char *end;
		size_t len = 0;

		/* sum the payload size of all DATA blocks up to the trailers */
		for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
			type = htx_get_blk_type(blk);

			if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
				break;
			if (type == HTX_BLK_DATA)
				len += htx_get_blksz(blk);
		}
		end = ultoa_o(len, b_tail(params->p), b_room(params->p));
		if (!end)
			goto error;
		params->cont_len = ist2(b_tail(params->p), end - b_tail(params->p));
		params->p->data += params->cont_len.len;
	}

	if (!(params->mask & FCGI_SP_HTTPS)) {
		if (cli_conn)
			params->https = conn_is_ssl(cli_conn);
	}

	if ((params->mask & FCGI_SP_URI_MASK) != FCGI_SP_URI_MASK) {
		/* one of scriptname, pathinfo or query_string is not set */
		struct http_uri_parser parser = http_uri_parser_init(params->uri);
		struct ist path = http_parse_path(&parser);
		int len;

		/* No script_name set and no valid path ==> error */
		if (!(params->mask & FCGI_SP_SCRIPT_NAME) && !istlen(path))
			goto error;

		/* If there is a query-string, set it if not already set */
		if (!(params->mask & FCGI_SP_REQ_QS)) {
			struct ist qs = istfind(path, '?');

			/* Update the path length */
			path.len -= qs.len;

			/* Set the query-string skipping the '?', if any */
			if (istlen(qs))
				params->qs = istnext(qs);
		}

		/* If the script_name is set, don't try to deduce the path_info
		 * too. The opposite is not true.
		 */
		if (params->mask & FCGI_SP_SCRIPT_NAME) {
			params->mask |= FCGI_SP_PATH_INFO;
			goto end;
		}

		/* Decode the path. It must first be copied to keep the URI
		 * untouched.
		 */
		chunk_istcat(params->p, path);
		path.ptr = b_tail(params->p) - path.len;
		len = url_decode(ist0(path), 0);
		if (len < 0)
			goto error;
		path.len = len;

		/* script_name not set, preset it with the path for now */
		params->scriptname = path;

		/* If there is no regex to match the pathinfo, jump to the last
		 * part and see if the index must be used.
		 */
		if (!fconn->app->pathinfo_re)
			goto check_index;

		/* If some special characters are found in the decoded path (\n
		 * or \0), the PATH_INFO regex cannot match. This is theoretically
		 * valid, but probably unexpected, to have such characters. So,
		 * to avoid any surprises, an error is triggered in this
		 * case.
		 */
		if (istchr(path, '\n') || istchr(path, '\0'))
			goto error;

		/* If the regex does not match, jump to the last part and see
		 * if the index must be used.
		 */
		if (!regex_exec_match2(fconn->app->pathinfo_re, path.ptr, len, MAX_MATCH, pmatch, 0))
			goto check_index;

		/* We must have at least 1 capture for the script name,
		 * otherwise we do nothing and jump to the last part.
		 */
		if (pmatch[1].rm_so == -1 || pmatch[1].rm_eo == -1)
			goto check_index;

		/* Finally we can set the script_name and the path_info. The
		 * path_info is set if not already defined, and if it was
		 * captured
		 */
		params->scriptname = ist2(path.ptr + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so);
		if (!(params->mask & FCGI_SP_PATH_INFO) && !(pmatch[2].rm_so == -1 || pmatch[2].rm_eo == -1))
			params->pathinfo = ist2(path.ptr + pmatch[2].rm_so, pmatch[2].rm_eo - pmatch[2].rm_so);

	  check_index:
		len = params->scriptname.len;
		/* if the script_name is finished by a '/', we can add the
		 * index part, if any.
		 */
		if (istlen(fconn->app->index) && params->scriptname.ptr[len-1] == '/') {
			struct ist sn = params->scriptname;

			params->scriptname = ist2(b_tail(params->p), len+fconn->app->index.len);
			chunk_istcat(params->p, sn);
			chunk_istcat(params->p, fconn->app->index);
		}
	}

	if (!(params->mask & FCGI_SP_SRV_SOFT)) {
		params->srv_soft = ist2(b_tail(params->p), 0);
		chunk_appendf(params->p, "HAProxy %s", haproxy_version);
		params->srv_soft.len = b_tail(params->p) - params->srv_soft.ptr;
	}

  end:
	return 1;
  error:
	return 0;
}
+
/* Encodes the default value of the FCGI parameter identified by <flag> into
 * <outbuf>, unless it was already handled (its bit set in params->mask).
 * Returns 1 on success or when the param is skipped (already handled, empty
 * value or conditionally omitted), and 0 when the output buffer cannot hold
 * the encoded param.
 */
static int fcgi_encode_default_param(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
				     struct fcgi_strm_params *params, struct buffer *outbuf, int flag)
{
	struct fcgi_param p;

	if (params->mask & flag)
		return 1;

	/* the trash chunk is used below to build concatenated values
	 * (SCRIPT_FILENAME, PATH_TRANSLATED)
	 */
	chunk_reset(&trash);

	switch (flag) {
	case FCGI_SP_CGI_GATEWAY:
		p.n = ist("GATEWAY_INTERFACE");
		p.v = ist("CGI/1.1");
		goto encode;
	case FCGI_SP_DOC_ROOT:
		p.n = ist("DOCUMENT_ROOT");
		p.v = params->docroot;
		goto encode;
	case FCGI_SP_SCRIPT_NAME:
		p.n = ist("SCRIPT_NAME");
		p.v = params->scriptname;
		goto encode;
	case FCGI_SP_PATH_INFO:
		p.n = ist("PATH_INFO");
		p.v = params->pathinfo;
		goto encode;
	case FCGI_SP_REQ_URI:
		p.n = ist("REQUEST_URI");
		p.v = params->uri;
		goto encode;
	case FCGI_SP_REQ_METH:
		p.n = ist("REQUEST_METHOD");
		p.v = params->meth;
		goto encode;
	case FCGI_SP_REQ_QS:
		p.n = ist("QUERY_STRING");
		p.v = params->qs;
		goto encode;
	case FCGI_SP_SRV_NAME:
		p.n = ist("SERVER_NAME");
		p.v = params->srv_name;
		goto encode;
	case FCGI_SP_SRV_PORT:
		p.n = ist("SERVER_PORT");
		p.v = params->srv_port;
		goto encode;
	case FCGI_SP_SRV_PROTO:
		p.n = ist("SERVER_PROTOCOL");
		p.v = params->vsn;
		goto encode;
	case FCGI_SP_REM_ADDR:
		p.n = ist("REMOTE_ADDR");
		p.v = params->rem_addr;
		goto encode;
	case FCGI_SP_REM_PORT:
		p.n = ist("REMOTE_PORT");
		p.v = params->rem_port;
		goto encode;
	case FCGI_SP_SCRIPT_FILE:
		/* SCRIPT_FILENAME = DOCUMENT_ROOT + SCRIPT_NAME */
		p.n = ist("SCRIPT_FILENAME");
		chunk_istcat(&trash, params->docroot);
		chunk_istcat(&trash, params->scriptname);
		p.v = ist2(b_head(&trash), b_data(&trash));
		goto encode;
	case FCGI_SP_PATH_TRANS:
		/* PATH_TRANSLATED = DOCUMENT_ROOT + PATH_INFO, only when a
		 * path_info exists
		 */
		if (!istlen(params->pathinfo))
			goto skip;
		p.n = ist("PATH_TRANSLATED");
		chunk_istcat(&trash, params->docroot);
		chunk_istcat(&trash, params->pathinfo);
		p.v = ist2(b_head(&trash), b_data(&trash));
		goto encode;
	case FCGI_SP_CONT_LEN:
		p.n = ist("CONTENT_LENGTH");
		p.v = params->cont_len;
		goto encode;
	case FCGI_SP_HTTPS:
		/* HTTPS is only sent when set, with the value "on" */
		if (!params->https)
			goto skip;
		p.n = ist("HTTPS");
		p.v = ist("on");
		goto encode;
	case FCGI_SP_SRV_SOFT:
		p.n = ist("SERVER_SOFTWARE");
		p.v = params->srv_soft;
		goto encode;
	default:
		goto skip;
	}

  encode:
	/* params with an empty value are simply not sent */
	if (!istlen(p.v))
		goto skip;
	if (!fcgi_encode_param(outbuf, &p))
		return 0;
  skip:
	/* mark the param as handled so it is not evaluated again */
	params->mask |= flag;
	return 1;
}
+
/* Sends a GET_VALUES record. Returns > 0 on success, 0 if it couldn't do
 * anything. It is highly unexpected, but if the record is larger than a buffer
 * and cannot be encoded in one time, an error is triggered and the connection is
 * closed. GET_VALUES record cannot be split.
 */
static int fcgi_conn_send_get_values(struct fcgi_conn *fconn)
{
	struct buffer outbuf;
	struct buffer *mbuf;
	/* values are queried with an empty value, per the FastCGI protocol */
	struct fcgi_param max_reqs = { .n = ist("FCGI_MAX_REQS"), .v = ist("")};
	struct fcgi_param mpxs_conns = { .n = ist("FCGI_MPXS_CONNS"), .v = ist("")};
	int ret = 0;

	TRACE_ENTER(FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL, fconn->conn);

	mbuf = br_tail(fconn->mbuf);
  retry:
	if (!fcgi_get_buf(fconn, mbuf)) {
		/* no buffer available yet: block and wait for the allocation */
		fconn->flags |= FCGI_CF_MUX_MALLOC;
		fconn->flags |= FCGI_CF_DEM_MROOM;
		TRACE_STATE("waiting for fconn mbuf ring allocation", FCGI_EV_TX_RECORD|FCGI_EV_FCONN_BLK, fconn->conn);
		ret = 0;
		goto end;
	}

	/* get a contiguous area large enough for at least the record header,
	 * realigning the buffer when needed
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
			break;
	  realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < FCGI_RECORD_HEADER_SZ)
		goto full;

	/* vsn: 1(FCGI_VERSION), type: (9)FCGI_GET_VALUES, id: 0x0000,
	 * len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
	memcpy(outbuf.area, "\x01\x09\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
	outbuf.data = FCGI_RECORD_HEADER_SZ;

	/* Note: Don't send the param FCGI_MAX_CONNS because its value cannot be
	 * handled by HAProxy.
	 */
	if (!fcgi_encode_param(&outbuf, &max_reqs) || !fcgi_encode_param(&outbuf, &mpxs_conns))
		goto full;

	/* update the record's size now */
	TRACE_PROTO("FCGI GET_VALUES record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL, fconn->conn, 0, 0, (size_t[]){outbuf.data-8});
	fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
	b_add(mbuf, outbuf.data);
	ret = 1;

  end:
	TRACE_LEAVE(FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL, fconn->conn);
	return ret;
  full:
	/* Too large to be encoded. For GET_VALUES records, it is an error:
	 * an empty buffer was not enough, so no retry can ever succeed
	 */
	if (!b_data(mbuf)) {
		TRACE_ERROR("GET_VALUES record too large", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
		goto fail;
	}

	/* otherwise retry with the next buffer of the ring, if any */
	if ((mbuf = br_tail_add(fconn->mbuf)) != NULL)
		goto retry;
	fconn->flags |= FCGI_CF_MUX_MFULL;
	fconn->flags |= FCGI_CF_DEM_MROOM;
	TRACE_STATE("mbuf ring full", FCGI_EV_TX_RECORD|FCGI_EV_FCONN_BLK, fconn->conn);
	ret = 0;
	goto end;
  fail:
	fconn->state = FCGI_CS_CLOSED;
	TRACE_STATE("switching to CLOSED", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL|FCGI_EV_FCONN_END, fconn->conn);
	TRACE_DEVEL("leaving on error", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
	return 0;
}
+
/* Processes a GET_VALUES_RESULT record. Returns > 0 on success, 0 if it
 * couldn't do anything. It is highly unexpected, but if the record is larger
 * than a buffer and cannot be decoded in one time, an error is triggered and
 * the connection is closed. GET_VALUES_RESULT record cannot be split.
 */
static int fcgi_conn_handle_values_result(struct fcgi_conn *fconn)
{
	struct buffer inbuf;
	struct buffer *dbuf;
	size_t offset;

	TRACE_ENTER(FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);

	dbuf = &fconn->dbuf;

	/* Record too large to be fully decoded: drl is the record length and
	 * drp its padding, both announced in the record header
	 */
	if (b_size(dbuf) < (fconn->drl + fconn->drp))
		goto fail;

	/* process full record only */
	if (b_data(dbuf) < (fconn->drl + fconn->drp)) {
		TRACE_DEVEL("leaving on missing data", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
		return 0;
	}

	if (unlikely(b_contig_data(dbuf, b_head_ofs(dbuf)) < fconn->drl)) {
		/* Realign the dmux buffer if the record wraps. It is unexpected
		 * at this stage because it should be the first record received
		 * from the FCGI application.
		 */
		b_slow_realign_ofs(dbuf, trash.area, 0);
	}

	/* work on a shadow buffer limited to the record payload (drl bytes) */
	inbuf = b_make(b_head(dbuf), b_data(dbuf), 0, fconn->drl);

	for (offset = 0; offset < b_data(&inbuf); ) {
		struct fcgi_param p;
		size_t ret;

		ret = fcgi_aligned_decode_param(&inbuf, offset, &p);
		if (!ret) {
			/* name or value too large to be decoded at once */
			TRACE_ERROR("error decoding GET_VALUES_RESULT param", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
			goto fail;
		}
		offset += ret;

		if (isteqi(p.n, ist("FCGI_MPXS_CONNS"))) {
			if (isteq(p.v, ist("1"))) {
				TRACE_STATE("set mpxs param", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn, 0, 0, (size_t[]){1});
				fconn->flags |= FCGI_CF_MPXS_CONNS;
			}
			else {
				TRACE_STATE("set mpxs param", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn, 0, 0, (size_t[]){0});
				fconn->flags &= ~FCGI_CF_MPXS_CONNS;
			}
		}
		else if (isteqi(p.n, ist("FCGI_MAX_REQS"))) {
			fconn->streams_limit = strl2ui(p.v.ptr, p.v.len);
			TRACE_STATE("set streams_limit", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn, 0, 0, (size_t[]){fconn->streams_limit});
		}
		/*
		 * Ignore all other params
		 */
	}

	/* Reset the number of concurrent streams supported if the FCGI
	 * application does not support connection multiplexing
	 */
	if (!(fconn->flags & FCGI_CF_MPXS_CONNS)) {
		fconn->streams_limit = 1;
		TRACE_STATE("no mpxs for streams_limit to 1", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
	}

	/* We must be sure to have read exactly the announced record length, no
	 * more no less
	 */
	if (offset != fconn->drl) {
		TRACE_ERROR("invalid GET_VALUES_RESULT record length", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
		goto fail;
	}

	TRACE_PROTO("FCGI GET_VALUES_RESULT record rcvd", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn, 0, 0, (size_t[]){fconn->drl});
	/* consume the record and its padding, then go back waiting for the
	 * next record header; streams created before the handshake completed
	 * may now be processed
	 */
	b_del(&fconn->dbuf, fconn->drl + fconn->drp);
	fconn->drl = 0;
	fconn->drp = 0;
	fconn->state = FCGI_CS_RECORD_H;
	fcgi_wake_unassigned_streams(fconn);
	TRACE_STATE("switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
	TRACE_LEAVE(FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
	return 1;
  fail:
	fconn->state = FCGI_CS_CLOSED;
	TRACE_STATE("switching to CLOSED", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
	TRACE_DEVEL("leaving on error", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
	return 0;
}
+
/* Sends an ABORT_REQUEST record for each active stream. Closed streams are
 * excluded, as are the streams which already received the end-of-stream or
 * already sent an abort. It returns > 0 if the record was sent to all
 * streams. Otherwise it returns 0.
 */
static int fcgi_conn_send_aborts(struct fcgi_conn *fconn)
{
	struct eb32_node *node;
	struct fcgi_strm *fstrm;

	TRACE_ENTER(FCGI_EV_TX_RECORD, fconn->conn);

	/* walk all assigned streams (id >= 1) */
	node = eb32_lookup_ge(&fconn->streams_by_id, 1);
	while (node) {
		fstrm = container_of(node, struct fcgi_strm, by_id);
		node = eb32_next(node);
		/* stop on the first stream for which the abort could not be
		 * sent; FCGI_CF_ABRTS_SENT is not set so this will be retried
		 */
		if (fstrm->state != FCGI_SS_CLOSED &&
		    !(fstrm->flags & (FCGI_SF_ES_RCVD|FCGI_SF_ABRT_SENT)) &&
		    !fcgi_strm_send_abort(fconn, fstrm))
			return 0;
	}
	fconn->flags |= FCGI_CF_ABRTS_SENT;
	TRACE_STATE("aborts sent to all fstrms", FCGI_EV_TX_RECORD, fconn->conn);
	TRACE_LEAVE(FCGI_EV_TX_RECORD, fconn->conn);
	return 1;
}
+
/* Sends a BEGIN_REQUEST record. It returns > 0 on success, 0 if it couldn't do
 * anything. BEGIN_REQUEST record cannot be split. So we wait to have enough
 * space to proceed. It is small enough to be encoded in an empty buffer.
 */
static int fcgi_strm_send_begin_request(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
{
	struct buffer outbuf;
	struct buffer *mbuf;
	struct fcgi_begin_request rec = { .role = FCGI_RESPONDER, .flags = 0};
	int ret;

	TRACE_ENTER(FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);

	mbuf = br_tail(fconn->mbuf);
  retry:
	if (!fcgi_get_buf(fconn, mbuf)) {
		/* no buffer available yet: block and wait for the allocation */
		fconn->flags |= FCGI_CF_MUX_MALLOC;
		fstrm->flags |= FCGI_SF_BLK_MROOM;
		TRACE_STATE("waiting for fconn mbuf ring allocation", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
		ret = 0;
		goto end;
	}

	/* get a contiguous area large enough for at least the record header,
	 * realigning the buffer when needed
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
			break;
	  realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < FCGI_RECORD_HEADER_SZ)
		goto full;

	/* vsn: 1(FCGI_VERSION), type: (1)FCGI_BEGIN_REQUEST, id: fstrm->id,
	 * len: 0x0008, padding: 0x00, rsv: 0x00 */
	memcpy(outbuf.area, "\x01\x01\x00\x00\x00\x08\x00\x00", FCGI_RECORD_HEADER_SZ);
	fcgi_set_record_id(outbuf.area, fstrm->id);
	outbuf.data = FCGI_RECORD_HEADER_SZ;

	if (fconn->flags & FCGI_CF_KEEP_CONN) {
		TRACE_STATE("keep connection opened", FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);
		rec.flags |= FCGI_KEEP_CONN;
	}
	if (!fcgi_encode_begin_request(&outbuf, &rec))
		goto full;

	/* commit the record */
	TRACE_PROTO("FCGI BEGIN_REQUEST record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm, 0, (size_t[]){0});
	b_add(mbuf, outbuf.data);
	fstrm->flags |= FCGI_SF_BEGIN_SENT;
	fstrm->state = FCGI_SS_OPEN;
	TRACE_STATE("switching to OPEN", FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);
	ret = 1;

  end:
	TRACE_LEAVE(FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);
	return ret;
  full:
	/* retry with the next buffer of the ring, if any, otherwise block
	 * until some room is released
	 */
	if ((mbuf = br_tail_add(fconn->mbuf)) != NULL)
		goto retry;
	fconn->flags |= FCGI_CF_MUX_MFULL;
	fstrm->flags |= FCGI_SF_BLK_MROOM;
	TRACE_STATE("mbuf ring full", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn);
	ret = 0;
	goto end;
}
+
/* Sends an empty record of type <rtype>. It returns > 0 on success, 0 if it
 * couldn't do anything. Empty record cannot be split. So we wait to have enough
 * space to proceed. It is small enough to be encoded in an empty buffer.
 */
static int fcgi_strm_send_empty_record(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
				       enum fcgi_record_type rtype)
{
	struct buffer outbuf;
	struct buffer *mbuf;
	int ret;

	TRACE_ENTER(FCGI_EV_TX_RECORD, fconn->conn, fstrm);
	mbuf = br_tail(fconn->mbuf);
  retry:
	if (!fcgi_get_buf(fconn, mbuf)) {
		/* no buffer available yet: block and wait for the allocation */
		fconn->flags |= FCGI_CF_MUX_MALLOC;
		fstrm->flags |= FCGI_SF_BLK_MROOM;
		TRACE_STATE("waiting for fconn mbuf ring allocation", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
		ret = 0;
		goto end;
	}

	/* get a contiguous area large enough for at least the record header,
	 * realigning the buffer when needed
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
			break;
	  realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < FCGI_RECORD_HEADER_SZ)
		goto full;

	/* vsn: 1(FCGI_VERSION), type: rtype, id: fstrm->id,
	 * len: 0x0000, padding: 0x00, rsv: 0x00 */
	memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
	/* the type placeholder (0x05) copied above is overwritten by <rtype> */
	outbuf.area[1] = rtype;
	fcgi_set_record_id(outbuf.area, fstrm->id);
	outbuf.data = FCGI_RECORD_HEADER_SZ;

	/* commit the record */
	b_add(mbuf, outbuf.data);
	ret = 1;

  end:
	TRACE_LEAVE(FCGI_EV_TX_RECORD, fconn->conn, fstrm);
	return ret;
  full:
	/* retry with the next buffer of the ring, if any, otherwise block
	 * until some room is released
	 */
	if ((mbuf = br_tail_add(fconn->mbuf)) != NULL)
		goto retry;
	fconn->flags |= FCGI_CF_MUX_MFULL;
	fstrm->flags |= FCGI_SF_BLK_MROOM;
	TRACE_STATE("mbuf ring full", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
	ret = 0;
	goto end;
}
+
+
+/* Sends an empty PARAMS record. It relies on fcgi_strm_send_empty_record(). It
+ * marks the end of params.
+ */
+static int fcgi_strm_send_empty_params(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
+{
+ int ret;
+
+ TRACE_POINT(FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm);
+ ret = fcgi_strm_send_empty_record(fconn, fstrm, FCGI_PARAMS);
+ if (ret) {
+ fstrm->flags |= FCGI_SF_EP_SENT;
+ TRACE_PROTO("FCGI PARAMS record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){0});
+ }
+ return ret;
+}
+
+/* Sends an empty STDIN record. It relies on fcgi_strm_send_empty_record(). It
+ * marks the end of input. On success, all the request was successfully sent.
+ */
+static int fcgi_strm_send_empty_stdin(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
+{
+ int ret;
+
+ TRACE_POINT(FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN|FCGI_EV_TX_EOI, fconn->conn, fstrm);
+ ret = fcgi_strm_send_empty_record(fconn, fstrm, FCGI_STDIN);
+ if (ret) {
+ fstrm->flags |= FCGI_SF_ES_SENT;
+ TRACE_PROTO("FCGI STDIN record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){0});
+ TRACE_USER("FCGI request fully xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN|FCGI_EV_TX_EOI, fconn->conn, fstrm);
+ TRACE_STATE("stdin data fully sent", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN|FCGI_EV_TX_EOI, fconn->conn, fstrm);
+ }
+ return ret;
+}
+
+/* Sends an ABORT_REQUEST record. It relies on fcgi_strm_send_empty_record(). It
+ * stops the request processing.
+ */
+static int fcgi_strm_send_abort(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
+{
+ int ret;
+
+ TRACE_POINT(FCGI_EV_TX_RECORD|FCGI_EV_TX_ABORT, fconn->conn, fstrm);
+ ret = fcgi_strm_send_empty_record(fconn, fstrm, FCGI_ABORT_REQUEST);
+ if (ret) {
+ fstrm->flags |= FCGI_SF_ABRT_SENT;
+ TRACE_PROTO("FCGI ABORT record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_ABORT, fconn->conn, fstrm, 0, (size_t[]){0});
+ TRACE_USER("FCGI request aborted", FCGI_EV_TX_RECORD|FCGI_EV_TX_ABORT, fconn->conn, fstrm);
+ TRACE_STATE("abort sent", FCGI_EV_TX_RECORD|FCGI_EV_TX_ABORT, fconn->conn, fstrm);
+ }
+ return ret;
+}
+
+/* Sends a PARAMS record. Returns > 0 on success, 0 if it couldn't do
+ * anything. If there are too much K/V params to be encoded in a PARAMS record,
+ * several records are sent. However, a K/V param cannot be split between 2
+ * records.
+ */
/* Sends a PARAMS record for stream <fstrm>, consuming the request headers
 * found in <htx>. Returns the number of bytes consumed from <htx>, or 0 if
 * nothing could be done (blocked on output buffer allocation or room; the
 * corresponding blocking flags are then set and the call is retried later).
 *
 * HTTP header blocks are converted to FCGI K/V params on the fly:
 *   - ":fcgi-*" pseudo headers set or force well-known params and are
 *     recorded in params.mask so defaults don't override them;
 *   - "content-length"/"content-type" map to CONTENT_LENGTH/CONTENT_TYPE;
 *   - any other header is sent with an "http_" prefix.
 * Params not found in the message are then completed with default values
 * (fcgi_set_default_param() / fcgi_encode_default_param()). On processing
 * error, HTX_FL_PROCESSING_ERROR is set on <htx> and the stream is switched
 * to the error state.
 */
static size_t fcgi_strm_send_params(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
				    struct htx *htx)
{
	struct buffer outbuf;
	struct buffer *mbuf;
	struct htx_blk *blk;
	struct htx_sl *sl = NULL;
	struct fcgi_strm_params params;
	size_t total = 0;

	TRACE_ENTER(FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx);

	memset(&params, 0, sizeof(params));
	params.p = get_trash_chunk();

	mbuf = br_tail(fconn->mbuf);
  retry:
	if (!fcgi_get_buf(fconn, mbuf)) {
		/* no output buffer available: block and retry once one is allocated */
		fconn->flags |= FCGI_CF_MUX_MALLOC;
		fstrm->flags |= FCGI_SF_BLK_MROOM;
		TRACE_STATE("waiting for fconn mbuf ring allocation", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
		goto end;
	}

	/* get a contiguous area at least large enough for the record header,
	 * realigning the buffer when the free space wraps
	 */
	while (1) {
		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
		if (outbuf.size >= FCGI_RECORD_HEADER_SZ || !b_space_wraps(mbuf))
			break;
	  realign_again:
		b_slow_realign(mbuf, trash.area, b_data(mbuf));
	}

	if (outbuf.size < FCGI_RECORD_HEADER_SZ)
		goto full;

	/* vsn: 1(FCGI_VERSION), type: (4)FCGI_PARAMS, id: fstrm->id,
	 * len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
	memcpy(outbuf.area, "\x01\x04\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
	fcgi_set_record_id(outbuf.area, fstrm->id);
	outbuf.data = FCGI_RECORD_HEADER_SZ;

	blk = htx_get_head_blk(htx);
	while (blk) {
		enum htx_blk_type type;
		uint32_t size = htx_get_blksz(blk);
		struct fcgi_param p;

		type = htx_get_blk_type(blk);
		switch (type) {
		case HTX_BLK_REQ_SL:
			/* start-line: remember method and HTTP version for the
			 * internal H1 parser state
			 */
			sl = htx_get_blk_ptr(htx, blk);
			if (sl->info.req.meth == HTTP_METH_HEAD)
				fstrm->h1m.flags |= H1_MF_METH_HEAD;
			if (sl->flags & HTX_SL_F_VER_11)
				fstrm->h1m.flags |= H1_MF_VER_11;
			break;

		case HTX_BLK_HDR:
			p.n = htx_get_blk_name(htx, blk);
			p.v = htx_get_blk_value(htx, blk);

			if (istmatch(p.n, ist(":fcgi-"))) {
				/* pseudo header: set/force a well-known param and flag
				 * it in the mask so defaults don't override it
				 */
				p.n = istadv(p.n, 6);
				if (isteq(p.n, ist("gateway_interface")))
					params.mask |= FCGI_SP_CGI_GATEWAY;
				else if (isteq(p.n, ist("document_root"))) {
					params.mask |= FCGI_SP_DOC_ROOT;
					params.docroot = p.v;
				}
				else if (isteq(p.n, ist("script_name"))) {
					params.mask |= FCGI_SP_SCRIPT_NAME;
					params.scriptname = p.v;
				}
				else if (isteq(p.n, ist("path_info"))) {
					params.mask |= FCGI_SP_PATH_INFO;
					params.pathinfo = p.v;
				}
				else if (isteq(p.n, ist("request_uri"))) {
					params.mask |= FCGI_SP_REQ_URI;
					params.uri = p.v;
				}
				else if (isteq(p.n, ist("request_meth")))
					params.mask |= FCGI_SP_REQ_METH;
				else if (isteq(p.n, ist("query_string")))
					params.mask |= FCGI_SP_REQ_QS;
				else if (isteq(p.n, ist("server_name")))
					params.mask |= FCGI_SP_SRV_NAME;
				else if (isteq(p.n, ist("server_port")))
					params.mask |= FCGI_SP_SRV_PORT;
				else if (isteq(p.n, ist("server_protocol")))
					params.mask |= FCGI_SP_SRV_PROTO;
				else if (isteq(p.n, ist("remote_addr")))
					params.mask |= FCGI_SP_REM_ADDR;
				else if (isteq(p.n, ist("remote_port")))
					params.mask |= FCGI_SP_REM_PORT;
				else if (isteq(p.n, ist("script_filename")))
					params.mask |= FCGI_SP_SCRIPT_FILE;
				else if (isteq(p.n, ist("path_translated")))
					params.mask |= FCGI_SP_PATH_TRANS;
				else if (isteq(p.n, ist("https")))
					params.mask |= FCGI_SP_HTTPS;
				else if (isteq(p.n, ist("server_software")))
					params.mask |= FCGI_SP_SRV_SOFT;
			}
			else if (isteq(p.n, ist("content-length"))) {
				p.n = ist("CONTENT_LENGTH");
				params.mask |= FCGI_SP_CONT_LEN;
			}
			else if (isteq(p.n, ist("content-type")))
				p.n = ist("CONTENT_TYPE");
			else {
				struct ist n;

				if (isteq(p.n, ist("host")))
					params.srv_name = p.v;
				else if (isteq(p.n, ist("te"))) {
					/* "te" may only be sent with "trailers" if this value
					 * is present, otherwise it must be deleted.
					 * (the break skips the header: it is removed below
					 * without being encoded)
					 */
					p.v = istist(p.v, ist("trailers"));
					if (!isttest(p.v) || (p.v.len > 8 && p.v.ptr[8] != ','))
						break;
					p.v = ist("trailers");
				}

				/* Skip header if same name is used to add the server name */
				if (isttest(fconn->proxy->server_id_hdr_name) && isteq(p.n, fconn->proxy->server_id_hdr_name))
					break;

				/* regular header: prefix its name with "http_", built
				 * in the trash buffer
				 */
				n = ist2(trash.area, 0);
				istcat(&n, ist("http_"), trash.size);
				istcat(&n, p.n, trash.size);
				p.n = n;
			}

			if (!fcgi_encode_param(&outbuf, &p)) {
				/* not enough contiguous room: realign if possible,
				 * report "full" if nothing was encoded yet, otherwise
				 * close this record and continue in the next one
				 */
				if (b_space_wraps(mbuf))
					goto realign_again;
				if (outbuf.data == FCGI_RECORD_HEADER_SZ)
					goto full;
				goto done;
			}
			break;

		case HTX_BLK_EOH:
			if (isttest(fconn->proxy->server_id_hdr_name)) {
				struct server *srv = objt_server(fconn->conn->target);

				if (!srv)
					goto done;

				/* append the configured server-id header, "http_" prefixed */
				p.n = ist2(trash.area, 0);
				istcat(&p.n, ist("http_"), trash.size);
				istcat(&p.n, fconn->proxy->server_id_hdr_name, trash.size);
				p.v = ist(srv->id);

				if (!fcgi_encode_param(&outbuf, &p)) {
					if (b_space_wraps(mbuf))
						goto realign_again;
					if (outbuf.data == FCGI_RECORD_HEADER_SZ)
						goto full;
				}
				TRACE_STATE("add server name header", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm);
			}
			goto done;

		default:
			break;
		}
		total += size;
		blk = htx_remove_blk(htx, blk);
	}

  done:
	/* complete the params not provided by the message with defaults */
	if (!fcgi_set_default_param(fconn, fstrm, htx, sl, &params)) {
		TRACE_ERROR("error setting default params", FCGI_EV_TX_RECORD|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
		goto error;
	}

	if (!fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_CGI_GATEWAY) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_DOC_ROOT) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SCRIPT_NAME) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_PATH_INFO) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_REQ_URI) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_REQ_METH) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_REQ_QS) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SRV_NAME) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SRV_PORT) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SRV_PROTO) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_REM_ADDR) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_REM_PORT) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SCRIPT_FILE) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_PATH_TRANS) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_CONT_LEN) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_SRV_SOFT) ||
	    !fcgi_encode_default_param(fconn, fstrm, &params, &outbuf, FCGI_SP_HTTPS)) {
		TRACE_ERROR("error encoding default params", FCGI_EV_TX_RECORD|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
		goto error;
	}

	/* update the record's size */
	TRACE_PROTO("FCGI PARAMS record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - FCGI_RECORD_HEADER_SZ});
	fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
	b_add(mbuf, outbuf.data);

  end:
	TRACE_LEAVE(FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx, (size_t[]){total});
	return total;
  full:
	/* try to allocate another ring buffer; if not possible, block the mux */
	if ((mbuf = br_tail_add(fconn->mbuf)) != NULL)
		goto retry;
	fconn->flags |= FCGI_CF_MUX_MFULL;
	fstrm->flags |= FCGI_SF_BLK_MROOM;
	TRACE_STATE("mbuf ring full", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
	/* a K/V param cannot be split between two PARAMS records: if part of
	 * the headers was already consumed from <htx>, we cannot retry later
	 */
	if (total)
		goto error;
	goto end;

  error:
	htx->flags |= HTX_FL_PROCESSING_ERROR;
	TRACE_ERROR("processing error sending PARAMS record", FCGI_EV_TX_RECORD|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
	fcgi_strm_error(fstrm);
	goto end;
}
+
+/* Sends a STDIN record. Returns > 0 on success, 0 if it couldn't do
+ * anything. STDIN records contain the request body.
+ */
+static size_t fcgi_strm_send_stdin(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
+ struct htx *htx, size_t count, struct buffer *buf)
+{
+ struct buffer outbuf;
+ struct buffer *mbuf;
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ uint32_t size, extra_bytes;
+ size_t total = 0;
+
+ extra_bytes = 0;
+
+ TRACE_ENTER(FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){count});
+ if (!count)
+ goto end;
+
+ mbuf = br_tail(fconn->mbuf);
+ retry:
+ if (!fcgi_get_buf(fconn, mbuf)) {
+ fconn->flags |= FCGI_CF_MUX_MALLOC;
+ fstrm->flags |= FCGI_SF_BLK_MROOM;
+ TRACE_STATE("waiting for fconn mbuf ring allocation", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
+ goto end;
+ }
+
+ /* Perform some optimizations to reduce the number of buffer copies.
+ * First, if the mux's buffer is empty and the htx area contains exactly
+ * one data block of the same size as the requested count, and this
+ * count fits within the record size, then it's possible to simply swap
+ * the caller's buffer with the mux's output buffer and adjust offsets
+ * and length to match the entire DATA HTX block in the middle. In this
+ * case we perform a true zero-copy operation from end-to-end. This is
+ * the situation that happens all the time with large files. Second, if
+ * this is not possible, but the mux's output buffer is empty, we still
+ * have an opportunity to avoid the copy to the intermediary buffer, by
+ * making the intermediary buffer's area point to the output buffer's
+ * area. In this case we want to skip the HTX header to make sure that
+ * copies remain aligned and that this operation remains possible all
+ * the time. This goes for headers, data blocks and any data extracted
+ * from the HTX blocks.
+ */
+ blk = htx_get_head_blk(htx);
+ if (!blk)
+ goto end;
+ type = htx_get_blk_type(blk);
+ size = htx_get_blksz(blk);
+ if (unlikely(size == count && htx_nbblks(htx) == 1 && type == HTX_BLK_DATA)) {
+ void *old_area = mbuf->area;
+ int eom = (htx->flags & HTX_FL_EOM);
+
+ /* Last block of the message: Reserve the size for the empty stdin record */
+ if (eom)
+ extra_bytes = FCGI_RECORD_HEADER_SZ;
+
+ if (b_data(mbuf)) {
+ /* Too bad there are data left there. We're willing to memcpy/memmove
+ * up to 1/4 of the buffer, which means that it's OK to copy a large
+ * record into a buffer containing few data if it needs to be realigned,
+ * and that it's also OK to copy few data without realigning. Otherwise
+ * we'll pretend the mbuf is full and wait for it to become empty.
+ */
+ if (size + FCGI_RECORD_HEADER_SZ + extra_bytes <= b_room(mbuf) &&
+ (b_data(mbuf) <= b_size(mbuf) / 4 ||
+ (size <= b_size(mbuf) / 4 && size + FCGI_RECORD_HEADER_SZ + extra_bytes <= b_contig_space(mbuf))))
+ goto copy;
+ goto full;
+ }
+
+ TRACE_PROTO("sending stding data (zero-copy)", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){size});
+ /* map a FCGI record to the HTX block so that we can put the
+ * record header there.
+ */
+ *mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - FCGI_RECORD_HEADER_SZ, size + FCGI_RECORD_HEADER_SZ);
+ outbuf.area = b_head(mbuf);
+
+ /* prepend a FCGI record header just before the DATA block */
+ memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
+ fcgi_set_record_id(outbuf.area, fstrm->id);
+ fcgi_set_record_size(outbuf.area, size);
+
+ /* and exchange with our old area */
+ buf->area = old_area;
+ buf->data = buf->head = 0;
+ total += size;
+
+ htx = (struct htx *)buf->area;
+ htx_reset(htx);
+ if (eom)
+ goto empty_stdin;
+ goto end;
+ }
+
+ copy:
+ while (1) {
+ outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
+ if (outbuf.size >= FCGI_RECORD_HEADER_SZ + extra_bytes || !b_space_wraps(mbuf))
+ break;
+ realign_again:
+ b_slow_realign(mbuf, trash.area, b_data(mbuf));
+ }
+
+ if (outbuf.size < FCGI_RECORD_HEADER_SZ + extra_bytes)
+ goto full;
+
+ /* vsn: 1(FCGI_VERSION), type: (5)FCGI_STDIN, id: fstrm->id,
+ * len: 0x0000 (fill later), padding: 0x00, rsv: 0x00 */
+ memcpy(outbuf.area, "\x01\x05\x00\x00\x00\x00\x00\x00", FCGI_RECORD_HEADER_SZ);
+ fcgi_set_record_id(outbuf.area, fstrm->id);
+ outbuf.data = FCGI_RECORD_HEADER_SZ;
+
+ blk = htx_get_head_blk(htx);
+ while (blk && count) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+ uint32_t size = htx_get_blksz(blk);
+ struct ist v;
+
+ switch (type) {
+ case HTX_BLK_DATA:
+ TRACE_PROTO("sending stding data", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){size});
+ v = htx_get_blk_value(htx, blk);
+
+ if (htx_is_unique_blk(htx, blk) && (htx->flags & HTX_FL_EOM))
+ extra_bytes = FCGI_RECORD_HEADER_SZ; /* Last block of the message */
+
+ if (v.len > count) {
+ v.len = count;
+ extra_bytes = 0;
+ }
+
+ if (v.len + FCGI_RECORD_HEADER_SZ + extra_bytes > b_room(&outbuf)) {
+ /* It doesn't fit at once. If it at least fits once split and
+ * the amount of data to move is low, let's defragment the
+ * buffer now.
+ */
+ if (b_space_wraps(mbuf) &&
+ b_data(&outbuf) + v.len + extra_bytes <= b_room(mbuf) &&
+ b_data(mbuf) <= MAX_DATA_REALIGN)
+ goto realign_again;
+ v.len = (FCGI_RECORD_HEADER_SZ + extra_bytes > b_room(&outbuf)
+ ? 0
+ : b_room(&outbuf) - FCGI_RECORD_HEADER_SZ - extra_bytes);
+ }
+ if (!v.len || !chunk_memcat(&outbuf, v.ptr, v.len)) {
+ if (outbuf.data == FCGI_RECORD_HEADER_SZ)
+ goto full;
+ goto done;
+ }
+ if (v.len != size) {
+ total += v.len;
+ count -= v.len;
+ htx_cut_data_blk(htx, blk, v.len);
+ goto done;
+ }
+ break;
+
+ default:
+ break;
+ }
+ total += size;
+ count -= size;
+ blk = htx_remove_blk(htx, blk);
+ }
+
+ done:
+ /* update the record's size */
+ TRACE_PROTO("FCGI STDIN record xferred", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, 0, (size_t[]){outbuf.data - FCGI_RECORD_HEADER_SZ});
+ fcgi_set_record_size(outbuf.area, outbuf.data - FCGI_RECORD_HEADER_SZ);
+ b_add(mbuf, outbuf.data);
+
+ /* Send the empty stding here to finish the message */
+ if (htx_is_empty(htx) && (htx->flags & HTX_FL_EOM)) {
+ empty_stdin:
+ TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
+ if (!fcgi_strm_send_empty_stdin(fconn, fstrm)) {
+ /* bytes already reserved for this record. It should not fail */
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ TRACE_ERROR("processing error sending empty STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+ fcgi_strm_error(fstrm);
+ }
+ }
+
+ end:
+ TRACE_LEAVE(FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx, (size_t[]){total});
+ return total;
+ full:
+ if ((mbuf = br_tail_add(fconn->mbuf)) != NULL)
+ goto retry;
+ fconn->flags |= FCGI_CF_MUX_MFULL;
+ fstrm->flags |= FCGI_SF_BLK_MROOM;
+ TRACE_STATE("mbuf ring full", FCGI_EV_TX_RECORD|FCGI_EV_FSTRM_BLK|FCGI_EV_FCONN_BLK, fconn->conn, fstrm);
+ goto end;
+}
+
/* Processes a STDOUT record. Returns > 0 on success, 0 if it couldn't do
 * anything. STDOUT records contain the entire response. All the content is
 * copied in the stream's rxbuf. The parsing will be handled in fcgi_rcv_buf().
 * The connection-level counters fconn->drl/drp (remaining record payload and
 * padding) are decremented as data are consumed; once both reach zero the
 * demuxer switches back to RECORD_H to read the next record header.
 */
static int fcgi_strm_handle_stdout(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
{
	struct buffer *dbuf;
	size_t ret;
	size_t max;

	TRACE_ENTER(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);

	dbuf = &fconn->dbuf;

	/* Only padding remains */
	if (fconn->state == FCGI_CS_RECORD_P)
		goto end_transfer;

	/* the record is incomplete but could still fit in the demux buffer
	 * and more data may arrive: wait for the rest
	 */
	if (b_data(dbuf) < (fconn->drl + fconn->drp) &&
	    b_size(dbuf) > (fconn->drl + fconn->drp) &&
	    buf_room_for_htx_data(dbuf))
		goto fail; // incomplete record

	if (!fcgi_get_buf(fconn, &fstrm->rxbuf)) {
		fconn->flags |= FCGI_CF_DEM_SALLOC;
		TRACE_STATE("waiting for fstrm rxbuf allocation", FCGI_EV_RX_RECORD|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
		goto fail;
	}

	/*max = MIN(b_room(&fstrm->rxbuf), fconn->drl);*/
	max = buf_room_for_htx_data(&fstrm->rxbuf);
	if (!b_data(&fstrm->rxbuf))
		/* empty rxbuf: reserve room for the HTX header at the front */
		fstrm->rxbuf.head = sizeof(struct htx);
	if (max > fconn->drl)
		max = fconn->drl;

	ret = b_xfer(&fstrm->rxbuf, dbuf, max);
	if (!ret)
		goto fail;
	fconn->drl -= ret;
	TRACE_DATA("move some data to fstrm rxbuf", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm, 0, (size_t[]){ret});
	TRACE_PROTO("FCGI STDOUT record rcvd", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm, 0, (size_t[]){ret});

	if (!buf_room_for_htx_data(&fstrm->rxbuf)) {
		fconn->flags |= FCGI_CF_DEM_SFULL;
		TRACE_STATE("fstrm rxbuf full", FCGI_EV_RX_RECORD|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
	}

	/* payload not fully transferred yet: stay in RECORD_D */
	if (fconn->drl)
		goto fail;

  end_transfer:
	/* payload done: drain the padding, then go back to RECORD_H */
	fconn->state = FCGI_CS_RECORD_P;
	fconn->drl += fconn->drp;
	fconn->drp = 0;
	ret = MIN(b_data(&fconn->dbuf), fconn->drl);
	b_del(&fconn->dbuf, ret);
	fconn->drl -= ret;
	if (fconn->drl)
		goto fail;

	fconn->state = FCGI_CS_RECORD_H;
	TRACE_STATE("switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn, fstrm);
	TRACE_LEAVE(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
	return 1;
  fail:
	TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
	return 0;
}
+
+
+/* Processes an empty STDOUT. Returns > 0 on success, 0 if it couldn't do
+ * anything. It only skip the padding in fact, there is no payload for such
+ * records. It marks the end of the response.
+ */
+static int fcgi_strm_handle_empty_stdout(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
+{
+ int ret;
+
+ TRACE_ENTER(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
+
+ fconn->state = FCGI_CS_RECORD_P;
+ TRACE_STATE("switching to RECORD_P", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
+ fconn->drl += fconn->drp;
+ fconn->drp = 0;
+ ret = MIN(b_data(&fconn->dbuf), fconn->drl);
+ b_del(&fconn->dbuf, ret);
+ fconn->drl -= ret;
+ if (fconn->drl) {
+ TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
+ return 0;
+ }
+ fconn->state = FCGI_CS_RECORD_H;
+ fstrm->flags |= FCGI_SF_ES_RCVD;
+ TRACE_PROTO("FCGI STDOUT record rcvd", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm, 0, (size_t[]){0});
+ TRACE_STATE("stdout data fully send, switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR|FCGI_EV_RX_EOI, fconn->conn, fstrm);
+ TRACE_LEAVE(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
+ return 1;
+}
+
/* Processes a STDERR record. Returns > 0 on success, 0 if it couldn't do
 * anything. The record's payload is copied into the trash chunk and emitted
 * as an LOG_ERR message on the application's loggers, tagged with the FCGI
 * application name. The remaining payload/padding accounting (drl/drp) is
 * handled the same way as for STDOUT records.
 */
static int fcgi_strm_handle_stderr(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
{
	struct buffer *dbuf;
	struct buffer tag;
	size_t ret;

	TRACE_ENTER(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDERR, fconn->conn, fstrm);
	dbuf = &fconn->dbuf;

	/* Only padding remains */
	if (fconn->state == FCGI_CS_RECORD_P || !fconn->drl)
		goto end_transfer;

	/* the record is incomplete but could still fit in the demux buffer
	 * and more data may arrive: wait for the rest
	 */
	if (b_data(dbuf) < (fconn->drl + fconn->drp) &&
	    b_size(dbuf) > (fconn->drl + fconn->drp) &&
	    buf_room_for_htx_data(dbuf))
		goto fail; // incomplete record

	chunk_reset(&trash);
	ret = b_force_xfer(&trash, dbuf, MIN(b_room(&trash), fconn->drl));
	if (!ret)
		goto fail;
	fconn->drl -= ret;
	TRACE_PROTO("FCGI STDERR record rcvd", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDERR, fconn->conn, fstrm, 0, (size_t[]){ret});

	/* NOTE(review): this writes at indexes <ret> and <ret+1> while up to
	 * b_room(&trash) bytes may have been transferred — assumes the trash
	 * chunk keeps at least 2 bytes of headroom for this; confirm against
	 * the trash buffer allocation.
	 */
	trash.area[ret]   = '\n';
	trash.area[ret+1] = '\0';
	tag.area = fconn->app->name; tag.data = strlen(fconn->app->name);
	app_log(&fconn->app->loggers, &tag, LOG_ERR, "%s", trash.area);

	if (fconn->drl)
		goto fail;

  end_transfer:
	/* drain the padding, then go back to RECORD_H */
	fconn->state = FCGI_CS_RECORD_P;
	fconn->drl += fconn->drp;
	fconn->drp = 0;
	ret = MIN(b_data(&fconn->dbuf), fconn->drl);
	b_del(&fconn->dbuf, ret);
	fconn->drl -= ret;
	if (fconn->drl)
		goto fail;
	fconn->state = FCGI_CS_RECORD_H;
	TRACE_STATE("switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn, fstrm);
	TRACE_LEAVE(FCGI_EV_RX_RECORD|FCGI_EV_RX_STDERR, fconn->conn, fstrm);
	return 1;
  fail:
	TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDERR, fconn->conn, fstrm);
	return 0;
}
+
/* Processes an END_REQUEST record. Returns > 0 on success, 0 if it couldn't do
 * anything. If the empty STDOUT record is not already received, this one marks
 * the end of the response. It is highly unexpected, but if the record is larger
 * than a buffer and cannot be decoded in one time, an error is triggered and
 * the connection is closed. END_REQUEST record cannot be split.
 * On success, the application's status code is stored in fstrm->proto_status,
 * the stream is closed and the demuxer switches back to RECORD_H.
 */
static int fcgi_strm_handle_end_request(struct fcgi_conn *fconn, struct fcgi_strm *fstrm)
{
	struct buffer inbuf;
	struct buffer *dbuf;
	struct fcgi_end_request endreq;

	TRACE_ENTER(FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ, fconn->conn, fstrm);
	dbuf = &fconn->dbuf;

	/* Record too large to be fully decoded */
	if (b_size(dbuf) < (fconn->drl + fconn->drp)) {
		TRACE_ERROR("END_REQUEST record too large", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ|FCGI_EV_FSTRM_ERR, fconn->conn, fstrm);
		goto fail;
	}

	/* process full record only */
	if (b_data(dbuf) < (fconn->drl + fconn->drp)) {
		TRACE_DEVEL("leaving on missing data", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ, fconn->conn);
		return 0;
	}

	if (unlikely(b_contig_data(dbuf, b_head_ofs(dbuf)) < fconn->drl)) {
		/* Realign the dmux buffer if the record wraps. It is unexpected
		 * at this stage because it should be the first record received
		 * from the FCGI application.
		 */
		b_slow_realign_ofs(dbuf, trash.area, 0);
	}

	/* decode the record in place from the (now contiguous) demux buffer */
	inbuf = b_make(b_head(dbuf), b_data(dbuf), 0, fconn->drl);

	if (!fcgi_decode_end_request(&inbuf, 0, &endreq)) {
		TRACE_ERROR("END_REQUEST record decoding failure", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ|FCGI_EV_FSTRM_ERR, fconn->conn, fstrm);
		goto fail;
	}

	fstrm->flags |= FCGI_SF_ES_RCVD;
	TRACE_STATE("end of script reported", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ|FCGI_EV_RX_EOI, fconn->conn, fstrm);
	TRACE_PROTO("FCGI END_REQUEST record rcvd", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ, fconn->conn, fstrm, 0, (size_t[]){fconn->drl});
	fstrm->proto_status = endreq.errcode;
	fcgi_strm_close(fstrm);

	/* consume payload and padding at once, then read the next header */
	b_del(&fconn->dbuf, fconn->drl + fconn->drp);
	fconn->drl = 0;
	fconn->drp = 0;
	fconn->state = FCGI_CS_RECORD_H;
	TRACE_STATE("switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn, fstrm);
	TRACE_LEAVE(FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ, fconn->conn, fstrm);
	return 1;

  fail:
	fcgi_strm_error(fstrm);
	TRACE_DEVEL("leaving on error", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ|FCGI_EV_FSTRM_ERR, fconn->conn, fstrm);
	return 0;
}
+
/* process Rx records to be demultiplexed.
 * Loops over the demux buffer, decoding one record header at a time
 * (fconn->dsi = stream id, drt = record type, drl = payload length,
 * drp = padding length) and dispatching the payload to the per-type
 * handlers. Records for closed or unknown streams, and types with no
 * handler, are drained and ignored. Before leaving (and before switching
 * to another stream id), the upper layer of the current stream is woken
 * up when it has something to consume.
 */
static void fcgi_process_demux(struct fcgi_conn *fconn)
{
	struct fcgi_strm *fstrm = NULL, *tmp_fstrm;
	struct fcgi_header hdr;
	int ret;

	TRACE_ENTER(FCGI_EV_FCONN_WAKE, fconn->conn);

	if (fconn->state == FCGI_CS_CLOSED)
		return;

	if (unlikely(fconn->state < FCGI_CS_RECORD_H)) {
		if (fconn->state == FCGI_CS_INIT) {
			/* our own GET_VALUES record was not sent yet: nothing to demux */
			TRACE_STATE("waiting FCGI GET_VALUES to be sent", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR|FCGI_EV_RX_GETVAL, fconn->conn);
			return;
		}
		if (fconn->state == FCGI_CS_SETTINGS) {
			/* ensure that what is pending is a valid GET_VALUES_RESULT record. */
			TRACE_STATE("receiving FCGI record header", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
			ret = fcgi_decode_record_hdr(&fconn->dbuf, 0, &hdr);
			if (!ret) {
				TRACE_ERROR("header record decoding failure", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ|FCGI_EV_FSTRM_ERR, fconn->conn, fstrm);
				goto fail;
			}
			b_del(&fconn->dbuf, ret);

			if (hdr.id || (hdr.type != FCGI_GET_VALUES_RESULT && hdr.type != FCGI_UNKNOWN_TYPE)) {
				fconn->state = FCGI_CS_CLOSED;
				TRACE_ERROR("unexpected record type or flags", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR|FCGI_EV_RX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
				TRACE_STATE("switching to CLOSED", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR|FCGI_EV_RX_GETVAL|FCGI_EV_FCONN_ERR, fconn->conn);
				goto fail;
			}
			goto new_record;
		}
	}

	/* process as many incoming records as possible below */
	while (1) {
		if (!b_data(&fconn->dbuf)) {
			TRACE_DEVEL("no more Rx data", FCGI_EV_RX_RECORD, fconn->conn);
			break;
		}

		if (fconn->state == FCGI_CS_CLOSED) {
			TRACE_STATE("end of connection reported", FCGI_EV_RX_RECORD|FCGI_EV_RX_EOI, fconn->conn);
			break;
		}

		if (fconn->state == FCGI_CS_RECORD_H) {
			TRACE_PROTO("receiving FCGI record header", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
			ret = fcgi_decode_record_hdr(&fconn->dbuf, 0, &hdr);
			if (!ret)
				break;
			b_del(&fconn->dbuf, ret);

		  new_record:
			/* store the decoded header fields in the connection
			 * and start consuming the record payload
			 */
			fconn->dsi = hdr.id;
			fconn->drt = hdr.type;
			fconn->drl = hdr.len;
			fconn->drp = hdr.padding;
			fconn->state = FCGI_CS_RECORD_D;
			TRACE_STATE("FCGI record header rcvd, switching to RECORD_D", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
		}

		/* Only FCGI_CS_RECORD_D or FCGI_CS_RECORD_P */
		tmp_fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi);

		if (tmp_fstrm != fstrm && fstrm && fcgi_strm_sc(fstrm) &&
		    (b_data(&fstrm->rxbuf) ||
		     fcgi_conn_read0_pending(fconn) ||
		     fstrm->state == FCGI_SS_CLOSED ||
		     (fstrm->flags & FCGI_SF_ES_RCVD) ||
		     se_fl_test(fstrm->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
			/* we may have to signal the upper layers */
			TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
			se_fl_set(fstrm->sd, SE_FL_RCV_MORE);
			fcgi_strm_notify_recv(fstrm);
		}
		fstrm = tmp_fstrm;

		if (fstrm->state == FCGI_SS_CLOSED && fconn->dsi != 0) {
			/* ignore all record for closed streams */
			goto ignore_record;
		}
		if (fstrm->state == FCGI_SS_IDLE) {
			/* ignore all record for unknown streams */
			goto ignore_record;
		}

		switch (fconn->drt) {
		case FCGI_GET_VALUES_RESULT:
			TRACE_PROTO("receiving FCGI GET_VALUES_RESULT record", FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
			ret = fcgi_conn_handle_values_result(fconn);
			break;

		case FCGI_STDOUT:
			/* the end of the response was already seen: drop */
			if (fstrm->flags & FCGI_SF_ES_RCVD)
				goto ignore_record;

			TRACE_PROTO("receiving FCGI STDOUT record", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDOUT, fconn->conn, fstrm);
			if (fconn->drl)
				ret = fcgi_strm_handle_stdout(fconn, fstrm);
			else
				ret = fcgi_strm_handle_empty_stdout(fconn, fstrm);
			break;

		case FCGI_STDERR:
			TRACE_PROTO("receiving FCGI STDERR record", FCGI_EV_RX_RECORD|FCGI_EV_RX_STDERR, fconn->conn, fstrm);
			ret = fcgi_strm_handle_stderr(fconn, fstrm);
			break;

		case FCGI_END_REQUEST:
			TRACE_PROTO("receiving FCGI END_REQUEST record", FCGI_EV_RX_RECORD|FCGI_EV_RX_ENDREQ, fconn->conn, fstrm);
			ret = fcgi_strm_handle_end_request(fconn, fstrm);
			break;

		/* implement all extra record types here */
		default:
		  ignore_record:
			/* drop records that we ignore. They may be
			 * larger than the buffer so we drain all of
			 * their contents until we reach the end.
			 */
			fconn->state = FCGI_CS_RECORD_P;
			fconn->drl += fconn->drp;
			fconn->drp = 0;
			ret = MIN(b_data(&fconn->dbuf), fconn->drl);
			TRACE_PROTO("receiving FCGI ignored record", FCGI_EV_RX_RECORD, fconn->conn, fstrm, 0, (size_t[]){ret});
			TRACE_STATE("switching to RECORD_P", FCGI_EV_RX_RECORD, fconn->conn, fstrm);
			b_del(&fconn->dbuf, ret);
			fconn->drl -= ret;
			ret = (fconn->drl == 0);
		}

		/* error or missing data condition met above ? */
		if (ret <= 0) {
			TRACE_DEVEL("insufficient data to proceed", FCGI_EV_RX_RECORD, fconn->conn, fstrm);
			break;
		}

		if (fconn->state != FCGI_CS_RECORD_H && !(fconn->drl+fconn->drp)) {
			fconn->state = FCGI_CS_RECORD_H;
			TRACE_STATE("switching to RECORD_H", FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
		}
	}

  fail:
	/* we can go here on missing data, blocked response or error */
	if (fstrm && fcgi_strm_sc(fstrm) &&
	    (b_data(&fstrm->rxbuf) ||
	     fcgi_conn_read0_pending(fconn) ||
	     fstrm->state == FCGI_SS_CLOSED ||
	     (fstrm->flags & FCGI_SF_ES_RCVD) ||
	     se_fl_test(fstrm->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
		/* we may have to signal the upper layers */
		TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
		se_fl_set(fstrm->sd, SE_FL_RCV_MORE);
		fcgi_strm_notify_recv(fstrm);
	}

	fcgi_conn_restart_reading(fconn, 0);
}
+
/* process Tx records from streams to be multiplexed. Returns > 0 if it reached
 * the end.
 * On a fresh connection, first emits the GET_VALUES record (when configured)
 * and waits for its result before letting streams send. Then wakes up each
 * stream registered in the send_list so it performs its own emission, until
 * the connection is closed or the mux is blocked (buffer full/alloc).
 */
static int fcgi_process_mux(struct fcgi_conn *fconn)
{
	struct fcgi_strm *fstrm, *fstrm_back;

	TRACE_ENTER(FCGI_EV_FCONN_WAKE, fconn->conn);

	if (unlikely(fconn->state < FCGI_CS_RECORD_H)) {
		if (unlikely(fconn->state == FCGI_CS_INIT)) {
			if (!(fconn->flags & FCGI_CF_GET_VALUES)) {
				/* no GET_VALUES to send: go straight to the record loop */
				fconn->state = FCGI_CS_RECORD_H;
				TRACE_STATE("switching to RECORD_H", FCGI_EV_TX_RECORD|FCGI_EV_RX_RECORD|FCGI_EV_RX_FHDR, fconn->conn);
				fcgi_wake_unassigned_streams(fconn);
				goto mux;
			}
			TRACE_PROTO("sending FCGI GET_VALUES record", FCGI_EV_TX_RECORD|FCGI_EV_TX_GETVAL, fconn->conn);
			if (unlikely(!fcgi_conn_send_get_values(fconn)))
				goto fail;
			fconn->state = FCGI_CS_SETTINGS;
			TRACE_STATE("switching to SETTINGS", FCGI_EV_TX_RECORD|FCGI_EV_RX_RECORD|FCGI_EV_RX_GETVAL, fconn->conn);
		}
		/* need to wait for the other side */
		if (fconn->state < FCGI_CS_RECORD_H)
			goto done;
	}

  mux:
	list_for_each_entry_safe(fstrm, fstrm_back, &fconn->send_list, send_list) {
		if (fconn->state == FCGI_CS_CLOSED || fconn->flags & FCGI_CF_MUX_BLOCK_ANY)
			break;

		/* already woken up during this pass */
		if (fstrm->flags & FCGI_SF_NOTIFIED)
			continue;

		/* If the sender changed his mind and unsubscribed, let's just
		 * remove the stream from the send_list.
		 */
		if (!(fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW)) &&
		    (!fstrm->subs || !(fstrm->subs->events & SUB_RETRY_SEND))) {
			LIST_DEL_INIT(&fstrm->send_list);
			continue;
		}

		if (fstrm->subs && fstrm->subs->events & SUB_RETRY_SEND) {
			/* the stream is subscribed for sending: wake its tasklet */
			TRACE_POINT(FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
			fstrm->flags &= ~FCGI_SF_BLK_ANY;
			fstrm->flags |= FCGI_SF_NOTIFIED;
			tasklet_wakeup(fstrm->subs->tasklet);
			fstrm->subs->events &= ~SUB_RETRY_SEND;
			if (!fstrm->subs->events)
				fstrm->subs = NULL;
		} else {
			/* it's the shut request that was queued */
			TRACE_POINT(FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
			tasklet_wakeup(fstrm->shut_tl);
		}
	}

  fail:
	if (fconn->state == FCGI_CS_CLOSED) {
		if (fconn->stream_cnt - fconn->nb_reserved > 0) {
			/* abort the remaining active streams on a closed connection */
			fcgi_conn_send_aborts(fconn);
			if (fconn->flags & FCGI_CF_MUX_BLOCK_ANY) {
				TRACE_DEVEL("leaving in blocked situation", FCGI_EV_FCONN_WAKE|FCGI_EV_FCONN_BLK, fconn->conn);
				return 0;
			}
		}
	}

  done:
	TRACE_LEAVE(FCGI_EV_FCONN_WAKE, fconn->conn);
	return 1;
}
+
+
/* Attempt to read data, and subscribe if none available.
 * The function returns 1 if data has been received, otherwise zero.
 * Reads from the transport layer into the connection's demux buffer
 * (fconn->dbuf), allocating it on the fly. Also latches read0 and
 * transport errors into FCGI_CF_EOS / FCGI_CF_ERROR, and flags the
 * demux buffer as full when it filled up completely.
 */
static int fcgi_recv(struct fcgi_conn *fconn)
{
	struct connection *conn = fconn->conn;
	struct buffer *buf;
	int max;
	size_t ret;

	TRACE_ENTER(FCGI_EV_FCONN_RECV, conn);

	if (fconn->wait_event.events & SUB_RETRY_RECV) {
		/* already subscribed for receiving: nothing to do right now */
		TRACE_DEVEL("leaving on sub_recv", FCGI_EV_FCONN_RECV, conn);
		return (b_data(&fconn->dbuf));
	}

	if (!fcgi_recv_allowed(fconn)) {
		TRACE_DEVEL("leaving on !recv_allowed", FCGI_EV_FCONN_RECV, conn);
		return 1;
	}

	buf = fcgi_get_buf(fconn, &fconn->dbuf);
	if (!buf) {
		TRACE_DEVEL("waiting for fconn dbuf allocation", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK, conn);
		fconn->flags |= FCGI_CF_DEM_DALLOC;
		return 0;
	}

	if (!b_data(buf)) {
		/* try to pre-align the buffer like the
		 * rxbufs will be to optimize memory copies. We'll make
		 * sure that the record header lands at the end of the
		 * HTX block to alias it upon recv. We cannot use the
		 * head because rcv_buf() will realign the buffer if
		 * it's empty. Thus we cheat and pretend we already
		 * have a few bytes there.
		 */
		max = buf_room_for_htx_data(buf) + (fconn->state == FCGI_CS_RECORD_H ? FCGI_RECORD_HEADER_SZ : 0);
		buf->head = sizeof(struct htx) - (fconn->state == FCGI_CS_RECORD_H ? FCGI_RECORD_HEADER_SZ : 0);
	}
	else
		max = buf_room_for_htx_data(buf);

	ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;

	if (max && !ret && fcgi_recv_allowed(fconn)) {
		/* nothing received while receiving is still possible: subscribe */
		TRACE_DATA("failed to receive data, subscribing", FCGI_EV_FCONN_RECV, conn);
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &fconn->wait_event);
	}
	else
		TRACE_DATA("recv data", FCGI_EV_FCONN_RECV, conn, 0, 0, (size_t[]){ret});

	if (conn_xprt_read0_pending(conn)) {
		TRACE_DATA("received read0", FCGI_EV_FCONN_RECV, conn);
		fconn->flags |= FCGI_CF_EOS;
	}
	if (conn->flags & CO_FL_ERROR) {
		TRACE_DATA("connection error", FCGI_EV_FCONN_RECV, conn);
		fconn->flags |= FCGI_CF_ERROR;
	}

	if (!b_data(buf)) {
		/* nothing buffered: release the demux buffer */
		fcgi_release_buf(fconn, &fconn->dbuf);
		goto end;
	}

	if (ret == max) {
		TRACE_DEVEL("fconn dbuf full", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK, conn);
		fconn->flags |= FCGI_CF_DEM_DFULL;
	}

end:
	TRACE_LEAVE(FCGI_EV_FCONN_RECV, conn);
	return !!ret || (fconn->flags & (FCGI_CF_EOS|FCGI_CF_ERROR));
}
+
+
+/* Try to send data if possible.
+ * The function returns 1 if data have been sent, otherwise zero. A non-zero
+ * return tells the caller (fcgi_io_cb) that progress was made and that
+ * fcgi_process() should run; errors are reported as "progress" for the same
+ * reason.
+ */
+static int fcgi_send(struct fcgi_conn *fconn)
+{
+	struct connection *conn = fconn->conn;
+	int done;
+	int sent = 0;
+
+	TRACE_ENTER(FCGI_EV_FCONN_SEND, conn);
+
+	if (fconn->flags & (FCGI_CF_ERROR|FCGI_CF_ERR_PENDING)) {
+		TRACE_DEVEL("leaving on connection error", FCGI_EV_FCONN_SEND, conn);
+		/* a pending error becomes final once the read side saw EOS */
+		if (fconn->flags & FCGI_CF_EOS)
+			fconn->flags |= FCGI_CF_ERROR;
+		b_reset(br_tail(fconn->mbuf));
+		/* return 1 so the caller still runs fcgi_process() and handles
+		 * the error condition.
+		 */
+		return 1;
+	}
+
+
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		/* a handshake was requested */
+		goto schedule;
+	}
+
+	/* This loop is quite simple : it tries to fill as much as it can from
+	 * pending streams into the existing buffer until it's reportedly full
+	 * or the end of send requests is reached. Then it tries to send this
+	 * buffer's contents out, marks it not full if at least one byte could
+	 * be sent, and tries again.
+	 *
+	 * The snd_buf() function normally takes a "flags" argument which may
+	 * be made of a combination of CO_SFL_MSG_MORE to indicate that more
+	 * data immediately comes and CO_SFL_STREAMER to indicate that the
+	 * connection is streaming lots of data (used to increase TLS record
+	 * size at the expense of latency). The former can be sent any time
+	 * there's a buffer full flag, as it indicates at least one stream
+	 * attempted to send and failed so there are pending data. An
+	 * alternative would be to set it as long as there's an active stream
+	 * but that would be problematic for ACKs until we have an absolute
+	 * guarantee that all waiters have at least one byte to send. The
+	 * latter should possibly not be set for now.
+	 */
+
+	done = 0;
+	while (!done) {
+		unsigned int flags = 0;
+		unsigned int released = 0;
+		struct buffer *buf;
+
+		/* fill as much as we can into the current buffer */
+		while (((fconn->flags & (FCGI_CF_MUX_MFULL|FCGI_CF_MUX_MALLOC)) == 0) && !done)
+			done = fcgi_process_mux(fconn);
+
+		if (fconn->flags & FCGI_CF_MUX_MALLOC)
+			done = 1; // we won't go further without extra buffers
+
+		if (conn->flags & CO_FL_ERROR)
+			break;
+
+		if (fconn->flags & (FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM))
+			flags |= CO_SFL_MSG_MORE;
+
+		/* walk the mbuf ring head-first; b_size()==0 marks an
+		 * unallocated slot and thus the end of pending buffers.
+		 */
+		for (buf = br_head(fconn->mbuf); b_size(buf); buf = br_del_head(fconn->mbuf)) {
+			if (b_data(buf)) {
+				int ret;
+
+				ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), flags);
+				if (!ret) {
+					done = 1;
+					break;
+				}
+				sent = 1;
+				TRACE_DATA("send data", FCGI_EV_FCONN_SEND, conn, 0, 0, (size_t[]){ret});
+				b_del(buf, ret);
+				if (b_data(buf)) {
+					/* partial write: stop here, the buffer
+					 * keeps its remaining data.
+					 */
+					done = 1;
+					break;
+				}
+			}
+			b_free(buf);
+			released++;
+		}
+
+		if (released)
+			offer_buffers(NULL, released);
+
+		/* wrote at least one byte, the buffer is not full anymore */
+		if (fconn->flags & (FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM))
+			TRACE_STATE("fconn mbuf ring not fill anymore", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK, conn);
+		fconn->flags &= ~(FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM);
+	}
+
+	if (conn->flags & CO_FL_ERROR) {
+		fconn->flags |= FCGI_CF_ERR_PENDING;
+		if (fconn->flags & FCGI_CF_EOS)
+			fconn->flags |= FCGI_CF_ERROR;
+		b_reset(br_tail(fconn->mbuf));
+	}
+
+	/* We're not full anymore, so we can wake any task that are waiting
+	 * for us.
+	 */
+	if (!(fconn->flags & (FCGI_CF_MUX_MFULL | FCGI_CF_DEM_MROOM)) && fconn->state >= FCGI_CS_RECORD_H) {
+		struct fcgi_strm *fstrm;
+
+		list_for_each_entry(fstrm, &fconn->send_list, send_list) {
+			if (fconn->state == FCGI_CS_CLOSED || fconn->flags & FCGI_CF_MUX_BLOCK_ANY)
+				break;
+
+			if (fstrm->flags & FCGI_SF_NOTIFIED)
+				continue;
+
+			/* If the sender changed his mind and unsubscribed, let's just
+			 * remove the stream from the send_list.
+			 */
+			if (!(fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW)) &&
+			    (!fstrm->subs || !(fstrm->subs->events & SUB_RETRY_SEND))) {
+				LIST_DEL_INIT(&fstrm->send_list);
+				continue;
+			}
+
+			if (fstrm->subs && fstrm->subs->events & SUB_RETRY_SEND) {
+				TRACE_DEVEL("waking up pending stream", FCGI_EV_FCONN_SEND|FCGI_EV_STRM_WAKE, conn, fstrm);
+				fstrm->flags &= ~FCGI_SF_BLK_ANY;
+				fstrm->flags |= FCGI_SF_NOTIFIED;
+				tasklet_wakeup(fstrm->subs->tasklet);
+				fstrm->subs->events &= ~SUB_RETRY_SEND;
+				if (!fstrm->subs->events)
+					fstrm->subs = NULL;
+			} else {
+				/* it's the shut request that was queued */
+				TRACE_POINT(FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
+				tasklet_wakeup(fstrm->shut_tl);
+			}
+		}
+	}
+	/* We're done, no more to send */
+	if (!br_data(fconn->mbuf)) {
+		TRACE_DEVEL("leaving with everything sent", FCGI_EV_FCONN_SEND, conn);
+		goto end;
+	}
+schedule:
+	/* some data could not be sent (or a handshake is pending): subscribe
+	 * to be called back when the transport layer is writable again.
+	 */
+	if (!(conn->flags & CO_FL_ERROR) && !(fconn->wait_event.events & SUB_RETRY_SEND)) {
+		TRACE_STATE("more data to send, subscribing", FCGI_EV_FCONN_SEND, conn);
+		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &fconn->wait_event);
+	}
+
+	TRACE_DEVEL("leaving with some data left to send", FCGI_EV_FCONN_SEND, conn);
+end:
+	return sent || (fconn->flags & (FCGI_CF_ERR_PENDING|FCGI_CF_ERROR));
+}
+
+/* this is the tasklet referenced in fconn->wait_event.tasklet */
+struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
+{
+	struct connection *conn;
+	struct fcgi_conn *fconn = ctx;
+	struct tasklet *tl = (struct tasklet *)t;
+	int conn_in_list;
+	int ret = 0;
+
+	if (state & TASK_F_USR1) {
+		/* the tasklet was idling on an idle connection, it might have
+		 * been stolen, let's be careful!
+		 */
+		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+		if (tl->context == NULL) {
+			/* The connection has been taken over by another thread,
+			 * we're no longer responsible for it, so just free the
+			 * tasklet, and do nothing.
+			 */
+			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			tasklet_free(tl);
+			return NULL;
+		}
+		conn = fconn->conn;
+		TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
+
+		/* remember whether the connection sat in an idle list so it
+		 * can be re-inserted after processing; remove it meanwhile so
+		 * nobody steals it while we work on it.
+		 */
+		conn_in_list = conn->flags & CO_FL_LIST_MASK;
+		if (conn_in_list)
+			conn_delete_from_tree(conn);
+
+		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+	} else {
+		/* we're certain the connection was not in an idle list */
+		conn = fconn->conn;
+		TRACE_ENTER(FCGI_EV_FCONN_WAKE, conn);
+		conn_in_list = 0;
+	}
+
+	/* only perform the I/O directions nobody else is subscribed to */
+	if (!(fconn->wait_event.events & SUB_RETRY_SEND))
+		ret = fcgi_send(fconn);
+	if (!(fconn->wait_event.events & SUB_RETRY_RECV))
+		ret |= fcgi_recv(fconn);
+	if (ret || b_data(&fconn->dbuf))
+		ret = fcgi_process(fconn);
+
+	/* If we were in an idle list, we want to add it back into it,
+	 * unless fcgi_process() returned -1, which mean it has destroyed
+	 * the connection (testing !ret is enough, if fcgi_process() wasn't
+	 * called then ret will be 0 anyway.
+	 */
+	if (ret < 0)
+		t = NULL;    /* connection was destroyed, don't return a task */
+
+	if (!ret && conn_in_list) {
+		struct server *srv = objt_server(conn->target);
+
+		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+		_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
+		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+	}
+	return t;
+}
+
+/* callback called on any event by the connection handler.
+ * It applies changes and returns zero, or < 0 if it wants immediate
+ * destruction of the connection (which normally does not happen in FCGI).
+ */
+static int fcgi_process(struct fcgi_conn *fconn)
+{
+	struct connection *conn = fconn->conn;
+
+	TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
+
+	/* demux pending input first, unless demuxing is currently blocked */
+	if (b_data(&fconn->dbuf) && !(fconn->flags & FCGI_CF_DEM_BLOCK_ANY)) {
+		fcgi_process_demux(fconn);
+
+		if (fconn->state == FCGI_CS_CLOSED || (fconn->flags & FCGI_CF_ERROR))
+			b_reset(&fconn->dbuf);
+
+		/* demuxing freed some room, clear the "demux buffer full" block */
+		if (buf_room_for_htx_data(&fconn->dbuf))
+			fconn->flags &= ~FCGI_CF_DEM_DFULL;
+	}
+	fcgi_send(fconn);
+
+	if (unlikely(fconn->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+		/* frontend is stopping, reload likely in progress, let's try
+		 * to announce a graceful shutdown if not yet done. We don't
+		 * care if it fails, it will be tried again later.
+		 */
+		TRACE_STATE("proxy stopped, sending ABORT to all streams", FCGI_EV_FCONN_WAKE|FCGI_EV_TX_RECORD, conn);
+		if (!(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) {
+			if (fconn->stream_cnt - fconn->nb_reserved > 0)
+				fcgi_conn_send_aborts(fconn);
+		}
+	}
+
+	/*
+	 * If we received early data, and the handshake is done, wake
+	 * any stream that was waiting for it.
+	 */
+	if (!(fconn->flags & FCGI_CF_WAIT_FOR_HS) &&
+	    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
+		struct eb32_node *node;
+		struct fcgi_strm *fstrm;
+
+		fconn->flags |= FCGI_CF_WAIT_FOR_HS;
+		node = eb32_lookup_ge(&fconn->streams_by_id, 1);
+
+		while (node) {
+			fstrm = container_of(node, struct fcgi_strm, by_id);
+			if (fcgi_strm_sc(fstrm) && se_fl_test(fstrm->sd, SE_FL_WAIT_FOR_HS))
+				fcgi_strm_notify_recv(fstrm);
+			node = eb32_next(node);
+		}
+	}
+
+	/* terminal conditions: report them to the streams, and release the
+	 * connection once no stream remains.
+	 */
+	if ((fconn->flags & FCGI_CF_ERROR) || fcgi_conn_read0_pending(fconn) ||
+	    fconn->state == FCGI_CS_CLOSED || (fconn->flags & FCGI_CF_ABRTS_FAILED) ||
+	    eb_is_empty(&fconn->streams_by_id)) {
+		fcgi_wake_some_streams(fconn, 0);
+
+		if (eb_is_empty(&fconn->streams_by_id)) {
+			/* no more stream, kill the connection now */
+			fcgi_release(fconn);
+			TRACE_DEVEL("leaving after releasing the connection", FCGI_EV_FCONN_WAKE);
+			return -1;
+		}
+	}
+
+	/* give back unused buffers to the pools */
+	if (!b_data(&fconn->dbuf))
+		fcgi_release_buf(fconn, &fconn->dbuf);
+
+	if (fconn->state == FCGI_CS_CLOSED || (fconn->flags & FCGI_CF_ABRTS_FAILED) ||
+	    (!br_data(fconn->mbuf) && ((fconn->flags & FCGI_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&fconn->send_list))))
+		fcgi_release_mbuf(fconn);
+
+	/* refresh the connection timeout */
+	if (fconn->task) {
+		fconn->task->expire = tick_add(now_ms, (fconn->state == FCGI_CS_CLOSED ? fconn->shut_timeout : fconn->timeout));
+		task_queue(fconn->task);
+	}
+
+	/* second send attempt in case the processing above queued new data
+	 * (e.g. aborts) to be emitted.
+	 */
+	fcgi_send(fconn);
+	TRACE_LEAVE(FCGI_EV_FCONN_WAKE, conn);
+	return 0;
+}
+
+
+/* wake-up function called by the connection layer (mux_ops.wake).
+ * Simply forwards the event to fcgi_process() and propagates its
+ * return value (< 0 means the connection was destroyed).
+ */
+static int fcgi_wake(struct connection *conn)
+{
+	struct fcgi_conn *fctx = conn->ctx;
+
+	TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
+	return fcgi_process(fctx);
+}
+
+
+/* mux_ops.ctl callback: answers connection-level queries.
+ * MUX_CTL_STATUS reports MUX_STATUS_READY once the transport handshake is
+ * over; MUX_CTL_EXIT_STATUS is always unknown for FCGI; any other query
+ * returns -1 (unsupported).
+ */
+static int fcgi_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
+{
+	switch (mux_ctl) {
+	case MUX_CTL_STATUS: {
+		int status = 0;
+
+		if (!(conn->flags & CO_FL_WAIT_XPRT))
+			status |= MUX_STATUS_READY;
+		return status;
+	}
+	case MUX_CTL_EXIT_STATUS:
+		return MUX_ES_UNKNOWN;
+	default:
+		return -1;
+	}
+}
+
+/* mux_ops.sctl callback: answers stream-level queries.
+ * Only MUX_SCTL_SID is supported: it stores the stream id into <output>
+ * (when non-NULL) and returns 0. Anything else returns -1.
+ */
+static int fcgi_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
+{
+	struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+
+	if (mux_sctl == MUX_SCTL_SID) {
+		if (output)
+			*((int64_t *)output) = fstrm->id;
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Connection timeout management. The principle is that if there's no receipt
+ * nor sending for a certain amount of time, the connection is closed. If the
+ * MUX buffer still has lying data or is not allocatable, the connection is
+ * immediately killed. If it's allocatable and empty, we attempt to send a
+ * ABORT records.
+ * Returns the task itself when the timeout has not expired yet, otherwise
+ * destroys the task and returns NULL.
+ */
+struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state)
+{
+	struct fcgi_conn *fconn = context;
+	int expired = tick_is_expired(t->expire, now_ms);
+
+	TRACE_ENTER(FCGI_EV_FCONN_WAKE, (fconn ? fconn->conn : NULL));
+
+	if (fconn) {
+		/* the lock guards against another thread taking over this
+		 * idle connection while we decide its fate.
+		 */
+		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+
+		/* Somebody already stole the connection from us, so we should not
+		 * free it, we just have to free the task.
+		 */
+		if (!t->context) {
+			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			fconn = NULL;
+			goto do_leave;
+		}
+
+		if (!expired) {
+			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			TRACE_DEVEL("leaving (not expired)", FCGI_EV_FCONN_WAKE, fconn->conn);
+			return t;
+		}
+
+		/* We're about to destroy the connection, so make sure nobody attempts
+		 * to steal it from us.
+		 */
+		if (fconn->conn->flags & CO_FL_LIST_MASK)
+			conn_delete_from_tree(fconn->conn);
+
+		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+	}
+
+do_leave:
+	task_destroy(t);
+
+	if (!fconn) {
+		/* resources were already deleted */
+		TRACE_DEVEL("leaving (not more fconn)", FCGI_EV_FCONN_WAKE);
+		return NULL;
+	}
+
+	fconn->task = NULL;
+	fconn->state = FCGI_CS_CLOSED;
+	fcgi_wake_some_streams(fconn, 0);
+
+	if (br_data(fconn->mbuf)) {
+		/* don't even try to send aborts, the buffer is stuck */
+		fconn->flags |= FCGI_CF_ABRTS_FAILED;
+		goto end;
+	}
+
+	/* try to send but no need to insist */
+	if (!fcgi_conn_send_aborts(fconn))
+		fconn->flags |= FCGI_CF_ABRTS_FAILED;
+
+	/* flush whatever the abort attempt queued, best effort only */
+	if (br_data(fconn->mbuf) && !(fconn->flags & FCGI_CF_ABRTS_FAILED) &&
+	    conn_xprt_ready(fconn->conn)) {
+		unsigned int released = 0;
+		struct buffer *buf;
+
+		for (buf = br_head(fconn->mbuf); b_size(buf); buf = br_del_head(fconn->mbuf)) {
+			if (b_data(buf)) {
+				int ret = fconn->conn->xprt->snd_buf(fconn->conn, fconn->conn->xprt_ctx,
+								     buf, b_data(buf), 0);
+				if (!ret)
+					break;
+				b_del(buf, ret);
+				if (b_data(buf))
+					break;
+				b_free(buf);
+				released++;
+			}
+		}
+
+		if (released)
+			offer_buffers(NULL, released);
+	}
+
+ end:
+	/* either we can release everything now or it will be done later once
+	 * the last stream closes.
+	 */
+	if (eb_is_empty(&fconn->streams_by_id))
+		fcgi_release(fconn);
+
+	TRACE_LEAVE(FCGI_EV_FCONN_WAKE);
+	return NULL;
+}
+
+
+/*******************************************/
+/* functions below are used by the streams */
+/*******************************************/
+
+/* Append the description of what is present in error snapshot <es> into <out>.
+ * The description must be small enough to always fit in a buffer. The output
+ * buffer may be the trash so the trash must not be used inside this function.
+ * Note: the fields are reported under their H1 names because FCGI responses
+ * are parsed with the H1 message parser (see fcgi_strm_capture_bad_message()).
+ */
+static void fcgi_show_error_snapshot(struct buffer *out, const struct error_snapshot *es)
+{
+	chunk_appendf(out,
+		      "  FCGI connection flags 0x%08x, FCGI stream flags 0x%08x\n"
+		      "  H1 msg state %s(%d), H1 msg flags 0x%08x\n"
+		      "  H1 chunk len %lld bytes, H1 body len %lld bytes :\n",
+		      es->ctx.h1.c_flags, es->ctx.h1.s_flags,
+		      h1m_state_str(es->ctx.h1.state), es->ctx.h1.state,
+		      es->ctx.h1.m_flags, es->ctx.h1.m_clen, es->ctx.h1.m_blen);
+}
+/*
+ * Capture a bad response and archive it in the proxy's structure. By default
+ * it tries to report the error position as h1m->err_pos. However if this one is
+ * not set, it will then report h1m->next, which is the last known parsing
+ * point. The function is able to deal with wrapping buffers. It always displays
+ * buffers as a contiguous area starting at buf->p. The direction is determined
+ * thanks to the h1m's flags.
+ */
+static void fcgi_strm_capture_bad_message(struct fcgi_conn *fconn, struct fcgi_strm *fstrm,
+					  struct h1m *h1m, struct buffer *buf)
+{
+	struct session *sess = fstrm->sess;
+	struct proxy *proxy = fconn->proxy;
+	struct proxy *other_end = NULL;
+	union error_snapshot_ctx ctx;
+
+	/* identify the proxy on the other side of the message when a stream
+	 * is still attached; fall back to the stream's session if we have
+	 * none yet.
+	 */
+	if (fcgi_strm_sc(fstrm) && sc_strm(fcgi_strm_sc(fstrm))) {
+		if (!sess)
+			sess = __sc_strm(fcgi_strm_sc(fstrm))->sess;
+		other_end = (h1m->flags & H1_MF_RESP)
+			? sess->fe
+			: __sc_strm(fcgi_strm_sc(fstrm))->be;
+	}
+
+	/* http-specific part now */
+	ctx.h1.state   = h1m->state;
+	ctx.h1.c_flags = fconn->flags;
+	ctx.h1.s_flags = fstrm->flags;
+	ctx.h1.m_flags = h1m->flags;
+	ctx.h1.m_clen  = h1m->curr_len;
+	ctx.h1.m_blen  = h1m->body_len;
+
+	proxy_capture_error(proxy, 1, other_end, fconn->conn->target, sess, buf, 0, 0,
+			    (h1m->err_pos >= 0) ? h1m->err_pos : h1m->next,
+			    &ctx, fcgi_show_error_snapshot);
+}
+
+/* Parse response headers from <buf> at offset <*ofs> using the H1 parser and
+ * emit them into <htx>. On success <*ofs> is advanced and the number of bytes
+ * consumed is returned. On missing data or parsing error, 0 is returned; a
+ * parsing error additionally puts the stream in error and captures the bad
+ * message in the proxy's error snapshot.
+ */
+static size_t fcgi_strm_parse_headers(struct fcgi_strm *fstrm, struct h1m *h1m, struct htx *htx,
+				      struct buffer *buf, size_t *ofs, size_t max)
+{
+	int ret;
+
+	TRACE_ENTER(FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS, fstrm->fconn->conn, fstrm, 0, (size_t[]){max});
+	ret = h1_parse_msg_hdrs(h1m, NULL, htx, buf, *ofs, max);
+	if (ret <= 0) {
+		TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS, fstrm->fconn->conn, fstrm);
+		if (htx->flags & HTX_FL_PARSING_ERROR) {
+			TRACE_ERROR("parsing error, reject H1 response", FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS|FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+			fcgi_strm_error(fstrm);
+			fcgi_strm_capture_bad_message(fstrm->fconn, fstrm, h1m, buf);
+		}
+		ret = 0;
+	}
+	else if (h1m->flags & H1_MF_TE_OTHER) {
+		/* Reject any message with an unknown transfer-encoding. In fact if any
+		 * encoding other than "chunked". A 422-Unprocessable-Content is
+		 * returned for an invalid request, a 502-Bad-Gateway for an invalid
+		 * response.
+		 */
+		htx->flags |= HTX_FL_PARSING_ERROR;
+		TRACE_ERROR("Unknown transfer-encoding", FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS|FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+		fcgi_strm_error(fstrm);
+		fcgi_strm_capture_bad_message(fstrm->fconn, fstrm, h1m, buf);
+		ret = 0;
+	}
+	else
+		*ofs += ret;
+
+	TRACE_LEAVE(FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS, fstrm->fconn->conn, fstrm, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Parse response payload from <buf> at offset <*ofs> using the H1 parser and
+ * move it into <*htx> (which may be swapped by the parser, hence the double
+ * pointer). Returns the number of bytes consumed (0 on missing data or
+ * error); a parsing error puts the stream in error and captures the bad
+ * message.
+ */
+static size_t fcgi_strm_parse_data(struct fcgi_strm *fstrm, struct h1m *h1m, struct htx **htx,
+				   struct buffer *buf, size_t *ofs, size_t max, struct buffer *htxbuf)
+{
+	size_t ret;
+
+	TRACE_ENTER(FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY, fstrm->fconn->conn, fstrm, 0, (size_t[]){max});
+	ret = h1_parse_msg_data(h1m, htx, buf, *ofs, max, htxbuf);
+	if (ret)
+		*ofs += ret;
+	else {
+		TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY, fstrm->fconn->conn, fstrm);
+		if ((*htx)->flags & HTX_FL_PARSING_ERROR) {
+			TRACE_ERROR("parsing error, reject H1 response", FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY|FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+			fcgi_strm_error(fstrm);
+			fcgi_strm_capture_bad_message(fstrm->fconn, fstrm, h1m, buf);
+		}
+	}
+
+	TRACE_LEAVE(FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY, fstrm->fconn->conn, fstrm, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Parse response trailers from <buf> at offset <*ofs> using the H1 parser and
+ * emit them into <htx>. Returns the number of bytes consumed (0 on missing
+ * data or error); a parsing error puts the stream in error and captures the
+ * bad message.
+ */
+static size_t fcgi_strm_parse_trailers(struct fcgi_strm *fstrm, struct h1m *h1m, struct htx *htx,
+				       struct buffer *buf, size_t *ofs, size_t max)
+{
+	int ret;
+
+	TRACE_ENTER(FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fstrm->fconn->conn, fstrm, 0, (size_t[]){max});
+	ret = h1_parse_msg_tlrs(h1m, htx, buf, *ofs, max);
+	if (ret > 0)
+		*ofs += ret;
+	else {
+		TRACE_DEVEL("leaving on missing data or error", FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fstrm->fconn->conn, fstrm);
+		if (htx->flags & HTX_FL_PARSING_ERROR) {
+			TRACE_ERROR("parsing error, reject H1 response", FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS|FCGI_EV_FSTRM_ERR, fstrm->fconn->conn, fstrm);
+			fcgi_strm_error(fstrm);
+			fcgi_strm_capture_bad_message(fstrm->fconn, fstrm, h1m, buf);
+		}
+		ret = 0;
+	}
+
+	TRACE_LEAVE(FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fstrm->fconn->conn, fstrm, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Parse as much as possible of the response accumulated in fstrm->rxbuf and
+ * transfer it as HTX blocks into <buf>, driven by the stream's H1 message
+ * state machine (fstrm->h1m). At most <count> bytes are processed. Returns
+ * the number of HTX bytes produced, or 0 on error (the rxbuf is then reset).
+ */
+static size_t fcgi_strm_parse_response(struct fcgi_strm *fstrm, struct buffer *buf, size_t count)
+{
+	struct fcgi_conn *fconn = fstrm->fconn;
+	struct htx *htx;
+	struct h1m *h1m = &fstrm->h1m;
+	size_t ret, data, total = 0;
+
+	htx = htx_from_buf(buf);
+	TRACE_ENTER(FCGI_EV_RSP_DATA, fconn->conn, fstrm, htx, (size_t[]){count});
+
+	data = htx->data;  /* remember to compute the amount produced at the end */
+	if (fstrm->state == FCGI_SS_ERROR)
+		goto end;
+
+	do {
+		size_t used = htx_used_space(htx);
+
+		if (h1m->state <= H1_MSG_LAST_LF) {
+			TRACE_PROTO("parsing response headers", FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS, fconn->conn, fstrm);
+			ret = fcgi_strm_parse_headers(fstrm, h1m, htx, &fstrm->rxbuf, &total, count);
+			if (!ret)
+				break;
+
+			TRACE_USER("rcvd H1 response headers", FCGI_EV_RSP_DATA|FCGI_EV_RSP_HDRS, fconn->conn, fstrm, htx);
+
+			/* HTTP/1.1 response without any explicit length: mark
+			 * the start line as length-delimited by the connection.
+			 */
+			if ((h1m->flags & (H1_MF_VER_11|H1_MF_XFER_LEN)) == H1_MF_VER_11) {
+				struct htx_blk *blk = htx_get_head_blk(htx);
+				struct htx_sl *sl;
+
+				if (!blk)
+					break;
+				sl = htx_get_blk_ptr(htx, blk);
+				sl->flags |= HTX_SL_F_XFER_LEN;
+				htx->extra = 0;
+			}
+		}
+		else if (h1m->state < H1_MSG_TRAILERS) {
+			TRACE_PROTO("parsing response payload", FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY, fconn->conn, fstrm);
+			fcgi_strm_parse_data(fstrm, h1m, &htx, &fstrm->rxbuf, &total, count, buf);
+
+			/* without explicit length, the end-of-stream record
+			 * delimits the message: close it once everything
+			 * received was consumed.
+			 */
+			if (!(h1m->flags & H1_MF_XFER_LEN) && fstrm->state != FCGI_SS_ERROR &&
+			    (fstrm->flags & FCGI_SF_ES_RCVD) && b_data(&fstrm->rxbuf) == total) {
+				TRACE_DEVEL("end of data", FCGI_EV_RSP_DATA, fconn->conn, fstrm);
+				if (htx_is_empty(htx) && !htx_add_endof(htx, HTX_BLK_EOT))
+					break;
+				htx->flags |= HTX_FL_EOM;
+				h1m->state = H1_MSG_DONE;
+				TRACE_USER("H1 response fully rcvd", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fconn->conn, fstrm, htx);
+			}
+
+			if (h1m->state < H1_MSG_TRAILERS)
+				break;
+
+			TRACE_PROTO("rcvd response payload data", FCGI_EV_RSP_DATA|FCGI_EV_RSP_BODY, fconn->conn, fstrm, htx);
+		}
+		else if (h1m->state == H1_MSG_TRAILERS) {
+			TRACE_PROTO("parsing response trailers", FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fconn->conn, fstrm);
+			fcgi_strm_parse_trailers(fstrm, h1m, htx, &fstrm->rxbuf, &total, count);
+			if (h1m->state != H1_MSG_DONE)
+				break;
+
+			TRACE_PROTO("rcvd H1 response trailers", FCGI_EV_RSP_DATA|FCGI_EV_RSP_TLRS, fconn->conn, fstrm, htx);
+		}
+		else if (h1m->state == H1_MSG_DONE) {
+			TRACE_USER("H1 response fully rcvd", FCGI_EV_RSP_DATA|FCGI_EV_RSP_EOM, fconn->conn, fstrm, htx);
+			/* data after the end of the message is a protocol error */
+			if (b_data(&fstrm->rxbuf) > total) {
+				htx->flags |= HTX_FL_PARSING_ERROR;
+				TRACE_PROTO("too much data, parsing error", FCGI_EV_RSP_DATA, fconn->conn, fstrm);
+				fcgi_strm_error(fstrm);
+			}
+			break;
+		}
+		else {
+			htx->flags |= HTX_FL_PROCESSING_ERROR;
+			TRACE_ERROR("unexpected processing error", FCGI_EV_RSP_DATA|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+			fcgi_strm_error(fstrm);
+			break;
+		}
+
+		count -= htx_used_space(htx) - used;
+	} while (fstrm->state != FCGI_SS_ERROR);
+
+	if (fstrm->state == FCGI_SS_ERROR) {
+		b_reset(&fstrm->rxbuf);
+		htx_to_buf(htx, buf);
+		TRACE_DEVEL("leaving on error", FCGI_EV_RSP_DATA|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+		return 0;
+	}
+
+	b_del(&fstrm->rxbuf, total);
+
+ end:
+	htx_to_buf(htx, buf);
+	ret = htx->data - data;
+	TRACE_LEAVE(FCGI_EV_RSP_DATA, fconn->conn, fstrm, htx, (size_t[]){ret});
+	return ret;
+}
+
+/*
+ * Attach a new stream to a connection
+ * (Used for outgoing connections)
+ * Returns 0 on success, -1 when no stream could be created.
+ */
+static int fcgi_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
+{
+	struct fcgi_conn *fconn = conn->ctx;
+	struct fcgi_strm *fstrm;
+
+	TRACE_ENTER(FCGI_EV_FSTRM_NEW, conn);
+
+	fstrm = fcgi_stconn_new(fconn, sd->sc, sess);
+	if (!fstrm) {
+		TRACE_DEVEL("leaving on error", FCGI_EV_FSTRM_NEW|FCGI_EV_FSTRM_ERR, conn);
+		return -1;
+	}
+
+	/* the connection is not idle anymore, let's mark this */
+	HA_ATOMIC_AND(&fconn->wait_event.tasklet->state, ~TASK_F_USR1);
+	xprt_set_used(conn, conn->xprt, conn->xprt_ctx);
+
+	TRACE_LEAVE(FCGI_EV_FSTRM_NEW, conn, fstrm);
+	return 0;
+}
+
+/* Retrieves the first valid stream connector from this connection, or returns NULL.
+ * We have to scan because we may have some orphan streams. It might be
+ * beneficial to scan backwards from the end to reduce the likeliness to find
+ * orphans.
+ */
+static struct stconn *fcgi_get_first_sc(const struct connection *conn)
+{
+	struct fcgi_conn *fconn = conn->ctx;
+	struct eb32_node *node;
+
+	for (node = eb32_first(&fconn->streams_by_id); node; node = eb32_next(node)) {
+		struct fcgi_strm *fstrm = container_of(node, struct fcgi_strm, by_id);
+
+		/* skip orphan streams with no attached stream connector */
+		if (fcgi_strm_sc(fstrm))
+			return fcgi_strm_sc(fstrm);
+	}
+	return NULL;
+}
+
+/*
+ * Destroy the mux and the associated connection, if it is no longer used
+ * (i.e. when no stream remains attached to it).
+ */
+static void fcgi_destroy(void *ctx)
+{
+	struct fcgi_conn *fconn = ctx;
+
+	TRACE_POINT(FCGI_EV_FCONN_END, fconn->conn);
+
+	/* streams still attached: the last detach will release us later */
+	if (!eb_is_empty(&fconn->streams_by_id))
+		return;
+
+	BUG_ON(fconn->conn->ctx != fconn);
+	fcgi_release(fconn);
+}
+
+/*
+ * Detach the stream from the connection and possibly release the connection.
+ * Depending on the connection's state, the connection is then either killed,
+ * kept private to the session, inserted into the server's idle list for
+ * reuse, or left to expire under its timeout task.
+ */
+static void fcgi_detach(struct sedesc *sd)
+{
+	struct fcgi_strm *fstrm = sd->se;
+	struct fcgi_conn *fconn;
+	struct session *sess;
+
+	TRACE_ENTER(FCGI_EV_STRM_END, (fstrm ? fstrm->fconn->conn : NULL), fstrm);
+
+	if (!fstrm) {
+		TRACE_LEAVE(FCGI_EV_STRM_END);
+		return;
+	}
+
+	/* there's no txbuf so we're certain no to be able to send anything */
+	fstrm->flags &= ~FCGI_SF_NOTIFIED;
+
+	sess = fstrm->sess;
+	fconn = fstrm->fconn;
+	fconn->nb_sc--;
+
+	/* adjust the connection's reusability according to the protocol
+	 * status reported by the application.
+	 */
+	if (fstrm->proto_status == FCGI_PS_CANT_MPX_CONN) {
+		fconn->flags &= ~FCGI_CF_MPXS_CONNS;
+		fconn->streams_limit = 1;
+	}
+	else if (fstrm->proto_status == FCGI_PS_OVERLOADED ||
+		 fstrm->proto_status == FCGI_PS_UNKNOWN_ROLE) {
+		fconn->flags &= ~FCGI_CF_KEEP_CONN;
+		fconn->state = FCGI_CS_CLOSED;
+	}
+
+	/* this stream may be blocked waiting for some data to leave, so orphan
+	 * it in this case.
+	 */
+	if (!(fconn->flags & (FCGI_CF_ERR_PENDING|FCGI_CF_ERROR)) && // FIXME: Be sure for ERR_PENDING
+	    (fconn->state != FCGI_CS_CLOSED) &&
+	    (fstrm->flags & (FCGI_SF_BLK_MBUSY|FCGI_SF_BLK_MROOM)) &&
+	    (fstrm->subs || (fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW)))) {
+		TRACE_DEVEL("leaving on stream blocked", FCGI_EV_STRM_END|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
+		return;
+	}
+
+	if ((fconn->flags & FCGI_CF_DEM_BLOCK_ANY && fstrm->id == fconn->dsi)) {
+		/* unblock the connection if it was blocked on this stream. */
+		fconn->flags &= ~FCGI_CF_DEM_BLOCK_ANY;
+		fcgi_conn_restart_reading(fconn, 1);
+	}
+
+	fcgi_strm_destroy(fstrm);
+
+	/* decide the connection's fate: only healthy keep-alive connections
+	 * are considered for reuse.
+	 */
+	if (!(fconn->flags & (FCGI_CF_EOS|FCGI_CF_ERR_PENDING|FCGI_CF_ERROR)) &&
+	    (fconn->flags & FCGI_CF_KEEP_CONN)) {
+		if (fconn->conn->flags & CO_FL_PRIVATE) {
+			/* Add the connection in the session serverlist, if not already done */
+			if (!session_add_conn(sess, fconn->conn, fconn->conn->target)) {
+				fconn->conn->owner = NULL;
+				if (eb_is_empty(&fconn->streams_by_id)) {
+					/* let's kill the connection right away */
+					fconn->conn->mux->destroy(fconn);
+					TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
+					return;
+				}
+			}
+			if (eb_is_empty(&fconn->streams_by_id)) {
+				/* the session decides whether an idle private
+				 * connection may be kept; it may destroy it.
+				 */
+				if (session_check_idle_conn(fconn->conn->owner, fconn->conn) != 0) {
+					/* The connection is destroyed, let's leave */
+					TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
+					return;
+				}
+			}
+		}
+		else {
+			if (eb_is_empty(&fconn->streams_by_id)) {
+				/* If the connection is owned by the session, first remove it
+				 * from its list
+				 */
+				if (fconn->conn->owner) {
+					session_unown_conn(fconn->conn->owner, fconn->conn);
+					fconn->conn->owner = NULL;
+				}
+
+				/* mark that the tasklet may lose its context to another thread and
+				 * that the handler needs to check it under the idle conns lock.
+				 */
+				HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
+				xprt_set_idle(fconn->conn, fconn->conn->xprt, fconn->conn->xprt_ctx);
+
+				if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1)) {
+					/* The server doesn't want it, let's kill the connection right away */
+					fconn->conn->mux->destroy(fconn);
+					TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
+					return;
+				}
+				/* At this point, the connection has been added to the
+				 * server idle list, so another thread may already have
+				 * hijacked it, so we can't do anything with it.
+				 */
+				TRACE_DEVEL("reusable idle connection", FCGI_EV_STRM_END, fconn->conn);
+				return;
+			}
+			else if (!fconn->conn->hash_node->node.node.leaf_p &&
+				 fcgi_avail_streams(fconn->conn) > 0 && objt_server(fconn->conn->target) &&
+				 !LIST_INLIST(&fconn->conn->session_list)) {
+				/* still-usable multiplexed connection: make it
+				 * available again for new streams.
+				 */
+				srv_add_to_avail_list(__objt_server(fconn->conn->target), fconn->conn);
+			}
+		}
+	}
+
+	/* We don't want to close right now unless we're removing the last
+	 * stream and the connection is in error.
+	 */
+	if (fcgi_conn_is_dead(fconn)) {
+		/* no more stream will come, kill it now */
+		TRACE_DEVEL("leaving, killing dead connection", FCGI_EV_STRM_END, fconn->conn);
+		fcgi_release(fconn);
+	}
+	else if (fconn->task) {
+		fconn->task->expire = tick_add(now_ms, (fconn->state == FCGI_CS_CLOSED ? fconn->shut_timeout : fconn->timeout));
+		task_queue(fconn->task);
+		TRACE_DEVEL("leaving, refreshing connection's timeout", FCGI_EV_STRM_END, fconn->conn);
+	}
+	else
+		TRACE_DEVEL("leaving", FCGI_EV_STRM_END, fconn->conn);
+}
+
+
+/* Performs a synchronous or asynchronous shutr().
+ * When the abort cannot be sent immediately (mux buffer busy/full), the
+ * request is deferred: the stream is queued on the connection's send_list and
+ * fcgi_deferred_shut() will retry via the shut_tl tasklet.
+ */
+static void fcgi_do_shutr(struct fcgi_strm *fstrm)
+{
+	struct fcgi_conn *fconn = fstrm->fconn;
+
+	TRACE_ENTER(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+
+	if (fstrm->state == FCGI_SS_CLOSED)
+		goto done;
+
+	/* a connstream may require us to immediately kill the whole connection
+	 * for example because of a "tcp-request content reject" rule that is
+	 * normally used to limit abuse.
+	 */
+	if (se_fl_test(fstrm->sd, SE_FL_KILL_CONN) &&
+	    !(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) {
+		TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+		fconn->state = FCGI_CS_CLOSED;
+	}
+	else if (fstrm->flags & FCGI_SF_BEGIN_SENT) {
+		TRACE_STATE("no headers sent yet, trying a retryable abort", FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+		/* only abort if neither end-of-stream nor an abort was already
+		 * sent; a failed attempt is deferred via add_to_list.
+		 */
+		if (!(fstrm->flags & (FCGI_SF_ES_SENT|FCGI_SF_ABRT_SENT)) &&
+		    !fcgi_strm_send_abort(fconn, fstrm))
+			goto add_to_list;
+	}
+
+	fcgi_strm_close(fstrm);
+
+	if (!(fconn->wait_event.events & SUB_RETRY_SEND))
+		tasklet_wakeup(fconn->wait_event.tasklet);
+ done:
+	fstrm->flags &= ~FCGI_SF_WANT_SHUTR;
+	TRACE_LEAVE(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+	return;
+
+ add_to_list:
+	/* Let the handler know we want to shutr, and add ourselves to the
+	 * send list if not yet done. fcgi_deferred_shut() will be
+	 * automatically called via the shut_tl tasklet when there's room
+	 * again.
+	 */
+	if (!LIST_INLIST(&fstrm->send_list)) {
+		if (fstrm->flags & (FCGI_SF_BLK_MBUSY|FCGI_SF_BLK_MROOM)) {
+			LIST_APPEND(&fconn->send_list, &fstrm->send_list);
+		}
+	}
+	fstrm->flags |= FCGI_SF_WANT_SHUTR;
+	TRACE_LEAVE(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+	return;
+}
+
+/* Performs a synchronous or asynchronous shutw().
+ * When the abort cannot be sent immediately (mux buffer busy/full), the
+ * request is deferred: the stream is queued on the connection's send_list and
+ * fcgi_deferred_shut() will retry via the shut_tl tasklet.
+ */
+static void fcgi_do_shutw(struct fcgi_strm *fstrm)
+{
+	struct fcgi_conn *fconn = fstrm->fconn;
+
+	TRACE_ENTER(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+
+	/* BUG FIX: the guard used to read "state != FCGI_SS_HLOC", which made
+	 * the function a no-op for any stream not already half-closed (local)
+	 * and turned the FCGI_SS_HREM check below into dead code. The intent
+	 * is to do nothing only when the local side is already closed.
+	 */
+	if (fstrm->state == FCGI_SS_HLOC || fstrm->state == FCGI_SS_CLOSED)
+		goto done;
+
+	if (fstrm->state != FCGI_SS_ERROR && (fstrm->flags & FCGI_SF_BEGIN_SENT)) {
+		/* send the abort unless an end-of-stream or abort was already
+		 * emitted; on failure, defer via add_to_list.
+		 */
+		if (!(fstrm->flags & (FCGI_SF_ES_SENT|FCGI_SF_ABRT_SENT)) &&
+		    !fcgi_strm_send_abort(fconn, fstrm))
+			goto add_to_list;
+
+		if (fstrm->state == FCGI_SS_HREM)
+			fcgi_strm_close(fstrm);
+		else
+			fstrm->state = FCGI_SS_HLOC;
+	} else {
+		/* a connstream may require us to immediately kill the whole connection
+		 * for example because of a "tcp-request content reject" rule that is
+		 * normally used to limit abuse.
+		 */
+		if (se_fl_test(fstrm->sd, SE_FL_KILL_CONN) &&
+		    !(fconn->flags & (FCGI_CF_ABRTS_SENT|FCGI_CF_ABRTS_FAILED))) {
+			TRACE_STATE("stream wants to kill the connection", FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+			fconn->state = FCGI_CS_CLOSED;
+		}
+
+		fcgi_strm_close(fstrm);
+	}
+
+	if (!(fconn->wait_event.events & SUB_RETRY_SEND))
+		tasklet_wakeup(fconn->wait_event.tasklet);
+ done:
+	fstrm->flags &= ~FCGI_SF_WANT_SHUTW;
+	TRACE_LEAVE(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+	return;
+
+ add_to_list:
+	/* Let the handler know we want to shutw, and add ourselves to the
+	 * send list if not yet done. fcgi_deferred_shut() will be
+	 * automatically called via the shut_tl tasklet when there's room
+	 * again.
+	 */
+	if (!LIST_INLIST(&fstrm->send_list)) {
+		if (fstrm->flags & (FCGI_SF_BLK_MBUSY|FCGI_SF_BLK_MROOM)) {
+			LIST_APPEND(&fconn->send_list, &fstrm->send_list);
+		}
+	}
+	fstrm->flags |= FCGI_SF_WANT_SHUTW;
+	TRACE_LEAVE(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+	return;
+}
+
+/* This is the tasklet referenced in fstrm->shut_tl, it is used for
+ * deferred shutdowns when the fcgi_detach() was done but the mux buffer was full
+ * and prevented the last record from being emitted.
+ * Always returns NULL (the tasklet is single-shot per wakeup).
+ */
+struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned int state)
+{
+	struct fcgi_strm *fstrm = ctx;
+	struct fcgi_conn *fconn = fstrm->fconn;
+
+	TRACE_ENTER(FCGI_EV_STRM_SHUT, fconn->conn, fstrm);
+
+	if (fstrm->flags & FCGI_SF_NOTIFIED) {
+		/* some data processing remains to be done first */
+		goto end;
+	}
+
+	/* retry the pending shut directions; each helper clears its WANT_*
+	 * flag on success or re-arms the deferral on failure.
+	 */
+	if (fstrm->flags & FCGI_SF_WANT_SHUTW)
+		fcgi_do_shutw(fstrm);
+
+	if (fstrm->flags & FCGI_SF_WANT_SHUTR)
+		fcgi_do_shutr(fstrm);
+
+	if (!(fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW))) {
+		/* We're done trying to send, remove ourself from the send_list */
+		LIST_DEL_INIT(&fstrm->send_list);
+
+		/* the stream was already detached: finish its destruction and
+		 * possibly the connection's too.
+		 */
+		if (!fcgi_strm_sc(fstrm)) {
+			fcgi_strm_destroy(fstrm);
+			if (fcgi_conn_is_dead(fconn))
+				fcgi_release(fconn);
+		}
+	}
+ end:
+	TRACE_LEAVE(FCGI_EV_STRM_SHUT);
+	return NULL;
+}
+
+/* shutr() called by the stream connector (mux_ops.shutr).
+ * A null <mode> (drain) is a no-op; any other mode triggers the read
+ * shutdown on the stream.
+ */
+static void fcgi_shutr(struct stconn *sc, enum co_shr_mode mode)
+{
+	struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+
+	TRACE_POINT(FCGI_EV_STRM_SHUT, fstrm->fconn->conn, fstrm);
+	if (mode)
+		fcgi_do_shutr(fstrm);
+}
+
+/* shutw() called by the stream connector (mux_ops.shutw).
+ * All modes are handled identically: the write side of the stream is
+ * shut down.
+ */
+static void fcgi_shutw(struct stconn *sc, enum co_shw_mode mode)
+{
+	struct fcgi_strm *strm = __sc_mux_strm(sc);
+
+	TRACE_POINT(FCGI_EV_STRM_SHUT, strm->fconn->conn, strm);
+	fcgi_do_shutw(strm);
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int fcgi_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+	struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+	struct fcgi_conn *fconn = fstrm->fconn;
+
+	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+	BUG_ON(fstrm->subs && fstrm->subs != es);
+
+	es->events |= event_type;
+	fstrm->subs = es;
+
+	/* BUG FIX: the trace labels wrongly said "unsubscribe(...)" here
+	 * (swapped with fcgi_unsubscribe()), which made traces misleading.
+	 */
+	if (event_type & SUB_RETRY_RECV)
+		TRACE_DEVEL("subscribe(recv)", FCGI_EV_STRM_RECV, fconn->conn, fstrm);
+
+	if (event_type & SUB_RETRY_SEND) {
+		TRACE_DEVEL("subscribe(send)", FCGI_EV_STRM_SEND, fconn->conn, fstrm);
+		/* queue the stream so fcgi_send() can wake it up once room is
+		 * available in the mux buffer.
+		 */
+		if (!LIST_INLIST(&fstrm->send_list))
+			LIST_APPEND(&fconn->send_list, &fstrm->send_list);
+	}
+	return 0;
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>
+ * (undo fcgi_subscribe). The <es> pointer is not allowed to differ from the one
+ * passed to the subscribe() call. It always returns zero.
+ */
+static int fcgi_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+ struct fcgi_conn *fconn = fstrm->fconn;
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(fstrm->subs && fstrm->subs != es);
+
+ es->events &= ~event_type;
+ if (!es->events)
+ fstrm->subs = NULL;
+
+ /* BUGFIX: trace labels were swapped with fcgi_subscribe()'s; this
+ * function must report "unsubscribe(...)" events.
+ */
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("unsubscribe(recv)", FCGI_EV_STRM_RECV, fconn->conn, fstrm);
+
+ if (event_type & SUB_RETRY_SEND) {
+ TRACE_DEVEL("unsubscribe(send)", FCGI_EV_STRM_SEND, fconn->conn, fstrm);
+ fstrm->flags &= ~FCGI_SF_NOTIFIED;
+ /* leave the send_list unless a deferred shutdown still needs it */
+ if (!(fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW)))
+ LIST_DEL_INIT(&fstrm->send_list);
+ }
+ return 0;
+}
+
+/* Called from the upper layer, to receive data
+ *
+ * The caller is responsible for defragmenting <buf> if necessary. But <flags>
+ * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
+ * means the caller wants to flush input data (from the mux buffer and the
+ * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
+ * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
+ * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
+ * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
+ * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
+ * copy as much data as possible.
+ */
+static size_t fcgi_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+ struct fcgi_conn *fconn = fstrm->fconn;
+ size_t ret = 0;
+
+ TRACE_ENTER(FCGI_EV_STRM_RECV, fconn->conn, fstrm);
+
+ /* only parse the response if the stream rx buffer could be allocated */
+ if (!(fconn->flags & FCGI_CF_DEM_SALLOC))
+ ret = fcgi_strm_parse_response(fstrm, buf, count);
+ else
+ TRACE_STATE("fstrm rxbuf not allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
+
+ if (b_data(&fstrm->rxbuf)) {
+ /* If the channel buffer is not empty, consider the mux is
+ * blocked because it needs more room. But if the channel buffer
+ * is empty, it means partial data were received and the mux
+ * needs to receive more data to be able to parse it.
+ */
+ if (b_data(buf))
+ se_fl_set(fstrm->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ }
+ else {
+ se_fl_clr(fstrm->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ /* end of input is reported once the message is fully parsed or
+ * the stream is in error
+ */
+ if (fstrm->state == FCGI_SS_ERROR || (fstrm->h1m.state == H1_MSG_DONE)) {
+ se_fl_set(fstrm->sd, SE_FL_EOI);
+ /* NOTE(review): no HTTP/1.1 nor announced length — the end of
+ * the response is presumably the close itself, hence EOS.
+ */
+ if (!(fstrm->h1m.flags & (H1_MF_VER_11|H1_MF_XFER_LEN)))
+ se_fl_set(fstrm->sd, SE_FL_EOS);
+ }
+ /* a pending read0 means the server closed: if the message was
+ * not finished (no EOI), report a truncation error
+ */
+ if (fcgi_conn_read0_pending(fconn)) {
+ se_fl_set(fstrm->sd, SE_FL_EOS);
+ if (!se_fl_test(fstrm->sd, SE_FL_EOI))
+ se_fl_set(fstrm->sd, SE_FL_ERROR);
+ }
+ if (se_fl_test(fstrm->sd, SE_FL_ERR_PENDING))
+ se_fl_set(fstrm->sd, SE_FL_ERROR);
+ fcgi_release_buf(fconn, &fstrm->rxbuf);
+ }
+
+ if (ret && fconn->dsi == fstrm->id) {
+ /* demux is blocking on this stream's buffer */
+ fconn->flags &= ~FCGI_CF_DEM_SFULL;
+ fcgi_conn_restart_reading(fconn, 1);
+ }
+
+ TRACE_LEAVE(FCGI_EV_STRM_RECV, fconn->conn, fstrm);
+ return ret;
+}
+
+
+/* Called from the upper layer, to send data from buffer <buf> for no more than
+ * <count> bytes. Returns the number of bytes effectively sent. Some status
+ * flags may be updated on the stream connector.
+ */
+static size_t fcgi_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct fcgi_strm *fstrm = __sc_mux_strm(sc);
+ struct fcgi_conn *fconn = fstrm->fconn;
+ size_t total = 0;
+ size_t ret;
+ struct htx *htx = NULL;
+ struct htx_sl *sl;
+ struct htx_blk *blk;
+ uint32_t bsize;
+
+ TRACE_ENTER(FCGI_EV_STRM_SEND, fconn->conn, fstrm, 0, (size_t[]){count});
+
+ /* If we were not just woken because we wanted to send but couldn't,
+ * and there's somebody else that is waiting to send, do nothing,
+ * we will subscribe later and be put at the end of the list
+ */
+ if (!(fstrm->flags & FCGI_SF_NOTIFIED) && !LIST_ISEMPTY(&fconn->send_list)) {
+ TRACE_STATE("other streams already waiting, going to the queue and leaving", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
+ return 0;
+ }
+ fstrm->flags &= ~FCGI_SF_NOTIFIED;
+
+ /* nothing can be sent until the connection reached the record stage */
+ if (fconn->state < FCGI_CS_RECORD_H) {
+ TRACE_STATE("connection not ready, leaving", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
+ return 0;
+ }
+
+ htx = htxbuf(buf);
+ /* a zero ID means the stream was never assigned one: do it now */
+ if (fstrm->id == 0) {
+ int32_t id = fcgi_conn_get_next_sid(fconn);
+
+ if (id < 0) {
+ fcgi_strm_close(fstrm);
+ se_fl_set(fstrm->sd, SE_FL_ERROR);
+ TRACE_DEVEL("couldn't get a stream ID, leaving in error", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_ERR|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+ return 0;
+ }
+
+ /* re-index the stream in the tree with its definitive ID */
+ eb32_delete(&fstrm->by_id);
+ fstrm->by_id.key = fstrm->id = id;
+ fconn->max_id = id;
+ fconn->nb_reserved--;
+ eb32_insert(&fconn->streams_by_id, &fstrm->by_id);
+
+
+ /* Check if length of the body is known or if the message is
+ * full. Otherwise, the request is invalid.
+ */
+ sl = http_get_stline(htx);
+ if (!sl || (!(sl->flags & HTX_SL_F_CLEN) && !(htx->flags & HTX_FL_EOM))) {
+ htx->flags |= HTX_FL_PARSING_ERROR;
+ fcgi_strm_error(fstrm);
+ goto done;
+ }
+ }
+
+ /* the BEGIN_REQUEST record must be emitted once, before anything else */
+ if (!(fstrm->flags & FCGI_SF_BEGIN_SENT)) {
+ TRACE_PROTO("sending FCGI BEGIN_REQUEST record", FCGI_EV_TX_RECORD|FCGI_EV_TX_BEGREQ, fconn->conn, fstrm);
+ if (!fcgi_strm_send_begin_request(fconn, fstrm))
+ goto done;
+ }
+
+ if (!(fstrm->flags & FCGI_SF_OUTGOING_DATA) && count)
+ fstrm->flags |= FCGI_SF_OUTGOING_DATA;
+
+ /* walk the HTX message block by block until blocked or exhausted */
+ while (fstrm->state < FCGI_SS_HLOC && !(fstrm->flags & FCGI_SF_BLK_ANY) &&
+ count && !htx_is_empty(htx)) {
+ blk = htx_get_head_blk(htx);
+ ALREADY_CHECKED(blk);
+ bsize = htx_get_blksz(blk);
+
+ switch (htx_get_blk_type(blk)) {
+ case HTX_BLK_REQ_SL:
+ case HTX_BLK_HDR:
+ TRACE_USER("sending FCGI PARAMS record", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_params(fconn, fstrm, htx);
+ if (!ret) {
+ goto done;
+ }
+ total += ret;
+ count -= ret;
+ break;
+
+ case HTX_BLK_EOH:
+ /* end of headers: close the PARAMS stream with an empty record */
+ if (!(fstrm->flags & FCGI_SF_EP_SENT)) {
+ TRACE_PROTO("sending FCGI PARAMS record", FCGI_EV_TX_RECORD|FCGI_EV_TX_PARAMS, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_empty_params(fconn, fstrm);
+ if (!ret)
+ goto done;
+ }
+ /* bodyless message: also close STDIN right away */
+ if (htx_is_unique_blk(htx, blk) && (htx->flags & HTX_FL_EOM)) {
+ TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_empty_stdin(fconn, fstrm);
+ if (!ret)
+ goto done;
+ }
+ goto remove_blk;
+
+ case HTX_BLK_DATA:
+ TRACE_PROTO("sending FCGI STDIN record", FCGI_EV_TX_RECORD|FCGI_EV_TX_STDIN, fconn->conn, fstrm, htx);
+ ret = fcgi_strm_send_stdin(fconn, fstrm, htx, count, buf);
+ if (ret > 0) {
+ /* the htx may have been moved by the send; refresh it */
+ htx = htx_from_buf(buf);
+ total += ret;
+ count -= ret;
+ if (ret < bsize)
+ goto done;
+ }
+ break;
+
+ default:
+ remove_blk:
+ htx_remove_blk(htx, blk);
+ total += bsize;
+ count -= bsize;
+ break;
+ }
+ }
+
+ done:
+ if (fstrm->state >= FCGI_SS_HLOC) {
+ /* trim any possibly pending data after we close (extra CR-LF,
+ * unprocessed trailers, abnormal extra data, ...)
+ */
+ total += count;
+ count = 0;
+ }
+
+ if (fstrm->state == FCGI_SS_ERROR) {
+ TRACE_DEVEL("reporting error to the app-layer stream", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_ERR|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
+ se_fl_set_error(fstrm->sd);
+ if (!(fstrm->flags & FCGI_SF_BEGIN_SENT) || fcgi_strm_send_abort(fconn, fstrm))
+ fcgi_strm_close(fstrm);
+ }
+
+ if (htx)
+ htx_to_buf(htx, buf);
+
+ if (total > 0) {
+ if (!(fconn->wait_event.events & SUB_RETRY_SEND)) {
+ TRACE_DEVEL("data queued, waking up fconn sender", FCGI_EV_STRM_SEND|FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_WAKE, fconn->conn, fstrm);
+ tasklet_wakeup(fconn->wait_event.tasklet);
+ }
+
+ /* Ok we managed to send something, leave the send_list */
+ if (!(fstrm->flags & (FCGI_SF_WANT_SHUTR|FCGI_SF_WANT_SHUTW)))
+ LIST_DEL_INIT(&fstrm->send_list);
+ }
+
+ TRACE_LEAVE(FCGI_EV_STRM_SEND, fconn->conn, fstrm, htx, (size_t[]){total});
+ return total;
+}
+
+/* for debugging with CLI's "show fd" command */
+static int fcgi_show_fd(struct buffer *msg, struct connection *conn)
+{
+ struct fcgi_conn *fconn = conn->ctx;
+ struct fcgi_strm *fstrm = NULL;
+ struct eb32_node *node;
+ int send_cnt = 0;
+ int tree_cnt = 0;
+ int orph_cnt = 0;
+ struct buffer *hmbuf, *tmbuf;
+
+ if (!fconn)
+ return 0;
+
+ /* count the streams queued for sending */
+ list_for_each_entry(fstrm, &fconn->send_list, send_list)
+ send_cnt++;
+
+ /* walk the stream tree: count all streams and those without a stream
+ * connector (orphans); <fstrm> ends up on the last stream visited
+ */
+ fstrm = NULL;
+ node = eb32_first(&fconn->streams_by_id);
+ while (node) {
+ fstrm = container_of(node, struct fcgi_strm, by_id);
+ tree_cnt++;
+ if (!fcgi_strm_sc(fstrm))
+ orph_cnt++;
+ node = eb32_next(node);
+ }
+
+ /* head and tail buffers of the mux's output ring */
+ hmbuf = br_head(fconn->mbuf);
+ tmbuf = br_tail(fconn->mbuf);
+ chunk_appendf(msg, " fconn.st0=%d .maxid=%d .flg=0x%04x .nbst=%u"
+ " .nbcs=%u .send_cnt=%d .tree_cnt=%d .orph_cnt=%d .sub=%d "
+ ".dsi=%d .dbuf=%u@%p+%u/%u .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
+ fconn->state, fconn->max_id, fconn->flags,
+ fconn->nb_streams, fconn->nb_sc, send_cnt, tree_cnt, orph_cnt,
+ fconn->wait_event.events, fconn->dsi,
+ (unsigned int)b_data(&fconn->dbuf), b_orig(&fconn->dbuf),
+ (unsigned int)b_head_ofs(&fconn->dbuf), (unsigned int)b_size(&fconn->dbuf),
+ br_head_idx(fconn->mbuf), br_tail_idx(fconn->mbuf), br_size(fconn->mbuf),
+ (unsigned int)b_data(hmbuf), b_orig(hmbuf),
+ (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
+ (unsigned int)b_data(tmbuf), b_orig(tmbuf),
+ (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
+
+ /* also dump the last stream seen in the tree, if any */
+ if (fstrm) {
+ chunk_appendf(msg, " last_fstrm=%p .id=%d .flg=0x%04x .rxbuf=%u@%p+%u/%u .sc=%p",
+ fstrm, fstrm->id, fstrm->flags,
+ (unsigned int)b_data(&fstrm->rxbuf), b_orig(&fstrm->rxbuf),
+ (unsigned int)b_head_ofs(&fstrm->rxbuf), (unsigned int)b_size(&fstrm->rxbuf),
+ fcgi_strm_sc(fstrm));
+
+ chunk_appendf(msg, " .sd.flg=0x%08x", se_fl_get(fstrm->sd));
+ /* the SC only exists when the stream is not orphaned */
+ if (!se_fl_test(fstrm->sd, SE_FL_ORPHAN))
+ chunk_appendf(msg, " .sc.flg=0x%08x .sc.app=%p",
+ fcgi_strm_sc(fstrm)->flags, fcgi_strm_sc(fstrm)->app);
+
+ chunk_appendf(msg, " .subs=%p", fstrm->subs);
+ if (fstrm->subs) {
+ chunk_appendf(msg, "(ev=%d tl=%p", fstrm->subs->events, fstrm->subs->tasklet);
+ chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
+ fstrm->subs->tasklet->calls,
+ fstrm->subs->tasklet->context);
+ resolve_sym_name(msg, NULL, fstrm->subs->tasklet->process);
+ chunk_appendf(msg, ")");
+ }
+ }
+ return 0;
+}
+
+/* Migrate the connection to the current thread.
+ * Return 0 if successful, non-zero otherwise.
+ * Expected to be called with the old thread lock held.
+ */
+static int fcgi_takeover(struct connection *conn, int orig_tid)
+{
+ struct fcgi_conn *fcgi = conn->ctx;
+ struct task *task;
+ struct task *new_task;
+ struct tasklet *new_tasklet;
+
+ /* Pre-allocate tasks so that we don't have to roll back after the xprt
+ * has been migrated.
+ */
+ new_task = task_new_here();
+ new_tasklet = tasklet_new();
+ if (!new_task || !new_tasklet)
+ goto fail;
+
+ /* migrate the FD to the current thread */
+ if (fd_takeover(conn->handle.fd, conn) != 0)
+ goto fail;
+
+ if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+ /* We failed to takeover the xprt, even if the connection may
+ * still be valid, flag it as error'd, as we have already
+ * taken over the fd, and wake the tasklet, so that it will
+ * destroy it.
+ */
+ conn->flags |= CO_FL_ERROR;
+ tasklet_wakeup_on(fcgi->wait_event.tasklet, orig_tid);
+ goto fail;
+ }
+
+ /* drop any pending xprt subscription set up on the old thread */
+ if (fcgi->wait_event.events)
+ fcgi->conn->xprt->unsubscribe(fcgi->conn, fcgi->conn->xprt_ctx,
+ fcgi->wait_event.events, &fcgi->wait_event);
+
+ task = fcgi->task;
+ if (task) {
+ /* only assign a task if there was already one, otherwise
+ * the preallocated new task will be released.
+ */
+ task->context = NULL;
+ fcgi->task = NULL;
+ __ha_barrier_store();
+ task_kill(task);
+
+ fcgi->task = new_task;
+ new_task = NULL;
+ fcgi->task->process = fcgi_timeout_task;
+ fcgi->task->context = fcgi;
+ }
+
+ /* To let the tasklet know it should free itself, and do nothing else,
+ * set its context to NULL;
+ */
+ fcgi->wait_event.tasklet->context = NULL;
+ tasklet_wakeup_on(fcgi->wait_event.tasklet, orig_tid);
+
+ /* install the fresh tasklet and resubscribe for receipt locally */
+ fcgi->wait_event.tasklet = new_tasklet;
+ fcgi->wait_event.tasklet->process = fcgi_io_cb;
+ fcgi->wait_event.tasklet->context = fcgi;
+ fcgi->conn->xprt->subscribe(fcgi->conn, fcgi->conn->xprt_ctx,
+ SUB_RETRY_RECV, &fcgi->wait_event);
+
+ if (new_task)
+ __task_free(new_task);
+ return 0;
+ fail:
+ if (new_task)
+ __task_free(new_task);
+ tasklet_free(new_tasklet);
+ return -1;
+}
+
+/****************************************/
+/* MUX initialization and instantiation */
+/****************************************/
+
+/* The mux operations */
+static const struct mux_ops mux_fcgi_ops = {
+ .init = fcgi_init,
+ .wake = fcgi_wake,
+ .attach = fcgi_attach,
+ .get_first_sc = fcgi_get_first_sc,
+ .detach = fcgi_detach,
+ .destroy = fcgi_destroy,
+ .avail_streams = fcgi_avail_streams,
+ .used_streams = fcgi_used_streams,
+ .rcv_buf = fcgi_rcv_buf,
+ .snd_buf = fcgi_snd_buf,
+ .subscribe = fcgi_subscribe,
+ .unsubscribe = fcgi_unsubscribe,
+ .shutr = fcgi_shutr,
+ .shutw = fcgi_shutw,
+ .ctl = fcgi_ctl,
+ .sctl = fcgi_sctl,
+ .show_fd = fcgi_show_fd, /* "show fd" CLI dump */
+ .takeover = fcgi_takeover, /* inter-thread connection migration */
+ .flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG,
+ .name = "FCGI",
+};
+
+
+/* this mux registers FCGI proto: HTTP mode, backend side only */
+static struct mux_proto_list mux_proto_fcgi =
+{ .token = IST("fcgi"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BE, .mux = &mux_fcgi_ops };
+
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_fcgi);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/mux_h1.c b/src/mux_h1.c
new file mode 100644
index 0000000..455ebeb
--- /dev/null
+++ b/src/mux_h1.c
@@ -0,0 +1,5374 @@
+/*
+ * HTTP/1 mux-demux for connections
+ *
+ * Copyright 2018 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <import/ebistree.h>
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/h1.h>
+#include <haproxy/h1_htx.h>
+#include <haproxy/h2.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/log.h>
+#include <haproxy/mux_h1-t.h>
+#include <haproxy/pipe.h>
+#include <haproxy/proxy.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/trace.h>
+#include <haproxy/xref.h>
+
+/* H1 connection descriptor */
+struct h1c {
+ struct connection *conn;
+ struct h1s *h1s; /* H1 stream descriptor */
+ struct task *task; /* timeout management task */
+
+ uint32_t flags; /* Connection flags: H1C_F_* */
+ enum h1_cs state; /* Connection state */
+
+
+ struct buffer ibuf; /* Input buffer to store data before parsing */
+ struct buffer obuf; /* Output buffer to store data after reformatting */
+ struct proxy *px; /* attached proxy — NOTE(review): presumably fe or be depending on H1C_F_IS_BACK; confirm */
+
+ unsigned int errcode; /* Status code when an error occurred at the H1 connection level */
+
+ int idle_exp; /* idle expiration date (http-keep-alive or http-request timeout) */
+ int timeout; /* client/server timeout duration */
+ int shut_timeout; /* client-fin/server-fin timeout duration */
+
+ unsigned int req_count; /* The number of requests handled by this H1 connection */
+
+ struct h1_counters *px_counters; /* h1 counters attached to proxy */
+ struct buffer_wait buf_wait; /* Wait list for buffer allocation */
+ struct wait_event wait_event; /* To be used if we're waiting for I/Os */
+};
+
+/* H1 stream descriptor */
+struct h1s {
+ struct h1c *h1c; /* back pointer to the owning H1 connection */
+ struct sedesc *sd; /* stream endpoint descriptor */
+ uint32_t flags; /* Stream flags: H1S_F_* */
+
+ struct wait_event *subs; /* Address of the wait_event the stream connector associated is waiting on */
+
+ struct session *sess; /* Associated session */
+ struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
+ struct h1m req; /* request message state (parser/formatter) */
+ struct h1m res; /* response message state (parser/formatter) */
+
+ enum http_meth_t meth; /* HTTP request method */
+ uint16_t status; /* HTTP response status */
+
+ char ws_key[25]; /* websocket handshake key */
+};
+
+/* Map of headers used to convert outgoing headers */
+struct h1_hdrs_map {
+ char *name; /* name of the map (configuration identifier) */
+ struct eb_root map; /* tree of h1_hdr_entry nodes */
+};
+
+/* An entry in a headers map */
+struct h1_hdr_entry {
+ struct ist name; /* replacement header name */
+ struct ebpt_node node; /* indexing node, keyed by the original name */
+};
+
+/* Declare the headers map */
+static struct h1_hdrs_map hdrs_map = { .name = NULL, .map = EB_ROOT };
+/* NOTE(review): presumably set from a global option to allow a payload with
+ * any HTTP method — confirm against the config parser.
+ */
+static int accept_payload_with_any_method = 0;
+
+/* trace source and events */
+static void h1_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ * h1c - internal H1 connection
+ * h1s - internal H1 stream
+ * strm - application layer
+ * rx - data receipt
+ * tx - data transmission
+ *
+ */
+static const struct trace_event h1_trace_events[] = {
+#define H1_EV_H1C_NEW (1ULL << 0)
+ { .mask = H1_EV_H1C_NEW, .name = "h1c_new", .desc = "new H1 connection" },
+#define H1_EV_H1C_RECV (1ULL << 1)
+ { .mask = H1_EV_H1C_RECV, .name = "h1c_recv", .desc = "Rx on H1 connection" },
+#define H1_EV_H1C_SEND (1ULL << 2)
+ { .mask = H1_EV_H1C_SEND, .name = "h1c_send", .desc = "Tx on H1 connection" },
+#define H1_EV_H1C_BLK (1ULL << 3)
+ { .mask = H1_EV_H1C_BLK, .name = "h1c_blk", .desc = "H1 connection blocked" },
+#define H1_EV_H1C_WAKE (1ULL << 4)
+ { .mask = H1_EV_H1C_WAKE, .name = "h1c_wake", .desc = "H1 connection woken up" },
+#define H1_EV_H1C_END (1ULL << 5)
+ { .mask = H1_EV_H1C_END, .name = "h1c_end", .desc = "H1 connection terminated" },
+#define H1_EV_H1C_ERR (1ULL << 6)
+ { .mask = H1_EV_H1C_ERR, .name = "h1c_err", .desc = "error on H1 connection" },
+
+/* NOTE(review): "rx_trailerus"/"tx_trailerus" below look like typos for
+ * "rx_trailers"/"tx_trailers", but these names are matched by trace CLI
+ * filters, so renaming them would break existing filters — leave as-is.
+ */
+#define H1_EV_RX_DATA (1ULL << 7)
+ { .mask = H1_EV_RX_DATA, .name = "rx_data", .desc = "receipt of any H1 data" },
+#define H1_EV_RX_EOI (1ULL << 8)
+ { .mask = H1_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H1 input" },
+#define H1_EV_RX_HDRS (1ULL << 9)
+ { .mask = H1_EV_RX_HDRS, .name = "rx_headers", .desc = "receipt of H1 headers" },
+#define H1_EV_RX_BODY (1ULL << 10)
+ { .mask = H1_EV_RX_BODY, .name = "rx_body", .desc = "receipt of H1 body" },
+#define H1_EV_RX_TLRS (1ULL << 11)
+ { .mask = H1_EV_RX_TLRS, .name = "rx_trailerus", .desc = "receipt of H1 trailers" },
+
+#define H1_EV_TX_DATA (1ULL << 12)
+ { .mask = H1_EV_TX_DATA, .name = "tx_data", .desc = "transmission of any H1 data" },
+#define H1_EV_TX_EOI (1ULL << 13)
+ { .mask = H1_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of end of H1 input" },
+#define H1_EV_TX_HDRS (1ULL << 14)
+ { .mask = H1_EV_TX_HDRS, .name = "tx_headers", .desc = "transmission of all headers" },
+#define H1_EV_TX_BODY (1ULL << 15)
+ { .mask = H1_EV_TX_BODY, .name = "tx_body", .desc = "transmission of H1 body" },
+#define H1_EV_TX_TLRS (1ULL << 16)
+ { .mask = H1_EV_TX_TLRS, .name = "tx_trailerus", .desc = "transmission of H1 trailers" },
+
+#define H1_EV_H1S_NEW (1ULL << 17)
+ { .mask = H1_EV_H1S_NEW, .name = "h1s_new", .desc = "new H1 stream" },
+#define H1_EV_H1S_BLK (1ULL << 18)
+ { .mask = H1_EV_H1S_BLK, .name = "h1s_blk", .desc = "H1 stream blocked" },
+#define H1_EV_H1S_END (1ULL << 19)
+ { .mask = H1_EV_H1S_END, .name = "h1s_end", .desc = "H1 stream terminated" },
+#define H1_EV_H1S_ERR (1ULL << 20)
+ { .mask = H1_EV_H1S_ERR, .name = "h1s_err", .desc = "error on H1 stream" },
+
+#define H1_EV_STRM_NEW (1ULL << 21)
+ { .mask = H1_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
+#define H1_EV_STRM_RECV (1ULL << 22)
+ { .mask = H1_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
+#define H1_EV_STRM_SEND (1ULL << 23)
+ { .mask = H1_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
+#define H1_EV_STRM_WAKE (1ULL << 24)
+ { .mask = H1_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
+#define H1_EV_STRM_SHUT (1ULL << 25)
+ { .mask = H1_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
+#define H1_EV_STRM_END (1ULL << 26)
+ { .mask = H1_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
+#define H1_EV_STRM_ERR (1ULL << 27)
+ { .mask = H1_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
+
+ { }
+};
+
+/* arguments usable for trace lock-on filtering */
+static const struct name_desc h1_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="h1s", .desc="H1 stream" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+/* verbosity levels, from least to most detailed output */
+static const struct name_desc h1_trace_decoding[] = {
+#define H1_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define H1_VERB_MINIMAL 2
+ { .name="minimal", .desc="report only h1c/h1s state and flags, no real decoding" },
+#define H1_VERB_SIMPLE 3
+ { .name="simple", .desc="add request/response status line or htx info when available" },
+#define H1_VERB_ADVANCED 4
+ { .name="advanced", .desc="add header fields or frame decoding when available" },
+#define H1_VERB_COMPLETE 5
+ { .name="complete", .desc="add full data dump when available" },
+ { /* end */ }
+};
+
+/* the "h1" trace source, registered at startup below */
+static struct trace_source trace_h1 __read_mostly = {
+ .name = IST("h1"),
+ .desc = "HTTP/1 multiplexer",
+ .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
+ .default_cb = h1_trace,
+ .known_events = h1_trace_events,
+ .lockon_args = h1_trace_lockon_args,
+ .decoding = h1_trace_decoding,
+ .report_events = ~0, // report everything by default
+};
+
+#define TRACE_SOURCE &trace_h1
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+
+/* h1 stats module: indexes into the h1_stats[] and counter arrays */
+enum {
+ H1_ST_OPEN_CONN,
+ H1_ST_OPEN_STREAM,
+ H1_ST_TOTAL_CONN,
+ H1_ST_TOTAL_STREAM,
+
+ H1_ST_BYTES_IN,
+ H1_ST_BYTES_OUT,
+#if defined(USE_LINUX_SPLICE)
+ H1_ST_SPLICED_BYTES_IN,
+ H1_ST_SPLICED_BYTES_OUT,
+#endif
+ H1_STATS_COUNT /* must be the last member of the enum */
+};
+
+
+/* names and help texts of the h1 stats, indexed by the H1_ST_* enum.
+ * BUGFIX: descriptions said "bytes send" and "bytes sendusing" — fixed the
+ * grammar in the help texts (the .name keys are unchanged).
+ */
+static struct name_desc h1_stats[] = {
+ [H1_ST_OPEN_CONN] = { .name = "h1_open_connections",
+ .desc = "Count of currently open connections" },
+ [H1_ST_OPEN_STREAM] = { .name = "h1_open_streams",
+ .desc = "Count of currently open streams" },
+ [H1_ST_TOTAL_CONN] = { .name = "h1_total_connections",
+ .desc = "Total number of connections" },
+ [H1_ST_TOTAL_STREAM] = { .name = "h1_total_streams",
+ .desc = "Total number of streams" },
+
+ [H1_ST_BYTES_IN] = { .name = "h1_bytes_in",
+ .desc = "Total number of bytes received" },
+ [H1_ST_BYTES_OUT] = { .name = "h1_bytes_out",
+ .desc = "Total number of bytes sent" },
+#if defined(USE_LINUX_SPLICE)
+ [H1_ST_SPLICED_BYTES_IN] = { .name = "h1_spliced_bytes_in",
+ .desc = "Total number of bytes received using kernel splicing" },
+ [H1_ST_SPLICED_BYTES_OUT] = { .name = "h1_spliced_bytes_out",
+ .desc = "Total number of bytes sent using kernel splicing" },
+#endif
+
+};
+
+/* global h1 counters, exposed through the stats module below */
+static struct h1_counters {
+ long long open_conns; /* count of currently open connections */
+ long long open_streams; /* count of currently open streams */
+ long long total_conns; /* total number of connections */
+ long long total_streams; /* total number of streams */
+
+ long long bytes_in; /* number of bytes received */
+ long long bytes_out; /* number of bytes sent */
+#if defined(USE_LINUX_SPLICE)
+ long long spliced_bytes_in; /* number of bytes received using kernel splicing */
+ long long spliced_bytes_out; /* number of bytes sent using kernel splicing */
+#endif
+} h1_counters;
+
+/* Copies the h1 counters pointed to by <data> into the <stats> field array,
+ * following the H1_ST_* layout. Used as the stats module's fill_stats callback.
+ */
+static void h1_fill_stats(void *data, struct field *stats)
+{
+ struct h1_counters *counters = data;
+
+ stats[H1_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
+ stats[H1_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
+ stats[H1_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
+ stats[H1_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
+
+ stats[H1_ST_BYTES_IN] = mkf_u64(FN_COUNTER, counters->bytes_in);
+ stats[H1_ST_BYTES_OUT] = mkf_u64(FN_COUNTER, counters->bytes_out);
+#if defined(USE_LINUX_SPLICE)
+ stats[H1_ST_SPLICED_BYTES_IN] = mkf_u64(FN_COUNTER, counters->spliced_bytes_in);
+ stats[H1_ST_SPLICED_BYTES_OUT] = mkf_u64(FN_COUNTER, counters->spliced_bytes_out);
+#endif
+}
+
+/* stats module registration: clearable counters, valid for both FE and BE */
+static struct stats_module h1_stats_module = {
+ .name = "h1",
+ .fill_stats = h1_fill_stats,
+ .stats = h1_stats,
+ .stats_count = H1_STATS_COUNT,
+ .counters = &h1_counters,
+ .counters_size = sizeof(h1_counters),
+ .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
+ .clearable = 1,
+};
+
+INITCALL1(STG_REGISTER, stats_register_module, &h1_stats_module);
+
+
+/* the h1c and h1s pools */
+DECLARE_STATIC_POOL(pool_head_h1c, "h1c", sizeof(struct h1c));
+DECLARE_STATIC_POOL(pool_head_h1s, "h1s", sizeof(struct h1s));
+
+/* forward declarations of the main processing functions */
+static int h1_recv(struct h1c *h1c);
+static int h1_send(struct h1c *h1c);
+static int h1_process(struct h1c *h1c);
+/* h1_io_cb is exported to see it resolved in "show fd" */
+struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state);
+struct task *h1_timeout_task(struct task *t, void *context, unsigned int state);
+static void h1_shutw_conn(struct connection *conn);
+static void h1_wake_stream_for_recv(struct h1s *h1s);
+static void h1_wake_stream_for_send(struct h1s *h1s);
+static void h1s_destroy(struct h1s *h1s);
+
+/* returns the stconn associated to the H1 stream */
+static forceinline struct stconn *h1s_sc(const struct h1s *h1s)
+{
+ return h1s->sd->sc;
+}
+
+/* the H1 traces always expect that arg1, if non-null, is of type connection
+ * (from which we can derive h1c), that arg2, if non-null, is of type h1s, and
+ * that arg3, if non-null, is a htx for rx/tx headers.
+ */
+static void h1_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ const struct connection *conn = a1;
+ const struct h1c *h1c = conn ? conn->ctx : NULL;
+ const struct h1s *h1s = a2;
+ const struct htx *htx = a3;
+ const size_t *val = a4;
+
+ /* fall back to the h1s's connection when no connection was passed */
+ if (!h1c)
+ h1c = (h1s ? h1s->h1c : NULL);
+
+ if (!h1c || src->verbosity < H1_VERB_CLEAN)
+ return;
+
+ /* Display frontend/backend info by default */
+ chunk_appendf(&trace_buf, " : [%c,%s]", ((h1c->flags & H1C_F_IS_BACK) ? 'B' : 'F'), h1c_st_to_str(h1c->state));
+
+ /* Display request and response states if h1s is defined */
+ if (h1s) {
+ chunk_appendf(&trace_buf, " [%s, %s]",
+ h1m_state_str(h1s->req.state), h1m_state_str(h1s->res.state));
+
+ if (src->verbosity > H1_VERB_SIMPLE) {
+ chunk_appendf(&trace_buf, " - req=(.fl=0x%08x .curr_len=%lu .body_len=%lu)",
+ h1s->req.flags, (unsigned long)h1s->req.curr_len, (unsigned long)h1s->req.body_len);
+ chunk_appendf(&trace_buf, " res=(.fl=0x%08x .curr_len=%lu .body_len=%lu)",
+ h1s->res.flags, (unsigned long)h1s->res.curr_len, (unsigned long)h1s->res.body_len);
+ }
+
+ }
+
+ if (src->verbosity == H1_VERB_CLEAN)
+ return;
+
+ /* Display the value to the 4th argument (level > STATE) */
+ /* NOTE(review): %lu with a (long) cast — harmless on common ABIs but
+ * the cast should be (unsigned long) to match the format.
+ */
+ if (src->level > TRACE_LEVEL_STATE && val)
+ chunk_appendf(&trace_buf, " - VAL=%lu", (long)*val);
+
+ /* Display status-line if possible (verbosity > MINIMAL) */
+ if (src->verbosity > H1_VERB_MINIMAL && htx && htx_nbblks(htx)) {
+ const struct htx_blk *blk = htx_get_head_blk(htx);
+ const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
+ chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+ }
+
+ /* Display h1c info and, if defined, h1s info (pointer + flags) */
+ chunk_appendf(&trace_buf, " - h1c=%p(0x%08x)", h1c, h1c->flags);
+ if (h1c->conn)
+ chunk_appendf(&trace_buf, " conn=%p(0x%08x)", h1c->conn, h1c->conn->flags);
+ if (h1s) {
+ chunk_appendf(&trace_buf, " h1s=%p(0x%08x)", h1s, h1s->flags);
+ if (h1s->sd)
+ chunk_appendf(&trace_buf, " sd=%p(0x%08x)", h1s->sd, se_fl_get(h1s->sd));
+ if (h1s->sd && h1s_sc(h1s))
+ chunk_appendf(&trace_buf, " sc=%p(0x%08x)", h1s_sc(h1s), h1s_sc(h1s)->flags);
+ }
+
+ if (src->verbosity == H1_VERB_MINIMAL)
+ return;
+
+ /* Display input and output buffer info (level > USER & verbosity > SIMPLE) */
+ if (src->level > TRACE_LEVEL_USER) {
+ if (src->verbosity == H1_VERB_COMPLETE ||
+ (src->verbosity == H1_VERB_ADVANCED && (mask & (H1_EV_H1C_RECV|H1_EV_STRM_RECV))))
+ chunk_appendf(&trace_buf, " ibuf=%u@%p+%u/%u",
+ (unsigned int)b_data(&h1c->ibuf), b_orig(&h1c->ibuf),
+ (unsigned int)b_head_ofs(&h1c->ibuf), (unsigned int)b_size(&h1c->ibuf));
+ if (src->verbosity == H1_VERB_COMPLETE ||
+ (src->verbosity == H1_VERB_ADVANCED && (mask & (H1_EV_H1C_SEND|H1_EV_STRM_SEND))))
+ chunk_appendf(&trace_buf, " obuf=%u@%p+%u/%u",
+ (unsigned int)b_data(&h1c->obuf), b_orig(&h1c->obuf),
+ (unsigned int)b_head_ofs(&h1c->obuf), (unsigned int)b_size(&h1c->obuf));
+ }
+
+ /* Display htx info if defined (level > USER) */
+ if (src->level > TRACE_LEVEL_USER && htx) {
+ int full = 0;
+
+ /* Full htx info (level > STATE && verbosity > SIMPLE) */
+ if (src->level > TRACE_LEVEL_STATE) {
+ if (src->verbosity == H1_VERB_COMPLETE)
+ full = 1;
+ else if (src->verbosity == H1_VERB_ADVANCED && (mask & (H1_EV_RX_HDRS|H1_EV_TX_HDRS)))
+ full = 1;
+ }
+
+ chunk_memcat(&trace_buf, "\n\t", 2);
+ htx_dump(&trace_buf, htx, full);
+ }
+}
+
+
+/*****************************************************/
+/* functions below are for dynamic buffer management */
+/*****************************************************/
+/*
+ * Indicates whether or not we may receive data. The rules are the following :
+ * - if an error or a shutdown for reads was detected on the H1 connection we
+ * must not attempt to receive
+ * - if we are waiting for the connection establishment, we must not attempt
+ * to receive
+ * - if reads are explicitly disabled, we must not attempt to receive
+ * - if the input buffer failed to be allocated or is full, we must not try
+ * to receive
+ * - if the mux is blocked on an input condition, we must not attempt to
+ * receive
+ * - otherwise we may attempt to receive
+ */
+static inline int h1_recv_allowed(const struct h1c *h1c)
+{
+ if (h1c->flags & (H1C_F_EOS|H1C_F_ERROR)) {
+ TRACE_DEVEL("recv not allowed because of (eos|error) on h1c", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+ return 0;
+ }
+
+ if (h1c->conn->flags & (CO_FL_WAIT_L4_CONN|CO_FL_WAIT_L6_CONN)) {
+ TRACE_DEVEL("recv not allowed because of (waitl4|waitl6) on connection", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+ return 0;
+ }
+
+ if ((h1c->flags & (H1C_F_IN_ALLOC|H1C_F_IN_FULL|H1C_F_IN_SALLOC))) {
+ TRACE_DEVEL("recv not allowed because input is blocked", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Tries to grab a buffer and to re-enable processing on mux <target>. The h1
+ * flags are used to figure what buffer was requested. It returns 1 if the
+ * allocation succeeds, in which case the connection is woken up, or 0 if it's
+ * impossible to wake up and we prefer to be woken up later.
+ */
+static int h1_buf_available(void *target)
+{
+ struct h1c *h1c = target;
+
+ /* input buffer was awaited: resume receiving if still allowed */
+ if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
+ TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
+ h1c->flags &= ~H1C_F_IN_ALLOC;
+ if (h1_recv_allowed(h1c))
+ tasklet_wakeup(h1c->wait_event.tasklet);
+ return 1;
+ }
+
+ /* output buffer was awaited: wake the stream so it can send again */
+ if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
+ TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
+ h1c->flags &= ~H1C_F_OUT_ALLOC;
+ if (h1c->h1s)
+ h1_wake_stream_for_send(h1c->h1s);
+ return 1;
+ }
+
+ /* stream rx buffer was awaited: resume demultiplexing */
+ if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
+ TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
+ h1c->flags &= ~H1C_F_IN_SALLOC;
+ tasklet_wakeup(h1c->wait_event.tasklet);
+ return 1;
+ }
+
+ return 0;
+}
+
/*
 * Allocate a buffer. If it fails, it registers the mux in the buffer wait
 * queue so that h1_buf_available() is called back once a buffer can be
 * obtained. Returns the allocated buffer or NULL.
 */
static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	/* only enqueue once: skip allocation attempts while already waiting */
	if (likely(!LIST_INLIST(&h1c->buf_wait.list)) &&
	    unlikely((buf = b_alloc(bptr)) == NULL)) {
		h1c->buf_wait.target = h1c;
		h1c->buf_wait.wakeup_cb = h1_buf_available;
		LIST_APPEND(&th_ctx->buffer_wq, &h1c->buf_wait.list);
	}
	return buf;
}
+
/*
 * Release a buffer, if any, and try to wake up entities waiting in the buffer
 * wait queue. A zero-sized buffer was never allocated and needs no release.
 */
static inline void h1_release_buf(struct h1c *h1c, struct buffer *bptr)
{
	if (bptr->size) {
		b_free(bptr);
		/* one buffer was just freed: offer it to waiters */
		offer_buffers(h1c->buf_wait.target, 1);
	}
}
+
/* Returns 1 if the H1 connection is alive, i.e. in any state up to and
 * including H1_CS_RUNNING (IDLE, EMBRYONIC, UPGRADING or RUNNING). Otherwise
 * 0 is returned.
 */
static inline int h1_is_alive(const struct h1c *h1c)
{
	return (h1c->state <= H1_CS_RUNNING);
}
+
+/* Switch the H1 connection to CLOSING or CLOSED mode, depending on the output
+ * buffer state and if there is still a H1 stream or not. If there are sill
+ * pending outgoing data or if there is still a H1 stream, it is set to CLOSING
+ * state. Otherwise it is set to CLOSED mode. */
+static inline void h1_close(struct h1c *h1c)
+{
+ h1c->state = ((h1c->h1s || b_data(&h1c->obuf)) ? H1_CS_CLOSING : H1_CS_CLOSED);
+}
+
+/* returns the number of streams in use on a connection to figure if it's idle
+ * or not. We rely on H1C state to know if the connection is in-use or not. It
+ * is IDLE only when no H1 stream is attached and when the previous stream, if
+ * any, was fully terminated without any error and in K/A mode.
+ */
+static int h1_used_streams(struct connection *conn)
+{
+ struct h1c *h1c = conn->ctx;
+
+ return ((h1c->state == H1_CS_IDLE) ? 0 : 1);
+}
+
/* returns the number of streams still available on a connection: one when
 * the connection is idle, zero otherwise (H1 multiplexes a single stream). */
static int h1_avail_streams(struct connection *conn)
{
	return (h1_used_streams(conn) == 0) ? 1 : 0;
}
+
/* Refresh the h1c task timeout if necessary. Depending on the connection
 * state, the expiration is taken from the shut timeout (dead/half-closed),
 * the regular timeout (pending output or front conn not yet ready), the idle
 * expiration (idle front conn), or disabled (alive conn with an attached SC).
 * During soft-stop with close-spread-time, idle front connections may get an
 * earlier, randomized expiration so closings are spread over the window.
 */
static void h1_refresh_timeout(struct h1c *h1c)
{
	/* set when the computed timeout targets an idle (or equivalent)
	 * frontend connection, eligible for close-spread handling below */
	int is_idle_conn = 0;

	if (h1c->task) {
		if (!h1_is_alive(h1c)) {
			/* half-closed or dead connections : switch to clientfin/serverfin
			 * timeouts so that we don't hang too long on clients that have
			 * gone away (especially in tunnel mode).
			 */
			h1c->task->expire = tick_add(now_ms, h1c->shut_timeout);
			TRACE_DEVEL("refreshing connection's timeout (dead or half-closed)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
			is_idle_conn = 1;
		}
		else if (b_data(&h1c->obuf)) {
			/* alive connection with pending outgoing data, need a timeout (server or client). */
			h1c->task->expire = tick_add(now_ms, h1c->timeout);
			TRACE_DEVEL("refreshing connection's timeout (pending outgoing data)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
		}
		else if (!(h1c->flags & H1C_F_IS_BACK) && (h1c->state == H1_CS_IDLE)) {
			/* idle front connections. */
			h1c->task->expire = (tick_isset(h1c->idle_exp) ? h1c->idle_exp : tick_add(now_ms, h1c->timeout));
			TRACE_DEVEL("refreshing connection's timeout (idle front h1c)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
			is_idle_conn = 1;
		}
		else if (!(h1c->flags & H1C_F_IS_BACK) && (h1c->state != H1_CS_RUNNING)) {
			/* alive front connections waiting for a fully usable stream need a timeout. */
			h1c->task->expire = tick_add(now_ms, h1c->timeout);
			TRACE_DEVEL("refreshing connection's timeout (alive front h1c but not ready)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
			/* A frontend connection not yet ready could be treated the same way as an idle
			 * one in case of soft-close.
			 */
			is_idle_conn = 1;
		}
		else {
			/* alive back connections of front connections with a stream connector attached */
			h1c->task->expire = TICK_ETERNITY;
			TRACE_DEVEL("no connection timeout (alive back h1c or front h1c with an SC)", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
		}

		/* Finally set the idle expiration date if shorter */
		h1c->task->expire = tick_first(h1c->task->expire, h1c->idle_exp);

		if ((h1c->px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
		    is_idle_conn && tick_isset(global.close_spread_end)) {
			/* If a soft-stop is in progress and a close-spread-time
			 * is set, we want to spread idle connection closing roughly
			 * evenly across the defined window. This should only
			 * act on idle frontend connections.
			 * If the window end is already in the past, we wake the
			 * timeout task up immediately so that it can be closed.
			 */
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* We don't need to reset the expire if it would
				 * already happen before the close window end.
				 */
				if (tick_is_le(global.close_spread_end, h1c->task->expire)) {
					/* Set an expire value shorter than the current value
					 * because the close spread window end comes earlier.
					 */
					h1c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
					TRACE_DEVEL("connection timeout set to value before close-spread window end", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn);
				}
			}
			else {
				/* We are past the soft close window end, wake the timeout
				 * task up immediately.
				 */
				task_wakeup(h1c->task, TASK_WOKEN_TIMER);
			}
		}
		TRACE_DEVEL("new expiration date", H1_EV_H1C_SEND|H1_EV_H1C_RECV, h1c->conn, 0, 0, (size_t[]){h1c->task->expire});
		task_queue(h1c->task);
	}
}
+
/* Compute and set <h1c->idle_exp>, the idle expiration date of a frontend H1
 * connection: keep-alive timeout between requests, http-request timeout while
 * a request is being awaited or parsed, none otherwise. Backend connections
 * and taskless connections never get an idle expiration.
 */
static void h1_set_idle_expiration(struct h1c *h1c)
{
	if (h1c->flags & H1C_F_IS_BACK || !h1c->task) {
		TRACE_DEVEL("no idle expiration (backend connection || no task)", H1_EV_H1C_RECV, h1c->conn);
		h1c->idle_exp = TICK_ETERNITY;
		return;
	}
	if (h1c->state == H1_CS_IDLE) {
		/* only set once: don't refresh an already armed expiration */
		if (!tick_isset(h1c->idle_exp)) {
			if ((h1c->flags & H1C_F_WAIT_NEXT_REQ) &&     /* Not the first request */
			    !b_data(&h1c->ibuf) &&                    /* No input data */
			    tick_isset(h1c->px->timeout.httpka)) {    /* K-A timeout set */
				h1c->idle_exp = tick_add_ifset(now_ms, h1c->px->timeout.httpka);
				TRACE_DEVEL("set idle expiration (keep-alive timeout)", H1_EV_H1C_RECV, h1c->conn);
			}
			else {
				h1c->idle_exp = tick_add_ifset(now_ms, h1c->px->timeout.httpreq);
				TRACE_DEVEL("set idle expiration (http-request timeout)", H1_EV_H1C_RECV, h1c->conn);
			}
		}
	}
	else if (h1c->state < H1_CS_RUNNING) {
		/* embryonic/upgrading: the request is not fully received yet */
		if (!tick_isset(h1c->idle_exp)) {
			h1c->idle_exp = tick_add_ifset(now_ms, h1c->px->timeout.httpreq);
			TRACE_DEVEL("set idle expiration (http-request timeout)", H1_EV_H1C_RECV, h1c->conn);
		}
	}
	else {
		h1c->idle_exp = TICK_ETERNITY;
		TRACE_DEVEL("unset idle expiration (running or closing)", H1_EV_H1C_RECV, h1c->conn);
	}
}
+/*****************************************************************/
+/* functions below are dedicated to the mux setup and management */
+/*****************************************************************/
+
+/* returns non-zero if there are input data pending for stream h1s. */
+static inline size_t h1s_data_pending(const struct h1s *h1s)
+{
+ const struct h1m *h1m;
+
+ h1m = ((h1s->h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
+ return ((h1m->state == H1_MSG_DONE) ? 0 : b_data(&h1s->h1c->ibuf));
+}
+
/* Creates a new stream connector and the associated stream. <input> is used as input
 * buffer for the stream. On success, it is transferred to the stream and the
 * mux is no longer responsible of it. On error, <input> is unchanged, thus the
 * mux must still take care of it. However, there is nothing special to do
 * because, on success, <input> is updated to points on BUF_NULL. Thus, calling
 * b_free() on it is always safe. This function returns the stream connector on
 * success or NULL on error. */
static struct stconn *h1s_new_sc(struct h1s *h1s, struct buffer *input)
{
	struct h1c *h1c = h1s->h1c;

	TRACE_ENTER(H1_EV_STRM_NEW, h1c->conn, h1s);

	/* propagate stream properties to the endpoint descriptor before
	 * creating the SC */
	if (h1s->flags & H1S_F_NOT_FIRST)
		se_fl_set(h1s->sd, SE_FL_NOT_FIRST);
	if (h1s->req.flags & H1_MF_UPG_WEBSOCKET)
		se_fl_set(h1s->sd, SE_FL_WEBSOCKET);

	if (!sc_new_from_endp(h1s->sd, h1c->conn->owner, input)) {
		TRACE_ERROR("SC allocation failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, h1c->conn, h1s);
		goto err;
	}

	/* a usable stream is now attached: the connection is fully running */
	h1c->state = H1_CS_RUNNING;
	TRACE_LEAVE(H1_EV_STRM_NEW, h1c->conn, h1s);
	return h1s_sc(h1s);

  err:
	TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_ERR, h1c->conn, h1s);
	return NULL;
}
+
/* Upgrade an existing stream connector (inherited from a TCP stream) to a
 * full HTTP stream, passing <input> as initial data. On success the H1C
 * switches to the RUNNING state and the SC is returned, otherwise NULL is
 * returned and <input> remains owned by the mux. */
static struct stconn *h1s_upgrade_sc(struct h1s *h1s, struct buffer *input)
{
	TRACE_ENTER(H1_EV_STRM_NEW, h1s->h1c->conn, h1s);

	if (stream_upgrade_from_sc(h1s_sc(h1s), input) < 0) {
		TRACE_ERROR("stream upgrade failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, h1s->h1c->conn, h1s);
		goto err;
	}

	h1s->h1c->state = H1_CS_RUNNING;
	TRACE_LEAVE(H1_EV_STRM_NEW, h1s->h1c->conn, h1s);
	return h1s_sc(h1s);

  err:
	TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_ERR, h1s->h1c->conn, h1s);
	return NULL;
}
+
/* Allocate and initialize a new H1 stream for connection <h1c>, attach it to
 * the connection and switch the H1C to the EMBRYONIC state. The request and
 * response parsers are initialized, keep-alive is assumed by default. Returns
 * the new H1S or NULL on allocation failure. */
static struct h1s *h1s_new(struct h1c *h1c)
{
	struct h1s *h1s;

	TRACE_ENTER(H1_EV_H1S_NEW, h1c->conn);

	h1s = pool_alloc(pool_head_h1s);
	if (!h1s) {
		TRACE_ERROR("H1S allocation failure", H1_EV_H1S_NEW|H1_EV_H1S_END|H1_EV_H1S_ERR, h1c->conn);
		goto fail;
	}
	h1s->h1c = h1c;
	h1c->h1s = h1s;
	h1s->sess = NULL;
	h1s->sd = NULL;
	h1s->flags = H1S_F_WANT_KAL;
	h1s->subs = NULL;
	h1s->rxbuf = BUF_NULL;
	memset(h1s->ws_key, 0, sizeof(h1s->ws_key));

	h1m_init_req(&h1s->req);
	h1s->req.flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);

	h1m_init_res(&h1s->res);
	h1s->res.flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);

	h1s->status = 0;
	h1s->meth   = HTTP_METH_OTHER;

	/* remember this is a subsequent request on a kept-alive connection */
	if (h1c->flags & H1C_F_WAIT_NEXT_REQ)
		h1s->flags |= H1S_F_NOT_FIRST;
	h1s->h1c->state = H1_CS_EMBRYONIC;
	h1s->h1c->flags &= ~H1C_F_WAIT_NEXT_REQ;
	TRACE_LEAVE(H1_EV_H1S_NEW, h1c->conn, h1s);
	return h1s;

  fail:
	TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_ERR, h1c->conn);
	return NULL;
}
+
/* Create a new H1 stream on the frontend connection <h1c>. If <sc> is
 * provided (TCP-to-H1 upgrade), it is attached to the new stream, otherwise
 * an orphan endpoint descriptor is allocated until the request is parsed.
 * Returns the new H1S or NULL on error. */
static struct h1s *h1c_frt_stream_new(struct h1c *h1c, struct stconn *sc, struct session *sess)
{
	struct h1s *h1s;

	TRACE_ENTER(H1_EV_H1S_NEW, h1c->conn);

	h1s = h1s_new(h1c);
	if (!h1s)
		goto fail;

	if (sc) {
		if (sc_attach_mux(sc, h1s, h1c->conn) < 0)
			goto fail;
		h1s->sd = sc->sedesc;
	}
	else {
		/* no SC yet: create an orphan endpoint, the SC will be created
		 * later, once the request is parsed */
		h1s->sd = sedesc_new();
		if (!h1s->sd)
			goto fail;
		h1s->sd->se     = h1s;
		h1s->sd->conn   = h1c->conn;
		se_fl_set(h1s->sd, SE_FL_T_MUX | SE_FL_ORPHAN);
	}
	/* When a request starts, the H1S does not expect data while the request
	 * is not finished. It does not mean the response must not be received,
	 * especially if headers were already forwarded. But it is not
	 * mandatory.
	 */
	if (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H1_SND))
		se_fl_set(h1s->sd, SE_FL_MAY_FASTFWD_CONS);
	se_expect_no_data(h1s->sd);
	h1s->sess = sess;

	/* be lenient with buggy requests if the frontend allows it */
	if (h1c->px->options2 & PR_O2_REQBUG_OK)
		h1s->req.err_pos = -1;

	HA_ATOMIC_INC(&h1c->px_counters->open_streams);
	HA_ATOMIC_INC(&h1c->px_counters->total_streams);

	h1c->idle_exp = TICK_ETERNITY;
	h1_set_idle_expiration(h1c);
	TRACE_LEAVE(H1_EV_H1S_NEW, h1c->conn, h1s);
	return h1s;

  fail:
	TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_ERR, h1c->conn);
	h1s_destroy(h1s);
	return NULL;
}
+
/* Create a new H1 stream on the backend connection <h1c> and attach the
 * stream connector <sc> to it. The connection directly switches to the
 * RUNNING state. Returns the new H1S or NULL on error. */
static struct h1s *h1c_bck_stream_new(struct h1c *h1c, struct stconn *sc, struct session *sess)
{
	struct h1s *h1s;

	TRACE_ENTER(H1_EV_H1S_NEW, h1c->conn);

	h1s = h1s_new(h1c);
	if (!h1s)
		goto fail;

	if (sc_attach_mux(sc, h1s, h1c->conn) < 0)
		goto fail;

	/* block receipt until the request is sent */
	h1s->flags |= H1S_F_RX_BLK;
	h1s->sd = sc->sedesc;
	h1s->sess = sess;

	if (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H1_SND))
		se_fl_set(h1s->sd, SE_FL_MAY_FASTFWD_CONS);
	h1c->state = H1_CS_RUNNING;

	/* be lenient with buggy responses if the backend allows it */
	if (h1c->px->options2 & PR_O2_RSPBUG_OK)
		h1s->res.err_pos = -1;

	HA_ATOMIC_INC(&h1c->px_counters->open_streams);
	HA_ATOMIC_INC(&h1c->px_counters->total_streams);

	TRACE_LEAVE(H1_EV_H1S_NEW, h1c->conn, h1s);
	return h1s;

  fail:
	TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_ERR, h1c->conn);
	h1s_destroy(h1s);
	return NULL;
}
+
/* Destroy the H1 stream <h1s> (NULL is allowed) and detach it from its
 * connection. If the transaction terminated cleanly in keep-alive mode, the
 * connection switches back to IDLE, waiting for the next request; otherwise
 * it is closed. Stream-related H1C flags and counters are updated. */
static void h1s_destroy(struct h1s *h1s)
{
	if (h1s) {
		struct h1c *h1c = h1s->h1c;

		TRACE_POINT(H1_EV_H1S_END, h1c->conn, h1s);
		h1c->h1s = NULL;

		/* cancel any pending subscription from the stream */
		if (h1s->subs)
			h1s->subs->events = 0;

		h1_release_buf(h1c, &h1s->rxbuf);

		/* drop all stream-scoped connection flags */
		h1c->flags &= ~(H1C_F_WANT_FASTFWD|
				H1C_F_OUT_FULL|H1C_F_OUT_ALLOC|H1C_F_IN_SALLOC|
				H1C_F_CO_MSG_MORE|H1C_F_CO_STREAMER);

		if (!(h1c->flags & (H1C_F_EOS|H1C_F_ERR_PENDING|H1C_F_ERROR|H1C_F_ABRT_PENDING|H1C_F_ABRTED)) &&  /* No error/read0/abort */
		    h1_is_alive(h1c) &&                                 /* still alive */
		    (h1s->flags & H1S_F_WANT_KAL) &&                    /* K/A possible */
		    h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE) { /* req/res in DONE state */
			h1c->state = H1_CS_IDLE;
			h1c->flags |= H1C_F_WAIT_NEXT_REQ;
			h1c->req_count++;
			TRACE_STATE("set idle mode on h1c, waiting for the next request", H1_EV_H1C_ERR, h1c->conn, h1s);
		}
		else {
			h1_close(h1c);
			TRACE_STATE("close h1c", H1_EV_H1S_END, h1c->conn, h1s);
		}

		HA_ATOMIC_DEC(&h1c->px_counters->open_streams);
		/* the endpoint must be orphan at this stage, any attached SC
		 * should have been detached first */
		BUG_ON(h1s->sd && !se_fl_test(h1s->sd, SE_FL_ORPHAN));
		sedesc_free(h1s->sd);
		pool_free(pool_head_h1s, h1s);
	}
}
+
/*
 * Initialize the mux once it's attached. It is expected that conn->ctx points
 * to the existing stream connector (for outgoing connections or for incoming
 * ones during a mux upgrade) or NULL (for incoming ones during the connection
 * establishment). <input> is always used as Input buffer and may contain
 * data. It is the caller responsibility to not reuse it anymore. Returns < 0 on
 * error.
 */
static int h1_init(struct connection *conn, struct proxy *proxy, struct session *sess,
		   struct buffer *input)
{
	struct h1c *h1c;
	struct task *t = NULL;
	/* saved so it can be restored on the error path */
	void *conn_ctx = conn->ctx;

	TRACE_ENTER(H1_EV_H1C_NEW);

	h1c = pool_alloc(pool_head_h1c);
	if (!h1c) {
		TRACE_ERROR("H1C allocation failure", H1_EV_H1C_NEW|H1_EV_H1C_END|H1_EV_H1C_ERR);
		goto fail_h1c;
	}
	h1c->conn = conn;
	h1c->px   = proxy;

	h1c->state = H1_CS_IDLE;
	h1c->flags = H1C_F_NONE;
	h1c->errcode = 0;
	h1c->ibuf  = *input; /* ownership of <input> is taken here */
	h1c->obuf  = BUF_NULL;
	h1c->h1s   = NULL;
	h1c->task  = NULL;
	h1c->req_count = 0;

	LIST_INIT(&h1c->buf_wait.list);
	h1c->wait_event.tasklet = tasklet_new();
	if (!h1c->wait_event.tasklet)
		goto fail;
	h1c->wait_event.tasklet->process = h1_io_cb;
	h1c->wait_event.tasklet->context = h1c;
	h1c->wait_event.events   = 0;
	h1c->idle_exp = TICK_ETERNITY;

	if (conn_is_back(conn)) {
		/* backend side: use server timeouts and counters */
		h1c->flags |= H1C_F_IS_BACK;
		h1c->shut_timeout = h1c->timeout = proxy->timeout.server;
		if (tick_isset(proxy->timeout.serverfin))
			h1c->shut_timeout = proxy->timeout.serverfin;

		h1c->px_counters = EXTRA_COUNTERS_GET(proxy->extra_counters_be,
						      &h1_stats_module);
	} else {
		/* frontend side: use client timeouts and counters, and
		 * register for soft-stop notifications */
		h1c->shut_timeout = h1c->timeout = proxy->timeout.client;
		if (tick_isset(proxy->timeout.clientfin))
			h1c->shut_timeout = proxy->timeout.clientfin;

		h1c->px_counters = EXTRA_COUNTERS_GET(proxy->extra_counters_fe,
						      &h1_stats_module);

		LIST_APPEND(&mux_stopping_data[tid].list,
			    &h1c->conn->stopping_list);
	}
	if (tick_isset(h1c->timeout)) {
		t = task_new_here();
		if (!t) {
			TRACE_ERROR("H1C task allocation failure", H1_EV_H1C_NEW|H1_EV_H1C_END|H1_EV_H1C_ERR);
			goto fail;
		}

		h1c->task = t;
		t->process = h1_timeout_task;
		t->context = h1c;

		t->expire = tick_add(now_ms, h1c->timeout);
	}

	conn->ctx = h1c;

	if (h1c->flags & H1C_F_IS_BACK) {
		/* Create a new H1S now for backend connection only */
		if (!h1c_bck_stream_new(h1c, conn_ctx, sess))
			goto fail;
	}
	else if (conn_ctx) {
		/* Upgraded frontend connection (from TCP) */
		if (!h1c_frt_stream_new(h1c, conn_ctx, h1c->conn->owner))
			goto fail;

		/* Attach the SC but Not ready yet */
		h1c->state = H1_CS_UPGRADING;
		TRACE_DEVEL("Inherit the SC from TCP connection to perform an upgrade",
			    H1_EV_H1C_NEW|H1_EV_STRM_NEW, h1c->conn, h1c->h1s);
	}

	if (t) {
		h1_set_idle_expiration(h1c);
		t->expire = tick_first(t->expire, h1c->idle_exp);
		task_queue(t);
	}

	/* prepare to read something */
	if (b_data(&h1c->ibuf))
		tasklet_wakeup(h1c->wait_event.tasklet);
	else if (h1_recv_allowed(h1c))
		h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);

	if (!conn_is_back(conn))
		proxy_inc_fe_cum_sess_ver_ctr(sess->listener, proxy, 1);
	HA_ATOMIC_INC(&h1c->px_counters->open_conns);
	HA_ATOMIC_INC(&h1c->px_counters->total_conns);

	/* mux->wake will be called soon to complete the operation */
	TRACE_LEAVE(H1_EV_H1C_NEW, conn, h1c->h1s);
	return 0;

  fail:
	task_destroy(t);
	tasklet_free(h1c->wait_event.tasklet);
	pool_free(pool_head_h1c, h1c);
  fail_h1c:
	if (!conn_is_back(conn))
		LIST_DEL_INIT(&conn->stopping_list);
	conn->ctx = conn_ctx; // restore saved context
	TRACE_DEVEL("leaving in error", H1_EV_H1C_NEW|H1_EV_H1C_END|H1_EV_H1C_ERR);
	return -1;
}
+
/* release function. This one should be called to free all resources allocated
 * to the mux: buffers, timeout task, I/O tasklet, the H1 stream, and finally
 * the connection itself when it is still attached to this mux.
 */
static void h1_release(struct h1c *h1c)
{
	struct connection *conn = NULL;

	TRACE_POINT(H1_EV_H1C_END);

	/* The connection must be attached to this mux to be released */
	if (h1c->conn && h1c->conn->ctx == h1c)
		conn = h1c->conn;

	if (conn && h1c->flags & H1C_F_UPG_H2C) {
		TRACE_DEVEL("upgrading H1 to H2", H1_EV_H1C_END, conn);
		/* Make sure we're no longer subscribed to anything */
		if (h1c->wait_event.events)
			conn->xprt->unsubscribe(conn, conn->xprt_ctx,
						h1c->wait_event.events, &h1c->wait_event);
		if (conn_upgrade_mux_fe(conn, NULL, &h1c->ibuf, ist("h2"), PROTO_MODE_HTTP) != -1) {
			/* connection successfully upgraded to H2, this
			 * mux was already released */
			return;
		}
		TRACE_ERROR("h2 upgrade failed", H1_EV_H1C_END|H1_EV_H1C_ERR, conn);
		sess_log(conn->owner); /* Log if the upgrade failed */
	}


	/* drop any pending buffer-wait registration */
	if (LIST_INLIST(&h1c->buf_wait.list))
		LIST_DEL_INIT(&h1c->buf_wait.list);

	h1_release_buf(h1c, &h1c->ibuf);
	h1_release_buf(h1c, &h1c->obuf);

	/* detach the timeout task; it will free itself on wakeup */
	if (h1c->task) {
		h1c->task->context = NULL;
		task_wakeup(h1c->task, TASK_WOKEN_OTHER);
		h1c->task = NULL;
	}

	if (h1c->wait_event.tasklet) {
		tasklet_free(h1c->wait_event.tasklet);
		h1c->wait_event.tasklet = NULL;
	}

	h1s_destroy(h1c->h1s);
	if (conn) {
		if (h1c->wait_event.events != 0)
			conn->xprt->unsubscribe(conn, conn->xprt_ctx, h1c->wait_event.events,
						&h1c->wait_event);
		h1_shutw_conn(conn);
	}

	HA_ATOMIC_DEC(&h1c->px_counters->open_conns);
	pool_free(pool_head_h1c, h1c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H1_EV_H1C_END, conn);

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}
}
+
+/******************************************************/
+/* functions below are for the H1 protocol processing */
+/******************************************************/
+/* Parse the request version and set H1_MF_VER_11 on <h1m> if the version is
+ * greater or equal to 1.1
+ */
+static void h1_parse_req_vsn(struct h1m *h1m, const struct htx_sl *sl)
+{
+ const char *p = HTX_SL_REQ_VPTR(sl);
+
+ if ((HTX_SL_REQ_VLEN(sl) == 8) &&
+ (*(p + 5) > '1' ||
+ (*(p + 5) == '1' && *(p + 7) >= '1')))
+ h1m->flags |= H1_MF_VER_11;
+}
+
+/* Parse the response version and set H1_MF_VER_11 on <h1m> if the version is
+ * greater or equal to 1.1
+ */
+static void h1_parse_res_vsn(struct h1m *h1m, const struct htx_sl *sl)
+{
+ const char *p = HTX_SL_RES_VPTR(sl);
+
+ if ((HTX_SL_RES_VLEN(sl) == 8) &&
+ (*(p + 5) > '1' ||
+ (*(p + 5) == '1' && *(p + 7) >= '1')))
+ h1m->flags |= H1_MF_VER_11;
+}
+
/* Deduce the connection mode of the client connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_cli_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct proxy *fe = h1s->h1c->px;

	if (h1m->flags & H1_MF_RESP) {
		/* Output direction: second pass */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) ||
		    h1s->status == 101) {
			/* Either we've established an explicit tunnel, or we're
			 * switching the protocol. In both cases, we're very unlikely to
			 * understand the next protocols. We have to switch to tunnel
			 * mode, so that we transfer the request and responses then let
			 * this protocol pass unmodified. When we later implement
			 * specific parsers for such protocols, we'll want to check the
			 * Upgrade header which contains information about that protocol
			 * for responses with status 101 (eg: see RFC2817 about TLS).
			 */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
			TRACE_STATE("set tunnel mode (resp)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
		}
		else if (h1s->flags & H1S_F_WANT_KAL) {
			/* By default the client is in KAL mode. CLOSE mode means
			 * it is imposed by the client itself. So only change
			 * KAL mode here. */
			if (!(h1m->flags & H1_MF_XFER_LEN) || (h1m->flags & H1_MF_CONN_CLO)) {
				/* no length known or explicit close => close */
				h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
				TRACE_STATE("detect close mode (resp)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			}
			else if (!(h1m->flags & H1_MF_CONN_KAL) &&
				 (fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO) {
				/* no explicit keep-alive and option httpclose => close */
				h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
				TRACE_STATE("force close mode (resp)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			}
		}
	}
	else {
		/* Input direction: first pass */
		if (!(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL)) || h1m->flags & H1_MF_CONN_CLO)  {
			/* no explicit keep-alive in HTTP/1.0 or explicit close => close*/
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
			TRACE_STATE("detect close mode (req)", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
		}
	}

	/* If KAL, check if the frontend is stopping. If yes, switch in CLO mode
	 * unless a 'close-spread-time' option is set (either to define a
	 * soft-close window or to disable active closing (close-spread-time
	 * option set to 0).
	 */
	if (h1s->flags & H1S_F_WANT_KAL && (fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		int want_clo = 1;
		/* If a close-spread-time option is set, we want to avoid
		 * closing all the active HTTP connections at once so we add a
		 * random factor that will spread the closing.
		 */
		if (tick_isset(global.close_spread_end)) {
			int remaining_window = tick_remain(now_ms, global.close_spread_end);
			if (remaining_window) {
				/* This should increase the closing rate the further along
				 * the window we are.
				 */
				want_clo = (remaining_window <= statistical_prng_range(global.close_spread_time));
			}
		}
		else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
			want_clo = 0;

		if (want_clo) {
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
			TRACE_STATE("stopping, set close mode", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
		}
	}
}
+
/* Deduce the connection mode of the server connection, depending on the
 * configuration and the H1 message flags. This function is called twice, the
 * first time when the request is parsed and the second time when the response
 * is parsed.
 */
static void h1_set_srv_conn_mode(struct h1s *h1s, struct h1m *h1m)
{
	struct session *sess = h1s->sess;
	struct proxy *be = h1s->h1c->px;
	/* the frontend options may not be available (e.g. no session) */
	int fe_flags = sess ? sess->fe->options : 0;

	if (h1m->flags & H1_MF_RESP) {
		/* Input direction: second pass */
		if ((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) ||
		    h1s->status == 101) {
			/* Either we've established an explicit tunnel, or we're
			 * switching the protocol. In both cases, we're very unlikely to
			 * understand the next protocols. We have to switch to tunnel
			 * mode, so that we transfer the request and responses then let
			 * this protocol pass unmodified. When we later implement
			 * specific parsers for such protocols, we'll want to check the
			 * Upgrade header which contains information about that protocol
			 * for responses with status 101 (eg: see RFC2817 about TLS).
			 */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_TUN;
			TRACE_STATE("set tunnel mode (resp)", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
		}
		else if (h1s->flags & H1S_F_WANT_KAL) {
			/* By default the server is in KAL mode. CLOSE mode means
			 * it is imposed by haproxy itself. So only change KAL
			 * mode here. */
			if (!(h1m->flags & H1_MF_XFER_LEN) || h1m->flags & H1_MF_CONN_CLO ||
			    !(h1m->flags & (H1_MF_VER_11|H1_MF_CONN_KAL))){
				/* no length known or explicit close or no explicit keep-alive in HTTP/1.0 => close */
				h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
				TRACE_STATE("detect close mode (resp)", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
			}
		}
	}
	else {
		/* Output direction: first pass */
		if (h1m->flags & H1_MF_CONN_CLO) {
			/* explicit close => close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
			TRACE_STATE("detect close mode (req)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
		}
		else if (!(h1m->flags & H1_MF_CONN_KAL) &&
			 ((fe_flags & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
			  (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_SCL ||
			  (be->options & PR_O_HTTP_MODE) == PR_O_HTTP_CLO)) {
			/* no explicit keep-alive option httpclose/server-close => close */
			h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
			TRACE_STATE("force close mode (req)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
		}
	}

	/* If KAL, check if the backend is stopping. If yes, switch in CLO mode */
	if (h1s->flags & H1S_F_WANT_KAL && (be->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
		h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
		TRACE_STATE("stopping, set close mode", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
	}
}
+
/* Set <conn_val> to the "Connection:" header value to emit on an outgoing
 * request, depending on the wanted connection mode and the HTTP version:
 * "keep-alive" is announced to a 1.0 peer in KAL mode, "close" to a 1.1 peer
 * in CLO mode. Left untouched when the default behavior already matches. */
static void h1_update_req_conn_value(struct h1s *h1s, struct h1m *h1m, struct ist *conn_val)
{
	struct proxy *px = h1s->h1c->px;

	/* Don't update "Connection:" header in TUNNEL mode or if "Upgrade"
	 * token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		return;

	if (h1s->flags & H1S_F_WANT_KAL || px->options2 & PR_O2_FAKE_KA) {
		if (!(h1m->flags & H1_MF_VER_11)) {
			/* HTTP/1.0 defaults to close: keep-alive must be explicit */
			TRACE_STATE("add \"Connection: keep-alive\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			*conn_val = ist("keep-alive");
		}
	}
	else { /* H1S_F_WANT_CLO && !PR_O2_FAKE_KA */
		if (h1m->flags & H1_MF_VER_11) {
			/* HTTP/1.1 defaults to keep-alive: close must be explicit */
			TRACE_STATE("add \"Connection: close\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			*conn_val = ist("close");
		}
	}
}
+
/* Set <conn_val> to the "Connection:" header value to emit on an outgoing
 * response, depending on the wanted connection mode and the HTTP versions of
 * both the response and the matching request. */
static void h1_update_res_conn_value(struct h1s *h1s, struct h1m *h1m, struct ist *conn_val)
{
	/* Don't update "Connection:" header in TUNNEL mode or if "Upgrade"
	 * token is found
	 */
	if (h1s->flags & H1S_F_WANT_TUN || h1m->flags & H1_MF_CONN_UPG)
		return;

	if (h1s->flags & H1S_F_WANT_KAL) {
		/* keep-alive must be explicit unless both the response and
		 * the request are HTTP/1.1 */
		if (!(h1m->flags & H1_MF_VER_11) ||
		    !((h1m->flags & h1s->req.flags) & H1_MF_VER_11)) {
			TRACE_STATE("add \"Connection: keep-alive\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			*conn_val = ist("keep-alive");
		}
	}
	else { /* H1S_F_WANT_CLO */
		if (h1m->flags & H1_MF_VER_11) {
			/* HTTP/1.1 defaults to keep-alive: close must be explicit */
			TRACE_STATE("add \"Connection: close\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
			*conn_val = ist("close");
		}
	}
}
+
+static void h1_process_input_conn_mode(struct h1s *h1s, struct h1m *h1m, struct htx *htx)
+{
+ if (!(h1s->h1c->flags & H1C_F_IS_BACK))
+ h1_set_cli_conn_mode(h1s, h1m);
+ else
+ h1_set_srv_conn_mode(h1s, h1m);
+}
+
+static void h1_process_output_conn_mode(struct h1s *h1s, struct h1m *h1m, struct ist *conn_val)
+{
+ if (!(h1s->h1c->flags & H1C_F_IS_BACK))
+ h1_set_cli_conn_mode(h1s, h1m);
+ else
+ h1_set_srv_conn_mode(h1s, h1m);
+
+ if (!(h1m->flags & H1_MF_RESP))
+ h1_update_req_conn_value(h1s, h1m, conn_val);
+ else
+ h1_update_res_conn_value(h1s, h1m, conn_val);
+}
+
/* Try to adjust the case of the message header name using the global map
 * <hdrs_map>. On a match, <name> is updated in place to point to the entry
 * stored in the map. The conversion only applies when the corresponding
 * proxy option is set for the message direction.
 */
static void h1_adjust_case_outgoing_hdr(struct h1s *h1s, struct h1m *h1m, struct ist *name)
{
	struct ebpt_node *node;
	struct h1_hdr_entry *entry;

	/* No entry in the map, do nothing */
	if (eb_is_empty(&hdrs_map.map))
		return;

	/* No conversion for the request headers unless the option is set on the proxy */
	if (!(h1m->flags & H1_MF_RESP) && !(h1s->h1c->px->options2 & PR_O2_H1_ADJ_BUGSRV))
		return;

	/* No conversion for the response headers unless the option is set on the proxy */
	if ((h1m->flags & H1_MF_RESP) && !(h1s->h1c->px->options2 & PR_O2_H1_ADJ_BUGCLI))
		return;

	node = ebis_lookup_len(&hdrs_map.map, name->ptr, name->len);
	if (!node)
		return;
	entry = container_of(node, struct h1_hdr_entry, node);
	name->ptr = entry->name.ptr;
	name->len = entry->name.len;
}
+
/* Append the description of what is present in error snapshot <es> into <out>.
 * The description must be small enough to always fit in a buffer. The output
 * buffer may be the trash so the trash must not be used inside this function.
 * Used as the show callback registered by h1_capture_bad_message().
 */
static void h1_show_error_snapshot(struct buffer *out, const struct error_snapshot *es)
{
	chunk_appendf(out,
		      "  H1 connection flags 0x%08x, H1 stream flags 0x%08x\n"
		      "  H1 msg state %s(%d), H1 msg flags 0x%08x\n"
		      "  H1 chunk len %lld bytes, H1 body len %lld bytes :\n",
		      es->ctx.h1.c_flags, es->ctx.h1.s_flags,
		      h1m_state_str(es->ctx.h1.state), es->ctx.h1.state,
		      es->ctx.h1.m_flags, es->ctx.h1.m_clen, es->ctx.h1.m_blen);
}
/*
 * Capture a bad request or response and archive it in the proxy's structure.
 * By default it tries to report the error position as h1m->err_pos. However if
 * this one is not set, it will then report h1m->next, which is the last known
 * parsing point. The function is able to deal with wrapping buffers. It always
 * displays buffers as a contiguous area starting at buf->p. The direction is
 * determined thanks to the h1m's flags.
 */
static void h1_capture_bad_message(struct h1c *h1c, struct h1s *h1s,
				   struct h1m *h1m, struct buffer *buf)
{
	struct session *sess = h1s->sess;
	struct proxy *proxy = h1c->px;
	struct proxy *other_end;
	union error_snapshot_ctx ctx;

	/* figure out the "other side" proxy only when a stream exists
	 * (UPGRADING or RUNNING state) */
	if (h1c->state == H1_CS_UPGRADING || h1c->state == H1_CS_RUNNING) {
		if (sess == NULL)
			sess = __sc_strm(h1s_sc(h1s))->sess;
		if (!(h1m->flags & H1_MF_RESP))
			other_end = __sc_strm(h1s_sc(h1s))->be;
		else
			other_end = sess->fe;
	} else
		other_end = NULL;

	/* http-specific part now */
	ctx.h1.state   = h1m->state;
	ctx.h1.c_flags = h1c->flags;
	ctx.h1.s_flags = h1s->flags;
	ctx.h1.m_flags = h1m->flags;
	ctx.h1.m_clen  = h1m->curr_len;
	ctx.h1.m_blen  = h1m->body_len;

	proxy_capture_error(proxy, !!(h1m->flags & H1_MF_RESP), other_end,
			    h1c->conn->target, sess, buf, 0, 0,
			    (h1m->err_pos >= 0) ? h1m->err_pos : h1m->next,
			    &ctx, h1_show_error_snapshot);
}
+
+/* Write the hexadecimal chunk size of <chksz> followed by CRLF just before the
+ * current head of buffer <buf>, moving backwards from the byte preceding the
+ * head. The caller guarantees that enough bytes are available in front of the
+ * head to hold the whole prefix.
+ */
+static void h1_prepend_chunk_size(struct buffer *buf, size_t chksz)
+{
+ char *ptr, *stop;
+
+ /* Build the "SIZE\r\n" prefix backwards, ending right at the head. */
+ stop = b_head(buf);
+ ptr = stop;
+ *--ptr = '\n';
+ *--ptr = '\r';
+ while (1) {
+ *--ptr = hextab[chksz & 0xF];
+ chksz >>= 4;
+ if (!chksz)
+ break;
+ }
+ /* Pull the head back over the prefix and account for the added bytes */
+ buf->head -= (stop - ptr);
+ b_add(buf, stop - ptr);
+}
+
+/* Emit the chunksize followed by a CRLF after the data of the buffer
+ * <buf>. Returns 0 on error.
+ */
+static int h1_append_chunk_size(struct buffer *buf, size_t chksz)
+{
+ /* Worst case is 16 hex digits for a 64-bit size_t plus CRLF. The
+  * previous 10-byte buffer could only hold 8 digits and would overflow
+  * on the stack for any chunk size above 0xFFFFFFFF.
+  */
+ char tmp[18];
+ char *beg, *end;
+
+ /* Build the "SIZE\r\n" string backwards from the end of <tmp> */
+ beg = end = tmp + sizeof(tmp);
+ *--beg = '\n';
+ *--beg = '\r';
+ do {
+ *--beg = hextab[chksz & 0xF];
+ } while (chksz >>= 4);
+
+ /* chunk_memcat() returns 0 if the buffer lacks room */
+ return chunk_memcat(buf, beg, end - beg);
+}
+
+/* Insert a CRLF immediately before the current head of buffer <buf>. The
+ * caller must have ensured that at least two bytes are free in front of the
+ * head.
+ */
+static void h1_prepend_chunk_crlf(struct buffer *buf)
+{
+ char *p = b_head(buf);
+
+ p[-2] = '\r';
+ p[-1] = '\n';
+ /* move the head back over the two inserted bytes */
+ buf->head -= 2;
+ b_add(buf, 2);
+}
+
+
+/* Append a CRLF right after the data already present in buffer <buf>. The
+ * caller must have ensured there is room for these two bytes. */
+static void h1_append_chunk_crlf(struct buffer *buf)
+{
+ size_t pos = b_data(buf);
+
+ /* two separate b_peek() calls so that a wrapping buffer is handled */
+ *b_peek(buf, pos) = '\r';
+ *b_peek(buf, pos + 1) = '\n';
+ b_add(buf, 2);
+}
+
+/*
+ * Switch the stream to tunnel mode. This function must only be called on 2xx
+ * (successful) replies to CONNECT requests or on 101 (switching protocol).
+ */
+static void h1_set_tunnel_mode(struct h1s *h1s)
+{
+ struct h1c *h1c = h1s->h1c;
+
+ /* Both directions enter TUNNEL state and drop any length-related flags:
+  * from now on data are forwarded as-is with no framing.
+  */
+ h1s->req.state = H1_MSG_TUNNEL;
+ h1s->req.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
+
+ h1s->res.state = H1_MSG_TUNNEL;
+ h1s->res.flags &= ~(H1_MF_XFER_LEN|H1_MF_CLEN|H1_MF_CHNK);
+
+ TRACE_STATE("switch H1 stream in tunnel mode", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+
+ /* Unblock both sides and wake the stream so pending data can flow again
+  * through the tunnel.
+  */
+ if (h1s->flags & H1S_F_RX_BLK) {
+ h1s->flags &= ~H1S_F_RX_BLK;
+ h1_wake_stream_for_recv(h1s);
+ TRACE_STATE("Re-enable input processing", H1_EV_RX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+ }
+ if (h1s->flags & H1S_F_TX_BLK) {
+ h1s->flags &= ~H1S_F_TX_BLK;
+ h1_wake_stream_for_send(h1s);
+ TRACE_STATE("Re-enable output processing", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+ }
+}
+
+/* Search for a websocket key header. The message should have been identified
+ * as a valid websocket handshake.
+ *
+ * On the request side, if found the key is stored in the session. It might be
+ * needed to calculate response key if the server side is using http/2.
+ *
+ * On the response side, the key might be verified if haproxy has been
+ * responsible for the generation of a key. This happens when a h2 client is
+ * interfaced with a h1 server.
+ *
+ * Returns 0 if no key found or invalid key
+ */
+static int h1_search_websocket_key(struct h1s *h1s, struct h1m *h1m, struct htx *htx)
+{
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ struct ist n, v;
+ int ws_key_found = 0, idx;
+
+ idx = htx_get_head(htx); // returns the SL that we skip
+ while ((idx = htx_get_next(htx, idx)) != -1) {
+ blk = htx_get_blk(htx, idx);
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ /* stop at the first non-header block: the key must be in the
+  * header section */
+ if (type != HTX_BLK_HDR)
+ break;
+
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+
+ /* Websocket key is base64 encoded of 16 bytes */
+ if (isteqi(n, ist("sec-websocket-key")) && v.len == 24 &&
+ !(h1m->flags & H1_MF_RESP)) {
+ /* Copy the key on request side
+ * we might need it if the server is using h2 and does
+ * not provide the response
+ */
+ memcpy(h1s->ws_key, v.ptr, 24);
+ ws_key_found = 1;
+ break;
+ }
+ else if (isteqi(n, ist("sec-websocket-accept")) &&
+ h1m->flags & H1_MF_RESP) {
+ /* Need to verify the response key if the input was
+ * generated by haproxy
+ */
+ if (h1s->ws_key[0]) {
+ char key[29];
+ h1_calculate_ws_output_key(h1s->ws_key, key);
+ /* mismatch: leave ws_key_found at 0 so the
+  * message is rejected below */
+ if (!isteqi(ist(key), v))
+ break;
+ }
+ ws_key_found = 1;
+ break;
+ }
+ }
+
+ /* missing websocket key, reject the message */
+ if (!ws_key_found) {
+ htx->flags |= HTX_FL_PARSING_ERROR;
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Parse HTTP/1 headers. It returns the number of bytes parsed if > 0, or 0 if
+ * it couldn't proceed. Parsing errors are reported by setting H1S_F_*_ERROR
+ * flag. If more room is requested, H1S_F_RX_CONGESTED flag is set. It relies
+ * on the function h1_parse_msg_hdrs() to do the parsing.
+ */
+static size_t h1_handle_headers(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
+ struct buffer *buf, size_t *ofs, size_t max)
+{
+ union h1_sl h1sl;
+ int ret = 0;
+
+ TRACE_ENTER(H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s, 0, (size_t[]){max});
+
+ /* Propagate the method of the current transaction to the parser so it
+  * can apply CONNECT/HEAD specific framing rules.
+  */
+ if (h1s->meth == HTTP_METH_CONNECT)
+ h1m->flags |= H1_MF_METH_CONNECT;
+ if (h1s->meth == HTTP_METH_HEAD)
+ h1m->flags |= H1_MF_METH_HEAD;
+
+ /* -1 means a parsing error, -2 means the HTX buffer lacks room */
+ ret = h1_parse_msg_hdrs(h1m, &h1sl, htx, buf, *ofs, max);
+ if (ret <= 0) {
+ TRACE_DEVEL("leaving on missing data or error", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
+ if (ret == -1) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ }
+ else if (ret == -2) {
+ TRACE_STATE("RX path congested, waiting for more space", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_BLK, h1s->h1c->conn, h1s);
+ h1s->flags |= H1S_F_RX_CONGESTED;
+ }
+ ret = 0;
+ goto end;
+ }
+
+
+ /* Reject HTTP/1.0 GET/HEAD/DELETE requests with a payload except if
+ * accept_payload_with_any_method global option is set.
+ * There is a payload if the C-L is not null or the payload is
+ * chunk-encoded. A parsing error is reported but a
+ * 413-Payload-Too-Large is returned instead of a 400-Bad-Request.
+ */
+ if (!accept_payload_with_any_method &&
+ !(h1m->flags & (H1_MF_RESP|H1_MF_VER_11)) &&
+ (((h1m->flags & H1_MF_CLEN) && h1m->body_len) || (h1m->flags & H1_MF_CHNK)) &&
+ (h1sl.rq.meth == HTTP_METH_GET || h1sl.rq.meth == HTTP_METH_HEAD || h1sl.rq.meth == HTTP_METH_DELETE)) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ htx->flags |= HTX_FL_PARSING_ERROR;
+ h1s->h1c->errcode = 413;
+ TRACE_ERROR("HTTP/1.0 GET/HEAD/DELETE request with a payload forbidden", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ ret = 0;
+ goto end;
+ }
+
+ /* Reject any message with an unknown transfer-encoding. In fact if any
+ * encoding other than "chunked". A 422-Unprocessable-Content is
+ * returned for an invalid request, a 502-Bad-Gateway for an invalid
+ * response.
+ */
+ if (h1m->flags & H1_MF_TE_OTHER) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ htx->flags |= HTX_FL_PARSING_ERROR;
+ if (!(h1m->flags & H1_MF_RESP))
+ h1s->h1c->errcode = 422;
+ TRACE_ERROR("Unknown transfer-encoding", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ ret = 0;
+ goto end;
+ }
+
+ /* If websocket handshake, search for the websocket key */
+ if ((h1m->flags & (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) ==
+ (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) {
+ int ws_ret = h1_search_websocket_key(h1s, h1m, htx);
+ if (!ws_ret) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ TRACE_ERROR("missing/invalid websocket key, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_HDRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+
+ ret = 0;
+ goto end;
+ }
+ }
+
+ if (h1m->err_pos >= 0) {
+ /* Maybe we found an error during the parsing while we were
+ * configured not to block on that, so we have to capture it
+ * now.
+ */
+ TRACE_STATE("Ignored parsing error", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ }
+
+ /* Remember the method/status and flag bodyless responses (HEAD request,
+  * 204 or 304 status).
+  */
+ if (!(h1m->flags & H1_MF_RESP)) {
+ h1s->meth = h1sl.rq.meth;
+ if (h1s->meth == HTTP_METH_HEAD)
+ h1s->flags |= H1S_F_BODYLESS_RESP;
+ }
+ else {
+ h1s->status = h1sl.st.status;
+ if (h1s->status == 204 || h1s->status == 304)
+ h1s->flags |= H1S_F_BODYLESS_RESP;
+ }
+ h1_process_input_conn_mode(h1s, h1m, htx);
+ *ofs += ret;
+
+ end:
+ TRACE_LEAVE(H1_EV_RX_DATA|H1_EV_RX_HDRS, h1s->h1c->conn, h1s, 0, (size_t[]){ret});
+ return ret;
+}
+
+/*
+ * Parse HTTP/1 body. It returns the number of bytes parsed if > 0, or 0 if it
+ * couldn't proceed. Parsing errors are reported by setting H1S_F_*_ERROR flag.
+ * It relies on the function h1_parse_msg_data() to do the parsing.
+ */
+static size_t h1_handle_data(struct h1s *h1s, struct h1m *h1m, struct htx **htx,
+ struct buffer *buf, size_t *ofs, size_t max,
+ struct buffer *htxbuf)
+{
+ size_t ret;
+
+ TRACE_ENTER(H1_EV_RX_DATA|H1_EV_RX_BODY, h1s->h1c->conn, h1s, 0, (size_t[]){max});
+ ret = h1_parse_msg_data(h1m, htx, buf, *ofs, max, htxbuf);
+ if (!ret) {
+ TRACE_DEVEL("leaving on missing data or error", H1_EV_RX_DATA|H1_EV_RX_BODY, h1s->h1c->conn, h1s);
+ if ((*htx)->flags & HTX_FL_PARSING_ERROR) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_BODY|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ }
+ goto end;
+ }
+
+ *ofs += ret;
+
+ end:
+ /* Input data remain while the parser still expects payload: the HTX
+  * buffer is full, so flag the RX path as congested to request more room
+  * from the upper layer.
+  */
+ if (b_data(buf) != *ofs && (h1m->state == H1_MSG_DATA || h1m->state == H1_MSG_TUNNEL)) {
+ TRACE_STATE("RX path congested, waiting for more space", H1_EV_RX_DATA|H1_EV_RX_BODY|H1_EV_H1S_BLK, h1s->h1c->conn, h1s);
+ h1s->flags |= H1S_F_RX_CONGESTED;
+ }
+
+ TRACE_LEAVE(H1_EV_RX_DATA|H1_EV_RX_BODY, h1s->h1c->conn, h1s, 0, (size_t[]){ret});
+ return ret;
+}
+
+/*
+ * Parse HTTP/1 trailers. It returns the number of bytes parsed if > 0, or 0 if
+ * it couldn't proceed. Parsing errors are reported by setting H1S_F_*_ERROR
+ * flag. This function is responsible to update the parser state <h1m>. If more
+ * room is requested, H1S_F_RX_CONGESTED flag is set.
+ */
+static size_t h1_handle_trailers(struct h1s *h1s, struct h1m *h1m, struct htx *htx,
+ struct buffer *buf, size_t *ofs, size_t max)
+{
+ int ret;
+
+ TRACE_ENTER(H1_EV_RX_DATA|H1_EV_RX_TLRS, h1s->h1c->conn, h1s, 0, (size_t[]){max});
+ /* -1 means a parsing error, -2 means the HTX buffer lacks room */
+ ret = h1_parse_msg_tlrs(h1m, htx, buf, *ofs, max);
+ if (ret <= 0) {
+ TRACE_DEVEL("leaving on missing data or error", H1_EV_RX_DATA|H1_EV_RX_BODY, h1s->h1c->conn, h1s);
+ if (ret == -1) {
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ TRACE_ERROR("parsing error, reject H1 message", H1_EV_RX_DATA|H1_EV_RX_TLRS|H1_EV_H1S_ERR, h1s->h1c->conn, h1s);
+ h1_capture_bad_message(h1s->h1c, h1s, h1m, buf);
+ }
+ else if (ret == -2) {
+ TRACE_STATE("RX path congested, waiting for more space", H1_EV_RX_DATA|H1_EV_RX_TLRS|H1_EV_H1S_BLK, h1s->h1c->conn, h1s);
+ h1s->flags |= H1S_F_RX_CONGESTED;
+ }
+ ret = 0;
+ goto end;
+ }
+
+ *ofs += ret;
+
+ end:
+ TRACE_LEAVE(H1_EV_RX_DATA|H1_EV_RX_TLRS, h1s->h1c->conn, h1s, 0, (size_t[]){ret});
+ return ret;
+}
+
+/*
+ * Process incoming data. It parses data and transfer them from h1c->ibuf into
+ * <buf>. It returns the number of bytes parsed and transferred if > 0, or 0 if
+ * it couldn't proceed.
+ *
+ * WARNING: H1S_F_RX_CONGESTED flag must be removed before processing input data.
+ */
+static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count)
+{
+ struct h1s *h1s = h1c->h1s;
+ struct h1m *h1m;
+ struct htx *htx;
+ size_t data;
+ size_t ret = 0;
+ size_t total = 0;
+
+ htx = htx_from_buf(buf);
+ TRACE_ENTER(H1_EV_RX_DATA, h1c->conn, h1s, htx, (size_t[]){count});
+
+ /* Pick the parser for the incoming side: the request on a frontend
+  * connection, the response on a backend one.
+  */
+ h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->req : &h1s->res);
+ data = htx->data;
+
+ if (h1s->flags & (H1S_F_INTERNAL_ERROR|H1S_F_PARSING_ERROR|H1S_F_NOT_IMPL_ERROR))
+ goto end;
+
+ if (h1s->flags & H1S_F_RX_BLK)
+ goto out;
+
+ /* Always remove congestion flags and try to process more input data */
+ h1s->flags &= ~H1S_F_RX_CONGESTED;
+
+ /* Loop on the parser's state machine: headers, payload, trailers, then
+  * DONE or TUNNEL, until it blocks or an error is raised.
+  */
+ do {
+ size_t used = htx_used_space(htx);
+
+ if (h1m->state <= H1_MSG_LAST_LF) {
+ TRACE_PROTO("parsing message headers", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1c->conn, h1s);
+ ret = h1_handle_headers(h1s, h1m, htx, &h1c->ibuf, &total, count);
+ if (!ret)
+ break;
+
+ TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "rcvd H1 request headers" : "rcvd H1 response headers"),
+ H1_EV_RX_DATA|H1_EV_RX_HDRS, h1c->conn, h1s, htx, (size_t[]){ret});
+
+ /* An interim (1xx) response does not end the transaction:
+  * reset the response parser to wait for the final one.
+  */
+ if ((h1m->flags & H1_MF_RESP) &&
+ h1s->status < 200 && (h1s->status == 100 || h1s->status >= 102)) {
+ h1m_init_res(&h1s->res);
+ h1m->flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);
+ TRACE_STATE("1xx response rcvd", H1_EV_RX_DATA|H1_EV_RX_HDRS, h1c->conn, h1s);
+ }
+ }
+ else if (h1m->state < H1_MSG_TRAILERS) {
+ TRACE_PROTO("parsing message payload", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s);
+ ret = h1_handle_data(h1s, h1m, &htx, &h1c->ibuf, &total, count, buf);
+ if (h1m->state < H1_MSG_TRAILERS)
+ break;
+
+ TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "rcvd H1 request payload data" : "rcvd H1 response payload data"),
+ H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s, htx, (size_t[]){ret});
+ }
+ else if (h1m->state == H1_MSG_TRAILERS) {
+ TRACE_PROTO("parsing message trailers", H1_EV_RX_DATA|H1_EV_RX_TLRS, h1c->conn, h1s);
+ ret = h1_handle_trailers(h1s, h1m, htx, &h1c->ibuf, &total, count);
+ if (h1m->state != H1_MSG_DONE)
+ break;
+
+ TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "rcvd H1 request trailers" : "rcvd H1 response trailers"),
+ H1_EV_RX_DATA|H1_EV_RX_TLRS, h1c->conn, h1s, htx, (size_t[]){ret});
+ }
+ else if (h1m->state == H1_MSG_DONE) {
+ TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "H1 request fully rcvd" : "H1 response fully rcvd"),
+ H1_EV_RX_DATA|H1_EV_RX_EOI, h1c->conn, h1s, htx);
+
+ if (!(h1c->flags & H1C_F_IS_BACK)) {
+ /* The request was fully received. It means the H1S now
+ * expect data from the opposite side
+ */
+ se_expect_data(h1s->sd);
+ }
+
+ /* 2xx to CONNECT or 101 switches the stream to tunnel mode */
+ if ((h1m->flags & H1_MF_RESP) &&
+ ((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) || h1s->status == 101))
+ h1_set_tunnel_mode(h1s);
+ else {
+ if (h1s->req.state < H1_MSG_DONE || h1s->res.state < H1_MSG_DONE) {
+ /* Unfinished transaction: block this input side waiting the end of the output side */
+ h1s->flags |= H1S_F_RX_BLK;
+ TRACE_STATE("Disable input processing", H1_EV_RX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ }
+ if (h1s->flags & H1S_F_TX_BLK) {
+ h1s->flags &= ~H1S_F_TX_BLK;
+ h1_wake_stream_for_send(h1s);
+ TRACE_STATE("Re-enable output processing", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+ }
+ break;
+ }
+ }
+ else if (h1m->state == H1_MSG_TUNNEL) {
+ TRACE_PROTO("parsing tunneled data", H1_EV_RX_DATA, h1c->conn, h1s);
+ ret = h1_handle_data(h1s, h1m, &htx, &h1c->ibuf, &total, count, buf);
+ if (!ret)
+ break;
+
+ TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "rcvd H1 request tunneled data" : "rcvd H1 response tunneled data"),
+ H1_EV_RX_DATA|H1_EV_RX_EOI, h1c->conn, h1s, htx, (size_t[]){ret});
+ }
+ else {
+ /* unexpected parser state: treat as a parsing error */
+ h1s->flags |= H1S_F_PARSING_ERROR;
+ break;
+ }
+
+ count -= htx_used_space(htx) - used;
+ } while (!(h1s->flags & (H1S_F_PARSING_ERROR|H1S_F_NOT_IMPL_ERROR|H1S_F_RX_BLK|H1S_F_RX_CONGESTED)));
+
+
+ if (h1s->flags & (H1S_F_PARSING_ERROR|H1S_F_NOT_IMPL_ERROR)) {
+ TRACE_ERROR("parsing or not-implemented error", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto err;
+ }
+
+ /* drop the bytes consumed from the input buffer */
+ b_del(&h1c->ibuf, total);
+
+ TRACE_DEVEL("incoming data parsed", H1_EV_RX_DATA, h1c->conn, h1s, htx, (size_t[]){ret});
+
+ ret = htx->data - data;
+ if ((h1c->flags & H1C_F_IN_FULL) && buf_room_for_htx_data(&h1c->ibuf)) {
+ h1c->flags &= ~H1C_F_IN_FULL;
+ TRACE_STATE("h1c ibuf not full anymore", H1_EV_RX_DATA|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn, h1s);
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+
+ if (!b_data(&h1c->ibuf))
+ h1_release_buf(h1c, &h1c->ibuf);
+
+ if (h1m->state <= H1_MSG_LAST_LF)
+ goto out;
+
+ if (h1c->state < H1_CS_RUNNING) {
+ /* The H1 connection is not ready. Most of time, there is no SC
+ * attached, except for TCP>H1 upgrade, from a TCP frontend. In both
+ * cases, it is only possible on the client side.
+ */
+ BUG_ON(h1c->flags & H1C_F_IS_BACK);
+
+ if (h1c->state == H1_CS_EMBRYONIC) {
+ TRACE_DEVEL("request headers fully parsed, create and attach the SC", H1_EV_RX_DATA, h1c->conn, h1s);
+ BUG_ON(h1s_sc(h1s));
+ if (!h1s_new_sc(h1s, buf)) {
+ h1s->flags |= H1S_F_INTERNAL_ERROR;
+ goto err;
+ }
+ }
+ else {
+ TRACE_DEVEL("request headers fully parsed, upgrade the inherited SC", H1_EV_RX_DATA, h1c->conn, h1s);
+ BUG_ON(h1s_sc(h1s) == NULL);
+ if (!h1s_upgrade_sc(h1s, buf)) {
+ h1s->flags |= H1S_F_INTERNAL_ERROR;
+ TRACE_ERROR("H1S upgrade failure", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto err;
+ }
+ }
+ }
+
+ /* Here h1s_sc(h1s) is always defined */
+ if (!(h1c->flags & H1C_F_CANT_FASTFWD) &&
+ (!(h1m->flags & H1_MF_RESP) || !(h1s->flags & H1S_F_BODYLESS_RESP)) &&
+ (h1m->state == H1_MSG_DATA || h1m->state == H1_MSG_TUNNEL) &&
+ !(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H1_RCV)) {
+ TRACE_STATE("notify the mux can use fast-forward", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s);
+ se_fl_set(h1s->sd, SE_FL_MAY_FASTFWD_PROD);
+ }
+ else {
+ TRACE_STATE("notify the mux can't use fast-forward anymore", H1_EV_RX_DATA|H1_EV_RX_BODY, h1c->conn, h1s);
+ se_fl_clr(h1s->sd, SE_FL_MAY_FASTFWD_PROD);
+ h1c->flags &= ~H1C_F_WANT_FASTFWD;
+ }
+
+ /* Set EOI on stream connector in DONE state iff:
+ * - it is a response
+ * - it is a request but no a protocol upgrade nor a CONNECT
+ *
+ * If not set, Wait the response to do so or not depending on the status
+ * code.
+ */
+ if (((h1m->state == H1_MSG_DONE) && (h1m->flags & H1_MF_RESP)) ||
+ ((h1m->state == H1_MSG_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG)))
+ se_fl_set(h1s->sd, SE_FL_EOI);
+
+ out:
+ /* When Input data are pending for this message, notify upper layer that
+ * the mux need more space in the HTX buffer to continue if :
+ *
+ * - The parser is blocked in MSG_DATA or MSG_TUNNEL state
+ * - Headers or trailers are pending to be copied.
+ */
+ if (h1s->flags & (H1S_F_RX_CONGESTED)) {
+ se_fl_set(h1s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ TRACE_STATE("waiting for more room", H1_EV_RX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ }
+ else {
+ se_fl_clr(h1s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ if (h1c->flags & H1C_F_EOS) {
+ se_fl_set(h1s->sd, SE_FL_EOS);
+ TRACE_STATE("report EOS to SE", H1_EV_RX_DATA, h1c->conn, h1s);
+ if (h1m->state >= H1_MSG_DONE || (h1m->state > H1_MSG_LAST_LF && !(h1m->flags & H1_MF_XFER_LEN))) {
+ /* DONE or TUNNEL or SHUTR without XFER_LEN, set
+ * EOI on the stream connector */
+ se_fl_set(h1s->sd, SE_FL_EOI);
+ TRACE_STATE("report EOI to SE", H1_EV_RX_DATA, h1c->conn, h1s);
+ }
+ else if (h1m->state < H1_MSG_DONE) {
+ /* message truncated by a shutdown: report an error */
+ if (h1m->state <= H1_MSG_LAST_LF && b_data(&h1c->ibuf))
+ htx->flags |= HTX_FL_PARSING_ERROR;
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_ERROR("message aborted, set error on SC", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
+ }
+
+ if (h1s->flags & H1S_F_TX_BLK) {
+ h1s->flags &= ~H1S_F_TX_BLK;
+ h1_wake_stream_for_send(h1s);
+ TRACE_STATE("Re-enable output processing", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+ }
+ }
+ if (h1c->flags & H1C_F_ERROR) {
+ /* Report a terminal error to the SE if a previous read error was detected */
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_STATE("report ERROR to SE", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
+ }
+ }
+
+ end:
+ htx_to_buf(htx, buf);
+ TRACE_LEAVE(H1_EV_RX_DATA, h1c->conn, h1s, htx, (size_t[]){ret});
+ return ret;
+
+ err:
+ htx_to_buf(htx, buf);
+ se_fl_set(h1s->sd, SE_FL_EOI);
+ if (h1c->state < H1_CS_RUNNING) {
+ h1c->flags |= H1C_F_EOS;
+ se_fl_set(h1s->sd, SE_FL_EOS);
+ }
+ TRACE_DEVEL("leaving on error", H1_EV_RX_DATA|H1_EV_STRM_ERR, h1c->conn, h1s);
+ return 0;
+}
+
+/* Try to send the request line from the HTX message <htx> for the stream
+ * <h1s>. It returns the number of bytes consumed or zero if nothing was done or
+ * if an error occurred. No more than <count> bytes can be sent.
+ */
+static size_t h1_make_reqline(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t count)
+{
+ struct h1c *h1c = h1s->h1c;
+ struct htx_blk *blk;
+ struct htx_sl *sl;
+ enum htx_blk_type type;
+ uint32_t sz;
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){count});
+
+ /* The head block must be the request start-line, fitting in <count> */
+ while (1) {
+ blk = htx_get_head_blk(htx);
+ if (!blk)
+ goto end;
+ type = htx_get_blk_type(blk);
+ sz = htx_get_blksz(blk);
+ /* NOTE(review): an HTX_BLK_UNUSED block is not removed before
+  * this "continue" re-fetches the same head block; presumably
+  * UNUSED never appears at the head here — confirm.
+  */
+ if (type == HTX_BLK_UNUSED)
+ continue;
+ if (type != HTX_BLK_REQ_SL || sz > count)
+ goto error;
+ break;
+ }
+
+ TRACE_USER("sending request headers", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx);
+
+ if (b_space_wraps(&h1c->obuf))
+ b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+
+ sl = htx_get_blk_ptr(htx, blk);
+ if (!h1_format_htx_reqline(sl, &h1c->obuf))
+ goto full;
+
+ /* Record the method and init the output parser from the SL version */
+ h1s->meth = sl->info.req.meth;
+ h1_parse_req_vsn(h1m, sl);
+
+ h1m->flags |= H1_MF_XFER_LEN;
+ if (sl->flags & HTX_SL_F_CHNK)
+ h1m->flags |= H1_MF_CHNK;
+ else if (sl->flags & HTX_SL_F_CLEN)
+ h1m->flags |= H1_MF_CLEN;
+ if (sl->flags & HTX_SL_F_XFER_ENC)
+ h1m->flags |= H1_MF_XFER_ENC;
+
+ /* bodyless request without C-L: force "Content-Length: 0" framing */
+ if (sl->flags & HTX_SL_F_BODYLESS && !(h1m->flags & H1_MF_CLEN)) {
+ h1m->flags = (h1m->flags & ~H1_MF_CHNK) | H1_MF_CLEN;
+ h1s->flags |= H1S_F_HAVE_CLEN;
+ }
+ if ((sl->flags & HTX_SL_F_BODYLESS_RESP) || h1s->meth == HTTP_METH_HEAD)
+ h1s->flags |= H1S_F_BODYLESS_RESP;
+
+ if (h1s->flags & H1S_F_RX_BLK) {
+ h1s->flags &= ~H1S_F_RX_BLK;
+ h1_wake_stream_for_recv(h1s);
+ TRACE_STATE("Re-enable input processing", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+ }
+
+ h1m->state = H1_MSG_HDR_NAME;
+ ret += sz;
+ htx_remove_blk(htx, blk);
+
+ end:
+ TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){ret});
+ return ret;
+
+ full:
+ TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ h1c->flags |= H1C_F_OUT_FULL;
+ goto end;
+
+ error:
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ h1s->flags |= H1S_F_PROCESSING_ERROR;
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_ERROR("processing error on request start-line",
+ H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto end;
+}
+
+/* Try to send the status line from the HTX message <htx> for the stream
+ * <h1s>. It returns the number of bytes consumed or zero if nothing was done or
+ * if an error occurred. No more than <count> bytes can be sent.
+ */
+static size_t h1_make_stline(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t count)
+{
+ struct h1c *h1c = h1s->h1c;
+ struct htx_blk *blk;
+ struct htx_sl *sl;
+ enum htx_blk_type type;
+ uint32_t sz;
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){count});
+
+ /* The head block must be the response start-line, fitting in <count> */
+ while (1) {
+ blk = htx_get_head_blk(htx);
+ if (!blk)
+ goto end;
+
+ type = htx_get_blk_type(blk);
+ sz = htx_get_blksz(blk);
+
+ /* NOTE(review): an HTX_BLK_UNUSED block is not removed before
+  * this "continue" re-fetches the same head block; presumably
+  * UNUSED never appears at the head here — confirm.
+  */
+ if (type == HTX_BLK_UNUSED)
+ continue;
+ if (type != HTX_BLK_RES_SL || sz > count)
+ goto error;
+ break;
+ }
+
+ TRACE_USER("sending response headers", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx);
+
+ if (b_space_wraps(&h1c->obuf))
+ b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+
+ sl = htx_get_blk_ptr(htx, blk);
+ if (!h1_format_htx_stline(sl, &h1c->obuf))
+ goto full;
+
+ /* Record the status and init the output parser from the SL version */
+ h1s->status = sl->info.res.status;
+ h1_parse_res_vsn(h1m, sl);
+
+ if (sl->flags & HTX_SL_F_XFER_LEN) {
+ h1m->flags |= H1_MF_XFER_LEN;
+ if (sl->flags & HTX_SL_F_CHNK)
+ h1m->flags |= H1_MF_CHNK;
+ else if (sl->flags & HTX_SL_F_CLEN)
+ h1m->flags |= H1_MF_CLEN;
+ if (sl->flags & HTX_SL_F_XFER_ENC)
+ h1m->flags |= H1_MF_XFER_ENC;
+ }
+ /* 1xx: no "Connection" header processing; 204/304: bodyless response */
+ if (h1s->status < 200)
+ h1s->flags |= H1S_F_HAVE_O_CONN;
+ else if ((sl->flags & HTX_SL_F_BODYLESS_RESP) || h1s->status == 204 || h1s->status == 304)
+ h1s->flags |= H1S_F_BODYLESS_RESP;
+
+ h1m->state = H1_MSG_HDR_NAME;
+ ret += sz;
+ htx_remove_blk(htx, blk);
+
+ end:
+ TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){ret});
+ return ret;
+
+ full:
+ TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ h1c->flags |= H1C_F_OUT_FULL;
+ goto end;
+
+ error:
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ h1s->flags |= H1S_F_PROCESSING_ERROR;
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_ERROR("processing error on response start-line",
+ H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto end;
+}
+
+/* Try to send the message headers from the HTX message <htx> for the stream
+ * <h1s>. It returns the number of bytes consumed or zero if nothing was done or
+ * if an error occurred. No more than <count> bytes can be sent. Headers are
+ * formatted into a temporary view of the output buffer and committed in one
+ * b_add() at the end.
+ */
+static size_t h1_make_headers(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t count)
+{
+ struct h1c *h1c = h1s->h1c;
+ struct htx_blk *blk;
+ struct buffer outbuf;
+ enum htx_blk_type type;
+ struct ist n, v;
+ uint32_t sz;
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){count});
+
+ if (b_space_wraps(&h1c->obuf))
+ b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+ outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+ /* consume HDR blocks one by one until EOH, applying the per-header
+  * rewriting rules below
+  */
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ sz = htx_get_blksz(blk);
+
+ if (type == HTX_BLK_HDR) {
+ if (sz > count)
+ goto error;
+
+ n = htx_get_blk_name(htx, blk);
+ v = htx_get_blk_value(htx, blk);
+
+ /* Skip all pseudo-headers */
+ if (*(n.ptr) == ':')
+ goto nextblk;
+
+ if (isteq(n, ist("transfer-encoding"))) {
+ /* dropped on 1xx/204 responses or when chunking
+  * was not retained for the message */
+ if ((h1m->flags & H1_MF_RESP) && (h1s->status < 200 || h1s->status == 204))
+ goto nextblk;
+ if (!(h1m->flags & H1_MF_CHNK))
+ goto nextblk;
+ if (h1_parse_xfer_enc_header(h1m, v) < 0)
+ goto error;
+ h1s->flags |= H1S_F_HAVE_CHNK;
+ }
+ else if (isteq(n, ist("content-length"))) {
+ if ((h1m->flags & H1_MF_RESP) && (h1s->status < 200 || h1s->status == 204))
+ goto nextblk;
+ if (!(h1m->flags & H1_MF_CLEN))
+ goto nextblk;
+ if (!(h1s->flags & H1S_F_HAVE_CLEN))
+ h1m->flags &= ~H1_MF_CLEN;
+ /* Only skip C-L header with invalid value. */
+ if (h1_parse_cont_len_header(h1m, &v) < 0)
+ goto error;
+ /* only the first valid C-L header is emitted */
+ if (h1s->flags & H1S_F_HAVE_CLEN)
+ goto nextblk;
+ h1s->flags |= H1S_F_HAVE_CLEN;
+ }
+ else if (isteq(n, ist("connection"))) {
+ /* the value may be rewritten or emptied
+  * depending on the negotiated conn mode */
+ h1_parse_connection_header(h1m, &v);
+ if (!v.len)
+ goto nextblk;
+ }
+ else if (isteq(n, ist("upgrade"))) {
+ h1_parse_upgrade_header(h1m, v);
+ }
+ else if ((isteq(n, ist("sec-websocket-accept")) && h1m->flags & H1_MF_RESP) ||
+ (isteq(n, ist("sec-websocket-key")) && !(h1m->flags & H1_MF_RESP))) {
+ h1s->flags |= H1S_F_HAVE_WS_KEY;
+ }
+ else if (isteq(n, ist("te"))) {
+ /* "te" may only be sent with "trailers" if this value
+ * is present, otherwise it must be deleted.
+ */
+ v = istist(v, ist("trailers"));
+ if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
+ goto nextblk;
+ v = ist("trailers");
+ }
+
+ /* Skip header if same name is used to add the server name */
+ if (!(h1m->flags & H1_MF_RESP) && isttest(h1c->px->server_id_hdr_name) &&
+ isteqi(n, h1c->px->server_id_hdr_name))
+ goto nextblk;
+
+ /* Try to adjust the case of the header name */
+ if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+ h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+ if (!h1_format_htx_hdr(n, v, &outbuf))
+ goto full;
+ }
+ else if (type == HTX_BLK_EOH) {
+ h1m->state = H1_MSG_LAST_LF;
+ break; /* Do not consume this block */
+ }
+ else if (type == HTX_BLK_UNUSED)
+ goto nextblk;
+ else
+ goto error;
+
+ nextblk:
+ ret += sz;
+ count -= sz;
+ blk = htx_remove_blk(htx, blk);
+ }
+
+ copy:
+ /* commit everything formatted into <outbuf> to the output buffer */
+ b_add(&h1c->obuf, outbuf.data);
+
+ end:
+ TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){ret});
+ return ret;
+
+ full:
+ TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ h1c->flags |= H1C_F_OUT_FULL;
+ goto copy;
+
+ error:
+ ret = 0;
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ h1s->flags |= H1S_F_PROCESSING_ERROR;
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_ERROR("processing error on message headers",
+ H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto end;
+}
+
+/* Handle the EOH and perform last processing before sending the data. It
+ * returns the number of bytes consumed or zero if nothing was done or if an
+ * error occurred. No more than <count> bytes can be sent.
+ */
+static size_t h1_make_eoh(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t count)
+{
+ struct h1c *h1c = h1s->h1c;
+ struct htx_blk *blk;
+ struct buffer outbuf;
+ enum htx_blk_type type;
+ struct ist n, v;
+ uint32_t sz;
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){count});
+
+ while (1) {
+ blk = htx_get_head_blk(htx);
+ if (!blk)
+ goto end;
+
+ type = htx_get_blk_type(blk);
+ sz = htx_get_blksz(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+ if (type != HTX_BLK_EOH || sz > count)
+ goto error;
+ break;
+ }
+
+ if (b_space_wraps(&h1c->obuf))
+ b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+ outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+ /* Deal with "Connection" header */
+ if (!(h1s->flags & H1S_F_HAVE_O_CONN)) {
+ if ((htx->flags & HTX_FL_PROXY_RESP) && h1s->req.state != H1_MSG_DONE) {
+ /* If the reply comes from haproxy while the request is
+ * not finished, we force the connection close. */
+ h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
+ TRACE_STATE("force close mode (resp)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
+ }
+ else if ((h1m->flags & (H1_MF_XFER_ENC|H1_MF_CLEN)) == (H1_MF_XFER_ENC|H1_MF_CLEN)) {
+ /* T-E + C-L: force close */
+ h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
+ h1m->flags &= ~H1_MF_CLEN;
+ TRACE_STATE("force close mode (T-E + C-L)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
+ }
+ else if ((h1m->flags & (H1_MF_VER_11|H1_MF_XFER_ENC)) == H1_MF_XFER_ENC) {
+ /* T-E + HTTP/1.0: force close */
+ h1s->flags = (h1s->flags & ~H1S_F_WANT_MSK) | H1S_F_WANT_CLO;
+ TRACE_STATE("force close mode (T-E + HTTP/1.0)", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1s->h1c->conn, h1s);
+ }
+
+ /* the conn_mode must be processed. So do it */
+ n = ist("connection");
+ v = ist("");
+ h1_process_output_conn_mode(h1s, h1m, &v);
+ if (v.len) {
+ /* Try to adjust the case of the header name */
+ if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+ h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+ if (!h1_format_htx_hdr(n, v, &outbuf))
+ goto full;
+ }
+ h1s->flags |= H1S_F_HAVE_O_CONN;
+ }
+
+ /* Deal with "Transfer-Encoding" header */
+ if ((h1s->meth != HTTP_METH_CONNECT &&
+ (h1m->flags & (H1_MF_VER_11|H1_MF_RESP|H1_MF_CLEN|H1_MF_CHNK|H1_MF_XFER_LEN)) ==
+ (H1_MF_VER_11|H1_MF_XFER_LEN)) ||
+ (h1s->status >= 200 && !(h1s->flags & H1S_F_BODYLESS_RESP) &&
+ !(h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) &&
+ (h1m->flags & (H1_MF_VER_11|H1_MF_RESP|H1_MF_CLEN|H1_MF_CHNK|H1_MF_XFER_LEN)) ==
+ (H1_MF_VER_11|H1_MF_RESP|H1_MF_XFER_LEN)))
+ h1m->flags |= H1_MF_CHNK;
+
+ if ((h1m->flags & H1_MF_CHNK) && !(h1s->flags & H1S_F_HAVE_CHNK)) {
+ /* chunking needed but header not seen */
+ n = ist("transfer-encoding");
+ v = ist("chunked");
+ if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+ h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+ if (!h1_format_htx_hdr(n, v, &outbuf))
+ goto full;
+ TRACE_STATE("add \"Transfer-Encoding: chunked\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ h1s->flags |= H1S_F_HAVE_CHNK;
+ }
+
+ /* Deal with "Content-Length header */
+ if ((h1m->flags & H1_MF_CLEN) && !(h1s->flags & H1S_F_HAVE_CLEN)) {
+ char *end;
+
+ h1m->curr_len = h1m->body_len = htx->data + htx->extra - sz;
+ end = DISGUISE(ulltoa(h1m->body_len, trash.area, b_size(&trash)));
+
+ n = ist("content-length");
+ v = ist2(trash.area, end-trash.area);
+ if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+ h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+ if (!h1_format_htx_hdr(n, v, &outbuf))
+ goto full;
+ TRACE_STATE("add \"Content-Length: <LEN>\"", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ h1s->flags |= H1S_F_HAVE_CLEN;
+ }
+
+ /* Add the server name to a header (if requested) */
+ if (!(h1s->flags & H1S_F_HAVE_SRV_NAME) &&
+ !(h1m->flags & H1_MF_RESP) && isttest(h1c->px->server_id_hdr_name)) {
+ struct server *srv = objt_server(h1c->conn->target);
+
+ if (srv) {
+ n = h1c->px->server_id_hdr_name;
+ v = ist(srv->id);
+
+ /* Try to adjust the case of the header name */
+ if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+ h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+ if (!h1_format_htx_hdr(n, v, &outbuf))
+ goto full;
+ }
+ TRACE_STATE("add server name header", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ h1s->flags |= H1S_F_HAVE_SRV_NAME;
+ }
+
+ /* Add websocket handshake key if needed */
+ if (!(h1s->flags & H1S_F_HAVE_WS_KEY) &&
+ (h1m->flags & (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) == (H1_MF_CONN_UPG|H1_MF_UPG_WEBSOCKET)) {
+ if (!(h1m->flags & H1_MF_RESP)) {
+ /* generate a random websocket key
+ * stored in the session to
+ * verify it on the response side
+ */
+ h1_generate_random_ws_input_key(h1s->ws_key);
+
+ if (!h1_format_htx_hdr(ist("Sec-Websocket-Key"),
+ ist(h1s->ws_key),
+ &outbuf)) {
+ goto full;
+ }
+ }
+ else {
+ /* add the response header key */
+ char key[29];
+
+ h1_calculate_ws_output_key(h1s->ws_key, key);
+ if (!h1_format_htx_hdr(ist("Sec-Websocket-Accept"),
+ ist(key),
+ &outbuf)) {
+ goto full;
+ }
+ }
+ h1s->flags |= H1S_F_HAVE_WS_KEY;
+ }
+
+ /*
+ * All headers was sent, now process EOH
+ */
+ if (!(h1m->flags & H1_MF_RESP) && h1s->meth == HTTP_METH_CONNECT) {
+ if (!chunk_memcat(&outbuf, "\r\n", 2))
+ goto full;
+ /* a CONNECT request was sent. Output processing is now blocked
+ * waiting the server response.
+ */
+ h1m->state = H1_MSG_DONE;
+ h1s->flags |= H1S_F_TX_BLK;
+ TRACE_STATE("CONNECT request waiting for tunnel mode", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ }
+ else if ((h1m->flags & H1_MF_RESP) &&
+ ((h1s->meth == HTTP_METH_CONNECT && h1s->status >= 200 && h1s->status < 300) || h1s->status == 101)) {
+ if (!chunk_memcat(&outbuf, "\r\n", 2))
+ goto full;
+ /* a successful reply to a CONNECT or a protocol switching is sent
+ * to the client. Switch the response to tunnel mode.
+ */
+ h1_set_tunnel_mode(h1s);
+ }
+ else if ((h1m->flags & H1_MF_RESP) &&
+ h1s->status < 200 && (h1s->status == 100 || h1s->status >= 102)) {
+ if (!chunk_memcat(&outbuf, "\r\n", 2))
+ goto full;
+ /* 1xx response was sent, reset response processing */
+ h1m_init_res(h1m);
+ h1m->flags |= (H1_MF_NO_PHDR|H1_MF_CLEAN_CONN_HDR);
+ h1s->flags &= ~H1S_F_HAVE_O_CONN;
+ TRACE_STATE("1xx response xferred", H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ }
+ else if (htx_is_unique_blk(htx, blk) &&
+ ((htx->flags & HTX_FL_EOM) || ((h1m->flags & H1_MF_CLEN) && !h1m->curr_len))) {
+ /* EOM flag is set and it is the last block or there is no
+ * payload. If cannot be removed now. We must emit the end of
+ * the message first to be sure the output buffer is not full
+ */
+ if ((h1m->flags & H1_MF_CHNK) && !(h1s->flags & H1S_F_BODYLESS_RESP)) {
+ if (!chunk_memcat(&outbuf, "\r\n0\r\n\r\n", 7))
+ goto full;
+ }
+ else if (!chunk_memcat(&outbuf, "\r\n", 2))
+ goto full;
+ h1m->state = ((htx->flags & HTX_FL_EOM) ? H1_MSG_DONE : H1_MSG_TRAILERS);
+ }
+ else {
+ if (!chunk_memcat(&outbuf, "\r\n", 2))
+ goto full;
+ h1m->state = ((h1m->flags & H1_MF_CHNK) ? H1_MSG_CHUNK_SIZE: H1_MSG_DATA);
+ }
+
+ TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request headers xferred" : "H1 response headers xferred"),
+ H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s);
+ ret += sz;
+ htx_remove_blk(htx, blk);
+
+ copy:
+ b_add(&h1c->obuf, outbuf.data);
+ end:
+ TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_HDRS, h1c->conn, h1s, htx, (size_t[]){ret});
+ return ret;
+
+ full:
+ TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+ h1c->flags |= H1C_F_OUT_FULL;
+ goto copy;
+
+ error:
+ htx->flags |= HTX_FL_PROCESSING_ERROR;
+ h1s->flags |= H1S_F_PROCESSING_ERROR;
+ se_fl_set(h1s->sd, SE_FL_ERROR);
+ TRACE_ERROR("processing error on message EOH",
+ H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+ goto end;
+}
+
+/* Try to send the message payload from the HTX message <htx> for the stream
+ * <h1s>. In this case, we are not in TUNNEL mode. It returns the number of
+ * bytes consumed or zero if nothing was done or if an error occurred. No more
+ * than <count> bytes can be sent.
+ *
+ * Payload is emitted either via a zero-copy buffer swap (when the whole HTX
+ * buffer maps to a single DATA block covering exactly <count> bytes) or via a
+ * block-by-block copy into the mux output buffer, adding the chunked
+ * envelope (size line, CRLFs, last chunk) on the fly when H1_MF_CHNK is set.
+ */
+static size_t h1_make_data(struct h1s *h1s, struct h1m *h1m, struct buffer *buf, size_t count)
+{
+	struct h1c *h1c = h1s->h1c;
+	struct htx *htx = htx_from_buf(buf);
+	struct htx_blk *blk;
+	struct buffer outbuf;
+	enum htx_blk_type type;
+	struct ist v;
+	uint32_t sz;
+	size_t ret = 0;
+
+	TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, htx, (size_t[]){count});
+	blk = htx_get_head_blk(htx);
+
+	/* Perform some optimizations to reduce the number of buffer copies. If
+	 * the mux's buffer is empty and the htx area contains exactly one data
+	 * block of the same size as the requested count, then it's possible to
+	 * simply swap the caller's buffer with the mux's output buffer and
+	 * adjust offsets and length to match the entire DATA HTX block in the
+	 * middle. In this case we perform a true zero-copy operation from
+	 * end-to-end. This is the situation that happens all the time with
+	 * large files.
+	 */
+	if ((!(h1m->flags & H1_MF_RESP) || !(h1s->flags & H1S_F_BODYLESS_RESP)) &&
+	    !b_data(&h1c->obuf) &&
+	    (!(h1m->flags & H1_MF_CHNK) || ((h1m->flags & H1_MF_CHNK) && (!h1m->curr_len || count == h1m->curr_len))) &&
+	    htx_nbblks(htx) == 1 &&
+	    htx_get_blk_type(blk) == HTX_BLK_DATA &&
+	    htx_get_blk_value(htx, blk).len == count) {
+		void *old_area;
+		uint64_t extra;
+		int eom = (htx->flags & HTX_FL_EOM);
+
+		/* swap the areas: the obuf takes ownership of the caller's
+		 * buffer area and points directly at the DATA payload, while
+		 * the caller gets back the (empty) old obuf area.
+		 */
+		extra = htx->extra;
+		old_area = h1c->obuf.area;
+		h1c->obuf.area = buf->area;
+		h1c->obuf.head = sizeof(struct htx) + blk->addr;
+		h1c->obuf.data = count;
+
+		buf->area = old_area;
+		buf->data = buf->head = 0;
+
+		/* rebuild a fresh empty HTX in the caller's buffer, preserving
+		 * the announced remaining payload (<extra>).
+		 */
+		htx = (struct htx *)buf->area;
+		htx_reset(htx);
+		htx->extra = extra;
+
+		if (h1m->flags & H1_MF_CLEN) {
+			if (count > h1m->curr_len) {
+				TRACE_ERROR("more payload than announced",
+					    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+				goto error;
+			}
+			h1m->curr_len -= count;
+			if (!h1m->curr_len)
+				h1m->state = (eom ? H1_MSG_DONE : H1_MSG_TRAILERS);
+		}
+		else if (h1m->flags & H1_MF_CHNK) {
+			/* The message is chunked. We need to check if we must
+			 * emit the chunk size, the CRLF marking the end of the
+			 * current chunk and eventually the CRLF marking the end
+			 * of the previous chunk (because of fast-forwarding).
+			 * If it is the end of the message, we must
+			 * also emit the last chunk.
+			 *
+			 * We have at least the size of the struct htx to write
+			 * the chunk envelope. It should be enough.
+			 */
+
+			/* If is a new chunk, prepend the chunk size */
+			if (h1m->state == H1_MSG_CHUNK_CRLF || h1m->state == H1_MSG_CHUNK_SIZE) {
+				if (h1m->curr_len) {
+					TRACE_ERROR("chunk bigger than announced",
+						    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+					goto error;
+				}
+				h1m->curr_len = count + (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->extra : 0);
+
+				/* Because chunk meta-data are prepended, the chunk size of the current chunk
+				 * must be handled before the end of the previous chunk.
+				 */
+				h1_prepend_chunk_size(&h1c->obuf, h1m->curr_len);
+				if (h1m->state == H1_MSG_CHUNK_CRLF)
+					h1_prepend_chunk_crlf(&h1c->obuf);
+
+				h1m->state = H1_MSG_DATA;
+			}
+
+			h1m->curr_len -= count;
+
+			/* It is the end of the chunk, append the CRLF */
+			if (!h1m->curr_len) {
+				h1_append_chunk_crlf(&h1c->obuf);
+				h1m->state = H1_MSG_CHUNK_SIZE;
+			}
+
+			/* It is the end of the message, add the last chunk with the extra CRLF */
+			if (eom) {
+				if (h1m->curr_len) {
+					TRACE_ERROR("chunk smaller than announced",
+						    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+					goto error;
+				}
+				/* Emit the last chunk too at the buffer's end */
+				b_putblk(&h1c->obuf, "0\r\n\r\n", 5);
+				h1m->state = H1_MSG_DONE;
+			}
+		}
+		/* Nothing to do if XFER len is unknown */
+
+		ret = count;
+		TRACE_PROTO("H1 message payload data xferred (zero-copy)", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){ret});
+		goto end;
+	}
+
+	/* Copy mode: realign the output buffer if needed and build a
+	 * contiguous work area at its tail.
+	 */
+	if (b_space_wraps(&h1c->obuf))
+		b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+	outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+
+	/* Handle now case of CRLF at the end of a chunk. */
+	if ((h1m->flags & H1_MF_CHNK) && h1m->state == H1_MSG_CHUNK_CRLF) {
+		if (h1m->curr_len) {
+			TRACE_ERROR("chunk bigger than announced",
+				    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+			goto error;
+		}
+		if (!chunk_memcat(&outbuf, "\r\n", 2))
+			goto full;
+		h1m->state = H1_MSG_CHUNK_SIZE;
+	}
+
+	/* Emit HTX blocks one by one until the budget (<count>) or the
+	 * output room is exhausted.
+	 */
+	while (blk && count) {
+		uint32_t vlen, chklen;
+		int last_data = 0;
+
+		type = htx_get_blk_type(blk);
+		sz = htx_get_blksz(blk);
+		vlen = sz;
+		if (type == HTX_BLK_DATA) {
+			if (vlen > count) {
+				/* Get the maximum amount of data we can transfer */
+				vlen = count;
+			}
+			else if (htx_is_unique_blk(htx, blk) && (htx->flags & HTX_FL_EOM)) {
+				/* It is the last block of this message. After this one,
+				 * only tunneled data may be forwarded. */
+				TRACE_DEVEL("last message block", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s);
+				last_data = 1;
+			}
+
+			if ((h1m->flags & H1_MF_RESP) && (h1s->flags & H1S_F_BODYLESS_RESP)) {
+				TRACE_PROTO("Skip data for bodyless response", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, htx);
+				goto nextblk;
+			}
+
+			chklen = 0;
+			if (h1m->flags & H1_MF_CHNK) {
+				/* If is a new chunk, prepend the chunk size */
+				if (h1m->state == H1_MSG_CHUNK_SIZE) {
+					h1m->curr_len = (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->data + htx->extra : vlen);
+					if (!h1_append_chunk_size(&outbuf, h1m->curr_len)) {
+						h1m->curr_len = 0;
+						goto full;
+					}
+					h1m->state = H1_MSG_DATA;
+				}
+
+				if (vlen > h1m->curr_len) {
+					vlen = h1m->curr_len;
+					last_data = 0;
+				}
+
+				/* NOTE(review): chklen was already reset above;
+				 * this reassignment is redundant but harmless.
+				 */
+				chklen = 0;
+				if (h1m->curr_len == vlen)
+					chklen += 2;
+				if (last_data)
+					chklen += 5;
+			}
+
+			/* reserve room for the trailing chunk envelope (<chklen>) */
+			if (vlen + chklen > b_room(&outbuf)) {
+				/* too large for the buffer */
+				if (chklen >= b_room(&outbuf))
+					goto full;
+				vlen = b_room(&outbuf) - chklen;
+				last_data = 0;
+			}
+
+			v = htx_get_blk_value(htx, blk);
+			v.len = vlen;
+			if (!h1_format_htx_data(v, &outbuf, 0))
+				goto full;
+
+			if (h1m->flags & H1_MF_CLEN) {
+				if (vlen > h1m->curr_len) {
+					TRACE_ERROR("more payload than announced",
+						    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+					goto error;
+				}
+				h1m->curr_len -= vlen;
+			}
+			else if (h1m->flags & H1_MF_CHNK) {
+				h1m->curr_len -= vlen;
+				/* Space already reserved, so it must succeed */
+				if (!h1m->curr_len) {
+					if (!chunk_memcat(&outbuf, "\r\n", 2))
+						goto error;
+					h1m->state = H1_MSG_CHUNK_SIZE;
+				}
+				if (last_data) {
+					if (h1m->curr_len) {
+						TRACE_ERROR("chunk smaller than announced",
+							    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+						goto error;
+					}
+					if (!chunk_memcat(&outbuf, "0\r\n\r\n", 5))
+						goto error;
+				}
+			}
+
+
+		}
+		else if (type == HTX_BLK_EOT || type == HTX_BLK_TLR) {
+			if ((h1m->flags & H1_MF_RESP) && (h1s->flags & H1S_F_BODYLESS_RESP)) {
+				/* Do nothing the payload must be skipped
+				 * because it is a bodyless response
+				 */
+			}
+			else if (h1m->flags & H1_MF_CHNK) {
+				/* Emit last chunk for chunked messages only */
+				if (!chunk_memcat(&outbuf, "0\r\n", 3))
+					goto full;
+			}
+			h1m->state = H1_MSG_TRAILERS;
+			break;
+		}
+		else if (type == HTX_BLK_UNUSED)
+			goto nextblk;
+		else
+			goto error;
+
+	  nextblk:
+		ret += vlen;
+		count -= vlen;
+		if (sz == vlen)
+			blk = htx_remove_blk(htx, blk);
+		else {
+			/* partial transfer: keep the block's remainder */
+			htx_cut_data_blk(htx, blk, vlen);
+			if (!b_room(&outbuf))
+				goto full;
+		}
+
+		if (last_data)
+			h1m->state = H1_MSG_DONE;
+	}
+
+  copy:
+	TRACE_PROTO("H1 message payload data xferred", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){ret});
+	b_add(&h1c->obuf, outbuf.data);
+  end:
+	TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, htx, (size_t[]){ret});
+	return ret;
+  full:
+	TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+	h1c->flags |= H1C_F_OUT_FULL;
+	goto copy;
+  error:
+	ret = 0;
+	htx->flags |= HTX_FL_PROCESSING_ERROR;
+	h1s->flags |= H1S_F_PROCESSING_ERROR;
+	se_fl_set(h1s->sd, SE_FL_ERROR);
+	TRACE_ERROR("processing error on message payload",
+		    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	goto end;
+}
+
+/* Try to send the tunneled data from the HTX message <htx> for the stream
+ * <h1s>. In this case, we are in TUNNEL mode. It returns the number of bytes
+ * consumed or zero if nothing was done or if an error occurred. No more than
+ * <count> bytes can be sent.
+ *
+ * Tunneled data carry no HTTP framing: DATA blocks are forwarded verbatim,
+ * with no chunk envelope and no length accounting.
+ */
+static size_t h1_make_tunnel(struct h1s *h1s, struct h1m *h1m, struct buffer *buf, size_t count)
+{
+	struct h1c *h1c = h1s->h1c;
+	struct htx *htx = htx_from_buf(buf);
+	struct htx_blk *blk;
+	struct buffer outbuf;
+	enum htx_blk_type type;
+	struct ist v;
+	uint32_t sz;
+	size_t ret = 0;
+
+	TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, htx, (size_t[]){count});
+
+	blk = htx_get_head_blk(htx);
+
+	/* Perform some optimizations to reduce the number of buffer copies. If
+	 * the mux's buffer is empty and the htx area contains exactly one data
+	 * block of the same size as the requested count, then it's possible to
+	 * simply swap the caller's buffer with the mux's output buffer and
+	 * adjust offsets and length to match the entire DATA HTX block in the
+	 * middle. In this case we perform a true zero-copy operation from
+	 * end-to-end. This is the situation that happens all the time with
+	 * large files.
+	 */
+	if (!b_data(&h1c->obuf) &&
+	    htx_nbblks(htx) == 1 &&
+	    htx_get_blk_type(blk) == HTX_BLK_DATA &&
+	    htx_get_blksz(blk) == count) {
+		void *old_area;
+
+		/* swap areas: the obuf takes the caller's buffer and points
+		 * at the DATA payload; the caller gets the old (empty) area.
+		 */
+		old_area = h1c->obuf.area;
+		h1c->obuf.area = buf->area;
+		h1c->obuf.head = sizeof(struct htx) + blk->addr;
+		h1c->obuf.data = count;
+
+		buf->area = old_area;
+		buf->data = buf->head = 0;
+
+		htx = (struct htx *)buf->area;
+		htx_reset(htx);
+
+		ret = count;
+		TRACE_PROTO("H1 tunneled data xferred (zero-copy)", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){ret});
+		goto end;
+	}
+
+	/* Copy mode: build a contiguous work area at the obuf's tail */
+	if (b_space_wraps(&h1c->obuf))
+		b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+	outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+	while (blk) {
+		uint32_t vlen;
+
+		type = htx_get_blk_type(blk);
+		sz = htx_get_blksz(blk);
+		vlen = sz;
+
+		if (type == HTX_BLK_DATA) {
+			if (vlen > count) {
+				/* Get the maximum amount of data we can transfer */
+				vlen = count;
+			}
+
+			if (vlen > b_room(&outbuf)) {
+				/* too large for the buffer */
+				vlen = b_room(&outbuf);
+			}
+
+			v = htx_get_blk_value(htx, blk);
+			v.len = vlen;
+			if (!h1_format_htx_data(v, &outbuf, 0))
+				goto full;
+		}
+		else if (type == HTX_BLK_UNUSED)
+			goto nextblk;
+		else
+			goto error;
+
+	  nextblk:
+		ret += vlen;
+		count -= vlen;
+		if (sz == vlen)
+			blk = htx_remove_blk(htx, blk);
+		else {
+			/* partial transfer: keep the remainder and stop */
+			htx_cut_data_blk(htx, blk, vlen);
+			break;
+		}
+	}
+
+  copy:
+	TRACE_PROTO("H1 tunneled data xferred", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){ret});
+	b_add(&h1c->obuf, outbuf.data);
+
+  end:
+	TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, htx, (size_t[]){ret});
+	return ret;
+
+  full:
+	TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+	h1c->flags |= H1C_F_OUT_FULL;
+	goto copy;
+
+  error:
+	ret = 0;
+	htx->flags |= HTX_FL_PROCESSING_ERROR;
+	h1s->flags |= H1S_F_PROCESSING_ERROR;
+	se_fl_set(h1s->sd, SE_FL_ERROR);
+	TRACE_ERROR("processing error on tunneled",
+		    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	goto end;
+}
+
+/* Try to send the trailers from the HTX message <htx> for the stream <h1s>. It
+ * returns the number of bytes consumed or zero if nothing was done or if an
+ * error occurred. No more than <count> bytes can be sent.
+ *
+ * Trailers are only emitted for chunked messages; otherwise the TLR/EOT
+ * blocks are consumed silently. The EOT block ends the message (H1_MSG_DONE).
+ */
+static size_t h1_make_trailers(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t count)
+{
+	struct h1c *h1c = h1s->h1c;
+	struct htx_blk *blk;
+	struct buffer outbuf;
+	enum htx_blk_type type;
+	struct ist n, v;
+	uint32_t sz;
+	size_t ret = 0;
+
+	TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_TLRS, h1c->conn, h1s, htx, (size_t[]){count});
+
+	if (b_space_wraps(&h1c->obuf))
+		b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+	/* NOTE(review): chunk_reset() is redundant here, outbuf is fully
+	 * re-initialized by b_make() on the next line.
+	 */
+	chunk_reset(&outbuf);
+	outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+	blk = htx_get_head_blk(htx);
+	while (blk) {
+		type = htx_get_blk_type(blk);
+		sz = htx_get_blksz(blk);
+
+		if (type == HTX_BLK_TLR) {
+			if (sz > count)
+				goto error;
+
+			/* trailers are dropped unless the message is chunked,
+			 * or for bodyless responses
+			 */
+			if (!(h1m->flags & H1_MF_CHNK) || ((h1m->flags & H1_MF_RESP) && (h1s->flags & H1S_F_BODYLESS_RESP)))
+				goto nextblk;
+
+			n = htx_get_blk_name(htx, blk);
+			v = htx_get_blk_value(htx, blk);
+
+			/* Try to adjust the case of the header name */
+			if (h1c->px->options2 & (PR_O2_H1_ADJ_BUGCLI|PR_O2_H1_ADJ_BUGSRV))
+				h1_adjust_case_outgoing_hdr(h1s, h1m, &n);
+			if (!h1_format_htx_hdr(n, v, &outbuf))
+				goto full;
+		}
+		else if (type == HTX_BLK_EOT) {
+			if (!(h1m->flags & H1_MF_CHNK) || ((h1m->flags & H1_MF_RESP) && (h1s->flags & H1S_F_BODYLESS_RESP))) {
+				TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request trailers skipped" : "H1 response trailers skipped"),
+					    H1_EV_TX_DATA|H1_EV_TX_TLRS, h1c->conn, h1s);
+			}
+			else {
+				/* empty line ending the trailer section */
+				if (!chunk_memcat(&outbuf, "\r\n", 2))
+					goto full;
+				TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request trailers xferred" : "H1 response trailers xferred"),
+					    H1_EV_TX_DATA|H1_EV_TX_TLRS, h1c->conn, h1s);
+			}
+			h1m->state = H1_MSG_DONE;
+		}
+		else if (type == HTX_BLK_UNUSED)
+			goto nextblk;
+		else
+			goto error;
+
+	  nextblk:
+		ret += sz;
+		count -= sz;
+		blk = htx_remove_blk(htx, blk);
+	}
+
+  copy:
+	b_add(&h1c->obuf, outbuf.data);
+
+  end:
+	TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_TLRS, h1c->conn, h1s, htx, (size_t[]){ret});
+	return ret;
+
+  full:
+	TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+	h1c->flags |= H1C_F_OUT_FULL;
+	goto copy;
+
+  error:
+	ret = 0;
+	htx->flags |= HTX_FL_PROCESSING_ERROR;
+	h1s->flags |= H1S_F_PROCESSING_ERROR;
+	se_fl_set(h1s->sd, SE_FL_ERROR);
+	TRACE_ERROR("processing error on message trailers",
+		    H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	goto end;
+}
+
+/* Try to send the header for a chunk of <len> bytes. It returns the number of
+ * envelope bytes emitted into the output buffer (possibly including the CRLF
+ * closing the previous chunk), or zero if nothing was done or if an error
+ * occurred.
+ */
+static size_t h1_make_chunk(struct h1s *h1s, struct h1m * h1m, size_t len)
+{
+	struct h1c *h1c = h1s->h1c;
+	struct buffer outbuf;
+	size_t ret = 0;
+
+	TRACE_ENTER(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s);
+
+	if (!h1_get_buf(h1c, &h1c->obuf)) {
+		h1c->flags |= H1C_F_OUT_ALLOC;
+		TRACE_STATE("waiting for h1c obuf allocation", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+		goto end;
+	}
+
+	if (b_space_wraps(&h1c->obuf))
+		b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+	outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
+
+	/* first close the previous chunk if one is pending */
+	if (h1m->state == H1_MSG_CHUNK_CRLF) {
+		if (!chunk_memcat(&outbuf, "\r\n", 2))
+			goto full;
+		h1m->state = H1_MSG_CHUNK_SIZE;
+	}
+	if (!h1_append_chunk_size(&outbuf, len))
+		goto full;
+
+	h1m->state = H1_MSG_DATA;
+
+	TRACE_PROTO("H1 chunk info xferred", H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, 0, (size_t[]){ret});
+	b_add(&h1c->obuf, outbuf.data);
+	ret = outbuf.data;
+
+end:
+	TRACE_LEAVE(H1_EV_TX_DATA|H1_EV_TX_BODY, h1c->conn, h1s, NULL, (size_t[]){ret});
+	return ret;
+full:
+	TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+	h1c->flags |= H1C_F_OUT_FULL;
+	goto end;
+}
+
+/*
+ * Process outgoing data. It parses data and transfers them from the channel buffer into
+ * h1c->obuf. It returns the number of bytes parsed and transferred if > 0, or
+ * 0 if it couldn't proceed.
+ *
+ * This is the TX-side dispatcher: it drives the per-message H1 state machine
+ * (h1m->state) and delegates each phase to the matching h1_make_* helper
+ * until the HTX message, the byte budget or the output room is exhausted.
+ */
+static size_t h1_process_mux(struct h1c *h1c, struct buffer *buf, size_t count)
+{
+	struct h1s *h1s = h1c->h1s;
+	struct h1m *h1m;
+	struct htx *htx;
+	size_t ret, total = 0;
+
+	htx = htxbuf(buf);
+	TRACE_ENTER(H1_EV_TX_DATA, h1c->conn, h1s, htx, (size_t[]){count});
+
+	if (htx_is_empty(htx))
+		goto end;
+
+	if (h1s->flags & (H1S_F_INTERNAL_ERROR|H1S_F_PROCESSING_ERROR|H1S_F_TX_BLK))
+		goto end;
+
+	if (!h1_get_buf(h1c, &h1c->obuf)) {
+		h1c->flags |= H1C_F_OUT_ALLOC;
+		TRACE_STATE("waiting for h1c obuf allocation", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+		goto end;
+	}
+	/* frontend side emits the response, backend side emits the request */
+	h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
+
+	while (!(h1c->flags & H1C_F_OUT_FULL) &&
+	       !(h1s->flags & (H1S_F_PROCESSING_ERROR|H1S_F_TX_BLK)) &&
+	       !htx_is_empty(htx) && count) {
+		switch (h1m->state) {
+			case H1_MSG_RQBEFORE:
+				ret = h1_make_reqline(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_RPBEFORE:
+				ret = h1_make_stline(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_HDR_NAME:
+				ret = h1_make_headers(h1s, h1m, htx, count);
+				if (unlikely(h1m->state == H1_MSG_LAST_LF)) // in case of no header
+					ret += h1_make_eoh(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_LAST_LF:
+				ret = h1_make_eoh(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_CHUNK_SIZE:
+			case H1_MSG_CHUNK_CRLF:
+			case H1_MSG_DATA:
+				ret = h1_make_data(h1s, h1m, buf, count);
+				/* h1_make_data() may have swapped buffer areas
+				 * (zero-copy), so re-resolve the htx pointer */
+				if (ret > 0)
+					htx = htx_from_buf(buf);
+				if (unlikely(h1m->state == H1_MSG_TRAILERS)) // in case of no data
+					ret += h1_make_trailers(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_TUNNEL:
+				ret = h1_make_tunnel(h1s, h1m, buf, count);
+				if (ret > 0)
+					htx = htx_from_buf(buf);
+				break;
+
+			case H1_MSG_TRAILERS:
+				ret = h1_make_trailers(h1s, h1m, htx, count);
+				break;
+
+			case H1_MSG_DONE:
+				TRACE_STATE("unexpected data xferred in done state", H1_EV_TX_DATA|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+				__fallthrough;
+
+			default:
+				ret = 0;
+				htx->flags |= HTX_FL_PROCESSING_ERROR;
+				h1s->flags |= H1S_F_PROCESSING_ERROR;
+				se_fl_set(h1s->sd, SE_FL_ERROR);
+				TRACE_ERROR("processing error", H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+				break;
+		}
+
+		if (!ret)
+			break;
+		total += ret;
+		count -= ret;
+
+		if (h1m->state == H1_MSG_DONE) {
+			TRACE_USER((!(h1m->flags & H1_MF_RESP) ? "H1 request fully xferred" : "H1 response fully xferred"),
+				   H1_EV_TX_DATA, h1c->conn, h1s);
+
+			if (h1s->flags & H1S_F_RX_BLK) {
+				h1s->flags &= ~H1S_F_RX_BLK;
+				h1_wake_stream_for_recv(h1s);
+				TRACE_STATE("Re-enable input processing", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1s);
+			}
+		}
+	}
+
+	htx_to_buf(htx, buf);
+	if (!buf_room_for_htx_data(&h1c->obuf)) {
+		TRACE_STATE("h1c obuf full", H1_EV_TX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
+		h1c->flags |= H1C_F_OUT_FULL;
+	}
+
+  end:
+
+	/* Both the request and the response reached the DONE state. So set EOI
+	 * flag on the conn-stream. Most of time, the flag will already be set,
+	 * except for protocol upgrades. Report an error if data remains blocked
+	 * in the output buffer.
+	 */
+	if (h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE) {
+		se_fl_set(h1s->sd, SE_FL_EOI);
+		if (!htx_is_empty(htx)) {
+			htx->flags |= HTX_FL_PROCESSING_ERROR;
+			h1s->flags |= H1S_F_PROCESSING_ERROR;
+			se_fl_set(h1s->sd, SE_FL_ERROR);
+			TRACE_ERROR("txn done but data waiting to be sent, set error on h1c", H1_EV_H1C_ERR, h1c->conn, h1s);
+		}
+	}
+
+	TRACE_LEAVE(H1_EV_TX_DATA, h1c->conn, h1s, htx, (size_t[]){total});
+	return total;
+}
+
+/*********************************************************/
+/* functions below are I/O callbacks from the connection */
+/*********************************************************/
+/* Wake the data layer of <h1s> if it subscribed to receive events; the RECV
+ * subscription is consumed and the subs entry is cleared once empty. Safe to
+ * call with a NULL or unsubscribed <h1s> (no-op).
+ */
+static void h1_wake_stream_for_recv(struct h1s *h1s)
+{
+	if (h1s && h1s->subs && h1s->subs->events & SUB_RETRY_RECV) {
+		TRACE_POINT(H1_EV_STRM_WAKE, h1s->h1c->conn, h1s);
+		tasklet_wakeup(h1s->subs->tasklet);
+		h1s->subs->events &= ~SUB_RETRY_RECV;
+		if (!h1s->subs->events)
+			h1s->subs = NULL;
+	}
+}
+/* Wake the data layer of <h1s> if it subscribed to send events; the SEND
+ * subscription is consumed and the subs entry is cleared once empty. Safe to
+ * call with a NULL or unsubscribed <h1s> (no-op).
+ */
+static void h1_wake_stream_for_send(struct h1s *h1s)
+{
+	if (h1s && h1s->subs && h1s->subs->events & SUB_RETRY_SEND) {
+		TRACE_POINT(H1_EV_STRM_WAKE, h1s->h1c->conn, h1s);
+		tasklet_wakeup(h1s->subs->tasklet);
+		h1s->subs->events &= ~SUB_RETRY_SEND;
+		if (!h1s->subs->events)
+			h1s->subs = NULL;
+	}
+}
+
+/* alerts the data layer following this sequence :
+ *  - if the h1s' data layer is subscribed to recv, then it's woken up for recv
+ *  - if its subscribed to send, then it's woken up for send
+ *  - if it was subscribed to neither, its ->wake() callback is called
+ */
+static void h1_alert(struct h1s *h1s)
+{
+	if (h1s->subs) {
+		h1_wake_stream_for_recv(h1s);
+		h1_wake_stream_for_send(h1s);
+	}
+	else if (h1s_sc(h1s) && h1s_sc(h1s)->app_ops->wake != NULL) {
+		TRACE_POINT(H1_EV_STRM_WAKE, h1s->h1c->conn, h1s);
+		h1s_sc(h1s)->app_ops->wake(h1s_sc(h1s));
+	}
+}
+
+/* Try to send an HTTP error with h1c->errcode status code. It returns 1 on success
+ * and 0 on error. The flag H1C_F_ABRT_PENDING is set on the H1 connection for
+ * retryable errors (allocation error or buffer full). On success, the error is
+ * copied in the output buffer.
+*/
+static int h1_send_error(struct h1c *h1c)
+{
+	int rc = http_get_status_idx(h1c->errcode);
+	int ret = 0;
+
+	TRACE_ENTER(H1_EV_H1C_ERR, h1c->conn, 0, 0, (size_t[]){h1c->errcode});
+
+	/* Verify if the error is mapped on /dev/null or any empty file */
+	/// XXX: do a function !
+	if (h1c->px->replies[rc] &&
+	    h1c->px->replies[rc]->type == HTTP_REPLY_ERRMSG &&
+	    h1c->px->replies[rc]->body.errmsg &&
+	    b_is_null(h1c->px->replies[rc]->body.errmsg)) {
+		/* Empty error, so claim a success */
+		ret = 1;
+		goto out;
+	}
+
+	/* retryable: output buffer not usable right now */
+	if (h1c->flags & (H1C_F_OUT_ALLOC|H1C_F_OUT_FULL)) {
+		h1c->flags |= H1C_F_ABRT_PENDING;
+		goto out;
+	}
+
+	if (!h1_get_buf(h1c, &h1c->obuf)) {
+		h1c->flags |= (H1C_F_OUT_ALLOC|H1C_F_ABRT_PENDING);
+		TRACE_STATE("waiting for h1c obuf allocation", H1_EV_H1C_ERR|H1_EV_H1C_BLK, h1c->conn);
+		goto out;
+	}
+	ret = b_istput(&h1c->obuf, ist(http_err_msgs[rc]));
+	if (unlikely(ret <= 0)) {
+		if (!ret) {
+			/* ret == 0: not enough room, retry later */
+			h1c->flags |= (H1C_F_OUT_FULL|H1C_F_ABRT_PENDING);
+			TRACE_STATE("h1c obuf full", H1_EV_H1C_ERR|H1_EV_H1C_BLK, h1c->conn);
+			goto out;
+		}
+		else {
+			/* we cannot report this error, so claim a success */
+			ret = 1;
+		}
+	}
+
+	if (h1c->state == H1_CS_EMBRYONIC) {
+		BUG_ON(h1c->h1s == NULL || h1s_sc(h1c->h1s) != NULL);
+		TRACE_DEVEL("Abort embryonic H1S", H1_EV_H1C_ERR, h1c->conn, h1c->h1s);
+		h1s_destroy(h1c->h1s);
+	}
+
+	/* error is queued (or skipped): mark the connection aborted and close */
+	h1c->flags = (h1c->flags & ~(H1C_F_WAIT_NEXT_REQ|H1C_F_ABRT_PENDING)) | H1C_F_ABRTED;
+	h1_close(h1c);
+  out:
+	TRACE_LEAVE(H1_EV_H1C_ERR, h1c->conn);
+	return ret;
+}
+
+/* Try to send a 500 internal error. It relies on h1_send_error to send the
+ * error. This function takes care of incrementing stats and tracked counters.
+ */
+static int h1_handle_internal_err(struct h1c *h1c)
+{
+	struct session *sess = h1c->conn->owner;
+	int ret = 0;
+
+	/* account the request and the 5xx response on the frontend */
+	session_inc_http_req_ctr(sess);
+	proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[5]);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+
+	h1c->errcode = 500;
+	ret = h1_send_error(h1c);
+	sess_log(sess);
+	return ret;
+}
+
+/* Try to send an error because of a parsing error. By default a 400 bad request
+ * error is returned. But the status code may be specified by setting
+ * h1c->errcode. It relies on h1_send_error to send the error. This function
+ * takes care of incrementing stats and tracked counters.
+ */
+static int h1_handle_parsing_error(struct h1c *h1c)
+{
+	struct session *sess = h1c->conn->owner;
+	int ret = 0;
+
+	/* silently close idle keep-alive connections (or when the frontend
+	 * ignores probes): nothing was received, no error to report
+	 */
+	if (!b_data(&h1c->ibuf) && ((h1c->flags & H1C_F_WAIT_NEXT_REQ) || (sess->fe->options & PR_O_IGNORE_PRB))) {
+		h1c->flags = (h1c->flags & ~H1C_F_WAIT_NEXT_REQ) | H1C_F_ABRTED;
+		h1_close(h1c);
+		goto end;
+	}
+
+	session_inc_http_req_ctr(sess);
+	session_inc_http_err_ctr(sess);
+	proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+
+	if (!h1c->errcode)
+		h1c->errcode = 400;
+	ret = h1_send_error(h1c);
+	/* don't log empty-request aborts when "option dontlognull" is set */
+	if (b_data(&h1c->ibuf) || !(sess->fe->options & PR_O_NULLNOLOG))
+		sess_log(sess);
+
+  end:
+	return ret;
+}
+
+/* Try to send a 501 not implemented error. It relies on h1_send_error to send
+ * the error. This function takes care of incrementing stats and tracked
+ * counters.
+ */
+static int h1_handle_not_impl_err(struct h1c *h1c)
+{
+	struct session *sess = h1c->conn->owner;
+	int ret = 0;
+
+	/* silently close idle keep-alive connections (or when the frontend
+	 * ignores probes): nothing was received, no error to report
+	 */
+	if (!b_data(&h1c->ibuf) && ((h1c->flags & H1C_F_WAIT_NEXT_REQ) || (sess->fe->options & PR_O_IGNORE_PRB))) {
+		h1c->flags = (h1c->flags & ~H1C_F_WAIT_NEXT_REQ) | H1C_F_ABRTED;
+		h1_close(h1c);
+		goto end;
+	}
+
+	session_inc_http_req_ctr(sess);
+	proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+
+	h1c->errcode = 501;
+	ret = h1_send_error(h1c);
+	/* don't log empty-request aborts when "option dontlognull" is set */
+	if (b_data(&h1c->ibuf) || !(sess->fe->options & PR_O_NULLNOLOG))
+		sess_log(sess);
+
+  end:
+	return ret;
+}
+
+/* Try to send a 408 timeout error. It relies on h1_send_error to send the
+ * error. This function takes care of incrementing stats and tracked counters.
+ */
+static int h1_handle_req_tout(struct h1c *h1c)
+{
+	struct session *sess = h1c->conn->owner;
+	int ret = 0;
+
+	/* silently close idle keep-alive connections (or when the frontend
+	 * ignores probes): nothing was received, no error to report
+	 */
+	if (!b_data(&h1c->ibuf) && ((h1c->flags & H1C_F_WAIT_NEXT_REQ) || (sess->fe->options & PR_O_IGNORE_PRB))) {
+		h1c->flags = (h1c->flags & ~H1C_F_WAIT_NEXT_REQ) | H1C_F_ABRTED;
+		h1_close(h1c);
+		goto end;
+	}
+
+	session_inc_http_req_ctr(sess);
+	proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+
+	h1c->errcode = 408;
+	ret = h1_send_error(h1c);
+	/* don't log empty-request aborts when "option dontlognull" is set */
+	if (b_data(&h1c->ibuf) || !(sess->fe->options & PR_O_NULLNOLOG))
+		sess_log(sess);
+
+  end:
+	return ret;
+}
+
+
+/*
+ * Attempt to read data, and subscribe if none available
+ */
+static int h1_recv(struct h1c *h1c)
+{
+ struct connection *conn = h1c->conn;
+ size_t ret = 0, max;
+ int flags = 0;
+
+ TRACE_ENTER(H1_EV_H1C_RECV, h1c->conn);
+
+ if (h1c->wait_event.events & SUB_RETRY_RECV) {
+ TRACE_DEVEL("leaving on sub_recv", H1_EV_H1C_RECV, h1c->conn);
+ return (b_data(&h1c->ibuf));
+ }
+
+ if ((h1c->flags & H1C_F_WANT_FASTFWD) || !h1_recv_allowed(h1c)) {
+ TRACE_DEVEL("leaving on (want_fastfwde|!recv_allowed)", H1_EV_H1C_RECV, h1c->conn);
+ return 1;
+ }
+
+ if (!h1_get_buf(h1c, &h1c->ibuf)) {
+ h1c->flags |= H1C_F_IN_ALLOC;
+ TRACE_STATE("waiting for h1c ibuf allocation", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+ return 0;
+ }
+
+ /*
+ * If we only have a small amount of data, realign it,
+ * it's probably cheaper than doing 2 recv() calls.
+ */
+ if (b_data(&h1c->ibuf) > 0 && b_data(&h1c->ibuf) < 128)
+ b_slow_realign_ofs(&h1c->ibuf, trash.area, sizeof(struct htx));
+
+ max = buf_room_for_htx_data(&h1c->ibuf);
+
+ /* avoid useless reads after first responses */
+ if (!h1c->h1s ||
+ (!(h1c->flags & H1C_F_IS_BACK) && h1c->h1s->req.state == H1_MSG_RQBEFORE) ||
+ ((h1c->flags & H1C_F_IS_BACK) && h1c->h1s->res.state == H1_MSG_RPBEFORE)) {
+ flags |= CO_RFL_READ_ONCE;
+
+ /* we know that the first read will be constrained to a smaller
+ * read by the stream layer in order to respect the reserve.
+ * Reading too much will result in global.tune.maxrewrite being
+ * left at the end of the buffer, and in a very small read
+ * being performed again to complete them (typically 16 bytes
+ * freed in the index after headers were consumed) before
+ * another larger read. Instead, given that we know we're
+ * waiting for a header and we'll be limited, let's perform a
+ * shorter first read that the upper layer can retrieve by just
+ * a pointer swap and the next read will be doable at once in
+ * an empty buffer.
+ */
+ if (max > global.tune.bufsize - global.tune.maxrewrite)
+ max = global.tune.bufsize - global.tune.maxrewrite;
+ }
+
+ if (max) {
+ if (h1c->flags & H1C_F_IN_FULL) {
+ h1c->flags &= ~H1C_F_IN_FULL;
+ TRACE_STATE("h1c ibuf not full anymore", H1_EV_H1C_RECV|H1_EV_H1C_BLK);
+ }
+
+ if (!b_data(&h1c->ibuf)) {
+ /* try to pre-align the buffer like the rxbufs will be
+ * to optimize memory copies.
+ */
+ h1c->ibuf.head = sizeof(struct htx);
+ }
+ ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, &h1c->ibuf, max, flags);
+ HA_ATOMIC_ADD(&h1c->px_counters->bytes_in, ret);
+ }
+
+ if (conn_xprt_read0_pending(conn)) {
+ TRACE_DEVEL("read0 on connection", H1_EV_H1C_RECV, h1c->conn);
+ h1c->flags |= H1C_F_EOS;
+ }
+ if (h1c->conn->flags & CO_FL_ERROR) {
+ TRACE_DEVEL("connection error", H1_EV_H1C_RECV, h1c->conn);
+ h1c->flags |= H1C_F_ERROR;
+ }
+
+ if (max && !ret && h1_recv_allowed(h1c)) {
+ TRACE_STATE("failed to receive data, subscribing", H1_EV_H1C_RECV, h1c->conn);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+ else {
+ TRACE_DATA("data received or pending or connection error", H1_EV_H1C_RECV, h1c->conn, 0, 0, (size_t[]){ret});
+ h1_wake_stream_for_recv(h1c->h1s);
+ }
+
+ if (!b_data(&h1c->ibuf))
+ h1_release_buf(h1c, &h1c->ibuf);
+ else if (!buf_room_for_htx_data(&h1c->ibuf)) {
+ h1c->flags |= H1C_F_IN_FULL;
+ TRACE_STATE("h1c ibuf full", H1_EV_H1C_RECV|H1_EV_H1C_BLK);
+ }
+
+ TRACE_LEAVE(H1_EV_H1C_RECV, h1c->conn);
+ return !!ret || (h1c->flags & (H1C_F_EOS|H1C_F_ERROR));
+}
+
+
+/*
+ * Try to send data if possible
+ */
+static int h1_send(struct h1c *h1c)
+{
+ struct connection *conn = h1c->conn;
+ unsigned int flags = 0;
+ size_t ret;
+ int sent = 0;
+
+ TRACE_ENTER(H1_EV_H1C_SEND, h1c->conn);
+
+ if (h1c->flags & (H1C_F_ERROR|H1C_F_ERR_PENDING)) {
+ TRACE_DEVEL("leaving on H1C error|err_pending", H1_EV_H1C_SEND, h1c->conn);
+ b_reset(&h1c->obuf);
+ if (h1c->flags & H1C_F_EOS)
+ h1c->flags |= H1C_F_ERROR;
+ return 1;
+ }
+
+ if (!b_data(&h1c->obuf))
+ goto end;
+
+ if (h1c->flags & H1C_F_CO_MSG_MORE)
+ flags |= CO_SFL_MSG_MORE;
+ if (h1c->flags & H1C_F_CO_STREAMER)
+ flags |= CO_SFL_STREAMER;
+
+ ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, &h1c->obuf, b_data(&h1c->obuf), flags);
+ if (ret > 0) {
+ TRACE_DATA("data sent", H1_EV_H1C_SEND, h1c->conn, 0, 0, (size_t[]){ret});
+ if (h1c->flags & H1C_F_OUT_FULL) {
+ h1c->flags &= ~H1C_F_OUT_FULL;
+ TRACE_STATE("h1c obuf not full anymore", H1_EV_STRM_SEND|H1_EV_H1S_BLK, h1c->conn);
+ }
+ HA_ATOMIC_ADD(&h1c->px_counters->bytes_out, ret);
+ b_del(&h1c->obuf, ret);
+ sent = 1;
+ }
+
+ if (conn->flags & CO_FL_ERROR) {
+ /* connection error, nothing to send, clear the buffer to release it */
+ TRACE_DEVEL("connection error", H1_EV_H1C_SEND, h1c->conn);
+ h1c->flags |= H1C_F_ERR_PENDING;
+ if (h1c->flags & H1C_F_EOS)
+ h1c->flags |= H1C_F_ERROR;
+ else if (!(h1c->wait_event.events & SUB_RETRY_RECV)) {
+ /* EOS not seen, so subscribe for reads to be able to
+ * catch the error on the reading path. It is especially
+ * important if EOI was reached.
+ */
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+ b_reset(&h1c->obuf);
+ }
+
+ end:
+ if (!(h1c->flags & (H1C_F_OUT_FULL|H1C_F_OUT_ALLOC)))
+ h1_wake_stream_for_send(h1c->h1s);
+
+ /* We're done, no more to send */
+ if (!b_data(&h1c->obuf)) {
+ TRACE_DEVEL("leaving with everything sent", H1_EV_H1C_SEND, h1c->conn);
+ h1_release_buf(h1c, &h1c->obuf);
+ if (h1c->state == H1_CS_CLOSING) {
+ TRACE_STATE("process pending shutdown for writes", H1_EV_H1C_SEND, h1c->conn);
+ h1_shutw_conn(conn);
+ }
+ }
+ else if (!(h1c->wait_event.events & SUB_RETRY_SEND)) {
+ TRACE_STATE("more data to send, subscribing", H1_EV_H1C_SEND, h1c->conn);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h1c->wait_event);
+ }
+
+ TRACE_LEAVE(H1_EV_H1C_SEND, h1c->conn);
+ return sent || (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) || (h1c->state == H1_CS_CLOSED);
+}
+
+ /* callback called on any event by the connection handler.
+ * It applies changes and returns zero, or < 0 if it wants immediate
+ * destruction of the connection.
+ */
+ static int h1_process(struct h1c * h1c)
+ {
+ struct connection *conn = h1c->conn;
+
+ TRACE_ENTER(H1_EV_H1C_WAKE, conn);
+
+ /* Try to parse now the first block of a request, creating the H1 stream if necessary */
+ if (b_data(&h1c->ibuf) && /* Input data to be processed */
+ (h1c->state < H1_CS_RUNNING) && /* IDLE, EMBRYONIC or UPGRADING */
+ !(h1c->flags & (H1C_F_IN_SALLOC|H1C_F_ABRT_PENDING))) { /* No allocation failure on the stream rxbuf and no ERROR on the H1C */
+ struct h1s *h1s = h1c->h1s;
+ struct buffer *buf;
+ size_t count;
+
+ /* When it happens for a backend connection, we may release it (it is probably a 408) */
+ if (h1c->flags & H1C_F_IS_BACK)
+ goto release;
+
+ /* First of all handle H1 to H2 upgrade (no need to create the H1 stream) */
+ if (!(h1c->flags & H1C_F_WAIT_NEXT_REQ) && /* First request */
+ !(h1c->px->options2 & PR_O2_NO_H2_UPGRADE) && /* H2 upgrade supported by the proxy */
+ !(conn->mux->flags & MX_FL_NO_UPG)) { /* the current mux supports upgrades */
+ /* Try to match H2 preface before parsing the request headers. */
+ if (b_isteq(&h1c->ibuf, 0, b_data(&h1c->ibuf), ist(H2_CONN_PREFACE)) > 0) {
+ h1c->flags |= H1C_F_UPG_H2C;
+ if (h1c->state == H1_CS_UPGRADING) {
+ BUG_ON(!h1s);
+ se_fl_set(h1s->sd, SE_FL_EOI|SE_FL_EOS); /* Set EOS here to release the SC */
+ }
+ TRACE_STATE("release h1c to perform H2 upgrade ", H1_EV_RX_DATA|H1_EV_H1C_WAKE);
+ goto release;
+ }
+ }
+
+ /* Create the H1 stream if not already there */
+ if (!h1s) {
+ h1s = h1c_frt_stream_new(h1c, NULL, h1c->conn->owner);
+ if (!h1s) {
+ b_reset(&h1c->ibuf);
+ h1_handle_internal_err(h1c);
+ TRACE_ERROR("alloc error", H1_EV_H1C_WAKE|H1_EV_H1C_ERR);
+ goto no_parsing;
+ }
+ }
+
+ /* first data after the handshake: end of the idle period */
+ if (h1s->sess->t_idle == -1)
+ h1s->sess->t_idle = ns_to_ms(now_ns - h1s->sess->accept_ts) - h1s->sess->t_handshake;
+
+ /* Get the stream rxbuf */
+ buf = h1_get_buf(h1c, &h1s->rxbuf);
+ if (!buf) {
+ h1c->flags |= H1C_F_IN_SALLOC;
+ TRACE_STATE("waiting for stream rxbuf allocation", H1_EV_H1C_WAKE|H1_EV_H1C_BLK, h1c->conn);
+ return 0;
+ }
+
+ /* keep the HTX overhead and the rewrite reserve out of the budget */
+ count = (buf->size - sizeof(struct htx) - global.tune.maxrewrite);
+ h1_process_demux(h1c, buf, count);
+ h1_release_buf(h1c, &h1s->rxbuf);
+ h1_set_idle_expiration(h1c);
+ if (h1c->state < H1_CS_RUNNING) {
+ if (h1s->flags & H1S_F_INTERNAL_ERROR) {
+ h1_handle_internal_err(h1c);
+ TRACE_ERROR("internal error detected", H1_EV_H1C_WAKE|H1_EV_H1C_ERR);
+ }
+ else if (h1s->flags & H1S_F_NOT_IMPL_ERROR) {
+ h1_handle_not_impl_err(h1c);
+ TRACE_ERROR("not-implemented error detected", H1_EV_H1C_WAKE|H1_EV_H1C_ERR);
+ }
+ else if (h1s->flags & H1S_F_PARSING_ERROR || se_fl_test(h1s->sd, SE_FL_ERROR)) {
+ h1_handle_parsing_error(h1c);
+ TRACE_ERROR("parsing error detected", H1_EV_H1C_WAKE|H1_EV_H1C_ERR);
+ }
+ else {
+ TRACE_STATE("Incomplete message, subscribing", H1_EV_RX_DATA|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn, h1s);
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+ }
+ }
+
+ /* Reached after parsing, or directly when no H1 stream could be allocated */
+ no_parsing:
+ h1_send(h1c);
+
+ /* H1 connection must be released ASAP if:
+ * - an error occurred on the H1C or
+ * - a read0 was received or
+ * - a silent shutdown was emitted and all outgoing data sent
+ */
+ if ((h1c->flags & (H1C_F_EOS|H1C_F_ERROR|H1C_F_ABRT_PENDING|H1C_F_ABRTED)) ||
+ (h1c->state >= H1_CS_CLOSING && (h1c->flags & H1C_F_SILENT_SHUT) && !b_data(&h1c->obuf))) {
+ if (h1c->state != H1_CS_RUNNING) {
+ /* No stream connector or upgrading */
+ if (h1c->state < H1_CS_RUNNING && !(h1c->flags & (H1C_F_IS_BACK|H1C_F_ABRT_PENDING))) {
+ /* shutdown for reads and no error on the frontend connection: Send an error */
+ if (h1_handle_parsing_error(h1c))
+ h1_send(h1c);
+ }
+ else if (h1c->flags & H1C_F_ABRT_PENDING) {
+ /* Handle pending error, if any (only possible on frontend connection) */
+ BUG_ON(h1c->flags & H1C_F_IS_BACK);
+ if (h1_send_error(h1c))
+ h1_send(h1c);
+ }
+ else {
+ h1_close(h1c);
+ TRACE_STATE("close h1c", H1_EV_H1S_END, h1c->conn);
+ }
+
+ /* If there is some pending outgoing data or error, just wait */
+ if (h1c->state == H1_CS_CLOSING || (h1c->flags & H1C_F_ABRT_PENDING))
+ goto end;
+
+ /* Otherwise we can release the H1 connection */
+ goto release;
+ }
+ else {
+ struct h1s *h1s = h1c->h1s;
+
+ /* Here there is still a H1 stream with a stream connector.
+ * Report an error at the stream level and wake up the stream
+ */
+ BUG_ON(!h1s);
+
+ if (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) {
+ se_fl_set_error(h1s->sd);
+ TRACE_STATE("report (ERR_PENDING|ERROR) to SE", H1_EV_H1C_RECV, conn, h1s);
+ }
+ TRACE_POINT(H1_EV_STRM_WAKE, h1c->conn, h1s);
+ h1_alert(h1s);
+ }
+ }
+
+ /* release the input buffer when empty so it can be recycled */
+ if (!b_data(&h1c->ibuf))
+ h1_release_buf(h1c, &h1c->ibuf);
+
+ /* Check if a soft-stop is in progress.
+ * Release idling front connection if this is the case.
+ */
+ if (!(h1c->flags & H1C_F_IS_BACK)) {
+ if (unlikely(h1c->px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+ if (!(h1c->px->options & PR_O_IDLE_CLOSE_RESP) &&
+ h1c->flags & H1C_F_WAIT_NEXT_REQ) {
+
+ int send_close = 1;
+ /* If a close-spread-time option is set, we want to avoid
+ * closing all the active HTTP2 connections at once so we add a
+ * random factor that will spread the closing.
+ */
+ if (tick_isset(global.close_spread_end)) {
+ int remaining_window = tick_remain(now_ms, global.close_spread_end);
+ if (remaining_window) {
+ /* This should increase the closing rate the
+ * further along the window we are.
+ */
+ send_close = (remaining_window <= statistical_prng_range(global.close_spread_time));
+ }
+ }
+ else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
+ send_close = 0; /* let the client close his connection himself */
+ if (send_close)
+ goto release;
+ }
+ }
+ }
+
+ if (h1c->state == H1_CS_RUNNING && (h1c->flags & H1C_F_WANT_FASTFWD) && !h1s_data_pending(h1c->h1s)) {
+ TRACE_DEVEL("xprt rcv_buf blocked (want_fastfwd), notify h1s for recv", H1_EV_H1C_RECV, h1c->conn);
+ h1_wake_stream_for_recv(h1c->h1s);
+ }
+
+ end:
+ h1_refresh_timeout(h1c);
+ TRACE_LEAVE(H1_EV_H1C_WAKE, conn);
+ return 0;
+
+ release:
+ if (h1c->state == H1_CS_UPGRADING) {
+ struct h1s *h1s = h1c->h1s;
+
+ /* Don't release the H1 connection right now, we must destroy
+ * the attached SC first */
+ BUG_ON(!h1s);
+
+ if (h1c->flags & H1C_F_EOS) {
+ se_fl_set(h1s->sd, SE_FL_EOI|SE_FL_EOS);
+ TRACE_STATE("report EOS to SE", H1_EV_H1C_RECV, conn, h1s);
+ }
+ if (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) {
+ se_fl_set_error(h1s->sd);
+ TRACE_STATE("report (ERR_PENDING|ERROR) to SE", H1_EV_H1C_RECV, conn, h1s);
+ }
+ h1_alert(h1s);
+ TRACE_DEVEL("waiting to release the SC before releasing the connection", H1_EV_H1C_WAKE);
+ }
+ else {
+ h1_release(h1c);
+ TRACE_DEVEL("leaving after releasing the connection", H1_EV_H1C_WAKE);
+ }
+ return -1;
+ }
+
+ /* I/O tasklet handler for the H1 connection. It performs the pending send
+ * and receive operations then processes the connection. It returns the
+ * tasklet, or NULL if the connection was destroyed or if the tasklet was
+ * freed after a takeover by another thread.
+ */
+ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
+ {
+ struct connection *conn;
+ struct tasklet *tl = (struct tasklet *)t;
+ int conn_in_list;
+ struct h1c *h1c = ctx;
+ int ret = 0;
+
+ if (state & TASK_F_USR1) {
+ /* the tasklet was idling on an idle connection, it might have
+ * been stolen, let's be careful!
+ */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ if (tl->context == NULL) {
+ /* The connection has been taken over by another thread,
+ * we're no longer responsible for it, so just free the
+ * tasklet, and do nothing.
+ */
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ tasklet_free(tl);
+ return NULL;
+ }
+ conn = h1c->conn;
+ TRACE_POINT(H1_EV_H1C_WAKE, conn);
+
+ /* Remove the connection from the list, to be sure nobody attempts
+ * to use it while we handle the I/O events
+ */
+ conn_in_list = conn->flags & CO_FL_LIST_MASK;
+ if (conn_in_list)
+ conn_delete_from_tree(conn);
+
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ } else {
+ /* we're certain the connection was not in an idle list */
+ conn = h1c->conn;
+ TRACE_ENTER(H1_EV_H1C_WAKE, conn);
+ conn_in_list = 0;
+ }
+
+ if (!(h1c->wait_event.events & SUB_RETRY_SEND))
+ ret = h1_send(h1c);
+ if (!(h1c->wait_event.events & SUB_RETRY_RECV))
+ ret |= h1_recv(h1c);
+ if (ret || b_data(&h1c->ibuf))
+ ret = h1_process(h1c);
+
+ /* If we were in an idle list, we want to add the connection back into
+ * it, unless h1_process() returned -1, which means it has destroyed
+ * the connection (testing !ret is enough: if h1_process() wasn't
+ * called then ret will be 0 anyway).
+ */
+ if (ret < 0)
+ t = NULL;
+
+ if (!ret && conn_in_list) {
+ struct server *srv = objt_server(conn->target);
+
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ _srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+ return t;
+ }
+
+ /* Callback called by the lower layer on connection activity. It performs a
+ * synchronous send then processes the connection. When processing succeeded
+ * (the connection was not destroyed) and the H1C is upgrading or running,
+ * the attached stream is woken up. Returns the h1_process() status.
+ */
+ static int h1_wake(struct connection *conn)
+ {
+ struct h1c *h1c = conn->ctx;
+ int status;
+
+ TRACE_POINT(H1_EV_H1C_WAKE, conn);
+
+ h1_send(h1c);
+ status = h1_process(h1c);
+ if (status == 0 && (h1c->state == H1_CS_UPGRADING || h1c->state == H1_CS_RUNNING))
+ h1_alert(h1c->h1s);
+
+ return status;
+ }
+
+ /* Connection timeout management. The principle is that if there's no receipt
+ * nor sending for a certain amount of time, the connection is closed. The
+ * task is destroyed once the H1 connection is released (or when it was
+ * stolen by another thread). Returns the task if it must be rescheduled,
+ * otherwise NULL.
+ */
+ struct task *h1_timeout_task(struct task *t, void *context, unsigned int state)
+ {
+ struct h1c *h1c = context;
+ int expired = tick_is_expired(t->expire, now_ms);
+
+ TRACE_ENTER(H1_EV_H1C_WAKE, h1c ? h1c->conn : NULL);
+
+ if (h1c) {
+ /* Make sure nobody stole the connection from us */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+
+ /* Somebody already stole the connection from us, so we should not
+ * free it, we just have to free the task.
+ */
+ if (!t->context) {
+ h1c = NULL;
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ goto do_leave;
+ }
+
+ if (!expired) {
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ TRACE_DEVEL("leaving (not expired)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
+ return t;
+ }
+
+ /* If a stream connector is still attached and ready to the mux, wait for the
+ * stream's timeout
+ */
+ if (h1c->state == H1_CS_RUNNING) {
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ t->expire = TICK_ETERNITY;
+ TRACE_DEVEL("leaving (SC still attached)", H1_EV_H1C_WAKE, h1c->conn, h1c->h1s);
+ return t;
+ }
+
+ /* Try to send an error to the client */
+ if (h1c->state != H1_CS_CLOSING && !(h1c->flags & (H1C_F_IS_BACK|H1C_F_ERROR|H1C_F_ABRT_PENDING))) {
+ TRACE_DEVEL("timeout error detected", H1_EV_H1C_WAKE|H1_EV_H1C_ERR, h1c->conn, h1c->h1s);
+ if (h1_handle_req_tout(h1c))
+ h1_send(h1c);
+ if (b_data(&h1c->obuf) || (h1c->flags & H1C_F_ABRT_PENDING)) {
+ h1_refresh_timeout(h1c);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ return t;
+ }
+ }
+
+ if (h1c->h1s && !se_fl_test(h1c->h1s->sd, SE_FL_ORPHAN)) {
+ /* Don't release the H1 connection right now, we must destroy the
+ * attached SC first. */
+ se_fl_set(h1c->h1s->sd, SE_FL_EOS | SE_FL_ERROR);
+ h1_alert(h1c->h1s);
+ h1_refresh_timeout(h1c);
+ /* BUG: this unlock used the OTHER_LOCK label while the lock was
+ * taken with IDLE_CONNS_LOCK, confusing the lock debugging and
+ * profiling accounting. Use the matching label.
+ */
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ TRACE_DEVEL("waiting to release the SC before releasing the connection", H1_EV_H1C_WAKE);
+ return t;
+ }
+
+ /* We're about to destroy the connection, so make sure nobody attempts
+ * to steal it from us.
+ */
+ if (h1c->conn->flags & CO_FL_LIST_MASK)
+ conn_delete_from_tree(h1c->conn);
+
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+
+ do_leave:
+ task_destroy(t);
+
+ if (!h1c) {
+ /* resources were already deleted */
+ TRACE_DEVEL("leaving (not more h1c)", H1_EV_H1C_WAKE);
+ return NULL;
+ }
+
+ h1c->task = NULL;
+ h1_release(h1c);
+ TRACE_LEAVE(H1_EV_H1C_WAKE);
+ return NULL;
+ }
+
+/*******************************************/
+/* functions below are used by the streams */
+/*******************************************/
+
+/*
+ * Attach a new stream to a connection
+ * (Used for outgoing connections)
+ */
+static int h1_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
+{
+ struct h1c *h1c = conn->ctx;
+ struct h1s *h1s;
+
+ /* this connection is no more idle (if it was at all) */
+ h1c->flags &= ~H1C_F_SILENT_SHUT;
+
+ TRACE_ENTER(H1_EV_STRM_NEW, conn);
+ if (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) {
+ TRACE_ERROR("h1c on error", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, conn);
+ goto err;
+ }
+
+ h1s = h1c_bck_stream_new(h1c, sd->sc, sess);
+ if (h1s == NULL) {
+ TRACE_ERROR("h1s creation failure", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, conn);
+ goto err;
+ }
+
+ /* the connection is not idle anymore, let's mark this */
+ HA_ATOMIC_AND(&h1c->wait_event.tasklet->state, ~TASK_F_USR1);
+ xprt_set_used(conn, conn->xprt, conn->xprt_ctx);
+
+ TRACE_LEAVE(H1_EV_STRM_NEW, conn, h1s);
+ return 0;
+ err:
+ TRACE_DEVEL("leaving on error", H1_EV_STRM_NEW|H1_EV_STRM_END|H1_EV_STRM_ERR, conn);
+ return -1;
+}
+
+ /* Retrieves a valid stream connector from this connection, or returns NULL.
+ * For this mux, it's easy as we can only store a single stream connector.
+ */
+ static struct stconn *h1_get_first_sc(const struct connection *conn)
+ {
+ struct h1c *h1c = conn->ctx;
+
+ return (h1c->h1s ? h1s_sc(h1c->h1s) : NULL);
+ }
+
+ /* Destroy callback for the mux: release the H1 connection unless a stream
+ * still exists and the connection still points to this context.
+ */
+ static void h1_destroy(void *ctx)
+ {
+ struct h1c *h1c = ctx;
+
+ TRACE_POINT(H1_EV_H1C_END, h1c->conn);
+ if (h1c->h1s && h1c->conn->ctx == h1c)
+ return;
+ h1_release(h1c);
+ }
+
+/*
+ * Detach the stream from the connection and possibly release the connection.
+ */
+static void h1_detach(struct sedesc *sd)
+{
+ struct h1s *h1s = sd->se;
+ struct h1c *h1c;
+ struct session *sess;
+ int is_not_first;
+
+ TRACE_ENTER(H1_EV_STRM_END, h1s ? h1s->h1c->conn : NULL, h1s);
+
+ if (!h1s) {
+ TRACE_LEAVE(H1_EV_STRM_END);
+ return;
+ }
+
+ sess = h1s->sess;
+ h1c = h1s->h1c;
+
+ sess->accept_date = date;
+ sess->accept_ts = now_ns;
+ sess->t_handshake = 0;
+ sess->t_idle = -1;
+
+ is_not_first = h1s->flags & H1S_F_NOT_FIRST;
+ h1s_destroy(h1s);
+
+ if (h1c->state == H1_CS_IDLE && (h1c->flags & H1C_F_IS_BACK)) {
+ /* this connection may be killed at any moment, we want it to
+ * die "cleanly" (i.e. only an RST).
+ */
+ h1c->flags |= H1C_F_SILENT_SHUT;
+
+ /* If there are any excess server data in the input buffer,
+ * release it and close the connection ASAP (some data may
+ * remain in the output buffer). This happens if a server sends
+ * invalid responses. So in such case, we don't want to reuse
+ * the connection
+ */
+ if (b_data(&h1c->ibuf)) {
+ h1_release_buf(h1c, &h1c->ibuf);
+ h1_close(h1c);
+ TRACE_DEVEL("remaining data on detach, kill connection", H1_EV_STRM_END|H1_EV_H1C_END);
+ goto release;
+ }
+
+ if (h1c->conn->flags & CO_FL_PRIVATE) {
+ /* Add the connection in the session server list, if not already done */
+ if (!session_add_conn(sess, h1c->conn, h1c->conn->target)) {
+ h1c->conn->owner = NULL;
+ h1c->conn->mux->destroy(h1c);
+ goto end;
+ }
+ /* Always idle at this step */
+ if (session_check_idle_conn(sess, h1c->conn)) {
+ /* The connection got destroyed, let's leave */
+ TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);
+ goto end;
+ }
+ }
+ else {
+ if (h1c->conn->owner == sess)
+ h1c->conn->owner = NULL;
+
+ /* mark that the tasklet may lose its context to another thread and
+ * that the handler needs to check it under the idle conns lock.
+ */
+ HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1);
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ xprt_set_idle(h1c->conn, h1c->conn->xprt, h1c->conn->xprt_ctx);
+
+ if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn, is_not_first)) {
+ /* The server doesn't want it, let's kill the connection right away */
+ h1c->conn->mux->destroy(h1c);
+ TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);
+ goto end;
+ }
+ /* At this point, the connection has been added to the
+ * server idle list, so another thread may already have
+ * hijacked it, so we can't do anything with it.
+ */
+ return;
+ }
+ }
+
+ release:
+ /* We don't want to close right now unless the connection is in error or shut down for writes */
+ if ((h1c->flags & H1C_F_ERROR) ||
+ (h1c->state == H1_CS_CLOSED) ||
+ (h1c->state == H1_CS_CLOSING && !b_data(&h1c->obuf)) ||
+ !h1c->conn->owner) {
+ TRACE_DEVEL("killing dead connection", H1_EV_STRM_END, h1c->conn);
+ h1_release(h1c);
+ }
+ else {
+ if (h1c->state == H1_CS_IDLE) {
+ /* If we have a new request, process it immediately or
+ * subscribe for reads waiting for new data
+ */
+ if (unlikely(b_data(&h1c->ibuf))) {
+ if (h1_process(h1c) == -1)
+ goto end;
+ }
+ else
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+ h1_set_idle_expiration(h1c);
+ h1_refresh_timeout(h1c);
+ }
+ end:
+ TRACE_LEAVE(H1_EV_STRM_END);
+}
+
+
+ /* Shutdown for reads requested by the stream: nothing to do at the mux level
+ * for H1, only a trace is emitted for debugging purposes.
+ */
+ static void h1_shutr(struct stconn *sc, enum co_shr_mode mode)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+
+ if (h1s)
+ TRACE_POINT(H1_EV_STRM_SHUT, h1s->h1c->conn, h1s, 0, (size_t[]){mode});
+ }
+
+ /* Shutdown for writes requested by the stream. The connection is kept alive
+ * when an upgrade is in progress or when keep-alive is wanted and both
+ * messages are finished; otherwise it is closed, silently when <mode> is not
+ * CO_SHW_NORMAL, and the shutdown is performed now if nothing remains in the
+ * output buffer.
+ */
+ static void h1_shutw(struct stconn *sc, enum co_shw_mode mode)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+ struct h1c *h1c;
+
+ if (!h1s)
+ return;
+ h1c = h1s->h1c;
+
+ TRACE_ENTER(H1_EV_STRM_SHUT, h1c->conn, h1s, 0, (size_t[]){mode});
+
+ if (se_fl_test(h1s->sd, SE_FL_KILL_CONN)) {
+ TRACE_STATE("stream wants to kill the connection", H1_EV_STRM_SHUT, h1c->conn, h1s);
+ goto do_shutw;
+ }
+ if (h1c->state == H1_CS_CLOSING || (h1c->flags & (H1C_F_EOS|H1C_F_ERR_PENDING|H1C_F_ERROR))) {
+ TRACE_STATE("shutdown on connection (EOS || CLOSING || ERROR)", H1_EV_STRM_SHUT, h1c->conn, h1s);
+ goto do_shutw;
+ }
+
+ if (h1c->state == H1_CS_UPGRADING) {
+ TRACE_STATE("keep connection alive (UPGRADING)", H1_EV_STRM_SHUT, h1c->conn, h1s);
+ goto end;
+ }
+ if (((h1s->flags & H1S_F_WANT_KAL) && h1s->req.state == H1_MSG_DONE && h1s->res.state == H1_MSG_DONE)) {
+ TRACE_STATE("keep connection alive (want_kal)", H1_EV_STRM_SHUT, h1c->conn, h1s);
+ goto end;
+ }
+
+ do_shutw:
+ h1_close(h1c);
+ if (mode != CO_SHW_NORMAL)
+ h1c->flags |= H1C_F_SILENT_SHUT;
+
+ /* perform the shutdown now only if no more data are pending */
+ if (!b_data(&h1c->obuf))
+ h1_shutw_conn(h1c->conn);
+ end:
+ TRACE_LEAVE(H1_EV_STRM_SHUT, h1c->conn, h1s);
+ }
+
+ /* Really performs the shutdown for writes on the connection: closes the H1C,
+ * shuts the transport and the socket down (cleanly unless H1C_F_SILENT_SHUT
+ * is set) and wakes the I/O tasklet up when nothing is subscribed. Does
+ * nothing more if the socket side was already shut for writes.
+ */
+ static void h1_shutw_conn(struct connection *conn)
+ {
+ struct h1c *h1c = conn->ctx;
+
+ TRACE_ENTER(H1_EV_H1C_END, conn);
+ h1_close(h1c);
+ if (conn->flags & CO_FL_SOCK_WR_SH)
+ return;
+
+ conn_xprt_shutw(conn);
+ conn_sock_shutw(conn, !(h1c->flags & H1C_F_SILENT_SHUT));
+
+ if (h1c->wait_event.tasklet && !h1c->wait_event.events)
+ tasklet_wakeup(h1c->wait_event.tasklet);
+
+ TRACE_LEAVE(H1_EV_H1C_END, conn);
+ }
+
+ /* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+ static int h1_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+
+ if (!h1s)
+ return 0;
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(h1s->subs && h1s->subs != es);
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("unsubscribe(recv)", H1_EV_STRM_RECV, h1s->h1c->conn, h1s);
+ if (event_type & SUB_RETRY_SEND)
+ TRACE_DEVEL("unsubscribe(send)", H1_EV_STRM_SEND, h1s->h1c->conn, h1s);
+
+ /* drop the requested events and forget the subscriber once none remain */
+ es->events &= ~event_type;
+ if (!es->events)
+ h1s->subs = NULL;
+
+ return 0;
+ }
+
+ /* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0, unless
+ * the stream connector <sc> was already detached, in which case it will return
+ * -1.
+ */
+ static int h1_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+ struct h1c *h1c;
+
+ if (!h1s)
+ return -1;
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(h1s->subs && h1s->subs != es);
+
+ /* register the new event(s); a single subscriber handles both directions */
+ es->events |= event_type;
+ h1s->subs = es;
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("subscribe(recv)", H1_EV_STRM_RECV, h1s->h1c->conn, h1s);
+
+
+ if (event_type & SUB_RETRY_SEND) {
+ TRACE_DEVEL("subscribe(send)", H1_EV_STRM_SEND, h1s->h1c->conn, h1s);
+ /*
+ * If the stconn attempts to subscribe, and the
+ * mux isn't subscribed to the connection, then it
+ * probably means the connection wasn't established
+ * yet, so we have to subscribe.
+ */
+ h1c = h1s->h1c;
+ if (!(h1c->wait_event.events & SUB_RETRY_SEND))
+ h1c->conn->xprt->subscribe(h1c->conn,
+ h1c->conn->xprt_ctx,
+ SUB_RETRY_SEND,
+ &h1c->wait_event);
+ }
+ return 0;
+ }
+
+ /* Called from the upper layer, to receive data.
+ *
+ * The caller is responsible for defragmenting <buf> if necessary. But <flags>
+ * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
+ * means the caller wants to flush input data (from the mux buffer and the
+ * channel buffer) to be able to use fast-forwarding.
+ * If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
+ * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
+ * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
+ * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
+ * copy as much data as possible.
+ * Returns the number of bytes transferred into <buf>.
+ */
+ static size_t h1_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+ struct h1c *h1c = h1s->h1c;
+ struct h1m *h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->req : &h1s->res);
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){count});
+
+ /* Do nothing for now if not RUNNING (implies UPGRADING) */
+ if (h1c->state < H1_CS_RUNNING) {
+ TRACE_DEVEL("h1c not ready yet", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+ goto end;
+ }
+
+ /* demux only if the input buffer allocation did not fail */
+ if (!(h1c->flags & H1C_F_IN_ALLOC))
+ ret = h1_process_demux(h1c, buf, count);
+ else
+ TRACE_DEVEL("h1c ibuf not allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK, h1c->conn);
+
+ if ((flags & CO_RFL_BUF_FLUSH) && se_fl_test(h1s->sd, SE_FL_MAY_FASTFWD_PROD)) {
+ h1c->flags |= H1C_F_WANT_FASTFWD;
+ TRACE_STATE("Block xprt rcv_buf to flush stream's buffer (want_fastfwd)", H1_EV_STRM_RECV, h1c->conn, h1s);
+ }
+ else {
+ if (((flags & CO_RFL_KEEP_RECV) || (h1m->state != H1_MSG_DONE)) && !(h1c->wait_event.events & SUB_RETRY_RECV))
+ h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+ }
+
+ end:
+ TRACE_LEAVE(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){ret});
+ return ret;
+ }
+
+
+ /* Called from the upper layer, to send data. Returns the number of bytes
+ * consumed from <buf> (possibly 0 when blocked or on error).
+ */
+ static size_t h1_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+ struct h1c *h1c;
+ size_t total = 0;
+
+ if (!h1s)
+ return 0;
+ h1c = h1s->h1c;
+
+ TRACE_ENTER(H1_EV_STRM_SEND, h1c->conn, h1s, 0, (size_t[]){count});
+
+ /* If we're not connected yet, or we're waiting for a handshake, stop
+ * now, as we don't want to remove everything from the channel buffer
+ * before we're sure we can send it.
+ */
+ if (h1c->conn->flags & CO_FL_WAIT_XPRT) {
+ TRACE_LEAVE(H1_EV_STRM_SEND, h1c->conn, h1s);
+ return 0;
+ }
+
+ if (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) {
+ se_fl_set_error(h1s->sd);
+ TRACE_ERROR("H1C on error, leaving in error", H1_EV_STRM_SEND|H1_EV_H1C_ERR|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
+ return 0;
+ }
+
+ /* Inherit some flags from the upper layer */
+ h1c->flags &= ~(H1C_F_CO_MSG_MORE|H1C_F_CO_STREAMER);
+ if (flags & CO_SFL_MSG_MORE)
+ h1c->flags |= H1C_F_CO_MSG_MORE;
+ if (flags & CO_SFL_STREAMER)
+ h1c->flags |= H1C_F_CO_STREAMER;
+
+ /* format data into the output buffer and try to send it, until
+ * everything is consumed or sending is blocked
+ */
+ while (count) {
+ size_t ret = 0;
+
+ if (!(h1c->flags & (H1C_F_OUT_FULL|H1C_F_OUT_ALLOC)))
+ ret = h1_process_mux(h1c, buf, count);
+ else
+ TRACE_DEVEL("h1c obuf not allocated", H1_EV_STRM_SEND|H1_EV_H1S_BLK, h1c->conn, h1s);
+
+ if (!ret)
+ break;
+
+ if ((count - ret) > 0)
+ h1c->flags |= H1C_F_CO_MSG_MORE;
+
+ total += ret;
+ count -= ret;
+
+ if ((h1c->wait_event.events & SUB_RETRY_SEND) || !h1_send(h1c))
+ break;
+
+ if ((h1c->conn->flags & (CO_FL_ERROR|CO_FL_SOCK_WR_SH)))
+ break;
+ }
+
+ if (h1c->flags & (H1C_F_ERR_PENDING|H1C_F_ERROR)) {
+ // FIXME: following test was removed :
+ // ((h1c->conn->flags & CO_FL_ERROR) && (se_fl_test(h1s->sd, SE_FL_EOI | SE_FL_EOS) || !b_data(&h1c->ibuf)))) {
+ se_fl_set_error(h1s->sd);
+ TRACE_ERROR("reporting error to the app-layer stream", H1_EV_STRM_SEND|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
+ }
+
+ h1_refresh_timeout(h1c);
+ TRACE_LEAVE(H1_EV_STRM_SEND, h1c->conn, h1s, 0, (size_t[]){total});
+ return total;
+ }
+
+ /* Returns the sedesc at the other end of the xref link attached to the H1
+ * stream, or NULL if there is none. The xref lock is only held for the time
+ * needed to dereference the peer.
+ */
+ static inline struct sedesc *h1s_opposite_sd(struct h1s *h1s)
+ {
+ struct sedesc *sdo = NULL;
+ struct xref *peer = xref_get_peer_and_lock(&h1s->sd->xref);
+
+ if (peer) {
+ sdo = container_of(peer, struct sedesc, xref);
+ xref_unlock(&h1s->sd->xref, peer);
+ }
+ return sdo;
+ }
+
+ /* Attempt to negotiate zero-copy (fast) forwarding with the producer side.
+ * Returns the number of bytes the caller may forward, or 0 when blocked; in
+ * the latter case the reason is reported in h1s->sd->iobuf.flags.
+ */
+ static size_t h1_nego_ff(struct stconn *sc, struct buffer *input, size_t count, unsigned int may_splice)
+ {
+ struct h1s *h1s = __sc_mux_strm(sc);
+ struct h1c *h1c = h1s->h1c;
+ struct h1m *h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
+ size_t ret = 0;
+
+ TRACE_ENTER(H1_EV_STRM_SEND, h1c->conn, h1s, 0, (size_t[]){count});
+
+
+ /* fast-forwarding globally disabled for H1 sends */
+ if (global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H1_SND) {
+ h1s->sd->iobuf.flags |= IOBUF_FL_NO_FF;
+ goto out;
+ }
+
+ /* TODO: add check on curr_len if CLEN */
+
+ if (h1m->flags & H1_MF_CHNK) {
+ if (h1m->curr_len) {
+ BUG_ON(h1m->state != H1_MSG_DATA);
+ if (count > h1m->curr_len)
+ count = h1m->curr_len;
+ }
+ else {
+ /* start a new chunk sized to the forwarded amount */
+ BUG_ON(h1m->state != H1_MSG_CHUNK_CRLF && h1m->state != H1_MSG_CHUNK_SIZE);
+ if (!h1_make_chunk(h1s, h1m, count))
+ goto out;
+ h1m->curr_len = count;
+ }
+ }
+
+ /* Use kernel splicing if it is supported by the sender and if there
+ * are no input data _AND_ no output data.
+ *
+ * TODO: It may be good to add a flag to send obuf data first if any,
+ * and then data in pipe, or the opposite. For now, it is not
+ * supported to mix data.
+ */
+ if (!b_data(input) && !b_data(&h1c->obuf) && may_splice) {
+ #if defined(USE_LINUX_SPLICE)
+ if (h1c->conn->xprt->snd_pipe && (h1s->sd->iobuf.pipe || (pipes_used < global.maxpipes && (h1s->sd->iobuf.pipe = get_pipe())))) {
+ h1s->sd->iobuf.offset = 0;
+ h1s->sd->iobuf.data = 0;
+ ret = count;
+ goto out;
+ }
+ #endif
+ h1s->sd->iobuf.flags |= IOBUF_FL_NO_SPLICING;
+ TRACE_DEVEL("Unable to allocate pipe for splicing, fallback to buffer", H1_EV_STRM_SEND, h1c->conn, h1s);
+ }
+
+ if (!h1_get_buf(h1c, &h1c->obuf)) {
+ h1c->flags |= H1C_F_OUT_ALLOC;
+ TRACE_STATE("waiting for opposite h1c obuf allocation", H1_EV_STRM_SEND|H1_EV_H1S_BLK, h1c->conn, h1s);
+ goto out;
+ }
+
+ if (b_space_wraps(&h1c->obuf))
+ b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
+
+ h1s->sd->iobuf.buf = &h1c->obuf;
+ h1s->sd->iobuf.offset = 0;
+ h1s->sd->iobuf.data = 0;
+
+ /* Cannot forward more than available room in output buffer */
+ if (count > b_room(&h1c->obuf))
+ count = b_room(&h1c->obuf);
+
+ if (!count) {
+ h1c->flags |= H1C_F_OUT_FULL;
+ h1s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+ TRACE_STATE("output buffer full", H1_EV_STRM_SEND|H1_EV_H1S_BLK, h1c->conn, h1s);
+ goto out;
+ }
+
+ /* forward remaining input data */
+ if (b_data(input)) {
+ size_t xfer = count;
+
+ if (xfer > b_data(input))
+ xfer = b_data(input);
+ h1s->sd->iobuf.data = b_xfer(&h1c->obuf, input, xfer);
+
+ /* Cannot forward more data, wait for room */
+ if (b_data(input))
+ goto out;
+ }
+
+ ret = count - h1s->sd->iobuf.data;
+
+ out:
+ TRACE_LEAVE(H1_EV_STRM_SEND, h1c->conn, h1s, 0, (size_t[]){ret});
+ return ret;
+ }
+
+/* Finish a fast-forward send sequence negotiated by h1_nego_ff(): flush the
+ * data (pipe content via snd_pipe(), or the output buffer via a synchronous
+ * h1_send()), update the H1 message state and the proxy counters accordingly.
+ * Returns the number of bytes accounted as sent during this call.
+ */
+static size_t h1_done_ff(struct stconn *sc)
+{
+	struct h1s *h1s = __sc_mux_strm(sc);
+	struct h1c *h1c = h1s->h1c;
+	struct h1m *h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
+	struct sedesc *sd = h1s->sd;
+	size_t total = 0;
+
+	TRACE_ENTER(H1_EV_STRM_SEND, h1c->conn, h1s);
+
+#if defined(USE_LINUX_SPLICE)
+	if (sd->iobuf.pipe) {
+		/* splicing path: push the pipe content to the transport.
+		 * NOTE(review): snd_pipe()'s result is stored in a size_t ;
+		 * presumably it cannot be negative here — to be confirmed.
+		 */
+		total = h1c->conn->xprt->snd_pipe(h1c->conn, h1c->conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
+		if (total > 0)
+			HA_ATOMIC_ADD(&h1c->px_counters->spliced_bytes_out, total);
+		/* release the pipe as soon as it is empty */
+		if (!sd->iobuf.pipe->data) {
+			put_pipe(sd->iobuf.pipe);
+			sd->iobuf.pipe = NULL;
+		}
+		goto out;
+	}
+#endif
+	if (!sd->iobuf.pipe) {
+		/* buffer path: the producer appended data directly into
+		 * h1c->obuf. Remaining room equal to the reserved offset is
+		 * treated as a full output buffer.
+		 */
+		if (b_room(&h1c->obuf) == sd->iobuf.offset)
+			h1c->flags |= H1C_F_OUT_FULL;
+
+		total = sd->iobuf.data;
+		sd->iobuf.buf = NULL;
+		sd->iobuf.offset = 0;
+		sd->iobuf.data = 0;
+
+		if (sd->iobuf.flags & IOBUF_FL_EOI)
+			h1c->flags &= ~H1C_F_CO_MSG_MORE;
+
+		/* Perform a synchronous send but in all cases, consider
+		 * everything was already sent from the SC point of view.
+		 */
+		h1_send(h1c);
+	}
+
+  out:
+	/* account forwarded bytes against the announced payload length.
+	 * NOTE(review): assumes total <= curr_len, presumably guaranteed by
+	 * the capping performed in h1_nego_ff() — to be confirmed.
+	 */
+	if (h1m->curr_len)
+		h1m->curr_len -= total;
+
+	if (!h1m->curr_len && (h1m->flags & H1_MF_CLEN))
+		h1m->state = ((sd->iobuf.flags & IOBUF_FL_EOI) ? H1_MSG_DONE : H1_MSG_TRAILERS);
+	else if (!h1m->curr_len && (h1m->flags & H1_MF_CHNK)) {
+		/* end of the current chunk: expect its trailing CRLF now */
+		if (h1m->state == H1_MSG_DATA)
+			h1m->state = H1_MSG_CHUNK_CRLF;
+	}
+
+	HA_ATOMIC_ADD(&h1c->px_counters->bytes_out, total);
+
+	// TODO: should we call h1_process() instead ?
+	if (h1c->conn->flags & CO_FL_ERROR) {
+		h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_ERR_PENDING;
+		if (h1c->flags & H1C_F_EOS)
+			h1c->flags |= H1C_F_ERROR;
+		else if (!(h1c->wait_event.events & SUB_RETRY_RECV)) {
+			/* EOS not seen, so subscribe for reads to be able to
+			 * catch the error on the reading path. It is especially
+			 * important if EOI was reached.
+			 */
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		}
+		se_fl_set_error(h1s->sd);
+		TRACE_DEVEL("connection error", H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	}
+
+	TRACE_LEAVE(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){total});
+	return total;
+}
+
+/* Fast-forward (zero-copy) data received on the connection to the opposite
+ * endpoint. Up to <count> payload bytes are transferred, either through a
+ * kernel pipe (splicing) or directly into the opposite side's buffer, as
+ * negotiated with se_nego_ff(). <flags> are CO_RFL_* receive flags. The H1
+ * message state is updated according to the forwarded payload, and EOS or
+ * errors detected on the connection are reported on the stream endpoint.
+ * Returns the total number of bytes fast-forwarded.
+ */
+static int h1_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags)
+{
+	struct h1s *h1s = __sc_mux_strm(sc);
+	struct h1c *h1c = h1s->h1c;
+	struct h1m *h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->req : &h1s->res);
+	struct sedesc *sdo = NULL;
+	size_t total = 0, try = 0;
+	int ret = 0;
+
+	TRACE_ENTER(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){count});
+
+	/* only message payload (DATA/TUNNEL states) may be fast-forwarded */
+	if (h1m->state != H1_MSG_DATA && h1m->state != H1_MSG_TUNNEL) {
+		h1c->flags &= ~H1C_F_WANT_FASTFWD;
+		TRACE_STATE("Cannot fast-forwad data now !(msg_data|msg_tunnel)", H1_EV_STRM_RECV, h1c->conn, h1s);
+		goto end;
+	}
+
+	se_fl_clr(h1s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+	h1c->conn->flags &= ~CO_FL_WAIT_ROOM;
+	h1c->flags |= H1C_F_WANT_FASTFWD;
+
+	if (h1c->flags & (H1C_F_EOS|H1C_F_ERROR)) {
+		h1c->flags &= ~H1C_F_WANT_FASTFWD;
+		TRACE_DEVEL("leaving on (EOS|ERROR)", H1_EV_STRM_RECV, h1c->conn, h1s);
+		goto end;
+	}
+
+	sdo = h1s_opposite_sd(h1s);
+	if (!sdo) {
+		TRACE_STATE("Opposite endpoint not available yet", H1_EV_STRM_RECV, h1c->conn, h1s);
+		goto out;
+	}
+
+  retry:
+	ret = 0;
+
+	/* never forward more than what remains of the announced payload */
+	if (h1m->state == H1_MSG_DATA && (h1m->flags & (H1_MF_CHNK|H1_MF_CLEN)) && count > h1m->curr_len)
+		count = h1m->curr_len;
+
+	try = se_nego_ff(sdo, &h1c->ibuf, count, h1c->conn->xprt->rcv_pipe && !!(flags & CO_RFL_MAY_SPLICE) && !(sdo->iobuf.flags & IOBUF_FL_NO_SPLICING));
+	if (b_room(&h1c->ibuf) && (h1c->flags & H1C_F_IN_FULL)) {
+		h1c->flags &= ~H1C_F_IN_FULL;
+		TRACE_STATE("h1c ibuf not full anymore", H1_EV_STRM_RECV|H1_EV_H1C_BLK);
+	}
+	if (!b_data(&h1c->ibuf))
+		h1_release_buf(h1c, &h1c->ibuf);
+
+	if (sdo->iobuf.flags & IOBUF_FL_NO_FF) {
+		/* Fast forwarding is not supported by the consumer */
+		h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_CANT_FASTFWD;
+		TRACE_DEVEL("Fast-forwarding not supported by opposite endpoint, disable it", H1_EV_STRM_RECV, h1c->conn, h1s);
+		goto end;
+	}
+	if (sdo->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
+		se_fl_set(h1s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+		TRACE_STATE("waiting for more room", H1_EV_STRM_RECV|H1_EV_H1S_ERR, h1c->conn, h1s);
+		goto out;
+	}
+
+	total += sdo->iobuf.data;
+	count -= sdo->iobuf.data;
+#if defined(USE_LINUX_SPLICE)
+	if (sdo->iobuf.pipe) {
+		/* Here, no data was transferred yet: receive into the pipe */
+		ret = h1c->conn->xprt->rcv_pipe(h1c->conn, h1c->conn->xprt_ctx, sdo->iobuf.pipe, try);
+		if (ret < 0) {
+			h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_CANT_FASTFWD;
+			TRACE_ERROR("Error when trying to fast-forward data, disable it and abort",
+				    H1_EV_STRM_RECV|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+			BUG_ON(sdo->iobuf.pipe->data);
+			put_pipe(sdo->iobuf.pipe);
+			sdo->iobuf.pipe = NULL;
+			goto end;
+		}
+		total += ret;
+		count -= ret;
+		if (!ret) {
+			TRACE_STATE("failed to receive data, subscribing", H1_EV_STRM_RECV, h1c->conn);
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		}
+		HA_ATOMIC_ADD(&h1c->px_counters->spliced_bytes_in, ret);
+	}
+#endif
+	if (!sdo->iobuf.pipe) {
+		/* receive directly into the consumer's buffer, skipping the
+		 * <offset> bytes reserved during the negotiation
+		 */
+		b_add(sdo->iobuf.buf, sdo->iobuf.offset);
+		ret = h1c->conn->xprt->rcv_buf(h1c->conn, h1c->conn->xprt_ctx, sdo->iobuf.buf, try, flags);
+		if (ret < try) {
+			TRACE_STATE("failed to receive data, subscribing", H1_EV_STRM_RECV, h1c->conn);
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		}
+		b_sub(sdo->iobuf.buf, sdo->iobuf.offset);
+		total += ret;
+		count -= ret;
+		sdo->iobuf.data += ret;
+	}
+
+	/* Till now, we forwarded less than a buffer, we can immediately retry
+	 * to fast-forward more data. Instruct the consumer it is an interim
+	 * fast-forward. It is of course only possible if there is still data to
+	 * fast-forward (count > 0), if the previous attempt was a full success
+	 * (ret == try > 0) and if we are not splicing (iobuf.buf != NULL).
+	 */
+	if (ret > 0 && ret == try && count && sdo->iobuf.buf && total < b_size(sdo->iobuf.buf)) {
+		sdo->iobuf.flags |= IOBUF_FL_INTERIM_FF;
+		se_done_ff(sdo);
+		goto retry;
+	}
+
+  out:
+	if (h1m->state == H1_MSG_DATA && (h1m->flags & (H1_MF_CHNK|H1_MF_CLEN))) {
+		if (total > h1m->curr_len) {
+			h1s->flags |= H1S_F_PARSING_ERROR;
+			se_fl_set(h1s->sd, SE_FL_ERROR);
+			TRACE_ERROR("too much payload, more than announced",
+				    H1_EV_STRM_RECV|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+			goto end;
+		}
+		h1m->curr_len -= total;
+		if (!h1m->curr_len) {
+			if (h1m->flags & H1_MF_CLEN) {
+				h1m->state = H1_MSG_DONE;
+				se_fl_set(h1s->sd, SE_FL_EOI); /* TODO: this line is tricky and must be evaluated first
+								* Its purpose is to avoid to set CO_SFL_MSG_MORE on the
+								* next calls to ->complete_fastfwd().
+								*/
+			}
+			else
+				h1m->state = H1_MSG_CHUNK_CRLF;
+			h1c->flags &= ~H1C_F_WANT_FASTFWD;
+
+			if (!(h1c->flags & H1C_F_IS_BACK)) {
+				/* The request was fully received. It means the H1S now
+				 * expect data from the opposite side
+				 */
+				se_expect_data(h1s->sd);
+			}
+
+			TRACE_STATE("payload fully received", H1_EV_STRM_RECV, h1c->conn, h1s);
+		}
+	}
+
+	if (conn_xprt_read0_pending(h1c->conn)) {
+		se_fl_set(h1s->sd, SE_FL_EOS);
+		TRACE_STATE("report EOS to SE", H1_EV_STRM_RECV, h1c->conn, h1s);
+		if (h1m->state >= H1_MSG_DONE || !(h1m->flags & H1_MF_XFER_LEN)) {
+			/* DONE or TUNNEL or SHUTR without XFER_LEN, set
+			 * EOI on the stream connector */
+			se_fl_set(h1s->sd, SE_FL_EOI);
+			TRACE_STATE("report EOI to SE", H1_EV_STRM_RECV, h1c->conn, h1s);
+		}
+		else {
+			se_fl_set(h1s->sd, SE_FL_ERROR);
+			h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_ERROR;
+			TRACE_ERROR("message aborted, set error on SC", H1_EV_STRM_RECV|H1_EV_H1S_ERR, h1c->conn, h1s);
+		}
+		h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_EOS;
+		TRACE_STATE("Allow xprt rcv_buf on read0", H1_EV_STRM_RECV, h1c->conn, h1s);
+	}
+	if (h1c->conn->flags & CO_FL_ERROR) {
+		se_fl_set(h1s->sd, SE_FL_ERROR);
+		h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_ERROR;
+		TRACE_DEVEL("connection error", H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	}
+
+	/* BUG FIX: <sdo> is NULL when the opposite endpoint was not available
+	 * yet (direct jump to "out" above): don't dereference it in this case.
+	 */
+	if (sdo) {
+		sdo->iobuf.flags &= ~IOBUF_FL_INTERIM_FF;
+		if (se_fl_test(h1s->sd, SE_FL_EOI)) {
+			sdo->iobuf.flags |= IOBUF_FL_EOI; /* TODO: it may be good to have a flag to be sure we can
+							   * forward the EOI to the consumer side
+							   */
+		}
+		se_done_ff(sdo);
+
+		if (sdo->iobuf.pipe) {
+			/* data are still pending in the pipe: ask to be called again */
+			se_fl_set(h1s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+		}
+	}
+
+	ret = total;
+	HA_ATOMIC_ADD(&h1c->px_counters->bytes_in, total);
+
+  end:
+
+	if (!(h1c->flags & H1C_F_WANT_FASTFWD)) {
+		TRACE_STATE("notify the mux can't use fast-forward anymore", H1_EV_STRM_RECV, h1c->conn, h1s);
+		se_fl_clr(h1s->sd, SE_FL_MAY_FASTFWD_PROD);
+		if (!(h1c->wait_event.events & SUB_RETRY_RECV)) {
+			TRACE_STATE("restart receiving data, subscribing", H1_EV_STRM_RECV, h1c->conn, h1s);
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		}
+	}
+
+	TRACE_LEAVE(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Resume a previously-blocked splicing send: push the data pending in the
+ * stream's pipe to the transport layer, release the pipe once empty, and
+ * update the H1 message state and counters. Connection errors are reported on
+ * the stream endpoint. Returns the number of bytes sent from the pipe.
+ */
+static int h1_resume_fastfwd(struct stconn *sc, unsigned int flags)
+{
+	struct h1s *h1s = __sc_mux_strm(sc);
+	struct h1c *h1c = h1s->h1c;
+	int ret = 0;
+
+	TRACE_ENTER(H1_EV_STRM_SEND, h1c->conn, h1s, 0, (size_t[]){flags});
+
+#if defined(USE_LINUX_SPLICE)
+	if (h1s->sd->iobuf.pipe) {
+		struct h1m *h1m = (!(h1c->flags & H1C_F_IS_BACK) ? &h1s->res : &h1s->req);
+		struct sedesc *sd = h1s->sd;
+
+		ret = h1c->conn->xprt->snd_pipe(h1c->conn, h1c->conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
+		if (ret > 0)
+			HA_ATOMIC_ADD(&h1c->px_counters->spliced_bytes_out, ret);
+		/* release the pipe as soon as it is empty */
+		if (!sd->iobuf.pipe->data) {
+			put_pipe(sd->iobuf.pipe);
+			sd->iobuf.pipe = NULL;
+		}
+
+		/* NOTE(review): unlike h1_done_ff(), <ret> is subtracted here
+		 * without a curr_len check first — presumably ret <= curr_len
+		 * always holds on this path; to be confirmed.
+		 */
+		h1m->curr_len -= ret;
+
+		if (!h1m->curr_len && (h1m->flags & H1_MF_CLEN))
+			h1m->state = H1_MSG_DONE;
+		else if (!h1m->curr_len && (h1m->flags & H1_MF_CHNK)) {
+			/* end of the current chunk: expect its trailing CRLF */
+			if (h1m->state == H1_MSG_DATA)
+				h1m->state = H1_MSG_CHUNK_CRLF;
+		}
+
+		HA_ATOMIC_ADD(&h1c->px_counters->bytes_out, ret);
+	}
+#endif
+
+	// TODO: should we call h1_process() instead ?
+	if (h1c->conn->flags & CO_FL_ERROR) {
+		h1c->flags = (h1c->flags & ~H1C_F_WANT_FASTFWD) | H1C_F_ERR_PENDING;
+		if (h1c->flags & H1C_F_EOS)
+			h1c->flags |= H1C_F_ERROR;
+		else if (!(h1c->wait_event.events & SUB_RETRY_RECV)) {
+			/* EOS not seen, so subscribe for reads to be able to
+			 * catch the error on the reading path. It is especially
+			 * important if EOI was reached.
+			 */
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		}
+		se_fl_set_error(h1s->sd);
+		TRACE_DEVEL("connection error", H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
+	}
+
+	TRACE_LEAVE(H1_EV_STRM_RECV, h1c->conn, h1s, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Connection-level control operations for the H1 mux. The returned value
+ * depends on <mux_ctl>; unknown operations return -1.
+ */
+static int h1_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
+{
+	struct h1c *h1c = conn->ctx;
+
+	switch (mux_ctl) {
+	case MUX_CTL_STATUS:
+		/* ready once the transport layer initialization is over */
+		return (conn->flags & CO_FL_WAIT_XPRT) ? 0 : MUX_STATUS_READY;
+
+	case MUX_CTL_EXIT_STATUS:
+		if (output)
+			*((int *)output) = h1c->errcode;
+		/* map the internal HTTP status to a generic mux error code ;
+		 * 408/501/500 are checked before the generic 4xx range.
+		 */
+		if (h1c->errcode == 408)
+			return MUX_ES_TOUT_ERR;
+		if (h1c->errcode == 501)
+			return MUX_ES_NOTIMPL_ERR;
+		if (h1c->errcode == 500)
+			return MUX_ES_INTERNAL_ERR;
+		if (h1c->errcode >= 400 && h1c->errcode <= 499)
+			return MUX_ES_INVALID_ERR;
+		return MUX_ES_SUCCESS;
+
+	case MUX_CTL_SUBS_RECV:
+		/* make sure we are subscribed for receive events */
+		if (!(h1c->wait_event.events & SUB_RETRY_RECV))
+			h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
+		return 0;
+
+	default:
+		return -1;
+	}
+}
+
+/* Stream-level control operations for the H1 mux. Returns 0 on success and -1
+ * for unknown operations.
+ */
+static int h1_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
+{
+	struct h1s *h1s = __sc_mux_strm(sc);
+
+	if (mux_sctl == MUX_SCTL_SID) {
+		/* the connection's request counter serves as the stream ID */
+		if (output)
+			*((int64_t *)output) = h1s->h1c->req_count;
+		return 0;
+	}
+
+	return -1;
+}
+
+/* appends some info about connection <h1c> to buffer <msg>, or does nothing if
+ * <h1c> is NULL. Returns non-zero if the connection is considered suspicious.
+ * May emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is
+ * not NULL, otherwise a single line is used.
+ * NOTE(review): <pfx> is currently unused here: everything is emitted on a
+ * single line and 0 is always returned.
+ */
+static int h1_dump_h1c_info(struct buffer *msg, struct h1c *h1c, const char *pfx)
+{
+	int ret = 0;
+
+	if (!h1c)
+		return ret;
+
+	/* connection flags, subscribed events and both buffer states */
+	chunk_appendf(msg, " h1c.flg=0x%x .sub=%d .ibuf=%u@%p+%u/%u .obuf=%u@%p+%u/%u",
+		      h1c->flags, h1c->wait_event.events,
+		      (unsigned int)b_data(&h1c->ibuf), b_orig(&h1c->ibuf),
+		      (unsigned int)b_head_ofs(&h1c->ibuf), (unsigned int)b_size(&h1c->ibuf),
+		      (unsigned int)b_data(&h1c->obuf), b_orig(&h1c->obuf),
+		      (unsigned int)b_head_ofs(&h1c->obuf), (unsigned int)b_size(&h1c->obuf));
+
+	chunk_appendf(msg, " .task=%p", h1c->task);
+	if (h1c->task) {
+		/* report the timeout task expiration in human-readable form */
+		chunk_appendf(msg, " .exp=%s",
+			      h1c->task->expire ? tick_is_expired(h1c->task->expire, now_ms) ? "<PAST>" :
+			      human_time(TICKS_TO_MS(h1c->task->expire - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+	}
+
+	return ret;
+}
+
+/* appends some info about stream <h1s> to buffer <msg>, or does nothing if
+ * <h1s> is NULL. Returns non-zero if the stream is considered suspicious. May
+ * emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is not
+ * NULL, otherwise a single line is used.
+ */
+static int h1_dump_h1s_info(struct buffer *msg, const struct h1s *h1s, const char *pfx)
+{
+	const char *method;
+	int ret = 0;
+
+	if (!h1s)
+		return ret;
+
+	/* resolve the request method to a readable name */
+	if (h1s->meth < HTTP_METH_OTHER)
+		method = http_known_methods[h1s->meth].ptr;
+	else
+		method = "UNKNOWN";
+
+	chunk_appendf(msg, " h1s=%p h1s.flg=0x%x .sd.flg=0x%x .req.state=%s .res.state=%s",
+		      h1s, h1s->flags, se_fl_get(h1s->sd),
+		      h1m_state_str(h1s->req.state), h1m_state_str(h1s->res.state));
+
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	chunk_appendf(msg, " .meth=%s status=%d",
+		      method, h1s->status);
+
+	/* dump the stream connector only while it is still attached */
+	chunk_appendf(msg, " .sd.flg=0x%08x", se_fl_get(h1s->sd));
+	if (!se_fl_test(h1s->sd, SE_FL_ORPHAN))
+		chunk_appendf(msg, " .sc.flg=0x%08x .sc.app=%p",
+			      h1s_sc(h1s)->flags, h1s_sc(h1s)->app);
+
+	if (pfx && h1s->subs)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	chunk_appendf(msg, " .subs=%p", h1s->subs);
+	if (h1s->subs) {
+		chunk_appendf(msg, "(ev=%d tl=%p", h1s->subs->events, h1s->subs->tasklet);
+		chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
+			      h1s->subs->tasklet->calls,
+			      h1s->subs->tasklet->context);
+		/* a tasklet called a million times or more looks suspicious */
+		if (h1s->subs->tasklet->calls >= 1000000)
+			ret = 1;
+		resolve_sym_name(msg, NULL, h1s->subs->tasklet->process);
+		chunk_appendf(msg, ")");
+	}
+	return ret;
+}
+
+/* for debugging with CLI's "show fd" command: dump the H1 connection attached
+ * to <conn> and, when present, its stream. Returns non-zero when the state
+ * looks suspicious.
+ */
+static int h1_show_fd(struct buffer *msg, struct connection *conn)
+{
+	struct h1c *h1c = conn->ctx;
+	int suspicious;
+
+	/* connection first, then the stream if one is attached */
+	suspicious = h1_dump_h1c_info(msg, h1c, NULL);
+	if (h1c->h1s)
+		suspicious |= h1_dump_h1s_info(msg, h1c->h1s, NULL);
+
+	return suspicious;
+}
+
+/* for debugging with CLI's "show sess" command. May emit multiple lines, each
+ * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
+ * line is used. Each field starts with a space so it's safe to print it after
+ * existing fields. Returns non-zero when the state looks suspicious.
+ */
+static int h1_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
+{
+	struct h1s *h1s = sd->se;
+	int suspicious = 0;
+
+	if (!h1s)
+		return 0;
+
+	/* stream info first, then the owning connection */
+	suspicious |= h1_dump_h1s_info(msg, h1s, pfx);
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+	chunk_appendf(msg, " h1c=%p", h1s->h1c);
+	suspicious |= h1_dump_h1c_info(msg, h1s->h1c, pfx);
+
+	return suspicious;
+}
+
+
+/* Add an entry in the headers map. Returns -1 on error and 0 on success.
+ * <from> is the header name as matched on the wire and <to> the same name
+ * with the desired capitalization; on failure, <err> receives an allocated
+ * error message to be freed by the caller.
+ */
+static int add_hdr_case_adjust(const char *from, const char *to, char **err)
+{
+	struct h1_hdr_entry *entry;
+
+	/* Be sure there is a non-empty <to> */
+	if (!strlen(to)) {
+		memprintf(err, "expect <to>");
+		return -1;
+	}
+
+	/* Be sure only the case differs between <from> and <to> */
+	if (strcasecmp(from, to) != 0) {
+		memprintf(err, "<from> and <to> must not differ except the case");
+		return -1;
+	}
+
+	/* Be sure <from> does not already exist in the tree */
+	if (ebis_lookup(&hdrs_map.map, from)) {
+		memprintf(err, "duplicate entry '%s'", from);
+		return -1;
+	}
+
+	/* Create the entry and insert it in the tree */
+	entry = malloc(sizeof(*entry));
+	if (!entry) {
+		memprintf(err, "out of memory");
+		return -1;
+	}
+
+	/* both strings are duplicated; release everything on partial failure */
+	entry->node.key = strdup(from);
+	entry->name = ist(strdup(to));
+	if (!entry->node.key || !isttest(entry->name)) {
+		free(entry->node.key);
+		istfree(&entry->name);
+		free(entry);
+		memprintf(err, "out of memory");
+		return -1;
+	}
+	ebis_insert(&hdrs_map.map, &entry->node);
+	return 0;
+}
+
+/* Migrate the connection to the current thread.
+ * Return 0 if successful, non-zero otherwise.
+ * Expected to be called with the old thread lock held.
+ */
+static int h1_takeover(struct connection *conn, int orig_tid)
+{
+	struct h1c *h1c = conn->ctx;
+	struct task *task;
+	struct task *new_task;
+	struct tasklet *new_tasklet;
+
+	/* Pre-allocate tasks so that we don't have to roll back after the xprt
+	 * has been migrated.
+	 */
+	new_task = task_new_here();
+	new_tasklet = tasklet_new();
+	if (!new_task || !new_tasklet)
+		goto fail;
+
+	if (fd_takeover(conn->handle.fd, conn) != 0)
+		goto fail;
+
+	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+		/* We failed to takeover the xprt, even if the connection may
+		 * still be valid, flag it as error'd, as we have already
+		 * taken over the fd, and wake the tasklet, so that it will
+		 * destroy it.
+		 */
+		conn->flags |= CO_FL_ERROR;
+		tasklet_wakeup_on(h1c->wait_event.tasklet, orig_tid);
+		goto fail;
+	}
+
+	/* drop the subscriptions held on the original thread */
+	if (h1c->wait_event.events)
+		h1c->conn->xprt->unsubscribe(h1c->conn, h1c->conn->xprt_ctx,
+					     h1c->wait_event.events, &h1c->wait_event);
+
+	task = h1c->task;
+	if (task) {
+		/* only assign a task if there was already one, otherwise
+		 * the preallocated new task will be released.
+		 */
+		/* detach the old task's context before killing it, with a
+		 * write barrier so the old thread never sees a half state
+		 */
+		task->context = NULL;
+		h1c->task = NULL;
+		__ha_barrier_store();
+		task_kill(task);
+
+		h1c->task = new_task;
+		new_task = NULL;
+		h1c->task->process = h1_timeout_task;
+		h1c->task->context = h1c;
+	}
+
+	/* To let the tasklet know it should free itself, and do nothing else,
+	 * set its context to NULL.
+	 */
+	h1c->wait_event.tasklet->context = NULL;
+	tasklet_wakeup_on(h1c->wait_event.tasklet, orig_tid);
+
+	/* install the fresh tasklet and re-subscribe on the new thread */
+	h1c->wait_event.tasklet = new_tasklet;
+	h1c->wait_event.tasklet->process = h1_io_cb;
+	h1c->wait_event.tasklet->context = h1c;
+	h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx,
+		                   SUB_RETRY_RECV, &h1c->wait_event);
+
+	if (new_task)
+		__task_free(new_task);
+	return 0;
+ fail:
+	if (new_task)
+		__task_free(new_task);
+	tasklet_free(new_tasklet);
+	return -1;
+}
+
+
+static void h1_hdeaders_case_adjust_deinit()
+{
+ struct ebpt_node *node, *next;
+ struct h1_hdr_entry *entry;
+
+ node = ebpt_first(&hdrs_map.map);
+ while (node) {
+ next = ebpt_next(node);
+ ebpt_delete(node);
+ entry = container_of(node, struct h1_hdr_entry, node);
+ free(entry->node.key);
+ istfree(&entry->name);
+ free(entry);
+ node = next;
+ }
+ free(hdrs_map.name);
+}
+
+/* Config post-parser: load the header case-adjust map file configured with
+ * "h1-case-adjust-file", one mapping per line, and feed each entry to
+ * add_hdr_case_adjust(). Returns a set of ERR_* codes.
+ */
+static int cfg_h1_headers_case_adjust_postparser()
+{
+	FILE *file = NULL;
+	char *c, *key_beg, *key_end, *value_beg, *value_end;
+	char *err;
+	int rc, line = 0, err_code = 0;
+
+	/* nothing to do when no map file was configured */
+	if (!hdrs_map.name)
+		goto end;
+
+	file = fopen(hdrs_map.name, "r");
+	if (!file) {
+		ha_alert("h1-headers-case-adjust-file '%s': failed to open file.\n",
+			 hdrs_map.name);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto end;
+	}
+
+	/* now parse all lines. The file may contain only two header names per
+	 * line, separated by spaces. All heading and trailing spaces will be
+	 * ignored. Lines starting with a # are ignored.
+	 * NOTE(review): lines longer than trash.size would be read in several
+	 * pieces by fgets() and parsed as separate lines — to be confirmed
+	 * whether this is acceptable for the expected file format.
+	 */
+	while (fgets(trash.area, trash.size, file) != NULL) {
+		line++;
+		c = trash.area;
+
+		/* strip leading spaces and tabs */
+		while (*c == ' ' || *c == '\t')
+			c++;
+
+		/* ignore empty lines, or lines beginning with a '#' */
+		if (*c == '#' || *c == '\0' || *c == '\r' || *c == '\n')
+			continue;
+
+		/* look for the end of the key */
+		key_beg = c;
+		while (*c != '\0' && *c != ' ' && *c != '\t' && *c != '\n' && *c != '\r')
+			c++;
+		key_end = c;
+
+		/* strip middle spaces and tabs */
+		while (*c == ' ' || *c == '\t')
+			c++;
+
+		/* look for the end of the value, it is the end of the line */
+		value_beg = c;
+		while (*c && *c != '\n' && *c != '\r')
+			c++;
+		value_end = c;
+
+		/* trim possibly trailing spaces and tabs */
+		while (value_end > value_beg && (value_end[-1] == ' ' || value_end[-1] == '\t'))
+			value_end--;
+
+		/* set final \0 and check entries */
+		*key_end = '\0';
+		*value_end = '\0';
+
+		err = NULL;
+		rc = add_hdr_case_adjust(key_beg, value_beg, &err);
+		if (rc < 0) {
+			ha_alert("h1-headers-case-adjust-file '%s' : %s at line %d.\n",
+				 hdrs_map.name, err, line);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			free(err);
+			goto end;
+		}
+		/* NOTE(review): add_hdr_case_adjust() currently only returns
+		 * 0 or -1, so this warning branch looks unreachable — confirm.
+		 */
+		if (rc > 0) {
+			ha_warning("h1-headers-case-adjust-file '%s' : %s at line %d.\n",
+				   hdrs_map.name, err, line);
+			err_code |= ERR_WARN;
+			free(err);
+		}
+	}
+
+  end:
+	if (file)
+		fclose(file);
+	/* always register the cleanup callback, even with no/failed file:
+	 * freeing an empty map at deinit is harmless
+	 */
+	hap_register_post_deinit(h1_hdeaders_case_adjust_deinit);
+	return err_code;
+}
+
+/* config parser for global "h1-accept-payload-with-any-method" */
+static int cfg_parse_h1_accept_payload_with_any_method(char **args, int section_type, struct proxy *curpx,
+						       const struct proxy *defpx, const char *file, int line,
+						       char **err)
+{
+	/* this keyword takes no argument */
+	if (too_many_args(0, args, err, NULL))
+		return -1;
+
+	accept_payload_with_any_method = 1;
+	return 0;
+}
+
+
+/* config parser for global "h1-case-adjust" (see the cfg_kws list below) */
+static int cfg_parse_h1_header_case_adjust(char **args, int section_type, struct proxy *curpx,
+					   const struct proxy *defpx, const char *file, int line,
+					   char **err)
+{
+	if (too_many_args(2, args, err, NULL))
+		return -1;
+
+	/* both the <from> and <to> header names are mandatory */
+	if (!*(args[1]) || !*(args[2])) {
+		memprintf(err, "'%s' expects <from> and <to> as argument.", args[0]);
+		return -1;
+	}
+
+	return add_hdr_case_adjust(args[1], args[2], err);
+}
+
+/* config parser for global "h1-case-adjust-file" (see the cfg_kws list below) */
+static int cfg_parse_h1_headers_case_adjust_file(char **args, int section_type, struct proxy *curpx,
+						 const struct proxy *defpx, const char *file, int line,
+						 char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (!*(args[1])) {
+		memprintf(err, "'%s' expects <file> as argument.", args[0]);
+		return -1;
+	}
+
+	/* replace any previously configured file name */
+	free(hdrs_map.name);
+	hdrs_map.name = strdup(args[1]);
+	return 0;
+}
+
+/* config parser for global "tune.h1.zero-copy-fwd-recv" */
+static int cfg_parse_h1_zero_copy_fwd_rcv(char **args, int section_type, struct proxy *curpx,
+					  const struct proxy *defpx, const char *file, int line,
+					  char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* "off" sets the disabling bit, "on" clears it */
+	if (strcmp(args[1], "off") == 0)
+		global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_H1_RCV;
+	else if (strcmp(args[1], "on") == 0)
+		global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_H1_RCV;
+	else {
+		memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config parser for global "tune.h1.zero-copy-fwd-send" */
+static int cfg_parse_h1_zero_copy_fwd_snd(char **args, int section_type, struct proxy *curpx,
+					  const struct proxy *defpx, const char *file, int line,
+					  char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* "off" sets the disabling bit, "on" clears it */
+	if (strcmp(args[1], "off") == 0)
+		global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_H1_SND;
+	else if (strcmp(args[1], "on") == 0)
+		global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_H1_SND;
+	else {
+		memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config keyword parsers. Note that the keyword strings, not the parser
+ * function names, are what appears in the configuration file.
+ */
+static struct cfg_kw_list cfg_kws = {{ }, {
+	{ CFG_GLOBAL, "h1-accept-payload-with-any-method", cfg_parse_h1_accept_payload_with_any_method },
+	{ CFG_GLOBAL, "h1-case-adjust", cfg_parse_h1_header_case_adjust },
+	{ CFG_GLOBAL, "h1-case-adjust-file", cfg_parse_h1_headers_case_adjust_file },
+	{ CFG_GLOBAL, "tune.h1.zero-copy-fwd-recv", cfg_parse_h1_zero_copy_fwd_rcv },
+	{ CFG_GLOBAL, "tune.h1.zero-copy-fwd-send", cfg_parse_h1_zero_copy_fwd_snd },
+	{ 0, NULL, NULL },
+	}
+};
+
+/* register the keywords above and the map-file post-parser */
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+REGISTER_CONFIG_POSTPARSER("h1-headers-map", cfg_h1_headers_case_adjust_postparser);
+
+
+/****************************************/
+/* MUX initialization and instantiation */
+/****************************************/
+
+/* The mux operations. This set is registered under the empty token below and
+ * thus serves as the default HTX mux (protocol upgrades remain allowed).
+ */
+static const struct mux_ops mux_http_ops = {
+	.init = h1_init,
+	.wake = h1_wake,
+	.attach = h1_attach,
+	.get_first_sc = h1_get_first_sc,
+	.detach = h1_detach,
+	.destroy = h1_destroy,
+	.avail_streams = h1_avail_streams,
+	.used_streams = h1_used_streams,
+	.rcv_buf = h1_rcv_buf,
+	.snd_buf = h1_snd_buf,
+	.nego_fastfwd = h1_nego_ff,
+	.done_fastfwd = h1_done_ff,
+	.fastfwd = h1_fastfwd,
+	.resume_fastfwd = h1_resume_fastfwd,
+	.subscribe = h1_subscribe,
+	.unsubscribe = h1_unsubscribe,
+	.shutr = h1_shutr,
+	.shutw = h1_shutw,
+	.show_fd = h1_show_fd,
+	.show_sd = h1_show_sd,
+	.ctl = h1_ctl,
+	.sctl = h1_sctl,
+	.takeover = h1_takeover,
+	.flags = MX_FL_HTX,
+	.name = "H1",
+};
+
+/* Same operations, registered under the explicit "h1" token below. The only
+ * difference with mux_http_ops is MX_FL_NO_UPG, which forbids upgrades when
+ * the protocol was forced in the configuration.
+ */
+static const struct mux_ops mux_h1_ops = {
+	.init = h1_init,
+	.wake = h1_wake,
+	.attach = h1_attach,
+	.get_first_sc = h1_get_first_sc,
+	.detach = h1_detach,
+	.destroy = h1_destroy,
+	.avail_streams = h1_avail_streams,
+	.used_streams = h1_used_streams,
+	.rcv_buf = h1_rcv_buf,
+	.snd_buf = h1_snd_buf,
+	.nego_fastfwd = h1_nego_ff,
+	.done_fastfwd = h1_done_ff,
+	.fastfwd = h1_fastfwd,
+	.resume_fastfwd = h1_resume_fastfwd,
+	.subscribe = h1_subscribe,
+	.unsubscribe = h1_unsubscribe,
+	.shutr = h1_shutr,
+	.shutw = h1_shutw,
+	.show_fd = h1_show_fd,
+	.show_sd = h1_show_sd,
+	.ctl = h1_ctl,
+	.sctl = h1_sctl,
+	.takeover = h1_takeover,
+	.flags = MX_FL_HTX|MX_FL_NO_UPG,
+	.name = "H1",
+};
+
+/* this mux registers the default HTX proto (empty token) but also the "h1"
+ * proto (to be referenced in the configuration)
+ */
+static struct mux_proto_list mux_proto_h1 =
+	{ .token = IST("h1"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &mux_h1_ops };
+static struct mux_proto_list mux_proto_http =
+	{ .token = IST(""), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &mux_http_ops };
+
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h1);
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_http);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/mux_h2.c b/src/mux_h2.c
new file mode 100644
index 0000000..273e1f5
--- /dev/null
+++ b/src/mux_h2.c
@@ -0,0 +1,7598 @@
+/*
+ * HTTP/2 mux-demux for connections
+ *
+ * Copyright 2017 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/eb32tree.h>
+#include <import/ebmbtree.h>
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/h2.h>
+#include <haproxy/hpack-dec.h>
+#include <haproxy/hpack-enc.h>
+#include <haproxy/hpack-tbl.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/log.h>
+#include <haproxy/mux_h2-t.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/proxy.h>
+#include <haproxy/server.h>
+#include <haproxy/session-t.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/trace.h>
+#include <haproxy/xref.h>
+
+
+/* dummy streams returned for the closed, error, refused and idle states */
+static const struct h2s *h2_closed_stream;
+static const struct h2s *h2_error_stream;
+static const struct h2s *h2_refused_stream;
+static const struct h2s *h2_idle_stream;
+
+
+/**** H2 connection descriptor ****/
+struct h2c {
+ struct connection *conn;
+
+ enum h2_cs st0; /* mux state */
+ enum h2_err errcode; /* H2 err code (H2_ERR_*) */
+
+ /* 16 bit hole here */
+ uint32_t flags; /* connection flags: H2_CF_* */
+ uint32_t streams_limit; /* maximum number of concurrent streams the peer supports */
+ int32_t max_id; /* highest ID known on this connection, <0 before preface */
+ uint32_t rcvd_c; /* newly received data to ACK for the connection */
+ uint32_t rcvd_s; /* newly received data to ACK for the current stream (dsi) or zero */
+
+ /* states for the demux direction */
+ struct hpack_dht *ddht; /* demux dynamic header table */
+ struct buffer dbuf; /* demux buffer */
+
+ int32_t dsi; /* demux stream ID (<0 = idle) */
+ int32_t dfl; /* demux frame length (if dsi >= 0) */
+ int8_t dft; /* demux frame type (if dsi >= 0) */
+ int8_t dff; /* demux frame flags (if dsi >= 0) */
+ uint8_t dpl; /* demux pad length (part of dfl), init to 0 */
+ /* 8 bit hole here */
+ int32_t last_sid; /* last processed stream ID for GOAWAY, <0 before preface */
+
+ /* states for the mux direction */
+ struct buffer mbuf[H2C_MBUF_CNT]; /* mux buffers (ring) */
+ int32_t miw; /* mux initial window size for all new streams */
+ int32_t mws; /* mux window size. Can be negative. */
+ int32_t mfs; /* mux's max frame size */
+
+ int timeout; /* idle timeout duration in ticks */
+ int shut_timeout; /* idle timeout duration in ticks after GOAWAY was sent */
+ int idle_start; /* date of the last time the connection went idle (no stream + empty mbuf), or the start of current http req */
+ /* 32-bit hole here */
+ unsigned int nb_streams; /* number of streams in the tree */
+ unsigned int nb_sc; /* number of attached stream connectors */
+ unsigned int nb_reserved; /* number of reserved streams */
+ unsigned int stream_cnt; /* total number of streams seen */
+ struct proxy *proxy; /* the proxy this connection was created for */
+ struct task *task; /* timeout management task */
+ struct h2_counters *px_counters; /* h2 counters attached to proxy */
+ struct eb_root streams_by_id; /* all active streams by their ID */
+ struct list send_list; /* list of blocked streams requesting to send */
+ struct list fctl_list; /* list of streams blocked by connection's fctl */
+ struct list blocked_list; /* list of streams blocked for other reasons (e.g. sfctl, dep) */
+ struct buffer_wait buf_wait; /* wait list for buffer allocations */
+ struct wait_event wait_event; /* To be used if we're waiting for I/Os */
+};
+
+
+/* H2 stream descriptor, describing the stream as it appears in the H2C, and as
+ * it is being processed in the internal HTTP representation (HTX).
+ */
+struct h2s {
+ struct sedesc *sd;
+ struct session *sess;
+ struct h2c *h2c;
+ struct eb32_node by_id; /* place in h2c's streams_by_id */
+ int32_t id; /* stream ID */
+ uint32_t flags; /* H2_SF_* */
+ int sws; /* stream window size, to be added to the mux's initial window size */
+ enum h2_err errcode; /* H2 err code (H2_ERR_*) */
+ enum h2_ss st;
+ uint16_t status; /* HTTP response status */
+ unsigned long long body_len; /* remaining body length according to content-length if H2_SF_DATA_CLEN */
+ struct buffer rxbuf; /* receive buffer, always valid (buf_empty or real buffer) */
+ struct wait_event *subs; /* recv wait_event the stream connector associated is waiting on (via h2_subscribe) */
+	struct list list; /* To be used when adding in h2c->send_list or h2c->fctl_list */
+ struct tasklet *shut_tl; /* deferred shutdown tasklet, to retry to send an RST after we failed to,
+ * in case there's no other subscription to do it */
+
+ char upgrade_protocol[16]; /* rfc 8441: requested protocol on Extended CONNECT */
+};
+
+/* descriptor for an h2 frame header */
+struct h2_fh {
+ uint32_t len; /* length, host order, 24 bits */
+ uint32_t sid; /* stream id, host order, 31 bits */
+ uint8_t ft; /* frame type */
+ uint8_t ff; /* frame flags */
+};
+
+/* trace source and events */
+static void h2_trace(enum trace_level level, uint64_t mask, \
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ * strm - application layer
+ * h2s - internal H2 stream
+ * h2c - internal H2 connection
+ * conn - external connection
+ *
+ */
+static const struct trace_event h2_trace_events[] = {
+#define H2_EV_H2C_NEW (1ULL << 0)
+ { .mask = H2_EV_H2C_NEW, .name = "h2c_new", .desc = "new H2 connection" },
+#define H2_EV_H2C_RECV (1ULL << 1)
+ { .mask = H2_EV_H2C_RECV, .name = "h2c_recv", .desc = "Rx on H2 connection" },
+#define H2_EV_H2C_SEND (1ULL << 2)
+ { .mask = H2_EV_H2C_SEND, .name = "h2c_send", .desc = "Tx on H2 connection" },
+#define H2_EV_H2C_FCTL (1ULL << 3)
+ { .mask = H2_EV_H2C_FCTL, .name = "h2c_fctl", .desc = "H2 connection flow-controlled" },
+#define H2_EV_H2C_BLK (1ULL << 4)
+ { .mask = H2_EV_H2C_BLK, .name = "h2c_blk", .desc = "H2 connection blocked" },
+#define H2_EV_H2C_WAKE (1ULL << 5)
+ { .mask = H2_EV_H2C_WAKE, .name = "h2c_wake", .desc = "H2 connection woken up" },
+#define H2_EV_H2C_END (1ULL << 6)
+ { .mask = H2_EV_H2C_END, .name = "h2c_end", .desc = "H2 connection terminated" },
+#define H2_EV_H2C_ERR (1ULL << 7)
+ { .mask = H2_EV_H2C_ERR, .name = "h2c_err", .desc = "error on H2 connection" },
+#define H2_EV_RX_FHDR (1ULL << 8)
+ { .mask = H2_EV_RX_FHDR, .name = "rx_fhdr", .desc = "H2 frame header received" },
+#define H2_EV_RX_FRAME (1ULL << 9)
+ { .mask = H2_EV_RX_FRAME, .name = "rx_frame", .desc = "receipt of any H2 frame" },
+#define H2_EV_RX_EOI (1ULL << 10)
+ { .mask = H2_EV_RX_EOI, .name = "rx_eoi", .desc = "receipt of end of H2 input (ES or RST)" },
+#define H2_EV_RX_PREFACE (1ULL << 11)
+ { .mask = H2_EV_RX_PREFACE, .name = "rx_preface", .desc = "receipt of H2 preface" },
+#define H2_EV_RX_DATA (1ULL << 12)
+ { .mask = H2_EV_RX_DATA, .name = "rx_data", .desc = "receipt of H2 DATA frame" },
+#define H2_EV_RX_HDR (1ULL << 13)
+ { .mask = H2_EV_RX_HDR, .name = "rx_hdr", .desc = "receipt of H2 HEADERS frame" },
+#define H2_EV_RX_PRIO (1ULL << 14)
+ { .mask = H2_EV_RX_PRIO, .name = "rx_prio", .desc = "receipt of H2 PRIORITY frame" },
+#define H2_EV_RX_RST (1ULL << 15)
+ { .mask = H2_EV_RX_RST, .name = "rx_rst", .desc = "receipt of H2 RST_STREAM frame" },
+#define H2_EV_RX_SETTINGS (1ULL << 16)
+ { .mask = H2_EV_RX_SETTINGS, .name = "rx_settings", .desc = "receipt of H2 SETTINGS frame" },
+#define H2_EV_RX_PUSH (1ULL << 17)
+ { .mask = H2_EV_RX_PUSH, .name = "rx_push", .desc = "receipt of H2 PUSH_PROMISE frame" },
+#define H2_EV_RX_PING (1ULL << 18)
+ { .mask = H2_EV_RX_PING, .name = "rx_ping", .desc = "receipt of H2 PING frame" },
+#define H2_EV_RX_GOAWAY (1ULL << 19)
+ { .mask = H2_EV_RX_GOAWAY, .name = "rx_goaway", .desc = "receipt of H2 GOAWAY frame" },
+#define H2_EV_RX_WU (1ULL << 20)
+ { .mask = H2_EV_RX_WU, .name = "rx_wu", .desc = "receipt of H2 WINDOW_UPDATE frame" },
+#define H2_EV_RX_CONT (1ULL << 21)
+ { .mask = H2_EV_RX_CONT, .name = "rx_cont", .desc = "receipt of H2 CONTINUATION frame" },
+#define H2_EV_TX_FRAME (1ULL << 22)
+ { .mask = H2_EV_TX_FRAME, .name = "tx_frame", .desc = "transmission of any H2 frame" },
+#define H2_EV_TX_EOI (1ULL << 23)
+ { .mask = H2_EV_TX_EOI, .name = "tx_eoi", .desc = "transmission of H2 end of input (ES or RST)" },
+#define H2_EV_TX_PREFACE (1ULL << 24)
+ { .mask = H2_EV_TX_PREFACE, .name = "tx_preface", .desc = "transmission of H2 preface" },
+#define H2_EV_TX_DATA (1ULL << 25)
+ { .mask = H2_EV_TX_DATA, .name = "tx_data", .desc = "transmission of H2 DATA frame" },
+#define H2_EV_TX_HDR (1ULL << 26)
+ { .mask = H2_EV_TX_HDR, .name = "tx_hdr", .desc = "transmission of H2 HEADERS frame" },
+#define H2_EV_TX_PRIO (1ULL << 27)
+ { .mask = H2_EV_TX_PRIO, .name = "tx_prio", .desc = "transmission of H2 PRIORITY frame" },
+#define H2_EV_TX_RST (1ULL << 28)
+ { .mask = H2_EV_TX_RST, .name = "tx_rst", .desc = "transmission of H2 RST_STREAM frame" },
+#define H2_EV_TX_SETTINGS (1ULL << 29)
+ { .mask = H2_EV_TX_SETTINGS, .name = "tx_settings", .desc = "transmission of H2 SETTINGS frame" },
+#define H2_EV_TX_PUSH (1ULL << 30)
+ { .mask = H2_EV_TX_PUSH, .name = "tx_push", .desc = "transmission of H2 PUSH_PROMISE frame" },
+#define H2_EV_TX_PING (1ULL << 31)
+ { .mask = H2_EV_TX_PING, .name = "tx_ping", .desc = "transmission of H2 PING frame" },
+#define H2_EV_TX_GOAWAY (1ULL << 32)
+ { .mask = H2_EV_TX_GOAWAY, .name = "tx_goaway", .desc = "transmission of H2 GOAWAY frame" },
+#define H2_EV_TX_WU (1ULL << 33)
+ { .mask = H2_EV_TX_WU, .name = "tx_wu", .desc = "transmission of H2 WINDOW_UPDATE frame" },
+#define H2_EV_TX_CONT (1ULL << 34)
+ { .mask = H2_EV_TX_CONT, .name = "tx_cont", .desc = "transmission of H2 CONTINUATION frame" },
+#define H2_EV_H2S_NEW (1ULL << 35)
+ { .mask = H2_EV_H2S_NEW, .name = "h2s_new", .desc = "new H2 stream" },
+#define H2_EV_H2S_RECV (1ULL << 36)
+ { .mask = H2_EV_H2S_RECV, .name = "h2s_recv", .desc = "Rx for H2 stream" },
+#define H2_EV_H2S_SEND (1ULL << 37)
+ { .mask = H2_EV_H2S_SEND, .name = "h2s_send", .desc = "Tx for H2 stream" },
+#define H2_EV_H2S_FCTL (1ULL << 38)
+ { .mask = H2_EV_H2S_FCTL, .name = "h2s_fctl", .desc = "H2 stream flow-controlled" },
+#define H2_EV_H2S_BLK (1ULL << 39)
+ { .mask = H2_EV_H2S_BLK, .name = "h2s_blk", .desc = "H2 stream blocked" },
+#define H2_EV_H2S_WAKE (1ULL << 40)
+ { .mask = H2_EV_H2S_WAKE, .name = "h2s_wake", .desc = "H2 stream woken up" },
+#define H2_EV_H2S_END (1ULL << 41)
+ { .mask = H2_EV_H2S_END, .name = "h2s_end", .desc = "H2 stream terminated" },
+#define H2_EV_H2S_ERR (1ULL << 42)
+ { .mask = H2_EV_H2S_ERR, .name = "h2s_err", .desc = "error on H2 stream" },
+#define H2_EV_STRM_NEW (1ULL << 43)
+ { .mask = H2_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
+#define H2_EV_STRM_RECV (1ULL << 44)
+ { .mask = H2_EV_STRM_RECV, .name = "strm_recv", .desc = "receiving data for stream" },
+#define H2_EV_STRM_SEND (1ULL << 45)
+ { .mask = H2_EV_STRM_SEND, .name = "strm_send", .desc = "sending data for stream" },
+#define H2_EV_STRM_FULL (1ULL << 46)
+ { .mask = H2_EV_STRM_FULL, .name = "strm_full", .desc = "stream buffer full" },
+#define H2_EV_STRM_WAKE (1ULL << 47)
+ { .mask = H2_EV_STRM_WAKE, .name = "strm_wake", .desc = "stream woken up" },
+#define H2_EV_STRM_SHUT (1ULL << 48)
+ { .mask = H2_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
+#define H2_EV_STRM_END (1ULL << 49)
+ { .mask = H2_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
+#define H2_EV_STRM_ERR (1ULL << 50)
+ { .mask = H2_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
+#define H2_EV_PROTO_ERR (1ULL << 51)
+ { .mask = H2_EV_PROTO_ERR, .name = "proto_err", .desc = "protocol error" },
+ { }
+};
+
+static const struct name_desc h2_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="h2s", .desc="H2 stream" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+static const struct name_desc h2_trace_decoding[] = {
+#define H2_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define H2_VERB_MINIMAL 2
+ { .name="minimal", .desc="report only h2c/h2s state and flags, no real decoding" },
+#define H2_VERB_SIMPLE 3
+ { .name="simple", .desc="add request/response status line or frame info when available" },
+#define H2_VERB_ADVANCED 4
+ { .name="advanced", .desc="add header fields or frame decoding when available" },
+#define H2_VERB_COMPLETE 5
+ { .name="complete", .desc="add full data dump when available" },
+ { /* end */ }
+};
+
+static struct trace_source trace_h2 __read_mostly = {
+ .name = IST("h2"),
+ .desc = "HTTP/2 multiplexer",
+ .arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
+ .default_cb = h2_trace,
+ .known_events = h2_trace_events,
+ .lockon_args = h2_trace_lockon_args,
+ .decoding = h2_trace_decoding,
+ .report_events = ~0, // report everything by default
+};
+
+#define TRACE_SOURCE &trace_h2
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* h2 stats module */
+enum {
+ H2_ST_HEADERS_RCVD,
+ H2_ST_DATA_RCVD,
+ H2_ST_SETTINGS_RCVD,
+ H2_ST_RST_STREAM_RCVD,
+ H2_ST_GOAWAY_RCVD,
+
+ H2_ST_CONN_PROTO_ERR,
+ H2_ST_STRM_PROTO_ERR,
+ H2_ST_RST_STREAM_RESP,
+ H2_ST_GOAWAY_RESP,
+
+ H2_ST_OPEN_CONN,
+ H2_ST_OPEN_STREAM,
+ H2_ST_TOTAL_CONN,
+ H2_ST_TOTAL_STREAM,
+
+ H2_STATS_COUNT /* must be the last member of the enum */
+};
+
+static struct name_desc h2_stats[] = {
+ [H2_ST_HEADERS_RCVD] = { .name = "h2_headers_rcvd",
+ .desc = "Total number of received HEADERS frames" },
+ [H2_ST_DATA_RCVD] = { .name = "h2_data_rcvd",
+ .desc = "Total number of received DATA frames" },
+ [H2_ST_SETTINGS_RCVD] = { .name = "h2_settings_rcvd",
+ .desc = "Total number of received SETTINGS frames" },
+ [H2_ST_RST_STREAM_RCVD] = { .name = "h2_rst_stream_rcvd",
+ .desc = "Total number of received RST_STREAM frames" },
+ [H2_ST_GOAWAY_RCVD] = { .name = "h2_goaway_rcvd",
+ .desc = "Total number of received GOAWAY frames" },
+
+ [H2_ST_CONN_PROTO_ERR] = { .name = "h2_detected_conn_protocol_errors",
+ .desc = "Total number of connection protocol errors" },
+ [H2_ST_STRM_PROTO_ERR] = { .name = "h2_detected_strm_protocol_errors",
+ .desc = "Total number of stream protocol errors" },
+ [H2_ST_RST_STREAM_RESP] = { .name = "h2_rst_stream_resp",
+ .desc = "Total number of RST_STREAM sent on detected error" },
+ [H2_ST_GOAWAY_RESP] = { .name = "h2_goaway_resp",
+ .desc = "Total number of GOAWAY sent on detected error" },
+
+ [H2_ST_OPEN_CONN] = { .name = "h2_open_connections",
+ .desc = "Count of currently open connections" },
+ [H2_ST_OPEN_STREAM] = { .name = "h2_backend_open_streams",
+ .desc = "Count of currently open streams" },
+ [H2_ST_TOTAL_CONN] = { .name = "h2_total_connections",
+ .desc = "Total number of connections" },
+ [H2_ST_TOTAL_STREAM] = { .name = "h2_backend_total_streams",
+ .desc = "Total number of streams" },
+};
+
+static struct h2_counters {
+ long long headers_rcvd; /* total number of HEADERS frame received */
+ long long data_rcvd; /* total number of DATA frame received */
+ long long settings_rcvd; /* total number of SETTINGS frame received */
+ long long rst_stream_rcvd; /* total number of RST_STREAM frame received */
+ long long goaway_rcvd; /* total number of GOAWAY frame received */
+
+ long long conn_proto_err; /* total number of protocol errors detected */
+ long long strm_proto_err; /* total number of protocol errors detected */
+ long long rst_stream_resp; /* total number of RST_STREAM frame sent on error */
+ long long goaway_resp; /* total number of GOAWAY frame sent on error */
+
+ long long open_conns; /* count of currently open connections */
+ long long open_streams; /* count of currently open streams */
+ long long total_conns; /* total number of connections */
+ long long total_streams; /* total number of streams */
+} h2_counters;
+
+static void h2_fill_stats(void *data, struct field *stats)
+{
+ struct h2_counters *counters = data;
+
+ stats[H2_ST_HEADERS_RCVD] = mkf_u64(FN_COUNTER, counters->headers_rcvd);
+ stats[H2_ST_DATA_RCVD] = mkf_u64(FN_COUNTER, counters->data_rcvd);
+ stats[H2_ST_SETTINGS_RCVD] = mkf_u64(FN_COUNTER, counters->settings_rcvd);
+ stats[H2_ST_RST_STREAM_RCVD] = mkf_u64(FN_COUNTER, counters->rst_stream_rcvd);
+ stats[H2_ST_GOAWAY_RCVD] = mkf_u64(FN_COUNTER, counters->goaway_rcvd);
+
+ stats[H2_ST_CONN_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->conn_proto_err);
+ stats[H2_ST_STRM_PROTO_ERR] = mkf_u64(FN_COUNTER, counters->strm_proto_err);
+ stats[H2_ST_RST_STREAM_RESP] = mkf_u64(FN_COUNTER, counters->rst_stream_resp);
+ stats[H2_ST_GOAWAY_RESP] = mkf_u64(FN_COUNTER, counters->goaway_resp);
+
+ stats[H2_ST_OPEN_CONN] = mkf_u64(FN_GAUGE, counters->open_conns);
+ stats[H2_ST_OPEN_STREAM] = mkf_u64(FN_GAUGE, counters->open_streams);
+ stats[H2_ST_TOTAL_CONN] = mkf_u64(FN_COUNTER, counters->total_conns);
+ stats[H2_ST_TOTAL_STREAM] = mkf_u64(FN_COUNTER, counters->total_streams);
+}
+
+static struct stats_module h2_stats_module = {
+ .name = "h2",
+ .fill_stats = h2_fill_stats,
+ .stats = h2_stats,
+ .stats_count = H2_STATS_COUNT,
+ .counters = &h2_counters,
+ .counters_size = sizeof(h2_counters),
+ .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_BE),
+ .clearable = 1,
+};
+
+INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
+
+/* the h2c connection pool */
+DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));
+
+/* the h2s stream pool */
+DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));
+
+/* The default connection window size is 65535, it may only be enlarged using
+ * a WINDOW_UPDATE message. Since the window must never be larger than 2G-1,
+ * we'll pretend we already received the difference between the two to send
+ * an equivalent window update to enlarge it to 2G-1.
+ */
+#define H2_INITIAL_WINDOW_INCREMENT ((1U<<31)-1 - 65535)
+
+/* maximum amount of data we're OK with re-aligning for buffer optimizations */
+#define MAX_DATA_REALIGN 1024
+
+/* a few settings from the global section */
+static int h2_settings_header_table_size = 4096; /* initial value */
+static int h2_settings_initial_window_size = 65536; /* default initial value */
+static int h2_be_settings_initial_window_size = 0; /* backend's default initial value */
+static int h2_fe_settings_initial_window_size = 0; /* frontend's default initial value */
+static unsigned int h2_settings_max_concurrent_streams = 100; /* default value */
+static unsigned int h2_be_settings_max_concurrent_streams = 0; /* backend value */
+static unsigned int h2_fe_settings_max_concurrent_streams = 0; /* frontend value */
+static int h2_settings_max_frame_size = 0; /* unset */
+
+/* other non-protocol settings */
+static unsigned int h2_fe_max_total_streams = 0; /* frontend value */
+
+/* a dummy closed endpoint */
+static const struct sedesc closed_ep = {
+ .sc = NULL,
+ .flags = SE_FL_DETACHED,
+};
+
+/* a dummy closed stream */
+static const struct h2s *h2_closed_stream = &(const struct h2s){
+ .sd = (struct sedesc *)&closed_ep,
+ .h2c = NULL,
+ .st = H2_SS_CLOSED,
+ .errcode = H2_ERR_STREAM_CLOSED,
+ .flags = H2_SF_RST_RCVD,
+ .id = 0,
+};
+
+/* a dummy closed stream returning a PROTOCOL_ERROR error */
+static const struct h2s *h2_error_stream = &(const struct h2s){
+ .sd = (struct sedesc *)&closed_ep,
+ .h2c = NULL,
+ .st = H2_SS_CLOSED,
+ .errcode = H2_ERR_PROTOCOL_ERROR,
+ .flags = 0,
+ .id = 0,
+};
+
+/* a dummy closed stream returning a REFUSED_STREAM error */
+static const struct h2s *h2_refused_stream = &(const struct h2s){
+ .sd = (struct sedesc *)&closed_ep,
+ .h2c = NULL,
+ .st = H2_SS_CLOSED,
+ .errcode = H2_ERR_REFUSED_STREAM,
+ .flags = 0,
+ .id = 0,
+};
+
+/* and a dummy idle stream for use with any unannounced stream */
+static const struct h2s *h2_idle_stream = &(const struct h2s){
+ .sd = (struct sedesc *)&closed_ep,
+ .h2c = NULL,
+ .st = H2_SS_IDLE,
+ .errcode = H2_ERR_STREAM_CLOSED,
+ .id = 0,
+};
+
+
+struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
+static int h2_send(struct h2c *h2c);
+static int h2_recv(struct h2c *h2c);
+static int h2_process(struct h2c *h2c);
+/* h2_io_cb is exported to see it resolved in "show fd" */
+struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
+static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
+static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
+static int h2_frt_transfer_data(struct h2s *h2s);
+struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
+static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess);
+static void h2s_alert(struct h2s *h2s);
+static inline void h2_remove_from_list(struct h2s *h2s);
+
+/* returns the stconn associated to the H2 stream */
+static forceinline struct stconn *h2s_sc(const struct h2s *h2s)
+{
+ return h2s->sd->sc;
+}
+
+/* the H2 traces always expect that arg1, if non-null, is of type connection
+ * (from which we can derive h2c), that arg2, if non-null, is of type h2s, and
+ * that arg3, if non-null, is either of type htx for tx headers, or of type
+ * buffer for everything else.
+ */
+static void h2_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ const struct connection *conn = a1;
+ const struct h2c *h2c = conn ? conn->ctx : NULL;
+ const struct h2s *h2s = a2;
+ const struct buffer *buf = a3;
+ const struct htx *htx;
+ int pos;
+
+ if (!h2c) // nothing to add
+ return;
+
+ if (src->verbosity > H2_VERB_CLEAN) {
+ chunk_appendf(&trace_buf, " : h2c=%p(%c,%s)", h2c, conn_is_back(conn) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
+
+ if (mask & H2_EV_H2C_NEW) // inside h2_init, otherwise it's hard to match conn & h2c
+ conn_append_debug_info(&trace_buf, conn, " : ");
+
+ if (h2c->errcode)
+ chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2c->errcode), h2c->errcode);
+
+ if (h2c->flags & H2_CF_DEM_IN_PROGRESS && // frame processing has started, type and length are valid
+ (mask & (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) == (H2_EV_RX_FRAME|H2_EV_RX_FHDR)) {
+ chunk_appendf(&trace_buf, " dft=%s/%02x dfl=%d", h2_ft_str(h2c->dft), h2c->dff, h2c->dfl);
+ }
+
+ if (h2s) {
+ if (h2s->id <= 0)
+ chunk_appendf(&trace_buf, " dsi=%d", h2c->dsi);
+ if (h2s == h2_idle_stream)
+ chunk_appendf(&trace_buf, " h2s=IDL");
+ else if (h2s != h2_closed_stream && h2s != h2_refused_stream && h2s != h2_error_stream)
+ chunk_appendf(&trace_buf, " h2s=%p(%d,%s)", h2s, h2s->id, h2s_st_to_str(h2s->st));
+ else if (h2c->dsi > 0) // don't show that before sid is known
+ chunk_appendf(&trace_buf, " h2s=CLO");
+ if (h2s->id && h2s->errcode)
+ chunk_appendf(&trace_buf, " err=%s/%02x", h2_err_str(h2s->errcode), h2s->errcode);
+ }
+ }
+
+ /* Let's dump decoded requests and responses right after parsing. They
+ * are traced at level USER with a few recognizable flags.
+ */
+ if ((mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW) ||
+ mask == (H2_EV_RX_FRAME|H2_EV_RX_HDR)) && buf)
+ htx = htxbuf(buf); // recv req/res
+ else if (mask == (H2_EV_TX_FRAME|H2_EV_TX_HDR))
+ htx = a3; // send req/res
+ else
+ htx = NULL;
+
+ if (level == TRACE_LEVEL_USER && src->verbosity != H2_VERB_MINIMAL && htx && (pos = htx_get_head(htx)) != -1) {
+ const struct htx_blk *blk = htx_get_blk(htx, pos);
+ const struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_REQ_SL)
+ chunk_appendf(&trace_buf, " : [%d] H2 REQ: %.*s %.*s %.*s",
+ h2s ? h2s->id : h2c->dsi,
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+ else if (type == HTX_BLK_RES_SL)
+ chunk_appendf(&trace_buf, " : [%d] H2 RES: %.*s %.*s %.*s",
+ h2s ? h2s->id : h2c->dsi,
+ HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+ HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+ HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+ }
+}
+
+
+/* Detect a pending read0 for a H2 connection. It happens if a read0 was
+ * already reported on a previous xprt->rcvbuf() AND a frame parser failed
+ * to parse pending data, confirming no more progress is possible because
+ * we're facing a truncated frame. The function returns 1 to report a read0
+ * or 0 otherwise.
+ */
+static inline int h2c_read0_pending(struct h2c *h2c)
+{
+ return !!(h2c->flags & H2_CF_END_REACHED);
+}
+
+/* returns true if the connection is allowed to expire, false otherwise. A
+ * connection may expire when it has no attached streams. As long as streams
+ * are attached, the application layer is responsible for timeout management,
+ * and each layer will detach when it doesn't want to wait anymore. When the
+ * last one leaves, the connection must take over timeout management.
+ */
+static inline int h2c_may_expire(const struct h2c *h2c)
+{
+ return !h2c->nb_sc;
+}
+
+/* returns the number of max concurrent streams permitted on a connection,
+ * depending on its side (frontend or backend), falling back to the default
+ * h2_settings_max_concurrent_streams. It may even be zero.
+ */
+static inline int h2c_max_concurrent_streams(const struct h2c *h2c)
+{
+ int ret;
+
+ ret = (h2c->flags & H2_CF_IS_BACK) ?
+ h2_be_settings_max_concurrent_streams :
+ h2_fe_settings_max_concurrent_streams;
+
+ ret = ret ? ret : h2_settings_max_concurrent_streams;
+ return ret;
+}
+
+
+/* update h2c timeout if needed */
+static void h2c_update_timeout(struct h2c *h2c)
+{
+ int is_idle_conn = 0;
+
+ TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
+
+ if (!h2c->task)
+ goto leave;
+
+ if (h2c_may_expire(h2c)) {
+ /* no more streams attached */
+ if (br_data(h2c->mbuf)) {
+ /* pending output data: always the regular data timeout */
+ h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
+ } else {
+ /* no stream, no output data */
+ if (!(h2c->flags & H2_CF_IS_BACK)) {
+ int to;
+
+ if (h2c->max_id > 0 && !b_data(&h2c->dbuf) &&
+ tick_isset(h2c->proxy->timeout.httpka)) {
+ /* idle after having seen one stream => keep-alive */
+ to = h2c->proxy->timeout.httpka;
+ } else {
+ /* before first request, or started to deserialize a
+ * new req => http-request.
+ */
+ to = h2c->proxy->timeout.httpreq;
+ }
+
+ h2c->task->expire = tick_add_ifset(h2c->idle_start, to);
+ is_idle_conn = 1;
+ }
+
+ if (h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED)) {
+ /* GOAWAY sent (or failed), closing in progress */
+ int exp = tick_add_ifset(now_ms, h2c->shut_timeout);
+
+ h2c->task->expire = tick_first(h2c->task->expire, exp);
+ is_idle_conn = 1;
+ }
+
+ /* if a timeout above was not set, fall back to the default one */
+ if (!tick_isset(h2c->task->expire))
+ h2c->task->expire = tick_add_ifset(now_ms, h2c->timeout);
+ }
+
+ if ((h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
+ is_idle_conn && tick_isset(global.close_spread_end)) {
+ /* If a soft-stop is in progress and a close-spread-time
+ * is set, we want to spread idle connection closing roughly
+ * evenly across the defined window. This should only
+ * act on idle frontend connections.
+ * If the window end is already in the past, we wake the
+ * timeout task up immediately so that it can be closed.
+ */
+ int remaining_window = tick_remain(now_ms, global.close_spread_end);
+ if (remaining_window) {
+ /* We don't need to reset the expire if it would
+ * already happen before the close window end.
+ */
+ if (tick_isset(h2c->task->expire) &&
+ tick_is_le(global.close_spread_end, h2c->task->expire)) {
+ /* Set an expire value shorter than the current value
+ * because the close spread window end comes earlier.
+ */
+ h2c->task->expire = tick_add(now_ms, statistical_prng_range(remaining_window));
+ }
+ }
+ else {
+ /* We are past the soft close window end, wake the timeout
+ * task up immediately.
+ */
+ task_wakeup(h2c->task, TASK_WOKEN_TIMER);
+ }
+ }
+
+ } else {
+ h2c->task->expire = TICK_ETERNITY;
+ }
+ task_queue(h2c->task);
+ leave:
+ TRACE_LEAVE(H2_EV_H2C_WAKE);
+}
+
+static __inline int
+h2c_is_dead(const struct h2c *h2c)
+{
+ if (eb_is_empty(&h2c->streams_by_id) && /* don't close if streams exist */
+ ((h2c->flags & H2_CF_ERROR) || /* errors close immediately */
+ (h2c->flags & H2_CF_ERR_PENDING && h2c->st0 < H2_CS_FRAME_H) || /* early error during connect */
+ (h2c->st0 >= H2_CS_ERROR && !h2c->task) || /* a timeout stroke earlier */
+ (!(h2c->conn->owner) && !conn_is_reverse(h2c->conn)) || /* Nobody's left to take care of the connection, drop it now */
+ (!br_data(h2c->mbuf) && /* mux buffer empty, also process clean events below */
+ ((h2c->flags & H2_CF_RCVD_SHUT) ||
+ (h2c->last_sid >= 0 && h2c->max_id >= h2c->last_sid)))))
+ return 1;
+
+ return 0;
+}
+
+/*****************************************************/
+/* functions below are for dynamic buffer management */
+/*****************************************************/
+
+/* indicates whether or not we may call the h2_recv() function to attempt
+ * to receive data into the buffer and/or demux pending data. The condition is
+ * a bit complex due to some API limits for now. The rules are the following :
+ * - if an error or a shutdown was detected on the connection and the buffer
+ * is empty, we must not attempt to receive
+ * - if the demux buf failed to be allocated, we must not try to receive and
+ * we know there is nothing pending
+ * - if no flag indicates a blocking condition, we may attempt to receive,
+ * regardless of whether the demux buffer is full or not, so that only
+ * de demux part decides whether or not to block. This is needed because
+ * the connection API indeed prevents us from re-enabling receipt that is
+ * already enabled in a polled state, so we must always immediately stop
+ * as soon as the demux can't proceed so as never to hit an end of read
+ * with data pending in the buffers.
+ * - otherwise we may not attempt to receive
+ */
+static inline int h2_recv_allowed(const struct h2c *h2c)
+{
+ if (b_data(&h2c->dbuf) == 0 &&
+ ((h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR)) || h2c->st0 >= H2_CS_ERROR))
+ return 0;
+
+ if (!(h2c->flags & H2_CF_DEM_DALLOC) &&
+ !(h2c->flags & H2_CF_DEM_BLOCK_ANY))
+ return 1;
+
+ return 0;
+}
+
+/* restarts reading on the connection if it was not enabled */
+static inline void h2c_restart_reading(const struct h2c *h2c, int consider_buffer)
+{
+ if (!h2_recv_allowed(h2c))
+ return;
+ if ((!consider_buffer || !b_data(&h2c->dbuf))
+ && (h2c->wait_event.events & SUB_RETRY_RECV))
+ return;
+ tasklet_wakeup(h2c->wait_event.tasklet);
+}
+
+
+/* returns true if the front connection has too many stream connectors attached */
+static inline int h2_frt_has_too_many_sc(const struct h2c *h2c)
+{
+ return h2c->nb_sc > h2c_max_concurrent_streams(h2c) ||
+ unlikely(conn_reverse_in_preconnect(h2c->conn));
+}
+
/* Tries to grab a buffer and to re-enable processing on mux <target>. The h2c
 * flags are used to figure what buffer was requested. It returns 1 if the
 * allocation succeeds, in which case the connection is woken up, or 0 if it's
 * impossible to wake up and we prefer to be woken up later.
 */
static int h2_buf_available(void *target)
{
	struct h2c *h2c = target;
	struct h2s *h2s;

	/* the demux buffer was missing: allocate it and resume reading */
	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
		h2c->flags &= ~H2_CF_DEM_DALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	/* the mux (egress) buffer was missing: allocate the ring's tail; also
	 * unblock the demux side if it was waiting for room in the mux buffer.
	 */
	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
		h2c->flags &= ~H2_CF_MUX_MALLOC;

		if (h2c->flags & H2_CF_DEM_MROOM) {
			h2c->flags &= ~H2_CF_DEM_MROOM;
			h2c_restart_reading(h2c, 1);
		}
		return 1;
	}

	/* the rxbuf of the stream being demuxed (dsi) was missing: only
	 * allocate it if the stream still exists and still has a stream
	 * connector attached, otherwise keep waiting.
	 */
	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s_sc(h2s) &&
	    b_alloc(&h2s->rxbuf)) {
		h2c->flags &= ~H2_CF_DEM_SALLOC;
		h2c_restart_reading(h2c, 1);
		return 1;
	}

	return 0;
}
+
/* Tries to allocate a buffer for <bptr> on behalf of connection <h2c>.
 * Returns the buffer on success, otherwise NULL. On allocation failure the
 * connection is enrolled in the thread's buffer wait queue so that
 * h2_buf_available() is called back once buffers become available again. If
 * the connection is already enrolled, no allocation is even attempted and
 * NULL is returned.
 */
static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
{
	struct buffer *buf = NULL;

	if (likely(!LIST_INLIST(&h2c->buf_wait.list)) &&
	    unlikely((buf = b_alloc(bptr)) == NULL)) {
		h2c->buf_wait.target = h2c;
		h2c->buf_wait.wakeup_cb = h2_buf_available;
		LIST_APPEND(&th_ctx->buffer_wq, &h2c->buf_wait.list);
	}
	return buf;
}
+
+static inline void h2_release_buf(struct h2c *h2c, struct buffer *bptr)
+{
+ if (bptr->size) {
+ b_free(bptr);
+ offer_buffers(NULL, 1);
+ }
+}
+
+static inline void h2_release_mbuf(struct h2c *h2c)
+{
+ struct buffer *buf;
+ unsigned int count = 0;
+
+ while (b_size(buf = br_head_pick(h2c->mbuf))) {
+ b_free(buf);
+ count++;
+ }
+ if (count)
+ offer_buffers(NULL, count);
+}
+
+/* returns the number of allocatable outgoing streams for the connection taking
+ * the last_sid and the reserved ones into account.
+ */
+static inline int h2_streams_left(const struct h2c *h2c)
+{
+ int ret;
+
+ /* consider the number of outgoing streams we're allowed to create before
+ * reaching the last GOAWAY frame seen. max_id is the last assigned id,
+ * nb_reserved is the number of streams which don't yet have an ID.
+ */
+ ret = (h2c->last_sid >= 0) ? h2c->last_sid : 0x7FFFFFFF;
+ ret = (unsigned int)(ret - h2c->max_id) / 2 - h2c->nb_reserved - 1;
+ if (ret < 0)
+ ret = 0;
+ return ret;
+}
+
+/* returns the number of streams in use on a connection to figure if it's
+ * idle or not. We check nb_sc and not nb_streams as the caller will want
+ * to know if it was the last one after a detach().
+ */
+static int h2_used_streams(struct connection *conn)
+{
+ struct h2c *h2c = conn->ctx;
+
+ return h2c->nb_sc;
+}
+
+/* returns the number of concurrent streams available on the connection */
+static int h2_avail_streams(struct connection *conn)
+{
+ struct server *srv = objt_server(conn->target);
+ struct h2c *h2c = conn->ctx;
+ int ret1, ret2;
+
+ /* RFC7540#6.8: Receivers of a GOAWAY frame MUST NOT open additional
+ * streams on the connection.
+ */
+ if (h2c->last_sid >= 0)
+ return 0;
+
+ if (h2c->st0 >= H2_CS_ERROR)
+ return 0;
+
+ /* note: may be negative if a SETTINGS frame changes the limit */
+ ret1 = h2c->streams_limit - h2c->nb_streams;
+
+ /* we must also consider the limit imposed by stream IDs */
+ ret2 = h2_streams_left(h2c);
+ ret1 = MIN(ret1, ret2);
+ if (ret1 > 0 && srv && srv->max_reuse >= 0) {
+ ret2 = h2c->stream_cnt <= srv->max_reuse ? srv->max_reuse - h2c->stream_cnt + 1: 0;
+ ret1 = MIN(ret1, ret2);
+ }
+ return ret1;
+}
+
/* Unconditionally produce a trace of the header. Please do not call this one
 * and use h2_trace_header() instead which first checks if traces are enabled.
 * The connection/stream context strings and the (possibly truncated) header
 * name and value are all built into the shared trash chunk; the name is
 * capped at 256 chars and the value at 1024 chars, with a " (... +N)" suffix
 * indicating how many chars were dropped.
 */
void _h2_trace_header(const struct ist hn, const struct ist hv,
		      uint64_t mask, const struct ist trc_loc, const char *func,
		      const struct h2c *h2c, const struct h2s *h2s)
{
	struct ist n_ist, v_ist;
	const char *c_str, *s_str;

	chunk_reset(&trash);
	/* connection part, e.g. "h2c=%p(F,FRH) " or empty */
	c_str = chunk_newstr(&trash);
	if (h2c) {
		chunk_appendf(&trash, "h2c=%p(%c,%s) ",
			      h2c, (h2c->flags & H2_CF_IS_BACK) ? 'B' : 'F', h2c_st_to_str(h2c->st0));
	}

	/* stream part; for not-yet-assigned stream IDs report the demuxed
	 * stream id (dsi) instead.
	 */
	s_str = chunk_newstr(&trash);
	if (h2s) {
		if (h2s->id <= 0)
			chunk_appendf(&trash, "dsi=%d ", h2s->h2c->dsi);
		chunk_appendf(&trash, "h2s=%p(%d,%s) ", h2s, h2s->id, h2s_st_to_str(h2s->st));
	}
	else if (h2c)
		chunk_appendf(&trash, "dsi=%d ", h2c->dsi);

	/* header name, truncated to 256 chars */
	n_ist = ist2(chunk_newstr(&trash), 0);
	istscpy(&n_ist, hn, 256);
	trash.data += n_ist.len;
	if (n_ist.len != hn.len)
		chunk_appendf(&trash, " (... +%ld)", (long)(hn.len - n_ist.len));

	/* header value, truncated to 1024 chars */
	v_ist = ist2(chunk_newstr(&trash), 0);
	istscpy(&v_ist, hv, 1024);
	trash.data += v_ist.len;
	if (v_ist.len != hv.len)
		chunk_appendf(&trash, " (... +%ld)", (long)(hv.len - v_ist.len));

	TRACE_PRINTF_LOC(TRACE_LEVEL_USER, mask, trc_loc, func,
	                 (h2c ? h2c->conn : 0), 0, 0, 0,
	                 "%s%s%s %s: %s", c_str, s_str,
	                 (mask & H2_EV_TX_HDR) ? "sndh" : "rcvh",
	                 n_ist.ptr, v_ist.ptr);
}
+
/* produce a trace of the header after checking that tracing is enabled:
 * the trace source must be at least at ADVANCED verbosity and the USER level
 * filter must match for this connection/stream before the (costly)
 * _h2_trace_header() is invoked.
 */
static inline void h2_trace_header(const struct ist hn, const struct ist hv,
				   uint64_t mask, const struct ist trc_loc, const char *func,
				   const struct h2c *h2c, const struct h2s *h2s)
{
	if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
	    TRACE_ENABLED(TRACE_LEVEL_USER, mask, h2c ? h2c->conn : 0, h2s, 0, 0))
		_h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
}
+
+/* hpack-encode header name <hn> and value <hv>, possibly emitting a trace if
+ * currently enabled. This is done on behalf of function <func> at <trc_loc>
+ * passed as ist(TRC_LOC), h2c <h2c>, and h2s <h2s>, all of which may be NULL.
+ * The trace is only emitted if the header is emitted (in which case non-zero
+ * is returned). The trash is modified. In the traces, the header's name will
+ * be truncated to 256 chars and the header's value to 1024 chars.
+ */
+static inline int h2_encode_header(struct buffer *buf, const struct ist hn, const struct ist hv,
+ uint64_t mask, const struct ist trc_loc, const char *func,
+ const struct h2c *h2c, const struct h2s *h2s)
+{
+ int ret;
+
+ ret = hpack_encode_header(buf, hn, hv);
+ if (ret)
+ h2_trace_header(hn, hv, mask, trc_loc, func, h2c, h2s);
+
+ return ret;
+}
+
+/*****************************************************************/
+/* functions below are dedicated to the mux setup and management */
+/*****************************************************************/
+
/* Initialize the mux once it's attached. For outgoing connections, the context
 * is already initialized before installing the mux, so we detect incoming
 * connections from the fact that the context is still NULL (even during mux
 * upgrades). <input> is always used as Input buffer and may contain data. It is
 * the caller responsibility to not reuse it anymore. Returns < 0 on error.
 */
static int h2_init(struct connection *conn, struct proxy *prx, struct session *sess,
		   struct buffer *input)
{
	struct h2c *h2c;
	struct task *t = NULL;
	void *conn_ctx = conn->ctx;  /* saved to restore on failure (bck sc) */

	TRACE_ENTER(H2_EV_H2C_NEW);

	h2c = pool_alloc(pool_head_h2c);
	if (!h2c)
		goto fail_no_h2c;

	/* pick timeouts and stats counters from the relevant side */
	if (conn_is_back(conn)) {
		h2c->flags = H2_CF_IS_BACK;
		h2c->shut_timeout = h2c->timeout = prx->timeout.server;
		if (tick_isset(prx->timeout.serverfin))
			h2c->shut_timeout = prx->timeout.serverfin;

		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
						      &h2_stats_module);
	} else {
		h2c->flags = H2_CF_NONE;
		h2c->shut_timeout = h2c->timeout = prx->timeout.client;
		if (tick_isset(prx->timeout.clientfin))
			h2c->shut_timeout = prx->timeout.clientfin;

		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
						      &h2_stats_module);
	}

	h2c->proxy = prx;
	h2c->task = NULL;
	h2c->wait_event.tasklet = NULL;
	h2c->idle_start = now_ms;
	/* a timeout task is only needed when a timeout is configured */
	if (tick_isset(h2c->timeout)) {
		t = task_new_here();
		if (!t)
			goto fail;

		h2c->task = t;
		t->process = h2_timeout_task;
		t->context = h2c;
		t->expire = tick_add(now_ms, h2c->timeout);
	}

	h2c->wait_event.tasklet = tasklet_new();
	if (!h2c->wait_event.tasklet)
		goto fail;
	h2c->wait_event.tasklet->process = h2_io_cb;
	h2c->wait_event.tasklet->context = h2c;
	h2c->wait_event.events = 0;
	if (!conn_is_back(conn)) {
		/* Connection might already be in the stopping_list if subject
		 * to h1->h2 upgrade.
		 */
		if (!LIST_INLIST(&conn->stopping_list)) {
			LIST_APPEND(&mux_stopping_data[tid].list,
				    &conn->stopping_list);
		}
	}

	/* decoding dynamic table for HPACK */
	h2c->ddht = hpack_dht_alloc();
	if (!h2c->ddht)
		goto fail;

	/* Initialise the context. */
	h2c->st0 = H2_CS_PREFACE;
	h2c->conn = conn;
	h2c->streams_limit = h2c_max_concurrent_streams(h2c);
	h2c->max_id = -1;
	h2c->errcode = H2_ERR_NO_ERROR;
	h2c->rcvd_c = 0;
	h2c->rcvd_s = 0;
	h2c->nb_streams = 0;
	h2c->nb_sc = 0;
	h2c->nb_reserved = 0;
	h2c->stream_cnt = 0;

	/* ownership of <input> is transferred to the demux buffer */
	h2c->dbuf = *input;
	h2c->dsi = -1;

	h2c->last_sid = -1;

	br_init(h2c->mbuf, sizeof(h2c->mbuf) / sizeof(h2c->mbuf[0]));
	h2c->miw = 65535; /* mux initial window size */
	h2c->mws = 65535; /* mux window size */
	h2c->mfs = 16384; /* initial max frame size */
	h2c->streams_by_id = EB_ROOT;
	LIST_INIT(&h2c->send_list);
	LIST_INIT(&h2c->fctl_list);
	LIST_INIT(&h2c->blocked_list);
	LIST_INIT(&h2c->buf_wait.list);

	conn->ctx = h2c;

	TRACE_USER("new H2 connection", H2_EV_H2C_NEW, conn);

	if (t)
		task_queue(t);

	if (h2c->flags & H2_CF_IS_BACK && likely(!conn_is_reverse(h2c->conn))) {
		/* FIXME: this is temporary, for outgoing connections we need
		 * to immediately allocate a stream until the code is modified
		 * so that the caller calls ->attach(). For now the outgoing sc
		 * is stored as conn->ctx by the caller and saved in conn_ctx.
		 */
		struct h2s *h2s;

		h2s = h2c_bck_stream_new(h2c, conn_ctx, sess);
		if (!h2s)
			goto fail_stream;
	}

	if (sess)
		proxy_inc_fe_cum_sess_ver_ctr(sess->listener, prx, 2);
	HA_ATOMIC_INC(&h2c->px_counters->open_conns);
	HA_ATOMIC_INC(&h2c->px_counters->total_conns);

	/* prepare to read something */
	h2c_restart_reading(h2c, 1);
	TRACE_LEAVE(H2_EV_H2C_NEW, conn);
	return 0;
  fail_stream:
	hpack_dht_free(h2c->ddht);
  fail:
	task_destroy(t);
	tasklet_free(h2c->wait_event.tasklet);
	pool_free(pool_head_h2c, h2c);
  fail_no_h2c:
	if (!conn_is_back(conn))
		LIST_DEL_INIT(&conn->stopping_list);
	conn->ctx = conn_ctx; /* restore saved ctx */
	TRACE_DEVEL("leaving in error", H2_EV_H2C_NEW|H2_EV_H2C_END|H2_EV_H2C_ERR);
	return -1;
}
+
+/* returns the next allocatable outgoing stream ID for the H2 connection, or
+ * -1 if no more is allocatable.
+ */
+static inline int32_t h2c_get_next_sid(const struct h2c *h2c)
+{
+ int32_t id = (h2c->max_id + 1) | 1;
+
+ if ((id & 0x80000000U) || (h2c->last_sid >= 0 && id > h2c->last_sid))
+ id = -1;
+ return id;
+}
+
+/* returns the stream associated with id <id> or NULL if not found */
+static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id)
+{
+ struct eb32_node *node;
+
+ if (id == 0)
+ return (struct h2s *)h2_closed_stream;
+
+ if (id > h2c->max_id)
+ return (struct h2s *)h2_idle_stream;
+
+ node = eb32_lookup(&h2c->streams_by_id, id);
+ if (!node)
+ return (struct h2s *)h2_closed_stream;
+
+ return container_of(node, struct h2s, by_id);
+}
+
/* release function. This one should be called to free all resources allocated
 * to the mux. It detaches from any pending buffer wait queue, releases all
 * demux/mux buffers, detaches and wakes the timeout task, frees the I/O
 * tasklet, unsubscribes from the transport layer, then closes and frees the
 * underlying connection if still present.
 */
static void h2_release(struct h2c *h2c)
{
	struct connection *conn = h2c->conn;

	TRACE_ENTER(H2_EV_H2C_END);

	hpack_dht_free(h2c->ddht);

	/* leave the buffer wait queue if we were still enrolled */
	if (LIST_INLIST(&h2c->buf_wait.list))
		LIST_DEL_INIT(&h2c->buf_wait.list);

	h2_release_buf(h2c, &h2c->dbuf);
	h2_release_mbuf(h2c);

	/* the timeout task frees itself once woken with a NULL context */
	if (h2c->task) {
		h2c->task->context = NULL;
		task_wakeup(h2c->task, TASK_WOKEN_OTHER);
		h2c->task = NULL;
	}
	tasklet_free(h2c->wait_event.tasklet);
	if (conn && h2c->wait_event.events != 0)
		conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
					&h2c->wait_event);

	HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

	pool_free(pool_head_h2c, h2c);

	if (conn) {
		if (!conn_is_back(conn))
			LIST_DEL_INIT(&conn->stopping_list);

		conn->mux = NULL;
		conn->ctx = NULL;
		TRACE_DEVEL("freeing conn", H2_EV_H2C_END, conn);

		conn_stop_tracking(conn);

		/* there might be a GOAWAY frame still pending in the TCP
		 * stack, and if the peer continues to send (i.e. window
		 * updates etc), this can result in losing the GOAWAY. For
		 * this reason we try to drain anything received in between.
		 */
		conn->flags |= CO_FL_WANT_DRAIN;

		conn_xprt_shutw(conn);
		conn_xprt_close(conn);
		conn_sock_shutw(conn, !conn_is_back(conn));
		conn_ctrl_close(conn);

		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}

	TRACE_LEAVE(H2_EV_H2C_END);
}
+
+
+/******************************************************/
+/* functions below are for the H2 protocol processing */
+/******************************************************/
+
+/* returns the stream if of stream <h2s> or 0 if <h2s> is NULL */
+static inline __maybe_unused int h2s_id(const struct h2s *h2s)
+{
+ return h2s ? h2s->id : 0;
+}
+
+/* returns the sum of the stream's own window size and the mux's initial
+ * window, which together form the stream's effective window size.
+ */
+static inline int h2s_mws(const struct h2s *h2s)
+{
+ return h2s->sws + h2s->h2c->miw;
+}
+
+/* marks an error on the connection. Before settings are sent, we must not send
+ * a GOAWAY frame, and the error state will prevent h2c_send_goaway_error()
+ * from verifying this so we set H2_CF_GOAWAY_FAILED to make sure it will not
+ * even try.
+ */
+static inline __maybe_unused void h2c_error(struct h2c *h2c, enum h2_err err)
+{
+ TRACE_POINT(H2_EV_H2C_ERR, h2c->conn, 0, 0, (void *)(long)(err));
+ h2c->errcode = err;
+ if (h2c->st0 < H2_CS_SETTINGS1)
+ h2c->flags |= H2_CF_GOAWAY_FAILED;
+ h2c->st0 = H2_CS_ERROR;
+}
+
+/* marks an error on the stream. It may also update an already closed stream
+ * (e.g. to report an error after an RST was received).
+ */
+static inline __maybe_unused void h2s_error(struct h2s *h2s, enum h2_err err)
+{
+ if (h2s->id && h2s->st != H2_SS_ERROR) {
+ TRACE_POINT(H2_EV_H2S_ERR, h2s->h2c->conn, h2s, 0, (void *)(long)(err));
+ h2s->errcode = err;
+ if (h2s->st < H2_SS_ERROR)
+ h2s->st = H2_SS_ERROR;
+ se_fl_set_error(h2s->sd);
+ }
+}
+
+/* attempt to notify the data layer of recv availability */
+static void __maybe_unused h2s_notify_recv(struct h2s *h2s)
+{
+ if (h2s->subs && h2s->subs->events & SUB_RETRY_RECV) {
+ TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
+ tasklet_wakeup(h2s->subs->tasklet);
+ h2s->subs->events &= ~SUB_RETRY_RECV;
+ if (!h2s->subs->events)
+ h2s->subs = NULL;
+ }
+}
+
/* attempt to notify the data layer of send availability */
static void __maybe_unused h2s_notify_send(struct h2s *h2s)
{
	if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
		TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
		/* mark as notified before waking so the wakeup is not redone */
		h2s->flags |= H2_SF_NOTIFIED;
		tasklet_wakeup(h2s->subs->tasklet);
		h2s->subs->events &= ~SUB_RETRY_SEND;
		/* drop the subscription once no event remains */
		if (!h2s->subs->events)
			h2s->subs = NULL;
	}
	else if (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) {
		/* no subscriber but a shutdown is pending: resume it */
		TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
		tasklet_wakeup(h2s->shut_tl);
	}
}
+
/* alerts the data layer, trying to wake it up by all means, following
 * this sequence :
 *   - if the h2s' data layer is subscribed to recv, then it's woken up for recv
 *   - if its subscribed to send, then it's woken up for send
 *   - if it was subscribed to neither, its ->wake() callback is called
 * It is safe to call this function with a closed stream which doesn't have a
 * stream connector anymore.
 */
static void __maybe_unused h2s_alert(struct h2s *h2s)
{
	TRACE_ENTER(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);

	/* a pending shutdown also goes through the notify path so the
	 * deferred shut tasklet gets a chance to run.
	 */
	if (h2s->subs ||
	    (h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW))) {
		h2s_notify_recv(h2s);
		h2s_notify_send(h2s);
	}
	else if (h2s_sc(h2s) && h2s_sc(h2s)->app_ops->wake != NULL) {
		/* no subscription at all: fall back to the app-level wake() */
		TRACE_POINT(H2_EV_STRM_WAKE, h2s->h2c->conn, h2s);
		h2s_sc(h2s)->app_ops->wake(h2s_sc(h2s));
	}

	TRACE_LEAVE(H2_EV_H2S_WAKE, h2s->h2c->conn, h2s);
}
+
+/* writes the 24-bit frame size <len> at address <frame> */
+static inline __maybe_unused void h2_set_frame_size(void *frame, uint32_t len)
+{
+ uint8_t *out = frame;
+
+ *out = len >> 16;
+ write_n16(out + 1, len);
+}
+
/* reads <bytes> bytes from buffer <b> starting at relative offset <o> from the
 * current pointer, dealing with wrapping, and stores the result in <dst>. It's
 * the caller's responsibility to verify that there are at least <bytes> bytes
 * available in the buffer's input prior to calling this function. The buffer
 * is assumed not to hold any output data.
 */
static inline __maybe_unused void h2_get_buf_bytes(void *dst, size_t bytes,
						   const struct buffer *b, int o)
{
	readv_bytes(dst, bytes, b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
+
/* reads a network-ordered 16-bit value at relative offset <o> of buffer <b>,
 * dealing with wrapping; same preconditions as h2_get_buf_bytes().
 */
static inline __maybe_unused uint16_t h2_get_n16(const struct buffer *b, int o)
{
	return readv_n16(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
+
/* reads a network-ordered 32-bit value at relative offset <o> of buffer <b>,
 * dealing with wrapping; same preconditions as h2_get_buf_bytes().
 */
static inline __maybe_unused uint32_t h2_get_n32(const struct buffer *b, int o)
{
	return readv_n32(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
+
/* reads a network-ordered 64-bit value at relative offset <o> of buffer <b>,
 * dealing with wrapping; same preconditions as h2_get_buf_bytes().
 */
static inline __maybe_unused uint64_t h2_get_n64(const struct buffer *b, int o)
{
	return readv_n64(b_peek(b, o), b_wrap(b) - b_peek(b, o), b_orig(b));
}
+
+
/* Peeks an H2 frame header from offset <o> of buffer <b> into descriptor <h>.
 * The algorithm is not obvious. It turns out that H2 headers are neither
 * aligned nor do they use regular sizes. And to add to the trouble, the buffer
 * may wrap so each byte read must be checked. The header is formed like this :
 *
 *       b0         b1       b2     b3   b4         b5..b8
 *  +----------+---------+--------+----+----+----------------------+
 *  |len[23:16]|len[15:8]|len[7:0]|type|flag|sid[31:0] (big endian)|
 *  +----------+---------+--------+----+----+----------------------+
 *
 * Here we read a big-endian 64 bit word from h[1]. This way in a single read
 * we get the sid properly aligned and ordered, and 16 bits of len properly
 * ordered as well. The type and flags can be extracted using bit shifts from
 * the word, and only one extra read is needed to fetch len[16:23].
 * Returns zero if some bytes are missing, otherwise non-zero on success. The
 * buffer is assumed not to contain any output data.
 */
static __maybe_unused int h2_peek_frame_hdr(const struct buffer *b, int o, struct h2_fh *h)
{
	uint64_t w;

	/* a full 9-byte header must be present */
	if (b_data(b) < o + 9)
		return 0;

	w = h2_get_n64(b, o + 1);              /* bytes b1..b8 */
	h->len = *(uint8_t*)b_peek(b, o) << 16; /* b0 = len[23:16] */
	h->sid = w & 0x7FFFFFFF; /* RFC7540#4.1: R bit must be ignored */
	h->ff = w >> 32;                       /* b4 = flags */
	h->ft = w >> 40;                       /* b3 = type */
	h->len += w >> 48;                     /* b1..b2 = len[15:0] */
	return 1;
}
+
/* skip the next 9 bytes corresponding to the frame header possibly parsed by
 * h2_peek_frame_hdr() above.
 */
static inline __maybe_unused void h2_skip_frame_hdr(struct buffer *b)
{
	b_del(b, 9);
}
+
+/* same as above, automatically advances the buffer on success */
+static inline __maybe_unused int h2_get_frame_hdr(struct buffer *b, struct h2_fh *h)
+{
+ int ret;
+
+ ret = h2_peek_frame_hdr(b, 0, h);
+ if (ret > 0)
+ h2_skip_frame_hdr(b);
+ return ret;
+}
+
+
/* try to fragment the headers frame present at the beginning of buffer <b>,
 * enforcing a limit of <mfs> bytes per frame. Returns 0 on failure, 1 on
 * success. Typical causes of failure include a buffer not large enough to
 * add extra frame headers. The existing frame size is read in the current
 * frame. Its EH flag will be cleared if CONTINUATION frames need to be added,
 * and its length will be adjusted. The stream ID for continuation frames will
 * be copied from the initial frame's.
 */
static int h2_fragment_headers(struct buffer *b, uint32_t mfs)
{
	/* payload size of the original frame (first 9 bytes are its header) */
	size_t remain = b->data - 9;
	/* number of CONTINUATION frames needed to carry the overflow */
	int extra_frames = (remain - 1) / mfs;
	size_t fsize;
	char *fptr;
	int frame;

	/* already fits in a single frame: nothing to do */
	if (b->data <= mfs + 9)
		return 1;

	/* Too large a frame, we need to fragment it using CONTINUATION
	 * frames. We start from the end and move tails as needed.
	 */
	if (b->data + extra_frames * 9 > b->size)
		return 0;

	/* work backwards so earlier payload bytes are not overwritten */
	for (frame = extra_frames; frame; frame--) {
		/* last fragment takes the remainder, others take <mfs> */
		fsize = ((remain - 1) % mfs) + 1;
		remain -= fsize;

		/* move data: leave a 9-byte hole for this fragment's header */
		fptr = b->area + 9 + remain + (frame - 1) * 9;
		memmove(fptr + 9, b->area + 9 + remain, fsize);
		b->data += 9;

		/* write new frame header; only the last CONTINUATION keeps
		 * the END_HEADERS flag. The stream ID is copied from the
		 * initial frame's header.
		 */
		h2_set_frame_size(fptr, fsize);
		fptr[3] = H2_FT_CONTINUATION;
		fptr[4] = (frame == extra_frames) ? H2_F_HEADERS_END_HEADERS : 0;
		write_n32(fptr + 5, read_n32(b->area + 5));
	}

	/* the initial frame is shrunk and loses END_HEADERS */
	b->area[4] &= ~H2_F_HEADERS_END_HEADERS;
	h2_set_frame_size(b->area, remain);
	return 1;
}
+
+
/* marks stream <h2s> as CLOSED and decrement the number of active streams for
 * its connection if the stream was not yet closed. Please use this exclusively
 * before closing a stream to ensure stream count is well maintained. Note that
 * it does explicitly support being called with a partially initialized h2s
 * (e.g. sd==NULL).
 */
static inline void h2s_close(struct h2s *h2s)
{
	if (h2s->st != H2_SS_CLOSED) {
		TRACE_ENTER(H2_EV_H2S_END, h2s->h2c->conn, h2s);
		h2s->h2c->nb_streams--;
		/* id-less streams were only reserved, not counted by ID */
		if (!h2s->id)
			h2s->h2c->nb_reserved--;
		if (h2s->sd && h2s_sc(h2s)) {
			/* let the data layer learn about the close if it has
			 * nothing left to read and EOS was not reported yet.
			 */
			if (!se_fl_test(h2s->sd, SE_FL_EOS) && !b_data(&h2s->rxbuf))
				h2s_notify_recv(h2s);
		}
		HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

		TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
	}
	h2s->st = H2_SS_CLOSED;
}
+
/* Check h2c and h2s flags to evaluate if EOI/EOS/ERR_PENDING/ERROR flags must
 * be set on the SE.
 */
static inline void h2s_propagate_term_flags(struct h2c *h2c, struct h2s *h2s)
{
	/* END_STREAM received: the input side is complete */
	if (h2s->flags & H2_SF_ES_RCVD) {
		se_fl_set(h2s->sd, SE_FL_EOI);
		/* Add EOS flag for tunnel */
		if (h2s->flags & H2_SF_BODY_TUNNEL)
			se_fl_set(h2s->sd, SE_FL_EOS);
	}
	/* connection-level read0 or closed stream: report EOS, and an error
	 * if the message was not complete (no EOI).
	 */
	if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED) {
		se_fl_set(h2s->sd, SE_FL_EOS);
		if (!se_fl_test(h2s->sd, SE_FL_EOI))
			se_fl_set(h2s->sd, SE_FL_ERROR);
	}
	/* promote a pending error to a definitive one */
	if (se_fl_test(h2s->sd, SE_FL_ERR_PENDING))
		se_fl_set(h2s->sd, SE_FL_ERROR);
}
+
/* detaches an H2 stream from its H2C and releases it to the H2S pool. */
/* h2s_destroy should only ever be called by the thread that owns the stream,
 * that means that a tasklet should be used if we want to destroy the h2s
 * from another thread
 */
static void h2s_destroy(struct h2s *h2s)
{
	struct connection *conn = h2s->h2c->conn;

	TRACE_ENTER(H2_EV_H2S_END, conn, h2s);

	h2s_close(h2s);
	eb32_delete(&h2s->by_id);
	/* release the rx buffer and offer it back to waiters */
	if (b_size(&h2s->rxbuf)) {
		b_free(&h2s->rxbuf);
		offer_buffers(NULL, 1);
	}

	if (h2s->subs)
		h2s->subs->events = 0;

	/* There's no need to explicitly call unsubscribe here, the only
	 * reference left would be in the h2c send_list/fctl_list, and if
	 * we're in it, we're getting out anyway
	 */
	h2_remove_from_list(h2s);

	/* ditto, calling tasklet_free() here should be ok */
	tasklet_free(h2s->shut_tl);
	/* a non-orphaned sd here would mean a stream connector still points
	 * to this h2s, which would be a use-after-free.
	 */
	BUG_ON(h2s->sd && !se_fl_test(h2s->sd, SE_FL_ORPHAN));
	sedesc_free(h2s->sd);
	pool_free(pool_head_h2s, h2s);

	TRACE_LEAVE(H2_EV_H2S_END, conn);
}
+
/* allocates a new stream <id> for connection <h2c> and adds it into h2c's
 * stream tree. In case of error, nothing is added and NULL is returned. The
 * causes of errors can be any failed memory allocation. The caller is
 * responsible for checking if the connection may support an extra stream
 * prior to calling this function. An <id> of zero reserves the stream without
 * assigning an ID yet (backend side).
 */
static struct h2s *h2s_new(struct h2c *h2c, int id)
{
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	h2s = pool_alloc(pool_head_h2s);
	if (!h2s)
		goto out;

	/* tasklet used to perform deferred shutdowns from the owning thread */
	h2s->shut_tl = tasklet_new();
	if (!h2s->shut_tl) {
		pool_free(pool_head_h2s, h2s);
		goto out;
	}
	h2s->subs = NULL;
	h2s->shut_tl->process = h2_deferred_shut;
	h2s->shut_tl->context = h2s;
	LIST_INIT(&h2s->list);
	h2s->h2c = h2c;
	h2s->sd = NULL;
	h2s->sws = 0;
	h2s->flags = H2_SF_NONE;
	h2s->errcode = H2_ERR_NO_ERROR;
	h2s->st = H2_SS_IDLE;
	h2s->status = 0;
	h2s->body_len = 0;
	h2s->rxbuf = BUF_NULL;
	memset(h2s->upgrade_protocol, 0, sizeof(h2s->upgrade_protocol));

	h2s->by_id.key = h2s->id = id;
	/* positive id: record the highest assigned id; zero: count a reserved
	 * (id-less) stream instead.
	 */
	if (id > 0)
		h2c->max_id = id;
	else
		h2c->nb_reserved++;

	eb32_insert(&h2c->streams_by_id, &h2s->by_id);
	h2c->nb_streams++;

	HA_ATOMIC_INC(&h2c->px_counters->open_streams);
	HA_ATOMIC_INC(&h2c->px_counters->total_streams);

	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	return h2s;
  out:
	TRACE_DEVEL("leaving in error", H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
+
/* creates a new stream <id> on the h2c connection and returns it, or NULL in
 * case of memory allocation error. <input> is used as input buffer for the new
 * stream. On success, it is transferred to the stream and the mux is no longer
 * responsible of it. On error, <input> is unchanged, thus the mux must still
 * take care of it.
 */
static struct h2s *h2c_frt_stream_new(struct h2c *h2c, int id, struct buffer *input, uint32_t flags)
{
	struct session *sess = h2c->conn->owner;
	struct h2s *h2s;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* Cannot handle stream if active reversed connection is not yet accepted. */
	BUG_ON(conn_reverse_in_preconnect(h2c->conn));

	/* refuse streams beyond the advertised concurrency limit */
	if (h2c->nb_streams >= h2c_max_concurrent_streams(h2c)) {
		TRACE_ERROR("HEADERS frame causing MAX_CONCURRENT_STREAMS to be exceeded", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
		session_inc_http_req_ctr(sess);
		session_inc_http_err_ctr(sess);
		goto out;
	}

	h2s = h2s_new(h2c, id);
	if (!h2s)
		goto out_alloc;

	h2s->sd = sedesc_new();
	if (!h2s->sd)
		goto out_close;
	h2s->sd->se   = h2s;
	h2s->sd->conn = h2c->conn;
	se_fl_set(h2s->sd, SE_FL_T_MUX | SE_FL_ORPHAN | SE_FL_NOT_FIRST);

	if (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H2_SND))
		se_fl_set(h2s->sd, SE_FL_MAY_FASTFWD_CONS);

	/* The request is not finished, don't expect data from the opposite side
	 * yet
	 */
	if (!(h2c->dff & (H2_F_HEADERS_END_STREAM| H2_F_DATA_END_STREAM)) && !(flags & H2_SF_BODY_TUNNEL))
		se_expect_no_data(h2s->sd);

	/* FIXME: the analogy between extended CONNECT and websocket is not
	 * exact and needs to be refined.
	 */
	if (flags & H2_SF_EXT_CONNECT_RCVD)
		se_fl_set(h2s->sd, SE_FL_WEBSOCKET);

	/* The stream will record the request's accept date (which is either the
	 * end of the connection's or the date immediately after the previous
	 * request) and the idle time, which is the delay since the previous
	 * request. We can set the value now, it will be copied by stream_new().
	 */
	sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;

	if (!sc_new_from_endp(h2s->sd, sess, input))
		goto out_close;

	h2c->nb_sc++;

	/* We want the accept date presented to the next stream to be the one
	 * we have now, the handshake time to be null (since the next stream
	 * is not delayed by a handshake), and the idle time to count since
	 * right now.
	 */
	sess->accept_date = date;
	sess->accept_ts   = now_ns;
	sess->t_handshake = 0;
	sess->t_idle      = 0;

	/* OK done, the stream lives its own life now */
	if (h2_frt_has_too_many_sc(h2c))
		h2c->flags |= H2_CF_DEM_TOOMANY;
	TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn);
	return h2s;

 out_close:
	h2s_destroy(h2s);
 out_alloc:
	TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW|H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
 out:
	sess_log(sess);
	TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn);
	return NULL;
}
+
/* allocates a new stream associated to stream connector <sc> on the h2c
 * connection and returns it, or NULL in case of memory allocation error or if
 * the highest possible stream ID was reached.
 */
static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct stconn *sc, struct session *sess)
{
	struct h2s *h2s = NULL;

	TRACE_ENTER(H2_EV_H2S_NEW, h2c->conn);

	/* Cannot handle stream if connection waiting to be reversed. */
	BUG_ON(conn_reverse_in_preconnect(h2c->conn));

	/* limit negotiated via the peer's SETTINGS frame */
	if (h2c->nb_streams >= h2c->streams_limit) {
		TRACE_ERROR("Aborting stream since negotiated limit is too low", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* stream ID space exhaustion (or GOAWAY limit) */
	if (h2_streams_left(h2c) < 1) {
		TRACE_ERROR("Aborting stream since no more streams left", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	/* Defer choosing the ID until we send the first message to create the stream */
	h2s = h2s_new(h2c, 0);
	if (!h2s) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		goto out;
	}

	if (sc_attach_mux(sc, h2s, h2c->conn) < 0) {
		TRACE_ERROR("Failed to allocate a new stream", H2_EV_H2S_NEW, h2c->conn);
		h2s_destroy(h2s);
		h2s = NULL;
		goto out;
	}
	h2s->sd = sc->sedesc;
	h2s->sess = sess;
	h2c->nb_sc++;

	if (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_H2_SND))
		se_fl_set(h2s->sd, SE_FL_MAY_FASTFWD_CONS);
	/* on the backend we can afford to only count total streams upon success */
	h2c->stream_cnt++;

 out:
	if (likely(h2s))
		TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
	else
		TRACE_LEAVE(H2_EV_H2S_NEW|H2_EV_H2S_ERR|H2_EV_H2S_END, h2c->conn, h2s);
	return h2s;
}
+
/* try to send a settings frame on the connection. Returns > 0 on success, 0 if
 * it couldn't do anything. It may return an error in h2c. See RFC7540#11.3 for
 * the various settings codes. The frame is first assembled in a local buffer
 * (each setting is a 2-byte identifier followed by a 4-byte value), then
 * copied into the mux buffer; on lack of room the MUX_MALLOC/MFULL and
 * DEM_MROOM flags are set so the operation is retried later.
 */
static int h2c_send_settings(struct h2c *h2c)
{
	struct buffer *res;
	char buf_data[100]; // enough for 15 settings
	struct buffer buf;
	int iws;
	int mfs;
	int mcs;
	int ret = 0;

	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);

	chunk_init(&buf, buf_data, sizeof(buf_data));
	chunk_memcpy(&buf,
	       "\x00\x00\x00"      /* length : 0 for now */
	       "\x04\x00"          /* type   : 4 (settings), flags : 0 */
	       "\x00\x00\x00\x00", /* stream ID : 0 */
	       9);

	if (h2c->flags & H2_CF_IS_BACK) {
		/* send settings_enable_push=0 */
		chunk_memcat(&buf, "\x00\x02\x00\x00\x00\x00", 6);
	}

	/* rfc 8441 #3 SETTINGS_ENABLE_CONNECT_PROTOCOL=1,
	 * sent automatically unless disabled in the global config */
	if (!(global.tune.options & GTUNE_DISABLE_H2_WEBSOCKET))
		chunk_memcat(&buf, "\x00\x08\x00\x00\x00\x01", 6);

	/* only advertise a header table size differing from the default */
	if (h2_settings_header_table_size != 4096) {
		char str[6] = "\x00\x01"; /* header_table_size */

		write_n32(str + 2, h2_settings_header_table_size);
		chunk_memcat(&buf, str, 6);
	}

	/* per-side initial window size, falling back to the common setting */
	iws = (h2c->flags & H2_CF_IS_BACK) ?
	      h2_be_settings_initial_window_size:
	      h2_fe_settings_initial_window_size;
	iws = iws ? iws : h2_settings_initial_window_size;

	if (iws != 65535) {
		char str[6] = "\x00\x04"; /* initial_window_size */

		write_n32(str + 2, iws);
		chunk_memcat(&buf, str, 6);
	}

	mcs = h2c_max_concurrent_streams(h2c);
	if (mcs != 0) {
		char str[6] = "\x00\x03"; /* max_concurrent_streams */

		/* Note: 0 means "unlimited" for haproxy's config but not for
		 * the protocol, so never send this value!
		 */
		write_n32(str + 2, mcs);
		chunk_memcat(&buf, str, 6);
	}

	/* max frame size is capped to (and defaults to) the buffer size */
	mfs = h2_settings_max_frame_size;
	if (mfs > global.tune.bufsize)
		mfs = global.tune.bufsize;

	if (!mfs)
		mfs = global.tune.bufsize;

	if (mfs != 16384) {
		char str[6] = "\x00\x05"; /* max_frame_size */

		/* note: similarly we could also emit MAX_HEADER_LIST_SIZE to
		 * match bufsize - rewrite size, but at the moment it seems
		 * that clients don't take care of it.
		 */
		write_n32(str + 2, mfs);
		chunk_memcat(&buf, str, 6);
	}

	/* now patch the frame length with the accumulated payload size */
	h2_set_frame_size(buf.area, buf.data - 9);

	res = br_tail(h2c->mbuf);
 retry:
	if (!h2_get_buf(h2c, res)) {
		h2c->flags |= H2_CF_MUX_MALLOC;
		h2c->flags |= H2_CF_DEM_MROOM;
		goto out;
	}

	ret = b_istput(res, ist2(buf.area, buf.data));
	if (unlikely(ret <= 0)) {
		if (!ret) {
			/* full buffer: try the next one in the ring */
			if ((res = br_tail_add(h2c->mbuf)) != NULL)
				goto retry;
			h2c->flags |= H2_CF_MUX_MFULL;
			h2c->flags |= H2_CF_DEM_MROOM;
		}
		else {
			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
			ret = 0;
		}
	}
 out:
	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
	return ret;
}
+
/* Try to receive a connection preface, then upon success try to send our
 * preface which is a SETTINGS frame. Returns > 0 on success or zero on
 * missing data. It may return an error in h2c. The preface bytes are only
 * consumed from the demux buffer once the SETTINGS frame could be emitted.
 */
static int h2c_frt_recv_preface(struct h2c *h2c)
{
	int ret1;
	int ret2;

	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);

	/* ret1 > 0: preface length matched; 0: incomplete; < 0: mismatch */
	ret1 = b_isteq(&h2c->dbuf, 0, b_data(&h2c->dbuf), ist(H2_CONN_PREFACE));

	if (unlikely(ret1 <= 0)) {
		if (!ret1)
			h2c->flags |= H2_CF_DEM_SHORT_READ;
		if (ret1 < 0 || (h2c->flags & H2_CF_RCVD_SHUT)) {
			TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
			/* only count the proto error if data was received or
			 * the frontend is not configured to ignore empty
			 * connections (PR_O_IGNORE_PRB).
			 */
			if (b_data(&h2c->dbuf) ||
			    !(((const struct session *)h2c->conn->owner)->fe->options & PR_O_IGNORE_PRB))
				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
		}
		ret2 = 0;
		goto out;
	}

	ret2 = h2c_send_settings(h2c);
	if (ret2 > 0)
		b_del(&h2c->dbuf, ret1);
 out:
	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
	return ret2;
}
+
+/* Try to send a connection preface, then upon success try to send our
+ * preface which is a SETTINGS frame. Returns > 0 on success or zero on
+ * missing data. It may return an error in h2c.
+ */
+static int h2c_bck_send_preface(struct h2c *h2c)
+{
+ struct buffer *res;
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ /* no output buffer available: block both mux and demux */
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ if (!b_data(res)) {
+ /* preface not yet sent */
+ ret = b_istput(res, ist(H2_CONN_PREFACE));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ /* buffer full: try to append a new one to the ring */
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ goto out;
+ }
+ }
+ }
+ /* the preface is in place (now or from a previous attempt): follow
+ * up with our SETTINGS frame.
+ */
+ ret = h2c_send_settings(h2c);
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PREFACE, h2c->conn);
+ return ret;
+}
+
+/* try to send a GOAWAY frame on the connection to report an error or a graceful
+ * shutdown, with h2c->errcode as the error code. Returns > 0 on success or zero
+ * if nothing was done. It uses h2c->last_sid as the advertised ID, or copies it
+ * from h2c->max_id if it's not set yet (<0). In case of lack of room to write
+ * the message, it subscribes the requester (either <h2s> or <h2c>) to future
+ * notifications. It sets H2_CF_GOAWAY_SENT on success, and H2_CF_GOAWAY_FAILED
+ * on unrecoverable failure. It will not attempt to send one again in this last
+ * case, nor will it send one if settings were not sent (e.g. still waiting for
+ * a preface) so that it is safe to use h2c_error() to report such errors.
+ */
+static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
+{
+ struct buffer *res;
+ char str[17];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
+
+ /* already failed once, or settings not yet exchanged: pretend success
+ * so callers don't loop trying to emit it.
+ */
+ if ((h2c->flags & H2_CF_GOAWAY_FAILED) || h2c->st0 < H2_CS_SETTINGS1) {
+ ret = 1; // claim that it worked
+ goto out;
+ }
+
+ /* len: 8, type: 7, flags: none, sid: 0 */
+ memcpy(str, "\x00\x00\x08\x07\x00\x00\x00\x00\x00", 9);
+
+ if (h2c->last_sid < 0)
+ h2c->last_sid = h2c->max_id;
+
+ /* payload: last stream ID (9..12) then error code (13..16) */
+ write_n32(str + 9, h2c->last_sid);
+ write_n32(str + 13, h2c->errcode);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ /* block the requester: the stream if one was passed, else the demux */
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ if (h2s)
+ h2s->flags |= H2_SF_BLK_MROOM;
+ else
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 17));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ if (h2s)
+ h2s->flags |= H2_SF_BLK_MROOM;
+ else
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+ else {
+ /* we cannot report this error using GOAWAY, so we mark
+ * it and claim a success.
+ */
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ h2c->flags |= H2_CF_GOAWAY_FAILED;
+ ret = 1;
+ goto out;
+ }
+ }
+ h2c->flags |= H2_CF_GOAWAY_SENT;
+
+ /* some codes are not for real errors, just attempts to close cleanly */
+ switch (h2c->errcode) {
+ case H2_ERR_NO_ERROR:
+ case H2_ERR_ENHANCE_YOUR_CALM:
+ case H2_ERR_REFUSED_STREAM:
+ case H2_ERR_CANCEL:
+ break;
+ default:
+ HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
+ }
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
+ return ret;
+}
+
+/* Try to send an RST_STREAM frame on the connection for the indicated stream
+ * during mux operations. This stream must be valid and cannot be closed
+ * already. h2s->id will be used for the stream ID and h2s->errcode will be
+ * used for the error code. h2s->st will be update to H2_SS_CLOSED if it was
+ * not yet.
+ *
+ * Returns > 0 on success or zero if nothing was done. In case of lack of room
+ * to write the message, it subscribes the stream to future notifications.
+ */
+static int h2s_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
+{
+ struct buffer *res;
+ char str[13];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
+
+ /* no stream or already closed: nothing to send, report success */
+ if (!h2s || h2s->st == H2_SS_CLOSED) {
+ ret = 1;
+ goto out;
+ }
+
+ /* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
+ * RST_STREAM in response to a RST_STREAM frame.
+ */
+ if (h2c->dsi == h2s->id && h2c->dft == H2_FT_RST_STREAM) {
+ ret = 1;
+ goto ignore;
+ }
+
+ /* len: 4, type: 3, flags: none */
+ memcpy(str, "\x00\x00\x04\x03\x00", 5);
+ /* payload: stream ID in the header (5..8), error code (9..12) */
+ write_n32(str + 5, h2s->id);
+ write_n32(str + 9, h2s->errcode);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 13));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ goto out;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ignore:
+ /* whether sent or deliberately skipped, the stream is now reset */
+ h2s->flags |= H2_SF_RST_SENT;
+ h2s_close(h2s);
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
+ return ret;
+}
+
+/* Try to send an RST_STREAM frame on the connection for the stream being
+ * demuxed using h2c->dsi for the stream ID. It will use h2s->errcode as the
+ * error code, even if the stream is one of the dummy ones, and will update
+ * h2s->st to H2_SS_CLOSED if it was not yet.
+ *
+ * Returns > 0 on success or zero if nothing was done. In case of lack of room
+ * to write the message, it blocks the demuxer and subscribes it to future
+ * notifications. It's worth mentioning that an RST may even be sent for a
+ * closed stream.
+ */
+static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
+{
+ struct buffer *res;
+ char str[13];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
+
+ /* RFC7540#5.4.2: To avoid looping, an endpoint MUST NOT send a
+ * RST_STREAM in response to a RST_STREAM frame.
+ */
+ if (h2c->dft == H2_FT_RST_STREAM) {
+ ret = 1;
+ goto ignore;
+ }
+
+ /* len: 4, type: 3, flags: none */
+ memcpy(str, "\x00\x00\x04\x03\x00", 5);
+
+ /* note: uses the demuxed stream ID (dsi), not h2s->id, so dummy
+ * streams can be RST as well.
+ */
+ write_n32(str + 5, h2c->dsi);
+ write_n32(str + 9, h2s->errcode);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 13));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ignore:
+ /* only mark/close real streams (dummy streams have id == 0) */
+ if (h2s->id) {
+ h2s->flags |= H2_SF_RST_SENT;
+ h2s_close(h2s);
+ }
+
+ out:
+ HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
+ return ret;
+}
+
+/* try to send an empty DATA frame with the ES flag set to notify about the
+ * end of stream and match a shutdown(write). If an ES was already sent as
+ * indicated by HLOC/ERROR/RESET/CLOSED states, nothing is done. Returns > 0
+ * on success or zero if nothing was done. In case of lack of room to write the
+ * message, it subscribes the requesting stream to future notifications.
+ */
+static int h2_send_empty_data_es(struct h2s *h2s)
+{
+ struct h2c *h2c = h2s->h2c;
+ struct buffer *res;
+ char str[9];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
+
+ /* local side already half-closed or stream done: ES already implied */
+ if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_ERROR || h2s->st == H2_SS_CLOSED) {
+ ret = 1;
+ goto out;
+ }
+
+ /* len: 0x000000, type: 0(DATA), flags: ES=1 */
+ memcpy(str, "\x00\x00\x00\x00\x01", 5);
+ write_n32(str + 5, h2s->id);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 9));
+ if (likely(ret > 0)) {
+ h2s->flags |= H2_SF_ES_SENT;
+ }
+ else if (!ret) {
+ /* buffer full: try to extend the ring before giving up */
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ }
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
+ return ret;
+}
+
+/* wake a specific stream and assign its stream connector some SE_FL_* flags
+ * among SE_FL_ERR_PENDING and SE_FL_ERROR if needed. The stream's state
+ * is automatically updated accordingly. If the stream is orphaned, it is
+ * destroyed.
+ */
+static void h2s_wake_one_stream(struct h2s *h2s)
+{
+ struct h2c *h2c = h2s->h2c;
+
+ TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn, h2s);
+
+ if (!h2s_sc(h2s)) {
+ /* this stream was already orphaned */
+ h2s_destroy(h2s);
+ TRACE_DEVEL("leaving with no h2s", H2_EV_H2S_WAKE, h2c->conn);
+ return;
+ }
+
+ /* a pending read0 half-closes the remote side: OPEN -> HREM,
+ * and HLOC (already half-closed locally) -> fully closed.
+ */
+ if (h2c_read0_pending(h2s->h2c)) {
+ if (h2s->st == H2_SS_OPEN)
+ h2s->st = H2_SS_HREM;
+ else if (h2s->st == H2_SS_HLOC)
+ h2s_close(h2s);
+ }
+
+ /* flag an error on the connector when the connection is broken or a
+ * received GOAWAY (last_sid) excludes this stream's ID.
+ */
+ if ((h2s->st != H2_SS_CLOSED) &&
+ (h2s->h2c->st0 >= H2_CS_ERROR || (h2s->h2c->flags & H2_CF_ERROR) ||
+ (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid)))) {
+ se_fl_set_error(h2s->sd);
+
+ if (h2s->st < H2_SS_ERROR)
+ h2s->st = H2_SS_ERROR;
+ }
+
+ h2s_alert(h2s);
+ TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
+}
+
+/* wake the streams attached to the connection, whose id is greater than <last>
+ * or unassigned.
+ */
+static void h2_wake_some_streams(struct h2c *h2c, int last)
+{
+ struct eb32_node *node;
+ struct h2s *h2s;
+
+ TRACE_ENTER(H2_EV_H2S_WAKE, h2c->conn);
+
+ /* Wake all streams with ID > last */
+ node = eb32_lookup_ge(&h2c->streams_by_id, last + 1);
+ while (node) {
+ h2s = container_of(node, struct h2s, by_id);
+ /* advance before waking: h2s_wake_one_stream() may destroy h2s */
+ node = eb32_next(node);
+ h2s_wake_one_stream(h2s);
+ }
+
+ /* Wake all streams with unassigned ID (ID == 0) */
+ node = eb32_lookup(&h2c->streams_by_id, 0);
+ while (node) {
+ h2s = container_of(node, struct h2s, by_id);
+ if (h2s->id > 0)
+ break;
+ node = eb32_next(node);
+ h2s_wake_one_stream(h2s);
+ }
+
+ TRACE_LEAVE(H2_EV_H2S_WAKE, h2c->conn);
+}
+
+/* Wake up all blocked streams whose window size has become positive after the
+ * mux's initial window was adjusted. This should be done after having processed
+ * SETTINGS frames which have updated the mux's initial window size.
+ */
+static void h2c_unblock_sfctl(struct h2c *h2c)
+{
+ struct h2s *h2s;
+ struct eb32_node *node;
+
+ TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
+
+ node = eb32_first(&h2c->streams_by_id);
+ while (node) {
+ h2s = container_of(node, struct h2s, by_id);
+ /* clear the flow-control block only if the window is now > 0 */
+ if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
+ h2s->flags &= ~H2_SF_BLK_SFCTL;
+ LIST_DEL_INIT(&h2s->list);
+ /* re-queue for sending if it was waiting to send or shut */
+ if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
+ h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
+ LIST_APPEND(&h2c->send_list, &h2s->list);
+ }
+ node = eb32_next(node);
+ }
+
+ TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
+}
+
+/* processes a SETTINGS frame whose payload is <payload> for <plen> bytes, and
+ * ACKs it if needed. Returns > 0 on success or zero on missing data. It may
+ * return an error in h2c. The caller must have already verified frame length
+ * and stream ID validity. Described in RFC7540#6.5.
+ */
+static int h2c_handle_settings(struct h2c *h2c)
+{
+ unsigned int offset;
+ int error;
+
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
+
+ /* a SETTINGS ACK must carry no payload (RFC7540#6.5) */
+ if (h2c->dff & H2_F_SETTINGS_ACK) {
+ if (h2c->dfl) {
+ error = H2_ERR_FRAME_SIZE_ERROR;
+ goto fail;
+ }
+ goto done;
+ }
+
+ /* process full frame only */
+ if (b_data(&h2c->dbuf) < h2c->dfl) {
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out0;
+ }
+
+ /* parse the frame: each setting is a 16-bit identifier followed by a
+ * 32-bit value, hence the 6-byte stride.
+ */
+ for (offset = 0; offset < h2c->dfl; offset += 6) {
+ uint16_t type = h2_get_n16(&h2c->dbuf, offset);
+ int32_t arg = h2_get_n32(&h2c->dbuf, offset + 2);
+
+ switch (type) {
+ case H2_SETTINGS_INITIAL_WINDOW_SIZE:
+ /* we need to update all existing streams with the
+ * difference from the previous iws.
+ */
+ if (arg < 0) { // RFC7540#6.5.2
+ error = H2_ERR_FLOW_CONTROL_ERROR;
+ goto fail;
+ }
+ h2c->miw = arg;
+ break;
+ case H2_SETTINGS_MAX_FRAME_SIZE:
+ if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
+ TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
+ error = H2_ERR_PROTOCOL_ERROR;
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ goto fail;
+ }
+ h2c->mfs = arg;
+ break;
+ case H2_SETTINGS_HEADER_TABLE_SIZE:
+ /* only note the update; the HPACK table change is
+ * handled later by the encoder side.
+ */
+ h2c->flags |= H2_CF_SHTS_UPDATED;
+ break;
+ case H2_SETTINGS_ENABLE_PUSH:
+ if (arg < 0 || arg > 1) { // RFC7540#6.5.2
+ TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
+ error = H2_ERR_PROTOCOL_ERROR;
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ goto fail;
+ }
+ break;
+ case H2_SETTINGS_MAX_CONCURRENT_STREAMS:
+ if (h2c->flags & H2_CF_IS_BACK) {
+ /* the limit is only for the backend; for the frontend it is our limit */
+ if ((unsigned int)arg > h2c_max_concurrent_streams(h2c))
+ arg = h2c_max_concurrent_streams(h2c);
+ h2c->streams_limit = arg;
+ }
+ break;
+ case H2_SETTINGS_ENABLE_CONNECT_PROTOCOL:
+ /* RFC8441 extended CONNECT support advertised by the peer */
+ if (arg == 1)
+ h2c->flags |= H2_CF_RCVD_RFC8441;
+ break;
+ }
+ }
+
+ /* need to ACK this frame now */
+ h2c->st0 = H2_CS_FRAME_A;
+ done:
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
+ return 1;
+ fail:
+ /* only log on the frontend side, where conn->owner is the session */
+ if (!(h2c->flags & H2_CF_IS_BACK))
+ sess_log(h2c->conn->owner);
+ h2c_error(h2c, error);
+ out0:
+ TRACE_DEVEL("leaving with missing data or error", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
+ return 0;
+}
+
+/* try to send an ACK for a settings frame on the connection. Returns > 0 on
+ * success or one of the h2_status values.
+ */
+static int h2c_ack_settings(struct h2c *h2c)
+{
+ struct buffer *res;
+ char str[9];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
+
+ /* a SETTINGS ACK is just a 9-byte frame header, no payload */
+ memcpy(str,
+ "\x00\x00\x00" /* length : 0 (no data) */
+ "\x04" "\x01" /* type : 4, flags : ACK */
+ "\x00\x00\x00\x00" /* stream ID */, 9);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 9));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ /* buffer full: try to extend the ring before blocking */
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ }
+ }
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn);
+ return ret;
+}
+
+/* processes a PING frame and schedules an ACK if needed. The caller must pass
+ * the pointer to the payload in <payload>. Returns > 0 on success or zero on
+ * missing data. The caller must have already verified frame length
+ * and stream ID validity.
+ */
+static int h2c_handle_ping(struct h2c *h2c)
+{
+ /* schedule a response, but never ACK an ACK (RFC7540#6.7) */
+ if (!(h2c->dff & H2_F_PING_ACK))
+ h2c->st0 = H2_CS_FRAME_A;
+ return 1;
+}
+
+/* Try to send a window update for stream id <sid> and value <increment>.
+ * Returns > 0 on success or zero on missing room or failure. It may return an
+ * error in h2c.
+ */
+static int h2c_send_window_update(struct h2c *h2c, int sid, uint32_t increment)
+{
+ struct buffer *res;
+ char str[13];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+
+ /* length: 4, type: 8, flags: none */
+ memcpy(str, "\x00\x00\x04\x08\x00", 5);
+ /* sid == 0 targets the connection-level window */
+ write_n32(str + 5, sid);
+ write_n32(str + 9, increment);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 13));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ /* buffer full: try to extend the ring before blocking */
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ }
+ }
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+ return ret;
+}
+
+/* try to send pending window update for the connection. It's safe to call it
+ * with no pending updates. Returns > 0 on success or zero on missing room or
+ * failure. It may return an error in h2c.
+ */
+static int h2c_send_conn_wu(struct h2c *h2c)
+{
+ int ret = 1;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+
+ /* nothing received since the last update: nothing to advertise */
+ if (h2c->rcvd_c <= 0)
+ goto out;
+
+ if (!(h2c->flags & H2_CF_WINDOW_OPENED)) {
+ /* increase the advertised connection window to 2G on
+ * first update.
+ */
+ h2c->flags |= H2_CF_WINDOW_OPENED;
+ h2c->rcvd_c += H2_INITIAL_WINDOW_INCREMENT;
+ }
+
+ /* send WU for the connection */
+ ret = h2c_send_window_update(h2c, 0, h2c->rcvd_c);
+ if (ret > 0)
+ h2c->rcvd_c = 0; /* only reset the counter once actually sent */
+
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+ return ret;
+}
+
+/* try to send pending window update for the current dmux stream. It's safe to
+ * call it with no pending updates. Returns > 0 on success or zero on missing
+ * room or failure. It may return an error in h2c.
+ */
+static int h2c_send_strm_wu(struct h2c *h2c)
+{
+ int ret = 1;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+
+ /* nothing received on this stream since the last update */
+ if (h2c->rcvd_s <= 0)
+ goto out;
+
+ /* send WU for the stream */
+ ret = h2c_send_window_update(h2c, h2c->dsi, h2c->rcvd_s);
+ if (ret > 0)
+ h2c->rcvd_s = 0; /* only reset the counter once actually sent */
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+ return ret;
+}
+
+/* try to send an ACK for a ping frame on the connection. Returns > 0 on
+ * success, 0 on missing data or one of the h2_status values.
+ */
+static int h2c_ack_ping(struct h2c *h2c)
+{
+ struct buffer *res;
+ char str[17];
+ int ret = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
+
+ /* need the full 8-byte PING payload before we can echo it back */
+ if (b_data(&h2c->dbuf) < 8)
+ goto out;
+
+ memcpy(str,
+ "\x00\x00\x08" /* length : 8 (same payload) */
+ "\x06" "\x01" /* type : 6, flags : ACK */
+ "\x00\x00\x00\x00" /* stream ID */, 9);
+
+ /* copy the original payload */
+ h2_get_buf_bytes(str + 9, 8, &h2c->dbuf, 0);
+
+ res = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, res)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ goto out;
+ }
+
+ ret = b_istput(res, ist2(str, 17));
+ if (unlikely(ret <= 0)) {
+ if (!ret) {
+ /* buffer full: try to extend the ring before blocking */
+ if ((res = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2c->flags |= H2_CF_DEM_MROOM;
+ }
+ else {
+ h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ }
+ }
+ out:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_PING, h2c->conn);
+ return ret;
+}
+
+/* processes a WINDOW_UPDATE frame whose payload is <payload> for <plen> bytes.
+ * Returns > 0 on success or zero on missing data. It may return an error in
+ * h2c or h2s. The caller must have already verified frame length and stream ID
+ * validity. Described in RFC7540#6.9.
+ */
+static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
+{
+ int32_t inc;
+ int error;
+
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+
+ /* process full frame only */
+ if (b_data(&h2c->dbuf) < h2c->dfl) {
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out0;
+ }
+
+ /* the increment is the frame's only payload field */
+ inc = h2_get_n32(&h2c->dbuf, 0);
+
+ if (h2c->dsi != 0) {
+ /* stream window update */
+
+ /* it's not an error to receive WU on a closed stream */
+ if (h2s->st == H2_SS_CLOSED)
+ goto done;
+
+ /* RFC7540#6.9.1: zero increment is a stream protocol error */
+ if (!inc) {
+ TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
+ error = H2_ERR_PROTOCOL_ERROR;
+ HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+ goto strm_err;
+ }
+
+ /* detect 31-bit window overflow (RFC7540#6.9.1) */
+ if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
+ TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
+ error = H2_ERR_FLOW_CONTROL_ERROR;
+ HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+ goto strm_err;
+ }
+
+ h2s->sws += inc;
+ /* window opened: unblock the stream and requeue it for sending */
+ if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
+ h2s->flags &= ~H2_SF_BLK_SFCTL;
+ LIST_DEL_INIT(&h2s->list);
+ if ((h2s->subs && h2s->subs->events & SUB_RETRY_SEND) ||
+ h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))
+ LIST_APPEND(&h2c->send_list, &h2s->list);
+ }
+ }
+ else {
+ /* connection window update */
+ if (!inc) {
+ TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ error = H2_ERR_PROTOCOL_ERROR;
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ goto conn_err;
+ }
+
+ /* detect 31-bit window overflow (RFC7540#6.9.1) */
+ if (h2c->mws >= 0 && h2c->mws + inc < 0) {
+ TRACE_ERROR("conn WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ error = H2_ERR_FLOW_CONTROL_ERROR;
+ goto conn_err;
+ }
+
+ h2c->mws += inc;
+ }
+
+ done:
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ return 1;
+
+ conn_err:
+ h2c_error(h2c, error);
+ out0:
+ TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ return 0;
+
+ strm_err:
+ /* stream-level error: RST will be emitted in the FRAME_E state */
+ h2s_error(h2s, error);
+ h2c->st0 = H2_CS_FRAME_E;
+ TRACE_DEVEL("leaving on stream error", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ return 0;
+}
+
+/* processes a GOAWAY frame, and signals all streams whose ID is greater than
+ * the last ID. Returns > 0 on success or zero on missing data. The caller must
+ * have already verified frame length and stream ID validity. Described in
+ * RFC7540#6.8.
+ */
+static int h2c_handle_goaway(struct h2c *h2c)
+{
+ int last;
+
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
+ /* process full frame only */
+ if (b_data(&h2c->dbuf) < h2c->dfl) {
+ TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ return 0;
+ }
+
+ /* payload: last stream ID then error code */
+ last = h2_get_n32(&h2c->dbuf, 0);
+ h2c->errcode = h2_get_n32(&h2c->dbuf, 4);
+ /* only record the first advertised last_sid (last_sid < 0 = unset) */
+ if (h2c->last_sid < 0)
+ h2c->last_sid = last;
+ h2_wake_some_streams(h2c, last);
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn);
+ return 1;
+}
+
+/* processes a PRIORITY frame, and either skips it or rejects if it is
+ * invalid. Returns > 0 on success or zero on missing data. It may return an
+ * error in h2c. The caller must have already verified frame length and stream
+ * ID validity. Described in RFC7540#6.3.
+ */
+static int h2c_handle_priority(struct h2c *h2c)
+{
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
+
+ /* process full frame only */
+ if (b_data(&h2c->dbuf) < h2c->dfl) {
+ TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ return 0;
+ }
+
+ /* first payload word is the stream dependency; compare to the demuxed
+ * stream ID. Priorities are otherwise ignored (frame is just skipped).
+ */
+ if (h2_get_n32(&h2c->dbuf, 0) == h2c->dsi) {
+ /* 7540#5.3 : can't depend on itself */
+ TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
+ h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
+ return 0;
+ }
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
+ return 1;
+}
+
+/* processes an RST_STREAM frame, and sets the 32-bit error code on the stream.
+ * Returns > 0 on success or zero on missing data. The caller must have already
+ * verified frame length and stream ID validity. Described in RFC7540#6.4.
+ */
+static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
+{
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
+
+ /* process full frame only */
+ if (b_data(&h2c->dbuf) < h2c->dfl) {
+ TRACE_DEVEL("leaving on missing data", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ return 0;
+ }
+
+ /* late RST, already handled */
+ if (h2s->st == H2_SS_CLOSED) {
+ TRACE_DEVEL("leaving on stream closed", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
+ return 1;
+ }
+
+ /* record the peer's error code and close the stream */
+ h2s->errcode = h2_get_n32(&h2c->dbuf, 0);
+ h2s_close(h2s);
+
+ /* if a stream connector is still attached, report the error upward */
+ if (h2s_sc(h2s)) {
+ se_fl_set_error(h2s->sd);
+ h2s_alert(h2s);
+ }
+
+ h2s->flags |= H2_SF_RST_RCVD;
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
+ return 1;
+}
+
+/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
+ * It may return an error in h2c or h2s. The caller must consider that the
+ * return value is the new h2s in case one was allocated (most common case).
+ * Described in RFC7540#6.2. Most of the
+ * errors here are reported as connection errors since it's impossible to
+ * recover from such errors after the compression context has been altered.
+ */
+static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
+{
+ struct buffer rxbuf = BUF_NULL;
+ unsigned long long body_len = 0;
+ uint32_t flags = 0;
+ int error;
+
+ TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+
+ if (!b_size(&h2c->dbuf)) {
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out; // empty buffer
+ }
+
+ if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out; // incomplete frame
+ }
+
+ /* now either the frame is complete or the buffer is complete */
+ if (h2s->st != H2_SS_IDLE) {
+ /* The stream exists/existed, this must be a trailers frame */
+ if (h2s->st != H2_SS_CLOSED) {
+ /* decode trailers into the stream's own rxbuf;
+ * error > 0 = OK, 0 = missing data, < 0 = failed.
+ */
+ error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &body_len, NULL);
+ /* unrecoverable error ? */
+ if (h2c->st0 >= H2_CS_ERROR) {
+ TRACE_USER("Unrecoverable error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+ sess_log(h2c->conn->owner);
+ goto out;
+ }
+
+ if (error == 0) {
+ /* Demux not blocked because of the stream, it is an incomplete frame */
+ if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out; // missing data
+ }
+
+ if (error < 0) {
+ /* Failed to decode this frame (e.g. too large request)
+ * but the HPACK decompressor is still synchronized.
+ */
+ sess_log(h2c->conn->owner);
+ h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
+ TRACE_USER("Stream error decoding H2 trailers", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+ h2c->st0 = H2_CS_FRAME_E;
+ goto out;
+ }
+ goto done;
+ }
+ /* the stream was already killed by an RST, let's consume
+ * the data and send another RST.
+ */
+ error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
+ sess_log(h2c->conn->owner);
+ h2s = (struct h2s*)h2_error_stream;
+ TRACE_USER("rcvd H2 trailers on closed stream", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
+ goto send_rst;
+ }
+ else if (h2c->dsi <= h2c->max_id || !(h2c->dsi & 1)) {
+ /* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
+ error = H2_ERR_PROTOCOL_ERROR;
+ TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ sess_log(h2c->conn->owner);
+ session_inc_http_req_ctr(h2c->conn->owner);
+ session_inc_http_err_ctr(h2c->conn->owner);
+ goto conn_err;
+ }
+ else if (h2c->flags & H2_CF_DEM_TOOMANY) {
+ goto out; // IDLE but too many sc still present
+ }
+ else if (h2_fe_max_total_streams &&
+ h2c->stream_cnt >= h2_fe_max_total_streams + h2c_max_concurrent_streams(h2c)) {
+ /* We've already told this client we were going to close a
+ * while ago and apparently it didn't care, so it's time to
+ * stop processing its requests for real.
+ */
+ error = H2_ERR_ENHANCE_YOUR_CALM;
+ TRACE_STATE("Stream limit violated", H2_EV_STRM_SHUT, h2c->conn);
+ HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+ sess_log(h2c->conn->owner);
+ session_inc_http_req_ctr(h2c->conn->owner);
+ session_inc_http_err_ctr(h2c->conn->owner);
+ goto conn_err;
+ }
+
+ /* brand new stream: decode the request headers into a local rxbuf */
+ error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
+
+ if (error == 0) {
+ /* No error but missing data for demuxing, it is an incomplete frame */
+ if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
+ h2c->flags |= H2_CF_DEM_SHORT_READ;
+ goto out;
+ }
+
+ /* Now we cannot roll back and we won't come back here anymore for this
+ * stream, so this stream ID is open from a protocol perspective, even
+ * if incomplete or broken, we want to count it as attempted.
+ */
+ if (h2c->dsi > h2c->max_id)
+ h2c->max_id = h2c->dsi;
+ h2c->stream_cnt++;
+
+ if (error < 0) {
+ /* Failed to decode this stream. This might be due to a
+ * recoverable error affecting only the stream (e.g. too large
+ * request for buffer, that leaves the HPACK decompressor still
+ * synchronized), or a non-recoverable error such as an invalid
+ * frame type sequence (e.g. other frame type interleaved with
+ * CONTINUATION), in which h2c_dec_hdrs() has already set the
+ * error code in the connection and counted it in the relevant
+ * stats. We still count a req error in both cases.
+ */
+ sess_log(h2c->conn->owner);
+ session_inc_http_req_ctr(h2c->conn->owner);
+ session_inc_http_err_ctr(h2c->conn->owner);
+
+ if (h2c->st0 >= H2_CS_ERROR) {
+ TRACE_USER("Unrecoverable error decoding H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+ goto out;
+ }
+
+ /* recoverable stream error (e.g. too large request) */
+ TRACE_USER("rcvd unparsable H2 request", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
+ goto strm_err;
+ }
+
+ TRACE_USER("rcvd H2 request ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW, h2c->conn, 0, &rxbuf);
+
+ /* Note: we don't emit any other logs below because if we return
+ * positively from h2c_frt_stream_new(), the stream will report the error,
+ * and if we return in error, h2c_frt_stream_new() will emit the error.
+ *
+ * Xfer the rxbuf to the stream. On success, the new stream owns the
+ * rxbuf. On error, it is released here.
+ */
+ h2s = h2c_frt_stream_new(h2c, h2c->dsi, &rxbuf, flags);
+ if (!h2s) {
+ /* allocation refused: answer with a dummy "refused" stream */
+ h2s = (struct h2s*)h2_refused_stream;
+ TRACE_USER("refused H2 req. ", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, h2s, &rxbuf);
+ goto send_rst;
+ }
+
+ h2s->st = H2_SS_OPEN;
+ h2s->flags |= flags;
+ h2s->body_len = body_len;
+ h2s_propagate_term_flags(h2c, h2s);
+
+ done:
+ /* an END_STREAM flag half-closes (or fully closes) the remote side */
+ if (h2s->flags & H2_SF_ES_RCVD) {
+ if (h2s->st == H2_SS_OPEN)
+ h2s->st = H2_SS_HREM;
+ else
+ h2s_close(h2s);
+ }
+ TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+ goto leave;
+
+ conn_err:
+ h2c_error(h2c, error);
+ out:
+ h2_release_buf(h2c, &rxbuf);
+ TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+ h2s = NULL;
+ goto leave;
+
+ strm_err:
+ h2s = (struct h2s*)h2_error_stream;
+
+ send_rst:
+ /* make the demux send an RST for the current stream. We may only
+ * do this if we're certain that the HEADERS frame was properly
+ * decompressed so that the HPACK decoder is still kept up to date.
+ */
+ h2_release_buf(h2c, &rxbuf);
+ h2c->st0 = H2_CS_FRAME_E;
+
+ TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+
+ leave:
+ if (h2_fe_max_total_streams && h2c->stream_cnt >= h2_fe_max_total_streams) {
+ /* we've had enough streams on this connection, time to renew it.
+ * In order to gracefully do this, we'll advertise a stream limit
+ * of the current one plus the max concurrent streams value in the
+ * GOAWAY frame, so that we're certain that the client is aware of
+ * the limit before creating a new stream, but knows we won't harm
+ * the streams in flight. Remember that client stream IDs are odd
+ * so we apply twice the concurrent streams value to the current
+ * ID.
+ */
+ if (h2c->last_sid <= 0 ||
+ h2c->last_sid > h2c->max_id + 2 * h2c_max_concurrent_streams(h2c)) {
+ /* not set yet or was too high */
+ h2c->last_sid = h2c->max_id + 2 * h2c_max_concurrent_streams(h2c);
+ h2c_send_goaway_error(h2c, NULL);
+ }
+ }
+
+ return h2s;
+}
+
+/* processes a HEADERS frame on the backend side (i.e. a response received
+ * from a server). Returns h2s on success or NULL on missing data.
+ * It may return an error in h2c or h2s. Described in RFC7540#6.2. Most of the
+ * errors here are reported as connection errors since it's impossible to
+ * recover from such errors after the compression context has been altered.
+ */
+static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
+{
+	/* scratch storage used only when the stream is already closed and the
+	 * frame must still be decoded to keep the HPACK context in sync.
+	 */
+	struct buffer rxbuf = BUF_NULL;
+	unsigned long long body_len = 0;
+	uint32_t flags = 0;
+	int error;
+
+	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+
+	if (!b_size(&h2c->dbuf)) {
+		h2c->flags |= H2_CF_DEM_SHORT_READ;
+		goto fail; // empty buffer
+	}
+
+	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
+		h2c->flags |= H2_CF_DEM_SHORT_READ;
+		goto fail; // incomplete frame
+	}
+
+	if (h2s->st != H2_SS_CLOSED) {
+		error = h2c_dec_hdrs(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len, h2s->upgrade_protocol);
+	}
+	else {
+		/* the connection was already killed by an RST, let's consume
+		 * the data and send another RST. The headers are still decoded
+		 * (into the local scratch buffer) so that the shared HPACK
+		 * decoder does not get out of sync.
+		 */
+		error = h2c_dec_hdrs(h2c, &rxbuf, &flags, &body_len, NULL);
+		h2s = (struct h2s*)h2_error_stream;
+		h2c->st0 = H2_CS_FRAME_E;
+		goto send_rst;
+	}
+
+	/* unrecoverable error ? */
+	if (h2c->st0 >= H2_CS_ERROR) {
+		TRACE_USER("Unrecoverable error decoding H2 HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+		goto fail;
+	}
+
+	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
+		/* RFC7540#5.1 */
+		TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+		h2s_error(h2s, H2_ERR_STREAM_CLOSED);
+		h2c->st0 = H2_CS_FRAME_E;
+		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+		goto fail;
+	}
+
+	if (error <= 0) {
+		if (error == 0) {
+			/* Demux not blocked because of the stream, it is an incomplete frame */
+			if (!(h2c->flags &H2_CF_DEM_BLOCK_ANY))
+				h2c->flags |= H2_CF_DEM_SHORT_READ;
+			goto fail; // missing data
+		}
+
+		/* stream error : send RST_STREAM */
+		TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+		h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
+		h2c->st0 = H2_CS_FRAME_E;
+		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+		goto fail;
+	}
+
+	/* state transition after a successfully decoded response: an upper
+	 * layer error takes precedence, otherwise ES closes the remote side.
+	 */
+	if (se_fl_test(h2s->sd, SE_FL_ERROR) && h2s->st < H2_SS_ERROR)
+		h2s->st = H2_SS_ERROR;
+	else if (h2s->flags & H2_SF_ES_RCVD) {
+		if (h2s->st == H2_SS_OPEN)
+			h2s->st = H2_SS_HREM;
+		else if (h2s->st == H2_SS_HLOC)
+			h2s_close(h2s);
+	}
+
+	/* Unblock busy server h2s waiting for the response headers to validate
+	 * the tunnel establishment or the end of the response of an aborted
+	 * tunnel
+	 */
+	if ((h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY)) == (H2_SF_BODY_TUNNEL|H2_SF_BLK_MBUSY) ||
+	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
+		TRACE_STATE("Unblock h2s blocked on tunnel establishment/abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+		h2s->flags &= ~H2_SF_BLK_MBUSY;
+	}
+
+	TRACE_USER("rcvd H2 response ", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, &h2s->rxbuf);
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+	return h2s;
+ fail:
+	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+	return NULL;
+
+ send_rst:
+	/* make the demux send an RST for the current stream. We may only
+	 * do this if we're certain that the HEADERS frame was properly
+	 * decompressed so that the HPACK decoder is still kept up to date.
+	 */
+	h2_release_buf(h2c, &rxbuf);
+	h2c->st0 = H2_CS_FRAME_E;
+
+	TRACE_USER("rejected H2 response", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_STRM_NEW|H2_EV_STRM_END, h2c->conn, 0, &rxbuf);
+	TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+	return h2s;
+}
+
+/* processes a DATA frame. Returns > 0 on success or zero on missing data.
+ * It may return an error in h2c or h2s. Described in RFC7540#6.1.
+ */
+static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
+{
+	int error;
+
+	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+
+	/* note that empty DATA frames are perfectly valid and sometimes used
+	 * to signal an end of stream (with the ES flag).
+	 */
+
+	if (!b_size(&h2c->dbuf) && h2c->dfl) {
+		h2c->flags |= H2_CF_DEM_SHORT_READ;
+		goto fail; // empty buffer
+	}
+
+	if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf)) {
+		h2c->flags |= H2_CF_DEM_SHORT_READ;
+		goto fail; // incomplete frame
+	}
+
+	/* now either the frame is complete or the buffer is complete */
+
+	if (h2s->st != H2_SS_OPEN && h2s->st != H2_SS_HLOC) {
+		/* RFC7540#6.1 */
+		error = H2_ERR_STREAM_CLOSED;
+		goto strm_err;
+	}
+
+	if (!(h2s->flags & H2_SF_HEADERS_RCVD)) {
+		/* RFC9113#8.1: The header section must be received before the message content */
+		TRACE_ERROR("Unexpected DATA frame before the message headers", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+		error = H2_ERR_PROTOCOL_ERROR;
+		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+		goto strm_err;
+	}
+	/* dfl - dpl = frame payload minus padding, i.e. actual body bytes */
+	if ((h2s->flags & H2_SF_DATA_CLEN) && (h2c->dfl - h2c->dpl) > h2s->body_len) {
+		/* RFC7540#8.1.2 */
+		TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+		error = H2_ERR_PROTOCOL_ERROR;
+		HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+		goto strm_err;
+	}
+	if (!(h2c->flags & H2_CF_IS_BACK) &&
+	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_SENT) &&
+	    ((h2c->dfl - h2c->dpl) || !(h2c->dff & H2_F_DATA_END_STREAM))) {
+		/* a tunnel attempt was aborted but the client still try to send some raw data.
+		 * Thus the stream is closed with the CANCEL error. Here we take care it is not
+		 * an empty DATA Frame with the ES flag. The error is only handled if ES was
+		 * already sent to the client because depending on the scheduling, these data may
+		 * have been sent before the server response but not handle here.
+		 */
+		TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+		error = H2_ERR_CANCEL;
+		goto strm_err;
+	}
+
+	if (!h2_frt_transfer_data(h2s))
+		goto fail;
+
+	/* call the upper layers to process the frame, then let the upper layer
+	 * notify the stream about any change.
+	 */
+	if (!h2s_sc(h2s)) {
+		/* The upper layer has already closed, this may happen on
+		 * 4xx/redirects during POST, or when receiving a response
+		 * from an H2 server after the client has aborted.
+		 */
+		error = H2_ERR_CANCEL;
+		goto strm_err;
+	}
+
+	if (h2c->st0 >= H2_CS_ERROR)
+		goto fail;
+
+	if (h2s->st >= H2_SS_ERROR) {
+		/* stream error : send RST_STREAM */
+		h2c->st0 = H2_CS_FRAME_E;
+	}
+
+	/* check for completion : the callee will change this to FRAME_A or
+	 * FRAME_H once done.
+	 */
+	if (h2c->st0 == H2_CS_FRAME_P)
+		goto fail;
+
+	/* last frame */
+	if (h2c->dff & H2_F_DATA_END_STREAM) {
+		h2s->flags |= H2_SF_ES_RCVD;
+		if (h2s->st == H2_SS_OPEN)
+			h2s->st = H2_SS_HREM;
+		else
+			h2s_close(h2s);
+
+		/* a non-zero remaining body_len here means ES arrived before
+		 * the announced content-length was fully received.
+		 */
+		if (h2s->flags & H2_SF_DATA_CLEN && h2s->body_len) {
+			/* RFC7540#8.1.2 */
+			TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+			error = H2_ERR_PROTOCOL_ERROR;
+			HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
+			goto strm_err;
+		}
+	}
+
+	/* Unblock busy server h2s waiting for the end of the response for an
+	 * aborted tunnel
+	 */
+	if ((h2c->flags & H2_CF_IS_BACK) &&
+	    (h2s->flags & (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) == (H2_SF_TUNNEL_ABRT|H2_SF_ES_RCVD|H2_SF_BLK_MBUSY)) {
+		TRACE_STATE("Unblock h2s blocked on tunnel abort", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+		h2s->flags &= ~H2_SF_BLK_MBUSY;
+	}
+
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+	return 1;
+
+ strm_err:
+	h2s_error(h2s, error);
+	h2c->st0 = H2_CS_FRAME_E;
+ fail:
+	TRACE_DEVEL("leaving on missing data or error", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+	return 0;
+}
+
+/* check that the current frame described in h2c->{dsi,dft,dfl,dff,...} is
+ * valid for the current stream state. This is needed only after parsing the
+ * frame header but in practice it can be performed at any time during
+ * H2_CS_FRAME_P since no state transition happens there. Returns >0 on success
+ * or 0 in case of error, in which case either h2s or h2c will carry an error.
+ */
+static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
+{
+	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
+
+	if (h2s->st == H2_SS_IDLE &&
+	    h2c->dft != H2_FT_HEADERS && h2c->dft != H2_FT_PRIORITY) {
+		/* RFC7540#5.1: any frame other than HEADERS or PRIORITY in
+		 * this state MUST be treated as a connection error
+		 */
+		TRACE_ERROR("invalid frame type for IDLE state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
+		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+		if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
+			/* only log if no other stream can report the error */
+			sess_log(h2c->conn->owner);
+		}
+		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+		TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+		return 0;
+	}
+
+	if (h2s->st == H2_SS_IDLE && (h2c->flags & H2_CF_IS_BACK)) {
+		/* only PUSH_PROMISE would be permitted here */
+		TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
+		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+		TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+		return 0;
+	}
+
+	if (h2s->st == H2_SS_HREM && h2c->dft != H2_FT_WINDOW_UPDATE &&
+	    h2c->dft != H2_FT_RST_STREAM && h2c->dft != H2_FT_PRIORITY) {
+		/* RFC7540#5.1: any frame other than WU/PRIO/RST in
+		 * this state MUST be treated as a stream error.
+		 * 6.2, 6.6 and 6.10 further mandate that HEADERS/
+		 * PUSH_PROMISE/CONTINUATION cause connection errors.
+		 */
+		if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
+			TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
+			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+		}
+		else {
+			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
+		}
+		TRACE_DEVEL("leaving in error (hrem&!wu&!rst&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+		return 0;
+	}
+
+	/* Below the management of frames received in closed state is a
+	 * bit hackish because the spec makes strong differences between
+	 * streams closed by receiving RST, sending RST, and seeing ES
+	 * in both directions. In addition to this, the creation of a
+	 * new stream reusing the identifier of a closed one will be
+	 * detected here. Given that we cannot keep track of all closed
+	 * streams forever, we consider that unknown closed streams were
+	 * closed on RST received, which allows us to respond with an
+	 * RST without breaking the connection (eg: to abort a transfer).
+	 * Some frames have to be silently ignored as well.
+	 */
+	if (h2s->st == H2_SS_CLOSED && h2c->dsi) {
+		if (!(h2c->flags & H2_CF_IS_BACK) && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
+			/* #5.1.1: The identifier of a newly
+			 * established stream MUST be numerically
+			 * greater than all streams that the initiating
+			 * endpoint has opened or reserved. This
+			 * governs streams that are opened using a
+			 * HEADERS frame and streams that are reserved
+			 * using PUSH_PROMISE. An endpoint that
+			 * receives an unexpected stream identifier
+			 * MUST respond with a connection error.
+			 */
+			h2c_error(h2c, H2_ERR_STREAM_CLOSED);
+			TRACE_DEVEL("leaving in error (closed&hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+			return 0;
+		}
+
+		if (h2s->flags & H2_SF_RST_RCVD &&
+		    !(h2_ft_bit(h2c->dft) & (H2_FT_HDR_MASK | H2_FT_RST_STREAM_BIT | H2_FT_PRIORITY_BIT | H2_FT_WINDOW_UPDATE_BIT))) {
+			/* RFC7540#5.1:closed: an endpoint that
+			 * receives any frame other than PRIORITY after
+			 * receiving a RST_STREAM MUST treat that as a
+			 * stream error of type STREAM_CLOSED.
+			 *
+			 * Note that old streams fall into this category
+			 * and will lead to an RST being sent.
+			 *
+			 * However, we cannot generalize this to all frame types. Those
+			 * carrying compression state must still be processed before
+			 * being dropped or we'll desynchronize the decoder. This can
+			 * happen with request trailers received after sending an
+			 * RST_STREAM, or with header/trailers responses received after
+			 * sending RST_STREAM (aborted stream).
+			 *
+			 * In addition, since our CLOSED streams always carry the
+			 * RST_RCVD bit, we don't want to accidentally catch valid
+			 * frames for a closed stream, i.e. RST/PRIO/WU.
+			 */
+			h2s_error(h2s, H2_ERR_STREAM_CLOSED);
+			h2c->st0 = H2_CS_FRAME_E;
+			TRACE_DEVEL("leaving in error (rst_rcvd&!hdrmask)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+			return 0;
+		}
+
+		/* RFC7540#5.1:closed: if this state is reached as a
+		 * result of sending a RST_STREAM frame, the peer that
+		 * receives the RST_STREAM might have already sent
+		 * frames on the stream that cannot be withdrawn. An
+		 * endpoint MUST ignore frames that it receives on
+		 * closed streams after it has sent a RST_STREAM
+		 * frame. An endpoint MAY choose to limit the period
+		 * over which it ignores frames and treat frames that
+		 * arrive after this time as being in error.
+		 */
+		if (h2s->id && !(h2s->flags & H2_SF_RST_SENT)) {
+			/* RFC7540#5.1:closed: any frame other than
+			 * PRIO/WU/RST in this state MUST be treated as
+			 * a connection error
+			 */
+			if (h2c->dft != H2_FT_RST_STREAM &&
+			    h2c->dft != H2_FT_PRIORITY &&
+			    h2c->dft != H2_FT_WINDOW_UPDATE) {
+				h2c_error(h2c, H2_ERR_STREAM_CLOSED);
+				TRACE_DEVEL("leaving in error (rst_sent&!rst&!prio&!wu)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
+				return 0;
+			}
+		}
+	}
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
+	return 1;
+}
+
+/* Reverse the connection <h2c>. Common operations are done for both active and
+ * passive reversal. Timeouts are inverted and H2_CF_IS_BACK is set or unset
+ * depending on the reversal direction.
+ *
+ * For active reversal, only minor steps are required. The connection should
+ * then be accepted by its listener before being able to use it for transfers.
+ *
+ * For passive reversal, connection is inserted in its targeted server idle
+ * pool. It can thus be reused immediately for future transfers on this server.
+ *
+ * Returns 1 on success else 0.
+ */
+static int h2_conn_reverse(struct h2c *h2c)
+{
+	struct connection *conn = h2c->conn;
+
+	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
+
+	if (conn_reverse(conn)) {
+		TRACE_ERROR("reverse connection failed", H2_EV_H2C_WAKE, conn);
+		goto err;
+	}
+
+	TRACE_USER("reverse connection", H2_EV_H2C_WAKE, conn);
+
+	/* Check the connection new side after reversal. */
+	if (conn_is_back(conn)) {
+		/* now a backend connection: adopt server-side timeouts and
+		 * counters, then park it in the server idle pool for reuse.
+		 */
+		struct server *srv = __objt_server(h2c->conn->target);
+		struct proxy *prx = srv->proxy;
+
+		h2c->flags |= H2_CF_IS_BACK;
+
+		h2c->shut_timeout = h2c->timeout = prx->timeout.server;
+		if (tick_isset(prx->timeout.serverfin))
+			h2c->shut_timeout = prx->timeout.serverfin;
+
+		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_be,
+		                                      &h2_stats_module);
+
+		HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
+		xprt_set_idle(conn, conn->xprt, conn->xprt_ctx);
+		if (!srv_add_to_idle_list(srv, conn, 1))
+			goto err;
+	}
+	else {
+		/* now a frontend connection: adopt client-side timeouts and
+		 * counters and register it for graceful shutdown tracking.
+		 */
+		struct listener *l = __objt_listener(h2c->conn->target);
+		struct proxy *prx = l->bind_conf->frontend;
+
+		h2c->flags &= ~H2_CF_IS_BACK;
+
+		h2c->shut_timeout = h2c->timeout = prx->timeout.client;
+		if (tick_isset(prx->timeout.clientfin))
+			h2c->shut_timeout = prx->timeout.clientfin;
+
+		h2c->px_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
+		                                      &h2_stats_module);
+
+		proxy_inc_fe_cum_sess_ver_ctr(l, prx, 2);
+
+		BUG_ON(LIST_INLIST(&h2c->conn->stopping_list));
+		LIST_APPEND(&mux_stopping_data[tid].list,
+		            &h2c->conn->stopping_list);
+	}
+
+	/* Check if stream creation is initially forbidden. This is the case
+	 * for active preconnect until reversal is done.
+	 */
+	if (conn_reverse_in_preconnect(h2c->conn)) {
+		TRACE_DEVEL("prevent stream demux until accept is done", H2_EV_H2C_WAKE, conn);
+		h2c->flags |= H2_CF_DEM_TOOMANY;
+	}
+
+	/* If only the new side has a defined timeout, task must be allocated.
+	 * On the contrary, if only old side has a timeout, it must be freed.
+	 */
+	if (!h2c->task && tick_isset(h2c->timeout)) {
+		h2c->task = task_new_here();
+		if (!h2c->task)
+			goto err;
+
+		h2c->task->process = h2_timeout_task;
+		h2c->task->context = h2c;
+	}
+	else if (!tick_isset(h2c->timeout)) {
+		task_destroy(h2c->task);
+		h2c->task = NULL;
+	}
+
+	/* Requeue task if instantiated with the new timeout value. */
+	if (h2c->task) {
+		h2c->task->expire = tick_add(now_ms, h2c->timeout);
+		task_queue(h2c->task);
+	}
+
+	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
+	return 1;
+
+ err:
+	h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+	TRACE_DEVEL("leaving on error", H2_EV_H2C_WAKE);
+	return 0;
+}
+
+/* process Rx frames to be demultiplexed. Consumes as much of h2c->dbuf as
+ * possible: first handles the connection bootstrap (preface, then initial
+ * SETTINGS), then loops over complete frames, dispatching each one to the
+ * appropriate handler and notifying the upper layers when needed.
+ */
+static void h2_process_demux(struct h2c *h2c)
+{
+	struct h2s *h2s = NULL, *tmp_h2s;
+	struct h2_fh hdr;        /* parsed frame header of the current frame */
+	unsigned int padlen = 0; /* pad length extracted from padded frames */
+	int32_t old_iw = h2c->miw; /* detect SETTINGS-driven window changes below */
+
+	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
+
+	if (h2c->st0 >= H2_CS_ERROR)
+		goto out;
+
+	/* connection bootstrap: preface then the mandatory initial SETTINGS */
+	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
+		if (h2c->st0 == H2_CS_PREFACE) {
+			TRACE_STATE("expecting preface", H2_EV_RX_PREFACE, h2c->conn);
+			if (h2c->flags & H2_CF_IS_BACK)
+				goto out;
+
+			if (unlikely(h2c_frt_recv_preface(h2c) <= 0)) {
+				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
+				if (h2c->st0 == H2_CS_ERROR) {
+					TRACE_PROTO("failed to receive preface", H2_EV_RX_PREFACE|H2_EV_PROTO_ERR, h2c->conn);
+					h2c->st0 = H2_CS_ERROR2;
+					if (b_data(&h2c->dbuf) ||
+					    !(((const struct session *)h2c->conn->owner)->fe->options & (PR_O_NULLNOLOG|PR_O_IGNORE_PRB)))
+						sess_log(h2c->conn->owner);
+				}
+				goto done;
+			}
+			TRACE_PROTO("received preface", H2_EV_RX_PREFACE, h2c->conn);
+
+			h2c->max_id = 0;
+			TRACE_STATE("switching to SETTINGS1", H2_EV_RX_PREFACE, h2c->conn);
+			h2c->st0 = H2_CS_SETTINGS1;
+		}
+
+		if (h2c->st0 == H2_CS_SETTINGS1) {
+			/* ensure that what is pending is a valid SETTINGS frame
+			 * without an ACK.
+			 */
+			TRACE_STATE("expecting settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS, h2c->conn);
+			if (!h2_get_frame_hdr(&h2c->dbuf, &hdr)) {
+				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
+				h2c->flags |= H2_CF_DEM_SHORT_READ;
+				if (h2c->st0 == H2_CS_ERROR) {
+					TRACE_ERROR("failed to receive settings", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
+					h2c->st0 = H2_CS_ERROR2;
+					if (!(h2c->flags & H2_CF_IS_BACK))
+						sess_log(h2c->conn->owner);
+				}
+				goto done;
+			}
+
+			if (hdr.sid || hdr.ft != H2_FT_SETTINGS || hdr.ff & H2_F_SETTINGS_ACK) {
+				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
+				TRACE_ERROR("unexpected frame type or flags", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
+				h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+				h2c->st0 = H2_CS_ERROR2;
+				if (!(h2c->flags & H2_CF_IS_BACK))
+					sess_log(h2c->conn->owner);
+				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+				goto done;
+			}
+
+			if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
+				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
+				TRACE_ERROR("invalid settings frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_SETTINGS|H2_EV_PROTO_ERR, h2c->conn);
+				h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
+				h2c->st0 = H2_CS_ERROR2;
+				if (!(h2c->flags & H2_CF_IS_BACK))
+					sess_log(h2c->conn->owner);
+				goto done;
+			}
+
+			/* that's OK, switch to FRAME_P to process it. This is
+			 * a SETTINGS frame whose header has already been
+			 * deleted above.
+			 */
+			padlen = 0;
+			HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
+			goto new_frame;
+		}
+	}
+
+	/* process as many incoming frames as possible below */
+	while (1) {
+		int ret = 0;
+
+		if (!b_data(&h2c->dbuf)) {
+			TRACE_DEVEL("no more Rx data", H2_EV_RX_FRAME, h2c->conn);
+			h2c->flags |= H2_CF_DEM_SHORT_READ;
+			break;
+		}
+
+		if (h2c->st0 >= H2_CS_ERROR) {
+			TRACE_STATE("end of connection reported", H2_EV_RX_FRAME|H2_EV_RX_EOI, h2c->conn);
+			break;
+		}
+
+		if (h2c->st0 == H2_CS_FRAME_H) {
+			TRACE_STATE("expecting H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
+			if (!h2_peek_frame_hdr(&h2c->dbuf, 0, &hdr)) {
+				h2c->flags |= H2_CF_DEM_SHORT_READ;
+				break;
+			}
+
+			if ((int)hdr.len < 0 || (int)hdr.len > global.tune.bufsize) {
+				TRACE_ERROR("invalid H2 frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
+				h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
+				if (!h2c->nb_streams && !(h2c->flags & H2_CF_IS_BACK)) {
+					/* only log if no other stream can report the error */
+					sess_log(h2c->conn->owner);
+				}
+				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+				break;
+			}
+
+			if (h2c->rcvd_s && h2c->dsi != hdr.sid) {
+				/* changed stream with a pending WU, need to
+				 * send it now.
+				 */
+				TRACE_PROTO("sending stream WINDOW_UPDATE frame on stream switch", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+				ret = h2c_send_strm_wu(h2c);
+				if (ret <= 0)
+					break;
+			}
+
+			padlen = 0;
+			if (h2_ft_bit(hdr.ft) & H2_FT_PADDED_MASK && hdr.ff & H2_F_PADDED) {
+				/* If the frame is padded (HEADERS, PUSH_PROMISE or DATA),
+				 * we read the pad length and drop it from the remaining
+				 * payload (one byte + the 9 remaining ones = 10 total
+				 * removed), so we have a frame payload starting after the
+				 * pad len. Flow controlled frames (DATA) also count the
+				 * padlen in the flow control, so it must be adjusted.
+				 */
+				if (hdr.len < 1) {
+					TRACE_ERROR("invalid H2 padded frame length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
+					h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
+					if (!(h2c->flags & H2_CF_IS_BACK))
+						sess_log(h2c->conn->owner);
+					HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+					goto done;
+				}
+				hdr.len--;
+
+				if (b_data(&h2c->dbuf) < 10) {
+					h2c->flags |= H2_CF_DEM_SHORT_READ;
+					break; // missing padlen
+				}
+
+				padlen = *(uint8_t *)b_peek(&h2c->dbuf, 9);
+
+				if (padlen > hdr.len) {
+					TRACE_ERROR("invalid H2 padding length", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
+					/* RFC7540#6.1 : pad length = length of
+					 * frame payload or greater => error.
+					 */
+					h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+					if (!(h2c->flags & H2_CF_IS_BACK))
+						sess_log(h2c->conn->owner);
+					HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+					goto done;
+				}
+
+				if (h2_ft_bit(hdr.ft) & H2_FT_FC_MASK) {
+					h2c->rcvd_c++;
+					h2c->rcvd_s++;
+				}
+				b_del(&h2c->dbuf, 1);
+			}
+			h2_skip_frame_hdr(&h2c->dbuf);
+
+		new_frame:
+			/* commit the parsed header into the demux context */
+			h2c->dfl = hdr.len;
+			h2c->dsi = hdr.sid;
+			h2c->dft = hdr.ft;
+			h2c->dff = hdr.ff;
+			h2c->dpl = padlen;
+			h2c->flags |= H2_CF_DEM_IN_PROGRESS;
+			TRACE_STATE("rcvd H2 frame header, switching to FRAME_P state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
+			h2c->st0 = H2_CS_FRAME_P;
+
+			/* check for minimum basic frame format validity */
+			ret = h2_frame_check(h2c->dft, 1, h2c->dsi, h2c->dfl, global.tune.bufsize);
+			if (ret != H2_ERR_NO_ERROR) {
+				TRACE_ERROR("received invalid H2 frame header", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn);
+				h2c_error(h2c, ret);
+				if (!(h2c->flags & H2_CF_IS_BACK))
+					sess_log(h2c->conn->owner);
+				HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+				goto done;
+			}
+
+			/* transition to HEADERS frame ends the keep-alive idle
+			 * timer and starts the http-request idle delay. It uses
+			 * the idle_start timer as well.
+			 */
+			if (hdr.ft == H2_FT_HEADERS)
+				h2c->idle_start = now_ms;
+		}
+
+		/* Only H2_CS_FRAME_P, H2_CS_FRAME_A and H2_CS_FRAME_E here.
+		 * H2_CS_FRAME_P indicates an incomplete previous operation
+		 * (most often the first attempt) and requires some validity
+		 * checks for the frame and the current state. The two other
+		 * ones are set after completion (or abortion) and must skip
+		 * validity checks.
+		 */
+		tmp_h2s = h2c_st_by_id(h2c, h2c->dsi);
+
+		if (tmp_h2s != h2s && h2s && h2s_sc(h2s) &&
+		    (b_data(&h2s->rxbuf) ||
+		     h2c_read0_pending(h2c) ||
+		     h2s->st == H2_SS_CLOSED ||
+		     (h2s->flags & H2_SF_ES_RCVD) ||
+		     se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
+			/* we may have to signal the upper layers */
+			TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
+			se_fl_set(h2s->sd, SE_FL_RCV_MORE);
+			h2s_notify_recv(h2s);
+		}
+		h2s = tmp_h2s;
+
+		if (h2c->st0 == H2_CS_FRAME_E ||
+		    (h2c->st0 == H2_CS_FRAME_P && !h2_frame_check_vs_state(h2c, h2s))) {
+			TRACE_PROTO("stream error reported", H2_EV_RX_FRAME|H2_EV_PROTO_ERR, h2c->conn, h2s);
+			goto strm_err;
+		}
+
+		/* dispatch the frame to its type-specific handler */
+		switch (h2c->dft) {
+		case H2_FT_SETTINGS:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
+				ret = h2c_handle_settings(h2c);
+			}
+			HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
+
+			if (h2c->st0 == H2_CS_FRAME_A) {
+				TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
+				ret = h2c_ack_settings(h2c);
+
+				if (ret > 0 && conn_is_reverse(h2c->conn)) {
+					/* Initiate connection reversal after SETTINGS reception. */
+					ret = h2_conn_reverse(h2c);
+				}
+			}
+			break;
+
+		case H2_FT_PING:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 PING frame", H2_EV_RX_FRAME|H2_EV_RX_PING, h2c->conn, h2s);
+				ret = h2c_handle_ping(h2c);
+			}
+
+			if (h2c->st0 == H2_CS_FRAME_A) {
+				TRACE_PROTO("sending H2 PING ACK frame", H2_EV_TX_FRAME|H2_EV_TX_SETTINGS, h2c->conn, h2s);
+				ret = h2c_ack_ping(h2c);
+			}
+			break;
+
+		case H2_FT_WINDOW_UPDATE:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 WINDOW_UPDATE frame", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
+				ret = h2c_handle_window_update(h2c, h2s);
+			}
+			break;
+
+		case H2_FT_CONTINUATION:
+			/* RFC7540#6.10: CONTINUATION may only be preceded by
+			 * a HEADERS/PUSH_PROMISE/CONTINUATION frame. These
+			 * frames' parsers consume all following CONTINUATION
+			 * frames so this one is out of sequence.
+			 */
+			TRACE_ERROR("received unexpected H2 CONTINUATION frame", H2_EV_RX_FRAME|H2_EV_RX_CONT|H2_EV_H2C_ERR, h2c->conn, h2s);
+			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+			if (!(h2c->flags & H2_CF_IS_BACK))
+				sess_log(h2c->conn->owner);
+			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+			goto done;
+
+		case H2_FT_HEADERS:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 HEADERS frame", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
+				if (h2c->flags & H2_CF_IS_BACK)
+					tmp_h2s = h2c_bck_handle_headers(h2c, h2s);
+				else
+					tmp_h2s = h2c_frt_handle_headers(h2c, h2s);
+				if (tmp_h2s) {
+					h2s = tmp_h2s;
+					ret = 1;
+				}
+			}
+			HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
+			break;
+
+		case H2_FT_DATA:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+				ret = h2c_handle_data(h2c, h2s);
+			}
+			HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);
+
+			if (h2c->st0 == H2_CS_FRAME_A) {
+				/* rcvd_s will suffice to trigger the sending of a WU */
+				h2c->st0 = H2_CS_FRAME_H;
+			}
+			break;
+
+		case H2_FT_PRIORITY:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 PRIORITY frame", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn, h2s);
+				ret = h2c_handle_priority(h2c);
+			}
+			break;
+
+		case H2_FT_RST_STREAM:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
+				ret = h2c_handle_rst_stream(h2c, h2s);
+			}
+			HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
+			break;
+
+		case H2_FT_GOAWAY:
+			if (h2c->st0 == H2_CS_FRAME_P) {
+				TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
+				ret = h2c_handle_goaway(h2c);
+			}
+			HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
+			break;
+
+			/* implement all extra frame types here */
+		default:
+			TRACE_PROTO("receiving H2 ignored frame", H2_EV_RX_FRAME, h2c->conn, h2s);
+			/* drop frames that we ignore. They may be larger than
+			 * the buffer so we drain all of their contents until
+			 * we reach the end.
+			 */
+			ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
+			b_del(&h2c->dbuf, ret);
+			h2c->dfl -= ret;
+			ret = h2c->dfl == 0;
+		}
+
+	strm_err:
+		/* We may have to send an RST if not done yet */
+		if (h2s->st == H2_SS_ERROR) {
+			TRACE_STATE("stream error, switching to FRAME_E", H2_EV_RX_FRAME|H2_EV_H2S_ERR, h2c->conn, h2s);
+			h2c->st0 = H2_CS_FRAME_E;
+		}
+
+		if (h2c->st0 == H2_CS_FRAME_E) {
+			TRACE_PROTO("sending H2 RST_STREAM frame", H2_EV_TX_FRAME|H2_EV_TX_RST|H2_EV_TX_EOI, h2c->conn, h2s);
+			ret = h2c_send_rst_stream(h2c, h2s);
+		}
+
+		/* error or missing data condition met above ? */
+		if (ret <= 0)
+			break;
+
+		/* consume whatever remains of the current frame and go back
+		 * to expecting a new frame header.
+		 */
+		if (h2c->st0 != H2_CS_FRAME_H) {
+			if (h2c->dfl)
+				TRACE_DEVEL("skipping remaining frame payload", H2_EV_RX_FRAME, h2c->conn, h2s);
+			ret = MIN(b_data(&h2c->dbuf), h2c->dfl);
+			b_del(&h2c->dbuf, ret);
+			h2c->dfl -= ret;
+			if (!h2c->dfl) {
+				h2c->flags &= ~H2_CF_DEM_IN_PROGRESS;
+				TRACE_STATE("switching to FRAME_H", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn);
+				h2c->st0 = H2_CS_FRAME_H;
+			}
+		}
+	}
+
+	/* flush any pending stream/connection window updates accumulated above */
+	if (h2c->rcvd_s > 0 &&
+	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
+		TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
+		h2c_send_strm_wu(h2c);
+	}
+
+	if (h2c->rcvd_c > 0 &&
+	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))) {
+		TRACE_PROTO("sending H2 WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn);
+		h2c_send_conn_wu(h2c);
+	}
+
+ done:
+	if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_DEM_SHORT_READ)) {
+		if (h2c->flags & H2_CF_RCVD_SHUT)
+			h2c->flags |= H2_CF_END_REACHED;
+	}
+
+	/* final notification of the last visited stream, same conditions as
+	 * the per-iteration notification above.
+	 */
+	if (h2s && h2s_sc(h2s) &&
+	    (b_data(&h2s->rxbuf) ||
+	     h2c_read0_pending(h2c) ||
+	     h2s->st == H2_SS_CLOSED ||
+	     (h2s->flags & H2_SF_ES_RCVD) ||
+	     se_fl_test(h2s->sd, SE_FL_ERROR | SE_FL_ERR_PENDING | SE_FL_EOS))) {
+		/* we may have to signal the upper layers */
+		TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
+		se_fl_set(h2s->sd, SE_FL_RCV_MORE);
+		h2s_notify_recv(h2s);
+	}
+
+	if (old_iw != h2c->miw) {
+		/* the peer's initial window changed via SETTINGS: wake up
+		 * streams blocked on stream flow control.
+		 */
+		TRACE_STATE("notifying streams about SFCTL increase", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn);
+		h2c_unblock_sfctl(h2c);
+	}
+
+	h2c_restart_reading(h2c, 0);
+ out:
+	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
+	return;
+}
+
+/* resume each h2s eligible for sending in list head <head>. Stops as soon as
+ * the connection window is exhausted, the mux is blocked, or the connection
+ * is in error. Streams already notified (H2_SF_NOTIFIED) are skipped, and
+ * streams that no longer wish to send are dropped from the list.
+ */
+static void h2_resume_each_sending_h2s(struct h2c *h2c, struct list *head)
+{
+	struct h2s *h2s, *h2s_back;
+
+	TRACE_ENTER(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
+
+	list_for_each_entry_safe(h2s, h2s_back, head, list) {
+		if (h2c->mws <= 0 ||
+		    h2c->flags & H2_CF_MUX_BLOCK_ANY ||
+		    h2c->st0 >= H2_CS_ERROR)
+			break;
+
+		h2s->flags &= ~H2_SF_BLK_ANY;
+
+		if (h2s->flags & H2_SF_NOTIFIED)
+			continue;
+
+		/* If the sender changed his mind and unsubscribed, let's just
+		 * remove the stream from the send_list.
+		 */
+		if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) &&
+		    (!h2s->subs || !(h2s->subs->events & SUB_RETRY_SEND))) {
+			LIST_DEL_INIT(&h2s->list);
+			continue;
+		}
+
+		if (h2s->subs && h2s->subs->events & SUB_RETRY_SEND) {
+			/* wake the subscriber and consume its SEND event */
+			h2s->flags |= H2_SF_NOTIFIED;
+			tasklet_wakeup(h2s->subs->tasklet);
+			h2s->subs->events &= ~SUB_RETRY_SEND;
+			if (!h2s->subs->events)
+				h2s->subs = NULL;
+		}
+		else if (h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW)) {
+			/* no subscriber but a pending shutdown: resume it */
+			tasklet_wakeup(h2s->shut_tl);
+		}
+	}
+
+	TRACE_LEAVE(H2_EV_H2C_SEND|H2_EV_H2S_WAKE, h2c->conn);
+}
+
+/* removes a stream from the list it may be in. If a stream has recently been
+ * appended to the send_list, it might have been waiting on this one when
+ * entering h2_snd_buf() and expecting it to complete before starting to send
+ * in turn. For this reason we check (and clear) H2_CF_WAIT_INLIST to detect
+ * this condition, and we try to resume sending streams if it happens. Note
+ * that we don't need to do it for fctl_list as this list is relevant before
+ * (only consulted after) a window update on the connection, and not because
+ * of any competition with other streams.
+ */
+static inline void h2_remove_from_list(struct h2s *h2s)
+{
+	struct h2c *h2c = h2s->h2c;
+
+	/* nothing to do if the stream is not queued anywhere */
+	if (!LIST_INLIST(&h2s->list))
+		return;
+
+	LIST_DEL_INIT(&h2s->list);
+	if (h2c->flags & H2_CF_WAIT_INLIST) {
+		/* someone was waiting behind this stream: resume the others */
+		h2c->flags &= ~H2_CF_WAIT_INLIST;
+		h2_resume_each_sending_h2s(h2c, &h2c->send_list);
+	}
+}
+
+/* process Tx frames from streams to be multiplexed. Returns > 0 if it reached
+ * the end.
+ */
+static int h2_process_mux(struct h2c *h2c)
+{
+	TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
+
+	/* backend bootstrap: the preface must be sent before anything else */
+	if (unlikely(h2c->st0 < H2_CS_FRAME_H)) {
+		if (unlikely(h2c->st0 == H2_CS_PREFACE && (h2c->flags & H2_CF_IS_BACK))) {
+			if (unlikely(h2c_bck_send_preface(h2c) <= 0)) {
+				/* RFC7540#3.5: a GOAWAY frame MAY be omitted */
+				if (h2c->st0 == H2_CS_ERROR)
+					h2c->st0 = H2_CS_ERROR2;
+				goto fail;
+			}
+			h2c->st0 = H2_CS_SETTINGS1;
+		}
+		/* need to wait for the other side */
+		if (h2c->st0 < H2_CS_FRAME_H)
+			goto done;
+	}
+
+	/* start by sending possibly pending window updates */
+	if (h2c->rcvd_s > 0 &&
+	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
+	    h2c_send_strm_wu(h2c) < 0)
+		goto fail;
+
+	if (h2c->rcvd_c > 0 &&
+	    !(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_MUX_MALLOC)) &&
+	    h2c_send_conn_wu(h2c) < 0)
+		goto fail;
+
+	/* First we always process the flow control list because the streams
+	 * waiting there were already elected for immediate emission but were
+	 * blocked just on this.
+	 */
+	h2c->flags &= ~H2_CF_WAIT_INLIST;
+	h2_resume_each_sending_h2s(h2c, &h2c->fctl_list);
+	h2_resume_each_sending_h2s(h2c, &h2c->send_list);
+
+ fail:
+	if (unlikely(h2c->st0 >= H2_CS_ERROR)) {
+		if (h2c->st0 == H2_CS_ERROR) {
+			/* try to emit a GOAWAY before reporting the error as
+			 * fully handled; stay blocked if the mux can't send.
+			 */
+			if (h2c->max_id >= 0) {
+				h2c_send_goaway_error(h2c, NULL);
+				if (h2c->flags & H2_CF_MUX_BLOCK_ANY)
+					goto out0;
+			}
+
+			h2c->st0 = H2_CS_ERROR2; // sent (or failed hard) !
+		}
+	}
+ done:
+	TRACE_LEAVE(H2_EV_H2C_WAKE, h2c->conn);
+	return 1;
+ out0:
+	TRACE_DEVEL("leaving in blocked situation", H2_EV_H2C_WAKE, h2c->conn);
+	return 0;
+}
+
+
+/* Attempt to read data, and subscribe if none available.
+ * The function returns 1 if data has been received, otherwise zero.
+ * Note that as coded, a non-zero value is also returned when a read0 or a
+ * connection error was detected, so that the caller gets a chance to process
+ * the event (see the final return expression).
+ */
+static int h2_recv(struct h2c *h2c)
+{
+ struct connection *conn = h2c->conn;
+ struct buffer *buf;
+ int max;
+ size_t ret;
+
+ TRACE_ENTER(H2_EV_H2C_RECV, h2c->conn);
+
+ /* already subscribed for receipt: report whether data is pending */
+ if (h2c->wait_event.events & SUB_RETRY_RECV) {
+ TRACE_DEVEL("leaving on sub_recv", H2_EV_H2C_RECV, h2c->conn);
+ return (b_data(&h2c->dbuf));
+ }
+
+ if (!h2_recv_allowed(h2c)) {
+ TRACE_DEVEL("leaving on !recv_allowed", H2_EV_H2C_RECV, h2c->conn);
+ return 1;
+ }
+
+ buf = h2_get_buf(h2c, &h2c->dbuf);
+ if (!buf) {
+ /* demux buffer allocation failed, wait for a buffer to be offered */
+ h2c->flags |= H2_CF_DEM_DALLOC;
+ TRACE_DEVEL("leaving on !alloc", H2_EV_H2C_RECV, h2c->conn);
+ return 0;
+ }
+
+ if (!b_data(buf)) {
+ /* try to pre-align the buffer like the
+ * rxbufs will be to optimize memory copies. We'll make
+ * sure that the frame header lands at the end of the
+ * HTX block to alias it upon recv. We cannot use the
+ * head because rcv_buf() will realign the buffer if
+ * it's empty. Thus we cheat and pretend we already
+ * have a few bytes there.
+ */
+ max = buf_room_for_htx_data(buf) + 9;
+ buf->head = sizeof(struct htx) - 9;
+ }
+ else
+ max = b_room(buf);
+
+ ret = max ? conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0) : 0;
+
+ if (max && !ret && h2_recv_allowed(h2c)) {
+ TRACE_DATA("failed to receive data, subscribing", H2_EV_H2C_RECV, h2c->conn);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
+ } else if (ret) {
+ TRACE_DATA("received data", H2_EV_H2C_RECV, h2c->conn, 0, 0, (void*)(long)ret);
+ h2c->flags &= ~H2_CF_DEM_SHORT_READ;
+ }
+
+ if (conn_xprt_read0_pending(h2c->conn)) {
+ TRACE_DATA("received read0", H2_EV_H2C_RECV, h2c->conn);
+ h2c->flags |= H2_CF_RCVD_SHUT;
+ }
+ if (h2c->conn->flags & CO_FL_ERROR && !b_data(&h2c->dbuf)) {
+ TRACE_DATA("connection error", H2_EV_H2C_RECV, h2c->conn);
+ h2c->flags |= H2_CF_ERROR;
+ }
+
+ if (!b_data(buf)) {
+ /* nothing received, give the (empty) buffer back */
+ h2_release_buf(h2c, &h2c->dbuf);
+ goto end;
+ }
+
+ if (b_data(buf) == buf->size) {
+ h2c->flags |= H2_CF_DEM_DFULL;
+ TRACE_STATE("demux buffer full", H2_EV_H2C_RECV|H2_EV_H2C_BLK, h2c->conn);
+ }
+
+ end:
+ TRACE_LEAVE(H2_EV_H2C_RECV, h2c->conn);
+ return !!ret || (h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERROR));
+}
+
+/* Try to send data if possible.
+ * The function returns 1 if data have been sent, otherwise zero. A non-zero
+ * value is also returned on a pending or definitive connection error.
+ */
+static int h2_send(struct h2c *h2c)
+{
+ struct connection *conn = h2c->conn;
+ int done;
+ int sent = 0;
+
+ TRACE_ENTER(H2_EV_H2C_SEND, h2c->conn);
+
+ if (h2c->flags & (H2_CF_ERROR|H2_CF_ERR_PENDING)) {
+ TRACE_DEVEL("leaving on error", H2_EV_H2C_SEND, h2c->conn);
+ if (h2c->flags & H2_CF_END_REACHED)
+ h2c->flags |= H2_CF_ERROR;
+ b_reset(br_tail(h2c->mbuf));
+ h2c->idle_start = now_ms;
+ return 1;
+ }
+
+ /* This loop is quite simple : it tries to fill as much as it can from
+ * pending streams into the existing buffer until it's reportedly full
+ * or the end of send requests is reached. Then it tries to send this
+ * buffer's contents out, marks it not full if at least one byte could
+ * be sent, and tries again.
+ *
+ * The snd_buf() function normally takes a "flags" argument which may
+ * be made of a combination of CO_SFL_MSG_MORE to indicate that more
+ * data immediately comes and CO_SFL_STREAMER to indicate that the
+ * connection is streaming lots of data (used to increase TLS record
+ * size at the expense of latency). The former can be sent any time
+ * there's a buffer full flag, as it indicates at least one stream
+ * attempted to send and failed so there are pending data. An
+ * alternative would be to set it as long as there's an active stream
+ * but that would be problematic for ACKs until we have an absolute
+ * guarantee that all waiters have at least one byte to send. The
+ * latter should possibly not be set for now.
+ */
+
+ done = 0;
+ while (!(conn->flags & CO_FL_WAIT_XPRT) && !done) {
+ unsigned int flags = 0;
+ unsigned int released = 0;
+ struct buffer *buf;
+ uint to_send;
+
+ /* fill as much as we can into the current buffer */
+ while (((h2c->flags & (H2_CF_MUX_MFULL|H2_CF_MUX_MALLOC)) == 0) && !done)
+ done = h2_process_mux(h2c);
+
+ if (h2c->flags & H2_CF_MUX_MALLOC)
+ done = 1; // we won't go further without extra buffers
+
+ if ((conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)) ||
+ (h2c->flags & H2_CF_GOAWAY_FAILED))
+ break;
+
+ if (h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM))
+ flags |= CO_SFL_MSG_MORE;
+
+ to_send = br_count(h2c->mbuf);
+ if (to_send > 1) {
+ /* usually we want to emit small TLS records to speed
+ * up the decoding on the client. That's what is being
+ * done by default. However if there is more than one
+ * buffer being allocated, we're streaming large data
+ * so we stick to large records.
+ */
+ flags |= CO_SFL_STREAMER;
+ }
+
+ for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
+ if (b_data(buf)) {
+ int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf),
+ flags | (to_send > 1 ? CO_SFL_MSG_MORE : 0));
+ if (!ret) {
+ /* transport layer could not take anything */
+ done = 1;
+ break;
+ }
+ sent = 1;
+ to_send--;
+ TRACE_DATA("sent data", H2_EV_H2C_SEND, h2c->conn, 0, buf, (void*)(long)ret);
+ b_del(buf, ret);
+ if (b_data(buf)) {
+ /* partial write: keep the rest for later */
+ done = 1;
+ break;
+ }
+ }
+ b_free(buf);
+ released++;
+ }
+
+ if (released)
+ offer_buffers(NULL, released);
+
+ /* Normally if we wrote at least one byte, the buffer is not full
+ * anymore. However, if it was marked full because all of its
+ * buffers were used, we don't want to instantly wake up many
+ * streams because we'd create a thundering herd effect, notably
+ * when data are flushed in small chunks. Instead we wait for
+ * the buffer to be decongested again before allowing to send
+ * again. It also has the added benefit of not pumping more
+ * data from the other side when it's known that this one is
+ * still congested.
+ */
+ if (sent && br_single(h2c->mbuf))
+ h2c->flags &= ~(H2_CF_MUX_MFULL | H2_CF_DEM_MROOM);
+ }
+
+ if (conn->flags & CO_FL_ERROR) {
+ h2c->flags |= H2_CF_ERR_PENDING;
+ if (h2c->flags & H2_CF_END_REACHED)
+ h2c->flags |= H2_CF_ERROR;
+ b_reset(br_tail(h2c->mbuf));
+ }
+
+ /* We're not full anymore, so we can wake any task that are waiting
+ * for us.
+ */
+ if (!(h2c->flags & (H2_CF_MUX_MFULL | H2_CF_DEM_MROOM)) && h2c->st0 >= H2_CS_FRAME_H) {
+ h2c->flags &= ~H2_CF_WAIT_INLIST;
+ h2_resume_each_sending_h2s(h2c, &h2c->send_list);
+ }
+
+ /* We're done, no more to send */
+ if (!(conn->flags & CO_FL_WAIT_XPRT) && !br_data(h2c->mbuf)) {
+ TRACE_DEVEL("leaving with everything sent", H2_EV_H2C_SEND, h2c->conn);
+ if (h2c->flags & H2_CF_MBUF_HAS_DATA && !h2c->nb_sc) {
+ h2c->flags &= ~H2_CF_MBUF_HAS_DATA;
+ h2c->idle_start = now_ms;
+ }
+ goto end;
+ }
+
+ if (!(conn->flags & CO_FL_ERROR) && !(h2c->wait_event.events & SUB_RETRY_SEND)) {
+ TRACE_STATE("more data to send, subscribing", H2_EV_H2C_SEND, h2c->conn);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
+ }
+ TRACE_DEVEL("leaving with some data left to send", H2_EV_H2C_SEND, h2c->conn);
+end:
+ return sent || (h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR));
+}
+
+/* this is the tasklet referenced in h2c->wait_event.tasklet */
+struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
+{
+ struct connection *conn;
+ struct tasklet *tl = (struct tasklet *)t;
+ int conn_in_list;
+ struct h2c *h2c = ctx;
+ int ret = 0;
+
+ if (state & TASK_F_USR1) {
+ /* the tasklet was idling on an idle connection, it might have
+ * been stolen, let's be careful!
+ */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ if (t->context == NULL) {
+ /* The connection has been taken over by another thread,
+ * we're no longer responsible for it, so just free the
+ * tasklet, and do nothing.
+ */
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ tasklet_free(tl);
+ t = NULL;
+ goto leave;
+ }
+ conn = h2c->conn;
+ TRACE_ENTER(H2_EV_H2C_WAKE, conn);
+
+ /* Remove the connection from the list, to be sure nobody attempts
+ * to use it while we handle the I/O events
+ */
+ conn_in_list = conn->flags & CO_FL_LIST_MASK;
+ if (conn_in_list)
+ conn_delete_from_tree(conn);
+
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ } else {
+ /* we're certain the connection was not in an idle list */
+ conn = h2c->conn;
+ TRACE_ENTER(H2_EV_H2C_WAKE, conn);
+ conn_in_list = 0;
+ }
+
+ if (!(h2c->wait_event.events & SUB_RETRY_SEND))
+ ret = h2_send(h2c);
+ if (!(h2c->wait_event.events & SUB_RETRY_RECV))
+ ret |= h2_recv(h2c);
+ if (ret || b_data(&h2c->dbuf))
+ ret = h2_process(h2c);
+
+ /* If we were in an idle list, we want to add it back into it,
+ * unless h2_process() returned -1, which means it has destroyed
+ * the connection (testing !ret is enough, if h2_process() wasn't
+ * called then ret will be 0 anyway).
+ */
+ if (ret < 0)
+ t = NULL;
+
+ if (!ret && conn_in_list) {
+ struct server *srv = objt_server(conn->target);
+
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ _srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+
+leave:
+ TRACE_LEAVE(H2_EV_H2C_WAKE);
+ return t;
+}
+
+/* callback called on any event by the connection handler.
+ * It applies changes and returns zero, or < 0 if it wants immediate
+ * destruction of the connection (which normally does not happen in h2).
+ */
+static int h2_process(struct h2c *h2c)
+{
+ struct connection *conn = h2c->conn;
+
+ TRACE_ENTER(H2_EV_H2C_WAKE, conn);
+
+ if (!(h2c->flags & H2_CF_DEM_BLOCK_ANY) &&
+ (b_data(&h2c->dbuf) || (h2c->flags & H2_CF_RCVD_SHUT))) {
+ h2_process_demux(h2c);
+
+ /* on fatal error, drop any pending input */
+ if (h2c->st0 >= H2_CS_ERROR || (h2c->flags & H2_CF_ERROR))
+ b_reset(&h2c->dbuf);
+
+ if (!b_full(&h2c->dbuf))
+ h2c->flags &= ~H2_CF_DEM_DFULL;
+ }
+ h2_send(h2c);
+
+ if (unlikely(h2c->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !(h2c->flags & H2_CF_IS_BACK)) {
+ int send_goaway = 1;
+ /* If a close-spread-time option is set, we want to avoid
+ * closing all the active HTTP2 connections at once so we add a
+ * random factor that will spread the closing.
+ */
+ if (tick_isset(global.close_spread_end)) {
+ int remaining_window = tick_remain(now_ms, global.close_spread_end);
+ if (remaining_window) {
+ /* This should increase the closing rate the
+ * further along the window we are. */
+ send_goaway = (remaining_window <= statistical_prng_range(global.close_spread_time));
+ }
+ }
+ else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)
+ send_goaway = 0; /* let the client close his connection himself */
+ /* frontend is stopping, reload likely in progress, let's try
+ * to announce a graceful shutdown if not yet done. We don't
+ * care if it fails, it will be tried again later.
+ */
+ if (send_goaway) {
+ TRACE_STATE("proxy stopped, sending GOAWAY", H2_EV_H2C_WAKE|H2_EV_TX_FRAME, conn);
+ if (!(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
+ if (h2c->last_sid < 0)
+ h2c->last_sid = (1U << 31) - 1;
+ h2c_send_goaway_error(h2c, NULL);
+ }
+ }
+ }
+
+ /*
+ * If we received early data, and the handshake is done, wake
+ * any stream that was waiting for it.
+ */
+ if (!(h2c->flags & H2_CF_WAIT_FOR_HS) &&
+ (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT | CO_FL_EARLY_DATA)) == CO_FL_EARLY_DATA) {
+ struct eb32_node *node;
+ struct h2s *h2s;
+
+ h2c->flags |= H2_CF_WAIT_FOR_HS;
+ node = eb32_lookup_ge(&h2c->streams_by_id, 1);
+
+ while (node) {
+ h2s = container_of(node, struct h2s, by_id);
+ if (se_fl_test(h2s->sd, SE_FL_WAIT_FOR_HS))
+ h2s_notify_recv(h2s);
+ node = eb32_next(node);
+ }
+ }
+
+ if ((h2c->flags & H2_CF_ERROR) || h2c_read0_pending(h2c) ||
+ h2c->st0 == H2_CS_ERROR2 || h2c->flags & H2_CF_GOAWAY_FAILED ||
+ (eb_is_empty(&h2c->streams_by_id) && h2c->last_sid >= 0 &&
+ h2c->max_id >= h2c->last_sid)) {
+ h2_wake_some_streams(h2c, 0);
+
+ if (eb_is_empty(&h2c->streams_by_id)) {
+ /* no more stream, kill the connection now */
+ h2_release(h2c);
+ TRACE_DEVEL("leaving after releasing the connection", H2_EV_H2C_WAKE);
+ return -1;
+ }
+
+ /* connections in error must be removed from the idle lists */
+ if (conn->flags & CO_FL_LIST_MASK) {
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ conn_delete_from_tree(conn);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+ }
+ else if (h2c->st0 == H2_CS_ERROR) {
+ /* connections in error must be removed from the idle lists */
+ if (conn->flags & CO_FL_LIST_MASK) {
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ conn_delete_from_tree(conn);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+ }
+
+ if (!b_data(&h2c->dbuf))
+ h2_release_buf(h2c, &h2c->dbuf);
+
+ /* release the mux buffer when safe: either the GOAWAY outcome is known
+ * or nothing remains to be sent and nothing can be sent.
+ */
+ if (h2c->st0 == H2_CS_ERROR2 || (h2c->flags & H2_CF_GOAWAY_FAILED) ||
+ (h2c->st0 != H2_CS_ERROR &&
+ !br_data(h2c->mbuf) &&
+ (h2c->mws <= 0 || LIST_ISEMPTY(&h2c->fctl_list)) &&
+ ((h2c->flags & H2_CF_MUX_BLOCK_ANY) || LIST_ISEMPTY(&h2c->send_list))))
+ h2_release_mbuf(h2c);
+
+ h2c_update_timeout(h2c);
+ h2_send(h2c);
+ TRACE_LEAVE(H2_EV_H2C_WAKE, conn);
+ return 0;
+}
+
+/* wake-up function called by the connection layer (mux_ops.wake).
+ * Returns the value of h2_process(): 0, or < 0 when the connection was
+ * destroyed during processing.
+ */
+static int h2_wake(struct connection *conn)
+{
+ struct h2c *h2c = conn->ctx;
+ int ret;
+
+ TRACE_ENTER(H2_EV_H2C_WAKE, conn);
+ ret = h2_process(h2c);
+ if (ret >= 0) {
+ h2_wake_some_streams(h2c, 0);
+
+ /* For active reverse connection, an explicit check is required if an
+ * error is pending to propagate the error as demux process is blocked
+ * until reversal. This allows to quickly close the connection and
+ * prepare a new one.
+ */
+ if (unlikely(conn_reverse_in_preconnect(conn)) && h2c_is_dead(h2c)) {
+ TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
+ h2_release(h2c);
+ }
+ }
+
+ TRACE_LEAVE(H2_EV_H2C_WAKE);
+ return ret;
+}
+
+/* Connection timeout management. The principle is that if there's no receipt
+ * nor sending for a certain amount of time, the connection is closed. If the
+ * MUX buffer still has lying data or is not allocatable, the connection is
+ * immediately killed. If it's allocatable and empty, we attempt to send a
+ * GOAWAY frame.
+ */
+struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
+{
+ struct h2c *h2c = context;
+ int expired = tick_is_expired(t->expire, now_ms);
+
+ TRACE_ENTER(H2_EV_H2C_WAKE, h2c ? h2c->conn : NULL);
+
+ if (h2c) {
+ /* Make sure nobody stole the connection from us */
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+
+ /* Somebody already stole the connection from us, so we should not
+ * free it, we just have to free the task.
+ */
+ if (!t->context) {
+ h2c = NULL;
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ goto do_leave;
+ }
+
+
+ if (!expired) {
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ TRACE_DEVEL("leaving (not expired)", H2_EV_H2C_WAKE, h2c->conn);
+ return t;
+ }
+
+ if (!h2c_may_expire(h2c)) {
+ /* we do still have streams but all of them are idle, waiting
+ * for the data layer, so we must not enforce the timeout here.
+ */
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ t->expire = TICK_ETERNITY;
+ return t;
+ }
+
+ /* We're about to destroy the connection, so make sure nobody attempts
+ * to steal it from us.
+ */
+ if (h2c->conn->flags & CO_FL_LIST_MASK)
+ conn_delete_from_tree(h2c->conn);
+
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+
+do_leave:
+ task_destroy(t);
+
+ if (!h2c) {
+ /* resources were already deleted */
+ TRACE_DEVEL("leaving (not more h2c)", H2_EV_H2C_WAKE);
+ return NULL;
+ }
+
+ h2c->task = NULL;
+ h2c_error(h2c, H2_ERR_NO_ERROR);
+ h2_wake_some_streams(h2c, 0);
+
+ if (br_data(h2c->mbuf)) {
+ /* don't even try to send a GOAWAY, the buffer is stuck */
+ h2c->flags |= H2_CF_GOAWAY_FAILED;
+ }
+
+ /* try to send but no need to insist */
+ h2c->last_sid = h2c->max_id;
+ if (h2c_send_goaway_error(h2c, NULL) <= 0)
+ h2c->flags |= H2_CF_GOAWAY_FAILED;
+
+ /* flush whatever could be emitted directly to the transport layer,
+ * best effort only.
+ */
+ if (br_data(h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
+ unsigned int released = 0;
+ struct buffer *buf;
+
+ for (buf = br_head(h2c->mbuf); b_size(buf); buf = br_del_head(h2c->mbuf)) {
+ if (b_data(buf)) {
+ int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, buf, b_data(buf), 0);
+ if (!ret)
+ break;
+ b_del(buf, ret);
+ if (b_data(buf))
+ break;
+ b_free(buf);
+ released++;
+ }
+ }
+
+ if (released)
+ offer_buffers(NULL, released);
+ }
+
+ /* in any case this connection must not be considered idle anymore */
+ if (h2c->conn->flags & CO_FL_LIST_MASK) {
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ conn_delete_from_tree(h2c->conn);
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+ }
+
+ /* either we can release everything now or it will be done later once
+ * the last stream closes.
+ */
+ if (eb_is_empty(&h2c->streams_by_id))
+ h2_release(h2c);
+
+ TRACE_LEAVE(H2_EV_H2C_WAKE);
+ return NULL;
+}
+
+
+/*******************************************/
+/* functions below are used by the streams */
+/*******************************************/
+
+/*
+ * Attach a new stream to a connection
+ * (Used for outgoing connections).
+ * Returns 0 on success, -1 when the stream could not be created.
+ */
+static int h2_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
+{
+ struct h2s *h2s;
+ struct h2c *h2c = conn->ctx;
+
+ TRACE_ENTER(H2_EV_H2S_NEW, conn);
+ h2s = h2c_bck_stream_new(h2c, sd->sc, sess);
+ if (!h2s) {
+ TRACE_DEVEL("leaving on stream creation failure", H2_EV_H2S_NEW|H2_EV_H2S_ERR, conn);
+ return -1;
+ }
+
+ /* the connection is not idle anymore, let's mark this */
+ HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
+ xprt_set_used(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
+
+ TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
+ return 0;
+}
+
+/* Retrieves the first valid stream connector from this connection, or returns
+ * NULL. We have to scan because we may have some orphan streams. It might be
+ * beneficial to scan backwards from the end to reduce the likelihood to find
+ * orphans.
+ */
+static struct stconn *h2_get_first_sc(const struct connection *conn)
+{
+ struct h2c *h2c = conn->ctx;
+ struct h2s *h2s;
+ struct eb32_node *node;
+
+ node = eb32_first(&h2c->streams_by_id);
+ while (node) {
+ h2s = container_of(node, struct h2s, by_id);
+ if (h2s_sc(h2s))
+ return h2s_sc(h2s);
+ node = eb32_next(node);
+ }
+ return NULL;
+}
+
+/* connection-level control callback (mux_ops.ctl). Returns a request-specific
+ * value, or -1 for unsupported requests.
+ */
+static int h2_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
+{
+ int ret = 0;
+ struct h2c *h2c = conn->ctx;
+
+ switch (mux_ctl) {
+ case MUX_CTL_STATUS:
+ /* Only consider the mux to be ready if we're done with
+ * the preface and settings, and we had no error.
+ */
+ if (h2c->st0 >= H2_CS_FRAME_H && h2c->st0 < H2_CS_ERROR)
+ ret |= MUX_STATUS_READY;
+ return ret;
+ case MUX_CTL_EXIT_STATUS:
+ return MUX_ES_UNKNOWN;
+
+ case MUX_CTL_REVERSE_CONN:
+ /* only valid on a frontend connection being reversed */
+ BUG_ON(h2c->flags & H2_CF_IS_BACK);
+
+ TRACE_DEVEL("connection reverse done, restart demux", H2_EV_H2C_WAKE, h2c->conn);
+ h2c->flags &= ~H2_CF_DEM_TOOMANY;
+ tasklet_wakeup(h2c->wait_event.tasklet);
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+/* stream-level control callback (mux_ops.sctl). Returns 0 on success or -1
+ * for unsupported requests.
+ */
+static int h2_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
+{
+ int ret = 0;
+ struct h2s *h2s = __sc_mux_strm(sc);
+
+ switch (mux_sctl) {
+ case MUX_SCTL_SID:
+ /* report the HTTP/2 stream identifier */
+ if (output)
+ *((int64_t *)output) = h2s->id;
+ return ret;
+
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Destroy the mux and the associated connection, if it is no longer used
+ * (i.e. no stream remains attached to it).
+ */
+static void h2_destroy(void *ctx)
+{
+ struct h2c *h2c = ctx;
+
+ TRACE_ENTER(H2_EV_H2C_END, h2c->conn);
+ if (eb_is_empty(&h2c->streams_by_id)) {
+ BUG_ON(h2c->conn->ctx != h2c);
+ h2_release(h2c);
+ }
+ TRACE_LEAVE(H2_EV_H2C_END);
+}
+
+/*
+ * Detach the stream from the connection and possibly release the connection.
+ * For backend connections, this also handles returning the connection to the
+ * session's private list or to the server's idle/avail lists for reuse.
+ */
+static void h2_detach(struct sedesc *sd)
+{
+ struct h2s *h2s = sd->se;
+ struct h2c *h2c;
+ struct session *sess;
+
+ TRACE_ENTER(H2_EV_STRM_END, h2s ? h2s->h2c->conn : NULL, h2s);
+
+ if (!h2s) {
+ TRACE_LEAVE(H2_EV_STRM_END);
+ return;
+ }
+
+ /* there's no txbuf so we're certain not to be able to send anything */
+ h2s->flags &= ~H2_SF_NOTIFIED;
+
+ sess = h2s->sess;
+ h2c = h2s->h2c;
+ h2c->nb_sc--;
+ if (!h2c->nb_sc && !br_data(h2c->mbuf))
+ h2c->idle_start = now_ms;
+
+ if ((h2c->flags & (H2_CF_IS_BACK|H2_CF_DEM_TOOMANY)) == H2_CF_DEM_TOOMANY &&
+ !h2_frt_has_too_many_sc(h2c)) {
+ /* frontend connection was blocking new streams creation */
+ h2c->flags &= ~H2_CF_DEM_TOOMANY;
+ h2c_restart_reading(h2c, 1);
+ }
+
+ /* this stream may be blocked waiting for some data to leave (possibly
+ * an ES or RST frame), so orphan it in this case.
+ */
+ if (!(h2c->flags & (H2_CF_ERR_PENDING|H2_CF_ERROR)) &&
+ (h2c->st0 < H2_CS_ERROR) &&
+ (h2s->flags & (H2_SF_BLK_MBUSY | H2_SF_BLK_MROOM | H2_SF_BLK_MFCTL)) &&
+ ((h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)) || h2s->subs)) {
+ TRACE_DEVEL("leaving on stream blocked", H2_EV_STRM_END|H2_EV_H2S_BLK, h2c->conn, h2s);
+ /* refresh the timeout if none was active, so that the last
+ * leaving stream may arm it.
+ */
+ if (h2c->task && !tick_isset(h2c->task->expire))
+ h2c_update_timeout(h2c);
+ return;
+ }
+
+ if ((h2c->flags & H2_CF_DEM_BLOCK_ANY && h2s->id == h2c->dsi)) {
+ /* unblock the connection if it was blocked on this
+ * stream.
+ */
+ h2c->flags &= ~H2_CF_DEM_BLOCK_ANY;
+ h2c->flags &= ~H2_CF_MUX_BLOCK_ANY;
+ h2c_restart_reading(h2c, 1);
+ }
+
+ h2s_destroy(h2s);
+
+ if (h2c->flags & H2_CF_IS_BACK) {
+ if (!(h2c->flags & (H2_CF_RCVD_SHUT|H2_CF_ERR_PENDING|H2_CF_ERROR))) {
+ if (h2c->conn->flags & CO_FL_PRIVATE) {
+ /* Add the connection in the session server list, if not already done */
+ if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
+ h2c->conn->owner = NULL;
+ if (eb_is_empty(&h2c->streams_by_id)) {
+ h2c->conn->mux->destroy(h2c);
+ TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
+ return;
+ }
+ }
+ if (eb_is_empty(&h2c->streams_by_id)) {
+ if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
+ /* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
+ TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
+ return;
+ }
+ }
+ }
+ else {
+ if (eb_is_empty(&h2c->streams_by_id)) {
+ /* If the connection is owned by the session, first remove it
+ * from its list
+ */
+ if (h2c->conn->owner) {
+ session_unown_conn(h2c->conn->owner, h2c->conn);
+ h2c->conn->owner = NULL;
+ }
+
+ /* mark that the tasklet may lose its context to another thread and
+ * that the handler needs to check it under the idle conns lock.
+ */
+ HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
+ xprt_set_idle(h2c->conn, h2c->conn->xprt, h2c->conn->xprt_ctx);
+
+ if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
+ /* The server doesn't want it, let's kill the connection right away */
+ h2c->conn->mux->destroy(h2c);
+ TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
+ return;
+ }
+ /* At this point, the connection has been added to the
+ * server idle list, so another thread may already have
+ * hijacked it, so we can't do anything with it.
+ */
+ TRACE_DEVEL("reusable idle connection", H2_EV_STRM_END);
+ return;
+
+ }
+ else if (!h2c->conn->hash_node->node.node.leaf_p &&
+ h2_avail_streams(h2c->conn) > 0 && objt_server(h2c->conn->target) &&
+ !LIST_INLIST(&h2c->conn->session_list)) {
+ srv_add_to_avail_list(__objt_server(h2c->conn->target), h2c->conn);
+ }
+ }
+ }
+ }
+
+ /* We don't want to close right now unless we're removing the
+ * last stream, and either the connection is in error, or it
+ * reached the ID already specified in a GOAWAY frame received
+ * or sent (as seen by last_sid >= 0).
+ */
+ if (h2c_is_dead(h2c)) {
+ /* no more stream will come, kill it now */
+ TRACE_DEVEL("leaving and killing dead connection", H2_EV_STRM_END, h2c->conn);
+ h2_release(h2c);
+ }
+ else if (h2c->task) {
+ h2c_update_timeout(h2c);
+ TRACE_DEVEL("leaving, refreshing connection's timeout", H2_EV_STRM_END, h2c->conn);
+ }
+ else
+ TRACE_DEVEL("leaving", H2_EV_STRM_END, h2c->conn);
+}
+
+/* Performs a synchronous or asynchronous shutr(). */
+static void h2_do_shutr(struct h2s *h2s)
+{
+ struct h2c *h2c = h2s->h2c;
+
+ /* NOTE(review): this early jump reaches TRACE_LEAVE without a matching
+ * TRACE_ENTER; harmless for correctness, traces may look unbalanced.
+ */
+ if (h2s->st == H2_SS_CLOSED)
+ goto done;
+
+ TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
+
+ if (h2s->flags & H2_SF_WANT_SHUTW)
+ goto add_to_list;
+
+ /* a connstream may require us to immediately kill the whole connection
+ * for example because of a "tcp-request content reject" rule that is
+ * normally used to limit abuse. In this case we schedule a goaway to
+ * close the connection.
+ */
+ if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
+ !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
+ TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
+ h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
+ h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
+ }
+ else if (!(h2s->flags & H2_SF_HEADERS_SENT)) {
+ /* Nothing was sent yet for this stream, so reset with
+ * REFUSED_STREAM error to let the client retry the
+ * request.
+ */
+ TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
+ h2s_error(h2s, H2_ERR_REFUSED_STREAM);
+ }
+ else {
+ /* a final response was already provided, we don't want this
+ * stream anymore. This may happen when the server responds
+ * before the end of an upload and closes quickly (redirect,
+ * deny, ...)
+ */
+ h2s_error(h2s, H2_ERR_CANCEL);
+ }
+
+ if (!(h2s->flags & H2_SF_RST_SENT) &&
+ h2s_send_rst_stream(h2c, h2s) <= 0)
+ goto add_to_list;
+
+ if (!(h2c->wait_event.events & SUB_RETRY_SEND))
+ tasklet_wakeup(h2c->wait_event.tasklet);
+ h2s_close(h2s);
+ done:
+ h2s->flags &= ~H2_SF_WANT_SHUTR;
+ TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
+ return;
+add_to_list:
+ /* Let the handler know we want to shutr, and add ourselves to the
+ * most relevant list if not yet done. h2_deferred_shut() will be
+ * automatically called via the shut_tl tasklet when there's room
+ * again.
+ */
+ h2s->flags |= H2_SF_WANT_SHUTR;
+ if (!LIST_INLIST(&h2s->list)) {
+ if (h2s->flags & H2_SF_BLK_MFCTL)
+ LIST_APPEND(&h2c->fctl_list, &h2s->list);
+ else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
+ LIST_APPEND(&h2c->send_list, &h2s->list);
+ }
+ TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
+ return;
+}
+
+/* Performs a synchronous or asynchronous shutw(). */
+static void h2_do_shutw(struct h2s *h2s)
+{
+ struct h2c *h2c = h2s->h2c;
+
+ if (h2s->st == H2_SS_HLOC || h2s->st == H2_SS_CLOSED)
+ goto done;
+
+ TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
+
+ if (h2s->st != H2_SS_ERROR &&
+ (h2s->flags & (H2_SF_HEADERS_SENT | H2_SF_MORE_HTX_DATA)) == H2_SF_HEADERS_SENT) {
+ /* we can cleanly close using an empty data frame only after headers
+ * and if no more data is expected to be sent.
+ */
+ if (!(h2s->flags & (H2_SF_ES_SENT|H2_SF_RST_SENT)) &&
+ h2_send_empty_data_es(h2s) <= 0)
+ goto add_to_list;
+
+ if (h2s->st == H2_SS_HREM)
+ h2s_close(h2s);
+ else
+ h2s->st = H2_SS_HLOC;
+ } else {
+ /* a connstream may require us to immediately kill the whole connection
+ * for example because of a "tcp-request content reject" rule that is
+ * normally used to limit abuse. In this case we schedule a goaway to
+ * close the connection.
+ */
+ if (se_fl_test(h2s->sd, SE_FL_KILL_CONN) &&
+ !(h2c->flags & (H2_CF_GOAWAY_SENT|H2_CF_GOAWAY_FAILED))) {
+ TRACE_STATE("stream wants to kill the connection", H2_EV_STRM_SHUT, h2c->conn, h2s);
+ h2c_error(h2c, H2_ERR_ENHANCE_YOUR_CALM);
+ h2s_error(h2s, H2_ERR_ENHANCE_YOUR_CALM);
+ }
+ else if (h2s->flags & H2_SF_MORE_HTX_DATA) {
+ /* some unsent data were pending (e.g. abort during an upload),
+ * let's send a CANCEL.
+ */
+ TRACE_STATE("shutw before end of data, sending CANCEL", H2_EV_STRM_SHUT, h2c->conn, h2s);
+ h2s_error(h2s, H2_ERR_CANCEL);
+ }
+ else {
+ /* Nothing was sent yet for this stream, so reset with
+ * REFUSED_STREAM error to let the client retry the
+ * request.
+ */
+ TRACE_STATE("no headers sent yet, trying a retryable abort", H2_EV_STRM_SHUT, h2c->conn, h2s);
+ h2s_error(h2s, H2_ERR_REFUSED_STREAM);
+ }
+
+ if (!(h2s->flags & H2_SF_RST_SENT) &&
+ h2s_send_rst_stream(h2c, h2s) <= 0)
+ goto add_to_list;
+
+ h2s_close(h2s);
+ }
+
+ if (!(h2c->wait_event.events & SUB_RETRY_SEND))
+ tasklet_wakeup(h2c->wait_event.tasklet);
+
+ TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
+
+ done:
+ h2s->flags &= ~H2_SF_WANT_SHUTW;
+ return;
+
+ add_to_list:
+ /* Let the handler know we want to shutw, and add ourselves to the
+ * most relevant list if not yet done. h2_deferred_shut() will be
+ * automatically called via the shut_tl tasklet when there's room
+ * again.
+ */
+ h2s->flags |= H2_SF_WANT_SHUTW;
+ if (!LIST_INLIST(&h2s->list)) {
+ if (h2s->flags & H2_SF_BLK_MFCTL)
+ LIST_APPEND(&h2c->fctl_list, &h2s->list);
+ else if (h2s->flags & (H2_SF_BLK_MBUSY|H2_SF_BLK_MROOM))
+ LIST_APPEND(&h2c->send_list, &h2s->list);
+ }
+ TRACE_LEAVE(H2_EV_STRM_SHUT, h2c->conn, h2s);
+ return;
+}
+
+/* This is the tasklet referenced in h2s->shut_tl, it is used for
+ * deferred shutdowns when the h2_detach() was done but the mux buffer was full
+ * and prevented the last frame from being emitted. Returns the task, or NULL
+ * when the stream's destruction also released the whole connection.
+ */
+struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
+{
+ struct h2s *h2s = ctx;
+ struct h2c *h2c = h2s->h2c;
+
+ TRACE_ENTER(H2_EV_STRM_SHUT, h2c->conn, h2s);
+
+ if (h2s->flags & H2_SF_NOTIFIED) {
+ /* some data processing remains to be done first */
+ goto end;
+ }
+
+ if (h2s->flags & H2_SF_WANT_SHUTW)
+ h2_do_shutw(h2s);
+
+ if (h2s->flags & H2_SF_WANT_SHUTR)
+ h2_do_shutr(h2s);
+
+ if (!(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
+ /* We're done trying to send, remove ourself from the send_list */
+ h2_remove_from_list(h2s);
+
+ /* no stream connector left: destroy the orphan stream, and the
+ * connection itself if it became dead.
+ */
+ if (!h2s_sc(h2s)) {
+ h2s_destroy(h2s);
+ if (h2c_is_dead(h2c)) {
+ h2_release(h2c);
+ t = NULL;
+ }
+ }
+ }
+ end:
+ TRACE_LEAVE(H2_EV_STRM_SHUT);
+ return t;
+}
+
+/* shutr() called by the stream connector (mux_ops.shutr); a zero mode is a
+ * no-op here.
+ */
+static void h2_shutr(struct stconn *sc, enum co_shr_mode mode)
+{
+ struct h2s *h2s = __sc_mux_strm(sc);
+
+ TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
+ if (mode)
+ h2_do_shutr(h2s);
+ TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
+}
+
+/* shutw() called by the stream connector (mux_ops.shutw) */
+static void h2_shutw(struct stconn *sc, enum co_shw_mode mode)
+{
+	struct h2s *h2s = __sc_mux_strm(sc);
+
+	TRACE_ENTER(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
+	/* contrary to h2_shutr() above, <mode> is ignored here: the write
+	 * side is always shut regardless of the requested mode.
+	 */
+	h2_do_shutw(h2s);
+	TRACE_LEAVE(H2_EV_STRM_SHUT, h2s->h2c->conn, h2s);
+}
+
+/* Decode the payload of a HEADERS frame and produce the HTX request or response
+ * depending on the connection's side. Returns a positive value on success, a
+ * negative value on failure, or 0 if it couldn't proceed. May report connection
+ * errors in h2c->errcode if the frame is non-decodable and the connection
+ * unrecoverable. In absence of connection error when a failure is reported, the
+ * caller must assume a stream error.
+ *
+ * The function may fold CONTINUATION frames into the initial HEADERS frame
+ * by removing padding and next frame header, then moving the CONTINUATION
+ * frame's payload and adjusting h2c->dfl to match the new aggregated frame,
+ * leaving a hole between the main frame and the beginning of the next one.
+ * The possibly remaining incomplete or next frame at the end may be moved
+ * if the aggregated frame is not deleted, in order to fill the hole. Wrapped
+ * HEADERS frames are unwrapped into a temporary buffer before decoding.
+ *
+ * A buffer at the beginning of processing may look like this :
+ *
+ *    ,---.---------.-----.--------------.--------------.------.---.
+ *    |///| HEADERS | PAD | CONTINUATION | CONTINUATION | DATA |///|
+ *    `---^---------^-----^--------------^--------------^------^---'
+ *    |   |         <----->                             |      |
+ *  area  |           dpl                               |    wrap
+ *        |<-------------->                             |
+ *        |      dfl                                    |
+ *        |<-------------------------------------------->|
+ *      head                                           data
+ *
+ * Padding is automatically overwritten when folding, participating to the
+ * hole size after dfl :
+ *
+ *    ,---.------------------------.-----.--------------.------.---.
+ *    |///| HEADERS : CONTINUATION |/////| CONTINUATION | DATA |///|
+ *    `---^------------------------^-----^--------------^------^---'
+ *    |   |                        <----->              |      |
+ *  area  |                         hole                |    wrap
+ *        |<----------------------->                    |
+ *        |           dfl                               |
+ *        |<-------------------------------------------->|
+ *      head                                           data
+ *
+ * Please note that the HEADERS frame is always deprived of its PADLEN byte
+ * however it may start with the 5 stream-dep+weight bytes in case of PRIORITY
+ * bit.
+ *
+ * The <flags> field must point to either the stream's flags or to a copy of it
+ * so that the function can update the following flags :
+ *   - H2_SF_DATA_CLEN when content-length is seen
+ *   - H2_SF_HEADERS_RCVD once the frame is successfully decoded
+ *
+ * The H2_SF_HEADERS_RCVD flag is also looked at in the <flags> field prior to
+ * decoding, in order to detect if we're dealing with a headers or a trailers
+ * block (the trailers block appears after H2_SF_HEADERS_RCVD was seen).
+ */
+static int h2c_dec_hdrs(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol)
+{
+	const uint8_t *hdrs = (uint8_t *)b_head(&h2c->dbuf);
+	struct buffer *tmp = get_trash_chunk();
+	struct http_hdr list[global.tune.max_http_hdr * 2];
+	struct buffer *copy = NULL;
+	unsigned int msgf;
+	struct htx *htx = NULL;
+	int flen; // header frame len
+	int hole = 0;
+	int ret = 0;
+	int outlen;
+	int wrap;
+
+	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
+
+next_frame:
+	if (b_data(&h2c->dbuf) - hole < h2c->dfl)
+		goto leave; // incomplete input frame
+
+	/* No END_HEADERS means there's one or more CONTINUATION frames. In
+	 * this case, we'll try to paste it immediately after the initial
+	 * HEADERS frame payload and kill any possible padding. The initial
+	 * frame's length will be increased to represent the concatenation
+	 * of the two frames. The next frame is read from position <tlen>
+	 * and written at position <flen> (minus padding if some is present).
+	 */
+	if (unlikely(!(h2c->dff & H2_F_HEADERS_END_HEADERS))) {
+		struct h2_fh hdr;
+		int clen; // CONTINUATION frame's payload length
+
+		TRACE_STATE("EH missing, expecting continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR, h2c->conn);
+		if (!h2_peek_frame_hdr(&h2c->dbuf, h2c->dfl + hole, &hdr)) {
+			/* no more data, the buffer may be full, either due to
+			 * too large a frame or because of too large a hole that
+			 * we're going to compact at the end.
+			 */
+			goto leave;
+		}
+
+		if (hdr.ft != H2_FT_CONTINUATION) {
+			/* RFC7540#6.10: frame of unexpected type */
+			TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+			goto fail;
+		}
+
+		if (hdr.sid != h2c->dsi) {
+			/* RFC7540#6.10: frame of different stream */
+			TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+			goto fail;
+		}
+
+		if ((unsigned)hdr.len > (unsigned)global.tune.bufsize) {
+			/* RFC7540#4.2: invalid frame length */
+			TRACE_STATE("too large frame!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
+			goto fail;
+		}
+
+		/* detect when we must stop aggregating frames */
+		h2c->dff |= hdr.ff & H2_F_HEADERS_END_HEADERS;
+
+		/* Take as much as we can of the CONTINUATION frame's payload */
+		clen = b_data(&h2c->dbuf) - (h2c->dfl + hole + 9);
+		if (clen > hdr.len)
+			clen = hdr.len;
+
+		/* Move the frame's payload over the padding, hole and frame
+		 * header. At least one of hole or dpl is null (see diagrams
+		 * above). The hole moves after the new aggregated frame.
+		 */
+		b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole + 9), clen, -(h2c->dpl + hole + 9));
+		h2c->dfl += hdr.len - h2c->dpl;
+		hole += h2c->dpl + 9;
+		h2c->dpl = 0;
+		TRACE_STATE("waiting for next continuation frame", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_CONT|H2_EV_RX_HDR, h2c->conn);
+		goto next_frame;
+	}
+
+	flen = h2c->dfl - h2c->dpl;
+
+	/* if the input buffer wraps, take a temporary copy of it (rare) */
+	wrap = b_wrap(&h2c->dbuf) - b_head(&h2c->dbuf);
+	if (wrap < h2c->dfl) {
+		copy = alloc_trash_chunk();
+		if (!copy) {
+			TRACE_DEVEL("failed to allocate temporary buffer", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+			goto fail;
+		}
+		memcpy(copy->area, b_head(&h2c->dbuf), wrap);
+		memcpy(copy->area + wrap, b_orig(&h2c->dbuf), h2c->dfl - wrap);
+		hdrs = (uint8_t *) copy->area;
+	}
+
+	/* Skip StreamDep and weight for now (we don't support PRIORITY) */
+	if (h2c->dff & H2_F_HEADERS_PRIORITY) {
+		if (read_n32(hdrs) == h2c->dsi) {
+			/* RFC7540#5.3.1 : stream dep may not depend on itself */
+			TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+			HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+			goto fail;
+		}
+
+		if (flen < 5) {
+			TRACE_STATE("frame too short for priority!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
+			goto fail;
+		}
+
+		hdrs += 5; // stream dep = 4, weight = 1
+		flen -= 5;
+	}
+
+	if (!h2_get_buf(h2c, rxbuf)) {
+		TRACE_STATE("waiting for h2c rxbuf allocation", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
+		h2c->flags |= H2_CF_DEM_SALLOC;
+		goto leave;
+	}
+
+	/* we can't retry a failed decompression operation so we must be very
+	 * careful not to take any risks. In practice the output buffer is
+	 * always empty except maybe for trailers, in which case we simply have
+	 * to wait for the upper layer to finish consuming what is available.
+	 */
+	htx = htx_from_buf(rxbuf);
+	if (!htx_is_empty(htx)) {
+		TRACE_STATE("waiting for room in h2c rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_BLK, h2c->conn);
+		h2c->flags |= H2_CF_DEM_SFULL;
+		goto leave;
+	}
+
+	/* past this point we cannot roll back in case of error */
+	outlen = hpack_decode_frame(h2c->ddht, hdrs, flen, list,
+	                            sizeof(list)/sizeof(list[0]), tmp);
+
+	if (outlen > 0 &&
+	    (TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED &&
+	    TRACE_ENABLED(TRACE_LEVEL_USER, H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, 0, 0, 0)) {
+		struct ist n;
+		int i;
+
+		for (i = 0; list[i].n.len; i++) {
+			n = list[i].n;
+
+			if (!isttest(n)) {
+				/* this is in fact a pseudo header whose number is in n.len */
+				n = h2_phdr_to_ist(n.len);
+			}
+
+			h2_trace_header(n, list[i].v, H2_EV_RX_FRAME|H2_EV_RX_HDR,
+			                ist(TRC_LOC), __FUNCTION__, h2c, NULL);
+		}
+	}
+
+	if (outlen < 0) {
+		TRACE_STATE("failed to decompress HPACK", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+		h2c_error(h2c, H2_ERR_COMPRESSION_ERROR);
+		goto fail;
+	}
+
+	/* The HPACK decompressor was updated, let's update the input buffer and
+	 * the parser's state to commit these changes and allow us to later
+	 * fail solely on the stream if needed.
+	 */
+	b_del(&h2c->dbuf, h2c->dfl + hole);
+	h2c->dfl = hole = 0;
+	h2c->st0 = H2_CS_FRAME_H;
+
+	/* OK now we have our header list in <list> */
+	msgf = (h2c->dff & H2_F_HEADERS_END_STREAM) ? 0 : H2_MSGF_BODY;
+	msgf |= (*flags & H2_SF_BODY_TUNNEL) ? H2_MSGF_BODY_TUNNEL: 0;
+	/* If an Extended CONNECT has been sent on this stream, set message flag
+	 * to convert 200 response to 101 htx response */
+	msgf |= (*flags & H2_SF_EXT_CONNECT_SENT) ? H2_MSGF_EXT_CONNECT: 0;
+
+	if (*flags & H2_SF_HEADERS_RCVD)
+		goto trailers;
+
+	/* This is the first HEADERS frame so it's a headers block */
+	if (h2c->flags & H2_CF_IS_BACK)
+		outlen = h2_make_htx_response(list, htx, &msgf, body_len, upgrade_protocol);
+	else
+		outlen = h2_make_htx_request(list, htx, &msgf, body_len,
+		                             !!(((const struct session *)h2c->conn->owner)->fe->options2 & PR_O2_REQBUG_OK));
+
+	if (outlen < 0 || htx_free_space(htx) < global.tune.maxrewrite) {
+		/* too large headers? this is a stream error only */
+		TRACE_STATE("message headers too large or invalid", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR|H2_EV_PROTO_ERR, h2c->conn);
+		htx->flags |= HTX_FL_PARSING_ERROR;
+		goto fail;
+	}
+
+	if (msgf & H2_MSGF_BODY) {
+		/* a payload is present */
+		if (msgf & H2_MSGF_BODY_CL) {
+			*flags |= H2_SF_DATA_CLEN;
+			htx->extra = *body_len;
+		}
+	}
+	if (msgf & H2_MSGF_BODYLESS_RSP)
+		*flags |= H2_SF_BODYLESS_RESP;
+
+	if (msgf & H2_MSGF_BODY_TUNNEL)
+		*flags |= H2_SF_BODY_TUNNEL;
+	else {
+		/* Abort the tunnel attempt, if any */
+		if (*flags & H2_SF_BODY_TUNNEL)
+			*flags |= H2_SF_TUNNEL_ABRT;
+		*flags &= ~H2_SF_BODY_TUNNEL;
+	}
+
+ done:
+	/* indicate that a HEADERS frame was received for this stream, except
+	 * for 1xx responses. For 1xx responses, another HEADERS frame is
+	 * expected.
+	 */
+	if (!(msgf & H2_MSGF_RSP_1XX))
+		*flags |= H2_SF_HEADERS_RCVD;
+
+	if (h2c->dff & H2_F_HEADERS_END_STREAM) {
+		if (msgf & H2_MSGF_RSP_1XX) {
+			/* RFC9113#8.1 : HEADERS frame with the ES flag set that carries an informational status code is malformed */
+			TRACE_STATE("invalid interim response with ES flag!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+			goto fail;
+		}
+		/* no more data are expected for this message */
+		htx->flags |= HTX_FL_EOM;
+		*flags |= H2_SF_ES_RCVD;
+	}
+
+	if (msgf & H2_MSGF_EXT_CONNECT)
+		*flags |= H2_SF_EXT_CONNECT_RCVD;
+
+	/* success */
+	ret = 1;
+
+ leave:
+	/* If there is a hole left and it's not at the end, we are forced to
+	 * move the remaining data over it.
+	 */
+	if (hole) {
+		if (b_data(&h2c->dbuf) > h2c->dfl + hole)
+			b_move(&h2c->dbuf, b_peek_ofs(&h2c->dbuf, h2c->dfl + hole),
+			       b_data(&h2c->dbuf) - (h2c->dfl + hole), -hole);
+		b_sub(&h2c->dbuf, hole);
+	}
+
+	if (b_full(&h2c->dbuf) && h2c->dfl && (!htx || htx_is_empty(htx))) {
+		/* too large frames */
+		h2c_error(h2c, H2_ERR_INTERNAL_ERROR);
+		ret = -1;
+	}
+
+	if (htx)
+		htx_to_buf(htx, rxbuf);
+	free_trash_chunk(copy);
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
+	return ret;
+
+ fail:
+	ret = -1;
+	goto leave;
+
+ trailers:
+	/* This is the last HEADERS frame hence a trailer */
+	if (!(h2c->dff & H2_F_HEADERS_END_STREAM)) {
+		/* It's a trailer but it's missing ES flag */
+		/* NOTE(review): the trace message below says "EH" while the
+		 * missing flag checked here is ES (END_STREAM) — worth fixing
+		 * the message in a dedicated patch.
+		 */
+		TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
+		h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
+		HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
+		goto fail;
+	}
+
+	/* Trailers terminate a DATA sequence */
+	if (h2_make_htx_trailers(list, htx) <= 0) {
+		TRACE_STATE("failed to append HTX trailers into rxbuf", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2S_ERR, h2c->conn);
+		goto fail;
+	}
+	*flags |= H2_SF_ES_RCVD;
+	goto done;
+}
+
+/* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
+ * parser state is automatically updated. Returns > 0 if it could completely
+ * send the current frame, 0 if it couldn't complete, in which case
+ * SE_FL_RCV_MORE must be checked to know if some data remain pending (an empty
+ * DATA frame can return 0 as a valid result). Stream errors are reported in
+ * h2s->errcode and connection errors in h2c->errcode. The caller must already
+ * have checked the frame header and ensured that the frame was complete or the
+ * buffer full. It changes the frame state to FRAME_A once done.
+ */
+static int h2_frt_transfer_data(struct h2s *h2s)
+{
+	struct h2c *h2c = h2s->h2c;
+	int block;
+	unsigned int flen = 0;
+	struct htx *htx = NULL;
+	struct buffer *scbuf;
+	unsigned int sent;
+
+	TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+
+	h2c->flags &= ~H2_CF_DEM_SFULL;
+
+	scbuf = h2_get_buf(h2c, &h2s->rxbuf);
+	if (!scbuf) {
+		h2c->flags |= H2_CF_DEM_SALLOC;
+		TRACE_STATE("waiting for an h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto fail;
+	}
+	htx = htx_from_buf(scbuf);
+
+try_again:
+	/* remaining payload to forward, excluding trailing padding */
+	flen = h2c->dfl - h2c->dpl;
+	if (!flen)
+		goto end_transfer;
+
+	if (flen > b_data(&h2c->dbuf)) {
+		flen = b_data(&h2c->dbuf);
+		if (!flen)
+			goto fail;
+	}
+
+	block = htx_free_data_space(htx);
+	if (!block) {
+		h2c->flags |= H2_CF_DEM_SFULL;
+		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto fail;
+	}
+	if (flen > block)
+		flen = block;
+
+	/* here, flen is the max we can copy into the output buffer */
+	block = b_contig_data(&h2c->dbuf, 0);
+	if (flen > block)
+		flen = block;
+
+	sent = htx_add_data(htx, ist2(b_head(&h2c->dbuf), flen));
+	TRACE_DATA("move some data to h2s rxbuf", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s, 0, (void *)(long)sent);
+
+	b_del(&h2c->dbuf, sent);
+	h2c->dfl -= sent;
+	h2c->rcvd_c += sent;
+	h2c->rcvd_s += sent; // warning, this can also affect the closed streams!
+
+	if (h2s->flags & H2_SF_DATA_CLEN) {
+		h2s->body_len -= sent;
+		htx->extra = h2s->body_len;
+	}
+
+	if (sent < flen) {
+		h2c->flags |= H2_CF_DEM_SFULL;
+		TRACE_STATE("h2s rxbuf is full", H2_EV_RX_FRAME|H2_EV_RX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto fail;
+	}
+
+	goto try_again;
+
+ end_transfer:
+	/* here we're done with the frame, all the payload (except padding) was
+	 * transferred.
+	 */
+
+	if (!(h2s->flags & H2_SF_BODY_TUNNEL) && (h2c->dff & H2_F_DATA_END_STREAM)) {
+		/* no more data are expected for this message. This adds the EOM
+		 * flag but only on the response path or if no tunnel attempt
+		 * was aborted. Otherwise (request path + tunnel aborted), the
+		 * EOM was already reported.
+		 */
+		if ((h2c->flags & H2_CF_IS_BACK) || !(h2s->flags & H2_SF_TUNNEL_ABRT)) {
+			/* htx may be empty if receiving an empty DATA frame. */
+			if (!htx_set_eom(htx))
+				goto fail;
+		}
+	}
+
+	/* the padding bytes are also accounted for in the received counters */
+	h2c->rcvd_c += h2c->dpl;
+	h2c->rcvd_s += h2c->dpl;
+	h2c->dpl = 0;
+	h2c->st0 = H2_CS_FRAME_A; // send the corresponding window update
+	htx_to_buf(htx, scbuf);
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+	return 1;
+ fail:
+	if (htx)
+		htx_to_buf(htx, scbuf);
+	TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+	return 0;
+}
+
+/* Try to send a HEADERS frame matching HTX response present in HTX message
+ * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
+ * must check the stream's status to detect any error which might have happened
+ * subsequently to a successful send. The htx blocks are automatically removed
+ * from the message. The htx message is assumed to be valid since produced from
+ * the internal code, hence it contains a start line, an optional series of
+ * header blocks and an end of header, otherwise an invalid frame could be
+ * emitted and the resulting htx message could be left in an inconsistent state.
+ */
+static size_t h2s_snd_fhdrs(struct h2s *h2s, struct htx *htx)
+{
+	struct http_hdr list[global.tune.max_http_hdr];
+	struct h2c *h2c = h2s->h2c;
+	struct htx_blk *blk;
+	struct buffer outbuf;
+	struct buffer *mbuf;
+	struct htx_sl *sl;
+	enum htx_blk_type type;
+	int es_now = 0;
+	int ret = 0;
+	int hdr;
+
+	TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+
+	/* get the start line (we do have one) and the rest of the headers,
+	 * that we dump starting at header 0 */
+	sl = NULL;
+	hdr = 0;
+	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+		type = htx_get_blk_type(blk);
+
+		if (type == HTX_BLK_UNUSED)
+			continue;
+
+		if (type == HTX_BLK_EOH)
+			break;
+
+		if (type == HTX_BLK_HDR) {
+			BUG_ON(!sl); /* The start-line must be defined before any headers */
+			if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+				TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+				goto fail;
+			}
+
+			list[hdr].n = htx_get_blk_name(htx, blk);
+			list[hdr].v = htx_get_blk_value(htx, blk);
+			hdr++;
+		}
+		else if (type == HTX_BLK_RES_SL) {
+			BUG_ON(sl); /* Only one start-line expected */
+			sl = htx_get_blk_ptr(htx, blk);
+			h2s->status = sl->info.res.status;
+			if ((sl->flags & HTX_SL_F_BODYLESS_RESP) || h2s->status == 204 || h2s->status == 304)
+				h2s->flags |= H2_SF_BODYLESS_RESP;
+			if (h2s->status < 100 || h2s->status > 999) {
+				TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+				goto fail;
+			}
+			else if (h2s->status == 101) {
+				if (unlikely(h2s->flags & H2_SF_EXT_CONNECT_RCVD)) {
+					/* If an Extended CONNECT has been received, we need to convert 101 to 200 */
+					h2s->status = 200;
+					h2s->flags &= ~H2_SF_EXT_CONNECT_RCVD;
+				}
+				else {
+					/* Otherwise, 101 responses are not supported in H2, so return an error (RFC7540#8.1.1) */
+					TRACE_ERROR("will not encode an invalid status code", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+					goto fail;
+				}
+			}
+			else if ((h2s->flags & H2_SF_BODY_TUNNEL) && h2s->status >= 300) {
+				/* Abort the tunnel attempt */
+				h2s->flags &= ~H2_SF_BODY_TUNNEL;
+				h2s->flags |= H2_SF_TUNNEL_ABRT;
+			}
+		}
+		else {
+			TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+			goto fail;
+		}
+	}
+
+	/* The start-line must be defined */
+	BUG_ON(!sl);
+
+	/* marker for end of headers */
+	list[hdr].n = ist("");
+
+	mbuf = br_tail(h2c->mbuf);
+ retry:
+	if (!h2_get_buf(h2c, mbuf)) {
+		h2c->flags |= H2_CF_MUX_MALLOC;
+		h2s->flags |= H2_SF_BLK_MROOM;
+		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+		return 0;
+	}
+
+	chunk_reset(&outbuf);
+
+	/* get some contiguous room for at least the 9-byte frame header,
+	 * realigning the buffer if needed
+	 */
+	while (1) {
+		outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
+		if (outbuf.size >= 9 || !b_space_wraps(mbuf))
+			break;
+	realign_again:
+		b_slow_realign(mbuf, trash.area, b_data(mbuf));
+	}
+
+	if (outbuf.size < 9)
+		goto full;
+
+	/* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
+	memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
+	write_n32(outbuf.area + 5, h2s->id); // 4 bytes
+	outbuf.data = 9;
+
+	if ((h2c->flags & (H2_CF_SHTS_UPDATED|H2_CF_DTSU_EMITTED)) == H2_CF_SHTS_UPDATED) {
+		/* SETTINGS_HEADER_TABLE_SIZE changed, we must send an HPACK
+		 * dynamic table size update so that some clients are not
+		 * confused. In practice we only need to send the DTSU when the
+		 * advertised size is lower than the current one, and since we
+		 * don't use it and don't care about the default 4096 bytes,
+		 * we only ack it with a zero size thus we at most have to deal
+		 * with this once. See RFC7541#4.2 and #6.3 for the spec, and
+		 * below for the whole context and interoperability risks:
+		 *   https://lists.w3.org/Archives/Public/ietf-http-wg/2021OctDec/0235.html
+		 */
+		if (b_room(&outbuf) < 1)
+			goto full;
+		outbuf.area[outbuf.data++] = 0x20; // HPACK DTSU 0 bytes
+
+		/* let's not update the flags now but only once the buffer is
+		 * really committed.
+		 */
+	}
+
+	/* encode status, which necessarily is the first one */
+	if (!hpack_encode_int_status(&outbuf, h2s->status)) {
+		if (b_space_wraps(mbuf))
+			goto realign_again;
+		goto full;
+	}
+
+	if ((TRACE_SOURCE)->verbosity >= H2_VERB_ADVANCED) {
+		char sts[4];
+
+		h2_trace_header(ist(":status"), ist(ultoa_r(h2s->status, sts, sizeof(sts))),
+		                H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__,
+		                h2c, h2s);
+	}
+
+	/* encode all headers, stop at empty name */
+	for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
+		/* these ones do not exist in H2 and must be dropped. */
+		if (isteq(list[hdr].n, ist("connection")) ||
+		    isteq(list[hdr].n, ist("proxy-connection")) ||
+		    isteq(list[hdr].n, ist("keep-alive")) ||
+		    isteq(list[hdr].n, ist("upgrade")) ||
+		    isteq(list[hdr].n, ist("transfer-encoding")))
+			continue;
+
+		/* Skip all pseudo-headers */
+		if (*(list[hdr].n.ptr) == ':')
+			continue;
+
+		if (isteq(list[hdr].n, ist("")))
+			break; // end
+
+		if (!h2_encode_header(&outbuf, list[hdr].n, list[hdr].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
+		                      ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+			/* output full */
+			if (b_space_wraps(mbuf))
+				goto realign_again;
+			goto full;
+		}
+	}
+
+	/* update the frame's size */
+	h2_set_frame_size(outbuf.area, outbuf.data - 9);
+
+	if (outbuf.data > h2c->mfs + 9) {
+		if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
+			/* output full */
+			if (b_space_wraps(mbuf))
+				goto realign_again;
+			goto full;
+		}
+	}
+
+	TRACE_USER("sent H2 response ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
+
+	/* remove all header blocks including the EOH and compute the
+	 * corresponding size.
+	 */
+	ret = 0;
+	blk = htx_get_head_blk(htx);
+	while (blk) {
+		type = htx_get_blk_type(blk);
+		ret += htx_get_blksz(blk);
+		blk = htx_remove_blk(htx, blk);
+		/* The removed block is the EOH */
+		if (type == HTX_BLK_EOH)
+			break;
+	}
+
+	if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
+		/* Response already closed: add END_STREAM */
+		es_now = 1;
+	}
+	else if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx) && h2s->status >= 200) {
+		/* EOM+empty: we may need to add END_STREAM except for 1xx
+		 * responses and tunneled response.
+		 */
+		if (!(h2s->flags & H2_SF_BODY_TUNNEL) || h2s->status >= 300)
+			es_now = 1;
+	}
+
+	if (es_now)
+		outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
+
+	/* commit the H2 response */
+	b_add(mbuf, outbuf.data);
+	h2c->flags |= H2_CF_MBUF_HAS_DATA;
+
+	/* indicates the HEADERS frame was sent, except for 1xx responses. For
+	 * 1xx responses, another HEADERS frame is expected.
+	 */
+	if (h2s->status >= 200)
+		h2s->flags |= H2_SF_HEADERS_SENT;
+
+	if (h2c->flags & H2_CF_SHTS_UPDATED) {
+		/* was sent above */
+		h2c->flags |= H2_CF_DTSU_EMITTED;
+		h2c->flags &= ~H2_CF_SHTS_UPDATED;
+	}
+
+	if (es_now) {
+		h2s->flags |= H2_SF_ES_SENT;
+		TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
+		if (h2s->st == H2_SS_OPEN)
+			h2s->st = H2_SS_HLOC;
+		else
+			h2s_close(h2s);
+	}
+
+	/* OK we could properly deliver the response */
+ end:
+	TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+	return ret;
+ full:
+	/* current mbuf is full: try to allocate/switch to the next one in the
+	 * ring before reporting a blocking condition.
+	 */
+	if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+		goto retry;
+	h2c->flags |= H2_CF_MUX_MFULL;
+	h2s->flags |= H2_SF_BLK_MROOM;
+	ret = 0;
+	TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+	goto end;
+ fail:
+	/* unparsable HTX messages, too large ones to be produced in the local
+	 * list etc go here (unrecoverable errors).
+	 */
+	h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
+	ret = 0;
+	goto end;
+}
+
+/* Try to send a HEADERS frame matching HTX request present in HTX message
+ * <htx> for the H2 stream <h2s>. Returns the number of bytes sent. The caller
+ * must check the stream's status to detect any error which might have happened
+ * subsequently to a successful send. The htx blocks are automatically removed
+ * from the message. The htx message is assumed to be valid since produced from
+ * the internal code, hence it contains a start line, an optional series of
+ * header blocks and an end of header, otherwise an invalid frame could be
+ * emitted and the resulting htx message could be left in an inconsistent state.
+ */
+static size_t h2s_snd_bhdrs(struct h2s *h2s, struct htx *htx)
+{
+ struct http_hdr list[global.tune.max_http_hdr];
+ struct h2c *h2c = h2s->h2c;
+ struct htx_blk *blk;
+ struct buffer outbuf;
+ struct buffer *mbuf;
+ struct htx_sl *sl;
+ struct ist meth, uri, auth, host = IST_NULL;
+ enum htx_blk_type type;
+ int es_now = 0;
+ int ret = 0;
+ int hdr;
+ int extended_connect = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+
+ /* get the start line (we do have one) and the rest of the headers,
+ * that we dump starting at header 0 */
+ sl = NULL;
+ hdr = 0;
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ if (type == HTX_BLK_EOH)
+ break;
+
+ if (type == HTX_BLK_HDR) {
+ BUG_ON(!sl); /* The start-line mut be defined before any headers */
+ if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+
+ /* Skip header if same name is used to add the server name */
+ if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name) &&
+ isteq(list[hdr].n, h2c->proxy->server_id_hdr_name))
+ continue;
+
+ /* Convert connection: upgrade to Extended connect from rfc 8441 */
+ if ((sl->flags & HTX_SL_F_CONN_UPG) && isteqi(list[hdr].n, ist("connection"))) {
+ /* rfc 7230 #6.1 Connection = list of tokens */
+ struct ist connection_ist = list[hdr].v;
+ do {
+ if (isteqi(iststop(connection_ist, ','),
+ ist("upgrade"))) {
+ if (!(h2c->flags & H2_CF_RCVD_RFC8441)) {
+ TRACE_STATE("reject upgrade because of no RFC8441 support", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+ goto fail;
+ }
+
+ TRACE_STATE("convert upgrade to extended connect method", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+ h2s->flags |= (H2_SF_BODY_TUNNEL|H2_SF_EXT_CONNECT_SENT);
+ sl->info.req.meth = HTTP_METH_CONNECT;
+ meth = ist("CONNECT");
+
+ extended_connect = 1;
+ break;
+ }
+
+ connection_ist = istadv(istfind(connection_ist, ','), 1);
+ } while (istlen(connection_ist));
+ }
+
+ if ((sl->flags & HTX_SL_F_CONN_UPG) && isteq(list[hdr].n, ist("upgrade"))) {
+ /* rfc 7230 #6.7 Upgrade = list of protocols
+ * rfc 8441 #4 Extended connect = :protocol is single-valued
+ *
+ * only first HTTP/1 protocol is preserved
+ */
+ const struct ist protocol = iststop(list[hdr].v, ',');
+ /* upgrade_protocol field is 16 bytes long in h2s */
+ istpad(h2s->upgrade_protocol, isttrim(protocol, 15));
+ }
+
+ if (isteq(list[hdr].n, ist("host")))
+ host = list[hdr].v;
+
+ hdr++;
+ }
+ else if (type == HTX_BLK_REQ_SL) {
+ BUG_ON(sl); /* Only one start-line expected */
+ sl = htx_get_blk_ptr(htx, blk);
+ meth = htx_sl_req_meth(sl);
+ uri = htx_sl_req_uri(sl);
+ if ((sl->flags & HTX_SL_F_BODYLESS_RESP) || sl->info.req.meth == HTTP_METH_HEAD)
+ h2s->flags |= H2_SF_BODYLESS_RESP;
+ if (unlikely(uri.len == 0)) {
+ TRACE_ERROR("no URI in HTX request", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ }
+ else {
+ TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ }
+
+ /* The start-line me be defined */
+ BUG_ON(!sl);
+
+ /* Now add the server name to a header (if requested) */
+ if ((h2c->flags & H2_CF_IS_BACK) && isttest(h2c->proxy->server_id_hdr_name)) {
+ struct server *srv = objt_server(h2c->conn->target);
+
+ if (srv) {
+ list[hdr].n = h2c->proxy->server_id_hdr_name;
+ list[hdr].v = ist(srv->id);
+ hdr++;
+ }
+ }
+
+ /* marker for end of headers */
+ list[hdr].n = ist("");
+
+ mbuf = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, mbuf)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+ return 0;
+ }
+
+ chunk_reset(&outbuf);
+
+ while (1) {
+ outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
+ if (outbuf.size >= 9 || !b_space_wraps(mbuf))
+ break;
+ realign_again:
+ b_slow_realign(mbuf, trash.area, b_data(mbuf));
+ }
+
+ if (outbuf.size < 9)
+ goto full;
+
+ /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4 */
+ memcpy(outbuf.area, "\x00\x00\x00\x01\x04", 5);
+ write_n32(outbuf.area + 5, h2s->id); // 4 bytes
+ outbuf.data = 9;
+
+ /* encode the method, which necessarily is the first one */
+ if (!hpack_encode_method(&outbuf, sl->info.req.meth, meth)) {
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+
+ h2_trace_header(ist(":method"), meth, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
+
+ auth = ist(NULL);
+
+ /* RFC7540 #8.3: the CONNECT method must have :
+ * - :authority set to the URI part (host:port)
+ * - :method set to CONNECT
+ * - :scheme and :path omitted
+ *
+ * Note that this is not applicable in case of the Extended CONNECT
+ * protocol from rfc 8441.
+ */
+ if (unlikely(sl->info.req.meth == HTTP_METH_CONNECT) && !extended_connect) {
+ auth = uri;
+
+ if (!h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
+ ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+
+ h2s->flags |= H2_SF_BODY_TUNNEL;
+ } else {
+ /* other methods need a :scheme. If an authority is known from
+ * the request line, it must be sent, otherwise only host is
+ * sent. Host is never sent as the authority.
+ *
+ * This code is also applicable for Extended CONNECT protocol
+ * from rfc 8441.
+ */
+ struct ist scheme = { };
+
+ if (uri.ptr[0] != '/' && uri.ptr[0] != '*') {
+ /* the URI seems to start with a scheme */
+ int len = 1;
+
+ while (len < uri.len && uri.ptr[len] != ':')
+ len++;
+
+ if (len + 2 < uri.len && uri.ptr[len + 1] == '/' && uri.ptr[len + 2] == '/') {
+ /* make the uri start at the authority now */
+ scheme = ist2(uri.ptr, len);
+ uri = istadv(uri, len + 3);
+
+ /* find the auth part of the URI */
+ auth = ist2(uri.ptr, 0);
+ while (auth.len < uri.len && auth.ptr[auth.len] != '/')
+ auth.len++;
+
+ uri = istadv(uri, auth.len);
+ }
+ }
+
+ /* For Extended CONNECT, the :authority must be present.
+ * Use host value for it.
+ */
+ if (unlikely(extended_connect) && isttest(host))
+ auth = host;
+
+ if (!scheme.len) {
+ /* no explicit scheme, we're using an origin-form URI,
+ * probably from an H1 request transcoded to H2 via an
+ * external layer, then received as H2 without authority.
+ * So we have to look up the scheme from the HTX flags.
+ * In such a case only http and https are possible, and
+ * https is the default (sent by browsers).
+ */
+ if ((sl->flags & (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP)) == (HTX_SL_F_HAS_SCHM|HTX_SL_F_SCHM_HTTP))
+ scheme = ist("http");
+ else
+ scheme = ist("https");
+ }
+
+ if (!hpack_encode_scheme(&outbuf, scheme)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+
+ if (auth.len &&
+ !h2_encode_header(&outbuf, ist(":authority"), auth, H2_EV_TX_FRAME|H2_EV_TX_HDR,
+ ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+
+ /* encode the path. RFC7540#8.1.2.3: if path is empty it must
+ * be sent as '/' or '*'.
+ */
+ if (unlikely(!uri.len)) {
+ if (sl->info.req.meth == HTTP_METH_OPTIONS)
+ uri = ist("*");
+ else
+ uri = ist("/");
+ }
+
+ if (!hpack_encode_path(&outbuf, uri)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+
+ h2_trace_header(ist(":path"), uri, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s);
+
+ /* encode the pseudo-header protocol from rfc8441 if using
+ * Extended CONNECT method.
+ */
+ if (unlikely(extended_connect)) {
+ const struct ist protocol = ist(h2s->upgrade_protocol);
+ if (isttest(protocol)) {
+ if (!h2_encode_header(&outbuf, ist(":protocol"), protocol, H2_EV_TX_FRAME|H2_EV_TX_HDR,
+ ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+ }
+ }
+ }
+
+ /* encode all headers, stop at empty name. Host is only sent if we
+ * do not provide an authority.
+ */
+ for (hdr = 0; hdr < sizeof(list)/sizeof(list[0]); hdr++) {
+ struct ist n = list[hdr].n;
+ struct ist v = list[hdr].v;
+
+ /* these ones do not exist in H2 and must be dropped. */
+ if (isteq(n, ist("connection")) ||
+ (auth.len && isteq(n, ist("host"))) ||
+ isteq(n, ist("proxy-connection")) ||
+ isteq(n, ist("keep-alive")) ||
+ isteq(n, ist("upgrade")) ||
+ isteq(n, ist("transfer-encoding")))
+ continue;
+
+ if (isteq(n, ist("te"))) {
+ /* "te" may only be sent with "trailers" if this value
+ * is present, otherwise it must be deleted.
+ */
+ v = istist(v, ist("trailers"));
+ if (!isttest(v) || (v.len > 8 && v.ptr[8] != ','))
+ continue;
+ v = ist("trailers");
+ }
+
+ /* Skip all pseudo-headers */
+ if (*(n.ptr) == ':')
+ continue;
+
+ if (isteq(n, ist("")))
+ break; // end
+
+ if (!h2_encode_header(&outbuf, n, v, H2_EV_TX_FRAME|H2_EV_TX_HDR, ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+ }
+
+ /* update the frame's size */
+ h2_set_frame_size(outbuf.area, outbuf.data - 9);
+
+ if (outbuf.data > h2c->mfs + 9) {
+ if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+ }
+
+ TRACE_USER("sent H2 request ", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
+
+ /* remove all header blocks including the EOH and compute the
+ * corresponding size.
+ */
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ /* The removed block is the EOH */
+ if (type == HTX_BLK_EOH)
+ break;
+ }
+
+ if (!h2s_sc(h2s) || se_fl_test(h2s->sd, SE_FL_SHW)) {
+ /* Request already closed: add END_STREAM */
+ es_now = 1;
+ }
+ if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
+ /* EOM+empty: we may need to add END_STREAM (except for CONNECT
+ * request)
+ */
+ if (!(h2s->flags & H2_SF_BODY_TUNNEL))
+ es_now = 1;
+ }
+
+ if (es_now)
+ outbuf.area[4] |= H2_F_HEADERS_END_STREAM;
+
+ /* commit the H2 response */
+ b_add(mbuf, outbuf.data);
+ h2c->flags |= H2_CF_MBUF_HAS_DATA;
+ h2s->flags |= H2_SF_HEADERS_SENT;
+ h2s->st = H2_SS_OPEN;
+
+ if (es_now) {
+ TRACE_PROTO("setting ES on HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s, htx);
+ // trim any possibly pending data (eg: inconsistent content-length)
+ h2s->flags |= H2_SF_ES_SENT;
+ h2s->st = H2_SS_HLOC;
+ }
+
+ end:
+ return ret;
+ full:
+ if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ ret = 0;
+ TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+ goto end;
+ fail:
+ /* unparsable HTX messages, too large ones to be produced in the local
+ * list etc go here (unrecoverable errors).
+ */
+ h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ goto end;
+}
+
+/* Try to send a DATA frame matching HTTP response present in HTX structure
+ * present in <buf>, for stream <h2s>. Returns the number of bytes sent. The
+ * caller must check the stream's status to detect any error which might have
+ * happened subsequently to a successful send. Returns the number of data bytes
+ * consumed, or zero if nothing done.
+ */
+static size_t h2s_make_data(struct h2s *h2s, struct buffer *buf, size_t count)
+{
+ struct h2c *h2c = h2s->h2c;
+ struct htx *htx;
+ struct buffer outbuf;
+ struct buffer *mbuf;
+ size_t total = 0;
+ int es_now = 0;
+ int bsize; /* htx block size */
+ int fsize; /* h2 frame size */
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ int trunc_out; /* non-zero if truncated on out buf */
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+
+ htx = htx_from_buf(buf);
+
+ /* We only come here with HTX_BLK_DATA blocks */
+
+ new_frame:
+ if (!count || htx_is_empty(htx))
+ goto end;
+
+ if ((h2c->flags & H2_CF_IS_BACK) &&
+ (h2s->flags & (H2_SF_HEADERS_RCVD|H2_SF_BODY_TUNNEL)) == H2_SF_BODY_TUNNEL) {
+ /* The response HEADERS frame not received yet. Thus the tunnel
+ * is not fully established yet. In this situation, we block
+ * data sending.
+ */
+ h2s->flags |= H2_SF_BLK_MBUSY;
+ TRACE_STATE("Request DATA frame blocked waiting for tunnel establishment", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto end;
+ }
+ else if ((h2c->flags & H2_CF_IS_BACK) && (h2s->flags & H2_SF_TUNNEL_ABRT)) {
+ /* a tunnel attempt was aborted but there is pending raw data to xfer to the server.
+ * Thus the stream is closed with the CANCEL error. The error will be reported to
+ * the upper layer as a server abort. But at this stage there is nothing more we can
+ * do. We just wait for the end of the response to be sure to not truncate it.
+ */
+ if (!(h2s->flags & H2_SF_ES_RCVD)) {
+ TRACE_STATE("Request DATA frame blocked waiting end of aborted tunnel", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ h2s->flags |= H2_SF_BLK_MBUSY;
+ }
+ else {
+ TRACE_ERROR("Request DATA frame for aborted tunnel", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
+ h2s_error(h2s, H2_ERR_CANCEL);
+ }
+ goto end;
+ }
+
+ blk = htx_get_head_blk(htx);
+ type = htx_get_blk_type(blk);
+ bsize = htx_get_blksz(blk);
+ fsize = bsize;
+ trunc_out = 0;
+ if (type != HTX_BLK_DATA)
+ goto end;
+
+ mbuf = br_tail(h2c->mbuf);
+ retry:
+ if (br_count(h2c->mbuf) > h2c->nb_streams) {
+ /* more buffers than streams allocated, pointless
+ * to continue, we'd use more RAM for no reason.
+ */
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+ goto end;
+ }
+
+ if (!h2_get_buf(h2c, mbuf)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+ goto end;
+ }
+
+ /* Perform some optimizations to reduce the number of buffer copies.
+ * First, if the mux's buffer is empty and the htx area contains
+ * exactly one data block of the same size as the requested count, and
+ * this count fits within the frame size, the stream's window size, and
+ * the connection's window size, then it's possible to simply swap the
+ * caller's buffer with the mux's output buffer and adjust offsets and
+ * length to match the entire DATA HTX block in the middle. In this
+ * case we perform a true zero-copy operation from end-to-end. This is
+ * the situation that happens all the time with large files. Second, if
+ * this is not possible, but the mux's output buffer is empty, we still
+ * have an opportunity to avoid the copy to the intermediary buffer, by
+ * making the intermediary buffer's area point to the output buffer's
+ * area. In this case we want to skip the HTX header to make sure that
+ * copies remain aligned and that this operation remains possible all
+ * the time. This goes for headers, data blocks and any data extracted
+ * from the HTX blocks.
+ */
+ if (unlikely(fsize == count &&
+ htx_nbblks(htx) == 1 && type == HTX_BLK_DATA &&
+ fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
+ void *old_area = mbuf->area;
+
+ if (b_data(mbuf)) {
+ /* Too bad there are data left there. We're willing to memcpy/memmove
+ * up to 1/4 of the buffer, which means that it's OK to copy a large
+ * frame into a buffer containing few data if it needs to be realigned,
+ * and that it's also OK to copy few data without realigning. Otherwise
+ * we'll pretend the mbuf is full and wait for it to become empty.
+ */
+ if (fsize + 9 <= b_room(mbuf) &&
+ (b_data(mbuf) <= b_size(mbuf) / 4 ||
+ (fsize <= b_size(mbuf) / 4 && fsize + 9 <= b_contig_space(mbuf)))) {
+ TRACE_STATE("small data present in output buffer, appending", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto copy;
+ }
+
+ if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto end;
+ }
+
+ if (htx->flags & HTX_FL_EOM) {
+ /* EOM+empty: we may need to add END_STREAM (except for tunneled
+ * message)
+ */
+ if (!(h2s->flags & H2_SF_BODY_TUNNEL))
+ es_now = 1;
+ }
+ /* map an H2 frame to the HTX block so that we can put the
+ * frame header there.
+ */
+ *mbuf = b_make(buf->area, buf->size, sizeof(struct htx) + blk->addr - 9, fsize + 9);
+ outbuf.area = b_head(mbuf);
+
+ /* prepend an H2 DATA frame header just before the DATA block */
+ memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
+ write_n32(outbuf.area + 5, h2s->id); // 4 bytes
+ if (es_now)
+ outbuf.area[4] |= H2_F_DATA_END_STREAM;
+ h2_set_frame_size(outbuf.area, fsize);
+
+ /* update windows */
+ h2s->sws -= fsize;
+ h2c->mws -= fsize;
+
+ /* and exchange with our old area */
+ buf->area = old_area;
+ buf->data = buf->head = 0;
+ total += fsize;
+ fsize = 0;
+ h2c->flags |= H2_CF_MBUF_HAS_DATA;
+
+ TRACE_PROTO("sent H2 DATA frame (zero-copy)", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto out;
+ }
+
+ copy:
+ /* for DATA and EOM we'll have to emit a frame, even if empty */
+
+ while (1) {
+ outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
+ if (outbuf.size >= 9 || !b_space_wraps(mbuf))
+ break;
+ realign_again:
+ b_slow_realign(mbuf, trash.area, b_data(mbuf));
+ }
+
+ if (outbuf.size < 9) {
+ if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto end;
+ }
+
+ /* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
+ memcpy(outbuf.area, "\x00\x00\x00\x00\x00", 5);
+ write_n32(outbuf.area + 5, h2s->id); // 4 bytes
+ outbuf.data = 9;
+
+ /* we have in <fsize> the exact number of bytes we need to copy from
+ * the HTX buffer. We need to check this against the connection's and
+ * the stream's send windows, and to ensure that this fits in the max
+ * frame size and in the buffer's available space minus 9 bytes (for
+ * the frame header). The connection's flow control is applied last so
+ * that we can use a separate list of streams which are immediately
+ * unblocked on window opening. Note: we don't implement padding.
+ */
+
+ if (!fsize)
+ goto send_empty;
+
+ if (h2s_mws(h2s) <= 0) {
+ h2s->flags |= H2_SF_BLK_SFCTL;
+ if (LIST_INLIST(&h2s->list))
+ h2_remove_from_list(h2s);
+ LIST_APPEND(&h2c->blocked_list, &h2s->list);
+ TRACE_STATE("stream window <=0, flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_FCTL, h2c->conn, h2s);
+ goto end;
+ }
+
+ if (fsize > count)
+ fsize = count;
+
+ if (fsize > h2s_mws(h2s))
+ fsize = h2s_mws(h2s); // >0
+
+ if (h2c->mfs && fsize > h2c->mfs)
+ fsize = h2c->mfs; // >0
+
+ if (fsize + 9 > outbuf.size) {
+ /* It doesn't fit at once. If it at least fits once split and
+ * the amount of data to move is low, let's defragment the
+ * buffer now.
+ */
+ if (b_space_wraps(mbuf) &&
+ (fsize + 9 <= b_room(mbuf)) &&
+ b_data(mbuf) <= MAX_DATA_REALIGN)
+ goto realign_again;
+ fsize = outbuf.size - 9;
+ trunc_out = 1;
+
+ if (fsize <= 0) {
+ /* no need to send an empty frame here */
+ if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("output buffer full", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto end;
+ }
+ }
+
+ if (h2c->mws <= 0) {
+ h2s->flags |= H2_SF_BLK_MFCTL;
+ TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2C_FCTL, h2c->conn, h2s);
+ goto end;
+ }
+
+ if (fsize > h2c->mws)
+ fsize = h2c->mws;
+
+ /* now let's copy this into the output buffer */
+ memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
+ h2s->sws -= fsize;
+ h2c->mws -= fsize;
+ count -= fsize;
+
+ send_empty:
+ /* update the frame's size */
+ h2_set_frame_size(outbuf.area, fsize);
+
+ /* consume incoming HTX block */
+ total += fsize;
+ if (fsize == bsize) {
+ htx_remove_blk(htx, blk);
+ if ((htx->flags & HTX_FL_EOM) && htx_is_empty(htx)) {
+ /* EOM+empty: we may need to add END_STREAM (except for tunneled
+ * message)
+ */
+ if (!(h2s->flags & H2_SF_BODY_TUNNEL))
+ es_now = 1;
+ }
+ }
+ else {
+ /* we've truncated this block */
+ htx_cut_data_blk(htx, blk, fsize);
+ }
+
+ if (es_now)
+ outbuf.area[4] |= H2_F_DATA_END_STREAM;
+
+ /* commit the H2 response */
+ b_add(mbuf, fsize + 9);
+ h2c->flags |= H2_CF_MBUF_HAS_DATA;
+
+ out:
+ if (es_now) {
+ if (h2s->st == H2_SS_OPEN)
+ h2s->st = H2_SS_HLOC;
+ else
+ h2s_close(h2s);
+
+ h2s->flags |= H2_SF_ES_SENT;
+ TRACE_PROTO("ES flag set on outgoing frame", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_TX_EOI, h2c->conn, h2s);
+ }
+ else if (fsize) {
+ if (fsize == bsize) {
+ TRACE_DEVEL("more data may be available, trying to send another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ goto new_frame;
+ }
+ else if (trunc_out) {
+ /* we've truncated this block */
+ goto new_frame;
+ }
+ }
+
+ end:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ return total;
+}
+
+/* Skip the message payload (DATA blocks) and emit an empty DATA frame with the
+ * ES flag set for stream <h2s>. This function is called for response known to
+ * have no payload. Only DATA blocks are skipped. This means the trailers are
+ * still emitted. The caller must check the stream's status to detect any error
+ * which might have happened subsequently to a successful send. Returns the
+ * number of data bytes consumed, or zero if nothing done.
+ */
+static size_t h2s_skip_data(struct h2s *h2s, struct buffer *buf, size_t count)
+{
+ struct h2c *h2c = h2s->h2c;
+ struct htx *htx;
+ int bsize; /* htx block size */
+ int fsize; /* h2 frame size */
+ struct htx_blk *blk;
+ enum htx_blk_type type;
+ size_t total = 0;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+
+ htx = htx_from_buf(buf);
+
+ next_data:
+ if (!count || htx_is_empty(htx))
+ goto end;
+ blk = htx_get_head_blk(htx);
+ type = htx_get_blk_type(blk);
+ bsize = htx_get_blksz(blk);
+ fsize = bsize;
+ if (type != HTX_BLK_DATA)
+ goto end;
+
+ if (fsize > count)
+ fsize = count;
+
+ /* only part of the block fits in <count>: consume what we can
+ * without sending ES yet.
+ */
+ if (fsize != bsize)
+ goto skip_data;
+
+ if (!(htx->flags & HTX_FL_EOM) || !htx_is_unique_blk(htx, blk))
+ goto skip_data;
+
+ /* Here, it is the last block and it is also the end of the message. So
+ * we can emit an empty DATA frame with the ES flag set
+ */
+ if (h2_send_empty_data_es(h2s) <= 0)
+ goto end;
+
+ if (h2s->st == H2_SS_OPEN)
+ h2s->st = H2_SS_HLOC;
+ else
+ h2s_close(h2s);
+
+ skip_data:
+ /* consume incoming HTX block */
+ total += fsize;
+ if (fsize == bsize) {
+ TRACE_DEVEL("more data may be available, trying to skip another frame", H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ htx_remove_blk(htx, blk);
+ goto next_data;
+ }
+ else {
+ /* we've truncated this block */
+ htx_cut_data_blk(htx, blk, fsize);
+ }
+
+ end:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_DATA, h2c->conn, h2s);
+ return total;
+}
+
+/* Try to send a HEADERS frame matching HTX_BLK_TLR series of blocks present in
+ * HTX message <htx> for the H2 stream <h2s>. Returns the number of bytes
+ * processed. The caller must check the stream's status to detect any error
+ * which might have happened subsequently to a successful send. The htx blocks
+ * are automatically removed from the message. The htx message is assumed to be
+ * valid since produced from the internal code. Processing stops when meeting
+ * the EOT, which *is* removed. All trailers are processed at once and sent as a
+ * single frame. The ES flag is always set.
+ */
+static size_t h2s_make_trailers(struct h2s *h2s, struct htx *htx)
+{
+ struct http_hdr list[global.tune.max_http_hdr];
+ struct h2c *h2c = h2s->h2c;
+ struct htx_blk *blk;
+ struct buffer outbuf;
+ struct buffer *mbuf;
+ enum htx_blk_type type;
+ int ret = 0;
+ int hdr;
+ int idx;
+
+ TRACE_ENTER(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+
+ /* get trailers. */
+ hdr = 0;
+ for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+ type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_UNUSED)
+ continue;
+
+ if (type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_TLR) {
+ if (unlikely(hdr >= sizeof(list)/sizeof(list[0]) - 1)) {
+ TRACE_ERROR("too many headers", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+
+ list[hdr].n = htx_get_blk_name(htx, blk);
+ list[hdr].v = htx_get_blk_value(htx, blk);
+ hdr++;
+ }
+ else {
+ TRACE_ERROR("will not encode unexpected htx block", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_ERR, h2c->conn, h2s);
+ goto fail;
+ }
+ }
+
+ /* marker for end of trailers */
+ list[hdr].n = ist("");
+
+ mbuf = br_tail(h2c->mbuf);
+ retry:
+ if (!h2_get_buf(h2c, mbuf)) {
+ h2c->flags |= H2_CF_MUX_MALLOC;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+ goto end;
+ }
+
+ chunk_reset(&outbuf);
+
+ while (1) {
+ outbuf = b_make(b_tail(mbuf), b_contig_space(mbuf), 0, 0);
+ if (outbuf.size >= 9 || !b_space_wraps(mbuf))
+ break;
+ realign_again:
+ b_slow_realign(mbuf, trash.area, b_data(mbuf));
+ }
+
+ if (outbuf.size < 9)
+ goto full;
+
+ /* len: 0x000000 (fill later), type: 1(HEADERS), flags: ENDH=4,ES=1 */
+ memcpy(outbuf.area, "\x00\x00\x00\x01\x05", 5);
+ write_n32(outbuf.area + 5, h2s->id); // 4 bytes
+ outbuf.data = 9;
+
+ /* encode all headers */
+ for (idx = 0; idx < hdr; idx++) {
+ /* these ones do not exist in H2 or must not appear in
+ * trailers and must be dropped.
+ */
+ if (isteq(list[idx].n, ist("host")) ||
+ isteq(list[idx].n, ist("content-length")) ||
+ isteq(list[idx].n, ist("connection")) ||
+ isteq(list[idx].n, ist("proxy-connection")) ||
+ isteq(list[idx].n, ist("keep-alive")) ||
+ isteq(list[idx].n, ist("upgrade")) ||
+ isteq(list[idx].n, ist("te")) ||
+ isteq(list[idx].n, ist("transfer-encoding")))
+ continue;
+
+ /* Skip all pseudo-headers */
+ if (*(list[idx].n.ptr) == ':')
+ continue;
+
+ if (!h2_encode_header(&outbuf, list[idx].n, list[idx].v, H2_EV_TX_FRAME|H2_EV_TX_HDR,
+ ist(TRC_LOC), __FUNCTION__, h2c, h2s)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+ }
+
+ if (outbuf.data == 9) {
+ /* here we have a problem, we have nothing to emit (either we
+ * received an empty trailers block or we removed its
+ * contents above). Because of this we can't send a HEADERS
+ * frame, so we have to cheat and instead send an empty DATA
+ * frame conveying the ES flag.
+ */
+ outbuf.area[3] = H2_FT_DATA;
+ outbuf.area[4] = H2_F_DATA_END_STREAM;
+ }
+
+ /* update the frame's size */
+ h2_set_frame_size(outbuf.area, outbuf.data - 9);
+
+ if (outbuf.data > h2c->mfs + 9) {
+ if (!h2_fragment_headers(&outbuf, h2c->mfs)) {
+ /* output full */
+ if (b_space_wraps(mbuf))
+ goto realign_again;
+ goto full;
+ }
+ }
+
+ /* commit the H2 response */
+ TRACE_PROTO("sent H2 trailers HEADERS frame", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_TX_EOI, h2c->conn, h2s);
+ b_add(mbuf, outbuf.data);
+ h2c->flags |= H2_CF_MBUF_HAS_DATA;
+ h2s->flags |= H2_SF_ES_SENT;
+
+ if (h2s->st == H2_SS_OPEN)
+ h2s->st = H2_SS_HLOC;
+ else
+ h2s_close(h2s);
+
+ /* OK we could properly deliver the response */
+ done:
+ /* remove all header blocks till the end and compute the corresponding size. */
+ ret = 0;
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ type = htx_get_blk_type(blk);
+ ret += htx_get_blksz(blk);
+ blk = htx_remove_blk(htx, blk);
+ /* The removed block is the EOT */
+ if (type == HTX_BLK_EOT)
+ break;
+ }
+
+ end:
+ TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_HDR, h2c->conn, h2s);
+ return ret;
+ full:
+ if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+ goto retry;
+ h2c->flags |= H2_CF_MUX_MFULL;
+ h2s->flags |= H2_SF_BLK_MROOM;
+ ret = 0;
+ TRACE_STATE("mux buffer full", H2_EV_TX_FRAME|H2_EV_TX_HDR|H2_EV_H2S_BLK, h2c->conn, h2s);
+ goto end;
+ fail:
+ /* unparsable HTX messages, too large ones to be produced in the local
+ * list etc go here (unrecoverable errors).
+ */
+ h2s_error(h2s, H2_ERR_INTERNAL_ERROR);
+ ret = 0;
+ goto end;
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int h2_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct h2s *h2s = __sc_mux_strm(sc);
+ struct h2c *h2c = h2s->h2c;
+
+ TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(h2s->subs && h2s->subs != es);
+
+ es->events |= event_type;
+ h2s->subs = es;
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("subscribe(recv)", H2_EV_STRM_RECV, h2c->conn, h2s);
+
+ if (event_type & SUB_RETRY_SEND) {
+ TRACE_DEVEL("subscribe(send)", H2_EV_STRM_SEND, h2c->conn, h2s);
+ /* not stream-flow-blocked and not already queued: append the
+ * stream to the appropriate wait list so it gets woken up.
+ */
+ if (!(h2s->flags & H2_SF_BLK_SFCTL) &&
+ !LIST_INLIST(&h2s->list)) {
+ if (h2s->flags & H2_SF_BLK_MFCTL) {
+ TRACE_DEVEL("Adding to fctl list", H2_EV_STRM_SEND, h2c->conn, h2s);
+ LIST_APPEND(&h2c->fctl_list, &h2s->list);
+ }
+ else {
+ TRACE_DEVEL("Adding to send list", H2_EV_STRM_SEND, h2c->conn, h2s);
+ LIST_APPEND(&h2c->send_list, &h2s->list);
+ }
+ }
+ }
+ TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2c->conn, h2s);
+ return 0;
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int h2_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct h2s *h2s = __sc_mux_strm(sc);
+
+ TRACE_ENTER(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(h2s->subs && h2s->subs != es);
+
+ es->events &= ~event_type;
+ if (!es->events)
+ h2s->subs = NULL;
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("unsubscribe(recv)", H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
+
+ if (event_type & SUB_RETRY_SEND) {
+ TRACE_DEVEL("unsubscribe(send)", H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+ /* nobody waits to send anymore: drop any pending send
+ * notification and leave the wait lists unless a shut is
+ * still pending.
+ */
+ h2s->flags &= ~H2_SF_NOTIFIED;
+ if (!(h2s->flags & (H2_SF_WANT_SHUTR | H2_SF_WANT_SHUTW)))
+ h2_remove_from_list(h2s);
+ }
+
+ TRACE_LEAVE(H2_EV_STRM_SEND|H2_EV_STRM_RECV, h2s->h2c->conn, h2s);
+ return 0;
+}
+
+
+/* Called from the upper layer, to receive data
+ *
+ * The caller is responsible for defragmenting <buf> if necessary. But <flags>
+ * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
+ * means the caller wants to flush input data (from the mux buffer and the
+ * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
+ * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
+ * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
+ * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
+ * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
+ * copy as much data as possible.
+ */
+static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct h2s *h2s = __sc_mux_strm(sc);
+ struct h2c *h2c = h2s->h2c;
+ struct htx *h2s_htx = NULL;
+ struct htx *buf_htx = NULL;
+ size_t ret = 0;
+
+ TRACE_ENTER(H2_EV_STRM_RECV, h2c->conn, h2s);
+
+ /* transfer possibly pending data to the upper layer */
+ h2s_htx = htx_from_buf(&h2s->rxbuf);
+ if (htx_is_empty(h2s_htx) && !(h2s_htx->flags & HTX_FL_PARSING_ERROR)) {
+ /* Here htx_to_buf() will set buffer data to 0 because
+ * the HTX is empty.
+ */
+ htx_to_buf(h2s_htx, &h2s->rxbuf);
+ goto end;
+ }
+ ret = h2s_htx->data;
+ buf_htx = htx_from_buf(buf);
+
+ /* <buf> is empty and the message is small enough, swap the
+ * buffers. */
+ if (htx_is_empty(buf_htx) && htx_used_space(h2s_htx) <= count) {
+ htx_to_buf(buf_htx, buf);
+ htx_to_buf(h2s_htx, &h2s->rxbuf);
+ b_xfer(buf, &h2s->rxbuf, b_data(&h2s->rxbuf));
+ goto end;
+ }
+
+ htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);
+
+ if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
+ buf_htx->flags |= HTX_FL_PARSING_ERROR;
+ if (htx_is_empty(buf_htx))
+ se_fl_set(h2s->sd, SE_FL_EOI);
+ }
+ else if (htx_is_empty(h2s_htx)) {
+ buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
+ }
+
+ /* propagate the remaining expected payload length (extra), if any,
+ * accounting for the data still left in the stream's rxbuf.
+ */
+ buf_htx->extra = (h2s_htx->extra ? (h2s_htx->data + h2s_htx->extra) : 0);
+ htx_to_buf(buf_htx, buf);
+ htx_to_buf(h2s_htx, &h2s->rxbuf);
+ ret -= h2s_htx->data;
+
+ end:
+ if (b_data(&h2s->rxbuf))
+ se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ else {
+ if (!(h2c->flags & H2_CF_IS_BACK) && (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_ES_RCVD))) {
+ /* If request ES is reported to the upper layer, it means the
+ * H2S now expects data from the opposite side.
+ */
+ se_expect_data(h2s->sd);
+ }
+
+ se_fl_clr(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ h2s_propagate_term_flags(h2c, h2s);
+ if (b_size(&h2s->rxbuf)) {
+ b_free(&h2s->rxbuf);
+ offer_buffers(NULL, 1);
+ }
+ }
+
+ if (ret && h2c->dsi == h2s->id) {
+ /* demux is blocking on this stream's buffer */
+ h2c->flags &= ~H2_CF_DEM_SFULL;
+ h2c_restart_reading(h2c, 1);
+ }
+
+ TRACE_LEAVE(H2_EV_STRM_RECV, h2c->conn, h2s);
+ return ret;
+}
+
+
+/* Called from the upper layer, to send data from buffer <buf> for no more than
+ * <count> bytes. Returns the number of bytes effectively sent. Some status
+ * flags may be updated on the stream connector.
+ */
+static size_t h2_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct h2s *h2s = __sc_mux_strm(sc);
+ size_t total = 0;
+ size_t ret;
+ struct htx *htx;
+ struct htx_blk *blk;
+ enum htx_blk_type btype;
+ uint32_t bsize;
+ int32_t idx;
+
+ TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+
+ /* If we were not just woken because we wanted to send but couldn't,
+ * and there's somebody else that is waiting to send, do nothing,
+ * we will subscribe later and be put at the end of the list
+ */
+ if (!(h2s->flags & H2_SF_NOTIFIED) &&
+ (!LIST_ISEMPTY(&h2s->h2c->send_list) || !LIST_ISEMPTY(&h2s->h2c->fctl_list))) {
+ if (LIST_INLIST(&h2s->list))
+ TRACE_DEVEL("stream already waiting, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
+ else {
+ TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
+ h2s->h2c->flags |= H2_CF_WAIT_INLIST;
+ }
+ return 0;
+ }
+ h2s->flags &= ~H2_SF_NOTIFIED;
+
+ if (h2s->h2c->st0 < H2_CS_FRAME_H) {
+ TRACE_DEVEL("connection not ready, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
+ return 0;
+ }
+
+ if (h2s->h2c->st0 >= H2_CS_ERROR) {
+ se_fl_set(h2s->sd, SE_FL_ERROR);
+ TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
+ return 0;
+ }
+
+ htx = htx_from_buf(buf);
+
+ if (!(h2s->flags & H2_SF_OUTGOING_DATA) && count)
+ h2s->flags |= H2_SF_OUTGOING_DATA;
+
+ if (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+ h2s->flags |= H2_SF_MORE_HTX_DATA;
+ else
+ h2s->flags &= ~H2_SF_MORE_HTX_DATA;
+
+ /* reserved stream (id 0): allocate a real stream ID now, just
+ * before sending the first frame.
+ */
+ if (h2s->id == 0) {
+ int32_t id = h2c_get_next_sid(h2s->h2c);
+
+ if (id < 0) {
+ se_fl_set(h2s->sd, SE_FL_ERROR);
+ TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
+ return 0;
+ }
+
+ eb32_delete(&h2s->by_id);
+ h2s->by_id.key = h2s->id = id;
+ h2s->h2c->max_id = id;
+ h2s->h2c->nb_reserved--;
+ eb32_insert(&h2s->h2c->streams_by_id, &h2s->by_id);
+ }
+
+ while (h2s->st < H2_SS_HLOC && !(h2s->flags & H2_SF_BLK_ANY) &&
+ count && !htx_is_empty(htx)) {
+ idx = htx_get_head(htx);
+ blk = htx_get_blk(htx, idx);
+ btype = htx_get_blk_type(blk);
+ bsize = htx_get_blksz(blk);
+
+ switch (btype) {
+ case HTX_BLK_REQ_SL:
+ /* start-line before headers */
+ ret = h2s_snd_bhdrs(h2s, htx);
+ if (ret > 0) {
+ total += ret;
+ count -= ret;
+ if (ret < bsize)
+ goto done;
+ }
+ break;
+
+ case HTX_BLK_RES_SL:
+ /* start-line before headers */
+ ret = h2s_snd_fhdrs(h2s, htx);
+ if (ret > 0) {
+ total += ret;
+ count -= ret;
+ if (ret < bsize)
+ goto done;
+ }
+ break;
+
+ case HTX_BLK_DATA:
+ /* all these cause the emission of a DATA frame (possibly empty) */
+ if (!(h2s->h2c->flags & H2_CF_IS_BACK) &&
+ (h2s->flags & (H2_SF_BODY_TUNNEL|H2_SF_BODYLESS_RESP)) == H2_SF_BODYLESS_RESP)
+ ret = h2s_skip_data(h2s, buf, count);
+ else
+ ret = h2s_make_data(h2s, buf, count);
+ if (ret > 0) {
+ htx = htx_from_buf(buf);
+ total += ret;
+ count -= ret;
+ if (ret < bsize)
+ goto done;
+ }
+ break;
+
+ case HTX_BLK_TLR:
+ case HTX_BLK_EOT:
+ /* This is the first trailers block; all the subsequent
+ * ones (up to and including the EOT) are processed at
+ * once by h2s_make_trailers().
+ */
+ ret = h2s_make_trailers(h2s, htx);
+ if (ret > 0) {
+ total += ret;
+ count -= ret;
+ if (ret < bsize)
+ goto done;
+ }
+ break;
+
+ default:
+ htx_remove_blk(htx, blk);
+ total += bsize;
+ count -= bsize;
+ break;
+ }
+ }
+
+ done:
+ if (h2s->st >= H2_SS_HLOC) {
+ /* trim any possibly pending data after we close (extra CR-LF,
+ * unprocessed trailers, abnormal extra data, ...)
+ */
+ total += count;
+ count = 0;
+ }
+
+ /* RST are sent similarly to frame acks */
+ if (h2s->st == H2_SS_ERROR || h2s->flags & H2_SF_RST_RCVD) {
+ TRACE_DEVEL("reporting RST/error to the app-layer stream", H2_EV_H2S_SEND|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
+ se_fl_set_error(h2s->sd);
+ if (h2s_send_rst_stream(h2s->h2c, h2s) > 0)
+ h2s_close(h2s);
+ }
+
+ htx_to_buf(htx, buf);
+
+ if (total > 0) {
+ if (!(h2s->h2c->wait_event.events & SUB_RETRY_SEND)) {
+ TRACE_DEVEL("data queued, waking up h2c sender", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
+ if (h2_send(h2s->h2c))
+ tasklet_wakeup(h2s->h2c->wait_event.tasklet);
+ }
+
+ }
+ /* If we're waiting for flow control, and we got a shutr on the
+ * connection, we will never be unlocked, so add an error on
+ * the stream connector.
+ */
+ if ((h2s->h2c->flags & H2_CF_RCVD_SHUT) &&
+ !b_data(&h2s->h2c->dbuf) &&
+ (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
+ TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
+ se_fl_set_error(h2s->sd);
+ }
+
+ if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&
+ !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
+ /* Ok we managed to send something, leave the send_list if we were still there */
+ h2_remove_from_list(h2s);
+ TRACE_DEVEL("Removed from h2s list", H2_EV_H2S_SEND|H2_EV_H2C_SEND, h2s->h2c->conn, h2s);
+ }
+
+ TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+ return total;
+}
+
+/* Negotiate a zero-copy fast-forward of at most <count> bytes from <input>
+ * for stream <sc>: reserve room for a DATA frame header in the connection's
+ * output buffer and expose that buffer through the stream's iobuf so the
+ * producer can append payload directly. The amount is clamped by the stream
+ * window, the connection window and the max frame size. Returns the number
+ * of bytes that may still be fast-forwarded, or 0 when blocked, in which
+ * case IOBUF_FL_FF_BLOCKED is set. <may_splice> is not used here.
+ */
+static size_t h2_nego_ff(struct stconn *sc, struct buffer *input, size_t count, unsigned int may_splice)
+{
+	struct h2s *h2s = __sc_mux_strm(sc);
+	struct h2c *h2c = h2s->h2c;
+	struct buffer *mbuf;
+	size_t sz , ret = 0;
+
+	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+
+	/* If we were not just woken because we wanted to send but couldn't,
+	 * and there's somebody else that is waiting to send, do nothing,
+	 * we will subscribe later and be put at the end of the list
+	 *
+	 * WARNING: h2_done_ff() is responsible to remove H2_SF_NOTIFIED flags
+	 * depending on iobuf flags.
+	 */
+	if (!(h2s->flags & H2_SF_NOTIFIED) &&
+	    (!LIST_ISEMPTY(&h2c->send_list) || !LIST_ISEMPTY(&h2c->fctl_list))) {
+		if (LIST_INLIST(&h2s->list))
+			TRACE_DEVEL("stream already waiting, leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
+		else {
+			TRACE_DEVEL("other streams already waiting, going to the queue and leaving", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2s->h2c->conn, h2s);
+			h2s->h2c->flags |= H2_CF_WAIT_INLIST;
+		}
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		goto end;
+	}
+
+	/* stream-level flow control exhausted: park on the blocked list */
+	if (h2s_mws(h2s) <= 0) {
+		h2s->flags |= H2_SF_BLK_SFCTL;
+		if (LIST_INLIST(&h2s->list))
+			LIST_DEL_INIT(&h2s->list);
+		LIST_APPEND(&h2c->blocked_list, &h2s->list);
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("stream window <=0, flow-controlled", H2_EV_H2S_SEND|H2_EV_H2S_FCTL, h2c->conn, h2s);
+		goto end;
+	}
+	/* connection-level flow control exhausted */
+	if (h2c->mws <= 0) {
+		h2s->flags |= H2_SF_BLK_MFCTL;
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("connection window <=0, stream flow-controlled", H2_EV_H2S_SEND|H2_EV_H2C_FCTL, h2c->conn, h2s);
+		goto end;
+	}
+
+	/* clamp the negotiated size by both windows and the max frame size */
+	sz = count;
+	if (sz > h2s_mws(h2s))
+		sz = h2s_mws(h2s);
+	if (h2c->mfs && sz > h2c->mfs)
+		sz = h2c->mfs; // >0
+	if (sz > h2c->mws)
+		sz = h2c->mws;
+
+	if (count > sz)
+		count = sz;
+
+	mbuf = br_tail(h2c->mbuf);
+ retry:
+	if (br_count(h2c->mbuf) > h2c->nb_streams) {
+		/* more buffers than streams allocated, pointless
+		 * to continue, we'd use more RAM for no reason.
+		 */
+		h2s->flags |= H2_SF_BLK_MROOM;
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("waiting for room in output buffer", H2_EV_TX_FRAME|H2_EV_TX_DATA|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto end;
+	}
+
+	if (!h2_get_buf(h2c, mbuf)) {
+		h2c->flags |= H2_CF_MUX_MALLOC;
+		h2s->flags |= H2_SF_BLK_MROOM;
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("waiting for room in output buffer", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto end;
+	}
+
+	/* try the next ring buffer when this one is too full to be worth it */
+	if (b_room(mbuf) < sz && b_room(mbuf) < b_size(mbuf) / 4) {
+		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+			goto retry;
+		h2c->flags |= H2_CF_MUX_MFULL;
+		h2s->flags |= H2_SF_BLK_MROOM;
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("too large data present in output buffer, waiting for emptiness", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto end;
+	}
+
+	/* make sure the 9-byte frame header can be stored contiguously */
+	while (1) {
+		if (b_contig_space(mbuf) >= 9 || !b_space_wraps(mbuf))
+			break;
+		b_slow_realign(mbuf, trash.area, b_data(mbuf));
+	}
+
+	if (b_contig_space(mbuf) <= 9) {
+		if ((mbuf = br_tail_add(h2c->mbuf)) != NULL)
+			goto retry;
+		h2c->flags |= H2_CF_MUX_MFULL;
+		h2s->flags |= H2_SF_BLK_MROOM;
+		h2s->sd->iobuf.flags |= IOBUF_FL_FF_BLOCKED;
+		TRACE_STATE("output buffer full", H2_EV_H2S_SEND|H2_EV_H2S_BLK, h2c->conn, h2s);
+		goto end;
+	}
+
+	/* Cannot forward more than available room in output buffer */
+	sz = b_contig_space(mbuf) - 9;
+	if (count > sz)
+		count = sz;
+
+	/* len: 0x000000 (fill later), type: 0(DATA), flags: none=0 */
+	memcpy(b_tail(mbuf), "\x00\x00\x00\x00\x00", 5);
+	write_n32(b_tail(mbuf) + 5, h2s->id); // 4 bytes
+
+	/* expose the output buffer to the producer; the payload will be
+	 * appended right after the 9-byte frame header reserved above.
+	 */
+	h2s->sd->iobuf.buf = mbuf;
+	h2s->sd->iobuf.offset = 9;
+	h2s->sd->iobuf.data = 0;
+
+	/* forward remaining input data */
+	if (b_data(input)) {
+		size_t xfer = count;
+
+		if (xfer > b_data(input))
+			xfer = b_data(input);
+		b_add(mbuf, 9);
+		h2s->sd->iobuf.data = b_xfer(mbuf, input, xfer);
+		b_sub(mbuf, 9);
+
+		/* Cannot forward more data, wait for room */
+		if (b_data(input))
+			goto end;
+	}
+
+	ret = count - h2s->sd->iobuf.data;
+ end:
+	/* a blocked attempt must not keep the "just notified" state */
+	if (h2s->sd->iobuf.flags & IOBUF_FL_FF_BLOCKED)
+		h2s->flags &= ~H2_SF_NOTIFIED;
+	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+	return ret;
+}
+
+/* Complete a fast-forward sequence prepared by h2_nego_ff(): fill in the
+ * reserved 9-byte DATA frame header with the final payload length, account
+ * the payload against the stream and connection windows, and attempt a
+ * synchronous send. Returns the number of bytes considered as sent from the
+ * stream connector's point of view.
+ */
+static size_t h2_done_ff(struct stconn *sc)
+{
+	struct h2s *h2s = __sc_mux_strm(sc);
+	struct h2c *h2c = h2s->h2c;
+	struct sedesc *sd = h2s->sd;
+	struct buffer *mbuf;
+	char *head;
+	size_t total = 0;
+
+	TRACE_ENTER(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+
+	mbuf = sd->iobuf.buf;
+	if (!mbuf)
+		goto end;
+	/* <head> points to the frame header reserved by h2_nego_ff(): the
+	 * payload (iobuf.data bytes) sits at the end of the buffer, right
+	 * after the header.
+	 */
+	head = b_peek(mbuf, b_data(mbuf) - sd->iobuf.data);
+
+	if (sd->iobuf.flags & IOBUF_FL_EOI)
+		h2s->flags &= ~H2_SF_MORE_HTX_DATA;
+
+	if (!(sd->iobuf.flags & IOBUF_FL_FF_BLOCKED) &&
+	    !(h2s->flags & H2_SF_BLK_SFCTL) &&
+	    !(h2s->flags & (H2_SF_WANT_SHUTR|H2_SF_WANT_SHUTW))) {
+		/* Ok we managed to send something, leave the send_list if we were still there */
+		h2_remove_from_list(h2s);
+	}
+
+	if (!sd->iobuf.data)
+		goto end;
+
+	/* Perform a synchronous send but in all cases, consider
+	 * everything was already sent from the SC point of view.
+	 */
+	total = sd->iobuf.data;
+	h2_set_frame_size(head, total);
+	b_add(mbuf, 9);
+	/* the payload consumes both the stream and connection windows */
+	h2s->sws -= total;
+	h2c->mws -= total;
+	if (h2_send(h2s->h2c))
+		tasklet_wakeup(h2s->h2c->wait_event.tasklet);
+
+ end:
+	sd->iobuf.buf = NULL;
+	sd->iobuf.offset = 0;
+	sd->iobuf.data = 0;
+
+	/* keep H2_SF_NOTIFIED only across interim fast-forward rounds */
+	if (!(sd->iobuf.flags & IOBUF_FL_INTERIM_FF))
+		h2s->flags &= ~H2_SF_NOTIFIED;
+
+	TRACE_LEAVE(H2_EV_H2S_SEND|H2_EV_STRM_SEND, h2s->h2c->conn, h2s);
+	return total;
+}
+
+/* Resume callback for fast-forwarding: a no-op for the H2 mux, it always
+ * returns 0 (presumably a fresh h2_nego_ff() attempt is made instead —
+ * verify against the fast-forward callers).
+ */
+static int h2_resume_ff(struct stconn *sc, unsigned int flags)
+{
+	return 0;
+}
+
+/* appends some info about stream <h2s> to buffer <msg>, or does nothing if
+ * <h2s> is NULL. Returns non-zero if the stream is considered suspicious. May
+ * emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is not
+ * NULL, otherwise a single line is used.
+ */
+static int h2_dump_h2s_info(struct buffer *msg, const struct h2s *h2s, const char *pfx)
+{
+	int ret = 0;
+
+	if (!h2s)
+		return ret;
+
+	/* stream id, state, flags and rx buffer occupancy */
+	chunk_appendf(msg, " h2s.id=%d .st=%s .flg=0x%04x .rxbuf=%u@%p+%u/%u",
+		      h2s->id, h2s_st_to_str(h2s->st), h2s->flags,
+		      (unsigned int)b_data(&h2s->rxbuf), b_orig(&h2s->rxbuf),
+		      (unsigned int)b_head_ofs(&h2s->rxbuf), (unsigned int)b_size(&h2s->rxbuf));
+
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	chunk_appendf(msg, " .sc=%p", h2s_sc(h2s));
+	if (h2s_sc(h2s))
+		chunk_appendf(msg, "(.flg=0x%08x .app=%p)",
+			      h2s_sc(h2s)->flags, h2s_sc(h2s)->app);
+
+	chunk_appendf(msg, " .sd=%p", h2s->sd);
+	chunk_appendf(msg, "(.flg=0x%08x)", se_fl_get(h2s->sd));
+
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	chunk_appendf(msg, " .subs=%p", h2s->subs);
+	if (h2s->subs) {
+		chunk_appendf(msg, "(ev=%d tl=%p", h2s->subs->events, h2s->subs->tasklet);
+		chunk_appendf(msg, " tl.calls=%d tl.ctx=%p tl.fct=",
+			      h2s->subs->tasklet->calls,
+			      h2s->subs->tasklet->context);
+		/* an extreme number of tasklet calls suggests a wakeup loop:
+		 * flag the stream as suspicious.
+		 */
+		if (h2s->subs->tasklet->calls >= 1000000)
+			ret = 1;
+		resolve_sym_name(msg, NULL, h2s->subs->tasklet->process);
+		chunk_appendf(msg, ")");
+	}
+	return ret;
+}
+
+/* appends some info about connection <h2c> to buffer <msg>, or does nothing if
+ * <h2c> is NULL. Returns non-zero if the connection is considered suspicious.
+ * May emit multiple lines, each new one being prefixed with <pfx>, if <pfx> is
+ * not NULL, otherwise a single line is used.
+ */
+static int h2_dump_h2c_info(struct buffer *msg, struct h2c *h2c, const char *pfx)
+{
+	const struct buffer *hmbuf, *tmbuf;
+	const struct h2s *h2s = NULL;
+	struct eb32_node *node;
+	int fctl_cnt = 0;
+	int send_cnt = 0;
+	int tree_cnt = 0;
+	int orph_cnt = 0;
+	int ret = 0;
+
+	if (!h2c)
+		return ret;
+
+	/* count streams queued on each list, plus all streams in the tree
+	 * and those without a stream connector (orphans).
+	 */
+	list_for_each_entry(h2s, &h2c->fctl_list, list)
+		fctl_cnt++;
+
+	list_for_each_entry(h2s, &h2c->send_list, list)
+		send_cnt++;
+
+	node = eb32_first(&h2c->streams_by_id);
+	while (node) {
+		h2s = container_of(node, struct h2s, by_id);
+		tree_cnt++;
+		if (!h2s_sc(h2s))
+			orph_cnt++;
+		node = eb32_next(node);
+	}
+
+	hmbuf = br_head(h2c->mbuf);
+	tmbuf = br_tail(h2c->mbuf);
+	chunk_appendf(msg, " h2c.st0=%s .err=%d .maxid=%d .lastid=%d .flg=0x%04x"
+		      " .nbst=%u .nbsc=%u",
+		      h2c_st_to_str(h2c->st0), h2c->errcode, h2c->max_id, h2c->last_sid, h2c->flags,
+		      h2c->nb_streams, h2c->nb_sc);
+
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	chunk_appendf(msg, " .fctl_cnt=%d .send_cnt=%d .tree_cnt=%d"
+		      " .orph_cnt=%d .sub=%d .dsi=%d .dbuf=%u@%p+%u/%u",
+		      fctl_cnt, send_cnt, tree_cnt, orph_cnt,
+		      h2c->wait_event.events, h2c->dsi,
+		      (unsigned int)b_data(&h2c->dbuf), b_orig(&h2c->dbuf),
+		      (unsigned int)b_head_ofs(&h2c->dbuf), (unsigned int)b_size(&h2c->dbuf));
+
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+
+	/* output ring: indices then head/tail buffer occupancy */
+	chunk_appendf(msg, " .mbuf=[%u..%u|%u],h=[%u@%p+%u/%u],t=[%u@%p+%u/%u]",
+		      br_head_idx(h2c->mbuf), br_tail_idx(h2c->mbuf), br_size(h2c->mbuf),
+		      (unsigned int)b_data(hmbuf), b_orig(hmbuf),
+		      (unsigned int)b_head_ofs(hmbuf), (unsigned int)b_size(hmbuf),
+		      (unsigned int)b_data(tmbuf), b_orig(tmbuf),
+		      (unsigned int)b_head_ofs(tmbuf), (unsigned int)b_size(tmbuf));
+
+	chunk_appendf(msg, " .task=%p", h2c->task);
+	if (h2c->task) {
+		chunk_appendf(msg, " .exp=%s",
+			      h2c->task->expire ? tick_is_expired(h2c->task->expire, now_ms) ? "<PAST>" :
+			      human_time(TICKS_TO_MS(h2c->task->expire - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+	}
+
+	return ret;
+}
+
+/* for debugging with CLI's "show fd" command */
+/* for debugging with CLI's "show fd" command */
+static int h2_show_fd(struct buffer *msg, struct connection *conn)
+{
+	struct h2c *h2c = conn->ctx;
+	const struct h2s *h2s;
+	struct eb32_node *node;
+	int ret = 0;
+
+	if (!h2c)
+		return ret;
+
+	/* dump the connection, then the stream with the highest id */
+	ret |= h2_dump_h2c_info(msg, h2c, NULL);
+
+	node = eb32_last(&h2c->streams_by_id);
+	if (node) {
+		h2s = container_of(node, struct h2s, by_id);
+		chunk_appendf(msg, " last_h2s=%p", h2s);
+		ret |= h2_dump_h2s_info(msg, h2s, NULL);
+	}
+
+	return ret;
+}
+
+/* for debugging with CLI's "show sess" command. May emit multiple lines, each
+ * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
+ * line is used. Each field starts with a space so it's safe to print it after
+ * existing fields.
+ */
+static int h2_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
+{
+	struct h2s *h2s = sd->se;
+	int ret = 0;
+
+	if (!h2s)
+		return ret;
+
+	/* dump the stream first, then its connection */
+	chunk_appendf(msg, " h2s=%p", h2s);
+	ret |= h2_dump_h2s_info(msg, h2s, pfx);
+	if (pfx)
+		chunk_appendf(msg, "\n%s", pfx);
+	chunk_appendf(msg, " h2c=%p", h2s->h2c);
+	ret |= h2_dump_h2c_info(msg, h2s->h2c, pfx);
+	return ret;
+}
+
+/* Migrate the connection to the current thread.
+ * Return 0 if successful, non-zero otherwise.
+ * Expected to be called with the old thread lock held.
+ */
+static int h2_takeover(struct connection *conn, int orig_tid)
+{
+	struct h2c *h2c = conn->ctx;
+	struct task *task;
+	struct task *new_task;
+	struct tasklet *new_tasklet;
+
+	/* Pre-allocate tasks so that we don't have to roll back after the xprt
+	 * has been migrated.
+	 */
+	new_task = task_new_here();
+	new_tasklet = tasklet_new();
+	if (!new_task || !new_tasklet)
+		goto fail;
+
+	/* the fd must be migrated first; past this point we own it */
+	if (fd_takeover(conn->handle.fd, conn) != 0)
+		goto fail;
+
+	if (conn->xprt->takeover && conn->xprt->takeover(conn, conn->xprt_ctx, orig_tid) != 0) {
+		/* We failed to takeover the xprt, even if the connection may
+		 * still be valid, flag it as error'd, as we have already
+		 * taken over the fd, and wake the tasklet, so that it will
+		 * destroy it.
+		 */
+		conn->flags |= CO_FL_ERROR;
+		tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
+		goto fail;
+	}
+
+	if (h2c->wait_event.events)
+		h2c->conn->xprt->unsubscribe(h2c->conn, h2c->conn->xprt_ctx,
+					     h2c->wait_event.events, &h2c->wait_event);
+
+	task = h2c->task;
+	if (task) {
+		/* only assign a task if there was already one, otherwise
+		 * the preallocated new task will be released.
+		 */
+		task->context = NULL;
+		h2c->task = NULL;
+		/* make sure the old task no longer sees the context before
+		 * it is killed.
+		 */
+		__ha_barrier_store();
+		task_kill(task);
+
+		h2c->task = new_task;
+		new_task = NULL;
+		h2c->task->process = h2_timeout_task;
+		h2c->task->context = h2c;
+	}
+
+	/* To let the tasklet know it should free itself, and do nothing else,
+	 * set its context to NULL.
+	 */
+	h2c->wait_event.tasklet->context = NULL;
+	tasklet_wakeup_on(h2c->wait_event.tasklet, orig_tid);
+
+	/* install a fresh tasklet bound to this thread and resubscribe */
+	h2c->wait_event.tasklet = new_tasklet;
+	h2c->wait_event.tasklet->process = h2_io_cb;
+	h2c->wait_event.tasklet->context = h2c;
+	h2c->conn->xprt->subscribe(h2c->conn, h2c->conn->xprt_ctx,
+				   SUB_RETRY_RECV, &h2c->wait_event);
+
+	if (new_task)
+		__task_free(new_task);
+	return 0;
+ fail:
+	if (new_task)
+		__task_free(new_task);
+	tasklet_free(new_tasklet);
+	return -1;
+}
+
+/*******************************************************/
+/* functions below are dedicated to the config parsers */
+/*******************************************************/
+
+/* config parser for global "tune.h2.header-table-size" */
+/* config parser for global "tune.h2.header-table-size" */
+static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
+                                      const struct proxy *defpx, const char *file, int line,
+                                      char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* NOTE(review): atoi() ignores trailing garbage; non-numeric input
+	 * yields 0 and is rejected by the bounds check below.
+	 */
+	h2_settings_header_table_size = atoi(args[1]);
+	if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
+		memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config parser for global "tune.h2.{be.,fe.,}initial-window-size" */
+/* config parser for global "tune.h2.{be.,fe.,}initial-window-size" */
+static int h2_parse_initial_window_size(char **args, int section_type, struct proxy *curpx,
+                                        const struct proxy *defpx, const char *file, int line,
+                                        char **err)
+{
+	int *vptr;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* backend/frontend/default: args[0][8] is the character right after
+	 * "tune.h2." ('b' for "be.", 'f' for "fe.", anything else = default)
+	 */
+	vptr = (args[0][8] == 'b') ? &h2_be_settings_initial_window_size :
+	       (args[0][8] == 'f') ? &h2_fe_settings_initial_window_size :
+	       &h2_settings_initial_window_size;
+
+	*vptr = atoi(args[1]);
+	if (*vptr < 0) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config parser for global "tune.h2.{be.,fe.,}max-concurrent-streams" */
+/* config parser for global "tune.h2.{be.,fe.,}max-concurrent-streams" */
+static int h2_parse_max_concurrent_streams(char **args, int section_type, struct proxy *curpx,
+                                           const struct proxy *defpx, const char *file, int line,
+                                           char **err)
+{
+	uint *vptr;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* backend/frontend/default: args[0][8] is the character right after
+	 * "tune.h2." ('b' for "be.", 'f' for "fe.", anything else = default)
+	 */
+	vptr = (args[0][8] == 'b') ? &h2_be_settings_max_concurrent_streams :
+	       (args[0][8] == 'f') ? &h2_fe_settings_max_concurrent_streams :
+	       &h2_settings_max_concurrent_streams;
+
+	*vptr = atoi(args[1]);
+	/* the setting is unsigned: reject inputs that parsed negative */
+	if ((int)*vptr < 0) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config parser for global "tune.h2.fe.max-total-streams" */
+/* config parser for global "tune.h2.fe.max-total-streams" */
+static int h2_parse_max_total_streams(char **args, int section_type, struct proxy *curpx,
+                                      const struct proxy *defpx, const char *file, int line,
+                                      char **err)
+{
+	uint *vptr;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* frontend only for now */
+	vptr = &h2_fe_max_total_streams;
+
+	*vptr = atoi(args[1]);
+	/* the setting is unsigned: reject inputs that parsed negative */
+	if ((int)*vptr < 0) {
+		memprintf(err, "'%s' expects a positive numeric value.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/* config parser for global "tune.h2.max-frame-size" */
+/* config parser for global "tune.h2.max-frame-size" */
+static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
+                                   const struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	h2_settings_max_frame_size = atoi(args[1]);
+	/* 16384..16777215 are the bounds allowed for SETTINGS_MAX_FRAME_SIZE */
+	if (h2_settings_max_frame_size < 16384 || h2_settings_max_frame_size > 16777215) {
+		memprintf(err, "'%s' expects a numeric value between 16384 and 16777215.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+
+/* config parser for global "tune.h2.zero-copy-fwd-send" */
+/* config parser for global "tune.h2.zero-copy-fwd-send" */
+static int h2_parse_zero_copy_fwd_snd(char **args, int section_type, struct proxy *curpx,
+                                      const struct proxy *defpx, const char *file, int line,
+                                      char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	/* the global flag is a negative option ("no zero-copy forward"), so
+	 * "on" clears the bit and "off" sets it.
+	 */
+	if (strcmp(args[1], "on") == 0)
+		global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_H2_SND;
+	else if (strcmp(args[1], "off") == 0)
+		global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_H2_SND;
+	else {
+		memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
+		return -1;
+	}
+	return 0;
+}
+
+/****************************************/
+/* MUX initialization and instantiation */
+/****************************************/
+
+/* The mux operations */
+static const struct mux_ops h2_ops = {
+	.init = h2_init,
+	.wake = h2_wake,
+	.snd_buf = h2_snd_buf,
+	.rcv_buf = h2_rcv_buf,
+	.nego_fastfwd = h2_nego_ff,
+	.done_fastfwd = h2_done_ff,
+	.resume_fastfwd = h2_resume_ff,
+	.subscribe = h2_subscribe,
+	.unsubscribe = h2_unsubscribe,
+	.attach = h2_attach,
+	.get_first_sc = h2_get_first_sc,
+	.detach = h2_detach,
+	.destroy = h2_destroy,
+	.avail_streams = h2_avail_streams,
+	.used_streams = h2_used_streams,
+	.shutr = h2_shutr,
+	.shutw = h2_shutw,
+	.ctl = h2_ctl,
+	.sctl = h2_sctl,
+	.show_fd = h2_show_fd,
+	.show_sd = h2_show_sd,
+	.takeover = h2_takeover,
+	/* HTX-only mux, subject to head-of-line blocking, not upgradable,
+	 * usable on reversed connections.
+	 */
+	.flags = MX_FL_HTX|MX_FL_HOL_RISK|MX_FL_NO_UPG|MX_FL_REVERSABLE,
+	.name = "H2",
+};
+
+/* register the "h2" ALPN/proto token for HTTP mode, on both sides */
+static struct mux_proto_list mux_proto_h2 =
+	{ .token = IST("h2"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &h2_ops };
+
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_h2);
+
+/* config keyword parsers */
+/* please keep the keyword list alphabetically sorted */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.h2.be.initial-window-size",     h2_parse_initial_window_size },
+	{ CFG_GLOBAL, "tune.h2.be.max-concurrent-streams",  h2_parse_max_concurrent_streams },
+	{ CFG_GLOBAL, "tune.h2.fe.initial-window-size",     h2_parse_initial_window_size },
+	{ CFG_GLOBAL, "tune.h2.fe.max-concurrent-streams",  h2_parse_max_concurrent_streams },
+	{ CFG_GLOBAL, "tune.h2.fe.max-total-streams",       h2_parse_max_total_streams },
+	{ CFG_GLOBAL, "tune.h2.header-table-size",          h2_parse_header_table_size },
+	{ CFG_GLOBAL, "tune.h2.initial-window-size",        h2_parse_initial_window_size },
+	{ CFG_GLOBAL, "tune.h2.max-concurrent-streams",     h2_parse_max_concurrent_streams },
+	{ CFG_GLOBAL, "tune.h2.max-frame-size",             h2_parse_max_frame_size },
+	{ CFG_GLOBAL, "tune.h2.zero-copy-fwd-send",         h2_parse_zero_copy_fwd_snd },
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* initialize internal structs after the config is parsed.
+ * Returns zero on success, non-zero on error.
+ */
+/* Post-check callback: allocate the HPACK table pool once the configured
+ * table size is known. Returns ERR_NONE on success, or ERR_ALERT|ERR_FATAL
+ * on allocation failure. Declared (void) so this is a proper prototype
+ * rather than an unspecified-arguments declarator.
+ */
+static int init_h2(void)
+{
+	pool_head_hpack_tbl = create_pool("hpack_tbl",
+	                                  h2_settings_header_table_size,
+	                                  MEM_F_SHARED|MEM_F_EXACT);
+	if (!pool_head_hpack_tbl) {
+		ha_alert("failed to allocate hpack_tbl memory pool\n");
+		return (ERR_ALERT | ERR_FATAL);
+	}
+	return ERR_NONE;
+}
+
+REGISTER_POST_CHECK(init_h2);
diff --git a/src/mux_pt.c b/src/mux_pt.c
new file mode 100644
index 0000000..3cca6a1
--- /dev/null
+++ b/src/mux_pt.c
@@ -0,0 +1,904 @@
+/*
+ * Pass-through mux-demux for connections
+ *
+ * Copyright 2017 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/connection.h>
+#include <haproxy/pipe.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/trace.h>
+#include <haproxy/xref.h>
+
+/* PT mux context: one per connection, carrying at most one stream */
+struct mux_pt_ctx {
+	struct sedesc *sd;            /* descriptor of the single stream endpoint (may be orphaned) */
+	struct connection *conn;      /* underlying connection */
+	struct wait_event wait_event; /* used to subscribe to xprt events */
+};
+
+DECLARE_STATIC_POOL(pool_head_pt_ctx, "mux_pt", sizeof(struct mux_pt_ctx));
+
+/* trace source and events */
+static void pt_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ * pt_ctx - internal PT context
+ * strm - application layer
+ */
+/* each event gets a distinct bit so traces can be filtered by mask */
+static const struct trace_event pt_trace_events[] = {
+#define PT_EV_CONN_NEW (1ULL << 0)
+	{ .mask = PT_EV_CONN_NEW, .name = "pt_conn_new", .desc = "new PT connection" },
+#define PT_EV_CONN_WAKE (1ULL << 1)
+	{ .mask = PT_EV_CONN_WAKE, .name = "pt_conn_wake", .desc = "PT connection woken up" },
+#define PT_EV_CONN_END (1ULL << 2)
+	{ .mask = PT_EV_CONN_END, .name = "pt_conn_end", .desc = "PT connection terminated" },
+#define PT_EV_CONN_ERR (1ULL << 3)
+	{ .mask = PT_EV_CONN_ERR, .name = "pt_conn_err", .desc = "error on PT connection" },
+#define PT_EV_STRM_NEW (1ULL << 4)
+	{ .mask = PT_EV_STRM_NEW, .name = "strm_new", .desc = "app-layer stream creation" },
+#define PT_EV_STRM_SHUT (1ULL << 5)
+	{ .mask = PT_EV_STRM_SHUT, .name = "strm_shut", .desc = "stream shutdown" },
+#define PT_EV_STRM_END (1ULL << 6)
+	{ .mask = PT_EV_STRM_END, .name = "strm_end", .desc = "detaching app-layer stream" },
+#define PT_EV_STRM_ERR (1ULL << 7)
+	{ .mask = PT_EV_STRM_ERR, .name = "strm_err", .desc = "stream error" },
+#define PT_EV_RX_DATA (1ULL << 8)
+	{ .mask = PT_EV_RX_DATA, .name = "pt_rx_data", .desc = "Rx on PT connection" },
+#define PT_EV_TX_DATA (1ULL << 9)
+	{ .mask = PT_EV_TX_DATA, .name = "pt_tx_data", .desc = "Tx on PT connection" },
+
+	{}
+};
+
+
+/* verbosity levels, from least to most detailed */
+static const struct name_desc pt_trace_decoding[] = {
+#define PT_VERB_CLEAN 1
+	{ .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define PT_VERB_MINIMAL 2
+	{ .name="minimal", .desc="report only h1c/h1s state and flags, no real decoding" },
+#define PT_VERB_SIMPLE 3
+	{ .name="simple", .desc="add request/response status line or htx info when available" },
+#define PT_VERB_ADVANCED 4
+	{ .name="advanced", .desc="add header fields or frame decoding when available" },
+#define PT_VERB_COMPLETE 5
+	{ .name="complete", .desc="add full data dump when available" },
+	{ /* end */ }
+};
+
+static struct trace_source trace_pt __read_mostly = {
+	.name = IST("pt"),
+	.desc = "Passthrough multiplexer",
+	.arg_def = TRC_ARG1_CONN, // TRACE()'s first argument is always a connection
+	.default_cb = pt_trace,
+	.known_events = pt_trace_events,
+	.lockon_args = NULL,
+	.decoding = pt_trace_decoding,
+	.report_events = ~0, // report everything by default
+};
+
+/* default trace source used by the TRACE_* macros in this file */
+#define TRACE_SOURCE &trace_pt
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* returns the stconn associated to the stream */
+static forceinline struct stconn *pt_sc(const struct mux_pt_ctx *pt)
+{
+	/* the single endpoint's stream connector; may be NULL when no
+	 * stream is attached.
+	 */
+	return pt->sd->sc;
+}
+
+/* dump up to <len> bytes of <buf> starting at relative offset <ofs> into the
+ * trace buffer, in two chunks when the requested area wraps past the end of
+ * the buffer's storage.
+ */
+static inline void pt_trace_buf(const struct buffer *buf, size_t ofs, size_t len)
+{
+	size_t block1, block2;
+	int line, ptr, newptr;
+
+	/* block1 = contiguous part up to the wrapping point, block2 = rest */
+	block1 = b_contig_data(buf, ofs);
+	block2 = 0;
+	if (block1 > len)
+		block1 = len;
+	block2 = len - block1;
+
+	ofs = b_peek_ofs(buf, ofs);
+
+	line = 0;
+	ptr = ofs;
+	while (ptr < ofs + block1) {
+		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), ofs + block1, &line, ptr);
+		if (newptr == ptr)
+			break;
+		ptr = newptr;
+	}
+
+	/* wrapped part restarts at the buffer's origin */
+	line = ptr = 0;
+	while (ptr < block2) {
+		newptr = dump_text_line(&trace_buf, b_orig(buf), b_size(buf), block2, &line, ptr);
+		if (newptr == ptr)
+			break;
+		ptr = newptr;
+	}
+}
+
+/* the PT traces always expect that arg1, if non-null, is of type connection
+ * (from which we can derive the pt context), that arg2, if non-null, is a
+ * stream connector, and that arg3, if non-null, is a buffer.
+ */
+static void pt_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+                     const struct ist where, const struct ist func,
+                     const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct connection *conn = a1;
+	const struct mux_pt_ctx *ctx = conn ? conn->ctx : NULL;
+	const struct stconn *sc = a2;
+	const struct buffer *buf = a3;
+	const size_t *val = a4;
+
+	if (!ctx || src->verbosity < PT_VERB_CLEAN)
+		return;
+
+	/* Display frontend/backend info by default */
+	chunk_appendf(&trace_buf, " : [%c]", (conn_is_back(conn) ? 'B' : 'F'));
+
+	if (src->verbosity == PT_VERB_CLEAN)
+		return;
+
+	/* fall back to the context's SC when the caller didn't pass one */
+	if (!sc)
+		sc = pt_sc(ctx);
+
+	/* Display the value to the 4th argument (level > STATE) */
+	if (src->level > TRACE_LEVEL_STATE && val)
+		chunk_appendf(&trace_buf, " - VAL=%lu", (long)*val);
+
+	/* Display conn and sc info, if defined (pointer + flags) */
+	chunk_appendf(&trace_buf, " - conn=%p(0x%08x)", conn, conn->flags);
+	chunk_appendf(&trace_buf, " sd=%p(0x%08x)", ctx->sd, se_fl_get(ctx->sd));
+	if (sc)
+		chunk_appendf(&trace_buf, " sc=%p(0x%08x)", sc, sc->flags);
+
+	if (src->verbosity == PT_VERB_MINIMAL)
+		return;
+
+	/* Display buffer info, if defined (level > USER & verbosity > SIMPLE) */
+	if (src->level > TRACE_LEVEL_USER && buf) {
+		int full = 0, max = 3000, chunk = 1024;
+
+		/* Full info (level > STATE && verbosity > SIMPLE) */
+		if (src->level > TRACE_LEVEL_STATE) {
+			if (src->verbosity == PT_VERB_COMPLETE)
+				full = 1;
+			else if (src->verbosity == PT_VERB_ADVANCED) {
+				full = 1;
+				max = 256;
+				chunk = 64;
+			}
+		}
+
+		chunk_appendf(&trace_buf, " buf=%u@%p+%u/%u",
+			      (unsigned int)b_data(buf), b_orig(buf),
+			      (unsigned int)b_head_ofs(buf), (unsigned int)b_size(buf));
+
+		if (b_data(buf) && full) {
+			chunk_memcat(&trace_buf, "\n", 1);
+			if (b_data(buf) < max)
+				pt_trace_buf(buf, 0, b_data(buf));
+			else {
+				/* large buffer: dump the first and last <chunk> bytes only */
+				pt_trace_buf(buf, 0, chunk);
+				chunk_memcat(&trace_buf, "  ...\n", 6);
+				pt_trace_buf(buf, b_data(buf) - chunk, chunk);
+			}
+		}
+	}
+}
+
+/* release the PT context <ctx> and, if the connection still belongs to this
+ * mux, close and free the connection as well.
+ */
+static void mux_pt_destroy(struct mux_pt_ctx *ctx)
+{
+	struct connection *conn = NULL;
+
+	TRACE_POINT(PT_EV_CONN_END);
+
+	/* The connection must be attached to this mux to be released */
+	if (ctx->conn && ctx->conn->ctx == ctx)
+		conn = ctx->conn;
+
+	tasklet_free(ctx->wait_event.tasklet);
+
+	if (conn && ctx->wait_event.events != 0)
+		conn->xprt->unsubscribe(conn, conn->xprt_ctx, ctx->wait_event.events,
+					&ctx->wait_event);
+	/* a non-orphan endpoint must never be freed from here */
+	BUG_ON(ctx->sd && !se_fl_test(ctx->sd, SE_FL_ORPHAN));
+	sedesc_free(ctx->sd);
+	pool_free(pool_head_pt_ctx, ctx);
+
+	if (conn) {
+		conn->mux = NULL;
+		conn->ctx = NULL;
+		TRACE_DEVEL("freeing conn", PT_EV_CONN_END, conn);
+
+		conn_stop_tracking(conn);
+		conn_full_close(conn);
+		if (conn->destroy_cb)
+			conn->destroy_cb(conn);
+		conn_free(conn);
+	}
+}
+
+/* Callback, used when we get I/Os while in idle mode. This one is exported so
+ * that "show fd" can resolve it.
+ */
+struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
+{
+	struct mux_pt_ctx *ctx = tctx;
+
+	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
+	if (!se_fl_test(ctx->sd, SE_FL_ORPHAN)) {
+		/* There's a small race condition.
+		 * mux_pt_io_cb() is only supposed to be called if we have no
+		 * stream attached. However, maybe the tasklet got woken up,
+		 * and this connection was then attached to a new stream.
+		 * If this happened, just wake the tasklet up if anybody
+		 * subscribed to receive events, and otherwise call the wake
+		 * method, to make sure the event is noticed.
+		 */
+		if (ctx->conn->subs) {
+			ctx->conn->subs->events = 0;
+			tasklet_wakeup(ctx->conn->subs->tasklet);
+			ctx->conn->subs = NULL;
+		} else if (pt_sc(ctx)->app_ops->wake)
+			pt_sc(ctx)->app_ops->wake(pt_sc(ctx));
+		TRACE_DEVEL("leaving waking up SC", PT_EV_CONN_WAKE, ctx->conn);
+		return t;
+	}
+	/* orphaned endpoint: drain pending data, then kill the connection
+	 * if it reported an error or a shutdown, otherwise wait for more
+	 * events.
+	 */
+	conn_ctrl_drain(ctx->conn);
+	if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)) {
+		TRACE_DEVEL("leaving destroying pt context", PT_EV_CONN_WAKE, ctx->conn);
+		mux_pt_destroy(ctx);
+		t = NULL;
+	}
+	else {
+		ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
+					   &ctx->wait_event);
+		TRACE_DEVEL("leaving subscribing for reads", PT_EV_CONN_WAKE, ctx->conn);
+	}
+
+	return t;
+}
+
+/* Initialize the mux once it's attached. It is expected that conn->ctx points
+ * to the existing stream connector (for outgoing connections) or NULL (for
+ * incoming ones, in which case one will be allocated and a new stream will be
+ * instantiated). Returns < 0 on error.
+ */
+static int mux_pt_init(struct connection *conn, struct proxy *prx, struct session *sess,
+                       struct buffer *input)
+{
+	struct stconn *sc = conn->ctx;
+	struct mux_pt_ctx *ctx = pool_alloc(pool_head_pt_ctx);
+
+	TRACE_ENTER(PT_EV_CONN_NEW);
+
+	if (!ctx) {
+		TRACE_ERROR("PT context allocation failure", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
+		goto fail;
+	}
+
+	ctx->wait_event.tasklet = tasklet_new();
+	if (!ctx->wait_event.tasklet)
+		goto fail_free_ctx;
+	ctx->wait_event.tasklet->context = ctx;
+	ctx->wait_event.tasklet->process = mux_pt_io_cb;
+	ctx->wait_event.events = 0;
+	ctx->conn = conn;
+
+	if (!sc) {
+		/* incoming connection: allocate an orphan endpoint then
+		 * instantiate a new stream on top of it.
+		 */
+		ctx->sd = sedesc_new();
+		if (!ctx->sd) {
+			TRACE_ERROR("SC allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
+			goto fail_free_ctx;
+		}
+		ctx->sd->se = ctx;
+		ctx->sd->conn = conn;
+		se_fl_set(ctx->sd, SE_FL_T_MUX | SE_FL_ORPHAN);
+
+		sc = sc_new_from_endp(ctx->sd, sess, input);
+		if (!sc) {
+			TRACE_ERROR("SC allocation failure", PT_EV_STRM_NEW|PT_EV_STRM_END|PT_EV_STRM_ERR, conn);
+			goto fail_free_sd;
+		}
+		TRACE_POINT(PT_EV_STRM_NEW, conn, sc);
+	}
+	else {
+		/* outgoing connection: attach to the existing SC */
+		if (sc_attach_mux(sc, ctx, conn) < 0)
+			goto fail_free_ctx;
+		ctx->sd = sc->sedesc;
+	}
+	conn->ctx = ctx;
+	se_fl_set(ctx->sd, SE_FL_RCV_MORE);
+	/* advertise zero-copy fast-forwarding when splicing is enabled */
+	if ((global.tune.options & GTUNE_USE_SPLICE) && !(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_PT))
+		se_fl_set(ctx->sd, SE_FL_MAY_FASTFWD_PROD|SE_FL_MAY_FASTFWD_CONS);
+
+	TRACE_LEAVE(PT_EV_CONN_NEW, conn);
+	return 0;
+
+ fail_free_sd:
+	sedesc_free(ctx->sd);
+ fail_free_ctx:
+	tasklet_free(ctx->wait_event.tasklet);
+	pool_free(pool_head_pt_ctx, ctx);
+ fail:
+	TRACE_DEVEL("leaving in error", PT_EV_CONN_NEW|PT_EV_CONN_END|PT_EV_CONN_ERR);
+	return -1;
+}
+
+/* callback to be used by default for the pass-through mux. It calls the data
+ * layer wake() callback if it is set otherwise returns 0.
+ */
+static int mux_pt_wake(struct connection *conn)
+{
+	struct mux_pt_ctx *ctx = conn->ctx;
+	int ret = 0;
+
+	TRACE_ENTER(PT_EV_CONN_WAKE, ctx->conn);
+	if (!se_fl_test(ctx->sd, SE_FL_ORPHAN)) {
+		/* a stream is attached: let the app layer handle the event */
+		ret = pt_sc(ctx)->app_ops->wake ? pt_sc(ctx)->app_ops->wake(pt_sc(ctx)) : 0;
+
+		if (ret < 0) {
+			TRACE_DEVEL("leaving waking up SC", PT_EV_CONN_WAKE, ctx->conn);
+			return ret;
+		}
+	} else {
+		/* orphaned: drain and destroy on error/shutdown */
+		conn_ctrl_drain(conn);
+		if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH)) {
+			TRACE_DEVEL("leaving destroying PT context", PT_EV_CONN_WAKE, ctx->conn);
+			mux_pt_destroy(ctx);
+			return -1;
+		}
+	}
+
+	/* If we had early data, and we're done with the handshake
+	 * then we know the data are safe, and we can remove the flag.
+	 */
+	if ((conn->flags & (CO_FL_EARLY_DATA | CO_FL_EARLY_SSL_HS | CO_FL_WAIT_XPRT)) ==
+	    CO_FL_EARLY_DATA)
+		conn->flags &= ~CO_FL_EARLY_DATA;
+
+	TRACE_LEAVE(PT_EV_CONN_WAKE, ctx->conn);
+	return ret;
+}
+
+/*
+ * Attach a new stream to a connection
+ * (Used for outgoing connections)
+ */
+static int mux_pt_attach(struct connection *conn, struct sedesc *sd, struct session *sess)
+{
+	struct mux_pt_ctx *ctx = conn->ctx;
+
+	TRACE_ENTER(PT_EV_STRM_NEW, conn);
+	/* drop the idle-mode read subscription before taking the stream */
+	if (ctx->wait_event.events)
+		conn->xprt->unsubscribe(ctx->conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
+	if (sc_attach_mux(sd->sc, ctx, conn) < 0)
+		return -1;
+	ctx->sd = sd;
+	se_fl_set(ctx->sd, SE_FL_RCV_MORE);
+	/* advertise zero-copy fast-forwarding when splicing is enabled */
+	if ((global.tune.options & GTUNE_USE_SPLICE) && !(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_PT))
+		se_fl_set(ctx->sd, SE_FL_MAY_FASTFWD_PROD|SE_FL_MAY_FASTFWD_CONS);
+
+	TRACE_LEAVE(PT_EV_STRM_NEW, conn, sd->sc);
+	return 0;
+}
+
+/* Retrieves a valid stream connector from this connection, or returns NULL.
+ * For this mux, it's easy as we can only store a single stream connector.
+ */
+static struct stconn *mux_pt_get_first_sc(const struct connection *conn)
+{
+	const struct mux_pt_ctx *pt_ctx = conn->ctx;
+
+	/* a PT connection carries a single endpoint: return its SC directly */
+	return pt_ctx->sd->sc;
+}
+
+/* Destroy the mux and the associated connection if still attached to this mux
+ * and no longer used */
+/* Destroy the mux and the associated connection if still attached to this mux
+ * and no longer used */
+static void mux_pt_destroy_meth(void *ctx)
+{
+	struct mux_pt_ctx *pt = ctx;
+
+	TRACE_POINT(PT_EV_CONN_END, pt->conn, pt_sc(pt));
+	if (se_fl_test(pt->sd, SE_FL_ORPHAN) || pt->conn->ctx != pt) {
+		if (pt->conn->ctx != pt) {
+			/* the connection was taken over by another mux: drop
+			 * our sd reference (presumably owned elsewhere now —
+			 * verify against the takeover path) before freeing.
+			 */
+			pt->sd = NULL;
+		}
+		mux_pt_destroy(pt);
+	}
+}
+
+/*
+ * Detach the stream from the connection and possibly release the connection.
+ */
+static void mux_pt_detach(struct sedesc *sd)
+{
+ struct connection *conn = sd->conn;
+ struct mux_pt_ctx *ctx;
+
+ TRACE_ENTER(PT_EV_STRM_END, conn, sd->sc);
+
+ ctx = conn->ctx;
+
+ /* Subscribe, to know if we got disconnected */
+ if (!conn_is_back(conn) && conn->owner != NULL &&
+ !(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
+ /* Frontend connection still healthy and owned by a session:
+ * keep it alive and watch for a read event (e.g. a close).
+ */
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
+ } else {
+ /* There's no session attached to that connection, destroy it */
+ TRACE_DEVEL("killing dead connection", PT_EV_STRM_END, conn, sd->sc);
+ mux_pt_destroy(ctx);
+ }
+
+ TRACE_LEAVE(PT_EV_STRM_END);
+}
+
+/* returns the number of streams in use on a connection */
+static int mux_pt_used_streams(struct connection *conn)
+{
+ struct mux_pt_ctx *ctx = conn->ctx;
+
+ /* Single-stream mux: 1 while a stream is attached, 0 once orphaned. */
+ return (!se_fl_test(ctx->sd, SE_FL_ORPHAN) ? 1 : 0);
+}
+
+/* returns the number of streams still available on a connection */
+static int mux_pt_avail_streams(struct connection *conn)
+{
+ /* Capacity is exactly one stream, so availability is the complement. */
+ return 1 - mux_pt_used_streams(conn);
+}
+
+/* Shut down the read side of the stream. With CO_SHR_DRAIN, pending incoming
+ * data are drained before closing. If the write side was already shut
+ * (SE_FL_SHW), the connection is fully closed.
+ */
+static void mux_pt_shutr(struct stconn *sc, enum co_shr_mode mode)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+
+ TRACE_ENTER(PT_EV_STRM_SHUT, conn, sc);
+
+ se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ if (conn_xprt_ready(conn) && conn->xprt->shutr)
+ conn->xprt->shutr(conn, conn->xprt_ctx,
+ (mode == CO_SHR_DRAIN));
+ else if (mode == CO_SHR_DRAIN)
+ conn_ctrl_drain(conn);
+ if (se_fl_test(ctx->sd, SE_FL_SHW))
+ conn_full_close(conn);
+
+ TRACE_LEAVE(PT_EV_STRM_SHUT, conn, sc);
+}
+
+/* Shut down the write side of the stream. CO_SHW_NORMAL performs a clean
+ * shutdown. If the read side was already shut (SE_FL_SHR), the connection is
+ * fully closed instead of half-closed.
+ */
+static void mux_pt_shutw(struct stconn *sc, enum co_shw_mode mode)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+
+ TRACE_ENTER(PT_EV_STRM_SHUT, conn, sc);
+
+ if (conn_xprt_ready(conn) && conn->xprt->shutw)
+ conn->xprt->shutw(conn, conn->xprt_ctx,
+ (mode == CO_SHW_NORMAL));
+ if (!se_fl_test(ctx->sd, SE_FL_SHR))
+ conn_sock_shutw(conn, (mode == CO_SHW_NORMAL));
+ else
+ conn_full_close(conn);
+
+ TRACE_LEAVE(PT_EV_STRM_SHUT, conn, sc);
+}
+
+/*
+ * Called from the upper layer, to get more data
+ *
+ * The caller is responsible for defragmenting <buf> if necessary. But <flags>
+ * must be tested to know the calling context. If CO_RFL_BUF_FLUSH is set, it
+ * means the caller wants to flush input data (from the mux buffer and the
+ * channel buffer) to be able to use kernel splicing or any kind of mux-to-mux
+ * xfer. If CO_RFL_KEEP_RECV is set, the mux must always subscribe for read
+ * events before giving back. CO_RFL_BUF_WET is set if <buf> is congested with
+ * data scheduled for leaving soon. CO_RFL_BUF_NOT_STUCK is set to instruct the
+ * mux it may optimize the data copy to <buf> if necessary. Otherwise, it should
+ * copy as much data as possible.
+ *
+ * Returns the number of bytes received; endpoint flags report errors/EOS.
+ */
+static size_t mux_pt_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ size_t ret = 0;
+
+ TRACE_ENTER(PT_EV_RX_DATA, conn, sc, buf, (size_t[]){count});
+
+ if (!count) {
+ /* Caller offered no room: report that more data are pending
+ * and that room is needed.
+ */
+ se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ goto end;
+ }
+ b_realign_if_empty(buf);
+ ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
+ if (conn->flags & CO_FL_ERROR) {
+ se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ if (conn_xprt_read0_pending(conn))
+ se_fl_set(ctx->sd, SE_FL_EOS);
+ se_fl_set(ctx->sd, SE_FL_ERROR);
+ TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, sc);
+ }
+ else if (conn_xprt_read0_pending(conn)) {
+ /* Clean shutdown from the peer: both end-of-input and
+ * end-of-stream are reported.
+ */
+ se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ se_fl_set(ctx->sd, (SE_FL_EOI|SE_FL_EOS));
+ TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, sc);
+ }
+ end:
+ TRACE_LEAVE(PT_EV_RX_DATA, conn, sc, buf, (size_t[]){ret});
+ return ret;
+}
+
+/* Called from the upper layer, to send data. Forwards up to <count> bytes of
+ * <buf> to the transport layer and removes what was sent from <buf>. Returns
+ * the number of bytes effectively sent; errors are reported via endpoint
+ * flags.
+ */
+static size_t mux_pt_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, int flags)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ size_t ret;
+
+ TRACE_ENTER(PT_EV_TX_DATA, conn, sc, buf, (size_t[]){count});
+
+ ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, count, flags);
+
+ if (ret > 0)
+ b_del(buf, ret);
+
+ if (conn->flags & CO_FL_ERROR) {
+ if (conn_xprt_read0_pending(conn))
+ se_fl_set(ctx->sd, SE_FL_EOS);
+ se_fl_set_error(ctx->sd);
+ TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
+ }
+
+ TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, buf, (size_t[]){ret});
+ return ret;
+}
+
+/* Returns the stream endpoint descriptor of the opposite side of the stream
+ * (via the sedesc xref peer), or NULL if the peer is not reachable (not set
+ * or currently locked away).
+ */
+static inline struct sedesc *mux_pt_opposite_sd(struct mux_pt_ctx *ctx)
+{
+ struct xref *peer;
+ struct sedesc *sdo;
+
+ peer = xref_get_peer_and_lock(&ctx->sd->xref);
+ if (!peer)
+ return NULL;
+
+ sdo = container_of(peer, struct sedesc, xref);
+ xref_unlock(&ctx->sd->xref, peer);
+ return sdo;
+}
+
+/* Negotiate fast-forwarding of <count> bytes for stream <sc>. When splicing
+ * is possible (<may_splice> set, no buffered input) a pipe is obtained and
+ * attached to the endpoint iobuf. Returns the number of bytes that may be
+ * fast-forwarded (0 when no zero-copy path could be set up).
+ */
+static size_t mux_pt_nego_ff(struct stconn *sc, struct buffer *input, size_t count, unsigned int may_splice)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ size_t ret = 0;
+
+ TRACE_ENTER(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){count});
+
+ /* Use kernel splicing if it is supported by the sender and if there
+ * are no input data _AND_ no output data.
+ *
+ * TODO: It may be good to add a flag to send obuf data first if any,
+ * and then data in pipe, or the opposite. For now, it is not
+ * supported to mix data.
+ */
+ if (!b_data(input) && may_splice) {
+ /* Reuse an existing pipe or allocate a new one if the global
+ * pipe budget allows it.
+ */
+ if (conn->xprt->snd_pipe && (ctx->sd->iobuf.pipe || (pipes_used < global.maxpipes && (ctx->sd->iobuf.pipe = get_pipe())))) {
+ ctx->sd->iobuf.offset = 0;
+ ctx->sd->iobuf.data = 0;
+ ret = count;
+ goto out;
+ }
+ ctx->sd->iobuf.flags |= IOBUF_FL_NO_SPLICING;
+ TRACE_DEVEL("Unable to allocate pipe for splicing, fallback to buffer", PT_EV_TX_DATA, conn, sc);
+ }
+
+ /* No buffer case */
+
+ out:
+ TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){ret});
+ return ret;
+}
+
+/* Finish a fast-forward operation: flush the pipe attached to the endpoint
+ * iobuf to the transport layer and release it once empty. Returns the number
+ * of bytes sent from the pipe.
+ */
+static size_t mux_pt_done_ff(struct stconn *sc)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ struct sedesc *sd = ctx->sd;
+ size_t total = 0;
+
+ TRACE_ENTER(PT_EV_TX_DATA, conn, sc);
+
+ if (sd->iobuf.pipe) {
+ total = conn->xprt->snd_pipe(conn, conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
+ if (!sd->iobuf.pipe->data) {
+ put_pipe(sd->iobuf.pipe);
+ sd->iobuf.pipe = NULL;
+ }
+ }
+ else {
+ /* Buffered fast-forward is not supported by this mux. */
+ BUG_ON(sd->iobuf.buf);
+ }
+
+ /* NOTE(review): no goto targets this label in the visible code;
+ * it may be a leftover (compilers warn with -Wunused-label).
+ */
+ out:
+ if (conn->flags & CO_FL_ERROR) {
+ if (conn_xprt_read0_pending(conn))
+ se_fl_set(ctx->sd, SE_FL_EOS);
+ se_fl_set_error(ctx->sd);
+ TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
+ }
+
+ TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){total});
+ return total;
+}
+
+/* Receive-side fast-forwarding: negotiate a zero-copy transfer with the
+ * opposite endpoint, then splice up to <count> bytes from the connection into
+ * the peer's pipe. Returns the number of bytes forwarded, 0 when blocked, or
+ * -1 to request a fallback to buffered mode.
+ */
+static int mux_pt_fastfwd(struct stconn *sc, unsigned int count, unsigned int flags)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ struct sedesc *sdo = NULL;
+ size_t total = 0, try = 0;
+ int ret = 0;
+
+ TRACE_ENTER(PT_EV_RX_DATA, conn, sc, 0, (size_t[]){count});
+
+ se_fl_clr(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ conn->flags &= ~CO_FL_WAIT_ROOM;
+ sdo = mux_pt_opposite_sd(ctx);
+ if (!sdo) {
+ TRACE_STATE("Opposite endpoint not available yet", PT_EV_RX_DATA, conn, sc);
+ goto out;
+ }
+
+ /* Ask the consumer how much it can take, allowing splicing only when
+ * the transport supports rcv_pipe and the consumer did not veto it.
+ */
+ try = se_nego_ff(sdo, &BUF_NULL, count, conn->xprt->rcv_pipe && !!(flags & CO_RFL_MAY_SPLICE) && !(sdo->iobuf.flags & IOBUF_FL_NO_SPLICING));
+ if (sdo->iobuf.flags & IOBUF_FL_NO_FF) {
+ /* Fast forwarding is not supported by the consumer */
+ se_fl_clr(ctx->sd, SE_FL_MAY_FASTFWD_PROD);
+ TRACE_DEVEL("Fast-forwarding not supported by opposite endpoint, disable it", PT_EV_RX_DATA, conn, sc);
+ goto end;
+ }
+ if (sdo->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
+ se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ TRACE_STATE("waiting for more room", PT_EV_RX_DATA|PT_EV_STRM_ERR, conn, sc);
+ goto out;
+ }
+
+ total += sdo->iobuf.data;
+
+ if (sdo->iobuf.pipe) {
+ /* Here, no data was transferred yet */
+ ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, sdo->iobuf.pipe, try);
+ if (ret < 0) {
+ TRACE_ERROR("Error when trying to fast-forward data, disable it and abort",
+ PT_EV_RX_DATA|PT_EV_STRM_ERR|PT_EV_CONN_ERR, conn, sc);
+ se_fl_clr(ctx->sd, SE_FL_MAY_FASTFWD_PROD);
+ BUG_ON(sdo->iobuf.pipe->data);
+ put_pipe(sdo->iobuf.pipe);
+ sdo->iobuf.pipe = NULL;
+ goto end;
+ }
+ total += ret;
+ }
+ else {
+ BUG_ON(sdo->iobuf.buf);
+ ret = -1; /* abort splicing for now and fallback to buffer mode */
+ goto end;
+ }
+
+ ret = total;
+ se_done_ff(sdo);
+
+ if (sdo->iobuf.pipe) {
+ /* Data remain in the pipe: more to forward next time. */
+ se_fl_set(ctx->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ }
+
+ TRACE_DEVEL("Data fast-forwarded", PT_EV_RX_DATA, conn, sc, 0, (size_t[]){ret});
+
+
+ out:
+ if (conn->flags & CO_FL_ERROR) {
+ if (conn_xprt_read0_pending(conn))
+ se_fl_set(ctx->sd, SE_FL_EOS);
+ se_fl_set(ctx->sd, SE_FL_ERROR);
+ TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, sc);
+ }
+ else if (conn_xprt_read0_pending(conn)) {
+ se_fl_set(ctx->sd, (SE_FL_EOS|SE_FL_EOI));
+ TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, sc);
+ }
+ end:
+ TRACE_LEAVE(PT_EV_RX_DATA, conn, sc, 0, (size_t[]){ret});
+ return ret;
+}
+
+/* Resume a previously blocked fast-forward: retry sending the data still
+ * pending in the endpoint pipe and release it when drained. Returns the
+ * number of bytes sent.
+ */
+static int mux_pt_resume_fastfwd(struct stconn *sc, unsigned int flags)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct mux_pt_ctx *ctx = conn->ctx;
+ struct sedesc *sd = ctx->sd;
+ size_t total = 0;
+
+ TRACE_ENTER(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){flags});
+
+ if (sd->iobuf.pipe) {
+ total = conn->xprt->snd_pipe(conn, conn->xprt_ctx, sd->iobuf.pipe, sd->iobuf.pipe->data);
+ if (!sd->iobuf.pipe->data) {
+ put_pipe(sd->iobuf.pipe);
+ sd->iobuf.pipe = NULL;
+ }
+ }
+ else {
+ /* Buffered fast-forward is not supported by this mux. */
+ BUG_ON(sd->iobuf.buf);
+ }
+
+ /* NOTE(review): no goto targets this label in the visible code. */
+ out:
+ if (conn->flags & CO_FL_ERROR) {
+ if (conn_xprt_read0_pending(conn))
+ se_fl_set(ctx->sd, SE_FL_EOS);
+ se_fl_set_error(ctx->sd);
+ TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, sc);
+ }
+
+ TRACE_LEAVE(PT_EV_TX_DATA, conn, sc, 0, (size_t[]){total});
+ return total;
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int mux_pt_subscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct connection *conn = __sc_conn(sc);
+
+ TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, sc, 0, (size_t[]){event_type});
+ /* Pass-through mux: delegate directly to the transport layer. */
+ return conn->xprt->subscribe(conn, conn->xprt_ctx, event_type, es);
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int mux_pt_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct connection *conn = __sc_conn(sc);
+
+ TRACE_POINT(PT_EV_RX_DATA|PT_EV_TX_DATA, conn, sc, 0, (size_t[]){event_type});
+ /* Pass-through mux: delegate directly to the transport layer. */
+ return conn->xprt->unsubscribe(conn, conn->xprt_ctx, event_type, es);
+}
+
+/* Connection-level control operations. MUX_CTL_STATUS reports readiness once
+ * the transport handshake is over; unknown operations return -1.
+ */
+static int mux_pt_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *output)
+{
+ int ret = 0;
+ switch (mux_ctl) {
+ case MUX_CTL_STATUS:
+ if (!(conn->flags & CO_FL_WAIT_XPRT))
+ ret |= MUX_STATUS_READY;
+ return ret;
+ case MUX_CTL_EXIT_STATUS:
+ return MUX_ES_UNKNOWN;
+ default:
+ return -1;
+ }
+}
+
+/* Stream-level control operations. MUX_SCTL_SID always reports stream id 0
+ * since this mux carries a single stream; unknown operations return -1.
+ */
+static int mux_pt_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
+{
+ int ret = 0;
+
+ switch (mux_sctl) {
+ case MUX_SCTL_SID:
+ if (output)
+ *((int64_t *)output) = 0;
+ return ret;
+
+ default:
+ return -1;
+ }
+}
+
+/* config parser for global "tune.pt.zero-copy-forwarding"; accepts "on" or
+ * "off" and toggles the NO_ZERO_COPY_FWD_PT bit accordingly. Returns 0 on
+ * success, -1 on error with <err> filled.
+ */
+static int cfg_parse_pt_zero_copy_fwd(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global.tune.no_zero_copy_fwd &= ~NO_ZERO_COPY_FWD_PT;
+ else if (strcmp(args[1], "off") == 0)
+ global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD_PT;
+ else {
+ memprintf(err, "'%s' expects 'on' or 'off'.", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+
+/* config keyword parsers, registered at startup via INITCALL */
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.pt.zero-copy-forwarding", cfg_parse_pt_zero_copy_fwd },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+
+/* The mux operations (default TCP variant, registered under the empty token;
+ * upgradeable to another mux, unlike mux_pt_ops below)
+ */
+const struct mux_ops mux_tcp_ops = {
+ .init = mux_pt_init,
+ .wake = mux_pt_wake,
+ .rcv_buf = mux_pt_rcv_buf,
+ .snd_buf = mux_pt_snd_buf,
+ .nego_fastfwd = mux_pt_nego_ff,
+ .done_fastfwd = mux_pt_done_ff,
+ .fastfwd = mux_pt_fastfwd,
+ .resume_fastfwd = mux_pt_resume_fastfwd,
+ .subscribe = mux_pt_subscribe,
+ .unsubscribe = mux_pt_unsubscribe,
+ .attach = mux_pt_attach,
+ .get_first_sc = mux_pt_get_first_sc,
+ .detach = mux_pt_detach,
+ .avail_streams = mux_pt_avail_streams,
+ .used_streams = mux_pt_used_streams,
+ .destroy = mux_pt_destroy_meth,
+ .ctl = mux_pt_ctl,
+ .sctl = mux_pt_sctl,
+ .shutr = mux_pt_shutr,
+ .shutw = mux_pt_shutw,
+ .flags = MX_FL_NONE,
+ .name = "PASS",
+};
+
+
+const struct mux_ops mux_pt_ops = {
+ .init = mux_pt_init,
+ .wake = mux_pt_wake,
+ .rcv_buf = mux_pt_rcv_buf,
+ .snd_buf = mux_pt_snd_buf,
+ .nego_fastfwd = mux_pt_nego_ff,
+ .done_fastfwd = mux_pt_done_ff,
+ .fastfwd = mux_pt_fastfwd,
+ .resume_fastfwd = mux_pt_resume_fastfwd,
+ .subscribe = mux_pt_subscribe,
+ .unsubscribe = mux_pt_unsubscribe,
+ .attach = mux_pt_attach,
+ .get_first_sc = mux_pt_get_first_sc,
+ .detach = mux_pt_detach,
+ .avail_streams = mux_pt_avail_streams,
+ .used_streams = mux_pt_used_streams,
+ .destroy = mux_pt_destroy_meth,
+ .ctl = mux_pt_ctl,
+ .sctl = mux_pt_sctl,
+ .shutr = mux_pt_shutr,
+ .shutw = mux_pt_shutw,
+ .flags = MX_FL_NONE|MX_FL_NO_UPG,
+ .name = "PASS",
+};
+
+/* PROT selection : default mux has empty name */
+static struct mux_proto_list mux_proto_none =
+ { .token = IST("none"), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_pt_ops };
+static struct mux_proto_list mux_proto_tcp =
+ { .token = IST(""), .mode = PROTO_MODE_TCP, .side = PROTO_SIDE_BOTH, .mux = &mux_tcp_ops };
+
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_none);
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_tcp);
diff --git a/src/mux_quic.c b/src/mux_quic.c
new file mode 100644
index 0000000..de87368
--- /dev/null
+++ b/src/mux_quic.c
@@ -0,0 +1,3067 @@
+#include <haproxy/mux_quic.h>
+
+#include <import/eb64tree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/h3.h>
+#include <haproxy/list.h>
+#include <haproxy/ncbuf.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/qmux_http.h>
+#include <haproxy/qmux_trace.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_stream.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/ssl_sock-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/time.h>
+#include <haproxy/trace.h>
+#include <haproxy/xref.h>
+
+/* memory pools for QUIC mux connection and stream contexts */
+DECLARE_POOL(pool_head_qcc, "qcc", sizeof(struct qcc));
+DECLARE_POOL(pool_head_qcs, "qcs", sizeof(struct qcs));
+
+/* Release the non-contiguous Rx buffer <ncbuf> of stream <qcs>, if allocated,
+ * give the area back to the buffer pool and clear the demux-full blocking
+ * flag.
+ */
+static void qcs_free_ncbuf(struct qcs *qcs, struct ncbuf *ncbuf)
+{
+ struct buffer buf;
+
+ if (ncb_is_null(ncbuf))
+ return;
+
+ /* Rebuild a regular buffer over the ncbuf area so b_free() can
+ * return it to the pool.
+ */
+ buf = b_make(ncbuf->area, ncbuf->size, 0, 0);
+ b_free(&buf);
+ offer_buffers(NULL, 1);
+
+ *ncbuf = NCBUF_NULL;
+
+ /* Reset DEM_FULL as buffer is released. This ensures mux is not woken
+ * up from rcv_buf stream callback when demux was previously blocked.
+ */
+ qcs->flags &= ~QC_SF_DEM_FULL;
+}
+
+/* Free <qcs> instance. This function is reserved for internal usage : it must
+ * only be called on qcs alloc error or on connection shutdown. Else
+ * qcs_destroy must be preferred to handle QUIC flow-control increase.
+ */
+static void qcs_free(struct qcs *qcs)
+{
+ struct qcc *qcc = qcs->qcc;
+
+ TRACE_ENTER(QMUX_EV_QCS_END, qcc->conn, qcs);
+
+ /* Safe to use even if already removed from the list. */
+ LIST_DEL_INIT(&qcs->el_opening);
+ LIST_DEL_INIT(&qcs->el_send);
+
+ /* Release stream endpoint descriptor. */
+ BUG_ON(qcs->sd && !se_fl_test(qcs->sd, SE_FL_ORPHAN));
+ sedesc_free(qcs->sd);
+
+ /* Release app-layer context. */
+ if (qcs->ctx && qcc->app_ops->detach)
+ qcc->app_ops->detach(qcs);
+
+ /* Release qc_stream_desc buffer from quic-conn layer. */
+ qc_stream_desc_release(qcs->stream, qcs->tx.sent_offset);
+
+ /* Free Rx/Tx buffers. */
+ qcs_free_ncbuf(qcs, &qcs->rx.ncbuf);
+ b_free(&qcs->tx.buf);
+
+ /* Remove qcs from qcc tree. */
+ eb64_delete(&qcs->by_id);
+
+ pool_free(pool_head_qcs, qcs);
+
+ TRACE_LEAVE(QMUX_EV_QCS_END, qcc->conn);
+}
+
+/* Allocate a new QUIC streams with id <id> and type <type>. Initializes
+ * flow-control limits on both directions, inserts the stream into the qcc
+ * tree, allocates the transport-layer stream descriptor when needed for TX,
+ * and invokes the app-layer attach callback. Returns the new stream or NULL
+ * on allocation failure.
+ */
+static struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
+{
+ struct qcs *qcs;
+
+ TRACE_ENTER(QMUX_EV_QCS_NEW, qcc->conn);
+
+ qcs = pool_alloc(pool_head_qcs);
+ if (!qcs) {
+ TRACE_ERROR("alloc failure", QMUX_EV_QCS_NEW, qcc->conn);
+ return NULL;
+ }
+
+ qcs->stream = NULL;
+ qcs->qcc = qcc;
+ qcs->sd = NULL;
+ qcs->flags = QC_SF_NONE;
+ qcs->st = QC_SS_IDLE;
+ qcs->ctx = NULL;
+
+ /* App callback attach may register the stream for http-request wait.
+ * These fields must be initialized before.
+ */
+ LIST_INIT(&qcs->el_opening);
+ LIST_INIT(&qcs->el_send);
+ qcs->start = TICK_ETERNITY;
+
+ /* store transport layer stream descriptor in qcc tree */
+ qcs->id = qcs->by_id.key = id;
+ eb64_insert(&qcc->streams_by_id, &qcs->by_id);
+
+ /* If stream is local, use peer remote-limit, or else the opposite. */
+ if (quic_stream_is_bidi(id)) {
+ qcs->tx.msd = quic_stream_is_local(qcc, id) ? qcc->rfctl.msd_bidi_r :
+ qcc->rfctl.msd_bidi_l;
+ }
+ else if (quic_stream_is_local(qcc, id)) {
+ qcs->tx.msd = qcc->rfctl.msd_uni_l;
+ }
+
+ /* Properly set flow-control blocking if initial MSD is nul. */
+ if (!qcs->tx.msd)
+ qcs->flags |= QC_SF_BLK_SFCTL;
+
+ qcs->rx.ncbuf = NCBUF_NULL;
+ qcs->rx.app_buf = BUF_NULL;
+ qcs->rx.offset = qcs->rx.offset_max = 0;
+
+ /* Rx flow-control limit is symmetric to the Tx one above, using
+ * local limits this time.
+ */
+ if (quic_stream_is_bidi(id)) {
+ qcs->rx.msd = quic_stream_is_local(qcc, id) ? qcc->lfctl.msd_bidi_l :
+ qcc->lfctl.msd_bidi_r;
+ }
+ else if (quic_stream_is_remote(qcc, id)) {
+ qcs->rx.msd = qcc->lfctl.msd_uni_r;
+ }
+ qcs->rx.msd_init = qcs->rx.msd;
+
+ qcs->tx.buf = BUF_NULL;
+ qcs->tx.offset = 0;
+ qcs->tx.sent_offset = 0;
+
+ qcs->wait_event.tasklet = NULL;
+ qcs->wait_event.events = 0;
+ qcs->subs = NULL;
+
+ qcs->err = 0;
+
+ /* Allocate transport layer stream descriptor. Only needed for TX. */
+ if (!quic_stream_is_uni(id) || !quic_stream_is_remote(qcc, id)) {
+ struct quic_conn *qc = qcc->conn->handle.qc;
+ qcs->stream = qc_stream_desc_new(id, type, qcs, qc);
+ if (!qcs->stream) {
+ TRACE_ERROR("qc_stream_desc alloc failure", QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ goto err;
+ }
+ }
+
+ if (qcc->app_ops->attach && qcc->app_ops->attach(qcs, qcc->ctx)) {
+ TRACE_ERROR("app proto failure", QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ goto err;
+ }
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ return qcs;
+
+ err:
+ qcs_free(qcs);
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn);
+ return NULL;
+}
+
+/* Returns the stream connector attached to <qcs>, or NULL if none. */
+static forceinline struct stconn *qcs_sc(const struct qcs *qcs)
+{
+ return qcs->sd ? qcs->sd->sc : NULL;
+}
+
+/* Reset the <qcc> inactivity timeout for http-keep-alive timeout. */
+static forceinline void qcc_reset_idle_start(struct qcc *qcc)
+{
+ qcc->idle_start = now_ms;
+}
+
+/* Decrement <qcc> sc (attached stream-connector count). */
+static forceinline void qcc_rm_sc(struct qcc *qcc)
+{
+ BUG_ON(!qcc->nb_sc); /* Ensure sc count is always valid (ie >=0). */
+ --qcc->nb_sc;
+
+ /* Reset qcc idle start for http-keep-alive timeout. Timeout will be
+ * refreshed after this on stream detach.
+ */
+ if (!qcc->nb_sc && !qcc->nb_hreq)
+ qcc_reset_idle_start(qcc);
+}
+
+/* Decrement <qcc> hreq (in-progress HTTP request count). */
+static forceinline void qcc_rm_hreq(struct qcc *qcc)
+{
+ BUG_ON(!qcc->nb_hreq); /* Ensure http req count is always valid (ie >=0). */
+ --qcc->nb_hreq;
+
+ /* Reset qcc idle start for http-keep-alive timeout. Timeout will be
+ * refreshed after this on I/O handler.
+ */
+ if (!qcc->nb_sc && !qcc->nb_hreq)
+ qcc_reset_idle_start(qcc);
+}
+
+/* Returns non-zero if the <qcc> connection can be released. */
+static inline int qcc_is_dead(const struct qcc *qcc)
+{
+ /* Maintain connection if stream endpoints are still active. */
+ if (qcc->nb_sc)
+ return 0;
+
+ /* Connection considered dead if either :
+ * - remote error detected at transport level
+ * - error detected locally
+ * - MUX timeout expired
+ */
+ if (qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL_DONE) ||
+ !qcc->task) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return true if the mux timeout should be armed. The mux only manages the
+ * timeout while no stream connector is attached; otherwise the upper layer
+ * is responsible.
+ */
+static inline int qcc_may_expire(struct qcc *qcc)
+{
+ return !qcc->nb_sc;
+}
+
+/* Refresh the timeout on <qcc> if needed depending on its state.
+ *
+ * Selects, in order of precedence for frontend connections: the default mux
+ * timeout while requests are in progress, http-request while waiting for a
+ * (first or incomplete) request, client-fin after shutdown, http-keep-alive
+ * when idle between requests; soft-stop may shorten any of these. Backend
+ * connections and unset frontend timeouts fall back to the default timeout.
+ */
+static void qcc_refresh_timeout(struct qcc *qcc)
+{
+ const struct proxy *px = qcc->proxy;
+
+ TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc->conn);
+
+ if (!qcc->task) {
+ TRACE_DEVEL("already expired", QMUX_EV_QCC_WAKE, qcc->conn);
+ goto leave;
+ }
+
+ /* Check if upper layer is responsible of timeout management. */
+ if (!qcc_may_expire(qcc)) {
+ TRACE_DEVEL("not eligible for timeout", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->task->expire = TICK_ETERNITY;
+ task_queue(qcc->task);
+ goto leave;
+ }
+
+ /* Frontend timeout management
+ * - shutdown done -> timeout client-fin
+ * - detached streams with data left to send -> default timeout
+ * - stream waiting on incomplete request or no stream yet activated -> timeout http-request
+ * - idle after stream processing -> timeout http-keep-alive
+ *
+ * If proxy soft-stop in progress, immediate or spread close will be
+ * processed if shutdown already done or connection is idle.
+ */
+ if (!conn_is_back(qcc->conn)) {
+ if (qcc->nb_hreq && !(qcc->flags & QC_CF_APP_SHUT)) {
+ TRACE_DEVEL("one or more requests still in progress", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->task->expire = tick_add_ifset(now_ms, qcc->timeout);
+ task_queue(qcc->task);
+ goto leave;
+ }
+
+ if ((!LIST_ISEMPTY(&qcc->opening_list) || unlikely(!qcc->largest_bidi_r)) &&
+ !(qcc->flags & QC_CF_APP_SHUT)) {
+ int timeout = px->timeout.httpreq;
+ struct qcs *qcs = NULL;
+ int base_time;
+
+ /* Use start time of first stream waiting on HTTP or
+ * qcc idle if no stream not yet used.
+ */
+ if (likely(!LIST_ISEMPTY(&qcc->opening_list)))
+ qcs = LIST_ELEM(qcc->opening_list.n, struct qcs *, el_opening);
+ base_time = qcs ? qcs->start : qcc->idle_start;
+
+ TRACE_DEVEL("waiting on http request", QMUX_EV_QCC_WAKE, qcc->conn, qcs);
+ qcc->task->expire = tick_add_ifset(base_time, timeout);
+ }
+ else {
+ if (qcc->flags & QC_CF_APP_SHUT) {
+ TRACE_DEVEL("connection in closing", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->task->expire = tick_add_ifset(now_ms,
+ qcc->shut_timeout);
+ }
+ else {
+ /* Use http-request timeout if keep-alive timeout not set */
+ int timeout = tick_isset(px->timeout.httpka) ?
+ px->timeout.httpka : px->timeout.httpreq;
+ TRACE_DEVEL("at least one request achieved but none currently in progress", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->task->expire = tick_add_ifset(qcc->idle_start, timeout);
+ }
+
+ /* If proxy soft-stop in progress and connection is
+ * inactive, close the connection immediately. If a
+ * close-spread-time is configured, randomly spread the
+ * timer over a closing window.
+ */
+ if ((qcc->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) &&
+ !(global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE)) {
+
+ /* Wake timeout task immediately if window already expired. */
+ int remaining_window = tick_isset(global.close_spread_end) ?
+ tick_remain(now_ms, global.close_spread_end) : 0;
+
+ TRACE_DEVEL("proxy disabled, prepare connection soft-stop", QMUX_EV_QCC_WAKE, qcc->conn);
+ if (remaining_window) {
+ /* We don't need to reset the expire if it would
+ * already happen before the close window end.
+ */
+ if (!tick_isset(qcc->task->expire) ||
+ tick_is_le(global.close_spread_end, qcc->task->expire)) {
+ /* Set an expire value shorter than the current value
+ * because the close spread window end comes earlier.
+ */
+ qcc->task->expire = tick_add(now_ms,
+ statistical_prng_range(remaining_window));
+ }
+ }
+ else {
+ /* We are past the soft close window end, wake the timeout
+ * task up immediately.
+ */
+ qcc->task->expire = now_ms;
+ task_wakeup(qcc->task, TASK_WOKEN_TIMER);
+ }
+ }
+ }
+ }
+
+ /* fallback to default timeout if frontend specific undefined or for
+ * backend connections.
+ */
+ if (!tick_isset(qcc->task->expire)) {
+ TRACE_DEVEL("fallback to default timeout", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->task->expire = tick_add_ifset(now_ms, qcc->timeout);
+ }
+
+ task_queue(qcc->task);
+
+ leave:
+ /* Use the same event mask as TRACE_ENTER (was QMUX_EV_QCS_NEW, which
+ * made leave traces land in the wrong event class).
+ */
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE, qcc->conn);
+}
+
+/* Mark a stream as open if it was idle. This can be used on every
+ * successful emission/reception operation to update the stream state.
+ */
+static void qcs_idle_open(struct qcs *qcs)
+{
+ /* This operation must not be used if the stream is already closed. */
+ BUG_ON_HOT(qcs->st == QC_SS_CLO);
+
+ if (qcs->st == QC_SS_IDLE) {
+ TRACE_STATE("opening stream", QMUX_EV_QCS_NEW, qcs->qcc->conn, qcs);
+ qcs->st = QC_SS_OPEN;
+ }
+}
+
+/* Close the local channel of <qcs> instance. Moves a bidi stream to
+ * half-closed-local (or fully closed if the remote side was already closed)
+ * and a local uni stream straight to closed.
+ */
+static void qcs_close_local(struct qcs *qcs)
+{
+ TRACE_STATE("closing stream locally", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+
+ /* The stream must have already been opened. */
+ BUG_ON_HOT(qcs->st == QC_SS_IDLE);
+
+ /* This operation cannot be used multiple times. */
+ BUG_ON_HOT(qcs->st == QC_SS_HLOC || qcs->st == QC_SS_CLO);
+
+ if (quic_stream_is_bidi(qcs->id)) {
+ qcs->st = (qcs->st == QC_SS_HREM) ? QC_SS_CLO : QC_SS_HLOC;
+
+ /* A fully processed request releases its HTTP req slot. */
+ if (qcs->flags & QC_SF_HREQ_RECV)
+ qcc_rm_hreq(qcs->qcc);
+ }
+ else {
+ /* Only local uni streams are valid for this operation. */
+ BUG_ON_HOT(quic_stream_is_remote(qcs->qcc, qcs->id));
+ qcs->st = QC_SS_CLO;
+ }
+}
+
+/* Close the remote channel of <qcs> instance. Moves a bidi stream to
+ * half-closed-remote (or fully closed if the local side was already closed)
+ * and a remote uni stream straight to closed.
+ */
+static void qcs_close_remote(struct qcs *qcs)
+{
+ TRACE_STATE("closing stream remotely", QMUX_EV_QCS_RECV, qcs->qcc->conn, qcs);
+
+ /* The stream must have already been opened. */
+ BUG_ON_HOT(qcs->st == QC_SS_IDLE);
+
+ /* This operation cannot be used multiple times. */
+ BUG_ON_HOT(qcs->st == QC_SS_HREM || qcs->st == QC_SS_CLO);
+
+ if (quic_stream_is_bidi(qcs->id)) {
+ qcs->st = (qcs->st == QC_SS_HLOC) ? QC_SS_CLO : QC_SS_HREM;
+ }
+ else {
+ /* Only remote uni streams are valid for this operation. */
+ BUG_ON_HOT(quic_stream_is_local(qcs->qcc, qcs->id));
+ qcs->st = QC_SS_CLO;
+ }
+}
+
+/* Returns true if the local channel of <qcs> is closed. */
+int qcs_is_close_local(struct qcs *qcs)
+{
+ return qcs->st == QC_SS_HLOC || qcs->st == QC_SS_CLO;
+}
+
+/* Returns true if the remote channel of <qcs> is closed. */
+int qcs_is_close_remote(struct qcs *qcs)
+{
+ return qcs->st == QC_SS_HREM || qcs->st == QC_SS_CLO;
+}
+
+/* Allocate if needed buffer <bptr> for stream <qcs>.
+ *
+ * Returns the buffer instance or NULL on allocation failure.
+ */
+struct buffer *qcs_get_buf(struct qcs *qcs, struct buffer *bptr)
+{
+ /* <qcs> is currently unused; kept for API symmetry with other muxes. */
+ return b_alloc(bptr);
+}
+
+/* Allocate if needed buffer <ncbuf> for stream <qcs>.
+ *
+ * Returns the buffer instance or NULL on allocation failure.
+ */
+static struct ncbuf *qcs_get_ncbuf(struct qcs *qcs, struct ncbuf *ncbuf)
+{
+ struct buffer buf = BUF_NULL;
+
+ if (ncb_is_null(ncbuf)) {
+ /* Borrow a regular buffer area and wrap it as a
+ * non-contiguous buffer.
+ */
+ if (!b_alloc(&buf))
+ return NULL;
+
+ *ncbuf = ncb_make(buf.area, buf.size, 0);
+ ncb_init(ncbuf, 0);
+ }
+
+ return ncbuf;
+}
+
+/* Notify an eventual subscriber on <qcs> or else wakeup up the stconn layer if
+ * initialized.
+ */
+static void qcs_alert(struct qcs *qcs)
+{
+ if (qcs->subs) {
+ /* Wake both directions: the notify helpers filter on the
+ * actually subscribed events.
+ */
+ qcs_notify_recv(qcs);
+ qcs_notify_send(qcs);
+ }
+ else if (qcs_sc(qcs) && qcs->sd->sc->app_ops->wake) {
+ TRACE_POINT(QMUX_EV_STRM_WAKE, qcs->qcc->conn, qcs);
+ qcs->sd->sc->app_ops->wake(qcs->sd->sc);
+ }
+}
+
+/* Subscribe <es> to events <event_type> on stream <qcs>. <event_type> must be
+ * a combination of SUB_RETRY_RECV/SUB_RETRY_SEND and <es> must not change
+ * while a subscription is active. Always returns 0.
+ */
+int qcs_subscribe(struct qcs *qcs, int event_type, struct wait_event *es)
+{
+ struct qcc *qcc = qcs->qcc;
+
+ TRACE_ENTER(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(qcs->subs && qcs->subs != es);
+
+ es->events |= event_type;
+ qcs->subs = es;
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("subscribe(recv)", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+
+ if (event_type & SUB_RETRY_SEND)
+ TRACE_DEVEL("subscribe(send)", QMUX_EV_STRM_SEND, qcc->conn, qcs);
+
+ TRACE_LEAVE(QMUX_EV_STRM_SEND|QMUX_EV_STRM_RECV, qcc->conn, qcs);
+
+ return 0;
+}
+
+/* Wake the task subscribed for receive events on <qcs>, if any, and clear the
+ * subscription for that direction.
+ */
+void qcs_notify_recv(struct qcs *qcs)
+{
+ if (qcs->subs && qcs->subs->events & SUB_RETRY_RECV) {
+ TRACE_POINT(QMUX_EV_STRM_WAKE, qcs->qcc->conn, qcs);
+ tasklet_wakeup(qcs->subs->tasklet);
+ qcs->subs->events &= ~SUB_RETRY_RECV;
+ if (!qcs->subs->events)
+ qcs->subs = NULL;
+ }
+}
+
+/* Wake the task subscribed for send events on <qcs>, if any, and clear the
+ * subscription for that direction.
+ */
+void qcs_notify_send(struct qcs *qcs)
+{
+ if (qcs->subs && qcs->subs->events & SUB_RETRY_SEND) {
+ TRACE_POINT(QMUX_EV_STRM_WAKE, qcs->qcc->conn, qcs);
+ tasklet_wakeup(qcs->subs->tasklet);
+ qcs->subs->events &= ~SUB_RETRY_SEND;
+ if (!qcs->subs->events)
+ qcs->subs = NULL;
+ }
+}
+
+/* A fatal error is detected locally for <qcc> connection. It should be closed
+ * with a CONNECTION_CLOSE using <err> code. Set <app> to true to indicate that
+ * the code must be considered as an application level error. This function
+ * must not be called more than once by connection.
+ */
+void qcc_set_error(struct qcc *qcc, int err, int app)
+{
+ /* This must not be called multiple times per connection. */
+ BUG_ON(qcc->flags & QC_CF_ERRL);
+
+ TRACE_STATE("connection on error", QMUX_EV_QCC_ERR, qcc->conn);
+
+ qcc->flags |= QC_CF_ERRL;
+ qcc->err = app ? quic_err_app(err) : quic_err_transport(err);
+
+ /* TODO
+ * Ensure qcc_io_send() will be conducted to convert QC_CF_ERRL in
+ * QC_CF_ERRL_DONE with CONNECTION_CLOSE frame emission. This may be
+ * unnecessary if we are currently in the MUX tasklet context, but it
+ * is too tedious to not forget a wakeup outside of this function for
+ * the moment.
+ */
+ tasklet_wakeup(qcc->wait_event.tasklet);
+}
+
+/* Open a locally initiated stream for the connection <qcc>. Set <bidi> for a
+ * bidirectional stream, else an unidirectional stream is opened. The next
+ * available ID on the connection will be used according to the stream type.
+ *
+ * Returns the allocated stream instance or NULL on error.
+ */
+struct qcs *qcc_init_stream_local(struct qcc *qcc, int bidi)
+{
+ struct qcs *qcs;
+ enum qcs_type type;
+ uint64_t *next;
+
+ TRACE_ENTER(QMUX_EV_QCS_NEW, qcc->conn);
+
+ if (bidi) {
+ next = &qcc->next_bidi_l;
+ type = conn_is_back(qcc->conn) ? QCS_CLT_BIDI : QCS_SRV_BIDI;
+ }
+ else {
+ next = &qcc->next_uni_l;
+ type = conn_is_back(qcc->conn) ? QCS_CLT_UNI : QCS_SRV_UNI;
+ }
+
+ /* TODO ensure that we won't overflow remote peer flow control limit on
+ * streams. Else, we should emit a STREAMS_BLOCKED frame.
+ */
+
+ qcs = qcs_new(qcc, *next, type);
+ if (!qcs) {
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn);
+ qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+ return NULL;
+ }
+
+ TRACE_PROTO("opening local stream", QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ /* Stream IDs of a given type are spaced by 4 (two ID bits encode the
+ * stream type).
+ */
+ *next += 4;
+
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ return qcs;
+}
+
+/* Open a remote initiated stream for the connection <qcc> with ID <id>. The
+ * caller is responsible to ensure that a stream with the same ID was not
+ * already opened. This function will also create all intermediaries streams
+ * with ID smaller than <id> not already opened before.
+ *
+ * Returns the allocated stream instance or NULL on error.
+ */
+static struct qcs *qcc_init_stream_remote(struct qcc *qcc, uint64_t id)
+{
+ struct qcs *qcs = NULL;
+ enum qcs_type type;
+ uint64_t *largest, max_id;
+
+ TRACE_ENTER(QMUX_EV_QCS_NEW, qcc->conn);
+
+ /* Function reserved to remote stream IDs. */
+ BUG_ON(quic_stream_is_local(qcc, id));
+
+ if (quic_stream_is_bidi(id)) {
+ largest = &qcc->largest_bidi_r;
+ type = conn_is_back(qcc->conn) ? QCS_SRV_BIDI : QCS_CLT_BIDI;
+ }
+ else {
+ largest = &qcc->largest_uni_r;
+ type = conn_is_back(qcc->conn) ? QCS_SRV_UNI : QCS_CLT_UNI;
+ }
+
+ /* RFC 9000 4.6. Controlling Concurrency
+ *
+ * An endpoint that receives a frame with a stream ID exceeding the
+ * limit it has sent MUST treat this as a connection error of type
+ * STREAM_LIMIT_ERROR
+ */
+ max_id = quic_stream_is_bidi(id) ? qcc->lfctl.ms_bidi * 4 :
+ qcc->lfctl.ms_uni * 4;
+ if (id >= max_id) {
+ TRACE_ERROR("flow control error", QMUX_EV_QCS_NEW|QMUX_EV_PROTO_ERR, qcc->conn);
+ qcc_set_error(qcc, QC_ERR_STREAM_LIMIT_ERROR, 0);
+ goto err;
+ }
+
+ /* Only stream ID not already opened can be used. */
+ BUG_ON(id < *largest);
+
+ /* Instantiate <id> and every not-yet-opened stream below it. */
+ while (id >= *largest) {
+ const char *str = *largest < id ? "initializing intermediary remote stream" :
+ "initializing remote stream";
+
+ qcs = qcs_new(qcc, *largest, type);
+ if (!qcs) {
+ /* Fixed trace message typo: was "stream fallocation failure". */
+ TRACE_ERROR("stream allocation failure", QMUX_EV_QCS_NEW, qcc->conn);
+ qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+ goto err;
+ }
+
+ TRACE_PROTO(str, QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ *largest += 4;
+ }
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn, qcs);
+ return qcs;
+
+ err:
+ TRACE_LEAVE(QMUX_EV_QCS_NEW, qcc->conn);
+ return NULL;
+}
+
+/* Instantiate the stream-endpoint descriptor and the stream connector for
+ * <qcs> stream, using <buf> as input buffer. <fin> must be set if the end of
+ * input was already received with the request.
+ *
+ * Returns the created stream connector or NULL on allocation failure.
+ * NOTE(review): on sc_new_from_endp() failure the freshly allocated sedesc is
+ * not released here — presumably freed with the QCS elsewhere; verify.
+ */
+struct stconn *qcs_attach_sc(struct qcs *qcs, struct buffer *buf, char fin)
+{
+	struct qcc *qcc = qcs->qcc;
+	struct session *sess = qcc->conn->owner;
+
+	qcs->sd = sedesc_new();
+	if (!qcs->sd)
+		return NULL;
+
+	qcs->sd->se   = qcs;
+	qcs->sd->conn = qcc->conn;
+	se_fl_set(qcs->sd, SE_FL_T_MUX | SE_FL_ORPHAN | SE_FL_NOT_FIRST);
+	se_expect_no_data(qcs->sd);
+
+	/* Allow zero-copy fast-forwarding unless disabled by configuration. */
+	if (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD_QUIC_SND))
+		se_fl_set(qcs->sd, SE_FL_MAY_FASTFWD_CONS);
+
+	/* TODO duplicated from mux_h2 */
+	sess->t_idle = ns_to_ms(now_ns - sess->accept_ts) - sess->t_handshake;
+
+	if (!sc_new_from_endp(qcs->sd, sess, buf))
+		return NULL;
+
+	/* QC_SF_HREQ_RECV must be set once for a stream. Else, nb_hreq counter
+	 * will be incorrect for the connection.
+	 */
+	BUG_ON_HOT(qcs->flags & QC_SF_HREQ_RECV);
+	qcs->flags |= QC_SF_HREQ_RECV;
+	++qcc->nb_sc;
+	++qcc->nb_hreq;
+
+	/* TODO duplicated from mux_h2 */
+	sess->accept_date = date;
+	sess->accept_ts   = now_ns;
+	sess->t_handshake = 0;
+	sess->t_idle      = 0;
+
+	/* A stream must have been registered for HTTP wait before attaching
+	 * it to sedesc. See <qcs_wait_http_req> for more info.
+	 */
+	BUG_ON_HOT(!LIST_INLIST(&qcs->el_opening));
+	LIST_DEL_INIT(&qcs->el_opening);
+
+	if (fin) {
+		TRACE_STATE("report end-of-input", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+		se_fl_set(qcs->sd, SE_FL_EOI);
+	}
+
+	/* A QCS can be already locally closed before stream layer
+	 * instantiation. This notably happens if STOP_SENDING was the first
+	 * frame received for this instance. In this case, an error is
+	 * immediately to the stream layer to prevent transmission.
+	 *
+	 * TODO it could be better to not instantiate at all the stream layer.
+	 * However, extra care is required to ensure QCS instance is released.
+	 */
+	if (unlikely(qcs_is_close_local(qcs) || (qcs->flags & QC_SF_TO_RESET))) {
+		TRACE_STATE("report early error", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+		se_fl_set_error(qcs->sd);
+	}
+
+	return qcs->sd->sc;
+}
+
+/* Use this function for a stream <id> which is not in <qcc> stream tree. It
+ * returns true if the associated stream is closed.
+ */
+static int qcc_stream_id_is_closed(struct qcc *qcc, uint64_t id)
+{
+	const uint64_t *next;
+
+	/* This function must only be used for stream not present in the stream tree. */
+	BUG_ON_HOT(eb64_lookup(&qcc->streams_by_id, id));
+
+	/* Pick the next-expected ID counter matching the stream origin and
+	 * directionality.
+	 */
+	if (quic_stream_is_local(qcc, id))
+		next = quic_stream_is_uni(id) ? &qcc->next_uni_l : &qcc->next_bidi_l;
+	else
+		next = quic_stream_is_uni(id) ? &qcc->largest_uni_r : &qcc->largest_bidi_r;
+
+	/* An ID below the next expected one was already opened : since it is
+	 * absent from the tree, the stream must have been released.
+	 */
+	return id < *next;
+}
+
+/* Retrieve the stream instance from <id> ID. This can be used when receiving
+ * STREAM, STREAM_DATA_BLOCKED, RESET_STREAM, MAX_STREAM_DATA or STOP_SENDING
+ * frames. Set to false <receive_only> or <send_only> if these particular types
+ * of streams are not allowed. If the stream instance is found, it is stored in
+ * <out>.
+ *
+ * Returns 0 on success else non-zero. On error, a RESET_STREAM or a
+ * CONNECTION_CLOSE is automatically emitted. Beware that <out> may be NULL
+ * on success if the stream has already been closed.
+ */
+int qcc_get_qcs(struct qcc *qcc, uint64_t id, int receive_only, int send_only,
+                struct qcs **out)
+{
+	struct eb64_node *node;
+
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+	*out = NULL;
+
+	/* A remote unidirectional stream is receive-only from our point of
+	 * view : reject it unless the caller explicitly allowed this type.
+	 */
+	if (!receive_only && quic_stream_is_uni(id) && quic_stream_is_remote(qcc, id)) {
+		TRACE_ERROR("receive-only stream not allowed", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS|QMUX_EV_PROTO_ERR, qcc->conn, NULL, &id);
+		qcc_set_error(qcc, QC_ERR_STREAM_STATE_ERROR, 0);
+		goto err;
+	}
+
+	/* Conversely, a local unidirectional stream is send-only. */
+	if (!send_only && quic_stream_is_uni(id) && quic_stream_is_local(qcc, id)) {
+		TRACE_ERROR("send-only stream not allowed", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS|QMUX_EV_PROTO_ERR, qcc->conn, NULL, &id);
+		qcc_set_error(qcc, QC_ERR_STREAM_STATE_ERROR, 0);
+		goto err;
+	}
+
+	/* Search the stream in the connection tree. */
+	node = eb64_lookup(&qcc->streams_by_id, id);
+	if (node) {
+		*out = eb64_entry(node, struct qcs, by_id);
+		TRACE_DEVEL("using stream from connection tree", QMUX_EV_QCC_RECV, qcc->conn, *out);
+		goto out;
+	}
+
+	/* Check if stream is already closed. */
+	if (qcc_stream_id_is_closed(qcc, id)) {
+		TRACE_DATA("already closed stream", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS, qcc->conn, NULL, &id);
+		/* Consider this as a success even if <out> is left NULL. */
+		goto out;
+	}
+
+	/* Create the stream. This is valid only for remote initiated one. A
+	 * local stream must have already been explicitly created by the
+	 * application protocol layer.
+	 */
+	if (quic_stream_is_local(qcc, id)) {
+		/* RFC 9000 19.8. STREAM Frames
+		 *
+		 * An endpoint MUST terminate the connection with error
+		 * STREAM_STATE_ERROR if it receives a STREAM frame for a locally
+		 * initiated stream that has not yet been created, or for a send-only
+		 * stream.
+		 */
+		TRACE_ERROR("locally initiated stream not yet created", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS|QMUX_EV_PROTO_ERR, qcc->conn, NULL, &id);
+		qcc_set_error(qcc, QC_ERR_STREAM_STATE_ERROR, 0);
+		goto err;
+	}
+	else {
+		/* Remote stream not found - try to open it. */
+		*out = qcc_init_stream_remote(qcc, id);
+		if (!*out) {
+			TRACE_ERROR("stream creation error", QMUX_EV_QCC_RECV|QMUX_EV_QCC_NQCS, qcc->conn, NULL, &id);
+			goto err;
+		}
+	}
+
+ out:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn, *out);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 1;
+}
+
+/* Simple function to duplicate a buffer */
+static inline struct buffer qcs_b_dup(const struct ncbuf *b)
+{
+	struct buffer dup;
+
+	/* Map the whole ncbuf storage area, keeping its head offset and the
+	 * amount of contiguous data available from offset 0.
+	 */
+	dup = b_make(ncb_orig(b), b->size, b->head, ncb_data(b, 0));
+	return dup;
+}
+
+/* Remove <bytes> from <qcs> Rx buffer. Flow-control for received offsets may
+ * be allocated for the peer if needed.
+ *
+ * On frame allocation failure, the whole connection is flagged on error and
+ * the function returns early without emitting the flow-control frame.
+ */
+static void qcs_consume(struct qcs *qcs, uint64_t bytes)
+{
+	struct qcc *qcc = qcs->qcc;
+	struct quic_frame *frm;
+	struct ncbuf *buf = &qcs->rx.ncbuf;
+	enum ncb_ret ret;
+
+	TRACE_ENTER(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+
+	ret = ncb_advance(buf, bytes);
+	if (ret) {
+		ABORT_NOW(); /* should not happen because removal only in data */
+	}
+
+	/* Release the ncbuf once fully drained so it returns to the pool. */
+	if (ncb_is_empty(buf))
+		qcs_free_ncbuf(qcs, buf);
+
+	qcs->rx.offset += bytes;
+	/* Not necessary to emit a MAX_STREAM_DATA if all data received. */
+	if (qcs->flags & QC_SF_SIZE_KNOWN)
+		goto conn_fctl;
+
+	/* Refresh the stream-level credit once less than half of the initial
+	 * window remains advertised to the peer.
+	 */
+	if (qcs->rx.msd - qcs->rx.offset < qcs->rx.msd_init / 2) {
+		TRACE_DATA("increase stream credit via MAX_STREAM_DATA", QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		frm = qc_frm_alloc(QUIC_FT_MAX_STREAM_DATA);
+		if (!frm) {
+			qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+			return;
+		}
+
+		qcs->rx.msd = qcs->rx.offset + qcs->rx.msd_init;
+
+		frm->max_stream_data.id = qcs->id;
+		frm->max_stream_data.max_stream_data = qcs->rx.msd;
+
+		LIST_APPEND(&qcc->lfctl.frms, &frm->list);
+		tasklet_wakeup(qcc->wait_event.tasklet);
+	}
+
+ conn_fctl:
+	qcc->lfctl.offsets_consume += bytes;
+	/* Same half-window heuristic at connection level for MAX_DATA. */
+	if (qcc->lfctl.md - qcc->lfctl.offsets_consume < qcc->lfctl.md_init / 2) {
+		TRACE_DATA("increase conn credit via MAX_DATA", QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		frm = qc_frm_alloc(QUIC_FT_MAX_DATA);
+		if (!frm) {
+			qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+			return;
+		}
+
+		qcc->lfctl.md = qcc->lfctl.offsets_consume + qcc->lfctl.md_init;
+
+		frm->max_data.max_data = qcc->lfctl.md;
+
+		/* Use the local <qcc> for consistency with the stream-level
+		 * branch above (qcs->qcc is the same connection).
+		 */
+		LIST_APPEND(&qcc->lfctl.frms, &frm->list);
+		tasklet_wakeup(qcc->wait_event.tasklet);
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+}
+
+/* Decode the content of STREAM frames already received on the stream instance
+ * <qcs>, through the application protocol decode_qcs() callback.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int qcc_decode_qcs(struct qcc *qcc, struct qcs *qcs)
+{
+	struct buffer b;
+	ssize_t ret;
+	int fin = 0;
+
+	TRACE_ENTER(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+
+	/* <b> maps the contiguous readable area of the Rx ncbuf. */
+	b = qcs_b_dup(&qcs->rx.ncbuf);
+
+	/* Signal FIN to application if STREAM FIN received with all data. */
+	if (qcs_is_close_remote(qcs))
+		fin = 1;
+
+	if (!(qcs->flags & QC_SF_READ_ABORTED)) {
+		ret = qcc->app_ops->decode_qcs(qcs, &b, fin);
+		if (ret < 0) {
+			TRACE_ERROR("decoding error", QMUX_EV_QCS_RECV, qcc->conn, qcs);
+			goto err;
+		}
+
+		/* The app layer may have scheduled a RESET_STREAM during
+		 * decoding : propagate the error to the stream layer if one
+		 * is attached and not already flagged.
+		 */
+		if (qcs->flags & QC_SF_TO_RESET) {
+			if (qcs_sc(qcs) && !se_fl_test(qcs->sd, SE_FL_ERROR|SE_FL_ERR_PENDING)) {
+				se_fl_set_error(qcs->sd);
+				qcs_alert(qcs);
+			}
+		}
+	}
+	else {
+		/* Reading was aborted : drop the whole buffered content. */
+		TRACE_DATA("ignore read on stream", QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		ret = b_data(&b);
+	}
+
+	/* Advance the Rx buffer by the amount really processed and wake the
+	 * stream layer when progress was made or final data is reached.
+	 */
+	if (ret)
+		qcs_consume(qcs, ret);
+	if (ret || (!b_data(&b) && fin))
+		qcs_notify_recv(qcs);
+
+	TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCS_RECV, qcc->conn, qcs);
+	return 1;
+}
+
+/* Prepare for the emission of RESET_STREAM on <qcs> with error code <err>.
+ * The operation is idempotent : nothing is done if a reset is already
+ * scheduled or if the stream is already locally closed.
+ */
+void qcc_reset_stream(struct qcs *qcs, int err)
+{
+	struct qcc *qcc = qcs->qcc;
+
+	if ((qcs->flags & QC_SF_TO_RESET) || qcs_is_close_local(qcs))
+		return;
+
+	TRACE_STATE("reset stream", QMUX_EV_QCS_END, qcc->conn, qcs);
+	qcs->flags |= QC_SF_TO_RESET;
+	qcs->err = err;
+
+	/* Remove prepared stream data from connection flow-control calcul. */
+	if (qcs->tx.offset > qcs->tx.sent_offset) {
+		const uint64_t diff = qcs->tx.offset - qcs->tx.sent_offset;
+		BUG_ON(qcc->tx.offsets - diff < qcc->tx.sent_offsets);
+		qcc->tx.offsets -= diff;
+		/* Reset qcs offset to prevent BUG_ON() on qcs_destroy(). */
+		qcs->tx.offset = qcs->tx.sent_offset;
+	}
+
+	/* Report send error to stream-endpoint layer. */
+	if (qcs_sc(qcs)) {
+		se_fl_set_error(qcs->sd);
+		qcs_alert(qcs);
+	}
+
+	/* Schedule the RESET_STREAM emission with priority and wake the MUX
+	 * tasklet so it is sent as soon as possible.
+	 */
+	qcc_send_stream(qcs, 1);
+	tasklet_wakeup(qcc->wait_event.tasklet);
+}
+
+/* Register <qcs> stream for emission of STREAM, STOP_SENDING or RESET_STREAM.
+ * Set <urg> to 1 if stream content should be treated in priority compared to
+ * other streams.
+ */
+void qcc_send_stream(struct qcs *qcs, int urg)
+{
+	struct qcc *qcc = qcs->qcc;
+
+	TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+	/* Cannot send if already closed. */
+	BUG_ON(qcs_is_close_local(qcs));
+
+	if (urg) {
+		/* Urgent streams are (re)inserted at the list front so they
+		 * are processed first on the next emission.
+		 */
+		LIST_DEL_INIT(&qcs->el_send);
+		LIST_INSERT(&qcc->send_list, &qcs->el_send);
+	}
+	else {
+		/* Use the local <qcc> for consistency with the urgent branch
+		 * (qcs->qcc is the same connection). Append only once.
+		 */
+		if (!LIST_INLIST(&qcs->el_send))
+			LIST_APPEND(&qcc->send_list, &qcs->el_send);
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+}
+
+/* Prepare for the emission of STOP_SENDING on <qcs>. */
+void qcc_abort_stream_read(struct qcs *qcs)
+{
+	struct qcc *qcc = qcs->qcc;
+
+	TRACE_ENTER(QMUX_EV_QCC_NEW, qcc->conn, qcs);
+
+	/* Nothing to do if an abort is already scheduled or if the remote
+	 * side of the stream is already closed.
+	 */
+	if (!(qcs->flags & QC_SF_TO_STOP_SENDING) && !qcs_is_close_remote(qcs)) {
+		TRACE_STATE("abort stream read", QMUX_EV_QCS_END, qcc->conn, qcs);
+		qcs->flags |= (QC_SF_TO_STOP_SENDING|QC_SF_READ_ABORTED);
+
+		/* Schedule the frame emission with priority and wake the MUX
+		 * tasklet to send it.
+		 */
+		qcc_send_stream(qcs, 1);
+		tasklet_wakeup(qcc->wait_event.tasklet);
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn, qcs);
+}
+
+/* Install the <app_ops> applicative layer of a QUIC connection on mux <qcc>.
+ * Runs the optional init() callback, then the optional finalize() callback.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int qcc_install_app_ops(struct qcc *qcc, const struct qcc_app_ops *app_ops)
+{
+	TRACE_ENTER(QMUX_EV_QCC_NEW, qcc->conn);
+
+	if (app_ops->init && !app_ops->init(qcc)) {
+		TRACE_ERROR("app ops init error", QMUX_EV_QCC_NEW, qcc->conn);
+		goto err;
+	}
+
+	TRACE_PROTO("application layer initialized", QMUX_EV_QCC_NEW, qcc->conn);
+	qcc->app_ops = app_ops;
+
+	/* RFC 9114 7.2.4.2. Initialization
+	 *
+	 * Endpoints MUST NOT require any data to be
+	 * received from the peer prior to sending the SETTINGS frame;
+	 * settings MUST be sent as soon as the transport is ready to
+	 * send data.
+	 */
+	if (qcc->app_ops->finalize) {
+		if (qcc->app_ops->finalize(qcc->ctx)) {
+			TRACE_ERROR("app ops finalize error", QMUX_EV_QCC_NEW, qcc->conn);
+			goto err;
+		}
+		/* Wake the MUX tasklet so data prepared by finalize() (e.g.
+		 * the SETTINGS frame) is emitted without waiting for input.
+		 */
+		tasklet_wakeup(qcc->wait_event.tasklet);
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
+	return 1;
+}
+
+/* Handle a new STREAM frame for stream with id <id>. Payload is pointed by
+ * <data> with length <len> and represents the offset <offset>. <fin> is set if
+ * the QUIC frame FIN bit is set.
+ *
+ * Returns 0 on success else non-zero. On error, the received frame should not
+ * be acknowledged.
+ */
+int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
+             char fin, char *data)
+{
+	struct qcs *qcs;
+	enum ncb_ret ret;
+
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+	/* Nothing is processed anymore once the connection is on error. */
+	if (qcc->flags & QC_CF_ERRL) {
+		TRACE_DATA("connection on error", QMUX_EV_QCC_RECV, qcc->conn);
+		goto err;
+	}
+
+	/* RFC 9000 19.8. STREAM Frames
+	 *
+	 * An endpoint MUST terminate the connection with error
+	 * STREAM_STATE_ERROR if it receives a STREAM frame for a locally
+	 * initiated stream that has not yet been created, or for a send-only
+	 * stream.
+	 */
+	if (qcc_get_qcs(qcc, id, 1, 0, &qcs)) {
+		TRACE_DATA("qcs retrieval error", QMUX_EV_QCC_RECV, qcc->conn);
+		goto err;
+	}
+
+	/* <qcs> left NULL means the stream was already closed and released :
+	 * the frame is a harmless retransmission.
+	 */
+	if (!qcs) {
+		TRACE_DATA("already closed stream", QMUX_EV_QCC_RECV, qcc->conn);
+		goto out;
+	}
+
+	/* RFC 9000 4.5. Stream Final Size
+	 *
+	 * Once a final size for a stream is known, it cannot change. If a
+	 * RESET_STREAM or STREAM frame is received indicating a change in the
+	 * final size for the stream, an endpoint SHOULD respond with an error
+	 * of type FINAL_SIZE_ERROR; see Section 11 for details on error
+	 * handling.
+	 */
+	if (qcs->flags & QC_SF_SIZE_KNOWN &&
+	    (offset + len > qcs->rx.offset_max || (fin && offset + len < qcs->rx.offset_max))) {
+		TRACE_ERROR("final size error", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV|QMUX_EV_PROTO_ERR, qcc->conn, qcs);
+		qcc_set_error(qcc, QC_ERR_FINAL_SIZE_ERROR, 0);
+		goto err;
+	}
+
+	if (qcs_is_close_remote(qcs)) {
+		TRACE_DATA("skipping STREAM for remotely closed", QMUX_EV_QCC_RECV, qcc->conn);
+		goto out;
+	}
+
+	/* Drop frames fully below the already-consumed offset. A frame ending
+	 * exactly at rx.offset is still useful if it carries a new FIN.
+	 */
+	if (offset + len < qcs->rx.offset ||
+	    (offset + len == qcs->rx.offset && (!fin || (qcs->flags & QC_SF_SIZE_KNOWN)))) {
+		TRACE_DATA("already received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		goto out;
+	}
+
+	TRACE_PROTO("receiving STREAM", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+	qcs_idle_open(qcs);
+
+	/* Account newly received bytes against stream and connection
+	 * flow-control limits before buffering them.
+	 */
+	if (offset + len > qcs->rx.offset_max) {
+		uint64_t diff = offset + len - qcs->rx.offset_max;
+		qcs->rx.offset_max = offset + len;
+		qcc->lfctl.offsets_recv += diff;
+
+		if (offset + len > qcs->rx.msd ||
+		    qcc->lfctl.offsets_recv > qcc->lfctl.md) {
+			/* RFC 9000 4.1. Data Flow Control
+			 *
+			 * A receiver MUST close the connection with an error
+			 * of type FLOW_CONTROL_ERROR if the sender violates
+			 * the advertised connection or stream data limits
+			 */
+			TRACE_ERROR("flow control error", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV|QMUX_EV_PROTO_ERR,
+			            qcc->conn, qcs);
+			qcc_set_error(qcc, QC_ERR_FLOW_CONTROL_ERROR, 0);
+			goto err;
+		}
+	}
+
+	if (!qcs_get_ncbuf(qcs, &qcs->rx.ncbuf) || ncb_is_null(&qcs->rx.ncbuf)) {
+		TRACE_ERROR("receive ncbuf alloc failure", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+		goto err;
+	}
+
+	TRACE_DATA("newly received offset", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+	/* Trim the part of the payload already consumed. */
+	if (offset < qcs->rx.offset) {
+		size_t diff = qcs->rx.offset - offset;
+
+		len -= diff;
+		data += diff;
+		offset = qcs->rx.offset;
+	}
+
+	if (len) {
+		ret = ncb_add(&qcs->rx.ncbuf, offset - qcs->rx.offset, data, len, NCB_ADD_COMPARE);
+		switch (ret) {
+		case NCB_RET_OK:
+			break;
+
+		case NCB_RET_DATA_REJ:
+			/* RFC 9000 2.2. Sending and Receiving Data
+			 *
+			 * An endpoint could receive data for a stream at the
+			 * same stream offset multiple times. Data that has
+			 * already been received can be discarded. The data at
+			 * a given offset MUST NOT change if it is sent
+			 * multiple times; an endpoint MAY treat receipt of
+			 * different data at the same offset within a stream as
+			 * a connection error of type PROTOCOL_VIOLATION.
+			 */
+			TRACE_ERROR("overlapping data rejected", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV|QMUX_EV_PROTO_ERR,
+			            qcc->conn, qcs);
+			qcc_set_error(qcc, QC_ERR_PROTOCOL_VIOLATION, 0);
+			return 1;
+
+		case NCB_RET_GAP_SIZE:
+			/* Frame not buffered ; returning non-zero prevents its
+			 * acknowledgment so the peer will retransmit it.
+			 */
+			TRACE_DATA("cannot bufferize frame due to gap size limit", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV,
+			           qcc->conn, qcs);
+			return 1;
+		}
+	}
+
+	if (fin)
+		qcs->flags |= QC_SF_SIZE_KNOWN;
+
+	/* Close the remote side once the final size is known and every byte
+	 * up to it has been buffered contiguously.
+	 */
+	if (qcs->flags & QC_SF_SIZE_KNOWN &&
+	    qcs->rx.offset_max == qcs->rx.offset + ncb_data(&qcs->rx.ncbuf, 0)) {
+		qcs_close_remote(qcs);
+	}
+
+	if ((ncb_data(&qcs->rx.ncbuf, 0) && !(qcs->flags & QC_SF_DEM_FULL)) || fin) {
+		qcc_decode_qcs(qcc, qcs);
+		qcc_refresh_timeout(qcc);
+	}
+
+ out:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 1;
+}
+
+/* Handle a new MAX_DATA frame. <max> must contains the maximum data field of
+ * the frame.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int qcc_recv_max_data(struct qcc *qcc, uint64_t max)
+{
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+	TRACE_PROTO("receiving MAX_DATA", QMUX_EV_QCC_RECV, qcc->conn);
+
+	/* Only an increase of the advertised limit matters : MAX_DATA frames
+	 * may arrive out of order, so a smaller value is simply outdated.
+	 */
+	if (max > qcc->rfctl.md) {
+		qcc->rfctl.md = max;
+		TRACE_DATA("increase remote max-data", QMUX_EV_QCC_RECV, qcc->conn);
+
+		/* Emission may have been blocked on connection flow-control :
+		 * unblock it and wake the MUX tasklet to resume sending.
+		 */
+		if (qcc->flags & QC_CF_BLK_MFCTL) {
+			qcc->flags &= ~QC_CF_BLK_MFCTL;
+			tasklet_wakeup(qcc->wait_event.tasklet);
+		}
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 0;
+}
+
+/* Handle a new MAX_STREAM_DATA frame. <max> must contains the maximum data
+ * field of the frame and <id> is the identifier of the QUIC stream.
+ *
+ * Returns 0 on success else non-zero. On error, the received frame should not
+ * be acknowledged.
+ */
+int qcc_recv_max_stream_data(struct qcc *qcc, uint64_t id, uint64_t max)
+{
+	struct qcs *qcs;
+
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+	/* Nothing is processed anymore once the connection is on error. */
+	if (qcc->flags & QC_CF_ERRL) {
+		TRACE_DATA("connection on error", QMUX_EV_QCC_RECV, qcc->conn);
+		goto err;
+	}
+
+	/* RFC 9000 19.10. MAX_STREAM_DATA Frames
+	 *
+	 * Receiving a MAX_STREAM_DATA frame for a locally
+	 * initiated stream that has not yet been created MUST be treated as a
+	 * connection error of type STREAM_STATE_ERROR. An endpoint that
+	 * receives a MAX_STREAM_DATA frame for a receive-only stream MUST
+	 * terminate the connection with error STREAM_STATE_ERROR.
+	 */
+	if (qcc_get_qcs(qcc, id, 0, 1, &qcs))
+		goto err;
+
+	/* <qcs> may be NULL for an already closed stream : the frame is then
+	 * silently ignored.
+	 */
+	if (qcs) {
+		TRACE_PROTO("receiving MAX_STREAM_DATA", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		/* Only an increase of the limit matters ; frames may be
+		 * reordered so a smaller value is outdated.
+		 */
+		if (max > qcs->tx.msd) {
+			qcs->tx.msd = max;
+			TRACE_DATA("increase remote max-stream-data", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+
+			if (qcs->flags & QC_SF_BLK_SFCTL) {
+				qcs->flags &= ~QC_SF_BLK_SFCTL;
+				/* TODO optim: only wakeup IO-CB if stream has data to sent. */
+				tasklet_wakeup(qcc->wait_event.tasklet);
+			}
+		}
+	}
+
+	if (qcc_may_expire(qcc) && !qcc->nb_hreq)
+		qcc_refresh_timeout(qcc);
+
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_DEVEL("leaving on error", QMUX_EV_QCC_RECV, qcc->conn);
+	return 1;
+}
+
+/* Handle a new RESET_STREAM frame from stream ID <id> with error code <err>
+ * and final stream size <final_size>.
+ *
+ * Returns 0 on success else non-zero. On error, the received frame should not
+ * be acknowledged.
+ */
+int qcc_recv_reset_stream(struct qcc *qcc, uint64_t id, uint64_t err, uint64_t final_size)
+{
+	struct qcs *qcs;
+
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+	/* Nothing is processed anymore once the connection is on error. */
+	if (qcc->flags & QC_CF_ERRL) {
+		TRACE_DATA("connection on error", QMUX_EV_QCC_RECV, qcc->conn);
+		goto err;
+	}
+
+	/* RFC 9000 19.4. RESET_STREAM Frames
+	 *
+	 * An endpoint that receives a RESET_STREAM frame for a send-only stream
+	 * MUST terminate the connection with error STREAM_STATE_ERROR.
+	 */
+	if (qcc_get_qcs(qcc, id, 1, 0, &qcs)) {
+		TRACE_ERROR("RESET_STREAM for send-only stream received", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		goto err;
+	}
+
+	/* RFC 9000 3.2. Receiving Stream States
+	 *
+	 * A RESET_STREAM signal might be suppressed or withheld
+	 * if stream data is completely received and is buffered to be read by
+	 * the application. If the RESET_STREAM is suppressed, the receiving
+	 * part of the stream remains in "Data Recvd".
+	 */
+	if (!qcs || qcs_is_close_remote(qcs))
+		goto out;
+
+	TRACE_PROTO("receiving RESET_STREAM", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+	qcs_idle_open(qcs);
+
+	/* Ensure stream closure is not forbidden by application protocol. */
+	if (qcc->app_ops->close) {
+		if (qcc->app_ops->close(qcs, QCC_APP_OPS_CLOSE_SIDE_RD)) {
+			TRACE_ERROR("closure rejected by app layer", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+			goto out;
+		}
+	}
+
+	/* The advertised final size must not contradict the bytes already
+	 * received, nor a previously known final size (RFC 9000 4.5).
+	 */
+	if (qcs->rx.offset_max > final_size ||
+	    ((qcs->flags & QC_SF_SIZE_KNOWN) && qcs->rx.offset_max != final_size)) {
+		TRACE_ERROR("final size error on RESET_STREAM", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		qcc_set_error(qcc, QC_ERR_FINAL_SIZE_ERROR, 0);
+		goto err;
+	}
+
+	/* RFC 9000 3.2. Receiving Stream States
+	 *
+	 * An
+	 * implementation MAY interrupt delivery of stream data, discard any
+	 * data that was not consumed, and signal the receipt of the
+	 * RESET_STREAM.
+	 */
+	qcs->flags |= QC_SF_SIZE_KNOWN|QC_SF_RECV_RESET;
+	qcs_close_remote(qcs);
+	qcs_free_ncbuf(qcs, &qcs->rx.ncbuf);
+
+ out:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 1;
+}
+
+/* Handle a new STOP_SENDING frame for stream ID <id>. The error code should be
+ * specified in <err>.
+ *
+ * Returns 0 on success else non-zero. On error, the received frame should not
+ * be acknowledged.
+ */
+int qcc_recv_stop_sending(struct qcc *qcc, uint64_t id, uint64_t err)
+{
+	struct qcs *qcs;
+
+	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+	/* Nothing is processed anymore once the connection is on error. */
+	if (qcc->flags & QC_CF_ERRL) {
+		TRACE_DATA("connection on error", QMUX_EV_QCC_RECV, qcc->conn);
+		goto err;
+	}
+
+	/* RFC 9000 19.5. STOP_SENDING Frames
+	 *
+	 * Receiving a STOP_SENDING frame for a
+	 * locally initiated stream that has not yet been created MUST be
+	 * treated as a connection error of type STREAM_STATE_ERROR. An
+	 * endpoint that receives a STOP_SENDING frame for a receive-only stream
+	 * MUST terminate the connection with error STREAM_STATE_ERROR.
+	 */
+	if (qcc_get_qcs(qcc, id, 0, 1, &qcs))
+		goto err;
+
+	/* <qcs> left NULL means the stream is already closed and released :
+	 * the frame is silently ignored.
+	 */
+	if (!qcs)
+		goto out;
+
+	TRACE_PROTO("receiving STOP_SENDING", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+
+	/* RFC 9000 3.5. Solicited State Transitions
+	 *
+	 * An endpoint is expected to send another STOP_SENDING frame if a
+	 * packet containing a previous STOP_SENDING is lost. However, once
+	 * either all stream data or a RESET_STREAM frame has been received for
+	 * the stream -- that is, the stream is in any state other than "Recv"
+	 * or "Size Known" -- sending a STOP_SENDING frame is unnecessary.
+	 */
+
+	/* TODO thanks to previous RFC clause, STOP_SENDING is ignored if current stream
+	 * has already been closed locally. This is useful to not emit multiple
+	 * RESET_STREAM for a single stream. This is functional if stream is
+	 * locally closed due to all data transmitted, but in this case the RFC
+	 * advices to use an explicit RESET_STREAM.
+	 */
+	if (qcs_is_close_local(qcs)) {
+		TRACE_STATE("ignoring STOP_SENDING", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+		goto out;
+	}
+
+	qcs_idle_open(qcs);
+
+	/* Ensure stream closure is not forbidden by application protocol. */
+	if (qcc->app_ops->close) {
+		if (qcc->app_ops->close(qcs, QCC_APP_OPS_CLOSE_SIDE_WR)) {
+			TRACE_ERROR("closure rejected by app layer", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+			goto out;
+		}
+	}
+
+	/* If FIN already reached, future RESET_STREAMS will be ignored.
+	 * Manually set EOS in this case.
+	 */
+	if (qcs_sc(qcs) && se_fl_test(qcs->sd, SE_FL_EOI)) {
+		se_fl_set(qcs->sd, SE_FL_EOS);
+		qcs_alert(qcs);
+	}
+
+	/* RFC 9000 3.5. Solicited State Transitions
+	 *
+	 * An endpoint that receives a STOP_SENDING frame
+	 * MUST send a RESET_STREAM frame if the stream is in the "Ready" or
+	 * "Send" state. If the stream is in the "Data Sent" state, the
+	 * endpoint MAY defer sending the RESET_STREAM frame until the packets
+	 * containing outstanding data are acknowledged or declared lost. If
+	 * any outstanding data is declared lost, the endpoint SHOULD send a
+	 * RESET_STREAM frame instead of retransmitting the data.
+	 *
+	 * An endpoint SHOULD copy the error code from the STOP_SENDING frame to
+	 * the RESET_STREAM frame it sends, but it can use any application error
+	 * code.
+	 */
+	qcc_reset_stream(qcs, err);
+
+	if (qcc_may_expire(qcc) && !qcc->nb_hreq)
+		qcc_refresh_timeout(qcc);
+
+ out:
+	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+	return 0;
+
+ err:
+	TRACE_DEVEL("leaving on error", QMUX_EV_QCC_RECV, qcc->conn);
+	return 1;
+}
+
+/* Signal the closing of remote stream with id <id>. Flow-control for new
+ * streams may be allocated for the peer if needed.
+ *
+ * Returns 0 on success else non-zero (frame allocation failure).
+ */
+static int qcc_release_remote_stream(struct qcc *qcc, uint64_t id)
+{
+	struct quic_frame *frm;
+
+	TRACE_ENTER(QMUX_EV_QCS_END, qcc->conn);
+
+	if (quic_stream_is_bidi(id)) {
+		/* Batch closed streams : a MAX_STREAMS_BIDI frame is only
+		 * emitted once more than half of the initial limit of streams
+		 * have been closed since the last advertisement.
+		 */
+		++qcc->lfctl.cl_bidi_r;
+		if (qcc->lfctl.cl_bidi_r > qcc->lfctl.ms_bidi_init / 2) {
+			TRACE_DATA("increase max stream limit with MAX_STREAMS_BIDI", QMUX_EV_QCC_SEND, qcc->conn);
+			frm = qc_frm_alloc(QUIC_FT_MAX_STREAMS_BIDI);
+			if (!frm) {
+				qcc_set_error(qcc, QC_ERR_INTERNAL_ERROR, 0);
+				goto err;
+			}
+
+			frm->max_streams_bidi.max_streams = qcc->lfctl.ms_bidi +
+			                                    qcc->lfctl.cl_bidi_r;
+			LIST_APPEND(&qcc->lfctl.frms, &frm->list);
+			tasklet_wakeup(qcc->wait_event.tasklet);
+
+			/* Account the newly advertised credit and reset the
+			 * closed-streams counter.
+			 */
+			qcc->lfctl.ms_bidi += qcc->lfctl.cl_bidi_r;
+			qcc->lfctl.cl_bidi_r = 0;
+		}
+	}
+	else {
+		/* TODO unidirectional stream flow control with MAX_STREAMS_UNI
+		 * emission not implemented. It should be unnecessary for
+		 * HTTP/3 but may be required if other application protocols
+		 * are supported.
+		 */
+	}
+
+	TRACE_LEAVE(QMUX_EV_QCS_END, qcc->conn);
+
+	return 0;
+
+ err:
+	TRACE_DEVEL("leaving on error", QMUX_EV_QCS_END, qcc->conn);
+	return 1;
+}
+
+/* detaches the QUIC stream from its QCC and releases it to the QCS pool. */
+static void qcs_destroy(struct qcs *qcs)
+{
+	struct qcc *qcc = qcs->qcc;
+	struct connection *conn = qcc->conn;
+	const uint64_t id = qcs->id;
+
+	TRACE_ENTER(QMUX_EV_QCS_END, conn, qcs);
+
+	/* A stream MUST NOT be removed with prepared but unsent data left.
+	 * This is to ensure consistency on connection flow-control
+	 * calculation.
+	 */
+	BUG_ON(qcs->tx.offset < qcs->tx.sent_offset);
+
+	/* On a healthy connection, closing a remote stream may grant the peer
+	 * new stream credit. Skipped once the connection is on error.
+	 */
+	if (!(qcc->flags & QC_CF_ERRL)) {
+		if (quic_stream_is_remote(qcc, id))
+			qcc_release_remote_stream(qcc, id);
+	}
+
+	qcs_free(qcs);
+
+	TRACE_LEAVE(QMUX_EV_QCS_END, conn);
+}
+
+/* Transfer as much as possible data on <qcs> from <in> to <out>. This is done
+ * in respect with available flow-control at stream and connection level.
+ *
+ * Returns the total bytes of transferred data or a negative error code.
+ */
+static int qcs_xfer_data(struct qcs *qcs, struct buffer *out, struct buffer *in)
+{
+	struct qcc *qcc = qcs->qcc;
+	int left, to_xfer;
+	int total = 0;
+
+	TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+	if (!qcs_get_buf(qcs, out)) {
+		TRACE_ERROR("buffer alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+		goto err;
+	}
+
+	/*
+	 * QCS out buffer diagram
+	 *             head           left    to_xfer
+	 *         -------------> ----------> ----->
+	 * --------------------------------------------------
+	 *       |...............|xxxxxxxxxxx|<<<<<
+	 * --------------------------------------------------
+	 *       ^ ack-off       ^ sent-off  ^ off
+	 *
+	 * STREAM frame
+	 *                       ^                 ^
+	 *                       |xxxxxxxxxxxxxxxxx|
+	 */
+
+	BUG_ON_HOT(qcs->tx.sent_offset < qcs->stream->ack_offset);
+	BUG_ON_HOT(qcs->tx.offset < qcs->tx.sent_offset);
+	BUG_ON_HOT(qcc->tx.offsets < qcc->tx.sent_offsets);
+
+	/* <left> : bytes already transferred but not yet emitted.
+	 * <to_xfer> : new bytes to transfer, bounded by input data and output
+	 * room, then clamped by both flow-control levels below.
+	 */
+	left = qcs->tx.offset - qcs->tx.sent_offset;
+	to_xfer = QUIC_MIN(b_data(in), b_room(out));
+
+	BUG_ON_HOT(qcs->tx.offset > qcs->tx.msd);
+	/* do not exceed flow control limit */
+	if (qcs->tx.offset + to_xfer > qcs->tx.msd) {
+		TRACE_DATA("do not exceed stream flow control", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+		to_xfer = qcs->tx.msd - qcs->tx.offset;
+	}
+
+	BUG_ON_HOT(qcc->tx.offsets > qcc->rfctl.md);
+	/* do not overcome flow control limit on connection */
+	if (qcc->tx.offsets + to_xfer > qcc->rfctl.md) {
+		TRACE_DATA("do not exceed conn flow control", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+		to_xfer = qcc->rfctl.md - qcc->tx.offsets;
+	}
+
+	if (!left && !to_xfer)
+		goto out;
+
+	total = b_force_xfer(out, in, to_xfer);
+
+ out:
+	{
+		struct qcs_xfer_data_trace_arg arg = {
+			.prep = b_data(out), .xfer = total,
+		};
+		TRACE_LEAVE(QMUX_EV_QCS_SEND|QMUX_EV_QCS_XFER_DATA,
+		            qcc->conn, qcs, &arg);
+	}
+
+	return total;
+
+ err:
+	TRACE_DEVEL("leaving on error", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+	return -1;
+}
+
+/* Prepare a STREAM frame for <qcs> instance using <out> as payload. The frame
+ * is appended in <frm_list>. Set <fin> if this is supposed to be the last
+ * stream frame. If <out> is NULL an empty STREAM frame is built : this may be
+ * useful if FIN needs to be sent without any data left.
+ *
+ * Returns the payload length of the STREAM frame or a negative error code.
+ */
+static int qcs_build_stream_frm(struct qcs *qcs, struct buffer *out, char fin,
+                                struct list *frm_list)
+{
+	struct qcc *qcc = qcs->qcc;
+	struct quic_frame *frm;
+	int head, total;
+	uint64_t base_off;
+
+	TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+	/* if ack_offset < buf_offset, it points to an older buffer. */
+	base_off = MAX(qcs->stream->buf_offset, qcs->stream->ack_offset);
+	BUG_ON(qcs->tx.sent_offset < base_off);
+
+	/* <head> : bytes of <out> already covered by previous frames.
+	 * <total> : remaining payload for this frame.
+	 */
+	head = qcs->tx.sent_offset - base_off;
+	total = out ? b_data(out) - head : 0;
+	BUG_ON(total < 0);
+
+	if (!total && !fin) {
+		/* No need to send anything if total is NULL and no FIN to signal. */
+		TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+		return 0;
+	}
+	BUG_ON((!total && qcs->tx.sent_offset > qcs->tx.offset) ||
+	       (total && qcs->tx.sent_offset >= qcs->tx.offset));
+	BUG_ON(qcs->tx.sent_offset + total > qcs->tx.offset);
+	BUG_ON(qcc->tx.sent_offsets + total > qcc->rfctl.md);
+
+	TRACE_PROTO("sending STREAM frame", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+	frm = qc_frm_alloc(QUIC_FT_STREAM_8);
+	if (!frm) {
+		TRACE_ERROR("frame alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+		goto err;
+	}
+
+	frm->stream.stream = qcs->stream;
+	frm->stream.id = qcs->id;
+	frm->stream.offset.key = 0;
+	frm->stream.dup = 0;
+
+	if (total) {
+		frm->stream.buf = out;
+		frm->stream.data = (unsigned char *)b_peek(out, head);
+	}
+	else {
+		/* Empty STREAM frame. */
+		frm->stream.buf = NULL;
+		frm->stream.data = NULL;
+	}
+
+	/* FIN is positioned only when the buffer has been totally emptied. */
+	if (fin)
+		frm->type |= QUIC_STREAM_FRAME_TYPE_FIN_BIT;
+
+	/* The OFF bit (and explicit offset) is only needed for a non-zero
+	 * starting offset.
+	 */
+	if (qcs->tx.sent_offset) {
+		frm->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
+		frm->stream.offset.key = qcs->tx.sent_offset;
+	}
+
+	/* Always set length bit as we do not know if there is remaining frames
+	 * in the final packet after this STREAM.
+	 */
+	frm->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
+	frm->stream.len = total;
+
+	LIST_APPEND(frm_list, &frm->list);
+
+ out:
+	{
+		struct qcs_build_stream_trace_arg arg = {
+			.len = frm->stream.len, .fin = fin,
+			.offset = frm->stream.offset.key,
+		};
+		TRACE_LEAVE(QMUX_EV_QCS_SEND|QMUX_EV_QCS_BUILD_STRM,
+		            qcc->conn, qcs, &arg);
+	}
+
+	return total;
+
+ err:
+	TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+	return -1;
+}
+
+/* Check after transferring data from qcs.tx.buf if FIN must be set on the
+ * next STREAM frame for <qcs>: true only when a FIN was requested by the
+ * upper layer and every buffered byte has already been transferred out.
+ *
+ * Returns true if FIN must be set else false.
+ */
+static int qcs_stream_fin(struct qcs *qcs)
+{
+ if (!(qcs->flags & QC_SF_FIN_STREAM))
+ return 0;
+ return !b_data(&qcs->tx.buf);
+}
+
+/* Return true if <qcs> has data to send in new STREAM frames: either its
+ * tx buffer is non empty, some previously transferred bytes have not been
+ * emitted yet, or a standalone FIN must still be signaled.
+ */
+static forceinline int qcs_need_sending(struct qcs *qcs)
+{
+ if (b_data(&qcs->tx.buf))
+ return 1;
+ if (qcs->tx.sent_offset < qcs->tx.offset)
+ return 1;
+ return qcs_stream_fin(qcs);
+}
+
+/* This function must be called by the upper layer to inform about the sending
+ * of a STREAM frame for <qcs> instance. The frame is of <data> length and on
+ * <offset>.
+ */
+void qcc_streams_sent_done(struct qcs *qcs, uint64_t data, uint64_t offset)
+{
+ struct qcc *qcc = qcs->qcc;
+ uint64_t diff;
+
+ TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+ BUG_ON(offset > qcs->tx.sent_offset);
+ BUG_ON(offset + data > qcs->tx.offset);
+
+ /* check if the STREAM frame has already been notified. It can happen
+ * for retransmission.
+ */
+ if (offset + data < qcs->tx.sent_offset) {
+ TRACE_DEVEL("offset already notified", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ goto out;
+ }
+
+ qcs_idle_open(qcs);
+
+ /* <diff> is the count of newly acknowledged-sent bytes, i.e. the part
+ * of this frame past the previous sent_offset.
+ */
+ diff = offset + data - qcs->tx.sent_offset;
+ if (diff) {
+ /* increase offset sum on connection */
+ qcc->tx.sent_offsets += diff;
+ BUG_ON_HOT(qcc->tx.sent_offsets > qcc->rfctl.md);
+ if (qcc->tx.sent_offsets == qcc->rfctl.md) {
+ qcc->flags |= QC_CF_BLK_MFCTL;
+ TRACE_STATE("connection flow-control reached", QMUX_EV_QCS_SEND, qcc->conn);
+ }
+
+ /* increase offset on stream */
+ qcs->tx.sent_offset += diff;
+ BUG_ON_HOT(qcs->tx.sent_offset > qcs->tx.msd);
+ BUG_ON_HOT(qcs->tx.sent_offset > qcs->tx.offset);
+ if (qcs->tx.sent_offset == qcs->tx.msd) {
+ qcs->flags |= QC_SF_BLK_SFCTL;
+ TRACE_STATE("stream flow-control reached", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ }
+
+ /* If qcs.stream.buf is full, release it to the lower layer. */
+ if (qcs->tx.offset == qcs->tx.sent_offset &&
+ b_full(&qcs->stream->buf->buf)) {
+ qc_stream_buf_release(qcs->stream);
+ }
+
+ /* Add measurement for send rate. This is done at the MUX layer
+ * to account only for STREAM frames without retransmission.
+ */
+ increment_send_rate(diff, 0);
+ }
+
+ if (qcs->tx.offset == qcs->tx.sent_offset && !b_data(&qcs->tx.buf)) {
+ /* Remove stream from send_list if all was sent. */
+ LIST_DEL_INIT(&qcs->el_send);
+ TRACE_STATE("stream sent done", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+ if (qcs->flags & (QC_SF_FIN_STREAM|QC_SF_DETACH)) {
+ /* Close stream locally. */
+ qcs_close_local(qcs);
+ /* Reset flag to not emit multiple FIN STREAM frames. */
+ qcs->flags &= ~QC_SF_FIN_STREAM;
+ }
+ }
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+}
+
+/* Subscribe <qcc> for sending on its transport layer, unless the lower
+ * layer already reported an error or a send subscription is already active.
+ *
+ * Returns true if subscribe set, false otherwise.
+ */
+static int qcc_subscribe_send(struct qcc *qcc)
+{
+ struct connection *conn = qcc->conn;
+
+ /* Do not subscribe if lower layer in error. */
+ if (conn->flags & CO_FL_ERROR)
+ return 0;
+
+ /* Register only once: skip if a send subscription is pending. */
+ if (!(qcc->wait_event.events & SUB_RETRY_SEND)) {
+ TRACE_DEVEL("subscribe for send", QMUX_EV_QCC_SEND, qcc->conn);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &qcc->wait_event);
+ }
+ return 1;
+}
+
+/* Wrapper for send on transport layer. Send a list of frames <frms> for the
+ * connection <qcc>. Frames consumed by the transport are removed from the
+ * list; if some remain, a send subscription is registered for a later retry.
+ *
+ * Returns 0 if all data sent with success else non-zero.
+ */
+static int qcc_send_frames(struct qcc *qcc, struct list *frms)
+{
+ TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);
+
+ if (LIST_ISEMPTY(frms)) {
+ TRACE_DEVEL("no frames to send", QMUX_EV_QCC_SEND, qcc->conn);
+ goto err;
+ }
+
+ if (!qc_send_mux(qcc->conn->handle.qc, frms)) {
+ TRACE_DEVEL("error on sending", QMUX_EV_QCC_SEND, qcc->conn);
+ qcc_subscribe_send(qcc);
+ goto err;
+ }
+
+ /* If there is frames left at this stage, transport layer is blocked.
+ * Subscribe on it to retry later.
+ */
+ if (!LIST_ISEMPTY(frms)) {
+ TRACE_DEVEL("remaining frames to send", QMUX_EV_QCC_SEND, qcc->conn);
+ qcc_subscribe_send(qcc);
+ goto err;
+ }
+
+ TRACE_LEAVE(QMUX_EV_QCC_SEND, qcc->conn);
+ return 0;
+
+ err:
+ TRACE_DEVEL("leaving on error", QMUX_EV_QCC_SEND, qcc->conn);
+ return 1;
+}
+
+/* Emit a RESET_STREAM on <qcs>, carrying the stream application error code
+ * and the current sent offset as final size. On success the stream is closed
+ * locally and its TO_RESET flag is cleared.
+ *
+ * Returns 0 if the frame has been successfully sent else non-zero.
+ */
+static int qcs_send_reset(struct qcs *qcs)
+{
+ struct list frms = LIST_HEAD_INIT(frms);
+ struct quic_frame *frm;
+
+ TRACE_ENTER(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+
+ frm = qc_frm_alloc(QUIC_FT_RESET_STREAM);
+ if (!frm) {
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ frm->reset_stream.id = qcs->id;
+ frm->reset_stream.app_error_code = qcs->err;
+ frm->reset_stream.final_size = qcs->tx.sent_offset;
+
+ LIST_APPEND(&frms, &frm->list);
+ if (qcc_send_frames(qcs->qcc, &frms)) {
+ /* Free the frame only if still ours (rejected by transport). */
+ if (!LIST_ISEMPTY(&frms))
+ qc_frm_free(qcs->qcc->conn->handle.qc, &frm);
+ TRACE_DEVEL("cannot send RESET_STREAM", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ qcs_close_local(qcs);
+ qcs->flags &= ~QC_SF_TO_RESET;
+
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 0;
+}
+
+/* Emit a STOP_SENDING on <qcs>, carrying the stream application error code.
+ * Skipped silently if the remote side is already closed. On success the
+ * TO_STOP_SENDING flag is cleared.
+ *
+ * Returns 0 if the frame has been successfully sent else non-zero.
+ */
+static int qcs_send_stop_sending(struct qcs *qcs)
+{
+ struct list frms = LIST_HEAD_INIT(frms);
+ struct quic_frame *frm;
+ struct qcc *qcc = qcs->qcc;
+
+ TRACE_ENTER(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+
+ /* RFC 9000 3.3. Permitted Frame Types
+ *
+ * A receiver MAY send a STOP_SENDING frame in any state where it has
+ * not received a RESET_STREAM frame -- that is, states other than
+ * "Reset Recvd" or "Reset Read". However, there is little value in
+ * sending a STOP_SENDING frame in the "Data Recvd" state, as all
+ * stream data has been received. A sender could receive either of
+ * these two types of frames in any state as a result of delayed
+ * delivery of packets.
+ */
+ if (qcs_is_close_remote(qcs)) {
+ TRACE_STATE("skip STOP_SENDING on remote already closed", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ goto done;
+ }
+
+ frm = qc_frm_alloc(QUIC_FT_STOP_SENDING);
+ if (!frm) {
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ frm->stop_sending.id = qcs->id;
+ frm->stop_sending.app_error_code = qcs->err;
+
+ LIST_APPEND(&frms, &frm->list);
+ if (qcc_send_frames(qcs->qcc, &frms)) {
+ /* Free the frame only if still ours (rejected by transport). */
+ if (!LIST_ISEMPTY(&frms))
+ qc_frm_free(qcc->conn->handle.qc, &frm);
+ TRACE_DEVEL("cannot send STOP_SENDING", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 1;
+ }
+
+ done:
+ qcs->flags &= ~QC_SF_TO_STOP_SENDING;
+
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
+ return 0;
+}
+
+/* Used internally by qcc_io_send function. Proceed to send for <qcs>. This will
+ * transfer data from qcs buffer to its quic_stream counterpart. A STREAM frame
+ * is then generated and inserted in <frms> list.
+ *
+ * Returns the total bytes transferred between qcs and quic_stream buffers. Can
+ * be null if out buffer cannot be allocated. On error a negative error code is
+ * used.
+ */
+static int qcs_send(struct qcs *qcs, struct list *frms)
+{
+ struct qcc *qcc = qcs->qcc;
+ struct buffer *buf = &qcs->tx.buf;
+ struct buffer *out = qc_stream_buf_get(qcs->stream);
+ int xfer = 0, buf_avail;
+ char fin = 0;
+
+ TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+
+ /* Cannot send STREAM on remote unidirectional streams. */
+ BUG_ON(quic_stream_is_uni(qcs->id) && quic_stream_is_remote(qcc, qcs->id));
+
+ if (b_data(buf)) {
+ /* Allocate <out> buffer if not already done. */
+ if (!out) {
+ if (qcc->flags & QC_CF_CONN_FULL)
+ goto out;
+
+ out = qc_stream_buf_alloc(qcs->stream, qcs->tx.offset,
+ &buf_avail);
+ if (!out) {
+ /* <buf_avail> non-zero means a true alloc
+ * failure, not a buffer-count limit.
+ */
+ if (buf_avail) {
+ TRACE_ERROR("stream desc alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ goto err;
+ }
+
+ TRACE_STATE("hitting stream desc buffer limit", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ qcc->flags |= QC_CF_CONN_FULL;
+ goto out;
+ }
+ }
+
+ /* Transfer data from <buf> to <out>. */
+ xfer = qcs_xfer_data(qcs, out, buf);
+ if (xfer < 0)
+ goto err;
+
+ if (xfer > 0) {
+ qcs_notify_send(qcs);
+ qcs->flags &= ~QC_SF_BLK_MROOM;
+ }
+
+ /* Account transferred bytes against stream and connection
+ * flow-control limits.
+ */
+ qcs->tx.offset += xfer;
+ BUG_ON_HOT(qcs->tx.offset > qcs->tx.msd);
+ qcc->tx.offsets += xfer;
+ BUG_ON_HOT(qcc->tx.offsets > qcc->rfctl.md);
+
+ /* out buffer cannot be emptied if qcs offsets differ. */
+ BUG_ON(!b_data(out) && qcs->tx.sent_offset != qcs->tx.offset);
+ }
+
+ /* FIN is set if all incoming data were transferred. */
+ fin = qcs_stream_fin(qcs);
+
+ /* Build a new STREAM frame with <out> buffer. */
+ if (qcs->tx.sent_offset != qcs->tx.offset || fin) {
+ /* Skip STREAM frame allocation if already subscribed for send.
+ * Happens on sendto transient error or network congestion.
+ */
+ if (qcc->wait_event.events & SUB_RETRY_SEND) {
+ TRACE_DEVEL("already subscribed for sending",
+ QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ goto err;
+ }
+
+ if (qcs_build_stream_frm(qcs, out, fin, frms) < 0)
+ goto err;
+ }
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ return xfer;
+
+ err:
+ TRACE_DEVEL("leaving on error", QMUX_EV_QCS_SEND, qcc->conn, qcs);
+ return -1;
+}
+
+/* Proceed to sending. Loop through all available streams for the <qcc>
+ * instance and try to send as much as possible. A first pass handles
+ * STOP_SENDING/RESET_STREAM and builds STREAM frames; a second pass
+ * reloops while the transport accepts frames and connection flow-control
+ * is not reached.
+ *
+ * Returns the total of bytes sent to the transport layer.
+ */
+static int qcc_io_send(struct qcc *qcc)
+{
+ struct list frms = LIST_HEAD_INIT(frms);
+ /* Temporary list for QCS on error. */
+ struct list qcs_failed = LIST_HEAD_INIT(qcs_failed);
+ struct qcs *qcs, *qcs_tmp, *first_qcs = NULL;
+ int ret, total = 0;
+
+ TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);
+
+ /* TODO if socket in transient error, sending should be temporarily
+ * disabled for all frames. However, checking for send subscription is
+ * not valid as this may be caused by a congestion error which only
+ * apply for STREAM frames.
+ */
+
+ /* Check for transport error. */
+ if (qcc->flags & QC_CF_ERR_CONN || qcc->conn->flags & CO_FL_ERROR) {
+ TRACE_DEVEL("connection on error", QMUX_EV_QCC_SEND, qcc->conn);
+ goto out;
+ }
+
+ /* Check for locally detected connection error. */
+ if (qcc->flags & QC_CF_ERRL) {
+ /* Prepare a CONNECTION_CLOSE if not already done. */
+ if (!(qcc->flags & QC_CF_ERRL_DONE)) {
+ TRACE_DATA("report a connection error", QMUX_EV_QCC_SEND|QMUX_EV_QCC_ERR, qcc->conn);
+ quic_set_connection_close(qcc->conn->handle.qc, qcc->err);
+ qcc->flags |= QC_CF_ERRL_DONE;
+ }
+ goto out;
+ }
+
+ if (qcc->conn->flags & CO_FL_SOCK_WR_SH) {
+ qcc->conn->flags |= CO_FL_ERROR;
+ TRACE_DEVEL("connection on error", QMUX_EV_QCC_SEND, qcc->conn);
+ goto out;
+ }
+
+ /* Flush pending local flow-control frames first. */
+ if (!LIST_ISEMPTY(&qcc->lfctl.frms)) {
+ if (qcc_send_frames(qcc, &qcc->lfctl.frms)) {
+ TRACE_DEVEL("flow-control frames rejected by transport, aborting send", QMUX_EV_QCC_SEND, qcc->conn);
+ goto out;
+ }
+ }
+
+ /* Send STREAM/STOP_SENDING/RESET_STREAM data for registered streams. */
+ list_for_each_entry_safe(qcs, qcs_tmp, &qcc->send_list, el_send) {
+ /* Check if all QCS were processed. */
+ if (qcs == first_qcs)
+ break;
+
+ /* Stream must not be present in send_list if it has nothing to send. */
+ BUG_ON(!(qcs->flags & (QC_SF_TO_STOP_SENDING|QC_SF_TO_RESET)) &&
+ !qcs_need_sending(qcs));
+
+ /* Each STOP_SENDING/RESET_STREAM frame is sent individually to
+ * guarantee its emission.
+ *
+ * TODO multiplex several frames in same datagram to optimize sending
+ */
+ if (qcs->flags & QC_SF_TO_STOP_SENDING) {
+ if (qcs_send_stop_sending(qcs))
+ goto sent_done;
+
+ /* Remove stream from send_list if it had only STOP_SENDING
+ * to send.
+ */
+ if (!(qcs->flags & QC_SF_TO_RESET) && !qcs_need_sending(qcs)) {
+ LIST_DEL_INIT(&qcs->el_send);
+ continue;
+ }
+ }
+
+ if (qcs->flags & QC_SF_TO_RESET) {
+ if (qcs_send_reset(qcs))
+ goto sent_done;
+
+ /* RFC 9000 3.3. Permitted Frame Types
+ *
+ * A sender MUST NOT send
+ * a STREAM or STREAM_DATA_BLOCKED frame for a stream in the
+ * "Reset Sent" state or any terminal state -- that is, after
+ * sending a RESET_STREAM frame.
+ */
+ LIST_DEL_INIT(&qcs->el_send);
+ continue;
+ }
+
+ /* Build STREAM frames only if not blocked on connection or
+ * stream flow-control.
+ */
+ if (!(qcc->flags & QC_CF_BLK_MFCTL) &&
+ !(qcs->flags & QC_SF_BLK_SFCTL)) {
+ if ((ret = qcs_send(qcs, &frms)) < 0) {
+ /* Temporarily remove QCS from send-list. */
+ LIST_DEL_INIT(&qcs->el_send);
+ LIST_APPEND(&qcs_failed, &qcs->el_send);
+ continue;
+ }
+
+ total += ret;
+ if (ret) {
+ /* Move QCS with some bytes transferred at the
+ * end of send-list for next iterations.
+ */
+ LIST_DEL_INIT(&qcs->el_send);
+ LIST_APPEND(&qcc->send_list, &qcs->el_send);
+ /* Remember first moved QCS as checkpoint to interrupt loop */
+ if (!first_qcs)
+ first_qcs = qcs;
+ }
+ }
+ }
+
+ /* Retry sending until no frame to send, data rejected or connection
+ * flow-control limit reached.
+ */
+ while (qcc_send_frames(qcc, &frms) == 0 && !(qcc->flags & QC_CF_BLK_MFCTL)) {
+ /* Reloop over <qcc.send_list>. Useful for streams which have
+ * fulfilled their qc_stream_desc buf and have now release it.
+ */
+ list_for_each_entry_safe(qcs, qcs_tmp, &qcc->send_list, el_send) {
+ /* Only streams blocked on flow-control or waiting on a
+ * new qc_stream_desc should be present in send_list as
+ * long as transport layer can handle all data.
+ */
+ BUG_ON(qcs->stream->buf && !(qcs->flags & QC_SF_BLK_SFCTL));
+
+ if (!(qcs->flags & QC_SF_BLK_SFCTL)) {
+ if ((ret = qcs_send(qcs, &frms)) < 0) {
+ LIST_DEL_INIT(&qcs->el_send);
+ LIST_APPEND(&qcs_failed, &qcs->el_send);
+ continue;
+ }
+
+ total += ret;
+ }
+ }
+ }
+
+ sent_done:
+ /* Deallocate frames that the transport layer has rejected. */
+ if (!LIST_ISEMPTY(&frms)) {
+ struct quic_frame *frm, *frm2;
+
+ list_for_each_entry_safe(frm, frm2, &frms, list)
+ qc_frm_free(qcc->conn->handle.qc, &frm);
+ }
+
+ /* Re-insert on-error QCS at the end of the send-list. */
+ if (!LIST_ISEMPTY(&qcs_failed)) {
+ list_for_each_entry_safe(qcs, qcs_tmp, &qcs_failed, el_send) {
+ LIST_DEL_INIT(&qcs->el_send);
+ LIST_APPEND(&qcc->send_list, &qcs->el_send);
+ }
+
+ /* Reschedule immediately unless blocked on connection
+ * flow-control.
+ */
+ if (!(qcc->flags & QC_CF_BLK_MFCTL))
+ tasklet_wakeup(qcc->wait_event.tasklet);
+ }
+
+ out:
+ if (qcc->conn->flags & CO_FL_ERROR && !(qcc->flags & QC_CF_ERR_CONN)) {
+ TRACE_ERROR("error reported by transport layer",
+ QMUX_EV_QCC_SEND, qcc->conn);
+ qcc->flags |= QC_CF_ERR_CONN;
+ }
+
+ TRACE_LEAVE(QMUX_EV_QCC_SEND, qcc->conn);
+ return total;
+}
+
+/* Proceed on receiving. Loop through all streams from <qcc> and use decode_qcs
+ * operation. Streams with no buffered data, with a full demux buffer, or
+ * local unidirectional streams are skipped.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int qcc_io_recv(struct qcc *qcc)
+{
+ struct eb64_node *node;
+ struct qcs *qcs;
+
+ TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);
+
+ if (qcc->flags & QC_CF_ERRL) {
+ TRACE_DATA("connection on error", QMUX_EV_QCC_RECV, qcc->conn);
+ TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+ return 0;
+ }
+
+ node = eb64_first(&qcc->streams_by_id);
+ while (node) {
+ uint64_t id;
+
+ qcs = eb64_entry(node, struct qcs, by_id);
+ id = qcs->id;
+
+ /* Skip streams with nothing to demux or demux interrupted
+ * on full buffer.
+ */
+ if (!ncb_data(&qcs->rx.ncbuf, 0) || (qcs->flags & QC_SF_DEM_FULL)) {
+ node = eb64_next(node);
+ continue;
+ }
+
+ /* No reception possible on locally initiated uni streams. */
+ if (quic_stream_is_uni(id) && quic_stream_is_local(qcc, id)) {
+ node = eb64_next(node);
+ continue;
+ }
+
+ qcc_decode_qcs(qcc, qcs);
+ node = eb64_next(node);
+ }
+
+ TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
+ return 0;
+}
+
+
+/* Release all streams which have their transfer operation achieved.
+ * The iteration advances the tree node before any qcs_destroy() so the
+ * loop survives the removal of the current entry.
+ *
+ * Returns true if at least one stream is released.
+ */
+static int qcc_purge_streams(struct qcc *qcc)
+{
+ struct eb64_node *node;
+ int release = 0;
+
+ TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc->conn);
+
+ node = eb64_first(&qcc->streams_by_id);
+ while (node) {
+ struct qcs *qcs = eb64_entry(node, struct qcs, by_id);
+ node = eb64_next(node);
+
+ /* Release not attached closed streams. */
+ if (qcs->st == QC_SS_CLO && !qcs_sc(qcs)) {
+ TRACE_STATE("purging closed stream", QMUX_EV_QCC_WAKE, qcs->qcc->conn, qcs);
+ qcs_destroy(qcs);
+ release = 1;
+ continue;
+ }
+
+ /* Release detached streams with empty buffer. */
+ if (qcs->flags & QC_SF_DETACH) {
+ if (qcs_is_close_local(qcs)) {
+ TRACE_STATE("purging detached stream", QMUX_EV_QCC_WAKE, qcs->qcc->conn, qcs);
+ qcs_destroy(qcs);
+ release = 1;
+ continue;
+ }
+ }
+ }
+
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE, qcc->conn);
+ return release;
+}
+
+/* Execute application layer shutdown. If this operation is not defined, a
+ * CONNECTION_CLOSE will be prepared as a fallback. This function is protected
+ * against multiple invocation with the flag QC_CF_APP_SHUT.
+ */
+static void qcc_shutdown(struct qcc *qcc)
+{
+ TRACE_ENTER(QMUX_EV_QCC_END, qcc->conn);
+
+ /* Nothing graceful to do on an errored connection. */
+ if (qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL)) {
+ TRACE_DATA("connection on error", QMUX_EV_QCC_END, qcc->conn);
+ goto out;
+ }
+
+ if (qcc->flags & QC_CF_APP_SHUT)
+ goto out;
+
+ TRACE_STATE("perform graceful shutdown", QMUX_EV_QCC_END, qcc->conn);
+ if (qcc->app_ops && qcc->app_ops->shutdown) {
+ qcc->app_ops->shutdown(qcc->ctx);
+ qcc_io_send(qcc);
+ }
+ else {
+ qcc->err = quic_err_app(QC_ERR_NO_ERROR);
+ }
+
+ /* Register "no error" code at transport layer. Do not use
+ * quic_set_connection_close() as retransmission may be performed to
+ * finalized transfers. Do not overwrite quic-conn existing code if
+ * already set.
+ *
+ * TODO implement a wrapper function for this in quic-conn module
+ */
+ if (!(qcc->conn->handle.qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE))
+ qcc->conn->handle.qc->err = qcc->err;
+
+ out:
+ qcc->flags |= QC_CF_APP_SHUT;
+ TRACE_LEAVE(QMUX_EV_QCC_END, qcc->conn);
+}
+
+/* Loop through all qcs from <qcc>. Report error on stream endpoint if
+ * connection on error and wake them. Streams without an attached stream
+ * connector are skipped.
+ */
+static int qcc_wake_some_streams(struct qcc *qcc)
+{
+ struct qcs *qcs;
+ struct eb64_node *node;
+
+ TRACE_POINT(QMUX_EV_QCC_WAKE, qcc->conn);
+
+ for (node = eb64_first(&qcc->streams_by_id); node;
+ node = eb64_next(node)) {
+ qcs = eb64_entry(node, struct qcs, by_id);
+
+ if (!qcs_sc(qcs))
+ continue;
+
+ /* Propagate connection-level error to the stream endpoint. */
+ if (qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL)) {
+ TRACE_POINT(QMUX_EV_QCC_WAKE, qcc->conn, qcs);
+ se_fl_set_error(qcs->sd);
+ qcs_alert(qcs);
+ }
+ }
+
+ return 0;
+}
+
+/* Conduct operations which should be made for <qcc> connection after
+ * input/output. Most notably, closed streams are purged which may leave the
+ * connection has ready to be released.
+ *
+ * Returns 1 if <qcc> must be released else 0.
+ */
+static int qcc_io_process(struct qcc *qcc)
+{
+ qcc_purge_streams(qcc);
+
+ /* Check if a soft-stop is in progress.
+ *
+ * TODO this is relevant for frontend connections only.
+ */
+ if (unlikely(qcc->proxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+ int close = 1;
+
+ /* If using listener socket, soft-stop is not supported. The
+ * connection must be closed immediately. Note that this path
+ * jumps straight to the dead-connection check below.
+ */
+ if (!qc_test_fd(qcc->conn->handle.qc)) {
+ TRACE_DEVEL("proxy disabled with listener socket, closing connection", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc->conn->flags |= (CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH);
+ qcc_io_send(qcc);
+ goto out;
+ }
+
+ TRACE_DEVEL("proxy disabled, prepare connection soft-stop", QMUX_EV_QCC_WAKE, qcc->conn);
+
+ /* If a close-spread-time option is set, we want to avoid
+ * closing all the active HTTP3 connections at once so we add a
+ * random factor that will spread the closing.
+ */
+ if (tick_isset(global.close_spread_end)) {
+ int remaining_window = tick_remain(now_ms, global.close_spread_end);
+ if (remaining_window) {
+ /* This should increase the closing rate the
+ * further along the window we are. */
+ close = (remaining_window <= statistical_prng_range(global.close_spread_time));
+ }
+ }
+ else if (global.tune.options & GTUNE_DISABLE_ACTIVE_CLOSE) {
+ close = 0; /* let the client close his connection himself */
+ }
+
+ if (close)
+ qcc_shutdown(qcc);
+ }
+
+ /* Report error if set on stream endpoint layer. */
+ if (qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL))
+ qcc_wake_some_streams(qcc);
+
+ out:
+ if (qcc_is_dead(qcc))
+ return 1;
+
+ return 0;
+}
+
+/* release function. This one should be called to free all resources allocated
+ * to the mux. The attached connection, if any, is also fully closed and freed
+ * after the qcc itself, so <conn> is cached locally before pool_free().
+ */
+static void qcc_release(struct qcc *qcc)
+{
+ struct connection *conn = qcc->conn;
+ struct eb64_node *node;
+
+ TRACE_ENTER(QMUX_EV_QCC_END, conn);
+
+ /* Attempt a graceful shutdown before tearing everything down. */
+ qcc_shutdown(qcc);
+
+ if (qcc->task) {
+ task_destroy(qcc->task);
+ qcc->task = NULL;
+ }
+
+ tasklet_free(qcc->wait_event.tasklet);
+ if (conn && qcc->wait_event.events) {
+ conn->xprt->unsubscribe(conn, conn->xprt_ctx,
+ qcc->wait_event.events,
+ &qcc->wait_event);
+ }
+
+ /* liberate remaining qcs instances */
+ node = eb64_first(&qcc->streams_by_id);
+ while (node) {
+ struct qcs *qcs = eb64_entry(node, struct qcs, by_id);
+ node = eb64_next(node);
+ qcs_free(qcs);
+ }
+
+ /* Free pending local flow-control frames never emitted. */
+ while (!LIST_ISEMPTY(&qcc->lfctl.frms)) {
+ struct quic_frame *frm = LIST_ELEM(qcc->lfctl.frms.n, struct quic_frame *, list);
+ qc_frm_free(qcc->conn->handle.qc, &frm);
+ }
+
+ if (qcc->app_ops && qcc->app_ops->release)
+ qcc->app_ops->release(qcc->ctx);
+ TRACE_PROTO("application layer released", QMUX_EV_QCC_END, conn);
+
+ pool_free(pool_head_qcc, qcc);
+
+ if (conn) {
+ LIST_DEL_INIT(&conn->stopping_list);
+
+ conn->handle.qc->conn = NULL;
+ conn->mux = NULL;
+ conn->ctx = NULL;
+
+ TRACE_DEVEL("freeing conn", QMUX_EV_QCC_END, conn);
+
+ conn_stop_tracking(conn);
+ conn_full_close(conn);
+ if (conn->destroy_cb)
+ conn->destroy_cb(conn);
+ conn_free(conn);
+ }
+
+ TRACE_LEAVE(QMUX_EV_QCC_END);
+}
+
+/* I/O tasklet handler for the <ctx> qcc instance. Performs send then recv,
+ * post-processes the connection and releases it if it became dead.
+ *
+ * Always returns NULL: the tasklet is either requeued by subscriptions or
+ * freed with the connection.
+ */
+struct task *qcc_io_cb(struct task *t, void *ctx, unsigned int status)
+{
+ struct qcc *qcc = ctx;
+
+ TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc->conn);
+
+ qcc_io_send(qcc);
+
+ qcc_io_recv(qcc);
+
+ if (qcc_io_process(qcc)) {
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
+ goto release;
+ }
+
+ qcc_refresh_timeout(qcc);
+
+ end:
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE, qcc->conn);
+ return NULL;
+
+ release:
+ qcc_release(qcc);
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE);
+ return NULL;
+}
+
+/* MUX timeout task handler. If the timer really expired and the connection
+ * is allowed to expire, the task is destroyed and <qcc> is released when
+ * dead; qcc->task is reset to NULL to mark the timeout as triggered. <ctx>
+ * may be NULL if the qcc was released before the task fired.
+ *
+ * Returns <t> to requeue the timer, or NULL once the task is destroyed.
+ */
+static struct task *qcc_timeout_task(struct task *t, void *ctx, unsigned int state)
+{
+ struct qcc *qcc = ctx;
+ int expired = tick_is_expired(t->expire, now_ms);
+
+ TRACE_ENTER(QMUX_EV_QCC_WAKE, qcc ? qcc->conn : NULL);
+
+ if (qcc) {
+ if (!expired) {
+ TRACE_DEVEL("not expired", QMUX_EV_QCC_WAKE, qcc->conn);
+ goto requeue;
+ }
+
+ if (!qcc_may_expire(qcc)) {
+ TRACE_DEVEL("cannot expire", QMUX_EV_QCC_WAKE, qcc->conn);
+ t->expire = TICK_ETERNITY;
+ goto requeue;
+ }
+ }
+
+ task_destroy(t);
+
+ if (!qcc) {
+ TRACE_DEVEL("no more qcc", QMUX_EV_QCC_WAKE);
+ goto out;
+ }
+
+ /* Mark timeout as triggered by setting task to NULL. */
+ qcc->task = NULL;
+
+ /* TODO depending on the timeout condition, different shutdown mode
+ * should be used. For http keep-alive or disabled proxy, a graceful
+ * shutdown should occur. For all other cases, an immediate close
+ * seems legitimate.
+ */
+ if (qcc_is_dead(qcc)) {
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
+ qcc_release(qcc);
+ }
+
+ out:
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE);
+ return NULL;
+
+ requeue:
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE);
+ return t;
+}
+
+/* Initialize the QUIC mux for connection <conn> on proxy <prx>. Allocates
+ * the qcc instance, sets up flow-control state from the local and remote
+ * transport parameters, creates the I/O tasklet and timeout task, then
+ * installs the application layer (e.g. HTTP/3).
+ *
+ * Returns 0 on success, -1 on failure with all partial allocations freed.
+ */
+static int qmux_init(struct connection *conn, struct proxy *prx,
+ struct session *sess, struct buffer *input)
+{
+ struct qcc *qcc;
+ struct quic_transport_params *lparams, *rparams;
+
+ TRACE_ENTER(QMUX_EV_QCC_NEW);
+
+ qcc = pool_alloc(pool_head_qcc);
+ if (!qcc) {
+ TRACE_ERROR("alloc failure", QMUX_EV_QCC_NEW);
+ goto fail_no_qcc;
+ }
+
+ qcc->conn = conn;
+ conn->ctx = qcc;
+ qcc->nb_hreq = qcc->nb_sc = 0;
+ qcc->flags = 0;
+
+ qcc->app_ops = NULL;
+
+ qcc->streams_by_id = EB_ROOT_UNIQUE;
+
+ /* Server parameters, params used for RX flow control. */
+ lparams = &conn->handle.qc->rx.params;
+
+ qcc->tx.sent_offsets = qcc->tx.offsets = 0;
+
+ /* Local flow-control limits advertised to the peer. */
+ LIST_INIT(&qcc->lfctl.frms);
+ qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = lparams->initial_max_streams_bidi;
+ qcc->lfctl.ms_uni = lparams->initial_max_streams_uni;
+ qcc->lfctl.msd_bidi_l = lparams->initial_max_stream_data_bidi_local;
+ qcc->lfctl.msd_bidi_r = lparams->initial_max_stream_data_bidi_remote;
+ qcc->lfctl.msd_uni_r = lparams->initial_max_stream_data_uni;
+ qcc->lfctl.cl_bidi_r = 0;
+
+ qcc->lfctl.md = qcc->lfctl.md_init = lparams->initial_max_data;
+ qcc->lfctl.offsets_recv = qcc->lfctl.offsets_consume = 0;
+
+ /* Remote flow-control limits imposed by the peer on our TX. */
+ rparams = &conn->handle.qc->tx.params;
+ qcc->rfctl.md = rparams->initial_max_data;
+ qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local;
+ qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote;
+ qcc->rfctl.msd_uni_l = rparams->initial_max_stream_data_uni;
+
+ /* Stream ID bases depend on our role (client or server). */
+ if (conn_is_back(conn)) {
+ qcc->next_bidi_l = 0x00;
+ qcc->largest_bidi_r = 0x01;
+ qcc->next_uni_l = 0x02;
+ qcc->largest_uni_r = 0x03;
+ }
+ else {
+ qcc->largest_bidi_r = 0x00;
+ qcc->next_bidi_l = 0x01;
+ qcc->largest_uni_r = 0x02;
+ qcc->next_uni_l = 0x03;
+ }
+
+ qcc->wait_event.tasklet = tasklet_new();
+ if (!qcc->wait_event.tasklet) {
+ TRACE_ERROR("tasklet alloc failure", QMUX_EV_QCC_NEW);
+ goto fail_no_tasklet;
+ }
+
+ LIST_INIT(&qcc->send_list);
+
+ qcc->wait_event.tasklet->process = qcc_io_cb;
+ qcc->wait_event.tasklet->context = qcc;
+ qcc->wait_event.events = 0;
+
+ qcc->proxy = prx;
+ /* haproxy timeouts */
+ if (conn_is_back(qcc->conn)) {
+ qcc->timeout = prx->timeout.server;
+ qcc->shut_timeout = tick_isset(prx->timeout.serverfin) ?
+ prx->timeout.serverfin : prx->timeout.server;
+ }
+ else {
+ qcc->timeout = prx->timeout.client;
+ qcc->shut_timeout = tick_isset(prx->timeout.clientfin) ?
+ prx->timeout.clientfin : prx->timeout.client;
+ }
+
+ /* Always allocate task even if timeout is unset. In MUX code, if task
+ * is NULL, it indicates that a timeout has stroke earlier.
+ */
+ qcc->task = task_new_here();
+ if (!qcc->task) {
+ TRACE_ERROR("timeout task alloc failure", QMUX_EV_QCC_NEW);
+ goto fail_no_timeout_task;
+ }
+ qcc->task->process = qcc_timeout_task;
+ qcc->task->context = qcc;
+ qcc->task->expire = tick_add_ifset(now_ms, qcc->timeout);
+
+ qcc_reset_idle_start(qcc);
+ LIST_INIT(&qcc->opening_list);
+
+ HA_ATOMIC_STORE(&conn->handle.qc->qcc, qcc);
+
+ if (qcc_install_app_ops(qcc, conn->handle.qc->app_ops)) {
+ TRACE_PROTO("Cannot install app layer", QMUX_EV_QCC_NEW|QMUX_EV_QCC_ERR, qcc->conn);
+ /* prepare a CONNECTION_CLOSE frame */
+ quic_set_connection_close(conn->handle.qc, quic_err_transport(QC_ERR_APPLICATION_ERROR));
+ goto fail_install_app_ops;
+ }
+
+ if (qcc->app_ops == &h3_ops)
+ proxy_inc_fe_cum_sess_ver_ctr(sess->listener, prx, 3);
+
+ /* Register conn for idle front closing. This is done once everything is allocated. */
+ if (!conn_is_back(conn))
+ LIST_APPEND(&mux_stopping_data[tid].list, &conn->stopping_list);
+
+ /* init read cycle */
+ tasklet_wakeup(qcc->wait_event.tasklet);
+
+ TRACE_LEAVE(QMUX_EV_QCC_NEW, qcc->conn);
+ return 0;
+
+ fail_install_app_ops:
+ if (qcc->app_ops && qcc->app_ops->release)
+ qcc->app_ops->release(qcc->ctx);
+ task_destroy(qcc->task);
+ fail_no_timeout_task:
+ tasklet_free(qcc->wait_event.tasklet);
+ fail_no_tasklet:
+ pool_free(pool_head_qcc, qcc);
+ fail_no_qcc:
+ TRACE_LEAVE(QMUX_EV_QCC_NEW);
+ return -1;
+}
+
+/* Mux destroy callback: releases the <ctx> qcc instance and every resource
+ * attached to it, including the underlying connection.
+ */
+static void qmux_destroy(void *ctx)
+{
+ struct qcc *qcc = ctx;
+
+ TRACE_ENTER(QMUX_EV_QCC_END, qcc->conn);
+ qcc_release(qcc);
+ TRACE_LEAVE(QMUX_EV_QCC_END);
+}
+
+/* Stream detach callback, invoked when the stream connector layer releases
+ * its endpoint <sd>. The qcs is either flagged detached (data still pending)
+ * or destroyed immediately; the connection itself is released if it became
+ * dead as a result.
+ */
+static void qmux_strm_detach(struct sedesc *sd)
+{
+ struct qcs *qcs = sd->se;
+ struct qcc *qcc = qcs->qcc;
+
+ TRACE_ENTER(QMUX_EV_STRM_END, qcc->conn, qcs);
+
+ /* TODO this BUG_ON_HOT() is not correct as the stconn layer may detach
+ * from the stream even if it is not closed remotely at the QUIC layer.
+ * This happens for example when a stream must be closed due to a
+ * rejected request. To better handle these cases, it will be required
+ * to implement shutr/shutw MUX operations. Once this is done, this
+ * BUG_ON_HOT() statement can be adjusted.
+ */
+ //BUG_ON_HOT(!qcs_is_close_remote(qcs));
+
+ qcc_rm_sc(qcc);
+
+ /* Keep the qcs alive if it still has data to emit. */
+ if (!qcs_is_close_local(qcs) &&
+ !(qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL))) {
+ TRACE_STATE("remaining data, detaching qcs", QMUX_EV_STRM_END, qcc->conn, qcs);
+ qcs->flags |= QC_SF_DETACH;
+ qcc_refresh_timeout(qcc);
+
+ TRACE_LEAVE(QMUX_EV_STRM_END, qcc->conn, qcs);
+ return;
+ }
+
+ qcs_destroy(qcs);
+
+ if (qcc_is_dead(qcc)) {
+ TRACE_STATE("killing dead connection", QMUX_EV_STRM_END, qcc->conn);
+ goto release;
+ }
+ else {
+ TRACE_DEVEL("refreshing connection's timeout", QMUX_EV_STRM_END, qcc->conn);
+ qcc_refresh_timeout(qcc);
+ }
+
+ TRACE_LEAVE(QMUX_EV_STRM_END, qcc->conn);
+ return;
+
+ release:
+ qcc_release(qcc);
+ TRACE_LEAVE(QMUX_EV_STRM_END);
+ return;
+}
+
+/* Called from the upper layer, to receive data into <buf> up to <count>
+ * bytes for stream connector <sc>. Updates the endpoint flags (RCV_MORE,
+ * EOI, EOS, ERROR) according to the stream state and may restart demux
+ * if it had been interrupted on a full buffer.
+ *
+ * Returns the number of bytes copied into <buf>.
+ */
+static size_t qmux_strm_rcv_buf(struct stconn *sc, struct buffer *buf,
+ size_t count, int flags)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+ struct qcc *qcc = qcs->qcc;
+ size_t ret = 0;
+ char fin = 0;
+
+ TRACE_ENTER(QMUX_EV_STRM_RECV, qcc->conn, qcs);
+
+ ret = qcs_http_rcv_buf(qcs, buf, count, &fin);
+
+ if (b_data(&qcs->rx.app_buf)) {
+ se_fl_set(qcs->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+ }
+ else {
+ se_fl_clr(qcs->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
+
+ /* Set end-of-input when full message properly received. */
+ if (fin) {
+ TRACE_STATE("report end-of-input", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+ se_fl_set(qcs->sd, SE_FL_EOI);
+
+ /* If request EOM is reported to the upper layer, it means the
+ * QCS now expects data from the opposite side.
+ */
+ se_expect_data(qcs->sd);
+ }
+
+ /* Set end-of-stream on read closed. */
+ if (qcs->flags & QC_SF_RECV_RESET ||
+ qcc->conn->flags & CO_FL_SOCK_RD_SH) {
+ TRACE_STATE("report end-of-stream", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+ se_fl_set(qcs->sd, SE_FL_EOS);
+
+ /* Set error if EOI not reached. This may happen on
+ * RESET_STREAM reception or connection error.
+ */
+ if (!se_fl_test(qcs->sd, SE_FL_EOI)) {
+ TRACE_STATE("report error on stream aborted", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+ se_fl_set(qcs->sd, SE_FL_ERROR);
+ }
+ }
+
+ if (se_fl_test(qcs->sd, SE_FL_ERR_PENDING)) {
+ TRACE_STATE("report error", QMUX_EV_STRM_RECV, qcc->conn, qcs);
+ se_fl_set(qcs->sd, SE_FL_ERROR);
+ }
+
+ /* App buffer fully drained: release it back to the pool. */
+ if (b_size(&qcs->rx.app_buf)) {
+ b_free(&qcs->rx.app_buf);
+ offer_buffers(NULL, 1);
+ }
+ }
+
+ /* Restart demux if it was interrupted on full buffer. */
+ if (ret && qcs->flags & QC_SF_DEM_FULL) {
+ /* Ensure DEM_FULL is only set if there is available data to
+ * ensure we never do unnecessary wakeup here.
+ */
+ BUG_ON(!ncb_data(&qcs->rx.ncbuf, 0));
+
+ qcs->flags &= ~QC_SF_DEM_FULL;
+ if (!(qcc->flags & QC_CF_ERRL))
+ tasklet_wakeup(qcc->wait_event.tasklet);
+ }
+
+ TRACE_LEAVE(QMUX_EV_STRM_RECV, qcc->conn, qcs);
+
+ return ret;
+}
+
+/* Called from the upper layer, to send data from <buf> up to <count> bytes
+ * for stream connector <sc>. Transfers into the qcs tx buffer and schedules
+ * the stream for emission; on local close or pending reset the data is
+ * merely consumed.
+ *
+ * Returns the number of bytes consumed from <buf>.
+ */
+static size_t qmux_strm_snd_buf(struct stconn *sc, struct buffer *buf,
+ size_t count, int flags)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+ size_t ret = 0;
+ char fin;
+
+ TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+ /* stream layer has been detached so no transfer must occur after. */
+ BUG_ON_HOT(qcs->flags & QC_SF_DETACH);
+
+ /* Report error if set on stream endpoint layer. */
+ if (qcs->qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL)) {
+ se_fl_set(qcs->sd, SE_FL_ERROR);
+ TRACE_DEVEL("connection in error", QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+ goto end;
+ }
+
+ /* Closed or resetting stream: swallow the data without sending. */
+ if (qcs_is_close_local(qcs) || (qcs->flags & QC_SF_TO_RESET)) {
+ ret = qcs_http_reset_buf(qcs, buf, count);
+ goto end;
+ }
+
+ ret = qcs_http_snd_buf(qcs, buf, count, &fin);
+ if (fin) {
+ TRACE_STATE("reached stream fin", QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+ qcs->flags |= QC_SF_FIN_STREAM;
+ }
+
+ /* Register the stream for emission and kick the I/O tasklet. */
+ if (ret || fin) {
+ qcc_send_stream(qcs, 0);
+ if (!(qcs->qcc->wait_event.events & SUB_RETRY_SEND))
+ tasklet_wakeup(qcs->qcc->wait_event.tasklet);
+ }
+
+ end:
+ TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+ return ret;
+}
+
+
+/* nego_fastfwd mux callback: negotiate zero-copy fast-forwarding of at most
+ * <count> bytes for stream <sc>. Returns the number of bytes that may still
+ * be fast-forwarded, or 0 if fast-forward is disabled or must be retried.
+ */
+static size_t qmux_nego_ff(struct stconn *sc, struct buffer *input, size_t count, unsigned int may_splice)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+ size_t ret = 0;
+
+ TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+ /* stream layer has been detached so no transfer must occur after. */
+ BUG_ON_HOT(qcs->flags & QC_SF_DETACH);
+
+ if (!qcs->qcc->app_ops->nego_ff || !qcs->qcc->app_ops->done_ff) {
+ /* Fast forwarding is not supported by the QUIC application layer */
+ qcs->sd->iobuf.flags |= IOBUF_FL_NO_FF;
+ goto end;
+ }
+
+ if (qcs->qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL)) {
+ /* Disable fast-forward if connection is on error. Eventually,
+ * error will be reported to stream-conn if snd_buf is invoked.
+ */
+ TRACE_DEVEL("connection in error", QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+ qcs->sd->iobuf.flags |= IOBUF_FL_NO_FF;
+ goto end;
+ }
+
+ /* Always disable splicing */
+ qcs->sd->iobuf.flags |= IOBUF_FL_NO_SPLICING;
+
+ ret = qcs->qcc->app_ops->nego_ff(qcs, count);
+ if (!ret)
+ goto end;
+
+ /* forward remaining input data */
+ if (b_data(input)) {
+ size_t xfer = ret;
+
+ if (xfer > b_data(input))
+ xfer = b_data(input);
+ /* temporarily account for the iobuf offset so the transfer
+ * lands past the reserved header area, then restore it.
+ */
+ b_add(qcs->sd->iobuf.buf, qcs->sd->iobuf.offset);
+ qcs->sd->iobuf.data = b_xfer(qcs->sd->iobuf.buf, input, xfer);
+ b_sub(qcs->sd->iobuf.buf, qcs->sd->iobuf.offset);
+
+ /* Cannot forward more data, wait for room */
+ if (b_data(input)) {
+ ret = 0;
+ goto end;
+ }
+ }
+ /* report the room still available after the copied data */
+ ret -= qcs->sd->iobuf.data;
+
+ end:
+ TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+ return ret;
+}
+
+/* done_fastfwd mux callback: commit the data placed in the iobuf during a
+ * previous nego_ff and schedule the stream for emission. Returns the number
+ * of bytes committed by the application layer.
+ */
+static size_t qmux_done_ff(struct stconn *sc)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+ struct qcc *qcc = qcs->qcc;
+ struct sedesc *sd = qcs->sd;
+ size_t total = 0;
+
+ TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+ /* end-of-input on the iobuf translates into stream FIN */
+ if (sd->iobuf.flags & IOBUF_FL_EOI)
+ qcs->flags |= QC_SF_FIN_STREAM;
+
+ /* nothing to emit: neither data nor FIN */
+ if (!(qcs->flags & QC_SF_FIN_STREAM) && !sd->iobuf.data)
+ goto end;
+
+ total = qcs->qcc->app_ops->done_ff(qcs);
+
+ qcc_send_stream(qcs, 0);
+ if (!(qcs->qcc->wait_event.events & SUB_RETRY_SEND))
+ tasklet_wakeup(qcc->wait_event.tasklet);
+
+ end:
+ /* release the tx buffer if it was fully drained */
+ if (!b_data(&qcs->tx.buf))
+ b_free(&qcs->tx.buf);
+
+ TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+ return total;
+}
+
+/* resume_fastfwd mux callback: nothing to resume for the QUIC mux, always
+ * returns 0.
+ */
+static int qmux_resume_ff(struct stconn *sc, unsigned int flags)
+{
+ return 0;
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int qmux_strm_subscribe(struct stconn *sc, int event_type,
+ struct wait_event *es)
+{
+ /* delegate to the generic QCS subscription handler */
+ return qcs_subscribe(__sc_mux_strm(sc), event_type, es);
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int qmux_strm_unsubscribe(struct stconn *sc, int event_type, struct wait_event *es)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(qcs->subs && qcs->subs != es);
+
+ es->events &= ~event_type;
+ /* forget the subscriber once no event remains registered */
+ if (!es->events)
+ qcs->subs = NULL;
+
+ return 0;
+}
+
+/* wake mux callback: run connection-level I/O processing. Returns 1 if the
+ * connection was released (dead), 0 otherwise.
+ */
+static int qmux_wake(struct connection *conn)
+{
+ struct qcc *qcc = conn->ctx;
+
+ TRACE_ENTER(QMUX_EV_QCC_WAKE, conn);
+
+ /* non-zero return from qcc_io_process means the connection is dead */
+ if (qcc_io_process(qcc)) {
+ TRACE_STATE("releasing dead connection", QMUX_EV_QCC_WAKE, qcc->conn);
+ goto release;
+ }
+
+ qcc_wake_some_streams(qcc);
+
+ qcc_refresh_timeout(qcc);
+
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE, conn);
+ return 0;
+
+ release:
+ qcc_release(qcc);
+ TRACE_LEAVE(QMUX_EV_QCC_WAKE);
+ return 1;
+}
+
+/* shutw mux callback: perform a write shutdown on stream <sc>. If the stream
+ * is not already closed locally, it is closed either cleanly with a FIN
+ * STREAM frame (unknown payload length) or abruptly via RESET_STREAM.
+ */
+static void qmux_strm_shutw(struct stconn *sc, enum co_shw_mode mode)
+{
+ struct qcs *qcs = __sc_mux_strm(sc);
+ struct qcc *qcc = qcs->qcc;
+
+ TRACE_ENTER(QMUX_EV_STRM_SHUT, qcc->conn, qcs);
+
+ /* Early closure reported if QC_SF_FIN_STREAM not yet set. */
+ if (!qcs_is_close_local(qcs) &&
+ !(qcs->flags & (QC_SF_FIN_STREAM|QC_SF_TO_RESET))) {
+
+ if (qcs->flags & QC_SF_UNKNOWN_PL_LENGTH) {
+ /* Close stream with a FIN STREAM frame. */
+ if (!(qcc->flags & (QC_CF_ERR_CONN|QC_CF_ERRL))) {
+ TRACE_STATE("set FIN STREAM",
+ QMUX_EV_STRM_SHUT, qcc->conn, qcs);
+ qcs->flags |= QC_SF_FIN_STREAM;
+ qcc_send_stream(qcs, 0);
+ }
+ }
+ else {
+ /* RESET_STREAM necessary. */
+ qcc_reset_stream(qcs, 0);
+ }
+
+ tasklet_wakeup(qcc->wait_event.tasklet);
+ }
+
+ /* removed unused "out:" label: no code path jumped to it, which
+ * triggered -Wunused-label.
+ */
+ TRACE_LEAVE(QMUX_EV_STRM_SHUT, qcc->conn, qcs);
+}
+
+/* sctl mux callback: stream-level control operations. Only MUX_SCTL_SID is
+ * supported (returns the QUIC stream id in <output>). Returns 0 on success,
+ * -1 on unsupported operation.
+ */
+static int qmux_sctl(struct stconn *sc, enum mux_sctl_type mux_sctl, void *output)
+{
+ int ret = 0;
+ struct qcs *qcs = __sc_mux_strm(sc);
+
+ switch (mux_sctl) {
+ case MUX_SCTL_SID:
+ if (output)
+ *((int64_t *)output) = qcs->id;
+ return ret;
+
+ default:
+ return -1;
+ }
+}
+
+/* for debugging with CLI's "show sess" command. May emit multiple lines, each
+ * new one being prefixed with <pfx>, if <pfx> is not NULL, otherwise a single
+ * line is used. Each field starts with a space so it's safe to print it after
+ * existing fields. Always returns 0.
+ */
+static int qmux_strm_show_sd(struct buffer *msg, struct sedesc *sd, const char *pfx)
+{
+ struct qcs *qcs = sd->se;
+ struct qcc *qcc;
+ int ret = 0;
+
+ /* the descriptor may be orphaned from its stream */
+ if (!qcs)
+ return ret;
+
+ chunk_appendf(msg, " qcs=%p .flg=%#x .id=%llu .st=%s .ctx=%p, .err=%#llx",
+ qcs, qcs->flags, (ull)qcs->id, qcs_st_to_str(qcs->st), qcs->ctx, (ull)qcs->err);
+
+ if (pfx)
+ chunk_appendf(msg, "\n%s", pfx);
+
+ qcc = qcs->qcc;
+ chunk_appendf(msg, " qcc=%p .flg=%#x .nbsc=%llu .nbhreq=%llu, .task=%p",
+ qcc, qcc->flags, (ull)qcc->nb_sc, (ull)qcc->nb_hreq, qcc->task);
+ return ret;
+}
+
+
+/* mux operations table exposed to the connection layer for QUIC */
+static const struct mux_ops qmux_ops = {
+ .init = qmux_init,
+ .destroy = qmux_destroy,
+ .detach = qmux_strm_detach,
+ .rcv_buf = qmux_strm_rcv_buf,
+ .snd_buf = qmux_strm_snd_buf,
+ .nego_fastfwd = qmux_nego_ff,
+ .done_fastfwd = qmux_done_ff,
+ .resume_fastfwd = qmux_resume_ff,
+ .subscribe = qmux_strm_subscribe,
+ .unsubscribe = qmux_strm_unsubscribe,
+ .wake = qmux_wake,
+ .shutw = qmux_strm_shutw,
+ .sctl = qmux_sctl,
+ .show_sd = qmux_strm_show_sd,
+ .flags = MX_FL_HTX|MX_FL_NO_UPG|MX_FL_FRAMED,
+ .name = "QUIC",
+};
+
+/* register this mux for the "quic" token, frontend side, HTTP mode only */
+static struct mux_proto_list mux_proto_quic =
+ { .token = IST("quic"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_FE, .mux = &qmux_ops };
+
+INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_quic);
diff --git a/src/mworker-prog.c b/src/mworker-prog.c
new file mode 100644
index 0000000..2734d95
--- /dev/null
+++ b/src/mworker-prog.c
@@ -0,0 +1,359 @@
+/*
+ * Master Worker - program
+ *
+ * Copyright HAProxy Technologies - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+#include <errno.h>
+#include <grp.h>
+#include <pwd.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/mworker.h>
+#include <haproxy/task.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+
+static int use_program = 0; /* do we use the program section ? */
+
+/*
+ * Launch every program configured in "program" sections. On reload, programs
+ * without the start-on-reload option are not re-forked and their list entry
+ * is removed. Returns 0; exits the process on fork() failure.
+ */
+int mworker_ext_launch_all()
+{
+ int ret;
+ struct mworker_proc *child;
+ struct mworker_proc *tmp;
+ int reexec = 0;
+
+ if (!use_program)
+ return 0;
+
+ reexec = getenv("HAPROXY_MWORKER_REEXEC") ? 1 : 0;
+
+ /* find the right mworker_proc */
+ list_for_each_entry_safe(child, tmp, &proc_list, list) {
+ if (child->reloads == 0 && (child->options & PROC_O_TYPE_PROG)) {
+
+ if (reexec && (!(child->options & PROC_O_START_RELOAD))) {
+ struct mworker_proc *old_child;
+
+ /*
+ * This is a reload and we don't want to fork a
+ * new program so have to remove the entry in
+ * the list.
+ *
+ * But before that, we need to mark the
+ * previous program as not leaving, if we find one.
+ */
+
+ list_for_each_entry(old_child, &proc_list, list) {
+ if (!(old_child->options & PROC_O_TYPE_PROG) || (!(old_child->options & PROC_O_LEAVING)))
+ continue;
+
+ if (strcmp(old_child->id, child->id) == 0)
+ old_child->options &= ~PROC_O_LEAVING;
+ }
+
+
+ LIST_DELETE(&child->list);
+ mworker_free_child(child);
+ child = NULL;
+
+ continue;
+ }
+
+ child->timestamp = ns_to_sec(now_ns);
+
+ ret = fork();
+ if (ret < 0) {
+ ha_alert("Cannot fork program '%s'.\n", child->id);
+ exit(EXIT_FAILURE); /* there has been an error */
+ } else if (ret > 0) { /* parent */
+ child->pid = ret;
+ ha_notice("New program '%s' (%d) forked\n", child->id, ret);
+ continue;
+ } else if (ret == 0) {
+ /* In child */
+ mworker_unblock_signals();
+ mworker_cleanlisteners();
+ mworker_cleantasks();
+
+ /* setgid / setuid */
+ if (child->gid != -1) {
+ if (getgroups(0, NULL) > 0 && setgroups(0, NULL) == -1)
+ ha_warning("[%s.main()] Failed to drop supplementary groups. Using 'gid'/'group'"
+ " without 'uid'/'user' is generally useless.\n", child->command[0]);
+
+ if (setgid(child->gid) == -1) {
+ ha_alert("[%s.main()] Cannot set gid %d.\n", child->command[0], child->gid);
+ exit(1);
+ }
+ }
+
+ /* BUG FIX: the alert previously printed child->gid here,
+ * reporting the wrong id on setuid() failure.
+ */
+ if (child->uid != -1 && setuid(child->uid) == -1) {
+ ha_alert("[%s.main()] Cannot set uid %d.\n", child->command[0], child->uid);
+ exit(1);
+ }
+
+ /* This one must not be exported, it's internal! */
+ unsetenv("HAPROXY_MWORKER_REEXEC");
+ unsetenv("HAPROXY_STARTUPLOGS_FD");
+ unsetenv("HAPROXY_MWORKER_WAIT_ONLY");
+ unsetenv("HAPROXY_PROCESSES");
+ execvp(child->command[0], child->command);
+
+ /* only reached if execvp() failed */
+ ha_alert("Cannot execute %s: %s\n", child->command[0], strerror(errno));
+ exit(EXIT_FAILURE);
+ }
+ }
+ }
+
+ return 0;
+
+}
+
+
+/* Configuration */
+
+/* Parse one line of a "program" configuration section.
+ * <ext_child> keeps the program being defined across successive calls for
+ * the same section. Returns a combination of ERR_* codes; ERR_ABORT or
+ * ERR_FATAL stops configuration parsing.
+ * NOTE(review): after the error path frees <ext_child> and resets it to
+ * NULL, a subsequent keyword line of the same section would dereference a
+ * NULL pointer — TODO confirm the caller always aborts on ERR_ABORT/FATAL.
+ */
+int cfg_parse_program(const char *file, int linenum, char **args, int kwm)
+{
+ static struct mworker_proc *ext_child = NULL;
+ struct mworker_proc *child;
+ int err_code = 0;
+
+ if (strcmp(args[0], "program") == 0) {
+ if (alertif_too_many_args(1, file, linenum, args, &err_code)) {
+ err_code |= ERR_ABORT;
+ goto error;
+ }
+
+ if (!*args[1]) {
+ ha_alert("parsing [%s:%d] : '%s' expects an <id> argument\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+
+ ext_child = calloc(1, sizeof(*ext_child));
+ if (!ext_child) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+
+ ext_child->options |= PROC_O_TYPE_PROG; /* external process */
+ ext_child->command = NULL;
+ ext_child->path = NULL;
+ ext_child->id = NULL;
+ ext_child->pid = -1;
+ ext_child->reloads = 0;
+ ext_child->timestamp = -1;
+ ext_child->ipc_fd[0] = -1;
+ ext_child->ipc_fd[1] = -1;
+ ext_child->options |= PROC_O_START_RELOAD; /* restart the programs by default */
+ ext_child->uid = -1;
+ ext_child->gid = -1;
+ LIST_INIT(&ext_child->list);
+
+ /* reject duplicate program ids among current (reloads==0) programs */
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->reloads == 0 && (child->options & PROC_O_TYPE_PROG)) {
+ if (strcmp(args[1], child->id) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' program section already exists in the configuration.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+ }
+ }
+
+ ext_child->id = strdup(args[1]);
+ if (!ext_child->id) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+
+ LIST_APPEND(&proc_list, &ext_child->list);
+
+ } else if (strcmp(args[0], "command") == 0) {
+ int arg_nb = 0;
+ int i = 0;
+
+ if (*(args[1]) == 0) {
+ ha_alert("parsing [%s:%d]: '%s' expects a command with optional arguments separated in words.\n", file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ /* count the command arguments */
+ while (*args[arg_nb+1])
+ arg_nb++;
+
+ /* one extra slot for the NULL execvp() terminator */
+ ext_child->command = calloc(arg_nb+1, sizeof(*ext_child->command));
+
+ if (!ext_child->command) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+
+ while (i < arg_nb) {
+ ext_child->command[i] = strdup(args[i+1]);
+ if (!ext_child->command[i]) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+ err_code |= ERR_ALERT | ERR_ABORT;
+ goto error;
+ }
+ i++;
+ }
+ ext_child->command[i] = NULL;
+
+ } else if (strcmp(args[0], "option") == 0) {
+
+ if (*(args[1]) == '\0') {
+ ha_alert("parsing [%s:%d]: '%s' expects an option name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ if (strcmp(args[1], "start-on-reload") == 0) {
+ if (alertif_too_many_args_idx(0, 1, file, linenum, args, &err_code))
+ goto error;
+ /* "option" sets the flag, "no option" clears it */
+ if (kwm == KWM_STD)
+ ext_child->options |= PROC_O_START_RELOAD;
+ else if (kwm == KWM_NO)
+ ext_child->options &= ~PROC_O_START_RELOAD;
+ goto out;
+
+ } else {
+ ha_alert("parsing [%s:%d] : unknown option '%s'.\n", file, linenum, args[1]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ } else if (strcmp(args[0], "user") == 0) {
+ struct passwd *ext_child_user;
+ if (*(args[1]) == '\0') {
+ ha_alert("parsing [%s:%d]: '%s' expects a user name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto error;
+
+ if (ext_child->uid != -1) {
+ ha_alert("parsing [%s:%d] : user/uid already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ ext_child_user = getpwnam(args[1]);
+ if (ext_child_user != NULL) {
+ ext_child->uid = (int)ext_child_user->pw_uid;
+ } else {
+ ha_alert("parsing [%s:%d] : cannot find user id for '%s' (%d:%s)\n", file, linenum, args[1], errno, strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ } else if (strcmp(args[0], "group") == 0) {
+ struct group *ext_child_group;
+ if (*(args[1]) == '\0') {
+ ha_alert("parsing [%s:%d]: '%s' expects a group name.\n",
+ file, linenum, args[0]);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ if (alertif_too_many_args(1, file, linenum, args, &err_code))
+ goto error;
+
+ if (ext_child->gid != -1) {
+ ha_alert("parsing [%s:%d] : group/gid already specified. Continuing.\n", file, linenum);
+ err_code |= ERR_ALERT;
+ goto out;
+ }
+
+ ext_child_group = getgrnam(args[1]);
+ if (ext_child_group != NULL) {
+ ext_child->gid = (int)ext_child_group->gr_gid;
+ } else {
+ ha_alert("parsing [%s:%d] : cannot find group id for '%s' (%d:%s)\n", file, linenum, args[1], errno, strerror(errno));
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ } else {
+ ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], "program");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ use_program = 1;
+
+ return err_code;
+
+error:
+ /* full cleanup of the partially built program entry; LIST_DELETE is
+ * safe on a LIST_INIT'd node that was never appended.
+ */
+ if (ext_child) {
+ LIST_DELETE(&ext_child->list);
+ if (ext_child->command) {
+ int i;
+
+ for (i = 0; ext_child->command[i]; i++) {
+ ha_free(&ext_child->command[i]);
+ }
+ ha_free(&ext_child->command);
+ }
+ ha_free(&ext_child->id);
+ }
+
+ ha_free(&ext_child);
+
+out:
+ return err_code;
+
+}
+
+/* Post-parsing checks for "program" sections: every current program must have
+ * a command, and program sections require master-worker mode. Returns a
+ * combination of ERR_* codes (0 when everything is fine).
+ */
+int cfg_program_postparser()
+{
+ int err_code = 0;
+ struct mworker_proc *child;
+
+ /* we only need to check this during configuration parsing,
+ * wait mode doesn't have the complete description of a program */
+ if (global.mode & MODE_MWORKER_WAIT)
+ return err_code;
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->reloads == 0 && (child->options & PROC_O_TYPE_PROG)) {
+ if (child->command == NULL) {
+ ha_alert("The program section '%s' lacks a command to launch.\n", child->id);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ }
+
+ if (use_program && !(global.mode & MODE_MWORKER)) {
+ ha_alert("Can't use a 'program' section without master worker mode.\n");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+
+ return err_code;
+}
+
+
+REGISTER_CONFIG_SECTION("program", cfg_parse_program, NULL);
+REGISTER_CONFIG_POSTPARSER("program", cfg_program_postparser);
diff --git a/src/mworker.c b/src/mworker.c
new file mode 100644
index 0000000..c71446a
--- /dev/null
+++ b/src/mworker.c
@@ -0,0 +1,821 @@
+/*
+ * Master Worker
+ *
+ * Copyright HAProxy Technologies 2019 - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#if defined(USE_SYSTEMD)
+#include <systemd/sd-daemon.h>
+#endif
+
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/listener.h>
+#include <haproxy/mworker.h>
+#include <haproxy/peers.h>
+#include <haproxy/proto_sockpair.h>
+#include <haproxy/proxy.h>
+#include <haproxy/ring.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/signal.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+static int exitcode = -1;
+static int max_reloads = -1; /* number max of reloads a worker can have until they are killed */
+struct mworker_proc *proc_self = NULL; /* process structure of current process */
+
+/* ----- children processes handling ----- */
+
+/*
+ * Send signal <sig> to every known child (workers and external programs).
+ */
+
+static void mworker_kill(int sig)
+{
+ struct mworker_proc *child;
+
+ list_for_each_entry(child, &proc_list, list) {
+ /* careful there, we must be sure that the pid > 0, we don't want to emit a kill -1 */
+ if ((child->options & (PROC_O_TYPE_WORKER|PROC_O_TYPE_PROG)) && (child->pid > 0))
+ kill(child->pid, sig);
+ }
+}
+
+/* Send signal <sig> to every worker whose reload count exceeds the configured
+ * max_reloads limit. No-op when max_reloads is unset (-1).
+ */
+void mworker_kill_max_reloads(int sig)
+{
+ struct mworker_proc *child;
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (max_reloads != -1 && (child->options & PROC_O_TYPE_WORKER) &&
+ (child->pid > 0) && (child->reloads > max_reloads))
+ kill(child->pid, sig);
+ }
+}
+
+/* return 1 if a pid is a current (non-leaving) child otherwise 0 */
+int mworker_current_child(int pid)
+{
+ struct mworker_proc *child;
+
+ list_for_each_entry(child, &proc_list, list) {
+ if ((child->options & (PROC_O_TYPE_WORKER|PROC_O_TYPE_PROG)) && (!(child->options & PROC_O_LEAVING)) && (child->pid == pid))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Return the number of new and old children (including workers and external
+ * processes); the master itself is not counted.
+ */
+int mworker_child_nb()
+{
+ struct mworker_proc *child;
+ int ret = 0;
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->options & (PROC_O_TYPE_WORKER|PROC_O_TYPE_PROG))
+ ret++;
+ }
+
+ return ret;
+}
+
+
+/*
+ * serialize the proc list and put it in the HAPROXY_PROCESSES environment
+ * variable. Also flags as leaving every non-master process whose reload
+ * count is above the minimum one seen in the list.
+ */
+void mworker_proc_list_to_env()
+{
+ char *msg = NULL;
+ struct mworker_proc *child;
+ int minreloads = INT_MAX; /* minimum number of reloads to chose which processes are "current" ones */
+
+ list_for_each_entry(child, &proc_list, list) {
+ char type = '?';
+
+ if (child->options & PROC_O_TYPE_MASTER)
+ type = 'm';
+ else if (child->options & PROC_O_TYPE_PROG)
+ type = 'e';
+ /* BUG FIX: this test previously used "&=", a compound
+ * assignment which overwrote child->options and wiped every
+ * other flag of workers (and zeroed the options of non-worker
+ * processes).
+ */
+ else if (child->options & PROC_O_TYPE_WORKER)
+ type = 'w';
+
+ if (child->reloads < minreloads)
+ minreloads = child->reloads;
+
+ if (child->pid > -1)
+ memprintf(&msg, "%s|type=%c;fd=%d;cfd=%d;pid=%d;reloads=%d;failedreloads=%d;timestamp=%d;id=%s;version=%s", msg ? msg : "", type, child->ipc_fd[0], child->ipc_fd[1], child->pid, child->reloads, child->failedreloads, child->timestamp, child->id ? child->id : "", child->version);
+ }
+ if (msg)
+ setenv("HAPROXY_PROCESSES", msg, 1);
+
+ /* setenv() copies the string (POSIX), so the serialization buffer can
+ * be released here instead of leaking it on every reload.
+ */
+ ha_free(&msg);
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->reloads > minreloads && !(child->options & PROC_O_TYPE_MASTER)) {
+ child->options |= PROC_O_LEAVING;
+ }
+ }
+
+
+}
+
+/* Allocate and initialize a new mworker_proc structure with sentinel values
+ * (pid/fds/timestamp set to -1). Returns NULL on allocation failure; the
+ * caller owns the returned structure (freed with mworker_free_child()).
+ */
+struct mworker_proc *mworker_proc_new()
+{
+ struct mworker_proc *child;
+
+ child = calloc(1, sizeof(*child));
+ if (!child)
+ return NULL;
+
+ child->failedreloads = 0;
+ child->reloads = 0;
+ child->pid = -1;
+ child->ipc_fd[0] = -1;
+ child->ipc_fd[1] = -1;
+ child->timestamp = -1;
+
+ return child;
+}
+
+
+/*
+ * unserialize the proc list from the HAPROXY_PROCESSES environment variable
+ * (fields separated by ';' within '|'-separated records, as produced by
+ * mworker_proc_list_to_env()). Also assigns <proc_self> to the master entry.
+ * Return < 0 upon error.
+ */
+int mworker_env_to_proc_list()
+{
+ char *env, *msg, *omsg = NULL, *token = NULL, *s1;
+ struct mworker_proc *child;
+ int minreloads = INT_MAX; /* minimum number of reloads to chose which processes are "current" ones */
+ int err = 0;
+
+ env = getenv("HAPROXY_PROCESSES");
+ if (!env)
+ goto no_env;
+
+ /* work on a private copy: strtok_r() mutates the string */
+ omsg = msg = strdup(env);
+ if (!msg) {
+ ha_alert("Out of memory while trying to allocate a worker process structure.");
+ err = -1;
+ goto out;
+ }
+
+ while ((token = strtok_r(msg, "|", &s1))) {
+ char *subtoken = NULL;
+ char *s2;
+
+ msg = NULL;
+
+ child = mworker_proc_new();
+ if (!child) {
+ ha_alert("out of memory while trying to allocate a worker process structure.");
+ err = -1;
+ goto out;
+ }
+
+ /* parse the "key=value" fields of one process record */
+ while ((subtoken = strtok_r(token, ";", &s2))) {
+
+ token = NULL;
+
+ if (strncmp(subtoken, "type=", 5) == 0) {
+ char type;
+
+ type = *(subtoken+5);
+ if (type == 'm') { /* we are in the master, assign it */
+ proc_self = child;
+ child->options |= PROC_O_TYPE_MASTER;
+ } else if (type == 'e') {
+ child->options |= PROC_O_TYPE_PROG;
+ } else if (type == 'w') {
+ child->options |= PROC_O_TYPE_WORKER;
+ }
+
+ } else if (strncmp(subtoken, "fd=", 3) == 0) {
+ child->ipc_fd[0] = atoi(subtoken+3);
+ if (child->ipc_fd[0] > -1)
+ global.maxsock++;
+ } else if (strncmp(subtoken, "cfd=", 4) == 0) {
+ child->ipc_fd[1] = atoi(subtoken+4);
+ if (child->ipc_fd[1] > -1)
+ global.maxsock++;
+ } else if (strncmp(subtoken, "pid=", 4) == 0) {
+ child->pid = atoi(subtoken+4);
+ } else if (strncmp(subtoken, "reloads=", 8) == 0) {
+ /* we only increment the number of asked reload */
+ child->reloads = atoi(subtoken+8);
+
+ if (child->reloads < minreloads)
+ minreloads = child->reloads;
+ } else if (strncmp(subtoken, "failedreloads=", 14) == 0) {
+ child->failedreloads = atoi(subtoken+14);
+ } else if (strncmp(subtoken, "timestamp=", 10) == 0) {
+ child->timestamp = atoi(subtoken+10);
+ } else if (strncmp(subtoken, "id=", 3) == 0) {
+ child->id = strdup(subtoken+3);
+ } else if (strncmp(subtoken, "version=", 8) == 0) {
+ child->version = strdup(subtoken+8);
+ }
+ }
+ /* NOTE(review): pid defaults to -1 (truthy), so a record
+ * without a pid field is still appended — TODO confirm this
+ * is intended.
+ */
+ if (child->pid) {
+ LIST_APPEND(&proc_list, &child->list);
+ } else {
+ mworker_free_child(child);
+ }
+ }
+
+ /* set the leaving processes once we know which number of reloads are the current processes */
+
+ list_for_each_entry(child, &proc_list, list) {
+ if (child->reloads > minreloads)
+ child->options |= PROC_O_LEAVING;
+ }
+
+ unsetenv("HAPROXY_PROCESSES");
+
+no_env:
+
+ /* no master entry found: rebuild a minimal one so the master can run */
+ if (!proc_self) {
+
+ proc_self = mworker_proc_new();
+ if (!proc_self) {
+ ha_alert("Cannot allocate process structures.\n");
+ err = -1;
+ goto out;
+ }
+ proc_self->options |= PROC_O_TYPE_MASTER;
+ proc_self->pid = pid;
+ proc_self->timestamp = 0; /* we don't know the startime anymore */
+
+ LIST_APPEND(&proc_list, &proc_self->list);
+ ha_warning("The master internals are corrupted or it was started with a too old version (< 1.9). Please restart the master process.\n");
+ }
+
+out:
+ free(omsg);
+ return err;
+}
+
+/* Signal blocking and unblocking */
+
+/* Block the set of signals handled by the master so they are not delivered
+ * at an unsafe moment (e.g. around fork/exec).
+ */
+void mworker_block_signals()
+{
+ sigset_t set;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGUSR1);
+ sigaddset(&set, SIGUSR2);
+ sigaddset(&set, SIGTTIN);
+ sigaddset(&set, SIGTTOU);
+ sigaddset(&set, SIGHUP);
+ sigaddset(&set, SIGCHLD);
+ ha_sigmask(SIG_SETMASK, &set, NULL);
+}
+
+/* Restore the default signal mask (counterpart of mworker_block_signals()). */
+void mworker_unblock_signals()
+{
+ haproxy_unblock_signals();
+}
+
+/* ----- mworker signal handlers ----- */
+
+/* broadcast the configured signal (sh->arg) to the workers */
+void mworker_broadcast_signal(struct sig_handler *sh)
+{
+ mworker_kill(sh->arg);
+}
+
+/*
+ * When called, this function reexec haproxy with -sf followed by current
+ * children PIDs and possibly old children PIDs if they didn't leave yet.
+ */
+void mworker_catch_sighup(struct sig_handler *sh)
+{
+ mworker_reload(0);
+}
+
+/* SIGTERM/SIGINT handler in the master: notify systemd (if enabled) and
+ * forward the received signal to every child.
+ */
+void mworker_catch_sigterm(struct sig_handler *sh)
+{
+ int sig = sh->arg;
+
+#if defined(USE_SYSTEMD)
+ if (global.tune.options & GTUNE_USE_SYSTEMD) {
+ sd_notify(0, "STOPPING=1");
+ }
+#endif
+ ha_warning("Exiting Master process...\n");
+ mworker_kill(sig);
+}
+
+/*
+ * SIGCHLD handler: reap every exited child, report its status, and exit the
+ * master once no child remains.
+ */
+
+void mworker_catch_sigchld(struct sig_handler *sh)
+{
+ int exitpid = -1;
+ int status = 0;
+ int childfound;
+
+restart_wait:
+
+ childfound = 0;
+
+ exitpid = waitpid(-1, &status, WNOHANG);
+ if (exitpid > 0) {
+ struct mworker_proc *child, *it;
+
+ /* normalize the wait status into an exit code:
+ * signals are reported as 128 + signum.
+ */
+ if (WIFEXITED(status))
+ status = WEXITSTATUS(status);
+ else if (WIFSIGNALED(status))
+ status = 128 + WTERMSIG(status);
+ else if (WIFSTOPPED(status))
+ status = 128 + WSTOPSIG(status);
+ else
+ status = 255;
+
+ /* delete the child from the process list */
+ list_for_each_entry_safe(child, it, &proc_list, list) {
+ if (child->pid != exitpid)
+ continue;
+
+ LIST_DELETE(&child->list);
+ close(child->ipc_fd[0]);
+ childfound = 1;
+ break;
+ }
+
+ if (!childfound) {
+ /* We didn't find the PID in the list, that shouldn't happen but we can emit a warning */
+ ha_warning("Process %d exited with code %d (%s)\n", exitpid, status, (status >= 128) ? strsignal(status - 128) : "Exit");
+ } else {
+ /* check if exited child is a current child */
+ if (!(child->options & PROC_O_LEAVING)) {
+ if (child->options & PROC_O_TYPE_WORKER) {
+ if (status < 128)
+ ha_warning("Current worker (%d) exited with code %d (%s)\n", exitpid, status, "Exit");
+ else
+ ha_alert("Current worker (%d) exited with code %d (%s)\n", exitpid, status, strsignal(status - 128));
+ }
+ else if (child->options & PROC_O_TYPE_PROG)
+ ha_alert("Current program '%s' (%d) exited with code %d (%s)\n", child->id, exitpid, status, (status >= 128) ? strsignal(status - 128) : "Exit");
+
+ /* unexpected death of a current child: kill everything
+ * unless exit-on-failure is disabled.
+ */
+ if (status != 0 && status != 130 && status != 143) {
+ if (child->options & PROC_O_TYPE_WORKER) {
+ ha_warning("A worker process unexpectedly died and this can only be explained by a bug in haproxy or its dependencies.\nPlease check that you are running an up to date and maintained version of haproxy and open a bug report.\n");
+ display_version();
+ }
+ if (!(global.tune.options & GTUNE_NOEXIT_ONFAILURE)) {
+ ha_alert("exit-on-failure: killing every processes with SIGTERM\n");
+ mworker_kill(SIGTERM);
+ }
+ }
+ /* 0 & SIGTERM (143) are normal, but we should report SIGINT (130) and other signals */
+ if (exitcode < 0 && status != 0 && status != 143)
+ exitcode = status;
+ } else {
+ if (child->options & PROC_O_TYPE_WORKER) {
+ ha_warning("Former worker (%d) exited with code %d (%s)\n", exitpid, status, (status >= 128) ? strsignal(status - 128) : "Exit");
+ delete_oldpid(exitpid);
+ } else if (child->options & PROC_O_TYPE_PROG) {
+ ha_warning("Former program '%s' (%d) exited with code %d (%s)\n", child->id, exitpid, status, (status >= 128) ? strsignal(status - 128) : "Exit");
+ }
+ }
+ mworker_free_child(child);
+ child = NULL;
+ }
+
+ /* do it again to check if it was the last worker */
+ goto restart_wait;
+ }
+ /* Better rely on the system than on a list of process to check if it was the last one */
+ else if (exitpid == -1 && errno == ECHILD) {
+ ha_warning("All workers exited. Exiting... (%d)\n", (exitcode > 0) ? exitcode : EXIT_SUCCESS);
+ atexit_flag = 0;
+ if (exitcode > 0)
+ exit(exitcode); /* parent must leave using the status code that provoked the exit */
+ exit(EXIT_SUCCESS);
+ }
+
+}
+
+/* ----- IPC FD (sockpair) related ----- */
+
+/* This wrapper is called from the workers. It is registered instead of the
+ * normal listener_accept() so the worker can exit() when it detects that the
+ * master closed the IPC FD. If it's not a close, we just call the regular
+ * listener_accept() function.
+ */
+void mworker_accept_wrapper(int fd)
+{
+ char c;
+ int ret;
+
+ while (1) {
+ /* MSG_PEEK: probe the socket without consuming the byte */
+ ret = recv(fd, &c, 1, MSG_PEEK);
+ if (ret == -1) {
+ if (errno == EINTR)
+ continue;
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ fd_cant_recv(fd);
+ return;
+ }
+ break;
+ } else if (ret > 0) {
+ /* real data pending: hand over to the listener */
+ struct listener *l = fdtab[fd].owner;
+
+ if (l)
+ listener_accept(l);
+ return;
+ } else if (ret == 0) {
+ /* At this step the master is down before
+ * this worker perform a 'normal' exit.
+ * So we want to exit with an error but
+ * other threads could currently process
+ * some stuff so we can't perform a clean
+ * deinit().
+ */
+ exit(EXIT_FAILURE);
+ }
+ }
+ return;
+}
+
+/*
+ * This function registers the accept wrapper for the sockpair of the master
+ * worker. It's only handled by worker thread #0. Other threads and master do
+ * nothing here. It always returns 1 (success).
+ */
+static int mworker_sockpair_register_per_thread()
+{
+ if (!(global.mode & MODE_MWORKER) || master)
+ return 1;
+
+ if (tid != 0)
+ return 1;
+
+ if (proc_self->ipc_fd[1] < 0) /* proc_self was incomplete and we can't find the socketpair */
+ return 1;
+
+ fd_set_nonblock(proc_self->ipc_fd[1]);
+ /* register the wrapper to handle read 0 when the master exits */
+ fdtab[proc_self->ipc_fd[1]].iocb = mworker_accept_wrapper;
+ fd_want_recv(proc_self->ipc_fd[1]);
+ return 1;
+}
+
+REGISTER_PER_THREAD_INIT(mworker_sockpair_register_per_thread);
+
+/* ----- proxies ----- */
+/*
+ * Upon a reload, the master worker needs to close all listeners FDs but the mworker_pipe
+ * fd, and the FD provided by fd@
+ */
+void mworker_cleanlisteners()
+{
+ struct listener *l, *l_next;
+ struct proxy *curproxy;
+ struct peers *curpeers;
+
+ /* peers proxies cleanup */
+ for (curpeers = cfg_peers; curpeers; curpeers = curpeers->next) {
+ if (!curpeers->peers_fe)
+ continue;
+
+ stop_proxy(curpeers->peers_fe);
+ /* disable this peer section so that it kills itself */
+ if (curpeers->sighandler)
+ signal_unregister_handler(curpeers->sighandler);
+ task_destroy(curpeers->sync_task);
+ curpeers->sync_task = NULL;
+ curpeers->peers_fe = NULL;
+ }
+
+ /* main proxies cleanup */
+ for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
+ int listen_in_master = 0;
+
+ list_for_each_entry_safe(l, l_next, &curproxy->conf.listeners, by_fe) {
+ /* remove the listener, but not those we need in the master... */
+ if (!(l->rx.flags & RX_F_MWORKER)) {
+ unbind_listener(l);
+ delete_listener(l);
+ } else {
+ listen_in_master = 1;
+ }
+ }
+ /* if the proxy shouldn't be in the master, we stop it */
+ if (!listen_in_master)
+ curproxy->flags |= PR_FL_DISABLED;
+ }
+}
+
+/* Upon a configuration loading error some mworker_proc and FDs/server were
+ * assigned but the worker was never forked, we must close the FDs and
+ * remove the server
+ */
+void mworker_cleanup_proc()
+{
+ struct mworker_proc *child, *it;
+
+ list_for_each_entry_safe(child, it, &proc_list, list) {
+
+ /* pid == -1 means the process was never actually forked */
+ if (child->pid == -1) {
+ /* Close the socketpairs. */
+ if (child->ipc_fd[0] > -1)
+ close(child->ipc_fd[0]);
+ if (child->ipc_fd[1] > -1)
+ close(child->ipc_fd[1]);
+ if (child->srv) {
+ /* only exists if we created a master CLI listener */
+ srv_drop(child->srv);
+ }
+ LIST_DELETE(&child->list);
+ mworker_free_child(child);
+ }
+ }
+}
+
+
+/* "show proc" CLI I/O handler: dumps into the trash chunk the master
+ * process, then the current and old (leaving) workers, then the current
+ * and old external programs, each with PID, reload count, uptime and
+ * version. Returns 1 when the dump is complete, 0 when the output buffer
+ * is full and the handler must be called again.
+ */
+static int cli_io_handler_show_proc(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	struct mworker_proc *child;
+	int old = 0;
+	int up = date.tv_sec - proc_self->timestamp;
+	char *uptime = NULL;
+	char *reloadtxt = NULL;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		return 1;
+
+	if (up < 0) /* must never be negative because of clock drift */
+		up = 0;
+
+	chunk_reset(&trash);
+
+	memprintf(&reloadtxt, "%d [failed: %d]", proc_self->reloads, proc_self->failedreloads);
+	chunk_printf(&trash, "#%-14s %-15s %-15s %-15s %-15s\n", "<PID>", "<type>", "<reloads>", "<uptime>", "<version>");
+	memprintf(&uptime, "%dd%02dh%02dm%02ds", up / 86400, (up % 86400) / 3600, (up % 3600) / 60, (up % 60));
+	chunk_appendf(&trash, "%-15u %-15s %-15s %-15s %-15s\n", (unsigned int)getpid(), "master", reloadtxt, uptime, haproxy_version);
+	ha_free(&reloadtxt);
+	ha_free(&uptime);
+
+	/* displays current processes */
+
+	chunk_appendf(&trash, "# workers\n");
+	list_for_each_entry(child, &proc_list, list) {
+		up = date.tv_sec - child->timestamp;
+		if (up < 0) /* must never be negative because of clock drift */
+			up = 0;
+
+		if (!(child->options & PROC_O_TYPE_WORKER))
+			continue;
+
+		/* leaving workers are counted here and dumped in the
+		 * "old workers" section below */
+		if (child->options & PROC_O_LEAVING) {
+			old++;
+			continue;
+		}
+		memprintf(&uptime, "%dd%02dh%02dm%02ds", up / 86400, (up % 86400) / 3600, (up % 3600) / 60, (up % 60));
+		chunk_appendf(&trash, "%-15u %-15s %-15d %-15s %-15s\n", child->pid, "worker", child->reloads, uptime, child->version);
+		ha_free(&uptime);
+	}
+
+	/* displays old processes */
+
+	if (old) {
+		chunk_appendf(&trash, "# old workers\n");
+		list_for_each_entry(child, &proc_list, list) {
+			up = date.tv_sec - child->timestamp;
+			if (up < 0) /* must never be negative because of clock drift */
+				up = 0;
+
+			if (!(child->options & PROC_O_TYPE_WORKER))
+				continue;
+
+			if (child->options & PROC_O_LEAVING) {
+				memprintf(&uptime, "%dd%02dh%02dm%02ds", up / 86400, (up % 86400) / 3600, (up % 3600) / 60, (up % 60));
+				chunk_appendf(&trash, "%-15u %-15s %-15d %-15s %-15s\n", child->pid, "worker", child->reloads, uptime, child->version);
+				ha_free(&uptime);
+			}
+		}
+	}
+
+	/* displays external process */
+	chunk_appendf(&trash, "# programs\n");
+	old = 0;
+	list_for_each_entry(child, &proc_list, list) {
+		up = date.tv_sec - child->timestamp;
+		if (up < 0) /* must never be negative because of clock drift */
+			up = 0;
+
+		if (!(child->options & PROC_O_TYPE_PROG))
+			continue;
+
+		if (child->options & PROC_O_LEAVING) {
+			old++;
+			continue;
+		}
+		memprintf(&uptime, "%dd%02dh%02dm%02ds", up / 86400, (up % 86400) / 3600, (up % 3600) / 60, (up % 60));
+		chunk_appendf(&trash, "%-15u %-15s %-15d %-15s %-15s\n", child->pid, child->id, child->reloads, uptime, "-");
+		ha_free(&uptime);
+	}
+
+	if (old) {
+		chunk_appendf(&trash, "# old programs\n");
+		list_for_each_entry(child, &proc_list, list) {
+			up = date.tv_sec - child->timestamp;
+			if (up < 0) /* must never be negative because of clock drift */
+				up = 0;
+
+			if (!(child->options & PROC_O_TYPE_PROG))
+				continue;
+
+			if (child->options & PROC_O_LEAVING) {
+				memprintf(&uptime, "%dd%02dh%02dm%02ds", up / 86400, (up % 86400) / 3600, (up % 3600) / 60, (up % 60));
+				chunk_appendf(&trash, "%-15u %-15s %-15d %-15s %-15s\n", child->pid, child->id, child->reloads, uptime, "-");
+				ha_free(&uptime);
+			}
+		}
+	}
+
+
+
+	if (applet_putchk(appctx, &trash) == -1)
+		return 0;
+
+	/* dump complete */
+	return 1;
+}
+
+/* "reload"/"hard-reload" CLI parser: triggers a reload of the master
+ * process. "hard-reload" is detected from the first letter of args[0].
+ * If the CLI session rides on a real connection, its FD is handed over
+ * to the master via the ipc socketpair so the client gets a synchronous
+ * reload status instead of being disconnected. Always returns 1.
+ */
+static int cli_parse_reload(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct stconn *scb = NULL;
+	struct stream *strm = NULL;
+	struct connection *conn = NULL;
+	int fd = -1;
+	int hardreload = 0;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	/* hard reload requested */
+	if (*args[0] == 'h')
+		hardreload = 1;
+
+	/* This ask for a synchronous reload, which means we will keep this FD
+	   instead of closing it. */
+
+	/* walk appctx -> stream -> frontend connection to find the client FD */
+	scb = appctx_sc(appctx);
+	if (scb)
+		strm = sc_strm(scb);
+	if (strm && strm->scf)
+		conn = sc_conn(strm->scf);
+	if (conn)
+		fd = conn_fd(conn);
+
+	/* Send the FD of the current session to the "cli_reload" FD, which won't be polled */
+	if (fd != -1 && send_fd_uxst(proc_self->ipc_fd[0], fd) == 0) {
+		fd_delete(fd); /* avoid the leak of the FD after sending it via the socketpair */
+	}
+	mworker_reload(hardreload);
+
+	return 1;
+}
+
+/* Displays if the current reload failed or succeeded.
+ * Reads the HAPROXY_LOAD_SUCCESS environment variable ("0" or "1") and
+ * emits "Success=0/1"; when built with USE_SHM_OPEN and startup logs are
+ * available, it then chains the appctx to the startup-logs ring so the
+ * logs are dumped after the status line.
+ * Returns 1 when done, 0 when more output is pending.
+ */
+static int cli_io_handler_show_loadstatus(struct appctx *appctx)
+{
+	char *env;
+	struct stconn *sc = appctx_sc(appctx);
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		return 1;
+
+	env = getenv("HAPROXY_LOAD_SUCCESS");
+	if (!env)
+		return 1;
+
+	if (strcmp(env, "0") == 0) {
+		chunk_printf(&trash, "Success=0\n");
+	} else if (strcmp(env, "1") == 0) {
+		chunk_printf(&trash, "Success=1\n");
+	}
+#ifdef USE_SHM_OPEN
+	/* separator before the startup-logs dump, only if the ring holds data */
+	if (startup_logs && b_data(&startup_logs->buf) > 1)
+		chunk_appendf(&trash, "--\n");
+
+	if (applet_putchk(appctx, &trash) == -1)
+		return 0;
+
+	if (startup_logs) {
+		/* hand over to the ring dumper: it installs its own handler */
+		appctx->io_handler = NULL;
+		ring_attach_cli(startup_logs, appctx, 0);
+		return 0;
+	}
+#else
+	if (applet_putchk(appctx, &trash) == -1)
+		return 0;
+#endif
+	return 1;
+}
+
+/* Config parser for the global "mworker-max-reloads" keyword.
+ * Expects exactly one non-negative integer argument and stores it in the
+ * global <max_reloads>. Returns a bitfield of ERR_* codes, 0 on success.
+ * NOTE(review): atol() result is narrowed to int and out-of-range input
+ * is not detected — consider strtol with range checking; confirm intent.
+ */
+static int mworker_parse_global_max_reloads(char **args, int section_type, struct proxy *curpx,
+                                           const struct proxy *defpx, const char *file, int linenum, char **err)
+{
+
+	int err_code = 0;
+
+	if (alertif_too_many_args(1, file, linenum, args, &err_code))
+		goto out;
+
+	if (*(args[1]) == 0) {
+		memprintf(err, "%sparsing [%s:%d] : '%s' expects an integer argument.\n", *err, file, linenum, args[0]);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+	max_reloads = atol(args[1]);
+	if (max_reloads < 0) {
+		memprintf(err, "%sparsing [%s:%d] '%s' : invalid value %d, must be >= 0", *err, file, linenum, args[0], max_reloads);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+out:
+	return err_code;
+}
+
+/* Releases a mworker_proc and all the strings it owns (command vector,
+ * id, version). Safe to call with NULL. The caller is responsible for
+ * unlinking <child> from proc_list first.
+ */
+void mworker_free_child(struct mworker_proc *child)
+{
+	int i;
+
+	if (child == NULL)
+		return;
+
+	/* command is a NULL-terminated argv-style vector */
+	for (i = 0; child->command && child->command[i]; i++)
+		ha_free(&child->command[i]);
+
+	ha_free(&child->command);
+	ha_free(&child->id);
+	ha_free(&child->version);
+	free(child);
+}
+
+/* global config keywords handled by this file */
+static struct cfg_kw_list mworker_kws = {{ }, {
+	{ CFG_GLOBAL, "mworker-max-reloads", mworker_parse_global_max_reloads },
+	{ 0, NULL, NULL },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &mworker_kws);
+
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+	{ { "@<relative pid>", NULL }, "@<relative pid>                         : send a command to the <relative pid> process", NULL, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "@!<pid>", NULL },         "@!<pid>                                 : send a command to the <pid> process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "@master", NULL },         "@master                                 : send a command to the master process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "show", "proc", NULL },    "show proc                               : show processes status", cli_parse_default, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "reload", NULL },          "reload                                  : achieve a soft-reload (-sf) of haproxy", cli_parse_reload, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "hard-reload", NULL },     "hard-reload                             : achieve a hard-reload (-st) of haproxy", cli_parse_reload, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
+	{ { "_loadstatus", NULL },     NULL,  cli_parse_default, cli_io_handler_show_loadstatus, NULL, NULL, ACCESS_MASTER_ONLY},
+	{{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
diff --git a/src/namespace.c b/src/namespace.c
new file mode 100644
index 0000000..9cc85a3
--- /dev/null
+++ b/src/namespace.c
@@ -0,0 +1,132 @@
+#define _GNU_SOURCE
+
+#include <sched.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/socket.h>
+
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/chunk.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/hash.h>
+#include <haproxy/namespace.h>
+#include <haproxy/signal.h>
+
+/* Opens the namespace <ns_name> and returns the FD or -1 in case of error
+ * (check errno). The namespace is looked up under /var/run/netns/, the
+ * path used by iproute2's "ip netns". The FD is opened with O_CLOEXEC so
+ * it is not inherited across exec.
+ */
+static int open_named_namespace(const char *ns_name)
+{
+	if (chunk_printf(&trash, "/var/run/netns/%s", ns_name) < 0)
+		return -1;
+	return open(trash.area, O_RDONLY | O_CLOEXEC);
+}
+
+/* FD on the process' own network namespace, used to switch back after a
+ * setns() into a named namespace; -1 until init_default_namespace() runs.
+ */
+static int default_namespace = -1;
+
+/* Opens /proc/<pid>/ns/net for the current process and stores the FD in
+ * <default_namespace>. Returns the FD, or -1 on error (check errno).
+ */
+static int init_default_namespace()
+{
+	if (chunk_printf(&trash, "/proc/%d/ns/net", getpid()) < 0)
+		return -1;
+	default_namespace = open(trash.area, O_RDONLY | O_CLOEXEC);
+	return default_namespace;
+}
+
+/* tree of registered netns_entry, indexed by namespace name */
+static struct eb_root namespace_tree_root = EB_ROOT;
+
+/* Signal handler called when stopping: releases every registered
+ * namespace (FD, key string, entry) and empties the tree.
+ */
+static void netns_sig_stop(struct sig_handler *sh)
+{
+	struct ebpt_node *node, *next;
+	struct netns_entry *entry;
+
+	/* close namespace file descriptors and remove registered namespaces from the
+	 * tree when stopping */
+	node = ebpt_first(&namespace_tree_root);
+	while (node) {
+		/* grab the successor before deleting/freeing the current node */
+		next = ebpt_next(node);
+		ebpt_delete(node);
+		entry = container_of(node, struct netns_entry, node);
+		free(entry->node.key);
+		close(entry->fd);
+		free(entry);
+		node = next;
+	}
+}
+
+/* Post-configuration init for network namespace support. Opens the
+ * default namespace only if at least one named namespace was registered,
+ * and installs the stop-time cleanup handler. Returns 0 on success or a
+ * bitfield of ERR_* codes.
+ */
+int netns_init(void)
+{
+	int err_code = 0;
+
+	/* if no namespaces have been defined in the config then
+	 * there is no point in trying to initialize anything:
+	 * my_socketat() will never be called with a valid namespace
+	 * structure and thus switching back to the default namespace
+	 * is not needed either */
+	if (!eb_is_empty(&namespace_tree_root)) {
+		if (init_default_namespace() < 0) {
+			ha_alert("Failed to open the default namespace.\n");
+			err_code |= ERR_ALERT | ERR_FATAL;
+		}
+	}
+
+	/* registered unconditionally: it is a no-op on an empty tree */
+	signal_register_fct(0, netns_sig_stop, 0);
+
+	return err_code;
+}
+
+/* Opens the namespace <ns_name> and registers it in the namespace tree.
+ * Returns the new entry, or NULL on error (open, calloc or strdup
+ * failure). On failure no resource is leaked: the namespace FD is closed
+ * and the partially built entry is freed.
+ */
+struct netns_entry* netns_store_insert(const char *ns_name)
+{
+	struct netns_entry *entry = NULL;
+	int fd = open_named_namespace(ns_name);
+	if (fd == -1)
+		goto out;
+
+	entry = calloc(1, sizeof(*entry));
+	if (!entry) {
+		close(fd); /* don't leak the namespace FD on OOM */
+		goto out;
+	}
+	entry->fd = fd;
+	entry->node.key = strdup(ns_name);
+	if (!entry->node.key) {
+		/* a NULL key must never be inserted in the ebtree */
+		close(fd);
+		free(entry);
+		entry = NULL;
+		goto out;
+	}
+	entry->name_len = strlen(ns_name);
+	ebis_insert(&namespace_tree_root, &entry->node);
+out:
+	return entry;
+}
+
+/* Looks up the namespace named <ns_name> (length <ns_name_len>, no need
+ * for a terminating NUL within that span) in the namespace tree.
+ * Returns the entry or NULL if not registered.
+ */
+const struct netns_entry* netns_store_lookup(const char *ns_name, size_t ns_name_len)
+{
+	struct ebpt_node *node;
+
+	node = ebis_lookup_len(&namespace_tree_root, ns_name, ns_name_len);
+	if (node)
+		return ebpt_entry(node, struct netns_entry, node);
+	else
+		return NULL;
+}
+
+/* Opens a socket in the namespace described by <ns> with the parameters <domain>,
+ * <type> and <protocol> and returns the FD or -1 in case of error (check errno).
+ * If <ns> is NULL or namespace support was not initialized, the socket is
+ * simply created in the current namespace. The function switches into
+ * <ns> with setns(), creates the socket, then switches back to the
+ * default namespace; a failure to switch back closes the socket.
+ */
+int my_socketat(const struct netns_entry *ns, int domain, int type, int protocol)
+{
+	int sock;
+
+	if (default_namespace >= 0 && ns && setns(ns->fd, CLONE_NEWNET) == -1)
+		return -1;
+
+	sock = socket(domain, type, protocol);
+
+	/* always try to return to the default namespace, even if socket() failed */
+	if (default_namespace >= 0 && ns && setns(default_namespace, CLONE_NEWNET) == -1) {
+		if (sock >= 0)
+			close(sock);
+		return -1;
+	}
+	return sock;
+}
+
+REGISTER_BUILD_OPTS("Built with network namespace support.");
diff --git a/src/ncbuf.c b/src/ncbuf.c
new file mode 100644
index 0000000..e1452f1
--- /dev/null
+++ b/src/ncbuf.c
@@ -0,0 +1,986 @@
+#include <haproxy/ncbuf.h>
+
+#include <string.h>
+
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifdef STANDALONE
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <haproxy/list.h>
+#endif /* STANDALONE */
+
+#ifdef DEBUG_STRICT
+# include <haproxy/bug.h>
+#else
+# include <stdio.h>
+# include <stdlib.h>
+
+# undef BUG_ON
+# define BUG_ON(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
+
+# undef BUG_ON_HOT
+# define BUG_ON_HOT(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
+#endif /* DEBUG_DEV */
+
+#include <haproxy/compiler.h>
+
+/* ******** internal API ******** */
+
+/* sentinel value returned when there is no block (NULL buffer / past end) */
+#define NCB_BLK_NULL ((struct ncb_blk){ .st = NULL })
+
+#define NCB_BK_F_GAP  0x01 /* block represents a gap */
+#define NCB_BK_F_FIN  0x02 /* special reduced gap present at the end of the buffer */
+/* Iterator-style view over one block (data or gap) of a ncbuf. Built on
+ * the fly by ncb_blk_first()/ncb_blk_next(); never stored in the buffer.
+ */
+struct ncb_blk {
+	char *st;  /* first byte of the block */
+	char *end; /* first byte after this block */
+
+	char *sz_ptr; /* pointer to size element - NULL for reduced gap */
+	ncb_sz_t sz; /* size of the block */
+	ncb_sz_t sz_data; /* size of the data following the block - invalid for reduced GAP */
+	ncb_sz_t off; /* offset of block in buffer */
+
+	char flag; /* NCB_BK_F_* bits */
+};
+
+/* Return pointer to <off> relative to <buf> head. Support buffer wrapping:
+ * a position past the end of the storage area wraps to its beginning.
+ */
+static char *ncb_peek(const struct ncbuf *buf, ncb_sz_t off)
+{
+	char *ptr = ncb_head(buf) + off;
+	if (ptr >= buf->area + buf->size)
+		ptr -= buf->size;
+	return ptr;
+}
+
+/* Returns the reserved space of <buf> which contains the size of the first
+ * data block. It lives in the last NCB_RESERVED_SZ bytes before the head.
+ */
+static char *ncb_reserved(const struct ncbuf *buf)
+{
+	return ncb_peek(buf, buf->size - NCB_RESERVED_SZ);
+}
+
+/* Encode <off> at <st> position in <buf>. Support wrapping. The value is
+ * serialized little-endian, one byte at a time, so the encoding itself may
+ * straddle the wrap point.
+ */
+static forceinline void ncb_write_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
+{
+	int i;
+
+	BUG_ON_HOT(st >= buf->area + buf->size);
+
+	for (i = 0; i < sizeof(ncb_sz_t); ++i) {
+		(*st) = off >> (8 * i) & 0xff;
+
+		if ((++st) == ncb_wrap(buf))
+			st = ncb_orig(buf);
+	}
+}
+
+/* Decode offset stored at <st> position in <buf>. Support wrapping.
+ * Inverse of ncb_write_off(): bytes are read little-endian.
+ */
+static forceinline ncb_sz_t ncb_read_off(const struct ncbuf *buf, char *st)
+{
+	int i;
+	ncb_sz_t off = 0;
+
+	BUG_ON_HOT(st >= buf->area + buf->size);
+
+	for (i = 0; i < sizeof(ncb_sz_t); ++i) {
+		off |= (unsigned char )(*st) << (8 * i);
+
+		if ((++st) == ncb_wrap(buf))
+			st = ncb_orig(buf);
+	}
+
+	return off;
+}
+
+/* Add <off> to the offset stored at <st> in <buf>. Support wrapping.
+ * Read-modify-write of a serialized ncb_sz_t. */
+static forceinline void ncb_inc_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
+{
+	const ncb_sz_t old = ncb_read_off(buf, st);
+	ncb_write_off(buf, st, old + off);
+}
+
+/* Returns true if a gap cannot be inserted at <off> : a reduced gap must be
+ * used instead because there is not enough room before the buffer end to
+ * store a full gap header (NCB_GAP_MIN_SZ bytes). */
+static forceinline int ncb_off_reduced(const struct ncbuf *b, ncb_sz_t off)
+{
+	return off + NCB_GAP_MIN_SZ > ncb_size(b);
+}
+
+/* Returns true if <blk> is the special NULL block (see NCB_BLK_NULL). */
+static forceinline int ncb_blk_is_null(const struct ncb_blk *blk)
+{
+	return !blk->st;
+}
+
+/* Returns true if <blk> is the last block of <buf>, i.e. it extends
+ * exactly to the usable end of the buffer. */
+static forceinline int ncb_blk_is_last(const struct ncbuf *buf, const struct ncb_blk *blk)
+{
+	BUG_ON_HOT(blk->off + blk->sz > ncb_size(buf));
+	return blk->off + blk->sz == ncb_size(buf);
+}
+
+/* Returns the first block of <buf> which is always a DATA block (possibly
+ * of size 0). Its size is read from the reserved header before the head.
+ * Returns NCB_BLK_NULL for a null buffer.
+ */
+static struct ncb_blk ncb_blk_first(const struct ncbuf *buf)
+{
+	struct ncb_blk blk;
+
+	if (ncb_is_null(buf))
+		return NCB_BLK_NULL;
+
+	blk.st = ncb_head(buf);
+
+	blk.sz_ptr = ncb_reserved(buf);
+	blk.sz = ncb_read_off(buf, ncb_reserved(buf));
+	blk.sz_data = 0;
+	BUG_ON_HOT(blk.sz > ncb_size(buf));
+
+	blk.end = ncb_peek(buf, blk.sz);
+	blk.off = 0;
+	blk.flag = 0;
+
+	return blk;
+}
+
+/* Returns the block following <prev> in the buffer <buf>, or NCB_BLK_NULL
+ * if <prev> was the last one. Blocks strictly alternate between DATA and
+ * GAP; a gap too close to the buffer end is materialized as a "reduced"
+ * gap without a stored header (NCB_BK_F_FIN).
+ */
+static struct ncb_blk ncb_blk_next(const struct ncbuf *buf,
+                                   const struct ncb_blk *prev)
+{
+	struct ncb_blk blk;
+
+	BUG_ON_HOT(ncb_blk_is_null(prev));
+
+	if (ncb_blk_is_last(buf, prev))
+		return NCB_BLK_NULL;
+
+	blk.st = prev->end;
+	blk.off = prev->off + prev->sz;
+	/* alternate the GAP flag relative to the previous block */
+	blk.flag = ~prev->flag & NCB_BK_F_GAP;
+
+	if (blk.flag & NCB_BK_F_GAP) {
+		if (ncb_off_reduced(buf, blk.off)) {
+			/* no room for a gap header: reduced gap up to buffer end */
+			blk.flag |= NCB_BK_F_FIN;
+			blk.sz_ptr = NULL;
+			blk.sz = ncb_size(buf) - blk.off;
+			blk.sz_data = 0;
+
+			/* A reduced gap can only be the last block. */
+			BUG_ON_HOT(!ncb_blk_is_last(buf, &blk));
+		}
+		else {
+			/* regular gap: its header stores its own size and the
+			 * size of the following data block */
+			blk.sz_ptr = ncb_peek(buf, blk.off + NCB_GAP_SZ_OFF);
+			blk.sz = ncb_read_off(buf, blk.sz_ptr);
+			blk.sz_data = ncb_read_off(buf, ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF));
+			BUG_ON_HOT(blk.sz < NCB_GAP_MIN_SZ);
+		}
+	}
+	else {
+		/* DATA block: its size was stored in the preceding gap header */
+		blk.sz_ptr = ncb_peek(buf, prev->off + NCB_GAP_SZ_DATA_OFF);
+		blk.sz = prev->sz_data;
+		blk.sz_data = 0;
+
+		/* only first DATA block can be empty. If this happens, a GAP
+		 * merge should have been realized.
+		 */
+		BUG_ON_HOT(!blk.sz);
+	}
+
+	BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
+	blk.end = ncb_peek(buf, blk.off + blk.sz);
+
+	return blk;
+}
+
+/* Returns the block containing offset <off>. Note that if <off> is at the
+ * frontier between two blocks, this function will return the preceding one.
+ * This is done to easily merge blocks on insertion/deletion.
+ * Linear scan from the first block; <off> must be within the buffer.
+ */
+static struct ncb_blk ncb_blk_find(const struct ncbuf *buf, ncb_sz_t off)
+{
+	struct ncb_blk blk;
+
+	if (ncb_is_null(buf))
+		return NCB_BLK_NULL;
+
+	BUG_ON_HOT(off >= ncb_size(buf));
+
+	/* strict '>' keeps the preceding block when <off> is on a frontier */
+	for (blk = ncb_blk_first(buf); off > blk.off + blk.sz;
+	     blk = ncb_blk_next(buf, &blk)) {
+	}
+
+	return blk;
+}
+
+/* Transform absolute offset <off> to a relative one from <blk> start.
+ * <off> may be equal to blk->off + blk->sz (one past the block). */
+static forceinline ncb_sz_t ncb_blk_off(const struct ncb_blk *blk, ncb_sz_t off)
+{
+	BUG_ON_HOT(off < blk->off || off > blk->off + blk->sz);
+	BUG_ON_HOT(off - blk->off > blk->sz);
+	return off - blk->off;
+}
+
+/* Simulate insertion in <buf> of <data> of length <len> at offset <off>. This
+ * ensures that minimal block size are respected for newly formed gaps. <blk>
+ * must be the block where the insert operation begins. If <mode> is
+ * NCB_ADD_COMPARE, old and new overlapped data are compared to validate the
+ * insertion.
+ *
+ * Returns NCB_RET_OK if insertion can proceed, NCB_RET_GAP_SIZE if a
+ * resulting gap would be smaller than NCB_GAP_MIN_SZ, or NCB_RET_DATA_REJ
+ * if overlapped data differ in NCB_ADD_COMPARE mode. The buffer itself is
+ * never modified by this function.
+ */
+static enum ncb_ret ncb_check_insert(const struct ncbuf *buf,
+                                     const struct ncb_blk *blk, ncb_sz_t off,
+                                     const char *data, ncb_sz_t len,
+                                     enum ncb_add_mode mode)
+{
+	struct ncb_blk next;
+	ncb_sz_t off_blk = ncb_blk_off(blk, off);
+	ncb_sz_t to_copy;
+	ncb_sz_t left = len;
+
+	/* If insertion starts in a gap, it must leave enough space to keep the
+	 * gap header.
+	 */
+	if (left && (blk->flag & NCB_BK_F_GAP)) {
+		if (off_blk < NCB_GAP_MIN_SZ)
+			return NCB_RET_GAP_SIZE;
+	}
+
+	/* walk all blocks covered by the insertion range */
+	next = *blk;
+	while (left) {
+		off_blk = ncb_blk_off(&next, off);
+		to_copy = MIN(left, next.sz - off_blk);
+
+		if (next.flag & NCB_BK_F_GAP && off_blk + to_copy < next.sz) {
+			/* Insertion must leave enough space for a new gap
+			 * header if stopped in a middle of a gap.
+			 */
+			const ncb_sz_t gap_sz = next.sz - (off_blk + to_copy);
+			if (gap_sz < NCB_GAP_MIN_SZ && !ncb_blk_is_last(buf, &next))
+				return NCB_RET_GAP_SIZE;
+		}
+		else if (!(next.flag & NCB_BK_F_GAP) && mode == NCB_ADD_COMPARE) {
+			/* Compare memory of data block in NCB_ADD_COMPARE mode. */
+			const ncb_sz_t off_blk = ncb_blk_off(&next, off);
+			char *st = ncb_peek(buf, off);
+
+			to_copy = MIN(left, next.sz - off_blk);
+			if (st + to_copy > ncb_wrap(buf)) {
+				/* compared region wraps: compare the two halves */
+				const ncb_sz_t sz1 = ncb_wrap(buf) - st;
+				if (memcmp(st, data, sz1))
+					return NCB_RET_DATA_REJ;
+				if (memcmp(ncb_orig(buf), data + sz1, to_copy - sz1))
+					return NCB_RET_DATA_REJ;
+			}
+			else {
+				if (memcmp(st, data, to_copy))
+					return NCB_RET_DATA_REJ;
+			}
+		}
+
+		left -= to_copy;
+		data += to_copy;
+		off  += to_copy;
+
+		next = ncb_blk_next(buf, &next);
+	}
+
+	return NCB_RET_OK;
+}
+
+/* Fill new <data> of length <len> inside an already existing data <blk> at
+ * offset <off>. Offset is relative to <blk> so it cannot be greater than the
+ * block size. <mode> specifies if old data are preserved or overwritten.
+ * Returns the number of bytes covered (copied in OVERWRT mode, skipped in
+ * PRESERVE mode).
+ */
+static ncb_sz_t ncb_fill_data_blk(const struct ncbuf *buf,
+                                  const struct ncb_blk *blk, ncb_sz_t off,
+                                  const char *data, ncb_sz_t len,
+                                  enum ncb_add_mode mode)
+{
+	const ncb_sz_t to_copy = MIN(len, blk->sz - off);
+	char *ptr = NULL;
+
+	BUG_ON_HOT(off > blk->sz);
+	/* This can happen due to previous ncb_blk_find() usage. In this
+	 * case the current fill is a noop.
+	 */
+	if (off == blk->sz)
+		return 0;
+
+	if (mode == NCB_ADD_OVERWRT) {
+		ptr = ncb_peek(buf, blk->off + off);
+
+		if (ptr + to_copy >= ncb_wrap(buf)) {
+			/* destination wraps: copy in two parts */
+			const ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
+			memcpy(ptr, data, sz1);
+			memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
+		}
+		else {
+			memcpy(ptr, data, to_copy);
+		}
+	}
+
+	return to_copy;
+}
+
+/* Fill the gap <blk> starting at <off> with new <data> of length <len>. <off>
+ * is relative to <blk> so it cannot be greater than the block size.
+ * If the insertion stops before the end of the gap, a new (possibly
+ * reduced) gap header is written for the remainder first, then the data
+ * bytes are copied. Returns the number of bytes copied.
+ */
+static ncb_sz_t ncb_fill_gap_blk(const struct ncbuf *buf,
+                                 const struct ncb_blk *blk, ncb_sz_t off,
+                                 const char *data, ncb_sz_t len)
+{
+	const ncb_sz_t to_copy = MIN(len, blk->sz - off);
+	char *ptr;
+
+	BUG_ON_HOT(off > blk->sz);
+	/* This can happen due to previous ncb_blk_find() usage. In this
+	 * case the current fill is a noop.
+	 */
+	if (off == blk->sz)
+		return 0;
+
+	/* A new gap must be created if insertion stopped before gap end. */
+	if (off + to_copy < blk->sz) {
+		const ncb_sz_t gap_off = blk->off + off + to_copy;
+		const ncb_sz_t gap_sz = blk->sz - off - to_copy;
+
+		BUG_ON_HOT(!ncb_off_reduced(buf, gap_off) &&
+		           blk->off + blk->sz - gap_off < NCB_GAP_MIN_SZ);
+
+		/* write the new gap header unless this is a reduced gap. */
+		if (!ncb_off_reduced(buf, gap_off)) {
+			char *gap_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_OFF);
+			char *gap_data_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_DATA_OFF);
+
+			/* remaining gap keeps the original trailing data size */
+			ncb_write_off(buf, gap_ptr, gap_sz);
+			ncb_write_off(buf, gap_data_ptr, blk->sz_data);
+		}
+	}
+
+	/* fill the gap with new data */
+	ptr = ncb_peek(buf, blk->off + off);
+	if (ptr + to_copy >= ncb_wrap(buf)) {
+		/* destination wraps: copy in two parts */
+		ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
+		memcpy(ptr, data, sz1);
+		memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
+	}
+	else {
+		memcpy(ptr, data, to_copy);
+	}
+
+	return to_copy;
+}
+
+/* ******** public API ******** */
+
+/* Initialize or reset <buf> by clearing all data. Its size is untouched.
+ * Buffer is positioned to <head> offset. Use 0 to realign it. <buf> must not
+ * be NCBUF_NULL.
+ * The layout after init is: empty first DATA block (reserved header = 0)
+ * followed by a single gap covering the whole buffer with no trailing data.
+ */
+void ncb_init(struct ncbuf *buf, ncb_sz_t head)
+{
+	BUG_ON_HOT(ncb_is_null(buf));
+
+	BUG_ON_HOT(head >= buf->size);
+	buf->head = head;
+
+	ncb_write_off(buf, ncb_reserved(buf), 0);
+	ncb_write_off(buf, ncb_head(buf), ncb_size(buf));
+	ncb_write_off(buf, ncb_peek(buf, sizeof(ncb_sz_t)), 0);
+}
+
+/* Construct a ncbuf with all its parameters. The storage is NOT
+ * initialized; call ncb_init() before use. */
+struct ncbuf ncb_make(char *area, ncb_sz_t size, ncb_sz_t head)
+{
+	struct ncbuf buf;
+
+	/* Ensure that there is enough space for the reserved space and data.
+	 * This is the minimal value to not crash later.
+	 */
+	BUG_ON_HOT(size <= NCB_RESERVED_SZ);
+
+	buf.area = area;
+	buf.size = size;
+	buf.head = head;
+
+	return buf;
+}
+
+/* Returns the total number of bytes stored in whole <buf>, i.e. the sum
+ * of the sizes of all DATA blocks. Uses ncb_sz_t for the accumulator so
+ * the result cannot be truncated through a narrower int on large buffers.
+ */
+ncb_sz_t ncb_total_data(const struct ncbuf *buf)
+{
+	struct ncb_blk blk;
+	ncb_sz_t total = 0;
+
+	for (blk = ncb_blk_first(buf); !ncb_blk_is_null(&blk); blk = ncb_blk_next(buf, &blk)) {
+		if (!(blk.flag & NCB_BK_F_GAP))
+			total += blk.sz;
+	}
+
+	return total;
+}
+
+/* Returns true if there is no data anywhere in <buf>.
+ * A NULL buffer is reported as empty. Offsets are kept in ncb_sz_t, the
+ * type returned by ncb_read_off(), to avoid a narrowing conversion.
+ */
+int ncb_is_empty(const struct ncbuf *buf)
+{
+	ncb_sz_t first_data, first_gap;
+
+	if (ncb_is_null(buf))
+		return 1;
+
+	first_data = ncb_read_off(buf, ncb_reserved(buf));
+	BUG_ON_HOT(first_data > ncb_size(buf));
+	/* Buffer is not empty if first data block is not null. */
+	if (first_data)
+		return 0;
+
+	/* Head contains the first gap size if first data block is empty. */
+	first_gap  = ncb_read_off(buf, ncb_head(buf));
+	BUG_ON_HOT(first_gap > ncb_size(buf));
+	return first_gap == ncb_size(buf);
+}
+
+/* Returns true if no more data can be inserted in <buf>.
+ * A NULL buffer is reported as not full. The offset is kept in ncb_sz_t,
+ * the type returned by ncb_read_off(), to avoid a narrowing conversion.
+ */
+int ncb_is_full(const struct ncbuf *buf)
+{
+	ncb_sz_t first_data;
+
+	if (ncb_is_null(buf))
+		return 0;
+
+	/* First data block must cover whole buffer if full. */
+	first_data = ncb_read_off(buf, ncb_reserved(buf));
+	BUG_ON_HOT(first_data > ncb_size(buf));
+	return first_data == ncb_size(buf);
+}
+
+/* Returns true if <buf> contains data fragmented by gaps, i.e. the layout
+ * is anything other than "one data block optionally followed by one final
+ * gap". Empty, full and NULL buffers are not fragmented. */
+int ncb_is_fragmented(const struct ncbuf *buf)
+{
+	struct ncb_blk data, gap;
+
+	if (ncb_is_null(buf))
+		return 0;
+
+	/* check if buffer is empty or full */
+	if (ncb_is_empty(buf) || ncb_is_full(buf))
+		return 0;
+
+	/* check that following gap is the last block */
+	data = ncb_blk_first(buf);
+	gap = ncb_blk_next(buf, &data);
+	return !ncb_blk_is_last(buf, &gap);
+}
+
+/* Returns the number of bytes of data available in <buf> starting at offset
+ * <off> until the next gap or the buffer end. The counted data may wrap if
+ * the buffer storage is not aligned.
+ */
+ncb_sz_t ncb_data(const struct ncbuf *buf, ncb_sz_t off)
+{
+	struct ncb_blk blk;
+	ncb_sz_t off_blk;
+
+	if (ncb_is_null(buf))
+		return 0;
+
+	blk = ncb_blk_find(buf, off);
+	off_blk = ncb_blk_off(&blk, off);
+
+	/* if <off> is at the frontier between two blocks and <blk> is a gap,
+	 * retrieve the next data block.
+	 */
+	if (blk.flag & NCB_BK_F_GAP && off_blk == blk.sz &&
+	    !ncb_blk_is_last(buf, &blk)) {
+		blk = ncb_blk_next(buf, &blk);
+		off_blk = ncb_blk_off(&blk, off);
+	}
+
+	/* <off> inside a gap: no contiguous data here */
+	if (blk.flag & NCB_BK_F_GAP)
+		return 0;
+
+	return blk.sz - off_blk;
+}
+
+/* Add a new block at <data> of size <len> in <buf> at offset <off>.
+ * The operation is first validated by ncb_check_insert() so the buffer is
+ * either fully updated or left untouched.
+ *
+ * Returns NCB_RET_OK on success. On error the following codes are returned :
+ * - NCB_RET_GAP_SIZE : cannot add data because the gap formed is too small
+ * - NCB_RET_DATA_REJ : old data would be overwritten by different ones in
+ *                      NCB_ADD_COMPARE mode.
+ */
+enum ncb_ret ncb_add(struct ncbuf *buf, ncb_sz_t off,
+                     const char *data, ncb_sz_t len, enum ncb_add_mode mode)
+{
+	struct ncb_blk blk;
+	ncb_sz_t left = len;
+	enum ncb_ret ret;
+	char *new_sz;
+
+	if (!len)
+		return NCB_RET_OK;
+
+	BUG_ON_HOT(off + len > ncb_size(buf));
+
+	/* Get block where insertion begins. */
+	blk = ncb_blk_find(buf, off);
+
+	/* Check if insertion is possible. */
+	ret = ncb_check_insert(buf, &blk, off, data, len, mode);
+	if (ret != NCB_RET_OK)
+		return ret;
+
+	if (blk.flag & NCB_BK_F_GAP) {
+		/* Reduce gap size if insertion begins in a gap. Gap data size
+		 * is reset and will be recalculated during insertion.
+		 */
+		const ncb_sz_t gap_sz = off - blk.off;
+		BUG_ON_HOT(gap_sz < NCB_GAP_MIN_SZ);
+
+		/* pointer to data size to increase. */
+		new_sz = ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF);
+
+		ncb_write_off(buf, blk.sz_ptr, gap_sz);
+		ncb_write_off(buf, new_sz, 0);
+	}
+	else {
+		/* pointer to data size to increase. */
+		new_sz = blk.sz_ptr;
+	}
+
+	/* insert data */
+	while (left) {
+		struct ncb_blk next;
+		const ncb_sz_t off_blk = ncb_blk_off(&blk, off);
+		ncb_sz_t done;
+
+		/* retrieve the next block. This is necessary to do this
+		 * before overwriting a gap.
+		 */
+		next = ncb_blk_next(buf, &blk);
+
+		if (blk.flag & NCB_BK_F_GAP) {
+			done = ncb_fill_gap_blk(buf, &blk, off_blk, data, left);
+
+			/* update the inserted data block size */
+			if (off + done == blk.off + blk.sz) {
+				/* merge next data block if insertion reached gap end */
+				ncb_inc_off(buf, new_sz, done + blk.sz_data);
+			}
+			else {
+				/* insertion stopped before gap end */
+				ncb_inc_off(buf, new_sz, done);
+			}
+		}
+		else {
+			done = ncb_fill_data_blk(buf, &blk, off_blk, data, left, mode);
+		}
+
+		BUG_ON_HOT(done > blk.sz || done > left);
+		left -= done;
+		data += done;
+		off  += done;
+
+		blk = next;
+	}
+
+	return NCB_RET_OK;
+}
+
+/* Advance the head of <buf> to the offset <adv>. Data at the start of buffer
+ * will be lost while some space will be formed at the end to be able to insert
+ * new data.
+ *
+ * Returns NCB_RET_OK on success. It may return NCB_RET_GAP_SIZE if operation
+ * is rejected due to the formation of a too small gap in front. If advance is
+ * done only inside a data block it is guaranteed to succeed.
+ */
+enum ncb_ret ncb_advance(struct ncbuf *buf, ncb_sz_t adv)
+{
+	struct ncb_blk start, last;
+	ncb_sz_t off_blk;
+	ncb_sz_t first_data_sz;
+
+	BUG_ON_HOT(adv > ncb_size(buf));
+	if (!adv)
+		return NCB_RET_OK;
+
+	/* Special case if adv is full size. This is equivalent to a reset. */
+	if (adv == ncb_size(buf)) {
+		ncb_init(buf, buf->head);
+		return NCB_RET_OK;
+	}
+
+	start = ncb_blk_find(buf, adv);
+
+	/* Special case if advance until the last block which is a GAP. The
+	 * buffer will be left empty and is thus equivalent to a reset.
+	 */
+	if (ncb_blk_is_last(buf, &start) && (start.flag & NCB_BK_F_GAP)) {
+		ncb_sz_t new_head = buf->head + adv;
+		if (new_head >= buf->size)
+			new_head -= buf->size;
+
+		ncb_init(buf, new_head);
+		return NCB_RET_OK;
+	}
+
+	/* locate the last block: it must be extended (or followed) by a gap
+	 * covering the space freed at the end of the buffer */
+	last = start;
+	while (!ncb_blk_is_last(buf, &last))
+		last = ncb_blk_next(buf, &last);
+
+	off_blk = ncb_blk_off(&start, adv);
+
+	if (start.flag & NCB_BK_F_GAP) {
+		/* If advance in a GAP, its new size must be big enough. */
+		if (start.sz == off_blk) {
+			/* GAP removed. Buffer will start with following DATA block. */
+			first_data_sz = start.sz_data;
+		}
+		else if (start.sz - off_blk < NCB_GAP_MIN_SZ) {
+			return NCB_RET_GAP_SIZE;
+		}
+		else {
+			/* Buffer will start with this GAP block. */
+			first_data_sz = 0;
+		}
+	}
+	else {
+		/* If off_blk less than start.sz, the data block will become the
+		 * first block. If equal, the data block is completely removed
+		 * and thus the following GAP will be the first block.
+		 */
+		first_data_sz = start.sz - off_blk;
+	}
+
+	if (last.flag & NCB_BK_F_GAP) {
+		/* Extend last GAP unless this is a reduced gap. */
+		if (!(last.flag & NCB_BK_F_FIN) || last.sz + adv >= NCB_GAP_MIN_SZ) {
+			/* use .st instead of .sz_ptr which can be NULL if reduced gap */
+			ncb_write_off(buf, last.st, last.sz + adv);
+			ncb_write_off(buf, ncb_peek(buf, last.off + NCB_GAP_SZ_DATA_OFF), 0);
+		}
+	}
+	else {
+		/* Insert a GAP after the last DATA block. */
+		if (adv >= NCB_GAP_MIN_SZ) {
+			ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_OFF), adv);
+			ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_DATA_OFF), 0);
+		}
+	}
+
+	/* Advance head and update reserved header with new first data size. */
+	buf->head += adv;
+	if (buf->head >= buf->size)
+		buf->head -= buf->size;
+	ncb_write_off(buf, ncb_reserved(buf), first_data_sz);
+
+	/* If advance in a GAP, reduce its size. */
+	if (start.flag & NCB_BK_F_GAP && !first_data_sz) {
+		ncb_write_off(buf, ncb_head(buf), start.sz - off_blk);
+		/* Recopy the block sz_data at the new position. */
+		ncb_write_off(buf, ncb_peek(buf, NCB_GAP_SZ_DATA_OFF), start.sz_data);
+	}
+
+	return NCB_RET_OK;
+}
+
+/* ******** testing API ******** */
+/* To build it :
+ * gcc -Wall -DSTANDALONE -lasan -I./include -o ncbuf src/ncbuf.c
+ */
+#ifdef STANDALONE
+
+int ncb_print = 0;
+
+/* printf-style helper for the STANDALONE test harness: writes to stderr
+ * only when <ncb_print> is set. */
+static void ncbuf_printf(char *str, ...)
+{
+	va_list args;
+
+	va_start(args, str);
+	if (ncb_print)
+		vfprintf(stderr, str, args);
+	va_end(args);
+}
+
+/* a random (offset, length) insertion request, chained for replay */
+struct rand_off {
+	struct list el;
+	ncb_sz_t off;
+	ncb_sz_t len;
+};
+
+/* Allocates a random offset/length pair valid for <buf>. Offsets smaller
+ * than NCB_GAP_MIN_SZ are snapped to 0 so a leading gap is never too
+ * small. Aborts on allocation failure (test harness only). */
+static struct rand_off *ncb_generate_rand_off(const struct ncbuf *buf)
+{
+	struct rand_off *roff;
+	roff = calloc(1, sizeof(*roff));
+	BUG_ON(!roff);
+
+	roff->off = rand() % (ncb_size(buf));
+	if (roff->off > 0 && roff->off < NCB_GAP_MIN_SZ)
+		roff->off = 0;
+
+	roff->len = rand() % (ncb_size(buf) - roff->off + 1);
+
+	return roff;
+}
+
+/* Dumps one block (type, FIN flag, offset/size) to stderr when printing
+ * is enabled. */
+static void ncb_print_blk(const struct ncb_blk *blk)
+{
+	if (ncb_print) {
+		fprintf(stderr, "%s(%s): %2u/%u.\n",
+		        blk->flag & NCB_BK_F_GAP ? "GAP " : "DATA",
+		        blk->flag & NCB_BK_F_FIN ? "F" : "-", blk->off, blk->sz);
+	}
+}
+
+/* test-harness duplicate of ncb_blk_is_null() */
+static int ncb_is_null_blk(const struct ncb_blk *blk)
+{
+	return !blk->st;
+}
+
+/* Walks and prints every block of <buf> from first to last. */
+static void ncb_loop(const struct ncbuf *buf)
+{
+	struct ncb_blk blk;
+
+	blk = ncb_blk_first(buf);
+	do {
+		ncb_print_blk(&blk);
+		blk = ncb_blk_next(buf, &blk);
+	} while (!ncb_is_null_blk(&blk));
+
+	ncbuf_printf("\n");
+}
+
+/* Hex-dumps the raw storage <area> of <len> bytes then the block layout of
+ * <b>, tagged with the caller's <line>. Waits for a keypress between dumps
+ * when interactive printing is enabled. */
+static void ncbuf_print_buf(struct ncbuf *b, ncb_sz_t len,
+                            unsigned char *area, int line)
+{
+	int i;
+
+	ncbuf_printf("buffer status at line %d\n", line);
+	for (i = 0; i < len; ++i) {
+		ncbuf_printf("%02x.", area[i]);
+		if (i && i % 32 == 31)  ncbuf_printf("\n");
+		else if (i && i % 8 == 7) ncbuf_printf(" ");
+	}
+	ncbuf_printf("\n");
+
+	ncb_loop(b);
+
+	if (ncb_print)
+		getchar();
+}
+
+static struct ncbuf b;
+static unsigned char *bufarea = NULL;
+static ncb_sz_t bufsize = 16384;
+static ncb_sz_t bufhead = 15;
+
+#define NCB_INIT(buf) \
+ if ((reset)) { memset(bufarea, 0xaa, bufsize); } \
+ ncb_init(buf, bufhead); \
+ ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
+
+#define NCB_ADD_EQ(buf, off, data, sz, mode, ret) \
+ BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) != (ret)); \
+ ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);
+
+#define NCB_ADD_NEQ(buf, off, data, sz, mode, ret) \
+ BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) == (ret)); \
+ ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);
+
+#define NCB_ADVANCE_EQ(buf, off, ret) \
+ BUG_ON(ncb_advance((buf), (off)) != (ret)); \
+ ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);
+
+#define NCB_TOTAL_DATA_EQ(buf, data) \
+ BUG_ON(ncb_total_data((buf)) != (data));
+
+#define NCB_DATA_EQ(buf, off, data) \
+ BUG_ON(ncb_data((buf), (off)) != (data));
+
/* Run the whole ncbuf unit-test suite with the buffer head at <head>.
 * <reset> repaints the backing area before each init, <print_delay> is the
 * pause in microseconds between steps of the random passes (verbose mode).
 * Always returns 1; any failed check aborts via BUG_ON().
 */
static int ncbuf_test(ncb_sz_t head, int reset, int print_delay)
{
	char *data0, data1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	struct list list = LIST_HEAD_INIT(list);
	struct rand_off *roff, *roff_tmp;
	enum ncb_ret ret;

	/* data0: a bufsize-long constant filler used for most insertions */
	data0 = malloc(bufsize);
	BUG_ON(!data0);
	memset(data0, 0xff, bufsize);

	bufarea = malloc(bufsize);
	BUG_ON(!bufarea);

	fprintf(stderr, "running unit tests\n");

	/* all accessors must be safe on the NULL buffer */
	b = NCBUF_NULL;
	BUG_ON(!ncb_is_null(&b));
	NCB_DATA_EQ(&b, 0, 0);
	NCB_TOTAL_DATA_EQ(&b, 0);
	BUG_ON(ncb_size(&b) != 0);
	BUG_ON(!ncb_is_empty(&b));
	BUG_ON(ncb_is_full(&b));
	BUG_ON(ncb_is_fragmented(&b));

	b.area = (char *)bufarea;
	b.size = bufsize;
	b.head = head;
	NCB_INIT(&b);

	/* insertion test suite */
	NCB_INIT(&b);
	NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, bufsize - NCB_RESERVED_SZ - 1, 0); /* first and last offset */
	NCB_ADD_EQ(&b, 24, data0, 9, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 9);
	/* insert new data at the same offset as old */
	NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 16);

	NCB_INIT(&b); NCB_DATA_EQ(&b, 0, 0);
	NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
	BUG_ON(ncb_is_fragmented(&b));
	NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
	BUG_ON(!ncb_is_fragmented(&b));
	/* insert data overlapping two data blocks and a gap */
	NCB_ADD_EQ(&b, 12, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 40);
	BUG_ON(ncb_is_fragmented(&b));

	NCB_INIT(&b);
	NCB_ADD_EQ(&b, 32, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
	BUG_ON(!ncb_is_fragmented(&b));
	NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
	BUG_ON(!ncb_is_fragmented(&b));
	/* insert data to exactly cover a gap between two data blocks */
	NCB_ADD_EQ(&b, 16, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 48); NCB_DATA_EQ(&b, 16, 32); NCB_DATA_EQ(&b, 32, 16);
	BUG_ON(ncb_is_fragmented(&b));

	NCB_INIT(&b);
	NCB_ADD_EQ(&b, 0, data0, 8, NCB_ADD_PRESERVE, NCB_RET_OK);
	/* this insertion must be rejected because of minimal gap size */
	NCB_ADD_EQ(&b, 10, data0, 8, NCB_ADD_PRESERVE, NCB_RET_GAP_SIZE);

	/* Test reduced gap support */
	NCB_INIT(&b);
	/* this insertion will form a reduced gap */
	NCB_ADD_EQ(&b, 0, data0, bufsize - (NCB_GAP_MIN_SZ - 1), NCB_ADD_COMPARE, NCB_RET_OK);

	/* Test the various insertion mode */
	NCB_INIT(&b);
	NCB_ADD_EQ(&b, 10, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK);
	NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_COMPARE, NCB_RET_DATA_REJ);
	NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) != data1[2]);
	NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_OVERWRT, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) == data1[2]);

	/* advance test suite */
	NCB_INIT(&b);
	NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* advance in an empty buffer; this ensures we do not leave an empty DATA in the middle of the buffer */
	NCB_ADVANCE_EQ(&b, ncb_size(&b) - 2, NCB_RET_OK);

	NCB_INIT(&b);
	/* first fill the buffer */
	NCB_ADD_EQ(&b, 0, data0, bufsize - NCB_RESERVED_SZ, NCB_ADD_COMPARE, NCB_RET_OK);
	/* delete 2 bytes : a reduced gap must be created */
	NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 2);
	/* delete 1 byte : extend the reduced gap */
	NCB_ADVANCE_EQ(&b, 1, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 3);
	/* delete 5 bytes : a full gap must be present */
	NCB_ADVANCE_EQ(&b, 5, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 8);
	/* completely clear the buffer */
	NCB_ADVANCE_EQ(&b, bufsize - NCB_RESERVED_SZ, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, 0);


	NCB_INIT(&b);
	NCB_ADD_EQ(&b, 10, data0, 10, NCB_ADD_PRESERVE, NCB_RET_OK);
	NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); /* reduce a gap in front of the buffer */
	NCB_ADVANCE_EQ(&b, 1, NCB_RET_GAP_SIZE); /* reject */
	NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove completely the gap */
	NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove inside the data */
	NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* remove completely the data */

	fprintf(stderr, "first random pass\n");
	NCB_INIT(&b);

	/* generate random data offsets until the buffer is full */
	while (!ncb_is_full(&b)) {
		roff = ncb_generate_rand_off(&b);
		LIST_INSERT(&list, &roff->el);

		ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
		BUG_ON(ret == NCB_RET_DATA_REJ);
		ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
		usleep(print_delay);
	}

	fprintf(stderr, "buf full, prepare for reverse random\n");
	ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);

	/* insert the previously generated random offsets in the reverse order.
	 * At the end, the buffer should be full.
	 */
	NCB_INIT(&b);
	list_for_each_entry_safe(roff, roff_tmp, &list, el) {
		int full = ncb_is_full(&b);
		if (!full) {
			ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
			BUG_ON(ret == NCB_RET_DATA_REJ);
			ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
			usleep(print_delay);
		}

		LIST_DELETE(&roff->el);
		free(roff);
	}

	if (!ncb_is_full(&b))
		abort();

	fprintf(stderr, "done\n");

	free(bufarea);
	free(data0);

	return 1;
}
+
+int main(int argc, char **argv)
+{
+ int reset = 0;
+ int print_delay = 100000;
+ char c;
+
+ opterr = 0;
+ while ((c = getopt(argc, argv, "h:s:rp::")) != -1) {
+ switch (c) {
+ case 'h':
+ bufhead = atoi(optarg);
+ break;
+ case 's':
+ bufsize = atoi(optarg);
+ if (bufsize < 64) {
+ fprintf(stderr, "bufsize should be at least 64 bytes for unit test suite\n");
+ exit(127);
+ }
+ break;
+ case 'r':
+ reset = 1;
+ break;
+ case 'p':
+ if (optarg)
+ print_delay = atoi(optarg);
+ ncb_print = 1;
+ break;
+ case '?':
+ default:
+ fprintf(stderr, "usage: %s [-r] [-s bufsize] [-h bufhead] [-p <delay_msec>]\n", argv[0]);
+ exit(127);
+ }
+ }
+
+ ncbuf_test(bufhead, reset, print_delay);
+ return EXIT_SUCCESS;
+}
+
+#endif /* STANDALONE */
diff --git a/src/pattern.c b/src/pattern.c
new file mode 100644
index 0000000..52dda5e
--- /dev/null
+++ b/src/pattern.c
@@ -0,0 +1,2683 @@
+/*
+ * Pattern management functions.
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <import/ebistree.h>
+#include <import/ebpttree.h>
+#include <import/ebsttree.h>
+#include <import/lru.h>
+
+#include <haproxy/api.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/pattern.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+
/* Human-readable names of the matching methods, indexed by PAT_MATCH_*. */
const char *const pat_match_names[PAT_MATCH_NUM] = {
	[PAT_MATCH_FOUND] = "found",
	[PAT_MATCH_BOOL] = "bool",
	[PAT_MATCH_INT] = "int",
	[PAT_MATCH_IP] = "ip",
	[PAT_MATCH_BIN] = "bin",
	[PAT_MATCH_LEN] = "len",
	[PAT_MATCH_STR] = "str",
	[PAT_MATCH_BEG] = "beg",
	[PAT_MATCH_SUB] = "sub",
	[PAT_MATCH_DIR] = "dir",
	[PAT_MATCH_DOM] = "dom",
	[PAT_MATCH_END] = "end",
	[PAT_MATCH_REG] = "reg",
	[PAT_MATCH_REGM] = "regm",
};

/* Per-method input parsers (pat_parse_*), filling a struct pattern from the
 * configuration text.
 */
int (*const pat_parse_fcts[PAT_MATCH_NUM])(const char *, struct pattern *, int, char **) = {
	[PAT_MATCH_FOUND] = pat_parse_nothing,
	[PAT_MATCH_BOOL] = pat_parse_nothing,
	[PAT_MATCH_INT] = pat_parse_int,
	[PAT_MATCH_IP] = pat_parse_ip,
	[PAT_MATCH_BIN] = pat_parse_bin,
	[PAT_MATCH_LEN] = pat_parse_int,
	[PAT_MATCH_STR] = pat_parse_str,
	[PAT_MATCH_BEG] = pat_parse_str,
	[PAT_MATCH_SUB] = pat_parse_str,
	[PAT_MATCH_DIR] = pat_parse_str,
	[PAT_MATCH_DOM] = pat_parse_str,
	[PAT_MATCH_END] = pat_parse_str,
	[PAT_MATCH_REG] = pat_parse_reg,
	[PAT_MATCH_REGM] = pat_parse_reg,
};

/* Per-method indexers: store a parsed pattern into an expression, either in
 * a tree (ip/str/pfx) or a linked list.
 */
int (*const pat_index_fcts[PAT_MATCH_NUM])(struct pattern_expr *, struct pattern *, char **) = {
	[PAT_MATCH_FOUND] = pat_idx_list_val,
	[PAT_MATCH_BOOL] = pat_idx_list_val,
	[PAT_MATCH_INT] = pat_idx_list_val,
	[PAT_MATCH_IP] = pat_idx_tree_ip,
	[PAT_MATCH_BIN] = pat_idx_list_ptr,
	[PAT_MATCH_LEN] = pat_idx_list_val,
	[PAT_MATCH_STR] = pat_idx_tree_str,
	[PAT_MATCH_BEG] = pat_idx_tree_pfx,
	[PAT_MATCH_SUB] = pat_idx_list_str,
	[PAT_MATCH_DIR] = pat_idx_list_str,
	[PAT_MATCH_DOM] = pat_idx_list_str,
	[PAT_MATCH_END] = pat_idx_list_str,
	[PAT_MATCH_REG] = pat_idx_list_reg,
	[PAT_MATCH_REGM] = pat_idx_list_regm,
};

/* Per-method destructors; all methods currently share the generic pruner. */
void (*const pat_prune_fcts[PAT_MATCH_NUM])(struct pattern_expr *) = {
	[PAT_MATCH_FOUND] = pat_prune_gen,
	[PAT_MATCH_BOOL] = pat_prune_gen,
	[PAT_MATCH_INT] = pat_prune_gen,
	[PAT_MATCH_IP] = pat_prune_gen,
	[PAT_MATCH_BIN] = pat_prune_gen,
	[PAT_MATCH_LEN] = pat_prune_gen,
	[PAT_MATCH_STR] = pat_prune_gen,
	[PAT_MATCH_BEG] = pat_prune_gen,
	[PAT_MATCH_SUB] = pat_prune_gen,
	[PAT_MATCH_DIR] = pat_prune_gen,
	[PAT_MATCH_DOM] = pat_prune_gen,
	[PAT_MATCH_END] = pat_prune_gen,
	[PAT_MATCH_REG] = pat_prune_gen,
	[PAT_MATCH_REGM] = pat_prune_gen,
};

/* Per-method sample matchers; PAT_MATCH_FOUND needs no matcher (presence of
 * the sample is the match).
 */
struct pattern *(*const pat_match_fcts[PAT_MATCH_NUM])(struct sample *, struct pattern_expr *, int) = {
	[PAT_MATCH_FOUND] = NULL,
	[PAT_MATCH_BOOL] = pat_match_nothing,
	[PAT_MATCH_INT] = pat_match_int,
	[PAT_MATCH_IP] = pat_match_ip,
	[PAT_MATCH_BIN] = pat_match_bin,
	[PAT_MATCH_LEN] = pat_match_len,
	[PAT_MATCH_STR] = pat_match_str,
	[PAT_MATCH_BEG] = pat_match_beg,
	[PAT_MATCH_SUB] = pat_match_sub,
	[PAT_MATCH_DIR] = pat_match_dir,
	[PAT_MATCH_DOM] = pat_match_dom,
	[PAT_MATCH_END] = pat_match_end,
	[PAT_MATCH_REG] = pat_match_reg,
	[PAT_MATCH_REGM] = pat_match_regm,
};

/* Just used for checking configuration compatibility */
int const pat_match_types[PAT_MATCH_NUM] = {
	[PAT_MATCH_FOUND] = SMP_T_SINT,
	[PAT_MATCH_BOOL] = SMP_T_SINT,
	[PAT_MATCH_INT] = SMP_T_SINT,
	[PAT_MATCH_IP] = SMP_T_ADDR,
	[PAT_MATCH_BIN] = SMP_T_BIN,
	[PAT_MATCH_LEN] = SMP_T_STR,
	[PAT_MATCH_STR] = SMP_T_STR,
	[PAT_MATCH_BEG] = SMP_T_STR,
	[PAT_MATCH_SUB] = SMP_T_STR,
	[PAT_MATCH_DIR] = SMP_T_STR,
	[PAT_MATCH_DOM] = SMP_T_STR,
	[PAT_MATCH_END] = SMP_T_STR,
	[PAT_MATCH_REG] = SMP_T_STR,
	[PAT_MATCH_REGM] = SMP_T_STR,
};
+
/* this struct is used to return information (one scratch slot per thread) */
static THREAD_LOCAL struct pattern static_pattern;
static THREAD_LOCAL struct sample_data static_sample_data;

/* This is the root of the list of all available pattern_ref entries. */
struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);

/* Per-thread LRU cache of match results; the seed randomizes hash keys. */
static THREAD_LOCAL struct lru64_head *pat_lru_tree;
static unsigned long long pat_lru_seed __read_mostly;
+
/*
 *
 * The following functions are not exported and are used by the internal
 * machinery of pattern matching.
 *
 */
+
/* Background: Fast way to find a zero byte in a word
 * http://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
 * hasZeroByte = (v - 0x01010101UL) & ~v & 0x80808080UL;
 *
 * To look for 4 different byte values, xor the word with those bytes and
 * then check for zero bytes:
 *
 * v = (((unsigned char)c * 0x1010101U) ^ delimiter)
 * where <delimiter> is the 4 byte values to look for (as an uint)
 * and <c> is the character that is being tested
 */
static inline unsigned int is_delimiter(unsigned char c, unsigned int mask)
{
	/* use unsigned constants: with a plain int, c * 0x01010101 overflows
	 * (undefined behavior) for any c >= 0x80.
	 */
	mask ^= (c * 0x01010101U); /* propagate the char to all 4 bytes */
	return (mask - 0x01010101U) & ~mask & 0x80808080U;
}
+
/* Pack four delimiter bytes into one word suitable for is_delimiter(). */
static inline unsigned int make_4delim(unsigned char d1, unsigned char d2, unsigned char d3, unsigned char d4)
{
	/* promote to unsigned before shifting: with the default int promotion,
	 * d1 << 24 is undefined behavior when d1 >= 0x80 (shift into the sign
	 * bit).
	 */
	return (unsigned int)d1 << 24 | (unsigned int)d2 << 16 | (unsigned int)d3 << 8 | d4;
}
+
+
+/*
+ *
+ * These functions are exported and may be used by any other component.
+ *
+ * The following functions are used for parsing pattern matching input value.
+ * The <text> contain the string to be parsed. <pattern> must be a preallocated
+ * pattern. The pat_parse_* functions fill this structure with the parsed value.
+ * <err> is filled with an error message built with memprintf() function. It is
+ * allowed to use a trash as a temporary storage for the returned pattern, as
+ * the next call after these functions will be pat_idx_*.
+ *
+ * In success case, the pat_parse_* function returns 1. If the function
+ * fails, it returns 0 and <err> is filled.
+ */
+
/* Matching methods that need no pattern value: accept the line unchanged. */
int pat_parse_nothing(const char *text, struct pattern *pattern, int mflags, char **err)
{
	(void)text;
	(void)pattern;
	(void)mflags;
	(void)err;
	return 1;
}
+
+/* Parse a string. It is allocated and duplicated. */
+int pat_parse_str(const char *text, struct pattern *pattern, int mflags, char **err)
+{
+ pattern->type = SMP_T_STR;
+ pattern->ptr.str = (char *)text;
+ pattern->len = strlen(text);
+ return 1;
+}
+
/* Parse a binary written in hexa. The decoded bytes are written into a trash
 * chunk, which is legitimate temporary storage here since the next call on
 * the pattern will be a pat_idx_* function (see the note above pat_parse_*).
 */
int pat_parse_bin(const char *text, struct pattern *pattern, int mflags, char **err)
{
	struct buffer *trash;

	pattern->type = SMP_T_BIN;
	trash = get_trash_chunk();
	/* parse_binary() updates len/ptr in place with the decoded length */
	pattern->len = trash->size;
	pattern->ptr.str = trash->area;
	return !!parse_binary(text, &pattern->ptr.str, &pattern->len, err);
}
+
/* Prepare a regex pattern: only the original text is referenced here; the
 * regex itself is presumably compiled later at indexing time (see
 * pat_idx_list_reg in the index table) — confirm.
 */
int pat_parse_reg(const char *text, struct pattern *pattern, int mflags, char **err)
{
	pattern->ptr.str = (char *)text;
	return 1;
}
+
/* Parse a range of positive integers delimited by either ':' or '-'. If only
 * one integer is read, it is set as both min and max.
 *
 * If err is non-NULL, an error message will be returned there on errors and
 * the caller will have to free it. The function returns zero on error, and
 * non-zero on success.
 *
 * NOTE(review): an older version of this comment described an operator
 * prefix (eq/gt/ge/lt/le) stored in an 'opaque' argument; no such handling
 * exists in this function — operators are presumably applied by the caller.
 */
int pat_parse_int(const char *text, struct pattern *pattern, int mflags, char **err)
{
	const char *ptr = text;

	pattern->type = SMP_T_SINT;

	/* Empty string is not valid */
	if (!*text)
		goto not_valid_range;

	/* Search ':' or '-' separator. */
	while (*ptr != '\0' && *ptr != ':' && *ptr != '-')
		ptr++;

	/* If separator not found: single value used as both bounds. */
	if (!*ptr) {
		if (strl2llrc(text, ptr - text, &pattern->val.range.min) != 0) {
			memprintf(err, "'%s' is not a number", text);
			return 0;
		}
		pattern->val.range.max = pattern->val.range.min;
		pattern->val.range.min_set = 1;
		pattern->val.range.max_set = 1;
		return 1;
	}

	/* If the separator is the first character: open-ended minimum. */
	if (ptr == text && *(ptr + 1) != '\0') {
		if (strl2llrc(ptr + 1, strlen(ptr + 1), &pattern->val.range.max) != 0)
			goto not_valid_range;

		pattern->val.range.min_set = 0;
		pattern->val.range.max_set = 1;
		return 1;
	}

	/* If separator is the last character: open-ended maximum. */
	if (*(ptr + 1) == '\0') {
		if (strl2llrc(text, ptr - text, &pattern->val.range.min) != 0)
			goto not_valid_range;

		pattern->val.range.min_set = 1;
		pattern->val.range.max_set = 0;
		return 1;
	}

	/* Else, parse two numbers. */
	if (strl2llrc(text, ptr - text, &pattern->val.range.min) != 0)
		goto not_valid_range;

	if (strl2llrc(ptr + 1, strlen(ptr + 1), &pattern->val.range.max) != 0)
		goto not_valid_range;

	if (pattern->val.range.min > pattern->val.range.max)
		goto not_valid_range;

	pattern->val.range.min_set = 1;
	pattern->val.range.max_set = 1;
	return 1;

 not_valid_range:
	memprintf(err, "'%s' is not a valid number range", text);
	return 0;
}
+
+/* Parse a range of positive 2-component versions delimited by either ':' or
+ * '-'. The version consists in a major and a minor, both of which must be
+ * smaller than 65536, because internally they will be represented as a 32-bit
+ * integer.
+ * If only one version is read, it is set as both min and max. Just like for
+ * pure integers, an operator may be specified as the prefix, among this list
+ * of 5 :
+ *
+ * 0:eq, 1:gt, 2:ge, 3:lt, 4:le
+ *
+ * The default operator is "eq". It supports range matching. Ranges are
+ * rejected for other operators. The operator may be changed at any time.
+ * The operator is stored in the 'opaque' argument. This allows constructs
+ * such as the following one :
+ *
+ * acl obsolete_ssl ssl_req_proto lt 3
+ * acl unsupported_ssl ssl_req_proto gt 3.1
+ * acl valid_ssl ssl_req_proto 3.0-3.1
+ *
+ */
+int pat_parse_dotted_ver(const char *text, struct pattern *pattern, int mflags, char **err)
+{
+ const char *ptr = text;
+
+ pattern->type = SMP_T_SINT;
+
+ /* Search ':' or '-' separator. */
+ while (*ptr != '\0' && *ptr != ':' && *ptr != '-')
+ ptr++;
+
+ /* If separator not found. */
+ if (*ptr == '\0' && ptr > text) {
+ if (strl2llrc_dotted(text, ptr-text, &pattern->val.range.min) != 0) {
+ memprintf(err, "'%s' is not a dotted number", text);
+ return 0;
+ }
+ pattern->val.range.max = pattern->val.range.min;
+ pattern->val.range.min_set = 1;
+ pattern->val.range.max_set = 1;
+ return 1;
+ }
+
+ /* If the separator is the first character. */
+ if (ptr == text && *(ptr+1) != '\0') {
+ if (strl2llrc_dotted(ptr+1, strlen(ptr+1), &pattern->val.range.max) != 0) {
+ memprintf(err, "'%s' is not a valid dotted number range", text);
+ return 0;
+ }
+ pattern->val.range.min_set = 0;
+ pattern->val.range.max_set = 1;
+ return 1;
+ }
+
+ /* If separator is the last character. */
+ if (ptr == &text[strlen(text)-1]) {
+ if (strl2llrc_dotted(text, ptr-text, &pattern->val.range.min) != 0) {
+ memprintf(err, "'%s' is not a valid dotted number range", text);
+ return 0;
+ }
+ pattern->val.range.min_set = 1;
+ pattern->val.range.max_set = 0;
+ return 1;
+ }
+
+ /* Else, parse two numbers. */
+ if (strl2llrc_dotted(text, ptr-text, &pattern->val.range.min) != 0) {
+ memprintf(err, "'%s' is not a valid dotted number range", text);
+ return 0;
+ }
+ if (strl2llrc_dotted(ptr+1, strlen(ptr+1), &pattern->val.range.max) != 0) {
+ memprintf(err, "'%s' is not a valid dotted number range", text);
+ return 0;
+ }
+ if (pattern->val.range.min > pattern->val.range.max) {
+ memprintf(err, "'%s' is not a valid dotted number range", text);
+ return 0;
+ }
+ pattern->val.range.min_set = 1;
+ pattern->val.range.max_set = 1;
+ return 1;
+}
+
/* Parse an IP address and an optional mask in the form addr[/mask].
 * The addr may either be an IPv4 address or a hostname. The mask
 * may either be a dotted mask or a number of bits. Returns 1 if OK,
 * otherwise 0. NOTE: IP address patterns are typed (IPV4/IPV6).
 * Name resolution is presumably only attempted when DNS is allowed
 * (no PAT_MF_NO_DNS) and the process is still starting — confirm
 * against str2net()'s contract.
 */
int pat_parse_ip(const char *text, struct pattern *pattern, int mflags, char **err)
{
	if (str2net(text, !(mflags & PAT_MF_NO_DNS) && (global.mode & MODE_STARTING),
	            &pattern->val.ipv4.addr, &pattern->val.ipv4.mask)) {
		pattern->type = SMP_T_IPV4;
		return 1;
	}
	else if (str62net(text, &pattern->val.ipv6.addr, &pattern->val.ipv6.mask)) {
		pattern->type = SMP_T_IPV6;
		return 1;
	}
	else {
		memprintf(err, "'%s' is not a valid IPv4 or IPv6 address", text);
		return 0;
	}
}
+
+/*
+ *
+ * These functions are exported and may be used by any other component.
+ *
+ * This function just takes a sample <smp> and checks if this sample matches
+ * with the pattern <pattern>. This function returns only PAT_MATCH or
+ * PAT_NOMATCH.
+ *
+ */
+
+/* always return false */
+struct pattern *pat_match_nothing(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ if (smp->data.u.sint) {
+ if (fill) {
+ static_pattern.data = NULL;
+ static_pattern.ref = NULL;
+ static_pattern.type = 0;
+ static_pattern.ptr.str = NULL;
+ }
+ return &static_pattern;
+ }
+ else
+ return NULL;
+}
+
+
/* NB: For two strings to be identical, it is required that their length match.
 * Lookup order: exact-match tree first (needs a NUL-terminated key, hence the
 * temporary trailing-zero dance below), then the LRU result cache, then a
 * linear scan of the pattern list.
 */
struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int fill)
{
	int icase;
	struct ebmb_node *node;
	struct pattern_tree *elt;
	struct pattern_list *lst;
	struct pattern *pattern;
	struct pattern *ret = NULL;
	struct lru64 *lru = NULL;

	/* Lookup a string in the expression's pattern tree. */
	if (!eb_is_empty(&expr->pattern_tree)) {
		char prev = 0;

		if (smp->data.u.str.data < smp->data.u.str.size) {
			/* we may have to force a trailing zero on the test pattern and
			 * the buffer is large enough to accommodate it. If the flag
			 * CONST is set, duplicate the string
			 */
			prev = smp->data.u.str.area[smp->data.u.str.data];
			if (prev) {
				if (smp->flags & SMP_F_CONST) {
					if (!smp_dup(smp))
						return NULL;
				} else {
					smp->data.u.str.area[smp->data.u.str.data] = '\0';
				}
			}
		}
		else {
			/* Otherwise, the sample is duplicated. A trailing zero
			 * is automatically added to the string.
			 */
			if (!smp_dup(smp))
				return NULL;
		}

		node = ebst_lookup(&expr->pattern_tree, smp->data.u.str.area);
		/* restore the byte overwritten by the temporary trailing zero */
		if (prev)
			smp->data.u.str.area[smp->data.u.str.data] = prev;

		while (node) {
			elt = ebmb_entry(node, struct pattern_tree, node);
			/* skip duplicates from another (obsolete) generation */
			if (elt->ref->gen_id != expr->ref->curr_gen) {
				node = ebmb_next_dup(node);
				continue;
			}
			if (fill) {
				static_pattern.data = elt->data;
				static_pattern.ref = elt->ref;
				static_pattern.sflags = PAT_SF_TREE;
				static_pattern.type = SMP_T_STR;
				static_pattern.ptr.str = (char *)elt->node.key;
			}
			return &static_pattern;
		}
	}

	/* look in the list */
	if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
				pat_lru_tree, expr, expr->ref->revision);
		if (lru && lru->domain) {
			ret = lru->data;
			return ret;
		}
	}


	list_for_each_entry(lst, &expr->patterns, list) {
		pattern = &lst->pat;

		if (pattern->ref->gen_id != expr->ref->curr_gen)
			continue;

		if (pattern->len != smp->data.u.str.data)
			continue;

		icase = expr->mflags & PAT_MF_IGNORE_CASE;
		if ((icase && strncasecmp(pattern->ptr.str, smp->data.u.str.area, smp->data.u.str.data) == 0) ||
		    (!icase && strncmp(pattern->ptr.str, smp->data.u.str.area, smp->data.u.str.data) == 0)) {
			ret = pattern;
			break;
		}
	}

	/* cache the (possibly negative) result for this sample/revision */
	if (lru)
		lru64_commit(lru, ret, expr, expr->ref->revision, NULL);

	return ret;
}
+
+/* NB: For two binaries buf to be identical, it is required that their lengths match */
+struct pattern *pat_match_bin(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct pattern_list *lst;
+ struct pattern *pattern;
+ struct pattern *ret = NULL;
+ struct lru64 *lru = NULL;
+
+ if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
+ unsigned long long seed = pat_lru_seed ^ (long)expr;
+
+ lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
+ pat_lru_tree, expr, expr->ref->revision);
+ if (lru && lru->domain) {
+ ret = lru->data;
+ return ret;
+ }
+ }
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if (pattern->len != smp->data.u.str.data)
+ continue;
+
+ if (memcmp(pattern->ptr.str, smp->data.u.str.area, smp->data.u.str.data) == 0) {
+ ret = pattern;
+ break;
+ }
+ }
+
+ if (lru)
+ lru64_commit(lru, ret, expr, expr->ref->revision, NULL);
+
+ return ret;
+}
+
/* Executes each pattern's regex against the sample and, on the first match,
 * exposes the capture groups: regex_exec_match2() fills the global <pmatch>
 * array and a pointer to it is stored in smp->ctx.a[0] for the caller.
 * NOTE(review): contrary to an older comment, no trailing zero is added
 * here — the regex receives an explicit length.
 */
struct pattern *pat_match_regm(struct sample *smp, struct pattern_expr *expr, int fill)
{
	struct pattern_list *lst;
	struct pattern *pattern;
	struct pattern *ret = NULL;

	list_for_each_entry(lst, &expr->patterns, list) {
		pattern = &lst->pat;

		/* skip patterns from an obsolete generation */
		if (pattern->ref->gen_id != expr->ref->curr_gen)
			continue;

		if (regex_exec_match2(pattern->ptr.reg, smp->data.u.str.area, smp->data.u.str.data,
		                      MAX_MATCH, pmatch, 0)) {
			ret = pattern;
			smp->ctx.a[0] = pmatch;
			break;
		}
	}

	return ret;
}
+
/* Executes each pattern's regex against the sample (no capture groups),
 * with the result cached in the per-thread LRU tree keyed on the sample
 * content and expression revision.
 * NOTE(review): contrary to an older comment, no trailing zero is added
 * here — regex_exec2() receives an explicit length.
 */
struct pattern *pat_match_reg(struct sample *smp, struct pattern_expr *expr, int fill)
{
	struct pattern_list *lst;
	struct pattern *pattern;
	struct pattern *ret = NULL;
	struct lru64 *lru = NULL;

	/* check the result cache first */
	if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
				pat_lru_tree, expr, expr->ref->revision);
		if (lru && lru->domain) {
			ret = lru->data;
			return ret;
		}
	}

	list_for_each_entry(lst, &expr->patterns, list) {
		pattern = &lst->pat;

		if (pattern->ref->gen_id != expr->ref->curr_gen)
			continue;

		if (regex_exec2(pattern->ptr.reg, smp->data.u.str.area, smp->data.u.str.data)) {
			ret = pattern;
			break;
		}
	}

	if (lru)
		lru64_commit(lru, ret, expr, expr->ref->revision, NULL);

	return ret;
}
+
+/* Checks that the pattern matches the beginning of the tested string. */
+struct pattern *pat_match_beg(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ int icase;
+ struct ebmb_node *node;
+ struct pattern_tree *elt;
+ struct pattern_list *lst;
+ struct pattern *pattern;
+ struct pattern *ret = NULL;
+ struct lru64 *lru = NULL;
+
+ /* Lookup a string in the expression's pattern tree. */
+ if (!eb_is_empty(&expr->pattern_tree)) {
+ char prev = 0;
+
+ if (smp->data.u.str.data < smp->data.u.str.size) {
+ /* we may have to force a trailing zero on the test pattern and
+ * the buffer is large enough to accommodate it.
+ */
+ prev = smp->data.u.str.area[smp->data.u.str.data];
+ if (prev)
+ smp->data.u.str.area[smp->data.u.str.data] = '\0';
+ }
+ else {
+ /* Otherwise, the sample is duplicated. A trailing zero
+ * is automatically added to the string.
+ */
+ if (!smp_dup(smp))
+ return NULL;
+ }
+
+ node = ebmb_lookup_longest(&expr->pattern_tree,
+ smp->data.u.str.area);
+ if (prev)
+ smp->data.u.str.area[smp->data.u.str.data] = prev;
+
+ while (node) {
+ elt = ebmb_entry(node, struct pattern_tree, node);
+ if (elt->ref->gen_id != expr->ref->curr_gen) {
+ node = ebmb_lookup_shorter(node);
+ continue;
+ }
+ if (fill) {
+ static_pattern.data = elt->data;
+ static_pattern.ref = elt->ref;
+ static_pattern.sflags = PAT_SF_TREE;
+ static_pattern.type = SMP_T_STR;
+ static_pattern.ptr.str = (char *)elt->node.key;
+ }
+ return &static_pattern;
+ }
+ }
+
+ /* look in the list */
+ if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
+ unsigned long long seed = pat_lru_seed ^ (long)expr;
+
+ lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
+ pat_lru_tree, expr, expr->ref->revision);
+ if (lru && lru->domain) {
+ ret = lru->data;
+ return ret;
+ }
+ }
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if (pattern->len > smp->data.u.str.data)
+ continue;
+
+ icase = expr->mflags & PAT_MF_IGNORE_CASE;
+ if ((icase && strncasecmp(pattern->ptr.str, smp->data.u.str.area, pattern->len) != 0) ||
+ (!icase && strncmp(pattern->ptr.str, smp->data.u.str.area, pattern->len) != 0))
+ continue;
+
+ ret = pattern;
+ break;
+ }
+
+ if (lru)
+ lru64_commit(lru, ret, expr, expr->ref->revision, NULL);
+
+ return ret;
+}
+
+/* Checks that the pattern matches the end of the tested string. */
+struct pattern *pat_match_end(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ int icase;
+ struct pattern_list *lst;
+ struct pattern *pattern;
+ struct pattern *ret = NULL;
+ struct lru64 *lru = NULL;
+
+ if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
+ unsigned long long seed = pat_lru_seed ^ (long)expr;
+
+ lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
+ pat_lru_tree, expr, expr->ref->revision);
+ if (lru && lru->domain) {
+ ret = lru->data;
+ return ret;
+ }
+ }
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if (pattern->len > smp->data.u.str.data)
+ continue;
+
+ icase = expr->mflags & PAT_MF_IGNORE_CASE;
+ if ((icase && strncasecmp(pattern->ptr.str, smp->data.u.str.area + smp->data.u.str.data - pattern->len, pattern->len) != 0) ||
+ (!icase && strncmp(pattern->ptr.str, smp->data.u.str.area + smp->data.u.str.data - pattern->len, pattern->len) != 0))
+ continue;
+
+ ret = pattern;
+ break;
+ }
+
+ if (lru)
+ lru64_commit(lru, ret, expr, expr->ref->revision, NULL);
+
+ return ret;
+}
+
/* Checks that the pattern is included inside the tested string.
 * NB: Suboptimal, should be rewritten using a Boyer-Moore method.
 */
struct pattern *pat_match_sub(struct sample *smp, struct pattern_expr *expr, int fill)
{
	int icase;
	char *end;
	char *c;
	struct pattern_list *lst;
	struct pattern *pattern;
	struct pattern *ret = NULL;
	struct lru64 *lru = NULL;

	/* check the result cache first */
	if (pat_lru_tree && !LIST_ISEMPTY(&expr->patterns)) {
		unsigned long long seed = pat_lru_seed ^ (long)expr;

		lru = lru64_get(XXH3(smp->data.u.str.area, smp->data.u.str.data, seed),
				pat_lru_tree, expr, expr->ref->revision);
		if (lru && lru->domain) {
			ret = lru->data;
			return ret;
		}
	}

	list_for_each_entry(lst, &expr->patterns, list) {
		pattern = &lst->pat;

		if (pattern->ref->gen_id != expr->ref->curr_gen)
			continue;

		if (pattern->len > smp->data.u.str.data)
			continue;

		/* <end> is the last position where a match may still start */
		end = smp->data.u.str.area + smp->data.u.str.data - pattern->len;
		icase = expr->mflags & PAT_MF_IGNORE_CASE;
		if (icase) {
			for (c = smp->data.u.str.area; c <= end; c++) {
				/* cheap first-byte filter before the full compare */
				if (tolower((unsigned char)*c) != tolower((unsigned char)*pattern->ptr.str))
					continue;
				if (strncasecmp(pattern->ptr.str, c, pattern->len) == 0) {
					ret = pattern;
					goto leave;
				}
			}
		} else {
			for (c = smp->data.u.str.area; c <= end; c++) {
				if (*c != *pattern->ptr.str)
					continue;
				if (strncmp(pattern->ptr.str, c, pattern->len) == 0) {
					ret = pattern;
					goto leave;
				}
			}
		}
	}
 leave:
	/* commit the (possibly negative) result to the cache */
	if (lru)
		lru64_commit(lru, ret, expr, expr->ref->revision, NULL);

	return ret;
}
+
/* This one is used by other real functions. It checks that the pattern is
 * included inside the tested string, but enclosed between the specified
 * delimiters or at the beginning or end of the string. The delimiters are
 * provided as an unsigned int made by make_4delim() and match up to 4 different
 * delimiters. Delimiters are stripped at the beginning and end of the pattern.
 * Returns PAT_MATCH or PAT_NOMATCH.
 */
static int match_word(struct sample *smp, struct pattern *pattern, int mflags, unsigned int delimiters)
{
	int may_match, icase;
	char *c, *end;
	char *ps;
	int pl;

	pl = pattern->len;
	ps = pattern->ptr.str;

	/* strip leading delimiters from the pattern */
	while (pl > 0 && is_delimiter(*ps, delimiters)) {
		pl--;
		ps++;
	}

	/* strip trailing delimiters from the pattern */
	while (pl > 0 && is_delimiter(ps[pl - 1], delimiters))
		pl--;

	if (pl > smp->data.u.str.data)
		return PAT_NOMATCH;

	/* may_match tracks whether the current position starts a "word":
	 * it is true at the beginning of the string and right after each
	 * delimiter, false after a failed word comparison.
	 */
	may_match = 1;
	icase = mflags & PAT_MF_IGNORE_CASE;
	end = smp->data.u.str.area + smp->data.u.str.data - pl;
	for (c = smp->data.u.str.area; c <= end; c++) {
		if (is_delimiter(*c, delimiters)) {
			may_match = 1;
			continue;
		}

		if (!may_match)
			continue;

		/* a match also requires the word to end at a delimiter or at
		 * the end of the string.
		 */
		if (icase) {
			if ((tolower((unsigned char)*c) == tolower((unsigned char)*ps)) &&
			    (strncasecmp(ps, c, pl) == 0) &&
			    (c == end || is_delimiter(c[pl], delimiters)))
				return PAT_MATCH;
		} else {
			if ((*c == *ps) &&
			    (strncmp(ps, c, pl) == 0) &&
			    (c == end || is_delimiter(c[pl], delimiters)))
				return PAT_MATCH;
		}
		may_match = 0;
	}
	return PAT_NOMATCH;
}
+
+/* Checks that the pattern is included inside the tested string, but enclosed
+ * between the delimiters '?' or '/' or at the beginning or end of the string.
+ * Delimiters at the beginning or end of the pattern are ignored. Returns the
+ * first matching pattern of the current generation, or NULL if none matches.
+ */
+struct pattern *pat_match_dir(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* skip entries which do not belong to the current generation */
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if (match_word(smp, pattern, expr->mflags, make_4delim('/', '?', '?', '?')))
+ return pattern;
+ }
+ return NULL;
+}
+
+/* Checks that the pattern is included inside the tested string, but enclosed
+ * between the delimiters '/', '?', '.' or ':' or at the beginning or end of
+ * the string. Delimiters at the beginning or end of the pattern are ignored.
+ * Returns the first matching pattern of the current generation, or NULL.
+ */
+struct pattern *pat_match_dom(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* skip entries which do not belong to the current generation */
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if (match_word(smp, pattern, expr->mflags, make_4delim('/', '?', '.', ':')))
+ return pattern;
+ }
+ return NULL;
+}
+
+/* Checks that the integer in <smp> is included between the pattern's min and
+ * max bounds; each bound is only enforced when its *_set flag is true.
+ * Returns the first matching pattern of the current generation, or NULL.
+ */
+struct pattern *pat_match_int(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* skip entries which do not belong to the current generation */
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if ((!pattern->val.range.min_set || pattern->val.range.min <= smp->data.u.sint) &&
+ (!pattern->val.range.max_set || smp->data.u.sint <= pattern->val.range.max))
+ return pattern;
+ }
+ return NULL;
+}
+
+/* Checks that the length of the sample string in <smp> is included between
+ * the pattern's min and max bounds; each bound is only enforced when its
+ * *_set flag is true. Returns the first matching pattern of the current
+ * generation, or NULL.
+ */
+struct pattern *pat_match_len(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* skip entries which do not belong to the current generation */
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ if ((!pattern->val.range.min_set || pattern->val.range.min <= smp->data.u.str.data) &&
+ (!pattern->val.range.max_set || smp->data.u.str.data <= pattern->val.range.max))
+ return pattern;
+ }
+ return NULL;
+}
+
+/* Performs ipv4 key lookup in <expr> ipv4 tree using the longest-match
+ * method. When <fill> is set, the shared static_pattern is filled with the
+ * matched entry (address, mask, data, ref). Returns NULL on failure, also
+ * when the stored prefix length cannot be converted to a dotted mask.
+ */
+static struct pattern *_pat_match_tree_ipv4(struct in_addr *key, struct pattern_expr *expr, int fill)
+{
+ struct ebmb_node *node;
+ struct pattern_tree *elt;
+
+ /* Lookup an IPv4 address in the expression's pattern tree using
+ * the longest match method.
+ */
+ node = ebmb_lookup_longest(&expr->pattern_tree, key);
+ while (node) {
+ elt = ebmb_entry(node, struct pattern_tree, node);
+ /* entry from another generation: retry with the next shorter prefix */
+ if (elt->ref->gen_id != expr->ref->curr_gen) {
+ node = ebmb_lookup_shorter(node);
+ continue;
+ }
+ if (fill) {
+ static_pattern.data = elt->data;
+ static_pattern.ref = elt->ref;
+ static_pattern.sflags = PAT_SF_TREE;
+ static_pattern.type = SMP_T_IPV4;
+ static_pattern.val.ipv4.addr.s_addr = read_u32(elt->node.key);
+ if (!cidr2dotted(elt->node.node.pfx, &static_pattern.val.ipv4.mask))
+ return NULL;
+ }
+ return &static_pattern;
+ }
+ return NULL;
+}
+
+/* Performs ipv6 key lookup in <expr> ipv6 tree using the longest-match
+ * method. When <fill> is set, the shared static_pattern is filled with the
+ * matched entry (address, prefix length, data, ref). Returns NULL on failure.
+ */
+static struct pattern *_pat_match_tree_ipv6(struct in6_addr *key, struct pattern_expr *expr, int fill)
+{
+ struct ebmb_node *node;
+ struct pattern_tree *elt;
+
+ /* Lookup an IPv6 address in the expression's pattern tree using
+ * the longest match method.
+ */
+ node = ebmb_lookup_longest(&expr->pattern_tree_2, key);
+ while (node) {
+ elt = ebmb_entry(node, struct pattern_tree, node);
+ /* entry from another generation: retry with the next shorter prefix */
+ if (elt->ref->gen_id != expr->ref->curr_gen) {
+ node = ebmb_lookup_shorter(node);
+ continue;
+ }
+ if (fill) {
+ static_pattern.data = elt->data;
+ static_pattern.ref = elt->ref;
+ static_pattern.sflags = PAT_SF_TREE;
+ static_pattern.type = SMP_T_IPV6;
+ memcpy(&static_pattern.val.ipv6.addr, elt->node.key, 16);
+ static_pattern.val.ipv6.mask = elt->node.node.pfx;
+ }
+ return &static_pattern;
+ }
+ return NULL;
+}
+
+/* Matches an IPv4 or IPv6 sample against expression <expr>. The trees are
+ * tried first (native family, then the sample converted to the other family
+ * when possible), and the list of IPv4 patterns (non-contiguous masks) is
+ * used as a last resort. Returns the matched pattern or NULL.
+ */
+struct pattern *pat_match_ip(struct sample *smp, struct pattern_expr *expr, int fill)
+{
+ struct in_addr v4;
+ struct in6_addr v6;
+ struct pattern_list *lst;
+ struct pattern *pattern;
+
+ /* The input sample is IPv4. Try to match in the trees. */
+ if (smp->data.type == SMP_T_IPV4) {
+ pattern = _pat_match_tree_ipv4(&smp->data.u.ipv4, expr, fill);
+ if (pattern)
+ return pattern;
+ /* The IPv4 sample doesn't match the IPv4 tree. Convert the IPv4
+ * sample address to IPv6 and try to lookup in the IPv6 tree.
+ */
+ v4tov6(&v6, &smp->data.u.ipv4);
+ pattern = _pat_match_tree_ipv6(&v6, expr, fill);
+ if (pattern)
+ return pattern;
+ /* eligible for list lookup using IPv4 address */
+ v4 = smp->data.u.ipv4;
+ goto list_lookup;
+ }
+
+ /* The input sample is IPv6. Try to match in the trees. */
+ if (smp->data.type == SMP_T_IPV6) {
+ pattern = _pat_match_tree_ipv6(&smp->data.u.ipv6, expr, fill);
+ if (pattern)
+ return pattern;
+ /* No match in the IPv6 tree. Try to convert 6 to 4 to lookup in
+ * the IPv4 tree
+ */
+ if (v6tov4(&v4, &smp->data.u.ipv6)) {
+ pattern = _pat_match_tree_ipv4(&v4, expr, fill);
+ if (pattern)
+ return pattern;
+ /* eligible for list lookup using IPv4 address */
+ goto list_lookup;
+ }
+ }
+
+ not_found:
+ return NULL;
+
+ list_lookup:
+ /* No match in the trees, but we still have a valid IPv4 address: lookup
+ * in the IPv4 list (non-contiguous masks list). This is our last resort
+ */
+ list_for_each_entry(lst, &expr->patterns, list) {
+ pattern = &lst->pat;
+
+ /* skip entries which do not belong to the current generation */
+ if (pattern->ref->gen_id != expr->ref->curr_gen)
+ continue;
+
+ /* Check if the input sample match the current pattern. */
+ if (((v4.s_addr ^ pattern->val.ipv4.addr.s_addr) & pattern->val.ipv4.mask.s_addr) == 0)
+ return pattern;
+ }
+ goto not_found;
+}
+
+/* finds the pattern holding <list> from list head <head> and deletes it.
+ * This is made for use for pattern removal within an expression. The list is
+ * a singly-linked chain of void* cells, each pointing to the next cell. If
+ * <list> is not found, nothing is done.
+ */
+static void pat_unlink_from_head(void **head, void **list)
+{
+ while (*head) {
+ if (*head == list) {
+ /* unlink <list> by making its predecessor point to its successor */
+ *head = *list;
+ return;
+ }
+ head = *head;
+ }
+}
+
+/* Releases all nodes from pattern tree <root>. Each element is first removed
+ * from the tree, unlinked from its reference's tree_head chain, then its data
+ * and the element itself (tree node allocated inline) are freed.
+ */
+void free_pattern_tree(struct eb_root *root)
+{
+ struct eb_node *node, *next;
+ struct pattern_tree *elt;
+
+ node = eb_first(root);
+ while (node) {
+ /* fetch the successor before deleting the current node */
+ next = eb_next(node);
+ eb_delete(node);
+ elt = container_of(node, struct pattern_tree, node);
+ pat_unlink_from_head(&elt->ref->tree_head, &elt->from_ref);
+ free(elt->data);
+ free(elt);
+ node = next;
+ }
+}
+
+/* Removes all patterns from expression <expr>: the list entries as well as
+ * both pattern trees. Each pattern is unlinked from its reference element,
+ * regex patterns are released with regex_free() and the others with free().
+ * The reference's revision is then bumped so caches are refreshed, and its
+ * entry count is reset.
+ */
+void pat_prune_gen(struct pattern_expr *expr)
+{
+ struct pattern_list *pat, *tmp;
+
+ list_for_each_entry_safe(pat, tmp, &expr->patterns, list) {
+ LIST_DELETE(&pat->list);
+ pat_unlink_from_head(&pat->pat.ref->list_head, &pat->from_ref);
+ if (pat->pat.sflags & PAT_SF_REGFREE)
+ regex_free(pat->pat.ptr.ptr);
+ else
+ free(pat->pat.ptr.ptr);
+ free(pat->pat.data);
+ free(pat);
+ }
+
+ free_pattern_tree(&expr->pattern_tree);
+ free_pattern_tree(&expr->pattern_tree_2);
+ LIST_INIT(&expr->patterns);
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt = 0;
+}
+
+/*
+ *
+ * The following functions are used for the pattern indexation
+ *
+ */
+
+/* Indexes pattern <pat> into expression <expr>'s patterns list by value (the
+ * pattern payload is copied by structure assignment, not duplicated). Returns
+ * 1 on success, otherwise 0 with <err> filled.
+ */
+int pat_idx_list_val(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ struct pattern_list *patl;
+
+ /* allocate pattern */
+ patl = calloc(1, sizeof(*patl));
+ if (!patl) {
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+
+ /* duplicate pattern */
+ memcpy(&patl->pat, pat, sizeof(*pat));
+
+ /* chain pattern in the expression */
+ LIST_APPEND(&expr->patterns, &patl->list);
+ patl->expr = expr;
+ /* and from the reference */
+ patl->from_ref = pat->ref->list_head;
+ pat->ref->list_head = &patl->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Indexes pattern <pat> into expression <expr>'s patterns list, duplicating
+ * the <pat->len> bytes the pattern points to. Returns 1 on success, otherwise
+ * 0 with <err> filled.
+ */
+int pat_idx_list_ptr(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ struct pattern_list *patl;
+
+ /* allocate pattern */
+ patl = calloc(1, sizeof(*patl));
+ if (!patl) {
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+
+ /* duplicate pattern */
+ memcpy(&patl->pat, pat, sizeof(*pat));
+ patl->pat.ptr.ptr = malloc(patl->pat.len);
+ if (!patl->pat.ptr.ptr) {
+ free(patl);
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+ memcpy(patl->pat.ptr.ptr, pat->ptr.ptr, pat->len);
+
+ /* chain pattern in the expression */
+ LIST_APPEND(&expr->patterns, &patl->list);
+ patl->expr = expr;
+ /* and from the reference */
+ patl->from_ref = pat->ref->list_head;
+ pat->ref->list_head = &patl->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Indexes string pattern <pat> into expression <expr>'s patterns list,
+ * duplicating the string and appending a trailing zero. Returns 1 on success,
+ * otherwise 0 with <err> filled.
+ */
+int pat_idx_list_str(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ struct pattern_list *patl;
+
+ /* allocate pattern */
+ patl = calloc(1, sizeof(*patl));
+ if (!patl) {
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+
+ /* duplicate pattern */
+ memcpy(&patl->pat, pat, sizeof(*pat));
+ patl->pat.ptr.str = malloc(patl->pat.len + 1);
+ if (!patl->pat.ptr.str) {
+ free(patl);
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+ memcpy(patl->pat.ptr.ptr, pat->ptr.ptr, pat->len);
+ patl->pat.ptr.str[patl->pat.len] = '\0';
+
+ /* chain pattern in the expression */
+ LIST_APPEND(&expr->patterns, &patl->list);
+ patl->expr = expr;
+ /* and from the reference */
+ patl->from_ref = pat->ref->list_head;
+ pat->ref->list_head = &patl->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Indexes regex pattern <pat> into expression <expr>'s patterns list after
+ * compiling it. <cap> enables capture support in the compiled regex. Case
+ * sensitivity follows the expression's PAT_MF_IGNORE_CASE flag. The entry is
+ * tagged PAT_SF_REGFREE so it is later released with regex_free(). Returns 1
+ * on success, otherwise 0 with <err> filled.
+ */
+int pat_idx_list_reg_cap(struct pattern_expr *expr, struct pattern *pat, int cap, char **err)
+{
+ struct pattern_list *patl;
+
+ /* allocate pattern */
+ patl = calloc(1, sizeof(*patl));
+ if (!patl) {
+ memprintf(err, "out of memory while indexing pattern");
+ return 0;
+ }
+
+ /* duplicate pattern */
+ memcpy(&patl->pat, pat, sizeof(*pat));
+
+ /* compile regex */
+ patl->pat.sflags |= PAT_SF_REGFREE;
+ if (!(patl->pat.ptr.reg = regex_comp(pat->ptr.str, !(expr->mflags & PAT_MF_IGNORE_CASE),
+ cap, err))) {
+ free(patl);
+ return 0;
+ }
+
+ /* chain pattern in the expression */
+ LIST_APPEND(&expr->patterns, &patl->list);
+ patl->expr = expr;
+ /* and from the reference */
+ patl->from_ref = pat->ref->list_head;
+ pat->ref->list_head = &patl->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Indexes regex pattern <pat> into <expr> without capture support */
+int pat_idx_list_reg(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ return pat_idx_list_reg_cap(expr, pat, 0, err);
+}
+
+/* Indexes regex pattern <pat> into <expr> with capture support */
+int pat_idx_list_regm(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ return pat_idx_list_reg_cap(expr, pat, 1, err);
+}
+
+/* Indexes IP pattern <pat> into expression <expr>. IPv4 entries with a
+ * contiguous mask and all IPv6 entries are inserted into the corresponding
+ * prefix tree; IPv4 entries with a non-contiguous mask fall back to the
+ * patterns list. Returns 1 on success, otherwise 0 (with <err> filled on
+ * allocation failure, or untouched for an unsupported sample type).
+ */
+int pat_idx_tree_ip(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ unsigned int mask;
+ struct pattern_tree *node;
+
+ /* Only IPv4 can be indexed */
+ if (pat->type == SMP_T_IPV4) {
+ /* in IPv4 case, check if the mask is contiguous so that we can
+ * insert the network into the tree. A contiguous mask has only
+ * ones on the left. This means that this mask + its lower bit
+ * added once again is null.
+ */
+ mask = ntohl(pat->val.ipv4.mask.s_addr);
+ if (mask + (mask & -mask) == 0) {
+ mask = mask ? 33 - flsnz(mask & -mask) : 0; /* equals cidr value */
+
+ /* node memory allocation */
+ node = calloc(1, sizeof(*node) + 4);
+ if (!node) {
+ memprintf(err, "out of memory while loading pattern");
+ return 0;
+ }
+
+ /* copy the pointer to sample associated to this node */
+ node->data = pat->data;
+ node->ref = pat->ref;
+
+ /* FIXME: insert <addr>/<mask> into the tree here */
+ memcpy(node->node.key, &pat->val.ipv4.addr, 4); /* network byte order */
+ node->node.node.pfx = mask;
+
+ /* Insert the entry. */
+ ebmb_insert_prefix(&expr->pattern_tree, &node->node, 4);
+
+ node->expr = expr;
+ node->from_ref = pat->ref->tree_head;
+ pat->ref->tree_head = &node->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+ }
+ else {
+ /* If the mask is not contiguous, just add the pattern to the list */
+ return pat_idx_list_val(expr, pat, err);
+ }
+ }
+ else if (pat->type == SMP_T_IPV6) {
+ /* IPv6 also can be indexed */
+ node = calloc(1, sizeof(*node) + 16);
+ if (!node) {
+ memprintf(err, "out of memory while loading pattern");
+ return 0;
+ }
+
+ /* copy the pointer to sample associated to this node */
+ node->data = pat->data;
+ node->ref = pat->ref;
+
+ /* FIXME: insert <addr>/<mask> into the tree here */
+ memcpy(node->node.key, &pat->val.ipv6.addr, 16); /* network byte order */
+ node->node.node.pfx = pat->val.ipv6.mask;
+
+ /* Insert the entry. */
+ ebmb_insert_prefix(&expr->pattern_tree_2, &node->node, 16);
+
+ node->expr = expr;
+ node->from_ref = pat->ref->tree_head;
+ pat->ref->tree_head = &node->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+ }
+
+ /* unsupported sample type */
+ return 0;
+}
+
+/* Indexes string pattern <pat> into expression <expr>'s string tree. When
+ * case-insensitive matching is requested, trees cannot be used and the entry
+ * falls back to the patterns list. Returns 1 on success, otherwise 0 with
+ * <err> filled.
+ */
+int pat_idx_tree_str(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ int len;
+ struct pattern_tree *node;
+
+ /* Only string can be indexed */
+ if (pat->type != SMP_T_STR) {
+ memprintf(err, "internal error: string expected, but the type is '%s'",
+ smp_to_type[pat->type]);
+ return 0;
+ }
+
+ /* If the flag PAT_F_IGNORE_CASE is set, we cannot use trees */
+ if (expr->mflags & PAT_MF_IGNORE_CASE)
+ return pat_idx_list_str(expr, pat, err);
+
+ /* Process the key len, including the trailing zero */
+ len = strlen(pat->ptr.str) + 1;
+
+ /* node memory allocation */
+ node = calloc(1, sizeof(*node) + len);
+ if (!node) {
+ memprintf(err, "out of memory while loading pattern");
+ return 0;
+ }
+
+ /* copy the pointer to sample associated to this node */
+ node->data = pat->data;
+ node->ref = pat->ref;
+
+ /* copy the string */
+ memcpy(node->node.key, pat->ptr.str, len);
+
+ /* index the new node */
+ ebst_insert(&expr->pattern_tree, &node->node);
+
+ node->expr = expr;
+ node->from_ref = pat->ref->tree_head;
+ pat->ref->tree_head = &node->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Indexes string pattern <pat> as a prefix into expression <expr>'s prefix
+ * tree. When case-insensitive matching is requested, trees cannot be used
+ * and the entry falls back to the patterns list. Returns 1 on success,
+ * otherwise 0 with <err> filled.
+ */
+int pat_idx_tree_pfx(struct pattern_expr *expr, struct pattern *pat, char **err)
+{
+ int len;
+ struct pattern_tree *node;
+
+ /* Only string can be indexed */
+ if (pat->type != SMP_T_STR) {
+ memprintf(err, "internal error: string expected, but the type is '%s'",
+ smp_to_type[pat->type]);
+ return 0;
+ }
+
+ /* If the flag PAT_F_IGNORE_CASE is set, we cannot use trees */
+ if (expr->mflags & PAT_MF_IGNORE_CASE)
+ return pat_idx_list_str(expr, pat, err);
+
+ /* Process the key len */
+ len = strlen(pat->ptr.str);
+
+ /* node memory allocation */
+ node = calloc(1, sizeof(*node) + len + 1);
+ if (!node) {
+ memprintf(err, "out of memory while loading pattern");
+ return 0;
+ }
+
+ /* copy the pointer to sample associated to this node */
+ node->data = pat->data;
+ node->ref = pat->ref;
+
+ /* copy the string and the trailing zero */
+ memcpy(node->node.key, pat->ptr.str, len + 1);
+ node->node.node.pfx = len * 8; /* prefix length is in bits */
+
+ /* index the new node */
+ ebmb_insert_prefix(&expr->pattern_tree, &node->node, len);
+
+ node->expr = expr;
+ node->from_ref = pat->ref->tree_head;
+ pat->ref->tree_head = &node->from_ref;
+ expr->ref->revision = rdtsc();
+ expr->ref->entry_cnt++;
+
+ /* that's ok */
+ return 1;
+}
+
+/* Deletes all patterns from reference <elt>. Note that all of their
+ * expressions must be locked, and the pattern lock must be held as well.
+ * Both the tree nodes and the list nodes attached to <elt> are freed, then
+ * the reference's revision is updated and its entry count decremented.
+ */
+void pat_delete_gen(struct pat_ref *ref, struct pat_ref_elt *elt)
+{
+ struct pattern_tree *tree;
+ struct pattern_list *pat;
+ void **node;
+
+ /* delete all known tree nodes. They are all allocated inline */
+ for (node = elt->tree_head; node;) {
+ tree = container_of(node, struct pattern_tree, from_ref);
+ /* advance along the from_ref chain before freeing the element */
+ node = *node;
+ BUG_ON(tree->ref != elt);
+
+ ebmb_delete(&tree->node);
+ free(tree->data);
+ free(tree);
+ }
+
+ /* delete all list nodes and free their pattern entries (str/reg) */
+ for (node = elt->list_head; node;) {
+ pat = container_of(node, struct pattern_list, from_ref);
+ /* advance along the from_ref chain before freeing the element */
+ node = *node;
+ BUG_ON(pat->pat.ref != elt);
+
+ /* Delete and free entry. */
+ LIST_DELETE(&pat->list);
+ if (pat->pat.sflags & PAT_SF_REGFREE)
+ regex_free(pat->pat.ptr.reg);
+ else
+ free(pat->pat.ptr.ptr);
+ free(pat->pat.data);
+ free(pat);
+ }
+
+ /* update revision number to refresh the cache */
+ ref->revision = rdtsc();
+ ref->entry_cnt--;
+ elt->tree_head = NULL;
+ elt->list_head = NULL;
+}
+
+/* Initializes expression <expr>: empty patterns list and empty trees */
+void pattern_init_expr(struct pattern_expr *expr)
+{
+ LIST_INIT(&expr->patterns);
+ expr->pattern_tree = EB_ROOT;
+ expr->pattern_tree_2 = EB_ROOT;
+}
+
+/* Initializes pattern head <head>: empty list of expressions */
+void pattern_init_head(struct pattern_head *head)
+{
+ LIST_INIT(&head->head);
+}
+
+/* The following functions are relative to the management of the reference
+ * lists. These lists are used to store the original pattern and associated
+ * value in string form.
+ *
+ * This is used with modifiable ACLs and MAPS.
+ *
+ * Pattern references are stored with two identifiers: the unique_id and
+ * the reference.
+ *
+ * The reference identifies a file. Each file with the same name points to the
+ * same reference. A file may be registered several times. If the file is
+ * modified, all its dependents are also modified. The reference can be used
+ * with maps or ACLs.
+ *
+ * The unique_id identifies an inline ACL. The unique id is unique for each
+ * ACL. The same id cannot be forced in the configuration file, because this
+ * would report an error.
+ *
+ * A particular case appears if the filename is a number. In this case, the
+ * unique_id is set with the number represented by the filename and the
+ * reference is also set. This method prevents duplicate unique_ids.
+ *
+ */
+
+/* This function looks up a reference by name. If the reference is found, a
+ * pointer to the struct pat_ref is returned, otherwise NULL is returned.
+ * References without a name (inline ACLs) are skipped.
+ */
+struct pat_ref *pat_ref_lookup(const char *reference)
+{
+ struct pat_ref *ref;
+
+ list_for_each_entry(ref, &pattern_reference, list)
+ if (ref->reference && strcmp(reference, ref->reference) == 0)
+ return ref;
+ return NULL;
+}
+
+/* This function looks up a reference's unique id. If the reference is found, a
+ * pointer to the struct pat_ref is returned, otherwise NULL is returned.
+ */
+struct pat_ref *pat_ref_lookupid(int unique_id)
+{
+ struct pat_ref *ref;
+
+ list_for_each_entry(ref, &pattern_reference, list)
+ if (ref->unique_id == unique_id)
+ return ref;
+ return NULL;
+}
+
+/* This function removes from the pattern reference <ref> all the patterns
+ * attached to the reference element <elt>, and the element itself. The
+ * reference must be locked. Watchers attached to <elt> are moved to the
+ * next element when one exists.
+ */
+void pat_ref_delete_by_ptr(struct pat_ref *ref, struct pat_ref_elt *elt)
+{
+ struct pattern_expr *expr;
+ struct bref *bref, *back;
+
+ /*
+ * we have to unlink all watchers from this reference pattern. We must
+ * not relink them if this elt was the last one in the list.
+ */
+ list_for_each_entry_safe(bref, back, &elt->back_refs, users) {
+ LIST_DELETE(&bref->users);
+ LIST_INIT(&bref->users);
+ if (elt->list.n != &ref->head)
+ LIST_APPEND(&LIST_ELEM(elt->list.n, typeof(elt), list)->back_refs, &bref->users);
+ bref->ref = elt->list.n;
+ }
+
+ /* delete all entries from all expressions for this pattern, taking
+ * all the expression locks for the duration of the removal.
+ */
+ list_for_each_entry(expr, &ref->pat, list)
+ HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+
+ pat_delete_gen(ref, elt);
+
+ list_for_each_entry(expr, &ref->pat, list)
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+
+ LIST_DELETE(&elt->list);
+ ebmb_delete(&elt->node);
+ free(elt->sample);
+ free(elt);
+}
+
+/* This function removes the pattern matching the pointer <refelt> from
+ * the reference and from each expr member of this reference. This function
+ * returns 1 if the entry was found and deleted, otherwise zero.
+ *
+ * <refelt> is user input: it is provided as an ID and should never be
+ * dereferenced without making sure that it is valid; hence the search by
+ * pointer comparison below.
+ */
+int pat_ref_delete_by_id(struct pat_ref *ref, struct pat_ref_elt *refelt)
+{
+ struct pat_ref_elt *elt, *safe;
+
+ /* delete pattern from reference */
+ list_for_each_entry_safe(elt, safe, &ref->head, list) {
+ if (elt == refelt) {
+ pat_ref_delete_by_ptr(ref, elt);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* This function removes all patterns matching <key> from the reference
+ * and from each expr member of the reference. This function returns 1
+ * if the deletion is done and returns 0 if the entry is not found.
+ * All duplicate entries sharing the same key are removed.
+ */
+int pat_ref_delete(struct pat_ref *ref, const char *key)
+{
+ struct ebmb_node *node;
+ int found = 0;
+
+ /* delete pattern from reference */
+ node = ebst_lookup(&ref->ebmb_root, key);
+ while (node) {
+ struct pat_ref_elt *elt;
+
+ elt = ebmb_entry(node, struct pat_ref_elt, node);
+ /* fetch the next duplicate before deleting the current element */
+ node = ebmb_next_dup(node);
+ pat_ref_delete_by_ptr(ref, elt);
+ found = 1;
+ }
+
+ return found;
+}
+
+/*
+ * find and return an element <elt> matching <key> in a reference <ref>
+ * return NULL if not found
+ */
+struct pat_ref_elt *pat_ref_find_elt(struct pat_ref *ref, const char *key)
+{
+ struct ebmb_node *node;
+
+ node = ebst_lookup(&ref->ebmb_root, key);
+ if (node)
+ return ebmb_entry(node, struct pat_ref_elt, node);
+
+ return NULL;
+}
+
+
+/* This function modifies the sample of pat_ref_elt <elt> in all expressions
+ * found under <ref> to become <value>. It is assumed that the caller has
+ * already verified that <elt> belongs to <ref>. The new value is first
+ * test-parsed against every expression's parser before any in-place update
+ * is performed. Returns 1 on success, or 0 on parse or allocation error
+ * with <err> filled.
+ */
+static inline int pat_ref_set_elt(struct pat_ref *ref, struct pat_ref_elt *elt,
+ const char *value, char **err)
+{
+ struct pattern_expr *expr;
+ struct sample_data **data;
+ char *sample;
+ struct sample_data test;
+ struct pattern_tree *tree;
+ struct pattern_list *pat;
+ void **node;
+
+
+ /* Try all needed converters. */
+ list_for_each_entry(expr, &ref->pat, list) {
+ if (!expr->pat_head->parse_smp)
+ continue;
+
+ if (!expr->pat_head->parse_smp(value, &test)) {
+ memprintf(err, "unable to parse '%s'", value);
+ return 0;
+ }
+ }
+
+ /* Modify pattern from reference. */
+ sample = strdup(value);
+ if (!sample) {
+ memprintf(err, "out of memory error");
+ return 0;
+ }
+ /* Load sample in each reference. All the conversions are tested
+ * below, normally these calls don't fail.
+ */
+ for (node = elt->tree_head; node;) {
+ tree = container_of(node, struct pattern_tree, from_ref);
+ /* advance along the from_ref chain before touching the element */
+ node = *node;
+ BUG_ON(tree->ref != elt);
+ expr = tree->expr;
+ if (!expr->pat_head->parse_smp)
+ continue;
+
+ data = &tree->data;
+ if (data && *data) {
+ HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+ if (!expr->pat_head->parse_smp(sample, *data))
+ *data = NULL;
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+ }
+ }
+
+ for (node = elt->list_head; node;) {
+ pat = container_of(node, struct pattern_list, from_ref);
+ /* advance along the from_ref chain before touching the element */
+ node = *node;
+ BUG_ON(pat->pat.ref != elt);
+ expr = pat->expr;
+ if (!expr->pat_head->parse_smp)
+ continue;
+
+ data = &pat->pat.data;
+ if (data && *data) {
+ HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+ if (!expr->pat_head->parse_smp(sample, *data))
+ *data = NULL;
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+ }
+ }
+
+ /* free old sample only when all exprs are updated */
+ free(elt->sample);
+ elt->sample = sample;
+
+
+ return 1;
+}
+
+/* This function modifies the sample of pat_ref_elt <refelt> in all expressions
+ * found under <ref> to become <value>, after checking that <refelt> really
+ * belongs to <ref>. Returns 1 on success, otherwise 0 with <err> filled.
+ *
+ * <refelt> is user input: it is provided as an ID and should never be
+ * dereferenced without making sure that it is valid; hence the search by
+ * pointer comparison below.
+ */
+int pat_ref_set_by_id(struct pat_ref *ref, struct pat_ref_elt *refelt, const char *value, char **err)
+{
+ struct pat_ref_elt *elt;
+
+ /* Look for pattern in the reference. */
+ list_for_each_entry(elt, &ref->head, list) {
+ if (elt == refelt) {
+ if (!pat_ref_set_elt(ref, elt, value, err))
+ return 0;
+ return 1;
+ }
+ }
+
+ memprintf(err, "key or pattern not found");
+ return 0;
+}
+
+/* This function modifies to <value> the sample of all patterns matching <key>
+ * under <ref>. When <elt> is non-NULL, the lookup is skipped and the update
+ * starts from that element, still covering its duplicates. Individual errors
+ * are concatenated into <err>. Returns 1 if at least one entry was found,
+ * otherwise 0.
+ */
+int pat_ref_set(struct pat_ref *ref, const char *key, const char *value, char **err, struct pat_ref_elt *elt)
+{
+ int found = 0;
+ char *_merr;
+ char **merr;
+ struct ebmb_node *node;
+
+ /* use a local error pointer so that per-entry messages can be merged */
+ if (err) {
+ merr = &_merr;
+ *merr = NULL;
+ }
+ else
+ merr = NULL;
+
+ if (elt) {
+ node = &elt->node;
+ }
+ else {
+ /* Look for pattern in the reference. */
+ node = ebst_lookup(&ref->ebmb_root, key);
+ }
+
+ while (node) {
+ elt = ebmb_entry(node, struct pat_ref_elt, node);
+ node = ebmb_next_dup(node);
+ if (!pat_ref_set_elt(ref, elt, value, merr)) {
+ if (err && merr) {
+ if (!found) {
+ *err = *merr;
+ } else {
+ memprintf(err, "%s, %s", *err, *merr);
+ ha_free(merr);
+ }
+ }
+ }
+ found = 1;
+ }
+
+ if (!found) {
+ memprintf(err, "entry not found");
+ return 0;
+ }
+ return 1;
+}
+
+/* This function creates a new reference. <reference> is the reference name.
+ * <flags> are PAT_REF_*. /!\ The reference is not checked, and must
+ * be unique. The user must check the reference with "pat_ref_lookup()"
+ * before calling this function. If the function fails, it returns NULL,
+ * otherwise it returns the new struct pat_ref, already appended to the
+ * global pattern_reference list.
+ */
+struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned int flags)
+{
+ struct pat_ref *ref;
+
+ ref = calloc(1, sizeof(*ref));
+ if (!ref)
+ return NULL;
+
+ if (display) {
+ ref->display = strdup(display);
+ if (!ref->display) {
+ free(ref);
+ return NULL;
+ }
+ }
+
+ ref->reference = strdup(reference);
+ if (!ref->reference) {
+ free(ref->display);
+ free(ref);
+ return NULL;
+ }
+
+ ref->flags = flags;
+ ref->unique_id = -1;
+ ref->revision = 0;
+ ref->entry_cnt = 0;
+
+ LIST_INIT(&ref->head);
+ ref->ebmb_root = EB_ROOT;
+ LIST_INIT(&ref->pat);
+ HA_RWLOCK_INIT(&ref->lock);
+ LIST_APPEND(&pattern_reference, &ref->list);
+
+ return ref;
+}
+
+/* This function creates a new reference. <unique_id> is the unique id. If
+ * the value of <unique_id> is -1, the unique id is calculated later.
+ * <flags> are PAT_REF_*. /!\ The reference is not checked, and must
+ * be unique. The user must check the reference with "pat_ref_lookup()"
+ * or pat_ref_lookupid before calling this function. If the function
+ * fails, it returns NULL, otherwise it returns the new struct pat_ref,
+ * already appended to the global pattern_reference list.
+ */
+struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int flags)
+{
+ struct pat_ref *ref;
+
+ ref = calloc(1, sizeof(*ref));
+ if (!ref)
+ return NULL;
+
+ if (display) {
+ ref->display = strdup(display);
+ if (!ref->display) {
+ free(ref);
+ return NULL;
+ }
+ }
+
+ /* no file name: this reference is an inline ACL identified by its id */
+ ref->reference = NULL;
+ ref->flags = flags;
+ ref->curr_gen = 0;
+ ref->next_gen = 0;
+ ref->unique_id = unique_id;
+ LIST_INIT(&ref->head);
+ ref->ebmb_root = EB_ROOT;
+ LIST_INIT(&ref->pat);
+ HA_RWLOCK_INIT(&ref->lock);
+ LIST_APPEND(&pattern_reference, &ref->list);
+
+ return ref;
+}
+
+/* This function adds entry to <ref>. It can fail on memory error. It returns
+ * the newly added element on success, or NULL on failure. The PATREF_LOCK on
+ * <ref> must be held. It sets the newly created pattern's generation number
+ * to the same value as the reference's. The pattern string is stored inline
+ * after the element, and <sample> (which may be NULL) is duplicated.
+ */
+struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, const char *pattern, const char *sample, int line)
+{
+ struct pat_ref_elt *elt;
+ int len = strlen(pattern);
+
+ elt = calloc(1, sizeof(*elt) + len + 1);
+ if (!elt)
+ goto fail;
+
+ elt->gen_id = ref->curr_gen;
+ elt->line = line;
+
+ memcpy((char*)elt->pattern, pattern, len + 1);
+
+ if (sample) {
+ elt->sample = strdup(sample);
+ if (!elt->sample)
+ goto fail;
+ }
+
+ LIST_INIT(&elt->back_refs);
+ elt->list_head = NULL;
+ elt->tree_head = NULL;
+ LIST_APPEND(&ref->head, &elt->list);
+ /* Even if calloc()'ed, ensure this node is not linked to a tree. */
+ elt->node.node.leaf_p = NULL;
+ ebst_insert(&ref->ebmb_root, &elt->node);
+ return elt;
+ fail:
+ free(elt);
+ return NULL;
+}
+
+/* This function creates sample found in <elt>, parses the pattern also
+ * found in <elt> and inserts it in <expr>. If the function fails, it
+ * returns 0 and <err> is filled. In success case, the function returns 1.
+ * NOTE(review): <patflags> is not used by this function despite being in
+ * the signature.
+ */
+int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr,
+ int patflags, char **err)
+{
+ struct sample_data *data;
+ struct pattern pattern;
+
+ /* Create sample */
+ if (elt->sample && expr->pat_head->parse_smp) {
+ /* New sample. */
+ data = malloc(sizeof(*data));
+ if (!data)
+ return 0;
+
+ /* Parse value. */
+ if (!expr->pat_head->parse_smp(elt->sample, data)) {
+ memprintf(err, "unable to parse '%s'", elt->sample);
+ free(data);
+ return 0;
+ }
+
+ }
+ else
+ data = NULL;
+
+ /* initialise pattern */
+ memset(&pattern, 0, sizeof(pattern));
+ pattern.data = data;
+ pattern.ref = elt;
+
+ /* parse pattern */
+ if (!expr->pat_head->parse(elt->pattern, &pattern, expr->mflags, err)) {
+ free(data);
+ return 0;
+ }
+
+ /* index pattern under the expression's write lock */
+ HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+ /* index pattern */
+ if (!expr->pat_head->index(expr, &pattern, err)) {
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+ free(data);
+ return 0;
+ }
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+
+ return 1;
+}
+
+/* This function tries to commit entry <elt> into <ref>. The new entry must
+ * have already been inserted using pat_ref_append(), and its generation number
+ * may have been adjusted as it will not be changed. <err> must point to a NULL
+ * pointer. The PATREF lock on <ref> must be held. All the pattern_expr for
+ * this reference will be updated (parsing, indexing). On success, non-zero is
+ * returned. On failure, all the operation is rolled back (the element is
+ * deleted from all expressions and is freed), zero is returned and the error
+ * pointer <err> may have been updated (and the caller must free it). Failure
+ * causes include memory allocation, parsing error or indexing error.
+ */
+int pat_ref_commit_elt(struct pat_ref *ref, struct pat_ref_elt *elt, char **err)
+{
+ struct pattern_expr *expr;
+
+ list_for_each_entry(expr, &ref->pat, list) {
+ if (!pat_ref_push(elt, expr, 0, err)) {
+ /* rollback: remove the element from every expression */
+ pat_ref_delete_by_ptr(ref, elt);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Loads <pattern>:<sample> into <ref> for generation <gen>. <sample> may be
+ * NULL if none exists (e.g. ACL). If not needed, the generation number should
+ * be set to ref->curr_gen. The error pointer must initially point to NULL. The
+ * new entry will be propagated to all use places, involving allocation, parsing
+ * and indexing. On error (parsing, allocation), the operation will be rolled
+ * back, an error may be reported, and NULL will be returned. On success, the
+ * freshly allocated element will be returned. The PATREF lock on <ref> must be
+ * held during the operation.
+ */
+struct pat_ref_elt *pat_ref_load(struct pat_ref *ref, unsigned int gen,
+ const char *pattern, const char *sample,
+ int line, char **err)
+{
+ struct pat_ref_elt *elt;
+
+ elt = pat_ref_append(ref, pattern, sample, line);
+ if (elt) {
+ /* override the generation assigned by pat_ref_append() */
+ elt->gen_id = gen;
+ if (!pat_ref_commit_elt(ref, elt, err))
+ elt = NULL;
+ } else
+ memprintf(err, "out of memory error");
+
+ return elt;
+}
+
+/* This function adds entry to <ref>. It can fail on memory error. The new
+ * entry is added at all the pattern_expr registered in this reference. The
+ * function stops on the first error encountered. It returns 0 and <err> is
+ * filled. If an error is encountered, the complete add operation is cancelled.
+ * If the insertion is a success the function returns 1. The entry is created
+ * for the current generation with no line number (-1).
+ */
+int pat_ref_add(struct pat_ref *ref,
+ const char *pattern, const char *sample,
+ char **err)
+{
+ return !!pat_ref_load(ref, ref->curr_gen, pattern, sample, -1, err);
+}
+
+/* This function purges all elements from <ref> whose generation is included in
+ * the range of <from> to <to> (inclusive), taking wrapping into consideration.
+ * It will not purge more than <budget> entries at once, in order to remain
+ * responsive. If budget is negative, no limit is applied.
+ * The caller must already hold the PATREF_LOCK on <ref>. The function will
+ * take the PATEXP_LOCK on all expressions of the pattern as needed. It returns
+ * non-zero on completion, or zero if it had to stop before the end after
+ * <budget> was depleted.
+ */
+int pat_ref_purge_range(struct pat_ref *ref, uint from, uint to, int budget)
+{
+ struct pat_ref_elt *elt, *elt_bck;
+ struct bref *bref, *bref_bck;
+ struct pattern_expr *expr;
+ int done;
+
+ list_for_each_entry(expr, &ref->pat, list)
+ HA_RWLOCK_WRLOCK(PATEXP_LOCK, &expr->lock);
+
+ /* all expr are locked, we can safely remove all pat_ref */
+
+ /* assume completion for e.g. empty lists */
+ done = 1;
+ list_for_each_entry_safe(elt, elt_bck, &ref->head, list) {
+ /* wrap-safe check that gen_id lies within [from, to] */
+ if (elt->gen_id - from > to - from)
+ continue;
+
+ if (budget >= 0 && !budget--) {
+ done = 0;
+ break;
+ }
+
+ /*
+ * we have to unlink all watchers from this reference pattern. We must
+ * not relink them if this elt was the last one in the list.
+ */
+ list_for_each_entry_safe(bref, bref_bck, &elt->back_refs, users) {
+ LIST_DELETE(&bref->users);
+ LIST_INIT(&bref->users);
+ if (elt->list.n != &ref->head)
+ LIST_APPEND(&LIST_ELEM(elt->list.n, typeof(elt), list)->back_refs, &bref->users);
+ bref->ref = elt->list.n;
+ }
+
+ /* delete the storage for all representations of this pattern. */
+ pat_delete_gen(ref, elt);
+
+ LIST_DELETE(&elt->list);
+ ebmb_delete(&elt->node);
+ free(elt->sample);
+ free(elt);
+ }
+
+ list_for_each_entry(expr, &ref->pat, list)
+ HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &expr->lock);
+
+ return done;
+}
+
+/* This function prunes all entries of <ref> and all their associated
+ * pattern_expr. It may return before the end of the list is reached,
+ * returning 0 to yield, indicating to the caller that it must call it
+ * again until it returns non-zero. All patterns are purged, both current
+ * ones and future or incomplete ones. This is used by "clear map" or "clear acl".
+ */
+int pat_ref_prune(struct pat_ref *ref)
+{
+	return pat_ref_purge_range(ref, 0, ~0, 100); /* full gen range, 100 entries per call */
+}
+
+/* This function looks up any existing reference <ref> in pattern_head <head>, and
+ * returns the associated pattern_expr pointer if found, otherwise NULL.
+ */
+struct pattern_expr *pattern_lookup_expr(struct pattern_head *head, struct pat_ref *ref)
+{
+	struct pattern_expr_list *expr;
+
+	list_for_each_entry(expr, &head->head, list)
+		if (expr->expr->ref == ref) /* same underlying reference => same expr */
+			return expr->expr;
+	return NULL;
+}
+
+/* This function creates new pattern_expr associated to the reference <ref>.
+ * <ref> can be NULL. If an error occurs, the function returns NULL and
+ * <err> is filled. Otherwise, the function returns new pattern_expr linked
+ * with <head> and <ref>.
+ *
+ * The returned value can be an already filled pattern list, in this case the
+ * flag <reuse> is set.
+ */
+struct pattern_expr *pattern_new_expr(struct pattern_head *head, struct pat_ref *ref,
+                                      int patflags, char **err, int *reuse)
+{
+	struct pattern_expr *expr;
+	struct pattern_expr_list *list;
+
+	if (reuse)
+		*reuse = 0;
+
+	/* Memory and initialization of the chain element. */
+	list = calloc(1, sizeof(*list));
+	if (!list) {
+		memprintf(err, "out of memory");
+		return NULL;
+	}
+
+	/* Look for an existing similar expr. Note that only the index, parse
+	 * and parse_smp functions must be identical for patterns to be
+	 * considered similar. The other functions depend on these first ones.
+	 */
+	if (ref) {
+		list_for_each_entry(expr, &ref->pat, list)
+			if (expr->pat_head->index == head->index &&
+			    expr->pat_head->parse == head->parse &&
+			    expr->pat_head->parse_smp == head->parse_smp &&
+			    expr->mflags == patflags)
+				break;
+		if (&expr->list == &ref->pat)
+			expr = NULL;
+	}
+	else
+		expr = NULL;
+
+	/* If no similar expr was found, we create new expr. */
+	if (!expr) {
+		/* Allocate memory for the expr struct. */
+		expr = calloc(1, sizeof(*expr));
+		if (!expr) {
+			free(list);
+			memprintf(err, "out of memory");
+			return NULL;
+		}
+
+		/* Initialize this new expr. */
+		pattern_init_expr(expr);
+
+		/* Copy the pattern matching and indexing flags. */
+		expr->mflags = patflags;
+
+		/* This new pattern expression references one of its heads. */
+		expr->pat_head = head;
+
+		/* Link with ref, or to self to facilitate LIST_DELETE() */
+		if (ref)
+			LIST_APPEND(&ref->pat, &expr->list);
+		else
+			LIST_INIT(&expr->list);
+
+		expr->ref = ref;
+
+		HA_RWLOCK_INIT(&expr->lock);
+
+		/* We must free this pattern if it is no more used. */
+		list->do_free = 1;
+	}
+	else {
+		/* If the pattern used already exists, it is already linked
+		 * with ref and we must not free it.
+		 */
+		list->do_free = 0;
+		if (reuse)
+			*reuse = 1;
+	}
+
+	/* The new list element references the pattern_expr. */
+	list->expr = expr;
+
+	/* Link the list element with the pattern_head. */
+	LIST_APPEND(&head->head, &list->list);
+	return expr;
+}
+
+/* Reads patterns from a file. If <err_msg> is non-NULL, an error message will
+ * be returned there on errors and the caller will have to free it.
+ *
+ * The file contains one key + value per line. Lines which start with '#' are
+ * ignored, just like empty lines. Leading tabs/spaces are stripped. The key is
+ * then the first "word" (series of non-space/tabs characters), and the value is
+ * what follows this series of space/tab till the end of the line excluding
+ * trailing spaces/tabs.
+ *
+ * Example :
+ *
+ *     # this is a comment and is ignored
+ *        62.212.114.60     1wt.eu      \n
+ *     <-><-----------><---><----><---->
+ *      |       |        |     |    `--- trailing spaces ignored
+ *      |       |        |     `-------- value
+ *      |       |        `--------------- middle spaces ignored
+ *      |       `------------------------ key
+ *      `-------------------------------- leading spaces ignored
+ *
+ * Return non-zero in case of success, otherwise 0.
+ */
+int pat_ref_read_from_file_smp(struct pat_ref *ref, const char *filename, char **err)
+{
+	FILE *file;
+	char *c;
+	int ret = 0;
+	int line = 0;
+	char *key_beg;
+	char *key_end;
+	char *value_beg;
+	char *value_end;
+
+	file = fopen(filename, "r");
+	if (!file) {
+		memprintf(err, "failed to open pattern file <%s>", filename);
+		return 0;
+	}
+
+	/* now parse all patterns. The file may contain only one pattern
+	 * followed by one value per line. The leading spaces, separator spaces
+	 * and trailing spaces are stripped. Each line can contain a comment started by '#'
+	 */
+	while (fgets(trash.area, trash.size, file) != NULL) {
+		line++;
+		c = trash.area;
+
+		/* ignore lines beginning with a sharp ('#') */
+		if (*c == '#')
+			continue;
+
+		/* strip leading spaces and tabs */
+		while (*c == ' ' || *c == '\t')
+			c++;
+
+		/* empty lines are ignored too */
+		if (*c == '\0' || *c == '\r' || *c == '\n')
+			continue;
+
+		/* look for the end of the key */
+		key_beg = c;
+		while (*c && *c != ' ' && *c != '\t' && *c != '\n' && *c != '\r')
+			c++;
+
+		key_end = c;
+
+		/* strip middle spaces and tabs */
+		while (*c == ' ' || *c == '\t')
+			c++;
+
+		/* look for the end of the value, it is the end of the line */
+		value_beg = c;
+		while (*c && *c != '\n' && *c != '\r')
+			c++;
+		value_end = c;
+
+		/* trim possibly trailing spaces and tabs */
+		while (value_end > value_beg && (value_end[-1] == ' ' || value_end[-1] == '\t'))
+			value_end--;
+
+		/* set final \0 and check entries */
+		*key_end = '\0';
+		*value_end = '\0';
+
+		/* insert values */
+		if (!pat_ref_append(ref, key_beg, value_beg, line)) {
+			memprintf(err, "out of memory");
+			goto out_close;
+		}
+	}
+
+	if (ferror(file)) {
+		memprintf(err, "error encountered while reading <%s> : %s",
+			  filename, strerror(errno));
+		goto out_close;
+	}
+	/* success */
+	ret = 1;
+
+ out_close:
+	fclose(file);
+	return ret;
+}
+
+/* Reads patterns from a file. If <err_msg> is non-NULL, an error message will
+ * be returned there on errors and the caller will have to free it.
+ */
+int pat_ref_read_from_file(struct pat_ref *ref, const char *filename, char **err)
+{
+	FILE *file;
+	char *c;
+	char *arg;
+	int ret = 0;
+	int line = 0;
+
+	file = fopen(filename, "r");
+	if (!file) {
+		memprintf(err, "failed to open pattern file <%s>", filename);
+		return 0;
+	}
+
+	/* now parse all patterns. The file may contain only one pattern per
+	 * line. If the line contains spaces, they will be part of the pattern.
+	 * The pattern stops at the first CR, LF or EOF encountered.
+	 */
+	while (fgets(trash.area, trash.size, file) != NULL) {
+		line++;
+		c = trash.area;
+
+		/* ignore lines beginning with a sharp ('#') */
+		if (*c == '#')
+			continue;
+
+		/* strip leading spaces and tabs */
+		while (*c == ' ' || *c == '\t')
+			c++;
+
+
+		arg = c;
+		while (*c && *c != '\n' && *c != '\r')
+			c++;
+		*c = 0;
+
+		/* empty lines are ignored too */
+		if (c == arg)
+			continue;
+
+		if (!pat_ref_append(ref, arg, NULL, line)) {
+			memprintf(err, "out of memory when loading patterns from file <%s>", filename);
+			goto out_close;
+		}
+	}
+
+	if (ferror(file)) {
+		memprintf(err, "error encountered while reading <%s> : %s",
+			  filename, strerror(errno));
+		goto out_close;
+	}
+	ret = 1; /* success */
+
+ out_close:
+	fclose(file);
+	return ret;
+}
+
+int pattern_read_from_file(struct pattern_head *head, unsigned int refflags,
+                           const char *filename, int patflags, int load_smp,
+                           char **err, const char *file, int line)
+{
+	struct pat_ref *ref;
+	struct pattern_expr *expr;
+	struct pat_ref_elt *elt;
+	int reuse = 0;
+
+	/* Lookup for the existing reference. */
+	ref = pat_ref_lookup(filename);
+
+	/* If the reference doesn't exist, create it and load the associated file. */
+	if (!ref) {
+		chunk_printf(&trash,
+		             "pattern loaded from file '%s' used by %s at file '%s' line %d",
+		             filename, refflags & PAT_REF_MAP ? "map" : "acl", file, line);
+
+		ref = pat_ref_new(filename, trash.area, refflags);
+		if (!ref) {
+			memprintf(err, "out of memory");
+			return 0;
+		}
+
+		if (load_smp) {
+			ref->flags |= PAT_REF_SMP;
+			if (!pat_ref_read_from_file_smp(ref, filename, err))
+				return 0;
+		}
+		else {
+			if (!pat_ref_read_from_file(ref, filename, err))
+				return 0;
+		}
+	}
+	else {
+		/* The reference already exists, check the map compatibility. */
+
+		/* If the load requires samples and the flag PAT_REF_SMP is not set,
+		 * the reference doesn't contain samples, and cannot be used.
+		 */
+		if (load_smp) {
+			if (!(ref->flags & PAT_REF_SMP)) {
+				memprintf(err, "The file \"%s\" is already used as one column file "
+				               "and cannot be used by as two column file.",
+				               filename);
+				return 0;
+			}
+		}
+		else {
+			/* The load doesn't require samples. If the flag PAT_REF_SMP is
+			 * set, the reference contains a sample, and cannot be used.
+			 */
+			if (ref->flags & PAT_REF_SMP) {
+				memprintf(err, "The file \"%s\" is already used as two column file "
+				               "and cannot be used by as one column file.",
+				               filename);
+				return 0;
+			}
+		}
+
+		/* Extends display */
+		chunk_printf(&trash, "%s", ref->display);
+		chunk_appendf(&trash, ", by %s at file '%s' line %d",
+		              refflags & PAT_REF_MAP ? "map" : "acl", file, line);
+		free(ref->display);
+		ref->display = strdup(trash.area);
+		if (!ref->display) {
+			memprintf(err, "out of memory");
+			return 0;
+		}
+
+		/* Merge flags. */
+		ref->flags |= refflags;
+	}
+
+	/* Now we can load patterns from the reference. */
+
+	/* Lookup for an existing reference in the head. If the reference
+	 * doesn't exist, create it.
+	 */
+	expr = pattern_lookup_expr(head, ref);
+	if (!expr || (expr->mflags != patflags)) {
+		expr = pattern_new_expr(head, ref, patflags, err, &reuse);
+		if (!expr)
+			return 0;
+	}
+
+	/* The returned expression may not be empty, because the function
+	 * "pattern_new_expr" looks up similar pattern lists and can
+	 * reuse an already filled pattern list. In this case, we cannot
+	 * reload the patterns.
+	 */
+	if (reuse)
+		return 1;
+
+	/* Load reference content in the pattern expression.
+	 * We need to load elements in the same order they were seen in the
+	 * file as list-based matching types may rely on it.
+	 */
+	list_for_each_entry(elt, &ref->head, list) {
+		if (!pat_ref_push(elt, expr, patflags, err)) {
+			if (elt->line > 0)
+				memprintf(err, "%s at line %d of file '%s'",
+				          *err, elt->line, filename);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+/* This function executes a pattern match on a sample. It applies pattern <expr>
+ * to sample <smp>. The function returns NULL if the sample doesn't match. It
+ * returns non-null if the sample matches. If <fill> is true and the sample
+ * matches, the function returns the matched pattern. In many cases, this
+ * pattern can be a static buffer.
+ */
+struct pattern *pattern_exec_match(struct pattern_head *head, struct sample *smp, int fill)
+{
+	struct pattern_expr_list *list;
+	struct pattern *pat;
+
+	if (!head->match) {
+		if (fill) {
+			static_pattern.data = NULL;
+			static_pattern.ref = NULL;
+			static_pattern.sflags = 0;
+			static_pattern.type = SMP_T_SINT;
+			static_pattern.val.i = 1;
+		}
+		return &static_pattern;
+	}
+
+	/* convert input to string */
+	if (!sample_convert(smp, head->expect_type))
+		return NULL;
+
+	list_for_each_entry(list, &head->head, list) {
+		HA_RWLOCK_RDLOCK(PATEXP_LOCK, &list->expr->lock);
+		pat = head->match(smp, list->expr, fill);
+		if (pat) {
+			/* We duplicate the pattern cause it could be modified
+			   by another thread */
+			if (pat != &static_pattern) {
+				memcpy(&static_pattern, pat, sizeof(struct pattern));
+				pat = &static_pattern;
+			}
+
+			/* We also duplicate the sample data for
+			   same reason */
+			if (pat->data && (pat->data != &static_sample_data)) {
+				switch(pat->data->type) {
+				case SMP_T_STR:
+					static_sample_data.type = SMP_T_STR;
+					static_sample_data.u.str = *get_trash_chunk();
+					static_sample_data.u.str.data = pat->data->u.str.data;
+					if (static_sample_data.u.str.data >= static_sample_data.u.str.size)
+						static_sample_data.u.str.data = static_sample_data.u.str.size - 1;
+					memcpy(static_sample_data.u.str.area,
+					       pat->data->u.str.area, static_sample_data.u.str.data);
+					static_sample_data.u.str.area[static_sample_data.u.str.data] = 0;
+					pat->data = &static_sample_data;
+					break;
+
+				case SMP_T_IPV4:
+				case SMP_T_IPV6:
+				case SMP_T_SINT:
+					memcpy(&static_sample_data, pat->data, sizeof(struct sample_data));
+					pat->data = &static_sample_data;
+					break;
+				default:
+					/* unimplemented pattern type */
+					pat->data = NULL;
+					break;
+				}
+			}
+			HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
+			return pat;
+		}
+		HA_RWLOCK_RDUNLOCK(PATEXP_LOCK, &list->expr->lock);
+	}
+	return NULL;
+}
+
+/* This function prunes the pattern expressions starting at pattern_head <head>. */
+void pattern_prune(struct pattern_head *head)
+{
+	struct pattern_expr_list *list, *safe;
+
+	list_for_each_entry_safe(list, safe, &head->head, list) {
+		LIST_DELETE(&list->list);
+		if (list->do_free) { /* this list element owns the expr */
+			LIST_DELETE(&list->expr->list);
+			HA_RWLOCK_WRLOCK(PATEXP_LOCK, &list->expr->lock);
+			head->prune(list->expr);
+			HA_RWLOCK_WRUNLOCK(PATEXP_LOCK, &list->expr->lock);
+			free(list->expr);
+		}
+		free(list);
+	}
+}
+
+/* This function compares two pat_ref** on their unique_id, and returns -1/0/1
+ * depending on their order (suitable for sorting with qsort()/bsearch()).
+ */
+static int cmp_pat_ref(const void *_a, const void *_b)
+{
+	struct pat_ref * const *a = _a;
+	struct pat_ref * const *b = _b;
+
+	if ((*a)->unique_id < (*b)->unique_id)
+		return -1;
+	else if ((*a)->unique_id > (*b)->unique_id)
+		return 1;
+	return 0;
+}
+
+/* This function finalizes the configuration parsing. It sets all the
+ * automatic ids.
+ */
+int pattern_finalize_config(void)
+{
+	size_t len = 0;
+	size_t unassigned_pos = 0;
+	int next_unique_id = 0;
+	size_t i, j;
+	struct pat_ref *ref, **arr;
+	struct list pr = LIST_HEAD_INIT(pr);
+
+	pat_lru_seed = ha_random();
+
+	/* Count pat_refs with user-defined unique_id and total count */
+	list_for_each_entry(ref, &pattern_reference, list) {
+		len++;
+		if (ref->unique_id != -1)
+			unassigned_pos++; /* also the start index of the unassigned segment */
+	}
+
+	if (len == 0) {
+		return 0;
+	}
+
+	arr = calloc(len, sizeof(*arr));
+	if (arr == NULL) {
+		ha_alert("Out of memory error.\n");
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	i = 0;
+	j = unassigned_pos;
+	list_for_each_entry(ref, &pattern_reference, list) {
+		if (ref->unique_id != -1)
+			arr[i++] = ref;
+		else
+			arr[j++] = ref;
+	}
+
+	/* Sort first segment of array with user-defined unique ids for
+	 * fast lookup when generating unique ids
+	 */
+	qsort(arr, unassigned_pos, sizeof(*arr), cmp_pat_ref);
+
+	/* Assign unique ids to the rest of the elements */
+	for (i = unassigned_pos; i < len; i++) {
+		do {
+			arr[i]->unique_id = next_unique_id++;
+		} while (bsearch(&arr[i], arr, unassigned_pos, sizeof(*arr), cmp_pat_ref));
+	}
+
+	/* Sort complete array */
+	qsort(arr, len, sizeof(*arr), cmp_pat_ref);
+
+	/* Convert back to linked list */
+	for (i = 0; i < len; i++)
+		LIST_APPEND(&pr, &arr[i]->list);
+
+	/* swap root */
+	LIST_INSERT(&pr, &pattern_reference);
+	LIST_DELETE(&pr);
+
+	free(arr);
+	return 0;
+}
+
+static int pattern_per_thread_lru_alloc()
+{
+	if (!global.tune.pattern_cache)
+		return 1; /* cache disabled: nothing to allocate */
+	pat_lru_tree = lru64_new(global.tune.pattern_cache);
+	return !!pat_lru_tree;
+}
+
+static void pattern_per_thread_lru_free()
+{
+	lru64_destroy(pat_lru_tree); /* release this thread's pattern LRU cache */
+}
+
+REGISTER_PER_THREAD_ALLOC(pattern_per_thread_lru_alloc);
+REGISTER_PER_THREAD_FREE(pattern_per_thread_lru_free);
diff --git a/src/payload.c b/src/payload.c
new file mode 100644
index 0000000..6a536d7
--- /dev/null
+++ b/src/payload.c
@@ -0,0 +1,1448 @@
+/*
+ * General protocol-agnostic payload-based sample fetches and ACLs
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/channel.h>
+#include <haproxy/connection.h>
+#include <haproxy/htx.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/pattern.h>
+#include <haproxy/payload.h>
+#include <haproxy/sample.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+
+/************************************************************************/
+/* All supported sample fetch functions must be declared here */
+/************************************************************************/
+
+/* wait for more data as long as possible, then return TRUE. This should be
+ * used with content inspection.
+ */
+static int
+smp_fetch_wait_end(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!(smp->opt & SMP_OPT_FINAL)) {
+		smp->flags |= SMP_F_MAY_CHANGE; /* not the final call: ask to be called again */
+		return 0;
+	}
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = 1;
+	return 1;
+}
+
+/* return the number of bytes in the request or response buffer */
+static int
+smp_fetch_len(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (smp->strm) {
+		struct channel *chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+
+		/* Not accurate but kept for backward compatibility purpose */
+		if (IS_HTX_STRM(smp->strm)) {
+			struct htx *htx = htxbuf(&chn->buf);
+			smp->data.u.sint = htx->data - co_data(chn);
+		}
+		else
+			smp->data.u.sint = ci_data(chn);
+	}
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK) {
+		struct check *check = __objt_check(smp->sess->origin);
+
+		/* Not accurate but kept for backward compatibility purpose */
+		smp->data.u.sint = ((check->sc && IS_HTX_SC(check->sc)) ? (htxbuf(&check->bi))->data: b_data(&check->bi));
+	}
+	else
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOLATILE | SMP_F_MAY_CHANGE;
+	return 1;
+}
+
+/* Returns 0 if the client didn't send a SessionTicket Extension
+ * Returns 1 if the client sent SessionTicket Extension
+ * Returns 2 if the client also sent non-zero length SessionTicket
+ * Returns SMP_T_SINT data type
+ */
+static int
+smp_fetch_req_ssl_st_ext(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int hs_len, ext_len, bleft;
+	struct channel *chn;
+	unsigned char *data;
+
+	if (!smp->strm)
+		goto not_ssl_hello;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl_hello;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+
+
+	bleft = ci_data(chn);
+	data = (unsigned char *)ci_head(chn);
+
+	/* Check for SSL/TLS Handshake */
+	if (!bleft)
+		goto too_short;
+	if (*data != 0x16)
+		goto not_ssl_hello;
+
+	/* Check for SSLv3 or later (SSL version >= 3.0) in the record layer*/
+	if (bleft < 3)
+		goto too_short;
+	if (data[1] < 0x03)
+		goto not_ssl_hello;
+
+	if (bleft < 5)
+		goto too_short;
+	hs_len = (data[3] << 8) + data[4];
+	if (hs_len < 1 + 3 + 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	data += 5; /* enter TLS handshake */
+	bleft -= 5;
+
+	/* Check for a complete client hello starting at <data> */
+	if (bleft < 1)
+		goto too_short;
+	if (data[0] != 0x01) /* msg_type = Client Hello */
+		goto not_ssl_hello;
+
+	/* Check the Hello's length */
+	if (bleft < 4)
+		goto too_short;
+	hs_len = (data[1] << 16) + (data[2] << 8) + data[3];
+	if (hs_len < 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	/* We want the full handshake here */
+	if (bleft < hs_len)
+		goto too_short;
+
+	data += 4;
+	/* Start of the ClientHello message */
+	if (data[0] < 0x03 || data[1] < 0x01) /* TLSv1 minimum */
+		goto not_ssl_hello;
+
+	ext_len = data[34]; /* session_id_len */
+	if (ext_len > 32 || ext_len > (hs_len - 35)) /* check for correct session_id len */
+		goto not_ssl_hello;
+
+	/* Jump to cipher suite */
+	hs_len -= 35 + ext_len;
+	data += 35 + ext_len;
+
+	if (hs_len < 4 ||                               /* minimum one cipher */
+	    (ext_len = (data[0] << 8) + data[1]) < 2 || /* minimum 2 bytes for a cipher */
+	    ext_len > hs_len)
+		goto not_ssl_hello;
+
+	/* Jump to the compression methods */
+	hs_len -= 2 + ext_len;
+	data += 2 + ext_len;
+
+	if (hs_len < 2 ||                    /* minimum one compression method */
+	    data[0] < 1 || data[0] > hs_len) /* minimum 1 bytes for a method */
+		goto not_ssl_hello;
+
+	/* Jump to the extensions */
+	hs_len -= 1 + data[0];
+	data += 1 + data[0];
+
+	if (hs_len < 2 ||                                      /* minimum one extension list length */
+	    (ext_len = (data[0] << 8) + data[1]) > hs_len - 2) /* list too long */
+		goto not_ssl_hello;
+
+	hs_len = ext_len; /* limit ourselves to the extension length */
+	data += 2;
+
+	while (hs_len >= 4) {
+		int ext_type, ext_len;
+
+		ext_type = (data[0] << 8) + data[1];
+		ext_len = (data[2] << 8) + data[3];
+
+		if (ext_len > hs_len - 4) /* Extension too long */
+			goto not_ssl_hello;
+
+		/* SessionTicket extension */
+		if (ext_type == 35) {
+			smp->data.type = SMP_T_SINT;
+			/* SessionTicket also present */
+			if (ext_len > 0)
+				smp->data.u.sint = 2;
+			/* SessionTicket absent */
+			else
+				smp->data.u.sint = 1;
+			smp->flags = SMP_F_VOLATILE;
+			return 1;
+		}
+
+		hs_len -= 4 + ext_len;
+		data += 4 + ext_len;
+	}
+	/* SessionTicket Extension not found */
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+	smp->flags = SMP_F_VOLATILE;
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+
+ not_ssl_hello:
+	return 0;
+}
+
+/* Returns TRUE if the client sent Supported Elliptic Curves Extension (0x000a)
+ * Mainly used to detect if client supports ECC cipher suites.
+ */
+static int
+smp_fetch_req_ssl_ec_ext(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int hs_len, ext_len, bleft;
+	struct channel *chn;
+	unsigned char *data;
+
+	if (!smp->strm)
+		goto not_ssl_hello;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl_hello;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+	bleft = ci_data(chn);
+	data = (unsigned char *)ci_head(chn);
+
+	/* Check for SSL/TLS Handshake */
+	if (!bleft)
+		goto too_short;
+	if (*data != 0x16)
+		goto not_ssl_hello;
+
+	/* Check for SSLv3 or later (SSL version >= 3.0) in the record layer*/
+	if (bleft < 3)
+		goto too_short;
+	if (data[1] < 0x03)
+		goto not_ssl_hello;
+
+	if (bleft < 5)
+		goto too_short;
+	hs_len = (data[3] << 8) + data[4];
+	if (hs_len < 1 + 3 + 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	data += 5; /* enter TLS handshake */
+	bleft -= 5;
+
+	/* Check for a complete client hello starting at <data> */
+	if (bleft < 1)
+		goto too_short;
+	if (data[0] != 0x01) /* msg_type = Client Hello */
+		goto not_ssl_hello;
+
+	/* Check the Hello's length */
+	if (bleft < 4)
+		goto too_short;
+	hs_len = (data[1] << 16) + (data[2] << 8) + data[3];
+	if (hs_len < 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	/* We want the full handshake here */
+	if (bleft < hs_len)
+		goto too_short;
+
+	data += 4;
+	/* Start of the ClientHello message */
+	if (data[0] < 0x03 || data[1] < 0x01) /* TLSv1 minimum */
+		goto not_ssl_hello;
+
+	ext_len = data[34]; /* session_id_len */
+	if (ext_len > 32 || ext_len > (hs_len - 35)) /* check for correct session_id len */
+		goto not_ssl_hello;
+
+	/* Jump to cipher suite */
+	hs_len -= 35 + ext_len;
+	data += 35 + ext_len;
+
+	if (hs_len < 4 ||                               /* minimum one cipher */
+	    (ext_len = (data[0] << 8) + data[1]) < 2 || /* minimum 2 bytes for a cipher */
+	    ext_len > hs_len)
+		goto not_ssl_hello;
+
+	/* Jump to the compression methods */
+	hs_len -= 2 + ext_len;
+	data += 2 + ext_len;
+
+	if (hs_len < 2 ||                    /* minimum one compression method */
+	    data[0] < 1 || data[0] > hs_len) /* minimum 1 bytes for a method */
+		goto not_ssl_hello;
+
+	/* Jump to the extensions */
+	hs_len -= 1 + data[0];
+	data += 1 + data[0];
+
+	if (hs_len < 2 ||                                      /* minimum one extension list length */
+	    (ext_len = (data[0] << 8) + data[1]) > hs_len - 2) /* list too long */
+		goto not_ssl_hello;
+
+	hs_len = ext_len; /* limit ourselves to the extension length */
+	data += 2;
+
+	while (hs_len >= 4) {
+		int ext_type, ext_len;
+
+		ext_type = (data[0] << 8) + data[1];
+		ext_len = (data[2] << 8) + data[3];
+
+		if (ext_len > hs_len - 4) /* Extension too long */
+			goto not_ssl_hello;
+
+		/* Elliptic curves extension */
+		if (ext_type == 10) {
+			smp->data.type = SMP_T_BOOL;
+			smp->data.u.sint = 1;
+			smp->flags = SMP_F_VOLATILE;
+			return 1;
+		}
+
+		hs_len -= 4 + ext_len;
+		data += 4 + ext_len;
+	}
+	/* Elliptic curves extension not found */
+	goto not_ssl_hello;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+
+ not_ssl_hello:
+
+	return 0;
+}
+/* returns the type of SSL hello message (mainly used to detect an SSL hello) */
+static int
+smp_fetch_ssl_hello_type(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int hs_len;
+	int hs_type, bleft;
+	struct channel *chn;
+	const unsigned char *data;
+
+	if (!smp->strm)
+		goto not_ssl_hello;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl_hello;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+	bleft = ci_data(chn);
+	data = (const unsigned char *)ci_head(chn);
+
+	if (!bleft)
+		goto too_short;
+
+	if ((*data >= 0x14 && *data <= 0x17) || (*data == 0xFF)) {
+		/* SSLv3 header format */
+		if (bleft < 9)
+			goto too_short;
+
+		/* ssl version 3 */
+		if ((data[1] << 16) + data[2] < 0x00030000)
+			goto not_ssl_hello;
+
+		/* the ssl message length must cover at least the handshake type and length */
+		if ((data[3] << 8) + data[4] < 4)
+			goto not_ssl_hello;
+
+		/* format introduced with SSLv3 */
+
+		hs_type = (int)data[5];
+		hs_len = ( data[6] << 16 ) + ( data[7] << 8 ) + data[8];
+
+		/* not a full handshake */
+		if (bleft < (9 + hs_len))
+			goto too_short;
+
+	}
+	else {
+		goto not_ssl_hello;
+	}
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = hs_type;
+	smp->flags = SMP_F_VOLATILE;
+
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+
+ not_ssl_hello:
+
+	return 0;
+}
+
+/* Return the version of the SSL protocol in the request. It supports both
+ * SSLv3 (TLSv1) header format for any message, and SSLv2 header format for
+ * the hello message. The SSLv3 format is described in RFC 2246 p49, and the
+ * SSLv2 format is described here, and completed p67 of RFC 2246 :
+ *    http://wp.netscape.com/eng/security/SSL_2.html
+ *
+ * Note: this decoder only works with non-wrapping data.
+ */
+static int
+smp_fetch_req_ssl_ver(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int version, bleft, msg_len;
+	const unsigned char *data;
+	struct channel *req;
+
+	if (!smp->strm)
+		goto not_ssl;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl;
+
+	req = &smp->strm->req;
+	msg_len = 0;
+	bleft = ci_data(req);
+	if (!bleft)
+		goto too_short;
+
+	data = (const unsigned char *)ci_head(req);
+	if ((*data >= 0x14 && *data <= 0x17) || (*data == 0xFF)) {
+		/* SSLv3 header format */
+		if (bleft < 11)
+			goto too_short;
+
+		version = (data[1] << 16) + data[2]; /* record layer version: major, minor */
+		msg_len = (data[3] << 8) + data[4]; /* record length */
+
+		/* format introduced with SSLv3 */
+		if (version < 0x00030000)
+			goto not_ssl;
+
+		/* message length between 6 and 2^14 + 2048 */
+		if (msg_len < 6 || msg_len > ((1<<14) + 2048))
+			goto not_ssl;
+
+		bleft -= 5; data += 5; /* enter the handshake message */
+
+		/* return the client hello client version, not the record layer version */
+		version = (data[4] << 16) + data[5]; /* client hello version: major, minor */
+	} else {
+		/* SSLv2 header format, only supported for hello (msg type 1) */
+		int rlen, plen, cilen, silen, chlen;
+
+		if (*data & 0x80) {
+			if (bleft < 3)
+				goto too_short;
+			/* short header format : 15 bits for length */
+			rlen = ((data[0] & 0x7F) << 8) | data[1];
+			plen = 0;
+			bleft -= 2; data += 2;
+		} else {
+			if (bleft < 4)
+				goto too_short;
+			/* long header format : 14 bits for length + pad length */
+			rlen = ((data[0] & 0x3F) << 8) | data[1];
+			plen = data[2];
+			bleft -= 3; data += 3;
+		}
+
+		if (*data != 0x01)
+			goto not_ssl;
+		bleft--; data++;
+
+		if (bleft < 8)
+			goto too_short;
+		version = (data[0] << 16) + data[1]; /* version: major, minor */
+		cilen = (data[2] << 8) + data[3]; /* cipher len, multiple of 3 */
+		silen = (data[4] << 8) + data[5]; /* session_id_len: 0 or 16 */
+		chlen = (data[6] << 8) + data[7]; /* 16<=challenge length<=32 */
+
+		bleft -= 8; data += 8;
+		if (cilen % 3 != 0)
+			goto not_ssl;
+		if (silen && silen != 16)
+			goto not_ssl;
+		if (chlen < 16 || chlen > 32)
+			goto not_ssl;
+		if (rlen != 9 + cilen + silen + chlen)
+			goto not_ssl;
+
+		/* focus on the remaining data length */
+		msg_len = cilen + silen + chlen + plen;
+	}
+	/* We could recursively check that the buffer ends exactly on an SSL
+	 * fragment boundary and that a possible next segment is still SSL,
+	 * but that's a bit pointless. However, we could still check that
+	 * all the part of the request which fits in a buffer is already
+	 * there.
+	 */
+	if (msg_len > channel_recv_limit(req) + b_orig(&req->buf) - ci_head(req))
+		msg_len = channel_recv_limit(req) + b_orig(&req->buf) - ci_head(req);
+
+	if (bleft < msg_len)
+		goto too_short;
+
+	/* OK that's enough. We have at least the whole message, and we have
+	 * the protocol version.
+	 */
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = version;
+	smp->flags = SMP_F_VOLATILE;
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+ not_ssl:
+	return 0;
+}
+
+/* Try to extract the Server Name Indication that may be presented in a TLS
+ * client hello handshake message. The format of the message is the following
+ * (cf RFC5246 + RFC6066) :
+ * TLS frame :
+ * - uint8 type = 0x16 (Handshake)
+ * - uint16 version >= 0x0301 (TLSv1)
+ * - uint16 length (frame length)
+ * - TLS handshake :
+ * - uint8 msg_type = 0x01 (ClientHello)
+ * - uint24 length (handshake message length)
+ * - ClientHello :
+ * - uint16 client_version >= 0x0301 (TLSv1)
+ * - uint8 Random[32] (4 first ones are timestamp)
+ * - SessionID :
+ * - uint8 session_id_len (0..32) (SessionID len in bytes)
+ * - uint8 session_id[session_id_len]
+ * - CipherSuite :
+ * - uint16 cipher_len >= 2 (Cipher length in bytes)
+ * - uint16 ciphers[cipher_len/2]
+ * - CompressionMethod :
+ * - uint8 compression_len >= 1 (# of supported methods)
+ * - uint8 compression_methods[compression_len]
+ * - optional client_extension_len (in bytes)
+ * - optional sequence of ClientHelloExtensions (as many bytes as above):
+ * - uint16 extension_type = 0 for server_name
+ * - uint16 extension_len
+ * - opaque extension_data[extension_len]
+ * - uint16 server_name_list_len (# of bytes here)
+ * - opaque server_names[server_name_list_len bytes]
+ * - uint8 name_type = 0 for host_name
+ * - uint16 name_len
+ * - opaque hostname[name_len bytes]
+ *
+ * Returns 1 with a VOLATILE+CONST string sample pointing to the hostname
+ * inside the channel buffer when a host_name entry is found, otherwise 0.
+ * SMP_F_MAY_CHANGE is set when the buffer is still too short to conclude.
+ */
+static int
+smp_fetch_ssl_hello_sni(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int hs_len, ext_len, bleft;
+	struct channel *chn;
+	unsigned char *data;
+
+	/* a stream is required to access the raw channel contents */
+	if (!smp->strm)
+		goto not_ssl_hello;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl_hello;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+	bleft = ci_data(chn);
+	data = (unsigned char *)ci_head(chn);
+
+	/* Check for SSL/TLS Handshake */
+	if (!bleft)
+		goto too_short;
+	if (*data != 0x16)
+		goto not_ssl_hello;
+
+	/* Check for SSLv3 or later (SSL version >= 3.0) in the record layer*/
+	if (bleft < 3)
+		goto too_short;
+	if (data[1] < 0x03)
+		goto not_ssl_hello;
+
+	if (bleft < 5)
+		goto too_short;
+	/* record layer length */
+	hs_len = (data[3] << 8) + data[4];
+	if (hs_len < 1 + 3 + 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	data += 5; /* enter TLS handshake */
+	bleft -= 5;
+
+	/* Check for a complete client hello starting at <data> */
+	if (bleft < 1)
+		goto too_short;
+	if (data[0] != 0x01) /* msg_type = Client Hello */
+		goto not_ssl_hello;
+
+	/* Check the Hello's length */
+	if (bleft < 4)
+		goto too_short;
+	hs_len = (data[1] << 16) + (data[2] << 8) + data[3];
+	if (hs_len < 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	/* We want the full handshake here */
+	if (bleft < hs_len)
+		goto too_short;
+
+	data += 4; /* skip msg_type and the 24-bit handshake length */
+	/* Start of the ClientHello message */
+	if (data[0] < 0x03 || data[1] < 0x01) /* TLSv1 minimum */
+		goto not_ssl_hello;
+
+	ext_len = data[34]; /* session_id_len */
+	if (ext_len > 32 || ext_len > (hs_len - 35)) /* check for correct session_id len */
+		goto not_ssl_hello;
+
+	/* Jump to cipher suite */
+	hs_len -= 35 + ext_len;
+	data += 35 + ext_len;
+
+	if (hs_len < 4 || /* minimum one cipher */
+	    (ext_len = (data[0] << 8) + data[1]) < 2 || /* minimum 2 bytes for a cipher */
+	    ext_len > hs_len)
+		goto not_ssl_hello;
+
+	/* Jump to the compression methods */
+	hs_len -= 2 + ext_len;
+	data += 2 + ext_len;
+
+	if (hs_len < 2 || /* minimum one compression method */
+	    data[0] < 1 || data[0] > hs_len) /* minimum 1 bytes for a method */
+		goto not_ssl_hello;
+
+	/* Jump to the extensions */
+	hs_len -= 1 + data[0];
+	data += 1 + data[0];
+
+	if (hs_len < 2 || /* minimum one extension list length */
+	    (ext_len = (data[0] << 8) + data[1]) > hs_len - 2) /* list too long */
+		goto not_ssl_hello;
+
+	hs_len = ext_len; /* limit ourselves to the extension length */
+	data += 2;
+
+	/* walk the extension list: 2 bytes type + 2 bytes length + payload */
+	while (hs_len >= 4) {
+		int ext_type, name_type, srv_len, name_len;
+
+		ext_type = (data[0] << 8) + data[1];
+		ext_len = (data[2] << 8) + data[3];
+
+		if (ext_len > hs_len - 4) /* Extension too long */
+			goto not_ssl_hello;
+
+		if (ext_type == 0) { /* Server name */
+			if (ext_len < 2) /* need one list length */
+				goto not_ssl_hello;
+
+			srv_len = (data[4] << 8) + data[5];
+			if (srv_len < 4 || srv_len > hs_len - 6)
+				goto not_ssl_hello; /* at least 4 bytes per server name */
+
+			name_type = data[6];
+			name_len = (data[7] << 8) + data[8];
+
+			/* NOTE(review): name_len is not explicitly bounded by srv_len
+			 * here; it appears constrained by the hs_len checks above, but
+			 * this is worth confirming against the extension layout.
+			 */
+			if (name_type == 0) { /* hostname */
+				smp->data.type = SMP_T_STR;
+				smp->data.u.str.area = (char *)data + 9;
+				smp->data.u.str.data = name_len;
+				smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+				return 1;
+			}
+		}
+
+		hs_len -= 4 + ext_len;
+		data += 4 + ext_len;
+	}
+	/* server name not found */
+	goto not_ssl_hello;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+
+ not_ssl_hello:
+
+	return 0;
+}
+
+/* Try to extract the Application-Layer Protocol Negotiation (ALPN) protocol
+ * names that may be presented in a TLS client hello handshake message. As the
+ * message presents a list of protocol names in descending order of preference,
+ * it may return iteratively. The format of the message is the following
+ * (cf RFC5246 + RFC7301) :
+ * TLS frame :
+ * - uint8 type = 0x16 (Handshake)
+ * - uint16 version >= 0x0301 (TLSv1)
+ * - uint16 length (frame length)
+ * - TLS handshake :
+ * - uint8 msg_type = 0x01 (ClientHello)
+ * - uint24 length (handshake message length)
+ * - ClientHello :
+ * - uint16 client_version >= 0x0301 (TLSv1)
+ * - uint8 Random[32] (4 first ones are timestamp)
+ * - SessionID :
+ * - uint8 session_id_len (0..32) (SessionID len in bytes)
+ * - uint8 session_id[session_id_len]
+ * - CipherSuite :
+ * - uint16 cipher_len >= 2 (Cipher length in bytes)
+ * - uint16 ciphers[cipher_len/2]
+ * - CompressionMethod :
+ * - uint8 compression_len >= 1 (# of supported methods)
+ * - uint8 compression_methods[compression_len]
+ * - optional client_extension_len (in bytes)
+ * - optional sequence of ClientHelloExtensions (as many bytes as above):
+ * - uint16 extension_type = 16 for application_layer_protocol_negotiation
+ * - uint16 extension_len
+ * - opaque extension_data[extension_len]
+ * - uint16 protocol_names_len (# of bytes here)
+ * - opaque protocol_names[protocol_names_len bytes]
+ * - uint8 name_len
+ * - opaque protocol_name[name_len bytes]
+ *
+ * The fetch iterates: smp->ctx.i holds the offset of the next protocol name
+ * between calls, and SMP_F_NOT_LAST is set while more names remain. Returns
+ * 1 with a VOLATILE+CONST string sample per call, otherwise 0; SMP_F_MAY_CHANGE
+ * is set when the buffer is still too short to conclude.
+ */
+static int
+smp_fetch_ssl_hello_alpn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int hs_len, ext_len, bleft;
+	struct channel *chn;
+	unsigned char *data;
+
+	/* a stream is required to access the raw channel contents */
+	if (!smp->strm)
+		goto not_ssl_hello;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		goto not_ssl_hello;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+	bleft = ci_data(chn);
+	data = (unsigned char *)ci_head(chn);
+
+	/* Check for SSL/TLS Handshake */
+	if (!bleft)
+		goto too_short;
+	if (*data != 0x16)
+		goto not_ssl_hello;
+
+	/* Check for SSLv3 or later (SSL version >= 3.0) in the record layer*/
+	if (bleft < 3)
+		goto too_short;
+	if (data[1] < 0x03)
+		goto not_ssl_hello;
+
+	if (bleft < 5)
+		goto too_short;
+	/* record layer length */
+	hs_len = (data[3] << 8) + data[4];
+	if (hs_len < 1 + 3 + 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	data += 5; /* enter TLS handshake */
+	bleft -= 5;
+
+	/* Check for a complete client hello starting at <data> */
+	if (bleft < 1)
+		goto too_short;
+	if (data[0] != 0x01) /* msg_type = Client Hello */
+		goto not_ssl_hello;
+
+	/* Check the Hello's length */
+	if (bleft < 4)
+		goto too_short;
+	hs_len = (data[1] << 16) + (data[2] << 8) + data[3];
+	if (hs_len < 2 + 32 + 1 + 2 + 2 + 1 + 1 + 2 + 2)
+		goto not_ssl_hello; /* too short to have an extension */
+
+	/* We want the full handshake here */
+	if (bleft < hs_len)
+		goto too_short;
+
+	data += 4; /* skip msg_type and the 24-bit handshake length */
+	/* Start of the ClientHello message */
+	if (data[0] < 0x03 || data[1] < 0x01) /* TLSv1 minimum */
+		goto not_ssl_hello;
+
+	ext_len = data[34]; /* session_id_len */
+	if (ext_len > 32 || ext_len > (hs_len - 35)) /* check for correct session_id len */
+		goto not_ssl_hello;
+
+	/* Jump to cipher suite */
+	hs_len -= 35 + ext_len;
+	data += 35 + ext_len;
+
+	if (hs_len < 4 || /* minimum one cipher */
+	    (ext_len = (data[0] << 8) + data[1]) < 2 || /* minimum 2 bytes for a cipher */
+	    ext_len > hs_len)
+		goto not_ssl_hello;
+
+	/* Jump to the compression methods */
+	hs_len -= 2 + ext_len;
+	data += 2 + ext_len;
+
+	if (hs_len < 2 || /* minimum one compression method */
+	    data[0] < 1 || data[0] > hs_len) /* minimum 1 bytes for a method */
+		goto not_ssl_hello;
+
+	/* Jump to the extensions */
+	hs_len -= 1 + data[0];
+	data += 1 + data[0];
+
+	if (hs_len < 2 || /* minimum one extension list length */
+	    (ext_len = (data[0] << 8) + data[1]) > hs_len - 2) /* list too long */
+		goto not_ssl_hello;
+
+	hs_len = ext_len; /* limit ourselves to the extension length */
+	data += 2;
+
+	/* walk the extension list: 2 bytes type + 2 bytes length + payload */
+	while (hs_len >= 4) {
+		int ext_type, name_len, name_offset;
+
+		ext_type = (data[0] << 8) + data[1];
+		ext_len = (data[2] << 8) + data[3];
+
+		if (ext_len > hs_len - 4) /* Extension too long */
+			goto not_ssl_hello;
+
+		if (ext_type == 16) { /* ALPN */
+			if (ext_len < 3) /* one list length [uint16] + at least one name length [uint8] */
+				goto not_ssl_hello;
+
+			/* Name cursor in ctx, must begin after protocol_names_len */
+			name_offset = smp->ctx.i < 6 ? 6 : smp->ctx.i;
+			name_len = data[name_offset];
+
+			if (name_len + name_offset - 3 > ext_len)
+				goto not_ssl_hello;
+
+			smp->data.type = SMP_T_STR;
+			smp->data.u.str.area = (char *)data + name_offset + 1; /* +1 to skip name_len */
+			smp->data.u.str.data = name_len;
+			smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+
+			/* May have more protocol names remaining */
+			if (name_len + name_offset - 3 < ext_len) {
+				smp->ctx.i = name_offset + name_len + 1;
+				smp->flags |= SMP_F_NOT_LAST;
+			}
+
+			return 1;
+		}
+
+		hs_len -= 4 + ext_len;
+		data += 4 + ext_len;
+	}
+	/* alpn not found */
+	goto not_ssl_hello;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE;
+
+ not_ssl_hello:
+
+	return 0;
+}
+
+/* Fetch the request RDP cookie identified in <cname>:<clen>, or any cookie if
+ * <clen> is empty (cname is then ignored). It returns the data into sample <smp>
+ * as a constant string (SMP_T_STR with SMP_F_CONST set). Note: this decoder
+ * only works with non-wrapping data. Returns 1 when a complete CRLF-delimited
+ * cookie value was found, otherwise 0; SMP_F_MAY_CHANGE is additionally set
+ * when the available data is merely too short to conclude yet.
+ */
+int
+fetch_rdp_cookie_name(struct stream *s, struct sample *smp, const char *cname, int clen)
+{
+	int bleft;
+	const unsigned char *data;
+
+	smp->flags = SMP_F_CONST;
+	smp->data.type = SMP_T_STR;
+
+	bleft = ci_data(&s->req);
+	if (bleft <= 11)
+		goto too_short;
+
+	/* skip the 11 bytes preceding the cookie line (presumably the RDP
+	 * TPKT + X.224 headers — TODO confirm against the protocol spec)
+	 */
+	data = (const unsigned char *)ci_head(&s->req) + 11;
+	bleft -= 11;
+
+	if (bleft <= 7)
+		goto too_short;
+
+	if (strncasecmp((const char *)data, "Cookie:", 7) != 0)
+		goto not_cookie;
+
+	data += 7;
+	bleft -= 7;
+
+	/* skip optional spaces after "Cookie:" */
+	while (bleft > 0 && *data == ' ') {
+		data++;
+		bleft--;
+	}
+
+	if (clen) {
+		/* a specific cookie name was requested: match "<cname>=" */
+		if (bleft <= clen)
+			goto too_short;
+
+		if ((data[clen] != '=') ||
+		    strncasecmp(cname, (const char *)data, clen) != 0)
+			goto not_cookie;
+
+		data += clen + 1;
+		bleft -= clen + 1;
+	} else {
+		/* no name requested: take the first cookie, stop at '=' */
+		while (bleft > 0 && *data != '=') {
+			if (*data == '\r' || *data == '\n')
+				goto not_cookie;
+			data++;
+			bleft--;
+		}
+
+		if (bleft < 1)
+			goto too_short;
+
+		if (*data != '=')
+			goto not_cookie;
+
+		data++;
+		bleft--;
+	}
+
+	/* data points to cookie value */
+	smp->data.u.str.area = (char *)data;
+	smp->data.u.str.data = 0;
+
+	/* measure the value up to the CRLF ending the line */
+	while (bleft > 0 && *data != '\r') {
+		data++;
+		bleft--;
+	}
+
+	if (bleft < 2)
+		goto too_short;
+
+	if (data[0] != '\r' || data[1] != '\n')
+		goto not_cookie;
+
+	smp->data.u.str.data = (char *)data - smp->data.u.str.area;
+	smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE | SMP_F_CONST;
+ not_cookie:
+	return 0;
+}
+
+/* Fetch the request RDP cookie identified in the args, or any cookie if no arg
+ * is passed. Usable both for ACL and for samples. Note: this decoder only
+ * works with non-wrapping data. Accepts either 0 or 1 argument. Argument is a
+ * string (cookie name), other types will lead to undefined behaviour. The
+ * returned sample is a constant string (SMP_T_STR + SMP_F_CONST).
+ */
+int
+smp_fetch_rdp_cookie(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const char *cname = NULL;
+	int clen = 0;
+
+	/* requires a stream, and is meaningless for HTX buffers */
+	if (!smp->strm || IS_HTX_STRM(smp->strm))
+		return 0;
+
+	if (args) {
+		cname = args->data.str.area;
+		clen = args->data.str.data;
+	}
+
+	return fetch_rdp_cookie_name(smp->strm, smp, cname, clen);
+}
+
+/* returns either 1 or 0 depending on whether an RDP cookie is found or not */
+static int
+smp_fetch_rdp_cookie_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int found = smp_fetch_rdp_cookie(args, smp, kw, private);
+
+	/* incomplete data: wait instead of reporting a final count */
+	if (smp->flags & SMP_F_MAY_CHANGE)
+		return 0;
+
+	smp->data.u.sint = found;
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOLATILE;
+	return 1;
+}
+
+/* extracts part of a payload with offset and length at a given position.
+ * arg0 is the offset of the length field, arg1 its size in bytes, and the
+ * optional arg2 is the buffer offset as pre-encoded by val_payload_lv()
+ * (LSB set means "relative to len_offset + len_size"). Returns a read-only
+ * SMP_T_BIN sample on success; returns 0 with SMP_F_MAY_CHANGE when not
+ * enough data is available yet, or 0 with flags cleared when the request
+ * can never match within the buffer limits.
+ */
+static int
+smp_fetch_payload_lv(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+	unsigned int len_offset = arg_p[0].data.sint;
+	unsigned int len_size = arg_p[1].data.sint;
+	unsigned int buf_offset;
+	unsigned int buf_size = 0;
+	struct channel *chn = NULL;
+	char *head = NULL;
+	size_t max, data;
+	int i;
+
+	/* Format is (len offset, len size, buf offset) or (len offset, len size) */
+	/* by default buf offset == len offset + len size */
+	/* buf offset could be absolute or relative to len offset + len size if prefixed by + or - */
+
+	if (smp->strm) {
+		/* meaningless for HTX buffers */
+		if (IS_HTX_STRM(smp->strm))
+			return 0;
+		chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+		head = ci_head(chn);
+		data = ci_data(chn);
+	}
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK) {
+		/* health checks read from the check's input buffer instead */
+		struct check *check = __objt_check(smp->sess->origin);
+
+		/* meaningless for HTX buffers */
+		if (check->sc && IS_HTX_SC(check->sc))
+			return 0;
+		head = b_head(&check->bi);
+		data = b_data(&check->bi);
+	}
+	max = global.tune.bufsize;
+	if (!head)
+		goto too_short;
+
+	if (len_offset + len_size > data)
+		goto too_short;
+
+	/* big-endian decode of the <len_size>-byte length field */
+	for (i = 0; i < len_size; i++) {
+		buf_size = (buf_size << 8) + ((unsigned char *)head)[i + len_offset];
+	}
+
+	/* buf offset may be implicit, absolute or relative. If the LSB
+	 * is set, then the offset is relative otherwise it is absolute.
+	 */
+	buf_offset = len_offset + len_size;
+	if (arg_p[2].type == ARGT_SINT) {
+		if (arg_p[2].data.sint & 1)
+			buf_offset += arg_p[2].data.sint >> 1;
+		else
+			buf_offset = arg_p[2].data.sint >> 1;
+	}
+
+	if (!buf_size || buf_size > max || buf_offset + buf_size > max) {
+		/* will never match */
+		smp->flags = 0;
+		return 0;
+	}
+
+	if (buf_offset + buf_size > data)
+		goto too_short;
+
+	/* init chunk as read only */
+	smp->data.type = SMP_T_BIN;
+	smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+	chunk_initlen(&smp->data.u.str, head + buf_offset, 0, buf_size);
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE | SMP_F_CONST;
+	return 0;
+}
+
+/* extracts some payload at a fixed position and length. arg0 is the offset,
+ * arg1 the size; a size of 0 means "everything from the offset up to the end
+ * of the available input". Returns a read-only SMP_T_BIN sample on success;
+ * returns 0 with SMP_F_MAY_CHANGE when not enough data is available yet, or
+ * 0 with flags cleared when the request can never fit in the buffer.
+ */
+static int
+smp_fetch_payload(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+	unsigned int buf_offset = arg_p[0].data.sint;
+	unsigned int buf_size = arg_p[1].data.sint;
+	struct channel *chn = NULL;
+	char *head = NULL;
+	size_t max, data;
+
+	if (smp->strm) {
+		/* meaningless for HTX buffers */
+		if (IS_HTX_STRM(smp->strm))
+			return 0;
+		chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+		head = ci_head(chn);
+		data = ci_data(chn);
+	}
+	else if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK) {
+		/* health checks read from the check's input buffer instead */
+		struct check *check = __objt_check(smp->sess->origin);
+
+		/* meaningless for HTX buffers */
+		if (check->sc && IS_HTX_SC(check->sc))
+			return 0;
+		head = b_head(&check->bi);
+		data = b_data(&check->bi);
+	}
+	max = global.tune.bufsize;
+	if (!head)
+		goto too_short;
+
+	if (buf_size > max || buf_offset + buf_size > max) {
+		/* will never match */
+		smp->flags = 0;
+		return 0;
+	}
+	if (buf_offset + buf_size > data)
+		goto too_short;
+
+	/* init chunk as read only */
+	smp->data.type = SMP_T_BIN;
+	smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+	chunk_initlen(&smp->data.u.str, head + buf_offset, 0, buf_size ? buf_size : (data - buf_offset));
+
+	/* with no fixed size, more data may still arrive and extend the sample */
+	if (!buf_size && chn && channel_may_recv(chn) && !channel_input_closed(chn))
+		smp->flags |= SMP_F_MAY_CHANGE;
+
+	return 1;
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE | SMP_F_CONST;
+	return 0;
+}
+
+/* This function is used to validate the arguments passed to a "payload_lv" fetch
+ * keyword. This keyword allows two positive integers and an optional signed one,
+ * with the second one being strictly positive and the third one being greater than
+ * the opposite of the two others if negative. It is assumed that the types are
+ * already the correct ones. Returns 0 on error, non-zero if OK. If <err_msg> is
+ * not NULL, it will be filled with a pointer to an error message in case of
+ * error, that the caller is responsible for freeing. The initial location must
+ * either be freeable or NULL.
+ *
+ * Note that offset2 is stored with SINT type, but its not directly usable as is.
+ * The value is contained in the 63 MSB and the LSB is used as a flag for marking
+ * the "relative" property of the value.
+ */
+int val_payload_lv(struct arg *arg, char **err_msg)
+{
+	if (arg[0].data.sint < 0) {
+		memprintf(err_msg, "payload offset1 must be positive");
+		return 0;
+	}
+
+	if (!arg[1].data.sint) {
+		memprintf(err_msg, "payload length must be > 0");
+		return 0;
+	}
+
+	if (arg[2].type == ARGT_STR && arg[2].data.str.data > 0) {
+		const char *text = arg[2].data.str.area;
+		int is_relative = (*text == '+' || *text == '-');
+		long long int ofs2;
+
+		ofs2 = read_int64(&text, text + arg[2].data.str.data);
+		if (*text != '\0') {
+			memprintf(err_msg, "payload offset2 is not a number");
+			return 0;
+		}
+		chunk_destroy(&arg[2].data.str);
+		arg[2].type = ARGT_SINT;
+		arg[2].data.sint = ofs2;
+
+		if (arg[0].data.sint + arg[1].data.sint + arg[2].data.sint < 0) {
+			memprintf(err_msg, "payload offset2 too negative");
+			return 0;
+		}
+		/* encode: value in the 63 MSB, LSB set to flag "relative" */
+		if (is_relative)
+			arg[2].data.sint = (arg[2].data.sint << 1) + 1;
+	}
+	return 1;
+}
+
+/* extracts the parameter value of a distcc token. Each token visible here is
+ * made of a 4-character name followed by 8 hex digits encoding a 32-bit
+ * parameter; for tokens that carry a body, that parameter is also the body
+ * length in bytes. arg0 is the token name pre-encoded by val_distcc(), arg1
+ * the occurrence number (starting at 1, 0 = first match). Returns the
+ * parameter as an SMP_T_SINT sample when found.
+ */
+static int
+smp_fetch_distcc_param(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+	unsigned int match_tok = arg_p[0].data.sint;
+	unsigned int match_occ = arg_p[1].data.sint;
+	unsigned int token;
+	unsigned int param;
+	unsigned int body;
+	unsigned int ofs;
+	unsigned int occ;
+	struct channel *chn;
+	int i;
+
+	/* Format is (token[,occ]). occ starts at 1. */
+
+	if (!smp->strm)
+		return 0;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		return 0;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+
+	ofs = 0; occ = 0;
+	while (1) {
+		if (ofs + 12 > ci_data(chn)) {
+			/* not there yet but could it at least fit ? */
+			if (!chn->buf.size)
+				goto too_short;
+
+			if (ofs + 12 <= channel_recv_limit(chn) + b_orig(&chn->buf) - ci_head(chn))
+				goto too_short;
+
+			goto no_match;
+		}
+
+		/* 4-character big-endian token name */
+		token = read_n32(ci_head(chn) + ofs);
+		ofs += 4;
+
+		/* decode the 8 hex digits making the parameter */
+		for (i = param = 0; i < 8; i++) {
+			int c = hex2i(ci_head(chn)[ofs + i]);
+
+			if (c < 0)
+				goto no_match;
+			param = (param << 4) + c;
+		}
+		ofs += 8;
+
+		/* these tokens don't have a body */
+		if (token != 0x41524743 /* ARGC */ && token != 0x44495354 /* DIST */ &&
+		    token != 0x4E46494C /* NFIL */ && token != 0x53544154 /* STAT */ &&
+		    token != 0x444F4E45 /* DONE */)
+			body = param;
+		else
+			body = 0;
+
+		if (token == match_tok) {
+			occ++;
+			if (!match_occ || match_occ == occ) {
+				/* found */
+				smp->data.type = SMP_T_SINT;
+				smp->data.u.sint = param;
+				smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+				return 1;
+			}
+		}
+		/* skip the token's body, if any, to reach the next token */
+		ofs += body;
+	}
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE | SMP_F_CONST;
+	return 0;
+ no_match:
+	/* will never match (end of buffer, or bad contents) */
+	smp->flags = 0;
+	return 0;
+
+}
+
+/* extracts the (possibly truncated) body of a distcc token. Each token is
+ * made of a 4-character name followed by 8 hex digits (the parameter, which
+ * is also the body length for tokens carrying one). arg0 is the token name
+ * pre-encoded by val_distcc(), arg1 the occurrence number (starting at 1,
+ * 0 = first match). Returns a read-only SMP_T_BIN sample; the body may be
+ * truncated to what the buffer can hold, in which case SMP_F_MAY_CHANGE is
+ * also set.
+ */
+static int
+smp_fetch_distcc_body(const struct arg *arg_p, struct sample *smp, const char *kw, void *private)
+{
+	unsigned int match_tok = arg_p[0].data.sint;
+	unsigned int match_occ = arg_p[1].data.sint;
+	unsigned int token;
+	unsigned int param;
+	unsigned int ofs;
+	unsigned int occ;
+	unsigned int body;
+	struct channel *chn;
+	int i;
+
+	/* Format is (token[,occ]). occ starts at 1. */
+
+	if (!smp->strm)
+		return 0;
+
+	/* meaningless for HTX buffers */
+	if (IS_HTX_STRM(smp->strm))
+		return 0;
+
+	chn = ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_RES) ? &smp->strm->res : &smp->strm->req;
+
+	ofs = 0; occ = 0;
+	while (1) {
+		if (ofs + 12 > ci_data(chn)) {
+			/* not there yet but could it at least fit ? */
+			if (!chn->buf.size)
+				goto too_short;
+
+			if (ofs + 12 <= channel_recv_limit(chn) + b_orig(&chn->buf) - ci_head(chn))
+				goto too_short;
+
+			goto no_match;
+		}
+
+		/* 4-character big-endian token name */
+		token = read_n32(ci_head(chn) + ofs);
+		ofs += 4;
+
+		/* decode the 8 hex digits making the parameter */
+		for (i = param = 0; i < 8; i++) {
+			int c = hex2i(ci_head(chn)[ofs + i]);
+
+			if (c < 0)
+				goto no_match;
+			param = (param << 4) + c;
+		}
+		ofs += 8;
+
+		/* these tokens don't have a body */
+		if (token != 0x41524743 /* ARGC */ && token != 0x44495354 /* DIST */ &&
+		    token != 0x4E46494C /* NFIL */ && token != 0x53544154 /* STAT */ &&
+		    token != 0x444F4E45 /* DONE */)
+			body = param;
+		else
+			body = 0;
+
+		if (token == match_tok) {
+			occ++;
+			if (!match_occ || match_occ == occ) {
+				/* found */
+
+				smp->data.type = SMP_T_BIN;
+				smp->flags = SMP_F_VOLATILE | SMP_F_CONST;
+
+				if (ofs + body > ci_head(chn) - b_orig(&chn->buf) + ci_data(chn)) {
+					/* incomplete body */
+
+					if (ofs + body > channel_recv_limit(chn) + b_orig(&chn->buf) - ci_head(chn)) {
+						/* truncate it to whatever will fit */
+						smp->flags |= SMP_F_MAY_CHANGE;
+						body = channel_recv_limit(chn) + b_orig(&chn->buf) - ci_head(chn) - ofs;
+					}
+				}
+
+				chunk_initlen(&smp->data.u.str, ci_head(chn) + ofs, 0, body);
+				return 1;
+			}
+		}
+		/* skip the token's body, if any, to reach the next token */
+		ofs += body;
+	}
+
+ too_short:
+	smp->flags = SMP_F_MAY_CHANGE | SMP_F_CONST;
+	return 0;
+ no_match:
+	/* will never match (end of buffer, or bad contents) */
+	smp->flags = 0;
+	return 0;
+
+}
+
+/* This function is used to validate the arguments passed to a "distcc_param" or
+ * "distcc_body" sample fetch keyword. They take a mandatory token name of exactly
+ * 4 characters, followed by an optional occurrence number starting at 1. It is
+ * assumed that the types are already the correct ones. Returns 0 on error, non-
+ * zero if OK. If <err_msg> is not NULL, it will be filled with a pointer to an
+ * error message in case of error, that the caller is responsible for freeing.
+ * The initial location must either be freeable or NULL.
+ */
+int val_distcc(struct arg *arg, char **err_msg)
+{
+	const unsigned char *name;
+	unsigned int token;
+
+	if (arg[0].data.str.data != 4) {
+		memprintf(err_msg, "token name must be exactly 4 characters");
+		return 0;
+	}
+
+	/* convert the token name to an unsigned int (one byte per character,
+	 * big endian format). Bytes are read as unsigned char: left-shifting
+	 * a plain (possibly signed) char promoted to int is undefined
+	 * behaviour for negative values and would sign-extend high-bit bytes.
+	 */
+	name = (const unsigned char *)arg[0].data.str.area;
+	token = ((unsigned int)name[0] << 24) + ((unsigned int)name[1] << 16) +
+	        ((unsigned int)name[2] << 8) + ((unsigned int)name[3] << 0);
+
+	chunk_destroy(&arg[0].data.str);
+	arg[0].type = ARGT_SINT;
+	arg[0].data.sint = token;
+
+	/* default the occurrence number to 0 ("first match") when absent */
+	if (arg[1].type != ARGT_SINT) {
+		arg[1].type = ARGT_SINT;
+		arg[1].data.sint = 0;
+	}
+	return 1;
+}
+
+/************************************************************************/
+/* All supported sample and ACL keywords must be declared here. */
+/************************************************************************/
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: fetches that may return multiple types should be declared using the
+ * appropriate pseudo-type. If not available it must be declared as the lowest
+ * common denominator, the type that can be casted into all other ones.
+ * Each entry is: keyword, fetch function, argument mask, optional argument
+ * validator, output sample type, allowed usage scope.
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "distcc_body", smp_fetch_distcc_body, ARG2(1,STR,SINT), val_distcc, SMP_T_BIN, SMP_USE_L6REQ|SMP_USE_L6RES },
+	{ "distcc_param", smp_fetch_distcc_param, ARG2(1,STR,SINT), val_distcc, SMP_T_SINT, SMP_USE_L6REQ|SMP_USE_L6RES },
+	{ "payload", smp_fetch_payload, ARG2(2,SINT,SINT), NULL, SMP_T_BIN, SMP_USE_L6REQ|SMP_USE_L6RES },
+	{ "payload_lv", smp_fetch_payload_lv, ARG3(2,SINT,SINT,STR), val_payload_lv, SMP_T_BIN, SMP_USE_L6REQ|SMP_USE_L6RES },
+	{ "rdp_cookie", smp_fetch_rdp_cookie, ARG1(0,STR), NULL, SMP_T_STR, SMP_USE_L6REQ },
+	{ "rdp_cookie_cnt", smp_fetch_rdp_cookie_cnt, ARG1(0,STR), NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "rep_ssl_hello_type", smp_fetch_ssl_hello_type, 0, NULL, SMP_T_SINT, SMP_USE_L6RES },
+	{ "req_len", smp_fetch_len, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req_ssl_hello_type", smp_fetch_ssl_hello_type, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req_ssl_sni", smp_fetch_ssl_hello_sni, 0, NULL, SMP_T_STR, SMP_USE_L6REQ },
+	{ "req_ssl_ver", smp_fetch_req_ssl_ver, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+
+	{ "req.len", smp_fetch_len, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req.payload", smp_fetch_payload, ARG2(2,SINT,SINT), NULL, SMP_T_BIN, SMP_USE_L6REQ },
+	{ "req.payload_lv", smp_fetch_payload_lv, ARG3(2,SINT,SINT,STR), val_payload_lv, SMP_T_BIN, SMP_USE_L6REQ },
+	{ "req.rdp_cookie", smp_fetch_rdp_cookie, ARG1(0,STR), NULL, SMP_T_STR, SMP_USE_L6REQ },
+	{ "req.rdp_cookie_cnt", smp_fetch_rdp_cookie_cnt, ARG1(0,STR), NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req.ssl_ec_ext", smp_fetch_req_ssl_ec_ext, 0, NULL, SMP_T_BOOL, SMP_USE_L6REQ },
+	{ "req.ssl_st_ext", smp_fetch_req_ssl_st_ext, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req.ssl_hello_type", smp_fetch_ssl_hello_type, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "req.ssl_sni", smp_fetch_ssl_hello_sni, 0, NULL, SMP_T_STR, SMP_USE_L6REQ },
+	{ "req.ssl_alpn", smp_fetch_ssl_hello_alpn, 0, NULL, SMP_T_STR, SMP_USE_L6REQ },
+	{ "req.ssl_ver", smp_fetch_req_ssl_ver, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+	{ "res.len", smp_fetch_len, 0, NULL, SMP_T_SINT, SMP_USE_L6RES },
+	{ "res.payload", smp_fetch_payload, ARG2(2,SINT,SINT), NULL, SMP_T_BIN, SMP_USE_L6RES },
+	{ "res.payload_lv", smp_fetch_payload_lv, ARG3(2,SINT,SINT,STR), val_payload_lv, SMP_T_BIN, SMP_USE_L6RES },
+	{ "res.ssl_hello_type", smp_fetch_ssl_hello_type, 0, NULL, SMP_T_SINT, SMP_USE_L6RES },
+	{ "wait_end", smp_fetch_wait_end, 0, NULL, SMP_T_BOOL, SMP_USE_INTRN },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * Each entry maps a legacy ACL keyword to its sample fetch equivalent, with
+ * the default pattern matching method and an optional value parser.
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+	{ "payload", "req.payload", PAT_MATCH_BIN },
+	{ "payload_lv", "req.payload_lv", PAT_MATCH_BIN },
+	{ "req_rdp_cookie", "req.rdp_cookie", PAT_MATCH_STR },
+	{ "req_rdp_cookie_cnt", "req.rdp_cookie_cnt", PAT_MATCH_INT },
+	{ "req_ssl_sni", "req.ssl_sni", PAT_MATCH_STR },
+	{ "req_ssl_ver", "req.ssl_ver", PAT_MATCH_INT, pat_parse_dotted_ver },
+	{ "req.ssl_ver", "req.ssl_ver", PAT_MATCH_INT, pat_parse_dotted_ver },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/peers.c b/src/peers.c
new file mode 100644
index 0000000..5eefd18
--- /dev/null
+++ b/src/peers.c
@@ -0,0 +1,4231 @@
+/*
+ * Peer synchro management.
+ *
+ * Copyright 2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <import/eb32tree.h>
+#include <import/ebmbtree.h>
+#include <import/ebpttree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/dict.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/frontend.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/peers.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/session-t.h>
+#include <haproxy/signal.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+
+
+/*******************************/
+/* Current peer learning state */
+/*******************************/
+
+/******************************/
+/* Current peers section resync state */
+/******************************/
+#define PEERS_F_RESYNC_LOCAL 0x00000001 /* Learn from local finished or no more needed */
+#define PEERS_F_RESYNC_REMOTE 0x00000002 /* Learn from remote finished or no more needed */
+#define PEERS_F_RESYNC_ASSIGN 0x00000004 /* A peer was assigned to learn our lesson */
+#define PEERS_F_RESYNC_PROCESS 0x00000008 /* The assigned peer was requested for resync */
+#define PEERS_F_RESYNC_LOCALTIMEOUT 0x00000010 /* Timeout waiting for a full resync from a local node */
+#define PEERS_F_RESYNC_REMOTETIMEOUT 0x00000020 /* Timeout waiting for a full resync from a remote node */
+#define PEERS_F_RESYNC_LOCALABORT 0x00000040 /* Session aborted learning from a local node */
+#define PEERS_F_RESYNC_REMOTEABORT 0x00000080 /* Session aborted learning from a remote node */
+#define PEERS_F_RESYNC_LOCALFINISHED 0x00000100 /* A local node teach us and was fully up to date */
+#define PEERS_F_RESYNC_REMOTEFINISHED 0x00000200 /* A remote node teach us and was fully up to date */
+#define PEERS_F_RESYNC_LOCALPARTIAL 0x00000400 /* A local node teach us but was partially up to date */
+#define PEERS_F_RESYNC_REMOTEPARTIAL 0x00000800 /* A remote node teach us but was partially up to date */
+#define PEERS_F_RESYNC_LOCALASSIGN 0x00001000 /* A local node was assigned for a full resync */
+#define PEERS_F_RESYNC_REMOTEASSIGN 0x00002000 /* A remote node was assigned for a full resync */
+#define PEERS_F_RESYNC_REQUESTED 0x00004000 /* A resync was explicitly requested */
+#define PEERS_F_DONOTSTOP 0x00010000 /* Main table sync task block process during soft stop
+ to push data to new process */
+
+#define PEERS_RESYNC_STATEMASK (PEERS_F_RESYNC_LOCAL|PEERS_F_RESYNC_REMOTE)
+#define PEERS_RESYNC_FROMLOCAL 0x00000000
+#define PEERS_RESYNC_FROMREMOTE PEERS_F_RESYNC_LOCAL
+#define PEERS_RESYNC_FINISHED (PEERS_F_RESYNC_LOCAL|PEERS_F_RESYNC_REMOTE)
+
+/***********************************/
+/* Current shared table sync state */
+/***********************************/
+#define SHTABLE_F_TEACH_STAGE1 0x00000001 /* Teach state 1 complete */
+#define SHTABLE_F_TEACH_STAGE2 0x00000002 /* Teach state 2 complete */
+
+/******************************/
+/* Remote peer teaching state */
+/******************************/
+#define PEER_F_TEACH_PROCESS        0x00000001 /* Teach a lesson to current peer */
+#define PEER_F_TEACH_FINISHED       0x00000008 /* Teaching concluded (waiting for confirm) */
+#define PEER_F_TEACH_COMPLETE       0x00000010 /* All that we know already taught to current peer, used only for a local peer */
+#define PEER_F_LEARN_ASSIGN         0x00000100 /* Current peer was assigned for a lesson */
+#define PEER_F_LEARN_NOTUP2DATE     0x00000200 /* Learn from peer finished but peer is not up to date */
+#define PEER_F_ALIVE                0x20000000 /* Used to flag a peer as alive. */
+#define PEER_F_HEARTBEAT            0x40000000 /* Heartbeat message to send. */
+#define PEER_F_DWNGRD               0x80000000 /* When this flag is enabled, we must downgrade the supported version announced during peer sessions. */
+
+/* masks used to reset the teach/learn flags between sessions */
+#define PEER_TEACH_RESET            ~(PEER_F_TEACH_PROCESS|PEER_F_TEACH_FINISHED) /* PEER_F_TEACH_COMPLETE should never be reset */
+#define PEER_LEARN_RESET            ~(PEER_F_LEARN_ASSIGN|PEER_F_LEARN_NOTUP2DATE)
+
+#define PEER_RESYNC_TIMEOUT         5000 /* 5 seconds */
+#define PEER_RECONNECT_TIMEOUT      5000 /* 5 seconds */
+#define PEER_LOCAL_RECONNECT_TIMEOUT 500 /* 500ms */
+#define PEER_HEARTBEAT_TIMEOUT      3000 /* 3 seconds */
+
+/* default maximum of updates sent at once */
+#define PEER_DEF_MAX_UPDATES_AT_ONCE 200
+
+/* flags for "show peers" */
+#define PEERS_SHOW_F_DICT           0x00000001 /* also show the contents of the dictionary */
+
+/*****************************/
+/* Sync message class */
+/*****************************/
+enum {
+ PEER_MSG_CLASS_CONTROL = 0,
+ PEER_MSG_CLASS_ERROR,
+ PEER_MSG_CLASS_STICKTABLE = 10,
+ PEER_MSG_CLASS_RESERVED = 255,
+};
+
+/*****************************/
+/* control message types */
+/*****************************/
+enum {
+ PEER_MSG_CTRL_RESYNCREQ = 0,
+ PEER_MSG_CTRL_RESYNCFINISHED,
+ PEER_MSG_CTRL_RESYNCPARTIAL,
+ PEER_MSG_CTRL_RESYNCCONFIRM,
+ PEER_MSG_CTRL_HEARTBEAT,
+};
+
+/*****************************/
+/* error message types */
+/*****************************/
+enum {
+ PEER_MSG_ERR_PROTOCOL = 0,
+ PEER_MSG_ERR_SIZELIMIT,
+};
+
+/* network key types;
+ * network types were directly and mistakenly
+ * mapped on sample types; to keep backward
+ * compatibility we keep those values, but
+ * we now use an internal/network mapping
+ * to avoid further mistakes when adding or
+ * modifying internal types
+ */
+enum {
+	PEER_KT_ANY = 0,  /* any type */
+	PEER_KT_RESV1,    /* UNUSED */
+	PEER_KT_SINT,     /* signed 64bits integer type */
+	PEER_KT_RESV3,    /* UNUSED */
+	PEER_KT_IPV4,     /* ipv4 type */
+	PEER_KT_IPV6,     /* ipv6 type */
+	PEER_KT_STR,      /* char string type */
+	PEER_KT_BIN,      /* buffer type */
+	PEER_KT_TYPES     /* number of types, must always be last */
+};
+
+/* Map used to retrieve the on-wire (network) type from an internal sample type
+ * Note: Undeclared mapping maps entry to PEER_KT_ANY == 0
+ */
+static int peer_net_key_type[SMP_TYPES] = {
+	[SMP_T_SINT] = PEER_KT_SINT,
+	[SMP_T_IPV4] = PEER_KT_IPV4,
+	[SMP_T_IPV6] = PEER_KT_IPV6,
+	[SMP_T_STR]  = PEER_KT_STR,
+	[SMP_T_BIN]  = PEER_KT_BIN,
+};
+
+/* Map used to retrieve the internal sample type from the on-wire (network) type
+ * Note: Undeclared mapping maps entry to SMP_T_ANY == 0
+ */
+static int peer_int_key_type[PEER_KT_TYPES] = {
+	[PEER_KT_SINT] = SMP_T_SINT,
+	[PEER_KT_IPV4] = SMP_T_IPV4,
+	[PEER_KT_IPV6] = SMP_T_IPV6,
+	[PEER_KT_STR]  = SMP_T_STR,
+	[PEER_KT_BIN]  = SMP_T_BIN,
+};
+
+/*
+ * Parameters used by the functions which build the various peer protocol
+ * messages. Only the sub-struct matching the message being built is filled
+ * in by the caller.
+ */
+struct peer_prep_params {
+	struct {
+		struct peer *peer;                  /* peer the hello message is addressed to */
+	} hello;
+	struct {
+		unsigned int st1;                   /* status code to send in an error status message */
+	} error_status;
+	struct {
+		struct stksess *stksess;            /* stick-table entry to push */
+		struct shared_table *shared_table;  /* table the entry belongs to */
+		unsigned int updateid;              /* update identifier of the entry */
+		int use_identifier;                 /* non-zero to embed the update identifier */
+		int use_timed;                      /* non-zero to embed the remaining expiry time */
+		struct peer *peer;                  /* destination peer (owns the dictionary cache) */
+	} updt;
+	struct {
+		struct shared_table *shared_table;  /* table announced by the switch message */
+	} swtch;
+	struct {
+		struct shared_table *shared_table;  /* table whose updates are being acknowledged */
+	} ack;
+	struct {
+		unsigned char head[2];              /* 2-byte header of a control message */
+	} control;
+	struct {
+		unsigned char head[2];              /* 2-byte header of an error message */
+	} error;
+};
+
+/*******************************/
+/* stick table sync mesg types */
+/* Note: messages with an id   */
+/* >= 128 also carry data      */
+/*******************************/
+#define PEER_MSG_STKT_UPDATE          0x80
+#define PEER_MSG_STKT_INCUPDATE       0x81
+#define PEER_MSG_STKT_DEFINE          0x82
+#define PEER_MSG_STKT_SWITCH          0x83
+#define PEER_MSG_STKT_ACK             0x84
+#define PEER_MSG_STKT_UPDATE_TIMED    0x85
+#define PEER_MSG_STKT_INCUPDATE_TIMED 0x86
+/* All the stick-table message identifiers above have the #7 bit set */
+#define PEER_MSG_STKT_BIT             7
+#define PEER_MSG_STKT_BIT_MASK        (1 << PEER_MSG_STKT_BIT)
+
+/* The maximum length of an encoded data length. */
+#define PEER_MSG_ENC_LENGTH_MAXLEN    5
+
+/* Minimum 64-bits value encoded with 2 bytes */
+#define PEER_ENC_2BYTES_MIN                                  0xf0 /* 0xf0 (or 240) */
+/* 3 bytes */
+#define PEER_ENC_3BYTES_MIN  ((1ULL << 11) | PEER_ENC_2BYTES_MIN) /* 0x8f0 (or 2288) */
+/* 4 bytes */
+#define PEER_ENC_4BYTES_MIN  ((1ULL << 18) | PEER_ENC_3BYTES_MIN) /* 0x408f0 (or 264432) */
+/* 5 bytes */
+#define PEER_ENC_5BYTES_MIN  ((1ULL << 25) | PEER_ENC_4BYTES_MIN) /* 0x20408f0 (or 33818864) */
+/* 6 bytes */
+#define PEER_ENC_6BYTES_MIN  ((1ULL << 32) | PEER_ENC_5BYTES_MIN) /* 0x1020408f0 (or 4328786160) */
+/* 7 bytes */
+#define PEER_ENC_7BYTES_MIN  ((1ULL << 39) | PEER_ENC_6BYTES_MIN) /* 0x81020408f0 (or 554084600048) */
+/* 8 bytes */
+#define PEER_ENC_8BYTES_MIN  ((1ULL << 46) | PEER_ENC_7BYTES_MIN) /* 0x4081020408f0 (or 70922828777712) */
+/* 9 bytes */
+#define PEER_ENC_9BYTES_MIN  ((1ULL << 53) | PEER_ENC_8BYTES_MIN) /* 0x204081020408f0 (or 9078122083518704) */
+/* 10 bytes */
+#define PEER_ENC_10BYTES_MIN ((1ULL << 60) | PEER_ENC_9BYTES_MIN) /* 0x10204081020408f0 (or 1161999626690365680) */
+
+/* #7 bit used to detect the last byte to be encoded */
+#define PEER_ENC_STOP_BIT             7
+/* The byte minimum value with #7 bit set */
+#define PEER_ENC_STOP_BYTE            (1 << PEER_ENC_STOP_BIT)
+/* The left most number of bits set for PEER_ENC_2BYTES_MIN */
+#define PEER_ENC_2BYTES_MIN_BITS      4
+
+#define PEER_MSG_HEADER_LEN           2
+
+#define PEER_STKT_CACHE_MAX_ENTRIES   128
+
+/**********************************/
+/* Peer Session IO handler states */
+/**********************************/
+
+enum {
+ PEER_SESS_ST_ACCEPT = 0, /* Initial state for session create by an accept, must be zero! */
+ PEER_SESS_ST_GETVERSION, /* Validate supported protocol version */
+ PEER_SESS_ST_GETHOST, /* Validate host ID correspond to local host id */
+ PEER_SESS_ST_GETPEER, /* Validate peer ID correspond to a known remote peer id */
+ /* after this point, data were possibly exchanged */
+ PEER_SESS_ST_SENDSUCCESS, /* Send ret code 200 (success) and wait for message */
+ PEER_SESS_ST_CONNECT, /* Initial state for session create on a connect, push presentation into buffer */
+ PEER_SESS_ST_GETSTATUS, /* Wait for the welcome message */
+ PEER_SESS_ST_WAITMSG, /* Wait for data messages */
+ PEER_SESS_ST_EXIT, /* Exit with status code */
+ PEER_SESS_ST_ERRPROTO, /* Send error proto message before exit */
+ PEER_SESS_ST_ERRSIZE, /* Send error size message before exit */
+ PEER_SESS_ST_END, /* Killed session */
+};
+
+/***************************************************/
+/* Peer Session status code - part of the protocol */
+/***************************************************/
+
+#define PEER_SESS_SC_CONNECTCODE 100 /* connect in progress */
+#define PEER_SESS_SC_CONNECTEDCODE 110 /* tcp connect success */
+
+#define PEER_SESS_SC_SUCCESSCODE 200 /* accept or connect successful */
+
+#define PEER_SESS_SC_TRYAGAIN 300 /* try again later */
+
+#define PEER_SESS_SC_ERRPROTO 501 /* error protocol */
+#define PEER_SESS_SC_ERRVERSION 502 /* unknown protocol version */
+#define PEER_SESS_SC_ERRHOST 503 /* bad host name */
+#define PEER_SESS_SC_ERRPEER 504 /* unknown peer */
+
+#define PEER_SESSION_PROTO_NAME "HAProxyS"
+#define PEER_MAJOR_VER 2
+#define PEER_MINOR_VER 1
+#define PEER_DWNGRD_MINOR_VER 0
+
+static size_t proto_len = sizeof(PEER_SESSION_PROTO_NAME) - 1;
+struct peers *cfg_peers = NULL;
+static int peers_max_updates_at_once = PEER_DEF_MAX_UPDATES_AT_ONCE;
+static void peer_session_forceshutdown(struct peer *peer);
+
+static struct ebpt_node *dcache_tx_insert(struct dcache *dc,
+ struct dcache_tx_entry *i);
+static inline void flush_dcache(struct peer *peer);
+
+/* trace source and events */
+static void peers_trace(enum trace_level level, uint64_t mask,
+ const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+static const struct trace_event peers_trace_events[] = {
+#define PEERS_EV_UPDTMSG (1 << 0)
+ { .mask = PEERS_EV_UPDTMSG, .name = "updtmsg", .desc = "update message received" },
+#define PEERS_EV_ACKMSG (1 << 1)
+ { .mask = PEERS_EV_ACKMSG, .name = "ackmsg", .desc = "ack message received" },
+#define PEERS_EV_SWTCMSG (1 << 2)
+ { .mask = PEERS_EV_SWTCMSG, .name = "swtcmsg", .desc = "switch message received" },
+#define PEERS_EV_DEFMSG (1 << 3)
+ { .mask = PEERS_EV_DEFMSG, .name = "defmsg", .desc = "definition message received" },
+#define PEERS_EV_CTRLMSG (1 << 4)
+ { .mask = PEERS_EV_CTRLMSG, .name = "ctrlmsg", .desc = "control message sent/received" },
+#define PEERS_EV_SESSREL (1 << 5)
+ { .mask = PEERS_EV_SESSREL, .name = "sessrl", .desc = "peer session releasing" },
+#define PEERS_EV_PROTOERR (1 << 6)
+ { .mask = PEERS_EV_PROTOERR, .name = "protoerr", .desc = "protocol error" },
+};
+
+static const struct name_desc peers_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="peers", .desc="Peers protocol" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+static const struct name_desc peers_trace_decoding[] = {
+#define PEERS_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+ { /* end */ }
+};
+
+
+struct trace_source trace_peers = {
+ .name = IST("peers"),
+ .desc = "Peers protocol",
+ .arg_def = TRC_ARG1_CONN, /* TRACE()'s first argument is always a connection */
+ .default_cb = peers_trace,
+ .known_events = peers_trace_events,
+ .lockon_args = peers_trace_lockon_args,
+ .decoding = peers_trace_decoding,
+ .report_events = ~0, /* report everything by default */
+};
+
+/* Return peer control message types as strings (only for debugging purpose). */
+static inline char *ctrl_msg_type_str(unsigned int type)
+{
+	/* indexed by the PEER_MSG_CTRL_* enum, whose values start at 0 and
+	 * are contiguous.
+	 */
+	static char *ctrl_strs[] = {
+		[PEER_MSG_CTRL_RESYNCREQ]      = "RESYNCREQ",
+		[PEER_MSG_CTRL_RESYNCFINISHED] = "RESYNCFINISHED",
+		[PEER_MSG_CTRL_RESYNCPARTIAL]  = "RESYNCPARTIAL",
+		[PEER_MSG_CTRL_RESYNCCONFIRM]  = "RESYNCCONFIRM",
+		[PEER_MSG_CTRL_HEARTBEAT]      = "HEARTBEAT",
+	};
+
+	if (type < sizeof(ctrl_strs) / sizeof(ctrl_strs[0]))
+		return ctrl_strs[type];
+	return "???";
+}
+
+#define TRACE_SOURCE &trace_peers
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* Default callback used to format "peers" trace messages into <trace_buf>.
+ * Depending on the event <mask>, the opaque arguments are interpreted as:
+ *  - stick-table messages: <a2> peer, <a3> buffer position, <a4> size_t value
+ *  - definition messages:  <a2> peer, <a3> buffer position, <a4> int value
+ *  - control messages:     <a2> message type, <a3> local peer name, <a4> remote peer name
+ *  - session release / protocol errors: <a2> peer, <a3> previous state
+ * All arguments are optional (NULL simply skips the corresponding output).
+ */
+static void peers_trace(enum trace_level level, uint64_t mask,
+                        const struct trace_source *src,
+                        const struct ist where, const struct ist func,
+                        const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	if (mask & (PEERS_EV_UPDTMSG|PEERS_EV_ACKMSG|PEERS_EV_SWTCMSG)) {
+		if (a2) {
+			const struct peer *peer = a2;
+
+			chunk_appendf(&trace_buf, " peer=%s", peer->id);
+		}
+		if (a3) {
+			const char *p = a3;
+
+			chunk_appendf(&trace_buf, " @%p", p);
+		}
+		if (a4) {
+			const size_t *val = a4;
+
+			chunk_appendf(&trace_buf, " %llu", (unsigned long long)*val);
+		}
+	}
+
+	if (mask & PEERS_EV_DEFMSG) {
+		if (a2) {
+			const struct peer *peer = a2;
+
+			chunk_appendf(&trace_buf, " peer=%s", peer->id);
+		}
+		if (a3) {
+			const char *p = a3;
+
+			chunk_appendf(&trace_buf, " @%p", p);
+		}
+		if (a4) {
+			/* unlike the update/ack/switch events above, <a4> is an int here */
+			const int *val = a4;
+
+			chunk_appendf(&trace_buf, " %d", *val);
+		}
+	}
+
+	if (mask & PEERS_EV_CTRLMSG) {
+		if (a2) {
+			const unsigned char *ctrl_msg_type = a2;
+
+			chunk_appendf(&trace_buf, " %s", ctrl_msg_type_str(*ctrl_msg_type));
+
+		}
+		if (a3) {
+			const char *local_peer = a3;
+
+			chunk_appendf(&trace_buf, " %s", local_peer);
+		}
+
+		if (a4) {
+			const char *remote_peer = a4;
+
+			chunk_appendf(&trace_buf, " -> %s", remote_peer);
+		}
+	}
+
+	if (mask & (PEERS_EV_SESSREL|PEERS_EV_PROTOERR)) {
+		if (a2) {
+			const struct peer *peer = a2;
+			struct peers *peers = NULL;
+
+			/* only dereference the peers section through an attached applet */
+			if (peer->appctx)
+				peers = peer->peers;
+
+			if (peers)
+				chunk_appendf(&trace_buf, " %s", peers->local->id);
+			chunk_appendf(&trace_buf, " -> %s", peer->id);
+		}
+
+		if (a3) {
+			const int *prev_state = a3;
+
+			chunk_appendf(&trace_buf, " prev_state=%d\n", *prev_state);
+		}
+	}
+}
+
+/* Return the 4-letter human-readable name of peer session status code
+ * <statuscode>, or "NONE" for any unknown code (only for debugging purpose).
+ */
+static const char *statuscode_str(int statuscode)
+{
+	static const struct {
+		int code;
+		const char *name;
+	} code_names[] = {
+		{ PEER_SESS_SC_CONNECTCODE,   "CONN" },
+		{ PEER_SESS_SC_CONNECTEDCODE, "HSHK" },
+		{ PEER_SESS_SC_SUCCESSCODE,   "ESTA" },
+		{ PEER_SESS_SC_TRYAGAIN,      "RETR" },
+		{ PEER_SESS_SC_ERRPROTO,      "PROT" },
+		{ PEER_SESS_SC_ERRVERSION,    "VERS" },
+		{ PEER_SESS_SC_ERRHOST,       "NAME" },
+		{ PEER_SESS_SC_ERRPEER,       "UNKN" },
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(code_names) / sizeof(code_names[0]); i++) {
+		if (code_names[i].code == statuscode)
+			return code_names[i].name;
+	}
+	return "NONE";
+}
+
+/* Encode the 64-bit unsigned integer <i> into the 'dynamic' variable-length
+ * format at the location pointed to by <*str>. The caller must ensure that
+ * enough room is available after <*str>. On return, <*str> is updated to the
+ * first byte following the encoded integer. The function returns the number
+ * of bytes used by the encoded integer.
+ */
+int intencode(uint64_t i, char **str)
+{
+	unsigned char *out = (unsigned char *)*str;
+	int len = 0;
+
+	if (i < PEER_ENC_2BYTES_MIN) {
+		/* small value: a single byte is enough */
+		out[len++] = (unsigned char)i;
+	}
+	else {
+		/* first byte carries the low bits ORed with the 2-byte marker,
+		 * then 7 bits per continuation byte with the stop bit set on
+		 * all but the last one.
+		 */
+		out[len++] = (unsigned char)i | PEER_ENC_2BYTES_MIN;
+		i = (i - PEER_ENC_2BYTES_MIN) >> PEER_ENC_2BYTES_MIN_BITS;
+		while (i >= PEER_ENC_STOP_BYTE) {
+			out[len++] = (unsigned char)i | PEER_ENC_STOP_BYTE;
+			i = (i - PEER_ENC_STOP_BYTE) >> PEER_ENC_STOP_BIT;
+		}
+		out[len++] = (unsigned char)i;
+	}
+	*str = (char *)(out + len);
+	return len;
+}
+
+
+/* Decode a 64-bit unsigned integer from the varint stored at <*str>.
+ *
+ * Calling:
+ *  - <*str> must point to the first byte of the buffer to decode;
+ *  - <end> must point to the next byte after the end of the buffer
+ *    we are authorized to parse (buf + buflen).
+ *
+ * On success <*str> is advanced past the fully decoded integer and the
+ * decoded value is returned. If <end> is reached before the integer is
+ * fully decoded, <*str> is set to NULL (the caller must check this to
+ * detect the error) and 0 is returned.
+ */
+uint64_t intdecode(char **str, char *end)
+{
+	const unsigned char *cur = (const unsigned char *)*str;
+	const unsigned char *stop = (const unsigned char *)end;
+	uint64_t val;
+	int shift;
+
+	if (!cur)
+		return 0;
+
+	if (cur >= stop)
+		goto fail;
+
+	val = *cur++;
+	if (val >= PEER_ENC_2BYTES_MIN) {
+		unsigned char byte;
+
+		/* multi-byte encoding: accumulate 7 bits per continuation
+		 * byte until one without the stop bit is found.
+		 */
+		shift = PEER_ENC_2BYTES_MIN_BITS;
+		do {
+			if (cur >= stop)
+				goto fail;
+			byte = *cur++;
+			val += (uint64_t)byte << shift;
+			shift += PEER_ENC_STOP_BIT;
+		} while (byte >= PEER_ENC_STOP_BYTE);
+	}
+	*str = (char *)cur;
+	return val;
+
+ fail:
+	*str = NULL;
+	return 0;
+}
+
+/*
+ * Build a "hello" peer protocol message: protocol name and version (the
+ * minor version is lowered when PEER_F_DWNGRD is set on the peer), the
+ * remote peer ID, then the local peer name and pid.
+ * Return the number of bytes written to build this message if it succeeded,
+ * 0 if not (formatting error or output truncated).
+ */
+static int peer_prepare_hellomsg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	int min_ver, ret;
+	struct peer *peer;
+
+	peer = p->hello.peer;
+	min_ver = (peer->flags & PEER_F_DWNGRD) ? PEER_DWNGRD_MINOR_VER : PEER_MINOR_VER;
+	/* Prepare headers */
+	ret = snprintf(msg, size, PEER_SESSION_PROTO_NAME " %d.%d\n%s\n%s %d %d\n",
+	               (int)PEER_MAJOR_VER, min_ver, peer->id, localpeer, (int)getpid(), (int)1);
+	/* explicitly reject snprintf errors (ret < 0) instead of relying on the
+	 * signed->unsigned wraparound of the former "ret >= size" comparison,
+	 * and reject truncated output.
+	 */
+	if (ret < 0 || (size_t)ret >= size)
+		return 0;
+
+	return ret;
+}
+
+/*
+ * Build a "handshake succeeded" status message (status code 200 followed
+ * by a LF). <p> is unused but kept for prototype consistency with the
+ * other message builders.
+ * Return the number of bytes written to build this message if it succeeded,
+ * 0 if not (formatting error or output truncated).
+ */
+static int peer_prepare_status_successmsg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	int ret;
+
+	ret = snprintf(msg, size, "%d\n", (int)PEER_SESS_SC_SUCCESSCODE);
+	/* explicit error check: snprintf may return a negative value, which the
+	 * former unsigned "ret >= size" comparison only caught by wraparound
+	 */
+	if (ret < 0 || (size_t)ret >= size)
+		return 0;
+
+	return ret;
+}
+
+/*
+ * Build an error status message carrying the status code found in
+ * <p->error_status.st1>, followed by a LF.
+ * Return the number of bytes written to build this message if it succeeded,
+ * 0 if not (formatting error or output truncated).
+ */
+static int peer_prepare_status_errormsg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	int ret;
+	unsigned int st1;
+
+	st1 = p->error_status.st1;
+	ret = snprintf(msg, size, "%u\n", st1);
+	/* explicit error check: snprintf may return a negative value, which the
+	 * former unsigned "ret >= size" comparison only caught by wraparound
+	 */
+	if (ret < 0 || (size_t)ret >= size)
+		return 0;
+
+	return ret;
+}
+
+/* Set the stick-table UPDATE message type byte at <msg_type> address,
+ * depending on <use_identifier> and <use_timed> boolean parameters.
+ * Always successful.
+ */
+static inline void peer_set_update_msg_type(char *msg_type, int use_identifier, int use_timed)
+{
+	if (use_timed)
+		*msg_type = use_identifier ? PEER_MSG_STKT_UPDATE_TIMED : PEER_MSG_STKT_INCUPDATE_TIMED;
+	else
+		*msg_type = use_identifier ? PEER_MSG_STKT_UPDATE : PEER_MSG_STKT_INCUPDATE;
+}
+/*
+ * Prepare a "stick-table update" message for entry <ts> of shared table <st>
+ * (both taken from <p->updt>).
+ * <msg> is a buffer of <size> bytes to receive the message content. The
+ * payload is built first, after the room reserved for the header, because
+ * the header carries a variable-length encoding of the payload length.
+ * If the function returns 0, the caller should consider we were unable to
+ * encode this message (TODO: check size).
+ */
+static int peer_prepare_updatemsg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	uint32_t netinteger;
+	unsigned short datalen;
+	char *cursor, *datamsg;
+	unsigned int data_type;
+	void *data_ptr;
+	struct stksess *ts;
+	struct shared_table *st;
+	unsigned int updateid;
+	int use_identifier;
+	int use_timed;
+	struct peer *peer;
+
+	ts = p->updt.stksess;
+	st = p->updt.shared_table;
+	updateid = p->updt.updateid;
+	use_identifier = p->updt.use_identifier;
+	use_timed = p->updt.use_timed;
+	peer = p->updt.peer;
+
+	cursor = datamsg = msg + PEER_MSG_HEADER_LEN + PEER_MSG_ENC_LENGTH_MAXLEN;
+
+	/* construct message */
+
+	/* check if we need to send the update identifier: it is mandatory as
+	 * soon as this update does not immediately follow the last pushed one
+	 * (first update, lower id, or a gap larger than one)
+	 */
+	if (!st->last_pushed || updateid < st->last_pushed || ((updateid - st->last_pushed) != 1)) {
+		use_identifier = 1;
+	}
+
+	/* encode update identifier if needed */
+	if (use_identifier) {
+		netinteger = htonl(updateid);
+		memcpy(cursor, &netinteger, sizeof(netinteger));
+		cursor += sizeof(netinteger);
+	}
+
+	if (use_timed) {
+		/* send the time remaining before the entry expires, in ticks */
+		netinteger = htonl(tick_remain(now_ms, ts->expire));
+		memcpy(cursor, &netinteger, sizeof(netinteger));
+		cursor += sizeof(netinteger);
+	}
+
+	/* encode the key: length-prefixed for strings, network order for
+	 * integers, raw copy of <key_size> bytes otherwise
+	 */
+	if (st->table->type == SMP_T_STR) {
+		int stlen = strlen((char *)ts->key.key);
+
+		intencode(stlen, &cursor);
+		memcpy(cursor, ts->key.key, stlen);
+		cursor += stlen;
+	}
+	else if (st->table->type == SMP_T_SINT) {
+		netinteger = htonl(read_u32(ts->key.key));
+		memcpy(cursor, &netinteger, sizeof(netinteger));
+		cursor += sizeof(netinteger);
+	}
+	else {
+		memcpy(cursor, ts->key.key, st->table->key_size);
+		cursor += st->table->key_size;
+	}
+
+	HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+	/* encode values */
+	for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
+
+		data_ptr = stktable_data_ptr(st->table, ts, data_type);
+		if (data_ptr) {
+			/* in case of array all elements use
+			 * the same std_type and they are linearly
+			 * encoded.
+			 */
+			if (stktable_data_types[data_type].is_array) {
+				unsigned int idx = 0;
+
+				switch (stktable_data_types[data_type].std_type) {
+					case STD_T_SINT: {
+						int data;
+
+						do {
+							data = stktable_data_cast(data_ptr, std_t_sint);
+							intencode(data, &cursor);
+
+							data_ptr = stktable_data_ptr_idx(st->table, ts, data_type, ++idx);
+						} while(data_ptr);
+						break;
+					}
+					case STD_T_UINT: {
+						unsigned int data;
+
+						do {
+							data = stktable_data_cast(data_ptr, std_t_uint);
+							intencode(data, &cursor);
+
+							data_ptr = stktable_data_ptr_idx(st->table, ts, data_type, ++idx);
+						} while(data_ptr);
+						break;
+					}
+					case STD_T_ULL: {
+						unsigned long long data;
+
+						do {
+							data = stktable_data_cast(data_ptr, std_t_ull);
+							intencode(data, &cursor);
+
+							data_ptr = stktable_data_ptr_idx(st->table, ts, data_type, ++idx);
+						} while(data_ptr);
+						break;
+					}
+					case STD_T_FRQP: {
+						struct freq_ctr *frqp;
+
+						/* a freq counter is sent as (age, current, previous) */
+						do {
+							frqp = &stktable_data_cast(data_ptr, std_t_frqp);
+							intencode((unsigned int)(now_ms - frqp->curr_tick), &cursor);
+							intencode(frqp->curr_ctr, &cursor);
+							intencode(frqp->prev_ctr, &cursor);
+
+							data_ptr = stktable_data_ptr_idx(st->table, ts, data_type, ++idx);
+						} while(data_ptr);
+						break;
+					}
+				}
+
+				/* array elements fully encoded
+				 * proceed next data_type.
+				 */
+				continue;
+			}
+			switch (stktable_data_types[data_type].std_type) {
+				case STD_T_SINT: {
+					int data;
+
+					data = stktable_data_cast(data_ptr, std_t_sint);
+					intencode(data, &cursor);
+					break;
+				}
+				case STD_T_UINT: {
+					unsigned int data;
+
+					data = stktable_data_cast(data_ptr, std_t_uint);
+					intencode(data, &cursor);
+					break;
+				}
+				case STD_T_ULL: {
+					unsigned long long data;
+
+					data = stktable_data_cast(data_ptr, std_t_ull);
+					intencode(data, &cursor);
+					break;
+				}
+				case STD_T_FRQP: {
+					struct freq_ctr *frqp;
+
+					frqp = &stktable_data_cast(data_ptr, std_t_frqp);
+					intencode((unsigned int)(now_ms - frqp->curr_tick), &cursor);
+					intencode(frqp->curr_ctr, &cursor);
+					intencode(frqp->prev_ctr, &cursor);
+					break;
+				}
+				case STD_T_DICT: {
+					struct dict_entry *de;
+					struct ebpt_node *cached_de;
+					struct dcache_tx_entry cde = { };
+					char *beg, *end;
+					size_t value_len, data_len;
+					struct dcache *dc;
+
+					de = stktable_data_cast(data_ptr, std_t_dict);
+					if (!de) {
+						/* No entry */
+						intencode(0, &cursor);
+						break;
+					}
+
+					dc = peer->dcache;
+					cde.entry.key = de;
+					cached_de = dcache_tx_insert(dc, &cde);
+					if (cached_de == &cde.entry) {
+						/* NOTE(review): presumably the cache-hit path,
+						 * where only the cache ID is sent — confirm
+						 * against dcache_tx_insert()'s contract
+						 */
+						if (cde.id + 1 >= PEER_ENC_2BYTES_MIN)
+							break;
+						/* Encode the length of the remaining data -> 1 */
+						intencode(1, &cursor);
+						/* Encode the cache entry ID */
+						intencode(cde.id + 1, &cursor);
+					}
+					else {
+						/* Leave enough room to encode the remaining data length. */
+						end = beg = cursor + PEER_MSG_ENC_LENGTH_MAXLEN;
+						/* Encode the dictionary entry key */
+						intencode(cde.id + 1, &end);
+						/* Encode the length of the dictionary entry data */
+						value_len = de->len;
+						intencode(value_len, &end);
+						/* Copy the data */
+						memcpy(end, de->value.key, value_len);
+						end += value_len;
+						/* Encode the length of the data */
+						data_len = end - beg;
+						intencode(data_len, &cursor);
+						memmove(cursor, beg, data_len);
+						cursor += data_len;
+					}
+					break;
+				}
+			}
+		}
+	}
+	HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+	/* Compute datalen */
+	datalen = (cursor - datamsg);
+
+	/* prepare message header */
+	msg[0] = PEER_MSG_CLASS_STICKTABLE;
+	peer_set_update_msg_type(&msg[1], use_identifier, use_timed);
+	cursor = &msg[2];
+	intencode(datalen, &cursor);
+
+	/* move data after header */
+	memmove(cursor, datamsg, datalen);
+
+	/* return header size + data_len */
+	return (cursor - msg) + datalen;
+}
+
+/*
+ * Prepare the table definition ("switch") message for the targeted shared
+ * table <params->swtch.shared_table>: local table ID, table name, key type
+ * and size, stored data-type bitfield, expiry, then the per-data-type
+ * parameters.
+ * <msg> is a buffer of <size> bytes to receive the message content.
+ * If the function returns 0, the caller should consider we were unable to
+ * encode this message (TODO: check size).
+ */
+static int peer_prepare_switchmsg(char *msg, size_t size, struct peer_prep_params *params)
+{
+	int len;
+	unsigned short datalen;
+	struct buffer *chunk;
+	char *cursor, *datamsg, *chunkp, *chunkq;
+	uint64_t data = 0;
+	unsigned int data_type;
+	struct shared_table *st;
+
+	st = params->swtch.shared_table;
+	/* payload built after the room reserved for the variable-length header */
+	cursor = datamsg = msg + PEER_MSG_HEADER_LEN + PEER_MSG_ENC_LENGTH_MAXLEN;
+
+	/* Encode data */
+
+	/* encode local id */
+	intencode(st->local_id, &cursor);
+
+	/* encode table name */
+	len = strlen(st->table->nid);
+	intencode(len, &cursor);
+	memcpy(cursor, st->table->nid, len);
+	cursor += len;
+
+	/* encode table type (translated to its network value) */
+
+	intencode(peer_net_key_type[st->table->type], &cursor);
+
+	/* encode table key size */
+	intencode(st->table->key_size, &cursor);
+
+	/* data-type parameters are accumulated in a trash chunk and appended
+	 * after the data-type bitfield and the expiry below
+	 */
+	chunk = get_trash_chunk();
+	chunkp = chunkq = chunk->area;
+	/* encode available known data types in table */
+	for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
+		if (st->table->data_ofs[data_type]) {
+			/* stored data types parameters are all linearly encoded
+			 * at the end of the 'table definition' message.
+			 *
+			 * Currently only array data_types and data_types
+			 * using freq_counter base type have parameters:
+			 *
+			 *  - array has always at least one parameter set to the
+			 *    number of elements.
+			 *
+			 *  - array of base-type freq_counters has an additional
+			 *    parameter set to the period used to compute those
+			 *    freq_counters.
+			 *
+			 *  - simple freq counter has a parameter set to the period
+			 *    used to compute
+			 *
+			 * A set of parameter for a datatype MUST BE prefixed
+			 * by the data-type id itself:
+			 * This is useless because the data_types are ordered and
+			 * the data_type bitfield already gives the information of
+			 * stored types, but it was designed this way when the
+			 * push of period parameter was added for freq counters
+			 * and we don't want to break the compatibility.
+			 *
+			 */
+			if (stktable_data_types[data_type].is_array) {
+				/* This is an array type so we first encode
+				 * the data_type itself to prefix parameters
+				 */
+				intencode(data_type, &chunkq);
+
+				/* We encode the first parameter which is
+				 * the number of elements of this array
+				 */
+				intencode(st->table->data_nbelem[data_type], &chunkq);
+
+				/* for array of freq counters, there is an additional
+				 * period parameter to encode
+				 */
+				if (stktable_data_types[data_type].std_type == STD_T_FRQP)
+					intencode(st->table->data_arg[data_type].u, &chunkq);
+			}
+			else if (stktable_data_types[data_type].std_type == STD_T_FRQP) {
+				/* this datatype is a simple freq counter not part
+				 * of an array. We encode the data_type itself
+				 * to prefix the 'period' parameter
+				 */
+				intencode(data_type, &chunkq);
+				intencode(st->table->data_arg[data_type].u, &chunkq);
+			}
+			/* set the bit corresponding to stored data type */
+			data |= 1ULL << data_type;
+		}
+	}
+	intencode(data, &cursor);
+
+	/* Encode stick-table entries duration. */
+	intencode(st->table->expire, &cursor);
+
+	/* append the data-type parameters accumulated above, if any */
+	if (chunkq > chunkp) {
+		chunk->data = chunkq - chunkp;
+		memcpy(cursor, chunk->area, chunk->data);
+		cursor += chunk->data;
+	}
+
+	/* Compute datalen */
+	datalen = (cursor - datamsg);
+
+	/* prepare message header */
+	msg[0] = PEER_MSG_CLASS_STICKTABLE;
+	msg[1] = PEER_MSG_STKT_DEFINE;
+	cursor = &msg[2];
+	intencode(datalen, &cursor);
+
+	/* move data after header */
+	memmove(cursor, datamsg, datalen);
+
+	/* return header size + data_len */
+	return (cursor - msg) + datalen;
+}
+
+/*
+ * Build an acknowledgement message for the shared table found in
+ * <p->ack.shared_table>: the table's remote ID is varint-encoded, followed
+ * by the last received update ID (<last_get>) in network byte order.
+ * <msg> is a buffer of <size> bytes to receive the message content.
+ * If the function returns 0, the caller should consider we were unable to
+ * encode this message (TODO: check size).
+ */
+static int peer_prepare_ackmsg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	struct shared_table *st = p->ack.shared_table;
+	char *payload, *wptr;
+	uint32_t net_last_get;
+	unsigned short datalen;
+
+	/* the payload is built first, after the room reserved for the
+	 * variable-length header
+	 */
+	wptr = payload = msg + PEER_MSG_HEADER_LEN + PEER_MSG_ENC_LENGTH_MAXLEN;
+
+	intencode(st->remote_id, &wptr);
+	net_last_get = htonl(st->last_get);
+	memcpy(wptr, &net_last_get, sizeof(net_last_get));
+	wptr += sizeof(net_last_get);
+
+	/* Compute datalen */
+	datalen = (wptr - payload);
+
+	/* prepend the two-byte header followed by the encoded payload length */
+	msg[0] = PEER_MSG_CLASS_STICKTABLE;
+	msg[1] = PEER_MSG_STKT_ACK;
+	wptr = &msg[2];
+	intencode(datalen, &wptr);
+
+	/* move the payload right after the header */
+	memmove(wptr, payload, datalen);
+
+	/* return header size + data_len */
+	return (wptr - msg) + datalen;
+}
+
+/*
+ * Function to deinit a connected peer: decrements the applet/peer counters,
+ * flushes the dictionary cache, resets the per-session teaching/learning
+ * state and wakes the sync task up so a resync may be rescheduled.
+ * NOTE(review): callers appear to hold peer->lock when calling this (see
+ * peer_session_release()) — confirm for any new call site.
+ */
+void __peer_session_deinit(struct peer *peer)
+{
+	struct peers *peers = peer->peers;
+	int thr;
+
+	/* nothing to do if the peer never had an attached applet */
+	if (!peers || !peer->appctx)
+		return;
+
+	thr = peer->appctx->t->tid;
+	HA_ATOMIC_DEC(&peers->applet_count[thr]);
+
+	/* only sessions which reached WAITMSG were counted as connected */
+	if (peer->appctx->st0 == PEER_SESS_ST_WAITMSG)
+		HA_ATOMIC_DEC(&connected_peers);
+
+	HA_ATOMIC_DEC(&active_peers);
+
+	flush_dcache(peer);
+
+	/* Re-init current table pointers to force announcement on re-connect */
+	peer->remote_table = peer->last_local_table = peer->stop_local_table = NULL;
+	peer->appctx = NULL;
+	if (peer->flags & PEER_F_LEARN_ASSIGN) {
+		/* unassign current peer for learning */
+		peer->flags &= ~(PEER_F_LEARN_ASSIGN);
+		peers->flags &= ~(PEERS_F_RESYNC_ASSIGN|PEERS_F_RESYNC_PROCESS);
+
+		/* keep track of whether a local or remote resync was aborted */
+		if (peer->local)
+			peers->flags |= PEERS_F_RESYNC_LOCALABORT;
+		else
+			peers->flags |= PEERS_F_RESYNC_REMOTEABORT;
+		/* reschedule a resync */
+		peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(5000));
+	}
+	/* reset teaching and learning flags to 0 */
+	peer->flags &= PEER_TEACH_RESET;
+	peer->flags &= PEER_LEARN_RESET;
+	task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
+}
+
+/*
+ * Initialize the applet context of an outgoing peer session: allocates the
+ * peer's destination address, finalizes the applet startup and configures
+ * the freshly created stream to connect to the peer.
+ * Returns 0 on success, -1 on error (allocation or startup failure).
+ */
+static int peer_session_init(struct appctx *appctx)
+{
+	struct peer *peer = appctx->svcctx;
+	struct stream *s;
+	struct sockaddr_storage *addr = NULL;
+
+	if (!sockaddr_alloc(&addr, &peer->addr, sizeof(peer->addr)))
+		goto out_error;
+
+	if (appctx_finalize_startup(appctx, peer->peers->peers_fe, &BUF_NULL) == -1)
+		goto out_free_addr;
+
+	s = appctx_strm(appctx);
+	/* applet is waiting for data */
+	applet_need_more_data(appctx);
+	appctx_wakeup(appctx);
+
+	/* initiate an outgoing connection */
+	s->scb->dst = addr;
+	s->scb->flags |= (SC_FL_RCV_ONCE|SC_FL_NOLINGER);
+	s->flags = SF_ASSIGNED;
+	s->target = peer_session_target(peer, s);
+
+	/* peer streams are not logged */
+	s->do_log = NULL;
+	s->uniq_id = 0;
+
+	_HA_ATOMIC_INC(&active_peers);
+	return 0;
+
+ out_free_addr:
+	sockaddr_free(&addr);
+ out_error:
+	return -1;
+}
+
+/*
+ * Callback to release a session with a peer: undoes the session accounting
+ * performed at handshake time and detaches the appctx from the peer.
+ */
+static void peer_session_release(struct appctx *appctx)
+{
+	struct peer *peer = appctx->svcctx;
+
+	TRACE_PROTO("releasing peer session", PEERS_EV_SESSREL, NULL, peer);
+	/* appctx->svcctx is not a peer session: nothing was accounted yet */
+	if (appctx->st0 < PEER_SESS_ST_SENDSUCCESS)
+		return;
+
+	/* peer session identified */
+	if (peer) {
+		HA_SPIN_LOCK(PEER_LOCK, &peer->lock);
+		/* only deinit if this appctx is still the one attached to the peer */
+		if (peer->appctx == appctx)
+			__peer_session_deinit(peer);
+		peer->flags &= ~PEER_F_ALIVE;
+		HA_SPIN_UNLOCK(PEER_LOCK, &peer->lock);
+	}
+}
+
+/* Retrieve the major and minor versions of peers protocol announced by a
+ * remote peer. <str> is a null-terminated string with the following format:
+ * "<maj_ver>.<min_ver>".
+ * Returns 0 on success, -1 on parse error.
+ */
+static int peer_get_version(const char *str,
+                            unsigned int *maj_ver, unsigned int *min_ver)
+{
+	const char *cur = str;
+	const char *end = str + strlen(str);
+	const char *start;
+	unsigned int major, minor;
+
+	/* major version: at least one digit followed by a dot */
+	start = cur;
+	major = read_uint(&cur, end);
+	if (cur == start || *cur++ != '.')
+		return -1;
+
+	/* minor version: at least one digit, consuming the whole string */
+	start = cur;
+	minor = read_uint(&cur, end);
+	if (cur == start || cur != end)
+		return -1;
+
+	*maj_ver = major;
+	*min_ver = minor;
+
+	return 0;
+}
+
+/*
+ * Parse a line terminated by an optional '\r' character, followed by a mandatory
+ * '\n' character.
+ * Returns the line length if succeeded or 0 if a '\n' character could not be
+ * found (yet), and -1 if a line could not be read because the communication
+ * channel is closed (appctx st0 is then set to PEER_SESS_ST_END).
+ */
+static inline int peer_getline(struct appctx *appctx)
+{
+	struct stconn *sc = appctx_sc(appctx);
+	int n;
+
+	n = co_getline(sc_oc(sc), trash.area, trash.size);
+	if (!n)
+		return 0;
+
+	/* error from the channel, or line not terminated by '\n' */
+	if (n < 0 || trash.area[n - 1] != '\n') {
+		appctx->st0 = PEER_SESS_ST_END;
+		return -1;
+	}
+
+	/* strip the trailing CRLF or LF by NUL-terminating the line */
+	if (n > 1 && (trash.area[n - 2] == '\r'))
+		trash.area[n - 2] = 0;
+	else
+		trash.area[n - 1] = 0;
+
+	/* consume the line from the channel */
+	co_skip(sc_oc(sc), n);
+
+	return n;
+}
+
+/*
+ * Send a message after having called <peer_prepare_msg> to build it.
+ * Return 0 if the message could not be built, modifying the appctx st0 to
+ * PEER_SESS_ST_END value.
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_msg(struct appctx *appctx,
+                                int (*peer_prepare_msg)(char *, size_t, struct peer_prep_params *),
+                                struct peer_prep_params *params)
+{
+	int msglen, ret;
+
+	/* build the message into the shared trash buffer first */
+	msglen = peer_prepare_msg(trash.area, trash.size, params);
+	if (!msglen) {
+		/* internal error: message does not fit in trash */
+		appctx->st0 = PEER_SESS_ST_END;
+		return 0;
+	}
+
+	/* push the message to the applet's output buffer */
+	ret = applet_putblk(appctx, trash.area, msglen);
+	if (ret > 0 || ret == -1)
+		return ret;
+
+	/* any other failure is fatal for the session */
+	appctx->st0 = PEER_SESS_ST_END;
+	return ret;
+}
+
+/*
+ * Send a hello message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_hellomsg(struct appctx *appctx, struct peer *peer)
+{
+	struct peer_prep_params params = { 0 };
+
+	params.hello.peer = peer;
+	return peer_send_msg(appctx, peer_prepare_hellomsg, &params);
+}
+
+/*
+ * Send a success peer handshake status message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_status_successmsg(struct appctx *appctx)
+{
+	/* this message carries no parameters */
+	return peer_send_msg(appctx, peer_prepare_status_successmsg, NULL);
+}
+
+/*
+ * Send a peer handshake status error message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_status_errormsg(struct appctx *appctx)
+{
+	struct peer_prep_params params = { 0 };
+
+	/* the handshake error code is carried by st1 */
+	params.error_status.st1 = appctx->st1;
+	return peer_send_msg(appctx, peer_prepare_status_errormsg, &params);
+}
+
+/*
+ * Send a stick-table switch message for <st>.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_switchmsg(struct shared_table *st, struct appctx *appctx)
+{
+	struct peer_prep_params params = { 0 };
+
+	params.swtch.shared_table = st;
+	return peer_send_msg(appctx, peer_prepare_switchmsg, &params);
+}
+
+/*
+ * Send a stick-table update acknowledgement message for <st>.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_ackmsg(struct shared_table *st, struct appctx *appctx)
+{
+	struct peer_prep_params params = { 0 };
+
+	params.ack.shared_table = st;
+	return peer_send_msg(appctx, peer_prepare_ackmsg, &params);
+}
+
+/*
+ * Send a stick-table update message for entry <ts> of table <st>.
+ * <updateid> is the update identifier, <use_identifier> forces it into the
+ * message, <use_timed> selects the "timed" message variants.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_updatemsg(struct shared_table *st, struct appctx *appctx, struct stksess *ts,
+                                      unsigned int updateid, int use_identifier, int use_timed)
+{
+	struct peer_prep_params params = { 0 };
+
+	params.updt.stksess = ts;
+	params.updt.shared_table = st;
+	params.updt.updateid = updateid;
+	params.updt.use_identifier = use_identifier;
+	params.updt.use_timed = use_timed;
+	params.updt.peer = appctx->svcctx;
+
+	return peer_send_msg(appctx, peer_prepare_updatemsg, &params);
+}
+
+/*
+ * Build a peer protocol control class message (2-byte class/type header).
+ * Returns the number of written bytes used to build the message if succeeded,
+ * 0 if not (buffer too small).
+ */
+static int peer_prepare_control_msg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	int i;
+
+	/* refuse to emit a truncated header */
+	if (size < sizeof(p->control.head))
+		return 0;
+
+	for (i = 0; i < 2; i++)
+		msg[i] = p->control.head[i];
+
+	return 2;
+}
+
+/*
+ * Send a stick-table synchronization request message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_resync_reqmsg(struct appctx *appctx,
+                                          struct peer *peer, struct peers *peers)
+{
+	struct peer_prep_params p = { 0 };
+
+	p.control.head[0] = PEER_MSG_CLASS_CONTROL;
+	p.control.head[1] = PEER_MSG_CTRL_RESYNCREQ;
+
+	TRACE_PROTO("send control message", PEERS_EV_CTRLMSG,
+	            NULL, &p.control.head[1], peers->local->id, peer->id);
+
+	return peer_send_msg(appctx, peer_prepare_control_msg, &p);
+}
+
+/*
+ * Send a stick-table synchronization confirmation message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_resync_confirmsg(struct appctx *appctx,
+                                             struct peer *peer, struct peers *peers)
+{
+	struct peer_prep_params p = { 0 };
+
+	p.control.head[0] = PEER_MSG_CLASS_CONTROL;
+	p.control.head[1] = PEER_MSG_CTRL_RESYNCCONFIRM;
+
+	TRACE_PROTO("send control message", PEERS_EV_CTRLMSG,
+	            NULL, &p.control.head[1], peers->local->id, peer->id);
+
+	return peer_send_msg(appctx, peer_prepare_control_msg, &p);
+}
+
+/*
+ * Send a stick-table synchronization finished message: "finished" if the
+ * resync state is PEERS_RESYNC_FINISHED, "partial" otherwise.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_resync_finishedmsg(struct appctx *appctx,
+                                               struct peer *peer, struct peers *peers)
+{
+	int finished = ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FINISHED);
+	struct peer_prep_params p = { 0 };
+
+	p.control.head[0] = PEER_MSG_CLASS_CONTROL;
+	p.control.head[1] = finished ? PEER_MSG_CTRL_RESYNCFINISHED
+	                             : PEER_MSG_CTRL_RESYNCPARTIAL;
+
+	TRACE_PROTO("send control message", PEERS_EV_CTRLMSG,
+	            NULL, &p.control.head[1], peers->local->id, peer->id);
+
+	return peer_send_msg(appctx, peer_prepare_control_msg, &p);
+}
+
+/*
+ * Send a heartbeat message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_heartbeatmsg(struct appctx *appctx,
+                                         struct peer *peer, struct peers *peers)
+{
+	struct peer_prep_params p = { 0 };
+
+	p.control.head[0] = PEER_MSG_CLASS_CONTROL;
+	p.control.head[1] = PEER_MSG_CTRL_HEARTBEAT;
+
+	TRACE_PROTO("send control message", PEERS_EV_CTRLMSG,
+	            NULL, &p.control.head[1], peers->local->id, peer->id);
+
+	return peer_send_msg(appctx, peer_prepare_control_msg, &p);
+}
+
+/*
+ * Build a peer protocol error class message (2-byte class/type header).
+ * Returns the number of written bytes used to build the message if succeeded,
+ * 0 if not (buffer too small).
+ */
+static int peer_prepare_error_msg(char *msg, size_t size, struct peer_prep_params *p)
+{
+	int i;
+
+	/* refuse to emit a truncated header */
+	if (size < sizeof(p->error.head))
+		return 0;
+
+	for (i = 0; i < 2; i++)
+		msg[i] = p->error.head[i];
+
+	return 2;
+}
+
+/*
+ * Send a "size limit reached" error message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_error_size_limitmsg(struct appctx *appctx)
+{
+	struct peer_prep_params p = { 0 };
+
+	p.error.head[0] = PEER_MSG_CLASS_ERROR;
+	p.error.head[1] = PEER_MSG_ERR_SIZELIMIT;
+	return peer_send_msg(appctx, peer_prepare_error_msg, &p);
+}
+
+/*
+ * Send a "peer protocol" error message.
+ * Return 0 if the message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_error_protomsg(struct appctx *appctx)
+{
+	struct peer_prep_params p = { 0 };
+
+	p.error.head[0] = PEER_MSG_CLASS_ERROR;
+	p.error.head[1] = PEER_MSG_ERR_PROTOCOL;
+	return peer_send_msg(appctx, peer_prepare_error_msg, &p);
+}
+
+/*
+ * Function used to lookup for recent stick-table updates associated with
+ * <st> shared stick-table when a lesson must be taught a peer (PEER_F_LEARN_ASSIGN flag set).
+ * Returns the next stksess to push, or NULL when the peer is up to date,
+ * in which case <last_pushed> and <commitupdate> are resynced on localupdate.
+ */
+static inline struct stksess *peer_teach_process_stksess_lookup(struct shared_table *st)
+{
+	struct eb32_node *eb;
+
+	/* look for the first update key strictly greater than the last pushed one */
+	eb = eb32_lookup_ge(&st->table->updates, st->last_pushed+1);
+	if (!eb) {
+		/* keys may have wrapped around: restart from the lowest key */
+		eb = eb32_first(&st->table->updates);
+		if (!eb || (eb->key == st->last_pushed)) {
+			st->table->commitupdate = st->last_pushed = st->table->localupdate;
+			return NULL;
+		}
+	}
+
+	/* if distance between the last pushed and the retrieved key
+	 * is greater than the distance last_pushed and the local_update
+	 * this means we are beyond localupdate.
+	 */
+	if ((eb->key - st->last_pushed) > (st->table->localupdate - st->last_pushed)) {
+		st->table->commitupdate = st->last_pushed = st->table->localupdate;
+		return NULL;
+	}
+
+	return eb32_entry(eb, struct stksess, upd);
+}
+
+/*
+ * Function used to lookup for recent stick-table updates associated with
+ * <st> shared stick-table during teach state 1 step.
+ * Returns the next stksess to push, or NULL once the end of the update tree
+ * is reached, in which case SHTABLE_F_TEACH_STAGE1 is set and <last_pushed>
+ * is rewound just before the oldest update in preparation for stage 2.
+ */
+static inline struct stksess *peer_teach_stage1_stksess_lookup(struct shared_table *st)
+{
+	struct eb32_node *eb;
+
+	eb = eb32_lookup_ge(&st->table->updates, st->last_pushed+1);
+	if (!eb) {
+		/* stage 1 done: wrap back to the first update for stage 2 */
+		st->flags |= SHTABLE_F_TEACH_STAGE1;
+		eb = eb32_first(&st->table->updates);
+		if (eb)
+			st->last_pushed = eb->key - 1;
+		return NULL;
+	}
+
+	return eb32_entry(eb, struct stksess, upd);
+}
+
+/*
+ * Function used to lookup for recent stick-table updates associated with
+ * <st> shared stick-table during teach state 2 step.
+ * Returns the next stksess to push, or NULL once <teaching_origin> is
+ * reached, in which case SHTABLE_F_TEACH_STAGE2 is set.
+ */
+static inline struct stksess *peer_teach_stage2_stksess_lookup(struct shared_table *st)
+{
+	struct eb32_node *eb;
+
+	eb = eb32_lookup_ge(&st->table->updates, st->last_pushed+1);
+	if (!eb || eb->key > st->teaching_origin) {
+		/* stage 2 stops where the teaching started */
+		st->flags |= SHTABLE_F_TEACH_STAGE2;
+		return NULL;
+	}
+
+	return eb32_entry(eb, struct stksess, upd);
+}
+
+/*
+ * Generic function to emit update messages for <st> stick-table when a lesson must
+ * be taught to the peer <p>.
+ *
+ * This function temporarily unlocks/locks <st> when it sends stick-table updates or
+ * when decrementing its refcount in case of any error when it sends this updates.
+ * It must be called with the stick-table lock released.
+ *
+ * Return 0 if any message could not be built modifying the appcxt st0 to PEER_SESS_ST_END value.
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an appcxt st0
+ * returned value equal to PEER_SESS_ST_END.
+ * If it returns 0 or -1, this function leave <st> locked if already locked when entering this function
+ * unlocked if not already locked when entering this function.
+ */
+static inline int peer_send_teachmsgs(struct appctx *appctx, struct peer *p,
+                                      struct stksess *(*peer_stksess_lookup)(struct shared_table *),
+                                      struct shared_table *st)
+{
+	int ret, new_pushed, use_timed;
+	int updates_sent = 0;
+
+	ret = 1;
+	use_timed = 0;
+	/* announce the table before its first update */
+	if (st != p->last_local_table) {
+		ret = peer_send_switchmsg(st, appctx);
+		if (ret <= 0)
+			return ret;
+
+		p->last_local_table = st;
+	}
+
+	/* timed updates only apply to teach lessons, unless downgraded */
+	if (peer_stksess_lookup != peer_teach_process_stksess_lookup)
+		use_timed = !(p->flags & PEER_F_DWNGRD);
+
+	/* We force new pushed to 1 to force identifier in update message */
+	new_pushed = 1;
+
+	HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+
+	while (1) {
+		struct stksess *ts;
+		unsigned updateid;
+
+		/* push local updates */
+		ts = peer_stksess_lookup(st);
+		if (!ts) {
+			ret = 1; // done
+			break;
+		}
+
+		updateid = ts->upd.key;
+		if (p->srv->shard && ts->shard != p->srv->shard) {
+			/* Skip this entry; the sequence is broken so the next
+			 * message must carry an explicit identifier again.
+			 */
+			st->last_pushed = updateid;
+			new_pushed = 1;
+			continue;
+		}
+
+		/* hold a ref on the session and release the update lock while sending */
+		HA_ATOMIC_INC(&ts->ref_cnt);
+		HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+
+		ret = peer_send_updatemsg(st, appctx, ts, updateid, new_pushed, use_timed);
+		HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+		HA_ATOMIC_DEC(&ts->ref_cnt);
+		if (ret <= 0)
+			break;
+
+		st->last_pushed = updateid;
+
+		if (peer_stksess_lookup == peer_teach_process_stksess_lookup) {
+			uint commitid = _HA_ATOMIC_LOAD(&st->table->commitupdate);
+
+			/* advance commitupdate, never moving it backwards */
+			while ((int)(updateid - commitid) > 0) {
+				if (_HA_ATOMIC_CAS(&st->table->commitupdate, &commitid, updateid))
+					break;
+				__ha_cpu_relax();
+			}
+		}
+
+		/* identifier may not needed in next update message */
+		new_pushed = 0;
+
+		updates_sent++;
+		if (updates_sent >= peers_max_updates_at_once) {
+			/* pretend we're full so that we get back ASAP */
+			struct stconn *sc = appctx_sc(appctx);
+
+			sc_need_room(sc, 0);
+			ret = -1;
+			break;
+		}
+	}
+
+	/* all loop exits use break, so no label is needed here (the former
+	 * unused "out:" label only triggered -Wunused-label warnings)
+	 */
+	HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &st->table->updt_lock);
+	return ret;
+}
+
+/*
+ * Function to emit update messages for <st> stick-table when a lesson must
+ * be taught to the peer <p> (PEER_F_LEARN_ASSIGN flag set).
+ *
+ * Note that <st> shared stick-table is locked when calling this function, and
+ * the lock is dropped then re-acquired.
+ *
+ * Return 0 if any message could not be built modifying the appcxt st0 to PEER_SESS_ST_END value.
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an appcxt st0
+ * returned value equal to PEER_SESS_ST_END.
+ */
+static inline int peer_send_teach_process_msgs(struct appctx *appctx, struct peer *p,
+                                               struct shared_table *st)
+{
+	int ret;
+
+	/* peer_send_teachmsgs() must be called with the table lock released,
+	 * so drop it for the duration of the call and re-acquire it after.
+	 */
+	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
+	ret = peer_send_teachmsgs(appctx, p, peer_teach_process_stksess_lookup, st);
+	HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
+
+	return ret;
+}
+
+/*
+ * Function to emit update messages for <st> stick-table when a lesson must
+ * be taught to the peer <p> during teach state 1 step. It must be called with
+ * the stick-table lock released.
+ *
+ * Return 0 if any message could not be built (appctx st0 set to PEER_SESS_ST_END).
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an
+ * appctx st0 set to PEER_SESS_ST_END.
+ */
+static inline int peer_send_teach_stage1_msgs(struct appctx *appctx, struct peer *p,
+                                              struct shared_table *st)
+{
+	/* stage 1 walks the update tree with its dedicated lookup function */
+	struct stksess *(*lookup)(struct shared_table *) = peer_teach_stage1_stksess_lookup;
+
+	return peer_send_teachmsgs(appctx, p, lookup, st);
+}
+
+/*
+ * Function to emit update messages for <st> stick-table when a lesson must
+ * be taught to the peer <p> during teach state 2 step. It must be called with
+ * the stick-table lock released.
+ *
+ * Return 0 if any message could not be built modifying the appcxt st0 to PEER_SESS_ST_END value.
+ * Returns -1 if there was not enough room left to send the message,
+ * any other negative returned value must be considered as an error with an appcxt st0
+ * returned value equal to PEER_SESS_ST_END.
+ */
+static inline int peer_send_teach_stage2_msgs(struct appctx *appctx, struct peer *p,
+                                              struct shared_table *st)
+{
+	return peer_send_teachmsgs(appctx, p, peer_teach_stage2_stksess_lookup, st);
+}
+
+
+/*
+ * Function used to parse a stick-table update message after it has been received
+ * by <p> peer with <msg_cur> as address of the pointer to the position in the
+ * receipt buffer with <msg_end> being position of the end of the stick-table message.
+ * Update <msg_curr> accordingly to the peer protocol specs if no peer protocol error
+ * was encountered.
+ * <exp> must be set if the stick-table entry expires.
+ * <updt> must be set for PEER_MSG_STKT_UPDATE or PEER_MSG_STKT_UPDATE_TIMED stick-table
+ * messages, in this case the stick-table update message is received with a stick-table
+ * update ID.
+ * <totl> is the length of the stick-table update message computed upon receipt.
+ * Returns 1 on success (or if the message was deliberately ignored), 0 on
+ * protocol error with appctx st0 set to PEER_SESS_ST_ERRPROTO.
+ */
+static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, int exp,
+                                char **msg_cur, char *msg_end, int msg_len, int totl)
+{
+	struct shared_table *st = p->remote_table;
+	struct stktable *table;
+	struct stksess *ts, *newts;
+	struct stksess *wts = NULL; /* write_to stksess */
+	uint32_t update;
+	int expire;
+	unsigned int data_type;
+	size_t keylen;
+	void *data_ptr;
+	char *msg_save;
+
+	TRACE_ENTER(PEERS_EV_UPDTMSG, NULL, p);
+	/* Here we have data message */
+	if (!st)
+		goto ignore_msg;
+
+	table = st->table;
+
+	expire = MS_TO_TICKS(table->expire);
+
+	if (updt) {
+		/* the message carries an explicit update identifier */
+		if (msg_len < sizeof(update)) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+			goto malformed_exit;
+		}
+
+		memcpy(&update, *msg_cur, sizeof(update));
+		*msg_cur += sizeof(update);
+		st->last_get = htonl(update);
+	}
+	else {
+		/* implicit identifier: one more than the previous message */
+		st->last_get++;
+	}
+
+	if (exp) {
+		size_t expire_sz = sizeof expire;
+
+		if (*msg_cur + expire_sz > msg_end) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, *msg_cur);
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, msg_end, &expire_sz);
+			goto malformed_exit;
+		}
+
+		memcpy(&expire, *msg_cur, expire_sz);
+		*msg_cur += expire_sz;
+		expire = ntohl(expire);
+	}
+
+	newts = stksess_new(table, NULL);
+	if (!newts)
+		goto ignore_msg;
+
+	if (table->type == SMP_T_STR) {
+		unsigned int to_read, to_store;
+
+		to_read = intdecode(msg_cur, msg_end);
+		if (!*msg_cur) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+			goto malformed_free_newts;
+		}
+
+		/* truncate the key to the table's key size, keeping room for a NUL */
+		to_store = MIN(to_read, table->key_size - 1);
+		if (*msg_cur + to_store > msg_end) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, *msg_cur);
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, msg_end, &to_store);
+			goto malformed_free_newts;
+		}
+
+		keylen = to_store;
+		memcpy(newts->key.key, *msg_cur, keylen);
+		newts->key.key[keylen] = 0;
+		*msg_cur += to_read;
+	}
+	else if (table->type == SMP_T_SINT) {
+		unsigned int netinteger;
+
+		if (*msg_cur + sizeof(netinteger) > msg_end) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, *msg_cur);
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, msg_end);
+			goto malformed_free_newts;
+		}
+
+		keylen = sizeof(netinteger);
+		memcpy(&netinteger, *msg_cur, keylen);
+		netinteger = ntohl(netinteger);
+		memcpy(newts->key.key, &netinteger, keylen);
+		*msg_cur += keylen;
+	}
+	else {
+		/* fixed-size binary key */
+		if (*msg_cur + table->key_size > msg_end) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, *msg_cur);
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+			            NULL, p, msg_end, &table->key_size);
+			goto malformed_free_newts;
+		}
+
+		keylen = table->key_size;
+		memcpy(newts->key.key, *msg_cur, keylen);
+		*msg_cur += keylen;
+	}
+
+	newts->shard = stktable_get_key_shard(table, newts->key.key, keylen);
+
+	/* lookup for existing entry */
+	ts = stktable_set_entry(table, newts);
+	if (ts != newts) {
+		stksess_free(table, newts);
+		newts = NULL;
+	}
+
+	/* remember the data position: it may be decoded a second time for the
+	 * write_to table (see below)
+	 */
+	msg_save = *msg_cur;
+
+ update_wts:
+
+	HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+	for (data_type = 0 ; data_type < STKTABLE_DATA_TYPES ; data_type++) {
+		uint64_t decoded_int;
+		unsigned int idx;
+		int ignore = 0;
+
+		if (!((1ULL << data_type) & st->remote_data))
+			continue;
+
+		/* We shouldn't learn local-only values. Also, when handling the
+		 * write_to table we must ignore types that can be processed
+		 * so we don't interfere with any potential arithmetic logic
+		 * performed on them (ie: cumulative counters).
+		 */
+		if (stktable_data_types[data_type].is_local ||
+		    (table != st->table && !stktable_data_types[data_type].as_is))
+			ignore = 1;
+
+		if (stktable_data_types[data_type].is_array) {
+			/* in case of array all elements
+			 * use the same std_type and they
+			 * are linearly encoded.
+			 * The number of elements was provided
+			 * by table definition message
+			 */
+			switch (stktable_data_types[data_type].std_type) {
+			case STD_T_SINT:
+				for (idx = 0; idx < st->remote_data_nbelem[data_type]; idx++) {
+					decoded_int = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data_ptr = stktable_data_ptr_idx(table, ts, data_type, idx);
+					if (data_ptr && !ignore)
+						stktable_data_cast(data_ptr, std_t_sint) = decoded_int;
+				}
+				break;
+			case STD_T_UINT:
+				for (idx = 0; idx < st->remote_data_nbelem[data_type]; idx++) {
+					decoded_int = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data_ptr = stktable_data_ptr_idx(table, ts, data_type, idx);
+					if (data_ptr && !ignore)
+						stktable_data_cast(data_ptr, std_t_uint) = decoded_int;
+				}
+				break;
+			case STD_T_ULL:
+				for (idx = 0; idx < st->remote_data_nbelem[data_type]; idx++) {
+					decoded_int = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data_ptr = stktable_data_ptr_idx(table, ts, data_type, idx);
+					if (data_ptr && !ignore)
+						stktable_data_cast(data_ptr, std_t_ull) = decoded_int;
+				}
+				break;
+			case STD_T_FRQP:
+				for (idx = 0; idx < st->remote_data_nbelem[data_type]; idx++) {
+					struct freq_ctr data;
+
+					/* First bit is reserved for the freq_ctr lock
+					 * Note: here we're still protected by the stksess lock
+					 * so we don't need to update the freq_ctr
+					 * using its internal lock.
+					 */
+
+					decoded_int = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data.curr_tick = tick_add(now_ms, -decoded_int) & ~0x1;
+					data.curr_ctr = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data.prev_ctr = intdecode(msg_cur, msg_end);
+					if (!*msg_cur) {
+						TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+						goto malformed_unlock;
+					}
+
+					data_ptr = stktable_data_ptr_idx(table, ts, data_type, idx);
+					if (data_ptr && !ignore)
+						stktable_data_cast(data_ptr, std_t_frqp) = data;
+				}
+				break;
+			}
+
+			/* array is fully decoded
+			 * proceed next data_type.
+			 */
+			continue;
+		}
+		decoded_int = intdecode(msg_cur, msg_end);
+		if (!*msg_cur) {
+			TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+			goto malformed_unlock;
+		}
+
+		switch (stktable_data_types[data_type].std_type) {
+		case STD_T_SINT:
+			data_ptr = stktable_data_ptr(table, ts, data_type);
+			if (data_ptr && !ignore)
+				stktable_data_cast(data_ptr, std_t_sint) = decoded_int;
+			break;
+
+		case STD_T_UINT:
+			data_ptr = stktable_data_ptr(table, ts, data_type);
+			if (data_ptr && !ignore)
+				stktable_data_cast(data_ptr, std_t_uint) = decoded_int;
+			break;
+
+		case STD_T_ULL:
+			data_ptr = stktable_data_ptr(table, ts, data_type);
+			if (data_ptr && !ignore)
+				stktable_data_cast(data_ptr, std_t_ull) = decoded_int;
+			break;
+
+		case STD_T_FRQP: {
+			struct freq_ctr data;
+
+			/* First bit is reserved for the freq_ctr lock
+			   Note: here we're still protected by the stksess lock
+			   so we don't need to update the freq_ctr
+			   using its internal lock.
+			 */
+
+			data.curr_tick = tick_add(now_ms, -decoded_int) & ~0x1;
+			data.curr_ctr = intdecode(msg_cur, msg_end);
+			if (!*msg_cur) {
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+				goto malformed_unlock;
+			}
+
+			data.prev_ctr = intdecode(msg_cur, msg_end);
+			if (!*msg_cur) {
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p);
+				goto malformed_unlock;
+			}
+
+			data_ptr = stktable_data_ptr(table, ts, data_type);
+			if (data_ptr && !ignore)
+				stktable_data_cast(data_ptr, std_t_frqp) = data;
+			break;
+		}
+		case STD_T_DICT: {
+			struct buffer *chunk;
+			size_t data_len, value_len;
+			unsigned int id;
+			struct dict_entry *de;
+			struct dcache *dc;
+			char *end;
+
+			if (!decoded_int) {
+				/* No entry. */
+				break;
+			}
+			data_len = decoded_int;
+			if (*msg_cur + data_len > msg_end) {
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+				            NULL, p, *msg_cur);
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+				            NULL, p, msg_end, &data_len);
+				goto malformed_unlock;
+			}
+
+			/* Compute the end of the current data, <msg_end> being at the end of
+			 * the entire message.
+			 */
+			end = *msg_cur + data_len;
+			id = intdecode(msg_cur, end);
+			if (!*msg_cur || !id) {
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+				            NULL, p, *msg_cur, &id);
+				goto malformed_unlock;
+			}
+
+			dc = p->dcache;
+			/* <id> comes from the network and indexes dc->rx[] in both
+			 * branches below: check it against the cache bounds once
+			 * here. It was previously only checked in the "key without
+			 * value" branch, leaving an out-of-bounds access reachable
+			 * through the "key with value" branch.
+			 */
+			if (id > dc->max_entries) {
+				TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+				            NULL, p, NULL, &id);
+				goto malformed_unlock;
+			}
+
+			if (*msg_cur == end) {
+				/* Dictionary entry key without value. */
+				/* IDs sent over the network are numbered from 1. */
+				de = dc->rx[id - 1].de;
+			}
+			else {
+				chunk = get_trash_chunk();
+				value_len = intdecode(msg_cur, end);
+				if (!*msg_cur || *msg_cur + value_len > end ||
+				    unlikely(value_len + 1 >= chunk->size)) {
+					TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+					            NULL, p, *msg_cur, &value_len);
+					TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG,
+					            NULL, p, end, &chunk->size);
+					goto malformed_unlock;
+				}
+
+				chunk_memcpy(chunk, *msg_cur, value_len);
+				chunk->area[chunk->data] = '\0';
+				*msg_cur += value_len;
+
+				de = dict_insert(&server_key_dict, chunk->area);
+				dict_entry_unref(&server_key_dict, dc->rx[id - 1].de);
+				dc->rx[id - 1].de = de;
+			}
+			if (de) {
+				data_ptr = stktable_data_ptr(table, ts, data_type);
+				if (data_ptr && !ignore) {
+					HA_ATOMIC_INC(&de->refcount);
+					stktable_data_cast(data_ptr, std_t_dict) = de;
+				}
+			}
+			break;
+		}
+		}
+	}
+
+	if (st->table->write_to.t && table != st->table->write_to.t) {
+		struct stktable_key stkey = { .key = ts->key.key, .key_len = keylen };
+
+		/* While we're still under the main ts lock, try to get related
+		 * write_to stksess with main ts key
+		 */
+		wts = stktable_get_entry(st->table->write_to.t, &stkey);
+	}
+
+	/* Force new expiration */
+	ts->expire = tick_add(now_ms, expire);
+
+	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+	stktable_touch_remote(table, ts, 1);
+
+	if (wts) {
+		/* Start over the message decoding for wts as we got a valid stksess
+		 * for write_to table, so we need to refresh the entry with supported
+		 * values.
+		 *
+		 * We prefer to do the decoding a second time even though it might
+		 * cost a bit more than copying from main ts to wts, but doing so
+		 * enables us to get rid of main ts lock: we only need the wts lock
+		 * since upstream data is still available in msg_cur
+		 */
+		ts = wts;
+		table = st->table->write_to.t;
+		wts = NULL; /* so we don't get back here */
+		*msg_cur = msg_save;
+		goto update_wts;
+	}
+
+ ignore_msg:
+	TRACE_LEAVE(PEERS_EV_UPDTMSG, NULL, p);
+	return 1;
+
+ malformed_unlock:
+	/* malformed message */
+	HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+	stktable_touch_remote(st->table, ts, 1);
+	appctx->st0 = PEER_SESS_ST_ERRPROTO;
+	TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG);
+	return 0;
+
+ malformed_free_newts:
+	/* malformed message */
+	stksess_free(st->table, newts);
+ malformed_exit:
+	appctx->st0 = PEER_SESS_ST_ERRPROTO;
+	TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG);
+	return 0;
+}
+
+/*
+ * Function used to parse a stick-table update acknowledgement message after it
+ * has been received by <p> peer with <msg_cur> as address of the pointer to the position in the
+ * receipt buffer with <msg_end> being the position of the end of the stick-table message.
+ * Update <msg_curr> accordingly to the peer protocol specs if no peer protocol error
+ * was encountered.
+ * Return 1 if succeeded, 0 if not with the appctx state st0 set to PEER_SESS_ST_ERRPROTO.
+ */
+static inline int peer_treat_ackmsg(struct appctx *appctx, struct peer *p,
+                                    char **msg_cur, char *msg_end)
+{
+	/* ack message */
+	uint32_t table_id ;
+	uint32_t update;
+	struct shared_table *st;
+
+	/* ignore ack during teaching process */
+	if (p->flags & PEER_F_TEACH_PROCESS)
+		return 1;
+
+	table_id = intdecode(msg_cur, msg_end);
+	if (!*msg_cur || (*msg_cur + sizeof(update) > msg_end)) {
+		/* malformed message */
+
+		TRACE_PROTO("malformed message", PEERS_EV_ACKMSG,
+		            NULL, p, *msg_cur);
+		appctx->st0 = PEER_SESS_ST_ERRPROTO;
+		return 0;
+	}
+
+	/* note: <msg_cur> is deliberately not advanced past <update>: the
+	 * caller skips the whole message using its total length
+	 */
+	memcpy(&update, *msg_cur, sizeof(update));
+	update = ntohl(update);
+
+	/* record the last update the remote acknowledged for this table */
+	for (st = p->tables; st; st = st->next) {
+		if (st->local_id == table_id) {
+			st->update = update;
+			break;
+		}
+	}
+
+	return 1;
+}
+
+/*
+ * Function used to parse a stick-table switch message after it has been received
+ * by <p> peer with <msg_cur> as address of the pointer to the position in the
+ * receipt buffer with <msg_end> being the position of the end of the stick-table message.
+ * Update <msg_curr> accordingly to the peer protocol specs if no peer protocol error
+ * was encountered.
+ * Return 1 if succeeded, 0 if not with the appctx state st0 set to PEER_SESS_ST_ERRPROTO.
+ */
+static inline int peer_treat_switchmsg(struct appctx *appctx, struct peer *p,
+ char **msg_cur, char *msg_end)
+{
+ struct shared_table *st;
+ int table_id;
+
+ table_id = intdecode(msg_cur, msg_end);
+ if (!*msg_cur) {
+ TRACE_PROTO("malformed message", PEERS_EV_SWTCMSG, NULL, p);
+ /* malformed message */
+ appctx->st0 = PEER_SESS_ST_ERRPROTO;
+ return 0;
+ }
+
+ p->remote_table = NULL;
+ for (st = p->tables; st; st = st->next) {
+ if (st->remote_id == table_id) {
+ p->remote_table = st;
+ break;
+ }
+ }
+
+ return 1;
+}
+
/*
 * Function used to parse a stick-table definition message after it has been received
 * by <p> peer with <msg_cur> as address of the pointer to the position in the
 * receipt buffer with <msg_end> being the position of the end of the stick-table message.
 * Update <msg_cur> accordingly to the peer protocol specs if no peer protocol error
 * was encountered.
 * <totl> is the length of the stick-table update message computed upon receipt.
 * On success <p->remote_table> points to the local shared table matching the
 * advertised definition, or is NULL when the definition is unknown or
 * incompatible (the message is then ignored but not treated as an error).
 * Return 1 if succeeded, 0 if not with the appctx state st0 set to PEER_SESS_ST_ERRPROTO.
 */
static inline int peer_treat_definemsg(struct appctx *appctx, struct peer *p,
                                      char **msg_cur, char *msg_end, int totl)
{
	int table_id_len;
	struct shared_table *st;
	int table_type;
	int table_keylen;
	int table_id;
	uint64_t table_data;

	/* numeric id the remote peer will use for this table */
	table_id = intdecode(msg_cur, msg_end);
	if (!*msg_cur) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p);
		goto malformed_exit;
	}

	/* length of the table name which follows */
	table_id_len = intdecode(msg_cur, msg_end);
	if (!*msg_cur) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p, *msg_cur);
		goto malformed_exit;
	}

	p->remote_table = NULL;
	if (!table_id_len || (*msg_cur + table_id_len) >= msg_end) {
		/* empty name or name overflowing the message */
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p, *msg_cur, &table_id_len);
		goto malformed_exit;
	}

	for (st = p->tables; st; st = st->next) {
		/* Reset IDs: any table previously bound to this remote id is unbound */
		if (st->remote_id == table_id)
			st->remote_id = 0;

		/* match the advertised name against our tables' network ids */
		if (!p->remote_table && (table_id_len == strlen(st->table->nid)) &&
		    (memcmp(st->table->nid, *msg_cur, table_id_len) == 0))
			p->remote_table = st;
	}

	if (!p->remote_table) {
		/* unknown table on our side: not an error, just ignore the message */
		TRACE_PROTO("ignored message", PEERS_EV_DEFMSG, NULL, p);
		goto ignore_msg;
	}

	*msg_cur += table_id_len;
	if (*msg_cur >= msg_end) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p);
		goto malformed_exit;
	}

	/* key type of the remote table */
	table_type = intdecode(msg_cur, msg_end);
	if (!*msg_cur) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p);
		goto malformed_exit;
	}

	/* key length of the remote table */
	table_keylen = intdecode(msg_cur, msg_end);
	if (!*msg_cur) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p);
		goto malformed_exit;
	}

	/* bitfield of the data types handled by the remote table */
	table_data = intdecode(msg_cur, msg_end);
	if (!*msg_cur) {
		TRACE_PROTO("malformed message", PEERS_EV_DEFMSG, NULL, p);
		goto malformed_exit;
	}

	if (p->remote_table->table->type != peer_int_key_type[table_type]
	    || p->remote_table->table->key_size != table_keylen) {
		/* key type or size mismatch: drop the binding and ignore */
		p->remote_table = NULL;
		TRACE_PROTO("ignored message", PEERS_EV_DEFMSG, NULL, p);
		goto ignore_msg;
	}

	/* Check if there is the additional expire data */
	intdecode(msg_cur, msg_end);
	if (*msg_cur) {
		uint64_t data_type;
		uint64_t type;

		/* This define contains the expire data so we consider
		 * it also contain all data_types parameters.
		 */
		for (data_type = 0; data_type < STKTABLE_DATA_TYPES; data_type++) {
			if (table_data & (1ULL << data_type)) {
				if (stktable_data_types[data_type].is_array) {
					/* This should be an array
					 * so we parse the data_type prefix
					 * because we must have parameters.
					 */
					type = intdecode(msg_cur, msg_end);
					if (!*msg_cur) {
						p->remote_table = NULL;
						TRACE_PROTO("missing meta data for array", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}

					/* check if the data_type match the current from the bitfield */
					if (type != data_type) {
						p->remote_table = NULL;
						TRACE_PROTO("meta data mismatch type", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}

					/* decode the nbelem of the array */
					p->remote_table->remote_data_nbelem[type] = intdecode(msg_cur, msg_end);
					if (!*msg_cur) {
						p->remote_table = NULL;
						TRACE_PROTO("missing array size meta data for array", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}

					/* if it is an array of frqp, we must also have the period to decode */
					if (stktable_data_types[data_type].std_type == STD_T_FRQP) {
						intdecode(msg_cur, msg_end);
						if (!*msg_cur) {
							p->remote_table = NULL;
							TRACE_PROTO("missing period for frqp", PEERS_EV_DEFMSG, NULL, p);
							goto ignore_msg;
						}
					}
				}
				else if (stktable_data_types[data_type].std_type == STD_T_FRQP) {
					/* This should be a std freq counter data_type
					 * so we parse the data_type prefix
					 * because we must have parameters.
					 */
					type = intdecode(msg_cur, msg_end);
					if (!*msg_cur) {
						p->remote_table = NULL;
						TRACE_PROTO("missing meta data for frqp", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}

					/* check if the data_type match the current from the bitfield */
					if (type != data_type) {
						p->remote_table = NULL;
						TRACE_PROTO("meta data mismatch type", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}

					/* decode the period */
					intdecode(msg_cur, msg_end);
					if (!*msg_cur) {
						p->remote_table = NULL;
						TRACE_PROTO("missing period for frqp", PEERS_EV_DEFMSG, NULL, p);
						goto ignore_msg;
					}
				}
			}
		}
	}
	else {
		uint64_t data_type;

		/* There is not additional data but
		 * array size parameter is mandatory to parse array
		 * so we consider an error if an array data_type is defined
		 * but there is no additional data.
		 */
		for (data_type = 0; data_type < STKTABLE_DATA_TYPES; data_type++) {
			if (table_data & (1ULL << data_type)) {
				if (stktable_data_types[data_type].is_array) {
					p->remote_table = NULL;
					TRACE_PROTO("missing array size meta data for array", PEERS_EV_DEFMSG, NULL, p);
					goto ignore_msg;
				}
			}
		}
	}

	/* everything matched: bind the remote id and data bitfield to the table */
	p->remote_table->remote_data = table_data;
	p->remote_table->remote_id = table_id;

 ignore_msg:
	return 1;

 malformed_exit:
	/* malformed message */
	appctx->st0 = PEER_SESS_ST_ERRPROTO;
	return 0;
}
+
+/*
+ * Receive a stick-table message or pre-parse any other message.
+ * The message's header will be sent into <msg_head> which must be at least
+ * <msg_head_sz> bytes long (at least 7 to store 32-bit variable lengths).
+ * The first two bytes are always read, and the rest is only read if the
+ * first bytes indicate a stick-table message. If the message is a stick-table
+ * message, the varint is decoded and the equivalent number of bytes will be
+ * copied into the trash at trash.area. <totl> is incremented by the number of
+ * bytes read EVEN IN CASE OF INCOMPLETE MESSAGES.
+ * Returns 1 if there was no error, if not, returns 0 if not enough data were available,
+ * -1 if there was an error updating the appctx state st0 accordingly.
+ */
+static inline int peer_recv_msg(struct appctx *appctx, char *msg_head, size_t msg_head_sz,
+ uint32_t *msg_len, int *totl)
+{
+ int reql;
+ struct stconn *sc = appctx_sc(appctx);
+ char *cur;
+
+ reql = co_getblk(sc_oc(sc), msg_head, 2 * sizeof(char), *totl);
+ if (reql <= 0) /* closed or EOL not found */
+ goto incomplete;
+
+ *totl += reql;
+
+ if (!(msg_head[1] & PEER_MSG_STKT_BIT_MASK))
+ return 1;
+
+ /* This is a stick-table message, let's go on */
+
+ /* Read and Decode message length */
+ msg_head += *totl;
+ msg_head_sz -= *totl;
+ reql = co_data(sc_oc(sc)) - *totl;
+ if (reql > msg_head_sz)
+ reql = msg_head_sz;
+
+ reql = co_getblk(sc_oc(sc), msg_head, reql, *totl);
+ if (reql <= 0) /* closed */
+ goto incomplete;
+
+ cur = msg_head;
+ *msg_len = intdecode(&cur, cur + reql);
+ if (!cur) {
+ /* the number is truncated, did we read enough ? */
+ if (reql < msg_head_sz)
+ goto incomplete;
+
+ /* malformed message */
+ TRACE_PROTO("malformed message: too large length encoding", PEERS_EV_UPDTMSG);
+ appctx->st0 = PEER_SESS_ST_ERRPROTO;
+ return -1;
+ }
+ *totl += cur - msg_head;
+
+ /* Read message content */
+ if (*msg_len) {
+ if (*msg_len > trash.size) {
+ /* Status code is not success, abort */
+ appctx->st0 = PEER_SESS_ST_ERRSIZE;
+ return -1;
+ }
+
+ reql = co_getblk(sc_oc(sc), trash.area, *msg_len, *totl);
+ if (reql <= 0) /* closed */
+ goto incomplete;
+ *totl += reql;
+ }
+
+ return 1;
+
+ incomplete:
+ if (reql < 0 || (sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))) {
+ /* there was an error or the message was truncated */
+ appctx->st0 = PEER_SESS_ST_END;
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * Treat the awaited message with <msg_head> as header: msg_head[0] is the
 * message class and msg_head[1] the message type. Control messages are
 * handled inline; stick-table messages are dispatched to the dedicated
 * peer_treat_*msg() parsers with <msg_cur>/<msg_end>/<msg_len>/<totl>
 * describing the payload in the receive buffer.
 * Return 1 if succeeded, 0 if not (protocol error or session ending, the
 * appctx st0 state being updated by the failing callee or here).
 */
static inline int peer_treat_awaited_msg(struct appctx *appctx, struct peer *peer, unsigned char *msg_head,
                                         char **msg_cur, char *msg_end, int msg_len, int totl)
{
	struct peers *peers = peer->peers;

	if (msg_head[0] == PEER_MSG_CLASS_CONTROL) {
		if (msg_head[1] == PEER_MSG_CTRL_RESYNCREQ) {
			struct shared_table *st;
			/* Reset message: remote need resync */

			TRACE_PROTO("received control message", PEERS_EV_CTRLMSG,
			            NULL, &msg_head[1], peers->local->id, peer->id);
			/* prepare tables for a global push */
			for (st = peer->tables; st; st = st->next) {
				st->teaching_origin = st->last_pushed = st->update;
				st->flags = 0;
			}

			/* reset teaching flags to 0 */
			peer->flags &= PEER_TEACH_RESET;

			/* flag to start to teach lesson */
			peer->flags |= PEER_F_TEACH_PROCESS;
			peers->flags |= PEERS_F_RESYNC_REQUESTED;
		}
		else if (msg_head[1] == PEER_MSG_CTRL_RESYNCFINISHED) {
			TRACE_PROTO("received control message", PEERS_EV_CTRLMSG,
			            NULL, &msg_head[1], peers->local->id, peer->id);
			if (peer->flags & PEER_F_LEARN_ASSIGN) {
				int commit_a_finish = 1;

				peer->flags &= ~PEER_F_LEARN_ASSIGN;
				peers->flags &= ~(PEERS_F_RESYNC_ASSIGN|PEERS_F_RESYNC_PROCESS);
				if (peer->srv->shard) {
					struct peer *ps;

					peers->flags |= PEERS_F_RESYNC_REMOTEPARTIAL;
					peer->flags |= PEER_F_LEARN_NOTUP2DATE;
					for (ps = peers->remote; ps; ps = ps->next) {
						if (ps->srv->shard == peer->srv->shard) {
							/* flag all peers from same shard
							 * notup2date to disable request
							 * of a resync from them
							 */
							ps->flags |= PEER_F_LEARN_NOTUP2DATE;
						}
						else if (ps->srv->shard && !(ps->flags & PEER_F_LEARN_NOTUP2DATE)) {
							/* it remains some other shards not requested
							 * we don't commit a resync finish to request
							 * the other shards
							 */
							commit_a_finish = 0;
						}
					}

					if (!commit_a_finish) {
						/* it remains some shard to request, we schedule a new request
						 */
						peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
						task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
					}
				}

				if (commit_a_finish) {
					/* all shards answered: the resync is complete */
					peers->flags |= (PEERS_F_RESYNC_LOCAL|PEERS_F_RESYNC_REMOTE);
					if (peer->local)
						peers->flags |= PEERS_F_RESYNC_LOCALFINISHED;
					else
						peers->flags |= PEERS_F_RESYNC_REMOTEFINISHED;
				}
			}
			/* a confirm message must be sent back for this control message */
			peer->confirm++;
		}
		else if (msg_head[1] == PEER_MSG_CTRL_RESYNCPARTIAL) {
			TRACE_PROTO("received control message", PEERS_EV_CTRLMSG,
			            NULL, &msg_head[1], peers->local->id, peer->id);
			if (peer->flags & PEER_F_LEARN_ASSIGN) {
				peer->flags &= ~PEER_F_LEARN_ASSIGN;
				peers->flags &= ~(PEERS_F_RESYNC_ASSIGN|PEERS_F_RESYNC_PROCESS);

				if (peer->local)
					peers->flags |= PEERS_F_RESYNC_LOCALPARTIAL;
				else
					peers->flags |= PEERS_F_RESYNC_REMOTEPARTIAL;
				/* this peer couldn't teach everything: schedule a retry */
				peer->flags |= PEER_F_LEARN_NOTUP2DATE;
				peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
				task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
			}
			peer->confirm++;
		}
		else if (msg_head[1] == PEER_MSG_CTRL_RESYNCCONFIRM) {
			struct shared_table *st;

			TRACE_PROTO("received control message", PEERS_EV_CTRLMSG,
			            NULL, &msg_head[1], peers->local->id, peer->id);
			/* If stopping state */
			if (stopping) {
				/* Close session, push resync no more needed */
				peer->flags |= PEER_F_TEACH_COMPLETE;
				appctx->st0 = PEER_SESS_ST_END;
				return 0;
			}
			for (st = peer->tables; st; st = st->next) {
				st->update = st->last_pushed = st->teaching_origin;
				st->flags = 0;
			}

			/* reset teaching flags to 0 */
			peer->flags &= PEER_TEACH_RESET;
		}
		else if (msg_head[1] == PEER_MSG_CTRL_HEARTBEAT) {
			TRACE_PROTO("received control message", PEERS_EV_CTRLMSG,
			            NULL, &msg_head[1], peers->local->id, peer->id);
			/* the peer is alive: push back its reconnect deadline */
			peer->reconnect = tick_add(now_ms, MS_TO_TICKS(PEER_RECONNECT_TIMEOUT));
			peer->rx_hbt++;
		}
	}
	else if (msg_head[0] == PEER_MSG_CLASS_STICKTABLE) {
		if (msg_head[1] == PEER_MSG_STKT_DEFINE) {
			if (!peer_treat_definemsg(appctx, peer, msg_cur, msg_end, totl))
				return 0;
		}
		else if (msg_head[1] == PEER_MSG_STKT_SWITCH) {
			if (!peer_treat_switchmsg(appctx, peer, msg_cur, msg_end))
				return 0;
		}
		else if (msg_head[1] == PEER_MSG_STKT_UPDATE ||
		         msg_head[1] == PEER_MSG_STKT_INCUPDATE ||
		         msg_head[1] == PEER_MSG_STKT_UPDATE_TIMED ||
		         msg_head[1] == PEER_MSG_STKT_INCUPDATE_TIMED) {
			int update, expire;

			/* full updates carry a key; timed variants carry an expire field */
			update = msg_head[1] == PEER_MSG_STKT_UPDATE || msg_head[1] == PEER_MSG_STKT_UPDATE_TIMED;
			expire = msg_head[1] == PEER_MSG_STKT_UPDATE_TIMED || msg_head[1] == PEER_MSG_STKT_INCUPDATE_TIMED;
			if (!peer_treat_updatemsg(appctx, peer, update, expire,
			                          msg_cur, msg_end, msg_len, totl))
				return 0;

		}
		else if (msg_head[1] == PEER_MSG_STKT_ACK) {
			if (!peer_treat_ackmsg(appctx, peer, msg_cur, msg_end))
				return 0;
		}
	}
	else if (msg_head[0] == PEER_MSG_CLASS_RESERVED) {
		/* reserved class must never be used: protocol error */
		appctx->st0 = PEER_SESS_ST_ERRPROTO;
		return 0;
	}

	return 1;
}
+
+
/*
 * Send any message to <peer> peer.
 * Returns 1 if succeeded, or -1 or 0 if failed.
 * -1 means an internal error occurred, 0 is for a peer protocol error leading
 * to a peer state change (from the peer I/O handler point of view).
 *
 * - peer->last_local_table is the last table for which we sent an update
 *   message.
 *
 * - peer->stop_local_table is the last evaluated table. It is unset when the
 *   teaching process starts. But we use it as a
 *   restart point when the loop is interrupted. It is
 *   especially useful when the number of tables exceeds
 *   peers_max_updates_at_once value.
 *
 * When a teaching loop is started, the peer's last_local_table is saved in a
 * local variable. This variable is used as a finish point. When the current
 * table is equal to it, it means all tables were evaluated, all updates were
 * sent and the teaching process is finished.
 *
 * peer->stop_local_table is always NULL when the teaching process begins. It is
 * only reset at the end. In the mean time, it always points on a table.
 */

static inline int peer_send_msgs(struct appctx *appctx,
                                 struct peer *peer, struct peers *peers)
{
	int repl;

	/* Need to request a resync */
	if ((peer->flags & PEER_F_LEARN_ASSIGN) &&
	    (peers->flags & PEERS_F_RESYNC_ASSIGN) &&
	    !(peers->flags & PEERS_F_RESYNC_PROCESS)) {

		repl = peer_send_resync_reqmsg(appctx, peer, peers);
		if (repl <= 0)
			return repl;

		peers->flags |= PEERS_F_RESYNC_PROCESS;
	}

	/* Nothing to read, now we start to write */
	if (peer->tables) {
		struct shared_table *st;
		struct shared_table *last_local_table;
		int updates = 0;

		/* the finish point of the round-robin walk over the tables */
		last_local_table = peer->last_local_table;
		if (!last_local_table)
			last_local_table = peer->tables;
		/* resume after the last evaluated table, if interrupted */
		if (!peer->stop_local_table)
			peer->stop_local_table = last_local_table;
		st = peer->stop_local_table->next;

		while (1) {
			if (!st)
				st = peer->tables;
			/* It remains some updates to ack */
			if (st->last_get != st->last_acked) {
				repl = peer_send_ackmsg(st, appctx);
				if (repl <= 0)
					return repl;

				st->last_acked = st->last_get;
			}

			if (!(peer->flags & PEER_F_TEACH_PROCESS)) {
				/* normal mode: push the pending local updates of this table */
				HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
				if (!(peer->flags & PEER_F_LEARN_ASSIGN) &&
				    (st->last_pushed != st->table->localupdate)) {

					repl = peer_send_teach_process_msgs(appctx, peer, st);
					if (repl <= 0) {
						HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
						peer->stop_local_table = peer->last_local_table;
						return repl;
					}
				}
				HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
			}
			else if (!(peer->flags & PEER_F_TEACH_FINISHED)) {
				/* teaching mode: run the two-stage full push of this table */
				if (!(st->flags & SHTABLE_F_TEACH_STAGE1)) {
					repl = peer_send_teach_stage1_msgs(appctx, peer, st);
					if (repl <= 0) {
						peer->stop_local_table = peer->last_local_table;
						return repl;
					}
				}

				if (!(st->flags & SHTABLE_F_TEACH_STAGE2)) {
					repl = peer_send_teach_stage2_msgs(appctx, peer, st);
					if (repl <= 0) {
						peer->stop_local_table = peer->last_local_table;
						return repl;
					}
				}
			}

			if (st == last_local_table) {
				/* full cycle completed: clear the restart point */
				peer->stop_local_table = NULL;
				break;
			}

			/* This one is to be sure to restart from <st->next> if we are interrupted
			 * because of peer_send_teach_stage2_msgs or because buffer is full
			 * when sending an ackmsg. In both cases current <st> was evaluated and
			 * we must restart from <st->next>
			 */
			peer->stop_local_table = st;

			updates++;
			if (updates >= peers_max_updates_at_once) {
				/* pretend we're full so that we get back ASAP */
				struct stconn *sc = appctx_sc(appctx);

				sc_need_room(sc, 0);
				return -1;
			}

			st = st->next;
		}
	}

	if ((peer->flags & PEER_F_TEACH_PROCESS) && !(peer->flags & PEER_F_TEACH_FINISHED)) {
		repl = peer_send_resync_finishedmsg(appctx, peer, peers);
		if (repl <= 0)
			return repl;

		/* flag finished message sent */
		peer->flags |= PEER_F_TEACH_FINISHED;
	}

	/* Confirm finished or partial messages */
	while (peer->confirm) {
		repl = peer_send_resync_confirmsg(appctx, peer, peers);
		if (repl <= 0)
			return repl;

		peer->confirm--;
	}

	return 1;
}
+
+/*
+ * Read and parse a first line of a "hello" peer protocol message.
+ * Returns 0 if could not read a line, -1 if there was a read error or
+ * the line is malformed, 1 if succeeded.
+ */
+static inline int peer_getline_version(struct appctx *appctx,
+ unsigned int *maj_ver, unsigned int *min_ver)
+{
+ int reql;
+
+ reql = peer_getline(appctx);
+ if (!reql)
+ return 0;
+
+ if (reql < 0)
+ return -1;
+
+ /* test protocol */
+ if (strncmp(PEER_SESSION_PROTO_NAME " ", trash.area, proto_len + 1) != 0) {
+ appctx->st0 = PEER_SESS_ST_EXIT;
+ appctx->st1 = PEER_SESS_SC_ERRPROTO;
+ return -1;
+ }
+ if (peer_get_version(trash.area + proto_len + 1, maj_ver, min_ver) == -1 ||
+ *maj_ver != PEER_MAJOR_VER || *min_ver > PEER_MINOR_VER) {
+ appctx->st0 = PEER_SESS_ST_EXIT;
+ appctx->st1 = PEER_SESS_SC_ERRVERSION;
+ return -1;
+ }
+
+ return 1;
+}
+
+/*
+ * Read and parse a second line of a "hello" peer protocol message.
+ * Returns 0 if could not read a line, -1 if there was a read error or
+ * the line is malformed, 1 if succeeded.
+ */
+static inline int peer_getline_host(struct appctx *appctx)
+{
+ int reql;
+
+ reql = peer_getline(appctx);
+ if (!reql)
+ return 0;
+
+ if (reql < 0)
+ return -1;
+
+ /* test hostname match */
+ if (strcmp(localpeer, trash.area) != 0) {
+ appctx->st0 = PEER_SESS_ST_EXIT;
+ appctx->st1 = PEER_SESS_SC_ERRHOST;
+ return -1;
+ }
+
+ return 1;
+}
+
+/*
+ * Read and parse a last line of a "hello" peer protocol message.
+ * Returns 0 if could not read a character, -1 if there was a read error or
+ * the line is malformed, 1 if succeeded.
+ * Set <curpeer> accordingly (the remote peer sending the "hello" message).
+ */
+static inline int peer_getline_last(struct appctx *appctx, struct peer **curpeer)
+{
+ char *p;
+ int reql;
+ struct peer *peer;
+ struct stream *s = appctx_strm(appctx);
+ struct peers *peers = strm_fe(s)->parent;
+
+ reql = peer_getline(appctx);
+ if (!reql)
+ return 0;
+
+ if (reql < 0)
+ return -1;
+
+ /* parse line "<peer name> <pid> <relative_pid>" */
+ p = strchr(trash.area, ' ');
+ if (!p) {
+ appctx->st0 = PEER_SESS_ST_EXIT;
+ appctx->st1 = PEER_SESS_SC_ERRPROTO;
+ return -1;
+ }
+ *p = 0;
+
+ /* lookup known peer */
+ for (peer = peers->remote; peer; peer = peer->next) {
+ if (strcmp(peer->id, trash.area) == 0)
+ break;
+ }
+
+ /* if unknown peer */
+ if (!peer) {
+ appctx->st0 = PEER_SESS_ST_EXIT;
+ appctx->st1 = PEER_SESS_SC_ERRPEER;
+ return -1;
+ }
+ *curpeer = peer;
+
+ return 1;
+}
+
+/*
+ * Init <peer> peer after having accepted it at peer protocol level.
+ */
+static inline void init_accepted_peer(struct peer *peer, struct peers *peers)
+{
+ struct shared_table *st;
+
+ peer->heartbeat = tick_add(now_ms, MS_TO_TICKS(PEER_HEARTBEAT_TIMEOUT));
+ /* Register status code */
+ peer->statuscode = PEER_SESS_SC_SUCCESSCODE;
+ peer->last_hdshk = now_ms;
+
+ /* Awake main task */
+ task_wakeup(peers->sync_task, TASK_WOKEN_MSG);
+
+ /* Init confirm counter */
+ peer->confirm = 0;
+
+ /* Init cursors */
+ for (st = peer->tables; st ; st = st->next) {
+ uint commitid, updateid;
+
+ st->last_get = st->last_acked = 0;
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
+ /* if st->update appears to be in future it means
+ * that the last acked value is very old and we
+ * remain unconnected a too long time to use this
+ * acknowledgement as a reset.
+ * We should update the protocol to be able to
+ * signal the remote peer that it needs a full resync.
+ * Here a partial fix consist to set st->update at
+ * the max past value
+ */
+ if ((int)(st->table->localupdate - st->update) < 0)
+ st->update = st->table->localupdate + (2147483648U);
+ st->teaching_origin = st->last_pushed = st->update;
+ st->flags = 0;
+
+ updateid = st->last_pushed;
+ commitid = _HA_ATOMIC_LOAD(&st->table->commitupdate);
+
+ while ((int)(updateid - commitid) > 0) {
+ if (_HA_ATOMIC_CAS(&st->table->commitupdate, &commitid, updateid))
+ break;
+ __ha_cpu_relax();
+ }
+
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ }
+
+ /* reset teaching and learning flags to 0 */
+ peer->flags &= PEER_TEACH_RESET;
+ peer->flags &= PEER_LEARN_RESET;
+
+ /* if current peer is local */
+ if (peer->local) {
+ /* if current host need resyncfrom local and no process assigned */
+ if ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMLOCAL &&
+ !(peers->flags & PEERS_F_RESYNC_ASSIGN)) {
+ /* assign local peer for a lesson, consider lesson already requested */
+ peer->flags |= PEER_F_LEARN_ASSIGN;
+ peers->flags |= (PEERS_F_RESYNC_ASSIGN|PEERS_F_RESYNC_PROCESS);
+ peers->flags |= PEERS_F_RESYNC_LOCALASSIGN;
+ }
+
+ }
+ else if ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMREMOTE &&
+ !(peers->flags & PEERS_F_RESYNC_ASSIGN)) {
+ /* assign peer for a lesson */
+ peer->flags |= PEER_F_LEARN_ASSIGN;
+ peers->flags |= PEERS_F_RESYNC_ASSIGN;
+ peers->flags |= PEERS_F_RESYNC_REMOTEASSIGN;
+ }
+}
+
+/*
+ * Init <peer> peer after having connected it at peer protocol level.
+ */
+static inline void init_connected_peer(struct peer *peer, struct peers *peers)
+{
+ struct shared_table *st;
+
+ peer->heartbeat = tick_add(now_ms, MS_TO_TICKS(PEER_HEARTBEAT_TIMEOUT));
+ /* Init cursors */
+ for (st = peer->tables; st ; st = st->next) {
+ uint updateid, commitid;
+
+ st->last_get = st->last_acked = 0;
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &st->table->lock);
+ /* if st->update appears to be in future it means
+ * that the last acked value is very old and we
+ * remain unconnected a too long time to use this
+ * acknowledgement as a reset.
+ * We should update the protocol to be able to
+ * signal the remote peer that it needs a full resync.
+ * Here a partial fix consist to set st->update at
+ * the max past value.
+ */
+ if ((int)(st->table->localupdate - st->update) < 0)
+ st->update = st->table->localupdate + (2147483648U);
+ st->teaching_origin = st->last_pushed = st->update;
+ st->flags = 0;
+
+ updateid = st->last_pushed;
+ commitid = _HA_ATOMIC_LOAD(&st->table->commitupdate);
+
+ while ((int)(updateid - commitid) > 0) {
+ if (_HA_ATOMIC_CAS(&st->table->commitupdate, &commitid, updateid))
+ break;
+ __ha_cpu_relax();
+ }
+
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &st->table->lock);
+ }
+
+ /* Init confirm counter */
+ peer->confirm = 0;
+
+ /* reset teaching and learning flags to 0 */
+ peer->flags &= PEER_TEACH_RESET;
+ peer->flags &= PEER_LEARN_RESET;
+
+ /* If current peer is local */
+ if (peer->local) {
+ /* flag to start to teach lesson */
+ peer->flags |= PEER_F_TEACH_PROCESS;
+ }
+ else if ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMREMOTE &&
+ !(peers->flags & PEERS_F_RESYNC_ASSIGN)) {
+ /* If peer is remote and resync from remote is needed,
+ and no peer currently assigned */
+
+ /* assign peer for a lesson */
+ peer->flags |= PEER_F_LEARN_ASSIGN;
+ peers->flags |= PEERS_F_RESYNC_ASSIGN;
+ peers->flags |= PEERS_F_RESYNC_REMOTEASSIGN;
+ }
+}
+
/*
 * IO Handler to handle message exchange with a peer.
 * Runs the whole peer session state machine (handshake then message loop)
 * driven by appctx->st0. The peer's spinlock is taken when the peer is
 * identified and always released on the way out.
 */
static void peer_io_handler(struct appctx *appctx)
{
	struct stconn *sc = appctx_sc(appctx);
	struct stream *s = __sc_strm(sc);
	struct peers *curpeers = strm_fe(s)->parent;
	struct peer *curpeer = NULL;
	int reql = 0;
	int repl = 0;
	unsigned int maj_ver, min_ver;
	int prev_state;

	/* connection closed or errored: drain pending output and leave */
	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
		co_skip(sc_oc(sc), co_data(sc_oc(sc)));
		goto out;
	}

	/* Check if the input buffer is available. */
	if (sc_ib(sc)->size == 0) {
		sc_need_room(sc, 0);
		goto out;
	}

	while (1) {
		/* <prev_state> tracks the state we come from, mainly so that
		 * error/end states know whether to decrement connected_peers
		 */
		prev_state = appctx->st0;
switchstate:
		maj_ver = min_ver = (unsigned int)-1;
		switch(appctx->st0) {
			case PEER_SESS_ST_ACCEPT:
				prev_state = appctx->st0;
				appctx->svcctx = NULL;
				appctx->st0 = PEER_SESS_ST_GETVERSION;
				__fallthrough;
			case PEER_SESS_ST_GETVERSION:
				prev_state = appctx->st0;
				reql = peer_getline_version(appctx, &maj_ver, &min_ver);
				if (reql <= 0) {
					if (!reql)
						goto out;
					goto switchstate;
				}

				appctx->st0 = PEER_SESS_ST_GETHOST;
				__fallthrough;
			case PEER_SESS_ST_GETHOST:
				prev_state = appctx->st0;
				reql = peer_getline_host(appctx);
				if (reql <= 0) {
					if (!reql)
						goto out;
					goto switchstate;
				}

				appctx->st0 = PEER_SESS_ST_GETPEER;
				__fallthrough;
			case PEER_SESS_ST_GETPEER: {
				prev_state = appctx->st0;
				reql = peer_getline_last(appctx, &curpeer);
				if (reql <= 0) {
					if (!reql)
						goto out;
					goto switchstate;
				}

				HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
				if (curpeer->appctx && curpeer->appctx != appctx) {
					/* the peer already has a session: resolve the conflict */
					if (curpeer->local) {
						/* Local connection, reply a retry */
						appctx->st0 = PEER_SESS_ST_EXIT;
						appctx->st1 = PEER_SESS_SC_TRYAGAIN;
						goto switchstate;
					}

					/* we're killing a connection, we must apply a random delay before
					 * retrying otherwise the other end will do the same and we can loop
					 * for a while.
					 */
					curpeer->reconnect = tick_add(now_ms, MS_TO_TICKS(50 + ha_random() % 2000));
					peer_session_forceshutdown(curpeer);
					curpeer->heartbeat = TICK_ETERNITY;
					curpeer->coll++;
				}
				if (maj_ver != (unsigned int)-1 && min_ver != (unsigned int)-1) {
					/* remember whether the remote runs a downgraded minor version */
					if (min_ver == PEER_DWNGRD_MINOR_VER) {
						curpeer->flags |= PEER_F_DWNGRD;
					}
					else {
						curpeer->flags &= ~PEER_F_DWNGRD;
					}
				}
				curpeer->appctx = appctx;
				curpeer->flags |= PEER_F_ALIVE;
				appctx->svcctx = curpeer;
				appctx->st0 = PEER_SESS_ST_SENDSUCCESS;
				_HA_ATOMIC_INC(&active_peers);
			}
			__fallthrough;
			case PEER_SESS_ST_SENDSUCCESS: {
				prev_state = appctx->st0;
				if (!curpeer) {
					/* re-entered without the lock: recover the peer and lock it */
					curpeer = appctx->svcctx;
					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
					if (curpeer->appctx != appctx) {
						appctx->st0 = PEER_SESS_ST_END;
						goto switchstate;
					}
				}

				repl = peer_send_status_successmsg(appctx);
				if (repl <= 0) {
					if (repl == -1)
						goto out;
					goto switchstate;
				}

				init_accepted_peer(curpeer, curpeers);

				/* switch to waiting message state */
				_HA_ATOMIC_INC(&connected_peers);
				appctx->st0 = PEER_SESS_ST_WAITMSG;
				goto switchstate;
			}
			case PEER_SESS_ST_CONNECT: {
				prev_state = appctx->st0;
				if (!curpeer) {
					/* re-entered without the lock: recover the peer and lock it */
					curpeer = appctx->svcctx;
					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
					if (curpeer->appctx != appctx) {
						appctx->st0 = PEER_SESS_ST_END;
						goto switchstate;
					}
				}

				repl = peer_send_hellomsg(appctx, curpeer);
				if (repl <= 0) {
					if (repl == -1)
						goto out;
					goto switchstate;
				}

				/* switch to the waiting statuscode state */
				appctx->st0 = PEER_SESS_ST_GETSTATUS;
			}
			__fallthrough;
			case PEER_SESS_ST_GETSTATUS: {
				prev_state = appctx->st0;
				if (!curpeer) {
					/* re-entered without the lock: recover the peer and lock it */
					curpeer = appctx->svcctx;
					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
					if (curpeer->appctx != appctx) {
						appctx->st0 = PEER_SESS_ST_END;
						goto switchstate;
					}
				}

				if (sc_ic(sc)->flags & CF_WROTE_DATA)
					curpeer->statuscode = PEER_SESS_SC_CONNECTEDCODE;

				reql = peer_getline(appctx);
				if (!reql)
					goto out;

				if (reql < 0)
					goto switchstate;

				/* Register status code */
				curpeer->statuscode = atoi(trash.area);
				curpeer->last_hdshk = now_ms;

				/* Awake main task */
				task_wakeup(curpeers->sync_task, TASK_WOKEN_MSG);

				/* If status code is success */
				if (curpeer->statuscode == PEER_SESS_SC_SUCCESSCODE) {
					init_connected_peer(curpeer, curpeers);
				}
				else {
					if (curpeer->statuscode == PEER_SESS_SC_ERRVERSION)
						curpeer->flags |= PEER_F_DWNGRD;
					/* Status code is not success, abort */
					appctx->st0 = PEER_SESS_ST_END;
					goto switchstate;
				}
				_HA_ATOMIC_INC(&connected_peers);
				appctx->st0 = PEER_SESS_ST_WAITMSG;
			}
			__fallthrough;
			case PEER_SESS_ST_WAITMSG: {
				uint32_t msg_len = 0;
				char *msg_cur = trash.area;
				char *msg_end = trash.area;
				unsigned char msg_head[7]; // 2 + 5 for varint32
				int totl = 0;

				prev_state = appctx->st0;
				if (!curpeer) {
					/* re-entered without the lock: recover the peer and lock it */
					curpeer = appctx->svcctx;
					HA_SPIN_LOCK(PEER_LOCK, &curpeer->lock);
					if (curpeer->appctx != appctx) {
						appctx->st0 = PEER_SESS_ST_END;
						goto switchstate;
					}
				}

				reql = peer_recv_msg(appctx, (char *)msg_head, sizeof msg_head, &msg_len, &totl);
				if (reql <= 0) {
					if (reql == -1)
						goto switchstate;
					goto send_msgs;
				}

				msg_end += msg_len;
				if (!peer_treat_awaited_msg(appctx, curpeer, msg_head, &msg_cur, msg_end, msg_len, totl))
					goto switchstate;

				curpeer->flags |= PEER_F_ALIVE;

				/* skip consumed message */
				co_skip(sc_oc(sc), totl);
				/* loop on that state to peek next message */
				goto switchstate;

send_msgs:
				if (curpeer->flags & PEER_F_HEARTBEAT) {
					curpeer->flags &= ~PEER_F_HEARTBEAT;
					repl = peer_send_heartbeatmsg(appctx, curpeer, curpeers);
					if (repl <= 0) {
						if (repl == -1)
							goto out;
						goto switchstate;
					}
					curpeer->tx_hbt++;
				}
				/* we get here when a peer_recv_msg() returns 0 in reql */
				repl = peer_send_msgs(appctx, curpeer, curpeers);
				if (repl <= 0) {
					if (repl == -1)
						goto out;
					goto switchstate;
				}

				/* nothing more to do */
				goto out;
			}
			case PEER_SESS_ST_EXIT:
				if (prev_state == PEER_SESS_ST_WAITMSG)
					_HA_ATOMIC_DEC(&connected_peers);
				prev_state = appctx->st0;
				if (peer_send_status_errormsg(appctx) == -1)
					goto out;
				appctx->st0 = PEER_SESS_ST_END;
				goto switchstate;
			case PEER_SESS_ST_ERRSIZE: {
				if (prev_state == PEER_SESS_ST_WAITMSG)
					_HA_ATOMIC_DEC(&connected_peers);
				prev_state = appctx->st0;
				if (peer_send_error_size_limitmsg(appctx) == -1)
					goto out;
				appctx->st0 = PEER_SESS_ST_END;
				goto switchstate;
			}
			case PEER_SESS_ST_ERRPROTO: {
				TRACE_PROTO("protocol error", PEERS_EV_PROTOERR,
				            NULL, curpeer, &prev_state);
				if (curpeer)
					curpeer->proto_err++;
				if (prev_state == PEER_SESS_ST_WAITMSG)
					_HA_ATOMIC_DEC(&connected_peers);
				prev_state = appctx->st0;
				if (peer_send_error_protomsg(appctx) == -1) {
					TRACE_PROTO("could not send error message", PEERS_EV_PROTOERR);
					goto out;
				}
				appctx->st0 = PEER_SESS_ST_END;
				prev_state = appctx->st0;
			}
			__fallthrough;
			case PEER_SESS_ST_END: {
				if (prev_state == PEER_SESS_ST_WAITMSG)
					_HA_ATOMIC_DEC(&connected_peers);
				prev_state = appctx->st0;
				if (curpeer) {
					HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
					curpeer = NULL;
				}
				se_fl_set(appctx->sedesc, SE_FL_EOS|SE_FL_EOI);
				co_skip(sc_oc(sc), co_data(sc_oc(sc)));
				goto out;
			}
		}
	}
out:
	sc_opposite(sc)->flags |= SC_FL_RCV_ONCE;

	/* never leave with the peer lock held */
	if (curpeer)
		HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
	return;
}
+
+/* Applet descriptor for peer sessions: binds the peers protocol I/O handler
+ * and its init/release callbacks to connections handled by this applet.
+ */
+static struct applet peer_applet = {
+	.obj_type = OBJ_TYPE_APPLET,
+	.name = "<PEER>", /* used for logging */
+	.fct = peer_io_handler,
+	.init = peer_session_init,
+	.release = peer_session_release,
+};
+
+
+/*
+ * Use this function to force a close of a peer session
+ */
+static void peer_session_forceshutdown(struct peer *peer)
+{
+	struct appctx *appctx = peer->appctx;
+
+	/* Note that the peer sessions which have just been created
+	 * (->st0 == PEER_SESS_ST_CONNECT) must not
+	 * be shutdown, if not, the TCP session will never be closed
+	 * and stay in CLOSE_WAIT state after having been closed by
+	 * the remote side.
+	 */
+	if (!appctx || appctx->st0 == PEER_SESS_ST_CONNECT)
+		return;
+
+	/* only genuine peer applets may be shut down through this path */
+	if (appctx->applet != &peer_applet)
+		return;
+
+	/* detach the applet from the peer before signalling termination */
+	__peer_session_deinit(peer);
+
+	/* switch to END state and wake the applet so it releases its resources */
+	appctx->st0 = PEER_SESS_ST_END;
+	appctx_wakeup(appctx);
+}
+
+/* Pre-configures a peers frontend to accept incoming connections */
+/* Pre-configures a peers frontend to accept incoming connections.
+ * Sets up capabilities, mode, timeouts and the accept callback so that
+ * inbound peer connections are routed to the peer applet.
+ */
+void peers_setup_frontend(struct proxy *fe)
+{
+	fe->last_change = ns_to_sec(now_ns);
+	fe->cap = PR_CAP_FE | PR_CAP_BE; /* acts as both frontend and backend */
+	fe->mode = PR_MODE_PEERS;
+	fe->maxconn = 0; /* adjusted later, see peers_init_sync() */
+	fe->conn_retries = CONN_RETRIES;
+	fe->timeout.connect = MS_TO_TICKS(1000);
+	fe->timeout.client = MS_TO_TICKS(5000);
+	fe->timeout.server = MS_TO_TICKS(5000);
+	fe->accept = frontend_accept;
+	fe->default_target = &peer_applet.obj_type; /* incoming streams go to the peer applet */
+	fe->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON | PR_O2_SMARTACC;
+}
+
+/*
+ * Create a new peer session in assigned state (connect will start automatically)
+ */
+static struct appctx *peer_session_create(struct peers *peers, struct peer *peer)
+{
+	struct appctx *appctx;
+	unsigned int thr = 0;
+	int idx;
+
+	peer->new_conn++;
+	/* local peers are retried faster while the process is stopping */
+	peer->reconnect = tick_add(now_ms, (stopping ? MS_TO_TICKS(PEER_LOCAL_RECONNECT_TIMEOUT) : MS_TO_TICKS(PEER_RECONNECT_TIMEOUT)));
+	peer->heartbeat = TICK_ETERNITY;
+	peer->statuscode = PEER_SESS_SC_CONNECTCODE;
+	peer->last_hdshk = now_ms;
+
+	/* pick the thread currently running the lowest number of peer
+	 * applets for this section, to balance the load across threads
+	 */
+	for (idx = 0; idx < global.nbthread; idx++)
+		thr = peers->applet_count[idx] < peers->applet_count[thr] ? idx : thr;
+	appctx = appctx_new_on(&peer_applet, NULL, thr);
+	if (!appctx)
+		goto out_close;
+	appctx->svcctx = (void *)peer;
+
+	appctx->st0 = PEER_SESS_ST_CONNECT;
+	peer->appctx = appctx;
+
+	HA_ATOMIC_INC(&peers->applet_count[thr]);
+	appctx_wakeup(appctx);
+	return appctx;
+
+ out_close:
+	return NULL;
+}
+
+/*
+ * Task processing function to manage re-connect, peer session
+ * tasks wakeup on local update and heartbeat. Let's keep it exported so that it
+ * resolves in stack traces and "show tasks".
+ */
+struct task *process_peer_sync(struct task * task, void *context, unsigned int state)
+{
+	struct peers *peers = context;
+	struct peer *ps;
+	struct shared_table *st;
+
+	/* by default no planned wakeup; refined below as timers are examined */
+	task->expire = TICK_ETERNITY;
+
+	/* Acquire lock for all peers of the section */
+	for (ps = peers->remote; ps; ps = ps->next)
+		HA_SPIN_LOCK(PEER_LOCK, &ps->lock);
+
+	if (!stopping) {
+		/* Normal case (not soft stop)*/
+
+		/* resync timeout set to TICK_ETERNITY means we just start
+		 * a new process and timer was not initialized.
+		 * We must arm this timer to switch to a request to a remote
+		 * node if incoming connection from old local process never
+		 * comes.
+		 */
+		if (peers->resync_timeout == TICK_ETERNITY)
+			peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+
+		if (((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMLOCAL) &&
+		    (!nb_oldpids || tick_is_expired(peers->resync_timeout, now_ms)) &&
+		    !(peers->flags & PEERS_F_RESYNC_ASSIGN)) {
+			/* Resync from local peer needed
+			   no peer was assigned for the lesson
+			   and no old local peer found
+			   or resync timeout expire */
+
+			/* flag no more resync from local, to try resync from remotes */
+			peers->flags |= PEERS_F_RESYNC_LOCAL;
+			peers->flags |= PEERS_F_RESYNC_LOCALTIMEOUT;
+
+			/* reschedule a resync */
+			peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+		}
+
+		/* For each session */
+		for (ps = peers->remote; ps; ps = ps->next) {
+			/* For each remote peers */
+			if (!ps->local) {
+				if (!ps->appctx) {
+					/* no active peer connection */
+					if (ps->statuscode == 0 ||
+					    ((ps->statuscode == PEER_SESS_SC_CONNECTCODE ||
+					      ps->statuscode == PEER_SESS_SC_SUCCESSCODE ||
+					      ps->statuscode == PEER_SESS_SC_CONNECTEDCODE) &&
+					     tick_is_expired(ps->reconnect, now_ms))) {
+						/* connection never tried
+						 * or previous peer connection established with success
+						 * or previous peer connection failed while connecting
+						 * and reconnection timer is expired */
+
+						/* retry a connect */
+						ps->appctx = peer_session_create(peers, ps);
+					}
+					else if (!tick_is_expired(ps->reconnect, now_ms)) {
+						/* If previous session failed during connection
+						 * but reconnection timer is not expired */
+
+						/* reschedule task for reconnect */
+						task->expire = tick_first(task->expire, ps->reconnect);
+					}
+					/* else do nothing */
+				} /* !ps->appctx */
+				else if (ps->statuscode == PEER_SESS_SC_SUCCESSCODE) {
+					/* current peer connection is active and established */
+					if (((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMREMOTE) &&
+					    !(peers->flags & PEERS_F_RESYNC_ASSIGN) &&
+					    !(ps->flags & PEER_F_LEARN_NOTUP2DATE)) {
+						/* Resync from a remote is needed
+						 * and no peer was assigned for lesson
+						 * and current peer may be up2date */
+
+						/* assign peer for the lesson */
+						ps->flags |= PEER_F_LEARN_ASSIGN;
+						peers->flags |= PEERS_F_RESYNC_ASSIGN;
+						peers->flags |= PEERS_F_RESYNC_REMOTEASSIGN;
+
+						/* wake up peer handler to handle a request of resync */
+						appctx_wakeup(ps->appctx);
+					}
+					else {
+						int update_to_push = 0;
+
+						/* Awake session if there is data to push */
+						for (st = ps->tables; st ; st = st->next) {
+							if (st->last_pushed != st->table->localupdate) {
+								/* wake up the peer handler to push local updates */
+								update_to_push = 1;
+								/* There is no need to send a heartbeat message
+								 * when some updates must be pushed. The remote
+								 * peer will consider <ps> peer as alive when it will
+								 * receive these updates.
+								 */
+								ps->flags &= ~PEER_F_HEARTBEAT;
+								/* Re-schedule another one later. */
+								ps->heartbeat = tick_add(now_ms, MS_TO_TICKS(PEER_HEARTBEAT_TIMEOUT));
+								/* Refresh reconnect if necessary */
+								if (tick_is_expired(ps->reconnect, now_ms))
+									ps->reconnect = tick_add(now_ms, MS_TO_TICKS(PEER_RECONNECT_TIMEOUT));
+								/* We are going to send updates, let's ensure we will
+								 * come back to send heartbeat messages or to reconnect.
+								 */
+								task->expire = tick_first(ps->reconnect, ps->heartbeat);
+								appctx_wakeup(ps->appctx);
+								break;
+							}
+						}
+						/* When there are updates to send we do not reconnect
+						 * and do not send heartbeat message either.
+						 */
+						if (!update_to_push) {
+							if (tick_is_expired(ps->reconnect, now_ms)) {
+								if (ps->flags & PEER_F_ALIVE) {
+									/* This peer was alive during a 'reconnect' period.
+									 * Flag it as not alive again for the next period.
+									 */
+									ps->flags &= ~PEER_F_ALIVE;
+									ps->reconnect = tick_add(now_ms, MS_TO_TICKS(PEER_RECONNECT_TIMEOUT));
+								}
+								else {
+									/* dead peer: kill the session and apply a random
+									 * delay to avoid reconnect storms with the remote
+									 */
+									ps->reconnect = tick_add(now_ms, MS_TO_TICKS(50 + ha_random() % 2000));
+									ps->heartbeat = TICK_ETERNITY;
+									peer_session_forceshutdown(ps);
+									ps->no_hbt++;
+								}
+							}
+							else if (tick_is_expired(ps->heartbeat, now_ms)) {
+								ps->heartbeat = tick_add(now_ms, MS_TO_TICKS(PEER_HEARTBEAT_TIMEOUT));
+								ps->flags |= PEER_F_HEARTBEAT;
+								appctx_wakeup(ps->appctx);
+							}
+							task->expire = tick_first(ps->reconnect, ps->heartbeat);
+						}
+					}
+					/* else do nothing */
+				} /* SUCCESSCODE */
+			} /* !ps->peer->local */
+		} /* for */
+
+		/* Resync from remotes expired: consider resync is finished */
+		if (((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FROMREMOTE) &&
+		    !(peers->flags & PEERS_F_RESYNC_ASSIGN) &&
+		    tick_is_expired(peers->resync_timeout, now_ms)) {
+			/* Resync from remote peer needed
+			 * no peer was assigned for the lesson
+			 * and resync timeout expire */
+
+			/* flag no more resync from remote, consider resync is finished */
+			peers->flags |= PEERS_F_RESYNC_REMOTE;
+			peers->flags |= PEERS_F_RESYNC_REMOTETIMEOUT;
+		}
+
+		if ((peers->flags & PEERS_RESYNC_STATEMASK) != PEERS_RESYNC_FINISHED) {
+			/* Resync not finished*/
+			/* reschedule task to resync timeout if not expired, to ended resync if needed */
+			if (!tick_is_expired(peers->resync_timeout, now_ms))
+				task->expire = tick_first(task->expire, peers->resync_timeout);
+		}
+	} /* !stopping */
+	else {
+		/* soft stop case */
+		if (state & TASK_WOKEN_SIGNAL) {
+			/* We've just received the signal */
+			if (!(peers->flags & PEERS_F_DONOTSTOP)) {
+				/* add DO NOT STOP flag if not present */
+				_HA_ATOMIC_INC(&jobs);
+				peers->flags |= PEERS_F_DONOTSTOP;
+
+				/* disconnect all connected peers to process a local sync
+				 * this must be done only the first time we are switching
+				 * in stopping state
+				 */
+				for (ps = peers->remote; ps; ps = ps->next) {
+					/* we're killing a connection, we must apply a random delay before
+					 * retrying otherwise the other end will do the same and we can loop
+					 * for a while.
+					 */
+					ps->reconnect = tick_add(now_ms, MS_TO_TICKS(50 + ha_random() % 2000));
+					if (ps->appctx) {
+						peer_session_forceshutdown(ps);
+					}
+				}
+
+				/* Set resync timeout for the local peer and request a immediate reconnect */
+				peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+				peers->local->reconnect = now_ms;
+			}
+		}
+
+		/* from here on, only the local peer (new process side) is handled */
+		ps = peers->local;
+		if (ps->flags & PEER_F_TEACH_COMPLETE) {
+			if (peers->flags & PEERS_F_DONOTSTOP) {
+				/* resync of new process was complete, current process can die now */
+				_HA_ATOMIC_DEC(&jobs);
+				peers->flags &= ~PEERS_F_DONOTSTOP;
+				for (st = ps->tables; st ; st = st->next)
+					HA_ATOMIC_DEC(&st->table->refcnt);
+			}
+		}
+		else if (!ps->appctx) {
+			/* Re-arm resync timeout if necessary */
+			if (!tick_isset(peers->resync_timeout))
+				peers->resync_timeout = tick_add(now_ms, MS_TO_TICKS(PEER_RESYNC_TIMEOUT));
+
+			/* If there's no active peer connection */
+			if ((peers->flags & PEERS_RESYNC_STATEMASK) == PEERS_RESYNC_FINISHED &&
+			    !tick_is_expired(peers->resync_timeout, now_ms) &&
+			    (ps->statuscode == 0 ||
+			     ps->statuscode == PEER_SESS_SC_SUCCESSCODE ||
+			     ps->statuscode == PEER_SESS_SC_CONNECTEDCODE ||
+			     ps->statuscode == PEER_SESS_SC_TRYAGAIN)) {
+				/* The resync is finished for the local peer and
+				 * the resync timeout is not expired and
+				 * connection never tried
+				 * or previous peer connection was successfully established
+				 * or previous tcp connect succeeded but init state incomplete
+				 * or during previous connect, peer replies a try again statuscode */
+
+				if (!tick_is_expired(ps->reconnect, now_ms)) {
+					/* reconnection timer is not expired. reschedule task for reconnect */
+					task->expire = tick_first(task->expire, ps->reconnect);
+				}
+				else {
+					/* connect to the local peer if we must push a local sync */
+					if (peers->flags & PEERS_F_DONOTSTOP) {
+						peer_session_create(peers, ps);
+					}
+				}
+			}
+			else {
+				/* Other error cases */
+				if (peers->flags & PEERS_F_DONOTSTOP) {
+					/* unable to resync new process, current process can die now */
+					_HA_ATOMIC_DEC(&jobs);
+					peers->flags &= ~PEERS_F_DONOTSTOP;
+					for (st = ps->tables; st ; st = st->next)
+						HA_ATOMIC_DEC(&st->table->refcnt);
+				}
+			}
+		}
+		else if (ps->statuscode == PEER_SESS_SC_SUCCESSCODE ) {
+			/* Reset resync timeout during a resync */
+			peers->resync_timeout = TICK_ETERNITY;
+
+			/* current peer connection is active and established
+			 * wake up all peer handlers to push remaining local updates */
+			for (st = ps->tables; st ; st = st->next) {
+				if (st->last_pushed != st->table->localupdate) {
+					appctx_wakeup(ps->appctx);
+					break;
+				}
+			}
+		}
+	} /* stopping */
+
+	/* Release lock for all peers of the section */
+	for (ps = peers->remote; ps; ps = ps->next)
+		HA_SPIN_UNLOCK(PEER_LOCK, &ps->lock);
+
+	/* Wakeup for re-connect */
+	return task;
+}
+
+
+/*
+ * returns 0 in case of error.
+ */
+int peers_init_sync(struct peers *peers)
+{
+	struct peer * curpeer;
+
+	/* reserve 3 connection slots per remote peer on the peers frontend
+	 * (presumably in/out sessions plus reconnect margin — TODO confirm)
+	 */
+	for (curpeer = peers->remote; curpeer; curpeer = curpeer->next) {
+		peers->peers_fe->maxconn += 3;
+	}
+
+	peers->sync_task = task_new_anywhere();
+	if (!peers->sync_task)
+		return 0;
+
+	/* per-thread applet counters start at zero; used for load balancing
+	 * in peer_session_create()
+	 */
+	memset(peers->applet_count, 0, sizeof(peers->applet_count));
+	peers->sync_task->process = process_peer_sync;
+	peers->sync_task->context = (void *)peers;
+	peers->sighandler = signal_register_task(0, peers->sync_task, 0);
+	task_wakeup(peers->sync_task, TASK_WOKEN_INIT);
+	return 1;
+}
+
+/*
+ * Allocate a cache a dictionary entries used upon transmission.
+ */
+/*
+ * Allocate a cache of dictionary entries used upon transmission: the
+ * descriptor itself plus a zeroed array of <max_entries> tree nodes.
+ * Returns the new cache, or NULL on allocation failure.
+ */
+static struct dcache_tx *new_dcache_tx(size_t max_entries)
+{
+	struct dcache_tx *tx = malloc(sizeof *tx);
+	struct ebpt_node *nodes = calloc(max_entries, sizeof *nodes);
+
+	if (!tx || !nodes) {
+		/* free(NULL) is a no-op, so partial allocations are safe here */
+		free(tx);
+		free(nodes);
+		return NULL;
+	}
+
+	tx->lru_key = 0;
+	tx->prev_lookup = NULL;
+	tx->cached_entries = EB_ROOT_UNIQUE;
+	tx->entries = nodes;
+	return tx;
+}
+
+/*
+ * Allocate a cache of dictionary entries with <name> as name and <max_entries>
+ * as maximum of entries.
+ * Return the dictionary cache if succeeded, NULL if not.
+ * Must be deallocated calling free_dcache().
+ */
+/*
+ * Allocate a complete dictionary cache holding at most <max_entries>
+ * entries on both the TX and RX sides.
+ * Returns the dictionary cache if succeeded, NULL if not.
+ * Must be deallocated calling free_dcache().
+ */
+static struct dcache *new_dcache(size_t max_entries)
+{
+	struct dcache *cache = calloc(1, sizeof *cache);
+	struct dcache_tx *tx = new_dcache_tx(max_entries);
+	struct dcache_rx *rx = calloc(max_entries, sizeof *rx);
+
+	if (cache && tx && rx) {
+		cache->tx = tx;
+		cache->rx = rx;
+		cache->max_entries = max_entries;
+		return cache;
+	}
+
+	/* at least one allocation failed: release whatever succeeded */
+	free(cache);
+	free(tx);
+	free(rx);
+	return NULL;
+}
+
+/*
+ * Look for the dictionary entry with the value of <i> in <d> cache of dictionary
+ * entries used upon transmission.
+ * Return the entry if found, NULL if not.
+ */
+/*
+ * Look up, in the TX dictionary cache <d>, the node whose key matches the
+ * one carried by entry <i>. Returns the node if found, NULL otherwise.
+ */
+static struct ebpt_node *dcache_tx_lookup_value(struct dcache_tx *d,
+                                                struct dcache_tx_entry *i)
+{
+	void *key = i->entry.key;
+
+	return ebpt_lookup(&d->cached_entries, key);
+}
+
+/*
+ * Flush <dc> cache.
+ * Always succeeds.
+ */
+/*
+ * Flush the dictionary cache of <peer>: drop every TX tree node, release
+ * the reference held on every RX dictionary entry, and reset the LRU state.
+ * Always succeeds.
+ */
+static inline void flush_dcache(struct peer *peer)
+{
+	size_t i; /* size_t: dc->max_entries is a size_t, avoids signed/unsigned comparison */
+	struct dcache *dc = peer->dcache;
+
+	for (i = 0; i < dc->max_entries; i++) {
+		/* remove the TX node from the lookup tree and clear its key */
+		ebpt_delete(&dc->tx->entries[i]);
+		dc->tx->entries[i].key = NULL;
+		/* drop the reference taken on the RX dictionary entry */
+		dict_entry_unref(&server_key_dict, dc->rx[i].de);
+		dc->rx[i].de = NULL;
+	}
+	dc->tx->prev_lookup = NULL;
+	dc->tx->lru_key = 0;
+
+	memset(dc->rx, 0, dc->max_entries * sizeof *dc->rx);
+}
+
+/*
+ * Insert a dictionary entry in <dc> cache part used upon transmission (->tx)
+ * with information provided by <i> dictionary cache entry (especially the value
+ * to be inserted if not already). Return <i> if already present in the cache
+ * or something different of <i> if not.
+ */
+static struct ebpt_node *dcache_tx_insert(struct dcache *dc, struct dcache_tx_entry *i)
+{
+	struct dcache_tx *dc_tx;
+	struct ebpt_node *o;
+
+	dc_tx = dc->tx;
+
+	/* fast path: same key as the previous lookup, avoid the tree walk */
+	if (dc_tx->prev_lookup && dc_tx->prev_lookup->key == i->entry.key) {
+		o = dc_tx->prev_lookup;
+	} else {
+		o = dcache_tx_lookup_value(dc_tx, i);
+		if (o) {
+			/* Save it */
+			dc_tx->prev_lookup = o;
+		}
+	}
+
+	if (o) {
+		/* Copy the ID. */
+		i->id = o - dc->tx->entries;
+		/* returning <i>'s own node signals "already cached" to the caller */
+		return &i->entry;
+	}
+
+	/* The new entry to put in cache: reuse the slot designated by the
+	 * round-robin lru_key index, evicting whatever was there before.
+	 */
+	dc_tx->prev_lookup = o = &dc_tx->entries[dc_tx->lru_key];
+
+	ebpt_delete(o);
+	o->key = i->entry.key;
+	ebpt_insert(&dc_tx->cached_entries, o);
+	i->id = dc_tx->lru_key;
+
+	/* Update the index for the next entry to put in cache;
+	 * the mask requires max_entries to be a power of two.
+	 */
+	dc_tx->lru_key = (dc_tx->lru_key + 1) & (dc->max_entries - 1);
+
+	return o;
+}
+
+/*
+ * Allocate a dictionary cache for each peer of <peers> section.
+ * Return 1 if succeeded, 0 if not.
+ */
+/*
+ * Allocate a dictionary cache for each peer of <peers> section.
+ * Return 1 if succeeded, 0 if not.
+ */
+int peers_alloc_dcache(struct peers *peers)
+{
+	struct peer *p = peers->remote;
+
+	while (p) {
+		p->dcache = new_dcache(PEER_STKT_CACHE_MAX_ENTRIES);
+		if (p->dcache == NULL)
+			return 0;
+		p = p->next;
+	}
+
+	return 1;
+}
+
+/*
+ * Function used to register a table for sync on a group of peers
+ * Returns 0 in case of success.
+ */
+int peers_register_table(struct peers *peers, struct stktable *table)
+{
+	struct shared_table *st;
+	struct peer * curpeer;
+	int id = 0;
+	int retval = 0;
+
+	for (curpeer = peers->remote; curpeer; curpeer = curpeer->next) {
+		st = calloc(1,sizeof(*st));
+		if (!st) {
+			retval = 1;
+			break;
+		}
+		st->table = table;
+		st->next = curpeer->tables;
+		/* local ids grow monotonically: new table gets last id + 1 */
+		if (curpeer->tables)
+			id = curpeer->tables->local_id;
+		st->local_id = id + 1;
+
+		/* If peer is local we inc table
+		 * refcnt to protect against flush
+		 * until this process pushed all
+		 * table content to the new one
+		 */
+		if (curpeer->local)
+			HA_ATOMIC_INC(&st->table->refcnt);
+		curpeer->tables = st;
+	}
+
+	/* the section's sync task is responsible for pushing this table */
+	table->sync_task = peers->sync_task;
+
+	return retval;
+}
+
+/* context used by a "show peers" command; zero-initialized by
+ * applet_reserve_svcctx() before cli_parse_show_peers() fills it in.
+ */
+struct show_peers_ctx {
+	void *target;        /* if non-null, dump only this section and stop */
+	struct peers *peers; /* "peers" section being currently dumped. */
+	struct peer *peer;   /* "peer" being currently dumped. */
+	int flags;           /* PEERS_SHOW_F_DICT when a "dict" dump was requested */
+	enum {
+		STATE_HEAD = 0, /* dump the section's header */
+		STATE_PEER,     /* dump the whole peer */
+		STATE_DONE,     /* finished */
+	} state;             /* parser's state */
+};
+
+/*
+ * Parse the "show peers" command arguments.
+ * Returns 0 if succeeded, 1 if not with the ->msg of the appctx set as
+ * error message.
+ */
+static int cli_parse_show_peers(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct show_peers_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+	if (strcmp(args[2], "dict") == 0) {
+		/* show the dictionaries (large dump) */
+		ctx->flags |= PEERS_SHOW_F_DICT;
+		args++; /* shift args so args[2] now points to the optional section name */
+	} else if (strcmp(args[2], "-") == 0)
+		args++; // allows to show a section called "dict"
+
+	if (*args[2]) {
+		struct peers *p;
+
+		/* look up the requested peers section by name */
+		for (p = cfg_peers; p; p = p->next) {
+			if (strcmp(p->id, args[2]) == 0) {
+				ctx->target = p;
+				break;
+			}
+		}
+
+		if (!p)
+			return cli_err(appctx, "No such peers\n");
+	}
+
+	/* where to start from */
+	ctx->peers = ctx->target ? ctx->target : cfg_peers;
+	return 0;
+}
+
+/*
+ * This function dumps the peer state information of <peers> "peers" section.
+ * Returns 0 if the output buffer is full and needs to be called again, non-zero if not.
+ * Dedicated to be called by cli_io_handler_show_peers() cli I/O handler.
+ */
+static int peers_dump_head(struct buffer *msg, struct appctx *appctx, struct peers *peers)
+{
+	struct tm tm;
+
+	get_localtime(peers->last_change, &tm);
+	/* one-line summary of the section state and its resync deadline */
+	chunk_appendf(msg, "%p: [%02d/%s/%04d:%02d:%02d:%02d] id=%s disabled=%d flags=0x%x resync_timeout=%s task_calls=%u\n",
+	              peers,
+	              tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
+	              tm.tm_hour, tm.tm_min, tm.tm_sec,
+	              peers->id, peers->disabled, peers->flags,
+	              peers->resync_timeout ?
+	                             tick_is_expired(peers->resync_timeout, now_ms) ? "<PAST>" :
+	                                     human_time(TICKS_TO_MS(peers->resync_timeout - now_ms),
+	                                     TICKS_TO_MS(1000)) : "<NEVER>",
+	              peers->sync_task ? peers->sync_task->calls : 0);
+
+	/* returns 0 when the output buffer is full; caller must retry later */
+	if (applet_putchk(appctx, msg) == -1)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * This function dumps <peer> state information.
+ * Returns 0 if the output buffer is full and needs to be called again, non-zero
+ * if not. Dedicated to be called by cli_io_handler_show_peers() cli I/O handler.
+ */
+static int peers_dump_peer(struct buffer *msg, struct appctx *appctx, struct peer *peer, int flags)
+{
+	struct connection *conn;
+	char pn[INET6_ADDRSTRLEN];
+	struct stconn *peer_cs;
+	struct stream *peer_s;
+	struct shared_table *st;
+
+	addr_to_str(&peer->addr, pn, sizeof pn);
+	chunk_appendf(msg, " %p: id=%s(%s,%s) addr=%s:%d last_status=%s",
+	              peer, peer->id,
+	              peer->local ? "local" : "remote",
+	              peer->appctx ? "active" : "inactive",
+	              pn, get_host_port(&peer->addr),
+	              statuscode_str(peer->statuscode));
+
+	chunk_appendf(msg, " last_hdshk=%s\n",
+	              peer->last_hdshk ? human_time(TICKS_TO_MS(now_ms - peer->last_hdshk),
+	                                            TICKS_TO_MS(1000)) : "<NEVER>");
+
+	chunk_appendf(msg, " reconnect=%s",
+	              peer->reconnect ?
+	                      tick_is_expired(peer->reconnect, now_ms) ? "<PAST>" :
+	                              human_time(TICKS_TO_MS(peer->reconnect - now_ms),
+	                                         TICKS_TO_MS(1000)) : "<NEVER>");
+
+	chunk_appendf(msg, " heartbeat=%s",
+	              peer->heartbeat ?
+	                      tick_is_expired(peer->heartbeat, now_ms) ? "<PAST>" :
+	                              human_time(TICKS_TO_MS(peer->heartbeat - now_ms),
+	                                         TICKS_TO_MS(1000)) : "<NEVER>");
+
+	chunk_appendf(msg, " confirm=%u tx_hbt=%u rx_hbt=%u no_hbt=%u new_conn=%u proto_err=%u coll=%u\n",
+	              peer->confirm, peer->tx_hbt, peer->rx_hbt,
+	              peer->no_hbt, peer->new_conn, peer->proto_err, peer->coll);
+
+	/* NOTE(review): from this point the function appends to the global
+	 * &trash buffer although the final applet_putchk() flushes <msg>.
+	 * This only works because the sole caller passes &trash as <msg>;
+	 * confirm that invariant before reusing this function elsewhere.
+	 */
+	chunk_appendf(&trash, " flags=0x%x", peer->flags);
+
+	if (!peer->appctx)
+		goto table_info;
+
+	chunk_appendf(&trash, " appctx:%p st0=%d st1=%d task_calls=%u",
+	              peer->appctx, peer->appctx->st0, peer->appctx->st1,
+	              peer->appctx->t ? peer->appctx->t->calls : 0);
+
+	peer_cs = appctx_sc(peer->appctx);
+	if (!peer_cs) {
+		/* the appctx might exist but not yet be initialized due to
+		 * deferred initialization used to balance applets across
+		 * threads.
+		 */
+		goto table_info;
+	}
+
+	peer_s = __sc_strm(peer_cs);
+
+	chunk_appendf(&trash, " state=%s", sc_state_str(sc_opposite(peer_cs)->state));
+
+	conn = objt_conn(strm_orig(peer_s));
+	if (conn)
+		chunk_appendf(&trash, "\n xprt=%s", conn_get_xprt_name(conn));
+
+	/* addr_to_str() returns the address family, used to pick the format */
+	switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
+	case AF_INET:
+	case AF_INET6:
+		chunk_appendf(&trash, " src=%s:%d", pn, get_host_port(conn->src));
+		break;
+	case AF_UNIX:
+		chunk_appendf(&trash, " src=unix:%d", strm_li(peer_s)->luid);
+		break;
+	}
+
+	switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
+	case AF_INET:
+	case AF_INET6:
+		chunk_appendf(&trash, " addr=%s:%d", pn, get_host_port(conn->dst));
+		break;
+	case AF_UNIX:
+		chunk_appendf(&trash, " addr=unix:%d", strm_li(peer_s)->luid);
+		break;
+	}
+
+ table_info:
+	if (peer->remote_table)
+		chunk_appendf(&trash, "\n remote_table:%p id=%s local_id=%d remote_id=%d",
+		              peer->remote_table,
+		              peer->remote_table->table->id,
+		              peer->remote_table->local_id,
+		              peer->remote_table->remote_id);
+
+	if (peer->last_local_table)
+		chunk_appendf(&trash, "\n last_local_table:%p id=%s local_id=%d remote_id=%d",
+		              peer->last_local_table,
+		              peer->last_local_table->table->id,
+		              peer->last_local_table->local_id,
+		              peer->last_local_table->remote_id);
+
+	if (peer->tables) {
+		chunk_appendf(&trash, "\n shared tables:");
+		for (st = peer->tables; st; st = st->next) {
+			int i, count;
+			struct stktable *t;
+			struct dcache *dcache;
+
+			t = st->table;
+			dcache = peer->dcache;
+
+			chunk_appendf(&trash, "\n %p local_id=%d remote_id=%d "
+			              "flags=0x%x remote_data=0x%llx",
+			              st, st->local_id, st->remote_id,
+			              st->flags, (unsigned long long)st->remote_data);
+			chunk_appendf(&trash, "\n last_acked=%u last_pushed=%u last_get=%u"
+			              " teaching_origin=%u update=%u",
+			              st->last_acked, st->last_pushed, st->last_get,
+			              st->teaching_origin, st->update);
+			chunk_appendf(&trash, "\n table:%p id=%s update=%u localupdate=%u"
+			              " commitupdate=%u refcnt=%u",
+			              t, t->id, t->update, t->localupdate, _HA_ATOMIC_LOAD(&t->commitupdate), t->refcnt);
+			if (flags & PEERS_SHOW_F_DICT) {
+				chunk_appendf(&trash, "\n TX dictionary cache:");
+				count = 0;
+				for (i = 0; i < dcache->max_entries; i++) {
+					struct ebpt_node *node;
+					struct dict_entry *de;
+
+					node = &dcache->tx->entries[i];
+					if (!node->key)
+						break;
+
+					/* group 4 entries per output line */
+					if (!count++)
+						chunk_appendf(&trash, "\n ");
+					de = node->key;
+					chunk_appendf(&trash, " %3u -> %s", i, (char *)de->value.key);
+					count &= 0x3;
+				}
+				chunk_appendf(&trash, "\n RX dictionary cache:");
+				count = 0;
+				for (i = 0; i < dcache->max_entries; i++) {
+					if (!count++)
+						chunk_appendf(&trash, "\n ");
+					chunk_appendf(&trash, " %3u -> %s", i,
+					              dcache->rx[i].de ?
+					              (char *)dcache->rx[i].de->value.key : "-");
+					count &= 0x3;
+				}
+			} else {
+				chunk_appendf(&trash, "\n Dictionary cache not dumped (use \"show peers dict\")");
+			}
+		}
+	}
+
+ end:
+	/* NOTE(review): no "goto end" exists in this function; the label above
+	 * is unused and may trigger a -Wunused-label warning.
+	 */
+	chunk_appendf(&trash, "\n");
+	if (applet_putchk(appctx, msg) == -1)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * This function dumps all the peers of "peers" section.
+ * Returns 0 if the output buffer is full and needs to be called
+ * again, non-zero if not. It proceeds in an isolated thread, so
+ * there is no thread safety issue here.
+ */
+static int cli_io_handler_show_peers(struct appctx *appctx)
+{
+	struct show_peers_ctx *ctx = appctx->svcctx;
+	int ret = 0, first_peers = 1;
+
+	/* all threads are paused while dumping, so no peer lock is needed */
+	thread_isolate();
+
+	chunk_reset(&trash);
+
+	/* small state machine: STATE_HEAD dumps a section header, STATE_PEER
+	 * iterates over its peers, then either moves to the next section or
+	 * stops when a specific target section was requested.
+	 */
+	while (ctx->state != STATE_DONE) {
+		switch (ctx->state) {
+		case STATE_HEAD:
+			if (!ctx->peers) {
+				/* No more peers list. */
+				ctx->state = STATE_DONE;
+			}
+			else {
+				if (!first_peers)
+					chunk_appendf(&trash, "\n");
+				else
+					first_peers = 0;
+				if (!peers_dump_head(&trash, appctx, ctx->peers))
+					goto out;
+
+				ctx->peer = ctx->peers->remote;
+				ctx->peers = ctx->peers->next;
+				ctx->state = STATE_PEER;
+			}
+			break;
+
+		case STATE_PEER:
+			if (!ctx->peer) {
+				/* End of peer list */
+				if (!ctx->target)
+					ctx->state = STATE_HEAD; // next one
+				else
+					ctx->state = STATE_DONE;
+			}
+			else {
+				if (!peers_dump_peer(&trash, appctx, ctx->peer, ctx->flags))
+					goto out;
+
+				ctx->peer = ctx->peer->next;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
+	ret = 1;
+ out:
+	thread_release();
+	return ret;
+}
+}
+
+
+/* Head of the list of registered "peers" configuration keyword classes. */
+struct peers_kw_list peers_keywords = {
+	.list = LIST_HEAD_INIT(peers_keywords.list)
+};
+
+/* Append the keyword list <pkwl> to the registered peers keywords. */
+void peers_register_keywords(struct peers_kw_list *pkwl)
+{
+	LIST_APPEND(&peers_keywords.list, &pkwl->list);
+}
+
+/* config parser for global "tune.peers.max-updates-at-once" */
+/* config parser for global "tune.peers.max-updates-at-once".
+ * Accepts a single strictly positive integer; rejects missing, non-numeric
+ * or out-of-range values with an error message in <err>.
+ * Returns 0 on success, -1 on error.
+ */
+static int cfg_parse_max_updt_at_once(char **args, int section_type, struct proxy *curpx,
+                                      const struct proxy *defpx, const char *file, int line,
+                                      char **err)
+{
+	int arg = -1;
+	char *end;
+
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (*(args[1]) != 0) {
+		/* strtol instead of atoi so trailing garbage ("10x") is rejected */
+		arg = strtol(args[1], &end, 10);
+		if (*end != 0)
+			arg = -1;
+	}
+
+	if (arg < 1) {
+		memprintf(err, "'%s' expects an integer argument greater than 0.", args[0]);
+		return -1;
+	}
+
+	peers_max_updates_at_once = arg;
+	return 0;
+}
+
+/* config keyword parsers */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.peers.max-updates-at-once", cfg_parse_max_updt_at_once },
+	{ 0, NULL, NULL } /* end of list */
+}};
+
+/* register the global tuning keyword at startup */
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/*
+ * CLI keywords.
+ */
+/* CLI keyword table: parser fills the svcctx, I/O handler streams the dump */
+static struct cli_kw_list cli_kws = {{ }, {
+	{ { "show", "peers", NULL }, "show peers [dict|-] [section] : dump some information about all the peers or this peers section", cli_parse_show_peers, cli_io_handler_show_peers, },
+	{},
+}};
+
+/* Register cli keywords */
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
diff --git a/src/pipe.c b/src/pipe.c
new file mode 100644
index 0000000..5599fe0
--- /dev/null
+++ b/src/pipe.c
@@ -0,0 +1,136 @@
+/*
+ * Pipe management
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <haproxy/api.h>
+#include <haproxy/global.h>
+#include <haproxy/pipe-t.h>
+#include <haproxy/pool.h>
+#include <haproxy/thread.h>
+
+
+DECLARE_STATIC_POOL(pool_head_pipe, "pipe", sizeof(struct pipe));
+
+struct pipe *pipes_live = NULL; /* pipes which are still ready to use */
+
+__decl_spinlock(pipes_lock); /* lock used to protect pipes list */
+
+/* per-thread cache of free pipes, consulted before the shared list */
+static THREAD_LOCAL int local_pipes_free = 0; /* #cache objects */
+static THREAD_LOCAL struct pipe *local_pipes = NULL;
+
+int pipes_used = 0; /* # of pipes in use (2 fds each) */
+int pipes_free = 0; /* # of pipes unused */
+
+/* return a pre-allocated empty pipe. Try to allocate one if there isn't any
+ * left. NULL is returned if a pipe could not be allocated.
+ */
+/* Return a pre-allocated empty pipe, trying the thread-local cache first,
+ * then the shared list, and finally allocating a fresh one when both are
+ * empty. Returns NULL if no pipe could be obtained (allocation failure,
+ * pipe() failure or global.maxpipes reached). pipes_used/pipes_free are
+ * updated accordingly.
+ */
+struct pipe *get_pipe(void)
+{
+	struct pipe *ret = NULL;
+	int pipefd[2];
+
+	/* fast path: thread-local cache, no locking needed */
+	ret = local_pipes;
+	if (likely(ret)) {
+		local_pipes = ret->next;
+		local_pipes_free--;
+		HA_ATOMIC_DEC(&pipes_free);
+		HA_ATOMIC_INC(&pipes_used);
+		goto out;
+	}
+
+	/* shared list: the lockless pre-check on pipes_live is only a hint,
+	 * the authoritative check is redone under the lock.
+	 */
+	if (likely(pipes_live)) {
+		HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
+		ret = pipes_live;
+		if (likely(ret))
+			pipes_live = ret->next;
+		HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
+		if (ret) {
+			HA_ATOMIC_DEC(&pipes_free);
+			HA_ATOMIC_INC(&pipes_used);
+			goto out;
+		}
+	}
+
+	/* slow path: create a new pipe if the global limit permits it. The
+	 * usage counter is bumped first and rolled back on any failure.
+	 */
+	HA_ATOMIC_INC(&pipes_used);
+	if (pipes_used + pipes_free >= global.maxpipes)
+		goto fail;
+
+	ret = pool_alloc(pool_head_pipe);
+	if (!ret)
+		goto fail;
+
+	if (pipe(pipefd) < 0)
+		goto fail;
+
+#ifdef F_SETPIPE_SZ
+	/* best effort: a failure to resize the kernel buffer is ignored */
+	if (global.tune.pipesize)
+		fcntl(pipefd[0], F_SETPIPE_SZ, global.tune.pipesize);
+#endif
+	ret->data = 0;
+	ret->prod = pipefd[1];
+	ret->cons = pipefd[0];
+	ret->next = NULL;
+ out:
+	return ret;
+ fail:
+	/* pool_free(NULL) is a no-op so <ret> may or may not be allocated here */
+	pool_free(pool_head_pipe, ret);
+	HA_ATOMIC_DEC(&pipes_used);
+	return NULL;
+
+}
+
+/* destroy a pipe, possibly because an error was encountered on it. Its FDs
+ * will be closed and it will not be reinjected into the live pool.
+ */
+void kill_pipe(struct pipe *p)
+{
+	/* close both ends before returning the descriptor to the pool */
+	close(p->prod);
+	close(p->cons);
+	pool_free(pool_head_pipe, p);
+	HA_ATOMIC_DEC(&pipes_used);
+}
+
+/* put back a unused pipe into the live pool. If it still has data in it, it is
+ * closed and not reinjected into the live pool. The caller is not allowed to
+ * use it once released.
+ */
+void put_pipe(struct pipe *p)
+{
+	/* a pipe still carrying data cannot be safely reused: destroy it */
+	if (unlikely(p->data)) {
+		kill_pipe(p);
+		return;
+	}
+
+	/* keep it in the thread-local cache while this thread's share of the
+	 * unused-pipe budget is not exhausted
+	 */
+	if (likely(local_pipes_free * global.nbthread < global.maxpipes - pipes_used)) {
+		p->next = local_pipes;
+		local_pipes = p;
+		local_pipes_free++;
+		goto out;
+	}
+
+	/* otherwise push it onto the shared list under the global lock */
+	HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
+	p->next = pipes_live;
+	pipes_live = p;
+	HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
+ out:
+	HA_ATOMIC_INC(&pipes_free);
+	HA_ATOMIC_DEC(&pipes_used);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/pool.c b/src/pool.c
new file mode 100644
index 0000000..376b311
--- /dev/null
+++ b/src/pool.c
@@ -0,0 +1,1539 @@
+/*
+ * Memory management functions.
+ *
+ * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+
+#include <import/plock.h>
+
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+#include <haproxy/pool-os.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+
+
+/* These ones are initialized per-thread on startup by init_pools() */
+THREAD_LOCAL size_t pool_cache_bytes = 0; /* total cache size */
+THREAD_LOCAL size_t pool_cache_count = 0; /* #cache objects */
+
+/* list of all known pools, kept sorted by ascending object size
+ * (see the insertion logic in create_pool()).
+ */
+static struct list pools __read_mostly = LIST_HEAD_INIT(pools);
+int mem_poison_byte __read_mostly = 'P'; /* byte used by POOL_DBG_POISON fills */
+int pool_trim_in_progress = 0;           /* >0 while malloc_trim() is running */
+uint pool_debugging __read_mostly = /* set of POOL_DBG_* flags */
+#ifdef DEBUG_FAIL_ALLOC
+	POOL_DBG_FAIL_ALLOC |
+#endif
+#ifdef DEBUG_DONT_SHARE_POOLS
+	POOL_DBG_DONT_MERGE |
+#endif
+/* note: DEBUG_POOL_INTEGRITY intentionally enables both COLD_FIRST and INTEGRITY */
+#ifdef DEBUG_POOL_INTEGRITY
+	POOL_DBG_COLD_FIRST |
+#endif
+#ifdef DEBUG_POOL_INTEGRITY
+	POOL_DBG_INTEGRITY |
+#endif
+#ifdef CONFIG_HAP_NO_GLOBAL_POOLS
+	POOL_DBG_NO_GLOBAL |
+#endif
+#if defined(DEBUG_NO_POOLS) || defined(DEBUG_UAF)
+	POOL_DBG_NO_CACHE |
+#endif
+#if defined(DEBUG_POOL_TRACING)
+	POOL_DBG_CALLER |
+#endif
+#if defined(DEBUG_MEMORY_POOLS)
+	POOL_DBG_TAG |
+#endif
+#if defined(DEBUG_UAF)
+	POOL_DBG_UAF |
+#endif
+	0;
+
+/* keyword table mapping each POOL_DBG_* flag to its "set"/"clear" CLI/cfg
+ * keywords and a short help string.
+ */
+static const struct {
+	uint flg;
+	const char *set;
+	const char *clr;
+	const char *hlp;
+} dbg_options[] = {
+	/* flg, set, clr, hlp */
+	{ POOL_DBG_FAIL_ALLOC, "fail", "no-fail", "randomly fail allocations" },
+	{ POOL_DBG_DONT_MERGE, "no-merge", "merge", "disable merging of similar pools" },
+	{ POOL_DBG_COLD_FIRST, "cold-first", "hot-first", "pick cold objects first" },
+	{ POOL_DBG_INTEGRITY, "integrity", "no-integrity", "enable cache integrity checks" },
+	{ POOL_DBG_NO_GLOBAL, "no-global", "global", "disable global shared cache" },
+	{ POOL_DBG_NO_CACHE, "no-cache", "cache", "disable thread-local cache" },
+	{ POOL_DBG_CALLER, "caller", "no-caller", "save caller information in cache" },
+	{ POOL_DBG_TAG, "tag", "no-tag", "add tag at end of allocated objects" },
+	{ POOL_DBG_POISON, "poison", "no-poison", "poison newly allocated objects" },
+	{ POOL_DBG_UAF, "uaf", "no-uaf", "enable use-after-free checks (slow)" },
+	{ 0 /* end */ }
+};
+
+/* describes a snapshot of a pool line about to be dumped by "show pools" */
+struct pool_dump_info {
+	const struct pool_head *entry;
+	ulong alloc_items;
+	ulong alloc_bytes;
+	ulong used_items;
+	ulong cached_items;
+	ulong need_avg;
+	ulong failed_items;
+};
+
+/* context used by "show pools" */
+struct show_pools_ctx {
+	char *prefix;  /* if non-null, match this prefix name for the pool */
+	int by_what; /* 0=no sort, 1=by name, 2=by item size, 3=by total alloc */
+	int maxcnt;  /* 0=no limit, other=max number of output entries */
+};
+
+static int mem_fail_rate __read_mostly = 0; /* percent chance of alloc failure (POOL_DBG_FAIL_ALLOC) */
+static int using_default_allocator __read_mostly = 1; // linked-in allocator or LD_PRELOADed one ?
+static int disable_trim __read_mostly = 0; /* when set, malloc_trim() is a no-op */
+static int(*my_mallctl)(const char *, void *, size_t *, void *, size_t) = NULL; /* jemalloc's mallctl() if present */
+static int(*_malloc_trim)(size_t) = NULL; /* libc's real malloc_trim() if present */
+
+/* returns the pool hash bucket an object should use based on its pointer.
+ * Objects will needed consistent bucket assignment so that they may be
+ * allocated on one thread and released on another one. Thus only the
+ * pointer is usable.
+ */
+static forceinline unsigned int pool_pbucket(const void *ptr)
+{
+	/* hash the pointer down to CONFIG_HAP_POOL_BUCKETS_BITS bits */
+	return ptr_hash(ptr, CONFIG_HAP_POOL_BUCKETS_BITS);
+}
+
+/* returns the pool hash bucket to use for the current thread. This should only
+ * be used when no pointer is available (e.g. count alloc failures).
+ */
+static forceinline unsigned int pool_tbucket(void)
+{
+	/* simple modulo spreads threads evenly across buckets */
+	return tid % CONFIG_HAP_POOL_BUCKETS;
+}
+
+/* ask the allocator to trim memory pools.
+ * This must run under thread isolation so that competing threads trying to
+ * allocate or release memory do not prevent the allocator from completing
+ * its job. We just have to be careful as callers might already be isolated
+ * themselves.
+ */
+void trim_all_pools(void)
+{
+	int isolated = thread_isolated();
+
+	/* only isolate if the caller hasn't already done it */
+	if (!isolated)
+		thread_isolate();
+
+	/* our own wrapper below, which dispatches to the right allocator */
+	malloc_trim(0);
+
+	if (!isolated)
+		thread_release();
+}
+
+/* check if we're using the same allocator as the one that provides
+ * malloc_trim() and mallinfo(). The principle is that on glibc, both
+ * malloc_trim() and mallinfo() are provided, and using mallinfo() we
+ * can check if malloc() is performed through glibc or any other one
+ * the executable was linked against (e.g. jemalloc). Prior to this we
+ * have to check whether we're running on jemalloc by verifying if the
+ * mallctl() function is provided. Its pointer will be used later.
+ * Sets my_mallctl, using_default_allocator and _malloc_trim as side effects.
+ */
+static void detect_allocator(void)
+{
+#if defined(__ELF__)
+	/* weak symbol: non-NULL only if jemalloc was linked in */
+	extern int mallctl(const char *, void *, size_t *, void *, size_t) __attribute__((weak));
+
+	my_mallctl = mallctl;
+#endif
+	if (!my_mallctl) {
+		/* trick: we won't enter here if mallctl() is known at link
+		 * time. This allows to detect if the symbol was changed since
+		 * the program was linked, indicating it's not running on the
+		 * expected allocator (due to an LD_PRELOAD) and that we must
+		 * be extra cautious and avoid some optimizations that are
+		 * known to break such as malloc_trim().
+		 */
+		my_mallctl = get_sym_curr_addr("mallctl");
+		using_default_allocator = (my_mallctl == NULL);
+	}
+
+	if (!my_mallctl) {
+#if defined(HA_HAVE_MALLOC_TRIM)
+#ifdef HA_HAVE_MALLINFO2
+		struct mallinfo2 mi1, mi2;
+#else
+		struct mallinfo mi1, mi2;
+#endif
+		void *ptr;
+
+		/* probe: if a 1-byte malloc() changes mallinfo() stats, the
+		 * glibc allocator really is the active one.
+		 */
+#ifdef HA_HAVE_MALLINFO2
+		mi1 = mallinfo2();
+#else
+		mi1 = mallinfo();
+#endif
+		ptr = DISGUISE(malloc(1));
+#ifdef HA_HAVE_MALLINFO2
+		mi2 = mallinfo2();
+#else
+		mi2 = mallinfo();
+#endif
+		free(DISGUISE(ptr));
+
+		using_default_allocator = !!memcmp(&mi1, &mi2, sizeof(mi1));
+#elif defined(HA_HAVE_MALLOC_ZONE)
+		using_default_allocator = (malloc_default_zone() != NULL);
+#endif
+	}
+
+	/* detect presence of malloc_trim() */
+	_malloc_trim = get_sym_next_addr("malloc_trim");
+}
+
+/* replace the libc's malloc_trim() so that we can also intercept the calls
+ * from child libraries when the allocator is not the default one. Returns 1
+ * if some memory could be released, 0 otherwise (or when trimming is
+ * disabled). pool_trim_in_progress is raised for the duration of the call.
+ */
+int malloc_trim(size_t pad)
+{
+	int ret = 0;
+
+	if (disable_trim)
+		return ret;
+
+	HA_ATOMIC_INC(&pool_trim_in_progress);
+
+	if (my_mallctl) {
+		/* here we're on jemalloc and malloc_trim() is called either
+		 * by haproxy or another dependency (the worst case that
+		 * normally crashes). Instead of just failing, we can actually
+		 * emulate it so let's do it now.
+		 */
+		unsigned int i, narenas = 0;
+		size_t len = sizeof(narenas);
+
+		/* purge every jemalloc arena one by one */
+		if (my_mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
+			for (i = 0; i < narenas; i ++) {
+				char mib[32] = {0};
+				snprintf(mib, sizeof(mib), "arena.%u.purge", i);
+				(void)my_mallctl(mib, NULL, NULL, NULL, 0);
+				ret = 1; // success
+			}
+		}
+	}
+	else if (!using_default_allocator) {
+		/* special allocators that can be LD_PRELOADed end here */
+		ret = 0; // did nothing
+	}
+	else if (_malloc_trim) {
+		/* we're typically on glibc and not overridden */
+		ret = _malloc_trim(pad);
+	}
+#if defined(HA_HAVE_MALLOC_ZONE)
+	else {
+		/* we're on MacOS, there's an equivalent mechanism */
+		vm_address_t *zones;
+		unsigned int i, nzones;
+
+		if (malloc_get_all_zones(0, NULL, &zones, &nzones) == KERN_SUCCESS) {
+			for (i = 0; i < nzones; i ++) {
+				malloc_zone_t *zone = (malloc_zone_t *)zones[i];
+
+				/* we cannot purge anonymous zones */
+				if (zone->zone_name) {
+					malloc_zone_pressure_relief(zone, 0);
+					ret = 1; // success
+				}
+			}
+		}
+	}
+#endif
+	HA_ATOMIC_DEC(&pool_trim_in_progress);
+
+	/* here we have ret=0 if nothing was released, or 1 if some were */
+	return ret;
+}
+
+/* returns 1 if the allocation should be artificially failed (fault injection
+ * driven by mem_fail_rate, skipped during startup), otherwise 0. The <pool>
+ * argument is currently unused; kept for the call-site signature.
+ */
+static int mem_should_fail(const struct pool_head *pool)
+{
+	int ret = 0;
+
+	if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
+		/* fail with probability mem_fail_rate percent */
+		if (mem_fail_rate > statistical_prng_range(100))
+			ret = 1;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Try to find an existing shared pool with the same characteristics and
+ * returns it, otherwise creates this one. NULL is returned if no memory
+ * is available for a new creation. Two flags are supported :
+ *   - MEM_F_SHARED to indicate that the pool may be shared with other users
+ *   - MEM_F_EXACT to indicate that the size must not be rounded up
+ */
+struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
+{
+	unsigned int extra_mark, extra_caller, extra;
+	struct pool_head *pool;
+	struct pool_head *entry;
+	struct list *start;
+	unsigned int align;
+	int thr __maybe_unused;
+
+	/* optional per-object debugging overhead: trailing tag and/or caller */
+	extra_mark = (pool_debugging & POOL_DBG_TAG) ? POOL_EXTRA_MARK : 0;
+	extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0;
+	extra = extra_mark + extra_caller;
+
+	if (!(pool_debugging & POOL_DBG_NO_CACHE)) {
+		/* we'll store two lists there, we need the room for this. Let's
+		 * make sure it's always OK even when including the extra word
+		 * that is stored after the pci struct.
+		 */
+		if (size + extra - extra_caller < sizeof(struct pool_cache_item))
+			size = sizeof(struct pool_cache_item) + extra_caller - extra;
+	}
+
+	/* Now we know our size is set to the strict minimum possible. It may
+	 * be OK for elements allocated with an exact size (e.g. buffers), but
+	 * we're going to round the size up 16 bytes to merge almost identical
+	 * pools together. We only round up however when we add the debugging
+	 * tag since it's used to detect overflows. Otherwise we only round up
+	 * to the size of a word to preserve alignment.
+	 */
+	if (!(flags & MEM_F_EXACT)) {
+		align = (pool_debugging & POOL_DBG_TAG) ? sizeof(void *) : 16;
+		size = ((size + align - 1) & -align);
+	}
+
+	/* TODO: thread: we do not lock pool list for now because all pools are
+	 * created during HAProxy startup (so before threads creation) */
+	start = &pools;
+	pool = NULL;
+
+	/* the pools list is sorted by ascending size; look for a shareable
+	 * pool of the same size, or for the insertion point of a new one.
+	 */
+	list_for_each_entry(entry, &pools, list) {
+		if (entry->size == size) {
+			/* either we can share this place and we take it, or
+			 * we look for a shareable one or for the next position
+			 * before which we will insert a new one.
+			 */
+			if ((flags & entry->flags & MEM_F_SHARED) &&
+			    (!(pool_debugging & POOL_DBG_DONT_MERGE) ||
+			     strcmp(name, entry->name) == 0)) {
+				/* we can share this one */
+				pool = entry;
+				DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
+				break;
+			}
+		}
+		else if (entry->size > size) {
+			/* insert before this one */
+			start = &entry->list;
+			break;
+		}
+	}
+
+	if (!pool) {
+		void *pool_addr;
+
+		/* over-allocate so that the returned head can be aligned */
+		pool_addr = calloc(1, sizeof(*pool) + __alignof__(*pool));
+		if (!pool_addr)
+			return NULL;
+
+		/* always provide an aligned pool */
+		pool = (struct pool_head*)((((size_t)pool_addr) + __alignof__(*pool)) & -(size_t)__alignof__(*pool));
+		pool->base_addr = pool_addr; // keep it, it's the address to free later
+
+		if (name)
+			strlcpy2(pool->name, name, sizeof(pool->name));
+		pool->alloc_sz = size + extra;
+		pool->size = size;
+		pool->flags = flags;
+		LIST_APPEND(start, &pool->list);
+
+		if (!(pool_debugging & POOL_DBG_NO_CACHE)) {
+			/* update per-thread pool cache if necessary */
+			for (thr = 0; thr < MAX_THREADS; thr++) {
+				LIST_INIT(&pool->cache[thr].list);
+				pool->cache[thr].tid = thr;
+				pool->cache[thr].pool = pool;
+			}
+		}
+	}
+	pool->users++;
+	return pool;
+}
+
+/* Tries to allocate an object for the pool <pool> using the system's allocator
+ * and directly returns it. The pool's allocated counter is checked but NOT
+ * updated, this is left to the caller, and but no other checks are performed.
+ * Returns NULL when the pool's limit is reached or the allocation fails, in
+ * which case the per-bucket failure counter and per-thread activity stats
+ * are updated.
+ */
+void *pool_get_from_os_noinc(struct pool_head *pool)
+{
+	if (!pool->limit || pool_allocated(pool) < pool->limit) {
+		void *ptr;
+
+		/* UAF debugging uses a dedicated (slower) allocator */
+		if (pool_debugging & POOL_DBG_UAF)
+			ptr = pool_alloc_area_uaf(pool->alloc_sz);
+		else
+			ptr = pool_alloc_area(pool->alloc_sz);
+		if (ptr)
+			return ptr;
+		_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
+	}
+	activity[tid].pool_fail++;
+	return NULL;
+
+}
+
+/* Releases a pool item back to the operating system but DOES NOT update
+ * the allocation counter, it's left to the caller to do it. It may be
+ * done before or after, it doesn't matter, the function does not use it.
+ */
+void pool_put_to_os_nodec(struct pool_head *pool, void *ptr)
+{
+	/* mirror of pool_get_from_os_noinc(): pick the matching free routine */
+	if (pool_debugging & POOL_DBG_UAF)
+		pool_free_area_uaf(ptr, pool->alloc_sz);
+	else
+		pool_free_area(ptr, pool->alloc_sz);
+}
+
+/* Tries to allocate an object for the pool <pool> using the system's allocator
+ * and directly returns it. The pool's counters are updated but the object is
+ * never cached, so this is usable with and without local or shared caches.
+ * <caller> is only recorded when DEBUG_POOL_TRACING is set. Returns NULL on
+ * allocation failure.
+ */
+void *pool_alloc_nocache(struct pool_head *pool, const void *caller)
+{
+	void *ptr = NULL;
+	uint bucket;
+
+	ptr = pool_get_from_os_noinc(pool);
+	if (!ptr)
+		return NULL;
+
+	/* account the object in the bucket derived from its own address */
+	bucket = pool_pbucket(ptr);
+	swrate_add_scaled_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used, POOL_AVG_SAMPLES/4);
+	_HA_ATOMIC_INC(&pool->buckets[bucket].allocated);
+	_HA_ATOMIC_INC(&pool->buckets[bucket].used);
+
+	/* keep track of where the element was allocated from */
+	POOL_DEBUG_SET_MARK(pool, ptr);
+	POOL_DEBUG_TRACE_CALLER(pool, (struct pool_cache_item *)ptr, caller);
+	return ptr;
+}
+
+/* Release a pool item back to the OS and keeps the pool's counters up to date.
+ * This is always defined even when pools are not enabled (their usage stats
+ * are maintained).
+ */
+void pool_free_nocache(struct pool_head *pool, void *ptr)
+{
+	/* same bucket as the one the object was accounted in at alloc time */
+	uint bucket = pool_pbucket(ptr);
+
+	_HA_ATOMIC_DEC(&pool->buckets[bucket].used);
+	_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
+	swrate_add_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used);
+
+	pool_put_to_os_nodec(pool, ptr);
+}
+
+
+/* Updates <pch>'s fill_pattern and fills the free area after <item> with it,
+ * up to <size> bytes. The item part is left untouched. Used by the cache
+ * integrity checks (POOL_DBG_INTEGRITY) together with pool_check_pattern().
+ */
+void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size)
+{
+	ulong *ptr = (ulong *)item;
+	uint ofs;
+	ulong u;
+
+	/* nothing to fill if the object is no larger than the cache header */
+	if (size <= sizeof(*item))
+		return;
+
+	/* Upgrade the fill_pattern to change about half of the bits
+	 * (to be sure to catch static flag corruption), and apply it.
+	 */
+	u = pch->fill_pattern += ~0UL / 3; // 0x55...55
+	ofs = sizeof(*item) / sizeof(*ptr);
+	while (ofs < size / sizeof(*ptr))
+		ptr[ofs++] = u;
+}
+
+/* check for a pool_cache_item integrity after extracting it from the cache. It
+ * must have been previously initialized using pool_fill_pattern(). If any
+ * corruption is detected, the function provokes an immediate crash.
+ * <caller> is reported in the diagnostic output.
+ */
+void pool_check_pattern(struct pool_cache_head *pch, struct pool_head *pool, struct pool_cache_item *item, const void *caller)
+{
+	const ulong *ptr = (const ulong *)item;
+	uint size = pool->size;
+	uint ofs;
+	ulong u;
+
+	if (size <= sizeof(*item))
+		return;
+
+	/* let's check that all words past *item are equal */
+	ofs = sizeof(*item) / sizeof(*ptr);
+	u = ptr[ofs++];
+	while (ofs < size / sizeof(*ptr)) {
+		if (unlikely(ptr[ofs] != u)) {
+			/* mismatch: someone wrote to a cached (free) object */
+			pool_inspect_item("cache corruption detected", pool, item, caller);
+			ABORT_NOW();
+		}
+		ofs++;
+	}
+}
+
+/* removes up to <count> items from the end of the local pool cache <ph> for
+ * pool <pool>. The shared pool is refilled with these objects in the limit
+ * of the number of acceptable objects, and the rest will be released to the
+ * OS. It is not a problem if <count> is larger than the number of objects in
+ * the local cache. The counters are automatically updated. Must not be used
+ * with pools disabled.
+ */
+static void pool_evict_last_items(struct pool_head *pool, struct pool_cache_head *ph, uint count)
+{
+	struct pool_cache_item *item;
+	struct pool_item *pi, *head = NULL;
+	void *caller = __builtin_return_address(0);
+	uint released = 0;
+	uint cluster = 0;
+	uint to_free_max;
+	uint bucket;
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	/* Note: this will be zero when global pools are disabled */
+	to_free_max = pool_releasable(pool);
+
+	/* evict from the tail (coldest objects first) */
+	while (released < count && !LIST_ISEMPTY(&ph->list)) {
+		item = LIST_PREV(&ph->list, typeof(item), by_pool);
+		BUG_ON(&item->by_pool == &ph->list);
+		if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+			pool_check_pattern(ph, pool, item, caller);
+		LIST_DELETE(&item->by_pool);
+		LIST_DELETE(&item->by_lru);
+
+		bucket = pool_pbucket(item);
+		_HA_ATOMIC_DEC(&pool->buckets[bucket].used);
+		swrate_add_opportunistic(&pool->buckets[bucket].needed_avg, POOL_AVG_SAMPLES, pool->buckets[bucket].used);
+
+		if (to_free_max > released || cluster) {
+			/* will never match when global pools are disabled */
+			pi = (struct pool_item *)item;
+			pi->next = NULL;
+			pi->down = head;
+			head = pi;
+			cluster++;
+			if (cluster >= CONFIG_HAP_POOL_CLUSTER_SIZE) {
+				/* enough to make a cluster */
+				pool_put_to_shared_cache(pool, head);
+				cluster = 0;
+				head = NULL;
+			}
+		} else {
+			/* does pool_free_nocache() with a known bucket */
+			_HA_ATOMIC_DEC(&pool->buckets[bucket].allocated);
+			pool_put_to_os_nodec(pool, item);
+		}
+
+		released++;
+	}
+
+	/* incomplete cluster left */
+	if (cluster)
+		pool_put_to_shared_cache(pool, head);
+
+	/* update the local cache accounting */
+	ph->count -= released;
+	pool_cache_count -= released;
+	pool_cache_bytes -= released * pool->size;
+}
+
+/* Evicts some of the oldest objects from one local cache, until its number of
+ * objects is no more than 16+1/8 of the total number of locally cached objects
+ * or the total size of the local cache is no more than 75% of its maximum (i.e.
+ * we don't want a single cache to use all the cache for itself). For this, the
+ * list is scanned in reverse. If <full> is non-null, all objects are evicted.
+ * Must not be used when pools are disabled.
+ */
+void pool_evict_from_local_cache(struct pool_head *pool, int full)
+{
+	struct pool_cache_head *ph = &pool->cache[tid];
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	/* evict one cluster at a time until the thresholds are satisfied */
+	while ((ph->count && full) ||
+	       (ph->count >= CONFIG_HAP_POOL_CLUSTER_SIZE &&
+	        ph->count >= 16 + pool_cache_count / 8 &&
+	        pool_cache_bytes > global.tune.pool_cache_size * 3 / 4)) {
+		pool_evict_last_items(pool, ph, CONFIG_HAP_POOL_CLUSTER_SIZE);
+	}
+}
+
+/* Evicts some of the oldest objects from the local cache, pushing them to the
+ * global pool. Iterates from the LRU tail until the local cache size falls
+ * below 7/8 of its configured maximum. Must not be used when pools are
+ * disabled.
+ */
+void pool_evict_from_local_caches()
+{
+	struct pool_cache_item *item;
+	struct pool_cache_head *ph;
+	struct pool_head *pool;
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	do {
+		item = LIST_PREV(&th_ctx->pool_lru_head, struct pool_cache_item *, by_lru);
+		BUG_ON(&item->by_lru == &th_ctx->pool_lru_head);
+		/* note: by definition we remove oldest objects so they also are the
+		 * oldest in their own pools, thus their next is the pool's head.
+		 */
+		ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
+		BUG_ON(ph->tid != tid);
+
+		/* recover the pool_head from its per-thread cache array slot */
+		pool = container_of(ph - tid, struct pool_head, cache);
+		BUG_ON(pool != ph->pool);
+
+		pool_evict_last_items(pool, ph, CONFIG_HAP_POOL_CLUSTER_SIZE);
+	} while (pool_cache_bytes > global.tune.pool_cache_size * 7 / 8);
+}
+
+/* Frees an object to the local cache, possibly pushing oldest objects to the
+ * shared cache, which itself may decide to release some of them to the OS.
+ * While it is unspecified what the object becomes past this point, it is
+ * guaranteed to be released from the users' perspective. A caller address may
+ * be passed and stored into the area when DEBUG_POOL_TRACING is set. Must not
+ * be used with pools disabled.
+ */
+void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
+{
+	struct pool_cache_item *item = (struct pool_cache_item *)ptr;
+	struct pool_cache_head *ph = &pool->cache[tid];
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	/* insert at the head: most recently freed is hottest */
+	LIST_INSERT(&ph->list, &item->by_pool);
+	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
+	ph->count++;
+	if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+		pool_fill_pattern(ph, item, pool->size);
+	pool_cache_count++;
+	pool_cache_bytes += pool->size;
+
+	/* past 75% of the cache budget, start shrinking */
+	if (unlikely(pool_cache_bytes > global.tune.pool_cache_size * 3 / 4)) {
+		if (ph->count >= 16 + pool_cache_count / 8 + CONFIG_HAP_POOL_CLUSTER_SIZE)
+			pool_evict_from_local_cache(pool, 0);
+		if (pool_cache_bytes > global.tune.pool_cache_size)
+			pool_evict_from_local_caches();
+	}
+}
+
+/* Tries to refill the local cache <pch> from the shared one for pool <pool>.
+ * This is only used when pools are in use and shared pools are enabled. No
+ * malloc() is attempted, and poisonning is never performed. The purpose is to
+ * get the fastest possible refilling so that the caller can easily check if
+ * the cache has enough objects for its use. Must not be used when pools are
+ * disabled.
+ */
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
+{
+	struct pool_cache_item *item;
+	struct pool_item *ret, *down;
+	uint bucket;
+	uint count;
+
+	BUG_ON(pool_debugging & POOL_DBG_NO_CACHE);
+
+	/* we'll need to reference the first element to figure the next one. We
+	 * must temporarily lock it so that nobody allocates then releases it,
+	 * or the dereference could fail. In order to limit the locking,
+	 * threads start from a bucket that depends on their ID.
+	 */
+
+	bucket = pool_tbucket();
+	ret = _HA_ATOMIC_LOAD(&pool->buckets[bucket].free_list);
+	count = 0;
+	do {
+		/* look for an apparently non-busy entry. If we hit a busy pool
+		 * we retry with another random bucket. And if we encounter a
+		 * NULL, we retry once with another random bucket. This is in
+		 * order to prevent object accumulation in other buckets.
+		 */
+		while (unlikely(ret == POOL_BUSY || (ret == NULL && count++ < 1))) {
+			bucket = statistical_prng() % CONFIG_HAP_POOL_BUCKETS;
+			ret = _HA_ATOMIC_LOAD(&pool->buckets[bucket].free_list);
+		}
+		if (ret == NULL)
+			return;
+	} while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->buckets[bucket].free_list, POOL_BUSY)) == POOL_BUSY));
+
+	/* the list emptied between our load and our XCHG: release the lock */
+	if (unlikely(ret == NULL)) {
+		HA_ATOMIC_STORE(&pool->buckets[bucket].free_list, NULL);
+		return;
+	}
+
+	/* this releases the lock */
+	HA_ATOMIC_STORE(&pool->buckets[bucket].free_list, ret->next);
+
+	/* now store the retrieved object(s) into the local cache. Note that
+	 * they don't all have the same hash and that it doesn't necessarily
+	 * match the one from the pool.
+	 */
+	count = 0;
+	for (; ret; ret = down) {
+		down = ret->down;
+		item = (struct pool_cache_item *)ret;
+		POOL_DEBUG_TRACE_CALLER(pool, item, NULL);
+		LIST_INSERT(&pch->list, &item->by_pool);
+		LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+		_HA_ATOMIC_INC(&pool->buckets[pool_pbucket(item)].used);
+		count++;
+		if (unlikely(pool_debugging & POOL_DBG_INTEGRITY))
+			pool_fill_pattern(pch, item, pool->size);
+
+	}
+	pch->count += count;
+	pool_cache_count += count;
+	pool_cache_bytes += count * pool->size;
+}
+
+/* Adds pool item cluster <item> to the shared cache, which contains <count>
+ * elements. The caller is advised to first check using pool_releasable() if
+ * it's wise to add this series of objects there. Both the pool and the item's
+ * head must be valid.
+ */
+void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item)
+{
+	struct pool_item *free_list;
+	uint bucket = pool_pbucket(item);
+
+	/* we prefer to put the item into the entry that corresponds to its own
+	 * hash so that on return it remains in the right place, but that's not
+	 * mandatory.
+	 */
+	free_list = _HA_ATOMIC_LOAD(&pool->buckets[bucket].free_list);
+	do {
+		/* look for an apparently non-busy entry */
+		while (unlikely(free_list == POOL_BUSY)) {
+			bucket = (bucket + 1) % CONFIG_HAP_POOL_BUCKETS;
+			free_list = _HA_ATOMIC_LOAD(&pool->buckets[bucket].free_list);
+		}
+		/* link our cluster ahead of the current head, then CAS it in */
+		_HA_ATOMIC_STORE(&item->next, free_list);
+		__ha_barrier_atomic_store();
+	} while (!_HA_ATOMIC_CAS(&pool->buckets[bucket].free_list, &free_list, item));
+	__ha_barrier_atomic_store();
+}
+
+/*
+ * This function frees whatever can be freed in pool <pool>, i.e. everything
+ * currently sitting in its shared free lists. No-op when pools or the global
+ * shared cache are disabled.
+ */
+void pool_flush(struct pool_head *pool)
+{
+	struct pool_item *next, *temp, *down;
+	uint bucket;
+
+	if (!pool || (pool_debugging & (POOL_DBG_NO_CACHE|POOL_DBG_NO_GLOBAL)))
+		return;
+
+	/* The loop below atomically detaches the head of the free list and
+	 * replaces it with a NULL. Then the list can be released.
+	 */
+	for (bucket = 0; bucket < CONFIG_HAP_POOL_BUCKETS; bucket++) {
+		next = pool->buckets[bucket].free_list;
+		while (1) {
+			/* wait out a concurrent holder of the POOL_BUSY lock */
+			while (unlikely(next == POOL_BUSY))
+				next = (void*)pl_wait_new_long((ulong*)&pool->buckets[bucket].free_list, (ulong)next);
+
+			if (next == NULL)
+				break;
+
+			next = _HA_ATOMIC_XCHG(&pool->buckets[bucket].free_list, POOL_BUSY);
+			if (next != POOL_BUSY) {
+				HA_ATOMIC_STORE(&pool->buckets[bucket].free_list, NULL);
+				break;
+			}
+		}
+
+		/* walk the detached list of clusters and free every object */
+		while (next) {
+			temp = next;
+			next = temp->next;
+			for (; temp; temp = down) {
+				down = temp->down;
+				_HA_ATOMIC_DEC(&pool->buckets[pool_pbucket(temp)].allocated);
+				pool_put_to_os_nodec(pool, temp);
+			}
+		}
+	}
+	/* here, we should have pool->allocated == pool->used */
+}
+
+/*
+ * This function frees whatever can be freed in all pools, but respecting
+ * the minimum thresholds imposed by owners. It makes sure to be alone to
+ * run by using thread_isolate(). <pool_ctx> is unused.
+ */
+void pool_gc(struct pool_head *pool_ctx)
+{
+	struct pool_head *entry;
+	int isolated = thread_isolated();
+
+	if (!isolated)
+		thread_isolate();
+
+	list_for_each_entry(entry, &pools, list) {
+		struct pool_item *temp, *down;
+		uint allocated = pool_allocated(entry);
+		uint used = pool_used(entry);
+		int bucket = 0;
+
+		/* release objects while more than minavail remain unused */
+		while ((int)(allocated - used) > (int)entry->minavail) {
+			/* ok let's find next entry to evict */
+			/* NOTE(review): the left operand is evaluated before the
+			 * bounds check, so when all remaining buckets are empty
+			 * this reads buckets[CONFIG_HAP_POOL_BUCKETS] once (one
+			 * past the array) — conditions should be swapped.
+			 */
+			while (!entry->buckets[bucket].free_list && bucket < CONFIG_HAP_POOL_BUCKETS)
+				bucket++;
+
+			if (bucket >= CONFIG_HAP_POOL_BUCKETS)
+				break;
+
+			/* detach one cluster and free all of its objects;
+			 * no locking needed since we run isolated.
+			 */
+			temp = entry->buckets[bucket].free_list;
+			entry->buckets[bucket].free_list = temp->next;
+			for (; temp; temp = down) {
+				down = temp->down;
+				allocated--;
+				_HA_ATOMIC_DEC(&entry->buckets[pool_pbucket(temp)].allocated);
+				pool_put_to_os_nodec(entry, temp);
+			}
+		}
+	}
+
+	trim_all_pools();
+
+	if (!isolated)
+		thread_release();
+}
+
+/*
+ * Returns a pointer to type <type> taken from the pool <pool_type> or
+ * dynamically allocated. In the first case, <pool_type> is updated to point to
+ * the next element in the list. <flags> is a binary-OR of POOL_F_* flags.
+ * Prefer using pool_alloc() which does the right thing without flags.
+ */
+void *__pool_alloc(struct pool_head *pool, unsigned int flags)
+{
+	void *p = NULL;
+	void *caller = __builtin_return_address(0);
+
+	/* optional fault injection, unless the caller forbids failure */
+	if (unlikely(pool_debugging & POOL_DBG_FAIL_ALLOC))
+		if (!(flags & POOL_F_NO_FAIL) && mem_should_fail(pool))
+			return NULL;
+
+	/* fast path: thread-local cache first, then OS allocation */
+	if (likely(!(pool_debugging & POOL_DBG_NO_CACHE)) && !p)
+		p = pool_get_from_cache(pool, caller);
+
+	if (unlikely(!p))
+		p = pool_alloc_nocache(pool, caller);
+
+	if (likely(p)) {
+#ifdef USE_MEMORY_PROFILING
+		if (unlikely(profiling & HA_PROF_MEMORY)) {
+			extern struct memprof_stats memprof_stats[MEMPROF_HASH_BUCKETS + 1];
+			struct memprof_stats *bin;
+
+			bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_P_ALLOC);
+			_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
+			_HA_ATOMIC_ADD(&bin->alloc_tot, pool->size);
+			_HA_ATOMIC_STORE(&bin->info, pool);
+			/* replace the caller with the allocated bin: this way
+			 * the pool_free() call will be able to update our
+			 * entry. We only do it for non-colliding entries though,
+			 * since these ones store the true caller location.
+			 */
+			if (bin >= &memprof_stats[0] && bin < &memprof_stats[MEMPROF_HASH_BUCKETS])
+				POOL_DEBUG_TRACE_CALLER(pool, (struct pool_cache_item *)p, bin);
+		}
+#endif
+		/* POOL_F_MUST_ZERO wins over poisoning */
+		if (unlikely(flags & POOL_F_MUST_ZERO))
+			memset(p, 0, pool->size);
+		else if (unlikely(!(flags & POOL_F_NO_POISON) && (pool_debugging & POOL_DBG_POISON)))
+			memset(p, mem_poison_byte, pool->size);
+	}
+	return p;
+}
+
+/*
+ * Puts a memory area back to the corresponding pool. <ptr> must be valid.
+ * Using pool_free() is preferred.
+ */
+void __pool_free(struct pool_head *pool, void *ptr)
+{
+	const void *caller = __builtin_return_address(0);
+
+	/* we'll get late corruption if we refill to the wrong pool or double-free */
+	POOL_DEBUG_CHECK_MARK(pool, ptr, caller);
+	POOL_DEBUG_RESET_MARK(pool, ptr);
+
+#ifdef USE_MEMORY_PROFILING
+	if (unlikely(profiling & HA_PROF_MEMORY) && ptr) {
+		extern struct memprof_stats memprof_stats[MEMPROF_HASH_BUCKETS + 1];
+		struct memprof_stats *bin;
+
+		bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_P_FREE);
+		_HA_ATOMIC_ADD(&bin->free_calls, 1);
+		_HA_ATOMIC_ADD(&bin->free_tot, pool->size);
+		_HA_ATOMIC_STORE(&bin->info, pool);
+
+		/* check if the caller is an allocator, and if so, let's update
+		 * its free() count.
+		 */
+		bin = *(struct memprof_stats**)(((char *)ptr) + pool->alloc_sz - sizeof(void*));
+		if (bin >= &memprof_stats[0] && bin < &memprof_stats[MEMPROF_HASH_BUCKETS]) {
+			_HA_ATOMIC_ADD(&bin->free_calls, 1);
+			_HA_ATOMIC_ADD(&bin->free_tot, pool->size);
+		}
+	}
+#endif
+
+	/* objects larger than the whole cache budget bypass the cache */
+	if (unlikely((pool_debugging & POOL_DBG_NO_CACHE) ||
+		     global.tune.pool_cache_size < pool->size)) {
+		pool_free_nocache(pool, ptr);
+		return;
+	}
+
+	pool_put_to_cache(pool, ptr, caller);
+}
+
+/*
+ * This function destroys a pool by freeing it completely, unless it's still
+ * in use. This should be called only under extreme circumstances. It always
+ * returns NULL if the resulting pool is empty, easing the clearing of the old
+ * pointer, otherwise it returns the pool.
+ */
+void *pool_destroy(struct pool_head *pool)
+{
+	if (pool) {
+		/* drain this thread's local cache first */
+		if (!(pool_debugging & POOL_DBG_NO_CACHE))
+			pool_evict_from_local_cache(pool, 1);
+
+		pool_flush(pool);
+		/* still-used objects prevent destruction */
+		if (pool_used(pool))
+			return pool;
+		pool->users--;
+		if (!pool->users) {
+			LIST_DELETE(&pool->list);
+			/* note that if used == 0, the cache is empty */
+			free(pool->base_addr);
+		}
+	}
+	return NULL;
+}
+
+/* This destroys all pools on exit. It is *not* thread safe. */
+void pool_destroy_all()
+{
+	struct pool_head *entry, *back;
+
+	list_for_each_entry_safe(entry, back, &pools, list) {
+		/* there's only one occurrence of each pool in the list,
+		 * and we're exiting instead of looping on the whole
+		 * list just to decrement users, force it to 1 here.
+		 */
+		entry->users = 1;
+		pool_destroy(entry);
+	}
+}
+
+/* carefully inspects an item upon fatal error and emit diagnostics */
+void pool_inspect_item(const char *msg, struct pool_head *pool, const void *item, const void *caller)
+{
+ const struct pool_head *the_pool = NULL;
+
+ chunk_printf(&trash,
+ "FATAL: pool inconsistency detected in thread %d: %s.\n"
+ " caller: %p (",
+ tid + 1, msg, caller);
+
+ resolve_sym_name(&trash, NULL, caller);
+
+ chunk_appendf(&trash,
+ ")\n"
+ " item: %p\n"
+ " pool: %p ('%s', size %u, real %u, users %u)\n",
+ item, pool, pool->name, pool->size, pool->alloc_sz, pool->users);
+
+ if (pool_debugging & POOL_DBG_TAG) {
+ const void **pool_mark;
+ struct pool_head *ph;
+ const void *tag;
+
+ pool_mark = (const void **)(((char *)item) + pool->size);
+ tag = may_access(pool_mark) ? *pool_mark : NULL;
+ if (tag == pool) {
+ chunk_appendf(&trash, " tag: @%p = %p (%s)\n", pool_mark, tag, pool->name);
+ the_pool = pool;
+ }
+ else {
+ if (!may_access(pool_mark))
+ chunk_appendf(&trash, "Tag not accessible. ");
+ else
+ chunk_appendf(&trash, "Tag does not match (%p). ", tag);
+
+ list_for_each_entry(ph, &pools, list) {
+ pool_mark = (const void **)(((char *)item) + ph->size);
+ if (!may_access(pool_mark))
+ continue;
+ tag = *pool_mark;
+
+ if (tag == ph) {
+ if (!the_pool)
+ chunk_appendf(&trash, "Possible origin pool(s):\n");
+
+ chunk_appendf(&trash, " tag: @%p = %p (%s, size %u, real %u, users %u)\n",
+ pool_mark, tag, ph->name, ph->size, ph->alloc_sz, ph->users);
+ if (!the_pool || the_pool->size < ph->size)
+ the_pool = ph;
+ }
+ }
+
+ if (!the_pool) {
+ const char *start, *end, *p;
+
+ pool_mark = (const void **)(((char *)item) + pool->size);
+ chunk_appendf(&trash,
+ "Tag does not match any other pool.\n"
+ "Contents around address %p+%lu=%p:\n",
+ item, (ulong)((const void*)pool_mark - (const void*)item),
+ pool_mark);
+
+ /* dump in word-sized blocks */
+ start = (const void *)(((uintptr_t)pool_mark - 32) & -sizeof(void*));
+ end = (const void *)(((uintptr_t)pool_mark + 32 + sizeof(void*) - 1) & -sizeof(void*));
+
+ while (start < end) {
+ dump_addr_and_bytes(&trash, " ", start, sizeof(void*));
+ chunk_strcat(&trash, " [");
+ for (p = start; p < start + sizeof(void*); p++) {
+ if (!may_access(p))
+ chunk_strcat(&trash, "*");
+ else if (isprint((unsigned char)*p))
+ chunk_appendf(&trash, "%c", *p);
+ else
+ chunk_strcat(&trash, ".");
+ }
+
+ if (may_access(start))
+ tag = *(const void **)start;
+ else
+ tag = NULL;
+
+ if (tag == pool) {
+ /* the pool can often be there so let's detect it */
+ chunk_appendf(&trash, "] [pool:%s", pool->name);
+ }
+ else if (tag) {
+ /* print pointers that resolve to a symbol */
+ size_t back_data = trash.data;
+ chunk_strcat(&trash, "] [");
+ if (!resolve_sym_name(&trash, NULL, tag))
+ trash.data = back_data;
+ }
+
+ chunk_strcat(&trash, "]\n");
+ start = p;
+ }
+ }
+ }
+ }
+
+ if (pool_debugging & POOL_DBG_CALLER) {
+ struct buffer *trash2 = get_trash_chunk();
+ const struct pool_head *ph;
+ const void **pool_mark;
+ const void *tag, *rec_tag;
+
+ ph = the_pool ? the_pool : pool;
+ pool_mark = (const void **)(((char *)item) + ph->alloc_sz - sizeof(void*));
+ rec_tag = may_access(pool_mark) ? *pool_mark : NULL;
+
+ if (rec_tag && resolve_sym_name(trash2, NULL, rec_tag))
+ chunk_appendf(&trash,
+ "Recorded caller if pool '%s':\n @%p (+%04u) = %p (%s)\n",
+ ph->name, pool_mark, (uint)(ph->alloc_sz - sizeof(void*)),
+ rec_tag, trash2->area);
+
+ if (!the_pool) {
+ /* the pool couldn't be formally verified */
+ chunk_appendf(&trash, "Other possible callers:\n");
+ list_for_each_entry(ph, &pools, list) {
+ if (ph == pool)
+ continue;
+ pool_mark = (const void **)(((char *)item) + ph->alloc_sz - sizeof(void*));
+ if (!may_access(pool_mark))
+ continue;
+ tag = *pool_mark;
+ if (tag == rec_tag)
+ continue;
+
+ /* see if we can resolve something */
+ chunk_printf(trash2, "@%p (+%04u) = %p (", pool_mark, (uint)(ph->alloc_sz - sizeof(void*)), tag);
+ if (resolve_sym_name(trash2, NULL, tag)) {
+ chunk_appendf(trash2, ")");
+ chunk_appendf(&trash,
+ " %s [as pool %s, size %u, real %u, users %u]\n",
+ trash2->area, ph->name, ph->size, ph->alloc_sz, ph->users);
+ }
+ }
+ }
+ }
+
+ chunk_appendf(&trash, "\n");
+ DISGUISE(write(2, trash.area, trash.data));
+}
+
+/* used by qsort in "show pools" to sort by name */
+static int cmp_dump_pools_name(const void *a, const void *b)
+{
+ const struct pool_dump_info *l = (const struct pool_dump_info *)a;
+ const struct pool_dump_info *r = (const struct pool_dump_info *)b;
+
+ return strcmp(l->entry->name, r->entry->name);
+}
+
+/* used by qsort in "show pools" to sort by item size */
+static int cmp_dump_pools_size(const void *a, const void *b)
+{
+ const struct pool_dump_info *l = (const struct pool_dump_info *)a;
+ const struct pool_dump_info *r = (const struct pool_dump_info *)b;
+
+ if (l->entry->size > r->entry->size)
+ return -1;
+ else if (l->entry->size < r->entry->size)
+ return 1;
+ else
+ return 0;
+}
+
+/* used by qsort in "show pools" to sort by usage */
+static int cmp_dump_pools_usage(const void *a, const void *b)
+{
+ const struct pool_dump_info *l = (const struct pool_dump_info *)a;
+ const struct pool_dump_info *r = (const struct pool_dump_info *)b;
+
+ if (l->alloc_bytes > r->alloc_bytes)
+ return -1;
+ else if (l->alloc_bytes < r->alloc_bytes)
+ return 1;
+ else
+ return 0;
+}
+
+/* will not dump more than this number of entries. Anything beyond this will
+ * likely not fit into a regular output buffer anyway.
+ */
+#define POOLS_MAX_DUMPED_ENTRIES 1024
+
/* This function dumps memory usage information into the trash buffer.
 * It may sort by a criterion if <by_what> is non-zero (1 = by name,
 * 2 = by item size, 3 = by total usage), and limit the number of output
 * lines if <max> is non-zero. It may limit only to pools whose names
 * start with <pfx> if <pfx> is non-null.
 */
void dump_pools_to_trash(int by_what, int max, const char *pfx)
{
	struct pool_dump_info pool_info[POOLS_MAX_DUMPED_ENTRIES];
	struct pool_head *entry;
	unsigned long long allocated, used;
	int nbpools, i;
	unsigned long long cached_bytes = 0;
	uint cached = 0;
	uint alloc_items;

	allocated = used = nbpools = 0;

	/* first pass: snapshot the eligible pools into pool_info[] so
	 * that sorting does not have to touch the live list.
	 */
	list_for_each_entry(entry, &pools, list) {
		if (nbpools >= POOLS_MAX_DUMPED_ENTRIES)
			break;

		alloc_items = pool_allocated(entry);
		/* do not dump unused entries when sorting by usage */
		if (by_what == 3 && !alloc_items)
			continue;

		/* verify the pool name if a prefix is requested */
		if (pfx && strncmp(entry->name, pfx, strlen(pfx)) != 0)
			continue;

		/* sum objects currently sitting in all per-thread caches */
		if (!(pool_debugging & POOL_DBG_NO_CACHE)) {
			for (cached = i = 0; i < global.nbthread; i++)
				cached += entry->cache[i].count;
		}
		pool_info[nbpools].entry = entry;
		pool_info[nbpools].alloc_items = alloc_items;
		pool_info[nbpools].alloc_bytes = (ulong)entry->size * alloc_items;
		pool_info[nbpools].used_items = pool_used(entry);
		pool_info[nbpools].cached_items = cached;
		pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
		pool_info[nbpools].failed_items = pool_failed(entry);
		nbpools++;
	}

	if (by_what == 1) /* sort by name */
		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_name);
	else if (by_what == 2) /* sort by item size */
		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_size);
	else if (by_what == 3) /* sort by total usage */
		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_usage);

	chunk_printf(&trash, "Dumping pools usage");
	if (!max || max >= POOLS_MAX_DUMPED_ENTRIES)
		max = POOLS_MAX_DUMPED_ENTRIES;
	if (nbpools >= max)
		chunk_appendf(&trash, " (limited to the first %u entries)", max);
	chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n");

	/* second pass: emit one line per retained pool and accumulate
	 * the grand totals.
	 */
	for (i = 0; i < nbpools && i < max; i++) {
		chunk_appendf(&trash, " - Pool %s (%lu bytes) : %lu allocated (%lu bytes), %lu used"
			      " (~%lu by thread caches)"
			      ", needed_avg %lu, %lu failures, %u users, @%p%s\n",
			      pool_info[i].entry->name, (ulong)pool_info[i].entry->size,
			      pool_info[i].alloc_items, pool_info[i].alloc_bytes,
			      pool_info[i].used_items, pool_info[i].cached_items,
			      pool_info[i].need_avg, pool_info[i].failed_items,
			      pool_info[i].entry->users, pool_info[i].entry,
			      (pool_info[i].entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

		cached_bytes += pool_info[i].cached_items * (ulong)pool_info[i].entry->size;
		allocated += pool_info[i].alloc_items * (ulong)pool_info[i].entry->size;
		used += pool_info[i].used_items * (ulong)pool_info[i].entry->size;
	}

	chunk_appendf(&trash, "Total: %d pools, %llu bytes allocated, %llu used"
		      " (~%llu by thread caches)"
		      ".\n",
		      nbpools, allocated, used, cached_bytes
		      );
}
+
/* Dump statistics on pools usage: emits the unsorted, unlimited dump of
 * all pools to stderr.
 */
void dump_pools(void)
{
	dump_pools_to_trash(0, 0, NULL);
	qfprintf(stderr, "%s", trash.area);
}
+
+/* This function returns the total number of failed pool allocations */
+int pool_total_failures()
+{
+ struct pool_head *entry;
+ int failed = 0;
+
+ list_for_each_entry(entry, &pools, list)
+ failed += pool_failed(entry);
+ return failed;
+}
+
+/* This function returns the total amount of memory allocated in pools (in bytes) */
+unsigned long long pool_total_allocated()
+{
+ struct pool_head *entry;
+ unsigned long long allocated = 0;
+
+ list_for_each_entry(entry, &pools, list)
+ allocated += pool_allocated(entry) * (ullong)entry->size;
+ return allocated;
+}
+
+/* This function returns the total amount of memory used in pools (in bytes) */
+unsigned long long pool_total_used()
+{
+ struct pool_head *entry;
+ unsigned long long used = 0;
+
+ list_for_each_entry(entry, &pools, list)
+ used += pool_used(entry) * (ullong)entry->size;
+ return used;
+}
+
/* This function parses a string made of a set of debugging features as
 * specified after -dM on the command line, and will set pool_debugging
 * accordingly. On success it returns a strictly positive value. It may
 * return zero with the first warning in <err>, -1 with a help message in
 * <err>, or -2 with the first error in <err>. <err> is undefined on
 * success, and will be non-null and locally allocated on help/error/warning.
 * The caller must free it. Warnings are used to report features that were not
 * enabled at build time, and errors are used to report unknown features.
 */
int pool_parse_debugging(const char *str, char **err)
{
	struct ist args;
	char *end;
	uint new_dbg;
	int v;


	/* if it's empty or starts with a number, it's the mem poisonning byte */
	v = strtol(str, &end, 0);
	if (!*end || *end == ',') {
		mem_poison_byte = *str ? v : 'P';
		if (mem_poison_byte >= 0)
			pool_debugging |= POOL_DBG_POISON;
		else
			pool_debugging &= ~POOL_DBG_POISON;
		str = end;
	}

	/* work on a local copy so that a parse error leaves the global
	 * flags untouched.
	 */
	new_dbg = pool_debugging;

	for (args = ist(str); istlen(args); args = istadv(istfind(args, ','), 1)) {
		struct ist feat = iststop(args, ',');

		if (!istlen(feat))
			continue;

		if (isteq(feat, ist("help"))) {
			ha_free(err);
			memprintf(err,
				  "-dM alone enables memory poisonning with byte 0x50 on allocation. A numeric\n"
				  "value may be appended immediately after -dM to use another value (0 supported).\n"
				  "Then an optional list of comma-delimited keywords may be appended to set or\n"
				  "clear some debugging options ('*' marks the current setting):\n\n"
				  " set clear description\n"
				  " -----------------+-----------------+-----------------------------------------\n");

			for (v = 0; dbg_options[v].flg; v++) {
				memprintf(err, "%s %c %-15s|%c %-15s| %s\n",
					  *err,
					  (pool_debugging & dbg_options[v].flg) ? '*' : ' ',
					  dbg_options[v].set,
					  (pool_debugging & dbg_options[v].flg) ? ' ' : '*',
					  dbg_options[v].clr,
					  dbg_options[v].hlp);
			}

			/* NOTE(review): the last example below duplicates the
			 * earlier "post-free cache corruptions" line — confirm
			 * whether this repetition is intended.
			 */
			memprintf(err,
				  "%s -----------------+-----------------+-----------------------------------------\n"
				  "Examples:\n"
				  " Disable merging and enable poisonning with byte 'P': -dM0x50,no-merge\n"
				  " Randomly fail allocations: -dMfail\n"
				  " Detect out-of-bound corruptions: -dMno-merge,tag\n"
				  " Detect post-free cache corruptions: -dMno-merge,cold-first,integrity,caller\n"
				  " Detect all cache corruptions: -dMno-merge,cold-first,integrity,tag,caller\n"
				  " Detect UAF (disables cache, very slow): -dMuaf\n"
				  " Detect post-cache UAF: -dMuaf,cache,no-merge,cold-first,integrity,tag,caller\n"
				  " Detect post-free cache corruptions: -dMno-merge,cold-first,integrity,caller\n",
				  *err);
			return -1;
		}

		for (v = 0; dbg_options[v].flg; v++) {
			if (isteq(feat, ist(dbg_options[v].set))) {
				new_dbg |= dbg_options[v].flg;
				/* UAF implicitly disables caching, but it's
				 * still possible to forcefully re-enable it.
				 */
				if (dbg_options[v].flg == POOL_DBG_UAF)
					new_dbg |= POOL_DBG_NO_CACHE;
				/* fail should preset the tune.fail-alloc ratio to 1% */
				if (dbg_options[v].flg == POOL_DBG_FAIL_ALLOC)
					mem_fail_rate = 1;
				break;
			}
			else if (isteq(feat, ist(dbg_options[v].clr))) {
				new_dbg &= ~dbg_options[v].flg;
				/* no-fail should reset the tune.fail-alloc ratio */
				if (dbg_options[v].flg == POOL_DBG_FAIL_ALLOC)
					mem_fail_rate = 0;
				break;
			}
		}

		/* end of table reached without a match: unknown keyword */
		if (!dbg_options[v].flg) {
			memprintf(err, "unknown pool debugging feature <%.*s>", (int)istlen(feat), istptr(feat));
			return -2;
		}
	}

	pool_debugging = new_dbg;
	return 1;
}
+
+/* parse a "show pools" command. It returns 1 on failure, 0 if it starts to dump. */
+static int cli_parse_show_pools(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_pools_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ int arg;
+
+ for (arg = 2; *args[arg]; arg++) {
+ if (strcmp(args[arg], "byname") == 0) {
+ ctx->by_what = 1; // sort output by name
+ }
+ else if (strcmp(args[arg], "bysize") == 0) {
+ ctx->by_what = 2; // sort output by item size
+ }
+ else if (strcmp(args[arg], "byusage") == 0) {
+ ctx->by_what = 3; // sort output by total allocated size
+ }
+ else if (strcmp(args[arg], "match") == 0 && *args[arg+1]) {
+ ctx->prefix = strdup(args[arg+1]); // only pools starting with this
+ arg++;
+ }
+ else if (isdigit((unsigned char)*args[arg])) {
+ ctx->maxcnt = atoi(args[arg]); // number of entries to dump
+ }
+ else
+ return cli_err(appctx, "Expects either 'byname', 'bysize', 'byusage', 'match <pfx>', or a max number of output lines.\n");
+ }
+ return 0;
+}
+
/* release the "show pools" context: frees the optional name prefix that
 * cli_parse_show_pools() may have strdup()'d.
 */
static void cli_release_show_pools(struct appctx *appctx)
{
	struct show_pools_ctx *ctx = appctx->svcctx;

	ha_free(&ctx->prefix);
}
+
+/* This function dumps memory usage information onto the stream connector's
+ * read buffer. It returns 0 as long as it does not complete, non-zero upon
+ * completion. No state is used.
+ */
+static int cli_io_handler_dump_pools(struct appctx *appctx)
+{
+ struct show_pools_ctx *ctx = appctx->svcctx;
+
+ dump_pools_to_trash(ctx->by_what, ctx->maxcnt, ctx->prefix);
+ if (applet_putchk(appctx, &trash) == -1)
+ return 0;
+ return 1;
+}
+
+/* callback used to create early pool <name> of size <size> and store the
+ * resulting pointer into <ptr>. If the allocation fails, it quits with after
+ * emitting an error message.
+ */
+void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
+{
+ *ptr = create_pool(name, size, MEM_F_SHARED);
+ if (!*ptr) {
+ ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
+ name, size, strerror(errno));
+ exit(1);
+ }
+}
+
+/* Initializes all per-thread arrays on startup */
+static void init_pools()
+{
+ int thr;
+
+ for (thr = 0; thr < MAX_THREADS; thr++) {
+ LIST_INIT(&ha_thread_ctx[thr].pool_lru_head);
+ }
+
+ detect_allocator();
+}
+
+INITCALL0(STG_PREPARE, init_pools);
+
+/* Report in build options if trim is supported */
+static void pools_register_build_options(void)
+{
+ if (!using_default_allocator) {
+ char *ptr = NULL;
+ memprintf(&ptr, "Running with a replaced memory allocator (e.g. via LD_PRELOAD).");
+ hap_register_build_opts(ptr, 1);
+ mark_tainted(TAINTED_REPLACED_MEM_ALLOCATOR);
+ }
+}
+INITCALL0(STG_REGISTER, pools_register_build_options);
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "show", "pools", NULL }, "show pools [by*] [match <pfx>] [nb] : report information about the memory pools usage", cli_parse_show_pools, cli_io_handler_dump_pools, cli_release_show_pools },
+ {{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+
+/* config parser for global "tune.fail-alloc" */
+static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+ mem_fail_rate = atoi(args[1]);
+ if (mem_fail_rate < 0 || mem_fail_rate > 100) {
+ memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+/* config parser for global "tune.memory.hot-size" */
+static int mem_parse_global_hot_size(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ long size;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ size = atol(args[1]);
+ if (size <= 0) {
+ memprintf(err, "'%s' expects a strictly positive value.", args[0]);
+ return -1;
+ }
+
+ global.tune.pool_cache_size = size;
+ return 0;
+}
+
/* config parser for global "no-memory-trimming": takes no argument and
 * simply raises the global <disable_trim> flag.
 */
static int mem_parse_global_no_mem_trim(char **args, int section_type, struct proxy *curpx,
					const struct proxy *defpx, const char *file, int line,
					char **err)
{
	if (too_many_args(0, args, err, NULL))
		return -1;
	disable_trim = 1;
	return 0;
}
+
+/* register global config keywords */
+static struct cfg_kw_list mem_cfg_kws = {ILH, {
+ { CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
+ { CFG_GLOBAL, "tune.memory.hot-size", mem_parse_global_hot_size },
+ { CFG_GLOBAL, "no-memory-trimming", mem_parse_global_no_mem_trim },
+ { 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_quic.c b/src/proto_quic.c
new file mode 100644
index 0000000..899cffe
--- /dev/null
+++ b/src/proto_quic.c
@@ -0,0 +1,799 @@
+/*
+ * AF_INET/AF_INET6 QUIC protocol layer.
+ *
+ * Copyright 2020 Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/udp.h>
+#include <netinet/in.h>
+
+#include <import/ebtree-t.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/cbuf.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/port_range.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/proto_udp.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+
/* per-thread quic datagram handlers */
struct quic_dghdlr *quic_dghdlrs;

/* NOTE(review): presumably a global ebtree of connection IDs — confirm
 * against its users in quic_conn.c/quic_sock.c.
 */
struct eb_root *quic_cid_tree;

/* global CID trees */
#define QUIC_CID_TREES_CNT 256
struct quic_cid_tree *quic_cid_trees;

/* Size of the internal buffer of QUIC RX buffer at the fd level */
#define QUIC_RX_BUFSZ (1UL << 18)

/* pool backing the per-receiver RX buffers allocated in
 * quic_alloc_rxbufs_listener() below.
 */
DECLARE_STATIC_POOL(pool_head_quic_rxbuf, "quic_rxbuf", QUIC_RX_BUFSZ);
+
+static int quic_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static int quic_connect_server(struct connection *conn, int flags);
+static void quic_enable_listener(struct listener *listener);
+static void quic_disable_listener(struct listener *listener);
+static int quic_set_affinity(struct connection *conn, int new_tid);
+
/* Note: must not be declared <const> as its list will be overwritten.
 * QUIC over IPv4: a stream-type transport (xprt) carried over a datagram
 * (UDP) socket, hence PROTO_TYPE_STREAM above SOCK_DGRAM below.
 */
struct protocol proto_quic4 = {
	.name           = "quic4",

	/* connection layer */
	.xprt_type      = PROTO_TYPE_STREAM,
	.listen         = quic_bind_listener,
	.enable         = quic_enable_listener,
	.disable        = quic_disable_listener,
	.add            = default_add_listener,
	.unbind         = default_unbind_listener,
	.suspend        = default_suspend_listener,
	.resume         = default_resume_listener,
	.accept_conn    = quic_sock_accept_conn,
	.get_src        = quic_sock_get_src,
	.get_dst        = quic_sock_get_dst,
	.connect        = quic_connect_server,
	.set_affinity   = quic_set_affinity,

	/* binding layer */
	.rx_suspend     = udp_suspend_receiver,
	.rx_resume      = udp_resume_receiver,

	/* address family */
	.fam            = &proto_fam_inet4,

	/* socket layer */
	.proto_type     = PROTO_TYPE_DGRAM,
	.sock_type      = SOCK_DGRAM,
	.sock_prot      = IPPROTO_UDP,
	.rx_enable      = sock_enable,
	.rx_disable     = sock_disable,
	.rx_unbind      = sock_unbind,
	.rx_listening   = quic_sock_accepting_conn,
	.default_iocb   = quic_lstnr_sock_fd_iocb,
	.receivers      = LIST_HEAD_INIT(proto_quic4.receivers),
	.nb_receivers   = 0,
#ifdef SO_REUSEPORT
	.flags          = PROTO_F_REUSEPORT_SUPPORTED,
#endif
};

INITCALL1(STG_REGISTER, protocol_register, &proto_quic4);
+
/* Note: must not be declared <const> as its list will be overwritten.
 * QUIC over IPv6: identical to proto_quic4 above except for the address
 * family.
 */
struct protocol proto_quic6 = {
	.name           = "quic6",

	/* connection layer */
	.xprt_type      = PROTO_TYPE_STREAM,
	.listen         = quic_bind_listener,
	.enable         = quic_enable_listener,
	.disable        = quic_disable_listener,
	.add            = default_add_listener,
	.unbind         = default_unbind_listener,
	.suspend        = default_suspend_listener,
	.resume         = default_resume_listener,
	.accept_conn    = quic_sock_accept_conn,
	.get_src        = quic_sock_get_src,
	.get_dst        = quic_sock_get_dst,
	.connect        = quic_connect_server,
	.set_affinity   = quic_set_affinity,

	/* binding layer */
	.rx_suspend     = udp_suspend_receiver,
	.rx_resume      = udp_resume_receiver,

	/* address family */
	.fam            = &proto_fam_inet6,

	/* socket layer */
	.proto_type     = PROTO_TYPE_DGRAM,
	.sock_type      = SOCK_DGRAM,
	.sock_prot      = IPPROTO_UDP,
	.rx_enable      = sock_enable,
	.rx_disable     = sock_disable,
	.rx_unbind      = sock_unbind,
	.rx_listening   = quic_sock_accepting_conn,
	.default_iocb   = quic_lstnr_sock_fd_iocb,
	.receivers      = LIST_HEAD_INIT(proto_quic6.receivers),
	.nb_receivers   = 0,
#ifdef SO_REUSEPORT
	.flags          = PROTO_F_REUSEPORT_SUPPORTED,
#endif
};

INITCALL1(STG_REGISTER, protocol_register, &proto_quic6);
+
/* Binds ipv4/ipv6 address <local> to socket <fd>, unless <flags> is set, in
 * which case we try to bind <remote>. <flags> is a 2-bit field consisting of :
 * - 0 : ignore remote address (may even be a NULL pointer)
 * - 1 : use provided address
 * - 2 : use provided port
 * - 3 : use both
 *
 * The foreign binding method used here is linux_tproxy: we directly bind to
 * the foreign address.
 * NOTE(review): the original comment referred to "the second one" as a
 * fallback although only one method is listed — likely inherited from the
 * TCP variant of this function; confirm against proto_tcp.c.
 * This function returns 0 when everything's OK, 1 if it could not bind to
 * the local address, 2 if it could not bind to the foreign address.
 */
int quic_bind_socket(int fd, int flags, struct sockaddr_storage *local, struct sockaddr_storage *remote)
{
	struct sockaddr_storage bind_addr;
	int foreign_ok = 0;
	int ret;
	/* remember per-thread whether transparent binding works to avoid
	 * retrying a known-unsupported method.
	 */
	static THREAD_LOCAL int ip_transp_working = 1;
	static THREAD_LOCAL int ip6_transp_working = 1;

	switch (local->ss_family) {
	case AF_INET:
		if (flags && ip_transp_working) {
			/* This deserves some explanation. Some platforms will support
			 * multiple combinations of certain methods, so we try the
			 * supported ones until one succeeds.
			 */
			if (sock_inet4_make_foreign(fd))
				foreign_ok = 1;
			else
				ip_transp_working = 0;
		}
		break;
	case AF_INET6:
		if (flags && ip6_transp_working) {
			if (sock_inet6_make_foreign(fd))
				foreign_ok = 1;
			else
				ip6_transp_working = 0;
		}
		break;
	}

	if (flags) {
		/* build the foreign address to bind from <remote>, keeping
		 * only the parts requested by <flags>.
		 */
		memset(&bind_addr, 0, sizeof(bind_addr));
		bind_addr.ss_family = remote->ss_family;
		switch (remote->ss_family) {
		case AF_INET:
			if (flags & 1)
				((struct sockaddr_in *)&bind_addr)->sin_addr = ((struct sockaddr_in *)remote)->sin_addr;
			if (flags & 2)
				((struct sockaddr_in *)&bind_addr)->sin_port = ((struct sockaddr_in *)remote)->sin_port;
			break;
		case AF_INET6:
			if (flags & 1)
				((struct sockaddr_in6 *)&bind_addr)->sin6_addr = ((struct sockaddr_in6 *)remote)->sin6_addr;
			if (flags & 2)
				((struct sockaddr_in6 *)&bind_addr)->sin6_port = ((struct sockaddr_in6 *)remote)->sin6_port;
			break;
		default:
			/* we don't want to try to bind to an unknown address family */
			foreign_ok = 0;
		}
	}

	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (foreign_ok) {
		if (is_inet_addr(&bind_addr)) {
			ret = bind(fd, (struct sockaddr *)&bind_addr, get_addr_len(&bind_addr));
			if (ret < 0)
				return 2;
		}
	}
	else {
		if (is_inet_addr(local)) {
			ret = bind(fd, (struct sockaddr *)local, get_addr_len(local));
			if (ret < 0)
				return 1;
		}
	}

	if (!flags)
		return 0;

	if (!foreign_ok)
		/* we could not bind to a foreign address */
		return 2;

	return 0;
}
+
+/*
+ * This function initiates a QUIC connection establishment to the target assigned
+ * to connection <conn> using (si->{target,dst}). A source address may be
+ * pointed to by conn->src in case of transparent proxying. Normal source
+ * bind addresses are still determined locally (due to the possible need of a
+ * source port). conn->target may point either to a valid server or to a backend,
+ * depending on conn->target. Only OBJ_TYPE_PROXY and OBJ_TYPE_SERVER are
+ * supported. The <data> parameter is a boolean indicating whether there are data
+ * waiting for being sent or not, in order to adjust data write polling and on
+ * some platforms, the ability to avoid an empty initial ACK. The <flags> argument
+ * is not used.
+ *
+ * Note that a pending send_proxy message accounts for data.
+ *
+ * It can return one of :
+ * - SF_ERR_NONE if everything's OK
+ * - SF_ERR_SRVTO if there are no more servers
+ * - SF_ERR_SRVCL if the connection was refused by the server
+ * - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
+ * - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+ * - SF_ERR_INTERNAL for any other purely internal errors
+ * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+ *
+ * The connection's fd is inserted only when SF_ERR_NONE is returned, otherwise
+ * it's invalid and the caller has nothing to do.
+ */
+
int quic_connect_server(struct connection *conn, int flags)
{
	int fd;
	struct server *srv;
	struct proxy *be;
	struct conn_src *src;
	struct sockaddr_storage *addr;

	BUG_ON(!conn->dst);

	conn->flags |= CO_FL_WAIT_L4_CONN; /* connection in progress */

	/* resolve the backend and optional server from the target */
	switch (obj_type(conn->target)) {
	case OBJ_TYPE_PROXY:
		be = __objt_proxy(conn->target);
		srv = NULL;
		break;
	case OBJ_TYPE_SERVER:
		srv = __objt_server(conn->target);
		be = srv->proxy;
		break;
	default:
		conn->flags |= CO_FL_ERROR;
		return SF_ERR_INTERNAL;
	}

	fd = conn->handle.fd = sock_create_server_socket(conn);

	if (fd == -1) {
		/* translate the errno into a specific error code and an
		 * emergency log where a system limit was hit.
		 */
		qfprintf(stderr, "Cannot get a server socket.\n");

		if (errno == ENFILE) {
			conn->err_code = CO_ER_SYS_FDLIM;
			send_log(be, LOG_EMERG,
				 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
				 be->id, global.maxsock);
		}
		else if (errno == EMFILE) {
			conn->err_code = CO_ER_PROC_FDLIM;
			send_log(be, LOG_EMERG,
				 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
				 be->id, global.maxsock);
		}
		else if (errno == ENOBUFS || errno == ENOMEM) {
			conn->err_code = CO_ER_SYS_MEMLIM;
			send_log(be, LOG_EMERG,
				 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
				 be->id, global.maxsock);
		}
		else if (errno == EAFNOSUPPORT || errno == EPROTONOSUPPORT) {
			conn->err_code = CO_ER_NOPROTO;
		}
		else
			conn->err_code = CO_ER_SOCK_ERR;

		/* this is a resource error */
		conn->flags |= CO_FL_ERROR;
		return SF_ERR_RESOURCE;
	}

	if (fd >= global.maxsock) {
		/* do not log anything there, it's a normal condition when this option
		 * is used to serialize connections to a server !
		 */
		ha_alert("socket(): not enough free sockets. Raise -n argument. Giving up.\n");
		close(fd);
		conn->err_code = CO_ER_CONF_FDLIM;
		conn->flags |= CO_FL_ERROR;
		return SF_ERR_PRXCOND; /* it is a configuration limit */
	}

	if (fd_set_nonblock(fd) == -1) {
		qfprintf(stderr,"Cannot set client socket to non blocking mode.\n");
		close(fd);
		conn->err_code = CO_ER_SOCK_ERR;
		conn->flags |= CO_FL_ERROR;
		return SF_ERR_INTERNAL;
	}

	if (master == 1 && fd_set_cloexec(fd) == -1) {
		ha_alert("Cannot set CLOEXEC on client socket.\n");
		close(fd);
		conn->err_code = CO_ER_SOCK_ERR;
		conn->flags |= CO_FL_ERROR;
		return SF_ERR_INTERNAL;
	}

	/* allow specific binding :
	 * - server-specific at first
	 * - proxy-specific next
	 */
	if (srv && srv->conn_src.opts & CO_SRC_BIND)
		src = &srv->conn_src;
	else if (be->conn_src.opts & CO_SRC_BIND)
		src = &be->conn_src;
	else
		src = NULL;

	if (src) {
		/* note: this local <flags> intentionally shadows the unused
		 * function parameter; it encodes the 2-bit field expected by
		 * quic_bind_socket().
		 */
		int ret, flags = 0;

		if (conn->src && is_inet_addr(conn->src)) {
			switch (src->opts & CO_SRC_TPROXY_MASK) {
			case CO_SRC_TPROXY_CLI:
				conn_set_private(conn);
				__fallthrough;
			case CO_SRC_TPROXY_ADDR:
				flags = 3;
				break;
			case CO_SRC_TPROXY_CIP:
			case CO_SRC_TPROXY_DYN:
				conn_set_private(conn);
				flags = 1;
				break;
			}
		}

#ifdef SO_BINDTODEVICE
		/* Note: this might fail if not CAP_NET_RAW */
		if (src->iface_name)
			setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, src->iface_name, src->iface_len + 1);
#endif

		if (src->sport_range) {
			int attempts = 10; /* should be more than enough to find a spare port */
			struct sockaddr_storage sa;

			ret = 1;
			memcpy(&sa, &src->source_addr, sizeof(sa));

			do {
				/* note: in case of retry, we may have to release a previously
				 * allocated port, hence this loop's construct.
				 */
				port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
				fdinfo[fd].port_range = NULL;

				if (!attempts)
					break;
				attempts--;

				fdinfo[fd].local_port = port_range_alloc_port(src->sport_range);
				if (!fdinfo[fd].local_port) {
					conn->err_code = CO_ER_PORT_RANGE;
					break;
				}

				fdinfo[fd].port_range = src->sport_range;
				set_host_port(&sa, fdinfo[fd].local_port);

				ret = quic_bind_socket(fd, flags, &sa, conn->src);
				if (ret != 0)
					conn->err_code = CO_ER_CANT_BIND;
			} while (ret != 0); /* binding NOK */
		}
		else {
#ifdef IP_BIND_ADDRESS_NO_PORT
			static THREAD_LOCAL int bind_address_no_port = 1;
			setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (const void *) &bind_address_no_port, sizeof(int));
#endif
			ret = quic_bind_socket(fd, flags, &src->source_addr, conn->src);
			if (ret != 0)
				conn->err_code = CO_ER_CANT_BIND;
		}

		if (unlikely(ret != 0)) {
			/* bind failed: release the port and the socket, then
			 * report whether it was the plain (1) or tproxy (2) bind.
			 */
			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
			fdinfo[fd].port_range = NULL;
			close(fd);

			if (ret == 1) {
				ha_alert("Cannot bind to source address before connect() for backend %s. Aborting.\n",
					 be->id);
				send_log(be, LOG_EMERG,
					 "Cannot bind to source address before connect() for backend %s.\n",
					 be->id);
			} else {
				ha_alert("Cannot bind to tproxy source address before connect() for backend %s. Aborting.\n",
					 be->id);
				send_log(be, LOG_EMERG,
					 "Cannot bind to tproxy source address before connect() for backend %s.\n",
					 be->id);
			}
			conn->flags |= CO_FL_ERROR;
			return SF_ERR_RESOURCE;
		}
	}

	if (global.tune.server_sndbuf)
		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &global.tune.server_sndbuf, sizeof(global.tune.server_sndbuf));

	if (global.tune.server_rcvbuf)
		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &global.tune.server_rcvbuf, sizeof(global.tune.server_rcvbuf));

	/* NOTE(review): if CO_FL_SOCKS4 were set with a proxy target (srv
	 * NULL), this would dereference NULL — presumably SOCKS4 is only
	 * ever set with a server target; confirm with callers.
	 */
	addr = (conn->flags & CO_FL_SOCKS4) ? &srv->socks4_addr : conn->dst;
	if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
		if (errno == EINPROGRESS || errno == EALREADY) {
			/* common case, let's wait for connect status */
			conn->flags |= CO_FL_WAIT_L4_CONN;
		}
		else if (errno == EISCONN) {
			/* should normally not happen but if so, indicates that it's OK */
			conn->flags &= ~CO_FL_WAIT_L4_CONN;
		}
		else if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRINUSE || errno == EADDRNOTAVAIL) {
			char *msg;
			if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRNOTAVAIL) {
				msg = "no free ports";
				conn->err_code = CO_ER_FREE_PORTS;
			}
			else {
				msg = "local address already in use";
				conn->err_code = CO_ER_ADDR_INUSE;
			}

			qfprintf(stderr,"Connect() failed for backend %s: %s.\n", be->id, msg);
			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
			fdinfo[fd].port_range = NULL;
			close(fd);
			send_log(be, LOG_ERR, "Connect() failed for backend %s: %s.\n", be->id, msg);
			conn->flags |= CO_FL_ERROR;
			return SF_ERR_RESOURCE;
		} else if (errno == ETIMEDOUT) {
			//qfprintf(stderr,"Connect(): ETIMEDOUT");
			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
			fdinfo[fd].port_range = NULL;
			close(fd);
			conn->err_code = CO_ER_SOCK_ERR;
			conn->flags |= CO_FL_ERROR;
			return SF_ERR_SRVTO;
		} else {
			// (errno == ECONNREFUSED || errno == ENETUNREACH || errno == EACCES || errno == EPERM)
			//qfprintf(stderr,"Connect(): %d", errno);
			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
			fdinfo[fd].port_range = NULL;
			close(fd);
			conn->err_code = CO_ER_SOCK_ERR;
			conn->flags |= CO_FL_ERROR;
			return SF_ERR_SRVCL;
		}
	}
	else {
		/* connect() == 0, this is great! */
		conn->flags &= ~CO_FL_WAIT_L4_CONN;
	}

	conn_ctrl_init(conn);       /* registers the FD */
	HA_ATOMIC_OR(&fdtab[fd].state, FD_LINGER_RISK);  /* close hard if needed */

	if (conn->flags & CO_FL_WAIT_L4_CONN) {
		fd_want_send(fd);
		fd_cant_send(fd);
		fd_cant_recv(fd);
	}

	return SF_ERR_NONE;  /* connection is OK */
}
+
/* Allocate the RX buffers for <l> listener.
 * Return 1 if succeeded, 0 if not.
 */
static int quic_alloc_rxbufs_listener(struct listener *l)
{
        int i;
        struct quic_receiver_buf *tmp;

        /* One receiver buffer per thread the listener is bound to. */
        MT_LIST_INIT(&l->rx.rxbuf_list);
        for (i = 0; i < my_popcountl(l->rx.bind_thread); i++) {
                struct quic_receiver_buf *rxbuf;
                char *buf;

                rxbuf = calloc(1, sizeof(*rxbuf));
                if (!rxbuf)
                        goto err;

                /* The storage area itself comes from a dedicated pool. */
                buf = pool_alloc(pool_head_quic_rxbuf);
                if (!buf) {
                        free(rxbuf);
                        goto err;
                }

                rxbuf->buf = b_make(buf, QUIC_RX_BUFSZ, 0, 0);
                LIST_INIT(&rxbuf->dgram_list);
                MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
        }

        return 1;

 err:
        /* Release every buffer successfully allocated before the failure. */
        while ((tmp = MT_LIST_POP(&l->rx.rxbuf_list, typeof(tmp), rxbuf_el))) {
                pool_free(pool_head_quic_rxbuf, tmp->buf.area);
                free(tmp);
        }
        return 0;
}
+
+/* Check if platform supports the required feature set for quic-conn owned
+ * socket. <l> listener must already be binded; a dummy socket will be opened
+ * on the same address as one of the support test.
+ *
+ * Returns true if platform is deemed compatible else false.
+ */
+static int quic_test_sock_per_conn_support(struct listener *l)
+{
+ const struct receiver *rx = &l->rx;
+ int ret = 1, fdtest;
+
+ /* Check if IP destination address can be retrieved on recvfrom()
+ * operation.
+ */
+#if !defined(IP_PKTINFO) && !defined(IP_RECVDSTADDR)
+ ha_alert("Your platform does not seem to support UDP source address retrieval through IP_PKTINFO or an alternative flag. "
+ "QUIC connections will use listener socket.\n");
+ ret = 0;
+#endif
+
+ /* Check if platform support multiple UDP sockets bind on the same
+ * local address. Create a dummy socket and bind it on the same address
+ * as <l> listener. If bind system call fails, deactivate socket per
+ * connection. All other errors are not taken into account.
+ */
+ if (ret) {
+ fdtest = socket(rx->proto->fam->sock_domain,
+ rx->proto->sock_type, rx->proto->sock_prot);
+ if (fdtest >= 0) {
+ if (setsockopt(fdtest, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) &&
+ bind(fdtest, (struct sockaddr *)&rx->addr, rx->proto->fam->sock_addrlen) < 0) {
+ ha_alert("Your platform does not seem to support multiple UDP sockets binded on the same address. "
+ "QUIC connections will use listener socket.\n");
+ ret = 0;
+ }
+
+ close(fdtest);
+ }
+ }
+
+ return ret;
+}
+
/* This function tries to bind a QUIC4/6 listener. It may return a warning or
 * an error message in <errmsg> if the message is at most <errlen> bytes long
 * (including '\0'). Note that <errmsg> may be NULL if <errlen> is also zero.
 * The return value is composed from ERR_ABORT, ERR_WARN,
 * ERR_ALERT, ERR_RETRYABLE and ERR_FATAL. ERR_NONE indicates that everything
 * was alright and that no message was returned. ERR_RETRYABLE means that an
 * error occurred but that it may vanish after a retry (eg: port in use), and
 * ERR_FATAL indicates a non-fixable error. ERR_WARN and ERR_ALERT do not alter
 * the meaning of the error, but just indicate that a message is present which
 * should be displayed with the respective level. Last, ERR_ABORT indicates
 * that it's pointless to try to start other listeners. No error message is
 * returned if <errlen> is zero.
 */
static int quic_bind_listener(struct listener *listener, char *errmsg, int errlen)
{
        const struct sockaddr_storage addr = listener->rx.addr;
        int fd, err = ERR_NONE;
        char *msg = NULL;

        /* ensure we never return garbage */
        if (errlen)
                *errmsg = 0;

        if (listener->state != LI_ASSIGNED)
                return ERR_NONE; /* already bound */

        /* The UDP socket must have been bound by the receiver code first. */
        if (!(listener->rx.flags & RX_F_BOUND)) {
                msg = "receiving socket not bound";
                goto udp_return;
        }

        /* Duplicate quic_mode setting from bind_conf. Useful to overwrite it
         * at runtime per receiver instance.
         */
        listener->rx.quic_mode = listener->bind_conf->quic_mode;

        /* Set IP_PKTINFO to retrieve destination address on recv. */
        fd = listener->rx.fd;
        switch (addr.ss_family) {
        case AF_INET:
#if defined(IP_PKTINFO)
                setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
#elif defined(IP_RECVDSTADDR)
                setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one));
#endif /* IP_PKTINFO || IP_RECVDSTADDR */
                break;
        case AF_INET6:
#ifdef IPV6_RECVPKTINFO
                setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one));
#endif
                break;
        default:
                break;
        }

        /* NOTE(review): only ERR_WARN is reported on this failure, no
         * ERR_ALERT/ERR_FATAL — confirm the caller treats it as fatal enough.
         */
        if (!quic_alloc_rxbufs_listener(listener)) {
                msg = "could not initialize tx/rx rings";
                err |= ERR_WARN;
                goto udp_return;
        }

        /* Globally disable socket-per-connection when the runtime capability
         * test fails on this platform.
         */
        if (global.tune.options & GTUNE_QUIC_SOCK_PER_CONN) {
                if (!quic_test_sock_per_conn_support(listener))
                        global.tune.options &= ~GTUNE_QUIC_SOCK_PER_CONN;
        }

        if (global.tune.frontend_rcvbuf)
                setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &global.tune.frontend_rcvbuf, sizeof(global.tune.frontend_rcvbuf));

        if (global.tune.frontend_sndbuf)
                setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &global.tune.frontend_sndbuf, sizeof(global.tune.frontend_sndbuf));

        listener_set_state(listener, LI_LISTEN);

 udp_return:
        if (msg && errlen) {
                char pn[INET6_ADDRSTRLEN];

                addr_to_str(&listener->rx.addr, pn, sizeof(pn));
                snprintf(errmsg, errlen, "%s for [%s:%d]", msg, pn, get_host_port(&listener->rx.addr));
        }
        return err;
}
+
+/* Enable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid. Does nothing in early boot (needs fd_updt).
+ */
+static void quic_enable_listener(struct listener *l)
+{
+ /* FIXME: The following statements are incorrect. This
+ * is the responsibility of the QUIC xprt to stop accepting new
+ * connections.
+ */
+ if (fd_updt)
+ fd_want_recv(l->rx.fd);
+}
+
+/* Disable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid. Does nothing in early boot (needs fd_updt).
+ */
+static void quic_disable_listener(struct listener *l)
+{
+ /* FIXME: The following statements are incorrect. This
+ * is the responsibility of the QUIC xprt to start accepting new
+ * connections again.
+ */
+ if (fd_updt)
+ fd_stop_recv(l->rx.fd);
+}
+
+/* change the connection's thread to <new_tid>. For frontend connections, the
+ * target is a listener, and the caller is responsible for guaranteeing that
+ * the listener assigned to the connection is bound to the requested thread.
+ */
+static int quic_set_affinity(struct connection *conn, int new_tid)
+{
+ struct quic_conn *qc = conn->handle.qc;
+ return qc_set_tid_affinity(qc, new_tid, objt_listener(conn->target));
+}
+
/* Allocate the per-thread QUIC datagram handlers and the global CID trees.
 * Registered as a post-check callback: returns 1 on success, 0 on allocation
 * failure.
 */
static int quic_alloc_dghdlrs(void)
{
        int i;

        /* One datagram handler per configured thread. */
        quic_dghdlrs = calloc(global.nbthread, sizeof(*quic_dghdlrs));
        if (!quic_dghdlrs) {
                ha_alert("Failed to allocate the quic datagram handlers.\n");
                return 0;
        }

        for (i = 0; i < global.nbthread; i++) {
                struct quic_dghdlr *dghdlr = &quic_dghdlrs[i];

                /* NOTE(review): on failure, handlers allocated so far are not
                 * freed here; quic_deallocate_dghdlrs() below releases them
                 * at deinit — confirm this is the intended cleanup path.
                 */
                dghdlr->task = tasklet_new();
                if (!dghdlr->task) {
                        ha_alert("Failed to allocate the quic datagram handler on thread %d.\n", i);
                        return 0;
                }

                /* Pin the tasklet to its thread and hook the handler. */
                tasklet_set_tid(dghdlr->task, i);
                dghdlr->task->context = dghdlr;
                dghdlr->task->process = quic_lstnr_dghdlr;

                MT_LIST_INIT(&dghdlr->dgrams);
        }

        /* Sharded trees storing the connection IDs, each with its own lock. */
        quic_cid_trees = calloc(QUIC_CID_TREES_CNT, sizeof(*quic_cid_trees));
        if (!quic_cid_trees) {
                ha_alert("Failed to allocate global CIDs trees.\n");
                return 0;
        }

        for (i = 0; i < QUIC_CID_TREES_CNT; ++i) {
                HA_RWLOCK_INIT(&quic_cid_trees[i].lock);
                quic_cid_trees[i].root = EB_ROOT_UNIQUE;
        }

        return 1;
}
REGISTER_POST_CHECK(quic_alloc_dghdlrs);
+
+static int quic_deallocate_dghdlrs(void)
+{
+ int i;
+
+ if (quic_dghdlrs) {
+ for (i = 0; i < global.nbthread; ++i)
+ tasklet_free(quic_dghdlrs[i].task);
+ free(quic_dghdlrs);
+ }
+
+ ha_free(&quic_cid_trees);
+
+ return 1;
+}
+REGISTER_POST_DEINIT(quic_deallocate_dghdlrs);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_rhttp.c b/src/proto_rhttp.c
new file mode 100644
index 0000000..452ee32
--- /dev/null
+++ b/src/proto_rhttp.c
@@ -0,0 +1,464 @@
+#include <stdio.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/intops.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/sock.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/task.h>
+
+#include <haproxy/proto_rhttp.h>
+
/* Pseudo address family for reverse HTTP bindings. Only the bind callback is
 * provided: no real socket address handling happens at this level.
 */
struct proto_fam proto_fam_rhttp = {
        .name = "rhttp",
        .sock_domain = AF_CUST_RHTTP_SRV, /* internal custom domain */
        .sock_family = AF_INET,
        .bind = rhttp_bind_receiver,
};
+
/* Protocol descriptor for reverse HTTP ("rev"): connections are obtained via
 * preconnect toward a server and then reversed, so no outgoing connection
 * callbacks are provided.
 */
struct protocol proto_rhttp = {
        .name = "rev",

        /* connection layer (no outgoing connection) */
        .listen = rhttp_bind_listener,
        .enable = rhttp_enable_listener,
        .disable = rhttp_disable_listener,
        .add = default_add_listener,
        .unbind = rhttp_unbind_receiver,
        .resume = default_resume_listener,
        .accept_conn = rhttp_accept_conn,
        .set_affinity = rhttp_set_affinity,

        /* address family */
        .fam = &proto_fam_rhttp,

        /* socket layer */
        .proto_type = PROTO_TYPE_STREAM,
        .sock_type = SOCK_STREAM,
        .sock_prot = IPPROTO_TCP,
        .rx_listening = rhttp_accepting_conn,
        .receivers = LIST_HEAD_INIT(proto_rhttp.receivers),
};
+
/* Allocate and start a new preconnect connection to server <srv> on behalf of
 * listener <l>. The connection is accounted in the current thread's rhttp
 * counter and marked for reversal toward <l>. Returns the connection on
 * success or NULL on error, after releasing every intermediate resource.
 */
static struct connection *new_reverse_conn(struct listener *l, struct server *srv)
{
        struct connection *conn = conn_new(srv);
        struct sockaddr_storage *bind_addr = NULL;
        if (!conn)
                goto err;

        /* Account this preconnect attempt on the current thread. */
        HA_ATOMIC_INC(&th_ctx->nb_rhttp_conns);

        /* Mark the connection as to be reversed toward the listener. */
        conn_set_reverse(conn, &l->obj_type);

        if (alloc_bind_address(&bind_addr, srv, srv->proxy, NULL) != SRV_STATUS_OK)
                goto err;
        conn->src = bind_addr;

        /* Destination is the server address/port. */
        sockaddr_alloc(&conn->dst, 0, 0);
        if (!conn->dst)
                goto err;
        *conn->dst = srv->addr;
        set_host_port(conn->dst, srv->svc_port);

        if (conn_prepare(conn, protocol_lookup(conn->dst->ss_family, PROTO_TYPE_STREAM, 0), srv->xprt))
                goto err;

        if (conn->ctrl->connect(conn, 0) != SF_ERR_NONE)
                goto err;

#ifdef USE_OPENSSL
        if (srv->ssl_ctx.sni) {
                struct sample *sni_smp = NULL;
                /* TODO remove NULL session which can cause crash depending on the SNI sample expr used. */
                sni_smp = sample_fetch_as_type(srv->proxy, NULL, NULL,
                                               SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
                                               srv->ssl_ctx.sni, SMP_T_STR);
                if (smp_make_safe(sni_smp))
                        ssl_sock_set_servername(conn, sni_smp->data.u.str.area);
        }
#endif /* USE_OPENSSL */

        if (conn_xprt_start(conn) < 0)
                goto err;

        /* Install the mux immediately unless it must be chosen after the
         * ALPN/NPN negotiation completes.
         */
        if (!srv->use_ssl ||
            (!srv->ssl_ctx.alpn_str && !srv->ssl_ctx.npn_str) ||
            srv->mux_proto) {
                if (conn_install_mux_be(conn, NULL, NULL, NULL) < 0)
                        goto err;
        }

        /* Not expected here. */
        BUG_ON((conn->flags & CO_FL_HANDSHAKE));
        return conn;

 err:
        if (conn) {
                /* Full teardown of a partially initialized connection. */
                conn_stop_tracking(conn);
                conn_xprt_shutw(conn);
                conn_xprt_close(conn);
                conn_sock_shutw(conn, 0);
                conn_ctrl_close(conn);

                if (conn->destroy_cb)
                        conn->destroy_cb(conn);

                /* Mark connection as non-reversible. This prevents conn_free()
                 * from rescheduling the rhttp task when freeing a preconnect
                 * connection.
                 */
                conn->reverse.target = NULL;
                conn_free(conn);
        }

        return NULL;
}
+
/* Report that a connection used for preconnect on listener <l> is freed before
 * reversal is completed. This is used to cleanup any reference to the
 * connection and rearm a new preconnect attempt.
 */
void rhttp_notify_preconn_err(struct listener *l)
{
        /* Receiver must reference a reverse connection as pending. */
        BUG_ON(!l->rx.rhttp.pend_conn);

        /* Remove reference to the freed connection. */
        l->rx.rhttp.pend_conn = NULL;

        /* Log the error transition only once per error state. */
        if (l->rx.rhttp.state != LI_PRECONN_ST_ERR) {
                send_log(l->bind_conf->frontend, LOG_ERR,
                         "preconnect %s::%s: Error encountered.\n",
                         l->bind_conf->frontend->id, l->bind_conf->rhttp_srvname);
                l->rx.rhttp.state = LI_PRECONN_ST_ERR;
        }

        /* Rearm a new preconnect attempt in one second. */
        l->rx.rhttp.task->expire = MS_TO_TICKS(now_ms + 1000);
        task_queue(l->rx.rhttp.task);
}
+
/* Lookup over listener <l> threads for their current count of active reverse
 * HTTP connections. Returns the least loaded thread ID.
 */
static unsigned int select_thread(struct listener *l)
{
        /* Only consider threads both bound to the listener and enabled. */
        unsigned long mask = l->rx.bind_thread & _HA_ATOMIC_LOAD(&tg->threads_enabled);
        unsigned int load_min = HA_ATOMIC_LOAD(&th_ctx->nb_rhttp_conns);
        unsigned int load_thr;
        unsigned int ret = tid;
        int i;

        /* Returns current tid if listener runs on one thread only. */
        if (!atleast2(mask))
                goto end;

        /* Loop over all threads and return the least loaded one. This needs to
         * be just an approximation so it's not important if the selected
         * thread load has varied since its selection.
         */

        for (i = tg->base; mask; mask >>= 1, i++) {
                if (!(mask & 0x1))
                        continue;

                load_thr = HA_ATOMIC_LOAD(&ha_thread_ctx[i].nb_rhttp_conns);
                if (load_min > load_thr) {
                        ret = i;
                        load_min = load_thr;
                }
        }

 end:
        return ret;
}
+
/* Detach <task> from its thread and assign it to <new_tid> thread. The task is
 * queued to be woken up on the new thread.
 */
static void task_migrate(struct task *task, uint new_tid)
{
        /* Leave the wait queue and reset the expiry before re-assigning the
         * thread, then wake the task with a message event.
         */
        task_unlink_wq(task);
        task->expire = TICK_ETERNITY;
        task_set_thread(task, new_tid);
        task_wakeup(task, TASK_WOKEN_MSG);
}
+
/* Preconnect task handler for the reverse HTTP listener stored in <ctx>:
 * cleans up a pending connection on error/timeout, accepts it once reversed,
 * or allocates a new preconnect attempt when none is pending.
 */
struct task *rhttp_process(struct task *task, void *ctx, unsigned int state)
{
        struct listener *l = ctx;
        struct connection *conn = l->rx.rhttp.pend_conn;

        if (conn) {
                /* Either connection is on error or the connect timeout fired. */
                if (conn->flags & CO_FL_ERROR || tick_is_expired(task->expire, now_ms)) {
                        /* If mux already instantiated, let it release the
                         * connection along with its context. Else do cleanup
                         * directly.
                         */
                        if (conn->mux && conn->mux->destroy) {
                                conn->mux->destroy(conn->ctx);
                        }
                        else {
                                conn_stop_tracking(conn);
                                conn_xprt_shutw(conn);
                                conn_xprt_close(conn);
                                conn_sock_shutw(conn, 0);
                                conn_ctrl_close(conn);

                                if (conn->destroy_cb)
                                        conn->destroy_cb(conn);
                                conn_free(conn);
                        }

                        /* conn_free() must report preconnect failure using rhttp_notify_preconn_err(). */
                        BUG_ON(l->rx.rhttp.pend_conn);

                        /* NOTE(review): TICKS_TO_MS(now_ms) as an expiry looks
                         * like a unit mismatch (other paths use MS_TO_TICKS);
                         * presumably meant to retry immediately — confirm.
                         */
                        l->rx.rhttp.task->expire = TICKS_TO_MS(now_ms);
                }
                else {
                        /* Spurious receiver task woken up despite pend_conn not ready/on error. */
                        BUG_ON(!(conn->flags & CO_FL_ACT_REVERSING));

                        /* A connection is ready to be accepted. */
                        listener_accept(l);
                        l->rx.rhttp.task->expire = TICK_ETERNITY;
                }
        }
        else {
                struct server *srv = l->rx.rhttp.srv;

                /* Unless woken by a migration message, check whether a less
                 * loaded thread should run the preconnect task instead.
                 */
                if ((state & TASK_WOKEN_ANY) != TASK_WOKEN_MSG) {
                        unsigned int new_tid = select_thread(l);
                        if (new_tid != tid) {
                                task_migrate(l->rx.rhttp.task, new_tid);
                                return task;
                        }
                }

                /* No pending reverse connection, prepare a new one. Store it in the
                 * listener and return NULL. Connection will be returned later after
                 * reversal is completed.
                 */
                conn = new_reverse_conn(l, srv);
                l->rx.rhttp.pend_conn = conn;

                /* On success task will be woken up by H2 mux after reversal;
                 * on allocation failure, retry in one second.
                 */
                l->rx.rhttp.task->expire = conn ?
                        tick_add_ifset(now_ms, srv->proxy->timeout.connect) :
                        MS_TO_TICKS(now_ms + 1000);
        }

        return task;
}
+
/* Mark receiver <rx> as bound. Reverse HTTP has no underlying socket to set
 * up at this stage, so binding always succeeds. <errmsg> is never filled.
 */
int rhttp_bind_receiver(struct receiver *rx, char **errmsg)
{
        rx->flags |= RX_F_BOUND;
        return ERR_NONE;
}
+
/* Bind a reverse HTTP listener: resolve the "<backend>/<server>" target named
 * on the bind line, validate that it is usable for reversal (HTTP/2 via proto
 * or ALPN, no dynamic source address), and create the preconnect task. May
 * write an error string into <errmsg> (at most <errlen> bytes). Returns
 * ERR_NONE on success or ERR_ALERT | ERR_FATAL on failure.
 */
int rhttp_bind_listener(struct listener *listener, char *errmsg, int errlen)
{
        struct task *task;
        struct proxy *be;
        struct server *srv;
        struct ist be_name, sv_name;
        char *name = NULL;

        unsigned long mask;
        uint task_tid;

        if (listener->state != LI_ASSIGNED)
                return ERR_NONE; /* already bound */

        /* Retrieve the first thread usable for this listener. */
        mask = listener->rx.bind_thread & _HA_ATOMIC_LOAD(&tg->threads_enabled);
        /* NOTE(review): my_ffsl() is 1-based; confirm the resulting task_tid
         * offset relative to the group base is the intended thread ID.
         */
        task_tid = my_ffsl(mask) + ha_tgroup_info[listener->rx.bind_tgroup].base;
        if (!(task = task_new_on(task_tid))) {
                snprintf(errmsg, errlen, "Out of memory.");
                goto err;
        }
        task->process = rhttp_process;
        task->context = listener;
        listener->rx.rhttp.task = task;
        listener->rx.rhttp.state = LI_PRECONN_ST_STOP;

        /* Set maxconn which is defined via the special kw nbconn for reverse
         * connect. Use a default value of 1 if not set. This guarantees that
         * listener will be automatically re-enabled each time it fell back below
         * it due to a connection error.
         */
        listener->bind_conf->maxconn = listener->bind_conf->rhttp_nbconn;
        if (!listener->bind_conf->maxconn)
                listener->bind_conf->maxconn = 1;

        /* Work on a private copy so istsplit() can carve it up. */
        name = strdup(listener->bind_conf->rhttp_srvname);
        if (!name) {
                snprintf(errmsg, errlen, "Out of memory.");
                goto err;
        }

        sv_name = ist(name);
        be_name = istsplit(&sv_name, '/');
        if (!istlen(sv_name)) {
                snprintf(errmsg, errlen, "Invalid server name: '%s'.", name);
                goto err;
        }

        if (!(be = proxy_be_by_name(ist0(be_name)))) {
                snprintf(errmsg, errlen, "No such backend: '%s'.", name);
                goto err;
        }
        if (!(srv = server_find_by_name(be, ist0(sv_name)))) {
                snprintf(errmsg, errlen, "No such server: '%s/%s'.", ist0(be_name), ist0(sv_name));
                goto err;
        }

        /* A reverse HTTP server cannot itself be the target of a reverse bind. */
        if (srv->flags & SRV_F_RHTTP) {
                snprintf(errmsg, errlen, "Cannot use reverse HTTP server '%s/%s' as target to a reverse bind.", ist0(be_name), ist0(sv_name));
                goto err;
        }

        if (srv_is_transparent(srv)) {
                snprintf(errmsg, errlen, "Cannot use transparent server '%s/%s' as target to a reverse bind.", ist0(be_name), ist0(sv_name));
                goto err;
        }

        /* Check that server uses HTTP/2 either with proto or ALPN. */
        if ((!srv->mux_proto || !isteqi(srv->mux_proto->token, ist("h2"))) &&
            (!srv->use_ssl || !isteqi(ist(srv->ssl_ctx.alpn_str), ist("\x02h2")))) {
                snprintf(errmsg, errlen, "Cannot reverse connect with server '%s/%s' unless HTTP/2 is activated on it with either proto or alpn keyword.", name, ist0(sv_name));
                goto err;
        }

        /* Prevent dynamic source address settings. */
        if (((srv->conn_src.opts & CO_SRC_TPROXY_MASK) &&
             (srv->conn_src.opts & CO_SRC_TPROXY_MASK) != CO_SRC_TPROXY_ADDR) ||
            ((srv->proxy->conn_src.opts & CO_SRC_TPROXY_MASK) &&
             (srv->proxy->conn_src.opts & CO_SRC_TPROXY_MASK) != CO_SRC_TPROXY_ADDR)) {
                snprintf(errmsg, errlen, "Cannot reverse connect with server '%s/%s' which uses dynamic source address setting.", name, ist0(sv_name));
                goto err;
        }

        ha_free(&name);

        listener->rx.rhttp.srv = srv;
        listener_set_state(listener, LI_LISTEN);

        return ERR_NONE;

 err:
        ha_free(&name);
        return ERR_ALERT | ERR_FATAL;
}
+
+void rhttp_enable_listener(struct listener *l)
+{
+ if (l->rx.rhttp.state < LI_PRECONN_ST_INIT) {
+ send_log(l->bind_conf->frontend, LOG_INFO,
+ "preconnect %s::%s: Initiating.\n",
+ l->bind_conf->frontend->id, l->bind_conf->rhttp_srvname);
+ l->rx.rhttp.state = LI_PRECONN_ST_INIT;
+ }
+
+ task_wakeup(l->rx.rhttp.task, TASK_WOKEN_ANY);
+}
+
+void rhttp_disable_listener(struct listener *l)
+{
+ if (l->rx.rhttp.state < LI_PRECONN_ST_FULL) {
+ send_log(l->bind_conf->frontend, LOG_INFO,
+ "preconnect %s::%s: Running with nbconn %d reached.\n",
+ l->bind_conf->frontend->id, l->bind_conf->rhttp_srvname,
+ l->bind_conf->maxconn);
+ l->rx.rhttp.state = LI_PRECONN_ST_FULL;
+ }
+}
+
/* Accept entry point for reverse HTTP listener <l>: returns the pending
 * connection once its reversal is complete, else triggers the allocation of a
 * new preconnect attempt. <status> is filled with a CO_AC_* code.
 */
struct connection *rhttp_accept_conn(struct listener *l, int *status)
{
        struct connection *conn = l->rx.rhttp.pend_conn;

        if (!conn) {
                /* Reverse connect listener must have an explicit maxconn set
                 * to ensure it is re-enabled on connection error.
                 */
                BUG_ON(!l->bind_conf->maxconn);

                /* Instantiate a new conn if maxconn not yet exceeded. */
                if (l->nbconn <= l->bind_conf->maxconn) {
                        /* Try first if a new thread should be used for the new connection. */
                        unsigned int new_tid = select_thread(l);
                        if (new_tid != tid) {
                                task_migrate(l->rx.rhttp.task, new_tid);
                                *status = CO_AC_DONE;
                                return NULL;
                        }

                        /* No need to use a new thread, use the opportunity to alloc the connection right now. */
                        l->rx.rhttp.pend_conn = new_reverse_conn(l, l->rx.rhttp.srv);
                        if (!l->rx.rhttp.pend_conn) {
                                *status = CO_AC_PAUSE;
                                return NULL;
                        }
                }

                *status = CO_AC_DONE;
                return NULL;
        }

        /* listener_accept() must only be called once the pending connection
         * has completed its reversal.
         */
        BUG_ON(!(conn->flags & CO_FL_ACT_REVERSING));
        conn->flags &= ~CO_FL_ACT_REVERSING;
        conn->flags |= CO_FL_REVERSED;
        /* Let the mux finalize the reversal on its side. */
        conn->mux->ctl(conn, MUX_CTL_REVERSE_CONN, NULL);

        l->rx.rhttp.pend_conn = NULL;
        *status = CO_AC_NONE;

        return conn;
}
+
/* Unbind the receiver of listener <l>: only clears the bound flag since there
 * is no underlying listening socket to close for reverse HTTP.
 */
void rhttp_unbind_receiver(struct listener *l)
{
        l->rx.flags &= ~RX_F_BOUND;
}
+
/* Thread affinity callback for accepted reverse connections: always refuses
 * the migration.
 */
int rhttp_set_affinity(struct connection *conn, int new_tid)
{
        /* Explicitly disable connection thread migration on accept. Indeed,
         * it's unsafe to move a connection with its FD to another thread. Note
         * that active reverse task thread migration should be sufficient to
         * ensure repartition of reversed connections across listener threads.
         */
        return -1;
}
+
/* rx_listening callback: a reverse HTTP receiver is always reported ready
 * since readiness is driven by the preconnect task, not a socket state.
 */
int rhttp_accepting_conn(const struct receiver *rx)
{
        return 1;
}

INITCALL1(STG_REGISTER, protocol_register, &proto_rhttp);
+
+/* perform minimal intializations */
+static void init_rhttp()
+{
+ int i;
+
+ for (i = 0; i < MAX_THREADS; i++)
+ ha_thread_ctx[i].nb_rhttp_conns = 0;
+}
+
+INITCALL0(STG_PREPARE, init_rhttp);
diff --git a/src/proto_sockpair.c b/src/proto_sockpair.c
new file mode 100644
index 0000000..a719063
--- /dev/null
+++ b/src/proto_sockpair.c
@@ -0,0 +1,589 @@
+/*
+ * Socket Pair protocol layer (sockpair)
+ *
+ * Copyright HAProxy Technologies - William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <pwd.h>
+#include <grp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <time.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_sockpair.h>
+#include <haproxy/sock.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+static int sockpair_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static void sockpair_enable_listener(struct listener *listener);
+static void sockpair_disable_listener(struct listener *listener);
+static int sockpair_connect_server(struct connection *conn, int flags);
+static int sockpair_accepting_conn(const struct receiver *rx);
+struct connection *sockpair_accept_conn(struct listener *l, int *status);
+
/* Custom address family for socket pairs, layered on AF_UNIX addressing.
 * Address comparison and src/dst retrieval do not apply here, hence the NULL
 * callbacks.
 */
struct proto_fam proto_fam_sockpair = {
        .name = "sockpair",
        .sock_domain = AF_CUST_SOCKPAIR,
        .sock_family = AF_UNIX,
        .sock_addrlen = sizeof(struct sockaddr_un),
        .l3_addrlen = sizeof(((struct sockaddr_un*)0)->sun_path),
        .addrcmp = NULL,
        .bind = sockpair_bind_receiver,
        .get_src = NULL,
        .get_dst = NULL,
};
+
/* Note: must not be declared <const> as its list will be overwritten.
 * Protocol descriptor for "sockpair": stream connections carried over
 * inherited FDs exchanged through a unix socket pair.
 */
struct protocol proto_sockpair = {
        .name = "sockpair",

        /* connection layer */
        .xprt_type = PROTO_TYPE_STREAM,
        .listen = sockpair_bind_listener,
        .enable = sockpair_enable_listener,
        .disable = sockpair_disable_listener,
        .add = default_add_listener,
        .unbind = default_unbind_listener,
        .accept_conn = sockpair_accept_conn,
        .ctrl_init = sock_conn_ctrl_init,
        .ctrl_close = sock_conn_ctrl_close,
        .connect = sockpair_connect_server,
        .drain = sock_drain,
        .check_events = sock_check_events,
        .ignore_events = sock_ignore_events,

        /* binding layer */
        /* Note: suspend/resume not supported */

        /* address family */
        .fam = &proto_fam_sockpair,

        /* socket layer */
        .proto_type = PROTO_TYPE_STREAM,
        .sock_type = SOCK_STREAM,
        .sock_prot = 0,
        .rx_enable = sock_enable,
        .rx_disable = sock_disable,
        .rx_unbind = sock_unbind,
        .rx_listening = sockpair_accepting_conn,
        .default_iocb = sock_accept_iocb,
        .receivers = LIST_HEAD_INIT(proto_sockpair.receivers),
        .nb_receivers = 0,
};

INITCALL1(STG_REGISTER, protocol_register, &proto_sockpair);
+
/* Enable receipt of incoming connections for listener <l>. The receiver must
 * still be valid.
 */
static void sockpair_enable_listener(struct listener *l)
{
        /* NOTE(review): the _safe variant is used here, presumably because
         * this may run before polling is fully initialized — confirm.
         */
        fd_want_recv_safe(l->rx.fd);
}
+
/* Disable receipt of incoming connections for listener <l>. The receiver must
 * still be valid.
 */
static void sockpair_disable_listener(struct listener *l)
{
        fd_stop_recv(l->rx.fd);
}
+
/* Binds receiver <rx>, and assigns rx->iocb and rx->owner as the callback
 * and context, respectively, with ->bind_thread as the thread mask. Returns an
 * error code made of ERR_* bits on failure or ERR_NONE on success. On failure,
 * an error message may be passed into <errmsg>. Note that the binding address
 * is only an FD to receive the incoming FDs on. Thus by definition there is no
 * real "bind" operation, this only completes the receiver. Such FDs are not
 * inherited upon reload.
 */
int sockpair_bind_receiver(struct receiver *rx, char **errmsg)
{
        int err;

        /* ensure we never return garbage */
        if (errmsg)
                *errmsg = 0;

        err = ERR_NONE;

        if (rx->flags & RX_F_BOUND)
                return ERR_NONE;

        if (rx->flags & RX_F_MUST_DUP) {
                /* this is a secondary receiver that is an exact copy of a
                 * reference which must already be bound (or has failed).
                 * We'll try to dup() the other one's FD and take it. We
                 * try hard not to reconfigure the socket since it's shared.
                 */
                BUG_ON(!rx->shard_info);
                if (!(rx->shard_info->ref->flags & RX_F_BOUND)) {
                        /* it's assumed that the first one has already reported
                         * the error, let's not spam with another one, and do
                         * not set ERR_ALERT.
                         */
                        err |= ERR_RETRYABLE;
                        goto bind_ret_err;
                }
                /* taking the other one's FD will result in it being marked
                 * extern and being dup()ed. Let's mark the receiver as
                 * inherited so that it properly bypasses all second-stage
                 * setup and avoids being passed to new processes.
                 */
                rx->flags |= RX_F_INHERITED;
                rx->fd = rx->shard_info->ref->fd;
        }

        /* Sockpair receivers can only work from an FD passed by the parent. */
        if (rx->fd == -1) {
                err |= ERR_FATAL | ERR_ALERT;
                memprintf(errmsg, "sockpair may be only used with inherited FDs");
                goto bind_return;
        }

        if (rx->fd >= global.maxsock) {
                err |= ERR_FATAL | ERR_ABORT | ERR_ALERT;
                memprintf(errmsg, "not enough free sockets (raise '-n' parameter)");
                goto bind_close_return;
        }

        if (fd_set_nonblock(rx->fd) == -1) {
                err |= ERR_FATAL | ERR_ALERT;
                memprintf(errmsg, "cannot make socket non-blocking");
                goto bind_close_return;
        }

        rx->flags |= RX_F_BOUND;

        /* Register the FD with its IO callback in the poller. */
        fd_insert(rx->fd, rx->owner, rx->iocb, rx->bind_tgroup, rx->bind_thread);
        return err;

 bind_return:
        if (errmsg && *errmsg)
                memprintf(errmsg, "%s for [fd %d]", *errmsg, rx->fd);

 bind_ret_err:
        return err;

 bind_close_return:
        close(rx->fd);
        goto bind_return;
}
+
/* This function changes the state from ASSIGNED to LISTEN. The socket is NOT
 * enabled for polling. The return value is composed from ERR_NONE,
 * ERR_RETRYABLE and ERR_FATAL. It may return a warning or an error message in
 * <errmsg> if the message is at most <errlen> bytes long (including '\0').
 * Note that <errmsg> may be NULL if <errlen> is also zero.
 */
static int sockpair_bind_listener(struct listener *listener, char *errmsg, int errlen)
{
        int err;
        char *msg = NULL;

        err = ERR_NONE;

        /* ensure we never return garbage */
        if (errlen)
                *errmsg = 0;

        if (listener->state != LI_ASSIGNED)
                return ERR_NONE; /* already bound */

        if (!(listener->rx.flags & RX_F_BOUND)) {
                msg = "receiving socket not bound";
                goto err_return;
        }

        listener_set_state(listener, LI_LISTEN);
        return err;

 err_return:
        /* NOTE(review): <err> is still ERR_NONE on this path, so the caller
         * only gets the message, not an error code — confirm intended.
         */
        if (msg && errlen)
                snprintf(errmsg, errlen, "%s [fd %d]", msg, listener->rx.fd);
        return err;
}
+
/*
 * Send FD over a unix socket
 *
 * <send_fd> is the FD to send
 * <fd> is the fd of the unix socket to use for the transfer
 *
 * The iobuf variable could be used in the future to enhance the protocol.
 *
 * Returns 0 on success, -1 on failure (sendmsg() error or partial send).
 */
int send_fd_uxst(int fd, int send_fd)
{
        /* Zero-initialized so no indeterminate stack bytes are sent on the
         * wire; the payload content is currently ignored by the receiver.
         */
        char iobuf[2] = { 0 };
        struct iovec iov;
        struct msghdr msghdr;

        /* A union guarantees the control buffer is suitably aligned for
         * struct cmsghdr, as required for CMSG_FIRSTHDR() to be valid.
         */
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;
        } cmsgbuf;
        struct cmsghdr *cmsg;
        int *fdptr;

        iov.iov_base = iobuf;
        iov.iov_len = sizeof(iobuf);

        memset(&msghdr, 0, sizeof(msghdr));
        msghdr.msg_iov = &iov;
        msghdr.msg_iovlen = 1;

        /* Now send the fd as SCM_RIGHTS ancillary data */
        msghdr.msg_control = cmsgbuf.buf;
        msghdr.msg_controllen = CMSG_SPACE(sizeof(int));

        cmsg = CMSG_FIRSTHDR(&msghdr);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        fdptr = (int *)CMSG_DATA(cmsg);
        memcpy(fdptr, &send_fd, sizeof(send_fd));

        if (sendmsg(fd, &msghdr, 0) != sizeof(iobuf))
                return -1;

        return 0;
}
+
+/*
+ *
+ * This function works like uxst_connect_server but instead of creating a
+ * socket and establishing a connection, it creates a pair of connected
+ * sockets, and send one of them through the destination FD. The destination FD
+ * is stored in conn->dst->sin_addr.s_addr during configuration parsing.
+ *
+ * conn->target may point either to a valid server or to a backend, depending
+ * on conn->target. Only OBJ_TYPE_PROXY and OBJ_TYPE_SERVER are supported.
+ * The <flags> argument is not used here: a socketpair is connected from the
+ * start, so there is no in-progress connect() whose ACK/polling behavior
+ * could be tuned.
+ *
+ * Note that a pending send_proxy message accounts for data.
+ *
+ * It can return one of :
+ *  - SF_ERR_NONE if everything's OK
+ *  - SF_ERR_SRVTO if there are no more servers
+ *  - SF_ERR_SRVCL if the connection was refused by the server
+ *  - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
+ *  - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+ *  - SF_ERR_INTERNAL for any other purely internal errors
+ * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+ *
+ * The connection's fd is inserted only when SF_ERR_NONE is returned, otherwise
+ * it's invalid and the caller has nothing to do.
+ */
+static int sockpair_connect_server(struct connection *conn, int flags)
+{
+	int sv[2], fd, dst_fd = -1;
+
+	BUG_ON(!conn->dst);
+
+	/* conn->dst is not a real address here: the peer's listening FD was
+	 * smuggled into the sin_addr field at configuration parsing time.
+	 */
+	dst_fd = ((struct sockaddr_in *)conn->dst)->sin_addr.s_addr;
+
+	/* only a proxy or a server may be the target of this connection */
+	if (obj_type(conn->target) != OBJ_TYPE_PROXY &&
+	    obj_type(conn->target) != OBJ_TYPE_SERVER) {
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+		ha_alert("socketpair(): Cannot create socketpair. Giving up.\n");
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_RESOURCE;
+	}
+
+	/* sv[1] stays on our side and becomes the connection's FD; sv[0] is
+	 * transferred to the peer further below.
+	 */
+	fd = conn->handle.fd = sv[1];
+
+	if (fd >= global.maxsock) {
+		/* past the configured socket limit: report it, this is a
+		 * configuration issue (-n too low), not a runtime error.
+		 */
+		ha_alert("socket(): not enough free sockets. Raise -n argument. Giving up.\n");
+		close(sv[0]);
+		close(sv[1]);
+		conn->err_code = CO_ER_CONF_FDLIM;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_PRXCOND; /* it is a configuration limit */
+	}
+
+	if (fd_set_nonblock(fd) == -1) {
+		qfprintf(stderr,"Cannot set client socket to non blocking mode.\n");
+		close(sv[0]);
+		close(sv[1]);
+		conn->err_code = CO_ER_SOCK_ERR;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	/* in master process mode, make sure the fd is not inherited by exec'd
+	 * children (reload, external checks, ...)
+	 */
+	if (master == 1 && fd_set_cloexec(fd) == -1) {
+		ha_alert("Cannot set CLOEXEC on client socket.\n");
+		close(sv[0]);
+		close(sv[1]);
+		conn->err_code = CO_ER_SOCK_ERR;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	if (global.tune.server_sndbuf)
+		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &global.tune.server_sndbuf, sizeof(global.tune.server_sndbuf));
+
+	if (global.tune.server_rcvbuf)
+		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &global.tune.server_rcvbuf, sizeof(global.tune.server_rcvbuf));
+
+	/* The new socket is sent on the other side, it should be retrieved and
+	 * considered as an 'accept' socket on the server side */
+	if (send_fd_uxst(dst_fd, sv[0]) == -1) {
+		ha_alert("socketpair: Cannot transfer the fd %d over sockpair@%d. Giving up.\n", sv[0], dst_fd);
+		close(sv[0]);
+		close(sv[1]);
+		conn->err_code = CO_ER_SOCK_ERR;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	close(sv[0]); /* we don't need this side anymore */
+
+	/* both ends of a socketpair are connected from the start, so there is
+	 * no L4 connection establishment to wait for.
+	 */
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+	/* Prepare to send a few handshakes related to the on-wire protocol. */
+	if (conn->send_proxy_ofs)
+		conn->flags |= CO_FL_SEND_PROXY;
+
+	conn_ctrl_init(conn); /* registers the FD */
+	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK); /* no need to disable lingering */
+
+	return SF_ERR_NONE; /* connection is OK */
+}
+
+
+/*
+ * Receives a file descriptor transferred over the connected AF_UNIX stream
+ * socket <sock>, as emitted by send_fd_uxst().
+ *
+ * Returns -1 on error, or the received socket fd on success.
+ *
+ * The 2-byte iobuf payload is currently ignored and could be used in the
+ * future to enhance the protocol.
+ */
+int recv_fd_uxst(int sock)
+{
+	struct msghdr msghdr;
+	struct iovec iov;
+	char iobuf[2];
+	char cmsgbuf[CMSG_SPACE(sizeof(int))];
+	struct cmsghdr *cmsg;
+	int recv_fd = -1;
+	int ret = -1;
+
+	memset(&msghdr, 0, sizeof(msghdr));
+
+	/* regular payload: two opaque bytes, reserved for future use */
+	iov.iov_base = iobuf;
+	iov.iov_len = sizeof(iobuf);
+
+	msghdr.msg_iov = &iov;
+	msghdr.msg_iovlen = 1;
+
+	/* ancillary data: room for exactly one transferred fd */
+	msghdr.msg_control = cmsgbuf;
+	msghdr.msg_controllen = CMSG_SPACE(sizeof(int));
+
+	/* restart the syscall if interrupted by a signal */
+	while (1) {
+		ret = recvmsg(sock, &msghdr, 0);
+		if (ret == -1 && errno == EINTR)
+			continue;
+		else
+			break;
+	}
+
+	if (ret == -1)
+		return ret;
+
+	cmsg = CMSG_FIRSTHDR(&msghdr);
+	if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
+	    cmsg->cmsg_type == SCM_RIGHTS) {
+		size_t totlen = cmsg->cmsg_len - CMSG_LEN(0);
+
+		/* never copy more than the destination can hold; the control
+		 * buffer is sized for a single int anyway.
+		 */
+		if (totlen > sizeof(recv_fd))
+			totlen = sizeof(recv_fd);
+		memcpy(&recv_fd, CMSG_DATA(cmsg), totlen);
+	}
+	return recv_fd;
+}
+
+/* Tests if the receiver supports accepting connections. Returns positive on
+ * success, 0 if not possible, negative if the socket is non-recoverable. In
+ * practice zero is never returned since we don't support suspending sockets.
+ * The real test consists in verifying we have a connected SOCK_STREAM of
+ * family AF_UNIX.
+ */
+static int sockpair_accepting_conn(const struct receiver *rx)
+{
+	struct sockaddr addr;
+	socklen_t optlen;
+	int opt;
+
+	/* must be a stream socket... */
+	optlen = sizeof(opt);
+	if (getsockopt(rx->fd, SOL_SOCKET, SO_TYPE, &opt, &optlen) == -1 ||
+	    opt != SOCK_STREAM)
+		return -1;
+
+	/* ... of the AF_UNIX family... */
+	optlen = sizeof(addr);
+	if (getsockname(rx->fd, &addr, &optlen) != 0 ||
+	    addr.sa_family != AF_UNIX)
+		return -1;
+
+	/* ... and an established one, not a listening socket */
+	optlen = sizeof(opt);
+	if (getsockopt(rx->fd, SOL_SOCKET, SO_ACCEPTCONN, &opt, &optlen) == -1 ||
+	    opt)
+		return -1;
+
+	return 1;
+}
+
+/* Accept an incoming connection from listener <l>, and return it, as well as
+ * a CO_AC_* status code into <status> if not null. Null is returned on error.
+ * <l> must be a valid listener with a valid frontend.
+ */
+struct connection *sockpair_accept_conn(struct listener *l, int *status)
+{
+	struct proxy *p = l->bind_conf->frontend;
+	struct connection *conn = NULL;
+	int ret;
+	int cfd;
+
+	/* the "accepted" socket is in fact an fd transferred by the peer over
+	 * the listening socketpair; it is already connected when received.
+	 */
+	if ((cfd = recv_fd_uxst(l->rx.fd)) != -1)
+		fd_set_nonblock(cfd);
+
+	if (likely(cfd != -1)) {
+		/* Perfect, the connection was accepted */
+		conn = conn_new(&l->obj_type);
+		if (!conn)
+			goto fail_conn;
+
+		if (!sockaddr_alloc(&conn->src, NULL, 0))
+			goto fail_addr;
+
+		/* just like with UNIX sockets, only the family is filled */
+		conn->src->ss_family = AF_UNIX;
+		conn->handle.fd = cfd;
+		ret = CO_AC_DONE;
+		goto done;
+	}
+
+	/* no fd received: map errno (from recvmsg) to a CO_AC_* status */
+	switch (errno) {
+#if defined(EWOULDBLOCK) && defined(EAGAIN) && EWOULDBLOCK != EAGAIN
+	case EWOULDBLOCK:
+#endif
+	case EAGAIN:
+		ret = CO_AC_DONE; /* nothing more to accept */
+		if (fdtab[l->rx.fd].state & (FD_POLL_HUP|FD_POLL_ERR)) {
+			/* the listening socket might have been disabled in a shared
+			 * process and we're a collateral victim. We'll just pause for
+			 * a while in case it comes back. In the mean time, we need to
+			 * clear this sticky flag.
+			 */
+			_HA_ATOMIC_AND(&fdtab[l->rx.fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
+			ret = CO_AC_PAUSE;
+		}
+		fd_cant_recv(l->rx.fd);
+		break;
+
+	case EINVAL:
+		/* might be trying to accept on a shut fd (eg: soft stop) */
+		ret = CO_AC_PAUSE;
+		break;
+
+	case EINTR:
+	case ECONNABORTED:
+		ret = CO_AC_RETRY;
+		break;
+
+	case ENFILE:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	case EMFILE:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	case ENOBUFS:
+	case ENOMEM:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	default:
+		/* unexpected result, let's give up and let other tasks run */
+		ret = CO_AC_YIELD;
+	}
+ done:
+	if (status)
+		*status = ret;
+	return conn;
+
+ fail_addr:
+	conn_free(conn);
+	conn = NULL;
+ fail_conn:
+	/* allocation shortage: pause accepting for a while */
+	ret = CO_AC_PAUSE;
+	goto done;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
new file mode 100644
index 0000000..45ce27f
--- /dev/null
+++ b/src/proto_tcp.c
@@ -0,0 +1,834 @@
+/*
+ * AF_INET/AF_INET6 SOCK_STREAM protocol layer (tcp)
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/port_range.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/tools.h>
+
+
+static int tcp_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static int tcp_suspend_receiver(struct receiver *rx);
+static int tcp_resume_receiver(struct receiver *rx);
+static void tcp_enable_listener(struct listener *listener);
+static void tcp_disable_listener(struct listener *listener);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Protocol descriptor for TCP over IPv4; registered at startup by the
+ * INITCALL below. Most handlers are the generic sock_* implementations.
+ */
+struct protocol proto_tcpv4 = {
+	.name = "tcpv4",
+
+	/* connection layer */
+	.xprt_type = PROTO_TYPE_STREAM,
+	.listen = tcp_bind_listener,
+	.enable = tcp_enable_listener,
+	.disable = tcp_disable_listener,
+	.add = default_add_listener,
+	.unbind = default_unbind_listener,
+	.suspend = default_suspend_listener,
+	.resume = default_resume_listener,
+	.accept_conn = sock_accept_conn,
+	.ctrl_init = sock_conn_ctrl_init,
+	.ctrl_close = sock_conn_ctrl_close,
+	.connect = tcp_connect_server,
+	.drain = sock_drain,
+	.check_events = sock_check_events,
+	.ignore_events = sock_ignore_events,
+
+	/* binding layer */
+	.rx_suspend = tcp_suspend_receiver,
+	.rx_resume = tcp_resume_receiver,
+
+	/* address family */
+	.fam = &proto_fam_inet4,
+
+	/* socket layer */
+	.proto_type = PROTO_TYPE_STREAM,
+	.sock_type = SOCK_STREAM,
+	.sock_prot = IPPROTO_TCP,
+	.rx_enable = sock_enable,
+	.rx_disable = sock_disable,
+	.rx_unbind = sock_unbind,
+	.rx_listening = sock_accepting_conn,
+	.default_iocb = sock_accept_iocb,
+	.receivers = LIST_HEAD_INIT(proto_tcpv4.receivers),
+	.nb_receivers = 0,
+#ifdef SO_REUSEPORT
+	.flags = PROTO_F_REUSEPORT_SUPPORTED,
+#endif
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_tcpv4);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Protocol descriptor for TCP over IPv6; identical to proto_tcpv4 except
+ * for the address family. Registered at startup by the INITCALL below.
+ */
+struct protocol proto_tcpv6 = {
+	.name = "tcpv6",
+
+	/* connection layer */
+	.xprt_type = PROTO_TYPE_STREAM,
+	.listen = tcp_bind_listener,
+	.enable = tcp_enable_listener,
+	.disable = tcp_disable_listener,
+	.add = default_add_listener,
+	.unbind = default_unbind_listener,
+	.suspend = default_suspend_listener,
+	.resume = default_resume_listener,
+	.accept_conn = sock_accept_conn,
+	.ctrl_init = sock_conn_ctrl_init,
+	.ctrl_close = sock_conn_ctrl_close,
+	.connect = tcp_connect_server,
+	.drain = sock_drain,
+	.check_events = sock_check_events,
+	.ignore_events = sock_ignore_events,
+
+	/* binding layer */
+	.rx_suspend = tcp_suspend_receiver,
+	.rx_resume = tcp_resume_receiver,
+
+	/* address family */
+	.fam = &proto_fam_inet6,
+
+	/* socket layer */
+	.proto_type = PROTO_TYPE_STREAM,
+	.sock_type = SOCK_STREAM,
+	.sock_prot = IPPROTO_TCP,
+	.rx_enable = sock_enable,
+	.rx_disable = sock_disable,
+	.rx_unbind = sock_unbind,
+	.rx_listening = sock_accepting_conn,
+	.default_iocb = sock_accept_iocb,
+	.receivers = LIST_HEAD_INIT(proto_tcpv6.receivers),
+	.nb_receivers = 0,
+#ifdef SO_REUSEPORT
+	.flags = PROTO_F_REUSEPORT_SUPPORTED,
+#endif
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_tcpv6);
+
+/* Binds ipv4/ipv6 address <local> to socket <fd>, unless <flags> is set, in which
+ * case we try to bind <remote>. <flags> is a 2-bit field consisting of :
+ *  - 0 : ignore remote address (may even be a NULL pointer)
+ *  - 1 : use provided address
+ *  - 2 : use provided port
+ *  - 3 : use both
+ *
+ * The only foreign binding method used here is linux_tproxy: we directly bind
+ * to the foreign address. If it is not supported by the platform, we fall
+ * back to binding the local address instead.
+ * This function returns 0 when everything's OK, 1 if it could not bind to the
+ * local address, 2 if it could not bind to the foreign address.
+ */
+int tcp_bind_socket(int fd, int flags, struct sockaddr_storage *local, struct sockaddr_storage *remote)
+{
+	struct sockaddr_storage bind_addr;
+	int foreign_ok = 0;
+	int ret;
+	/* per-thread cache of whether transparent binding works, so that we
+	 * don't retry a failing method for every single connection.
+	 */
+	static THREAD_LOCAL int ip_transp_working = 1;
+	static THREAD_LOCAL int ip6_transp_working = 1;
+
+	switch (local->ss_family) {
+	case AF_INET:
+		if (flags && ip_transp_working) {
+			/* This deserves some explanation. Some platforms will support
+			 * multiple combinations of certain methods, so we try the
+			 * supported ones until one succeeds.
+			 */
+			if (sock_inet4_make_foreign(fd))
+				foreign_ok = 1;
+			else
+				ip_transp_working = 0;
+		}
+		break;
+	case AF_INET6:
+		if (flags && ip6_transp_working) {
+			if (sock_inet6_make_foreign(fd))
+				foreign_ok = 1;
+			else
+				ip6_transp_working = 0;
+		}
+		break;
+	}
+
+	if (flags) {
+		/* build the foreign bind address from <remote>, taking only the
+		 * parts requested by <flags> (address and/or port).
+		 */
+		memset(&bind_addr, 0, sizeof(bind_addr));
+		bind_addr.ss_family = remote->ss_family;
+		switch (remote->ss_family) {
+		case AF_INET:
+			if (flags & 1)
+				((struct sockaddr_in *)&bind_addr)->sin_addr = ((struct sockaddr_in *)remote)->sin_addr;
+			if (flags & 2)
+				((struct sockaddr_in *)&bind_addr)->sin_port = ((struct sockaddr_in *)remote)->sin_port;
+			break;
+		case AF_INET6:
+			if (flags & 1)
+				((struct sockaddr_in6 *)&bind_addr)->sin6_addr = ((struct sockaddr_in6 *)remote)->sin6_addr;
+			if (flags & 2)
+				((struct sockaddr_in6 *)&bind_addr)->sin6_port = ((struct sockaddr_in6 *)remote)->sin6_port;
+			break;
+		default:
+			/* we don't want to try to bind to an unknown address family */
+			foreign_ok = 0;
+		}
+	}
+
+	/* allow fast reuse of the local source address */
+	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+	if (foreign_ok) {
+		if (is_inet_addr(&bind_addr)) {
+			ret = bind(fd, (struct sockaddr *)&bind_addr, get_addr_len(&bind_addr));
+			if (ret < 0)
+				return 2;
+		}
+	}
+	else {
+		if (is_inet_addr(local)) {
+			ret = bind(fd, (struct sockaddr *)local, get_addr_len(local));
+			if (ret < 0)
+				return 1;
+		}
+	}
+
+	if (!flags)
+		return 0;
+
+	if (!foreign_ok)
+		/* we could not bind to a foreign address */
+		return 2;
+
+	return 0;
+}
+
+/*
+ * This function initiates a TCP connection establishment to the target assigned
+ * to connection <conn> using (si->{target,dst}). A source address may be
+ * pointed to by conn->src in case of transparent proxying. Normal source
+ * bind addresses are still determined locally (due to the possible need of a
+ * source port). conn->target may point either to a valid server or to a backend,
+ * depending on conn->target. Only OBJ_TYPE_PROXY and OBJ_TYPE_SERVER are
+ * supported. The <flags> argument is a bit field indicating whether there are
+ * data waiting for being sent (CONNECT_HAS_DATA), in order to adjust data write
+ * polling and on some platforms, the ability to avoid an empty initial ACK.
+ * It also allows the caller to force using a delayed ACK when establishing the
+ * connection :
+ *   - 0 = no delayed ACK unless data are advertised and backend has tcp-smart-connect
+ *   - CONNECT_DELACK_SMART_CONNECT = delayed ACK if backend has tcp-smart-connect, regardless of data
+ *   - CONNECT_DELACK_ALWAYS = delayed ACK regardless of backend options
+ *
+ * Note that a pending send_proxy message accounts for data.
+ *
+ * It can return one of :
+ *  - SF_ERR_NONE if everything's OK
+ *  - SF_ERR_SRVTO if there are no more servers
+ *  - SF_ERR_SRVCL if the connection was refused by the server
+ *  - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
+ *  - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+ *  - SF_ERR_INTERNAL for any other purely internal errors
+ * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+ *
+ * The connection's fd is inserted only when SF_ERR_NONE is returned, otherwise
+ * it's invalid and the caller has nothing to do.
+ */
+
+int tcp_connect_server(struct connection *conn, int flags)
+{
+	int fd;
+	struct server *srv;
+	struct proxy *be;
+	struct conn_src *src;
+	int use_fastopen = 0;
+	struct sockaddr_storage *addr;
+
+	BUG_ON(!conn->dst);
+
+	conn->flags |= CO_FL_WAIT_L4_CONN; /* connection in progress */
+
+	/* resolve the backend (and optional server) from the target */
+	switch (obj_type(conn->target)) {
+	case OBJ_TYPE_PROXY:
+		be = __objt_proxy(conn->target);
+		srv = NULL;
+		break;
+	case OBJ_TYPE_SERVER:
+		srv = __objt_server(conn->target);
+		be = srv->proxy;
+		/* Make sure we check that we have data before activating
+		 * TFO, or we could trigger a kernel issue whereby after
+		 * a successful connect() == 0, any subsequent connect()
+		 * will return EINPROGRESS instead of EISCONN.
+		 */
+		use_fastopen = (srv->flags & SRV_F_FASTOPEN) &&
+		               ((flags & (CONNECT_CAN_USE_TFO | CONNECT_HAS_DATA)) ==
+		                (CONNECT_CAN_USE_TFO | CONNECT_HAS_DATA));
+		break;
+	default:
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	fd = conn->handle.fd = sock_create_server_socket(conn);
+
+	if (fd == -1) {
+		/* map the errno to an err_code and emit an emergency log where
+		 * it denotes a resource shortage.
+		 */
+		qfprintf(stderr, "Cannot get a server socket.\n");
+
+		if (errno == ENFILE) {
+			conn->err_code = CO_ER_SYS_FDLIM;
+			send_log(be, LOG_EMERG,
+				 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
+				 be->id, global.maxsock);
+		}
+		else if (errno == EMFILE) {
+			conn->err_code = CO_ER_PROC_FDLIM;
+			send_log(be, LOG_EMERG,
+				 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
+				 be->id, global.maxsock);
+		}
+		else if (errno == ENOBUFS || errno == ENOMEM) {
+			conn->err_code = CO_ER_SYS_MEMLIM;
+			send_log(be, LOG_EMERG,
+				 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
+				 be->id, global.maxsock);
+		}
+		else if (errno == EAFNOSUPPORT || errno == EPROTONOSUPPORT) {
+			conn->err_code = CO_ER_NOPROTO;
+		}
+		else
+			conn->err_code = CO_ER_SOCK_ERR;
+
+		/* this is a resource error */
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_RESOURCE;
+	}
+
+	if (fd >= global.maxsock) {
+		/* past the configured socket limit; this is a configuration
+		 * issue (-n too low), reported to the user below.
+		 */
+		ha_alert("socket(): not enough free sockets. Raise -n argument. Giving up.\n");
+		close(fd);
+		conn->err_code = CO_ER_CONF_FDLIM;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_PRXCOND; /* it is a configuration limit */
+	}
+
+	if (fd_set_nonblock(fd) == -1 ||
+	    (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) == -1)) {
+		qfprintf(stderr,"Cannot set client socket to non blocking mode.\n");
+		close(fd);
+		conn->err_code = CO_ER_SOCK_ERR;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	/* in master process mode, keep the fd from leaking into exec'd children */
+	if (master == 1 && fd_set_cloexec(fd) == -1) {
+		ha_alert("Cannot set CLOEXEC on client socket.\n");
+		close(fd);
+		conn->err_code = CO_ER_SOCK_ERR;
+		conn->flags |= CO_FL_ERROR;
+		return SF_ERR_INTERNAL;
+	}
+
+	if (be->options & PR_O_TCP_SRV_KA) {
+		setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
+
+#ifdef TCP_KEEPCNT
+		if (be->srvtcpka_cnt)
+			setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &be->srvtcpka_cnt, sizeof(be->srvtcpka_cnt));
+#endif
+
+#ifdef TCP_KEEPIDLE
+		if (be->srvtcpka_idle)
+			setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &be->srvtcpka_idle, sizeof(be->srvtcpka_idle));
+#endif
+
+#ifdef TCP_KEEPINTVL
+		if (be->srvtcpka_intvl)
+			setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &be->srvtcpka_intvl, sizeof(be->srvtcpka_intvl));
+#endif
+	}
+
+	/* allow specific binding :
+	 * - server-specific at first
+	 * - proxy-specific next
+	 */
+	if (srv && srv->conn_src.opts & CO_SRC_BIND)
+		src = &srv->conn_src;
+	else if (be->conn_src.opts & CO_SRC_BIND)
+		src = &be->conn_src;
+	else
+		src = NULL;
+
+	if (src) {
+		/* note: this local <flags> deliberately shadows the function
+		 * argument; it encodes the tcp_bind_socket() 2-bit field.
+		 */
+		int ret, flags = 0;
+
+		if (conn->src && is_inet_addr(conn->src)) {
+			switch (src->opts & CO_SRC_TPROXY_MASK) {
+			case CO_SRC_TPROXY_CLI:
+			case CO_SRC_TPROXY_ADDR:
+				flags = 3;
+				break;
+			case CO_SRC_TPROXY_CIP:
+			case CO_SRC_TPROXY_DYN:
+				flags = 1;
+				break;
+			}
+		}
+
+#ifdef SO_BINDTODEVICE
+		/* Note: this might fail if not CAP_NET_RAW */
+		if (src->iface_name)
+			setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, src->iface_name, src->iface_len + 1);
+#endif
+
+		if (src->sport_range) {
+			int attempts = 10; /* should be more than enough to find a spare port */
+			struct sockaddr_storage sa;
+
+			ret = 1;
+			memcpy(&sa, &src->source_addr, sizeof(sa));
+
+			do {
+				/* note: in case of retry, we may have to release a previously
+				 * allocated port, hence this loop's construct.
+				 */
+				port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+				fdinfo[fd].port_range = NULL;
+
+				if (!attempts)
+					break;
+				attempts--;
+
+				fdinfo[fd].local_port = port_range_alloc_port(src->sport_range);
+				if (!fdinfo[fd].local_port) {
+					conn->err_code = CO_ER_PORT_RANGE;
+					break;
+				}
+
+				fdinfo[fd].port_range = src->sport_range;
+				set_host_port(&sa, fdinfo[fd].local_port);
+
+				ret = tcp_bind_socket(fd, flags, &sa, conn->src);
+				if (ret != 0)
+					conn->err_code = CO_ER_CANT_BIND;
+			} while (ret != 0); /* binding NOK */
+		}
+		else {
+#ifdef IP_BIND_ADDRESS_NO_PORT
+			/* defer port allocation to connect() time to reduce
+			 * pressure on the ephemeral port space.
+			 */
+			static THREAD_LOCAL int bind_address_no_port = 1;
+			setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, (const void *) &bind_address_no_port, sizeof(int));
+#endif
+			ret = tcp_bind_socket(fd, flags, &src->source_addr, conn->src);
+			if (ret != 0)
+				conn->err_code = CO_ER_CANT_BIND;
+		}
+
+		if (unlikely(ret != 0)) {
+			/* ret == 1: local bind failed; ret == 2: tproxy bind failed */
+			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+			fdinfo[fd].port_range = NULL;
+			close(fd);
+
+			if (ret == 1) {
+				ha_alert("Cannot bind to source address before connect() for backend %s. Aborting.\n",
+					 be->id);
+				send_log(be, LOG_EMERG,
+					 "Cannot bind to source address before connect() for backend %s.\n",
+					 be->id);
+			} else {
+				ha_alert("Cannot bind to tproxy source address before connect() for backend %s. Aborting.\n",
+					 be->id);
+				send_log(be, LOG_EMERG,
+					 "Cannot bind to tproxy source address before connect() for backend %s.\n",
+					 be->id);
+			}
+			conn->flags |= CO_FL_ERROR;
+			return SF_ERR_RESOURCE;
+		}
+	}
+
+#if defined(TCP_QUICKACK)
+	/* disabling tcp quick ack now allows the first request to leave the
+	 * machine with the first ACK. We only do this if there are pending
+	 * data in the buffer.
+	 */
+	if (flags & (CONNECT_DELACK_ALWAYS) ||
+	    ((flags & CONNECT_DELACK_SMART_CONNECT ||
+	      (flags & CONNECT_HAS_DATA) || conn->send_proxy_ofs) &&
+	     (be->options2 & PR_O2_SMARTCON)))
+		setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &zero, sizeof(zero));
+#endif
+
+#ifdef TCP_USER_TIMEOUT
+	/* there is not much more we can do here when it fails, it's still minor */
+	if (srv && srv->tcp_ut)
+		setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &srv->tcp_ut, sizeof(srv->tcp_ut));
+#endif
+
+	if (use_fastopen) {
+#if defined(TCP_FASTOPEN_CONNECT)
+		setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
+#endif
+	}
+	if (global.tune.server_sndbuf)
+		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &global.tune.server_sndbuf, sizeof(global.tune.server_sndbuf));
+
+	if (global.tune.server_rcvbuf)
+		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &global.tune.server_rcvbuf, sizeof(global.tune.server_rcvbuf));
+
+	/* NOTE(review): when CO_FL_SOCKS4 is set, the target is presumably
+	 * always a server (srv != NULL), otherwise srv->socks4_addr would be
+	 * a NULL dereference here -- confirm against the SOCKS4 setup code.
+	 */
+	addr = (conn->flags & CO_FL_SOCKS4) ? &srv->socks4_addr : conn->dst;
+	if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
+		if (errno == EINPROGRESS || errno == EALREADY) {
+			/* common case, let's wait for connect status */
+			conn->flags |= CO_FL_WAIT_L4_CONN;
+		}
+		else if (errno == EISCONN) {
+			/* should normally not happen but if so, indicates that it's OK */
+			conn->flags &= ~CO_FL_WAIT_L4_CONN;
+		}
+		else if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRINUSE || errno == EADDRNOTAVAIL) {
+			char *msg;
+			if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRNOTAVAIL) {
+				msg = "no free ports";
+				conn->err_code = CO_ER_FREE_PORTS;
+			}
+			else {
+				msg = "local address already in use";
+				conn->err_code = CO_ER_ADDR_INUSE;
+			}
+
+			qfprintf(stderr,"Connect() failed for backend %s: %s.\n", be->id, msg);
+			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+			fdinfo[fd].port_range = NULL;
+			close(fd);
+			send_log(be, LOG_ERR, "Connect() failed for backend %s: %s.\n", be->id, msg);
+			conn->flags |= CO_FL_ERROR;
+			return SF_ERR_RESOURCE;
+		} else if (errno == ETIMEDOUT) {
+			//qfprintf(stderr,"Connect(): ETIMEDOUT");
+			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+			fdinfo[fd].port_range = NULL;
+			close(fd);
+			conn->err_code = CO_ER_SOCK_ERR;
+			conn->flags |= CO_FL_ERROR;
+			return SF_ERR_SRVTO;
+		} else {
+			// (errno == ECONNREFUSED || errno == ENETUNREACH || errno == EACCES || errno == EPERM)
+			//qfprintf(stderr,"Connect(): %d", errno);
+			port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
+			fdinfo[fd].port_range = NULL;
+			close(fd);
+			conn->err_code = CO_ER_SOCK_ERR;
+			conn->flags |= CO_FL_ERROR;
+			return SF_ERR_SRVCL;
+		}
+	}
+	else {
+		/* connect() == 0, this is great! */
+		conn->flags &= ~CO_FL_WAIT_L4_CONN;
+	}
+
+	conn_ctrl_init(conn); /* registers the FD */
+	HA_ATOMIC_OR(&fdtab[fd].state, FD_LINGER_RISK); /* close hard if needed */
+
+	/* while waiting for L4 establishment, poll for write readiness */
+	if (conn->flags & CO_FL_WAIT_L4_CONN) {
+		fd_want_send(fd);
+		fd_cant_send(fd);
+		fd_cant_recv(fd);
+	}
+
+	return SF_ERR_NONE; /* connection is OK */
+}
+
+/* This function tries to bind a TCPv4/v6 listener. It may return a warning or
+ * an error message in <errmsg> if the message is at most <errlen> bytes long
+ * (including '\0'). Note that <errmsg> may be NULL if <errlen> is also zero.
+ * The return value is composed from ERR_ABORT, ERR_WARN,
+ * ERR_ALERT, ERR_RETRYABLE and ERR_FATAL. ERR_NONE indicates that everything
+ * was alright and that no message was returned. ERR_RETRYABLE means that an
+ * error occurred but that it may vanish after a retry (eg: port in use), and
+ * ERR_FATAL indicates a non-fixable error. ERR_WARN and ERR_ALERT do not alter
+ * the meaning of the error, but just indicate that a message is present which
+ * should be displayed with the respective level. Last, ERR_ABORT indicates
+ * that it's pointless to try to start other listeners. No error message is
+ * returned if errlen is NULL.
+ */
+int tcp_bind_listener(struct listener *listener, char *errmsg, int errlen)
+{
+	int fd, err;
+	int ready;
+	struct buffer *msg = alloc_trash_chunk();
+
+	err = ERR_NONE;
+
+	if (!msg) {
+		if (errlen)
+			snprintf(errmsg, errlen, "out of memory");
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	/* ensure we never return garbage */
+	if (errlen)
+		*errmsg = 0;
+
+	if (listener->state != LI_ASSIGNED) {
+		/* already bound; release the trash chunk before this early
+		 * return, otherwise it would leak.
+		 */
+		free_trash_chunk(msg);
+		return ERR_NONE;
+	}
+
+	if (!(listener->rx.flags & RX_F_BOUND)) {
+		chunk_appendf(msg, "%sreceiving socket not bound", msg->data ? ", " : "");
+		/* an error message is emitted, so the return code must carry
+		 * an error level, as documented in the contract above.
+		 */
+		err |= ERR_FATAL | ERR_ALERT;
+		goto tcp_return;
+	}
+
+	if (listener->rx.flags & RX_F_MUST_DUP)
+		goto done;
+
+	fd = listener->rx.fd;
+
+	if (listener->bind_conf->options & BC_O_NOLINGER)
+		setsockopt(fd, SOL_SOCKET, SO_LINGER, &nolinger, sizeof(struct linger));
+	else {
+		/* restore default lingering if the socket was inherited with
+		 * lingering disabled or a zero timeout.
+		 */
+		struct linger tmplinger;
+		socklen_t len = sizeof(tmplinger);
+		if (getsockopt(fd, SOL_SOCKET, SO_LINGER, &tmplinger, &len) == 0 &&
+		    (tmplinger.l_onoff == 1 || tmplinger.l_linger == 0)) {
+			tmplinger.l_onoff = 0;
+			tmplinger.l_linger = 0;
+			setsockopt(fd, SOL_SOCKET, SO_LINGER, &tmplinger,
+			           sizeof(tmplinger));
+		}
+	}
+
+#if defined(TCP_MAXSEG)
+	if (listener->bind_conf->maxseg > 0) {
+		if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG,
+			       &listener->bind_conf->maxseg, sizeof(listener->bind_conf->maxseg)) == -1) {
+			chunk_appendf(msg, "%scannot set MSS to %d", msg->data ? ", " : "", listener->bind_conf->maxseg);
+			err |= ERR_WARN;
+		}
+	} else {
+		/* we may want to try to restore the default MSS if the socket was inherited */
+		int tmpmaxseg = -1;
+		int defaultmss;
+		socklen_t len = sizeof(tmpmaxseg);
+
+		if (listener->rx.addr.ss_family == AF_INET)
+			defaultmss = sock_inet_tcp_maxseg_default;
+		else
+			defaultmss = sock_inet6_tcp_maxseg_default;
+
+		getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &tmpmaxseg, &len);
+		if (defaultmss > 0 &&
+		    tmpmaxseg != defaultmss &&
+		    setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &defaultmss, sizeof(defaultmss)) == -1) {
+			chunk_appendf(msg, "%scannot set MSS to %d", msg->data ? ", " : "", defaultmss);
+			err |= ERR_WARN;
+		}
+	}
+#endif
+#if defined(TCP_USER_TIMEOUT)
+	if (listener->bind_conf->tcp_ut) {
+		if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
+			       &listener->bind_conf->tcp_ut, sizeof(listener->bind_conf->tcp_ut)) == -1) {
+			chunk_appendf(msg, "%scannot set TCP User Timeout", msg->data ? ", " : "");
+			err |= ERR_WARN;
+		}
+	} else
+		setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &zero,
+			   sizeof(zero));
+#endif
+#if defined(TCP_DEFER_ACCEPT)
+	if (listener->bind_conf->options & BC_O_DEF_ACCEPT) {
+		/* defer accept by up to one second */
+		int accept_delay = 1;
+		if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &accept_delay, sizeof(accept_delay)) == -1) {
+			chunk_appendf(msg, "%scannot enable DEFER_ACCEPT", msg->data ? ", " : "");
+			err |= ERR_WARN;
+		}
+	} else
+		setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &zero,
+			   sizeof(zero));
+#endif
+#if defined(TCP_FASTOPEN)
+	if (listener->bind_conf->options & BC_O_TCP_FO) {
+		/* TFO needs a queue length, let's use the configured backlog */
+		int qlen = listener_backlog(listener);
+		if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) == -1) {
+			chunk_appendf(msg, "%scannot enable TCP_FASTOPEN", msg->data ? ", " : "");
+			err |= ERR_WARN;
+		}
+	} else {
+		socklen_t len;
+		int qlen;
+		len = sizeof(qlen);
+		/* Only disable fast open if it was enabled, we don't want
+		 * the kernel to create a fast open queue if there's none.
+		 */
+		if (getsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, &len) == 0 &&
+		    qlen != 0) {
+			if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &zero,
+				       sizeof(zero)) == -1) {
+				chunk_appendf(msg, "%scannot disable TCP_FASTOPEN", msg->data ? ", " : "");
+				err |= ERR_WARN;
+			}
+		}
+	}
+#endif
+
+	ready = sock_accepting_conn(&listener->rx) > 0;
+
+	if (!ready && /* only listen if not already done by external process */
+	    listen(fd, listener_backlog(listener)) == -1) {
+		err |= ERR_RETRYABLE | ERR_ALERT;
+		chunk_appendf(msg, "%scannot listen to socket", msg->data ? ", " : "");
+		goto tcp_close_return;
+	}
+
+#if !defined(TCP_DEFER_ACCEPT) && defined(SO_ACCEPTFILTER)
+	/* the socket needs to listen first */
+	if (listener->bind_conf->options & BC_O_DEF_ACCEPT) {
+		struct accept_filter_arg accept;
+		memset(&accept, 0, sizeof(accept));
+		strlcpy2(accept.af_name, "dataready", sizeof(accept.af_name));
+		if (setsockopt(fd, SOL_SOCKET, SO_ACCEPTFILTER, &accept, sizeof(accept)) == -1) {
+			chunk_appendf(msg, "%scannot enable ACCEPT_FILTER", msg->data ? ", " : "");
+			err |= ERR_WARN;
+		}
+	}
+#endif
+#if defined(TCP_QUICKACK)
+	if (listener->bind_conf->options & BC_O_NOQUICKACK)
+		setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &zero, sizeof(zero));
+	else
+		setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
+#endif
+
+ done:
+	/* the socket is ready */
+	listener_set_state(listener, LI_LISTEN);
+	goto tcp_return;
+
+ tcp_close_return:
+	free_trash_chunk(msg);
+	msg = NULL;
+	close(fd);
+ tcp_return:
+	/* format the pending message (if any) into <errmsg> with the
+	 * listener's address, then release the trash chunk.
+	 */
+	if (msg && errlen && msg->data) {
+		char pn[INET6_ADDRSTRLEN];
+
+		addr_to_str(&listener->rx.addr, pn, sizeof(pn));
+		snprintf(errmsg, errlen, "%s for [%s:%d]", msg->area, pn, get_host_port(&listener->rx.addr));
+	}
+	free_trash_chunk(msg);
+	msg = NULL;
+	return err;
+}
+
+/* Enable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid. This merely re-arms read polling on the listening fd.
+ */
+static void tcp_enable_listener(struct listener *l)
+{
+	fd_want_recv_safe(l->rx.fd);
+}
+
+/* Disable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid. This merely stops read polling on the listening fd; the
+ * socket itself remains bound and listening.
+ */
+static void tcp_disable_listener(struct listener *l)
+{
+	fd_stop_recv(l->rx.fd);
+}
+
+/* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly suspended. Note that inherited FDs
+ * are neither suspended nor resumed, we only enable/disable polling on them.
+ */
+static int tcp_suspend_receiver(struct receiver *rx)
+{
+	/* connect(AF_UNSPEC) is the portable way to dissociate a TCP
+	 * listening socket so that it stops accepting connections.
+	 */
+	const struct sockaddr sa = { .sa_family = AF_UNSPEC };
+	int ret;
+
+	/* We never disconnect a shared FD otherwise we'd break it in the
+	 * parent process and any possible subsequent worker inheriting it.
+	 * Thus we just stop receiving from it.
+	 */
+	if (rx->flags & RX_F_INHERITED)
+		goto done;
+
+	if (connect(rx->fd, &sa, sizeof(sa)) < 0)
+		goto check_already_done;
+ done:
+	fd_stop_recv(rx->fd);
+	return 1;
+
+ check_already_done:
+	/* in case the connect(AF_UNSPEC) above fails, it might be because
+	 * we're dealing with a socket that is shared with other processes
+	 * doing the same. Let's check if it's still accepting connections.
+	 */
+	ret = sock_accepting_conn(rx);
+	if (ret <= 0) {
+		/* unrecoverable or paused by another process */
+		fd_stop_recv(rx->fd);
+		return ret == 0;
+	}
+
+	/* still listening, that's not good */
+	return -1;
+}
+
+/* Resume a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly resumed. Note that inherited FDs
+ * are neither suspended nor resumed, we only enable/disable polling on them.
+ */
+static int tcp_resume_receiver(struct receiver *rx)
+{
+	/* recover the owning listener from the embedded receiver */
+	struct listener *l = LIST_ELEM(rx, struct listener *, rx);
+
+	if (rx->fd < 0)
+		return 0;
+
+	/* inherited FDs were never dissociated; for the others, listen()
+	 * re-enables accepting after the suspend's connect(AF_UNSPEC).
+	 */
+	if ((rx->flags & RX_F_INHERITED) || listen(rx->fd, listener_backlog(l)) == 0) {
+		fd_want_recv(l->rx.fd);
+		return 1;
+	}
+	return -1;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_udp.c b/src/proto_udp.c
new file mode 100644
index 0000000..9855974
--- /dev/null
+++ b/src/proto_udp.c
@@ -0,0 +1,247 @@
+/*
+ * UDP protocol layer on top of AF_INET/AF_INET6
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Partial merge by Emeric Brun <ebrun@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/udp.h>
+#include <netinet/in.h>
+
+#include <haproxy/fd.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/port_range.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_udp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/server.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+
+static int udp_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static void udp_enable_listener(struct listener *listener);
+static void udp_disable_listener(struct listener *listener);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+struct protocol proto_udp4 = {
+ .name = "udp4",
+
+ /* connection layer */
+ .xprt_type = PROTO_TYPE_DGRAM,
+ .listen = udp_bind_listener,
+ .enable = udp_enable_listener,
+ .disable = udp_disable_listener,
+ .add = default_add_listener,
+ .unbind = default_unbind_listener,
+ .suspend = default_suspend_listener,
+ .resume = default_resume_listener,
+
+ /* binding layer */
+ .rx_suspend = udp_suspend_receiver,
+ .rx_resume = udp_resume_receiver,
+
+ /* address family */
+ .fam = &proto_fam_inet4,
+
+ /* socket layer */
+ .proto_type = PROTO_TYPE_DGRAM,
+ .sock_type = SOCK_DGRAM,
+ .sock_prot = IPPROTO_UDP,
+ .rx_enable = sock_enable,
+ .rx_disable = sock_disable,
+ .rx_unbind = sock_unbind,
+ .receivers = LIST_HEAD_INIT(proto_udp4.receivers),
+ .nb_receivers = 0,
+#ifdef SO_REUSEPORT
+ .flags = PROTO_F_REUSEPORT_SUPPORTED,
+#endif
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_udp4);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+struct protocol proto_udp6 = {
+ .name = "udp6",
+
+ /* connection layer */
+ .xprt_type = PROTO_TYPE_DGRAM,
+ .listen = udp_bind_listener,
+ .enable = udp_enable_listener,
+ .disable = udp_disable_listener,
+ .add = default_add_listener,
+ .unbind = default_unbind_listener,
+ .suspend = default_suspend_listener,
+ .resume = default_resume_listener,
+
+ /* binding layer */
+ .rx_suspend = udp_suspend_receiver,
+ .rx_resume = udp_resume_receiver,
+
+ /* address family */
+ .fam = &proto_fam_inet6,
+
+ /* socket layer */
+ .proto_type = PROTO_TYPE_DGRAM,
+ .sock_type = SOCK_DGRAM,
+ .sock_prot = IPPROTO_UDP,
+ .rx_enable = sock_enable,
+ .rx_disable = sock_disable,
+ .rx_unbind = sock_unbind,
+ .receivers = LIST_HEAD_INIT(proto_udp6.receivers),
+ .nb_receivers = 0,
+#ifdef SO_REUSEPORT
+ .flags = PROTO_F_REUSEPORT_SUPPORTED,
+#endif
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_udp6);
+
+/* This function tries to bind a UDPv4/v6 listener. It may return a warning or
+ * an error message in <errmsg> if the message is at most <errlen> bytes long
+ * (including '\0'). Note that <errmsg> may be NULL if <errlen> is also zero.
+ * The return value is composed from ERR_ABORT, ERR_WARN,
+ * ERR_ALERT, ERR_RETRYABLE and ERR_FATAL. ERR_NONE indicates that everything
+ * was alright and that no message was returned. ERR_RETRYABLE means that an
+ * error occurred but that it may vanish after a retry (eg: port in use), and
+ * ERR_FATAL indicates a non-fixable error. ERR_WARN and ERR_ALERT do not alter
+ * the meaning of the error, but just indicate that a message is present which
+ * should be displayed with the respective level. Last, ERR_ABORT indicates
+ * that it's pointless to try to start other listeners. No error message is
+ * returned if errlen is zero.
+ */
+int udp_bind_listener(struct listener *listener, char *errmsg, int errlen)
+{
+ int err = ERR_NONE;
+ char *msg = NULL;
+
+ /* ensure we never return garbage */
+ if (errlen)
+ *errmsg = 0;
+
+ if (listener->state != LI_ASSIGNED)
+ return ERR_NONE; /* already bound */
+
+ if (!(listener->rx.flags & RX_F_BOUND)) {
+ msg = "receiving socket not bound";
+ goto udp_return;
+ }
+
+ /* we may want to adjust the socket buffers (tune.rcvbuf/sndbuf.frontend) */
+ if (global.tune.frontend_rcvbuf)
+ setsockopt(listener->rx.fd, SOL_SOCKET, SO_RCVBUF, &global.tune.frontend_rcvbuf, sizeof(global.tune.frontend_rcvbuf));
+
+ if (global.tune.frontend_sndbuf)
+ setsockopt(listener->rx.fd, SOL_SOCKET, SO_SNDBUF, &global.tune.frontend_sndbuf, sizeof(global.tune.frontend_sndbuf));
+
+ listener_set_state(listener, LI_LISTEN);
+
+ udp_return:
+ if (msg && errlen) {
+ char pn[INET6_ADDRSTRLEN];
+
+ addr_to_str(&listener->rx.addr, pn, sizeof(pn));
+ snprintf(errmsg, errlen, "%s for [%s:%d]", msg, pn, get_host_port(&listener->rx.addr));
+ }
+ return err;
+}
+
+/* Enable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void udp_enable_listener(struct listener *l)
+{
+ fd_want_recv_safe(l->rx.fd);
+}
+
+/* Disable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void udp_disable_listener(struct listener *l)
+{
+ fd_stop_recv(l->rx.fd);
+}
+
+/* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly suspended.
+ * The principle is a bit ugly but works well, at least on Linux: in order to
+ * suspend the receiver, we want it to stop receiving traffic, which means that
+ * the socket must be unhashed from the kernel's socket table. The simple way
+ * to do this is to connect to any address that is reachable and will not be
+ * used by regular traffic, and a great one is reconnecting to self. Note that
+ * inherited FDs are neither suspended nor resumed, we only enable/disable
+ * polling on them.
+ */
+int udp_suspend_receiver(struct receiver *rx)
+{
+ struct sockaddr_storage ss;
+ socklen_t len = sizeof(ss);
+
+ if (rx->fd < 0)
+ return 0;
+
+ /* we never do that with a shared FD otherwise we'd break it in the
+ * parent process and any possible subsequent worker inheriting it.
+ */
+ if (rx->flags & RX_F_INHERITED)
+ goto done;
+
+ if (getsockname(rx->fd, (struct sockaddr *)&ss, &len) < 0)
+ return -1;
+
+ if (connect(rx->fd, (struct sockaddr *)&ss, len) < 0)
+ return -1;
+ done:
+ /* not necessary but may make debugging clearer */
+ fd_stop_recv(rx->fd);
+ return 1;
+}
+
+/* Resume a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly resumed.
+ * The principle is to reverse the change above, we'll break the connection by
+ * connecting to AF_UNSPEC. The association breaks and the socket starts to
+ * receive from everywhere again. Note that inherited FDs are neither suspended
+ * nor resumed, we only enable/disable polling on them.
+ */
+int udp_resume_receiver(struct receiver *rx)
+{
+ const struct sockaddr sa = { .sa_family = AF_UNSPEC };
+
+ if (rx->fd < 0)
+ return 0;
+
+ if (!(rx->flags & RX_F_INHERITED) && connect(rx->fd, &sa, sizeof(sa)) < 0)
+ return -1;
+
+ fd_want_recv(rx->fd);
+ return 1;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_uxdg.c b/src/proto_uxdg.c
new file mode 100644
index 0000000..43cbe5a
--- /dev/null
+++ b/src/proto_uxdg.c
@@ -0,0 +1,159 @@
+/*
+ * DGRAM protocol layer on top of AF_UNIX
+ *
+ * Copyright 2020 HAProxy Technologies, Emeric Brun <ebrun@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include <haproxy/fd.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/protocol.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_unix.h>
+#include <haproxy/tools.h>
+
+static int uxdg_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static void uxdg_enable_listener(struct listener *listener);
+static void uxdg_disable_listener(struct listener *listener);
+static int uxdg_suspend_receiver(struct receiver *rx);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+struct protocol proto_uxdg = {
+ .name = "uxdg",
+
+ /* connection layer */
+ .xprt_type = PROTO_TYPE_DGRAM,
+ .listen = uxdg_bind_listener,
+ .enable = uxdg_enable_listener,
+ .disable = uxdg_disable_listener,
+ .add = default_add_listener,
+ .unbind = default_unbind_listener,
+ .suspend = default_suspend_listener,
+ .resume = default_resume_listener,
+
+ /* binding layer */
+ .rx_suspend = uxdg_suspend_receiver,
+
+ /* address family */
+ .fam = &proto_fam_unix,
+
+ /* socket layer */
+ .proto_type = PROTO_TYPE_DGRAM,
+ .sock_type = SOCK_DGRAM,
+ .sock_prot = 0,
+ .rx_enable = sock_enable,
+ .rx_disable = sock_disable,
+ .rx_unbind = sock_unbind,
+ .receivers = LIST_HEAD_INIT(proto_uxdg.receivers),
+ .nb_receivers = 0,
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_uxdg);
+
+/* This function tries to bind dgram unix socket listener. It may return a warning or
+ * an error message in <errmsg> if the message is at most <errlen> bytes long
+ * (including '\0'). Note that <errmsg> may be NULL if <errlen> is also zero.
+ * The return value is composed from ERR_ABORT, ERR_WARN,
+ * ERR_ALERT, ERR_RETRYABLE and ERR_FATAL. ERR_NONE indicates that everything
+ * was alright and that no message was returned. ERR_RETRYABLE means that an
+ * error occurred but that it may vanish after a retry (eg: port in use), and
+ * ERR_FATAL indicates a non-fixable error. ERR_WARN and ERR_ALERT do not alter
+ * the meaning of the error, but just indicate that a message is present which
+ * should be displayed with the respective level. Last, ERR_ABORT indicates
+ * that it's pointless to try to start other listeners. No error message is
+ * returned if errlen is zero.
+ */
+int uxdg_bind_listener(struct listener *listener, char *errmsg, int errlen)
+{
+ int err = ERR_NONE;
+ char *msg = NULL;
+
+ /* ensure we never return garbage */
+ if (errlen)
+ *errmsg = 0;
+
+ if (listener->state != LI_ASSIGNED)
+ return ERR_NONE; /* already bound */
+
+ if (!(listener->rx.flags & RX_F_BOUND)) {
+ msg = "receiving socket not bound";
+ err |= ERR_FATAL | ERR_ALERT;
+ goto uxdg_return;
+ }
+
+ listener_set_state(listener, LI_LISTEN);
+
+ uxdg_return:
+ if (msg && errlen) {
+ char *path_str;
+
+ path_str = sa2str((struct sockaddr_storage *)&listener->rx.addr, 0, 0);
+ snprintf(errmsg, errlen, "%s for [%s]", msg, ((path_str) ? path_str : ""));
+ ha_free(&path_str);
+ }
+ return err;
+}
+
+/* Enable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void uxdg_enable_listener(struct listener *l)
+{
+ fd_want_recv_safe(l->rx.fd);
+}
+
+/* Disable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void uxdg_disable_listener(struct listener *l)
+{
+ fd_stop_recv(l->rx.fd);
+}
+
+/* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly suspended. For plain unix sockets
+ * we only disable the listener to prevent data from being handled but nothing
+ * more is done since currently it's the new process which handles the renaming.
+ * Abstract sockets are completely unbound and closed so there's no need to stop
+ * the poller.
+ */
+static int uxdg_suspend_receiver(struct receiver *rx)
+{
+ struct listener *l = LIST_ELEM(rx, struct listener *, rx);
+
+ if (((struct sockaddr_un *)&rx->addr)->sun_path[0]) {
+ uxdg_disable_listener(l);
+ return 1;
+ }
+
+ /* Listener's lock already held. Call lockless version of
+ * unbind_listener. */
+ do_unbind_listener(l);
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proto_uxst.c b/src/proto_uxst.c
new file mode 100644
index 0000000..7988e00
--- /dev/null
+++ b/src/proto_uxst.c
@@ -0,0 +1,372 @@
+/*
+ * UNIX SOCK_STREAM protocol layer (uxst)
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <time.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_uxst.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_unix.h>
+#include <haproxy/tools.h>
+#include <haproxy/version.h>
+
+
+static int uxst_bind_listener(struct listener *listener, char *errmsg, int errlen);
+static int uxst_connect_server(struct connection *conn, int flags);
+static void uxst_enable_listener(struct listener *listener);
+static void uxst_disable_listener(struct listener *listener);
+static int uxst_suspend_receiver(struct receiver *rx);
+
+/* Note: must not be declared <const> as its list will be overwritten */
+struct protocol proto_uxst = {
+ .name = "unix_stream",
+
+ /* connection layer */
+ .xprt_type = PROTO_TYPE_STREAM,
+ .listen = uxst_bind_listener,
+ .enable = uxst_enable_listener,
+ .disable = uxst_disable_listener,
+ .add = default_add_listener,
+ .unbind = default_unbind_listener,
+ .suspend = default_suspend_listener,
+ .resume = default_resume_listener,
+ .accept_conn = sock_accept_conn,
+ .ctrl_init = sock_conn_ctrl_init,
+ .ctrl_close = sock_conn_ctrl_close,
+ .connect = uxst_connect_server,
+ .drain = sock_drain,
+ .check_events = sock_check_events,
+ .ignore_events = sock_ignore_events,
+
+ /* binding layer */
+ .rx_suspend = uxst_suspend_receiver,
+
+ /* address family */
+ .fam = &proto_fam_unix,
+
+ /* socket layer */
+ .proto_type = PROTO_TYPE_STREAM,
+ .sock_type = SOCK_STREAM,
+ .sock_prot = 0,
+ .rx_enable = sock_enable,
+ .rx_disable = sock_disable,
+ .rx_unbind = sock_unbind,
+ .rx_listening = sock_accepting_conn,
+ .default_iocb = sock_accept_iocb,
+ .receivers = LIST_HEAD_INIT(proto_uxst.receivers),
+ .nb_receivers = 0,
+};
+
+INITCALL1(STG_REGISTER, protocol_register, &proto_uxst);
+
+/********************************
+ * 1) low-level socket functions
+ ********************************/
+
+
+/********************************
+ * 2) listener-oriented functions
+ ********************************/
+
+/* This function creates a UNIX socket associated to the listener. It changes
+ * the state from ASSIGNED to LISTEN. The socket is NOT enabled for polling.
+ * The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL. It
+ * may return a warning or an error message in <errmsg> if the message is at
+ * most <errlen> bytes long (including '\0'). Note that <errmsg> may be NULL if
+ * <errlen> is also zero.
+ */
+static int uxst_bind_listener(struct listener *listener, char *errmsg, int errlen)
+{
+ int fd, err;
+ int ready;
+ char *msg = NULL;
+
+ err = ERR_NONE;
+
+ /* ensure we never return garbage */
+ if (errlen)
+ *errmsg = 0;
+
+ if (listener->state != LI_ASSIGNED)
+ return ERR_NONE; /* already bound */
+
+ if (!(listener->rx.flags & RX_F_BOUND)) {
+ msg = "receiving socket not bound";
+ err |= ERR_FATAL | ERR_ALERT;
+ goto uxst_return;
+ }
+
+ if (listener->rx.flags & RX_F_MUST_DUP)
+ goto done;
+
+ fd = listener->rx.fd;
+ ready = sock_accepting_conn(&listener->rx) > 0;
+
+ if (!ready && /* only listen if not already done by external process */
+ listen(fd, listener_backlog(listener)) < 0) {
+ err |= ERR_FATAL | ERR_ALERT;
+ msg = "cannot listen to UNIX socket";
+ goto uxst_close_return;
+ }
+
+ done:
+ /* the socket is now listening */
+ listener_set_state(listener, LI_LISTEN);
+ return err;
+
+ uxst_close_return:
+ close(fd);
+ uxst_return:
+ if (msg && errlen) {
+ char *path_str;
+
+ path_str = sa2str((struct sockaddr_storage *)&listener->rx.addr, 0, 0);
+ snprintf(errmsg, errlen, "%s for [%s]", msg, ((path_str) ? path_str : ""));
+ ha_free(&path_str);
+ }
+ return err;
+}
+
+/* Enable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void uxst_enable_listener(struct listener *l)
+{
+ fd_want_recv_safe(l->rx.fd);
+}
+
+/* Disable receipt of incoming connections for listener <l>. The receiver must
+ * still be valid.
+ */
+static void uxst_disable_listener(struct listener *l)
+{
+ fd_stop_recv(l->rx.fd);
+}
+
+/* Suspend a receiver. Returns < 0 in case of failure, 0 if the receiver
+ * was totally stopped, or > 0 if correctly suspended. For plain unix sockets
+ * we only disable the listener to prevent data from being handled but nothing
+ * more is done since currently it's the new process which handles the renaming.
+ * Abstract sockets are completely unbound and closed so there's no need to stop
+ * the poller.
+ */
+static int uxst_suspend_receiver(struct receiver *rx)
+{
+ struct listener *l = LIST_ELEM(rx, struct listener *, rx);
+
+ if (((struct sockaddr_un *)&rx->addr)->sun_path[0]) {
+ uxst_disable_listener(l);
+ return 1;
+ }
+
+ /* Listener's lock already held. Call lockless version of
+ * unbind_listener. */
+ do_unbind_listener(l);
+ return 0;
+}
+
+
+/*
+ * This function initiates a UNIX connection establishment to the target assigned
+ * to connection <conn> using (si->{target,dst}). The source address is ignored
+ * and will be selected by the system. conn->target may point either to a valid
+ * server or to a backend, depending on conn->target. Only OBJ_TYPE_PROXY and
+ * OBJ_TYPE_SERVER are supported. The <data> parameter is a boolean indicating
+ * whether there are data waiting for being sent or not, in order to adjust data
+ * write polling on some platforms. The <delack> argument is ignored.
+ *
+ * Note that a pending send_proxy message accounts for data.
+ *
+ * It can return one of :
+ * - SF_ERR_NONE if everything's OK
+ * - SF_ERR_SRVTO if there are no more servers
+ * - SF_ERR_SRVCL if the connection was refused by the server
+ * - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
+ * - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+ * - SF_ERR_INTERNAL for any other purely internal errors
+ * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+ *
+ * The connection's fd is inserted only when SF_ERR_NONE is returned, otherwise
+ * it's invalid and the caller has nothing to do.
+ */
+static int uxst_connect_server(struct connection *conn, int flags)
+{
+ int fd;
+ struct server *srv;
+ struct proxy *be;
+
+ BUG_ON(!conn->dst);
+
+ switch (obj_type(conn->target)) {
+ case OBJ_TYPE_PROXY:
+ be = __objt_proxy(conn->target);
+ srv = NULL;
+ break;
+ case OBJ_TYPE_SERVER:
+ srv = __objt_server(conn->target);
+ be = srv->proxy;
+ break;
+ default:
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_INTERNAL;
+ }
+
+ if ((fd = conn->handle.fd = socket(PF_UNIX, SOCK_STREAM, 0)) == -1) {
+ qfprintf(stderr, "Cannot get a server socket.\n");
+
+ if (errno == ENFILE) {
+ conn->err_code = CO_ER_SYS_FDLIM;
+ send_log(be, LOG_EMERG,
+ "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
+ be->id, global.maxsock);
+ }
+ else if (errno == EMFILE) {
+ conn->err_code = CO_ER_PROC_FDLIM;
+ send_log(be, LOG_EMERG,
+ "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
+ be->id, global.maxsock);
+ }
+ else if (errno == ENOBUFS || errno == ENOMEM) {
+ conn->err_code = CO_ER_SYS_MEMLIM;
+ send_log(be, LOG_EMERG,
+ "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
+ be->id, global.maxsock);
+ }
+ else if (errno == EAFNOSUPPORT || errno == EPROTONOSUPPORT) {
+ conn->err_code = CO_ER_NOPROTO;
+ }
+ else
+ conn->err_code = CO_ER_SOCK_ERR;
+
+ /* this is a resource error */
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_RESOURCE;
+ }
+
+ if (fd >= global.maxsock) {
+ /* do not log anything there, it's a normal condition when this option
+ * is used to serialize connections to a server !
+ */
+ ha_alert("socket(): not enough free sockets. Raise -n argument. Giving up.\n");
+ close(fd);
+ conn->err_code = CO_ER_CONF_FDLIM;
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_PRXCOND; /* it is a configuration limit */
+ }
+
+ if (fd_set_nonblock(fd) == -1) {
+ qfprintf(stderr,"Cannot set client socket to non blocking mode.\n");
+ close(fd);
+ conn->err_code = CO_ER_SOCK_ERR;
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_INTERNAL;
+ }
+
+ if (master == 1 && fd_set_cloexec(fd) == -1) {
+ ha_alert("Cannot set CLOEXEC on client socket.\n");
+ close(fd);
+ conn->err_code = CO_ER_SOCK_ERR;
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_INTERNAL;
+ }
+
+ if (global.tune.server_sndbuf)
+ setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &global.tune.server_sndbuf, sizeof(global.tune.server_sndbuf));
+
+ if (global.tune.server_rcvbuf)
+ setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &global.tune.server_rcvbuf, sizeof(global.tune.server_rcvbuf));
+
+ if (connect(fd, (struct sockaddr *)conn->dst, get_addr_len(conn->dst)) == -1) {
+ if (errno == EINPROGRESS || errno == EALREADY) {
+ conn->flags |= CO_FL_WAIT_L4_CONN;
+ }
+ else if (errno == EISCONN) {
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ }
+ else if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRINUSE || errno == EADDRNOTAVAIL) {
+ char *msg;
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EADDRNOTAVAIL) {
+ msg = "can't connect to destination unix socket, check backlog size on the server";
+ conn->err_code = CO_ER_FREE_PORTS;
+ }
+ else {
+ msg = "local address already in use";
+ conn->err_code = CO_ER_ADDR_INUSE;
+ }
+
+ qfprintf(stderr,"Connect() failed for backend %s: %s.\n", be->id, msg);
+ close(fd);
+ send_log(be, LOG_ERR, "Connect() failed for backend %s: %s.\n", be->id, msg);
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_RESOURCE;
+ }
+ else if (errno == ETIMEDOUT) {
+ close(fd);
+ conn->err_code = CO_ER_SOCK_ERR;
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_SRVTO;
+ }
+ else { // (errno == ECONNREFUSED || errno == ENETUNREACH || errno == EACCES || errno == EPERM)
+ close(fd);
+ conn->err_code = CO_ER_SOCK_ERR;
+ conn->flags |= CO_FL_ERROR;
+ return SF_ERR_SRVCL;
+ }
+ }
+ else {
+ /* connect() already succeeded, which is quite usual for unix
+ * sockets. Let's avoid a second connect() probe to complete it.
+ */
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ }
+
+ /* Prepare to send a few handshakes related to the on-wire protocol. */
+ if (conn->send_proxy_ofs)
+ conn->flags |= CO_FL_SEND_PROXY;
+
+ conn_ctrl_init(conn); /* registers the FD */
+ HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK); /* no need to disable lingering */
+
+ if (conn->flags & CO_FL_WAIT_L4_CONN) {
+ fd_want_send(fd);
+ fd_cant_send(fd);
+ fd_cant_recv(fd);
+ }
+
+ return SF_ERR_NONE; /* connection is OK */
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/protocol.c b/src/protocol.c
new file mode 100644
index 0000000..25ed6b7
--- /dev/null
+++ b/src/protocol.c
@@ -0,0 +1,309 @@
+/*
+ * Protocol registration functions.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <haproxy/api.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sock.h>
+#include <haproxy/tools.h>
+
+
+/* List head of all registered protocols */
+static struct list protocols = LIST_HEAD_INIT(protocols);
+struct protocol *__protocol_by_family[AF_CUST_MAX][PROTO_NUM_TYPES][2] __read_mostly = { };
+
+/* This is the global spinlock we may need to register/unregister listeners or
+ * protocols. Its main purpose is in fact to serialize the rare stop/deinit()
+ * phases.
+ */
+__decl_spinlock(proto_lock);
+
+/* Registers the protocol <proto> */
+void protocol_register(struct protocol *proto)
+{
+ int sock_domain = proto->fam->sock_domain;
+
+ BUG_ON(sock_domain < 0 || sock_domain >= AF_CUST_MAX);
+ BUG_ON(proto->proto_type >= PROTO_NUM_TYPES);
+
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ LIST_APPEND(&protocols, &proto->list);
+ __protocol_by_family[sock_domain]
+ [proto->proto_type]
+ [proto->xprt_type == PROTO_TYPE_DGRAM] = proto;
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* Unregisters the protocol <proto>. Note that all listeners must have
+ * previously been unbound.
+ */
+void protocol_unregister(struct protocol *proto)
+{
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ LIST_DELETE(&proto->list);
+ LIST_INIT(&proto->list);
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* clears flag <flag> on all protocols. */
+void protocol_clrf_all(uint flag)
+{
+ struct protocol *proto;
+
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list)
+ proto->flags &= ~flag;
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* sets flag <flag> on all protocols. */
+void protocol_setf_all(uint flag)
+{
+ struct protocol *proto;
+
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list)
+ proto->flags |= flag;
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* Checks if protocol <proto> supports PROTO_F flag <flag>. Returns zero if not,
+ * non-zero if supported. It may return a cached value from a previous test,
+ * and may run live tests then update the proto's flags to cache a result. It's
+ * better to call it only if needed so that it doesn't result in modules being
+ * loaded in case of a live test. It is only supposed to be used during boot.
+ */
+int protocol_supports_flag(struct protocol *proto, uint flag)
+{
+ if (flag == PROTO_F_REUSEPORT_SUPPORTED) {
+ int ret = 0;
+
+ /* check if the protocol supports SO_REUSEPORT */
+ if (!(_HA_ATOMIC_LOAD(&proto->flags) & PROTO_F_REUSEPORT_SUPPORTED))
+ return 0;
+
+ /* at least nobody said it was not supported */
+ if (_HA_ATOMIC_LOAD(&proto->flags) & PROTO_F_REUSEPORT_TESTED)
+ return 1;
+
+ /* run a live check */
+ ret = _sock_supports_reuseport(proto->fam, proto->sock_type, proto->sock_prot);
+ if (!ret)
+ _HA_ATOMIC_AND(&proto->flags, ~PROTO_F_REUSEPORT_SUPPORTED);
+
+ _HA_ATOMIC_OR(&proto->flags, PROTO_F_REUSEPORT_TESTED);
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef USE_QUIC
+/* Return 1 if QUIC protocol may be bound, 0 if no, depending on the tuning
+ * parameters.
+ */
+static inline int protocol_may_bind_quic(struct listener *l)
+{
+ if (global.tune.options & GTUNE_NO_QUIC)
+ return 0;
+ return 1;
+}
+#endif
+
+/* binds all listeners of all registered protocols. Returns a composition
+ * of ERR_NONE, ERR_RETRYABLE, ERR_FATAL.
+ */
+int protocol_bind_all(int verbose)
+{
+ struct protocol *proto;
+ struct listener *listener;
+ struct receiver *receiver;
+ char msg[1000];
+ char *errmsg;
+ int err, lerr;
+
+ err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry(receiver, &proto->receivers, proto_list) {
+ listener = LIST_ELEM(receiver, struct listener *, rx);
+#ifdef USE_QUIC
+ if ((proto == &proto_quic4 || proto == &proto_quic6) &&
+ !protocol_may_bind_quic(listener))
+ continue;
+#endif
+
+ lerr = proto->fam->bind(receiver, &errmsg);
+ err |= lerr;
+
+ /* errors are reported if <verbose> is set or if they are fatal */
+ if (verbose || (lerr & (ERR_FATAL | ERR_ABORT))) {
+ struct proxy *px = listener->bind_conf->frontend;
+
+ if (lerr & ERR_ALERT)
+ ha_alert("Binding [%s:%d] for %s %s: %s\n",
+ listener->bind_conf->file, listener->bind_conf->line,
+ proxy_type_str(px), px->id, errmsg);
+ else if (lerr & ERR_WARN)
+ ha_warning("Binding [%s:%d] for %s %s: %s\n",
+ listener->bind_conf->file, listener->bind_conf->line,
+ proxy_type_str(px), px->id, errmsg);
+ }
+ if (lerr != ERR_NONE)
+ ha_free(&errmsg);
+
+ if (lerr & ERR_ABORT)
+ break;
+
+ if (lerr & ~ERR_WARN)
+ continue;
+
+ /* for now there's still always a listening function */
+ BUG_ON(!proto->listen);
+ lerr = proto->listen(listener, msg, sizeof(msg));
+ err |= lerr;
+
+ if (verbose || (lerr & (ERR_FATAL | ERR_ABORT))) {
+ struct proxy *px = listener->bind_conf->frontend;
+
+ if (lerr & ERR_ALERT)
+ ha_alert("Starting [%s:%d] for %s %s: %s\n",
+ listener->bind_conf->file, listener->bind_conf->line,
+ proxy_type_str(px), px->id, msg);
+ else if (lerr & ERR_WARN)
+ ha_warning("Starting [%s:%d] for %s %s: %s\n",
+ listener->bind_conf->file, listener->bind_conf->line,
+ proxy_type_str(px), px->id, msg);
+ }
+ if (lerr & ERR_ABORT)
+ break;
+ }
+ if (err & ERR_ABORT)
+ break;
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ return err;
+}
+
+/* unbinds all listeners of all registered protocols. They are also closed.
+ * This must be performed before calling exit() in order to get a chance to
+ * remove file-system based sockets and pipes.
+ * Returns a composition of ERR_NONE, ERR_RETRYABLE, ERR_FATAL, ERR_ABORT.
+ */
+int protocol_unbind_all(void)
+{
+ struct protocol *proto;
+ struct listener *listener;
+ int err;
+
+ err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry(listener, &proto->receivers, rx.proto_list)
+ unbind_listener(listener);
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ return err;
+}
+
+/* stops all listeners of all registered protocols. This will normally catch
+ * every single listener, all protocols included. This is to be used during
+ * soft_stop() only. It does not return any error.
+ */
+void protocol_stop_now(void)
+{
+ struct protocol *proto;
+ struct listener *listener, *lback;
+
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry_safe(listener, lback, &proto->receivers, rx.proto_list)
+ stop_listener(listener, 0, 1, 0);
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+}
+
+/* suspends all listeners of all registered protocols. This is typically
+ * used on SIG_TTOU to release all listening sockets for the time needed to
+ * try to bind a new process. The listeners enter LI_PAUSED or LI_ASSIGNED.
+ * It returns ERR_NONE, with ERR_FATAL on failure.
+ */
+int protocol_pause_all(void)
+{
+ struct protocol *proto;
+ struct listener *listener;
+ int err;
+
+ err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry(listener, &proto->receivers, rx.proto_list)
+ if (!suspend_listener(listener, 0, 0))
+ err |= ERR_FATAL;
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ return err;
+}
+
+/* resumes all listeners of all registered protocols. This is typically used on
+ * SIG_TTIN to re-enable listening sockets after a new process failed to bind.
+ * The listeners switch to LI_READY/LI_FULL. It returns ERR_NONE, with ERR_FATAL
+ * on failure.
+ */
+int protocol_resume_all(void)
+{
+ struct protocol *proto;
+ struct listener *listener;
+ int err;
+
+ err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry(listener, &proto->receivers, rx.proto_list)
+ if (!resume_listener(listener, 0, 0))
+ err |= ERR_FATAL;
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ return err;
+}
+
+/* enables all listeners of all registered protocols. This is intended to be
+ * used after a fork() to enable reading on all file descriptors. Returns
+ * composition of ERR_NONE.
+ */
+int protocol_enable_all(void)
+{
+ struct protocol *proto;
+ struct listener *listener;
+
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+ list_for_each_entry(proto, &protocols, list) {
+ list_for_each_entry(listener, &proto->receivers, rx.proto_list)
+ enable_listener(listener);
+ }
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ return ERR_NONE;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/proxy.c b/src/proxy.c
new file mode 100644
index 0000000..ef95340
--- /dev/null
+++ b/src/proxy.c
@@ -0,0 +1,3451 @@
+/*
+ * Proxy variables and functions.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+
+#include <import/eb32tree.h>
+#include <import/ebistree.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/filters.h>
+#include <haproxy/global.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/http_ext.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/obj_type-t.h>
+#include <haproxy/peers.h>
+#include <haproxy/pool.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/server-t.h>
+#include <haproxy/signal.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+
+int listeners;	/* # of proxy listeners, set by cfgparse */
+struct proxy *proxies_list  = NULL;	/* list of all existing proxies */
+struct eb_root used_proxy_id = EB_ROOT;	/* list of proxy IDs in use */
+struct eb_root proxy_by_name = EB_ROOT;	/* tree of proxies sorted by name */
+struct eb_root defproxy_by_name = EB_ROOT; /* tree of default proxies sorted by name (dups possible) */
+unsigned int error_snapshot_id = 0;     /* global ID assigned to each error then incremented */
+
+/* CLI context used during "show servers {state|conn}" */
+struct show_srv_ctx {
+	struct proxy *px;	/* current proxy to dump or NULL */
+	struct server *sv;	/* current server to dump or NULL */
+	uint only_pxid;		/* dump only this proxy ID when explicit */
+	int show_conn;		/* non-zero = "conn" otherwise "state" */
+	enum {
+		SHOW_SRV_HEAD = 0,	/* initial state (names suggest: emit header first) */
+		SHOW_SRV_LIST,		/* then iterate over servers; handler not visible here */
+	} state;		/* current dump phase */
+};
+
+/* proxy->options
+ * Columns appear to be: keyword, option flag, allowed capabilities
+ * (PR_CAP_FE/PR_CAP_BE), a fourth field (always 0 here) and the required
+ * proxy mode (0 = any, PR_MODE_HTTP = HTTP only) — see struct cfg_opt for
+ * the authoritative field names (declaration not visible in this chunk).
+ */
+const struct cfg_opt cfg_opts[] =
+{
+	{ "abortonclose", PR_O_ABRT_CLOSE, PR_CAP_BE, 0, 0 },
+	{ "allbackups",   PR_O_USE_ALL_BK, PR_CAP_BE, 0, 0 },
+	{ "checkcache",   PR_O_CHK_CACHE, PR_CAP_BE, 0, PR_MODE_HTTP },
+	{ "clitcpka",     PR_O_TCP_CLI_KA, PR_CAP_FE, 0, 0 },
+	{ "contstats",    PR_O_CONTSTATS, PR_CAP_FE, 0, 0 },
+	{ "dontlognull",  PR_O_NULLNOLOG, PR_CAP_FE, 0, 0 },
+	{ "http-buffer-request", PR_O_WREQ_BODY,  PR_CAP_FE | PR_CAP_BE, 0, PR_MODE_HTTP },
+	{ "http-ignore-probes", PR_O_IGNORE_PRB, PR_CAP_FE, 0, PR_MODE_HTTP },
+	{ "idle-close-on-response", PR_O_IDLE_CLOSE_RESP, PR_CAP_FE, 0, PR_MODE_HTTP },
+	{ "prefer-last-server", PR_O_PREF_LAST,  PR_CAP_BE, 0, PR_MODE_HTTP },
+	{ "logasap",      PR_O_LOGASAP, PR_CAP_FE, 0, 0 },
+	{ "nolinger",     PR_O_TCP_NOLING, PR_CAP_FE | PR_CAP_BE, 0, 0 },
+	{ "persist",      PR_O_PERSIST, PR_CAP_BE, 0, 0 },
+	{ "srvtcpka",     PR_O_TCP_SRV_KA, PR_CAP_BE, 0, 0 },
+#ifdef USE_TPROXY
+	{ "transparent",  PR_O_TRANSP, PR_CAP_BE, 0, 0 },
+#else
+	{ "transparent",  0, 0, 0, 0 },
+#endif
+
+	{ NULL, 0, 0, 0, 0 }	/* sentinel: terminates iteration over the table */
+};
+
+/* proxy->options2
+ * Same column layout as cfg_opts above: keyword, option2 flag, allowed
+ * capabilities, a fourth field (always 0 here) and the required proxy mode.
+ */
+const struct cfg_opt cfg_opts2[] =
+{
+#ifdef USE_LINUX_SPLICE
+	{ "splice-request",  PR_O2_SPLIC_REQ, PR_CAP_FE|PR_CAP_BE, 0, 0 },
+	{ "splice-response", PR_O2_SPLIC_RTR, PR_CAP_FE|PR_CAP_BE, 0, 0 },
+	{ "splice-auto",     PR_O2_SPLIC_AUT, PR_CAP_FE|PR_CAP_BE, 0, 0 },
+#else
+	{ "splice-request",  0, 0, 0, 0 },
+	{ "splice-response", 0, 0, 0, 0 },
+	{ "splice-auto",     0, 0, 0, 0 },
+#endif
+	{ "accept-invalid-http-request",  PR_O2_REQBUG_OK, PR_CAP_FE, 0, PR_MODE_HTTP },
+	{ "accept-invalid-http-response", PR_O2_RSPBUG_OK, PR_CAP_BE, 0, PR_MODE_HTTP },
+	{ "dontlog-normal",               PR_O2_NOLOGNORM, PR_CAP_FE, 0, 0 },
+	{ "log-separate-errors",          PR_O2_LOGERRORS, PR_CAP_FE, 0, 0 },
+	{ "log-health-checks",            PR_O2_LOGHCHKS,  PR_CAP_BE, 0, 0 },
+	{ "socket-stats",                 PR_O2_SOCKSTAT,  PR_CAP_FE, 0, 0 },
+	{ "tcp-smart-accept",             PR_O2_SMARTACC,  PR_CAP_FE, 0, 0 },
+	{ "tcp-smart-connect",            PR_O2_SMARTCON,  PR_CAP_BE, 0, 0 },
+	{ "independent-streams",          PR_O2_INDEPSTR,  PR_CAP_FE|PR_CAP_BE, 0, 0 },
+	{ "http-use-proxy-header",        PR_O2_USE_PXHDR, PR_CAP_FE, 0, PR_MODE_HTTP },
+	{ "http-pretend-keepalive",       PR_O2_FAKE_KA,   PR_CAP_BE, 0, PR_MODE_HTTP },
+	{ "http-no-delay",                PR_O2_NODELAY,   PR_CAP_FE|PR_CAP_BE, 0, PR_MODE_HTTP },
+
+	{ "h1-case-adjust-bogus-client",  PR_O2_H1_ADJ_BUGCLI, PR_CAP_FE, 0, 0 },
+	{ "h1-case-adjust-bogus-server",  PR_O2_H1_ADJ_BUGSRV, PR_CAP_BE, 0, 0 },
+	{ "disable-h2-upgrade",           PR_O2_NO_H2_UPGRADE, PR_CAP_FE, 0, PR_MODE_HTTP },
+	/* sentinel: written with all five fields like every other row (was
+	 * previously only four initializers, which zero-fills identically but
+	 * is inconsistent with cfg_opts above).
+	 */
+	{ NULL, 0, 0, 0, 0 }
+};
+
+/* Helper function to resolve a single sticking rule after config parsing.
+ * Returns 1 for success and 0 for failure
+ */
+int resolve_stick_rule(struct proxy *curproxy, struct sticking_rule *mrule)
+{
+	struct stktable *target;
+
+	/* an explicit table name wins over the proxy's own table */
+	if (mrule->table.name)
+		target = stktable_find_by_name(mrule->table.name);
+	else
+		target = curproxy->table;
+
+	if (!target) {
+		ha_alert("Proxy '%s': unable to find stick-table '%s'.\n",
+			 curproxy->id, mrule->table.name ? mrule->table.name : curproxy->id);
+		return 0;
+	}
+	else if (!stktable_compatible_sample(mrule->expr,  target->type)) {
+		/* the sample fetch's output type must be storable in the table */
+		ha_alert("Proxy '%s': type of fetch not usable with type of stick-table '%s'.\n",
+			 curproxy->id, mrule->table.name ? mrule->table.name : curproxy->id);
+		return 0;
+	}
+
+	/* success: replace the table name by the resolved pointer */
+	ha_free(&mrule->table.name);
+	mrule->table.t = target;
+	stktable_alloc_data_type(target, STKTABLE_DT_SERVER_ID, NULL, NULL);
+	stktable_alloc_data_type(target, STKTABLE_DT_SERVER_KEY, NULL, NULL);
+	/* link the proxy into the table's list of referencing proxies, once */
+	if (!in_proxies_list(target->proxies_list, curproxy)) {
+		curproxy->next_stkt_ref = target->proxies_list;
+		target->proxies_list = curproxy;
+	}
+	return 1;
+}
+
+/* Detaches and releases every sticking rule found in list <rules>, freeing
+ * the attached ACL condition and sample expression of each rule.
+ */
+void free_stick_rules(struct list *rules)
+{
+	struct sticking_rule *cur, *back;
+
+	list_for_each_entry_safe(cur, back, rules, list) {
+		LIST_DELETE(&cur->list);
+		free_acl_cond(cur->cond);
+		release_sample_expr(cur->expr);
+		free(cur);
+	}
+}
+
+/* Detaches and releases every logformat node found in list <lfs>, freeing
+ * each node's sample expression and argument string.
+ */
+static void free_logformat_list(struct list *lfs)
+{
+	struct logformat_node *cur, *back;
+
+	list_for_each_entry_safe(cur, back, lfs, list) {
+		LIST_DELETE(&cur->list);
+		release_sample_expr(cur->expr);
+		free(cur->arg);
+		free(cur);
+	}
+}
+
+/* Detaches and releases every server rule found in list <srules>, freeing
+ * the attached ACL condition, logformat expression and file name.
+ */
+void free_server_rules(struct list *srules)
+{
+	struct server_rule *cur, *back;
+
+	list_for_each_entry_safe(cur, back, srules, list) {
+		LIST_DELETE(&cur->list);
+		free_acl_cond(cur->cond);
+		free_logformat_list(&cur->expr);
+		free(cur->file);
+		free(cur);
+	}
+}
+
+/* Completely releases proxy <p> and everything attached to it: strings,
+ * rules, captures, servers, listeners, bind confs, filters, the stick table
+ * and finally the proxy structure itself. NULL is accepted and ignored.
+ * The teardown order matters (e.g. servers before listeners, filters before
+ * deinit callbacks); do not reorder without care.
+ */
+void free_proxy(struct proxy *p)
+{
+	struct server *s;
+	struct cap_hdr *h,*h_next;
+	struct listener *l,*l_next;
+	struct bind_conf *bind_conf, *bind_back;
+	struct acl_cond *cond, *condb;
+	struct acl *acl, *aclb;
+	struct switching_rule *rule, *ruleb;
+	struct redirect_rule *rdr, *rdrb;
+	struct logger *log, *logb;
+	struct proxy_deinit_fct *pxdf;
+	struct server_deinit_fct *srvdf;
+
+	if (!p)
+		return;
+
+	free(p->conf.file);
+	free(p->id);
+	free(p->cookie_name);
+	free(p->cookie_domain);
+	free(p->cookie_attrs);
+	free(p->lbprm.arg_str);
+	release_sample_expr(p->lbprm.expr);
+	free(p->server_state_file_name);
+	free(p->capture_name);
+	istfree(&p->monitor_uri);
+	free(p->rdp_cookie_name);
+	free(p->invalid_rep);
+	free(p->invalid_req);
+#if defined(CONFIG_HAP_TRANSPARENT)
+	free(p->conn_src.bind_hdr_name);
+#endif
+	/* only free the logformat string when it is not one of the shared
+	 * built-in default format strings, which must never be freed.
+	 */
+	if (p->conf.logformat_string != default_http_log_format &&
+	    p->conf.logformat_string != default_tcp_log_format &&
+	    p->conf.logformat_string != clf_http_log_format &&
+	    p->conf.logformat_string != default_https_log_format &&
+	    p->conf.logformat_string != httpclient_log_format)
+		free(p->conf.logformat_string);
+
+	free(p->conf.lfs_file);
+	free(p->conf.uniqueid_format_string);
+	istfree(&p->header_unique_id);
+	free(p->conf.uif_file);
+	if ((p->lbprm.algo & BE_LB_LKUP) == BE_LB_LKUP_MAP)
+		free(p->lbprm.map.srv);
+	if (p->mode == PR_MODE_SYSLOG)
+		free(p->lbprm.log.srv);
+
+	if (p->conf.logformat_sd_string != default_rfc5424_sd_log_format)
+		free(p->conf.logformat_sd_string);
+	free(p->conf.lfsd_file);
+
+	free(p->conf.error_logformat_string);
+	free(p->conf.elfs_file);
+
+	list_for_each_entry_safe(cond, condb, &p->mon_fail_cond, list) {
+		LIST_DELETE(&cond->list);
+		free_acl_cond(cond);
+	}
+
+	EXTRA_COUNTERS_FREE(p->extra_counters_fe);
+	EXTRA_COUNTERS_FREE(p->extra_counters_be);
+
+	list_for_each_entry_safe(acl, aclb, &p->acl, list) {
+		LIST_DELETE(&acl->list);
+		prune_acl(acl);
+		free(acl);
+	}
+
+	free_server_rules(&p->server_rules);
+
+	list_for_each_entry_safe(rule, ruleb, &p->switching_rules, list) {
+		LIST_DELETE(&rule->list);
+		free_acl_cond(rule->cond);
+		free(rule->file);
+		free(rule);
+	}
+
+	list_for_each_entry_safe(rdr, rdrb, &p->redirect_rules, list) {
+		LIST_DELETE(&rdr->list);
+		http_free_redirect_rule(rdr);
+	}
+
+	list_for_each_entry_safe(log, logb, &p->loggers, list) {
+		LIST_DEL_INIT(&log->list);
+		free_logger(log);
+	}
+
+	free_logformat_list(&p->logformat);
+	free_logformat_list(&p->logformat_sd);
+	free_logformat_list(&p->format_unique_id);
+	free_logformat_list(&p->logformat_error);
+
+	free_act_rules(&p->tcp_req.inspect_rules);
+	free_act_rules(&p->tcp_rep.inspect_rules);
+	free_act_rules(&p->tcp_req.l4_rules);
+	free_act_rules(&p->tcp_req.l5_rules);
+	free_act_rules(&p->http_req_rules);
+	free_act_rules(&p->http_res_rules);
+	free_act_rules(&p->http_after_res_rules);
+
+	free_stick_rules(&p->storersp_rules);
+	free_stick_rules(&p->sticking_rules);
+
+	/* release request captures; stop at the first capture shared with
+	 * the defaults proxy so it is not freed twice.
+	 */
+	h = p->req_cap;
+	while (h) {
+		if (p->defpx && h == p->defpx->req_cap)
+			break;
+		h_next = h->next;
+		free(h->name);
+		pool_destroy(h->pool);
+		free(h);
+		h = h_next;
+	}/* end while(h) */
+
+	/* same for response captures */
+	h = p->rsp_cap;
+	while (h) {
+		if (p->defpx && h == p->defpx->rsp_cap)
+			break;
+		h_next = h->next;
+		free(h->name);
+		pool_destroy(h->pool);
+		free(h);
+		h = h_next;
+	}/* end while(h) */
+
+	/* run registered per-server deinit callbacks, then drop each server */
+	s = p->srv;
+	while (s) {
+		list_for_each_entry(srvdf, &server_deinit_list, list)
+			srvdf->fct(s);
+		s = srv_drop(s);
+	}/* end while(s) */
+
+	/* also free default-server parameters since some of them might have
+	 * been dynamically allocated (e.g.: config hints, cookies, ssl..)
+	 */
+	srv_free_params(&p->defsrv);
+
+	list_for_each_entry_safe(l, l_next, &p->conf.listeners, by_fe) {
+		LIST_DELETE(&l->by_fe);
+		LIST_DELETE(&l->by_bind);
+		free(l->name);
+		free(l->per_thr);
+		free(l->counters);
+		task_destroy(l->rx.rhttp.task);
+
+		EXTRA_COUNTERS_FREE(l->extra_counters);
+		free(l);
+	}
+
+	/* Release unused SSL configs. */
+	list_for_each_entry_safe(bind_conf, bind_back, &p->conf.bind, by_fe) {
+		if (bind_conf->xprt->destroy_bind_conf)
+			bind_conf->xprt->destroy_bind_conf(bind_conf);
+		free(bind_conf->file);
+		free(bind_conf->arg);
+		free(bind_conf->settings.interface);
+		LIST_DELETE(&bind_conf->by_fe);
+		free(bind_conf->rhttp_srvname);
+		free(bind_conf);
+	}
+
+	flt_deinit(p);
+
+	/* run registered per-proxy deinit callbacks */
+	list_for_each_entry(pxdf, &proxy_deinit_list, list)
+		pxdf->fct(p);
+
+	free(p->desc);
+
+	http_ext_clean(p);
+
+	task_destroy(p->task);
+
+	pool_destroy(p->req_cap_pool);
+	pool_destroy(p->rsp_cap_pool);
+
+	stktable_deinit(p->table);
+	ha_free(&p->table);
+
+	HA_RWLOCK_DESTROY(&p->lbprm.lock);
+	HA_RWLOCK_DESTROY(&p->lock);
+
+	/* drop the reference on the defaults proxy, then free <p> itself */
+	proxy_unref_defaults(p);
+	ha_free(&p);
+}
+
+/*
+ * This function returns a string containing a name describing capabilities to
+ * report comprehensible error messages. Specifically, it will return the words
+ * "frontend", "backend" when appropriate, "defaults" if it corresponds to a
+ * defaults section, or "proxy" for all other cases including the proxies
+ * declared in "listen" mode.
+ */
+const char *proxy_cap_str(int cap)
+{
+ if (cap & PR_CAP_DEF)
+ return "defaults";
+
+ if ((cap & PR_CAP_LISTEN) != PR_CAP_LISTEN) {
+ if (cap & PR_CAP_FE)
+ return "frontend";
+ else if (cap & PR_CAP_BE)
+ return "backend";
+ }
+ return "proxy";
+}
+
+/*
+ * This function returns a string containing the mode of the proxy in a format
+ * suitable for error messages.
+ */
+const char *proxy_mode_str(int mode) {
+
+ if (mode == PR_MODE_TCP)
+ return "tcp";
+ else if (mode == PR_MODE_HTTP)
+ return "http";
+ else if (mode == PR_MODE_CLI)
+ return "cli";
+ else if (mode == PR_MODE_SYSLOG)
+ return "syslog";
+ else if (mode == PR_MODE_PEERS)
+ return "peers";
+ else
+ return "unknown";
+}
+
+/* try to find among known options the one that looks closest to <word> by
+ * counting transitions between letters, digits and other characters. Will
+ * return the best matching word if found, otherwise NULL. An optional array
+ * of extra words to compare may be passed in <extra>, but it must then be
+ * terminated by a NULL entry. If unused it may be NULL.
+ */
+const char *proxy_find_best_option(const char *word, const char **extra)
+{
+	uint8_t word_sig[1024];
+	uint8_t list_sig[1024];
+	const char *best_ptr = NULL;
+	int dist, best_dist = INT_MAX;
+	int index;
+
+	make_word_fingerprint(word_sig, word);
+
+	/* scan the two built-in option tables for the closest fingerprint */
+	for (index = 0; cfg_opts[index].name; index++) {
+		make_word_fingerprint(list_sig, cfg_opts[index].name);
+		dist = word_fingerprint_distance(word_sig, list_sig);
+		if (dist < best_dist) {
+			best_dist = dist;
+			best_ptr  = cfg_opts[index].name;
+		}
+	}
+
+	for (index = 0; cfg_opts2[index].name; index++) {
+		make_word_fingerprint(list_sig, cfg_opts2[index].name);
+		dist = word_fingerprint_distance(word_sig, list_sig);
+		if (dist < best_dist) {
+			best_dist = dist;
+			best_ptr  = cfg_opts2[index].name;
+		}
+	}
+
+	/* then any caller-supplied extra candidates */
+	while (extra && *extra) {
+		make_word_fingerprint(list_sig, *extra);
+		dist = word_fingerprint_distance(word_sig, list_sig);
+		if (dist < best_dist) {
+			best_dist = dist;
+			best_ptr  = *extra;
+		}
+		extra++;
+	}
+
+	/* reject matches that are too distant relative to either word's
+	 * length, to avoid suggesting a completely unrelated keyword.
+	 */
+	if (best_dist > 2 * strlen(word) || (best_ptr && best_dist > 2 * strlen(best_ptr)))
+		best_ptr = NULL;
+	return best_ptr;
+}
+
+/* This function parses a "timeout" statement in a proxy section. It returns
+ * -1 if there is any error, 1 for a warning, otherwise zero. If it does not
+ * return zero, it will write an error or warning message into a preallocated
+ * buffer returned at <err>. The trailing is not be written. The function must
+ * be called with <args> pointing to the first command line word, with <proxy>
+ * pointing to the proxy being parsed, and <defpx> to the default proxy or NULL.
+ * As a special case for compatibility with older configs, it also accepts
+ * "{cli|srv|con}timeout" in args[0].
+ */
+static int proxy_parse_timeout(char **args, int section, struct proxy *proxy,
+                               const struct proxy *defpx, const char *file, int line,
+                               char **err)
+{
+	unsigned timeout;
+	int retval, cap;
+	const char *res, *name;
+	int *tv = NULL;
+	const int *td = NULL;
+
+	retval = 0;
+
+	/* simply skip "timeout" but remain compatible with old form */
+	if (strcmp(args[0], "timeout") == 0)
+		args++;
+
+	/* NOTE(review): each branch below computes <td> as &defpx-><field>
+	 * before the "defpx &&" guard further down. Only the address is taken
+	 * and it is dereferenced solely under that guard, but this is still
+	 * formally UB when defpx is NULL — confirm all callers pass a valid
+	 * defpx.
+	 */
+	name = args[0];
+	if (strcmp(args[0], "client") == 0) {
+		name = "client";
+		tv = &proxy->timeout.client;
+		td = &defpx->timeout.client;
+		cap = PR_CAP_FE;
+	} else if (strcmp(args[0], "tarpit") == 0) {
+		tv = &proxy->timeout.tarpit;
+		td = &defpx->timeout.tarpit;
+		cap = PR_CAP_FE | PR_CAP_BE;
+	} else if (strcmp(args[0], "client-hs") == 0) {
+		tv = &proxy->timeout.client_hs;
+		td = &defpx->timeout.client_hs;
+		cap = PR_CAP_FE;
+	} else if (strcmp(args[0], "http-keep-alive") == 0) {
+		tv = &proxy->timeout.httpka;
+		td = &defpx->timeout.httpka;
+		cap = PR_CAP_FE | PR_CAP_BE;
+	} else if (strcmp(args[0], "http-request") == 0) {
+		tv = &proxy->timeout.httpreq;
+		td = &defpx->timeout.httpreq;
+		cap = PR_CAP_FE | PR_CAP_BE;
+	} else if (strcmp(args[0], "server") == 0) {
+		name = "server";
+		tv = &proxy->timeout.server;
+		td = &defpx->timeout.server;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "connect") == 0) {
+		name = "connect";
+		tv = &proxy->timeout.connect;
+		td = &defpx->timeout.connect;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "check") == 0) {
+		tv = &proxy->timeout.check;
+		td = &defpx->timeout.check;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "queue") == 0) {
+		tv = &proxy->timeout.queue;
+		td = &defpx->timeout.queue;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "tunnel") == 0) {
+		tv = &proxy->timeout.tunnel;
+		td = &defpx->timeout.tunnel;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "client-fin") == 0) {
+		tv = &proxy->timeout.clientfin;
+		td = &defpx->timeout.clientfin;
+		cap = PR_CAP_FE;
+	} else if (strcmp(args[0], "server-fin") == 0) {
+		tv = &proxy->timeout.serverfin;
+		td = &defpx->timeout.serverfin;
+		cap = PR_CAP_BE;
+	} else if (strcmp(args[0], "clitimeout") == 0) {
+		memprintf(err, "the '%s' directive is not supported anymore since HAProxy 2.1. Use 'timeout client'.", args[0]);
+		return -1;
+	} else if (strcmp(args[0], "srvtimeout") == 0) {
+		memprintf(err, "the '%s' directive is not supported anymore since HAProxy 2.1. Use 'timeout server'.", args[0]);
+		return -1;
+	} else if (strcmp(args[0], "contimeout") == 0) {
+		memprintf(err, "the '%s' directive is not supported anymore since HAProxy 2.1. Use 'timeout connect'.", args[0]);
+		return -1;
+	} else {
+		/* NOTE(review): the help text below lists 'handshake' while the
+		 * parser matches "client-hs" above — confirm which keyword is
+		 * the intended public name.
+		 */
+		memprintf(err,
+		          "'timeout' supports 'client', 'server', 'connect', 'check', "
+		          "'queue', 'handshake', 'http-keep-alive', 'http-request', 'tunnel', 'tarpit', "
+		          "'client-fin' and 'server-fin' (got '%s')",
+		          args[0]);
+		return -1;
+	}
+
+	if (*args[1] == 0) {
+		memprintf(err, "'timeout %s' expects an integer value (in milliseconds)", name);
+		return -1;
+	}
+
+	res = parse_time_err(args[1], &timeout, TIME_UNIT_MS);
+	if (res == PARSE_TIME_OVER) {
+		memprintf(err, "timer overflow in argument '%s' to 'timeout %s' (maximum value is 2147483647 ms or ~24.8 days)",
+			  args[1], name);
+		return -1;
+	}
+	else if (res == PARSE_TIME_UNDER) {
+		memprintf(err, "timer underflow in argument '%s' to 'timeout %s' (minimum non-null value is 1 ms)",
+			  args[1], name);
+		return -1;
+	}
+	else if (res) {
+		memprintf(err, "unexpected character '%c' in 'timeout %s'", *res, name);
+		return -1;
+	}
+
+	/* the timeout is still stored below even when only warnings are
+	 * emitted (capability mismatch or overwriting a defaults value).
+	 */
+	if (!(proxy->cap & cap)) {
+		memprintf(err, "'timeout %s' will be ignored because %s '%s' has no %s capability",
+			  name, proxy_type_str(proxy), proxy->id,
+			  (cap & PR_CAP_BE) ? "backend" : "frontend");
+		retval = 1;
+	}
+	else if (defpx && *tv != *td) {
+		memprintf(err, "overwriting 'timeout %s' which was already specified", name);
+		retval = 1;
+	}
+
+	if (*args[2] != 0) {
+		memprintf(err, "'timeout %s' : unexpected extra argument '%s' after value '%s'.", name, args[2], args[1]);
+		retval = -1;
+	}
+
+	*tv = MS_TO_TICKS(timeout);
+	return retval;
+}
+
+/* This function parses a "rate-limit" statement in a proxy section. It returns
+ * -1 if there is any error, 1 for a warning, otherwise zero. If it does not
+ * return zero, it will write an error or warning message into a preallocated
+ * buffer returned at <err>. The function must be called with <args> pointing
+ * to the first command line word, with <proxy> pointing to the proxy being
+ * parsed, and <defpx> to the default proxy or NULL.
+ */
+static int proxy_parse_rate_limit(char **args, int section, struct proxy *proxy,
+                                  const struct proxy *defpx, const char *file, int line,
+                                  char **err)
+{
+	int retval;
+	char *res;
+	unsigned int *tv = NULL;
+	const unsigned int *td = NULL;
+	unsigned int val;
+
+	retval = 0;
+
+	/* only "rate-limit sessions" is supported for now. NOTE(review):
+	 * <td> is taken from <defpx> before the NULL check performed by the
+	 * "defpx &&" guard below; only the address is computed here.
+	 */
+	if (strcmp(args[1], "sessions") == 0) {
+		tv = &proxy->fe_sps_lim;
+		td = &defpx->fe_sps_lim;
+	}
+	else {
+		memprintf(err, "'%s' only supports 'sessions' (got '%s')", args[0], args[1]);
+		return -1;
+	}
+
+	if (*args[2] == 0) {
+		/* fixed duplicated word: was "expects expects" */
+		memprintf(err, "'%s %s' expects an integer value (in sessions/second)", args[0], args[1]);
+		return -1;
+	}
+
+	val = strtoul(args[2], &res, 0);
+	if (*res) {
+		memprintf(err, "'%s %s' : unexpected character '%c' in integer value '%s'", args[0], args[1], *res, args[2]);
+		return -1;
+	}
+
+	/* the limit is still stored below even when only warnings are emitted */
+	if (!(proxy->cap & PR_CAP_FE)) {
+		memprintf(err, "%s %s will be ignored because %s '%s' has no frontend capability",
+			  args[0], args[1], proxy_type_str(proxy), proxy->id);
+		retval = 1;
+	}
+	else if (defpx && *tv != *td) {
+		memprintf(err, "overwriting %s %s which was already specified", args[0], args[1]);
+		retval = 1;
+	}
+
+	*tv = val;
+	return retval;
+}
+
+/* This function parses a "max-keep-alive-queue" statement in a proxy section.
+ * It returns -1 if there is any error, 1 for a warning, otherwise zero. If it
+ * does not return zero, it will write an error or warning message into a
+ * preallocated buffer returned at <err>. The function must be called with
+ * <args> pointing to the first command line word, with <proxy> pointing to
+ * the proxy being parsed, and <defpx> to the default proxy or NULL.
+ */
+static int proxy_parse_max_ka_queue(char **args, int section, struct proxy *proxy,
+                                    const struct proxy *defpx, const char *file, int line,
+                                    char **err)
+{
+	int retval;
+	char *res;
+	unsigned int val;
+
+	retval = 0;
+
+	if (*args[1] == 0) {
+		/* fixed duplicated word: was "expects expects" */
+		memprintf(err, "'%s' expects an integer value (or -1 to disable)", args[0]);
+		return -1;
+	}
+
+	val = strtol(args[1], &res, 0);
+	if (*res) {
+		memprintf(err, "'%s' : unexpected character '%c' in integer value '%s'", args[0], *res, args[1]);
+		return -1;
+	}
+
+	/* the value is still stored below even when only a warning is emitted */
+	if (!(proxy->cap & PR_CAP_BE)) {
+		memprintf(err, "%s will be ignored because %s '%s' has no backend capability",
+			  args[0], proxy_type_str(proxy), proxy->id);
+		retval = 1;
+	}
+
+	/* we store <val+1> so that a user-facing value of -1 is stored as zero (default) */
+	proxy->max_ka_queue = val + 1;
+	return retval;
+}
+
+/* This function parses a "declare" statement in a proxy section. It returns -1
+ * if there is any error, 1 for warning, otherwise 0. If it does not return zero,
+ * it will write an error or warning message into a preallocated buffer returned
+ * at <err>. The function must be called with <args> pointing to the first command
+ * line word, with <proxy> pointing to the proxy being parsed, and <defpx> to the
+ * default proxy or NULL.
+ */
+static int proxy_parse_declare(char **args, int section, struct proxy *curpx,
+                               const struct proxy *defpx, const char *file, int line,
+                               char **err)
+{
+	/* Capture keyword cannot be declared in a default proxy. */
+	if (curpx == defpx) {
+		memprintf(err, "'%s' not available in default section", args[0]);
+		return -1;
+	}
+
+	/* Capture keyword is only available in frontend. */
+	if (!(curpx->cap & PR_CAP_FE)) {
+		memprintf(err, "'%s' only available in frontend or listen section", args[0]);
+		return -1;
+	}
+
+	/* Check mandatory second keyword. */
+	if (!args[1] || !*args[1]) {
+		memprintf(err, "'%s' needs a second keyword that specify the type of declaration ('capture')", args[0]);
+		return -1;
+	}
+
+	/* Actually, declare is only available for declaring capture
+	 * slot, but in the future it can declare maps or variables.
+	 * So, this section permits to check and switch according with
+	 * the second keyword.
+	 */
+	if (strcmp(args[1], "capture") == 0) {
+		char *error = NULL;
+		long len;
+		struct cap_hdr *hdr;
+
+		/* Check the next keyword. */
+		if (!args[2] || !*args[2] ||
+		    (strcmp(args[2], "response") != 0 &&
+		     strcmp(args[2], "request") != 0)) {
+			memprintf(err, "'%s %s' requires a direction ('request' or 'response')", args[0], args[1]);
+			return -1;
+		}
+
+		/* Check the 'len' keyword. */
+		if (!args[3] || !*args[3] || strcmp(args[3], "len") != 0) {
+			memprintf(err, "'%s %s' requires a capture length ('len')", args[0], args[1]);
+			return -1;
+		}
+
+		/* Check the length value. */
+		if (!args[4] || !*args[4]) {
+			memprintf(err, "'%s %s': 'len' requires a numeric value that represents the "
+			               "capture length",
+			          args[0], args[1]);
+			return -1;
+		}
+
+		/* convert the length value. */
+		len = strtol(args[4], &error, 10);
+		if (*error != '\0') {
+			/* fixed: report the offending value args[4], not the
+			 * literal "len" keyword in args[3].
+			 */
+			memprintf(err, "'%s %s': cannot parse the length '%s'.",
+			          args[0], args[1], args[4]);
+			return -1;
+		}
+
+		/* check length. */
+		if (len <= 0) {
+			memprintf(err, "length must be > 0");
+			return -1;
+		}
+
+		/* register the capture. */
+		hdr = calloc(1, sizeof(*hdr));
+		if (!hdr) {
+			memprintf(err, "proxy '%s': out of memory while registering a capture", curpx->id);
+			return -1;
+		}
+		hdr->name = NULL; /* not a header capture */
+		hdr->namelen = 0;
+		hdr->len = len;
+		hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
+		if (!hdr->pool) {
+			/* previously unchecked: a pool allocation failure would
+			 * have registered a capture with a NULL pool.
+			 */
+			free(hdr);
+			memprintf(err, "proxy '%s': out of memory while registering a capture", curpx->id);
+			return -1;
+		}
+
+		/* link the new slot at the head of the relevant capture list */
+		if (strcmp(args[2], "request") == 0) {
+			hdr->next = curpx->req_cap;
+			hdr->index = curpx->nb_req_cap++;
+			curpx->req_cap = hdr;
+		}
+		if (strcmp(args[2], "response") == 0) {
+			hdr->next = curpx->rsp_cap;
+			hdr->index = curpx->nb_rsp_cap++;
+			curpx->rsp_cap = hdr;
+		}
+		return 0;
+	}
+	else {
+		memprintf(err, "unknown declaration type '%s' (supports 'capture')", args[1]);
+		return -1;
+	}
+}
+
+/* This function parses a "retry-on" statement: it resets the proxy's retry
+ * mask then sets one bit per listed condition keyword. "none" must be used
+ * alone and leaves the mask empty; "all-retryable-errors" expands to the set
+ * of conditions considered safe to retry. Returns 0 on success, -1 on error
+ * with a message in <err>.
+ */
+static int
+proxy_parse_retry_on(char **args, int section, struct proxy *curpx,
+                     const struct proxy *defpx, const char *file, int line,
+                     char **err)
+{
+	int i;
+
+	if (!(*args[1])) {
+		memprintf(err, "'%s' needs at least one keyword to specify when to retry", args[0]);
+		return -1;
+	}
+	if (!(curpx->cap & PR_CAP_BE)) {
+		memprintf(err, "'%s' only available in backend or listen section", args[0]);
+		return -1;
+	}
+	curpx->retry_type = 0;
+	for (i = 1; *(args[i]); i++) {
+		if (strcmp(args[i], "conn-failure") == 0)
+			curpx->retry_type |= PR_RE_CONN_FAILED;
+		else if (strcmp(args[i], "empty-response") == 0)
+			curpx->retry_type |= PR_RE_DISCONNECTED;
+		else if (strcmp(args[i], "response-timeout") == 0)
+			curpx->retry_type |= PR_RE_TIMEOUT;
+		else if (strcmp(args[i], "401") == 0)
+			curpx->retry_type |= PR_RE_401;
+		else if (strcmp(args[i], "403") == 0)
+			curpx->retry_type |= PR_RE_403;
+		else if (strcmp(args[i], "404") == 0)
+			curpx->retry_type |= PR_RE_404;
+		else if (strcmp(args[i], "408") == 0)
+			curpx->retry_type |= PR_RE_408;
+		else if (strcmp(args[i], "425") == 0)
+			curpx->retry_type |= PR_RE_425;
+		else if (strcmp(args[i], "500") == 0)
+			curpx->retry_type |= PR_RE_500;
+		else if (strcmp(args[i], "501") == 0)
+			curpx->retry_type |= PR_RE_501;
+		else if (strcmp(args[i], "502") == 0)
+			curpx->retry_type |= PR_RE_502;
+		else if (strcmp(args[i], "503") == 0)
+			curpx->retry_type |= PR_RE_503;
+		else if (strcmp(args[i], "504") == 0)
+			curpx->retry_type |= PR_RE_504;
+		else if (strcmp(args[i], "0rtt-rejected") == 0)
+			curpx->retry_type |= PR_RE_EARLY_ERROR;
+		else if (strcmp(args[i], "junk-response") == 0)
+			curpx->retry_type |= PR_RE_JUNK_REQUEST;
+		else if (strcmp(args[i], "all-retryable-errors") == 0)
+			curpx->retry_type |= PR_RE_CONN_FAILED | PR_RE_DISCONNECTED |
+			                     PR_RE_TIMEOUT | PR_RE_500 | PR_RE_502 |
+			                     PR_RE_503 | PR_RE_504 | PR_RE_EARLY_ERROR |
+			                     PR_RE_JUNK_REQUEST;
+		else if (strcmp(args[i], "none") == 0) {
+			/* "none" is exclusive: it may neither follow nor precede
+			 * another keyword.
+			 */
+			if (i != 1 || *args[i + 1]) {
+				/* fixed typo: was "keyworld" */
+				memprintf(err, "'%s' 'none' keyword only usable alone", args[0]);
+				return -1;
+			}
+		} else {
+			memprintf(err, "'%s': unknown keyword '%s'", args[0], args[i]);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+#ifdef TCP_KEEPCNT
+/* This function parses "{cli|srv}tcpka-cnt" statements. It stores the probe
+ * count into the proxy's clitcpka_cnt or srvtcpka_cnt depending on args[0].
+ * Returns -1 on error, 1 for a warning (value still stored), otherwise 0;
+ * a message is written into <err> for non-zero returns.
+ */
+static int proxy_parse_tcpka_cnt(char **args, int section, struct proxy *proxy,
+                                 const struct proxy *defpx, const char *file, int line,
+                                 char **err)
+{
+	int retval;
+	char *res;
+	unsigned int tcpka_cnt;
+
+	retval = 0;
+
+	if (*args[1] == 0) {
+		memprintf(err, "'%s' expects an integer value", args[0]);
+		return -1;
+	}
+
+	/* strtol's long result is stored into an unsigned int; negative or
+	 * oversized inputs wrap silently — only stray characters are rejected.
+	 */
+	tcpka_cnt = strtol(args[1], &res, 0);
+	if (*res) {
+		memprintf(err, "'%s' : unexpected character '%c' in integer value '%s'", args[0], *res, args[1]);
+		return -1;
+	}
+
+	if (strcmp(args[0], "clitcpka-cnt") == 0) {
+		if (!(proxy->cap & PR_CAP_FE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no frontend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->clitcpka_cnt = tcpka_cnt;
+	} else if (strcmp(args[0], "srvtcpka-cnt") == 0) {
+		if (!(proxy->cap & PR_CAP_BE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no backend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->srvtcpka_cnt = tcpka_cnt;
+	} else {
+		/* unreachable: this parser is only registered for the two
+		 * keywords above.
+		 */
+		memprintf(err, "'%s': unknown keyword", args[0]);
+		return -1;
+	}
+
+	return retval;
+}
+#endif
+
+#ifdef TCP_KEEPIDLE
+/* This function parses "{cli|srv}tcpka-idle" statements. The value is parsed
+ * as a time with a default unit of seconds (TIME_UNIT_S) and stored into the
+ * proxy's clitcpka_idle or srvtcpka_idle depending on args[0]. Returns -1 on
+ * error, 1 for a warning (value still stored), otherwise 0.
+ */
+static int proxy_parse_tcpka_idle(char **args, int section, struct proxy *proxy,
+                                  const struct proxy *defpx, const char *file, int line,
+                                  char **err)
+{
+	int retval;
+	const char *res;
+	unsigned int tcpka_idle;
+
+	retval = 0;
+
+	if (*args[1] == 0) {
+		memprintf(err, "'%s' expects an integer value", args[0]);
+		return -1;
+	}
+	res = parse_time_err(args[1], &tcpka_idle, TIME_UNIT_S);
+	/* NOTE(review): the messages below say "ms" although the default unit
+	 * here is seconds (TIME_UNIT_S) — confirm the intended wording.
+	 */
+	if (res == PARSE_TIME_OVER) {
+		memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+			  args[1], args[0]);
+		return -1;
+	}
+	else if (res == PARSE_TIME_UNDER) {
+		memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+			  args[1], args[0]);
+		return -1;
+	}
+	else if (res) {
+		/* NOTE(review): trailing "\n" is inconsistent with the other
+		 * memprintf() messages in this file.
+		 */
+		memprintf(err, "unexpected character '%c' in argument to <%s>.\n", *res, args[0]);
+		return -1;
+	}
+
+	if (strcmp(args[0], "clitcpka-idle") == 0) {
+		if (!(proxy->cap & PR_CAP_FE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no frontend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->clitcpka_idle = tcpka_idle;
+	} else if (strcmp(args[0], "srvtcpka-idle") == 0) {
+		if (!(proxy->cap & PR_CAP_BE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no backend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->srvtcpka_idle = tcpka_idle;
+	} else {
+		/* unreachable: only the two keywords above reach this parser */
+		memprintf(err, "'%s': unknown keyword", args[0]);
+		return -1;
+	}
+
+	return retval;
+}
+#endif
+
+#ifdef TCP_KEEPINTVL
+/* This function parses "{cli|srv}tcpka-intvl" statements. The value is parsed
+ * as a time with a default unit of seconds (TIME_UNIT_S) and stored into the
+ * proxy's clitcpka_intvl or srvtcpka_intvl depending on args[0]. Returns -1
+ * on error, 1 for a warning (value still stored), otherwise 0.
+ */
+static int proxy_parse_tcpka_intvl(char **args, int section, struct proxy *proxy,
+                                   const struct proxy *defpx, const char *file, int line,
+                                   char **err)
+{
+	int retval;
+	const char *res;
+	unsigned int tcpka_intvl;
+
+	retval = 0;
+
+	if (*args[1] == 0) {
+		memprintf(err, "'%s' expects an integer value", args[0]);
+		return -1;
+	}
+	res = parse_time_err(args[1], &tcpka_intvl, TIME_UNIT_S);
+	/* NOTE(review): the messages below say "ms" although the default unit
+	 * here is seconds (TIME_UNIT_S) — confirm the intended wording.
+	 */
+	if (res == PARSE_TIME_OVER) {
+		memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+			  args[1], args[0]);
+		return -1;
+	}
+	else if (res == PARSE_TIME_UNDER) {
+		memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+			  args[1], args[0]);
+		return -1;
+	}
+	else if (res) {
+		/* NOTE(review): trailing "\n" is inconsistent with the other
+		 * memprintf() messages in this file.
+		 */
+		memprintf(err, "unexpected character '%c' in argument to <%s>.\n", *res, args[0]);
+		return -1;
+	}
+
+	if (strcmp(args[0], "clitcpka-intvl") == 0) {
+		if (!(proxy->cap & PR_CAP_FE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no frontend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->clitcpka_intvl = tcpka_intvl;
+	} else if (strcmp(args[0], "srvtcpka-intvl") == 0) {
+		if (!(proxy->cap & PR_CAP_BE)) {
+			memprintf(err, "%s will be ignored because %s '%s' has no backend capability",
+				  args[0], proxy_type_str(proxy), proxy->id);
+			retval = 1;
+		}
+		proxy->srvtcpka_intvl = tcpka_intvl;
+	} else {
+		/* unreachable: only the two keywords above reach this parser */
+		memprintf(err, "'%s': unknown keyword", args[0]);
+		return -1;
+	}
+
+	return retval;
+}
+#endif
+
+/* Inserts proxy <px> into the tree of known proxies: the defaults tree when
+ * px->cap has PR_CAP_DEF set, the regular one otherwise. The proxy's name is
+ * used as the storing key, so px->id must already have been initialized.
+ */
+void proxy_store_name(struct proxy *px)
+{
+	struct eb_root *root;
+
+	root = (px->cap & PR_CAP_DEF) ? &defproxy_by_name : &proxy_by_name;
+	px->conf.by_name.key = px->id;
+	ebis_insert(root, &px->conf.by_name);
+}
+
+/* Returns a pointer to the first proxy matching capabilities <cap> and id
+ * <id>. NULL is returned if no match is found. If <table> is non-zero, it
+ * only considers proxies having a table.
+ */
+struct proxy *proxy_find_by_id(int id, int cap, int table)
+{
+ struct eb32_node *n;
+
+ for (n = eb32_lookup(&used_proxy_id, id); n; n = eb32_next(n)) {
+ struct proxy *px = container_of(n, struct proxy, conf.id);
+
+ if (px->uuid != id)
+ break;
+
+ if ((px->cap & cap) != cap)
+ continue;
+
+ if (table && (!px->table || !px->table->size))
+ continue;
+
+ return px;
+ }
+ return NULL;
+}
+
+/* Returns a pointer to the first proxy matching either name <name>, or id
+ * <name> if <name> begins with a '#'. NULL is returned if no match is found.
+ * If <table> is non-zero, it only considers proxies having a table. The search
+ * is made into the regular proxies, unless <cap> has PR_CAP_DEF set in which
+ * case it's searched into the defproxy tree.
+ */
+struct proxy *proxy_find_by_name(const char *name, int cap, int table)
+{
+ struct proxy *curproxy;
+
+ if (*name == '#' && !(cap & PR_CAP_DEF)) {
+ curproxy = proxy_find_by_id(atoi(name + 1), cap, table);
+ if (curproxy)
+ return curproxy;
+ }
+ else {
+ struct eb_root *root;
+ struct ebpt_node *node;
+
+ root = (cap & PR_CAP_DEF) ? &defproxy_by_name : &proxy_by_name;
+ for (node = ebis_lookup(root, name); node; node = ebpt_next(node)) {
+ curproxy = container_of(node, struct proxy, conf.by_name);
+
+ if (strcmp(curproxy->id, name) != 0)
+ break;
+
+ if ((curproxy->cap & cap) != cap)
+ continue;
+
+ if (table && (!curproxy->table || !curproxy->table->size))
+ continue;
+
+ return curproxy;
+ }
+ }
+ return NULL;
+}
+
/* Finds the best match for a proxy with capabilities <cap>, name <name> and id
 * <id>. At most one of <id> or <name> may be different provided that <cap> is
 * valid. Either <id> or <name> may be left unspecified (0). The purpose is to
 * find a proxy based on some information from a previous configuration, across
 * reloads or during information exchange between peers.
 *
 * Names are looked up first if present, then IDs are compared if present. In
 * case of an inexact match whatever is forced in the configuration has
 * precedence in the following order :
 *   - 1) forced ID (proves a renaming / change of proxy type)
 *   - 2) proxy name+type (may indicate a move if ID differs)
 *   - 3) automatic ID+type (may indicate a renaming)
 *
 * Depending on what is found, we can end up in the following situations :
 *
 *   name id cap  | possible causes
 *   -------------+-----------------
 *    --  --  --  | nothing found
 *    --  --  ok  | nothing found
 *    --  ok  --  | proxy deleted, ID points to next one
 *    --  ok  ok  | proxy renamed, or deleted with ID pointing to next one
 *    ok  --  --  | proxy deleted, but other half with same name still here (before)
 *    ok  --  ok  | proxy's ID changed (proxy moved in the config file)
 *    ok  ok  --  | proxy deleted, but other half with same name still here (after)
 *    ok  ok  ok  | perfect match
 *
 * Upon return if <diff> is not NULL, it is zeroed then filled with up to 3 bits :
 *   - PR_FBM_MISMATCH_ID        : proxy was found but ID differs
 *                                 (and ID was not zero)
 *   - PR_FBM_MISMATCH_NAME      : proxy was found by ID but name differs
 *                                 (and name was not NULL)
 *   - PR_FBM_MISMATCH_PROXYTYPE : a proxy of different type was found with
 *                                 the same name and/or id
 *
 * Only a valid proxy is returned. If capabilities do not match, NULL is
 * returned. The caller can check <diff> to report detailed warnings / errors,
 * and decide whether or not to use what was found.
 */
struct proxy *proxy_find_best_match(int cap, const char *name, int id, int *diff)
{
	struct proxy *byname;
	struct proxy *byid;

	/* nothing to search on */
	if (!name && !id)
		return NULL;

	if (diff)
		*diff = 0;

	byname = byid = NULL;

	if (name) {
		byname = proxy_find_by_name(name, cap, 0);
		/* exact match (or id unspecified): done */
		if (byname && (!id || byname->uuid == id))
			return byname;
	}

	/* remaining possibilities :
	 *   - name not set
	 *   - name set but not found
	 *   - name found, but ID doesn't match.
	 */
	if (id) {
		byid = proxy_find_by_id(id, cap, 0);
		if (byid) {
			if (byname) {
				/* id+type found, name+type found, but not all 3.
				 * ID wins only if forced, otherwise name wins.
				 */
				if (byid->options & PR_O_FORCED_ID) {
					if (diff)
						*diff |= PR_FBM_MISMATCH_NAME;
					return byid;
				}
				else {
					if (diff)
						*diff |= PR_FBM_MISMATCH_ID;
					return byname;
				}
			}

			/* remaining possibilities :
			 *   - name not set
			 *   - name set but not found
			 */
			if (name && diff)
				*diff |= PR_FBM_MISMATCH_NAME;
			return byid;
		}

		/* ID not found */
		if (byname) {
			if (diff)
				*diff |= PR_FBM_MISMATCH_ID;
			return byname;
		}
	}

	/* All remaining possibilities will lead to NULL. If we can report more
	 * detailed information to the caller about changed types and/or name,
	 * we'll do it. For example, we could detect that "listen foo" was
	 * split into "frontend foo_ft" and "backend foo_bk" if IDs are forced.
	 *   - name not set, ID not found
	 *   - name not found, ID not set
	 *   - name not found, ID not found
	 */
	if (!diff)
		return NULL;

	/* relaxed lookups (cap=0) to detect type changes */
	if (name) {
		byname = proxy_find_by_name(name, 0, 0);
		if (byname && (!id || byname->uuid == id))
			*diff |= PR_FBM_MISMATCH_PROXYTYPE;
	}

	if (id) {
		byid = proxy_find_by_id(id, 0, 0);
		if (byid) {
			if (!name)
				*diff |= PR_FBM_MISMATCH_PROXYTYPE; /* only type changed */
			else if (byid->options & PR_O_FORCED_ID)
				*diff |= PR_FBM_MISMATCH_NAME | PR_FBM_MISMATCH_PROXYTYPE; /* name and type changed */
			/* otherwise it's a different proxy that was returned */
		}
	}
	return NULL;
}
+
+/*
+ * This function finds a server with matching name within selected proxy.
+ * It also checks if there are more matching servers with
+ * requested name as this often leads into unexpected situations.
+ */
+
+struct server *findserver(const struct proxy *px, const char *name) {
+
+ struct server *cursrv, *target = NULL;
+
+ if (!px)
+ return NULL;
+
+ for (cursrv = px->srv; cursrv; cursrv = cursrv->next) {
+ if (strcmp(cursrv->id, name) != 0)
+ continue;
+
+ if (!target) {
+ target = cursrv;
+ continue;
+ }
+
+ ha_alert("Refusing to use duplicated server '%s' found in proxy: %s!\n",
+ name, px->id);
+
+ return NULL;
+ }
+
+ return target;
+}
+
+/*
+ * This function finds a server with matching "<puid> x <rid>" within
+ * selected proxy <px>.
+ * Using the combination of proxy-uid + revision id ensures that the function
+ * will either return the server we're expecting or NULL if it has been removed
+ * from the proxy.
+ */
+struct server *findserver_unique_id(const struct proxy *px, int puid, uint32_t rid) {
+
+ struct server *cursrv;
+
+ if (!px)
+ return NULL;
+
+ for (cursrv = px->srv; cursrv; cursrv = cursrv->next) {
+ if (cursrv->puid == puid && cursrv->rid == rid)
+ return cursrv;
+ }
+
+ return NULL;
+}
+
+/*
+ * This function finds a server with matching "<name> x <rid>" within
+ * selected proxy <px>.
+ * Using the combination of name + revision id ensures that the function will
+ * either return the server we're expecting or NULL if it has been removed
+ * from the proxy.
+ */
+struct server *findserver_unique_name(const struct proxy *px, const char *name, uint32_t rid) {
+
+ struct server *cursrv;
+
+ if (!px)
+ return NULL;
+
+ for (cursrv = px->srv; cursrv; cursrv = cursrv->next) {
+ if (!strcmp(cursrv->id, name) && cursrv->rid == rid)
+ return cursrv;
+ }
+
+ return NULL;
+}
+
/* This function checks that the designated proxy has no http directives
 * enabled. It will output a warning if there are, and will fix some of them
 * (layer-7 balancing falls back to round robin, HTTP log formats fall back
 * to 'option tcplog'). It returns the number of fatal errors encountered
 * (currently always 0, warnings only). This should be called at the end of
 * the configuration parsing if the proxy is not in http mode.
 */
int proxy_cfg_ensure_no_http(struct proxy *curproxy)
{
	if (curproxy->cookie_name != NULL) {
		ha_warning("cookie will be ignored for %s '%s' (needs 'mode http').\n",
			   proxy_type_str(curproxy), curproxy->id);
	}
	if (isttest(curproxy->monitor_uri)) {
		ha_warning("monitor-uri will be ignored for %s '%s' (needs 'mode http').\n",
			   proxy_type_str(curproxy), curproxy->id);
	}
	if (curproxy->lbprm.algo & BE_LB_NEED_HTTP) {
		/* fix it up: replace the L7 algorithm with round robin */
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RR;
		ha_warning("Layer 7 hash not possible for %s '%s' (needs 'mode http'). Falling back to round robin.\n",
			   proxy_type_str(curproxy), curproxy->id);
	}
	if (curproxy->to_log & (LW_REQ | LW_RESP)) {
		/* drop the HTTP-only log fields */
		curproxy->to_log &= ~(LW_REQ | LW_RESP);
		ha_warning("parsing [%s:%d] : HTTP log/header format not usable with %s '%s' (needs 'mode http').\n",
			   curproxy->conf.lfs_file, curproxy->conf.lfs_line,
			   proxy_type_str(curproxy), curproxy->id);
	}
	if (curproxy->conf.logformat_string == default_http_log_format ||
	    curproxy->conf.logformat_string == clf_http_log_format) {
		/* Note: we don't change the directive's file:line number */
		curproxy->conf.logformat_string = default_tcp_log_format;
		ha_warning("parsing [%s:%d] : 'option httplog' not usable with %s '%s' (needs 'mode http'). Falling back to 'option tcplog'.\n",
			   curproxy->conf.lfs_file, curproxy->conf.lfs_line,
			   proxy_type_str(curproxy), curproxy->id);
	}
	else if (curproxy->conf.logformat_string == default_https_log_format) {
		/* Note: we don't change the directive's file:line number */
		curproxy->conf.logformat_string = default_tcp_log_format;
		ha_warning("parsing [%s:%d] : 'option httpslog' not usable with %s '%s' (needs 'mode http'). Falling back to 'option tcplog'.\n",
			   curproxy->conf.lfs_file, curproxy->conf.lfs_line,
			   proxy_type_str(curproxy), curproxy->id);
	}

	return 0;
}
+
/* This function checks that the designated proxy has no log directives
 * enabled. It will output a warning if there are, and will fix some of them
 * (log-only balance algorithms fall back to round robin). It returns the
 * number of fatal errors encountered (currently always 0, warnings only).
 * This should be called at the end of the configuration parsing if the
 * proxy is not in log mode.
 */
int proxy_cfg_ensure_no_log(struct proxy *curproxy)
{
	if (curproxy->lbprm.algo & BE_LB_NEED_LOG) {
		/* fix it up: replace the log-specific algorithm with round robin */
		curproxy->lbprm.algo &= ~BE_LB_ALGO;
		curproxy->lbprm.algo |= BE_LB_ALGO_RR;
		ha_warning("Unusable balance algorithm for %s '%s' (needs 'mode log'). Falling back to round robin.\n",
			   proxy_type_str(curproxy), curproxy->id);
	}

	return 0;
}
+
/* Perform the most basic initialization of a proxy :
 * memset(), list_init(*), reset_timeouts(*).
 * Any new proxy or peer should be initialized via this function.
 */
void init_new_proxy(struct proxy *p)
{
	memset(p, 0, sizeof(struct proxy));
	p->obj_type = OBJ_TYPE_PROXY;
	queue_init(&p->queue, p, NULL);

	/* rule and configuration lists */
	LIST_INIT(&p->acl);
	LIST_INIT(&p->http_req_rules);
	LIST_INIT(&p->http_res_rules);
	LIST_INIT(&p->http_after_res_rules);
	LIST_INIT(&p->redirect_rules);
	LIST_INIT(&p->mon_fail_cond);
	LIST_INIT(&p->switching_rules);
	LIST_INIT(&p->server_rules);
	LIST_INIT(&p->persist_rules);
	LIST_INIT(&p->sticking_rules);
	LIST_INIT(&p->storersp_rules);
	LIST_INIT(&p->tcp_req.inspect_rules);
	LIST_INIT(&p->tcp_rep.inspect_rules);
	LIST_INIT(&p->tcp_req.l4_rules);
	LIST_INIT(&p->tcp_req.l5_rules);
	MT_LIST_INIT(&p->listener_queue);

	/* logging and format lists */
	LIST_INIT(&p->loggers);
	LIST_INIT(&p->logformat);
	LIST_INIT(&p->logformat_sd);
	LIST_INIT(&p->format_unique_id);
	LIST_INIT(&p->logformat_error);

	/* configuration-time bookkeeping */
	LIST_INIT(&p->conf.bind);
	LIST_INIT(&p->conf.listeners);
	LIST_INIT(&p->conf.errors);
	LIST_INIT(&p->conf.args.list);
	LIST_INIT(&p->filter_configs);
	LIST_INIT(&p->tcpcheck_rules.preset_vars);

	p->defsrv.id = "default-server";
	p->conf.used_listener_id = EB_ROOT;
	p->conf.used_server_id = EB_ROOT;
	p->used_server_addr = EB_ROOT_UNIQUE;

	/* Timeouts are defined as -1 */
	proxy_reset_timeouts(p);
	p->tcp_rep.inspect_delay = TICK_ETERNITY;

	/* initial uuid is unassigned (-1) */
	p->uuid = -1;

	/* Default to only allow L4 retries */
	p->retry_type = PR_RE_CONN_FAILED;

	p->extra_counters_fe = NULL;
	p->extra_counters_be = NULL;

	HA_RWLOCK_INIT(&p->lock);

	/* initialize the default settings */
	proxy_preset_defaults(p);
}
+
/* Preset default settings onto proxy <defproxy>. Called both for freshly
 * created proxies (via init_new_proxy()) and for "defaults" sections.
 * Internal proxies (PR_CAP_INT) get a few hard-coded values instead of the
 * globally configured ones.
 */
void proxy_preset_defaults(struct proxy *defproxy)
{
	defproxy->mode = PR_MODE_TCP;
	defproxy->flags = 0;
	if (!(defproxy->cap & PR_CAP_INT)) {
		defproxy->maxconn = cfg_maxpconn;
		defproxy->conn_retries = CONN_RETRIES;
	}
	defproxy->redispatch_after = 0;
	defproxy->options = PR_O_REUSE_SAFE;
	if (defproxy->cap & PR_CAP_INT)
		defproxy->options2 |= PR_O2_INDEPSTR;
	defproxy->max_out_conns = MAX_SRV_LIST;

	/* default-server health check and agent settings */
	defproxy->defsrv.check.inter = DEF_CHKINTR;
	defproxy->defsrv.check.fastinter = 0;
	defproxy->defsrv.check.downinter = 0;
	defproxy->defsrv.agent.inter = DEF_CHKINTR;
	defproxy->defsrv.agent.fastinter = 0;
	defproxy->defsrv.agent.downinter = 0;
	defproxy->defsrv.check.rise = DEF_RISETIME;
	defproxy->defsrv.check.fall = DEF_FALLTIME;
	defproxy->defsrv.agent.rise = DEF_AGENT_RISETIME;
	defproxy->defsrv.agent.fall = DEF_AGENT_FALLTIME;
	defproxy->defsrv.check.port = 0;
	defproxy->defsrv.agent.port = 0;

	/* default-server connection management */
	defproxy->defsrv.maxqueue = 0;
	defproxy->defsrv.minconn = 0;
	defproxy->defsrv.maxconn = 0;
	defproxy->defsrv.max_reuse = -1;
	defproxy->defsrv.max_idle_conns = -1;
	defproxy->defsrv.pool_purge_delay = 5000;
	defproxy->defsrv.slowstart = 0;
	defproxy->defsrv.onerror = DEF_HANA_ONERR;
	defproxy->defsrv.consecutive_errors_limit = DEF_HANA_ERRLIMIT;
	defproxy->defsrv.uweight = defproxy->defsrv.iweight = 1;
	LIST_INIT(&defproxy->defsrv.pp_tlvs);

	defproxy->email_alert.level = LOG_ALERT;
	defproxy->load_server_state_from_file = PR_SRV_STATE_FILE_UNSPEC;

	if (defproxy->cap & PR_CAP_INT)
		defproxy->timeout.connect = 5000;
}
+
+/* Frees all dynamic settings allocated on a default proxy that's about to be
+ * destroyed. This is a subset of the complete proxy deinit code, but these
+ * should probably be merged ultimately. Note that most of the fields are not
+ * even reset, so extreme care is required here, and calling
+ * proxy_preset_defaults() afterwards would be safer.
+ */
+void proxy_free_defaults(struct proxy *defproxy)
+{
+ struct acl *acl, *aclb;
+ struct logger *log, *logb;
+ struct cap_hdr *h,*h_next;
+
+ ha_free(&defproxy->id);
+ ha_free(&defproxy->conf.file);
+ ha_free((char **)&defproxy->defsrv.conf.file);
+ ha_free(&defproxy->check_command);
+ ha_free(&defproxy->check_path);
+ ha_free(&defproxy->cookie_name);
+ ha_free(&defproxy->rdp_cookie_name);
+ ha_free(&defproxy->dyncookie_key);
+ ha_free(&defproxy->cookie_domain);
+ ha_free(&defproxy->cookie_attrs);
+ ha_free(&defproxy->lbprm.arg_str);
+ ha_free(&defproxy->capture_name);
+ istfree(&defproxy->monitor_uri);
+ ha_free(&defproxy->defbe.name);
+ ha_free(&defproxy->conn_src.iface_name);
+ istfree(&defproxy->server_id_hdr_name);
+
+ http_ext_clean(defproxy);
+
+ list_for_each_entry_safe(acl, aclb, &defproxy->acl, list) {
+ LIST_DELETE(&acl->list);
+ prune_acl(acl);
+ free(acl);
+ }
+
+ free_act_rules(&defproxy->tcp_req.inspect_rules);
+ free_act_rules(&defproxy->tcp_rep.inspect_rules);
+ free_act_rules(&defproxy->tcp_req.l4_rules);
+ free_act_rules(&defproxy->tcp_req.l5_rules);
+ free_act_rules(&defproxy->http_req_rules);
+ free_act_rules(&defproxy->http_res_rules);
+ free_act_rules(&defproxy->http_after_res_rules);
+
+ h = defproxy->req_cap;
+ while (h) {
+ h_next = h->next;
+ free(h->name);
+ pool_destroy(h->pool);
+ free(h);
+ h = h_next;
+ }
+
+ h = defproxy->rsp_cap;
+ while (h) {
+ h_next = h->next;
+ free(h->name);
+ pool_destroy(h->pool);
+ free(h);
+ h = h_next;
+ }
+
+ if (defproxy->conf.logformat_string != default_http_log_format &&
+ defproxy->conf.logformat_string != default_tcp_log_format &&
+ defproxy->conf.logformat_string != clf_http_log_format &&
+ defproxy->conf.logformat_string != default_https_log_format) {
+ ha_free(&defproxy->conf.logformat_string);
+ }
+
+ if (defproxy->conf.logformat_sd_string != default_rfc5424_sd_log_format)
+ ha_free(&defproxy->conf.logformat_sd_string);
+
+ list_for_each_entry_safe(log, logb, &defproxy->loggers, list) {
+ LIST_DEL_INIT(&log->list);
+ free_logger(log);
+ }
+
+ ha_free(&defproxy->conf.uniqueid_format_string);
+ ha_free(&defproxy->conf.error_logformat_string);
+ ha_free(&defproxy->conf.lfs_file);
+ ha_free(&defproxy->conf.lfsd_file);
+ ha_free(&defproxy->conf.uif_file);
+ ha_free(&defproxy->conf.elfs_file);
+ chunk_destroy(&defproxy->log_tag);
+
+ free_email_alert(defproxy);
+ proxy_release_conf_errors(defproxy);
+ deinit_proxy_tcpcheck(defproxy);
+
+ /* FIXME: we cannot free uri_auth because it might already be used by
+ * another proxy (legacy code for stats URI ...). Refcount anyone ?
+ */
+}
+
+/* delete a defproxy from the tree if still in it, frees its content and its
+ * storage. Nothing is done if <px> is NULL or if it doesn't have PR_CAP_DEF
+ * set, allowing to pass it the direct result of a lookup function.
+ */
+void proxy_destroy_defaults(struct proxy *px)
+{
+ if (!px)
+ return;
+ if (!(px->cap & PR_CAP_DEF))
+ return;
+ BUG_ON(px->conf.refcount != 0);
+ ebpt_delete(&px->conf.by_name);
+ proxy_free_defaults(px);
+ free(px);
+}
+
+/* delete all unreferenced default proxies. A default proxy is unreferenced if
+ * its refcount is equal to zero.
+ */
+void proxy_destroy_all_unref_defaults()
+{
+ struct ebpt_node *n;
+
+ n = ebpt_first(&defproxy_by_name);
+ while (n) {
+ struct proxy *px = container_of(n, struct proxy, conf.by_name);
+ BUG_ON(!(px->cap & PR_CAP_DEF));
+ n = ebpt_next(n);
+ if (!px->conf.refcount)
+ proxy_destroy_defaults(px);
+ }
+}
+
+/* Add a reference on the default proxy <defpx> for the proxy <px> Nothing is
+ * done if <px> already references <defpx>. Otherwise, the default proxy
+ * refcount is incremented by one. For now, this operation is not thread safe
+ * and is perform during init stage only.
+ */
+void proxy_ref_defaults(struct proxy *px, struct proxy *defpx)
+{
+ if (px->defpx == defpx)
+ return;
+ BUG_ON(px->defpx != NULL);
+ px->defpx = defpx;
+ defpx->conf.refcount++;
+}
+
+/* proxy <px> removes its reference on its default proxy. The default proxy
+ * refcount is decremented by one. If it was the last reference, the
+ * corresponding default proxy is destroyed. For now this operation is not
+ * thread safe and is performed during deinit staged only.
+*/
+void proxy_unref_defaults(struct proxy *px)
+{
+ if (px->defpx == NULL)
+ return;
+ if (!--px->defpx->conf.refcount)
+ proxy_destroy_defaults(px->defpx);
+ px->defpx = NULL;
+}
+
+/* Allocates a new proxy <name> of type <cap>.
+ * Returns the proxy instance on success. On error, NULL is returned.
+ */
+struct proxy *alloc_new_proxy(const char *name, unsigned int cap, char **errmsg)
+{
+ struct proxy *curproxy;
+
+ if ((curproxy = calloc(1, sizeof(*curproxy))) == NULL) {
+ memprintf(errmsg, "proxy '%s': out of memory", name);
+ goto fail;
+ }
+
+ init_new_proxy(curproxy);
+ curproxy->last_change = ns_to_sec(now_ns);
+ curproxy->id = strdup(name);
+ curproxy->cap = cap;
+
+ if (!(cap & PR_CAP_INT))
+ proxy_store_name(curproxy);
+
+ done:
+ return curproxy;
+
+ fail:
+ /* Note: in case of fatal error here, we WILL make valgrind unhappy,
+ * but its not worth trying to unroll everything here just before
+ * quitting.
+ */
+ free(curproxy);
+ return NULL;
+}
+
/* Copy the proxy settings from <defproxy> to <curproxy>.
 * Only the settings relevant to <curproxy>'s capabilities (FE/BE) are
 * copied; dynamically allocated members are duplicated, not shared, except
 * for the built-in log-format constants and a few intentionally shared
 * pointers (uri_auth, tcpcheck rule list).
 * Returns 0 on success.
 * Returns 1 on error. <errmsg> will be allocated with an error description.
 */
static int proxy_defproxy_cpy(struct proxy *curproxy, const struct proxy *defproxy,
			      char **errmsg)
{
	struct logger *tmplogger;
	char *tmpmsg = NULL;

	/* set default values from the specified default proxy */
	srv_settings_cpy(&curproxy->defsrv, &defproxy->defsrv, 0);

	curproxy->flags = (defproxy->flags & PR_FL_DISABLED); /* Only inherit from disabled flag */
	curproxy->options = defproxy->options;
	curproxy->options2 = defproxy->options2;
	curproxy->no_options = defproxy->no_options;
	curproxy->no_options2 = defproxy->no_options2;
	curproxy->retry_type = defproxy->retry_type;
	curproxy->tcp_req.inspect_delay = defproxy->tcp_req.inspect_delay;
	curproxy->tcp_rep.inspect_delay = defproxy->tcp_rep.inspect_delay;

	http_ext_clean(curproxy);
	http_ext_dup(defproxy, curproxy);

	if (isttest(defproxy->server_id_hdr_name))
		curproxy->server_id_hdr_name = istdup(defproxy->server_id_hdr_name);

	/* initialize error relocations */
	if (!proxy_dup_default_conf_errors(curproxy, defproxy, &tmpmsg)) {
		memprintf(errmsg, "proxy '%s' : %s", curproxy->id, tmpmsg);
		free(tmpmsg);
		return 1;
	}

	/* frontend-only connection and keep-alive settings */
	if (curproxy->cap & PR_CAP_FE) {
		curproxy->maxconn = defproxy->maxconn;
		curproxy->backlog = defproxy->backlog;
		curproxy->fe_sps_lim = defproxy->fe_sps_lim;

		curproxy->to_log = defproxy->to_log & ~LW_COOKIE & ~LW_REQHDR & ~ LW_RSPHDR;
		curproxy->max_out_conns = defproxy->max_out_conns;

		curproxy->clitcpka_cnt = defproxy->clitcpka_cnt;
		curproxy->clitcpka_idle = defproxy->clitcpka_idle;
		curproxy->clitcpka_intvl = defproxy->clitcpka_intvl;
	}

	/* backend-only load-balancing, check and cookie settings */
	if (curproxy->cap & PR_CAP_BE) {
		curproxy->lbprm.algo = defproxy->lbprm.algo;
		curproxy->lbprm.hash_balance_factor = defproxy->lbprm.hash_balance_factor;
		curproxy->fullconn = defproxy->fullconn;
		curproxy->conn_retries = defproxy->conn_retries;
		curproxy->redispatch_after = defproxy->redispatch_after;
		curproxy->max_ka_queue = defproxy->max_ka_queue;

		/* the tcpcheck rule list itself is shared with the defaults
		 * section; only the preset vars are duplicated.
		 */
		curproxy->tcpcheck_rules.flags = (defproxy->tcpcheck_rules.flags & ~TCPCHK_RULES_UNUSED_RS);
		curproxy->tcpcheck_rules.list = defproxy->tcpcheck_rules.list;
		if (!LIST_ISEMPTY(&defproxy->tcpcheck_rules.preset_vars)) {
			if (!dup_tcpcheck_vars(&curproxy->tcpcheck_rules.preset_vars,
					       &defproxy->tcpcheck_rules.preset_vars)) {
				memprintf(errmsg, "proxy '%s': failed to duplicate tcpcheck preset-vars", curproxy->id);
				return 1;
			}
		}

		curproxy->ck_opts = defproxy->ck_opts;

		if (defproxy->cookie_name)
			curproxy->cookie_name = strdup(defproxy->cookie_name);
		curproxy->cookie_len = defproxy->cookie_len;

		if (defproxy->dyncookie_key)
			curproxy->dyncookie_key = strdup(defproxy->dyncookie_key);
		if (defproxy->cookie_domain)
			curproxy->cookie_domain = strdup(defproxy->cookie_domain);

		if (defproxy->cookie_maxidle)
			curproxy->cookie_maxidle = defproxy->cookie_maxidle;

		if (defproxy->cookie_maxlife)
			curproxy->cookie_maxlife = defproxy->cookie_maxlife;

		if (defproxy->rdp_cookie_name)
			curproxy->rdp_cookie_name = strdup(defproxy->rdp_cookie_name);
		curproxy->rdp_cookie_len = defproxy->rdp_cookie_len;

		if (defproxy->cookie_attrs)
			curproxy->cookie_attrs = strdup(defproxy->cookie_attrs);

		if (defproxy->lbprm.arg_str)
			curproxy->lbprm.arg_str = strdup(defproxy->lbprm.arg_str);
		curproxy->lbprm.arg_len = defproxy->lbprm.arg_len;
		curproxy->lbprm.arg_opt1 = defproxy->lbprm.arg_opt1;
		curproxy->lbprm.arg_opt2 = defproxy->lbprm.arg_opt2;
		curproxy->lbprm.arg_opt3 = defproxy->lbprm.arg_opt3;

		if (defproxy->conn_src.iface_name)
			curproxy->conn_src.iface_name = strdup(defproxy->conn_src.iface_name);
		curproxy->conn_src.iface_len = defproxy->conn_src.iface_len;
		curproxy->conn_src.opts = defproxy->conn_src.opts;
#if defined(CONFIG_HAP_TRANSPARENT)
		curproxy->conn_src.tproxy_addr = defproxy->conn_src.tproxy_addr;
#endif
		curproxy->load_server_state_from_file = defproxy->load_server_state_from_file;

		curproxy->srvtcpka_cnt = defproxy->srvtcpka_cnt;
		curproxy->srvtcpka_idle = defproxy->srvtcpka_idle;
		curproxy->srvtcpka_intvl = defproxy->srvtcpka_intvl;
	}

	/* frontend capture settings; note that the capture header lists are
	 * shared with the defaults section, not duplicated.
	 */
	if (curproxy->cap & PR_CAP_FE) {
		if (defproxy->capture_name)
			curproxy->capture_name = strdup(defproxy->capture_name);
		curproxy->capture_namelen = defproxy->capture_namelen;
		curproxy->capture_len = defproxy->capture_len;

		curproxy->nb_req_cap = defproxy->nb_req_cap;
		curproxy->req_cap = defproxy->req_cap;

		curproxy->nb_rsp_cap = defproxy->nb_rsp_cap;
		curproxy->rsp_cap = defproxy->rsp_cap;
	}

	/* frontend timeouts and log formats */
	if (curproxy->cap & PR_CAP_FE) {
		curproxy->timeout.client = defproxy->timeout.client;
		curproxy->timeout.client_hs = defproxy->timeout.client_hs;
		curproxy->timeout.clientfin = defproxy->timeout.clientfin;
		curproxy->timeout.tarpit = defproxy->timeout.tarpit;
		curproxy->timeout.httpreq = defproxy->timeout.httpreq;
		curproxy->timeout.httpka = defproxy->timeout.httpka;
		if (isttest(defproxy->monitor_uri))
			curproxy->monitor_uri = istdup(defproxy->monitor_uri);
		if (defproxy->defbe.name)
			curproxy->defbe.name = strdup(defproxy->defbe.name);

		/* get either a pointer to the logformat string or a copy of it */
		curproxy->conf.logformat_string = defproxy->conf.logformat_string;
		if (curproxy->conf.logformat_string &&
		    curproxy->conf.logformat_string != default_http_log_format &&
		    curproxy->conf.logformat_string != default_tcp_log_format &&
		    curproxy->conf.logformat_string != clf_http_log_format &&
		    curproxy->conf.logformat_string != default_https_log_format)
			curproxy->conf.logformat_string = strdup(curproxy->conf.logformat_string);

		if (defproxy->conf.lfs_file) {
			curproxy->conf.lfs_file = strdup(defproxy->conf.lfs_file);
			curproxy->conf.lfs_line = defproxy->conf.lfs_line;
		}

		/* get either a pointer to the logformat string for RFC5424 structured-data or a copy of it */
		curproxy->conf.logformat_sd_string = defproxy->conf.logformat_sd_string;
		if (curproxy->conf.logformat_sd_string &&
		    curproxy->conf.logformat_sd_string != default_rfc5424_sd_log_format)
			curproxy->conf.logformat_sd_string = strdup(curproxy->conf.logformat_sd_string);

		if (defproxy->conf.lfsd_file) {
			curproxy->conf.lfsd_file = strdup(defproxy->conf.lfsd_file);
			curproxy->conf.lfsd_line = defproxy->conf.lfsd_line;
		}

		curproxy->conf.error_logformat_string = defproxy->conf.error_logformat_string;
		if (curproxy->conf.error_logformat_string)
			curproxy->conf.error_logformat_string = strdup(curproxy->conf.error_logformat_string);

		if (defproxy->conf.elfs_file) {
			curproxy->conf.elfs_file = strdup(defproxy->conf.elfs_file);
			curproxy->conf.elfs_line = defproxy->conf.elfs_line;
		}
	}

	/* backend timeouts and source address */
	if (curproxy->cap & PR_CAP_BE) {
		curproxy->timeout.connect = defproxy->timeout.connect;
		curproxy->timeout.server = defproxy->timeout.server;
		curproxy->timeout.serverfin = defproxy->timeout.serverfin;
		curproxy->timeout.check = defproxy->timeout.check;
		curproxy->timeout.queue = defproxy->timeout.queue;
		curproxy->timeout.tarpit = defproxy->timeout.tarpit;
		curproxy->timeout.httpreq = defproxy->timeout.httpreq;
		curproxy->timeout.httpka = defproxy->timeout.httpka;
		curproxy->timeout.tunnel = defproxy->timeout.tunnel;
		curproxy->conn_src.source_addr = defproxy->conn_src.source_addr;
	}

	curproxy->mode = defproxy->mode;
	curproxy->uri_auth = defproxy->uri_auth; /* for stats */

	/* copy default loggers to curproxy */
	list_for_each_entry(tmplogger, &defproxy->loggers, list) {
		struct logger *node = dup_logger(tmplogger);

		if (!node) {
			memprintf(errmsg, "proxy '%s': out of memory", curproxy->id);
			return 1;
		}
		LIST_APPEND(&curproxy->loggers, &node->list);
	}

	curproxy->conf.uniqueid_format_string = defproxy->conf.uniqueid_format_string;
	if (curproxy->conf.uniqueid_format_string)
		curproxy->conf.uniqueid_format_string = strdup(curproxy->conf.uniqueid_format_string);

	chunk_dup(&curproxy->log_tag, &defproxy->log_tag);

	if (defproxy->conf.uif_file) {
		curproxy->conf.uif_file = strdup(defproxy->conf.uif_file);
		curproxy->conf.uif_line = defproxy->conf.uif_line;
	}

	/* copy default header unique id */
	if (isttest(defproxy->header_unique_id)) {
		const struct ist copy = istdup(defproxy->header_unique_id);

		if (!isttest(copy)) {
			memprintf(errmsg, "proxy '%s': out of memory for unique-id-header", curproxy->id);
			return 1;
		}
		curproxy->header_unique_id = copy;
	}

	/* default compression options */
	if (defproxy->comp != NULL) {
		curproxy->comp = calloc(1, sizeof(*curproxy->comp));
		if (!curproxy->comp) {
			memprintf(errmsg, "proxy '%s': out of memory for default compression options", curproxy->id);
			return 1;
		}
		curproxy->comp->algos_res = defproxy->comp->algos_res;
		curproxy->comp->algo_req = defproxy->comp->algo_req;
		curproxy->comp->types_res = defproxy->comp->types_res;
		curproxy->comp->types_req = defproxy->comp->types_req;
		curproxy->comp->flags = defproxy->comp->flags;
	}

	if (defproxy->check_path)
		curproxy->check_path = strdup(defproxy->check_path);
	if (defproxy->check_command)
		curproxy->check_command = strdup(defproxy->check_command);

	/* email alert settings */
	if (defproxy->email_alert.mailers.name)
		curproxy->email_alert.mailers.name = strdup(defproxy->email_alert.mailers.name);
	if (defproxy->email_alert.from)
		curproxy->email_alert.from = strdup(defproxy->email_alert.from);
	if (defproxy->email_alert.to)
		curproxy->email_alert.to = strdup(defproxy->email_alert.to);
	if (defproxy->email_alert.myhostname)
		curproxy->email_alert.myhostname = strdup(defproxy->email_alert.myhostname);
	curproxy->email_alert.level = defproxy->email_alert.level;
	curproxy->email_alert.set = defproxy->email_alert.set;

	return 0;
}
+
+/* Allocates a new proxy <name> of type <cap> found at position <file:linenum>,
+ * preset it from the defaults of <defproxy> and returns it. In case of error,
+ * an alert is printed and NULL is returned.
+ */
+struct proxy *parse_new_proxy(const char *name, unsigned int cap,
+ const char *file, int linenum,
+ const struct proxy *defproxy)
+{
+ struct proxy *curproxy = NULL;
+ char *errmsg = NULL;
+
+ if (!(curproxy = alloc_new_proxy(name, cap, &errmsg))) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ free(errmsg);
+ return NULL;
+ }
+
+ if (defproxy) {
+ if (proxy_defproxy_cpy(curproxy, defproxy, &errmsg)) {
+ ha_alert("parsing [%s:%d] : %s\n", file, linenum, errmsg);
+ free(errmsg);
+
+ ha_free(&curproxy);
+ return NULL;
+ }
+ }
+
+ curproxy->conf.args.file = curproxy->conf.file = strdup(file);
+ curproxy->conf.args.line = curproxy->conf.line = linenum;
+
+ return curproxy;
+}
+
+/* to be called under the proxy lock after pausing some listeners. This will
+ * automatically update the p->flags flag
+ */
+void proxy_cond_pause(struct proxy *p)
+{
+ if (p->li_ready)
+ return;
+ p->flags |= PR_FL_PAUSED;
+}
+
+/* to be called under the proxy lock after resuming some listeners. This will
+ * automatically update the p->flags flag
+ */
+void proxy_cond_resume(struct proxy *p)
+{
+ if (!p->li_ready)
+ return;
+ p->flags &= ~PR_FL_PAUSED;
+}
+
/* to be called under the proxy lock after stopping some listeners. This will
 * automatically update the p->flags flag after stopping the last one, and
 * will emit a log indicating the proxy's condition. The function is idempotent
 * so that it will not emit multiple logs; a proxy will be disabled only once.
 */
void proxy_cond_disable(struct proxy *p)
{
	/* idempotence: already stopped or disabled */
	if (p->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
		return;

	/* some listeners are still active, not the last one yet */
	if (p->li_ready + p->li_paused > 0)
		return;

	p->flags |= PR_FL_STOPPED;

	/* Note: syslog proxies use their own loggers so while it's somewhat OK
	 * to report them being stopped as a warning, we must not spam their log
	 * servers which are in fact production servers. For other types (CLI,
	 * peers, etc) we must not report them at all as they're not really on
	 * the data plane but on the control plane.
	 */
	if ((p->mode == PR_MODE_TCP || p->mode == PR_MODE_HTTP || p->mode == PR_MODE_SYSLOG) && !(p->cap & PR_CAP_INT))
		ha_warning("Proxy %s stopped (cumulated conns: FE: %lld, BE: %lld).\n",
			   p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);

	/* intentionally excludes PR_MODE_SYSLOG, see the note above */
	if ((p->mode == PR_MODE_TCP || p->mode == PR_MODE_HTTP) && !(p->cap & PR_CAP_INT))
		send_log(p, LOG_WARNING, "Proxy %s stopped (cumulated conns: FE: %lld, BE: %lld).\n",
			 p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);

	/* let the stick-table sync task and the proxy task notice the stop */
	if (p->table && p->table->size && p->table->sync_task)
		task_wakeup(p->table->sync_task, TASK_WOKEN_MSG);

	if (p->task)
		task_wakeup(p->task, TASK_WOKEN_MSG);
}
+
/*
 * This is the proxy management task. It enables proxies when there are enough
 * free streams, or stops them when the table is full. It is designed to be
 * called as a task which is woken up upon stopping or when rate limiting must
 * be enforced.
 */
struct task *manage_proxy(struct task *t, void *context, unsigned int state)
{
	struct proxy *p = context;
	int next = TICK_ETERNITY;   /* next wake-up date, eternity by default */
	unsigned int wait;

	/* We should periodically try to enable listeners waiting for a
	 * global resource here.
	 */

	/* If the proxy holds a stick table, we need to purge all unused
	 * entries. These are all the ones in the table with ref_cnt == 0
	 * and all the ones in the pool used to allocate new entries. Any
	 * entry attached to an existing stream waiting for a store will
	 * be in neither list. Any entry being dumped will have ref_cnt > 0.
	 * However we protect tables that are being synced to peers.
	 */
	if (unlikely(stopping && (p->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && p->table && p->table->current)) {

		if (!p->table->refcnt) {
			/* !table->refcnt means there
			 * is no more pending full resync
			 * to push to a new process and
			 * we are free to flush the table.
			 */
			int budget;
			int cleaned_up;

			/* We purposely enforce a budget limitation since we don't want
			 * to spend too much time purging old entries
			 *
			 * This is known to cause the watchdog to occasionnaly trigger if
			 * the table is huge and all entries become available for purge
			 * at the same time
			 *
			 * Moreover, we must also anticipate the pool_gc() call which
			 * will also be much slower if there is too much work at once
			 */
			budget = MIN(p->table->current, (1 << 15)); /* max: 32K */
			cleaned_up = stktable_trash_oldest(p->table, budget);
			if (cleaned_up) {
				/* immediately release freed memory since we are stopping */
				pool_gc(NULL);
				if (cleaned_up > (budget / 2)) {
					/* most of the budget was used to purge entries,
					 * it is very likely that there are still trashable
					 * entries in the table, reschedule a new cleanup
					 * attempt ASAP
					 */
					t->expire = TICK_ETERNITY;
					task_wakeup(t, TASK_WOKEN_RES);
					return t;
				}
			}
		}
		if (p->table->current) {
			/* some entries still remain but are not yet available
			 * for cleanup, let's recheck in one second
			 */
			next = tick_first(next, tick_add(now_ms, 1000));
		}
	}

	/* the rest below is just for frontends */
	if (!(p->cap & PR_CAP_FE))
		goto out;

	/* check the various reasons we may find to block the frontend */
	if (unlikely(p->feconn >= p->maxconn))
		goto out;

	if (p->fe_sps_lim &&
	    (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_sps_lim, 0))) {
		/* we're blocking because a limit was reached on the number of
		 * requests/s on the frontend. We want to re-check ASAP, which
		 * means in 1 ms before estimated expiration date, because the
		 * timer will have settled down.
		 */
		next = tick_first(next, tick_add(now_ms, wait));
		goto out;
	}

	/* The proxy is not limited so we can re-enable any waiting listener */
	dequeue_proxy_listeners(p);
 out:
	/* requeue ourselves at the computed date (possibly eternity) */
	t->expire = next;
	task_queue(t);
	return t;
}
+
+
+static int proxy_parse_grace(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *res;
+
+ if (!*args[1]) {
+ memprintf(err, "'%s' expects <time> as argument.\n", args[0]);
+ return -1;
+ }
+ res = parse_time_err(args[1], &global.grace_delay, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in argument to <%s>.\n", *res, args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+static int proxy_parse_hard_stop_after(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *res;
+
+ if (!*args[1]) {
+ memprintf(err, "'%s' expects <time> as argument.\n", args[0]);
+ return -1;
+ }
+ res = parse_time_err(args[1], &global.hard_stop_after, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in argument to <%s>.\n", *res, args[0]);
+ return -1;
+ }
+ return 0;
+}
+
+static int proxy_parse_close_spread_time(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *res;
+
+ if (!*args[1]) {
+ memprintf(err, "'%s' expects <time> as argument.\n", args[0]);
+ return -1;
+ }
+
+ /* If close-spread-time is set to "infinite", disable the active connection
+ * closing during soft-stop.
+ */
+ if (strcmp(args[1], "infinite") == 0) {
+ global.tune.options |= GTUNE_DISABLE_ACTIVE_CLOSE;
+ global.close_spread_time = TICK_ETERNITY;
+ return 0;
+ }
+
+ res = parse_time_err(args[1], &global.close_spread_time, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[1], args[0]);
+ return -1;
+ }
+ else if (res) {
+ memprintf(err, "unexpected character '%c' in argument to <%s>.\n", *res, args[0]);
+ return -1;
+ }
+ global.tune.options &= ~GTUNE_DISABLE_ACTIVE_CLOSE;
+
+ return 0;
+}
+
/* Task handler scheduled when soft-stop has run longer than hard-stop-after.
 * First invocation: warns, isolates all threads and shuts down every remaining
 * stream, then reschedules itself in 1s. Second invocation (killed already
 * set): gives up and wakes all enabled threads so the process can exit.
 */
struct task *hard_stop(struct task *t, void *context, unsigned int state)
{
	struct proxy *p;
	struct stream *s;
	int thr;

	if (killed) {
		/* second pass: streams survived the forced shutdown, exit now */
		ha_warning("Some tasks resisted to hard-stop, exiting now.\n");
		send_log(NULL, LOG_WARNING, "Some tasks resisted to hard-stop, exiting now.\n");
		killed = 2;
		/* wake every still-enabled thread so they notice killed == 2 */
		for (thr = 0; thr < global.nbthread; thr++)
			if (_HA_ATOMIC_LOAD(&ha_thread_info[thr].tg->threads_enabled) & ha_thread_info[thr].ltid_bit)
				wake_thread(thr);
		t->expire = TICK_ETERNITY;
		return t;
	}

	ha_warning("soft-stop running for too long, performing a hard-stop.\n");
	send_log(NULL, LOG_WARNING, "soft-stop running for too long, performing a hard-stop.\n");
	/* report each frontend that still has active connections */
	p = proxies_list;
	while (p) {
		if ((p->cap & PR_CAP_FE) && (p->feconn > 0)) {
			ha_warning("Proxy %s hard-stopped (%d remaining conns will be closed).\n",
				   p->id, p->feconn);
			send_log(p, LOG_WARNING, "Proxy %s hard-stopped (%d remaining conns will be closed).\n",
				 p->id, p->feconn);
		}
		p = p->next;
	}

	/* isolate so we may safely walk every thread's stream list */
	thread_isolate();

	for (thr = 0; thr < global.nbthread; thr++) {
		list_for_each_entry(s, &ha_thread_ctx[thr].streams, list) {
			stream_shutdown(s, SF_ERR_KILLED);
		}
	}

	thread_release();

	/* recheck in 1s whether all streams are really gone */
	killed = 1;
	t->expire = tick_add(now_ms, MS_TO_TICKS(1000));
	return t;
}
+
/* perform the soft-stop right now (i.e. unbind listeners) */
static void do_soft_stop_now()
{
	struct proxy *p;
	struct task *task;

	/* disable busy polling to avoid cpu eating for the new process */
	global.tune.options &= ~GTUNE_BUSY_POLLING;

	/* arm the close-spread window end date if the feature is configured */
	if (tick_isset(global.close_spread_time)) {
		global.close_spread_end = tick_add(now_ms, global.close_spread_time);
	}

	/* schedule a hard-stop after a delay if needed */
	if (tick_isset(global.hard_stop_after)) {
		task = task_new_anywhere();
		if (task) {
			task->process = hard_stop;
			task_schedule(task, tick_add(now_ms, global.hard_stop_after));
		}
		else {
			/* best effort: continue the soft-stop without a hard-stop safety net */
			ha_alert("out of memory trying to allocate the hard-stop task.\n");
		}
	}

	/* we isolate so that we have a chance of stopping listeners in other groups */
	thread_isolate();

	/* stop all stoppable listeners */
	protocol_stop_now();

	thread_release();

	/* Loop on proxies to stop backends */
	p = proxies_list;
	while (p) {
		HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
		proxy_cond_disable(p);
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
		p = p->next;
	}

	/* signal zero is used to broadcast the "stopping" event */
	signal_handler(0);
}
+
/* triggered by a soft-stop delayed with `grace`: performs the actual
 * soft-stop and destroys the one-shot task that carried it.
 */
static struct task *grace_expired(struct task *t, void *context, unsigned int state)
{
	ha_notice("Grace period expired, proceeding with soft-stop now.\n");
	send_log(NULL, LOG_NOTICE, "Grace period expired, proceeding with soft-stop now.\n");
	do_soft_stop_now();
	/* one-shot task: free it and tell the scheduler not to requeue */
	task_destroy(t);
	return NULL;
}
+
+/*
+ * this function disables health-check servers so that the process will quickly be ignored
+ * by load balancers.
+ */
+void soft_stop(void)
+{
+ struct task *task;
+
+ stopping = 1;
+
+ if (tick_isset(global.grace_delay)) {
+ task = task_new_anywhere();
+ if (task) {
+ ha_notice("Scheduling a soft-stop in %u ms.\n", global.grace_delay);
+ send_log(NULL, LOG_WARNING, "Scheduling a soft-stop in %u ms.\n", global.grace_delay);
+ task->process = grace_expired;
+ task_schedule(task, tick_add(now_ms, global.grace_delay));
+ return;
+ }
+ else {
+ ha_alert("out of memory trying to allocate the stop-stop task, stopping now.\n");
+ }
+ }
+
+ /* no grace (or failure to enforce it): stop now */
+ do_soft_stop_now();
+}
+
+
/* Temporarily disables listening on all of the proxy's listeners. Upon
 * success, the proxy enters the PR_PAUSED state. The function returns 0
 * if it fails, or non-zero on success.
 * The function takes the proxy's lock so it's safe to
 * call from multiple places.
 */
int pause_proxy(struct proxy *p)
{
	struct listener *l;

	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);

	/* nothing to pause: not a frontend, already down, or no ready listener */
	if (!(p->cap & PR_CAP_FE) || (p->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) || !p->li_ready)
		goto end;

	list_for_each_entry(l, &p->conf.listeners, by_fe)
		suspend_listener(l, 1, 0);

	/* if any listener is still ready, at least one suspension failed */
	if (p->li_ready) {
		ha_warning("%s %s failed to enter pause mode.\n", proxy_cap_str(p->cap), p->id);
		send_log(p, LOG_WARNING, "%s %s failed to enter pause mode.\n", proxy_cap_str(p->cap), p->id);
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
		return 0;
	}
end:
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
	return 1;
}
+
/*
 * This function completely stops a proxy and releases its listeners. It has
 * to be called when going down in order to release the ports so that another
 * process may bind to them. It must also be called on disabled proxies at the
 * end of start-up. If all listeners are closed, the proxy is set to the
 * PR_STOPPED state.
 * The function takes the proxy's lock so it's safe to
 * call from multiple places.
 */
void stop_proxy(struct proxy *p)
{
	struct listener *l;

	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);

	list_for_each_entry(l, &p->conf.listeners, by_fe)
		stop_listener(l, 1, 0, 0);

	if (!(p->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) && !p->li_ready) {
		/* might be just a backend */
		p->flags |= PR_FL_STOPPED;
	}

	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
}
+
/* This function resumes listening on the specified proxy. It scans all of its
 * listeners and tries to enable them all. If any of them fails, the proxy is
 * put back to the paused state. It returns 1 upon success, or zero if an error
 * is encountered.
 * The function takes the proxy's lock so it's safe to
 * call from multiple places.
 */
int resume_proxy(struct proxy *p)
{
	struct listener *l;
	int fail;

	HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);

	/* nothing to resume if the proxy is down or has no paused listener */
	if ((p->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) || !p->li_paused)
		goto end;

	fail = 0;
	list_for_each_entry(l, &p->conf.listeners, by_fe) {
		if (!resume_listener(l, 1, 0)) {
			int port;

			/* report either the TCP/UDP port or the socket id,
			 * whichever identifies the listener best.
			 */
			port = get_host_port(&l->rx.addr);
			if (port) {
				ha_warning("Port %d busy while trying to enable %s %s.\n",
					   port, proxy_cap_str(p->cap), p->id);
				send_log(p, LOG_WARNING, "Port %d busy while trying to enable %s %s.\n",
					 port, proxy_cap_str(p->cap), p->id);
			}
			else {
				ha_warning("Bind on socket %d busy while trying to enable %s %s.\n",
					   l->luid, proxy_cap_str(p->cap), p->id);
				send_log(p, LOG_WARNING, "Bind on socket %d busy while trying to enable %s %s.\n",
					 l->luid, proxy_cap_str(p->cap), p->id);
			}

			/* Another port might have been enabled. Let's stop everything. */
			fail = 1;
			break;
		}
	}

	if (fail) {
		HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
		/* pause_proxy will take PROXY_LOCK */
		pause_proxy(p);
		return 0;
	}
end:
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
	return 1;
}
+
/* Set current stream's backend to <be>. Nothing is done if the
 * stream already had a backend assigned, which is indicated by
 * s->flags & SF_BE_ASSIGNED.
 * All flags, stats and counters which need be updated are updated.
 * Returns 1 if done, 0 in case of internal error, eg: lack of resource.
 */
int stream_set_backend(struct stream *s, struct proxy *be)
{
	unsigned int req_ana;

	if (s->flags & SF_BE_ASSIGNED)
		return 1;

	/* give filters a chance to veto / prepare the backend switch */
	if (flt_set_stream_backend(s, be) < 0)
		return 0;

	s->be = be;
	/* account one more backend connection and track the max concurrency */
	HA_ATOMIC_UPDATE_MAX(&be->be_counters.conn_max,
			     HA_ATOMIC_ADD_FETCH(&be->beconn, 1));
	proxy_inc_be_ctr(be);

	/* assign new parameters to the stream from the new backend */
	s->scb->flags &= ~SC_FL_INDEP_STR;
	if (be->options2 & PR_O2_INDEPSTR)
		s->scb->flags |= SC_FL_INDEP_STR;

	/* We want to enable the backend-specific analysers except those which
	 * were already run as part of the frontend/listener. Note that it would
	 * be more reliable to store the list of analysers that have been run,
	 * but what we do here is OK for now.
	 */
	req_ana = be->be_req_ana;
	if (!(strm_fe(s)->options & PR_O_WREQ_BODY) && be->options & PR_O2_INDEPSTR) {
		/* The backend requests to parse a request body while it was not
		 * performed on the frontend, so add the corresponding analyser
		 */
		req_ana |= AN_REQ_HTTP_BODY;
	}
	if (IS_HTX_STRM(s) && strm_fe(s)->mode != PR_MODE_HTTP) {
		/* The stream was already upgraded to HTTP, so remove analysers
		 * set during the upgrade
		 */
		req_ana &= ~(AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE);
	}
	s->req.analysers |= req_ana & ~(strm_li(s) ? strm_li(s)->bind_conf->analysers : 0);

	if (!IS_HTX_STRM(s) && be->mode == PR_MODE_HTTP) {
		/* If we chain a TCP frontend to an HTX backend, we must upgrade
		 * the client mux */
		if (!stream_set_http_mode(s, NULL))
			return 0;
	}
	else if (IS_HTX_STRM(s) && be->mode != PR_MODE_HTTP) {
		/* If a TCP backend is assigned to an HTX stream, return an
		 * error. It may happen for a new stream on a previously
		 * upgraded connection. */
		if (!(s->flags & SF_ERR_MASK))
			s->flags |= SF_ERR_INTERNAL;
		return 0;
	}
	else {
		/* If the target backend requires HTTP processing, we have to allocate
		 * the HTTP transaction if we did not have one.
		 */
		if (unlikely(!s->txn && be->http_needed && !http_create_txn(s)))
			return 0;
	}

	s->flags |= SF_BE_ASSIGNED;
	/* propagate the "no delay" option to both stream connectors */
	if (be->options2 & PR_O2_NODELAY) {
		s->scf->flags |= SC_FL_SND_NEVERWAIT;
		s->scb->flags |= SC_FL_SND_NEVERWAIT;
	}

	return 1;
}
+
/* Capture a bad request or response and archive it in the proxy's structure.
 * It is relatively protocol-agnostic so it requires that a number of elements
 * are passed :
 *   - <proxy> is the proxy where the error was detected and where the snapshot
 *     needs to be stored
 *   - <is_back> indicates that the error happened when receiving the response
 *   - <other_end> is a pointer to the proxy on the other side when known
 *   - <target> is the target of the connection, usually a server or a proxy
 *   - <sess> is the session which experienced the error
 *   - <ctx> may be NULL or should contain any info relevant to the protocol
 *   - <buf> is the buffer containing the offending data
 *   - <buf_ofs> is the position of this buffer's input data in the input
 *     stream, starting at zero. It may be passed as zero if unknown.
 *   - <buf_out> is the portion of <buf->data> which was already forwarded and
 *     which precedes the buffer's input. The buffer's input starts at
 *     buf->head + buf_out.
 *   - <err_pos> is the pointer to the faulty byte in the buffer's input.
 *   - <show> is the callback to use to display <ctx>. It may be NULL.
 */
void proxy_capture_error(struct proxy *proxy, int is_back,
			 struct proxy *other_end, enum obj_type *target,
			 const struct session *sess,
			 const struct buffer *buf, long buf_ofs,
			 unsigned int buf_out, unsigned int err_pos,
			 const union error_snapshot_ctx *ctx,
			 void (*show)(struct buffer *, const struct error_snapshot *))
{
	struct error_snapshot *es;
	unsigned int buf_len;
	int len1, len2;
	unsigned int ev_id;

	/* take a unique event id even if the snapshot allocation fails below */
	ev_id = HA_ATOMIC_FETCH_ADD(&error_snapshot_id, 1);

	/* only the not-yet-forwarded input part of the buffer is captured */
	buf_len = b_data(buf) - buf_out;

	/* snapshot struct with the captured bytes appended (flexible layout) */
	es = malloc(sizeof(*es) + buf_len);
	if (!es)
		return;

	es->buf_len = buf_len;
	es->ev_id   = ev_id;

	/* the circular buffer may wrap: first copy up to the wrapping point... */
	len1 = b_size(buf) - b_peek_ofs(buf, buf_out);
	if (len1 > buf_len)
		len1 = buf_len;

	if (len1) {
		memcpy(es->buf, b_peek(buf, buf_out), len1);
		/* ... then the remainder from the start of the storage area */
		len2 = buf_len - len1;
		if (len2)
			memcpy(es->buf + len1, b_orig(buf), len2);
	}

	es->buf_err = err_pos;
	es->when    = date; // user-visible date
	es->srv     = objt_server(target);
	es->oe      = other_end;
	/* record the client source address when the origin is a connection */
	if (sess && objt_conn(sess->origin) && conn_get_src(__objt_conn(sess->origin)))
		es->src  = *__objt_conn(sess->origin)->src;
	else
		memset(&es->src, 0, sizeof(es->src));

	es->buf_wrap = b_wrap(buf) - b_peek(buf, buf_out);
	es->buf_out  = buf_out;
	es->buf_ofs  = buf_ofs;

	/* be sure to indicate the offset of the first IN byte */
	if (es->buf_ofs >= es->buf_len)
		es->buf_ofs -= es->buf_len;
	else
		es->buf_ofs = 0;

	/* protocol-specific part now */
	if (ctx)
		es->ctx = *ctx;
	else
		memset(&es->ctx, 0, sizeof(es->ctx));
	es->show = show;

	/* note: we still lock since we have to be certain that nobody is
	 * dumping the output while we free.
	 */
	HA_RWLOCK_WRLOCK(PROXY_LOCK, &proxy->lock);
	if (is_back) {
		es = HA_ATOMIC_XCHG(&proxy->invalid_rep, es);
	} else {
		es = HA_ATOMIC_XCHG(&proxy->invalid_req, es);
	}
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &proxy->lock);
	/* free the previous snapshot we swapped out (may be NULL) */
	ha_free(&es);
}
+
/* Configure all proxies which lack a maxconn setting to use the global one by
 * default. This avoids the common mistake consisting in setting maxconn only
 * in the global section and discovering the hard way that it doesn't propagate
 * through the frontends. These values are also propagated through the various
 * targeted backends, whose fullconn is finally calculated if not yet set.
 */
void proxy_adjust_all_maxconn()
{
	struct proxy *curproxy;
	struct switching_rule *swrule1, *swrule2;

	/* first pass: set frontend maxconn defaults and accumulate them into
	 * each target backend's tot_fe_maxconn.
	 */
	for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
		if (curproxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
			continue;

		if (!(curproxy->cap & PR_CAP_FE))
			continue;

		if (!curproxy->maxconn)
			curproxy->maxconn = global.maxconn;

		/* update the target backend's fullconn count : default_backend */
		if (curproxy->defbe.be)
			curproxy->defbe.be->tot_fe_maxconn += curproxy->maxconn;
		else if ((curproxy->cap & PR_CAP_LISTEN) == PR_CAP_LISTEN)
			curproxy->tot_fe_maxconn += curproxy->maxconn;

		list_for_each_entry(swrule1, &curproxy->switching_rules, list) {
			/* For each target of switching rules, we update their
			 * tot_fe_maxconn, except if a previous rule points to
			 * the same backend or to the default backend.
			 */
			if (swrule1->be.backend != curproxy->defbe.be) {
				/* note: swrule1->be.backend isn't a backend if the rule
				 * is dynamic, it's an expression instead, so it must not
				 * be dereferenced as a backend before being certain it is.
				 */
				list_for_each_entry(swrule2, &curproxy->switching_rules, list) {
					if (swrule2 == swrule1) {
						/* first reference to this backend: count it once */
						if (!swrule1->dynamic)
							swrule1->be.backend->tot_fe_maxconn += curproxy->maxconn;
						break;
					}
					else if (!swrule2->dynamic && swrule2->be.backend == swrule1->be.backend) {
						/* there are multiple refs of this backend */
						break;
					}
				}
			}
		}
	}

	/* automatically compute fullconn if not set. We must not do it in the
	 * loop above because cross-references are not yet fully resolved.
	 */
	for (curproxy = proxies_list; curproxy; curproxy = curproxy->next) {
		if (curproxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
			continue;

		/* If <fullconn> is not set, let's set it to 10% of the sum of
		 * the possible incoming frontend's maxconns.
		 */
		if (!curproxy->fullconn && (curproxy->cap & PR_CAP_BE)) {
			/* we have the sum of the maxconns in <total>. We only
			 * keep 10% of that sum to set the default fullconn, with
			 * a hard minimum of 1 (to avoid a divide by zero).
			 */
			curproxy->fullconn = (curproxy->tot_fe_maxconn + 9) / 10;
			if (!curproxy->fullconn)
				curproxy->fullconn = 1;
		}
	}
}
+
/* Config keywords below */

/* table of configuration keywords handled by this file; the TCP keepalive
 * tuning keywords are only compiled in when the platform exposes the
 * corresponding socket options.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "grace", proxy_parse_grace },
	{ CFG_GLOBAL, "hard-stop-after", proxy_parse_hard_stop_after },
	{ CFG_GLOBAL, "close-spread-time", proxy_parse_close_spread_time },
	{ CFG_LISTEN, "timeout", proxy_parse_timeout },
	{ CFG_LISTEN, "clitimeout", proxy_parse_timeout }, /* This keyword actually fails to parse, this line remains for better error messages. */
	{ CFG_LISTEN, "contimeout", proxy_parse_timeout }, /* This keyword actually fails to parse, this line remains for better error messages. */
	{ CFG_LISTEN, "srvtimeout", proxy_parse_timeout }, /* This keyword actually fails to parse, this line remains for better error messages. */
	{ CFG_LISTEN, "rate-limit", proxy_parse_rate_limit },
	{ CFG_LISTEN, "max-keep-alive-queue", proxy_parse_max_ka_queue },
	{ CFG_LISTEN, "declare", proxy_parse_declare },
	{ CFG_LISTEN, "retry-on", proxy_parse_retry_on },
#ifdef TCP_KEEPCNT
	{ CFG_LISTEN, "clitcpka-cnt", proxy_parse_tcpka_cnt },
	{ CFG_LISTEN, "srvtcpka-cnt", proxy_parse_tcpka_cnt },
#endif
#ifdef TCP_KEEPIDLE
	{ CFG_LISTEN, "clitcpka-idle", proxy_parse_tcpka_idle },
	{ CFG_LISTEN, "srvtcpka-idle", proxy_parse_tcpka_idle },
#endif
#ifdef TCP_KEEPINTVL
	{ CFG_LISTEN, "clitcpka-intvl", proxy_parse_tcpka_intvl },
	{ CFG_LISTEN, "srvtcpka-intvl", proxy_parse_tcpka_intvl },
#endif
	{ 0, NULL, NULL },
}};

/* register the table above at startup */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/* Expects to find a frontend named <arg> and returns it, otherwise displays various
+ * adequate error messages and returns NULL. This function is designed to be used by
+ * functions requiring a frontend on the CLI.
+ */
+struct proxy *cli_find_frontend(struct appctx *appctx, const char *arg)
+{
+ struct proxy *px;
+
+ if (!*arg) {
+ cli_err(appctx, "A frontend name is expected.\n");
+ return NULL;
+ }
+
+ px = proxy_fe_by_name(arg);
+ if (!px) {
+ cli_err(appctx, "No such frontend.\n");
+ return NULL;
+ }
+ return px;
+}
+
+/* Expects to find a backend named <arg> and returns it, otherwise displays various
+ * adequate error messages and returns NULL. This function is designed to be used by
+ * functions requiring a frontend on the CLI.
+ */
+struct proxy *cli_find_backend(struct appctx *appctx, const char *arg)
+{
+ struct proxy *px;
+
+ if (!*arg) {
+ cli_err(appctx, "A backend name is expected.\n");
+ return NULL;
+ }
+
+ px = proxy_be_by_name(arg);
+ if (!px) {
+ cli_err(appctx, "No such backend.\n");
+ return NULL;
+ }
+ return px;
+}
+
+
/* parse a "show servers [state|conn]" CLI line, returns 0 if it wants to start
 * the dump or 1 if it stops immediately. If an argument is specified, it will
 * reserve a show_srv_ctx context and set the proxy pointer into ->px, its ID
 * into ->only_pxid, and ->show_conn to 0 for "state", or 1 for "conn".
 */
static int cli_parse_show_servers(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_srv_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	struct proxy *px;

	/* discriminate on the first letter of the sub-command */
	ctx->show_conn = *args[2] == 'c'; // "conn" vs "state"

	/* check if a backend name has been provided */
	if (*args[3]) {
		/* read server state from local file */
		px = proxy_be_by_name(args[3]);

		if (!px)
			return cli_err(appctx, "Can't find backend.\n");

		/* restrict the dump to this single backend */
		ctx->px = px;
		ctx->only_pxid = px->uuid;
	}
	return 0;
}
+
+/* helper to dump server addr */
+static void dump_server_addr(const struct sockaddr_storage *addr, char *addr_str)
+{
+ addr_str[0] = '\0';
+ switch (addr->ss_family) {
+ case AF_INET:
+ case AF_INET6:
+ addr_to_str(addr, addr_str, INET6_ADDRSTRLEN + 1);
+ break;
+ default:
+ memcpy(addr_str, "-\0", 2);
+ break;
+ }
+}
+
/* dumps server state information for all the servers found in backend cli.p0.
 * These information are all the parameters which may change during HAProxy runtime.
 * By default, we only export to the last known server state file format. These
 * information can be used at next startup to recover same level of server
 * state. It takes its context from show_srv_ctx, with the proxy pointer from
 * ->px, the proxy's id ->only_pxid, the server's pointer from ->sv, and the
 * choice of what to dump from ->show_conn.
 * Returns 1 when the dump for this backend is complete, 0 when the output
 * buffer is full (the function will be called again from the same ->sv).
 */
static int dump_servers_state(struct stconn *sc)
{
	struct appctx *appctx = __sc_appctx(sc);
	struct show_srv_ctx *ctx = appctx->svcctx;
	struct proxy *px = ctx->px;
	struct server *srv;
	char srv_addr[INET6_ADDRSTRLEN + 1];
	char srv_agent_addr[INET6_ADDRSTRLEN + 1];
	char srv_check_addr[INET6_ADDRSTRLEN + 1];
	time_t srv_time_since_last_change;
	int bk_f_forced_id, srv_f_forced_id;
	char *srvrecord;

	/* resume from the last dumped server, or start at the first one */
	if (!ctx->sv)
		ctx->sv = px->srv;

	for (; ctx->sv != NULL; ctx->sv = srv->next) {
		srv = ctx->sv;

		dump_server_addr(&srv->addr, srv_addr);
		dump_server_addr(&srv->check.addr, srv_check_addr);
		dump_server_addr(&srv->agent.addr, srv_agent_addr);

		srv_time_since_last_change = ns_to_sec(now_ns) - srv->last_change;
		bk_f_forced_id = px->options & PR_O_FORCED_ID ? 1 : 0;
		srv_f_forced_id = srv->flags & SRV_F_FORCED_ID ? 1 : 0;

		/* DNS SRV record name, when the server comes from one */
		srvrecord = NULL;
		if (srv->srvrq && srv->srvrq->name)
			srvrecord = srv->srvrq->name;

		if (ctx->show_conn == 0) {
			/* show servers state */
			chunk_printf(&trash,
			             "%d %s "
			             "%d %s %s "
			             "%d %d %d %d %ld "
			             "%d %d %d %d %d "
			             "%d %d %s %u "
			             "%s %d %d "
			             "%s %s %d"
			             "\n",
			             px->uuid, HA_ANON_CLI(px->id),
			             srv->puid, HA_ANON_CLI(srv->id),
			             hash_ipanon(appctx->cli_anon_key, srv_addr, 0),
			             srv->cur_state, srv->cur_admin, srv->uweight, srv->iweight,
			             (long int)srv_time_since_last_change,
			             srv->check.status, srv->check.result, srv->check.health,
			             srv->check.state & 0x0F, srv->agent.state & 0x1F,
			             bk_f_forced_id, srv_f_forced_id,
			             srv->hostname ? HA_ANON_CLI(srv->hostname) : "-", srv->svc_port,
			             srvrecord ? srvrecord : "-", srv->use_ssl, srv->check.port,
			             srv_check_addr, srv_agent_addr, srv->agent.port);
		} else {
			/* show servers conn */
			int thr;

			chunk_printf(&trash,
			             "%s/%s %d/%d %s %u - %u %u %u %u %u %u %d %u",
			             HA_ANON_CLI(px->id), HA_ANON_CLI(srv->id),
			             px->uuid, srv->puid, hash_ipanon(appctx->cli_anon_key, srv_addr, 0),
			             srv->svc_port, srv->pool_purge_delay,
			             srv->curr_used_conns, srv->max_used_conns, srv->est_need_conns,
			             srv->curr_idle_nb, srv->curr_safe_nb, (int)srv->max_idle_conns, srv->curr_idle_conns);

			/* per-thread idle connection counts, when allocated */
			for (thr = 0; thr < global.nbthread && srv->curr_idle_thr; thr++)
				chunk_appendf(&trash, " %u", srv->curr_idle_thr[thr]);

			chunk_appendf(&trash, "\n");
		}

		/* output full: stop here, we will be called again for this server */
		if (applet_putchk(appctx, &trash) == -1) {
			return 0;
		}
	}
	return 1;
}
+
/* Parses backend list or simply use backend name provided by the user to return
 * states of servers to stdout. It takes its context from show_srv_ctx and dumps
 * proxy ->px and stops if ->only_pxid is non-null.
 * Returns 1 when the dump is complete, 0 when it must be called again because
 * the output buffer filled up.
 */
static int cli_io_handler_servers_state(struct appctx *appctx)
{
	struct show_srv_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct proxy *curproxy;

	/* first call: emit the header line matching the selected dump type */
	if (ctx->state == SHOW_SRV_HEAD) {
		if (ctx->show_conn == 0)
			chunk_printf(&trash, "%d\n# %s\n", SRV_STATE_FILE_VERSION, SRV_STATE_FILE_FIELD_NAMES);
		else
			chunk_printf(&trash,
			             "# bkname/svname bkid/svid addr port - purge_delay used_cur used_max need_est unsafe_nb safe_nb idle_lim idle_cur idle_per_thr[%d]\n",
			             global.nbthread);

		if (applet_putchk(appctx, &trash) == -1)
			return 0;

		ctx->state = SHOW_SRV_LIST;

		/* no specific proxy requested: walk the whole list */
		if (!ctx->px)
			ctx->px = proxies_list;
	}

	for (; ctx->px != NULL; ctx->px = curproxy->next) {
		curproxy = ctx->px;
		/* servers are only in backends */
		if ((curproxy->cap & PR_CAP_BE) && !(curproxy->cap & PR_CAP_INT)) {
			if (!dump_servers_state(sc))
				return 0;
		}
		/* only the selected proxy is dumped */
		if (ctx->only_pxid)
			break;
	}

	return 1;
}
+
/* Parses backend list and simply report backend names. It keeps the proxy
 * pointer in svcctx since there's nothing else to store there.
 * Returns 1 when the dump is complete, 0 when the output buffer is full and
 * the handler must be called again.
 */
static int cli_io_handler_show_backend(struct appctx *appctx)
{
	struct proxy *curproxy;

	chunk_reset(&trash);

	/* first call: emit the header and start from the first proxy */
	if (!appctx->svcctx) {
		chunk_printf(&trash, "# name\n");
		if (applet_putchk(appctx, &trash) == -1)
			return 0;

		appctx->svcctx = proxies_list;
	}

	for (; appctx->svcctx != NULL; appctx->svcctx = curproxy->next) {
		curproxy = appctx->svcctx;

		/* looking for non-internal backends only */
		if ((curproxy->cap & (PR_CAP_BE|PR_CAP_INT)) != PR_CAP_BE)
			continue;

		chunk_appendf(&trash, "%s\n", curproxy->id);
		if (applet_putchk(appctx, &trash) == -1)
			return 0;
	}

	return 1;
}
+
/* Parses the "enable dynamic-cookies backend" directive, it always returns 1.
 *
 * Grabs the proxy lock and each server's lock.
 */
static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct proxy *px;
	struct server *s;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	px = cli_find_backend(appctx, args[3]);
	if (!px)
		return 1;

	/* dynamic cookies only make sense for TCP/HTTP backends */
	if (px->mode != PR_MODE_TCP && px->mode != PR_MODE_HTTP)
		return cli_err(appctx, "Not available.\n");

	/* Note: this lock is to make sure this doesn't change while another
	 * thread is in srv_set_dyncookie().
	 */
	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
	px->ck_opts |= PR_CK_DYNAMIC;
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	/* recompute each server's dynamic cookie under its own lock */
	for (s = px->srv; s != NULL; s = s->next) {
		HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
		srv_set_dyncookie(s);
		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
	}

	return 1;
}
+
/* Parses the "disable dynamic-cookies backend" directive, it always returns 1.
 *
 * Grabs the proxy lock and each server's lock.
 */
static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct proxy *px;
	struct server *s;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	px = cli_find_backend(appctx, args[3]);
	if (!px)
		return 1;

	/* dynamic cookies only make sense for TCP/HTTP backends */
	if (px->mode != PR_MODE_TCP && px->mode != PR_MODE_HTTP)
		return cli_err(appctx, "Not available.\n");

	/* Note: this lock is to make sure this doesn't change while another
	 * thread is in srv_set_dyncookie().
	 */
	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
	px->ck_opts &= ~PR_CK_DYNAMIC;
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	/* drop computed cookies, but keep those explicitly set in the config */
	for (s = px->srv; s != NULL; s = s->next) {
		HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
		if (!(s->flags & SRV_F_COOKIESET))
			ha_free(&s->cookie);
		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
	}

	return 1;
}
+
/* Parses the "set dynamic-cookie-key backend" directive, it always returns 1.
 *
 * Grabs the proxy lock and each server's lock.
 */
static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct proxy *px;
	struct server *s;
	char *newkey;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	px = cli_find_backend(appctx, args[3]);
	if (!px)
		return 1;

	/* dynamic cookies only make sense for TCP/HTTP backends */
	if (px->mode != PR_MODE_TCP && px->mode != PR_MODE_HTTP)
		return cli_err(appctx, "Not available.\n");

	if (!*args[4])
		return cli_err(appctx, "String value expected.\n");

	/* duplicate before taking the lock so allocation failure is cheap */
	newkey = strdup(args[4]);
	if (!newkey)
		return cli_err(appctx, "Failed to allocate memory.\n");

	/* Note: this lock is to make sure this doesn't change while another
	 * thread is in srv_set_dyncookie().
	 */
	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
	free(px->dyncookie_key);
	px->dyncookie_key = newkey;
	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);

	/* recompute each server's dynamic cookie with the new key */
	for (s = px->srv; s != NULL; s = s->next) {
		HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
		srv_set_dyncookie(s);
		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
	}

	return 1;
}
+
+/* Parses the "set maxconn frontend" directive, it always returns 1.
+ *
+ * Grabs the proxy lock.
+ */
+static int cli_parse_set_maxconn_frontend(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct proxy *px;
+	struct listener *l;
+	int v;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	px = cli_find_frontend(appctx, args[3]);
+	if (!px)
+		return 1;
+
+	if (!*args[4])
+		return cli_err(appctx, "Integer value expected.\n");
+
+	/* NOTE(review): atoi() silently returns 0 on non-numeric input, which
+	 * would set maxconn to 0 instead of reporting an error — confirm this
+	 * is acceptable or consider strtol() with endptr validation.
+	 */
+	v = atoi(args[4]);
+	if (v < 0)
+		return cli_err(appctx, "Value out of range.\n");
+
+	/* OK, the value is fine, so we assign it to the proxy and to all of
+	 * its listeners. The blocked ones will be dequeued.
+	 */
+	HA_RWLOCK_WRLOCK(PROXY_LOCK, &px->lock);
+
+	px->maxconn = v;
+	/* wake up the listeners that were saturated under the old limit */
+	list_for_each_entry(l, &px->conf.listeners, by_fe) {
+		if (l->state == LI_FULL)
+			relax_listener(l, 1, 0);
+	}
+
+	/* if the new limit leaves headroom, resume queued connections */
+	if (px->maxconn > px->feconn)
+		dequeue_proxy_listeners(px);
+
+	HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &px->lock);
+
+	return 1;
+}
+
+/* Parses the "shutdown frontend" directive, it always returns 1.
+ *
+ * Grabs the proxy lock.
+ */
+static int cli_parse_shutdown_frontend(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct proxy *fe;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	fe = cli_find_frontend(appctx, args[2]);
+	if (!fe)
+		return 1;
+
+	/* nothing to do if the frontend is already disabled or stopped */
+	if (fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+		return cli_msg(appctx, LOG_NOTICE, "Frontend was already shut down.\n");
+
+	stop_proxy(fe);
+	return 1;
+}
+
+/* Parses the "disable frontend" directive, it always returns 1.
+ *
+ * Grabs the proxy lock.
+ */
+static int cli_parse_disable_frontend(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct proxy *fe;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	fe = cli_find_frontend(appctx, args[2]);
+	if (!fe)
+		return 1;
+
+	/* a stopped frontend cannot be paused anymore */
+	if (fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+		return cli_msg(appctx, LOG_NOTICE, "Frontend was previously shut down, cannot disable.\n");
+
+	/* no listener left in READY state means nothing to pause */
+	if (!fe->li_ready)
+		return cli_msg(appctx, LOG_NOTICE, "All sockets are already disabled.\n");
+
+	/* pause_proxy will take PROXY_LOCK */
+	if (!pause_proxy(fe))
+		return cli_err(appctx, "Failed to pause frontend, check logs for precise cause.\n");
+
+	return 1;
+}
+
+/* Parses the "enable frontend" directive, it always returns 1.
+ *
+ * Grabs the proxy lock.
+ */
+static int cli_parse_enable_frontend(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct proxy *px;
+	int ret;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+		return 1;
+
+	px = cli_find_frontend(appctx, args[2]);
+	if (!px)
+		return 1;
+
+	/* a stopped frontend cannot be brought back */
+	if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
+		return cli_err(appctx, "Frontend was previously shut down, cannot enable.\n");
+
+	/* every listener already in READY state: nothing to resume */
+	if (px->li_ready == px->li_all)
+		return cli_msg(appctx, LOG_NOTICE, "All sockets are already enabled.\n");
+
+	/* resume_proxy will take PROXY_LOCK */
+	ret = resume_proxy(px);
+
+	if (!ret)
+		return cli_err(appctx, "Failed to resume frontend, check logs for precise cause (port conflict?).\n");
+	return 1;
+}
+
+/* appctx context used during "show errors"; persists across successive
+ * invocations of the I/O handler so that a dump interrupted on a full
+ * buffer can resume exactly where it stopped.
+ */
+struct show_errors_ctx {
+	struct proxy *px;	/* current proxy being dumped, NULL = not started yet. */
+	unsigned int flag;	/* bit0: buffer being dumped, 0 = req, 1 = resp ; bit1=skip req ; bit2=skip resp. */
+	unsigned int ev_id;	/* event ID of error being dumped */
+	int iid;		/* if >= 0, ID of the proxy to filter on */
+	int ptr;		/* <0: headers, >=0 : text pointer to restart from */
+	int bol;		/* pointer to beginning of current line */
+};
+
+/* "show errors" handler for the CLI. Returns 0 if wants to continue, 1 to stop
+ * now.
+ */
+static int cli_parse_show_errors(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct show_errors_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	if (*args[2]) {
+		struct proxy *px;
+
+		/* the argument is resolved first as a proxy name, then as a
+		 * numeric proxy ID. atoi() returning 0 on a non-numeric,
+		 * unknown name is what triggers the error below.
+		 */
+		px = proxy_find_by_name(args[2], 0, 0);
+		if (px)
+			ctx->iid = px->uuid;
+		else
+			ctx->iid = atoi(args[2]);
+
+		if (!ctx->iid)
+			return cli_err(appctx, "No such proxy.\n");
+	}
+	else
+		ctx->iid = -1; // dump all proxies
+
+	/* bit1/bit2 select which of the two capture slots to skip; bit0 is
+	 * the req/resp alternation used by the I/O handler.
+	 */
+	ctx->flag = 0;
+	if (strcmp(args[3], "request") == 0)
+		ctx->flag |= 4; // ignore response
+	else if (strcmp(args[3], "response") == 0)
+		ctx->flag |= 2; // ignore request
+	ctx->px = NULL;
+	return 0;
+}
+
+/* This function dumps all captured errors onto the stream connector's
+ * read buffer. It returns 0 if the output buffer is full and it needs
+ * to be called again, otherwise non-zero. The dump is resumable: all
+ * progress is kept in the show_errors_ctx so a second call restarts at
+ * the exact proxy/buffer/byte where the previous one stopped.
+ */
+static int cli_io_handler_show_errors(struct appctx *appctx)
+{
+	struct show_errors_ctx *ctx = appctx->svcctx;
+	struct stconn *sc = appctx_sc(appctx);
+	extern const char *monthname[12];
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		return 1;
+
+	chunk_reset(&trash);
+
+	if (!ctx->px) {
+		/* the function had not been called yet, let's prepare the
+		 * buffer for a response.
+		 */
+		struct tm tm;
+
+		get_localtime(date.tv_sec, &tm);
+		chunk_appendf(&trash, "Total events captured on [%02d/%s/%04d:%02d:%02d:%02d.%03d] : %u\n",
+			     tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
+			     tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(date.tv_usec/1000),
+			     error_snapshot_id);
+
+		if (applet_putchk(appctx, &trash) == -1)
+			goto cant_send;
+
+		/* start the dump at the first proxy, headers first (ptr<0) */
+		ctx->px = proxies_list;
+		ctx->bol = 0;
+		ctx->ptr = -1;
+	}
+
+	/* we have two inner loops here, one for the proxy, the other one for
+	 * the buffer.
+	 */
+	while (ctx->px) {
+		struct error_snapshot *es;
+
+		/* the snapshot pointers may be updated by other threads, so
+		 * they may only be read under the proxy's lock.
+		 */
+		HA_RWLOCK_RDLOCK(PROXY_LOCK, &ctx->px->lock);
+
+		if ((ctx->flag & 1) == 0) {
+			es = ctx->px->invalid_req;
+			if (ctx->flag & 2) // skip req
+				goto next;
+		}
+		else {
+			es = ctx->px->invalid_rep;
+			if (ctx->flag & 4) // skip resp
+				goto next;
+		}
+
+		if (!es)
+			goto next;
+
+		/* filter on proxy ID, matching either side of the exchange */
+		if (ctx->iid >= 0 &&
+		    ctx->px->uuid != ctx->iid &&
+		    (!es->oe || es->oe->uuid != ctx->iid))
+			goto next;
+
+		if (ctx->ptr < 0) {
+			/* just print headers now */
+
+			char pn[INET6_ADDRSTRLEN];
+			struct tm tm;
+			int port;
+
+			get_localtime(es->when.tv_sec, &tm);
+			chunk_appendf(&trash, " \n[%02d/%s/%04d:%02d:%02d:%02d.%03d]",
+				     tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
+				     tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(es->when.tv_usec/1000));
+
+			switch (addr_to_str(&es->src, pn, sizeof(pn))) {
+			case AF_INET:
+			case AF_INET6:
+				port = get_host_port(&es->src);
+				break;
+			default:
+				port = 0;
+			}
+
+			/* bit0 tells whether this snapshot is an invalid
+			 * request (0) or an invalid response (1).
+			 */
+			switch (ctx->flag & 1) {
+			case 0:
+				chunk_appendf(&trash,
+					     " frontend %s (#%d): invalid request\n"
+					     "  backend %s (#%d)",
+					     ctx->px->id, ctx->px->uuid,
+					     (es->oe && es->oe->cap & PR_CAP_BE) ? es->oe->id : "<NONE>",
+					     (es->oe && es->oe->cap & PR_CAP_BE) ? es->oe->uuid : -1);
+				break;
+			case 1:
+				chunk_appendf(&trash,
+					     " backend %s (#%d): invalid response\n"
+					     "  frontend %s (#%d)",
+					     ctx->px->id, ctx->px->uuid,
+					     es->oe ? es->oe->id : "<NONE>" , es->oe ? es->oe->uuid : -1);
+				break;
+			}
+
+			chunk_appendf(&trash,
+				     ", server %s (#%d), event #%u, src %s:%d\n"
+				     "  buffer starts at %llu (including %u out), %u free,\n"
+				     "  len %u, wraps at %u, error at position %u\n",
+				     es->srv ? es->srv->id : "<NONE>",
+				     es->srv ? es->srv->puid : -1,
+				     es->ev_id, pn, port,
+				     es->buf_ofs, es->buf_out,
+				     global.tune.bufsize - es->buf_out - es->buf_len,
+				     es->buf_len, es->buf_wrap, es->buf_err);
+
+			if (es->show)
+				es->show(&trash, es);
+
+			chunk_appendf(&trash, " \n");
+
+			if (applet_putchk(appctx, &trash) == -1)
+				goto cant_send_unlock;
+
+			/* headers sent: remember the event ID so that a
+			 * concurrent update of the snapshot can be detected
+			 * on the next pass, then switch to the body dump.
+			 */
+			ctx->ptr = 0;
+			ctx->ev_id = es->ev_id;
+		}
+
+		if (ctx->ev_id != es->ev_id) {
+			/* the snapshot changed while we were dumping it */
+			chunk_appendf(&trash,
+				     "  WARNING! update detected on this snapshot, dump interrupted. Please re-check!\n");
+			if (applet_putchk(appctx, &trash) == -1)
+				goto cant_send_unlock;
+
+			goto next;
+		}
+
+		/* OK, ptr >= 0, so we have to dump the current line */
+		while (ctx->ptr < es->buf_len && ctx->ptr < global.tune.bufsize) {
+			int newptr;
+			int newline;
+
+			newline = ctx->bol;
+			newptr = dump_text_line(&trash, es->buf, global.tune.bufsize, es->buf_len, &newline, ctx->ptr);
+			if (newptr == ctx->ptr) {
+				/* no progress possible: ask for more room */
+				sc_need_room(sc, 0);
+				goto cant_send_unlock;
+			}
+
+			if (applet_putchk(appctx, &trash) == -1)
+				goto cant_send_unlock;
+
+			ctx->ptr = newptr;
+			ctx->bol = newline;
+		};
+	next:
+		HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &ctx->px->lock);
+		/* alternate req/resp via bit0; once both sides of the current
+		 * proxy were handled, move on to the next proxy.
+		 */
+		ctx->bol = 0;
+		ctx->ptr = -1;
+		ctx->flag ^= 1;
+		if (!(ctx->flag & 1))
+			ctx->px = ctx->px->next;
+	}
+
+	/* dump complete */
+	return 1;
+
+ cant_send_unlock:
+	HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &ctx->px->lock);
+ cant_send:
+	return 0;
+}
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+	/* Note: entries list a varying number of trailing NULLs; the
+	 * unspecified fields rely on static zero-initialization, which is
+	 * well-defined for aggregates with static storage duration.
+	 */
+	{ { "disable", "frontend",  NULL },                 "disable frontend <frontend>             : temporarily disable specific frontend",                          cli_parse_disable_frontend, NULL, NULL },
+	{ { "enable", "frontend",  NULL },                  "enable frontend <frontend>              : re-enable specific frontend",                                    cli_parse_enable_frontend, NULL, NULL },
+	{ { "set", "maxconn", "frontend",  NULL },          "set maxconn frontend <frontend> <value> : change a frontend's maxconn setting",                            cli_parse_set_maxconn_frontend, NULL },
+	{ { "show","servers", "conn",  NULL },              "show servers conn [<backend>]           : dump server connections status (all or for a single backend)",   cli_parse_show_servers, cli_io_handler_servers_state },
+	{ { "show","servers", "state",  NULL },             "show servers state [<backend>]          : dump volatile server information (all or for a single backend)", cli_parse_show_servers, cli_io_handler_servers_state },
+	{ { "show", "backend", NULL },                      "show backend                            : list backends in the current running config",                    NULL, cli_io_handler_show_backend },
+	{ { "shutdown", "frontend",  NULL },                "shutdown frontend <frontend>            : stop a specific frontend",                                       cli_parse_shutdown_frontend, NULL, NULL },
+	{ { "set", "dynamic-cookie-key", "backend", NULL }, "set dynamic-cookie-key backend <bk> <k> : change a backend secret key for dynamic cookies",                cli_parse_set_dyncookie_key_backend, NULL },
+	{ { "enable", "dynamic-cookie", "backend", NULL },  "enable dynamic-cookie backend <bk>      : enable dynamic cookies on a specific backend",                   cli_parse_enable_dyncookie_backend, NULL },
+	{ { "disable", "dynamic-cookie", "backend", NULL }, "disable dynamic-cookie backend <bk>     : disable dynamic cookies on a specific backend",                  cli_parse_disable_dyncookie_backend, NULL },
+	{ { "show", "errors", NULL },                       "show errors [<px>] [request|response]   : report last request and/or response errors for each proxy",      cli_parse_show_errors, cli_io_handler_show_errors, NULL },
+	{{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/qmux_http.c b/src/qmux_http.c
new file mode 100644
index 0000000..edf26b1
--- /dev/null
+++ b/src/qmux_http.c
@@ -0,0 +1,108 @@
+#include <haproxy/qmux_http.h>
+
+#include <haproxy/api-t.h>
+#include <haproxy/htx.h>
+#include <haproxy/qmux_trace.h>
+
+/* QUIC MUX rcv_buf operation using HTX data. Received data from stream <qcs>
+ * will be transferred as HTX in <buf>. Output buffer is expected to be of
+ * length <count>. <fin> will be set to signal the last data to receive on this
+ * stream.
+ *
+ * Return the size in bytes of transferred data.
+ */
+size_t qcs_http_rcv_buf(struct qcs *qcs, struct buffer *buf, size_t count,
+                        char *fin)
+{
+	struct htx *qcs_htx = NULL;
+	struct htx *cs_htx = NULL;
+	size_t ret = 0;
+
+	TRACE_ENTER(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs);
+
+	*fin = 0;
+	qcs_htx = htx_from_buf(&qcs->rx.app_buf);
+	if (htx_is_empty(qcs_htx)) {
+		/* Set buffer data to 0 as HTX is empty. */
+		htx_to_buf(qcs_htx, &qcs->rx.app_buf);
+		goto end;
+	}
+
+	/* remember the source size; remaining data is subtracted at the end
+	 * to compute the amount actually transferred.
+	 */
+	ret = qcs_htx->data;
+
+	cs_htx = htx_from_buf(buf);
+	if (htx_is_empty(cs_htx) && htx_used_space(qcs_htx) <= count) {
+		/* fast path: destination is empty and everything fits, so the
+		 * whole buffer can be moved wholesale with b_xfer().
+		 */
+		/* EOM will be copied to cs_htx via b_xfer(). */
+		if (qcs_htx->flags & HTX_FL_EOM)
+			*fin = 1;
+
+		htx_to_buf(cs_htx, buf);
+		htx_to_buf(qcs_htx, &qcs->rx.app_buf);
+		b_xfer(buf, &qcs->rx.app_buf, b_data(&qcs->rx.app_buf));
+		goto end;
+	}
+
+	/* slow path: move HTX blocks individually up to <count> bytes */
+	htx_xfer_blks(cs_htx, qcs_htx, count, HTX_BLK_UNUSED);
+	BUG_ON(qcs_htx->flags & HTX_FL_PARSING_ERROR);
+
+	/* Copy EOM from src to dst buffer if all data copied. */
+	if (htx_is_empty(qcs_htx) && (qcs_htx->flags & HTX_FL_EOM)) {
+		cs_htx->flags |= HTX_FL_EOM;
+		*fin = 1;
+	}
+
+	/* propagate the announced remaining payload, rebased on what is
+	 * still held in the source buffer.
+	 */
+	cs_htx->extra = qcs_htx->extra ? (qcs_htx->data + qcs_htx->extra) : 0;
+	htx_to_buf(cs_htx, buf);
+	htx_to_buf(qcs_htx, &qcs->rx.app_buf);
+	ret -= qcs_htx->data;
+
+ end:
+	TRACE_LEAVE(QMUX_EV_STRM_RECV, qcs->qcc->conn, qcs);
+
+	return ret;
+}
+
+/* QUIC MUX snd_buf operation using HTX data. HTX data will be transferred from
+ * <buf> to <qcs> stream buffer. Input buffer is expected to be of length
+ * <count>. <fin> will be set to signal the last data to send for this stream.
+ *
+ * Return the size in bytes of transferred data.
+ */
+size_t qcs_http_snd_buf(struct qcs *qcs, struct buffer *buf, size_t count,
+                        char *fin)
+{
+	size_t bytes;
+	int end_of_msg;
+
+	TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	/* remember whether the message was complete before handing it off,
+	 * as the application layer may consume the HTX contents.
+	 */
+	end_of_msg = (htxbuf(buf)->flags & HTX_FL_EOM);
+
+	bytes = qcs->qcc->app_ops->snd_buf(qcs, buf, count);
+
+	/* last data is reached once EOM was seen and the buffer is drained */
+	*fin = (end_of_msg && !b_data(buf));
+
+	TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	return bytes;
+}
+
+/* QUIC MUX snd_buf reset. HTX data stored in <buf> of length <count> will be
+ * cleared. This can be used when data should not be transmitted any longer.
+ *
+ * Return the size in bytes of cleared data.
+ */
+size_t qcs_http_reset_buf(struct qcs *qcs, struct buffer *buf, size_t count)
+{
+	struct htx *content;
+
+	TRACE_ENTER(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	/* wipe all pending HTX blocks and resync the underlying buffer */
+	content = htx_from_buf(buf);
+	htx_reset(content);
+	htx_to_buf(content, buf);
+
+	TRACE_LEAVE(QMUX_EV_STRM_SEND, qcs->qcc->conn, qcs);
+
+	return count;
+}
diff --git a/src/qmux_trace.c b/src/qmux_trace.c
new file mode 100644
index 0000000..b213ed4
--- /dev/null
+++ b/src/qmux_trace.c
@@ -0,0 +1,114 @@
+#include <haproxy/qmux_trace.h>
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/chunk.h>
+#include <haproxy/mux_quic.h>
+#include <haproxy/quic_frame-t.h>
+
+/* trace source and events */
+static void qmux_trace(enum trace_level level, uint64_t mask,
+                       const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* lock-on argument descriptors: arg1 is the connection (implicit), arg2 is
+ * the QUIC stream the trace may lock on.
+ */
+static const struct name_desc qmux_trace_lockon_args[4] = {
+	/* arg1 */ { /* already used by the connection */ },
+	/* arg2 */ { .name="qcs", .desc="QUIC stream" },
+	/* arg3 */ { },
+	/* arg4 */ { }
+};
+
+/* verbosity levels understood by this trace source */
+static const struct name_desc qmux_trace_decoding[] = {
+#define QMUX_VERB_CLEAN    1
+	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define QMUX_VERB_MINIMAL  2
+	{ .name="minimal",  .desc="report only qcc/qcs state and flags, no real decoding" },
+	{ /* end */ }
+};
+
+/* registration record for the "qmux" trace source */
+struct trace_source trace_qmux = {
+	.name = IST("qmux"),
+	.desc = "QUIC multiplexer",
+	.arg_def = TRC_ARG1_CONN,  /* TRACE()'s first argument is always a connection */
+	.default_cb = qmux_trace,
+	.known_events = qmux_trace_events,
+	.lockon_args = qmux_trace_lockon_args,
+	.decoding = qmux_trace_decoding,
+	.report_events = ~0,  /* report everything by default */
+};
+
+
+/* Appends the relevant fields of frame <frm> to the trace buffer. Only the
+ * MAX_STREAMS (bidi/uni) frame types carry information worth dumping; any
+ * other type is ignored.
+ */
+static void qmux_trace_frm(const struct quic_frame *frm)
+{
+	if (frm->type == QUIC_FT_MAX_STREAMS_BIDI)
+		chunk_appendf(&trace_buf, " max_streams=%llu",
+		              (ullong)frm->max_streams_bidi.max_streams);
+	else if (frm->type == QUIC_FT_MAX_STREAMS_UNI)
+		chunk_appendf(&trace_buf, " max_streams=%llu",
+		              (ullong)frm->max_streams_uni.max_streams);
+}
+
+/* quic-mux trace handler */
+static void qmux_trace(enum trace_level level, uint64_t mask,
+                       const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct connection *conn = a1;
+	const struct qcc *qcc   = conn ? conn->ctx : NULL;
+	const struct qcs *qcs   = a2;
+
+	/* nothing useful to report without a mux context */
+	if (!qcc)
+		return;
+
+	if (src->verbosity > QMUX_VERB_CLEAN) {
+		chunk_appendf(&trace_buf, " : qcc=%p(F)", qcc);
+		if (qcc->conn->handle.qc)
+			chunk_appendf(&trace_buf, " qc=%p", qcc->conn->handle.qc);
+
+		/* connection-level flow control: limit / prepared / sent */
+		chunk_appendf(&trace_buf, " md=%llu/%llu/%llu",
+		              (ullong)qcc->rfctl.md, (ullong)qcc->tx.offsets, (ullong)qcc->tx.sent_offsets);
+
+		if (qcs) {
+			chunk_appendf(&trace_buf, " qcs=%p .id=%llu .st=%s",
+			              qcs, (ullong)qcs->id,
+			              qcs_st_to_str(qcs->st));
+			/* stream-level flow control: limit / prepared / sent */
+			chunk_appendf(&trace_buf, " msd=%llu/%llu/%llu",
+			              (ullong)qcs->tx.msd, (ullong)qcs->tx.offset, (ullong)qcs->tx.sent_offset);
+		}
+
+		/* the meaning of a3 depends on the event mask below */
+		if (mask & QMUX_EV_QCC_NQCS) {
+			const uint64_t *id = a3;
+			chunk_appendf(&trace_buf, " id=%llu", (ullong)*id);
+		}
+
+		if (mask & QMUX_EV_SEND_FRM)
+			qmux_trace_frm(a3);
+
+		if (mask & QMUX_EV_QCS_XFER_DATA) {
+			const struct qcs_xfer_data_trace_arg *arg = a3;
+			chunk_appendf(&trace_buf, " prep=%lu xfer=%d",
+			              (ulong)arg->prep, arg->xfer);
+		}
+
+		if (mask & QMUX_EV_QCS_BUILD_STRM) {
+			const struct qcs_build_stream_trace_arg *arg = a3;
+			chunk_appendf(&trace_buf, " len=%lu fin=%d offset=%llu",
+			              (ulong)arg->len, arg->fin, (ullong)arg->offset);
+		}
+	}
+}
+
+
+/* register qmux traces */
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
diff --git a/src/qpack-dec.c b/src/qpack-dec.c
new file mode 100644
index 0000000..97392bb
--- /dev/null
+++ b/src/qpack-dec.c
@@ -0,0 +1,563 @@
+/*
+ * QPACK decompressor
+ *
+ * Copyright 2021 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <import/ist.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/h3.h>
+#include <haproxy/mux_quic.h>
+#include <haproxy/qpack-t.h>
+#include <haproxy/qpack-dec.h>
+#include <haproxy/qpack-tbl.h>
+#include <haproxy/hpack-huff.h>
+#include <haproxy/hpack-tbl.h>
+#include <haproxy/http-hdr.h>
+#include <haproxy/tools.h>
+
+#if defined(DEBUG_QPACK)
+#define qpack_debug_printf fprintf
+#define qpack_debug_hexdump debug_hexdump
+#else
+#define qpack_debug_printf(...) do { } while (0)
+#define qpack_debug_hexdump(...) do { } while (0)
+#endif
+
+/* Encoded field line bitmask */
+#define QPACK_EFL_BITMASK 0xf0
+#define QPACK_LFL_WPBNM 0x00 // Literal field line with post-base name reference
+#define QPACK_IFL_WPBI 0x10 // Indexed field line with post-based index
+#define QPACK_LFL_WLN_BIT 0x20 // Literal field line with literal name
+#define QPACK_LFL_WNR_BIT 0x40 // Literal field line with name reference
+#define QPACK_IFL_BIT 0x80 // Indexed field line
+
+/* reads a varint from <raw>'s lowest <b> bits and <len> bytes max (raw included).
+ * returns the 64-bit value on success after updating buf and len_in. Forces
+ * len_in to (uint64_t)-1 on truncated input, including when <*len_in> is
+ * already zero on entry (previously this case read one byte past the input).
+ * Note that this function is similar to the one used for HPACK (except that is
+ * supports up to 62-bits integers).
+ */
+static uint64_t qpack_get_varint(const unsigned char **buf, uint64_t *len_in, int b)
+{
+	uint64_t ret = 0;
+	uint64_t len = *len_in;   /* keep full 64-bit width, <int> would truncate */
+	const uint8_t *raw = *buf;
+	uint8_t shift = 0;
+
+	/* at least the prefix byte must be present */
+	if (!len)
+		goto too_short;
+
+	len--;
+	ret = *raw++ & ((1ULL << b) - 1);
+	if (ret != (uint64_t)((1ULL << b) - 1))
+		goto end;
+
+	/* continuation bytes carry 7 payload bits each, MSB set means "more" */
+	while (len && (*raw & 128)) {
+		ret += ((uint64_t)*raw++ & 127) << shift;
+		shift += 7;
+		len--;
+	}
+
+	/* last 7 bits */
+	if (!len)
+		goto too_short;
+
+	len--;
+	ret += ((uint64_t)*raw++ & 127) << shift;
+
+ end:
+	*buf = raw;
+	*len_in = len;
+	return ret;
+
+ too_short:
+	*len_in = (uint64_t)-1;
+	return 0;
+}
+
+/* Decode an encoder stream.
+ *
+ * Returns 0 on success else non-zero.
+ *
+ * Note: only the first instruction byte is classified; the instructions
+ * themselves are not processed yet (dynamic table support not implemented).
+ */
+int qpack_decode_enc(struct buffer *buf, int fin, void *ctx)
+{
+	struct qcs *qcs = ctx;
+	size_t len;
+	unsigned char inst;
+
+	/* RFC 9204 4.2. Encoder and Decoder Streams
+	 *
+	 * The sender MUST NOT close either of these streams, and the receiver
+	 * MUST NOT request that the sender close either of these streams.
+	 * Closure of either unidirectional stream type MUST be treated as a
+	 * connection error of type H3_CLOSED_CRITICAL_STREAM.
+	 */
+	if (fin) {
+		qcc_set_error(qcs->qcc, H3_CLOSED_CRITICAL_STREAM, 1);
+		return -1;
+	}
+
+	len = b_data(buf);
+	qpack_debug_hexdump(stderr, "[QPACK-DEC-ENC] ", b_head(buf), 0, len);
+
+	if (!len) {
+		qpack_debug_printf(stderr, "[QPACK-DEC-ENC] empty stream\n");
+		return 0;
+	}
+
+	/* classify the instruction from its leading bits (RFC 9204 4.3) */
+	inst = (unsigned char)*b_head(buf) & QPACK_ENC_INST_BITMASK;
+	if (inst == QPACK_ENC_INST_DUP) {
+		/* Duplicate */
+	}
+	else if (inst & QPACK_ENC_INST_IWNR_BIT) {
+		/* Insert With Name Reference */
+	}
+	else if (inst & QPACK_ENC_INST_IWLN_BIT) {
+		/* Insert with literal name */
+	}
+	else if (inst & QPACK_ENC_INST_SDTC_BIT) {
+		/* Set dynamic table capacity */
+	}
+
+	return 0;
+}
+
+/* Decode an decoder stream.
+ *
+ * Returns 0 on success else non-zero.
+ *
+ * Note: only the first instruction byte is classified; the instructions
+ * themselves are not processed yet (dynamic table support not implemented).
+ */
+int qpack_decode_dec(struct buffer *buf, int fin, void *ctx)
+{
+	struct qcs *qcs = ctx;
+	size_t len;
+	unsigned char inst;
+
+	/* RFC 9204 4.2. Encoder and Decoder Streams
+	 *
+	 * The sender MUST NOT close either of these streams, and the receiver
+	 * MUST NOT request that the sender close either of these streams.
+	 * Closure of either unidirectional stream type MUST be treated as a
+	 * connection error of type H3_CLOSED_CRITICAL_STREAM.
+	 */
+	if (fin) {
+		qcc_set_error(qcs->qcc, H3_CLOSED_CRITICAL_STREAM, 1);
+		return -1;
+	}
+
+	len = b_data(buf);
+	qpack_debug_hexdump(stderr, "[QPACK-DEC-DEC] ", b_head(buf), 0, len);
+
+	if (!len) {
+		qpack_debug_printf(stderr, "[QPACK-DEC-DEC] empty stream\n");
+		return 0;
+	}
+
+	/* classify the instruction from its leading bits (RFC 9204 4.4) */
+	inst = (unsigned char)*b_head(buf) & QPACK_DEC_INST_BITMASK;
+	if (inst == QPACK_DEC_INST_ICINC) {
+		/* Insert count increment */
+	}
+	else if (inst & QPACK_DEC_INST_SACK) {
+		/* Section Acknowledgment */
+	}
+	else if (inst & QPACK_DEC_INST_SCCL) {
+		/* Stream cancellation */
+	}
+
+	return 0;
+}
+
+/* Decode a field section prefix made of <enc_ric> and <db> two varints.
+ * Also set the 'S' sign bit for <db> (RFC 9204 4.5.1: the MSB of the Delta
+ * Base byte).
+ * Return a negative error if failed, 0 if not.
+ */
+static int qpack_decode_fs_pfx(uint64_t *enc_ric, uint64_t *db, int *sign_bit,
+                               const unsigned char **raw, uint64_t *len)
+{
+	*enc_ric = qpack_get_varint(raw, len, 8);
+	if (*len == (uint64_t)-1)
+		return -QPACK_ERR_RIC;
+
+	/* the Delta Base byte must be present before peeking at its sign bit,
+	 * otherwise <*raw> would be dereferenced one byte past the input when
+	 * the RIC varint consumed the whole buffer.
+	 */
+	if (!*len)
+		return -QPACK_ERR_DB;
+
+	/* 'S' is the most significant bit of the Delta Base byte */
+	*sign_bit = **raw & 0x80;
+	*db = qpack_get_varint(raw, len, 7);
+	if (*len == (uint64_t)-1)
+		return -QPACK_ERR_DB;
+
+	return 0;
+}
+
+/* Decode a field section from the <raw> buffer of <len> bytes. Each parsed
+ * header is inserted into <list> of <list_size> entries max and uses <tmp> as
+ * a storage for some elements pointing into it. An end marker is inserted at
+ * the end of the list with empty strings as name/value.
+ *
+ * Returns the number of headers inserted into list excluding the end marker.
+ * In case of error, a negative code QPACK_ERR_* is returned.
+ */
+int qpack_decode_fs(const unsigned char *raw, uint64_t len, struct buffer *tmp,
+                    struct http_hdr *list, int list_size)
+{
+	struct ist name, value;
+	uint64_t enc_ric, db;
+	int s;
+	unsigned int efl_type;
+	int ret;
+	int hdr_idx = 0;
+
+	qpack_debug_hexdump(stderr, "[QPACK-DEC-FS] ", (const char *)raw, 0, len);
+
+	/* parse field section prefix */
+	ret = qpack_decode_fs_pfx(&enc_ric, &db, &s, &raw, &len);
+	if (ret < 0) {
+		qpack_debug_printf(stderr, "##ERR@%d(%d)\n", __LINE__, ret);
+		goto out;
+	}
+
+	chunk_reset(tmp);
+	qpack_debug_printf(stderr, "enc_ric: %llu db: %llu s=%d\n",
+	                   (unsigned long long)enc_ric, (unsigned long long)db, !!s);
+	/* Decode field lines */
+	while (len) {
+		if (hdr_idx >= list_size) {
+			qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+			ret = -QPACK_ERR_TOO_LARGE;
+			goto out;
+		}
+
+		/* parse field line representation */
+		efl_type = *raw & QPACK_EFL_BITMASK;
+		qpack_debug_printf(stderr, "efl_type=0x%02x\n", efl_type);
+
+		if (efl_type == QPACK_LFL_WPBNM) {
+			/* Literal field line with post-base name reference
+			 * TODO adjust this when dynamic table support is implemented.
+			 */
+#if 0
+			uint64_t index __maybe_unused, length;
+			unsigned int n __maybe_unused, h __maybe_unused;
+
+			qpack_debug_printf(stderr, "literal field line with post-base name reference:");
+			n = *raw & 0x08;
+			index = qpack_get_varint(&raw, &len, 3);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " n=%d index=%llu", !!n, (unsigned long long)index);
+			h = *raw & 0x80;
+			length = qpack_get_varint(&raw, &len, 7);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " h=%d length=%llu", !!h, (unsigned long long)length);
+
+			if (len < length) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			raw += length;
+			len -= length;
+#endif
+
+			/* RFC9204 2.2.3 Invalid References
+			 *
+			 * If the decoder encounters a reference in a field line representation
+			 * to a dynamic table entry that has already been evicted or that has an
+			 * absolute index greater than or equal to the declared Required Insert
+			 * Count (Section 4.5.1), it MUST treat this as a connection error of
+			 * type QPACK_DECOMPRESSION_FAILED.
+			 */
+			return -QPACK_DECOMPRESSION_FAILED;
+		}
+		else if (efl_type == QPACK_IFL_WPBI) {
+			/* Indexed field line with post-base index
+			 * TODO adjust this when dynamic table support is implemented.
+			 */
+#if 0
+			uint64_t index __maybe_unused;
+
+			qpack_debug_printf(stderr, "indexed field line with post-base index:");
+			index = qpack_get_varint(&raw, &len, 4);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " index=%llu", (unsigned long long)index);
+#endif
+
+			/* RFC9204 2.2.3 Invalid References
+			 *
+			 * If the decoder encounters a reference in a field line representation
+			 * to a dynamic table entry that has already been evicted or that has an
+			 * absolute index greater than or equal to the declared Required Insert
+			 * Count (Section 4.5.1), it MUST treat this as a connection error of
+			 * type QPACK_DECOMPRESSION_FAILED.
+			 */
+			return -QPACK_DECOMPRESSION_FAILED;
+		}
+		else if (efl_type & QPACK_IFL_BIT) {
+			/* Indexed field line */
+			uint64_t index;
+			unsigned int static_tbl;
+
+			qpack_debug_printf(stderr, "indexed field line:");
+			static_tbl = efl_type & 0x40;
+			index = qpack_get_varint(&raw, &len, 6);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			/* only static table references are supported for now */
+			if (static_tbl && index < QPACK_SHT_SIZE) {
+				name = qpack_sht[index].n;
+				value = qpack_sht[index].v;
+			}
+			else {
+				/* RFC9204 2.2.3 Invalid References
+				 *
+				 * If the decoder encounters a reference in a field line representation
+				 * to a dynamic table entry that has already been evicted or that has an
+				 * absolute index greater than or equal to the declared Required Insert
+				 * Count (Section 4.5.1), it MUST treat this as a connection error of
+				 * type QPACK_DECOMPRESSION_FAILED.
+				 *
+				 * TODO adjust this when dynamic table support is implemented.
+				 */
+				return -QPACK_DECOMPRESSION_FAILED;
+			}
+
+			qpack_debug_printf(stderr, " t=%d index=%llu", !!static_tbl, (unsigned long long)index);
+		}
+		else if (efl_type & QPACK_LFL_WNR_BIT) {
+			/* Literal field line with name reference: the name comes
+			 * from the (static) table, the value is a literal.
+			 */
+			uint64_t index, length;
+			unsigned int static_tbl, n __maybe_unused, h;
+
+			qpack_debug_printf(stderr, "Literal field line with name reference:");
+			n = efl_type & 0x20;
+			static_tbl = efl_type & 0x10;
+			index = qpack_get_varint(&raw, &len, 4);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			if (static_tbl && index < QPACK_SHT_SIZE) {
+				name = qpack_sht[index].n;
+			}
+			else {
+				/* RFC9204 2.2.3 Invalid References
+				 *
+				 * If the decoder encounters a reference in a field line representation
+				 * to a dynamic table entry that has already been evicted or that has an
+				 * absolute index greater than or equal to the declared Required Insert
+				 * Count (Section 4.5.1), it MUST treat this as a connection error of
+				 * type QPACK_DECOMPRESSION_FAILED.
+				 *
+				 * TODO adjust this when dynamic table support is implemented.
+				 */
+				return -QPACK_DECOMPRESSION_FAILED;
+			}
+
+			qpack_debug_printf(stderr, " n=%d t=%d index=%llu", !!n, !!static_tbl, (unsigned long long)index);
+			/* NOTE(review): <raw> is peeked here before checking any
+			 * byte remains; qpack_get_varint() below catches the
+			 * truncation but the peek itself may read one byte past
+			 * the input — confirm.
+			 */
+			h = *raw & 0x80;
+			length = qpack_get_varint(&raw, &len, 7);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " h=%d length=%llu", !!h, (unsigned long long)length);
+			/* NOTE(review): unlike the other branches, the
+			 * <len> < <length> truncation check only happens below,
+			 * AFTER huff_dec() has read <length> bytes from <raw>.
+			 * This looks like an out-of-bounds read on truncated
+			 * input — verify and hoist the check above this block.
+			 */
+			if (h) {
+				char *trash;
+				int nlen;
+
+				trash = chunk_newstr(tmp);
+				if (!trash) {
+					qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+					ret = -QPACK_DECOMPRESSION_FAILED;
+					goto out;
+				}
+				nlen = huff_dec(raw, length, trash, tmp->size - tmp->data);
+				if (nlen == (uint32_t)-1) {
+					qpack_debug_printf(stderr, " can't decode huffman.\n");
+					ret = -QPACK_ERR_HUFFMAN;
+					goto out;
+				}
+
+				qpack_debug_printf(stderr, " [name huff %d->%d '%s']", (int)length, (int)nlen, trash);
+				/* makes an ist from tmp storage */
+				b_add(tmp, nlen);
+				value = ist2(trash, nlen);
+			}
+			else {
+				value = ist2(raw, length);
+			}
+
+			if (len < length) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			raw += length;
+			len -= length;
+		}
+		else if (efl_type & QPACK_LFL_WLN_BIT) {
+			/* Literal field line with literal name: both name and
+			 * value are literals, each optionally huffman-encoded.
+			 */
+			unsigned int n __maybe_unused, hname, hvalue;
+			uint64_t name_len, value_len;
+
+			qpack_debug_printf(stderr, "Literal field line with literal name:");
+			n = *raw & 0x10;
+			hname = *raw & 0x08;
+			name_len = qpack_get_varint(&raw, &len, 3);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " n=%d hname=%d name_len=%llu", !!n, !!hname, (unsigned long long)name_len);
+			/* Name string */
+
+			if (len < name_len) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			if (hname) {
+				char *trash;
+				int nlen;
+
+				trash = chunk_newstr(tmp);
+				if (!trash) {
+					qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+					ret = -QPACK_DECOMPRESSION_FAILED;
+					goto out;
+				}
+				nlen = huff_dec(raw, name_len, trash, tmp->size - tmp->data);
+				if (nlen == (uint32_t)-1) {
+					qpack_debug_printf(stderr, " can't decode huffman.\n");
+					ret = -QPACK_ERR_HUFFMAN;
+					goto out;
+				}
+
+				qpack_debug_printf(stderr, " [name huff %d->%d '%s']", (int)name_len, (int)nlen, trash);
+				/* makes an ist from tmp storage */
+				b_add(tmp, nlen);
+				name = ist2(trash, nlen);
+			}
+			else {
+				name = ist2(raw, name_len);
+			}
+
+			raw += name_len;
+			len -= name_len;
+
+			/* NOTE(review): <raw> is peeked here without checking
+			 * that a byte remains after the name (same pattern as
+			 * the name-reference branch above) — confirm.
+			 */
+			hvalue = *raw & 0x80;
+			value_len = qpack_get_varint(&raw, &len, 7);
+			if (len == (uint64_t)-1) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			qpack_debug_printf(stderr, " hvalue=%d value_len=%llu", !!hvalue, (unsigned long long)value_len);
+
+			if (len < value_len) {
+				qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+				ret = -QPACK_ERR_TRUNCATED;
+				goto out;
+			}
+
+			if (hvalue) {
+				char *trash;
+				int nlen;
+
+				trash = chunk_newstr(tmp);
+				if (!trash) {
+					qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+					ret = -QPACK_DECOMPRESSION_FAILED;
+					goto out;
+				}
+				nlen = huff_dec(raw, value_len, trash, tmp->size - tmp->data);
+				if (nlen == (uint32_t)-1) {
+					qpack_debug_printf(stderr, " can't decode huffman.\n");
+					ret = -QPACK_ERR_HUFFMAN;
+					goto out;
+				}
+
+				/* NOTE(review): message says "name huff" but this is the value */
+				qpack_debug_printf(stderr, " [name huff %d->%d '%s']", (int)value_len, (int)nlen, trash);
+				/* makes an ist from tmp storage */
+				b_add(tmp, nlen);
+				value = ist2(trash, nlen);
+			}
+			else {
+				value = ist2(raw, value_len);
+			}
+
+			raw += value_len;
+			len -= value_len;
+		}
+
+		/* We must not accept empty header names (forbidden by the spec and used
+		 * as a list termination).
+		 */
+		if (!name.len) {
+			qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+			ret = -QPACK_DECOMPRESSION_FAILED;
+			goto out;
+		}
+
+		list[hdr_idx].n = name;
+		list[hdr_idx].v = value;
+		++hdr_idx;
+
+		qpack_debug_printf(stderr, "\n");
+	}
+
+	/* one free slot is still needed for the end marker below */
+	if (hdr_idx >= list_size) {
+		qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
+		ret = -QPACK_ERR_TOO_LARGE;
+		goto out;
+	}
+
+	/* put an end marker */
+	list[hdr_idx].n = list[hdr_idx].v = IST_NULL;
+	ret = hdr_idx;
+
+ out:
+	qpack_debug_printf(stderr, "-- done: ret=%d\n", ret);
+	return ret;
+}
diff --git a/src/qpack-enc.c b/src/qpack-enc.c
new file mode 100644
index 0000000..006f1f1
--- /dev/null
+++ b/src/qpack-enc.c
@@ -0,0 +1,185 @@
+#include <haproxy/qpack-enc.h>
+
+#include <haproxy/buf.h>
+#include <haproxy/intops.h>
+
/* Returns the byte size required to encode <i> as a <prefix_size>-bit prefix
 * integer (RFC 7541 #5.1 representation): one prefix byte, plus, when <i>
 * does not fit in the prefix, one continuation byte per 7-bit group of the
 * remainder <i> - (2^prefix_size - 1). The previous version counted the
 * groups of <i> itself instead of the remainder, overestimating the size.
 */
static size_t qpack_get_prefix_int_size(int i, int prefix_size)
{
        const int mod = (1 << prefix_size) - 1;
        size_t result = 1;

        /* fits entirely in the prefix byte */
        if (i < mod)
                return 1;

        /* a zero remainder still needs one continuation byte, hence do/while */
        i -= mod;
        do {
                ++result;
                i >>= 7;
        } while (i);

        return result;
}
+
+/* Encode the integer <i> in the buffer <out> in a <prefix_size>-bit prefix
+ * integer. The caller must ensure there is enough size in the buffer. The
+ * prefix is OR-ed with <before_prefix> byte.
+ *
+ * Returns 0 if success else non-zero.
+ */
+static int qpack_encode_prefix_integer(struct buffer *out, int i,
+ int prefix_size,
+ unsigned char before_prefix)
+{
+ const int mod = (1 << prefix_size) - 1;
+ BUG_ON_HOT(!prefix_size);
+
+ if (i < mod) {
+ if (b_room(out) < 1)
+ return 1;
+
+ b_putchr(out, before_prefix | i);
+ }
+ else {
+ int to_encode = i - mod;
+ const size_t sz = to_encode / mod;
+
+ if (b_room(out) < sz)
+ return 1;
+
+ b_putchr(out, before_prefix | mod);
+ while (1) {
+ if (to_encode > 0x7f) {
+ b_putchr(out, 0x80 | (to_encode & 0x7f));
+ to_encode >>= 7;
+ }
+ else {
+ b_putchr(out, to_encode & 0x7f);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
/* Encode the HTTP status code <status> into <out>, either as an indexed
 * field line when the code is present in the QPACK static table, or as a
 * literal field line with a name reference to the ":status" entry otherwise.
 * Returns 0 on success else non-zero.
 */
int qpack_encode_int_status(struct buffer *out, unsigned int status)
{
        int idx = 0;

        if (status < 100 || status > 999)
                return 1;

        /* map the code to its QPACK static table index, when it has one */
        switch (status) {
        case 103: idx = 24; break;
        case 200: idx = 25; break;
        case 304: idx = 26; break;
        case 404: idx = 27; break;
        case 503: idx = 28; break;
        case 100: idx = 63; break;
        case 204: idx = 64; break;
        case 206: idx = 65; break;
        case 302: idx = 66; break;
        case 400: idx = 67; break;
        case 403: idx = 68; break;
        case 421: idx = 69; break;
        case 425: idx = 70; break;
        case 500: idx = 71; break;

        /* status code not in QPACK static table, idx is null. */
        default: break;
        }

        if (idx) {
                /* status code present in QPACK static table
                 * -> indexed field line
                 */
                if (b_room(out) < qpack_get_prefix_int_size(idx, 6))
                        return 1;

                qpack_encode_prefix_integer(out, idx, 6, 0xc0);
        }
        else {
                /* status code not present in QPACK static table
                 * -> literal field line with name reference
                 */
                char digits[3];

                digits[0] = '0' + status / 100;
                digits[1] = '0' + (status / 10) % 10;
                digits[2] = '0' + status % 10;

                /* field name: reference to static entry 24 (":status") */
                if (qpack_encode_prefix_integer(out, 24, 4, 0x50))
                        return 1;

                /* field value length */
                if (qpack_encode_prefix_integer(out, 3, 7, 0x00))
                        return 1;

                if (b_room(out) < 3)
                        return 1;

                b_putblk(out, digits, 3);
        }

        return 0;
}
+
/* Emit the QPACK encoded field section prefix into <out>: a null "required
 * insert count" and a null "sign bit + delta base", meaning no dynamic table
 * entry is referenced. Returns 0 on success else non-zero.
 */
int qpack_encode_field_section_line(struct buffer *out)
{
        /* required insert count, then S + delta base, both null */
        static const char prefix[2] = { 0x00, 0x00 };

        if (b_room(out) < sizeof(prefix))
                return 1;

        b_putblk(out, prefix, sizeof(prefix));

        return 0;
}
+
+#define QPACK_LFL_WLN_BIT 0x20 // Literal field line with literal name
+
+/* Encode a header in literal field line with literal name.
+ * Returns 0 on success else non-zero.
+ */
+int qpack_encode_header(struct buffer *out, const struct ist n, const struct ist v)
+{
+ int i;
+ size_t sz = qpack_get_prefix_int_size(n.len, 3) + n.len +
+ qpack_get_prefix_int_size(v.len, 7) + v.len;
+
+ if (sz > b_room(out))
+ return 1;
+
+ /* literal field line with literal name
+ * | 0 | 0 | 1 | N | H | . | . | . |
+ * N :(allow an intermediary to add the header in a dynamic table)
+ * H: huffman encoded
+ * name len
+ */
+ qpack_encode_prefix_integer(out, n.len, 3, QPACK_LFL_WLN_BIT);
+ /* name */
+ for (i = 0; i < n.len; ++i)
+ b_putchr(out, n.ptr[i]);
+
+ /* | 0 | . | . | . | . | . | . | . |
+ * value len
+ */
+ qpack_encode_prefix_integer(out, v.len, 7, 0x00);
+ /* value */
+ for (i = 0; i < v.len; ++i)
+ b_putchr(out, v.ptr[i]);
+
+ return 0;
+}
diff --git a/src/qpack-tbl.c b/src/qpack-tbl.c
new file mode 100644
index 0000000..7c59fd2
--- /dev/null
+++ b/src/qpack-tbl.c
@@ -0,0 +1,415 @@
+/*
+ * QPACK header table management (draft-ietf-quic-qpack-20)
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include <import/ist.h>
+#include <haproxy/http-hdr-t.h>
+#include <haproxy/qpack-tbl.h>
+
/* static header table as in draft-ietf-quic-qpack-20 Appendix A. Entries are
 * <name, value> pairs; an empty value means the entry only matches on the
 * name. The indices are fixed by the specification and are relied upon by
 * the encoder (e.g. the ":status" entries at 24-28 and 63-71).
 */
const struct http_hdr qpack_sht[QPACK_SHT_SIZE] = {
	[ 0] = { .n = IST(":authority"),                       .v = IST("")                       },
	[ 1] = { .n = IST(":path"),                            .v = IST("/")                      },
	[ 2] = { .n = IST("age"),                              .v = IST("0")                      },
	[ 3] = { .n = IST("content-disposition"),              .v = IST("")                       },
	[ 4] = { .n = IST("content-length"),                   .v = IST("0")                      },
	[ 5] = { .n = IST("cookie"),                           .v = IST("")                       },
	[ 6] = { .n = IST("date"),                             .v = IST("")                       },
	[ 7] = { .n = IST("etag"),                             .v = IST("")                       },
	[ 8] = { .n = IST("if-modified-since"),                .v = IST("")                       },
	[ 9] = { .n = IST("if-none-match"),                    .v = IST("")                       },
	[10] = { .n = IST("last-modified"),                    .v = IST("")                       },
	[11] = { .n = IST("link"),                             .v = IST("")                       },
	[12] = { .n = IST("location"),                         .v = IST("")                       },
	[13] = { .n = IST("referer"),                          .v = IST("")                       },
	[14] = { .n = IST("set-cookie"),                       .v = IST("")                       },
	[15] = { .n = IST(":method"),                          .v = IST("CONNECT")                },
	[16] = { .n = IST(":method"),                          .v = IST("DELETE")                 },
	[17] = { .n = IST(":method"),                          .v = IST("GET")                    },
	[18] = { .n = IST(":method"),                          .v = IST("HEAD")                   },
	[19] = { .n = IST(":method"),                          .v = IST("OPTIONS")                },
	[20] = { .n = IST(":method"),                          .v = IST("POST")                   },
	[21] = { .n = IST(":method"),                          .v = IST("PUT")                    },
	[22] = { .n = IST(":scheme"),                          .v = IST("http")                   },
	[23] = { .n = IST(":scheme"),                          .v = IST("https")                  },
	[24] = { .n = IST(":status"),                          .v = IST("103")                    },
	[25] = { .n = IST(":status"),                          .v = IST("200")                    },
	[26] = { .n = IST(":status"),                          .v = IST("304")                    },
	[27] = { .n = IST(":status"),                          .v = IST("404")                    },
	[28] = { .n = IST(":status"),                          .v = IST("503")                    },
	[29] = { .n = IST("accept"),                           .v = IST("*/*")                    },
	[30] = { .n = IST("accept"),                           .v = IST("application/dns-message") },
	[31] = { .n = IST("accept-encoding"),                  .v = IST("gzip, deflate, br")      },
	[32] = { .n = IST("accept-ranges"),                    .v = IST("bytes")                  },
	[33] = { .n = IST("access-control-allow-headers"),     .v = IST("cache-control")          },
	[34] = { .n = IST("access-control-allow-headers"),     .v = IST("content-type")           },
	[35] = { .n = IST("access-control-allow-origin"),      .v = IST("*")                      },
	[36] = { .n = IST("cache-control"),                    .v = IST("max-age=0")              },
	[37] = { .n = IST("cache-control"),                    .v = IST("max-age=2592000")        },
	[38] = { .n = IST("cache-control"),                    .v = IST("max-age=604800")         },
	[39] = { .n = IST("cache-control"),                    .v = IST("no-cache")               },
	[40] = { .n = IST("cache-control"),                    .v = IST("no-store")               },
	[41] = { .n = IST("cache-control"),                    .v = IST("public, max-age=31536000") },
	[42] = { .n = IST("content-encoding"),                 .v = IST("br")                     },
	[43] = { .n = IST("content-encoding"),                 .v = IST("gzip")                   },
	[44] = { .n = IST("content-type"),                     .v = IST("application/dns-message") },
	[45] = { .n = IST("content-type"),                     .v = IST("application/javascript") },
	[46] = { .n = IST("content-type"),                     .v = IST("application/json")       },
	[47] = { .n = IST("content-type"),                     .v = IST("application/"
	                                                                "x-www-form-urlencoded") },
	[48] = { .n = IST("content-type"),                     .v = IST("image/gif")              },
	[49] = { .n = IST("content-type"),                     .v = IST("image/jpeg")             },
	[50] = { .n = IST("content-type"),                     .v = IST("image/png")              },
	[51] = { .n = IST("content-type"),                     .v = IST("text/css")               },
	[52] = { .n = IST("content-type"),                     .v = IST("text/html;"
	                                                                " charset=utf-8")        },
	[53] = { .n = IST("content-type"),                     .v = IST("text/plain")             },
	[54] = { .n = IST("content-type"),                     .v = IST("text/plain;"
	                                                                "charset=utf-8")         },
	[55] = { .n = IST("range"),                            .v = IST("bytes=0-")               },
	[56] = { .n = IST("strict-transport-security"),        .v = IST("max-age=31536000")       },
	[57] = { .n = IST("strict-transport-security"),        .v = IST("max-age=31536000;"
	                                                                " includesubdomains")    },
	[58] = { .n = IST("strict-transport-security"),        .v = IST("max-age=31536000;"
	                                                                " includesubdomains;"
	                                                                " preload")              },
	[59] = { .n = IST("vary"),                             .v = IST("accept-encoding")        },
	[60] = { .n = IST("vary"),                             .v = IST("origin")                 },
	[61] = { .n = IST("x-content-type-options"),           .v = IST("nosniff")                },
	[62] = { .n = IST("x-xss-protection"),                 .v = IST("1; mode=block")          },
	[63] = { .n = IST(":status"),                          .v = IST("100")                    },
	[64] = { .n = IST(":status"),                          .v = IST("204")                    },
	[65] = { .n = IST(":status"),                          .v = IST("206")                    },
	[66] = { .n = IST(":status"),                          .v = IST("302")                    },
	[67] = { .n = IST(":status"),                          .v = IST("400")                    },
	[68] = { .n = IST(":status"),                          .v = IST("403")                    },
	[69] = { .n = IST(":status"),                          .v = IST("421")                    },
	[70] = { .n = IST(":status"),                          .v = IST("425")                    },
	[71] = { .n = IST(":status"),                          .v = IST("500")                    },
	[72] = { .n = IST("accept-language"),                  .v = IST("")                       },
	[73] = { .n = IST("access-control-allow-credentials"), .v = IST("FALSE")                  },
	[74] = { .n = IST("access-control-allow-credentials"), .v = IST("TRUE")                   },
	[75] = { .n = IST("access-control-allow-headers"),     .v = IST("*")                      },
	[76] = { .n = IST("access-control-allow-methods"),     .v = IST("get")                    },
	[77] = { .n = IST("access-control-allow-methods"),     .v = IST("get, post, options")     },
	[78] = { .n = IST("access-control-allow-methods"),     .v = IST("options")                },
	[79] = { .n = IST("access-control-expose-headers"),    .v = IST("content-length")         },
	[80] = { .n = IST("access-control-request-headers"),   .v = IST("content-type")           },
	[81] = { .n = IST("access-control-request-method"),    .v = IST("get")                    },
	[82] = { .n = IST("access-control-request-method"),    .v = IST("post")                   },
	[83] = { .n = IST("alt-svc"),                          .v = IST("clear")                  },
	[84] = { .n = IST("authorization"),                    .v = IST("")                       },
	[85] = { .n = IST("content-security-policy"),          .v = IST("script-src 'none';"
	                                                                " object-src 'none';"
	                                                                " base-uri 'none'")      },
	[86] = { .n = IST("early-data"),                       .v = IST("1")                      },
	[87] = { .n = IST("expect-ct"),                        .v = IST("")                       },
	[88] = { .n = IST("forwarded"),                        .v = IST("")                       },
	[89] = { .n = IST("if-range"),                         .v = IST("")                       },
	[90] = { .n = IST("origin"),                           .v = IST("")                       },
	[91] = { .n = IST("purpose"),                          .v = IST("prefetch")               },
	[92] = { .n = IST("server"),                           .v = IST("")                       },
	[93] = { .n = IST("timing-allow-origin"),              .v = IST("*")                      },
	[94] = { .n = IST("upgrade-insecure-requests"),        .v = IST("1")                      },
	[95] = { .n = IST("user-agent"),                       .v = IST("")                       },
	[96] = { .n = IST("x-forwarded-for"),                  .v = IST("")                       },
	[97] = { .n = IST("x-frame-options"),                  .v = IST("deny")                   },
	[98] = { .n = IST("x-frame-options"),                  .v = IST("sameorigin")             },
};
+
/* memory pool for QPACK dynamic tables; presumably initialized/registered
 * elsewhere (only the definition lives here) -- see qpack-tbl.h.
 */
struct pool_head *pool_head_qpack_tbl = NULL;
+
#ifdef DEBUG_QPACK
/* dump the whole dynamic header table: one line per entry with its absolute
 * index (dynamic entries start at QPACK_SHT_SIZE), its slot in the entry
 * array, its name/value strings and the address range of its payload inside
 * the table.
 */
void qpack_dht_dump(FILE *out, const struct qpack_dht *dht)
{
	unsigned int i;
	unsigned int slot;
	char name[4096], value[4096]; /* scratch buffers for istpad() */

	for (i = QPACK_SHT_SIZE; i < QPACK_SHT_SIZE + dht->used; i++) {
		slot = (qpack_get_dte(dht, i - QPACK_SHT_SIZE + 1) - dht->dte);
		fprintf(out, "idx=%u slot=%u name=<%s> value=<%s> addr=%u-%u\n",
			i, slot,
			istpad(name, qpack_idx_to_name(dht, i)).ptr,
			istpad(value, qpack_idx_to_value(dht, i)).ptr,
			dht->dte[slot].addr, dht->dte[slot].addr+dht->dte[slot].nlen+dht->dte[slot].vlen-1);
	}
}

/* check for the whole dynamic header table consistency, abort on failures */
void qpack_dht_check_consistency(const struct qpack_dht *dht)
{
	unsigned slot = qpack_dht_get_tail(dht);
	unsigned used2 = dht->used;
	unsigned total = 0;

	if (!dht->used)
		return;

	/* front, head and every used slot must lie below the wrapping point */
	if (dht->front >= dht->wrap)
		abort();

	if (dht->used > dht->wrap)
		abort();

	if (dht->head >= dht->wrap)
		abort();

	/* walk from the tail to the head and recompute the payload total */
	while (used2--) {
		total += dht->dte[slot].nlen + dht->dte[slot].vlen;
		slot++;
		if (slot >= dht->wrap)
			slot = 0;
	}

	/* the recomputed payload size must match the maintained counter */
	if (total != dht->total) {
		fprintf(stderr, "%d: total=%u dht=%u\n", __LINE__, total, dht->total);
		abort();
	}
}
#endif // DEBUG_QPACK
+
/* rebuild a new dynamic header table from <dht> with an unwrapped index and
 * contents at the end. The new table is returned, the caller must not use the
 * previous one anymore. NULL may be returned if no table could be allocated.
 */
static struct qpack_dht *qpack_dht_defrag(struct qpack_dht *dht)
{
	struct qpack_dht *alt_dht;
	uint16_t old, new;
	uint32_t addr;

	/* Note: for small tables we could use alloca() instead but
	 * portability especially for large tables can be problematic.
	 */
	alt_dht = qpack_dht_alloc();
	if (!alt_dht)
		return NULL;

	alt_dht->total = dht->total;
	alt_dht->used = dht->used;
	alt_dht->wrap = dht->used; /* unwrapped: slots 0..used-1, in order */

	new = 0;
	addr = alt_dht->size; /* payloads are packed downward from the end */

	if (dht->used) {
		/* start from the tail (oldest entry) so that slot 0 of the
		 * new table holds the oldest entry and ordering is preserved
		 */
		old = qpack_dht_get_tail(dht);
		do {
			alt_dht->dte[new].nlen = dht->dte[old].nlen;
			alt_dht->dte[new].vlen = dht->dte[old].vlen;
			addr -= dht->dte[old].nlen + dht->dte[old].vlen;
			alt_dht->dte[new].addr = addr;

			memcpy((void *)alt_dht + alt_dht->dte[new].addr,
			       (void *)dht + dht->dte[old].addr,
			       dht->dte[old].nlen + dht->dte[old].vlen);

			old++;
			if (old >= dht->wrap)
				old = 0;
			new++;
		} while (new < dht->used);
	}

	/* NOTE(review): when dht->used == 0, <new> is still 0 and front/head
	 * are set to new - 1, which underflows; callers appear to only
	 * defragment non-empty tables -- confirm.
	 */
	alt_dht->front = alt_dht->head = new - 1;

	/* copy the rebuilt table over the original one, then free the spare */
	memcpy(dht, alt_dht, dht->size);
	qpack_dht_free(alt_dht);

	return dht;
}
+
/* Purges table dht until a header field of <needed> bytes fits according to
 * the protocol (adding 32 bytes overhead). Returns non-zero on success, zero
 * on failure (ie: table empty but still not sufficient). It must only be
 * called when the table is not large enough to suit the new entry and there
 * are some entries left. In case of doubt, use dht_make_room() instead.
 */
int __qpack_dht_make_room(struct qpack_dht *dht, unsigned int needed)
{
	unsigned int used = dht->used;
	unsigned int wrap = dht->wrap;
	unsigned int tail;

	do {
		/* evict the tail (oldest) entry: the tail slot is the one
		 * located <used>-1 positions behind the head, modulo <wrap>
		 */
		tail = ((dht->head + 1U < used) ? wrap : 0) + dht->head + 1U - used;
		dht->total -= dht->dte[tail].nlen + dht->dte[tail].vlen;
		if (tail == dht->front)
			dht->front = dht->head;
		used--;
		/* each entry costs 32 bytes of protocol overhead, including
		 * the one being inserted, hence the extra "+ 32"
		 */
	} while (used && used * 32 + dht->total + needed + 32 > dht->size);

	dht->used = used;

	/* realign if empty */
	if (!used)
		dht->front = dht->head = 0;

	/* pack the table if it doesn't wrap anymore */
	if (dht->head + 1U >= used)
		dht->wrap = dht->head + 1;

	/* no need to check for 'used' here as if it doesn't fit, used==0 */
	return needed + 32 <= dht->size;
}
+
/* tries to insert a new header <name>:<value> in front of the current head. A
 * negative value is returned on error. 0 is returned both on success and when
 * the entry does not fit even after flushing the table (the latter is not an
 * error: the entry is simply not stored).
 */
int qpack_dht_insert(struct qpack_dht *dht, struct ist name, struct ist value)
{
	unsigned int used;
	unsigned int head;
	unsigned int prev;
	unsigned int wrap;
	unsigned int tail;
	uint32_t headroom, tailroom;

	/* evict old entries until the new one fits; give up silently if even
	 * an empty table would be too small
	 */
	if (!qpack_dht_make_room(dht, name.len + value.len))
		return 0;

	/* Now there is enough room in the table, that's guaranteed by the
	 * protocol, but not necessarily where we need it.
	 */

	used = dht->used;
	if (!used) {
		/* easy, the table was empty */
		dht->front = dht->head = 0;
		dht->wrap = dht->used = 1;
		dht->total = 0;
		head = 0;
		/* store the payload at the very end of the table */
		dht->dte[head].addr = dht->size - (name.len + value.len);
		goto copy;
	}

	/* compute the new head, used and wrap position */
	prev = head = dht->head;
	wrap = dht->wrap;
	tail = qpack_dht_get_tail(dht);

	used++;
	head++;

	if (head >= wrap) {
		/* head is leading the entries, we either need to push the
		 * table further or to loop back to released entries. We could
		 * force to loop back when at least half of the allocatable
		 * entries are free but in practice it never happens.
		 */
		if ((sizeof(*dht) + (wrap + 1) * sizeof(dht->dte[0]) <= dht->dte[dht->front].addr))
			wrap++;
		else if (head >= used) /* there's a hole at the beginning */
			head = 0;
		else {
			/* no more room, head hits tail and the index cannot be
			 * extended, we have to realign the whole table.
			 */
			if (!qpack_dht_defrag(dht))
				return -1;

			wrap = dht->wrap + 1;
			head = dht->head + 1;
			prev = head - 1;
			tail = 0;
		}
	}
	else if (used >= wrap) {
		/* we've hit the tail, we need to reorganize the index so that
		 * the head is at the end (but not necessarily move the data).
		 */
		if (!qpack_dht_defrag(dht))
			return -1;

		wrap = dht->wrap + 1;
		head = dht->head + 1;
		prev = head - 1;
		tail = 0;
	}

	/* Now we have updated head, used and wrap, we know that there is some
	 * available room at least from the protocol's perspective. This space
	 * is split in two areas :
	 *
	 *   1: if the previous head was the front cell, the space between the
	 *      end of the index table and the front cell's address.
	 *   2: if the previous head was the front cell, the space between the
	 *      end of the tail and the end of the table ; or if the previous
	 *      head was not the front cell, the space between the end of the
	 *      tail and the head's address.
	 */
	if (prev == dht->front) {
		/* the area was contiguous */
		headroom = dht->dte[dht->front].addr - (sizeof(*dht) + wrap * sizeof(dht->dte[0]));
		tailroom = dht->size - dht->dte[tail].addr - dht->dte[tail].nlen - dht->dte[tail].vlen;
	}
	else {
		/* it's already wrapped so we can't store anything in the headroom */
		headroom = 0;
		tailroom = dht->dte[prev].addr - dht->dte[tail].addr - dht->dte[tail].nlen - dht->dte[tail].vlen;
	}

	/* We can decide to stop filling the headroom as soon as there's enough
	 * room left in the tail to suit the protocol, but tests show that in
	 * practice it almost never happens in other situations so the extra
	 * test is useless and we simply fill the headroom as long as it's
	 * available and we don't wrap.
	 */
	if (prev == dht->front && headroom >= name.len + value.len) {
		/* install upfront and update ->front */
		dht->dte[head].addr = dht->dte[dht->front].addr - (name.len + value.len);
		dht->front = head;
	}
	else if (tailroom >= name.len + value.len) {
		dht->dte[head].addr = dht->dte[tail].addr + dht->dte[tail].nlen + dht->dte[tail].vlen + tailroom - (name.len + value.len);
	}
	else {
		/* need to defragment the table before inserting upfront.
		 * NOTE(review): unlike the two call sites above, this return
		 * value is not checked; qpack_dht_defrag() may return NULL on
		 * allocation failure -- confirm this cannot happen here.
		 */
		dht = qpack_dht_defrag(dht);
		wrap = dht->wrap + 1;
		head = dht->head + 1;
		dht->dte[head].addr = dht->dte[dht->front].addr - (name.len + value.len);
		dht->front = head;
	}

	dht->wrap = wrap;
	dht->head = head;
	dht->used = used;

 copy:
	dht->total         += name.len + value.len;
	dht->dte[head].nlen = name.len;
	dht->dte[head].vlen = value.len;

	/* payload layout: name immediately followed by the value */
	memcpy((void *)dht + dht->dte[head].addr, name.ptr, name.len);
	memcpy((void *)dht + dht->dte[head].addr + name.len, value.ptr, value.len);
	return 0;
}
diff --git a/src/queue.c b/src/queue.c
new file mode 100644
index 0000000..f20285b
--- /dev/null
+++ b/src/queue.c
@@ -0,0 +1,761 @@
+/*
+ * Queue management functions.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* Short explanation on the locking, which is far from being trivial : a
+ * pendconn is a list element which necessarily is associated with an existing
+ * stream. It has pendconn->strm always valid. A pendconn may only be in one of
+ * these three states :
+ * - unlinked : in this case it is an empty list head ;
+ * - linked into the server's queue ;
+ * - linked into the proxy's queue.
+ *
+ * A stream does not necessarily have such a pendconn. Thus the pendconn is
+ * designated by the stream->pend_pos pointer. This results in some properties :
+ * - pendconn->strm->pend_pos is never NULL for any valid pendconn
+ * - if p->node.node.leaf_p is NULL, the element is unlinked,
+ * otherwise it necessarily belongs to one of the other lists ; this may
+ * not be atomically checked under threads though ;
+ * - pendconn->px is never NULL if pendconn->list is not empty
+ * - pendconn->srv is never NULL if pendconn->list is in the server's queue,
+ * and is always NULL if pendconn->list is in the backend's queue or empty.
+ * - pendconn->target is NULL while the element is queued, and points to the
+ * assigned server when the pendconn is picked.
+ *
+ * Threads complicate the design a little bit but rules remain simple :
+ * - the server's queue lock must be held at least when manipulating the
+ * server's queue, which is when adding a pendconn to the queue and when
+ * removing a pendconn from the queue. It protects the queue's integrity.
+ *
+ * - the proxy's queue lock must be held at least when manipulating the
+ * proxy's queue, which is when adding a pendconn to the queue and when
+ * removing a pendconn from the queue. It protects the queue's integrity.
+ *
+ * - both locks are compatible and may be held at the same time.
+ *
+ * - a pendconn_add() is only performed by the stream which will own the
+ * pendconn ; the pendconn is allocated at this moment and returned ; it is
+ * added to either the server or the proxy's queue while holding this
+ *   queue's lock.
+ *
+ * - the pendconn is then met by a thread walking over the proxy or server's
+ * queue with the respective lock held. This lock is exclusive and the
+ * pendconn can only appear in one queue so by definition a single thread
+ * may find this pendconn at a time.
+ *
+ * - the pendconn is unlinked either by its own stream upon success/abort/
+ * free, or by another one offering it its server slot. This is achieved by
+ * pendconn_process_next_strm() under either the server or proxy's lock,
+ * pendconn_redistribute() under the server's lock, pendconn_grab_from_px()
+ * under the proxy's lock, or pendconn_unlink() under either the proxy's or
+ * the server's lock depending on the queue the pendconn is attached to.
+ *
+ * - no single operation except the pendconn initialisation prior to the
+ *   insertion are performed without either a queue lock held or the element
+ * being unlinked and visible exclusively to its stream.
+ *
+ * - pendconn_grab_from_px() and pendconn_process_next_strm() assign ->target
+ * so that the stream knows what server to work with (via
+ * pendconn_dequeue() which sets it on strm->target).
+ *
+ * - a pendconn doesn't switch between queues, it stays where it is.
+ */
+
+#include <import/eb32tree.h>
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/pool.h>
+#include <haproxy/queue.h>
+#include <haproxy/sample.h>
+#include <haproxy/server-t.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/thread.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+
/* Queue keys are 32-bit values: the 12 upper bits hold the priority class
 * (biased by 0x7ff, presumably so that negative classes sort before positive
 * ones -- confirm against the priority-class setters) and the 20 lower bits
 * hold a wrapping time offset in milliseconds relative to now_ms.
 * NOW_OFFSET_BOUNDARY() is the oldest offset still considered in the past.
 */
#define NOW_OFFSET_BOUNDARY() ((now_ms - (TIMER_LOOK_BACK >> 12)) & 0xfffff)
#define KEY_CLASS(key) ((u32)key & 0xfff00000)
#define KEY_OFFSET(key) ((u32)key & 0x000fffff)
#define KEY_CLASS_OFFSET_BOUNDARY(key) (KEY_CLASS(key) | NOW_OFFSET_BOUNDARY())
#define MAKE_KEY(class, offset) (((u32)(class + 0x7ff) << 20) | ((u32)(now_ms + offset) & 0xfffff))

/* pool used to allocate one pendconn per queued stream */
DECLARE_POOL(pool_head_pendconn, "pendconn", sizeof(struct pendconn));
+
+/* returns the effective dynamic maxconn for a server, considering the minconn
+ * and the proxy's usage relative to its dynamic connections limit. It is
+ * expected that 0 < s->minconn <= s->maxconn when this is called. If the
+ * server is currently warming up, the slowstart is also applied to the
+ * resulting value, which can be lower than minconn in this case, but never
+ * less than 1.
+ */
+unsigned int srv_dynamic_maxconn(const struct server *s)
+{
+ unsigned int max;
+
+ if (s->proxy->beconn >= s->proxy->fullconn)
+ /* no fullconn or proxy is full */
+ max = s->maxconn;
+ else if (s->minconn == s->maxconn)
+ /* static limit */
+ max = s->maxconn;
+ else max = MAX(s->minconn,
+ s->proxy->beconn * s->maxconn / s->proxy->fullconn);
+
+ if ((s->cur_state == SRV_ST_STARTING) &&
+ ns_to_sec(now_ns) < s->last_change + s->slowstart &&
+ ns_to_sec(now_ns) >= s->last_change) {
+ unsigned int ratio;
+ ratio = 100 * (ns_to_sec(now_ns) - s->last_change) / s->slowstart;
+ max = MAX(1, max * ratio / 100);
+ }
+ return max;
+}
+
/* Remove the pendconn from the server's queue. At this stage, the connection
 * is not really dequeued. It will be done during the process_stream. It is
 * up to the caller to atomically decrement the pending counts.
 *
 * The caller must own the lock on the server queue. The pendconn must still be
 * queued (p->node.leaf_p != NULL) and must be in a server (p->srv != NULL).
 */
static void __pendconn_unlink_srv(struct pendconn *p)
{
	/* the queue's global index advances as entries are dequeued; the
	 * delta with the index recorded at insertion time is the number of
	 * positions this stream moved up while waiting.
	 */
	p->strm->logs.srv_queue_pos += _HA_ATOMIC_LOAD(&p->queue->idx) - p->queue_idx;
	eb32_delete(&p->node);
}
+
/* Remove the pendconn from the proxy's queue. At this stage, the connection
 * is not really dequeued. It will be done during the process_stream. It is
 * up to the caller to atomically decrement the pending counts.
 *
 * The caller must own the lock on the proxy queue. The pendconn must still be
 * queued (p->node.leaf_p != NULL) and must be in the proxy (p->srv == NULL).
 */
static void __pendconn_unlink_prx(struct pendconn *p)
{
	/* same accounting as __pendconn_unlink_srv() but on the proxy's
	 * queue position counter
	 */
	p->strm->logs.prx_queue_pos += _HA_ATOMIC_LOAD(&p->queue->idx) - p->queue_idx;
	eb32_delete(&p->node);
}
+
/* Locks the queue the pendconn element belongs to. This relies on both p->px
 * and p->srv to be properly initialized (which is always the case once the
 * element has been added).
 * NOTE(review): the code only dereferences p->queue; the reference to p->px
 * and p->srv above may be stale from an earlier design -- confirm.
 */
static inline void pendconn_queue_lock(struct pendconn *p)
{
	HA_SPIN_LOCK(QUEUE_LOCK, &p->queue->lock);
}
+
/* Unlocks the queue the pendconn element belongs to. This relies on both p->px
 * and p->srv to be properly initialized (which is always the case once the
 * element has been added).
 * NOTE(review): as with pendconn_queue_lock(), only p->queue is used here;
 * the p->px / p->srv wording may be stale -- confirm.
 */
static inline void pendconn_queue_unlock(struct pendconn *p)
{
	HA_SPIN_UNLOCK(QUEUE_LOCK, &p->queue->lock);
}
+
/* Removes the pendconn from the server/proxy queue. At this stage, the
 * connection is not really dequeued. It will be done during process_stream().
 * This function takes all the required locks for the operation. The pendconn
 * must be valid, though it doesn't matter if it was already unlinked. Prefer
 * pendconn_cond_unlink() to first check <p>. It also forces a serialization
 * on p->del_lock to make sure another thread currently waking it up finishes
 * first.
 */
void pendconn_unlink(struct pendconn *p)
{
	struct queue  *q  = p->queue;
	struct proxy  *px = q->px;
	struct server *sv = q->sv;
	uint oldidx;
	int done = 0;

	/* snapshot the queue index before locking; the delta with our own
	 * queue_idx is the number of positions the stream moved up while
	 * queued.
	 */
	oldidx = _HA_ATOMIC_LOAD(&p->queue->idx);
	HA_SPIN_LOCK(QUEUE_LOCK, &q->lock);
	HA_SPIN_LOCK(QUEUE_LOCK, &p->del_lock);

	/* a NULL leaf_p means another thread already unlinked the node */
	if (p->node.node.leaf_p) {
		eb32_delete(&p->node);
		done = 1;
	}

	HA_SPIN_UNLOCK(QUEUE_LOCK, &p->del_lock);
	HA_SPIN_UNLOCK(QUEUE_LOCK, &q->lock);

	if (done) {
		/* we were the one unlinking it, so the counters are ours to
		 * update, outside of the lock
		 */
		oldidx -= p->queue_idx;
		if (sv)
			p->strm->logs.srv_queue_pos += oldidx;
		else
			p->strm->logs.prx_queue_pos += oldidx;

		_HA_ATOMIC_DEC(&q->length);
		_HA_ATOMIC_DEC(&px->totpend);
	}
}
+
+/* Retrieve the first pendconn from tree <pendconns>. Classes are always
+ * considered first, then the time offset. The time does wrap, so the
+ * lookup is performed twice, one to retrieve the first class and a second
+ * time to retrieve the earliest time in this class.
+ */
+static struct pendconn *pendconn_first(struct eb_root *pendconns)
+{
+ struct eb32_node *node, *node2 = NULL;
+ u32 key;
+
+ node = eb32_first(pendconns);
+ if (!node)
+ return NULL;
+
+ key = KEY_CLASS_OFFSET_BOUNDARY(node->key);
+ node2 = eb32_lookup_ge(pendconns, key);
+
+ if (!node2 ||
+ KEY_CLASS(node2->key) != KEY_CLASS(node->key)) {
+ /* no other key in the tree, or in this class */
+ return eb32_entry(node, struct pendconn, node);
+ }
+
+ /* found a better key */
+ return eb32_entry(node2, struct pendconn, node);
+}
+
/* Process the next pending connection from either a server or a proxy, and
 * returns a strictly positive value on success (see below). If no pending
 * connection is found, 0 is returned. Note that neither <srv> nor <px> may be
 * NULL. Priority is given to the oldest request in the queue if both <srv> and
 * <px> have pending requests. This ensures that no request will be left
 * unserved. The <px> queue is not considered if the server (or a tracked
 * server) is not RUNNING, is disabled, or has a null weight (server going
 * down). The <srv> queue is still considered in this case, because if some
 * connections remain there, it means that some requests have been forced there
 * after it was seen down (eg: due to option persist). The stream is
 * immediately marked as "assigned", and both its <srv> and <srv_conn> are set
 * to <srv>.
 *
 * The proxy's queue will be consulted only if px_ok is non-zero.
 *
 * This function must only be called if the server queue is locked _AND_ the
 * proxy queue is not. Today it is only called by process_srv_queue.
 * When a pending connection is dequeued, this function returns 1 if a pendconn
 * is dequeued, otherwise 0.
 */
static int pendconn_process_next_strm(struct server *srv, struct proxy *px, int px_ok)
{
	struct pendconn *p = NULL;
	struct pendconn *pp = NULL;
	u32 pkey, ppkey;

	/* candidate from the server's queue (its lock is held by the caller) */
	p = NULL;
	if (srv->queue.length)
		p = pendconn_first(&srv->queue.head);

	/* candidate from the proxy's queue, if usable */
	pp = NULL;
	if (px_ok && px->queue.length) {
		/* the lock only remains held as long as the pp is
		 * in the proxy's queue.
		 */
		HA_SPIN_LOCK(QUEUE_LOCK, &px->queue.lock);
		pp = pendconn_first(&px->queue.head);
		if (!pp)
			HA_SPIN_UNLOCK(QUEUE_LOCK, &px->queue.lock);
	}

	if (!p && !pp)
		return 0;
	else if (!pp)
		goto use_p; /*  p != NULL */
	else if (!p)
		goto use_pp; /* pp != NULL */

	/* p != NULL && pp != NULL*/

	/* lower class always wins, regardless of time offsets */
	if (KEY_CLASS(p->node.key) < KEY_CLASS(pp->node.key))
		goto use_p;

	if (KEY_CLASS(pp->node.key) < KEY_CLASS(p->node.key))
		goto use_pp;

	/* same class: compare the wrapping time offsets; anything below the
	 * current boundary has already wrapped and is in fact in the future
	 */
	pkey  = KEY_OFFSET(p->node.key);
	ppkey = KEY_OFFSET(pp->node.key);

	if (pkey < NOW_OFFSET_BOUNDARY())
		pkey += 0x100000; // key in the future

	if (ppkey < NOW_OFFSET_BOUNDARY())
		ppkey += 0x100000; // key in the future

	if (pkey <= ppkey)
		goto use_p;

 use_pp:
	/* we'd like to release the proxy lock ASAP to let other threads
	 * work with other servers. But for this we must first hold the
	 * pendconn alive to prevent a removal from its owning stream.
	 */
	HA_SPIN_LOCK(QUEUE_LOCK, &pp->del_lock);

	/* now the element won't go, we can release the proxy */
	__pendconn_unlink_prx(pp);
	HA_SPIN_UNLOCK(QUEUE_LOCK, &px->queue.lock);

	pp->strm_flags |= SF_ASSIGNED;
	pp->target = srv;
	stream_add_srv_conn(pp->strm, srv);

	/* we must wake the task up before releasing the lock as it's the only
	 * way to make sure the task still exists. The pendconn cannot vanish
	 * under us since the task will need to take the lock anyway and to wait
	 * if it wakes up on a different thread.
	 */
	task_wakeup(pp->strm->task, TASK_WOKEN_RES);
	HA_SPIN_UNLOCK(QUEUE_LOCK, &pp->del_lock);

	_HA_ATOMIC_DEC(&px->queue.length);
	_HA_ATOMIC_INC(&px->queue.idx);
	return 1;

 use_p:
	/* we don't need the px queue lock anymore, we have the server's lock */
	if (pp)
		HA_SPIN_UNLOCK(QUEUE_LOCK, &px->queue.lock);

	p->strm_flags |= SF_ASSIGNED;
	p->target = srv;
	stream_add_srv_conn(p->strm, srv);

	/* we must wake the task up before releasing the lock as it's the only
	 * way to make sure the task still exists. The pendconn cannot vanish
	 * under us since the task will need to take the lock anyway and to wait
	 * if it wakes up on a different thread.
	 */
	task_wakeup(p->strm->task, TASK_WOKEN_RES);
	__pendconn_unlink_srv(p);

	_HA_ATOMIC_DEC(&srv->queue.length);
	_HA_ATOMIC_INC(&srv->queue.idx);
	return 1;
}
+
+/* Manages a server's connection queue. This function will try to dequeue as
+ * many pending streams as possible, and wake them up.
+ */
+void process_srv_queue(struct server *s)
+{
+ struct server *ref = s->track ? s->track : s;
+ struct proxy *p = s->proxy;
+ int maxconn;
+ int stop = 0;
+ int done = 0;
+ int px_ok;
+
+ /* px_ok is set if this server may also dequeue from the proxy's queue:
+ * the (possibly tracked) server must be usable, and a backup server may
+ * only do so when no active server remains and it is either the elected
+ * backup or all backups are allowed to be used.
+ */
+ px_ok = srv_currently_usable(ref) &&
+ (!(s->flags & SRV_F_BACKUP) ||
+ (!p->srv_act &&
+ (s == p->lbprm.fbck || (p->options & PR_O_USE_ALL_BK))));
+
+ /* let's repeat that under the lock on each round. Threads competing
+ * for the same server will give up, knowing that at least one of
+ * them will check the conditions again before quitting. In order
+ * to avoid the deadly situation where one thread spends its time
+ * dequeueing for others, we limit the number of rounds it does.
+ * However we still re-enter the loop for one pass if there's no
+ * more served, otherwise we could end up with no other thread
+ * trying to dequeue them.
+ */
+ while (!stop && (done < global.tune.maxpollevents || !s->served) &&
+ s->served < (maxconn = srv_dynamic_maxconn(s))) {
+ if (HA_SPIN_TRYLOCK(QUEUE_LOCK, &s->queue.lock) != 0)
+ break;
+
+ while (s->served < maxconn) {
+ stop = !pendconn_process_next_strm(s, p, px_ok);
+ if (stop)
+ break;
+ _HA_ATOMIC_INC(&s->served);
+ done++;
+ if (done >= global.tune.maxpollevents)
+ break;
+ }
+ HA_SPIN_UNLOCK(QUEUE_LOCK, &s->queue.lock);
+ }
+
+ if (done) {
+ /* account globally for all dequeued entries and let the LB
+ * algorithm know that this server took connections.
+ */
+ _HA_ATOMIC_SUB(&p->totpend, done);
+ _HA_ATOMIC_ADD(&p->served, done);
+ __ha_barrier_atomic_store();
+ if (p->lbprm.server_take_conn)
+ p->lbprm.server_take_conn(s);
+ }
+}
+
+/* Adds the stream <strm> to the pending connection queue of server <strm>->srv
+ * or to the one of <strm>->proxy if srv is NULL. All counters and back pointers
+ * are updated accordingly. Returns NULL if no memory is available, otherwise the
+ * pendconn itself. If the stream was already marked as served, its flag is
+ * cleared. It is illegal to call this function with a non-NULL strm->srv_conn.
+ * The stream's queue position is counted with an offset of -1 because we want
+ * to make sure that being at the first position in the queue reports 1.
+ *
+ * The queue is sorted by the composition of the priority_class, and the current
+ * timestamp offset by strm->priority_offset. The timestamp is in milliseconds
+ * and truncated to 20 bits, so will wrap every 17m28s575ms.
+ * The offset can be positive or negative, and an offset of 0 puts it in the
+ * middle of this range (~ 8 min). Note that this also means if the adjusted
+ * timestamp wraps around, the request will be misinterpreted as being of
+ * the highest priority for that priority class.
+ *
+ * This function must be called by the stream itself, so in the context of
+ * process_stream.
+ */
+struct pendconn *pendconn_add(struct stream *strm)
+{
+ struct pendconn *p;
+ struct proxy *px;
+ struct server *srv;
+ struct queue *q;
+ unsigned int *max_ptr;
+ unsigned int old_max, new_max;
+
+ p = pool_alloc(pool_head_pendconn);
+ if (!p)
+ return NULL;
+
+ p->target = NULL;
+ p->node.key = MAKE_KEY(strm->priority_class, strm->priority_offset);
+ p->strm = strm;
+ p->strm_flags = strm->flags;
+ HA_SPIN_INIT(&p->del_lock);
+ strm->pend_pos = p;
+
+ /* queue on the assigned server when there is one, otherwise on the
+ * backend's shared queue.
+ */
+ px = strm->be;
+ if (strm->flags & SF_ASSIGNED)
+ srv = objt_server(strm->target);
+ else
+ srv = NULL;
+
+ if (srv) {
+ q = &srv->queue;
+ max_ptr = &srv->counters.nbpend_max;
+ }
+ else {
+ q = &px->queue;
+ max_ptr = &px->be_counters.nbpend_max;
+ }
+
+ p->queue = q;
+ p->queue_idx = _HA_ATOMIC_LOAD(&q->idx) - 1; // for logging only
+ new_max = _HA_ATOMIC_ADD_FETCH(&q->length, 1);
+ old_max = _HA_ATOMIC_LOAD(max_ptr);
+ /* lockless update of the queue's max-length watermark */
+ while (new_max > old_max) {
+ if (likely(_HA_ATOMIC_CAS(max_ptr, &old_max, new_max)))
+ break;
+ }
+ __ha_barrier_atomic_store();
+
+ HA_SPIN_LOCK(QUEUE_LOCK, &q->lock);
+ eb32_insert(&q->head, &p->node);
+ HA_SPIN_UNLOCK(QUEUE_LOCK, &q->lock);
+
+ _HA_ATOMIC_INC(&px->totpend);
+ return p;
+}
+
+/* Redistribute pending connections when a server goes down. The number of
+ * connections redistributed is returned. It will take the server queue lock
+ * and does not use nor depend on other locks.
+ */
+int pendconn_redistribute(struct server *s)
+{
+ struct pendconn *p;
+ struct eb32_node *node, *nodeb;
+ int xferred = 0;
+
+ /* The REDISP option was specified. We will ignore cookie and force to
+ * balance or use the dispatcher. */
+ if ((s->proxy->options & (PR_O_REDISP|PR_O_PERSIST)) != PR_O_REDISP)
+ return 0;
+
+ HA_SPIN_LOCK(QUEUE_LOCK, &s->queue.lock);
+ for (node = eb32_first(&s->queue.head); node; node = nodeb) {
+ nodeb = eb32_next(node);
+
+ p = eb32_entry(node, struct pendconn, node);
+ /* streams which forced persistence stay on this server */
+ if (p->strm_flags & SF_FORCE_PRST)
+ continue;
+
+ /* it's left to the dispatcher to choose a server */
+ __pendconn_unlink_srv(p);
+ p->strm_flags &= ~(SF_DIRECT | SF_ASSIGNED);
+
+ task_wakeup(p->strm->task, TASK_WOKEN_RES);
+ xferred++;
+ }
+ HA_SPIN_UNLOCK(QUEUE_LOCK, &s->queue.lock);
+
+ if (xferred) {
+ _HA_ATOMIC_SUB(&s->queue.length, xferred);
+ _HA_ATOMIC_SUB(&s->proxy->totpend, xferred);
+ }
+ return xferred;
+}
+
+/* Check for pending connections at the backend, and assign some of them to
+ * the server coming up. The server's weight is checked before being assigned
+ * connections it may not be able to handle. The total number of transferred
+ * connections is returned. It will take the proxy's queue lock and will not
+ * use nor depend on other locks.
+ */
+int pendconn_grab_from_px(struct server *s)
+{
+ struct pendconn *p;
+ int maxconn, xferred = 0;
+
+ if (!srv_currently_usable(s))
+ return 0;
+
+ /* if this is a backup server and there are active servers or at
+ * least another backup server was elected, then this one must
+ * not dequeue requests from the proxy.
+ */
+ if ((s->flags & SRV_F_BACKUP) &&
+ (s->proxy->srv_act ||
+ ((s != s->proxy->lbprm.fbck) && !(s->proxy->options & PR_O_USE_ALL_BK))))
+ return 0;
+
+ HA_SPIN_LOCK(QUEUE_LOCK, &s->proxy->queue.lock);
+ maxconn = srv_dynamic_maxconn(s);
+ while ((p = pendconn_first(&s->proxy->queue.head))) {
+ /* stop once this server's dynamic maxconn would be exceeded */
+ if (s->maxconn && s->served + xferred >= maxconn)
+ break;
+
+ /* the stream will skip LB and go straight to this server */
+ __pendconn_unlink_prx(p);
+ p->target = s;
+
+ task_wakeup(p->strm->task, TASK_WOKEN_RES);
+ xferred++;
+ }
+ HA_SPIN_UNLOCK(QUEUE_LOCK, &s->proxy->queue.lock);
+ if (xferred) {
+ _HA_ATOMIC_SUB(&s->proxy->queue.length, xferred);
+ _HA_ATOMIC_SUB(&s->proxy->totpend, xferred);
+ }
+ return xferred;
+}
+
+/* Try to dequeue pending connection attached to the stream <strm>. It must
+ * always exists here. If the pendconn is still linked to the server or the
+ * proxy queue, nothing is done and the function returns 1. Otherwise,
+ * <strm>->flags and <strm>->target are updated, the pendconn is released and 0
+ * is returned.
+ *
+ * This function must be called by the stream itself, so in the context of
+ * process_stream.
+ */
+int pendconn_dequeue(struct stream *strm)
+{
+ struct pendconn *p;
+ int is_unlinked;
+
+ /* unexpected case because it is called by the stream itself and
+ * only the stream can release a pendconn. So it is only
+ * possible if a pendconn is released by someone else or if the
+ * stream is supposed to be queued but without its associated
+ * pendconn. In both cases it is a bug! */
+ BUG_ON(!strm->pend_pos);
+
+ p = strm->pend_pos;
+
+ /* note below : we need to grab the queue's lock to check for emptiness
+ * because we don't want a partial _grab_from_px() or _redistribute()
+ * to be called in parallel and show an empty list without having the
+ * time to finish. With this we know that if we see the element
+ * unlinked, these functions were completely done.
+ */
+ pendconn_queue_lock(p);
+ is_unlinked = !p->node.node.leaf_p;
+ pendconn_queue_unlock(p);
+
+ /* serialize to make sure the element was finished processing */
+ HA_SPIN_LOCK(QUEUE_LOCK, &p->del_lock);
+ HA_SPIN_UNLOCK(QUEUE_LOCK, &p->del_lock);
+
+ if (!is_unlinked)
+ return 1;
+
+ /* the pendconn is not queued anymore and will not be so we're safe
+ * to proceed.
+ */
+ strm->flags &= ~(SF_DIRECT | SF_ASSIGNED);
+ strm->flags |= p->strm_flags & (SF_DIRECT | SF_ASSIGNED);
+
+ /* the entry might have been redistributed to another server */
+ if (!(strm->flags & SF_ASSIGNED))
+ sockaddr_free(&strm->scb->dst);
+
+ if (p->target) {
+ /* a server picked this pendconn, it must skip LB */
+ strm->target = &p->target->obj_type;
+ strm->flags |= SF_ASSIGNED;
+ }
+
+ strm->pend_pos = NULL;
+ pool_free(pool_head_pendconn, p);
+ return 0;
+}
+
+/* "set-priority-class" action: evaluates the rule's sample expression as a
+ * signed integer and stores the clamped result as the stream's queue priority
+ * class. Always continues rule processing, even when the sample fails.
+ */
+static enum act_return action_set_priority_class(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct sample *smp;
+
+ smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_SINT);
+ if (!smp)
+ return ACT_RET_CONT;
+
+ s->priority_class = queue_limit_class(smp->data.u.sint);
+ return ACT_RET_CONT;
+}
+
+/* "set-priority-offset" action: evaluates the rule's sample expression as a
+ * signed integer and stores the clamped result as the stream's queue priority
+ * time offset. Always continues rule processing, even when the sample fails.
+ */
+static enum act_return action_set_priority_offset(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct sample *smp;
+
+ smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_SINT);
+ if (!smp)
+ return ACT_RET_CONT;
+
+ s->priority_offset = queue_limit_offset(smp->data.u.sint);
+
+ return ACT_RET_CONT;
+}
+
+/* Parses the "set-priority-class" action arguments into a sample expression
+ * and checks that the expression's fetch sources are usable at request time in
+ * this proxy. Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR with <err>
+ * filled otherwise.
+ */
+static enum act_parse_ret parse_set_priority_class(const char **args, int *arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ unsigned int where = 0;
+
+ rule->arg.expr = sample_parse_expr((char **)args, arg, px->conf.args.file,
+ px->conf.args.line, err, &px->conf.args, NULL);
+ if (!rule->arg.expr)
+ return ACT_RET_PRS_ERR;
+
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+
+ if (!(rule->arg.expr->fetch->val & where)) {
+ memprintf(err,
+ "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[0], sample_src_names(rule->arg.expr->fetch->use));
+ /* NOTE(review): plain free() on a sample expr may leak its
+ * sub-elements; confirm whether release_sample_expr() is needed.
+ */
+ free(rule->arg.expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = action_set_priority_class;
+ return ACT_RET_PRS_OK;
+}
+
+/* Parses the "set-priority-offset" action arguments into a sample expression
+ * and checks that the expression's fetch sources are usable at request time in
+ * this proxy. Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR with <err>
+ * filled otherwise.
+ */
+static enum act_parse_ret parse_set_priority_offset(const char **args, int *arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ unsigned int where = 0;
+
+ rule->arg.expr = sample_parse_expr((char **)args, arg, px->conf.args.file,
+ px->conf.args.line, err, &px->conf.args, NULL);
+ if (!rule->arg.expr)
+ return ACT_RET_PRS_ERR;
+
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+
+ if (!(rule->arg.expr->fetch->val & where)) {
+ memprintf(err,
+ "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[0], sample_src_names(rule->arg.expr->fetch->use));
+ /* NOTE(review): plain free() on a sample expr may leak its
+ * sub-elements; confirm whether release_sample_expr() is needed.
+ */
+ free(rule->arg.expr);
+ return ACT_RET_PRS_ERR;
+ }
+
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = action_set_priority_offset;
+ return ACT_RET_PRS_OK;
+}
+
+static struct action_kw_list tcp_cont_kws = {ILH, {
+ { "set-priority-class", parse_set_priority_class },
+ { "set-priority-offset", parse_set_priority_offset },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_cont_kws);
+
+static struct action_kw_list http_req_kws = {ILH, {
+ { "set-priority-class", parse_set_priority_class },
+ { "set-priority-offset", parse_set_priority_offset },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_kws);
+
+/* "prio_class" sample fetch: returns the stream's current queue priority
+ * class as a signed integer. Fails (returns 0) when no stream is attached.
+ */
+static int
+smp_fetch_priority_class(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!smp->strm)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = smp->strm->priority_class;
+
+ return 1;
+}
+
+/* "prio_offset" sample fetch: returns the stream's current queue priority
+ * time offset as a signed integer. Fails (returns 0) when no stream is
+ * attached.
+ */
+static int
+smp_fetch_priority_offset(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!smp->strm)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = smp->strm->priority_offset;
+
+ return 1;
+}
+
+
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+ { "prio_class", smp_fetch_priority_class, 0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "prio_offset", smp_fetch_priority_offset, 0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { /* END */},
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/quic_ack.c b/src/quic_ack.c
new file mode 100644
index 0000000..d28a698
--- /dev/null
+++ b/src/quic_ack.c
@@ -0,0 +1,258 @@
+#include <inttypes.h>
+
+#include <import/eb64tree.h>
+
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+DECLARE_STATIC_POOL(pool_head_quic_arng, "quic_arng", sizeof(struct quic_arng_node));
+
+/* Deallocate <l> list of ACK ranges. */
+/* Deallocate <arngs> list of ACK ranges: walks the eb64 tree, unlinking and
+ * releasing every quic_arng_node back to its pool. <qc> is only used for
+ * tracing.
+ */
+void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs)
+{
+ struct eb64_node *n;
+ struct quic_arng_node *ar;
+
+ TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+ n = eb64_first(&arngs->root);
+ while (n) {
+ struct eb64_node *next;
+
+ /* fetch the successor before deleting the current node */
+ ar = eb64_entry(n, struct quic_arng_node, first);
+ next = eb64_next(n);
+ eb64_delete(n);
+ pool_free(pool_head_quic_arng, ar);
+ n = next;
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
+/* Return the gap value between <p> and <q> ACK ranges where <q> follows <p> in
+ * descending order.
+ */
+/* Gap between <p> and <q> as encoded in an ACK frame, where <q> precedes <p>
+ * in packet-number order (<q>->last < <p>->first). Assumes the ranges are
+ * disjoint, i.e. p->first.key >= q->last + 2, so the result is non-negative.
+ */
+static inline size_t sack_gap(struct quic_arng_node *p,
+ struct quic_arng_node *q)
+{
+ return p->first.key - q->last - 2;
+}
+
+/* Set the encoded size of <arngs> QUIC ack ranges. */
+/* Set the encoded size of <arngs> QUIC ack ranges: sum of the varint sizes of
+ * the largest acknowledged, the first range length, the range count, then for
+ * each following range (in descending order) its gap and its length.
+ */
+static void quic_arngs_set_enc_sz(struct quic_conn *qc, struct quic_arngs *arngs)
+{
+ struct eb64_node *node, *next;
+ struct quic_arng_node *ar, *ar_next;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+ node = eb64_last(&arngs->root);
+ if (!node)
+ goto leave;
+
+ /* the highest range provides the Largest Acknowledged and First Range */
+ ar = eb64_entry(node, struct quic_arng_node, first);
+ arngs->enc_sz = quic_int_getsize(ar->last) +
+ quic_int_getsize(ar->last - ar->first.key) + quic_int_getsize(arngs->sz - 1);
+
+ while ((next = eb64_prev(node))) {
+ ar_next = eb64_entry(next, struct quic_arng_node, first);
+ arngs->enc_sz += quic_int_getsize(sack_gap(ar, ar_next)) +
+ quic_int_getsize(ar_next->last - ar_next->first.key);
+ node = next;
+ ar = eb64_entry(node, struct quic_arng_node, first);
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+}
+
+/* Insert <ar> ack range into <argns> tree of ack ranges.
+ * Returns the ack range node which has been inserted if succeeded, NULL if not.
+ */
+/* Insert <ar> ack range into <arngs> tree of ack ranges. When the tree is
+ * already full (QUIC_MAX_ACK_RANGES), the lowest (oldest) range is evicted
+ * first to make room.
+ * Returns the ack range node which has been inserted if succeeded, NULL if not
+ * (allocation failure).
+ */
+static inline
+struct quic_arng_node *quic_insert_new_range(struct quic_conn *qc,
+ struct quic_arngs *arngs,
+ struct quic_arng *ar)
+{
+ struct quic_arng_node *new_ar;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT, qc);
+
+ if (arngs->sz >= QUIC_MAX_ACK_RANGES) {
+ struct eb64_node *first;
+
+ /* evict the smallest-keyed range; the eb64_node is the first
+ * member of quic_arng_node so the pointer is the node itself.
+ */
+ first = eb64_first(&arngs->root);
+ BUG_ON(first == NULL);
+ eb64_delete(first);
+ pool_free(pool_head_quic_arng, first);
+ arngs->sz--;
+ }
+
+ new_ar = pool_alloc(pool_head_quic_arng);
+ if (!new_ar) {
+ TRACE_ERROR("ack range allocation failed", QUIC_EV_CONN_RXPKT, qc);
+ goto leave;
+ }
+
+ new_ar->first.key = ar->first;
+ new_ar->last = ar->last;
+ eb64_insert(&arngs->root, &new_ar->first);
+ arngs->sz++;
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+ return new_ar;
+}
+
+/* Update <arngs> tree of ACK ranges with <ar> as new ACK range value.
+ * Note that this function computes the number of bytes required to encode
+ * this tree of ACK ranges in descending order.
+ *
+ * Descending order
+ * ------------->
+ * range1 range2
+ * ..........|--------|..............|--------|
+ * ^ ^ ^ ^
+ * | | | |
+ * last1 first1 last2 first2
+ * ..........+--------+--------------+--------+......
+ * diff1 gap12 diff2
+ *
+ * To encode the previous list of ranges we must encode integers as follows in
+ * descending order:
+ * enc(last2),enc(diff2),enc(gap12),enc(diff1)
+ * with diff1 = last1 - first1
+ * diff2 = last2 - first2
+ * gap12 = first1 - last2 - 2 (>= 0)
+ *
+
+returns 0 on error
+
+ */
+int quic_update_ack_ranges_list(struct quic_conn *qc,
+ struct quic_arngs *arngs,
+ struct quic_arng *ar)
+{
+ int ret = 0;
+ struct eb32_node *le;
+ struct quic_arng_node *new_node;
+ struct eb64_node *new;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT, qc);
+
+ new = NULL;
+ if (eb_is_empty(&arngs->root)) {
+ /* first range ever: just insert it */
+ new_node = quic_insert_new_range(qc, arngs, ar);
+ if (new_node)
+ ret = 1;
+
+ goto leave;
+ }
+
+ /* closest existing range starting at or below ar->first */
+ le = eb64_lookup_le(&arngs->root, ar->first);
+ if (!le) {
+ new_node = quic_insert_new_range(qc, arngs, ar);
+ if (!new_node)
+ goto leave;
+
+ new = &new_node->first;
+ }
+ else {
+ struct quic_arng_node *le_ar =
+ eb64_entry(le, struct quic_arng_node, first);
+
+ /* Already existing range */
+ if (le_ar->last >= ar->last) {
+ ret = 1;
+ }
+ else if (le_ar->last + 1 >= ar->first) {
+ /* contiguous or overlapping: extend the existing range */
+ le_ar->last = ar->last;
+ new = le;
+ new_node = le_ar;
+ }
+ else {
+ new_node = quic_insert_new_range(qc, arngs, ar);
+ if (!new_node)
+ goto leave;
+
+ new = &new_node->first;
+ }
+ }
+
+ /* Verify that the new inserted node does not overlap the nodes
+ * which follow it.
+ */
+ if (new) {
+ struct eb64_node *next;
+ struct quic_arng_node *next_node;
+
+ /* merge every following range that touches or overlaps <new> */
+ while ((next = eb64_next(new))) {
+ next_node =
+ eb64_entry(next, struct quic_arng_node, first);
+ if (new_node->last + 1 < next_node->first.key)
+ break;
+
+ if (next_node->last > new_node->last)
+ new_node->last = next_node->last;
+ eb64_delete(next);
+ pool_free(pool_head_quic_arng, next_node);
+ /* Decrement the size of these ranges. */
+ arngs->sz--;
+ }
+ }
+
+ ret = 1;
+ leave:
+ /* keep the cached encoded size in sync with the tree contents */
+ quic_arngs_set_enc_sz(qc, arngs);
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+ return ret;
+}
+
+/* Remove already sent ranges of acknowledged packet numbers from
+ * <pktns> packet number space tree below <largest_acked_pn> possibly
+ * updating the range which contains <largest_acked_pn>.
+ * Never fails.
+ */
+void qc_treat_ack_of_ack(struct quic_conn *qc, struct quic_arngs *arngs,
+ int64_t largest_acked_pn)
+{
+ struct eb64_node *ar, *next_ar;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ ar = eb64_first(&arngs->root);
+ while (ar) {
+ struct quic_arng_node *ar_node;
+
+ next_ar = eb64_next(ar);
+ ar_node = eb64_entry(ar, struct quic_arng_node, first);
+
+ /* ranges are visited in ascending order; stop at the first one
+ * entirely above <largest_acked_pn>.
+ */
+ if ((int64_t)ar_node->first.key > largest_acked_pn) {
+ TRACE_DEVEL("first.key > largest", QUIC_EV_CONN_PRSAFRM, qc);
+ break;
+ }
+
+ if (largest_acked_pn < ar_node->last) {
+ /* <largest_acked_pn> falls inside this range: truncate
+ * its lower bound (re-keying requires delete/insert).
+ */
+ eb64_delete(ar);
+ ar_node->first.key = largest_acked_pn + 1;
+ eb64_insert(&arngs->root, ar);
+ break;
+ }
+
+ /* Do not empty the tree: the first ACK range contains the
+ * largest acknowledged packet number.
+ */
+ if (arngs->sz == 1)
+ break;
+
+ eb64_delete(ar);
+ pool_free(pool_head_quic_arng, ar_node);
+ arngs->sz--;
+ ar = next_ar;
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
diff --git a/src/quic_cc.c b/src/quic_cc.c
new file mode 100644
index 0000000..8fd99d3
--- /dev/null
+++ b/src/quic_cc.c
@@ -0,0 +1,49 @@
+/*
+ * Congestion controller handling.
+ *
+ * This file contains definitions for QUIC congestion control.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <haproxy/quic_cc.h>
+
+struct quic_cc_algo *default_quic_cc_algo = &quic_cc_algo_cubic;
+
+/*
+ * Initialize <cc> congestion control with <algo> as algorithm and attach it
+ * to <qc> QUIC connection.
+ */
+void quic_cc_init(struct quic_cc *cc,
+ struct quic_cc_algo *algo, struct quic_conn *qc)
+{
+ cc->qc = qc;
+ cc->algo = algo;
+ /* the init callback is optional */
+ if (cc->algo->init)
+ (cc->algo->init(cc));
+}
+
+/* Send <ev> event to <cc> congestion controller. */
+/* Send <ev> event to <cc> congestion controller by dispatching to the
+ * selected algorithm's mandatory event callback.
+ */
+void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+ cc->algo->event(cc, ev);
+}
+
+/* Append a textual dump of <cc>'s internal state to <buf>, as implemented by
+ * the selected algorithm's state_trace callback (used by traces).
+ */
+void quic_cc_state_trace(struct buffer *buf, const struct quic_cc *cc)
+{
+ cc->algo->state_trace(buf, cc);
+}
diff --git a/src/quic_cc_cubic.c b/src/quic_cc_cubic.c
new file mode 100644
index 0000000..76a62ac
--- /dev/null
+++ b/src/quic_cc_cubic.c
@@ -0,0 +1,542 @@
+#include <haproxy/quic_cc.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/ticks.h>
+#include <haproxy/trace.h>
+
+/* IMPORTANT NOTE about the units defined by the RFC 9438
+ * (CUBIC for Fast and Long-Distance Networks):
+ *
+ * RFC 9438 4.1. Definitions:
+ * The unit of all window sizes in this document is segments of the SMSS, and
+ * the unit of all times is seconds. Implementations can use bytes to express
+ * window sizes, which would require factoring in the SMSS wherever necessary
+ * and replacing segments_acked (Figure 4) with the number of acknowledged
+ * bytes.
+ */
+
+/* So, this is the reason why here in this implementation each time a number
+ * of segments is used (typically a congestion window value), its value is
+ * multiplied by the MTU value.
+ */
+
+/* This source file is highly inspired from Linux kernel source file
+ * implementation for TCP Cubic. In fact, we have no choice if we do
+ * not want to use any floating point operations to be fast!
+ * (See net/ipv4/tcp_cubic.c)
+ */
+
+/* Constants definitions:
+ * CUBIC_BETA_SCALED refers to the scaled value of RFC 9438 beta_cubic variable.
+ * CUBIC_C_SCALED refers to the scaled value of RFC 9438 C variable.
+ */
+
+/* The right shifting value to apply to scaled values to get its real value. */
+#define CUBIC_SCALE_FACTOR_SHIFT 10
+
+/* CUBIC multiplicative decrease factor as described in RFC 9438 section 4.6 */
+#define CUBIC_BETA_SCALED 717 /* beta_cubic = 0.7 (constant) */
+
+/* CUBIC C constant that determines the aggressiveness of CUBIC in competing
+ * with other congestion control algorithms in high-BDP networks.
+ */
+#define CUBIC_C_SCALED 410 /* RFC 9438 C = 0.4 segment/seconds^3
+ * or 410 mB/s^3 in this implementation.
+ */
+
+/* The scaled value of 1 */
+#define CUBIC_ONE_SCALED (1 << CUBIC_SCALE_FACTOR_SHIFT)
+
+/* The maximum time value which may be cubed and multiplied by CUBIC_C_SCALED */
+#define CUBIC_TIME_LIMIT 355535ULL /* ms */
+
+/* By connection CUBIC algorithm state. Note that the current congestion window
+ * value is not stored in this structure.
+ */
+struct cubic {
+ /* QUIC_CC_ST_* state values. */
+ uint32_t state;
+ /* Slow start threshold (in bytes) */
+ uint32_t ssthresh;
+ /* Remaining number of acknowledged bytes between two ACK for CUBIC congestion
+ * control window (in bytes).
+ */
+ uint32_t remaining_inc;
+ /* Start time of at which the current avoidance stage started (in ms). */
+ uint32_t t_epoch;
+ /* The window to reach for each recovery period during a concave region (in bytes). */
+ uint32_t W_target;
+ /* The time period to reach W_target during a concave region (in ms). */
+ uint32_t K;
+ /* The last window maximum reached (in bytes). */
+ uint32_t last_w_max;
+ /* Estimated value of the Reno congestion window in the TCP-friendly region (in bytes). */
+ uint32_t W_est;
+ /* Remaining number of acknowledged bytes between two ACKs for estimated
+ * TCP-Reno congestion control window (in bytes).
+ */
+ uint32_t remaining_W_est_inc;
+ /* Start time of recovery period (used to avoid re-entering this state, if already
+ * in recovery period) (in ms).
+ */
+ uint32_t recovery_start_time;
+};
+
+/* Reset the per-connection CUBIC state: back to slow start with an infinite
+ * slow-start threshold and all epoch/target/estimation variables cleared.
+ */
+static void quic_cc_cubic_reset(struct quic_cc *cc)
+{
+ struct cubic *c = quic_cc_priv(cc);
+
+ TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+ c->state = QUIC_CC_ST_SS;
+ c->ssthresh = QUIC_CC_INFINITE_SSTHESH;
+ c->remaining_inc = 0;
+ c->remaining_W_est_inc = 0;
+ c->t_epoch = 0;
+ c->W_target = 0;
+ c->K = 0;
+ c->last_w_max = 0;
+ c->W_est = 0;
+ c->recovery_start_time = 0;
+ TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* CUBIC init callback: simply resets the state. Always returns 1 (success). */
+static int quic_cc_cubic_init(struct quic_cc *cc)
+{
+ quic_cc_cubic_reset(cc);
+ return 1;
+}
+
+/* Cubic root.
+ * Highly inspired from Linux kernel sources.
+ * See net/ipv4/tcp_cubic.c
+ */
+/* Cubic root.
+ * Highly inspired from Linux kernel sources.
+ * See net/ipv4/tcp_cubic.c
+ */
+static uint32_t cubic_root(uint64_t val)
+{
+ uint32_t x, b, shift;
+
+ /* lookup table of rounded cube roots for small values */
+ static const uint8_t v[] = {
+ 0, 54, 54, 54, 118, 118, 118, 118,
+ 123, 129, 134, 138, 143, 147, 151, 156,
+ 157, 161, 164, 168, 170, 173, 176, 179,
+ 181, 185, 187, 190, 192, 194, 197, 199,
+ 200, 202, 204, 206, 209, 211, 213, 215,
+ 217, 219, 221, 222, 224, 225, 227, 229,
+ 231, 232, 234, 236, 237, 239, 240, 242,
+ 244, 245, 246, 248, 250, 251, 252, 254,
+ };
+
+ if (!val || (b = my_flsl(val)) < 7) {
+ /* val in [0..63] */
+ return ((uint32_t)v[(uint32_t)val] + 35) >> 6;
+ }
+
+ /* initial estimate from the table on the top bits of <val> */
+ b = ((b * 84) >> 8) - 1;
+ shift = (val >> (b * 3));
+
+ x = ((uint32_t)(((uint32_t)v[shift] + 10) << b)) >> 6;
+
+ /* one Newton-Raphson refinement iteration */
+ x = 2 * x + (uint32_t)(val / ((uint64_t)x * (uint64_t)(x - 1)));
+ x = ((x * 341) >> 10);
+
+ return x;
+}
+
+/*
+ * RFC 9438 3.1. Principle 1 for the CUBIC Increase Function
+ *
+ * For better network utilization and stability, CUBIC [HRX08] uses a cubic
+ * window increase function in terms of the elapsed time from the last
+ * congestion event. While most congestion control algorithms that provide
+ * alternatives to Reno increase the congestion window using convex functions,
+ * CUBIC uses both the concave and convex profiles of a cubic function for
+ * window growth.
+ *
+ * After a window reduction in response to a congestion event detected by
+ * duplicate acknowledgments (ACKs), Explicit Congestion Notification-Echo
+ * (ECN-Echo (ECE)) ACKs [RFC3168], RACK-TLP for TCP [RFC8985], or QUIC loss
+ * detection [RFC9002], CUBIC remembers the congestion window size at which it
+ * received the congestion event and performs a multiplicative decrease of the
+ * congestion window. When CUBIC enters into congestion avoidance, it starts to
+ * increase the congestion window using the concave profile of the cubic
+ * function. The cubic function is set to have its plateau at the remembered
+ * congestion window size, so that the concave window increase continues until
+ * then. After that, the cubic function turns into a convex profile and the
+ * convex window increase begins.
+ *
+ * W_cubic(time) (bytes)
+ * ^ convex region
+ * | <------------------------->
+ * | . +
+ * | . +
+ * | . +
+ * | . +
+ * | . + ^
+ * | . + | W_cubic_t
+ * | . + |
+ * | . + |
+ * W_target |-----------+--------------------------+------------------------+
+ * (W_max) | +. + . t
+ * | + . + .
+ * | + . + .
+ * | + . + .
+ * | + . + .
+ * | .+ .
+ * | + .
+ * | + .
+ * | + .
+ * | . .
+ * | . .
+ * | . .
+ * +-----------+--------------------------+-+------------------------> time (s)
+ * 0 t_epoch (t_epoch + K)
+ * <-------------------------->
+ * . concave region
+ * .
+ * congestion
+ * event
+ *
+ * RFC 9438 4.2. Window Increase Function:
+ *
+ * W_cubic(t) = C*(t-K)^3 + W_max (Figure 1)
+ * K = cubic_root((W_max - cwnd_epoch)/C) (Figure 2)
+ *
+ * +--------------------------------------------------------------------+
+ * | RFC 9438 definitions | Code variables |
+ * +--------------------------------------------------------------------+
+ * | C (segments/s^3) | CUBIC_C_SCALED (mB/s^3) |
+ * +--------------------------------------------------------------------+
+ * | W_max (segments) | c->last_w_max - path->cwnd (bytes) |
+ * +--------------------------------------------------------------------+
+ * | K (s) | c->K (ms) |
+ * +--------------------------------------------------------------------+
+ * | beta_cubic (constant) | CUBIC_BETA_SCALED (constant) |
+ * +--------------------------------------------------------------------+
+ */
+static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
+{
+ struct cubic *c = quic_cc_priv(cc);
+ struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
+ /* The elapsed time since the start of the congestion event. */
+ uint32_t elapsed_time;
+ /* Target value of the congestion window. */
+ uint32_t target;
+ /* The time at which the congestion window will be computed based
+ * on the cubic increase function.
+ */
+ uint64_t t;
+ /* The computed value of the congestion window at time t based on the cubic
+ * increase function.
+ */
+ uint64_t W_cubic_t;
+ uint32_t inc, inc_diff;
+
+ TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+ if (!c->t_epoch) {
+ c->t_epoch = now_ms;
+ if (c->last_w_max <= path->cwnd) {
+ c->K = 0;
+ c->W_target = path->cwnd;
+ }
+ else {
+ /* K value computing (in seconds):
+ * K = cubic_root((W_max - cwnd_epoch)/C) (Figure 2)
+ * Note that K is stored in milliseconds.
+ */
+ c->K = cubic_root(((c->last_w_max - path->cwnd) << CUBIC_SCALE_FACTOR_SHIFT) / (CUBIC_C_SCALED * path->mtu));
+ /* Convert to milliseconds. */
+ c->K *= 1000;
+ c->W_target = c->last_w_max;
+ }
+
+ c->W_est = path->cwnd;
+ c->remaining_inc = 0;
+ c->remaining_W_est_inc = 0;
+ }
+
+ elapsed_time = now_ms + path->loss.rtt_min - c->t_epoch;
+ if (elapsed_time < c->K) {
+ t = c->K - elapsed_time;
+ }
+ else {
+ t = elapsed_time - c->K;
+ }
+
+ if (t > CUBIC_TIME_LIMIT) {
+ /* TODO : should not happen if we handle the case
+ * of very late acks receipt. This must be handled as a congestion
+ * control event: a very late ack should trigger a congestion
+ * control algorithm reset.
+ */
+ quic_cc_cubic_reset(cc);
+ goto leave;
+ }
+
+ /* Compute W_cubic_t at t time. */
+ W_cubic_t = CUBIC_C_SCALED * path->mtu;
+ W_cubic_t = (W_cubic_t * t) / 1000;
+ W_cubic_t = (W_cubic_t * t) / 1000;
+ W_cubic_t = (W_cubic_t * t) / 1000;
+ W_cubic_t >>= CUBIC_SCALE_FACTOR_SHIFT;
+ if (elapsed_time < c->K)
+ target = c->W_target - W_cubic_t;
+ else
+ target = c->W_target + W_cubic_t;
+
+ if (target > path->cwnd) {
+ /* Concave region */
+
+ /* RFC 9438 4.4. Concave Region
+ *
+ * When receiving a new ACK in congestion avoidance, if CUBIC is not in
+ * the Reno-friendly region and cwnd is less than Wmax, then CUBIC is
+ * in the concave region. In this region, cwnd MUST be incremented by
+ * (target - cwnd) / cwnd.
+ */
+ inc_diff = c->remaining_inc + path->mtu * (target - path->cwnd);
+ c->remaining_inc = inc_diff % path->cwnd;
+ inc = inc_diff / path->cwnd;
+ }
+ else {
+ /* Convex region: very small increment */
+
+ /* RFC 9438 4.5. Convex Region
+ *
+ * When receiving a new ACK in congestion avoidance, if CUBIC is not in
+ * the Reno-friendly region and cwnd is larger than or equal to Wmax,
+ * then CUBIC is in the convex region. The convex region indicates that
+ * the network conditions might have changed since the last congestion
+ * event, possibly implying more available bandwidth after some flow
+ * departures. Since the Internet is highly asynchronous, some amount
+ * of perturbation is always possible without causing a major change in
+	 * available bandwidth. Unless the cwnd is overridden by the AIMD window
+ * increase, CUBIC will behave cautiously when operating in this region.
+ * The convex profile aims to increase the window very slowly at the
+ * beginning when cwnd is around Wmax and then gradually increases its
+ * rate of increase. This region is also called the "maximum probing
+ * phase", since CUBIC is searching for a new Wmax. In this region,
+ * cwnd MUST be incremented by (target - cwnd) / cwnd) for each received
+ * new ACK, where target is calculated as described in Section 4.2.
+ */
+ inc_diff = c->remaining_inc + path->mtu;
+ c->remaining_inc = inc_diff % (100 * path->cwnd);
+ inc = inc_diff / (100 * path->cwnd);
+ }
+
+ inc_diff = c->remaining_W_est_inc + path->mtu * acked;
+ c->W_est += inc_diff / path->cwnd;
+ c->remaining_W_est_inc = inc_diff % path->cwnd;
+
+ /* TCP friendliness :
+ * RFC 9438 4.3. Reno-Friendly Region
+ *
+ * Reno performs well in certain types of networks -- for example, under
+ * short RTTs and small bandwidths (or small BDPs). In these networks,
+ * CUBIC remains in the Reno-friendly region to achieve at least the same
+ * throughput as Reno.
+ *
+ * When receiving a new ACK in congestion avoidance (where cwnd could be
+ * greater than or less than Wmax), CUBIC checks whether Wcubic(t) is less
+ * than West. If so, CUBIC is in the Reno-friendly region and cwnd SHOULD
+ * be set to West at each reception of a new ACK.
+ *
+ * West is set equal to cwnd_epoch at the start of the congestion avoidance
+ * stage. After that, on every new ACK, West is updated using Figure 4.
+ * Note that this equation uses segments_acked and cwnd is measured in
+ * segments. An implementation that measures cwnd in bytes should adjust the
+ * equation accordingly using the number of acknowledged bytes and the SMSS.
+ * Also note that this equation works for connections with enabled or
+ * disabled delayed ACKs [RFC5681], as segments_acked will be different based
+ * on the segments actually acknowledged by a new ACK.
+ *
+ * Figure 4 : West = West + alpha_cubic * (segments_acked / cwnd)
+ *
+ * Once West has grown to reach the cwnd at the time of most recently
+ * setting ssthresh -- that is, West >= cwndprior -- the sender SHOULD set
+ * alpha_cubic to 1 to ensure that it can achieve the same congestion window
+ * increment rate as Reno, which uses AIMD(1, 0.5).
+ */
+ if (c->W_est > path->cwnd) {
+ uint32_t W_est_inc = path->mtu * (c->W_est - path->cwnd) / path->cwnd;
+ if (W_est_inc > inc)
+ inc = W_est_inc;
+ }
+
+ path->cwnd += inc;
+ path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+ path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Enter (or re-enter) slow start: reset the whole CUBIC private state so
+ * that the congestion window restarts from its initial value.
+ */
+static void quic_cc_cubic_slow_start(struct quic_cc *cc)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	quic_cc_cubic_reset(cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Handle a congestion event: remember the window maximum (applying the
+ * RFC 9438 fast convergence heuristic), apply the multiplicative decrease
+ * to cwnd and switch the algorithm to the recovery period state.
+ */
+static void quic_enter_recovery(struct quic_cc *cc)
+{
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
+	struct cubic *c = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	/* Reset the epoch so that the next ACK handled in congestion
+	 * avoidance starts a new cubic growth period.
+	 */
+	c->t_epoch = 0;
+	c->recovery_start_time = now_ms;
+
+	/* RFC 9438 4.7. Fast Convergence
+	 *
+	 * To improve convergence speed, CUBIC uses a heuristic. When a new flow
+	 * joins the network, existing flows need to give up some of their bandwidth
+	 * to allow the new flow some room for growth if the existing flows have
+	 * been using all the network bandwidth. To speed up this bandwidth release
+	 * by existing flows, the following fast convergence mechanism SHOULD be
+	 * implemented. With fast convergence, when a congestion event occurs, Wmax
+	 * is updated as follows, before the window reduction described in Section
+	 * 4.6.
+	 *
+	 * if cwnd < Wmax and fast convergence enabled, further reduce Wmax:
+	 *     Wmax = cwnd * (1 + beta_cubic)
+	 * otherwise, remember cwnd before reduction:
+	 *     Wmax = cwnd
+	 */
+	if (path->cwnd < c->last_w_max) {
+		/* (1 + beta_cubic) * path->cwnd / 2 */
+		c->last_w_max = (path->cwnd * (CUBIC_ONE_SCALED + CUBIC_BETA_SCALED) / 2) >> CUBIC_SCALE_FACTOR_SHIFT;
+	}
+	else {
+		c->last_w_max = path->cwnd;
+	}
+
+	/* Multiplicative decrease: ssthresh = beta_cubic * cwnd, never going
+	 * below the minimal window.
+	 */
+	c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
+	path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->min_cwnd);
+	c->state = QUIC_CC_ST_RP;
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+}
+
+/* Congestion slow-start callback: grow cwnd exponentially on ACKs until
+ * the slow start threshold is reached, enter recovery on loss.
+ */
+static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct quic_cc_path *path = container_of(cc, struct quic_cc_path, cc);
+	struct cubic *c = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, ev);
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+		/* Increase cwnd by the acked bytes, guarding against overflow
+		 * before the addition, and clamp it to the maximal window.
+		 */
+		if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
+			path->cwnd += ev->ack.acked;
+			path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+		}
+		/* Exit to congestion avoidance if slow start threshold is reached. */
+		if (path->cwnd >= c->ssthresh)
+			c->state = QUIC_CC_ST_CA;
+		path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+		break;
+
+	case QUIC_CC_EVT_LOSS:
+		quic_enter_recovery(cc);
+		break;
+
+	case QUIC_CC_EVT_ECN_CE:
+		/* TODO */
+		break;
+	}
+
+	/* Removed the unused 'out:' label: no 'goto out' exists in this
+	 * function, which triggered a -Wunused-label warning.
+	 */
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Congestion avoidance callback: grow cwnd along the cubic curve on ACKs
+ * (see quic_cubic_update()), enter recovery on loss.
+ */
+static void quic_cc_cubic_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, ev);
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+		quic_cubic_update(cc, ev->ack.acked);
+		break;
+	case QUIC_CC_EVT_LOSS:
+		quic_enter_recovery(cc);
+		break;
+	case QUIC_CC_EVT_ECN_CE:
+		/* TODO */
+		break;
+	}
+
+	/* Removed the unused 'out:' label: no 'goto out' exists in this
+	 * function, which triggered a -Wunused-label warning.
+	 */
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Recovery period callback: leave recovery and resume congestion avoidance
+ * as soon as a packet sent after recovery began is acknowledged.
+ */
+static void quic_cc_cubic_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct cubic *c = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc, ev);
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, ev, cc);
+
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+		/* RFC 9002 7.3.2. Recovery
+		 * A recovery period ends and the sender enters congestion avoidance when a
+		 * packet sent during the recovery period is acknowledged.
+		 */
+		if (tick_is_le(ev->ack.time_sent, c->recovery_start_time)) {
+			/* ACK for a packet sent before recovery began: stay put. */
+			TRACE_PROTO("CC cubic (still in recov. period)", QUIC_EV_CONN_CC, cc->qc);
+			goto leave;
+		}
+
+		c->state = QUIC_CC_ST_CA;
+		c->recovery_start_time = TICK_ETERNITY;
+		break;
+	case QUIC_CC_EVT_LOSS:
+		/* Already in recovery: further losses are ignored. */
+		break;
+	case QUIC_CC_EVT_ECN_CE:
+		/* TODO */
+		break;
+	}
+
+ leave:
+	TRACE_PROTO("CC cubic", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+}
+
+/* Per-state event handlers, indexed by the QUIC_CC_ST_* state value. */
+static void (*quic_cc_cubic_state_cbs[])(struct quic_cc *cc,
+                                         struct quic_cc_event *ev) = {
+	[QUIC_CC_ST_SS] = quic_cc_cubic_ss_cb,
+	[QUIC_CC_ST_CA] = quic_cc_cubic_ca_cb,
+	[QUIC_CC_ST_RP] = quic_cc_cubic_rp_cb,
+};
+
+/* Main event entry point: dispatch <ev> to the handler matching the
+ * current congestion control state.
+ */
+static void quic_cc_cubic_event(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct cubic *c = quic_cc_priv(cc);
+
+	/* Plain call, no 'return': returning a void expression from a void
+	 * function is a constraint violation in ISO C (C11 6.8.6.4).
+	 */
+	quic_cc_cubic_state_cbs[c->state](cc, ev);
+}
+
+/* Append a human-readable dump of the cubic state to <buf> (used by
+ * traces/"show quic"). rpst is the remaining recovery period time in ms,
+ * or -1 when not in recovery.
+ */
+static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+	struct cubic *c = quic_cc_priv(cc);
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
+	              quic_cc_state_str(c->state),
+	              (unsigned long long)path->cwnd,
+	              (unsigned long long)path->mcwnd,
+	              (int)c->ssthresh,
+	              !tick_isset(c->recovery_start_time) ? -1 :
+	              TICKS_TO_MS(tick_remain(c->recovery_start_time, now_ms)));
+}
+
+/* CUBIC (RFC 9438) congestion control algorithm operations. */
+struct quic_cc_algo quic_cc_algo_cubic = {
+	.type        = QUIC_CC_ALGO_TP_CUBIC,
+	.init        = quic_cc_cubic_init,
+	.event       = quic_cc_cubic_event,
+	.slow_start  = quic_cc_cubic_slow_start,
+	.state_trace = quic_cc_cubic_state_trace,
+};
diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c
new file mode 100644
index 0000000..405b0ba
--- /dev/null
+++ b/src/quic_cc_newreno.c
@@ -0,0 +1,220 @@
+/*
+ * NewReno congestion control algorithm.
+ *
+ * This file contains definitions for QUIC congestion control.
+ *
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <haproxy/api-t.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/quic_cc.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+/* NewReno private state, stored in the quic_cc private area. */
+struct nr {
+	uint32_t state;               /* QUIC_CC_ST_* current algorithm state */
+	uint32_t ssthresh;            /* slow start threshold */
+	uint32_t recovery_start_time; /* tick when the current recovery period began */
+	uint32_t remain_acked;        /* remainder carried over between CA window increases */
+};
+
+/* Initialize the NewReno private state: start in slow start with an
+ * unlimited slow start threshold and no pending remainder.
+ * Always returns 1.
+ */
+static int quic_cc_nr_init(struct quic_cc *cc)
+{
+	struct nr *priv = quic_cc_priv(cc);
+
+	priv->remain_acked = 0;
+	priv->recovery_start_time = 0;
+	priv->ssthresh = QUIC_CC_INFINITE_SSTHESH;
+	priv->state = QUIC_CC_ST_SS;
+
+	return 1;
+}
+
+/* Re-enter slow start state: collapse the congestion window back to its
+ * minimal value and forget any running recovery period.
+ */
+static void quic_cc_nr_slow_start(struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	path->cwnd = path->min_cwnd;
+	/* Re-entering slow start state. */
+	nr->state = QUIC_CC_ST_SS;
+	/* Recovery start time reset */
+	nr->recovery_start_time = 0;
+}
+
+/* Enter a recovery period: halve the congestion window (classic Reno
+ * multiplicative decrease), never going below the minimal window, and
+ * record the recovery start time.
+ */
+static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	nr->recovery_start_time = now_ms;
+	nr->ssthresh = path->cwnd >> 1;
+	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
+	nr->state = QUIC_CC_ST_RP;
+}
+
+/* Slow start callback: grow cwnd by the acked bytes until the slow start
+ * threshold is crossed, enter recovery on loss.
+ */
+static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
+	path = container_of(cc, struct quic_cc_path, cc);
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+		/* Exponential growth: each acked byte opens one more byte of window. */
+		path->cwnd += ev->ack.acked;
+		path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+		path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+		/* Exit to congestion avoidance if slow start threshold is reached. */
+		if (path->cwnd > nr->ssthresh)
+			nr->state = QUIC_CC_ST_CA;
+		break;
+
+	case QUIC_CC_EVT_LOSS:
+		quic_cc_nr_enter_recovery(cc);
+		break;
+
+	case QUIC_CC_EVT_ECN_CE:
+		/* XXX TO DO XXX */
+		break;
+	}
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Congestion avoidance callback: additive increase of roughly one MTU per
+ * RTT on ACKs, recovery entry on loss.
+ */
+static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
+	path = container_of(cc, struct quic_cc_path, cc);
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+	{
+		uint64_t acked;
+
+		/* Increase the congestion window by (acked / cwnd) scaled by
+		 * the MTU, carrying the division remainder over to the next
+		 * ACK so that no precision is lost.
+		 */
+		acked = ev->ack.acked * path->mtu + nr->remain_acked;
+		nr->remain_acked = acked % path->cwnd;
+		path->cwnd += acked / path->cwnd;
+		path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+		path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+		break;
+	}
+
+	case QUIC_CC_EVT_LOSS:
+		quic_cc_nr_enter_recovery(cc);
+		break;
+
+	case QUIC_CC_EVT_ECN_CE:
+		/* XXX TO DO XXX */
+		break;
+	}
+
+	/* Removed the unused 'out:' label: no 'goto out' exists in this
+	 * function, which triggered a -Wunused-label warning.
+	 */
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, NULL, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Recovery period callback: leave recovery and resume congestion avoidance
+ * as soon as a packet sent after recovery began is acknowledged.
+ */
+static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
+	path = container_of(cc, struct quic_cc_path, cc);
+	switch (ev->type) {
+	case QUIC_CC_EVT_ACK:
+		/* RFC 9002 7.3.2. Recovery
+		 * A recovery period ends and the sender enters congestion avoidance when a
+		 * packet sent during the recovery period is acknowledged.
+		 */
+		if (tick_is_le(ev->ack.time_sent, nr->recovery_start_time)) {
+			TRACE_PROTO("CC reno (still in recovery period)", QUIC_EV_CONN_CC, cc->qc, ev);
+			goto leave;
+		}
+
+		nr->state = QUIC_CC_ST_CA;
+		nr->recovery_start_time = TICK_ETERNITY;
+		path->cwnd = nr->ssthresh;
+		break;
+	case QUIC_CC_EVT_LOSS:
+		/* Do nothing */
+		break;
+	case QUIC_CC_EVT_ECN_CE:
+		/* XXX TO DO XXX */
+		break;
+	}
+
+ leave:
+	TRACE_PROTO("CC reno", QUIC_EV_CONN_CC, cc->qc, ev);
+	/* Fixed: this was TRACE_ENTER at function exit; every sibling
+	 * callback ends with TRACE_LEAVE.
+	 */
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, ev);
+}
+/* Append a human-readable dump of the NewReno state to <buf> (used by
+ * traces/"show quic"). rpst is the remaining recovery period time in ms,
+ * or -1 when not in recovery.
+ */
+static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+	struct nr *nr = quic_cc_priv(cc);
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
+	              quic_cc_state_str(nr->state),
+	              (unsigned long long)path->cwnd,
+	              (unsigned long long)path->mcwnd,
+	              (long)nr->ssthresh,
+	              !tick_isset(nr->recovery_start_time) ? -1 :
+	              TICKS_TO_MS(tick_remain(nr->recovery_start_time, now_ms)),
+	              (unsigned long long)path->loss.nb_lost_pkt);
+}
+
+/* Per-state event handlers, indexed by the QUIC_CC_ST_* state value. */
+static void (*quic_cc_nr_state_cbs[])(struct quic_cc *cc,
+                                      struct quic_cc_event *ev) = {
+	[QUIC_CC_ST_SS] = quic_cc_nr_ss_cb,
+	[QUIC_CC_ST_CA] = quic_cc_nr_ca_cb,
+	[QUIC_CC_ST_RP] = quic_cc_nr_rp_cb,
+};
+
+/* Main event entry point: dispatch <ev> to the handler matching the
+ * current congestion control state.
+ */
+static void quic_cc_nr_event(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	struct nr *nr = quic_cc_priv(cc);
+
+	/* Plain call, no 'return': returning a void expression from a void
+	 * function is a constraint violation in ISO C (C11 6.8.6.4).
+	 */
+	quic_cc_nr_state_cbs[nr->state](cc, ev);
+}
+
+/* NewReno congestion control algorithm operations. */
+struct quic_cc_algo quic_cc_algo_nr = {
+	.type        = QUIC_CC_ALGO_TP_NEWRENO,
+	.init        = quic_cc_nr_init,
+	.event       = quic_cc_nr_event,
+	.slow_start  = quic_cc_nr_slow_start,
+	.state_trace = quic_cc_nr_state_trace,
+};
+
diff --git a/src/quic_cc_nocc.c b/src/quic_cc_nocc.c
new file mode 100644
index 0000000..6e5cff9
--- /dev/null
+++ b/src/quic_cc_nocc.c
@@ -0,0 +1,76 @@
+/*
+ * Fake congestion control algorithm which does nothing except initializing
+ * the congestion control window to a fixed value.
+ *
+ */
+
+#include <haproxy/api-t.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+/* Initialize the fake congestion control: pin the congestion window to
+ * its maximal value once and for all. Always returns 1.
+ */
+static int quic_cc_nocc_init(struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	path->cwnd = path->max_cwnd;
+	return 1;
+}
+
+/* Slow start (re-)entry: deliberately a no-op, the window is fixed. */
+static void quic_cc_nocc_slow_start(struct quic_cc *cc)
+{
+}
+
+/* Slow start callback: only traces the event, the window never moves. */
+static void quic_cc_nocc_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC nocc", QUIC_EV_CONN_CC, cc->qc, ev, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Congestion avoidance callback: only traces the event, the window never moves. */
+static void quic_cc_nocc_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC nocc", QUIC_EV_CONN_CC, cc->qc, ev, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Recovery period callback: only traces the event, the window never moves. */
+static void quic_cc_nocc_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CC, cc->qc);
+	TRACE_PROTO("CC nocc", QUIC_EV_CONN_CC, cc->qc, ev, cc);
+	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
+}
+
+/* Append the (fixed) congestion window to <buf> for traces/"show quic". */
+static void quic_cc_nocc_state_trace(struct buffer *buf, const struct quic_cc *cc)
+{
+	struct quic_cc_path *path;
+
+	path = container_of(cc, struct quic_cc_path, cc);
+	chunk_appendf(buf, " cwnd=%llu", (unsigned long long)path->cwnd);
+}
+
+/* Per-state event handlers; all of them are trace-only no-ops. */
+static void (*quic_cc_nocc_state_cbs[])(struct quic_cc *cc,
+                                        struct quic_cc_event *ev) = {
+	[QUIC_CC_ST_SS] = quic_cc_nocc_ss_cb,
+	[QUIC_CC_ST_CA] = quic_cc_nocc_ca_cb,
+	[QUIC_CC_ST_RP] = quic_cc_nocc_rp_cb,
+};
+
+/* Main event entry point: always dispatch to the slow start handler
+ * whatever the event, since this fake algorithm keeps no state.
+ */
+static void quic_cc_nocc_event(struct quic_cc *cc, struct quic_cc_event *ev)
+{
+	/* Plain call, no 'return': returning a void expression from a void
+	 * function is a constraint violation in ISO C (C11 6.8.6.4).
+	 */
+	quic_cc_nocc_state_cbs[QUIC_CC_ST_SS](cc, ev);
+}
+
+/* "No congestion control" algorithm operations (fixed maximal window). */
+struct quic_cc_algo quic_cc_algo_nocc = {
+	.type        = QUIC_CC_ALGO_TP_NOCC,
+	.init        = quic_cc_nocc_init,
+	.event       = quic_cc_nocc_event,
+	.slow_start  = quic_cc_nocc_slow_start,
+	.state_trace = quic_cc_nocc_state_trace,
+};
+
diff --git a/src/quic_cid.c b/src/quic_cid.c
new file mode 100644
index 0000000..19c1f07
--- /dev/null
+++ b/src/quic_cid.c
@@ -0,0 +1,286 @@
+#include <import/eb64tree.h>
+#include <import/ebmbtree.h>
+
+#include <haproxy/pool.h>
+#include <haproxy/quic_cid.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_rx-t.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+#include <haproxy/xxhash.h>
+
+/* Initialize the stateless reset token attached to <conn_id> connection ID,
+ * deriving it with the CID value itself used as salt.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_stateless_reset_token_init(struct quic_connection_id *conn_id)
+{
+	/* Output secret */
+	unsigned char *token = conn_id->stateless_reset_token;
+	size_t tokenlen = sizeof conn_id->stateless_reset_token;
+	/* Salt */
+	const unsigned char *cid = conn_id->cid.data;
+	size_t cidlen = conn_id->cid.len;
+
+	return quic_stateless_reset_token_cpy(token, tokenlen, cid, cidlen);
+}
+
+/* Generate a CID directly derived from <orig> CID and <addr> address.
+ * The derivation is deterministic: the same <orig>/<addr> pair (and the
+ * same boot_seed) always produces the same CID.
+ *
+ * Returns the derived CID.
+ */
+struct quic_cid quic_derive_cid(const struct quic_cid *orig,
+                                const struct sockaddr_storage *addr)
+{
+	struct quic_cid cid;
+	const struct sockaddr_in *in;
+	const struct sockaddr_in6 *in6;
+	char *pos = trash.area; /* scratch buffer used to build the hash input */
+	size_t idx = 0;
+	uint64_t hash;
+	int i;
+
+	/* Prepare buffer for hash using original CID first. */
+	memcpy(pos, orig->data, orig->len);
+	idx += orig->len;
+
+	/* Concatenate client address (IP + port). */
+	switch (addr->ss_family) {
+	case AF_INET:
+		in = (struct sockaddr_in *)addr;
+
+		memcpy(&pos[idx], &in->sin_addr, sizeof(in->sin_addr));
+		idx += sizeof(in->sin_addr);
+		memcpy(&pos[idx], &in->sin_port, sizeof(in->sin_port));
+		idx += sizeof(in->sin_port);
+		break;
+
+	case AF_INET6:
+		in6 = (struct sockaddr_in6 *)addr;
+
+		memcpy(&pos[idx], &in6->sin6_addr, sizeof(in6->sin6_addr));
+		idx += sizeof(in6->sin6_addr);
+		memcpy(&pos[idx], &in6->sin6_port, sizeof(in6->sin6_port));
+		idx += sizeof(in6->sin6_port);
+		break;
+
+	default:
+		/* TODO to implement */
+		ABORT_NOW();
+	}
+
+	/* Avoid similar values between multiple haproxy process. */
+	memcpy(&pos[idx], boot_seed, sizeof(boot_seed));
+	idx += sizeof(boot_seed);
+
+	/* Hash the final buffer content. */
+	hash = XXH64(pos, idx, 0);
+
+	/* Spread the 64-bit hash over the CID bytes, most-significant byte
+	 * first: (sizeof(hash) * 7) - (8 * i) == 56 - 8*i for i in [0..7].
+	 */
+	for (i = 0; i < sizeof(hash); ++i)
+		cid.data[i] = hash >> ((sizeof(hash) * 7) - (8 * i));
+	cid.len = sizeof(hash);
+
+	return cid;
+}
+
+/* Allocate a new CID and attach it to <root> ebtree.
+ *
+ * If <orig> and <addr> params are non null, the new CID value is directly
+ * derived from them. Else a random value is generated. The CID is then marked
+ * with the current thread ID.
+ *
+ * Returns the new CID if succeeded, NULL if not.
+ */
+struct quic_connection_id *new_quic_cid(struct eb_root *root,
+                                        struct quic_conn *qc,
+                                        const struct quic_cid *orig,
+                                        const struct sockaddr_storage *addr)
+{
+	struct quic_connection_id *conn_id;
+
+	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+	/* Caller must set either none or both values. */
+	BUG_ON(!!orig != !!addr);
+
+	conn_id = pool_alloc(pool_head_quic_connection_id);
+	if (!conn_id) {
+		TRACE_ERROR("cid allocation failed", QUIC_EV_CONN_TXPKT, qc);
+		goto err;
+	}
+
+	conn_id->cid.len = QUIC_HAP_CID_LEN;
+
+	if (!orig) {
+		/* Prefer the configured generator when one is set, otherwise
+		 * fall back to random bytes.
+		 */
+		if (quic_newcid_from_hash64)
+			quic_newcid_from_hash64(conn_id->cid.data, conn_id->cid.len, qc->hash64,
+			                        global.cluster_secret, sizeof(global.cluster_secret));
+		else if (RAND_bytes(conn_id->cid.data, conn_id->cid.len) != 1) {
+			/* TODO: RAND_bytes() should be replaced */
+			TRACE_ERROR("RAND_bytes() failed", QUIC_EV_CONN_TXPKT, qc);
+			goto err;
+		}
+	}
+	else {
+		/* Derive the new CID value from original CID. */
+		conn_id->cid = quic_derive_cid(orig, addr);
+	}
+
+	if (quic_stateless_reset_token_init(conn_id) != 1) {
+		TRACE_ERROR("quic_stateless_reset_token_init() failed", QUIC_EV_CONN_TXPKT, qc);
+		goto err;
+	}
+
+	conn_id->qc = qc;
+	HA_ATOMIC_STORE(&conn_id->tid, tid);
+
+	conn_id->seq_num.key = qc ? qc->next_cid_seq_num++ : 0;
+	conn_id->retire_prior_to = 0;
+	/* insert the allocated CID in the quic_conn tree */
+	if (root)
+		eb64_insert(root, &conn_id->seq_num);
+
+	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+	return conn_id;
+
+ err:
+	/* conn_id may still be NULL here when the allocation itself failed */
+	pool_free(pool_head_quic_connection_id, conn_id);
+	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+	return NULL;
+}
+
+/* Retrieve the thread ID associated to QUIC connection ID <cid> of length
+ * <cid_len>. CID may be not found on the CID tree because it is an ODCID. In
+ * this case, it will be derived using client address <cli_addr> as hash
+ * parameter. However, this is done only if <pos> points to an INITIAL or 0RTT
+ * packet of length <len>.
+ *
+ * Returns the thread ID or a negative error code.
+ */
+int quic_get_cid_tid(const unsigned char *cid, size_t cid_len,
+                     const struct sockaddr_storage *cli_addr,
+                     unsigned char *pos, size_t len)
+{
+	struct quic_cid_tree *tree;
+	struct quic_connection_id *conn_id;
+	struct ebmb_node *node;
+
+	tree = &quic_cid_trees[_quic_cid_tree_idx(cid)];
+	HA_RWLOCK_RDLOCK(QC_CID_LOCK, &tree->lock);
+	node = ebmb_lookup(&tree->root, cid, cid_len);
+	HA_RWLOCK_RDUNLOCK(QC_CID_LOCK, &tree->lock);
+
+	if (!node) {
+		struct quic_cid orig, derive_cid;
+		struct quic_rx_packet pkt;
+
+		if (!qc_parse_hd_form(&pkt, &pos, pos + len))
+			goto not_found;
+
+		/* Only INITIAL/0-RTT packets may carry a reused ODCID. */
+		if (pkt.type != QUIC_PACKET_TYPE_INITIAL &&
+		    pkt.type != QUIC_PACKET_TYPE_0RTT) {
+			goto not_found;
+		}
+
+		memcpy(orig.data, cid, cid_len);
+		orig.len = cid_len;
+		derive_cid = quic_derive_cid(&orig, cli_addr);
+
+		tree = &quic_cid_trees[quic_cid_tree_idx(&derive_cid)];
+		HA_RWLOCK_RDLOCK(QC_CID_LOCK, &tree->lock);
+		/* Look up the derived CID value: this is what was inserted in
+		 * the tree (see retrieve_qc_conn_from_cid()), not the original
+		 * ODCID, which also matches the tree slot selected above.
+		 */
+		node = ebmb_lookup(&tree->root, derive_cid.data, derive_cid.len);
+		HA_RWLOCK_RDUNLOCK(QC_CID_LOCK, &tree->lock);
+	}
+
+	if (!node)
+		goto not_found;
+
+	conn_id = ebmb_entry(node, struct quic_connection_id, node);
+	return HA_ATOMIC_LOAD(&conn_id->tid);
+
+ not_found:
+	return -1;
+}
+
+/* Retrieve a quic_conn instance from the <pkt> DCID field. If the packet is an
+ * INITIAL or 0RTT type, we may have to use client address <saddr> if an ODCID
+ * is used.
+ *
+ * If the CID is found but owned by another thread, <*new_tid> is set to that
+ * thread ID and NULL is returned.
+ *
+ * Returns the instance or NULL if not found.
+ */
+struct quic_conn *retrieve_qc_conn_from_cid(struct quic_rx_packet *pkt,
+                                            struct sockaddr_storage *saddr,
+                                            int *new_tid)
+{
+	struct quic_conn *qc = NULL;
+	struct ebmb_node *node;
+	struct quic_connection_id *conn_id;
+	struct quic_cid_tree *tree;
+	uint conn_id_tid;
+
+	TRACE_ENTER(QUIC_EV_CONN_RXPKT);
+	*new_tid = -1;
+
+	/* First look into DCID tree. */
+	tree = &quic_cid_trees[_quic_cid_tree_idx(pkt->dcid.data)];
+	HA_RWLOCK_RDLOCK(QC_CID_LOCK, &tree->lock);
+	node = ebmb_lookup(&tree->root, pkt->dcid.data, pkt->dcid.len);
+
+	/* If not found on an Initial/0-RTT packet, it could be because an
+	 * ODCID is reused by the client. Calculate the derived CID value to
+	 * retrieve it from the DCID tree.
+	 */
+	if (!node && (pkt->type == QUIC_PACKET_TYPE_INITIAL ||
+	              pkt->type == QUIC_PACKET_TYPE_0RTT)) {
+		const struct quic_cid derive_cid = quic_derive_cid(&pkt->dcid, saddr);
+
+		/* Release the first tree's lock before taking the one of the
+		 * tree the derived CID hashes to; <tree> is updated so the
+		 * final unlock below always matches the lock still held.
+		 */
+		HA_RWLOCK_RDUNLOCK(QC_CID_LOCK, &tree->lock);
+
+		tree = &quic_cid_trees[quic_cid_tree_idx(&derive_cid)];
+		HA_RWLOCK_RDLOCK(QC_CID_LOCK, &tree->lock);
+		node = ebmb_lookup(&tree->root, derive_cid.data, derive_cid.len);
+	}
+
+	if (!node)
+		goto end;
+
+	conn_id = ebmb_entry(node, struct quic_connection_id, node);
+	conn_id_tid = HA_ATOMIC_LOAD(&conn_id->tid);
+	if (conn_id_tid != tid) {
+		/* CID is owned by another thread: report it to the caller. */
+		*new_tid = conn_id_tid;
+		goto end;
+	}
+	qc = conn_id->qc;
+
+ end:
+	HA_RWLOCK_RDUNLOCK(QC_CID_LOCK, &tree->lock);
+	TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+	return qc;
+}
+
+/* Build a NEW_CONNECTION_ID frame for <conn_id> CID of <qc> connection and
+ * queue it for emission on the application encryption level.
+ *
+ * Returns 1 on success else 0.
+ */
+int qc_build_new_connection_id_frm(struct quic_conn *qc,
+                                   struct quic_connection_id *conn_id)
+{
+	int ret = 0;
+	struct quic_frame *frm;
+	struct quic_enc_level *qel;
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
+
+	qel = qc->ael;
+	frm = qc_frm_alloc(QUIC_FT_NEW_CONNECTION_ID);
+	if (!frm) {
+		TRACE_ERROR("frame allocation error", QUIC_EV_CONN_IO_CB, qc);
+		goto leave;
+	}
+
+	/* Copy the CID material into the frame and queue it for sending. */
+	quic_connection_id_to_frm_cpy(frm, conn_id);
+	LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
+	return ret;
+}
diff --git a/src/quic_cli.c b/src/quic_cli.c
new file mode 100644
index 0000000..56301fa
--- /dev/null
+++ b/src/quic_cli.c
@@ -0,0 +1,413 @@
+#include <import/eb64tree.h>
+
+#include <haproxy/applet-t.h>
+#include <haproxy/cli.h>
+#include <haproxy/list.h>
+#include <haproxy/tools.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_tp.h>
+
+/* incremented by each "show quic". */
+unsigned int qc_epoch = 0;
+
+/* Output layouts supported by "show quic". */
+enum quic_dump_format {
+	QUIC_DUMP_FMT_ONELINE, /* one line per connection (default) */
+	QUIC_DUMP_FMT_FULL,    /* multi-line detailed dump per connection */
+};
+
+/* appctx context used by "show quic" command */
+struct show_quic_ctx {
+	unsigned int epoch;           /* qc_epoch value snapshot when the dump started */
+	struct bref bref;             /* back-reference to the quic-conn being dumped */
+	unsigned int thr;             /* current thread being walked */
+	int flags;                    /* QC_CLI_FL_* */
+	enum quic_dump_format format; /* selected output format */
+};
+
+#define QC_CLI_FL_SHOW_ALL 0x1 /* show closing/draining connections */
+
+/* Parse the "show quic" command arguments and initialize the dump context
+ * stored in the appctx service context.
+ * Returns 0 to proceed with the dump, 1 when the user lacks permission.
+ */
+static int cli_parse_show_quic(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct show_quic_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	int argc = 2; /* args[0]/args[1] hold "show quic" */
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	/* Tag this dump with a fresh epoch value. */
+	ctx->epoch = _HA_ATOMIC_FETCH_ADD(&qc_epoch, 1);
+	ctx->thr = 0;
+	ctx->flags = 0;
+	ctx->format = QUIC_DUMP_FMT_ONELINE;
+
+	/* An optional format keyword may come first. */
+	if (strcmp(args[argc], "oneline") == 0) {
+		/* format already used as default value */
+		++argc;
+	}
+	else if (strcmp(args[argc], "full") == 0) {
+		ctx->format = QUIC_DUMP_FMT_FULL;
+		++argc;
+	}
+
+	/* Remaining keywords are flags. */
+	while (*args[argc]) {
+		if (strcmp(args[argc], "all") == 0)
+			ctx->flags |= QC_CLI_FL_SHOW_ALL;
+
+		++argc;
+	}
+
+	LIST_INIT(&ctx->bref.users);
+
+	return 0;
+}
+
+/* Dump for "show quic" with "oneline" format: one line per connection with
+ * state, in-flight/loss counters, addresses and both CIDs in hex.
+ */
+static void dump_quic_oneline(struct show_quic_ctx *ctx, struct quic_conn *qc)
+{
+	char bufaddr[INET6_ADDRSTRLEN], bufport[6];
+	int ret;
+	unsigned char cid_len;
+
+	ret = chunk_appendf(&trash, "%p[%02u]/%-.12s ", qc, ctx->thr,
+	                    qc->li->bind_conf->frontend->id);
+	/* Pad with spaces up to a fixed column so the next fields line up.
+	 * NOTE(review): assumes ret < 36; a longer prefix yields a negative
+	 * width (left-justify) — confirm this is acceptable.
+	 */
+	chunk_appendf(&trash, "%*s", 36 - ret, " "); /* align output */
+
+	/* State */
+	if (qc->flags & QUIC_FL_CONN_CLOSING)
+		chunk_appendf(&trash, "CLOSE   ");
+	else if (qc->flags & QUIC_FL_CONN_DRAINING)
+		chunk_appendf(&trash, "DRAIN   ");
+	else if (qc->state < QUIC_HS_ST_COMPLETE)
+		chunk_appendf(&trash, "HDSHK   ");
+	else
+		chunk_appendf(&trash, "ESTAB   ");
+
+	/* Bytes in flight / Lost packets */
+	chunk_appendf(&trash, "%9llu %6llu %6llu ",
+	              (ullong)qc->path->in_flight,
+	              (ullong)qc->path->ifae_pkts,
+	              (ullong)qc->path->loss.nb_lost_pkt);
+
+	/* Socket */
+	if (qc->local_addr.ss_family == AF_INET ||
+	    qc->local_addr.ss_family == AF_INET6) {
+		addr_to_str(&qc->local_addr, bufaddr, sizeof(bufaddr));
+		port_to_str(&qc->local_addr, bufport, sizeof(bufport));
+		chunk_appendf(&trash, "%15s:%-5s ", bufaddr, bufport);
+
+		addr_to_str(&qc->peer_addr, bufaddr, sizeof(bufaddr));
+		port_to_str(&qc->peer_addr, bufport, sizeof(bufport));
+		chunk_appendf(&trash, "%15s:%-5s ", bufaddr, bufport);
+
+	}
+
+	/* CIDs, dumped as hexadecimal strings */
+	for (cid_len = 0; cid_len < qc->scid.len; ++cid_len)
+		chunk_appendf(&trash, "%02x", qc->scid.data[cid_len]);
+
+	chunk_appendf(&trash, " ");
+	for (cid_len = 0; cid_len < qc->dcid.len; ++cid_len)
+		chunk_appendf(&trash, "%02x", qc->dcid.data[cid_len]);
+
+	chunk_appendf(&trash, "\n");
+}
+
+/* Dump for "show quic" with "full" format: multi-line report with CIDs,
+ * transport parameters, state, addresses, packet number spaces, RTT/loss
+ * counters and per-stream offsets.
+ */
+static void dump_quic_full(struct show_quic_ctx *ctx, struct quic_conn *qc)
+{
+	struct quic_pktns *pktns;
+	struct eb64_node *node;
+	struct qc_stream_desc *stream;
+	char bufaddr[INET6_ADDRSTRLEN], bufport[6];
+	int expire, i, addnl;
+	unsigned char cid_len;
+
+	addnl = 0;
+	/* CIDs, padded with ".." so both columns stay aligned */
+	chunk_appendf(&trash, "* %p[%02u]: scid=", qc, ctx->thr);
+	for (cid_len = 0; cid_len < qc->scid.len; ++cid_len)
+		chunk_appendf(&trash, "%02x", qc->scid.data[cid_len]);
+	while (cid_len++ < 20)
+		chunk_appendf(&trash, "..");
+
+	chunk_appendf(&trash, " dcid=");
+	for (cid_len = 0; cid_len < qc->dcid.len; ++cid_len)
+		chunk_appendf(&trash, "%02x", qc->dcid.data[cid_len]);
+	while (cid_len++ < 20)
+		chunk_appendf(&trash, "..");
+
+	chunk_appendf(&trash, "\n");
+
+	/* Local and remote transport parameters */
+	chunk_appendf(&trash, "  loc. TPs:");
+	quic_transport_params_dump(&trash, qc, &qc->rx.params);
+	chunk_appendf(&trash, "\n");
+	chunk_appendf(&trash, "  rem. TPs:");
+	quic_transport_params_dump(&trash, qc, &qc->tx.params);
+	chunk_appendf(&trash, "\n");
+
+	/* Connection state */
+	if (qc->flags & QUIC_FL_CONN_CLOSING)
+		chunk_appendf(&trash, "  st=closing          ");
+	else if (qc->flags & QUIC_FL_CONN_DRAINING)
+		chunk_appendf(&trash, "  st=draining         ");
+	else if (qc->state < QUIC_HS_ST_CONFIRMED)
+		chunk_appendf(&trash, "  st=handshake        ");
+	else
+		chunk_appendf(&trash, "  st=opened           ");
+
+	if (qc->mux_state == QC_MUX_NULL)
+		chunk_appendf(&trash, "mux=null                                      ");
+	else if (qc->mux_state == QC_MUX_READY)
+		chunk_appendf(&trash, "mux=ready                                     ");
+	else
+		chunk_appendf(&trash, "mux=released                                  ");
+
+	if (qc->idle_timer_task) {
+		expire = qc->idle_timer_task->expire;
+		chunk_appendf(&trash, "expire=%02ds ",
+		              TICKS_TO_MS(tick_remain(now_ms, expire)) / 1000);
+	}
+
+	chunk_appendf(&trash, "\n");
+
+	/* Socket */
+	chunk_appendf(&trash, "  fd=%d", qc->fd);
+	if (qc->local_addr.ss_family == AF_INET ||
+	    qc->local_addr.ss_family == AF_INET6) {
+		addr_to_str(&qc->local_addr, bufaddr, sizeof(bufaddr));
+		port_to_str(&qc->local_addr, bufport, sizeof(bufport));
+		chunk_appendf(&trash, " local_addr=%s:%s", bufaddr, bufport);
+
+		addr_to_str(&qc->peer_addr, bufaddr, sizeof(bufaddr));
+		port_to_str(&qc->peer_addr, bufport, sizeof(bufport));
+		chunk_appendf(&trash, " foreign_addr=%s:%s", bufaddr, bufport);
+	}
+
+	chunk_appendf(&trash, "\n");
+
+	/* Packet number spaces information */
+	pktns = qc->ipktns;
+	if (pktns) {
+		/* Fixed: added the trailing newline the other two pktns lines
+		 * already have, so [hndshk] does not get glued to this line.
+		 */
+		chunk_appendf(&trash, "  [initl] rx.ackrng=%-6zu tx.inflight=%-6zu\n",
+		              pktns->rx.arngs.sz, pktns->tx.in_flight);
+	}
+
+	pktns = qc->hpktns;
+	if (pktns) {
+		chunk_appendf(&trash, "  [hndshk] rx.ackrng=%-6zu tx.inflight=%-6zu\n",
+		              pktns->rx.arngs.sz, pktns->tx.in_flight);
+	}
+
+	pktns = qc->apktns;
+	if (pktns) {
+		chunk_appendf(&trash, "  [01rtt] rx.ackrng=%-6zu tx.inflight=%-6zu\n",
+		              pktns->rx.arngs.sz, pktns->tx.in_flight);
+	}
+
+	/* Fixed: the '\n' terminates the line instead of splitting it before
+	 * 'reorderedpkts', which left that field dangling on its own line.
+	 */
+	chunk_appendf(&trash, "  srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u cwnd=%-6llu"
+	              " mcwnd=%-6llu sentpkts=%-6llu lostpkts=%-6llu reorderedpkts=%-6llu\n",
+	              qc->path->loss.srtt, qc->path->loss.rtt_var,
+	              qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd,
+	              (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt);
+
+	/* Error/event counters, only dumped when non null */
+	if (qc->cntrs.dropped_pkt) {
+		chunk_appendf(&trash, " droppkts=%-6llu", qc->cntrs.dropped_pkt);
+		addnl = 1;
+	}
+	if (qc->cntrs.dropped_pkt_bufoverrun) {
+		chunk_appendf(&trash, " dropbuff=%-6llu", qc->cntrs.dropped_pkt_bufoverrun);
+		addnl = 1;
+	}
+	if (qc->cntrs.dropped_parsing) {
+		chunk_appendf(&trash, " droppars=%-6llu", qc->cntrs.dropped_parsing);
+		addnl = 1;
+	}
+	if (qc->cntrs.socket_full) {
+		chunk_appendf(&trash, " sockfull=%-6llu", qc->cntrs.socket_full);
+		addnl = 1;
+	}
+	if (qc->cntrs.sendto_err) {
+		chunk_appendf(&trash, " sendtoerr=%-6llu", qc->cntrs.sendto_err);
+		addnl = 1;
+	}
+	if (qc->cntrs.sendto_err_unknown) {
+		/* Fixed: print the sendto_err_unknown counter this branch is
+		 * guarded by, not sendto_err which has its own field above.
+		 */
+		chunk_appendf(&trash, " sendtounknerr=%-6llu", qc->cntrs.sendto_err_unknown);
+		addnl = 1;
+	}
+	if (qc->cntrs.conn_migration_done) {
+		chunk_appendf(&trash, " migrdone=%-6llu", qc->cntrs.conn_migration_done);
+		addnl = 1;
+	}
+	if (qc->cntrs.data_blocked) {
+		chunk_appendf(&trash, " datablocked=%-6llu", qc->cntrs.data_blocked);
+		addnl = 1;
+	}
+	if (qc->cntrs.stream_data_blocked) {
+		chunk_appendf(&trash, " sdatablocked=%-6llu", qc->cntrs.stream_data_blocked);
+		addnl = 1;
+	}
+	if (qc->cntrs.streams_blocked_bidi) {
+		chunk_appendf(&trash, " sblockebidi=%-6llu", qc->cntrs.streams_blocked_bidi);
+		addnl = 1;
+	}
+	if (qc->cntrs.streams_blocked_uni) {
+		chunk_appendf(&trash, " sblockeduni=%-6llu", qc->cntrs.streams_blocked_uni);
+		addnl = 1;
+	}
+	if (addnl)
+		chunk_appendf(&trash, "\n");
+
+	/* Streams, three per output line */
+	node = eb64_first(&qc->streams_by_id);
+	i = 0;
+	while (node) {
+		stream = eb64_entry(node, struct qc_stream_desc, by_id);
+		node = eb64_next(node);
+
+		chunk_appendf(&trash, "  | stream=%-8llu", (unsigned long long)stream->by_id.key);
+		chunk_appendf(&trash, " off=%-8llu ack=%-8llu",
+		              (unsigned long long)stream->buf_offset,
+		              (unsigned long long)stream->ack_offset);
+
+		if (!(++i % 3)) {
+			chunk_appendf(&trash, "\n");
+			i = 0;
+		}
+	}
+
+	chunk_appendf(&trash, "\n");
+}
+
/* CLI I/O handler for "show quic". Walks the per-thread quic_conn lists (and,
 * when "all" was requested, the closing-connections lists too) and dumps each
 * connection in the format selected at parse time. The dump is resumable: a
 * back-reference (ctx->bref) is attached to the last dumped connection when
 * the output buffer fills up, so the next invocation restarts from there.
 * Runs under thread isolation since it browses other threads' lists.
 * Returns 1 when the dump is complete, 0 when it must be called again.
 */
static int cli_io_handler_dump_quic(struct appctx *appctx)
{
	struct show_quic_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct quic_conn *qc;

	thread_isolate();

	/* All threads already browsed: nothing left to dump. */
	if (ctx->thr >= global.nbthread)
		goto done;

	/* FIXME: Don't watch the other side !*/
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
		/* If we're forced to shut down, we might have to remove our
		 * reference to the last stream being dumped.
		 */
		if (!LIST_ISEMPTY(&ctx->bref.users))
			LIST_DEL_INIT(&ctx->bref.users);
		goto done;
	}

	chunk_reset(&trash);

	if (!LIST_ISEMPTY(&ctx->bref.users)) {
		/* Remove show_quic_ctx from previous quic_conn instance. */
		LIST_DEL_INIT(&ctx->bref.users);
	}
	else if (!ctx->bref.ref) {
		/* First invocation. */
		ctx->bref.ref = ha_thread_ctx[ctx->thr].quic_conns.n;

		/* Print legend for oneline format. */
		if (ctx->format == QUIC_DUMP_FMT_ONELINE) {
			chunk_appendf(&trash, "# conn/frontend state "
			              "in_flight infl_p lost_p "
			              "Local Address Foreign Address "
			              "local & remote CIDs\n");
			applet_putchk(appctx, &trash);
		}
	}

	while (1) {
		int done = 0;

		if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].quic_conns) {
			/* If closing connections requested through "all", move
			 * to quic_conns_clo list after browsing quic_conns.
			 * Else move directly to the next quic_conns thread.
			 */
			if (ctx->flags & QC_CLI_FL_SHOW_ALL) {
				ctx->bref.ref = ha_thread_ctx[ctx->thr].quic_conns_clo.n;
				continue;
			}

			done = 1;
		}
		else if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].quic_conns_clo) {
			/* Closing list entirely browsed, go to next quic_conns
			 * thread.
			 */
			done = 1;
		}
		else {
			/* Retrieve next element of the current list. */
			qc = LIST_ELEM(ctx->bref.ref, struct quic_conn *, el_th_ctx);
			/* Skip connections created after the dump started;
			 * the wrap-safe epoch comparison bounds the walk.
			 */
			if ((int)(qc->qc_epoch - ctx->epoch) > 0)
				done = 1;
		}

		if (done) {
			++ctx->thr;
			if (ctx->thr >= global.nbthread)
				break;
			/* Switch to next thread quic_conns list. */
			ctx->bref.ref = ha_thread_ctx[ctx->thr].quic_conns.n;
			continue;
		}

		switch (ctx->format) {
		case QUIC_DUMP_FMT_FULL:
			dump_quic_full(ctx, qc);
			break;
		case QUIC_DUMP_FMT_ONELINE:
			dump_quic_oneline(ctx, qc);
			break;
		}

		if (applet_putchk(appctx, &trash) == -1) {
			/* Register show_quic_ctx to quic_conn instance. */
			LIST_APPEND(&qc->back_refs, &ctx->bref.users);
			goto full;
		}

		ctx->bref.ref = qc->el_th_ctx.n;
	}

 done:
	thread_release();
	return 1;

 full:
	/* Output buffer full: yield and resume from the registered bref. */
	thread_release();
	return 0;
}
+
+static void cli_release_show_quic(struct appctx *appctx)
+{
+ struct show_quic_ctx *ctx = appctx->svcctx;
+
+ if (ctx->thr < global.nbthread) {
+ thread_isolate();
+ if (!LIST_ISEMPTY(&ctx->bref.users))
+ LIST_DEL_INIT(&ctx->bref.users);
+ thread_release();
+ }
+}
+
/* CLI keyword registration for the "show quic" command, wiring the parser,
 * the I/O handler and the release callback defined above.
 */
static struct cli_kw_list cli_kws = {{ }, {
	{ { "show", "quic", NULL }, "show quic [oneline|full] [all] : display quic connections status", cli_parse_show_quic, cli_io_handler_dump_quic, cli_release_show_quic },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+static void cli_quic_init()
+{
+ int thr;
+
+ for (thr = 0; thr < MAX_THREADS; ++thr) {
+ LIST_INIT(&ha_thread_ctx[thr].quic_conns);
+ LIST_INIT(&ha_thread_ctx[thr].quic_conns_clo);
+ }
+}
+INITCALL0(STG_INIT, cli_quic_init);
diff --git a/src/quic_conn.c b/src/quic_conn.c
new file mode 100644
index 0000000..5233496
--- /dev/null
+++ b/src/quic_conn.c
@@ -0,0 +1,1893 @@
+/*
+ * QUIC protocol implementation. Lower layer with internal features implemented
+ * here such as QUIC encryption, idle timeout, acknowledgement and
+ * retransmission.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/quic_conn.h>
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/buf-t.h>
+#include <haproxy/compat.h>
+#include <haproxy/api.h>
+#include <haproxy/debug.h>
+#include <haproxy/tools.h>
+#include <haproxy/ticks.h>
+
+#include <haproxy/connection.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/h3.h>
+#include <haproxy/hq_interop.h>
+#include <haproxy/log.h>
+#include <haproxy/mux_quic.h>
+#include <haproxy/ncbuf.h>
+#include <haproxy/pipe.h>
+#include <haproxy/proxy.h>
+#include <haproxy/quic_ack.h>
+#include <haproxy/quic_cc.h>
+#include <haproxy/quic_cli-t.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_loss.h>
+#include <haproxy/quic_rx.h>
+#include <haproxy/quic_ssl.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_stats.h>
+#include <haproxy/quic_stream.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/quic_tx.h>
+#include <haproxy/cbuf.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/task.h>
+#include <haproxy/thread.h>
+#include <haproxy/trace.h>
+
/* list of supported QUIC versions by this implementation, with the
 * per-version HKDF labels, initial salt and Retry integrity material.
 */
const struct quic_version quic_versions[] = {
	/* draft-29: pre-RFC version, kept for interoperability. Uses the v1
	 * labels but its own initial salt and Retry key/nonce.
	 */
	{
		.num = QUIC_PROTOCOL_VERSION_DRAFT_29,
		.initial_salt = initial_salt_draft_29,
		.initial_salt_len = sizeof initial_salt_draft_29,
		.key_label = (const unsigned char *)QUIC_HKDF_KEY_LABEL_V1,
		.key_label_len = sizeof(QUIC_HKDF_KEY_LABEL_V1) - 1,
		.iv_label = (const unsigned char *)QUIC_HKDF_IV_LABEL_V1,
		.iv_label_len = sizeof(QUIC_HKDF_IV_LABEL_V1) - 1,
		.hp_label = (const unsigned char *)QUIC_HKDF_HP_LABEL_V1,
		.hp_label_len = sizeof(QUIC_HKDF_HP_LABEL_V1) - 1,
		.ku_label = (const unsigned char *)QUIC_HKDF_KU_LABEL_V1,
		.ku_label_len = sizeof(QUIC_HKDF_KU_LABEL_V1) - 1,
		.retry_tag_key = (const unsigned char *)QUIC_TLS_RETRY_KEY_DRAFT,
		.retry_tag_nonce = (const unsigned char *)QUIC_TLS_RETRY_NONCE_DRAFT,
	},
	/* QUIC v1 (RFC 9001 labels and salt). */
	{
		.num = QUIC_PROTOCOL_VERSION_1,
		.initial_salt = initial_salt_v1,
		.initial_salt_len = sizeof initial_salt_v1,
		.key_label = (const unsigned char *)QUIC_HKDF_KEY_LABEL_V1,
		.key_label_len = sizeof(QUIC_HKDF_KEY_LABEL_V1) - 1,
		.iv_label = (const unsigned char *)QUIC_HKDF_IV_LABEL_V1,
		.iv_label_len = sizeof(QUIC_HKDF_IV_LABEL_V1) - 1,
		.hp_label = (const unsigned char *)QUIC_HKDF_HP_LABEL_V1,
		.hp_label_len = sizeof(QUIC_HKDF_HP_LABEL_V1) - 1,
		.ku_label = (const unsigned char *)QUIC_HKDF_KU_LABEL_V1,
		.ku_label_len = sizeof(QUIC_HKDF_KU_LABEL_V1) - 1,
		.retry_tag_key = (const unsigned char *)QUIC_TLS_RETRY_KEY_V1,
		.retry_tag_nonce = (const unsigned char *)QUIC_TLS_RETRY_NONCE_V1,
	},
	/* QUIC v2: distinct labels, salt and Retry material. */
	{
		.num = QUIC_PROTOCOL_VERSION_2,
		.initial_salt = initial_salt_v2,
		.initial_salt_len = sizeof initial_salt_v2,
		.key_label = (const unsigned char *)QUIC_HKDF_KEY_LABEL_V2,
		.key_label_len = sizeof(QUIC_HKDF_KEY_LABEL_V2) - 1,
		.iv_label = (const unsigned char *)QUIC_HKDF_IV_LABEL_V2,
		.iv_label_len = sizeof(QUIC_HKDF_IV_LABEL_V2) - 1,
		.hp_label = (const unsigned char *)QUIC_HKDF_HP_LABEL_V2,
		.hp_label_len = sizeof(QUIC_HKDF_HP_LABEL_V2) - 1,
		.ku_label = (const unsigned char *)QUIC_HKDF_KU_LABEL_V2,
		.ku_label_len = sizeof(QUIC_HKDF_KU_LABEL_V2) - 1,
		.retry_tag_key = (const unsigned char *)QUIC_TLS_RETRY_KEY_V2,
		.retry_tag_nonce = (const unsigned char *)QUIC_TLS_RETRY_NONCE_V2,
	},
};
+
/* Function pointers, can be used to compute a hash from first generated CID and to derive new CIDs */
uint64_t (*quic_hash64_from_cid)(const unsigned char *cid, int size, const unsigned char *secret, size_t secretlen) = NULL;
void (*quic_newcid_from_hash64)(unsigned char *cid, int size, uint64_t hash, const unsigned char *secret, size_t secretlen) = NULL;

/* The total number of supported versions */
const size_t quic_versions_nb = sizeof quic_versions / sizeof *quic_versions;
/* Listener only preferred version */
const struct quic_version *preferred_version;
/* RFC 8999 5.4. Version
 * A Version field with a
 * value of 0x00000000 is reserved for version negotiation
 */
const struct quic_version quic_version_VN_reserved = { .num = 0, };

/* Memory pools: full connections, closing-state connections, per-connection
 * CID trees and individual connection IDs.
 */
DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
DECLARE_STATIC_POOL(pool_head_quic_conn_closed, "quic_conn_closed", sizeof(struct quic_conn_closed));
DECLARE_STATIC_POOL(pool_head_quic_cids, "quic_cids", sizeof(struct eb_root));
DECLARE_POOL(pool_head_quic_connection_id,
             "quic_connection_id", sizeof(struct quic_connection_id));
+
+struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state);
+static int quic_conn_init_timer(struct quic_conn *qc);
+static int quic_conn_init_idle_timer_task(struct quic_conn *qc, struct proxy *px);
+
+/* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */
+int quic_peer_validated_addr(struct quic_conn *qc)
+{
+ if (!qc_is_listener(qc))
+ return 1;
+
+ if (qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR)
+ return 1;
+
+ BUG_ON(qc->bytes.prep > 3 * qc->bytes.rx);
+
+ return 0;
+}
+
/* To be called to kill a connection as soon as possible (without sending any packet). */
void qc_kill_conn(struct quic_conn *qc)
{
	TRACE_ENTER(QUIC_EV_CONN_KILL, qc);
	TRACE_PROTO("killing the connection", QUIC_EV_CONN_KILL, qc);
	/* Mark the connection for termination and cancel any pending
	 * retransmission request.
	 */
	qc->flags |= QUIC_FL_CONN_TO_KILL;
	qc->flags &= ~QUIC_FL_CONN_RETRANS_NEEDED;
	/* Wake the idle timer task, presumably responsible for the final
	 * release of the connection -- confirm against the idle timer handler.
	 */
	task_wakeup(qc->idle_timer_task, TASK_WOKEN_OTHER);

	/* Report the error condition to the upper layer. */
	qc_notify_err(qc);

	TRACE_LEAVE(QUIC_EV_CONN_KILL, qc);
}
+
/* Set the timer attached to the QUIC connection with <ctx> as I/O handler and used for
 * both loss detection and PTO and schedule the task associated to this timer if needed.
 * Priority order: loss time if set, else cancellation when the
 * anti-amplification limit is hit or nothing is in flight, else PTO.
 */
void qc_set_timer(struct quic_conn *qc)
{
	struct quic_pktns *pktns;
	unsigned int pto;
	int handshake_confirmed;

	TRACE_ENTER(QUIC_EV_CONN_STIMER, qc);
	TRACE_PROTO("set timer", QUIC_EV_CONN_STIMER, qc, NULL, NULL, &qc->path->ifae_pkts);

	pktns = NULL;
	if (!qc->timer_task) {
		TRACE_PROTO("already released timer task", QUIC_EV_CONN_STIMER, qc);
		goto leave;
	}

	/* A pending loss detection deadline takes precedence over the PTO. */
	pktns = quic_loss_pktns(qc);
	if (tick_isset(pktns->tx.loss_time)) {
		qc->timer = pktns->tx.loss_time;
		goto out;
	}

	/* anti-amplification: the timer must be
	 * cancelled for a server which reached the anti-amplification limit.
	 */
	if (!quic_peer_validated_addr(qc) &&
	    (qc->flags & QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED)) {
		TRACE_PROTO("anti-amplification reached", QUIC_EV_CONN_STIMER, qc);
		qc->timer = TICK_ETERNITY;
		goto out;
	}

	if (!qc->path->ifae_pkts && quic_peer_validated_addr(qc)) {
		TRACE_PROTO("timer cancellation", QUIC_EV_CONN_STIMER, qc);
		/* Timer cancellation. */
		qc->timer = TICK_ETERNITY;
		goto out;
	}

	/* Fall back to the PTO of the relevant packet number space. */
	handshake_confirmed = qc->state >= QUIC_HS_ST_CONFIRMED;
	pktns = quic_pto_pktns(qc, handshake_confirmed, &pto);
	if (tick_isset(pto))
		qc->timer = pto;
 out:
	if (qc->timer == TICK_ETERNITY) {
		qc->timer_task->expire = TICK_ETERNITY;
	}
	else if (tick_is_expired(qc->timer, now_ms)) {
		/* Deadline already passed: fire the timer task immediately. */
		TRACE_DEVEL("wakeup asap timer task", QUIC_EV_CONN_STIMER, qc);
		task_wakeup(qc->timer_task, TASK_WOKEN_MSG);
	}
	else {
		TRACE_DEVEL("timer task scheduling", QUIC_EV_CONN_STIMER, qc);
		task_schedule(qc->timer_task, qc->timer);
	}
 leave:
	TRACE_PROTO("set timer", QUIC_EV_CONN_STIMER, qc, pktns);
	TRACE_LEAVE(QUIC_EV_CONN_STIMER, qc);
}
+
+/* Prepare the emission of CONNECTION_CLOSE with error <err>. All send/receive
+ * activity for <qc> will be interrupted.
+ */
+void quic_set_connection_close(struct quic_conn *qc, const struct quic_err err)
+{
+ TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+ if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE)
+ goto leave;
+
+ TRACE_STATE("setting immediate close", QUIC_EV_CONN_CLOSE, qc);
+ qc->flags |= QUIC_FL_CONN_IMMEDIATE_CLOSE;
+ qc->err.code = err.code;
+ qc->err.app = err.app;
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
/* Set <alert> TLS alert as QUIC CRYPTO_ERROR error */
void quic_set_tls_alert(struct quic_conn *qc, int alert)
{
	TRACE_ENTER(QUIC_EV_CONN_SSLALERT, qc);

	/* Schedule an immediate close carrying the alert mapped to a QUIC
	 * transport error code, and remember that a TLS alert was raised.
	 */
	quic_set_connection_close(qc, quic_err_tls(alert));
	qc->flags |= QUIC_FL_CONN_TLS_ALERT;
	TRACE_STATE("Alert set", QUIC_EV_CONN_SSLALERT, qc);

	TRACE_LEAVE(QUIC_EV_CONN_SSLALERT, qc);
}
+
+/* Set the application for <qc> QUIC connection.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_set_app_ops(struct quic_conn *qc, const unsigned char *alpn, size_t alpn_len)
+{
+ if (alpn_len >= 2 && memcmp(alpn, "h3", 2) == 0)
+ qc->app_ops = &h3_ops;
+ else if (alpn_len >= 10 && memcmp(alpn, "hq-interop", 10) == 0)
+ qc->app_ops = &hq_interop_ops;
+ else
+ return 0;
+
+ return 1;
+}
+
/* Schedule a CONNECTION_CLOSE emission on <qc> if the MUX has been released
 * and all STREAM data are acknowledged. The MUX is responsible to have set
 * <qc.err> before as it is reused for the CONNECTION_CLOSE frame.
 *
 * TODO this should also be called on lost packet detection
 */
void qc_check_close_on_released_mux(struct quic_conn *qc)
{
	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);

	/* An empty streams_by_id tree means every stream buffer was fully
	 * acknowledged, so the connection can now be closed.
	 */
	if (qc->mux_state == QC_MUX_RELEASED && eb_is_empty(&qc->streams_by_id)) {
		/* Reuse errcode which should have been previously set by the MUX on release. */
		quic_set_connection_close(qc, qc->err);
		tasklet_wakeup(qc->wait_event.tasklet);
	}

	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
}
+
+/* Finalize <qc> QUIC connection:
+
+ * MUST be called after having received the remote transport parameters which
+ * are parsed when the TLS callback for the ClientHello message is called upon
+ * SSL_do_handshake() calls, not necessarily at the first time as this TLS
+ * message may be split between packets
+ * Return 1 if succeeded, 0 if not.
+ */
+int qc_conn_finalize(struct quic_conn *qc, int server)
+{
+ int ret = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+ if (qc->flags & QUIC_FL_CONN_FINALIZED)
+ goto finalized;
+
+ if (!quic_tls_finalize(qc, server))
+ goto out;
+
+ /* This connection is functional (ready to send/receive) */
+ qc->flags |= QUIC_FL_CONN_FINALIZED;
+
+ finalized:
+ ret = 1;
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+ return ret;
+}
+
/* Account a CONNECTION_CLOSE frame <frm> on <qc>: transport-level error codes
 * are counted against the proxy counters, application-level ones are passed
 * to the application layer when the MUX is ready and provides inc_err_cnt.
 */
void quic_conn_closed_err_count_inc(struct quic_conn *qc, struct quic_frame *frm)
{
	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);

	if (frm->type == QUIC_FT_CONNECTION_CLOSE)
		quic_stats_transp_err_count_inc(qc->prx_counters, frm->connection_close.error_code);
	else if (frm->type == QUIC_FT_CONNECTION_CLOSE_APP) {
		/* No application-level accounting possible without a ready MUX. */
		if (qc->mux_state != QC_MUX_READY || !qc->qcc->app_ops->inc_err_cnt)
			goto out;

		qc->qcc->app_ops->inc_err_cnt(qc->qcc->ctx, frm->connection_close_app.error_code);
	}

 out:
	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
}
+
/* Cancel a request on connection <qc> for stream id <id>. This is useful when
 * the client opens a new stream but the MUX has already been released. A
 * STOP_SENDING + RESET_STREAM frames are prepared for emission.
 *
 * TODO this function is closely related to H3. Its place should be in H3 layer
 * instead of quic-conn but this requires an architecture adjustment.
 *
 * Returns 1 on success else 0.
 */
int qc_h3_request_reject(struct quic_conn *qc, uint64_t id)
{
	int ret = 0;
	struct quic_frame *ss, *rs;
	struct quic_enc_level *qel = qc->ael;
	const uint64_t app_error_code = H3_REQUEST_REJECTED;

	TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);

	/* Do not emit rejection for unknown unidirectional stream as it is
	 * forbidden to close some of them (H3 control stream and QPACK
	 * encoder/decoder streams).
	 */
	if (quic_stream_is_uni(id)) {
		ret = 1;
		goto out;
	}

	ss = qc_frm_alloc(QUIC_FT_STOP_SENDING);
	if (!ss) {
		TRACE_ERROR("failed to allocate quic_frame", QUIC_EV_CONN_PRSHPKT, qc);
		goto out;
	}

	ss->stop_sending.id = id;
	ss->stop_sending.app_error_code = app_error_code;

	rs = qc_frm_alloc(QUIC_FT_RESET_STREAM);
	if (!rs) {
		TRACE_ERROR("failed to allocate quic_frame", QUIC_EV_CONN_PRSHPKT, qc);
		/* Free the already allocated STOP_SENDING frame: both frames
		 * are emitted together or not at all.
		 */
		qc_frm_free(qc, &ss);
		goto out;
	}

	rs->reset_stream.id = id;
	rs->reset_stream.app_error_code = app_error_code;
	rs->reset_stream.final_size = 0;

	/* Queue both frames for emission in the application (1-RTT) packet
	 * number space.
	 */
	LIST_APPEND(&qel->pktns->tx.frms, &ss->list);
	LIST_APPEND(&qel->pktns->tx.frms, &rs->list);
	ret = 1;
 out:
	TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
	return ret;
}
+
/* Remove a <qc> quic-conn from its ha_thread_ctx list. If <closing> is true,
 * it will immediately be reinserted in the ha_thread_ctx quic_conns_clo list.
 * Any CLI "show quic" watcher currently pointing at <qc> is rechained to the
 * next element so a running dump survives the removal.
 */
void qc_detach_th_ctx_list(struct quic_conn *qc, int closing)
{
	struct bref *bref, *back;

	/* Detach CLI context watchers currently dumping this connection.
	 * Reattach them to the next quic_conn instance.
	 */
	list_for_each_entry_safe(bref, back, &qc->back_refs, users) {
		/* Remove watcher from this quic_conn instance. */
		LIST_DEL_INIT(&bref->users);

		/* Attach it to next instance unless it was the last list element. */
		if (qc->el_th_ctx.n != &th_ctx->quic_conns &&
		    qc->el_th_ctx.n != &th_ctx->quic_conns_clo) {
			struct quic_conn *next = LIST_NEXT(&qc->el_th_ctx,
			                                   struct quic_conn *,
			                                   el_th_ctx);
			LIST_APPEND(&next->back_refs, &bref->users);
		}
		bref->ref = qc->el_th_ctx.n;
		/* Make the updated reference visible before any observer
		 * dereferences it.
		 */
		__ha_barrier_store();
	}

	/* Remove quic_conn from global ha_thread_ctx list. */
	LIST_DEL_INIT(&qc->el_th_ctx);

	if (closing)
		LIST_APPEND(&th_ctx->quic_conns_clo, &qc->el_th_ctx);
}
+
+
+/* Copy at <pos> position a stateless reset token depending on the
+ * <salt> salt input. This is the cluster secret which will be derived
+ * as HKDF input secret to generate this token.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
+ const unsigned char *salt, size_t saltlen)
+{
+ /* Input secret */
+ const unsigned char *key = global.cluster_secret;
+ size_t keylen = sizeof global.cluster_secret;
+ /* Info */
+ const unsigned char label[] = "stateless token";
+ size_t labellen = sizeof label - 1;
+ int ret;
+
+ ret = quic_hkdf_extract_and_expand(EVP_sha256(), pos, len,
+ key, keylen, salt, saltlen, label, labellen);
+ return ret;
+}
+
/* Build all the frames which must be sent just after the handshake have succeeded.
 * This is essentially NEW_CONNECTION_ID frames. A QUIC server must also send
 * a HANDSHAKE_DONE frame.
 * Return 1 if succeeded, 0 if not.
 */
int quic_build_post_handshake_frames(struct quic_conn *qc)
{
	int ret = 0, max;
	struct quic_enc_level *qel;
	struct quic_frame *frm, *frmbak;
	struct list frm_list = LIST_HEAD_INIT(frm_list);
	struct eb64_node *node;

	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);

	qel = qc->ael;
	/* Only servers must send a HANDSHAKE_DONE frame. */
	if (qc_is_listener(qc)) {
		frm = qc_frm_alloc(QUIC_FT_HANDSHAKE_DONE);
		if (!frm) {
			TRACE_ERROR("frame allocation error", QUIC_EV_CONN_IO_CB, qc);
			goto leave;
		}

		LIST_APPEND(&frm_list, &frm->list);
	}

	/* Initialize <max> connection IDs minus one: there is
	 * already one connection ID used for the current connection. Also limit
	 * the number of connection IDs sent to the peer to 4 (3 from this function
	 * plus 1 for the current connection).
	 * Note that active_connection_id_limit >= 2: this has been already checked
	 * when receiving this parameter.
	 */
	max = QUIC_MIN(qc->tx.params.active_connection_id_limit - 1, (uint64_t)3);
	while (max--) {
		struct quic_connection_id *conn_id;

		frm = qc_frm_alloc(QUIC_FT_NEW_CONNECTION_ID);
		if (!frm) {
			TRACE_ERROR("frame allocation error", QUIC_EV_CONN_IO_CB, qc);
			goto err;
		}

		conn_id = new_quic_cid(qc->cids, qc, NULL, NULL);
		if (!conn_id) {
			qc_frm_free(qc, &frm);
			TRACE_ERROR("CID allocation error", QUIC_EV_CONN_IO_CB, qc);
			goto err;
		}

		/* TODO To prevent CID tree locking, all CIDs created here
		 * could be allocated at the same time as the first one.
		 */
		quic_cid_insert(conn_id);

		quic_connection_id_to_frm_cpy(frm, conn_id);
		LIST_APPEND(&frm_list, &frm->list);
	}

	/* Hand the whole batch over to the application packet number space
	 * for emission.
	 */
	LIST_SPLICE(&qel->pktns->tx.frms, &frm_list);
	qc->flags &= ~QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS;

	ret = 1;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_IO_CB, qc);
	return ret;

 err:
	/* free the frames */
	list_for_each_entry_safe(frm, frmbak, &frm_list, list)
		qc_frm_free(qc, &frm);

	/* The first CID sequence number value used to allocated CIDs by this function is 1,
	 * 0 being the sequence number of the CID for this connection.
	 */
	node = eb64_lookup_ge(qc->cids, 1);
	while (node) {
		struct quic_connection_id *conn_id;

		conn_id = eb64_entry(node, struct quic_connection_id, seq_num);
		/* NOTE(review): <max> holds its residual value after the
		 * allocation loop decremented it; confirm this comparison
		 * really selects only the CIDs allocated above.
		 */
		if (conn_id->seq_num.key >= max)
			break;

		node = eb64_next(node);
		quic_cid_delete(conn_id);

		eb64_delete(&conn_id->seq_num);
		pool_free(pool_head_quic_connection_id, conn_id);
	}
	goto leave;
}
+
+
/* QUIC connection packet handler task (post handshake). Receives and parses
 * datagrams, builds post-handshake frames when still pending, performs
 * requested retransmissions and finally emits application packets. May
 * release the connection itself when it is closing without a ready MUX.
 */
struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state)
{
	struct quic_conn *qc = context;
	struct quic_enc_level *qel;

	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);

	qel = qc->ael;
	TRACE_STATE("connection handshake state", QUIC_EV_CONN_IO_CB, qc, &qc->state);

	if (qc_test_fd(qc))
		qc_rcv_buf(qc);

	/* Prepare post-handshake frames
	 * - after connection is instantiated (accept is done)
	 * - handshake state is completed (may not be the case here in 0-RTT)
	 */
	if ((qc->flags & QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS) && qc->conn &&
	    qc->state >= QUIC_HS_ST_COMPLETE) {
		quic_build_post_handshake_frames(qc);
	}

	/* Retransmissions */
	if (qc->flags & QUIC_FL_CONN_RETRANS_NEEDED) {
		TRACE_STATE("retransmission needed", QUIC_EV_CONN_IO_CB, qc);
		qc->flags &= ~QUIC_FL_CONN_RETRANS_NEEDED;
		if (!qc_dgrams_retransmit(qc))
			goto out;
	}

	if (!qc_treat_rx_pkts(qc)) {
		TRACE_DEVEL("qc_treat_rx_pkts() failed", QUIC_EV_CONN_IO_CB, qc);
		goto out;
	}

	if (qc->flags & QUIC_FL_CONN_TO_KILL) {
		TRACE_DEVEL("connection to be killed", QUIC_EV_CONN_IO_CB, qc);
		goto out;
	}

	/* A draining endpoint must not emit any packet (RFC behavior
	 * enforced here unless an immediate close is pending).
	 */
	if ((qc->flags & QUIC_FL_CONN_DRAINING) &&
	    !(qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE)) {
		TRACE_STATE("draining connection (must not send packets)", QUIC_EV_CONN_IO_CB, qc);
		goto out;
	}

	/* XXX TODO: how to limit the list frames to send */
	if (!qc_send_app_pkts(qc, &qel->pktns->tx.frms)) {
		TRACE_DEVEL("qc_send_app_pkts() failed", QUIC_EV_CONN_IO_CB, qc);
		goto out;
	}

 out:
	/* Closing connection with no MUX left: release it now. */
	if ((qc->flags & QUIC_FL_CONN_CLOSING) && qc->mux_state != QC_MUX_READY) {
		quic_conn_release(qc);
		qc = NULL;
	}

	TRACE_LEAVE(QUIC_EV_CONN_IO_CB, qc);
	return t;
}
+
+static void quic_release_cc_conn(struct quic_conn_closed *cc_qc)
+{
+ struct quic_conn *qc = (struct quic_conn *)cc_qc;
+
+ TRACE_ENTER(QUIC_EV_CONN_IO_CB, cc_qc);
+
+ task_destroy(cc_qc->idle_timer_task);
+ cc_qc->idle_timer_task = NULL;
+ tasklet_free(qc->wait_event.tasklet);
+ free_quic_conn_cids(qc);
+ pool_free(pool_head_quic_cids, cc_qc->cids);
+ cc_qc->cids = NULL;
+ pool_free(pool_head_quic_cc_buf, cc_qc->cc_buf_area);
+ cc_qc->cc_buf_area = NULL;
+ /* free the SSL sock context */
+ pool_free(pool_head_quic_conn_closed, cc_qc);
+
+ TRACE_ENTER(QUIC_EV_CONN_IO_CB);
+}
+
/* QUIC connection packet handler task used when in "closing connection" state.
 * Re-emits the pre-built CONNECTION_CLOSE datagram in response to incoming
 * packets, within the anti-amplification limit for unvalidated peers.
 */
static struct task *quic_conn_closed_io_cb(struct task *t, void *context, unsigned int state)
{
	struct quic_conn_closed *cc_qc = context;
	struct quic_conn *qc = (struct quic_conn *)cc_qc;
	struct buffer buf;
	uint16_t dglen;
	struct quic_tx_packet *first_pkt;
	/* The CC buffer starts with a datagram-length field and a packet
	 * pointer; presumably this mirrors the layout written by the TX
	 * path when the datagram was built -- confirm against it.
	 */
	size_t headlen = sizeof dglen + sizeof first_pkt;

	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);

	if (qc_test_fd(qc))
		qc_rcv_buf(qc);

	/* Do not send too much data if the peer address was not validated. */
	if ((qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) &&
	    !(qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR) &&
	    quic_may_send_bytes(qc) < cc_qc->cc_dgram_len)
		goto leave;

	buf = b_make(cc_qc->cc_buf_area + headlen,
	             QUIC_MAX_CC_BUFSIZE - headlen, 0, cc_qc->cc_dgram_len);
	if (qc_snd_buf(qc, &buf, buf.data, 0) < 0) {
		/* Fatal send error: release everything, the task included. */
		TRACE_ERROR("sendto fatal error", QUIC_EV_CONN_IO_CB, qc);
		quic_release_cc_conn(cc_qc);
		cc_qc = NULL;
		qc = NULL;
		t = NULL;
		goto leave;
	}

	qc->flags &= ~QUIC_FL_CONN_IMMEDIATE_CLOSE;

 leave:
	TRACE_LEAVE(QUIC_EV_CONN_IO_CB, qc);

	return t;
}
+
+/* The task handling the idle timeout of a connection in "connection close" state */
+static struct task *quic_conn_closed_idle_timer_task(struct task *t, void *ctx, unsigned int state)
+{
+ struct quic_conn_closed *cc_qc = ctx;
+
+ quic_release_cc_conn(cc_qc);
+
+ return NULL;
+}
+
/* Allocate a new connection in "connection close" state and return it
 * if succeeded, NULL if not. This function is also responsible of
 * copying enough and the least possible information from <qc> original
 * connection to the newly allocated connection so that to keep it
 * functional until its idle timer expires.
 */
static struct quic_conn_closed *qc_new_cc_conn(struct quic_conn *qc)
{
	struct quic_conn_closed *cc_qc;

	cc_qc = pool_alloc(pool_head_quic_conn_closed);
	if (!cc_qc)
		return NULL;

	/* Transfer the CID tree ownership to the closing connection. */
	quic_conn_mv_cids_to_cc_conn(cc_qc, qc);

	qc_init_fd((struct quic_conn *)cc_qc);

	/* Flags, error and packet accounting state. */
	cc_qc->flags = qc->flags;
	cc_qc->err = qc->err;

	cc_qc->nb_pkt_for_cc = qc->nb_pkt_for_cc;
	cc_qc->nb_pkt_since_cc = qc->nb_pkt_since_cc;

	cc_qc->local_addr = qc->local_addr;
	cc_qc->peer_addr = qc->peer_addr;

	/* Reuse the original tasklet, rewired to the closing-state handler. */
	cc_qc->wait_event.tasklet = qc->wait_event.tasklet;
	cc_qc->wait_event.tasklet->process = quic_conn_closed_io_cb;
	cc_qc->wait_event.tasklet->context = cc_qc;
	cc_qc->wait_event.events = 0;
	cc_qc->subs = NULL;

	/* Byte counters, needed for the anti-amplification check. */
	cc_qc->bytes.prep = qc->bytes.prep;
	cc_qc->bytes.tx = qc->bytes.tx;
	cc_qc->bytes.rx = qc->bytes.rx;

	cc_qc->odcid = qc->odcid;
	cc_qc->dcid = qc->dcid;
	cc_qc->scid = qc->scid;

	cc_qc->li = qc->li;
	cc_qc->cids = qc->cids;

	/* Reuse the original idle timer task, rewired to the closing-state
	 * expiration handler which will release everything.
	 */
	cc_qc->idle_timer_task = qc->idle_timer_task;
	cc_qc->idle_timer_task->process = quic_conn_closed_idle_timer_task;
	cc_qc->idle_timer_task->context = cc_qc;
	cc_qc->idle_expire = qc->idle_expire;

	/* Steal the connection pointer so <qc> no longer owns it. */
	cc_qc->conn = qc->conn;
	qc->conn = NULL;

	/* Take over the pre-built CONNECTION_CLOSE datagram. */
	cc_qc->cc_buf_area = qc->tx.cc_buf_area;
	cc_qc->cc_dgram_len = qc->tx.cc_dgram_len;
	TRACE_PRINTF(TRACE_LEVEL_PROTO, QUIC_EV_CONN_IO_CB, qc, 0, 0, 0,
	             "switch qc@%p to cc_qc@%p", qc, cc_qc);

	return cc_qc;
}
+
/* QUIC connection packet handler task. Main I/O handler during the
 * handshake: feeds pending CRYPTO data to the TLS stack, performs requested
 * retransmissions, processes received packets, discards the Handshake packet
 * number space once the handshake completes, then prepares and sends packets.
 * May release the connection when it is closing without a ready MUX.
 */
struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
{
	int ret;
	struct quic_conn *qc = context;
	struct buffer *buf = NULL;
	int st;
	struct tasklet *tl = (struct tasklet *)t;

	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);

	st = qc->state;
	TRACE_PROTO("connection state", QUIC_EV_CONN_IO_CB, qc, &st);

	/* TASK_HEAVY signals pending CRYPTO data to hand to the TLS stack. */
	if (HA_ATOMIC_LOAD(&tl->state) & TASK_HEAVY) {
		HA_ATOMIC_AND(&tl->state, ~TASK_HEAVY);
		qc_ssl_provide_all_quic_data(qc, qc->xprt_ctx);
	}

	/* Retransmissions */
	if (qc->flags & QUIC_FL_CONN_RETRANS_NEEDED) {
		TRACE_DEVEL("retransmission needed", QUIC_EV_CONN_PHPKTS, qc);
		qc->flags &= ~QUIC_FL_CONN_RETRANS_NEEDED;
		if (!qc_dgrams_retransmit(qc))
			goto out;
	}

	if (qc_test_fd(qc))
		qc_rcv_buf(qc);

	if (!qc_treat_rx_pkts(qc))
		goto out;

	/* More CRYPTO data arrived: reschedule ourselves instead of doing
	 * the heavy TLS work in this run.
	 */
	if (HA_ATOMIC_LOAD(&tl->state) & TASK_HEAVY) {
		tasklet_wakeup(tl);
		goto out;
	}

	if (qc->flags & QUIC_FL_CONN_TO_KILL) {
		TRACE_DEVEL("connection to be killed", QUIC_EV_CONN_PHPKTS, qc);
		goto out;
	}

	/* A draining endpoint must not emit packets unless an immediate
	 * close is pending.
	 */
	if ((qc->flags & QUIC_FL_CONN_DRAINING) &&
	    !(qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE))
		goto out;

	st = qc->state;
	if (st >= QUIC_HS_ST_COMPLETE) {
		if (!(qc->flags & QUIC_FL_CONN_HPKTNS_DCD)) {
			/* Discard the Handshake packet number space. */
			TRACE_PROTO("discarding Handshake pktns", QUIC_EV_CONN_PHPKTS, qc);
			quic_pktns_discard(qc->hel->pktns, qc);
			qc_set_timer(qc);
			qc_el_rx_pkts_del(qc->hel);
			qc_release_pktns_frms(qc, qc->hel->pktns);
		}
	}

	buf = qc_get_txb(qc);
	if (!buf)
		goto out;

	if (b_data(buf) && !qc_purge_txbuf(qc, buf))
		goto out;

	/* Currently buf cannot be non-empty at this stage. Even if a previous
	 * sendto() has failed it is emptied to simulate packet emission and
	 * rely on QUIC lost detection to try to emit it.
	 */
	BUG_ON_HOT(b_data(buf));
	b_reset(buf);

	ret = qc_prep_hpkts(qc, buf, NULL);
	if (ret == -1) {
		qc_txb_release(qc);
		goto out;
	}

	if (ret && !qc_send_ppkts(buf, qc->xprt_ctx)) {
		if (qc->flags & QUIC_FL_CONN_TO_KILL)
			qc_txb_release(qc);
		goto out;
	}

	qc_txb_release(qc);

 out:
	/* Release the Handshake encryption level and packet number space if
	 * the Handshake is confirmed and if there is no need to send
	 * anymore Handshake packets.
	 */
	if (quic_tls_pktns_is_dcd(qc, qc->hpktns) &&
	    !qc_need_sending(qc, qc->hel)) {
		/* Ensure Initial packet encryption level and packet number space have
		 * been released.
		 */
		qc_enc_level_free(qc, &qc->iel);
		quic_pktns_release(qc, &qc->ipktns);
		qc_enc_level_free(qc, &qc->hel);
		quic_pktns_release(qc, &qc->hpktns);
		/* Also release the negotiated Initial TLS context. */
		quic_nictx_free(qc);
	}

	if ((qc->flags & QUIC_FL_CONN_CLOSING) && qc->mux_state != QC_MUX_READY) {
		quic_conn_release(qc);
		qc = NULL;
	}

	/* NOTE(review): this trace fires on every exit path, not only on SSL
	 * errors; the "ssl error" label looks misleading -- confirm intent.
	 */
	TRACE_PROTO("ssl error", QUIC_EV_CONN_IO_CB, qc, &st);
	TRACE_LEAVE(QUIC_EV_CONN_IO_CB, qc);
	return t;
}
+
+/* Callback called upon loss detection and PTO timer expirations.
+ *
+ * Two mutually exclusive events are handled here:
+ *   - the loss-detection timer fired: declare lost packets and requeue them;
+ *   - the PTO timer fired: flag the relevant packet number space(s) so that
+ *     probe packets are (re)emitted by the I/O handler.
+ * Always returns <task> (the timer task is never destroyed here).
+ */
+struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
+{
+	struct quic_conn *qc = ctx;
+	struct quic_pktns *pktns;
+
+	TRACE_ENTER(QUIC_EV_CONN_PTIMER, qc);
+	TRACE_PROTO("process timer", QUIC_EV_CONN_PTIMER, qc,
+	            NULL, NULL, &qc->path->ifae_pkts);
+
+	task->expire = TICK_ETERNITY;
+	pktns = quic_loss_pktns(qc);
+
+	/* No probing/retransmission on a dying connection. */
+	if (qc->flags & (QUIC_FL_CONN_DRAINING|QUIC_FL_CONN_TO_KILL)) {
+		TRACE_PROTO("cancelled action (draining state)", QUIC_EV_CONN_PTIMER, qc);
+		goto out;
+	}
+
+	/* A set loss_time means the loss-detection timer fired, not the PTO. */
+	if (tick_isset(pktns->tx.loss_time)) {
+		struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
+
+		qc_packet_loss_lookup(pktns, qc, &lost_pkts);
+		/* Wake the I/O handler so the lost frames are retransmitted. */
+		if (!LIST_ISEMPTY(&lost_pkts))
+			tasklet_wakeup(qc->wait_event.tasklet);
+		if (qc_release_lost_pkts(qc, pktns, &lost_pkts, now_ms))
+			qc_set_timer(qc);
+		goto out;
+	}
+
+	if (qc->path->in_flight) {
+		/* PTO expired: pick the packet number space to probe. */
+		pktns = quic_pto_pktns(qc, qc->state >= QUIC_HS_ST_CONFIRMED, NULL);
+		if (!pktns->tx.in_flight) {
+			TRACE_PROTO("No in flight packets to probe with", QUIC_EV_CONN_TXPKT, qc);
+			goto out;
+		}
+
+		if (pktns == qc->ipktns) {
+			if (qc_may_probe_ipktns(qc)) {
+				qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
+				pktns->flags |= QUIC_FL_PKTNS_PROBE_NEEDED;
+				TRACE_STATE("needs to probe Initial packet number space", QUIC_EV_CONN_TXPKT, qc);
+			}
+			else {
+				TRACE_STATE("Cannot probe Initial packet number space", QUIC_EV_CONN_TXPKT, qc);
+			}
+			/* Also probe Handshake if it has in-flight data. */
+			if (qc->hpktns && qc->hpktns->tx.in_flight) {
+				qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
+				qc->hpktns->flags |= QUIC_FL_PKTNS_PROBE_NEEDED;
+				TRACE_STATE("needs to probe Handshake packet number space", QUIC_EV_CONN_TXPKT, qc);
+			}
+		}
+		else if (pktns == qc->hpktns) {
+			TRACE_STATE("needs to probe Handshake packet number space", QUIC_EV_CONN_TXPKT, qc);
+			qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
+			pktns->flags |= QUIC_FL_PKTNS_PROBE_NEEDED;
+			if (qc->ipktns && qc->ipktns->tx.in_flight) {
+				if (qc_may_probe_ipktns(qc)) {
+					qc->ipktns->flags |= QUIC_FL_PKTNS_PROBE_NEEDED;
+					TRACE_STATE("needs to probe Initial packet number space", QUIC_EV_CONN_TXPKT, qc);
+				}
+				else {
+					TRACE_STATE("Cannot probe Initial packet number space", QUIC_EV_CONN_TXPKT, qc);
+				}
+			}
+		}
+		else if (pktns == qc->apktns) {
+			pktns->tx.pto_probe = QUIC_MAX_NB_PTO_DGRAMS;
+			/* Wake up upper layer if waiting to send new data. Only
+			 * probe ourselves if the upper layer was not woken up.
+			 */
+			if (!qc_notify_send(qc)) {
+				TRACE_STATE("needs to probe 01RTT packet number space", QUIC_EV_CONN_TXPKT, qc);
+				qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
+				pktns->flags |= QUIC_FL_PKTNS_PROBE_NEEDED;
+			}
+		}
+	}
+	else if (!qc_is_listener(qc) && qc->state <= QUIC_HS_ST_COMPLETE) {
+		/* Client side, nothing in flight during handshake: send
+		 * anti-deadlock probes on the levels with TX keys.
+		 */
+		if (quic_tls_has_tx_sec(qc->hel))
+			qc->hel->pktns->tx.pto_probe = 1;
+		if (quic_tls_has_tx_sec(qc->iel))
+			qc->iel->pktns->tx.pto_probe = 1;
+	}
+
+	tasklet_wakeup(qc->wait_event.tasklet);
+	/* Exponential PTO backoff is derived from this counter. */
+	qc->path->loss.pto_count++;
+
+ out:
+	TRACE_PROTO("process timer", QUIC_EV_CONN_PTIMER, qc, pktns);
+	TRACE_LEAVE(QUIC_EV_CONN_PTIMER, qc);
+
+	return task;
+}
+
+/* Allocate a new QUIC connection with <version> as QUIC version. <ipv4>
+ * boolean is set to 1 for IPv4 connection, 0 for IPv6. <server> is set to 1
+ * for QUIC servers (or haproxy listeners).
+ * <dcid> is the destination connection ID, <scid> is the source connection ID.
+ * This latter <scid> CID has the same value on the wire as the one for <conn_id>
+ * which is the first CID of this connection but a different internal representation used to build
+ * NEW_CONNECTION_ID frames. This is the responsibility of the caller to insert
+ * <conn_id> in the CIDs tree for this connection (qc->cids).
+ * <token> is the token found to be used for this connection with <token_len> as
+ * length. Endpoints addresses are specified via <local_addr> and <peer_addr>.
+ * Returns the connection if succeeded, NULL if not.
+ */
+struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
+                              struct quic_cid *dcid, struct quic_cid *scid,
+                              const struct quic_cid *token_odcid,
+                              struct quic_connection_id *conn_id,
+                              struct sockaddr_storage *local_addr,
+                              struct sockaddr_storage *peer_addr,
+                              int server, int token, void *owner)
+{
+	int i;
+	struct quic_conn *qc = NULL;
+	struct listener *l = server ? owner : NULL;
+	struct proxy *prx = l ? l->bind_conf->frontend : NULL;
+	struct quic_cc_algo *cc_algo = NULL;
+	/* Non-zero values below mean "this counter was incremented but not yet
+	 * handed over to quic_conn_release() accounting"; see the err label.
+	 */
+	unsigned int next_actconn = 0, next_sslconn = 0, next_handshake = 0;
+
+	TRACE_ENTER(QUIC_EV_CONN_INIT);
+
+	next_actconn = increment_actconn();
+	if (!next_actconn) {
+		_HA_ATOMIC_INC(&maxconn_reached);
+		TRACE_STATE("maxconn reached", QUIC_EV_CONN_INIT);
+		goto err;
+	}
+
+	next_sslconn = increment_sslconn();
+	if (!next_sslconn) {
+		TRACE_STATE("sslconn reached", QUIC_EV_CONN_INIT);
+		goto err;
+	}
+
+	if (server) {
+		next_handshake = quic_increment_curr_handshake(l);
+		if (!next_handshake) {
+			TRACE_STATE("max handshake reached", QUIC_EV_CONN_INIT);
+			goto err;
+		}
+	}
+
+	qc = pool_alloc(pool_head_quic_conn);
+	if (!qc) {
+		TRACE_ERROR("Could not allocate a new connection", QUIC_EV_CONN_INIT);
+		goto err;
+	}
+
+	/* Now that quic_conn instance is allocated, quic_conn_release() will
+	 * ensure global accounting is decremented.
+	 */
+	next_handshake = next_sslconn = next_actconn = 0;
+
+	/* Initialize in priority qc members required for a safe dealloc. */
+	qc->nictx = NULL;
+	/* Prevents these CID to be dumped by TRACE() calls */
+	qc->scid.len = qc->odcid.len = qc->dcid.len = 0;
+	/* required to use MTLIST_IN_LIST */
+	MT_LIST_INIT(&qc->accept_list);
+
+	LIST_INIT(&qc->rx.pkt_list);
+
+	qc->streams_by_id = EB_ROOT_UNIQUE;
+
+	/* Required to call free_quic_conn_cids() from quic_conn_release() */
+	qc->cids = NULL;
+	qc->tx.cc_buf_area = NULL;
+	qc_init_fd(qc);
+
+	LIST_INIT(&qc->back_refs);
+	LIST_INIT(&qc->el_th_ctx);
+
+	qc->wait_event.tasklet = NULL;
+
+	/* Required to destroy <qc> tasks from quic_conn_release() */
+	qc->timer_task = NULL;
+	qc->idle_timer_task = NULL;
+
+	qc->xprt_ctx = NULL;
+	qc->conn = NULL;
+	qc->qcc = NULL;
+	qc->app_ops = NULL;
+	qc->path = NULL;
+
+	/* Keyupdate: required to safely call quic_tls_ku_free() from
+	 * quic_conn_release().
+	 */
+	quic_tls_ku_reset(&qc->ku.prv_rx);
+	quic_tls_ku_reset(&qc->ku.nxt_rx);
+	quic_tls_ku_reset(&qc->ku.nxt_tx);
+
+	/* Encryption levels */
+	qc->iel = qc->eel = qc->hel = qc->ael = NULL;
+	LIST_INIT(&qc->qel_list);
+	/* Packet number spaces */
+	qc->ipktns = qc->hpktns = qc->apktns = NULL;
+	LIST_INIT(&qc->pktns_list);
+
+	/* Required to safely call quic_conn_prx_cntrs_update() from quic_conn_release(). */
+	qc->prx_counters = NULL;
+
+	/* QUIC Server (or listener). */
+	if (server) {
+		cc_algo = l->bind_conf->quic_cc_algo;
+
+		qc->prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
+		                                      &quic_stats_module);
+		qc->flags = QUIC_FL_CONN_LISTENER;
+		qc->state = QUIC_HS_ST_SERVER_INITIAL;
+		/* Copy the client original DCID. */
+		qc->odcid = *dcid;
+		/* Copy the packet SCID to reuse it as DCID for sending */
+		qc->dcid = *scid;
+		qc->tx.buf = BUF_NULL;
+		qc->li = l;
+	}
+	/* QUIC Client (outgoing connection to servers) */
+	else {
+		qc->state = QUIC_HS_ST_CLIENT_INITIAL;
+		if (dcid->len)
+			memcpy(qc->dcid.data, dcid->data, dcid->len);
+		qc->dcid.len = dcid->len;
+		qc->li = NULL;
+	}
+	qc->mux_state = QC_MUX_NULL;
+	qc->err = quic_err_transport(QC_ERR_NO_ERROR);
+
+	/* If connection is instantiated due to an INITIAL packet with an
+	 * already checked token, consider the peer address as validated.
+	 */
+	if (token_odcid->len) {
+		TRACE_STATE("validate peer address due to initial token",
+		            QUIC_EV_CONN_INIT, qc);
+		qc->flags |= QUIC_FL_CONN_PEER_VALIDATED_ADDR;
+	}
+	else {
+		/* NOTE(review): <prx_counters> is NULL on the client path
+		 * (server == 0); this increment assumes server-side usage —
+		 * confirm clients cannot reach this branch.
+		 */
+		HA_ATOMIC_INC(&qc->prx_counters->half_open_conn);
+	}
+
+	/* Now proceeds to allocation of qc members. */
+	qc->rx.buf.area = pool_alloc(pool_head_quic_conn_rxbuf);
+	if (!qc->rx.buf.area) {
+		TRACE_ERROR("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+
+	qc->cids = pool_alloc(pool_head_quic_cids);
+	if (!qc->cids) {
+		TRACE_ERROR("Could not allocate a new CID tree", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+	*qc->cids = EB_ROOT;
+
+	conn_id->qc = qc;
+
+	/* NOTE(review): <l> is NULL when server == 0, so this dereference
+	 * assumes listener-side usage only — confirm for client connections.
+	 */
+	if (HA_ATOMIC_LOAD(&l->rx.quic_mode) == QUIC_SOCK_MODE_CONN &&
+	    (global.tune.options & GTUNE_QUIC_SOCK_PER_CONN) &&
+	    is_addr(local_addr)) {
+		TRACE_USER("Allocate a socket for QUIC connection", QUIC_EV_CONN_INIT, qc);
+		qc_alloc_fd(qc, local_addr, peer_addr);
+
+		/* haproxy soft-stop is supported only for QUIC connections
+		 * with their owned socket.
+		 */
+		if (qc_test_fd(qc))
+			_HA_ATOMIC_INC(&jobs);
+	}
+
+	/* Select our SCID which is the first CID with 0 as sequence number. */
+	qc->scid = conn_id->cid;
+
+	if (!qc_enc_level_alloc(qc, &qc->ipktns, &qc->iel, ssl_encryption_initial)) {
+		TRACE_ERROR("Could not initialize an encryption level", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+
+	qc->original_version = qv;
+	qc->negotiated_version = NULL;
+	/* Draft versions (0xff......) use the draft TLS extension codepoint. */
+	qc->tps_tls_ext = (qc->original_version->num & 0xff000000) == 0xff000000 ?
+		TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS_DRAFT:
+		TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS;
+	/* TX part. */
+	qc->bytes.tx = qc->bytes.prep = 0;
+	memset(&qc->tx.params, 0, sizeof(qc->tx.params));
+	qc->tx.buf = BUF_NULL;
+	qc->tx.cc_buf = BUF_NULL;
+	qc->tx.cc_buf_area = NULL;
+	qc->tx.cc_dgram_len = 0;
+	/* RX part. */
+	qc->bytes.rx = 0;
+	memset(&qc->rx.params, 0, sizeof(qc->rx.params));
+	qc->rx.buf = b_make(qc->rx.buf.area, QUIC_CONN_RX_BUFSZ, 0, 0);
+	for (i = 0; i < QCS_MAX_TYPES; i++)
+		qc->rx.strms[i].nb_streams = 0;
+
+	qc->nb_pkt_for_cc = 1;
+	qc->nb_pkt_since_cc = 0;
+
+	if (!quic_tls_ku_init(qc)) {
+		TRACE_ERROR("Key update initialization failed", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+
+	qc->max_ack_delay = 0;
+	/* Only one path at this time (multipath not supported) */
+	qc->path = &qc->paths[0];
+	quic_cc_path_init(qc->path, ipv4, server ? l->bind_conf->max_cwnd : 0,
+	                  cc_algo ? cc_algo : default_quic_cc_algo, qc);
+
+	qc->stream_buf_count = 0;
+	memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));
+	memcpy(&qc->peer_addr, peer_addr, sizeof qc->peer_addr);
+
+	if (server && !qc_lstnr_params_init(qc, &l->bind_conf->quic_params,
+	                                    conn_id->stateless_reset_token,
+	                                    dcid->data, dcid->len,
+	                                    qc->scid.data, qc->scid.len, token_odcid))
+		goto err;
+
+	/* Initialize the idle timeout of the connection at the "max_idle_timeout"
+	 * value from local transport parameters.
+	 */
+	qc->max_idle_timeout = qc->rx.params.max_idle_timeout;
+	qc->wait_event.tasklet = tasklet_new();
+	if (!qc->wait_event.tasklet) {
+		TRACE_ERROR("tasklet_new() failed", QUIC_EV_CONN_TXPKT);
+		goto err;
+	}
+	qc->wait_event.tasklet->process = quic_conn_io_cb;
+	qc->wait_event.tasklet->context = qc;
+	qc->wait_event.events = 0;
+	qc->subs = NULL;
+
+	if (qc_alloc_ssl_sock_ctx(qc) ||
+	    !quic_conn_init_timer(qc) ||
+	    !quic_conn_init_idle_timer_task(qc, prx))
+		goto err;
+
+	/* Derive the Initial secrets from the client original DCID. */
+	if (!qc_new_isecs(qc, &qc->iel->tls_ctx, qc->original_version, dcid->data, dcid->len, 1))
+		goto err;
+
+	/* Counters initialization */
+	memset(&qc->cntrs, 0, sizeof qc->cntrs);
+
+	/* Make the connection visible to "show quic" on this thread. */
+	LIST_APPEND(&th_ctx->quic_conns, &qc->el_th_ctx);
+	qc->qc_epoch = HA_ATOMIC_LOAD(&qc_epoch);
+
+	TRACE_LEAVE(QUIC_EV_CONN_INIT, qc);
+
+	return qc;
+
+ err:
+	quic_conn_release(qc);
+
+	/* Decrement global counters. Done only for errors happening before or
+	 * on pool_head_quic_conn alloc. All other cases are covered by
+	 * quic_conn_release().
+	 */
+	if (next_actconn)
+		_HA_ATOMIC_DEC(&actconn);
+	if (next_sslconn)
+		_HA_ATOMIC_DEC(&global.sslconns);
+	if (next_handshake)
+		_HA_ATOMIC_DEC(&l->rx.quic_curr_handshake);
+
+	TRACE_LEAVE(QUIC_EV_CONN_INIT);
+	return NULL;
+}
+
+/* React to a connection migration initiated on <qc> by a client with the new
+ * path addresses <peer_addr>/<local_addr>.
+ *
+ * The migration is rejected (datagram dropped by the caller) if active
+ * migration was disabled via transport parameters or if the handshake is not
+ * yet complete.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int qc_handle_conn_migration(struct quic_conn *qc,
+                             const struct sockaddr_storage *peer_addr,
+                             const struct sockaddr_storage *local_addr)
+{
+	TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);
+
+	/* RFC 9000. Connection Migration
+	 *
+	 * If the peer sent the disable_active_migration transport parameter,
+	 * an endpoint also MUST NOT send packets (including probing packets;
+	 * see Section 9.1) from a different local address to the address the peer
+	 * used during the handshake, unless the endpoint has acted on a
+	 * preferred_address transport parameter from the peer.
+	 */
+	if (qc->li->bind_conf->quic_params.disable_active_migration) {
+		TRACE_ERROR("Active migration was disabled, datagram dropped", QUIC_EV_CONN_LPKT, qc);
+		goto err;
+	}
+
+	/* RFC 9000 9. Connection Migration
+	 *
+	 * The design of QUIC relies on endpoints retaining a stable address for
+	 * the duration of the handshake. An endpoint MUST NOT initiate
+	 * connection migration before the handshake is confirmed, as defined in
+	 * Section 4.1.2 of [QUIC-TLS].
+	 */
+	if (qc->state < QUIC_HS_ST_COMPLETE) {
+		TRACE_STATE("Connection migration during handshake rejected", QUIC_EV_CONN_LPKT, qc);
+		goto err;
+	}
+
+	/* RFC 9000 9. Connection Migration
+	 *
+	 * TODO
+	 * An endpoint MUST
+	 * perform path validation (Section 8.2) if it detects any change to a
+	 * peer's address, unless it has previously validated that address.
+	 */
+
+	/* Update quic-conn owned socket if in used.
+	 * TODO try to reuse it instead of closing and opening a new one.
+	 */
+	if (qc_test_fd(qc)) {
+		/* TODO try to reuse socket instead of closing it and opening a new one. */
+		TRACE_STATE("Connection migration detected, allocate a new connection socket", QUIC_EV_CONN_LPKT, qc);
+		qc_release_fd(qc, 1);
+		/* TODO need to adjust <jobs> on socket allocation failure. */
+		qc_alloc_fd(qc, local_addr, peer_addr);
+	}
+
+	/* Switch the connection to the new path. */
+	qc->local_addr = *local_addr;
+	qc->peer_addr = *peer_addr;
+	qc->cntrs.conn_migration_done++;
+
+	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
+	return 0;
+
+ err:
+	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
+	return 1;
+}
+
+
+/* Update the proxy counters of <qc> QUIC connection from its counters.
+ *
+ * Called at connection release time to flush the per-connection counters
+ * into the frontend's shared (atomic) statistics. A no-op when the proxy
+ * counters were never attached (e.g. early allocation failure).
+ */
+static inline void quic_conn_prx_cntrs_update(struct quic_conn *qc)
+{
+	if (!qc->prx_counters)
+		return;
+
+	HA_ATOMIC_ADD(&qc->prx_counters->dropped_pkt, qc->cntrs.dropped_pkt);
+	HA_ATOMIC_ADD(&qc->prx_counters->dropped_pkt_bufoverrun, qc->cntrs.dropped_pkt_bufoverrun);
+	HA_ATOMIC_ADD(&qc->prx_counters->dropped_parsing, qc->cntrs.dropped_parsing);
+	HA_ATOMIC_ADD(&qc->prx_counters->socket_full, qc->cntrs.socket_full);
+	HA_ATOMIC_ADD(&qc->prx_counters->sendto_err, qc->cntrs.sendto_err);
+	HA_ATOMIC_ADD(&qc->prx_counters->sendto_err_unknown, qc->cntrs.sendto_err_unknown);
+	HA_ATOMIC_ADD(&qc->prx_counters->sent_pkt, qc->cntrs.sent_pkt);
+	/* It is possible that ->path was not initialized. For instance if a
+	 * QUIC connection allocation has failed.
+	 */
+	if (qc->path)
+		HA_ATOMIC_ADD(&qc->prx_counters->lost_pkt, qc->path->loss.nb_lost_pkt);
+	HA_ATOMIC_ADD(&qc->prx_counters->conn_migration_done, qc->cntrs.conn_migration_done);
+	/* Stream related counters */
+	HA_ATOMIC_ADD(&qc->prx_counters->data_blocked, qc->cntrs.data_blocked);
+	HA_ATOMIC_ADD(&qc->prx_counters->stream_data_blocked, qc->cntrs.stream_data_blocked);
+	HA_ATOMIC_ADD(&qc->prx_counters->streams_blocked_bidi, qc->cntrs.streams_blocked_bidi);
+	HA_ATOMIC_ADD(&qc->prx_counters->streams_blocked_uni, qc->cntrs.streams_blocked_uni);
+}
+
+/* Release the quic_conn <qc>. The connection is removed from the CIDs tree.
+ * The connection tasklet is killed.
+ *
+ * If the connection is in the closing state with a prepared CONNECTION_CLOSE
+ * datagram, a lightweight quic_conn_closed instance takes over some of its
+ * resources (timer task, tasklet, CIDs, CC buffer) instead of freeing them.
+ *
+ * This function must only be called by the thread responsible of the quic_conn
+ * tasklet.
+ */
+void quic_conn_release(struct quic_conn *qc)
+{
+	struct eb64_node *node;
+	struct quic_rx_packet *pkt, *pktback;
+	struct quic_conn_closed *cc_qc;
+
+	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+	/* Safe to call with a NULL connection (e.g. from qc_new_conn() error path). */
+	if (!qc)
+		goto leave;
+
+	/* We must not free the quic-conn if the MUX is still allocated. */
+	BUG_ON(qc->mux_state == QC_MUX_READY);
+
+	/* Try to hand over closing duties to a lighter quic_conn_closed. */
+	cc_qc = NULL;
+	if ((qc->flags & QUIC_FL_CONN_CLOSING) && !(qc->flags & QUIC_FL_CONN_EXP_TIMER) &&
+	    qc->tx.cc_buf_area)
+		cc_qc = qc_new_cc_conn(qc);
+
+	/* Without a closing connection, these resources are freed here;
+	 * otherwise qc_new_cc_conn() took ownership of them.
+	 */
+	if (!cc_qc) {
+		task_destroy(qc->idle_timer_task);
+		qc->idle_timer_task = NULL;
+		tasklet_free(qc->wait_event.tasklet);
+		/* remove the connection from receiver cids trees */
+		free_quic_conn_cids(qc);
+		pool_free(pool_head_quic_cids, qc->cids);
+		qc->cids = NULL;
+		pool_free(pool_head_quic_cc_buf, qc->tx.cc_buf_area);
+		qc->tx.cc_buf_area = NULL;
+	}
+
+	/* Connections with their own socket count as jobs for soft-stop. */
+	if (qc_test_fd(qc))
+		_HA_ATOMIC_DEC(&jobs);
+
+	/* Close quic-conn socket fd. */
+	qc_release_fd(qc, 0);
+
+	/* in the unlikely (but possible) case the connection was just added to
+	 * the accept_list we must delete it from there.
+	 */
+	if (MT_LIST_INLIST(&qc->accept_list)) {
+		MT_LIST_DELETE(&qc->accept_list);
+		BUG_ON(qc->li->rx.quic_curr_accept == 0);
+		HA_ATOMIC_DEC(&qc->li->rx.quic_curr_accept);
+	}
+
+	/* free remaining stream descriptors */
+	node = eb64_first(&qc->streams_by_id);
+	while (node) {
+		struct qc_stream_desc *stream;
+
+		stream = eb64_entry(node, struct qc_stream_desc, by_id);
+		node = eb64_next(node);
+
+		/* all streams attached to the quic-conn are released, so
+		 * qc_stream_desc_free will liberate the stream instance.
+		 */
+		BUG_ON(!stream->release);
+		qc_stream_desc_free(stream, 1);
+	}
+
+	/* free the SSL sock context */
+	qc_free_ssl_sock_ctx(&qc->xprt_ctx);
+	/* Purge Rx packet list. */
+	list_for_each_entry_safe(pkt, pktback, &qc->rx.pkt_list, qc_rx_pkt_list) {
+		LIST_DELETE(&pkt->qc_rx_pkt_list);
+		pool_free(pool_head_quic_rx_packet, pkt);
+	}
+
+	task_destroy(qc->timer_task);
+	qc->timer_task = NULL;
+
+	/* TLS material: key update secrets, then every encryption level and
+	 * packet number space, then the negotiated Initial context.
+	 */
+	quic_tls_ku_free(qc);
+	if (qc->ael) {
+		struct quic_tls_ctx *actx = &qc->ael->tls_ctx;
+
+		/* Secrets used by keyupdate */
+		pool_free(pool_head_quic_tls_secret, actx->rx.secret);
+		pool_free(pool_head_quic_tls_secret, actx->tx.secret);
+	}
+
+	qc_enc_level_free(qc, &qc->iel);
+	qc_enc_level_free(qc, &qc->eel);
+	qc_enc_level_free(qc, &qc->hel);
+	qc_enc_level_free(qc, &qc->ael);
+
+	quic_tls_ctx_free(&qc->nictx);
+
+	quic_pktns_release(qc, &qc->ipktns);
+	quic_pktns_release(qc, &qc->hpktns);
+	quic_pktns_release(qc, &qc->apktns);
+
+	qc_detach_th_ctx_list(qc, 0);
+
+	/* Flush per-connection counters into the proxy before freeing. */
+	quic_conn_prx_cntrs_update(qc);
+	pool_free(pool_head_quic_conn_rxbuf, qc->rx.buf.area);
+	qc->rx.buf.area = NULL;
+
+	/* Connection released before peer address validated. */
+	if (unlikely(!(qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR))) {
+		BUG_ON(!qc->prx_counters->half_open_conn);
+		HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
+	}
+
+	/* Connection released before handshake completion. */
+	if (unlikely(qc->state < QUIC_HS_ST_COMPLETE)) {
+		if (qc_is_listener(qc)) {
+			BUG_ON(qc->li->rx.quic_curr_handshake == 0);
+			HA_ATOMIC_DEC(&qc->li->rx.quic_curr_handshake);
+		}
+	}
+
+	pool_free(pool_head_quic_conn, qc);
+	/* <qc> is NULL from here on; the trace below deliberately passes NULL. */
+	qc = NULL;
+
+	/* Decrement global counters when quic_conn is deallocated.
+	 * quic_conn_closed instances are not accounted as they run for a short
+	 * time with limited resources.
+	 */
+	_HA_ATOMIC_DEC(&actconn);
+	_HA_ATOMIC_DEC(&global.sslconns);
+
+	TRACE_PROTO("QUIC conn. freed", QUIC_EV_CONN_FREED, qc);
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
+/* Initialize the timer task of <qc> QUIC connection.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_conn_init_timer(struct quic_conn *qc)
+{
+	struct task *t;
+	int ret = 0;
+
+	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+	/* Attach this task to the same thread ID used for the connection */
+	t = task_new_here();
+	if (!t) {
+		TRACE_ERROR("timer task allocation failed", QUIC_EV_CONN_NEW, qc);
+		goto leave;
+	}
+
+	t->process = qc_process_timer;
+	t->context = qc;
+	qc->timer_task = t;
+	qc->timer = TICK_ETERNITY;
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+	return ret;
+}
+
+/* Rearm the idle timer or the ack timer (if not already armed) for <qc> QUIC
+ * connection. */
+void qc_idle_timer_do_rearm(struct quic_conn *qc, int arm_ack)
+{
+	unsigned int expire;
+
+	/* It is possible the idle timer task has been already released. */
+	if (!qc->idle_timer_task)
+		return;
+
+	if (qc->flags & (QUIC_FL_CONN_CLOSING|QUIC_FL_CONN_DRAINING)) {
+		/* RFC 9000 10.2. Immediate Close
+		 *
+		 * The closing and draining connection states exist to ensure that
+		 * connections close cleanly and that delayed or reordered packets are
+		 * properly discarded. These states SHOULD persist for at least three
+		 * times the current PTO interval as defined in [QUIC-RECOVERY].
+		 */
+
+		/* Delay is limited to 1s which should cover most of network
+		 * conditions. The process should not be impacted by a
+		 * connection with a high RTT.
+		 */
+		expire = MIN(3 * quic_pto(qc), 1000);
+	}
+	else {
+		/* RFC 9000 10.1. Idle Timeout
+		 *
+		 * To avoid excessively small idle timeout periods, endpoints MUST
+		 * increase the idle timeout period to be at least three times the
+		 * current Probe Timeout (PTO). This allows for multiple PTOs to expire,
+		 * and therefore multiple probes to be sent and lost, prior to idle
+		 * timeout.
+		 */
+		expire = QUIC_MAX(3 * quic_pto(qc), qc->max_idle_timeout);
+	}
+
+	qc->idle_expire = tick_add(now_ms, MS_TO_TICKS(expire));
+	/* Note that the ACK timer is not armed during the handshake. So,
+	 * the handshake expiration date is taken into an account only
+	 * when <arm_ack> is false.
+	 */
+	if (arm_ack) {
+		/* Arm the ack timer only if not already armed. */
+		if (!tick_isset(qc->ack_expire)) {
+			qc->ack_expire = tick_add(now_ms, MS_TO_TICKS(QUIC_ACK_DELAY));
+			qc->idle_timer_task->expire = qc->ack_expire;
+			task_queue(qc->idle_timer_task);
+			TRACE_PROTO("ack timer armed", QUIC_EV_CONN_IDLE_TIMER, qc);
+		}
+	}
+	else {
+		/* The single idle_timer_task serves both the ack timer and the
+		 * idle/handshake timeout: queue it at the earliest deadline.
+		 */
+		qc->idle_timer_task->expire = tick_first(qc->ack_expire, qc->idle_expire);
+		if (qc->state < QUIC_HS_ST_COMPLETE)
+			qc->idle_timer_task->expire = tick_first(qc->hs_expire, qc->idle_expire);
+		task_queue(qc->idle_timer_task);
+		TRACE_PROTO("idle timer armed", QUIC_EV_CONN_IDLE_TIMER, qc);
+	}
+}
+
+/* Rearm the idle timer or ack timer for <qc> QUIC connection depending on <read>
+ * and <arm_ack> booleans. The former is set to 1 when receiving a packet,
+ * and 0 when sending a packet. <arm_ack> is set to 1 if this is the ack timer
+ * which must be rearmed.
+ */
+void qc_idle_timer_rearm(struct quic_conn *qc, int read, int arm_ack)
+{
+	TRACE_ENTER(QUIC_EV_CONN_IDLE_TIMER, qc);
+
+	/* Record whether the last rearm was caused by an incoming packet. */
+	if (read) {
+		qc->flags |= QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ;
+	}
+	else {
+		qc->flags &= ~QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ;
+	}
+	qc_idle_timer_do_rearm(qc, arm_ack);
+
+	TRACE_LEAVE(QUIC_EV_CONN_IDLE_TIMER, qc);
+}
+
+/* The task handling the idle timeout.
+ *
+ * This single task multiplexes the delayed-ACK timer and the idle/handshake
+ * timeout. On genuine idle expiration the connection is released (or handed
+ * to the MUX for release) and NULL is returned so the task is not requeued.
+ */
+struct task *qc_idle_timer_task(struct task *t, void *ctx, unsigned int state)
+{
+	struct quic_conn *qc = ctx;
+
+	TRACE_ENTER(QUIC_EV_CONN_IDLE_TIMER, qc);
+
+	/* Spurious wakeup: the deadline was pushed back after queueing. */
+	if ((state & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER && !tick_is_expired(t->expire, now_ms))
+		goto requeue;
+
+	if (tick_is_expired(qc->ack_expire, now_ms)) {
+		TRACE_PROTO("ack timer expired", QUIC_EV_CONN_IDLE_TIMER, qc);
+		qc->ack_expire = TICK_ETERNITY;
+		/* Note that ->idle_expire is always set. */
+		t->expire = qc->idle_expire;
+		/* Do not wakeup the I/O handler in DRAINING state or if the
+		 * connection must be killed as soon as possible.
+		 */
+		if (!(qc->flags & (QUIC_FL_CONN_DRAINING|QUIC_FL_CONN_TO_KILL))) {
+			qc->flags |= QUIC_FL_CONN_ACK_TIMER_FIRED;
+			tasklet_wakeup(qc->wait_event.tasklet);
+		}
+
+		goto requeue;
+	}
+
+	TRACE_PROTO("idle timer task running", QUIC_EV_CONN_IDLE_TIMER, qc);
+	/* Notify the MUX before settings QUIC_FL_CONN_EXP_TIMER or the MUX
+	 * might free the quic-conn too early via quic_close().
+	 */
+	qc_notify_err(qc);
+
+	/* If the MUX is still alive, keep the quic-conn. The MUX is
+	 * responsible to call quic_close to release it.
+	 */
+	qc->flags |= QUIC_FL_CONN_EXP_TIMER;
+	if (qc->mux_state != QC_MUX_READY) {
+		quic_conn_release(qc);
+		qc = NULL;
+	}
+	else {
+		/* MUX still owns the connection: only drop the timer task. */
+		task_destroy(t);
+		qc->idle_timer_task = NULL;
+	}
+
+	/* Returning NULL tells the scheduler the task is gone. */
+	t = NULL;
+
+	/* TODO if the quic-conn cannot be freed because of the MUX, we may at
+	 * least clean some parts of it such as the tasklet.
+	 */
+
+ requeue:
+	TRACE_LEAVE(QUIC_EV_CONN_IDLE_TIMER, qc);
+	return t;
+}
+
+/* Initialize the idle timeout task for <qc>.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_conn_init_idle_timer_task(struct quic_conn *qc,
+                                          struct proxy *px)
+{
+	struct task *t;
+	int timeout;
+	int ret = 0;
+
+	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+	/* The handshake timeout falls back on the regular client timeout. */
+	timeout = px->timeout.client_hs ? px->timeout.client_hs : px->timeout.client;
+
+	t = task_new_here();
+	if (!t) {
+		TRACE_ERROR("Idle timer task allocation failed", QUIC_EV_CONN_NEW, qc);
+		goto leave;
+	}
+
+	t->process = qc_idle_timer_task;
+	t->context = qc;
+	qc->idle_timer_task = t;
+	qc->ack_expire = TICK_ETERNITY;
+	qc->hs_expire = tick_add_ifset(now_ms, MS_TO_TICKS(timeout));
+	qc_idle_timer_rearm(qc, 1, 0);
+	task_queue(qc->idle_timer_task);
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+	return ret;
+}
+
+/* Return the QUIC version (quic_version struct) with <version> as version number
+ * if supported or NULL if not.
+ */
+const struct quic_version *qc_supported_version(uint32_t version)
+{
+	const struct quic_version *ver;
+	const struct quic_version *end = quic_versions + quic_versions_nb;
+
+	/* A null version number denotes the reserved version-negotiation entry. */
+	if (unlikely(!version))
+		return &quic_version_VN_reserved;
+
+	for (ver = quic_versions; ver < end; ver++) {
+		if (ver->num == version)
+			return ver;
+	}
+
+	return NULL;
+}
+
+/* Check if connection ID <dcid> of length <dcid_len> belongs to <qc> local
+ * CIDs. This can be used to determine if a datagram is addressed to the right
+ * connection instance.
+ *
+ * Returns a boolean value.
+ */
+int qc_check_dcid(struct quic_conn *qc, unsigned char *dcid, size_t dcid_len)
+{
+	struct quic_cid_tree *tree = &quic_cid_trees[_quic_cid_tree_idx(dcid)];
+	struct ebmb_node *node;
+	int found = 0;
+
+	/* Fast path: compare against our default CID and the client ODCID. */
+	if ((qc->scid.len == dcid_len &&
+	     memcmp(qc->scid.data, dcid, dcid_len) == 0) ||
+	    (qc->odcid.len == dcid_len &&
+	     memcmp(qc->odcid.data, dcid, dcid_len) == 0))
+		return 1;
+
+	/* Slow path: the client may have switched to one of our other CIDs.
+	 *
+	 * TODO to avoid locking, loop through qc.cids as an alternative.
+	 *
+	 * TODO set it to our default CID to avoid this operation next time.
+	 */
+	HA_RWLOCK_RDLOCK(QC_CID_LOCK, &tree->lock);
+	node = ebmb_lookup(&tree->root, dcid, dcid_len);
+	HA_RWLOCK_RDUNLOCK(QC_CID_LOCK, &tree->lock);
+
+	if (node) {
+		const struct quic_connection_id *conn_id;
+
+		conn_id = ebmb_entry(node, struct quic_connection_id, node);
+		found = (conn_id->qc == qc);
+	}
+
+	return found;
+}
+
+/* Wake-up upper layer for sending if all conditions are met :
+ * - room in congestion window or probe packet to sent
+ * - socket FD ready to sent or listener socket used
+ *
+ * Returns 1 if upper layer has been woken up else 0.
+ */
+int qc_notify_send(struct quic_conn *qc)
+{
+	const struct quic_pktns *pktns = qc->apktns;
+	int may_send;
+
+	/* Nothing to do without a pending send subscription. */
+	if (!qc->subs || !(qc->subs->events & SUB_RETRY_SEND))
+		return 0;
+
+	/* RFC 9002 7.5. Probe Timeout
+	 *
+	 * Probe packets MUST NOT be blocked by the congestion controller.
+	 */
+	may_send = quic_cc_path_prep_data(qc->path) || pktns->tx.pto_probe;
+	if (!may_send || (qc_test_fd(qc) && fd_send_active(qc->fd)))
+		return 0;
+
+	tasklet_wakeup(qc->subs->tasklet);
+	qc->subs->events &= ~SUB_RETRY_SEND;
+	if (!qc->subs->events)
+		qc->subs = NULL;
+
+	return 1;
+}
+
+/* Notify upper layer of a fatal error which forces to close the connection. */
+void qc_notify_err(struct quic_conn *qc)
+{
+	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+	/* Nothing to notify when no MUX is attached. */
+	if (qc->mux_state != QC_MUX_READY)
+		goto leave;
+
+	TRACE_STATE("error notified to mux", QUIC_EV_CONN_CLOSE, qc);
+
+	/* Mark socket as closed. */
+	qc->conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+
+	/* TODO quic-conn layer must stay active until MUX is released.
+	 * Thus, we have to wake up directly to ensure upper stream
+	 * layer will be notified of the error. If a proper separation
+	 * is made between MUX and quic-conn layer, wake up could be
+	 * conducted only with qc.subs.
+	 */
+	tasklet_wakeup(qc->qcc->wait_event.tasklet);
+
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
+/* Move a <qc> QUIC connection and its resources from the current thread to the
+ * new one <new_tid> optionally in association with <new_li> (since it may need
+ * to change when migrating to a thread from a different group, otherwise leave
+ * it NULL). After this call, the connection cannot be dereferenced anymore on
+ * the current thread.
+ *
+ * The counterpart qc_finalize_affinity_rebind() must then be run on the
+ * target thread to requeue tasks and reinsert the connection in the
+ * per-thread lists.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int qc_set_tid_affinity(struct quic_conn *qc, uint new_tid, struct listener *new_li)
+{
+	struct task *t1 = NULL, *t2 = NULL;
+	struct tasklet *t3 = NULL;
+
+	struct quic_connection_id *conn_id;
+	struct eb64_node *node;
+
+	TRACE_ENTER(QUIC_EV_CONN_SET_AFFINITY, qc);
+
+	/* Pre-allocate all required resources. This ensures we do not left a
+	 * connection with only some of its field rebinded.
+	 */
+	if (((t1 = task_new_on(new_tid)) == NULL) ||
+	    (qc->timer_task && (t2 = task_new_on(new_tid)) == NULL) ||
+	    (t3 = tasklet_new()) == NULL) {
+		goto err;
+	}
+
+	/* Reinit idle timer task. */
+	task_kill(qc->idle_timer_task);
+	t1->expire = qc->idle_timer_task->expire;
+	qc->idle_timer_task = t1;
+	qc->idle_timer_task->process = qc_idle_timer_task;
+	qc->idle_timer_task->context = qc;
+
+	/* Reinit timer task if allocated. */
+	if (qc->timer_task) {
+		task_kill(qc->timer_task);
+		qc->timer_task = t2;
+		qc->timer_task->process = qc_process_timer;
+		qc->timer_task->context = qc;
+	}
+
+	/* Reinit IO tasklet. */
+	if (qc->wait_event.tasklet->state & TASK_IN_LIST)
+		qc->flags |= QUIC_FL_CONN_IO_TO_REQUEUE;
+	tasklet_kill(qc->wait_event.tasklet);
+	/* In most cases quic_conn_app_io_cb is used but for 0-RTT quic_conn_io_cb can be still activated. */
+	t3->process = qc->wait_event.tasklet->process;
+	qc->wait_event.tasklet = t3;
+	qc->wait_event.tasklet->tid = new_tid;
+	qc->wait_event.tasklet->context = qc;
+	qc->wait_event.events = 0;
+
+	/* Rebind the connection FD. */
+	if (qc_test_fd(qc)) {
+		/* Reading is reactivated by the new thread. */
+		fd_migrate_on(qc->fd, new_tid);
+	}
+
+	/* Remove conn from per-thread list instance. It will be hidden from
+	 * "show quic" until rebinding is completed.
+	 */
+	qc_detach_th_ctx_list(qc, 0);
+
+	node = eb64_first(qc->cids);
+	BUG_ON(!node || eb64_next(node)); /* One and only one CID must be present before affinity rebind. */
+	conn_id = eb64_entry(node, struct quic_connection_id, seq_num);
+
+	/* At this point no connection was accounted for yet on this
+	 * listener so it's OK to just swap the pointer.
+	 */
+	if (new_li && new_li != qc->li)
+		qc->li = new_li;
+
+	/* Rebinding is considered done when CID points to the new thread. No
+	 * access should be done to quic-conn instance after it.
+	 */
+	qc->flags |= QUIC_FL_CONN_AFFINITY_CHANGED;
+	HA_ATOMIC_STORE(&conn_id->tid, new_tid);
+	qc = NULL;
+
+	TRACE_LEAVE(QUIC_EV_CONN_SET_AFFINITY, NULL);
+	return 0;
+
+ err:
+	/* Nothing was rebound yet: drop the pre-allocated resources. */
+	task_destroy(t1);
+	task_destroy(t2);
+	tasklet_free(t3);
+
+	TRACE_DEVEL("leaving on error", QUIC_EV_CONN_SET_AFFINITY, qc);
+	return 1;
+}
+
+/* Must be called after qc_set_tid_affinity() on the new thread.
+ *
+ * Requeues the timer tasks and the I/O tasklet on the current thread,
+ * reinserts <qc> in the per-thread connection list and reactivates FD
+ * polling, completing the thread migration.
+ */
+void qc_finalize_affinity_rebind(struct quic_conn *qc)
+{
+	TRACE_ENTER(QUIC_EV_CONN_SET_AFFINITY, qc);
+
+	/* This function must not be called twice after an affinity rebind. */
+	BUG_ON(!(qc->flags & QUIC_FL_CONN_AFFINITY_CHANGED));
+	qc->flags &= ~QUIC_FL_CONN_AFFINITY_CHANGED;
+
+	/* If quic_conn is closing it is unnecessary to migrate it as it will
+	 * be soon released. Besides, special care must be taken for CLOSING
+	 * connections (using quic_conn_closed and th_ctx.quic_conns_clo list for
+	 * instance). This should never occur as CLOSING connections are
+	 * skipped by quic_sock_accept_conn().
+	 */
+	BUG_ON(qc->flags & (QUIC_FL_CONN_CLOSING|QUIC_FL_CONN_DRAINING));
+
+	/* Reinsert connection in ha_thread_ctx global list. */
+	LIST_APPEND(&th_ctx->quic_conns, &qc->el_th_ctx);
+	qc->qc_epoch = HA_ATOMIC_LOAD(&qc_epoch);
+
+	/* Reactivate FD polling if connection socket is active. */
+	qc_want_recv(qc);
+
+	/* Reactivate timer task if needed. */
+	qc_set_timer(qc);
+
+	/* Idle timer task is always active. */
+	task_queue(qc->idle_timer_task);
+
+	/* Reactivate IO tasklet if needed. */
+	if (qc->flags & QUIC_FL_CONN_IO_TO_REQUEUE) {
+		tasklet_wakeup(qc->wait_event.tasklet);
+		qc->flags &= ~QUIC_FL_CONN_IO_TO_REQUEUE;
+	}
+
+	TRACE_LEAVE(QUIC_EV_CONN_SET_AFFINITY, qc);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/quic_frame.c b/src/quic_frame.c
new file mode 100644
index 0000000..61d2c93
--- /dev/null
+++ b/src/quic_frame.c
@@ -0,0 +1,1273 @@
+/*
+ * Copyright 2019 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <string.h>
+
+#include <import/eb64tree.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/chunk.h>
+#include <haproxy/pool.h>
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_rx-t.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/quic_tx.h>
+#include <haproxy/trace.h>
+
+DECLARE_POOL(pool_head_quic_frame, "quic_frame", sizeof(struct quic_frame));
+DECLARE_POOL(pool_head_qf_crypto, "qf_crypto", sizeof(struct qf_crypto));
+
+/* Return a human readable name for frame type <ft>, or "UNKNOWN" for any
+ * value without a dedicated name.
+ */
+const char *quic_frame_type_string(enum quic_frame_type ft)
+{
+	/* Table indexed by the frame type wire value; unset slots stay NULL. */
+	static const char *const ft_names[] = {
+		[QUIC_FT_PADDING]              = "PADDING",
+		[QUIC_FT_PING]                 = "PING",
+		[QUIC_FT_ACK]                  = "ACK",
+		[QUIC_FT_ACK_ECN]              = "ACK_ECN",
+		[QUIC_FT_RESET_STREAM]         = "RESET_STREAM",
+		[QUIC_FT_STOP_SENDING]         = "STOP_SENDING",
+		[QUIC_FT_CRYPTO]               = "CRYPTO",
+		[QUIC_FT_NEW_TOKEN]            = "NEW_TOKEN",
+		[QUIC_FT_STREAM_8]             = "STREAM_8",
+		[QUIC_FT_STREAM_9]             = "STREAM_9",
+		[QUIC_FT_STREAM_A]             = "STREAM_A",
+		[QUIC_FT_STREAM_B]             = "STREAM_B",
+		[QUIC_FT_STREAM_C]             = "STREAM_C",
+		[QUIC_FT_STREAM_D]             = "STREAM_D",
+		[QUIC_FT_STREAM_E]             = "STREAM_E",
+		[QUIC_FT_STREAM_F]             = "STREAM_F",
+		[QUIC_FT_MAX_DATA]             = "MAX_DATA",
+		[QUIC_FT_MAX_STREAM_DATA]      = "MAX_STREAM_DATA",
+		[QUIC_FT_MAX_STREAMS_BIDI]     = "MAX_STREAMS_BIDI",
+		[QUIC_FT_MAX_STREAMS_UNI]      = "MAX_STREAMS_UNI",
+		[QUIC_FT_DATA_BLOCKED]         = "DATA_BLOCKED",
+		[QUIC_FT_STREAM_DATA_BLOCKED]  = "STREAM_DATA_BLOCKED",
+		[QUIC_FT_STREAMS_BLOCKED_BIDI] = "STREAMS_BLOCKED_BIDI",
+		[QUIC_FT_STREAMS_BLOCKED_UNI]  = "STREAMS_BLOCKED_UNI",
+		[QUIC_FT_NEW_CONNECTION_ID]    = "NEW_CONNECTION_ID",
+		[QUIC_FT_RETIRE_CONNECTION_ID] = "RETIRE_CONNECTION_ID",
+		[QUIC_FT_PATH_CHALLENGE]       = "PATH_CHALLENGE",
+		[QUIC_FT_PATH_RESPONSE]        = "PATH_RESPONSE",
+		[QUIC_FT_CONNECTION_CLOSE]     = "CONNECTION_CLOSE",
+		[QUIC_FT_CONNECTION_CLOSE_APP] = "CONNECTION_CLOSE_APP",
+		[QUIC_FT_HANDSHAKE_DONE]       = "HANDSHAKE_DONE",
+	};
+
+	if ((unsigned int)ft < sizeof(ft_names) / sizeof(ft_names[0]) && ft_names[ft])
+		return ft_names[ft];
+
+	return "UNKNOWN";
+}
+
+/* Append to <buf> the CONNECTION_CLOSE reason phrase <phr> of <phrlen> bytes,
+ * surrounded by single quotes. The phrase is emitted byte per byte as it is
+ * not NUL-terminated.
+ */
+static void chunk_cc_phrase_appendf(struct buffer *buf,
+                                    const unsigned char *phr, size_t phrlen)
+{
+	size_t i;
+
+	chunk_appendf(buf, " reason_phrase: '");
+	for (i = 0; i < phrlen; i++)
+		chunk_appendf(buf, "%c", phr[i]);
+	chunk_appendf(buf, "'");
+}
+
+/* Add traces to <buf> depending on <frm> frame type.
+ * BUG FIX: all branches now append to the <buf> argument. Most cases used to
+ * write to the global &trace_buf instead, silently ignoring the caller's
+ * buffer (only CRYPTO and RESET_STREAM honored it).
+ */
+void chunk_frm_appendf(struct buffer *buf, const struct quic_frame *frm)
+{
+	chunk_appendf(buf, " %s", quic_frame_type_string(frm->type));
+	switch (frm->type) {
+	case QUIC_FT_CRYPTO:
+	{
+		const struct qf_crypto *crypto_frm = &frm->crypto;
+		chunk_appendf(buf, " cfoff=%llu cflen=%llu",
+		              (ull)crypto_frm->offset, (ull)crypto_frm->len);
+		break;
+	}
+	case QUIC_FT_RESET_STREAM:
+	{
+		const struct qf_reset_stream *rs_frm = &frm->reset_stream;
+		chunk_appendf(buf, " id=%llu app_error_code=%llu final_size=%llu",
+		              (ull)rs_frm->id, (ull)rs_frm->app_error_code, (ull)rs_frm->final_size);
+		break;
+	}
+	case QUIC_FT_STOP_SENDING:
+	{
+		const struct qf_stop_sending *ss_frm = &frm->stop_sending;
+		chunk_appendf(buf, " id=%llu app_error_code=%llu",
+		              (ull)ss_frm->id, (ull)ss_frm->app_error_code);
+		break;
+	}
+	case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
+	{
+		const struct qf_stream *strm_frm = &frm->stream;
+		/* Direction and FIN are carried by the stream ID and type bits. */
+		chunk_appendf(buf, " uni=%d fin=%d id=%llu off=%llu len=%llu",
+		              !!(strm_frm->id & QUIC_STREAM_FRAME_ID_DIR_BIT),
+		              !!(frm->type & QUIC_STREAM_FRAME_TYPE_FIN_BIT),
+		              (ull)strm_frm->id, (ull)strm_frm->offset.key, (ull)strm_frm->len);
+		break;
+	}
+	case QUIC_FT_MAX_DATA:
+	{
+		const struct qf_max_data *md_frm = &frm->max_data;
+		chunk_appendf(buf, " max_data=%llu", (ull)md_frm->max_data);
+		break;
+	}
+	case QUIC_FT_MAX_STREAM_DATA:
+	{
+		const struct qf_max_stream_data *msd_frm = &frm->max_stream_data;
+		chunk_appendf(buf, " id=%llu max_stream_data=%llu",
+		              (ull)msd_frm->id, (ull)msd_frm->max_stream_data);
+		break;
+	}
+	case QUIC_FT_MAX_STREAMS_BIDI:
+	{
+		const struct qf_max_streams *ms_frm = &frm->max_streams_bidi;
+		chunk_appendf(buf, " max_streams=%llu", (ull)ms_frm->max_streams);
+		break;
+	}
+	case QUIC_FT_MAX_STREAMS_UNI:
+	{
+		const struct qf_max_streams *ms_frm = &frm->max_streams_uni;
+		chunk_appendf(buf, " max_streams=%llu", (ull)ms_frm->max_streams);
+		break;
+	}
+	case QUIC_FT_DATA_BLOCKED:
+	{
+		const struct qf_data_blocked *db_frm = &frm->data_blocked;
+		chunk_appendf(buf, " limit=%llu", (ull)db_frm->limit);
+		break;
+	}
+	case QUIC_FT_STREAM_DATA_BLOCKED:
+	{
+		const struct qf_stream_data_blocked *sdb_frm = &frm->stream_data_blocked;
+		chunk_appendf(buf, " id=%llu limit=%llu",
+		              (ull)sdb_frm->id, (ull)sdb_frm->limit);
+		break;
+	}
+	case QUIC_FT_STREAMS_BLOCKED_BIDI:
+	{
+		const struct qf_streams_blocked *sb_frm = &frm->streams_blocked_bidi;
+		chunk_appendf(buf, " limit=%llu", (ull)sb_frm->limit);
+		break;
+	}
+	case QUIC_FT_STREAMS_BLOCKED_UNI:
+	{
+		const struct qf_streams_blocked *sb_frm = &frm->streams_blocked_uni;
+		chunk_appendf(buf, " limit=%llu", (ull)sb_frm->limit);
+		break;
+	}
+	case QUIC_FT_RETIRE_CONNECTION_ID:
+	{
+		const struct qf_retire_connection_id *rcid_frm = &frm->retire_connection_id;
+		chunk_appendf(buf, " seq_num=%llu", (ull)rcid_frm->seq_num);
+		break;
+	}
+	case QUIC_FT_CONNECTION_CLOSE:
+	{
+		const struct qf_connection_close *cc_frm = &frm->connection_close;
+		/* Only up to sizeof reason_phrase bytes were stored at parsing time. */
+		size_t plen = QUIC_MIN((size_t)cc_frm->reason_phrase_len, sizeof cc_frm->reason_phrase);
+		chunk_appendf(buf,
+		              " error_code=%llu frame_type=%llu reason_phrase_len=%llu",
+		              (ull)cc_frm->error_code, (ull)cc_frm->frame_type,
+		              (ull)cc_frm->reason_phrase_len);
+		if (plen)
+			chunk_cc_phrase_appendf(buf, cc_frm->reason_phrase, plen);
+		break;
+	}
+	case QUIC_FT_CONNECTION_CLOSE_APP:
+	{
+		const struct qf_connection_close_app *cc_frm = &frm->connection_close_app;
+		size_t plen = QUIC_MIN((size_t)cc_frm->reason_phrase_len, sizeof cc_frm->reason_phrase);
+		chunk_appendf(buf,
+		              " error_code=%llu reason_phrase_len=%llu",
+		              (ull)cc_frm->error_code, (ull)cc_frm->reason_phrase_len);
+		if (plen)
+			chunk_cc_phrase_appendf(buf, cc_frm->reason_phrase, plen);
+		break;
+	}
+	}
+}
+
+/* Encode <frm> PADDING frame at <pos> buffer position, <end> being one byte
+ * past the end of this buffer. The frame type byte already written by the
+ * caller counts as one byte of padding, hence the "- 1" everywhere.
+ * Returns 1 if succeeded (enough room in the buffer to encode the frame), 0 if not.
+ */
+static int quic_build_padding_frame(unsigned char **pos, const unsigned char *end,
+                                    struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_padding *pad = &frm->padding;
+
+	if (end - *pos < pad->len - 1)
+		return 0;
+
+	/* PADDING payload is all-zero bytes. */
+	memset(*pos, 0, pad->len - 1);
+	*pos += pad->len - 1;
+	return 1;
+}
+
+/* Parse a PADDING frame at <pos> buffer position with <end> as end into <frm>
+ * frame. Consumes every consecutive zero byte; the already-consumed type byte
+ * counts for one byte of padding.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_padding_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                    const unsigned char **pos, const unsigned char *end)
+{
+	const unsigned char *start = *pos;
+
+	while (*pos < end && !**pos)
+		(*pos)++;
+
+	frm->padding.len = 1 + (*pos - start);
+	return 1;
+}
+
+/* Encode a PING frame at <pos> buffer position. A PING frame carries no field
+ * beyond its type byte, already written by the caller.
+ * Always succeeds.
+ */
+static int quic_build_ping_frame(unsigned char **pos, const unsigned char *end,
+                                 struct quic_frame *frm, struct quic_conn *conn)
+{
+	/* No field */
+	return 1;
+}
+
+/* Parse a PING frame from <pos> buffer position with <end> as end into <frm>
+ * frame. A PING frame carries no field beyond its type byte.
+ * Always succeeds.
+ */
+static int quic_parse_ping_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                 const unsigned char **pos, const unsigned char *end)
+{
+	/* No field */
+	return 1;
+}
+
+/* Encode a ACK frame from the arranged ranges tree attached to <frm>.
+ * The ranges are walked from the highest (most recent) down, emitting
+ * Largest Acknowledged, ACK Delay, Range Count and First Range, then one
+ * Gap/Length pair per remaining range. Gaps and lengths are encoded minus
+ * one/two as mandated by the ACK frame wire format (RFC 9000 §19.3.1).
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_ack_frame(unsigned char **pos, const unsigned char *end,
+                                struct quic_frame *frm, struct quic_conn *qc)
+{
+	struct qf_tx_ack *ack_frm = &frm->tx_ack;
+	struct eb64_node *ar, *prev_ar;
+	struct quic_arng_node *ar_node, *prev_ar_node;
+
+	/* Start from the highest range: its upper bound is Largest Acknowledged. */
+	ar = eb64_last(&ack_frm->arngs->root);
+	ar_node = eb64_entry(ar, struct quic_arng_node, first);
+	TRACE_PROTO("TX ack range", QUIC_EV_CONN_PRSAFRM,
+	            qc,, &ar_node->last, &ar_node->first.key);
+	if (!quic_enc_int(pos, end, ar_node->last) ||
+	    !quic_enc_int(pos, end, ack_frm->ack_delay) ||
+	    !quic_enc_int(pos, end, ack_frm->arngs->sz - 1) ||
+	    !quic_enc_int(pos, end, ar_node->last - ar_node->first.key))
+		return 0;
+
+	/* Then one (Gap, ACK Range Length) pair per lower range. */
+	while ((prev_ar = eb64_prev(ar))) {
+		prev_ar_node = eb64_entry(prev_ar, struct quic_arng_node, first);
+		TRACE_PROTO("TX ack range", QUIC_EV_CONN_PRSAFRM, qc,,
+		            &prev_ar_node->last, &prev_ar_node->first.key);
+		/* Gap is the number of unacked packets between ranges minus one,
+		 * itself encoded minus one, hence the "- 2".
+		 */
+		if (!quic_enc_int(pos, end, ar_node->first.key - prev_ar_node->last - 2) ||
+		    !quic_enc_int(pos, end, prev_ar_node->last - prev_ar_node->first.key))
+			return 0;
+
+		ar = prev_ar;
+		ar_node = eb64_entry(ar, struct quic_arng_node, first);
+	}
+
+	return 1;
+}
+
+/* Parse an ACK frame header at <pos> buffer position with <end> as end into
+ * <frm> frame. The four leading varints are decoded in wire order; the
+ * per-range pairs are handled by the caller.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_ack_frame_header(struct quic_frame *frm, struct quic_conn *qc,
+                                       const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_ack *ack_frm = &frm->ack;
+
+	/* Short-circuit on the first varint which fails to decode. */
+	return quic_dec_int(&ack_frm->largest_ack, pos, end) &&
+	       quic_dec_int(&ack_frm->ack_delay, pos, end) &&
+	       quic_dec_int(&ack_frm->ack_range_num, pos, end) &&
+	       quic_dec_int(&ack_frm->first_ack_range, pos, end);
+}
+
+/* Encode a ACK_ECN frame: largest ack, ack delay, first range then range count.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_ack_ecn_frame(unsigned char **pos, const unsigned char *end,
+                                    struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_ack *af = &frm->ack;
+
+	if (!quic_enc_int(pos, end, af->largest_ack))
+		return 0;
+	if (!quic_enc_int(pos, end, af->ack_delay))
+		return 0;
+	if (!quic_enc_int(pos, end, af->first_ack_range))
+		return 0;
+	return quic_enc_int(pos, end, af->ack_range_num);
+}
+
+/* Parse an ACK_ECN frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_ack_ecn_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                    const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_ack *af = &frm->ack;
+
+	if (!quic_dec_int(&af->largest_ack, pos, end))
+		return 0;
+	if (!quic_dec_int(&af->ack_delay, pos, end))
+		return 0;
+	if (!quic_dec_int(&af->first_ack_range, pos, end))
+		return 0;
+	return quic_dec_int(&af->ack_range_num, pos, end);
+}
+
+/* Encode a RESET_STREAM frame at <pos> buffer position: stream ID, application
+ * error code and final size, all as varints.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_reset_stream_frame(unsigned char **pos, const unsigned char *end,
+                                         struct quic_frame *frm, struct quic_conn *conn)
+{
+	const struct qf_reset_stream *rs = &frm->reset_stream;
+
+	if (!quic_enc_int(pos, end, rs->id))
+		return 0;
+	if (!quic_enc_int(pos, end, rs->app_error_code))
+		return 0;
+	return quic_enc_int(pos, end, rs->final_size);
+}
+
+/* Parse a RESET_STREAM frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_reset_stream_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                         const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_reset_stream *rs = &frm->reset_stream;
+
+	if (!quic_dec_int(&rs->id, pos, end))
+		return 0;
+	if (!quic_dec_int(&rs->app_error_code, pos, end))
+		return 0;
+	return quic_dec_int(&rs->final_size, pos, end);
+}
+
+/* Encode a STOP_SENDING frame.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_stop_sending_frame(unsigned char **pos, const unsigned char *end,
+ struct quic_frame *frm, struct quic_conn *conn)
+{
+ struct qf_stop_sending *ss_frm = &frm->stop_sending;
+
+ return quic_enc_int(pos, end, ss_frm->id) &&
+ quic_enc_int(pos, end, ss_frm->app_error_code);
+}
+
+/* Parse a STOP_SENDING frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_stop_sending_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                         const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_stop_sending *ss = &frm->stop_sending;
+
+	if (!quic_dec_int(&ss->id, pos, end))
+		return 0;
+	return quic_dec_int(&ss->app_error_code, pos, end);
+}
+
+/* Encode a CRYPTO frame at <pos> buffer position: offset and length varints
+ * followed by <len> bytes of handshake data copied from the encryption
+ * level's TX crypto buffers, which are fixed-size chunks addressed through
+ * QUIC_CRYPTO_BUF_SHIFT/MASK.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_crypto_frame(unsigned char **pos, const unsigned char *end,
+                                   struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_crypto *crypto_frm = &frm->crypto;
+	const struct quic_enc_level *qel = crypto_frm->qel;
+	size_t offset, len;
+
+	if (!quic_enc_int(pos, end, crypto_frm->offset) ||
+	    !quic_enc_int(pos, end, crypto_frm->len) || end - *pos < crypto_frm->len)
+		return 0;
+
+	/* The frame data may span several crypto buffer chunks: copy chunk by
+	 * chunk until <len> bytes have been emitted.
+	 */
+	len = crypto_frm->len;
+	offset = crypto_frm->offset;
+	while (len) {
+		int idx;
+		size_t to_copy;
+		const unsigned char *data;
+
+		/* Chunk index and intra-chunk offset derived from the stream offset. */
+		idx = offset >> QUIC_CRYPTO_BUF_SHIFT;
+		to_copy = qel->tx.crypto.bufs[idx]->sz - (offset & QUIC_CRYPTO_BUF_MASK);
+		if (to_copy > len)
+			to_copy = len;
+		data = qel->tx.crypto.bufs[idx]->data + (offset & QUIC_CRYPTO_BUF_MASK);
+		memcpy(*pos, data, to_copy);
+		*pos += to_copy;
+		offset += to_copy;
+		len -= to_copy;
+	}
+
+	return 1;
+}
+
+/* Parse a CRYPTO frame from <pos> buffer position with <end> as end into <frm>
+ * frame. The data pointer references the packet buffer, no copy is made.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_crypto_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                   const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_crypto *cf = &frm->crypto;
+
+	if (!quic_dec_int(&cf->offset, pos, end))
+		return 0;
+	if (!quic_dec_int(&cf->len, pos, end))
+		return 0;
+	/* The whole declared payload must be present in the packet. */
+	if (end - *pos < cf->len)
+		return 0;
+
+	cf->data = *pos;
+	*pos += cf->len;
+	return 1;
+}
+
+/* Encode a NEW_TOKEN frame at <pos> buffer position: token length varint
+ * followed by the token bytes.
+ * BUG FIX: <*pos> was not advanced after copying the token, so any data
+ * appended afterwards would have overwritten it (every other builder in this
+ * file advances <*pos> past what it writes).
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_new_token_frame(unsigned char **pos, const unsigned char *end,
+                                      struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_new_token *new_token_frm = &frm->new_token;
+
+	if (!quic_enc_int(pos, end, new_token_frm->len) || end - *pos < new_token_frm->len)
+		return 0;
+
+	memcpy(*pos, new_token_frm->data, new_token_frm->len);
+	*pos += new_token_frm->len;
+
+	return 1;
+}
+
+/* Parse a NEW_TOKEN frame at <pos> buffer position with <end> as end into
+ * <frm> frame. The token data references the packet buffer, no copy is made.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_new_token_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                      const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_new_token *nt = &frm->new_token;
+
+	if (!quic_dec_int(&nt->len, pos, end))
+		return 0;
+	if (end - *pos < nt->len)
+		return 0;
+
+	nt->data = *pos;
+	*pos += nt->len;
+	return 1;
+}
+
+/* Encode a STREAM frame at <pos> buffer position: stream ID, optional offset
+ * (OFF bit), optional explicit length (LEN bit) then the payload copied from
+ * the stream's circular buffer, possibly in two parts when it wraps.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_stream_frame(unsigned char **pos, const unsigned char *end,
+                                   struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_stream *strm_frm = &frm->stream;
+	const unsigned char *wrap;
+
+	/* Caller must set OFF bit if and only if a non-null offset is used. */
+	BUG_ON(!!(frm->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT) !=
+	       !!strm_frm->offset.key);
+
+	if (!quic_enc_int(pos, end, strm_frm->id) ||
+	    ((frm->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT) && !quic_enc_int(pos, end, strm_frm->offset.key)) ||
+	    ((frm->type & QUIC_STREAM_FRAME_TYPE_LEN_BIT) &&
+	     (!quic_enc_int(pos, end, strm_frm->len) || end - *pos < strm_frm->len)))
+		return 0;
+
+	/* No need for data memcpy if no payload. */
+	if (!strm_frm->len)
+		return 1;
+
+	/* The payload lives in a circular buffer: when it crosses the wrapping
+	 * point, copy the tail up to <wrap> then the head from the buffer origin.
+	 */
+	wrap = (const unsigned char *)b_wrap(strm_frm->buf);
+	if (strm_frm->data + strm_frm->len > wrap) {
+		size_t to_copy = wrap - strm_frm->data;
+		memcpy(*pos, strm_frm->data, to_copy);
+		*pos += to_copy;
+
+		to_copy = strm_frm->len - to_copy;
+		memcpy(*pos, b_orig(strm_frm->buf), to_copy);
+		*pos += to_copy;
+	}
+	else {
+		memcpy(*pos, strm_frm->data, strm_frm->len);
+		*pos += strm_frm->len;
+	}
+
+	return 1;
+}
+
+/* Parse a STREAM frame at <pos> buffer position with <end> as end into <frm>
+ * frame. Offset and length fields are only present when the OFF/LEN type bits
+ * are set; without LEN the payload extends to the end of the packet. The data
+ * pointer references the packet buffer, no copy is made.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_stream_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                   const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_stream *strm_frm = &frm->stream;
+
+	if (!quic_dec_int(&strm_frm->id, pos, end))
+		return 0;
+
+	/* Offset parsing */
+	if (!(frm->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT)) {
+		/* No OFF bit: implicit zero offset. */
+		strm_frm->offset.key = 0;
+	}
+	else if (!quic_dec_int((uint64_t *)&strm_frm->offset.key, pos, end))
+		return 0;
+
+	/* Length parsing */
+	if (!(frm->type & QUIC_STREAM_FRAME_TYPE_LEN_BIT)) {
+		/* No LEN bit: the payload fills the rest of the packet. */
+		strm_frm->len = end - *pos;
+	}
+	else if (!quic_dec_int(&strm_frm->len, pos, end) || end - *pos < strm_frm->len)
+		return 0;
+
+	strm_frm->data = *pos;
+	*pos += strm_frm->len;
+
+	return 1;
+}
+
+/* Encode a MAX_DATA frame at <pos> buffer position: a single varint carrying
+ * the new connection-level flow control limit.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_max_data_frame(unsigned char **pos, const unsigned char *end,
+                                     struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->max_data.max_data);
+}
+
+/* Parse a MAX_DATA frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_max_data_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                     const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->max_data.max_data, pos, end);
+}
+
+/* Encode a MAX_STREAM_DATA frame at <pos> buffer position: stream ID then its
+ * new flow control limit.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_max_stream_data_frame(unsigned char **pos, const unsigned char *end,
+                                            struct quic_frame *frm, struct quic_conn *conn)
+{
+	if (!quic_enc_int(pos, end, frm->max_stream_data.id))
+		return 0;
+	return quic_enc_int(pos, end, frm->max_stream_data.max_stream_data);
+}
+
+/* Parse a MAX_STREAM_DATA frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_max_stream_data_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                            const unsigned char **pos, const unsigned char *end)
+{
+	if (!quic_dec_int(&frm->max_stream_data.id, pos, end))
+		return 0;
+	return quic_dec_int(&frm->max_stream_data.max_stream_data, pos, end);
+}
+
+/* Encode a MAX_STREAMS frame for bidirectional streams at <pos> buffer
+ * position: a single varint with the new stream count limit.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_max_streams_bidi_frame(unsigned char **pos, const unsigned char *end,
+                                             struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->max_streams_bidi.max_streams);
+}
+
+/* Parse a MAX_STREAMS frame for bidirectional streams at <pos> buffer position
+ * with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_max_streams_bidi_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                             const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->max_streams_bidi.max_streams, pos, end);
+}
+
+/* Encode a MAX_STREAMS frame for unidirectional streams at <pos> buffer
+ * position: a single varint with the new stream count limit.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_max_streams_uni_frame(unsigned char **pos, const unsigned char *end,
+                                            struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->max_streams_uni.max_streams);
+}
+
+/* Parse a MAX_STREAMS frame for unidirectional streams at <pos> buffer
+ * position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_max_streams_uni_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                            const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->max_streams_uni.max_streams, pos, end);
+}
+
+/* Encode a DATA_BLOCKED frame at <pos> buffer position: a single varint with
+ * the connection-level limit at which blocking occurred.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_data_blocked_frame(unsigned char **pos, const unsigned char *end,
+                                         struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->data_blocked.limit);
+}
+
+/* Parse a DATA_BLOCKED frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_data_blocked_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                         const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->data_blocked.limit, pos, end);
+}
+
+/* Encode a STREAM_DATA_BLOCKED frame at <pos> buffer position: stream ID then
+ * the per-stream limit at which blocking occurred.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_stream_data_blocked_frame(unsigned char **pos, const unsigned char *end,
+                                                struct quic_frame *frm, struct quic_conn *conn)
+{
+	if (!quic_enc_int(pos, end, frm->stream_data_blocked.id))
+		return 0;
+	return quic_enc_int(pos, end, frm->stream_data_blocked.limit);
+}
+
+/* Parse a STREAM_DATA_BLOCKED frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_stream_data_blocked_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                                const unsigned char **pos, const unsigned char *end)
+{
+	if (!quic_dec_int(&frm->stream_data_blocked.id, pos, end))
+		return 0;
+	return quic_dec_int(&frm->stream_data_blocked.limit, pos, end);
+}
+
+/* Encode a STREAMS_BLOCKED frame for bidirectional streams at <pos> buffer
+ * position: a single varint with the stream limit at which blocking occurred.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_streams_blocked_bidi_frame(unsigned char **pos, const unsigned char *end,
+                                                 struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->streams_blocked_bidi.limit);
+}
+
+/* Parse a STREAMS_BLOCKED frame for bidirectional streams at <pos> buffer
+ * position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_streams_blocked_bidi_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                                 const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->streams_blocked_bidi.limit, pos, end);
+}
+
+/* Encode a STREAMS_BLOCKED frame for unidirectional streams at <pos> buffer
+ * position: a single varint with the stream limit at which blocking occurred.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_streams_blocked_uni_frame(unsigned char **pos, const unsigned char *end,
+                                                struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->streams_blocked_uni.limit);
+}
+
+/* Parse a STREAMS_BLOCKED frame for unidirectional streams at <pos> buffer
+ * position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_streams_blocked_uni_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                                const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->streams_blocked_uni.limit, pos, end);
+}
+
+/* Encode a NEW_CONNECTION_ID frame at <pos> buffer position: sequence number
+ * and retire-prior-to varints, a one-byte CID length, the CID bytes, then the
+ * fixed-size stateless reset token.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_new_connection_id_frame(unsigned char **pos, const unsigned char *end,
+                                              struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_new_connection_id *ncid_frm = &frm->new_connection_id;
+
+	/* The room check covers the length byte (sizeof cid.len), the CID
+	 * itself and the reset token in one go.
+	 */
+	if (!quic_enc_int(pos, end, ncid_frm->seq_num) ||
+	    !quic_enc_int(pos, end, ncid_frm->retire_prior_to) ||
+	    end - *pos < sizeof ncid_frm->cid.len + ncid_frm->cid.len + QUIC_STATELESS_RESET_TOKEN_LEN)
+		return 0;
+
+	*(*pos)++ = ncid_frm->cid.len;
+
+	if (ncid_frm->cid.len) {
+		memcpy(*pos, ncid_frm->cid.data, ncid_frm->cid.len);
+		*pos += ncid_frm->cid.len;
+	}
+	memcpy(*pos, ncid_frm->stateless_reset_token, QUIC_STATELESS_RESET_TOKEN_LEN);
+	*pos += QUIC_STATELESS_RESET_TOKEN_LEN;
+
+	return 1;
+}
+
+/* Parse a NEW_CONNECTION_ID frame at <pos> buffer position with <end> as end
+ * into <frm> frame. CID and reset token pointers reference the packet buffer,
+ * no copy is made.
+ * NOTE(review): cid.len is not checked here against an upper bound (e.g. a
+ * QUIC_CID_MAXLEN-style limit) — presumably validated by the frame handler;
+ * confirm against the caller.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_new_connection_id_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                              const unsigned char **pos, const unsigned char *end)
+{
+	struct qf_new_connection_id *ncid_frm = &frm->new_connection_id;
+
+	/* "end <= *pos" guards the one-byte CID length read below. */
+	if (!quic_dec_int(&ncid_frm->seq_num, pos, end) ||
+	    !quic_dec_int(&ncid_frm->retire_prior_to, pos, end) || end <= *pos)
+		return 0;
+
+	ncid_frm->cid.len = *(*pos)++;
+	if (end - *pos < ncid_frm->cid.len + QUIC_STATELESS_RESET_TOKEN_LEN)
+		return 0;
+
+	if (ncid_frm->cid.len) {
+		ncid_frm->cid.data = *pos;
+		*pos += ncid_frm->cid.len;
+	}
+	ncid_frm->stateless_reset_token = *pos;
+	*pos += QUIC_STATELESS_RESET_TOKEN_LEN;
+
+	return 1;
+}
+
+/* Encode a RETIRE_CONNECTION_ID frame at <pos> buffer position: a single
+ * varint carrying the sequence number of the CID to retire.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_retire_connection_id_frame(unsigned char **pos, const unsigned char *end,
+                                                 struct quic_frame *frm, struct quic_conn *conn)
+{
+	return quic_enc_int(pos, end, frm->retire_connection_id.seq_num);
+}
+
+/* Parse a RETIRE_CONNECTION_ID frame at <pos> buffer position with <end> as end into <frm> frame.
+ * Return 1 if succeeded (enough room to parse this frame), 0 if not.
+ */
+static int quic_parse_retire_connection_id_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                                 const unsigned char **pos, const unsigned char *end)
+{
+	return quic_dec_int(&frm->retire_connection_id.seq_num, pos, end);
+}
+
+/* Encode a PATH_CHALLENGE frame at <pos> buffer position: a fixed-size opaque
+ * payload copied verbatim.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_path_challenge_frame(unsigned char **pos, const unsigned char *end,
+                                           struct quic_frame *frm, struct quic_conn *conn)
+{
+	const size_t datalen = sizeof frm->path_challenge.data;
+
+	if (end - *pos < datalen)
+		return 0;
+
+	memcpy(*pos, frm->path_challenge.data, datalen);
+	*pos += datalen;
+	return 1;
+}
+
+/* Parse a PATH_CHALLENGE frame at <pos> buffer position with <end> as end into
+ * <frm> frame: the fixed-size opaque payload is copied into the frame.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_path_challenge_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                           const unsigned char **pos, const unsigned char *end)
+{
+	const size_t datalen = sizeof frm->path_challenge.data;
+
+	if (end - *pos < datalen)
+		return 0;
+
+	memcpy(frm->path_challenge.data, *pos, datalen);
+	*pos += datalen;
+	return 1;
+}
+
+
+/* Encode a PATH_RESPONSE frame at <pos> buffer position: the fixed-size opaque
+ * payload echoing a previously received PATH_CHALLENGE.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_path_response_frame(unsigned char **pos, const unsigned char *end,
+                                          struct quic_frame *frm, struct quic_conn *conn)
+{
+	const size_t datalen = sizeof frm->path_challenge_response.data;
+
+	if (end - *pos < datalen)
+		return 0;
+
+	memcpy(*pos, frm->path_challenge_response.data, datalen);
+	*pos += datalen;
+	return 1;
+}
+
+/* Parse a PATH_RESPONSE frame at <pos> buffer position with <end> as end into
+ * <frm> frame: the fixed-size opaque payload is copied into the frame.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_path_response_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                          const unsigned char **pos, const unsigned char *end)
+{
+	const size_t datalen = sizeof frm->path_challenge_response.data;
+
+	if (end - *pos < datalen)
+		return 0;
+
+	memcpy(frm->path_challenge_response.data, *pos, datalen);
+	*pos += datalen;
+	return 1;
+}
+
+/* Encode a CONNECTION_CLOSE frame at QUIC layer at <pos> buffer position:
+ * error code, offending frame type and reason phrase length varints, followed
+ * by the reason phrase bytes. Note there exist two types of CONNECTION_CLOSE
+ * frame, one for the application layer and another at QUIC layer.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_connection_close_frame(unsigned char **pos, const unsigned char *end,
+                                             struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_connection_close *cc = &frm->connection_close;
+
+	if (!quic_enc_int(pos, end, cc->error_code))
+		return 0;
+	if (!quic_enc_int(pos, end, cc->frame_type))
+		return 0;
+	if (!quic_enc_int(pos, end, cc->reason_phrase_len))
+		return 0;
+	if (end - *pos < cc->reason_phrase_len)
+		return 0;
+
+	memcpy(*pos, cc->reason_phrase, cc->reason_phrase_len);
+	*pos += cc->reason_phrase_len;
+	return 1;
+}
+
+/* Parse a CONNECTION_CLOSE frame at QUIC layer at <pos> buffer position with
+ * <end> as end into <frm> frame. Note there exist two types of
+ * CONNECTION_CLOSE frame, one for the application layer and another at QUIC
+ * layer. Only up to sizeof reason_phrase bytes of the phrase are stored in
+ * the frame; <*pos> is still advanced by the full declared length so the
+ * remainder of the packet parses correctly.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_connection_close_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                             const unsigned char **pos, const unsigned char *end)
+{
+	size_t plen;
+	struct qf_connection_close *cc_frm = &frm->connection_close;
+
+	if (!quic_dec_int(&cc_frm->error_code, pos, end) ||
+	    !quic_dec_int(&cc_frm->frame_type, pos, end) ||
+	    !quic_dec_int(&cc_frm->reason_phrase_len, pos, end) ||
+	    end - *pos < cc_frm->reason_phrase_len)
+		return 0;
+
+	/* Truncate the stored copy to the frame's fixed-size phrase buffer. */
+	plen = QUIC_MIN((size_t)cc_frm->reason_phrase_len, sizeof cc_frm->reason_phrase);
+	memcpy(cc_frm->reason_phrase, *pos, plen);
+	*pos += cc_frm->reason_phrase_len;
+
+	return 1;
+}
+
+/* Encode a CONNECTION_CLOSE frame at application layer at <pos> buffer position.
+ * Note there exist two types of CONNECTION_CLOSE frame, one for application layer
+ * and another at QUIC layer.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ */
+static int quic_build_connection_close_app_frame(unsigned char **pos, const unsigned char *end,
+                                                 struct quic_frame *frm, struct quic_conn *conn)
+{
+	struct qf_connection_close_app *cc = &frm->connection_close_app;
+
+	/* Application-layer variant: no offending frame type field, only the
+	 * error code and the length-prefixed reason phrase.
+	 */
+	if (!quic_enc_int(pos, end, cc->error_code))
+		return 0;
+	if (!quic_enc_int(pos, end, cc->reason_phrase_len))
+		return 0;
+	if (end - *pos < cc->reason_phrase_len)
+		return 0;
+
+	memcpy(*pos, cc->reason_phrase, cc->reason_phrase_len);
+	*pos += cc->reason_phrase_len;
+	return 1;
+}
+
+/* Parse a CONNECTION_CLOSE frame at application layer at <pos> buffer position with <end> as end into <frm> frame.
+ * Note there exist two types of CONNECTION_CLOSE frame, one for the application layer
+ * and another at QUIC layer.
+ * Return 1 if succeeded (enough room at <pos> buffer position to parse this frame), 0 if not.
+ */
+static int quic_parse_connection_close_app_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                                 const unsigned char **pos, const unsigned char *end)
+{
+	size_t plen;
+	struct qf_connection_close_app *cc_frm = &frm->connection_close_app;
+
+	/* Application-layer variant: no offending frame type field. */
+	if (!quic_dec_int(&cc_frm->error_code, pos, end) ||
+	    !quic_dec_int(&cc_frm->reason_phrase_len, pos, end) ||
+	    end - *pos < cc_frm->reason_phrase_len)
+		return 0;
+
+	/* Store at most sizeof cc_frm->reason_phrase bytes (the copy may be
+	 * truncated) but always consume the full announced length so that the
+	 * next frame is parsed at the right position.
+	 */
+	plen = QUIC_MIN((size_t)cc_frm->reason_phrase_len, sizeof cc_frm->reason_phrase);
+	memcpy(cc_frm->reason_phrase, *pos, plen);
+	*pos += cc_frm->reason_phrase_len;
+
+	return 1;
+}
+
+/* Encode a HANDSHAKE_DONE frame at <pos> buffer position.
+ * This frame type carries no field: only its type byte, emitted by the
+ * caller (qc_build_frm()), is sent.
+ * Always succeeds.
+ */
+static int quic_build_handshake_done_frame(unsigned char **pos, const unsigned char *end,
+                                           struct quic_frame *frm, struct quic_conn *conn)
+{
+	/* No field */
+	return 1;
+}
+
+/* Parse a HANDSHAKE_DONE frame at QUIC layer at <pos> buffer position with <end> as end into <frm> frame.
+ * This frame type carries no field, so there is nothing to consume here:
+ * the type byte has already been read by the caller (qc_parse_frm()).
+ * Always succeed.
+ */
+static int quic_parse_handshake_done_frame(struct quic_frame *frm, struct quic_conn *qc,
+                                           const unsigned char **pos, const unsigned char *end)
+{
+	/* No field */
+	return 1;
+}
+
+/* Per frame type descriptor used to encode (build) QUIC frames. */
+struct quic_frame_builder {
+	/* Encoding callback for this frame type. */
+	int (*func)(unsigned char **pos, const unsigned char *end,
+	            struct quic_frame *frm, struct quic_conn *conn);
+	/* Bitmask of the packet types this frame may be sent in. */
+	uint32_t mask;
+	/* QUIC_FL_TX_PACKET_* flags to set on the packet carrying this frame. */
+	unsigned char flags;
+};
+
+/* Frame builders table, indexed by frame type. For each entry, .mask lists
+ * the packet types the frame is allowed in and .flags the QUIC_FL_TX_PACKET_*
+ * flags to set on the packet carrying it (see qc_build_frm()).
+ */
+const struct quic_frame_builder quic_frame_builders[] = {
+	[QUIC_FT_PADDING]      = { .func = quic_build_padding_frame,      .flags = QUIC_FL_TX_PACKET_PADDING,       .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_PING]         = { .func = quic_build_ping_frame,         .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_ACK]          = { .func = quic_build_ack_frame,          .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_ACK_ECN]      = { .func = quic_build_ack_ecn_frame,      .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_RESET_STREAM] = { .func = quic_build_reset_stream_frame, .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STOP_SENDING] = { .func = quic_build_stop_sending_frame, .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_CRYPTO]       = { .func = quic_build_crypto_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_NEW_TOKEN]    = { .func = quic_build_new_token_frame,    .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE____1_BITMASK, },
+	[QUIC_FT_STREAM_8]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_9]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_A]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_B]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_C]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_D]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_E]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_F]     = { .func = quic_build_stream_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_DATA]     = { .func = quic_build_max_data_frame,     .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAM_DATA]      = { .func = quic_build_max_stream_data_frame,      .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAMS_BIDI]     = { .func = quic_build_max_streams_bidi_frame,     .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAMS_UNI]      = { .func = quic_build_max_streams_uni_frame,      .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_DATA_BLOCKED]         = { .func = quic_build_data_blocked_frame,         .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_DATA_BLOCKED]  = { .func = quic_build_stream_data_blocked_frame,  .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAMS_BLOCKED_BIDI] = { .func = quic_build_streams_blocked_bidi_frame, .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAMS_BLOCKED_UNI]  = { .func = quic_build_streams_blocked_uni_frame,  .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_NEW_CONNECTION_ID]    = { .func = quic_build_new_connection_id_frame,    .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_RETIRE_CONNECTION_ID] = { .func = quic_build_retire_connection_id_frame, .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_PATH_CHALLENGE]       = { .func = quic_build_path_challenge_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_PATH_RESPONSE]        = { .func = quic_build_path_response_frame,        .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_CONNECTION_CLOSE]     = { .func = quic_build_connection_close_frame,     .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_CONNECTION_CLOSE_APP] = { .func = quic_build_connection_close_app_frame, .flags = 0,                               .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_HANDSHAKE_DONE]       = { .func = quic_build_handshake_done_frame,       .flags = QUIC_FL_TX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE____1_BITMASK, },
+};
+
+/* Per frame type descriptor used to decode (parse) received QUIC frames. */
+struct quic_frame_parser {
+	/* Parsing callback for this frame type. */
+	int (*func)(struct quic_frame *frm, struct quic_conn *qc,
+	            const unsigned char **pos, const unsigned char *end);
+	/* Bitmask of the packet types this frame may be received in. */
+	uint32_t mask;
+	/* QUIC_FL_RX_PACKET_* flags to set on the packet carrying this frame. */
+	unsigned char flags;
+};
+
+/* Frame parsers table, indexed by frame type. For each entry, .mask lists
+ * the packet types the frame is allowed in and .flags the QUIC_FL_RX_PACKET_*
+ * flags to set on the packet carrying it (see qc_parse_frm()).
+ */
+const struct quic_frame_parser quic_frame_parsers[] = {
+	[QUIC_FT_PADDING]      = { .func = quic_parse_padding_frame,      .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_PING]         = { .func = quic_parse_ping_frame,         .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_ACK]          = { .func = quic_parse_ack_frame_header,   .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_ACK_ECN]      = { .func = quic_parse_ack_ecn_frame,      .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_RESET_STREAM] = { .func = quic_parse_reset_stream_frame, .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STOP_SENDING] = { .func = quic_parse_stop_sending_frame, .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_CRYPTO]       = { .func = quic_parse_crypto_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE_IH_1_BITMASK, },
+	[QUIC_FT_NEW_TOKEN]    = { .func = quic_parse_new_token_frame,    .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE____1_BITMASK, },
+	[QUIC_FT_STREAM_8]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_9]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_A]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_B]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_C]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_D]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_E]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_F]     = { .func = quic_parse_stream_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_DATA]     = { .func = quic_parse_max_data_frame,     .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAM_DATA]      = { .func = quic_parse_max_stream_data_frame,      .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAMS_BIDI]     = { .func = quic_parse_max_streams_bidi_frame,     .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_MAX_STREAMS_UNI]      = { .func = quic_parse_max_streams_uni_frame,      .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_DATA_BLOCKED]         = { .func = quic_parse_data_blocked_frame,         .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAM_DATA_BLOCKED]  = { .func = quic_parse_stream_data_blocked_frame,  .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAMS_BLOCKED_BIDI] = { .func = quic_parse_streams_blocked_bidi_frame, .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_STREAMS_BLOCKED_UNI]  = { .func = quic_parse_streams_blocked_uni_frame,  .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_NEW_CONNECTION_ID]    = { .func = quic_parse_new_connection_id_frame,    .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_RETIRE_CONNECTION_ID] = { .func = quic_parse_retire_connection_id_frame, .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_PATH_CHALLENGE]       = { .func = quic_parse_path_challenge_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_PATH_RESPONSE]        = { .func = quic_parse_path_response_frame,        .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_CONNECTION_CLOSE]     = { .func = quic_parse_connection_close_frame,     .flags = 0,                               .mask = QUIC_FT_PKT_TYPE_IH01_BITMASK, },
+	[QUIC_FT_CONNECTION_CLOSE_APP] = { .func = quic_parse_connection_close_app_frame, .flags = 0,                               .mask = QUIC_FT_PKT_TYPE___01_BITMASK, },
+	[QUIC_FT_HANDSHAKE_DONE]       = { .func = quic_parse_handshake_done_frame,       .flags = QUIC_FL_RX_PACKET_ACK_ELICITING, .mask = QUIC_FT_PKT_TYPE____1_BITMASK, },
+};
+
+/* Decode a QUIC frame at <pos> buffer position into <frm> frame.
+ * Returns 1 if succeeded (enough data at <pos> buffer position to parse the frame), 0 if not.
+ */
+int qc_parse_frm(struct quic_frame *frm, struct quic_rx_packet *pkt,
+                 const unsigned char **pos, const unsigned char *end,
+                 struct quic_conn *qc)
+{
+	int ret = 0;
+	const struct quic_frame_parser *parser;
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSFRM, qc);
+	/* At least one byte is needed for the frame type. */
+	if (end <= *pos) {
+		TRACE_DEVEL("wrong frame", QUIC_EV_CONN_PRSFRM, qc);
+		goto leave;
+	}
+
+	/* Reject unknown frame types. */
+	frm->type = *(*pos)++;
+	if (frm->type >= QUIC_FT_MAX) {
+		TRACE_DEVEL("wrong frame type", QUIC_EV_CONN_PRSFRM, qc, frm);
+		goto leave;
+	}
+
+	/* Check this frame type is allowed in <pkt> packet type. */
+	parser = &quic_frame_parsers[frm->type];
+	if (!(parser->mask & (1U << pkt->type))) {
+		TRACE_DEVEL("unauthorized frame", QUIC_EV_CONN_PRSFRM, qc, frm);
+		goto leave;
+	}
+
+	/* Type-specific parsing: advances <pos> past the frame fields. */
+	if (!parser->func(frm, qc, pos, end)) {
+		TRACE_DEVEL("parsing error", QUIC_EV_CONN_PRSFRM, qc, frm);
+		goto leave;
+	}
+
+	TRACE_PROTO("RX frm", QUIC_EV_CONN_PSTRM, qc, frm);
+
+	/* Propagate this frame type's RX flags (e.g. ack-eliciting) to the packet. */
+	pkt->flags |= parser->flags;
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_PRSFRM, qc);
+	return ret;
+}
+
+/* Encode <frm> QUIC frame at <pos> buffer position.
+ * Returns 1 if succeeded (enough room at <pos> buffer position to encode the frame), 0 if not.
+ * The buffer is updated to point to one byte past the end of the built frame
+ * only if succeeded.
+ */
+int qc_build_frm(unsigned char **pos, const unsigned char *end,
+                 struct quic_frame *frm, struct quic_tx_packet *pkt,
+                 struct quic_conn *qc)
+{
+	int ret = 0;
+	const struct quic_frame_builder *builder;
+	unsigned char *p = *pos;
+
+	TRACE_ENTER(QUIC_EV_CONN_BFRM, qc);
+	builder = &quic_frame_builders[frm->type];
+	if (!(builder->mask & (1U << pkt->type))) {
+		/* XXX It is a bug to send an unauthorized frame with such a packet type XXX */
+		TRACE_ERROR("unauthorized frame", QUIC_EV_CONN_BFRM, qc, frm);
+		BUG_ON(!(builder->mask & (1U << pkt->type)));
+	}
+
+	/* At least one byte is needed for the frame type. */
+	if (end <= p) {
+		TRACE_DEVEL("not enough room", QUIC_EV_CONN_BFRM, qc, frm);
+		goto leave;
+	}
+
+	TRACE_PROTO("TX frm", QUIC_EV_CONN_BFRM, qc, frm);
+	/* Emit the frame type byte, then the type-specific fields. Use the
+	 * <builder> entry already resolved above instead of indexing the
+	 * table a second time.
+	 */
+	*p++ = frm->type;
+	if (!builder->func(&p, end, frm, qc)) {
+		TRACE_ERROR("frame building error", QUIC_EV_CONN_BFRM, qc, frm);
+		goto leave;
+	}
+
+	pkt->flags |= builder->flags;
+	/* Update <pos> only on success. */
+	*pos = p;
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_BFRM, qc);
+	return ret;
+}
+
+/* Detach all duplicated frames from <frm> reflist.
+ * Each copy has its <origin> pointer reset and is unlinked from the
+ * reference list, so it no longer depends on <frm> lifetime.
+ */
+void qc_frm_unref(struct quic_frame *frm, struct quic_conn *qc)
+{
+	struct quic_frame *f, *tmp;
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc, frm);
+
+	list_for_each_entry_safe(f, tmp, &frm->reflist, ref) {
+		f->origin = NULL;
+		LIST_DEL_INIT(&f->ref);
+		/* <pkt> is only set once the copy has been sent. */
+		if (f->pkt) {
+			TRACE_DEVEL("remove frame reference",
+			            QUIC_EV_CONN_PRSAFRM, qc, f, &f->pkt->pn_node.key);
+		}
+		else {
+			TRACE_DEVEL("remove frame reference for unsent frame",
+			            QUIC_EV_CONN_PRSAFRM, qc, f);
+		}
+	}
+
+	TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Free a <frm> quic_frame. Remove it from parent element if still attached.
+ * <*frm> is reset to NULL on return so the caller cannot reuse a dangling
+ * pointer.
+ */
+void qc_frm_free(struct quic_conn *qc, struct quic_frame **frm)
+{
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc, *frm);
+	/* Caller must ensure that no other frame points to <frm>. Use
+	 * qc_frm_unref() to handle this properly.
+	 */
+	BUG_ON(!LIST_ISEMPTY(&((*frm)->reflist)));
+	BUG_ON(LIST_INLIST(&((*frm)->ref)));
+
+	/* TODO simplify frame deallocation. In some code paths, we must
+	 * manually call this LIST_DEL_INIT before using
+	 * quic_tx_packet_refdec() and freeing the frame.
+	 */
+	LIST_DEL_INIT(&((*frm)->list));
+
+	pool_free(pool_head_quic_frame, *frm);
+	*frm = NULL;
+	TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Release <frm> frame and mark its copies as acknowledged */
+void qc_release_frm(struct quic_conn *qc, struct quic_frame *frm)
+{
+	uint64_t pn;
+	struct quic_frame *origin, *f, *tmp;
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc, frm);
+
+	/* Identify this frame: a frame copy or one of its copies */
+	origin = frm->origin ? frm->origin : frm;
+	/* Ensure the source of the copies is flagged as acked, <frm> being
+	 * possibly a copy of <origin>
+	 */
+	origin->flags |= QUIC_FL_TX_FRAME_ACKED;
+	/* Mark all the copies of <origin> as acknowledged. We must
+	 * not release the packets (releasing the frames) at this time as
+	 * they are possibly also to be acknowledged alongside the
+	 * current one.
+	 */
+	list_for_each_entry_safe(f, tmp, &origin->reflist, ref) {
+		/* <pkt> is only set once the copy has been sent. */
+		if (f->pkt) {
+			f->flags |= QUIC_FL_TX_FRAME_ACKED;
+			f->origin = NULL;
+			LIST_DEL_INIT(&f->ref);
+			pn = f->pkt->pn_node.key;
+			TRACE_DEVEL("mark frame as acked from packet",
+			            QUIC_EV_CONN_PRSAFRM, qc, f, &pn);
+		}
+		else {
+			/* An unsent copy will never be acknowledged: free it now. */
+			TRACE_DEVEL("freeing unsent frame",
+			            QUIC_EV_CONN_PRSAFRM, qc, f);
+			LIST_DEL_INIT(&f->ref);
+			qc_frm_free(qc, &f);
+		}
+	}
+	LIST_DEL_INIT(&frm->list);
+	pn = frm->pkt->pn_node.key;
+	/* Drop the reference <frm> held on its TX packet before freeing it. */
+	quic_tx_packet_refdec(frm->pkt);
+	TRACE_DEVEL("freeing frame from packet",
+	            QUIC_EV_CONN_PRSAFRM, qc, frm, &pn);
+	qc_frm_free(qc, &frm);
+
+	TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
diff --git a/src/quic_loss.c b/src/quic_loss.c
new file mode 100644
index 0000000..fd9568a
--- /dev/null
+++ b/src/quic_loss.c
@@ -0,0 +1,312 @@
+#include <import/eb64tree.h>
+
+#include <haproxy/quic_conn-t.h>
+#include <haproxy/quic_loss.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace.h>
+
+#include <haproxy/atomic.h>
+#include <haproxy/list.h>
+#include <haproxy/ticks.h>
+#include <haproxy/trace.h>
+
+/* Update <ql> QUIC loss information with new <rtt> measurement and <ack_delay>
+ * on ACK frame receipt which MUST be min(ack->ack_delay, max_ack_delay)
+ * before the handshake is confirmed.
+ */
+void quic_loss_srtt_update(struct quic_loss *ql,
+                           unsigned int rtt, unsigned int ack_delay,
+                           struct quic_conn *qc)
+{
+	TRACE_ENTER(QUIC_EV_CONN_RTTUPDT, qc);
+	TRACE_PROTO("TX loss srtt update", QUIC_EV_CONN_RTTUPDT, qc, &rtt, &ack_delay, ql);
+
+	ql->latest_rtt = rtt;
+	/* <rtt_min> == 0 is used as the "no sample yet" marker. */
+	if (!ql->rtt_min) {
+		/* No previous measurement. */
+		ql->srtt = rtt;
+		ql->rtt_var = rtt / 2;
+		ql->rtt_min = rtt;
+	}
+	else {
+		int diff;
+
+		ql->rtt_min = QUIC_MIN(rtt, ql->rtt_min);
+		/* Specific to QUIC (RTT adjustment): subtract the peer-reported
+		 * ACK delay only when this does not bring the sample below the
+		 * minimum witnessed RTT.
+		 */
+		if (ack_delay && rtt >= ql->rtt_min + ack_delay)
+			rtt -= ack_delay;
+		/* Exponentially weighted moving averages of the RTT variance
+		 * (1/4 gain) and smoothed RTT (1/8 gain), presumably per
+		 * RFC 9002 section 5.3 -- confirm against the spec.
+		 */
+		diff = ql->srtt - rtt;
+		if (diff < 0)
+			diff = -diff;
+		ql->rtt_var = (3 * ql->rtt_var + diff) / 4;
+		ql->srtt = (7 * ql->srtt + rtt) / 8;
+	}
+
+	TRACE_PROTO("TX loss srtt update", QUIC_EV_CONN_RTTUPDT, qc,,, ql);
+	TRACE_LEAVE(QUIC_EV_CONN_RTTUPDT, qc);
+}
+
+/* Returns for <qc> QUIC connection the first packet number space which
+ * experienced packet loss, if any or a packet number space with
+ * TICK_ETERNITY as packet loss time if not.
+ */
+struct quic_pktns *quic_loss_pktns(struct quic_conn *qc)
+{
+	struct quic_pktns *pktns, *p;
+
+	TRACE_ENTER(QUIC_EV_CONN_SPTO, qc);
+
+	BUG_ON(LIST_ISEMPTY(&qc->pktns_list));
+	/* Start from the first packet number space and keep the one with the
+	 * earliest loss time.
+	 */
+	pktns = p = LIST_NEXT(&qc->pktns_list, struct quic_pktns *, list);
+
+	do {
+		TRACE_PROTO("TX loss pktns", QUIC_EV_CONN_SPTO, qc, p);
+		/* Replace the current candidate if it has no loss time set or
+		 * if <p> has an earlier one.
+		 */
+		if (!tick_isset(pktns->tx.loss_time) ||
+		    tick_is_lt(p->tx.loss_time, pktns->tx.loss_time)) {
+			pktns = p;
+		}
+		p = LIST_NEXT(&p->list, struct quic_pktns *, list);
+	} while (&p->list != &qc->pktns_list);
+
+	TRACE_LEAVE(QUIC_EV_CONN_SPTO, qc);
+
+	return pktns;
+}
+
+/* Returns for <qc> QUIC connection the first packet number space to
+ * arm the PTO for if any or a packet number space with TICK_ETERNITY
+ * as PTO value if not.
+ * If <pto> is not NULL, the selected PTO expiry tick is stored there.
+ */
+struct quic_pktns *quic_pto_pktns(struct quic_conn *qc,
+                                  int handshake_confirmed,
+                                  unsigned int *pto)
+{
+	unsigned int duration, lpto;
+	struct quic_loss *ql = &qc->path->loss;
+	struct quic_pktns *pktns, *p;
+
+	TRACE_ENTER(QUIC_EV_CONN_SPTO, qc);
+
+	BUG_ON(LIST_ISEMPTY(&qc->pktns_list));
+	/* Base PTO duration: srtt + max(4*rtt_var, granularity), doubled for
+	 * each consecutive PTO (pto_count backoff).
+	 */
+	duration =
+		ql->srtt +
+		(QUIC_MAX(4 * ql->rtt_var, QUIC_TIMER_GRANULARITY) << ql->pto_count);
+
+	/* RFC 9002 6.2.2.1. Before Address Validation
+	 *
+	 * the client MUST set the PTO timer if the client has not received an
+	 * acknowledgment for any of its Handshake packets and the handshake is
+	 * not confirmed (see Section 4.1.2 of [QUIC-TLS]), even if there are no
+	 * packets in flight.
+	 *
+	 * TODO implement the above paragraph for QUIC on backend side. Note
+	 * that if now_ms is used this function is not reentrant anymore and can
+	 * not be used anytime without side-effect (for example after QUIC
+	 * connection migration).
+	 */
+
+	lpto = TICK_ETERNITY;
+	pktns = p = LIST_NEXT(&qc->pktns_list, struct quic_pktns *, list);
+
+	do {
+		unsigned int tmp_pto;
+
+		/* Only packet number spaces with in-flight data are candidates. */
+		if (p->tx.in_flight) {
+			if (p == qc->apktns) {
+				/* The application packet number space cannot
+				 * arm the PTO before handshake confirmation.
+				 */
+				if (!handshake_confirmed) {
+					TRACE_STATE("TX PTO handshake not already confirmed", QUIC_EV_CONN_SPTO, qc);
+					goto out;
+				}
+
+				/* Account for the peer's max ACK delay at the
+				 * application level, with the same backoff.
+				 */
+				duration += qc->max_ack_delay << ql->pto_count;
+			}
+
+			/* Keep the earliest PTO expiry among the candidates. */
+			tmp_pto = tick_add(p->tx.time_of_last_eliciting, duration);
+			if (!tick_isset(lpto) || tick_is_lt(tmp_pto, lpto)) {
+				lpto = tmp_pto;
+				pktns = p;
+			}
+
+			TRACE_PROTO("TX PTO", QUIC_EV_CONN_SPTO, qc, p);
+		}
+
+		p = LIST_NEXT(&p->list, struct quic_pktns *, list);
+	} while (&p->list != &qc->pktns_list);
+
+ out:
+	if (pto)
+		*pto = lpto;
+	TRACE_PROTO("TX PTO", QUIC_EV_CONN_SPTO, qc, pktns, &duration);
+	TRACE_LEAVE(QUIC_EV_CONN_SPTO, qc);
+
+	return pktns;
+}
+
+/* Look for packet loss among the sent packets of <pktns> packet number space
+ * of <qc> connection. Packets deemed lost are removed from their tree and
+ * appended to <lost_pkts> list. <pktns> loss time is updated from the oldest
+ * packet not (yet) deemed lost, if any.
+ * Should be called after having received an ACK frame with newly acknowledged
+ * packets or when the loss detection timer has expired.
+ * Always succeeds.
+ */
+void qc_packet_loss_lookup(struct quic_pktns *pktns, struct quic_conn *qc,
+                           struct list *lost_pkts)
+{
+	struct eb_root *pkts;
+	struct eb64_node *node;
+	struct quic_loss *ql;
+	unsigned int loss_delay;
+	uint64_t pktthresh;
+
+	TRACE_ENTER(QUIC_EV_CONN_PKTLOSS, qc);
+	TRACE_PROTO("TX loss", QUIC_EV_CONN_PKTLOSS, qc, pktns);
+	pkts = &pktns->tx.pkts;
+	pktns->tx.loss_time = TICK_ETERNITY;
+	if (eb_is_empty(pkts))
+		goto out;
+
+	/* Time threshold: a fraction above max(latest_rtt, srtt), floored at
+	 * the timer granularity.
+	 */
+	ql = &qc->path->loss;
+	loss_delay = QUIC_MAX(ql->latest_rtt, ql->srtt);
+	loss_delay = QUIC_MAX(loss_delay, MS_TO_TICKS(QUIC_TIMER_GRANULARITY)) *
+		QUIC_LOSS_TIME_THRESHOLD_MULTIPLICAND / QUIC_LOSS_TIME_THRESHOLD_DIVISOR;
+
+	node = eb64_first(pkts);
+
+	/* RFC 9002 6.1.1. Packet Threshold
+	 * The RECOMMENDED initial value for the packet reordering threshold
+	 * (kPacketThreshold) is 3, based on best practices for TCP loss detection
+	 * [RFC5681] [RFC6675]. In order to remain similar to TCP, implementations
+	 * SHOULD NOT use a packet threshold less than 3; see [RFC5681].
+
+	 * Some networks may exhibit higher degrees of packet reordering, causing a
+	 * sender to detect spurious losses. Additionally, packet reordering could be
+	 * more common with QUIC than TCP because network elements that could observe
+	 * and reorder TCP packets cannot do that for QUIC and also because QUIC
+	 * packet numbers are encrypted.
+	 */
+
+	/* Dynamic packet reordering threshold calculation depending on the distance
+	 * (in packets) between the last transmitted packet and the oldest still in
+	 * flight before loss detection.
+	 */
+	pktthresh = pktns->tx.next_pn - 1 - eb64_entry(node, struct quic_tx_packet, pn_node)->pn_node.key;
+	/* Apply a ratio to this threshold and add it to QUIC_LOSS_PACKET_THRESHOLD. */
+	pktthresh = pktthresh * global.tune.quic_reorder_ratio / 100 + QUIC_LOSS_PACKET_THRESHOLD;
+	while (node) {
+		struct quic_tx_packet *pkt;
+		int64_t largest_acked_pn;
+		unsigned int loss_time_limit, time_sent;
+		int reordered;
+
+		pkt = eb64_entry(&node->node, struct quic_tx_packet, pn_node);
+		largest_acked_pn = pktns->rx.largest_acked_pn;
+		node = eb64_next(node);
+		/* Packets sent after the largest acknowledged one cannot be
+		 * declared lost: stop here (the tree is ordered by packet number).
+		 */
+		if ((int64_t)pkt->pn_node.key > largest_acked_pn)
+			break;
+
+		time_sent = pkt->time_sent;
+		loss_time_limit = tick_add(time_sent, loss_delay);
+
+		/* Packet-threshold detection: lost if at least <pktthresh>
+		 * more recent packets were already acknowledged.
+		 */
+		reordered = (int64_t)largest_acked_pn >= pkt->pn_node.key + pktthresh;
+		if (reordered)
+			ql->nb_reordered_pkt++;
+
+		/* Lost by time threshold or by packet threshold. */
+		if (tick_is_le(loss_time_limit, now_ms) || reordered) {
+			eb64_delete(&pkt->pn_node);
+			LIST_APPEND(lost_pkts, &pkt->list);
+			ql->nb_lost_pkt++;
+		}
+		else {
+			/* Not lost yet: remember when it could become so and stop. */
+			if (tick_isset(pktns->tx.loss_time))
+				pktns->tx.loss_time = tick_first(pktns->tx.loss_time, loss_time_limit);
+			else
+				pktns->tx.loss_time = loss_time_limit;
+			break;
+		}
+	}
+
+ out:
+	TRACE_PROTO("TX loss", QUIC_EV_CONN_PKTLOSS, qc, pktns, lost_pkts);
+	TRACE_LEAVE(QUIC_EV_CONN_PKTLOSS, qc);
+}
+
+/* Handle <pkts> list of lost packets detected at <now_us> handling their TX
+ * frames. Send a packet loss event to the congestion controller if in flight
+ * packets have been lost. Also frees the packets in <pkts> list.
+ *
+ * Returns 1 on success else 0 if loss limit has been exceeded. A
+ * CONNECTION_CLOSE was prepared to close the connection ASAP.
+ */
+int qc_release_lost_pkts(struct quic_conn *qc, struct quic_pktns *pktns,
+                         struct list *pkts, uint64_t now_us)
+{
+	struct quic_tx_packet *pkt, *tmp, *oldest_lost, *newest_lost;
+	int close = 0;
+
+	TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+	if (LIST_ISEMPTY(pkts))
+		goto leave;
+
+	oldest_lost = newest_lost = NULL;
+	list_for_each_entry_safe(pkt, tmp, pkts, list) {
+		/* Note: a previous version declared an unused local
+		 * "struct list tmp" here which shadowed the <tmp> iterator of
+		 * list_for_each_entry_safe(); it has been removed.
+		 */
+		/* Account the lost packet out of the in-flight counters. */
+		pkt->pktns->tx.in_flight -= pkt->in_flight_len;
+		qc->path->prep_in_flight -= pkt->in_flight_len;
+		qc->path->in_flight -= pkt->in_flight_len;
+		if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
+			qc->path->ifae_pkts--;
+		/* Treat the frames of this lost packet. */
+		if (!qc_handle_frms_of_lost_pkt(qc, pkt, &pktns->tx.frms))
+			close = 1;
+		LIST_DELETE(&pkt->list);
+		/* Track the oldest and newest lost packets: their references
+		 * are dropped below, after the congestion event is reported.
+		 */
+		if (!oldest_lost) {
+			oldest_lost = newest_lost = pkt;
+		}
+		else {
+			if (newest_lost != oldest_lost)
+				quic_tx_packet_refdec(newest_lost);
+			newest_lost = pkt;
+		}
+	}
+
+	if (!close) {
+		if (newest_lost) {
+			/* Send a congestion event to the controller */
+			struct quic_cc_event ev = { };
+
+			ev.type = QUIC_CC_EVT_LOSS;
+			ev.loss.time_sent = newest_lost->time_sent;
+
+			quic_cc_event(&qc->path->cc, &ev);
+		}
+
+		/* If an RTT has been already sampled, <rtt_min> has been set.
+		 * We must check if we are experiencing a persistent congestion.
+		 * If this is the case, the congestion controller must re-enter
+		 * slow start state.
+		 */
+		if (qc->path->loss.rtt_min && newest_lost != oldest_lost) {
+			unsigned int period = newest_lost->time_sent - oldest_lost->time_sent;
+
+			if (quic_loss_persistent_congestion(&qc->path->loss, period,
+			                                    now_ms, qc->max_ack_delay))
+				qc->path->cc.algo->slow_start(&qc->path->cc);
+		}
+	}
+
+	/* <oldest_lost> cannot be NULL at this stage because we have ensured
+	 * that <pkts> list is not empty. Without this, GCC 12.2.0 reports a
+	 * possible overflow on a 0 byte region with O2 optimization.
+	 */
+	ALREADY_CHECKED(oldest_lost);
+	quic_tx_packet_refdec(oldest_lost);
+	if (newest_lost != oldest_lost)
+		quic_tx_packet_refdec(newest_lost);
+
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+	return !close;
+}
diff --git a/src/quic_openssl_compat.c b/src/quic_openssl_compat.c
new file mode 100644
index 0000000..d914ac4
--- /dev/null
+++ b/src/quic_openssl_compat.c
@@ -0,0 +1,531 @@
+#ifndef USE_QUIC
+#error "Must define USE_QUIC"
+#endif
+
+#ifndef USE_OPENSSL
+#error "Must define USE_OPENSSL"
+#endif
+
+#include <haproxy/openssl-compat.h>
+/* Highly inspired from nginx QUIC TLS compatibility code */
+#include <openssl/kdf.h>
+
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/trace.h>
+
+#ifndef HAVE_SSL_KEYLOG
+#error "HAVE_SSL_KEYLOG is not defined"
+#endif
+
+#define QUIC_OPENSSL_COMPAT_RECORD_SIZE 1024
+
+#define QUIC_TLS_KEY_LABEL "key"
+#define QUIC_TLS_IV_LABEL "iv"
+
+struct quic_tls_compat_record {
+ unsigned char type;
+ const unsigned char *payload;
+ size_t payload_len;
+ uint64_t number;
+ struct quic_tls_compat_keys *keys;
+};
+
+/* Callback used to set the local transport parameters into the TLS stack.
+ * Must be called after having been set at the QUIC connection level.
+ */
+static int qc_ssl_compat_add_tps_cb(SSL *ssl, unsigned int ext_type, unsigned int context,
+ const unsigned char **out, size_t *outlen,
+ X509 *x, size_t chainidx, int *al, void *add_arg)
+{
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ *out = qc->enc_params;
+ *outlen = qc->enc_params_len;
+
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return 1;
+}
+
+/* Set the keylog callback used to derive TLS secrets and the callback
+ * used to pass local transport parameters to the TLS stack.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_compat_init(struct bind_conf *bind_conf, SSL_CTX *ctx)
+{
+ /* Ignore non-QUIC connections */
+ if (bind_conf->xprt != xprt_get(XPRT_QUIC))
+ return 1;
+
+ /* This callback is already registered if the TLS keylog is activated for
+ * traffic decryption analysis.
+ */
+ if (!global_ssl.keylog)
+ SSL_CTX_set_keylog_callback(ctx, quic_tls_compat_keylog_callback);
+
+ if (SSL_CTX_has_client_custom_ext(ctx, QUIC_OPENSSL_COMPAT_SSL_TP_EXT))
+ return 1;
+
+ if (!SSL_CTX_add_custom_ext(ctx, QUIC_OPENSSL_COMPAT_SSL_TP_EXT,
+ SSL_EXT_CLIENT_HELLO | SSL_EXT_TLS1_3_ENCRYPTED_EXTENSIONS,
+ qc_ssl_compat_add_tps_cb, NULL, NULL,
+ NULL, NULL))
+ return 0;
+
+ return 1;
+}
+
+static int quic_tls_compat_set_encryption_secret(struct quic_conn *qc,
+ struct quic_tls_compat_keys *keys,
+ enum ssl_encryption_level_t level,
+ const SSL_CIPHER *cipher,
+ const uint8_t *secret, size_t secret_len)
+{
+ int ret = 0, key_len;
+ struct quic_tls_secret *peer_secret;
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ peer_secret = &keys->secret;
+ if (sizeof(peer_secret->secret.data) < secret_len)
+ goto leave;
+
+ keys->cipher = tls_aead(cipher);
+ if (!keys->cipher)
+ goto leave;
+
+ key_len = EVP_CIPHER_key_length(keys->cipher);
+
+ peer_secret->secret.len = secret_len;
+ memcpy(peer_secret->secret.data, secret, secret_len);
+
+ peer_secret->key.len = key_len;
+ peer_secret->iv.len = QUIC_OPENSSL_COMPAT_TLS_IV_LEN;
+ if (!quic_hkdf_expand_label(tls_md(cipher),
+ peer_secret->key.data, peer_secret->key.len,
+ secret, secret_len,
+ (const unsigned char *)QUIC_TLS_KEY_LABEL,
+ sizeof(QUIC_TLS_KEY_LABEL) - 1) ||
+ !quic_hkdf_expand_label(tls_md(cipher),
+ peer_secret->iv.data, peer_secret->iv.len,
+ secret, secret_len,
+ (const unsigned char *)QUIC_TLS_IV_LABEL,
+ sizeof(QUIC_TLS_IV_LABEL) - 1))
+ goto leave;
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return ret;
+}
+
+/* Callback used to get the Handshake and Application level secrets from
+ * the TLS stack.
+ */
+void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line)
+{
+ unsigned char ch, value;
+ const char *start, *p;
+ size_t n;
+ unsigned int write;
+ struct quic_openssl_compat *compat;
+ enum ssl_encryption_level_t level;
+ unsigned char secret[EVP_MAX_MD_SIZE];
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ /* Ignore non-QUIC connections */
+ if (!qc)
+ return;
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ p = line;
+ for (start = p; *p && *p != ' '; p++);
+ n = p - start;
+
+ if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE) - 1 == n &&
+ !strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE, n)) {
+ level = ssl_encryption_handshake;
+ write = 0;
+ }
+ else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE) - 1 == n &&
+ !strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE, n)) {
+ level = ssl_encryption_handshake;
+ write = 1;
+ }
+ else if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION) - 1 == n &&
+ !strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION, n)) {
+ level = ssl_encryption_application;
+ write = 0;
+ }
+ else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_APPLICATION) - 1 == n &&
+ !strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_APPLICATION, n)) {
+ level = ssl_encryption_application;
+ write = 1;
+ }
+ else
+ goto leave;
+
+ if (*p++ == '\0')
+ goto leave;
+
+ while (*p && *p != ' ')
+ p++;
+
+ if (*p++ == '\0')
+ goto leave;
+
+ for (n = 0, start = p; *p; p++) {
+ ch = *p;
+ if (ch >= '0' && ch <= '9') {
+ value = ch - '0';
+ goto next;
+ }
+
+ ch = (unsigned char) (ch | 0x20);
+ if (ch >= 'a' && ch <= 'f') {
+ value = ch - 'a' + 10;
+ goto next;
+ }
+
+ goto leave;
+
+next:
+ if ((p - start) % 2) {
+ secret[n++] += value;
+ }
+ else {
+ if (n >= EVP_MAX_MD_SIZE)
+ goto leave;
+
+ secret[n] = (value << 4);
+ }
+ }
+
+ /* Secret successfully parsed */
+ compat = &qc->openssl_compat;
+ if (write) {
+ compat->method->set_encryption_secrets((SSL *) ssl, level, NULL, secret, n);
+ compat->write_level = level;
+
+ } else {
+ const SSL_CIPHER *cipher;
+
+ cipher = SSL_get_current_cipher(ssl);
+ /* AES_128_CCM_SHA256 not supported at this time. Furthermore, this
+ * algorithm is silently disabled by the TLS stack. But it can be
+ * enabled with "ssl-default-bind-ciphersuites" setting.
+ */
+ if (SSL_CIPHER_get_id(cipher) == TLS1_3_CK_AES_128_CCM_SHA256) {
+ quic_set_tls_alert(qc, SSL_AD_HANDSHAKE_FAILURE);
+ goto leave;
+ }
+
+ compat->method->set_encryption_secrets((SSL *) ssl, level, secret, NULL, n);
+ compat->read_level = level;
+ compat->read_record = 0;
+ quic_tls_compat_set_encryption_secret(qc, &compat->keys, level,
+ cipher, secret, n);
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+}
+
+static size_t quic_tls_compat_create_header(struct quic_conn *qc,
+ struct quic_tls_compat_record *rec,
+ unsigned char *out, int plain)
+{
+ unsigned char type;
+ size_t len;
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ len = rec->payload_len;
+ if (plain) {
+ type = rec->type;
+ }
+ else {
+ type = SSL3_RT_APPLICATION_DATA;
+ len += EVP_GCM_TLS_TAG_LEN;
+ }
+
+ out[0] = type;
+ out[1] = 0x03;
+ out[2] = 0x03;
+ out[3] = (len >> 8);
+ out[4] = len;
+
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return 5;
+}
+
+static void quic_tls_compute_nonce(unsigned char *nonce, size_t len, uint64_t pn)
+{
+ nonce[len - 8] ^= (pn >> 56) & 0x3f;
+ nonce[len - 7] ^= (pn >> 48) & 0xff;
+ nonce[len - 6] ^= (pn >> 40) & 0xff;
+ nonce[len - 5] ^= (pn >> 32) & 0xff;
+ nonce[len - 4] ^= (pn >> 24) & 0xff;
+ nonce[len - 3] ^= (pn >> 16) & 0xff;
+ nonce[len - 2] ^= (pn >> 8) & 0xff;
+ nonce[len - 1] ^= pn & 0xff;
+}
+
+/* Cipher <in> buffer data into <out> with <cipher> as AEAD cipher, <s> as secret.
+ * <ad> is the buffer for the additional data.
+ */
+static int quic_tls_tls_seal(struct quic_conn *qc,
+ const EVP_CIPHER *cipher, struct quic_tls_secret *s,
+ unsigned char *out, size_t *outlen, unsigned char *nonce,
+ const unsigned char *in, size_t inlen,
+ const unsigned char *ad, size_t adlen)
+{
+ int ret = 0, wlen;
+ EVP_CIPHER_CTX *ctx;
+ int aead_nid = EVP_CIPHER_nid(cipher);
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+ ctx = EVP_CIPHER_CTX_new();
+ if (ctx == NULL)
+ goto leave;
+
+ /* Note that the following encryption code works with NID_aes_128_ccm, but leads
+ * to a handshake failure with "bad record mac" (20) TLS alert received from
+ * the peer.
+ */
+ if (!EVP_EncryptInit_ex(ctx, cipher, NULL, NULL, NULL) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, s->iv.len, NULL) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, EVP_GCM_TLS_TAG_LEN, NULL)) ||
+ !EVP_EncryptInit_ex(ctx, NULL, NULL, s->key.data, nonce) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_EncryptUpdate(ctx, NULL, &wlen, NULL, inlen)) ||
+ !EVP_EncryptUpdate(ctx, NULL, &wlen, ad, adlen) ||
+ !EVP_EncryptUpdate(ctx, out, &wlen, in, inlen) ||
+ !EVP_EncryptFinal_ex(ctx, out + wlen, &wlen) ||
+ (aead_nid != NID_aes_128_ccm &&
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, EVP_GCM_TLS_TAG_LEN, out + inlen))) {
+ goto leave;
+ }
+
+ *outlen = inlen + adlen + EVP_GCM_TLS_TAG_LEN;
+ ret = 1;
+ leave:
+ /* Safe to call EVP_CIPHER_CTX_free() with null ctx */
+ EVP_CIPHER_CTX_free(ctx);
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return ret;
+}
+
+static int quic_tls_compat_create_record(struct quic_conn *qc,
+ enum ssl_encryption_level_t level,
+ struct quic_tls_compat_record *rec,
+ unsigned char *res)
+{
+ int ret = 0;
+ unsigned char *ad;
+ size_t adlen;
+ unsigned char *out;
+ size_t outlen;
+ struct quic_tls_secret *secret;
+ unsigned char nonce[QUIC_OPENSSL_COMPAT_TLS_IV_LEN];
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ ad = res;
+ adlen = quic_tls_compat_create_header(qc, rec, ad, 0);
+
+ out = res + adlen;
+ outlen = rec->payload_len + EVP_GCM_TLS_TAG_LEN;
+
+ secret = &rec->keys->secret;
+
+ memcpy(nonce, secret->iv.data, secret->iv.len);
+ quic_tls_compute_nonce(nonce, sizeof(nonce), rec->number);
+
+ if (!quic_tls_tls_seal(qc, rec->keys->cipher, secret, out, &outlen,
+ nonce, rec->payload, rec->payload_len, ad, adlen))
+ goto leave;
+
+ ret = outlen;
+leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return ret;
+}
+
+/* Callback used to parse TLS messages for <ssl> TLS session. */
+void quic_tls_compat_msg_callback(struct connection *conn,
+ int write_p, int version, int content_type,
+ const void *buf, size_t len, SSL *ssl)
+{
+ unsigned int alert;
+ enum ssl_encryption_level_t level;
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+ struct quic_openssl_compat *com;
+
+ if (!write_p || !qc)
+ goto leave;
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ com = &qc->openssl_compat;
+ level = com->write_level;
+ switch (content_type) {
+ case SSL3_RT_HANDSHAKE:
+ com->method->add_handshake_data(ssl, level, buf, len);
+ break;
+ case SSL3_RT_ALERT:
+ if (len >= 2) {
+ alert = ((unsigned char *) buf)[1];
+ com->method->send_alert(ssl, level, alert);
+ }
+ break;
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+}
+
+int SSL_set_quic_method(SSL *ssl, const SSL_QUIC_METHOD *quic_method)
+{
+ int ret = 0;
+ BIO *rbio, *wbio = NULL;
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ rbio = BIO_new(BIO_s_mem());
+ if (!rbio)
+ goto err;
+
+ wbio = BIO_new(BIO_s_null());
+ if (!wbio)
+ goto err;
+
+ SSL_set_bio(ssl, rbio, wbio);
+ /* No early data support */
+ SSL_set_max_early_data(ssl, 0);
+
+ qc->openssl_compat.rbio = rbio;
+ qc->openssl_compat.wbio = wbio;
+ qc->openssl_compat.method = quic_method;
+ qc->openssl_compat.read_level = ssl_encryption_initial;
+ qc->openssl_compat.write_level = ssl_encryption_initial;
+ ret = 1;
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return ret;
+ err:
+ BIO_free(rbio);
+ BIO_free(wbio);
+ goto leave;
+}
+
+enum ssl_encryption_level_t SSL_quic_read_level(const SSL *ssl)
+{
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return qc->openssl_compat.read_level;
+}
+
+
+enum ssl_encryption_level_t SSL_quic_write_level(const SSL *ssl)
+{
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return qc->openssl_compat.write_level;
+}
+
+int SSL_provide_quic_data(SSL *ssl, enum ssl_encryption_level_t level,
+ const uint8_t *data, size_t len)
+{
+ int ret = 0;
+ BIO *rbio;
+ struct quic_tls_compat_record rec;
+ unsigned char in[QUIC_OPENSSL_COMPAT_RECORD_SIZE + 1];
+ unsigned char out[QUIC_OPENSSL_COMPAT_RECORD_SIZE + 1 +
+ SSL3_RT_HEADER_LENGTH + EVP_GCM_TLS_TAG_LEN];
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+ size_t n;
+
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+
+ rbio = SSL_get_rbio(ssl);
+
+ while (len) {
+ memset(&rec, 0, sizeof rec);
+ rec.type = SSL3_RT_HANDSHAKE;
+ rec.number = qc->openssl_compat.read_record++;
+ rec.keys = &qc->openssl_compat.keys;
+ if (level == ssl_encryption_initial) {
+ n = QUIC_MIN(len, (size_t)65535);
+ rec.payload = (unsigned char *)data;
+ rec.payload_len = n;
+ quic_tls_compat_create_header(qc, &rec, out, 1);
+ BIO_write(rbio, out, SSL3_RT_HEADER_LENGTH);
+ BIO_write(rbio, data, n);
+ }
+ else {
+ size_t outlen;
+ unsigned char *p = in;
+
+ n = QUIC_MIN(len, (size_t)QUIC_OPENSSL_COMPAT_RECORD_SIZE);
+ memcpy(in, data, n);
+ p += n;
+ *p++ = SSL3_RT_HANDSHAKE;
+
+ rec.payload = in;
+ rec.payload_len = p - in;
+
+ if (!rec.keys->cipher)
+ goto leave;
+
+ outlen = quic_tls_compat_create_record(qc, level, &rec, out);
+ if (!outlen)
+ goto leave;
+
+ BIO_write(rbio, out, outlen);
+ }
+
+ data += n;
+ len -= n;
+ }
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return ret;
+}
+
+int SSL_process_quic_post_handshake(SSL *ssl)
+{
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+ /* Do nothing: rely on the TLS message callback to parse alert messages. */
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return 1;
+}
+
+int SSL_set_quic_transport_params(SSL *ssl, const uint8_t *params, size_t params_len)
+{
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+ /* The local transport parameters are stored into the quic_conn object.
+ * There is no need to add an intermediary to store pointers to these
+ * transport parameters.
+ */
+ TRACE_ENTER(QUIC_EV_CONN_SSL_COMPAT, qc);
+ TRACE_LEAVE(QUIC_EV_CONN_SSL_COMPAT, qc);
+ return 1;
+}
+
diff --git a/src/quic_retransmit.c b/src/quic_retransmit.c
new file mode 100644
index 0000000..d06293f
--- /dev/null
+++ b/src/quic_retransmit.c
@@ -0,0 +1,252 @@
+#include <import/eb64tree.h>
+
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_frame.h>
+#include <haproxy/quic_retransmit.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/quic_tx.h>
+#include <haproxy/trace.h>
+
+#define TRACE_SOURCE &trace_quic
+
+/* Duplicate all frames from <pkt_frm_list> list into <out_frm_list> list
+ * for <qc> QUIC connection.
+ * This is a best effort function which never fails even if no memory could be
+ * allocated to duplicate these frames.
+ */
+static void qc_dup_pkt_frms(struct quic_conn *qc,
+ struct list *pkt_frm_list, struct list *out_frm_list)
+{
+ struct quic_frame *frm, *frmbak;
+ struct list tmp = LIST_HEAD_INIT(tmp);
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ list_for_each_entry_safe(frm, frmbak, pkt_frm_list, list) {
+ struct quic_frame *dup_frm, *origin;
+
+ if (frm->flags & QUIC_FL_TX_FRAME_ACKED) {
+ TRACE_DEVEL("already acknowledged frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ continue;
+ }
+
+ switch (frm->type) {
+ case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
+ {
+ struct qf_stream *strm_frm = &frm->stream;
+ struct eb64_node *node = NULL;
+ struct qc_stream_desc *stream_desc;
+
+ node = eb64_lookup(&qc->streams_by_id, strm_frm->id);
+ if (!node) {
+ TRACE_DEVEL("ignored frame for a released stream", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ continue;
+ }
+
+ stream_desc = eb64_entry(node, struct qc_stream_desc, by_id);
+ /* Do not resend this frame if in the "already acked range" */
+ if (strm_frm->offset.key + strm_frm->len <= stream_desc->ack_offset) {
+ TRACE_DEVEL("ignored frame in already acked range",
+ QUIC_EV_CONN_PRSAFRM, qc, frm);
+ continue;
+ }
+ else if (strm_frm->offset.key < stream_desc->ack_offset) {
+ uint64_t diff = stream_desc->ack_offset - strm_frm->offset.key;
+
+ qc_stream_frm_mv_fwd(frm, diff);
+ TRACE_DEVEL("updated partially acked frame",
+ QUIC_EV_CONN_PRSAFRM, qc, frm);
+ }
+
+ strm_frm->dup = 1;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* If <frm> is already a copy of another frame, we must take
+ * its original frame as source for the copy.
+ */
+ origin = frm->origin ? frm->origin : frm;
+ dup_frm = qc_frm_dup(origin);
+ if (!dup_frm) {
+ TRACE_ERROR("could not duplicate frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ break;
+ }
+
+ TRACE_DEVEL("built probing frame", QUIC_EV_CONN_PRSAFRM, qc, origin);
+ if (origin->pkt) {
+ TRACE_DEVEL("duplicated from packet", QUIC_EV_CONN_PRSAFRM,
+ qc, dup_frm, &origin->pkt->pn_node.key);
+ }
+ else {
+ /* <origin> is a frame which was sent from a packet detected as lost. */
+ TRACE_DEVEL("duplicated from lost packet", QUIC_EV_CONN_PRSAFRM, qc);
+ }
+
+ LIST_APPEND(&tmp, &dup_frm->list);
+ }
+
+ LIST_SPLICE(out_frm_list, &tmp);
+
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Boolean function which returns 1 if <pkt> TX packet is only made of
+ * already acknowledged frames.
+ */
+static inline int qc_pkt_with_only_acked_frms(struct quic_tx_packet *pkt)
+{
+ struct quic_frame *frm;
+
+ list_for_each_entry(frm, &pkt->frms, list)
+ if (!(frm->flags & QUIC_FL_TX_FRAME_ACKED))
+ return 0;
+
+ return 1;
+}
+
+/* Prepare a fast retransmission from <qel> encryption level */
+void qc_prep_fast_retrans(struct quic_conn *qc,
+ struct quic_pktns *pktns,
+ struct list *frms1, struct list *frms2)
+{
+ struct eb_root *pkts = &pktns->tx.pkts;
+ struct list *frms = frms1;
+ struct eb64_node *node;
+ struct quic_tx_packet *pkt;
+
+ TRACE_ENTER(QUIC_EV_CONN_SPPKTS, qc);
+
+ BUG_ON(frms1 == frms2);
+
+ pkt = NULL;
+ node = eb64_first(pkts);
+ start:
+ while (node) {
+ struct quic_tx_packet *p;
+
+ p = eb64_entry(node, struct quic_tx_packet, pn_node);
+ node = eb64_next(node);
+ /* Skip the empty and coalesced packets */
+ TRACE_PRINTF(TRACE_LEVEL_PROTO, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
+ "--> pn=%llu (%d %d %d)", (ull)p->pn_node.key,
+ LIST_ISEMPTY(&p->frms), !!(p->flags & QUIC_FL_TX_PACKET_COALESCED),
+ qc_pkt_with_only_acked_frms(p));
+ if (!LIST_ISEMPTY(&p->frms) && !qc_pkt_with_only_acked_frms(p)) {
+ pkt = p;
+ break;
+ }
+ }
+
+ if (!pkt)
+ goto leave;
+
+ /* When building a packet from another one, the field which may increase the
+ * packet size is the packet number. And the maximum increase is 4 bytes.
+ */
+ if (!quic_peer_validated_addr(qc) && qc_is_listener(qc) &&
+ pkt->len + 4 > quic_may_send_bytes(qc)) {
+ qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
+ TRACE_PROTO("anti-amplification limit would be reached", QUIC_EV_CONN_SPPKTS, qc, pkt);
+ goto leave;
+ }
+
+ TRACE_PROTO("duplicating packet", QUIC_EV_CONN_SPPKTS, qc, pkt);
+ qc_dup_pkt_frms(qc, &pkt->frms, frms);
+ if (frms == frms1 && frms2) {
+ frms = frms2;
+ goto start;
+ }
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SPPKTS, qc);
+}
+
+/* Prepare a fast retransmission during a handshake after a client
+ * has resent Initial packets. According to the RFC a server may retransmit
+ * Initial packets, sending them coalesced with others (Handshake here).
+ * (Listener only function).
+ */
+void qc_prep_hdshk_fast_retrans(struct quic_conn *qc,
+ struct list *ifrms, struct list *hfrms)
+{
+ struct list itmp = LIST_HEAD_INIT(itmp);
+ struct list htmp = LIST_HEAD_INIT(htmp);
+
+ struct quic_enc_level *iqel = qc->iel;
+ struct quic_enc_level *hqel = qc->hel;
+ struct quic_enc_level *qel = iqel;
+ struct eb_root *pkts;
+ struct eb64_node *node;
+ struct quic_tx_packet *pkt;
+ struct list *tmp = &itmp;
+
+ TRACE_ENTER(QUIC_EV_CONN_SPPKTS, qc);
+ start:
+ pkt = NULL;
+ pkts = &qel->pktns->tx.pkts;
+ node = eb64_first(pkts);
+ /* Skip the empty packets (they have already been retransmitted) */
+ while (node) {
+ struct quic_tx_packet *p;
+
+ p = eb64_entry(node, struct quic_tx_packet, pn_node);
+ TRACE_PRINTF(TRACE_LEVEL_PROTO, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
+ "--> pn=%llu (%d %d)", (ull)p->pn_node.key,
+ LIST_ISEMPTY(&p->frms), !!(p->flags & QUIC_FL_TX_PACKET_COALESCED));
+ if (!LIST_ISEMPTY(&p->frms) && !(p->flags & QUIC_FL_TX_PACKET_COALESCED) &&
+ !qc_pkt_with_only_acked_frms(p)) {
+ pkt = p;
+ break;
+ }
+
+ node = eb64_next(node);
+ }
+
+ if (!pkt)
+ goto end;
+
+ /* When building a packet from another one, the field which may increase the
+ * packet size is the packet number. And the maximum increase is 4 bytes.
+ */
+ if (!quic_peer_validated_addr(qc) && qc_is_listener(qc)) {
+ size_t dglen = pkt->len + 4;
+ size_t may_send;
+
+ may_send = quic_may_send_bytes(qc);
+ dglen += pkt->next ? pkt->next->len + 4 : 0;
+ if (dglen > may_send) {
+ qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
+ TRACE_PROTO("anti-amplification limit would be reached", QUIC_EV_CONN_SPPKTS, qc, pkt);
+ if (pkt->next)
+ TRACE_PROTO("anti-amplification limit would be reached", QUIC_EV_CONN_SPPKTS, qc, pkt->next);
+ if (qel == iqel && may_send >= QUIC_INITIAL_PACKET_MINLEN)
+ TRACE_PROTO("will probe Initial packet number space", QUIC_EV_CONN_SPPKTS, qc);
+ goto end;
+ }
+ }
+
+ qel->pktns->tx.pto_probe += 1;
+
+ /* No risk to loop here, #packet per datagram is bounded */
+ requeue:
+ TRACE_PROTO("duplicating packet", QUIC_EV_CONN_PRSAFRM, qc, NULL, &pkt->pn_node.key);
+ qc_dup_pkt_frms(qc, &pkt->frms, tmp);
+ if (qel == iqel) {
+ if (pkt->next && pkt->next->type == QUIC_PACKET_TYPE_HANDSHAKE) {
+ pkt = pkt->next;
+ tmp = &htmp;
+ hqel->pktns->tx.pto_probe += 1;
+ TRACE_DEVEL("looping for next packet", QUIC_EV_CONN_SPPKTS, qc);
+ goto requeue;
+ }
+ }
+
+ end:
+ LIST_SPLICE(ifrms, &itmp);
+ LIST_SPLICE(hfrms, &htmp);
+
+ TRACE_LEAVE(QUIC_EV_CONN_SPPKTS, qc);
+}
diff --git a/src/quic_retry.c b/src/quic_retry.c
new file mode 100644
index 0000000..1c58e5e
--- /dev/null
+++ b/src/quic_retry.c
@@ -0,0 +1,320 @@
+#include <string.h>
+
+#include <haproxy/clock.h>
+#include <haproxy/global.h>
+#include <haproxy/quic_retry.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace-t.h>
+#include <haproxy/trace.h>
+
+#define TRACE_SOURCE &trace_quic
+
+/* Salt length used to derive retry token secret */
+#define QUIC_RETRY_TOKEN_SALTLEN 16 /* bytes */
+
+/* Copy <saddr> socket address data into <buf> buffer.
+ * This is the responsibility of the caller to check the output buffer is big
+ * enough to contain these socket address data.
+ * Return the number of bytes copied.
+ */
+static inline size_t quic_saddr_cpy(unsigned char *buf,
+ const struct sockaddr_storage *saddr)
+{
+ void *port, *addr;
+ unsigned char *p;
+ size_t port_len, addr_len;
+
+ p = buf;
+ if (saddr->ss_family == AF_INET6) {
+ port = &((struct sockaddr_in6 *)saddr)->sin6_port;
+ addr = &((struct sockaddr_in6 *)saddr)->sin6_addr;
+ port_len = sizeof ((struct sockaddr_in6 *)saddr)->sin6_port;
+ addr_len = sizeof ((struct sockaddr_in6 *)saddr)->sin6_addr;
+ }
+ else {
+ port = &((struct sockaddr_in *)saddr)->sin_port;
+ addr = &((struct sockaddr_in *)saddr)->sin_addr;
+ port_len = sizeof ((struct sockaddr_in *)saddr)->sin_port;
+ addr_len = sizeof ((struct sockaddr_in *)saddr)->sin_addr;
+ }
+ memcpy(p, port, port_len);
+ p += port_len;
+ memcpy(p, addr, addr_len);
+ p += addr_len;
+
+ return p - buf;
+}
+
+
+/* QUIC server only function.
+ * Add AAD to <aad> buffer from <cid> connection ID and <addr> socket address.
+ * It is the responsibility of the caller to check <aad> size is big enough
+ * to contain these data.
+ * Return the number of bytes copied to <aad>.
+ */
+static int quic_generate_retry_token_aad(unsigned char *aad,
+ uint32_t version,
+ const struct quic_cid *cid,
+ const struct sockaddr_storage *addr)
+{
+ unsigned char *p;
+
+ p = aad;
+ *(uint32_t *)p = htonl(version);
+ p += sizeof version;
+ p += quic_saddr_cpy(p, addr);
+ memcpy(p, cid->data, cid->len);
+ p += cid->len;
+
+ return p - aad;
+}
+
+/* QUIC server only function.
+ * Generate the token to be used in Retry packets. The token is written to
+ * <token> with <len> as length. <odcid> is the original destination connection
+ * ID and <dcid> is our side destination connection ID (or client source
+ * connection ID).
+ * Returns the length of the encoded token or 0 on error.
+ */
+int quic_generate_retry_token(unsigned char *token, size_t len,
+ const uint32_t version,
+ const struct quic_cid *odcid,
+ const struct quic_cid *dcid,
+ struct sockaddr_storage *addr)
+{
+ int ret = 0;
+ unsigned char *p;
+ unsigned char aad[sizeof(uint32_t) + sizeof(in_port_t) +
+ sizeof(struct in6_addr) + QUIC_CID_MAXLEN];
+ size_t aadlen;
+ unsigned char salt[QUIC_RETRY_TOKEN_SALTLEN];
+ unsigned char key[QUIC_TLS_KEY_LEN];
+ unsigned char iv[QUIC_TLS_IV_LEN];
+ const unsigned char *sec = global.cluster_secret;
+ size_t seclen = sizeof global.cluster_secret;
+ EVP_CIPHER_CTX *ctx = NULL;
+ const EVP_CIPHER *aead = EVP_aes_128_gcm();
+ uint32_t timestamp = (uint32_t)date.tv_sec;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT);
+
+ /* The token is made of the token format byte, the ODCID prefixed by its one byte
+ * length, the creation timestamp, an AEAD TAG, and finally
+ * the random bytes used to derive the secret to encrypt the token.
+ */
+ if (1 + odcid->len + 1 + sizeof(timestamp) + QUIC_TLS_TAG_LEN + QUIC_RETRY_TOKEN_SALTLEN > len)
+ goto err;
+
+ aadlen = quic_generate_retry_token_aad(aad, version, dcid, addr);
+ /* TODO: RAND_bytes() should be replaced */
+ if (RAND_bytes(salt, sizeof salt) != 1) {
+ TRACE_ERROR("RAND_bytes()", QUIC_EV_CONN_TXPKT);
+ goto err;
+ }
+
+ if (!quic_tls_derive_retry_token_secret(EVP_sha256(), key, sizeof key, iv, sizeof iv,
+ salt, sizeof salt, sec, seclen)) {
+ TRACE_ERROR("quic_tls_derive_retry_token_secret() failed", QUIC_EV_CONN_TXPKT);
+ goto err;
+ }
+
+ if (!quic_tls_tx_ctx_init(&ctx, aead, key)) {
+ TRACE_ERROR("quic_tls_tx_ctx_init() failed", QUIC_EV_CONN_TXPKT);
+ goto err;
+ }
+
+ /* Token build */
+ p = token;
+ *p++ = QUIC_TOKEN_FMT_RETRY,
+ *p++ = odcid->len;
+ memcpy(p, odcid->data, odcid->len);
+ p += odcid->len;
+ write_u32(p, htonl(timestamp));
+ p += sizeof timestamp;
+
+ /* Do not encrypt the QUIC_TOKEN_FMT_RETRY byte */
+ if (!quic_tls_encrypt(token + 1, p - token - 1, aad, aadlen, ctx, aead, iv)) {
+ TRACE_ERROR("quic_tls_encrypt() failed", QUIC_EV_CONN_TXPKT);
+ goto err;
+ }
+
+ p += QUIC_TLS_TAG_LEN;
+ memcpy(p, salt, sizeof salt);
+ p += sizeof salt;
+ EVP_CIPHER_CTX_free(ctx);
+
+ ret = p - token;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT);
+ return ret;
+
+ err:
+ if (ctx)
+ EVP_CIPHER_CTX_free(ctx);
+ goto leave;
+}
+
+/* Parse the Retry token from buffer <token> with <end> a pointer to
+ * one byte past the end of this buffer. This will extract the ODCID
+ * which will be stored into <odcid>
+ *
+ * Returns 0 on success else non-zero.
+ */
+int parse_retry_token(struct quic_conn *qc,
+ const unsigned char *token, const unsigned char *end,
+ struct quic_cid *odcid)
+{
+ int ret = 0;
+ uint64_t odcid_len;
+ uint32_t timestamp;
+ uint32_t now_sec = (uint32_t)date.tv_sec;
+
+ TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);
+
+ if (!quic_dec_int(&odcid_len, &token, end)) {
+ TRACE_ERROR("quic_dec_int() error", QUIC_EV_CONN_LPKT, qc);
+ goto leave;
+ }
+
+ /* RFC 9000 7.2. Negotiating Connection IDs:
+ * When an Initial packet is sent by a client that has not previously
+ * received an Initial or Retry packet from the server, the client
+ * populates the Destination Connection ID field with an unpredictable
+ * value. This Destination Connection ID MUST be at least 8 bytes in length.
+ */
+ if (odcid_len < QUIC_ODCID_MINLEN || odcid_len > QUIC_CID_MAXLEN) {
+ TRACE_ERROR("wrong ODCID length", QUIC_EV_CONN_LPKT, qc);
+ goto leave;
+ }
+
+ if (end - token < odcid_len + sizeof timestamp) {
+ TRACE_ERROR("too long ODCID length", QUIC_EV_CONN_LPKT, qc);
+ goto leave;
+ }
+
+ timestamp = ntohl(read_u32(token + odcid_len));
+ /* check if elapsed time is +/- QUIC_RETRY_DURATION_SEC
+ * to tolerate a token generator that is not perfectly time-synced
+ */
+ if ((uint32_t)(now_sec - timestamp) > QUIC_RETRY_DURATION_SEC &&
+ (uint32_t)(timestamp - now_sec) > QUIC_RETRY_DURATION_SEC) {
+ TRACE_ERROR("token has expired", QUIC_EV_CONN_LPKT, qc);
+ goto leave;
+ }
+
+ ret = 1;
+ memcpy(odcid->data, token, odcid_len);
+ odcid->len = odcid_len;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
+ return !ret;
+}
+
+/* QUIC server only function.
+ *
+ * Check the validity of the Retry token from Initial packet <pkt>. <dgram> is
+ * the UDP datagram containing <pkt> and <l> is the listener instance on which
+ * it was received. If the token is valid, the ODCID of <qc> QUIC connection
+ * will be put into <odcid>. <qc> is used to retrieve the QUIC version needed
+ * to validate the token but it can be NULL: in this case the version will be
+ * retrieved from the packet.
+ *
+ * Return 1 if succeeded, 0 if not.
+ */
+
+int quic_retry_token_check(struct quic_rx_packet *pkt,
+ struct quic_dgram *dgram,
+ struct listener *l,
+ struct quic_conn *qc,
+ struct quic_cid *odcid)
+{
+ struct proxy *prx;
+ struct quic_counters *prx_counters;
+ int ret = 0;
+ unsigned char *token = pkt->token;
+ const uint64_t tokenlen = pkt->token_len;
+ unsigned char buf[128];
+ unsigned char aad[sizeof(uint32_t) + QUIC_CID_MAXLEN +
+ sizeof(in_port_t) + sizeof(struct in6_addr)];
+ size_t aadlen;
+ const unsigned char *salt;
+ unsigned char key[QUIC_TLS_KEY_LEN];
+ unsigned char iv[QUIC_TLS_IV_LEN];
+ const unsigned char *sec = global.cluster_secret;
+ size_t seclen = sizeof global.cluster_secret;
+ EVP_CIPHER_CTX *ctx = NULL;
+ const EVP_CIPHER *aead = EVP_aes_128_gcm();
+ const struct quic_version *qv = qc ? qc->original_version :
+ pkt->version;
+
+ TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);
+
+ /* The caller must ensure this. */
+ BUG_ON(!pkt->token_len);
+
+ prx = l->bind_conf->frontend;
+ prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe, &quic_stats_module);
+
+ if (*pkt->token != QUIC_TOKEN_FMT_RETRY) {
+ /* TODO: New token check */
+ TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc, NULL, NULL, pkt->version);
+ goto leave;
+ }
+
+ if (sizeof buf < tokenlen) {
+ TRACE_ERROR("too short buffer", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ /* The token is made of the token format byte, the ODCID prefixed by its one byte
+ * length, the creation timestamp, an AEAD TAG, and finally
+ * the random bytes used to derive the secret to encrypt the token.
+ */
+ if (tokenlen < 2 + QUIC_ODCID_MINLEN + sizeof(uint32_t) + QUIC_TLS_TAG_LEN + QUIC_RETRY_TOKEN_SALTLEN ||
+ tokenlen > 2 + QUIC_CID_MAXLEN + sizeof(uint32_t) + QUIC_TLS_TAG_LEN + QUIC_RETRY_TOKEN_SALTLEN) {
+ TRACE_ERROR("invalid token length", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ aadlen = quic_generate_retry_token_aad(aad, qv->num, &pkt->scid, &dgram->saddr);
+ salt = token + tokenlen - QUIC_RETRY_TOKEN_SALTLEN;
+ if (!quic_tls_derive_retry_token_secret(EVP_sha256(), key, sizeof key, iv, sizeof iv,
+ salt, QUIC_RETRY_TOKEN_SALTLEN, sec, seclen)) {
+ TRACE_ERROR("Could not derive retry secret", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ if (!quic_tls_rx_ctx_init(&ctx, aead, key)) {
+ TRACE_ERROR("quic_tls_rx_ctx_init() failed", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ /* The token is prefixed by a one-byte length format which is not ciphered. */
+ if (!quic_tls_decrypt2(buf, token + 1, tokenlen - QUIC_RETRY_TOKEN_SALTLEN - 1, aad, aadlen,
+ ctx, aead, key, iv)) {
+ TRACE_ERROR("Could not decrypt retry token", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ if (parse_retry_token(qc, buf, buf + tokenlen - QUIC_RETRY_TOKEN_SALTLEN - 1, odcid)) {
+ TRACE_ERROR("Error during Initial token parsing", QUIC_EV_CONN_LPKT, qc);
+ goto err;
+ }
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ ret = 1;
+ HA_ATOMIC_INC(&prx_counters->retry_validated);
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
+ return ret;
+
+ err:
+ HA_ATOMIC_INC(&prx_counters->retry_error);
+ if (ctx)
+ EVP_CIPHER_CTX_free(ctx);
+ goto leave;
+}
+
+
diff --git a/src/quic_rx.c b/src/quic_rx.c
new file mode 100644
index 0000000..9e55aa3
--- /dev/null
+++ b/src/quic_rx.c
@@ -0,0 +1,2290 @@
+/*
+ * QUIC protocol implementation. Lower layer with internal features implemented
+ * here such as QUIC encryption, idle timeout, acknowledgement and
+ * retransmission.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/quic_rx.h>
+
+#include <haproxy/h3.h>
+#include <haproxy/list.h>
+#include <haproxy/ncbuf.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/quic_ack.h>
+#include <haproxy/quic_cid.h>
+#include <haproxy/quic_retransmit.h>
+#include <haproxy/quic_retry.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_stream.h>
+#include <haproxy/quic_ssl.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/quic_tx.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/trace.h>
+
+/* Memory pools for RX datagram buffers, datagram descriptors and RX packet
+ * descriptors used by this module.
+ */
+DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ);
+DECLARE_POOL(pool_head_quic_dgram, "quic_dgram", sizeof(struct quic_dgram));
+DECLARE_POOL(pool_head_quic_rx_packet, "quic_rx_packet", sizeof(struct quic_rx_packet));
+
+/* Reconstruct a full packet number from <truncated_pn>, the value read from
+ * the wire over <pn_nbits> bits (packet number length in bytes * 8), knowing
+ * <largest_pn>, the largest packet number received so far. This implements
+ * the sample algorithm of RFC 9000 appendix A.3.
+ * Returns the decoded packet number.
+ */
+static uint64_t decode_packet_number(uint64_t largest_pn,
+ uint32_t truncated_pn, unsigned int pn_nbits)
+{
+ const uint64_t expected = largest_pn + 1;
+ const uint64_t win = (uint64_t)1 << pn_nbits;
+ const uint64_t hwin = win / 2;
+ const uint64_t mask = win - 1;
+ uint64_t candidate;
+
+ /* Candidate closest to <expected> with the same low-order bits. */
+ candidate = (expected & ~mask) | truncated_pn;
+
+ /* Note that <win> > <hwin>. */
+ if (candidate + hwin <= expected && candidate < QUIC_MAX_PACKET_NUM - win)
+ return candidate + win;
+
+ if (candidate > expected + hwin && candidate >= win)
+ return candidate - win;
+
+ return candidate;
+}
+
+/* Remove the header protection of <pkt> QUIC packet using <tls_ctx> as QUIC TLS
+ * cryptographic context.
+ * <largest_pn> is the largest received packet number and <pn> the address of
+ * the packet number field for this packet with <byte0> address of its first byte.
+ * On success, pkt->pn and pkt->pnl are filled with the decoded packet number
+ * and its length in bytes.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int qc_do_rm_hp(struct quic_conn *qc,
+ struct quic_rx_packet *pkt, struct quic_tls_ctx *tls_ctx,
+ int64_t largest_pn, unsigned char *pn, unsigned char *byte0)
+{
+ int ret, i, pnlen;
+ uint64_t packet_number;
+ uint32_t truncated_pn = 0;
+ unsigned char mask[5] = {0};
+ unsigned char *sample;
+
+ TRACE_ENTER(QUIC_EV_CONN_RMHP, qc);
+
+ ret = 0;
+
+ /* Check there is enough data in this packet: the mask is derived from a
+ * ciphertext sample taken QUIC_PACKET_PN_MAXLEN bytes past the start of
+ * the packet number field.
+ */
+ if (pkt->len - (pn - byte0) < QUIC_PACKET_PN_MAXLEN + sizeof mask) {
+ TRACE_PROTO("too short packet", QUIC_EV_CONN_RMHP, qc, pkt);
+ goto leave;
+ }
+
+ sample = pn + QUIC_PACKET_PN_MAXLEN;
+
+ /* Compute the 5-byte header protection mask from the sample. */
+ if (!quic_tls_aes_decrypt(mask, sample, sizeof mask, tls_ctx->rx.hp_ctx)) {
+ TRACE_ERROR("HP removing failed", QUIC_EV_CONN_RMHP, qc, pkt);
+ goto leave;
+ }
+
+ /* Unmask the protected bits of the first byte: 4 low bits for long
+ * headers, 5 for short headers.
+ */
+ *byte0 ^= mask[0] & (*byte0 & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
+ pnlen = (*byte0 & QUIC_PACKET_PNL_BITMASK) + 1;
+ /* Unmask the packet number bytes and rebuild the truncated value. */
+ for (i = 0; i < pnlen; i++) {
+ pn[i] ^= mask[i + 1];
+ truncated_pn = (truncated_pn << 8) | pn[i];
+ }
+
+ packet_number = decode_packet_number(largest_pn, truncated_pn, pnlen * 8);
+ /* Store remaining information for this unprotected header */
+ pkt->pn = packet_number;
+ pkt->pnl = pnlen;
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_RMHP, qc);
+ return ret;
+}
+
+/* Decrypt <pkt> packet using encryption level <qel> for <qc> connection.
+ * Decryption is done in place in packet buffer. For short header packets,
+ * a toggled Key Phase bit is detected and handled: depending on the packet
+ * number, either the previous key phase secrets are selected, or the next
+ * key phase secrets are installed after a successful decryption.
+ *
+ * Returns 1 on success else 0.
+ */
+static int qc_pkt_decrypt(struct quic_conn *qc, struct quic_enc_level *qel,
+ struct quic_rx_packet *pkt)
+{
+ int ret, kp_changed;
+ unsigned char iv[QUIC_TLS_IV_LEN];
+ struct quic_tls_ctx *tls_ctx =
+ qc_select_tls_ctx(qc, qel, pkt->type, pkt->version);
+ EVP_CIPHER_CTX *rx_ctx = tls_ctx->rx.ctx;
+ unsigned char *rx_iv = tls_ctx->rx.iv;
+ size_t rx_iv_sz = tls_ctx->rx.ivlen;
+ unsigned char *rx_key = tls_ctx->rx.key;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT, qc);
+
+ ret = 0;
+ kp_changed = 0;
+
+ if (pkt->type == QUIC_PACKET_TYPE_SHORT) {
+ /* The two tested bits are not at the same position,
+ * this is why they are first both inversed.
+ */
+ if (!(*pkt->data & QUIC_PACKET_KEY_PHASE_BIT) ^ !(tls_ctx->flags & QUIC_FL_TLS_KP_BIT_SET)) {
+ /* Key phase bit differs from the current one: this is either
+ * a delayed packet from the previous key phase, or the start
+ * of a key update initiated by the peer.
+ */
+ if (pkt->pn < tls_ctx->rx.pn) {
+ /* The lowest packet number of a previous key phase
+ * cannot be null if it really stores previous key phase
+ * secrets.
+ */
+ // TODO: check if BUG_ON() more suitable
+ if (!qc->ku.prv_rx.pn) {
+ TRACE_ERROR("null previous packet number", QUIC_EV_CONN_RXPKT, qc);
+ goto leave;
+ }
+
+ /* Use the previous key phase RX secrets. */
+ rx_ctx = qc->ku.prv_rx.ctx;
+ rx_iv = qc->ku.prv_rx.iv;
+ rx_key = qc->ku.prv_rx.key;
+ }
+ else if (pkt->pn > qel->pktns->rx.largest_pn) {
+ /* Next key phase */
+ TRACE_PROTO("Key phase changed", QUIC_EV_CONN_RXPKT, qc);
+ kp_changed = 1;
+ rx_ctx = qc->ku.nxt_rx.ctx;
+ rx_iv = qc->ku.nxt_rx.iv;
+ rx_key = qc->ku.nxt_rx.key;
+ }
+ }
+ }
+
+ /* Build the per-packet AEAD nonce from the static IV and the PN. */
+ quic_aead_iv_build(iv, sizeof iv, rx_iv, rx_iv_sz, pkt->pn);
+
+ /* The packet header (up to aad_len) is the AEAD associated data. */
+ ret = quic_tls_decrypt(pkt->data + pkt->aad_len, pkt->len - pkt->aad_len,
+ pkt->data, pkt->aad_len,
+ rx_ctx, tls_ctx->rx.aead, rx_key, iv);
+ if (!ret) {
+ TRACE_ERROR("quic_tls_decrypt() failed", QUIC_EV_CONN_RXPKT, qc);
+ goto leave;
+ }
+
+ /* Update the keys only if the packet decryption succeeded. */
+ if (kp_changed) {
+ quic_tls_rotate_keys(qc);
+ /* Toggle the Key Phase bit */
+ tls_ctx->flags ^= QUIC_FL_TLS_KP_BIT_SET;
+ /* Store the lowest packet number received for the current key phase */
+ tls_ctx->rx.pn = pkt->pn;
+ /* Prepare the next key update */
+ if (!quic_tls_key_update(qc)) {
+ TRACE_ERROR("quic_tls_key_update() failed", QUIC_EV_CONN_RXPKT, qc);
+ goto leave;
+ }
+ }
+
+ /* Update the packet length (required to parse the frames). */
+ pkt->len -= QUIC_TLS_TAG_LEN;
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+ return ret;
+}
+
+/* Remove from <stream> the acknowledged frames, walking its acked frames
+ * tree in ascending offset order and stopping at the first frame past the
+ * current ack offset, or as soon as <stream> itself has been released.
+ *
+ * Returns 1 if at least one frame was removed else 0.
+ */
+static int quic_stream_try_to_consume(struct quic_conn *qc,
+ struct qc_stream_desc *stream)
+{
+ int ret;
+ struct eb64_node *frm_node;
+
+ TRACE_ENTER(QUIC_EV_CONN_ACKSTRM, qc);
+
+ ret = 0;
+ frm_node = eb64_first(&stream->acked_frms);
+ while (frm_node) {
+ struct qf_stream *strm_frm;
+ struct quic_frame *frm;
+ size_t offset, len;
+
+ strm_frm = eb64_entry(frm_node, struct qf_stream, offset);
+ offset = strm_frm->offset.key;
+ len = strm_frm->len;
+
+ /* Frames are ordered by offset: stop at the first gap. */
+ if (offset > stream->ack_offset)
+ break;
+
+ if (qc_stream_desc_ack(&stream, offset, len)) {
+ /* cf. next comment : frame may be freed at this stage. */
+ TRACE_DEVEL("stream consumed", QUIC_EV_CONN_ACKSTRM,
+ qc, stream ? strm_frm : NULL, stream);
+ ret = 1;
+ }
+
+ /* If stream is NULL after qc_stream_desc_ack(), it means frame
+ * has been freed along with the stream frames tree. Nothing to do
+ * anymore in here.
+ */
+ if (!stream) {
+ qc_check_close_on_released_mux(qc);
+ ret = 1;
+ goto leave;
+ }
+
+ /* Fetch the successor before unlinking the current node. */
+ frm_node = eb64_next(frm_node);
+ eb64_delete(&strm_frm->offset);
+
+ frm = container_of(strm_frm, struct quic_frame, stream);
+ qc_release_frm(qc, frm);
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_ACKSTRM, qc);
+ return ret;
+}
+
+/* Handle <frm> frame, the packet it is attached to having just been
+ * acknowledged. The memory allocated for this frame will be at least
+ * released in every case, either directly or after being queued into the
+ * stream acked frames tree for out-of-order STREAM acknowledgements.
+ * Never fail.
+ */
+static void qc_handle_newly_acked_frm(struct quic_conn *qc, struct quic_frame *frm)
+{
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+ TRACE_PROTO("RX ack TX frm", QUIC_EV_CONN_PRSAFRM, qc, frm);
+
+ switch (frm->type) {
+ case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
+ {
+ struct qf_stream *strm_frm = &frm->stream;
+ struct eb64_node *node = NULL;
+ struct qc_stream_desc *stream = NULL;
+ const size_t offset = strm_frm->offset.key;
+ const size_t len = strm_frm->len;
+
+ /* do not use strm_frm->stream as the qc_stream_desc instance
+ * might be freed at this stage. Use the id to do a proper
+ * lookup.
+ *
+ * TODO if lookup operation impact on the perf is noticeable,
+ * implement a refcount on qc_stream_desc instances.
+ */
+ node = eb64_lookup(&qc->streams_by_id, strm_frm->id);
+ if (!node) {
+ TRACE_DEVEL("acked stream for released stream", QUIC_EV_CONN_ACKSTRM, qc, strm_frm);
+ qc_release_frm(qc, frm);
+ /* early return */
+ goto leave;
+ }
+ stream = eb64_entry(node, struct qc_stream_desc, by_id);
+
+ TRACE_DEVEL("acked stream", QUIC_EV_CONN_ACKSTRM, qc, strm_frm, stream);
+ /* In-order ack: consume immediately. Out-of-order ack: queue
+ * the frame into the stream acked frames tree (else branch).
+ */
+ if (offset <= stream->ack_offset) {
+ if (qc_stream_desc_ack(&stream, offset, len)) {
+ TRACE_DEVEL("stream consumed", QUIC_EV_CONN_ACKSTRM,
+ qc, strm_frm, stream);
+ }
+
+ if (!stream) {
+ /* no need to continue if stream freed. */
+ TRACE_DEVEL("stream released and freed", QUIC_EV_CONN_ACKSTRM, qc);
+ qc_release_frm(qc, frm);
+ qc_check_close_on_released_mux(qc);
+ break;
+ }
+
+ TRACE_DEVEL("stream consumed", QUIC_EV_CONN_ACKSTRM,
+ qc, strm_frm, stream);
+ qc_release_frm(qc, frm);
+ }
+ else {
+ eb64_insert(&stream->acked_frms, &strm_frm->offset);
+ }
+
+ /* Try to consume queued frames now contiguous with ack_offset. */
+ quic_stream_try_to_consume(qc, stream);
+ }
+ break;
+ default:
+ qc_release_frm(qc, frm);
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Collect into <newly_acked_pkts> list the TX packets from <pkts> ebtree
+ * whose packet numbers belong to the acknowledged range
+ * [<smallest>, <largest>] announced in an ACK frame. <largest_node> may be
+ * provided as a shortcut for the node of <largest>; when NULL, it is looked
+ * up in the tree. Collected packets are unlinked from the tree.
+ */
+static void qc_newly_acked_pkts(struct quic_conn *qc, struct eb_root *pkts,
+ struct list *newly_acked_pkts,
+ struct eb64_node *largest_node,
+ uint64_t largest, uint64_t smallest)
+{
+ struct eb64_node *cur;
+ struct quic_tx_packet *tx_pkt;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ cur = eb64_lookup_ge(pkts, smallest);
+ if (!cur)
+ goto leave;
+
+ if (!largest_node) {
+ largest_node = eb64_lookup_le(pkts, largest);
+ if (!largest_node)
+ goto leave;
+ }
+
+ /* Walk the range, fetching each successor before unlinking the
+ * current packet from the tree.
+ */
+ while (cur && cur->key <= largest_node->key) {
+ tx_pkt = eb64_entry(cur, struct quic_tx_packet, pn_node);
+ LIST_APPEND(newly_acked_pkts, &tx_pkt->list);
+ cur = eb64_next(cur);
+ eb64_delete(&tx_pkt->pn_node);
+ }
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Handle <newly_acked_pkts> list of newly acknowledged TX packets: release
+ * every frame they carry through qc_handle_newly_acked_frm(), accumulate
+ * their flags into <pkt_flags> and detach each packet from its datagram
+ * and from the TX packets tree.
+ * Always succeeds.
+ */
+static void qc_handle_newly_acked_pkts(struct quic_conn *qc,
+ unsigned int *pkt_flags, struct list *newly_acked_pkts)
+{
+ struct quic_tx_packet *pkt, *tmp;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ list_for_each_entry_safe(pkt, tmp, newly_acked_pkts, list) {
+ struct quic_frame *frm, *frmbak;
+
+ *pkt_flags |= pkt->flags;
+ TRACE_DEVEL("Removing packet #", QUIC_EV_CONN_PRSAFRM, qc, NULL, &pkt->pn_node.key);
+ list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
+ qc_handle_newly_acked_frm(qc, frm);
+ /* If there are other packets in the same datagram <pkt> is attached to,
+ * detach the previous one and the next one from <pkt>.
+ */
+ quic_tx_packet_dgram_detach(pkt);
+ eb64_delete(&pkt->pn_node);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+}
+
+/* Handle all frames sent from <pkt> packet and reinsert them in the same order
+ * they have been sent into <pktns_frm_list>. The loss counter of each frame is
+ * incremented and checked if it does not exceed retransmission limit.
+ *
+ * Returns 1 on success, 0 if a frame loss limit is exceeded. A
+ * CONNECTION_CLOSE is scheduled in this case.
+ */
+int qc_handle_frms_of_lost_pkt(struct quic_conn *qc,
+ struct quic_tx_packet *pkt,
+ struct list *pktns_frm_list)
+{
+ struct quic_frame *frm, *frmbak;
+ struct list *pkt_frm_list = &pkt->frms;
+ uint64_t pn = pkt->pn_node.key;
+ int close = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ list_for_each_entry_safe(frm, frmbak, pkt_frm_list, list) {
+ /* First remove this frame from the packet it was attached to */
+ LIST_DEL_INIT(&frm->list);
+ quic_tx_packet_refdec(pkt);
+ /* At this time, this frame is not freed but removed from its packet */
+ frm->pkt = NULL;
+ /* Remove any reference to this frame */
+ qc_frm_unref(frm, qc);
+ switch (frm->type) {
+ case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
+ {
+ struct qf_stream *strm_frm = &frm->stream;
+ struct eb64_node *node = NULL;
+ struct qc_stream_desc *stream_desc;
+
+ /* The stream may have been released since the frame was
+ * sent: look it up by id.
+ */
+ node = eb64_lookup(&qc->streams_by_id, strm_frm->id);
+ if (!node) {
+ TRACE_DEVEL("released stream", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ TRACE_DEVEL("freeing frame from packet", QUIC_EV_CONN_PRSAFRM,
+ qc, frm, &pn);
+ qc_frm_free(qc, &frm);
+ continue;
+ }
+
+ stream_desc = eb64_entry(node, struct qc_stream_desc, by_id);
+ /* Do not resend this frame if in the "already acked range" */
+ if (strm_frm->offset.key + strm_frm->len <= stream_desc->ack_offset) {
+ TRACE_DEVEL("ignored frame in already acked range",
+ QUIC_EV_CONN_PRSAFRM, qc, frm);
+ qc_frm_free(qc, &frm);
+ continue;
+ }
+ else if (strm_frm->offset.key < stream_desc->ack_offset) {
+ /* Partially acked: advance the frame past the acked prefix. */
+ uint64_t diff = stream_desc->ack_offset - strm_frm->offset.key;
+
+ qc_stream_frm_mv_fwd(frm, diff);
+ TRACE_DEVEL("updated partially acked frame",
+ QUIC_EV_CONN_PRSAFRM, qc, frm);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* Do not resend probing packet with old data */
+ if (pkt->flags & QUIC_FL_TX_PACKET_PROBE_WITH_OLD_DATA) {
+ TRACE_DEVEL("ignored frame with old data from packet", QUIC_EV_CONN_PRSAFRM,
+ qc, frm, &pn);
+ if (frm->origin)
+ LIST_DEL_INIT(&frm->ref);
+ qc_frm_free(qc, &frm);
+ continue;
+ }
+
+ if (frm->flags & QUIC_FL_TX_FRAME_ACKED) {
+ TRACE_DEVEL("already acked frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ TRACE_DEVEL("freeing frame from packet", QUIC_EV_CONN_PRSAFRM,
+ qc, frm, &pn);
+ qc_frm_free(qc, &frm);
+ }
+ else {
+ /* Requeue the frame for retransmission unless it has been
+ * lost too many times.
+ */
+ if (++frm->loss_count >= global.tune.quic_max_frame_loss) {
+ TRACE_ERROR("retransmission limit reached, closing the connection", QUIC_EV_CONN_PRSAFRM, qc);
+ quic_set_connection_close(qc, quic_err_transport(QC_ERR_INTERNAL_ERROR));
+ qc_notify_err(qc);
+ close = 1;
+ }
+
+ LIST_APPEND(pktns_frm_list, &frm->list);
+ TRACE_DEVEL("frame requeued", QUIC_EV_CONN_PRSAFRM, qc, frm);
+ }
+ }
+
+ /* NOTE(review): this label is never the target of a goto; it could be
+ * removed to silence -Wunused-label.
+ */
+ end:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+ return !close;
+}
+
+/* Send a packet ack event notification for each newly acked packet of
+ * <newly_acked_pkts> list and free them.
+ * Always succeeds.
+ */
+static void qc_notify_cc_of_newly_acked_pkts(struct quic_conn *qc,
+ struct list *newly_acked_pkts)
+{
+ struct quic_tx_packet *pkt, *tmp;
+ struct quic_cc_event ev = { .type = QUIC_CC_EVT_ACK, };
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ list_for_each_entry_safe(pkt, tmp, newly_acked_pkts, list) {
+ /* Deduct this packet from the in-flight accounting. */
+ pkt->pktns->tx.in_flight -= pkt->in_flight_len;
+ qc->path->prep_in_flight -= pkt->in_flight_len;
+ qc->path->in_flight -= pkt->in_flight_len;
+ if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING)
+ qc->path->ifae_pkts--;
+ /* If this packet contained an ACK frame, proceed to the
+ * acknowledging of range of acks from the largest acknowledged
+ * packet number which was sent in an ACK frame by this packet.
+ */
+ if (pkt->largest_acked_pn != -1)
+ qc_treat_ack_of_ack(qc, &pkt->pktns->rx.arngs, pkt->largest_acked_pn);
+ /* Report the acknowledgement to the congestion controller. */
+ ev.ack.acked = pkt->in_flight_len;
+ ev.ack.time_sent = pkt->time_sent;
+ quic_cc_event(&qc->path->cc, &ev);
+ LIST_DEL_INIT(&pkt->list);
+ quic_tx_packet_refdec(pkt);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+
+}
+
+/* Parse ACK frame into <frm> from a buffer at <pos> address with <end> being at
+ * one byte past the end of this buffer. Also update <rtt_sample> if needed, i.e.
+ * if the largest acked packet was newly acked and if there was at least one newly
+ * acked ack-eliciting packet.
+ * Return 1, if succeeded, 0 if not. On error, every packet already collected
+ * is reinserted into the TX packets tree.
+ */
+static int qc_parse_ack_frm(struct quic_conn *qc,
+ struct quic_frame *frm,
+ struct quic_enc_level *qel,
+ unsigned int *rtt_sample,
+ const unsigned char **pos, const unsigned char *end)
+{
+ struct qf_ack *ack_frm = &frm->ack;
+ uint64_t smallest, largest;
+ struct eb_root *pkts;
+ struct eb64_node *largest_node;
+ unsigned int time_sent, pkt_flags;
+ struct list newly_acked_pkts = LIST_HEAD_INIT(newly_acked_pkts);
+ struct list lost_pkts = LIST_HEAD_INIT(lost_pkts);
+ int ret = 0, new_largest_acked_pn = 0;
+ struct quic_tx_packet *pkt, *tmp;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSAFRM, qc);
+
+ pkts = &qel->pktns->tx.pkts;
+ /* An ACK for a packet number we never sent is a protocol error. */
+ if (ack_frm->largest_ack > qel->pktns->tx.next_pn) {
+ TRACE_DEVEL("ACK for not sent packet", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &ack_frm->largest_ack);
+ goto err;
+ }
+
+ if (ack_frm->first_ack_range > ack_frm->largest_ack) {
+ TRACE_DEVEL("too big first ACK range", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &ack_frm->first_ack_range);
+ goto err;
+ }
+
+ largest = ack_frm->largest_ack;
+ smallest = largest - ack_frm->first_ack_range;
+ pkt_flags = 0;
+ largest_node = NULL;
+ time_sent = 0;
+
+ /* Sample the RTT only when the largest acked PN is newly acked. */
+ if ((int64_t)ack_frm->largest_ack > qel->pktns->rx.largest_acked_pn) {
+ largest_node = eb64_lookup(pkts, largest);
+ if (!largest_node) {
+ TRACE_DEVEL("Largest acked packet not found",
+ QUIC_EV_CONN_PRSAFRM, qc);
+ }
+ else {
+ time_sent = eb64_entry(largest_node,
+ struct quic_tx_packet, pn_node)->time_sent;
+ new_largest_acked_pn = 1;
+ }
+ }
+
+ TRACE_PROTO("RX ack range", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &largest, &smallest);
+ /* Walk every ACK range, from the highest down. */
+ do {
+ uint64_t gap, ack_range;
+
+ qc_newly_acked_pkts(qc, pkts, &newly_acked_pkts,
+ largest_node, largest, smallest);
+ if (!ack_frm->ack_range_num--)
+ break;
+
+ if (!quic_dec_int(&gap, pos, end)) {
+ TRACE_ERROR("quic_dec_int(gap) failed", QUIC_EV_CONN_PRSAFRM, qc);
+ goto err;
+ }
+
+ /* The largest of the next range is smallest - gap - 2: make sure
+ * this subtraction cannot underflow.
+ */
+ if (smallest < gap + 2) {
+ TRACE_DEVEL("wrong gap value", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &gap, &smallest);
+ goto err;
+ }
+
+ largest = smallest - gap - 2;
+ if (!quic_dec_int(&ack_range, pos, end)) {
+ TRACE_ERROR("quic_dec_int(ack_range) failed", QUIC_EV_CONN_PRSAFRM, qc);
+ goto err;
+ }
+
+ if (largest < ack_range) {
+ TRACE_DEVEL("wrong ack range value", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &largest, &ack_range);
+ goto err;
+ }
+
+ /* Do not use this node anymore. */
+ largest_node = NULL;
+ /* Next range */
+ smallest = largest - ack_range;
+
+ TRACE_PROTO("RX next ack range", QUIC_EV_CONN_PRSAFRM,
+ qc, NULL, &largest, &smallest);
+ } while (1);
+
+ if (!LIST_ISEMPTY(&newly_acked_pkts)) {
+ qc_handle_newly_acked_pkts(qc, &pkt_flags, &newly_acked_pkts);
+ if (new_largest_acked_pn && (pkt_flags & QUIC_FL_TX_PACKET_ACK_ELICITING)) {
+ *rtt_sample = tick_remain(time_sent, now_ms);
+ qel->pktns->rx.largest_acked_pn = ack_frm->largest_ack;
+ }
+
+ /* Check for losses among the packets still unacknowledged. */
+ if (!eb_is_empty(&qel->pktns->tx.pkts)) {
+ qc_packet_loss_lookup(qel->pktns, qc, &lost_pkts);
+ if (!qc_release_lost_pkts(qc, qel->pktns, &lost_pkts, now_ms))
+ goto leave;
+ }
+ qc_notify_cc_of_newly_acked_pkts(qc, &newly_acked_pkts);
+ if (quic_peer_validated_addr(qc))
+ qc->path->loss.pto_count = 0;
+ qc_set_timer(qc);
+ qc_notify_send(qc);
+ }
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSAFRM, qc);
+ return ret;
+
+ err:
+ /* Move back these packets into their tree. */
+ list_for_each_entry_safe(pkt, tmp, &newly_acked_pkts, list) {
+ LIST_DEL_INIT(&pkt->list);
+ eb64_insert(pkts, &pkt->pn_node);
+ }
+ goto leave;
+}
+
+/* Parse a STREAM frame <strm_frm> received in <pkt> packet for <qc>
+ * connection. <fin> is true if FIN bit is set on frame type.
+ *
+ * Return 1 on success. On error, 0 is returned. In this case, the packet
+ * containing the frame must not be acknowledged.
+ */
+static int qc_handle_strm_frm(struct quic_rx_packet *pkt,
+ struct qf_stream *strm_frm,
+ struct quic_conn *qc, char fin)
+{
+ int ret;
+
+ /* RFC9000 13.1. Packet Processing
+ *
+ * A packet MUST NOT be acknowledged until packet protection has been
+ * successfully removed and all frames contained in the packet have
+ * been processed. For STREAM frames, this means the data has been
+ * enqueued in preparation to be received by the application protocol,
+ * but it does not require that data be delivered and consumed.
+ */
+ TRACE_ENTER(QUIC_EV_CONN_PRSFRM, qc);
+
+ ret = qcc_recv(qc->qcc, strm_frm->id, strm_frm->len,
+ strm_frm->offset.key, fin, (char *)strm_frm->data);
+
+ /* qcc_recv() returns non-zero when the frame is rejected: invert it so
+ * that 0 is returned and the packet is not acknowledged.
+ */
+ TRACE_LEAVE(QUIC_EV_CONN_PRSFRM, qc);
+ return !ret;
+}
+
+/* Parse <crypto_frm> CRYPTO frame coming with <pkt> packet at <qel> encryption
+ * level for <qc> connection.
+ * Returns 1 if succeeded, 0 if not. Also set <*fast_retrans> to 1 if the
+ * speed up handshake completion may be run after having received duplicated
+ * CRYPTO data.
+ */
+static int qc_handle_crypto_frm(struct quic_conn *qc,
+ struct qf_crypto *crypto_frm, struct quic_rx_packet *pkt,
+ struct quic_enc_level *qel, int *fast_retrans)
+{
+ int ret = 0;
+ enum ncb_ret ncb_ret;
+ /* XXX TO DO: <cfdebug> is used only for the traces. */
+ struct quic_rx_crypto_frm cfdebug = {
+ .offset_node.key = crypto_frm->offset,
+ .len = crypto_frm->len,
+ };
+ struct quic_cstream *cstream = qel->cstream;
+ struct ncbuf *ncbuf = &qel->cstream->rx.ncbuf;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
+
+ /* Frame starting before the current RX offset: fully or partially
+ * duplicated data.
+ */
+ if (unlikely(crypto_frm->offset < cstream->rx.offset)) {
+ size_t diff;
+
+ if (crypto_frm->offset + crypto_frm->len <= cstream->rx.offset) {
+ /* Nothing to do */
+ TRACE_PROTO("Already received CRYPTO data",
+ QUIC_EV_CONN_RXPKT, qc, pkt, &cfdebug);
+ if (qc_is_listener(qc) && qel == qc->iel &&
+ !(qc->flags & QUIC_FL_CONN_HANDSHAKE_SPEED_UP))
+ *fast_retrans = 1;
+ goto done;
+ }
+
+ TRACE_PROTO("Partially already received CRYPTO data",
+ QUIC_EV_CONN_RXPKT, qc, pkt, &cfdebug);
+
+ /* Skip the already received prefix. */
+ diff = cstream->rx.offset - crypto_frm->offset;
+ crypto_frm->len -= diff;
+ crypto_frm->data += diff;
+ crypto_frm->offset = cstream->rx.offset;
+ }
+
+ /* In-order data with an empty buffer: hand it to the handshake task
+ * directly without copying it into the ncbuf.
+ */
+ if (crypto_frm->offset == cstream->rx.offset && ncb_is_empty(ncbuf)) {
+ struct qf_crypto *qf_crypto;
+
+ qf_crypto = pool_alloc(pool_head_qf_crypto);
+ if (!qf_crypto) {
+ TRACE_ERROR("CRYPTO frame allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+
+ qf_crypto->offset = crypto_frm->offset;
+ qf_crypto->len = crypto_frm->len;
+ qf_crypto->data = crypto_frm->data;
+ qf_crypto->qel = qel;
+ LIST_APPEND(&qel->rx.crypto_frms, &qf_crypto->list);
+
+ cstream->rx.offset += crypto_frm->len;
+ HA_ATOMIC_OR(&qc->wait_event.tasklet->state, TASK_HEAVY);
+ TRACE_DEVEL("increment crypto level offset", QUIC_EV_CONN_PHPKTS, qc, qel);
+ goto done;
+ }
+
+ if (!quic_get_ncbuf(ncbuf) ||
+ ncb_is_null(ncbuf)) {
+ TRACE_ERROR("CRYPTO ncbuf allocation failed", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+
+ /* crypto_frm->offset > cstream->rx.offset */
+ ncb_ret = ncb_add(ncbuf, crypto_frm->offset - cstream->rx.offset,
+ (const char *)crypto_frm->data, crypto_frm->len, NCB_ADD_COMPARE);
+ if (ncb_ret != NCB_RET_OK) {
+ if (ncb_ret == NCB_RET_DATA_REJ) {
+ TRACE_ERROR("overlapping data rejected", QUIC_EV_CONN_PRSHPKT, qc);
+ quic_set_connection_close(qc, quic_err_transport(QC_ERR_PROTOCOL_VIOLATION));
+ qc_notify_err(qc);
+ }
+ else if (ncb_ret == NCB_RET_GAP_SIZE) {
+ TRACE_ERROR("cannot bufferize frame due to gap size limit",
+ QUIC_EV_CONN_PRSHPKT, qc);
+ }
+ goto leave;
+ }
+
+ if (ncb_data(ncbuf, 0))
+ HA_ATOMIC_OR(&qc->wait_event.tasklet->state, TASK_HEAVY);
+
+ done:
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
+ return ret;
+}
+
+/* Handle RETIRE_CONNECTION_ID frame from <frm> frame.
+ * <dcid> is the destination CID of the packet carrying the frame.
+ * Return 1 if succeeded, 0 if not. If succeeded, also set <to_retire>
+ * to the CID to be retired if not already retired.
+ */
+static int qc_handle_retire_connection_id_frm(struct quic_conn *qc,
+ struct quic_frame *frm,
+ struct quic_cid *dcid,
+ struct quic_connection_id **to_retire)
+{
+ int ret = 0;
+ struct qf_retire_connection_id *rcid_frm = &frm->retire_connection_id;
+ struct eb64_node *node;
+ struct quic_connection_id *conn_id;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
+
+ /* RFC 9000 19.16. RETIRE_CONNECTION_ID Frames:
+ * Receipt of a RETIRE_CONNECTION_ID frame containing a sequence number greater
+ * than any previously sent to the peer MUST be treated as a connection error
+ * of type PROTOCOL_VIOLATION.
+ */
+ if (rcid_frm->seq_num >= qc->next_cid_seq_num) {
+ TRACE_PROTO("CID seq. number too big", QUIC_EV_CONN_PSTRM, qc, frm);
+ goto protocol_violation;
+ }
+
+ /* RFC 9000 19.16. RETIRE_CONNECTION_ID Frames:
+ * The sequence number specified in a RETIRE_CONNECTION_ID frame MUST NOT refer to
+ * the Destination Connection ID field of the packet in which the frame is contained.
+ * The peer MAY treat this as a connection error of type PROTOCOL_VIOLATION.
+ */
+ node = eb64_lookup(qc->cids, rcid_frm->seq_num);
+ if (!node) {
+ TRACE_PROTO("CID already retired", QUIC_EV_CONN_PSTRM, qc, frm);
+ goto out;
+ }
+
+ conn_id = eb64_entry(node, struct quic_connection_id, seq_num);
+ /* Note that the length of <dcid> has already been checked. It must match the
+ * length of the CIDs which have been provided to the peer.
+ */
+ if (!memcmp(dcid->data, conn_id->cid.data, QUIC_HAP_CID_LEN)) {
+ TRACE_PROTO("cannot retire the current CID", QUIC_EV_CONN_PSTRM, qc, frm);
+ goto protocol_violation;
+ }
+
+ *to_retire = conn_id;
+ out:
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
+ return ret;
+ protocol_violation:
+ quic_set_connection_close(qc, quic_err_transport(QC_ERR_PROTOCOL_VIOLATION));
+ qc_notify_err(qc);
+ goto leave;
+}
+
+/* Convert the <ack_delay> field value of <ack_frm> ACK frame to milliseconds
+ * for <conn> QUIC connection. Note that the value of <ack_delay> coming from
+ * ACK frame is in microseconds, scaled by 2^ack_delay_exponent.
+ */
+static inline unsigned int quic_ack_delay_ms(struct qf_ack *ack_frm,
+ struct quic_conn *conn)
+{
+ const uint64_t delay_us =
+ ack_frm->ack_delay << conn->tx.params.ack_delay_exponent;
+
+ return delay_us / 1000;
+}
+
+/* Parse all the frames of <pkt> QUIC packet for QUIC connection <qc> and <qel>
+ * as encryption level.
+ * Returns 1 if succeeded, 0 if failed.
+ */
+static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
+ struct quic_enc_level *qel)
+{
+ struct quic_frame frm;
+ const unsigned char *pos, *end;
+ int fast_retrans = 0, ret = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_PRSHPKT, qc);
+ /* Skip the AAD */
+ pos = pkt->data + pkt->aad_len;
+ end = pkt->data + pkt->len;
+
+ /* Packet with no frame. */
+ if (pos == end) {
+ /* RFC9000 12.4. Frames and Frame Types
+ *
+ * The payload of a packet that contains frames MUST contain at least
+ * one frame, and MAY contain multiple frames and multiple frame types.
+ * An endpoint MUST treat receipt of a packet containing no frames as a
+ * connection error of type PROTOCOL_VIOLATION. Frames always fit within
+ * a single QUIC packet and cannot span multiple packets.
+ */
+ quic_set_connection_close(qc, quic_err_transport(QC_ERR_PROTOCOL_VIOLATION));
+ goto leave;
+ }
+
+ while (pos < end) {
+ if (!qc_parse_frm(&frm, pkt, &pos, end, qc)) {
+ // trace already emitted by function above
+ goto leave;
+ }
+
+ switch (frm.type) {
+ case QUIC_FT_PADDING:
+ break;
+ case QUIC_FT_PING:
+ break;
+ case QUIC_FT_ACK:
+ {
+ unsigned int rtt_sample;
+ rtt_sample = UINT_MAX;
+
+ if (!qc_parse_ack_frm(qc, &frm, qel, &rtt_sample, &pos, end)) {
+ // trace already emitted by function above
+ goto leave;
+ }
+
+ if (rtt_sample != UINT_MAX) {
+ unsigned int ack_delay;
+
+ /* The peer reported ack delay is only taken into account
+ * for the application packet number space, and is capped
+ * to max_ack_delay once the handshake is confirmed.
+ */
+ ack_delay = !quic_application_pktns(qel->pktns, qc) ? 0 :
+ qc->state >= QUIC_HS_ST_CONFIRMED ?
+ MS_TO_TICKS(QUIC_MIN(quic_ack_delay_ms(&frm.ack, qc), qc->max_ack_delay)) :
+ MS_TO_TICKS(quic_ack_delay_ms(&frm.ack, qc));
+ quic_loss_srtt_update(&qc->path->loss, rtt_sample, ack_delay, qc);
+ }
+ break;
+ }
+ case QUIC_FT_RESET_STREAM:
+ if (qc->mux_state == QC_MUX_READY) {
+ struct qf_reset_stream *rs_frm = &frm.reset_stream;
+ qcc_recv_reset_stream(qc->qcc, rs_frm->id, rs_frm->app_error_code, rs_frm->final_size);
+ }
+ break;
+ case QUIC_FT_STOP_SENDING:
+ {
+ struct qf_stop_sending *ss_frm = &frm.stop_sending;
+ if (qc->mux_state == QC_MUX_READY) {
+ if (qcc_recv_stop_sending(qc->qcc, ss_frm->id,
+ ss_frm->app_error_code)) {
+ TRACE_ERROR("qcc_recv_stop_sending() failed", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+ }
+ break;
+ }
+ case QUIC_FT_CRYPTO:
+ if (!qc_handle_crypto_frm(qc, &frm.crypto, pkt, qel, &fast_retrans))
+ goto leave;
+ break;
+ case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
+ {
+ struct qf_stream *strm_frm = &frm.stream;
+ unsigned nb_streams = qc->rx.strms[qcs_id_type(strm_frm->id)].nb_streams;
+ const char fin = frm.type & QUIC_STREAM_FRAME_TYPE_FIN_BIT;
+
+ /* The upper layer may not be allocated. */
+ if (qc->mux_state != QC_MUX_READY) {
+ if ((strm_frm->id >> QCS_ID_TYPE_SHIFT) < nb_streams) {
+ TRACE_DATA("Already closed stream", QUIC_EV_CONN_PRSHPKT, qc);
+ }
+ else {
+ TRACE_DEVEL("No mux for new stream", QUIC_EV_CONN_PRSHPKT, qc);
+ if (qc->app_ops == &h3_ops) {
+ if (!qc_h3_request_reject(qc, strm_frm->id)) {
+ TRACE_ERROR("error on request rejection", QUIC_EV_CONN_PRSHPKT, qc);
+ /* This packet will not be acknowledged */
+ goto leave;
+ }
+ }
+ else {
+ /* This packet will not be acknowledged */
+ goto leave;
+ }
+ }
+
+ break;
+ }
+
+ if (!qc_handle_strm_frm(pkt, strm_frm, qc, fin)) {
+ TRACE_ERROR("qc_handle_strm_frm() failed", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+
+ break;
+ }
+ case QUIC_FT_MAX_DATA:
+ if (qc->mux_state == QC_MUX_READY) {
+ struct qf_max_data *md_frm = &frm.max_data;
+ qcc_recv_max_data(qc->qcc, md_frm->max_data);
+ }
+ break;
+ case QUIC_FT_MAX_STREAM_DATA:
+ if (qc->mux_state == QC_MUX_READY) {
+ struct qf_max_stream_data *msd_frm = &frm.max_stream_data;
+ if (qcc_recv_max_stream_data(qc->qcc, msd_frm->id,
+ msd_frm->max_stream_data)) {
+ TRACE_ERROR("qcc_recv_max_stream_data() failed", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+ }
+ break;
+ case QUIC_FT_MAX_STREAMS_BIDI:
+ case QUIC_FT_MAX_STREAMS_UNI:
+ break;
+ case QUIC_FT_DATA_BLOCKED:
+ qc->cntrs.data_blocked++;
+ break;
+ case QUIC_FT_STREAM_DATA_BLOCKED:
+ qc->cntrs.stream_data_blocked++;
+ break;
+ case QUIC_FT_STREAMS_BLOCKED_BIDI:
+ qc->cntrs.streams_blocked_bidi++;
+ break;
+ case QUIC_FT_STREAMS_BLOCKED_UNI:
+ qc->cntrs.streams_blocked_uni++;
+ break;
+ case QUIC_FT_NEW_CONNECTION_ID:
+ /* XXX TO DO XXX */
+ break;
+ case QUIC_FT_RETIRE_CONNECTION_ID:
+ {
+ struct quic_cid_tree *tree;
+ struct quic_connection_id *conn_id = NULL;
+
+ if (!qc_handle_retire_connection_id_frm(qc, &frm, &pkt->dcid, &conn_id))
+ goto leave;
+
+ if (!conn_id)
+ break;
+
+ /* Remove the retired CID from the global CID tree and from
+ * the connection, then provide a replacement CID to the peer.
+ */
+ tree = &quic_cid_trees[quic_cid_tree_idx(&conn_id->cid)];
+ HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
+ ebmb_delete(&conn_id->node);
+ HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);
+ eb64_delete(&conn_id->seq_num);
+ pool_free(pool_head_quic_connection_id, conn_id);
+ TRACE_PROTO("CID retired", QUIC_EV_CONN_PSTRM, qc);
+
+ conn_id = new_quic_cid(qc->cids, qc, NULL, NULL);
+ if (!conn_id) {
+ TRACE_ERROR("CID allocation error", QUIC_EV_CONN_IO_CB, qc);
+ }
+ else {
+ quic_cid_insert(conn_id);
+ qc_build_new_connection_id_frm(qc, conn_id);
+ }
+ break;
+ }
+ case QUIC_FT_CONNECTION_CLOSE:
+ case QUIC_FT_CONNECTION_CLOSE_APP:
+ /* Increment the error counters */
+ quic_conn_closed_err_count_inc(qc, &frm);
+ if (!(qc->flags & QUIC_FL_CONN_DRAINING)) {
+ TRACE_STATE("Entering draining state", QUIC_EV_CONN_PRSHPKT, qc);
+ /* RFC 9000 10.2. Immediate Close:
+ * The closing and draining connection states exist to ensure
+ * that connections close cleanly and that delayed or reordered
+ * packets are properly discarded. These states SHOULD persist
+ * for at least three times the current PTO interval...
+ *
+ * Rearm the idle timeout only one time when entering draining
+ * state.
+ */
+ qc->flags |= QUIC_FL_CONN_DRAINING|QUIC_FL_CONN_IMMEDIATE_CLOSE;
+ qc_detach_th_ctx_list(qc, 1);
+ qc_idle_timer_do_rearm(qc, 0);
+ qc_notify_err(qc);
+ }
+ break;
+ case QUIC_FT_HANDSHAKE_DONE:
+ if (qc_is_listener(qc)) {
+ TRACE_ERROR("non accepted QUIC_FT_HANDSHAKE_DONE frame",
+ QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+
+ qc->state = QUIC_HS_ST_CONFIRMED;
+ break;
+ default:
+ TRACE_ERROR("unknown frame type", QUIC_EV_CONN_PRSHPKT, qc);
+ goto leave;
+ }
+ }
+
+ if (fast_retrans && qc->iel && qc->hel) {
+ struct quic_enc_level *iqel = qc->iel;
+ struct quic_enc_level *hqel = qc->hel;
+
+ TRACE_PROTO("speeding up handshake completion", QUIC_EV_CONN_PRSHPKT, qc);
+ qc_prep_hdshk_fast_retrans(qc, &iqel->pktns->tx.frms, &hqel->pktns->tx.frms);
+ qc->flags |= QUIC_FL_CONN_HANDSHAKE_SPEED_UP;
+ }
+
+ /* The server must switch from INITIAL to HANDSHAKE handshake state when it
+ * has successfully parsed a Handshake packet. The Initial encryption must also
+ * be discarded.
+ */
+ if (pkt->type == QUIC_PACKET_TYPE_HANDSHAKE && qc_is_listener(qc)) {
+ if (qc->state >= QUIC_HS_ST_SERVER_INITIAL) {
+ if (qc->ipktns && !quic_tls_pktns_is_dcd(qc, qc->ipktns)) {
+ /* Discard the Initial packet number space. */
+ TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PRSHPKT, qc);
+ quic_pktns_discard(qc->ipktns, qc);
+ qc_set_timer(qc);
+ qc_el_rx_pkts_del(qc->iel);
+ qc_release_pktns_frms(qc, qc->ipktns);
+ }
+ if (qc->state < QUIC_HS_ST_SERVER_HANDSHAKE)
+ qc->state = QUIC_HS_ST_SERVER_HANDSHAKE;
+ }
+ }
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PRSHPKT, qc);
+ return ret;
+}
+
+/* Detect the value of the spin bit to be used. */
+static inline void qc_handle_spin_bit(struct quic_conn *qc, struct quic_rx_packet *pkt,
+ struct quic_enc_level *qel)
+{
+ uint64_t largest_pn = qel->pktns->rx.largest_pn;
+
+ if (qel != qc->ael || largest_pn == -1 ||
+ pkt->pn <= largest_pn)
+ return;
+
+ if (qc_is_listener(qc)) {
+ if (pkt->flags & QUIC_FL_RX_PACKET_SPIN_BIT)
+ qc->flags |= QUIC_FL_CONN_SPIN_BIT;
+ else
+ qc->flags &= ~QUIC_FL_CONN_SPIN_BIT;
+ }
+ else {
+ if (pkt->flags & QUIC_FL_RX_PACKET_SPIN_BIT)
+ qc->flags &= ~QUIC_FL_CONN_SPIN_BIT;
+ else
+ qc->flags |= QUIC_FL_CONN_SPIN_BIT;
+ }
+}
+
/* Remove the header protection of all pending packets at <el> encryption
 * level. Packets successfully unprotected are moved into <el>'s RX packet
 * tree for later decryption; packets whose protection cannot be removed are
 * dropped. In both cases the packet leaves the pending list.
 * Always succeeds.
 */
static void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el)
{
	struct quic_rx_packet *pqpkt, *pkttmp;

	TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc);
	/* A server must not process incoming 1-RTT packets before the handshake is complete. */
	if (el == qc->ael && qc_is_listener(qc) && qc->state < QUIC_HS_ST_COMPLETE) {
		TRACE_PROTO("RX hp not removed (handshake not completed)",
		            QUIC_EV_CONN_ELRMHP, qc);
		goto out;
	}

	list_for_each_entry_safe(pqpkt, pkttmp, &el->rx.pqpkts, list) {
		struct quic_tls_ctx *tls_ctx;

		tls_ctx = qc_select_tls_ctx(qc, el, pqpkt->type, pqpkt->version);
		if (!qc_do_rm_hp(qc, pqpkt, tls_ctx, el->pktns->rx.largest_pn,
		                 pqpkt->data + pqpkt->pn_offset, pqpkt->data)) {
			TRACE_ERROR("RX hp removing error", QUIC_EV_CONN_ELRMHP, qc);
		}
		else {
			qc_handle_spin_bit(qc, pqpkt, el);
			/* The AAD includes the packet number field */
			pqpkt->aad_len = pqpkt->pn_offset + pqpkt->pnl;
			/* Store the packet into the tree of packets to decrypt. */
			pqpkt->pn_node.key = pqpkt->pn;
			eb64_insert(&el->rx.pkts, &pqpkt->pn_node);
			/* The tree now holds its own reference on the packet. */
			quic_rx_packet_refinc(pqpkt);
			TRACE_PROTO("RX hp removed", QUIC_EV_CONN_ELRMHP, qc, pqpkt);
		}
		/* Remove from the pending list and drop the reference it held. */
		LIST_DELETE(&pqpkt->list);
		quic_rx_packet_refdec(pqpkt);
	}

  out:
	TRACE_LEAVE(QUIC_EV_CONN_ELRMHP, qc);
}
+
/* Process all the buffered CRYPTO frames at <el> encryption level, feeding
 * their payload in order to the TLS stack via <ctx>. This is the
 * responsibility of the caller to ensure there exists a CRYPTO data
 * stream for this level.
 * Return 1 if succeeded, 0 if not.
 */
int qc_treat_rx_crypto_frms(struct quic_conn *qc, struct quic_enc_level *el,
                            struct ssl_sock_ctx *ctx)
{
	int ret = 0;
	struct ncbuf *ncbuf;
	struct quic_cstream *cstream = el->cstream;
	ncb_sz_t data;

	TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);

	BUG_ON(!cstream);
	ncbuf = &cstream->rx.ncbuf;
	/* A null buffer means no CRYPTO data were buffered: nothing to do. */
	if (ncb_is_null(ncbuf))
		goto done;

	/* TODO not working if buffer is wrapping */
	while ((data = ncb_data(ncbuf, 0))) {
		const unsigned char *cdata = (const unsigned char *)ncb_head(ncbuf);

		/* Hand the contiguous in-order chunk to the TLS stack; on
		 * failure leave with ret == 0.
		 */
		if (!qc_ssl_provide_quic_data(&el->cstream->rx.ncbuf, el->level,
		                              ctx, cdata, data))
			goto leave;

		cstream->rx.offset += data;
		TRACE_DEVEL("buffered crypto data were provided to TLS stack",
		            QUIC_EV_CONN_PHPKTS, qc, el);
	}

  done:
	ret = 1;
  leave:
	/* Release the non-contiguous buffer once fully consumed. */
	if (!ncb_is_null(ncbuf) && ncb_is_empty(ncbuf)) {
		TRACE_DEVEL("freeing crypto buf", QUIC_EV_CONN_PHPKTS, qc, el);
		quic_free_ncbuf(ncbuf);
	}
	TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
	return ret;
}
+
+/* Check if it's possible to remove header protection for packets related to
+ * encryption level <qel>. If <qel> is NULL, assume it's false.
+ *
+ * Return true if the operation is possible else false.
+ */
+static int qc_qel_may_rm_hp(struct quic_conn *qc, struct quic_enc_level *qel)
+{
+ int ret = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_TRMHP, qc);
+
+ if (!qel)
+ goto cant_rm_hp;
+
+ if (!quic_tls_has_rx_sec(qel)) {
+ TRACE_PROTO("non available secrets", QUIC_EV_CONN_TRMHP, qc);
+ goto cant_rm_hp;
+ }
+
+ if (qel == qc->ael && qc->state < QUIC_HS_ST_COMPLETE) {
+ TRACE_PROTO("handshake not complete", QUIC_EV_CONN_TRMHP, qc);
+ goto cant_rm_hp;
+ }
+
+ /* check if the connection layer is ready before using app level */
+ if ((qel == qc->ael || qel == qc->eel) &&
+ qc->mux_state == QC_MUX_NULL) {
+ TRACE_PROTO("connection layer not ready", QUIC_EV_CONN_TRMHP, qc);
+ goto cant_rm_hp;
+ }
+
+ ret = 1;
+ cant_rm_hp:
+ TRACE_LEAVE(QUIC_EV_CONN_TRMHP, qc);
+ return ret;
+}
+
+/* Process all the packets for all the encryption levels listed in <qc> QUIC connection.
+ * Return 1 if succeeded, 0 if not.
+ */
+int qc_treat_rx_pkts(struct quic_conn *qc)
+{
+ int ret = 0;
+ struct eb64_node *node;
+ int64_t largest_pn = -1;
+ unsigned int largest_pn_time_received = 0;
+ struct quic_enc_level *qel, *qelbak;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT, qc);
+
+ list_for_each_entry_safe(qel, qelbak, &qc->qel_list, list) {
+ /* Treat packets waiting for header packet protection decryption */
+ if (!LIST_ISEMPTY(&qel->rx.pqpkts) && qc_qel_may_rm_hp(qc, qel))
+ qc_rm_hp_pkts(qc, qel);
+
+ node = eb64_first(&qel->rx.pkts);
+ while (node) {
+ struct quic_rx_packet *pkt;
+
+ pkt = eb64_entry(node, struct quic_rx_packet, pn_node);
+ TRACE_DATA("new packet", QUIC_EV_CONN_RXPKT,
+ qc, pkt, NULL, qc->xprt_ctx->ssl);
+ if (!qc_pkt_decrypt(qc, qel, pkt)) {
+ /* Drop the packet */
+ TRACE_ERROR("packet decryption failed -> dropped",
+ QUIC_EV_CONN_RXPKT, qc, pkt);
+ }
+ else {
+ if (!qc_parse_pkt_frms(qc, pkt, qel)) {
+ /* Drop the packet */
+ TRACE_ERROR("packet parsing failed -> dropped",
+ QUIC_EV_CONN_RXPKT, qc, pkt);
+ qc->cntrs.dropped_parsing++;
+ }
+ else {
+ struct quic_arng ar = { .first = pkt->pn, .last = pkt->pn };
+
+ /* RFC 9000 8.1. Address Validation during Connection Establishment
+ *
+ * Connection establishment implicitly provides address validation for
+ * both endpoints. In particular, receipt of a packet protected with
+ * Handshake keys confirms that the peer successfully processed an
+ * Initial packet.
+ */
+ if (qel == qc->hel &&
+ !(qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR)) {
+ TRACE_STATE("validate peer address on handshake packet",
+ QUIC_EV_CONN_RXPKT, qc, pkt);
+ qc->flags |= QUIC_FL_CONN_PEER_VALIDATED_ADDR;
+ BUG_ON(!qc->prx_counters->half_open_conn);
+ HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
+ }
+
+ /* Update the list of ranges to acknowledge. */
+ if (quic_update_ack_ranges_list(qc, &qel->pktns->rx.arngs, &ar)) {
+ if (pkt->flags & QUIC_FL_RX_PACKET_ACK_ELICITING) {
+ int arm_ack_timer =
+ qc->state >= QUIC_HS_ST_COMPLETE &&
+ qel->pktns == qc->apktns;
+
+ qel->pktns->flags |= QUIC_FL_PKTNS_ACK_REQUIRED;
+ qel->pktns->rx.nb_aepkts_since_last_ack++;
+ qc_idle_timer_rearm(qc, 1, arm_ack_timer);
+ }
+
+ if (pkt->pn > largest_pn) {
+ largest_pn = pkt->pn;
+ largest_pn_time_received = pkt->time_received;
+ }
+ }
+ else {
+ TRACE_ERROR("Could not update ack range list",
+ QUIC_EV_CONN_RXPKT, qc);
+ }
+ }
+ }
+ node = eb64_next(node);
+ eb64_delete(&pkt->pn_node);
+ quic_rx_packet_refdec(pkt);
+ }
+
+ if (largest_pn != -1 && largest_pn > qel->pktns->rx.largest_pn) {
+ /* Update the largest packet number. */
+ qel->pktns->rx.largest_pn = largest_pn;
+ /* Update the largest acknowledged packet timestamps */
+ qel->pktns->rx.largest_time_received = largest_pn_time_received;
+ qel->pktns->flags |= QUIC_FL_PKTNS_NEW_LARGEST_PN;
+ }
+
+ if (qel->cstream) {
+ struct ncbuf *ncbuf = &qel->cstream->rx.ncbuf;
+
+ if (!ncb_is_null(ncbuf) && ncb_data(ncbuf, 0)) {
+ /* Some in order CRYPTO data were bufferized. */
+ HA_ATOMIC_OR(&qc->wait_event.tasklet->state, TASK_HEAVY);
+ }
+ }
+
+ /* Release the Initial encryption level and packet number space. */
+ if ((qc->flags & QUIC_FL_CONN_IPKTNS_DCD) && qel == qc->iel) {
+ qc_enc_level_free(qc, &qc->iel);
+ quic_pktns_release(qc, &qc->ipktns);
+ }
+
+ largest_pn = -1;
+ }
+
+ out:
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+ return ret;
+}
+
/* Parse into <pkt> a long header located at <*pos> position, <end> being a
 * pointer to one byte past the end of this buffer. Both connection IDs are
 * copied into <pkt> and <*pos> is advanced past the parsed fields.
 * Returns 1 on success, 0 on failure (truncated input or invalid CID length).
 */
static inline int quic_packet_read_long_header(unsigned char **pos, const unsigned char *end,
                                               struct quic_rx_packet *pkt)
{
	int ret = 0;
	unsigned char dcid_len, scid_len;

	TRACE_ENTER(QUIC_EV_CONN_RXPKT);

	if (end == *pos) {
		TRACE_ERROR("buffer data consumed", QUIC_EV_CONN_RXPKT);
		goto leave;
	}

	/* Destination Connection ID Length */
	dcid_len = *(*pos)++;
	/* We want to be sure we can read <dcid_len> bytes and one more for <scid_len> value */
	if (dcid_len > QUIC_CID_MAXLEN || end - *pos < dcid_len + 1) {
		TRACE_ERROR("too long DCID", QUIC_EV_CONN_RXPKT);
		goto leave;
	}

	if (dcid_len) {
		/* Check that the length of this received DCID matches the CID lengths
		 * of our implementation for non Initials packets only.
		 */
		if (pkt->version && pkt->version->num &&
		    pkt->type != QUIC_PACKET_TYPE_INITIAL &&
		    pkt->type != QUIC_PACKET_TYPE_0RTT &&
		    dcid_len != QUIC_HAP_CID_LEN) {
			TRACE_ERROR("wrong DCID length", QUIC_EV_CONN_RXPKT);
			goto leave;
		}

		memcpy(pkt->dcid.data, *pos, dcid_len);
	}

	pkt->dcid.len = dcid_len;
	*pos += dcid_len;

	/* Source Connection ID Length */
	scid_len = *(*pos)++;
	if (scid_len > QUIC_CID_MAXLEN || end - *pos < scid_len) {
		TRACE_ERROR("too long SCID", QUIC_EV_CONN_RXPKT);
		goto leave;
	}

	if (scid_len)
		memcpy(pkt->scid.data, *pos, scid_len);
	pkt->scid.len = scid_len;
	*pos += scid_len;

	ret = 1;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_RXPKT);
	return ret;
}
+
/* Try to remove the header protection of <pkt> QUIC packet with <beg> the
 * address of the packet first byte, using the keys from encryption level <el>.
 *
 * If header protection has been successfully removed, packet data are copied
 * into <qc> Rx buffer. If <el> secrets are not yet available, the copy is also
 * proceeded, and the packet is inserted into <qc> protected packets tree. In
 * both cases, packet can now be considered handled by the <qc> connection.
 *
 * If header protection cannot be removed due to <el> secrets already
 * discarded, no operation is conducted.
 *
 * Returns 1 on success : packet data is now handled by the connection. On
 * error 0 is returned : packet should be dropped by the caller.
 */
static int qc_try_rm_hp(struct quic_conn *qc, struct quic_rx_packet *pkt,
                        unsigned char *beg, struct quic_enc_level **el)
{
	int ret = 0;
	unsigned char *pn = NULL; /* Packet number field */
	enum quic_tls_enc_level tel;
	struct quic_enc_level *qel;

	TRACE_ENTER(QUIC_EV_CONN_TRMHP, qc);
	BUG_ON(!pkt->pn_offset);

	/* The packet number is here. This is also the start minus
	 * QUIC_PACKET_PN_MAXLEN of the sample used to add/remove the header
	 * protection.
	 */
	pn = beg + pkt->pn_offset;

	tel = quic_packet_type_enc_level(pkt->type);
	qel = qc_quic_enc_level(qc, tel);
	if (!qel) {
		/* Lazily allocate the encryption level (and its packet number
		 * space) the first time a packet of this type is received.
		 */
		struct quic_enc_level **qc_qel = qel_to_qel_addr(qc, tel);
		struct quic_pktns **qc_pktns = qel_to_quic_pktns(qc, tel);

		if (!qc_enc_level_alloc(qc, qc_pktns, qc_qel, quic_to_ssl_enc_level(tel))) {
			TRACE_PROTO("Could not allocated an encryption level", QUIC_EV_CONN_ADDDATA, qc);
			goto out;
		}

		qel = *qc_qel;
	}

	if (qc_qel_may_rm_hp(qc, qel)) {
		struct quic_tls_ctx *tls_ctx =
			qc_select_tls_ctx(qc, qel, pkt->type, pkt->version);

		/* Note that the following function enables us to unprotect the packet
		 * number and its length subsequently used to decrypt the entire
		 * packets.
		 */
		if (!qc_do_rm_hp(qc, pkt, tls_ctx,
		                 qel->pktns->rx.largest_pn, pn, beg)) {
			TRACE_PROTO("hp error", QUIC_EV_CONN_TRMHP, qc);
			goto out;
		}

		qc_handle_spin_bit(qc, pkt, qel);
		/* The AAD includes the packet number field. */
		pkt->aad_len = pkt->pn_offset + pkt->pnl;
		/* Ensure there is enough room left for the AEAD tag. */
		if (pkt->len - pkt->aad_len < QUIC_TLS_TAG_LEN) {
			TRACE_PROTO("Too short packet", QUIC_EV_CONN_TRMHP, qc);
			goto out;
		}

		TRACE_PROTO("RX hp removed", QUIC_EV_CONN_TRMHP, qc, pkt);
	}
	else {
		/* Secrets not available yet: buffer the packet until they are.
		 * The pending list takes its own reference on the packet.
		 */
		TRACE_PROTO("RX hp not removed", QUIC_EV_CONN_TRMHP, qc, pkt);
		LIST_APPEND(&qel->rx.pqpkts, &pkt->list);
		quic_rx_packet_refinc(pkt);
	}

	*el = qel;
	/* No reference counter incrementation here!!! */
	LIST_APPEND(&qc->rx.pkt_list, &pkt->qc_rx_pkt_list);
	/* Copy the packet payload at the tail of the connection RX buffer. */
	memcpy(b_tail(&qc->rx.buf), beg, pkt->len);
	pkt->data = (unsigned char *)b_tail(&qc->rx.buf);
	b_add(&qc->rx.buf, pkt->len);

	ret = 1;
 out:
	TRACE_LEAVE(QUIC_EV_CONN_TRMHP, qc);
	return ret;
}
+
/* Return a 32-bits integer in <val> from QUIC packet with <buf> as address.
 * Makes <buf> point to the data after this 32-bits value if succeeded.
 * Note that these 32-bits integers are network bytes ordered.
 * Returns 0 if failed (not enough data in the buffer), 1 if succeeded.
 */
static inline int quic_read_uint32(uint32_t *val,
                                   const unsigned char **buf,
                                   const unsigned char *end)
{
	uint32_t net;

	/* Cast sizeof to ptrdiff_t so the comparison stays signed: with the
	 * previous unsigned comparison, a negative difference (end < *buf)
	 * would have wrapped and let the read proceed out of bounds.
	 */
	if (end - *buf < (ptrdiff_t)sizeof *val)
		return 0;

	/* memcpy() avoids the unaligned access and strict-aliasing violation
	 * of dereferencing <*buf> through a uint32_t pointer: packet fields
	 * have no alignment guarantee.
	 */
	memcpy(&net, *buf, sizeof net);
	*val = ntohl(net);
	*buf += sizeof *val;

	return 1;
}
+
/* Parse a QUIC packet header starting at <pos> position without exceeding <end>.
 * Version and type are stored in <pkt> packet instance. Type is set to unknown
 * on two occasions : for unsupported version, in this case version field is
 * set to NULL; for Version Negotiation packet with version number set to 0.
 *
 * Returns 1 on success else 0.
 */
int qc_parse_hd_form(struct quic_rx_packet *pkt,
                     unsigned char **pos, const unsigned char *end)
{
	uint32_t version;
	int ret = 0;
	const unsigned char byte0 = **pos;

	TRACE_ENTER(QUIC_EV_CONN_RXPKT);
	pkt->version = NULL;
	pkt->type = QUIC_PACKET_TYPE_UNKNOWN;

	/* Consume the first byte (header form/flags). */
	(*pos)++;
	if (byte0 & QUIC_PACKET_LONG_HEADER_BIT) {
		unsigned char type =
			(byte0 >> QUIC_PACKET_TYPE_SHIFT) & QUIC_PACKET_TYPE_BITMASK;

		/* Version */
		if (!quic_read_uint32(&version, (const unsigned char **)pos, end)) {
			TRACE_ERROR("could not read the packet version", QUIC_EV_CONN_RXPKT);
			goto out;
		}

		pkt->version = qc_supported_version(version);
		if (version && pkt->version) {
			if (version != QUIC_PROTOCOL_VERSION_2) {
				/* v1: the two type bits map directly onto the
				 * internal packet type values.
				 */
				pkt->type = type;
			}
			else {
				/* QUIC v2 shuffles the long header type values
				 * relatively to v1 (RFC 9369), remap them.
				 */
				switch (type) {
				case 0:
					pkt->type = QUIC_PACKET_TYPE_RETRY;
					break;
				case 1:
					pkt->type = QUIC_PACKET_TYPE_INITIAL;
					break;
				case 2:
					pkt->type = QUIC_PACKET_TYPE_0RTT;
					break;
				case 3:
					pkt->type = QUIC_PACKET_TYPE_HANDSHAKE;
					break;
				}
			}
		}
	}
	else {
		/* Short header: only the spin bit is of interest here. */
		if (byte0 & QUIC_PACKET_SPIN_BIT)
			pkt->flags |= QUIC_FL_RX_PACKET_SPIN_BIT;
		pkt->type = QUIC_PACKET_TYPE_SHORT;
	}

	ret = 1;
 out:
	TRACE_LEAVE(QUIC_EV_CONN_RXPKT);
	return ret;
}
+
/* Check that all the bytes between <pos> included and <end> address excluded
 * are null. This is the responsibility of the caller to check that there is
 * at least one byte between <pos> and <end>.
 * Return 1 if all the bytes are null, 0 if not.
 */
static inline int quic_padding_check(const unsigned char *pos,
                                     const unsigned char *end)
{
	const unsigned char *p;

	/* Look for the first non-null byte: finding one means this is not
	 * pure padding.
	 */
	for (p = pos; p < end; p++) {
		if (*p)
			return 0;
	}

	return 1;
}
+
/* Find the associated connection to the packet <pkt> or create a new one if
 * this is an Initial packet. <dgram> is the datagram containing the packet and
 * <l> is the listener instance on which it was received.
 *
 * By default, <new_tid> is set to -1. However, if thread affinity has been
 * changed, it will be set to its new thread ID.
 *
 * Returns the quic-conn instance or NULL if not found or thread affinity
 * changed.
 */
static struct quic_conn *quic_rx_pkt_retrieve_conn(struct quic_rx_packet *pkt,
                                                   struct quic_dgram *dgram,
                                                   struct listener *l,
                                                   int *new_tid)
{
	struct quic_cid token_odcid = { .len = 0 };
	struct quic_conn *qc = NULL;
	struct proxy *prx;
	struct quic_counters *prx_counters;

	TRACE_ENTER(QUIC_EV_CONN_LPKT);

	*new_tid = -1;

	prx = l->bind_conf->frontend;
	prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe, &quic_stats_module);

	/* Look up an existing connection from the packet DCID. */
	qc = retrieve_qc_conn_from_cid(pkt, &dgram->saddr, new_tid);

	/* If connection already created or rebinded on another thread. */
	if (!qc && *new_tid != -1 && tid != *new_tid)
		goto out;

	if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
		BUG_ON(!pkt->version); /* This must not happen. */

		if (!qc) {
			struct quic_cid_tree *tree;
			struct ebmb_node *node;
			struct quic_connection_id *conn_id;
			int ipv4;

			/* Reject INITIAL early if listener limits reached. */
			if (unlikely(HA_ATOMIC_LOAD(&l->rx.quic_curr_handshake) >=
			             quic_listener_max_handshake(l))) {
				TRACE_DATA("Drop INITIAL on max handshake",
				           QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				goto out;
			}

			if (unlikely(HA_ATOMIC_LOAD(&l->rx.quic_curr_accept) >=
			             quic_listener_max_accept(l))) {
				TRACE_DATA("Drop INITIAL on max accept",
				           QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				goto out;
			}

			if (pkt->token_len) {
				/* Validate the token only when connection is unknown. */
				if (!quic_retry_token_check(pkt, dgram, l, qc, &token_odcid))
					goto err;
			}
			else if (!(l->bind_conf->options & BC_O_QUIC_FORCE_RETRY) &&
			         HA_ATOMIC_LOAD(&prx_counters->half_open_conn) >= global.tune.quic_retry_threshold) {
				/* No token and too many half-open connections:
				 * trigger the Retry mechanism for address validation.
				 */
				TRACE_PROTO("Initial without token, sending retry",
				            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				if (send_retry(l->rx.fd, &dgram->saddr, pkt, pkt->version)) {
					TRACE_ERROR("Error during Retry generation",
					            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
					goto out;
				}

				HA_ATOMIC_INC(&prx_counters->retry_sent);
				goto out;
			}

			/* RFC 9000 7.2. Negotiating Connection IDs:
			 * When an Initial packet is sent by a client that has not previously
			 * received an Initial or Retry packet from the server, the client
			 * populates the Destination Connection ID field with an unpredictable
			 * value. This Destination Connection ID MUST be at least 8 bytes in length.
			 */
			if (pkt->dcid.len < QUIC_ODCID_MINLEN) {
				TRACE_PROTO("dropped packet",
				            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				goto err;
			}

			pkt->saddr = dgram->saddr;
			ipv4 = dgram->saddr.ss_family == AF_INET;

			/* Generate the first connection CID. This is derived from the client
			 * ODCID and address. This allows to retrieve the connection from the
			 * ODCID without storing it in the CID tree. This is an interesting
			 * optimization as the client is expected to stop using its ODCID in
			 * favor of our generated value.
			 */
			conn_id = new_quic_cid(NULL, NULL, &pkt->dcid, &pkt->saddr);
			if (!conn_id)
				goto err;

			qc = qc_new_conn(pkt->version, ipv4, &pkt->dcid, &pkt->scid, &token_odcid,
			                 conn_id, &dgram->daddr, &pkt->saddr, 1,
			                 !!pkt->token_len, l);
			if (qc == NULL) {
				pool_free(pool_head_quic_connection_id, conn_id);
				goto err;
			}

			/* Compute and store into the quic_conn the hash used to compute extra CIDs */
			if (quic_hash64_from_cid)
				qc->hash64 = quic_hash64_from_cid(conn_id->cid.data, conn_id->cid.len,
				                                  global.cluster_secret, sizeof(global.cluster_secret));

			tree = &quic_cid_trees[quic_cid_tree_idx(&conn_id->cid)];
			HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
			node = ebmb_insert(&tree->root, &conn_id->node, conn_id->cid.len);
			if (node != &conn_id->node) {
				/* Insertion collision: another thread created this
				 * connection first. Release ours and report the
				 * owning thread through <new_tid>.
				 */
				pool_free(pool_head_quic_connection_id, conn_id);

				conn_id = ebmb_entry(node, struct quic_connection_id, node);
				*new_tid = HA_ATOMIC_LOAD(&conn_id->tid);
				quic_conn_release(qc);
				qc = NULL;
			}
			else {
				/* From here, <qc> is the correct connection for this <pkt> Initial
				 * packet. <conn_id> must be inserted in the CIDs tree for this
				 * connection.
				 */
				eb64_insert(qc->cids, &conn_id->seq_num);
				/* Initialize the next CID sequence number to be used for this connection. */
				qc->next_cid_seq_num = 1;
			}
			HA_RWLOCK_WRUNLOCK(QC_CID_LOCK, &tree->lock);

			if (*new_tid != -1)
				goto out;
		}
	}
	else if (!qc) {
		/* Unknown connection for a non-Initial packet: reply with a
		 * stateless reset when possible (RFC 9000 10.3).
		 */
		TRACE_PROTO("RX non Initial pkt without connection", QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
		if (!send_stateless_reset(l, &dgram->saddr, pkt))
			TRACE_ERROR("stateless reset not sent", QUIC_EV_CONN_LPKT, qc);
		goto err;
	}

 out:
	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
	return qc;

 err:
	HA_ATOMIC_INC(&prx_counters->dropped_pkt);

	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return NULL;
}
+
/* Parse a QUIC packet starting at <pos>. Data won't be read after <end> even
 * if the packet is incomplete. This function will populate fields of <pkt>
 * instance, most notably its length. <dgram> is the UDP datagram which
 * contains the parsed packet. <l> is the listener instance on which it was
 * received.
 *
 * Returns 0 on success else non-zero. Packet length is guaranteed to be set to
 * the real packet value or to cover all data between <pos> and <end> : this is
 * useful to reject a whole datagram.
 */
static int quic_rx_pkt_parse(struct quic_rx_packet *pkt,
                             unsigned char *pos, const unsigned char *end,
                             struct quic_dgram *dgram, struct listener *l)
{
	const unsigned char *beg = pos;
	struct proxy *prx;
	struct quic_counters *prx_counters;

	TRACE_ENTER(QUIC_EV_CONN_LPKT);

	prx = l->bind_conf->frontend;
	prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe, &quic_stats_module);

	if (end <= pos) {
		TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
		goto drop;
	}

	/* Fixed bit */
	if (!(*pos & QUIC_PACKET_FIXED_BIT)) {
		if (!(pkt->flags & QUIC_FL_RX_PACKET_DGRAM_FIRST) &&
		    quic_padding_check(pos, end)) {
			/* Some browsers may pad the remaining datagram space with null bytes.
			 * That is what we called add padding out of QUIC packets. Such
			 * datagrams must be considered as valid. But we can only consume
			 * the remaining space.
			 */
			pkt->len = end - pos;
			goto drop_silent;
		}

		TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
		goto drop;
	}

	/* Header form */
	if (!qc_parse_hd_form(pkt, &pos, end)) {
		TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
		goto drop;
	}

	if (pkt->type != QUIC_PACKET_TYPE_SHORT) {
		uint64_t len;
		TRACE_PROTO("long header packet received", QUIC_EV_CONN_LPKT);

		if (!quic_packet_read_long_header(&pos, end, pkt)) {
			TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
			goto drop;
		}

		/* When multiple QUIC packets are coalesced on the same UDP datagram,
		 * they must have the same DCID.
		 */
		if (!(pkt->flags & QUIC_FL_RX_PACKET_DGRAM_FIRST) &&
		    (pkt->dcid.len != dgram->dcid_len ||
		     memcmp(dgram->dcid, pkt->dcid.data, pkt->dcid.len))) {
			TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
			goto drop;
		}

		/* Retry and Version Negotiation packets are only sent by servers */
		if (pkt->type == QUIC_PACKET_TYPE_RETRY ||
		    (pkt->version && !pkt->version->num)) {
			TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT);
			goto drop;
		}

		/* RFC9000 6. Version Negotiation */
		if (!pkt->version) {
			/* unsupported version, send Negotiation packet */
			if (send_version_negotiation(l->rx.fd, &dgram->saddr, pkt)) {
				TRACE_ERROR("VN packet not sent", QUIC_EV_CONN_LPKT);
				goto drop_silent;
			}

			TRACE_PROTO("VN packet sent", QUIC_EV_CONN_LPKT);
			goto drop_silent;
		}

		/* For Initial packets, and for servers (QUIC clients connections),
		 * there is no Initial connection IDs storage.
		 */
		if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
			uint64_t token_len;

			/* Token Length followed by the token itself. */
			if (!quic_dec_int(&token_len, (const unsigned char **)&pos, end) ||
			    end - pos < token_len) {
				TRACE_PROTO("Packet dropped",
				            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				goto drop;
			}

			/* TODO Retry should be automatically activated if
			 * suspect network usage is detected.
			 */
			if (!token_len) {
				if (l->bind_conf->options & BC_O_QUIC_FORCE_RETRY) {
					TRACE_PROTO("Initial without token, sending retry",
					            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
					if (send_retry(l->rx.fd, &dgram->saddr, pkt, pkt->version)) {
						TRACE_PROTO("Error during Retry generation",
						            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
						goto drop_silent;
					}

					HA_ATOMIC_INC(&prx_counters->retry_sent);
					goto drop_silent;
				}
			}

			pkt->token = pos;
			pkt->token_len = token_len;
			pos += pkt->token_len;
		}
		else if (pkt->type != QUIC_PACKET_TYPE_0RTT) {
			/* Non-Initial, non-0-RTT long header packets must carry
			 * a DCID we generated, hence of our fixed length.
			 */
			if (pkt->dcid.len != QUIC_HAP_CID_LEN) {
				TRACE_PROTO("Packet dropped",
				            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
				goto drop;
			}
		}

		/* Length field of the long header payload. */
		if (!quic_dec_int(&len, (const unsigned char **)&pos, end) ||
		    end - pos < len) {
			TRACE_PROTO("Packet dropped",
			            QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
			goto drop;
		}

		/* Packet Number is stored here. Packet Length totalizes the
		 * rest of the content.
		 */
		pkt->pn_offset = pos - beg;
		pkt->len = pkt->pn_offset + len;

		/* RFC 9000. Initial Datagram Size
		 *
		 * A server MUST discard an Initial packet that is carried in a UDP datagram
		 * with a payload that is smaller than the smallest allowed maximum datagram
		 * size of 1200 bytes.
		 */
		if (pkt->type == QUIC_PACKET_TYPE_INITIAL &&
		    dgram->len < QUIC_INITIAL_PACKET_MINLEN) {
			TRACE_PROTO("RX too short datagram with an Initial packet", QUIC_EV_CONN_LPKT);
			HA_ATOMIC_INC(&prx_counters->too_short_initial_dgram);
			goto drop;
		}

		/* Interrupt parsing after packet length retrieval : this
		 * ensures that only the packet is dropped but not the whole
		 * datagram.
		 */
		if (pkt->type == QUIC_PACKET_TYPE_0RTT && !l->bind_conf->ssl_conf.early_data) {
			TRACE_PROTO("RX 0-RTT packet not supported", QUIC_EV_CONN_LPKT);
			goto drop;
		}
	}
	else {
		TRACE_PROTO("RX short header packet", QUIC_EV_CONN_LPKT);
		if (end - pos < QUIC_HAP_CID_LEN) {
			TRACE_PROTO("RX pkt dropped", QUIC_EV_CONN_LPKT);
			goto drop;
		}

		memcpy(pkt->dcid.data, pos, QUIC_HAP_CID_LEN);
		pkt->dcid.len = QUIC_HAP_CID_LEN;

		/* When multiple QUIC packets are coalesced on the same UDP datagram,
		 * they must have the same DCID.
		 */
		if (!(pkt->flags & QUIC_FL_RX_PACKET_DGRAM_FIRST) &&
		    (pkt->dcid.len != dgram->dcid_len ||
		     memcmp(dgram->dcid, pkt->dcid.data, pkt->dcid.len))) {
			TRACE_PROTO("RX pkt dropped", QUIC_EV_CONN_LPKT);
			goto drop;
		}

		pos += QUIC_HAP_CID_LEN;

		pkt->pn_offset = pos - beg;
		/* A short packet is the last one of a UDP datagram. */
		pkt->len = end - beg;
	}

	TRACE_PROTO("RX pkt parsed", QUIC_EV_CONN_LPKT, NULL, pkt, NULL, pkt->version);
	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return 0;

 drop:
	HA_ATOMIC_INC(&prx_counters->dropped_pkt);
 drop_silent:
	/* Ensure <pkt->len> always covers the faulty data so the caller can
	 * skip them.
	 */
	if (!pkt->len)
		pkt->len = end - beg;
	TRACE_PROTO("RX pkt parsing failed", QUIC_EV_CONN_LPKT, NULL, pkt, NULL, pkt->version);
	TRACE_LEAVE(QUIC_EV_CONN_LPKT);
	return -1;
}
+
+/* Check if received packet <pkt> should be drop due to <qc> already in closing
+ * state. This can be true if a CONNECTION_CLOSE has already been emitted for
+ * this connection.
+ *
+ * Returns false if connection is not in closing state else true. The caller
+ * should drop the whole datagram in the last case to not mess up <qc>
+ * CONNECTION_CLOSE rate limit counter.
+ */
+static int qc_rx_check_closing(struct quic_conn *qc,
+ struct quic_rx_packet *pkt)
+{
+ if (!(qc->flags & QUIC_FL_CONN_CLOSING))
+ return 0;
+
+ TRACE_STATE("Closing state connection", QUIC_EV_CONN_LPKT, qc, NULL, NULL, pkt->version);
+
+ /* Check if CONNECTION_CLOSE rate reemission is reached. */
+ if (++qc->nb_pkt_since_cc >= qc->nb_pkt_for_cc) {
+ qc->flags |= QUIC_FL_CONN_IMMEDIATE_CLOSE;
+ qc->nb_pkt_for_cc++;
+ qc->nb_pkt_since_cc = 0;
+ }
+
+ return 1;
+}
+
/* Release the memory for the RX packets which are no more referenced
 * and consume their payloads which have been copied to the RX buffer
 * for the connection.
 * Always succeeds.
 */
static void quic_rx_pkts_del(struct quic_conn *qc)
{
	struct quic_rx_packet *pkt, *pktback;

	list_for_each_entry_safe(pkt, pktback, &qc->rx.pkt_list, qc_rx_pkt_list) {
		TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QUIC_EV_CONN_LPKT, qc, 0, 0, 0,
		             "pkt #%lld(type=%d,len=%llu,rawlen=%llu,refcnt=%u) (diff: %zd)",
		             (long long)pkt->pn_node.key,
		             pkt->type, (ull)pkt->len, (ull)pkt->raw_len, pkt->refcnt,
		             (unsigned char *)b_head(&qc->rx.buf) - pkt->data);
		if (pkt->data != (unsigned char *)b_head(&qc->rx.buf)) {
			/* This packet is not at the buffer head: some padding
			 * (null bytes) may sit before it and can be consumed.
			 */
			size_t cdata;

			cdata = b_contig_data(&qc->rx.buf, 0);
			TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QUIC_EV_CONN_LPKT, qc, 0, 0, 0,
			             "cdata=%llu *b_head()=0x%x", (ull)cdata, *b_head(&qc->rx.buf));
			if (cdata && !*b_head(&qc->rx.buf)) {
				/* Consume the remaining data */
				b_del(&qc->rx.buf, cdata);
			}
			break;
		}

		/* Packets are consumed in order: stop at the first one still
		 * referenced elsewhere.
		 */
		if (pkt->refcnt)
			break;

		b_del(&qc->rx.buf, pkt->raw_len);
		LIST_DELETE(&pkt->qc_rx_pkt_list);
		pool_free(pool_head_quic_rx_packet, pkt);
	}

	/* In frequent cases the buffer will be emptied at this stage. */
	b_realign_if_empty(&qc->rx.buf);
}
+
/* Handle a parsed packet <pkt> by the connection <qc>. Data will be copied
 * into <qc> receive buffer after header protection removal procedure.
 *
 * <dgram> must be set to the datagram which contains the QUIC packet. <beg>
 * must point to packet buffer first byte.
 *
 * <tasklist_head> may be non-NULL when the caller treat several datagrams for
 * different quic-conn. In this case, each quic-conn tasklet will be appended
 * to it in order to be woken up after the current task.
 *
 * The caller can safely removed the packet data. If packet refcount was not
 * incremented by this function, it means that the connection did not handled
 * it and it should be freed by the caller.
 */
static void qc_rx_pkt_handle(struct quic_conn *qc, struct quic_rx_packet *pkt,
                             struct quic_dgram *dgram, unsigned char *beg,
                             struct list **tasklist_head)
{
	const struct quic_version *qv = pkt->version;
	struct quic_enc_level *qel = NULL;
	size_t b_cspace;

	TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);
	TRACE_PROTO("RX pkt", QUIC_EV_CONN_LPKT, qc, pkt, NULL, qv);

	if (pkt->flags & QUIC_FL_RX_PACKET_DGRAM_FIRST &&
	    qc->flags & QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED) {
		TRACE_PROTO("PTO timer must be armed after anti-amplication was reached",
		            QUIC_EV_CONN_LPKT, qc, NULL, NULL, qv);
		TRACE_DEVEL("needs to wakeup the timer task after the amplification limit was reached",
		            QUIC_EV_CONN_LPKT, qc);
		/* Reset the anti-amplification bit. It will be set again
		 * when sending the next packet if reached again.
		 */
		qc->flags &= ~QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
		qc_set_timer(qc);
		if (qc->timer_task && tick_isset(qc->timer) && tick_is_lt(qc->timer, now_ms))
			task_wakeup(qc->timer_task, TASK_WOKEN_MSG);
	}

	/* Drop asap packet whose packet number space is discarded. */
	if (quic_tls_pkt_type_pktns_dcd(qc, pkt->type)) {
		TRACE_PROTO("Discarded packet number space", QUIC_EV_CONN_TRMHP, qc);
		goto drop_silent;
	}

	if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
		TRACE_PROTO("Connection error",
		            QUIC_EV_CONN_LPKT, qc, NULL, NULL, qv);
		goto out;
	}

	/* Make room in the RX buffer before copying this packet into it. */
	pkt->raw_len = pkt->len;
	quic_rx_pkts_del(qc);
	b_cspace = b_contig_space(&qc->rx.buf);
	if (b_cspace < pkt->len) {
		TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QUIC_EV_CONN_LPKT, qc, 0, 0, 0,
		             "bspace=%llu pkt->len=%llu", (ull)b_cspace, (ull)pkt->len);
		/* Do not consume buf if space not at the end. */
		if (b_tail(&qc->rx.buf) + b_cspace < b_wrap(&qc->rx.buf)) {
			TRACE_PROTO("Packet dropped",
			            QUIC_EV_CONN_LPKT, qc, NULL, NULL, qv);
			qc->cntrs.dropped_pkt_bufoverrun++;
			goto drop_silent;
		}

		/* Let us consume the remaining contiguous space. */
		if (b_cspace) {
			/* Null marker byte so quic_rx_pkts_del() can detect
			 * and skip this padding later.
			 */
			b_putchr(&qc->rx.buf, 0x00);
			b_cspace--;
		}
		b_add(&qc->rx.buf, b_cspace);
		if (b_contig_space(&qc->rx.buf) < pkt->len) {
			TRACE_PROTO("Too big packet",
			            QUIC_EV_CONN_LPKT, qc, pkt, &pkt->len, qv);
			qc->cntrs.dropped_pkt_bufoverrun++;
			goto drop_silent;
		}
	}

	if (!qc_try_rm_hp(qc, pkt, beg, &qel)) {
		TRACE_PROTO("Packet dropped", QUIC_EV_CONN_LPKT, qc, NULL, NULL, qv);
		goto drop;
	}

	TRACE_DATA("New packet", QUIC_EV_CONN_LPKT, qc, pkt, NULL, qv);
	if (pkt->aad_len) {
		/* Insert this RX packet in its encryption level tree */
		pkt->pn_node.key = pkt->pn;
		quic_rx_packet_refinc(pkt);
		eb64_insert(&qel->rx.pkts, &pkt->pn_node);
	}
 out:
	*tasklist_head = tasklet_wakeup_after(*tasklist_head,
	                                      qc->wait_event.tasklet);

 drop_silent:
	/* NOTE(review): "qc ? qc : NULL" below is equivalent to plain "qc". */
	TRACE_PROTO("RX pkt", QUIC_EV_CONN_LPKT, qc ? qc : NULL, pkt, NULL, qv);
	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc ? qc : NULL);
	return;

 drop:
	qc->cntrs.dropped_pkt++;
	TRACE_PROTO("packet drop", QUIC_EV_CONN_LPKT, qc, pkt, NULL, qv);
	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
}
+
+/* Handle a new <dgram> received. Parse each QUIC packets and copied their
+ * content to a quic-conn instance. The datagram content can be released after
+ * this function.
+ *
+ * If datagram has been received on a quic-conn owned FD, <from_qc> must be set
+ * to the connection instance. <li> is the attached listener. The caller is
+ * responsible to ensure that the first packet is destined to this connection
+ * by comparing CIDs.
+ *
+ * If datagram has been received on a receiver FD, <from_qc> will be NULL. This
+ * function will thus retrieve the connection from the CID tree or allocate a
+ * new one if possible. <li> is the listener attached to the receiver.
+ *
+ * Returns 0 on success else non-zero. If an error happens, some packets from
+ * the datagram may not have been parsed.
+ */
+int quic_dgram_parse(struct quic_dgram *dgram, struct quic_conn *from_qc,
+ struct listener *li)
+{
+ struct quic_rx_packet *pkt;
+ struct quic_conn *qc = NULL;
+ unsigned char *pos, *end;
+ struct list *tasklist_head = NULL;
+
+ TRACE_ENTER(QUIC_EV_CONN_LPKT);
+
+ pos = dgram->buf;
+ end = pos + dgram->len;
+ do {
+ pkt = pool_alloc(pool_head_quic_rx_packet);
+ if (!pkt) {
+ TRACE_ERROR("RX packet allocation failed", QUIC_EV_CONN_LPKT);
+ goto err;
+ }
+
+ LIST_INIT(&pkt->qc_rx_pkt_list);
+ pkt->version = NULL;
+ pkt->type = QUIC_PACKET_TYPE_UNKNOWN;
+ pkt->pn_offset = 0;
+ pkt->len = 0;
+ pkt->raw_len = 0;
+ pkt->token = NULL;
+ pkt->token_len = 0;
+ pkt->aad_len = 0;
+ pkt->data = NULL;
+ pkt->pn_node.key = (uint64_t)-1;
+ pkt->refcnt = 0;
+ pkt->flags = 0;
+ pkt->time_received = now_ms;
+
+ /* Set flag if pkt is the first one in dgram. */
+ if (pos == dgram->buf)
+ pkt->flags |= QUIC_FL_RX_PACKET_DGRAM_FIRST;
+
+ quic_rx_packet_refinc(pkt);
+ if (quic_rx_pkt_parse(pkt, pos, end, dgram, li))
+ goto next;
+
+ /* Search quic-conn instance for first packet of the datagram.
+ * quic_rx_packet_parse() is responsible to discard packets
+ * with different DCID as the first one in the same datagram.
+ */
+ if (!qc) {
+ int new_tid = -1;
+
+ qc = from_qc ? from_qc : quic_rx_pkt_retrieve_conn(pkt, dgram, li, &new_tid);
+ /* qc is NULL if receiving a non Initial packet for an
+ * unknown connection or on connection affinity rebind.
+ */
+ if (!qc) {
+ if (new_tid >= 0) {
+ MT_LIST_APPEND(&quic_dghdlrs[new_tid].dgrams,
+ &dgram->handler_list);
+ tasklet_wakeup(quic_dghdlrs[new_tid].task);
+ pool_free(pool_head_quic_rx_packet, pkt);
+ goto out;
+ }
+
+ /* Skip the entire datagram. */
+ pkt->len = end - pos;
+ goto next;
+ }
+
+ dgram->qc = qc;
+ }
+
+ /* Ensure thread connection migration is finalized ASAP. */
+ if (qc->flags & QUIC_FL_CONN_AFFINITY_CHANGED)
+ qc_finalize_affinity_rebind(qc);
+
+ if (qc_rx_check_closing(qc, pkt)) {
+ /* Skip the entire datagram. */
+ pkt->len = end - pos;
+ goto next;
+ }
+
+ /* Detect QUIC connection migration. */
+ if (ipcmp(&qc->peer_addr, &dgram->saddr, 1)) {
+ if (qc_handle_conn_migration(qc, &dgram->saddr, &dgram->daddr)) {
+ /* Skip the entire datagram. */
+ TRACE_ERROR("error during connection migration, datagram dropped", QUIC_EV_CONN_LPKT, qc);
+ pkt->len = end - pos;
+ goto next;
+ }
+ }
+
+ qc_rx_pkt_handle(qc, pkt, dgram, pos, &tasklist_head);
+
+ next:
+ pos += pkt->len;
+ quic_rx_packet_refdec(pkt);
+
+ /* Free rejected packets */
+ if (!pkt->refcnt) {
+ BUG_ON(LIST_INLIST(&pkt->qc_rx_pkt_list));
+ pool_free(pool_head_quic_rx_packet, pkt);
+ }
+ } while (pos < end);
+
+ /* Increasing the received bytes counter by the UDP datagram length
+ * if this datagram could be associated to a connection.
+ */
+ if (dgram->qc)
+ dgram->qc->bytes.rx += dgram->len;
+
+ /* This must never happen. */
+ BUG_ON(pos > end);
+ BUG_ON(pos < end || pos > dgram->buf + dgram->len);
+ /* Mark this datagram as consumed */
+ HA_ATOMIC_STORE(&dgram->buf, NULL);
+
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT);
+ return 0;
+
+ err:
+ /* Mark this datagram as consumed as maybe at least some packets were parsed. */
+ HA_ATOMIC_STORE(&dgram->buf, NULL);
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT);
+ return -1;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/quic_sock.c b/src/quic_sock.c
new file mode 100644
index 0000000..c479249
--- /dev/null
+++ b/src/quic_sock.c
@@ -0,0 +1,1080 @@
+/*
+ * QUIC socket management.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE /* required for struct in6_pktinfo */
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/fd.h>
+#include <haproxy/global-t.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/pool.h>
+#include <haproxy/proto_quic.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/quic_cid.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_rx.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_tp-t.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/session.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/task.h>
+#include <haproxy/trace.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+
+/* Log only first EACCES bind() error runtime occurrence. */
+static volatile char quic_bind_eacces_warn = 0;
+
+/* Retrieve a connection's source address. Returns -1 on failure. */
+int quic_sock_get_src(struct connection *conn, struct sockaddr *addr, socklen_t len)
+{
+ struct quic_conn *qc;
+
+ if (!conn || !conn->handle.qc)
+ return -1;
+
+ qc = conn->handle.qc;
+ if (conn_is_back(conn)) {
+ /* no source address defined for outgoing connections for now */
+ return -1;
+ } else {
+ /* front connection, return the peer's address */
+ if (len > sizeof(qc->peer_addr))
+ len = sizeof(qc->peer_addr);
+ memcpy(addr, &qc->peer_addr, len);
+ return 0;
+ }
+}
+
+/* Retrieve a connection's destination address. Returns -1 on failure. */
+int quic_sock_get_dst(struct connection *conn, struct sockaddr *addr, socklen_t len)
+{
+ struct quic_conn *qc;
+
+ if (!conn || !conn->handle.qc)
+ return -1;
+
+ qc = conn->handle.qc;
+ if (conn_is_back(conn)) {
+ /* back connection, return the peer's address */
+ if (len > sizeof(qc->peer_addr))
+ len = sizeof(qc->peer_addr);
+ memcpy(addr, &qc->peer_addr, len);
+ } else {
+ struct sockaddr_storage *from;
+
+ /* Return listener address if IP_PKTINFO or friends are not
+ * supported by the socket.
+ */
+ BUG_ON(!qc->li);
+ from = is_addr(&qc->local_addr) ? &qc->local_addr :
+ &qc->li->rx.addr;
+ if (len > sizeof(*from))
+ len = sizeof(*from);
+ memcpy(addr, from, len);
+ }
+ return 0;
+}
+
+/*
+ * Inspired from session_accept_fd().
+ * Instantiate a new connection (connection struct) to be attached to <qc>
+ * QUIC connection of <l> listener.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int new_quic_cli_conn(struct quic_conn *qc, struct listener *l,
+ struct sockaddr_storage *saddr)
+{
+ struct connection *cli_conn;
+
+ if (unlikely((cli_conn = conn_new(&l->obj_type)) == NULL))
+ goto out;
+
+ if (!sockaddr_alloc(&cli_conn->src, saddr, sizeof *saddr))
+ goto out_free_conn;
+
+ cli_conn->flags |= CO_FL_FDLESS;
+ qc->conn = cli_conn;
+ cli_conn->handle.qc = qc;
+
+ cli_conn->target = &l->obj_type;
+
+ return 1;
+
+ out_free_conn:
+ qc->conn = NULL;
+ conn_stop_tracking(cli_conn);
+ conn_xprt_close(cli_conn);
+ conn_free(cli_conn);
+ out:
+
+ return 0;
+}
+
+/* Tests if the receiver supports accepting connections. Returns positive on
+ * success, 0 if not possible
+ */
+int quic_sock_accepting_conn(const struct receiver *rx)
+{
+ return 1;
+}
+
+/* Accept an incoming connection from listener <l>, and return it, as well as
+ * a CO_AC_* status code into <status> if not null. Null is returned on error.
+ * <l> must be a valid listener with a valid frontend.
+ */
+struct connection *quic_sock_accept_conn(struct listener *l, int *status)
+{
+ struct quic_conn *qc;
+ struct li_per_thread *lthr = &l->per_thr[ti->ltid];
+
+ qc = MT_LIST_POP(&lthr->quic_accept.conns, struct quic_conn *, accept_list);
+ if (!qc || qc->flags & (QUIC_FL_CONN_CLOSING|QUIC_FL_CONN_DRAINING))
+ goto done;
+
+ if (!new_quic_cli_conn(qc, l, &qc->peer_addr))
+ goto err;
+
+ done:
+ *status = CO_AC_DONE;
+
+ if (qc) {
+ BUG_ON(l->rx.quic_curr_accept <= 0);
+ HA_ATOMIC_DEC(&l->rx.quic_curr_accept);
+ return qc->conn;
+ }
+ else {
+ return NULL;
+ }
+
+ err:
+ /* in case of error reinsert the element to process it later. */
+ MT_LIST_INSERT(&lthr->quic_accept.conns, &qc->accept_list);
+
+ *status = CO_AC_PAUSE;
+ return NULL;
+}
+
+/* QUIC datagrams handler task. */
+struct task *quic_lstnr_dghdlr(struct task *t, void *ctx, unsigned int state)
+{
+ struct quic_dghdlr *dghdlr = ctx;
+ struct quic_dgram *dgram;
+ int max_dgrams = global.tune.maxpollevents;
+
+ TRACE_ENTER(QUIC_EV_CONN_LPKT);
+
+ while ((dgram = MT_LIST_POP(&dghdlr->dgrams, typeof(dgram), handler_list))) {
+ if (quic_dgram_parse(dgram, NULL, dgram->owner)) {
+ /* TODO should we requeue the datagram ? */
+ break;
+ }
+
+ if (--max_dgrams <= 0)
+ goto stop_here;
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT);
+ return t;
+
+ stop_here:
+ /* too much work done at once, come back here later */
+ if (!MT_LIST_ISEMPTY(&dghdlr->dgrams))
+ tasklet_wakeup((struct tasklet *)t);
+
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT);
+ return t;
+}
+
+/* Retrieve the DCID from a QUIC datagram or packet at <pos> position,
+ * <end> being at one byte past the end of this datagram.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_get_dgram_dcid(unsigned char *pos, const unsigned char *end,
+ unsigned char **dcid, size_t *dcid_len)
+{
+ int ret = 0, long_header;
+ size_t minlen, skip;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT);
+
+ if (!(*pos & QUIC_PACKET_FIXED_BIT)) {
+ TRACE_PROTO("fixed bit not set", QUIC_EV_CONN_RXPKT);
+ goto err;
+ }
+
+ long_header = *pos & QUIC_PACKET_LONG_HEADER_BIT;
+ minlen = long_header ? QUIC_LONG_PACKET_MINLEN :
+ QUIC_SHORT_PACKET_MINLEN + QUIC_HAP_CID_LEN + QUIC_TLS_TAG_LEN;
+ skip = long_header ? QUIC_LONG_PACKET_DCID_OFF : QUIC_SHORT_PACKET_DCID_OFF;
+ if (end - pos < minlen)
+ goto err;
+
+ pos += skip;
+ *dcid_len = long_header ? *pos++ : QUIC_HAP_CID_LEN;
+ if (*dcid_len > QUIC_CID_MAXLEN || end - pos <= *dcid_len)
+ goto err;
+
+ *dcid = pos;
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT);
+ return ret;
+
+ err:
+ TRACE_PROTO("wrong datagram", QUIC_EV_CONN_RXPKT);
+ goto leave;
+}
+
+
+/* Retrieve the DCID from the datagram found at <pos> position and deliver it to the
+ * correct datagram handler.
+ * Return 1 if a correct datagram could be found, 0 if not.
+ */
+static int quic_lstnr_dgram_dispatch(unsigned char *pos, size_t len, void *owner,
+ struct sockaddr_storage *saddr,
+ struct sockaddr_storage *daddr,
+ struct quic_dgram *new_dgram, struct list *dgrams)
+{
+ struct quic_dgram *dgram;
+ unsigned char *dcid;
+ size_t dcid_len;
+ int cid_tid;
+
+ if (!len || !quic_get_dgram_dcid(pos, pos + len, &dcid, &dcid_len))
+ goto err;
+
+ dgram = new_dgram ? new_dgram : pool_alloc(pool_head_quic_dgram);
+ if (!dgram)
+ goto err;
+
+ if ((cid_tid = quic_get_cid_tid(dcid, dcid_len, saddr, pos, len)) < 0) {
+ /* Use the current thread if CID not found. If a clients opens
+ * a connection with multiple packets, it is possible that
+ * several threads will deal with datagrams sharing the same
+ * CID. For this reason, the CID tree insertion will be
+ * conducted as an atomic operation and the datagram ultimately
+ * redispatch by the late thread.
+ */
+ cid_tid = tid;
+ }
+
+ /* All the members must be initialized! */
+ dgram->owner = owner;
+ dgram->buf = pos;
+ dgram->len = len;
+ dgram->dcid = dcid;
+ dgram->dcid_len = dcid_len;
+ dgram->saddr = *saddr;
+ dgram->daddr = *daddr;
+ dgram->qc = NULL;
+
+ /* Attached datagram to its quic_receiver_buf and quic_dghdlrs. */
+ LIST_APPEND(dgrams, &dgram->recv_list);
+ MT_LIST_APPEND(&quic_dghdlrs[cid_tid].dgrams, &dgram->handler_list);
+
+ /* typically quic_lstnr_dghdlr() */
+ tasklet_wakeup(quic_dghdlrs[cid_tid].task);
+
+ return 1;
+
+ err:
+ pool_free(pool_head_quic_dgram, new_dgram);
+ return 0;
+}
+
+/* This function is responsible to remove unused datagram attached in front of
+ * <buf>. Each instances will be freed until a not yet consumed datagram is
+ * found or end of the list is hit. The last unused datagram found is not freed
+ * and is instead returned so that the caller can reuse it if needed.
+ *
+ * Returns the last unused datagram or NULL if no occurrence found.
+ */
+static struct quic_dgram *quic_rxbuf_purge_dgrams(struct quic_receiver_buf *rbuf)
+{
+ struct quic_dgram *cur, *prev = NULL;
+
+ while (!LIST_ISEMPTY(&rbuf->dgram_list)) {
+ cur = LIST_ELEM(rbuf->dgram_list.n, struct quic_dgram *, recv_list);
+
+ /* Loop until a not yet consumed datagram is found. */
+ if (HA_ATOMIC_LOAD(&cur->buf))
+ break;
+
+ /* Clear buffer of current unused datagram. */
+ LIST_DELETE(&cur->recv_list);
+ b_del(&rbuf->buf, cur->len);
+
+ /* Free last found unused datagram. */
+ pool_free(pool_head_quic_dgram, prev);
+ prev = cur;
+ }
+
+ /* Return last unused datagram found. */
+ return prev;
+}
+
+/* Receive data from datagram socket <fd>. Data are placed in <out> buffer of
+ * length <len>.
+ *
+ * Datagram addresses will be returned via the next arguments. <from> will be
+ * the peer address and <to> the reception one. Note that <to> can only be
+ * retrieved if the socket supports IP_PKTINFO or affiliated options. If not,
+ * <to> will be set as AF_UNSPEC. The caller must specify <to_port> to ensure
+ * that <to> address is completely filled.
+ *
+ * Returns value from recvmsg syscall.
+ */
+static ssize_t quic_recv(int fd, void *out, size_t len,
+ struct sockaddr *from, socklen_t from_len,
+ struct sockaddr *to, socklen_t to_len,
+ uint16_t dst_port)
+{
+ union pktinfo {
+#ifdef IP_PKTINFO
+ struct in_pktinfo in;
+#else /* !IP_PKTINFO */
+ struct in_addr addr;
+#endif
+#ifdef IPV6_RECVPKTINFO
+ struct in6_pktinfo in6;
+#endif
+ };
+ char cdata[CMSG_SPACE(sizeof(union pktinfo))];
+ struct msghdr msg;
+ struct iovec vec;
+ struct cmsghdr *cmsg;
+ ssize_t ret;
+
+ vec.iov_base = out;
+ vec.iov_len = len;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_name = from;
+ msg.msg_namelen = from_len;
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = &cdata;
+ msg.msg_controllen = sizeof(cdata);
+
+ clear_addr((struct sockaddr_storage *)to);
+
+ do {
+ ret = recvmsg(fd, &msg, 0);
+ } while (ret < 0 && errno == EINTR);
+
+ /* TODO handle errno. On EAGAIN/EWOULDBLOCK use fd_cant_recv() if
+ * using dedicated connection socket.
+ */
+
+ if (ret < 0)
+ goto end;
+
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ switch (cmsg->cmsg_level) {
+ case IPPROTO_IP:
+#if defined(IP_PKTINFO)
+ if (cmsg->cmsg_type == IP_PKTINFO) {
+ struct sockaddr_in *in = (struct sockaddr_in *)to;
+ struct in_pktinfo *info = (struct in_pktinfo *)CMSG_DATA(cmsg);
+
+ if (to_len >= sizeof(struct sockaddr_in)) {
+ in->sin_family = AF_INET;
+ in->sin_addr = info->ipi_addr;
+ in->sin_port = dst_port;
+ }
+ }
+#elif defined(IP_RECVDSTADDR)
+ if (cmsg->cmsg_type == IP_RECVDSTADDR) {
+ struct sockaddr_in *in = (struct sockaddr_in *)to;
+ struct in_addr *info = (struct in_addr *)CMSG_DATA(cmsg);
+
+ if (to_len >= sizeof(struct sockaddr_in)) {
+ in->sin_family = AF_INET;
+ in->sin_addr.s_addr = info->s_addr;
+ in->sin_port = dst_port;
+ }
+ }
+#endif /* IP_PKTINFO || IP_RECVDSTADDR */
+ break;
+
+ case IPPROTO_IPV6:
+#ifdef IPV6_RECVPKTINFO
+ if (cmsg->cmsg_type == IPV6_PKTINFO) {
+ struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)to;
+ struct in6_pktinfo *info6 = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+
+ if (to_len >= sizeof(struct sockaddr_in6)) {
+ in6->sin6_family = AF_INET6;
+ memcpy(&in6->sin6_addr, &info6->ipi6_addr, sizeof(in6->sin6_addr));
+ in6->sin6_port = dst_port;
+ }
+ }
+#endif
+ break;
+ }
+ }
+
+ end:
+ return ret;
+}
+
+/* Function called on a read event from a listening socket. It tries
+ * to handle as many connections as possible.
+ */
+void quic_lstnr_sock_fd_iocb(int fd)
+{
+ ssize_t ret;
+ struct quic_receiver_buf *rxbuf;
+ struct buffer *buf;
+ struct listener *l = objt_listener(fdtab[fd].owner);
+ struct quic_transport_params *params;
+ /* Source address */
+ struct sockaddr_storage saddr = {0}, daddr = {0};
+ size_t max_sz, cspace;
+ struct quic_dgram *new_dgram;
+ unsigned char *dgram_buf;
+ int max_dgrams;
+
+ BUG_ON(!l);
+
+ new_dgram = NULL;
+ if (!l)
+ return;
+
+ if (!(fdtab[fd].state & FD_POLL_IN) || !fd_recv_ready(fd))
+ return;
+
+ rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);
+ if (!rxbuf)
+ goto out;
+
+ buf = &rxbuf->buf;
+
+ max_dgrams = global.tune.maxpollevents;
+ start:
+ /* Try to reuse an existing dgram. Note that there is always at
+ * least one datagram to pick, except the first time we enter
+ * this function for this <rxbuf> buffer.
+ */
+ new_dgram = quic_rxbuf_purge_dgrams(rxbuf);
+
+ params = &l->bind_conf->quic_params;
+ max_sz = params->max_udp_payload_size;
+ cspace = b_contig_space(buf);
+ if (cspace < max_sz) {
+ struct proxy *px = l->bind_conf->frontend;
+ struct quic_counters *prx_counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, &quic_stats_module);
+ struct quic_dgram *dgram;
+
+ /* Do no mark <buf> as full, and do not try to consume it
+ * if the contiguous remaining space is not at the end
+ */
+ if (b_tail(buf) + cspace < b_wrap(buf)) {
+ HA_ATOMIC_INC(&prx_counters->rxbuf_full);
+ goto out;
+ }
+
+ /* Allocate a fake datagram, without data to locate
+ * the end of the RX buffer (required during purging).
+ */
+ dgram = pool_alloc(pool_head_quic_dgram);
+ if (!dgram)
+ goto out;
+
+ /* Initialize only the useful members of this fake datagram. */
+ dgram->buf = NULL;
+ dgram->len = cspace;
+ /* Append this datagram only to the RX buffer list. It will
+ * not be treated by any datagram handler.
+ */
+ LIST_APPEND(&rxbuf->dgram_list, &dgram->recv_list);
+
+ /* Consume the remaining space */
+ b_add(buf, cspace);
+ if (b_contig_space(buf) < max_sz) {
+ HA_ATOMIC_INC(&prx_counters->rxbuf_full);
+ goto out;
+ }
+ }
+
+ dgram_buf = (unsigned char *)b_tail(buf);
+ ret = quic_recv(fd, dgram_buf, max_sz,
+ (struct sockaddr *)&saddr, sizeof(saddr),
+ (struct sockaddr *)&daddr, sizeof(daddr),
+ get_net_port(&l->rx.addr));
+ if (ret <= 0)
+ goto out;
+
+ b_add(buf, ret);
+ if (!quic_lstnr_dgram_dispatch(dgram_buf, ret, l, &saddr, &daddr,
+ new_dgram, &rxbuf->dgram_list)) {
+ /* If wrong, consume this datagram */
+ b_sub(buf, ret);
+ }
+ new_dgram = NULL;
+ if (--max_dgrams > 0)
+ goto start;
+ out:
+ pool_free(pool_head_quic_dgram, new_dgram);
+ MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
+}
+
+/* FD-owned quic-conn socket callback. */
+void quic_conn_sock_fd_iocb(int fd)
+{
+ struct quic_conn *qc = fdtab[fd].owner;
+
+ TRACE_ENTER(QUIC_EV_CONN_RCV, qc);
+
+ if (fd_send_active(fd) && fd_send_ready(fd)) {
+ TRACE_DEVEL("send ready", QUIC_EV_CONN_RCV, qc);
+ fd_stop_send(fd);
+ tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
+ qc_notify_send(qc);
+ }
+
+ if (fd_recv_ready(fd)) {
+ TRACE_DEVEL("recv ready", QUIC_EV_CONN_RCV, qc);
+ tasklet_wakeup_after(NULL, qc->wait_event.tasklet);
+ fd_stop_recv(fd);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
+}
+
+/* Send a datagram stored into <buf> buffer with <sz> as size.
+ * The caller must ensure there is at least <sz> bytes in this buffer.
+ *
+ * Returns the total bytes sent over the socket. 0 is returned if a transient
+ * error is encountered which allows send to be retry later. A negative value
+ * is used for a fatal error which guarantee that all future send operation for
+ * this connection will fail.
+ *
+ * TODO standardize this function for a generic UDP sendto wrapper. This can be
+ * done by removing the <qc> arg and replace it with address/port.
+ */
+int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t sz,
+ int flags)
+{
+ ssize_t ret;
+
+ do {
+ if (qc_test_fd(qc)) {
+ if (!fd_send_ready(qc->fd))
+ return 0;
+
+ ret = send(qc->fd, b_peek(buf, b_head_ofs(buf)), sz,
+ MSG_DONTWAIT | MSG_NOSIGNAL);
+ }
+#if defined(IP_PKTINFO) || defined(IP_RECVDSTADDR) || defined(IPV6_RECVPKTINFO)
+ else if (is_addr(&qc->local_addr)) {
+ struct msghdr msg = { 0 };
+ struct iovec vec;
+ struct cmsghdr *cmsg;
+#ifdef IP_PKTINFO
+ struct in_pktinfo in;
+#endif /* IP_PKTINFO */
+#ifdef IPV6_RECVPKTINFO
+ struct in6_pktinfo in6;
+#endif /* IPV6_RECVPKTINFO */
+ union {
+#ifdef IP_PKTINFO
+ char buf[CMSG_SPACE(sizeof(in))];
+#endif /* IP_PKTINFO */
+#ifdef IPV6_RECVPKTINFO
+ char buf6[CMSG_SPACE(sizeof(in6))];
+#endif /* IPV6_RECVPKTINFO */
+ char bufaddr[CMSG_SPACE(sizeof(struct in_addr))];
+ struct cmsghdr align;
+ } u;
+
+ vec.iov_base = b_peek(buf, b_head_ofs(buf));
+ vec.iov_len = sz;
+ msg.msg_name = &qc->peer_addr;
+ msg.msg_namelen = get_addr_len(&qc->peer_addr);
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+
+ switch (qc->local_addr.ss_family) {
+ case AF_INET:
+#if defined(IP_PKTINFO)
+ memset(&in, 0, sizeof(in));
+ memcpy(&in.ipi_spec_dst,
+ &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
+ sizeof(struct in_addr));
+
+ msg.msg_control = u.buf;
+ msg.msg_controllen = sizeof(u.buf);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = IPPROTO_IP;
+ cmsg->cmsg_type = IP_PKTINFO;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
+ memcpy(CMSG_DATA(cmsg), &in, sizeof(in));
+#elif defined(IP_RECVDSTADDR)
+ msg.msg_control = u.bufaddr;
+ msg.msg_controllen = sizeof(u.bufaddr);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = IPPROTO_IP;
+ cmsg->cmsg_type = IP_SENDSRCADDR;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
+ memcpy(CMSG_DATA(cmsg),
+ &((struct sockaddr_in *)&qc->local_addr)->sin_addr,
+ sizeof(struct in_addr));
+#endif /* IP_PKTINFO || IP_RECVDSTADDR */
+ break;
+
+ case AF_INET6:
+#ifdef IPV6_RECVPKTINFO
+ memset(&in6, 0, sizeof(in6));
+ memcpy(&in6.ipi6_addr,
+ &((struct sockaddr_in6 *)&qc->local_addr)->sin6_addr,
+ sizeof(struct in6_addr));
+
+ msg.msg_control = u.buf6;
+ msg.msg_controllen = sizeof(u.buf6);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = IPPROTO_IPV6;
+ cmsg->cmsg_type = IPV6_PKTINFO;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
+ memcpy(CMSG_DATA(cmsg), &in6, sizeof(in6));
+#endif /* IPV6_RECVPKTINFO */
+ break;
+
+ default:
+ break;
+ }
+
+ ret = sendmsg(qc->li->rx.fd, &msg,
+ MSG_DONTWAIT|MSG_NOSIGNAL);
+ }
+#endif /* IP_PKTINFO || IP_RECVDSTADDR || IPV6_RECVPKTINFO */
+ else {
+ ret = sendto(qc->li->rx.fd, b_peek(buf, b_head_ofs(buf)), sz,
+ MSG_DONTWAIT|MSG_NOSIGNAL,
+ (struct sockaddr *)&qc->peer_addr,
+ get_addr_len(&qc->peer_addr));
+ }
+ } while (ret < 0 && errno == EINTR);
+
+ if (ret < 0) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK ||
+ errno == ENOTCONN || errno == EINPROGRESS) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ qc->cntrs.socket_full++;
+ else
+ qc->cntrs.sendto_err++;
+
+ /* transient error */
+ fd_want_send(qc->fd);
+ fd_cant_send(qc->fd);
+ TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
+ "UDP send failure errno=%d (%s)", errno, strerror(errno));
+ return 0;
+ }
+ else {
+ /* unrecoverable error */
+ qc->cntrs.sendto_err_unknown++;
+ TRACE_PRINTF(TRACE_LEVEL_USER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
+ "UDP send failure errno=%d (%s)", errno, strerror(errno));
+ return -1;
+ }
+ }
+
+ if (ret != sz)
+ return 0;
+
+ return ret;
+}
+
+/* Receive datagram on <qc> FD-owned socket.
+ *
+ * Returns the total number of bytes read or a negative value on error.
+ */
+int qc_rcv_buf(struct quic_conn *qc)
+{
+ struct sockaddr_storage saddr = {0}, daddr = {0};
+ struct quic_transport_params *params;
+ struct quic_dgram *new_dgram = NULL;
+ struct buffer buf = BUF_NULL;
+ size_t max_sz;
+ unsigned char *dgram_buf;
+ struct listener *l;
+ ssize_t ret = 0;
+
+ /* Do not call this if quic-conn FD is uninitialized. */
+ BUG_ON(qc->fd < 0);
+
+ TRACE_ENTER(QUIC_EV_CONN_RCV, qc);
+ l = qc->li;
+
+ params = &l->bind_conf->quic_params;
+ max_sz = params->max_udp_payload_size;
+
+ do {
+ if (!b_alloc(&buf))
+ break; /* TODO subscribe for memory again available. */
+
+ b_reset(&buf);
+ BUG_ON(b_contig_space(&buf) < max_sz);
+
+ /* Allocate datagram on first loop or after requeuing. */
+ if (!new_dgram && !(new_dgram = pool_alloc(pool_head_quic_dgram)))
+ break; /* TODO subscribe for memory again available. */
+
+ dgram_buf = (unsigned char *)b_tail(&buf);
+ ret = quic_recv(qc->fd, dgram_buf, max_sz,
+ (struct sockaddr *)&saddr, sizeof(saddr),
+ (struct sockaddr *)&daddr, sizeof(daddr),
+ get_net_port(&qc->local_addr));
+ if (ret <= 0) {
+ /* Subscribe FD for future reception. */
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOTCONN)
+ fd_want_recv(qc->fd);
+ /* TODO handle other error codes as fatal on the connection. */
+ break;
+ }
+
+ b_add(&buf, ret);
+
+ new_dgram->buf = dgram_buf;
+ new_dgram->len = ret;
+ new_dgram->dcid_len = 0;
+ new_dgram->dcid = NULL;
+ new_dgram->saddr = saddr;
+ new_dgram->daddr = daddr;
+ new_dgram->qc = NULL; /* set later via quic_dgram_parse() */
+
+ TRACE_DEVEL("read datagram", QUIC_EV_CONN_RCV, qc, new_dgram);
+
+ if (!quic_get_dgram_dcid(new_dgram->buf,
+ new_dgram->buf + new_dgram->len,
+ &new_dgram->dcid, &new_dgram->dcid_len)) {
+ continue;
+ }
+
+ if (!qc_check_dcid(qc, new_dgram->dcid, new_dgram->dcid_len)) {
+ /* Datagram received by error on the connection FD, dispatch it
+ * to its associated quic-conn.
+ *
+ * TODO count redispatch datagrams.
+ */
+ struct quic_receiver_buf *rxbuf;
+ struct quic_dgram *tmp_dgram;
+ unsigned char *rxbuf_tail;
+ size_t cspace;
+
+ TRACE_STATE("datagram for other connection on quic-conn socket, requeue it", QUIC_EV_CONN_RCV, qc);
+
+ rxbuf = MT_LIST_POP(&l->rx.rxbuf_list, typeof(rxbuf), rxbuf_el);
+ ALREADY_CHECKED(rxbuf);
+ cspace = b_contig_space(&rxbuf->buf);
+
+ tmp_dgram = quic_rxbuf_purge_dgrams(rxbuf);
+ pool_free(pool_head_quic_dgram, tmp_dgram);
+
+ /* Insert a fake datagram if space wraps to consume it. */
+ if (cspace < new_dgram->len && b_space_wraps(&rxbuf->buf)) {
+ struct quic_dgram *fake_dgram = pool_alloc(pool_head_quic_dgram);
+ if (!fake_dgram) {
+ /* TODO count lost datagrams */
+ MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
+ continue;
+ }
+
+ fake_dgram->buf = NULL;
+ fake_dgram->len = cspace;
+ LIST_APPEND(&rxbuf->dgram_list, &fake_dgram->recv_list);
+ b_add(&rxbuf->buf, cspace);
+ }
+
+ /* Recheck contig space after fake datagram insert. */
+ if (b_contig_space(&rxbuf->buf) < new_dgram->len) {
+ /* TODO count lost datagrams */
+ MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
+ continue;
+ }
+
+ rxbuf_tail = (unsigned char *)b_tail(&rxbuf->buf);
+ __b_putblk(&rxbuf->buf, (char *)dgram_buf, new_dgram->len);
+ if (!quic_lstnr_dgram_dispatch(rxbuf_tail, ret, l, &saddr, &daddr,
+ new_dgram, &rxbuf->dgram_list)) {
+ /* TODO count lost datagrams. */
+ b_sub(&buf, ret);
+ }
+ else {
+ /* datagram must not be freed as it was requeued. */
+ new_dgram = NULL;
+ }
+
+ MT_LIST_APPEND(&l->rx.rxbuf_list, &rxbuf->rxbuf_el);
+ continue;
+ }
+
+ quic_dgram_parse(new_dgram, qc, qc->li);
+ /* A datagram must always be consumed after quic_parse_dgram(). */
+ BUG_ON(new_dgram->buf);
+ } while (ret > 0);
+
+ pool_free(pool_head_quic_dgram, new_dgram);
+
+ if (b_size(&buf)) {
+ b_free(&buf);
+ offer_buffers(NULL, 1);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
+ return ret;
+}
+
+/* Allocate a socket file-descriptor specific for QUIC connection <qc>.
+ * Endpoint addresses are specified by the two following arguments : <src> is
+ * the local address and <dst> is the remote one.
+ *
+ * Return the socket FD or a negative error code. On error, socket is marked as
+ * uninitialized.
+ */
+void qc_alloc_fd(struct quic_conn *qc, const struct sockaddr_storage *src,
+ const struct sockaddr_storage *dst)
+{
+ struct bind_conf *bc = qc->li->bind_conf;
+ struct proxy *p = bc->frontend;
+ int fd = -1;
+ int ret;
+
+ /* Must not happen. */
+ BUG_ON(src->ss_family != dst->ss_family);
+
+ qc_init_fd(qc);
+
+ fd = socket(src->ss_family, SOCK_DGRAM, 0);
+ if (fd < 0)
+ goto err;
+
+ if (fd >= global.maxsock) {
+ send_log(p, LOG_EMERG,
+ "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
+ p->id);
+ goto err;
+ }
+
+ ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+ if (ret < 0)
+ goto err;
+
+ switch (src->ss_family) {
+ case AF_INET:
+#if defined(IP_PKTINFO)
+ ret = setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &one, sizeof(one));
+#elif defined(IP_RECVDSTADDR)
+ ret = setsockopt(fd, IPPROTO_IP, IP_RECVDSTADDR, &one, sizeof(one));
+#endif /* IP_PKTINFO || IP_RECVDSTADDR */
+ break;
+ case AF_INET6:
+#ifdef IPV6_RECVPKTINFO
+ ret = setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one));
+#endif
+ break;
+ }
+ if (ret < 0)
+ goto err;
+
+ ret = bind(fd, (struct sockaddr *)src, get_addr_len(src));
+ if (ret < 0) {
+ if (errno == EACCES) {
+ if (!quic_bind_eacces_warn) {
+ send_log(p, LOG_WARNING,
+ "Permission error on QUIC socket binding for proxy %s. Consider using setcap cap_net_bind_service (Linux only) or running as root.\n",
+ p->id);
+ quic_bind_eacces_warn = 1;
+ }
+
+ /* Fallback to listener socket for this receiver instance. */
+ HA_ATOMIC_STORE(&qc->li->rx.quic_mode, QUIC_SOCK_MODE_LSTNR);
+ }
+ goto err;
+ }
+
+ ret = connect(fd, (struct sockaddr *)dst, get_addr_len(dst));
+ if (ret < 0)
+ goto err;
+
+ qc->fd = fd;
+ fd_set_nonblock(fd);
+ fd_insert(fd, qc, quic_conn_sock_fd_iocb, tgid, ti->ltid_bit);
+ fd_want_recv(fd);
+
+ return;
+
+ err:
+ if (fd >= 0)
+ close(fd);
+}
+
+/* Release socket file-descriptor specific for QUIC connection <qc>. Set
+ * <reinit> if socket should be reinitialized after address migration.
+ */
+void qc_release_fd(struct quic_conn *qc, int reinit)
+{
+ if (qc_test_fd(qc)) {
+ fd_delete(qc->fd);
+ qc->fd = DEAD_FD_MAGIC;
+
+ if (reinit)
+ qc_init_fd(qc);
+ }
+}
+
+/* Wrapper for fd_want_recv(). Safe even if connection does not used its owned
+ * socket.
+ */
+void qc_want_recv(struct quic_conn *qc)
+{
+ if (qc_test_fd(qc))
+ fd_want_recv(qc->fd);
+}
+
+/*********************** QUIC accept queue management ***********************/
+/* per-thread accept queues */
+struct quic_accept_queue *quic_accept_queues;
+
+/* Install <qc> on the queue ready to be accepted. The queue task is then woken
+ * up. If <qc> accept is already scheduled or done, nothing is done.
+ */
+void quic_accept_push_qc(struct quic_conn *qc)
+{
+ struct quic_accept_queue *queue = &quic_accept_queues[tid];
+ struct li_per_thread *lthr = &qc->li->per_thr[ti->ltid];
+
+ /* early return if accept is already in progress/done for this
+ * connection
+ */
+ if (qc->flags & QUIC_FL_CONN_ACCEPT_REGISTERED)
+ return;
+
+ BUG_ON(MT_LIST_INLIST(&qc->accept_list));
+ HA_ATOMIC_INC(&qc->li->rx.quic_curr_accept);
+
+ qc->flags |= QUIC_FL_CONN_ACCEPT_REGISTERED;
+ /* 1. insert the listener in the accept queue
+ *
+ * Use TRY_APPEND as there is a possible race even with INLIST if
+ * multiple threads try to add the same listener instance from several
+ * quic_conn.
+ */
+ if (!MT_LIST_INLIST(&(lthr->quic_accept.list)))
+ MT_LIST_TRY_APPEND(&queue->listeners, &(lthr->quic_accept.list));
+
+ /* 2. insert the quic_conn in the listener per-thread queue. */
+ MT_LIST_APPEND(&lthr->quic_accept.conns, &qc->accept_list);
+
+ /* 3. wake up the queue tasklet */
+ tasklet_wakeup(quic_accept_queues[tid].tasklet);
+}
+
+/* Tasklet handler to accept QUIC connections. Call listener_accept on every
+ * listener instances registered in the accept queue.
+ */
+struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
+{
+ struct li_per_thread *lthr;
+ struct mt_list *elt1, elt2;
+ struct quic_accept_queue *queue = &quic_accept_queues[tid];
+
+ mt_list_for_each_entry_safe(lthr, &queue->listeners, quic_accept.list, elt1, elt2) {
+ listener_accept(lthr->li);
+ if (!MT_LIST_ISEMPTY(&lthr->quic_accept.conns))
+ tasklet_wakeup((struct tasklet*)t);
+ else
+ MT_LIST_DELETE_SAFE(elt1);
+ }
+
+ return NULL;
+}
+
+/* Returns the maximum number of QUIC connections waiting for handshake to
+ * complete in parallel on listener <l> instance. This is directly based on
+ * listener backlog value.
+ */
+int quic_listener_max_handshake(const struct listener *l)
+{
+ return listener_backlog(l) / 2;
+}
+
+/* Returns the value which is considered as the maximum number of QUIC
+ * connections waiting to be accepted for listener <l> instance. This is
+ * directly based on listener backlog value.
+ */
+int quic_listener_max_accept(const struct listener *l)
+{
+ return listener_backlog(l) / 2;
+}
+
+static int quic_alloc_accept_queues(void)
+{
+ int i;
+
+ quic_accept_queues = calloc(global.nbthread,
+ sizeof(*quic_accept_queues));
+ if (!quic_accept_queues) {
+ ha_alert("Failed to allocate the quic accept queues.\n");
+ return 0;
+ }
+
+ for (i = 0; i < global.nbthread; ++i) {
+ struct tasklet *task;
+ if (!(task = tasklet_new())) {
+ ha_alert("Failed to allocate the quic accept queue on thread %d.\n", i);
+ return 0;
+ }
+
+ tasklet_set_tid(task, i);
+ task->process = quic_accept_run;
+ quic_accept_queues[i].tasklet = task;
+
+ MT_LIST_INIT(&quic_accept_queues[i].listeners);
+ }
+
+ return 1;
+}
+REGISTER_POST_CHECK(quic_alloc_accept_queues);
+
+static int quic_deallocate_accept_queues(void)
+{
+ int i;
+
+ if (quic_accept_queues) {
+ for (i = 0; i < global.nbthread; ++i)
+ tasklet_free(quic_accept_queues[i].tasklet);
+ free(quic_accept_queues);
+ }
+
+ return 1;
+}
+REGISTER_POST_DEINIT(quic_deallocate_accept_queues);
diff --git a/src/quic_ssl.c b/src/quic_ssl.c
new file mode 100644
index 0000000..314f587
--- /dev/null
+++ b/src/quic_ssl.c
@@ -0,0 +1,790 @@
+#include <haproxy/errors.h>
+#include <haproxy/ncbuf.h>
+#include <haproxy/proxy.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_rx.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_ssl.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/trace.h>
+
+static BIO_METHOD *ha_quic_meth;
+
+DECLARE_POOL(pool_head_quic_ssl_sock_ctx, "quic_ssl_sock_ctx", sizeof(struct ssl_sock_ctx));
+
+/* Encode the transport parameters of <qc> and set the encoded version into
+ * the TLS stack depending on <ver> QUIC version and <server> boolean which
+ * must be set to 1 for a QUIC server, 0 for a client.
+ * In USE_QUIC_OPENSSL_COMPAT mode the encoding is kept in <qc> itself
+ * (enc_params/enc_params_len) because the compat layer needs it later;
+ * otherwise a stack buffer is enough since the TLS stack copies the data.
+ * Return 1 if succeeded, 0 if not.
+ */
+static int qc_ssl_set_quic_transport_params(struct quic_conn *qc,
+                                            const struct quic_version *ver, int server)
+{
+	int ret = 0;
+#ifdef USE_QUIC_OPENSSL_COMPAT
+	unsigned char *in = qc->enc_params;
+	size_t insz = sizeof qc->enc_params;
+	size_t *enclen = &qc->enc_params_len;
+#else
+	unsigned char tps[QUIC_TP_MAX_ENCLEN];
+	size_t tpslen;
+	unsigned char *in = tps;
+	size_t insz = sizeof tps;
+	size_t *enclen = &tpslen;
+#endif
+
+	TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
+	*enclen = quic_transport_params_encode(in, in + insz, &qc->rx.params, ver, server);
+	if (!*enclen) {
+		/* Pass <qc> so the error trace is bound to this connection,
+		 * consistently with the other traces of this file.
+		 */
+		TRACE_ERROR("quic_transport_params_encode() failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!SSL_set_quic_transport_params(qc->xprt_ctx->ssl, in, *enclen)) {
+		TRACE_ERROR("SSL_set_quic_transport_params() failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc);
+	return ret;
+}
+
+/* This function copies the CRYPTO data provided by the TLS stack found at <data>
+ * with <len> as size in CRYPTO buffers dedicated to store the information about
+ * outgoing CRYPTO frames so that to be able to replay the CRYPTO data streams.
+ * It fails (returns 0) only if it could not manage to allocate enough CRYPTO
+ * buffers to store all the data.
+ * Note that CRYPTO data may exist at any encryption level except at 0-RTT.
+ */
+static int qc_ssl_crypto_data_cpy(struct quic_conn *qc, struct quic_enc_level *qel,
+                                  const unsigned char *data, size_t len)
+{
+	struct quic_crypto_buf **qcb;
+	/* The remaining byte to store in CRYPTO buffers. */
+	size_t cf_offset, cf_len, *nb_buf;
+	unsigned char *pos;
+	int ret = 0;
+
+	nb_buf = &qel->tx.crypto.nb_buf;
+	qcb = &qel->tx.crypto.bufs[*nb_buf - 1];
+	/* Stream offset of this chunk: total size of the full buffers plus
+	 * the bytes already present in the last (current) one.
+	 */
+	cf_offset = (*nb_buf - 1) * QUIC_CRYPTO_BUF_SZ + (*qcb)->sz;
+	cf_len = len;
+
+	TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
+
+	while (len) {
+		size_t to_copy, room;
+
+		pos = (*qcb)->data + (*qcb)->sz;
+		room = QUIC_CRYPTO_BUF_SZ - (*qcb)->sz;
+		to_copy = len > room ? room : len;
+		if (to_copy) {
+			memcpy(pos, data, to_copy);
+			/* Increment the total size of this CRYPTO buffers by <to_copy>. */
+			qel->tx.crypto.sz += to_copy;
+			(*qcb)->sz += to_copy;
+			len -= to_copy;
+			data += to_copy;
+		}
+		else {
+			/* Current buffer is full: grow the bufs pointer array
+			 * by one entry and allocate a fresh CRYPTO buffer.
+			 */
+			struct quic_crypto_buf **tmp;
+
+			// FIXME: realloc!
+			tmp = realloc(qel->tx.crypto.bufs,
+			              (*nb_buf + 1) * sizeof *qel->tx.crypto.bufs);
+			if (tmp) {
+				qel->tx.crypto.bufs = tmp;
+				qcb = &qel->tx.crypto.bufs[*nb_buf];
+				*qcb = pool_alloc(pool_head_quic_crypto_buf);
+				if (!*qcb) {
+					TRACE_ERROR("Could not allocate crypto buf", QUIC_EV_CONN_ADDDATA, qc);
+					goto leave;
+				}
+
+				(*qcb)->sz = 0;
+				++*nb_buf;
+			}
+			else {
+				/* realloc failure: leave with len != 0 -> ret == 0. */
+				break;
+			}
+		}
+	}
+
+	/* Allocate a TX CRYPTO frame only if all the CRYPTO data
+	 * have been buffered.
+	 */
+	if (!len) {
+		struct quic_frame *frm;
+		struct quic_frame *found = NULL;
+
+		/* There is at most one CRYPTO frame in this packet number
+		 * space. Let's look for it.
+		 */
+		list_for_each_entry(frm, &qel->pktns->tx.frms, list) {
+			if (frm->type != QUIC_FT_CRYPTO)
+				continue;
+
+			/* Found */
+			found = frm;
+			break;
+		}
+
+		if (found) {
+			/* Extend the existing frame rather than allocate a new one. */
+			found->crypto.len += cf_len;
+		}
+		else {
+			frm = qc_frm_alloc(QUIC_FT_CRYPTO);
+			if (!frm) {
+				TRACE_ERROR("Could not allocate quic frame", QUIC_EV_CONN_ADDDATA, qc);
+				goto leave;
+			}
+
+			frm->crypto.offset = cf_offset;
+			frm->crypto.len = cf_len;
+			frm->crypto.qel = qel;
+			LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
+		}
+	}
+	ret = len == 0;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
+	return ret;
+}
+
+/* ->set_encryption_secrets QUIC TLS callback: derive and install the RX/TX
+ * keys from the secrets provided by the TLS stack for encryption level
+ * <level>. Either <read_secret> or <write_secret> may be NULL when only one
+ * direction is provided by the stack. For the application level, the raw
+ * secrets are also stored to support subsequent key updates.
+ * Returns 1 on success, 0 on error. On error, all the packet number space
+ * CRYPTO frames are released and a handshake failure TLS alert is set.
+ */
+static int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t level,
+                                          const uint8_t *read_secret,
+                                          const uint8_t *write_secret, size_t secret_len)
+{
+	int ret = 0;
+	struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+	struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
+	struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
+	struct quic_tls_ctx *tls_ctx;
+	const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
+	struct quic_tls_secrets *rx = NULL, *tx = NULL;
+	const struct quic_version *ver =
+		qc->negotiated_version ? qc->negotiated_version : qc->original_version;
+
+	TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
+	BUG_ON(secret_len > QUIC_TLS_SECRET_LEN);
+
+	/* The encryption level may not have been allocated yet. */
+	if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level)) {
+		TRACE_PROTO("Could not allocate an encryption level", QUIC_EV_CONN_ADDDATA, qc);
+		goto leave;
+	}
+
+	tls_ctx = &(*qel)->tls_ctx;
+
+	if (qc->flags & QUIC_FL_CONN_TO_KILL) {
+		TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
+		goto out;
+	}
+
+	if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
+		TRACE_PROTO("CC required", QUIC_EV_CONN_RWSEC, qc);
+		goto out;
+	}
+
+	if (!read_secret)
+		goto write;
+
+	rx = &tls_ctx->rx;
+	rx->aead = tls_aead(cipher);
+	rx->md = tls_md(cipher);
+	rx->hp = tls_hp(cipher);
+	if (!rx->aead || !rx->md || !rx->hp)
+		goto leave;
+
+	if (!quic_tls_secrets_keys_alloc(rx)) {
+		TRACE_ERROR("RX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_derive_keys(rx->aead, rx->hp, rx->md, ver, rx->key, rx->keylen,
+	                          rx->iv, rx->ivlen, rx->hp_key, sizeof rx->hp_key,
+	                          read_secret, secret_len)) {
+		/* Fixed copy-paste error: this is the RX derivation. */
+		TRACE_ERROR("RX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_rx_ctx_init(&rx->ctx, rx->aead, rx->key)) {
+		TRACE_ERROR("could not initialize RX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_dec_aes_ctx_init(&rx->hp_ctx, rx->hp, rx->hp_key)) {
+		TRACE_ERROR("could not initialize RX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	/* Enqueue this connection asap if we could derive O-RTT secrets as
+	 * listener. Note that a listener derives only RX secrets for this
+	 * level.
+	 */
+	if (qc_is_listener(qc) && level == ssl_encryption_early_data) {
+		TRACE_DEVEL("pushing connection into accept queue", QUIC_EV_CONN_RWSEC, qc);
+		quic_accept_push_qc(qc);
+	}
+
+write:
+
+	if (!write_secret)
+		goto keyupdate_init;
+
+	tx = &tls_ctx->tx;
+	tx->aead = tls_aead(cipher);
+	tx->md = tls_md(cipher);
+	tx->hp = tls_hp(cipher);
+	if (!tx->aead || !tx->md || !tx->hp)
+		goto leave;
+
+	if (!quic_tls_secrets_keys_alloc(tx)) {
+		TRACE_ERROR("TX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_derive_keys(tx->aead, tx->hp, tx->md, ver, tx->key, tx->keylen,
+	                          tx->iv, tx->ivlen, tx->hp_key, sizeof tx->hp_key,
+	                          write_secret, secret_len)) {
+		TRACE_ERROR("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_tx_ctx_init(&tx->ctx, tx->aead, tx->key)) {
+		/* Fixed copy-paste error: this is the TX context. */
+		TRACE_ERROR("could not initialize TX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	if (!quic_tls_enc_aes_ctx_init(&tx->hp_ctx, tx->hp, tx->hp_key)) {
+		TRACE_ERROR("could not initialize TX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
+		goto leave;
+	}
+
+	/* Set the transport parameters in the TLS stack. */
+	if (level == ssl_encryption_handshake && qc_is_listener(qc) &&
+	    !qc_ssl_set_quic_transport_params(qc, ver, 1))
+		goto leave;
+
+ keyupdate_init:
+	/* Store the secret provided by the TLS stack, required for keyupdate. */
+	if (level == ssl_encryption_application) {
+		struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
+		struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
+		struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
+
+		if (rx) {
+			if (!(rx->secret = pool_alloc(pool_head_quic_tls_secret))) {
+				TRACE_ERROR("Could not allocate RX Application secret keys", QUIC_EV_CONN_RWSEC, qc);
+				goto leave;
+			}
+
+			memcpy(rx->secret, read_secret, secret_len);
+			rx->secretlen = secret_len;
+		}
+
+		if (tx) {
+			if (!(tx->secret = pool_alloc(pool_head_quic_tls_secret))) {
+				TRACE_ERROR("Could not allocate TX Application secret keys", QUIC_EV_CONN_RWSEC, qc);
+				goto leave;
+			}
+
+			memcpy(tx->secret, write_secret, secret_len);
+			tx->secretlen = secret_len;
+		}
+
+		/* Initialize all the secret keys lengths */
+		prv_rx->secretlen = nxt_rx->secretlen = nxt_tx->secretlen = secret_len;
+	}
+
+ out:
+	ret = 1;
+ leave:
+	if (!ret) {
+		/* Release the CRYPTO frames which have been provided by the TLS stack
+		 * to prevent the transmission of ack-eliciting packets.
+		 */
+		qc_release_pktns_frms(qc, qc->ipktns);
+		qc_release_pktns_frms(qc, qc->hpktns);
+		qc_release_pktns_frms(qc, qc->apktns);
+		quic_set_tls_alert(qc, SSL_AD_HANDSHAKE_FAILURE);
+	}
+
+	TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc, &level);
+	return ret;
+}
+
+#if defined(OPENSSL_IS_AWSLC)
+/* Compatibility wrappers for AWS-LC, whose SSL_QUIC_METHOD exposes two
+ * split callbacks (read/write) instead of a single set_encryption_secrets
+ * one. Both forward to ha_quic_set_encryption_secrets() with the unused
+ * direction set to NULL. The <cipher> argument is ignored:
+ * ha_quic_set_encryption_secrets() fetches it via SSL_get_current_cipher().
+ */
+static inline int ha_quic_set_read_secret(SSL *ssl, enum ssl_encryption_level_t level,
+                                          const SSL_CIPHER *cipher, const uint8_t *secret,
+                                          size_t secret_len)
+{
+	return ha_quic_set_encryption_secrets(ssl, level, secret, NULL, secret_len);
+
+}
+
+static inline int ha_quic_set_write_secret(SSL *ssl, enum ssl_encryption_level_t level,
+                                           const SSL_CIPHER *cipher, const uint8_t *secret,
+                                           size_t secret_len)
+{
+
+	return ha_quic_set_encryption_secrets(ssl, level, NULL, secret, secret_len);
+
+}
+#endif
+
+/* ->add_handshake_data QUIC TLS callback used by the QUIC TLS stack when it
+ * wants to provide the QUIC layer with CRYPTO data.
+ * The data are buffered via qc_ssl_crypto_data_cpy() into the encryption
+ * level matching <level>, allocating this level first if needed.
+ * Returns 1 if succeeded (including when the connection is being killed or
+ * closed, in which case the data are deliberately ignored), 0 if not.
+ */
+static int ha_quic_add_handshake_data(SSL *ssl, enum ssl_encryption_level_t level,
+                                      const uint8_t *data, size_t len)
+{
+	int ret = 0;
+	struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+	struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
+	struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
+
+	TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
+
+	if (qc->flags & QUIC_FL_CONN_TO_KILL) {
+		TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
+		goto out;
+	}
+
+	if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
+		TRACE_PROTO("CC required", QUIC_EV_CONN_ADDDATA, qc);
+		goto out;
+	}
+
+	/* The encryption level may not have been allocated yet. */
+	if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level))
+		goto leave;
+
+	if (!qc_ssl_crypto_data_cpy(qc, *qel, data, len)) {
+		TRACE_ERROR("Could not bufferize", QUIC_EV_CONN_ADDDATA, qc);
+		goto leave;
+	}
+
+	TRACE_DEVEL("CRYPTO data buffered", QUIC_EV_CONN_ADDDATA,
+	            qc, &level, &len);
+ out:
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
+	return ret;
+}
+
+/* ->flush_flight QUIC TLS callback. Nothing to do here beyond tracing:
+ * always returns 1 (success).
+ */
+static int ha_quic_flush_flight(SSL *ssl)
+{
+	struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+	TRACE_ENTER(QUIC_EV_CONN_FFLIGHT, qc);
+	TRACE_LEAVE(QUIC_EV_CONN_FFLIGHT, qc);
+
+	return 1;
+}
+
+/* ->send_alert QUIC TLS callback, called when the TLS stack wants to emit a
+ * TLS alert <alert> at encryption level <level>. The alert is only recorded
+ * on the connection via quic_set_tls_alert(); the actual CONNECTION_CLOSE
+ * emission is handled elsewhere. Always returns 1.
+ */
+static int ha_quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert)
+{
+	struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+
+	TRACE_ENTER(QUIC_EV_CONN_SSLALERT, qc);
+
+	TRACE_PROTO("Received TLS alert", QUIC_EV_CONN_SSLALERT, qc, &alert, &level);
+
+	quic_set_tls_alert(qc, alert);
+	TRACE_LEAVE(QUIC_EV_CONN_SSLALERT, qc);
+	return 1;
+}
+
+/* QUIC TLS methods: the callback table handed to the TLS stack via
+ * SSL_set_quic_method(). AWS-LC uses split read/write secret callbacks,
+ * the other stacks a single set_encryption_secrets callback.
+ */
+#if defined(OPENSSL_IS_AWSLC)
+/* write/read set secret split */
+static SSL_QUIC_METHOD ha_quic_method = {
+	.set_read_secret        = ha_quic_set_read_secret,
+	.set_write_secret       = ha_quic_set_write_secret,
+	.add_handshake_data     = ha_quic_add_handshake_data,
+	.flush_flight           = ha_quic_flush_flight,
+	.send_alert             = ha_quic_send_alert,
+};
+
+#else
+
+static SSL_QUIC_METHOD ha_quic_method = {
+	.set_encryption_secrets = ha_quic_set_encryption_secrets,
+	.add_handshake_data     = ha_quic_add_handshake_data,
+	.flush_flight           = ha_quic_flush_flight,
+	.send_alert             = ha_quic_send_alert,
+};
+#endif
+
+/* Initialize the TLS context of a listener with <bind_conf> as configuration.
+ * TLS 1.3 only is allowed, as mandated by QUIC. The resulting SSL_CTX is
+ * stored into bind_conf->initial_ctx.
+ * Returns an error count (0 on success).
+ */
+int ssl_quic_initial_ctx(struct bind_conf *bind_conf)
+{
+	struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
+	int cfgerr = 0;
+
+	long options =
+		(SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) |
+		SSL_OP_SINGLE_ECDH_USE |
+		SSL_OP_CIPHER_SERVER_PREFERENCE;
+	SSL_CTX *ctx;
+
+	ctx = SSL_CTX_new(TLS_server_method());
+	if (!ctx) {
+		/* SSL_CTX_new() returns NULL on allocation failure: bail out
+		 * before dereferencing it below.
+		 */
+		ha_alert("Binding [%s:%d] for %s %s: unable to allocate TLS context for QUIC.\n",
+		         bind_conf->file, bind_conf->line,
+		         proxy_type_str(bind_conf->frontend), bind_conf->frontend->id);
+		cfgerr++;
+		return cfgerr;
+	}
+	bind_conf->initial_ctx = ctx;
+
+	SSL_CTX_set_options(ctx, options);
+	SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
+	SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
+	SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
+
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+# if defined(HAVE_SSL_CLIENT_HELLO_CB)
+#  if defined(SSL_OP_NO_ANTI_REPLAY)
+	if (bind_conf->ssl_conf.early_data) {
+		/* 0-RTT: QUIC provides its own anti-replay protection. */
+		SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
+#  if defined(USE_QUIC_OPENSSL_COMPAT) || defined(OPENSSL_IS_AWSLC)
+		ha_warning("Binding [%s:%d] for %s %s: 0-RTT is not supported in limited QUIC compatibility mode, ignored.\n",
+		           bind_conf->file, bind_conf->line, proxy_type_str(bind_conf->frontend), bind_conf->frontend->id);
+#  else
+		SSL_CTX_set_max_early_data(ctx, 0xffffffff);
+#  endif /* ! USE_QUIC_OPENSSL_COMPAT */
+	}
+#  endif /* !SSL_OP_NO_ANTI_REPLAY */
+	SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
+	SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
+# else /* ! HAVE_SSL_CLIENT_HELLO_CB */
+	SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
+# endif
+	SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
+#endif
+#ifdef USE_QUIC_OPENSSL_COMPAT
+	if (!quic_tls_compat_init(bind_conf, ctx))
+		cfgerr++;
+#endif
+
+	return cfgerr;
+}
+
+/* This function gives the detail of the SSL error. It is used only
+ * if the debug mode and the verbose mode are activated. It dumps all
+ * the SSL errors until the error stack is empty.
+ */
+static forceinline void qc_ssl_dump_errors(struct connection *conn)
+{
+	if (unlikely(global.mode & MODE_DEBUG)) {
+		while (1) {
+			const char *func = NULL;
+			unsigned long ret;
+
+			/* Peek the function name before popping the error,
+			 * then pop it with ERR_get_error().
+			 */
+			ERR_peek_error_func(&func);
+			ret = ERR_get_error();
+			if (!ret)
+				return;
+
+			fprintf(stderr, "conn. @%p OpenSSL error[0x%lx] %s: %s\n", conn, ret,
+			        func, ERR_reason_error_string(ret));
+		}
+	}
+}
+
+/* Provide CRYPTO data to the TLS stack found at <data> with <len> as length
+ * at encryption level <level> with <ctx> as QUIC connection context.
+ * <ncbuf> is the non-contiguous buffer the data came from: it is advanced
+ * by <len> bytes before leaving, even on error, to release memory asap.
+ * Depending on the handshake state this either drives SSL_do_handshake()
+ * or SSL_process_quic_post_handshake().
+ * Return 1 if succeeded, 0 if not.
+ */
+int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
+                             enum ssl_encryption_level_t level,
+                             struct ssl_sock_ctx *ctx,
+                             const unsigned char *data, size_t len)
+{
+#ifdef DEBUG_STRICT
+	enum ncb_ret ncb_ret;
+#endif
+	int ssl_err, state;
+	struct quic_conn *qc;
+	int ret = 0;
+
+	ssl_err = SSL_ERROR_NONE;
+	qc = ctx->qc;
+
+	TRACE_ENTER(QUIC_EV_CONN_SSLDATA, qc);
+
+	if (SSL_provide_quic_data(ctx->ssl, level, data, len) != 1) {
+		TRACE_ERROR("SSL_provide_quic_data() error",
+		            QUIC_EV_CONN_SSLDATA, qc, NULL, NULL, ctx->ssl);
+		goto leave;
+	}
+
+	state = qc->state;
+	if (state < QUIC_HS_ST_COMPLETE) {
+		/* Handshake not completed yet: make the TLS stack progress. */
+		ssl_err = SSL_do_handshake(ctx->ssl);
+
+		if (qc->flags & QUIC_FL_CONN_TO_KILL) {
+			TRACE_DEVEL("connection to be killed", QUIC_EV_CONN_IO_CB, qc);
+			goto leave;
+		}
+
+		/* Finalize the connection as soon as possible if the peer transport parameters
+		 * have been received. This may be useful to send packets even if this
+		 * handshake fails.
+		 */
+		if ((qc->flags & QUIC_FL_CONN_TX_TP_RECEIVED) && !qc_conn_finalize(qc, 1)) {
+			TRACE_ERROR("connection finalization failed", QUIC_EV_CONN_IO_CB, qc, &state);
+			goto leave;
+		}
+
+		if (ssl_err != 1) {
+			ssl_err = SSL_get_error(ctx->ssl, ssl_err);
+			if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
+				/* Not an error: the handshake simply needs more data. */
+				TRACE_PROTO("SSL handshake in progress",
+				            QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
+				goto out;
+			}
+
+			TRACE_ERROR("SSL handshake error", QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
+			HA_ATOMIC_INC(&qc->prx_counters->hdshk_fail);
+			qc_ssl_dump_errors(ctx->conn);
+			ERR_clear_error();
+			goto leave;
+		}
+
+		TRACE_PROTO("SSL handshake OK", QUIC_EV_CONN_IO_CB, qc, &state);
+
+		/* Check the alpn could be negotiated */
+		if (!qc->app_ops) {
+			TRACE_ERROR("No negotiated ALPN", QUIC_EV_CONN_IO_CB, qc, &state);
+			quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
+			goto leave;
+		}
+
+		/* I/O callback switch: from now on, packets are handled by the
+		 * application-level I/O handler.
+		 */
+		qc->wait_event.tasklet->process = quic_conn_app_io_cb;
+		if (qc_is_listener(ctx->qc)) {
+			qc->flags |= QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS;
+			qc->state = QUIC_HS_ST_CONFIRMED;
+			/* The connection is ready to be accepted. */
+			quic_accept_push_qc(qc);
+
+			/* This connection no longer counts against the
+			 * listener's in-handshake limit.
+			 */
+			BUG_ON(qc->li->rx.quic_curr_handshake == 0);
+			HA_ATOMIC_DEC(&qc->li->rx.quic_curr_handshake);
+		}
+		else {
+			qc->state = QUIC_HS_ST_COMPLETE;
+		}
+
+		/* Prepare the next key update */
+		if (!quic_tls_key_update(qc)) {
+			TRACE_ERROR("quic_tls_key_update() failed", QUIC_EV_CONN_IO_CB, qc);
+			goto leave;
+		}
+	} else {
+		/* Handshake already complete: process post-handshake messages
+		 * (session tickets, key updates...).
+		 */
+		ssl_err = SSL_process_quic_post_handshake(ctx->ssl);
+		if (ssl_err != 1) {
+			ssl_err = SSL_get_error(ctx->ssl, ssl_err);
+			if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
+				TRACE_PROTO("SSL post handshake in progress",
+				            QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
+				goto out;
+			}
+
+			TRACE_ERROR("SSL post handshake error",
+			            QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
+			goto leave;
+		}
+
+		TRACE_STATE("SSL post handshake succeeded", QUIC_EV_CONN_IO_CB, qc, &state);
+	}
+
+ out:
+	ret = 1;
+ leave:
+	/* The CRYPTO data are consumed even in case of an error to release
+	 * the memory asap.
+	 */
+	if (!ncb_is_null(ncbuf)) {
+#ifdef DEBUG_STRICT
+		ncb_ret = ncb_advance(ncbuf, len);
+		/* ncb_advance() must always succeed. This is guaranteed as
+		 * this is only done inside a data block. If false, this will
+		 * lead to handshake failure with quic_enc_level offset shifted
+		 * from buffer data.
+		 */
+		BUG_ON(ncb_ret != NCB_RET_OK);
+#else
+		ncb_advance(ncbuf, len);
+#endif
+	}
+
+	TRACE_LEAVE(QUIC_EV_CONN_SSLDATA, qc);
+	return ret;
+}
+
+/* Provide all the stored in order CRYPTO data received from the peer to the
+ * TLS stack, iterating over every allocated encryption level. Each buffered
+ * frame is freed as soon as it has been handed over.
+ * Return 1 if succeeded, 0 if not.
+ */
+int qc_ssl_provide_all_quic_data(struct quic_conn *qc, struct ssl_sock_ctx *ctx)
+{
+	int ret = 0;
+	struct quic_enc_level *qel;
+	struct ncbuf ncbuf = NCBUF_NULL;
+
+	TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
+	list_for_each_entry(qel, &qc->qel_list, list) {
+		struct qf_crypto *qf_crypto, *qf_back;
+
+		list_for_each_entry_safe(qf_crypto, qf_back, &qel->rx.crypto_frms, list) {
+			const unsigned char *crypto_data = qf_crypto->data;
+			size_t crypto_len = qf_crypto->len;
+
+			/* Free this frame asap */
+			LIST_DELETE(&qf_crypto->list);
+			pool_free(pool_head_qf_crypto, qf_crypto);
+
+			/* <ncbuf> is NCBUF_NULL here: these data do not come
+			 * from the non-contiguous buffer.
+			 */
+			if (!qc_ssl_provide_quic_data(&ncbuf, qel->level, ctx,
+			                              crypto_data, crypto_len))
+				goto leave;
+
+			TRACE_DEVEL("buffered crypto data were provided to TLS stack",
+			            QUIC_EV_CONN_PHPKTS, qc, qel);
+		}
+
+		if (!qel->cstream)
+			continue;
+
+		if (!qc_treat_rx_crypto_frms(qc, qel, ctx))
+			goto leave;
+	}
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
+	return ret;
+}
+
+/* Try to allocate the <*ssl> SSL session object for <qc> QUIC connection
+ * with <ssl_ctx> as SSL context inherited settings, and attach <qc> and the
+ * QUIC method callbacks to it. Each allocation failure is retried once
+ * after a pool_gc() call, in the hope of releasing some memory.
+ * This is the responsibility of the caller to check the validity of all the
+ * pointers passed as parameter to this function.
+ * Return 0 if succeeded, -1 if not.
+ */
+static int qc_ssl_sess_init(struct quic_conn *qc, SSL_CTX *ssl_ctx, SSL **ssl)
+{
+	int retry, ret = -1;
+
+	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+	retry = 1;
+ retry:
+	*ssl = SSL_new(ssl_ctx);
+	if (!*ssl) {
+		if (!retry--)
+			goto leave;
+
+		/* Try to release some memory and retry once. */
+		pool_gc(NULL);
+		goto retry;
+	}
+
+	if (!SSL_set_ex_data(*ssl, ssl_qc_app_data_index, qc) ||
+	    !SSL_set_quic_method(*ssl, &ha_quic_method)) {
+		SSL_free(*ssl);
+		*ssl = NULL;
+		if (!retry--)
+			goto leave;
+
+		pool_gc(NULL);
+		goto retry;
+	}
+
+	ret = 0;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+	return ret;
+}
+
+/* Allocate the ssl_sock_ctx from connection <qc>. This creates the tasklet
+ * used to process <qc> received packets. The allocated context is stored in
+ * <qc.xprt_ctx>.
+ *
+ * Returns 0 on success else non-zero (note the final "return !ret").
+ */
+int qc_alloc_ssl_sock_ctx(struct quic_conn *qc)
+{
+	int ret = 0;
+	struct bind_conf *bc = qc->li->bind_conf;
+	struct ssl_sock_ctx *ctx = NULL;
+
+	TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+	ctx = pool_alloc(pool_head_quic_ssl_sock_ctx);
+	if (!ctx) {
+		TRACE_ERROR("SSL context allocation failed", QUIC_EV_CONN_TXPKT);
+		goto err;
+	}
+
+	/* Explicitly initialize every member: this context does not come
+	 * from a zeroing allocator.
+	 */
+	ctx->conn = NULL;
+	ctx->bio = NULL;
+	ctx->xprt = NULL;
+	ctx->xprt_ctx = NULL;
+	memset(&ctx->wait_event, 0, sizeof(ctx->wait_event));
+	ctx->subs = NULL;
+	ctx->xprt_st = 0;
+	ctx->error_code = 0;
+	ctx->early_buf = BUF_NULL;
+	ctx->sent_early_data = 0;
+	ctx->qc = qc;
+
+	if (qc_is_listener(qc)) {
+		if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl) == -1)
+		        goto err;
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L) && !defined(OPENSSL_IS_AWSLC)
+#ifndef USE_QUIC_OPENSSL_COMPAT
+		/* Enabling 0-RTT */
+		if (bc->ssl_conf.early_data)
+			SSL_set_quic_early_data_enabled(ctx->ssl, 1);
+#endif
+#endif
+
+		SSL_set_accept_state(ctx->ssl);
+	}
+
+	ctx->xprt = xprt_get(XPRT_QUIC);
+
+	/* Store the allocated context in <qc>. */
+	qc->xprt_ctx = ctx;
+
+	/* global.sslconns is already incremented on INITIAL packet parsing. */
+	_HA_ATOMIC_INC(&global.totalsslconns);
+
+	ret = 1;
+ leave:
+	TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+	return !ret;
+
+ err:
+	pool_free(pool_head_quic_ssl_sock_ctx, ctx);
+	goto leave;
+}
+
+/* Allocate the BIO method used for QUIC connections (0x666 is an arbitrary
+ * unused BIO type identifier).
+ * NOTE(review): BIO_meth_new() may return NULL on allocation failure and
+ * the result is not checked here — presumably tolerated at init time, but
+ * worth confirming against the users of ha_quic_meth.
+ */
+static void __quic_conn_init(void)
+{
+	ha_quic_meth = BIO_meth_new(0x666, "ha QUIC methods");
+}
+INITCALL0(STG_REGISTER, __quic_conn_init);
+
+/* Release the QUIC BIO method at deinit (BIO_meth_free(NULL) is a no-op). */
+static void __quic_conn_deinit(void)
+{
+	BIO_meth_free(ha_quic_meth);
+}
+REGISTER_POST_DEINIT(__quic_conn_deinit);
diff --git a/src/quic_stats.c b/src/quic_stats.c
new file mode 100644
index 0000000..3657f30
--- /dev/null
+++ b/src/quic_stats.c
@@ -0,0 +1,215 @@
+#include <haproxy/quic_frame-t.h>
+#include <haproxy/quic_stats-t.h>
+#include <haproxy/stats.h>
+
+/* Human-readable names and descriptions for every QUIC statistic, indexed
+ * by the QUIC_ST_* enum values. Must stay in sync with quic_fill_stats()
+ * below and with the quic_counters structure.
+ */
+static struct name_desc quic_stats[] = {
+	[QUIC_ST_RXBUF_FULL]          = { .name = "quic_rxbuf_full",
+	                                  .desc = "Total number of cancelled reception due to full receiver buffer" },
+	[QUIC_ST_DROPPED_PACKET]      = { .name = "quic_dropped_pkt",
+	                                  .desc = "Total number of dropped packets" },
+	[QUIC_ST_DROPPED_PACKET_BUFOVERRUN] = { .name = "quic_dropped_pkt_bufoverrun",
+	                                  .desc = "Total number of dropped packets because of buffer overrun" },
+	[QUIC_ST_DROPPED_PARSING]     = { .name = "quic_dropped_parsing_pkt",
+	                                  .desc = "Total number of dropped packets upon parsing error" },
+	[QUIC_ST_SOCKET_FULL]         = { .name = "quic_socket_full",
+	                                  .desc = "Total number of EAGAIN error on sendto() calls" },
+	[QUIC_ST_SENDTO_ERR]          = { .name = "quic_sendto_err",
+	                                  .desc = "Total number of error on sendto() calls, EAGAIN excepted" },
+	[QUIC_ST_SENDTO_ERR_UNKNWN]   = { .name = "quic_sendto_err_unknwn",
+	                                  .desc = "Total number of error on sendto() calls not explicitly listed" },
+	[QUIC_ST_SENT_PACKET]         = { .name = "quic_sent_pkt",
+	                                  .desc = "Total number of sent packets" },
+	[QUIC_ST_LOST_PACKET]         = { .name = "quic_lost_pkt",
+	                                  .desc = "Total number of lost sent packets" },
+	[QUIC_ST_TOO_SHORT_INITIAL_DGRAM] = { .name = "quic_too_short_dgram",
+	                                  .desc = "Total number of too short dgrams with Initial packets" },
+	[QUIC_ST_RETRY_SENT]          = { .name = "quic_retry_sent",
+	                                  .desc = "Total number of Retry sent" },
+	[QUIC_ST_RETRY_VALIDATED]     = { .name = "quic_retry_validated",
+	                                  .desc = "Total number of validated Retry tokens" },
+	[QUIC_ST_RETRY_ERRORS]        = { .name = "quic_retry_error",
+	                                  .desc = "Total number of Retry tokens errors" },
+	[QUIC_ST_HALF_OPEN_CONN]      = { .name = "quic_half_open_conn",
+	                                  .desc = "Total number of half open connections" },
+	[QUIC_ST_HDSHK_FAIL]          = { .name = "quic_hdshk_fail",
+	                                  .desc = "Total number of handshake failures" },
+	[QUIC_ST_STATELESS_RESET_SENT] = { .name = "quic_stless_rst_sent",
+	                                  .desc = "Total number of stateless reset packet sent" },
+	/* Special events of interest */
+	[QUIC_ST_CONN_MIGRATION_DONE] = { .name = "quic_conn_migration_done",
+	                                  .desc = "Total number of connection migration proceeded" },
+	/* Transport errors */
+	[QUIC_ST_TRANSP_ERR_NO_ERROR] = { .name = "quic_transp_err_no_error",
+	                                  .desc = "Total number of NO_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_INTERNAL_ERROR] = { .name = "quic_transp_err_internal_error",
+	                                  .desc = "Total number of INTERNAL_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_CONNECTION_REFUSED] = { .name = "quic_transp_err_connection_refused",
+	                                  .desc = "Total number of CONNECTION_REFUSED errors received" },
+	[QUIC_ST_TRANSP_ERR_FLOW_CONTROL_ERROR] = { .name = "quic_transp_err_flow_control_error",
+	                                  .desc = "Total number of FLOW_CONTROL_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_STREAM_LIMIT_ERROR] = { .name = "quic_transp_err_stream_limit_error",
+	                                  .desc = "Total number of STREAM_LIMIT_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_STREAM_STATE_ERROR] = { .name = "quic_transp_err_stream_state_error",
+	                                  .desc = "Total number of STREAM_STATE_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_FINAL_SIZE_ERROR] = { .name = "quic_transp_err_final_size_error",
+	                                  .desc = "Total number of FINAL_SIZE_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_FRAME_ENCODING_ERROR] = { .name = "quic_transp_err_frame_encoding_error",
+	                                  .desc = "Total number of FRAME_ENCODING_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_TRANSPORT_PARAMETER_ERROR] = { .name = "quic_transp_err_transport_parameter_error",
+	                                  .desc = "Total number of TRANSPORT_PARAMETER_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_CONNECTION_ID_LIMIT_ERROR] = { .name = "quic_transp_err_connection_id_limit",
+	                                  .desc = "Total number of CONNECTION_ID_LIMIT_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_PROTOCOL_VIOLATION] = { .name = "quic_transp_err_protocol_violation_error",
+	                                  .desc = "Total number of PROTOCOL_VIOLATION errors received" },
+	[QUIC_ST_TRANSP_ERR_INVALID_TOKEN] = { .name = "quic_transp_err_invalid_token",
+	                                  .desc = "Total number of INVALID_TOKEN errors received" },
+	[QUIC_ST_TRANSP_ERR_APPLICATION_ERROR] = { .name = "quic_transp_err_application_error",
+	                                  .desc = "Total number of APPLICATION_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_CRYPTO_BUFFER_EXCEEDED] = { .name = "quic_transp_err_crypto_buffer_exceeded",
+	                                  .desc = "Total number of CRYPTO_BUFFER_EXCEEDED errors received" },
+	[QUIC_ST_TRANSP_ERR_KEY_UPDATE_ERROR] = { .name = "quic_transp_err_key_update_error",
+	                                  .desc = "Total number of KEY_UPDATE_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_AEAD_LIMIT_REACHED] = { .name = "quic_transp_err_aead_limit_reached",
+	                                  .desc = "Total number of AEAD_LIMIT_REACHED errors received" },
+	[QUIC_ST_TRANSP_ERR_NO_VIABLE_PATH] = { .name = "quic_transp_err_no_viable_path",
+	                                  .desc = "Total number of NO_VIABLE_PATH errors received" },
+	[QUIC_ST_TRANSP_ERR_CRYPTO_ERROR] = { .name = "quic_transp_err_crypto_error",
+	                                  .desc = "Total number of CRYPTO_ERROR errors received" },
+	[QUIC_ST_TRANSP_ERR_UNKNOWN_ERROR] = { .name = "quic_transp_err_unknown_error",
+	                                  .desc = "Total number of UNKNOWN_ERROR errors received" },
+	/* Streams related counters */
+	[QUIC_ST_DATA_BLOCKED]        = { .name = "quic_data_blocked",
+	                                  .desc = "Total number of received DATA_BLOCKED frames" },
+	[QUIC_ST_STREAM_DATA_BLOCKED] = { .name = "quic_stream_data_blocked",
+	                                  .desc = "Total number of received STREAM_DATA_BLOCKED frames" },
+	[QUIC_ST_STREAMS_BLOCKED_BIDI] = { .name = "quic_streams_blocked_bidi",
+	                                  .desc = "Total number of received STREAMS_BLOCKED_BIDI frames" },
+	[QUIC_ST_STREAMS_BLOCKED_UNI] = { .name = "quic_streams_blocked_uni",
+	                                  .desc = "Total number of received STREAMS_BLOCKED_UNI frames" },
+};
+
+/* Global (process-wide) QUIC counters, registered with the stats module. */
+struct quic_counters quic_counters;
+
+/* Stats module callback: export every counter of <data> (a struct
+ * quic_counters) into the <stats> field array, indexed by the QUIC_ST_*
+ * enum. All fields are counters except the half-open connection count,
+ * exported as a gauge.
+ */
+static void quic_fill_stats(void *data, struct field *stats)
+{
+	struct quic_counters *counters = data;
+
+	stats[QUIC_ST_RXBUF_FULL]          = mkf_u64(FN_COUNTER, counters->rxbuf_full);
+	stats[QUIC_ST_DROPPED_PACKET]      = mkf_u64(FN_COUNTER, counters->dropped_pkt);
+	stats[QUIC_ST_DROPPED_PACKET_BUFOVERRUN] = mkf_u64(FN_COUNTER, counters->dropped_pkt_bufoverrun);
+	stats[QUIC_ST_DROPPED_PARSING]     = mkf_u64(FN_COUNTER, counters->dropped_parsing);
+	stats[QUIC_ST_SOCKET_FULL]         = mkf_u64(FN_COUNTER, counters->socket_full);
+	stats[QUIC_ST_SENDTO_ERR]          = mkf_u64(FN_COUNTER, counters->sendto_err);
+	stats[QUIC_ST_SENDTO_ERR_UNKNWN]   = mkf_u64(FN_COUNTER, counters->sendto_err_unknown);
+	stats[QUIC_ST_SENT_PACKET]         = mkf_u64(FN_COUNTER, counters->sent_pkt);
+	stats[QUIC_ST_LOST_PACKET]         = mkf_u64(FN_COUNTER, counters->lost_pkt);
+	stats[QUIC_ST_TOO_SHORT_INITIAL_DGRAM] = mkf_u64(FN_COUNTER, counters->too_short_initial_dgram);
+	stats[QUIC_ST_RETRY_SENT]          = mkf_u64(FN_COUNTER, counters->retry_sent);
+	stats[QUIC_ST_RETRY_VALIDATED]     = mkf_u64(FN_COUNTER, counters->retry_validated);
+	stats[QUIC_ST_RETRY_ERRORS]        = mkf_u64(FN_COUNTER, counters->retry_error);
+	/* Gauge: goes up and down with the current number of half-open conns. */
+	stats[QUIC_ST_HALF_OPEN_CONN]      = mkf_u64(FN_GAUGE, counters->half_open_conn);
+	stats[QUIC_ST_HDSHK_FAIL]          = mkf_u64(FN_COUNTER, counters->hdshk_fail);
+	stats[QUIC_ST_STATELESS_RESET_SENT] = mkf_u64(FN_COUNTER, counters->stateless_reset_sent);
+	/* Special events of interest */
+	stats[QUIC_ST_CONN_MIGRATION_DONE] = mkf_u64(FN_COUNTER, counters->conn_migration_done);
+	/* Transport errors */
+	stats[QUIC_ST_TRANSP_ERR_NO_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_no_error);
+	stats[QUIC_ST_TRANSP_ERR_INTERNAL_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_internal_error);
+	stats[QUIC_ST_TRANSP_ERR_CONNECTION_REFUSED] = mkf_u64(FN_COUNTER, counters->quic_transp_err_connection_refused);
+	stats[QUIC_ST_TRANSP_ERR_FLOW_CONTROL_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_flow_control_error);
+	stats[QUIC_ST_TRANSP_ERR_STREAM_LIMIT_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_stream_limit_error);
+	stats[QUIC_ST_TRANSP_ERR_STREAM_STATE_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_stream_state_error);
+	stats[QUIC_ST_TRANSP_ERR_FINAL_SIZE_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_final_size_error);
+	stats[QUIC_ST_TRANSP_ERR_FRAME_ENCODING_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_frame_encoding_error);
+	stats[QUIC_ST_TRANSP_ERR_TRANSPORT_PARAMETER_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_transport_parameter_error);
+	stats[QUIC_ST_TRANSP_ERR_CONNECTION_ID_LIMIT_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_connection_id_limit);
+	stats[QUIC_ST_TRANSP_ERR_PROTOCOL_VIOLATION] = mkf_u64(FN_COUNTER, counters->quic_transp_err_protocol_violation);
+	stats[QUIC_ST_TRANSP_ERR_INVALID_TOKEN] = mkf_u64(FN_COUNTER, counters->quic_transp_err_invalid_token);
+	stats[QUIC_ST_TRANSP_ERR_APPLICATION_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_application_error);
+	stats[QUIC_ST_TRANSP_ERR_CRYPTO_BUFFER_EXCEEDED] = mkf_u64(FN_COUNTER, counters->quic_transp_err_crypto_buffer_exceeded);
+	stats[QUIC_ST_TRANSP_ERR_KEY_UPDATE_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_key_update_error);
+	stats[QUIC_ST_TRANSP_ERR_AEAD_LIMIT_REACHED] = mkf_u64(FN_COUNTER, counters->quic_transp_err_aead_limit_reached);
+	stats[QUIC_ST_TRANSP_ERR_NO_VIABLE_PATH] = mkf_u64(FN_COUNTER, counters->quic_transp_err_no_viable_path);
+	stats[QUIC_ST_TRANSP_ERR_CRYPTO_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_crypto_error);
+	stats[QUIC_ST_TRANSP_ERR_UNKNOWN_ERROR] = mkf_u64(FN_COUNTER, counters->quic_transp_err_unknown_error);
+	/* Streams related counters */
+	stats[QUIC_ST_DATA_BLOCKED]        = mkf_u64(FN_COUNTER, counters->data_blocked);
+	stats[QUIC_ST_STREAM_DATA_BLOCKED] = mkf_u64(FN_COUNTER, counters->stream_data_blocked);
+	stats[QUIC_ST_STREAMS_BLOCKED_BIDI] = mkf_u64(FN_COUNTER, counters->streams_blocked_bidi);
+	stats[QUIC_ST_STREAMS_BLOCKED_UNI] = mkf_u64(FN_COUNTER, counters->streams_blocked_uni);
+}
+
+/* Registration of the QUIC stats module: frontend proxy domain, counters
+ * clearable from the CLI.
+ */
+struct stats_module quic_stats_module = {
+	.name          = "quic",
+	.fill_stats    = quic_fill_stats,
+	.stats         = quic_stats,
+	.stats_count   = QUIC_STATS_COUNT,
+	.counters      = &quic_counters,
+	.counters_size = sizeof(quic_counters),
+	.domain_flags  = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE),
+	.clearable     = 1,
+};
+
+INITCALL1(STG_REGISTER, stats_register_module, &quic_stats_module);
+
+/* Increment in <ctrs> the counter matching QUIC transport error code
+ * <error_code>. Codes in the 0x100-0x1ff range are the TLS alert crypto
+ * errors (CRYPTO_ERROR); anything else unrecognized is counted as unknown.
+ */
+void quic_stats_transp_err_count_inc(struct quic_counters *ctrs, int error_code)
+{
+	switch (error_code) {
+	case QC_ERR_NO_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_no_error);
+		break;
+	case QC_ERR_INTERNAL_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_internal_error);
+		break;
+	case QC_ERR_CONNECTION_REFUSED:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_connection_refused);
+		break;
+	case QC_ERR_FLOW_CONTROL_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_flow_control_error);
+		break;
+	case QC_ERR_STREAM_LIMIT_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_stream_limit_error);
+		break;
+	case QC_ERR_STREAM_STATE_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_stream_state_error);
+		break;
+	case QC_ERR_FINAL_SIZE_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_final_size_error);
+		break;
+	case QC_ERR_FRAME_ENCODING_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_frame_encoding_error);
+		break;
+	case QC_ERR_TRANSPORT_PARAMETER_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_transport_parameter_error);
+		break;
+	case QC_ERR_CONNECTION_ID_LIMIT_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_connection_id_limit);
+		break;
+	case QC_ERR_PROTOCOL_VIOLATION:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_protocol_violation);
+		break;
+	case QC_ERR_INVALID_TOKEN:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_invalid_token);
+		break;
+	case QC_ERR_APPLICATION_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_application_error);
+		break;
+	case QC_ERR_CRYPTO_BUFFER_EXCEEDED:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_crypto_buffer_exceeded);
+		break;
+	case QC_ERR_KEY_UPDATE_ERROR:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_key_update_error);
+		break;
+	case QC_ERR_AEAD_LIMIT_REACHED:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_aead_limit_reached);
+		break;
+	case QC_ERR_NO_VIABLE_PATH:
+		HA_ATOMIC_INC(&ctrs->quic_transp_err_no_viable_path);
+		break;
+	default:
+		/* 0x100-0x1ff: CRYPTO_ERROR range (TLS alerts). */
+		if (error_code >= 0x100 && error_code <= 0x1ff)
+			HA_ATOMIC_INC(&ctrs->quic_transp_err_crypto_error);
+		else
+			HA_ATOMIC_INC(&ctrs->quic_transp_err_unknown_error);
+	}
+}
diff --git a/src/quic_stream.c b/src/quic_stream.c
new file mode 100644
index 0000000..a4b984d
--- /dev/null
+++ b/src/quic_stream.c
@@ -0,0 +1,294 @@
+#include <haproxy/quic_stream.h>
+
+#include <import/eb64tree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/list.h>
+#include <haproxy/mux_quic-t.h>
+#include <haproxy/pool.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/task.h>
+
+DECLARE_STATIC_POOL(pool_head_quic_stream_desc, "qc_stream_desc",
+ sizeof(struct qc_stream_desc));
+DECLARE_STATIC_POOL(pool_head_quic_stream_buf, "qc_stream_buf",
+ sizeof(struct qc_stream_buf));
+
+
+static void qc_stream_buf_free(struct qc_stream_desc *stream,
+ struct qc_stream_buf **stream_buf)
+{
+ struct quic_conn *qc = stream->qc;
+ struct buffer *buf = &(*stream_buf)->buf;
+
+ LIST_DEL_INIT(&(*stream_buf)->list);
+
+ /* Reset current buf ptr if deleted instance is the same one. */
+ if (*stream_buf == stream->buf)
+ stream->buf = NULL;
+
+ b_free(buf);
+ offer_buffers(NULL, 1);
+ pool_free(pool_head_quic_stream_buf, *stream_buf);
+ *stream_buf = NULL;
+
+ /* notify MUX about available buffers. */
+ --qc->stream_buf_count;
+ if (qc->mux_state == QC_MUX_READY) {
+ if (qc->qcc->flags & QC_CF_CONN_FULL) {
+ qc->qcc->flags &= ~QC_CF_CONN_FULL;
+ tasklet_wakeup(qc->qcc->wait_event.tasklet);
+ }
+ }
+}
+
+/* Allocate a new stream descriptor with id <id>. The caller is responsible to
+ * store the stream in the appropriate tree. -1 special value must be used for
+ * a CRYPTO data stream, the type being ignored.
+ *
+ * Returns the newly allocated instance on success or else NULL.
+ */
+struct qc_stream_desc *qc_stream_desc_new(uint64_t id, enum qcs_type type, void *ctx,
+ struct quic_conn *qc)
+{
+ struct qc_stream_desc *stream;
+
+ stream = pool_alloc(pool_head_quic_stream_desc);
+ if (!stream)
+ return NULL;
+
+ if (id == (uint64_t)-1) {
+ stream->by_id.key = (uint64_t)-1;
+ }
+ else {
+ stream->by_id.key = id;
+ eb64_insert(&qc->streams_by_id, &stream->by_id);
+ qc->rx.strms[type].nb_streams++;
+ }
+ stream->qc = qc;
+
+ stream->buf = NULL;
+ LIST_INIT(&stream->buf_list);
+ stream->buf_offset = 0;
+
+ stream->acked_frms = EB_ROOT;
+ stream->ack_offset = 0;
+ stream->release = 0;
+ stream->ctx = ctx;
+
+ return stream;
+}
+
+/* Mark the stream descriptor <stream> as released. It will be freed as soon as
+ * all its buffered data are acknowledged. Does nothing if <stream> is already
+ * NULL.
+ *
+ * <final_size> corresponds to the last offset sent for this stream. If there
+ * is unsent data present, it will be removed first to guarantee that buffer
+ * is freed after receiving all acknowledges.
+ */
+void qc_stream_desc_release(struct qc_stream_desc *stream,
+ uint64_t final_size)
+{
+ if (!stream)
+ return;
+
+ /* A stream can be released only one time. */
+ BUG_ON(stream->release);
+
+ stream->release = 1;
+ stream->ctx = NULL;
+
+ if (stream->buf) {
+ struct qc_stream_buf *stream_buf = stream->buf;
+ struct buffer *buf = &stream_buf->buf;
+ const uint64_t tail_offset =
+ MAX(stream->buf_offset, stream->ack_offset) + b_data(buf);
+
+ /* final_size cannot be greater than all currently stored data. */
+ BUG_ON(final_size > tail_offset);
+
+ /* Remove unsent data from current buffer. */
+ if (final_size < tail_offset) {
+ b_sub(buf, tail_offset - final_size);
+ /* Remove buffer if all ACKs already received. */
+ if (!b_data(buf))
+ qc_stream_buf_free(stream, &stream_buf);
+ }
+
+ /* A released stream does not use <stream.buf>. */
+ stream->buf = NULL;
+ }
+
+ if (LIST_ISEMPTY(&stream->buf_list)) {
+ /* if no buffer left we can free the stream. */
+ qc_stream_desc_free(stream, 0);
+ }
+}
+
+/* Acknowledge data at <offset> of length <len> for <stream>. It is handled
+ * only if it covers a range corresponding to stream.ack_offset. After data
+ * removal, if the stream does not contain data any more and is already
+ * released, the instance stream is freed. <stream> is set to NULL to indicate
+ * this.
+ *
+ * Returns the count of byte removed from stream. Do not forget to check if
+ * <stream> is NULL after invocation.
+ */
+int qc_stream_desc_ack(struct qc_stream_desc **stream, size_t offset, size_t len)
+{
+ struct qc_stream_desc *s = *stream;
+ struct qc_stream_buf *stream_buf;
+ struct buffer *buf;
+ size_t diff;
+
+ if (offset + len <= s->ack_offset || offset > s->ack_offset)
+ return 0;
+
+ /* There must be at least a buffer or we must not report an ACK. */
+ BUG_ON(LIST_ISEMPTY(&s->buf_list));
+
+ /* get oldest buffer from buf_list */
+ stream_buf = LIST_NEXT(&s->buf_list, struct qc_stream_buf *, list);
+ buf = &stream_buf->buf;
+
+ diff = offset + len - s->ack_offset;
+ s->ack_offset += diff;
+ b_del(buf, diff);
+
+ /* Free oldest buffer if all data acknowledged. */
+ if (!b_data(buf)) {
+ qc_stream_buf_free(s, &stream_buf);
+
+ /* Free stream instance if already released and no buffers left. */
+ if (s->release && LIST_ISEMPTY(&s->buf_list)) {
+ qc_stream_desc_free(s, 0);
+ *stream = NULL;
+ }
+ }
+
+ return diff;
+}
+
+/* Free the stream descriptor <stream> content. This function should be used
+ * when all its data have been acknowledged or on full connection closing if <closing>
+ * boolean is set to 1. It must only be called after the stream is released.
+ */
+void qc_stream_desc_free(struct qc_stream_desc *stream, int closing)
+{
+ struct qc_stream_buf *buf, *buf_back;
+ struct quic_conn *qc = stream->qc;
+ struct eb64_node *frm_node;
+ unsigned int free_count = 0;
+
+ /* This function only deals with released streams. */
+ BUG_ON(!stream->release);
+
+ /* free remaining stream buffers */
+ list_for_each_entry_safe(buf, buf_back, &stream->buf_list, list) {
+ if (!(b_data(&buf->buf)) || closing) {
+ b_free(&buf->buf);
+ LIST_DELETE(&buf->list);
+ pool_free(pool_head_quic_stream_buf, buf);
+
+ ++free_count;
+ }
+ }
+
+ if (free_count) {
+ offer_buffers(NULL, free_count);
+
+ qc->stream_buf_count -= free_count;
+ if (qc->mux_state == QC_MUX_READY) {
+ /* notify MUX about available buffers. */
+ if (qc->qcc->flags & QC_CF_CONN_FULL) {
+ qc->qcc->flags &= ~QC_CF_CONN_FULL;
+ tasklet_wakeup(qc->qcc->wait_event.tasklet);
+ }
+ }
+ }
+
+ /* qc_stream_desc might be freed before having received all its ACKs.
+ * This is the case if some frames were retransmitted.
+ */
+ frm_node = eb64_first(&stream->acked_frms);
+ while (frm_node) {
+ struct qf_stream *strm_frm;
+ struct quic_frame *frm;
+
+ strm_frm = eb64_entry(frm_node, struct qf_stream, offset);
+
+ frm_node = eb64_next(frm_node);
+ eb64_delete(&strm_frm->offset);
+
+ frm = container_of(strm_frm, struct quic_frame, stream);
+ qc_release_frm(qc, frm);
+ }
+
+ if (stream->by_id.key != (uint64_t)-1)
+ eb64_delete(&stream->by_id);
+ pool_free(pool_head_quic_stream_desc, stream);
+}
+
+/* Return the current buffer of <stream>. May be NULL if not allocated. */
+struct buffer *qc_stream_buf_get(struct qc_stream_desc *stream)
+{
+ if (!stream->buf)
+ return NULL;
+
+ return &stream->buf->buf;
+}
+
+/* Returns the count of available buffer left for <qc>. */
+static int qc_stream_buf_avail(struct quic_conn *qc)
+{
+ BUG_ON(qc->stream_buf_count > global.tune.quic_streams_buf);
+ return global.tune.quic_streams_buf - qc->stream_buf_count;
+}
+
+/* Allocate a new current buffer for <stream>. The buffer limit count for the
+ * connection is checked first. This function is not allowed if current buffer
+ * is not NULL prior to this call. The new buffer represents stream payload at
+ * offset <offset>.
+ *
+ * Returns the buffer or NULL on error. Caller may check <avail> to ensure if
+ * the connection buffer limit was reached or a fatal error was encountered.
+ */
+struct buffer *qc_stream_buf_alloc(struct qc_stream_desc *stream,
+ uint64_t offset, int *avail)
+{
+ struct quic_conn *qc = stream->qc;
+
+ /* current buffer must be released first before allocate a new one. */
+ BUG_ON(stream->buf);
+
+ *avail = qc_stream_buf_avail(qc);
+ if (!*avail)
+ return NULL;
+
+ stream->buf_offset = offset;
+ stream->buf = pool_alloc(pool_head_quic_stream_buf);
+ if (!stream->buf)
+ return NULL;
+
+ ++qc->stream_buf_count;
+
+ stream->buf->buf = BUF_NULL;
+ LIST_APPEND(&stream->buf_list, &stream->buf->list);
+
+ return &stream->buf->buf;
+}
+
+/* Release the current buffer of <stream>. It will be kept internally by
+ * the <stream>. The current buffer cannot be NULL.
+ */
+void qc_stream_buf_release(struct qc_stream_desc *stream)
+{
+ /* current buffer already released */
+ BUG_ON(!stream->buf);
+
+ stream->buf = NULL;
+ stream->buf_offset = 0;
+}
diff --git a/src/quic_tls.c b/src/quic_tls.c
new file mode 100644
index 0000000..581d615
--- /dev/null
+++ b/src/quic_tls.c
@@ -0,0 +1,1095 @@
+#include <haproxy/quic_tls.h>
+
+#include <string.h>
+
+#include <openssl/evp.h>
+#include <openssl/kdf.h>
+#include <openssl/ssl.h>
+
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/pool.h>
+#include <haproxy/quic_ack.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_rx.h>
+#include <haproxy/quic_stream.h>
+
+
+DECLARE_POOL(pool_head_quic_enc_level, "quic_enc_level", sizeof(struct quic_enc_level));
+DECLARE_POOL(pool_head_quic_pktns, "quic_pktns", sizeof(struct quic_pktns));
+DECLARE_POOL(pool_head_quic_tls_ctx, "quic_tls_ctx", sizeof(struct quic_tls_ctx));
+DECLARE_POOL(pool_head_quic_tls_secret, "quic_tls_secret", QUIC_TLS_SECRET_LEN);
+DECLARE_POOL(pool_head_quic_tls_iv, "quic_tls_iv", QUIC_TLS_IV_LEN);
+DECLARE_POOL(pool_head_quic_tls_key, "quic_tls_key", QUIC_TLS_KEY_LEN);
+
+DECLARE_POOL(pool_head_quic_crypto_buf, "quic_crypto_buf", sizeof(struct quic_crypto_buf));
+DECLARE_STATIC_POOL(pool_head_quic_cstream, "quic_cstream", sizeof(struct quic_cstream));
+
+/* Initial salt depending on QUIC version to derive client/server initial secrets.
+ * This one is for draft-29 QUIC version.
+ */
+const unsigned char initial_salt_draft_29[20] = {
+ 0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c,
+ 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0,
+ 0x43, 0x90, 0xa8, 0x99
+};
+
+const unsigned char initial_salt_v1[20] = {
+ 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3,
+ 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad,
+ 0xcc, 0xbb, 0x7f, 0x0a
+};
+
+const unsigned char initial_salt_v2[20] = {
+ 0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb,
+ 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb,
+ 0xf9, 0xbd, 0x2e, 0xd9
+};
+
+/* Dump the RX/TX secrets of <secs> QUIC TLS secrets. */
+void quic_tls_keys_hexdump(struct buffer *buf,
+ const struct quic_tls_secrets *secs)
+{
+ int i;
+ size_t aead_keylen;
+ size_t aead_ivlen;
+ size_t hp_len;
+
+ if (!secs->aead || !secs->hp)
+ return;
+
+ aead_keylen = (size_t)EVP_CIPHER_key_length(secs->aead);
+ aead_ivlen = (size_t)EVP_CIPHER_iv_length(secs->aead);
+ hp_len = (size_t)EVP_CIPHER_key_length(secs->hp);
+
+ chunk_appendf(buf, "\n key=");
+ for (i = 0; i < aead_keylen; i++)
+ chunk_appendf(buf, "%02x", secs->key[i]);
+ chunk_appendf(buf, "\n iv=");
+ for (i = 0; i < aead_ivlen; i++)
+ chunk_appendf(buf, "%02x", secs->iv[i]);
+ chunk_appendf(buf, "\n hp=");
+ for (i = 0; i < hp_len; i++)
+ chunk_appendf(buf, "%02x", secs->hp_key[i]);
+}
+
+/* Dump the RX/TX secrets of <kp> QUIC TLS key phase */
+void quic_tls_kp_keys_hexdump(struct buffer *buf,
+ const struct quic_tls_kp *kp)
+{
+ int i;
+
+ chunk_appendf(buf, "\n secret=");
+ for (i = 0; i < kp->secretlen; i++)
+ chunk_appendf(buf, "%02x", kp->secret[i]);
+ chunk_appendf(buf, "\n key=");
+ for (i = 0; i < kp->keylen; i++)
+ chunk_appendf(buf, "%02x", kp->key[i]);
+ chunk_appendf(buf, "\n iv=");
+ for (i = 0; i < kp->ivlen; i++)
+ chunk_appendf(buf, "%02x", kp->iv[i]);
+}
+
+/* Release the memory of <pktns> packet number space attached to <qc> QUIC connection. */
+void quic_pktns_release(struct quic_conn *qc, struct quic_pktns **pktns)
+{
+ if (!*pktns)
+ return;
+
+ quic_pktns_tx_pkts_release(*pktns, qc);
+ qc_release_pktns_frms(qc, *pktns);
+ quic_free_arngs(qc, &(*pktns)->rx.arngs);
+ LIST_DEL_INIT(&(*pktns)->list);
+ pool_free(pool_head_quic_pktns, *pktns);
+ *pktns = NULL;
+}
+
+/* Dump <secret> TLS secret. */
+void quic_tls_secret_hexdump(struct buffer *buf,
+ const unsigned char *secret, size_t secret_len)
+{
+ int i;
+
+ chunk_appendf(buf, " secret=");
+ for (i = 0; i < secret_len; i++)
+ chunk_appendf(buf, "%02x", secret[i]);
+}
+
+/* Release the memory allocated for <cs> CRYPTO stream */
+void quic_cstream_free(struct quic_cstream *cs)
+{
+ if (!cs) {
+ /* This is the case for the 0-RTT encryption level */
+ return;
+ }
+
+ quic_free_ncbuf(&cs->rx.ncbuf);
+
+ qc_stream_desc_release(cs->desc, 0);
+ pool_free(pool_head_quic_cstream, cs);
+}
+
+/* Allocate a new QUIC stream for <qc>.
+ * Return it if succeeded, NULL if not.
+ */
+struct quic_cstream *quic_cstream_new(struct quic_conn *qc)
+{
+ struct quic_cstream *cs, *ret_cs = NULL;
+
+ TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);
+ cs = pool_alloc(pool_head_quic_cstream);
+ if (!cs) {
+ TRACE_ERROR("crypto stream allocation failed", QUIC_EV_CONN_INIT, qc);
+ goto leave;
+ }
+
+ cs->rx.offset = 0;
+ cs->rx.ncbuf = NCBUF_NULL;
+ cs->rx.offset = 0;
+
+ cs->tx.offset = 0;
+ cs->tx.sent_offset = 0;
+ cs->tx.buf = BUF_NULL;
+ cs->desc = qc_stream_desc_new((uint64_t)-1, -1, cs, qc);
+ if (!cs->desc) {
+ TRACE_ERROR("crypto stream allocation failed", QUIC_EV_CONN_INIT, qc);
+ goto err;
+ }
+
+ ret_cs = cs;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
+ return ret_cs;
+
+ err:
+ pool_free(pool_head_quic_cstream, cs);
+ goto leave;
+}
+
+/* Uninitialize <qel> QUIC encryption level. Never fails. */
+void quic_conn_enc_level_uninit(struct quic_conn *qc, struct quic_enc_level *qel)
+{
+ int i;
+ struct qf_crypto *qf_crypto, *qfback;
+
+ TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+ for (i = 0; i < qel->tx.crypto.nb_buf; i++) {
+ if (qel->tx.crypto.bufs[i]) {
+ pool_free(pool_head_quic_crypto_buf, qel->tx.crypto.bufs[i]);
+ qel->tx.crypto.bufs[i] = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(qf_crypto, qfback, &qel->rx.crypto_frms, list) {
+ LIST_DELETE(&qf_crypto->list);
+ pool_free(pool_head_qf_crypto, qf_crypto);
+ }
+
+ ha_free(&qel->tx.crypto.bufs);
+ quic_cstream_free(qel->cstream);
+
+ TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
+/* Initialize QUIC TLS encryption level with <level> as level for <qc> QUIC
+ * connection allocating everything needed.
+ *
+ * Returns 1 if succeeded, 0 if not. On error the caller is responsible to use
+ * quic_conn_enc_level_uninit() to cleanup partially allocated content.
+ */
+static int quic_conn_enc_level_init(struct quic_conn *qc,
+ struct quic_enc_level **el,
+ struct quic_pktns *pktns,
+ enum ssl_encryption_level_t level)
+{
+ int ret = 0;
+ struct quic_enc_level *qel;
+
+ TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+ qel = pool_alloc(pool_head_quic_enc_level);
+ if (!qel)
+ goto leave;
+
+ LIST_INIT(&qel->retrans);
+ qel->retrans_frms = NULL;
+ qel->tx.crypto.bufs = NULL;
+ qel->tx.crypto.nb_buf = 0;
+ qel->cstream = NULL;
+ qel->pktns = pktns;
+ qel->level = level;
+ quic_tls_ctx_reset(&qel->tls_ctx);
+
+ qel->rx.pkts = EB_ROOT;
+ LIST_INIT(&qel->rx.pqpkts);
+ LIST_INIT(&qel->rx.crypto_frms);
+
+ /* Allocate only one buffer. */
+ /* TODO: use a pool */
+ qel->tx.crypto.bufs = malloc(sizeof *qel->tx.crypto.bufs);
+ if (!qel->tx.crypto.bufs)
+ goto err;
+
+ qel->tx.crypto.bufs[0] = pool_alloc(pool_head_quic_crypto_buf);
+ if (!qel->tx.crypto.bufs[0])
+ goto err;
+
+
+ qel->tx.crypto.bufs[0]->sz = 0;
+ qel->tx.crypto.nb_buf = 1;
+
+ qel->tx.crypto.sz = 0;
+ qel->tx.crypto.offset = 0;
+ /* No CRYPTO data for early data TLS encryption level */
+ if (level == ssl_encryption_early_data)
+ qel->cstream = NULL;
+ else {
+ qel->cstream = quic_cstream_new(qc);
+ if (!qel->cstream)
+ goto err;
+ }
+
+ LIST_APPEND(&qc->qel_list, &qel->list);
+ *el = qel;
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+ return ret;
+
+ err:
+ quic_conn_enc_level_uninit(qc, qel);
+ pool_free(pool_head_quic_enc_level, qel);
+ goto leave;
+}
+
+/* Allocate a QUIC TLS encryption with <level> as TLS stack encryption to be
+ * attached to <qc> QUIC connection. Also allocate the associated packet number
+ * space object with <pktns> as address to be attached to <qc> if not already
+ * allocated.
+ * Return 1 if succeeded, 0 if not.
+ */
+int qc_enc_level_alloc(struct quic_conn *qc, struct quic_pktns **pktns,
+ struct quic_enc_level **qel, enum ssl_encryption_level_t level)
+{
+ int ret = 0;
+
+ BUG_ON(!qel || !pktns);
+ BUG_ON(*qel && !*pktns);
+
+ if (!*pktns && !quic_pktns_init(qc, pktns))
+ goto leave;
+
+ if (!*qel && !quic_conn_enc_level_init(qc, qel, *pktns, level))
+ goto leave;
+
+ ret = 1;
+ leave:
+ return ret;
+}
+
+/* Free the memory allocated to the encryption level attached to <qc> connection
+ * with <qel> as pointer address. Also remove it from the list of the encryption
+ * levels attached to this connection and reset its value to NULL.
+ * Never fails.
+ */
+void qc_enc_level_free(struct quic_conn *qc, struct quic_enc_level **qel)
+{
+ if (!*qel)
+ return;
+
+ quic_tls_ctx_secs_free(&(*qel)->tls_ctx);
+ quic_conn_enc_level_uninit(qc, *qel);
+ LIST_DEL_INIT(&(*qel)->list);
+ pool_free(pool_head_quic_enc_level, *qel);
+ *qel = NULL;
+}
+
+int quic_hkdf_extract(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *salt, size_t saltlen)
+{
+ EVP_PKEY_CTX *ctx;
+
+ ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
+ if (!ctx)
+ return 0;
+
+ if (EVP_PKEY_derive_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_hkdf_mode(ctx, EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY) <= 0 ||
+ EVP_PKEY_CTX_set_hkdf_md(ctx, md) <= 0 ||
+ EVP_PKEY_CTX_set1_hkdf_salt(ctx, salt, saltlen) <= 0 ||
+ EVP_PKEY_CTX_set1_hkdf_key(ctx, key, keylen) <= 0 ||
+ EVP_PKEY_derive(ctx, buf, &buflen) <= 0)
+ goto err;
+
+ EVP_PKEY_CTX_free(ctx);
+ return 1;
+
+ err:
+ EVP_PKEY_CTX_free(ctx);
+ return 0;
+}
+
+int quic_hkdf_expand(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *label, size_t labellen)
+{
+ EVP_PKEY_CTX *ctx;
+
+ ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
+ if (!ctx)
+ return 0;
+
+ if (EVP_PKEY_derive_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_hkdf_mode(ctx, EVP_PKEY_HKDEF_MODE_EXPAND_ONLY) <= 0 ||
+ EVP_PKEY_CTX_set_hkdf_md(ctx, md) <= 0 ||
+ EVP_PKEY_CTX_set1_hkdf_key(ctx, key, keylen) <= 0 ||
+ EVP_PKEY_CTX_add1_hkdf_info(ctx, label, labellen) <= 0 ||
+ EVP_PKEY_derive(ctx, buf, &buflen) <= 0)
+ goto err;
+
+ EVP_PKEY_CTX_free(ctx);
+ return 1;
+
+ err:
+ EVP_PKEY_CTX_free(ctx);
+ return 0;
+}
+
+/* Extracts a pseudo-random secret key from <key> (which is possibly not
+ * pseudo-random) and expands it to a new pseudo-random key into
+ * <buf> with <buflen> as key length according to HKDF specifications
+ * (https://datatracker.ietf.org/doc/html/rfc5869).
+ * According to these specifications it is highly recommended to use
+ * a salt, even if optional (NULL value).
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_hkdf_extract_and_expand(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *salt, size_t saltlen,
+ const unsigned char *label, size_t labellen)
+{
+ EVP_PKEY_CTX *ctx;
+
+ ctx = EVP_PKEY_CTX_new_id(EVP_PKEY_HKDF, NULL);
+ if (!ctx)
+ return 0;
+
+ if (EVP_PKEY_derive_init(ctx) <= 0 ||
+ EVP_PKEY_CTX_hkdf_mode(ctx, EVP_PKEY_HKDEF_MODE_EXTRACT_AND_EXPAND) <= 0 ||
+ EVP_PKEY_CTX_set_hkdf_md(ctx, md) <= 0 ||
+ EVP_PKEY_CTX_set1_hkdf_salt(ctx, salt, saltlen) <= 0 ||
+ EVP_PKEY_CTX_set1_hkdf_key(ctx, key, keylen) <= 0 ||
+ EVP_PKEY_CTX_add1_hkdf_info(ctx, label, labellen) <= 0 ||
+ EVP_PKEY_derive(ctx, buf, &buflen) <= 0)
+ goto err;
+
+ EVP_PKEY_CTX_free(ctx);
+ return 1;
+
+ err:
+ EVP_PKEY_CTX_free(ctx);
+ return 0;
+}
+
+/* https://quicwg.org/base-drafts/draft-ietf-quic-tls.html#protection-keys
+ * refers to:
+ *
+ * https://tools.ietf.org/html/rfc8446#section-7.1:
+ * 7.1. Key Schedule
+ *
+ * The key derivation process makes use of the HKDF-Extract and
+ * HKDF-Expand functions as defined for HKDF [RFC5869], as well as the
+ * functions defined below:
+ *
+ * HKDF-Expand-Label(Secret, Label, Context, Length) =
+ * HKDF-Expand(Secret, HkdfLabel, Length)
+ *
+ * Where HkdfLabel is specified as:
+ *
+ * struct {
+ * uint16 length = Length;
+ * opaque label<7..255> = "tls13 " + Label;
+ * opaque context<0..255> = Context;
+ * } HkdfLabel;
+ *
+ * Derive-Secret(Secret, Label, Messages) =
+ * HKDF-Expand-Label(Secret, Label,
+ * Transcript-Hash(Messages), Hash.length)
+ *
+ */
+int quic_hkdf_expand_label(const EVP_MD *md,
+ unsigned char *buf, size_t buflen,
+ const unsigned char *key, size_t keylen,
+ const unsigned char *label, size_t labellen)
+{
+ unsigned char hdkf_label[256], *pos;
+ const unsigned char hdkf_label_label[] = "tls13 ";
+ size_t hdkf_label_label_sz = sizeof hdkf_label_label - 1;
+
+ pos = hdkf_label;
+ *pos++ = buflen >> 8;
+ *pos++ = buflen & 0xff;
+ *pos++ = hdkf_label_label_sz + labellen;
+ memcpy(pos, hdkf_label_label, hdkf_label_label_sz);
+ pos += hdkf_label_label_sz;
+ memcpy(pos, label, labellen);
+ pos += labellen;
+ *pos++ = '\0';
+
+ return quic_hkdf_expand(md, buf, buflen,
+ key, keylen, hdkf_label, pos - hdkf_label);
+}
+
+/*
+ * This function derives two keys from <secret> with <ctx> as TLS cryptographic context.
+ * ->key is the TLS key to be derived to encrypt/decrypt data at TLS level.
+ * ->iv is the initialization vector to be used with ->key.
+ * ->hp_key is the key to be derived for header protection.
+ * Obviously these keys have the same size because they are derived with the same TLS cryptographic context.
+ */
+int quic_tls_derive_keys(const EVP_CIPHER *aead, const EVP_CIPHER *hp,
+ const EVP_MD *md, const struct quic_version *qv,
+ unsigned char *key, size_t keylen,
+ unsigned char *iv, size_t ivlen,
+ unsigned char *hp_key, size_t hp_keylen,
+ const unsigned char *secret, size_t secretlen)
+{
+ size_t aead_keylen = (size_t)EVP_CIPHER_key_length(aead);
+ size_t aead_ivlen = (size_t)EVP_CIPHER_iv_length(aead);
+ size_t hp_len = hp ? (size_t)EVP_CIPHER_key_length(hp) : 0;
+
+ if (aead_keylen > keylen || aead_ivlen > ivlen || hp_len > hp_keylen)
+ return 0;
+
+ if (!quic_hkdf_expand_label(md, key, aead_keylen, secret, secretlen,
+ qv->key_label,qv->key_label_len) ||
+ !quic_hkdf_expand_label(md, iv, aead_ivlen, secret, secretlen,
+ qv->iv_label, qv->iv_label_len) ||
+ (hp_key && !quic_hkdf_expand_label(md, hp_key, hp_len, secret, secretlen,
+ qv->hp_label, qv->hp_label_len)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Derive the initial secret from <secret> and QUIC version dependent salt.
+ * Returns the size of the derived secret if succeeded, 0 if not.
+ */
+int quic_derive_initial_secret(const EVP_MD *md,
+ const unsigned char *initial_salt, size_t initial_salt_sz,
+ unsigned char *initial_secret, size_t initial_secret_sz,
+ const unsigned char *secret, size_t secret_sz)
+{
+ if (!quic_hkdf_extract(md, initial_secret, initial_secret_sz, secret, secret_sz,
+ initial_salt, initial_salt_sz))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Derive the client initial secret from the initial secret.
+ * Returns the size of the derived secret if succeeded, 0 if not.
+ */
+int quic_tls_derive_initial_secrets(const EVP_MD *md,
+ unsigned char *rx, size_t rx_sz,
+ unsigned char *tx, size_t tx_sz,
+ const unsigned char *secret, size_t secret_sz,
+ int server)
+{
+ const unsigned char client_label[] = "client in";
+ const unsigned char server_label[] = "server in";
+ const unsigned char *tx_label, *rx_label;
+ size_t rx_label_sz, tx_label_sz;
+
+ if (server) {
+ rx_label = client_label;
+ rx_label_sz = sizeof client_label;
+ tx_label = server_label;
+ tx_label_sz = sizeof server_label;
+ }
+ else {
+ rx_label = server_label;
+ rx_label_sz = sizeof server_label;
+ tx_label = client_label;
+ tx_label_sz = sizeof client_label;
+ }
+
+ if (!quic_hkdf_expand_label(md, rx, rx_sz, secret, secret_sz,
+ rx_label, rx_label_sz - 1) ||
+ !quic_hkdf_expand_label(md, tx, tx_sz, secret, secret_sz,
+ tx_label, tx_label_sz - 1))
+ return 0;
+
+ return 1;
+}
+
+/* Update <sec> secret key into <new_sec> according to RFC 9001 6.1.
+ * Always succeeds.
+ */
+int quic_tls_sec_update(const EVP_MD *md, const struct quic_version *qv,
+ unsigned char *new_sec, size_t new_seclen,
+ const unsigned char *sec, size_t seclen)
+{
+ return quic_hkdf_expand_label(md, new_sec, new_seclen, sec, seclen,
+ qv->ku_label, qv->ku_label_len);
+}
+
+/*
+ * Build an IV into <iv> buffer with <ivlen> as size from <aead_iv> with
+ * <aead_ivlen> as size depending on <pn> packet number.
+ * This is the function which must be called to build an AEAD IV for the AEAD cryptographic algorithm
+ * used to encrypt/decrypt the QUIC packet payloads depending on the packet number <pn>.
+ */
+void quic_aead_iv_build(unsigned char *iv, size_t ivlen,
+ unsigned char *aead_iv, size_t aead_ivlen, uint64_t pn)
+{
+ int i;
+ unsigned int shift;
+ unsigned char *pos = iv;
+
+ /* Input buffers must have the same size. */
+ BUG_ON(ivlen != aead_ivlen);
+
+ for (i = 0; i < ivlen - sizeof pn; i++)
+ *pos++ = *aead_iv++;
+
+ /* Only the remaining (sizeof pn) bytes are XOR'ed. */
+ shift = 56;
+ for (i = aead_ivlen - sizeof pn; i < aead_ivlen ; i++, shift -= 8)
+ *pos++ = *aead_iv++ ^ (pn >> shift);
+}
+
+/* Initialize the cipher context for RX part of <tls_ctx> QUIC TLS context.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_rx_ctx_init(EVP_CIPHER_CTX **rx_ctx,
+ const EVP_CIPHER *aead, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+ int aead_nid = EVP_CIPHER_nid(aead);
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ if (!EVP_DecryptInit_ex(ctx, aead, NULL, NULL, NULL) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, QUIC_TLS_IV_LEN, NULL) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, QUIC_TLS_TAG_LEN, NULL)) ||
+ !EVP_DecryptInit_ex(ctx, NULL, NULL, key, NULL))
+ goto err;
+
+ *rx_ctx = ctx;
+
+ return 1;
+
+ err:
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Initialize <*aes_ctx> AES cipher context with <key> as key for encryption */
+int quic_tls_enc_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ if (!EVP_EncryptInit_ex(ctx, aes, NULL, key, NULL))
+ goto err;
+
+ *aes_ctx = ctx;
+ return 1;
+
+ err:
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Encrypt <inlen> bytes from <in> buffer into <out> with <ctx> as AES
+ * cipher context. This is the responsibility of the caller to check there
+ * is at least <inlen> bytes of available space in <out> buffer.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_aes_encrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx)
+{
+ int ret = 0;
+
+ if (!EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, in) ||
+ !EVP_EncryptUpdate(ctx, out, &ret, out, inlen) ||
+ !EVP_EncryptFinal_ex(ctx, out, &ret))
+ return 0;
+
+ return 1;
+}
+
+/* Initialize <*aes_ctx> AES cipher context with <key> as key for decryption */
+int quic_tls_dec_aes_ctx_init(EVP_CIPHER_CTX **aes_ctx,
+ const EVP_CIPHER *aes, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ if (!EVP_DecryptInit_ex(ctx, aes, NULL, key, NULL))
+ goto err;
+
+ *aes_ctx = ctx;
+ return 1;
+
+ err:
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Decrypt <in> data into <out> with <ctx> as AES cipher context.
+ * This is the responsibility of the caller to check there is at least
+ * <outlen> bytes into <in> buffer.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_aes_decrypt(unsigned char *out,
+ const unsigned char *in, size_t inlen,
+ EVP_CIPHER_CTX *ctx)
+{
+ int ret = 0;
+
+ if (!EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, in) ||
+ !EVP_DecryptUpdate(ctx, out, &ret, out, inlen) ||
+ !EVP_DecryptFinal_ex(ctx, out, &ret))
+ return 0;
+
+ return 1;
+}
+
+/* Initialize the cipher context for TX part of <tls_ctx> QUIC TLS context.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_tx_ctx_init(EVP_CIPHER_CTX **tx_ctx,
+ const EVP_CIPHER *aead, unsigned char *key)
+{
+ EVP_CIPHER_CTX *ctx;
+ int aead_nid = EVP_CIPHER_nid(aead);
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ if (!EVP_EncryptInit_ex(ctx, aead, NULL, NULL, NULL) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, QUIC_TLS_IV_LEN, NULL) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, QUIC_TLS_TAG_LEN, NULL)) ||
+ !EVP_EncryptInit_ex(ctx, NULL, NULL, key, NULL))
+ goto err;
+
+ *tx_ctx = ctx;
+
+ return 1;
+
+ err:
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/*
+ * https://quicwg.org/base-drafts/draft-ietf-quic-tls.html#aead
+ *
+ * 5.3. AEAD Usage
+ *
+ * Packets are protected prior to applying header protection (Section 5.4).
+ * The unprotected packet header is part of the associated data (A). When removing
+ * packet protection, an endpoint first removes the header protection.
+ * (...)
+ * These ciphersuites have a 16-byte authentication tag and produce an output 16
+ * bytes larger than their input.
+ * The key and IV for the packet are computed as described in Section 5.1. The nonce,
+ * N, is formed by combining the packet protection IV with the packet number. The 62
+ * bits of the reconstructed QUIC packet number in network byte order are left-padded
+ * with zeros to the size of the IV. The exclusive OR of the padded packet number and
+ * the IV forms the AEAD nonce.
+ *
+ * The associated data, A, for the AEAD is the contents of the QUIC header, starting
+ * from the flags byte in either the short or long header, up to and including the
+ * unprotected packet number.
+ *
+ * The input plaintext, P, for the AEAD is the payload of the QUIC packet, as described
+ * in [QUIC-TRANSPORT].
+ *
+ * The output ciphertext, C, of the AEAD is transmitted in place of P.
+ *
+ * Some AEAD functions have limits for how many packets can be encrypted under the same
+ * key and IV (see for example [AEBounds]). This might be lower than the packet number limit.
+ * An endpoint MUST initiate a key update (Section 6) prior to exceeding any limit set for
+ * the AEAD that is in use.
+ */
+
+/* Encrypt in place <buf> plaintext with <len> as length with QUIC_TLS_TAG_LEN
+ * included tailing bytes for the tag.
+ * Note that for CCM mode, we must set the ciphertext length if AAD data
+ * are provided from <aad> buffer with <aad_len> as length. This is always the
+ * case here. So the caller of this function must provide <aad>.
+ *
+ * https://wiki.openssl.org/index.php/EVP_Authenticated_Encryption_and_Decryption
+ */
+int quic_tls_encrypt(unsigned char *buf, size_t len,
+ const unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *aead,
+ const unsigned char *iv)
+{
+ int outlen;
+ int aead_nid = EVP_CIPHER_nid(aead);
+
+ if (!EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_EncryptUpdate(ctx, NULL, &outlen, NULL, len)) ||
+ !EVP_EncryptUpdate(ctx, NULL, &outlen, aad, aad_len) ||
+ !EVP_EncryptUpdate(ctx, buf, &outlen, buf, len) ||
+ !EVP_EncryptFinal_ex(ctx, buf + outlen, &outlen) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, QUIC_TLS_TAG_LEN, buf + len))
+ return 0;
+
+ return 1;
+}
+
+/* Decrypt in place <buf> ciphertext with <len> as length with QUIC_TLS_TAG_LEN
+ * included tailing bytes for the tag.
+ * Note that for CCM mode, we must set the ciphertext length if AAD data
+ * are provided from <aad> buffer with <aad_len> as length. This is always the
+ * case here. So the caller of this function must provide <aad>. Also note that
+ * there is no need to call EVP_DecryptFinal_ex for CCM mode.
+ *
+ * https://wiki.openssl.org/index.php/EVP_Authenticated_Encryption_and_Decryption
+ */
+int quic_tls_decrypt(unsigned char *buf, size_t len,
+ unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *aead,
+ const unsigned char *key, const unsigned char *iv)
+{
+ int outlen;
+ int aead_nid = EVP_CIPHER_nid(aead);
+
+ if (!EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, QUIC_TLS_TAG_LEN,
+ buf + len - QUIC_TLS_TAG_LEN) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_DecryptUpdate(ctx, NULL, &outlen, NULL, len - QUIC_TLS_TAG_LEN)) ||
+ !EVP_DecryptUpdate(ctx, NULL, &outlen, aad, aad_len) ||
+ !EVP_DecryptUpdate(ctx, buf, &outlen, buf, len - QUIC_TLS_TAG_LEN) ||
+ (aead_nid != NID_aes_128_ccm &&
+ !EVP_DecryptFinal_ex(ctx, buf + outlen, &outlen)))
+ return 0;
+
+ return 1;
+}
+
+/* Similar to quic_tls_decrypt(), except that this function does not decrypt
+ * in place its ciphertext if <out> output buffer with <len> as length
+ * is different from <in> input buffer. It is the responsibility of the caller
+ * to check that the output buffer has at least the same size as the input buffer.
+ * Note that for CCM mode, we must set the ciphertext length if AAD data
+ * are provided from <aad> buffer with <aad_len> as length. This is always the
+ * case here. So the caller of this function must provide <aad>. Also note that
+ * there is no need to call EVP_DecryptFinal_ex for CCM mode.
+ * As for quic_tls_decrypt(), <key> is not used: the key must already be set
+ * in <ctx>.
+ *
+ * https://wiki.openssl.org/index.php/EVP_Authenticated_Encryption_and_Decryption
+ *
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_decrypt2(unsigned char *out,
+ unsigned char *in, size_t len,
+ unsigned char *aad, size_t aad_len,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *aead,
+ const unsigned char *key, const unsigned char *iv)
+{
+ int outlen;
+ int aead_nid = EVP_CIPHER_nid(aead);
+
+ /* Strip the trailing tag from the ciphertext length: only the payload
+  * before the tag is decrypted.
+  */
+ len -= QUIC_TLS_TAG_LEN;
+ if (!EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) ||
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, QUIC_TLS_TAG_LEN, in + len) ||
+ (aead_nid == NID_aes_128_ccm &&
+ !EVP_DecryptUpdate(ctx, NULL, &outlen, NULL, len)) ||
+ !EVP_DecryptUpdate(ctx, NULL, &outlen, aad, aad_len) ||
+ !EVP_DecryptUpdate(ctx, out, &outlen, in, len) ||
+ (aead_nid != NID_aes_128_ccm &&
+ !EVP_DecryptFinal_ex(ctx, out + outlen, &outlen)))
+ return 0;
+
+ return 1;
+}
+
+/* Derive <key> and <iv> key and IV to be used to encrypt a retry token
+ * with <secret> which is not pseudo-random: a first HKDF-Extract with
+ * <salt> produces a pseudo-random intermediate key <tmpkey>, then two
+ * HKDF-Expand calls with distinct labels derive the key and the IV from it.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_derive_retry_token_secret(const EVP_MD *md,
+ unsigned char *key, size_t keylen,
+ unsigned char *iv, size_t ivlen,
+ const unsigned char *salt, size_t saltlen,
+ const unsigned char *secret, size_t secretlen)
+{
+ unsigned char tmpkey[QUIC_TLS_KEY_LEN];
+ const unsigned char key_label[] = "retry token key";
+ const unsigned char iv_label[] = "retry token iv";
+
+ /* Labels are expanded without their trailing NUL byte (sizeof - 1). */
+ if (!quic_hkdf_extract(md, tmpkey, sizeof tmpkey,
+ secret, secretlen, salt, saltlen) ||
+ !quic_hkdf_expand(md, key, keylen, tmpkey, sizeof tmpkey,
+ key_label, sizeof key_label - 1) ||
+ !quic_hkdf_expand(md, iv, ivlen, tmpkey, sizeof tmpkey,
+ iv_label, sizeof iv_label - 1))
+ return 0;
+
+ return 1;
+}
+
+/* Generate the AEAD tag for the Retry packet <pkt> of <pkt_len> bytes and
+ * write it to <tag>. The tag is written just after the <pkt> area. It should
+ * be at least 16 bytes long. <odcid> is the CID of the Initial packet
+ * received which triggers the Retry.
+ *
+ * Returns non-zero on success else zero.
+ */
+int quic_tls_generate_retry_integrity_tag(unsigned char *odcid, unsigned char odcid_len,
+ unsigned char *pkt, size_t pkt_len,
+ const struct quic_version *qv)
+{
+ const EVP_CIPHER *evp = EVP_aes_128_gcm();
+ EVP_CIPHER_CTX *ctx;
+
+ /* encryption buffer - not used as only AEAD tag generation is performed */
+ unsigned char *out = NULL;
+ /* address to store the AEAD tag */
+ unsigned char *tag = pkt + pkt_len;
+ int outlen, ret = 0;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx)
+ return 0;
+
+ /* rfc9001 5.8. Retry Packet Integrity
+ *
+ * AEAD is performed over a pseudo-Retry packet used as AAD. It contains
+ * the ODCID len + data and the Retry packet itself. The key and nonce
+ * are fixed per QUIC version (qv->retry_tag_key/retry_tag_nonce).
+ */
+ if (!EVP_EncryptInit_ex(ctx, evp, NULL, qv->retry_tag_key, qv->retry_tag_nonce) ||
+ /* specify pseudo-Retry as AAD */
+ !EVP_EncryptUpdate(ctx, NULL, &outlen, &odcid_len, sizeof(odcid_len)) ||
+ !EVP_EncryptUpdate(ctx, NULL, &outlen, odcid, odcid_len) ||
+ !EVP_EncryptUpdate(ctx, NULL, &outlen, pkt, pkt_len) ||
+ /* finalize (no plaintext was submitted, so nothing is written to <out>) */
+ !EVP_EncryptFinal_ex(ctx, out, &outlen) ||
+ /* store the tag */
+ !EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, QUIC_TLS_TAG_LEN, tag)) {
+ goto out;
+ }
+ ret = 1;
+
+ out:
+ EVP_CIPHER_CTX_free(ctx);
+ return ret;
+}
+
+/* Derive new keys and ivs required for Key Update feature for <qc> QUIC
+ * connection: the next RX and TX secrets are derived from the current ones
+ * (rfc9001 6. Key Update), then the corresponding keys, IVs and cipher
+ * contexts are (re)built into qc->ku.nxt_rx/nxt_tx.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_key_update(struct quic_conn *qc)
+{
+ struct quic_tls_ctx *tls_ctx = &qc->ael->tls_ctx;
+ struct quic_tls_secrets *rx = &tls_ctx->rx;
+ struct quic_tls_secrets *tx = &tls_ctx->tx;
+ /* Used only for the traces */
+ struct quic_kp_trace kp_trace = {
+ .rx_sec = rx->secret,
+ .rx_seclen = rx->secretlen,
+ .tx_sec = tx->secret,
+ .tx_seclen = tx->secretlen,
+ };
+ /* The next key phase secrets to be derived */
+ struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
+ struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
+ const struct quic_version *ver =
+ qc->negotiated_version ? qc->negotiated_version : qc->original_version;
+ int ret = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_KP, qc);
+
+ /* NOTE(review): redundant, nxt_rx/nxt_tx are already initialized to
+  * these values above; harmless.
+  */
+ nxt_rx = &qc->ku.nxt_rx;
+ nxt_tx = &qc->ku.nxt_tx;
+
+ TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QUIC_EV_CONN_SPPKTS, qc, 0, 0, 0,
+ "nxt_rx->secretlen=%llu rx->secretlen=%llu",
+ (ull)nxt_rx->secretlen, (ull)rx->secretlen);
+ /* Prepare new RX secrets */
+ if (!quic_tls_sec_update(rx->md, ver, nxt_rx->secret, nxt_rx->secretlen,
+ rx->secret, rx->secretlen)) {
+ TRACE_ERROR("New RX secret update failed", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ if (!quic_tls_derive_keys(rx->aead, NULL, rx->md, ver,
+ nxt_rx->key, nxt_rx->keylen,
+ nxt_rx->iv, nxt_rx->ivlen, NULL, 0,
+ nxt_rx->secret, nxt_rx->secretlen)) {
+ TRACE_ERROR("New RX key derivation failed", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ kp_trace.rx = nxt_rx;
+ /* Prepare new TX secrets */
+ if (!quic_tls_sec_update(tx->md, ver, nxt_tx->secret, nxt_tx->secretlen,
+ tx->secret, tx->secretlen)) {
+ TRACE_ERROR("New TX secret update failed", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ if (!quic_tls_derive_keys(tx->aead, NULL, tx->md, ver,
+ nxt_tx->key, nxt_tx->keylen,
+ nxt_tx->iv, nxt_tx->ivlen, NULL, 0,
+ nxt_tx->secret, nxt_tx->secretlen)) {
+ TRACE_ERROR("New TX key derivation failed", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ kp_trace.tx = nxt_tx;
+ /* Release any stale cipher context before rebuilding it with the new key. */
+ if (nxt_rx->ctx) {
+ EVP_CIPHER_CTX_free(nxt_rx->ctx);
+ nxt_rx->ctx = NULL;
+ }
+
+ if (!quic_tls_rx_ctx_init(&nxt_rx->ctx, tls_ctx->rx.aead, nxt_rx->key)) {
+ TRACE_ERROR("could not initialize RX TLS cipher context", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ if (nxt_tx->ctx) {
+ EVP_CIPHER_CTX_free(nxt_tx->ctx);
+ nxt_tx->ctx = NULL;
+ }
+
+ if (!quic_tls_tx_ctx_init(&nxt_tx->ctx, tls_ctx->tx.aead, nxt_tx->key)) {
+ TRACE_ERROR("could not initialize TX TLS cipher context", QUIC_EV_CONN_KP, qc);
+ goto leave;
+ }
+
+ ret = 1;
+ leave:
+ TRACE_PROTO("key update", QUIC_EV_CONN_KP, qc, &kp_trace);
+ TRACE_LEAVE(QUIC_EV_CONN_KP, qc);
+ return ret;
+}
+
+/* Rotate the Key Update information for <qc> QUIC connection.
+ * Must be used after having updated them.
+ * RX material rotates through three slots (prv <- current <- next <- old prv)
+ * so that packets protected with the previous keys can still be decrypted,
+ * while TX material simply swaps current and next buffers.
+ * Always succeeds.
+ */
+void quic_tls_rotate_keys(struct quic_conn *qc)
+{
+ struct quic_tls_ctx *tls_ctx = &qc->ael->tls_ctx;
+ unsigned char *curr_secret, *curr_iv, *curr_key;
+ EVP_CIPHER_CTX *curr_ctx;
+
+ TRACE_ENTER(QUIC_EV_CONN_RXPKT, qc);
+
+ /* Rotate the RX secrets */
+ curr_ctx = tls_ctx->rx.ctx;
+ curr_secret = tls_ctx->rx.secret;
+ curr_iv = tls_ctx->rx.iv;
+ curr_key = tls_ctx->rx.key;
+
+ tls_ctx->rx.ctx = qc->ku.nxt_rx.ctx;
+ tls_ctx->rx.secret = qc->ku.nxt_rx.secret;
+ tls_ctx->rx.iv = qc->ku.nxt_rx.iv;
+ tls_ctx->rx.key = qc->ku.nxt_rx.key;
+
+ qc->ku.nxt_rx.ctx = qc->ku.prv_rx.ctx;
+ qc->ku.nxt_rx.secret = qc->ku.prv_rx.secret;
+ qc->ku.nxt_rx.iv = qc->ku.prv_rx.iv;
+ qc->ku.nxt_rx.key = qc->ku.prv_rx.key;
+
+ /* The old current material becomes the previous one; also record the
+  * RX packet number at rotation time alongside the previous keys.
+  */
+ qc->ku.prv_rx.ctx = curr_ctx;
+ qc->ku.prv_rx.secret = curr_secret;
+ qc->ku.prv_rx.iv = curr_iv;
+ qc->ku.prv_rx.key = curr_key;
+ qc->ku.prv_rx.pn = tls_ctx->rx.pn;
+
+ /* Update the TX secrets */
+ curr_ctx = tls_ctx->tx.ctx;
+ curr_secret = tls_ctx->tx.secret;
+ curr_iv = tls_ctx->tx.iv;
+ curr_key = tls_ctx->tx.key;
+
+ tls_ctx->tx.ctx = qc->ku.nxt_tx.ctx;
+ tls_ctx->tx.secret = qc->ku.nxt_tx.secret;
+ tls_ctx->tx.iv = qc->ku.nxt_tx.iv;
+ tls_ctx->tx.key = qc->ku.nxt_tx.key;
+
+ qc->ku.nxt_tx.ctx = curr_ctx;
+ qc->ku.nxt_tx.secret = curr_secret;
+ qc->ku.nxt_tx.iv = curr_iv;
+ qc->ku.nxt_tx.key = curr_key;
+
+ TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
+}
+
+/* Release the memory allocated for the QUIC TLS context with <ctx> as address
+ * and reset <*ctx> to NULL to prevent any use after free.
+ * Does nothing if <*ctx> is already NULL.
+ */
+void quic_tls_ctx_free(struct quic_tls_ctx **ctx)
+{
+ if (!*ctx)
+ return;
+
+ quic_tls_ctx_secs_free(*ctx);
+ pool_free(pool_head_quic_tls_ctx, *ctx);
+ *ctx = NULL;
+}
+
+/* Finalize <qc> QUIC connection:
+ * - allocate and initialize the Initial QUIC TLS context for the negotiated
+ *   version if needed,
+ * - derive the secrets for this context,
+ * - set them into the TLS stack,
+ *
+ * Nothing to do (reported as success) when no version was negotiated.
+ * On failure the freshly allocated context is released.
+ * Return 1 if succeeded, 0 if not.
+ */
+int quic_tls_finalize(struct quic_conn *qc, int server)
+{
+ int ret = 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+ if (!qc->negotiated_version)
+ goto done;
+
+ qc->nictx = pool_alloc(pool_head_quic_tls_ctx);
+ if (!qc->nictx)
+ goto err;
+
+ quic_tls_ctx_reset(qc->nictx);
+ if (!qc_new_isecs(qc, qc->nictx, qc->negotiated_version,
+ qc->odcid.data, qc->odcid.len, server))
+ goto err;
+
+ done:
+ ret = 1;
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+ return ret;
+
+ err:
+ quic_tls_ctx_free(&qc->nictx);
+ goto out;
+}
diff --git a/src/quic_tp.c b/src/quic_tp.c
new file mode 100644
index 0000000..caf48ce
--- /dev/null
+++ b/src/quic_tp.c
@@ -0,0 +1,714 @@
+#include <arpa/inet.h>
+#include <string.h>
+
+#include <haproxy/global.h>
+#include <haproxy/ncbuf-t.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_enc.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+#define QUIC_MAX_UDP_PAYLOAD_SIZE 2048
+
+/* These are the values of some QUIC transport parameters when absent
+ * (RFC 9000 18.2 default values).
+ * Should be used to initialize any transport parameters (local or remote)
+ * before updating them with customized values.
+ */
+struct quic_transport_params quic_dflt_transport_params = {
+ .max_udp_payload_size = QUIC_TP_DFLT_MAX_UDP_PAYLOAD_SIZE,
+ .ack_delay_exponent = QUIC_TP_DFLT_ACK_DELAY_COMPONENT,
+ .max_ack_delay = QUIC_TP_DFLT_MAX_ACK_DELAY,
+ .active_connection_id_limit = QUIC_TP_DFLT_ACTIVE_CONNECTION_ID_LIMIT,
+};
+
+/* Initialize <dst> transport parameters with default values (when absent)
+ * from <quic_dflt_transport_params>. Only the four parameters with an
+ * RFC-defined default are copied; all others are left untouched.
+ * Never fails.
+ */
+static void quic_dflt_transport_params_cpy(struct quic_transport_params *dst)
+{
+ dst->max_udp_payload_size = quic_dflt_transport_params.max_udp_payload_size;
+ dst->ack_delay_exponent = quic_dflt_transport_params.ack_delay_exponent;
+ dst->max_ack_delay = quic_dflt_transport_params.max_ack_delay;
+ dst->active_connection_id_limit = quic_dflt_transport_params.active_connection_id_limit;
+}
+
+/* Initialize <p> transport parameters. <server> is a boolean, set if TPs are
+ * used by a server (haproxy frontend) else this is for a client (haproxy
+ * backend).
+ *
+ * This must only be used for haproxy local parameters. To initialize peer
+ * parameters, see quic_dflt_transport_params_cpy().
+ *
+ * Never fails.
+ */
+void quic_transport_params_init(struct quic_transport_params *p, int server)
+{
+ const uint64_t ncb_size = global.tune.bufsize - NCB_RESERVED_SZ;
+ const int max_streams_bidi = global.tune.quic_frontend_max_streams_bidi;
+ /* assumed to cover the HTTP/3 control stream plus the two QPACK
+  * streams - TODO confirm against the H3 layer
+  */
+ const int max_streams_uni = 3;
+
+ /* Set RFC default values for unspecified parameters. */
+ quic_dflt_transport_params_cpy(p);
+
+ /* Set the max_udp_payload_size value. If left unset it would default
+  * to QUIC_TP_DFLT_MAX_UDP_PAYLOAD_SIZE.
+  */
+ p->max_udp_payload_size = QUIC_MAX_UDP_PAYLOAD_SIZE;
+ if (server)
+ p->max_idle_timeout = global.tune.quic_frontend_max_idle_timeout;
+ else
+ p->max_idle_timeout = global.tune.quic_backend_max_idle_timeout;
+
+ /* Flow control: one ncbuf worth of data per stream, and a connection
+  * level limit sized for all streams at once.
+  */
+ p->initial_max_streams_bidi = max_streams_bidi;
+ p->initial_max_streams_uni = max_streams_uni;
+ p->initial_max_stream_data_bidi_local = ncb_size;
+ p->initial_max_stream_data_bidi_remote = ncb_size;
+ p->initial_max_stream_data_uni = ncb_size;
+ p->initial_max_data = (max_streams_bidi + max_streams_uni) * ncb_size;
+
+ if (server) {
+ p->with_stateless_reset_token = 1;
+ p->disable_active_migration = 1;
+ }
+
+ p->active_connection_id_limit = 8;
+
+ p->retry_source_connection_id.len = 0;
+}
+
+/* Encode <addr> preferred address transport parameter in <buf> without its
+ * "type+len" prefix.
+ * Fields are laid out in the RFC 9000 18.2 order (address before port, for
+ * IPv4 then IPv6), which is also the order expected by the decoding
+ * counterpart quic_transport_param_dec_pref_addr().
+ * It is the responsibility of the caller to check there is enough room in <buf> to encode
+ * this address.
+ * Never fails.
+ */
+static void quic_transport_param_enc_pref_addr_val(unsigned char **buf,
+ const unsigned char *end,
+ struct tp_preferred_address *addr)
+{
+ /* IPv4 address first, then its port: the previous port-first layout
+  * did not match the RFC nor our own decoder.
+  */
+ memcpy(*buf, (uint8_t *)&addr->ipv4_addr.s_addr, sizeof(addr->ipv4_addr.s_addr));
+ *buf += sizeof(addr->ipv4_addr.s_addr);
+
+ write_n16(*buf, addr->ipv4_port);
+ *buf += sizeof addr->ipv4_port;
+
+ memcpy(*buf, addr->ipv6_addr.s6_addr, sizeof(addr->ipv6_addr.s6_addr));
+ *buf += sizeof(addr->ipv6_addr.s6_addr);
+
+ write_n16(*buf, addr->ipv6_port);
+ *buf += sizeof addr->ipv6_port;
+
+ /* CID length, CID value (may be empty), then the stateless reset token. */
+ *(*buf)++ = addr->cid.len;
+ if (addr->cid.len) {
+ memcpy(*buf, addr->cid.data, addr->cid.len);
+ *buf += addr->cid.len;
+ }
+
+ memcpy(*buf, addr->stateless_reset_token, sizeof addr->stateless_reset_token);
+ *buf += sizeof addr->stateless_reset_token;
+}
+
+/* Decode into <addr> preferred address transport parameter found in <*buf> buffer,
+ * following the RFC 9000 18.2 field order: IPv4 address, IPv4 port, IPv6
+ * address, IPv6 port, CID length, CID, stateless reset token.
+ * The whole value must be consumed exactly (<*buf> must land on <end>).
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_dec_pref_addr(struct tp_preferred_address *addr,
+ const unsigned char **buf,
+ const unsigned char *end)
+{
+ ssize_t addr_len;
+
+ /* Minimum fixed-size part: both address/port pairs plus the CID
+  * length byte.
+  */
+ addr_len = sizeof(addr->ipv4_port) + sizeof(addr->ipv4_addr.s_addr);
+ addr_len += sizeof(addr->ipv6_port) + sizeof(addr->ipv6_addr.s6_addr);
+ addr_len += sizeof(addr->cid.len);
+
+ if (end - *buf < addr_len)
+ return 0;
+
+ memcpy((uint8_t *)&addr->ipv4_addr.s_addr, *buf, sizeof(addr->ipv4_addr.s_addr));
+ *buf += sizeof(addr->ipv4_addr.s_addr);
+
+ addr->ipv4_port = read_n16(*buf);
+ *buf += sizeof addr->ipv4_port;
+
+ memcpy(addr->ipv6_addr.s6_addr, *buf, sizeof(addr->ipv6_addr.s6_addr));
+ *buf += sizeof(addr->ipv6_addr.s6_addr);
+
+ addr->ipv6_port = read_n16(*buf);
+ *buf += sizeof addr->ipv6_port;
+
+ addr->cid.len = *(*buf)++;
+ if (addr->cid.len) {
+ /* NOTE(review): <end> - sizeof(token) may point before <*buf> when
+  * fewer than sizeof(token) bytes remain after the CID length byte;
+  * confirm a truncated parameter cannot make this pointer arithmetic
+  * misbehave before relying on the final exact-length check below.
+  */
+ if (end - sizeof(addr->stateless_reset_token) - *buf > addr->cid.len ||
+ addr->cid.len > sizeof(addr->cid.data)) {
+ return 0;
+ }
+
+ memcpy(addr->cid.data, *buf, addr->cid.len);
+ *buf += addr->cid.len;
+ }
+
+ /* Exactly the stateless reset token must remain. */
+ if (end - *buf != sizeof(addr->stateless_reset_token))
+ return 0;
+
+ memcpy(addr->stateless_reset_token, *buf, end - *buf);
+ *buf += sizeof addr->stateless_reset_token;
+
+ return *buf == end;
+}
+
+/* Decode into <tp> version information received transport parameters from <*buf>
+ * buffer. <server> must be set to 1 for QUIC clients which receive server
+ * transport parameters, and 0 for QUIC servers which receive client transport
+ * parameters.
+ * Also set the QUIC negotiated version into <tp>.
+ * Return 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_dec_version_info(struct tp_version_information *tp,
+ const unsigned char **buf,
+ const unsigned char *end, int server)
+{
+ size_t tp_len = end - *buf;
+ const uint32_t *ver, *others;
+
+ /* <tp_len> must be a multiple of sizeof(uint32_t) */
+ if (tp_len < sizeof tp->chosen || (tp_len & 0x3))
+ return 0;
+
+ /* NOTE(review): type-punned, possibly unaligned 32-bit read from the
+  * network buffer - presumably safe on the supported targets, otherwise
+  * a memcpy()-based read would be needed. TODO confirm.
+  */
+ tp->chosen = ntohl(*(uint32_t *)*buf);
+ /* Must not be null */
+ if (!tp->chosen)
+ return 0;
+
+ *buf += sizeof tp->chosen;
+ others = (const uint32_t *)*buf;
+
+ /* Others versions must not be null */
+ for (ver = others; ver < (const uint32_t *)end; ver++) {
+ if (!*ver)
+ return 0;
+ }
+
+ if (server)
+ /* TODO: not supported */
+ return 0;
+
+ /* Pick the first "other" version we support as negotiated version,
+  * unless <preferred_version> (global, presumably set from the
+  * configuration - verify) appears in the list, in which case it wins.
+  */
+ for (ver = others; ver < (const uint32_t *)end; ver++) {
+ if (!tp->negotiated_version) {
+ int i;
+
+ for (i = 0; i < quic_versions_nb; i++) {
+ if (ntohl(*ver) == quic_versions[i].num) {
+ tp->negotiated_version = &quic_versions[i];
+ break;
+ }
+ }
+ }
+
+ if (preferred_version && ntohl(*ver) == preferred_version->num) {
+ tp->negotiated_version = preferred_version;
+ goto out;
+ }
+ }
+
+ out:
+ *buf = end;
+
+ return 1;
+}
+
+/* Decode into <p> struct a transport parameter found in <*buf> buffer with
+ * <type> as type and <len> as length, depending on <server> boolean value which
+ * must be set to 1 for a server (haproxy listener) or 0 for a client (connection
+ * to an haproxy server).
+ * Some parameters are only valid when sent by a server (ODCID, stateless
+ * reset token, preferred address) and are rejected otherwise.
+ * Returns 1 if succeeded with the whole value consumed, 0 if not.
+ */
+static int quic_transport_param_decode(struct quic_transport_params *p,
+ int server, uint64_t type,
+ const unsigned char **buf, size_t len)
+{
+ const unsigned char *end = *buf + len;
+
+ switch (type) {
+ case QUIC_TP_ORIGINAL_DESTINATION_CONNECTION_ID:
+ if (!server || len > sizeof p->original_destination_connection_id.data)
+ return 0;
+
+ if (len)
+ memcpy(p->original_destination_connection_id.data, *buf, len);
+ p->original_destination_connection_id.len = len;
+ *buf += len;
+ p->original_destination_connection_id_present = 1;
+ break;
+ case QUIC_TP_INITIAL_SOURCE_CONNECTION_ID:
+ if (len > sizeof p->initial_source_connection_id.data)
+ return 0;
+
+ if (len)
+ memcpy(p->initial_source_connection_id.data, *buf, len);
+ p->initial_source_connection_id.len = len;
+ *buf += len;
+ p->initial_source_connection_id_present = 1;
+ break;
+ case QUIC_TP_STATELESS_RESET_TOKEN:
+ if (!server || len != sizeof p->stateless_reset_token)
+ return 0;
+ memcpy(p->stateless_reset_token, *buf, len);
+ *buf += len;
+ p->with_stateless_reset_token = 1;
+ break;
+ case QUIC_TP_PREFERRED_ADDRESS:
+ if (!server)
+ return 0;
+ if (!quic_transport_param_dec_pref_addr(&p->preferred_address, buf, *buf + len))
+ return 0;
+ p->with_preferred_address = 1;
+ break;
+ case QUIC_TP_MAX_IDLE_TIMEOUT:
+ if (!quic_dec_int(&p->max_idle_timeout, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_MAX_UDP_PAYLOAD_SIZE:
+ if (!quic_dec_int(&p->max_udp_payload_size, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_DATA:
+ if (!quic_dec_int(&p->initial_max_data, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL:
+ if (!quic_dec_int(&p->initial_max_stream_data_bidi_local, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE:
+ if (!quic_dec_int(&p->initial_max_stream_data_bidi_remote, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_STREAM_DATA_UNI:
+ if (!quic_dec_int(&p->initial_max_stream_data_uni, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_STREAMS_BIDI:
+ if (!quic_dec_int(&p->initial_max_streams_bidi, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_INITIAL_MAX_STREAMS_UNI:
+ if (!quic_dec_int(&p->initial_max_streams_uni, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_ACK_DELAY_EXPONENT:
+ if (!quic_dec_int(&p->ack_delay_exponent, buf, end) ||
+ p->ack_delay_exponent > QUIC_TP_ACK_DELAY_EXPONENT_LIMIT)
+ return 0;
+ break;
+ case QUIC_TP_MAX_ACK_DELAY:
+ if (!quic_dec_int(&p->max_ack_delay, buf, end) ||
+ p->max_ack_delay > QUIC_TP_MAX_ACK_DELAY_LIMIT)
+ return 0;
+ break;
+ case QUIC_TP_DISABLE_ACTIVE_MIGRATION:
+ /* Zero-length parameter type. */
+ if (len != 0)
+ return 0;
+ p->disable_active_migration = 1;
+ break;
+ case QUIC_TP_ACTIVE_CONNECTION_ID_LIMIT:
+ if (!quic_dec_int(&p->active_connection_id_limit, buf, end))
+ return 0;
+ break;
+ case QUIC_TP_VERSION_INFORMATION:
+ if (!quic_transport_param_dec_version_info(&p->version_information,
+ buf, *buf + len, server))
+ return 0;
+ break;
+ default:
+ /* Unknown transport parameters must be ignored: skip the value. */
+ *buf += len;
+ };
+
+ return *buf == end;
+}
+
+/* Encode <type> and <len> variable length values (QUIC varints) in <buf>,
+ * i.e. the "type+len" prefix of any transport parameter.
+ * Returns 1 if succeeded, 0 if not (not enough room in <buf>).
+ */
+static int quic_transport_param_encode_type_len(unsigned char **buf,
+ const unsigned char *end,
+ uint64_t type, uint64_t len)
+{
+ return quic_enc_int(buf, end, type) && quic_enc_int(buf, end, len);
+}
+
+/* Decode variable length type and length values of a QUIC transport parameter
+ * into <type> and <len> found in <*buf> buffer (the "type+len" prefix).
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_decode_type_len(uint64_t *type, uint64_t *len,
+ const unsigned char **buf,
+ const unsigned char *end)
+{
+ return quic_dec_int(type, buf, end) && quic_dec_int(len, buf, end);
+}
+
+/* Encode <param> bytes stream with <type> as type and <length> as length into buf.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_enc_mem(unsigned char **buf, const unsigned char *end,
+ uint64_t type, void *param, uint64_t length)
+{
+ if (!quic_transport_param_encode_type_len(buf, end, type, length))
+ return 0;
+
+ if (end - *buf < length)
+ return 0;
+
+ if (length)
+ memcpy(*buf, param, length);
+ *buf += length;
+
+ return 1;
+}
+
+/* Encode <val> 64-bits value as variable length integer into <buf>, prefixed
+ * by its <type> and varint-encoded length.
+ * A zero length returned by quic_int_getsize() means <val> is not encodable
+ * as a varint and makes the function fail.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_enc_int(unsigned char **buf,
+ const unsigned char *end,
+ uint64_t type, uint64_t val)
+{
+ size_t len;
+
+ len = quic_int_getsize(val);
+
+ return len && quic_transport_param_encode_type_len(buf, end, type, len) &&
+ quic_enc_int(buf, end, val);
+}
+
+/* Returns the required length in bytes to encode <cid> QUIC connection ID:
+ * one length byte followed by the CID value.
+ */
+static inline size_t sizeof_quic_cid(const struct tp_cid *cid)
+{
+ return sizeof cid->len + cid->len;
+}
+
+/* Encode <addr> preferred address into <buf>: the "type+len" prefix is
+ * emitted here, the value itself by quic_transport_param_enc_pref_addr_val().
+ * Returns 1 if succeeded, 0 if not (not enough room in <buf>).
+ */
+static int quic_transport_param_enc_pref_addr(unsigned char **buf,
+ const unsigned char *end,
+ struct tp_preferred_address *addr)
+{
+ uint64_t addr_len = 0;
+
+ /* Compute the exact value length to announce in the prefix. */
+ addr_len += sizeof(addr->ipv4_port) + sizeof(addr->ipv4_addr.s_addr);
+ addr_len += sizeof(addr->ipv6_port) + sizeof(addr->ipv6_addr.s6_addr);
+ addr_len += sizeof_quic_cid(&addr->cid);
+ addr_len += sizeof(addr->stateless_reset_token);
+
+ if (!quic_transport_param_encode_type_len(buf, end, QUIC_TP_PREFERRED_ADDRESS, addr_len))
+ return 0;
+
+ if (end - *buf < addr_len)
+ return 0;
+
+ quic_transport_param_enc_pref_addr_val(buf, end, addr);
+
+ return 1;
+}
+
+/* Encode version information transport parameters with <chosen_version> as chosen
+ * version.
+ * Note that <server> is currently unused: the full supported version list is
+ * always emitted after the chosen version.
+ * Return 1 if succeeded, 0 if not.
+ */
+static int quic_transport_param_enc_version_info(unsigned char **buf,
+ const unsigned char *end,
+ const struct quic_version *chosen_version,
+ int server)
+{
+ int i;
+ uint64_t tp_len;
+ uint32_t ver;
+
+ tp_len = sizeof chosen_version->num + quic_versions_nb * sizeof(uint32_t);
+ if (!quic_transport_param_encode_type_len(buf, end,
+ QUIC_TP_VERSION_INFORMATION,
+ tp_len))
+ return 0;
+
+ if (end - *buf < tp_len)
+ return 0;
+
+ /* First: chosen version */
+ ver = htonl(chosen_version->num);
+ memcpy(*buf, &ver, sizeof ver);
+ *buf += sizeof ver;
+ /* Then all supported versions, chosen included, in network byte order. */
+ for (i = 0; i < quic_versions_nb; i++) {
+ ver = htonl(quic_versions[i].num);
+ memcpy(*buf, &ver, sizeof ver);
+ *buf += sizeof ver;
+ }
+
+ return 1;
+}
+
+/* Encode <p> transport parameter into <buf> depending on <server> value which
+ * must be set to 1 for a server (haproxy listener) or 0 for a client
+ * (connection to a haproxy server).
+ * Parameters equal to their RFC default value are not emitted at all.
+ * Return the number of bytes consumed if succeeded, 0 if not.
+ */
+int quic_transport_params_encode(unsigned char *buf,
+ const unsigned char *end,
+ struct quic_transport_params *p,
+ const struct quic_version *chosen_version,
+ int server)
+{
+ unsigned char *head;
+ unsigned char *pos;
+
+ head = pos = buf;
+ /* Server-only parameters first (ODCID, retry SCID, stateless reset
+  * token, preferred address).
+  */
+ if (server) {
+ if (!quic_transport_param_enc_mem(&pos, end,
+ QUIC_TP_ORIGINAL_DESTINATION_CONNECTION_ID,
+ p->original_destination_connection_id.data,
+ p->original_destination_connection_id.len))
+ return 0;
+
+ if (p->retry_source_connection_id.len) {
+ if (!quic_transport_param_enc_mem(&pos, end,
+ QUIC_TP_RETRY_SOURCE_CONNECTION_ID,
+ p->retry_source_connection_id.data,
+ p->retry_source_connection_id.len))
+ return 0;
+ }
+
+ if (p->with_stateless_reset_token &&
+ !quic_transport_param_enc_mem(&pos, end, QUIC_TP_STATELESS_RESET_TOKEN,
+ p->stateless_reset_token,
+ sizeof p->stateless_reset_token))
+ return 0;
+ if (p->with_preferred_address &&
+ !quic_transport_param_enc_pref_addr(&pos, end, &p->preferred_address))
+ return 0;
+ }
+
+ if (!quic_transport_param_enc_mem(&pos, end,
+ QUIC_TP_INITIAL_SOURCE_CONNECTION_ID,
+ p->initial_source_connection_id.data,
+ p->initial_source_connection_id.len))
+ return 0;
+
+ if (p->max_idle_timeout &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_MAX_IDLE_TIMEOUT, p->max_idle_timeout))
+ return 0;
+
+ /*
+ * "max_packet_size" transport parameter must be transmitted only if different
+ * from the default value.
+ */
+ if (p->max_udp_payload_size != QUIC_TP_DFLT_MAX_UDP_PAYLOAD_SIZE &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_MAX_UDP_PAYLOAD_SIZE, p->max_udp_payload_size))
+ return 0;
+
+ if (p->initial_max_data &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_DATA, p->initial_max_data))
+ return 0;
+
+ if (p->initial_max_stream_data_bidi_local &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
+ p->initial_max_stream_data_bidi_local))
+ return 0;
+
+ if (p->initial_max_stream_data_bidi_remote &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
+ p->initial_max_stream_data_bidi_remote))
+ return 0;
+
+ if (p->initial_max_stream_data_uni &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_STREAM_DATA_UNI,
+ p->initial_max_stream_data_uni))
+ return 0;
+
+ if (p->initial_max_streams_bidi &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_STREAMS_BIDI,
+ p->initial_max_streams_bidi))
+ return 0;
+
+ if (p->initial_max_streams_uni &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_INITIAL_MAX_STREAMS_UNI,
+ p->initial_max_streams_uni))
+ return 0;
+
+ /*
+ * "ack_delay_exponent" transport parameter must be transmitted only if different
+ * from the default value.
+ */
+ if (p->ack_delay_exponent != QUIC_TP_DFLT_ACK_DELAY_COMPONENT &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_ACK_DELAY_EXPONENT, p->ack_delay_exponent))
+ return 0;
+
+ /*
+ * "max_ack_delay" transport parameter must be transmitted only if different
+ * from the default value.
+ */
+ if (p->max_ack_delay != QUIC_TP_DFLT_MAX_ACK_DELAY &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_MAX_ACK_DELAY, p->max_ack_delay))
+ return 0;
+
+ /* 0-length value */
+ if (p->disable_active_migration &&
+ !quic_transport_param_encode_type_len(&pos, end, QUIC_TP_DISABLE_ACTIVE_MIGRATION, 0))
+ return 0;
+
+ if (p->active_connection_id_limit &&
+ p->active_connection_id_limit != QUIC_TP_DFLT_ACTIVE_CONNECTION_ID_LIMIT &&
+ !quic_transport_param_enc_int(&pos, end, QUIC_TP_ACTIVE_CONNECTION_ID_LIMIT,
+ p->active_connection_id_limit))
+ return 0;
+
+ if (!quic_transport_param_enc_version_info(&pos, end, chosen_version, server))
+ return 0;
+
+ return pos - head;
+}
+
+/* Decode transport parameters found in <buf> buffer into <p>, depending on
+ * <server> boolean value which must be set to 1 for a server (haproxy listener)
+ * or 0 for a client (connection to a haproxy server).
+ * Each parameter is a varint "type+len" prefix followed by <len> value bytes.
+ * Returns 1 if succeeded, 0 if not.
+ */
+static int quic_transport_params_decode(struct quic_transport_params *p, int server,
+ const unsigned char *buf,
+ const unsigned char *end)
+{
+ const unsigned char *pos;
+ uint64_t type, len = 0;
+
+ pos = buf;
+
+ while (pos != end) {
+ if (!quic_transport_param_decode_type_len(&type, &len, &pos, end))
+ return 0;
+
+ /* The announced value length must fit in the remaining buffer. */
+ if (end - pos < len)
+ return 0;
+
+ if (!quic_transport_param_decode(p, server, type, &pos, len))
+ return 0;
+ }
+
+ /*
+ * A server MUST send original_destination_connection_id transport parameter.
+ * initial_source_connection_id must be present both for server and client.
+ */
+ if ((server && !p->original_destination_connection_id_present) ||
+ !p->initial_source_connection_id_present)
+ return 0;
+
+ /* Note that if not received by the peer, active_connection_id_limit will
+ * have QUIC_TP_DFLT_ACTIVE_CONNECTION_ID_LIMIT as default value. This
+ * is also the minimum value for this transport parameter.
+ */
+ if (p->active_connection_id_limit < QUIC_TP_DFLT_ACTIVE_CONNECTION_ID_LIMIT)
+ return 0;
+
+ return 1;
+}
+
+/* Store transport parameters found in <buf> buffer into <qc> QUIC connection
+ * depending on <server> value which must be 1 for a server (haproxy listener)
+ * or 0 for a client (connection to a haproxy server).
+ * Note that peer transport parameters are stored in the TX part of the connection:
+ * they are used to send packets to the peer with its transport parameters as
+ * limitations.
+ * Returns 1 if succeeded, 0 if not.
+ */
+int quic_transport_params_store(struct quic_conn *qc, int server,
+ const unsigned char *buf,
+ const unsigned char *end)
+{
+ struct quic_transport_params *tx_params = &qc->tx.params;
+ struct quic_transport_params *rx_params = &qc->rx.params;
+ /* Initial source connection ID */
+ struct tp_cid *iscid;
+
+ /* initialize peer TPs to RFC default value */
+ quic_dflt_transport_params_cpy(tx_params);
+
+ if (!quic_transport_params_decode(tx_params, server, buf, end))
+ return 0;
+
+ /* Update the connection from transport parameters received */
+ if (tx_params->version_information.negotiated_version &&
+ tx_params->version_information.negotiated_version != qc->original_version)
+ qc->negotiated_version =
+ qc->tx.params.version_information.negotiated_version;
+
+ if (tx_params->max_ack_delay)
+ qc->max_ack_delay = tx_params->max_ack_delay;
+
+ /* Effective idle timeout: the min of both announced values when both
+  * are non-null, else whichever one is set (a null value means "no
+  * timeout" for that side).
+  */
+ if (tx_params->max_idle_timeout && rx_params->max_idle_timeout)
+ qc->max_idle_timeout =
+ QUIC_MIN(tx_params->max_idle_timeout, rx_params->max_idle_timeout);
+ else
+ qc->max_idle_timeout =
+ QUIC_MAX(tx_params->max_idle_timeout, rx_params->max_idle_timeout);
+ TRACE_PROTO("\nTX(remote) transp. params.", QUIC_EV_TRANSP_PARAMS, qc, tx_params);
+
+ /* Check that the "initial_source_connection_id" transport parameter matches
+ * the SCID received which is also the DCID of the connection.
+ */
+ iscid = &tx_params->initial_source_connection_id;
+ if (qc->dcid.len != iscid->len ||
+ (qc->dcid.len && memcmp(qc->dcid.data, iscid->data, qc->dcid.len))) {
+ TRACE_PROTO("initial_source_connection_id transport parameter mismatch",
+ QUIC_EV_TRANSP_PARAMS, qc);
+ /* Kill the connection as soon as possible */
+ qc_kill_conn(qc);
+ }
+
+ /* NOTE(review): 1 is returned even after qc_kill_conn() above -
+  * apparently intentional since the connection is killed asynchronously;
+  * confirm callers do not rely on 0 to detect this case.
+  */
+ return 1;
+}
+
+/* QUIC server (or haproxy listener) only function.
+ * Initialize the local transport parameters <rx_params> from <listener_params>
+ * coming from configuration and Initial packet information (destination
+ * connection ID, source connection ID, original destination connection ID) from
+ * client token.
+ * When a token ODCID is present (i.e. a Retry was performed), the ODCID
+ * parameter comes from the token and the received DCID becomes the
+ * retry_source_connection_id; otherwise the received DCID is the ODCID.
+ * Returns 1 if succeeded, 0 if not (currently always succeeds).
+ */
+int qc_lstnr_params_init(struct quic_conn *qc,
+ const struct quic_transport_params *listener_params,
+ const unsigned char *stateless_reset_token,
+ const unsigned char *dcid, size_t dcidlen,
+ const unsigned char *scid, size_t scidlen,
+ const struct quic_cid *token_odcid)
+{
+ struct quic_transport_params *rx_params = &qc->rx.params;
+ struct tp_cid *odcid_param = &rx_params->original_destination_connection_id;
+
+ /* Copy the transport parameters. */
+ *rx_params = *listener_params;
+ /* Copy the stateless reset token */
+ memcpy(rx_params->stateless_reset_token, stateless_reset_token,
+ sizeof rx_params->stateless_reset_token);
+ /* Copy original_destination_connection_id transport parameter. */
+ if (token_odcid->len) {
+ memcpy(odcid_param->data, token_odcid->data, token_odcid->len);
+ odcid_param->len = token_odcid->len;
+ /* Copy retry_source_connection_id transport parameter. */
+ memcpy(rx_params->retry_source_connection_id.data, dcid, dcidlen);
+ rx_params->retry_source_connection_id.len = dcidlen;
+ }
+ else {
+ memcpy(odcid_param->data, dcid, dcidlen);
+ odcid_param->len = dcidlen;
+ }
+
+ /* Copy the initial source connection ID. */
+ memcpy(rx_params->initial_source_connection_id.data, scid, scidlen);
+ rx_params->initial_source_connection_id.len = scidlen;
+ TRACE_PROTO("\nRX(local) transp. params.", QUIC_EV_TRANSP_PARAMS, qc, rx_params);
+
+ return 1;
+}
+
diff --git a/src/quic_trace.c b/src/quic_trace.c
new file mode 100644
index 0000000..9ab9626
--- /dev/null
+++ b/src/quic_trace.c
@@ -0,0 +1,633 @@
+/*
+ * QUIC traces
+ *
+ * Copyright 2000-2020
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <inttypes.h>
+
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/trace.h>
+
+static void quic_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4);
+
+static const struct trace_event quic_trace_events[] = {
+ { .mask = QUIC_EV_CONN_NEW, .name = "new_conn", .desc = "new QUIC connection" },
+ { .mask = QUIC_EV_CONN_INIT, .name = "new_conn_init", .desc = "new QUIC connection initialization" },
+ { .mask = QUIC_EV_CONN_ISEC, .name = "init_secs", .desc = "initial secrets derivation" },
+ { .mask = QUIC_EV_CONN_RSEC, .name = "read_secs", .desc = "read secrets derivation" },
+ { .mask = QUIC_EV_CONN_WSEC, .name = "write_secs", .desc = "write secrets derivation" },
+ { .mask = QUIC_EV_CONN_LPKT, .name = "lstnr_packet", .desc = "new listener received packet" },
+ { .mask = QUIC_EV_CONN_SPKT, .name = "srv_packet", .desc = "new server received packet" },
+ { .mask = QUIC_EV_CONN_ENCPKT, .name = "enc_hdshk_pkt", .desc = "handhshake packet encryption" },
+ { .mask = QUIC_EV_CONN_TXPKT, .name = "tx_pkt", .desc = "TX packet" },
+ { .mask = QUIC_EV_CONN_PAPKT, .name = "phdshk_apkt", .desc = "post handhshake application packet preparation" },
+ { .mask = QUIC_EV_CONN_PAPKTS, .name = "phdshk_apkts", .desc = "post handhshake application packets preparation" },
+ { .mask = QUIC_EV_CONN_IO_CB, .name = "qc_io_cb", .desc = "QUIC conn. I/O processing" },
+ { .mask = QUIC_EV_CONN_RMHP, .name = "rm_hp", .desc = "Remove header protection" },
+ { .mask = QUIC_EV_CONN_PRSHPKT, .name = "parse_hpkt", .desc = "parse handshake packet" },
+ { .mask = QUIC_EV_CONN_PRSAPKT, .name = "parse_apkt", .desc = "parse application packet" },
+ { .mask = QUIC_EV_CONN_PRSFRM, .name = "parse_frm", .desc = "parse frame" },
+ { .mask = QUIC_EV_CONN_PRSAFRM, .name = "parse_ack_frm", .desc = "parse ACK frame" },
+ { .mask = QUIC_EV_CONN_BFRM, .name = "build_frm", .desc = "build frame" },
+ { .mask = QUIC_EV_CONN_PHPKTS, .name = "phdshk_pkts", .desc = "handhshake packets preparation" },
+ { .mask = QUIC_EV_CONN_TRMHP, .name = "rm_hp_try", .desc = "header protection removing try" },
+ { .mask = QUIC_EV_CONN_ELRMHP, .name = "el_rm_hp", .desc = "handshake enc. level header protection removing" },
+ { .mask = QUIC_EV_CONN_RXPKT, .name = "rx_pkt", .desc = "RX packet" },
+ { .mask = QUIC_EV_CONN_SSLDATA, .name = "ssl_provide_data", .desc = "CRYPTO data provision to TLS stack" },
+ { .mask = QUIC_EV_CONN_RXCDATA, .name = "el_treat_rx_cfrms",.desc = "enc. level RX CRYPTO frames processing"},
+ { .mask = QUIC_EV_CONN_ADDDATA, .name = "add_hdshk_data", .desc = "TLS stack ->add_handshake_data() call"},
+ { .mask = QUIC_EV_CONN_FFLIGHT, .name = "flush_flight", .desc = "TLS stack ->flush_flight() call"},
+ { .mask = QUIC_EV_CONN_SSLALERT, .name = "send_alert", .desc = "TLS stack ->send_alert() call"},
+ { .mask = QUIC_EV_CONN_RTTUPDT, .name = "rtt_updt", .desc = "RTT sampling" },
+ { .mask = QUIC_EV_CONN_SPPKTS, .name = "sppkts", .desc = "send prepared packets" },
+ { .mask = QUIC_EV_CONN_PKTLOSS, .name = "pktloss", .desc = "detect packet loss" },
+ { .mask = QUIC_EV_CONN_STIMER, .name = "stimer", .desc = "set timer" },
+ { .mask = QUIC_EV_CONN_PTIMER, .name = "ptimer", .desc = "process timer" },
+ { .mask = QUIC_EV_CONN_SPTO, .name = "spto", .desc = "set PTO" },
+ { .mask = QUIC_EV_CONN_BCFRMS, .name = "bcfrms", .desc = "build CRYPTO data frames" },
+ { .mask = QUIC_EV_CONN_XPRTSEND, .name = "xprt_send", .desc = "sending XRPT subscription" },
+ { .mask = QUIC_EV_CONN_XPRTRECV, .name = "xprt_recv", .desc = "receiving XRPT subscription" },
+ { .mask = QUIC_EV_CONN_FREED, .name = "conn_freed", .desc = "releasing conn. memory" },
+ { .mask = QUIC_EV_CONN_CLOSE, .name = "conn_close", .desc = "closing conn." },
+ { .mask = QUIC_EV_CONN_ACKSTRM, .name = "ack_strm", .desc = "STREAM ack."},
+ { .mask = QUIC_EV_CONN_FRMLIST, .name = "frm_list", .desc = "frame list"},
+ { .mask = QUIC_EV_STATELESS_RST, .name = "stateless_reset", .desc = "stateless reset sent"},
+ { .mask = QUIC_EV_TRANSP_PARAMS, .name = "transport_params", .desc = "transport parameters"},
+ { .mask = QUIC_EV_CONN_IDLE_TIMER, .name = "idle_timer", .desc = "idle timer task"},
+ { .mask = QUIC_EV_CONN_SUB, .name = "xprt_sub", .desc = "RX/TX subscription or unsubscription to QUIC xprt"},
+ { .mask = QUIC_EV_CONN_RCV, .name = "conn_recv", .desc = "RX on connection" },
+ { .mask = QUIC_EV_CONN_SET_AFFINITY, .name = "conn_set_affinity", .desc = "set connection thread affinity" },
+ { /* end */ }
+};
+
+static const struct name_desc quic_trace_lockon_args[4] = {
+ /* arg1 */ { /* already used by the connection */ },
+ /* arg2 */ { .name="quic", .desc="QUIC transport" },
+ /* arg3 */ { },
+ /* arg4 */ { }
+};
+
+static const struct name_desc quic_trace_decoding[] = {
+#define QUIC_VERB_CLEAN 1
+ { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+ { /* end */ }
+};
+
+
+struct trace_source trace_quic = {
+ .name = IST("quic"),
+ .desc = "QUIC xprt",
+ .arg_def = TRC_ARG1_QCON, /* TRACE()'s first argument is always a quic_conn */
+ .default_cb = quic_trace,
+ .known_events = quic_trace_events,
+ .lockon_args = quic_trace_lockon_args,
+ .decoding = quic_trace_decoding,
+ .report_events = ~0, /* report everything by default */
+};
+
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* Trace callback for QUIC.
+ * These traces always expect that arg1, if non-null, is of type connection.
+ */
+static void quic_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ const struct quic_conn *qc = a1;
+
+ if (qc) {
+ const struct quic_tls_ctx *tls_ctx;
+
+ chunk_appendf(&trace_buf, " : qc@%p idle_timer_task@%p flags=0x%x",
+ qc, qc->idle_timer_task, qc->flags);
+ if (mask & QUIC_EV_CONN_INIT) {
+ chunk_appendf(&trace_buf, "\n odcid");
+ quic_cid_dump(&trace_buf, &qc->odcid);
+ chunk_appendf(&trace_buf, "\n dcid");
+ quic_cid_dump(&trace_buf, &qc->dcid);
+ chunk_appendf(&trace_buf, "\n scid");
+ quic_cid_dump(&trace_buf, &qc->scid);
+ }
+
+ if (mask & QUIC_EV_TRANSP_PARAMS) {
+ const struct quic_transport_params *p = a2;
+
+ if (p)
+ quic_transport_params_dump(&trace_buf, qc, p);
+ }
+
+ if (mask & QUIC_EV_CONN_ADDDATA) {
+ const enum ssl_encryption_level_t *level = a2;
+ const size_t *len = a3;
+
+ if (level) {
+ enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
+
+ chunk_appendf(&trace_buf, " el=%c(%d)", quic_enc_level_char(lvl), lvl);
+ }
+ if (len)
+ chunk_appendf(&trace_buf, " len=%llu", (unsigned long long)*len);
+ }
+ if ((mask & QUIC_EV_CONN_ISEC) && qc) {
+ /* Initial read & write secrets. */
+ const unsigned char *rx_sec = a2;
+ const unsigned char *tx_sec = a3;
+
+ tls_ctx = &qc->iel->tls_ctx;
+ chunk_appendf(&trace_buf, "\n RX el=I");
+ if (rx_sec)
+ quic_tls_secret_hexdump(&trace_buf, rx_sec, 32);
+ quic_tls_keys_hexdump(&trace_buf, &tls_ctx->rx);
+ chunk_appendf(&trace_buf, "\n TX el=I");
+ if (tx_sec)
+ quic_tls_secret_hexdump(&trace_buf, tx_sec, 32);
+ quic_tls_keys_hexdump(&trace_buf, &tls_ctx->tx);
+ }
+
+ if ((mask & QUIC_EV_CONN_KP) && qc) {
+		/* Key update (kp) RX & TX secrets. */
+ const struct quic_kp_trace *kp = a2;
+
+ if (kp) {
+ if (kp->rx) {
+ chunk_appendf(&trace_buf, "\n RX kp");
+ if (kp->rx_sec)
+ quic_tls_secret_hexdump(&trace_buf, kp->rx_sec, kp->rx_seclen);
+ quic_tls_kp_keys_hexdump(&trace_buf, kp->rx);
+ }
+ if (kp->tx) {
+ chunk_appendf(&trace_buf, "\n TX kp");
+ if (kp->tx_sec)
+ quic_tls_secret_hexdump(&trace_buf, kp->tx_sec, kp->tx_seclen);
+ quic_tls_kp_keys_hexdump(&trace_buf, kp->tx);
+ }
+ }
+ }
+
+ if (mask & (QUIC_EV_CONN_RSEC|QUIC_EV_CONN_RWSEC)) {
+ const enum ssl_encryption_level_t *level = a2;
+
+ if (level) {
+ enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
+ struct quic_enc_level *qel = qc_quic_enc_level(qc, lvl);
+
+ chunk_appendf(&trace_buf, "\n RX el=%c", quic_enc_level_char(lvl));
+ if (quic_tls_has_rx_sec(qel))
+ quic_tls_keys_hexdump(&trace_buf, &qel->tls_ctx.rx);
+ else
+ chunk_appendf(&trace_buf, " (none)");
+ }
+ }
+
+ if (mask & (QUIC_EV_CONN_WSEC|QUIC_EV_CONN_RWSEC)) {
+ const enum ssl_encryption_level_t *level = a2;
+
+ if (level) {
+ enum quic_tls_enc_level lvl = ssl_to_quic_enc_level(*level);
+ struct quic_enc_level *qel = qc_quic_enc_level(qc, lvl);
+
+ chunk_appendf(&trace_buf, "\n TX el=%c", quic_enc_level_char(lvl));
+ if (quic_tls_has_tx_sec(qel)) {
+ quic_tls_keys_hexdump(&trace_buf, &qel->tls_ctx.tx);
+ }
+ else
+ chunk_appendf(&trace_buf, " (none)");
+ }
+
+ }
+
+ if (mask & QUIC_EV_CONN_FRMLIST) {
+ const struct list *l = a2;
+
+ if (l) {
+ const struct quic_frame *frm;
+ list_for_each_entry(frm, l, list) {
+ chunk_appendf(&trace_buf, " frm@%p", frm);
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+ }
+ }
+
+ if (mask & (QUIC_EV_CONN_TXPKT|QUIC_EV_CONN_PAPKT)) {
+ const struct quic_tx_packet *pkt = a2;
+ const struct quic_enc_level *qel = a3;
+ const ssize_t *room = a4;
+
+ if (qel) {
+ const struct quic_pktns *pktns = qel->pktns;
+ chunk_appendf(&trace_buf, " qel=%c flags=0x%x pto_count=%d cwnd=%llu ppif=%lld pif=%llu "
+ "if=%llu pp=%u",
+ quic_enc_level_char_from_qel(qel, qc),
+ qel->pktns->flags,
+ qc->path->loss.pto_count,
+ (unsigned long long)qc->path->cwnd,
+ (unsigned long long)qc->path->prep_in_flight,
+ (unsigned long long)qc->path->in_flight,
+ (unsigned long long)pktns->tx.in_flight,
+ pktns->tx.pto_probe);
+ }
+ if (pkt) {
+ const struct quic_frame *frm;
+ if (pkt->pn_node.key != (uint64_t)-1)
+ chunk_appendf(&trace_buf, " pn=%llu",(ull)pkt->pn_node.key);
+ list_for_each_entry(frm, &pkt->frms, list) {
+ chunk_appendf(&trace_buf, " frm@%p", frm);
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+ }
+
+ if (room) {
+ chunk_appendf(&trace_buf, " room=%lld", (long long)*room);
+ chunk_appendf(&trace_buf, " dcid.len=%llu scid.len=%llu",
+ (unsigned long long)qc->dcid.len, (unsigned long long)qc->scid.len);
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_IO_CB) {
+ const enum quic_handshake_state *state = a2;
+
+ if (state)
+ chunk_appendf(&trace_buf, " state=%s", quic_hdshk_state_str(*state));
+ }
+
+ if (mask & (QUIC_EV_CONN_TRMHP|QUIC_EV_CONN_ELRMHP|QUIC_EV_CONN_SPKT)) {
+ const struct quic_rx_packet *pkt = a2;
+ const unsigned long *pktlen = a3;
+ const SSL *ssl = a4;
+
+ if (pkt) {
+ chunk_appendf(&trace_buf, " pkt@%p", pkt);
+ if (pkt->type == QUIC_PACKET_TYPE_SHORT && pkt->data)
+ chunk_appendf(&trace_buf, " kp=%d",
+ !!(*pkt->data & QUIC_PACKET_KEY_PHASE_BIT));
+ chunk_appendf(&trace_buf, " el=%c",
+ quic_packet_type_enc_level_char(pkt->type));
+ if (pkt->pnl)
+ chunk_appendf(&trace_buf, " pnl=%u pn=%llu", pkt->pnl,
+ (unsigned long long)pkt->pn);
+ if (pkt->token_len)
+ chunk_appendf(&trace_buf, " toklen=%llu",
+ (unsigned long long)pkt->token_len);
+ if (pkt->aad_len)
+ chunk_appendf(&trace_buf, " aadlen=%llu",
+ (unsigned long long)pkt->aad_len);
+ chunk_appendf(&trace_buf, " flags=0x%x len=%llu",
+ pkt->flags, (unsigned long long)pkt->len);
+ }
+ if (pktlen)
+ chunk_appendf(&trace_buf, " (%ld)", *pktlen);
+ if (ssl) {
+ enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
+ chunk_appendf(&trace_buf, " el=%c",
+ quic_enc_level_char(ssl_to_quic_enc_level(level)));
+ }
+ }
+
+ if (mask & (QUIC_EV_CONN_RXPKT|QUIC_EV_CONN_PRSHPKT|QUIC_EV_CONN_SSLDATA)) {
+ const struct quic_rx_packet *pkt = a2;
+ const struct quic_rx_crypto_frm *cf = a3;
+ const SSL *ssl = a4;
+
+ if (pkt)
+ chunk_appendf(&trace_buf, " pkt@%p el=%c pn=%llu", pkt,
+ quic_packet_type_enc_level_char(pkt->type),
+ (unsigned long long)pkt->pn);
+ if (cf)
+ chunk_appendf(&trace_buf, " cfoff=%llu cflen=%llu",
+ (unsigned long long)cf->offset_node.key,
+ (unsigned long long)cf->len);
+ if (ssl) {
+ enum ssl_encryption_level_t level = SSL_quic_read_level(ssl);
+ chunk_appendf(&trace_buf, " rel=%c",
+ quic_enc_level_char(ssl_to_quic_enc_level(level)));
+ }
+
+ if (qc->err.code)
+ chunk_appendf(&trace_buf, " err_code=0x%llx", (ull)qc->err.code);
+ }
+
+ if (mask & (QUIC_EV_CONN_PRSFRM|QUIC_EV_CONN_BFRM)) {
+ const struct quic_frame *frm = a2;
+
+ if (frm)
+ chunk_appendf(&trace_buf, " %s", quic_frame_type_string(frm->type));
+ }
+
+ if (mask & QUIC_EV_CONN_PHPKTS) {
+ const struct quic_enc_level *qel = a2;
+ const struct list *l = a3;
+
+ if (qel) {
+ const struct quic_pktns *pktns = qel->pktns;
+ chunk_appendf(&trace_buf,
+ " qel=%c flags=0x%x state=%s ack?%d pto_count=%d cwnd=%llu "
+ "ppif=%lld pif=%llu if=%llu pp=%u off=%llu",
+ quic_enc_level_char_from_qel(qel, qc),
+ qel->pktns->flags,
+ quic_hdshk_state_str(qc->state),
+ !!(qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED),
+ qc->path->loss.pto_count,
+ (unsigned long long)qc->path->cwnd,
+ (unsigned long long)qc->path->prep_in_flight,
+ (unsigned long long)qc->path->in_flight,
+ (unsigned long long)pktns->tx.in_flight,
+ pktns->tx.pto_probe,
+ qel->cstream ? (unsigned long long)qel->cstream->rx.offset : 0);
+ }
+
+ if (l) {
+ const struct quic_frame *frm;
+ list_for_each_entry(frm, l, list) {
+ chunk_appendf(&trace_buf, " frm@%p", frm);
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_ENCPKT) {
+ const struct enc_debug_info *edi = a2;
+
+ if (edi)
+ chunk_appendf(&trace_buf,
+ " payload=@%p payload_len=%llu"
+ " aad=@%p aad_len=%llu pn=%llu",
+ edi->payload, (unsigned long long)edi->payload_len,
+ edi->aad, (unsigned long long)edi->aad_len,
+ (unsigned long long)edi->pn);
+ }
+
+ if (mask & QUIC_EV_CONN_RMHP) {
+ const struct quic_rx_packet *pkt = a2;
+
+ if (pkt) {
+ const int *ret = a3;
+
+ chunk_appendf(&trace_buf, " pkt@%p", pkt);
+ if (ret && *ret)
+ chunk_appendf(&trace_buf, " pnl=%u pn=%llu",
+ pkt->pnl, (unsigned long long)pkt->pn);
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_PRSAFRM) {
+ const struct quic_frame *frm = a2;
+ const unsigned long *val1 = a3;
+ const unsigned long *val2 = a4;
+
+ if (frm) {
+ chunk_appendf(&trace_buf, " frm@%p", frm);
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+ if (val1)
+ chunk_appendf(&trace_buf, " %lu", *val1);
+ if (val2)
+ chunk_appendf(&trace_buf, "..%lu", *val2);
+ }
+
+ if (mask & QUIC_EV_CONN_ACKSTRM) {
+ const struct qf_stream *strm_frm = a2;
+ const struct qc_stream_desc *stream = a3;
+
+ if (strm_frm)
+ chunk_appendf(&trace_buf, " off=%llu len=%llu", (ull)strm_frm->offset.key, (ull)strm_frm->len);
+ if (stream)
+ chunk_appendf(&trace_buf, " ack_offset=%llu", (ull)stream->ack_offset);
+ }
+
+ if (mask & QUIC_EV_CONN_RTTUPDT) {
+ const unsigned int *rtt_sample = a2;
+ const unsigned int *ack_delay = a3;
+ const struct quic_loss *ql = a4;
+
+ if (rtt_sample)
+ chunk_appendf(&trace_buf, " rtt_sample=%ums", *rtt_sample);
+ if (ack_delay)
+ chunk_appendf(&trace_buf, " ack_delay=%ums", *ack_delay);
+ if (ql)
+ chunk_appendf(&trace_buf,
+ " srtt=%ums rttvar=%ums min_rtt=%ums",
+ ql->srtt, ql->rtt_var, ql->rtt_min);
+ }
+ if (mask & QUIC_EV_CONN_CC) {
+ const struct quic_cc_event *ev = a2;
+ const struct quic_cc *cc = a3;
+
+ if (a2)
+ quic_cc_event_trace(&trace_buf, ev);
+ if (a3)
+ quic_cc_state_trace(&trace_buf, cc);
+ }
+
+ if (mask & QUIC_EV_CONN_PKTLOSS) {
+ const struct quic_pktns *pktns = a2;
+ const struct list *lost_pkts = a3;
+
+ if (pktns) {
+ chunk_appendf(&trace_buf, " pktns=%c", quic_pktns_char(qc, pktns));
+ if (pktns->tx.loss_time)
+ chunk_appendf(&trace_buf, " loss_time=%dms",
+ TICKS_TO_MS(tick_remain(now_ms, pktns->tx.loss_time)));
+ }
+ if (lost_pkts && !LIST_ISEMPTY(lost_pkts)) {
+ struct quic_tx_packet *pkt;
+
+ chunk_appendf(&trace_buf, " lost_pkts:");
+ list_for_each_entry(pkt, lost_pkts, list)
+ chunk_appendf(&trace_buf, " %lu", (unsigned long)pkt->pn_node.key);
+ }
+ }
+
+ if (mask & (QUIC_EV_CONN_STIMER|QUIC_EV_CONN_PTIMER|QUIC_EV_CONN_SPTO)) {
+ const struct quic_pktns *pktns = a2;
+ const int *duration = a3;
+ const uint64_t *ifae_pkts = a4;
+
+ if (ifae_pkts)
+ chunk_appendf(&trace_buf, " ifae_pkts=%llu",
+ (unsigned long long)*ifae_pkts);
+ if (pktns) {
+ chunk_appendf(&trace_buf, " pktns=%c pp=%d",
+ quic_pktns_char(qc, pktns),
+ pktns->tx.pto_probe);
+ if (mask & (QUIC_EV_CONN_STIMER|QUIC_EV_CONN_SPTO)) {
+ if (pktns->tx.in_flight)
+ chunk_appendf(&trace_buf, " if=%llu", (ull)pktns->tx.in_flight);
+ if (pktns->tx.loss_time)
+ chunk_appendf(&trace_buf, " loss_time=%dms",
+ TICKS_TO_MS(pktns->tx.loss_time - now_ms));
+ }
+ if (mask & QUIC_EV_CONN_SPTO) {
+ if (pktns->tx.time_of_last_eliciting)
+ chunk_appendf(&trace_buf, " tole=%dms",
+ TICKS_TO_MS(pktns->tx.time_of_last_eliciting - now_ms));
+ if (duration)
+ chunk_appendf(&trace_buf, " dur=%dms", TICKS_TO_MS(*duration));
+ }
+ }
+
+ if (!(mask & (QUIC_EV_CONN_SPTO|QUIC_EV_CONN_PTIMER)) && qc->timer_task) {
+ chunk_appendf(&trace_buf,
+ " expire=%dms", TICKS_TO_MS(qc->timer - now_ms));
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_SPPKTS) {
+ const struct quic_tx_packet *pkt = a2;
+
+ chunk_appendf(&trace_buf, " pto_count=%d cwnd=%llu ppif=%llu pif=%llu",
+ qc->path->loss.pto_count,
+ (unsigned long long)qc->path->cwnd,
+ (unsigned long long)qc->path->prep_in_flight,
+ (unsigned long long)qc->path->in_flight);
+ if (pkt) {
+ const struct quic_frame *frm;
+ if (pkt->flags & QUIC_FL_TX_PACKET_ACK)
+ chunk_appendf(&trace_buf, " ack");
+ chunk_appendf(&trace_buf, " pn=%lu(%c) iflen=%llu",
+ (unsigned long)pkt->pn_node.key,
+ quic_pktns_char(qc, pkt->pktns),
+ (unsigned long long)pkt->in_flight_len);
+ chunk_appendf(&trace_buf, " bytes.rx=%llu bytes.tx=%llu",
+ (unsigned long long)qc->bytes.rx,
+ (unsigned long long)qc->bytes.tx);
+ list_for_each_entry(frm, &pkt->frms, list) {
+ chunk_appendf(&trace_buf, " frm@%p", frm);
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+
+ if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
+ chunk_appendf(&trace_buf, " with scid");
+ quic_cid_dump(&trace_buf, &qc->scid);
+ }
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_SSLALERT) {
+ const uint8_t *alert = a2;
+ const enum ssl_encryption_level_t *level = a3;
+
+ if (alert)
+ chunk_appendf(&trace_buf, " alert=0x%02x", *alert);
+ if (level)
+ chunk_appendf(&trace_buf, " el=%c",
+ quic_enc_level_char(ssl_to_quic_enc_level(*level)));
+ }
+
+ if (mask & QUIC_EV_CONN_BCFRMS) {
+ const size_t *sz1 = a2;
+ const size_t *sz2 = a3;
+ const size_t *sz3 = a4;
+
+ if (sz1)
+ chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz1);
+ if (sz2)
+ chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz2);
+ if (sz3)
+ chunk_appendf(&trace_buf, " %llu", (unsigned long long)*sz3);
+ }
+
+ if (mask & QUIC_EV_CONN_PSTRM) {
+ const struct quic_frame *frm = a2;
+
+ if (frm)
+ chunk_frm_appendf(&trace_buf, frm);
+ }
+
+ if (mask & QUIC_EV_CONN_ELEVELSEL) {
+ const enum quic_handshake_state *state = a2;
+ const enum quic_tls_enc_level *level = a3;
+ const enum quic_tls_enc_level *next_level = a4;
+
+ if (state)
+ chunk_appendf(&trace_buf, " state=%s", quic_hdshk_state_str(qc->state));
+ if (level)
+ chunk_appendf(&trace_buf, " level=%c", quic_enc_level_char(*level));
+ if (next_level)
+ chunk_appendf(&trace_buf, " next_level=%c", quic_enc_level_char(*next_level));
+
+ }
+
+ if (mask & QUIC_EV_CONN_IDLE_TIMER) {
+ if (tick_isset(qc->ack_expire))
+ chunk_appendf(&trace_buf, " ack_expire=%ums",
+ TICKS_TO_MS(tick_remain(now_ms, qc->ack_expire)));
+ if (tick_isset(qc->idle_expire))
+ chunk_appendf(&trace_buf, " idle_expire=%ums",
+ TICKS_TO_MS(tick_remain(now_ms, qc->idle_expire)));
+ if (qc->idle_timer_task && tick_isset(qc->idle_timer_task->expire))
+ chunk_appendf(&trace_buf, " expire=%ums",
+ TICKS_TO_MS(tick_remain(now_ms, qc->idle_timer_task->expire)));
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_RCV) {
+ int i;
+ const struct quic_dgram *dgram = a2;
+ char bufaddr[INET6_ADDRSTRLEN], bufport[6];
+
+ if (qc) {
+ addr_to_str(&qc->peer_addr, bufaddr, sizeof(bufaddr));
+ port_to_str(&qc->peer_addr, bufport, sizeof(bufport));
+ chunk_appendf(&trace_buf, " peer_addr=%s:%s ", bufaddr, bufport);
+ }
+
+ if (dgram) {
+ chunk_appendf(&trace_buf, " dgram.len=%zu", dgram->len);
+ /* Socket */
+ if (dgram->saddr.ss_family == AF_INET ||
+ dgram->saddr.ss_family == AF_INET6) {
+ addr_to_str(&dgram->saddr, bufaddr, sizeof(bufaddr));
+ port_to_str(&dgram->saddr, bufport, sizeof(bufport));
+ chunk_appendf(&trace_buf, "saddr=%s:%s ", bufaddr, bufport);
+
+ addr_to_str(&dgram->daddr, bufaddr, sizeof(bufaddr));
+ port_to_str(&dgram->daddr, bufport, sizeof(bufport));
+ chunk_appendf(&trace_buf, "daddr=%s:%s ", bufaddr, bufport);
+ }
+ /* DCID */
+ for (i = 0; i < dgram->dcid_len; ++i)
+ chunk_appendf(&trace_buf, "%02x", dgram->dcid[i]);
+
+ }
+ }
+
+ if (mask & QUIC_EV_CONN_LPKT) {
+ const struct quic_rx_packet *pkt = a2;
+ const uint64_t *len = a3;
+ const struct quic_version *ver = a4;
+
+ if (pkt) {
+ chunk_appendf(&trace_buf, " pkt@%p type=0x%02x %s",
+ pkt, pkt->type, qc_pkt_long(pkt) ? "long" : "short");
+ if (pkt->pn_node.key != (uint64_t)-1)
+ chunk_appendf(&trace_buf, " pn=%llu", pkt->pn_node.key);
+ }
+
+ if (len)
+ chunk_appendf(&trace_buf, " len=%llu", (ull)*len);
+
+ if (ver)
+ chunk_appendf(&trace_buf, " ver=0x%08x", ver->num);
+ }
+
+ if (mask & QUIC_EV_STATELESS_RST) {
+ const struct quic_cid *cid = a2;
+
+ if (cid)
+ quic_cid_dump(&trace_buf, cid);
+ }
+
+}
diff --git a/src/quic_tx.c b/src/quic_tx.c
new file mode 100644
index 0000000..306b4c2
--- /dev/null
+++ b/src/quic_tx.c
@@ -0,0 +1,2348 @@
+/*
+ * QUIC protocol implementation. Lower layer with internal features implemented
+ * here such as QUIC encryption, idle timeout, acknowledgement and
+ * retransmission.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/quic_tx.h>
+
+#include <haproxy/pool.h>
+#include <haproxy/trace.h>
+#include <haproxy/quic_cid.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_retransmit.h>
+#include <haproxy/quic_retry.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/quic_tls.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/ssl_sock-t.h>
+
+DECLARE_POOL(pool_head_quic_tx_packet, "quic_tx_packet", sizeof(struct quic_tx_packet));
+DECLARE_POOL(pool_head_quic_cc_buf, "quic_cc_buf", QUIC_MAX_CC_BUFSIZE);
+
+static struct quic_tx_packet *qc_build_pkt(unsigned char **pos, const unsigned char *buf_end,
+ struct quic_enc_level *qel, struct quic_tls_ctx *ctx,
+ struct list *frms, struct quic_conn *qc,
+ const struct quic_version *ver, size_t dglen, int pkt_type,
+ int must_ack, int padding, int probe, int cc, int *err);
+
+static void quic_packet_encrypt(unsigned char *payload, size_t payload_len,
+ unsigned char *aad, size_t aad_len, uint64_t pn,
+ struct quic_tls_ctx *tls_ctx, struct quic_conn *qc,
+ int *fail)
+{
+ unsigned char iv[QUIC_TLS_IV_LEN];
+ unsigned char *tx_iv = tls_ctx->tx.iv;
+ size_t tx_iv_sz = tls_ctx->tx.ivlen;
+ struct enc_debug_info edi;
+
+ TRACE_ENTER(QUIC_EV_CONN_ENCPKT, qc);
+ *fail = 0;
+
+ quic_aead_iv_build(iv, sizeof iv, tx_iv, tx_iv_sz, pn);
+
+ if (!quic_tls_encrypt(payload, payload_len, aad, aad_len,
+ tls_ctx->tx.ctx, tls_ctx->tx.aead, iv)) {
+ TRACE_ERROR("QUIC packet encryption failed", QUIC_EV_CONN_ENCPKT, qc);
+ *fail = 1;
+ enc_debug_info_init(&edi, payload, payload_len, aad, aad_len, pn);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_ENCPKT, qc);
+}
+
+/* Free <pkt> TX packet and its attached frames.
+ * It is the responsibility of the caller to remove this packet from
+ * any data structure it was possibly attached to.
+ */
+static inline void free_quic_tx_packet(struct quic_conn *qc,
+ struct quic_tx_packet *pkt)
+{
+ struct quic_frame *frm, *frmbak;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+ if (!pkt)
+ goto leave;
+
+ list_for_each_entry_safe(frm, frmbak, &pkt->frms, list)
+ qc_frm_free(qc, &frm);
+ pool_free(pool_head_quic_tx_packet, pkt);
+
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+}
+
+/* Allocate Tx buffer from <qc> quic-conn if needed.
+ *
+ * Returns allocated buffer or NULL on error.
+ */
+struct buffer *qc_txb_alloc(struct quic_conn *qc)
+{
+ struct buffer *buf = &qc->tx.buf;
+ if (!b_alloc(buf))
+ return NULL;
+
+ return buf;
+}
+
+/* Free Tx buffer from <qc> if it is empty. */
+void qc_txb_release(struct quic_conn *qc)
+{
+ struct buffer *buf = &qc->tx.buf;
+
+ /* For the moment sending function is responsible to purge the buffer
+ * entirely. It may change in the future but this requires to be able
+ * to reuse old data.
+ * For the moment we do not care to leave data in the buffer for
+ * a connection which is supposed to be killed asap.
+ */
+ BUG_ON_HOT(buf && b_data(buf));
+
+ if (!b_data(buf)) {
+ b_free(buf);
+ offer_buffers(NULL, 1);
+ }
+}
+
+/* Return the TX buffer dedicated to the "connection close" datagram to be built
+ * if an immediate close is required after having allocated it or directly
+ * allocate a TX buffer if an immediate close is not required.
+ */
+struct buffer *qc_get_txb(struct quic_conn *qc)
+{
+ struct buffer *buf;
+
+ if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
+ TRACE_PROTO("Immediate close required", QUIC_EV_CONN_PHPKTS, qc);
+ buf = &qc->tx.cc_buf;
+ if (b_is_null(buf)) {
+ qc->tx.cc_buf_area = pool_alloc(pool_head_quic_cc_buf);
+ if (!qc->tx.cc_buf_area)
+ goto err;
+ }
+
+ /* In every case, initialize ->tx.cc_buf */
+ qc->tx.cc_buf = b_make(qc->tx.cc_buf_area, QUIC_MAX_CC_BUFSIZE, 0, 0);
+ }
+ else {
+ buf = qc_txb_alloc(qc);
+ if (!buf)
+ goto err;
+ }
+
+ return buf;
+ err:
+ return NULL;
+}
+
+/* Commit a datagram payload written into <buf> of length <length>. <first_pkt>
+ * must contain the address of the first packet stored in the payload.
+ *
+ * Caller is responsible that there is enough space in the buffer.
+ */
+static void qc_txb_store(struct buffer *buf, uint16_t length,
+ struct quic_tx_packet *first_pkt)
+{
+ const size_t hdlen = sizeof(uint16_t) + sizeof(void *);
+ BUG_ON_HOT(b_contig_space(buf) < hdlen); /* this must not happen */
+
+ write_u16(b_tail(buf), length);
+ write_ptr(b_tail(buf) + sizeof(length), first_pkt);
+ b_add(buf, hdlen + length);
+}
+
+/* Returns 1 if a packet may be built for <qc> from <qel> encryption level
+ * with <frms> as ack-eliciting frame list to send, 0 if not.
+ * <cc> must be 1 if an immediate close was asked, 0 if not.
+ * <probe> must be 1 if a probing packet is required, 0 if not.
+ * Also set <*must_ack> to inform the caller if an acknowledgement should be sent.
+ */
+static int qc_may_build_pkt(struct quic_conn *qc, struct list *frms,
+ struct quic_enc_level *qel, int cc, int probe,
+ int *must_ack)
+{
+ int force_ack = qel == qc->iel || qel == qc->hel;
+ int nb_aepkts_since_last_ack = qel->pktns->rx.nb_aepkts_since_last_ack;
+
+ /* An acknowledgement must be sent if this has been forced by the caller,
+ * typically during the handshake when the packets must be acknowledged as
+ * soon as possible. This is also the case when the ack delay timer has been
+ * triggered, or at least every QUIC_MAX_RX_AEPKTS_SINCE_LAST_ACK packets.
+ */
+ *must_ack = (qc->flags & QUIC_FL_CONN_ACK_TIMER_FIRED) ||
+ ((qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED) &&
+ (force_ack || nb_aepkts_since_last_ack >= QUIC_MAX_RX_AEPKTS_SINCE_LAST_ACK));
+
+ TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QUIC_EV_CONN_PHPKTS, qc, 0, 0, 0,
+ "has_sec=%d cc=%d probe=%d must_ack=%d frms=%d prep_in_fligh=%llu cwnd=%llu",
+ quic_tls_has_tx_sec(qel), cc, probe, *must_ack, LIST_ISEMPTY(frms),
+ (ullong)qc->path->prep_in_flight, (ullong)qc->path->cwnd);
+
+ /* Do not build any more packet if the TX secrets are not available or
+ * if there is nothing to send, i.e. if no CONNECTION_CLOSE or ACK are required
+ * and if there is no more packets to send upon PTO expiration
+ * and if there is no more ack-eliciting frames to send or in flight
+ * congestion control limit is reached for prepared data
+ */
+ if (!quic_tls_has_tx_sec(qel) ||
+ (!cc && !probe && !*must_ack &&
+ (LIST_ISEMPTY(frms) || qc->path->prep_in_flight >= qc->path->cwnd))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Prepare as much as possible QUIC packets for sending from prebuilt frames
+ * <frms>. Each packet is stored in a distinct datagram written to <buf>.
+ *
+ * Each datagram is prepended with a two-field header: the datagram length and
+ * the address of the packet contained in the datagram.
+ *
+ * Returns the number of bytes prepared in packets if succeeded (may be 0), or
+ * -1 if something wrong happened.
+ */
+static int qc_prep_app_pkts(struct quic_conn *qc, struct buffer *buf,
+ struct list *frms)
+{
+ int ret = -1, cc;
+ struct quic_enc_level *qel;
+ unsigned char *end, *pos;
+ struct quic_tx_packet *pkt;
+ size_t total;
+
+ TRACE_ENTER(QUIC_EV_CONN_PHPKTS, qc);
+
+ qel = qc->ael;
+ total = 0;
+ pos = (unsigned char *)b_tail(buf);
+ cc = qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE;
+ /* Each datagram is prepended with its length followed by the address
+ * of the first packet in the datagram (QUIC_DGRAM_HEADLEN).
+ */
+ while ((!cc && b_contig_space(buf) >= (int)qc->path->mtu + QUIC_DGRAM_HEADLEN) ||
+ (cc && b_contig_space(buf) >= QUIC_MIN_CC_PKTSIZE + QUIC_DGRAM_HEADLEN)) {
+ int err, probe, must_ack;
+
+ TRACE_PROTO("TX prep app pkts", QUIC_EV_CONN_PHPKTS, qc, qel, frms);
+ probe = 0;
+ /* We do not probe if an immediate close was asked */
+ if (!cc)
+ probe = qel->pktns->tx.pto_probe;
+
+ if (!qc_may_build_pkt(qc, frms, qel, cc, probe, &must_ack))
+ break;
+
+ /* Leave room for the datagram header */
+ pos += QUIC_DGRAM_HEADLEN;
+ if (cc) {
+ end = pos + QUIC_MIN_CC_PKTSIZE;
+ }
+ else if (!quic_peer_validated_addr(qc) && qc_is_listener(qc)) {
+ end = pos + QUIC_MIN(qc->path->mtu, quic_may_send_bytes(qc));
+ }
+ else {
+ end = pos + qc->path->mtu;
+ }
+
+ pkt = qc_build_pkt(&pos, end, qel, &qel->tls_ctx, frms, qc, NULL, 0,
+ QUIC_PACKET_TYPE_SHORT, must_ack, 0, probe, cc, &err);
+ switch (err) {
+ case -3:
+ qc_purge_txbuf(qc, buf);
+ goto leave;
+ case -2:
+ // trace already emitted by function above
+ goto leave;
+ case -1:
+ /* As we provide qc_build_pkt() with an enough big buffer to fulfill an
+ * MTU, we are here because of the congestion control window. There is
+ * no need to try to reuse this buffer.
+ */
+ TRACE_PROTO("could not prepare anymore packet", QUIC_EV_CONN_PHPKTS, qc, qel);
+ goto out;
+ default:
+ break;
+ }
+
+		/* This is to please GCC. We cannot have (err >= 0 && !pkt) */
+ BUG_ON(!pkt);
+
+ if (qc->flags & QUIC_FL_CONN_RETRANS_OLD_DATA)
+ pkt->flags |= QUIC_FL_TX_PACKET_PROBE_WITH_OLD_DATA;
+
+ total += pkt->len;
+
+ /* Write datagram header. */
+ qc_txb_store(buf, pkt->len, pkt);
+ /* Build only one datagram when an immediate close is required. */
+ if (cc)
+ break;
+ }
+
+ out:
+ if (total && cc) {
+ BUG_ON(buf != &qc->tx.cc_buf);
+ qc->tx.cc_dgram_len = total;
+ }
+ ret = total;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
+ return ret;
+}
+
+/* Free all frames in <l> list. In addition also remove all these frames
+ * from the original ones if they are the results of duplications.
+ */
+static inline void qc_free_frm_list(struct quic_conn *qc, struct list *l)
+{
+ struct quic_frame *frm, *frmbak;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+ list_for_each_entry_safe(frm, frmbak, l, list) {
+ LIST_DEL_INIT(&frm->ref);
+ qc_frm_free(qc, &frm);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+}
+
+/* Free <pkt> TX packet and all the packets coalesced to it. */
+static inline void qc_free_tx_coalesced_pkts(struct quic_conn *qc,
+ struct quic_tx_packet *p)
+{
+ struct quic_tx_packet *pkt, *nxt_pkt;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+ for (pkt = p; pkt; pkt = nxt_pkt) {
+ qc_free_frm_list(qc, &pkt->frms);
+ nxt_pkt = pkt->next;
+ pool_free(pool_head_quic_tx_packet, pkt);
+ }
+
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+}
+
/* Purge <buf> TX buffer of all its prepared packets.
 * Each datagram stored in <buf> is prefixed by a header made of its length
 * (u16) immediately followed by the address of the first packet of the
 * datagram (this is the layout read back by qc_send_ppkts()). All coalesced
 * packets of every datagram are freed and the buffer is fully drained.
 */
static void qc_purge_tx_buf(struct quic_conn *qc, struct buffer *buf)
{
	while (b_contig_data(buf, 0)) {
		uint16_t dglen;
		struct quic_tx_packet *pkt;
		size_t headlen = sizeof dglen + sizeof pkt;

		/* Parse the per-datagram header: length then first packet address. */
		dglen = read_u16(b_head(buf));
		pkt = read_ptr(b_head(buf) + sizeof dglen);
		qc_free_tx_coalesced_pkts(qc, pkt);
		b_del(buf, dglen + headlen);
	}

	/* The buffer must be completely empty at this point. */
	BUG_ON(b_data(buf));
}
+
+/* Send datagrams stored in <buf>.
+ *
+ * This function returns 1 for success. On error, there is several behavior
+ * depending on underlying sendto() error :
+ * - for an unrecoverable error, 0 is returned and connection is killed.
+ * - a transient error is handled differently if connection has its owned
+ * socket. If this is the case, 0 is returned and socket is subscribed on the
+ * poller. The other case is assimilated to a success case with 1 returned.
+ * Remaining data are purged from the buffer and will eventually be detected
+ * as lost which gives the opportunity to retry sending.
+ */
+int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx)
+{
+ int ret = 0;
+ struct quic_conn *qc;
+ char skip_sendto = 0;
+
+ qc = ctx->qc;
+ TRACE_ENTER(QUIC_EV_CONN_SPPKTS, qc);
+ while (b_contig_data(buf, 0)) {
+ unsigned char *pos;
+ struct buffer tmpbuf = { };
+ struct quic_tx_packet *first_pkt, *pkt, *next_pkt;
+ uint16_t dglen;
+ size_t headlen = sizeof dglen + sizeof first_pkt;
+ unsigned int time_sent;
+
+ pos = (unsigned char *)b_head(buf);
+ dglen = read_u16(pos);
+ BUG_ON_HOT(!dglen); /* this should not happen */
+
+ pos += sizeof dglen;
+ first_pkt = read_ptr(pos);
+ pos += sizeof first_pkt;
+ tmpbuf.area = (char *)pos;
+ tmpbuf.size = tmpbuf.data = dglen;
+
+ TRACE_PROTO("TX dgram", QUIC_EV_CONN_SPPKTS, qc);
+ /* If sendto is on error just skip the call to it for the rest
+ * of the loop but continue to purge the buffer. Data will be
+ * transmitted when QUIC packets are detected as lost on our
+ * side.
+ *
+ * TODO use fd-monitoring to detect when send operation can be
+ * retry. This should improve the bandwidth without relying on
+ * retransmission timer. However, it requires a major rework on
+ * quic-conn fd management.
+ */
+ if (!skip_sendto) {
+ int ret = qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0);
+ if (ret < 0) {
+ TRACE_ERROR("sendto fatal error", QUIC_EV_CONN_SPPKTS, qc, first_pkt);
+ qc_kill_conn(qc);
+ qc_free_tx_coalesced_pkts(qc, first_pkt);
+ b_del(buf, dglen + headlen);
+ qc_purge_tx_buf(qc, buf);
+ goto leave;
+ }
+ else if (!ret) {
+ /* Connection owned socket : poller will wake us up when transient error is cleared. */
+ if (qc_test_fd(qc)) {
+ TRACE_ERROR("sendto error, subscribe to poller", QUIC_EV_CONN_SPPKTS, qc);
+ goto leave;
+ }
+
+ /* No connection owned-socket : rely on retransmission to retry sending. */
+ skip_sendto = 1;
+ TRACE_ERROR("sendto error, simulate sending for the rest of data", QUIC_EV_CONN_SPPKTS, qc);
+ }
+ }
+
+ b_del(buf, dglen + headlen);
+ qc->bytes.tx += tmpbuf.data;
+ time_sent = now_ms;
+
+ for (pkt = first_pkt; pkt; pkt = next_pkt) {
+ /* RFC 9000 14.1 Initial datagram size
+ * a server MUST expand the payload of all UDP datagrams carrying ack-eliciting
+ * Initial packets to at least the smallest allowed maximum datagram size of
+ * 1200 bytes.
+ */
+ qc->cntrs.sent_pkt++;
+ BUG_ON_HOT(pkt->type == QUIC_PACKET_TYPE_INITIAL &&
+ (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING) &&
+ dglen < QUIC_INITIAL_PACKET_MINLEN);
+
+ pkt->time_sent = time_sent;
+ if (pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING) {
+ pkt->pktns->tx.time_of_last_eliciting = time_sent;
+ qc->path->ifae_pkts++;
+ if (qc->flags & QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ)
+ qc_idle_timer_rearm(qc, 0, 0);
+ }
+ if (!(qc->flags & QUIC_FL_CONN_CLOSING) &&
+ (pkt->flags & QUIC_FL_TX_PACKET_CC)) {
+ qc->flags |= QUIC_FL_CONN_CLOSING;
+ qc_detach_th_ctx_list(qc, 1);
+
+ /* RFC 9000 10.2. Immediate Close:
+ * The closing and draining connection states exist to ensure
+ * that connections close cleanly and that delayed or reordered
+ * packets are properly discarded. These states SHOULD persist
+ * for at least three times the current PTO interval...
+ *
+ * Rearm the idle timeout only one time when entering closing
+ * state.
+ */
+ qc_idle_timer_do_rearm(qc, 0);
+ if (qc->timer_task) {
+ task_destroy(qc->timer_task);
+ qc->timer_task = NULL;
+ }
+ }
+ qc->path->in_flight += pkt->in_flight_len;
+ pkt->pktns->tx.in_flight += pkt->in_flight_len;
+ if (pkt->in_flight_len)
+ qc_set_timer(qc);
+ TRACE_PROTO("TX pkt", QUIC_EV_CONN_SPPKTS, qc, pkt);
+ next_pkt = pkt->next;
+ quic_tx_packet_refinc(pkt);
+ eb64_insert(&pkt->pktns->tx.pkts, &pkt->pn_node);
+ }
+ }
+
+ ret = 1;
+leave:
+ TRACE_LEAVE(QUIC_EV_CONN_SPPKTS, qc);
+
+ return ret;
+}
+
/* Flush txbuf for <qc> connection. This must be called prior to a packet
 * preparation when txbuf contains older data. A send will be conducted for
 * these data.
 *
 * Returns 1 on success : buffer is empty and can be used for packet
 * preparation. On error 0 is returned.
 */
int qc_purge_txbuf(struct quic_conn *qc, struct buffer *buf)
{
	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	/* This operation can only be conducted if txbuf is not empty. This
	 * case only happens for connection with their owned socket due to an
	 * older transient sendto() error.
	 */
	BUG_ON(!qc_test_fd(qc));

	if (b_data(buf) && !qc_send_ppkts(buf, qc->xprt_ctx)) {
		/* Release the TX buffer only if the connection is being killed. */
		if (qc->flags & QUIC_FL_CONN_TO_KILL)
			qc_txb_release(qc);
		TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TXPKT, qc);
		return 0;
	}

	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return 1;
}
+
/* Try to send application frames from list <frms> on connection <qc>.
 *
 * Use qc_send_app_probing wrapper when probing with old data.
 *
 * Returns 1 on success. Some data might not have been sent due to congestion,
 * in this case they are left in <frms> input list. The caller may subscribe on
 * quic-conn to retry later.
 *
 * Returns 0 on critical error.
 * TODO review and classify more distinctly transient from definitive errors to
 * allow callers to properly handle it.
 */
int qc_send_app_pkts(struct quic_conn *qc, struct list *frms)
{
	int status = 0, ret;
	struct buffer *buf;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	buf = qc_get_txb(qc);
	if (!buf) {
		TRACE_ERROR("could not get a buffer", QUIC_EV_CONN_TXPKT, qc);
		goto err;
	}

	/* Flush any data left over from a previous transient sendto() error
	 * before preparing new packets.
	 */
	if (b_data(buf) && !qc_purge_txbuf(qc, buf))
		goto err;

	/* Prepare and send packets until we could not further prepare packets. */
	do {
		/* Currently buf cannot be non-empty at this stage. Even if a
		 * previous sendto() has failed it is emptied to simulate
		 * packet emission and rely on QUIC lost detection to try to
		 * emit it.
		 */
		BUG_ON_HOT(b_data(buf));
		b_reset(buf);

		ret = qc_prep_app_pkts(qc, buf, frms);

		if (b_data(buf) && !qc_send_ppkts(buf, qc->xprt_ctx)) {
			if (qc->flags & QUIC_FL_CONN_TO_KILL)
				qc_txb_release(qc);
			goto err;
		}
	} while (ret > 0);

	qc_txb_release(qc);
	/* A negative preparation result is a fatal error. */
	if (ret < 0)
		goto err;

	status = 1;
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return status;

 err:
	TRACE_DEVEL("leaving in error", QUIC_EV_CONN_TXPKT, qc);
	return 0;
}
+
/* Try to send application frames from list <frms> on connection <qc>. Use this
 * function when probing is required.
 *
 * Returns the result from qc_send_app_pkts function.
 */
static forceinline int qc_send_app_probing(struct quic_conn *qc,
                                           struct list *frms)
{
	int ret;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	TRACE_PROTO("preparing old data (probing)", QUIC_EV_CONN_FRMLIST, qc, frms);
	/* Temporarily flag the connection as retransmitting old data so that
	 * packets built during this call are marked accordingly; the flag is
	 * cleared right after emission.
	 */
	qc->flags |= QUIC_FL_CONN_RETRANS_OLD_DATA;
	ret = qc_send_app_pkts(qc, frms);
	qc->flags &= ~QUIC_FL_CONN_RETRANS_OLD_DATA;

	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return ret;
}
+
/* Try to send application frames from list <frms> on connection <qc>. This
 * function is provided for MUX upper layer usage only.
 *
 * Returns the result from qc_send_app_pkts function.
 */
int qc_send_mux(struct quic_conn *qc, struct list *frms)
{
	int ret;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
	BUG_ON(qc->mux_state != QC_MUX_READY); /* Only the MUX can use this function so it must be ready. */

	if (qc->conn->flags & CO_FL_SOCK_WR_SH) {
		/* Write side already shut: report the error on the read side too
		 * and refuse to send anything.
		 */
		qc->conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
		TRACE_DEVEL("connection on error", QUIC_EV_CONN_TXPKT, qc);
		return 0;
	}

	/* Try to send post handshake frames first unless on 0-RTT. */
	if ((qc->flags & QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS) &&
	    qc->state >= QUIC_HS_ST_COMPLETE) {
		quic_build_post_handshake_frames(qc);
		qc_send_app_pkts(qc, &qc->ael->pktns->tx.frms);
	}

	TRACE_STATE("preparing data (from MUX)", QUIC_EV_CONN_TXPKT, qc);
	/* Mark that emission happens in MUX context for the duration of the call. */
	qc->flags |= QUIC_FL_CONN_TX_MUX_CONTEXT;
	ret = qc_send_app_pkts(qc, frms);
	qc->flags &= ~QUIC_FL_CONN_TX_MUX_CONTEXT;

	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return ret;
}
+
+/* Return the encryption level following the one which contains <el> list head
+ * depending on <retrans> TX mode (retranmission or not).
+ */
+static inline struct quic_enc_level *qc_list_next_qel(struct list *el, int retrans)
+{
+ return !retrans ? LIST_NEXT(el, struct quic_enc_level *, list) :
+ LIST_NEXT(el, struct quic_enc_level *, retrans);
+}
+
+/* Return the encryption level following <qel> depending on <retrans> TX mode
+ * (retranmission or not).
+ */
+static inline struct quic_enc_level *qc_next_qel(struct quic_enc_level *qel, int retrans)
+{
+ struct list *el = !retrans ? &qel->list : &qel->retrans;
+
+ return qc_list_next_qel(el, retrans);
+}
+
+/* Return 1 if <qel> is at the head of its list, 0 if not. */
+static inline int qc_qel_is_head(struct quic_enc_level *qel, struct list *l,
+ int retrans)
+{
+ return !retrans ? &qel->list == l : &qel->retrans == l;
+}
+
/* Select <*tls_ctx>, <*frms> and <*ver> for the encryption level <qel> of <qc> QUIC
 * connection, depending on its state, especially the negotiated version and if
 * retransmissions are required. If this is the case, <qels> is the list of encryption
 * levels to be used, or NULL if no retransmissions are required.
 * Never fails.
 */
static inline void qc_select_tls_frms_ver(struct quic_conn *qc,
                                          struct quic_enc_level *qel,
                                          struct quic_tls_ctx **tls_ctx,
                                          struct list **frms,
                                          const struct quic_version **ver,
                                          struct list *qels)
{
	if (qc->negotiated_version) {
		*ver = qc->negotiated_version;
		/* NOTE(review): the Initial level uses the dedicated <nictx>
		 * context when a version was negotiated — presumably the TLS
		 * context keyed for the negotiated version; confirm.
		 */
		if (qel == qc->iel)
			*tls_ctx = qc->nictx;
		else
			*tls_ctx = &qel->tls_ctx;
	}
	else {
		*ver = qc->original_version;
		*tls_ctx = &qel->tls_ctx;
	}

	/* Regular emission uses the packet number space frame list;
	 * retransmission uses the level's dedicated retransmission list.
	 */
	if (!qels)
		*frms = &qel->pktns->tx.frms;
	else
		*frms = qel->retrans_frms;
}
+
/* Prepare as much as possible QUIC datagrams/packets for sending from <qels>
 * list of encryption levels. Several packets can be coalesced into a single
 * datagram. The result is written into <buf>. Note that if <qels> is NULL,
 * the encryption levels which will be used are those currently allocated
 * and attached to the connection.
 *
 * Each datagram is prepended by a two fields header : the datagram length and
 * the address of first packet in the datagram.
 *
 * Returns the number of bytes prepared in datagrams/packets if succeeded
 * (may be 0), or -1 if something wrong happened.
 */
int qc_prep_hpkts(struct quic_conn *qc, struct buffer *buf, struct list *qels)
{
	int ret, cc, retrans, padding;
	struct quic_tx_packet *first_pkt, *prv_pkt;
	unsigned char *end, *pos;
	uint16_t dglen;
	size_t total;
	struct list *qel_list;
	struct quic_enc_level *qel;

	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);
	/* Currently qc_prep_hpkts() does not handle buffer wrapping so the
	 * caller must ensure that buf is reset.
	 */
	BUG_ON_HOT(buf->head || buf->data);

	ret = -1;
	cc = qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE;
	retrans = !!qels;
	padding = 0;
	first_pkt = prv_pkt = NULL;
	end = pos = (unsigned char *)b_head(buf);
	dglen = 0;
	total = 0;

	/* Walk either the caller-provided retransmission levels or all the
	 * levels attached to the connection.
	 */
	qel_list = qels ? qels : &qc->qel_list;
	qel = qc_list_next_qel(qel_list, retrans);
	while (!qc_qel_is_head(qel, qel_list, retrans)) {
		struct quic_tls_ctx *tls_ctx;
		const struct quic_version *ver;
		struct list *frms, *next_frms;
		struct quic_enc_level *next_qel;

		if (qel == qc->eel) {
			/* Next encryption level. NOTE(review): <eel> is skipped
			 * here — presumably the early-data (0-RTT) level is never
			 * used for emission by this path; confirm.
			 */
			qel = qc_next_qel(qel, retrans);
			continue;
		}

		qc_select_tls_frms_ver(qc, qel, &tls_ctx, &frms, &ver, qels);

		next_qel = qc_next_qel(qel, retrans);
		next_frms = qc_qel_is_head(next_qel, qel_list, retrans) ? NULL :
			!qels ? &next_qel->pktns->tx.frms : next_qel->retrans_frms;

		/* Build as much as datagrams at <qel> encryption level.
		 * Each datagram is prepended with its length followed by the address
		 * of the first packet in the datagram (QUIC_DGRAM_HEADLEN).
		 */
		while ((!cc && b_contig_space(buf) >= (int)qc->path->mtu + QUIC_DGRAM_HEADLEN) ||
		       (cc && b_contig_space(buf) >= QUIC_MIN_CC_PKTSIZE + QUIC_DGRAM_HEADLEN) || prv_pkt) {
			int err, probe, must_ack;
			enum quic_pkt_type pkt_type;
			struct quic_tx_packet *cur_pkt;

			TRACE_PROTO("TX prep pkts", QUIC_EV_CONN_PHPKTS, qc, qel);
			probe = 0;
			/* We do not probe if an immediate close was asked */
			if (!cc)
				probe = qel->pktns->tx.pto_probe;

			if (!qc_may_build_pkt(qc, frms, qel, cc, probe, &must_ack)) {
				/* Nothing more to build at this level: close the
				 * pending datagram if one was being coalesced and no
				 * further level remains.
				 */
				if (prv_pkt && qc_qel_is_head(next_qel, qel_list, retrans)) {
					qc_txb_store(buf, dglen, first_pkt);
					/* Build only one datagram when an immediate close is required. */
					if (cc)
						goto out;
				}

				TRACE_DEVEL("next encryption level", QUIC_EV_CONN_PHPKTS, qc);
				break;
			}

			if (!prv_pkt) {
				/* Leave room for the datagram header */
				pos += QUIC_DGRAM_HEADLEN;
				if (cc) {
					end = pos + QUIC_MIN_CC_PKTSIZE;
				}
				else if (!quic_peer_validated_addr(qc) && qc_is_listener(qc)) {
					/* Anti-amplification limit: cap the datagram to
					 * what we are still allowed to send until the peer
					 * address is validated.
					 */
					end = pos + QUIC_MIN(qc->path->mtu, quic_may_send_bytes(qc));
				}
				else {
					end = pos + qc->path->mtu;
				}
			}

			/* RFC 9000 14.1 Initial datagram size
			 * a server MUST expand the payload of all UDP datagrams carrying ack-eliciting
			 * Initial packets to at least the smallest allowed maximum datagram size of
			 * 1200 bytes.
			 *
			 * Ensure that no ack-eliciting packets are sent into too small datagrams
			 */
			if (qel == qc->iel && !LIST_ISEMPTY(frms)) {
				if (end - pos < QUIC_INITIAL_PACKET_MINLEN) {
					TRACE_PROTO("No more enough room to build an Initial packet",
					            QUIC_EV_CONN_PHPKTS, qc);
					break;
				}

				/* Pad this Initial packet if there is no ack-eliciting frames to send from
				 * the next packet number space.
				 */
				if (!next_frms || LIST_ISEMPTY(next_frms))
					padding = 1;
			}

			pkt_type = quic_enc_level_pkt_type(qc, qel);
			cur_pkt = qc_build_pkt(&pos, end, qel, tls_ctx, frms,
			                       qc, ver, dglen, pkt_type,
			                       must_ack, padding, probe, cc, &err);
			switch (err) {
			case -3:
				/* Fatal build error: store what is complete, then
				 * purge everything prepared so far.
				 */
				if (first_pkt)
					qc_txb_store(buf, dglen, first_pkt);
				qc_purge_tx_buf(qc, buf);
				goto leave;
			case -2:
				// trace already emitted by function above
				goto leave;
			case -1:
				/* If there was already a correct packet present, set the
				 * current datagram as prepared into <cbuf>.
				 */
				if (prv_pkt)
					qc_txb_store(buf, dglen, first_pkt);
				TRACE_PROTO("could not prepare anymore packet", QUIC_EV_CONN_PHPKTS, qc, qel);
				goto out;
			default:
				break;
			}

			/* This is to please to GCC. We cannot have (err >= 0 && !cur_pkt) */
			BUG_ON(!cur_pkt);

			total += cur_pkt->len;
			dglen += cur_pkt->len;

			if (qc->flags & QUIC_FL_CONN_RETRANS_OLD_DATA)
				cur_pkt->flags |= QUIC_FL_TX_PACKET_PROBE_WITH_OLD_DATA;

			/* keep trace of the first packet in the datagram */
			if (!first_pkt)
				first_pkt = cur_pkt;

			/* Attach the current one to the previous one and vice versa */
			if (prv_pkt) {
				prv_pkt->next = cur_pkt;
				cur_pkt->prev = prv_pkt;
				cur_pkt->flags |= QUIC_FL_TX_PACKET_COALESCED;
			}

			/* If there is no more packet to build for this encryption level,
			 * select the next one <next_qel>, if any, to coalesce a packet in
			 * the same datagram, except if <qel> is the Application data
			 * encryption level which cannot be selected to do that.
			 */
			if (LIST_ISEMPTY(frms) && qel != qc->ael &&
			    !qc_qel_is_head(next_qel, qel_list, retrans)) {
				if (qel == qc->iel &&
				    (!qc_is_listener(qc) ||
				     cur_pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING))
					padding = 1;

				prv_pkt = cur_pkt;
				break;
			}
			else {
				/* Datagram complete: store its header and reset the
				 * per-datagram state for the next one.
				 */
				qc_txb_store(buf, dglen, first_pkt);
				/* Build only one datagram when an immediate close is required. */
				if (cc)
					goto out;
				first_pkt = NULL;
				dglen = 0;
				padding = 0;
				prv_pkt = NULL;
			}
		}

		/* Next encryption level */
		qel = next_qel;
	}

 out:
	if (cc && total) {
		/* Immediate close datagrams are built into the dedicated CC buffer. */
		BUG_ON(buf != &qc->tx.cc_buf);
		BUG_ON(dglen != total);
		qc->tx.cc_dgram_len = dglen;
	}

	ret = total;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_PHPKTS, qc);
	return ret;
}
+
/* Send handshake packets from up to two encryption levels <qel1> and <qel2>
 * for <qc> QUIC connection. The frame lists to retransmit must have been set
 * beforehand in the ->retrans_frms member of each level (see
 * qc_dgrams_retransmit()). <old_data> is used as boolean to send data already
 * sent but not already acknowledged (in flight).
 * Returns 1 if succeeded, 0 if not.
 */
int qc_send_hdshk_pkts(struct quic_conn *qc, int old_data,
                       struct quic_enc_level *qel1, struct quic_enc_level *qel2)
{
	int ret, status = 0;
	struct buffer *buf = qc_get_txb(qc);
	struct list qels = LIST_HEAD_INIT(qels);

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	if (!buf) {
		TRACE_ERROR("buffer allocation failed", QUIC_EV_CONN_TXPKT, qc);
		goto leave;
	}

	if (b_data(buf) && !qc_purge_txbuf(qc, buf)) {
		TRACE_ERROR("Could not purge TX buffer", QUIC_EV_CONN_TXPKT, qc);
		goto out;
	}

	/* Currently buf cannot be non-empty at this stage. Even if a previous
	 * sendto() has failed it is emptied to simulate packet emission and
	 * rely on QUIC lost detection to try to emit it.
	 */
	BUG_ON_HOT(b_data(buf));
	b_reset(buf);

	if (old_data) {
		TRACE_STATE("old data for probing asked", QUIC_EV_CONN_TXPKT, qc);
		qc->flags |= QUIC_FL_CONN_RETRANS_OLD_DATA;
	}

	/* Chain the requested levels into the local retransmission list passed
	 * to qc_prep_hpkts().
	 */
	if (qel1) {
		BUG_ON(LIST_INLIST(&qel1->retrans));
		LIST_APPEND(&qels, &qel1->retrans);
	}

	if (qel2) {
		BUG_ON(LIST_INLIST(&qel2->retrans));
		LIST_APPEND(&qels, &qel2->retrans);
	}

	ret = qc_prep_hpkts(qc, buf, &qels);
	if (ret == -1) {
		qc_txb_release(qc);
		TRACE_ERROR("Could not build some packets", QUIC_EV_CONN_TXPKT, qc);
		goto out;
	}

	if (ret && !qc_send_ppkts(buf, qc->xprt_ctx)) {
		if (qc->flags & QUIC_FL_CONN_TO_KILL)
			qc_txb_release(qc);
		TRACE_ERROR("Could not send some packets", QUIC_EV_CONN_TXPKT, qc);
		goto out;
	}

	qc_txb_release(qc);
	status = 1;

 out:
	/* Always unlink the levels and reset their retransmission frame lists,
	 * on success as on error.
	 */
	if (qel1) {
		LIST_DEL_INIT(&qel1->retrans);
		qel1->retrans_frms = NULL;
	}

	if (qel2) {
		LIST_DEL_INIT(&qel2->retrans);
		qel2->retrans_frms = NULL;
	}

	TRACE_STATE("no more need old data for probing", QUIC_EV_CONN_TXPKT, qc);
	qc->flags &= ~QUIC_FL_CONN_RETRANS_OLD_DATA;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return status;
}
+
/* Retransmit up to two datagrams depending on packet number space.
 * Returns 1 if succeeded, 0 if failed.
 */
int qc_dgrams_retransmit(struct quic_conn *qc)
{
	int ret = 0;
	int sret;
	struct quic_pktns *ipktns = qc->ipktns;
	struct quic_pktns *hpktns = qc->hpktns;
	struct quic_pktns *apktns = qc->apktns;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	/* Note that if the Initial packet number space is not discarded,
	 * this is also the case for the Handshake packet number space.
	 */
	if (ipktns && (ipktns->flags & QUIC_FL_PKTNS_PROBE_NEEDED)) {
		int i;

		for (i = 0; i < QUIC_MAX_NB_PTO_DGRAMS; i++) {
			struct list ifrms = LIST_HEAD_INIT(ifrms);
			struct list hfrms = LIST_HEAD_INIT(hfrms);
			struct list qels = LIST_HEAD_INIT(qels);

			qc_prep_hdshk_fast_retrans(qc, &ifrms, &hfrms);
			TRACE_DEVEL("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, &ifrms);
			TRACE_DEVEL("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, &hfrms);
			if (!LIST_ISEMPTY(&ifrms)) {
				/* Probe with old (in-flight) data from both the
				 * Initial and, when present, the Handshake spaces.
				 */
				ipktns->tx.pto_probe = 1;
				if (!LIST_ISEMPTY(&hfrms))
					hpktns->tx.pto_probe = 1;
				qc->iel->retrans_frms = &ifrms;
				if (qc->hel)
					qc->hel->retrans_frms = &hfrms;
				sret = qc_send_hdshk_pkts(qc, 1, qc->iel, qc->hel);
				qc_free_frm_list(qc, &ifrms);
				qc_free_frm_list(qc, &hfrms);
				if (!sret)
					goto leave;
			}
			else {
				/* We are in the case where the anti-amplification limit will be
				 * reached after having sent this datagram or some handshake frames
				 * could not be allocated. There is no need to send more than one
				 * datagram.
				 */
				ipktns->tx.pto_probe = 1;
				qc->iel->retrans_frms = &ifrms;
				sret = qc_send_hdshk_pkts(qc, 0, qc->iel, NULL);
				qc_free_frm_list(qc, &ifrms);
				qc_free_frm_list(qc, &hfrms);
				if (!sret)
					goto leave;

				break;
			}
		}
		TRACE_STATE("no more need to probe Initial packet number space",
		            QUIC_EV_CONN_TXPKT, qc);
		ipktns->flags &= ~QUIC_FL_PKTNS_PROBE_NEEDED;
		if (hpktns)
			hpktns->flags &= ~QUIC_FL_PKTNS_PROBE_NEEDED;
	}
	else {
		int i;

		if (hpktns && (hpktns->flags & QUIC_FL_PKTNS_PROBE_NEEDED)) {
			hpktns->tx.pto_probe = 0;
			for (i = 0; i < QUIC_MAX_NB_PTO_DGRAMS; i++) {
				struct list frms1 = LIST_HEAD_INIT(frms1);

				qc_prep_fast_retrans(qc, hpktns, &frms1, NULL);
				TRACE_DEVEL("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, &frms1);
				if (!LIST_ISEMPTY(&frms1)) {
					hpktns->tx.pto_probe = 1;
					qc->hel->retrans_frms = &frms1;
					sret = qc_send_hdshk_pkts(qc, 1, qc->hel, NULL);
					qc_free_frm_list(qc, &frms1);
					if (!sret)
						goto leave;
				}
			}
			TRACE_STATE("no more need to probe Handshake packet number space",
			            QUIC_EV_CONN_TXPKT, qc);
			hpktns->flags &= ~QUIC_FL_PKTNS_PROBE_NEEDED;
		}
		else if (apktns && (apktns->flags & QUIC_FL_PKTNS_PROBE_NEEDED)) {
			struct list frms2 = LIST_HEAD_INIT(frms2);
			struct list frms1 = LIST_HEAD_INIT(frms1);

			apktns->tx.pto_probe = 0;
			qc_prep_fast_retrans(qc, apktns, &frms1, &frms2);
			TRACE_PROTO("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, &frms1);
			TRACE_PROTO("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, &frms2);

			/* Up to two application-level probe datagrams, one per list. */
			if (!LIST_ISEMPTY(&frms1)) {
				apktns->tx.pto_probe = 1;
				sret = qc_send_app_probing(qc, &frms1);
				qc_free_frm_list(qc, &frms1);
				if (!sret) {
					qc_free_frm_list(qc, &frms2);
					goto leave;
				}
			}

			if (!LIST_ISEMPTY(&frms2)) {
				apktns->tx.pto_probe = 1;
				sret = qc_send_app_probing(qc, &frms2);
				qc_free_frm_list(qc, &frms2);
				if (!sret)
					goto leave;
			}
			TRACE_STATE("no more need to probe 01RTT packet number space",
			            QUIC_EV_CONN_TXPKT, qc);
			apktns->flags &= ~QUIC_FL_PKTNS_PROBE_NEEDED;
		}
	}

	ret = 1;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return ret;
}
+
/*
 * Send a Version Negotiation packet on response to <pkt> on socket <fd> to
 * address <addr>.
 * Implementation of RFC9000 6. Version Negotiation
 *
 * TODO implement a rate-limiting sending of Version Negotiation packets
 *
 * Returns 0 on success else non-zero (note the inverted convention: <ret> is
 * 1 internally on success and the function returns !ret).
 */
int send_version_negotiation(int fd, struct sockaddr_storage *addr,
                             struct quic_rx_packet *pkt)
{
	char buf[256];
	int ret = 0, i = 0, j;
	uint32_t version;
	const socklen_t addrlen = get_addr_len(addr);

	TRACE_ENTER(QUIC_EV_CONN_TXPKT);
	/*
	 * header form
	 * long header, fixed bit to 0 for Version Negotiation
	 */
	/* TODO: RAND_bytes() should be replaced? */
	/* Only the first byte is randomized; its unused bits are kept random
	 * (NOTE(review): presumably to follow the RFC 9000 17.2.1 advice that
	 * the unused bits be set randomly — confirm).
	 */
	if (RAND_bytes((unsigned char *)buf, 1) != 1) {
		TRACE_ERROR("RAND_bytes() error", QUIC_EV_CONN_TXPKT);
		goto out;
	}

	/* Force the long header form bit on top of the random first byte. */
	buf[i++] |= '\x80';
	/* null version for Version Negotiation */
	buf[i++] = '\x00';
	buf[i++] = '\x00';
	buf[i++] = '\x00';
	buf[i++] = '\x00';

	/* source connection id */
	buf[i++] = pkt->scid.len;
	memcpy(&buf[i], pkt->scid.data, pkt->scid.len);
	i += pkt->scid.len;

	/* destination connection id */
	buf[i++] = pkt->dcid.len;
	memcpy(&buf[i], pkt->dcid.data, pkt->dcid.len);
	i += pkt->dcid.len;

	/* supported version */
	for (j = 0; j < quic_versions_nb; j++) {
		version = htonl(quic_versions[j].num);
		memcpy(&buf[i], &version, sizeof(version));
		i += sizeof(version);
	}

	if (sendto(fd, buf, i, 0, (struct sockaddr *)addr, addrlen) < 0)
		goto out;

	ret = 1;
 out:
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT);
	return !ret;
}
+
/* Send a stateless reset packet depending on <rxpkt> RX packet information
 * from the UDP socket of listener <l> to <dstaddr>.
 * Return 1 if succeeded, 0 if not.
 */
int send_stateless_reset(struct listener *l, struct sockaddr_storage *dstaddr,
                         struct quic_rx_packet *rxpkt)
{
	int ret = 0, pktlen, rndlen;
	unsigned char pkt[64];
	const socklen_t addrlen = get_addr_len(dstaddr);
	struct proxy *prx;
	struct quic_counters *prx_counters;

	TRACE_ENTER(QUIC_EV_STATELESS_RST);

	prx = l->bind_conf->frontend;
	prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe, &quic_stats_module);
	/* 10.3 Stateless Reset (https://www.rfc-editor.org/rfc/rfc9000.html#section-10.3)
	 * The resulting minimum size of 21 bytes does not guarantee that a Stateless
	 * Reset is difficult to distinguish from other packets if the recipient requires
	 * the use of a connection ID. To achieve that end, the endpoint SHOULD ensure
	 * that all packets it sends are at least 22 bytes longer than the minimum
	 * connection ID length that it requests the peer to include in its packets,
	 * adding PADDING frames as necessary. This ensures that any Stateless Reset
	 * sent by the peer is indistinguishable from a valid packet sent to the endpoint.
	 * An endpoint that sends a Stateless Reset in response to a packet that is
	 * 43 bytes or shorter SHOULD send a Stateless Reset that is one byte shorter
	 * than the packet it responds to.
	 */

	/* Note that we build at most a 42 bytes QUIC packet to mimic a short packet */
	/* Short incoming packets (<= 43 bytes) are answered with a reset one byte
	 * shorter; longer ones fall back to the fixed minimum length below.
	 */
	pktlen = rxpkt->len <= 43 ? rxpkt->len - 1 : 0;
	pktlen = QUIC_MAX(QUIC_STATELESS_RESET_PACKET_MINLEN, pktlen);
	/* The random header fills everything before the trailing reset token. */
	rndlen = pktlen - QUIC_STATELESS_RESET_TOKEN_LEN;

	/* Put a header of random bytes */
	/* TODO: RAND_bytes() should be replaced */
	if (RAND_bytes(pkt, rndlen) != 1) {
		TRACE_ERROR("RAND_bytes() failed", QUIC_EV_STATELESS_RST);
		goto leave;
	}

	/* Clear the most significant bit, and set the second one */
	*pkt = (*pkt & ~0x80) | 0x40;
	if (!quic_stateless_reset_token_cpy(pkt + rndlen, QUIC_STATELESS_RESET_TOKEN_LEN,
	                                    rxpkt->dcid.data, rxpkt->dcid.len))
		goto leave;

	if (sendto(l->rx.fd, pkt, pktlen, 0, (struct sockaddr *)dstaddr, addrlen) < 0)
		goto leave;

	ret = 1;
	HA_ATOMIC_INC(&prx_counters->stateless_reset_sent);
	TRACE_PROTO("stateless reset sent", QUIC_EV_STATELESS_RST, NULL, &rxpkt->dcid);
 leave:
	TRACE_LEAVE(QUIC_EV_STATELESS_RST);
	return ret;
}
+
+/* Return the long packet type matching with <qv> version and <type> */
+static inline int quic_pkt_type(int type, uint32_t version)
+{
+ if (version != QUIC_PROTOCOL_VERSION_2)
+ return type;
+
+ switch (type) {
+ case QUIC_PACKET_TYPE_INITIAL:
+ return 1;
+ case QUIC_PACKET_TYPE_0RTT:
+ return 2;
+ case QUIC_PACKET_TYPE_HANDSHAKE:
+ return 3;
+ case QUIC_PACKET_TYPE_RETRY:
+ return 0;
+ }
+
+ return -1;
+}
+
+
+/* Generate a Retry packet and send it on <fd> socket to <addr> in response to
+ * the Initial <pkt> packet.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int send_retry(int fd, struct sockaddr_storage *addr,
+ struct quic_rx_packet *pkt, const struct quic_version *qv)
+{
+ int ret = 0;
+ unsigned char buf[128];
+ int i = 0, token_len;
+ const socklen_t addrlen = get_addr_len(addr);
+ struct quic_cid scid;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT);
+
+ /* long header(1) | fixed bit(1) | packet type QUIC_PACKET_TYPE_RETRY(2) | unused random bits(4)*/
+ buf[i++] = (QUIC_PACKET_LONG_HEADER_BIT | QUIC_PACKET_FIXED_BIT) |
+ (quic_pkt_type(QUIC_PACKET_TYPE_RETRY, qv->num) << QUIC_PACKET_TYPE_SHIFT) |
+ statistical_prng_range(16);
+ /* version */
+ write_n32(&buf[i], qv->num);
+ i += sizeof(uint32_t);
+
+ /* Use the SCID from <pkt> for Retry DCID. */
+ buf[i++] = pkt->scid.len;
+ memcpy(&buf[i], pkt->scid.data, pkt->scid.len);
+ i += pkt->scid.len;
+
+ /* Generate a new CID to be used as SCID for the Retry packet. */
+ scid.len = QUIC_HAP_CID_LEN;
+ /* TODO: RAND_bytes() should be replaced */
+ if (RAND_bytes(scid.data, scid.len) != 1) {
+ TRACE_ERROR("RAND_bytes() failed", QUIC_EV_CONN_TXPKT);
+ goto out;
+ }
+
+ buf[i++] = scid.len;
+ memcpy(&buf[i], scid.data, scid.len);
+ i += scid.len;
+
+ /* token */
+ if (!(token_len = quic_generate_retry_token(&buf[i], sizeof(buf) - i, qv->num,
+ &pkt->dcid, &pkt->scid, addr))) {
+ TRACE_ERROR("quic_generate_retry_token() failed", QUIC_EV_CONN_TXPKT);
+ goto out;
+ }
+
+ i += token_len;
+
+ /* token integrity tag */
+ if ((sizeof(buf) - i < QUIC_TLS_TAG_LEN) ||
+ !quic_tls_generate_retry_integrity_tag(pkt->dcid.data,
+ pkt->dcid.len, buf, i, qv)) {
+ TRACE_ERROR("quic_tls_generate_retry_integrity_tag() failed", QUIC_EV_CONN_TXPKT);
+ goto out;
+ }
+
+ i += QUIC_TLS_TAG_LEN;
+
+ if (sendto(fd, buf, i, 0, (struct sockaddr *)addr, addrlen) < 0) {
+ TRACE_ERROR("quic_tls_generate_retry_integrity_tag() failed", QUIC_EV_CONN_TXPKT);
+ goto out;
+ }
+
+ ret = 1;
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT);
+ return !ret;
+}
+
/* Write a 32-bit integer to a buffer with <buf> as address.
 * Make <buf> point to the data after this 32-bit value if succeeded.
 * Note that these 32-bit integers are network byte ordered.
 * Returns 0 if failed (not enough room in the buffer), 1 if succeeded.
 */
static inline int quic_write_uint32(unsigned char **buf,
                                    const unsigned char *end, uint32_t val)
{
	uint32_t v;

	/* Cast sizeof to ptrdiff_t so the comparison stays signed and cannot
	 * silently succeed on a negative (corrupted) pointer difference.
	 */
	if (end - *buf < (ptrdiff_t)sizeof val)
		return 0;

	/* Use memcpy() rather than a direct "*(uint32_t *)*buf" store: <*buf>
	 * may be misaligned for uint32_t and the cast also violates strict
	 * aliasing rules (both undefined behavior).
	 */
	v = htonl(val);
	memcpy(*buf, &v, sizeof v);
	*buf += sizeof v;

	return 1;
}
+
/* Return the maximum number of bytes we must use to completely fill a
 * buffer with <sz> as size for a data field of bytes prefixed by its QUIC
 * variable-length (may be 0).
 * Also put in <*len_sz> the size of this QUIC variable-length.
 * So after returning from this function we have : <*len_sz> + <ret> <= <sz>
 * (<*len_sz> = { max(i), i + ret <= <sz> }) .
 * NOTE(review): relies on quic_int_getsize() returning the varint encoded
 * size of its argument and quic_max_int() the largest value encodable on a
 * given size — confirm against their definitions.
 */
static inline size_t max_available_room(size_t sz, size_t *len_sz)
{
	size_t sz_sz, ret;
	size_t diff;

	/* Size of the varint needed to encode <sz> itself. */
	sz_sz = quic_int_getsize(sz);
	if (sz <= sz_sz)
		return 0;

	ret = sz - sz_sz;
	*len_sz = quic_int_getsize(ret);
	/* Difference between the two sizes. Note that <sz_sz> >= <*len_sz>. */
	diff = sz_sz - *len_sz;
	if (unlikely(diff > 0)) {
		/* Let's try to take into account the remaining bytes.
		 *
		 * <----------------> <sz_sz>
		 * <--------------><--------> +----> <max_int>
		 *       <ret>       <len_sz> |
		 * +---------------------------+-----------....
		 * <--------------------------------> <sz>
		 */
		size_t max_int = quic_max_int(*len_sz);

		if (max_int + *len_sz <= sz)
			ret = max_int;
		else
			ret = sz - diff;
	}

	return ret;
}
+
/* This function computes the maximum data we can put into a buffer with <sz> as
 * size prefixed with a variable-length field "Length" whose value is the
 * remaining data length, already filled of <ilen> bytes which must be taken
 * into an account by "Length" field, and finally followed by the data we want
 * to put in this buffer prefixed again by a variable-length field.
 * <sz> is the size of the buffer to fill.
 * <ilen> the number of bytes already put after the "Length" field.
 * <dlen> the number of bytes we want to at most put in the buffer.
 * Also set <*dlen_sz> to the size of the data variable-length we want to put in
 * the buffer. This is typically this function which must be used to fill as
 * much as possible a QUIC packet made of only one CRYPTO or STREAM frames.
 * Returns this computed size if there is enough room in the buffer, 0 if not.
 */
static inline size_t max_stream_data_size(size_t sz, size_t ilen, size_t dlen)
{
	size_t ret, len_sz, dlen_sz;

	/*
	 * The length of variable-length QUIC integers are powers of two.
	 * Look for the first "Length" field value <len_sz> which match our need.
	 * As we must put <ilen> bytes in our buffer, the minimum value for
	 * <len_sz> is the number of bytes required to encode <ilen>.
	 */
	for (len_sz = quic_int_getsize(ilen);
	     len_sz <= QUIC_VARINT_MAX_SIZE;
	     len_sz <<= 1) {
		/* Not even room for the "Length" prefix plus the existing bytes. */
		if (sz < len_sz + ilen)
			return 0;

		ret = max_available_room(sz - len_sz - ilen, &dlen_sz);
		if (!ret)
			return 0;

		/* Check that <*len_sz> matches <ret> value */
		if (len_sz + ilen + dlen_sz + ret <= quic_max_int(len_sz))
			return ret < dlen ? ret : dlen;
	}

	return 0;
}
+
/* Compute the number of bytes needed to encode <pn> packet number, given
 * <largest_acked_pn> the largest acknowledged packet number.
 *
 * About packet number encoding, the RFC says:
 * The sender MUST use a packet number size able to represent more than
 * twice as large a range than the difference between the largest
 * acknowledged packet and packet number being sent.
 */
static inline size_t quic_packet_number_length(int64_t pn,
                                               int64_t largest_acked_pn)
{
	/* Number of packet numbers which must remain distinguishable. */
	const int64_t range = 2 * (pn - largest_acked_pn) + 1;

	if (range <= 0xff)
		return 1;
	if (range <= 0xffff)
		return 2;
	if (range <= 0xffffff)
		return 3;
	return 4;
}
+
/* Encode <pn> packet number with <pn_len> as length in bytes into a buffer with
 * <buf> as current copy address and <end> as pointer to one past the end of
 * this buffer, then advance <*buf> past the encoded field. The 3-byte case is
 * explicitly big endian; write_n16()/write_n32() are assumed to store in
 * network order as well.
 * Returns 1 on success, 0 if fewer than <pn_len> bytes of room are left.
 */
static inline int quic_packet_number_encode(unsigned char **buf,
                                            const unsigned char *end,
                                            uint64_t pn, size_t pn_len)
{
	if (end - *buf < pn_len)
		return 0;

	/* Encode the packet number on its <pn_len> low order bytes.
	 * <pn_len> is always in the 1..4 range (see
	 * quic_packet_number_length()), hence no default case.
	 */
	switch (pn_len) {
	case 1:
		**buf = pn;
		break;
	case 2:
		write_n16(*buf, pn);
		break;
	case 3:
		(*buf)[0] = pn >> 16;
		(*buf)[1] = pn >> 8;
		(*buf)[2] = pn;
		break;
	case 4:
		write_n32(*buf, pn);
		break;
	}
	*buf += pn_len;

	return 1;
}
+
/* This function builds into a buffer at <pos> position a QUIC long packet header,
 * <end> being one byte past the end of this buffer.
 * Return 1 if enough room to build this header, 0 if not.
 */
static int quic_build_packet_long_header(unsigned char **pos, const unsigned char *end,
                                         int type, size_t pn_len,
                                         struct quic_conn *qc, const struct quic_version *ver)
{
	int ret = 0;

	TRACE_ENTER(QUIC_EV_CONN_LPKT, qc);

	/* Room needed: first byte + DCID length byte + SCID length byte
	 * (the "+ 3") plus the version field and both connection IDs.
	 */
	if (end - *pos < sizeof ver->num + qc->dcid.len + qc->scid.len + 3) {
		TRACE_DEVEL("not enough room", QUIC_EV_CONN_LPKT, qc);
		goto leave;
	}

	/* Map the generic packet type to this version's wire encoding. */
	type = quic_pkt_type(type, ver->num);
	/* #0 byte flags: fixed bit, long header bit, packet type and
	 * (packet number length - 1) on the two low order bits.
	 */
	*(*pos)++ = QUIC_PACKET_FIXED_BIT | QUIC_PACKET_LONG_HEADER_BIT |
		(type << QUIC_PACKET_TYPE_SHIFT) | (pn_len - 1);
	/* Version */
	quic_write_uint32(pos, end, ver->num);
	*(*pos)++ = qc->dcid.len;
	/* Destination connection ID */
	if (qc->dcid.len) {
		memcpy(*pos, qc->dcid.data, qc->dcid.len);
		*pos += qc->dcid.len;
	}
	/* Source connection ID */
	*(*pos)++ = qc->scid.len;
	if (qc->scid.len) {
		memcpy(*pos, qc->scid.data, qc->scid.len);
		*pos += qc->scid.len;
	}

	ret = 1;
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_LPKT, qc);
	return ret;
}
+
+/* This function builds into a buffer at <pos> position a QUIC short packet header,
+ * <end> being one byte past the end of this buffer.
+ * Return 1 if enough room to build this header, 0 if not.
+ */
+static int quic_build_packet_short_header(unsigned char **pos, const unsigned char *end,
+ size_t pn_len, struct quic_conn *qc,
+ unsigned char tls_flags)
+{
+ int ret = 0;
+ unsigned char spin_bit =
+ (qc->flags & QUIC_FL_CONN_SPIN_BIT) ? QUIC_PACKET_SPIN_BIT : 0;
+
+ TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
+
+ if (end - *pos < 1 + qc->dcid.len) {
+ TRACE_DEVEL("not enough room", QUIC_EV_CONN_LPKT, qc);
+ goto leave;
+ }
+
+ /* #0 byte flags */
+ *(*pos)++ = QUIC_PACKET_FIXED_BIT | spin_bit |
+ ((tls_flags & QUIC_FL_TLS_KP_BIT_SET) ? QUIC_PACKET_KEY_PHASE_BIT : 0) | (pn_len - 1);
+ /* Destination connection ID */
+ if (qc->dcid.len) {
+ memcpy(*pos, qc->dcid.data, qc->dcid.len);
+ *pos += qc->dcid.len;
+ }
+
+ ret = 1;
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
+ return ret;
+}
+
/* Apply QUIC header protection to the packet with <pos> as first byte address,
 * <pn> as address of the Packet number field, <pnlen> being this field length
 * with <aead> as AEAD cipher and <key> as secret key.
 *
 * TODO no error is expected as encryption is done in place but encryption
 * manual is unclear. <fail> will be set to true if an error is detected.
 */
void quic_apply_header_protection(struct quic_conn *qc, unsigned char *pos,
                                  unsigned char *pn, size_t pnlen,
                                  struct quic_tls_ctx *tls_ctx, int *fail)

{
	int i;
	/* We need an IV of at least 5 bytes: one byte for bytes #0
	 * and at most 4 bytes for the packet number
	 */
	unsigned char mask[5] = {0};
	EVP_CIPHER_CTX *aes_ctx = tls_ctx->tx.hp_ctx;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	*fail = 0;

	/* Derive the mask by encrypting the sample taken right after the
	 * longest possible packet number field (RFC 9001 5.4.2).
	 */
	if (!quic_tls_aes_encrypt(mask, pn + QUIC_PACKET_PN_MAXLEN, sizeof mask, aes_ctx)) {
		TRACE_ERROR("could not apply header protection", QUIC_EV_CONN_TXPKT, qc);
		*fail = 1;
		goto out;
	}

	/* Protect the low order first byte bits: 4 of them for long headers,
	 * 5 for short ones (RFC 9001 5.4.1), then the packet number bytes.
	 */
	*pos ^= mask[0] & (*pos & QUIC_PACKET_LONG_HEADER_BIT ? 0xf : 0x1f);
	for (i = 0; i < pnlen; i++)
		pn[i] ^= mask[i + 1];

 out:
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
}
+
/* Prepare into <outlist> as many ack-eliciting frames as possible from their
 * <inlist> prebuilt frames for <qel> encryption level to be encoded in a buffer
 * with <room> as available room, and <*len> the packet Length field initialized
 * with the number of bytes already present in this buffer which must be taken
 * into account for the Length packet field value. <headlen> is the number of
 * bytes already present in this packet before building frames.
 *
 * Update consequently <*len> to reflect the size of these frames built
 * by this function. Also attach these frames to <outlist> frame list.
 * Return 1 if at least one ack-eliciting frame could be built, 0 if not.
 */
static int qc_build_frms(struct list *outlist, struct list *inlist,
                         size_t room, size_t *len, size_t headlen,
                         struct quic_enc_level *qel,
                         struct quic_conn *qc)
{
	int ret;
	struct quic_frame *cf, *cfbak;

	TRACE_ENTER(QUIC_EV_CONN_BCFRMS, qc);

	ret = 0;
	if (*len > room)
		goto leave;

	/* If we are not probing we must take into account the congestion
	 * control window.
	 */
	if (!qel->pktns->tx.pto_probe) {
		size_t remain = quic_cc_path_prep_data(qc->path);

		if (headlen > remain)
			goto leave;

		room = QUIC_MIN(room, remain - headlen);
	}

	TRACE_PROTO("TX frms build (headlen)",
	            QUIC_EV_CONN_BCFRMS, qc, &headlen);

	/* NOTE: switch/case block inside a loop, a successful status must be
	 * returned by this function only if at least one frame could be built
	 * in the switch/case block.
	 */
	list_for_each_entry_safe(cf, cfbak, inlist, list) {
		/* header length, data length, frame length. */
		size_t hlen, dlen, dlen_sz, avail_room, flen;

		if (!room)
			break;

		switch (cf->type) {
		case QUIC_FT_CRYPTO:
			TRACE_DEVEL(" New CRYPTO frame build (room, len)",
			            QUIC_EV_CONN_BCFRMS, qc, &room, len);
			/* Compute the length of this CRYPTO frame header */
			hlen = 1 + quic_int_getsize(cf->crypto.offset);
			/* Compute the data length of this CRYPTO frame. */
			dlen = max_stream_data_size(room, *len + hlen, cf->crypto.len);
			TRACE_DEVEL(" CRYPTO data length (hlen, crypto.len, dlen)",
			            QUIC_EV_CONN_BCFRMS, qc, &hlen, &cf->crypto.len, &dlen);
			if (!dlen)
				continue;

			/* CRYPTO frame length. */
			flen = hlen + quic_int_getsize(dlen) + dlen;
			TRACE_DEVEL(" CRYPTO frame length (flen)",
			            QUIC_EV_CONN_BCFRMS, qc, &flen);
			/* Add the CRYPTO data length and its encoded length to the packet
			 * length and the length of this length.
			 */
			*len += flen;
			room -= flen;
			if (dlen == cf->crypto.len) {
				/* <cf> CRYPTO data have been consumed. */
				LIST_DEL_INIT(&cf->list);
				LIST_APPEND(outlist, &cf->list);
			}
			else {
				struct quic_frame *new_cf;

				/* Partial fit: split <cf>, sending its first
				 * <dlen> bytes via a newly allocated frame.
				 */
				new_cf = qc_frm_alloc(QUIC_FT_CRYPTO);
				if (!new_cf) {
					TRACE_ERROR("No memory for new crypto frame", QUIC_EV_CONN_BCFRMS, qc);
					continue;
				}

				new_cf->crypto.len = dlen;
				new_cf->crypto.offset = cf->crypto.offset;
				new_cf->crypto.qel = qel;
				TRACE_DEVEL("split frame", QUIC_EV_CONN_PRSAFRM, qc, new_cf);
				if (cf->origin) {
					TRACE_DEVEL("duplicated frame", QUIC_EV_CONN_PRSAFRM, qc);
					/* This <cf> frame was duplicated */
					LIST_APPEND(&cf->origin->reflist, &new_cf->ref);
					new_cf->origin = cf->origin;
					/* Detach the remaining CRYPTO frame from its original frame */
					LIST_DEL_INIT(&cf->ref);
					cf->origin = NULL;
				}
				LIST_APPEND(outlist, &new_cf->list);
				/* Consume <dlen> bytes of the current frame. */
				cf->crypto.len -= dlen;
				cf->crypto.offset += dlen;
			}
			break;

		case QUIC_FT_STREAM_8 ... QUIC_FT_STREAM_F:
			if (cf->stream.dup) {
				struct eb64_node *node = NULL;
				struct qc_stream_desc *stream_desc = NULL;
				struct qf_stream *strm_frm = &cf->stream;

				/* As this frame has been already lost, ensure the stream is always
				 * available or the range of this frame is not consumed before
				 * resending it.
				 */
				node = eb64_lookup(&qc->streams_by_id, strm_frm->id);
				if (!node) {
					TRACE_DEVEL("released stream", QUIC_EV_CONN_PRSAFRM, qc, cf);
					qc_frm_free(qc, &cf);
					continue;
				}

				stream_desc = eb64_entry(node, struct qc_stream_desc, by_id);
				if (strm_frm->offset.key + strm_frm->len <= stream_desc->ack_offset) {
					TRACE_DEVEL("ignored frame frame in already acked range",
					            QUIC_EV_CONN_PRSAFRM, qc, cf);
					qc_frm_free(qc, &cf);
					continue;
				}
				else if (strm_frm->offset.key < stream_desc->ack_offset) {
					uint64_t diff = stream_desc->ack_offset - strm_frm->offset.key;

					/* Drop the already acknowledged prefix of the frame. */
					qc_stream_frm_mv_fwd(cf, diff);
					TRACE_DEVEL("updated partially acked frame",
					            QUIC_EV_CONN_PRSAFRM, qc, cf);
				}
			}
			/* Note that these frames are accepted in short packets only without
			 * "Length" packet field. Here, <*len> is used only to compute the
			 * sum of the lengths of the already built frames for this packet.
			 *
			 * Compute the length of this STREAM frame "header" made of all the
			 * fields except the variable ones. Note that +1 is for the type of
			 * this frame.
			 */
			hlen = 1 + quic_int_getsize(cf->stream.id) +
				((cf->type & QUIC_STREAM_FRAME_TYPE_OFF_BIT) ? quic_int_getsize(cf->stream.offset.key) : 0);
			/* Compute the data length of this STREAM frame. */
			avail_room = room - hlen - *len;
			if ((ssize_t)avail_room <= 0)
				continue;

			TRACE_DEVEL(" New STREAM frame build (room, len)",
			            QUIC_EV_CONN_BCFRMS, qc, &room, len);

			/* hlen contains STREAM id and offset. Ensure there is
			 * enough room for length field.
			 */
			if (cf->type & QUIC_STREAM_FRAME_TYPE_LEN_BIT) {
				dlen = QUIC_MIN((uint64_t)max_available_room(avail_room, &dlen_sz),
				                cf->stream.len);
				dlen_sz = quic_int_getsize(dlen);
				flen = hlen + dlen_sz + dlen;
			}
			else {
				dlen = QUIC_MIN((uint64_t)avail_room, cf->stream.len);
				flen = hlen + dlen;
			}

			if (cf->stream.len && !dlen) {
				/* Only a small gap is left on buffer, not
				 * enough to encode the STREAM data length.
				 */
				continue;
			}

			TRACE_DEVEL(" STREAM data length (hlen, stream.len, dlen)",
			            QUIC_EV_CONN_BCFRMS, qc, &hlen, &cf->stream.len, &dlen);
			TRACE_DEVEL(" STREAM frame length (flen)",
			            QUIC_EV_CONN_BCFRMS, qc, &flen);
			/* Add the STREAM data length and its encoded length to the packet
			 * length and the length of this length.
			 */
			*len += flen;
			room -= flen;
			if (dlen == cf->stream.len) {
				/* <cf> STREAM data have been consumed. */
				LIST_DEL_INIT(&cf->list);
				LIST_APPEND(outlist, &cf->list);

				/* Do not notify MUX on retransmission. */
				if (qc->flags & QUIC_FL_CONN_TX_MUX_CONTEXT) {
					qcc_streams_sent_done(cf->stream.stream->ctx,
					                      cf->stream.len,
					                      cf->stream.offset.key);
				}
			}
			else {
				struct quic_frame *new_cf;
				struct buffer cf_buf;

				/* Partial fit: split <cf> as done for CRYPTO frames above. */
				new_cf = qc_frm_alloc(cf->type);
				if (!new_cf) {
					TRACE_ERROR("No memory for new STREAM frame", QUIC_EV_CONN_BCFRMS, qc);
					continue;
				}

				new_cf->stream.stream = cf->stream.stream;
				new_cf->stream.buf = cf->stream.buf;
				new_cf->stream.id = cf->stream.id;
				new_cf->stream.offset = cf->stream.offset;
				new_cf->stream.len = dlen;
				new_cf->type |= QUIC_STREAM_FRAME_TYPE_LEN_BIT;
				/* FIN bit reset */
				new_cf->type &= ~QUIC_STREAM_FRAME_TYPE_FIN_BIT;
				new_cf->stream.data = cf->stream.data;
				new_cf->stream.dup = cf->stream.dup;
				TRACE_DEVEL("split frame", QUIC_EV_CONN_PRSAFRM, qc, new_cf);
				if (cf->origin) {
					TRACE_DEVEL("duplicated frame", QUIC_EV_CONN_PRSAFRM, qc);
					/* This <cf> frame was duplicated */
					LIST_APPEND(&cf->origin->reflist, &new_cf->ref);
					new_cf->origin = cf->origin;
					/* Detach this STREAM frame from its origin */
					LIST_DEL_INIT(&cf->ref);
					cf->origin = NULL;
				}
				LIST_APPEND(outlist, &new_cf->list);
				/* The remainder necessarily carries a non-null offset now. */
				cf->type |= QUIC_STREAM_FRAME_TYPE_OFF_BIT;
				/* Consume <dlen> bytes of the current frame. */
				cf_buf = b_make(b_orig(cf->stream.buf),
				                b_size(cf->stream.buf),
				                (char *)cf->stream.data - b_orig(cf->stream.buf), 0);
				cf->stream.len -= dlen;
				cf->stream.offset.key += dlen;
				cf->stream.data = (unsigned char *)b_peek(&cf_buf, dlen);

				/* Do not notify MUX on retransmission. */
				if (qc->flags & QUIC_FL_CONN_TX_MUX_CONTEXT) {
					qcc_streams_sent_done(new_cf->stream.stream->ctx,
					                      new_cf->stream.len,
					                      new_cf->stream.offset.key);
				}
			}

			/* TODO the MUX is notified about the frame sending via
			 * previous qcc_streams_sent_done call. However, the
			 * sending can fail later, for example if the sendto
			 * system call returns an error. As the MUX has been
			 * notified, the transport layer is responsible to
			 * bufferize and resent the announced data later.
			 */

			break;

		default:
			/* Fully prebuilt frame: it fits entirely or is skipped. */
			flen = qc_frm_len(cf);
			BUG_ON(!flen);
			if (flen > room)
				continue;

			*len += flen;
			room -= flen;
			LIST_DEL_INIT(&cf->list);
			LIST_APPEND(outlist, &cf->list);
			break;
		}

		/* Successful status as soon as a frame could be built */
		ret = 1;
	}

 leave:
	TRACE_LEAVE(QUIC_EV_CONN_BCFRMS, qc);
	return ret;
}
+
+/* Generate a CONNECTION_CLOSE frame for <qc> on <qel> encryption level. <out>
+ * is used as return parameter and should be zero'ed by the caller.
+ */
+static void qc_build_cc_frm(struct quic_conn *qc, struct quic_enc_level *qel,
+ struct quic_frame *out)
+{
+ /* TODO improve CONNECTION_CLOSE on Initial/Handshake encryption levels
+ *
+ * A CONNECTION_CLOSE frame should be sent in several packets with
+ * different encryption levels depending on the client context. This is
+ * to ensure that the client can decrypt it. See RFC 9000 10.2.3 for
+ * more details on how to implement it.
+ */
+ TRACE_ENTER(QUIC_EV_CONN_BFRM, qc);
+
+
+ if (qc->err.app) {
+ if (unlikely(qel == qc->iel || qel == qc->hel)) {
+ /* RFC 9000 10.2.3. Immediate Close during the Handshake
+ *
+ * Sending a CONNECTION_CLOSE of type 0x1d in an Initial or Handshake
+ * packet could expose application state or be used to alter application
+ * state. A CONNECTION_CLOSE of type 0x1d MUST be replaced by a
+ * CONNECTION_CLOSE of type 0x1c when sending the frame in Initial or
+ * Handshake packets. Otherwise, information about the application
+ * state might be revealed. Endpoints MUST clear the value of the
+ * Reason Phrase field and SHOULD use the APPLICATION_ERROR code when
+ * converting to a CONNECTION_CLOSE of type 0x1c.
+ */
+ out->type = QUIC_FT_CONNECTION_CLOSE;
+ out->connection_close.error_code = QC_ERR_APPLICATION_ERROR;
+ out->connection_close.reason_phrase_len = 0;
+ }
+ else {
+ out->type = QUIC_FT_CONNECTION_CLOSE_APP;
+ out->connection_close_app.error_code = qc->err.code;
+ out->connection_close_app.reason_phrase_len = 0;
+ }
+ }
+ else {
+ out->type = QUIC_FT_CONNECTION_CLOSE;
+ out->connection_close.error_code = qc->err.code;
+ out->connection_close.reason_phrase_len = 0;
+ }
+ TRACE_LEAVE(QUIC_EV_CONN_BFRM, qc);
+
+}
+
/* Returns the <ack_delay> field value in microseconds to be set in an ACK frame
 * depending on the time the packet with a new largest packet number was received.
 * <time_received> is a now_ms based timestamp (milliseconds); the delta is
 * converted to microseconds then scaled down by the ack_delay_exponent from
 * <conn>'s tx transport parameters (presumably the locally advertised ones —
 * the peer re-scales it on reception).
 */
static inline uint64_t quic_compute_ack_delay_us(unsigned int time_received,
                                                 struct quic_conn *conn)
{
	return ((now_ms - time_received) * 1000) >> conn->tx.params.ack_delay_exponent;
}
+
/* This function builds a clear packet from <pkt> information (its type)
 * into a buffer with <pos> as position pointer and <qel> as QUIC TLS encryption
 * level for <qc> QUIC connection, filling the buffer with as much frames as
 * possible from <frms> list of prebuilt frames.
 * The trailing QUIC_TLS_TAG_LEN bytes of this packet are not built. But they are
 * reserved so as to ensure there is enough room to build this AEAD TAG after
 * having returned from this function.
 * This function also updates the value of <buf_pn> pointer to point to the packet
 * number field in this packet. <pn_len> will also have the packet number
 * length as value.
 *
 * Return 1 if succeeded (enough room to build this packet), 0 if not.
 */
static int qc_do_build_pkt(unsigned char *pos, const unsigned char *end,
                           size_t dglen, struct quic_tx_packet *pkt,
                           int64_t pn, size_t *pn_len, unsigned char **buf_pn,
                           int must_ack, int padding, int cc, int probe,
                           struct quic_enc_level *qel, struct quic_conn *qc,
                           const struct quic_version *ver, struct list *frms)
{
	unsigned char *beg, *payload;
	size_t len, len_sz, len_frms, padding_len;
	struct quic_frame frm;
	struct quic_frame ack_frm;
	struct quic_frame cc_frm;
	size_t ack_frm_len, head_len;
	int64_t rx_largest_acked_pn;
	int add_ping_frm;
	struct list frm_list = LIST_HEAD_INIT(frm_list);
	struct quic_frame *cf;
	int ret = 0;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);

	/* Length field value with CRYPTO frames if present. */
	len_frms = 0;
	beg = pos;
	/* When not probing, and no immediate close is required, reduce the size of this
	 * buffer to respect the congestion controller window.
	 * This size will be limited if we have ack-eliciting frames to send from <frms>.
	 */
	if (!probe && !LIST_ISEMPTY(frms) && !cc) {
		size_t path_room;

		path_room = quic_cc_path_prep_data(qc->path);
		if (end - beg > path_room)
			end = beg + path_room;
	}

	/* Ensure there is enough room for the TLS encryption tag and a zero token
	 * length field if any.
	 */
	if (end - pos < QUIC_TLS_TAG_LEN +
	    (pkt->type == QUIC_PACKET_TYPE_INITIAL ? 1 : 0))
		goto no_room;

	end -= QUIC_TLS_TAG_LEN;
	rx_largest_acked_pn = qel->pktns->rx.largest_acked_pn;
	/* packet number length */
	*pn_len = quic_packet_number_length(pn, rx_largest_acked_pn);
	/* Build the header */
	if ((pkt->type == QUIC_PACKET_TYPE_SHORT &&
	     !quic_build_packet_short_header(&pos, end, *pn_len, qc, qel->tls_ctx.flags)) ||
	    (pkt->type != QUIC_PACKET_TYPE_SHORT &&
	     !quic_build_packet_long_header(&pos, end, pkt->type, *pn_len, qc, ver)))
		goto no_room;

	/* Encode the token length (0) for an Initial packet. */
	if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
		if (end <= pos)
			goto no_room;

		*pos++ = 0;
	}

	head_len = pos - beg;
	/* Build an ACK frame if required. */
	ack_frm_len = 0;
	/* Do not ack and probe at the same time. */
	if ((must_ack || (qel->pktns->flags & QUIC_FL_PKTNS_ACK_REQUIRED)) && !qel->pktns->tx.pto_probe) {
		struct quic_arngs *arngs = &qel->pktns->rx.arngs;
		BUG_ON(eb_is_empty(&qel->pktns->rx.arngs.root));
		ack_frm.type = QUIC_FT_ACK;
		ack_frm.tx_ack.arngs = arngs;
		if (qel->pktns->flags & QUIC_FL_PKTNS_NEW_LARGEST_PN) {
			qel->pktns->tx.ack_delay =
				quic_compute_ack_delay_us(qel->pktns->rx.largest_time_received, qc);
			qel->pktns->flags &= ~QUIC_FL_PKTNS_NEW_LARGEST_PN;
		}
		ack_frm.tx_ack.ack_delay = qel->pktns->tx.ack_delay;
		/* XXX BE CAREFUL XXX : here we reserved at least one byte for the
		 * smallest frame (PING) and <*pn_len> more for the packet number. Note
		 * that from here, we do not know if we will have to send a PING frame.
		 * This will be decided after having computed the ack-eliciting frames
		 * to be added to this packet.
		 */
		if (end - pos <= 1 + *pn_len)
			goto no_room;

		ack_frm_len = qc_frm_len(&ack_frm);
		if (ack_frm_len > end - 1 - *pn_len - pos)
			goto no_room;
	}

	/* Length field value without the ack-eliciting frames. */
	len = ack_frm_len + *pn_len;
	len_frms = 0;
	if (!cc && !LIST_ISEMPTY(frms)) {
		ssize_t room = end - pos;

		TRACE_PROTO("Avail. ack eliciting frames", QUIC_EV_CONN_FRMLIST, qc, frms);
		/* Initialize the length of the frames built below to <len>.
		 * If any frame could be successfully built by qc_build_frms(),
		 * we will have len_frms > len.
		 */
		len_frms = len;
		if (!qc_build_frms(&frm_list, frms,
		                   end - pos, &len_frms, pos - beg, qel, qc)) {
			TRACE_PROTO("Not enough room", QUIC_EV_CONN_TXPKT,
			            qc, NULL, NULL, &room);
			if (padding) {
				len_frms = 0;
				goto comp_pkt_len;
			}

			if (!ack_frm_len && !qel->pktns->tx.pto_probe)
				goto no_room;
		}
	}

 comp_pkt_len:
	/* Length (of the remaining data). Must not fail because the buffer size
	 * has been checked above. Note that we have reserved QUIC_TLS_TAG_LEN bytes
	 * for the encryption tag. It must be taken into account for the length
	 * of this packet.
	 */
	if (len_frms)
		len = len_frms + QUIC_TLS_TAG_LEN;
	else
		len += QUIC_TLS_TAG_LEN;
	/* CONNECTION_CLOSE frame */
	if (cc) {
		qc_build_cc_frm(qc, qel, &cc_frm);
		len += qc_frm_len(&cc_frm);
	}
	add_ping_frm = 0;
	padding_len = 0;
	len_sz = quic_int_getsize(len);
	/* Add this packet size to <dglen> */
	dglen += head_len + len_sz + len;
	/* Note that <padding> is true only when building an Handshake packet
	 * coalesced to an Initial packet.
	 */
	if (padding && dglen < QUIC_INITIAL_PACKET_MINLEN) {
		/* This is a maximum padding size */
		padding_len = QUIC_INITIAL_PACKET_MINLEN - dglen;
		/* The length field value of this packet is <len> + <padding_len>
		 * the size of which may be greater than the initial computed size
		 * <len_sz>. So, let's deduce the difference between these two packet
		 * sizes from <padding_len>.
		 */
		padding_len -= quic_int_getsize(len + padding_len) - len_sz;
		len += padding_len;
	}
	else if (len_frms && len_frms < QUIC_PACKET_PN_MAXLEN) {
		/* Pad up to QUIC_PACKET_PN_MAXLEN so that enough bytes follow
		 * the packet number for the header protection sample (see
		 * quic_apply_header_protection()).
		 */
		len += padding_len = QUIC_PACKET_PN_MAXLEN - len_frms;
	}
	else if (LIST_ISEMPTY(&frm_list)) {
		if (qel->pktns->tx.pto_probe) {
			/* If we cannot send a frame, we send a PING frame. */
			add_ping_frm = 1;
			len += 1;
			dglen += 1;
			/* Note that we are only in the case where this Initial packet
			 * is not coalesced to an Handshake packet. We must directly
			 * pad the datagram.
			 */
			if (pkt->type == QUIC_PACKET_TYPE_INITIAL) {
				if (dglen < QUIC_INITIAL_PACKET_MINLEN) {
					padding_len = QUIC_INITIAL_PACKET_MINLEN - dglen;
					padding_len -= quic_int_getsize(len + padding_len) - len_sz;
					len += padding_len;
				}
			}
			else {
				/* Note that +1 is for the PING frame */
				if (*pn_len + 1 < QUIC_PACKET_PN_MAXLEN)
					len += padding_len = QUIC_PACKET_PN_MAXLEN - *pn_len - 1;
			}
		}
		else {
			/* If there is no frame at all to follow, add at least a PADDING frame. */
			if (!ack_frm_len && !cc)
				len += padding_len = QUIC_PACKET_PN_MAXLEN - *pn_len;
		}
	}

	if (pkt->type != QUIC_PACKET_TYPE_SHORT && !quic_enc_int(&pos, end, len))
		goto no_room;

	/* Packet number field address. */
	*buf_pn = pos;

	/* Packet number encoding. */
	if (!quic_packet_number_encode(&pos, end, pn, *pn_len))
		goto no_room;

	/* payload building (ack-eliciting or not frames) */
	payload = pos;
	if (ack_frm_len) {
		if (!qc_build_frm(&pos, end, &ack_frm, pkt, qc))
			goto no_room;

		pkt->largest_acked_pn = quic_pktns_get_largest_acked_pn(qel->pktns);
		pkt->flags |= QUIC_FL_TX_PACKET_ACK;
	}

	/* Ack-eliciting frames */
	if (!LIST_ISEMPTY(&frm_list)) {
		struct quic_frame *tmp_cf;
		list_for_each_entry_safe(cf, tmp_cf, &frm_list, list) {
			if (!qc_build_frm(&pos, end, cf, pkt, qc)) {
				ssize_t room = end - pos;
				TRACE_PROTO("Not enough room", QUIC_EV_CONN_TXPKT,
				            qc, NULL, NULL, &room);
				/* Note that <cf> was added from <frms> to <frm_list> list by
				 * qc_build_frms().
				 */
				LIST_DEL_INIT(&cf->list);
				LIST_INSERT(frms, &cf->list);
				continue;
			}

			quic_tx_packet_refinc(pkt);
			cf->pkt = pkt;
		}
	}

	/* Build a PING frame if needed. */
	if (add_ping_frm) {
		frm.type = QUIC_FT_PING;
		if (!qc_build_frm(&pos, end, &frm, pkt, qc))
			goto no_room;
	}

	/* Build a CONNECTION_CLOSE frame if needed. */
	if (cc) {
		if (!qc_build_frm(&pos, end, &cc_frm, pkt, qc))
			goto no_room;

		pkt->flags |= QUIC_FL_TX_PACKET_CC;
	}

	/* Build a PADDING frame if needed. */
	if (padding_len) {
		frm.type = QUIC_FT_PADDING;
		frm.padding.len = padding_len;
		if (!qc_build_frm(&pos, end, &frm, pkt, qc))
			goto no_room;
	}

	if (pos == payload) {
		/* No payload was built because of congestion control */
		TRACE_PROTO("limited by congestion control", QUIC_EV_CONN_TXPKT, qc);
		goto no_room;
	}

	/* If this packet is ack-eliciting and we are probing let's
	 * decrement the PTO probe counter.
	 */
	if ((pkt->flags & QUIC_FL_TX_PACKET_ACK_ELICITING) &&
	    qel->pktns->tx.pto_probe)
		qel->pktns->tx.pto_probe--;

	pkt->len = pos - beg;
	LIST_SPLICE(&pkt->frms, &frm_list);

	ret = 1;
	TRACE_PROTO("Packet ack-eliciting frames", QUIC_EV_CONN_TXPKT, qc, pkt);
 leave:
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return ret;

 no_room:
	/* Replace the pre-built frames which could not be added to this packet */
	LIST_SPLICE(frms, &frm_list);
	TRACE_PROTO("Remaining ack-eliciting frames", QUIC_EV_CONN_FRMLIST, qc, frms);
	goto leave;
}
+
+static inline void quic_tx_packet_init(struct quic_tx_packet *pkt, int type)
+{
+ pkt->type = type;
+ pkt->len = 0;
+ pkt->in_flight_len = 0;
+ pkt->pn_node.key = (uint64_t)-1;
+ LIST_INIT(&pkt->frms);
+ pkt->time_sent = TICK_ETERNITY;
+ pkt->next = NULL;
+ pkt->prev = NULL;
+ pkt->largest_acked_pn = -1;
+ pkt->flags = 0;
+ pkt->refcnt = 0;
+}
+
/* Build a packet into a buffer at <pos> position, <end> pointing to one byte past
 * the end of this buffer, with <pkt_type> as packet type for <qc> QUIC connection
 * at <qel> encryption level with <frms> list of prebuilt frames.
 *
 * Return -3 if the packet could not be allocated, -2 if could not be encrypted for
 * any reason, -1 if there was not enough room to build a packet.
 * XXX NOTE XXX
 * If you provide qc_build_pkt() with a big enough buffer to build a packet as big as
 * possible (to fill an MTU), the unique reason why this function may fail is the congestion
 * control window limitation.
 */
static struct quic_tx_packet *qc_build_pkt(unsigned char **pos,
                                           const unsigned char *end,
                                           struct quic_enc_level *qel,
                                           struct quic_tls_ctx *tls_ctx, struct list *frms,
                                           struct quic_conn *qc, const struct quic_version *ver,
                                           size_t dglen, int pkt_type, int must_ack,
                                           int padding, int probe, int cc, int *err)
{
	struct quic_tx_packet *ret_pkt = NULL;
	/* The pointer to the packet number field. */
	unsigned char *buf_pn;
	unsigned char *first_byte, *last_byte, *payload;
	int64_t pn;
	size_t pn_len, payload_len, aad_len;
	struct quic_tx_packet *pkt;
	int encrypt_failure = 0;

	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
	TRACE_PROTO("TX pkt build", QUIC_EV_CONN_TXPKT, qc, NULL, qel);
	*err = 0;
	pkt = pool_alloc(pool_head_quic_tx_packet);
	if (!pkt) {
		TRACE_DEVEL("Not enough memory for a new packet", QUIC_EV_CONN_TXPKT, qc);
		*err = -3;
		goto err;
	}

	quic_tx_packet_init(pkt, pkt_type);
	first_byte = *pos;
	pn_len = 0;
	buf_pn = NULL;

	/* Next packet number to use. It is only consumed (incremented) on
	 * success, see below.
	 */
	pn = qel->pktns->tx.next_pn + 1;
	if (!qc_do_build_pkt(*pos, end, dglen, pkt, pn, &pn_len, &buf_pn,
	                     must_ack, padding, cc, probe, qel, qc, ver, frms)) {
		// trace already emitted by function above
		*err = -1;
		goto err;
	}

	/* The clear header up to and including the packet number is the AAD,
	 * everything after the packet number is the payload to encrypt.
	 */
	last_byte = first_byte + pkt->len;
	payload = buf_pn + pn_len;
	payload_len = last_byte - payload;
	aad_len = payload - first_byte;

	quic_packet_encrypt(payload, payload_len, first_byte, aad_len, pn, tls_ctx, qc, &encrypt_failure);
	if (encrypt_failure) {
		/* TODO Unrecoverable failure, unencrypted data should be returned to the caller. */
		WARN_ON("quic_packet_encrypt failure");
		*err = -2;
		goto err;
	}

	/* Account for the AEAD tag appended by the encryption. */
	last_byte += QUIC_TLS_TAG_LEN;
	pkt->len += QUIC_TLS_TAG_LEN;
	quic_apply_header_protection(qc, first_byte, buf_pn, pn_len, tls_ctx, &encrypt_failure);
	if (encrypt_failure) {
		/* TODO Unrecoverable failure, unencrypted data should be returned to the caller. */
		WARN_ON("quic_apply_header_protection failure");
		*err = -2;
		goto err;
	}

	/* Consume a packet number */
	qel->pktns->tx.next_pn++;
	qc->bytes.prep += pkt->len;
	if (qc->bytes.prep >= 3 * qc->bytes.rx && !quic_peer_validated_addr(qc)) {
		qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
		TRACE_PROTO("anti-amplification limit reached", QUIC_EV_CONN_TXPKT, qc);
	}

	/* Now that a correct packet is built, let us consume <*pos> buffer. */
	*pos = last_byte;
	/* Attach the built packet to its tree. */
	pkt->pn_node.key = pn;
	/* Set the packet in flight length for in flight packet only. */
	if (pkt->flags & QUIC_FL_TX_PACKET_IN_FLIGHT) {
		pkt->in_flight_len = pkt->len;
		qc->path->prep_in_flight += pkt->len;
	}
	/* Always reset this flag */
	qc->flags &= ~QUIC_FL_CONN_IMMEDIATE_CLOSE;
	if (pkt->flags & QUIC_FL_TX_PACKET_ACK) {
		qel->pktns->flags &= ~QUIC_FL_PKTNS_ACK_REQUIRED;
		qel->pktns->rx.nb_aepkts_since_last_ack = 0;
		qc->flags &= ~QUIC_FL_CONN_ACK_TIMER_FIRED;
		if (tick_isset(qc->ack_expire)) {
			/* The ACK carried by this packet makes the delayed ACK
			 * timer useless: re-arm the idle timer alone.
			 */
			qc->ack_expire = TICK_ETERNITY;
			qc->idle_timer_task->expire = qc->idle_expire;
			task_queue(qc->idle_timer_task);
			TRACE_PROTO("ack timer cancelled", QUIC_EV_CONN_IDLE_TIMER, qc);
		}
	}

	pkt->pktns = qel->pktns;

	ret_pkt = pkt;
 leave:
	TRACE_PROTO("TX pkt built", QUIC_EV_CONN_TXPKT, qc, ret_pkt);
	TRACE_LEAVE(QUIC_EV_CONN_TXPKT, qc);
	return ret_pkt;

 err:
	/* TODO: what about the frames which have been built
	 * for this packet.
	 */
	free_quic_tx_packet(qc, pkt);
	goto leave;
}
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/raw_sock.c b/src/raw_sock.c
new file mode 100644
index 0000000..1287dc5
--- /dev/null
+++ b/src/raw_sock.c
@@ -0,0 +1,489 @@
+/*
+ * RAW transport layer over SOCK_STREAM sockets.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <netinet/tcp.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/pipe.h>
+#include <haproxy/proxy.h>
+#include <haproxy/tools.h>
+
+
+#if defined(USE_LINUX_SPLICE)
+
+/* A pipe contains 16 segments max, and it's common to see segments of 1448 bytes
+ * because of timestamps. Use this as a hint for not looping on splice().
+ */
+#define SPLICE_FULL_HINT 16*1448
+
+/* how much data we attempt to splice at once when the buffer is configured for
+ * infinite forwarding */
+#define MAX_SPLICE_AT_ONCE (1<<30)
+
+/* Returns :
+ * -1 if splice() is not supported
+ * >= 0 to report the amount of spliced bytes.
+ * connection flags are updated (error, read0, wait_room, wait_data).
+ * The caller must have previously allocated the pipe.
+ */
+int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count)
+{
+ int ret;
+ int retval = 0;
+
+
+ if (!conn_ctrl_ready(conn))
+ return 0;
+
+ BUG_ON(conn->flags & CO_FL_FDLESS);
+
+ if (!fd_recv_ready(conn->handle.fd))
+ return 0;
+
+ conn->flags &= ~CO_FL_WAIT_ROOM;
+ errno = 0;
+
+ /* Under Linux, if FD_POLL_HUP is set, we have reached the end.
+ * Since older splice() implementations were buggy and returned
+ * EAGAIN on end of read, let's bypass the call to splice() now.
+ */
+ if (unlikely(!(fdtab[conn->handle.fd].state & FD_POLL_IN))) {
+ /* stop here if we reached the end of data */
+ if ((fdtab[conn->handle.fd].state & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
+ goto out_read0;
+
+ /* report error on POLL_ERR before connection establishment */
+ if ((fdtab[conn->handle.fd].state & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+ errno = 0; /* let the caller do a getsockopt() if it wants it */
+ goto leave;
+ }
+ }
+
+ while (count) {
+ if (count > MAX_SPLICE_AT_ONCE)
+ count = MAX_SPLICE_AT_ONCE;
+
+ ret = splice(conn->handle.fd, NULL, pipe->prod, NULL, count,
+ SPLICE_F_MOVE|SPLICE_F_NONBLOCK);
+
+ if (ret <= 0) {
+ if (ret == 0)
+ goto out_read0;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ /* there are two reasons for EAGAIN :
+ * - nothing in the socket buffer (standard)
+ * - pipe is full
+ * The difference between these two situations
+ * is problematic. Since we don't know if the
+ * pipe is full, we'll stop if the pipe is not
+ * empty. Anyway, we will almost always fill or
+ * empty the pipe.
+ */
+ if (pipe->data) {
+ /* always stop reading until the pipe is flushed */
+ conn->flags |= CO_FL_WAIT_ROOM;
+ break;
+ }
+ /* socket buffer exhausted */
+ fd_cant_recv(conn->handle.fd);
+ break;
+ }
+ else if (errno == ENOSYS || errno == EINVAL || errno == EBADF) {
+ /* splice not supported on this end, disable it.
+ * We can safely return -1 since there is no
+ * chance that any data has been piped yet.
+ */
+ retval = -1;
+ goto leave;
+ }
+ else if (errno == EINTR) {
+ /* try again */
+ continue;
+ }
+ /* here we have another error */
+ conn->flags |= CO_FL_ERROR;
+ break;
+ } /* ret <= 0 */
+
+ retval += ret;
+ pipe->data += ret;
+ count -= ret;
+
+ if (pipe->data >= SPLICE_FULL_HINT || ret >= global.tune.recv_enough) {
+ /* We've read enough of it for this time, let's stop before
+ * being asked to poll.
+ */
+ conn->flags |= CO_FL_WAIT_ROOM;
+ break;
+ }
+ } /* while */
+
+ if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && retval)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+ leave:
+ if (retval > 0)
+ increment_send_rate(retval, 1);
+
+ return retval;
+
+ out_read0:
+ conn_sock_read0(conn);
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ goto leave;
+}
+
+/* Send as many bytes as possible from the pipe to the connection's socket.
+ */
+int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count)
+{
+ int ret, done;
+
+ if (!conn_ctrl_ready(conn))
+ return 0;
+
+ BUG_ON(conn->flags & CO_FL_FDLESS);
+
+ if (!fd_send_ready(conn->handle.fd))
+ return 0;
+
+ if (conn->flags & CO_FL_SOCK_WR_SH) {
+ /* it's already closed */
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
+ errno = EPIPE;
+ return 0;
+ }
+
+ if (unlikely(count > pipe->data))
+ count = pipe->data;
+
+ done = 0;
+ while (count) {
+ ret = splice(pipe->cons, NULL, conn->handle.fd, NULL, count,
+ SPLICE_F_MOVE|SPLICE_F_NONBLOCK);
+
+ if (ret <= 0) {
+ if (ret == 0 || errno == EAGAIN || errno == EWOULDBLOCK) {
+ fd_cant_send(conn->handle.fd);
+ break;
+ }
+ else if (errno == EINTR)
+ continue;
+
+ /* here we have another error */
+ conn->flags |= CO_FL_ERROR;
+ break;
+ }
+
+ done += ret;
+ count -= ret;
+ pipe->data -= ret;
+ }
+ if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ }
+
+ return done;
+}
+
+#endif /* USE_LINUX_SPLICE */
+
+
+/* Receive up to <count> bytes from connection <conn>'s socket and store them
+ * into buffer <buf>. Only one call to recv() is performed, unless the
+ * buffer wraps, in which case a second call may be performed. The connection's
+ * flags are updated with whatever special event is detected (error, read0,
+ * empty). The caller is responsible for taking care of those events and
+ * avoiding the call if inappropriate. The function does not call the
+ * connection's polling update function, so the caller is responsible for this.
+ * errno is cleared before starting so that the caller knows that if it spots an
+ * error without errno, it's pending and can be retrieved via getsockopt(SO_ERROR).
+ */
+static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
+{
+ ssize_t ret;
+ size_t try, done = 0;
+
+ if (!conn_ctrl_ready(conn))
+ return 0;
+
+ BUG_ON(conn->flags & CO_FL_FDLESS);
+
+ if (!fd_recv_ready(conn->handle.fd))
+ return 0;
+
+ conn->flags &= ~CO_FL_WAIT_ROOM;
+ errno = 0;
+
+ if (unlikely(!(fdtab[conn->handle.fd].state & FD_POLL_IN))) {
+ /* stop here if we reached the end of data */
+ if ((fdtab[conn->handle.fd].state & (FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_HUP)
+ goto read0;
+
+ /* report error on POLL_ERR before connection establishment */
+ if ((fdtab[conn->handle.fd].state & FD_POLL_ERR) && (conn->flags & CO_FL_WAIT_L4_CONN)) {
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+ goto leave;
+ }
+ }
+
+ /* read the largest possible block. For this, we perform only one call
+ * to recv() unless the buffer wraps and we exactly fill the first hunk,
+ * in which case we accept to do it once again. A new attempt is made on
+ * EINTR too.
+ */
+ while (count > 0) {
+ try = b_contig_space(buf);
+ if (!try)
+ break;
+
+ if (try > count)
+ try = count;
+
+ ret = recv(conn->handle.fd, b_tail(buf), try, 0);
+
+ if (ret > 0) {
+ b_add(buf, ret);
+ done += ret;
+ if (ret < try) {
+ /* socket buffer exhausted */
+ fd_cant_recv(conn->handle.fd);
+
+ /* unfortunately, on level-triggered events, POLL_HUP
+ * is generally delivered AFTER the system buffer is
+ * empty, unless the poller supports POLL_RDHUP. If
+ * we know this is the case, we don't try to read more
+ * as we know there's no more available. Similarly, if
+ * there's no problem with lingering we don't even try
+ * to read an unlikely close from the client since we'll
+ * close first anyway.
+ */
+ if (fdtab[conn->handle.fd].state & FD_POLL_HUP)
+ goto read0;
+
+ if (!(fdtab[conn->handle.fd].state & FD_LINGER_RISK) ||
+ (cur_poller.flags & HAP_POLL_F_RDHUP)) {
+ break;
+ }
+ }
+ count -= ret;
+
+ if (flags & CO_RFL_READ_ONCE)
+ break;
+ }
+ else if (ret == 0) {
+ goto read0;
+ }
+ else if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOTCONN) {
+ /* socket buffer exhausted */
+ fd_cant_recv(conn->handle.fd);
+ break;
+ }
+ else if (errno != EINTR) {
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+ break;
+ }
+ }
+
+ if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+ leave:
+ return done;
+
+ read0:
+ conn_sock_read0(conn);
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+
+ /* Now a final check for a possible asynchronous low-level error
+ * report. This can happen when a connection receives a reset
+ * after a shutdown, both POLL_HUP and POLL_ERR are queued, and
+ * we might have come from there by just checking POLL_HUP instead
+ * of recv()'s return value 0, so we have no way to tell there was
+ * an error without checking.
+ */
+ if (unlikely(!done && fdtab[conn->handle.fd].state & FD_POLL_ERR))
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+ goto leave;
+}
+
+
+/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
+ * socket. <flags> may contain some CO_SFL_* flags to hint the system about
+ * other pending data for example, but this flag is ignored at the moment.
+ * Only one call to send() is performed, unless the buffer wraps, in which case
+ * a second call may be performed. The connection's flags are updated with
+ * whatever special event is detected (error, empty). The caller is responsible
+ * for taking care of those events and avoiding the call if inappropriate. The
+ * function does not call the connection's polling update function, so the caller
+ * is responsible for this. It's up to the caller to update the buffer's contents
+ * based on the return value.
+ */
+static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
+{
+ ssize_t ret;
+ size_t try, done;
+ int send_flag;
+
+ if (!conn_ctrl_ready(conn))
+ return 0;
+
+ BUG_ON(conn->flags & CO_FL_FDLESS);
+
+ if (!fd_send_ready(conn->handle.fd))
+ return 0;
+
+ if (unlikely(fdtab[conn->handle.fd].state & FD_POLL_ERR)) {
+ /* an error was reported on the FD, we can't send anymore */
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_WR_SH | CO_FL_SOCK_RD_SH;
+ errno = EPIPE;
+ return 0;
+ }
+
+ if (conn->flags & CO_FL_SOCK_WR_SH) {
+ /* it's already closed */
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH;
+ errno = EPIPE;
+ return 0;
+ }
+
+ done = 0;
+ /* send the largest possible block. For this we perform only one call
+ * to send() unless the buffer wraps and we exactly fill the first hunk,
+ * in which case we accept to do it once again.
+ */
+ while (count) {
+ try = b_contig_data(buf, done);
+ if (try > count)
+ try = count;
+
+ send_flag = MSG_DONTWAIT | MSG_NOSIGNAL;
+ if (try < count || flags & CO_SFL_MSG_MORE)
+ send_flag |= MSG_MORE;
+
+ ret = send(conn->handle.fd, b_peek(buf, done), try, send_flag);
+
+ if (ret > 0) {
+ count -= ret;
+ done += ret;
+
+ /* if the system buffer is full, don't insist */
+ if (ret < try) {
+ fd_cant_send(conn->handle.fd);
+ break;
+ }
+ if (!count)
+ fd_stop_send(conn->handle.fd);
+ }
+ else if (ret == 0 || errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOTCONN || errno == EINPROGRESS) {
+ /* nothing written, we need to poll for write first */
+ fd_cant_send(conn->handle.fd);
+ break;
+ }
+ else if (errno != EINTR) {
+ conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+ break;
+ }
+ }
+ if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) && done) {
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ }
+
+ if (done > 0)
+ increment_send_rate(done, 0);
+
+ return done;
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int raw_sock_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ return conn_subscribe(conn, xprt_ctx, event_type, es);
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int raw_sock_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ return conn_unsubscribe(conn, xprt_ctx, event_type, es);
+}
+
+static void raw_sock_close(struct connection *conn, void *xprt_ctx)
+{
+ if (conn->subs != NULL) {
+ conn_unsubscribe(conn, NULL, conn->subs->events, conn->subs);
+ }
+}
+
+/* We can't have an underlying XPRT, so just return -1 to signify failure */
+static int raw_sock_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx)
+{
+ /* This is the lowest xprt we can have, so if we get there we didn't
+ * find the xprt we wanted to remove, that's a bug
+ */
+ BUG_ON(1);
+ return -1;
+}
+
+/* transport-layer operations for RAW sockets */
+static struct xprt_ops raw_sock = {
+ .snd_buf = raw_sock_from_buf,
+ .rcv_buf = raw_sock_to_buf,
+ .subscribe = raw_sock_subscribe,
+ .unsubscribe = raw_sock_unsubscribe,
+ .remove_xprt = raw_sock_remove_xprt,
+#if defined(USE_LINUX_SPLICE)
+ .rcv_pipe = raw_sock_to_pipe,
+ .snd_pipe = raw_sock_from_pipe,
+#endif
+ .shutr = NULL,
+ .shutw = NULL,
+ .close = raw_sock_close,
+ .name = "RAW",
+};
+
+
+static void __raw_sock_init(void)
+{
+ xprt_register(XPRT_RAW, &raw_sock);
+}
+
+INITCALL0(STG_REGISTER, __raw_sock_init);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/regex.c b/src/regex.c
new file mode 100644
index 0000000..19c7eda
--- /dev/null
+++ b/src/regex.c
@@ -0,0 +1,459 @@
+/*
+ * Regex and string management functions.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/regex.h>
+#include <haproxy/tools.h>
+
+/* regex trash buffer used by various regex tests */
+THREAD_LOCAL regmatch_t pmatch[MAX_MATCH]; /* rm_so, rm_eo for regular expressions */
+
+int exp_replace(char *dst, unsigned int dst_size, char *src, const char *str, const regmatch_t *matches)
+{
+ char *old_dst = dst;
+ char* dst_end = dst + dst_size;
+
+ while (*str) {
+ if (*str == '\\') {
+ str++;
+ if (!*str)
+ return -1;
+
+ if (isdigit((unsigned char)*str)) {
+ int len, num;
+
+ num = *str - '0';
+ str++;
+
+ if (matches[num].rm_eo > -1 && matches[num].rm_so > -1) {
+ len = matches[num].rm_eo - matches[num].rm_so;
+
+ if (dst + len >= dst_end)
+ return -1;
+
+ memcpy(dst, src + matches[num].rm_so, len);
+ dst += len;
+ }
+
+ } else if (*str == 'x') {
+ unsigned char hex1, hex2;
+ str++;
+
+ if (!*str)
+ return -1;
+
+ hex1 = toupper((unsigned char)*str++) - '0';
+
+ if (!*str)
+ return -1;
+
+ hex2 = toupper((unsigned char)*str++) - '0';
+
+ if (hex1 > 9) hex1 -= 'A' - '9' - 1;
+ if (hex2 > 9) hex2 -= 'A' - '9' - 1;
+
+ if (dst >= dst_end)
+ return -1;
+
+ *dst++ = (hex1<<4) + hex2;
+ } else {
+ if (dst >= dst_end)
+ return -1;
+
+ *dst++ = *str++;
+ }
+ } else {
+ if (dst >= dst_end)
+ return -1;
+
+ *dst++ = *str++;
+ }
+ }
+ if (dst >= dst_end)
+ return -1;
+
+ *dst = '\0';
+ return dst - old_dst;
+}
+
+/* returns NULL if the replacement string <str> is valid, or the pointer to the first error */
+const char *check_replace_string(const char *str)
+{
+ const char *err = NULL;
+ while (*str) {
+ if (*str == '\\') {
+ err = str; /* in case of a backslash, we return the pointer to it */
+ str++;
+ if (!*str)
+ return err;
+ else if (isdigit((unsigned char)*str))
+ err = NULL;
+ else if (*str == 'x') {
+ str++;
+ if (!ishex(*str))
+ return err;
+ str++;
+ if (!ishex(*str))
+ return err;
+ err = NULL;
+ }
+ else {
+ ha_warning("'\\%c' : deprecated use of a backslash before something not '\\','x' or a digit.\n", *str);
+ err = NULL;
+ }
+ }
+ str++;
+ }
+ return err;
+}
+
+
+/* This function applies a regex. It takes a const null-terminated string as input.
+ * If the function doesn't match, it returns false, else it returns true.
+ * When it is compiled with JIT, this function executes strlen on the subject.
+ * Currently the only supported flag is REG_NOTBOL.
+ */
+int regex_exec_match(const struct my_regex *preg, const char *subject,
+ size_t nmatch, regmatch_t pmatch[], int flags) {
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT) || defined(USE_PCRE2) || defined(USE_PCRE2_JIT)
+ int ret;
+#ifdef USE_PCRE2
+ PCRE2_SIZE *matches;
+ pcre2_match_data *pm;
+#else
+ int matches[MAX_MATCH * 3];
+#endif
+ int enmatch;
+ int i;
+ int options;
+
+ /* Silently limit the number of allowed matches. max
+ * match is the maximum value for match, in fact this
+ * limit is not applied.
+ */
+
+ enmatch = nmatch;
+ if (enmatch > MAX_MATCH)
+ enmatch = MAX_MATCH;
+
+ options = 0;
+ if (flags & REG_NOTBOL)
+#ifdef USE_PCRE2
+ options |= PCRE2_NOTBOL;
+#else
+ options |= PCRE_NOTBOL;
+#endif
+
+ /* The value returned by pcre_exec()/pcre2_match() is one more than the highest numbered
+ * pair that has been set. For example, if two substrings have been captured,
+ * the returned value is 3. If there are no capturing subpatterns, the return
+ * value from a successful match is 1, indicating that just the first pair of
+ * offsets has been set.
+ *
+ * It seems that this function returns 0 if it detects more matches than available
+ * space in the matches array.
+ */
+#ifdef USE_PCRE2
+ pm = pcre2_match_data_create_from_pattern(preg->reg, NULL);
+ ret = preg->mfn(preg->reg, (PCRE2_SPTR)subject, (PCRE2_SIZE)strlen(subject), 0, options, pm, NULL);
+
+ if (ret < 0) {
+ pcre2_match_data_free(pm);
+ return 0;
+ }
+
+ matches = pcre2_get_ovector_pointer(pm);
+#else
+ ret = pcre_exec(preg->reg, preg->extra, subject, strlen(subject), 0, options, matches, enmatch * 3);
+
+ if (ret < 0)
+ return 0;
+#endif
+
+ if (ret == 0)
+ ret = enmatch;
+
+ for (i=0; i<nmatch; i++) {
+ /* Copy offset. */
+ if (i < ret) {
+ pmatch[i].rm_so = matches[(i*2)];
+ pmatch[i].rm_eo = matches[(i*2)+1];
+ continue;
+ }
+ /* Set the unmatched flag (-1). */
+ pmatch[i].rm_so = -1;
+ pmatch[i].rm_eo = -1;
+ }
+#ifdef USE_PCRE2
+ pcre2_match_data_free(pm);
+#endif
+ return 1;
+#else
+ int match;
+
+ flags &= REG_NOTBOL;
+ match = regexec(&preg->regex, subject, nmatch, pmatch, flags);
+ if (match == REG_NOMATCH)
+ return 0;
+ return 1;
+#endif
+}
+
+/* This function applies a regex. It takes a "char *" and a length as input. The
+ * <subject> can be modified during the processing. If the function doesn't
+ * match, it returns false, else it returns true.
+ * When it is compiled with standard POSIX regex or PCRE, this function adds
+ * a temporary null character at the end of the <subject>. The <subject> must
+ * have a real length of <length> + 1. Currently the only supported flag is
+ * REG_NOTBOL.
+ */
+int regex_exec_match2(const struct my_regex *preg, char *subject, int length,
+ size_t nmatch, regmatch_t pmatch[], int flags) {
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT) || defined(USE_PCRE2) || defined(USE_PCRE2_JIT)
+ int ret;
+#ifdef USE_PCRE2
+ PCRE2_SIZE *matches;
+ pcre2_match_data *pm;
+#else
+ int matches[MAX_MATCH * 3];
+#endif
+ int enmatch;
+ int i;
+ int options;
+
+ /* Silently limit the number of allowed matches. max
+ * match is the maximum value for match, in fact this
+ * limit is not applied.
+ */
+ enmatch = nmatch;
+ if (enmatch > MAX_MATCH)
+ enmatch = MAX_MATCH;
+
+ options = 0;
+ if (flags & REG_NOTBOL)
+#ifdef USE_PCRE2
+ options |= PCRE2_NOTBOL;
+#else
+ options |= PCRE_NOTBOL;
+#endif
+
+ /* The value returned by pcre_exec()/pcre2_(jit)_match() is one more than the highest numbered
+ * pair that has been set. For example, if two substrings have been captured,
+ * the returned value is 3. If there are no capturing subpatterns, the return
+ * value from a successful match is 1, indicating that just the first pair of
+ * offsets has been set.
+ *
+ * It seems that this function returns 0 if it detects more matches than available
+ * space in the matches array.
+ */
+#ifdef USE_PCRE2
+ pm = pcre2_match_data_create_from_pattern(preg->reg, NULL);
+ ret = preg->mfn(preg->reg, (PCRE2_SPTR)subject, (PCRE2_SIZE)length, 0, options, pm, NULL);
+
+ if (ret < 0) {
+ pcre2_match_data_free(pm);
+ return 0;
+ }
+
+ matches = pcre2_get_ovector_pointer(pm);
+#else
+ ret = pcre_exec(preg->reg, preg->extra, subject, length, 0, options, matches, enmatch * 3);
+ if (ret < 0)
+ return 0;
+#endif
+
+ if (ret == 0)
+ ret = enmatch;
+
+ for (i=0; i<nmatch; i++) {
+ /* Copy offset. */
+ if (i < ret) {
+ pmatch[i].rm_so = matches[(i*2)];
+ pmatch[i].rm_eo = matches[(i*2)+1];
+ continue;
+ }
+ /* Set the unmatched flag (-1). */
+ pmatch[i].rm_so = -1;
+ pmatch[i].rm_eo = -1;
+ }
+#ifdef USE_PCRE2
+ pcre2_match_data_free(pm);
+#endif
+ return 1;
+#else
+ char old_char = subject[length];
+ int match;
+
+ flags &= REG_NOTBOL;
+ subject[length] = 0;
+ match = regexec(&preg->regex, subject, nmatch, pmatch, flags);
+ subject[length] = old_char;
+ if (match == REG_NOMATCH)
+ return 0;
+ return 1;
+#endif
+}
+
+struct my_regex *regex_comp(const char *str, int cs, int cap, char **err)
+{
+ struct my_regex *regex = NULL;
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT)
+ int flags = 0;
+ const char *error;
+ int erroffset;
+#elif defined(USE_PCRE2) || defined(USE_PCRE2_JIT)
+ int flags = 0;
+ int errn;
+#if defined(USE_PCRE2_JIT)
+ int jit;
+#endif
+ PCRE2_UCHAR error[256];
+ PCRE2_SIZE erroffset;
+#else
+ int flags = REG_EXTENDED;
+#endif
+
+ regex = calloc(1, sizeof(*regex));
+ if (!regex) {
+ memprintf(err, "not enough memory to build regex");
+ goto out_fail_alloc;
+ }
+
+#if defined(USE_PCRE) || defined(USE_PCRE_JIT)
+ if (!cs)
+ flags |= PCRE_CASELESS;
+ if (!cap)
+ flags |= PCRE_NO_AUTO_CAPTURE;
+
+ regex->reg = pcre_compile(str, flags, &error, &erroffset, NULL);
+ if (!regex->reg) {
+ memprintf(err, "regex '%s' is invalid (error=%s, erroffset=%d)", str, error, erroffset);
+ goto out_fail_alloc;
+ }
+
+ regex->extra = pcre_study(regex->reg, PCRE_STUDY_JIT_COMPILE, &error);
+ if (!regex->extra && error != NULL) {
+ pcre_free(regex->reg);
+ memprintf(err, "failed to compile regex '%s' (error=%s)", str, error);
+ goto out_fail_alloc;
+ }
+#elif defined(USE_PCRE2) || defined(USE_PCRE2_JIT)
+ if (!cs)
+ flags |= PCRE2_CASELESS;
+ if (!cap)
+ flags |= PCRE2_NO_AUTO_CAPTURE;
+
+ regex->reg = pcre2_compile((PCRE2_SPTR)str, PCRE2_ZERO_TERMINATED, flags, &errn, &erroffset, NULL);
+ if (!regex->reg) {
+ pcre2_get_error_message(errn, error, sizeof(error));
+ memprintf(err, "regex '%s' is invalid (error=%s, erroffset=%zu)", str, error, erroffset);
+ goto out_fail_alloc;
+ }
+
+ regex->mfn = &pcre2_match;
+#if defined(USE_PCRE2_JIT)
+ jit = pcre2_jit_compile(regex->reg, PCRE2_JIT_COMPLETE);
+ /*
+ * We end if it is an error not related to lack of JIT support
+ * in a case of JIT support missing pcre2_jit_compile is "no-op"
+ */
+ if (!jit)
+ regex->mfn = &pcre2_jit_match;
+ else {
+ if (jit != PCRE2_ERROR_JIT_BADOPTION) {
+ pcre2_code_free(regex->reg);
+ memprintf(err, "regex '%s' jit compilation failed", str);
+ goto out_fail_alloc;
+ }
+ else
+ regex->mfn = &pcre2_match;
+ }
+#endif
+
+#else
+ if (!cs)
+ flags |= REG_ICASE;
+ if (!cap)
+ flags |= REG_NOSUB;
+
+ if (regcomp(&regex->regex, str, flags) != 0) {
+ memprintf(err, "regex '%s' is invalid", str);
+ goto out_fail_alloc;
+ }
+#endif
+ return regex;
+
+ out_fail_alloc:
+ free(regex);
+ return NULL;
+}
+
+static void regex_register_build_options(void)
+{
+ char *ptr = NULL;
+
+#ifdef USE_PCRE
+ memprintf(&ptr, "Built with PCRE version : %s", (HAP_XSTRING(Z PCRE_PRERELEASE)[1] == 0)?
+ HAP_XSTRING(PCRE_MAJOR.PCRE_MINOR PCRE_DATE) :
+ HAP_XSTRING(PCRE_MAJOR.PCRE_MINOR) HAP_XSTRING(PCRE_PRERELEASE PCRE_DATE));
+ memprintf(&ptr, "%s\nRunning on PCRE version : %s", ptr, pcre_version());
+
+ memprintf(&ptr, "%s\nPCRE library supports JIT : %s", ptr,
+#ifdef USE_PCRE_JIT
+ ({
+ int r;
+ pcre_config(PCRE_CONFIG_JIT, &r);
+ r ? "yes" : "no (libpcre build without JIT?)";
+ })
+#else
+ "no (USE_PCRE_JIT not set)"
+#endif
+ );
+#endif /* USE_PCRE */
+
+#ifdef USE_PCRE2
+ memprintf(&ptr, "Built with PCRE2 version : %s", (HAP_XSTRING(Z PCRE2_PRERELEASE)[1] == 0) ?
+ HAP_XSTRING(PCRE2_MAJOR.PCRE2_MINOR PCRE2_DATE) :
+ HAP_XSTRING(PCRE2_MAJOR.PCRE2_MINOR) HAP_XSTRING(PCRE2_PRERELEASE PCRE2_DATE));
+ memprintf(&ptr, "%s\nPCRE2 library supports JIT : %s", ptr,
+#ifdef USE_PCRE2_JIT
+ ({
+ int r;
+ pcre2_config(PCRE2_CONFIG_JIT, &r);
+ r ? "yes" : "no (libpcre2 build without JIT?)";
+ })
+#else
+ "no (USE_PCRE2_JIT not set)"
+#endif
+ );
+#endif /* USE_PCRE2 */
+
+#if !defined(USE_PCRE) && !defined(USE_PCRE2)
+ memprintf(&ptr, "Built without PCRE or PCRE2 support (using libc's regex instead)");
+#endif
+ hap_register_build_opts(ptr, 1);
+}
+
+INITCALL0(STG_REGISTER, regex_register_build_options);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/resolvers.c b/src/resolvers.c
new file mode 100644
index 0000000..3275cd2
--- /dev/null
+++ b/src/resolvers.c
@@ -0,0 +1,3813 @@
+/*
+ * Name server resolution
+ *
+ * Copyright 2014 Baptiste Assmann <bedis9@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/types.h>
+
+#include <import/ebistree.h>
+
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/dns.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/ring.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+#include <haproxy/xxhash.h>
+
+
+struct list sec_resolvers = LIST_HEAD_INIT(sec_resolvers);
+struct list resolv_srvrq_list = LIST_HEAD_INIT(resolv_srvrq_list);
+
+static THREAD_LOCAL struct list death_row; /* list of deferred resolutions to kill, local validity only */
+static THREAD_LOCAL unsigned int recurse = 0; /* counter to track calls to public functions */
+static THREAD_LOCAL uint64_t resolv_query_id_seed = 0; /* random seed */
+struct resolvers *curr_resolvers = NULL;
+
+DECLARE_STATIC_POOL(resolv_answer_item_pool, "resolv_answer_item", sizeof(struct resolv_answer_item));
+DECLARE_STATIC_POOL(resolv_resolution_pool, "resolv_resolution", sizeof(struct resolv_resolution));
+DECLARE_POOL(resolv_requester_pool, "resolv_requester", sizeof(struct resolv_requester));
+
+static unsigned int resolution_uuid = 1;
+unsigned int resolv_failed_resolutions = 0;
+struct task *process_resolvers(struct task *t, void *context, unsigned int state);
+static void resolv_free_resolution(struct resolv_resolution *resolution);
+static void _resolv_unlink_resolution(struct resolv_requester *requester);
+static void enter_resolver_code();
+static void leave_resolver_code();
+
+enum {
+ RSLV_STAT_ID,
+ RSLV_STAT_PID,
+ RSLV_STAT_SENT,
+ RSLV_STAT_SND_ERROR,
+ RSLV_STAT_VALID,
+ RSLV_STAT_UPDATE,
+ RSLV_STAT_CNAME,
+ RSLV_STAT_CNAME_ERROR,
+ RSLV_STAT_ANY_ERR,
+ RSLV_STAT_NX,
+ RSLV_STAT_TIMEOUT,
+ RSLV_STAT_REFUSED,
+ RSLV_STAT_OTHER,
+ RSLV_STAT_INVALID,
+ RSLV_STAT_TOO_BIG,
+ RSLV_STAT_TRUNCATED,
+ RSLV_STAT_OUTDATED,
+ RSLV_STAT_END,
+};
+
+static struct name_desc resolv_stats[] = {
+ [RSLV_STAT_ID] = { .name = "id", .desc = "ID" },
+ [RSLV_STAT_PID] = { .name = "pid", .desc = "Parent ID" },
+ [RSLV_STAT_SENT] = { .name = "sent", .desc = "Sent" },
+ [RSLV_STAT_SND_ERROR] = { .name = "send_error", .desc = "Send error" },
+ [RSLV_STAT_VALID] = { .name = "valid", .desc = "Valid" },
+ [RSLV_STAT_UPDATE] = { .name = "update", .desc = "Update" },
+ [RSLV_STAT_CNAME] = { .name = "cname", .desc = "CNAME" },
+ [RSLV_STAT_CNAME_ERROR] = { .name = "cname_error", .desc = "CNAME error" },
+ [RSLV_STAT_ANY_ERR] = { .name = "any_err", .desc = "Any errors" },
+ [RSLV_STAT_NX] = { .name = "nx", .desc = "NX" },
+ [RSLV_STAT_TIMEOUT] = { .name = "timeout", .desc = "Timeout" },
+ [RSLV_STAT_REFUSED] = { .name = "refused", .desc = "Refused" },
+ [RSLV_STAT_OTHER] = { .name = "other", .desc = "Other" },
+ [RSLV_STAT_INVALID] = { .name = "invalid", .desc = "Invalid" },
+ [RSLV_STAT_TOO_BIG] = { .name = "too_big", .desc = "Too big" },
+ [RSLV_STAT_TRUNCATED] = { .name = "truncated", .desc = "Truncated" },
+ [RSLV_STAT_OUTDATED] = { .name = "outdated", .desc = "Outdated" },
+};
+
+static struct dns_counters dns_counters;
+
+/* Stats module callback: fills the <stats> field array from the counters
+ * found in <d> (a struct dns_counters). The array is indexed by the
+ * RSLV_STAT_* enum and must be able to hold RSLV_STAT_END entries.
+ */
+static void resolv_fill_stats(void *d, struct field *stats)
+{
+ struct dns_counters *counters = d;
+ stats[RSLV_STAT_ID] = mkf_str(FO_CONFIG, counters->id);
+ stats[RSLV_STAT_PID] = mkf_str(FO_CONFIG, counters->pid);
+ stats[RSLV_STAT_SENT] = mkf_u64(FN_GAUGE, counters->sent);
+ stats[RSLV_STAT_SND_ERROR] = mkf_u64(FN_GAUGE, counters->snd_error);
+ stats[RSLV_STAT_VALID] = mkf_u64(FN_GAUGE, counters->app.resolver.valid);
+ stats[RSLV_STAT_UPDATE] = mkf_u64(FN_GAUGE, counters->app.resolver.update);
+ stats[RSLV_STAT_CNAME] = mkf_u64(FN_GAUGE, counters->app.resolver.cname);
+ stats[RSLV_STAT_CNAME_ERROR] = mkf_u64(FN_GAUGE, counters->app.resolver.cname_error);
+ stats[RSLV_STAT_ANY_ERR] = mkf_u64(FN_GAUGE, counters->app.resolver.any_err);
+ stats[RSLV_STAT_NX] = mkf_u64(FN_GAUGE, counters->app.resolver.nx);
+ stats[RSLV_STAT_TIMEOUT] = mkf_u64(FN_GAUGE, counters->app.resolver.timeout);
+ stats[RSLV_STAT_REFUSED] = mkf_u64(FN_GAUGE, counters->app.resolver.refused);
+ stats[RSLV_STAT_OTHER] = mkf_u64(FN_GAUGE, counters->app.resolver.other);
+ stats[RSLV_STAT_INVALID] = mkf_u64(FN_GAUGE, counters->app.resolver.invalid);
+ stats[RSLV_STAT_TOO_BIG] = mkf_u64(FN_GAUGE, counters->app.resolver.too_big);
+ stats[RSLV_STAT_TRUNCATED] = mkf_u64(FN_GAUGE, counters->app.resolver.truncated);
+ stats[RSLV_STAT_OUTDATED] = mkf_u64(FN_GAUGE, counters->app.resolver.outdated);
+}
+
+static struct stats_module rslv_stats_module = {
+ .name = "resolvers",
+ .domain_flags = STATS_DOMAIN_RESOLVERS << STATS_DOMAIN,
+ .fill_stats = resolv_fill_stats,
+ .stats = resolv_stats,
+ .stats_count = RSLV_STAT_END,
+ .counters = &dns_counters,
+ .counters_size = sizeof(dns_counters),
+ .clearable = 0,
+};
+
+INITCALL1(STG_REGISTER, stats_register_module, &rslv_stats_module);
+
+/* CLI context used during "show resolvers" */
+struct show_resolvers_ctx {
+ struct resolvers *forced_section;
+ struct resolvers *resolvers;
+ struct dns_nameserver *ns;
+};
+
+/* Looks up a resolvers section by its <id> string. Walks the global
+ * sec_resolvers list and returns the first section whose id matches,
+ * or NULL when none does.
+ */
+struct resolvers *find_resolvers_by_id(const char *id)
+{
+ struct resolvers *curr;
+
+ list_for_each_entry(curr, &sec_resolvers, list)
+ if (strcmp(curr->id, id) == 0)
+ return curr;
+
+ return NULL;
+}
+
+/* Searches the global SRV request list for an entry named <name> that
+ * belongs to proxy <px>. Returns the matching request, or NULL when no
+ * entry matches both criteria.
+ */
+struct resolv_srvrq *find_srvrq_by_name(const char *name, struct proxy *px)
+{
+ struct resolv_srvrq *curr;
+
+ list_for_each_entry(curr, &resolv_srvrq_list, list)
+ if (curr->proxy == px && strcmp(curr->name, name) == 0)
+ return curr;
+
+ return NULL;
+}
+
+/* Allocates and initializes a new SRV request for server <srv> with the name
+ * <fqdn>, and appends it to the global resolv_srvrq_list. On any failure
+ * (bad FQDN, allocation error) an alert is emitted, all partial allocations
+ * are released and NULL is returned. On success the caller owns nothing: the
+ * srvrq lives on the global list.
+ */
+struct resolv_srvrq *new_resolv_srvrq(struct server *srv, char *fqdn)
+{
+ struct proxy *px = srv->proxy;
+ struct resolv_srvrq *srvrq = NULL;
+ int fqdn_len, hostname_dn_len;
+
+ /* convert the FQDN to DNS label format into the trash buffer first so we
+  * fail early on an unparsable name, before allocating anything */
+ fqdn_len = strlen(fqdn);
+ hostname_dn_len = resolv_str_to_dn_label(fqdn, fqdn_len, trash.area,
+ trash.size);
+ if (hostname_dn_len == -1) {
+ ha_alert("%s '%s', server '%s': failed to parse FQDN '%s'\n",
+ proxy_type_str(px), px->id, srv->id, fqdn);
+ goto err;
+ }
+
+ if ((srvrq = calloc(1, sizeof(*srvrq))) == NULL) {
+ ha_alert("%s '%s', server '%s': out of memory\n",
+ proxy_type_str(px), px->id, srv->id);
+ goto err;
+ }
+ srvrq->obj_type = OBJ_TYPE_SRVRQ;
+ srvrq->proxy = px;
+ srvrq->name = strdup(fqdn);
+ srvrq->hostname_dn = strdup(trash.area);
+ srvrq->hostname_dn_len = hostname_dn_len;
+ /* both strdup() results are checked together; err releases whichever
+  * succeeded */
+ if (!srvrq->name || !srvrq->hostname_dn) {
+ ha_alert("%s '%s', server '%s': out of memory\n",
+ proxy_type_str(px), px->id, srv->id);
+ goto err;
+ }
+ LIST_INIT(&srvrq->attached_servers);
+ srvrq->named_servers = EB_ROOT;
+ LIST_APPEND(&resolv_srvrq_list, &srvrq->list);
+ return srvrq;
+
+ err:
+ if (srvrq) {
+ free(srvrq->name);
+ free(srvrq->hostname_dn);
+ free(srvrq);
+ }
+ return NULL;
+}
+
+
+/* Finds and returns the SRV answer item associated to <requester>, whose
+ * owner must be a server managed by a SRV record. The SRV resolution's
+ * answer tree is scanned for an ANSWER record whose target matches the
+ * server's hostname (DNS label form) and whose port equals the server's
+ * svc_port.
+ *
+ * Returns NULL in case of error or when no record matches.
+ */
+struct resolv_answer_item *find_srvrq_answer_record(const struct resolv_requester *requester)
+{
+ struct resolv_resolution *res;
+ struct eb32_node *eb32;
+ struct server *srv;
+
+ if (!requester)
+ return NULL;
+
+ if ((srv = objt_server(requester->owner)) == NULL)
+ return NULL;
+ /* check if the server is managed by a SRV record */
+ if (srv->srvrq == NULL)
+ return NULL;
+
+ /* the answer records live on the SRV request's own resolution */
+ res = srv->srvrq->requester->resolution;
+
+ /* search an ANSWER record whose target points to the server's hostname and whose port is
+  * the same as server's svc_port */
+ for (eb32 = eb32_first(&res->response.answer_tree); eb32 != NULL; eb32 = eb32_next(eb32)) {
+ struct resolv_answer_item *item = eb32_entry(eb32, typeof(*item), link);
+
+ if (memcmp(srv->hostname_dn, item->data.target, srv->hostname_dn_len) == 0 &&
+ (srv->svc_port == item->port))
+ return item;
+ }
+
+ return NULL;
+}
+
+/* Returns a pseudo-random 16-bit value used as a DNS query ID. Implements a
+ * xorshift generator over the thread-local 64-bit seed, lazily initialized
+ * from the current time on first use.
+ */
+static inline uint16_t resolv_rnd16(void)
+{
+ uint64_t s = resolv_query_id_seed;
+
+ if (!s)
+ s = now_ms;
+ s ^= s << 13;
+ s ^= s >> 7;
+ s ^= s << 17;
+ resolv_query_id_seed = s;
+ return s;
+}
+
+
+/* Returns the configured "resolve" timeout of the resolvers section that the
+ * resolution <res> belongs to.
+ */
+static inline int resolv_resolution_timeout(struct resolv_resolution *res)
+{
+ struct resolvers *resolvers = res->resolvers;
+
+ return resolvers->timeout.resolve;
+}
+
+/* Recomputes the resolvers task's expiration date as the earliest deadline
+ * among running resolutions (retry timeout since last query) and waiting
+ * resolutions (resolve timeout since last resolution), then requeues the
+ * task with that date.
+ */
+static void resolv_update_resolvers_timeout(struct resolvers *resolvers)
+{
+ struct resolv_resolution *res;
+ int next = TICK_ETERNITY;
+
+ /* only the first entry of the running list is examined: entries are
+  * appended in query order, so it carries the earliest last_query */
+ if (!LIST_ISEMPTY(&resolvers->resolutions.curr)) {
+ res = LIST_NEXT(&resolvers->resolutions.curr, struct resolv_resolution *, list);
+ next = tick_add(now_ms, resolvers->timeout.resolve);
+ next = tick_first(next, tick_add(res->last_query, resolvers->timeout.retry));
+ }
+
+ list_for_each_entry(res, &resolvers->resolutions.wait, list)
+ next = tick_first(next, tick_add(res->last_resolution, resolv_resolution_timeout(res)));
+
+ resolvers->t->expire = next;
+ task_queue(resolvers->t);
+}
+
+/* Forges a DNS query. It needs the following information from the caller:
+ * - <query_id> : the DNS query id corresponding to this query
+ * - <query_type> : DNS_RTYPE_* request DNS record type (A, AAAA, ANY...)
+ * - <hostname_dn> : hostname in domain name format
+ * - <hostname_dn_len> : length of <hostname_dn>
+ *
+ * The query is laid out as: header, question (name + type/class), then one
+ * EDNS0 OPT additional record advertising <accepted_payload_size>.
+ *
+ * To store the query, the caller must pass a buffer <buf> and its size
+ * <bufsize>. It returns the number of written bytes in success, -1 if <buf> is
+ * too short.
+ */
+static int resolv_build_query(int query_id, int query_type, unsigned int accepted_payload_size,
+ char *hostname_dn, int hostname_dn_len, char *buf, int bufsize)
+{
+ struct dns_header dns_hdr;
+ struct dns_question qinfo;
+ struct dns_additional_record edns;
+ char *p = buf;
+
+ /* the extra byte accounted by ">=" is the name's trailing NUL below */
+ if (sizeof(dns_hdr) + sizeof(qinfo) + sizeof(edns) + hostname_dn_len >= bufsize)
+ return -1;
+
+ memset(buf, 0, bufsize);
+
+ /* Set dns query headers */
+ dns_hdr.id = (unsigned short) htons(query_id);
+ dns_hdr.flags = htons(0x0100); /* qr=0, opcode=0, aa=0, tc=0, rd=1, ra=0, z=0, rcode=0 */
+ dns_hdr.qdcount = htons(1); /* 1 question */
+ dns_hdr.ancount = 0;
+ dns_hdr.nscount = 0;
+ dns_hdr.arcount = htons(1); /* the EDNS OPT record below */
+ memcpy(p, &dns_hdr, sizeof(dns_hdr));
+ p += sizeof(dns_hdr);
+
+ /* Set up query hostname */
+ memcpy(p, hostname_dn, hostname_dn_len);
+ p += hostname_dn_len;
+ *p++ = 0;
+
+ /* Set up query info (type and class) */
+ qinfo.qtype = htons(query_type);
+ qinfo.qclass = htons(DNS_RCLASS_IN);
+ memcpy(p, &qinfo, sizeof(qinfo));
+ p += sizeof(qinfo);
+
+ /* Set the DNS extension (EDNS0 OPT pseudo-record, root name, no data) */
+ edns.name = 0;
+ edns.type = htons(DNS_RTYPE_OPT);
+ edns.udp_payload_size = htons(accepted_payload_size);
+ edns.extension = 0;
+ edns.data_length = 0;
+ memcpy(p, &edns, sizeof(edns));
+ p += sizeof(edns);
+
+ return (p - buf);
+}
+
+/* Sends a DNS query to all nameservers of the resolvers section associated
+ * to <resolution>, and moves the resolution to the end of the active list.
+ * It returns 0 on success or -1 if the trash buffer is not large enough to
+ * build a valid query.
+ */
+static int resolv_send_query(struct resolv_resolution *resolution)
+{
+ struct resolvers *resolvers = resolution->resolvers;
+ struct dns_nameserver *ns;
+ int len;
+
+ /* Update resolution */
+ resolution->nb_queries = 0;
+ resolution->nb_responses = 0;
+ resolution->last_query = now_ms;
+
+ len = resolv_build_query(resolution->query_id, resolution->query_type,
+ resolvers->accepted_payload_size,
+ resolution->hostname_dn, resolution->hostname_dn_len,
+ trash.area, trash.size);
+ if (len < 0) {
+ send_log(NULL, LOG_NOTICE,
+ "can not build the query message for %s, in resolvers %s.\n",
+ resolution->hostname_dn, resolvers->id);
+ return -1;
+ }
+
+ /* one counted query per nameserver that accepted the message; send
+  * errors on individual nameservers are not fatal */
+ list_for_each_entry(ns, &resolvers->nameservers, list) {
+ if (dns_send_nameserver(ns, trash.area, len) >= 0)
+ resolution->nb_queries++;
+ }
+
+ /* Push the resolution at the end of the active list */
+ LIST_DEL_INIT(&resolution->list);
+ LIST_APPEND(&resolvers->resolutions.curr, &resolution->list);
+ return 0;
+}
+
+/* Prepares and sends a DNS resolution: picks a free query ID, registers it
+ * in the resolvers' query_ids tree, marks the resolution as running and
+ * sends the query. It returns 1 if the query was sent, 0 if skipped (no
+ * hostname yet, or already running) and -1 if an error occurred (no free
+ * query ID found).
+ */
+static int
+resolv_run_resolution(struct resolv_resolution *resolution)
+{
+ struct resolvers *resolvers = resolution->resolvers;
+ int query_id, i;
+
+ /* Avoid sending requests for resolutions that don't yet have an
+  * hostname, ie resolutions linked to servers that do not yet have an
+  * fqdn */
+ if (!resolution->hostname_dn)
+ return 0;
+
+ /* Check if a resolution has already been started for this server return
+  * directly to avoid resolution pill up. */
+ if (resolution->step != RSLV_STEP_NONE)
+ return 0;
+
+ /* Generates a new query id. We try at most 100 times to find a free
+  * query id; query_id is left at -1 when all candidates collided. */
+ for (i = 0; i < 100; ++i) {
+ query_id = resolv_rnd16();
+ if (!eb32_lookup(&resolvers->query_ids, query_id))
+ break;
+ query_id = -1;
+ }
+ if (query_id == -1) {
+ send_log(NULL, LOG_NOTICE,
+ "could not generate a query id for %s, in resolvers %s.\n",
+ resolution->hostname_dn, resolvers->id);
+ return -1;
+ }
+
+ /* Update resolution parameters */
+ resolution->query_id = query_id;
+ resolution->qid.key = query_id;
+ resolution->step = RSLV_STEP_RUNNING;
+ resolution->query_type = resolution->prefered_query_type;
+ resolution->try = resolvers->resolve_retries;
+ eb32_insert(&resolvers->query_ids, &resolution->qid);
+
+ /* Send the DNS query; this first attempt consumes one try */
+ resolution->try -= 1;
+ resolv_send_query(resolution);
+ return 1;
+}
+
+/* Performs a name resolution for the requester <req>: if the cached response
+ * is no longer valid (never resolved, non-valid status, or the hold.valid
+ * period elapsed), the resolvers task is woken up to re-run the resolution.
+ * Safe to call with a NULL or unlinked requester (no-op in that case).
+ */
+void resolv_trigger_resolution(struct resolv_requester *req)
+{
+ struct resolvers *resolvers;
+ struct resolv_resolution *res;
+ int exp;
+
+ if (!req || !req->resolution)
+ return;
+ res = req->resolution;
+ resolvers = res->resolvers;
+
+ enter_resolver_code();
+
+ /* The resolution must not be triggered yet. Use the cached response, if
+  * valid */
+ exp = tick_add(res->last_resolution, resolvers->hold.valid);
+ if (resolvers->t && (!tick_isset(resolvers->t->expire) || res->status != RSLV_STATUS_VALID ||
+ !tick_isset(res->last_resolution) || tick_is_expired(exp, now_ms))) {
+ /* If the resolution is not running and the requester is a
+  * server, reset the resolution timer to force a quick
+  * resolution.
+  */
+ if (res->step == RSLV_STEP_NONE &&
+ (obj_type(req->owner) == OBJ_TYPE_SERVER ||
+ obj_type(req->owner) == OBJ_TYPE_SRVRQ))
+ res->last_resolution = TICK_ETERNITY;
+ task_wakeup(resolvers->t, TASK_WOKEN_OTHER);
+ }
+
+ leave_resolver_code();
+}
+
+
+/* Resets some resolution parameters to initial values (step, counters,
+ * query type) and also deletes the query ID from the resolvers' query_ids
+ * tree so it can be reused by a later resolution.
+ */
+static void resolv_reset_resolution(struct resolv_resolution *resolution)
+{
+ /* update resolution status */
+ resolution->step = RSLV_STEP_NONE;
+ resolution->try = 0;
+ resolution->last_resolution = now_ms;
+ resolution->nb_queries = 0;
+ resolution->nb_responses = 0;
+ resolution->query_type = resolution->prefered_query_type;
+
+ /* clean up query id */
+ eb32_delete(&resolution->qid);
+ resolution->query_id = 0;
+ resolution->qid.key = 0;
+}
+
+/* Returns the query id contained in a DNS response: the first two bytes of
+ * the message, read in network (big-endian) order.
+ */
+static inline unsigned short resolv_response_get_query_id(unsigned char *resp)
+{
+ return resp[0] * 256 + resp[1];
+}
+
+
+/* Analyses, re-builds and copies the name <name> from the DNS response packet
+ * <buffer>. <name> must point to the 'data_len' information or pointer 'c0'
+ * for compressed data. The result is copied into <dest>, ensuring we don't
+ * overflow using <dest_len>. Returns the number of bytes the caller can move
+ * forward. If 0 it means an error occurred while parsing the name. <offset>
+ * receives the number of bytes the caller can move forward in the packet
+ * (i.e. the on-wire size of the name at <name>, not of its expansion).
+ * <depth> tracks compression-pointer recursion and must be 0 on the first
+ * call; it bounds recursion to protect against pointer loops.
+ */
+int resolv_read_name(unsigned char *buffer, unsigned char *bufend,
+ unsigned char *name, char *destination, int dest_len,
+ int *offset, unsigned int depth)
+{
+ int nb_bytes = 0, n = 0;
+ int label_len;
+ unsigned char *reader = name;
+ char *dest = destination;
+
+ while (1) {
+ if (reader >= bufend)
+ goto err;
+
+ /* Name compression is in use: the two top bits 11 introduce a
+  * 14-bit pointer to an earlier name in the packet (RFC 1035) */
+ if ((*reader & 0xc0) == 0xc0) {
+ if (reader + 1 >= bufend)
+ goto err;
+
+ /* Must point BEFORE current position (rejects forward
+  * references and self-references) */
+ if ((buffer + reader[1]) > reader)
+ goto err;
+
+ /* cap recursion to defend against crafted pointer chains */
+ if (depth++ > 100)
+ goto err;
+
+ n = resolv_read_name(buffer, bufend, buffer + (*reader & 0x3f)*256 + reader[1],
+ dest, dest_len - nb_bytes, offset, depth);
+ if (n == 0)
+ goto err;
+
+ dest += n;
+ nb_bytes += n;
+ goto out;
+ }
+
+ /* plain label: a length byte followed by that many characters;
+  * a zero length byte terminates the name */
+ label_len = *reader;
+ if (label_len == 0)
+ goto out;
+
+ /* Check if:
+  * - we won't read outside the buffer
+  * - there is enough place in the destination
+  */
+ if ((reader + label_len >= bufend) || (nb_bytes + label_len >= dest_len))
+ goto err;
+
+ /* +1 to take label len + label string */
+ label_len++;
+
+ memcpy(dest, reader, label_len);
+
+ dest += label_len;
+ nb_bytes += label_len;
+ reader += label_len;
+ }
+
+ out:
+ /* offset computation:
+  * parse from <name> until finding either NULL or a pointer "c0xx";
+  * a pointer counts for 2 on-wire bytes, the final NUL for 1 */
+ reader = name;
+ *offset = 0;
+ while (reader < bufend) {
+ if ((reader[0] & 0xc0) == 0xc0) {
+ *offset += 2;
+ break;
+ }
+ else if (*reader == 0) {
+ *offset += 1;
+ break;
+ }
+ *offset += 1;
+ ++reader;
+ }
+ return nb_bytes;
+
+ err:
+ return 0;
+}
+
+/* Must be called before any sequence of resolver operations that may abort
+ * resolutions: it (re)initializes the thread-local death_row list on the
+ * outermost call and tracks the recursion depth so nested calls are safe.
+ * Each call must be paired with a later leave_resolver_code(), which
+ * processes the deferred deletions.
+ */
+static void enter_resolver_code(void)
+{
+ if (!recurse)
+ LIST_INIT(&death_row);
+ recurse++;
+}
+
+/* Adds a resolution to the death_row for deferred release by
+ * leave_resolver_code(). The resolution is first detached from the
+ * query_ids tree and from whatever resolvers list it is on.
+ */
+static void abort_resolution(struct resolv_resolution *res)
+{
+ /* Remove the resolution from query_ids tree and from any resolvers list */
+ eb32_delete(&res->qid);
+ res->query_id = 0;
+ res->qid.key = 0;
+
+ LIST_DEL_INIT(&res->list);
+ LIST_APPEND(&death_row, &res->list);
+}
+
+/* Counterpart of enter_resolver_code(). This releases any aborted resolution
+ * found in the death row, on the outermost call only. Note that some of them
+ * are in relation via internal objects and might cause the deletion of other
+ * ones from the same list, so we must absolutely not use a
+ * list_for_each_entry_safe() nor any such thing here, and solely rely on each
+ * call to remove the first remaining list element.
+ */
+static void leave_resolver_code(void)
+{
+ struct resolv_resolution *res;
+
+ recurse--;
+ if (recurse)
+ return;
+
+ while (!LIST_ISEMPTY(&death_row)) {
+ res = LIST_NEXT(&death_row, struct resolv_resolution *, list);
+ resolv_free_resolution(res);
+ }
+
+ /* make sure nobody tries to add anything without having initialized it */
+ death_row = (struct list){ };
+}
+
+/* Cleanup fqdn/port and address of a server attached to a SRV resolution. This
+ * happens when an SRV item is purged or when the server status is considered as
+ * obsolete. The server is unlinked from its A/AAAA resolution, its address,
+ * port and hostname are wiped, further resolution is disabled and the server
+ * is moved back to the srvrq's pool of free attached servers.
+ *
+ * Must be called with the DNS lock held, and with the death_row already
+ * initialized via enter_resolver_code().
+ */
+static void resolv_srvrq_cleanup_srv(struct server *srv)
+{
+ /* unlink before taking the server lock (lock ordering: DNS then server) */
+ _resolv_unlink_resolution(srv->resolv_requester);
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ srvrq_update_srv_status(srv, 1);
+ ha_free(&srv->hostname);
+ ha_free(&srv->hostname_dn);
+ srv->hostname_dn_len = 0;
+ memset(&srv->addr, 0, sizeof(srv->addr));
+ srv->svc_port = 0;
+ srv->flags |= SRV_F_NO_RESOLUTION;
+
+ /* drop the entry from the srvrq's named_servers tree, if any */
+ ebpt_delete(&srv->host_dn);
+ ha_free(&srv->host_dn.key);
+
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ /* return the server to the srvrq's list of free servers */
+ LIST_DEL_INIT(&srv->srv_rec_item);
+ LIST_APPEND(&srv->srvrq->attached_servers, &srv->srv_rec_item);
+
+ srv->srvrq_check->expire = TICK_ETERNITY;
+}
+
+/* Task handler taking care to cleanup a server resolution when it is
+ * outdated. This only happens for a server relying on a SRV record: once the
+ * task's expiration date is reached, the server's SRV-derived state is wiped
+ * under the DNS lock. Always returns the task <t> for requeueing.
+ */
+static struct task *resolv_srvrq_expire_task(struct task *t, void *context, unsigned int state)
+{
+ struct server *srv = context;
+
+ if (!tick_is_expired(t->expire, now_ms))
+ goto end;
+
+ enter_resolver_code();
+ HA_SPIN_LOCK(DNS_LOCK, &srv->srvrq->resolvers->lock);
+ resolv_srvrq_cleanup_srv(srv);
+ HA_SPIN_UNLOCK(DNS_LOCK, &srv->srvrq->resolvers->lock);
+ leave_resolver_code();
+
+ end:
+ return t;
+}
+
+/* Checks for any obsolete record in the response of <res>, purging records
+ * (and detaching their servers) whose last_seen date exceeded the
+ * hold.obsolete period. Then, for each remaining SRV record, tries to match
+ * or assign a server of every SRVRQ requester and updates that server
+ * (port, address from the additional record, FQDN, weight).
+ */
+static void resolv_check_response(struct resolv_resolution *res)
+{
+ struct resolvers *resolvers = res->resolvers;
+ struct resolv_requester *req;
+ struct eb32_node *eb32, *eb32_back;
+ struct server *srv, *srvback;
+ struct resolv_srvrq *srvrq;
+
+ /* eb32_back is fetched before the body runs because the current item
+  * may be deleted from the tree below */
+ for (eb32 = eb32_first(&res->response.answer_tree); eb32 && (eb32_back = eb32_next(eb32), 1); eb32 = eb32_back) {
+ struct resolv_answer_item *item = eb32_entry(eb32, typeof(*item), link);
+ struct resolv_answer_item *ar_item = item->ar_item;
+
+ /* clean up obsolete Additional record */
+ if (ar_item && tick_is_lt(tick_add(ar_item->last_seen, resolvers->hold.obsolete), now_ms)) {
+ /* Cleaning up the AR item will trigger an extra DNS resolution, except if the SRV
+  * item is also obsolete.
+  */
+ pool_free(resolv_answer_item_pool, ar_item);
+ item->ar_item = NULL;
+ }
+
+ /* Remove obsolete items */
+ if (tick_is_lt(tick_add(item->last_seen, resolvers->hold.obsolete), now_ms)) {
+ if (item->type == DNS_RTYPE_A || item->type == DNS_RTYPE_AAAA) {
+ /* Remove any associated server */
+ list_for_each_entry_safe(srv, srvback, &item->attached_servers, ip_rec_item) {
+ LIST_DEL_INIT(&srv->ip_rec_item);
+ }
+ }
+ else if (item->type == DNS_RTYPE_SRV) {
+ /* Remove any associated server */
+ list_for_each_entry_safe(srv, srvback, &item->attached_servers, srv_rec_item)
+ resolv_srvrq_cleanup_srv(srv);
+ }
+
+ eb32_delete(&item->link);
+ if (item->ar_item) {
+ pool_free(resolv_answer_item_pool, item->ar_item);
+ item->ar_item = NULL;
+ }
+ pool_free(resolv_answer_item_pool, item);
+ continue;
+ }
+
+ if (item->type != DNS_RTYPE_SRV)
+ continue;
+
+ /* Now process SRV records: find or pick a server for each SRVRQ
+  * requester of this resolution */
+ list_for_each_entry(req, &res->requesters, list) {
+ struct ebpt_node *node;
+ char target[DNS_MAX_NAME_SIZE+1];
+
+ int i;
+ if ((srvrq = objt_resolv_srvrq(req->owner)) == NULL)
+ continue;
+
+ /* Check if a server already uses that record; if so it is
+  * taken locked through srv_found */
+ srv = NULL;
+ list_for_each_entry(srv, &item->attached_servers, srv_rec_item) {
+ if (srv->srvrq == srvrq) {
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ goto srv_found;
+ }
+ }
+
+
+ /* If not empty we try to match a server
+  * in server state file tree with the same hostname
+  */
+ if (!eb_is_empty(&srvrq->named_servers)) {
+ srv = NULL;
+
+ /* convert the key to lookup in lower case */
+ for (i = 0 ; item->data.target[i] ; i++)
+ target[i] = tolower(item->data.target[i]);
+ target[i] = 0;
+
+ node = ebis_lookup(&srvrq->named_servers, target);
+ if (node) {
+ srv = ebpt_entry(node, struct server, host_dn);
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+
+ /* an entry was found with the same hostname
+  * let check this node if the port matches
+  * and try next node if the hostname
+  * is still the same
+  */
+ while (1) {
+ if (srv->svc_port == item->port) {
+ /* server found, we remove it from tree */
+ ebpt_delete(node);
+ ha_free(&srv->host_dn.key);
+ goto srv_found;
+ }
+
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+
+ node = ebpt_next(node);
+ if (!node)
+ break;
+
+ srv = ebpt_entry(node, struct server, host_dn);
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+
+ if ((item->data_len != srv->hostname_dn_len)
+ || memcmp(srv->hostname_dn, item->data.target, item->data_len) != 0) {
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ break;
+ }
+ }
+ }
+ }
+
+ /* Pick the first server listed in srvrq (those ones don't
+  * have hostname and are free to use)
+  */
+ srv = NULL;
+ list_for_each_entry(srv, &srvrq->attached_servers, srv_rec_item) {
+ LIST_DEL_INIT(&srv->srv_rec_item);
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+ goto srv_found;
+ }
+ srv = NULL;
+
+srv_found:
+ /* And update this server, if found (srv is locked here) */
+ if (srv) {
+ /* re-enable DNS resolution for this server by default */
+ srv->flags &= ~SRV_F_NO_RESOLUTION;
+ srv->srvrq_check->expire = TICK_ETERNITY;
+
+ srv->svc_port = item->port;
+ srv->flags &= ~SRV_F_MAPPORTS;
+
+ /* Check if an Additional Record is associated to this SRV record.
+  * Perform some sanity checks too to ensure the record can be used.
+  * If all fine, we simply pick up the IP address found and associate
+  * it to the server. And DNS resolution is disabled for this server.
+  */
+ if ((item->ar_item != NULL) &&
+ (item->ar_item->type == DNS_RTYPE_A || item->ar_item->type == DNS_RTYPE_AAAA))
+ {
+
+ switch (item->ar_item->type) {
+ case DNS_RTYPE_A:
+ srv_update_addr(srv, &item->ar_item->data.in4.sin_addr, AF_INET, "DNS additional record");
+ break;
+ case DNS_RTYPE_AAAA:
+ srv_update_addr(srv, &item->ar_item->data.in6.sin6_addr, AF_INET6, "DNS additional record");
+ break;
+ }
+
+ srv->flags |= SRV_F_NO_RESOLUTION;
+
+ /* Unlink A/AAAA resolution for this server if there is an AR item.
+  * It is usless to perform an extra resolution
+  */
+ _resolv_unlink_resolution(srv->resolv_requester);
+ }
+
+ if (!srv->hostname_dn) {
+ const char *msg = NULL;
+ char hostname[DNS_MAX_NAME_SIZE+1];
+
+ if (resolv_dn_label_to_str(item->data.target, item->data_len,
+ hostname, sizeof(hostname)) == -1) {
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ continue;
+ }
+ msg = srv_update_fqdn(srv, hostname, "SRV record", 1);
+ if (msg)
+ send_log(srv->proxy, LOG_NOTICE, "%s", msg);
+ }
+
+ if (!LIST_INLIST(&srv->srv_rec_item))
+ LIST_APPEND(&item->attached_servers, &srv->srv_rec_item);
+
+ if (!(srv->flags & SRV_F_NO_RESOLUTION)) {
+ /* If there is no AR item responsible of the FQDN resolution,
+  * trigger a dedicated DNS resolution
+  */
+ if (!srv->resolv_requester || !srv->resolv_requester->resolution)
+ resolv_link_resolution(srv, OBJ_TYPE_SERVER, 1);
+ }
+
+ /* Update the server status */
+ srvrq_update_srv_status(srv, (srv->addr.ss_family != AF_INET && srv->addr.ss_family != AF_INET6));
+
+ if (!srv->resolv_opts.ignore_weight) {
+ char weight[9];
+ int ha_weight;
+
+ /* DNS weight range if from 0 to 65535
+  * HAProxy weight is from 0 to 256
+  * The rule below ensures that weight 0 is well respected
+  * while allowing a "mapping" from DNS weight into HAProxy's one.
+  */
+ ha_weight = (item->weight + 255) / 256;
+
+ snprintf(weight, sizeof(weight), "%d", ha_weight);
+ server_parse_weight_change_request(srv, weight);
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ }
+ }
+ }
+}
+
+/* Validates that the buffer DNS response provided in <resp> and finishing
+ * before <bufend> is valid from a DNS protocol point of view.
+ *
+ * The result is stored in <resolution>' response, buf_response,
+ * response_query_records and response_answer_records members.
+ *
+ * This function returns one of the RSLV_RESP_* code to indicate the type of
+ * error found.
+ */
+static int resolv_validate_dns_response(unsigned char *resp, unsigned char *bufend,
+ struct resolv_resolution *resolution, int max_answer_records)
+{
+ unsigned char *reader;
+ char *previous_dname, tmpname[DNS_MAX_NAME_SIZE];
+ int len, flags, offset;
+ int nb_saved_records;
+ struct resolv_query_item *query;
+ struct resolv_answer_item *answer_record, *tmp_record;
+ struct resolv_response *r_res;
+ struct eb32_node *eb32;
+ uint32_t key = 0;
+ int i, found = 0;
+ int cause = RSLV_RESP_ERROR;
+
+ reader = resp;
+ len = 0;
+ previous_dname = NULL;
+ query = NULL;
+ answer_record = NULL;
+
+ /* Initialization of response buffer and structure */
+ r_res = &resolution->response;
+
+ /* query id */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+
+ r_res->header.id = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* Flags and rcode are stored over 2 bytes
+ * First byte contains:
+ * - response flag (1 bit)
+ * - opcode (4 bits)
+ * - authoritative (1 bit)
+ * - truncated (1 bit)
+ * - recursion desired (1 bit)
+ */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+
+ flags = reader[0] * 256 + reader[1];
+
+ if ((flags & DNS_FLAG_REPLYCODE) != DNS_RCODE_NO_ERROR) {
+ if ((flags & DNS_FLAG_REPLYCODE) == DNS_RCODE_NX_DOMAIN) {
+ cause = RSLV_RESP_NX_DOMAIN;
+ goto return_error;
+ }
+ else if ((flags & DNS_FLAG_REPLYCODE) == DNS_RCODE_REFUSED) {
+ cause = RSLV_RESP_REFUSED;
+ goto return_error;
+ }
+ else {
+ cause = RSLV_RESP_ERROR;
+ goto return_error;
+ }
+ }
+
+ /* Move forward 2 bytes for flags */
+ reader += 2;
+
+ /* 2 bytes for question count */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ r_res->header.qdcount = reader[0] * 256 + reader[1];
+ /* (for now) we send one query only, so we expect only one in the
+ * response too */
+ if (r_res->header.qdcount != 1) {
+ cause = RSLV_RESP_QUERY_COUNT_ERROR;
+ goto return_error;
+ }
+
+ if (r_res->header.qdcount > DNS_MAX_QUERY_RECORDS)
+ goto invalid_resp;
+ reader += 2;
+
+ /* 2 bytes for answer count */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ r_res->header.ancount = reader[0] * 256 + reader[1];
+ if (r_res->header.ancount == 0) {
+ cause = RSLV_RESP_ANCOUNT_ZERO;
+ goto return_error;
+ }
+
+ /* Check if too many records are announced */
+ if (r_res->header.ancount > max_answer_records)
+ goto invalid_resp;
+ reader += 2;
+
+ /* 2 bytes authority count */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ r_res->header.nscount = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* 2 bytes additional count */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ r_res->header.arcount = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* Parsing dns queries. For now there is only one query and it exists
+ * because (qdcount == 1).
+ */
+ query = &resolution->response_query_records[0];
+
+ /* Name is a NULL terminated string in our case, since we have
+ * one query per response and the first one can't be compressed
+ * (using the 0x0c format) */
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, query->name, DNS_MAX_NAME_SIZE, &offset, 0);
+
+ if (len == 0)
+ goto invalid_resp;
+
+ /* Now let's check the query's dname corresponds to the one we sent. */
+ if (len != resolution->hostname_dn_len ||
+ memcmp(query->name, resolution->hostname_dn, resolution->hostname_dn_len) != 0) {
+ cause = RSLV_RESP_WRONG_NAME;
+ goto return_error;
+ }
+
+ reader += offset;
+ previous_dname = query->name;
+
+ /* move forward 2 bytes for question type */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ query->type = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* move forward 2 bytes for question class */
+ if (reader + 2 >= bufend)
+ goto invalid_resp;
+ query->class = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* TRUNCATED flag must be checked after we could read the query type
+ * because a TRUNCATED SRV query type response can still be exploited
+ */
+ if (query->type != DNS_RTYPE_SRV && flags & DNS_FLAG_TRUNCATED) {
+ cause = RSLV_RESP_TRUNCATED;
+ goto return_error;
+ }
+
+ /* now parsing response records */
+ nb_saved_records = 0;
+ for (i = 0; i < r_res->header.ancount; i++) {
+ if (reader >= bufend)
+ goto invalid_resp;
+
+ answer_record = pool_alloc(resolv_answer_item_pool);
+ if (answer_record == NULL)
+ goto invalid_resp;
+
+ /* initialization */
+ answer_record->ar_item = NULL;
+ answer_record->last_seen = TICK_ETERNITY;
+ LIST_INIT(&answer_record->attached_servers);
+ answer_record->link.node.leaf_p = NULL;
+
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset, 0);
+
+ if (len == 0)
+ goto invalid_resp;
+
+ /* Check if the current record dname is valid. previous_dname
+ * points either to queried dname or last CNAME target */
+ if (query->type != DNS_RTYPE_SRV && memcmp(previous_dname, tmpname, len) != 0) {
+ if (i == 0) {
+ /* First record, means a mismatch issue between
+ * queried dname and dname found in the first
+ * record */
+ goto invalid_resp;
+ }
+ else {
+ /* If not the first record, this means we have a
+ * CNAME resolution error.
+ */
+ cause = RSLV_RESP_CNAME_ERROR;
+ goto return_error;
+ }
+
+ }
+
+ memcpy(answer_record->name, tmpname, len);
+ answer_record->name[len] = 0;
+
+ reader += offset;
+ if (reader >= bufend)
+ goto invalid_resp;
+
+ /* 2 bytes for record type (A, AAAA, CNAME, etc...) */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->type = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* 2 bytes for class (2) */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->class = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* 4 bytes for ttl (4) */
+ if (reader + 4 > bufend)
+ goto invalid_resp;
+
+ answer_record->ttl = reader[0] * 16777216 + reader[1] * 65536
+ + reader[2] * 256 + reader[3];
+ reader += 4;
+
+ /* Now reading data len */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->data_len = reader[0] * 256 + reader[1];
+
+ /* Move forward 2 bytes for data len */
+ reader += 2;
+
+ if (reader + answer_record->data_len > bufend)
+ goto invalid_resp;
+
+ /* Analyzing record content */
+ switch (answer_record->type) {
+ case DNS_RTYPE_A:
+ /* ipv4 is stored on 4 bytes */
+ if (answer_record->data_len != 4)
+ goto invalid_resp;
+
+ answer_record->data.in4.sin_family = AF_INET;
+ memcpy(&answer_record->data.in4.sin_addr, reader, answer_record->data_len);
+ key = XXH32(reader, answer_record->data_len, answer_record->type);
+ break;
+
+ case DNS_RTYPE_CNAME:
+ /* Check if this is the last record and update the caller about the status:
+ * no IP could be found and last record was a CNAME. Could be triggered
+ * by a wrong query type
+ *
+ * + 1 because answer_record_id starts at 0
+ * while number of answers is an integer and
+ * starts at 1.
+ */
+ if (i + 1 == r_res->header.ancount) {
+ cause = RSLV_RESP_CNAME_ERROR;
+ goto return_error;
+ }
+
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset, 0);
+ if (len == 0)
+ goto invalid_resp;
+
+ memcpy(answer_record->data.target, tmpname, len);
+ answer_record->data.target[len] = 0;
+ key = XXH32(tmpname, len, answer_record->type);
+ previous_dname = answer_record->data.target;
+ break;
+
+
+ case DNS_RTYPE_SRV:
+ /* Answer must contain :
+ * - 2 bytes for the priority
+ * - 2 bytes for the weight
+ * - 2 bytes for the port
+ * - the target hostname
+ */
+ if (answer_record->data_len <= 6)
+ goto invalid_resp;
+
+ answer_record->priority = read_n16(reader);
+ reader += sizeof(uint16_t);
+ answer_record->weight = read_n16(reader);
+ reader += sizeof(uint16_t);
+ answer_record->port = read_n16(reader);
+ reader += sizeof(uint16_t);
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset, 0);
+ if (len == 0)
+ goto invalid_resp;
+
+ answer_record->data_len = len;
+ memcpy(answer_record->data.target, tmpname, len);
+ answer_record->data.target[len] = 0;
+ key = XXH32(tmpname, len, answer_record->type);
+ if (answer_record->ar_item != NULL) {
+ pool_free(resolv_answer_item_pool, answer_record->ar_item);
+ answer_record->ar_item = NULL;
+ }
+ break;
+
+ case DNS_RTYPE_AAAA:
+ /* ipv6 is stored on 16 bytes */
+ if (answer_record->data_len != 16)
+ goto invalid_resp;
+
+ answer_record->data.in6.sin6_family = AF_INET6;
+ memcpy(&answer_record->data.in6.sin6_addr, reader, answer_record->data_len);
+ key = XXH32(reader, answer_record->data_len, answer_record->type);
+ break;
+
+ } /* switch (record type) */
+
+ /* Increment the counter for number of records saved into our
+ * local response */
+ nb_saved_records++;
+
+ /* Move forward answer_record->data_len for analyzing next
+ * record in the response */
+ reader += ((answer_record->type == DNS_RTYPE_SRV)
+ ? offset
+ : answer_record->data_len);
+
+ /* Lookup to see if we already had this entry */
+ found = 0;
+
+ for (eb32 = eb32_lookup(&r_res->answer_tree, key); eb32 != NULL; eb32 = eb32_next(eb32)) {
+ tmp_record = eb32_entry(eb32, typeof(*tmp_record), link);
+ if (tmp_record->type != answer_record->type)
+ continue;
+
+ switch(tmp_record->type) {
+ case DNS_RTYPE_A:
+ if (!memcmp(&answer_record->data.in4.sin_addr,
+ &tmp_record->data.in4.sin_addr,
+ sizeof(answer_record->data.in4.sin_addr)))
+ found = 1;
+ break;
+
+ case DNS_RTYPE_AAAA:
+ if (!memcmp(&answer_record->data.in6.sin6_addr,
+ &tmp_record->data.in6.sin6_addr,
+ sizeof(answer_record->data.in6.sin6_addr)))
+ found = 1;
+ break;
+
+ case DNS_RTYPE_SRV:
+ if (answer_record->data_len == tmp_record->data_len &&
+ memcmp(answer_record->data.target, tmp_record->data.target, answer_record->data_len) == 0 &&
+ answer_record->port == tmp_record->port) {
+ tmp_record->weight = answer_record->weight;
+ found = 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (found == 1)
+ break;
+ }
+
+ if (found == 1) {
+ tmp_record->last_seen = now_ms;
+ pool_free(resolv_answer_item_pool, answer_record);
+ answer_record = NULL;
+ }
+ else {
+ answer_record->last_seen = now_ms;
+ answer_record->ar_item = NULL;
+ answer_record->link.key = key;
+ eb32_insert(&r_res->answer_tree, &answer_record->link);
+ answer_record = NULL;
+ }
+ } /* for i 0 to ancount */
+
+ /* Save the number of records we really own */
+ r_res->header.ancount = nb_saved_records;
+
+ /* now parsing additional records for SRV queries only */
+ if (query->type != DNS_RTYPE_SRV)
+ goto skip_parsing_additional_records;
+
+ /* if we find Authority records, just skip them */
+ for (i = 0; i < r_res->header.nscount; i++) {
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE,
+ &offset, 0);
+ if (len == 0)
+ continue;
+
+ if (reader + offset + 10 >= bufend)
+ goto invalid_resp;
+
+ reader += offset;
+ /* skip 2 bytes for class */
+ reader += 2;
+ /* skip 2 bytes for type */
+ reader += 2;
+ /* skip 4 bytes for ttl */
+ reader += 4;
+ /* read data len */
+ len = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ if (reader + len >= bufend)
+ goto invalid_resp;
+
+ reader += len;
+ }
+
+ nb_saved_records = 0;
+ for (i = 0; i < r_res->header.arcount; i++) {
+ if (reader >= bufend)
+ goto invalid_resp;
+
+ answer_record = pool_alloc(resolv_answer_item_pool);
+ if (answer_record == NULL)
+ goto invalid_resp;
+ answer_record->last_seen = TICK_ETERNITY;
+ LIST_INIT(&answer_record->attached_servers);
+
+ offset = 0;
+ len = resolv_read_name(resp, bufend, reader, tmpname, DNS_MAX_NAME_SIZE, &offset, 0);
+
+ if (len == 0) {
+ pool_free(resolv_answer_item_pool, answer_record);
+ answer_record = NULL;
+ continue;
+ }
+
+ memcpy(answer_record->name, tmpname, len);
+ answer_record->name[len] = 0;
+
+ reader += offset;
+ if (reader >= bufend)
+ goto invalid_resp;
+
+ /* 2 bytes for record type (A, AAAA, CNAME, etc...) */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->type = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* 2 bytes for class (2) */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->class = reader[0] * 256 + reader[1];
+ reader += 2;
+
+ /* 4 bytes for ttl (4) */
+ if (reader + 4 > bufend)
+ goto invalid_resp;
+
+ answer_record->ttl = reader[0] * 16777216 + reader[1] * 65536
+ + reader[2] * 256 + reader[3];
+ reader += 4;
+
+ /* Now reading data len */
+ if (reader + 2 > bufend)
+ goto invalid_resp;
+
+ answer_record->data_len = reader[0] * 256 + reader[1];
+
+ /* Move forward 2 bytes for data len */
+ reader += 2;
+
+ if (reader + answer_record->data_len > bufend)
+ goto invalid_resp;
+
+ /* Analyzing record content */
+ switch (answer_record->type) {
+ case DNS_RTYPE_A:
+ /* ipv4 is stored on 4 bytes */
+ if (answer_record->data_len != 4)
+ goto invalid_resp;
+
+ answer_record->data.in4.sin_family = AF_INET;
+ memcpy(&answer_record->data.in4.sin_addr, reader, answer_record->data_len);
+ break;
+
+ case DNS_RTYPE_AAAA:
+ /* ipv6 is stored on 16 bytes */
+ if (answer_record->data_len != 16)
+ goto invalid_resp;
+
+ answer_record->data.in6.sin6_family = AF_INET6;
+ memcpy(&answer_record->data.in6.sin6_addr, reader, answer_record->data_len);
+ break;
+
+ default:
+ pool_free(resolv_answer_item_pool, answer_record);
+ answer_record = NULL;
+ continue;
+
+ } /* switch (record type) */
+
+ /* Increment the counter for number of records saved into our
+ * local response */
+ nb_saved_records++;
+
+ /* Move forward answer_record->data_len for analyzing next
+ * record in the response */
+ reader += answer_record->data_len;
+
+ /* Lookup to see if we already had this entry */
+ found = 0;
+
+ for (eb32 = eb32_first(&r_res->answer_tree); eb32 != NULL; eb32 = eb32_next(eb32)) {
+ struct resolv_answer_item *ar_item;
+
+ tmp_record = eb32_entry(eb32, typeof(*tmp_record), link);
+ if (tmp_record->type != DNS_RTYPE_SRV || !tmp_record->ar_item)
+ continue;
+
+ ar_item = tmp_record->ar_item;
+ if (ar_item->type != answer_record->type || ar_item->last_seen == now_ms ||
+ len != tmp_record->data_len ||
+ memcmp(answer_record->name, tmp_record->data.target, tmp_record->data_len) != 0)
+ continue;
+
+ switch(ar_item->type) {
+ case DNS_RTYPE_A:
+ if (!memcmp(&answer_record->data.in4.sin_addr,
+ &ar_item->data.in4.sin_addr,
+ sizeof(answer_record->data.in4.sin_addr)))
+ found = 1;
+ break;
+
+ case DNS_RTYPE_AAAA:
+ if (!memcmp(&answer_record->data.in6.sin6_addr,
+ &ar_item->data.in6.sin6_addr,
+ sizeof(answer_record->data.in6.sin6_addr)))
+ found = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (found == 1)
+ break;
+ }
+
+ if (found == 1) {
+ tmp_record->ar_item->last_seen = now_ms;
+ pool_free(resolv_answer_item_pool, answer_record);
+ answer_record = NULL;
+ }
+ else {
+ answer_record->last_seen = now_ms;
+ answer_record->ar_item = NULL;
+
+ // looking for the SRV record in the response list linked to this additional record
+ for (eb32 = eb32_first(&r_res->answer_tree); eb32 != NULL; eb32 = eb32_next(eb32)) {
+ tmp_record = eb32_entry(eb32, typeof(*tmp_record), link);
+
+ if (tmp_record->type == DNS_RTYPE_SRV &&
+ tmp_record->ar_item == NULL &&
+ memcmp(tmp_record->data.target, answer_record->name, tmp_record->data_len) == 0) {
+ /* Always use the received additional record to refresh info */
+ pool_free(resolv_answer_item_pool, tmp_record->ar_item);
+ tmp_record->ar_item = answer_record;
+ answer_record = NULL;
+ break;
+ }
+ }
+ if (answer_record) {
+ pool_free(resolv_answer_item_pool, answer_record);
+ answer_record = NULL;
+ }
+ }
+ } /* for i 0 to arcount */
+
+ skip_parsing_additional_records:
+
+ /* Save the number of records we really own */
+ r_res->header.arcount = nb_saved_records;
+ resolv_check_response(resolution);
+ return RSLV_RESP_VALID;
+
+ invalid_resp:
+ cause = RSLV_RESP_INVALID;
+
+ return_error:
+ pool_free(resolv_answer_item_pool, answer_record);
+ return cause;
+}
+
+/* Searches dn_name resolution in resp.
+ * If existing IP not found, return the first IP matching family_priority,
+ * otherwise, first ip found
+ * The following tasks are the responsibility of the caller:
+ * - <r_res> contains an error free DNS response
+ * For both cases above, resolv_validate_dns_response is required
+ *
+ * <currentip>/<currentip_sin_family> describe the address currently in use
+ * (used to favor address stability). On success *<newip> points inside the
+ * selected answer record (no copy is made) and *<newip_sin_family> is set to
+ * AF_INET or AF_INET6. <owner>, when non-NULL, is the server the selected
+ * record gets attached to via its ip_rec_item list node.
+ *
+ * returns one of the RSLV_UPD_* code
+ */
+int resolv_get_ip_from_response(struct resolv_response *r_res,
+ struct resolv_options *resolv_opts, void *currentip,
+ short currentip_sin_family,
+ void **newip, short *newip_sin_family,
+ struct server *owner)
+{
+ struct resolv_answer_item *record, *found_record = NULL;
+ struct eb32_node *eb32;
+ int family_priority;
+ int currentip_found;
+ unsigned char *newip4, *newip6;
+ int currentip_sel;
+ int j;
+ int score, max_score;
+ int allowed_duplicated_ip;
+
+ /* srv is linked to an alive ip record */
+ if (owner && LIST_INLIST(&owner->ip_rec_item))
+ return RSLV_UPD_NO;
+
+ family_priority = resolv_opts->family_prio;
+ allowed_duplicated_ip = resolv_opts->accept_duplicate_ip;
+ *newip = newip4 = newip6 = NULL;
+ currentip_found = 0;
+ *newip_sin_family = AF_UNSPEC;
+ max_score = -1;
+
+ /* Select an IP regarding configuration preference.
+ * Top priority is the preferred network ip version,
+ * second priority is the preferred network.
+ * the last priority is the currently used IP,
+ *
+ * For these three priorities, a score is calculated. The
+ * weight are:
+ * 8 - preferred ip version.
+ * 4 - preferred network.
+ * 2 - if the ip in the record is not affected to any other server in the same backend (duplication)
+ * 1 - current ip.
+ * The result with the biggest score is returned.
+ */
+
+ for (eb32 = eb32_first(&r_res->answer_tree); eb32 != NULL; eb32 = eb32_next(eb32)) {
+ void *ip;
+ unsigned char ip_type;
+
+ /* Only A and AAAA records can yield an address; every other
+ * record type (CNAME, SRV, ...) is skipped. */
+ record = eb32_entry(eb32, typeof(*record), link);
+ if (record->type == DNS_RTYPE_A) {
+ ip_type = AF_INET;
+ ip = &record->data.in4.sin_addr;
+ }
+ else if (record->type == DNS_RTYPE_AAAA) {
+ ip_type = AF_INET6;
+ ip = &record->data.in6.sin6_addr;
+ }
+ else
+ continue;
+ score = 0;
+
+ /* Check for preferred ip protocol. */
+ if (ip_type == family_priority)
+ score += 8;
+
+ /* Check for preferred network. */
+ for (j = 0; j < resolv_opts->pref_net_nb; j++) {
+
+ /* Compare only the same addresses class. */
+ if (resolv_opts->pref_net[j].family != ip_type)
+ continue;
+
+ if ((ip_type == AF_INET &&
+ in_net_ipv4(ip,
+ &resolv_opts->pref_net[j].mask.in4,
+ &resolv_opts->pref_net[j].addr.in4)) ||
+ (ip_type == AF_INET6 &&
+ in_net_ipv6(ip,
+ &resolv_opts->pref_net[j].mask.in6,
+ &resolv_opts->pref_net[j].addr.in6))) {
+ score += 4;
+ break;
+ }
+ }
+
+ /* Check if the IP found in the record is already affected to a
+ * member of a group. If not, the score should be incremented
+ * by 2. If it is already used and duplicate IPs are not
+ * allowed, the record is skipped entirely. */
+ if (owner) {
+ struct server *srv;
+ int already_used = 0;
+
+ list_for_each_entry(srv, &record->attached_servers, ip_rec_item) {
+ if (srv == owner)
+ continue;
+ if (srv->proxy == owner->proxy) {
+ already_used = 1;
+ break;
+ }
+ }
+ if (already_used) {
+ if (!allowed_duplicated_ip) {
+ continue;
+ }
+ }
+ else {
+ score += 2;
+ }
+ } else {
+ score += 2;
+ }
+
+ /* Check for current ip matching. */
+ if (ip_type == currentip_sin_family &&
+ ((currentip_sin_family == AF_INET &&
+ !memcmp(ip, currentip, 4)) ||
+ (currentip_sin_family == AF_INET6 &&
+ !memcmp(ip, currentip, 16)))) {
+ score++;
+ currentip_sel = 1;
+ }
+ else
+ currentip_sel = 0;
+
+ /* Keep the address if the score is better than the previous
+ * score. The maximum score is 15, if this value is reached, we
+ * break the parsing. Implicitly, this score is reached the ip
+ * selected is the current ip. */
+ if (score > max_score) {
+ if (ip_type == AF_INET)
+ newip4 = ip;
+ else
+ newip6 = ip;
+ found_record = record;
+ currentip_found = currentip_sel;
+ if (score == 15) {
+ /* this was not registered on the current record but it matches
+ * let's fix it (it may come from the state file) */
+ if (owner)
+ LIST_APPEND(&found_record->attached_servers, &owner->ip_rec_item);
+ return RSLV_UPD_NO;
+ }
+ max_score = score;
+ }
+ } /* list for each record entries */
+
+ /* No IP found in the response */
+ if (!newip4 && !newip6)
+ return RSLV_UPD_NO_IP_FOUND;
+
+ /* Case when the caller looks first for an IPv4 address */
+ if (family_priority == AF_INET) {
+ if (newip4) {
+ *newip = newip4;
+ *newip_sin_family = AF_INET;
+ }
+ else if (newip6) {
+ *newip = newip6;
+ *newip_sin_family = AF_INET6;
+ }
+ }
+ /* Case when the caller looks first for an IPv6 address */
+ else if (family_priority == AF_INET6) {
+ if (newip6) {
+ *newip = newip6;
+ *newip_sin_family = AF_INET6;
+ }
+ else if (newip4) {
+ *newip = newip4;
+ *newip_sin_family = AF_INET;
+ }
+ }
+ /* Case when the caller have no preference (we prefer IPv6) */
+ else if (family_priority == AF_UNSPEC) {
+ if (newip6) {
+ *newip = newip6;
+ *newip_sin_family = AF_INET6;
+ }
+ else if (newip4) {
+ *newip = newip4;
+ *newip_sin_family = AF_INET;
+ }
+ }
+
+ /* the ip of this record was chosen for the server */
+ if (owner && found_record) {
+ LIST_DEL_INIT(&owner->ip_rec_item);
+ LIST_APPEND(&found_record->attached_servers, &owner->ip_rec_item);
+ }
+
+ eb32 = eb32_first(&r_res->answer_tree);
+ if (eb32) {
+ /* Move the first record to the end of the list, for internal
+ * round robin.
+ */
+ eb32_delete(eb32);
+ eb32_insert(&r_res->answer_tree, eb32);
+ }
+
+ return (currentip_found ? RSLV_UPD_NO : RSLV_UPD_SRVIP_NOT_FOUND);
+}
+
+/* Turns a domain name label into a string: 3www7haproxy3org into www.haproxy.org
+ *
+ * <dn> contains the input label of <dn_len> bytes long and does not need to be
+ * null-terminated. <str> must be allocated large enough to contain a full host
+ * name plus the trailing zero, and the allocated size must be passed in
+ * <str_len>.
+ *
+ * In case of error, -1 is returned, otherwise, the number of bytes copied in
+ * <str> (including the terminating null byte).
+ *
+ * NOTE(review): assumes <dn> is a well-formed label sequence; the inner copy
+ * loop advances <i> by each length byte without re-checking it against
+ * <dn_len>, so a corrupted length byte could read past the input — confirm
+ * that all callers pass validated labels.
+ */
+int resolv_dn_label_to_str(const char *dn, int dn_len, char *str, int str_len)
+{
+ char *ptr;
+ int i, sz;
+
+ /* the decoded string is never longer than the encoded label,
+ * so <dn_len> bytes of output (dots + trailing zero replacing
+ * the length bytes) are always sufficient */
+ if (str_len < dn_len)
+ return -1;
+
+ ptr = str;
+ for (i = 0; i < dn_len; ++i) {
+ /* each label starts with one length byte */
+ sz = dn[i];
+ if (i)
+ *ptr++ = '.';
+ /* copy the string at i+1 to lower case */
+ for (; sz > 0; sz--)
+ *(ptr++) = tolower(dn[++i]);
+ }
+ *ptr++ = '\0';
+ return (ptr - str);
+}
+
+/* Turns a string into domain name label: www.haproxy.org into 3www7haproxy3org
+ *
+ * <str> contains the input string that is <str_len> bytes long (trailing zero
+ * not needed). <dn> buffer must be allocated large enough to contain the
+ * encoded string and a trailing zero, so it must be at least str_len+2, and
+ * this allocated buffer size must be passed in <dn_len>.
+ *
+ * In case of error, -1 is returned, otherwise, the number of bytes copied in
+ * <dn> (excluding the terminating null byte).
+ *
+ * The output is lower-cased; an empty label (two consecutive dots, or a
+ * leading dot) is rejected, and a single trailing dot is silently ignored.
+ */
+int resolv_str_to_dn_label(const char *str, int str_len, char *dn, int dn_len)
+{
+ int i, offset;
+
+ /* worst case: one leading length byte plus a trailing zero on
+ * top of the input characters */
+ if (dn_len < str_len + 2)
+ return -1;
+
+ /* First byte of dn will be used to store the length of the first
+ * label */
+ offset = 0;
+ for (i = 0; i < str_len; ++i) {
+ if (str[i] == '.') {
+ /* 2 or more consecutive dots is invalid */
+ if (i == offset)
+ return -1;
+
+ /* ignore trailing dot */
+ if (i + 1 == str_len)
+ break;
+
+ /* close the current label: patch its length byte,
+ * and remember where the next label's length byte
+ * goes */
+ dn[offset] = (i - offset);
+ offset = i+1;
+ continue;
+ }
+ /* characters are shifted one position right to leave room
+ * for the label's length byte */
+ dn[i+1] = tolower(str[i]);
+ }
+ /* close the last label and terminate the output */
+ dn[offset] = i - offset;
+ dn[i+1] = '\0';
+ return i+1;
+}
+
+/* Validates host name:
+ * - total size (must not exceed DNS_MAX_NAME_SIZE)
+ * - each label size individually (must not exceed DNS_MAX_LABEL_SIZE)
+ * - allowed characters: [A-Za-z0-9], '-' and '_' (dots only as label
+ * separators)
+ * returns:
+ * 0 in case of error. If <err> is not NULL, an error message is stored there.
+ * 1 when no error. <err> is left unaffected.
+ */
+int resolv_hostname_validation(const char *string, char **err)
+{
+ int i;
+
+ if (strlen(string) > DNS_MAX_NAME_SIZE) {
+ if (err)
+ *err = DNS_TOO_LONG_FQDN;
+ return 0;
+ }
+
+ /* walk the string label by label, <i> counts the current
+ * label's length */
+ while (*string) {
+ i = 0;
+ while (*string && *string != '.' && i < DNS_MAX_LABEL_SIZE) {
+ if (!(*string == '-' || *string == '_' ||
+ (*string >= 'a' && *string <= 'z') ||
+ (*string >= 'A' && *string <= 'Z') ||
+ (*string >= '0' && *string <= '9'))) {
+ if (err)
+ *err = DNS_INVALID_CHARACTER;
+ return 0;
+ }
+ i++;
+ string++;
+ }
+
+ if (!(*string))
+ break;
+
+ /* inner loop stopped on the label-size limit rather than
+ * on a dot: the label is too long */
+ if (*string != '.' && i >= DNS_MAX_LABEL_SIZE) {
+ if (err)
+ *err = DNS_LABEL_TOO_LONG;
+ return 0;
+ }
+
+ /* skip the dot separating two labels */
+ string++;
+ }
+ return 1;
+}
+
+/* Picks up an available resolution from the different resolution list
+ * associated to a resolvers section, in this order:
+ * 1. check in resolutions.curr for the same hostname and query_type
+ * 2. check in resolutions.wait for the same hostname and query_type
+ * 3. Get a new resolution from resolution pool
+ *
+ * Sharing an existing resolution avoids sending duplicate queries for the
+ * same name/type. A newly allocated resolution gets a fresh uuid and is
+ * queued on the resolvers wait list.
+ *
+ * Returns an available resolution, NULL if none found.
+ */
+static struct resolv_resolution *resolv_pick_resolution(struct resolvers *resolvers,
+ char **hostname_dn, int hostname_dn_len,
+ int query_type)
+{
+ struct resolv_resolution *res;
+
+ /* no hostname yet: nothing to match against, allocate directly */
+ if (!*hostname_dn)
+ goto from_pool;
+
+ /* Search for same hostname and query type in resolutions.curr */
+ list_for_each_entry(res, &resolvers->resolutions.curr, list) {
+ if (!res->hostname_dn)
+ continue;
+ if ((query_type == res->prefered_query_type) &&
+ hostname_dn_len == res->hostname_dn_len &&
+ memcmp(*hostname_dn, res->hostname_dn, hostname_dn_len) == 0)
+ return res;
+ }
+
+ /* Search for same hostname and query type in resolutions.wait */
+ list_for_each_entry(res, &resolvers->resolutions.wait, list) {
+ if (!res->hostname_dn)
+ continue;
+ if ((query_type == res->prefered_query_type) &&
+ hostname_dn_len == res->hostname_dn_len &&
+ memcmp(*hostname_dn, res->hostname_dn, hostname_dn_len) == 0)
+ return res;
+ }
+
+ from_pool:
+ /* No resolution could be found, so let's allocate a new one */
+ res = pool_zalloc(resolv_resolution_pool);
+ if (res) {
+ res->resolvers = resolvers;
+ res->uuid = resolution_uuid;
+ res->status = RSLV_STATUS_NONE;
+ res->step = RSLV_STEP_NONE;
+ res->last_valid = now_ms;
+
+ LIST_INIT(&res->requesters);
+ res->response.answer_tree = EB_ROOT;
+
+ res->prefered_query_type = query_type;
+ res->query_type = query_type;
+ res->hostname_dn = *hostname_dn;
+ res->hostname_dn_len = hostname_dn_len;
+
+ ++resolution_uuid;
+
+ /* Move the resolution to the resolvers wait queue */
+ LIST_APPEND(&resolvers->resolutions.wait, &res->list);
+ }
+ return res;
+}
+
+/* deletes and frees all answer_items from the resolution's answer_list,
+ * including each item's optional additional record (ar_item). Uses a
+ * look-ahead iterator so items can be deleted while walking the tree. */
+static void resolv_purge_resolution_answer_records(struct resolv_resolution *resolution)
+{
+ struct eb32_node *eb32, *eb32_back;
+ struct resolv_answer_item *item;
+
+ for (eb32 = eb32_first(&resolution->response.answer_tree);
+ eb32 && (eb32_back = eb32_next(eb32), 1);
+ eb32 = eb32_back) {
+ item = eb32_entry(eb32, typeof(*item), link);
+ eb32_delete(&item->link);
+ /* pool_free() accepts NULL, so a missing ar_item is fine */
+ pool_free(resolv_answer_item_pool, item->ar_item);
+ pool_free(resolv_answer_item_pool, item);
+ }
+}
+
+/* Releases a resolution from its requester(s) and move it back to the pool.
+ * All attached requesters are detached (their ->resolution pointer is
+ * cleared), the cached answer records are purged, and the resolution is
+ * removed from whichever resolvers list it sits on before being freed. */
+static void resolv_free_resolution(struct resolv_resolution *resolution)
+{
+ struct resolv_requester *req, *reqback;
+
+ /* clean up configuration */
+ resolv_reset_resolution(resolution);
+ resolution->hostname_dn = NULL;
+ resolution->hostname_dn_len = 0;
+
+ /* detach every requester still pointing at this resolution */
+ list_for_each_entry_safe(req, reqback, &resolution->requesters, list) {
+ LIST_DEL_INIT(&req->list);
+ req->resolution = NULL;
+ }
+ resolv_purge_resolution_answer_records(resolution);
+
+ LIST_DEL_INIT(&resolution->list);
+ pool_free(resolv_resolution_pool, resolution);
+}
+
+/* If *<req> is not NULL, returns it, otherwise tries to allocate a requester
+ * and makes it owned by this obj_type, with the proposed callback and error
+ * callback. On success, *req is assigned the allocated requester. Returns
+ * NULL on allocation failure.
+ *
+ * <cb> is invoked on successful resolutions, <err_cb> on resolution errors.
+ * The new requester is not yet linked to any resolution (->resolution is
+ * NULL); that is the caller's job.
+ */
+static struct resolv_requester *
+resolv_get_requester(struct resolv_requester **req, enum obj_type *owner,
+ int (*cb)(struct resolv_requester *, struct dns_counters *),
+ int (*err_cb)(struct resolv_requester *, int))
+{
+ struct resolv_requester *tmp;
+
+ /* idempotent: reuse an already allocated requester */
+ if (*req)
+ return *req;
+
+ tmp = pool_alloc(resolv_requester_pool);
+ if (!tmp)
+ goto end;
+
+ LIST_INIT(&tmp->list);
+ tmp->owner = owner;
+ tmp->resolution = NULL;
+ tmp->requester_cb = cb;
+ tmp->requester_error_cb = err_cb;
+ *req = tmp;
+ end:
+ /* tmp is NULL here only on allocation failure */
+ return tmp;
+}
+
+/* Links a requester (a server or a resolv_srvrq) with a resolution. It returns 0
+ * on success, -1 otherwise.
+ *
+ * <requester> is interpreted according to <requester_type> (OBJ_TYPE_SERVER,
+ * OBJ_TYPE_SRVRQ or OBJ_TYPE_STREAM). For servers, the server lock is taken
+ * around the requester allocation unless <requester_locked> indicates the
+ * caller already holds it. The query type is derived from the owner's
+ * configured address family preference (A for AF_INET, AAAA otherwise),
+ * except for SRV requests which always use DNS_RTYPE_SRV.
+ */
+int resolv_link_resolution(void *requester, int requester_type, int requester_locked)
+{
+ struct resolv_resolution *res = NULL;
+ struct resolv_requester *req;
+ struct resolvers *resolvers;
+ struct server *srv = NULL;
+ struct resolv_srvrq *srvrq = NULL;
+ struct stream *stream = NULL;
+ char **hostname_dn;
+ int hostname_dn_len, query_type;
+
+ enter_resolver_code();
+ switch (requester_type) {
+ case OBJ_TYPE_SERVER:
+ srv = (struct server *)requester;
+
+ if (!requester_locked)
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+
+ req = resolv_get_requester(&srv->resolv_requester,
+ &srv->obj_type,
+ snr_resolution_cb,
+ snr_resolution_error_cb);
+
+ if (!requester_locked)
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+
+ if (!req)
+ goto err;
+
+ hostname_dn = &srv->hostname_dn;
+ hostname_dn_len = srv->hostname_dn_len;
+ resolvers = srv->resolvers;
+ query_type = ((srv->resolv_opts.family_prio == AF_INET)
+ ? DNS_RTYPE_A
+ : DNS_RTYPE_AAAA);
+ break;
+
+ case OBJ_TYPE_SRVRQ:
+ srvrq = (struct resolv_srvrq *)requester;
+
+ req = resolv_get_requester(&srvrq->requester,
+ &srvrq->obj_type,
+ snr_resolution_cb,
+ srvrq_resolution_error_cb);
+ if (!req)
+ goto err;
+
+ hostname_dn = &srvrq->hostname_dn;
+ hostname_dn_len = srvrq->hostname_dn_len;
+ resolvers = srvrq->resolvers;
+ query_type = DNS_RTYPE_SRV;
+ break;
+
+ case OBJ_TYPE_STREAM:
+ stream = (struct stream *)requester;
+
+ req = resolv_get_requester(&stream->resolv_ctx.requester,
+ &stream->obj_type,
+ act_resolution_cb,
+ act_resolution_error_cb);
+ if (!req)
+ goto err;
+
+ hostname_dn = &stream->resolv_ctx.hostname_dn;
+ hostname_dn_len = stream->resolv_ctx.hostname_dn_len;
+ resolvers = stream->resolv_ctx.parent->arg.resolv.resolvers;
+ query_type = ((stream->resolv_ctx.parent->arg.resolv.opts->family_prio == AF_INET)
+ ? DNS_RTYPE_A
+ : DNS_RTYPE_AAAA);
+ break;
+ default:
+ goto err;
+ }
+
+ /* Get a resolution from the resolvers' wait queue or pool */
+ if ((res = resolv_pick_resolution(resolvers, hostname_dn, hostname_dn_len, query_type)) == NULL)
+ goto err;
+
+ req->resolution = res;
+
+ LIST_APPEND(&res->requesters, &req->list);
+ leave_resolver_code();
+ return 0;
+
+ err:
+ /* only free a resolution we just created and that nobody uses */
+ if (res && LIST_ISEMPTY(&res->requesters))
+ resolv_free_resolution(res);
+ leave_resolver_code();
+ return -1;
+}
+
+/* This function removes all server/srvrq references on answer items.
+ *
+ * For a plain server requester, only its own ip_rec_item attachment is
+ * dropped. For an SRV request owner, every server spawned from this SRV
+ * request is cleaned up from each SRV answer item's attached_servers list.
+ */
+void resolv_detach_from_resolution_answer_items(struct resolv_resolution *res, struct resolv_requester *req)
+{
+ struct eb32_node *eb32, *eb32_back;
+ struct resolv_answer_item *item;
+ struct server *srv, *srvback;
+ struct resolv_srvrq *srvrq;
+
+ enter_resolver_code();
+ if ((srv = objt_server(req->owner)) != NULL) {
+ LIST_DEL_INIT(&srv->ip_rec_item);
+ }
+ else if ((srvrq = objt_resolv_srvrq(req->owner)) != NULL) {
+ /* look-ahead iteration: cleanup may modify the tree nodes'
+ * attached lists while we walk */
+ for (eb32 = eb32_first(&res->response.answer_tree);
+ eb32 && (eb32_back = eb32_next(eb32), 1);
+ eb32 = eb32_back) {
+ item = eb32_entry(eb32, typeof(*item), link);
+ if (item->type == DNS_RTYPE_SRV) {
+ list_for_each_entry_safe(srv, srvback, &item->attached_servers, srv_rec_item) {
+ if (srv->srvrq == srvrq)
+ resolv_srvrq_cleanup_srv(srv);
+ }
+ }
+ }
+ }
+ leave_resolver_code();
+}
+
+/* Removes a requester from a DNS resolution. It takes care of all the
+ * consequences. It also cleans up some parameters from the requester.
+ *
+ * If the requester was the last one attached to the resolution, the
+ * resolution itself is aborted. Otherwise the resolution's hostname_dn
+ * pointers are re-homed onto one of the remaining requesters, since they
+ * may have pointed into storage owned by the departing requester.
+ */
+static void _resolv_unlink_resolution(struct resolv_requester *requester)
+{
+ struct resolv_resolution *res;
+ struct resolv_requester *req;
+
+ /* Nothing to do */
+ if (!requester || !requester->resolution)
+ return;
+ res = requester->resolution;
+
+ /* Clean up the requester */
+ LIST_DEL_INIT(&requester->list);
+ requester->resolution = NULL;
+
+ /* remove ref from the resolution answer item list to the requester */
+ resolv_detach_from_resolution_answer_items(res, requester);
+
+ /* We need to find another requester linked on this resolution */
+ if (!LIST_ISEMPTY(&res->requesters))
+ req = LIST_NEXT(&res->requesters, struct resolv_requester *, list);
+ else {
+ /* no requester left: the resolution is no longer needed */
+ abort_resolution(res);
+ return;
+ }
+
+ /* Move hostname_dn related pointers to the next requester */
+ switch (obj_type(req->owner)) {
+ case OBJ_TYPE_SERVER:
+ res->hostname_dn = __objt_server(req->owner)->hostname_dn;
+ res->hostname_dn_len = __objt_server(req->owner)->hostname_dn_len;
+ break;
+ case OBJ_TYPE_SRVRQ:
+ res->hostname_dn = __objt_resolv_srvrq(req->owner)->hostname_dn;
+ res->hostname_dn_len = __objt_resolv_srvrq(req->owner)->hostname_dn_len;
+ break;
+ case OBJ_TYPE_STREAM:
+ res->hostname_dn = __objt_stream(req->owner)->resolv_ctx.hostname_dn;
+ res->hostname_dn_len = __objt_stream(req->owner)->resolv_ctx.hostname_dn_len;
+ break;
+ default:
+ res->hostname_dn = NULL;
+ res->hostname_dn_len = 0;
+ break;
+ }
+}
+
+/* The public version of the function above that deals with the death row.
+ * Wraps _resolv_unlink_resolution() between enter/leave_resolver_code(). */
+void resolv_unlink_resolution(struct resolv_requester *requester)
+{
+ enter_resolver_code();
+ _resolv_unlink_resolution(requester);
+ leave_resolver_code();
+}
+
+/* Called when a network IO is generated on a name server socket for an incoming
+ * packet. It performs the following actions:
+ * - check if the packet requires processing (not outdated resolution)
+ * - ensure the DNS packet received is valid and call requester's callback
+ * - call requester's error callback if invalid response
+ * - check the dn_name in the packet against the one sent
+ */
+static int resolv_process_responses(struct dns_nameserver *ns)
+{
+ struct dns_counters *tmpcounters;
+ struct resolvers *resolvers;
+ struct resolv_resolution *res;
+ unsigned char buf[DNS_MAX_UDP_MESSAGE + 1];
+ unsigned char *bufend;
+ int buflen, dns_resp;
+ int max_answer_records;
+ unsigned short query_id;
+ struct eb32_node *eb;
+ struct resolv_requester *req;
+ int keep_answer_items;
+
+ resolvers = ns->parent;
+ enter_resolver_code();
+ HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+
+ /* process all pending input messages */
+ while (1) {
+ /* read message received */
+ memset(buf, '\0', resolvers->accepted_payload_size + 1);
+ if ((buflen = dns_recv_nameserver(ns, (void *)buf, sizeof(buf))) <= 0) {
+ break;
+ }
+
+ /* message too big */
+ if (buflen > resolvers->accepted_payload_size) {
+ ns->counters->app.resolver.too_big++;
+ continue;
+ }
+
+ /* initializing variables */
+ bufend = buf + buflen; /* pointer to mark the end of the buffer */
+
+ /* read the query id from the packet (16 bits) */
+ if (buf + 2 > bufend) {
+ ns->counters->app.resolver.invalid++;
+ continue;
+ }
+ query_id = resolv_response_get_query_id(buf);
+
+ /* search the query_id in the pending resolution tree */
+ eb = eb32_lookup(&resolvers->query_ids, query_id);
+ if (eb == NULL) {
+ /* unknown query id means an outdated response and can be safely ignored */
+ ns->counters->app.resolver.outdated++;
+ continue;
+ }
+
+ /* known query id means a resolution in progress */
+ res = eb32_entry(eb, struct resolv_resolution, qid);
+ /* number of responses received */
+ res->nb_responses++;
+
+ max_answer_records = (resolvers->accepted_payload_size - DNS_HEADER_SIZE) / DNS_MIN_RECORD_SIZE;
+ dns_resp = resolv_validate_dns_response(buf, bufend, res, max_answer_records);
+
+ switch (dns_resp) {
+ case RSLV_RESP_VALID:
+ break;
+
+ case RSLV_RESP_INVALID:
+ case RSLV_RESP_QUERY_COUNT_ERROR:
+ case RSLV_RESP_WRONG_NAME:
+ res->status = RSLV_STATUS_INVALID;
+ ns->counters->app.resolver.invalid++;
+ break;
+
+ case RSLV_RESP_NX_DOMAIN:
+ res->status = RSLV_STATUS_NX;
+ ns->counters->app.resolver.nx++;
+ break;
+
+ case RSLV_RESP_REFUSED:
+ res->status = RSLV_STATUS_REFUSED;
+ ns->counters->app.resolver.refused++;
+ break;
+
+ case RSLV_RESP_ANCOUNT_ZERO:
+ res->status = RSLV_STATUS_OTHER;
+ ns->counters->app.resolver.any_err++;
+ break;
+
+ case RSLV_RESP_CNAME_ERROR:
+ res->status = RSLV_STATUS_OTHER;
+ ns->counters->app.resolver.cname_error++;
+ break;
+
+ case RSLV_RESP_TRUNCATED:
+ res->status = RSLV_STATUS_OTHER;
+ ns->counters->app.resolver.truncated++;
+ break;
+
+ case RSLV_RESP_NO_EXPECTED_RECORD:
+ case RSLV_RESP_ERROR:
+ case RSLV_RESP_INTERNAL:
+ res->status = RSLV_STATUS_OTHER;
+ ns->counters->app.resolver.other++;
+ break;
+ }
+
+ /* Wait all nameservers response to handle errors */
+ if (dns_resp != RSLV_RESP_VALID && res->nb_responses < res->nb_queries)
+ continue;
+
+ /* Process error codes */
+ if (dns_resp != RSLV_RESP_VALID) {
+ if (res->prefered_query_type != res->query_type) {
+ /* The fallback on the query type was already performed,
+ * so check the try counter. If it falls to 0, we can
+ * report an error. Else, wait the next attempt. */
+ if (!res->try)
+ goto report_res_error;
+ }
+ else {
+ /* Fallback from A to AAAA or the opposite and re-send
+ * the resolution immediately. try counter is not
+ * decremented. */
+ if (res->prefered_query_type == DNS_RTYPE_A) {
+ res->query_type = DNS_RTYPE_AAAA;
+ resolv_send_query(res);
+ }
+ else if (res->prefered_query_type == DNS_RTYPE_AAAA) {
+ res->query_type = DNS_RTYPE_A;
+ resolv_send_query(res);
+ }
+ }
+ continue;
+ }
+
+ /* So the resolution succeeded */
+ res->status = RSLV_STATUS_VALID;
+ res->last_valid = now_ms;
+ ns->counters->app.resolver.valid++;
+ goto report_res_success;
+
+ report_res_error:
+ keep_answer_items = 0;
+ list_for_each_entry(req, &res->requesters, list)
+ keep_answer_items |= req->requester_error_cb(req, dns_resp);
+ if (!keep_answer_items)
+ resolv_purge_resolution_answer_records(res);
+ resolv_reset_resolution(res);
+ LIST_DEL_INIT(&res->list);
+ LIST_APPEND(&resolvers->resolutions.wait, &res->list);
+ continue;
+
+ report_res_success:
+ /* Only the 1rst requester s managed by the server, others are
+ * from the cache */
+ tmpcounters = ns->counters;
+ list_for_each_entry(req, &res->requesters, list) {
+ struct server *s = objt_server(req->owner);
+
+ if (s)
+ HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+ req->requester_cb(req, tmpcounters);
+ if (s)
+ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+ tmpcounters = NULL;
+ }
+
+ resolv_reset_resolution(res);
+ LIST_DEL_INIT(&res->list);
+ LIST_APPEND(&resolvers->resolutions.wait, &res->list);
+ continue;
+ }
+ resolv_update_resolvers_timeout(resolvers);
+ HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+ leave_resolver_code();
+ return buflen;
+}
+
+/* Processes DNS resolutions of the resolvers section passed in <context>.
+ * First, it checks the active list to detect expired resolutions and retry
+ * them if possible. Else a timeout is reported. Then, it checks the wait list
+ * to trigger new resolutions.
+ * <t> is the section's task and is always returned; <state> is unused here.
+ * The whole pass runs under the section's DNS lock, inside the
+ * enter/leave_resolver_code() guard so that deferred deletions are only
+ * purged once the pass is over.
+ */
+struct task *process_resolvers(struct task *t, void *context, unsigned int state)
+{
+ struct resolvers *resolvers = context;
+ struct resolv_resolution *res, *resback;
+ int exp;
+
+ enter_resolver_code();
+ HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+
+ /* Handle all expired resolutions from the active list. Elements that
+ * need to be removed will in fact be moved to the death_row. Other
+ * ones will be handled normally.
+ */
+
+ res = LIST_NEXT(&resolvers->resolutions.curr, struct resolv_resolution *, list);
+ while (&res->list != &resolvers->resolutions.curr) {
+ resback = LIST_NEXT(&res->list, struct resolv_resolution *, list);
+
+ /* nobody waits for this resolution anymore: drop it */
+ if (LIST_ISEMPTY(&res->requesters)) {
+ abort_resolution(res);
+ res = resback;
+ continue;
+ }
+
+ /* When we find the first resolution in the future, then we can
+ * stop here */
+ exp = tick_add(res->last_query, resolvers->timeout.retry);
+ if (!tick_is_expired(exp, now_ms))
+ break;
+
+ /* If current resolution has been tried too many times and
+ * finishes in timeout we update its status and remove it from
+ * the list */
+ if (!res->try) {
+ struct resolv_requester *req;
+ int keep_answer_items = 0;
+
+ /* Notify the result to the requesters */
+ if (!res->nb_responses)
+ res->status = RSLV_STATUS_TIMEOUT;
+ list_for_each_entry(req, &res->requesters, list)
+ keep_answer_items |= req->requester_error_cb(req, res->status);
+ if (!keep_answer_items)
+ resolv_purge_resolution_answer_records(res);
+
+ /* Clean up resolution info and remove it from the
+ * current list */
+ resolv_reset_resolution(res);
+
+ /* subsequent entries might have been deleted here */
+ resback = LIST_NEXT(&res->list, struct resolv_resolution *, list);
+ LIST_DEL_INIT(&res->list);
+ LIST_APPEND(&resolvers->resolutions.wait, &res->list);
+ res = resback;
+ }
+ else {
+ /* Otherwise resend the DNS query and requeue the resolution */
+ if (!res->nb_responses || res->prefered_query_type != res->query_type) {
+ /* No response received (a real timeout) or fallback already done */
+ res->query_type = res->prefered_query_type;
+ res->try--;
+ }
+ else {
+ /* Fallback from A to AAAA or the opposite and re-send
+ * the resolution immediately. try counter is not
+ * decremented. */
+ if (res->prefered_query_type == DNS_RTYPE_A)
+ res->query_type = DNS_RTYPE_AAAA;
+ else if (res->prefered_query_type == DNS_RTYPE_AAAA)
+ res->query_type = DNS_RTYPE_A;
+ else
+ res->try--;
+ }
+ resolv_send_query(res);
+ resback = LIST_NEXT(&res->list, struct resolv_resolution *, list);
+ res = resback;
+ }
+ }
+
+ /* Handle all resolutions in the wait list */
+ list_for_each_entry_safe(res, resback, &resolvers->resolutions.wait, list) {
+
+ if (unlikely(stopping)) {
+ /* If haproxy is stopping, check the resolution's requesters
+ * to know if it must be run or not. If at least a requester
+ * is a stream (because of a do-resolv action) or if there
+ * is a requester attached to a running proxy, the resolution
+ * is performed. Otherwise, it is skipped for now.
+ */
+ struct resolv_requester *req;
+ int must_run = 0;
+
+ list_for_each_entry(req, &res->requesters, list) {
+ struct proxy *px = NULL;
+
+ switch (obj_type(req->owner)) {
+ case OBJ_TYPE_SERVER:
+ px = __objt_server(req->owner)->proxy;
+ break;
+ case OBJ_TYPE_SRVRQ:
+ px = __objt_resolv_srvrq(req->owner)->proxy;
+ break;
+ case OBJ_TYPE_STREAM:
+ /* Always perform the resolution */
+ must_run = 1;
+ break;
+ default:
+ break;
+ }
+ /* Perform the resolution if the proxy is not stopped or disabled */
+ if (px && !(px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)))
+ must_run = 1;
+
+ if (must_run)
+ break;
+ }
+
+ if (!must_run) {
+ /* Skip the resolution: reset it and wait for the next wakeup */
+ resolv_reset_resolution(res);
+ continue;
+ }
+ }
+
+ if (LIST_ISEMPTY(&res->requesters)) {
+ abort_resolution(res);
+ continue;
+ }
+
+ /* not expired yet: leave it in the wait list */
+ exp = tick_add(res->last_resolution, resolv_resolution_timeout(res));
+ if (tick_isset(res->last_resolution) && !tick_is_expired(exp, now_ms))
+ continue;
+
+ if (resolv_run_resolution(res) != 1) {
+ res->last_resolution = now_ms;
+ LIST_DEL_INIT(&res->list);
+ LIST_APPEND(&resolvers->resolutions.wait, &res->list);
+ }
+ }
+
+ resolv_update_resolvers_timeout(resolvers);
+ HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+
+ if (unlikely(stopping)) {
+ struct dns_nameserver *ns;
+
+ /* nothing left in the active list: stop rescheduling this task */
+ if (LIST_ISEMPTY(&resolvers->resolutions.curr))
+ t->expire = TICK_ETERNITY;
+
+ /* wake the idle task of stream (TCP) nameservers — presumably
+ * so idle sessions can be released during soft-stop; confirm
+ * against the dns_session code. */
+ list_for_each_entry(ns, &resolvers->nameservers, list) {
+ if (ns->stream)
+ task_wakeup(ns->stream->task_idle, TASK_WOKEN_MSG);
+ }
+ }
+
+ /* now we can purge all queued deletions */
+ leave_resolver_code();
+ return t;
+}
+
+
+/* Destroys a resolvers section: releases every nameserver (datagram and
+ * stream transports, rings, tasks, extra counters), frees all running and
+ * waiting resolutions together with their requesters, then releases the
+ * internal proxy and the section itself. Must only be called once nothing
+ * can access <resolvers> anymore (deinit path).
+ */
+static void resolvers_destroy(struct resolvers *resolvers)
+{
+ struct dns_nameserver *ns, *nsback;
+ struct resolv_resolution *res, *resback;
+ struct resolv_requester *req, *reqback;
+
+ list_for_each_entry_safe(ns, nsback, &resolvers->nameservers, list) {
+ free(ns->id);
+ free((char *)ns->conf.file);
+ if (ns->dgram) {
+ /* close the UDP socket if one was opened */
+ if (ns->dgram->conn.t.sock.fd != -1) {
+ fd_delete(ns->dgram->conn.t.sock.fd);
+ close(ns->dgram->conn.t.sock.fd);
+ }
+ ring_free(ns->dgram->ring_req);
+ free(ns->dgram);
+ }
+ if (ns->stream) {
+ /* NOTE(review): task_idle is not destroyed here while
+ * task_req/task_rsp are — confirm it is released
+ * elsewhere (e.g. by the dns_session code). */
+ ring_free(ns->stream->ring_req);
+ task_destroy(ns->stream->task_req);
+ task_destroy(ns->stream->task_rsp);
+ free(ns->stream);
+ }
+ LIST_DEL_INIT(&ns->list);
+ EXTRA_COUNTERS_FREE(ns->extra_counters);
+ free(ns);
+ }
+
+ /* release running resolutions and their requesters */
+ list_for_each_entry_safe(res, resback, &resolvers->resolutions.curr, list) {
+ list_for_each_entry_safe(req, reqback, &res->requesters, list) {
+ LIST_DEL_INIT(&req->list);
+ pool_free(resolv_requester_pool, req);
+ }
+ resolv_free_resolution(res);
+ }
+
+ /* same for the resolutions from the wait list */
+ list_for_each_entry_safe(res, resback, &resolvers->resolutions.wait, list) {
+ list_for_each_entry_safe(req, reqback, &res->requesters, list) {
+ LIST_DEL_INIT(&req->list);
+ pool_free(resolv_requester_pool, req);
+ }
+ resolv_free_resolution(res);
+ }
+
+ free_proxy(resolvers->px);
+ free(resolvers->id);
+ free((char *)resolvers->conf.file);
+ task_destroy(resolvers->t);
+ LIST_DEL_INIT(&resolvers->list);
+ free(resolvers);
+}
+
+/* Releases all memory allocated by the DNS layer at deinit time: destroys
+ * every resolvers section, then frees every SRV record request still in the
+ * global list. */
+static void resolvers_deinit(void)
+{
+ struct resolvers *resolvers, *resolversback;
+ struct resolv_srvrq *srvrq, *srvrqback;
+
+ list_for_each_entry_safe(resolvers, resolversback, &sec_resolvers, list) {
+ resolvers_destroy(resolvers);
+ }
+
+ list_for_each_entry_safe(srvrq, srvrqback, &resolv_srvrq_list, list) {
+ free(srvrq->name);
+ free(srvrq->hostname_dn);
+ LIST_DEL_INIT(&srvrq->list);
+ free(srvrq);
+ }
+}
+
+/* Finalizes the DNS configuration by allocating required resources and checking
+ * live parameters.
+ * For each resolvers section, each UDP nameserver address is probed with a
+ * short-lived connected socket, then the section's processing task is created
+ * and started. Then every server of every enabled proxy that references a
+ * resolvers section is linked to it (directly, or through its shared SRV
+ * request) and flagged non-purgeable.
+ * Returns 0 on success, 1 on error; on error everything allocated by the DNS
+ * layer is torn down through resolvers_deinit().
+ */
+static int resolvers_finalize_config(void)
+{
+ struct resolvers *resolvers;
+ struct proxy *px;
+ int err_code = 0;
+
+ enter_resolver_code();
+
+ /* allocate pool of resolution per resolvers */
+ list_for_each_entry(resolvers, &sec_resolvers, list) {
+ struct dns_nameserver *ns;
+ struct task *t;
+
+ /* Check if we can create the socket with nameservers info */
+ list_for_each_entry(ns, &resolvers->nameservers, list) {
+ int fd;
+
+ if (ns->dgram) {
+ /* Check nameserver info: the socket is only a
+ * connectivity probe and is closed right away. */
+ if ((fd = socket(ns->dgram->conn.addr.to.ss_family, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
+ if (!resolvers->conf.implicit) { /* complain only if it was configured manually */
+ ha_alert("resolvers '%s': can't create socket for nameserver '%s'.\n",
+ resolvers->id, ns->id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ }
+ continue;
+ }
+ /* connecting the UDP socket validates the address/route */
+ if (connect(fd, (struct sockaddr*)&ns->dgram->conn.addr.to, get_addr_len(&ns->dgram->conn.addr.to)) == -1) {
+ if (!resolvers->conf.implicit) { /* emit a warning only if it was configured manually */
+ ha_warning("resolvers '%s': can't connect socket for nameserver '%s'.\n",
+ resolvers->id, ns->id);
+ }
+ close(fd);
+ err_code |= ERR_WARN;
+ continue;
+ }
+ close(fd);
+ }
+ }
+
+ /* Create the task associated to the resolvers section */
+ if ((t = task_new_anywhere()) == NULL) {
+ ha_alert("resolvers '%s' : out of memory.\n", resolvers->id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ goto err;
+ }
+
+ /* Update task's parameters */
+ t->process = process_resolvers;
+ t->context = resolvers;
+ resolvers->t = t;
+ task_wakeup(t, TASK_WOKEN_INIT);
+ }
+
+ for (px = proxies_list; px; px = px->next) {
+ struct server *srv;
+
+ if (px->flags & PR_FL_DISABLED) {
+ /* must not run and will not work anyway since
+ * nothing in the proxy is initialized.
+ */
+ continue;
+ }
+
+ for (srv = px->srv; srv; srv = srv->next) {
+ struct resolvers *resolvers;
+
+ if (!srv->resolvers_id)
+ continue;
+
+ if ((resolvers = find_resolvers_by_id(srv->resolvers_id)) == NULL) {
+ ha_alert("%s '%s', server '%s': unable to find required resolvers '%s'\n",
+ proxy_type_str(px), px->id, srv->id, srv->resolvers_id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ continue;
+ }
+ srv->resolvers = resolvers;
+ srv->srvrq_check = NULL;
+ if (srv->srvrq) {
+ /* the SRV request is shared between servers: only link it once */
+ if (!srv->srvrq->resolvers) {
+ srv->srvrq->resolvers = srv->resolvers;
+ if (resolv_link_resolution(srv->srvrq, OBJ_TYPE_SRVRQ, 0) == -1) {
+ ha_alert("%s '%s' : unable to set DNS resolution for server '%s'.\n",
+ proxy_type_str(px), px->id, srv->id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ continue;
+ }
+ }
+
+ /* per-server task watching the SRV record expiration */
+ srv->srvrq_check = task_new_anywhere();
+ if (!srv->srvrq_check) {
+ ha_alert("%s '%s' : unable to create SRVRQ task for server '%s'.\n",
+ proxy_type_str(px), px->id, srv->id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ goto err;
+ }
+ srv->srvrq_check->process = resolv_srvrq_expire_task;
+ srv->srvrq_check->context = srv;
+ srv->srvrq_check->expire = TICK_ETERNITY;
+ }
+ else if (resolv_link_resolution(srv, OBJ_TYPE_SERVER, 0) == -1) {
+ ha_alert("%s '%s', unable to set DNS resolution for server '%s'.\n",
+ proxy_type_str(px), px->id, srv->id);
+ err_code |= (ERR_ALERT|ERR_ABORT);
+ continue;
+ }
+
+ /* a server attached to a resolution must never be purged */
+ srv->flags |= SRV_F_NON_PURGEABLE;
+ }
+ }
+
+ if (err_code & (ERR_ALERT|ERR_ABORT))
+ goto err;
+
+ leave_resolver_code();
+ return 0;
+ err:
+ /* full rollback: release everything the DNS layer allocated */
+ leave_resolver_code();
+ resolvers_deinit();
+ return 1;
+
+}
+
+/* Dumps one line of statistics for nameserver <ns> on stream connector <sc>.
+ * <stats> is a caller-provided array of <stats_count> fields which is zeroed
+ * here and filled by every module of <stat_modules> before being emitted.
+ * Returns 1 when the line was emitted, 0 when the output buffer is full and
+ * the caller must retry later.
+ */
+static int stats_dump_resolv_to_buffer(struct stconn *sc,
+ struct dns_nameserver *ns,
+ struct field *stats, size_t stats_count,
+ struct list *stat_modules)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct stats_module *mod;
+ size_t idx = 0;
+
+ memset(stats, 0, sizeof(struct field) * stats_count);
+
+ /* each stats module fills its own consecutive slice of the fields array */
+ list_for_each_entry(mod, stat_modules, list) {
+ struct counters_node *counters = EXTRA_COUNTERS_GET(ns->extra_counters, mod);
+
+ mod->fill_stats(counters, stats + idx);
+ idx += mod->stats_count;
+ }
+
+ if (!stats_dump_one_line(stats, idx, appctx))
+ return 0;
+
+ if (!stats_putchk(appctx, NULL))
+ goto full;
+
+ return 1;
+
+ full:
+ return 0;
+}
+
+/* Uses <appctx.ctx.stats.obj1> as a pointer to the current resolver and <obj2>
+ * as a pointer to the current nameserver, so the dump can resume where it
+ * stopped when the output buffer filled up.
+ * Returns 1 when the dump is complete, 0 when the output buffer is full and
+ * the function must be called again (resume state is saved in the context).
+ */
+int stats_dump_resolvers(struct stconn *sc,
+ struct field *stats, size_t stats_count,
+ struct list *stat_modules)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct channel *rep = sc_ic(sc);
+ struct resolvers *resolver = ctx->obj1;
+ struct dns_nameserver *ns = ctx->obj2;
+
+ /* first call: start from the first resolvers section */
+ if (!resolver)
+ resolver = LIST_NEXT(&sec_resolvers, struct resolvers *, list);
+
+ /* dump resolvers */
+ list_for_each_entry_from(resolver, &sec_resolvers, list) {
+ ctx->obj1 = resolver;
+
+ /* resume from the saved nameserver, or start from the first one */
+ ns = ctx->obj2 ?
+ ctx->obj2 :
+ LIST_NEXT(&resolver->nameservers, struct dns_nameserver *, list);
+
+ list_for_each_entry_from(ns, &resolver->nameservers, list) {
+ ctx->obj2 = ns;
+
+ /* ask for more room before dumping the next line */
+ if (buffer_almost_full(&rep->buf)) {
+ sc_need_room(sc, b_size(&rep->buf) / 2);
+ goto full;
+ }
+
+ if (!stats_dump_resolv_to_buffer(sc, ns,
+ stats, stats_count,
+ stat_modules)) {
+ return 0;
+ }
+ }
+
+ ctx->obj2 = NULL;
+ }
+
+ return 1;
+
+ full:
+ return 0;
+}
+
+/* Resets the extra counters of every nameserver of every resolvers section
+ * for all modules of <stat_modules>. When <clrall> is 0, only modules flagged
+ * as clearable are reset; a non-zero value clears everything. Counters are
+ * reset by copying back each module's template counters. */
+void resolv_stats_clear_counters(int clrall, struct list *stat_modules)
+{
+ struct resolvers *resolvers;
+ struct dns_nameserver *ns;
+ struct stats_module *mod;
+ void *counters;
+
+ list_for_each_entry(mod, stat_modules, list) {
+ if (!mod->clearable && !clrall)
+ continue;
+
+ list_for_each_entry(resolvers, &sec_resolvers, list) {
+ list_for_each_entry(ns, &resolvers->nameservers, list) {
+ counters = EXTRA_COUNTERS_GET(ns->extra_counters, mod);
+ /* re-seed from the module's default counters */
+ memcpy(counters, mod->counters, mod->counters_size);
+ }
+ }
+ }
+
+}
+
+/* Allocates and initializes the extra counters of every nameserver of every
+ * resolvers section: registers each module of <stat_modules>, allocates the
+ * shared counters area, seeds it from the modules' templates and records the
+ * "resolvers" module's counters pointer into ns->counters.
+ * Returns 1 on success, 0 on allocation failure.
+ */
+int resolv_allocate_counters(struct list *stat_modules)
+{
+ struct stats_module *mod;
+ struct resolvers *resolvers;
+ struct dns_nameserver *ns;
+
+ list_for_each_entry(resolvers, &sec_resolvers, list) {
+ list_for_each_entry(ns, &resolvers->nameservers, list) {
+ EXTRA_COUNTERS_REGISTER(&ns->extra_counters, COUNTERS_RSLV,
+ alloc_failed);
+
+ list_for_each_entry(mod, stat_modules, list) {
+ EXTRA_COUNTERS_ADD(mod,
+ ns->extra_counters,
+ mod->counters,
+ mod->counters_size);
+ }
+
+ EXTRA_COUNTERS_ALLOC(ns->extra_counters, alloc_failed);
+
+ list_for_each_entry(mod, stat_modules, list) {
+ /* copy the module's template at its byte offset */
+ memcpy(ns->extra_counters->data + mod->counters_off[ns->extra_counters->type],
+ mod->counters, mod->counters_size);
+
+ /* Store the ns counters pointer.
+ * NOTE(review): the cast binds before '+', so the
+ * offset is scaled by sizeof(struct dns_counters)
+ * while the memcpy above uses a byte offset —
+ * confirm counters_off[COUNTERS_RSLV] is 0 or that
+ * this scaling is intended. */
+ if (strcmp(mod->name, "resolvers") == 0) {
+ ns->counters = (struct dns_counters *)ns->extra_counters->data + mod->counters_off[COUNTERS_RSLV];
+ ns->counters->id = ns->id;
+ ns->counters->pid = resolvers->id;
+ }
+ }
+ }
+ }
+
+ return 1;
+
+alloc_failed:
+ return 0;
+}
+
+/* Parser for "show resolvers [id]": if an arg is found, it sets the optional
+ * resolvers section pointer into a show_resolvers_ctx struct pointed to by
+ * svcctx, or NULL when dumping all.
+ * Returns 0 on success, or the cli_err() return value when the named section
+ * does not exist.
+ */
+static int cli_parse_stat_resolvers(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_resolvers_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ struct resolvers *presolvers;
+
+ if (*args[2]) {
+ list_for_each_entry(presolvers, &sec_resolvers, list) {
+ if (strcmp(presolvers->id, args[2]) == 0) {
+ ctx->forced_section = presolvers;
+ break;
+ }
+ }
+ /* relies on the reserved svcctx being zero-initialized */
+ if (ctx->forced_section == NULL)
+ return cli_err(appctx, "Can't find that resolvers section\n");
+ }
+ return 0;
+}
+
+/* Dumps counters from all resolvers section and associated name servers. It
+ * returns 0 if the output buffer is full and it needs to be called again,
+ * otherwise non-zero. It may limit itself to the resolver pointed to by the
+ * <resolvers> field of struct show_resolvers_ctx pointed to by <svcctx> if
+ * it's not null. Resume state (current section and nameserver) is kept in
+ * the context across calls.
+ */
+static int cli_io_handler_dump_resolvers_to_buffer(struct appctx *appctx)
+{
+ struct show_resolvers_ctx *ctx = appctx->svcctx;
+ struct resolvers *resolvers = ctx->resolvers;
+ struct dns_nameserver *ns;
+
+ chunk_reset(&trash);
+
+ if (LIST_ISEMPTY(&sec_resolvers)) {
+ if (applet_putstr(appctx, "No resolvers found\n") == -1)
+ goto full;
+ }
+ else {
+ /* first call: start from the first section */
+ if (!resolvers)
+ resolvers = LIST_ELEM(sec_resolvers.n, typeof(resolvers), list);
+
+ list_for_each_entry_from(resolvers, &sec_resolvers, list) {
+ /* skip sections other than the requested one, if any */
+ if (ctx->forced_section != NULL && ctx->forced_section != resolvers)
+ continue;
+
+ ctx->resolvers = resolvers;
+ ns = ctx->ns;
+
+ /* section header is only printed when not resuming mid-section */
+ if (!ns) {
+ chunk_printf(&trash, "Resolvers section %s\n", resolvers->id);
+ if (applet_putchk(appctx, &trash) == -1)
+ goto full;
+
+ ns = LIST_ELEM(resolvers->nameservers.n, typeof(ns), list);
+ ctx->ns = ns;
+ }
+
+ list_for_each_entry_from(ns, &resolvers->nameservers, list) {
+ chunk_reset(&trash);
+ chunk_appendf(&trash, " nameserver %s:\n", ns->id);
+ chunk_appendf(&trash, "  sent: %lld\n", ns->counters->sent);
+ chunk_appendf(&trash, "  snd_error: %lld\n", ns->counters->snd_error);
+ chunk_appendf(&trash, "  valid: %lld\n", ns->counters->app.resolver.valid);
+ chunk_appendf(&trash, "  update: %lld\n", ns->counters->app.resolver.update);
+ chunk_appendf(&trash, "  cname: %lld\n", ns->counters->app.resolver.cname);
+ chunk_appendf(&trash, "  cname_error: %lld\n", ns->counters->app.resolver.cname_error);
+ chunk_appendf(&trash, "  any_err: %lld\n", ns->counters->app.resolver.any_err);
+ chunk_appendf(&trash, "  nx: %lld\n", ns->counters->app.resolver.nx);
+ chunk_appendf(&trash, "  timeout: %lld\n", ns->counters->app.resolver.timeout);
+ chunk_appendf(&trash, "  refused: %lld\n", ns->counters->app.resolver.refused);
+ chunk_appendf(&trash, "  other: %lld\n", ns->counters->app.resolver.other);
+ chunk_appendf(&trash, "  invalid: %lld\n", ns->counters->app.resolver.invalid);
+ chunk_appendf(&trash, "  too_big: %lld\n", ns->counters->app.resolver.too_big);
+ chunk_appendf(&trash, "  truncated: %lld\n", ns->counters->app.resolver.truncated);
+ chunk_appendf(&trash, "  outdated: %lld\n", ns->counters->app.resolver.outdated);
+ if (applet_putchk(appctx, &trash) == -1)
+ goto full;
+ ctx->ns = ns;
+ }
+
+ ctx->ns = NULL;
+
+ /* was this the only section to dump ? */
+ if (ctx->forced_section)
+ break;
+ }
+ }
+
+ /* done! */
+ return 1;
+ full:
+ /* the output buffer is full, retry later */
+ return 0;
+}
+
+/* register cli keywords: "show resolvers [id]" maps to the parser and dump
+ * handler above */
+static struct cli_kw_list cli_kws = {{ }, {
+ { { "show", "resolvers", NULL }, "show resolvers [id] : dumps counters from all resolvers section and associated name servers",
+ cli_parse_stat_resolvers, cli_io_handler_dump_resolvers_to_buffer },
+ {{},}
+ }
+};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/*
+ * Prepare <stream> for hostname resolution: converts <hostname> (of length
+ * <hostname_len>) to its DNS label form and stores an allocated copy of it
+ * into stream->resolv_ctx.
+ * Returns -1 in case of any allocation or conversion failure, 0 if not
+ * (including when <hostname> is NULL, which is a no-op).
+ * On error, a global failure counter is also incremented.
+ */
+static int action_prepare_for_resolution(struct stream *stream, const char *hostname, int hostname_len)
+{
+ char *hostname_dn;
+ int hostname_dn_len;
+ struct buffer *tmp = get_trash_chunk();
+
+ if (!hostname)
+ return 0;
+
+ /* convert to DNS label form in the trash chunk first */
+ hostname_dn = tmp->area;
+ hostname_dn_len = resolv_str_to_dn_label(hostname, hostname_len,
+ hostname_dn, tmp->size);
+ if (hostname_dn_len == -1)
+ goto err;
+
+
+ stream->resolv_ctx.hostname_dn = strdup(hostname_dn);
+ stream->resolv_ctx.hostname_dn_len = hostname_dn_len;
+ if (!stream->resolv_ctx.hostname_dn)
+ goto err;
+
+ return 0;
+
+ err:
+ ha_free(&stream->resolv_ctx.hostname_dn);
+ resolv_failed_resolutions += 1;
+ return -1;
+}
+
+
+/*
+ * Execute the "do-resolution" action. May be called from {tcp,http}request.
+ * If a resolution is already attached to the stream, its result (when valid)
+ * is stored into the configured variable and the requester is released;
+ * otherwise a new resolution is linked and triggered, and the action yields
+ * until a response arrives (unless ACT_OPT_FINAL forbids yielding).
+ * Returns ACT_RET_CONT or ACT_RET_YIELD.
+ */
+enum act_return resolv_action_do_resolve(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct resolv_resolution *resolution;
+ struct sample *smp;
+ struct resolv_requester *req;
+ struct resolvers *resolvers;
+ struct resolv_resolution *res;
+ int exp, locked = 0;
+ enum act_return ret = ACT_RET_CONT;
+
+ resolvers = rule->arg.resolv.resolvers;
+
+ enter_resolver_code();
+
+ /* we have a response to our DNS resolution */
+ use_cache:
+ if (s->resolv_ctx.requester && s->resolv_ctx.requester->resolution != NULL) {
+ resolution = s->resolv_ctx.requester->resolution;
+ if (!locked) {
+ HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+ locked = 1;
+ }
+
+ if (resolution->step == RSLV_STEP_RUNNING)
+ goto yield;
+ if (resolution->step == RSLV_STEP_NONE) {
+ /* We update the variable only if we have a valid
+ * response. If the response was not received yet, we
+ * must yield.
+ */
+ if (resolution->status == RSLV_STATUS_NONE)
+ goto yield;
+ if (resolution->status == RSLV_STATUS_VALID) {
+ /* NOTE(review): this local shadows the outer
+ * 'struct sample *smp' pointer — intentional
+ * here but easy to misread. */
+ struct sample smp;
+ short ip_sin_family = 0;
+ void *ip = NULL;
+
+ resolv_get_ip_from_response(&resolution->response, rule->arg.resolv.opts, NULL,
+ 0, &ip, &ip_sin_family, NULL);
+
+ switch (ip_sin_family) {
+ case AF_INET:
+ smp.data.type = SMP_T_IPV4;
+ memcpy(&smp.data.u.ipv4, ip, 4);
+ break;
+ case AF_INET6:
+ smp.data.type = SMP_T_IPV6;
+ memcpy(&smp.data.u.ipv6, ip, 16);
+ break;
+ default:
+ ip = NULL;
+ }
+
+ if (ip) {
+ smp.px = px;
+ smp.sess = sess;
+ smp.strm = s;
+
+ vars_set_by_name(rule->arg.resolv.varname, strlen(rule->arg.resolv.varname), &smp);
+ }
+ }
+ }
+
+ goto release_requester;
+ }
+
+ /* need to configure and start a new DNS resolution */
+ smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.resolv.expr, SMP_T_STR);
+ if (smp == NULL)
+ goto end;
+
+ if (action_prepare_for_resolution(s, smp->data.u.str.area, smp->data.u.str.data) == -1)
+ goto end; /* on error, ignore the action */
+
+ s->resolv_ctx.parent = rule;
+
+ HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+ locked = 1;
+
+ resolv_link_resolution(s, OBJ_TYPE_STREAM, 0);
+
+ /* Check if there is a fresh enough response in the cache of our associated resolution */
+ req = s->resolv_ctx.requester;
+ if (!req || !req->resolution)
+ goto release_requester; /* on error, ignore the action */
+ res = req->resolution;
+
+ exp = tick_add(res->last_resolution, resolvers->hold.valid);
+ if (resolvers->t && res->status == RSLV_STATUS_VALID && tick_isset(res->last_resolution)
+ && !tick_is_expired(exp, now_ms)) {
+ goto use_cache;
+ }
+
+ resolv_trigger_resolution(s->resolv_ctx.requester);
+
+ yield:
+ /* last evaluation: we cannot yield anymore, give up and release */
+ if (flags & ACT_OPT_FINAL)
+ goto release_requester;
+ ret = ACT_RET_YIELD;
+
+ end:
+ /* NOTE(review): leave_resolver_code() runs before the unlock —
+ * confirm purging deferred deletions under the DNS lock is intended. */
+ leave_resolver_code();
+ if (locked)
+ HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+ return ret;
+
+ release_requester:
+ /* detach and free the stream's requester and hostname */
+ ha_free(&s->resolv_ctx.hostname_dn);
+ s->resolv_ctx.hostname_dn_len = 0;
+ if (s->resolv_ctx.requester) {
+ _resolv_unlink_resolution(s->resolv_ctx.requester);
+ pool_free(resolv_requester_pool, s->resolv_ctx.requester);
+ s->resolv_ctx.requester = NULL;
+ }
+ goto end;
+}
+
+/* Frees everything allocated for a do-resolve action rule: the sample
+ * expression, the variable name, the resolvers section name and the
+ * resolution options. */
+static void release_resolv_action(struct act_rule *rule)
+{
+ release_sample_expr(rule->arg.resolv.expr);
+ free(rule->arg.resolv.varname);
+ free(rule->arg.resolv.resolvers_id);
+ free(rule->arg.resolv.opts);
+}
+
+
+/* parse "do-resolve" action
+ * This action takes the following arguments:
+ * do-resolve(<varName>,<resolversSectionName>,<resolvePrefer>) <expr>
+ *
+ * - <varName> is the variable name where the result of the DNS resolution will be stored
+ * (mandatory)
+ * - <resolversSectionName> is the name of the resolvers section to use to perform the resolution
+ * (mandatory)
+ * - <resolvePrefer> can be either 'ipv4' or 'ipv6' and is the IP family we would like to resolve first
+ * (optional), defaults to ipv6
+ * - <expr> is an HAProxy expression used to fetch the name to be resolved
+ *
+ * Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error with <err>
+ * filled.
+ */
+enum act_parse_ret resolv_parse_do_resolve(const char **args, int *orig_arg, struct proxy *px, struct act_rule *rule, char **err)
+{
+ int cur_arg;
+ struct sample_expr *expr;
+ unsigned int where;
+ const char *beg, *end;
+
+ /* orig_arg points to the first argument, but we need to analyse the command itself first */
+ cur_arg = *orig_arg - 1;
+
+ /* locate varName, which is mandatory */
+ beg = strchr(args[cur_arg], '(');
+ if (beg == NULL)
+ goto do_resolve_parse_error;
+ beg = beg + 1; /* beg should points to the first character after opening parenthesis '(' */
+ end = strchr(beg, ',');
+ if (end == NULL)
+ goto do_resolve_parse_error;
+ rule->arg.resolv.varname = my_strndup(beg, end - beg);
+ if (rule->arg.resolv.varname == NULL)
+ goto do_resolve_parse_error;
+
+
+ /* locate resolversSectionName, which is mandatory.
+ * Since next parameters are optional, the delimiter may be comma ','
+ * or closing parenthesis ')'
+ */
+ beg = end + 1;
+ end = strchr(beg, ',');
+ if (end == NULL)
+ end = strchr(beg, ')');
+ if (end == NULL)
+ goto do_resolve_parse_error;
+ rule->arg.resolv.resolvers_id = my_strndup(beg, end - beg);
+ if (rule->arg.resolv.resolvers_id == NULL)
+ goto do_resolve_parse_error;
+
+
+ /* NOTE(review): opts is not freed on the error path below — confirm
+ * whether release_resolv_action() covers this or if it leaks. */
+ rule->arg.resolv.opts = calloc(1, sizeof(*rule->arg.resolv.opts));
+ if (rule->arg.resolv.opts == NULL)
+ goto do_resolve_parse_error;
+
+ /* Default priority is ipv6 */
+ rule->arg.resolv.opts->family_prio = AF_INET6;
+
+ /* optional arguments accepted for now:
+ * ipv4 or ipv6
+ * NOTE(review): strncmp with n == end - beg matches prefixes, so a
+ * truncated option like "ip" (or an empty one) is accepted as ipv4 —
+ * confirm whether this is intended.
+ */
+ while (*end != ')') {
+ beg = end + 1;
+ end = strchr(beg, ',');
+ if (end == NULL)
+ end = strchr(beg, ')');
+ if (end == NULL)
+ goto do_resolve_parse_error;
+
+ if (strncmp(beg, "ipv4", end - beg) == 0) {
+ rule->arg.resolv.opts->family_prio = AF_INET;
+ }
+ else if (strncmp(beg, "ipv6", end - beg) == 0) {
+ rule->arg.resolv.opts->family_prio = AF_INET6;
+ }
+ else {
+ goto do_resolve_parse_error;
+ }
+ }
+
+ cur_arg = cur_arg + 1;
+
+ expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, err, &px->conf.args, NULL);
+ if (!expr)
+ goto do_resolve_parse_error;
+
+
+ /* the fetch must be usable at request time in this proxy's context */
+ where = 0;
+ if (px->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_HRQ_HDR;
+ if (px->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_HRQ_HDR;
+
+ if (!(expr->fetch->val & where)) {
+ memprintf(err,
+ "fetch method '%s' extracts information from '%s', none of which is available here",
+ args[cur_arg-1], sample_src_names(expr->fetch->use));
+ free(expr);
+ return ACT_RET_PRS_ERR;
+ }
+ rule->arg.resolv.expr = expr;
+ rule->action = ACT_CUSTOM;
+ rule->action_ptr = resolv_action_do_resolve;
+ *orig_arg = cur_arg;
+
+ rule->check_ptr = check_action_do_resolve;
+ rule->release_ptr = release_resolv_action;
+
+ return ACT_RET_PRS_OK;
+
+ do_resolve_parse_error:
+ ha_free(&rule->arg.resolv.varname);
+ ha_free(&rule->arg.resolv.resolvers_id);
+ memprintf(err, "Can't parse '%s'. Expects 'do-resolve(<varname>,<resolvers>[,<options>]) <expr>'. Available options are 'ipv4' and 'ipv6'",
+ args[cur_arg]);
+ return ACT_RET_PRS_ERR;
+}
+
+/* register "do-resolve" as an http-request action */
+static struct action_kw_list http_req_kws = { { }, {
+ { "do-resolve", resolv_parse_do_resolve, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_kws);
+
+/* register "do-resolve" as a tcp-request content action */
+static struct action_kw_list tcp_req_cont_actions = {ILH, {
+ { "do-resolve", resolv_parse_do_resolve, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_cont_actions);
+
+/* Check an "http-request do-resolve" action.
+ *
+ * Resolves the configured resolvers section name into a section pointer and
+ * stores it into rule->arg.resolv.resolvers.
+ * The function returns 1 in success case, otherwise, it returns 0 and err is
+ * filled.
+ */
+int check_action_do_resolve(struct act_rule *rule, struct proxy *px, char **err)
+{
+ struct resolvers *resolvers = NULL;
+
+ if (rule->arg.resolv.resolvers_id == NULL) {
+ memprintf(err,"Proxy '%s': %s", px->id, "do-resolve action without resolvers");
+ return 0;
+ }
+
+ resolvers = find_resolvers_by_id(rule->arg.resolv.resolvers_id);
+ if (resolvers == NULL) {
+ memprintf(err,"Can't find resolvers section '%s' for do-resolve action", rule->arg.resolv.resolvers_id);
+ return 0;
+ }
+ rule->arg.resolv.resolvers = resolvers;
+
+ return 1;
+}
+
+/* Configures the internal proxy a resolvers section uses for its TCP (stream)
+ * nameserver connections: no maxconn, one retry, eternal client/server
+ * timeouts, independent streams and smart connect. */
+void resolvers_setup_proxy(struct proxy *px)
+{
+ px->last_change = ns_to_sec(now_ns);
+ px->cap = PR_CAP_FE | PR_CAP_BE;
+ px->maxconn = 0;
+ px->conn_retries = 1;
+ px->timeout.server = TICK_ETERNITY;
+ px->timeout.client = TICK_ETERNITY;
+ px->timeout.connect = 1000; // by default same as timeout.resolve
+ px->accept = NULL;
+ px->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON;
+}
+
+/* Parses /etc/resolv.conf and appends one dns_nameserver to the current
+ * resolvers section (curr_resolvers) per valid "nameserver" line. Duplicate
+ * or unparsable addresses are skipped with a warning. <errmsg> and <warnmsg>
+ * are optional output message pointers (may be NULL) filled on fatal errors
+ * and warnings respectively.
+ * Returns a cumulated ERR_* code (0 when everything went fine).
+ */
+static int parse_resolve_conf(char **errmsg, char **warnmsg)
+{
+ struct dns_nameserver *newnameserver = NULL;
+ const char *whitespace = "\r\n\t ";
+ char *resolv_line = NULL;
+ int resolv_linenum = 0;
+ FILE *f = NULL;
+ char *address = NULL;
+ struct sockaddr_storage *sk = NULL;
+ struct protocol *proto;
+ int duplicate_name = 0;
+ int err_code = 0;
+
+ if ((resolv_line = malloc(sizeof(*resolv_line) * LINESIZE)) == NULL) {
+ /* NOTE(review): errmsg is dereferenced unguarded here while all
+ * later uses check 'if (errmsg)' — confirm callers always pass
+ * a non-NULL errmsg. */
+ memprintf(errmsg, "out of memory.\n");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto resolv_out;
+ }
+
+ if ((f = fopen("/etc/resolv.conf", "r")) == NULL) {
+ if (errmsg)
+ memprintf(errmsg, "failed to open /etc/resolv.conf.");
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto resolv_out;
+ }
+
+ /* scratch storage reused for each parsed address */
+ sk = calloc(1, sizeof(*sk));
+ if (sk == NULL) {
+ if (errmsg)
+ memprintf(errmsg, "parsing [/etc/resolv.conf:%d] : out of memory.", resolv_linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto resolv_out;
+ }
+
+ while (fgets(resolv_line, LINESIZE, f) != NULL) {
+ resolv_linenum++;
+ if (strncmp(resolv_line, "nameserver", 10) != 0)
+ continue;
+
+ /* NOTE(review): strtok is not reentrant — presumably safe here
+ * because config parsing is single-threaded; confirm. */
+ address = strtok(resolv_line + 10, whitespace);
+ /* "nameserver" must be followed by whitespace, not glued text */
+ if (address == resolv_line + 10)
+ continue;
+
+ if (address == NULL) {
+ if (warnmsg)
+ memprintf(warnmsg, "%sparsing [/etc/resolv.conf:%d] : nameserver line is missing address.\n",
+ *warnmsg ? *warnmsg : "", resolv_linenum);
+ err_code |= ERR_WARN;
+ continue;
+ }
+
+ /* nameservers are identified by their address string: skip dups */
+ duplicate_name = 0;
+ list_for_each_entry(newnameserver, &curr_resolvers->nameservers, list) {
+ if (strcmp(newnameserver->id, address) == 0) {
+ if (warnmsg)
+ memprintf(warnmsg, "%sParsing [/etc/resolv.conf:%d] : generated name for /etc/resolv.conf nameserver '%s' conflicts with another nameserver (declared at %s:%d), it appears to be a duplicate and will be excluded.\n",
+ *warnmsg ? *warnmsg : "", resolv_linenum, address, newnameserver->conf.file, newnameserver->conf.line);
+ err_code |= ERR_WARN;
+ duplicate_name = 1;
+ }
+ }
+
+ if (duplicate_name)
+ continue;
+
+ memset(sk, 0, sizeof(*sk));
+ if (!str2ip2(address, sk, 1)) {
+ if (warnmsg)
+ memprintf(warnmsg, "%sparsing [/etc/resolv.conf:%d] : address '%s' could not be recognized, nameserver will be excluded.\n",
+ *warnmsg ? *warnmsg : "", resolv_linenum, address);
+ err_code |= ERR_WARN;
+ continue;
+ }
+
+ /* standard DNS port */
+ set_host_port(sk, 53);
+
+ /* NOTE(review): PROTO_TYPE_STREAM is looked up although the
+ * nameserver is initialized as datagram below — confirm this
+ * is the intended family/support check. */
+ proto = protocol_lookup(sk->ss_family, PROTO_TYPE_STREAM, 0);
+ if (!proto || !proto->connect) {
+ if (warnmsg)
+ memprintf(warnmsg, "%sparsing [/etc/resolv.conf:%d] : '%s' : connect() not supported for this address family.\n",
+ *warnmsg ? *warnmsg : "", resolv_linenum, address);
+ err_code |= ERR_WARN;
+ continue;
+ }
+
+ if ((newnameserver = calloc(1, sizeof(*newnameserver))) == NULL) {
+ if (errmsg)
+ memprintf(errmsg, "parsing [/etc/resolv.conf:%d] : out of memory.", resolv_linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto resolv_out;
+ }
+
+ if (dns_dgram_init(newnameserver, sk) < 0) {
+ if (errmsg)
+ memprintf(errmsg, "parsing [/etc/resolv.conf:%d] : out of memory.", resolv_linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(newnameserver);
+ goto resolv_out;
+ }
+
+ newnameserver->conf.file = strdup("/etc/resolv.conf");
+ if (newnameserver->conf.file == NULL) {
+ if (errmsg)
+ memprintf(errmsg, "parsing [/etc/resolv.conf:%d] : out of memory.", resolv_linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free(newnameserver);
+ goto resolv_out;
+ }
+
+ newnameserver->id = strdup(address);
+ if (newnameserver->id == NULL) {
+ if (errmsg)
+ memprintf(errmsg, "parsing [/etc/resolv.conf:%d] : out of memory.", resolv_linenum);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ free((char *)newnameserver->conf.file);
+ free(newnameserver);
+ goto resolv_out;
+ }
+
+ newnameserver->parent = curr_resolvers;
+ newnameserver->process_responses = resolv_process_responses;
+ newnameserver->conf.line = resolv_linenum;
+ LIST_APPEND(&curr_resolvers->nameservers, &newnameserver->list);
+ }
+
+resolv_out:
+ free(sk);
+ free(resolv_line);
+ if (f != NULL)
+ fclose(f);
+
+ return err_code;
+}
+
+/* Allocates and initializes a new resolvers section named <id>, declared at
+ * <file>:<linenum>, and returns it via <*resolvers>. A dedicated proxy is
+ * also allocated to host possible TCP (stream) nameservers. The new section
+ * is appended to the global <sec_resolvers> list. Returns 0 on success, or
+ * ERR_ALERT combined with ERR_ABORT/ERR_FATAL on allocation failure.
+ */
+static int resolvers_new(struct resolvers **resolvers, const char *id, const char *file, int linenum)
+{
+	struct resolvers *r = NULL;
+	struct proxy *p = NULL;
+	int err_code = 0;
+
+	if ((r = calloc(1, sizeof(*r))) == NULL) {
+		err_code |= ERR_ALERT | ERR_ABORT;
+		goto out;
+	}
+
+	/* allocate new proxy to tcp servers */
+	p = calloc(1, sizeof *p);
+	if (!p) {
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+	init_new_proxy(p);
+	resolvers_setup_proxy(p);
+	p->parent = r;
+	/* NOTE(review): the strdup() results below are not checked; a failure
+	 * would leave NULL ids/filenames behind -- confirm this is acceptable. */
+	p->id = strdup(id);
+	p->conf.args.file = p->conf.file = strdup(file);
+	p->conf.args.line = p->conf.line = linenum;
+	r->px = p;
+
+	/* default values */
+	LIST_APPEND(&sec_resolvers, &r->list);
+	r->conf.file = strdup(file);
+	r->conf.line = linenum;
+	r->id = strdup(id);
+	r->query_ids = EB_ROOT;
+	/* default maximum response size */
+	r->accepted_payload_size = 512;
+	/* default hold period for nx, other, refuse and timeout is 30s */
+	r->hold.nx = 30000;
+	r->hold.other = 30000;
+	r->hold.refused = 30000;
+	r->hold.timeout = 30000;
+	r->hold.obsolete = 0;
+	/* default hold period for valid is 10s */
+	r->hold.valid = 10000;
+	r->timeout.resolve = 1000;
+	r->timeout.retry = 1000;
+	r->resolve_retries = 3;
+	LIST_INIT(&r->nameservers);
+	LIST_INIT(&r->resolutions.curr);
+	LIST_INIT(&r->resolutions.wait);
+	HA_SPIN_INIT(&r->lock);
+
+	*resolvers = r;
+
+out:
+	/* error paths are only taken before <r> is linked into sec_resolvers
+	 * and before <p> is attached, so a bare free of both is sufficient */
+	if (err_code & (ERR_FATAL|ERR_ABORT)) {
+		ha_free(&r);
+		ha_free(&p);
+	}
+
+	return err_code;
+}
+
+
+/*
+ * Parse a <resolvers> section.
+ * Returns the error code, 0 if OK, or any combination of :
+ * - ERR_ABORT: must abort ASAP
+ * - ERR_FATAL: we can continue parsing but not start the service
+ * - ERR_WARN: a warning has been emitted
+ * - ERR_ALERT: an alert has been emitted
+ * Only the two first ones can stop processing, the two others are just
+ * indicators.
+ */
+int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm)
+{
+	const char *err;
+	int err_code = 0;
+	char *errmsg = NULL;
+	char *warnmsg = NULL;
+
+	if (strcmp(args[0], "resolvers") == 0) { /* new resolvers section */
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : missing name for resolvers section.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		err = invalid_char(args[1]);
+		if (err) {
+			ha_alert("parsing [%s:%d] : character '%c' is not permitted in '%s' name '%s'.\n",
+				 file, linenum, *err, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		list_for_each_entry(curr_resolvers, &sec_resolvers, list) {
+			/* Error if two resolvers owns the same name */
+			if (strcmp(curr_resolvers->id, args[1]) == 0) {
+				ha_alert("Parsing [%s:%d]: resolvers '%s' has same name as another resolvers (declared at %s:%d).\n",
+					 file, linenum, args[1], curr_resolvers->conf.file, curr_resolvers->conf.line);
+				err_code |= ERR_ALERT | ERR_ABORT;
+			}
+		}
+
+		/* allocate the section and make it the current one for the
+		 * following directives */
+		err_code |= resolvers_new(&curr_resolvers, args[1], file, linenum);
+		if (err_code & ERR_ALERT) {
+			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+			goto out;
+		}
+
+	}
+	else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */
+		struct dns_nameserver *newnameserver = NULL;
+		struct sockaddr_storage *sk;
+		int port1, port2;
+		struct protocol *proto;
+
+		if (!*args[2]) {
+			ha_alert("parsing [%s:%d] : '%s' expects <name> and <addr>[:<port>] as arguments.\n",
+				 file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		err = invalid_char(args[1]);
+		if (err) {
+			ha_alert("parsing [%s:%d] : character '%c' is not permitted in server name '%s'.\n",
+				 file, linenum, *err, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		list_for_each_entry(newnameserver, &curr_resolvers->nameservers, list) {
+			/* Error if two resolvers owns the same name */
+			if (strcmp(newnameserver->id, args[1]) == 0) {
+				ha_alert("Parsing [%s:%d]: nameserver '%s' has same name as another nameserver (declared at %s:%d).\n",
+					 file, linenum, args[1], newnameserver->conf.file, newnameserver->conf.line);
+				err_code |= ERR_ALERT | ERR_FATAL;
+			}
+		}
+
+		/* resolve the address; both datagram and stream protocols are
+		 * accepted, defaulting to datagram (UDP) when unspecified */
+		sk = str2sa_range(args[2], NULL, &port1, &port2, NULL, &proto, NULL,
+				  &errmsg, NULL, NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_DGRAM | PA_O_STREAM | PA_O_DEFAULT_DGRAM);
+		if (!sk) {
+			ha_alert("parsing [%s:%d] : '%s %s' : %s\n", file, linenum, args[0], args[1], errmsg);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		if ((newnameserver = calloc(1, sizeof(*newnameserver))) == NULL) {
+			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		/* NOTE(review): <newnameserver> is not freed on the abort paths
+		 * below; acceptable since ERR_ABORT stops the process -- confirm. */
+		if (proto && proto->xprt_type == PROTO_TYPE_STREAM) {
+			/* stream (TCP) nameserver: parse a full server line into
+			 * the section's dedicated proxy */
+			err_code |= parse_server(file, linenum, args, curr_resolvers->px, NULL,
+						 SRV_PARSE_PARSE_ADDR|SRV_PARSE_INITIAL_RESOLVE);
+			if (err_code & (ERR_FATAL|ERR_ABORT)) {
+				err_code |= ERR_ABORT;
+				goto out;
+			}
+
+			if (dns_stream_init(newnameserver, curr_resolvers->px->srv) < 0) {
+				ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+				err_code |= ERR_ALERT|ERR_ABORT;
+				goto out;
+			}
+		}
+		else if (dns_dgram_init(newnameserver, sk) < 0) {
+			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		if ((newnameserver->conf.file = strdup(file)) == NULL) {
+			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		if ((newnameserver->id = strdup(args[1])) == NULL) {
+			ha_alert("parsing [%s:%d] : out of memory.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_ABORT;
+			goto out;
+		}
+
+		newnameserver->parent = curr_resolvers;
+		newnameserver->process_responses = resolv_process_responses;
+		newnameserver->conf.line = linenum;
+		/* the nameservers are linked backward first */
+		LIST_APPEND(&curr_resolvers->nameservers, &newnameserver->list);
+	}
+	else if (strcmp(args[0], "parse-resolv-conf") == 0) {
+		/* import nameservers from the system's /etc/resolv.conf */
+		err_code |= parse_resolve_conf(&errmsg, &warnmsg);
+		if (err_code & ERR_WARN) {
+			indent_msg(&warnmsg, 8);
+			ha_warning("parsing [%s:%d]: %s\n", file, linenum, warnmsg);
+			ha_free(&warnmsg);
+		}
+		if (err_code & ERR_ALERT) {
+			indent_msg(&errmsg, 8);
+			ha_alert("parsing [%s:%d]: %s\n", file, linenum, errmsg);
+			ha_free(&errmsg);
+			goto out;
+		}
+	}
+	else if (strcmp(args[0], "hold") == 0) { /* hold periods */
+		const char *res;
+		unsigned int time;
+
+		if (!*args[2]) {
+			ha_alert("parsing [%s:%d] : '%s' expects an <event> and a <time> as arguments.\n",
+				 file, linenum, args[0]);
+			ha_alert("<event> can be either 'valid', 'nx', 'refused', 'timeout', or 'other'\n");
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		res = parse_time_err(args[2], &time, TIME_UNIT_MS);
+		/* NOTE(review): the messages below report args[1] (the event name)
+		 * while the value actually parsed is args[2]; the "timeout" branch
+		 * reports args[2] instead -- confirm which is intended. */
+		if (res == PARSE_TIME_OVER) {
+			ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s>, maximum value is 2147483647 ms (~24.8 days).\n",
+				 file, linenum, args[1], args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		else if (res == PARSE_TIME_UNDER) {
+			ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 ms.\n",
+				 file, linenum, args[1], args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		else if (res) {
+			ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s>.\n",
+				 file, linenum, *res, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		if (strcmp(args[1], "nx") == 0)
+			curr_resolvers->hold.nx = time;
+		else if (strcmp(args[1], "other") == 0)
+			curr_resolvers->hold.other = time;
+		else if (strcmp(args[1], "refused") == 0)
+			curr_resolvers->hold.refused = time;
+		else if (strcmp(args[1], "timeout") == 0)
+			curr_resolvers->hold.timeout = time;
+		else if (strcmp(args[1], "valid") == 0)
+			curr_resolvers->hold.valid = time;
+		else if (strcmp(args[1], "obsolete") == 0)
+			curr_resolvers->hold.obsolete = time;
+		else {
+			ha_alert("parsing [%s:%d] : '%s' unknown <event>: '%s', expects either 'nx', 'timeout', 'valid', 'obsolete' or 'other'.\n",
+				 file, linenum, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+	}
+	else if (strcmp(args[0], "accepted_payload_size") == 0) {
+		int i = 0;
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : '%s' expects <nb> as argument.\n",
+				 file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		i = atoi(args[1]);
+		if (i < DNS_HEADER_SIZE || i > DNS_MAX_UDP_MESSAGE) {
+			ha_alert("parsing [%s:%d] : '%s' must be between %d and %d inclusive (was %s).\n",
+				 file, linenum, args[0], DNS_HEADER_SIZE, DNS_MAX_UDP_MESSAGE, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+
+		curr_resolvers->accepted_payload_size = i;
+	}
+	else if (strcmp(args[0], "resolution_pool_size") == 0) {
+		ha_alert("parsing [%s:%d] : '%s' directive is not supported anymore (it never appeared in a stable release).\n",
+			 file, linenum, args[0]);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+	else if (strcmp(args[0], "resolve_retries") == 0) {
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : '%s' expects <nb> as argument.\n",
+				 file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		curr_resolvers->resolve_retries = atoi(args[1]);
+	}
+	else if (strcmp(args[0], "timeout") == 0) {
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : '%s' expects 'retry' or 'resolve' and <time> as arguments.\n",
+				 file, linenum, args[0]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+		else if (strcmp(args[1], "retry") == 0 ||
+			 strcmp(args[1], "resolve") == 0) {
+			const char *res;
+			unsigned int tout;
+
+			if (!*args[2]) {
+				ha_alert("parsing [%s:%d] : '%s %s' expects <time> as argument.\n",
+					 file, linenum, args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+			res = parse_time_err(args[2], &tout, TIME_UNIT_MS);
+			if (res == PARSE_TIME_OVER) {
+				ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s %s>, maximum value is 2147483647 ms (~24.8 days).\n",
+					 file, linenum, args[2], args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+			else if (res == PARSE_TIME_UNDER) {
+				ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s %s>, minimum non-null value is 1 ms.\n",
+					 file, linenum, args[2], args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+			else if (res) {
+				ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s %s>.\n",
+					 file, linenum, *res, args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto out;
+			}
+			/* cheap dispatch on the 3rd char: "re[t]ry" vs "re[s]olve" */
+			if (args[1][2] == 't')
+				curr_resolvers->timeout.retry = tout;
+			else {
+				curr_resolvers->timeout.resolve = tout;
+				/* the section proxy's connect timeout tracks it */
+				curr_resolvers->px->timeout.connect = tout;
+			}
+
+		}
+		else {
+			ha_alert("parsing [%s:%d] : '%s' expects 'retry' or 'resolve' and <time> as arguments got '%s'.\n",
+				 file, linenum, args[0], args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto out;
+		}
+	}
+	else if (*args[0] != 0) {
+		ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+out:
+	free(errmsg);
+	free(warnmsg);
+	return err_code;
+}
+
+/* try to create a "default" resolvers section which uses "/etc/resolv.conf"
+ *
+ * This function is opportunistic and does not try to display errors or warnings.
+ */
+int resolvers_create_default()
+{
+	int err_code = 0;
+
+	if (global.mode & MODE_MWORKER_WAIT) /* does not create the section if in wait mode */
+		return 0;
+
+	/* if the section already exists, do nothing */
+	if (find_resolvers_by_id("default"))
+		return 0;
+
+	curr_resolvers = NULL;
+	err_code |= resolvers_new(&curr_resolvers, "default", "<internal>", 0);
+	if (err_code & ERR_CODE)
+		goto err;
+
+	/* flag the section as implicitly created rather than configured */
+	curr_resolvers->conf.implicit = 1;
+
+	/* best-effort import of the system nameservers; NULL err/warn
+	 * pointers silence all messages */
+	err_code |= parse_resolve_conf(NULL, NULL);
+	if (err_code & ERR_CODE)
+		goto err;
+	/* check if there was any nameserver in the resolvconf file */
+	if (LIST_ISEMPTY(&curr_resolvers->nameservers)) {
+		err_code |= ERR_FATAL;
+		goto err;
+	}
+
+err:
+	/* note: the success path also falls through here with no ERR_CODE set */
+	if (err_code & ERR_CODE) {
+		resolvers_destroy(curr_resolvers);
+		curr_resolvers = NULL;
+	}
+
+	/* we never return an error there, we only try to create this section
+	 * if that's possible */
+	return 0;
+}
+
+/* Post-section hook for "resolvers": prepares SSL contexts for any stream
+ * (TCP) nameserver of the section just parsed, then resets the current
+ * section pointer. Returns 0 on success, or ERR_ALERT|ERR_FATAL if SSL
+ * preparation fails for a server.
+ */
+int cfg_post_parse_resolvers()
+{
+	int err_code = 0;
+	struct server *srv;
+
+	if (curr_resolvers && curr_resolvers->px) {
+		/* prepare forward server descriptors */
+		for (srv = curr_resolvers->px->srv; srv; srv = srv->next) {
+			/* init ssl if needed */
+			if (srv->use_ssl == 1 && xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv) {
+				if (xprt_get(XPRT_SSL)->prepare_srv(srv)) {
+					ha_alert("unable to prepare SSL for server '%s' in resolvers section '%s'.\n", srv->id, curr_resolvers->id);
+					err_code |= ERR_ALERT | ERR_FATAL;
+					break;
+				}
+			}
+		}
+	}
+	curr_resolvers = NULL;
+	return err_code;
+}
+
+/* register the "resolvers" section parser and post-section hook, the deinit
+ * hook, the post-config finalizer, and the pre-check that opportunistically
+ * creates a "default" section from /etc/resolv.conf */
+REGISTER_CONFIG_SECTION("resolvers", cfg_parse_resolvers, cfg_post_parse_resolvers);
+REGISTER_POST_DEINIT(resolvers_deinit);
+REGISTER_CONFIG_POSTPARSER("dns runtime resolver", resolvers_finalize_config);
+REGISTER_PRE_CHECK(resolvers_create_default);
diff --git a/src/ring.c b/src/ring.c
new file mode 100644
index 0000000..849221e
--- /dev/null
+++ b/src/ring.c
@@ -0,0 +1,482 @@
+/*
+ * Ring buffer management
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/buf.h>
+#include <haproxy/cli.h>
+#include <haproxy/ring.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/thread.h>
+
+/* context used to dump the contents of a ring via "show events" or "show errors" */
+struct show_ring_ctx {
+	struct ring *ring; /* ring to be dumped */
+	size_t ofs;        /* storage offset to restart from; ~0=oldest (i.e. not attached yet) */
+	uint flags;        /* set of RING_WF_* */
+};
+
+/* Initialize a pre-allocated ring <ring> with the buffer area <area> of
+ * <size> bytes. The lock, waiters list and readers count are reset, and the
+ * initial read counter byte is written so the buffer is never empty.
+ */
+void ring_init(struct ring *ring, void *area, size_t size)
+{
+	HA_RWLOCK_INIT(&ring->lock);
+	LIST_INIT(&ring->waiters);
+	ring->readers_count = 0;
+	ring->buf = b_make(area, size, 0, 0);
+	/* write the initial RC byte */
+	b_putchr(&ring->buf, 0);
+}
+
+/* Allocates a ring of <size> bytes of storage and initializes it. Sizes
+ * below 2 are refused (too small to hold the initial read counter plus any
+ * payload). Returns the new ring, or NULL on allocation failure.
+ */
+struct ring *ring_new(size_t size)
+{
+	struct ring *ring;
+	void *area;
+
+	if (size < 2)
+		return NULL;
+
+	ring = malloc(sizeof(*ring));
+	area = ring ? malloc(size) : NULL;
+	if (!area) {
+		/* either allocation failed; free(NULL) is a no-op */
+		free(ring);
+		return NULL;
+	}
+
+	ring_init(ring, area, size);
+	return ring;
+}
+
+/* Creates a unified ring + storage area at address <area> for <size> bytes.
+ * If <area> is null, then it's allocated of the requested size. The ring
+ * struct is part of the area so the usable area is slightly reduced. However
+ * the ring storage is immediately adjacent to the struct. ring_free() will
+ * ignore such rings, so the caller is responsible for releasing them.
+ * Returns NULL if <size> cannot even hold the ring struct, or on allocation
+ * failure.
+ */
+struct ring *ring_make_from_area(void *area, size_t size)
+{
+	struct ring *ring = NULL;
+
+	if (size < sizeof(*ring))
+		return NULL;
+
+	if (!area)
+		area = malloc(size);
+	if (!area)
+		return NULL;
+
+	ring = area;
+	/* storage starts right after the struct; cast to char* because
+	 * pointer arithmetic on void* is a GNU extension, not ISO C */
+	ring_init(ring, (char *)area + sizeof(*ring), size - sizeof(*ring));
+	return ring;
+}
+
+/* Cast an unified ring + storage area to a ring from <area>, without
+ * reinitializing the data buffer: only the storage pointer is recomputed
+ * from <area>, and the waiters list, lock and readers count are reset.
+ */
+struct ring *ring_cast_from_area(void *area)
+{
+	struct ring *ring = NULL;
+
+	ring = area;
+	/* cast to char* to avoid void* arithmetic (GNU extension, not ISO C) */
+	ring->buf.area = (char *)area + sizeof(*ring);
+
+	HA_RWLOCK_INIT(&ring->lock);
+	LIST_INIT(&ring->waiters);
+	ring->readers_count = 0;
+
+	return ring;
+}
+
+/* Resizes existing ring <ring> to <size> which must be larger, without losing
+ * its contents. The new size must be at least as large as the previous one or
+ * no change will be performed. The pointer to the ring is returned on success,
+ * or NULL on allocation failure. This will lock the ring for writes.
+ */
+struct ring *ring_resize(struct ring *ring, size_t size)
+{
+	void *area;
+
+	/* fast path: never shrink, and avoid allocating/locking when the
+	 * buffer is already large enough */
+	if (b_size(&ring->buf) >= size)
+		return ring;
+
+	area = malloc(size);
+	if (!area)
+		return NULL;
+
+	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+
+	/* recheck the buffer's size, it may have changed during the malloc */
+	if (b_size(&ring->buf) < size) {
+		/* copy old contents */
+		b_getblk(&ring->buf, area, ring->buf.data, 0);
+		area = HA_ATOMIC_XCHG(&ring->buf.area, area);
+		ring->buf.size = size;
+	}
+
+	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+
+	/* <area> now holds either the old storage or our unused allocation */
+	free(area);
+	return ring;
+}
+
+/* Destroys and frees ring <ring>. Rings whose storage is embedded right
+ * after the struct (created by ring_make_from_area()) are ignored: their
+ * owner is responsible for releasing the whole area. NULL is a no-op.
+ */
+void ring_free(struct ring *ring)
+{
+	if (!ring)
+		return;
+
+	/* make sure it was not allocated by ring_make_from_area; compute the
+	 * adjacent address via char* since void* arithmetic is a GNU extension */
+	if (ring->buf.area == (void *)((char *)ring + sizeof(*ring)))
+		return;
+
+	free(ring->buf.area);
+	free(ring);
+}
+
+/* Tries to send <npfx> parts from <prefix> followed by <nmsg> parts from <msg>
+ * to ring <ring>. The message is sent atomically. It may be truncated to
+ * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
+ * two lists, it's just a convenience to help the caller prepend some prefixes
+ * when necessary. It takes the ring's write lock to make sure no other thread
+ * will touch the buffer during the update. Returns the number of bytes sent,
+ * or <=0 on failure.
+ */
+ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg)
+{
+	struct buffer *buf = &ring->buf;
+	struct appctx *appctx;
+	size_t totlen = 0;
+	size_t lenlen;
+	uint64_t dellen;
+	int dellenlen;
+	ssize_t sent = 0;
+	int i;
+
+	/* we have to find some room to add our message (the buffer is
+	 * never empty and at least contains the previous counter) and
+	 * to update both the buffer contents and heads at the same
+	 * time (it's doable using atomic ops but not worth the
+	 * trouble, let's just lock). For this we first need to know
+	 * the total message's length. We cannot measure it while
+	 * copying due to the varint encoding of the length.
+	 */
+	for (i = 0; i < npfx; i++)
+		totlen += pfx[i].len;
+	for (i = 0; i < nmsg; i++)
+		totlen += msg[i].len;
+
+	/* NOTE(review): truncation also triggers when maxlen==0, producing an
+	 * empty message; callers appear expected to pass a non-zero limit
+	 * -- confirm. */
+	if (totlen > maxlen)
+		totlen = maxlen;
+
+	lenlen = varint_bytes(totlen);
+
+	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+	/* varint length + payload + new RC byte + initial RC byte must fit */
+	if (lenlen + totlen + 1 + 1 > b_size(buf))
+		goto done_buf;
+
+	while (b_room(buf) < lenlen + totlen + 1) {
+		/* we need to delete the oldest message (from the end),
+		 * and we have to stop if there's a reader stuck there.
+		 * Unless there's corruption in the buffer it's guaranteed
+		 * that we have enough data to find 1 counter byte, a
+		 * varint-encoded length (1 byte min) and the message
+		 * payload (0 bytes min).
+		 */
+		if (*b_head(buf))
+			goto done_buf;
+		dellenlen = b_peek_varint(buf, 1, &dellen);
+		if (!dellenlen)
+			goto done_buf;
+		BUG_ON(b_data(buf) < 1 + dellenlen + dellen);
+
+		b_del(buf, 1 + dellenlen + dellen);
+	}
+
+	/* OK now we do have room */
+	__b_put_varint(buf, totlen);
+
+	/* copy the parts, re-accumulating totlen so the shared <maxlen>
+	 * budget applies across both lists */
+	totlen = 0;
+	for (i = 0; i < npfx; i++) {
+		size_t len = pfx[i].len;
+
+		if (len + totlen > maxlen)
+			len = maxlen - totlen;
+		if (len)
+			__b_putblk(buf, pfx[i].ptr, len);
+		totlen += len;
+	}
+
+	for (i = 0; i < nmsg; i++) {
+		size_t len = msg[i].len;
+
+		if (len + totlen > maxlen)
+			len = maxlen - totlen;
+		if (len)
+			__b_putblk(buf, msg[i].ptr, len);
+		totlen += len;
+	}
+
+	*b_tail(buf) = 0; buf->data++; // new read counter
+	sent = lenlen + totlen + 1;
+
+	/* notify potential readers */
+	list_for_each_entry(appctx, &ring->waiters, wait_entry)
+		appctx_wakeup(appctx);
+
+ done_buf:
+	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+	return sent;
+}
+
+/* Tries to reserve a reader slot on ring <ring>. This is meant to be used by
+ * low level appctx code such as CLI or ring forwarding. For higher level
+ * functions, please see the relevant parts in appctx or CLI. It returns
+ * non-zero on success or zero on failure if too many users (255) are already
+ * attached. On success, the caller MUST call ring_detach_appctx() to detach
+ * itself, even if it was never woken up.
+ */
+int ring_attach(struct ring *ring)
+{
+	int curr = ring->readers_count;
+
+	for (;;) {
+		/* hard limit of 255 concurrent readers */
+		if (curr >= 255)
+			return 0;
+		/* on CAS failure <curr> is refreshed with the current value */
+		if (_HA_ATOMIC_CAS(&ring->readers_count, &curr, curr + 1))
+			return 1;
+	}
+}
+
+/* detach an appctx from a ring. The appctx is expected to be waiting at offset
+ * <ofs> relative to the beginning of the storage, or ~0 if not waiting yet.
+ * Nothing is done if <ring> is NULL.
+ */
+void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
+{
+	if (!ring)
+		return;
+
+	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+	if (ofs != ~0) {
+		/* reader was still attached */
+		/* turn the absolute storage offset into one relative to the
+		 * buffer's head, wrapping around the storage area if needed */
+		if (ofs < b_head_ofs(&ring->buf))
+			ofs += b_size(&ring->buf) - b_head_ofs(&ring->buf);
+		else
+			ofs -= b_head_ofs(&ring->buf);
+
+		BUG_ON(ofs >= b_size(&ring->buf));
+		LIST_DEL_INIT(&appctx->wait_entry);
+		/* drop our reference on the read counter byte we were pinning */
+		HA_ATOMIC_DEC(b_peek(&ring->buf, ofs));
+	}
+	HA_ATOMIC_DEC(&ring->readers_count);
+	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+}
+
+/* Tries to attach CLI handler <appctx> as a new reader on ring <ring>. This is
+ * meant to be used when registering a CLI function to dump a buffer, so it
+ * returns zero on success, or non-zero on failure with a message in the appctx
+ * CLI context. It automatically sets the io_handler and io_release callbacks if
+ * they were not set. The <flags> take a combination of RING_WF_*.
+ */
+int ring_attach_cli(struct ring *ring, struct appctx *appctx, uint flags)
+{
+	struct show_ring_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+	if (!ring_attach(ring))
+		return cli_err(appctx,
+			       "Sorry, too many watchers (255) on this ring buffer. "
+			       "What could it have so interesting to attract so many watchers ?");
+
+	/* only install the default ring dump handlers when the caller did
+	 * not provide its own */
+	if (!appctx->io_handler)
+		appctx->io_handler = cli_io_handler_show_ring;
+	if (!appctx->io_release)
+		appctx->io_release = cli_io_release_show_ring;
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->ring = ring;
+	ctx->ofs = ~0; // start from the oldest event
+	ctx->flags = flags;
+	return 0;
+}
+
+/* This function dumps all events from the ring referenced by the appctx's
+ * show_ring_ctx into the appctx's output buffer, resuming from the context's
+ * stored offset (~0 meaning "start from the oldest known event"). The
+ * context's flags hold RING_WF_* options: RING_WF_WAIT_MODE means it must
+ * wait for new data or any key to be pressed; RING_WF_SEEK_NEW means it must
+ * seek directly to the end to wait for new contents. It returns 0 if the
+ * output buffer is full and it needs to be called again, otherwise non-zero.
+ * It is meant to be used with cli_io_release_show_ring() to clean up.
+ */
+int cli_io_handler_show_ring(struct appctx *appctx)
+{
+	struct show_ring_ctx *ctx = appctx->svcctx;
+	struct stconn *sc = appctx_sc(appctx);
+	struct ring *ring = ctx->ring;
+	struct buffer *buf = &ring->buf;
+	size_t ofs;
+	size_t last_ofs;
+	uint64_t msg_len;
+	size_t len, cnt;
+	int ret;
+
+	/* FIXME: Don't watch the other side !*/
+	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+		return 1;
+
+	/* leave the waiters list so writers stop waking us while we dump */
+	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+	LIST_DEL_INIT(&appctx->wait_entry);
+	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+
+	HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
+
+	/* explanation for the initialization below: it would be better to do
+	 * this in the parsing function but this would occasionally result in
+	 * dropped events because we'd take a reference on the oldest message
+	 * and keep it while being scheduled. Thus instead let's take it the
+	 * first time we enter here so that we have a chance to pass many
+	 * existing messages before grabbing a reference to a location. This
+	 * value cannot be produced after initialization.
+	 */
+	if (unlikely(ctx->ofs == ~0)) {
+		/* going to the end means looking at tail-1 */
+		ctx->ofs = b_peek_ofs(buf, (ctx->flags & RING_WF_SEEK_NEW) ? b_data(buf) - 1 : 0);
+		HA_ATOMIC_INC(b_orig(buf) + ctx->ofs);
+	}
+
+	/* we were already there, adjust the offset to be relative to
+	 * the buffer's head and remove us from the counter.
+	 */
+	ofs = ctx->ofs - b_head_ofs(buf);
+	if (ctx->ofs < b_head_ofs(buf))
+		ofs += b_size(buf);
+
+	BUG_ON(ofs >= buf->size);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+	/* in this loop, ofs always points to the counter byte that precedes
+	 * the message so that we can take our reference there if we have to
+	 * stop before the end (ret=0).
+	 */
+	ret = 1;
+	while (ofs + 1 < b_data(buf)) {
+		cnt = 1;
+		len = b_peek_varint(buf, ofs + cnt, &msg_len);
+		if (!len)
+			break;
+		cnt += len;
+		BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+
+		if (unlikely(msg_len + 1 > b_size(&trash))) {
+			/* too large a message to ever fit, let's skip it */
+			ofs += cnt + msg_len;
+			continue;
+		}
+
+		chunk_reset(&trash);
+		len = b_getblk(buf, trash.area, msg_len, ofs + cnt);
+		trash.data += len;
+		trash.area[trash.data++] = '\n';
+
+		if (applet_putchk(appctx, &trash) == -1) {
+			ret = 0;
+			break;
+		}
+		ofs += cnt + msg_len;
+	}
+
+	/* pin the counter byte where we stopped before releasing the lock */
+	HA_ATOMIC_INC(b_peek(buf, ofs));
+	last_ofs = b_tail_ofs(buf);
+	ctx->ofs = b_peek_ofs(buf, ofs);
+	HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
+
+	if (ret && (ctx->flags & RING_WF_WAIT_MODE)) {
+		/* we've drained everything and are configured to wait for more
+		 * data or an event (keypress, close)
+		 */
+		if (!sc_oc(sc)->output && !(sc->flags & SC_FL_SHUT_DONE)) {
+			/* let's be woken up once new data arrive */
+			HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+			LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+			ofs = b_tail_ofs(&ring->buf);
+			HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+			if (ofs != last_ofs) {
+				/* more data was added into the ring between the
+				 * unlock and the lock, and the writer might not
+				 * have seen us. We need to reschedule a read.
+				 */
+				applet_have_more_data(appctx);
+			} else
+				applet_have_no_more_data(appctx);
+			ret = 0;
+		}
+		/* always drain all the request */
+		co_skip(sc_oc(sc), sc_oc(sc)->output);
+	}
+
+	applet_expect_no_data(appctx);
+	return ret;
+}
+
+/* Release handler paired with cli_io_handler_show_ring(): detaches the
+ * appctx from the ring recorded in its show_ring_ctx, using the stored
+ * offset (~0 when the reader never attached to a location).
+ */
+void cli_io_release_show_ring(struct appctx *appctx)
+{
+	struct show_ring_ctx *ctx = appctx->svcctx;
+
+	ring_detach_appctx(ctx->ring, appctx, ctx->ofs);
+}
+
+/* Returns the MAXIMUM payload len that could theoretically fit into the ring
+ * based on ring buffer size.
+ *
+ * Computation logic relies on implementation details from 'ring-t.h'.
+ */
+size_t ring_max_payload(const struct ring *ring)
+{
+	size_t avail;
+
+	/* usable bytes = bufsize minus the initial RC byte and the payload's
+	 * RC byte */
+	avail = b_size(&ring->buf) - 2;
+
+	/* the varint-encoded payload length consumes space too */
+	return avail - varint_bytes(avail);
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/sample.c b/src/sample.c
new file mode 100644
index 0000000..89de612
--- /dev/null
+++ b/src/sample.c
@@ -0,0 +1,5173 @@
+/*
+ * Sample management functions.
+ *
+ * Copyright 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <string.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+
+#include <import/mjson.h>
+#include <import/sha1.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/auth.h>
+#include <haproxy/base64.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/clock.h>
+#include <haproxy/errors.h>
+#include <haproxy/fix.h>
+#include <haproxy/global.h>
+#include <haproxy/hash.h>
+#include <haproxy/http.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/mqtt.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/protobuf.h>
+#include <haproxy/proxy.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/sink.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/vars.h>
+#include <haproxy/xxhash.h>
+#include <haproxy/jwt.h>
+
+/* sample type names, indexed by the SMP_T_* type identifier; used when
+ * dumping registered fetch/converter keywords and their types.
+ */
+const char *smp_to_type[SMP_TYPES] = {
+ [SMP_T_ANY] = "any",
+ [SMP_T_SAME] = "same",
+ [SMP_T_BOOL] = "bool",
+ [SMP_T_SINT] = "sint",
+ [SMP_T_ADDR] = "addr",
+ [SMP_T_IPV4] = "ipv4",
+ [SMP_T_IPV6] = "ipv6",
+ [SMP_T_STR] = "str",
+ [SMP_T_BIN] = "bin",
+ [SMP_T_METH] = "meth",
+};
+
+/* static per-thread sample used in sample_process() when <p> is NULL */
+static THREAD_LOCAL struct sample temp_smp;
+
+/* list head of all known sample fetch keywords; filled by
+ * sample_register_fetches() below
+ */
+static struct sample_fetch_kw_list sample_fetches = {
+ .list = LIST_HEAD_INIT(sample_fetches.list)
+};
+
+/* list head of all known sample format conversion keywords; filled by
+ * sample_register_convs() below
+ */
+static struct sample_conv_kw_list sample_convs = {
+ .list = LIST_HEAD_INIT(sample_convs.list)
+};
+
+/* fetch_cap[src] is the bitmask of checkpoints (SMP_VAL_* bits) at which a
+ * sample fetch depending on source <src> may validly be used. The filler
+ * SMP_VAL___________ marks a checkpoint where the source is not available.
+ * This table is OR-ed into each keyword's ->val in sample_register_fetches().
+ */
+const unsigned int fetch_cap[SMP_SRC_ENTRIES] = {
+ [SMP_SRC_CONST] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL_CFG_PARSER |
+ SMP_VAL_CLI_PARSER ),
+
+ [SMP_SRC_INTRN] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL_CLI_PARSER ),
+
+ [SMP_SRC_LISTN] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_FTEND] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L4CLI] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L5CLI] = (SMP_VAL___________ | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_TRACK] = (SMP_VAL_FE_CON_ACC | SMP_VAL_FE_SES_ACC | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L6REQ] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRQHV] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRQHP] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_FE_REQ_CNT |
+ SMP_VAL_FE_HRQ_HDR | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRQBO] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL_FE_HRQ_BDY | SMP_VAL_FE_SET_BCK |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_BKEND] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL_BE_REQ_CNT | SMP_VAL_BE_HRQ_HDR | SMP_VAL_BE_HRQ_BDY |
+ SMP_VAL_BE_SET_SRV | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_SERVR] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL_BE_SRV_CON | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L4SRV] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L5SRV] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_L6RES] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL___________ | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRSHV] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL___________ | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRSHP] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL_BE_RES_CNT |
+ SMP_VAL_BE_HRS_HDR | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL_FE_LOG_END | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_HRSBO] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL_BE_HRS_BDY | SMP_VAL_BE_STO_RUL |
+ SMP_VAL_FE_RES_CNT | SMP_VAL_FE_HRS_HDR | SMP_VAL_FE_HRS_BDY |
+ SMP_VAL___________ | SMP_VAL_BE_CHK_RUL | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_RQFIN] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_RSFIN] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_TXFIN] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+
+ [SMP_SRC_SSFIN] = (SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL_FE_LOG_END | SMP_VAL___________ | SMP_VAL___________ |
+ SMP_VAL___________ ),
+};
+
+/* human-readable names for each SMP_SRC_* sample source, used by
+ * sample_src_names() below to build diagnostic messages. Every source that
+ * appears in fetch_cap[] must have a name here: a missing entry would leave
+ * NULL, which sample_src_names() would then pass to snprintf("%s") —
+ * undefined behavior. SMP_SRC_CONST was missing and is added back.
+ */
+static const char *fetch_src_names[SMP_SRC_ENTRIES] = {
+ [SMP_SRC_CONST] = "constant",
+ [SMP_SRC_INTRN] = "internal state",
+ [SMP_SRC_LISTN] = "listener",
+ [SMP_SRC_FTEND] = "frontend",
+ [SMP_SRC_L4CLI] = "client address",
+ [SMP_SRC_L5CLI] = "client-side connection",
+ [SMP_SRC_TRACK] = "track counters",
+ [SMP_SRC_L6REQ] = "request buffer",
+ [SMP_SRC_HRQHV] = "HTTP request headers",
+ [SMP_SRC_HRQHP] = "HTTP request",
+ [SMP_SRC_HRQBO] = "HTTP request body",
+ [SMP_SRC_BKEND] = "backend",
+ [SMP_SRC_SERVR] = "server",
+ [SMP_SRC_L4SRV] = "server address",
+ [SMP_SRC_L5SRV] = "server-side connection",
+ [SMP_SRC_L6RES] = "response buffer",
+ [SMP_SRC_HRSHV] = "HTTP response headers",
+ [SMP_SRC_HRSHP] = "HTTP response",
+ [SMP_SRC_HRSBO] = "HTTP response body",
+ [SMP_SRC_RQFIN] = "request buffer statistics",
+ [SMP_SRC_RSFIN] = "response buffer statistics",
+ [SMP_SRC_TXFIN] = "transaction statistics",
+ [SMP_SRC_SSFIN] = "session statistics",
+};
+
+/* human-readable names for each SMP_CKP_* rule checkpoint, returned by
+ * sample_ckp_names() below to describe where a fetch/ACL is being used.
+ */
+static const char *fetch_ckp_names[SMP_CKP_ENTRIES] = {
+ [SMP_CKP_FE_CON_ACC] = "frontend tcp-request connection rule",
+ [SMP_CKP_FE_SES_ACC] = "frontend tcp-request session rule",
+ [SMP_CKP_FE_REQ_CNT] = "frontend tcp-request content rule",
+ [SMP_CKP_FE_HRQ_HDR] = "frontend http-request header rule",
+ [SMP_CKP_FE_HRQ_BDY] = "frontend http-request body rule",
+ [SMP_CKP_FE_SET_BCK] = "frontend use-backend rule",
+ [SMP_CKP_BE_REQ_CNT] = "backend tcp-request content rule",
+ [SMP_CKP_BE_HRQ_HDR] = "backend http-request header rule",
+ [SMP_CKP_BE_HRQ_BDY] = "backend http-request body rule",
+ [SMP_CKP_BE_SET_SRV] = "backend use-server, balance or stick-match rule",
+ [SMP_CKP_BE_SRV_CON] = "server source selection",
+ [SMP_CKP_BE_RES_CNT] = "backend tcp-response content rule",
+ [SMP_CKP_BE_HRS_HDR] = "backend http-response header rule",
+ [SMP_CKP_BE_HRS_BDY] = "backend http-response body rule",
+ [SMP_CKP_BE_STO_RUL] = "backend stick-store rule",
+ [SMP_CKP_FE_RES_CNT] = "frontend tcp-response content rule",
+ [SMP_CKP_FE_HRS_HDR] = "frontend http-response header rule",
+ [SMP_CKP_FE_HRS_BDY] = "frontend http-response body rule",
+ [SMP_CKP_FE_LOG_END] = "logs",
+ [SMP_CKP_BE_CHK_RUL] = "backend tcp-check rule",
+ [SMP_CKP_CFG_PARSER] = "configuration parser",
+ [SMP_CKP_CLI_PARSER] = "CLI parser",
+};
+
+/* This function returns the most accurate expected type of the data returned
+ * by the sample_expr. It assumes that the <expr> and all of its converters are
+ * properly initialized.
+ *
+ * Returns an SMP_T_* type identifier. The walk is backwards (last converter
+ * first) because the last effective output type is most likely near the end
+ * of the chain.
+ */
+int smp_expr_output_type(struct sample_expr *expr)
+{
+ struct sample_conv_expr *cur_smp = NULL;
+ int cur_type = SMP_T_ANY; /* current type in the chain */
+ int next_type = SMP_T_ANY; /* next type in the chain */
+
+ if (!LIST_ISEMPTY(&expr->conv_exprs)) {
+ /* Ignore converters that output SMP_T_SAME if switching to them is
+ * conversion-free. (such converter's output match with input, thus only
+ * their input is considered)
+ *
+ * We start looking at the end of conv list and then loop back until the
+ * sample fetch for better performance (it is more likely to find the last
+ * effective output type near the end of the chain)
+ */
+ do {
+ /* step one converter backwards from the previous position */
+ struct list *cur_head = (cur_smp) ? &cur_smp->list : &expr->conv_exprs;
+
+ cur_smp = LIST_PREV(cur_head, struct sample_conv_expr *, list);
+ if (cur_smp->conv->out_type != SMP_T_SAME) {
+ /* current converter has effective out_type */
+ cur_type = cur_smp->conv->out_type;
+ goto out;
+ }
+ else if (sample_casts[cur_type][next_type] != c_none)
+ return next_type; /* switching to next type is not conversion-free */
+
+ next_type = cur_smp->conv->in_type;
+ } while (cur_smp != LIST_NEXT(&expr->conv_exprs, struct sample_conv_expr *, list));
+ }
+ /* conv list empty or doesn't have effective out_type,
+ * falling back to sample fetch out_type
+ */
+ cur_type = expr->fetch->out_type;
+ out:
+ if (sample_casts[cur_type][next_type] != c_none)
+ return next_type; /* switching to next type is not conversion-free */
+ return cur_type;
+}
+
+
+/* fill the trash with a comma-delimited list of source names for the <use> bit
+ * field which must be composed of a non-null set of SMP_USE_* flags. The return
+ * value is the pointer to the string in the trash buffer.
+ *
+ * NOTE(review): assumes every set bit has a non-NULL entry in
+ * fetch_src_names[]; a missing name would be passed to snprintf("%s").
+ */
+const char *sample_src_names(unsigned int use)
+{
+ int bit;
+
+ trash.data = 0;
+ trash.area[0] = '\0';
+ for (bit = 0; bit < SMP_SRC_ENTRIES; bit++) {
+ /* stop early once no bit at or above <bit> remains set */
+ if (!(use & ~((1 << bit) - 1)))
+ break; /* no more bits */
+
+ if (!(use & (1 << bit)))
+ continue; /* bit not set */
+
+ /* prepend a comma when a lower bit was already emitted */
+ trash.data += snprintf(trash.area + trash.data,
+ trash.size - trash.data, "%s%s",
+ (use & ((1 << bit) - 1)) ? "," : "",
+ fetch_src_names[bit]);
+ }
+ return trash.area;
+}
+
+/* Returns the name of the sample checkpoint matching the lowest bit set in
+ * <use>, or an error string when no known bit is set. Bits above the lowest
+ * one are ignored.
+ */
+const char *sample_ckp_names(unsigned int use)
+{
+ int pos = 0;
+
+ while (pos < SMP_CKP_ENTRIES) {
+ if (use & (1 << pos))
+ return fetch_ckp_names[pos];
+ pos++;
+ }
+ return "unknown sample check place, please report this bug";
+}
+
+/*
+ * Registers the sample fetch keyword list <kwl> as a list of valid keywords
+ * for next parsing sessions. The fetch keywords capabilities are also computed
+ * from their ->use field.
+ */
+void sample_register_fetches(struct sample_fetch_kw_list *kwl)
+{
+ struct sample_fetch *sf;
+ int bit;
+
+ /* for each keyword, OR into ->val the checkpoint capabilities of
+ * every source bit declared in ->use (see fetch_cap[] above)
+ */
+ for (sf = kwl->kw; sf->kw != NULL; sf++) {
+ for (bit = 0; bit < SMP_SRC_ENTRIES; bit++)
+ if (sf->use & (1 << bit))
+ sf->val |= fetch_cap[bit];
+ }
+ LIST_APPEND(&sample_fetches.list, &kwl->list);
+}
+
+/*
+ * Registers the sample format conversion keyword list <pckl> as a list of
+ * valid keywords for next parsing sessions.
+ */
+void sample_register_convs(struct sample_conv_kw_list *pckl)
+{
+ LIST_APPEND(&sample_convs.list, &pckl->list);
+}
+
+/*
+ * Returns the pointer to the sample fetch keyword structure identified by
+ * the string of length <len> in buffer <kw>, or NULL if not found.
+ */
+struct sample_fetch *find_sample_fetch(const char *kw, int len)
+{
+ int index;
+ struct sample_fetch_kw_list *kwl;
+
+ list_for_each_entry(kwl, &sample_fetches.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ /* exact match: same <len> first bytes AND the registered
+ * keyword ends there
+ */
+ if (strncmp(kwl->kw[index].kw, kw, len) == 0 &&
+ kwl->kw[index].kw[len] == '\0')
+ return &kwl->kw[index];
+ }
+ }
+ return NULL;
+}
+
+/* dump list of registered sample fetch keywords on stdout, sorted by keyword
+ * name, preceded by an ASCII-art header mapping each column to a checkpoint
+ * name (one column per SMP_CKP_* entry).
+ */
+void smp_dump_fetch_kw(void)
+{
+ struct sample_fetch_kw_list *kwl;
+ struct sample_fetch *kwp, *kw;
+ uint64_t mask;
+ int index;
+ int arg;
+ int bit;
+
+ /* draw one header line per checkpoint, with slanted connectors
+ * linking each column to its label on the right
+ */
+ for (bit = 0; bit <= SMP_CKP_ENTRIES + 1; bit++) {
+ putchar('#');
+ for (index = 0; bit + index <= SMP_CKP_ENTRIES; index++)
+ putchar(' ');
+ for (index = 0; index < bit && index < SMP_CKP_ENTRIES; index++)
+ printf((bit <= SMP_CKP_ENTRIES) ? "/ " : " |");
+ for (index = bit; bit < SMP_CKP_ENTRIES && index < SMP_CKP_ENTRIES + 2; index++)
+ if (index == bit)
+ putchar('_');
+ else if (index == bit + 1)
+ putchar('.');
+ else
+ putchar('-');
+ printf(" %s\n", (bit < SMP_CKP_ENTRIES) ? fetch_ckp_names[bit] : "");
+ }
+
+ /* selection sort over all registered keywords: each pass finds the
+ * smallest keyword strictly greater than the previous one (<kwp>)
+ */
+ for (kw = kwp = NULL;; kwp = kw) {
+ list_for_each_entry(kwl, &sample_fetches.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (strordered(kwp ? kwp->kw : NULL,
+ kwl->kw[index].kw,
+ kw != kwp ? kw->kw : NULL))
+ kw = &kwl->kw[index];
+ }
+ }
+
+ if (kw == kwp)
+ break;
+
+ /* one 'Y'/'.' cell per checkpoint where the keyword is valid */
+ printf("[ ");
+ for (bit = 0; bit < SMP_CKP_ENTRIES; bit++)
+ printf("%s", (kw->val & (1 << bit)) ? "Y " : ". ");
+
+ printf("] %s", kw->kw);
+ if (kw->arg_mask) {
+ mask = kw->arg_mask >> ARGM_BITS;
+ printf("(");
+ for (arg = 0;
+ arg < ARGM_NBARGS && ((mask >> (arg * ARGT_BITS)) & ARGT_MASK);
+ arg++) {
+ if (arg == (kw->arg_mask & ARGM_MASK)) {
+ /* now dumping extra args */
+ printf("[");
+ }
+ if (arg)
+ printf(",");
+ printf("%s", arg_type_names[(mask >> (arg * ARGT_BITS)) & ARGT_MASK]);
+ }
+ if (arg > (kw->arg_mask & ARGM_MASK)) {
+ /* extra args were dumped */
+ printf("]");
+ }
+ printf(")");
+ }
+ printf(": %s", smp_to_type[kw->out_type]);
+ printf("\n");
+ }
+}
+
+/* dump list of registered sample converter keywords on stdout, sorted by
+ * keyword name. For each converter, its optional argument list and its type
+ * signature "in_type => out_type" are printed.
+ *
+ * Fix: the final printf used to emit out_type before in_type, reversing the
+ * displayed conversion direction; a converter transforms in_type into
+ * out_type, so the arrow must read "in => out".
+ */
+void smp_dump_conv_kw(void)
+{
+ struct sample_conv_kw_list *kwl;
+ struct sample_conv *kwp, *kw;
+ uint64_t mask;
+ int index;
+ int arg;
+
+ /* selection sort over all registered keywords: each pass finds the
+ * smallest keyword strictly greater than the previous one (<kwp>)
+ */
+ for (kw = kwp = NULL;; kwp = kw) {
+ list_for_each_entry(kwl, &sample_convs.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (strordered(kwp ? kwp->kw : NULL,
+ kwl->kw[index].kw,
+ kw != kwp ? kw->kw : NULL))
+ kw = &kwl->kw[index];
+ }
+ }
+
+ if (kw == kwp)
+ break;
+
+ printf("%s", kw->kw);
+ if (kw->arg_mask) {
+ mask = kw->arg_mask >> ARGM_BITS;
+ printf("(");
+ for (arg = 0;
+ arg < ARGM_NBARGS && ((mask >> (arg * ARGT_BITS)) & ARGT_MASK);
+ arg++) {
+ if (arg == (kw->arg_mask & ARGM_MASK)) {
+ /* now dumping extra args */
+ printf("[");
+ }
+ if (arg)
+ printf(",");
+ printf("%s", arg_type_names[(mask >> (arg * ARGT_BITS)) & ARGT_MASK]);
+ }
+ if (arg > (kw->arg_mask & ARGM_MASK)) {
+ /* extra args were dumped */
+ printf("]");
+ }
+ printf(")");
+ }
+ printf(": %s => %s", smp_to_type[kw->in_type], smp_to_type[kw->out_type]);
+ printf("\n");
+ }
+}
+
+/* This function browses the list of available sample fetches. <current> is
+ * the last used sample fetch. On the first call, it must be set to NULL.
+ * <idx> is the index of the next sample fetch entry. It is used as private
+ * state and does not need to be initialized by the caller.
+ *
+ * It always returns the next sample_fetch entry, and NULL when the end of
+ * the list is reached.
+ */
+struct sample_fetch *sample_fetch_getnext(struct sample_fetch *current, int *idx)
+{
+ struct sample_fetch_kw_list *kwl;
+ struct sample_fetch *base;
+
+ if (!current) {
+ /* Get first kwl entry. */
+ kwl = LIST_NEXT(&sample_fetches.list, struct sample_fetch_kw_list *, list);
+ (*idx) = 0;
+ } else {
+ /* Get kwl corresponding to the current entry: rewind from
+ * <current> back to the start of its kw[] array.
+ */
+ base = current + 1 - (*idx);
+ kwl = container_of(base, struct sample_fetch_kw_list, kw);
+ }
+
+ while (1) {
+
+ /* Check if kwl is the last entry. */
+ if (&kwl->list == &sample_fetches.list)
+ return NULL;
+
+ /* idx contains the next keyword. If it is available, return it. */
+ if (kwl->kw[*idx].kw) {
+ (*idx)++;
+ return &kwl->kw[(*idx)-1];
+ }
+
+ /* get next entry in the main list, and return NULL if the end is reached. */
+ kwl = LIST_NEXT(&kwl->list, struct sample_fetch_kw_list *, list);
+
+ /* Reset index to 0 and loop again on the next list. */
+ (*idx) = 0;
+ }
+}
+
+/* This function browses the list of available converters. <current> is
+ * the last used converter. On the first call, it must be set to NULL.
+ * <idx> is the index of the next converter entry. It is used as private
+ * state and does not need to be initialized by the caller.
+ *
+ * It always returns the next sample_conv entry, and NULL when the end of
+ * the list is reached.
+ */
+struct sample_conv *sample_conv_getnext(struct sample_conv *current, int *idx)
+{
+ struct sample_conv_kw_list *kwl;
+ struct sample_conv *base;
+
+ if (!current) {
+ /* Get first kwl entry. */
+ kwl = LIST_NEXT(&sample_convs.list, struct sample_conv_kw_list *, list);
+ (*idx) = 0;
+ } else {
+ /* Get kwl corresponding to the current entry: rewind from
+ * <current> back to the start of its kw[] array.
+ */
+ base = current + 1 - (*idx);
+ kwl = container_of(base, struct sample_conv_kw_list, kw);
+ }
+
+ while (1) {
+ /* Check if kwl is the last entry. */
+ if (&kwl->list == &sample_convs.list)
+ return NULL;
+
+ /* idx contains the next keyword. If it is available, return it. */
+ if (kwl->kw[*idx].kw) {
+ (*idx)++;
+ return &kwl->kw[(*idx)-1];
+ }
+
+ /* get next entry in the main list, and return NULL if the end is reached. */
+ kwl = LIST_NEXT(&kwl->list, struct sample_conv_kw_list *, list);
+
+ /* Reset index to 0 and loop again on the next list. */
+ (*idx) = 0;
+ }
+}
+
+/*
+ * Returns the pointer to the sample format conversion keyword structure
+ * identified by the string of length <len> in buffer <kw>, or NULL if not
+ * found.
+ */
+struct sample_conv *find_sample_conv(const char *kw, int len)
+{
+ int index;
+ struct sample_conv_kw_list *kwl;
+
+ list_for_each_entry(kwl, &sample_convs.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ /* exact match: same <len> first bytes AND the registered
+ * keyword ends there
+ */
+ if (strncmp(kwl->kw[index].kw, kw, len) == 0 &&
+ kwl->kw[index].kw[len] == '\0')
+ return &kwl->kw[index];
+ }
+ }
+ return NULL;
+}
+
+/******************************************************************/
+/* Sample casts functions */
+/******************************************************************/
+
+/* cast IPv4 -> sint: host-order 32-bit value of the address. Always succeeds. */
+static int c_ip2int(struct sample *smp)
+{
+ smp->data.u.sint = ntohl(smp->data.u.ipv4.s_addr);
+ smp->data.type = SMP_T_SINT;
+ return 1;
+}
+
+/* cast IPv4 -> str: dotted-quad text in a trash chunk. Returns 0 if
+ * inet_ntop() fails (e.g. trash too small), 1 on success.
+ */
+static int c_ip2str(struct sample *smp)
+{
+ struct buffer *trash = get_trash_chunk();
+
+ if (!inet_ntop(AF_INET, (void *)&smp->data.u.ipv4, trash->area, trash->size))
+ return 0;
+
+ trash->data = strlen(trash->area);
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_STR;
+ /* the result is a private copy, no longer const */
+ smp->flags &= ~SMP_F_CONST;
+
+ return 1;
+}
+
+/* cast IPv4 -> IPv6 via v4tov6() (v4-mapped form). Always succeeds. */
+static int c_ip2ipv6(struct sample *smp)
+{
+ v4tov6(&smp->data.u.ipv6, &smp->data.u.ipv4);
+ smp->data.type = SMP_T_IPV6;
+ return 1;
+}
+
+/* cast IPv6 -> IPv4: returns 0 when v6tov4() cannot map the address, 1 on
+ * success.
+ */
+static int c_ipv62ip(struct sample *smp)
+{
+ if (!v6tov4(&smp->data.u.ipv4, &smp->data.u.ipv6))
+ return 0;
+ smp->data.type = SMP_T_IPV4;
+ return 1;
+}
+
+/* cast IPv6 -> str: textual form in a trash chunk. Returns 0 if inet_ntop()
+ * fails, 1 on success.
+ */
+static int c_ipv62str(struct sample *smp)
+{
+ struct buffer *trash = get_trash_chunk();
+
+ if (!inet_ntop(AF_INET6, (void *)&smp->data.u.ipv6, trash->area, trash->size))
+ return 0;
+
+ trash->data = strlen(trash->area);
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_STR;
+ /* the result is a private copy, no longer const */
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/*
+static int c_ipv62ip(struct sample *smp)
+{
+ return v6tov4(&smp->data.u.ipv4, &smp->data.u.ipv6);
+}
+*/
+
+/* cast sint -> IPv4: low 32 bits taken as a host-order address. Always
+ * succeeds.
+ */
+static int c_int2ip(struct sample *smp)
+{
+ smp->data.u.ipv4.s_addr = htonl((unsigned int)smp->data.u.sint);
+ smp->data.type = SMP_T_IPV4;
+ return 1;
+}
+
+/* cast sint -> IPv6: go through IPv4 first, then map it to IPv6 with
+ * v4tov6(). Always succeeds.
+ */
+static int c_int2ipv6(struct sample *smp)
+{
+ smp->data.u.ipv4.s_addr = htonl((unsigned int)smp->data.u.sint);
+ v4tov6(&smp->data.u.ipv6, &smp->data.u.ipv4);
+ smp->data.type = SMP_T_IPV6;
+ return 1;
+}
+
+/* cast str -> addr: try to parse as IPv4 first, then as IPv6. Returns 0 when
+ * neither parse succeeds, 1 otherwise, with data.type set to the parsed
+ * family.
+ */
+static int c_str2addr(struct sample *smp)
+{
+ if (!buf2ip(smp->data.u.str.area, smp->data.u.str.data, &smp->data.u.ipv4)) {
+ if (!buf2ip6(smp->data.u.str.area, smp->data.u.str.data, &smp->data.u.ipv6))
+ return 0;
+ smp->data.type = SMP_T_IPV6;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+ }
+ smp->data.type = SMP_T_IPV4;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* cast str -> IPv4: returns 0 if the string does not parse as an IPv4
+ * address, 1 on success.
+ */
+static int c_str2ip(struct sample *smp)
+{
+ if (!buf2ip(smp->data.u.str.area, smp->data.u.str.data, &smp->data.u.ipv4))
+ return 0;
+ smp->data.type = SMP_T_IPV4;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* cast str -> IPv6: returns 0 if the string does not parse as an IPv6
+ * address, 1 on success.
+ */
+static int c_str2ipv6(struct sample *smp)
+{
+ if (!buf2ip6(smp->data.u.str.area, smp->data.u.str.data, &smp->data.u.ipv6))
+ return 0;
+ smp->data.type = SMP_T_IPV6;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/*
+ * cast bin -> str: the data is kept in place, only the length may shrink.
+ * The NUL char always enforces the end of string if it is met.
+ * Data is never changed, so we can ignore the CONST case
+ */
+static int c_bin2str(struct sample *smp)
+{
+ int i;
+
+ /* truncate at the first embedded NUL byte, if any */
+ for (i = 0; i < smp->data.u.str.data; i++) {
+ if (!smp->data.u.str.area[i]) {
+ smp->data.u.str.data = i;
+ break;
+ }
+ }
+ smp->data.type = SMP_T_STR;
+ return 1;
+}
+
+/* cast sint -> str: decimal representation rendered right-aligned into a
+ * trash chunk by lltoa_r(); the chunk's area/size are adjusted to point at
+ * the first digit. Returns 0 if lltoa_r() fails, 1 on success.
+ */
+static int c_int2str(struct sample *smp)
+{
+ struct buffer *trash = get_trash_chunk();
+ char *pos;
+
+ pos = lltoa_r(smp->data.u.sint, trash->area, trash->size);
+ if (!pos)
+ return 0;
+
+ trash->size = trash->size - (pos - trash->area);
+ trash->area = pos;
+ trash->data = strlen(pos);
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_STR;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* This function unconditionally duplicates data and removes the "const" flag.
+ * For strings and binary blocks, it also provides a known allocated size with
+ * a length that is capped to the size, and ensures a trailing zero is always
+ * appended for strings. This is necessary for some operations which may
+ * require to extend the length. It returns 0 if it fails, 1 on success.
+ *
+ * NOTE(review): the copy lives in a trash chunk, so it is only valid until
+ * the trash chunks are recycled — presumably within the same processing
+ * round; confirm against get_trash_chunk() semantics.
+ */
+int smp_dup(struct sample *smp)
+{
+ struct buffer *trash;
+
+ switch (smp->data.type) {
+ case SMP_T_BOOL:
+ case SMP_T_SINT:
+ case SMP_T_ADDR:
+ case SMP_T_IPV4:
+ case SMP_T_IPV6:
+ /* These type are not const. */
+ break;
+
+ case SMP_T_METH:
+ /* only HTTP_METH_OTHER carries a string needing duplication */
+ if (smp->data.u.meth.meth != HTTP_METH_OTHER)
+ break;
+ __fallthrough;
+
+ case SMP_T_STR:
+ /* copy into a trash chunk, cap to size-1, NUL-terminate */
+ trash = get_trash_chunk();
+ trash->data = smp->data.type == SMP_T_STR ?
+ smp->data.u.str.data : smp->data.u.meth.str.data;
+ if (trash->data > trash->size - 1)
+ trash->data = trash->size - 1;
+
+ memcpy(trash->area, smp->data.type == SMP_T_STR ?
+ smp->data.u.str.area : smp->data.u.meth.str.area,
+ trash->data);
+ trash->area[trash->data] = 0;
+ smp->data.u.str = *trash;
+ break;
+
+ case SMP_T_BIN:
+ /* copy into a trash chunk, cap to size, no NUL terminator */
+ trash = get_trash_chunk();
+ trash->data = smp->data.u.str.data;
+ if (trash->data > trash->size)
+ trash->data = trash->size;
+
+ memcpy(trash->area, smp->data.u.str.area, trash->data);
+ smp->data.u.str = *trash;
+ break;
+
+ default:
+ /* Other cases are unexpected. */
+ return 0;
+ }
+
+ /* remove const flag */
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* identity cast: accepts the sample as-is, always succeeds */
+int c_none(struct sample *smp)
+{
+ return 1;
+}
+
+/* special converter function used by pseudo types in the compatibility matrix
+ * to inform that the conversion is theoretically allowed at parsing time.
+ *
+ * However, being a pseudo type, it may not be emitted by fetches or converters
+ * so this function should never be called. If this is the case, then it means
+ * that a pseudo type has been used as a final output type at runtime, which is
+ * considered as a bug and should be fixed. To help spot this kind of bug, the
+ * process will crash in this case.
+ *
+ * Never returns normally.
+ */
+int c_pseudo(struct sample *smp)
+{
+ ABORT_NOW(); // die loudly
+ /* never reached */
+ return 0;
+}
+
+/* cast str -> sint using read_int64(). Returns 0 on an empty string, 1
+ * otherwise.
+ *
+ * NOTE(review): read_int64() presumably stops at the first non-digit, so
+ * trailing garbage is silently ignored rather than rejected — confirm
+ * against its definition in tools.c.
+ */
+static int c_str2int(struct sample *smp)
+{
+ const char *str;
+ const char *end;
+
+ if (smp->data.u.str.data == 0)
+ return 0;
+
+ str = smp->data.u.str.area;
+ end = smp->data.u.str.area + smp->data.u.str.data;
+
+ smp->data.u.sint = read_int64(&str, end);
+ smp->data.type = SMP_T_SINT;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* cast str -> meth: known methods become an enum value only; unknown ones
+ * (HTTP_METH_OTHER) keep a reference to the original string, so the CONST
+ * flag is preserved in that case. Always succeeds.
+ */
+static int c_str2meth(struct sample *smp)
+{
+ enum http_meth_t meth;
+ int len;
+
+ meth = find_http_meth(smp->data.u.str.area, smp->data.u.str.data);
+ if (meth == HTTP_METH_OTHER) {
+ /* keep pointing at the original string */
+ len = smp->data.u.str.data;
+ smp->data.u.meth.str.area = smp->data.u.str.area;
+ smp->data.u.meth.str.data = len;
+ }
+ else
+ smp->flags &= ~SMP_F_CONST;
+ smp->data.u.meth.meth = meth;
+ smp->data.type = SMP_T_METH;
+ return 1;
+}
+
+static int c_meth2str(struct sample *smp)
+{
+ int len;
+ enum http_meth_t meth;
+
+ if (smp->data.u.meth.meth == HTTP_METH_OTHER) {
+ /* The method is unknown. Copy the original pointer. */
+ len = smp->data.u.meth.str.data;
+ smp->data.u.str.area = smp->data.u.meth.str.area;
+ smp->data.u.str.data = len;
+ smp->data.type = SMP_T_STR;
+ }
+ else if (smp->data.u.meth.meth < HTTP_METH_OTHER) {
+ /* The method is known, copy the pointer containing the string. */
+ meth = smp->data.u.meth.meth;
+ smp->data.u.str.area = http_known_methods[meth].ptr;
+ smp->data.u.str.data = http_known_methods[meth].len;
+ smp->flags |= SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+ }
+ else {
+ /* Unknown method */
+ return 0;
+ }
+ return 1;
+}
+
+static int c_addr2bin(struct sample *smp)
+{
+ struct buffer *chk = get_trash_chunk();
+
+ if (smp->data.type == SMP_T_IPV4) {
+ chk->data = 4;
+ memcpy(chk->area, &smp->data.u.ipv4, chk->data);
+ }
+ else if (smp->data.type == SMP_T_IPV6) {
+ chk->data = 16;
+ memcpy(chk->area, &smp->data.u.ipv6, chk->data);
+ }
+ else
+ return 0;
+
+ smp->data.u.str = *chk;
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+static int c_int2bin(struct sample *smp)
+{
+ struct buffer *chk = get_trash_chunk();
+
+ *(unsigned long long int *) chk->area = my_htonll(smp->data.u.sint);
+ chk->data = 8;
+
+ smp->data.u.str = *chk;
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+static int c_bool2bin(struct sample *smp)
+{
+ struct buffer *chk = get_trash_chunk();
+
+ *(unsigned long long int *)chk->area = my_htonll(!!smp->data.u.sint);
+ chk->data = 8;
+ smp->data.u.str = *chk;
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+
/*****************************************************************/
/*      Sample casts matrix:                                     */
/*           sample_casts[from type][to type]                    */
/*           NULL pointer used for impossible sample casts       */
/*                                                               */
/* Cell semantics:                                               */
/*   NULL     - cast impossible, rejected at parse/run time      */
/*   c_none   - identity, no transformation needed               */
/*   c_pseudo - allowed at parse time only; crashes if actually  */
/*              invoked at runtime (pseudo types are never       */
/*              emitted by fetches/converters)                   */
/*                                                               */
/* NOTE(review): BIN->METH reuses c_str2meth and METH->BIN       */
/* reuses c_meth2str (whose output type is SMP_T_STR);           */
/* presumably intentional since STR and BIN share the chunk      */
/* representation - confirm against upstream history.            */
/*****************************************************************/

sample_cast_fct sample_casts[SMP_TYPES][SMP_TYPES] = {
/*            to:  ANY     SAME      BOOL       SINT       ADDR        IPV4      IPV6        STR         BIN         METH */
/* from:  ANY */ { c_none, NULL, c_pseudo, c_pseudo, c_pseudo, c_pseudo, c_pseudo, c_pseudo, c_pseudo, c_pseudo },
/*       SAME */ { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL },
/*       BOOL */ { c_none, NULL, c_none, c_none, NULL, NULL, NULL, c_int2str, c_bool2bin, NULL },
/*       SINT */ { c_none, NULL, c_none, c_none, c_int2ip, c_int2ip, c_int2ipv6, c_int2str, c_int2bin, NULL },
/*       ADDR */ { c_none, NULL, NULL, NULL, c_pseudo, c_pseudo, c_pseudo, c_pseudo, c_pseudo, NULL },
/*       IPV4 */ { c_none, NULL, NULL, c_ip2int, c_none, c_none, c_ip2ipv6, c_ip2str, c_addr2bin, NULL },
/*       IPV6 */ { c_none, NULL, NULL, NULL, c_none, c_ipv62ip, c_none, c_ipv62str, c_addr2bin, NULL },
/*        STR */ { c_none, NULL, c_str2int, c_str2int, c_str2addr, c_str2ip, c_str2ipv6, c_none, c_none, c_str2meth },
/*        BIN */ { c_none, NULL, NULL, NULL, NULL, NULL, NULL, c_bin2str, c_none, c_str2meth },
/*       METH */ { c_none, NULL, NULL, NULL, NULL, NULL, NULL, c_meth2str, c_meth2str, c_none }
};
+
+/* Process the converters (if any) for a sample expr after the first fetch
+ * keyword. We have two supported syntaxes for the converters, which can be
+ * combined:
+ * - comma-delimited list of converters just after the keyword and args ;
+ * - one converter per keyword (if <idx> != NULL)
+ * FIXME: should we continue to support this old syntax?
+ * The combination allows to have each keyword being a comma-delimited
+ * series of converters.
+ *
+ * We want to process the former first, then the latter. For this we start
+ * from the beginning of the supposed place in the exiting conv chain, which
+ * starts at the last comma (<start> which is then referred to as endt).
+ *
+ * If <endptr> is non-nul, it will be set to the first unparsed character
+ * (which may be the final '\0') on success. If it is nul, the expression
+ * must be properly terminated by a '\0' otherwise an error is reported.
+ *
+ * <expr> should point the the sample expression that is already initialized
+ * with the sample fetch that precedes the converters chain.
+ *
+ * The function returns a positive value for success and 0 for failure, in which
+ * case <err_msg> will point to an allocated string that brings some info
+ * about the failure. It is the caller's responsibility to free it.
+ */
+int sample_parse_expr_cnv(char **str, int *idx, char **endptr, char **err_msg, struct arg_list *al, const char *file, int line,
+ struct sample_expr *expr, const char *start)
+{
+ struct sample_conv *conv;
+ const char *endt = start; /* end of term */
+ const char *begw; /* beginning of word */
+ const char *endw; /* end of word */
+ char *ckw = NULL;
+ unsigned long prev_type = expr->fetch->out_type;
+ int success = 1;
+
+ while (1) {
+ struct sample_conv_expr *conv_expr;
+ int err_arg;
+ int argcnt;
+
+ if (*endt && *endt != ',') {
+ if (endptr) {
+ /* end found, let's stop here */
+ break;
+ }
+ if (ckw)
+ memprintf(err_msg, "missing comma after converter '%s'", ckw);
+ else
+ memprintf(err_msg, "missing comma after fetch keyword");
+ goto out_error;
+ }
+
+ /* FIXME: how long should we support such idiocies ? Maybe we
+ * should already warn ?
+ */
+ while (*endt == ',') /* then trailing commas */
+ endt++;
+
+ begw = endt; /* start of converter */
+
+ if (!*begw) {
+ /* none ? skip to next string if idx is set */
+ if (!idx)
+ break; /* end of converters */
+ (*idx)++;
+ begw = str[*idx];
+ if (!begw || !*begw)
+ break;
+ }
+
+ for (endw = begw; is_idchar(*endw); endw++)
+ ;
+
+ ha_free(&ckw);
+ ckw = my_strndup(begw, endw - begw);
+
+ conv = find_sample_conv(begw, endw - begw);
+ if (!conv) {
+ /* we found an isolated keyword that we don't know, it's not ours */
+ if (idx && begw == str[*idx]) {
+ endt = begw;
+ break;
+ }
+ memprintf(err_msg, "unknown converter '%s'", ckw);
+ goto out_error;
+ }
+
+ if (conv->in_type >= SMP_TYPES || conv->out_type >= SMP_TYPES) {
+ memprintf(err_msg, "return type of converter '%s' is unknown", ckw);
+ goto out_error;
+ }
+
+ /* If impossible type conversion */
+ if (!sample_casts[prev_type][conv->in_type]) {
+ memprintf(err_msg, "converter '%s' cannot be applied", ckw);
+ goto out_error;
+ }
+
+ /* Ignore converters that output SMP_T_SAME if switching to them is
+ * conversion-free. (such converter's output match with input, thus only
+ * their input is considered)
+ */
+ if (conv->out_type != SMP_T_SAME)
+ prev_type = conv->out_type;
+ else if (sample_casts[prev_type][conv->in_type] != c_none)
+ prev_type = conv->in_type;
+
+ conv_expr = calloc(1, sizeof(*conv_expr));
+ if (!conv_expr)
+ goto out_error;
+
+ LIST_APPEND(&(expr->conv_exprs), &(conv_expr->list));
+ conv_expr->conv = conv;
+
+ if (al) {
+ al->kw = expr->fetch->kw;
+ al->conv = conv_expr->conv->kw;
+ }
+ argcnt = make_arg_list(endw, -1, conv->arg_mask, &conv_expr->arg_p, err_msg, &endt, &err_arg, al);
+ if (argcnt < 0) {
+ memprintf(err_msg, "invalid arg %d in converter '%s' : %s", err_arg+1, ckw, *err_msg);
+ goto out_error;
+ }
+
+ if (argcnt && !conv->arg_mask) {
+ memprintf(err_msg, "converter '%s' does not support any args", ckw);
+ goto out_error;
+ }
+
+ if (!conv_expr->arg_p)
+ conv_expr->arg_p = empty_arg_list;
+
+ if (conv->val_args && !conv->val_args(conv_expr->arg_p, conv, file, line, err_msg)) {
+ memprintf(err_msg, "invalid args in converter '%s' : %s", ckw, *err_msg);
+ goto out_error;
+ }
+ }
+
+ if (endptr) {
+ /* end found, let's stop here */
+ *endptr = (char *)endt;
+ }
+ out:
+ free(ckw);
+ return success;
+
+ out_error:
+ success = 0;
+ goto out;
+}
+
+/*
+ * Parse a sample expression configuration:
+ * fetch keyword followed by format conversion keywords.
+ *
+ * <al> is an arg_list serving as a list head to report missing dependencies.
+ * It may be NULL if such dependencies are not allowed. Otherwise, the caller
+ * must have set al->ctx if al is set.
+ *
+ * Returns a pointer on allocated sample expression structure or NULL in case
+ * of error, in which case <err_msg> will point to an allocated string that
+ * brings some info about the failure. It is the caller's responsibility to
+ * free it.
+ */
+struct sample_expr *sample_parse_expr(char **str, int *idx, const char *file, int line, char **err_msg, struct arg_list *al, char **endptr)
+{
+ const char *begw; /* beginning of word */
+ const char *endw; /* end of word */
+ const char *endt; /* end of term */
+ struct sample_expr *expr = NULL;
+ struct sample_fetch *fetch;
+ char *fkw = NULL;
+ int err_arg;
+
+ begw = str[*idx];
+ for (endw = begw; is_idchar(*endw); endw++)
+ ;
+
+ if (endw == begw) {
+ memprintf(err_msg, "missing fetch method");
+ goto out_error;
+ }
+
+ /* keep a copy of the current fetch keyword for error reporting */
+ fkw = my_strndup(begw, endw - begw);
+
+ fetch = find_sample_fetch(begw, endw - begw);
+ if (!fetch) {
+ memprintf(err_msg, "unknown fetch method '%s'", fkw);
+ goto out_error;
+ }
+
+ /* At this point, we have :
+ * - begw : beginning of the keyword
+ * - endw : end of the keyword, first character not part of keyword
+ */
+
+ if (fetch->out_type >= SMP_TYPES) {
+ memprintf(err_msg, "returns type of fetch method '%s' is unknown", fkw);
+ goto out_error;
+ }
+
+ expr = calloc(1, sizeof(*expr));
+ if (!expr)
+ goto out_error;
+
+ LIST_INIT(&(expr->conv_exprs));
+ expr->fetch = fetch;
+ expr->arg_p = empty_arg_list;
+
+ /* Note that we call the argument parser even with an empty string,
+ * this allows it to automatically create entries for mandatory
+ * implicit arguments (eg: local proxy name).
+ */
+ if (al) {
+ al->kw = expr->fetch->kw;
+ al->conv = NULL;
+ }
+ if (make_arg_list(endw, -1, fetch->arg_mask, &expr->arg_p, err_msg, &endt, &err_arg, al) < 0) {
+ memprintf(err_msg, "fetch method '%s' : %s", fkw, *err_msg);
+ goto out_error;
+ }
+
+ /* now endt is our first char not part of the arg list, typically the
+ * comma after the sample fetch name or after the closing parenthesis,
+ * or the NUL char.
+ */
+
+ if (!expr->arg_p) {
+ expr->arg_p = empty_arg_list;
+ }
+ else if (fetch->val_args && !fetch->val_args(expr->arg_p, err_msg)) {
+ memprintf(err_msg, "invalid args in fetch method '%s' : %s", fkw, *err_msg);
+ goto out_error;
+ }
+
+ if (!sample_parse_expr_cnv(str, idx, endptr, err_msg, al, file, line, expr, endt))
+ goto out_error;
+
+ out:
+ free(fkw);
+ return expr;
+
+out_error:
+ release_sample_expr(expr);
+ expr = NULL;
+ goto out;
+}
+
+/*
+ * Helper function to process the converter list of a given sample expression
+ * <expr> using the sample <p> (which is assumed to be properly initialized)
+ * as input.
+ *
+ * Returns 1 on success and 0 on failure.
+ */
+int sample_process_cnv(struct sample_expr *expr, struct sample *p)
+{
+ struct sample_conv_expr *conv_expr;
+
+ list_for_each_entry(conv_expr, &expr->conv_exprs, list) {
+ /* we want to ensure that p->type can be casted into
+ * conv_expr->conv->in_type. We have 3 possibilities :
+ * - NULL => not castable.
+ * - c_none => nothing to do (let's optimize it)
+ * - other => apply cast and prepare to fail
+ */
+ if (!sample_casts[p->data.type][conv_expr->conv->in_type])
+ return 0;
+
+ if (sample_casts[p->data.type][conv_expr->conv->in_type] != c_none &&
+ !sample_casts[p->data.type][conv_expr->conv->in_type](p))
+ return 0;
+
+ /* OK cast succeeded */
+
+ if (!conv_expr->conv->process(conv_expr->arg_p, p, conv_expr->conv->private))
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Process a fetch + format conversion of defined by the sample expression <expr>
+ * on request or response considering the <opt> parameter.
+ * Returns a pointer on a typed sample structure containing the result or NULL if
+ * sample is not found or when format conversion failed.
+ * If <p> is not null, function returns results in structure pointed by <p>.
+ * If <p> is null, functions returns a pointer on a static sample structure.
+ *
+ * Note: the fetch functions are required to properly set the return type. The
+ * conversion functions must do so too. However the cast functions do not need
+ * to since they're made to cast multiple types according to what is required.
+ *
+ * The caller may indicate in <opt> if it considers the result final or not.
+ * The caller needs to check the SMP_F_MAY_CHANGE flag in p->flags to verify
+ * if the result is stable or not, according to the following table :
+ *
+ * return MAY_CHANGE FINAL Meaning for the sample
+ * NULL 0 * Not present and will never be (eg: header)
+ * NULL 1 0 Not present yet, could change (eg: POST param)
+ * NULL 1 1 Not present yet, will not change anymore
+ * smp 0 * Present and will not change (eg: header)
+ * smp 1 0 Present, may change (eg: request length)
+ * smp 1 1 Present, last known value (eg: request length)
+ */
+struct sample *sample_process(struct proxy *px, struct session *sess,
+ struct stream *strm, unsigned int opt,
+ struct sample_expr *expr, struct sample *p)
+{
+ if (p == NULL) {
+ p = &temp_smp;
+ memset(p, 0, sizeof(*p));
+ }
+
+ smp_set_owner(p, px, sess, strm, opt);
+ if (!expr->fetch->process(expr->arg_p, p, expr->fetch->kw, expr->fetch->private))
+ return NULL;
+
+ if (!sample_process_cnv(expr, p))
+ return NULL;
+ return p;
+}
+
/*
 * Resolve all remaining arguments in proxy <p>. Returns the number of
 * errors or 0 if everything is fine. If at least one error is met, it will
 * be appended to *err. If *err==NULL it will be allocated first.
 *
 * Each pending arg_list entry names a keyword argument (server, proxy,
 * table, userlist or regex) that could not be resolved at parse time; this
 * pass looks the target up and replaces the string argument by the resolved
 * pointer.
 *
 * NOTE(review): error paths using 'continue' skip the LIST_DELETE/free at
 * the bottom of the loop and thus keep the entry in the list, while 'break'
 * paths fall through to the deletion - presumably intentional, confirm.
 */
int smp_resolve_args(struct proxy *p, char **err)
{
	struct arg_list *cur, *bak;
	const char *ctx, *where;
	const char *conv_ctx, *conv_pre, *conv_pos;
	struct userlist *ul;
	struct my_regex *reg;
	struct arg *arg;
	int cfgerr = 0;
	int rflags;

	list_for_each_entry_safe(cur, bak, &p->conf.args.list, list) {
		struct proxy *px;
		struct server *srv;
		struct stktable *t;
		char *pname, *sname, *stktname;
		char *err2;

		arg = cur->arg;

		/* prepare output messages */
		conv_pre = conv_pos = conv_ctx = "";
		if (cur->conv) {
			conv_ctx = cur->conv;
			conv_pre = "conversion keyword '";
			conv_pos = "' for ";
		}

		/* describe where the keyword appeared, for error messages */
		where = "in";
		ctx = "sample fetch keyword";
		switch (cur->ctx) {
		case ARGC_STK: where = "in stick rule in"; break;
		case ARGC_TRK: where = "in tracking rule in"; break;
		case ARGC_LOG: where = "in log-format string in"; break;
		case ARGC_LOGSD: where = "in log-format-sd string in"; break;
		case ARGC_HRQ: where = "in http-request expression in"; break;
		case ARGC_HRS: where = "in http-response response in"; break;
		case ARGC_UIF: where = "in unique-id-format string in"; break;
		case ARGC_RDR: where = "in redirect format string in"; break;
		case ARGC_CAP: where = "in capture rule in"; break;
		case ARGC_ACL: ctx = "ACL keyword"; break;
		case ARGC_SRV: where = "in server directive in"; break;
		case ARGC_SPOE: where = "in spoe-message directive in"; break;
		case ARGC_UBK: where = "in use_backend expression in"; break;
		case ARGC_USRV: where = "in use-server or balance expression in"; break;
		case ARGC_HERR: where = "in http-error directive in"; break;
		case ARGC_OT: where = "in ot-scope directive in"; break;
		case ARGC_OPT: where = "in option directive in"; break;
		case ARGC_TCO: where = "in tcp-request connection expression in"; break;
		case ARGC_TSE: where = "in tcp-request session expression in"; break;
		case ARGC_TRQ: where = "in tcp-request content expression in"; break;
		case ARGC_TRS: where = "in tcp-response content expression in"; break;
		case ARGC_TCK: where = "in tcp-check expression in"; break;
		case ARGC_CFG: where = "in configuration expression in"; break;
		case ARGC_CLI: where = "in CLI expression in"; break;
		}

		/* set a few default settings */
		px = p;
		pname = p->id;

		switch (arg->type) {
		case ARGT_SRV:
			/* server argument, optionally prefixed by its backend */
			if (!arg->data.str.data) {
				memprintf(err, "%sparsing [%s:%d]: missing server name in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				continue;
			}

			/* we support two formats : "bck/srv" and "srv" */
			sname = strrchr(arg->data.str.area, '/');

			if (sname) {
				*sname++ = '\0';
				pname = arg->data.str.area;

				px = proxy_be_by_name(pname);
				if (!px) {
					memprintf(err, "%sparsing [%s:%d]: unable to find proxy '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
						  *err ? *err : "", cur->file, cur->line, pname,
						  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
					cfgerr++;
					break;
				}
			}
			else {
				if (px->cap & PR_CAP_DEF) {
					memprintf(err, "%sparsing [%s:%d]: backend name must be set in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
						  *err ? *err : "", cur->file, cur->line,
						  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
					cfgerr++;
					break;
				}
				sname = arg->data.str.area;
			}

			srv = findserver(px, sname);
			if (!srv) {
				memprintf(err, "%sparsing [%s:%d]: unable to find server '%s' in proxy '%s', referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line, sname, pname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			/* a referenced server may not be removed at runtime */
			srv->flags |= SRV_F_NON_PURGEABLE;

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.srv = srv;
			break;

		case ARGT_FE:
			/* frontend argument; defaults to the current proxy when empty */
			if (arg->data.str.data) {
				pname = arg->data.str.area;
				px = proxy_fe_by_name(pname);
			}

			if (!px) {
				memprintf(err, "%sparsing [%s:%d]: unable to find frontend '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line, pname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			if (!(px->cap & PR_CAP_FE)) {
				memprintf(err, "%sparsing [%s:%d]: proxy '%s', referenced in arg %d of %s%s%s%s '%s' %s proxy '%s', has not frontend capability.\n",
					  *err ? *err : "", cur->file, cur->line, pname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.prx = px;
			break;

		case ARGT_BE:
			/* backend argument; defaults to the current proxy when empty */
			if (arg->data.str.data) {
				pname = arg->data.str.area;
				px = proxy_be_by_name(pname);
			}

			if (!px) {
				memprintf(err, "%sparsing [%s:%d]: unable to find backend '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line, pname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			if (!(px->cap & PR_CAP_BE)) {
				memprintf(err, "%sparsing [%s:%d]: proxy '%s', referenced in arg %d of %s%s%s%s '%s' %s proxy '%s', has not backend capability.\n",
					  *err ? *err : "", cur->file, cur->line, pname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.prx = px;
			break;

		case ARGT_TAB:
			/* stick-table argument; defaults to the proxy's own table */
			if (arg->data.str.data)
				stktname = arg->data.str.area;
			else {
				if (px->cap & PR_CAP_DEF) {
					memprintf(err, "%sparsing [%s:%d]: table name must be set in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
						  *err ? *err : "", cur->file, cur->line,
						  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
					cfgerr++;
					break;
				}
				stktname = px->id;
			}

			t = stktable_find_by_name(stktname);
			if (!t) {
				memprintf(err, "%sparsing [%s:%d]: unable to find table '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line, stktname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			if (!t->size) {
				memprintf(err, "%sparsing [%s:%d]: no table in proxy '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line, stktname,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			/* register the proxy on the table's reference list if absent */
			if (!in_proxies_list(t->proxies_list, p)) {
				p->next_stkt_ref = t->proxies_list;
				t->proxies_list = p;
			}

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.t = t;
			break;

		case ARGT_USR:
			/* userlist argument */
			if (!arg->data.str.data) {
				memprintf(err, "%sparsing [%s:%d]: missing userlist name in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			/* the uri_auth userlist takes precedence when its name matches */
			if (p->uri_auth && p->uri_auth->userlist &&
			    strcmp(p->uri_auth->userlist->name, arg->data.str.area) == 0)
				ul = p->uri_auth->userlist;
			else
				ul = auth_find_userlist(arg->data.str.area);

			if (!ul) {
				memprintf(err, "%sparsing [%s:%d]: unable to find userlist '%s' referenced in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line,
					  arg->data.str.area,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				break;
			}

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.usr = ul;
			break;

		case ARGT_REG:
			/* regex argument: compile it now */
			if (!arg->data.str.data) {
				memprintf(err, "%sparsing [%s:%d]: missing regex in arg %d of %s%s%s%s '%s' %s proxy '%s'.\n",
					  *err ? *err : "", cur->file, cur->line,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id);
				cfgerr++;
				continue;
			}

			rflags = 0;
			rflags |= (arg->type_flags & ARGF_REG_ICASE) ? REG_ICASE : 0;
			err2 = NULL;

			if (!(reg = regex_comp(arg->data.str.area, !(rflags & REG_ICASE), 1 /* capture substr */, &err2))) {
				memprintf(err, "%sparsing [%s:%d]: error in regex '%s' in arg %d of %s%s%s%s '%s' %s proxy '%s' : %s.\n",
					  *err ? *err : "", cur->file, cur->line,
					  arg->data.str.area,
					  cur->arg_pos + 1, conv_pre, conv_ctx, conv_pos, ctx, cur->kw, where, p->id, err2);
				cfgerr++;
				continue;
			}

			chunk_destroy(&arg->data.str);
			arg->unresolved = 0;
			arg->data.reg = reg;
			break;


		}

		/* resolved (or broken with 'break'): drop the pending entry */
		LIST_DELETE(&cur->list);
		free(cur);
	} /* end of args processing */

	return cfgerr;
}
+
+/*
+ * Process a fetch + format conversion as defined by the sample expression
+ * <expr> on request or response considering the <opt> parameter. The output is
+ * not explicitly set to <smp_type>, but shall be compatible with it as
+ * specified by 'sample_casts' table. If a stable sample can be fetched, or an
+ * unstable one when <opt> contains SMP_OPT_FINAL, the sample is converted and
+ * returned without the SMP_F_MAY_CHANGE flag. If an unstable sample is found
+ * and <opt> does not contain SMP_OPT_FINAL, then the sample is returned as-is
+ * with its SMP_F_MAY_CHANGE flag so that the caller can check it and decide to
+ * take actions (eg: wait longer). If a sample could not be found or could not
+ * be converted, NULL is returned. The caller MUST NOT use the sample if the
+ * SMP_F_MAY_CHANGE flag is present, as it is used only as a hint that there is
+ * still hope to get it after waiting longer, and is not converted to string.
+ * The possible output combinations are the following :
+ *
+ * return MAY_CHANGE FINAL Meaning for the sample
+ * NULL * * Not present and will never be (eg: header)
+ * smp 0 * Final value converted (eg: header)
+ * smp 1 0 Not present yet, may appear later (eg: header)
+ * smp 1 1 never happens (either flag is cleared on output)
+ */
+struct sample *sample_fetch_as_type(struct proxy *px, struct session *sess,
+ struct stream *strm, unsigned int opt,
+ struct sample_expr *expr, int smp_type)
+{
+ struct sample *smp = &temp_smp;
+
+ memset(smp, 0, sizeof(*smp));
+
+ if (!sample_process(px, sess, strm, opt, expr, smp)) {
+ if ((smp->flags & SMP_F_MAY_CHANGE) && !(opt & SMP_OPT_FINAL))
+ return smp;
+ return NULL;
+ }
+
+ if (!sample_casts[smp->data.type][smp_type])
+ return NULL;
+
+ if (sample_casts[smp->data.type][smp_type] != c_none &&
+ !sample_casts[smp->data.type][smp_type](smp))
+ return NULL;
+
+ smp->flags &= ~SMP_F_MAY_CHANGE;
+ return smp;
+}
+
+static void release_sample_arg(struct arg *p)
+{
+ struct arg *p_back = p;
+
+ if (!p)
+ return;
+
+ while (p->type != ARGT_STOP) {
+ if (p->type == ARGT_STR || p->unresolved) {
+ chunk_destroy(&p->data.str);
+ p->unresolved = 0;
+ }
+ else if (p->type == ARGT_REG) {
+ regex_free(p->data.reg);
+ p->data.reg = NULL;
+ }
+ p++;
+ }
+
+ if (p_back != empty_arg_list)
+ free(p_back);
+}
+
+void release_sample_expr(struct sample_expr *expr)
+{
+ struct sample_conv_expr *conv_expr, *conv_exprb;
+
+ if (!expr)
+ return;
+
+ list_for_each_entry_safe(conv_expr, conv_exprb, &expr->conv_exprs, list) {
+ LIST_DELETE(&conv_expr->list);
+ release_sample_arg(conv_expr->arg_p);
+ free(conv_expr);
+ }
+
+ release_sample_arg(expr->arg_p);
+ free(expr);
+}
+
+/*****************************************************************/
+/* Sample format convert functions */
+/* These functions set the data type on return. */
+/*****************************************************************/
+
/* "debug" converter: logs the sample's type and a printable rendering of its
 * value to the sink stored in arg[1], prefixed with the string in arg[0],
 * then passes the sample through unchanged. Always returns 1 so the chain
 * continues. Note the label layout: <nocast> sits after the return and jumps
 * back to <done>, so undisplayable samples still get logged.
 */
static int sample_conv_debug(const struct arg *arg_p, struct sample *smp, void *private)
{
	int i;
	struct sample tmp;
	struct buffer *buf;
	struct sink *sink;
	struct ist line;
	char *pfx;

	buf = alloc_trash_chunk();
	if (!buf)
		goto end;  /* cannot log without a buffer; still return 1 */

	/* smp_check_debug() resolved these at parse time, they cannot be NULL */
	sink = (struct sink *)arg_p[1].data.ptr;
	BUG_ON(!sink);

	pfx = arg_p[0].data.str.area;
	BUG_ON(!pfx);

	chunk_printf(buf, "[debug] %s: type=%s ", pfx, smp_to_type[smp->data.type]);
	if (!sample_casts[smp->data.type][SMP_T_STR])
		goto nocast;

	/* Copy sample fetch. This puts the sample as const, the
	 * cast will copy data if a transformation is required.
	 */
	memcpy(&tmp, smp, sizeof(struct sample));
	tmp.flags = SMP_F_CONST;

	if (!sample_casts[smp->data.type][SMP_T_STR](&tmp))
		goto nocast;

	/* Display the displayable chars*. */
	b_putchr(buf, '<');
	for (i = 0; i < tmp.data.u.str.data; i++) {
		/* non-printable bytes are rendered as '.' */
		if (isprint((unsigned char)tmp.data.u.str.area[i]))
			b_putchr(buf, tmp.data.u.str.area[i]);
		else
			b_putchr(buf, '.');
	}
	b_putchr(buf, '>');

 done:
	line = ist2(buf->area, buf->data);
	sink_write(sink, LOG_HEADER_NONE, 0, &line, 1);
 end:
	free_trash_chunk(buf);
	return 1;
 nocast:
	/* no usable string representation: say so, then emit the line */
	chunk_appendf(buf, "(undisplayable)");
	goto done;
}
+
+// This function checks the "debug" converter's arguments.
+static int smp_check_debug(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ const char *name = "buf0";
+ struct sink *sink = NULL;
+
+ if (args[0].type != ARGT_STR) {
+ /* optional prefix */
+ args[0].data.str.area = "";
+ args[0].data.str.data = 0;
+ }
+
+ if (args[1].type == ARGT_STR)
+ name = args[1].data.str.area;
+
+ sink = sink_find(name);
+ if (!sink) {
+ memprintf(err, "No such sink '%s'", name);
+ return 0;
+ }
+
+ chunk_destroy(&args[1].data.str);
+ args[1].type = ARGT_PTR;
+ args[1].data.ptr = sink;
+ return 1;
+}
+
+static int sample_conv_base642bin(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct buffer *trash = get_trash_chunk();
+ int bin_len;
+
+ trash->data = 0;
+ bin_len = base64dec(smp->data.u.str.area, smp->data.u.str.data,
+ trash->area, trash->size);
+ if (bin_len < 0)
+ return 0;
+
+ trash->data = bin_len;
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_BIN;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+static int sample_conv_base64url2bin(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct buffer *trash = get_trash_chunk();
+ int bin_len;
+
+ trash->data = 0;
+ bin_len = base64urldec(smp->data.u.str.area, smp->data.u.str.data,
+ trash->area, trash->size);
+ if (bin_len < 0)
+ return 0;
+
+ trash->data = bin_len;
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_BIN;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+static int sample_conv_bin2base64(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct buffer *trash = get_trash_chunk();
+ int b64_len;
+
+ trash->data = 0;
+ b64_len = a2base64(smp->data.u.str.area, smp->data.u.str.data,
+ trash->area, trash->size);
+ if (b64_len < 0)
+ return 0;
+
+ trash->data = b64_len;
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_STR;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+static int sample_conv_bin2base64url(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct buffer *trash = get_trash_chunk();
+ int b64_len;
+
+ trash->data = 0;
+ b64_len = a2base64url(smp->data.u.str.area, smp->data.u.str.data,
+ trash->area, trash->size);
+ if (b64_len < 0)
+ return 0;
+
+ trash->data = b64_len;
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_STR;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* This function returns a sample struct filled with the conversion of variable
+ * <var> to sample type <type> (SMP_T_*), via a cast to the target type. If the
+ * variable cannot be retrieved or casted, 0 is returned, otherwise 1.
+ *
+ * Keep in mind that the sample content may be written to a pre-allocated
+ * trash chunk as returned by get_trash_chunk().
+ */
+int sample_conv_var2smp(const struct var_desc *var, struct sample *smp, int type)
+{
+ if (!vars_get_by_desc(var, smp, NULL))
+ return 0;
+ if (!sample_casts[smp->data.type][type])
+ return 0;
+ if (!sample_casts[smp->data.type][type](smp))
+ return 0;
+ return 1;
+}
+
+static int sample_conv_sha1(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ blk_SHA_CTX ctx;
+ struct buffer *trash = get_trash_chunk();
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ blk_SHA1_Init(&ctx);
+ blk_SHA1_Update(&ctx, smp->data.u.str.area, smp->data.u.str.data);
+ blk_SHA1_Final((unsigned char *) trash->area, &ctx);
+
+ trash->data = 20;
+ smp->data.u.str = *trash;
+ smp->data.type = SMP_T_BIN;
+ smp->flags &= ~SMP_F_CONST;
+ return 1;
+}
+
+/* This function returns a sample struct filled with an <arg> content.
+ * If the <arg> contains a string, it is returned in the sample flagged as
+ * SMP_F_CONST. If the <arg> contains a variable descriptor, the sample is
+ * filled with the content of the variable by using vars_get_by_desc().
+ *
+ * Keep in mind that the sample content may be written to a pre-allocated
+ * trash chunk as returned by get_trash_chunk().
+ *
+ * This function returns 0 if an error occurs, otherwise it returns 1.
+ */
+int sample_conv_var2smp_str(const struct arg *arg, struct sample *smp)
+{
+ switch (arg->type) {
+ case ARGT_STR:
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = arg->data.str;
+ smp->flags = SMP_F_CONST;
+ return 1;
+ case ARGT_VAR:
+ return sample_conv_var2smp(&arg->data.var, smp, SMP_T_STR);
+ default:
+ return 0;
+ }
+}
+
+static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ if (args[1].data.sint <= 0 || args[1].data.sint > sizeof(unsigned long long)) {
+ memprintf(err, "chunk_size out of [1..%u] range (%lld)", (uint)sizeof(unsigned long long), args[1].data.sint);
+ return 0;
+ }
+
+ if (args[2].data.sint != 0 && args[2].data.sint != 1) {
+ memprintf(err, "Unsupported truncate value (%lld)", args[2].data.sint);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Converts big-endian binary input sample to a string containing an unsigned
 * integer number per <chunk_size> input bytes separated with <separator>.
 * Optional <truncate> flag indicates if input is truncated at <chunk_size>
 * boundaries.
 * Arguments: separator (string), chunk_size (integer), truncate (0,1)
 */
static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
{
	struct buffer *trash = get_trash_chunk();
	/* with truncate set, stop before a trailing partial chunk; otherwise
	 * consume the whole input, last chunk possibly shorter
	 */
	const int last = args[2].data.sint ? smp->data.u.str.data - args[1].data.sint + 1 : smp->data.u.str.data;
	int max_size = trash->size - 2;
	int i;
	int start;
	int ptr = 0;
	unsigned long long number;
	char *pos;

	trash->data = 0;

	while (ptr < last && trash->data <= max_size) {
		/* remember where this element starts so a failed ulltoa can
		 * roll back the partial separator
		 */
		start = trash->data;
		if (ptr) {
			/* Add separator */
			memcpy(trash->area + trash->data, args[0].data.str.area, args[0].data.str.data);
			trash->data += args[0].data.str.data;
		}
		else
			max_size -= args[0].data.str.data;

		/* Add integer */
		for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++)
			number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];

		pos = ulltoa(number, trash->area + trash->data, trash->size - trash->data);
		if (pos)
			trash->data = pos - trash->area;
		else {
			/* number did not fit: drop the partial element and stop */
			trash->data = start;
			break;
		}
	}

	smp->data.u.str = *trash;
	smp->data.type = SMP_T_STR;
	smp->flags &= ~SMP_F_CONST;
	return 1;
}
+
+/* Validates be2hex arguments. A non-positive chunk_size (args[1]) is only
+ * tolerated when the converter ignores it, i.e. when the separator (args[0])
+ * is empty AND truncate (args[2]) is disabled: sample_conv_be2hex() then
+ * dumps the whole input as one chunk. truncate must be a 0/1 boolean.
+ */
+static int sample_conv_be2hex_check(struct arg *args, struct sample_conv *conv,
+                                    const char *file, int line, char **err)
+{
+	if (args[1].data.sint <= 0 && (args[0].data.str.data > 0 || args[2].data.sint != 0)) {
+		memprintf(err, "chunk_size needs to be positive (%lld)", args[1].data.sint);
+		return 0;
+	}
+
+	if (args[2].data.sint != 0 && args[2].data.sint != 1) {
+		memprintf(err, "Unsupported truncate value (%lld)", args[2].data.sint);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Converts big-endian binary input sample to a hex string containing two hex
+ * digits per input byte. <separator> is put every <chunk_size> binary input
+ * bytes if specified. Optional <truncate> flag indicates if input is truncated
+ * at <chunk_size> boundaries.
+ * Arguments: separator (string), chunk_size (integer), truncate (0,1)
+ *
+ * The output is built in a pre-allocated trash chunk and stays valid only
+ * until the next trash rotation. Always returns 1.
+ */
+static int sample_conv_be2hex(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *trash = get_trash_chunk();
+	int chunk_size = args[1].data.sint;
+	/* with <truncate> set, stop before a trailing incomplete chunk */
+	const int last = args[2].data.sint ? smp->data.u.str.data - chunk_size + 1 : smp->data.u.str.data;
+	int i;
+	int max_size;
+	int ptr = 0;
+	unsigned char c;
+
+	trash->data = 0;
+	/* no separator and no truncation: dump the whole input as one chunk */
+	if (args[0].data.str.data == 0 && args[2].data.sint == 0)
+		chunk_size = smp->data.u.str.data;
+	max_size = trash->size - 2 * chunk_size;
+
+	while (ptr < last && trash->data <= max_size) {
+		if (ptr) {
+			/* Add separator */
+			memcpy(trash->area + trash->data, args[0].data.str.area, args[0].data.str.data);
+			trash->data += args[0].data.str.data;
+		}
+		else
+			max_size -= args[0].data.str.data;
+
+		/* Add hex: two digits per input byte */
+		for (i = 0; i < chunk_size && ptr < smp->data.u.str.data; i++) {
+			c = smp->data.u.str.area[ptr++];
+			trash->area[trash->data++] = hextab[(c >> 4) & 0xF];
+			trash->area[trash->data++] = hextab[c & 0xF];
+		}
+	}
+
+	smp->data.u.str = *trash;
+	smp->data.type = SMP_T_STR;
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* Converts the binary input sample to its lowercase hex representation, two
+ * hex digits per input byte, written into a trash chunk. Always returns 1;
+ * the output is silently truncated if the trash chunk is too small.
+ */
+static int sample_conv_bin2hex(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct buffer *out = get_trash_chunk();
+	int in_pos = 0;
+
+	out->data = 0;
+	/* two output chars per input byte; stop when the chunk is full */
+	while (in_pos < smp->data.u.str.data && out->data <= out->size - 2) {
+		unsigned char byte = smp->data.u.str.area[in_pos++];
+
+		out->area[out->data++] = hextab[(byte >> 4) & 0xF];
+		out->area[out->data++] = hextab[byte & 0xF];
+	}
+	smp->data.u.str = *out;
+	smp->data.type = SMP_T_STR;
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* Parses the input string as a hexadecimal number and returns it as a signed
+ * integer sample. Returns 0 (no sample) on the first non-hex character.
+ */
+static int sample_conv_hex2int(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	long long int result = 0;
+	int pos;
+	int digit;
+
+	for (pos = 0; pos < smp->data.u.str.data; pos++) {
+		digit = hex2i(smp->data.u.str.area[pos]);
+		if (digit < 0)
+			return 0;
+		result = (result << 4) + digit;
+	}
+
+	smp->data.u.sint = result;
+	smp->data.type = SMP_T_SINT;
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* Hashes the binary input with the DJB2 function into a 32-bit unsigned int.
+ * When the argument is non-zero, the result is avalanched with full_hash().
+ */
+static int sample_conv_djb2(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	unsigned int h = hash_djb2(smp->data.u.str.area, smp->data.u.str.data);
+
+	if (arg_p->data.sint)
+		h = full_hash(h);
+	smp->data.u.sint = h;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Returns the byte length of the input string as a signed integer sample. */
+static int sample_conv_length(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	int len = smp->data.u.str.data;
+
+	smp->data.u.sint = len;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+
+/* Converts ASCII letters 'A'-'Z' of the input string to lowercase in place.
+ * The sample is first made writable; returns 0 if that fails, 1 otherwise.
+ * Non-ASCII bytes are left untouched (deliberately locale-independent).
+ */
+static int sample_conv_str2lower(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	char *p, *end;
+
+	if (!smp_make_rw(smp))
+		return 0;
+
+	end = smp->data.u.str.area + smp->data.u.str.data;
+	for (p = smp->data.u.str.area; p < end; p++) {
+		if (*p >= 'A' && *p <= 'Z')
+			*p += 'a' - 'A';
+	}
+	return 1;
+}
+
+/* Converts ASCII letters 'a'-'z' of the input string to uppercase in place.
+ * The sample is first made writable; returns 0 if that fails, 1 otherwise.
+ * Non-ASCII bytes are left untouched (deliberately locale-independent).
+ */
+static int sample_conv_str2upper(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	char *p, *end;
+
+	if (!smp_make_rw(smp))
+		return 0;
+
+	end = smp->data.u.str.area + smp->data.u.str.data;
+	for (p = smp->data.u.str.area; p < end; p++) {
+		if (*p >= 'a' && *p <= 'z')
+			*p += 'A' - 'a';
+	}
+	return 1;
+}
+
+/* takes the IPv4 mask in args[0] and an optional IPv6 mask in args[1].
+ * Applies the mask to the input address. Returns 0 when the input is IPv6
+ * and no IPv6 mask was provided, 1 otherwise.
+ */
+static int sample_conv_ipmask(const struct arg *args, struct sample *smp, void *private)
+{
+	/* Attempt to convert to IPv4 to apply the correct mask. */
+	c_ipv62ip(smp);
+
+	if (smp->data.type == SMP_T_IPV4) {
+		smp->data.u.ipv4.s_addr &= args[0].data.ipv4.s_addr;
+		smp->data.type = SMP_T_IPV4;
+	}
+	else if (smp->data.type == SMP_T_IPV6) {
+		/* IPv6 cannot be converted without an IPv6 mask. */
+		if (args[1].type != ARGT_IPV6)
+			return 0;
+
+		/* mask 64 bits at a time; read_u64()/write_u64() keep both
+		 * operands in the same (network) byte order so AND is safe.
+		 */
+		write_u64(&smp->data.u.ipv6.s6_addr[0],
+			  read_u64(&smp->data.u.ipv6.s6_addr[0]) & read_u64(&args[1].data.ipv6.s6_addr[0]));
+		write_u64(&smp->data.u.ipv6.s6_addr[8],
+			  read_u64(&smp->data.u.ipv6.s6_addr[8]) & read_u64(&args[1].data.ipv6.s6_addr[8]));
+		smp->data.type = SMP_T_IPV6;
+	}
+
+	return 1;
+}
+
+/*
+ * This function implements a conversion specifier seeker for %N so it can be
+ * replaced before doing strftime (which does not know %N).
+ *
+ * <format> is the input format string which is used as a haystack
+ *
+ * The function fills multiple variables:
+ * <skip> is the len of the conversion specifier string which was found (ex: strlen(%N):2, strlen(%3N):3 strlen(%123N): 5)
+ * <width> is the width argument, default width is 9 (ex: %3N: 3, %4N: 4: %N: 9, %5N: 5)
+ *
+ * Returns a ptr to the first character of the conversion specifier ('%') or
+ * NULL if not found (in which case <skip> and <width> are set to 0)
+ */
+static const char *lookup_convspec_N(const char *format, int *skip, int *width)
+{
+	const char *p, *needle;
+	const char *digits;
+	int state;
+
+	p = format;
+
+	/* this looks for % in loop. The iteration stops when a %N conversion
+	 * specifier was found or there is no '%' anymore */
+lookagain:
+	while (p && *p) {
+		state = 0;
+		digits = NULL;
+
+		p = needle = strchr(p, '%');
+		/* Once we find a % we try to move forward in the string
+		 *
+		 * state 0: found %
+		 * state 1: digits (width)
+		 * state 2: N
+		 */
+		while (p && *p) {
+			switch (state) {
+			case 0:
+				state = 1;
+				break;
+
+			case 1:
+				if (isdigit((unsigned char)*p) && !digits) /* set the start of the digits */
+					digits = p;
+
+				if (isdigit((unsigned char)*p))
+					break;
+				else
+					state = 2;
+				/* if this is not a number anymore, we
+				 * don't want to increment p but try the
+				 * next state directly */
+				__fallthrough;
+			case 2:
+				if (*p == 'N')
+					goto found;
+				else
+					/* this was not a %N, start again */
+					goto lookagain;
+				break;
+			}
+			p++;
+		}
+	}
+
+	/* no %N anywhere in the format string */
+	*skip = 0;
+	*width = 0;
+	return NULL;
+
+found:
+	/* <skip> covers '%', the optional digits and the final 'N' */
+	*skip = p - needle + 1;
+	if (digits)
+		*width = atoi(digits);
+	else
+		*width = 9;
+	return needle;
+}
+
+/*
+ * strftime(3) does not implement nanoseconds, but we still want them in our
+ * date format.
+ *
+ * This function implements %N like in date(1) which gives you the nanoseconds part of the timestamp
+ * An optional field width can be specified, a maximum width of 9 is supported (ex: %3N %6N %9N)
+ *
+ * <format> is the format string
+ * <curr_date> in seconds since epoch
+ * <ns> only the nanoseconds part of the timestamp
+ * <local> choose local time instead of UTC time
+ *
+ * Returns the result of strftime in a trash buffer, or NULL when a temporary
+ * chunk could not be allocated or filled (callers must check for NULL).
+ */
+static struct buffer *conv_time_common(const char *format, time_t curr_date, uint64_t ns, int local)
+{
+	struct buffer *tmp_format = NULL;
+	struct buffer *res = NULL;
+	struct tm tm;
+	const char *p;
+	char ns_str[10] = {};
+	int set = 0;
+
+	if (local)
+		get_localtime(curr_date, &tm);
+	else
+		get_gmtime(curr_date, &tm);
+
+
+	/* we need to iterate in order to replace all the %N in the string */
+
+	p = format;
+	while (*p) {
+		const char *needle;
+		int skip = 0;
+		int cpy = 0;
+		int width = 0;
+
+		/* look for the next %N conversion specifier */
+		if (!(needle = lookup_convspec_N(p, &skip, &width)))
+			break;
+
+		if (width > 9) /* we don't handle more than 9 */
+			width = 9;
+		cpy = needle - p;
+
+		/* lazily allocate the rewritten format string on first %N */
+		if (!tmp_format)
+			tmp_format = alloc_trash_chunk();
+		if (!tmp_format)
+			goto error;
+
+		if (set != 9) /* if the snprintf wasn't done yet */
+			set = snprintf(ns_str, sizeof(ns_str), "%.9llu", (unsigned long long)ns);
+
+		if (chunk_istcat(tmp_format, ist2(p, cpy)) == 0) /* copy before the %N */
+			goto error;
+		if (chunk_istcat(tmp_format, ist2(ns_str, width)) == 0) /* copy the %N result with the right precision */
+			goto error;
+
+		p += skip + cpy; /* skip the %N */
+	}
+
+
+	if (tmp_format) { /* %N was found */
+		if (chunk_strcat(tmp_format, p) == 0) /* copy the end of the string if needed or just the \0 */
+			goto error;
+		res = get_trash_chunk();
+		res->data = strftime(res->area, res->size, tmp_format->area , &tm);
+	} else {
+		res = get_trash_chunk();
+		res->data = strftime(res->area, res->size, format, &tm);
+	}
+
+error:
+	/* free_trash_chunk() accepts NULL; res stays NULL on the error path */
+	free_trash_chunk(tmp_format);
+	return res;
+}
+
+
+
+/*
+ * Same as sample_conv_ltime but the input is in microseconds and the %N
+ * conversion specifier is supported for the sub-second part.
+ * Returns 0 when the time string could not be produced, 1 on success.
+ */
+static int sample_conv_us_ltime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *temp;
+	time_t curr_date = smp->data.u.sint / 1000000; /* convert us to s */
+	uint64_t ns = (smp->data.u.sint % 1000000) * 1000; /* us part to ns */
+
+	/* add offset */
+	if (args[1].type == ARGT_SINT)
+		curr_date += args[1].data.sint;
+
+	temp = conv_time_common(args[0].data.str.area, curr_date, ns, 1);
+	/* conv_time_common() returns NULL when its temporary chunk cannot be
+	 * allocated or filled; dereferencing it here would crash.
+	 */
+	if (!temp)
+		return 0;
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/*
+ * Same as sample_conv_ltime but the input is in milliseconds and the %N
+ * conversion specifier is supported for the sub-second part.
+ * Returns 0 when the time string could not be produced, 1 on success.
+ */
+static int sample_conv_ms_ltime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *temp;
+	time_t curr_date = smp->data.u.sint / 1000; /* convert ms to s */
+	uint64_t ns = (smp->data.u.sint % 1000) * 1000000; /* ms part to ns */
+
+	/* add offset */
+	if (args[1].type == ARGT_SINT)
+		curr_date += args[1].data.sint;
+
+	temp = conv_time_common(args[0].data.str.area, curr_date, ns, 1);
+	/* conv_time_common() returns NULL when its temporary chunk cannot be
+	 * allocated or filled; dereferencing it here would crash.
+	 */
+	if (!temp)
+		return 0;
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+
+/* takes an UINT value on input supposed to represent the time since EPOCH,
+ * adds an optional offset found in args[1] and emits a string representing
+ * the local time in the format specified in args[0] using strftime().
+ */
+static int sample_conv_ltime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *out;
+	struct tm broken_down;
+	/* mask to 55 bits so that adding the offset cannot produce a negative date */
+	time_t date = smp->data.u.sint & 0x007fffffffffffffLL;
+
+	/* optional offset in args[1] */
+	if (args[1].type == ARGT_SINT)
+		date += args[1].data.sint;
+
+	get_localtime(date, &broken_down);
+
+	out = get_trash_chunk();
+	out->data = strftime(out->area, out->size, args[0].data.str.area, &broken_down);
+	smp->data.u.str = *out;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/* Hashes the binary input with the SDBM function into a 32-bit unsigned int.
+ * When the argument is non-zero, the result is avalanched with full_hash().
+ */
+static int sample_conv_sdbm(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	unsigned int h = hash_sdbm(smp->data.u.str.area, smp->data.u.str.data);
+
+	if (arg_p->data.sint)
+		h = full_hash(h);
+	smp->data.u.sint = h;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/*
+ * Same as sample_conv_utime but the input is in microseconds and the %N
+ * conversion specifier is supported for the sub-second part.
+ * Returns 0 when the time string could not be produced, 1 on success.
+ */
+static int sample_conv_us_utime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *temp;
+	time_t curr_date = smp->data.u.sint / 1000000; /* convert us to s */
+	uint64_t ns = (smp->data.u.sint % 1000000) * 1000; /* us part to ns */
+
+	/* add offset */
+	if (args[1].type == ARGT_SINT)
+		curr_date += args[1].data.sint;
+
+	temp = conv_time_common(args[0].data.str.area, curr_date, ns, 0);
+	/* conv_time_common() returns NULL when its temporary chunk cannot be
+	 * allocated or filled; dereferencing it here would crash.
+	 */
+	if (!temp)
+		return 0;
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/*
+ * Same as sample_conv_utime but the input is in milliseconds and the %N
+ * conversion specifier is supported for the sub-second part.
+ * Returns 0 when the time string could not be produced, 1 on success.
+ */
+static int sample_conv_ms_utime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *temp;
+	time_t curr_date = smp->data.u.sint / 1000; /* convert ms to s */
+	uint64_t ns = (smp->data.u.sint % 1000) * 1000000; /* ms part to ns */
+
+	/* add offset */
+	if (args[1].type == ARGT_SINT)
+		curr_date += args[1].data.sint;
+
+	temp = conv_time_common(args[0].data.str.area, curr_date, ns, 0);
+	/* conv_time_common() returns NULL when its temporary chunk cannot be
+	 * allocated or filled; dereferencing it here would crash.
+	 */
+	if (!temp)
+		return 0;
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/* takes an UINT value on input supposed to represent the time since EPOCH,
+ * adds an optional offset found in args[1] and emits a string representing
+ * the UTC date in the format specified in args[0] using strftime().
+ */
+static int sample_conv_utime(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *out;
+	struct tm broken_down;
+	/* mask to 55 bits so that adding the offset cannot produce a negative date */
+	time_t date = smp->data.u.sint & 0x007fffffffffffffLL;
+
+	/* optional offset in args[1] */
+	if (args[1].type == ARGT_SINT)
+		date += args[1].data.sint;
+
+	get_gmtime(date, &broken_down);
+
+	out = get_trash_chunk();
+	out->data = strftime(out->area, out->size, args[0].data.str.area, &broken_down);
+	smp->data.u.str = *out;
+	smp->data.type = SMP_T_STR;
+	return 1;
+}
+
+/* Hashes the binary input with the WT6 function into a 32-bit unsigned int.
+ * When the argument is non-zero, the result is avalanched with full_hash().
+ */
+static int sample_conv_wt6(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	unsigned int h = hash_wt6(smp->data.u.str.area, smp->data.u.str.data);
+
+	if (arg_p->data.sint)
+		h = full_hash(h);
+	smp->data.u.sint = h;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Hashes the binary input into a 32-bit unsigned int using XXH32.
+ * The seed of the hash defaults to 0 but can be changed in argument 1.
+ */
+static int sample_conv_xxh32(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	/* arg_p->data.sint is 0 when no seed was given, so a plain
+	 * assignment covers both the default and the explicit case.
+	 */
+	unsigned int seed = arg_p->data.sint;
+
+	smp->data.u.sint = XXH32(smp->data.u.str.area, smp->data.u.str.data, seed);
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Hashes the binary input into a 64-bit unsigned int using XXH64.
+ * XXH64() returns an unsigned 64-bit value, but the sample storage only
+ * offers 64-bit signed, so the value is cast; this does not affect the
+ * hash distribution. The seed defaults to 0 and can be set in argument 1.
+ */
+static int sample_conv_xxh64(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	/* arg_p->data.sint is 0 when no seed was given */
+	unsigned long long int seed = (unsigned long long int)arg_p->data.sint;
+
+	smp->data.u.sint = (long long int)XXH64(smp->data.u.str.area,
+	                                        smp->data.u.str.data, seed);
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Hashes the binary input into a 64-bit unsigned int using XXH3, cast to
+ * signed for the sample storage (distribution is unaffected). The seed
+ * defaults to 0 and can be set in argument 1.
+ */
+static int sample_conv_xxh3(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	/* arg_p->data.sint is 0 when no seed was given */
+	unsigned long long int seed = (unsigned long long int)arg_p->data.sint;
+
+	smp->data.u.sint = (long long int)XXH3(smp->data.u.str.area,
+	                                       smp->data.u.str.data, seed);
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Hashes the binary input with CRC32 into a 32-bit unsigned int.
+ * When the argument is non-zero, the result is avalanched with full_hash().
+ */
+static int sample_conv_crc32(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	unsigned int h = hash_crc32(smp->data.u.str.area, smp->data.u.str.data);
+
+	if (arg_p->data.sint)
+		h = full_hash(h);
+	smp->data.u.sint = h;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* Hashes the binary input into crc32c (RFC4960, Appendix B [8].).
+ * When the argument is non-zero, the result is avalanched with full_hash().
+ */
+static int sample_conv_crc32c(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	unsigned int h = hash_crc32c(smp->data.u.str.area, smp->data.u.str.data);
+
+	if (arg_p->data.sint)
+		h = full_hash(h);
+	smp->data.u.sint = h;
+	smp->data.type = SMP_T_SINT;
+	return 1;
+}
+
+/* This function escape special json characters. The returned string can be
+ * safely set between two '"' and used as json string. The json string is
+ * defined like this:
+ *
+ * any Unicode character except '"' or '\' or control character
+ * \", \\, \/, \b, \f, \n, \r, \t, \u + four-hex-digits
+ *
+ * The enum input_type contain all the allowed mode for decoding the input
+ * string.
+ */
+enum input_type {
+	IT_ASCII = 0,  /* read input byte by byte, no UTF-8 decoding */
+	IT_UTF8,       /* strict UTF-8: any decoding error aborts the conversion */
+	IT_UTF8S,      /* UTF-8, silently skipping characters with decoding errors */
+	IT_UTF8P,      /* permissive UTF-8: only INVRANGE/BADSEQ errors abort */
+	IT_UTF8PS,     /* permissive UTF-8, silently skipping INVRANGE/BADSEQ */
+};
+
+/* Resolves the json converter's input-type keyword ("", "ascii", "utf8",
+ * "utf8s", "utf8p", "utf8ps") to its enum input_type value, replacing the
+ * string argument with an integer one. Returns 0 with <err> set on an
+ * unknown keyword, 1 on success.
+ */
+static int sample_conv_json_check(struct arg *arg, struct sample_conv *conv,
+                                  const char *file, int line, char **err)
+{
+	static const struct {
+		const char *name;
+		enum input_type type;
+	} codecs[] = {
+		{ "",       IT_ASCII  }, /* default: plain ASCII */
+		{ "ascii",  IT_ASCII  },
+		{ "utf8",   IT_UTF8   },
+		{ "utf8s",  IT_UTF8S  },
+		{ "utf8p",  IT_UTF8P  },
+		{ "utf8ps", IT_UTF8PS },
+	};
+	size_t i;
+
+	for (i = 0; i < sizeof(codecs) / sizeof(codecs[0]); i++) {
+		if (strcmp(arg->data.str.area, codecs[i].name) == 0) {
+			chunk_destroy(&arg->data.str);
+			arg->type = ARGT_SINT;
+			arg->data.sint = codecs[i].type;
+			return 1;
+		}
+	}
+
+	memprintf(err, "Unexpected input code type. "
+	          "Allowed value are 'ascii', 'utf8', 'utf8s', 'utf8p' and 'utf8ps'");
+	return 0;
+}
+
+/* Escapes the input string for safe inclusion between double quotes in a
+ * JSON document, honoring the input decoding mode selected at config time
+ * (see enum input_type and sample_conv_json_check()). The result is built
+ * in a trash chunk. Returns 0 when a decoding error is fatal for the mode
+ * or when the output does not fit in the trash chunk, 1 on success.
+ */
+static int sample_conv_json(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct buffer *temp;
+	char _str[7]; /* \u + 4 hex digit + null char for sprintf. */
+	const char *str;
+	int len;
+	enum input_type input_type = IT_ASCII;
+	unsigned int c;
+	unsigned int ret;
+	char *p;
+
+	/* sample_conv_json_check() stored the enum value as an integer arg */
+	input_type = arg_p->data.sint;
+
+	temp = get_trash_chunk();
+	temp->data = 0;
+
+	p = smp->data.u.str.area;
+	while (p < smp->data.u.str.area + smp->data.u.str.data) {
+
+		if (input_type == IT_ASCII) {
+			/* Read input as ASCII. */
+			c = *(unsigned char *)p;
+			p++;
+		}
+		else {
+			/* Read input as UTF8. */
+			ret = utf8_next(p,
+			                smp->data.u.str.data - ( p - smp->data.u.str.area),
+			                &c);
+			p += utf8_return_length(ret);
+
+			/* per-mode error handling: strict modes fail, the
+			 * 's' variants silently skip the bad character */
+			if (input_type == IT_UTF8 && utf8_return_code(ret) != UTF8_CODE_OK)
+				return 0;
+			if (input_type == IT_UTF8S && utf8_return_code(ret) != UTF8_CODE_OK)
+				continue;
+			if (input_type == IT_UTF8P && utf8_return_code(ret) & (UTF8_CODE_INVRANGE|UTF8_CODE_BADSEQ))
+				return 0;
+			if (input_type == IT_UTF8PS && utf8_return_code(ret) & (UTF8_CODE_INVRANGE|UTF8_CODE_BADSEQ))
+				continue;
+
+			/* Check too big values: \uXXXX can only encode the BMP */
+			if ((unsigned int)c > 0xffff) {
+				if (input_type == IT_UTF8 || input_type == IT_UTF8P)
+					return 0;
+				continue;
+			}
+		}
+
+		/* Convert character. */
+		if (c == '"') {
+			len = 2;
+			str = "\\\"";
+		}
+		else if (c == '\\') {
+			len = 2;
+			str = "\\\\";
+		}
+		else if (c == '/') {
+			len = 2;
+			str = "\\/";
+		}
+		else if (c == '\b') {
+			len = 2;
+			str = "\\b";
+		}
+		else if (c == '\f') {
+			len = 2;
+			str = "\\f";
+		}
+		else if (c == '\r') {
+			len = 2;
+			str = "\\r";
+		}
+		else if (c == '\n') {
+			len = 2;
+			str = "\\n";
+		}
+		else if (c == '\t') {
+			len = 2;
+			str = "\\t";
+		}
+		else if (c > 0xff || !isprint((unsigned char)c)) {
+			/* isprint generate a segfault if c is too big. The man says that
+			 * c must have the value of an unsigned char or EOF.
+			 */
+			len = 6;
+			_str[0] = '\\';
+			_str[1] = 'u';
+			snprintf(&_str[2], 5, "%04x", (unsigned short)c);
+			str = _str;
+		}
+		else {
+			len = 1;
+			_str[0] = c;
+			str = _str;
+		}
+
+		/* Check length: fail rather than emit a truncated escape */
+		if (temp->data + len > temp->size)
+			return 0;
+
+		/* Copy string. */
+		memcpy(temp->area + temp->data, str, len);
+		temp->data += len;
+	}
+
+	smp->flags &= ~SMP_F_CONST;
+	smp->data.u.str = *temp;
+	smp->data.type = SMP_T_STR;
+
+	return 1;
+}
+
+/* This sample function is designed to extract some bytes from an input buffer.
+ * First arg is the offset.
+ * Optional second arg is the length to truncate.
+ * Both may be immediate integers or variable names (resolved at run time).
+ * Returns 1 on success (possibly with an empty output on a final sample),
+ * 0 with an empty output when the args are invalid or more data is awaited.
+ */
+static int sample_conv_bytes(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct sample smp_arg0, smp_arg1;
+	long long start_idx, length;
+
+	/* determine the start_idx of the output */
+	smp_set_owner(&smp_arg0, smp->px, smp->sess, smp->strm, smp->opt);
+	if (!sample_conv_var2smp_sint(&arg_p[0], &smp_arg0) || smp_arg0.data.u.sint < 0) {
+		/* invalid or negative value */
+		goto fail;
+	}
+
+	if (smp_arg0.data.u.sint >= smp->data.u.str.data) {
+		/* arg0 >= the input length */
+		if (smp->opt & SMP_OPT_FINAL) {
+			/* empty output value on final smp */
+			smp->data.u.str.data = 0;
+			return 1;
+		}
+		goto wait;
+	}
+	start_idx = smp_arg0.data.u.sint;
+
+	/* length comes from arg1 if present, otherwise it's the remaining length */
+	length = smp->data.u.str.data - start_idx;
+	if (arg_p[1].type != ARGT_STOP) {
+		smp_set_owner(&smp_arg1, smp->px, smp->sess, smp->strm, smp->opt);
+		if (!sample_conv_var2smp_sint(&arg_p[1], &smp_arg1) || smp_arg1.data.u.sint < 0) {
+			/* invalid or negative value */
+			goto fail;
+		}
+
+		if (smp_arg1.data.u.sint > (smp->data.u.str.data - start_idx)) {
+			/* arg1 value is greater than the remaining length */
+			if (!(smp->opt & SMP_OPT_FINAL))
+				goto wait;
+			/* final sample: keep <length> capped to the remaining
+			 * length. BUG FIX: the previous code jumped straight
+			 * to the return here, skipping the area/data update
+			 * below, so start_idx was silently ignored and the
+			 * whole untruncated input was returned.
+			 */
+		}
+		else
+			length = smp_arg1.data.u.sint;
+	}
+
+	/* update the output using the start_idx and length */
+	smp->data.u.str.area += start_idx;
+	smp->data.u.str.data = length;
+
+	return 1;
+
+ fail:
+	smp->flags &= ~SMP_F_MAY_CHANGE;
+ wait:
+	smp->data.u.str.data = 0;
+	return 0;
+}
+
+/* Validates field/word converter arguments: args[0] is a non-zero integer
+ * index and args[1] is a non-empty string of separator characters.
+ */
+static int sample_conv_field_check(struct arg *args, struct sample_conv *conv,
+                                   const char *file, int line, char **err)
+{
+	if (args[0].type != ARGT_SINT) {
+		memprintf(err, "Unexpected arg type");
+		return 0;
+	}
+
+	/* index 0 is meaningless: fields are counted from 1 (or -1) */
+	if (!args[0].data.sint) {
+		memprintf(err, "Unexpected value 0 for index");
+		return 0;
+	}
+
+	if (args[1].type != ARGT_STR) {
+		memprintf(err, "Unexpected arg type");
+		return 0;
+	}
+
+	if (!args[1].data.str.data) {
+		memprintf(err, "Empty separators list");
+		return 0;
+	}
+
+	return 1;
+}
+
+/* This sample function is designed to return a selected part of a string (field).
+ * First arg is the index of the field (starts at 1; negative counts from the end)
+ * Second arg is a char list of separators (type string)
+ * Optional third arg is a count of consecutive fields to return (default 1).
+ * Returns 0 with an empty output when the field is not found, 1 otherwise.
+ */
+static int sample_conv_field(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	int field;
+	char *start, *end;
+	int i;
+	int count = (arg_p[2].type == ARGT_SINT) ? arg_p[2].data.sint : 1;
+
+	if (!arg_p[0].data.sint)
+		return 0;
+
+	if (arg_p[0].data.sint < 0) {
+		/* negative index: scan backwards from the end of the string */
+		field = -1;
+		end = start = smp->data.u.str.area + smp->data.u.str.data;
+		while (start > smp->data.u.str.area) {
+			for (i = 0 ; i < arg_p[1].data.str.data; i++) {
+				if (*(start-1) == arg_p[1].data.str.area[i]) {
+					if (field == arg_p[0].data.sint) {
+						/* target field reached; with count > 1,
+						 * keep absorbing separators to extend
+						 * the returned span */
+						if (count == 1)
+							goto found;
+						else if (count > 1)
+							count--;
+					} else {
+						end = start-1;
+						field--;
+					}
+					break;
+				}
+			}
+			start--;
+		}
+	} else {
+		/* positive index: scan forward from the beginning */
+		field = 1;
+		end = start = smp->data.u.str.area;
+		while (end - smp->data.u.str.area < smp->data.u.str.data) {
+			for (i = 0 ; i < arg_p[1].data.str.data; i++) {
+				if (*end == arg_p[1].data.str.area[i]) {
+					if (field == arg_p[0].data.sint) {
+						if (count == 1)
+							goto found;
+						else if (count > 1)
+							count--;
+					} else {
+						start = end+1;
+						field++;
+					}
+					break;
+				}
+			}
+			end++;
+		}
+	}
+
+	/* Field not found */
+	if (field != arg_p[0].data.sint) {
+		smp->data.u.str.data = 0;
+		return 0;
+	}
+found:
+	smp->data.u.str.data = end - start;
+	/* If ret string is len 0, no need to
+	   change pointers or to update size */
+	if (!smp->data.u.str.data)
+		return 1;
+
+	/* Compute remaining size if needed
+	   Note: smp->data.u.str.size cannot be set to 0 */
+	if (smp->data.u.str.size)
+		smp->data.u.str.size -= start - smp->data.u.str.area;
+
+	smp->data.u.str.area = start;
+
+	return 1;
+}
+
+/* This sample function is designed to return a word from a string.
+ * First arg is the index of the word (starts at 1; negative counts from the end)
+ * Second arg is a char list of words separators (type string)
+ * Optional third arg is a count of consecutive words to return (default 1).
+ * Unlike the field converter, consecutive separators delimit a single word
+ * boundary (empty words are not counted).
+ * Returns 0 with an empty output when the word is not found, 1 otherwise.
+ */
+static int sample_conv_word(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	int word;
+	char *start, *end;
+	int i, issep, inword;
+	int count = (arg_p[2].type == ARGT_SINT) ? arg_p[2].data.sint : 1;
+
+	if (!arg_p[0].data.sint)
+		return 0;
+
+	word = 0;
+	inword = 0;  /* tracks whether the scan is currently inside a word */
+	if (arg_p[0].data.sint < 0) {
+		/* negative index: scan backwards from the end of the string */
+		end = start = smp->data.u.str.area + smp->data.u.str.data;
+		while (start > smp->data.u.str.area) {
+			issep = 0;
+			for (i = 0 ; i < arg_p[1].data.str.data; i++) {
+				if (*(start-1) == arg_p[1].data.str.area[i]) {
+					issep = 1;
+					break;
+				}
+			}
+			if (!inword) {
+				if (!issep) {
+					/* word start: advance the counter until the
+					 * target word, then pin <end> */
+					if (word != arg_p[0].data.sint) {
+						word--;
+						end = start;
+					}
+					inword = 1;
+				}
+			}
+			else if (issep) {
+				if (word == arg_p[0].data.sint) {
+					if (count == 1)
+						goto found;
+					else if (count > 1)
+						count--;
+				}
+				inword = 0;
+			}
+			start--;
+		}
+	} else {
+		/* positive index: scan forward from the beginning */
+		end = start = smp->data.u.str.area;
+		while (end - smp->data.u.str.area < smp->data.u.str.data) {
+			issep = 0;
+			for (i = 0 ; i < arg_p[1].data.str.data; i++) {
+				if (*end == arg_p[1].data.str.area[i]) {
+					issep = 1;
+					break;
+				}
+			}
+			if (!inword) {
+				if (!issep) {
+					if (word != arg_p[0].data.sint) {
+						word++;
+						start = end;
+					}
+					inword = 1;
+				}
+			}
+			else if (issep) {
+				if (word == arg_p[0].data.sint) {
+					if (count == 1)
+						goto found;
+					else if (count > 1)
+						count--;
+				}
+				inword = 0;
+			}
+			end++;
+		}
+	}
+
+	/* Field not found */
+	if (word != arg_p[0].data.sint) {
+		smp->data.u.str.data = 0;
+		return 0;
+	}
+found:
+	smp->data.u.str.data = end - start;
+	/* If ret string is len 0, no need to
+	   change pointers or to update size */
+	if (!smp->data.u.str.data)
+		return 1;
+
+
+	/* Compute remaining size if needed
+	   Note: smp->data.u.str.size cannot be set to 0 */
+	if (smp->data.u.str.size)
+		smp->data.u.str.size -= start - smp->data.u.str.area;
+
+	smp->data.u.str.area = start;
+
+	return 1;
+}
+
+/* Validates the param converter arguments: the optional delimiter (arg[1]),
+ * when given as a string, must be exactly one character long.
+ */
+static int sample_conv_param_check(struct arg *arg, struct sample_conv *conv,
+                                   const char *file, int line, char **err)
+{
+	if (arg[1].type != ARGT_STR)
+		return 1;
+
+	if (arg[1].data.str.data != 1) {
+		memprintf(err, "Delimiter must be exactly 1 character.");
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Extracts the value of parameter <name> (arg 0) from a delimiter-separated
+ * "key=value" list such as a query string. The delimiter defaults to '&' and
+ * may be overridden by arg 1. Returns 1 with the value (possibly empty when
+ * the parameter is present without '='), or 0 with an empty output when the
+ * parameter is absent.
+ */
+static int sample_conv_param(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	char *pos, *end, *pend, *equal;
+	char delim = '&';
+	const char *name = arg_p[0].data.str.area;
+	size_t name_l = arg_p[0].data.str.data;
+
+	if (arg_p[1].type == ARGT_STR)
+		delim = *arg_p[1].data.str.area;
+
+	pos = smp->data.u.str.area;
+	end = pos + smp->data.u.str.data;
+	while (pos < end) {
+		/* <equal> points just past a would-be parameter name */
+		equal = pos + name_l;
+		/* Parameter not found */
+		if (equal > end)
+			break;
+
+		if (equal == end || *equal == delim) {
+			if (memcmp(pos, name, name_l) == 0) {
+				/* input contains parameter, but no value is supplied */
+				smp->data.u.str.data = 0;
+				return 1;
+			}
+			pos = equal + 1;
+			continue;
+		}
+
+		if (*equal == '=' && memcmp(pos, name, name_l) == 0) {
+			/* match: the value runs until the next delimiter or end */
+			pos = equal + 1;
+			pend = memchr(pos, delim, end - pos);
+			if (pend == NULL)
+				pend = end;
+
+			if (smp->data.u.str.size)
+				smp->data.u.str.size -= pos - smp->data.u.str.area;
+			smp->data.u.str.area = pos;
+			smp->data.u.str.data = pend - pos;
+			return 1;
+		}
+		/* find the next delimiter and set position to character after that */
+		pos = memchr(pos, delim, end - pos);
+		if (pos == NULL)
+			pos = end;
+		else
+			pos++;
+	}
+	/* Parameter not found */
+	smp->data.u.str.data = 0;
+	return 0;
+}
+
+/* Parses the optional flags string (arg 2) of the regsub converter and
+ * transfers them onto the regex argument's type_flags: 'i' enables
+ * case-insensitive matching, 'g' enables global replacement. Returns 0
+ * with <err> set on any other flag character, 1 on success.
+ */
+static int sample_conv_regsub_check(struct arg *args, struct sample_conv *conv,
+                                    const char *file, int line, char **err)
+{
+	struct arg *arg = args;
+	char *p;
+	int len;
+
+	/* arg0 is a regex, it uses type_flag for ICASE and global match */
+	arg[0].type_flags = 0;
+
+	if (arg[2].type != ARGT_STR)
+		return 1;
+
+	for (p = arg[2].data.str.area, len = arg[2].data.str.data; len; p++, len--) {
+		switch (*p) {
+		case 'i':
+			arg[0].type_flags |= ARGF_REG_ICASE;
+			break;
+		case 'g':
+			arg[0].type_flags |= ARGF_REG_GLOB;
+			break;
+		default:
+			memprintf(err, "invalid regex flag '%c', only 'i' and 'g' are supported", *p);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+/* This sample function is designed to do the equivalent of s/match/replace/ on
+ * the input string. It applies a regex and restarts from the last matched
+ * location until nothing matches anymore. First arg is the regex to apply to
+ * the input string, second arg is the replacement expression. The result is
+ * built in a trash chunk; output exceeding it is silently truncated. Always
+ * returns 1.
+ */
+static int sample_conv_regsub(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	char *start, *end;
+	struct my_regex *reg = arg_p[0].data.reg;
+	regmatch_t pmatch[MAX_MATCH];
+	struct buffer *trash = get_trash_chunk();
+	struct buffer *output;
+	int flag, max;
+	int found;
+
+	start = smp->data.u.str.area;
+	end = start + smp->data.u.str.data;
+
+	flag = 0;
+	while (1) {
+		/* check for last round which is used to copy remaining parts
+		 * when not running in global replacement mode.
+		 */
+		found = 0;
+		if ((arg_p[0].type_flags & ARGF_REG_GLOB) || !(flag & REG_NOTBOL)) {
+			/* Note: we can have start == end on empty strings or at the end */
+			found = regex_exec_match2(reg, start, end - start, MAX_MATCH, pmatch, flag);
+		}
+
+		/* no match: pretend the "match" starts at the end so the copy
+		 * below takes the whole remaining tail */
+		if (!found)
+			pmatch[0].rm_so = end - start;
+
+		/* copy the heading non-matching part (which may also be the tail if nothing matches) */
+		max = trash->size - trash->data;
+		if (max && pmatch[0].rm_so > 0) {
+			if (max > pmatch[0].rm_so)
+				max = pmatch[0].rm_so;
+			memcpy(trash->area + trash->data, start, max);
+			trash->data += max;
+		}
+
+		if (!found)
+			break;
+
+		/* expand the replacement into a temporary chunk first so that
+		 * back-references can be resolved before copying */
+		output = alloc_trash_chunk();
+		if (!output)
+			break;
+
+		output->data = exp_replace(output->area, output->size, start, arg_p[1].data.str.area, pmatch);
+
+		/* replace the matching part */
+		max = output->size - output->data;
+		if (max) {
+			if (max > output->data)
+				max = output->data;
+			memcpy(trash->area + trash->data,
+			       output->area, max);
+			trash->data += max;
+		}
+
+		free_trash_chunk(output);
+
+		/* stop here if we're done with this string */
+		if (start >= end)
+			break;
+
+		/* We have a special case for matches of length 0 (eg: "x*y*").
+		 * These ones are considered to match in front of a character,
+		 * so we have to copy that character and skip to the next one.
+		 */
+		if (!pmatch[0].rm_eo) {
+			if (trash->data < trash->size)
+				trash->area[trash->data++] = start[pmatch[0].rm_eo];
+			pmatch[0].rm_eo++;
+		}
+
+		start += pmatch[0].rm_eo;
+		flag |= REG_NOTBOL;
+	}
+
+	smp->data.u.str = *trash;
+	return 1;
+}
+
+/* This function checks an operator entry. It expects a string.
+ * The string can be an integer or a variable name. On success the string
+ * argument is converted in place either to an ARGT_VAR (by vars_check_arg)
+ * or to an ARGT_SINT. Returns 0 with <err> set otherwise.
+ */
+static int check_operator(struct arg *args, struct sample_conv *conv,
+                          const char *file, int line, char **err)
+{
+	const char *str;
+	const char *end;
+	long long int i;
+
+	/* Try to decode a variable. The 'err' variable is intentionally left
+	 * NULL since the operators accept an integer as argument in which case
+	 * vars_check_arg call will fail.
+	 */
+	if (vars_check_arg(&args[0], NULL))
+		return 1;
+
+	/* Try to convert an integer */
+	str = args[0].data.str.area;
+	end = str + strlen(str);
+	i = read_int64(&str, end);
+	if (*str != '\0') {
+		/* trailing garbage after the number */
+		memprintf(err, "expects an integer or a variable name");
+		return 0;
+	}
+
+	chunk_destroy(&args[0].data.str);
+	args[0].type = ARGT_SINT;
+	args[0].data.sint = i;
+	return 1;
+}
+
+/* This function returns a sample struct filled with an arg content.
+ * If the arg contains an integer, the integer is returned in the
+ * sample. If the arg contains a variable descriptor, it returns the
+ * variable value.
+ *
+ * This function returns 0 if an error occurs, otherwise it returns 1.
+ */
+int sample_conv_var2smp_sint(const struct arg *arg, struct sample *smp)
+{
+	if (arg->type == ARGT_SINT) {
+		/* immediate integer argument */
+		smp->data.type = SMP_T_SINT;
+		smp->data.u.sint = arg->data.sint;
+		return 1;
+	}
+	if (arg->type == ARGT_VAR)
+		return sample_conv_var2smp(&arg->data.var, smp, SMP_T_SINT);
+	/* unsupported argument type */
+	return 0;
+}
+
+/* Takes a SINT on input, applies a bitwise one's complement (NOT) and
+ * returns the SINT result. Always succeeds.
+ */
+static int sample_conv_binary_cpl(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	smp->data.u.sint = ~smp->data.u.sint;
+	return 1;
+}
+
+/* Takes a SINT on input, applies a binary "and" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ */
+static int sample_conv_binary_and(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+ smp->data.u.sint &= tmp.data.u.sint;
+ return 1;
+}
+
+/* Takes a SINT on input, applies a binary "or" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ */
+static int sample_conv_binary_or(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+ smp->data.u.sint |= tmp.data.u.sint;
+ return 1;
+}
+
+/* Takes a SINT on input, applies a binary "xor" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ */
+static int sample_conv_binary_xor(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+ smp->data.u.sint ^= tmp.data.u.sint;
+ return 1;
+}
+
/* Saturating signed 64-bit addition: returns a+b, clamped to LLONG_MIN or
 * LLONG_MAX instead of wrapping on overflow. Overflow is only possible when
 * both operands have the same sign, and the pre-checks below can never
 * overflow themselves:
 *   - a,b both >= 0:     overflow iff b > LLONG_MAX - a
 *   - a,b both negative: overflow iff b < LLONG_MIN - a
 */
static inline long long int arith_add(long long int a, long long int b)
{
	if (a >= 0 && b >= 0) {
		if (b > LLONG_MAX - a)
			return LLONG_MAX;
	}
	else if (a < 0 && b < 0) {
		if (b < LLONG_MIN - a)
			return LLONG_MIN;
	}
	return a + b;
}
+
+/* Takes a SINT on input, applies an arithmetic "add" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ */
+static int sample_conv_arith_add(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+ smp->data.u.sint = arith_add(smp->data.u.sint, tmp.data.u.sint);
+ return 1;
+}
+
+/* Takes a SINT on input, applies an arithmetic "sub" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ */
+static int sample_conv_arith_sub(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+
+ /* We cannot represent -LLONG_MIN because abs(LLONG_MIN) is greater
+ * than abs(LLONG_MAX). So, the following code use LLONG_MAX in place
+ * of -LLONG_MIN and correct the result.
+ */
+ if (tmp.data.u.sint == LLONG_MIN) {
+ smp->data.u.sint = arith_add(smp->data.u.sint, LLONG_MAX);
+ if (smp->data.u.sint < LLONG_MAX)
+ smp->data.u.sint++;
+ return 1;
+ }
+
+ /* standard subtraction: we use the "add" function and negate
+ * the second operand.
+ */
+ smp->data.u.sint = arith_add(smp->data.u.sint, -tmp.data.u.sint);
+ return 1;
+}
+
+/* Takes a SINT on input, applies an arithmetic "mul" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ * If the result makes an overflow, then the largest possible quantity is
+ * returned.
+ */
+static int sample_conv_arith_mul(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ struct sample tmp;
+ long long int c;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+
+ /* prevent divide by 0 during the check */
+ if (!smp->data.u.sint || !tmp.data.u.sint) {
+ smp->data.u.sint = 0;
+ return 1;
+ }
+
+ /* The multiply between LLONG_MIN and -1 returns a
+ * "floating point exception".
+ */
+ if (smp->data.u.sint == LLONG_MIN && tmp.data.u.sint == -1) {
+ smp->data.u.sint = LLONG_MAX;
+ return 1;
+ }
+
+ /* execute standard multiplication. */
+ c = smp->data.u.sint * tmp.data.u.sint;
+
+ /* check for overflow and makes capped multiply. */
+ if (smp->data.u.sint != c / tmp.data.u.sint) {
+ if ((smp->data.u.sint < 0) == (tmp.data.u.sint < 0)) {
+ smp->data.u.sint = LLONG_MAX;
+ return 1;
+ }
+ smp->data.u.sint = LLONG_MIN;
+ return 1;
+ }
+ smp->data.u.sint = c;
+ return 1;
+}
+
+/* Takes a SINT on input, applies an arithmetic "div" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ * If arg_p makes the result overflow, then the largest possible quantity is
+ * returned.
+ */
+static int sample_conv_arith_div(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+
+ if (tmp.data.u.sint) {
+ /* The divide between LLONG_MIN and -1 returns a
+ * "floating point exception".
+ */
+ if (smp->data.u.sint == LLONG_MIN && tmp.data.u.sint == -1) {
+ smp->data.u.sint = LLONG_MAX;
+ return 1;
+ }
+ smp->data.u.sint /= tmp.data.u.sint;
+ return 1;
+ }
+ smp->data.u.sint = LLONG_MAX;
+ return 1;
+}
+
+/* Takes a SINT on input, applies an arithmetic "mod" with the SINT directly in
+ * arg_p or in the variable described in arg_p, and returns the SINT result.
+ * If arg_p makes the result overflow, then 0 is returned.
+ */
+static int sample_conv_arith_mod(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ struct sample tmp;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_sint(arg_p, &tmp))
+ return 0;
+
+ if (tmp.data.u.sint) {
+ /* The divide between LLONG_MIN and -1 returns a
+ * "floating point exception".
+ */
+ if (smp->data.u.sint == LLONG_MIN && tmp.data.u.sint == -1) {
+ smp->data.u.sint = 0;
+ return 1;
+ }
+ smp->data.u.sint %= tmp.data.u.sint;
+ return 1;
+ }
+ smp->data.u.sint = 0;
+ return 1;
+}
+
+/* Takes an SINT on input, applies an arithmetic "neg" and returns the SINT
+ * result.
+ */
+static int sample_conv_arith_neg(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ if (smp->data.u.sint == LLONG_MIN)
+ smp->data.u.sint = LLONG_MAX;
+ else
+ smp->data.u.sint = -smp->data.u.sint;
+ return 1;
+}
+
+/* Takes a SINT on input, returns true is the value is non-null, otherwise
+ * false. The output is a BOOL.
+ */
+static int sample_conv_arith_bool(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ smp->data.u.sint = !!smp->data.u.sint;
+ smp->data.type = SMP_T_BOOL;
+ return 1;
+}
+
+/* Takes a SINT on input, returns false is the value is non-null, otherwise
+ * truee. The output is a BOOL.
+ */
+static int sample_conv_arith_not(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ smp->data.u.sint = !smp->data.u.sint;
+ smp->data.type = SMP_T_BOOL;
+ return 1;
+}
+
+/* Takes a SINT on input, returns true is the value is odd, otherwise false.
+ * The output is a BOOL.
+ */
+static int sample_conv_arith_odd(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ smp->data.u.sint = smp->data.u.sint & 1;
+ smp->data.type = SMP_T_BOOL;
+ return 1;
+}
+
+/* Takes a SINT on input, returns true is the value is even, otherwise false.
+ * The output is a BOOL.
+ */
+static int sample_conv_arith_even(const struct arg *arg_p,
+ struct sample *smp, void *private)
+{
+ smp->data.u.sint = !(smp->data.u.sint & 1);
+ smp->data.type = SMP_T_BOOL;
+ return 1;
+}
+
/* appends an optional const string, an optional variable contents and another
 * optional const string to an existing string.
 * arg_p[0]: first constant string, arg_p[1]: optional variable whose contents
 * are cast to a string, arg_p[2]: second constant string. The result is built
 * in a trash chunk (clamped to size-1, always NUL-terminated) and duplicated
 * into the sample's own storage with smp_dup() before the chunk is released.
 */
static int sample_conv_concat(const struct arg *arg_p, struct sample *smp, void *private)
{
	struct buffer *trash;
	struct sample tmp;
	int max;

	trash = alloc_trash_chunk();
	if (!trash)
		return 0;

	/* seed the buffer with the input sample, truncated to the chunk size */
	trash->data = smp->data.u.str.data;
	if (trash->data > trash->size - 1)
		trash->data = trash->size - 1;

	memcpy(trash->area, smp->data.u.str.area, trash->data);
	trash->area[trash->data] = 0;

	/* append first string */
	max = arg_p[0].data.str.data;
	if (max > trash->size - 1 - trash->data)
		max = trash->size - 1 - trash->data;

	if (max) {
		memcpy(trash->area + trash->data, arg_p[0].data.str.area, max);
		trash->data += max;
		trash->area[trash->data] = 0;
	}

	/* append second string (variable) if it's found and we can turn it
	 * into a string.
	 */
	smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
	if (arg_p[1].type == ARGT_VAR && vars_get_by_desc(&arg_p[1].data.var, &tmp, NULL) &&
	    (sample_casts[tmp.data.type][SMP_T_STR] == c_none ||
	     sample_casts[tmp.data.type][SMP_T_STR](&tmp))) {

		max = tmp.data.u.str.data;
		if (max > trash->size - 1 - trash->data)
			max = trash->size - 1 - trash->data;

		if (max) {
			memcpy(trash->area + trash->data, tmp.data.u.str.area,
			       max);
			trash->data += max;
			trash->area[trash->data] = 0;
		}
	}

	/* append third string */
	max = arg_p[2].data.str.data;
	if (max > trash->size - 1 - trash->data)
		max = trash->size - 1 - trash->data;

	if (max) {
		memcpy(trash->area + trash->data, arg_p[2].data.str.area, max);
		trash->data += max;
		trash->area[trash->data] = 0;
	}

	/* detach the result from the trash chunk before releasing it */
	smp->data.u.str = *trash;
	smp->data.type = SMP_T_STR;
	smp_dup(smp);
	free_trash_chunk(trash);
	return 1;
}
+
+/* This function checks the "concat" converter's arguments and extracts the
+ * variable name and its scope.
+ */
+static int smp_check_concat(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ /* Try to decode a variable. */
+ if (args[1].data.str.data > 0 && !vars_check_arg(&args[1], NULL)) {
+ memprintf(err, "failed to register variable name '%s'",
+ args[1].data.str.area);
+ return 0;
+ }
+ return 1;
+}
+
/* Append delimiter (only to a non empty input) followed by the optional
 * variable contents concatenated with the optional suffix.
 * arg_p[0]: delimiter, arg_p[1]: optional variable, arg_p[2]: optional
 * suffix. Everything is assembled in a trash chunk (clamped to size-1,
 * always NUL-terminated) then duplicated into the sample with smp_dup()
 * before the chunk is released.
 */
static int sample_conv_add_item(const struct arg *arg_p, struct sample *smp, void *private)
{
	struct buffer *tmpbuf;
	struct sample tmp;
	size_t max;
	int var_available;

	tmpbuf = alloc_trash_chunk();
	if (!tmpbuf)
		return 0;

	/* seed the buffer with the input sample, truncated to the chunk size */
	tmpbuf->data = smp->data.u.str.data;
	if (tmpbuf->data > tmpbuf->size - 1)
		tmpbuf->data = tmpbuf->size - 1;

	memcpy(tmpbuf->area, smp->data.u.str.area, tmpbuf->data);
	tmpbuf->area[tmpbuf->data] = 0;

	/* Check if variable is found and we can turn into a string. */
	var_available = 0;
	smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
	if (arg_p[1].type == ARGT_VAR && vars_get_by_desc(&arg_p[1].data.var, &tmp, NULL) &&
	    (sample_casts[tmp.data.type][SMP_T_STR] == c_none ||
	     sample_casts[tmp.data.type][SMP_T_STR](&tmp)))
		var_available = 1;

	/* Append delimiter only if input is not empty and either
	 * the variable or the suffix are not empty
	 */
	if (smp->data.u.str.data && ((var_available && tmp.data.u.str.data) ||
	    arg_p[2].data.str.data)) {
		max = arg_p[0].data.str.data;
		if (max > tmpbuf->size - 1 - tmpbuf->data)
			max = tmpbuf->size - 1 - tmpbuf->data;

		if (max) {
			memcpy(tmpbuf->area + tmpbuf->data, arg_p[0].data.str.area, max);
			tmpbuf->data += max;
			tmpbuf->area[tmpbuf->data] = 0;
		}
	}

	/* Append variable contents if variable is found and turned into string. */
	if (var_available) {
		max = tmp.data.u.str.data;
		if (max > tmpbuf->size - 1 - tmpbuf->data)
			max = tmpbuf->size - 1 - tmpbuf->data;

		if (max) {
			memcpy(tmpbuf->area + tmpbuf->data, tmp.data.u.str.area, max);
			tmpbuf->data += max;
			tmpbuf->area[tmpbuf->data] = 0;
		}
	}

	/* Append optional suffix. */
	max = arg_p[2].data.str.data;
	if (max > tmpbuf->size - 1 - tmpbuf->data)
		max = tmpbuf->size - 1 - tmpbuf->data;

	if (max) {
		memcpy(tmpbuf->area + tmpbuf->data, arg_p[2].data.str.area, max);
		tmpbuf->data += max;
		tmpbuf->area[tmpbuf->data] = 0;
	}

	/* detach the result from the trash chunk before releasing it */
	smp->data.u.str = *tmpbuf;
	smp->data.type = SMP_T_STR;
	smp_dup(smp);
	free_trash_chunk(tmpbuf);
	return 1;
}
+
+/* Check the "add_item" converter's arguments and extracts the
+ * variable name and its scope.
+ */
+static int smp_check_add_item(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ /* Try to decode a variable. */
+ if (args[1].data.str.data > 0 && !vars_check_arg(&args[1], NULL)) {
+ memprintf(err, "failed to register variable name '%s'",
+ args[1].data.str.area);
+ return 0;
+ }
+
+ if (args[1].data.str.data == 0 && args[2].data.str.data == 0) {
+ memprintf(err, "one of the optional arguments has to be nonempty");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Compares string with a variable containing a string. Return value
+ * is compatible with strcmp(3)'s return value.
+ */
+static int sample_conv_strcmp(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct sample tmp;
+ int max, result;
+
+ smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (arg_p[0].type != ARGT_VAR)
+ return 0;
+
+ if (!sample_conv_var2smp(&arg_p[0].data.var, &tmp, SMP_T_STR))
+ return 0;
+
+ max = MIN(smp->data.u.str.data, tmp.data.u.str.data);
+ result = strncmp(smp->data.u.str.area, tmp.data.u.str.area, max);
+ if (result == 0) {
+ if (smp->data.u.str.data != tmp.data.u.str.data) {
+ if (smp->data.u.str.data < tmp.data.u.str.data) {
+ result = -1;
+ }
+ else {
+ result = 1;
+ }
+ }
+ }
+
+ smp->data.u.sint = result;
+ smp->data.type = SMP_T_SINT;
+ return 1;
+}
+/*
+ * This converter can takes a Host header value as defined by rfc9110#section-7.2
+ * Host = uri-host [ ":" port ] ;
+ * It returns the uri-host value in lowecase with the port stripped.
+ */
+static int sample_conv_host_only(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ /* Working cases: hostname00, hostname00:80, 127.0.0.1, 127.0.0.1:80, [::1], [::1]:80 */
+ char *beg = smp->data.u.str.area;
+ char *end = smp->data.u.str.area + smp->data.u.str.data - 1;
+ char *p;
+
+ for (p = end; p >= beg; p--) {
+ if (*p == ':' || *p == ']')
+ break;
+ }
+
+ if (p >= beg && *p == ':')
+ smp->data.u.str.data = p - beg;
+ /* if no port part was found, the hostname is the whole string */
+
+ smp->data.type = SMP_T_STR;
+
+ return sample_conv_str2lower(arg_p, smp, NULL);
+}
+
+/*
+ * This converter can takes a Host header value as defined by rfc9110#section-7.2
+ * Host = uri-host [ ":" port ] ;
+ * It returns the port value as a int.
+ */
+static int sample_conv_port_only(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ /* Working cases: hostname00, hostname00:80, 127.0.0.1, 127.0.0.1:80, [::1], [::1]:80 */
+ char *beg = smp->data.u.str.area;
+ char *end = smp->data.u.str.area + smp->data.u.str.data - 1;
+ char *p;
+
+ for (p = end; p >= beg; p--) {
+ if (*p == ':' || *p == ']')
+ break;
+ }
+
+ smp->data.type = SMP_T_SINT;
+ if (p >= beg && *p == ':' && ++p <= end) {
+ smp->data.u.sint = strl2ui(p, smp->data.u.str.data + smp->data.u.str.area - p);
+ } else {
+ smp->data.u.sint = 0;
+ }
+ return 1;
+}
+
+
+/* Takes a boolean as input. Returns the first argument if that boolean is true and
+ * the second argument otherwise.
+ */
+static int sample_conv_iif(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_CONST;
+
+ if (smp->data.u.sint) {
+ smp->data.u.str.data = arg_p[0].data.str.data;
+ smp->data.u.str.area = arg_p[0].data.str.area;
+ }
+ else {
+ smp->data.u.str.data = arg_p[1].data.str.data;
+ smp->data.u.str.area = arg_p[1].data.str.area;
+ }
+
+ return 1;
+}
+
+#define GRPC_MSG_COMPRESS_FLAG_SZ 1 /* 1 byte */
+#define GRPC_MSG_LENGTH_SZ 4 /* 4 bytes */
+#define GRPC_MSG_HEADER_SZ (GRPC_MSG_COMPRESS_FLAG_SZ + GRPC_MSG_LENGTH_SZ)
+
+/*
+ * Extract the field value of an input binary sample. Takes a mandatory argument:
+ * the protocol buffers field identifier (dotted notation) internally represented
+ * as an array of unsigned integers and its size.
+ * Return 1 if the field was found, 0 if not.
+ */
+static int sample_conv_ungrpc(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ unsigned char *pos;
+ size_t grpc_left;
+
+ pos = (unsigned char *)smp->data.u.str.area;
+ grpc_left = smp->data.u.str.data;
+
+ while (grpc_left > GRPC_MSG_HEADER_SZ) {
+ size_t grpc_msg_len, left;
+
+ grpc_msg_len = left = ntohl(*(uint32_t *)(pos + GRPC_MSG_COMPRESS_FLAG_SZ));
+
+ pos += GRPC_MSG_HEADER_SZ;
+ grpc_left -= GRPC_MSG_HEADER_SZ;
+
+ if (grpc_left < left)
+ return 0;
+
+ if (protobuf_field_lookup(arg_p, smp, &pos, &left))
+ return 1;
+
+ grpc_left -= grpc_msg_len;
+ }
+
+ return 0;
+}
+
+static int sample_conv_protobuf(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ unsigned char *pos;
+ size_t left;
+
+ pos = (unsigned char *)smp->data.u.str.area;
+ left = smp->data.u.str.data;
+
+ return protobuf_field_lookup(arg_p, smp, &pos, &left);
+}
+
+static int sample_conv_protobuf_check(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ if (!args[1].type) {
+ args[1].type = ARGT_SINT;
+ args[1].data.sint = PBUF_T_BINARY;
+ }
+ else {
+ int pbuf_type;
+
+ pbuf_type = protobuf_type(args[1].data.str.area);
+ if (pbuf_type == -1) {
+ memprintf(err, "Wrong protocol buffer type '%s'", args[1].data.str.area);
+ return 0;
+ }
+
+ chunk_destroy(&args[1].data.str);
+ args[1].type = ARGT_SINT;
+ args[1].data.sint = pbuf_type;
+ }
+
+ return 1;
+}
+
+/*
+ * Extract the tag value of an input binary sample. Takes a mandatory argument:
+ * the FIX protocol tag identifier.
+ * Return 1 if the tag was found, 0 if not.
+ */
+static int sample_conv_fix_tag_value(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct ist value;
+
+ smp->flags &= ~SMP_F_MAY_CHANGE;
+ value = fix_tag_value(ist2(smp->data.u.str.area, smp->data.u.str.data),
+ arg_p[0].data.sint);
+ if (!istlen(value)) {
+ if (isttest(value)) {
+ /* value != IST_NULL, need more data */
+ smp->flags |= SMP_F_MAY_CHANGE;
+ }
+ return 0;
+ }
+
+ smp->data.u.str = ist2buf(value);
+ smp->flags |= SMP_F_CONST;
+
+ return 1;
+}
+
+/* This function checks the "fix_tag_value" converter configuration.
+ * It expects a "known" (by HAProxy) tag name or ID.
+ * Tag string names are converted to their ID counterpart because this is the
+ * format they are sent over the wire.
+ */
+static int sample_conv_fix_value_check(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ struct ist str;
+ unsigned int tag;
+
+ str = ist2(args[0].data.str.area, args[0].data.str.data);
+ tag = fix_tagid(str);
+ if (!tag) {
+ memprintf(err, "Unknown FIX tag name '%s'", args[0].data.str.area);
+ return 0;
+ }
+
+ chunk_destroy(&args[0].data.str);
+ args[0].type = ARGT_SINT;
+ args[0].data.sint = tag;
+
+ return 1;
+}
+
+/*
+ * Checks that a buffer contains a valid FIX message
+ *
+ * Return 1 if the check could be run, 0 if not.
+ * The result of the analyse itself is stored in <smp> as a boolean
+ */
+static int sample_conv_fix_is_valid(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct ist msg;
+
+ msg = ist2(smp->data.u.str.area, smp->data.u.str.data);
+
+ smp->flags &= ~SMP_F_MAY_CHANGE;
+ switch (fix_validate_message(msg)) {
+ case FIX_VALID_MESSAGE:
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = 1;
+ return 1;
+ case FIX_NEED_MORE_DATA:
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return 0;
+ case FIX_INVALID_MESSAGE:
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Extract the field value of an input binary sample containing an MQTT packet.
+ * Takes 2 mandatory arguments:
+ * - packet type
+ * - field name
+ *
+ * return 1 if the field was found, 0 if not.
+ */
+static int sample_conv_mqtt_field_value(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct ist pkt, value;
+ int type, fieldname_id;
+
+ pkt = ist2(smp->data.u.str.area, smp->data.u.str.data);
+ type = arg_p[0].data.sint;
+ fieldname_id = arg_p[1].data.sint;
+
+ smp->flags &= ~SMP_F_MAY_CHANGE;
+ value = mqtt_field_value(pkt, type, fieldname_id);
+ if (!istlen(value)) {
+ if (isttest(value)) {
+ /* value != IST_NULL, need more data */
+ smp->flags |= SMP_F_MAY_CHANGE;
+ }
+ return 0;
+ }
+
+ smp->data.u.str = ist2buf(value);
+ smp->flags |= SMP_F_CONST;
+ return 1;
+}
+
+/*
+ * this function checks the "mqtt_field_value" converter configuration.
+ * It expects a known packet type name or ID and a field name, in this order
+ *
+ * Args[0] will be turned into a MQTT_CPT_* value for direct matching when parsing
+ * a packet.
+ */
+static int sample_conv_mqtt_field_value_check(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ int type, fieldname_id;
+
+ /* check the MQTT packet type is valid */
+ type = mqtt_typeid(ist2(args[0].data.str.area, args[0].data.str.data));
+ if (type == MQTT_CPT_INVALID) {
+ memprintf(err, "Unknown MQTT type '%s'", args[0].data.str.area);
+ return 0;
+ }
+
+ /* check the field name belongs to the MQTT packet type */
+ fieldname_id = mqtt_check_type_fieldname(type, ist2(args[1].data.str.area, args[1].data.str.data));
+ if (fieldname_id == MQTT_FN_INVALID) {
+ memprintf(err, "Unknown MQTT field name '%s' for packet type '%s'", args[1].data.str.area,
+ args[0].data.str.area);
+ return 0;
+ }
+
+ /* save numeric counterparts of type and field name */
+ chunk_destroy(&args[0].data.str);
+ chunk_destroy(&args[1].data.str);
+ args[0].type = ARGT_SINT;
+ args[0].data.sint = type;
+ args[1].type = ARGT_SINT;
+ args[1].data.sint = fieldname_id;
+
+ return 1;
+}
+
/*
 * Checks that <smp> contains a valid MQTT message
 *
 * The function returns 1 if the check was run to its end, 0 otherwise.
 * The result of the analyse itself is stored in <smp> as a boolean.
 *
 * NOTE(review): the switch matches mqtt_validate_message()'s result against
 * the FIX_* validation codes, suggesting both validators share the same
 * status enum — confirm against the mqtt module before changing either.
 */
static int sample_conv_mqtt_is_valid(const struct arg *arg_p, struct sample *smp, void *private)
{
	struct ist msg;

	msg = ist2(smp->data.u.str.area, smp->data.u.str.data);

	smp->flags &= ~SMP_F_MAY_CHANGE;
	switch (mqtt_validate_message(msg, NULL)) {
	case FIX_VALID_MESSAGE:
		smp->data.type = SMP_T_BOOL;
		smp->data.u.sint = 1;
		return 1;
	case FIX_NEED_MORE_DATA:
		/* truncated message: retry once more data arrives */
		smp->flags |= SMP_F_MAY_CHANGE;
		return 0;
	case FIX_INVALID_MESSAGE:
		smp->data.type = SMP_T_BOOL;
		smp->data.u.sint = 0;
		return 1;
	}
	return 0;
}
+
+/* This function checks the "strcmp" converter's arguments and extracts the
+ * variable name and its scope.
+ */
+static int smp_check_strcmp(struct arg *args, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ if (!args[0].data.str.data) {
+ memprintf(err, "missing variable name");
+ return 0;
+ }
+
+ /* Try to decode a variable. */
+ if (vars_check_arg(&args[0], NULL))
+ return 1;
+
+ memprintf(err, "failed to register variable name '%s'",
+ args[0].data.str.area);
+ return 0;
+}
+
+/**/
+static int sample_conv_htonl(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct buffer *tmp;
+ uint32_t n;
+
+ n = htonl((uint32_t)smp->data.u.sint);
+ tmp = get_trash_chunk();
+
+ memcpy(b_head(tmp), &n, 4);
+ b_add(tmp, 4);
+
+ smp->data.u.str = *tmp;
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+/**/
+static int sample_conv_cut_crlf(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ char *p;
+ size_t l;
+
+ p = smp->data.u.str.area;
+ for (l = 0; l < smp->data.u.str.data; l++) {
+ if (*(p+l) == '\r' || *(p+l) == '\n')
+ break;
+ }
+ smp->data.u.str.data = l;
+ return 1;
+}
+
+/**/
+static int sample_conv_ltrim(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ char *delimiters, *p;
+ size_t dlen, l;
+
+ delimiters = arg_p[0].data.str.area;
+ dlen = arg_p[0].data.str.data;
+
+ l = smp->data.u.str.data;
+ p = smp->data.u.str.area;
+ while (l && memchr(delimiters, *p, dlen) != NULL) {
+ p++;
+ l--;
+ }
+
+ smp->data.u.str.area = p;
+ smp->data.u.str.data = l;
+ return 1;
+}
+
+/**/
+static int sample_conv_rtrim(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ char *delimiters, *p;
+ size_t dlen, l;
+
+ delimiters = arg_p[0].data.str.area;
+ dlen = arg_p[0].data.str.data;
+
+ l = smp->data.u.str.data;
+ p = smp->data.u.str.area + l - 1;
+ while (l && memchr(delimiters, *p, dlen) != NULL) {
+ p--;
+ l--;
+ }
+
+ smp->data.u.str.data = l;
+ return 1;
+}
+
+/* This function checks the "json_query" converter's arguments. */
+static int sample_check_json_query(struct arg *arg, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ if (arg[0].data.str.data == 0) {
+ memprintf(err, "json_path must not be empty");
+ return 0;
+ }
+
+ if (arg[1].data.str.data != 0) {
+ if (strcmp(arg[1].data.str.area, "int") != 0) {
+ memprintf(err, "output_type only supports \"int\" as argument");
+ return 0;
+ } else {
+ arg[1].type = ARGT_SINT;
+ arg[1].data.sint = 0;
+ }
+ }
+ return 1;
+}
+
+/* Limit JSON integer values to the range [-(2**53)+1, (2**53)-1] as per
+ * the recommendation for interoperable integers in section 6 of RFC 7159.
+ */
+#define JSON_INT_MAX ((1LL << 53) - 1)
+#define JSON_INT_MIN (-JSON_INT_MAX)
+
/* This sample function gets the value from a given json string.
 * The mjson library is used to parse the JSON struct.
 * args[0] is the JSON path; args[1], when set to ARGT_SINT by the config
 * check, requests an integer output for numbers (otherwise numbers are
 * formatted as "%g" strings). Returns 1 with the sample filled on success,
 * 0 when nothing matches or the value cannot be represented.
 */
static int sample_conv_json_query(const struct arg *args, struct sample *smp, void *private)
{
	struct buffer *trash = get_trash_chunk();
	const char *token; /* holds the temporary string from mjson_find */
	int token_size; /* holds the length of <token> */

	enum mjson_tok token_type;

	token_type = mjson_find(smp->data.u.str.area, smp->data.u.str.data, args[0].data.str.area, &token, &token_size);

	switch (token_type) {
	case MJSON_TOK_NUMBER:
		if (args[1].type == ARGT_SINT) {
			/* integer output requested; reject values outside the
			 * interoperable JSON integer range (see JSON_INT_MIN/MAX)
			 */
			smp->data.u.sint = strtoll(token, NULL, 0);

			if (smp->data.u.sint < JSON_INT_MIN || smp->data.u.sint > JSON_INT_MAX)
				return 0;

			smp->data.type = SMP_T_SINT;

			return 1;
		} else {
			double double_val;

			if (mjson_get_number(smp->data.u.str.area, smp->data.u.str.data, args[0].data.str.area, &double_val) == 0)
				return 0;

			trash->data = snprintf(trash->area,trash->size,"%g",double_val);
			smp->data.u.str = *trash;
			smp->data.type = SMP_T_STR;

			return 1;
		}
	case MJSON_TOK_TRUE:
		smp->data.type = SMP_T_BOOL;
		smp->data.u.sint = 1;

		return 1;
	case MJSON_TOK_FALSE:
		smp->data.type = SMP_T_BOOL;
		smp->data.u.sint = 0;

		return 1;
	case MJSON_TOK_STRING: {
		int len;

		len = mjson_get_string(smp->data.u.str.area, smp->data.u.str.data, args[0].data.str.area, trash->area, trash->size);

		if (len == -1) {
			/* invalid string */
			return 0;
		}

		trash->data = len;
		smp->data.u.str = *trash;
		smp->data.type = SMP_T_STR;

		return 1;
	}
	case MJSON_TOK_ARRAY: {
		// We copy the complete array, including square brackets into the return buffer
		// result looks like: ["manage-account","manage-account-links","view-profile"]
		trash->data = b_putblk(trash, token, token_size);
		smp->data.u.str = *trash;
		smp->data.type = SMP_T_STR;
		return 1;
	}
	case MJSON_TOK_NULL:
	case MJSON_TOK_OBJECT:
		/* We cannot handle these. */
		return 0;
	case MJSON_TOK_INVALID:
		/* Nothing matches the query. */
		return 0;
	case MJSON_TOK_KEY:
		/* This is not a valid return value according to the
		 * mjson documentation, but we handle it to benefit
		 * from '-Wswitch'.
		 */
		return 0;
	}

	my_unreachable();
	return 0;
}
+
+#ifdef USE_OPENSSL
/* Configuration check for the "jwt_verify" converter. Both arguments (the
 * algorithm and the key/certificate) may be either variables or constant
 * strings: vars_check_arg() converts them in place when they name variables,
 * and its return value is deliberately ignored since a constant string is
 * also acceptable.
 */
static int sample_conv_jwt_verify_check(struct arg *args, struct sample_conv *conv,
                                        const char *file, int line, char **err)
{
	vars_check_arg(&args[0], NULL);
	vars_check_arg(&args[1], NULL);

	/* a constant algorithm name can be validated right away */
	if (args[0].type == ARGT_STR) {
		enum jwt_alg alg = jwt_parse_alg(args[0].data.str.area, args[0].data.str.data);

		if (alg == JWT_ALG_DEFAULT) {
			memprintf(err, "unknown JWT algorithm: %s", args[0].data.str.area);
			return 0;
		}
	}

	if (args[1].type == ARGT_STR) {
		/* NOTE(review): the return value of jwt_tree_load_cert() is
		 * not checked here, so a certificate that fails to load would
		 * only be detected at runtime — confirm this is intended.
		 */
		jwt_tree_load_cert(args[1].data.str.area, args[1].data.str.data, err);
	}

	return 1;
}
+
+/* Check that a JWT's signature is correct */
+static int sample_conv_jwt_verify(const struct arg *args, struct sample *smp, void *private)
+{
+ struct sample alg_smp, key_smp;
+ enum jwt_vrfy_status ret;
+
+ smp_set_owner(&alg_smp, smp->px, smp->sess, smp->strm, smp->opt);
+ smp_set_owner(&key_smp, smp->px, smp->sess, smp->strm, smp->opt);
+ if (!sample_conv_var2smp_str(&args[0], &alg_smp))
+ return 0;
+ if (!sample_conv_var2smp_str(&args[1], &key_smp))
+ return 0;
+
+ ret = jwt_verify(&smp->data.u.str, &alg_smp.data.u.str, &key_smp.data.u.str);
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = ret;
+ return 1;
+}
+
+
+/*
+ * Returns the decoded header or payload of a JWT if no parameter is given, or
+ * the value of the specified field of the corresponding JWT subpart if a
+ * parameter is given.
+ */
+static int sample_conv_jwt_member_query(const struct arg *args, struct sample *smp,
+ void *private, enum jwt_elt member)
+{
+ struct jwt_item items[JWT_ELT_MAX] = { { 0 } };
+ unsigned int item_num = member + 1; /* We don't need to tokenize the full token */
+ struct buffer *decoded_header = get_trash_chunk();
+ int retval = 0;
+ int ret;
+
+ jwt_tokenize(&smp->data.u.str, items, &item_num);
+
+ if (item_num < member + 1)
+ goto end;
+
+ ret = base64urldec(items[member].start, items[member].length,
+ decoded_header->area, decoded_header->size);
+ if (ret == -1)
+ goto end;
+
+ decoded_header->data = ret;
+ if (args[0].type != ARGT_STR) {
+ smp->data.u.str = *decoded_header;
+ smp->data.type = SMP_T_STR;
+ goto end;
+ }
+
+ /* We look for a specific field of the header or payload part of the JWT */
+ smp->data.u.str = *decoded_header;
+
+ retval = sample_conv_json_query(args, smp, private);
+
+end:
+ return retval;
+}
+
+/* This function checks the "jwt_header_query" and "jwt_payload_query" converters' arguments.
+ * It is based on the "json_query" converter's check with the only difference
+ * being that the jwt converters can take 0 parameters as well.
+ */
+static int sample_conv_jwt_query_check(struct arg *arg, struct sample_conv *conv,
+ const char *file, int line, char **err)
+{
+ if (arg[1].data.str.data != 0) {
+ if (strcmp(arg[1].data.str.area, "int") != 0) {
+ memprintf(err, "output_type only supports \"int\" as argument");
+ return 0;
+ } else {
+ arg[1].type = ARGT_SINT;
+ arg[1].data.sint = 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * If no parameter is given, return the decoded header part of a JWT (the first
+ * base64 encoded part, corresponding to the JOSE header).
+ * If a parameter is given, this converter acts as a "json_query" on this
+ * decoded JSON.
+ */
+static int sample_conv_jwt_header_query(const struct arg *args, struct sample *smp, void *private)
+{
+ return sample_conv_jwt_member_query(args, smp, private, JWT_ELT_JOSE);
+}
+
+/*
+ * If no parameter is given, return the decoded payload part of a JWT (the
+ * second base64 encoded part, which contains all the claims). If a parameter
+ * is given, this converter acts as a "json_query" on this decoded JSON.
+ */
+static int sample_conv_jwt_payload_query(const struct arg *args, struct sample *smp, void *private)
+{
+ return sample_conv_jwt_member_query(args, smp, private, JWT_ELT_CLAIMS);
+}
+
+#endif /* USE_OPENSSL */
+
+/************************************************************************/
+/* All supported sample fetch functions must be declared here */
+/************************************************************************/
+
+
+/* returns the current value of the global <actconn> counter */
+static int
+smp_fetch_actconn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = actconn;
+	return 1;
+}
+
+
+/* force TRUE to be returned at the fetch level */
+static int
+smp_fetch_true(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	/* NOTE(review): the sample is made writable first, presumably so that
+	 * downstream converters may modify it in place — confirm.
+	 */
+	if (!smp_make_rw(smp))
+		return 0;
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = 1;
+	return 1;
+}
+
+/* force FALSE to be returned at the fetch level */
+static int
+smp_fetch_false(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = 0;
+	return 1;
+}
+
+/* retrieve environment variable $1 as a string; fails when the argument is
+ * not a string or the variable is not set.
+ */
+static int
+smp_fetch_env(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	char *env;
+
+	if (args[0].type != ARGT_STR)
+		return 0;
+
+	env = getenv(args[0].data.str.area);
+	if (!env)
+		return 0;
+
+	/* the result points to libc-owned storage: mark it SMP_F_CONST so the
+	 * sample framework never modifies it in place.
+	 */
+	smp->data.type = SMP_T_STR;
+	smp->flags = SMP_F_CONST;
+	smp->data.u.str.area = env;
+	smp->data.u.str.data = strlen(env);
+	return 1;
+}
+
+/* Validates the data unit argument passed to "date" fetch. Argument 1 supports an
+ * optional string representing the unit of the result: "s" for seconds, "ms" for
+ * milliseconds and "us" for microseconds.
+ * On success the string argument is released and retyped in place to its
+ * numeric TIME_UNIT_* equivalent (ARGT_SINT).
+ * Returns 0 on error and non-zero if OK.
+ */
+int smp_check_date_unit(struct arg *args, char **err)
+{
+	if (args[1].type == ARGT_STR) {
+		long long int unit;
+
+		if (strcmp(args[1].data.str.area, "s") == 0) {
+			unit = TIME_UNIT_S;
+		}
+		else if (strcmp(args[1].data.str.area, "ms") == 0) {
+			unit = TIME_UNIT_MS;
+		}
+		else if (strcmp(args[1].data.str.area, "us") == 0) {
+			unit = TIME_UNIT_US;
+		}
+		else {
+			memprintf(err, "expects 's', 'ms' or 'us', got '%s'",
+			          args[1].data.str.area);
+			return 0;
+		}
+
+		/* free the string buffer before retyping the union */
+		chunk_destroy(&args[1].data.str);
+		args[1].type = ARGT_SINT;
+		args[1].data.sint = unit;
+	}
+	else if (args[1].type != ARGT_STOP) {
+		memprintf(err, "Unexpected arg type");
+		return 0;
+	}
+
+	return 1;
+}
+
+/* retrieve the current local date in epoch time, converts it to milliseconds
+ * or microseconds if asked to in optional args[1] unit param, and applies an
+ * optional args[0] offset.
+ * Note: the offset is added after the unit conversion, so it is expressed in
+ * the requested unit.
+ */
+static int
+smp_fetch_date(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.u.sint = date.tv_sec;
+
+	/* report in milliseconds */
+	if (args[1].type == ARGT_SINT && args[1].data.sint == TIME_UNIT_MS) {
+		smp->data.u.sint *= 1000;
+		smp->data.u.sint += date.tv_usec / 1000;
+	}
+	/* report in microseconds */
+	else if (args[1].type == ARGT_SINT && args[1].data.sint == TIME_UNIT_US) {
+		smp->data.u.sint *= 1000000;
+		smp->data.u.sint += date.tv_usec;
+	}
+
+	/* add offset */
+	if (args[0].type == ARGT_SINT)
+		smp->data.u.sint += args[0].data.sint;
+
+	/* the date changes between calls: mark the sample volatile */
+	smp->data.type = SMP_T_SINT;
+	smp->flags |= SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+	return 1;
+}
+
+/* Returns the microsecond component of the current date as a volatile
+ * integer sample.
+ */
+static int
+smp_fetch_date_us(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = date.tv_usec;
+	smp->flags |= SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+	return 1;
+}
+
+
+/* returns the hostname; the global <hostname> buffer is returned as-is and
+ * marked SMP_F_CONST since it must not be modified.
+ */
+static int
+smp_fetch_hostname(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_STR;
+	smp->flags = SMP_F_CONST;
+	smp->data.u.str.area = hostname;
+	smp->data.u.str.data = strlen(hostname);
+	return 1;
+}
+
+/* returns the number of processes (always 1) */
+static int
+smp_fetch_nbproc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 1;
+	return 1;
+}
+
+/* returns the PID of the current process */
+static int
+smp_fetch_pid(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = pid;
+	return 1;
+}
+
+
+/* returns the number of the current process (always 1) */
+static int
+smp_fetch_proc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 1;
+	return 1;
+}
+
+/* returns the ID of the current thread (tid).
+ * NOTE(review): tid looks 0-based, so the previous "between 1 and nbthread"
+ * wording seemed off — confirm.
+ */
+static int
+smp_fetch_thread(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = tid;
+	return 1;
+}
+
+/* generate a random 32-bit integer for whatever purpose, with an optional
+ * range specified in argument.
+ */
+static int
+smp_fetch_rand(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.u.sint = statistical_prng();
+
+	/* reduce if needed. Don't do a modulo, use all bits!
+	 * (multiply-then-shift scales the 32-bit value into [0, range)).
+	 */
+	if (args[0].type == ARGT_SINT)
+		smp->data.u.sint = ((u64)smp->data.u.sint * (u64)args[0].data.sint) >> 32;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags |= SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+	return 1;
+}
+
+/* Boolean fetch reporting whether the process is currently stopping. */
+static int
+smp_fetch_stopping(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.u.sint = stopping;
+	smp->data.type = SMP_T_BOOL;
+	return 1;
+}
+
+/* returns the number of calls of the current stream's process_stream() */
+static int
+smp_fetch_cpu_calls(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = smp->strm->task->calls;
+	return 1;
+}
+
+/* returns the average number of nanoseconds spent processing the stream per call */
+static int
+smp_fetch_cpu_ns_avg(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	/* guard against division by zero when no call was made yet */
+	smp->data.u.sint = smp->strm->task->calls ? smp->strm->cpu_time / smp->strm->task->calls : 0;
+	return 1;
+}
+
+/* returns the total number of nanoseconds spent processing the stream */
+static int
+smp_fetch_cpu_ns_tot(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = smp->strm->cpu_time;
+	return 1;
+}
+
+/* returns the average number of nanoseconds per call spent waiting for other tasks to be processed */
+static int
+smp_fetch_lat_ns_avg(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	/* guard against division by zero when no call was made yet */
+	smp->data.u.sint = smp->strm->task->calls ? smp->strm->lat_time / smp->strm->task->calls : 0;
+	return 1;
+}
+
+/* returns the total number of nanoseconds spent waiting for other tasks to be processed */
+static int
+smp_fetch_lat_ns_tot(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = smp->strm->lat_time;
+	return 1;
+}
+
+/* returns the constant string from args[0]; flagged SMP_F_CONST since the
+ * area belongs to the parsed argument and must not be modified.
+ */
+static int smp_fetch_const_str(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags |= SMP_F_CONST;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.area = args[0].data.str.area;
+	smp->data.u.str.data = args[0].data.str.data;
+	return 1;
+}
+
+/* Parses the "bool" fetch argument: accepts "true"/"1" and "false"/"0"
+ * (case-insensitive) and retypes the argument in place to ARGT_SINT, releasing
+ * the string buffer first. Returns 1 on success, 0 with <err> filled otherwise.
+ */
+static int smp_check_const_bool(struct arg *args, char **err)
+{
+	if (strcasecmp(args[0].data.str.area, "true") == 0 ||
+	    strcasecmp(args[0].data.str.area, "1") == 0) {
+		chunk_destroy(&args[0].data.str);
+		args[0].type = ARGT_SINT;
+		args[0].data.sint = 1;
+		return 1;
+	}
+	if (strcasecmp(args[0].data.str.area, "false") == 0 ||
+	    strcasecmp(args[0].data.str.area, "0") == 0) {
+		chunk_destroy(&args[0].data.str);
+		args[0].type = ARGT_SINT;
+		args[0].data.sint = 0;
+		return 1;
+	}
+	memprintf(err, "Expects 'true', 'false', '0' or '1'");
+	return 0;
+}
+
+/* returns the constant boolean prepared by smp_check_const_bool() */
+static int smp_fetch_const_bool(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = args[0].data.sint;
+	return 1;
+}
+
+/* returns the constant integer from args[0] */
+static int smp_fetch_const_int(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args[0].data.sint;
+	return 1;
+}
+
+/* returns the constant IPv4 address from args[0] */
+static int smp_fetch_const_ipv4(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_IPV4;
+	smp->data.u.ipv4 = args[0].data.ipv4;
+	return 1;
+}
+
+/* returns the constant IPv6 address from args[0] */
+static int smp_fetch_const_ipv6(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_IPV6;
+	smp->data.u.ipv6 = args[0].data.ipv6;
+	return 1;
+}
+
+/* Parses the "bin" fetch argument: converts the hex string in args[0] to its
+ * binary form via parse_binary().
+ * NOTE(review): parse_binary() appears to allocate <binstr>; ownership is then
+ * transferred to the arg, whose original string is released first — confirm.
+ */
+static int smp_check_const_bin(struct arg *args, char **err)
+{
+	char *binstr = NULL;
+	int binstrlen;
+
+	if (!parse_binary(args[0].data.str.area, &binstr, &binstrlen, err))
+		return 0;
+	chunk_destroy(&args[0].data.str);
+	args[0].type = ARGT_STR;
+	args[0].data.str.area = binstr;
+	args[0].data.str.data = binstrlen;
+	return 1;
+}
+
+/* returns the constant binary blob prepared by smp_check_const_bin() */
+static int smp_fetch_const_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags |= SMP_F_CONST;
+	smp->data.type = SMP_T_BIN;
+	smp->data.u.str.area = args[0].data.str.area;
+	smp->data.u.str.data = args[0].data.str.data;
+	return 1;
+}
+
+/* Checks the "meth" fetch argument: well-known methods are converted to their
+ * HTTP_METH_* integer at parse time; unknown methods are validated as HTTP
+ * tokens and kept as strings.
+ */
+static int smp_check_const_meth(struct arg *args, char **err)
+{
+	enum http_meth_t meth;
+	int i;
+
+	meth = find_http_meth(args[0].data.str.area, args[0].data.str.data);
+	if (meth != HTTP_METH_OTHER) {
+		chunk_destroy(&args[0].data.str);
+		args[0].type = ARGT_SINT;
+		args[0].data.sint = meth;
+	} else {
+		/* Check method availability. A method is a token defined as :
+		 * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+		 *         "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+		 * token = 1*tchar
+		 */
+		for (i = 0; i < args[0].data.str.data; i++) {
+			if (!HTTP_IS_TOKEN(args[0].data.str.area[i])) {
+				memprintf(err, "expects valid method.");
+				return 0;
+			}
+		}
+	}
+	return 1;
+}
+
+/* Returns the constant method from args[0]: either a known HTTP_METH_* value
+ * (ARGT_SINT, set by the check above) or HTTP_METH_OTHER with the raw string.
+ */
+static int smp_fetch_const_meth(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_METH;
+	if (args[0].type == ARGT_SINT) {
+		smp->flags &= ~SMP_F_CONST;
+		smp->data.u.meth.meth = args[0].data.sint;
+		smp->data.u.meth.str.area = "";
+		smp->data.u.meth.str.data = 0;
+	} else {
+		smp->flags |= SMP_F_CONST;
+		smp->data.u.meth.meth = HTTP_METH_OTHER;
+		smp->data.u.meth.str.area = args[0].data.str.area;
+		smp->data.u.meth.str.data = args[0].data.str.data;
+	}
+	return 1;
+}
+
+// This function checks the "uuid" sample's arguments.
+// When no version argument is present, it defaults to 4 (random).
+// Function won't get called when no parameter is specified (maybe a bug?)
+static int smp_check_uuid(struct arg *args, char **err)
+{
+	if (!args[0].type) {
+		args[0].type = ARGT_SINT;
+		args[0].data.sint = 4;
+	}
+	else if (args[0].data.sint != 4) {
+		memprintf(err, "Unsupported UUID version: '%lld'", args[0].data.sint);
+		return 0;
+	}
+
+	return 1;
+}
+
+// Generate a RFC4122 UUID (default is v4 = fully random)
+static int smp_fetch_uuid(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	// !args[0].type also covers the no-argument case that the check above
+	// may not have seen (see its note).
+	if (args[0].data.sint == 4 || !args[0].type) {
+		ha_generate_uuid(&trash);
+		smp->data.type = SMP_T_STR;
+		smp->flags = SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+		smp->data.u.str = trash;
+		return 1;
+	}
+
+	// more implementations of other uuid formats possible here
+	return 0;
+}
+
+/* Check if QUIC support was compiled and was not disabled by "no-quic" global option.
+ * NOTE(review): the boolean itself is returned, so when QUIC is disabled the
+ * fetch reports failure rather than a "false" sample — confirm this is intended.
+ */
+static int smp_fetch_quic_enabled(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->data.type = SMP_T_BOOL;
+	smp->flags = 0;
+#ifdef USE_QUIC
+	smp->data.u.sint = !(global.tune.options & GTUNE_NO_QUIC);
+#else
+	smp->data.u.sint = 0;
+#endif
+	return smp->data.u.sint;
+}
+
+/* Timing events re{q,s}.timer. The keyword spelling is used for dispatch:
+ * kw[2] distinguishes req/res, kw[10] selects the individual timer.
+ * Returns 1 with the timer in milliseconds, 0 on unknown keyword or when no
+ * stream is attached.
+ */
+static int smp_fetch_reX_timers(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct strm_logs *logs;
+	int t_request = -1;
+
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = 0;
+
+	logs = &smp->strm->logs;
+
+	/* time from accept to full request, only when not negative */
+	if ((llong)(logs->request_ts - logs->accept_ts) >= 0)
+		t_request = ns_to_ms(logs->request_ts - logs->accept_ts);
+
+	/* req.timer. */
+	if (kw[2] == 'q') {
+		switch (kw[10]) {
+		/* req.timer.idle (%Ti) */
+		case 'i':
+			smp->data.u.sint = logs->t_idle;
+			break;
+
+		/* req.timer.tq (%Tq) */
+		case 't':
+			smp->data.u.sint = t_request;
+			break;
+
+		/* req.timer.hdr (%TR) */
+		case 'h':
+			smp->data.u.sint = (t_request >= 0) ? t_request - logs->t_idle - logs->t_handshake : -1;
+			break;
+
+		/* req.timer.queue (%Tw) */
+		case 'q':
+			smp->data.u.sint = (logs->t_queue >= 0) ? logs->t_queue - t_request : -1;
+			break;
+
+		default:
+			goto error;
+		}
+	} else {
+		/* res.timer. */
+		switch (kw[10]) {
+		/* res.timer.hdr (%Tr) */
+		case 'h':
+			smp->data.u.sint = (logs->t_data >= 0) ? logs->t_data - logs->t_connect : -1;
+			break;
+
+		/* res.timer.data (%Td) */
+		case 'd':
+			smp->data.u.sint = (logs->t_data >= 0) ? logs->t_close - logs->t_data : -1;
+			break;
+
+		default:
+			goto error;
+		}
+	}
+
+	return 1;
+error:
+	return 0;
+}
+
+
+/* Timing events txn.timer.*; kw[10] selects the timer ('t' = total, 'u' = user). */
+static int smp_fetch_txn_timers(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct strm_logs *logs;
+
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = 0;
+
+	logs = &smp->strm->logs;
+
+	/* txn.timer. */
+	switch (kw[10]) {
+
+	/* txn.timer.total (%Ta) */
+	case 't':
+		smp->data.u.sint = logs->t_close - (logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0);
+		break;
+
+
+	/* txn.timer.user (%Tu) */
+	case 'u':
+		smp->data.u.sint = logs->t_close - (logs->t_idle >= 0 ? logs->t_idle : 0);
+		break;
+
+	default:
+		goto error;
+
+	}
+
+	return 1;
+error:
+
+	return 0;
+}
+
+/* Timing events {f,bc}.timer.*; kw[0] == 'b' selects the backend connection
+ * (bc.*), otherwise the frontend connection (fc.*); kw[9] selects the timer.
+ */
+static int smp_fetch_conn_timers(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct strm_logs *logs;
+
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = 0;
+
+	logs = &smp->strm->logs;
+
+	if (kw[0] == 'b') {
+		/* bc.timer. */
+		switch (kw[9]) {
+
+		/* bc.timer.connect (%Tc) */
+		case 'c':
+			smp->data.u.sint = (logs->t_connect >= 0) ? logs->t_connect - logs->t_queue : -1;
+			break;
+
+		default:
+			goto error;
+		}
+
+	} else {
+
+		/* fc.timer. */
+		switch (kw[9]) {
+
+		/* fc.timer.handshake (%Th) */
+		case 'h':
+			smp->data.u.sint = logs->t_handshake;
+			break;
+
+		/* fc.timer.total (%Tt) */
+		case 't':
+			smp->data.u.sint = logs->t_close;
+			break;
+
+		default:
+			goto error;
+		}
+
+	}
+
+	return 1;
+error:
+
+	return 0;
+}
+
+/* bytes_{in,out}: returns the stream's byte counters; kw[6] ('i'/'o')
+ * selects the direction. Fails when no stream is attached.
+ */
+static int smp_fetch_bytes(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct strm_logs *logs;
+
+	if (!smp->strm)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = 0;
+
+	/* the address of an embedded member can never be NULL, so the former
+	 * "if (!logs)" check was dead code and was removed.
+	 */
+	logs = &smp->strm->logs;
+
+	if (kw[6] == 'i') { /* bytes_in */
+		smp->data.u.sint = logs->bytes_in;
+	} else { /* bytes_out */
+		smp->data.u.sint = logs->bytes_out;
+	}
+
+	return 1;
+}
+
+/* Validates the "bytes" converter arguments: arg0 (offset) must be a variable
+ * or a non-negative integer; arg1 (length, optional) must be a variable or a
+ * strictly positive integer. check_operator() handles the variable forms.
+ */
+static int sample_conv_bytes_check(struct arg *args, struct sample_conv *conv,
+                                   const char *file, int line, char **err)
+{
+	// arg0 is not optional, must be >= 0
+	if (!check_operator(&args[0], conv, file, line, err)) {
+		return 0;
+	}
+	if (args[0].type != ARGT_VAR) {
+		if (args[0].type != ARGT_SINT || args[0].data.sint < 0) {
+			memprintf(err, "expects a non-negative integer");
+			return 0;
+		}
+	}
+	// arg1 is optional, must be > 0
+	if (args[1].type != ARGT_STOP) {
+		if (!check_operator(&args[1], conv, file, line, err)) {
+			return 0;
+		}
+		if (args[1].type != ARGT_VAR) {
+			if (args[1].type != ARGT_SINT || args[1].data.sint <= 0) {
+				memprintf(err, "expects a positive integer");
+				return 0;
+			}
+		}
+	}
+
+	return 1;
+}
+
+/* Log-derived sample fetch keywords; the trailing comments give the
+ * equivalent log-format tag.
+ */
+static struct sample_fetch_kw_list smp_logs_kws = {ILH, {
+	{ "bytes_in", smp_fetch_bytes, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+	{ "bytes_out", smp_fetch_bytes, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+
+	{ "txn.timer.total", smp_fetch_txn_timers, 0, NULL, SMP_T_SINT, SMP_USE_TXFIN }, /* "Ta" */
+	{ "txn.timer.user", smp_fetch_txn_timers, 0, NULL, SMP_T_SINT, SMP_USE_TXFIN }, /* "Tu" */
+
+	{ "bc.timer.connect", smp_fetch_conn_timers, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV }, /* "Tc" */
+	{ "fc.timer.handshake", smp_fetch_conn_timers, 0, NULL, SMP_T_SINT, SMP_USE_L4CLI }, /* "Th" */
+	{ "fc.timer.total", smp_fetch_conn_timers, 0, NULL, SMP_T_SINT, SMP_USE_SSFIN }, /* "Tt" */
+
+	{ "req.timer.idle", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV }, /* "Ti" */
+	{ "req.timer.tq", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV }, /* "Tq" */
+	{ "req.timer.hdr", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_HRQHV }, /* "TR" */
+	{ "req.timer.queue", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV }, /* "Tw" */
+	{ "res.timer.data", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_RSFIN }, /* "Td" */
+	{ "res.timer.hdr", smp_fetch_reX_timers, 0, NULL, SMP_T_SINT, SMP_USE_HRSHV }, /* "Tr" */
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_logs_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: fetches that may return multiple types should be declared using the
+ * appropriate pseudo-type. If not available it must be declared as the lowest
+ * common denominator, the type that can be casted into all other ones.
+ * Entries here are constant or internal fetches (SMP_USE_CONST/SMP_USE_INTRN).
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "act_conn", smp_fetch_actconn, 0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "always_false", smp_fetch_false, 0, NULL, SMP_T_BOOL, SMP_USE_CONST },
+	{ "always_true", smp_fetch_true, 0, NULL, SMP_T_BOOL, SMP_USE_CONST },
+	{ "env", smp_fetch_env, ARG1(1,STR), NULL, SMP_T_STR, SMP_USE_CONST },
+	{ "date", smp_fetch_date, ARG2(0,SINT,STR), smp_check_date_unit, SMP_T_SINT, SMP_USE_CONST },
+	{ "date_us", smp_fetch_date_us, 0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "hostname", smp_fetch_hostname, 0, NULL, SMP_T_STR, SMP_USE_CONST },
+	{ "nbproc", smp_fetch_nbproc,0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "pid", smp_fetch_pid, 0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "proc", smp_fetch_proc, 0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "quic_enabled", smp_fetch_quic_enabled, 0, NULL, SMP_T_BOOL, SMP_USE_CONST },
+	{ "thread", smp_fetch_thread, 0, NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "rand", smp_fetch_rand, ARG1(0,SINT), NULL, SMP_T_SINT, SMP_USE_CONST },
+	{ "stopping", smp_fetch_stopping, 0, NULL, SMP_T_BOOL, SMP_USE_INTRN },
+	{ "uuid", smp_fetch_uuid, ARG1(0, SINT), smp_check_uuid, SMP_T_STR, SMP_USE_CONST },
+
+	{ "cpu_calls", smp_fetch_cpu_calls, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+	{ "cpu_ns_avg", smp_fetch_cpu_ns_avg, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+	{ "cpu_ns_tot", smp_fetch_cpu_ns_tot, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+	{ "lat_ns_avg", smp_fetch_lat_ns_avg, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+	{ "lat_ns_tot", smp_fetch_lat_ns_tot, 0, NULL, SMP_T_SINT, SMP_USE_INTRN },
+
+	{ "str", smp_fetch_const_str, ARG1(1,STR), NULL , SMP_T_STR, SMP_USE_CONST },
+	{ "bool", smp_fetch_const_bool, ARG1(1,STR), smp_check_const_bool, SMP_T_BOOL, SMP_USE_CONST },
+	{ "int", smp_fetch_const_int, ARG1(1,SINT), NULL , SMP_T_SINT, SMP_USE_CONST },
+	{ "ipv4", smp_fetch_const_ipv4, ARG1(1,IPV4), NULL , SMP_T_IPV4, SMP_USE_CONST },
+	{ "ipv6", smp_fetch_const_ipv6, ARG1(1,IPV6), NULL , SMP_T_IPV6, SMP_USE_CONST },
+	{ "bin", smp_fetch_const_bin, ARG1(1,STR), smp_check_const_bin , SMP_T_BIN, SMP_USE_CONST },
+	{ "meth", smp_fetch_const_meth, ARG1(1,STR), smp_check_const_meth, SMP_T_METH, SMP_USE_CONST },
+
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * The JWT converters at the end are only available when built with OPENSSL.
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+	{ "add_item",sample_conv_add_item, ARG3(2,STR,STR,STR), smp_check_add_item, SMP_T_STR, SMP_T_STR },
+	{ "debug", sample_conv_debug, ARG2(0,STR,STR), smp_check_debug, SMP_T_ANY, SMP_T_SAME },
+	{ "b64dec", sample_conv_base642bin, 0, NULL, SMP_T_STR, SMP_T_BIN },
+	{ "base64", sample_conv_bin2base64, 0, NULL, SMP_T_BIN, SMP_T_STR },
+	{ "concat", sample_conv_concat, ARG3(1,STR,STR,STR), smp_check_concat, SMP_T_STR, SMP_T_STR },
+	{ "ub64enc", sample_conv_bin2base64url,0, NULL, SMP_T_BIN, SMP_T_STR },
+	{ "ub64dec", sample_conv_base64url2bin,0, NULL, SMP_T_STR, SMP_T_BIN },
+	{ "upper", sample_conv_str2upper, 0, NULL, SMP_T_STR, SMP_T_STR },
+	{ "lower", sample_conv_str2lower, 0, NULL, SMP_T_STR, SMP_T_STR },
+	{ "length", sample_conv_length, 0, NULL, SMP_T_STR, SMP_T_SINT },
+	{ "be2dec", sample_conv_be2dec, ARG3(1,STR,SINT,SINT), sample_conv_be2dec_check, SMP_T_BIN, SMP_T_STR },
+	{ "be2hex", sample_conv_be2hex, ARG3(1,STR,SINT,SINT), sample_conv_be2hex_check, SMP_T_BIN, SMP_T_STR },
+	{ "hex", sample_conv_bin2hex, 0, NULL, SMP_T_BIN, SMP_T_STR },
+	{ "hex2i", sample_conv_hex2int, 0, NULL, SMP_T_STR, SMP_T_SINT },
+	{ "ipmask", sample_conv_ipmask, ARG2(1,MSK4,MSK6), NULL, SMP_T_ADDR, SMP_T_ADDR },
+	{ "ltime", sample_conv_ltime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "ms_ltime", sample_conv_ms_ltime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "us_ltime", sample_conv_us_ltime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "utime", sample_conv_utime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "ms_utime", sample_conv_ms_utime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "us_utime", sample_conv_us_utime, ARG2(1,STR,SINT), NULL, SMP_T_SINT, SMP_T_STR },
+	{ "crc32", sample_conv_crc32, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "crc32c", sample_conv_crc32c, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "djb2", sample_conv_djb2, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "sdbm", sample_conv_sdbm, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "wt6", sample_conv_wt6, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "xxh3", sample_conv_xxh3, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "xxh32", sample_conv_xxh32, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "xxh64", sample_conv_xxh64, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_T_SINT },
+	{ "json", sample_conv_json, ARG1(1,STR), sample_conv_json_check, SMP_T_STR, SMP_T_STR },
+	{ "bytes", sample_conv_bytes, ARG2(1,STR,STR), sample_conv_bytes_check, SMP_T_BIN, SMP_T_BIN },
+	{ "field", sample_conv_field, ARG3(2,SINT,STR,SINT), sample_conv_field_check, SMP_T_STR, SMP_T_STR },
+	{ "word", sample_conv_word, ARG3(2,SINT,STR,SINT), sample_conv_field_check, SMP_T_STR, SMP_T_STR },
+	{ "param", sample_conv_param, ARG2(1,STR,STR), sample_conv_param_check, SMP_T_STR, SMP_T_STR },
+	{ "regsub", sample_conv_regsub, ARG3(2,REG,STR,STR), sample_conv_regsub_check, SMP_T_STR, SMP_T_STR },
+	{ "sha1", sample_conv_sha1, 0, NULL, SMP_T_BIN, SMP_T_BIN },
+	{ "strcmp", sample_conv_strcmp, ARG1(1,STR), smp_check_strcmp, SMP_T_STR, SMP_T_SINT },
+	{ "host_only", sample_conv_host_only, 0, NULL, SMP_T_STR, SMP_T_STR },
+	{ "port_only", sample_conv_port_only, 0, NULL, SMP_T_STR, SMP_T_SINT },
+
+	/* gRPC converters. */
+	{ "ungrpc", sample_conv_ungrpc, ARG2(1,PBUF_FNUM,STR), sample_conv_protobuf_check, SMP_T_BIN, SMP_T_BIN },
+	{ "protobuf", sample_conv_protobuf, ARG2(1,PBUF_FNUM,STR), sample_conv_protobuf_check, SMP_T_BIN, SMP_T_BIN },
+
+	/* FIX converters */
+	{ "fix_is_valid", sample_conv_fix_is_valid, 0, NULL, SMP_T_BIN, SMP_T_BOOL },
+	{ "fix_tag_value", sample_conv_fix_tag_value, ARG1(1,STR), sample_conv_fix_value_check, SMP_T_BIN, SMP_T_BIN },
+
+	/* MQTT converters */
+	{ "mqtt_is_valid", sample_conv_mqtt_is_valid, 0, NULL, SMP_T_BIN, SMP_T_BOOL },
+	{ "mqtt_field_value", sample_conv_mqtt_field_value, ARG2(2,STR,STR), sample_conv_mqtt_field_value_check, SMP_T_BIN, SMP_T_STR },
+
+	{ "iif", sample_conv_iif, ARG2(2, STR, STR), NULL, SMP_T_BOOL, SMP_T_STR },
+
+	{ "and", sample_conv_binary_and, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "or", sample_conv_binary_or, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "xor", sample_conv_binary_xor, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "cpl", sample_conv_binary_cpl, 0, NULL, SMP_T_SINT, SMP_T_SINT },
+	{ "bool", sample_conv_arith_bool, 0, NULL, SMP_T_SINT, SMP_T_BOOL },
+	{ "not", sample_conv_arith_not, 0, NULL, SMP_T_SINT, SMP_T_BOOL },
+	{ "odd", sample_conv_arith_odd, 0, NULL, SMP_T_SINT, SMP_T_BOOL },
+	{ "even", sample_conv_arith_even, 0, NULL, SMP_T_SINT, SMP_T_BOOL },
+	{ "add", sample_conv_arith_add, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "sub", sample_conv_arith_sub, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "mul", sample_conv_arith_mul, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "div", sample_conv_arith_div, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "mod", sample_conv_arith_mod, ARG1(1,STR), check_operator, SMP_T_SINT, SMP_T_SINT },
+	{ "neg", sample_conv_arith_neg, 0, NULL, SMP_T_SINT, SMP_T_SINT },
+
+	{ "htonl", sample_conv_htonl, 0, NULL, SMP_T_SINT, SMP_T_BIN },
+	{ "cut_crlf", sample_conv_cut_crlf, 0, NULL, SMP_T_STR, SMP_T_STR },
+	{ "ltrim", sample_conv_ltrim, ARG1(1,STR), NULL, SMP_T_STR, SMP_T_STR },
+	{ "rtrim", sample_conv_rtrim, ARG1(1,STR), NULL, SMP_T_STR, SMP_T_STR },
+	{ "json_query", sample_conv_json_query, ARG2(1,STR,STR), sample_check_json_query , SMP_T_STR, SMP_T_ANY },
+
+#ifdef USE_OPENSSL
+	/* JSON Web Token converters */
+	{ "jwt_header_query", sample_conv_jwt_header_query, ARG2(0,STR,STR), sample_conv_jwt_query_check, SMP_T_BIN, SMP_T_ANY },
+	{ "jwt_payload_query", sample_conv_jwt_payload_query, ARG2(0,STR,STR), sample_conv_jwt_query_check, SMP_T_BIN, SMP_T_ANY },
+	{ "jwt_verify", sample_conv_jwt_verify, ARG2(2,STR,STR), sample_conv_jwt_verify_check, SMP_T_BIN, SMP_T_SINT },
+#endif
+	{ NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
diff --git a/src/server.c b/src/server.c
new file mode 100644
index 0000000..829fbb3
--- /dev/null
+++ b/src/server.c
@@ -0,0 +1,6765 @@
+/*
+ * Server management functions.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2008 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/types.h>
+#include <netinet/tcp.h>
+#include <ctype.h>
+#include <errno.h>
+
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet-t.h>
+#include <haproxy/backend.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/connection.h>
+#include <haproxy/dict-t.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/mailers.h>
+#include <haproxy/namespace.h>
+#include <haproxy/port_range.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy.h>
+#include <haproxy/queue.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+#include <haproxy/event_hdl.h>
+
+
+/* forward declarations of helpers defined later in this file */
+static void srv_update_status(struct server *s, int type, int cause);
+static int srv_apply_lastaddr(struct server *srv, int *err_code);
+static void srv_cleanup_connections(struct server *srv);
+
+/* extra keywords used as value for other arguments. They are used as
+ * suggestions for mistyped words.
+ */
+static const char *extra_kw_list[] = {
+	"ipv4", "ipv6", "legacy", "octet-count",
+	"fail-check", "sudden-death", "mark-down",
+	NULL /* must be last */
+};
+
+/* List head of all known server keywords */
+struct srv_kw_list srv_keywords = {
+	.list = LIST_HEAD_INIT(srv_keywords.list)
+};
+
+/* lock and tree used for idle-connection handling on servers */
+__decl_thread(HA_SPINLOCK_T idle_conn_srv_lock);
+struct eb_root idle_conn_srv = EB_ROOT;
+struct task *idle_conn_task __read_mostly = NULL;
+struct list servers_list = LIST_HEAD_INIT(servers_list);
+/* task and event queue used by server_atomic_sync() below */
+static struct task *server_atomic_sync_task = NULL;
+static event_hdl_async_equeue server_atomic_sync_queue;
+
+/* SERVER DELETE(n)->ADD global tracker:
+ * This is meant to provide srv->rid (revision id) value.
+ * Revision id allows to differentiate between a previously existing
+ * deleted server and a new server reusing deleted server name/id.
+ *
+ * start value is 0 (even value)
+ * LSB is used to specify that one or multiple srv delete in a row
+ * were performed.
+ * When adding a new server, increment by 1 if current
+ * value is odd (odd = LSB set),
+ * because adding a new server after one or
+ * multiple deletions means we could potentially be reusing old names:
+ * Increase the revision id to prevent mixups between old and new names.
+ *
+ * srv->rid is calculated from cnt even values only.
+ * sizeof(srv_id_reuse_cnt) must be twice sizeof(srv->rid)
+ *
+ * Wraparound is expected and should not cause issues
+ * (with current design we allow up to 4 billion unique revisions)
+ *
+ * Counter is only used under thread_isolate (cli_add/cli_del),
+ * no need for atomic ops.
+ */
+static uint64_t srv_id_reuse_cnt = 0; /* even = stable, odd = deletion(s) pending (see above) */
+
+/* The server names dictionary (unique-keyed: see EB_ROOT_UNIQUE) */
+struct dict server_key_dict = {
+	.name = "server keys",
+	.values = EB_ROOT_UNIQUE,
+};
+
+/* Human-readable causes for administrative state changes, indexed by
+ * enum srv_adm_st_chg_cause (designated initializers).
+ */
+static const char *srv_adm_st_chg_cause_str[] = {
+	[SRV_ADM_STCHGC_NONE] = "",
+	[SRV_ADM_STCHGC_DNS_NOENT] = "entry removed from SRV record",
+	[SRV_ADM_STCHGC_DNS_NOIP] = "No IP for server ",
+	[SRV_ADM_STCHGC_DNS_NX] = "DNS NX status",
+	[SRV_ADM_STCHGC_DNS_TIMEOUT] = "DNS timeout status",
+	[SRV_ADM_STCHGC_DNS_REFUSED] = "DNS refused status",
+	[SRV_ADM_STCHGC_DNS_UNSPEC] = "unspecified DNS error",
+	[SRV_ADM_STCHGC_STATS_DISABLE] = "'disable' on stats page",
+	[SRV_ADM_STCHGC_STATS_STOP] = "'stop' on stats page"
+};
+
+/* returns the human-readable description for administrative state change
+ * cause <cause> (may be an empty string).
+ * NOTE(review): no bounds check on <cause>; callers must pass a valid enum.
+ */
+const char *srv_adm_st_chg_cause(enum srv_adm_st_chg_cause cause)
+{
+	return srv_adm_st_chg_cause_str[cause];
+}
+
+/* Human-readable causes for operational state changes, indexed by
+ * enum srv_op_st_chg_cause (designated initializers).
+ */
+static const char *srv_op_st_chg_cause_str[] = {
+	[SRV_OP_STCHGC_NONE] = "",
+	[SRV_OP_STCHGC_HEALTH] = "",
+	[SRV_OP_STCHGC_AGENT] = "",
+	[SRV_OP_STCHGC_CLI] = "changed from CLI",
+	[SRV_OP_STCHGC_LUA] = "changed from Lua script",
+	[SRV_OP_STCHGC_STATS_WEB] = "changed from Web interface",
+	[SRV_OP_STCHGC_STATEFILE] = "changed from server-state after a reload"
+};
+
+/* returns the human-readable description for operational state change
+ * cause <cause> (may be an empty string).
+ */
+const char *srv_op_st_chg_cause(enum srv_op_st_chg_cause cause)
+{
+	return srv_op_st_chg_cause_str[cause];
+}
+
+/* Returns the total downtime of server <s> in seconds: the accumulated
+ * down_time, plus the time elapsed since last_change when the server is
+ * currently stopped. The last_change >= now comparison ignores negative time.
+ */
+int srv_downtime(const struct server *s)
+{
+	if ((s->cur_state != SRV_ST_STOPPED) || s->last_change >= ns_to_sec(now_ns)) // ignore negative time
+		return s->down_time;
+
+	return ns_to_sec(now_ns) - s->last_change + s->down_time;
+}
+
+/* Returns the number of seconds elapsed since the last session on server <s>,
+ * or -1 when no session was ever accounted for this server.
+ */
+int srv_lastsession(const struct server *s)
+{
+	if (!s->counters.last_sess)
+		return -1;
+
+	return ns_to_sec(now_ns) - s->counters.last_sess;
+}
+
+/* Returns the check interval to use for <check>:
+ * - <inter> when the check is configured (no fastinter pending) and health is
+ *   one step below fully up;
+ * - <downinter> (falling back to <inter>) when the server is stopped with
+ *   zero health;
+ * - <fastinter> (falling back to <inter>) in all other cases.
+ */
+int srv_getinter(const struct check *check)
+{
+	const struct server *s = check->server;
+
+	if ((check->state & (CHK_ST_CONFIGURED|CHK_ST_FASTINTER)) == CHK_ST_CONFIGURED &&
+	    (check->health == check->rise + check->fall - 1))
+		return check->inter;
+
+	if ((s->next_state == SRV_ST_STOPPED) && check->health == 0)
+		return (check->downinter)?(check->downinter):(check->inter);
+
+	return (check->fastinter)?(check->fastinter):(check->inter);
+}
+
+/* Update server's addr:svc_port tuple in INET context
+ *
+ * Must be called under thread isolation to ensure consistent readings across
+ * all threads (addr:svc_port might be read without srv lock being held).
+ * <mapped_port> controls the SRV_F_MAPPORTS flag. When the server is also used
+ * as a DGRAM log target, the log target address is kept in sync.
+ */
+static void _srv_set_inetaddr_port(struct server *srv,
+                                   const struct sockaddr_storage *addr,
+                                   unsigned int svc_port, uint8_t mapped_port)
+{
+	ipcpy(addr, &srv->addr);
+	srv->svc_port = svc_port;
+	if (mapped_port)
+		srv->flags |= SRV_F_MAPPORTS;
+	else
+		srv->flags &= ~SRV_F_MAPPORTS;
+
+	if (srv->log_target && srv->log_target->type == LOG_TARGET_DGRAM) {
+		/* server is used as a log target, manually update log target addr for DGRAM */
+		ipcpy(addr, srv->log_target->addr);
+		set_host_port(srv->log_target->addr, svc_port);
+	}
+}
+
+/* same as _srv_set_inetaddr_port() but only updates the addr part
+ * (the current svc_port and SRV_F_MAPPORTS setting are preserved)
+ */
+static void _srv_set_inetaddr(struct server *srv,
+                              const struct sockaddr_storage *addr)
+{
+	_srv_set_inetaddr_port(srv, addr, srv->svc_port, !!(srv->flags & SRV_F_MAPPORTS));
+}
+
+/*
+ * Function executed by server_atomic_sync_task to perform atomic updates on
+ * compatible server struct members that are not guarded by any lock since
+ * they are not supposed to change often and are subject to being used in
+ * sensitive codepaths
+ *
+ * Some updates may require thread isolation: we start without isolation
+ * but as soon as we encounter an event that requires isolation, we do so.
+ * Once the event is processed, we keep the isolation until we've processed
+ * the whole batch of events and leave isolation once we're done, as it would
+ * be very costly to try to acquire isolation multiple times in a row.
+ * The task will limit itself to a number of events per run to prevent
+ * thread contention (see: "tune.events.max-events-at-once").
+ *
+ * TODO: if we find out that enforcing isolation is too costly, we may
+ * consider adding thread_isolate_try_full(timeout) or equivalent to the
+ * thread API so that we can do our best not to block harmless threads
+ * for too long if one or multiple threads are still heavily busy. This
+ * would mean that the task would be capable of rescheduling itself to
+ * start again on the current event if it failed to acquire thread
+ * isolation. This would also imply that the event_hdl API allows us
+ * to check an event without popping it from the queue first (remove the
+ * event once it is successfully processed).
+ */
+/* forward declaration: needed below to propagate addr changes */
+static void srv_set_addr_desc(struct server *s, int reattach);
+static struct task *server_atomic_sync(struct task *task, void *context, unsigned int state)
+{
+ unsigned int remain = event_hdl_tune.max_events_at_once; // to limit max number of events per batch
+ struct event_hdl_async_event *event;
+
+ /* check for new server events that we care about */
+ while ((event = event_hdl_async_equeue_pop(&server_atomic_sync_queue))) {
+ if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_END)) {
+ /* ending event: no more events to come */
+ event_hdl_async_free_event(event);
+ task_destroy(task);
+ task = NULL;
+ break;
+ }
+
+ if (!remain) {
+ /* STOP: we've already spent all our budget here, and
+ * considering we possibly are under isolation, we cannot
+ * keep blocking other threads any longer.
+ *
+ * Reschedule the task to finish where we left off if
+ * there are remaining events in the queue.
+ */
+ if (!event_hdl_async_equeue_isempty(&server_atomic_sync_queue))
+ task_wakeup(task, TASK_WOKEN_OTHER);
+ break;
+ }
+ remain--;
+
+ /* new event to process */
+ if (event_hdl_sub_type_equal(event->type, EVENT_HDL_SUB_SERVER_INETADDR)) {
+ struct sockaddr_storage new_addr;
+ struct event_hdl_cb_data_server_inetaddr *data = event->data;
+ struct proxy *px;
+ struct server *srv;
+
+ /* server ip:port changed, we must atomically update data members
+ * to prevent invalid reads by other threads.
+ */
+
+ /* check if related server still exists (proxy/server may have
+ * been removed since the event was queued)
+ */
+ px = proxy_find_by_id(data->server.safe.proxy_uuid, PR_CAP_BE, 0);
+ if (!px)
+ continue;
+ srv = findserver_unique_id(px, data->server.safe.puid, data->server.safe.rid);
+ if (!srv)
+ continue;
+
+ /* prepare new addr based on event cb data */
+ memset(&new_addr, 0, sizeof(new_addr));
+ new_addr.ss_family = data->safe.next.family;
+ switch (new_addr.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&new_addr)->sin_addr.s_addr =
+ data->safe.next.addr.v4.s_addr;
+ break;
+ case AF_INET6:
+ memcpy(&((struct sockaddr_in6 *)&new_addr)->sin6_addr,
+ &data->safe.next.addr.v6,
+ sizeof(struct in6_addr));
+ break;
+ case AF_UNSPEC:
+ /* addr reset, nothing to do */
+ break;
+ default:
+ /* should not happen */
+ break;
+ }
+ /*
+ * this requires thread isolation, which is safe since we're the only
+ * task working for the current subscription and we don't hold locks
+ * or resources that other threads may depend on to complete a running
+ * cycle. Note that we do this way because we assume that this event is
+ * rather rare.
+ */
+ if (!thread_isolated())
+ thread_isolate_full();
+
+ /* apply new addr:port combination */
+ _srv_set_inetaddr_port(srv, &new_addr,
+ data->safe.next.port.svc, data->safe.next.port.map);
+
+ /* propagate the changes */
+ if (data->safe.purge_conn) /* force connection cleanup on the given server? */
+ srv_cleanup_connections(srv);
+ srv_set_dyncookie(srv);
+ srv_set_addr_desc(srv, 1);
+ }
+ event_hdl_async_free_event(event);
+ }
+
+ /* some events possibly required thread_isolation:
+ * now that we are done, we must leave thread isolation before
+ * returning
+ */
+ if (thread_isolated())
+ thread_release();
+
+ return task;
+}
+
<br>
+/* Try to start the atomic server sync task.
+ *
+ * Creates the task (once, idempotent), initializes its event queue and
+ * subscribes it to SERVER_INETADDR events in the global subscription list.
+ *
+ * Returns ERR_NONE on success and a combination of ERR_CODE on failure
+ */
+static int server_atomic_sync_start()
+{
+ struct event_hdl_sub_type subscriptions = EVENT_HDL_SUB_NONE;
+
+ if (server_atomic_sync_task)
+ return ERR_NONE; // nothing to do
+ server_atomic_sync_task = task_new_anywhere();
+ if (!server_atomic_sync_task)
+ goto fail;
+ server_atomic_sync_task->process = server_atomic_sync;
+ event_hdl_async_equeue_init(&server_atomic_sync_queue);
+
+ /* task created, now subscribe to relevant server events in the global list */
+ subscriptions = event_hdl_sub_type_add(subscriptions, EVENT_HDL_SUB_SERVER_INETADDR);
+ if (!event_hdl_subscribe(NULL, subscriptions,
+ EVENT_HDL_ASYNC_TASK(&server_atomic_sync_queue,
+ server_atomic_sync_task,
+ NULL,
+ NULL)))
+ goto fail;
+
+
+ return ERR_NONE;
+
+ fail:
+ /* cleanup: task_destroy() accepts NULL */
+ task_destroy(server_atomic_sync_task);
+ server_atomic_sync_task = NULL;
+ return ERR_ALERT | ERR_FATAL;
+}
+REGISTER_POST_CHECK(server_atomic_sync_start);
+
+/* fill common server event data members struct
+ * must be called with server lock or under thread isolate
+ *
+ * "safe" members are copied by value and remain valid after the server is
+ * freed; "unsafe" members (raw pointer + locking hints) must only be
+ * dereferenced by handlers aware of the locking context.
+ */
+static inline void _srv_event_hdl_prepare(struct event_hdl_cb_data_server *cb_data,
+ struct server *srv, uint8_t thread_isolate)
+{
+ /* safe data assignments */
+ cb_data->safe.puid = srv->puid;
+ cb_data->safe.rid = srv->rid;
+ cb_data->safe.flags = srv->flags;
+ snprintf(cb_data->safe.name, sizeof(cb_data->safe.name), "%s", srv->id);
+ cb_data->safe.proxy_name[0] = '\0';
+ cb_data->safe.proxy_uuid = -1; /* default value */
+ if (srv->proxy) {
+ cb_data->safe.proxy_uuid = srv->proxy->uuid;
+ snprintf(cb_data->safe.proxy_name, sizeof(cb_data->safe.proxy_name), "%s", srv->proxy->id);
+ }
+ /* unsafe data assignments */
+ cb_data->unsafe.ptr = srv;
+ cb_data->unsafe.thread_isolate = thread_isolate;
+ cb_data->unsafe.srv_lock = !thread_isolate;
+}
+
+/* take an event-check snapshot from a live check
+ * (copies check result, duration, status/code and health thresholds into
+ * <checkres> so they remain valid after the check itself changes)
+ */
+void _srv_event_hdl_prepare_checkres(struct event_hdl_cb_data_server_checkres *checkres,
+ struct check *check)
+{
+ checkres->agent = !!(check->state & CHK_ST_AGENT);
+ checkres->result = check->result;
+ checkres->duration = check->duration;
+ checkres->reason.status = check->status;
+ checkres->reason.code = check->code;
+ checkres->health.cur = check->health;
+ checkres->health.rise = check->rise;
+ checkres->health.fall = check->fall;
+}
+
+/* Prepare SERVER_STATE event
+ *
+ * This special event will contain extra hints related to the state change
+ *
+ * <type> selects the cause namespace: non-zero means administrative
+ * (srv_adm_st_chg_cause), zero means operational (srv_op_st_chg_cause).
+ * For operational check-driven changes, a snapshot of the triggering
+ * check is embedded as well.
+ *
+ * Must be called with server lock held
+ */
+void _srv_event_hdl_prepare_state(struct event_hdl_cb_data_server_state *cb_data,
+ struct server *srv, int type, int cause,
+ enum srv_state prev_state, int requeued)
+{
+ /* state event provides additional info about the server state change */
+ cb_data->safe.type = type;
+ cb_data->safe.new_state = srv->cur_state;
+ cb_data->safe.old_state = prev_state;
+ cb_data->safe.requeued = requeued;
+ if (type) {
+ /* administrative */
+ cb_data->safe.adm_st_chg.cause = cause;
+ }
+ else {
+ /* operational */
+ cb_data->safe.op_st_chg.cause = cause;
+ if (cause == SRV_OP_STCHGC_HEALTH || cause == SRV_OP_STCHGC_AGENT) {
+ struct check *check = (cause == SRV_OP_STCHGC_HEALTH) ? &srv->check : &srv->agent;
+
+ /* provide additional check-related state change result */
+ _srv_event_hdl_prepare_checkres(&cb_data->safe.op_st_chg.check, check);
+ }
+ }
+}
+
+/* Prepare SERVER_INETADDR event, prev data is learned from the current
+ * server settings.
+ *
+ * This special event will contain extra hints related to the addr change
+ *
+ * Both prev and next tuples are stored by value (family, v4/v6 addr,
+ * service port and mapped-port flag) so handlers can consume them without
+ * touching the server. Only AF_UNSPEC/AF_INET/AF_INET6 are supported.
+ *
+ * Must be called with the server lock held.
+ */
+static void _srv_event_hdl_prepare_inetaddr(struct event_hdl_cb_data_server_inetaddr *cb_data,
+ struct server *srv,
+ const struct sockaddr_storage *next_addr,
+ unsigned int next_port, uint8_t next_mapports,
+ uint8_t purge_conn)
+{
+ struct sockaddr_storage *prev_addr = &srv->addr;
+ unsigned int prev_port = srv->svc_port;
+ uint8_t prev_mapports = !!(srv->flags & SRV_F_MAPPORTS);
+
+ /* only INET families are supported */
+ BUG_ON((prev_addr->ss_family != AF_UNSPEC &&
+ prev_addr->ss_family != AF_INET && prev_addr->ss_family != AF_INET6) ||
+ (next_addr->ss_family != AF_UNSPEC &&
+ next_addr->ss_family != AF_INET && next_addr->ss_family != AF_INET6));
+
+ /* prev */
+ cb_data->safe.prev.family = prev_addr->ss_family;
+ memset(&cb_data->safe.prev.addr, 0, sizeof(cb_data->safe.prev.addr));
+ if (prev_addr->ss_family == AF_INET)
+ cb_data->safe.prev.addr.v4.s_addr =
+ ((struct sockaddr_in *)prev_addr)->sin_addr.s_addr;
+ else if (prev_addr->ss_family == AF_INET6)
+ memcpy(&cb_data->safe.prev.addr.v6,
+ &((struct sockaddr_in6 *)prev_addr)->sin6_addr,
+ sizeof(struct in6_addr));
+ cb_data->safe.prev.port.svc = prev_port;
+ cb_data->safe.prev.port.map = prev_mapports;
+
+ /* next */
+ cb_data->safe.next.family = next_addr->ss_family;
+ memset(&cb_data->safe.next.addr, 0, sizeof(cb_data->safe.next.addr));
+ if (next_addr->ss_family == AF_INET)
+ cb_data->safe.next.addr.v4.s_addr =
+ ((struct sockaddr_in *)next_addr)->sin_addr.s_addr;
+ else if (next_addr->ss_family == AF_INET6)
+ memcpy(&cb_data->safe.next.addr.v6,
+ &((struct sockaddr_in6 *)next_addr)->sin6_addr,
+ sizeof(struct in6_addr));
+ cb_data->safe.next.port.svc = next_port;
+ cb_data->safe.next.port.map = next_mapports;
+
+ cb_data->safe.purge_conn = purge_conn;
+}
+
+/* server event publishing helper: publish in both global and
+ * server dedicated subscription list.
+ * <e>: event type, <d>: prepared cb data struct, <s>: server pointer.
+ */
+#define _srv_event_hdl_publish(e, d, s) \
+ ({ \
+ /* publish in server dedicated sub list */ \
+ event_hdl_publish(&s->e_subs, e, EVENT_HDL_CB_DATA(&d));\
+ /* publish in global subscription list */ \
+ event_hdl_publish(NULL, e, EVENT_HDL_CB_DATA(&d)); \
+ })
+
+/* General server event publishing:
+ * Use this to publish EVENT_HDL_SUB_SERVER family type event
+ * from srv facility.
+ *
+ * server ptr must be valid.
+ * Must be called with srv lock or under thread_isolate.
+ */
+static void srv_event_hdl_publish(struct event_hdl_sub_type event,
+ struct server *srv, uint8_t thread_isolate)
+{
+ struct event_hdl_cb_data_server cb_data;
+
+ /* prepare event data */
+ _srv_event_hdl_prepare(&cb_data, srv, thread_isolate);
+ _srv_event_hdl_publish(event, cb_data, srv);
+}
+
+/* Publish SERVER_CHECK event
+ *
+ * This special event will contain extra hints related to the check itself
+ *
+ * Must be called with server lock held
+ */
+void srv_event_hdl_publish_check(struct server *srv, struct check *check)
+{
+ struct event_hdl_cb_data_server_check cb_data;
+
+ /* check event provides additional info about the server check */
+ _srv_event_hdl_prepare_checkres(&cb_data.safe.res, check);
+
+ cb_data.unsafe.ptr = check;
+
+ /* prepare event data (common server data); cast is valid because the
+ * check cb data struct starts with the common server cb data layout
+ */
+ _srv_event_hdl_prepare((struct event_hdl_cb_data_server *)&cb_data, srv, 0);
+
+ _srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_CHECK, cb_data, srv);
+}
+
+/*
+ * Check that we did not get a hash collision.
+ * Unlikely, but it can happen. The server's proxy must be at least
+ * read-locked.
+ *
+ * Only emits a warning when two servers of the same proxy end up with the
+ * same dynamic cookie; servers in forced maintenance are skipped since
+ * their address may only be a placeholder.
+ */
+static inline void srv_check_for_dup_dyncookie(struct server *s)
+{
+ struct proxy *p = s->proxy;
+ struct server *tmpserv;
+
+ for (tmpserv = p->srv; tmpserv != NULL;
+ tmpserv = tmpserv->next) {
+ if (tmpserv == s)
+ continue;
+ if (tmpserv->next_admin & SRV_ADMF_FMAINT)
+ continue;
+ if (tmpserv->cookie &&
+ strcmp(tmpserv->cookie, s->cookie) == 0) {
+ ha_warning("We generated two equal cookies for two different servers.\n"
+ "Please change the secret key for '%s'.\n",
+ s->proxy->id);
+ }
+ }
+
+}
+
+/*
+ * Compute and assign server <s>'s dynamic cookie: an XXH64 hash of the
+ * proxy's dyncookie secret key + the server's IP address + its port,
+ * rendered as a 16-char hex string. No-op if a cookie was explicitly
+ * configured, dynamic cookies are disabled, or the address family is
+ * neither IPv4 nor IPv6.
+ *
+ * Must be called with the server lock held, and will read-lock the proxy.
+ */
+void srv_set_dyncookie(struct server *s)
+{
+ struct proxy *p = s->proxy;
+ char *tmpbuf;
+ unsigned long long hash_value;
+ size_t key_len;
+ size_t buffer_len;
+ int addr_len;
+ int port;
+
+ HA_RWLOCK_RDLOCK(PROXY_LOCK, &p->lock);
+
+ if ((s->flags & SRV_F_COOKIESET) ||
+ !(s->proxy->ck_opts & PR_CK_DYNAMIC) ||
+ s->proxy->dyncookie_key == NULL)
+ goto out;
+ key_len = strlen(p->dyncookie_key);
+
+ if (s->addr.ss_family != AF_INET &&
+ s->addr.ss_family != AF_INET6)
+ goto out;
+ /*
+ * Buffer to calculate the cookie value.
+ * The buffer contains the secret key + the server IP address
+ * + the TCP port.
+ */
+ addr_len = (s->addr.ss_family == AF_INET) ? 4 : 16;
+ /*
+ * The TCP port should use only 2 bytes, but is stored in
+ * an unsigned int in struct server, so let's use 4, to be
+ * on the safe side.
+ */
+ buffer_len = key_len + addr_len + 4;
+ tmpbuf = trash.area;
+ memcpy(tmpbuf, p->dyncookie_key, key_len);
+ memcpy(&(tmpbuf[key_len]),
+ s->addr.ss_family == AF_INET ?
+ (void *)&((struct sockaddr_in *)&s->addr)->sin_addr.s_addr :
+ (void *)&(((struct sockaddr_in6 *)&s->addr)->sin6_addr.s6_addr),
+ addr_len);
+ /*
+ * Make sure it's the same across all the load balancers,
+ * no matter their endianness.
+ */
+ port = htonl(s->svc_port);
+ memcpy(&tmpbuf[key_len + addr_len], &port, 4);
+ hash_value = XXH64(tmpbuf, buffer_len, 0);
+ memprintf(&s->cookie, "%016llx", hash_value);
+ if (!s->cookie)
+ goto out;
+ s->cklen = 16;
+
+ /* Don't bother checking if the dyncookie is duplicated if
+ * the server is marked as "disabled", maybe it doesn't have
+ * its real IP yet, but just a place holder.
+ */
+ if (!(s->next_admin & SRV_ADMF_FMAINT))
+ srv_check_for_dup_dyncookie(s);
+ out:
+ HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &p->lock);
+}
+
+/* Returns true if it's possible to reuse an idle connection from server <srv>
+ * for a websocket stream. This is the case if server is configured to use the
+ * same protocol for both HTTP and websocket streams. This depends on the value
+ * of "proto", "alpn" and "ws" keywords.
+ */
+int srv_check_reuse_ws(struct server *srv)
+{
+ if (srv->mux_proto || srv->use_ssl != 1 || !srv->ssl_ctx.alpn_str) {
+ /* explicit srv.mux_proto or no ALPN : srv.mux_proto is used
+ * for mux selection.
+ */
+ const struct ist srv_mux = srv->mux_proto ?
+ srv->mux_proto->token : IST_NULL;
+
+ switch (srv->ws) {
+ /* "auto" means use the same protocol : reuse is possible. */
+ case SRV_WS_AUTO:
+ return 1;
+
+ /* "h2" means use h2 for websocket : reuse is possible if
+ * server mux is h2.
+ */
+ case SRV_WS_H2:
+ if (srv->mux_proto && isteq(srv_mux, ist("h2")))
+ return 1;
+ break;
+
+ /* "h1" means use h1 for websocket : reuse is possible if
+ * server mux is h1.
+ */
+ case SRV_WS_H1:
+ if (!srv->mux_proto || isteq(srv_mux, ist("h1")))
+ return 1;
+ break;
+ }
+ }
+ else {
+ /* ALPN selection.
+ * Based on the assumption that only "h2" and "http/1.1" token
+ * are used on server ALPN.
+ */
+ const struct ist alpn = ist2(srv->ssl_ctx.alpn_str,
+ srv->ssl_ctx.alpn_len);
+
+ switch (srv->ws) {
+ case SRV_WS_AUTO:
+ /* for auto mode, consider reuse as possible if the
+ * server uses a single protocol ALPN
+ */
+ if (!istchr(alpn, ','))
+ return 1;
+ break;
+
+ /* note: ALPN tokens are length-prefixed wire format,
+ * hence the leading \x02 / \x08 bytes below
+ */
+ case SRV_WS_H2:
+ return isteq(alpn, ist("\x02h2"));
+
+ case SRV_WS_H1:
+ return isteq(alpn, ist("\x08http/1.1"));
+ }
+ }
+
+ return 0;
+}
+
+/* Return the proto to use for a websocket stream on <srv> without ALPN. NULL
+ * is a valid value indicating to use the fallback mux.
+ */
+const struct mux_ops *srv_get_ws_proto(struct server *srv)
+{
+ const struct mux_proto_list *mux = NULL;
+
+ switch (srv->ws) {
+ case SRV_WS_AUTO:
+ /* same mux as regular HTTP traffic (may be NULL) */
+ mux = srv->mux_proto;
+ break;
+
+ case SRV_WS_H1:
+ mux = get_mux_proto(ist("h1"));
+ break;
+
+ case SRV_WS_H2:
+ mux = get_mux_proto(ist("h2"));
+ break;
+ }
+
+ return mux ? mux->mux : NULL;
+}
+
+/*
+ * Must be called with the server lock held. The server is first removed from
+ * the proxy tree if it was already attached. If <reattach> is true, the server
+ * will then be attached in the proxy tree. The proxy lock is held to
+ * manipulate the tree.
+ *
+ * The tree key is the "addr:port" string built by sa2str(); if the key is
+ * unchanged the function returns early without touching the tree.
+ */
+static void srv_set_addr_desc(struct server *s, int reattach)
+{
+ struct proxy *p = s->proxy;
+ char *key;
+
+ key = sa2str(&s->addr, s->svc_port, s->flags & SRV_F_MAPPORTS);
+
+ if (s->addr_node.key) {
+ if (key && strcmp(key, s->addr_node.key) == 0) {
+ /* address unchanged: nothing to update */
+ free(key);
+ return;
+ }
+
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
+ ebpt_delete(&s->addr_node);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
+
+ free(s->addr_node.key);
+ }
+
+ s->addr_node.key = key;
+
+ if (reattach) {
+ if (s->addr_node.key) {
+ HA_RWLOCK_WRLOCK(PROXY_LOCK, &p->lock);
+ ebis_insert(&p->used_server_addr, &s->addr_node);
+ HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->lock);
+ }
+ }
+}
+
+/*
+ * Registers the server keyword list <kwl> as a list of valid keywords for next
+ * parsing sessions.
+ */
+void srv_register_keywords(struct srv_kw_list *kwl)
+{
+ LIST_APPEND(&srv_keywords.list, &kwl->list);
+}
+
+/* Return a pointer to the server keyword <kw>, or NULL if not found. If the
+ * keyword is found with a NULL ->parse() function, then an attempt is made to
+ * find one with a valid ->parse() function. This way it is possible to declare
+ * platform-dependent, known keywords as NULL, then only declare them as valid
+ * if some options are met. Note that if the requested keyword contains an
+ * opening parenthesis, everything from this point is ignored.
+ */
+struct srv_kw *srv_find_kw(const char *kw)
+{
+ int index;
+ const char *kwend;
+ struct srv_kw_list *kwl;
+ struct srv_kw *ret = NULL;
+
+ kwend = strchr(kw, '(');
+ if (!kwend)
+ kwend = kw + strlen(kw);
+
+ list_for_each_entry(kwl, &srv_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ /* compare only up to the '(' (or full string) and
+ * require an exact-length match in the list entry
+ */
+ if ((strncmp(kwl->kw[index].kw, kw, kwend - kw) == 0) &&
+ kwl->kw[index].kw[kwend-kw] == 0) {
+ if (kwl->kw[index].parse)
+ return &kwl->kw[index]; /* found it !*/
+ else
+ ret = &kwl->kw[index]; /* may be OK */
+ }
+ }
+ }
+ return ret;
+}
+
+/* Dumps all registered "server" keywords to the <out> string pointer. The
+ * unsupported keywords are only dumped if their supported form was not
+ * found. <out> is allocated/grown via memprintf(); caller owns and frees it.
+ */
+void srv_dump_kws(char **out)
+{
+ struct srv_kw_list *kwl;
+ int index;
+
+ if (!out)
+ return;
+
+ *out = NULL;
+ list_for_each_entry(kwl, &srv_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ if (kwl->kw[index].parse ||
+ srv_find_kw(kwl->kw[index].kw) == &kwl->kw[index]) {
+ memprintf(out, "%s[%4s] %s%s%s%s\n", *out ? *out : "",
+ kwl->scope,
+ kwl->kw[index].kw,
+ kwl->kw[index].skip ? " <arg>" : "",
+ kwl->kw[index].default_ok ? " [dflt_ok]" : "",
+ kwl->kw[index].parse ? "" : " (not supported)");
+ }
+ }
+ }
+}
+
+/* Try to find in srv_keywords the word that looks closest to <word> by
+ * counting transitions between letters, digits and other characters. Will
+ * return the best matching word if found, otherwise NULL. The global
+ * extra_kw_list array (NULL-terminated) is also compared against.
+ * A match is rejected when its distance exceeds twice the length of either
+ * word, to avoid suggesting unrelated keywords.
+ */
+static const char *srv_find_best_kw(const char *word)
+{
+ uint8_t word_sig[1024];
+ uint8_t list_sig[1024];
+ const struct srv_kw_list *kwl;
+ const char *best_ptr = NULL;
+ int dist, best_dist = INT_MAX;
+ const char **extra;
+ int index;
+
+ make_word_fingerprint(word_sig, word);
+ list_for_each_entry(kwl, &srv_keywords.list, list) {
+ for (index = 0; kwl->kw[index].kw != NULL; index++) {
+ make_word_fingerprint(list_sig, kwl->kw[index].kw);
+ dist = word_fingerprint_distance(word_sig, list_sig);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_ptr = kwl->kw[index].kw;
+ }
+ }
+ }
+
+ for (extra = extra_kw_list; *extra; extra++) {
+ make_word_fingerprint(list_sig, *extra);
+ dist = word_fingerprint_distance(word_sig, list_sig);
+ if (dist < best_dist) {
+ best_dist = dist;
+ best_ptr = *extra;
+ }
+ }
+
+ if (best_dist > 2 * strlen(word) || (best_ptr && best_dist > 2 * strlen(best_ptr)))
+ best_ptr = NULL;
+
+ return best_ptr;
+}
+
+/* Parse the "backup" server keyword. Always returns 0 (success). */
+static int srv_parse_backup(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->flags |= SRV_F_BACKUP;
+ return 0;
+}
+
+
+/* Parse the "cookie" server keyword.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_cookie(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* free any previous value (e.g. inherited from default-server) */
+ free(newsrv->cookie);
+ newsrv->cookie = strdup(arg);
+ newsrv->cklen = strlen(arg);
+ newsrv->flags |= SRV_F_COOKIESET;
+ return 0;
+}
+
+/* Parse the "disabled" server keyword: force the server into maintenance
+ * and pause its checks. Always returns 0.
+ */
+static int srv_parse_disabled(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->next_admin |= SRV_ADMF_CMAINT | SRV_ADMF_FMAINT;
+ newsrv->next_state = SRV_ST_STOPPED;
+ newsrv->check.state |= CHK_ST_PAUSED;
+ newsrv->check.health = 0;
+ return 0;
+}
+
+/* Parse the "enabled" server keyword: undo "disabled" (clears maintenance,
+ * resumes checks with full health). Always returns 0.
+ */
+static int srv_parse_enabled(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->next_admin &= ~SRV_ADMF_CMAINT & ~SRV_ADMF_FMAINT;
+ newsrv->next_state = SRV_ST_RUNNING;
+ newsrv->check.state &= ~CHK_ST_PAUSED;
+ newsrv->check.health = newsrv->check.rise;
+ return 0;
+}
+
+/* Parse the "error-limit" server keyword: strictly positive integer giving
+ * the number of consecutive errors before applying on-error behaviour.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_error_limit(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' expects an integer argument.",
+ args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->consecutive_errors_limit = atoi(args[*cur_arg + 1]);
+
+ if (newsrv->consecutive_errors_limit <= 0) {
+ memprintf(err, "%s has to be > 0.",
+ args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* Parse the "ws" keyword: selects the protocol used for websocket streams
+ * ('auto', 'h1' or 'h2'); see srv_check_reuse_ws()/srv_get_ws_proto().
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_ws(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ /* NOTE(review): this tests the args pointer itself while sibling
+ * parsers test !*args[...] for an empty string; a missing argument
+ * would still be rejected by the final else branch below — confirm
+ * whether !*args[*cur_arg + 1] was intended for a clearer message.
+ */
+ if (!args[*cur_arg + 1]) {
+ memprintf(err, "'%s' expects 'auto', 'h1' or 'h2' value", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(args[*cur_arg + 1], "h1") == 0) {
+ newsrv->ws = SRV_WS_H1;
+ }
+ else if (strcmp(args[*cur_arg + 1], "h2") == 0) {
+ newsrv->ws = SRV_WS_H2;
+ }
+ else if (strcmp(args[*cur_arg + 1], "auto") == 0) {
+ newsrv->ws = SRV_WS_AUTO;
+ }
+ else {
+ memprintf(err, "'%s' has to be 'auto', 'h1' or 'h2'", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+
+ return 0;
+}
+
+/* Parse the "init-addr" server keyword: a comma-separated ordered list of
+ * address resolution methods ('libc', 'last', 'none' or a literal IP) tried
+ * at startup. Methods are packed into <init_addr_methods>; at most one
+ * literal IP is accepted. Previously configured methods are reset first.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_init_addr(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ char *p, *end;
+ int done;
+ struct sockaddr_storage sa;
+
+ newsrv->init_addr_methods = 0;
+ memset(&newsrv->init_addr, 0, sizeof(newsrv->init_addr));
+
+ for (p = args[*cur_arg + 1]; *p; p = end) {
+ /* cut on next comma */
+ for (end = p; *end && *end != ','; end++);
+ if (*end)
+ *(end++) = 0;
+
+ memset(&sa, 0, sizeof(sa));
+ if (strcmp(p, "libc") == 0) {
+ done = srv_append_initaddr(&newsrv->init_addr_methods, SRV_IADDR_LIBC);
+ }
+ else if (strcmp(p, "last") == 0) {
+ done = srv_append_initaddr(&newsrv->init_addr_methods, SRV_IADDR_LAST);
+ }
+ else if (strcmp(p, "none") == 0) {
+ done = srv_append_initaddr(&newsrv->init_addr_methods, SRV_IADDR_NONE);
+ }
+ else if (str2ip2(p, &sa, 0)) {
+ if (is_addr(&newsrv->init_addr)) {
+ memprintf(err, "'%s' : initial address already specified, cannot add '%s'.",
+ args[*cur_arg], p);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->init_addr = sa;
+ done = srv_append_initaddr(&newsrv->init_addr_methods, SRV_IADDR_IP);
+ }
+ else {
+ memprintf(err, "'%s' : unknown init-addr method '%s', supported methods are 'libc', 'last', 'none'.",
+ args[*cur_arg], p);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ /* done == 0 means the method list is full */
+ if (!done) {
+ memprintf(err, "'%s' : too many init-addr methods when trying to add '%s'",
+ args[*cur_arg], p);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Parse the "log-bufsize" server keyword: strictly positive integer sizing
+ * the ring buffer used when the server is a log target.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_log_bufsize(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' expects an integer argument.",
+ args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->log_bufsize = atoi(args[*cur_arg + 1]);
+
+ if (newsrv->log_bufsize <= 0) {
+ memprintf(err, "%s has to be > 0.",
+ args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* Parse the "log-proto" server keyword: 'legacy' or 'octet-count' framing
+ * for logs forwarded to this server.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_log_proto(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ if (strcmp(args[*cur_arg + 1], "legacy") == 0)
+ newsrv->log_proto = SRV_LOG_PROTO_LEGACY;
+ else if (strcmp(args[*cur_arg + 1], "octet-count") == 0)
+ newsrv->log_proto = SRV_LOG_PROTO_OCTET_COUNTING;
+ else {
+ memprintf(err, "'%s' expects one of 'legacy' or 'octet-count' but got '%s'",
+ args[*cur_arg], args[*cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* Parse the "maxconn" server keyword.
+ * NOTE(review): the argument is converted with atol() without presence or
+ * range validation, unlike e.g. srv_parse_error_limit() — presumably relied
+ * upon by later config checks; confirm before tightening.
+ */
+static int srv_parse_maxconn(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->maxconn = atol(args[*cur_arg + 1]);
+ return 0;
+}
+
+/* Parse the "maxqueue" server keyword (unvalidated atol(), see maxconn note). */
+static int srv_parse_maxqueue(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->maxqueue = atol(args[*cur_arg + 1]);
+ return 0;
+}
+
+/* Parse the "minconn" server keyword (unvalidated atol(), see maxconn note). */
+static int srv_parse_minconn(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->minconn = atol(args[*cur_arg + 1]);
+ return 0;
+}
+
+/* Parse the "max-reuse" server keyword: number of times an idle connection
+ * may be reused. Returns 0 on success, ERR_ALERT | ERR_FATAL otherwise.
+ */
+static int srv_parse_max_reuse(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->max_reuse = atoi(arg);
+
+ return 0;
+}
+
+/* Parse the "pool-purge-delay" server keyword: a time value (default unit
+ * milliseconds) between two purges of idle connections.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_pool_purge_delay(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ const char *res;
+ char *arg;
+ unsigned int time;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ res = parse_time_err(arg, &time, TIME_UNIT_MS);
+ if (res == PARSE_TIME_OVER) {
+ memprintf(err, "timer overflow in argument '%s' to '%s' (maximum value is 2147483647 ms or ~24.8 days)",
+ args[*cur_arg+1], args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (res == PARSE_TIME_UNDER) {
+ memprintf(err, "timer underflow in argument '%s' to '%s' (minimum non-null value is 1 ms)",
+ args[*cur_arg+1], args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ else if (res) {
+ /* any other non-NULL result points at the first invalid char */
+ memprintf(err, "unexpected character '%c' in argument to <%s>.\n",
+ *res, args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+ newsrv->pool_purge_delay = time;
+
+ return 0;
+}
+
+/* Parse the "pool-low-conn" server keyword: threshold of idle connections
+ * below which a thread may steal a connection from another thread.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_pool_low_conn(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->low_idle_conns = atoi(arg);
+ return 0;
+}
+
+/* Parse the "pool-max-conn" server keyword: maximum number of idle
+ * connections kept for reuse (-1 means unlimited).
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_pool_max_conn(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' expects <value> as argument.\n", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->max_idle_conns = atoi(arg);
+ if ((int)newsrv->max_idle_conns < -1) {
+ memprintf(err, "'%s' must be >= -1", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* parse the "id" server keyword: forces the server's persistent unique id
+ * (puid), which must be > 0 and unique within the proxy.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_id(char **args, int *cur_arg, struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ struct eb32_node *node;
+
+ if (!*args[*cur_arg + 1]) {
+ memprintf(err, "'%s' : expects an integer argument", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->puid = atol(args[*cur_arg + 1]);
+ newsrv->conf.id.key = newsrv->puid;
+
+ if (newsrv->puid <= 0) {
+ memprintf(err, "'%s' : custom id has to be > 0", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* reject duplicate ids, pointing at the conflicting config location */
+ node = eb32_lookup(&curproxy->conf.used_server_id, newsrv->puid);
+ if (node) {
+ struct server *target = container_of(node, struct server, conf.id);
+ memprintf(err, "'%s' : custom id %d already used at %s:%d ('server %s')",
+ args[*cur_arg], newsrv->puid, target->conf.file, target->conf.line,
+ target->id);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newsrv->flags |= SRV_F_FORCED_ID;
+ return 0;
+}
+
+/* Parse the "namespace" server keyword: attach the server to a network
+ * namespace, or to the namespace conveyed by the PROXY protocol when the
+ * argument is '*'. Only available when built with USE_NS.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> set otherwise.
+ */
+static int srv_parse_namespace(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+#ifdef USE_NS
+ char *arg;
+
+ arg = args[*cur_arg + 1];
+ if (!*arg) {
+ memprintf(err, "'%s' : expects <name> as argument", args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (strcmp(arg, "*") == 0) {
+ /* Use the namespace associated with the connection (if present). */
+ newsrv->flags |= SRV_F_USE_NS_FROM_PP;
+ return 0;
+ }
+
+ /*
+ * As this parser may be called several times for the same 'default-server'
+ * object, or for a new 'server' instance deriving from a 'default-server'
+ * one with SRV_F_USE_NS_FROM_PP flag enabled, let's reset it.
+ */
+ newsrv->flags &= ~SRV_F_USE_NS_FROM_PP;
+
+ /* look the namespace up, creating it on first use */
+ newsrv->netns = netns_store_lookup(arg, strlen(arg));
+ if (!newsrv->netns)
+ newsrv->netns = netns_store_insert(arg);
+
+ if (!newsrv->netns) {
+ memprintf(err, "Cannot open namespace '%s'", arg);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+#else
+ memprintf(err, "'%s': '%s' option not implemented", args[0], args[*cur_arg]);
+ return ERR_ALERT | ERR_FATAL;
+#endif
+}
+
+/* Parse the "no-backup" server keyword. Always returns 0. */
+static int srv_parse_no_backup(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->flags &= ~SRV_F_BACKUP;
+ return 0;
+}
+
+
+/* Disable server PROXY protocol flags. */
+static inline int srv_disable_pp_flags(struct server *srv, unsigned int flags)
+{
+ srv->pp_opts &= ~flags;
+ return 0;
+}
+
+/* Parse the "no-send-proxy" server keyword */
+static int srv_parse_no_send_proxy(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ return srv_disable_pp_flags(newsrv, SRV_PP_V1);
+}
+
+/* Parse the "no-send-proxy-v2" server keyword */
+static int srv_parse_no_send_proxy_v2(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ return srv_disable_pp_flags(newsrv, SRV_PP_V2);
+}
+
+/* Parse the "shard" server keyword */
+static int srv_parse_shard(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ newsrv->shard = atol(args[*cur_arg + 1]);
+ return 0;
+}
+
+/* Parser for the "no-tfo" server keyword: disable TCP Fast Open towards
+ * this server.
+ */
+static int srv_parse_no_tfo(char **args, int *cur_arg,
+                            struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	newsrv->flags = newsrv->flags & ~SRV_F_FASTOPEN;
+	return 0;
+}
+
+/* Parser for the "non-stick" server keyword: exclude this server from
+ * stick-table persistence.
+ */
+static int srv_parse_non_stick(char **args, int *cur_arg,
+                               struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	newsrv->flags = newsrv->flags | SRV_F_NON_STICK;
+	return 0;
+}
+
+/* Helper setting PROXY protocol option bits <flags> on server <srv>.
+ * Always succeeds and returns 0 so keyword parsers can return it directly.
+ */
+static inline int srv_enable_pp_flags(struct server *srv, unsigned int flags)
+{
+	srv->pp_opts = srv->pp_opts | flags;
+	return 0;
+}
+/* parse the "proto" server keyword.
+ * Forces the multiplexer protocol used for outgoing connections to this
+ * server: the argument is looked up in the registered mux protocol list and
+ * the result stored in newsrv->mux_proto.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> filled otherwise.
+ */
+static int srv_parse_proto(char **args, int *cur_arg,
+                           struct proxy *px, struct server *newsrv, char **err)
+{
+	struct ist proto;
+
+	/* the keyword requires a protocol name right after it */
+	if (!*args[*cur_arg + 1]) {
+		memprintf(err, "'%s' : missing value", args[*cur_arg]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	proto = ist(args[*cur_arg + 1]);
+	/* NULL means no mux was registered under that name */
+	newsrv->mux_proto = get_mux_proto(proto);
+	if (!newsrv->mux_proto) {
+		memprintf(err, "'%s' : unknown MUX protocol '%s'", args[*cur_arg], args[*cur_arg+1]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	return 0;
+}
+
+/* parse the "proxy-v2-options" server keyword: a comma-separated list of
+ * optional fields to advertise in the PROXY protocol V2 header. Each
+ * recognized token sets the matching SRV_PP_V2_* bit; SSL-derived fields
+ * also imply the generic SRV_PP_V2_SSL bit.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> filled when the
+ * value is missing or an option is unknown.
+ */
+static int srv_parse_proxy_v2_options(char **args, int *cur_arg,
+                                      struct proxy *px, struct server *newsrv, char **err)
+{
+	char *p, *n;
+
+	/* an empty value previously fell through to the generic "not
+	 * implemented" error with an empty option name; report it clearly.
+	 */
+	if (!*args[*cur_arg + 1]) {
+		memprintf(err, "'%s' : missing value", args[*cur_arg]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	/* tokenize in place on commas */
+	for (p = args[*cur_arg + 1]; p; p = n) {
+		n = strchr(p, ',');
+		if (n)
+			*n++ = '\0';
+		if (strcmp(p, "ssl") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_SSL;
+		} else if (strcmp(p, "cert-cn") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_SSL;
+			newsrv->pp_opts |= SRV_PP_V2_SSL_CN;
+		} else if (strcmp(p, "cert-key") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_SSL;
+			newsrv->pp_opts |= SRV_PP_V2_SSL_KEY_ALG;
+		} else if (strcmp(p, "cert-sig") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_SSL;
+			newsrv->pp_opts |= SRV_PP_V2_SSL_SIG_ALG;
+		} else if (strcmp(p, "ssl-cipher") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_SSL;
+			newsrv->pp_opts |= SRV_PP_V2_SSL_CIPHER;
+		} else if (strcmp(p, "authority") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_AUTHORITY;
+		} else if (strcmp(p, "crc32c") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_CRC32C;
+		} else if (strcmp(p, "unique-id") == 0) {
+			newsrv->pp_opts |= SRV_PP_V2_UNIQUE_ID;
+		} else
+			goto fail;
+	}
+	return 0;
+ fail:
+	if (err)
+		memprintf(err, "'%s' : proxy v2 option not implemented", p);
+	return ERR_ALERT | ERR_FATAL;
+}
+
+/* Parse the "observe" server keyword.
+ * Selects which traffic is observed to adjust the server's health:
+ * "none", "layer4" or "layer7", the latter being valid only for HTTP
+ * proxies. Returns 0 on success, ERR_ALERT | ERR_FATAL on missing or
+ * unknown mode.
+ * NOTE(review): the "layer7 in non-HTTP proxy" branch returns ERR_ALERT
+ * without ERR_FATAL, unlike the other error paths — confirm this softer
+ * failure is intentional.
+ */
+static int srv_parse_observe(char **args, int *cur_arg,
+                             struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	char *arg;
+
+	arg = args[*cur_arg + 1];
+	if (!*arg) {
+		memprintf(err, "'%s' expects <mode> as argument.\n", args[*cur_arg]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	if (strcmp(arg, "none") == 0) {
+		newsrv->observe = HANA_OBS_NONE;
+	}
+	else if (strcmp(arg, "layer4") == 0) {
+		newsrv->observe = HANA_OBS_LAYER4;
+	}
+	else if (strcmp(arg, "layer7") == 0) {
+		/* layer7 observation relies on HTTP analysis */
+		if (curproxy->mode != PR_MODE_HTTP) {
+			memprintf(err, "'%s' can only be used in http proxies.\n", arg);
+			return ERR_ALERT;
+		}
+		newsrv->observe = HANA_OBS_LAYER7;
+	}
+	else {
+		memprintf(err, "'%s' expects one of 'none', 'layer4', 'layer7' "
+		               "but got '%s'\n", args[*cur_arg], arg);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	return 0;
+}
+
+/* Parse the "on-error" server keyword */
+static int srv_parse_on_error(char **args, int *cur_arg,
+ struct proxy *curproxy, struct server *newsrv, char **err)
+{
+ if (strcmp(args[*cur_arg + 1], "fastinter") == 0)
+ newsrv->onerror = HANA_ONERR_FASTINTER;
+ else if (strcmp(args[*cur_arg + 1], "fail-check") == 0)
+ newsrv->onerror = HANA_ONERR_FAILCHK;
+ else if (strcmp(args[*cur_arg + 1], "sudden-death") == 0)
+ newsrv->onerror = HANA_ONERR_SUDDTH;
+ else if (strcmp(args[*cur_arg + 1], "mark-down") == 0)
+ newsrv->onerror = HANA_ONERR_MARKDWN;
+ else {
+ memprintf(err, "'%s' expects one of 'fastinter', "
+ "'fail-check', 'sudden-death' or 'mark-down' but got '%s'",
+ args[*cur_arg], args[*cur_arg + 1]);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ return 0;
+}
+
+/* Parse the "on-marked-down" server keyword: only "shutdown-sessions" is
+ * supported, which kills active sessions when the server goes down.
+ */
+static int srv_parse_on_marked_down(char **args, int *cur_arg,
+                                    struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	if (strcmp(args[*cur_arg + 1], "shutdown-sessions") != 0) {
+		memprintf(err, "'%s' expects 'shutdown-sessions' but got '%s'",
+		          args[*cur_arg], args[*cur_arg + 1]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	newsrv->onmarkeddown = HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS;
+	return 0;
+}
+
+/* Parse the "on-marked-up" server keyword: only "shutdown-backup-sessions"
+ * is supported, which kills sessions on backup servers when this server
+ * comes back up.
+ */
+static int srv_parse_on_marked_up(char **args, int *cur_arg,
+                                  struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	if (strcmp(args[*cur_arg + 1], "shutdown-backup-sessions") != 0) {
+		memprintf(err, "'%s' expects 'shutdown-backup-sessions' but got '%s'",
+		          args[*cur_arg], args[*cur_arg + 1]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	newsrv->onmarkedup = HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS;
+	return 0;
+}
+
+/* Parse the "redir" server keyword: enable redirection mode by recording
+ * the redirection prefix used for this server.
+ */
+static int srv_parse_redir(char **args, int *cur_arg,
+                           struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	const char *prefix = args[*cur_arg + 1];
+
+	if (!*prefix) {
+		memprintf(err, "'%s' expects <prefix> as argument.\n", args[*cur_arg]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	/* drop any previously configured prefix before installing the new one */
+	free(newsrv->rdr_pfx);
+	newsrv->rdr_pfx = strdup(prefix);
+	newsrv->rdr_len = strlen(prefix);
+
+	return 0;
+}
+
+/* Parse the "resolvers" server keyword: record the name of the resolvers
+ * section to use for this server's name resolution (resolved later).
+ */
+static int srv_parse_resolvers(char **args, int *cur_arg,
+                               struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	const char *id = args[*cur_arg + 1];
+
+	free(newsrv->resolvers_id);
+	newsrv->resolvers_id = strdup(id);
+	return 0;
+}
+
+/* Parse the "resolve-net" server keyword.
+ * Takes a comma-separated list of networks in IPv4 (addr/mask) or IPv6
+ * (addr/len) notation, and stores them as preferred networks for name
+ * resolution in newsrv->resolv_opts.pref_net[], up to SRV_MAX_PREF_NET
+ * entries. The argument string is tokenized in place.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> filled otherwise.
+ */
+static int srv_parse_resolve_net(char **args, int *cur_arg,
+                                 struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	char *p, *e;
+	unsigned char mask;
+	struct resolv_options *opt;
+
+	if (!args[*cur_arg + 1] || args[*cur_arg + 1][0] == '\0') {
+		memprintf(err, "'%s' expects a list of networks.",
+		          args[*cur_arg]);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	opt = &newsrv->resolv_opts;
+
+	/* Split arguments by comma, and convert it from ipv4 or ipv6
+	 * string network in in_addr or in6_addr.
+	 */
+	p = args[*cur_arg + 1];
+	e = p;
+	while (*p != '\0') {
+		/* If no room available, return error. */
+		if (opt->pref_net_nb >= SRV_MAX_PREF_NET) {
+			memprintf(err, "'%s' exceed %d networks.",
+			          args[*cur_arg], SRV_MAX_PREF_NET);
+			return ERR_ALERT | ERR_FATAL;
+		}
+		/* look for end or comma. */
+		while (*e != ',' && *e != '\0')
+			e++;
+		if (*e == ',') {
+			*e = '\0';
+			e++;
+		}
+		/* first try the token as an IPv4 network, then fall back to IPv6 */
+		if (str2net(p, 0, &opt->pref_net[opt->pref_net_nb].addr.in4,
+		            &opt->pref_net[opt->pref_net_nb].mask.in4)) {
+			/* IPv4 conversion succeeded */
+			opt->pref_net[opt->pref_net_nb].family = AF_INET;
+		} else if (str62net(p, &opt->pref_net[opt->pref_net_nb].addr.in6,
+		                    &mask)) {
+			/* IPv6 conversion succeeded: expand the prefix length into a mask */
+			len2mask6(mask, &opt->pref_net[opt->pref_net_nb].mask.in6);
+			opt->pref_net[opt->pref_net_nb].family = AF_INET6;
+		} else {
+			/* All network conversions fail, return error. */
+			memprintf(err, "'%s' invalid network '%s'.",
+			          args[*cur_arg], p);
+			return ERR_ALERT | ERR_FATAL;
+		}
+		opt->pref_net_nb++;
+		p = e;
+	}
+
+	return 0;
+}
+
+/* Parse the "resolve-opts" server keyword: a comma-separated list of
+ * resolution options ("allow-dup-ip", "ignore-weight", "prevent-dup-ip")
+ * applied to newsrv->resolv_opts. The argument is tokenized in place.
+ */
+static int srv_parse_resolve_opts(char **args, int *cur_arg,
+                                  struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	char *token, *next;
+
+	for (token = args[*cur_arg + 1]; *token; token = next) {
+		/* cut on next comma */
+		next = strchr(token, ',');
+		if (next)
+			*next++ = 0;
+		else
+			next = token + strlen(token);
+
+		if (strcmp(token, "allow-dup-ip") == 0) {
+			newsrv->resolv_opts.accept_duplicate_ip = 1;
+		}
+		else if (strcmp(token, "ignore-weight") == 0) {
+			newsrv->resolv_opts.ignore_weight = 1;
+		}
+		else if (strcmp(token, "prevent-dup-ip") == 0) {
+			newsrv->resolv_opts.accept_duplicate_ip = 0;
+		}
+		else {
+			memprintf(err, "'%s' : unknown resolve-opts option '%s', supported methods are 'allow-dup-ip', 'ignore-weight', and 'prevent-dup-ip'.",
+			          args[*cur_arg], token);
+			return ERR_ALERT | ERR_FATAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Parse the "resolve-prefer" server keyword: choose which address family
+ * ("ipv4" or "ipv6") is preferred when resolution yields both.
+ */
+static int srv_parse_resolve_prefer(char **args, int *cur_arg,
+                                    struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	const char *pref = args[*cur_arg + 1];
+
+	if (strcmp(pref, "ipv4") == 0) {
+		newsrv->resolv_opts.family_prio = AF_INET;
+		return 0;
+	}
+	if (strcmp(pref, "ipv6") == 0) {
+		newsrv->resolv_opts.family_prio = AF_INET6;
+		return 0;
+	}
+
+	memprintf(err, "'%s' expects either ipv4 or ipv6 as argument.",
+	          args[*cur_arg]);
+	return ERR_ALERT | ERR_FATAL;
+}
+
+/* Parse the "send-proxy" server keyword.
+ * Sets the PROXY protocol V1 option bit so a V1 header is sent when
+ * connecting to this server. Always returns 0.
+ */
+static int srv_parse_send_proxy(char **args, int *cur_arg,
+                                struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	return srv_enable_pp_flags(newsrv, SRV_PP_V1);
+}
+
+/* Parse the "send-proxy-v2" server keyword.
+ * Sets the PROXY protocol V2 option bit so a V2 header is sent when
+ * connecting to this server. Always returns 0.
+ */
+static int srv_parse_send_proxy_v2(char **args, int *cur_arg,
+                                   struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	return srv_enable_pp_flags(newsrv, SRV_PP_V2);
+}
+
+/* Parse the "set-proxy-v2-tlv-fmt" server keyword.
+ * Expected syntax: set-proxy-v2-tlv-fmt(<TLV ID>) <fmt>. The TLV ID is
+ * parsed from the parentheses appended to the keyword itself (0..0xFF),
+ * and the following format string is saved verbatim for later parsing.
+ * The resulting entry is appended to newsrv->pp_tlvs.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> filled on error.
+ */
+static int srv_parse_set_proxy_v2_tlv_fmt(char **args, int *cur_arg,
+                                          struct proxy *px, struct server *newsrv, char **err)
+{
+	char *error = NULL, *cmd = NULL;
+	unsigned int tlv_type = 0;
+	struct srv_pp_tlv_list *srv_tlv = NULL;
+
+	cmd = args[*cur_arg];
+	if (!*cmd) {
+		memprintf(err, "'%s' : could not read set-proxy-v2-tlv-fmt command", args[*cur_arg]);
+		goto fail;
+	}
+
+	/* skip the keyword itself to reach the "(<TLV ID>)" suffix */
+	cmd += strlen("set-proxy-v2-tlv-fmt");
+
+	if (*cmd == '(') {
+		cmd++; /* skip the '(' */
+		errno = 0;
+		tlv_type = strtoul(cmd, &error, 0); /* convert TLV ID */
+		/* note: the former separate "errno == EINVAL" test was
+		 * unreachable, being fully covered by this check.
+		 */
+		if (unlikely((cmd == error) || (errno != 0))) {
+			memprintf(err, "'%s' : could not convert TLV ID", args[*cur_arg]);
+			goto fail;
+		}
+		if (*error != ')') {
+			memprintf(err, "'%s' : expects set-proxy-v2-tlv(<TLV ID>)", args[*cur_arg]);
+			goto fail;
+		}
+		if (tlv_type > 0xFF) {
+			memprintf(err, "'%s' : the maximum allowed TLV ID is %d", args[*cur_arg], 0xFF);
+			goto fail;
+		}
+	}
+
+	srv_tlv = malloc(sizeof(*srv_tlv));
+	if (unlikely(!srv_tlv)) {
+		/* message typo fixed: used to read "failed to parse allocate" */
+		memprintf(err, "'%s' : failed to allocate TLV entry", args[*cur_arg]);
+		goto fail;
+	}
+	srv_tlv->type = tlv_type;
+	srv_tlv->fmt_string = strdup(args[*cur_arg + 1]);
+	if (unlikely(!srv_tlv->fmt_string)) {
+		memprintf(err, "'%s' : failed to save format string for parsing", args[*cur_arg]);
+		goto fail;
+	}
+
+	LIST_APPEND(&newsrv->pp_tlvs, &srv_tlv->list);
+
+	(*cur_arg)++; /* the format string argument was consumed */
+
+	return 0;
+
+ fail:
+	/* fmt_string is NULL on the only failure path where srv_tlv was
+	 * allocated, so freeing the entry alone cannot leak.
+	 */
+	free(srv_tlv);
+	errno = 0;
+	return ERR_ALERT | ERR_FATAL;
+}
+
+/* Parse the "slowstart" server keyword.
+ * The argument is a time value parsed in milliseconds; the result is stored
+ * in newsrv->slowstart in seconds, rounded up so that any non-null delay
+ * yields at least one second of warm-up.
+ * Returns 0 on success, ERR_ALERT | ERR_FATAL with <err> filled otherwise.
+ */
+static int srv_parse_slowstart(char **args, int *cur_arg,
+                               struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	/* slowstart is stored in seconds */
+	unsigned int val;
+	const char *time_err = parse_time_err(args[*cur_arg + 1], &val, TIME_UNIT_MS);
+
+	if (time_err == PARSE_TIME_OVER) {
+		memprintf(err, "overflow in argument <%s> to <%s> of server %s, maximum value is 2147483647 ms (~24.8 days).",
+		          args[*cur_arg+1], args[*cur_arg], newsrv->id);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	else if (time_err == PARSE_TIME_UNDER) {
+		memprintf(err, "underflow in argument <%s> to <%s> of server %s, minimum non-null value is 1 ms.",
+		          args[*cur_arg+1], args[*cur_arg], newsrv->id);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	else if (time_err) {
+		/* parse_time_err() returns a pointer to the first offending character */
+		memprintf(err, "unexpected character '%c' in 'slowstart' argument of server %s.",
+		          *time_err, newsrv->id);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	/* round milliseconds up to whole seconds */
+	newsrv->slowstart = (val + 999) / 1000;
+
+	return 0;
+}
+
+/* Parse the "source" server keyword.
+ * Sets the source address (and optional port range) used to connect to the
+ * server, then consumes the optional trailing sub-keywords:
+ *   - "usesrc" <addr>|client|clientip|hdr_ip(name[,occ]) : transparent
+ *     proxying source (requires CONFIG_HAP_TRANSPARENT);
+ *   - "interface" <name> : bind to a specific interface (requires
+ *     SO_BINDTODEVICE).
+ * <cur_arg> is advanced past every consumed argument. Returns 0 on success,
+ * ERR_ALERT | ERR_FATAL otherwise.
+ */
+static int srv_parse_source(char **args, int *cur_arg,
+                            struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	char *errmsg;
+	int port_low, port_high;
+	struct sockaddr_storage *sk;
+
+	errmsg = NULL;
+
+	if (!*args[*cur_arg + 1]) {
+		memprintf(err, "'%s' expects <addr>[:<port>[-<port>]], and optionally '%s' <addr>, "
+		               "and '%s' <name> as argument.\n", args[*cur_arg], "usesrc", "interface");
+		goto err;
+	}
+
+	/* 'sk' is statically allocated (no need to be freed). */
+	sk = str2sa_range(args[*cur_arg + 1], NULL, &port_low, &port_high, NULL, NULL, NULL,
+	                  &errmsg, NULL, NULL,
+	                  PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_RANGE | PA_O_STREAM | PA_O_CONNECT);
+	if (!sk) {
+		memprintf(err, "'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
+		goto err;
+	}
+
+	newsrv->conn_src.opts |= CO_SRC_BIND;
+	newsrv->conn_src.source_addr = *sk;
+
+	/* a port range was given: pre-build the list of candidate source ports */
+	if (port_low != port_high) {
+		int i;
+
+		newsrv->conn_src.sport_range = port_range_alloc_range(port_high - port_low + 1);
+		if (!newsrv->conn_src.sport_range) {
+			ha_alert("Server '%s': Out of memory (sport_range)\n", args[0]);
+			goto err;
+		}
+		for (i = 0; i < newsrv->conn_src.sport_range->size; i++)
+			newsrv->conn_src.sport_range->ports[i] = port_low + i;
+	}
+
+	/* consume the optional sub-keywords following the address */
+	*cur_arg += 2;
+	while (*(args[*cur_arg])) {
+		if (strcmp(args[*cur_arg], "usesrc") == 0) { /* address to use outside */
+#if defined(CONFIG_HAP_TRANSPARENT)
+			if (!*args[*cur_arg + 1]) {
+				ha_alert("'usesrc' expects <addr>[:<port>], 'client', 'clientip', "
+				         "or 'hdr_ip(name,#)' as argument.\n");
+				goto err;
+			}
+			if (strcmp(args[*cur_arg + 1], "client") == 0) {
+				/* spoof the client's full address (addr + port) */
+				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+				newsrv->conn_src.opts |= CO_SRC_TPROXY_CLI;
+			}
+			else if (strcmp(args[*cur_arg + 1], "clientip") == 0) {
+				/* spoof the client's IP address only */
+				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+				newsrv->conn_src.opts |= CO_SRC_TPROXY_CIP;
+			}
+			else if (!strncmp(args[*cur_arg + 1], "hdr_ip(", 7)) {
+				/* dynamic source taken from an HTTP header:
+				 * extract the header name between '(' and the
+				 * first space, comma or ')'.
+				 */
+				char *name, *end;
+
+				name = args[*cur_arg + 1] + 7;
+				while (isspace((unsigned char)*name))
+					name++;
+
+				end = name;
+				while (*end && !isspace((unsigned char)*end) && *end != ',' && *end != ')')
+					end++;
+
+				newsrv->conn_src.opts &= ~CO_SRC_TPROXY_MASK;
+				newsrv->conn_src.opts |= CO_SRC_TPROXY_DYN;
+				free(newsrv->conn_src.bind_hdr_name);
+				newsrv->conn_src.bind_hdr_name = calloc(1, end - name + 1);
+				if (!newsrv->conn_src.bind_hdr_name) {
+					ha_alert("Server '%s': Out of memory (bind_hdr_name)\n", args[0]);
+					goto err;
+				}
+				newsrv->conn_src.bind_hdr_len = end - name;
+				memcpy(newsrv->conn_src.bind_hdr_name, name, end - name);
+				newsrv->conn_src.bind_hdr_name[end - name] = '\0';
+				/* -1 means last occurrence by default */
+				newsrv->conn_src.bind_hdr_occ = -1;
+
+				/* now look for an occurrence number */
+				while (isspace((unsigned char)*end))
+					end++;
+				if (*end == ',') {
+					end++;
+					name = end;
+					if (*end == '-')
+						end++;
+					while (isdigit((unsigned char)*end))
+						end++;
+					newsrv->conn_src.bind_hdr_occ = strl2ic(name, end - name);
+				}
+
+				/* NOTE(review): the message prints MAX_HDR_HISTORY
+				 * while the test is against -MAX_HDR_HISTORY --
+				 * confirm the sign in the message is intended.
+				 */
+				if (newsrv->conn_src.bind_hdr_occ < -MAX_HDR_HISTORY) {
+					ha_alert("usesrc hdr_ip(name,num) does not support negative"
+					         " occurrences values smaller than %d.\n", MAX_HDR_HISTORY);
+					goto err;
+				}
+			}
+			else {
+				/* explicit source address for transparent proxying */
+				struct sockaddr_storage *sk;
+				int port1, port2;
+
+				/* 'sk' is statically allocated (no need to be freed). */
+				sk = str2sa_range(args[*cur_arg + 1], NULL, &port1, &port2, NULL, NULL, NULL,
+				                  &errmsg, NULL, NULL,
+				                  PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
+				if (!sk) {
+					ha_alert("'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
+					goto err;
+				}
+
+				newsrv->conn_src.tproxy_addr = *sk;
+				newsrv->conn_src.opts |= CO_SRC_TPROXY_ADDR;
+			}
+			/* transparent binding requires privileged network capabilities */
+			global.last_checks |= LSTCHK_NETADM;
+			*cur_arg += 2;
+			continue;
+#else	/* no TPROXY support */
+			ha_alert("'usesrc' not allowed here because support for TPROXY was not compiled in.\n");
+			goto err;
+#endif /* defined(CONFIG_HAP_TRANSPARENT) */
+		} /* "usesrc" */
+
+		if (strcmp(args[*cur_arg], "interface") == 0) { /* specifically bind to this interface */
+#ifdef SO_BINDTODEVICE
+			if (!*args[*cur_arg + 1]) {
+				ha_alert("'%s' : missing interface name.\n", args[0]);
+				goto err;
+			}
+			free(newsrv->conn_src.iface_name);
+			newsrv->conn_src.iface_name = strdup(args[*cur_arg + 1]);
+			newsrv->conn_src.iface_len  = strlen(newsrv->conn_src.iface_name);
+			global.last_checks |= LSTCHK_NETADM;
+#else
+			ha_alert("'%s' : '%s' option not implemented.\n", args[0], args[*cur_arg]);
+			goto err;
+#endif
+			*cur_arg += 2;
+			continue;
+		}
+		/* this keyword in not an option of "source" */
+		break;
+	} /* while */
+
+	return 0;
+
+ err:
+	free(errmsg);
+	return ERR_ALERT | ERR_FATAL;
+}
+
+/* Parser for the "stick" server keyword: re-enable stick-table persistence
+ * by clearing the non-stick flag (the opposite of "non-stick").
+ */
+static int srv_parse_stick(char **args, int *cur_arg,
+                           struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	newsrv->flags = newsrv->flags & ~SRV_F_NON_STICK;
+	return 0;
+}
+
+/* Parse the "track" server keyword: record the "[<proxy>/]<server>" name of
+ * the server whose state must be mirrored; the name is resolved later.
+ */
+static int srv_parse_track(char **args, int *cur_arg,
+                           struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	const char *target = args[*cur_arg + 1];
+
+	if (!*target) {
+		memprintf(err, "'track' expects [<proxy>/]<server> as argument.\n");
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	free(newsrv->trackit);
+	newsrv->trackit = strdup(target);
+
+	return 0;
+}
+
+/* Parse the "socks4" server keyword: route outgoing connections to this
+ * server through the SOCKS4 proxy given as <addr>:<port>.
+ */
+static int srv_parse_socks4(char **args, int *cur_arg,
+                            struct proxy *curproxy, struct server *newsrv, char **err)
+{
+	struct sockaddr_storage *addr;
+	char *errmsg = NULL;
+	int plow, phigh;
+
+	if (!*args[*cur_arg + 1]) {
+		memprintf(err, "'%s' expects <addr>:<port> as argument.\n", args[*cur_arg]);
+		goto err;
+	}
+
+	/* the returned storage is static: nothing to free on success */
+	addr = str2sa_range(args[*cur_arg + 1], NULL, &plow, &phigh, NULL, NULL, NULL,
+	                    &errmsg, NULL, NULL,
+	                    PA_O_RESOLVE | PA_O_PORT_OK | PA_O_PORT_MAND | PA_O_STREAM | PA_O_CONNECT);
+	if (!addr) {
+		memprintf(err, "'%s %s' : %s\n", args[*cur_arg], args[*cur_arg + 1], errmsg);
+		goto err;
+	}
+
+	newsrv->flags |= SRV_F_SOCKS4_PROXY;
+	newsrv->socks4_addr = *addr;
+
+	return 0;
+
+ err:
+	free(errmsg);
+	return ERR_ALERT | ERR_FATAL;
+}
+
+
+/* parse the "tfo" server keyword: enable TCP Fast Open towards the server. */
+static int srv_parse_tfo(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+	newsrv->flags = newsrv->flags | SRV_F_FASTOPEN;
+	return 0;
+}
+
+/* parse the "usesrc" server keyword.
+ * Safe-guard entry: "usesrc" is only valid as a sub-keyword of "source"
+ * (handled inside srv_parse_source()), so reaching this parser means it was
+ * used standalone and is always an error.
+ */
+static int srv_parse_usesrc(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+	memprintf(err, "'%s' only allowed after a '%s' statement.",
+	          "usesrc", "source");
+	return ERR_ALERT | ERR_FATAL;
+}
+
+/* parse the "weight" server keyword.
+ * Sets both the user-visible (uweight) and initial (iweight) weights.
+ * The value must be a plain integer within [0, SRV_UWGHT_MAX]. Unlike the
+ * previous atol() based version, non-numeric input is now rejected instead
+ * of being silently accepted as weight 0 (0 is a valid weight, so a typo
+ * used to disable the server without any warning).
+ */
+static int srv_parse_weight(char **args, int *cur_arg, struct proxy *px, struct server *newsrv, char **err)
+{
+	long w;
+	char *end;
+
+	errno = 0;
+	w = strtol(args[*cur_arg + 1], &end, 10);
+	if (end == args[*cur_arg + 1] || *end || errno == ERANGE ||
+	    w < 0 || w > SRV_UWGHT_MAX) {
+		memprintf(err, "weight of server %s is not within 0 and %d (%d).",
+		          newsrv->id, SRV_UWGHT_MAX, (int)w);
+		return ERR_ALERT | ERR_FATAL;
+	}
+	newsrv->uweight = newsrv->iweight = w;
+
+	return 0;
+}
+
+/* Returns 1 if the server has streams pointing to it, and 0 otherwise.
+ * Scans the per-thread stream lists and stops at the first non-empty one.
+ *
+ * Must be called with the server lock held.
+ */
+static int srv_has_streams(struct server *srv)
+{
+	int thr = 0;
+
+	while (thr < global.nbthread) {
+		if (!MT_LIST_ISEMPTY(&srv->per_thr[thr].streams))
+			return 1;
+		thr++;
+	}
+	return 0;
+}
+
+/* Shutdown all connections of a server. The caller must pass a termination
+ * code in <why>, which must be one of SF_ERR_* indicating the reason for the
+ * shutdown.
+ *
+ * Must be called with the server lock held.
+ */
+void srv_shutdown_streams(struct server *srv, int why)
+{
+	struct stream *stream;
+	struct mt_list *elt1, elt2;
+	int thr;
+
+	/* walk every per-thread stream list with the deletion-safe iterator;
+	 * only streams still attached to this server are shut down.
+	 */
+	for (thr = 0; thr < global.nbthread; thr++)
+		mt_list_for_each_entry_safe(stream, &srv->per_thr[thr].streams, by_srv, elt1, elt2)
+			if (stream->srv_conn == srv)
+				stream_shutdown(stream, why);
+}
+
+/* Shutdown all connections of all backup servers of a proxy. The caller must
+ * pass a termination code in <why>, which must be one of SF_ERR_* indicating
+ * the reason for the shutdown.
+ *
+ * Must be called with the server lock held.
+ */
+void srv_shutdown_backup_streams(struct proxy *px, int why)
+{
+	struct server *srv;
+
+	for (srv = px->srv; srv; srv = srv->next) {
+		if (!(srv->flags & SRV_F_BACKUP))
+			continue;
+		srv_shutdown_streams(srv, why);
+	}
+}
+
+/* Append a human-readable description of operational state change cause
+ * <cause> to <msg>: health/agent causes delegate to the check formatter,
+ * other non-null causes append their generic description.
+ */
+static void srv_append_op_chg_cause(struct buffer *msg, struct server *s, enum srv_op_st_chg_cause cause)
+{
+	if (cause == SRV_OP_STCHGC_HEALTH)
+		check_append_info(msg, &s->check);
+	else if (cause == SRV_OP_STCHGC_AGENT)
+		check_append_info(msg, &s->agent);
+	else if (cause != SRV_OP_STCHGC_NONE)
+		chunk_appendf(msg, ", %s", srv_op_st_chg_cause(cause));
+}
+
+/* Append the administrative state change cause <cause> to <msg>, in
+ * parentheses; a null cause appends nothing.
+ */
+static void srv_append_adm_chg_cause(struct buffer *msg, struct server *s, enum srv_adm_st_chg_cause cause)
+{
+	if (!cause)
+		return;
+	chunk_appendf(msg, " (%s)", srv_adm_st_chg_cause(cause));
+}
+
+/* Appends some information to a message string related to a server tracking
+ * or requeued connections info.
+ *
+ * If <forced> is null and the server tracks another one, a " via <px>/<srv>"
+ * indication of the tracked server is appended.
+ * If <xferred> is non-negative, some information about requeued sessions are
+ * provided.
+ *
+ * Must be called with the server lock held.
+ */
+static void srv_append_more(struct buffer *msg, struct server *s,
+                            int xferred, int forced)
+{
+	if (!forced && s->track) {
+		chunk_appendf(msg, " via %s/%s", s->track->proxy->id, s->track->id);
+	}
+
+	if (xferred >= 0) {
+		/* the wording depends on whether the server is going down or not */
+		if (s->next_state == SRV_ST_STOPPED)
+			chunk_appendf(msg, ". %d active and %d backup servers left.%s"
+			              " %d sessions active, %d requeued, %d remaining in queue",
+			              s->proxy->srv_act, s->proxy->srv_bck,
+			              (s->proxy->srv_bck && !s->proxy->srv_act) ? " Running on backup." : "",
+			              s->cur_sess, xferred, s->queue.length);
+		else
+			chunk_appendf(msg, ". %d active and %d backup servers online.%s"
+			              " %d sessions requeued, %d total in queue",
+			              s->proxy->srv_act, s->proxy->srv_bck,
+			              (s->proxy->srv_bck && !s->proxy->srv_act) ? " Running on backup." : "",
+			              xferred, s->queue.length);
+	}
+}
+
+/* Marks server <s> down, regardless of its checks' statuses. The server
+ * transfers queued streams whenever possible to other servers at a sync
+ * point. Maintenance servers are ignored.
+ *
+ * Must be called with the server lock held. The function recursively
+ * propagates the state to every server tracking <s>, taking each tracker's
+ * own lock around the recursive call.
+ */
+void srv_set_stopped(struct server *s, enum srv_op_st_chg_cause cause)
+{
+	struct server *srv;
+
+	/* nothing to do for servers in maintenance or already stopped */
+	if ((s->cur_admin & SRV_ADMF_MAINT) || s->next_state == SRV_ST_STOPPED)
+		return;
+
+	s->next_state = SRV_ST_STOPPED;
+
+	/* propagate changes */
+	srv_update_status(s, 0, cause);
+
+	for (srv = s->trackers; srv; srv = srv->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		srv_set_stopped(srv, SRV_OP_STCHGC_NONE);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	}
+}
+
+/* Marks server <s> up regardless of its checks' statuses and provided it isn't
+ * in maintenance. The server tries to grab requests from the proxy at a sync
+ * point. Maintenance servers are ignored.
+ *
+ * Must be called with the server lock held. The state is recursively
+ * propagated to every server tracking <s>, under each tracker's own lock.
+ */
+void srv_set_running(struct server *s, enum srv_op_st_chg_cause cause)
+{
+	struct server *srv;
+
+	if (s->cur_admin & SRV_ADMF_MAINT)
+		return;
+
+	if (s->next_state == SRV_ST_STARTING || s->next_state == SRV_ST_RUNNING)
+		return;
+
+	/* go through the warm-up phase first, unless no slowstart is configured */
+	s->next_state = SRV_ST_STARTING;
+
+	if (s->slowstart <= 0)
+		s->next_state = SRV_ST_RUNNING;
+
+	/* propagate changes */
+	srv_update_status(s, 0, cause);
+
+	for (srv = s->trackers; srv; srv = srv->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		srv_set_running(srv, SRV_OP_STCHGC_NONE);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	}
+}
+
+/* Marks server <s> stopping regardless of its checks' statuses and provided it
+ * isn't in maintenance. The server tries to redispatch pending requests
+ * to the proxy. Maintenance servers are ignored.
+ *
+ * Must be called with the server lock held. The state is recursively
+ * propagated to every server tracking <s>, under each tracker's own lock.
+ */
+void srv_set_stopping(struct server *s, enum srv_op_st_chg_cause cause)
+{
+	struct server *srv;
+
+	if (s->cur_admin & SRV_ADMF_MAINT)
+		return;
+
+	if (s->next_state == SRV_ST_STOPPING)
+		return;
+
+	s->next_state = SRV_ST_STOPPING;
+
+	/* propagate changes */
+	srv_update_status(s, 0, cause);
+
+	for (srv = s->trackers; srv; srv = srv->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		srv_set_stopping(srv, SRV_OP_STCHGC_NONE);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	}
+}
+
+/* Enables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
+ * enforce either maint mode or drain mode. It is not allowed to set more than
+ * one flag at once. The equivalent "inherited" flag is propagated to all
+ * tracking servers. Maintenance mode disables health checks (but not agent
+ * checks). When either the flag is already set or no flag is passed, nothing
+ * is done. If <cause> is non-null, it will be displayed at the end of the log
+ * lines to justify the state change.
+ *
+ * Must be called with the server lock held.
+ */
+void srv_set_admin_flag(struct server *s, enum srv_admin mode, enum srv_adm_st_chg_cause cause)
+{
+	struct server *srv;
+
+	if (!mode)
+		return;
+
+	/* stop going down as soon as we meet a server already in the same state */
+	if (s->next_admin & mode)
+		return;
+
+	s->next_admin |= mode;
+
+	/* propagate changes */
+	srv_update_status(s, 1, cause);
+
+	/* stop going down if the equivalent flag was already present (forced or inherited) */
+	if (((mode & SRV_ADMF_MAINT) && (s->next_admin & ~mode & SRV_ADMF_MAINT)) ||
+	    ((mode & SRV_ADMF_DRAIN) && (s->next_admin & ~mode & SRV_ADMF_DRAIN)))
+		return;
+
+	/* compute the inherited flag to propagate */
+	if (mode & SRV_ADMF_MAINT)
+		mode = SRV_ADMF_IMAINT;
+	else if (mode & SRV_ADMF_DRAIN)
+		mode = SRV_ADMF_IDRAIN;
+
+	/* recursively apply the inherited flag to all trackers, each under
+	 * its own lock
+	 */
+	for (srv = s->trackers; srv; srv = srv->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		srv_set_admin_flag(srv, mode, cause);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	}
+}
+
+/* Disables admin flag <mode> (among SRV_ADMF_*) on server <s>. This is used to
+ * stop enforcing either maint mode or drain mode. It is not allowed to set more
+ * than one flag at once. The equivalent "inherited" flag is propagated to all
+ * tracking servers. Leaving maintenance mode re-enables health checks. When
+ * either the flag is already cleared or no flag is passed, nothing is done.
+ *
+ * Must be called with the server lock held.
+ */
+void srv_clr_admin_flag(struct server *s, enum srv_admin mode)
+{
+	struct server *srv;
+
+	if (!mode)
+		return;
+
+	/* stop going down as soon as we see the flag is not there anymore */
+	if (!(s->next_admin & mode))
+		return;
+
+	s->next_admin &= ~mode;
+
+	/* propagate changes */
+	srv_update_status(s, 1, SRV_ADM_STCHGC_NONE);
+
+	/* stop going down if the equivalent flag is still present (forced or inherited) */
+	if (((mode & SRV_ADMF_MAINT) && (s->next_admin & SRV_ADMF_MAINT)) ||
+	    ((mode & SRV_ADMF_DRAIN) && (s->next_admin & SRV_ADMF_DRAIN)))
+		return;
+
+	/* compute the inherited flag to clear on trackers */
+	if (mode & SRV_ADMF_MAINT)
+		mode = SRV_ADMF_IMAINT;
+	else if (mode & SRV_ADMF_DRAIN)
+		mode = SRV_ADMF_IDRAIN;
+
+	for (srv = s->trackers; srv; srv = srv->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+		srv_clr_admin_flag(srv, mode);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+	}
+}
+
+/* principle: propagate maint and drain to tracking servers. This is useful
+ * upon startup so that inherited states are correct.
+ * For each tracker of <srv>, the inherited maint and/or drain flags are set
+ * according to <srv>'s own pending admin state, under the tracker's lock.
+ */
+static void srv_propagate_admin_state(struct server *srv)
+{
+	struct server *srv2;
+
+	if (!srv->trackers)
+		return;
+
+	for (srv2 = srv->trackers; srv2; srv2 = srv2->tracknext) {
+		HA_SPIN_LOCK(SERVER_LOCK, &srv2->lock);
+		if (srv->next_admin & (SRV_ADMF_MAINT | SRV_ADMF_CMAINT))
+			srv_set_admin_flag(srv2, SRV_ADMF_IMAINT, SRV_ADM_STCHGC_NONE);
+
+		if (srv->next_admin & SRV_ADMF_DRAIN)
+			srv_set_admin_flag(srv2, SRV_ADMF_IDRAIN, SRV_ADM_STCHGC_NONE);
+		HA_SPIN_UNLOCK(SERVER_LOCK, &srv2->lock);
+	}
+}
+
+/* Compute and propagate the admin states for all servers in proxy <px>.
+ * Only servers *not* tracking another one are considered, because other
+ * ones will be handled when the server they track is visited.
+ */
+void srv_compute_all_admin_states(struct proxy *px)
+{
+	struct server *srv;
+
+	for (srv = px->srv; srv; srv = srv->next)
+		if (!srv->track)
+			srv_propagate_admin_state(srv);
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted, doing so helps
+ * all code contributors.
+ * Optional keywords are also declared with a NULL ->parse() function so that
+ * the config parser can report an appropriate error when a known keyword was
+ * not enabled.
+ * Note: -1 as ->skip value means that the number of arguments are variable.
+ * NOTE(review): a few entries ("ws", "shard", "socks4", "usesrc") are not in
+ * strict alphabetical order — consider re-sorting on a future cleanup.
+ */
+static struct srv_kw_list srv_kws = { "ALL", { }, {
+	{ "backup",              srv_parse_backup,              0,  1,  1 }, /* Flag as backup server */
+	{ "cookie",              srv_parse_cookie,              1,  1,  0 }, /* Assign a cookie to the server */
+	{ "disabled",            srv_parse_disabled,            0,  1,  1 }, /* Start the server in 'disabled' state */
+	{ "enabled",             srv_parse_enabled,             0,  1,  1 }, /* Start the server in 'enabled' state */
+	{ "error-limit",         srv_parse_error_limit,         1,  1,  1 }, /* Configure the consecutive count of check failures to consider a server on error */
+	{ "ws",                  srv_parse_ws,                  1,  1,  1 }, /* websocket protocol */
+	{ "id",                  srv_parse_id,                  1,  0,  1 }, /* set id# of server */
+	{ "init-addr",           srv_parse_init_addr,           1,  1,  0 }, /* */
+	{ "log-bufsize",         srv_parse_log_bufsize,         1,  1,  0 }, /* Set the ring bufsize for log server (only for log backends) */
+	{ "log-proto",           srv_parse_log_proto,           1,  1,  0 }, /* Set the protocol for event messages, only relevant in a log or ring section */
+	{ "maxconn",             srv_parse_maxconn,             1,  1,  1 }, /* Set the max number of concurrent connection */
+	{ "maxqueue",            srv_parse_maxqueue,            1,  1,  1 }, /* Set the max number of connection to put in queue */
+	{ "max-reuse",           srv_parse_max_reuse,           1,  1,  0 }, /* Set the max number of requests on a connection, -1 means unlimited */
+	{ "minconn",             srv_parse_minconn,             1,  1,  1 }, /* Enable a dynamic maxconn limit */
+	{ "namespace",           srv_parse_namespace,           1,  1,  0 }, /* Namespace the server socket belongs to (if supported) */
+	{ "no-backup",           srv_parse_no_backup,           0,  1,  1 }, /* Flag as non-backup server */
+	{ "no-send-proxy",       srv_parse_no_send_proxy,       0,  1,  1 }, /* Disable use of PROXY V1 protocol */
+	{ "no-send-proxy-v2",    srv_parse_no_send_proxy_v2,    0,  1,  1 }, /* Disable use of PROXY V2 protocol */
+	{ "no-tfo",              srv_parse_no_tfo,              0,  1,  1 }, /* Disable use of TCP Fast Open */
+	{ "non-stick",           srv_parse_non_stick,           0,  1,  0 }, /* Disable stick-table persistence */
+	{ "observe",             srv_parse_observe,             1,  1,  1 }, /* Enables health adjusting based on observing communication with the server */
+	{ "on-error",            srv_parse_on_error,            1,  1,  1 }, /* Configure the action on check failure */
+	{ "on-marked-down",      srv_parse_on_marked_down,      1,  1,  1 }, /* Configure the action when a server is marked down */
+	{ "on-marked-up",        srv_parse_on_marked_up,        1,  1,  1 }, /* Configure the action when a server is marked up */
+	{ "pool-low-conn",       srv_parse_pool_low_conn,       1,  1,  1 }, /* Set the min number of orphan idle connections before being allowed to pick from other threads */
+	{ "pool-max-conn",       srv_parse_pool_max_conn,       1,  1,  1 }, /* Set the max number of orphan idle connections, -1 means unlimited */
+	{ "pool-purge-delay",    srv_parse_pool_purge_delay,    1,  1,  1 }, /* Set the time before we destroy orphan idle connections, defaults to 1s */
+	{ "proto",               srv_parse_proto,               1,  1,  1 }, /* Set the proto to use for all outgoing connections */
+	{ "proxy-v2-options",    srv_parse_proxy_v2_options,    1,  1,  1 }, /* options for send-proxy-v2 */
+	{ "redir",               srv_parse_redir,               1,  1,  0 }, /* Enable redirection mode */
+	{ "resolve-net",         srv_parse_resolve_net,         1,  1,  0 }, /* Set the preferred network range for name resolution */
+	{ "resolve-opts",        srv_parse_resolve_opts,        1,  1,  0 }, /* Set options for name resolution */
+	{ "resolve-prefer",      srv_parse_resolve_prefer,      1,  1,  0 }, /* Set the preferred family for name resolution */
+	{ "resolvers",           srv_parse_resolvers,           1,  1,  0 }, /* Configure the resolver to use for name resolution */
+	{ "send-proxy",          srv_parse_send_proxy,          0,  1,  1 }, /* Enforce use of PROXY V1 protocol */
+	{ "send-proxy-v2",       srv_parse_send_proxy_v2,       0,  1,  1 }, /* Enforce use of PROXY V2 protocol */
+	{ "set-proxy-v2-tlv-fmt", srv_parse_set_proxy_v2_tlv_fmt, 0, 1, 1 }, /* Set TLV of PROXY V2 protocol */
+	{ "shard",               srv_parse_shard,               1,  1,  1 }, /* Server shard (only in peers protocol context) */
+	{ "slowstart",           srv_parse_slowstart,           1,  1,  1 }, /* Set the warm-up timer for a previously failed server */
+	{ "source",              srv_parse_source,             -1,  1,  1 }, /* Set the source address to be used to connect to the server */
+	{ "stick",               srv_parse_stick,               0,  1,  0 }, /* Enable stick-table persistence */
+	{ "tfo",                 srv_parse_tfo,                 0,  1,  1 }, /* enable TCP Fast Open of server */
+	{ "track",               srv_parse_track,               1,  1,  1 }, /* Set the current state of the server, tracking another one */
+	{ "socks4",              srv_parse_socks4,              1,  1,  0 }, /* Set the socks4 proxy of the server*/
+	{ "usesrc",              srv_parse_usesrc,              0,  1,  1 }, /* safe-guard against usesrc without preceding <source> keyword */
+	{ "weight",              srv_parse_weight,              1,  1,  1 }, /* Set the load-balancing weight */
+	{ NULL, NULL, 0 },
+}};
+
+INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws);
+
+/* Recomputes the server's eweight based on its state, uweight, the current time,
+ * and the proxy's algorithm. To be used after updating sv->uweight. The warmup
+ * state is automatically disabled if the time is elapsed. If <must_update> is
+ * not zero, the update will be propagated immediately.
+ *
+ * Must be called with the server lock held.
+ */
+void server_recalc_eweight(struct server *sv, int must_update)
+{
+ struct proxy *px = sv->proxy;
+ unsigned w;
+
+ if (ns_to_sec(now_ns) < sv->last_change || ns_to_sec(now_ns) >= sv->last_change + sv->slowstart) {
+ /* go to full throttle if the slowstart interval is reached */
+ if (sv->next_state == SRV_ST_STARTING)
+ sv->next_state = SRV_ST_RUNNING;
+ }
+
+ /* We must take care of not pushing the server to full throttle during slow starts.
+ * It must also start immediately, at least at the minimal step when leaving maintenance.
+ */
+ if ((sv->next_state == SRV_ST_STARTING) && (px->lbprm.algo & BE_LB_PROP_DYN))
+ w = (px->lbprm.wdiv * (ns_to_sec(now_ns) - sv->last_change) + sv->slowstart) / sv->slowstart;
+ else
+ w = px->lbprm.wdiv;
+
+ sv->next_eweight = (sv->uweight * w + px->lbprm.wmult - 1) / px->lbprm.wmult;
+
+ /* propagate changes only if needed (i.e. not recursively) */
+ if (must_update)
+ srv_update_status(sv, 0, SRV_OP_STCHGC_NONE);
+}
+
+/*
+ * Parses weight_str and configures sv accordingly.
+ * Returns NULL on success, error message string otherwise.
+ *
+ * Must be called with the server lock held.
+ */
+const char *server_parse_weight_change_request(struct server *sv,
+ const char *weight_str)
+{
+ struct proxy *px;
+ long int w;
+ char *end;
+
+ px = sv->proxy;
+
+ /* if the weight is terminated with '%', it is set relative to
+ * the initial weight, otherwise it is absolute.
+ */
+ if (!*weight_str)
+ return "Require <weight> or <weight%>.\n";
+
+ w = strtol(weight_str, &end, 10);
+ if (end == weight_str)
+ return "Empty weight string empty or preceded by garbage";
+ else if (end[0] == '%' && end[1] == '\0') {
+ if (w < 0)
+ return "Relative weight must be positive.\n";
+ /* Avoid integer overflow */
+ if (w > 25600)
+ w = 25600;
+ w = sv->iweight * w / 100;
+ if (w > 256)
+ w = 256;
+ }
+ else if (w < 0 || w > 256)
+ return "Absolute weight can only be between 0 and 256 inclusive.\n";
+ else if (end[0] != '\0')
+ return "Trailing garbage in weight string";
+
+ if (w && w != sv->iweight && !(px->lbprm.algo & BE_LB_PROP_DYN))
+ return "Backend is using a static LB algorithm and only accepts weights '0%' and '100%'.\n";
+
+ sv->uweight = w;
+ server_recalc_eweight(sv, 1);
+
+ return NULL;
+}
+
/*
 * Parses <addr_str> and configures <sv> accordingly. <updater> identifies
 * the source of the change in the associated message log.
 * Returns:
 *  - error string on error
 *  - NULL on success
 *
 * Must be called with the server lock held.
 */
const char *server_parse_addr_change_request(struct server *sv,
                                             const char *addr_str, const char *updater)
{
	static const int families[] = { AF_INET6, AF_INET };
	unsigned char ip[INET6_ADDRSTRLEN];
	size_t i;

	/* try IPv6 first, then IPv4; the first family that parses wins */
	for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
		if (inet_pton(families[i], addr_str, ip)) {
			srv_update_addr(sv, ip, families[i], updater);
			return NULL;
		}
	}

	return "Could not understand IP address format.\n";
}
+
+/*
+ * Must be called with the server lock held.
+ */
+const char *server_parse_maxconn_change_request(struct server *sv,
+ const char *maxconn_str)
+{
+ long int v;
+ char *end;
+
+ if (!*maxconn_str)
+ return "Require <maxconn>.\n";
+
+ v = strtol(maxconn_str, &end, 10);
+ if (end == maxconn_str)
+ return "maxconn string empty or preceded by garbage";
+ else if (end[0] != '\0')
+ return "Trailing garbage in maxconn string";
+
+ if (sv->maxconn == sv->minconn) { // static maxconn
+ sv->maxconn = sv->minconn = v;
+ } else { // dynamic maxconn
+ sv->maxconn = v;
+ }
+
+ if (may_dequeue_tasks(sv, sv->proxy))
+ process_srv_queue(sv);
+
+ return NULL;
+}
+
+static struct sample_expr *srv_sni_sample_parse_expr(struct server *srv, struct proxy *px,
+ const char *file, int linenum, char **err)
+{
+ int idx;
+ const char *args[] = {
+ srv->sni_expr,
+ NULL,
+ };
+
+ idx = 0;
+ px->conf.args.ctx = ARGC_SRV;
+
+ return sample_parse_expr((char **)args, &idx, file, linenum, err, &px->conf.args, NULL);
+}
+
+int server_parse_sni_expr(struct server *newsrv, struct proxy *px, char **err)
+{
+ struct sample_expr *expr;
+
+ expr = srv_sni_sample_parse_expr(newsrv, px, px->conf.file, px->conf.line, err);
+ if (!expr) {
+ memprintf(err, "error detected while parsing sni expression : %s", *err);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ if (!(expr->fetch->val & SMP_VAL_BE_SRV_CON)) {
+ memprintf(err, "error detected while parsing sni expression : "
+ " fetch method '%s' extracts information from '%s', "
+ "none of which is available here.",
+ newsrv->sni_expr, sample_src_names(expr->fetch->use));
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ px->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
+ release_sample_expr(newsrv->ssl_ctx.sni);
+ newsrv->ssl_ctx.sni = expr;
+
+ return 0;
+}
+
+static void display_parser_err(const char *file, int linenum, char **args, int cur_arg, int err_code, char **err)
+{
+ char *msg = "error encountered while processing ";
+ char *quote = "'";
+ char *token = args[cur_arg];
+
+ if (err && *err) {
+ indent_msg(err, 2);
+ msg = *err;
+ quote = "";
+ token = "";
+ }
+
+ if (err_code & ERR_WARN && !(err_code & ERR_ALERT))
+ ha_warning("%s%s%s%s.\n", msg, quote, token, quote);
+ else
+ ha_alert("%s%s%s%s.\n", msg, quote, token, quote);
+}
+
+static void srv_conn_src_sport_range_cpy(struct server *srv, const struct server *src)
+{
+ int range_sz;
+
+ range_sz = src->conn_src.sport_range->size;
+ if (range_sz > 0) {
+ srv->conn_src.sport_range = port_range_alloc_range(range_sz);
+ if (srv->conn_src.sport_range != NULL) {
+ int i;
+
+ for (i = 0; i < range_sz; i++) {
+ srv->conn_src.sport_range->ports[i] =
+ src->conn_src.sport_range->ports[i];
+ }
+ }
+ }
+}
+
+/*
+ * Copy <src> server connection source settings to <srv> server everything needed.
+ */
+static void srv_conn_src_cpy(struct server *srv, const struct server *src)
+{
+ srv->conn_src.opts = src->conn_src.opts;
+ srv->conn_src.source_addr = src->conn_src.source_addr;
+
+ /* Source port range copy. */
+ if (src->conn_src.sport_range != NULL)
+ srv_conn_src_sport_range_cpy(srv, src);
+
+#ifdef CONFIG_HAP_TRANSPARENT
+ if (src->conn_src.bind_hdr_name != NULL) {
+ srv->conn_src.bind_hdr_name = strdup(src->conn_src.bind_hdr_name);
+ srv->conn_src.bind_hdr_len = strlen(src->conn_src.bind_hdr_name);
+ }
+ srv->conn_src.bind_hdr_occ = src->conn_src.bind_hdr_occ;
+ srv->conn_src.tproxy_addr = src->conn_src.tproxy_addr;
+#endif
+ if (src->conn_src.iface_name != NULL)
+ srv->conn_src.iface_name = strdup(src->conn_src.iface_name);
+}
+
/*
 * Copy <src> server SSL settings to <srv> server allocating
 * everything needed.
 */
#if defined(USE_OPENSSL)
static void srv_ssl_settings_cpy(struct server *srv, const struct server *src)
{
	/* <src> is the current proxy's default server and SSL is enabled */
	BUG_ON(src->ssl_ctx.ctx != NULL); /* the SSL_CTX must never be initialized in a default-server */

	/* remember that SSL was inherited from a default-server, so that it
	 * can be handled specifically later on */
	if (src == &srv->proxy->defsrv && src->use_ssl == 1)
		srv->flags |= SRV_F_DEFSRV_USE_SSL;

	/* all strings below are duplicated so that <srv> owns its own copies;
	 * allocation failures are silently ignored (the field stays NULL) */
	if (src->ssl_ctx.ca_file != NULL)
		srv->ssl_ctx.ca_file = strdup(src->ssl_ctx.ca_file);
	if (src->ssl_ctx.crl_file != NULL)
		srv->ssl_ctx.crl_file = strdup(src->ssl_ctx.crl_file);
	if (src->ssl_ctx.client_crt != NULL)
		srv->ssl_ctx.client_crt = strdup(src->ssl_ctx.client_crt);

	srv->ssl_ctx.verify = src->ssl_ctx.verify;


	if (src->ssl_ctx.verify_host != NULL)
		srv->ssl_ctx.verify_host = strdup(src->ssl_ctx.verify_host);
	if (src->ssl_ctx.ciphers != NULL)
		srv->ssl_ctx.ciphers = strdup(src->ssl_ctx.ciphers);
	/* scalar options are only copied when set, leaving <srv>'s own
	 * defaults in place otherwise */
	if (src->ssl_ctx.options)
		srv->ssl_ctx.options = src->ssl_ctx.options;
	if (src->ssl_ctx.methods.flags)
		srv->ssl_ctx.methods.flags = src->ssl_ctx.methods.flags;
	if (src->ssl_ctx.methods.min)
		srv->ssl_ctx.methods.min = src->ssl_ctx.methods.min;
	if (src->ssl_ctx.methods.max)
		srv->ssl_ctx.methods.max = src->ssl_ctx.methods.max;

	if (src->ssl_ctx.ciphersuites != NULL)
		srv->ssl_ctx.ciphersuites = strdup(src->ssl_ctx.ciphersuites);
	if (src->sni_expr != NULL)
		srv->sni_expr = strdup(src->sni_expr);

	/* ALPN/NPN strings are length-prefixed binary blobs, not C strings:
	 * copy them with their explicit length */
	if (src->ssl_ctx.alpn_str) {
		srv->ssl_ctx.alpn_str = malloc(src->ssl_ctx.alpn_len);
		if (srv->ssl_ctx.alpn_str) {
			memcpy(srv->ssl_ctx.alpn_str, src->ssl_ctx.alpn_str,
			    src->ssl_ctx.alpn_len);
			srv->ssl_ctx.alpn_len = src->ssl_ctx.alpn_len;
		}
	}

	if (src->ssl_ctx.npn_str) {
		srv->ssl_ctx.npn_str = malloc(src->ssl_ctx.npn_len);
		if (srv->ssl_ctx.npn_str) {
			memcpy(srv->ssl_ctx.npn_str, src->ssl_ctx.npn_str,
			    src->ssl_ctx.npn_len);
			srv->ssl_ctx.npn_len = src->ssl_ctx.npn_len;
		}
	}
}
+
+/* Activate ssl on server <s>.
+ * do nothing if there is no change to apply
+ *
+ * Must be called with the server lock held.
+ */
+void srv_set_ssl(struct server *s, int use_ssl)
+{
+ if (s->use_ssl == use_ssl)
+ return;
+
+ s->use_ssl = use_ssl;
+ if (s->use_ssl)
+ s->xprt = xprt_get(XPRT_SSL);
+ else
+ s->xprt = xprt_get(XPRT_RAW);
+}
+
+#endif /* USE_OPENSSL */
+
+/*
+ * Prepare <srv> for hostname resolution.
+ * May be safely called with a default server as <src> argument (without hostname).
+ * Returns -1 in case of any allocation failure, 0 if not.
+ */
+int srv_prepare_for_resolution(struct server *srv, const char *hostname)
+{
+ char *hostname_dn;
+ int hostname_len, hostname_dn_len;
+
+ if (!hostname)
+ return 0;
+
+ hostname_len = strlen(hostname);
+ hostname_dn = trash.area;
+ hostname_dn_len = resolv_str_to_dn_label(hostname, hostname_len,
+ hostname_dn, trash.size);
+ if (hostname_dn_len == -1)
+ goto err;
+
+
+ free(srv->hostname);
+ free(srv->hostname_dn);
+ srv->hostname = strdup(hostname);
+ srv->hostname_dn = strdup(hostname_dn);
+ srv->hostname_dn_len = hostname_dn_len;
+ if (!srv->hostname || !srv->hostname_dn)
+ goto err;
+
+ return 0;
+
+ err:
+ ha_free(&srv->hostname);
+ ha_free(&srv->hostname_dn);
+ return -1;
+}
+
/*
 * Copy <src> server settings to <srv> server allocating
 * everything needed.
 * This function is not supposed to be called at any time, but only
 * during server settings parsing or during server allocations from
 * a server template, and just after having calloc()'ed a new server.
 * So, <src> may only be a default server (when parsing server settings)
 * or a server template (during server allocations from a server template).
 * <srv_tmpl> distinguishes these two cases (must be 1 if <srv> is a template,
 * 0 if not).
 */
void srv_settings_cpy(struct server *srv, const struct server *src, int srv_tmpl)
{
	struct srv_pp_tlv_list *srv_tlv = NULL, *new_srv_tlv = NULL;

	/* Connection source settings copy */
	srv_conn_src_cpy(srv, src);

	/* the address is only inherited when instantiating from a template,
	 * not from a default-server */
	if (srv_tmpl) {
		srv->addr = src->addr;
		srv->addr_type = src->addr_type;
		srv->svc_port = src->svc_port;
	}

	srv->pp_opts = src->pp_opts;
	/* redirection prefix and cookie are duplicated with their lengths */
	if (src->rdr_pfx != NULL) {
		srv->rdr_pfx = strdup(src->rdr_pfx);
		srv->rdr_len = src->rdr_len;
	}
	if (src->cookie != NULL) {
		srv->cookie = strdup(src->cookie);
		srv->cklen = src->cklen;
	}
	srv->use_ssl = src->use_ssl;
	/* health-check and agent-check settings */
	srv->check.addr = src->check.addr;
	srv->agent.addr = src->agent.addr;
	srv->check.use_ssl = src->check.use_ssl;
	srv->check.port = src->check.port;
	srv->check.sni = src->check.sni;
	srv->check.alpn_str = src->check.alpn_str;
	srv->check.alpn_len = src->check.alpn_len;
	/* Note: 'flags' field has potentially been already initialized. */
	srv->flags |= src->flags;
	srv->do_check = src->do_check;
	srv->do_agent = src->do_agent;
	srv->check.inter = src->check.inter;
	srv->check.fastinter = src->check.fastinter;
	srv->check.downinter = src->check.downinter;
	srv->agent.use_ssl = src->agent.use_ssl;
	srv->agent.port = src->agent.port;

	/* agent tcpcheck rules get their own ruleset container; the rule list
	 * itself is shared, but preset variables are deep-copied */
	if (src->agent.tcpcheck_rules) {
		srv->agent.tcpcheck_rules = calloc(1, sizeof(*srv->agent.tcpcheck_rules));
		if (srv->agent.tcpcheck_rules) {
			srv->agent.tcpcheck_rules->flags = src->agent.tcpcheck_rules->flags;
			srv->agent.tcpcheck_rules->list  = src->agent.tcpcheck_rules->list;
			LIST_INIT(&srv->agent.tcpcheck_rules->preset_vars);
			dup_tcpcheck_vars(&srv->agent.tcpcheck_rules->preset_vars,
					  &src->agent.tcpcheck_rules->preset_vars);
		}
	}

	srv->agent.inter = src->agent.inter;
	srv->agent.fastinter = src->agent.fastinter;
	srv->agent.downinter = src->agent.downinter;
	/* load-balancing and connection management settings */
	srv->maxqueue = src->maxqueue;
	srv->ws = src->ws;
	srv->minconn = src->minconn;
	srv->maxconn = src->maxconn;
	srv->slowstart = src->slowstart;
	srv->observe = src->observe;
	srv->onerror = src->onerror;
	srv->onmarkeddown = src->onmarkeddown;
	srv->onmarkedup = src->onmarkedup;
	if (src->trackit != NULL)
		srv->trackit = strdup(src->trackit);
	srv->consecutive_errors_limit = src->consecutive_errors_limit;
	srv->uweight = srv->iweight = src->iweight;

	srv->check.send_proxy = src->check.send_proxy;
	/* health: up, but will fall down at first failure */
	srv->check.rise = srv->check.health = src->check.rise;
	srv->check.fall = src->check.fall;

	/* Here we check if 'disabled' is the default server state */
	if (src->next_admin & (SRV_ADMF_CMAINT | SRV_ADMF_FMAINT)) {
		srv->next_admin |= SRV_ADMF_CMAINT | SRV_ADMF_FMAINT;
		srv->next_state = SRV_ST_STOPPED;
		srv->check.state |= CHK_ST_PAUSED;
		srv->check.health = 0;
	}

	/* health: up but will fall down at first failure */
	srv->agent.rise = srv->agent.health = src->agent.rise;
	srv->agent.fall = src->agent.fall;

	/* name resolution settings; family defaults to IPv6-preferred */
	if (src->resolvers_id != NULL)
		srv->resolvers_id = strdup(src->resolvers_id);
	srv->resolv_opts.family_prio = src->resolv_opts.family_prio;
	srv->resolv_opts.accept_duplicate_ip = src->resolv_opts.accept_duplicate_ip;
	srv->resolv_opts.ignore_weight = src->resolv_opts.ignore_weight;
	if (srv->resolv_opts.family_prio == AF_UNSPEC)
		srv->resolv_opts.family_prio = AF_INET6;
	memcpy(srv->resolv_opts.pref_net,
	       src->resolv_opts.pref_net,
	       sizeof srv->resolv_opts.pref_net);
	srv->resolv_opts.pref_net_nb = src->resolv_opts.pref_net_nb;

	srv->init_addr_methods = src->init_addr_methods;
	srv->init_addr = src->init_addr;
#if defined(USE_OPENSSL)
	srv_ssl_settings_cpy(srv, src);
#endif
#ifdef TCP_USER_TIMEOUT
	srv->tcp_ut = src->tcp_ut;
#endif
	srv->mux_proto = src->mux_proto;
	srv->pool_purge_delay = src->pool_purge_delay;
	srv->low_idle_conns = src->low_idle_conns;
	srv->max_idle_conns = src->max_idle_conns;
	srv->max_reuse = src->max_reuse;

	if (srv_tmpl)
		srv->srvrq = src->srvrq;

	srv->netns = src->netns;
	srv->check.via_socks4 = src->check.via_socks4;
	srv->socks4_addr = src->socks4_addr;
	srv->log_bufsize = src->log_bufsize;

	/* deep-copy the PROXY-protocol TLV list; on allocation failure the
	 * copy is silently truncated (best effort) */
	LIST_INIT(&srv->pp_tlvs);

	list_for_each_entry(srv_tlv, &src->pp_tlvs, list) {
		new_srv_tlv = malloc(sizeof(*new_srv_tlv));
		if (unlikely(!new_srv_tlv)) {
			break;
		}
		new_srv_tlv->fmt_string = strdup(srv_tlv->fmt_string);
		if (unlikely(!new_srv_tlv->fmt_string)) {
			free(new_srv_tlv);
			break;
		}
		new_srv_tlv->type = srv_tlv->type;
		LIST_APPEND(&srv->pp_tlvs, &new_srv_tlv->list);
	}
}
+
+/* allocate a server and attach it to the global servers_list. Returns
+ * the server on success, otherwise NULL.
+ */
+struct server *new_server(struct proxy *proxy)
+{
+ struct server *srv;
+
+ srv = calloc(1, sizeof *srv);
+ if (!srv)
+ return NULL;
+
+ srv_take(srv);
+
+ srv->obj_type = OBJ_TYPE_SERVER;
+ srv->proxy = proxy;
+ queue_init(&srv->queue, proxy, srv);
+ LIST_APPEND(&servers_list, &srv->global_list);
+ LIST_INIT(&srv->srv_rec_item);
+ LIST_INIT(&srv->ip_rec_item);
+ LIST_INIT(&srv->pp_tlvs);
+ MT_LIST_INIT(&srv->prev_deleted);
+ event_hdl_sub_list_init(&srv->e_subs);
+ srv->rid = 0; /* rid defaults to 0 */
+
+ srv->next_state = SRV_ST_RUNNING; /* early server setup */
+ srv->last_change = ns_to_sec(now_ns);
+
+ srv->check.obj_type = OBJ_TYPE_CHECK;
+ srv->check.status = HCHK_STATUS_INI;
+ srv->check.server = srv;
+ srv->check.proxy = proxy;
+ srv->check.tcpcheck_rules = &proxy->tcpcheck_rules;
+
+ srv->agent.obj_type = OBJ_TYPE_CHECK;
+ srv->agent.status = HCHK_STATUS_INI;
+ srv->agent.server = srv;
+ srv->agent.proxy = proxy;
+ srv->xprt = srv->check.xprt = srv->agent.xprt = xprt_get(XPRT_RAW);
+
+ srv->extra_counters = NULL;
+#ifdef USE_OPENSSL
+ HA_RWLOCK_INIT(&srv->ssl_ctx.lock);
+#endif
+
+ /* please don't put default server settings here, they are set in
+ * proxy_preset_defaults().
+ */
+ return srv;
+}
+
/* Increment the server refcount; the matching release is srv_drop(), which
 * only frees the server once the count reaches zero. */
void srv_take(struct server *srv)
{
	HA_ATOMIC_INC(&srv->refcount);
}
+
+/* deallocate common server parameters (may be used by default-servers) */
+void srv_free_params(struct server *srv)
+{
+ free(srv->cookie);
+ free(srv->rdr_pfx);
+ free(srv->hostname);
+ free(srv->hostname_dn);
+ free((char*)srv->conf.file);
+ free(srv->per_thr);
+ free(srv->per_tgrp);
+ free(srv->curr_idle_thr);
+ free(srv->resolvers_id);
+ free(srv->addr_node.key);
+ free(srv->lb_nodes);
+ if (srv->log_target) {
+ deinit_log_target(srv->log_target);
+ free(srv->log_target);
+ }
+
+ if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->destroy_srv)
+ xprt_get(XPRT_SSL)->destroy_srv(srv);
+}
+
/* Deallocate a server <srv> and its member. <srv> must be allocated. For
 * dynamic servers, its refcount is decremented first. The free operations are
 * conducted only if the refcount is nul.
 *
 * As a convenience, <srv.next> is returned if srv is not NULL. It may be useful
 * when calling srv_drop on the list of servers.
 */
struct server *srv_drop(struct server *srv)
{
	struct server *next = NULL;

	if (!srv)
		goto end;

	/* save the successor before the server may be freed below */
	next = srv->next;

	/* For dynamic servers, decrement the reference counter. Only free the
	 * server when reaching zero.
	 */
	if (HA_ATOMIC_SUB_FETCH(&srv->refcount, 1))
		goto end;

	/* make sure we are removed from our 'next->prev_deleted' list
	 * This doesn't require full thread isolation as we're using mt lists
	 * However this could easily be turned into regular list if required
	 * (with the proper use of thread isolation)
	 */
	MT_LIST_DELETE(&srv->prev_deleted);

	/* stop and release the tasks attached to this server */
	task_destroy(srv->warmup);
	task_destroy(srv->srvrq_check);

	free(srv->id);
	srv_free_params(srv);

	HA_SPIN_DESTROY(&srv->lock);

	LIST_DELETE(&srv->global_list);
	event_hdl_sub_list_destroy(&srv->e_subs);

	EXTRA_COUNTERS_FREE(srv->extra_counters);

	ha_free(&srv);

 end:
	return next;
}
+
+/* Detach server from proxy list. It is supported to call this
+ * even if the server is not yet in the list
+ */
+static void _srv_detach(struct server *srv)
+{
+ struct proxy *be = srv->proxy;
+
+ if (be->srv == srv) {
+ be->srv = srv->next;
+ }
+ else {
+ struct server *prev;
+
+ for (prev = be->srv; prev && prev->next != srv; prev = prev->next)
+ ;
+ if (prev)
+ prev->next = srv->next;
+ }
+}
+
/* Remove a server <srv> from a tracking list if <srv> is tracking another
 * server. No special care is taken if <srv> is tracked itself by another one :
 * this situation should be avoided by the caller.
 *
 * Not thread-safe.
 */
static void release_server_track(struct server *srv)
{
	struct server *strack = srv->track;
	struct server **base;

	if (!strack)
		return;

	/* unlink <srv> from the tracked server's singly-linked trackers list
	 * by walking the link cells */
	for (base = &strack->trackers; *base; base = &((*base)->tracknext)) {
		if (*base == srv) {
			*base = srv->tracknext;
			return;
		}
	}

	/* srv not found on the tracking list, this should never happen */
	/* note: the loop above only exits here with *base == NULL, so this
	 * BUG_ON() always fires when reached -- it marks an impossible state */
	BUG_ON(!*base);
}
+
/*
 * Parse as much as possible such a range string argument: low[-high]
 * Set <nb_low> and <nb_high> values so that they may be reused by this loop
 * for(int i = nb_low; i <= nb_high; i++)... with nb_low >= 1.
 * <srv> is unused but kept so all the parsing helpers share a signature.
 * Fails if 'low' < 0 or 'high' is present and not higher than 'low'.
 * Returns 0 if succeeded, -1 if not.
 */
static int _srv_parse_tmpl_range(struct server *srv, const char *arg,
                                 int *nb_low, int *nb_high)
{
	const char *dash;

	/* parse <arg> in place instead of duplicating it into the shared
	 * <trash> chunk, which the previous implementation needlessly
	 * clobbered. atoi() stops at the first non-digit, so the separator
	 * search can be done on the untouched string.
	 */
	*nb_high = 0;
	*nb_low = atoi(arg);

	if ((dash = strchr(arg, '-')) != NULL) {
		/* explicit "low-high" form */
		*nb_high = atoi(dash + 1);
	}
	else {
		/* single number N means the range 1..N */
		*nb_high += *nb_low;
		*nb_low = 1;
	}

	if (*nb_low < 0 || *nb_high < *nb_low)
		return -1;

	return 0;
}
+
/* Build the id of server <srv> as "<prefix><nb>", releasing any previously
 * set id first.
 *
 * This helper is used when expanding a server template into its final
 * servers; the previous comment here wrongly described the range parser.
 */
static inline void _srv_parse_set_id_from_prefix(struct server *srv,
                                                 const char *prefix, int nb)
{
	chunk_printf(&trash, "%s%d", prefix, nb);
	free(srv->id);
	srv->id = strdup(trash.area);
}
+
/* Initialize as much as possible servers from <srv> server template.
 * Note that a server template is a special server with
 * a few different parameters than a server which has
 * been parsed mostly the same way as a server.
 *
 * This function is first intended to be used through parse_server to
 * initialize a new server on startup.
 *
 * Returns the number of servers successfully allocated,
 * 'srv' template included.
 */
static int _srv_parse_tmpl_init(struct server *srv, struct proxy *px)
{
	int i;
	struct server *newsrv;

	/* the template itself keeps index nb_low; clone indexes
	 * nb_low+1 .. nb_high */
	for (i = srv->tmpl_info.nb_low + 1; i <= srv->tmpl_info.nb_high; i++) {
		newsrv = new_server(px);
		if (!newsrv)
			goto err;

		newsrv->conf.file = strdup(srv->conf.file);
		newsrv->conf.line = srv->conf.line;

		srv_settings_cpy(newsrv, srv, 1);
		srv_prepare_for_resolution(newsrv, srv->hostname);

		/* re-parse the SNI expression for each clone; file/line are not
		 * passed since the template line was already validated */
		if (newsrv->sni_expr) {
			newsrv->ssl_ctx.sni = srv_sni_sample_parse_expr(newsrv, px, NULL, 0, NULL);
			if (!newsrv->ssl_ctx.sni)
				goto err;
		}

		/* append to list of servers available to receive an hostname */
		if (newsrv->srvrq)
			LIST_APPEND(&newsrv->srvrq->attached_servers, &newsrv->srv_rec_item);

		/* Set this new server ID. */
		_srv_parse_set_id_from_prefix(newsrv, srv->tmpl_info.prefix, i);

		/* Linked backwards first. This will be restablished after parsing. */
		newsrv->next = px->srv;
		px->srv = newsrv;
	}
	_srv_parse_set_id_from_prefix(srv, srv->tmpl_info.prefix, srv->tmpl_info.nb_low);

	return i - srv->tmpl_info.nb_low;

 err:
	/* partial-failure path: the already-created clones stay linked into
	 * px->srv; only the failing one is released. NOTE(review): conf.file
	 * and id of the failing clone look like they are not freed here --
	 * verify against srv_free_params()/free_check() semantics. */
	_srv_parse_set_id_from_prefix(srv, srv->tmpl_info.prefix, srv->tmpl_info.nb_low);
	if (newsrv) {
		release_sample_expr(newsrv->ssl_ctx.sni);
		free_check(&newsrv->agent);
		free_check(&newsrv->check);
		LIST_DELETE(&newsrv->global_list);
	}
	free(newsrv);
	return i - srv->tmpl_info.nb_low;
}
+
/* Ensure server config will work with effective proxy mode
 *
 * This function is expected to be called after _srv_parse_init() initialization
 * but only when the effective server's proxy mode is known, which is not always
 * the case during parsing time, in which case the function will be called during
 * postparsing thanks to the _srv_postparse() below.
 *
 * <postparse> is non-zero when called from the post-server-check hook.
 *
 * Returns ERR_NONE on success else a combination or ERR_CODE.
 */
static int _srv_check_proxy_mode(struct server *srv, char postparse)
{
	int err_code = ERR_NONE;

	/* proxies with PR_CAP_LB were already checked at parsing time */
	if (postparse && !(srv->proxy->cap & PR_CAP_LB))
		return ERR_NONE; /* nothing to do, the check was already performed during parsing */

	/* point user messages at the server's config location while checking */
	if (srv->conf.file)
		set_usermsgs_ctx(srv->conf.file, srv->conf.line, NULL);

	if (!srv->proxy) {
		/* proxy mode not known, cannot perform checks (ie: defaults section) */
		goto out;
	}

	if (srv->proxy->mode == PR_MODE_SYSLOG) {
		/* log backend server (belongs to proxy with mode log enabled):
		 * perform some compatibility checks
		 */

		/* supported address family types are:
		 *   - ipv4
		 *   - ipv6
		 * (UNSPEC is supported because it means it will be resolved later)
		 */
		if (srv->addr.ss_family != AF_UNSPEC &&
		    srv->addr.ss_family != AF_INET && srv->addr.ss_family != AF_INET6) {
			ha_alert("log server address family not supported for log backend server.\n");
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}

		/* only @tcp or @udp address forms (or equivalent) are supported */
		if (!(srv->addr_type.xprt_type == PROTO_TYPE_DGRAM && srv->addr_type.proto_type == PROTO_TYPE_DGRAM) &&
		    !(srv->addr_type.xprt_type == PROTO_TYPE_STREAM && srv->addr_type.proto_type == PROTO_TYPE_STREAM)) {
			ha_alert("log server address type not supported for log backend server.\n");
			err_code |= ERR_ALERT | ERR_FATAL;
		}
	}
	else {
		/* for all other proxy modes: only TCP expected as srv's transport type for now */
		if (srv->addr_type.xprt_type != PROTO_TYPE_STREAM) {
			ha_alert("unsupported transport for server address in '%s' backend.\n", proxy_mode_str(srv->proxy->mode));
			err_code |= ERR_ALERT | ERR_FATAL;
		}
	}
 out:
	/* restore the previous user message context */
	if (srv->conf.file)
		reset_usermsgs_ctx();

	return err_code;
}
+
+/* Perform some server postparsing checks / tasks:
+ * We must be careful that checks / postinits performed within this function
+ * don't depend or conflict with other postcheck functions that are registered
+ * using REGISTER_POST_SERVER_CHECK() hook.
+ *
+ * Returns ERR_NONE on success else a combination or ERR_CODE.
+ */
+static int _srv_postparse(struct server *srv)
+{
+ int err_code = ERR_NONE;
+
+ err_code |= _srv_check_proxy_mode(srv, 1);
+
+ return err_code;
+}
+REGISTER_POST_SERVER_CHECK(_srv_postparse);
+
/* Allocate a new server pointed by <srv> and try to parse the first arguments
 * in <args> as an address for a server or an address-range for a template or
 * nothing for a default-server. <cur_arg> is incremented to the next argument.
 *
 * This function is first intended to be used through parse_server to
 * initialize a new server on startup.
 *
 * A mask of errors is returned. On a parsing error, ERR_FATAL is set. In case
 * of memory exhaustion, ERR_ABORT is set. If the server cannot be allocated,
 * <srv> will be set to NULL.
 */
static int _srv_parse_init(struct server **srv, char **args, int *cur_arg,
                           struct proxy *curproxy,
                           int parse_flags)
{
	struct server *newsrv = NULL;
	const char *err = NULL;
	int err_code = 0;
	char *fqdn = NULL;
	int tmpl_range_low = 0, tmpl_range_high = 0;
	char *errmsg = NULL;

	*srv = NULL;

	/* There is no mandatory first arguments for default server. */
	if (parse_flags & SRV_PARSE_PARSE_ADDR) {
		if (parse_flags & SRV_PARSE_TEMPLATE) {
			if (!*args[3]) {
				/* 'server-template' line number of argument check. */
				ha_alert("'%s' expects <prefix> <nb | range> <addr>[:<port>] as arguments.\n",
					 args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}

			/* template prefixes have a more restrictive charset than names */
			err = invalid_prefix_char(args[1]);
		}
		else {
			if (!*args[2]) {
				/* 'server' line number of argument check. */
				ha_alert("'%s' expects <name> and <addr>[:<port>] as arguments.\n",
					 args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}

			err = invalid_char(args[1]);
		}

		if (err) {
			ha_alert("character '%c' is not permitted in %s %s '%s'.\n",
				 *err, args[0], !(parse_flags & SRV_PARSE_TEMPLATE) ? "name" : "prefix", args[1]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}

	*cur_arg = 2;
	if (parse_flags & SRV_PARSE_TEMPLATE) {
		/* Parse server-template <nb | range> arg. */
		/* NOTE(review): <newsrv> is still NULL here; _srv_parse_tmpl_range()
		 * does not use its first argument, so this is harmless — confirm. */
		if (_srv_parse_tmpl_range(newsrv, args[*cur_arg], &tmpl_range_low, &tmpl_range_high) < 0) {
			ha_alert("Wrong %s number or range arg '%s'.\n",
				 args[0], args[*cur_arg]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		(*cur_arg)++;
	}

	if (!(parse_flags & SRV_PARSE_DEFAULT_SERVER)) {
		struct sockaddr_storage *sk;
		int port1, port2, port;

		/* regular server or template: allocate a fresh server */
		*srv = newsrv = new_server(curproxy);
		if (!newsrv) {
			ha_alert("out of memory.\n");
			err_code |= ERR_ALERT | ERR_ABORT;
			goto out;
		}
		register_parsing_obj(&newsrv->obj_type);

		if (parse_flags & SRV_PARSE_TEMPLATE) {
			newsrv->tmpl_info.nb_low = tmpl_range_low;
			newsrv->tmpl_info.nb_high = tmpl_range_high;
		}

		if (parse_flags & SRV_PARSE_DYNAMIC)
			newsrv->flags |= SRV_F_DYNAMIC;

		/* Note: for a server template, its id is its prefix.
		 * This is a temporary id which will be used for server allocations to come
		 * after parsing.
		 */
		if (!(parse_flags & SRV_PARSE_TEMPLATE))
			newsrv->id = strdup(args[1]);
		else
			newsrv->tmpl_info.prefix = strdup(args[1]);

		/* several ways to check the port component :
		 *  - IP    => port=+0, relative (IPv4 only)
		 *  - IP:   => port=+0, relative
		 *  - IP:N  => port=N, absolute
		 *  - IP:+N => port=+N, relative
		 *  - IP:-N => port=-N, relative
		 */
		if (!(parse_flags & SRV_PARSE_PARSE_ADDR))
			goto skip_addr;

		sk = str2sa_range(args[*cur_arg], &port, &port1, &port2, NULL, NULL, &newsrv->addr_type,
		                  &errmsg, NULL, &fqdn,
		                  (parse_flags & SRV_PARSE_INITIAL_RESOLVE ? PA_O_RESOLVE : 0) | PA_O_PORT_OK |
		                  (parse_flags & SRV_PARSE_IN_PEER_SECTION ? PA_O_PORT_MAND : PA_O_PORT_OFS) |
		                  PA_O_STREAM | PA_O_DGRAM | PA_O_XPRT);
		if (!sk) {
			ha_alert("%s\n", errmsg);
			err_code |= ERR_ALERT | ERR_FATAL;
			ha_free(&errmsg);
			goto out;
		}

		if (!port1 || !port2) {
			if (sk->ss_family != AF_CUST_RHTTP_SRV) {
				/* no port specified, +offset, -offset */
				newsrv->flags |= SRV_F_MAPPORTS;
			}
			else {
				newsrv->flags |= SRV_F_RHTTP;
			}
		}

		/* save hostname and create associated name resolution */
		if (fqdn) {
			if (fqdn[0] == '_') { /* SRV record */
				/* Check if a SRV request already exists, and if not, create it */
				if ((newsrv->srvrq = find_srvrq_by_name(fqdn, curproxy)) == NULL)
					newsrv->srvrq = new_resolv_srvrq(newsrv, fqdn);
				if (newsrv->srvrq == NULL) {
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
				LIST_APPEND(&newsrv->srvrq->attached_servers, &newsrv->srv_rec_item);
			}
			else if (srv_prepare_for_resolution(newsrv, fqdn) == -1) {
				ha_alert("Can't create DNS resolution for server '%s'\n",
					 newsrv->id);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
		}

		newsrv->addr = *sk;
		newsrv->svc_port = port;
		/*
		 * we don't need to lock the server here, because
		 * we are in the process of initializing.
		 *
		 * Note that the server is not attached into the proxy tree if
		 * this is a dynamic server.
		 */
		srv_set_addr_desc(newsrv, !(parse_flags & SRV_PARSE_DYNAMIC));

		if (!newsrv->srvrq && !newsrv->hostname &&
		    !protocol_lookup(newsrv->addr.ss_family, PROTO_TYPE_STREAM, 0)) {
			ha_alert("Unknown protocol family %d '%s'\n",
				 newsrv->addr.ss_family, args[*cur_arg]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}

		(*cur_arg)++;
 skip_addr:
		if (!(parse_flags & SRV_PARSE_DYNAMIC)) {
			/* Copy default server settings to new server */
			srv_settings_cpy(newsrv, &curproxy->defsrv, 0);
		} else {
			/* Initialize dynamic server weight to 1 */
			newsrv->uweight = newsrv->iweight = 1;

			/* A dynamic server is disabled on startup */
			newsrv->next_admin = SRV_ADMF_FMAINT;
			newsrv->next_state = SRV_ST_STOPPED;
			server_recalc_eweight(newsrv, 0);

			/* Set default values for checks */
			newsrv->check.inter = DEF_CHKINTR;
			newsrv->check.rise = DEF_RISETIME;
			newsrv->check.fall = DEF_FALLTIME;

			newsrv->agent.inter = DEF_CHKINTR;
			newsrv->agent.rise = DEF_AGENT_RISETIME;
			newsrv->agent.fall = DEF_AGENT_FALLTIME;
		}
		HA_SPIN_INIT(&newsrv->lock);
	}
	else {
		/* default-server: tune the proxy's embedded default server */
		*srv = newsrv = &curproxy->defsrv;
		*cur_arg = 1;
		newsrv->resolv_opts.family_prio = AF_INET6;
		newsrv->resolv_opts.accept_duplicate_ip = 0;
	}

	free(fqdn);
	if (!(curproxy->cap & PR_CAP_LB)) {
		/* No need to wait for effective proxy mode, it is already known:
		 * Only general purpose user-declared proxies ("listen", "frontend", "backend")
		 * offer the possibility to configure the mode of the proxy. Hopefully for us,
		 * they have the PR_CAP_LB set.
		 */
		return _srv_check_proxy_mode(newsrv, 0);
	}
	return 0;

out:
	free(fqdn);
	return err_code;
}
+
+/* Parse the server keyword in <args>.
+ * <cur_arg> is incremented beyond the keyword optional value. Note that this
+ * might not be the case if an error is reported.
+ *
+ * This function is first intended to be used through parse_server to
+ * initialize a new server on startup.
+ *
+ * A mask of errors is returned. ERR_FATAL is set if the parsing should be
+ * interrupted.
+ */
+static int _srv_parse_kw(struct server *srv, char **args, int *cur_arg,
+                         struct proxy *curproxy,
+                         int parse_flags)
+{
+	int err_code = 0;
+	struct srv_kw *kw;
+	const char *best;
+	char *errmsg = NULL;
+
+	/* look up the keyword in the registered server keyword lists */
+	kw = srv_find_kw(args[*cur_arg]);
+	if (!kw) {
+		/* unknown keyword: try to suggest the closest known one */
+		best = srv_find_best_kw(args[*cur_arg]);
+		if (best)
+			ha_alert("unknown keyword '%s'; did you mean '%s' maybe ?%s\n",
+			         args[*cur_arg], best,
+			         (parse_flags & SRV_PARSE_PARSE_ADDR) ? "" :
+			         " Hint: no address was expected for this server.");
+		else
+			ha_alert("unknown keyword '%s'.%s\n", args[*cur_arg],
+			         (parse_flags & SRV_PARSE_PARSE_ADDR) ? "" :
+			         " Hint: no address was expected for this server.");
+
+		/* no <kw> here, so <cur_arg> is not advanced (see out label) */
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	if (!kw->parse) {
+		/* keyword registered but its support was not built in */
+		ha_alert("'%s' option is not implemented in this version (check build options)\n",
+		         args[*cur_arg]);
+		err_code = ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+	if ((parse_flags & SRV_PARSE_DEFAULT_SERVER) && !kw->default_ok) {
+		ha_alert("'%s' option is not accepted in default-server sections\n",
+		         args[*cur_arg]);
+		err_code = ERR_ALERT;
+		goto out;
+	}
+	else if ((parse_flags & SRV_PARSE_DYNAMIC) && !kw->dynamic_ok) {
+		ha_alert("'%s' option is not accepted for dynamic server\n",
+		         args[*cur_arg]);
+		err_code |= ERR_ALERT;
+		goto out;
+	}
+
+	/* delegate the actual parsing to the keyword handler */
+	err_code = kw->parse(args, cur_arg, curproxy, srv, &errmsg);
+	if (err_code) {
+		display_parser_err(NULL, 0, args, *cur_arg, err_code, &errmsg);
+		free(errmsg);
+	}
+
+out:
+	/* skip the keyword plus its arguments, unless the keyword asked
+	 * to keep <cur_arg> unchanged (skip == -1)
+	 */
+	if (kw->skip != -1)
+		*cur_arg += 1 + kw->skip;
+
+	return err_code;
+}
+
+/* This function is first intended to be used through parse_server to
+ * initialize a new server on startup.
+ *
+ * Compiles the server's "sni" expression if one was configured. Returns 0
+ * when no SNI expression is set or when parsing succeeds, otherwise the
+ * non-zero value returned by server_parse_sni_expr(), with <errmsg>
+ * possibly filled for the caller to report.
+ */
+static int _srv_parse_sni_expr_init(char **args, int cur_arg,
+                                    struct server *srv, struct proxy *proxy,
+                                    char **errmsg)
+{
+	int ret;
+
+	/* nothing to do when no "sni" keyword was seen */
+	if (!srv->sni_expr)
+		return 0;
+
+	ret = server_parse_sni_expr(srv, proxy, errmsg);
+	if (!ret)
+		return 0;
+
+	return ret;
+}
+
+/* Server initializations finalization.
+ * Initialize health check, agent check, SNI expression and outgoing TLVs if enabled.
+ * Must not be called for a default server instance.
+ *
+ * This function is first intended to be used through parse_server to
+ * initialize a new server on startup.
+ *
+ * Returns 0 on success, otherwise a mask of ERR_* flags.
+ */
+static int _srv_parse_finalize(char **args, int cur_arg,
+                               struct server *srv, struct proxy *px,
+                               int parse_flags)
+{
+	int ret;
+	char *errmsg = NULL;
+	struct srv_pp_tlv_list *srv_tlv = NULL;
+
+	/* "check" and "track" are mutually exclusive on a server */
+	if (srv->do_check && srv->trackit) {
+		ha_alert("unable to enable checks and tracking at the same time!\n");
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	/* an agent check cannot run without an explicit agent port */
+	if (srv->do_agent && !srv->agent.port) {
+		ha_alert("server %s does not have agent port. Agent check has been disabled.\n",
+		         srv->id);
+		return ERR_ALERT | ERR_FATAL;
+	}
+
+	if ((ret = _srv_parse_sni_expr_init(args, cur_arg, srv, px, &errmsg)) != 0) {
+		if (errmsg) {
+			ha_alert("%s\n", errmsg);
+			free(errmsg);
+		}
+		return ret;
+	}
+
+	/* A dynamic server is disabled on startup. It must not be counted as
+	 * an active backend entry.
+	 */
+	if (!(parse_flags & SRV_PARSE_DYNAMIC)) {
+		if (srv->flags & SRV_F_BACKUP)
+			px->srv_bck++;
+		else
+			px->srv_act++;
+	}
+
+	/* compile the log-format expression of each configured outgoing
+	 * proxy-protocol TLV
+	 */
+	list_for_each_entry(srv_tlv, &srv->pp_tlvs, list) {
+		LIST_INIT(&srv_tlv->fmt);
+		if (srv_tlv->fmt_string && unlikely(!parse_logformat_string(srv_tlv->fmt_string,
+			srv->proxy, &srv_tlv->fmt, 0, SMP_VAL_BE_SRV_CON, &errmsg))) {
+			if (errmsg) {
+				ha_alert("%s\n", errmsg);
+				free(errmsg);
+			}
+			return ERR_ALERT | ERR_FATAL;
+		}
+	}
+
+	/* commit the initial administrative/operational state */
+	srv_lb_commit_status(srv);
+
+	return 0;
+}
+
+/* Parse a "server", "default-server", "server-template" or peer line from
+ * the configuration file <file> at line <linenum>. <parse_flags> (SRV_PARSE_*)
+ * selects the exact parsing mode. Returns 0 on success, otherwise a mask of
+ * ERR_* flags.
+ */
+int parse_server(const char *file, int linenum, char **args,
+                 struct proxy *curproxy, const struct proxy *defproxy,
+                 int parse_flags)
+{
+	struct server *newsrv = NULL;
+	int err_code = 0;
+
+	int cur_arg;
+
+	/* make user messages reference the current file/line */
+	set_usermsgs_ctx(file, linenum, NULL);
+
+	if (!(parse_flags & SRV_PARSE_DEFAULT_SERVER) && curproxy == defproxy) {
+		ha_alert("'%s' not allowed in 'defaults' section.\n", args[0]);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+	else if (failifnotcap(curproxy, PR_CAP_BE, file, linenum, args[0], NULL)) {
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto out;
+	}
+
+	/* in a "peers" section, a peer line without an address is silently
+	 * ignored (the local peer may be declared without one)
+	 */
+	if ((parse_flags & (SRV_PARSE_IN_PEER_SECTION|SRV_PARSE_PARSE_ADDR)) ==
+	    (SRV_PARSE_IN_PEER_SECTION|SRV_PARSE_PARSE_ADDR)) {
+		if (!*args[2])
+			return 0;
+	}
+
+	err_code = _srv_parse_init(&newsrv, args, &cur_arg, curproxy,
+	                           parse_flags);
+
+	/* the servers are linked backwards first */
+	if (newsrv && !(parse_flags & SRV_PARSE_DEFAULT_SERVER)) {
+		newsrv->next = curproxy->srv;
+		curproxy->srv = newsrv;
+	}
+
+	if (err_code & ERR_CODE)
+		goto out;
+
+	if (!newsrv->conf.file) // note: do it only once for default-server
+		newsrv->conf.file = strdup(file);
+	newsrv->conf.line = linenum;
+
+	/* parse all remaining keywords on the line */
+	while (*args[cur_arg]) {
+		err_code = _srv_parse_kw(newsrv, args, &cur_arg, curproxy,
+		                         parse_flags);
+		if (err_code & ERR_FATAL)
+			goto out;
+	}
+
+	if (!(parse_flags & SRV_PARSE_DEFAULT_SERVER)) {
+		err_code |= _srv_parse_finalize(args, cur_arg, newsrv, curproxy, parse_flags);
+		if (err_code & ERR_FATAL)
+			goto out;
+	}
+
+	/* instantiate all servers described by a "server-template" line */
+	if (parse_flags & SRV_PARSE_TEMPLATE)
+		_srv_parse_tmpl_init(newsrv, curproxy);
+
+	/* If the server id is fixed, insert it in the proxy used_id tree.
+	 * This is needed to detect a later duplicate id via srv_parse_id.
+	 *
+	 * If no id is specified, a dynamic one is generated in
+	 * check_config_validity.
+	 */
+	if (newsrv->flags & SRV_F_FORCED_ID)
+		eb32_insert(&curproxy->conf.used_server_id, &newsrv->conf.id);
+
+	HA_DIAG_WARNING_COND((curproxy->cap & PR_CAP_LB) && !newsrv->uweight,
+	                     "configured with weight of 0 will never be selected by load balancing algorithms\n");
+
+	reset_usermsgs_ctx();
+	return 0;
+
+ out:
+	reset_usermsgs_ctx();
+	return err_code;
+}
+
+/* Returns a pointer to the first server matching the id <id>.
+ * NULL is returned if no match is found.
+ * the lookup is performed in the backend <bk>
+ */
+struct server *server_find_by_id(struct proxy *bk, int id)
+{
+	struct eb32_node *eb32;
+	struct server *curserver;
+
+	/* id 0 is never a valid configured server id */
+	if (!bk || (id ==0))
+		return NULL;
+
+	/* <bk> has no backend capabilities, so it can't have a server */
+	if (!(bk->cap & PR_CAP_BE))
+		return NULL;
+
+	curserver = NULL;
+
+	/* servers with a fixed id are indexed in this eb32 tree */
+	eb32 = eb32_lookup(&bk->conf.used_server_id, id);
+	if (eb32)
+		curserver = container_of(eb32, struct server, conf.id);
+
+	return curserver;
+}
+
+/* Returns a pointer to the first server matching either name <name>, or id
+ * if <name> starts with a '#'. NULL is returned if no match is found.
+ * the lookup is performed in the backend <bk>
+ */
+struct server *server_find_by_name(struct proxy *bk, const char *name)
+{
+	struct server *curserver;
+
+	if (!bk || !name)
+		return NULL;
+
+	/* <bk> has no backend capabilities, so it can't have a server */
+	if (!(bk->cap & PR_CAP_BE))
+		return NULL;
+
+	curserver = NULL;
+	if (*name == '#') {
+		/* "#<id>" syntax: look the server up by its numeric id */
+		curserver = server_find_by_id(bk, atoi(name + 1));
+		if (curserver)
+			return curserver;
+	}
+	else {
+		/* linear scan of the backend's server list by name */
+		curserver = bk->srv;
+
+		while (curserver && (strcmp(curserver->id, name) != 0))
+			curserver = curserver->next;
+
+		if (curserver)
+			return curserver;
+	}
+
+	return NULL;
+}
+
+/* Finds the server in backend <bk> best matching <name> and/or <id>. The
+ * name match is preferred unless the id match is forced by configuration
+ * (SRV_F_FORCED_ID). When <diff> is not NULL it receives a bitmask telling
+ * how the returned server diverges from the request: bit 0 set means the
+ * name matched but not the id, bit 1 set means the id matched but not the
+ * name. Returns NULL when nothing matches.
+ */
+struct server *server_find_best_match(struct proxy *bk, char *name, int id, int *diff)
+{
+	struct server *byname;
+	struct server *byid;
+
+	if (!name && !id)
+		return NULL;
+
+	if (diff)
+		*diff = 0;
+
+	byname = byid = NULL;
+
+	if (name) {
+		byname = server_find_by_name(bk, name);
+		/* perfect match: name found and id not requested or identical */
+		if (byname && (!id || byname->puid == id))
+			return byname;
+	}
+
+	/* remaining possibilities :
+	 *  - name not set
+	 *  - name set but not found
+	 *  - name found but ID doesn't match
+	 */
+	if (id) {
+		byid = server_find_by_id(bk, id);
+		if (byid) {
+			if (byname) {
+				/* use id only if forced by configuration */
+				if (byid->flags & SRV_F_FORCED_ID) {
+					if (diff)
+						*diff |= 2;
+					return byid;
+				}
+				else {
+					if (diff)
+						*diff |= 1;
+					return byname;
+				}
+			}
+
+			/* remaining possibilities:
+			 *   - name not set
+			 *   - name set but not found
+			 */
+			if (name && diff)
+				*diff |= 2;
+			return byid;
+		}
+
+		/* id not found */
+		if (byname) {
+			if (diff)
+				*diff |= 1;
+			return byname;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * update a server's current IP address.
+ * ip is a pointer to the new IP address, whose address family is ip_sin_family.
+ * ip is in network format.
+ * updater is a string which contains an information about the requester of the update.
+ * updater is used if not NULL.
+ * NOTE(review): the code below passes <updater> to chunk_printf unconditionally;
+ * callers appear to always provide it — confirm before relying on NULL here.
+ *
+ * A log line and a stderr warning message is generated based on server's backend options.
+ *
+ * Must be called with the server lock held.
+ */
+int srv_update_addr(struct server *s, void *ip, int ip_sin_family, const char *updater)
+{
+	union {
+		struct event_hdl_cb_data_server_inetaddr addr;
+		struct event_hdl_cb_data_server common;
+	} cb_data;
+	struct sockaddr_storage new_addr = { }; // shut up gcc warning
+
+	/* save the new IP family & address if necessary */
+	switch (ip_sin_family) {
+	case AF_INET:
+		/* nothing to do when the address is unchanged */
+		if (s->addr.ss_family == ip_sin_family &&
+		    !memcmp(ip, &((struct sockaddr_in *)&s->addr)->sin_addr.s_addr, 4))
+			return 0;
+		break;
+	case AF_INET6:
+		if (s->addr.ss_family == ip_sin_family &&
+		    !memcmp(ip, &((struct sockaddr_in6 *)&s->addr)->sin6_addr.s6_addr, 16))
+			return 0;
+		break;
+	};
+
+	/* generates a log line and a warning on stderr */
+	if (1) {
+		/* book enough space for both IPv4 and IPv6 */
+		char oldip[INET6_ADDRSTRLEN];
+		char newip[INET6_ADDRSTRLEN];
+
+		memset(oldip, '\0', INET6_ADDRSTRLEN);
+		memset(newip, '\0', INET6_ADDRSTRLEN);
+
+		/* copy old IP address in a string */
+		switch (s->addr.ss_family) {
+		case AF_INET:
+			inet_ntop(s->addr.ss_family, &((struct sockaddr_in *)&s->addr)->sin_addr, oldip, INET_ADDRSTRLEN);
+			break;
+		case AF_INET6:
+			inet_ntop(s->addr.ss_family, &((struct sockaddr_in6 *)&s->addr)->sin6_addr, oldip, INET6_ADDRSTRLEN);
+			break;
+		default:
+			/* server had no address yet (e.g. AF_UNSPEC) */
+			strlcpy2(oldip, "(none)", sizeof(oldip));
+			break;
+		};
+
+		/* copy new IP address in a string */
+		switch (ip_sin_family) {
+		case AF_INET:
+			inet_ntop(ip_sin_family, ip, newip, INET_ADDRSTRLEN);
+			break;
+		case AF_INET6:
+			inet_ntop(ip_sin_family, ip, newip, INET6_ADDRSTRLEN);
+			break;
+		};
+
+		/* save log line into a buffer */
+		chunk_printf(&trash, "%s/%s changed its IP from %s to %s by %s",
+		             s->proxy->id, s->id, oldip, newip, updater);
+
+		/* write the buffer on stderr */
+		ha_warning("%s.\n", trash.area);
+
+		/* send a log */
+		send_log(s->proxy, LOG_NOTICE, "%s.\n", trash.area);
+	}
+
+	/* save the new IP family */
+	new_addr.ss_family = ip_sin_family;
+	/* save the new IP address */
+	switch (ip_sin_family) {
+	case AF_INET:
+		memcpy(&((struct sockaddr_in *)&new_addr)->sin_addr.s_addr, ip, 4);
+		break;
+	case AF_INET6:
+		memcpy(((struct sockaddr_in6 *)&new_addr)->sin6_addr.s6_addr, ip, 16);
+		break;
+	};
+
+	/* publish the change through the event subsystem rather than
+	 * applying it directly
+	 */
+	_srv_event_hdl_prepare(&cb_data.common, s, 0);
+	_srv_event_hdl_prepare_inetaddr(&cb_data.addr, s,
+	                                &new_addr, s->svc_port, !!(s->flags & SRV_F_MAPPORTS),
+	                                0);
+
+	/* server_atomic_sync_task will apply the changes for us */
+	_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_INETADDR, cb_data, s);
+
+	return 0;
+}
+
+/* update agent health check address and port
+ * addr can be ip4/ip6 or a hostname
+ * if one error occurs, don't apply anything
+ * must be called with the server lock held.
+ *
+ * Returns NULL on success, otherwise a pointer to an error message stored
+ * in a trash chunk (valid until the next trash reuse).
+ */
+const char *srv_update_agent_addr_port(struct server *s, const char *addr, const char *port)
+{
+	struct sockaddr_storage sk;
+	struct buffer *msg;
+	int new_port;
+
+	msg = get_trash_chunk();
+	chunk_reset(msg);
+
+	if (!(s->agent.state & CHK_ST_ENABLED)) {
+		chunk_strcat(msg, "agent checks are not enabled on this server");
+		goto out;
+	}
+	if (addr) {
+		/* validate the address first; it is only applied at the end */
+		memset(&sk, 0, sizeof(struct sockaddr_storage));
+		if (str2ip(addr, &sk) == NULL) {
+			chunk_appendf(msg, "invalid addr '%s'", addr);
+			goto out;
+		}
+	}
+	if (port) {
+		if (strl2irc(port, strlen(port), &new_port) != 0) {
+			chunk_appendf(msg, "provided port is not an integer");
+			goto out;
+		}
+		if (new_port < 0 || new_port > 65535) {
+			chunk_appendf(msg, "provided port is invalid");
+			goto out;
+		}
+	}
+out:
+	/* only apply the new values when no error message was produced */
+	if (msg->data)
+		return msg->area;
+	else {
+		if (addr)
+			set_srv_agent_addr(s, &sk);
+		if (port)
+			set_srv_agent_port(s, new_port);
+	}
+	return NULL;
+}
+
+/* update server health check address and port
+ * addr must be ip4 or ip6, it won't be resolved
+ * if one error occurs, don't apply anything
+ * must be called with the server lock held.
+ *
+ * Returns NULL on success, otherwise a pointer to an error message stored
+ * in a trash chunk (valid until the next trash reuse).
+ */
+const char *srv_update_check_addr_port(struct server *s, const char *addr, const char *port)
+{
+	struct sockaddr_storage sk;
+	struct buffer *msg;
+	int new_port;
+
+	msg = get_trash_chunk();
+	chunk_reset(msg);
+
+	if (!(s->check.state & CHK_ST_ENABLED)) {
+		chunk_strcat(msg, "health checks are not enabled on this server");
+		goto out;
+	}
+	if (addr) {
+		/* validate first; resolution is disabled (third arg 0) */
+		memset(&sk, 0, sizeof(struct sockaddr_storage));
+		if (str2ip2(addr, &sk, 0) == NULL) {
+			chunk_appendf(msg, "invalid addr '%s'", addr);
+			goto out;
+		}
+	}
+	if (port) {
+		if (strl2irc(port, strlen(port), &new_port) != 0) {
+			chunk_appendf(msg, "provided port is not an integer");
+			goto out;
+		}
+		if (new_port < 0 || new_port > 65535) {
+			chunk_appendf(msg, "provided port is invalid");
+			goto out;
+		}
+		/* prevent the update of port to 0 if MAPPORTS are in use */
+		if ((s->flags & SRV_F_MAPPORTS) && new_port == 0) {
+			chunk_appendf(msg, "can't unset 'port' since MAPPORTS is in use");
+			goto out;
+		}
+	}
+out:
+	/* only apply the new values when no error message was produced */
+	if (msg->data)
+		return msg->area;
+	else {
+		if (addr)
+			s->check.addr = sk;
+		if (port)
+			s->check.port = new_port;
+	}
+	return NULL;
+}
+
+/*
+ * This function update a server's addr and port only for AF_INET and AF_INET6 families.
+ *
+ * Caller can pass its name through <updater> to get it integrated in the response message
+ * returned by the function.
+ *
+ * The function first does the following, in that order:
+ * - validates the new addr and/or port
+ * - checks if an update is required (new IP or port is different than current ones)
+ * - checks the update is allowed:
+ *   - don't switch from/to a family other than AF_INET4 and AF_INET6
+ *   - allow all changes if no CHECKS are configured
+ *   - if CHECK is configured:
+ *     - if switch to port map (SRV_F_MAPPORTS), ensure health check have their own ports
+ * - applies required changes to both ADDR and PORT if both 'required' and 'allowed'
+ *   conditions are met
+ *
+ * Returns a message describing what was (or was not) changed, stored in a
+ * trash chunk.
+ *
+ * Must be called with the server lock held.
+ */
+const char *srv_update_addr_port(struct server *s, const char *addr, const char *port, char *updater)
+{
+	union {
+		struct event_hdl_cb_data_server_inetaddr addr;
+		struct event_hdl_cb_data_server common;
+	} cb_data;
+	struct sockaddr_storage sa;
+	int ret;
+	char current_addr[INET6_ADDRSTRLEN];
+	uint16_t current_port, new_port = 0;
+	struct buffer *msg;
+	int ip_change = 0;
+	int port_change = 0;
+	uint8_t mapports = !!(s->flags & SRV_F_MAPPORTS);
+
+	msg = get_trash_chunk();
+	chunk_reset(msg);
+
+	if (addr) {
+		memset(&sa, 0, sizeof(struct sockaddr_storage));
+		if (str2ip2(addr, &sa, 0) == NULL) {
+			chunk_printf(msg, "Invalid addr '%s'", addr);
+			goto out;
+		}
+
+		/* changes are allowed on AF_INET* families only */
+		if ((sa.ss_family != AF_INET) && (sa.ss_family != AF_INET6)) {
+			chunk_printf(msg, "Update to families other than AF_INET and AF_INET6 supported only through configuration file");
+			goto out;
+		}
+
+		/* collecting data currently setup */
+		memset(current_addr, '\0', sizeof(current_addr));
+		ret = addr_to_str(&s->addr, current_addr, sizeof(current_addr));
+		/* changes are allowed on AF_INET* families only */
+		if ((ret != AF_INET) && (ret != AF_INET6)) {
+			chunk_printf(msg, "Update for the current server address family is only supported through configuration file");
+			goto out;
+		}
+
+		/* applying ADDR changes if required and allowed
+		 * ipcmp returns 0 when both ADDR are the same
+		 */
+		if (ipcmp(&s->addr, &sa, 0) == 0) {
+			chunk_appendf(msg, "no need to change the addr");
+			goto port;
+		}
+		ip_change = 1;
+
+		/* update report for caller */
+		chunk_printf(msg, "IP changed from '%s' to '%s'", current_addr, addr);
+	}
+
+ port:
+	if (port) {
+		char sign = '\0';
+		char *endptr;
+
+		if (addr)
+			chunk_appendf(msg, ", ");
+
+		/* collecting data currently setup */
+		current_port = s->svc_port;
+
+		/* remember a leading '+'/'-' which denotes a mapped port */
+		sign = *port;
+		errno = 0;
+		new_port = strtol(port, &endptr, 10);
+		if ((errno != 0) || (port == endptr)) {
+			chunk_appendf(msg, "problem converting port '%s' to an int", port);
+			goto out;
+		}
+
+		/* check if caller triggers a port mapped or offset */
+		if (sign == '-' || (sign == '+')) {
+			/* check if server currently uses port map */
+			if (!(s->flags & SRV_F_MAPPORTS)) {
+				/* check is configured
+				 * we're switching from a fixed port to a SRV_F_MAPPORTS (mapped) port
+				 * prevent PORT change if check doesn't have its dedicated port while switching
+				 * to port mapping */
+				if (!s->check.port) {
+					chunk_appendf(msg, "can't change <port> to port map because it is incompatible with current health check port configuration (use 'port' statement from the 'server' directive.");
+					goto out;
+				}
+				/* switch from fixed port to port map mandatorily triggers
+				 * a port change */
+				port_change = 1;
+			}
+			/* we're already using port maps */
+			else {
+				port_change = current_port != new_port;
+			}
+		}
+		/* fixed port */
+		else {
+			port_change = current_port != new_port;
+		}
+
+		/* applying PORT changes if required and update response message */
+		if (port_change) {
+			uint16_t new_port_print = new_port;
+
+			/* prepare message */
+			chunk_appendf(msg, "port changed from '");
+			if (s->flags & SRV_F_MAPPORTS)
+				chunk_appendf(msg, "+");
+			chunk_appendf(msg, "%d' to '", current_port);
+
+			if (sign == '-') {
+				mapports = 1;
+				chunk_appendf(msg, "%c", sign);
+				/* just use for result output */
+				new_port_print = -new_port_print;
+			}
+			else if (sign == '+') {
+				mapports = 1;
+				chunk_appendf(msg, "%c", sign);
+			}
+			else {
+				mapports = 0;
+			}
+
+			chunk_appendf(msg, "%d'", new_port_print);
+		}
+		else {
+			chunk_appendf(msg, "no need to change the port");
+		}
+	}
+
+out:
+	/* publish a single event covering both addr and port changes */
+	if (ip_change || port_change) {
+		_srv_event_hdl_prepare(&cb_data.common, s, 0);
+		_srv_event_hdl_prepare_inetaddr(&cb_data.addr, s,
+		                                ((ip_change) ? &sa : &s->addr),
+		                                ((port_change) ? new_port : s->svc_port), mapports,
+		                                1);
+
+		/* server_atomic_sync_task will apply the changes for us */
+		_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_INETADDR, cb_data, s);
+	}
+	if (updater)
+		chunk_appendf(msg, " by '%s'", updater);
+	chunk_appendf(msg, "\n");
+	return msg->area;
+}
+
+/*
+ * update server status based on result of SRV resolution
+ * returns:
+ *  0 if server status is updated
+ *  1 if server status has not changed
+ *
+ * Must be called with the server lock held.
+ */
+int srvrq_update_srv_status(struct server *s, int has_no_ip)
+{
+	/* only relevant for servers attached to a SRV request */
+	if (!s->srvrq)
+		return 1;
+
+	/* since this server has an IP, it can go back in production */
+	if (has_no_ip == 0) {
+		srv_clr_admin_flag(s, SRV_ADMF_RMAINT);
+		return 1;
+	}
+
+	/* already in resolution maintenance: nothing more to do */
+	if (s->next_admin & SRV_ADMF_RMAINT)
+		return 1;
+
+	/* no IP resolved: put the server in resolution maintenance */
+	srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_NOENT);
+	return 0;
+}
+
+/*
+ * update server status based on result of name resolution
+ * returns:
+ *  0 if server status is updated
+ *  1 if server status has not changed
+ *
+ * Must be called with the server lock held.
+ */
+int snr_update_srv_status(struct server *s, int has_no_ip)
+{
+	struct resolvers *resolvers = s->resolvers;
+	struct resolv_resolution *resolution = (s->resolv_requester ? s->resolv_requester->resolution : NULL);
+	int exp;
+
+	/* If resolution is NULL we're dealing with SRV records Additional records */
+	if (resolution == NULL)
+		return srvrq_update_srv_status(s, has_no_ip);
+
+	switch (resolution->status) {
+	case RSLV_STATUS_NONE:
+		/* status when HAProxy has just (re)started.
+		 * Nothing to do, since the task is already automatically started */
+		break;
+
+	case RSLV_STATUS_VALID:
+		/*
+		 * resume health checks
+		 * server will be turned back on if health check is safe
+		 */
+		if (has_no_ip) {
+			if (s->next_admin & SRV_ADMF_RMAINT)
+				return 1;
+			srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_NOIP);
+			return 0;
+		}
+
+		if (!(s->next_admin & SRV_ADMF_RMAINT))
+			return 1;
+		srv_clr_admin_flag(s, SRV_ADMF_RMAINT);
+		chunk_printf(&trash, "Server %s/%s administratively READY thanks to valid DNS answer",
+		             s->proxy->id, s->id);
+
+		ha_warning("%s.\n", trash.area);
+		send_log(s->proxy, LOG_NOTICE, "%s.\n", trash.area);
+		return 0;
+
+	case RSLV_STATUS_NX:
+		/* stop server if resolution is NX for a long enough period */
+		exp = tick_add(resolution->last_valid, resolvers->hold.nx);
+		if (!tick_is_expired(exp, now_ms))
+			break;
+
+		if (s->next_admin & SRV_ADMF_RMAINT)
+			return 1;
+		srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_NX);
+		return 0;
+
+	case RSLV_STATUS_TIMEOUT:
+		/* stop server if resolution is TIMEOUT for a long enough period */
+		exp = tick_add(resolution->last_valid, resolvers->hold.timeout);
+		if (!tick_is_expired(exp, now_ms))
+			break;
+
+		if (s->next_admin & SRV_ADMF_RMAINT)
+			return 1;
+		srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_TIMEOUT);
+		return 0;
+
+	case RSLV_STATUS_REFUSED:
+		/* stop server if resolution is REFUSED for a long enough period */
+		exp = tick_add(resolution->last_valid, resolvers->hold.refused);
+		if (!tick_is_expired(exp, now_ms))
+			break;
+
+	if (s->next_admin & SRV_ADMF_RMAINT)
+			return 1;
+		srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_REFUSED);
+		return 0;
+
+	default:
+		/* stop server if resolution failed for a long enough period */
+		exp = tick_add(resolution->last_valid, resolvers->hold.other);
+		if (!tick_is_expired(exp, now_ms))
+			break;
+
+		if (s->next_admin & SRV_ADMF_RMAINT)
+			return 1;
+		srv_set_admin_flag(s, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_DNS_UNSPEC);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Server Name Resolution valid response callback
+ * It expects:
+ *  - <nameserver>: the name server which answered the valid response
+ *  - <response>: buffer containing a valid DNS response
+ *  - <response_len>: size of <response>
+ * It performs the following actions:
+ *  - ignore response if current ip found and server family not met
+ *  - update with first new ip found if family is met and current IP is not found
+ * returns:
+ *  0 on error
+ *  1 when no error or safe ignore
+ *
+ * Must be called with server lock held
+ */
+int snr_resolution_cb(struct resolv_requester *requester, struct dns_counters *counters)
+{
+	struct server *s = NULL;
+	struct resolv_resolution *resolution = NULL;
+	void *serverip, *firstip;
+	short server_sin_family, firstip_sin_family;
+	int ret;
+	struct buffer *chk = get_trash_chunk();
+	int has_no_ip = 0;
+
+	s = objt_server(requester->owner);
+	if (!s)
+		return 1;
+
+	if (s->srvrq) {
+		/* If DNS resolution is disabled ignore it.
+		 * This is the case if the server was associated to
+		 * a SRV record and this record is now expired.
+		 */
+		if (s->flags & SRV_F_NO_RESOLUTION)
+			return 1;
+	}
+
+	resolution = (s->resolv_requester ? s->resolv_requester->resolution : NULL);
+	if (!resolution)
+		return 1;
+
+	/* initializing variables */
+	firstip = NULL;		/* pointer to the first valid response found */
+				/* it will be used as the new IP if a change is required */
+	firstip_sin_family = AF_UNSPEC;
+	serverip = NULL;	/* current server IP address */
+
+	/* initializing server IP pointer */
+	server_sin_family = s->addr.ss_family;
+	switch (server_sin_family) {
+	case AF_INET:
+		serverip = &((struct sockaddr_in *)&s->addr)->sin_addr.s_addr;
+		break;
+
+	case AF_INET6:
+		serverip = &((struct sockaddr_in6 *)&s->addr)->sin6_addr.s6_addr;
+		break;
+
+	case AF_UNSPEC:
+		/* server has no address yet; serverip stays NULL */
+		break;
+
+	default:
+		goto invalid;
+	}
+
+	/* look for the current server IP in the response, or pick the
+	 * first suitable one if it is absent
+	 */
+	ret = resolv_get_ip_from_response(&resolution->response, &s->resolv_opts,
+	                                  serverip, server_sin_family, &firstip,
+	                                  &firstip_sin_family, s);
+
+	switch (ret) {
+	case RSLV_UPD_NO:
+		/* current IP is still valid: nothing to change */
+		goto update_status;
+
+	case RSLV_UPD_SRVIP_NOT_FOUND:
+		goto save_ip;
+
+	case RSLV_UPD_NO_IP_FOUND:
+		has_no_ip = 1;
+		goto update_status;
+
+	case RSLV_UPD_NAME_ERROR:
+		/* update resolution status to OTHER error type */
+		resolution->status = RSLV_STATUS_OTHER;
+		has_no_ip = 1;
+		goto update_status;
+
+	default:
+		has_no_ip = 1;
+		goto invalid;
+
+	}
+
+ save_ip:
+	if (counters) {
+		counters->app.resolver.update++;
+		/* save the first ip we found */
+		chunk_printf(chk, "%s/%s", counters->pid, counters->id);
+	}
+	else
+		chunk_printf(chk, "DNS cache");
+	srv_update_addr(s, firstip, firstip_sin_family, (char *) chk->area);
+
+ update_status:
+	/* wipe the address when the server was just put in RMAINT for
+	 * lack of a resolved IP
+	 */
+	if (!snr_update_srv_status(s, has_no_ip) && has_no_ip)
+		memset(&s->addr, 0, sizeof(s->addr));
+	return 1;
+
+ invalid:
+	if (counters) {
+		counters->app.resolver.invalid++;
+		goto update_status;
+	}
+	if (!snr_update_srv_status(s, has_no_ip) && has_no_ip)
+		memset(&s->addr, 0, sizeof(s->addr));
+	return 0;
+}
+
+/*
+ * SRV record error management callback
+ * returns:
+ *  0 if we can trash answer items.
+ *  1 when safely ignored and we must keep answer items
+ *
+ * Grabs the server's lock.
+ */
+int srvrq_resolution_error_cb(struct resolv_requester *requester, int error_code)
+{
+	struct resolv_srvrq *srvrq;
+	struct resolv_resolution *res;
+	struct resolvers *resolvers;
+	int exp;
+
+	/* SRV records */
+	srvrq = objt_resolv_srvrq(requester->owner);
+	if (!srvrq)
+		return 0;
+
+	resolvers = srvrq->resolvers;
+	res = requester->resolution;
+
+	/* keep the answer items while the error has not lasted longer than
+	 * the configured hold period for its status
+	 */
+	switch (res->status) {
+
+	case RSLV_STATUS_NX:
+		/* stop server if resolution is NX for a long enough period */
+		exp = tick_add(res->last_valid, resolvers->hold.nx);
+		if (!tick_is_expired(exp, now_ms))
+			return 1;
+		break;
+
+	case RSLV_STATUS_TIMEOUT:
+		/* stop server if resolution is TIMEOUT for a long enough period */
+		exp = tick_add(res->last_valid, resolvers->hold.timeout);
+		if (!tick_is_expired(exp, now_ms))
+			return 1;
+		break;
+
+	case RSLV_STATUS_REFUSED:
+		/* stop server if resolution is REFUSED for a long enough period */
+		exp = tick_add(res->last_valid, resolvers->hold.refused);
+		if (!tick_is_expired(exp, now_ms))
+			return 1;
+		break;
+
+	default:
+		/* stop server if resolution failed for a long enough period */
+		exp = tick_add(res->last_valid, resolvers->hold.other);
+		if (!tick_is_expired(exp, now_ms))
+			return 1;
+	}
+
+	/* Remove any associated server ref */
+	resolv_detach_from_resolution_answer_items(res,  requester);
+
+	return 0;
+}
+
+/*
+ * Server Name Resolution error management callback
+ * returns:
+ *  0 if we can trash answer items.
+ *  1 when safely ignored and we must keep answer items
+ *
+ * Grabs the server's lock.
+ */
+int snr_resolution_error_cb(struct resolv_requester *requester, int error_code)
+{
+	struct server *s;
+
+	s = objt_server(requester->owner);
+	if (!s)
+		return 0;
+
+	HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+	/* status changed (server put in RMAINT): clear its address and
+	 * drop the answer-item reference
+	 */
+	if (!snr_update_srv_status(s, 1)) {
+		memset(&s->addr, 0, sizeof(s->addr));
+		HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+		resolv_detach_from_resolution_answer_items(requester->resolution, requester);
+		return 0;
+	}
+	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+
+	return 1;
+}
+
+/*
+ * Function to check if <ip> is already affected to a server in the backend
+ * which owns <srv> and is up.
+ * It returns a pointer to the first server found or NULL if <ip> is not yet
+ * assigned.
+ *
+ * Must be called with server lock held
+ */
+struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family)
+{
+	struct server *tmpsrv;
+	struct proxy *be;
+
+	if (!srv)
+		return NULL;
+
+	be = srv->proxy;
+	for (tmpsrv = be->srv; tmpsrv; tmpsrv = tmpsrv->next) {
+		/* we found the current server is the same, ignore it */
+		if (srv == tmpsrv)
+			continue;
+
+		/* We want to compare the IP in the record with the IP of the servers in the
+		 * same backend, only if:
+		 *   * DNS resolution is enabled on the server
+		 *   * the hostname used for the resolution by our server is the same than the
+		 *     one used for the server found in the backend
+		 *   * the server found in the backend is not our current server
+		 */
+		HA_SPIN_LOCK(SERVER_LOCK, &tmpsrv->lock);
+		if ((tmpsrv->hostname_dn == NULL) ||
+		    (srv->hostname_dn_len != tmpsrv->hostname_dn_len) ||
+		    (strcasecmp(srv->hostname_dn, tmpsrv->hostname_dn) != 0) ||
+		    (srv->puid == tmpsrv->puid)) {
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			continue;
+		}
+
+		/* If the server has been taken down, don't consider it */
+		if (tmpsrv->next_admin & SRV_ADMF_RMAINT) {
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			continue;
+		}
+
+		/* At this point, we have 2 different servers using the same DNS hostname
+		 * for their respective resolution.
+		 */
+		if (*ip_family == tmpsrv->addr.ss_family &&
+		    ((tmpsrv->addr.ss_family == AF_INET &&
+		      memcmp(ip, &((struct sockaddr_in *)&tmpsrv->addr)->sin_addr, 4) == 0) ||
+		     (tmpsrv->addr.ss_family == AF_INET6 &&
+		      memcmp(ip, &((struct sockaddr_in6 *)&tmpsrv->addr)->sin6_addr, 16) == 0))) {
+			HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+			return tmpsrv;
+		}
+		HA_SPIN_UNLOCK(SERVER_LOCK, &tmpsrv->lock);
+	}
+
+
+	return NULL;
+}
+
+/* Sets the server's address (srv->addr) from srv->hostname using the libc's
+ * resolver. This is suited for initial address configuration. Returns 0 on
+ * success otherwise a non-zero error code. In case of error, *err_code, if
+ * not NULL, is filled up.
+ */
+int srv_set_addr_via_libc(struct server *srv, int *err_code)
+{
+	struct sockaddr_storage new_addr;
+
+	memset(&new_addr, 0, sizeof(new_addr));
+
+	/* Use the preferred family, if configured */
+	new_addr.ss_family = srv->addr.ss_family;
+	/* third argument 1 allows a blocking libc resolution here */
+	if (str2ip2(srv->hostname, &new_addr, 1) == NULL) {
+		if (err_code)
+			*err_code |= ERR_WARN;
+		return 1;
+	}
+	_srv_set_inetaddr(srv, &new_addr);
+	return 0;
+}
+
+/* Set the server's FQDN (->hostname) from <hostname>.
+ * Returns -1 if failed, 0 if not.
+ *
+ * Must be called with the server lock held.
+ */
+int srv_set_fqdn(struct server *srv, const char *hostname, int resolv_locked)
+{
+	struct resolv_resolution *resolution;
+	char                  *hostname_dn;
+	int                    hostname_len, hostname_dn_len;
+
+	/* Note that the server lock is already held. */
+	if (!srv->resolvers)
+		return -1;
+
+	if (!resolv_locked)
+		HA_SPIN_LOCK(DNS_LOCK, &srv->resolvers->lock);
+	/* run time DNS/SRV resolution was not active for this server
+	 * and we can't enable it at run time for now.
+	 */
+	if (!srv->resolv_requester && !srv->srvrq)
+		goto err;
+
+	/* convert the hostname into DNS label format in the trash buffer */
+	chunk_reset(&trash);
+	hostname_len    = strlen(hostname);
+	hostname_dn     = trash.area;
+	hostname_dn_len = resolv_str_to_dn_label(hostname, hostname_len,
+	                                         hostname_dn, trash.size);
+	if (hostname_dn_len == -1)
+		goto err;
+
+	/* nothing to do if the new FQDN matches the ongoing resolution */
+	resolution = (srv->resolv_requester ? srv->resolv_requester->resolution : NULL);
+	if (resolution &&
+	    resolution->hostname_dn &&
+	    resolution->hostname_dn_len == hostname_dn_len &&
+	    strcasecmp(resolution->hostname_dn, hostname_dn) == 0)
+		goto end;
+
+	/* drop the previous resolution before installing the new name */
+	resolv_unlink_resolution(srv->resolv_requester);
+
+	free(srv->hostname);
+	free(srv->hostname_dn);
+	srv->hostname        = strdup(hostname);
+	srv->hostname_dn     = strdup(hostname_dn);
+	srv->hostname_dn_len = hostname_dn_len;
+	if (!srv->hostname || !srv->hostname_dn)
+		goto err;
+
+	if (srv->flags & SRV_F_NO_RESOLUTION)
+		goto end;
+
+	if (resolv_link_resolution(srv, OBJ_TYPE_SERVER, 1) == -1)
+		goto err;
+
+  end:
+	if (!resolv_locked)
+		HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
+	return 0;
+
+  err:
+	if (!resolv_locked)
+		HA_SPIN_UNLOCK(DNS_LOCK, &srv->resolvers->lock);
+	return -1;
+}
+
+/* Sets the server's address (srv->addr) from srv->lastaddr which was filled
+ * from the state file. This is suited for initial address configuration.
+ * Returns 0 on success otherwise a non-zero error code. In case of error,
+ * *err_code, if not NULL, is filled up.
+ */
+static int srv_apply_lastaddr(struct server *srv, int *err_code)
+{
+ struct sockaddr_storage new_addr;
+
+ memset(&new_addr, 0, sizeof(new_addr));
+
+ /* Use the preferred family, if configured */
+ new_addr.ss_family = srv->addr.ss_family;
+ /* third argument zero: pure address parsing, no resolver lookup —
+ * lastaddr comes from the state file and must already be numeric.
+ */
+ if (!str2ip2(srv->lastaddr, &new_addr, 0)) {
+ if (err_code)
+ *err_code |= ERR_WARN;
+ return 1;
+ }
+ _srv_set_inetaddr(srv, &new_addr);
+ return 0;
+}
+
+/* Tries each configured init-addr method in order until one succeeds in
+ * setting the server's initial address. Returns 0 if no error, otherwise a
+ * combination of ERR_* flags.
+ */
+static int srv_iterate_initaddr(struct server *srv)
+{
+ char *name = srv->hostname;
+ int return_code = 0;
+ int err_code;
+ unsigned int methods;
+
+ /* If no addr and no hostname set, get the name from the DNS SRV request */
+ if (!name && srv->srvrq)
+ name = srv->srvrq->name;
+
+ methods = srv->init_addr_methods;
+ if (!methods) {
+ /* otherwise default to "last,libc" */
+ srv_append_initaddr(&methods, SRV_IADDR_LAST);
+ srv_append_initaddr(&methods, SRV_IADDR_LIBC);
+ if (srv->resolvers_id) {
+ /* dns resolution is configured, add "none" to not fail on startup */
+ srv_append_initaddr(&methods, SRV_IADDR_NONE);
+ }
+ }
+
+ /* "-dr" : always append "none" so that server addresses resolution
+ * failures are silently ignored, this is convenient to validate some
+ * configs out of their environment.
+ */
+ if (global.tune.options & GTUNE_RESOLVE_DONTFAIL)
+ srv_append_initaddr(&methods, SRV_IADDR_NONE);
+
+ /* consume methods one by one; the first one that applies an address
+ * jumps to <out>, otherwise accumulate the error flags.
+ */
+ while (methods) {
+ err_code = 0;
+ switch (srv_get_next_initaddr(&methods)) {
+ case SRV_IADDR_LAST:
+ if (!srv->lastaddr)
+ continue;
+ if (srv_apply_lastaddr(srv, &err_code) == 0)
+ goto out;
+ return_code |= err_code;
+ break;
+
+ case SRV_IADDR_LIBC:
+ if (!srv->hostname)
+ continue;
+ if (srv_set_addr_via_libc(srv, &err_code) == 0)
+ goto out;
+ return_code |= err_code;
+ break;
+
+ case SRV_IADDR_NONE:
+ /* no address: start the server in resolution maintenance */
+ srv_set_admin_flag(srv, SRV_ADMF_RMAINT, SRV_ADM_STCHGC_NONE);
+ if (return_code) {
+ ha_notice("could not resolve address '%s', disabling server.\n",
+ name);
+ }
+ return return_code;
+
+ case SRV_IADDR_IP:
+ /* fall back to the address given in the configuration */
+ _srv_set_inetaddr(srv, &srv->init_addr);
+ if (return_code) {
+ ha_warning("could not resolve address '%s', falling back to configured address.\n",
+ name);
+ }
+ goto out;
+
+ default: /* unhandled method */
+ break;
+ }
+ }
+
+ if (!return_code)
+ ha_alert("no method found to resolve address '%s'.\n", name);
+ else
+ ha_alert("could not resolve address '%s'.\n", name);
+
+ return_code |= ERR_ALERT | ERR_FATAL;
+ return return_code;
+out:
+ /* an address was applied: refresh derived settings */
+ srv_set_dyncookie(srv);
+ srv_set_addr_desc(srv, 1);
+ return return_code;
+}
+
+/*
+ * This function parses all backends and all servers within each backend
+ * and performs servers' addr resolution based on information provided by:
+ *   - configuration file
+ *   - server-state file (states provided by an 'old' haproxy process)
+ *
+ * Returns 0 if no error, otherwise, a combination of ERR_ flags.
+ */
+int srv_init_addr(void)
+{
+ struct proxy *curproxy;
+ int return_code = 0;
+
+ curproxy = proxies_list;
+ while (curproxy) {
+ struct server *srv;
+
+ /* servers are in backend only */
+ if (!(curproxy->cap & PR_CAP_BE) || (curproxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)))
+ goto srv_init_addr_next;
+
+ for (srv = curproxy->srv; srv; srv = srv->next) {
+ /* attach config file/line context so messages point at the server line */
+ set_usermsgs_ctx(srv->conf.file, srv->conf.line, &srv->obj_type);
+ if (srv->hostname || srv->srvrq)
+ return_code |= srv_iterate_initaddr(srv);
+ reset_usermsgs_ctx();
+ }
+
+ srv_init_addr_next:
+ curproxy = curproxy->next;
+ }
+
+ return return_code;
+}
+
+/* Updates the FQDN of <server> to <fqdn> after validating it, and returns a
+ * human-readable status message built in a trash chunk (valid until the next
+ * trash rotation). <updater> is appended to the message when non-NULL.
+ * <resolv_locked> is passed through to srv_set_fqdn().
+ *
+ * Must be called with the server lock held.
+ */
+const char *srv_update_fqdn(struct server *server, const char *fqdn, const char *updater, int resolv_locked)
+{
+
+ struct buffer *msg;
+
+ msg = get_trash_chunk();
+ chunk_reset(msg);
+
+ if (server->hostname && strcmp(fqdn, server->hostname) == 0) {
+ chunk_appendf(msg, "no need to change the FDQN");
+ goto out;
+ }
+
+ if (strlen(fqdn) > DNS_MAX_NAME_SIZE || invalid_domainchar(fqdn)) {
+ chunk_appendf(msg, "invalid fqdn '%s'", fqdn);
+ goto out;
+ }
+
+ chunk_appendf(msg, "%s/%s changed its FQDN from %s to %s",
+ server->proxy->id, server->id, server->hostname, fqdn);
+
+ if (srv_set_fqdn(server, fqdn, resolv_locked) < 0) {
+ /* replace the optimistic message composed above */
+ chunk_reset(msg);
+ chunk_appendf(msg, "could not update %s/%s FQDN",
+ server->proxy->id, server->id);
+ goto out;
+ }
+
+ /* Flag as FQDN set from stats socket. */
+ server->next_admin |= SRV_ADMF_HMAINT;
+
+ out:
+ if (updater)
+ chunk_appendf(msg, " by '%s'", updater);
+ chunk_appendf(msg, "\n");
+
+ return msg->area;
+}
+
+
+/* Expects to find a backend and a server in <arg> under the form <backend>/<server>,
+ * and returns the pointer to the server. Otherwise, display adequate error messages
+ * on the CLI, sets the CLI's state to CLI_ST_PRINT and returns NULL. This is only
+ * used for CLI commands requiring a server name.
+ * Important: the <arg> is modified to remove the '/'.
+ */
+struct server *cli_find_server(struct appctx *appctx, char *arg)
+{
+ struct proxy *px;
+ struct server *sv;
+ struct ist be_name, sv_name = ist(arg);
+
+ /* istsplit() truncates <arg> in place at the '/' */
+ be_name = istsplit(&sv_name, '/');
+ if (!istlen(sv_name)) {
+ cli_err(appctx, "Require 'backend/server'.");
+ return NULL;
+ }
+
+ if (!(px = proxy_be_by_name(ist0(be_name)))) {
+ cli_err(appctx, "No such backend.");
+ return NULL;
+ }
+ if (!(sv = server_find_by_name(px, ist0(sv_name)))) {
+ cli_err(appctx, "No such server.");
+ return NULL;
+ }
+
+ if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) {
+ cli_err(appctx, "Proxy is disabled.\n");
+ return NULL;
+ }
+
+ return sv;
+}
+
+
+/* Parse a "set server" command: dispatches on args[3] (the setting name) and
+ * applies the change under the server lock. The "fqdn" setting additionally
+ * takes the resolvers lock before the server lock. Always returns 1.
+ *
+ * grabs the server lock */
+static int cli_parse_set_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+ const char *warning;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ if (strcmp(args[3], "weight") == 0) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = server_parse_weight_change_request(sv, args[4]);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ if (warning)
+ cli_err(appctx, warning);
+ }
+ else if (strcmp(args[3], "state") == 0) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (strcmp(args[4], "ready") == 0)
+ srv_adm_set_ready(sv);
+ else if (strcmp(args[4], "drain") == 0)
+ srv_adm_set_drain(sv);
+ else if (strcmp(args[4], "maint") == 0)
+ srv_adm_set_maint(sv);
+ else
+ cli_err(appctx, "'set server <srv> state' expects 'ready', 'drain' and 'maint'.\n");
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ }
+ else if (strcmp(args[3], "health") == 0) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (sv->track)
+ cli_err(appctx, "cannot change health on a tracking server.\n");
+ else if (strcmp(args[4], "up") == 0) {
+ /* force health to max so the server is immediately usable */
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_CLI);
+ }
+ else if (strcmp(args[4], "stopping") == 0) {
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_stopping(sv, SRV_OP_STCHGC_CLI);
+ }
+ else if (strcmp(args[4], "down") == 0) {
+ sv->check.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_CLI);
+ }
+ else
+ cli_err(appctx, "'set server <srv> health' expects 'up', 'stopping', or 'down'.\n");
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ }
+ else if (strcmp(args[3], "agent") == 0) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (!(sv->agent.state & CHK_ST_ENABLED))
+ cli_err(appctx, "agent checks are not enabled on this server.\n");
+ else if (strcmp(args[4], "up") == 0) {
+ sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_CLI);
+ }
+ else if (strcmp(args[4], "down") == 0) {
+ sv->agent.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_CLI);
+ }
+ else
+ cli_err(appctx, "'set server <srv> agent' expects 'up' or 'down'.\n");
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ }
+ else if (strcmp(args[3], "agent-addr") == 0) {
+ char *addr = NULL;
+ char *port = NULL;
+ if (strlen(args[4]) == 0) {
+ cli_err(appctx, "set server <b>/<s> agent-addr requires"
+ " an address and optionally a port.\n");
+ goto out;
+ }
+ addr = args[4];
+ /* optional trailing "port <p>" pair */
+ if (strcmp(args[5], "port") == 0)
+ port = args[6];
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = srv_update_agent_addr_port(sv, addr, port);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ }
+ else if (strcmp(args[3], "agent-port") == 0) {
+ char *port = NULL;
+ if (strlen(args[4]) == 0) {
+ cli_err(appctx, "set server <b>/<s> agent-port requires"
+ " a port.\n");
+ goto out;
+ }
+ port = args[4];
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = srv_update_agent_addr_port(sv, NULL, port);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ }
+ else if (strcmp(args[3], "agent-send") == 0) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (!(sv->agent.state & CHK_ST_ENABLED))
+ cli_err(appctx, "agent checks are not enabled on this server.\n");
+ else {
+ if (!set_srv_agent_send(sv, args[4]))
+ cli_err(appctx, "cannot allocate memory for new string.\n");
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ }
+ else if (strcmp(args[3], "check-addr") == 0) {
+ char *addr = NULL;
+ char *port = NULL;
+ if (strlen(args[4]) == 0) {
+ cli_err(appctx, "set server <b>/<s> check-addr requires"
+ " an address and optionally a port.\n");
+ goto out;
+ }
+ addr = args[4];
+ if (strcmp(args[5], "port") == 0)
+ port = args[6];
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = srv_update_check_addr_port(sv, addr, port);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ }
+ else if (strcmp(args[3], "check-port") == 0) {
+ char *port = NULL;
+ if (strlen(args[4]) == 0) {
+ cli_err(appctx, "set server <b>/<s> check-port requires"
+ " a port.\n");
+ goto out;
+ }
+ port = args[4];
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = srv_update_check_addr_port(sv, NULL, port);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ }
+ else if (strcmp(args[3], "addr") == 0) {
+ char *addr = NULL;
+ char *port = NULL;
+ if (strlen(args[4]) == 0) {
+ cli_err(appctx, "set server <b>/<s> addr requires an address and optionally a port.\n");
+ goto out;
+ }
+ else {
+ addr = args[4];
+ }
+ if (strcmp(args[5], "port") == 0) {
+ port = args[6];
+ }
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ warning = srv_update_addr_port(sv, addr, port, "stats socket command");
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ /* a manually set address clears resolution maintenance */
+ srv_clr_admin_flag(sv, SRV_ADMF_RMAINT);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ }
+ else if (strcmp(args[3], "fqdn") == 0) {
+ if (!*args[4]) {
+ cli_err(appctx, "set server <b>/<s> fqdn requires a FQDN.\n");
+ goto out;
+ }
+ if (!sv->resolvers) {
+ cli_err(appctx, "set server <b>/<s> fqdn failed because no resolution is configured.\n");
+ goto out;
+ }
+ if (sv->srvrq) {
+ cli_err(appctx, "set server <b>/<s> fqdn failed because SRV resolution is configured.\n");
+ goto out;
+ }
+ /* lock order: resolvers (DNS) lock first, then the server lock */
+ HA_SPIN_LOCK(DNS_LOCK, &sv->resolvers->lock);
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ /* ensure runtime resolver will process this new fqdn */
+ if (sv->flags & SRV_F_NO_RESOLUTION) {
+ sv->flags &= ~SRV_F_NO_RESOLUTION;
+ }
+ warning = srv_update_fqdn(sv, args[4], "stats socket command", 1);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ HA_SPIN_UNLOCK(DNS_LOCK, &sv->resolvers->lock);
+ if (warning)
+ cli_msg(appctx, LOG_WARNING, warning);
+ }
+ else if (strcmp(args[3], "ssl") == 0) {
+#ifdef USE_OPENSSL
+ if (sv->flags & SRV_F_DYNAMIC) {
+ cli_err(appctx, "'set server <srv> ssl' not supported on dynamic servers\n");
+ goto out;
+ }
+
+ if (sv->ssl_ctx.ctx == NULL) {
+ cli_err(appctx, "'set server <srv> ssl' cannot be set. "
+ " default-server should define ssl settings\n");
+ goto out;
+ }
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ if (strcmp(args[4], "on") == 0) {
+ srv_set_ssl(sv, 1);
+ } else if (strcmp(args[4], "off") == 0) {
+ srv_set_ssl(sv, 0);
+ } else {
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ cli_err(appctx, "'set server <srv> ssl' expects 'on' or 'off'.\n");
+ goto out;
+ }
+ /* drop idle connections negotiated with the previous SSL setting */
+ srv_cleanup_connections(sv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ cli_msg(appctx, LOG_NOTICE, "server ssl setting updated.\n");
+#else
+ cli_msg(appctx, LOG_NOTICE, "server ssl setting not supported.\n");
+#endif
+ } else {
+ cli_err(appctx,
+ "usage: set server <backend>/<server> "
+ "addr | agent | agent-addr | agent-port | agent-send | "
+ "check-addr | check-port | fqdn | health | ssl | "
+ "state | weight\n");
+ }
+ out:
+ return 1;
+}
+
+/* Parse a "get weight" command and print the server's current and initial
+ * weights. Returns 1 when done, 0 when output must be retried later.
+ */
+static int cli_parse_get_weight(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct proxy *be;
+ struct server *sv;
+ struct ist be_name, sv_name = ist(args[2]);
+
+ be_name = istsplit(&sv_name, '/');
+ if (!istlen(sv_name))
+ return cli_err(appctx, "Require 'backend/server'.");
+
+ if (!(be = proxy_be_by_name(ist0(be_name))))
+ return cli_err(appctx, "No such backend.");
+ if (!(sv = server_find_by_name(be, ist0(sv_name))))
+ return cli_err(appctx, "No such server.");
+
+ /* return server's effective weight at the moment */
+ snprintf(trash.area, trash.size, "%d (initial %d)\n", sv->uweight,
+ sv->iweight);
+ /* -1 means the output buffer is full: ask to be called again */
+ if (applet_putstr(appctx, trash.area) == -1)
+ return 0;
+ return 1;
+}
+
+/* Parse a "set weight" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_set_weight(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+ const char *warning;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+
+ /* the helper validates and applies the weight string (e.g. "10" or "50%") */
+ warning = server_parse_weight_change_request(sv, args[3]);
+ if (warning)
+ cli_err(appctx, warning);
+
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+
+ return 1;
+}
+
+/* parse a "set maxconn server" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_set_maxconn_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+ const char *warning;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ /* note: server name is in args[3] here ("set maxconn server <b>/<s>") */
+ sv = cli_find_server(appctx, args[3]);
+ if (!sv)
+ return 1;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+
+ warning = server_parse_maxconn_change_request(sv, args[4]);
+ if (warning)
+ cli_err(appctx, warning);
+
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+
+ return 1;
+}
+
+/* parse a "disable agent" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_disable_agent(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ /* only clear the enabled bit; the agent stays configured and can be
+ * re-enabled later via "enable agent".
+ */
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->agent.state &= ~CHK_ST_ENABLED;
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* parse a "disable health" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_disable_health(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ /* only clear the enabled bit; checks remain configured and may be
+ * re-enabled later via "enable health".
+ */
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->check.state &= ~CHK_ST_ENABLED;
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* parse a "disable server" command. Puts the server in administrative
+ * maintenance. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_disable_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ srv_adm_set_maint(sv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* parse a "enable agent" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_enable_agent(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ /* an agent can only be enabled if it was declared in the configuration */
+ if (!(sv->agent.state & CHK_ST_CONFIGURED))
+ return cli_err(appctx, "Agent was not configured on this server, cannot enable.\n");
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->agent.state |= CHK_ST_ENABLED;
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* parse a "enable health" command. It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_enable_health(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ /* a health check can only be enabled if declared in the configuration */
+ if (!(sv->check.state & CHK_ST_CONFIGURED))
+ return cli_err(appctx, "Health check was not configured on this server, cannot enable.\n");
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ sv->check.state |= CHK_ST_ENABLED;
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* parse a "enable server" command. Takes the server out of maintenance.
+ * It always returns 1.
+ *
+ * Grabs the server lock.
+ */
+static int cli_parse_enable_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[2]);
+ if (!sv)
+ return 1;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ srv_adm_set_ready(sv);
+ /* with dynamic cookies, re-check for a cookie collision now that the
+ * server becomes usable again (only when the cookie was not set manually).
+ */
+ if (!(sv->flags & SRV_F_COOKIESET)
+ && (sv->proxy->ck_opts & PR_CK_DYNAMIC) &&
+ sv->cookie)
+ srv_check_for_dup_dyncookie(sv);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
+/* Allocates data structure related to load balancing for the server <sv>. It
+ * is only required for dynamic servers.
+ *
+ * At the moment, the server lock is not used as this function is only called
+ * for a dynamic server not yet registered.
+ *
+ * Returns 1 on success, 0 on allocation failure.
+ */
+static int srv_alloc_lb(struct server *sv, struct proxy *be)
+{
+ int node;
+
+ sv->lb_tree = (sv->flags & SRV_F_BACKUP) ?
+ &be->lbprm.chash.bck : &be->lbprm.chash.act;
+ sv->lb_nodes_tot = sv->uweight * BE_WEIGHT_SCALE;
+ sv->lb_nodes_now = 0;
+
+ /* the per-weight node array is only needed by "random" (rr/random) and
+ * consistent-hash algorithms.
+ */
+ if (((be->lbprm.algo & (BE_LB_KIND | BE_LB_PARM)) == (BE_LB_KIND_RR | BE_LB_RR_RANDOM)) ||
+ ((be->lbprm.algo & (BE_LB_KIND | BE_LB_HASH_TYPE)) == (BE_LB_KIND_HI | BE_LB_HASH_CONS))) {
+ sv->lb_nodes = calloc(sv->lb_nodes_tot, sizeof(*sv->lb_nodes));
+
+ if (!sv->lb_nodes)
+ return 0;
+
+ /* one hash node per weight unit, keyed from the server's puid */
+ for (node = 0; node < sv->lb_nodes_tot; node++) {
+ sv->lb_nodes[node].server = sv;
+ sv->lb_nodes[node].node.key = full_hash(sv->puid * SRV_EWGHT_RANGE + node);
+ }
+ }
+
+ return 1;
+}
+
+/* updates the server's weight during a warmup stage. Once the final weight is
+ * reached, the task automatically stops. Note that any server status change
+ * must have updated s->last_change accordingly.
+ */
+static struct task *server_warmup(struct task *t, void *context, unsigned int state)
+{
+ struct server *s = context;
+
+ /* by default, plan on stopping the task */
+ t->expire = TICK_ETERNITY;
+ if ((s->next_admin & SRV_ADMF_MAINT) ||
+ (s->next_state != SRV_ST_STARTING))
+ return t;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+
+ /* recalculate the weights and update the state */
+ server_recalc_eweight(s, 1);
+
+ /* probably that we can refill this server with a bit more connections */
+ pendconn_grab_from_px(s);
+
+ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+
+ /* get back there in 1 second or 1/20th of the slowstart interval,
+ * whichever is greater, resulting in small 5% steps.
+ */
+ if (s->next_state == SRV_ST_STARTING)
+ t->expire = tick_add(now_ms, MS_TO_TICKS(MAX(1000, s->slowstart / 20)));
+ return t;
+}
+
+/* Allocate the slowstart task if the server is configured with a slowstart
+ * timer. If server next_state is SRV_ST_STARTING, the task is scheduled.
+ *
+ * Returns 0 on success else non-zero.
+ */
+static int init_srv_slowstart(struct server *srv)
+{
+ struct task *t;
+
+ if (srv->slowstart) {
+ if ((t = task_new_anywhere()) == NULL) {
+ ha_alert("Cannot activate slowstart for server %s/%s: out of memory.\n", srv->proxy->id, srv->id);
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ /* We need a warmup task that will be called when the server
+ * state switches from down to up.
+ */
+ srv->warmup = t;
+ t->process = server_warmup;
+ t->context = srv;
+
+ /* server can already be in STARTING state at this point —
+ * presumably restored from a state file; schedule the warmup
+ * task so the ramp-up resumes (NOTE(review): confirm origin).
+ */
+ if (srv->next_state == SRV_ST_STARTING) {
+ task_schedule(srv->warmup,
+ tick_add(now_ms,
+ MS_TO_TICKS(MAX(1000, (ns_to_sec(now_ns) - srv->last_change)) / 20)));
+ }
+ }
+
+ return ERR_NONE;
+}
+REGISTER_POST_SERVER_CHECK(init_srv_slowstart);
+
+/* Memory allocation and initialization of the per_thr field.
+ * Returns 0 if the field has been successfully initialized, -1 on failure.
+ * On failure, any allocation that did succeed is kept in place — presumably
+ * released later with the rest of the server (NOTE(review): confirm callers
+ * go through srv_drop()/free_server on error).
+ */
+int srv_init_per_thr(struct server *srv)
+{
+ int i;
+
+ srv->per_thr = calloc(global.nbthread, sizeof(*srv->per_thr));
+ srv->per_tgrp = calloc(global.nbtgroups, sizeof(*srv->per_tgrp));
+ if (!srv->per_thr || !srv->per_tgrp)
+ return -1;
+
+ for (i = 0; i < global.nbthread; i++) {
+ srv->per_thr[i].idle_conns = EB_ROOT;
+ srv->per_thr[i].safe_conns = EB_ROOT;
+ srv->per_thr[i].avail_conns = EB_ROOT;
+ MT_LIST_INIT(&srv->per_thr[i].streams);
+
+ LIST_INIT(&srv->per_thr[i].idle_conn_list);
+ }
+
+ return 0;
+}
+
+/* Parse a "add server" command: parses and fully initializes a dynamic
+ * server, attaches it to its backend and publishes the ADD event. Runs
+ * under thread isolation for the whole attach sequence.
+ * Returns 0 if the server has been successfully initialized, 1 on failure.
+ */
+static int cli_parse_add_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct proxy *be;
+ struct server *srv;
+ char *be_name, *sv_name;
+ int errcode, argc;
+ int next_id;
+ const int parse_flags = SRV_PARSE_DYNAMIC|SRV_PARSE_PARSE_ADDR;
+
+ usermsgs_clr("CLI");
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ ++args;
+
+ sv_name = be_name = args[1];
+ /* split backend/server arg */
+ while (*sv_name && *(++sv_name)) {
+ if (*sv_name == '/') {
+ *sv_name = '\0';
+ ++sv_name;
+ break;
+ }
+ }
+
+ if (!*sv_name)
+ return cli_err(appctx, "Require 'backend/server'.");
+
+ be = proxy_be_by_name(be_name);
+ if (!be)
+ return cli_err(appctx, "No such backend.");
+
+ if (!(be->lbprm.algo & BE_LB_PROP_DYN)) {
+ cli_err(appctx, "Backend must use a dynamic load balancing to support dynamic servers.");
+ return 1;
+ }
+
+ if (be->mode == PR_MODE_SYSLOG) {
+ cli_err(appctx," Dynamic servers cannot be used with log backends.");
+ return 1;
+ }
+
+ /* At this point, some operations might not be thread-safe anymore. This
+ * might be the case for parsing handlers which were designed to run
+ * only at the starting stage on single-thread mode.
+ *
+ * Activate thread isolation to ensure thread-safety.
+ */
+ thread_isolate();
+
+ args[1] = sv_name;
+ errcode = _srv_parse_init(&srv, args, &argc, be, parse_flags);
+ if (errcode)
+ goto out;
+
+ /* consume the remaining "server" keywords one by one */
+ while (*args[argc]) {
+ errcode = _srv_parse_kw(srv, args, &argc, be, parse_flags);
+
+ if (errcode)
+ goto out;
+ }
+
+ errcode = _srv_parse_finalize(args, argc, srv, be, parse_flags);
+ if (errcode)
+ goto out;
+
+ /* A dynamic server does not currently support resolution.
+ *
+ * Initialize it explicitly to the "none" method to ensure no
+ * resolution will ever be executed.
+ */
+ srv->init_addr_methods = SRV_IADDR_NONE;
+
+ if (srv->mux_proto) {
+ int proto_mode = conn_pr_mode_to_proto_mode(be->mode);
+ const struct mux_proto_list *mux_ent;
+
+ /* verify the requested mux is compatible with the backend mode */
+ mux_ent = conn_get_best_mux_entry(srv->mux_proto->token, PROTO_SIDE_BE, proto_mode);
+
+ if (!mux_ent || !isteq(mux_ent->token, srv->mux_proto->token)) {
+ ha_alert("MUX protocol is not usable for server.\n");
+ goto out;
+ }
+ }
+
+ if (srv_init_per_thr(srv) == -1) {
+ ha_alert("failed to allocate per-thread lists for server.\n");
+ goto out;
+ }
+
+ if (srv->max_idle_conns != 0) {
+ srv->curr_idle_thr = calloc(global.nbthread, sizeof(*srv->curr_idle_thr));
+ if (!srv->curr_idle_thr) {
+ ha_alert("failed to allocate counters for server.\n");
+ goto out;
+ }
+ }
+
+ if (!srv_alloc_lb(srv, be)) {
+ ha_alert("Failed to initialize load-balancing data.\n");
+ goto out;
+ }
+
+ if (!stats_allocate_proxy_counters_internal(&srv->extra_counters,
+ COUNTERS_SV,
+ STATS_PX_CAP_SRV)) {
+ ha_alert("failed to allocate extra counters for server.\n");
+ goto out;
+ }
+
+ /* ensure minconn/maxconn consistency */
+ srv_minmax_conn_apply(srv);
+
+ /* prepare the SSL transport if the server or its checks use SSL */
+ if (srv->use_ssl == 1 || (srv->proxy->options & PR_O_TCPCHK_SSL) ||
+ srv->check.use_ssl == 1) {
+ if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv) {
+ if (xprt_get(XPRT_SSL)->prepare_srv(srv))
+ goto out;
+ }
+ }
+
+ if (srv->trackit) {
+ if (srv_apply_track(srv, be))
+ goto out;
+ }
+
+ /* Init check/agent if configured. The check is manually disabled
+ * because a dynamic server is started in a disable state. It must be
+ * manually activated via a "enable health/agent" command.
+ */
+ if (srv->do_check) {
+ if (init_srv_check(srv))
+ goto out;
+
+ srv->check.state &= ~CHK_ST_ENABLED;
+ }
+
+ if (srv->do_agent) {
+ if (init_srv_agent_check(srv))
+ goto out;
+
+ srv->agent.state &= ~CHK_ST_ENABLED;
+ }
+
+ /* Init slowstart if needed. */
+ if (init_srv_slowstart(srv))
+ goto out;
+
+ /* Attach the server to the end of the proxy linked list. Note that this
+ * operation is not thread-safe so this is executed under thread
+ * isolation.
+ *
+ * If a server with the same name is found, reject the new one.
+ */
+
+ /* TODO use a double-linked list for px->srv */
+ if (be->srv) {
+ struct server *next = be->srv;
+
+ while (1) {
+ /* check for duplicate server */
+ if (strcmp(srv->id, next->id) == 0) {
+ ha_alert("Already exists a server with the same name in backend.\n");
+ goto out;
+ }
+
+ if (!next->next)
+ break;
+
+ next = next->next;
+ }
+
+ next->next = srv;
+ }
+ else {
+ srv->next = be->srv;
+ be->srv = srv;
+ }
+
+ /* generate the server id if not manually specified */
+ if (!srv->puid) {
+ next_id = get_next_id(&be->conf.used_server_id, 1);
+ if (!next_id) {
+ ha_alert("Cannot attach server : no id left in proxy\n");
+ goto out;
+ }
+
+ srv->conf.id.key = srv->puid = next_id;
+ }
+ srv->conf.name.key = srv->id;
+
+ /* insert the server in the backend trees */
+ eb32_insert(&be->conf.used_server_id, &srv->conf.id);
+ ebis_insert(&be->conf.used_server_name, &srv->conf.name);
+ /* addr_node.key could be NULL if FQDN resolution is postponed (ie: add server from cli) */
+ if (srv->addr_node.key)
+ ebis_insert(&be->used_server_addr, &srv->addr_node);
+
+ /* check if LSB bit (odd bit) is set for reuse_cnt */
+ if (srv_id_reuse_cnt & 1) {
+ /* cnt must be increased */
+ srv_id_reuse_cnt++;
+ }
+ /* srv_id_reuse_cnt is always even at this stage, divide by 2 to
+ * save some space
+ * (sizeof(srv->rid) is half of sizeof(srv_id_reuse_cnt))
+ */
+ srv->rid = (srv_id_reuse_cnt) ? (srv_id_reuse_cnt / 2) : 0;
+
+ /* adding server cannot fail when we reach this:
+ * publishing EVENT_HDL_SUB_SERVER_ADD
+ */
+ srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_ADD, srv, 1);
+
+ thread_release();
+
+ /* Start the check task. The server must be fully initialized.
+ *
+ * <srvpos> and <nbcheck> parameters are set to 1 as there should be no
+ * need to randomly spread the task interval for dynamic servers.
+ */
+ if (srv->check.state & CHK_ST_CONFIGURED) {
+ if (!start_check_task(&srv->check, 0, 1, 1))
+ ha_alert("System might be unstable, consider to execute a reload");
+ }
+ if (srv->agent.state & CHK_ST_CONFIGURED) {
+ if (!start_check_task(&srv->agent, 0, 1, 1))
+ ha_alert("System might be unstable, consider to execute a reload");
+ }
+
+ ha_notice("New server registered.\n");
+ cli_umsg(appctx, LOG_INFO);
+
+ return 0;
+
+out:
+ /* error path: undo the partial initialization, then drop the server */
+ if (srv) {
+ if (srv->track)
+ release_server_track(srv);
+
+ if (srv->check.state & CHK_ST_CONFIGURED)
+ free_check(&srv->check);
+ if (srv->agent.state & CHK_ST_CONFIGURED)
+ free_check(&srv->agent);
+
+ /* remove the server from the proxy linked list */
+ _srv_detach(srv);
+ }
+
+ thread_release();
+
+ if (!usermsgs_empty())
+ cli_umsgerr(appctx);
+
+ if (srv)
+ srv_drop(srv);
+
+ return 1;
+}
+
+/* Parse a "del server" command: fully detaches a dynamic server from its
+ * backend (lists, trees, checks, tracking) under full thread isolation and
+ * drops the last reference. Only servers in maintenance with no remaining
+ * connections can be deleted.
+ * Returns 0 if the server has been successfully deleted, 1 on failure.
+ */
+static int cli_parse_delete_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct proxy *be;
+ struct server *srv;
+ struct server *prev_del;
+ struct ist be_name, sv_name;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ ++args;
+
+ /* The proxy servers list is currently not protected by a lock so this
+ * requires thread isolation. In addition, any place referencing the
+ * server about to be deleted would be unsafe after our operation, so
+ * we must be certain to be alone so that no other thread has even
+ * started to grab a temporary reference to this server.
+ */
+ thread_isolate_full();
+
+ sv_name = ist(args[1]);
+ be_name = istsplit(&sv_name, '/');
+ if (!istlen(sv_name)) {
+ cli_err(appctx, "Require 'backend/server'.");
+ goto out;
+ }
+
+ if (!(be = proxy_be_by_name(ist0(be_name)))) {
+ cli_err(appctx, "No such backend.");
+ goto out;
+ }
+ if (!(srv = server_find_by_name(be, ist0(sv_name)))) {
+ cli_err(appctx, "No such server.");
+ goto out;
+ }
+
+ if (srv->flags & SRV_F_NON_PURGEABLE) {
+ cli_err(appctx, "This server cannot be removed at runtime due to other configuration elements pointing to it.");
+ goto out;
+ }
+
+ /* Only servers in maintenance can be deleted. This ensures that the
+ * server is not present anymore in the lb structures (through
+ * lbprm.set_server_status_down).
+ */
+ if (!(srv->cur_admin & SRV_ADMF_MAINT)) {
+ cli_err(appctx, "Only servers in maintenance mode can be deleted.");
+ goto out;
+ }
+
+ /* Ensure that there is no active/idle/pending connection on the server.
+ *
+ * TODO idle connections should not prevent server deletion. A proper
+ * cleanup function should be implemented to be used here.
+ */
+ if (srv->cur_sess || srv->curr_idle_conns ||
+ !eb_is_empty(&srv->queue.head) || srv_has_streams(srv)) {
+ cli_err(appctx, "Server still has connections attached to it, cannot remove it.");
+ goto out;
+ }
+
+ /* removing cannot fail anymore when we reach this:
+ * publishing EVENT_HDL_SUB_SERVER_DEL
+ */
+ srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DEL, srv, 1);
+
+ /* remove srv from tracking list */
+ if (srv->track)
+ release_server_track(srv);
+
+ /* stop the check task if running */
+ if (srv->check.state & CHK_ST_CONFIGURED)
+ check_purge(&srv->check);
+ if (srv->agent.state & CHK_ST_CONFIGURED)
+ check_purge(&srv->agent);
+
+ /* detach the server from the proxy linked list
+ * The proxy servers list is currently not protected by a lock, so this
+ * requires thread_isolate/release.
+ */
+ _srv_detach(srv);
+
+ /* Some deleted servers could still point to us using their 'next',
+ * update them as needed
+ * Please note the small race between the POP and APPEND, although in
+ * this situation this is not an issue as we are under full thread
+ * isolation
+ */
+ while ((prev_del = MT_LIST_POP(&srv->prev_deleted, struct server *, prev_deleted))) {
+ /* update its 'next' ptr */
+ prev_del->next = srv->next;
+ if (srv->next) {
+ /* now it is our 'next' responsibility */
+ MT_LIST_APPEND(&srv->next->prev_deleted, &prev_del->prev_deleted);
+ }
+ }
+
+ /* we ourselves need to inform our 'next' that we will still point it */
+ if (srv->next)
+ MT_LIST_APPEND(&srv->next->prev_deleted, &srv->prev_deleted);
+
+ /* remove srv from the config trees (id, name) and the address tree */
+ eb32_delete(&srv->conf.id);
+ ebpt_delete(&srv->conf.name);
+ if (srv->addr_node.key)
+ ebpt_delete(&srv->addr_node);
+
+ /* remove srv from idle_node tree for idle conn cleanup */
+ eb32_delete(&srv->idle_node);
+
+ /* flag the server as deleted
+ * (despite the server being removed from primary server list,
+ * one could still access the server data from a valid ptr)
+ * Deleted flag helps detecting when a server is in transient removal
+ * state.
+ * ie: removed from the list but not yet freed/purged from memory.
+ */
+ srv->flags |= SRV_F_DELETED;
+
+ /* set LSB bit (odd bit) for reuse_cnt */
+ srv_id_reuse_cnt |= 1;
+
+ thread_release();
+
+ ha_notice("Server deleted.\n");
+ /* release the reference held by the proxy list; other holders may
+ * still keep the structure alive until they drop theirs.
+ */
+ srv_drop(srv);
+
+ cli_msg(appctx, LOG_INFO, "Server deleted.");
+
+ return 0;
+
+out:
+ thread_release();
+
+ return 1;
+}
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "disable", "agent", NULL }, "disable agent : disable agent checks", cli_parse_disable_agent, NULL },
+ { { "disable", "health", NULL }, "disable health : disable health checks", cli_parse_disable_health, NULL },
+ { { "disable", "server", NULL }, "disable server (DEPRECATED) : disable a server for maintenance (use 'set server' instead)", cli_parse_disable_server, NULL },
+ { { "enable", "agent", NULL }, "enable agent : enable agent checks", cli_parse_enable_agent, NULL },
+ { { "enable", "health", NULL }, "enable health : enable health checks", cli_parse_enable_health, NULL },
+ { { "enable", "server", NULL }, "enable server (DEPRECATED) : enable a disabled server (use 'set server' instead)", cli_parse_enable_server, NULL },
+ { { "set", "maxconn", "server", NULL }, "set maxconn server <bk>/<srv> : change a server's maxconn setting", cli_parse_set_maxconn_server, NULL },
+ { { "set", "server", NULL }, "set server <bk>/<srv> [opts] : change a server's state, weight, address or ssl", cli_parse_set_server },
+ { { "get", "weight", NULL }, "get weight <bk>/<srv> : report a server's current weight", cli_parse_get_weight },
+ { { "set", "weight", NULL }, "set weight <bk>/<srv> (DEPRECATED) : change a server's weight (use 'set server' instead)", cli_parse_set_weight },
+ { { "add", "server", NULL }, "add server <bk>/<srv> : create a new server", cli_parse_add_server, NULL },
+ { { "del", "server", NULL }, "del server <bk>/<srv> : remove a dynamically added server", cli_parse_delete_server, NULL },
+ {{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/* Prepare a server <srv> to track check status of another one. <srv>.<trackit>
+ * field is used to retrieve the identifier of the tracked server, either with
+ * the format "proxy/server" or just "server". <curproxy> must point to the
+ * backend owning <srv>; if no proxy is specified in <trackit>, it will be used
+ * to find the tracked server.
+ *
+ * Returns 0 if the server track has been activated else non-zero.
+ *
+ * Not thread-safe.
+ */
+int srv_apply_track(struct server *srv, struct proxy *curproxy)
+{
+ struct proxy *px;
+ struct server *strack, *loop;
+ char *pname, *sname;
+
+ if (!srv->trackit)
+ return 1;
+
+ pname = srv->trackit;
+ sname = strrchr(pname, '/');
+
+ if (sname) {
+ *sname++ = '\0';
+ }
+ else {
+ sname = pname;
+ pname = NULL;
+ }
+
+ if (pname) {
+ px = proxy_be_by_name(pname);
+ if (!px) {
+ ha_alert("unable to find required proxy '%s' for tracking.\n",
+ pname);
+ return 1;
+ }
+ }
+ else {
+ px = curproxy;
+ }
+
+ strack = findserver(px, sname);
+ if (!strack) {
+ ha_alert("unable to find required server '%s' for tracking.\n",
+ sname);
+ return 1;
+ }
+
+ if (strack->flags & SRV_F_DYNAMIC) {
+ ha_alert("unable to use %s/%s for tracking as it is a dynamic server.\n",
+ px->id, strack->id);
+ return 1;
+ }
+
+ if (!strack->do_check && !strack->do_agent && !strack->track &&
+ !strack->trackit) {
+ ha_alert("unable to use %s/%s for "
+ "tracking as it does not have any check nor agent enabled.\n",
+ px->id, strack->id);
+ return 1;
+ }
+
+ for (loop = strack->track; loop && loop != srv; loop = loop->track)
+ ;
+
+ if (srv == strack || loop) {
+ ha_alert("unable to track %s/%s as it "
+ "belongs to a tracking chain looping back to %s/%s.\n",
+ px->id, strack->id, px->id,
+ srv == strack ? strack->id : loop->id);
+ return 1;
+ }
+
+ if (curproxy != px &&
+ (curproxy->options & PR_O_DISABLE404) != (px->options & PR_O_DISABLE404)) {
+ ha_alert("unable to use %s/%s for"
+ "tracking: disable-on-404 option inconsistency.\n",
+ px->id, strack->id);
+ return 1;
+ }
+
+ srv->track = strack;
+ srv->tracknext = strack->trackers;
+ strack->trackers = srv;
+ strack->flags |= SRV_F_NON_PURGEABLE;
+
+ ha_free(&srv->trackit);
+
+ return 0;
+}
+
+/* This function propagates srv state change to lb algorithms */
+static void srv_lb_propagate(struct server *s)
+{
+ struct proxy *px = s->proxy;
+
+ if (px->lbprm.update_server_eweight)
+ px->lbprm.update_server_eweight(s);
+ else if (srv_willbe_usable(s)) {
+ if (px->lbprm.set_server_status_up)
+ px->lbprm.set_server_status_up(s);
+ }
+ else {
+ if (px->lbprm.set_server_status_down)
+ px->lbprm.set_server_status_down(s);
+ }
+}
+
/* directly update server state based on an operational change
 * (compare current and next state to know which transition to apply)
 *
 * The function returns the number of requeued sessions (either taken by
 * the server or redispatched to others servers) due to the server state
 * change.
 */
static int _srv_update_status_op(struct server *s, enum srv_op_st_chg_cause cause)
{
	struct buffer *tmptrash = NULL;
	int log_level;
	/* a stopping or draining server was already out of rotation, so its
	 * final DOWN is reported with a notice instead of an alert below
	 */
	int srv_was_stopping = (s->cur_state == SRV_ST_STOPPING) || (s->cur_admin & SRV_ADMF_DRAIN);
	int xferred = 0;

	/* transition: server goes fully DOWN */
	if ((s->cur_state != SRV_ST_STOPPED) && (s->next_state == SRV_ST_STOPPED)) {
		srv_lb_propagate(s);

		if (s->onmarkeddown & HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS)
			srv_shutdown_streams(s, SF_ERR_DOWN);

		/* we might have streams queued on this server and waiting for
		 * a connection. Those which are redispatchable will be queued
		 * to another server or to the proxy itself.
		 */
		xferred = pendconn_redistribute(s);

		tmptrash = alloc_trash_chunk();
		if (tmptrash) {
			chunk_printf(tmptrash,
			             "%sServer %s/%s is DOWN", s->flags & SRV_F_BACKUP ? "Backup " : "",
			             s->proxy->id, s->id);

			srv_append_op_chg_cause(tmptrash, s, cause);
			srv_append_more(tmptrash, s, xferred, 0);

			ha_warning("%s.\n", tmptrash->area);

			/* we don't send an alert if the server was previously paused */
			log_level = srv_was_stopping ? LOG_NOTICE : LOG_ALERT;
			send_log(s->proxy, log_level, "%s.\n",
			         tmptrash->area);
			send_email_alert(s, log_level, "%s",
			                 tmptrash->area);
			free_trash_chunk(tmptrash);
		}
	}
	/* transition: server enters STOPPING (drains existing connections) */
	else if ((s->cur_state != SRV_ST_STOPPING) && (s->next_state == SRV_ST_STOPPING)) {
		srv_lb_propagate(s);

		/* we might have streams queued on this server and waiting for
		 * a connection. Those which are redispatchable will be queued
		 * to another server or to the proxy itself.
		 */
		xferred = pendconn_redistribute(s);

		tmptrash = alloc_trash_chunk();
		if (tmptrash) {
			chunk_printf(tmptrash,
			             "%sServer %s/%s is stopping", s->flags & SRV_F_BACKUP ? "Backup " : "",
			             s->proxy->id, s->id);

			srv_append_op_chg_cause(tmptrash, s, cause);
			srv_append_more(tmptrash, s, xferred, 0);

			ha_warning("%s.\n", tmptrash->area);
			send_log(s->proxy, LOG_NOTICE, "%s.\n",
			         tmptrash->area);
			free_trash_chunk(tmptrash);
		}
	}
	/* transition: server comes (back) up, possibly through slowstart warmup */
	else if (((s->cur_state != SRV_ST_RUNNING) && (s->next_state == SRV_ST_RUNNING))
		 || ((s->cur_state != SRV_ST_STARTING) && (s->next_state == SRV_ST_STARTING))) {

		if (s->next_state == SRV_ST_STARTING && s->warmup)
			task_schedule(s->warmup, tick_add(now_ms, MS_TO_TICKS(MAX(1000, s->slowstart / 20))));

		server_recalc_eweight(s, 0);
		/* now propagate the status change to any LB algorithms */
		srv_lb_propagate(s);

		/* If the server is set with "on-marked-up shutdown-backup-sessions",
		 * and it's not a backup server and its effective weight is > 0,
		 * then it can accept new connections, so we shut down all streams
		 * on all backup servers.
		 */
		if ((s->onmarkedup & HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS) &&
		    !(s->flags & SRV_F_BACKUP) && s->next_eweight)
			srv_shutdown_backup_streams(s->proxy, SF_ERR_UP);

		/* check if we can handle some connections queued at the proxy. We
		 * will take as many as we can handle.
		 */
		xferred = pendconn_grab_from_px(s);

		tmptrash = alloc_trash_chunk();
		if (tmptrash) {
			chunk_printf(tmptrash,
			             "%sServer %s/%s is UP", s->flags & SRV_F_BACKUP ? "Backup " : "",
			             s->proxy->id, s->id);

			srv_append_op_chg_cause(tmptrash, s, cause);
			srv_append_more(tmptrash, s, xferred, 0);

			ha_warning("%s.\n", tmptrash->area);
			send_log(s->proxy, LOG_NOTICE, "%s.\n",
			         tmptrash->area);
			send_email_alert(s, LOG_NOTICE, "%s",
			                 tmptrash->area);
			free_trash_chunk(tmptrash);
		}
	}
	/* no state transition, but the weight changed: LB must be refreshed */
	else if (s->cur_eweight != s->next_eweight) {
		/* now propagate the status change to any LB algorithms */
		srv_lb_propagate(s);
	}
	return xferred;
}
+
/* deduct and update server state from an administrative change
 * (use current and next admin to deduct the administrative transition that
 * may result in server state update)
 *
 * The function returns the number of requeued sessions (either taken by
 * the server or redispatched to others servers) due to the server state
 * change.
 */
static int _srv_update_status_adm(struct server *s, enum srv_adm_st_chg_cause cause)
{
	struct buffer *tmptrash = NULL;
	/* a stopping or draining server was already out of rotation; used to
	 * lower the log severity when it goes DOWN for maintenance below
	 */
	int srv_was_stopping = (s->cur_state == SRV_ST_STOPPING) || (s->cur_admin & SRV_ADMF_DRAIN);
	int xferred = 0;

	/* transition: entering maintenance.
	 * Maintenance must also disable health checks
	 */
	if (!(s->cur_admin & SRV_ADMF_MAINT) && (s->next_admin & SRV_ADMF_MAINT)) {
		if (s->check.state & CHK_ST_ENABLED) {
			s->check.state |= CHK_ST_PAUSED;
			s->check.health = 0;
		}

		if (s->cur_state == SRV_ST_STOPPED) {	/* server was already down */
			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s was DOWN and now enters maintenance",
				             s->flags & SRV_F_BACKUP ? "Backup " : "", s->proxy->id, s->id);
				srv_append_adm_chg_cause(tmptrash, s, cause);
				srv_append_more(tmptrash, s, -1, (s->next_admin & SRV_ADMF_FMAINT));

				if (!(global.mode & MODE_STARTING)) {
					ha_warning("%s.\n", tmptrash->area);
					send_log(s->proxy, LOG_NOTICE, "%s.\n",
					         tmptrash->area);
				}
				free_trash_chunk(tmptrash);
			}
		}
		else {	/* server was still running */
			s->check.health = 0; /* failure */

			s->next_state = SRV_ST_STOPPED;
			srv_lb_propagate(s);

			if (s->onmarkeddown & HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS)
				srv_shutdown_streams(s, SF_ERR_DOWN);

			/* force connection cleanup on the given server */
			srv_cleanup_connections(s);
			/* we might have streams queued on this server and waiting for
			 * a connection. Those which are redispatchable will be queued
			 * to another server or to the proxy itself.
			 */
			xferred = pendconn_redistribute(s);

			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s is going DOWN for maintenance",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id);
				srv_append_adm_chg_cause(tmptrash, s, cause);
				srv_append_more(tmptrash, s, xferred, (s->next_admin & SRV_ADMF_FMAINT));

				if (!(global.mode & MODE_STARTING)) {
					ha_warning("%s.\n", tmptrash->area);
					send_log(s->proxy, srv_was_stopping ? LOG_NOTICE : LOG_ALERT, "%s.\n",
					         tmptrash->area);
				}
				free_trash_chunk(tmptrash);
			}
		}
	}
	/* transition: fully leaving maintenance */
	else if ((s->cur_admin & SRV_ADMF_MAINT) && !(s->next_admin & SRV_ADMF_MAINT)) {
		/* OK here we're leaving maintenance, we have many things to check,
		 * because the server might possibly be coming back up depending on
		 * its state. In practice, leaving maintenance means that we should
		 * immediately turn to UP (more or less the slowstart) under the
		 * following conditions :
		 *   - server is neither checked nor tracked
		 *   - server tracks another server which is not checked
		 *   - server tracks another server which is already up
		 * Which sums up as something simpler :
		 * "either the tracking server is up or the server's checks are disabled
		 * or up". Otherwise we only re-enable health checks. There's a special
		 * case associated to the stopping state which can be inherited. Note
		 * that the server might still be in drain mode, which is naturally dealt
		 * with by the lower level functions.
		 */
		if (s->check.state & CHK_ST_ENABLED) {
			s->check.state &= ~CHK_ST_PAUSED;
			s->check.health = s->check.rise; /* start OK but check immediately */
		}

		if ((!s->track || s->track->next_state != SRV_ST_STOPPED) &&
		    (!(s->agent.state & CHK_ST_ENABLED) || (s->agent.health >= s->agent.rise)) &&
		    (!(s->check.state & CHK_ST_ENABLED) || (s->check.health >= s->check.rise))) {
			if (s->track && s->track->next_state == SRV_ST_STOPPING) {
				s->next_state = SRV_ST_STOPPING;
			}
			else {
				/* go through STARTING (slowstart warmup) when configured,
				 * otherwise jump straight to RUNNING
				 */
				s->next_state = SRV_ST_STARTING;
				if (s->slowstart > 0) {
					if (s->warmup)
						task_schedule(s->warmup, tick_add(now_ms, MS_TO_TICKS(MAX(1000, s->slowstart / 20))));
				}
				else
					s->next_state = SRV_ST_RUNNING;
			}

		}

		tmptrash = alloc_trash_chunk();
		if (tmptrash) {
			/* report which flavor of maintenance is being left; each
			 * chunk_printf() rewrites the buffer, so the last matching
			 * case below wins
			 */
			if (!(s->next_admin & SRV_ADMF_FMAINT) && (s->cur_admin & SRV_ADMF_FMAINT)) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s is %s/%s (leaving forced maintenance)",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id,
				             (s->next_state == SRV_ST_STOPPED) ? "DOWN" : "UP",
				             (s->next_admin & SRV_ADMF_DRAIN) ? "DRAIN" : "READY");
			}
			if (!(s->next_admin & SRV_ADMF_RMAINT) && (s->cur_admin & SRV_ADMF_RMAINT)) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s ('%s') is %s/%s (resolves again)",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id, s->hostname,
				             (s->next_state == SRV_ST_STOPPED) ? "DOWN" : "UP",
				             (s->next_admin & SRV_ADMF_DRAIN) ? "DRAIN" : "READY");
			}
			if (!(s->next_admin & SRV_ADMF_IMAINT) && (s->cur_admin & SRV_ADMF_IMAINT)) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s is %s/%s (leaving maintenance)",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id,
				             (s->next_state == SRV_ST_STOPPED) ? "DOWN" : "UP",
				             (s->next_admin & SRV_ADMF_DRAIN) ? "DRAIN" : "READY");
			}
			ha_warning("%s.\n", tmptrash->area);
			send_log(s->proxy, LOG_NOTICE, "%s.\n",
			         tmptrash->area);
			free_trash_chunk(tmptrash);
		}

		server_recalc_eweight(s, 0);
		/* now propagate the status change to any LB algorithms */
		srv_lb_propagate(s);

		/* If the server is set with "on-marked-up shutdown-backup-sessions",
		 * and it's not a backup server and its effective weight is > 0,
		 * then it can accept new connections, so we shut down all streams
		 * on all backup servers.
		 */
		if ((s->onmarkedup & HANA_ONMARKEDUP_SHUTDOWNBACKUPSESSIONS) &&
		    !(s->flags & SRV_F_BACKUP) && s->next_eweight)
			srv_shutdown_backup_streams(s->proxy, SF_ERR_UP);

		/* check if we can handle some connections queued at the proxy. We
		 * will take as many as we can handle.
		 */
		xferred = pendconn_grab_from_px(s);
	}
	/* dropping one maintenance flag but still in maintenance: only log */
	else if (s->next_admin & SRV_ADMF_MAINT) {
		/* remaining in maintenance mode, let's inform precisely about the
		 * situation.
		 */
		if (!(s->next_admin & SRV_ADMF_FMAINT) && (s->cur_admin & SRV_ADMF_FMAINT)) {
			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s is leaving forced maintenance but remains in maintenance",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id);

				if (s->track) /* normally it's mandatory here */
					chunk_appendf(tmptrash, " via %s/%s",
					              s->track->proxy->id, s->track->id);
				ha_warning("%s.\n", tmptrash->area);
				send_log(s->proxy, LOG_NOTICE, "%s.\n",
				         tmptrash->area);
				free_trash_chunk(tmptrash);
			}
		}
		if (!(s->next_admin & SRV_ADMF_RMAINT) && (s->cur_admin & SRV_ADMF_RMAINT)) {
			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s ('%s') resolves again but remains in maintenance",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id, s->hostname);

				if (s->track) /* normally it's mandatory here */
					chunk_appendf(tmptrash, " via %s/%s",
					              s->track->proxy->id, s->track->id);
				ha_warning("%s.\n", tmptrash->area);
				send_log(s->proxy, LOG_NOTICE, "%s.\n",
				         tmptrash->area);
				free_trash_chunk(tmptrash);
			}
		}
		else if (!(s->next_admin & SRV_ADMF_IMAINT) && (s->cur_admin & SRV_ADMF_IMAINT)) {
			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash,
				             "%sServer %s/%s remains in forced maintenance",
				             s->flags & SRV_F_BACKUP ? "Backup " : "",
				             s->proxy->id, s->id);
				ha_warning("%s.\n", tmptrash->area);
				send_log(s->proxy, LOG_NOTICE, "%s.\n",
				         tmptrash->area);
				free_trash_chunk(tmptrash);
			}
		}
		/* don't report anything when leaving drain mode and remaining in maintenance */
	}

	/* drain transitions only matter when the server is out of maintenance */
	if (!(s->next_admin & SRV_ADMF_MAINT)) {
		if (!(s->cur_admin & SRV_ADMF_DRAIN) && (s->next_admin & SRV_ADMF_DRAIN)) {
			/* drain state is applied only if not yet in maint */

			srv_lb_propagate(s);

			/* we might have streams queued on this server and waiting for
			 * a connection. Those which are redispatchable will be queued
			 * to another server or to the proxy itself.
			 */
			xferred = pendconn_redistribute(s);

			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				chunk_printf(tmptrash, "%sServer %s/%s enters drain state",
				             s->flags & SRV_F_BACKUP ? "Backup " : "", s->proxy->id, s->id);
				srv_append_adm_chg_cause(tmptrash, s, cause);
				srv_append_more(tmptrash, s, xferred, (s->next_admin & SRV_ADMF_FDRAIN));

				if (!(global.mode & MODE_STARTING)) {
					ha_warning("%s.\n", tmptrash->area);
					send_log(s->proxy, LOG_NOTICE, "%s.\n",
					         tmptrash->area);
					send_email_alert(s, LOG_NOTICE, "%s",
					                 tmptrash->area);
				}
				free_trash_chunk(tmptrash);
			}
		}
		else if ((s->cur_admin & SRV_ADMF_DRAIN) && !(s->next_admin & SRV_ADMF_DRAIN)) {
			/* OK completely leaving drain mode */
			server_recalc_eweight(s, 0);

			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				if (s->cur_admin & SRV_ADMF_FDRAIN) {
					chunk_printf(tmptrash,
					             "%sServer %s/%s is %s (leaving forced drain)",
					             s->flags & SRV_F_BACKUP ? "Backup " : "",
					             s->proxy->id, s->id,
					             (s->next_state == SRV_ST_STOPPED) ? "DOWN" : "UP");
				}
				else {
					chunk_printf(tmptrash,
					             "%sServer %s/%s is %s (leaving drain)",
					             s->flags & SRV_F_BACKUP ? "Backup " : "",
					             s->proxy->id, s->id,
					             (s->next_state == SRV_ST_STOPPED) ? "DOWN" : "UP");
					if (s->track) /* normally it's mandatory here */
						chunk_appendf(tmptrash, " via %s/%s",
						              s->track->proxy->id, s->track->id);
				}

				ha_warning("%s.\n", tmptrash->area);
				send_log(s->proxy, LOG_NOTICE, "%s.\n",
				         tmptrash->area);
				free_trash_chunk(tmptrash);
			}

			/* now propagate the status change to any LB algorithms */
			srv_lb_propagate(s);
		}
		else if ((s->next_admin & SRV_ADMF_DRAIN)) {
			/* remaining in drain mode after removing one of its flags */

			tmptrash = alloc_trash_chunk();
			if (tmptrash) {
				if (!(s->next_admin & SRV_ADMF_FDRAIN)) {
					chunk_printf(tmptrash,
					             "%sServer %s/%s remains in drain mode",
					             s->flags & SRV_F_BACKUP ? "Backup " : "",
					             s->proxy->id, s->id);

					if (s->track) /* normally it's mandatory here */
						chunk_appendf(tmptrash, " via %s/%s",
						              s->track->proxy->id, s->track->id);
				}
				else {
					chunk_printf(tmptrash,
					             "%sServer %s/%s remains in forced drain mode",
					             s->flags & SRV_F_BACKUP ? "Backup " : "",
					             s->proxy->id, s->id);
				}
				ha_warning("%s.\n", tmptrash->area);
				send_log(s->proxy, LOG_NOTICE, "%s.\n",
				         tmptrash->area);
				free_trash_chunk(tmptrash);
			}
		}
	}
	return xferred;
}
+
/*
 * This function applies server's status changes.
 *
 * Must be called with the server lock held. This may also be called at init
 * time as the result of parsing the state file, in which case no lock will be
 * held, and the server's warmup task can be null.
 * <type> should be 0 for operational and 1 for administrative
 * <cause> must be srv_op_st_chg_cause enum for operational and
 * srv_adm_st_chg_cause enum for administrative
 */
static void srv_update_status(struct server *s, int type, int cause)
{
	int prev_srv_count = s->proxy->srv_bck + s->proxy->srv_act;
	enum srv_state srv_prev_state = s->cur_state;
	/* one event payload per kind; each variant starts with the common
	 * server data prepared below, so only one member is used per call
	 */
	union {
		struct event_hdl_cb_data_server_state state;
		struct event_hdl_cb_data_server_admin admin;
		struct event_hdl_cb_data_server common;
	} cb_data;
	int requeued;

	/* prepare common server event data */
	_srv_event_hdl_prepare(&cb_data.common, s, 0);

	if (type) {
		/* administrative change: record cause and old/new admin states
		 * for subscribers before applying the transition
		 */
		cb_data.admin.safe.cause = cause;
		cb_data.admin.safe.old_admin = s->cur_admin;
		cb_data.admin.safe.new_admin = s->next_admin;
		requeued = _srv_update_status_adm(s, cause);
		cb_data.admin.safe.requeued = requeued;
		/* publish admin change */
		_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_ADMIN, cb_data.admin, s);
	}
	else
		requeued = _srv_update_status_op(s, cause);

	/* explicitly commit state changes (even if it was already applied implicitly
	 * by some lb state change function), so we don't miss anything
	 */
	srv_lb_commit_status(s);

	/* check if server stats must be updated due to the server state change */
	if (srv_prev_state != s->cur_state) {
		if (srv_prev_state == SRV_ST_STOPPED) {
			/* server was down and no longer is */
			if (s->last_change < ns_to_sec(now_ns))        // ignore negative times
				s->down_time += ns_to_sec(now_ns) - s->last_change;
			_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, cb_data.common, s);
		}
		else if (s->cur_state == SRV_ST_STOPPED) {
			/* server was up and is currently down */
			s->counters.down_trans++;
			_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
		}
		s->last_change = ns_to_sec(now_ns);

		/* publish the state change */
		_srv_event_hdl_prepare_state(&cb_data.state,
		                             s, type, cause, srv_prev_state, requeued);
		_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_STATE, cb_data.state, s);
	}

	/* check if backend stats must be updated due to the server state change */
	if (prev_srv_count && s->proxy->srv_bck == 0 && s->proxy->srv_act == 0)
		set_backend_down(s->proxy); /* backend going down */
	else if (!prev_srv_count && (s->proxy->srv_bck || s->proxy->srv_act)) {
		/* backend was down and is back up again:
		 * no helper function, updating last_change and backend downtime stats
		 */
		if (s->proxy->last_change < ns_to_sec(now_ns))         // ignore negative times
			s->proxy->down_time += ns_to_sec(now_ns) - s->proxy->last_change;
		s->proxy->last_change = ns_to_sec(now_ns);
	}
}
+
+struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state)
+{
+ struct connection *conn;
+
+ while ((conn = MT_LIST_POP(&idle_conns[tid].toremove_conns,
+ struct connection *, toremove_list)) != NULL) {
+ conn->mux->destroy(conn->ctx);
+ }
+
+ return task;
+}
+
+/* Move <toremove_nb> count connections from <list> storage to <toremove_list>
+ * list storage. -1 means moving all of them.
+ *
+ * Returns the number of connections moved.
+ *
+ * Must be called with idle_conns_lock held.
+ */
+static int srv_migrate_conns_to_remove(struct list *list, struct mt_list *toremove_list, int toremove_nb)
+{
+ struct connection *conn;
+ int i = 0;
+
+ while (!LIST_ISEMPTY(list)) {
+ if (toremove_nb != -1 && i >= toremove_nb)
+ break;
+
+ conn = LIST_ELEM(list->n, struct connection *, idle_list);
+ conn_delete_from_tree(conn);
+ MT_LIST_APPEND(toremove_list, &conn->toremove_list);
+ i++;
+ }
+
+ return i;
+}
+/* cleanup connections for a given server
+ * might be useful when going on forced maintenance or live changing ip/port
+ */
+static void srv_cleanup_connections(struct server *srv)
+{
+ int did_remove;
+ int i;
+
+ /* nothing to do if pool-max-conn is null */
+ if (!srv->max_idle_conns)
+ return;
+
+ /* check all threads starting with ours */
+ for (i = tid;;) {
+ did_remove = 0;
+ HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
+ if (srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conn_list, &idle_conns[i].toremove_conns, -1) > 0)
+ did_remove = 1;
+ HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
+ if (did_remove)
+ task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);
+
+ if ((i = ((i + 1 == global.nbthread) ? 0 : i + 1)) == tid)
+ break;
+ }
+}
+
/* Removes an idle conn after updating the server idle conns counters.
 * Decrements whichever counters the connection currently accounts for
 * (idle/safe vs used), then unlinks it from the server's connection tree
 * under the current thread's idle conns lock.
 */
void srv_release_conn(struct server *srv, struct connection *conn)
{
	if (conn->flags & CO_FL_LIST_MASK) {
		/* The connection is currently in the server's idle list, so tell it
		 * there's one less connection available in that list.
		 */
		_HA_ATOMIC_DEC(&srv->curr_idle_conns);
		_HA_ATOMIC_DEC(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb);
		_HA_ATOMIC_DEC(&srv->curr_idle_thr[tid]);
	}
	else {
		/* The connection is not private and not in any server's idle
		 * list, so decrement the current number of used connections
		 */
		_HA_ATOMIC_DEC(&srv->curr_used_conns);
	}

	/* Remove the connection from any tree (safe, idle or available) */
	if (conn->hash_node) {
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(conn);
		conn->flags &= ~CO_FL_LIST_MASK;
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}
}
+
+/* retrieve a connection from its <hash> in <tree>
+ * returns NULL if no connection found
+ */
+struct connection *srv_lookup_conn(struct eb_root *tree, uint64_t hash)
+{
+ struct eb64_node *node = NULL;
+ struct connection *conn = NULL;
+ struct conn_hash_node *hash_node = NULL;
+
+ node = eb64_lookup(tree, hash);
+ if (node) {
+ hash_node = ebmb_entry(node, struct conn_hash_node, node);
+ conn = hash_node->conn;
+ }
+
+ return conn;
+}
+
+/* retrieve the next connection sharing the same hash as <conn>
+ * returns NULL if no connection found
+ */
+struct connection *srv_lookup_conn_next(struct connection *conn)
+{
+ struct eb64_node *node = NULL;
+ struct connection *next_conn = NULL;
+ struct conn_hash_node *hash_node = NULL;
+
+ node = eb64_next_dup(&conn->hash_node->node);
+ if (node) {
+ hash_node = eb64_entry(node, struct conn_hash_node, node);
+ next_conn = hash_node->conn;
+ }
+
+ return next_conn;
+}
+
+/* Add <conn> in <srv> idle trees. Set <is_safe> if connection is deemed safe
+ * for reuse.
+ *
+ * This function is a simple wrapper for tree insert. It should only be used
+ * for internal usage or when removing briefly the connection to avoid takeover
+ * on it before reinserting it with this function. In other context, prefer to
+ * use the full feature srv_add_to_idle_list().
+ *
+ * Must be called with idle_conns_lock.
+ */
+void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
+{
+ struct eb_root *tree = is_safe ? &srv->per_thr[tid].safe_conns :
+ &srv->per_thr[tid].idle_conns;
+
+ /* first insert in idle or safe tree. */
+ eb64_insert(tree, &conn->hash_node->node);
+
+ /* insert in list sorted by connection usage. */
+ LIST_APPEND(&srv->per_thr[tid].idle_conn_list, &conn->idle_list);
+}
+
/* This adds an idle connection to the server's list if the connection is
 * reusable, not held by any owner anymore, but still has available streams.
 *
 * Returns 1 if the connection was parked in the idle/safe list, 0 if it was
 * not eligible or the per-server idle limit was reached (the caller then
 * keeps ownership of the connection).
 */
int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe)
{
	/* we try to keep the connection in the server's idle list
	 * if we don't have too many FD in use, and if the number of
	 * idle+current conns is lower than what was observed before
	 * last purge, or if we already don't have idle conns for the
	 * current thread and we don't exceed last count by global.nbthread.
	 */
	if (!(conn->flags & CO_FL_PRIVATE) &&
	    srv && srv->pool_purge_delay > 0 &&
	    ((srv->proxy->options & PR_O_REUSE_MASK) != PR_O_REUSE_NEVR) &&
	    ha_used_fds < global.tune.pool_high_count &&
	    (srv->max_idle_conns == -1 || srv->max_idle_conns > srv->curr_idle_conns) &&
	    ((eb_is_empty(&srv->per_thr[tid].safe_conns) &&
	      (is_safe || eb_is_empty(&srv->per_thr[tid].idle_conns))) ||
	     (ha_used_fds < global.tune.pool_low_count &&
	      (srv->curr_used_conns + srv->curr_idle_conns <=
	       MAX(srv->curr_used_conns, srv->est_need_conns) + srv->low_idle_conns ||
	       (conn->flags & CO_FL_REVERSED)))) &&
	    !conn->mux->used_streams(conn) && conn->mux->avail_streams(conn)) {
		int retadd;

		/* reserve a slot under the per-server idle limit; roll the
		 * increment back if a concurrent thread raced us past it
		 */
		retadd = _HA_ATOMIC_ADD_FETCH(&srv->curr_idle_conns, 1);
		if (retadd > srv->max_idle_conns) {
			_HA_ATOMIC_DEC(&srv->curr_idle_conns);
			return 0;
		}
		_HA_ATOMIC_DEC(&srv->curr_used_conns);

		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		conn_delete_from_tree(conn);

		/* flag the connection with the list it now belongs to and
		 * insert it into the matching tree + per-list counter
		 */
		if (is_safe) {
			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
			_srv_add_idle(srv, conn, 1);
			_HA_ATOMIC_INC(&srv->curr_safe_nb);
		} else {
			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
			_srv_add_idle(srv, conn, 0);
			_HA_ATOMIC_INC(&srv->curr_idle_nb);
		}
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		_HA_ATOMIC_INC(&srv->curr_idle_thr[tid]);

		__ha_barrier_full();
		/* lazily register the server in the purge tree and schedule the
		 * purge task, using a check / lock / re-check sequence so only
		 * one thread performs the insertion
		 */
		if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
			HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
			if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {
				srv->idle_node.key = tick_add(srv->pool_purge_delay,
				                              now_ms);
				eb32_insert(&idle_conn_srv, &srv->idle_node);
				if (!task_in_wq(idle_conn_task) && !
				    task_in_rq(idle_conn_task)) {
					task_schedule(idle_conn_task,
					              srv->idle_node.key);
				}

			}
			HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);
		}
		return 1;
	}
	return 0;
}
+
/* Insert <conn> connection in <srv> server available list. This is reserved
 * for backend connection currently in used with usable streams left.
 *
 * NOTE(review): only the current thread's tree (per_thr[tid]) is touched and
 * no idle_conns lock is taken — presumably the avail tree is only accessed
 * by its owning thread; confirm against other avail_conns users.
 */
void srv_add_to_avail_list(struct server *srv, struct connection *conn)
{
	/* connection cannot be in idle list if used as an avail idle conn. */
	BUG_ON(LIST_INLIST(&conn->idle_list));
	eb64_insert(&srv->per_thr[tid].avail_conns, &conn->hash_node->node);
}
+
/* Periodic task killing excess idle server connections.
 * Walks the <idle_conn_srv> timer tree of servers whose purge delay expired,
 * computes how many idle connections each should give back, migrates them to
 * the per-thread "toremove" lists and wakes the per-thread cleanup tasks.
 * Re-arms itself on the earliest remaining timer.
 */
struct task *srv_cleanup_idle_conns(struct task *task, void *context, unsigned int state)
{
	struct server *srv;
	struct eb32_node *eb;
	int i;
	unsigned int next_wakeup;

	next_wakeup = TICK_ETERNITY;
	HA_SPIN_LOCK(OTHER_LOCK, &idle_conn_srv_lock);
	while (1) {
		int exceed_conns;
		int to_kill;
		int curr_idle;

		eb = eb32_lookup_ge(&idle_conn_srv, now_ms - TIMER_LOOK_BACK);
		if (!eb) {
			/* we might have reached the end of the tree, typically because
			 * <now_ms> is in the first half and we're first scanning the last
			 * half. Let's loop back to the beginning of the tree now.
			 */

			eb = eb32_first(&idle_conn_srv);
			if (likely(!eb))
				break;
		}
		if (tick_is_lt(now_ms, eb->key)) {
			/* timer not expired yet, revisit it later */
			next_wakeup = eb->key;
			break;
		}
		srv = eb32_entry(eb, struct server, idle_node);

		/* Calculate how many idle connections we want to kill :
		 * we want to remove half the difference between the total
		 * of established connections (used or idle) and the max
		 * number of used connections.
		 */
		curr_idle = srv->curr_idle_conns;
		if (curr_idle == 0)
			goto remove;
		exceed_conns = srv->curr_used_conns + curr_idle - MAX(srv->max_used_conns, srv->est_need_conns);
		exceed_conns = to_kill = exceed_conns / 2 + (exceed_conns & 1);

		/* decay the need estimate towards the observed max usage,
		 * but never let it drop below it
		 */
		srv->est_need_conns = (srv->est_need_conns + srv->max_used_conns) / 2;
		if (srv->est_need_conns < srv->max_used_conns)
			srv->est_need_conns = srv->max_used_conns;

		HA_ATOMIC_STORE(&srv->max_used_conns, srv->curr_used_conns);

		if (exceed_conns <= 0)
			goto remove;

		/* check all threads starting with ours */
		for (i = tid;;) {
			int max_conn;
			int j;
			int did_remove = 0;

			/* share the kill budget proportionally to this
			 * thread's share of the idle connections
			 */
			max_conn = (exceed_conns * srv->curr_idle_thr[i]) /
			           curr_idle + 1;

			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);
			j = srv_migrate_conns_to_remove(&srv->per_thr[i].idle_conn_list, &idle_conns[i].toremove_conns, max_conn);
			if (j > 0)
				did_remove = 1;
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[i].idle_conns_lock);

			if (did_remove)
				task_wakeup(idle_conns[i].cleanup_task, TASK_WOKEN_OTHER);

			if ((i = ((i + 1 == global.nbthread) ? 0 : i + 1)) == tid)
				break;
		}
remove:
		eb32_delete(&srv->idle_node);

		if (srv->curr_idle_conns) {
			/* There are still more idle connections, add the
			 * server back in the tree.
			 */
			srv->idle_node.key = tick_add(srv->pool_purge_delay, now_ms);
			eb32_insert(&idle_conn_srv, &srv->idle_node);
			next_wakeup = tick_first(next_wakeup, srv->idle_node.key);
		}
	}
	HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conn_srv_lock);

	task->expire = next_wakeup;
	return task;
}
+
+/* Close remaining idle connections. This functions is designed to be run on
+ * process shutdown. This guarantees a proper socket shutdown to avoid
+ * TIME_WAIT state. For a quick operation, only ctrl is closed, xprt stack is
+ * bypassed.
+ *
+ * This function is not thread-safe so it must only be called via a global
+ * deinit function.
+ */
+static void srv_close_idle_conns(struct server *srv)
+{
+ struct eb_root **cleaned_tree;
+ int i;
+
+ for (i = 0; i < global.nbthread; ++i) {
+ struct eb_root *conn_trees[] = {
+ &srv->per_thr[i].idle_conns,
+ &srv->per_thr[i].safe_conns,
+ &srv->per_thr[i].avail_conns,
+ NULL
+ };
+
+ for (cleaned_tree = conn_trees; *cleaned_tree; ++cleaned_tree) {
+ while (!eb_is_empty(*cleaned_tree)) {
+ struct ebmb_node *node = ebmb_first(*cleaned_tree);
+ struct conn_hash_node *conn_hash_node = ebmb_entry(node, struct conn_hash_node, node);
+ struct connection *conn = conn_hash_node->conn;
+
+ if (conn->ctrl->ctrl_close)
+ conn->ctrl->ctrl_close(conn);
+ conn_delete_from_tree(conn);
+ }
+ }
+ }
+}
+
+REGISTER_SERVER_DEINIT(srv_close_idle_conns);
+
+/* config parser for global "tune.idle-pool.shared", accepts "on" or "off" */
+static int cfg_parse_idle_pool_shared(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (strcmp(args[1], "on") == 0)
+ global.tune.options |= GTUNE_IDLE_POOL_SHARED;
+ else if (strcmp(args[1], "off") == 0)
+ global.tune.options &= ~GTUNE_IDLE_POOL_SHARED;
+ else {
+ memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+/* config parser for global "tune.pool-{low,high}-fd-ratio" */
+static int cfg_parse_pool_fd_ratio(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ int arg = -1;
+
+ if (too_many_args(1, args, err, NULL))
+ return -1;
+
+ if (*(args[1]) != 0)
+ arg = atoi(args[1]);
+
+ if (arg < 0 || arg > 100) {
+ memprintf(err, "'%s' expects an integer argument between 0 and 100.", args[0]);
+ return -1;
+ }
+
+ if (args[0][10] == 'h')
+ global.tune.pool_high_ratio = arg;
+ else
+ global.tune.pool_low_ratio = arg;
+ return 0;
+}
+
/* config keyword parsers for the global section tunables handled above */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "tune.idle-pool.shared", cfg_parse_idle_pool_shared },
	{ CFG_GLOBAL, "tune.pool-high-fd-ratio", cfg_parse_pool_fd_ratio },
	{ CFG_GLOBAL, "tune.pool-low-fd-ratio", cfg_parse_pool_fd_ratio },
	{ 0, NULL, NULL }
}};

/* register the keywords above during the STG_REGISTER init stage */
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/server_state.c b/src/server_state.c
new file mode 100644
index 0000000..ebdcf3c
--- /dev/null
+++ b/src/server_state.c
@@ -0,0 +1,947 @@
+/*
+ * Server-state management functions.
+ *
+ * Copyright (C) 2021 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+
+#include <import/eb64tree.h>
+#include <import/ebistree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/backend.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/port_range.h>
+#include <haproxy/proxy.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/server.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+
+/* Update a server state using the parameters available in the params list.
+ * The caller must provide a supported version
+ * Grabs the server lock during operation.
+ */
+static void srv_state_srv_update(struct server *srv, int version, char **params)
+{
+ char *p;
+ struct buffer *msg;
+ const char *warning;
+
+ /* fields since version 1
+ * and common to all other upcoming versions
+ */
+ enum srv_state srv_op_state;
+ enum srv_admin srv_admin_state;
+ unsigned srv_uweight, srv_iweight;
+ unsigned long srv_last_time_change;
+ short srv_check_status;
+ enum chk_result srv_check_result;
+ int srv_check_health;
+ int srv_check_state, srv_agent_state;
+ int bk_f_forced_id;
+ int srv_f_forced_id;
+ int fqdn_set_by_cli;
+ const char *fqdn;
+ const char *port_st;
+ unsigned int port_svc;
+ char *srvrecord;
+ char *addr;
+ int partial_apply = 0;
+#ifdef USE_OPENSSL
+ int use_ssl;
+#endif
+
+ fqdn = NULL;
+ port_svc = 0;
+ msg = alloc_trash_chunk();
+ if (!msg)
+ goto end;
+
+ HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
+
+ /* Only version 1 supported for now, don't check it. Fields are :
+ * srv_addr: params[0]
+ * srv_op_state: params[1]
+ * srv_admin_state: params[2]
+ * srv_uweight: params[3]
+ * srv_iweight: params[4]
+ * srv_last_time_change: params[5]
+ * srv_check_status: params[6]
+ * srv_check_result: params[7]
+ * srv_check_health: params[8]
+ * srv_check_state: params[9]
+ * srv_agent_state: params[10]
+ * bk_f_forced_id: params[11]
+ * srv_f_forced_id: params[12]
+ * srv_fqdn: params[13]
+ * srv_port: params[14]
+ * srvrecord: params[15]
+ * srv_use_ssl: params[16]
+ * srv_check_port: params[17]
+ * srv_check_addr: params[18]
+ * srv_agent_addr: params[19]
+ * srv_agent_port: params[20]
+ */
+
+ /* validating srv_op_state */
+ p = NULL;
+ errno = 0;
+ srv_op_state = strtol(params[1], &p, 10);
+ if ((p == params[1]) || errno == EINVAL || errno == ERANGE ||
+ (srv_op_state != SRV_ST_STOPPED &&
+ srv_op_state != SRV_ST_STARTING &&
+ srv_op_state != SRV_ST_RUNNING &&
+ srv_op_state != SRV_ST_STOPPING)) {
+ chunk_appendf(msg, ", invalid srv_op_state value '%s'", params[1]);
+ }
+
+ /* validating srv_admin_state */
+ p = NULL;
+ errno = 0;
+ srv_admin_state = strtol(params[2], &p, 10);
+ fqdn_set_by_cli = !!(srv_admin_state & SRV_ADMF_HMAINT);
+
+ /* inherited statuses will be recomputed later.
+ * Also disable SRV_ADMF_HMAINT flag (set from stats socket fqdn).
+ */
+ srv_admin_state &= ~SRV_ADMF_IDRAIN & ~SRV_ADMF_IMAINT & ~SRV_ADMF_HMAINT & ~SRV_ADMF_RMAINT;
+
+ if ((p == params[2]) || errno == EINVAL || errno == ERANGE ||
+ (srv_admin_state != 0 &&
+ srv_admin_state != SRV_ADMF_FMAINT &&
+ srv_admin_state != SRV_ADMF_CMAINT &&
+ srv_admin_state != (SRV_ADMF_CMAINT | SRV_ADMF_FMAINT) &&
+ srv_admin_state != (SRV_ADMF_CMAINT | SRV_ADMF_FDRAIN) &&
+ srv_admin_state != SRV_ADMF_FDRAIN)) {
+ chunk_appendf(msg, ", invalid srv_admin_state value '%s'", params[2]);
+ }
+
+ /* validating srv_uweight */
+ p = NULL;
+ errno = 0;
+ srv_uweight = strtol(params[3], &p, 10);
+ if ((p == params[3]) || errno == EINVAL || errno == ERANGE || (srv_uweight > SRV_UWGHT_MAX))
+ chunk_appendf(msg, ", invalid srv_uweight value '%s'", params[3]);
+
+ /* validating srv_iweight */
+ p = NULL;
+ errno = 0;
+ srv_iweight = strtol(params[4], &p, 10);
+ if ((p == params[4]) || errno == EINVAL || errno == ERANGE || (srv_iweight > SRV_UWGHT_MAX))
+ chunk_appendf(msg, ", invalid srv_iweight value '%s'", params[4]);
+
+ /* validating srv_last_time_change */
+ p = NULL;
+ errno = 0;
+ srv_last_time_change = strtol(params[5], &p, 10);
+ if ((p == params[5]) || errno == EINVAL || errno == ERANGE)
+ chunk_appendf(msg, ", invalid srv_last_time_change value '%s'", params[5]);
+
+ /* validating srv_check_status */
+ p = NULL;
+ errno = 0;
+ srv_check_status = strtol(params[6], &p, 10);
+ if (p == params[6] || errno == EINVAL || errno == ERANGE ||
+ (srv_check_status >= HCHK_STATUS_SIZE))
+ chunk_appendf(msg, ", invalid srv_check_status value '%s'", params[6]);
+
+ /* validating srv_check_result */
+ p = NULL;
+ errno = 0;
+ srv_check_result = strtol(params[7], &p, 10);
+ if ((p == params[7]) || errno == EINVAL || errno == ERANGE ||
+ (srv_check_result != CHK_RES_UNKNOWN &&
+ srv_check_result != CHK_RES_NEUTRAL &&
+ srv_check_result != CHK_RES_FAILED &&
+ srv_check_result != CHK_RES_PASSED &&
+ srv_check_result != CHK_RES_CONDPASS)) {
+ chunk_appendf(msg, ", invalid srv_check_result value '%s'", params[7]);
+ }
+
+ /* validating srv_check_health */
+ p = NULL;
+ errno = 0;
+ srv_check_health = strtol(params[8], &p, 10);
+ if (p == params[8] || errno == EINVAL || errno == ERANGE)
+ chunk_appendf(msg, ", invalid srv_check_health value '%s'", params[8]);
+
+ /* validating srv_check_state */
+ p = NULL;
+ errno = 0;
+ srv_check_state = strtol(params[9], &p, 10);
+ if (p == params[9] || errno == EINVAL || errno == ERANGE ||
+ (srv_check_state & ~(CHK_ST_INPROGRESS | CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_PAUSED | CHK_ST_AGENT)))
+ chunk_appendf(msg, ", invalid srv_check_state value '%s'", params[9]);
+
+ /* validating srv_agent_state */
+ p = NULL;
+ errno = 0;
+ srv_agent_state = strtol(params[10], &p, 10);
+ if (p == params[10] || errno == EINVAL || errno == ERANGE ||
+ (srv_agent_state & ~(CHK_ST_INPROGRESS | CHK_ST_CONFIGURED | CHK_ST_ENABLED | CHK_ST_PAUSED | CHK_ST_AGENT)))
+ chunk_appendf(msg, ", invalid srv_agent_state value '%s'", params[10]);
+
+ /* validating bk_f_forced_id */
+ p = NULL;
+ errno = 0;
+ bk_f_forced_id = strtol(params[11], &p, 10);
+ if (p == params[11] || errno == EINVAL || errno == ERANGE || !((bk_f_forced_id == 0) || (bk_f_forced_id == 1)))
+ chunk_appendf(msg, ", invalid bk_f_forced_id value '%s'", params[11]);
+
+ /* validating srv_f_forced_id */
+ p = NULL;
+ errno = 0;
+ srv_f_forced_id = strtol(params[12], &p, 10);
+ if (p == params[12] || errno == EINVAL || errno == ERANGE || !((srv_f_forced_id == 0) || (srv_f_forced_id == 1)))
+ chunk_appendf(msg, ", invalid srv_f_forced_id value '%s'", params[12]);
+
+ /* validating srv_fqdn */
+ fqdn = params[13];
+ if (fqdn && *fqdn == '-')
+ fqdn = NULL;
+ if (fqdn && (strlen(fqdn) > DNS_MAX_NAME_SIZE || invalid_domainchar(fqdn))) {
+ chunk_appendf(msg, ", invalid srv_fqdn value '%s'", params[13]);
+ fqdn = NULL;
+ }
+
+ port_st = params[14];
+ if (port_st) {
+ port_svc = strl2uic(port_st, strlen(port_st));
+ if (port_svc > USHRT_MAX) {
+ chunk_appendf(msg, ", invalid srv_port value '%s'", port_st);
+ port_st = NULL;
+ }
+ }
+
+ /* SRV record
+ * NOTE: in HAProxy, SRV records must start with an underscore '_'
+ */
+ srvrecord = params[15];
+ if (srvrecord && *srvrecord != '_')
+ srvrecord = NULL;
+
+ /* don't apply anything if one error has been detected */
+ if (msg->data)
+ goto out;
+ partial_apply = 1;
+
+ /* recover operational state and apply it to this server
+ * and all servers tracking this one */
+ srv->check.health = srv_check_health;
+ switch (srv_op_state) {
+ case SRV_ST_STOPPED:
+ srv->check.health = 0;
+ srv_set_stopped(srv, SRV_OP_STCHGC_STATEFILE);
+ break;
+ case SRV_ST_STARTING:
+ /* If rise == 1 there is no STARTING state, let's switch to
+ * RUNNING
+ */
+ if (srv->check.rise == 1) {
+ srv->check.health = srv->check.rise + srv->check.fall - 1;
+ srv_set_running(srv, SRV_OP_STCHGC_NONE);
+ break;
+ }
+ if (srv->check.health < 1 || srv->check.health >= srv->check.rise)
+ srv->check.health = srv->check.rise - 1;
+ srv->next_state = srv_op_state;
+ break;
+ case SRV_ST_STOPPING:
+ /* If fall == 1 there is no STOPPING state, let's switch to
+ * STOPPED
+ */
+ if (srv->check.fall == 1) {
+ srv->check.health = 0;
+ srv_set_stopped(srv, SRV_OP_STCHGC_STATEFILE);
+ break;
+ }
+ if (srv->check.health < srv->check.rise ||
+ srv->check.health > srv->check.rise + srv->check.fall - 2)
+ srv->check.health = srv->check.rise;
+ srv_set_stopping(srv, SRV_OP_STCHGC_STATEFILE);
+ break;
+ case SRV_ST_RUNNING:
+ srv->check.health = srv->check.rise + srv->check.fall - 1;
+ srv_set_running(srv, SRV_OP_STCHGC_NONE);
+ break;
+ }
+
+ /* When applying server state, the following rules apply:
+ * - in case of a configuration change, we apply the setting from the new
+ * configuration, regardless of old running state
+ * - if no configuration change, we apply old running state only if old running
+ * state is different from new configuration state
+ */
+ /* configuration has changed */
+ if ((srv_admin_state & SRV_ADMF_CMAINT) != (srv->next_admin & SRV_ADMF_CMAINT)) {
+ if (srv->next_admin & SRV_ADMF_CMAINT)
+ srv_adm_set_maint(srv);
+ else
+ srv_adm_set_ready(srv);
+ }
+ /* configuration is the same, let's compate old running state and new conf state */
+ else {
+ if (srv_admin_state & SRV_ADMF_FMAINT && !(srv->next_admin & SRV_ADMF_CMAINT))
+ srv_adm_set_maint(srv);
+ else if (!(srv_admin_state & SRV_ADMF_FMAINT) && (srv->next_admin & SRV_ADMF_CMAINT))
+ srv_adm_set_ready(srv);
+ }
+ /* apply drain mode if server is currently enabled */
+ if (!(srv->next_admin & SRV_ADMF_FMAINT) && (srv_admin_state & SRV_ADMF_FDRAIN)) {
+ /* The SRV_ADMF_FDRAIN flag is inherited when srv->iweight is 0
+ * (srv->iweight is the weight set up in configuration).
+ * There are two possible reasons for FDRAIN to have been present :
+ * - previous config weight was zero
+ * - "set server b/s drain" was sent to the CLI
+ *
+ * In the first case, we simply want to drop this drain state
+ * if the new weight is not zero anymore, meaning the administrator
+ * has intentionally turned the weight back to a positive value to
+ * enable the server again after an operation. In the second case,
+ * the drain state was forced on the CLI regardless of the config's
+ * weight so we don't want a change to the config weight to lose this
+ * status. What this means is :
+ * - if previous weight was 0 and new one is >0, drop the DRAIN state.
+ * - if the previous weight was >0, keep it.
+ */
+ if (srv_iweight > 0 || srv->iweight == 0)
+ srv_adm_set_drain(srv);
+ }
+
+ srv->last_change = ns_to_sec(now_ns) - srv_last_time_change;
+ srv->check.status = srv_check_status;
+ srv->check.result = srv_check_result;
+
+ /* Only case we want to apply is removing ENABLED flag which could have been
+ * done by the "disable health" command over the stats socket
+ */
+ if ((srv->check.state & CHK_ST_CONFIGURED) &&
+ (srv_check_state & CHK_ST_CONFIGURED) &&
+ !(srv_check_state & CHK_ST_ENABLED))
+ srv->check.state &= ~CHK_ST_ENABLED;
+
+ /* Only case we want to apply is removing ENABLED flag which could have been
+ * done by the "disable agent" command over the stats socket
+ */
+ if ((srv->agent.state & CHK_ST_CONFIGURED) &&
+ (srv_agent_state & CHK_ST_CONFIGURED) &&
+ !(srv_agent_state & CHK_ST_ENABLED))
+ srv->agent.state &= ~CHK_ST_ENABLED;
+
+ /* We want to apply the previous 'running' weight (srv_uweight) only if there
+ * was no change in the configuration: both previous and new iweight are equals
+ *
+ * It means that a configuration file change has precedence over a unix socket change
+ * for server's weight
+ *
+ * by default, HAProxy applies the following weight when parsing the configuration
+ * srv->uweight = srv->iweight
+ */
+ if (srv_iweight == srv->iweight) {
+ srv->uweight = srv_uweight;
+ }
+ server_recalc_eweight(srv, 1);
+
+ /* load server IP address */
+ if (strcmp(params[0], "-") != 0)
+ srv->lastaddr = strdup(params[0]);
+
+ if (fqdn && srv->hostname) {
+ if (strcmp(srv->hostname, fqdn) == 0) {
+ /* Here we reset the 'set from stats socket FQDN' flag
+ * to support such transitions:
+ * Let's say initial FQDN value is foo1 (in configuration file).
+ * - FQDN changed from stats socket, from foo1 to foo2 value,
+ * - FQDN changed again from file configuration (with the same previous value
+ set from stats socket, from foo1 to foo2 value),
+ * - reload for any other reason than a FQDN modification,
+ * the configuration file FQDN matches the fqdn server state file value.
+ * So we must reset the 'set from stats socket FQDN' flag to be consistent with
+ * any further FQDN modification.
+ */
+ srv->next_admin &= ~SRV_ADMF_HMAINT;
+ }
+ else {
+ /* If the FDQN has been changed from stats socket,
+ * apply fqdn state file value (which is the value set
+ * from stats socket).
+ * Also ensure the runtime resolver will process this resolution.
+ */
+ if (fqdn_set_by_cli) {
+ srv_set_fqdn(srv, fqdn, 0);
+ srv->flags &= ~SRV_F_NO_RESOLUTION;
+ srv->next_admin |= SRV_ADMF_HMAINT;
+ }
+ }
+ }
+ /* If all the conditions below are validated, this means
+ * we're evaluating a server managed by SRV resolution
+ */
+ else if (fqdn && !srv->hostname && srvrecord) {
+ int res;
+ int i;
+ char *tmp;
+
+ /* we can't apply previous state if SRV record has changed */
+ if (!srv->srvrq) {
+ chunk_appendf(msg, ", no SRV resolution for server '%s'. Previous state not applied", srv->id);
+ goto out;
+ }
+ if (strcmp(srv->srvrq->name, srvrecord) != 0) {
+ chunk_appendf(msg, ", SRV record mismatch between configuration ('%s') and state file ('%s) for server '%s'. Previous state not applied", srv->srvrq->name, srvrecord, srv->id);
+ goto out;
+ }
+
+ /* prepare DNS resolution for this server */
+ res = srv_prepare_for_resolution(srv, fqdn);
+ if (res == -1) {
+ chunk_appendf(msg, ", can't allocate memory for DNS resolution for server '%s'", srv->id);
+ goto out;
+ }
+
+ /* Remove from available list and insert in tree
+ * since this server has an hostname
+ */
+ LIST_DEL_INIT(&srv->srv_rec_item);
+ srv->host_dn.key = tmp = strdup(srv->hostname_dn);
+
+ /* convert the key in lowercase because tree
+ * lookup is case sensitive but we don't care
+ */
+ for (i = 0; tmp[i]; i++)
+ tmp[i] = tolower(tmp[i]);
+
+ /* insert in tree and set the srvrq expiration date */
+ ebis_insert(&srv->srvrq->named_servers, &srv->host_dn);
+ task_schedule(srv->srvrq_check, tick_add(now_ms, srv->srvrq->resolvers->hold.timeout));
+
+ /* Unset SRV_F_MAPPORTS for SRV records.
+ * SRV_F_MAPPORTS is unfortunately set by parse_server()
+ * because no ports are provided in the configuration file.
+ * This is because HAProxy will use the port found into the SRV record.
+ */
+ srv->flags &= ~SRV_F_MAPPORTS;
+ }
+
+ if (port_st)
+ srv->svc_port = port_svc;
+
+
+ if (params[16]) {
+#ifdef USE_OPENSSL
+ use_ssl = strtol(params[16], &p, 10);
+
+ /* configure ssl if connection has been initiated at startup */
+ if (srv->ssl_ctx.ctx != NULL)
+ srv_set_ssl(srv, use_ssl);
+#endif
+ }
+
+ port_st = NULL;
+ if (params[17] && strcmp(params[17], "0") != 0)
+ port_st = params[17];
+ addr = NULL;
+ if (params[18] && strcmp(params[18], "-") != 0)
+ addr = params[18];
+ if (addr || port_st) {
+ warning = srv_update_check_addr_port(srv, addr, port_st);
+ if (warning) {
+ chunk_appendf(msg, ", %s", warning);
+ goto out;
+ }
+ }
+
+ port_st = NULL;
+ if (params[20] && strcmp(params[20], "0") != 0)
+ port_st = params[20];
+ addr = NULL;
+ if (params[19] && strcmp(params[19], "-") != 0)
+ addr = params[19];
+ if (addr || port_st) {
+ warning = srv_update_agent_addr_port(srv, addr, port_st);
+ if (warning) {
+ chunk_appendf(msg, ", %s", warning);
+ goto out;
+ }
+ }
+
+ out:
+ HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);
+ if (msg->data) {
+ if (partial_apply == 1)
+ ha_warning("server-state partially applied for server '%s/%s'%s\n",
+ srv->proxy->id, srv->id, msg->area);
+ else
+ ha_warning("server-state application failed for server '%s/%s'%s\n",
+ srv->proxy->id, srv->id, msg->area);
+ }
+ end:
+ free_trash_chunk(msg);
+}
+
+/*
+ * Loop on the proxy's servers and try to load its state from <st_tree> using
+ * srv_state_srv_update(). The proxy name and the server name are concatenated
+ * to form the key. If found the entry is removed from the tree.
+ */
+static void srv_state_px_update(const struct proxy *px, int vsn, struct eb_root *st_tree)
+{
+ struct server_state_line *st_line;
+ struct eb64_node *node;
+ struct server *srv;
+ unsigned long key;
+
+ for (srv = px->srv; srv; srv = srv->next) {
+ chunk_printf(&trash, "%s %s", px->id, srv->id);
+ key = XXH3(trash.area, trash.data, 0);
+ node = eb64_lookup(st_tree, key);
+ if (!node)
+ continue; /* next server */
+ st_line = eb64_entry(node, typeof(*st_line), node);
+ srv_state_srv_update(srv, vsn, st_line->params+4);
+
+ /* the node may be released now */
+ eb64_delete(node);
+ free(st_line->line);
+ free(st_line);
+ }
+}
+
+/*
+ * read next line from file <f> and return the server state version if one found.
+ * If file is empty, then -1 is returned
+ * If no version is found, then 0 is returned
+ * Note that this should be the first read on <f>
+ */
+static int srv_state_get_version(FILE *f) {
+ char mybuf[SRV_STATE_LINE_MAXLEN];
+ char *endptr;
+ long int vsn;
+
+ /* first character of first line of the file must contain the version of the export */
+ if (fgets(mybuf, SRV_STATE_LINE_MAXLEN, f) == NULL)
+ return -1;
+
+ vsn = strtol(mybuf, &endptr, 10);
+ if (endptr == mybuf || *endptr != '\n') {
+ /* Empty or truncated line */
+ return 0;
+ }
+
+ if (vsn < SRV_STATE_FILE_VERSION_MIN || vsn > SRV_STATE_FILE_VERSION_MAX) {
+ /* Wrong version number */
+ return 0;
+ }
+
+ return vsn;
+}
+
+
/*
 * parses server state line stored in <buf> and supposedly in version <version>.
 * Set <params> accordingly on success. It returns 1 on success, 0 if the line
 * must be ignored (blank or comment) and -1 on error (truncated line or field
 * count not matching <version>).
 * The caller must provide a supported version.
 * <buf> is modified in place: fields are NUL-terminated where blanks stood.
 */
static int srv_state_parse_line(char *buf, const int version, char **params)
{
	int buflen, arg, ret;
	char *cur;

	buflen = strlen(buf);
	cur = buf;
	ret = 1; /* be optimistic and pretend a success */

	/* we need at least one character and a non-truncated line */
	if (buflen == 0 || buf[buflen - 1] != '\n') {
		ret = -1;
		goto out;
	}

	/* skip blank characters at the beginning of the line */
	while (*cur == ' ' || *cur == '\t')
		++cur;

	/* ignore empty or commented lines */
	if (!*cur || *cur == '\n' || *cur == '#') {
		ret = 0;
		goto out;
	}

	/* Removes trailing '\n' to ease parsing */
	buf[buflen - 1] = '\0';

	/* we're now ready to move the line into <params> */
	memset(params, 0, SRV_STATE_FILE_MAX_FIELDS * sizeof(*params));
	arg = 0;
	while (*cur) {
		/* first of all, stop if there are too many fields */
		if (arg >= SRV_STATE_FILE_MAX_FIELDS)
			break;

		/* then skip leading spaces.
		 * NOTE: if the line carried trailing blanks, this loop stops on
		 * the final '\0' and the statement below records one last empty
		 * field, which counts towards <arg>.
		 */
		while (*cur && (*cur == ' ' || *cur == '\t')) {
			++cur;
			if (!*cur)
				break;
		}

		/*
		 * idx:
		 * be_id:                params[0]
		 * be_name:              params[1]
		 * srv_id:               params[2]
		 * srv_name:             params[3]
		 * v1
		 * srv_addr:             params[4]
		 * srv_op_state:         params[5]
		 * srv_admin_state:      params[6]
		 * srv_uweight:          params[7]
		 * srv_iweight:          params[8]
		 * srv_last_time_change: params[9]
		 * srv_check_status:     params[10]
		 * srv_check_result:     params[11]
		 * srv_check_health:     params[12]
		 * srv_check_state:      params[13]
		 * srv_agent_state:      params[14]
		 * bk_f_forced_id:       params[15]
		 * srv_f_forced_id:      params[16]
		 * srv_fqdn:             params[17]
		 * srv_port:             params[18]
		 * srvrecord:            params[19]
		 *
		 * srv_use_ssl:          params[20] (optional field)
		 * srv_check_port:       params[21] (optional field)
		 * srv_check_addr:       params[22] (optional field)
		 * srv_agent_addr:       params[23] (optional field)
		 * srv_agent_port:       params[24] (optional field)
		 *
		 */
		params[arg++] = cur;

		/* look for the end of the current field */
		while (*cur && *cur != ' ' && *cur != '\t') {
			++cur;
			if (!*cur)
				break;
		}

		/* otherwise, cut the field and move to the next one.
		 * NOTE(review): when the field already ended the string, this
		 * rewrites the existing terminator and leaves <cur> one byte
		 * past it; the outer loop then reads that byte. This is safe
		 * for fgets()-filled buffers, which keep their own trailing
		 * '\0' -- confirm for any other caller.
		 */
		*cur++ = '\0';
	}

	/* if the number of fields does not match the version, then return an error */
	if (version == 1 &&
	    (arg < SRV_STATE_FILE_MIN_FIELDS_VERSION_1 ||
	     arg > SRV_STATE_FILE_MAX_FIELDS_VERSION_1))
		ret = -1;

 out:
	return ret;
}
+
+
/*
 * parses a server state line using srv_state_parse_line() and store the result
 * in <st_tree>. If an error occurred during the parsing, the line is
 * ignored. if <px> is defined, it is used to check the backend id/name against
 * the parsed params and to compute the key of the line.
 * Returns the srv_state_parse_line() result (1/0/-1); note that 1 may also be
 * returned for a valid line that was dropped (backend mismatch or duplicate
 * key), in which case its memory has already been released.
 */
static int srv_state_parse_and_store_line(char *line, int vsn, struct eb_root *st_tree,
                                          struct proxy *px)
{
	struct server_state_line *st_line;
	int ret = 0;

	/* store line in tree and duplicate the line */
	st_line = calloc(1, sizeof(*st_line));
	if (st_line == NULL)
		goto skip_line;
	st_line->line = strdup(line);
	if (st_line->line == NULL)
		goto skip_line;

	/* fields are cut in place inside st_line->line; st_line->params point into it */
	ret = srv_state_parse_line(st_line->line, vsn, st_line->params);
	if (ret <= 0)
		goto skip_line;

	/* Check backend name against params if <px> is defined */
	if (px) {
		int check_id = (atoi(st_line->params[0]) == px->uuid);
		int check_name = (strcmp(px->id, st_line->params[1]) == 0);
		/* mask the stored backend flags with PR_O_FORCED_ID to know whether
		 * the previous configuration had forced the backend id
		 */
		int bk_f_forced_id = (atoi(st_line->params[15]) & PR_O_FORCED_ID);


		if (!check_id && !check_name) {
			/* backend does not match at all: skip the line */
			goto skip_line;
		}
		else if (!check_id) {
			/* Id mismatch: warn but continue */
			ha_warning("Proxy '%s': backend ID mismatch: from server state file: '%s', from running config '%d'\n",
				   px->id, st_line->params[0], px->uuid);
			send_log(px, LOG_NOTICE, "backend ID mismatch: from server state file: '%s', from running config '%d'\n",
				 st_line->params[0], px->uuid);
		}
		else if (!check_name) {
			/* Name mismatch: warn and skip the line, except if the backend id was forced
			 * in the previous configuration */
			ha_warning("Proxy '%s': backend name mismatch: from server state file: '%s', from running config '%s'\n",
				   px->id, st_line->params[1], px->id);
			send_log(px, LOG_NOTICE, "backend name mismatch: from server state file: '%s', from running config '%s'\n",
				 st_line->params[1], px->id);
			if (!bk_f_forced_id)
				goto skip_line;
		}
	}

	/*
	 * The key: "be_name srv_name"
	 * if <px> is defined:  be_name == px->id
	 * otherwise:           be_name == params[1]
	 */
	chunk_printf(&trash, "%s %s", (px ? px->id : st_line->params[1]), st_line->params[3]);
	st_line->node.key = XXH3(trash.area, trash.data, 0);
	if (eb64_insert(st_tree, &st_line->node) != &st_line->node) {
		/* this is a duplicate key, probably a hand-crafted file, drop it! */
		goto skip_line;
	}

	return ret;

 skip_line:
	/* free up memory in case of error during the processing of the line
	 * (st_line is NULL only when the initial calloc() failed;
	 * free(NULL) is a no-op for st_line->line)
	 */
	if (st_line) {
		free(st_line->line);
		free(st_line);
	}
	return ret;
}
+
+/* Helper function to get the server-state file path.
+ * If <filename> starts with a '/', it is considered as an absolute path. In
+ * this case or if <global.server_state_base> is not set, <filename> only is
+ * considered. Otherwise, the <global.server_state_base> is concatenated to
+ * <filename> to produce the file path and copied to <dst_path>. in both cases,
+ * the result must not exceeds <maxpathlen>.
+ *
+ * The len is returned on success or -1 if the path is too long. On error, the
+ * caller must not rely on <dst_path>.
+ */
+static inline int srv_state_get_filepath(char *dst_path, int maxpathlen, const char *filename)
+{
+ char *sep;
+ int len = 0;
+
+ /* create the globalfilepath variable */
+ if (*filename == '/' || !global.server_state_base) {
+ /* absolute path or no base directory provided */
+ len = strlcpy2(dst_path, filename, maxpathlen);
+ }
+ else {
+ /* concat base directory and global server-state file */
+ sep = (global.server_state_base[strlen(global.server_state_base)-1] != '/' ? "/": "");
+ len = snprintf(dst_path, maxpathlen, "%s%s%s", global.server_state_base, sep, filename);
+ }
+ return (len < maxpathlen ? len: -1);
+}
+
+
/* This function parses all the proxies and only takes care of the backends
 * (since we're looking for servers). For each proxy, it does the following:
 *  - opens its server state file (either the global one or a local one)
 *  - reads the whole file, line by line
 *  - analyses each line to check if it matches our current backend:
 *    - backend name matches
 *    - backend id matches if id is forced and name doesn't match
 *  - if the server pointed to by the line is found, then state is applied
 *
 * If the running backend uuid or id differs from the state file, then HAProxy
 * reports a warning.
 *
 * Both the global file and each local file are first parsed into an eb64 tree
 * keyed by "be_name srv_name" hashes, then matched against the live servers.
 *
 * Grabs the server's lock via srv_state_srv_update().
 */
void apply_server_state(void)
{
	/* tree where global state_file is loaded */
	struct eb_root global_state_tree = EB_ROOT_UNIQUE;
	struct proxy *curproxy;
	struct server_state_line *st_line;
	struct eb64_node *node, *next_node;
	FILE *f;
	char mybuf[SRV_STATE_LINE_MAXLEN];
	char file[MAXPATHLEN];
	int local_vsn, global_vsn, len, linenum;

	global_vsn = 0; /* no global file */
	if (!global.server_state_file)
		goto no_globalfile;
	len = srv_state_get_filepath(file, MAXPATHLEN, global.server_state_file);
	if (len == -1) {
		ha_warning("config: Can't load global server state file: file too long.\n");
		goto no_globalfile;
	}

	/* Load global server state in a tree */
	errno = 0;
	f = fopen(file, "r");
	if (!f) {
		/* a missing file is only a notice: the state file is optional */
		if (errno == ENOENT)
			ha_notice("config: Can't open global server state file '%s': %s\n", file, strerror(errno));
		else
			ha_warning("config: Can't open global server state file '%s': %s\n", file, strerror(errno));
		goto no_globalfile;
	}

	global_vsn = srv_state_get_version(f);
	if (global_vsn < 1) {
		if (global_vsn == -1)
			ha_notice("config: Empty global server state file '%s'.\n",
				  file);
		if (global_vsn == 0)
			ha_warning("config: Can't get version of the global server state file '%s'.\n",
				   file);
		goto close_globalfile;
	}

	for (linenum = 1; fgets(mybuf, SRV_STATE_LINE_MAXLEN, f); linenum++) {
		int ret;

		ret = srv_state_parse_and_store_line(mybuf, global_vsn, &global_state_tree, NULL);
		if (ret == -1) {
			/* corrupted file: reset the version so the tree is
			 * ignored below; already-stored lines are freed at
			 * the end of this function
			 */
			ha_warning("config: corrupted global server state file '%s' at line %d.\n",
				   file, linenum);
			global_vsn = 0;
			break;
		}
	}

 close_globalfile:
	/* file was opened read-only: fclose() result intentionally ignored */
	fclose(f);

 no_globalfile:
	/* parse all proxies and load states from tree (global file) or from local file */
	for (curproxy = proxies_list; curproxy != NULL; curproxy = curproxy->next) {
		struct eb_root local_state_tree = EB_ROOT_UNIQUE;

		/* Must be an enabled backend with at least a server */
		if (!(curproxy->cap & PR_CAP_BE) || (curproxy->flags & (PR_FL_DISABLED|PR_FL_STOPPED)) || !curproxy->srv)
			continue; /* next proxy */

		/* Mode must be specified */
		BUG_ON(curproxy->load_server_state_from_file == PR_SRV_STATE_FILE_UNSPEC);

		/* No server-state file for this proxy */
		if (curproxy->load_server_state_from_file == PR_SRV_STATE_FILE_NONE)
			continue; /* next proxy */

		if (curproxy->load_server_state_from_file == PR_SRV_STATE_FILE_GLOBAL) {
			/* when global file is used, we get data from the tree
			 * Note that in such case we don't check backend name nor uuid.
			 * Backend name can't be wrong since it's used as a key to retrieve the server state
			 * line from the tree.
			 */
			if (global_vsn)
				srv_state_px_update(curproxy, global_vsn, &global_state_tree);
			continue; /* next proxy */
		}

		/*
		 * Here we load a local server state-file
		 */

		/* create file variable */
		len = srv_state_get_filepath(file, MAXPATHLEN, curproxy->server_state_file_name);
		if (len == -1) {
			ha_warning("Proxy '%s': Can't load local server state file: file too long.\n", curproxy->id);
			continue; /* next proxy */
		}

		/* Load local server state in a tree */
		errno = 0;
		f = fopen(file, "r");
		if (!f) {
			if (errno == ENOENT)
				ha_notice("Proxy '%s': Can't open server state file '%s': %s.\n",
					  curproxy->id, file, strerror(errno));
			else
				ha_warning("Proxy '%s': Can't open server state file '%s': %s.\n",
					   curproxy->id, file, strerror(errno));
			continue; /* next proxy */
		}

		/* first character of first line of the file must contain the version of the export */
		local_vsn = srv_state_get_version(f);
		if (local_vsn < 1) {
			if (local_vsn == -1)
				ha_notice("Proxy '%s': Empty server state file '%s'.\n",
					  curproxy->id, file);
			if (local_vsn == 0)
				ha_warning("Proxy '%s': Can't get version of the server state file '%s'.\n",
					   curproxy->id, file);
			goto close_localfile;
		}

		/* First, parse lines of the local server-state file and store them in a eb-tree */
		for (linenum = 1; fgets(mybuf, SRV_STATE_LINE_MAXLEN, f); linenum++) {
			int ret;

			ret = srv_state_parse_and_store_line(mybuf, local_vsn, &local_state_tree, curproxy);
			if (ret == -1) {
				ha_warning("Proxy '%s': corrupted server state file '%s' at line %d.\n",
					   curproxy->id, file, linenum);
				local_vsn = 0;
				break;
			}
		}

		if (local_vsn)
			srv_state_px_update(curproxy, local_vsn, &local_state_tree);

		/* Remove unused server-state lines: whatever remains in the
		 * tree did not match any live server of this backend
		 */
		node = eb64_first(&local_state_tree);
		while (node) {
			st_line = eb64_entry(node, typeof(*st_line), node);
			next_node = eb64_next(node);
			eb64_delete(node);

			if (local_vsn) {
				/* if no server found, then warn */
				ha_warning("Proxy '%s': can't find server '%s' in backend '%s'\n",
					   curproxy->id, st_line->params[3], curproxy->id);
				send_log(curproxy, LOG_NOTICE, "can't find server '%s' in backend '%s'\n",
					 st_line->params[3], curproxy->id);
			}

			free(st_line->line);
			free(st_line);
			node = next_node;
		}

	close_localfile:
		fclose(f);
	}

	/* release whatever is left in the global tree (unmatched lines) */
	node = eb64_first(&global_state_tree);
	while (node) {
		st_line = eb64_entry(node, typeof(*st_line), node);
		next_node = eb64_next(node);
		eb64_delete(node);
		free(st_line->line);
		free(st_line);
		node = next_node;
	}
}
diff --git a/src/session.c b/src/session.c
new file mode 100644
index 0000000..ce9ccbf
--- /dev/null
+++ b/src/session.c
@@ -0,0 +1,528 @@
+/*
+ * Session management functions.
+ *
+ * Copyright 2000-2015 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/ssl_sock-t.h>
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/session.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+
+
+DECLARE_POOL(pool_head_session, "session", sizeof(struct session));
+DECLARE_POOL(pool_head_sess_srv_list, "session server list",
+ sizeof(struct sess_srv_list));
+
+int conn_complete_session(struct connection *conn);
+
+/* Create a new session and assign it to frontend <fe>, listener <li>,
+ * origin <origin>, set the current date and clear the stick counters pointers.
+ * Returns the session upon success or NULL. The session may be released using
+ * session_free(). Note: <li> may be NULL.
+ */
+struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type *origin)
+{
+	struct session *sess;
+
+	sess = pool_alloc(pool_head_session);
+	if (sess) {
+		sess->listener = li;
+		sess->fe = fe;
+		sess->origin = origin;
+		sess->accept_date = date; /* user-visible date for logging */
+		sess->accept_ts = now_ns; /* corrected date for internal use */
+		sess->stkctr = NULL;
+		if (pool_head_stk_ctr) {
+			/* stick counters are only allocated when the pool exists;
+			 * the array holds global.tune.nb_stk_ctr entries.
+			 */
+			sess->stkctr = pool_alloc(pool_head_stk_ctr);
+			if (!sess->stkctr)
+				goto out_fail_alloc;
+			memset(sess->stkctr, 0, sizeof(sess->stkctr[0]) * global.tune.nb_stk_ctr);
+		}
+		vars_init_head(&sess->vars, SCOPE_SESS);
+		sess->task = NULL;
+		sess->t_handshake = -1; /* handshake not done yet */
+		sess->t_idle = -1;
+		_HA_ATOMIC_INC(&totalconn);
+		_HA_ATOMIC_INC(&jobs);
+		LIST_INIT(&sess->srv_list);
+		sess->idle_conns = 0;
+		sess->flags = SESS_FL_NONE;
+		sess->src = NULL;
+		sess->dst = NULL;
+	}
+	return sess;
+ out_fail_alloc:
+	/* the session itself was allocated; release it before reporting failure */
+	pool_free(pool_head_session, sess);
+	return NULL;
+}
+
+/* Release all resources attached to session <sess> and the session itself:
+ * the listener reference (if any), stick counters, per-session variables,
+ * the origin connection's mux, every server connection still linked to the
+ * session, and the source/destination addresses. Decrements the jobs counter.
+ */
+void session_free(struct session *sess)
+{
+	struct connection *conn, *conn_back;
+	struct sess_srv_list *srv_list, *srv_list_back;
+
+	if (sess->listener)
+		listener_release(sess->listener);
+	session_store_counters(sess);
+	pool_free(pool_head_stk_ctr, sess->stkctr);
+	vars_prune_per_sess(&sess->vars);
+	conn = objt_conn(sess->origin);
+	if (conn != NULL && conn->mux)
+		conn->mux->destroy(conn->ctx);
+	/* purge every server-side connection still attached to this session */
+	list_for_each_entry_safe(srv_list, srv_list_back, &sess->srv_list, srv_list) {
+		list_for_each_entry_safe(conn, conn_back, &srv_list->conn_list, session_list) {
+			LIST_DEL_INIT(&conn->session_list);
+			if (conn->mux) {
+				conn->owner = NULL;
+				conn->flags &= ~CO_FL_SESS_IDLE;
+				conn->mux->destroy(conn->ctx);
+			} else {
+				/* We have a connection, but not yet an associated mux.
+				 * So destroy it now.
+				 */
+				conn_stop_tracking(conn);
+				conn_full_close(conn);
+				conn_free(conn);
+			}
+		}
+		pool_free(pool_head_sess_srv_list, srv_list);
+	}
+	sockaddr_free(&sess->src);
+	sockaddr_free(&sess->dst);
+	pool_free(pool_head_session, sess);
+	_HA_ATOMIC_DEC(&jobs);
+}
+
+/* callback used from the connection/mux layer to notify that a connection is
+ * going to be released. Frees the owning session and clears the back pointer.
+ */
+void conn_session_free(struct connection *conn)
+{
+	session_free(conn->owner);
+	conn->owner = NULL;
+}
+
+/* count a new session to keep frontend, listener and track stats up to date.
+ * Updates the frontend/listener session counters, then for each active stick
+ * counter, bumps the tracked entry's session count and session rate.
+ */
+static void session_count_new(struct session *sess)
+{
+	struct stkctr *stkctr;
+	void *ptr;
+	int i;
+
+	proxy_inc_fe_sess_ctr(sess->listener, sess->fe);
+
+	for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+		stkctr = &sess->stkctr[i];
+		if (!stkctr_entry(stkctr))
+			continue;
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_CNT);
+		if (ptr)
+			HA_ATOMIC_INC(&stktable_data_cast(ptr, std_t_uint));
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_RATE);
+		if (ptr)
+			update_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+					       stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u, 1);
+	}
+}
+
+/* This function is called from the protocol layer accept() in order to
+ * instantiate a new session on behalf of a given listener and frontend. It
+ * returns a positive value upon success, 0 if the connection can be ignored,
+ * or a negative value upon critical failure. The accepted connection is
+ * closed if we return <= 0. If no handshake is needed, it immediately tries
+ * to instantiate a new stream. The connection must already have been filled
+ * with the incoming connection handle (a fd), a target (the listener) and a
+ * source address.
+ */
+int session_accept_fd(struct connection *cli_conn)
+{
+	struct listener *l = __objt_listener(cli_conn->target);
+	struct proxy *p = l->bind_conf->frontend;
+	int cfd = cli_conn->handle.fd;
+	struct session *sess;
+	int ret;
+
+	ret = -1; /* assume unrecoverable error by default */
+
+	cli_conn->proxy_netns = l->rx.settings->netns;
+
+	/* Active reversed connection has already been initialized before being
+	 * accepted. It must not be reset.
+	 * TODO use a dedicated accept_fd callback for reverse protocol
+	 */
+	if (!cli_conn->xprt) {
+		if (conn_prepare(cli_conn, l->rx.proto, l->bind_conf->xprt) < 0)
+			goto out_free_conn;
+
+		conn_ctrl_init(cli_conn);
+
+		/* wait for a PROXY protocol header */
+		if (l->bind_conf->options & BC_O_ACC_PROXY)
+			cli_conn->flags |= CO_FL_ACCEPT_PROXY;
+
+		/* wait for a NetScaler client IP insertion protocol header */
+		if (l->bind_conf->options & BC_O_ACC_CIP)
+			cli_conn->flags |= CO_FL_ACCEPT_CIP;
+
+		/* Add the handshake pseudo-XPRT */
+		if (cli_conn->flags & (CO_FL_ACCEPT_PROXY | CO_FL_ACCEPT_CIP)) {
+			if (xprt_add_hs(cli_conn) != 0)
+				goto out_free_conn;
+		}
+	}
+
+	sess = session_new(p, l, &cli_conn->obj_type);
+	if (!sess)
+		goto out_free_conn;
+
+	conn_set_owner(cli_conn, sess, NULL);
+
+	/* now evaluate the tcp-request layer4 rules. We only need a session
+	 * and no stream for these rules.
+	 */
+	if (!LIST_ISEMPTY(&p->tcp_req.l4_rules) && !tcp_exec_l4_rules(sess)) {
+		/* let's do a no-linger now to close with a single RST. */
+		if (!(cli_conn->flags & CO_FL_FDLESS))
+			setsockopt(cfd, SOL_SOCKET, SO_LINGER, (struct linger *) &nolinger, sizeof(struct linger));
+		ret = 0; /* successful termination */
+		goto out_free_sess;
+	}
+	/* TCP rules may flag the connection as needing proxy protocol, now that it's done we can start our xprt */
+	if (conn_xprt_start(cli_conn) < 0)
+		goto out_free_sess;
+
+	/* FIXME/WTA: we should implement the setsockopt() calls at the proto
+	 * level instead and let non-inet protocols implement their own equivalent.
+	 */
+	if (cli_conn->flags & CO_FL_FDLESS)
+		goto skip_fd_setup;
+
+	/* Adjust some socket options */
+	if (l->rx.addr.ss_family == AF_INET || l->rx.addr.ss_family == AF_INET6) {
+		setsockopt(cfd, IPPROTO_TCP, TCP_NODELAY, (char *) &one, sizeof(one));
+
+		if (p->options & PR_O_TCP_CLI_KA) {
+			setsockopt(cfd, SOL_SOCKET, SO_KEEPALIVE, (char *) &one, sizeof(one));
+
+#ifdef TCP_KEEPCNT
+			if (p->clitcpka_cnt)
+				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPCNT, &p->clitcpka_cnt, sizeof(p->clitcpka_cnt));
+#endif
+
+#ifdef TCP_KEEPIDLE
+			if (p->clitcpka_idle)
+				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPIDLE, &p->clitcpka_idle, sizeof(p->clitcpka_idle));
+#endif
+
+#ifdef TCP_KEEPINTVL
+			if (p->clitcpka_intvl)
+				setsockopt(cfd, IPPROTO_TCP, TCP_KEEPINTVL, &p->clitcpka_intvl, sizeof(p->clitcpka_intvl));
+#endif
+		}
+
+		if (p->options & PR_O_TCP_NOLING)
+			HA_ATOMIC_OR(&fdtab[cfd].state, FD_LINGER_RISK);
+
+#if defined(TCP_MAXSEG)
+		if (l->bind_conf->maxseg < 0) {
+			/* we just want to reduce the current MSS by that value */
+			int mss;
+			socklen_t mss_len = sizeof(mss);
+			if (getsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, &mss_len) == 0) {
+				mss += l->bind_conf->maxseg; /* remember, it's < 0 */
+				setsockopt(cfd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
+			}
+		}
+#endif
+	}
+
+	if (global.tune.client_sndbuf)
+		setsockopt(cfd, SOL_SOCKET, SO_SNDBUF, &global.tune.client_sndbuf, sizeof(global.tune.client_sndbuf));
+
+	if (global.tune.client_rcvbuf)
+		setsockopt(cfd, SOL_SOCKET, SO_RCVBUF, &global.tune.client_rcvbuf, sizeof(global.tune.client_rcvbuf));
+
+ skip_fd_setup:
+	/* OK, now either we have a pending handshake to execute with and then
+	 * we must return to the I/O layer, or we can proceed with the end of
+	 * the stream initialization. In case of handshake, we also set the I/O
+	 * timeout to the frontend's client timeout and register a task in the
+	 * session for this purpose. The connection's owner is left to the
+	 * session during this period.
+	 *
+	 * At this point we set the relation between sess/task/conn this way :
+	 *
+	 *                   +----------------- task
+	 *                   |                    |
+	 *          orig -- sess <-- context      |
+	 *           |       ^           |        |
+	 *           v       |           |        |
+	 *          conn -- owner ---> task <-----+
+	 */
+	if (cli_conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) {
+		int timeout;
+		int clt_tmt = p->timeout.client;
+		int hs_tmt = p->timeout.client_hs;
+
+		if (unlikely((sess->task = task_new_here()) == NULL))
+			goto out_free_sess;
+
+		/* Handshake timeout as default timeout */
+		timeout = hs_tmt ? hs_tmt : clt_tmt;
+		sess->task->context = sess;
+		sess->task->nice = l->bind_conf->nice;
+		sess->task->process = session_expire_embryonic;
+		sess->task->expire = tick_add_ifset(now_ms, timeout);
+		task_queue(sess->task);
+		return 1;
+	}
+
+	/* OK let's complete stream initialization since there is no handshake */
+	if (conn_complete_session(cli_conn) >= 0)
+		return 1;
+
+	/* if we reach here we have deliberately decided not to keep this
+	 * session (e.g. tcp-request rule), so that's not an error we should
+	 * try to protect against.
+	 */
+	ret = 0;
+
+	/* error unrolling */
+ out_free_sess:
+	/* prevent call to listener_release during session_free. It will be
+	 * done below, for all errors. */
+	sess->listener = NULL;
+	session_free(sess);
+
+ out_free_conn:
+	if (ret < 0 && l->bind_conf->xprt == xprt_get(XPRT_RAW) &&
+	    p->mode == PR_MODE_HTTP && l->bind_conf->mux_proto == NULL &&
+	    !(cli_conn->flags & CO_FL_FDLESS)) {
+		/* critical error, no more memory, try to emit a 500 response */
+		send(cfd, http_err_msgs[HTTP_ERR_500], strlen(http_err_msgs[HTTP_ERR_500]),
+		     MSG_DONTWAIT|MSG_NOSIGNAL);
+	}
+
+	if (cli_conn->mux) {
+		/* Mux is already initialized for active reversed connection. */
+		cli_conn->mux->destroy(cli_conn->ctx);
+	}
+	else {
+		conn_stop_tracking(cli_conn);
+		conn_full_close(cli_conn);
+		conn_free(cli_conn);
+	}
+	listener_release(l);
+	return ret;
+}
+
+
+/* prepare the trash with a log prefix for session <sess>. It only works with
+ * embryonic sessions based on a real connection. This function requires that
+ * sess->origin points to the incoming connection.
+ */
+static void session_prepare_log_prefix(struct session *sess)
+{
+	const struct sockaddr_storage *src;
+	struct tm tm;
+	char pn[INET6_ADDRSTRLEN];
+	int ret;
+	char *end;
+
+	src = sess_src(sess);
+	ret = (src ? addr_to_str(src, pn, sizeof(pn)) : 0);
+	if (ret <= 0)
+		chunk_printf(&trash, "unknown [");
+	else if (ret == AF_UNIX)
+		/* for UNIX sockets, log the listener's unique id instead of a port */
+		chunk_printf(&trash, "%s:%d [", pn, sess->listener->luid);
+	else
+		chunk_printf(&trash, "%s:%d [", pn, get_host_port(src));
+
+	get_localtime(sess->accept_date.tv_sec, &tm);
+	end = date2str_log(trash.area + trash.data, &tm, &(sess->accept_date),
+			   trash.size - trash.data);
+	trash.data = end - trash.area;
+	if (sess->listener->name)
+		chunk_appendf(&trash, "] %s/%s", sess->fe->id, sess->listener->name);
+	else
+		chunk_appendf(&trash, "] %s/%d", sess->fe->id, sess->listener->luid);
+}
+
+
+/* fill the trash buffer with the string to use for send_log during
+ * session_kill_embryonic(). Add log prefix and error string.
+ *
+ * The function is able to dump an SSL error string when CO_ER_SSL_HANDSHAKE
+ * is met.
+ */
+static void session_build_err_string(struct session *sess)
+{
+	struct connection *conn = __objt_conn(sess->origin);
+	const char *err_msg;
+	struct ssl_sock_ctx __maybe_unused *ssl_ctx;
+
+	err_msg = conn_err_code_str(conn);
+	session_prepare_log_prefix(sess); /* use trash buffer */
+
+#ifdef USE_OPENSSL
+	ssl_ctx = conn_get_ssl_sock_ctx(conn);
+
+	/* when the SSL error code is present and during a SSL Handshake failure,
+	 * try to dump the error string from OpenSSL */
+	if (conn->err_code == CO_ER_SSL_HANDSHAKE && ssl_ctx && ssl_ctx->error_code != 0) {
+		chunk_appendf(&trash, ": SSL handshake failure (");
+		ERR_error_string_n(ssl_ctx->error_code, b_orig(&trash)+b_data(&trash), b_room(&trash));
+		trash.data = strlen(b_orig(&trash));
+		chunk_appendf(&trash, ")\n");
+	}
+
+	else
+#endif /* USE_OPENSSL */
+
+	if (err_msg)
+		chunk_appendf(&trash, ": %s\n", err_msg);
+	else
+		chunk_appendf(&trash, ": unknown connection error (code=%d flags=%08x)\n",
+			      conn->err_code, conn->flags);
+
+	return;
+}
+
+
+
+/* This function kills an existing embryonic session. It stops the connection's
+ * transport layer, releases assigned resources, resumes the listener if it was
+ * disabled and finally kills the file descriptor. This function requires that
+ * sess->origin points to the incoming connection.
+ */
+static void session_kill_embryonic(struct session *sess, unsigned int state)
+{
+	int level = LOG_INFO;
+	struct connection *conn = __objt_conn(sess->origin);
+	struct task *task = sess->task;
+	unsigned int log = sess->fe->to_log;
+
+	if (sess->fe->options2 & PR_O2_LOGERRORS)
+		level = LOG_ERR;
+
+	if (log && (sess->fe->options & PR_O_NULLNOLOG)) {
+		/* with "option dontlognull", we don't log connections with no transfer */
+		if (!conn->err_code ||
+		    conn->err_code == CO_ER_PRX_EMPTY || conn->err_code == CO_ER_PRX_ABORT ||
+		    conn->err_code == CO_ER_CIP_EMPTY || conn->err_code == CO_ER_CIP_ABORT ||
+		    conn->err_code == CO_ER_SSL_EMPTY || conn->err_code == CO_ER_SSL_ABORT)
+			log = 0;
+	}
+
+	if (log) {
+		/* on a timer wakeup with no prior error, derive the error code
+		 * from the handshake phase the connection was waiting in.
+		 */
+		if (!conn->err_code && (state & TASK_WOKEN_TIMER)) {
+			if (conn->flags & CO_FL_ACCEPT_PROXY)
+				conn->err_code = CO_ER_PRX_TIMEOUT;
+			else if (conn->flags & CO_FL_ACCEPT_CIP)
+				conn->err_code = CO_ER_CIP_TIMEOUT;
+			else if (conn->flags & CO_FL_SSL_WAIT_HS)
+				conn->err_code = CO_ER_SSL_TIMEOUT;
+		}
+
+		if(!LIST_ISEMPTY(&sess->fe->logformat_error)) {
+			/* Display a log line following the configured error-log-format. */
+			sess_log(sess);
+		}
+		else {
+			session_build_err_string(sess);
+			send_log(sess->fe, level, "%s", trash.area);
+		}
+	}
+
+	/* kill the connection now */
+	conn_stop_tracking(conn);
+	conn_full_close(conn);
+	conn_free(conn);
+	sess->origin = NULL;
+
+	task_destroy(task);
+	session_free(sess);
+}
+
+/* Manages the embryonic session timeout. It is only called when the timeout
+ * strikes and performs the required cleanup. It's only exported to make it
+ * resolve in "show tasks".
+ */
+struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state)
+{
+	struct session *sess = context;
+
+	/* only a timer wakeup kills the session; other wakeups are ignored */
+	if (!(state & TASK_WOKEN_TIMER))
+		return t;
+
+	session_kill_embryonic(sess, state);
+	return NULL;
+}
+
+/* Finish initializing a session from a connection, or kills it if the
+ * connection shows an error. Returns <0 if the connection was killed. It may
+ * be called either asynchronously when ssl handshake is done with an embryonic
+ * session, or synchronously to finalize the session. The distinction is made
+ * on sess->task which is only set in the embryonic session case.
+ */
+int conn_complete_session(struct connection *conn)
+{
+	struct session *sess = conn->owner;
+
+	sess->t_handshake = ns_to_ms(now_ns - sess->accept_ts);
+
+	if (conn->flags & CO_FL_ERROR)
+		goto fail;
+
+	/* if logs require transport layer information, note it on the connection */
+	if (sess->fe->to_log & LW_XPRT)
+		conn->flags |= CO_FL_XPRT_TRACKED;
+
+	/* we may have some tcp-request-session rules */
+	if (!LIST_ISEMPTY(&sess->fe->tcp_req.l5_rules) && !tcp_exec_l5_rules(sess))
+		goto fail;
+
+	session_count_new(sess);
+	if (!conn->mux) {
+		if (conn_install_mux_fe(conn, NULL) < 0)
+			goto fail;
+	}
+
+	/* the embryonic session's task is not needed anymore */
+	task_destroy(sess->task);
+	sess->task = NULL;
+	conn_set_owner(conn, sess, conn_session_free);
+
+	return 0;
+
+ fail:
+	if (sess->task)
+		session_kill_embryonic(sess, 0);
+	return -1;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/sha1.c b/src/sha1.c
new file mode 100644
index 0000000..b7c2d70
--- /dev/null
+++ b/src/sha1.c
@@ -0,0 +1,308 @@
+/*
+ * Based on the git SHA1 Implementation.
+ *
+ * Copyright (C) 2009-2015, Linus Torvalds and others.
+ *
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was initially based on the Mozilla SHA1 implementation, although
+ * none of the original Mozilla code remains.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* this is only to get definitions for memcpy(), ntohl() and htonl() */
+#include <string.h>
+#include <inttypes.h>
+#include <arpa/inet.h>
+
+#include <import/sha1.h>
+
+/*
+ * Performance might be improved if the CPU architecture is OK with
+ * unaligned 32-bit loads and a fast ntohl() is available.
+ * Otherwise fall back to byte loads and shifts which is portable,
+ * and is faster on architectures with memory alignment issues.
+ */
+
+#if defined(__i386__) || defined(__x86_64__) || \
+ defined(__ppc__) || defined(__ppc64__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ defined(__s390__) || defined(__s390x__)
+
+#define get_be32(p) ntohl(*(unsigned int *)(p))
+#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
+
+#else
+
+/* portable big-endian 32-bit load via byte accesses; safe for any alignment */
+static inline uint32_t get_be32(const void *ptr)
+{
+	const unsigned char *p = ptr;
+	return	(uint32_t)p[0] << 24 |
+		(uint32_t)p[1] << 16 |
+		(uint32_t)p[2] <<  8 |
+		(uint32_t)p[3] <<  0;
+}
+
+/* portable big-endian 32-bit store via byte accesses; safe for any alignment */
+static inline void put_be32(void *ptr, uint32_t value)
+{
+	unsigned char *p = ptr;
+	p[0] = value >> 24;
+	p[1] = value >> 16;
+	p[2] = value >>  8;
+	p[3] = value >>  0;
+}
+
+#endif
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+
+/*
+ * Force usage of rol or ror by selecting the one with the smaller constant.
+ * It _can_ generate slightly smaller code (a constant of 1 is special), but
+ * perhaps more importantly it's possibly faster on any uarch that does a
+ * rotate with a loop.
+ */
+
+#define SHA_ASM(op, x, n) ({ unsigned int __res; __asm__(op " %1,%0":"=r" (__res):"i" (n), "0" (x)); __res; })
+#define SHA_ROL(x,n) SHA_ASM("rol", x, n)
+#define SHA_ROR(x,n) SHA_ASM("ror", x, n)
+
+#else
+
+#define SHA_ROT(X,l,r) (((X) << (l)) | ((X) >> (r)))
+#define SHA_ROL(X,n) SHA_ROT(X,n,32-(n))
+#define SHA_ROR(X,n) SHA_ROT(X,32-(n),n)
+
+#endif
+
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with less than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
+ * the stack frame size simply explode and performance goes down the drain.
+ */
+
+#if defined(__i386__) || defined(__x86_64__)
+ #define setW(x, val) (*(volatile unsigned int *)&W(x) = (val))
+#elif defined(__GNUC__) && defined(__arm__)
+ #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+ #define setW(x, val) (W(x) = (val))
+#endif
+
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
+
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_be32((unsigned char *) block + (t)*4)
+#define SHA_MIX(t) SHA_ROL(W((t)+13) ^ W((t)+8) ^ W((t)+2) ^ W(t), 1);
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+ unsigned int TEMP = input(t); setW(t, TEMP); \
+ E += TEMP + SHA_ROL(A,5) + (fn) + (constant); \
+ B = SHA_ROR(B, 2); } while (0)
+
+#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
+
+/* Process one 64-byte input block through the 80 SHA-1 rounds and fold the
+ * result into the context's H[0..4] state. The rounds are fully unrolled;
+ * the 16-word array is "rolled over" by the W() macro.
+ */
+static void blk_SHA1_Block(blk_SHA_CTX *ctx, const void *block)
+{
+	unsigned int A,B,C,D,E;
+	unsigned int array[16];
+
+	A = ctx->H[0];
+	B = ctx->H[1];
+	C = ctx->H[2];
+	D = ctx->H[3];
+	E = ctx->H[4];
+
+	/* Round 1 - iterations 0-15 take their input from 'block' */
+	T_0_15( 0, A, B, C, D, E);
+	T_0_15( 1, E, A, B, C, D);
+	T_0_15( 2, D, E, A, B, C);
+	T_0_15( 3, C, D, E, A, B);
+	T_0_15( 4, B, C, D, E, A);
+	T_0_15( 5, A, B, C, D, E);
+	T_0_15( 6, E, A, B, C, D);
+	T_0_15( 7, D, E, A, B, C);
+	T_0_15( 8, C, D, E, A, B);
+	T_0_15( 9, B, C, D, E, A);
+	T_0_15(10, A, B, C, D, E);
+	T_0_15(11, E, A, B, C, D);
+	T_0_15(12, D, E, A, B, C);
+	T_0_15(13, C, D, E, A, B);
+	T_0_15(14, B, C, D, E, A);
+	T_0_15(15, A, B, C, D, E);
+
+	/* Round 1 - tail. Input from 512-bit mixing array */
+	T_16_19(16, E, A, B, C, D);
+	T_16_19(17, D, E, A, B, C);
+	T_16_19(18, C, D, E, A, B);
+	T_16_19(19, B, C, D, E, A);
+
+	/* Round 2 */
+	T_20_39(20, A, B, C, D, E);
+	T_20_39(21, E, A, B, C, D);
+	T_20_39(22, D, E, A, B, C);
+	T_20_39(23, C, D, E, A, B);
+	T_20_39(24, B, C, D, E, A);
+	T_20_39(25, A, B, C, D, E);
+	T_20_39(26, E, A, B, C, D);
+	T_20_39(27, D, E, A, B, C);
+	T_20_39(28, C, D, E, A, B);
+	T_20_39(29, B, C, D, E, A);
+	T_20_39(30, A, B, C, D, E);
+	T_20_39(31, E, A, B, C, D);
+	T_20_39(32, D, E, A, B, C);
+	T_20_39(33, C, D, E, A, B);
+	T_20_39(34, B, C, D, E, A);
+	T_20_39(35, A, B, C, D, E);
+	T_20_39(36, E, A, B, C, D);
+	T_20_39(37, D, E, A, B, C);
+	T_20_39(38, C, D, E, A, B);
+	T_20_39(39, B, C, D, E, A);
+
+	/* Round 3 */
+	T_40_59(40, A, B, C, D, E);
+	T_40_59(41, E, A, B, C, D);
+	T_40_59(42, D, E, A, B, C);
+	T_40_59(43, C, D, E, A, B);
+	T_40_59(44, B, C, D, E, A);
+	T_40_59(45, A, B, C, D, E);
+	T_40_59(46, E, A, B, C, D);
+	T_40_59(47, D, E, A, B, C);
+	T_40_59(48, C, D, E, A, B);
+	T_40_59(49, B, C, D, E, A);
+	T_40_59(50, A, B, C, D, E);
+	T_40_59(51, E, A, B, C, D);
+	T_40_59(52, D, E, A, B, C);
+	T_40_59(53, C, D, E, A, B);
+	T_40_59(54, B, C, D, E, A);
+	T_40_59(55, A, B, C, D, E);
+	T_40_59(56, E, A, B, C, D);
+	T_40_59(57, D, E, A, B, C);
+	T_40_59(58, C, D, E, A, B);
+	T_40_59(59, B, C, D, E, A);
+
+	/* Round 4 */
+	T_60_79(60, A, B, C, D, E);
+	T_60_79(61, E, A, B, C, D);
+	T_60_79(62, D, E, A, B, C);
+	T_60_79(63, C, D, E, A, B);
+	T_60_79(64, B, C, D, E, A);
+	T_60_79(65, A, B, C, D, E);
+	T_60_79(66, E, A, B, C, D);
+	T_60_79(67, D, E, A, B, C);
+	T_60_79(68, C, D, E, A, B);
+	T_60_79(69, B, C, D, E, A);
+	T_60_79(70, A, B, C, D, E);
+	T_60_79(71, E, A, B, C, D);
+	T_60_79(72, D, E, A, B, C);
+	T_60_79(73, C, D, E, A, B);
+	T_60_79(74, B, C, D, E, A);
+	T_60_79(75, A, B, C, D, E);
+	T_60_79(76, E, A, B, C, D);
+	T_60_79(77, D, E, A, B, C);
+	T_60_79(78, C, D, E, A, B);
+	T_60_79(79, B, C, D, E, A);
+
+	ctx->H[0] += A;
+	ctx->H[1] += B;
+	ctx->H[2] += C;
+	ctx->H[3] += D;
+	ctx->H[4] += E;
+}
+
+/* Initialize a SHA-1 context; must be called before the first blk_SHA1_Update(). */
+void blk_SHA1_Init(blk_SHA_CTX *ctx)
+{
+	ctx->size = 0;
+
+	/* Initialize H with the magic constants (see FIPS180 for constants) */
+	ctx->H[0] = 0x67452301;
+	ctx->H[1] = 0xefcdab89;
+	ctx->H[2] = 0x98badcfe;
+	ctx->H[3] = 0x10325476;
+	ctx->H[4] = 0xc3d2e1f0;
+}
+
+/* Feed <len> bytes of <data> into the hash. Partial blocks are buffered in
+ * ctx->W; full 64-byte blocks are processed immediately.
+ */
+void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, unsigned long len)
+{
+	unsigned int lenW = ctx->size & 63; /* bytes already pending in ctx->W */
+
+	ctx->size += len;
+
+	/* Read the data into W and process blocks as they get full */
+	if (lenW) {
+		unsigned int left = 64 - lenW;
+		if (len < left)
+			left = len;
+		memcpy(lenW + (char *)ctx->W, data, left);
+		lenW = (lenW + left) & 63;
+		len -= left;
+		data = ((const char *)data + left);
+		if (lenW)
+			return;
+		blk_SHA1_Block(ctx, ctx->W);
+	}
+	while (len >= 64) {
+		blk_SHA1_Block(ctx, data);
+		data = ((const char *)data + 64);
+		len -= 64;
+	}
+	if (len)
+		memcpy(ctx->W, data, len);
+}
+
+/* Finalize the hash: append the padding and the 64-bit message length (in
+ * bits), then write the 20-byte big-endian digest to <hashout>.
+ */
+void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx)
+{
+	static const unsigned char pad[64] = { 0x80 };
+	unsigned int padlen[2];
+	int i;
+
+	/* Pad with a binary 1 (ie 0x80), then zeroes, then length */
+	padlen[0] = htonl((uint32_t)(ctx->size >> 29));
+	padlen[1] = htonl((uint32_t)(ctx->size << 3));
+
+	i = ctx->size & 63;
+	blk_SHA1_Update(ctx, pad, 1 + (63 & (55 - i)));
+	blk_SHA1_Update(ctx, padlen, 8);
+
+	/* Output hash */
+	for (i = 0; i < 5; i++)
+		put_be32(hashout + i * 4, ctx->H[i]);
+}
diff --git a/src/shctx.c b/src/shctx.c
new file mode 100644
index 0000000..be59053
--- /dev/null
+++ b/src/shctx.c
@@ -0,0 +1,320 @@
+/*
+ * shctx.c - shared context management functions for SSL
+ *
+ * Copyright (C) 2011-2012 EXCELIANCE
+ *
+ * Author: Emeric Brun - emeric@exceliance.fr
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <sys/mman.h>
+#include <arpa/inet.h>
+#include <import/ebmbtree.h>
+#include <haproxy/list.h>
+#include <haproxy/shctx.h>
+
+/*
+ * Reserve a new row if <first> is null, put it in the hotlist, set the refcount to 1
+ * or append new blocks to the row with <first> as first block if non null.
+ *
+ * Reserve blocks in the avail list and put them in the hot list
+ * Return the first block put in the hot list or NULL if not enough blocks available
+ */
+struct shared_block *shctx_row_reserve_hot(struct shared_context *shctx,
+					   struct shared_block *first, int data_len)
+{
+	struct shared_block *last = NULL, *block, *sblock;
+	struct shared_block *ret = first;
+	int remain = 1;
+
+	BUG_ON(data_len < 0);
+
+	/* Check the object size limit. */
+	if (shctx->max_obj_size > 0) {
+		if ((first && first->len + data_len > shctx->max_obj_size) ||
+		    (!first && data_len > shctx->max_obj_size))
+			goto out;
+	}
+
+	if (first) {
+		/* Check that there is some block to reserve.
+		 * In this first block of code we compute the remaining room in the
+		 * current list of blocks already reserved for this object.
+		 * We return asap if there is enough room to copy <data_len> bytes.
+		 */
+		last = first->last_reserved;
+		/* Remaining room. */
+		remain = (shctx->block_size * first->block_count - first->len);
+		if (remain) {
+			if (remain > data_len) {
+				return last ? last : first;
+			} else {
+				data_len -= remain;
+				if (data_len <= 0)
+					return last ? last : first;
+			}
+		}
+	}
+
+	shctx_wrlock(shctx);
+
+	/* not enough usable blocks */
+	if (data_len > shctx->nbav * shctx->block_size) {
+		shctx_wrunlock(shctx);
+		goto out;
+	}
+
+
+	if (data_len <= 0 || LIST_ISEMPTY(&shctx->avail)) {
+		ret = NULL;
+		shctx_wrunlock(shctx);
+		goto out;
+	}
+
+	/* pull blocks off the avail list until <data_len> bytes are covered */
+	list_for_each_entry_safe(block, sblock, &shctx->avail, list) {
+
+		/* release callback */
+		if (block->len && shctx->free_block)
+			shctx->free_block(block, shctx->cb_data);
+		block->len = 0;
+
+		if (ret) {
+			shctx_block_append_hot(shctx, ret, block);
+			if (!remain) {
+				first->last_append = block;
+				remain = 1;
+			}
+		} else {
+			/* first reserved block becomes the head of the new row */
+			ret = shctx_block_detach(shctx, block);
+			ret->len = 0;
+			ret->block_count = 0;
+			ret->last_append = NULL;
+			ret->refcount = 1;
+		}
+
+		++ret->block_count;
+
+		data_len -= shctx->block_size;
+
+		if (data_len <= 0) {
+			ret->last_reserved = block;
+			break;
+		}
+	}
+
+	shctx_wrunlock(shctx);
+
+	if (shctx->reserve_finish)
+		shctx->reserve_finish(shctx);
+
+out:
+	return ret;
+}
+
+/*
+ * if the refcount is 0 move the row to the hot list. Increment the refcount
+ */
+void shctx_row_detach(struct shared_context *shctx, struct shared_block *first)
+{
+	if (first->refcount <= 0) {
+
+		BUG_ON(!first->last_reserved);
+
+		/* Detach row from avail list, link first item's prev to last
+		 * item's next. This allows to use the LIST_SPLICE_END_DETACHED
+		 * macro. */
+		first->list.p->n = first->last_reserved->list.n;
+		first->last_reserved->list.n->p = first->list.p;
+
+		first->list.p = &first->last_reserved->list;
+		first->last_reserved->list.n = &first->list;
+
+		/* these blocks are no longer available for reservation */
+		shctx->nbav -= first->block_count;
+	}
+
+	first->refcount++;
+}
+
+/*
+ * decrement the refcount and move the row at the end of the avail list if it reaches 0.
+ */
+void shctx_row_reattach(struct shared_context *shctx, struct shared_block *first)
+{
+	first->refcount--;
+
+	if (first->refcount <= 0) {
+
+		BUG_ON(!first->last_reserved);
+
+		/* Reattach to avail list */
+		first->list.p = &first->last_reserved->list;
+		LIST_SPLICE_END_DETACHED(&shctx->avail, &first->list);
+
+		/* these blocks become available for reservation again */
+		shctx->nbav += first->block_count;
+	}
+}
+
+
+/*
+ * Append data in the row if there is enough space.
+ * The row should be in the hot list
+ *
+ * Return the amount of appended data if ret >= 0
+ * or how much more space it needs to contain the data if < 0.
+ */
+int shctx_row_data_append(struct shared_context *shctx, struct shared_block *first,
+			  unsigned char *data, int len)
+{
+	int remain, start;
+	struct shared_block *block;
+
+	/* return -<len> needed to work */
+	if (len > first->block_count * shctx->block_size - first->len)
+		return (first->block_count * shctx->block_size - first->len) - len;
+
+	/* resume writing at the last partially-filled block, if any */
+	block = first->last_append ? first->last_append : first;
+	do {
+		/* end of copy */
+		if (len <= 0)
+			break;
+
+		/* remaining written bytes in the current block. */
+		remain = (shctx->block_size * first->block_count - first->len) % shctx->block_size;
+		BUG_ON(remain < 0);
+
+		/* if remain == 0, previous buffers are full, or first->len == 0 */
+		if (!remain) {
+			remain = shctx->block_size;
+			start = 0;
+		}
+		else {
+			/* start must be calculated before remain is modified */
+			start = shctx->block_size - remain;
+			BUG_ON(start < 0);
+		}
+
+		/* must not try to copy more than len */
+		remain = MIN(remain, len);
+
+		memcpy(block->data + start, data, remain);
+
+		data += remain;
+		len -= remain;
+		first->len += remain; /* update len in the head of the row */
+		first->last_append = block;
+
+		block = LIST_ELEM(block->list.n, struct shared_block*, list);
+	} while (block != first);
+
+	return len;
+}
+
+/*
+ * Copy <len> data from a row of blocks, return the remaining data to copy
+ * If 0 is returned, the full data has successfully been copied
+ *
+ * The row should be in the hot list
+ */
+int shctx_row_data_get(struct shared_context *shctx, struct shared_block *first,
+		       unsigned char *dst, int offset, int len)
+{
+	int count = 0, size = 0, start = -1;
+	struct shared_block *block;
+
+	/* can't copy more */
+	if (len > first->len)
+		len = first->len;
+
+	block = first;
+	count = 0;
+	/* Pass through the blocks to copy them */
+	do {
+		if (count >= first->block_count || len <= 0)
+			break;
+
+		count++;
+		/* continue until we are in right block
+		   corresponding to the offset */
+		if (count < offset / shctx->block_size + 1)
+			continue;
+
+		/* on the first block, data won't necessarily begin at offset 0 */
+		if (start == -1)
+			start = offset - (count - 1) * shctx->block_size;
+
+		BUG_ON(start < 0);
+
+		/* size can be lower than a block when copying the last block */
+		size = MIN(shctx->block_size - start, len);
+		BUG_ON(size < 0);
+
+		memcpy(dst, block->data + start, size);
+		dst += size;
+		len -= size;
+		start = 0; /* subsequent blocks are read from their beginning */
+
+		block = LIST_ELEM(block->list.n, struct shared_block*, list);
+	} while (block != first);
+	return len;
+}
+
+/* Allocate shared memory context.
+ * <maxblocks> is maximum blocks.
+ * If <maxblocks> is less than or equal to 0, ssl cache is disabled.
+ * Returns: -1 on alloc failure, <maxblocks> if it performs context alloc,
+ * and 0 if cache is already allocated.
+ */
+int shctx_init(struct shared_context **orig_shctx, int maxblocks, int blocksize,
+	       unsigned int maxobjsz, int extra)
+{
+	int i;
+	struct shared_context *shctx;
+	int ret;
+	void *cur;
+	int maptype = MAP_SHARED;
+
+	if (maxblocks <= 0)
+		return 0;
+
+	/* make sure to align the records on a pointer size */
+	blocksize = (blocksize + sizeof(void *) - 1) & -sizeof(void *);
+	extra     = (extra     + sizeof(void *) - 1) & -sizeof(void *);
+
+	/* one anonymous mapping holds the context, the extra area and all blocks */
+	shctx = (struct shared_context *)mmap(NULL, sizeof(struct shared_context) + extra + (maxblocks * (sizeof(struct shared_block) + blocksize)),
+	                                      PROT_READ | PROT_WRITE, maptype | MAP_ANON, -1, 0);
+	if (!shctx || shctx == MAP_FAILED) {
+		shctx = NULL;
+		ret = SHCTX_E_ALLOC_CACHE;
+		goto err;
+	}
+
+	shctx->nbav = 0;
+
+	LIST_INIT(&shctx->avail);
+	HA_RWLOCK_INIT(&shctx->lock);
+
+	shctx->block_size = blocksize;
+	shctx->max_obj_size = maxobjsz == (unsigned int)-1 ? 0 : maxobjsz;
+
+	/* init the free blocks after the shared context struct */
+	cur = (void *)shctx + sizeof(struct shared_context) + extra;
+	for (i = 0; i < maxblocks; i++) {
+		struct shared_block *cur_block = (struct shared_block *)cur;
+		cur_block->len = 0;
+		cur_block->refcount = 0;
+		cur_block->block_count = 1;
+		LIST_APPEND(&shctx->avail, &cur_block->list);
+		shctx->nbav++;
+		cur += sizeof(struct shared_block) + blocksize;
+	}
+	ret = maxblocks;
+
+err:
+	*orig_shctx = shctx;
+	return ret;
+}
+
diff --git a/src/signal.c b/src/signal.c
new file mode 100644
index 0000000..1bb60eb
--- /dev/null
+++ b/src/signal.c
@@ -0,0 +1,284 @@
+/*
+ * Asynchronous signal delivery functions.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <signal.h>
+#include <string.h>
+
+#include <haproxy/errors.h>
+#include <haproxy/signal.h>
+#include <haproxy/task.h>
+
+/* Principle : we keep an in-order list of the first occurrence of all received
+ * signals. All occurrences of a same signal are grouped though. The signal
+ * queue does not need to be deeper than the number of signals we can handle.
+ * The handlers will be called asynchronously with the signal number. They can
+ * check themselves the number of calls by checking the descriptor this signal.
+ */
+
+int signal_queue_len; /* length of signal queue, <= MAX_SIGNAL (1 entry per signal max) */
+int signal_queue[MAX_SIGNAL]; /* in-order queue of received signals */
+struct signal_descriptor signal_state[MAX_SIGNAL];
+sigset_t blocked_sig;
+int signal_pending = 0; /* non-zero if t least one signal remains unprocessed */
+
+DECLARE_STATIC_POOL(pool_head_sig_handlers, "sig_handlers", sizeof(struct sig_handler));
+
+/* Common signal handler, used by all signals. Received signals are queued.
+ * Signal number zero has a specific status, as it cannot be delivered by the
+ * system, any function may call it to perform asynchronous signal delivery.
+ */
+void signal_handler(int sig)
+{
+ if (sig < 0 || sig >= MAX_SIGNAL) {
+ /* unhandled signal */
+ signal(sig, SIG_IGN);
+ qfprintf(stderr, "Received unhandled signal %d. Signal has been disabled.\n", sig);
+ return;
+ }
+
+ if (!signal_state[sig].count) {
+ /* signal was not queued yet */
+ if (signal_queue_len < MAX_SIGNAL)
+ signal_queue[signal_queue_len++] = sig;
+ else
+ qfprintf(stderr, "Signal %d : signal queue is unexpectedly full.\n", sig);
+ }
+
+ signal_state[sig].count++;
+ if (sig)
+ signal(sig, signal_handler); /* re-arm signal */
+
+ /* If the thread is TH_FL_SLEEPING we need to wake it */
+ wake_thread(tid);
+}
+
+/* Call handlers of all pending signals and clear counts and queue length. The
+ * handlers may unregister themselves by calling signal_register() while they
+ * are called, just like it is done with normal signal handlers.
+ * Note that it is more efficient to call the inline version which checks the
+ * queue length before getting here.
+ */
+void __signal_process_queue()
+{
+ int sig, cur_pos = 0;
+ struct signal_descriptor *desc;
+ sigset_t old_sig;
+
+ /* block signal delivery during processing */
+ ha_sigmask(SIG_SETMASK, &blocked_sig, &old_sig);
+
+ /* It is important that we scan the queue forwards so that we can
+ * catch any signal that would have been queued by another signal
+ * handler. That allows real signal handlers to redistribute signals
+ * to tasks subscribed to signal zero.
+ */
+ for (cur_pos = 0; cur_pos < signal_queue_len; cur_pos++) {
+ sig = signal_queue[cur_pos];
+ desc = &signal_state[sig];
+ if (desc->count) {
+ struct sig_handler *sh, *shb;
+ list_for_each_entry_safe(sh, shb, &desc->handlers, list) {
+ if ((sh->flags & SIG_F_TYPE_FCT) && sh->handler)
+ ((void (*)(struct sig_handler *))sh->handler)(sh);
+ else if ((sh->flags & SIG_F_TYPE_TASK) && sh->handler)
+ task_wakeup(sh->handler, TASK_WOKEN_SIGNAL);
+ }
+ desc->count = 0;
+ }
+ }
+ signal_queue_len = 0;
+
+ /* restore signal delivery */
+ ha_sigmask(SIG_SETMASK, &old_sig, NULL);
+}
+
+/* perform minimal initializations */
+static void signal_init()
+{
+ int sig;
+
+ signal_queue_len = 0;
+ memset(signal_queue, 0, sizeof(signal_queue));
+ memset(signal_state, 0, sizeof(signal_state));
+
+ sigfillset(&blocked_sig);
+ sigdelset(&blocked_sig, SIGPROF);
+ /* man sigprocmask: If SIGBUS, SIGFPE, SIGILL, or SIGSEGV are
+ generated while they are blocked, the result is undefined, unless
+ the signal was generated by kill(2),
+ sigqueue(3), or raise(3).
+ Do not ignore WDTSIG or DEBUGSIG either, or it may deadlock the
+ watchdog */
+ sigdelset(&blocked_sig, SIGBUS);
+ sigdelset(&blocked_sig, SIGFPE);
+ sigdelset(&blocked_sig, SIGILL);
+ sigdelset(&blocked_sig, SIGSEGV);
+#ifdef DEBUGSIG
+ sigdelset(&blocked_sig, DEBUGSIG);
+#endif
+#ifdef WDTSIG
+ sigdelset(&blocked_sig, WDTSIG);
+#endif
+ for (sig = 0; sig < MAX_SIGNAL; sig++)
+ LIST_INIT(&signal_state[sig].handlers);
+}
+
+/*
+ * This function should be called to unblock all signals
+ */
+void haproxy_unblock_signals()
+{
+ sigset_t set;
+
+ /* Ensure signals are not blocked. Some shells or service managers may
+ * accidentally block all of our signals unfortunately, causing lots of
+ * zombie processes to remain in the background during reloads.
+ */
+ sigemptyset(&set);
+ ha_sigmask(SIG_SETMASK, &set, NULL);
+}
+
+/* releases all registered signal handlers */
+void deinit_signals()
+{
+ int sig;
+ struct sig_handler *sh, *shb;
+
+ for (sig = 0; sig < MAX_SIGNAL; sig++) {
+ if (sig != SIGPROF)
+ signal(sig, SIG_DFL);
+ list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
+ LIST_DELETE(&sh->list);
+ pool_free(pool_head_sig_handlers, sh);
+ }
+ }
+}
+
+/* Register a function and an integer argument on a signal. A pointer to the
+ * newly allocated sig_handler is returned, or NULL in case of any error. The
+ * caller is responsible for unregistering the function when not used anymore.
+ * Note that passing a NULL as the function pointer enables interception of the
+ * signal without processing, which is identical to SIG_IGN. If the signal is
+ * zero (which the system cannot deliver), only internal functions will be able
+ * to notify the registered functions.
+ */
+struct sig_handler *signal_register_fct(int sig, void (*fct)(struct sig_handler *), int arg)
+{
+ struct sig_handler *sh;
+
+ if (sig < 0 || sig >= MAX_SIGNAL)
+ return NULL;
+
+ if (sig)
+ signal(sig, fct ? signal_handler : SIG_IGN);
+
+ if (!fct)
+ return NULL;
+
+ sh = pool_alloc(pool_head_sig_handlers);
+ if (!sh)
+ return NULL;
+
+ sh->handler = fct;
+ sh->arg = arg;
+ sh->flags = SIG_F_TYPE_FCT;
+ LIST_APPEND(&signal_state[sig].handlers, &sh->list);
+ return sh;
+}
+
+/* Register a task and a wake-up reason on a signal. A pointer to the newly
+ * allocated sig_handler is returned, or NULL in case of any error. The caller
+ * is responsible for unregistering the task when not used anymore. Note that
+ * passing a NULL as the task pointer enables interception of the signal
+ * without processing, which is identical to SIG_IGN. If the signal is zero
+ * (which the system cannot deliver), only internal functions will be able to
+ * notify the registered functions.
+ */
+struct sig_handler *signal_register_task(int sig, struct task *task, int reason)
+{
+ struct sig_handler *sh;
+
+ if (sig < 0 || sig >= MAX_SIGNAL)
+ return NULL;
+
+ if (sig)
+ signal(sig, signal_handler);
+
+ if (!task)
+ return NULL;
+
+ sh = pool_alloc(pool_head_sig_handlers);
+ if (!sh)
+ return NULL;
+
+ sh->handler = task;
+ sh->arg = reason & ~TASK_WOKEN_ANY;
+ sh->flags = SIG_F_TYPE_TASK;
+ LIST_APPEND(&signal_state[sig].handlers, &sh->list);
+ return sh;
+}
+
+/* Immediately unregister a handler so that no further signals may be delivered
+ * to it. The struct is released so the caller may not reference it anymore.
+ */
+void signal_unregister_handler(struct sig_handler *handler)
+{
+ LIST_DELETE(&handler->list);
+ pool_free(pool_head_sig_handlers, handler);
+}
+
+/* Immediately unregister a handler so that no further signals may be delivered
+ * to it. The handler struct does not need to be known, only the function or
+ * task pointer. This method is expensive because it scans all the list, so it
+ * should only be used for rare cases (eg: exit). The struct is released so the
+ * caller may not reference it anymore.
+ */
+void signal_unregister_target(int sig, void *target)
+{
+ struct sig_handler *sh, *shb;
+
+ if (sig < 0 || sig >= MAX_SIGNAL)
+ return;
+
+ if (!target)
+ return;
+
+ list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
+ if (sh->handler == target) {
+ LIST_DELETE(&sh->list);
+ pool_free(pool_head_sig_handlers, sh);
+ break;
+ }
+ }
+}
+
+/*
+ * Immedialtely unregister every handler assigned to a signal <sig>.
+ * Once the handler list is empty, the signal is ignored with SIG_IGN.
+ */
+
+void signal_unregister(int sig)
+{
+ struct sig_handler *sh, *shb;
+
+ if (sig < 0 || sig >= MAX_SIGNAL)
+ return;
+
+ list_for_each_entry_safe(sh, shb, &signal_state[sig].handlers, list) {
+ LIST_DELETE(&sh->list);
+ pool_free(pool_head_sig_handlers, sh);
+ }
+
+ signal(sig, SIG_IGN);
+}
+
+INITCALL0(STG_PREPARE, signal_init);
diff --git a/src/sink.c b/src/sink.c
new file mode 100644
index 0000000..66c2b8c
--- /dev/null
+++ b/src/sink.c
@@ -0,0 +1,1406 @@
+/*
+ * Event sink management
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <sys/mman.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/ring.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/signal.h>
+#include <haproxy/sink.h>
+#include <haproxy/stconn.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+
+struct list sink_list = LIST_HEAD_INIT(sink_list);
+
+/* sink proxies list */
+struct proxy *sink_proxies_list;
+
+struct sink *cfg_sink;
+
+struct sink *sink_find(const char *name)
+{
+ struct sink *sink;
+
+ list_for_each_entry(sink, &sink_list, sink_list)
+ if (strcmp(sink->name, name) == 0)
+ return sink;
+ return NULL;
+}
+
+/* creates a new sink and adds it to the list, it's still generic and not fully
+ * initialized. Returns NULL on allocation failure. If another one already
+ * exists with the same name, it will be returned. The caller can detect it as
+ * a newly created one has type SINK_TYPE_NEW.
+ */
+static struct sink *__sink_new(const char *name, const char *desc, int fmt)
+{
+ struct sink *sink;
+
+ sink = sink_find(name);
+ if (sink)
+ goto end;
+
+ sink = calloc(1, sizeof(*sink));
+ if (!sink)
+ goto end;
+
+ sink->name = strdup(name);
+ if (!sink->name)
+ goto err;
+
+ sink->desc = strdup(desc);
+ if (!sink->desc)
+ goto err;
+
+ sink->fmt = fmt;
+ sink->type = SINK_TYPE_NEW;
+ sink->maxlen = BUFSIZE;
+ /* address will be filled by the caller if needed */
+ sink->ctx.fd = -1;
+ sink->ctx.dropped = 0;
+ HA_RWLOCK_INIT(&sink->ctx.lock);
+ LIST_APPEND(&sink_list, &sink->sink_list);
+ end:
+ return sink;
+
+ err:
+ ha_free(&sink->name);
+ ha_free(&sink->desc);
+ ha_free(&sink);
+
+ return NULL;
+}
+
+/* creates a sink called <name> of type FD associated to fd <fd>, format <fmt>,
+ * and description <desc>. Returns NULL on allocation failure or conflict.
+ * Perfect duplicates are merged (same type, fd, and name).
+ */
+struct sink *sink_new_fd(const char *name, const char *desc, enum log_fmt fmt, int fd)
+{
+ struct sink *sink;
+
+ sink = __sink_new(name, desc, fmt);
+ if (!sink || (sink->type == SINK_TYPE_FD && sink->ctx.fd == fd))
+ goto end;
+
+ if (sink->type != SINK_TYPE_NEW) {
+ sink = NULL;
+ goto end;
+ }
+
+ sink->type = SINK_TYPE_FD;
+ sink->ctx.fd = fd;
+ end:
+ return sink;
+}
+
+/* creates a sink called <name> of type BUF of size <size>, format <fmt>,
+ * and description <desc>. Returns NULL on allocation failure or conflict.
+ * Perfect duplicates are merged (same type and name). If sizes differ, the
+ * largest one is kept.
+ */
+struct sink *sink_new_buf(const char *name, const char *desc, enum log_fmt fmt, size_t size)
+{
+ struct sink *sink;
+
+ sink = __sink_new(name, desc, fmt);
+ if (!sink)
+ goto fail;
+
+ if (sink->type == SINK_TYPE_BUFFER) {
+ /* such a buffer already exists, we may have to resize it */
+ if (!ring_resize(sink->ctx.ring, size))
+ goto fail;
+ goto end;
+ }
+
+ if (sink->type != SINK_TYPE_NEW) {
+ /* already exists of another type */
+ goto fail;
+ }
+
+ sink->ctx.ring = ring_new(size);
+ if (!sink->ctx.ring) {
+ LIST_DELETE(&sink->sink_list);
+ free(sink->name);
+ free(sink->desc);
+ free(sink);
+ goto fail;
+ }
+
+ sink->type = SINK_TYPE_BUFFER;
+ end:
+ return sink;
+ fail:
+ return NULL;
+}
+
+/* tries to send <nmsg> message parts from message array <msg> to sink <sink>.
+ * Formatting according to the sink's preference is done here, unless sink->fmt
+ * is unspecified, in which case the caller formatting will be used instead.
+ * Lost messages are NOT accounted for. It is preferable to call sink_write()
+ * instead which will also try to emit the number of dropped messages when there
+ * are any.
+ *
+ * It will stop writing at <maxlen> instead of sink->maxlen if <maxlen> is
+ * positive and inferior to sink->maxlen.
+ *
+ * It returns >0 if it could write anything, <=0 otherwise.
+ */
+ ssize_t __sink_write(struct sink *sink, struct log_header hdr,
+ size_t maxlen, const struct ist msg[], size_t nmsg)
+ {
+ struct ist *pfx = NULL;
+ size_t npfx = 0;
+
+ if (sink->fmt == LOG_FORMAT_RAW)
+ goto send;
+
+ if (sink->fmt != LOG_FORMAT_UNSPEC)
+ hdr.format = sink->fmt; /* sink format prevails over log one */
+ pfx = build_log_header(hdr, &npfx);
+
+send:
+ if (!maxlen)
+ maxlen = ~0;
+ if (sink->type == SINK_TYPE_FD) {
+ return fd_write_frag_line(sink->ctx.fd, MIN(maxlen, sink->maxlen), pfx, npfx, msg, nmsg, 1);
+ }
+ else if (sink->type == SINK_TYPE_BUFFER) {
+ return ring_write(sink->ctx.ring, MIN(maxlen, sink->maxlen), pfx, npfx, msg, nmsg);
+ }
+ return 0;
+}
+
+/* Tries to emit a message indicating the number of dropped events.
+ * The log header of the original message that we tried to emit is reused
+ * here with the only difference that we override the log level. This is
+ * possible since the announce message will be sent from the same context.
+ *
+ * In case of success, the amount of drops is reduced by as much. It's supposed
+ * to be called under an exclusive lock on the sink to avoid multiple producers
+ * doing the same. On success, >0 is returned, otherwise <=0 on failure.
+ */
+int sink_announce_dropped(struct sink *sink, struct log_header hdr)
+{
+ unsigned int dropped;
+ struct buffer msg;
+ struct ist msgvec[1];
+ char logbuf[64];
+
+ while (unlikely((dropped = sink->ctx.dropped) > 0)) {
+ chunk_init(&msg, logbuf, sizeof(logbuf));
+ chunk_printf(&msg, "%u event%s dropped", dropped, dropped > 1 ? "s" : "");
+ msgvec[0] = ist2(msg.area, msg.data);
+
+ hdr.level = LOG_NOTICE; /* override level but keep original log header data */
+
+ if (__sink_write(sink, hdr, 0, msgvec, 1) <= 0)
+ return 0;
+ /* success! */
+ HA_ATOMIC_SUB(&sink->ctx.dropped, dropped);
+ }
+ return 1;
+}
+
+/* parse the "show events" command, returns 1 if a message is returned, otherwise zero */
+static int cli_parse_show_events(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct sink *sink;
+ uint ring_flags;
+ int arg;
+
+ args++; // make args[1] the 1st arg
+
+ if (!*args[1]) {
+ /* no arg => report the list of supported sink */
+ chunk_printf(&trash, "Supported events sinks are listed below. Add -w(wait), -n(new). Any key to stop\n");
+ list_for_each_entry(sink, &sink_list, sink_list) {
+ chunk_appendf(&trash, " %-10s : type=%s, %u dropped, %s\n",
+ sink->name,
+ sink->type == SINK_TYPE_NEW ? "init" :
+ sink->type == SINK_TYPE_FD ? "fd" :
+ sink->type == SINK_TYPE_BUFFER ? "buffer" : "?",
+ sink->ctx.dropped, sink->desc);
+ }
+
+ trash.area[trash.data] = 0;
+ return cli_msg(appctx, LOG_WARNING, trash.area);
+ }
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ sink = sink_find(args[1]);
+ if (!sink)
+ return cli_err(appctx, "No such event sink");
+
+ if (sink->type != SINK_TYPE_BUFFER)
+ return cli_msg(appctx, LOG_NOTICE, "Nothing to report for this sink");
+
+ ring_flags = 0;
+ for (arg = 2; *args[arg]; arg++) {
+ if (strcmp(args[arg], "-w") == 0)
+ ring_flags |= RING_WF_WAIT_MODE;
+ else if (strcmp(args[arg], "-n") == 0)
+ ring_flags |= RING_WF_SEEK_NEW;
+ else if (strcmp(args[arg], "-nw") == 0 || strcmp(args[arg], "-wn") == 0)
+ ring_flags |= RING_WF_WAIT_MODE | RING_WF_SEEK_NEW;
+ else
+ return cli_err(appctx, "unknown option");
+ }
+ return ring_attach_cli(sink->ctx.ring, appctx, ring_flags);
+}
+
+/* Pre-configures a ring proxy to emit connections */
+void sink_setup_proxy(struct proxy *px)
+{
+ px->last_change = ns_to_sec(now_ns);
+ px->cap = PR_CAP_BE;
+ px->maxconn = 0;
+ px->conn_retries = 1;
+ px->timeout.server = TICK_ETERNITY;
+ px->timeout.client = TICK_ETERNITY;
+ px->timeout.connect = TICK_ETERNITY;
+ px->accept = NULL;
+ px->options2 |= PR_O2_INDEPSTR | PR_O2_SMARTCON | PR_O2_SMARTACC;
+ px->next = sink_proxies_list;
+ sink_proxies_list = px;
+}
+
+/*
+ * IO Handler to handle message push to syslog tcp server.
+ * It takes its context from appctx->svcctx.
+ */
+static void sink_forward_io_handler(struct appctx *appctx)
+{
+ struct stconn *sc = appctx_sc(appctx);
+ struct sink_forward_target *sft = appctx->svcctx;
+ struct sink *sink = sft->sink;
+ struct ring *ring = sink->ctx.ring;
+ struct buffer *buf = &ring->buf;
+ uint64_t msg_len;
+ size_t len, cnt, ofs, last_ofs;
+ int ret = 0;
+
+ if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
+ goto out;
+
+ /* if stopping was requested, close immediately */
+ if (unlikely(stopping))
+ goto close;
+
+ /* if the connection is not established, inform the stream that we want
+ * to be notified whenever the connection completes.
+ */
+ if (sc_opposite(sc)->state < SC_ST_EST) {
+ applet_need_more_data(appctx);
+ se_need_remote_conn(appctx->sedesc);
+ applet_have_more_data(appctx);
+ goto out;
+ }
+
+ HA_SPIN_LOCK(SFT_LOCK, &sft->lock);
+ if (appctx != sft->appctx) {
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+ goto close;
+ }
+
+ HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+ LIST_DEL_INIT(&appctx->wait_entry);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+
+ HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
+
+ /* explanation for the initialization below: it would be better to do
+ * this in the parsing function but this would occasionally result in
+ * dropped events because we'd take a reference on the oldest message
+ * and keep it while being scheduled. Thus instead let's take it the
+ * first time we enter here so that we have a chance to pass many
+ * existing messages before grabbing a reference to a location. This
+ * value cannot be produced after initialization.
+ */
+ if (unlikely(sft->ofs == ~0)) {
+ sft->ofs = b_peek_ofs(buf, 0);
+ HA_ATOMIC_INC(b_orig(buf) + sft->ofs);
+ }
+
+ /* we were already there, adjust the offset to be relative to
+ * the buffer's head and remove us from the counter.
+ */
+ ofs = sft->ofs - b_head_ofs(buf);
+ if (sft->ofs < b_head_ofs(buf))
+ ofs += b_size(buf);
+ BUG_ON(ofs >= buf->size);
+ HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+ /* in this loop, ofs always points to the counter byte that precedes
+ * the message so that we can take our reference there if we have to
+ * stop before the end (ret=0).
+ */
+ ret = 1;
+ while (ofs + 1 < b_data(buf)) {
+ cnt = 1;
+ len = b_peek_varint(buf, ofs + cnt, &msg_len);
+ if (!len)
+ break;
+ cnt += len;
+ BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+
+ if (unlikely(msg_len + 1 > b_size(&trash))) {
+ /* too large a message to ever fit, let's skip it */
+ ofs += cnt + msg_len;
+ continue;
+ }
+
+ chunk_reset(&trash);
+ len = b_getblk(buf, trash.area, msg_len, ofs + cnt);
+ trash.data += len;
+ trash.area[trash.data++] = '\n';
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ ret = 0;
+ break;
+ }
+ ofs += cnt + msg_len;
+ }
+
+ HA_ATOMIC_INC(b_peek(buf, ofs));
+ last_ofs = b_tail_ofs(buf);
+ sft->ofs = b_peek_ofs(buf, ofs);
+
+ HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
+
+ if (ret) {
+ /* let's be woken up once new data arrive */
+ HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+ LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+ ofs = b_tail_ofs(buf);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+ if (ofs != last_ofs) {
+ /* more data was added into the ring between the
+ * unlock and the lock, and the writer might not
+ * have seen us. We need to reschedule a read.
+ */
+ applet_have_more_data(appctx);
+ } else
+ applet_have_no_more_data(appctx);
+ }
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+
+out:
+ /* always drain data from server */
+ co_skip(sc_oc(sc), sc_oc(sc)->output);
+ return;
+
+close:
+ se_fl_set(appctx->sedesc, SE_FL_EOS|SE_FL_EOI);
+}
+
+/*
+ * IO Handler to handle message push to syslog tcp server
+ * using octet counting frames
+ * It takes its context from appctx->svcctx.
+ */
+static void sink_forward_oc_io_handler(struct appctx *appctx)
+{
+ struct stconn *sc = appctx_sc(appctx);
+ struct sink_forward_target *sft = appctx->svcctx;
+ struct sink *sink = sft->sink;
+ struct ring *ring = sink->ctx.ring;
+ struct buffer *buf = &ring->buf;
+ uint64_t msg_len;
+ size_t len, cnt, ofs;
+ int ret = 0;
+ char *p;
+
+ if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW))))
+ goto out;
+
+ /* if stopping was requested, close immediately */
+ if (unlikely(stopping))
+ goto close;
+
+ /* if the connection is not established, inform the stream that we want
+ * to be notified whenever the connection completes.
+ */
+ if (sc_opposite(sc)->state < SC_ST_EST) {
+ applet_need_more_data(appctx);
+ se_need_remote_conn(appctx->sedesc);
+ applet_have_more_data(appctx);
+ goto out;
+ }
+
+ HA_SPIN_LOCK(SFT_LOCK, &sft->lock);
+ if (appctx != sft->appctx) {
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+ goto close;
+ }
+
+ HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+ LIST_DEL_INIT(&appctx->wait_entry);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+
+ HA_RWLOCK_RDLOCK(RING_LOCK, &ring->lock);
+
+ /* explanation for the initialization below: it would be better to do
+ * this in the parsing function but this would occasionally result in
+ * dropped events because we'd take a reference on the oldest message
+ * and keep it while being scheduled. Thus instead let's take it the
+ * first time we enter here so that we have a chance to pass many
+ * existing messages before grabbing a reference to a location. This
+ * value cannot be produced after initialization.
+ */
+ if (unlikely(sft->ofs == ~0)) {
+ sft->ofs = b_peek_ofs(buf, 0);
+ HA_ATOMIC_INC(b_orig(buf) + sft->ofs);
+ }
+
+ /* we were already there, adjust the offset to be relative to
+ * the buffer's head and remove us from the counter.
+ */
+ ofs = sft->ofs - b_head_ofs(buf);
+ if (sft->ofs < b_head_ofs(buf))
+ ofs += b_size(buf);
+ BUG_ON(ofs >= buf->size);
+ HA_ATOMIC_DEC(b_peek(buf, ofs));
+
+ /* in this loop, ofs always points to the counter byte that precedes
+ * the message so that we can take our reference there if we have to
+ * stop before the end (ret=0).
+ */
+ ret = 1;
+ while (ofs + 1 < b_data(buf)) {
+ cnt = 1;
+ len = b_peek_varint(buf, ofs + cnt, &msg_len);
+ if (!len)
+ break;
+ cnt += len;
+ BUG_ON(msg_len + ofs + cnt + 1 > b_data(buf));
+
+ chunk_reset(&trash);
+ p = ulltoa(msg_len, trash.area, b_size(&trash));
+ if (p) {
+ trash.data = (p - trash.area) + 1;
+ *p = ' ';
+ }
+
+ if (!p || (trash.data + msg_len > b_size(&trash))) {
+ /* too large a message to ever fit, let's skip it */
+ ofs += cnt + msg_len;
+ continue;
+ }
+
+ trash.data += b_getblk(buf, p + 1, msg_len, ofs + cnt);
+
+ if (applet_putchk(appctx, &trash) == -1) {
+ ret = 0;
+ break;
+ }
+ ofs += cnt + msg_len;
+ }
+
+ HA_ATOMIC_INC(b_peek(buf, ofs));
+ sft->ofs = b_peek_ofs(buf, ofs);
+
+ HA_RWLOCK_RDUNLOCK(RING_LOCK, &ring->lock);
+
+ if (ret) {
+ /* let's be woken up once new data arrive */
+ HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
+ LIST_APPEND(&ring->waiters, &appctx->wait_entry);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
+ applet_have_no_more_data(appctx);
+ }
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+
+ out:
+ /* always drain data from server */
+ co_skip(sc_oc(sc), sc_oc(sc)->output);
+ return;
+
+close:
+ se_fl_set(appctx->sedesc, SE_FL_EOS|SE_FL_EOI);
+ goto out;
+}
+
+void __sink_forward_session_deinit(struct sink_forward_target *sft)
+{
+ struct sink *sink;
+
+ sink = sft->sink;
+ if (!sink)
+ return;
+
+ HA_RWLOCK_WRLOCK(RING_LOCK, &sink->ctx.ring->lock);
+ LIST_DEL_INIT(&sft->appctx->wait_entry);
+ HA_RWLOCK_WRUNLOCK(RING_LOCK, &sink->ctx.ring->lock);
+
+ sft->appctx = NULL;
+ task_wakeup(sink->forward_task, TASK_WOKEN_MSG);
+}
+
+static int sink_forward_session_init(struct appctx *appctx)
+{
+ struct sink_forward_target *sft = appctx->svcctx;
+ struct stream *s;
+ struct sockaddr_storage *addr = NULL;
+
+ if (!sockaddr_alloc(&addr, &sft->srv->addr, sizeof(sft->srv->addr)))
+ goto out_error;
+ /* srv port should be learned from srv->svc_port not from srv->addr */
+ set_host_port(addr, sft->srv->svc_port);
+
+ if (appctx_finalize_startup(appctx, sft->srv->proxy, &BUF_NULL) == -1)
+ goto out_free_addr;
+
+ s = appctx_strm(appctx);
+ s->scb->dst = addr;
+ s->scb->flags |= (SC_FL_RCV_ONCE|SC_FL_NOLINGER);
+
+ s->target = &sft->srv->obj_type;
+ s->flags = SF_ASSIGNED;
+
+ s->do_log = NULL;
+ s->uniq_id = 0;
+
+ applet_expect_no_data(appctx);
+ sft->appctx = appctx;
+
+ return 0;
+
+ out_free_addr:
+ sockaddr_free(&addr);
+ out_error:
+ return -1;
+}
+
+static void sink_forward_session_release(struct appctx *appctx)
+{
+ struct sink_forward_target *sft = appctx->svcctx;
+
+ if (!sft)
+ return;
+
+ HA_SPIN_LOCK(SFT_LOCK, &sft->lock);
+ if (sft->appctx == appctx)
+ __sink_forward_session_deinit(sft);
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+}
+
+static struct applet sink_forward_applet = {
+ .obj_type = OBJ_TYPE_APPLET,
+ .name = "<SINKFWD>", /* used for logging */
+ .fct = sink_forward_io_handler,
+ .init = sink_forward_session_init,
+ .release = sink_forward_session_release,
+};
+
+static struct applet sink_forward_oc_applet = {
+ .obj_type = OBJ_TYPE_APPLET,
+ .name = "<SINKFWDOC>", /* used for logging */
+ .fct = sink_forward_oc_io_handler,
+ .init = sink_forward_session_init,
+ .release = sink_forward_session_release,
+};
+
+/*
+ * Create a new peer session in assigned state (connect will start automatically)
+ * It sets its context into appctx->svcctx.
+ */
+static struct appctx *sink_forward_session_create(struct sink *sink, struct sink_forward_target *sft)
+{
+ struct appctx *appctx;
+ struct applet *applet = &sink_forward_applet;
+
+ if (sft->srv->log_proto == SRV_LOG_PROTO_OCTET_COUNTING)
+ applet = &sink_forward_oc_applet;
+
+ appctx = appctx_new_here(applet, NULL);
+ if (!appctx)
+ goto out_close;
+ appctx->svcctx = (void *)sft;
+
+ if (appctx_init(appctx) == -1)
+ goto out_free_appctx;
+
+ return appctx;
+
+ /* Error unrolling */
+ out_free_appctx:
+ appctx_free_on_early_error(appctx);
+ out_close:
+ return NULL;
+}
+
+/*
+ * Task to handle connections to forward servers
+ */
+static struct task *process_sink_forward(struct task * task, void *context, unsigned int state)
+{
+ struct sink *sink = (struct sink *)context;
+ struct sink_forward_target *sft = sink->sft;
+
+ task->expire = TICK_ETERNITY;
+
+ if (!stopping) {
+ while (sft) {
+ HA_SPIN_LOCK(SFT_LOCK, &sft->lock);
+ /* if appctx is NULL, start a new session */
+ if (!sft->appctx)
+ sft->appctx = sink_forward_session_create(sink, sft);
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+ sft = sft->next;
+ }
+ }
+ else {
+ while (sft) {
+ HA_SPIN_LOCK(SFT_LOCK, &sft->lock);
+ /* awake applet to perform a clean close */
+ if (sft->appctx)
+ appctx_wakeup(sft->appctx);
+ HA_SPIN_UNLOCK(SFT_LOCK, &sft->lock);
+ sft = sft->next;
+ }
+ }
+
+ return task;
+}
+/*
+ * Init task to manage connections to forward servers
+ *
+ * returns 0 in case of error.
+ */
+int sink_init_forward(struct sink *sink)
+{
+ sink->forward_task = task_new_anywhere();
+ if (!sink->forward_task)
+ return 0;
+
+ sink->forward_task->process = process_sink_forward;
+ sink->forward_task->context = (void *)sink;
+ sink->forward_sighandler = signal_register_task(0, sink->forward_task, 0);
+ task_wakeup(sink->forward_task, TASK_WOKEN_INIT);
+ return 1;
+}
+
+/* This tries to rotate a file-backed ring, but only if it contains contents.
+ * This way empty rings will not cause backups to be overwritten and it's safe
+ * to reload multiple times. That's only best effort, failures are silently
+ * ignored.
+ */
+void sink_rotate_file_backed_ring(const char *name)
+{
+ struct ring ring;
+ char *oldback;
+ int ret;
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd < 0)
+ return;
+
+ /* check for contents validity */
+ ret = read(fd, &ring, sizeof(ring));
+ close(fd);
+
+ if (ret != sizeof(ring))
+ goto rotate;
+
+ /* contents are present, we want to keep them => rotate. Note that
+ * an empty ring buffer has one byte (the marker).
+ */
+ if (ring.buf.data > 1)
+ goto rotate;
+
+ /* nothing to keep, let's scratch the file and preserve the backup */
+ return;
+
+ rotate:
+ oldback = NULL;
+ memprintf(&oldback, "%s.bak", name);
+ if (oldback) {
+ /* try to rename any possibly existing ring file to
+ * ".bak" and delete remains of older ones. This will
+ * ensure we don't wipe useful debug info upon restart.
+ */
+ unlink(oldback);
+ if (rename(name, oldback) < 0)
+ unlink(oldback);
+ ha_free(&oldback);
+ }
+}
+
+
+/* helper function to completely deallocate a sink struct
+ */
+static void sink_free(struct sink *sink)
+{
+ struct sink_forward_target *sft_next;
+
+ if (!sink)
+ return;
+ if (sink->type == SINK_TYPE_BUFFER) {
+ if (sink->store) {
+ size_t size = (sink->ctx.ring->buf.size + 4095UL) & -4096UL;
+ void *area = (sink->ctx.ring->buf.area - sizeof(*sink->ctx.ring));
+
+ msync(area, size, MS_SYNC);
+ munmap(area, size);
+ ha_free(&sink->store);
+ }
+ else
+ ring_free(sink->ctx.ring);
+ }
+ LIST_DEL_INIT(&sink->sink_list); // remove from parent list
+ task_destroy(sink->forward_task);
+ free_proxy(sink->forward_px);
+ ha_free(&sink->name);
+ ha_free(&sink->desc);
+ while (sink->sft) {
+ sft_next = sink->sft->next;
+ ha_free(&sink->sft);
+ sink->sft = sft_next;
+ }
+ ha_free(&sink);
+}
+
+/* Helper function to create new high-level ring buffer (as in ring section from
+ * the config): will create a new sink of buf type, and a new forward proxy,
+ * which will be stored in forward_px to know that the sink is responsible for
+ * it.
+ *
+ * Returns NULL on failure
+ */
+static struct sink *sink_new_ringbuf(const char *id, const char *description,
+ const char *file, int linenum, char **err_msg)
+{
+ struct sink *sink;
+ struct proxy *p = NULL; // forward_px
+
+ /* allocate new proxy to handle forwards */
+ p = calloc(1, sizeof(*p));
+ if (!p) {
+ memprintf(err_msg, "out of memory");
+ goto err;
+ }
+
+ init_new_proxy(p);
+ sink_setup_proxy(p);
+ p->id = strdup(id);
+ p->conf.args.file = p->conf.file = strdup(file);
+ p->conf.args.line = p->conf.line = linenum;
+
+ sink = sink_new_buf(id, description, LOG_FORMAT_RAW, BUFSIZE);
+ if (!sink) {
+ memprintf(err_msg, "unable to create a new sink buffer for ring '%s'", id);
+ goto err;
+ }
+
+ /* link sink to proxy */
+ sink->forward_px = p;
+
+ return sink;
+
+ err:
+ free_proxy(p);
+ return NULL;
+}
+
+/* helper function: add a new server to an existing sink
+ *
+ * Allocates a sink_forward_target descriptor for <srv>, registers it as
+ * a watcher on the sink's ring, and pushes it at the head of the sink's
+ * sft list (the list link is only committed once ring_attach() succeeded).
+ *
+ * Returns 1 on success and 0 on failure
+ */
+static int sink_add_srv(struct sink *sink, struct server *srv)
+{
+	struct sink_forward_target *sft;
+
+	/* allocate new sink_forward_target descriptor */
+	sft = calloc(1, sizeof(*sft));
+	if (!sft) {
+		ha_alert("memory allocation error initializing server '%s' in ring '%s'.\n", srv->id, sink->name);
+		return 0;
+	}
+	sft->srv = srv;
+	sft->appctx = NULL;
+	sft->ofs = ~0; /* init ring offset */
+	sft->sink = sink;
+	sft->next = sink->sft;
+	HA_SPIN_INIT(&sft->lock);
+
+	/* mark server attached to the ring */
+	if (!ring_attach(sink->ctx.ring)) {
+		ha_alert("server '%s' sets too many watchers > 255 on ring '%s'.\n", srv->id, sink->name);
+		/* NOTE(review): the spinlock initialized above is freed without
+		 * an explicit destroy — presumably HA_SPIN_INIT needs no
+		 * teardown; confirm.
+		 */
+		ha_free(&sft);
+		return 0;
+	}
+	sink->sft = sft;
+	return 1;
+}
+
+/* Finalize sink struct to ensure configuration consistency and
+ * allocate final struct members
+ *
+ * Safe to call with a NULL <sink> (no-op). Only buffer-type sinks get
+ * post-processing: maxlen is either disabled (set to ~0 when unset) or
+ * clamped to the ring's max payload, then every server attached to the
+ * optional forward proxy is registered as a forward target, and
+ * forwarding is started when at least one target exists.
+ *
+ * Returns ERR_NONE on success, ERR_WARN on warning
+ * Returns a composition of ERR_ALERT, ERR_ABORT, ERR_FATAL on failure
+ */
+static int sink_finalize(struct sink *sink)
+{
+	int err_code = ERR_NONE;
+	struct server *srv;
+
+	if (sink && (sink->type == SINK_TYPE_BUFFER)) {
+		if (!sink->maxlen)
+			sink->maxlen = ~0; // maxlen not set: no implicit truncation
+		else if (sink->maxlen > ring_max_payload(sink->ctx.ring)) {
+			/* maxlen set by user however it doesn't fit: set to max value */
+			ha_warning("ring '%s' event max length '%u' exceeds max payload size, forced to '%lu'.\n",
+				   sink->name, sink->maxlen, (unsigned long)ring_max_payload(sink->ctx.ring));
+			sink->maxlen = ring_max_payload(sink->ctx.ring);
+			err_code |= ERR_WARN;
+		}
+
+		/* prepare forward server descriptors */
+		if (sink->forward_px) {
+			/* sink proxy is set: register all servers from the proxy */
+			srv = sink->forward_px->srv;
+			while (srv) {
+				if (!sink_add_srv(sink, srv)) {
+					err_code |= ERR_ALERT | ERR_FATAL;
+					break;
+				}
+				srv = srv->next;
+			}
+		}
+		/* init forwarding if at least one sft is registered */
+		if (sink->sft && sink_init_forward(sink) == 0) {
+			ha_alert("error when trying to initialize sink buffer forwarding.\n");
+			err_code |= ERR_ALERT | ERR_FATAL;
+		}
+	}
+	return err_code;
+}
+
+/*
+ * Parse "ring" section and create corresponding sink buffer.
+ *
+ * Handled keywords: "ring" (opens a new section and allocates the sink
+ * tracked by the global <cfg_sink>), then per-section "size",
+ * "backing-file", "server", "timeout", "format", "maxlen" and
+ * "description". The sink is finalized later by cfg_post_parse_ring().
+ *
+ * The function returns 0 in success case, otherwise, it returns error
+ * flags.
+ */
+int cfg_parse_ring(const char *file, int linenum, char **args, int kwm)
+{
+	int err_code = 0;
+	char *err_msg = NULL;
+	const char *inv;
+
+	if (strcmp(args[0], "ring") == 0) { /* new ring section */
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : missing ring name.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		inv = invalid_char(args[1]);
+		if (inv) {
+			ha_alert("parsing [%s:%d] : invalid ring name '%s' (character '%c' is not permitted).\n", file, linenum, args[1], *inv);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (sink_find(args[1])) {
+			ha_alert("parsing [%s:%d] : sink named '%s' already exists.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		cfg_sink = sink_new_ringbuf(args[1], args[1], file, linenum, &err_msg);
+		if (!cfg_sink) {
+			ha_alert("parsing [%s:%d] : %s.\n", file, linenum, err_msg);
+			ha_free(&err_msg);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* set maxlen value to 0 for now, we rely on this in postparsing
+		 * to know if it was explicitly set using the "maxlen" parameter
+		 */
+		cfg_sink->maxlen = 0;
+	}
+	else if (strcmp(args[0], "size") == 0) {
+		size_t size;
+
+		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)) {
+			ha_alert("parsing [%s:%d] : 'size' directive not usable with this type of sink.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* NOTE(review): atol() accepts no unit suffix and a negative
+		 * value would wrap through size_t (only zero is rejected here);
+		 * an oversized result is ultimately caught by ring_resize()
+		 * failing — confirm this is the intended validation level.
+		 */
+		size = atol(args[1]);
+		if (!size) {
+			ha_alert("parsing [%s:%d] : invalid size '%s' for new sink buffer.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (cfg_sink->store) {
+			ha_alert("parsing [%s:%d] : cannot resize an already mapped file, please specify 'size' before 'backing-file'.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (size < cfg_sink->ctx.ring->buf.size) {
+			ha_warning("parsing [%s:%d] : ignoring new size '%llu' that is smaller than current size '%llu' for ring '%s'.\n",
+				   file, linenum, (ullong)size, (ullong)cfg_sink->ctx.ring->buf.size, cfg_sink->name);
+			err_code |= ERR_WARN;
+			goto err;
+		}
+
+		if (!ring_resize(cfg_sink->ctx.ring, size)) {
+			ha_alert("parsing [%s:%d] : fail to set sink buffer size '%llu' for ring '%s'.\n", file, linenum,
+				 (ullong)cfg_sink->ctx.ring->buf.size, cfg_sink->name);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+	}
+	else if (strcmp(args[0], "backing-file") == 0) {
+		/* This tries to mmap file <file> for size <size> and to use it as a backing store
+		 * for ring <ring>. Existing data are delete. NULL is returned on error.
+		 */
+		const char *backing = args[1];
+		size_t size;
+		void *area;
+		int fd;
+
+		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)) {
+			ha_alert("parsing [%s:%d] : 'backing-file' only usable with existing rings.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (cfg_sink->store) {
+			ha_alert("parsing [%s:%d] : 'backing-file' already specified for ring '%s' (was '%s').\n", file, linenum, cfg_sink->name, cfg_sink->store);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* let's check if the file exists and is not empty. That's the
+		 * only condition under which we'll trigger a rotate, so that
+		 * config checks, reloads, or restarts that don't emit anything
+		 * do not rotate it again.
+		 */
+		sink_rotate_file_backed_ring(backing);
+
+		fd = open(backing, O_RDWR | O_CREAT, 0600);
+		if (fd < 0) {
+			ha_alert("parsing [%s:%d] : cannot open backing-file '%s' for ring '%s': %s.\n", file, linenum, backing, cfg_sink->name, strerror(errno));
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* round the mapping size up to a 4kB page */
+		size = (cfg_sink->ctx.ring->buf.size + 4095UL) & -4096UL;
+		if (ftruncate(fd, size) != 0) {
+			close(fd);
+			ha_alert("parsing [%s:%d] : could not adjust size of backing-file for ring '%s': %s.\n", file, linenum, cfg_sink->name, strerror(errno));
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		area = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+		if (area == MAP_FAILED) {
+			close(fd);
+			ha_alert("parsing [%s:%d] : failed to use '%s' as a backing file for ring '%s': %s.\n", file, linenum, backing, cfg_sink->name, strerror(errno));
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* we don't need the file anymore */
+		close(fd);
+		cfg_sink->store = strdup(backing);
+
+		/* never fails */
+		ring_free(cfg_sink->ctx.ring);
+		cfg_sink->ctx.ring = ring_make_from_area(area, size);
+	}
+	else if (strcmp(args[0],"server") == 0) {
+		if (!cfg_sink || (cfg_sink->type != SINK_TYPE_BUFFER)) {
+			ha_alert("parsing [%s:%d] : unable to create server '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* servers are parsed into the sink's forward proxy; they are
+		 * turned into forward targets later in sink_finalize()
+		 */
+		err_code |= parse_server(file, linenum, args, cfg_sink->forward_px, NULL,
+					 SRV_PARSE_PARSE_ADDR|SRV_PARSE_INITIAL_RESOLVE);
+	}
+	else if (strcmp(args[0],"timeout") == 0) {
+		if (!cfg_sink || !cfg_sink->forward_px) {
+			ha_alert("parsing [%s:%d] : unable to set timeout '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (strcmp(args[1], "connect") == 0 ||
+		    strcmp(args[1], "server") == 0) {
+			const char *res;
+			unsigned int tout;
+
+			if (!*args[2]) {
+				ha_alert("parsing [%s:%d] : '%s %s' expects <time> as argument.\n",
+					 file, linenum, args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto err;
+			}
+			res = parse_time_err(args[2], &tout, TIME_UNIT_MS);
+			if (res == PARSE_TIME_OVER) {
+				ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to <%s %s>, maximum value is 2147483647 ms (~24.8 days).\n",
+					 file, linenum, args[2], args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto err;
+			}
+			else if (res == PARSE_TIME_UNDER) {
+				ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to <%s %s>, minimum non-null value is 1 ms.\n",
+					 file, linenum, args[2], args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto err;
+			}
+			else if (res) {
+				ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to <%s %s>.\n",
+					 file, linenum, *res, args[0], args[1]);
+				err_code |= ERR_ALERT | ERR_FATAL;
+				goto err;
+			}
+			/* dispatch on first letter: 'c'onnect vs 's'erver */
+			if (args[1][0] == 'c')
+				cfg_sink->forward_px->timeout.connect = tout;
+			else
+				cfg_sink->forward_px->timeout.server = tout;
+		}
+	}
+	else if (strcmp(args[0],"format") == 0) {
+		if (!cfg_sink) {
+			ha_alert("parsing [%s:%d] : unable to set format '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		cfg_sink->fmt = get_log_format(args[1]);
+		if (cfg_sink->fmt == LOG_FORMAT_UNSPEC) {
+			ha_alert("parsing [%s:%d] : unknown format '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+	}
+	else if (strcmp(args[0],"maxlen") == 0) {
+		if (!cfg_sink) {
+			ha_alert("parsing [%s:%d] : unable to set event max length '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* consistency with the ring capacity is checked in sink_finalize() */
+		cfg_sink->maxlen = atol(args[1]);
+		if (!cfg_sink->maxlen) {
+			ha_alert("parsing [%s:%d] : invalid size '%s' for new sink buffer.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+	}
+	else if (strcmp(args[0],"description") == 0) {
+		if (!cfg_sink) {
+			ha_alert("parsing [%s:%d] : unable to set description '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		if (!*args[1]) {
+			ha_alert("parsing [%s:%d] : missing ring description text.\n", file, linenum);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+
+		/* replace the default description installed at sink creation */
+		free(cfg_sink->desc);
+
+		cfg_sink->desc = strdup(args[1]);
+		if (!cfg_sink->desc) {
+			ha_alert("parsing [%s:%d] : fail to set description '%s'.\n", file, linenum, args[1]);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto err;
+		}
+	}
+	else {
+		ha_alert("parsing [%s:%d] : unknown statement '%s'.\n", file, linenum, args[0]);
+		err_code |= ERR_ALERT | ERR_FATAL;
+		goto err;
+	}
+
+err:
+	return err_code;
+}
+
+/* Creates a new sink buffer from a logger.
+ *
+ * It uses the logger's address to declare a forward
+ * server for this buffer. And it initializes the
+ * forwarding.
+ *
+ * The function returns a pointer on the
+ * allocated struct sink if allocate
+ * and initialize succeed, else if it fails
+ * it returns NULL.
+ *
+ * Note: the sink is created using the name
+ * specified into logger->target.ring_name
+ */
+struct sink *sink_new_from_logger(struct logger *logger)
+{
+	struct sink *sink = NULL;
+	struct server *srv = NULL;
+	char *err_msg = NULL;
+
+	/* prepare description for the sink (uses the global trash chunk) */
+	chunk_reset(&trash);
+	chunk_printf(&trash, "created from log directive declared into '%s' at line %d", logger->conf.file, logger->conf.line);
+
+	/* allocate a new sink buffer */
+	sink = sink_new_ringbuf(logger->target.ring_name, trash.area, logger->conf.file, logger->conf.line, &err_msg);
+	if (!sink) {
+		ha_alert("%s.\n", err_msg);
+		ha_free(&err_msg);
+		goto error;
+	}
+
+	/* ring format normally defaults to RAW, but here we set ring format
+	 * to UNSPEC to inherit from caller format in sink_write() since we
+	 * cannot customize implicit ring settings
+	 */
+	sink->fmt = LOG_FORMAT_UNSPEC;
+
+	/* for the same reason, we disable sink->maxlen to inherit from caller
+	 * maxlen in sink_write()
+	 */
+	sink->maxlen = 0;
+
+	/* Set default connect and server timeout for sink forward proxy */
+	sink->forward_px->timeout.connect = MS_TO_TICKS(1000);
+	sink->forward_px->timeout.server = MS_TO_TICKS(5000);
+
+	/* allocate a new server to forward messages
+	 * from ring buffer
+	 */
+	srv = new_server(sink->forward_px);
+	if (!srv)
+		goto error;
+
+	/* init server */
+	srv->id = strdup(logger->target.ring_name);
+	srv->conf.file = strdup(logger->conf.file);
+	srv->conf.line = logger->conf.line;
+	srv->addr = *logger->target.addr;
+	srv->svc_port = get_host_port(logger->target.addr);
+	HA_SPIN_INIT(&srv->lock);
+
+	/* process per thread init */
+	if (srv_init_per_thr(srv) == -1)
+		goto error;
+
+	/* link srv with sink forward proxy: the servers are linked
+	 * backwards first into proxy
+	 */
+	srv->next = sink->forward_px->srv;
+	sink->forward_px->srv = srv;
+
+	/* once linked into the proxy, <srv> is owned by it: on a finalize
+	 * failure we must skip srv_drop() and let sink_free()/free_proxy()
+	 * release it, hence the two distinct error labels below
+	 */
+	if (sink_finalize(sink) & ERR_CODE)
+		goto error_final;
+
+	return sink;
+ error:
+	srv_drop(srv);
+
+ error_final:
+	sink_free(sink);
+
+	return NULL;
+}
+
+/* This function is pretty similar to sink_from_logger():
+ * But instead of creating a forward proxy and server from a logger struct
+ * it uses already existing srv to create the forwarding sink, so most of
+ * the initialization is bypassed.
+ *
+ * The function returns a pointer on the
+ * allocated struct sink if allocate
+ * and initialize succeed, else if it fails
+ * it returns NULL.
+ *
+ * <from> allows to specify a string that will be inserted into the sink
+ * description to describe where it was created from.
+ *
+ * Note: the sink is created using the name
+ * specified into srv->id
+ */
+struct sink *sink_new_from_srv(struct server *srv, const char *from)
+{
+	struct sink *sink = NULL;
+	/* honor the server's configured ring size when present */
+	int bufsize = (srv->log_bufsize) ? srv->log_bufsize : BUFSIZE;
+
+	/* prepare description for the sink (uses the global trash chunk) */
+	chunk_reset(&trash);
+	chunk_printf(&trash, "created from %s declared into '%s' at line %d", from, srv->conf.file, srv->conf.line);
+
+	/* directly create a sink of BUF type, and use UNSPEC log format to
+	 * inherit from caller fmt in sink_write()
+	 */
+	sink = sink_new_buf(srv->id, trash.area, LOG_FORMAT_UNSPEC, bufsize);
+	if (!sink) {
+		ha_alert("unable to create a new sink buffer for server '%s'.\n", srv->id);
+		goto error;
+	}
+
+	/* we disable sink->maxlen to inherit from caller
+	 * maxlen in sink_write()
+	 */
+	sink->maxlen = 0;
+
+	/* add server to sink */
+	if (!sink_add_srv(sink, srv))
+		goto error;
+
+	if (sink_finalize(sink) & ERR_CODE)
+		goto error;
+
+	return sink;
+
+ error:
+	/* sink_free() accepts NULL; <srv> stays owned by the caller */
+	sink_free(sink);
+
+	return NULL;
+}
+
+/*
+ * Post parsing "ring" section.
+ *
+ * Finalizes the ring currently being parsed (if any) and resets the
+ * global section-parsing context so the next section starts clean.
+ *
+ * The function returns 0 in success case, otherwise, it returns error
+ * flags.
+ */
+int cfg_post_parse_ring()
+{
+	int err_code = sink_finalize(cfg_sink);
+
+	cfg_sink = NULL;
+	return err_code;
+}
+
+/* function: resolve a single logger target of BUFFER type
+ *
+ * Either creates an implicit ring from the logger's address (then the
+ * address is released since the sink replaces it), or looks up an
+ * explicitly named ring which must exist and be of buffer type.
+ *
+ * Returns err_code which defaults to ERR_NONE and can be set to a combination
+ * of ERR_WARN, ERR_ALERT, ERR_FATAL and ERR_ABORT in case of errors.
+ * <msg> could be set at any time (it will usually be set on error, but
+ * could also be set when no error occurred to report a diag warning), thus is
+ * up to the caller to check it and to free it.
+ */
+int sink_resolve_logger_buffer(struct logger *logger, char **msg)
+{
+	struct log_target *target = &logger->target;
+	int err_code = ERR_NONE;
+	struct sink *sink;
+
+	BUG_ON(target->type != LOG_TARGET_BUFFER || (target->flags & LOG_TARGET_FL_RESOLVED));
+	if (target->addr) {
+		sink = sink_new_from_logger(logger);
+		if (!sink) {
+			memprintf(msg, "cannot be initialized (failed to create implicit ring)");
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+		ha_free(&target->addr); /* we no longer need this */
+	}
+	else {
+		sink = sink_find(target->ring_name);
+		if (!sink) {
+			memprintf(msg, "uses unknown ring named '%s'", target->ring_name);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+		else if (sink->type != SINK_TYPE_BUFFER) {
+			memprintf(msg, "uses incompatible ring '%s'", target->ring_name);
+			err_code |= ERR_ALERT | ERR_FATAL;
+			goto end;
+		}
+	}
+	/* consistency checks: these only set a diag message in <msg>, they
+	 * do not raise error flags (see function contract above)
+	 */
+	if (sink && logger->maxlen > ring_max_payload(sink->ctx.ring)) {
+		memprintf(msg, "uses a max length which exceeds ring capacity ('%s' supports %lu bytes at most)",
+			  target->ring_name, (unsigned long)ring_max_payload(sink->ctx.ring));
+	}
+	else if (sink && logger->maxlen > sink->maxlen) {
+		memprintf(msg, "uses a ring with a smaller maxlen than the one specified on the log directive ('%s' has maxlen = %d), logs will be truncated according to the lowest maxlen between the two",
+			  target->ring_name, sink->maxlen);
+	}
+ end:
+	ha_free(&target->ring_name); /* sink is resolved and will replace ring_name hint */
+	target->sink = sink;
+	return err_code;
+}
+
+/* Register the default sinks at startup: fd-based sinks for stdout and
+ * stderr, plus the default 1 MB in-memory "buf0" ring using the
+ * timestamped format.
+ */
+static void sink_init()
+{
+	sink_new_fd("stdout", "standard output (fd#1)", LOG_FORMAT_RAW, 1);
+	/* fix copy-paste in the description: fd#2 is standard error */
+	sink_new_fd("stderr", "standard error (fd#2)", LOG_FORMAT_RAW, 2);
+	sink_new_buf("buf0", "in-memory ring buffer", LOG_FORMAT_TIMED, 1048576);
+}
+
+/* Release all registered sinks at deinit time. The _safe iterator is
+ * required because sink_free() unlinks each sink from <sink_list>
+ * while we iterate.
+ */
+static void sink_deinit()
+{
+	struct sink *sink, *sb;
+
+	list_for_each_entry_safe(sink, sb, &sink_list, sink_list)
+		sink_free(sink);
+}
+
+INITCALL0(STG_REGISTER, sink_init);
+REGISTER_POST_DEINIT(sink_deinit);
+
+/* CLI keyword: "show events [<sink>] [-w] [-n]" dumps an event sink's state */
+static struct cli_kw_list cli_kws = {{ },{
+	{ { "show", "events", NULL }, "show events [<sink>] [-w] [-n] : show event sink state", cli_parse_show_events, NULL, NULL },
+	{{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/* config parsers for this section */
+REGISTER_CONFIG_SECTION("ring", cfg_parse_ring, cfg_post_parse_ring);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/slz.c b/src/slz.c
new file mode 100644
index 0000000..1560bac
--- /dev/null
+++ b/src/slz.c
@@ -0,0 +1,1421 @@
+/*
+ * Copyright (C) 2013-2015 Willy Tarreau <w@1wt.eu>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <import/slz.h>
+#include <import/slz-tables.h>
+
+/* First, RFC1951-specific declarations and extracts from the RFC.
+ *
+ * RFC1951 - deflate stream format
+
+
+ * Data elements are packed into bytes in order of
+ increasing bit number within the byte, i.e., starting
+ with the least-significant bit of the byte.
+ * Data elements other than Huffman codes are packed
+ starting with the least-significant bit of the data
+ element.
+ * Huffman codes are packed starting with the most-
+ significant bit of the code.
+
+ 3.2.3. Details of block format
+
+ Each block of compressed data begins with 3 header bits
+ containing the following data:
+
+ first bit BFINAL
+ next 2 bits BTYPE
+
+ Note that the header bits do not necessarily begin on a byte
+ boundary, since a block does not necessarily occupy an integral
+ number of bytes.
+
+ BFINAL is set if and only if this is the last block of the data
+ set.
+
+ BTYPE specifies how the data are compressed, as follows:
+
+ 00 - no compression
+ 01 - compressed with fixed Huffman codes
+ 10 - compressed with dynamic Huffman codes
+ 11 - reserved (error)
+
+ 3.2.4. Non-compressed blocks (BTYPE=00)
+
+ Any bits of input up to the next byte boundary are ignored.
+ The rest of the block consists of the following information:
+
+ 0 1 2 3 4...
+ +---+---+---+---+================================+
+ | LEN | NLEN |... LEN bytes of literal data...|
+ +---+---+---+---+================================+
+
+ LEN is the number of data bytes in the block. NLEN is the
+ one's complement of LEN.
+
+ 3.2.5. Compressed blocks (length and distance codes)
+
+ As noted above, encoded data blocks in the "deflate" format
+ consist of sequences of symbols drawn from three conceptually
+ distinct alphabets: either literal bytes, from the alphabet of
+ byte values (0..255), or <length, backward distance> pairs,
+ where the length is drawn from (3..258) and the distance is
+ drawn from (1..32,768). In fact, the literal and length
+ alphabets are merged into a single alphabet (0..285), where
+ values 0..255 represent literal bytes, the value 256 indicates
+ end-of-block, and values 257..285 represent length codes
+ (possibly in conjunction with extra bits following the symbol
+ code) as follows:
+
+Length encoding :
+ Extra Extra Extra
+ Code Bits Length(s) Code Bits Lengths Code Bits Length(s)
+ ---- ---- ------ ---- ---- ------- ---- ---- -------
+ 257 0 3 267 1 15,16 277 4 67-82
+ 258 0 4 268 1 17,18 278 4 83-98
+ 259 0 5 269 2 19-22 279 4 99-114
+ 260 0 6 270 2 23-26 280 4 115-130
+ 261 0 7 271 2 27-30 281 5 131-162
+ 262 0 8 272 2 31-34 282 5 163-194
+ 263 0 9 273 3 35-42 283 5 195-226
+ 264 0 10 274 3 43-50 284 5 227-257
+ 265 1 11,12 275 3 51-58 285 0 258
+ 266 1 13,14 276 3 59-66
+
+Distance encoding :
+ Extra Extra Extra
+ Code Bits Dist Code Bits Dist Code Bits Distance
+ ---- ---- ---- ---- ---- ------ ---- ---- --------
+ 0 0 1 10 4 33-48 20 9 1025-1536
+ 1 0 2 11 4 49-64 21 9 1537-2048
+ 2 0 3 12 5 65-96 22 10 2049-3072
+ 3 0 4 13 5 97-128 23 10 3073-4096
+ 4 1 5,6 14 6 129-192 24 11 4097-6144
+ 5 1 7,8 15 6 193-256 25 11 6145-8192
+ 6 2 9-12 16 7 257-384 26 12 8193-12288
+ 7 2 13-16 17 7 385-512 27 12 12289-16384
+ 8 3 17-24 18 8 513-768 28 13 16385-24576
+ 9 3 25-32 19 8 769-1024 29 13 24577-32768
+
+ 3.2.6. Compression with fixed Huffman codes (BTYPE=01)
+
+ The Huffman codes for the two alphabets are fixed, and are not
+ represented explicitly in the data. The Huffman code lengths
+ for the literal/length alphabet are:
+
+ Lit Value Bits Codes
+ --------- ---- -----
+ 0 - 143 8 00110000 through
+ 10111111
+ 144 - 255 9 110010000 through
+ 111111111
+ 256 - 279 7 0000000 through
+ 0010111
+ 280 - 287 8 11000000 through
+ 11000111
+
+ The code lengths are sufficient to generate the actual codes,
+ as described above; we show the codes in the table for added
+ clarity. Literal/length values 286-287 will never actually
+ occur in the compressed data, but participate in the code
+ construction.
+
+ Distance codes 0-31 are represented by (fixed-length) 5-bit
+ codes, with possible additional bits as shown in the table
+ shown in Paragraph 3.2.5, above. Note that distance codes 30-
+ 31 will never actually occur in the compressed data.
+
+*/
+
+/* back references, built in a way that is optimal for 32/64 bits:
+ * by32.pos/by32.word overlay the two halves of by64, so 64-bit code can
+ * load or store a whole entry in a single access while 32-bit code uses
+ * the individual fields.
+ */
+union ref {
+	struct {
+		uint32_t pos;
+		uint32_t word;
+	} by32;
+	uint64_t by64;
+};
+
+#if defined(USE_64BIT_QUEUE) && defined(UNALIGNED_LE_OK)
+
+/* enqueue code x of <xbits> bits (LSB aligned, at most 24) and copy complete
+ * 32-bit words into output buffer. X must not contain non-zero bits above
+ * xbits.
+ */
+static inline void enqueue24(struct slz_stream *strm, uint32_t x, uint32_t xbits)
+{
+	uint64_t queue = strm->queue + ((uint64_t)x << strm->qbits);
+	uint32_t qbits = strm->qbits + xbits;
+
+	if (__builtin_expect(qbits >= 32, 1)) {
+		/* unaligned LE store: this variant is only built when
+		 * UNALIGNED_LE_OK is defined (see enclosing #if)
+		 */
+		*(uint32_t *)strm->outbuf = queue;
+		queue >>= 32;
+		qbits -= 32;
+		strm->outbuf += 4;
+	}
+
+	strm->queue = queue;
+	strm->qbits = qbits;
+}
+
+/* with a 64-bit queue the short variant brings nothing, alias it */
+#define enqueue8 enqueue24
+
+/* flush the queue and align to next byte: one byte is emitted for each
+ * started group of 8 pending bits (up to 4 bytes for a 32-bit residue),
+ * then the queue is reset.
+ */
+static inline void flush_bits(struct slz_stream *strm)
+{
+	if (strm->qbits > 0)
+		*strm->outbuf++ = strm->queue;
+
+	if (strm->qbits > 8)
+		*strm->outbuf++ = strm->queue >> 8;
+
+	if (strm->qbits > 16)
+		*strm->outbuf++ = strm->queue >> 16;
+
+	if (strm->qbits > 24)
+		*strm->outbuf++ = strm->queue >> 24;
+
+	strm->queue = 0;
+	strm->qbits = 0;
+}
+
+#else /* non-64 bit or aligned or big endian */
+
+/* enqueue code x of <xbits> bits (LSB aligned, at most 24) and copy complete
+ * bytes into out buf. X must not contain non-zero bits above xbits. Prefer
+ * enqueue8() when xbits is known for being 8 or less.
+ */
+static void enqueue24(struct slz_stream *strm, uint32_t x, uint32_t xbits)
+{
+	uint32_t queue = strm->queue + (x << strm->qbits);
+	uint32_t qbits = strm->qbits + xbits;
+
+	/* drain 16 bits at once when possible, then one more byte if needed */
+	if (qbits >= 16) {
+#ifndef UNALIGNED_LE_OK
+		/* byte-wise store keeps little-endian order on any arch */
+		strm->outbuf[0] = queue;
+		strm->outbuf[1] = queue >> 8;
+#else
+		*(uint16_t *)strm->outbuf = queue;
+#endif
+		strm->outbuf += 2;
+		queue >>= 16;
+		qbits -= 16;
+	}
+
+	if (qbits >= 8) {
+		qbits -= 8;
+		*strm->outbuf++ = queue;
+		queue >>= 8;
+	}
+	strm->qbits = qbits;
+	strm->queue = queue;
+	return;
+}
+
+/* enqueue code x of <xbits> bits (at most 8) and copy complete bytes into
+ * out buf. X must not contain non-zero bits above xbits. With at most 8
+ * new bits, at most one byte can become complete per call.
+ */
+static inline void enqueue8(struct slz_stream *strm, uint32_t x, uint32_t xbits)
+{
+	uint32_t queue = strm->queue + (x << strm->qbits);
+	uint32_t qbits = strm->qbits + xbits;
+
+	/* signed trick: (qbits - 8) >= 0 <=> qbits >= 8 */
+	if (__builtin_expect((signed)(qbits - 8) >= 0, 1)) {
+		qbits -= 8;
+		*strm->outbuf++ = queue;
+		queue >>= 8;
+	}
+
+	strm->qbits = qbits;
+	strm->queue = queue;
+}
+
+/* align to next byte: emit one byte per started 8-bit group of the
+ * pending queue (at most 2 bytes in this 32-bit variant), then reset it.
+ */
+static inline void flush_bits(struct slz_stream *strm)
+{
+	if (strm->qbits > 0)
+		*strm->outbuf++ = strm->queue;
+
+	if (strm->qbits > 8)
+		*strm->outbuf++ = strm->queue >> 8;
+
+	strm->queue = 0;
+	strm->qbits = 0;
+}
+#endif
+
+
+/* emit one raw byte; only valid if buffer is already aligned (queue empty) */
+static inline void copy_8b(struct slz_stream *strm, uint32_t x)
+{
+	*strm->outbuf++ = x;
+}
+
+/* emit 16 bits little-endian; only valid if buffer is already aligned */
+static inline void copy_16b(struct slz_stream *strm, uint32_t x)
+{
+	strm->outbuf[0] = x;
+	strm->outbuf[1] = x >> 8;
+	strm->outbuf += 2;
+}
+
+/* emit 32 bits little-endian; only valid if buffer is already aligned */
+static inline void copy_32b(struct slz_stream *strm, uint32_t x)
+{
+	strm->outbuf[0] = x;
+	strm->outbuf[1] = x >> 8;
+	strm->outbuf[2] = x >> 16;
+	strm->outbuf[3] = x >> 24;
+	strm->outbuf += 4;
+}
+
+/* emit the fixed Huffman code for literal/length symbol <code>: the
+ * fixed_huff[] table packs the bit pattern in the upper bits and the code
+ * length in the low 4 bits.
+ */
+static inline void send_huff(struct slz_stream *strm, uint32_t code)
+{
+	uint32_t bits;
+
+	code = fixed_huff[code];
+	bits = code & 15;
+	code >>= 4;
+	enqueue24(strm, code, bits);
+}
+
+/* emit the end-of-block marker (symbol 256, fixed 7-bit code 0000000) */
+static inline void send_eob(struct slz_stream *strm)
+{
+	enqueue8(strm, 0, 7); // direct encoding of 256 = EOB (cf RFC1951)
+}
+
+/* copies <len> literals from <buf>. <more> indicates that there are data past
+ * buf + <len>. <len> must not be null.
+ *
+ * Emits stored blocks (BTYPE=00): each block is at most 65535 bytes (the
+ * 16-bit LEN field limit), closing any pending huffman block first, then
+ * sending the 3 header bits, aligning to a byte, and writing LEN/NLEN
+ * followed by the raw data. BFINAL is set on the last block when <more>
+ * is false.
+ */
+static void copy_lit(struct slz_stream *strm, const void *buf, uint32_t len, int more)
+{
+	uint32_t len2;
+
+	do {
+		len2 = len;
+		if (__builtin_expect(len2 > 65535, 0))
+			len2 = 65535;
+
+		len -= len2;
+
+		if (strm->state != SLZ_ST_EOB)
+			send_eob(strm);
+
+		strm->state = (more || len) ? SLZ_ST_EOB : SLZ_ST_DONE;
+
+		enqueue8(strm, !(more || len), 3); // BFINAL = !more ; BTYPE = 00
+		flush_bits(strm);
+		copy_16b(strm, len2);  // len2
+		copy_16b(strm, ~len2); // nlen2
+		memcpy(strm->outbuf, buf, len2);
+		/* NOTE(review): arithmetic on a void* is a GCC/Clang extension,
+		 * not standard C — fine for this codebase's toolchains.
+		 */
+		buf += len2;
+		strm->outbuf += len2;
+	} while (len);
+}
+
+/* copies <len> literals from <buf> as fixed-Huffman-coded symbols
+ * (BTYPE=01). <more> indicates that there are data past buf + <len>.
+ * <len> must not be null.
+ */
+static void copy_lit_huff(struct slz_stream *strm, const unsigned char *buf, uint32_t len, int more)
+{
+	uint32_t pos;
+
+	/* This ugly construct limits the amount of tests and optimizes for the
+	 * most common case (more > 0): a fresh block header is only emitted
+	 * when no huffman block is open, or when the open one must be closed
+	 * to start the final block.
+	 */
+	if (strm->state == SLZ_ST_EOB) {
+	eob:
+		strm->state = more ? SLZ_ST_FIXED : SLZ_ST_LAST;
+		enqueue8(strm, 2 + !more, 3); // BFINAL = !more ; BTYPE = 01
+	}
+	else if (!more) {
+		send_eob(strm);
+		goto eob;
+	}
+
+	pos = 0;
+	do {
+		send_huff(strm, buf[pos++]);
+	} while (pos < len);
+}
+
+/* format:
+ * bit0..31 = word
+ * bit32..63 = last position in buffer of similar content
+ */
+
+/* This hash provides good average results on HTML contents, and is among the
+ * few which provide almost optimal results on various different pages.
+ * On ARM with CRC32 support, a single crc32w instruction is used instead
+ * of the multiply/shift mix; both variants keep the top HASH_BITS bits.
+ */
+static inline uint32_t slz_hash(uint32_t a)
+{
+#if defined(__ARM_FEATURE_CRC32)
+# if defined(__ARM_ARCH_ISA_A64)
+	// 64 bit mode
+	__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(a) : "r"(0));
+# else
+	// 32 bit mode (e.g. armv7 compiler building for armv8
+	__asm__ volatile("crc32w %0,%0,%1" : "+r"(a) : "r"(0));
+# endif
+	return a >> (32 - HASH_BITS);
+#else
+	/* (a << 19) + (a << 6) - a == a * 0x8003f, a cheap multiplicative mix */
+	return ((a << 19) + (a << 6) - a) >> (32 - HASH_BITS);
+#endif
+}
+
+/* This function compares buffers <a> and <b> and reads 32 or 64 bits at a time
+ * during the approach. It makes us of unaligned little endian memory accesses
+ * on capable architectures. <max> is the maximum number of bytes that can be
+ * read, so both <a> and <b> must have at least <max> bytes ahead. <max> may
+ * safely be null or negative if that simplifies computations in the caller.
+ *
+ * Returns the length of the common prefix of <a> and <b>, capped at <max>.
+ */
+static inline long memmatch(const unsigned char *a, const unsigned char *b, long max)
+{
+	long len = 0;
+
+#ifdef UNALIGNED_LE_OK
+	unsigned long xor;
+
+	while (1) {
+		/* not enough room left for two word reads: finish byte-wise */
+		if ((long)(len + 2 * sizeof(long)) > max) {
+			while (len < max) {
+				if (a[len] != b[len])
+					break;
+				len++;
+			}
+			return len;
+		}
+
+		/* a non-zero XOR marks the first differing word; the loop is
+		 * unrolled twice per iteration
+		 */
+		xor = *(long *)&a[len] ^ *(long *)&b[len];
+		if (xor)
+			break;
+		len += sizeof(long);
+
+		xor = *(long *)&a[len] ^ *(long *)&b[len];
+		if (xor)
+			break;
+		len += sizeof(long);
+	}
+
+#if defined(__x86_64__) || defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+	/* x86 has bsf. We know that xor is non-null here */
+	asm("bsf %1,%0\n" : "=r"(xor) : "0" (xor));
+	/* bsf gives the first differing bit; /8 converts to whole bytes */
+	return len + xor / 8;
+#else
+	/* portable fallback: locate the first non-zero byte of <xor>,
+	 * little-endian, by testing growing low-order masks
+	 */
+	if (sizeof(long) > 4 && !(xor & 0xffffffff)) {
+		/* This code is optimized out on 32-bit archs, but we still
+		 * need to shift in two passes to avoid a warning. It is
+		 * properly optimized out as a single shift.
+		 */
+		xor >>= 16; xor >>= 16;
+		if (xor & 0xffff) {
+			if (xor & 0xff)
+				return len + 4;
+			return len + 5;
+		}
+		if (xor & 0xffffff)
+			return len + 6;
+		return len + 7;
+	}
+
+	if (xor & 0xffff) {
+		if (xor & 0xff)
+			return len;
+		return len + 1;
+	}
+	if (xor & 0xffffff)
+		return len + 2;
+	return len + 3;
+#endif // x86
+
+#else // UNALIGNED_LE_OK
+	/* This is the generic version for big endian or unaligned-incompatible
+	 * architectures.
+	 */
+	while (len < max) {
+		if (a[len] != b[len])
+			break;
+		len++;
+	}
+	return len;
+
+#endif
+}
+
+/* sets <count> BYTES to -32769 in <refs> so that any uninitialized entry will
+ * verify (pos-last-1 >= 32768) and be ignored. <count> must be a multiple of
+ * 128 bytes and <refs> must be at least one count in length. It's supposed to
+ * be applied to 64-bit aligned data exclusively, which makes it slightly
+ * faster than the regular memset() since no alignment check is performed.
+ *
+ * The loop is manually unrolled 16 entries (128 bytes) per iteration,
+ * which is why <count> must be a multiple of 128.
+ */
+static void reset_refs(union ref *refs, long count)
+{
+	/* avoid a shift/mask by casting to void* */
+	union ref *end = (void *)refs + count;
+
+	do {
+		refs[ 0].by64 = -32769;
+		refs[ 1].by64 = -32769;
+		refs[ 2].by64 = -32769;
+		refs[ 3].by64 = -32769;
+		refs[ 4].by64 = -32769;
+		refs[ 5].by64 = -32769;
+		refs[ 6].by64 = -32769;
+		refs[ 7].by64 = -32769;
+		refs[ 8].by64 = -32769;
+		refs[ 9].by64 = -32769;
+		refs[10].by64 = -32769;
+		refs[11].by64 = -32769;
+		refs[12].by64 = -32769;
+		refs[13].by64 = -32769;
+		refs[14].by64 = -32769;
+		refs[15].by64 = -32769;
+		refs += 16;
+	} while (refs < end);
+}
+
/* Compresses <ilen> bytes from <in> into <out> according to RFC1951. The
 * output result may be up to 5 bytes larger than the input, to which 2 extra
 * bytes may be added to send the last chunk due to BFINAL+EOB encoding (10
 * bits) when <more> is not set. The caller is responsible for ensuring there
 * is enough room in the output buffer for this. The amount of output bytes is
 * returned, and no CRC is computed.
 */
long slz_rfc1951_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more)
{
	long rem = ilen;           /* input bytes not yet consumed */
	unsigned long pos = 0;     /* current read position in <in> */
	unsigned long last;        /* position of the previous occurrence of <word> */
	uint32_t word = 0;         /* next 4 input bytes, little-endian */
	long mlen;                 /* match length */
	uint32_t h;                /* hash of <word> */
	uint64_t ent;              /* hash table entry: word<<32 | pos */

	uint32_t plit = 0;         /* number of pending literals */
	uint32_t bit9 = 0;         /* count of pending literals needing 9 huffman bits */
	uint32_t dist, code;
	union ref refs[1 << HASH_BITS]; /* lookback hash table, on the stack */

	if (!strm->level) {
		/* force to send as literals (eg to preserve CPU) */
		strm->outbuf = out;
		plit = pos = ilen;
		bit9 = 52; /* force literal dump */
		goto final_lit_dump;
	}

	/* invalidate all hash entries so stale positions are never matched */
	reset_refs(refs, sizeof(refs));

	strm->outbuf = out;

#ifndef UNALIGNED_FASTER
	/* pre-load the first 3 bytes; the 4th is shifted in at each iteration */
	word = ((unsigned char)in[pos] << 8) + ((unsigned char)in[pos + 1] << 16) + ((unsigned char)in[pos + 2] << 24);
#endif
	while (rem >= 4) {
#ifndef UNALIGNED_FASTER
		word = ((unsigned char)in[pos + 3] << 24) + (word >> 8);
#else
		word = *(uint32_t *)&in[pos];
#endif
		h = slz_hash(word);
		asm volatile ("" ::); // prevent gcc from trying to be smart with the prefetch

		/* look up the previous occurrence of this 4-byte pattern and
		 * replace it with the current one in a single access when
		 * 64-bit stores are available.
		 */
		if (sizeof(long) >= 8) {
			ent = refs[h].by64;
			last = (uint32_t)ent;
			ent >>= 32;
			refs[h].by64 = ((uint64_t)pos) + ((uint64_t)word << 32);
		} else {
			ent = refs[h].by32.word;
			last = refs[h].by32.pos;
			refs[h].by32.pos = pos;
			refs[h].by32.word = word;
		}

#ifdef FIND_OPTIMAL_MATCH
		/* Experimental code to see what could be saved with an ideal
		 * longest match lookup algorithm. This one is very slow but
		 * scans the whole window. In short, here are the savings :
		 *   file        orig     fast(ratio)  optimal(ratio)
		 *   README      5185     3419 (65.9%) 3165 (61.0%)  -7.5%
		 *   index.html 76799    35662 (46.4%) 29875 (38.9%) -16.3%
		 *   rfc1952.c  29383    13442 (45.7%) 11793 (40.1%) -12.3%
		 *
		 * Thus the savings to expect for large files is at best 16%.
		 *
		 * A non-colliding hash gives 33025 instead of 35662 (-7.4%),
		 * and keeping the last two entries gives 31724 (-11.0%).
		 */
		unsigned long scan;
		int saved = 0;
		int bestpos = 0;
		int bestlen = 0;
		int firstlen = 0;
		int max_lookup = 2; // 0 = no limit

		for (scan = pos - 1; scan < pos && (unsigned long)(pos - scan - 1) < 32768; scan--) {
			int len;

			if (*(uint32_t *)(in + scan) != word)
				continue;

			len = memmatch(in + pos, in + scan, rem);
			if (!bestlen)
				firstlen = len;

			if (len > bestlen) {
				bestlen = len;
				bestpos = scan;
			}
			if (!--max_lookup)
				break;
		}
		if (bestlen) {
			//printf("pos=%d last=%d bestpos=%d word=%08x ent=%08x len=%d\n",
			//       (int)pos, (int)last, (int)bestpos, (int)word, (int)ent, bestlen);
			last = bestpos;
			ent = word;
			saved += bestlen - firstlen;
		}
		//fprintf(stderr, "first=%d best=%d saved_total=%d\n", firstlen, bestlen, saved);
#endif

		/* hash entry doesn't hold the current 4 bytes => no match,
		 * account one pending literal and advance by one byte.
		 */
		if ((uint32_t)ent != word) {
	send_as_lit:
			rem--;
			plit++;
			bit9 += ((unsigned char)word >= 144);
			pos++;
			continue;
		}

		/* We reject pos = last and pos > last+32768 */
		if ((unsigned long)(pos - last - 1) >= 32768)
			goto send_as_lit;

		/* Note: cannot encode a length larger than 258 bytes */
		mlen = memmatch(in + pos + 4, in + last + 4, (rem > 258 ? 258 : rem) - 4) + 4;

		/* found a matching entry */

		if (bit9 >= 52 && mlen < 6)
			goto send_as_lit;

		/* compute the output code, its size and the length's size in
		 * bits to know if the reference is cheaper than literals.
		 */
		code = len_fh[mlen];

		/* direct mapping of dist->huffman code */
		dist = fh_dist_table[pos - last - 1];

		/* if encoding the dist+length is more expensive than sending
		 * the equivalent as bytes, lets keep the literals.
		 */
		if ((dist & 0x1f) + (code >> 16) + 8 >= 8 * mlen + bit9)
			goto send_as_lit;

		/* first, copy pending literals */
		if (plit) {
			/* Huffman encoding requires 9 bits for octets 144..255, so this
			 * is a waste of space for binary data. Switching between Huffman
			 * and no-comp then huffman consumes 52 bits (7 for EOB + 3 for
			 * block type + 7 for alignment + 32 for LEN+NLEN + 3 for next
			 * block. Only use plain literals if there are more than 52 bits
			 * to save then.
			 */
			if (bit9 >= 52)
				copy_lit(strm, in + pos - plit, plit, 1);
			else
				copy_lit_huff(strm, in + pos - plit, plit, 1);

			plit = 0;
		}

		/* use mode 01 - fixed huffman */
		if (strm->state == SLZ_ST_EOB) {
			strm->state = SLZ_ST_FIXED;
			enqueue8(strm, 0x02, 3); // BTYPE = 01, BFINAL = 0
		}

		/* copy the length first */
		enqueue24(strm, code & 0xFFFF, code >> 16);

		/* in fixed huffman mode, dist is fixed 5 bits */
		enqueue24(strm, dist >> 5, dist & 0x1f);
		bit9 = 0;
		rem -= mlen;
		pos += mlen;

#ifndef UNALIGNED_FASTER
#ifdef UNALIGNED_LE_OK
		word = *(uint32_t *)&in[pos - 1];
#else
		word = ((unsigned char)in[pos] << 8) + ((unsigned char)in[pos + 1] << 16) + ((unsigned char)in[pos + 2] << 24);
#endif
#endif
	}

	if (__builtin_expect(rem, 0)) {
		/* we're reading the 1..3 last bytes */
		plit += rem;
		do {
			bit9 += ((unsigned char)in[pos++] >= 144);
		} while (--rem);
	}

 final_lit_dump:
	/* now copy remaining literals or mark the end */
	if (plit) {
		if (bit9 >= 52)
			copy_lit(strm, in + pos - plit, plit, more);
		else
			copy_lit_huff(strm, in + pos - plit, plit, more);

		plit = 0;
	}

	strm->ilen += ilen;
	return strm->outbuf - out;
}
+
+/* Initializes stream <strm> for use with raw deflate (rfc1951). The CRC is
+ * unused but set to zero. The compression level passed in <level> is set. This
+ * value can only be 0 (no compression) or 1 (compression) and other values
+ * will lead to unpredictable behaviour. The function always returns 0.
+ */
+int slz_rfc1951_init(struct slz_stream *strm, int level)
+{
+ strm->state = SLZ_ST_EOB; // no header
+ strm->level = level;
+ strm->format = SLZ_FMT_DEFLATE;
+ strm->crc32 = 0;
+ strm->ilen = 0;
+ strm->qbits = 0;
+ strm->queue = 0;
+ return 0;
+}
+
/* Flushes any pending data for stream <strm> into buffer <buf>, then emits an
 * empty literal block to byte-align the output, allowing to completely flush
 * the queue. This requires that the output buffer still has the size of the
 * queue available (up to 4 bytes), plus one byte for (BFINAL,BTYPE), plus 4
 * bytes for LEN+NLEN, or a total of 9 bytes in the worst case. The number of
 * bytes emitted is returned. It is guaranteed that the queue is empty on
 * return. This may cause some overhead by adding needless 5-byte blocks if
 * called too often.
 */
int slz_rfc1951_flush(struct slz_stream *strm, unsigned char *buf)
{
	strm->outbuf = buf;

	/* The queue is always empty on INIT, DONE, and END */
	if (!strm->qbits)
		return 0;

	/* we may need to terminate a huffman output. Lit is always in EOB state */
	if (strm->state != SLZ_ST_EOB) {
		strm->state = (strm->state == SLZ_ST_LAST) ? SLZ_ST_DONE : SLZ_ST_EOB;
		send_eob(strm);
	}

	/* send BFINAL according to state, and BTYPE=00 (lit) */
	enqueue8(strm, (strm->state == SLZ_ST_DONE) ? 1 : 0, 3);
	flush_bits(strm);            // emit pending bits, byte-aligns the output
	copy_32b(strm, 0xFFFF0000U); // empty stored block: len=0, nlen=~0

	/* Now the queue is empty, EOB was sent, BFINAL might have been sent if
	 * we completed the last block, and a zero-byte block was sent to byte-
	 * align the output. The last state reflects all this. Let's just
	 * return the number of bytes added to the output buffer.
	 */
	return strm->outbuf - buf;
}
+
/* Flushes any pending data for stream <strm> into buffer <buf>, then sends
 * BTYPE=1 and BFINAL=1 if needed. The stream ends in SLZ_ST_DONE. It returns
 * the number of bytes emitted. The trailer consists in flushing the possibly
 * pending bits from the queue (up to 7 bits), then possibly EOB (7 bits),
 * then 3 bits, EOB, a rounding to the next byte, which amounts to a total of
 * 4 bytes max, that the caller must ensure are available before calling the
 * function.
 */
int slz_rfc1951_finish(struct slz_stream *strm, unsigned char *buf)
{
	strm->outbuf = buf;

	/* terminate an open fixed-huffman block with its EOB symbol */
	if (strm->state == SLZ_ST_FIXED || strm->state == SLZ_ST_LAST) {
		strm->state = (strm->state == SLZ_ST_LAST) ? SLZ_ST_DONE : SLZ_ST_EOB;
		send_eob(strm);
	}

	if (strm->state != SLZ_ST_DONE) {
		/* send BTYPE=1, BFINAL=1 */
		enqueue8(strm, 3, 3);
		send_eob(strm);
		strm->state = SLZ_ST_DONE;
	}

	flush_bits(strm);
	return strm->outbuf - buf;
}
+
+/* Now RFC1952-specific declarations and extracts from RFC.
+ * From RFC1952 about the GZIP file format :
+
+A gzip file consists of a series of "members" ...
+
+2.3. Member format
+
+ Each member has the following structure:
+
+ +---+---+---+---+---+---+---+---+---+---+
+ |ID1|ID2|CM |FLG| MTIME |XFL|OS | (more-->)
+ +---+---+---+---+---+---+---+---+---+---+
+
+ (if FLG.FEXTRA set)
+
+ +---+---+=================================+
+ | XLEN |...XLEN bytes of "extra field"...| (more-->)
+ +---+---+=================================+
+
+ (if FLG.FNAME set)
+
+ +=========================================+
+ |...original file name, zero-terminated...| (more-->)
+ +=========================================+
+
+ (if FLG.FCOMMENT set)
+
+ +===================================+
+ |...file comment, zero-terminated...| (more-->)
+ +===================================+
+
+ (if FLG.FHCRC set)
+
+ +---+---+
+ | CRC16 |
+ +---+---+
+
+ +=======================+
+ |...compressed blocks...| (more-->)
+ +=======================+
+
+ 0 1 2 3 4 5 6 7
+ +---+---+---+---+---+---+---+---+
+ | CRC32 | ISIZE |
+ +---+---+---+---+---+---+---+---+
+
+
+2.3.1. Member header and trailer
+
+ ID1 (IDentification 1)
+ ID2 (IDentification 2)
+ These have the fixed values ID1 = 31 (0x1f, \037), ID2 = 139
+ (0x8b, \213), to identify the file as being in gzip format.
+
+ CM (Compression Method)
+ This identifies the compression method used in the file. CM
+ = 0-7 are reserved. CM = 8 denotes the "deflate"
+ compression method, which is the one customarily used by
+ gzip and which is documented elsewhere.
+
+ FLG (FLaGs)
+ This flag byte is divided into individual bits as follows:
+
+ bit 0 FTEXT
+ bit 1 FHCRC
+ bit 2 FEXTRA
+ bit 3 FNAME
+ bit 4 FCOMMENT
+ bit 5 reserved
+ bit 6 reserved
+ bit 7 reserved
+
+ Reserved FLG bits must be zero.
+
+ MTIME (Modification TIME)
+ This gives the most recent modification time of the original
+ file being compressed. The time is in Unix format, i.e.,
+ seconds since 00:00:00 GMT, Jan. 1, 1970. (Note that this
+ may cause problems for MS-DOS and other systems that use
+ local rather than Universal time.) If the compressed data
+ did not come from a file, MTIME is set to the time at which
+ compression started. MTIME = 0 means no time stamp is
+ available.
+
+ XFL (eXtra FLags)
+ These flags are available for use by specific compression
+ methods. The "deflate" method (CM = 8) sets these flags as
+ follows:
+
+ XFL = 2 - compressor used maximum compression,
+ slowest algorithm
+ XFL = 4 - compressor used fastest algorithm
+
+ OS (Operating System)
+ This identifies the type of file system on which compression
+ took place. This may be useful in determining end-of-line
+ convention for text files. The currently defined values are
+ as follows:
+
+ 0 - FAT filesystem (MS-DOS, OS/2, NT/Win32)
+ 1 - Amiga
+ 2 - VMS (or OpenVMS)
+ 3 - Unix
+ 4 - VM/CMS
+ 5 - Atari TOS
+ 6 - HPFS filesystem (OS/2, NT)
+ 7 - Macintosh
+ 8 - Z-System
+ 9 - CP/M
+ 10 - TOPS-20
+ 11 - NTFS filesystem (NT)
+ 12 - QDOS
+ 13 - Acorn RISCOS
+ 255 - unknown
+
+ ==> A file compressed using "gzip -1" on Unix-like systems can be :
+
+ 1F 8B 08 00 00 00 00 00 04 03
+ <deflate-compressed stream>
+ crc32 size32
+*/
+
/* fixed 10-byte gzip member header (see RFC1952 excerpt above) */
static const unsigned char gzip_hdr[] = { 0x1F, 0x8B,   // ID1, ID2
                                          0x08, 0x00,   // Deflate, flags (none)
                                          0x00, 0x00, 0x00, 0x00, // mtime: none
                                          0x04, 0x03 }; // fastest comp, OS=Unix
+
/* Updates CRC32 <crc> with the single byte <x> and returns the new value.
 * Uses the hardware crc32b instruction when available, otherwise a single
 * table lookup. The ARM instruction works on inverted CRCs, hence the
 * pre/post complement to keep the non-inverting convention used here.
 */
static inline uint32_t crc32_char(uint32_t crc, uint8_t x)
{
#if defined(__ARM_FEATURE_CRC32)
	crc = ~crc;
# if defined(__ARM_ARCH_ISA_A64)
	// 64 bit mode
	__asm__ volatile("crc32b %w0,%w0,%w1" : "+r"(crc) : "r"(x));
# else
	// 32 bit mode (e.g. armv7 compiler building for armv8)
	__asm__ volatile("crc32b %0,%0,%1" : "+r"(crc) : "r"(x));
# endif
	crc = ~crc;
#else
	crc = crc32_fast[0][(crc ^ x) & 0xff] ^ (crc >> 8);
#endif
	return crc;
}
+
/* Folds the 32-bit word <data> into the CRC. The caller is expected to have
 * already XORed the input word into the running CRC (see slz_crc32_by4),
 * which is what the 4-way table lookup below consumes. NOTE(review): on ARM
 * the crc32w instruction with an all-ones second operand plus the final
 * complement presumably achieves the same folding — confirm on hardware.
 */
static inline uint32_t crc32_uint32(uint32_t data)
{
#if defined(__ARM_FEATURE_CRC32)
# if defined(__ARM_ARCH_ISA_A64)
	// 64 bit mode
	__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(data) : "r"(~0UL));
# else
	// 32 bit mode (e.g. armv7 compiler building for armv8)
	__asm__ volatile("crc32w %0,%0,%1" : "+r"(data) : "r"(~0UL));
# endif
	data = ~data;
#else
	data = crc32_fast[3][(data >>  0) & 0xff] ^
	       crc32_fast[2][(data >>  8) & 0xff] ^
	       crc32_fast[1][(data >> 16) & 0xff] ^
	       crc32_fast[0][(data >> 24) & 0xff];
#endif
	return data;
}
+
/* Byte-at-a-time CRC32 update, modified from the RFC1952 sample to work with
 * non-inverting CRCs. Returns <crc> updated with <len> bytes from <buf>.
 */
uint32_t slz_crc32_by1(uint32_t crc, const unsigned char *buf, int len)
{
	const unsigned char *end = buf + len;

	while (buf < end)
		crc = crc32_char(crc, *buf++);
	return crc;
}
+
/* This version computes the crc32 of <buf> over <len> bytes, doing most of it
 * in 32-bit chunks (16 bytes per main-loop round), then 4-byte steps, then a
 * final byte-at-a-time tail.
 */
uint32_t slz_crc32_by4(uint32_t crc, const unsigned char *buf, int len)
{
	const unsigned char *end = buf + len;

	/* main loop: 16 bytes per iteration */
	while (buf <= end - 16) {
#ifdef UNALIGNED_LE_OK
#if defined(__ARM_FEATURE_CRC32)
		/* hardware path works on inverted CRCs, hence the complements */
		crc = ~crc;
# if defined(__ARM_ARCH_ISA_A64)
		// 64 bit mode
		__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(crc) : "r"(*(uint32_t*)(buf)));
		__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 4)));
		__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 8)));
		__asm__ volatile("crc32w %w0,%w0,%w1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 12)));
# else
		// 32 bit mode (e.g. armv7 compiler building for armv8)
		__asm__ volatile("crc32w %0,%0,%1" : "+r"(crc) : "r"(*(uint32_t*)(buf)));
		__asm__ volatile("crc32w %0,%0,%1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 4)));
		__asm__ volatile("crc32w %0,%0,%1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 8)));
		__asm__ volatile("crc32w %0,%0,%1" : "+r"(crc) : "r"(*(uint32_t*)(buf + 12)));
# endif
		crc = ~crc;
#else
		/* table path: XOR the word in, then fold with 4 lookups */
		crc ^= *(uint32_t *)buf;
		crc = crc32_uint32(crc);

		crc ^= *(uint32_t *)(buf + 4);
		crc = crc32_uint32(crc);

		crc ^= *(uint32_t *)(buf + 8);
		crc = crc32_uint32(crc);

		crc ^= *(uint32_t *)(buf + 12);
		crc = crc32_uint32(crc);
#endif
#else
		/* big endian / strict alignment: byte loads only */
		crc = crc32_fast[3][(buf[0] ^ (crc >>  0)) & 0xff] ^
		      crc32_fast[2][(buf[1] ^ (crc >>  8)) & 0xff] ^
		      crc32_fast[1][(buf[2] ^ (crc >> 16)) & 0xff] ^
		      crc32_fast[0][(buf[3] ^ (crc >> 24)) & 0xff];

		crc = crc32_fast[3][(buf[4] ^ (crc >>  0)) & 0xff] ^
		      crc32_fast[2][(buf[5] ^ (crc >>  8)) & 0xff] ^
		      crc32_fast[1][(buf[6] ^ (crc >> 16)) & 0xff] ^
		      crc32_fast[0][(buf[7] ^ (crc >> 24)) & 0xff];

		crc = crc32_fast[3][(buf[8] ^ (crc >>  0)) & 0xff] ^
		      crc32_fast[2][(buf[9] ^ (crc >>  8)) & 0xff] ^
		      crc32_fast[1][(buf[10] ^ (crc >> 16)) & 0xff] ^
		      crc32_fast[0][(buf[11] ^ (crc >> 24)) & 0xff];

		crc = crc32_fast[3][(buf[12] ^ (crc >>  0)) & 0xff] ^
		      crc32_fast[2][(buf[13] ^ (crc >>  8)) & 0xff] ^
		      crc32_fast[1][(buf[14] ^ (crc >> 16)) & 0xff] ^
		      crc32_fast[0][(buf[15] ^ (crc >> 24)) & 0xff];
#endif
		buf += 16;
	}

	/* remaining aligned 4-byte chunks */
	while (buf <= end - 4) {
#ifdef UNALIGNED_LE_OK
		crc ^= *(uint32_t *)buf;
		crc = crc32_uint32(crc);
#else
		crc = crc32_fast[3][(buf[0] ^ (crc >>  0)) & 0xff] ^
		      crc32_fast[2][(buf[1] ^ (crc >>  8)) & 0xff] ^
		      crc32_fast[1][(buf[2] ^ (crc >> 16)) & 0xff] ^
		      crc32_fast[0][(buf[3] ^ (crc >> 24)) & 0xff];
#endif
		buf += 4;
	}

	/* 0..3 trailing bytes */
	while (buf < end)
		crc = crc32_char(crc, *buf++);
	return crc;
}
+
/* uses the most suitable crc32 function to update crc on <buf, len>;
 * currently always the 4-byte-at-a-time variant.
 */
static inline uint32_t update_crc(uint32_t crc, const void *buf, int len)
{
	return slz_crc32_by4(crc, buf, len);
}
+
+/* Sends the gzip header for stream <strm> into buffer <buf>. When it's done,
+ * the stream state is updated to SLZ_ST_EOB. It returns the number of bytes
+ * emitted which is always 10. The caller is responsible for ensuring there's
+ * always enough room in the buffer.
+ */
+int slz_rfc1952_send_header(struct slz_stream *strm, unsigned char *buf)
+{
+ memcpy(buf, gzip_hdr, sizeof(gzip_hdr));
+ strm->state = SLZ_ST_EOB;
+ return sizeof(gzip_hdr);
+}
+
/* Encodes the block according to rfc1952. This means that the CRC of the input
 * block is computed according to the CRC32 algorithm. If the header was never
 * sent, it is sent first. The number of output bytes is returned.
 */
long slz_rfc1952_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more)
{
	long ret = 0;

	/* first call on a fresh stream: emit the 10-byte gzip header */
	if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
		ret += slz_rfc1952_send_header(strm, out);

	strm->crc32 = update_crc(strm->crc32, in, ilen);
	ret += slz_rfc1951_encode(strm, out + ret, in, ilen, more);
	return ret;
}
+
+/* Initializes stream <strm> for use with the gzip format (rfc1952). The
+ * compression level passed in <level> is set. This value can only be 0 (no
+ * compression) or 1 (compression) and other values will lead to unpredictable
+ * behaviour. The function always returns 0.
+ */
+int slz_rfc1952_init(struct slz_stream *strm, int level)
+{
+ strm->state = SLZ_ST_INIT;
+ strm->level = level;
+ strm->format = SLZ_FMT_GZIP;
+ strm->crc32 = 0;
+ strm->ilen = 0;
+ strm->qbits = 0;
+ strm->queue = 0;
+ return 0;
+}
+
/* Flushes any pending data for stream <strm> into buffer <buf>, then emits an
 * empty literal block to byte-align the output, allowing to completely flush
 * the queue. Note that if the initial header was never sent, it will be sent
 * first as well (10 extra bytes). This requires that the output buffer still
 * has this plus the size of the queue available (up to 4 bytes), plus one byte
 * for (BFINAL,BTYPE), plus 4 bytes for LEN+NLEN, or a total of 19 bytes in the
 * worst case. The number of bytes emitted is returned. It is guaranteed that
 * the queue is empty on return. This may cause some overhead by adding
 * needless 5-byte blocks if called too often.
 */
int slz_rfc1952_flush(struct slz_stream *strm, unsigned char *buf)
{
	int sent = 0;

	/* fresh stream: emit the gzip header before anything else */
	if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
		sent = slz_rfc1952_send_header(strm, buf);

	sent += slz_rfc1951_flush(strm, buf + sent);
	return sent;
}
+
/* Flushes pending bits and sends the gzip trailer for stream <strm> into
 * buffer <buf>. When it's done, the stream state is updated to SLZ_ST_END. It
 * returns the number of bytes emitted. The trailer consists in flushing the
 * possibly pending bits from the queue (up to 24 bits), rounding to the next
 * byte, then 4 bytes for the CRC and another 4 bytes for the input length.
 * That may amount to 4+4+4 = 12 bytes, that the caller must ensure are
 * available before calling the function. Note that if the initial header was
 * never sent, it will be sent first as well (10 extra bytes).
 */
int slz_rfc1952_finish(struct slz_stream *strm, unsigned char *buf)
{
	strm->outbuf = buf;

	if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
		strm->outbuf += slz_rfc1952_send_header(strm, strm->outbuf);

	slz_rfc1951_finish(strm, strm->outbuf);
	/* gzip trailer: CRC32 then ISIZE, both little-endian (rfc1952) */
	copy_32b(strm, strm->crc32);
	copy_32b(strm, strm->ilen);
	strm->state = SLZ_ST_END;

	return strm->outbuf - buf;
}
+
+
+/* RFC1950-specific stuff. This is for the Zlib stream format.
+ * From RFC1950 (zlib) :
+ *
+
+ 2.2. Data format
+
+ A zlib stream has the following structure:
+
+ 0 1
+ +---+---+
+ |CMF|FLG| (more-->)
+ +---+---+
+
+
+ (if FLG.FDICT set)
+
+ 0 1 2 3
+ +---+---+---+---+
+ | DICTID | (more-->)
+ +---+---+---+---+
+
+ +=====================+---+---+---+---+
+ |...compressed data...| ADLER32 |
+ +=====================+---+---+---+---+
+
+ Any data which may appear after ADLER32 are not part of the zlib
+ stream.
+
+ CMF (Compression Method and flags)
+ This byte is divided into a 4-bit compression method and a 4-
+ bit information field depending on the compression method.
+
+ bits 0 to 3 CM Compression method
+ bits 4 to 7 CINFO Compression info
+
+ CM (Compression method)
+ This identifies the compression method used in the file. CM = 8
+ denotes the "deflate" compression method with a window size up
+ to 32K. This is the method used by gzip and PNG (see
+ references [1] and [2] in Chapter 3, below, for the reference
+ documents). CM = 15 is reserved. It might be used in a future
+ version of this specification to indicate the presence of an
+ extra field before the compressed data.
+
+ CINFO (Compression info)
+ For CM = 8, CINFO is the base-2 logarithm of the LZ77 window
+ size, minus eight (CINFO=7 indicates a 32K window size). Values
+ of CINFO above 7 are not allowed in this version of the
+ specification. CINFO is not defined in this specification for
+ CM not equal to 8.
+
+ FLG (FLaGs)
+ This flag byte is divided as follows:
+
+ bits 0 to 4 FCHECK (check bits for CMF and FLG)
+ bit 5 FDICT (preset dictionary)
+ bits 6 to 7 FLEVEL (compression level)
+
+ The FCHECK value must be such that CMF and FLG, when viewed as
+ a 16-bit unsigned integer stored in MSB order (CMF*256 + FLG),
+ is a multiple of 31.
+
+
+ FDICT (Preset dictionary)
+ If FDICT is set, a DICT dictionary identifier is present
+ immediately after the FLG byte. The dictionary is a sequence of
+ bytes which are initially fed to the compressor without
+ producing any compressed output. DICT is the Adler-32 checksum
+ of this sequence of bytes (see the definition of ADLER32
+ below). The decompressor can use this identifier to determine
+ which dictionary has been used by the compressor.
+
+ FLEVEL (Compression level)
+ These flags are available for use by specific compression
+ methods. The "deflate" method (CM = 8) sets these flags as
+ follows:
+
+ 0 - compressor used fastest algorithm
+ 1 - compressor used fast algorithm
+ 2 - compressor used default algorithm
+ 3 - compressor used maximum compression, slowest algorithm
+
+ The information in FLEVEL is not needed for decompression; it
+ is there to indicate if recompression might be worthwhile.
+
+ compressed data
+ For compression method 8, the compressed data is stored in the
+ deflate compressed data format as described in the document
+ "DEFLATE Compressed Data Format Specification" by L. Peter
+ Deutsch. (See reference [3] in Chapter 3, below)
+
+ Other compressed data formats are not specified in this version
+ of the zlib specification.
+
+ ADLER32 (Adler-32 checksum)
+ This contains a checksum value of the uncompressed data
+ (excluding any dictionary data) computed according to Adler-32
+ algorithm. This algorithm is a 32-bit extension and improvement
+ of the Fletcher algorithm, used in the ITU-T X.224 / ISO 8073
+ standard. See references [4] and [5] in Chapter 3, below)
+
+ Adler-32 is composed of two sums accumulated per byte: s1 is
+ the sum of all bytes, s2 is the sum of all s1 values. Both sums
+ are done modulo 65521. s1 is initialized to 1, s2 to zero. The
+ Adler-32 checksum is stored as s2*65536 + s1 in most-
+ significant-byte first (network) order.
+
+ ==> The stream can start with only 2 bytes :
       - CMF = 0x78 : CINFO=7 (32kB window), CM=8 (deflate)
+ - FLG = 0x01 : FLEVEL = 0 (fastest), FDICT=0 (no dict), FCHECK=1 so
+ that 0x7801 is a multiple of 31 (30721 = 991 * 31).
+
+ ==> and it ends with only 4 bytes, the Adler-32 checksum in big-endian format.
+
+ */
+
/* fixed 2-byte zlib header (see RFC1950 excerpt above) */
static const unsigned char zlib_hdr[] = { 0x78, 0x01 }; // 32k win, deflate, chk=1
+
+
/* Reference Adler-32 implementation from RFC1950, kept for verification.
 * <crc> is the running checksum (1 for a fresh stream); returns the checksum
 * updated with <len> bytes from <buf>.
 */
uint32_t slz_adler32_by1(uint32_t crc, const unsigned char *buf, int len)
{
	uint32_t lo = crc & 0xffff;         /* s1: sum of bytes */
	uint32_t hi = (crc >> 16) & 0xffff; /* s2: sum of s1 values */
	const unsigned char *end = buf + len;

	while (buf < end) {
		lo = (lo + *buf++) % 65521;
		hi = (hi + lo) % 65521;
	}
	return (hi << 16) + lo;
}
+
/* Computes the Adler-32 sum of <buf> over <len> bytes without taking the
 * expensive modulus on every byte. Sums are accumulated freely over chunks of
 * at most 4096 bytes, then reduced using the identity
 *     x % 65521 = (x & 0xffff) + (x >> 16) * 15   (applied until x < 65536)
 * since 65536 - 65521 = 15. <crc> is the running checksum (1 for a fresh
 * stream); returns the updated checksum.
 */
uint32_t slz_adler32_block(uint32_t crc, const unsigned char *buf, long len)
{
	long sum1 = crc & 0xffff;
	long sum2 = crc >> 16;
	long chunk;
	long i;

	do {
		/* cap the chunk at 2^12 bytes so that sum2, which grows the
		 * fastest, cannot overflow before the reduction below.
		 */
		chunk = (len > 4096) ? 4096 : len;
		len -= chunk;

		for (i = 0; i < chunk; i++) {
			sum1 += buf[i];
			sum2 += sum1;
		}

		/* sum1 < 2^12 * 255 + 65536 < 2^21, so a single fold brings it
		 * below 65536+some; one conditional subtraction finishes the
		 * reduction to [0, 65521).
		 */
		sum1 = (sum1 & 0xffff) + 15 * (sum1 >> 16);
		if (sum1 >= 65521)
			sum1 -= 65521;

		/* sum2 may be much larger (bounded by 2^32-1 for simplicity),
		 * so fold twice before the final conditional subtraction.
		 */
		sum2 = (sum2 & 0xffff) + 15 * (sum2 >> 16);
		sum2 = (sum2 & 0xffff) + 15 * (sum2 >> 16);
		if (sum2 >= 65521)
			sum2 -= 65521;

		buf += chunk;
	} while (len);
	return (sum2 << 16) + sum1;
}
+
/* Sends the zlib header for stream <strm> into buffer <buf>. When it's done,
 * the stream state is updated to SLZ_ST_EOB. It returns the number of bytes
 * emitted which is always 2. The caller is responsible for ensuring there's
 * always enough room in the buffer.
 */
int slz_rfc1950_send_header(struct slz_stream *strm, unsigned char *buf)
{
	memcpy(buf, zlib_hdr, sizeof(zlib_hdr));
	strm->state = SLZ_ST_EOB;
	return sizeof(zlib_hdr);
}
+
/* Encodes the block according to rfc1950. This means that the checksum of the
 * input block is computed according to the ADLER32 algorithm. If the header
 * was never sent, it is sent first. The number of output bytes is returned.
 */
long slz_rfc1950_encode(struct slz_stream *strm, unsigned char *out, const unsigned char *in, long ilen, int more)
{
	long ret = 0;

	/* first call on a fresh stream: emit the 2-byte zlib header */
	if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
		ret += slz_rfc1950_send_header(strm, out);

	strm->crc32 = slz_adler32_block(strm->crc32, in, ilen);
	ret += slz_rfc1951_encode(strm, out + ret, in, ilen, more);
	return ret;
}
+
+/* Initializes stream <strm> for use with the zlib format (rfc1952). The
+ * compression level passed in <level> is set. This value can only be 0 (no
+ * compression) or 1 (compression) and other values will lead to unpredictable
+ * behaviour. The function always returns 0.
+ */
+int slz_rfc1950_init(struct slz_stream *strm, int level)
+{
+ strm->state = SLZ_ST_INIT;
+ strm->level = level;
+ strm->format = SLZ_FMT_ZLIB;
+ strm->crc32 = 1; // rfc1950/zlib starts with initial crc=1
+ strm->ilen = 0;
+ strm->qbits = 0;
+ strm->queue = 0;
+ return 0;
+}
+
/* Flushes any pending data for stream <strm> into buffer <buf>, then emits an
 * empty literal block to byte-align the output, allowing to completely flush
 * the queue. Note that if the initial header was never sent, it will be sent
 * first as well (2 extra bytes). This requires that the output buffer still
 * has this plus the size of the queue available (up to 4 bytes), plus one byte
 * for (BFINAL,BTYPE), plus 4 bytes for LEN+NLEN, or a total of 11 bytes in the
 * worst case. The number of bytes emitted is returned. It is guaranteed that
 * the queue is empty on return. This may cause some overhead by adding
 * needless 5-byte blocks if called too often.
 */
int slz_rfc1950_flush(struct slz_stream *strm, unsigned char *buf)
{
	int sent = 0;

	/* fresh stream: emit the zlib header before anything else */
	if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
		sent = slz_rfc1950_send_header(strm, buf);

	sent += slz_rfc1951_flush(strm, buf + sent);
	return sent;
}
+
+/* Flushes pending bits and sends the gzip trailer for stream <strm> into
+ * buffer <buf>. When it's done, the stream state is updated to SLZ_ST_END. It
+ * returns the number of bytes emitted. The trailer consists in flushing the
+ * possibly pending bits from the queue (up to 24 bits), rounding to the next
+ * byte, then 4 bytes for the CRC. That may about to 4+4 = 8 bytes, that the
+ * caller must ensure are available before calling the function. Note that if
+ * the initial header was never sent, it will be sent first as well (2 extra
+ * bytes).
+ */
+int slz_rfc1950_finish(struct slz_stream *strm, unsigned char *buf)
+{
+ strm->outbuf = buf;
+
+ if (__builtin_expect(strm->state == SLZ_ST_INIT, 0))
+ strm->outbuf += slz_rfc1952_send_header(strm, strm->outbuf);
+
+ slz_rfc1951_finish(strm, strm->outbuf);
+ copy_8b(strm, (strm->crc32 >> 24) & 0xff);
+ copy_8b(strm, (strm->crc32 >> 16) & 0xff);
+ copy_8b(strm, (strm->crc32 >> 8) & 0xff);
+ copy_8b(strm, (strm->crc32 >> 0) & 0xff);
+ strm->state = SLZ_ST_END;
+ return strm->outbuf - buf;
+}
+
/* Runs once at load time (constructor): builds the CRC32 lookup tables
 * (skipped when hardware CRC32 is available) and the distance-to-huffman-code
 * table used by the encoder.
 */
__attribute__((constructor))
static void __slz_initialize(void)
{
#if !defined(__ARM_FEATURE_CRC32)
	__slz_make_crc_table();
#endif
	__slz_prepare_dist_table();
}
diff --git a/src/sock.c b/src/sock.c
new file mode 100644
index 0000000..7fcdc10
--- /dev/null
+++ b/src/sock.c
@@ -0,0 +1,1072 @@
+/*
+ * Generic code for native (BSD-compatible) sockets
+ *
+ * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <net/if.h>
+
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/connection.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/namespace.h>
+#include <haproxy/proto_sockpair.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/tools.h>
+
+/* socket transfer options, describing properties of an FD inherited from an
+ * older process (see sock_get_old_sockets() / sock_find_compatible_fd())
+ */
+#define SOCK_XFER_OPT_FOREIGN 0x000000001
+#define SOCK_XFER_OPT_V6ONLY 0x000000002
+#define SOCK_XFER_OPT_DGRAM 0x000000004
+
+/* the list of remaining sockets transferred from an older process */
+struct xfer_sock_list {
+	int fd;                       /* the transferred socket's FD */
+	int options; /* socket options as SOCK_XFER_OPT_* */
+	char *iface;                  /* bound interface name, or NULL */
+	char *namespace;              /* network namespace name, or NULL */
+	int if_namelen;               /* strlen(iface), 0 if none */
+	int ns_namelen;               /* strlen(namespace), 0 if none */
+	struct xfer_sock_list *prev;  /* doubly-linked list links */
+	struct xfer_sock_list *next;
+	struct sockaddr_storage addr; /* address the socket is bound to */
+};
+
+/* head of the list above; entries are consumed by sock_find_compatible_fd()
+ * and the leftovers dropped by sock_drop_unused_old_sockets()
+ */
+static struct xfer_sock_list *xfer_sock_list;
+
+
+/* Accept an incoming connection from listener <l>, and return it, as well as
+ * a CO_AC_* status code into <status> if not null. Null is returned on error.
+ * <l> must be a valid listener with a valid frontend. The status is one of
+ * CO_AC_DONE (success, or nothing left to accept), CO_AC_RETRY (transient
+ * failure, try again), CO_AC_PAUSE (listener should be paused) or CO_AC_YIELD
+ * (unexpected error, let other tasks run).
+ */
+struct connection *sock_accept_conn(struct listener *l, int *status)
+{
+#ifdef USE_ACCEPT4
+	static int accept4_broken;
+#endif
+	struct proxy *p = l->bind_conf->frontend;
+	struct connection *conn = NULL;
+	struct sockaddr_storage *addr = NULL;
+	socklen_t laddr;
+	int ret;
+	int cfd;
+
+	if (!sockaddr_alloc(&addr, NULL, 0))
+		goto fail_addr;
+
+	/* accept() will mark all accepted FDs O_NONBLOCK and the ones accepted
+	 * in the master process as FD_CLOEXEC. It's not done for workers
+	 * because 1) workers are not supposed to execute anything so there's
+	 * no reason for uselessly slowing down everything, and 2) that would
+	 * prevent us from implementing fd passing in the future.
+	 */
+#ifdef USE_ACCEPT4
+	laddr = sizeof(*conn->src); /* conn is still NULL here but sizeof() does not evaluate it */
+
+	/* only call accept4() if it's known to be safe, otherwise fallback to
+	 * the legacy accept() + fcntl(). The expression below latches
+	 * <accept4_broken> to 1 when accept4() fails with ENOSYS/EINVAL/EBADF
+	 * so that all subsequent calls go straight to the fallback block.
+	 */
+	if (unlikely(accept4_broken) ||
+	    (((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
+	                     SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == -1) &&
+	     (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
+	     ((accept4_broken = 1))))
+#endif
+	{
+		laddr = sizeof(*conn->src);
+		if ((cfd = accept(l->rx.fd, (struct sockaddr*)addr, &laddr)) != -1) {
+			fd_set_nonblock(cfd);
+			if (master)
+				fd_set_cloexec(cfd);
+		}
+	}
+
+	if (likely(cfd != -1)) {
+		if (unlikely(cfd >= global.maxsock)) {
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached the configured maximum connection limit. Please check the global 'maxconn' value.\n",
+				 p->id);
+			goto fail_conn;
+		}
+
+		/* Perfect, the connection was accepted */
+		conn = conn_new(&l->obj_type);
+		if (!conn)
+			goto fail_conn;
+
+		conn->src = addr;
+		conn->handle.fd = cfd;
+		ret = CO_AC_DONE;
+		goto done;
+	}
+
+	/* error conditions below */
+	sockaddr_free(&addr);
+
+	switch (errno) {
+#if defined(EWOULDBLOCK) && defined(EAGAIN) && EWOULDBLOCK != EAGAIN
+	case EWOULDBLOCK:
+#endif
+	case EAGAIN:
+		ret = CO_AC_DONE; /* nothing more to accept */
+		if (fdtab[l->rx.fd].state & (FD_POLL_HUP|FD_POLL_ERR)) {
+			/* the listening socket might have been disabled in a shared
+			 * process and we're a collateral victim. We'll just pause for
+			 * a while in case it comes back. In the mean time, we need to
+			 * clear this sticky flag.
+			 */
+			_HA_ATOMIC_AND(&fdtab[l->rx.fd].state, ~(FD_POLL_HUP|FD_POLL_ERR));
+			ret = CO_AC_PAUSE;
+		}
+		fd_cant_recv(l->rx.fd);
+		break;
+
+	case EINVAL:
+		/* might be trying to accept on a shut fd (eg: soft stop) */
+		ret = CO_AC_PAUSE;
+		break;
+
+	case EINTR:
+	case ECONNABORTED:
+		ret = CO_AC_RETRY;
+		break;
+
+	case ENFILE:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached system FD limit (maxsock=%d). Please check system tunables.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	case EMFILE:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached process FD limit (maxsock=%d). Please check 'ulimit-n' and restart.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	case ENOBUFS:
+	case ENOMEM:
+		if (p)
+			send_log(p, LOG_EMERG,
+				 "Proxy %s reached system memory limit (maxsock=%d). Please check system tunables.\n",
+				 p->id, global.maxsock);
+		ret = CO_AC_PAUSE;
+		break;
+
+	default:
+		/* unexpected result, let's give up and let other tasks run */
+		ret = CO_AC_YIELD;
+	}
+ done:
+	if (status)
+		*status = ret;
+	return conn;
+
+ fail_conn:
+	sockaddr_free(&addr);
+	/* The accept call already succeeded by the time we try to allocate the connection,
+	 * we need to close it in case of failure. */
+	close(cfd);
+ fail_addr:
+	ret = CO_AC_PAUSE;
+	goto done;
+}
+
+/* Create a socket to connect to the server in conn->dst (which MUST be valid),
+ * using the configured namespace if needed, or the one passed by the proxy
+ * protocol if required to do so. It ultimately calls socket() or socketat()
+ * and returns the FD or error code.
+ */
+int sock_create_server_socket(struct connection *conn)
+{
+	const struct netns_entry *ns = NULL;
+
+#ifdef USE_NS
+	if (objt_server(conn->target)) {
+		/* SRV_F_USE_NS_FROM_PP means the namespace was advertised via
+		 * the PROXY protocol; otherwise use the server's configured one.
+		 */
+		if (__objt_server(conn->target)->flags & SRV_F_USE_NS_FROM_PP)
+			ns = conn->proxy_netns;
+		else
+			ns = __objt_server(conn->target)->netns;
+	}
+#endif
+	return my_socketat(ns, conn->dst->ss_family, SOCK_STREAM, 0);
+}
+
+/* Enables receiving on receiver <rx> once already bound. No-op when the
+ * receiver was never bound (RX_F_BOUND not set).
+ */
+void sock_enable(struct receiver *rx)
+{
+	if (rx->flags & RX_F_BOUND)
+		fd_want_recv_safe(rx->fd);
+}
+
+/* Disables receiving on receiver <rx> once already bound. No-op when the
+ * receiver was never bound (RX_F_BOUND not set).
+ */
+void sock_disable(struct receiver *rx)
+{
+	if (rx->flags & RX_F_BOUND)
+		fd_stop_recv(rx->fd);
+}
+
+/* stops, unbinds and possibly closes the FD associated with receiver rx */
+void sock_unbind(struct receiver *rx)
+{
+	/* There are a number of situations where we prefer to keep the FD and
+	 * not to close it (unless we're stopping, of course):
+	 * - worker process unbinding from a worker's non-suspendable FD (ABNS) => close
+	 * - worker process unbinding from a worker's FD with socket transfer enabled => keep
+	 * - master process unbinding from a master's inherited FD => keep
+	 * - master process unbinding from a master's FD => close
+	 * - master process unbinding from a worker's inherited FD => keep
+	 * - master process unbinding from a worker's FD => close
+	 * - worker process unbinding from a master's FD => close
+	 * - worker process unbinding from a worker's FD => close
+	 */
+	if (rx->flags & RX_F_BOUND)
+		rx->proto->rx_disable(rx);
+
+	/* keep the FD: transferable worker socket while socket transfer is
+	 * enabled and we're not stopping (rows 2 of the table above)
+	 */
+	if (!stopping && !master &&
+	    !(rx->flags & RX_F_MWORKER) &&
+	    !(rx->flags & RX_F_NON_SUSPENDABLE) &&
+	    (global.tune.options & GTUNE_SOCKET_TRANSFER))
+		return;
+
+	/* keep the FD: the master keeps inherited FDs (rows 3 and 5 above) */
+	if (!stopping && master &&
+	    rx->flags & RX_F_INHERITED)
+		return;
+
+	/* all remaining cases: really unbind and close */
+	rx->flags &= ~RX_F_BOUND;
+	if (rx->fd != -1)
+		fd_delete(rx->fd);
+	rx->fd = -1;
+}
+
+/*
+ * Retrieves the source address for the socket <fd>, with <dir> indicating
+ * if we're a listener (=0) or an initiator (!=0). It returns 0 in case of
+ * success, -1 in case of error. The socket's source address is stored in
+ * <sa> for <salen> bytes.
+ */
+int sock_get_src(int fd, struct sockaddr *sa, socklen_t salen, int dir)
+{
+	if (dir)
+		return getsockname(fd, sa, &salen); /* initiator: source is our local end */
+	else
+		return getpeername(fd, sa, &salen); /* listener: source is the remote peer */
+}
+
+/*
+ * Retrieves the original destination address for the socket <fd>, with <dir>
+ * indicating if we're a listener (=0) or an initiator (!=0). It returns 0 in
+ * case of success, -1 in case of error. The socket's source address is stored
+ * in <sa> for <salen> bytes.
+ */
+int sock_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir)
+{
+	if (dir)
+		return getpeername(fd, sa, &salen); /* initiator: destination is the remote peer */
+	else
+		return getsockname(fd, sa, &salen); /* listener: destination is our local end */
+}
+
+/* Try to retrieve exported sockets from an older process at CLI address
+ * <unixsocket>, which is either a "sockpair@<fd>" reference (master-worker
+ * mode) or the path of a UNIX socket. The retrieved sockets are placed into
+ * the xfer_sock_list for later use by function sock_find_compatible_fd().
+ * Returns 0 on success, -1 on failure.
+ */
+int sock_get_old_sockets(const char *unixsocket)
+{
+	char *cmsgbuf = NULL, *tmpbuf = NULL;
+	int *tmpfd = NULL;
+	struct sockaddr_un addr;
+	struct cmsghdr *cmsg;
+	struct msghdr msghdr;
+	struct iovec iov;
+	struct xfer_sock_list *xfer_sock = NULL;
+	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
+	int sock = -1;
+	int ret = -1;
+	int ret2 = -1;
+	int fd_nb;
+	int got_fd = 0;
+	int cur_fd = 0;
+	size_t maxoff = 0, curoff = 0;
+
+	if (strncmp("sockpair@", unixsocket, strlen("sockpair@")) == 0) {
+		/* sockpair for master-worker usage */
+		int sv[2];
+		int dst_fd;
+
+		dst_fd = strtoll(unixsocket + strlen("sockpair@"), NULL, 0);
+
+		if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+			ha_warning("socketpair(): Cannot create socketpair. Giving up.\n");
+			/* sv[] is uninitialized here, we cannot proceed */
+			goto out;
+		}
+
+		if (send_fd_uxst(dst_fd, sv[0]) == -1) {
+			ha_alert("socketpair: Cannot transfer the fd %d over sockpair@%d. Giving up.\n", sv[0], dst_fd);
+			close(sv[0]);
+			close(sv[1]);
+			goto out;
+		}
+
+		close(sv[0]); /* we don't need this side anymore */
+		sock = sv[1];
+
+	} else {
+		/* Unix socket */
+
+		sock = socket(PF_UNIX, SOCK_STREAM, 0);
+		if (sock < 0) {
+			ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
+			goto out;
+		}
+
+		strncpy(addr.sun_path, unixsocket, sizeof(addr.sun_path) - 1);
+		addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
+		addr.sun_family = PF_UNIX;
+
+		ret = connect(sock, (struct sockaddr *)&addr, sizeof(addr));
+		if (ret < 0) {
+			ha_warning("Failed to connect to the old process socket '%s'\n", unixsocket);
+			goto out;
+		}
+
+	}
+	memset(&msghdr, 0, sizeof(msghdr));
+	cmsgbuf = malloc(CMSG_SPACE(sizeof(int)) * MAX_SEND_FD);
+	if (!cmsgbuf) {
+		ha_warning("Failed to allocate memory to send sockets\n");
+		goto out;
+	}
+
+	/* bound the wait: the old process may be unresponsive */
+	setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *)&tv, sizeof(tv));
+	iov.iov_base = &fd_nb;
+	iov.iov_len = sizeof(fd_nb);
+	msghdr.msg_iov = &iov;
+	msghdr.msg_iovlen = 1;
+
+	if (send(sock, "_getsocks\n", strlen("_getsocks\n"), 0) != strlen("_getsocks\n")) {
+		ha_warning("Failed to get the number of sockets to be transferred !\n");
+		goto out;
+	}
+
+	/* First, get the number of file descriptors to be received */
+	if (recvmsg(sock, &msghdr, MSG_WAITALL) != sizeof(fd_nb)) {
+		ha_warning("Failed to get the number of sockets to be transferred !\n");
+		goto out;
+	}
+
+	/* never trust a count received from another process: a negative value
+	 * would corrupt the allocation sizes below.
+	 */
+	if (fd_nb < 0) {
+		ha_warning("Invalid number of sockets to be transferred !\n");
+		goto out;
+	}
+
+	if (fd_nb == 0) {
+		ret2 = 0;
+		goto out;
+	}
+
+	/* one byte of namespace length, the namespace, one byte of interface
+	 * length, the interface, and one int of (unused) listener options per
+	 * socket (see the parsing loop below).
+	 */
+	tmpbuf = malloc(fd_nb * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int)));
+	if (tmpbuf == NULL) {
+		ha_warning("Failed to allocate memory while receiving sockets\n");
+		goto out;
+	}
+
+	tmpfd = malloc(fd_nb * sizeof(int));
+	if (tmpfd == NULL) {
+		ha_warning("Failed to allocate memory while receiving sockets\n");
+		goto out;
+	}
+
+	msghdr.msg_control = cmsgbuf;
+	msghdr.msg_controllen = CMSG_SPACE(sizeof(int)) * MAX_SEND_FD;
+	iov.iov_len = MAX_SEND_FD * (1 + MAXPATHLEN + 1 + IFNAMSIZ + sizeof(int));
+
+	/* receive the FDs (as SCM_RIGHTS ancillary data) and their metadata
+	 * (in the regular payload) in batches of up to MAX_SEND_FD.
+	 */
+	do {
+		int ret3;
+
+		iov.iov_base = tmpbuf + curoff;
+
+		ret = recvmsg(sock, &msghdr, 0);
+
+		if (ret == -1 && errno == EINTR)
+			continue;
+
+		if (ret <= 0)
+			break;
+
+		/* Send an ack to let the sender know we got the sockets
+		 * and it can send some more
+		 */
+		do {
+			ret3 = send(sock, &got_fd, sizeof(got_fd), 0);
+		} while (ret3 == -1 && errno == EINTR);
+
+		for (cmsg = CMSG_FIRSTHDR(&msghdr); cmsg != NULL; cmsg = CMSG_NXTHDR(&msghdr, cmsg)) {
+			if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
+				size_t totlen = cmsg->cmsg_len - CMSG_LEN(0);
+
+				if (totlen / sizeof(int) + got_fd > fd_nb) {
+					ha_warning("Got too many sockets !\n");
+					goto out;
+				}
+
+				/*
+				 * Be paranoid and use memcpy() to avoid any
+				 * potential alignment issue.
+				 */
+				memcpy(&tmpfd[got_fd], CMSG_DATA(cmsg), totlen);
+				got_fd += totlen / sizeof(int);
+			}
+		}
+		curoff += ret;
+	} while (got_fd < fd_nb);
+
+	if (got_fd != fd_nb) {
+		ha_warning("We didn't get the expected number of sockets (expecting %d got %d)\n",
+			   fd_nb, got_fd);
+		goto out;
+	}
+
+	maxoff = curoff;
+	curoff = 0;
+
+	/* now parse the metadata stream and build one xfer_sock_list entry
+	 * per received FD.
+	 */
+	for (cur_fd = 0; cur_fd < got_fd; cur_fd++) {
+		int fd = tmpfd[cur_fd];
+		socklen_t socklen;
+		int val;
+		int len;
+
+		xfer_sock = calloc(1, sizeof(*xfer_sock));
+		if (!xfer_sock) {
+			ha_warning("Failed to allocate memory in get_old_sockets() !\n");
+			break;
+		}
+		xfer_sock->fd = -1;
+
+		socklen = sizeof(xfer_sock->addr);
+		if (getsockname(fd, (struct sockaddr *)&xfer_sock->addr, &socklen) != 0) {
+			ha_warning("Failed to get socket address\n");
+			ha_free(&xfer_sock);
+			continue;
+		}
+
+		if (curoff >= maxoff) {
+			ha_warning("Inconsistency while transferring sockets\n");
+			goto out;
+		}
+
+		len = tmpbuf[curoff++];
+		if (len > 0) {
+			/* We have a namespace */
+			if (curoff + len > maxoff) {
+				ha_warning("Inconsistency while transferring sockets\n");
+				goto out;
+			}
+			xfer_sock->namespace = malloc(len + 1);
+			if (!xfer_sock->namespace) {
+				ha_warning("Failed to allocate memory while transferring sockets\n");
+				goto out;
+			}
+			memcpy(xfer_sock->namespace, &tmpbuf[curoff], len);
+			xfer_sock->namespace[len] = 0;
+			xfer_sock->ns_namelen = len;
+			curoff += len;
+		}
+
+		if (curoff >= maxoff) {
+			ha_warning("Inconsistency while transferring sockets\n");
+			goto out;
+		}
+
+		len = tmpbuf[curoff++];
+		if (len > 0) {
+			/* We have an interface */
+			if (curoff + len > maxoff) {
+				ha_warning("Inconsistency while transferring sockets\n");
+				goto out;
+			}
+			xfer_sock->iface = malloc(len + 1);
+			if (!xfer_sock->iface) {
+				ha_warning("Failed to allocate memory while transferring sockets\n");
+				goto out;
+			}
+			memcpy(xfer_sock->iface, &tmpbuf[curoff], len);
+			xfer_sock->iface[len] = 0;
+			xfer_sock->if_namelen = len;
+			curoff += len;
+		}
+
+		if (curoff + sizeof(int) > maxoff) {
+			ha_warning("Inconsistency while transferring sockets\n");
+			goto out;
+		}
+
+		/* we used to have 32 bits of listener options here but we don't
+		 * use them anymore.
+		 */
+		curoff += sizeof(int);
+
+		/* determine the foreign status directly from the socket itself */
+		if (sock_inet_is_foreign(fd, xfer_sock->addr.ss_family))
+			xfer_sock->options |= SOCK_XFER_OPT_FOREIGN;
+
+		socklen = sizeof(val);
+		if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &val, &socklen) == 0 && val == SOCK_DGRAM)
+			xfer_sock->options |= SOCK_XFER_OPT_DGRAM;
+
+#if defined(IPV6_V6ONLY)
+		/* keep only the v6only flag depending on what's currently
+		 * active on the socket, and always drop the v4v6 one.
+		 */
+		socklen = sizeof(val);
+		if (xfer_sock->addr.ss_family == AF_INET6 &&
+		    getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, &socklen) == 0 && val > 0)
+			xfer_sock->options |= SOCK_XFER_OPT_V6ONLY;
+#endif
+
+		/* entry complete, link it at the head of the list */
+		xfer_sock->fd = fd;
+		if (xfer_sock_list)
+			xfer_sock_list->prev = xfer_sock;
+		xfer_sock->next = xfer_sock_list;
+		xfer_sock->prev = NULL;
+		xfer_sock_list = xfer_sock;
+		xfer_sock = NULL;
+	}
+
+	ret2 = 0;
+out:
+	/* If we failed midway make sure to close the remaining
+	 * file descriptors
+	 */
+	if (tmpfd != NULL && cur_fd < got_fd) {
+		for (; cur_fd < got_fd; cur_fd++) {
+			close(tmpfd[cur_fd]);
+		}
+	}
+
+	free(tmpbuf);
+	free(tmpfd);
+	free(cmsgbuf);
+
+	if (sock != -1)
+		close(sock);
+
+	/* release a possibly half-initialized entry */
+	if (xfer_sock) {
+		free(xfer_sock->namespace);
+		free(xfer_sock->iface);
+		if (xfer_sock->fd != -1)
+			close(xfer_sock->fd);
+		free(xfer_sock);
+	}
+	return (ret2);
+}
+
+/* When binding the receivers, check if a socket has been sent to us by the
+ * previous process that we could reuse, instead of creating a new one. Note
+ * that some address family-specific options are checked on the listener and
+ * on the socket. Typically for AF_INET and AF_INET6, we check for transparent
+ * mode, and for AF_INET6 we also check for "v4v6" or "v6only". The reused
+ * socket is automatically removed from the list so that it's not proposed
+ * anymore. Returns the FD of the matching socket, or -1 when none matches.
+ */
+int sock_find_compatible_fd(const struct receiver *rx)
+{
+	struct xfer_sock_list *xfer_sock = xfer_sock_list;
+	int options = 0;
+	int if_namelen = 0;
+	int ns_namelen = 0;
+	int ret = -1;
+
+	if (!rx->proto->fam->addrcmp)
+		return -1;
+
+	/* first, translate the receiver's settings into SOCK_XFER_OPT_* flags
+	 * so they can be compared against the stored socket's options.
+	 */
+	if (rx->proto->proto_type == PROTO_TYPE_DGRAM)
+		options |= SOCK_XFER_OPT_DGRAM;
+
+	if (rx->settings->options & RX_O_FOREIGN)
+		options |= SOCK_XFER_OPT_FOREIGN;
+
+	if (rx->addr.ss_family == AF_INET6) {
+		/* Prepare to match the v6only option against what we really want. Note
+		 * that sadly the two options are not exclusive to each other and that
+		 * v6only is stronger than v4v6.
+		 */
+		if ((rx->settings->options & RX_O_V6ONLY) ||
+		    (sock_inet6_v6only_default && !(rx->settings->options & RX_O_V4V6)))
+			options |= SOCK_XFER_OPT_V6ONLY;
+	}
+
+	if (rx->settings->interface)
+		if_namelen = strlen(rx->settings->interface);
+#ifdef USE_NS
+	if (rx->settings->netns)
+		ns_namelen = rx->settings->netns->name_len;
+#endif
+
+	/* scan the list for an entry whose options, interface, namespace and
+	 * address all match the receiver's (name lengths are compared first
+	 * as a cheap pre-filter before the strcmp()s).
+	 */
+	while (xfer_sock) {
+		if ((options == xfer_sock->options) &&
+		    (if_namelen == xfer_sock->if_namelen) &&
+		    (ns_namelen == xfer_sock->ns_namelen) &&
+		    (!if_namelen || strcmp(rx->settings->interface, xfer_sock->iface) == 0) &&
+#ifdef USE_NS
+		    (!ns_namelen || strcmp(rx->settings->netns->node.key, xfer_sock->namespace) == 0) &&
+#endif
+		    rx->proto->fam->addrcmp(&xfer_sock->addr, &rx->addr) == 0)
+			break;
+		xfer_sock = xfer_sock->next;
+	}
+
+	if (xfer_sock != NULL) {
+		ret = xfer_sock->fd;
+		/* unlink the entry and release everything except the FD,
+		 * whose ownership is transferred to the caller.
+		 */
+		if (xfer_sock == xfer_sock_list)
+			xfer_sock_list = xfer_sock->next;
+		if (xfer_sock->prev)
+			xfer_sock->prev->next = xfer_sock->next;
+		if (xfer_sock->next)
+			xfer_sock->next->prev = xfer_sock->prev;
+		free(xfer_sock->iface);
+		free(xfer_sock->namespace);
+		free(xfer_sock);
+	}
+	return ret;
+}
+
+/* After all protocols are bound, there may remain some old sockets that have
+ * been removed between the previous config and the new one. These ones must
+ * be dropped, otherwise they will remain open and may prevent a service from
+ * restarting.
+ * NOTE(review): empty parameter list; "(void)" would give a stricter
+ * prototype in pre-C23 dialects.
+ */
+void sock_drop_unused_old_sockets()
+{
+	while (xfer_sock_list != NULL) {
+		struct xfer_sock_list *tmpxfer = xfer_sock_list->next;
+
+		/* close the leftover FD and free the whole entry */
+		close(xfer_sock_list->fd);
+		free(xfer_sock_list->iface);
+		free(xfer_sock_list->namespace);
+		free(xfer_sock_list);
+		xfer_sock_list = tmpxfer;
+	}
+}
+
+/* Tests if the receiver supports accepting connections. Returns positive on
+ * success, 0 if not possible, negative if the socket is non-recoverable. The
+ * rationale behind this is that inherited FDs may be broken and that shared
+ * FDs might have been paused by another process.
+ */
+int sock_accepting_conn(const struct receiver *rx)
+{
+	int opt_val = 0;
+	socklen_t opt_len = sizeof(opt_val);
+
+	/* SO_ACCEPTCONN reports whether the socket is in listening state */
+	if (getsockopt(rx->fd, SOL_SOCKET, SO_ACCEPTCONN, &opt_val, &opt_len) == -1)
+		return -1;
+
+	return opt_val;
+}
+
+/* This is the FD handler IO callback for stream sockets configured for
+ * accepting incoming connections. It's a pass-through to listener_accept()
+ * which will iterate over the listener protocol's accept_conn() function.
+ * The FD's owner must be a listener.
+ */
+void sock_accept_iocb(int fd)
+{
+	struct listener *l = fdtab[fd].owner;
+
+	if (!l)
+		return;
+
+	/* master-worker consistency check: master FDs (RX_F_MWORKER) must
+	 * only be handled in the master process and vice versa.
+	 */
+	BUG_ON(!!master != !!(l->rx.flags & RX_F_MWORKER));
+	listener_accept(l);
+}
+
+/* This completes the initialization of connection <conn> by inserting its FD
+ * into the fdtab, associating it with the regular connection handler. It will
+ * be bound to the current thread only. This call cannot fail.
+ */
+void sock_conn_ctrl_init(struct connection *conn)
+{
+	/* FD-less connections (e.g. QUIC) must never reach this path */
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+	fd_insert(conn->handle.fd, conn, sock_conn_iocb, tgid, ti->ltid_bit);
+}
+
+/* This completes the release of connection <conn> by removing its FD from the
+ * fdtab and deleting it. The connection must not use the FD anymore past this
+ * point. The FD may be modified in the connection.
+ */
+void sock_conn_ctrl_close(struct connection *conn)
+{
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+	fd_delete(conn->handle.fd);
+	/* poison the FD so any late use is caught instead of hitting a reused FD */
+	conn->handle.fd = DEAD_FD_MAGIC;
+}
+
+/* This is the callback which is set when a connection establishment is pending
+ * and we have nothing to send. It may update the FD polling status to indicate
+ * !READY. It returns 0 if it fails in a fatal way or needs to poll to go
+ * further, otherwise it returns non-zero and removes the CO_FL_WAIT_L4_CONN
+ * flag from the connection's flags. In case of error, it sets CO_FL_ERROR and
+ * leaves the error code in errno.
+ */
+int sock_conn_check(struct connection *conn)
+{
+	struct sockaddr_storage *addr;
+	int fd = conn->handle.fd;
+
+	if (conn->flags & CO_FL_ERROR)
+		return 0;
+
+	if (!conn_ctrl_ready(conn))
+		return 0;
+
+	if (!(conn->flags & CO_FL_WAIT_L4_CONN))
+		return 1; /* strange we were called while ready */
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	/* nothing to report yet: not writable and no error/hup pending */
+	if (!fd_send_ready(fd) && !(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
+		return 0;
+
+	/* Here we have 2 cases :
+	 *  - modern pollers, able to report ERR/HUP. If these ones return any
+	 *    of these flags then it's likely a failure, otherwise it possibly
+	 *    is a success (i.e. there may have been data received just before
+	 *    the error was reported).
+	 *  - select, which doesn't report these and with which it's always
+	 *    necessary either to try connect() again or to check for SO_ERROR.
+	 * In order to simplify everything, we double-check using connect() as
+	 * soon as we meet either of these delicate situations. Note that
+	 * SO_ERROR would clear the error after reporting it!
+	 */
+	if (cur_poller.flags & HAP_POLL_F_ERRHUP) {
+		/* modern poller, able to report ERR/HUP */
+		if ((fdtab[fd].state & (FD_POLL_IN|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_IN)
+			goto done;
+		if ((fdtab[fd].state & (FD_POLL_OUT|FD_POLL_ERR|FD_POLL_HUP)) == FD_POLL_OUT)
+			goto done;
+		if (!(fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP)))
+			goto wait;
+		/* error present, fall through common error check path */
+	}
+
+	/* Use connect() to check the state of the socket. This has the double
+	 * advantage of *not* clearing the error (so that health checks can
+	 * still use getsockopt(SO_ERROR)) and giving us the following info :
+	 *  - error
+	 *  - connecting (EALREADY, EINPROGRESS)
+	 *  - connected (EISCONN, 0)
+	 */
+	addr = conn->dst;
+	/* when relaying through a SOCKS4 proxy, the L4 connection goes to the
+	 * proxy's address, not the final destination.
+	 */
+	if ((conn->flags & CO_FL_SOCKS4) && obj_type(conn->target) == OBJ_TYPE_SERVER)
+		addr = &objt_server(conn->target)->socks4_addr;
+
+	if (connect(fd, (const struct sockaddr *)addr, get_addr_len(addr)) == -1) {
+		if (errno == EALREADY || errno == EINPROGRESS)
+			goto wait;
+
+		if (errno && errno != EISCONN)
+			goto out_error;
+	}
+
+ done:
+	/* The FD is ready now, we'll mark the connection as complete and
+	 * forward the event to the transport layer which will notify the
+	 * data layer.
+	 */
+	conn->flags &= ~CO_FL_WAIT_L4_CONN;
+	fd_may_send(fd);
+	fd_cond_recv(fd);
+	errno = 0; // make health checks happy
+	return 1;
+
+ out_error:
+	/* Write error on the file descriptor. Report it to the connection
+	 * and disable polling on this FD.
+	 */
+	conn->flags |= CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH;
+	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
+	fd_stop_both(fd);
+	return 0;
+
+ wait:
+	/* still connecting: subscribe for writability to be woken up on completion */
+	fd_cant_send(fd);
+	fd_want_send(fd);
+	return 0;
+}
+
+/* I/O callback for fd-based connections. It calls the read/write handlers
+ * provided by the connection's sock_ops, which must be valid.
+ */
+void sock_conn_iocb(int fd)
+{
+	struct connection *conn = fdtab[fd].owner;
+	unsigned int flags;
+	int need_wake = 0;
+	struct tasklet *t;
+
+	if (unlikely(!conn)) {
+		/* FD without an owner: account it as a dead connection event */
+		activity[tid].conn_dead++;
+		return;
+	}
+
+	flags = conn->flags & ~CO_FL_ERROR; /* ensure to call the wake handler upon error */
+
+	if (unlikely(conn->flags & CO_FL_WAIT_L4_CONN) &&
+	    ((fd_send_ready(fd) && fd_send_active(fd)) ||
+	     (fd_recv_ready(fd) && fd_recv_active(fd)))) {
+		/* Still waiting for a connection to establish and nothing was
+		 * attempted yet to probe the connection. this will clear the
+		 * CO_FL_WAIT_L4_CONN flag on success.
+		 */
+		if (!sock_conn_check(conn))
+			goto leave;
+		need_wake = 1;
+	}
+
+	if (fd_send_ready(fd) && fd_send_active(fd)) {
+		/* force reporting of activity by clearing the previous flags :
+		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
+		 * both of which will be detected below.
+		 */
+		flags = 0;
+		if (conn->subs && conn->subs->events & SUB_RETRY_SEND) {
+			t = conn->subs->tasklet;
+			need_wake = 0; // wake will be called after this I/O
+			conn->subs->events &= ~SUB_RETRY_SEND;
+			if (!conn->subs->events)
+				conn->subs = NULL;
+			tasklet_wakeup(t);
+		}
+		fd_stop_send(fd);
+	}
+
+	/* The data transfer starts here and stops on error and handshakes. Note
+	 * that we must absolutely test conn->xprt at each step in case it suddenly
+	 * changes due to a quick unexpected close().
+	 */
+	if (fd_recv_ready(fd) && fd_recv_active(fd)) {
+		/* force reporting of activity by clearing the previous flags :
+		 * we'll have at least ERROR or CONNECTED at the end of an I/O,
+		 * both of which will be detected below.
+		 */
+		flags = 0;
+		if (conn->subs && conn->subs->events & SUB_RETRY_RECV) {
+			t = conn->subs->tasklet;
+			need_wake = 0; // wake will be called after this I/O
+			conn->subs->events &= ~SUB_RETRY_RECV;
+			if (!conn->subs->events)
+				conn->subs = NULL;
+			tasklet_wakeup(t);
+		}
+		fd_stop_recv(fd);
+	}
+
+ leave:
+	/* we may have to finish to install a mux or to wake it up based on
+	 * what was just done above. It may kill the connection so we have to
+	 * be prepared not to use it anymore.
+	 */
+	if (conn_notify_mux(conn, flags, need_wake) < 0)
+		return;
+
+	/* commit polling changes in case of error.
+	 * WT: it seems that the last case where this could still be relevant
+	 * is if a mux wake function above report a connection error but does
+	 * not stop polling. Shouldn't we enforce this into the mux instead of
+	 * having to deal with this ?
+	 */
+	if (unlikely(conn->flags & CO_FL_ERROR)) {
+		if (conn_ctrl_ready(conn))
+			fd_stop_both(fd);
+
+		if (conn->subs) {
+			t = conn->subs->tasklet;
+			conn->subs->events = 0;
+			/* NOTE(review): always true right after the assignment
+			 * above; kept as-is, mirroring the send/recv paths.
+			 */
+			if (!conn->subs->events)
+				conn->subs = NULL;
+			tasklet_wakeup(t);
+		}
+	}
+}
+
+/* Drains possibly pending incoming data on the file descriptor attached to the
+ * connection. This is used to know whether we need to disable lingering on
+ * close. Returns non-zero if it is safe to close without disabling lingering,
+ * otherwise zero.
+ */
+int sock_drain(struct connection *conn)
+{
+	int turns = 2;
+	int fd = conn->handle.fd;
+	int len;
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	/* already reported error/hup: peer is gone, safe to close */
+	if (fdtab[fd].state & (FD_POLL_ERR|FD_POLL_HUP))
+		goto shut;
+
+	if (!(conn->flags & CO_FL_WANT_DRAIN) && !fd_recv_ready(fd))
+		return 0;
+
+	/* no drain function defined, use the generic one */
+
+	while (turns) {
+#ifdef MSG_TRUNC_CLEARS_INPUT
+		/* discard without copying; fall back to a real read if the
+		 * kernel rejects the NULL buffer with EFAULT.
+		 */
+		len = recv(fd, NULL, INT_MAX, MSG_DONTWAIT | MSG_NOSIGNAL | MSG_TRUNC);
+		if (len == -1 && errno == EFAULT)
+#endif
+			len = recv(fd, trash.area, trash.size, MSG_DONTWAIT | MSG_NOSIGNAL);
+
+		if (len == 0)
+			goto shut;
+
+		if (len < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK) {
+				/* connection not closed yet */
+				fd_cant_recv(fd);
+				break;
+			}
+			if (errno == EINTR)  /* oops, try again */
+				continue;
+			/* other errors indicate a dead connection, fine. */
+			goto shut;
+		}
+		/* OK we read some data, let's try again once */
+		turns--;
+	}
+
+	/* some data are still present, give up */
+	return 0;
+
+ shut:
+	/* we're certain the connection was shut down */
+	HA_ATOMIC_AND(&fdtab[fd].state, ~FD_LINGER_RISK);
+	return 1;
+}
+
+/* Checks the connection's FD for readiness of events <event_type>, which may
+ * only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND. Those which are
+ * ready are returned. The ones that are not ready are enabled. The caller is
+ * expected to do what is needed to handle ready events and to deal with
+ * subsequent wakeups caused by the requested events' readiness.
+ */
+int sock_check_events(struct connection *conn, int event_type)
+{
+	int ret = 0;
+
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	if (event_type & SUB_RETRY_RECV) {
+		if (fd_recv_ready(conn->handle.fd))
+			ret |= SUB_RETRY_RECV;
+		else
+			fd_want_recv(conn->handle.fd); /* not ready: arm polling */
+	}
+
+	if (event_type & SUB_RETRY_SEND) {
+		if (fd_send_ready(conn->handle.fd))
+			ret |= SUB_RETRY_SEND;
+		else
+			fd_want_send(conn->handle.fd); /* not ready: arm polling */
+	}
+
+	return ret;
+}
+
+/* Ignore readiness events from connection's FD for events of types <event_type>
+ * which may only be a combination of SUB_RETRY_RECV and SUB_RETRY_SEND.
+ */
+void sock_ignore_events(struct connection *conn, int event_type)
+{
+	BUG_ON(conn->flags & CO_FL_FDLESS);
+
+	if (event_type & SUB_RETRY_RECV)
+		fd_stop_recv(conn->handle.fd);
+
+	if (event_type & SUB_RETRY_SEND)
+		fd_stop_send(conn->handle.fd);
+}
+
+/* Live check to see if a socket type supports SO_REUSEPORT for the specified
+ * family and socket() settings. Returns non-zero on success, 0 on failure. Use
+ * protocol_supports_flag() instead, which checks cached flags. The check works
+ * by binding two SO_REUSEPORT sockets to the same kernel-assigned address: if
+ * the second bind() succeeds, the option is effective for this protocol.
+ */
+int _sock_supports_reuseport(const struct proto_fam *fam, int type, int protocol)
+{
+	int ret = 0;
+#ifdef SO_REUSEPORT
+	struct sockaddr_storage ss;
+	socklen_t sl = sizeof(ss);
+	int fd1, fd2;
+
+	/* for the check, we'll need two sockets */
+	fd1 = fd2 = -1;
+
+	/* ignore custom sockets */
+	if (!fam || fam->sock_domain >= AF_MAX)
+		goto leave;
+
+	fd1 = socket(fam->sock_domain, type, protocol);
+	if (fd1 < 0)
+		goto leave;
+
+	/* <one> is an int constant equal to 1 defined at file/global scope
+	 * elsewhere in the project (not visible in this chunk).
+	 */
+	if (setsockopt(fd1, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
+		goto leave;
+
+	/* bind to any address assigned by the kernel, we'll then try to do it twice */
+	memset(&ss, 0, sizeof(ss));
+	ss.ss_family = fam->sock_family;
+	if (bind(fd1, (struct sockaddr *)&ss, fam->sock_addrlen) < 0)
+		goto leave;
+
+	if (getsockname(fd1, (struct sockaddr *)&ss, &sl) < 0)
+		goto leave;
+
+	fd2 = socket(fam->sock_domain, type, protocol);
+	if (fd2 < 0)
+		goto leave;
+
+	if (setsockopt(fd2, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
+		goto leave;
+
+	if (bind(fd2, (struct sockaddr *)&ss, sl) < 0)
+		goto leave;
+
+	/* OK we could bind twice to the same address:port, REUSEPORT
+	 * is supported for this protocol.
+	 */
+	ret = 1;
+
+ leave:
+	if (fd2 >= 0)
+		close(fd2);
+	if (fd1 >= 0)
+		close(fd1);
+#endif
+	return ret;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/sock_inet.c b/src/sock_inet.c
new file mode 100644
index 0000000..028ffaa
--- /dev/null
+++ b/src/sock_inet.c
@@ -0,0 +1,521 @@
+/*
+ * AF_INET/AF_INET6 socket management
+ *
+ * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/api.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/namespace.h>
+#include <haproxy/receiver-t.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_inet.h>
+#include <haproxy/tools.h>
+
+struct proto_fam proto_fam_inet4 = {
+ .name = "inet4",
+ .sock_domain = PF_INET,
+ .sock_family = AF_INET,
+ .sock_addrlen = sizeof(struct sockaddr_in),
+ .l3_addrlen = 32/8,
+ .addrcmp = sock_inet4_addrcmp,
+ .bind = sock_inet_bind_receiver,
+ .get_src = sock_get_src,
+ .get_dst = sock_inet_get_dst,
+ .set_port = sock_inet_set_port,
+};
+
+struct proto_fam proto_fam_inet6 = {
+ .name = "inet6",
+ .sock_domain = PF_INET6,
+ .sock_family = AF_INET6,
+ .sock_addrlen = sizeof(struct sockaddr_in6),
+ .l3_addrlen = 128/8,
+ .addrcmp = sock_inet6_addrcmp,
+ .bind = sock_inet_bind_receiver,
+ .get_src = sock_get_src,
+ .get_dst = sock_get_dst,
+ .set_port = sock_inet_set_port,
+};
+
+/* PLEASE NOTE for functions below:
+ * - sock_inet4_* is solely for AF_INET (IPv4)
+ * - sock_inet6_* is solely for AF_INET6 (IPv6)
+ * - sock_inet_* is for either
+ *
+ * The address family SHOULD always be checked. In some cases a function will
+ * be used in a situation where the address family is guaranteed (e.g. protocol
+ * definitions), so the test may be avoided. This special case must then be
+ * mentioned in the comment before the function definition.
+ */
+
+/* determine if the operating system uses IPV6_V6ONLY by default. 0=no, 1=yes.
+ * It also remains 0 if IPv6 is not enabled/configured.
+ */
+int sock_inet6_v6only_default = 0;
+
+/* Default TCPv4/TCPv6 MSS settings. -1=unknown. */
+int sock_inet_tcp_maxseg_default = -1;
+int sock_inet6_tcp_maxseg_default = -1;
+
+/* Compares two AF_INET sockaddr addresses. Returns 0 if they match or non-zero
+ * if they do not match.
+ */
+int sock_inet4_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b)
+{
+ const struct sockaddr_in *a4 = (const struct sockaddr_in *)a;
+ const struct sockaddr_in *b4 = (const struct sockaddr_in *)b;
+
+ if (a->ss_family != b->ss_family)
+ return -1;
+
+ if (a->ss_family != AF_INET)
+ return -1;
+
+ if (a4->sin_port != b4->sin_port)
+ return -1;
+
+ return memcmp(&a4->sin_addr, &b4->sin_addr, sizeof(a4->sin_addr));
+}
+
+/* Compares two AF_INET6 sockaddr addresses. Returns 0 if they match or
+ * non-zero if they do not match.
+ */
+int sock_inet6_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b)
+{
+ const struct sockaddr_in6 *a6 = (const struct sockaddr_in6 *)a;
+ const struct sockaddr_in6 *b6 = (const struct sockaddr_in6 *)b;
+
+ if (a->ss_family != b->ss_family)
+ return -1;
+
+ if (a->ss_family != AF_INET6)
+ return -1;
+
+ if (a6->sin6_port != b6->sin6_port)
+ return -1;
+
+ return memcmp(&a6->sin6_addr, &b6->sin6_addr, sizeof(a6->sin6_addr));
+}
+
+/* Sets the port <port> on IPv4 or IPv6 address <addr>. The address family is
+ * determined from the sockaddr_storage's address family. Nothing is done for
+ * other families.
+ */
+void sock_inet_set_port(struct sockaddr_storage *addr, int port)
+{
+ if (addr->ss_family == AF_INET)
+ ((struct sockaddr_in *)addr)->sin_port = htons(port);
+ else if (addr->ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
+}
+
+/*
+ * Retrieves the original destination address for the socket <fd> which must be
+ * of family AF_INET (not AF_INET6), with <dir> indicating if we're a listener
+ * (=0) or an initiator (!=0). In the case of a listener, if the original
+ * destination address was translated, the original address is retrieved. It
+ * returns 0 in case of success, -1 in case of error. The socket's destination
+ * address is stored in <sa> for <salen> bytes.
+ */
+int sock_inet_get_dst(int fd, struct sockaddr *sa, socklen_t salen, int dir)
+{
+ if (dir)
+ return getpeername(fd, sa, &salen);
+ else {
+ int ret = getsockname(fd, sa, &salen);
+
+ if (ret < 0)
+ return ret;
+
+#if defined(USE_TPROXY) && defined(SO_ORIGINAL_DST)
+ /* For TPROXY and Netfilter's NAT, we can retrieve the original
+ * IPv4 address before DNAT/REDIRECT. We must not do that with
+ * other families because v6-mapped IPv4 addresses are still
+ * reported as v4.
+ */
+ if (getsockopt(fd, IPPROTO_IP, SO_ORIGINAL_DST, sa, &salen) == 0)
+ return 0;
+#endif
+ return ret;
+ }
+}
+
+/* Returns true if the passed FD corresponds to a socket bound with RX_O_FOREIGN
+ * according to the various supported socket options. The socket's address family
+ * must be passed in <family>.
+ */
+int sock_inet_is_foreign(int fd, sa_family_t family)
+{
+ int val __maybe_unused;
+ socklen_t len __maybe_unused;
+
+ switch (family) {
+ case AF_INET:
+#if defined(IP_TRANSPARENT)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(IP_FREEBIND)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IP, IP_FREEBIND, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(IP_BINDANY)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IP, IP_BINDANY, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(SO_BINDANY)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, SOL_SOCKET, SO_BINDANY, &val, &len) == 0 && val)
+ return 1;
+#endif
+ break;
+
+ case AF_INET6:
+#if defined(IPV6_TRANSPARENT)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(IP_FREEBIND)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IP, IP_FREEBIND, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(IPV6_BINDANY)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IPV6, IPV6_BINDANY, &val, &len) == 0 && val)
+ return 1;
+#endif
+#if defined(SO_BINDANY)
+ val = 0; len = sizeof(val);
+ if (getsockopt(fd, SOL_SOCKET, SO_BINDANY, &val, &len) == 0 && val)
+ return 1;
+#endif
+ break;
+ }
+ return 0;
+}
+
+/* Attempt all known socket options to prepare an AF_INET4 socket to be bound
+ * to a foreign address. The socket must already exist and must not be bound.
+ * 1 is returned on success, 0 on failure. The caller must check the address
+ * family before calling this function.
+ */
+int sock_inet4_make_foreign(int fd)
+{
+ return
+#if defined(IP_TRANSPARENT)
+ setsockopt(fd, IPPROTO_IP, IP_TRANSPARENT, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(IP_FREEBIND)
+ setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(IP_BINDANY)
+ setsockopt(fd, IPPROTO_IP, IP_BINDANY, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(SO_BINDANY)
+ setsockopt(fd, SOL_SOCKET, SO_BINDANY, &one, sizeof(one)) == 0 ||
+#endif
+ 0;
+}
+
+/* Attempt all known socket options to prepare an AF_INET6 socket to be bound
+ * to a foreign address. The socket must already exist and must not be bound.
+ * 1 is returned on success, 0 on failure. The caller must check the address
+ * family before calling this function.
+ */
+int sock_inet6_make_foreign(int fd)
+{
+ return
+#if defined(IPV6_TRANSPARENT)
+ setsockopt(fd, IPPROTO_IPV6, IPV6_TRANSPARENT, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(IP_FREEBIND)
+ setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(IPV6_BINDANY)
+ setsockopt(fd, IPPROTO_IPV6, IPV6_BINDANY, &one, sizeof(one)) == 0 ||
+#endif
+#if defined(SO_BINDANY)
+ setsockopt(fd, SOL_SOCKET, SO_BINDANY, &one, sizeof(one)) == 0 ||
+#endif
+ 0;
+}
+
+/* Binds receiver <rx>, and assigns rx->iocb and rx->owner as the callback and
+ * context, respectively. Returns an error code made of ERR_* bits on failure
+ * or ERR_NONE on success. On failure, an error message may be passed into
+ * <errmsg>.
+ */
+int sock_inet_bind_receiver(struct receiver *rx, char **errmsg)
+{
+ int fd, err, ext;
+ /* copy listener addr because sometimes we need to switch family */
+ struct sockaddr_storage addr_inet = rx->addr;
+
+ /* force to classic sock family, not AF_CUST_* */
+ addr_inet.ss_family = rx->proto->fam->sock_family;
+
+ /* ensure we never return garbage */
+ if (errmsg)
+ *errmsg = 0;
+
+ err = ERR_NONE;
+
+ if (rx->flags & RX_F_BOUND)
+ return ERR_NONE;
+
+ if (rx->flags & RX_F_MUST_DUP) {
+ /* this is a secondary receiver that is an exact copy of a
+ * reference which must already be bound (or has failed).
+ * We'll try to dup() the other one's FD and take it. We
+ * try hard not to reconfigure the socket since it's shared.
+ */
+ BUG_ON(!rx->shard_info);
+ if (!(rx->shard_info->ref->flags & RX_F_BOUND)) {
+ /* it's assumed that the first one has already reported
+ * the error, let's not spam with another one, and do
+ * not set ERR_ALERT.
+ */
+ err |= ERR_RETRYABLE;
+ goto bind_ret_err;
+ }
+ /* taking the other one's FD will result in it being marked
+ * extern and being dup()ed. Let's mark the receiver as
+ * inherited so that it properly bypasses all second-stage
+ * setup and avoids being passed to new processes.
+ */
+ rx->flags |= RX_F_INHERITED;
+ rx->fd = rx->shard_info->ref->fd;
+ }
+
+ /* if no FD was assigned yet, we'll have to either find a compatible
+ * one or create a new one.
+ */
+ if (rx->fd == -1)
+ rx->fd = sock_find_compatible_fd(rx);
+
+ /* if the receiver now has an fd assigned, then we were offered the fd
+ * by an external process (most likely the parent), and we don't want
+ * to create a new socket. However we still want to set a few flags on
+ * the socket.
+ */
+ fd = rx->fd;
+ ext = (fd >= 0);
+
+ if (!ext) {
+ fd = my_socketat(rx->settings->netns, rx->proto->fam->sock_domain,
+ rx->proto->sock_type, rx->proto->sock_prot);
+ if (fd == -1) {
+ err |= ERR_RETRYABLE | ERR_ALERT;
+ memprintf(errmsg, "cannot create receiving socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+ }
+
+ if (ext && fd < global.maxsock && fdtab[fd].owner) {
+ /* This FD was already bound so this means that it was already
+ * known and registered before parsing, hence it's an inherited
+ * FD. The only reason why it's already known here is that it
+ * has been registered multiple times (multiple listeners on the
+ * same address, or a "shards" directive on the line). There cannot be
+ * multiple listeners on one FD but at least we can create a
+ * new one from the original one. We won't reconfigure it,
+ * however, as this was already done for the first one.
+ */
+ fd = dup(fd);
+ if (fd == -1) {
+ err |= ERR_RETRYABLE | ERR_ALERT;
+ memprintf(errmsg, "cannot dup() receiving socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+ }
+
+ if (fd >= global.maxsock) {
+ err |= ERR_FATAL | ERR_ABORT | ERR_ALERT;
+ memprintf(errmsg, "not enough free sockets (raise '-n' parameter)");
+ goto bind_close_return;
+ }
+
+ if (fd_set_nonblock(fd) == -1) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot make socket non-blocking");
+ goto bind_close_return;
+ }
+
+ if (!ext && setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) == -1) {
+ /* not fatal but should be reported */
+ memprintf(errmsg, "cannot do so_reuseaddr");
+ err |= ERR_ALERT;
+ }
+
+#ifdef SO_REUSEPORT
+ /* OpenBSD and Linux 3.9 support this. As it's present in old libc versions of
+ * Linux, it might return an error that we will silently ignore.
+ */
+ if (!ext && (rx->proto->flags & PROTO_F_REUSEPORT_SUPPORTED))
+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+#endif
+
+#ifdef SO_REUSEPORT_LB
+ /* FreeBSD 12 and above use this to load-balance incoming connections.
+ * This is limited to 256 listeners per group however.
+ */
+ if (!ext && (rx->proto->flags & PROTO_F_REUSEPORT_SUPPORTED))
+ setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &one, sizeof(one));
+#endif
+
+ if (!ext && (rx->settings->options & RX_O_FOREIGN)) {
+ switch (addr_inet.ss_family) {
+ case AF_INET:
+ if (!sock_inet4_make_foreign(fd)) {
+ memprintf(errmsg, "cannot make receiving socket transparent");
+ err |= ERR_ALERT;
+ }
+ break;
+ case AF_INET6:
+ if (!sock_inet6_make_foreign(fd)) {
+ memprintf(errmsg, "cannot make receiving socket transparent");
+ err |= ERR_ALERT;
+ }
+ break;
+ }
+ }
+
+#ifdef SO_BINDTODEVICE
+ /* Note: this might fail if not CAP_NET_RAW */
+ if (!ext && rx->settings->interface) {
+ if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
+ rx->settings->interface,
+ strlen(rx->settings->interface) + 1) == -1) {
+ memprintf(errmsg, "cannot bind receiver to device '%s' (%s)", rx->settings->interface, strerror(errno));
+ err |= ERR_WARN;
+ }
+ }
+#endif
+
+#if defined(IPV6_V6ONLY)
+ if (addr_inet.ss_family == AF_INET6 && !ext) {
+ /* Prepare to match the v6only option against what we really want. Note
+ * that sadly the two options are not exclusive to each other and that
+ * v6only is stronger than v4v6.
+ */
+ if ((rx->settings->options & RX_O_V6ONLY) ||
+ (sock_inet6_v6only_default && !(rx->settings->options & RX_O_V4V6)))
+ setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one));
+ else
+ setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &zero, sizeof(zero));
+ }
+#endif
+
+ if (!ext && bind(fd, (struct sockaddr *)&addr_inet, rx->proto->fam->sock_addrlen) == -1) {
+ err |= ERR_RETRYABLE | ERR_ALERT;
+ memprintf(errmsg, "cannot bind socket (%s)", strerror(errno));
+ goto bind_close_return;
+ }
+
+ rx->fd = fd;
+ rx->flags |= RX_F_BOUND;
+
+ fd_insert(fd, rx->owner, rx->iocb, rx->bind_tgroup, rx->bind_thread);
+
+ /* for now, all regularly bound TCP listeners are exportable */
+ if (!(rx->flags & RX_F_INHERITED))
+ HA_ATOMIC_OR(&fdtab[fd].state, FD_EXPORTED);
+
+ bind_return:
+ if (errmsg && *errmsg) {
+ char pn[INET6_ADDRSTRLEN];
+
+ addr_to_str(&addr_inet, pn, sizeof(pn));
+ memprintf(errmsg, "%s for [%s:%d]", *errmsg, pn, get_host_port(&addr_inet));
+ }
+ bind_ret_err:
+ return err;
+
+ bind_close_return:
+ close(fd);
+ goto bind_return;
+}
+
+static void sock_inet_prepare()
+{
+ int fd, val;
+ socklen_t len;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ if (fd >= 0) {
+#ifdef TCP_MAXSEG
+ /* retrieve the OS' default mss for TCPv4 */
+ len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &val, &len) == 0)
+ sock_inet_tcp_maxseg_default = val;
+#endif
+ close(fd);
+ }
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (fd >= 0) {
+#if defined(IPV6_V6ONLY)
+ /* retrieve the OS' bindv6only value */
+ len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, &len) == 0 && val > 0)
+ sock_inet6_v6only_default = 1;
+#endif
+
+#ifdef TCP_MAXSEG
+ /* retrieve the OS' default mss for TCPv6 */
+ len = sizeof(val);
+ if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &val, &len) == 0)
+ sock_inet6_tcp_maxseg_default = val;
+#endif
+ close(fd);
+ }
+}
+
+INITCALL0(STG_PREPARE, sock_inet_prepare);
+
+
+REGISTER_BUILD_OPTS("Built with transparent proxy support using:"
+#if defined(IP_TRANSPARENT)
+ " IP_TRANSPARENT"
+#endif
+#if defined(IPV6_TRANSPARENT)
+ " IPV6_TRANSPARENT"
+#endif
+#if defined(IP_FREEBIND)
+ " IP_FREEBIND"
+#endif
+#if defined(IP_BINDANY)
+ " IP_BINDANY"
+#endif
+#if defined(IPV6_BINDANY)
+ " IPV6_BINDANY"
+#endif
+#if defined(SO_BINDANY)
+ " SO_BINDANY"
+#endif
+ "");
diff --git a/src/sock_unix.c b/src/sock_unix.c
new file mode 100644
index 0000000..ef749a5
--- /dev/null
+++ b/src/sock_unix.c
@@ -0,0 +1,387 @@
+/*
+ * SOCK_UNIX socket management
+ *
+ * Copyright 2000-2020 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+
+#include <haproxy/api.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/listener.h>
+#include <haproxy/receiver-t.h>
+#include <haproxy/namespace.h>
+#include <haproxy/sock.h>
+#include <haproxy/sock_unix.h>
+#include <haproxy/tools.h>
+
+
+struct proto_fam proto_fam_unix = {
+ .name = "unix",
+ .sock_domain = PF_UNIX,
+ .sock_family = AF_UNIX,
+ .sock_addrlen = sizeof(struct sockaddr_un),
+ .l3_addrlen = sizeof(((struct sockaddr_un*)0)->sun_path),
+ .addrcmp = sock_unix_addrcmp,
+ .bind = sock_unix_bind_receiver,
+ .get_src = sock_get_src,
+ .get_dst = sock_get_dst,
+};
+
+/* PLEASE NOTE for functions below:
+ *
+ * The address family SHOULD always be checked. In some cases a function will
+ * be used in a situation where the address family is guaranteed (e.g. protocol
+ * definitions), so the test may be avoided. This special case must then be
+ * mentioned in the comment before the function definition.
+ */
+
+
+/* Compares two AF_UNIX sockaddr addresses. Returns 0 if they match or non-zero
+ * if they do not match. It also supports ABNS socket addresses (those starting
+ * with \0). For regular UNIX sockets however, this does explicitly support
+ * matching names ending exactly with .XXXX.tmp which are newly bound sockets
+ * about to be replaced; this suffix is then ignored. Note that our UNIX socket
+ * paths are always zero-terminated.
+ */
+int sock_unix_addrcmp(const struct sockaddr_storage *a, const struct sockaddr_storage *b)
+{
+ const struct sockaddr_un *au = (const struct sockaddr_un *)a;
+ const struct sockaddr_un *bu = (const struct sockaddr_un *)b;
+ int idx, dot, idx2;
+
+ if (a->ss_family != b->ss_family)
+ return -1;
+
+ if (a->ss_family != AF_UNIX)
+ return -1;
+
+ if (au->sun_path[0] != bu->sun_path[0])
+ return -1;
+
+ if (au->sun_path[0] == 0)
+ return memcmp(au->sun_path, bu->sun_path, sizeof(au->sun_path));
+
+ idx = 1; dot = 0;
+ while (au->sun_path[idx] == bu->sun_path[idx]) {
+ if (au->sun_path[idx] == 0)
+ return 0;
+ if (au->sun_path[idx] == '.')
+ dot = idx;
+ idx++;
+ }
+
+ /* Now we have a difference. It's OK if they are within or after a
+ * sequence of digits following a dot, and are followed by ".tmp".
+ *
+ * make sure to perform the check against tempname if the compared
+ * string is in "final" format (does not end with ".XXXX.tmp").
+ *
+ * Examples:
+ * /tmp/test matches with /tmp/test.1822.tmp
+ * /tmp/test.1822.tmp matches with /tmp/test.XXXX.tmp
+ */
+ if (au->sun_path[idx] == 0 || bu->sun_path[idx] == 0) {
+ if (au->sun_path[idx] == '.' || bu->sun_path[idx] == '.')
+ dot = idx; /* try to match against temp path */
+ else
+ return -1; /* invalid temp path */
+ }
+
+ if (!dot)
+ return -1;
+
+ /* First, check in path "a" */
+ if (au->sun_path[idx] != 0) {
+ for (idx2 = dot + 1; idx2 && isdigit((unsigned char)au->sun_path[idx2]);)
+ idx2++;
+ if (strcmp(au->sun_path + idx2, ".tmp") != 0)
+ return -1;
+ }
+
+ /* Then check in path "b" */
+ if (bu->sun_path[idx] != 0) {
+ for (idx2 = dot + 1; idx2 && isdigit((unsigned char)bu->sun_path[idx2]); idx2++)
+ ;
+ if (strcmp(bu->sun_path + idx2, ".tmp") != 0)
+ return -1;
+ }
+
+ /* OK that's a match */
+ return 0;
+}
+
+/* Binds receiver <rx>, and assigns rx->iocb and rx->owner as the callback and
+ * context, respectively, with ->bind_thread as the thread mask. Returns an
+ * error code made of ERR_* bits on failure or ERR_NONE on success. On failure,
+ * an error message may be passed into <errmsg>.
+ */
+int sock_unix_bind_receiver(struct receiver *rx, char **errmsg)
+{
+ char tempname[MAXPATHLEN];
+ char backname[MAXPATHLEN];
+ struct sockaddr_un addr;
+ const char *path;
+ int maxpathlen;
+ int fd, err, ext, ret;
+
+ /* ensure we never return garbage */
+ if (errmsg)
+ *errmsg = 0;
+
+ err = ERR_NONE;
+
+ if (rx->flags & RX_F_BOUND)
+ return ERR_NONE;
+
+ if (rx->flags & RX_F_MUST_DUP) {
+ /* this is a secondary receiver that is an exact copy of a
+ * reference which must already be bound (or has failed).
+ * We'll try to dup() the other one's FD and take it. We
+ * try hard not to reconfigure the socket since it's shared.
+ */
+ BUG_ON(!rx->shard_info);
+ if (!(rx->shard_info->ref->flags & RX_F_BOUND)) {
+ /* it's assumed that the first one has already reported
+ * the error, let's not spam with another one, and do
+ * not set ERR_ALERT.
+ */
+ err |= ERR_RETRYABLE;
+ goto bind_ret_err;
+ }
+ /* taking the other one's FD will result in it being marked
+ * extern and being dup()ed. Let's mark the receiver as
+ * inherited so that it properly bypasses all second-stage
+ * setup and avoids being passed to new processes.
+ */
+ rx->flags |= RX_F_INHERITED;
+ rx->fd = rx->shard_info->ref->fd;
+ }
+
+ /* if no FD was assigned yet, we'll have to either find a compatible
+ * one or create a new one.
+ */
+ if (rx->fd == -1)
+ rx->fd = sock_find_compatible_fd(rx);
+
+ path = ((struct sockaddr_un *)&rx->addr)->sun_path;
+ maxpathlen = MIN(MAXPATHLEN, sizeof(addr.sun_path));
+
+ /* if the listener already has an fd assigned, then we were offered the
+ * fd by an external process (most likely the parent), and we don't want
+ * to create a new socket. However we still want to set a few flags on
+ * the socket.
+ */
+ fd = rx->fd;
+ ext = (fd >= 0);
+ if (ext)
+ goto fd_ready;
+
+ if (path[0]) {
+ ret = snprintf(tempname, maxpathlen, "%s.%d.tmp", path, pid);
+ if (ret < 0 || ret >= sizeof(addr.sun_path)) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "name too long for UNIX socket (limit usually 97)");
+ goto bind_return;
+ }
+
+ ret = snprintf(backname, maxpathlen, "%s.%d.bak", path, pid);
+ if (ret < 0 || ret >= maxpathlen) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "name too long for UNIX socket (limit usually 97)");
+ goto bind_return;
+ }
+
+ /* 2. clean existing orphaned entries */
+ if (unlink(tempname) < 0 && errno != ENOENT) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "error when trying to unlink previous UNIX socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+
+ if (unlink(backname) < 0 && errno != ENOENT) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "error when trying to unlink previous UNIX socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+
+ /* 3. backup existing socket */
+ if (link(path, backname) < 0 && errno != ENOENT) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "error when trying to preserve previous UNIX socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+
+ /* Note: this test is redundant with the snprintf one above and
+ * will never trigger, it's just added as the only way to shut
+ * gcc's painfully dumb warning about possibly truncated output
+ * during strncpy(). Don't move it above or smart gcc will not
+ * see it!
+ */
+ if (strlen(tempname) >= sizeof(addr.sun_path)) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "name too long for UNIX socket (limit usually 97)");
+ goto bind_return;
+ }
+
+ strncpy(addr.sun_path, tempname, sizeof(addr.sun_path) - 1);
+ addr.sun_path[sizeof(addr.sun_path) - 1] = 0;
+ }
+ else {
+ /* first char is zero, it's an abstract socket whose address
+ * is defined by all the bytes past this zero.
+ */
+ memcpy(addr.sun_path, path, sizeof(addr.sun_path));
+ }
+ addr.sun_family = AF_UNIX;
+
+ /* WT: shouldn't we use my_socketat(rx->netns) here instead ? */
+ fd = socket(rx->proto->fam->sock_domain, rx->proto->sock_type, rx->proto->sock_prot);
+ if (fd < 0) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot create receiving socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+
+ fd_ready:
+ if (ext && fd < global.maxsock && fdtab[fd].owner) {
+ /* This FD was already bound so this means that it was already
+ * known and registered before parsing, hence it's an inherited
+ * FD. The only reason why it's already known here is that it
+ * has been registered multiple times (multiple listeners on the
+ * same address, or a "shards" directive on the line). There cannot be
+ * multiple listeners on one FD but at least we can create a
+ * new one from the original one. We won't reconfigure it,
+ * however, as this was already done for the first one.
+ */
+ fd = dup(fd);
+ if (fd == -1) {
+ err |= ERR_RETRYABLE | ERR_ALERT;
+ memprintf(errmsg, "cannot dup() receiving socket (%s)", strerror(errno));
+ goto bind_return;
+ }
+ }
+
+ if (fd >= global.maxsock) {
+ err |= ERR_FATAL | ERR_ABORT | ERR_ALERT;
+ memprintf(errmsg, "not enough free sockets (raise '-n' parameter)");
+ goto bind_close_return;
+ }
+
+ if (fd_set_nonblock(fd) == -1) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot make socket non-blocking");
+ goto bind_close_return;
+ }
+
+ if (!ext && bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+ /* note that bind() creates the socket <tempname> on the file system */
+ if (errno == EADDRINUSE) {
+ /* the old process might still own it, let's retry */
+ err |= ERR_RETRYABLE | ERR_ALERT;
+ memprintf(errmsg, "cannot bind UNIX socket (already in use)");
+ goto bind_close_return;
+ }
+ else {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot bind UNIX socket (%s)", strerror(errno));
+ goto bind_close_return;
+ }
+ }
+
+	/* <uid> and <gid> different from -1 will be used to change the socket owner.
+ * If <mode> is not 0, it will be used to restrict access to the socket.
+ * While it is known not to be portable on every OS, it's still useful
+ * where it works. We also don't change permissions on abstract sockets.
+ */
+ if (!ext && path[0] &&
+ (((rx->settings->ux.uid != -1 || rx->settings->ux.gid != -1) &&
+ (chown(tempname, rx->settings->ux.uid, rx->settings->ux.gid) == -1)) ||
+ (rx->settings->ux.mode != 0 && chmod(tempname, rx->settings->ux.mode) == -1))) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot change UNIX socket ownership (%s)", strerror(errno));
+ goto err_unlink_temp;
+ }
+
+ /* Point of no return: we are ready, we'll switch the sockets. We don't
+ * fear losing the socket <path> because we have a copy of it in
+ * backname. Abstract sockets are not renamed.
+ */
+ if (!ext && path[0] && rename(tempname, path) < 0) {
+ err |= ERR_FATAL | ERR_ALERT;
+ memprintf(errmsg, "cannot switch final and temporary UNIX sockets (%s)", strerror(errno));
+ goto err_rename;
+ }
+
+ /* Cleanup: only unlink if we didn't inherit the fd from the parent */
+ if (!ext && path[0])
+ unlink(backname);
+
+ rx->fd = fd;
+ rx->flags |= RX_F_BOUND;
+
+ if (!path[0]) {
+ /* ABNS sockets do not support suspend, and they conflict with
+ * other ones (no reuseport), so they must always be unbound.
+ */
+ rx->flags |= RX_F_NON_SUSPENDABLE;
+ }
+
+ fd_insert(fd, rx->owner, rx->iocb, rx->bind_tgroup, rx->bind_thread);
+
+ /* for now, all regularly bound TCP listeners are exportable */
+ if (!(rx->flags & RX_F_INHERITED))
+ HA_ATOMIC_OR(&fdtab[fd].state, FD_EXPORTED);
+
+ return err;
+
+ err_rename:
+ ret = rename(backname, path);
+ if (ret < 0 && errno == ENOENT)
+ unlink(path);
+ err_unlink_temp:
+ if (!ext && path[0])
+ unlink(tempname);
+ close(fd);
+ err_unlink_back:
+ if (!ext && path[0])
+ unlink(backname);
+ bind_return:
+ if (errmsg && *errmsg) {
+ if (!ext) {
+ char *path_str;
+
+ path_str = sa2str((struct sockaddr_storage *)&rx->addr, 0, 0);
+ memprintf(errmsg, "%s [%s]", *errmsg, ((path_str) ? path_str : ""));
+ ha_free(&path_str);
+ }
+ else
+ memprintf(errmsg, "%s [fd %d]", *errmsg, fd);
+ }
+ bind_ret_err:
+ return err;
+
+ bind_close_return:
+ close(fd);
+ goto bind_return;
+}
diff --git a/src/ssl_ckch.c b/src/ssl_ckch.c
new file mode 100644
index 0000000..ab39755
--- /dev/null
+++ b/src/ssl_ckch.c
@@ -0,0 +1,3968 @@
+/*
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <unistd.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <import/ebpttree.h>
+#include <import/ebsttree.h>
+
+#include <haproxy/applet.h>
+#include <haproxy/base64.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_ocsp.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+/* Uncommitted CKCH transaction */
+
+static struct {
+	struct ckch_store *new_ckchs; /* candidate store, not yet committed */
+	struct ckch_store *old_ckchs; /* store that will be replaced on commit */
+	char *path;                   /* path identifying the pending transaction */
+} ckchs_transaction;
+
+/* Uncommitted CA file transaction */
+
+static struct {
+	struct cafile_entry *old_cafile_entry; /* entry to be replaced on commit */
+	struct cafile_entry *new_cafile_entry; /* candidate entry, not yet committed */
+	char *path;                            /* path identifying the pending transaction */
+} cafile_transaction;
+
+/* Uncommitted CRL file transaction */
+
+static struct {
+	struct cafile_entry *old_crlfile_entry; /* entry to be replaced on commit */
+	struct cafile_entry *new_crlfile_entry; /* candidate entry, not yet committed */
+	char *path;                             /* path identifying the pending transaction */
+} crlfile_transaction;
+
+/* CLI context used by "show cafile" */
+struct show_cafile_ctx {
+	struct cafile_entry *cur_cafile_entry; /* entry currently being listed */
+	struct cafile_entry *old_cafile_entry;
+	int ca_index; /* position inside the current entry */
+	int show_all; /* non-zero to list every certificate of the entry */
+};
+
+/* CLI context used by "show crlfile" */
+struct show_crlfile_ctx {
+	struct cafile_entry *cafile_entry;
+	struct cafile_entry *old_crlfile_entry;
+	int index;
+};
+
+/* CLI context used by "show cert" */
+struct show_cert_ctx {
+	struct ckch_store *old_ckchs;
+	struct ckch_store *cur_ckchs;
+	int transaction; /* non-zero when showing the uncommitted transaction */
+};
+
+/* CLI context used by "commit cert" */
+struct commit_cert_ctx {
+	struct ckch_store *old_ckchs;
+	struct ckch_store *new_ckchs;
+	struct ckch_inst *next_ckchi; /* next instance to regenerate, allows yielding */
+	char *err;                    /* accumulated error message, if any */
+	enum {
+		CERT_ST_INIT = 0,
+		CERT_ST_GEN,
+		CERT_ST_INSERT,
+		CERT_ST_SUCCESS,
+		CERT_ST_FIN,
+		CERT_ST_ERROR,
+	} state; /* commit state machine, advances across applet wakeups */
+};
+
+/* CLI context used by "commit cafile" and "commit crlfile" */
+struct commit_cacrlfile_ctx {
+	struct cafile_entry *old_entry;
+	struct cafile_entry *new_entry;
+	struct ckch_inst_link *next_ckchi_link; /* next instance link, allows yielding */
+	enum cafile_type cafile_type; /* either CA or CRL, depending on the current command */
+	char *err;                    /* accumulated error message, if any */
+	enum {
+		CACRL_ST_INIT = 0,
+		CACRL_ST_GEN,
+		CACRL_ST_INSERT,
+		CACRL_ST_SUCCESS,
+		CACRL_ST_FIN,
+		CACRL_ST_ERROR,
+	} state; /* commit state machine, advances across applet wakeups */
+};
+
+
+/******************** cert_key_and_chain functions *************************
+ * These are the functions that fills a cert_key_and_chain structure. For the
+ * functions filling a SSL_CTX from a cert_key_and_chain, see ssl_sock.c
+ */
+
+/*
+ * Sanity-check a Signed Certificate Timestamp List structure: a 2-byte
+ * big-endian total length followed by a sequence of length-prefixed SCT
+ * entries. Only the framing is verified, no signature validation is
+ * performed. Returns 0 when the layout is consistent, 1 otherwise.
+ */
+static int ssl_sock_parse_sctl(struct buffer *sctl)
+{
+	unsigned char *raw;
+	int list_len, offset, entry_len;
+
+	if (sctl->data < 2)
+		return 1;
+
+	raw = (unsigned char *) sctl->area;
+	list_len = (raw[0] << 8) | raw[1];
+
+	/* the declared list length must cover the whole payload */
+	if (list_len + 2 != sctl->data)
+		return 1;
+
+	raw += 2;
+	for (offset = 0; offset < list_len; offset += entry_len + 2) {
+		/* each entry needs at least its own 2-byte length prefix */
+		if (list_len - offset < 2)
+			return 1;
+
+		entry_len = (raw[offset] << 8) | raw[offset + 1];
+		if (offset + entry_len + 2 > list_len)
+			return 1;
+	}
+
+	return 0;
+}
+
+/* Try to load a sctl from a buffer <buf> if not NULL, or read the file <sctl_path>
+ * It fills the ckch->sctl buffer
+ * return 0 on success or != 0 on failure */
+int ssl_sock_load_sctl_from_file(const char *sctl_path, char *buf, struct ckch_data *data, char **err)
+{
+	int fd = -1;
+	int r = 0;
+	int ret = 1;
+	struct buffer tmp;
+	struct buffer *src;
+	struct buffer *sctl;
+
+	if (buf) {
+		/* parse straight from the provided payload */
+		chunk_initstr(&tmp, buf);
+		src = &tmp;
+	} else {
+		fd = open(sctl_path, O_RDONLY);
+		if (fd == -1)
+			goto end;
+
+		/* slurp the whole file into the shared trash chunk */
+		trash.data = 0;
+		while (trash.data < trash.size) {
+			r = read(fd, trash.area + trash.data, trash.size - trash.data);
+			if (r < 0) {
+				if (errno == EINTR)
+					continue;
+				goto end;
+			}
+			else if (r == 0) {
+				break;
+			}
+			trash.data += r;
+		}
+		src = &trash;
+	}
+
+	ret = ssl_sock_parse_sctl(src);
+	if (ret)
+		goto end;
+
+	sctl = calloc(1, sizeof(*sctl));
+	if (!sctl || !chunk_dup(sctl, src)) {
+		/* make sure an allocation failure is reported as an error:
+		 * <ret> is 0 here since the parse above succeeded */
+		ha_free(&sctl);
+		ret = 1;
+		goto end;
+	}
+	/* no error, fill ckch with new context, old context must be free */
+	if (data->sctl) {
+		ha_free(&data->sctl->area);
+		free(data->sctl);
+	}
+	data->sctl = sctl;
+	ret = 0;
+end:
+	if (fd != -1)
+		close(fd);
+
+	return ret;
+}
+
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) || defined OPENSSL_IS_BORINGSSL)
+/*
+ * This function load the OCSP Response in DER format contained in file at
+ * path 'ocsp_path' or base64 in a buffer <buf>
+ *
+ * Returns 0 on success, 1 in error case.
+ */
+int ssl_sock_load_ocsp_response_from_file(const char *ocsp_path, char *buf, struct ckch_data *data, char **err)
+{
+	int fd = -1;
+	int r = 0;
+	int ret = 1;
+	struct buffer *ocsp_response;
+	struct buffer *src = NULL;
+
+	if (buf) {
+		int i, j;
+		/* if it's from a buffer it will be base64 */
+
+		/* remove \r and \n from the payload */
+		for (i = 0, j = 0; buf[i]; i++) {
+			if (buf[i] == '\r' || buf[i] == '\n')
+				continue;
+			buf[j++] = buf[i];
+		}
+		buf[j] = 0;
+
+		ret = base64dec(buf, j, trash.area, trash.size);
+		if (ret < 0) {
+			memprintf(err, "Error reading OCSP response in base64 format");
+			ret = 1;
+			goto end;
+		}
+		trash.data = ret;
+		src = &trash;
+	} else {
+		fd = open(ocsp_path, O_RDONLY);
+		if (fd == -1) {
+			memprintf(err, "Error opening OCSP response file");
+			goto end;
+		}
+
+		/* slurp the whole file into the shared trash chunk */
+		trash.data = 0;
+		while (trash.data < trash.size) {
+			r = read(fd, trash.area + trash.data, trash.size - trash.data);
+			if (r < 0) {
+				if (errno == EINTR)
+					continue;
+
+				memprintf(err, "Error reading OCSP response from file");
+				goto end;
+			}
+			else if (r == 0) {
+				break;
+			}
+			trash.data += r;
+		}
+		close(fd);
+		fd = -1;
+		src = &trash;
+	}
+
+	ocsp_response = calloc(1, sizeof(*ocsp_response));
+	if (!ocsp_response || !chunk_dup(ocsp_response, src)) {
+		/* report allocation failures explicitly: <ret> could still hold
+		 * the (possibly zero) base64dec() length at this point */
+		memprintf(err, "Error allocating memory for OCSP response");
+		ha_free(&ocsp_response);
+		ret = 1;
+		goto end;
+	}
+	/* no error, fill data with new context, old context must be free */
+	if (data->ocsp_response) {
+		ha_free(&data->ocsp_response->area);
+		free(data->ocsp_response);
+	}
+	data->ocsp_response = ocsp_response;
+	ret = 0;
+end:
+	if (fd != -1)
+		close(fd);
+
+	return ret;
+}
+#endif
+
+/*
+ * Try to load in a ckch every files related to a ckch.
+ * (PEM, sctl, ocsp, issuer etc.)
+ *
+ * This function is only used to load files during the configuration parsing,
+ * it is not used with the CLI.
+ *
+ * This allows us to carry the contents of the file without having to read the
+ * file multiple times. The caller must call
+ * ssl_sock_free_cert_key_and_chain_contents.
+ *
+ * returns:
+ * 0 on Success
+ * 1 on SSL Failure
+ */
+int ssl_sock_load_files_into_ckch(const char *path, struct ckch_data *data, char **err)
+{
+	struct buffer *fp = NULL;
+	int ret = 1;
+	struct stat st;
+
+	/* try to load the PEM */
+	if (ssl_sock_load_pem_into_ckch(path, NULL, data , err) != 0) {
+		goto end;
+	}
+
+	fp = alloc_trash_chunk();
+	if (!fp) {
+		memprintf(err, "%sCan't allocate memory\n", err && *err ? *err : "");
+		goto end;
+	}
+
+	if (!chunk_strcpy(fp, path) || (b_data(fp) > MAXPATHLEN)) {
+		memprintf(err, "%s '%s' filename too long'.\n",
+			  err && *err ? *err : "", fp->area);
+		ret = 1;
+		goto end;
+	}
+
+	/* remove the ".crt" extension so the extra files are probed as
+	 * "<base>.key", "<base>.sctl", etc. instead of "<base>.crt.key" */
+	if (global_ssl.extra_files_noext) {
+		char *ext;
+
+		/* look for the extension */
+		if ((ext = strrchr(fp->area, '.'))) {
+
+			if (strcmp(ext, ".crt") == 0) {
+				*ext = '\0';
+				/* keep the chunk length in sync after truncation */
+				fp->data = strlen(fp->area);
+			}
+		}
+
+	}
+
+	if (data->key == NULL) {
+		/* If no private key was found yet and we cannot look for it in extra
+		 * files, raise an error.
+		 */
+		if (!(global_ssl.extra_files & SSL_GF_KEY)) {
+			memprintf(err, "%sNo Private Key found in '%s'.\n", err && *err ? *err : "", fp->area);
+			goto end;
+		}
+
+		/* try to load an external private key if it wasn't in the PEM */
+		if (!chunk_strcat(fp, ".key") || (b_data(fp) > MAXPATHLEN)) {
+			memprintf(err, "%s '%s' filename too long'.\n",
+				  err && *err ? *err : "", fp->area);
+			ret = 1;
+			goto end;
+		}
+
+		if (stat(fp->area, &st) == 0) {
+			if (ssl_sock_load_key_into_ckch(fp->area, NULL, data, err)) {
+				memprintf(err, "%s '%s' is present but cannot be read or parsed'.\n",
+					  err && *err ? *err : "", fp->area);
+				goto end;
+			}
+		}
+
+		if (data->key == NULL) {
+			memprintf(err, "%sNo Private Key found in '%s'.\n", err && *err ? *err : "", fp->area);
+			goto end;
+		}
+		/* remove the added extension */
+		*(fp->area + fp->data - strlen(".key")) = '\0';
+		b_sub(fp, strlen(".key"));
+	}
+
+
+	/* make sure the certificate and the key actually match */
+	if (!X509_check_private_key(data->cert, data->key)) {
+		memprintf(err, "%sinconsistencies between private key and certificate loaded '%s'.\n",
+			  err && *err ? *err : "", path);
+		goto end;
+	}
+
+#ifdef HAVE_SSL_SCTL
+	/* try to load the sctl file */
+	if (global_ssl.extra_files & SSL_GF_SCTL) {
+		struct stat st;
+
+		if (!chunk_strcat(fp, ".sctl") || b_data(fp) > MAXPATHLEN) {
+			memprintf(err, "%s '%s' filename too long'.\n",
+				  err && *err ? *err : "", fp->area);
+			ret = 1;
+			goto end;
+		}
+
+		if (stat(fp->area, &st) == 0) {
+			if (ssl_sock_load_sctl_from_file(fp->area, NULL, data, err)) {
+				memprintf(err, "%s '%s.sctl' is present but cannot be read or parsed'.\n",
+					  err && *err ? *err : "", fp->area);
+				ret = 1;
+				goto end;
+			}
+		}
+		/* remove the added extension */
+		*(fp->area + fp->data - strlen(".sctl")) = '\0';
+		b_sub(fp, strlen(".sctl"));
+	}
+#endif
+
+	/* try to load an ocsp response file */
+	if (global_ssl.extra_files & SSL_GF_OCSP) {
+		struct stat st;
+
+		if (!chunk_strcat(fp, ".ocsp") || b_data(fp) > MAXPATHLEN) {
+			memprintf(err, "%s '%s' filename too long'.\n",
+				  err && *err ? *err : "", fp->area);
+			ret = 1;
+			goto end;
+		}
+
+		if (stat(fp->area, &st) == 0) {
+			if (ssl_sock_load_ocsp_response_from_file(fp->area, NULL, data, err)) {
+				ret = 1;
+				goto end;
+			}
+		}
+		/* remove the added extension */
+		*(fp->area + fp->data - strlen(".ocsp")) = '\0';
+		b_sub(fp, strlen(".ocsp"));
+	}
+
+#ifndef OPENSSL_IS_BORINGSSL /* Useless for BoringSSL */
+	if (data->ocsp_response && (global_ssl.extra_files & SSL_GF_OCSP_ISSUER)) {
+		/* if no issuer was found, try to load an issuer from the .issuer */
+		if (!data->ocsp_issuer) {
+			struct stat st;
+
+			if (!chunk_strcat(fp, ".issuer") || b_data(fp) > MAXPATHLEN) {
+				memprintf(err, "%s '%s' filename too long'.\n",
+					  err && *err ? *err : "", fp->area);
+				ret = 1;
+				goto end;
+			}
+
+			if (stat(fp->area, &st) == 0) {
+				if (ssl_sock_load_issuer_file_into_ckch(fp->area, NULL, data, err)) {
+					ret = 1;
+					goto end;
+				}
+
+				/* the loaded issuer must actually be the issuer of our certificate */
+				if (X509_check_issued(data->ocsp_issuer, data->cert) != X509_V_OK) {
+					memprintf(err, "%s '%s' is not an issuer'.\n",
+						  err && *err ? *err : "", fp->area);
+					ret = 1;
+					goto end;
+				}
+			}
+			/* remove the added extension */
+			*(fp->area + fp->data - strlen(".issuer")) = '\0';
+			b_sub(fp, strlen(".issuer"));
+		}
+	}
+#endif
+
+	ret = 0;
+
+end:
+
+	ERR_clear_error();
+
+	/* Something went wrong in one of the reads */
+	if (ret != 0)
+		ssl_sock_free_cert_key_and_chain_contents(data);
+
+	free_trash_chunk(fp);
+
+	return ret;
+}
+
+/*
+ * Try to load a private key file from a <path> or a buffer <buf>
+ *
+ * If it failed you should not attempt to use the ckch but free it.
+ *
+ * Return 0 on success or != 0 on failure
+ */
+int ssl_sock_load_key_into_ckch(const char *path, char *buf, struct ckch_data *data , char **err)
+{
+	BIO *in = NULL;
+	int ret = 1;
+	EVP_PKEY *key = NULL;
+
+	if (buf) {
+		/* reading from a buffer */
+		in = BIO_new_mem_buf(buf, -1);
+		if (in == NULL) {
+			memprintf(err, "%sCan't allocate memory\n", err && *err ? *err : "");
+			goto end;
+		}
+
+	} else {
+		/* reading from a file */
+		in = BIO_new(BIO_s_file());
+		if (in == NULL)
+			goto end;
+
+		if (BIO_read_filename(in, path) <= 0)
+			goto end;
+	}
+
+	/* Read Private Key */
+	key = PEM_read_bio_PrivateKey(in, NULL, NULL, NULL);
+	if (key == NULL) {
+		memprintf(err, "%sunable to load private key from file '%s'.\n",
+			  err && *err ? *err : "", path);
+		goto end;
+	}
+
+	ret = 0;
+
+	/* install the freshly parsed key into <data>; the previous key (if
+	 * any) now sits in <key> and is released by the cleanup below */
+	SWAP(data->key, key);
+
+end:
+
+	ERR_clear_error();
+	if (in)
+		BIO_free(in);
+	if (key)
+		EVP_PKEY_free(key);
+
+	return ret;
+}
+
+/*
+ * Try to load a PEM file from a <path> or a buffer <buf>
+ * The PEM must contain at least a Certificate,
+ * It could contain a DH, a certificate chain and a PrivateKey.
+ *
+ * If it failed you should not attempt to use the ckch but free it.
+ *
+ * Return 0 on success or != 0 on failure
+ */
+int ssl_sock_load_pem_into_ckch(const char *path, char *buf, struct ckch_data *data , char **err)
+{
+	BIO *in = NULL;
+	int ret = 1;
+	X509 *ca;
+	X509 *cert = NULL;
+	EVP_PKEY *key = NULL;
+	HASSL_DH *dh = NULL;
+	STACK_OF(X509) *chain = NULL;
+
+	if (buf) {
+		/* reading from a buffer */
+		in = BIO_new_mem_buf(buf, -1);
+		if (in == NULL) {
+			memprintf(err, "%sCan't allocate memory\n", err && *err ? *err : "");
+			goto end;
+		}
+
+	} else {
+		/* reading from a file */
+		in = BIO_new(BIO_s_file());
+		if (in == NULL) {
+			memprintf(err, "%sCan't allocate memory\n", err && *err ? *err : "");
+			goto end;
+		}
+
+		if (BIO_read_filename(in, path) <= 0) {
+			memprintf(err, "%scannot open the file '%s'.\n",
+				  err && *err ? *err : "", path);
+			goto end;
+		}
+	}
+
+	/* Read Private Key */
+	key = PEM_read_bio_PrivateKey(in, NULL, NULL, NULL);
+	/* no need to check for errors here, because the private key could be loaded later */
+
+#ifndef OPENSSL_NO_DH
+	/* Seek back to beginning of file */
+	if (BIO_reset(in) == -1) {
+		memprintf(err, "%san error occurred while reading the file '%s'.\n",
+			  err && *err ? *err : "", path);
+		goto end;
+	}
+
+	dh = ssl_sock_get_dh_from_bio(in);
+	ERR_clear_error();
+	/* no need to return an error there, dh is not mandatory */
+#endif
+
+	/* Seek back to beginning of file */
+	if (BIO_reset(in) == -1) {
+		memprintf(err, "%san error occurred while reading the file '%s'.\n",
+			  err && *err ? *err : "", path);
+		goto end;
+	}
+
+	/* Read Certificate */
+	cert = PEM_read_bio_X509_AUX(in, NULL, NULL, NULL);
+	if (cert == NULL) {
+		ret = ERR_get_error();
+		memprintf(err, "%sunable to load certificate from file '%s': %s.\n",
+			  err && *err ? *err : "", path, ERR_reason_error_string(ret));
+		goto end;
+	}
+
+	/* Look for a Certificate Chain */
+	while ((ca = PEM_read_bio_X509(in, NULL, NULL, NULL))) {
+		if (chain == NULL)
+			chain = sk_X509_new_null();
+		if (!sk_X509_push(chain, ca)) {
+			X509_free(ca);
+			break;
+		}
+	}
+
+	/* PEM_R_NO_START_LINE is the normal end-of-data condition of the
+	 * chain-reading loop above; anything else is a real parse error */
+	ret = ERR_get_error();
+	if (ret && !(ERR_GET_LIB(ret) == ERR_LIB_PEM && ERR_GET_REASON(ret) == PEM_R_NO_START_LINE)) {
+		memprintf(err, "%sunable to load certificate chain from file '%s': %s\n",
+			  err && *err ? *err : "", path, ERR_reason_error_string(ret));
+		goto end;
+	}
+
+	/* once it loaded the PEM, it should remove everything else in the data */
+	if (data->ocsp_response) {
+		ha_free(&data->ocsp_response->area);
+		ha_free(&data->ocsp_response);
+	}
+
+	if (data->sctl) {
+		ha_free(&data->sctl->area);
+		ha_free(&data->sctl);
+	}
+
+	if (data->ocsp_issuer) {
+		X509_free(data->ocsp_issuer);
+		data->ocsp_issuer = NULL;
+	}
+
+	/* no error, fill data with new context, old context will be free at end: */
+	SWAP(data->key, key);
+	SWAP(data->dh, dh);
+	SWAP(data->cert, cert);
+	SWAP(data->chain, chain);
+
+	ret = 0;
+
+end:
+
+	ERR_clear_error();
+	if (in)
+		BIO_free(in);
+	if (key)
+		EVP_PKEY_free(key);
+	if (dh)
+		HASSL_DH_free(dh);
+	if (cert)
+		X509_free(cert);
+	if (chain)
+		sk_X509_pop_free(chain, X509_free);
+
+	return ret;
+}
+
+/* Frees the contents of a cert_key_and_chain
+ */
+void ssl_sock_free_cert_key_and_chain_contents(struct ckch_data *data)
+{
+	if (!data)
+		return;
+
+	/* Free the certificate and set pointer to NULL */
+	if (data->cert)
+		X509_free(data->cert);
+	data->cert = NULL;
+
+	/* Free the key and set pointer to NULL */
+	if (data->key)
+		EVP_PKEY_free(data->key);
+	data->key = NULL;
+
+	/* Free each certificate in the chain */
+	if (data->chain)
+		sk_X509_pop_free(data->chain, X509_free);
+	data->chain = NULL;
+
+	if (data->dh)
+		HASSL_DH_free(data->dh);
+	data->dh = NULL;
+
+	if (data->sctl) {
+		ha_free(&data->sctl->area);
+		ha_free(&data->sctl);
+	}
+
+	if (data->ocsp_response) {
+		ha_free(&data->ocsp_response->area);
+		ha_free(&data->ocsp_response);
+	}
+
+	if (data->ocsp_issuer)
+		X509_free(data->ocsp_issuer);
+	data->ocsp_issuer = NULL;
+
+
+	/* We need to properly remove the reference to the corresponding
+	 * certificate_ocsp structure if it exists (which it should).
+	 */
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+	if (data->ocsp_cid) {
+		struct certificate_ocsp *ocsp = NULL;
+		unsigned char certid[OCSP_MAX_CERTID_ASN1_LENGTH] = {};
+		unsigned int certid_length = 0;
+
+		if (ssl_ocsp_build_response_key(data->ocsp_cid, (unsigned char*)certid, &certid_length) >= 0) {
+			/* the tree lookup is done under the OCSP lock, but the
+			 * release itself happens after unlocking */
+			HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+			ocsp = (struct certificate_ocsp *)ebmb_lookup(&cert_ocsp_tree, certid, OCSP_MAX_CERTID_ASN1_LENGTH);
+			HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+			ssl_sock_free_ocsp(ocsp);
+		}
+
+		OCSP_CERTID_free(data->ocsp_cid);
+		data->ocsp_cid = NULL;
+	}
+#endif
+}
+
+/*
+ *
+ * This function copy a cert_key_and_chain in memory
+ *
+ * It's used to try to apply changes on a ckch before committing them, because
+ * most of the time it's not possible to revert those changes
+ *
+ * Return a the dst or NULL
+ */
+struct ckch_data *ssl_sock_copy_cert_key_and_chain(struct ckch_data *src,
+                                                   struct ckch_data *dst)
+{
+	if (!src || !dst)
+		return NULL;
+
+	/* reference-counted objects are shared, not deep-copied */
+	if (src->cert) {
+		dst->cert = src->cert;
+		X509_up_ref(src->cert);
+	}
+
+	if (src->key) {
+		dst->key = src->key;
+		EVP_PKEY_up_ref(src->key);
+	}
+
+	if (src->chain) {
+		dst->chain = X509_chain_up_ref(src->chain);
+		if (!dst->chain)
+			goto error;
+	}
+
+	if (src->dh) {
+#ifndef USE_OPENSSL_WOLFSSL
+		HASSL_DH_up_ref(src->dh);
+		dst->dh = src->dh;
+#else
+		dst->dh = wolfSSL_DH_dup(src->dh);
+		if (!dst->dh)
+			goto error;
+#endif
+	}
+
+	/* plain buffers (sctl, ocsp_response) are duplicated */
+	if (src->sctl) {
+		struct buffer *sctl;
+
+		sctl = calloc(1, sizeof(*sctl));
+		if (!sctl || !chunk_dup(sctl, src->sctl)) {
+			ha_free(&sctl);
+			goto error;
+		}
+		dst->sctl = sctl;
+	}
+
+	if (src->ocsp_response) {
+		struct buffer *ocsp_response;
+
+		ocsp_response = calloc(1, sizeof(*ocsp_response));
+		if (!ocsp_response || !chunk_dup(ocsp_response, src->ocsp_response)) {
+			ha_free(&ocsp_response);
+			goto error;
+		}
+		dst->ocsp_response = ocsp_response;
+	}
+
+	if (src->ocsp_issuer) {
+		X509_up_ref(src->ocsp_issuer);
+		dst->ocsp_issuer = src->ocsp_issuer;
+	}
+
+	/* only duplicate an existing certid, and fail loudly if the
+	 * duplication itself fails instead of silently losing it */
+	if (src->ocsp_cid) {
+		dst->ocsp_cid = OCSP_CERTID_dup(src->ocsp_cid);
+		if (!dst->ocsp_cid)
+			goto error;
+	}
+
+	dst->ocsp_update_mode = src->ocsp_update_mode;
+
+	return dst;
+
+error:
+
+	/* free everything */
+	ssl_sock_free_cert_key_and_chain_contents(dst);
+
+	return NULL;
+}
+
+/*
+ * return 0 on success or != 0 on failure
+ */
+int ssl_sock_load_issuer_file_into_ckch(const char *path, char *buf, struct ckch_data *data, char **err)
+{
+	int ret = 1;
+	BIO *in = NULL;
+	X509 *issuer;
+
+	if (buf) {
+		/* reading from a buffer */
+		in = BIO_new_mem_buf(buf, -1);
+		if (in == NULL) {
+			memprintf(err, "%sCan't allocate memory\n", err && *err ? *err : "");
+			goto end;
+		}
+
+	} else {
+		/* reading from a file */
+		in = BIO_new(BIO_s_file());
+		if (in == NULL)
+			goto end;
+
+		if (BIO_read_filename(in, path) <= 0)
+			goto end;
+	}
+
+	/* parse the first certificate found in the BIO */
+	issuer = PEM_read_bio_X509_AUX(in, NULL, NULL, NULL);
+	if (!issuer) {
+		memprintf(err, "%s'%s' cannot be read or parsed'.\n",
+			  err && *err ? *err : "", path);
+		goto end;
+	}
+	/* no error, fill data with new context, old context must be free */
+	if (data->ocsp_issuer)
+		X509_free(data->ocsp_issuer);
+	data->ocsp_issuer = issuer;
+	ret = 0;
+
+end:
+
+	ERR_clear_error();
+	if (in)
+		BIO_free(in);
+
+	return ret;
+}
+
+/******************** ckch_store functions ***********************************
+ * The ckch_store is a structure used to cache and index the SSL files used in
+ * configuration
+ */
+
+/*
+ * Free a ckch_store, its ckch, its instances and remove it from the ebtree
+ */
+void ckch_store_free(struct ckch_store *store)
+{
+	struct ckch_inst *inst, *inst_s;
+
+	if (!store)
+		return;
+
+	/* release every instance generated from this store first */
+	list_for_each_entry_safe(inst, inst_s, &store->ckch_inst, by_ckchs) {
+		ckch_inst_free(inst);
+	}
+	/* then detach the store from the ckchs tree */
+	ebmb_delete(&store->node);
+
+	ssl_sock_free_cert_key_and_chain_contents(store->data);
+	ha_free(&store->data);
+
+	free(store);
+}
+
+/*
+ * create and initialize a ckch_store
+ * <filename> is the key name, copied into the flexible array member
+ * store->path
+ *
+ * Return a ckch_store or NULL upon failure.
+ */
+struct ckch_store *ckch_store_new(const char *filename)
+{
+	struct ckch_store *store;
+	int pathlen;
+
+	pathlen = strlen(filename);
+	/* the path is stored in the flexible array member at the end of the
+	 * structure, hence the "+ pathlen + 1" */
+	store = calloc(1, sizeof(*store) + pathlen + 1);
+	if (!store)
+		return NULL;
+
+	memcpy(store->path, filename, pathlen + 1);
+
+	LIST_INIT(&store->ckch_inst);
+	LIST_INIT(&store->crtlist_entry);
+
+	store->data = calloc(1, sizeof(*store->data));
+	if (!store->data)
+		goto error;
+
+	return store;
+error:
+	ckch_store_free(store);
+	return NULL;
+}
+
+/* Allocate a new ckch_store and deep-copy the contents of <src> into it.
+ * Returns the new ckch_store or NULL on allocation/copy failure. */
+struct ckch_store *ckchs_dup(const struct ckch_store *src)
+{
+	struct ckch_store *new_store;
+
+	if (!src)
+		return NULL;
+
+	new_store = ckch_store_new(src->path);
+	if (new_store && ssl_sock_copy_cert_key_and_chain(src->data, new_store->data))
+		return new_store;
+
+	/* allocation or copy failure: drop the partially built store
+	 * (ckch_store_free() accepts NULL) */
+	ckch_store_free(new_store);
+	return NULL;
+}
+
+/*
+ * Look up <path> in the ckchs tree and return the matching ckch_store,
+ * or NULL when the path is unknown.
+ */
+struct ckch_store *ckchs_lookup(char *path)
+{
+	struct ebmb_node *eb_node = ebst_lookup(&ckchs_tree, path);
+
+	return eb_node ? ebmb_entry(eb_node, struct ckch_store, node) : NULL;
+}
+
+/*
+ * This function allocate a ckch_store and populate it with certificates from files.
+ */
+struct ckch_store *ckchs_load_cert_file(char *path, char **err)
+{
+	struct ckch_store *ckchs;
+
+	ckchs = ckch_store_new(path);
+	if (!ckchs) {
+		memprintf(err, "%sunable to allocate memory.\n", err && *err ? *err : "");
+		goto end;
+	}
+
+	if (ssl_sock_load_files_into_ckch(path, ckchs->data, err) == 1)
+		goto end;
+
+	/* insert into the ckchs tree */
+	/* NOTE(review): ckch_store_new() already copied <path> into
+	 * ckchs->path, so this memcpy looks redundant — confirm */
+	memcpy(ckchs->path, path, strlen(path) + 1);
+	ebst_insert(&ckchs_tree, &ckchs->node);
+	return ckchs;
+
+end:
+	ckch_store_free(ckchs);
+
+	return NULL;
+}
+
+
+/******************** ckch_inst functions ******************************/
+
+/* unlink a ckch_inst, free all SNIs, free the ckch_inst */
+/* The caller must use the lock of the bind_conf if used with inserted SNIs */
+void ckch_inst_free(struct ckch_inst *inst)
+{
+	struct sni_ctx *sni, *sni_s;
+	struct ckch_inst_link_ref *link_ref, *link_ref_s;
+
+	if (inst == NULL)
+		return;
+
+	/* release every SNI context attached to this instance */
+	list_for_each_entry_safe(sni, sni_s, &inst->sni_ctx, by_ckch_inst) {
+		SSL_CTX_free(sni->ctx);
+		LIST_DELETE(&sni->by_ckch_inst);
+		ebmb_delete(&sni->name);
+		free(sni);
+	}
+	SSL_CTX_free(inst->ctx);
+	inst->ctx = NULL;
+	/* detach the instance from its store and crt-list entry */
+	LIST_DELETE(&inst->by_ckchs);
+	LIST_DELETE(&inst->by_crtlist_entry);
+
+	/* Free the cafile_link_refs list */
+	list_for_each_entry_safe(link_ref, link_ref_s, &inst->cafile_link_refs, list) {
+		if (link_ref->link && LIST_INLIST(&link_ref->link->list)) {
+			/* Try to detach and free the ckch_inst_link only if it
+			 * was attached, this way it can be used to loop from
+			 * the caller */
+			LIST_DEL_INIT(&link_ref->link->list);
+			ha_free(&link_ref->link);
+		}
+		LIST_DELETE(&link_ref->list);
+		free(link_ref);
+	}
+
+	free(inst);
+}
+
+/* Allocate a zeroed ckch_inst and initialize its list heads.
+ * Returns the new instance or NULL on allocation failure. */
+struct ckch_inst *ckch_inst_new()
+{
+	struct ckch_inst *inst = calloc(1, sizeof(*inst));
+
+	if (inst) {
+		LIST_INIT(&inst->sni_ctx);
+		LIST_INIT(&inst->by_ckchs);
+		LIST_INIT(&inst->by_crtlist_entry);
+		LIST_INIT(&inst->cafile_link_refs);
+	}
+	return inst;
+}
+
+
+/******************** ssl_store functions ******************************/
+struct eb_root cafile_tree = EB_ROOT;
+
+/*
+ * Returns the cafile_entry found in the cafile_tree indexed by the path 'path'.
+ * If 'oldest_entry' is 1, returns the "original" cafile_entry (since
+ * during a set cafile/commit cafile cycle there might be two entries for any
+ * given path, the original one and the new one set via the CLI but not
+ * committed yet).
+ */
+struct cafile_entry *ssl_store_get_cafile_entry(char *path, int oldest_entry)
+{
+	struct cafile_entry *ca_e = NULL;
+	struct ebmb_node *eb;
+
+	eb = ebst_lookup(&cafile_tree, path);
+	while (eb) {
+		ca_e = ebmb_entry(eb, struct cafile_entry, node);
+		/* The ebst_lookup in a tree that has duplicates returns the
+		 * oldest entry first. If we want the latest entry, we need to
+		 * iterate over all the duplicates until we find the last one
+		 * (in our case there should never be more than two entries for
+		 * any given path). */
+		if (oldest_entry)
+			return ca_e;
+		eb = ebmb_next_dup(eb);
+	}
+	/* when oldest_entry is 0, <ca_e> holds the last duplicate seen,
+	 * or NULL if the path was not found at all */
+	return ca_e;
+}
+
+/* Insert <entry> into the cafile tree. Returns 0 when the tree kept our
+ * node, non-zero when the insertion returned a different node. */
+int ssl_store_add_uncommitted_cafile_entry(struct cafile_entry *entry)
+{
+	struct ebmb_node *inserted;
+
+	inserted = ebst_insert(&cafile_tree, &entry->node);
+	return inserted != &entry->node;
+}
+
+/* Return the X509_STORE cached for <path>, or NULL when none is known.
+ * No reference is taken on the returned store ("get0" semantics). */
+X509_STORE* ssl_store_get0_locations_file(char *path)
+{
+	struct cafile_entry *entry = ssl_store_get_cafile_entry(path, 0);
+
+	return entry ? entry->ca_store : NULL;
+}
+
+/* Allocate and initialize a cafile_entry for <path>, holding <store> and
+ * <type>. The entry is NOT inserted into the cafile_tree. Returns the new
+ * entry or NULL on allocation failure. */
+struct cafile_entry *ssl_store_create_cafile_entry(char *path, X509_STORE *store, enum cafile_type type)
+{
+	struct cafile_entry *entry;
+	int len = strlen(path);
+
+	/* <path> lives in the flexible array member at the end of the entry */
+	entry = calloc(1, sizeof(*entry) + len + 1);
+	if (!entry)
+		return NULL;
+
+	memcpy(entry->path, path, len + 1);
+	entry->ca_store = store;
+	entry->type = type;
+	LIST_INIT(&entry->ckch_inst_link);
+
+	return entry;
+}
+
+
+/* Duplicate a cafile_entry
+ * Allocate the X509_STORE and copy the X509 and CRL inside.
+ *
+ * Return the newly allocated cafile_entry or NULL.
+ *
+ */
+struct cafile_entry *ssl_store_dup_cafile_entry(struct cafile_entry *src)
+{
+	struct cafile_entry *dst = NULL;
+	X509_STORE *store = NULL;
+	STACK_OF(X509_OBJECT) *objs;
+	int i;
+
+	if (!src)
+		return NULL;
+
+	if (src->ca_store) {
+		/* if there was a store in the src, copy it */
+		store = X509_STORE_new();
+		if (!store)
+			goto err;
+
+		/* walk every object of the source store and re-add it (both
+		 * certificates and CRLs) to the new one */
+		objs = X509_STORE_get0_objects(src->ca_store);
+		for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
+			X509 *cert;
+			X509_CRL *crl;
+
+			cert = X509_OBJECT_get0_X509(sk_X509_OBJECT_value(objs, i));
+			if (cert) {
+				if (X509_STORE_add_cert(store, cert) == 0) {
+					/* only exits on error if the error is not about duplicate certificates */
+					if (!(ERR_GET_REASON(ERR_get_error()) == X509_R_CERT_ALREADY_IN_HASH_TABLE)) {
+						goto err;
+					}
+				}
+
+			}
+			crl = X509_OBJECT_get0_X509_CRL(sk_X509_OBJECT_value(objs, i));
+			if (crl) {
+				if (X509_STORE_add_crl(store, crl) == 0) {
+					/* only exits on error if the error is not about duplicate certificates */
+					if (!(ERR_GET_REASON(ERR_get_error()) == X509_R_CERT_ALREADY_IN_HASH_TABLE)) {
+						goto err;
+					}
+				}
+
+			}
+		}
+	}
+	/* note: a NULL return here (allocation failure) is passed through
+	 * to the caller; <store> then leaks into the NULL entry path */
+	dst = ssl_store_create_cafile_entry(src->path, store, src->type);
+
+	return dst;
+
+err:
+	X509_STORE_free(store);
+	ha_free(&dst);
+
+	return NULL;
+}
+
+/* Delete a cafile_entry. The caller is responsible from removing this entry
+ * from the cafile_tree first if is was previously added into it. */
+void ssl_store_delete_cafile_entry(struct cafile_entry *ca_e)
+{
+	struct ckch_inst_link *link, *link_s;
+	if (!ca_e)
+		return;
+
+	X509_STORE_free(ca_e->ca_store);
+
+	/* unlink every instance link and the back-reference each instance
+	 * keeps to it, so no dangling pointer survives the entry */
+	list_for_each_entry_safe(link, link_s, &ca_e->ckch_inst_link, list) {
+		struct ckch_inst *inst = link->ckch_inst;
+		struct ckch_inst_link_ref *link_ref, *link_ref_s;
+		list_for_each_entry_safe(link_ref, link_ref_s, &inst->cafile_link_refs, list) {
+			if (link_ref->link == link) {
+				LIST_DELETE(&link_ref->list);
+				free(link_ref);
+				break;
+			}
+		}
+		LIST_DELETE(&link->list);
+		free(link);
+	}
+
+	free(ca_e);
+}
+
+/*
+ * Fill a cafile_entry <ca_e> X509_STORE ca_e->store out of a buffer <cert_buf>
+ * instead of out of a file. The <append> field should be set to 1 if you want
+ * to keep the existing X509_STORE and append data to it.
+ *
+ * This function is used when the "set ssl ca-file" cli command is used.
+ * It can parse CERTIFICATE sections as well as CRL ones.
+ * Returns 0 in case of success, 1 otherwise.
+ *
+ * /!\ Warning: If there was an error the X509_STORE could have been modified so it's
+ * better to not use it after a return 1.
+ */
+int ssl_store_load_ca_from_buf(struct cafile_entry *ca_e, char *cert_buf, int append)
+{
+	BIO *bio = NULL;
+	STACK_OF(X509_INFO) *infos;
+	X509_INFO *info;
+	int i;
+	int retval = 1;
+	int retcert = 0;
+
+	if (!ca_e)
+		return 1;
+
+	if (!append) {
+		/* replace mode: drop the previous store entirely */
+		X509_STORE_free(ca_e->ca_store);
+		ca_e->ca_store = NULL;
+	}
+
+	if (!ca_e->ca_store)
+		ca_e->ca_store = X509_STORE_new();
+
+	if (!ca_e->ca_store)
+		goto end;
+
+	bio = BIO_new_mem_buf(cert_buf, strlen(cert_buf));
+	if (!bio)
+		goto end;
+
+	/* parse all PEM sections (certificates and CRLs) of the buffer */
+	infos = PEM_X509_INFO_read_bio(bio, NULL, NULL, NULL);
+	if (!infos)
+		goto end;
+
+	/* <retcert> becomes non-zero on the first add failure and stops the loop */
+	for (i = 0; i < sk_X509_INFO_num(infos) && !retcert; i++) {
+		info = sk_X509_INFO_value(infos, i);
+
+		/* X509_STORE_add_cert and X509_STORE_add_crl return 1 on success */
+		if (info->x509)
+			retcert = !X509_STORE_add_cert(ca_e->ca_store, info->x509);
+		if (!retcert && info->crl)
+			retcert = !X509_STORE_add_crl(ca_e->ca_store, info->crl);
+	}
+
+	/* return an error if we didn't compute all the X509_INFO or if there was none
+	 * set to 0 if everything was right */
+	if (!(retcert || (i != sk_X509_INFO_num(infos)) || (sk_X509_INFO_num(infos) == 0)))
+		retval = 0;
+
+	/* Cleanup */
+	sk_X509_INFO_pop_free(infos, X509_INFO_free);
+
+end:
+	BIO_free(bio);
+
+	return retval;
+}
+
+/*
+ * Try to load a ca-file from disk into the ca-file cache.
+ * <shuterror> allows you to stop emitting the errors.
+ * Return 0 upon error
+ */
+int __ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type, int shuterror)
+{
+	X509_STORE *store = ssl_store_get0_locations_file(path);
+
+	/* If this function is called by the CLI, we should not call the
+	 * X509_STORE_load_locations function because it performs forbidden disk
+	 * accesses. */
+	if (!store && create_if_none) {
+		STACK_OF(X509_OBJECT) *objs;
+		int cert_count = 0;
+		struct stat buf;
+		struct cafile_entry *ca_e;
+		const char *file = NULL;
+		const char *dir = NULL;
+		unsigned long e;
+
+		store = X509_STORE_new();
+		if (!store) {
+			if (!shuterror)
+				ha_alert("Cannot allocate memory!\n");
+			goto err;
+		}
+
+		/* "@system-ca" is a reserved name for the system CA directory */
+		if (strcmp(path, "@system-ca") == 0) {
+			dir = X509_get_default_cert_dir();
+			if (!dir) {
+				if (!shuterror)
+					ha_alert("Couldn't get the system CA directory from X509_get_default_cert_dir().\n");
+				goto err;
+			}
+
+		} else {
+
+			if (stat(path, &buf) == -1) {
+				if (!shuterror)
+					ha_alert("Couldn't open the ca-file '%s' (%s).\n", path, strerror(errno));
+				goto err;
+			}
+
+			if (S_ISDIR(buf.st_mode))
+				dir = path;
+			else
+				file = path;
+		}
+
+		if (file) {
+			if (!X509_STORE_load_locations(store, file, NULL)) {
+				e = ERR_get_error();
+				if (!shuterror)
+					ha_alert("Couldn't open the ca-file '%s' (%s).\n", path, ERR_reason_error_string(e));
+				goto err;
+			}
+		} else if (dir) {
+			int n, i;
+			struct dirent **de_list;
+
+			n = scandir(dir, &de_list, 0, alphasort);
+			if (n < 0)
+				goto err;
+
+			for (i = 0; i < n; i++) {
+				char *end;
+				struct dirent *de = de_list[i];
+				BIO *in = NULL;
+				X509 *ca = NULL;
+
+				ERR_clear_error();
+
+				/* we try to load the files that would have
+				 * been loaded in an hashed directory loaded by
+				 * X509_LOOKUP_hash_dir, so according to "man 1
+				 * c_rehash", we should load ".pem", ".crt",
+				 * ".cer", or ".crl". Files starting with a dot
+				 * are ignored.
+				 */
+				end = strrchr(de->d_name, '.');
+				if (!end || de->d_name[0] == '.' ||
+				    (strcmp(end, ".pem") != 0 &&
+				     strcmp(end, ".crt") != 0 &&
+				     strcmp(end, ".cer") != 0 &&
+				     strcmp(end, ".crl") != 0)) {
+					free(de);
+					continue;
+				}
+				in = BIO_new(BIO_s_file());
+				if (in == NULL)
+					goto scandir_err;
+
+				chunk_printf(&trash, "%s/%s", dir, de->d_name);
+
+				if (BIO_read_filename(in, trash.area) == 0)
+					goto scandir_err;
+
+				if (PEM_read_bio_X509_AUX(in, &ca, NULL, NULL) == NULL)
+					goto scandir_err;
+
+				if (X509_STORE_add_cert(store, ca) == 0) {
+					/* only exits on error if the error is not about duplicate certificates */
+					if (!(ERR_GET_REASON(ERR_get_error()) == X509_R_CERT_ALREADY_IN_HASH_TABLE)) {
+						goto scandir_err;
+					}
+				}
+
+				X509_free(ca);
+				BIO_free(in);
+				free(de);
+				continue;
+
+scandir_err:
+				e = ERR_get_error();
+				X509_free(ca);
+				BIO_free(in);
+				free(de);
+				/* warn if it can load one of the files, but don't abort */
+				if (!shuterror)
+					ha_warning("ca-file: '%s' couldn't load '%s' (%s)\n", path, trash.area, ERR_reason_error_string(e));
+
+			}
+			free(de_list);
+		} else {
+			if (!shuterror)
+				ha_alert("ca-file: couldn't load '%s'\n", path);
+			goto err;
+		}
+
+		objs = X509_STORE_get0_objects(store);
+		cert_count = sk_X509_OBJECT_num(objs);
+		if (cert_count == 0) {
+			if (!shuterror)
+				ha_warning("ca-file: 0 CA were loaded from '%s'\n", path);
+		}
+		ca_e = ssl_store_create_cafile_entry(path, store, type);
+		if (!ca_e) {
+			if (!shuterror)
+				ha_alert("Cannot allocate memory!\n");
+			goto err;
+		}
+		ebst_insert(&cafile_tree, &ca_e->node);
+	}
+	return (store != NULL);
+
+err:
+	X509_STORE_free(store);
+	store = NULL;
+	return 0;
+
+}
+
+int ssl_store_load_locations_file(char *path, int create_if_none, enum cafile_type type)
+{
+ return __ssl_store_load_locations_file(path, create_if_none, type, 0);
+}
+
+/*************************** CLI commands ***********************/
+
+/* Type of SSL payloads that can be updated over the CLI */
+
/* Table of the filename extensions supported by "set ssl cert", with the
 * loader callback used for each of them. The first entry (empty extension,
 * PEM) is the default one that cli_parse_set_cert() starts from
 * (&cert_exts[0]); the table is terminated by a NULL entry. Some entries are
 * only compiled in when the SSL library supports the corresponding feature
 * (OCSP stapling, SCTL). */
struct cert_exts cert_exts[] = {
	{ "", CERT_TYPE_PEM, &ssl_sock_load_pem_into_ckch }, /* default mode, no extensions */
	{ "key", CERT_TYPE_KEY, &ssl_sock_load_key_into_ckch },
#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) || defined OPENSSL_IS_BORINGSSL)
	{ "ocsp", CERT_TYPE_OCSP, &ssl_sock_load_ocsp_response_from_file },
#endif
#ifdef HAVE_SSL_SCTL
	{ "sctl", CERT_TYPE_SCTL, &ssl_sock_load_sctl_from_file },
#endif
	{ "issuer", CERT_TYPE_ISSUER, &ssl_sock_load_issuer_file_into_ckch },
	{ NULL, CERT_TYPE_MAX, NULL },
};
+
+
/* release function of the `show ssl cert' command: releases the CKCH
 * spinlock taken by cli_parse_show_cert() so other certificate operations
 * can resume. */
static void cli_release_show_cert(struct appctx *appctx)
{
	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
}
+
/* IO handler of "show ssl cert <filename>".
 * It makes use of a show_cert_ctx context, and ckchs_transaction in read-only.
 * Lists every ckch_store path in the tree; an ongoing transaction is shown
 * first, prefixed with '*'. Returns 1 when done, 0 to be called again after
 * the output buffer has been flushed (yield).
 */
static int cli_io_handler_show_cert(struct appctx *appctx)
{
	struct show_cert_ctx *ctx = appctx->svcctx;
	struct buffer *trash = alloc_trash_chunk();
	struct ebmb_node *node;
	struct ckch_store *ckchs = NULL;

	if (trash == NULL)
		return 1;

	/* dump the transaction header only once: ctx->old_ckchs records that
	 * it was already emitted on a previous call */
	if (!ctx->old_ckchs && ckchs_transaction.old_ckchs) {
		ckchs = ckchs_transaction.old_ckchs;
		chunk_appendf(trash, "# transaction\n");
		chunk_appendf(trash, "*%s\n", ckchs->path);
		if (applet_putchk(appctx, trash) == -1)
			goto yield;
		ctx->old_ckchs = ckchs_transaction.old_ckchs;
	}

	/* either start from the beginning of the tree, or resume from the
	 * entry saved before the previous yield */
	if (!ctx->cur_ckchs) {
		chunk_appendf(trash, "# filename\n");
		node = ebmb_first(&ckchs_tree);
	} else {
		node = &ctx->cur_ckchs->node;
	}
	while (node) {
		ckchs = ebmb_entry(node, struct ckch_store, node);
		chunk_appendf(trash, "%s\n", ckchs->path);

		node = ebmb_next(node);
		/* on yield, ckchs still points to the entry whose output was
		 * not flushed, so it will be dumped again on re-entry */
		if (applet_putchk(appctx, trash) == -1)
			goto yield;
	}

	ctx->cur_ckchs = NULL;
	free_trash_chunk(trash);
	return 1;
yield:

	free_trash_chunk(trash);
	ctx->cur_ckchs = ckchs;
	return 0; /* should come back */
}
+
/*
 * Extract and format the DNS SAN extensions and copy the result into a chunk.
 * Always returns 0.
 */
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+static int ssl_sock_get_san_oneline(X509 *cert, struct buffer *out)
+{
+ int i;
+ char *str;
+ STACK_OF(GENERAL_NAME) *names = NULL;
+
+ names = X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
+ if (names) {
+ for (i = 0; i < sk_GENERAL_NAME_num(names); i++) {
+ GENERAL_NAME *name = sk_GENERAL_NAME_value(names, i);
+ if (i > 0)
+ chunk_appendf(out, ", ");
+ if (name->type == GEN_DNS) {
+ if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.dNSName) >= 0) {
+ chunk_appendf(out, "DNS:%s", str);
+ OPENSSL_free(str);
+ }
+ }
+ }
+ sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free);
+ }
+ return 0;
+}
+#endif
+
+/*
+ * Build the ckch_inst_link that will be chained in the CA file entry and the
+ * corresponding ckch_inst_link_ref that will be chained in the ckch instance.
+ * Return 0 in case of success.
+ */
+static int do_chain_inst_and_cafile(struct cafile_entry *cafile_entry, struct ckch_inst *ckch_inst)
+{
+ struct ckch_inst_link *new_link;
+ if (!LIST_ISEMPTY(&cafile_entry->ckch_inst_link)) {
+ struct ckch_inst_link *link = LIST_ELEM(cafile_entry->ckch_inst_link.n,
+ typeof(link), list);
+ /* Do not add multiple references to the same
+ * instance in a cafile_entry */
+ if (link->ckch_inst == ckch_inst) {
+ return 1;
+ }
+ }
+
+ new_link = calloc(1, sizeof(*new_link));
+ if (new_link) {
+ struct ckch_inst_link_ref *new_link_ref = calloc(1, sizeof(*new_link_ref));
+ if (!new_link_ref) {
+ free(new_link);
+ return 1;
+ }
+
+ new_link->ckch_inst = ckch_inst;
+ new_link_ref->link = new_link;
+ LIST_INIT(&new_link->list);
+ LIST_INIT(&new_link_ref->list);
+
+ LIST_APPEND(&cafile_entry->ckch_inst_link, &new_link->list);
+ LIST_APPEND(&ckch_inst->cafile_link_refs, &new_link_ref->list);
+ }
+
+ return 0;
+}
+
+
/*
 * Link a CA file tree entry to the ckch instance that uses it.
 * To determine if and which CA file tree entries need to be linked to the
 * instance, we follow the same logic performed in ssl_sock_prepare_ctx when
 * processing the verify option.
 * This function works for a frontend as well as for a backend, depending on the
 * configuration parameters given (bind_conf or server).
 */
void ckch_inst_add_cafile_link(struct ckch_inst *ckch_inst, struct bind_conf *bind_conf,
			       struct ssl_bind_conf *ssl_conf, const struct server *srv)
{
	int verify = SSL_VERIFY_NONE;

	if (srv) {

		/* server side: the global default may require verification,
		 * then the per-server setting overrides it */
		if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
			verify = SSL_VERIFY_PEER;
		switch (srv->ssl_ctx.verify) {
		case SSL_SOCK_VERIFY_NONE:
			verify = SSL_VERIFY_NONE;
			break;
		case SSL_SOCK_VERIFY_REQUIRED:
			verify = SSL_VERIFY_PEER;
			break;
		}
	}
	else {
		/* bind side: the crt-list ssl_conf (if any) takes precedence
		 * over the bind line's ssl_conf */
		switch ((ssl_conf && ssl_conf->verify) ? ssl_conf->verify : bind_conf->ssl_conf.verify) {
		case SSL_SOCK_VERIFY_NONE:
			verify = SSL_VERIFY_NONE;
			break;
		case SSL_SOCK_VERIFY_OPTIONAL:
			verify = SSL_VERIFY_PEER;
			break;
		case SSL_SOCK_VERIFY_REQUIRED:
			verify = SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
			break;
		}
	}

	/* only link CA/CRL entries when peer verification is enabled, since
	 * only then will the files be used by the SSL_CTX */
	if (verify & SSL_VERIFY_PEER) {
		struct cafile_entry *ca_file_entry = NULL;
		struct cafile_entry *ca_verify_file_entry = NULL;
		struct cafile_entry *crl_file_entry = NULL;
		if (srv) {
			if (srv->ssl_ctx.ca_file) {
				ca_file_entry = ssl_store_get_cafile_entry(srv->ssl_ctx.ca_file, 0);

			}
			if (srv->ssl_ctx.crl_file) {
				crl_file_entry = ssl_store_get_cafile_entry(srv->ssl_ctx.crl_file, 0);
			}
		}
		else {
			char *ca_file = (ssl_conf && ssl_conf->ca_file) ? ssl_conf->ca_file : bind_conf->ssl_conf.ca_file;
			char *ca_verify_file = (ssl_conf && ssl_conf->ca_verify_file) ? ssl_conf->ca_verify_file : bind_conf->ssl_conf.ca_verify_file;
			char *crl_file = (ssl_conf && ssl_conf->crl_file) ? ssl_conf->crl_file : bind_conf->ssl_conf.crl_file;

			if (ca_file)
				ca_file_entry = ssl_store_get_cafile_entry(ca_file, 0);
			if (ca_verify_file)
				ca_verify_file_entry = ssl_store_get_cafile_entry(ca_verify_file, 0);
			if (crl_file)
				crl_file_entry = ssl_store_get_cafile_entry(crl_file, 0);
		}

		if (ca_file_entry) {
			/* If we have a ckch instance that is not already in the
			 * cafile_entry's list, add it to it. */
			if (do_chain_inst_and_cafile(ca_file_entry, ckch_inst))
				return;

		}
		/* skip ca_verify_file when it is the same entry as ca_file to
		 * avoid a duplicate link */
		if (ca_verify_file_entry && (ca_file_entry != ca_verify_file_entry)) {
			/* If we have a ckch instance that is not already in the
			 * cafile_entry's list, add it to it. */
			if (do_chain_inst_and_cafile(ca_verify_file_entry, ckch_inst))
				return;
		}
		if (crl_file_entry) {
			/* If we have a ckch instance that is not already in the
			 * cafile_entry's list, add it to it. */
			if (do_chain_inst_and_cafile(crl_file_entry, ckch_inst))
				return;
		}
	}
}
+
+
+
+static int show_cert_detail(X509 *cert, STACK_OF(X509) *chain, struct buffer *out)
+{
+ BIO *bio = NULL;
+ struct buffer *tmp = alloc_trash_chunk();
+ int i;
+ int write = -1;
+ unsigned int len = 0;
+ X509_NAME *name = NULL;
+
+ if (!tmp)
+ return -1;
+
+ if (!cert)
+ goto end;
+
+ if (chain == NULL) {
+ struct issuer_chain *issuer;
+ issuer = ssl_get0_issuer_chain(cert);
+ if (issuer) {
+ chain = issuer->chain;
+ chunk_appendf(out, "Chain Filename: ");
+ chunk_appendf(out, "%s\n", issuer->path);
+ }
+ }
+ chunk_appendf(out, "Serial: ");
+ if (ssl_sock_get_serial(cert, tmp) == -1)
+ goto end;
+ dump_binary(out, tmp->area, tmp->data);
+ chunk_appendf(out, "\n");
+
+ chunk_appendf(out, "notBefore: ");
+ chunk_reset(tmp);
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ goto end;
+ if (ASN1_TIME_print(bio, X509_getm_notBefore(cert)) == 0)
+ goto end;
+ write = BIO_read(bio, tmp->area, tmp->size-1);
+ tmp->area[write] = '\0';
+ BIO_free(bio);
+ bio = NULL;
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ chunk_appendf(out, "notAfter: ");
+ chunk_reset(tmp);
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ goto end;
+ if (ASN1_TIME_print(bio, X509_getm_notAfter(cert)) == 0)
+ goto end;
+ if ((write = BIO_read(bio, tmp->area, tmp->size-1)) <= 0)
+ goto end;
+ tmp->area[write] = '\0';
+ BIO_free(bio);
+ bio = NULL;
+ chunk_appendf(out, "%s\n", tmp->area);
+
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ chunk_appendf(out, "Subject Alternative Name: ");
+ if (ssl_sock_get_san_oneline(cert, out) == -1)
+ goto end;
+ *(out->area + out->data) = '\0';
+ chunk_appendf(out, "\n");
+#endif
+ chunk_reset(tmp);
+ chunk_appendf(out, "Algorithm: ");
+ if (cert_get_pkey_algo(cert, tmp) == 0)
+ goto end;
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ chunk_reset(tmp);
+ chunk_appendf(out, "SHA1 FingerPrint: ");
+ if (X509_digest(cert, EVP_sha1(), (unsigned char *) tmp->area, &len) == 0)
+ goto end;
+ tmp->data = len;
+ dump_binary(out, tmp->area, tmp->data);
+ chunk_appendf(out, "\n");
+
+ chunk_appendf(out, "Subject: ");
+ if ((name = X509_get_subject_name(cert)) == NULL)
+ goto end;
+ if ((ssl_sock_get_dn_oneline(name, tmp)) == -1)
+ goto end;
+ *(tmp->area + tmp->data) = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ chunk_appendf(out, "Issuer: ");
+ if ((name = X509_get_issuer_name(cert)) == NULL)
+ goto end;
+ if ((ssl_sock_get_dn_oneline(name, tmp)) == -1)
+ goto end;
+ *(tmp->area + tmp->data) = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ /* Displays subject of each certificate in the chain */
+ for (i = 0; i < sk_X509_num(chain); i++) {
+ X509 *ca = sk_X509_value(chain, i);
+
+ chunk_appendf(out, "Chain Subject: ");
+ if ((name = X509_get_subject_name(ca)) == NULL)
+ goto end;
+ if ((ssl_sock_get_dn_oneline(name, tmp)) == -1)
+ goto end;
+ *(tmp->area + tmp->data) = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ chunk_appendf(out, "Chain Issuer: ");
+ if ((name = X509_get_issuer_name(ca)) == NULL)
+ goto end;
+ if ((ssl_sock_get_dn_oneline(name, tmp)) == -1)
+ goto end;
+ *(tmp->area + tmp->data) = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+ }
+
+end:
+ if (bio)
+ BIO_free(bio);
+ free_trash_chunk(tmp);
+
+ return 0;
+}
+
/*
 * Dump the OCSP certificate key (if it exists) of certificate <ckch> into
 * buffer <out>.
 * Returns 0 in case of success.
 */
static int ckch_store_show_ocsp_certid(struct ckch_store *ckch_store, struct buffer *out)
{
#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
	unsigned char key[OCSP_MAX_CERTID_ASN1_LENGTH] = {};
	unsigned int key_length = 0;

	if (ssl_ocsp_build_response_key(ckch_store->data->ocsp_cid, (unsigned char*)key, &key_length) >= 0) {
		unsigned int idx = 0;

		/* Dump the CERTID info as a hex string */
		chunk_appendf(out, "OCSP Response Key: ");
		while (idx < key_length) {
			chunk_appendf(out, "%02x", key[idx]);
			idx++;
		}
		chunk_appendf(out, "\n");
	}
#endif

	return 0;
}
+
+
/* IO handler of the details "show ssl cert <filename>".
 * It uses a struct show_cert_ctx and ckchs_transaction in read-only.
 * Dumps filename, status (Empty/Unused/Used), the certificate details and
 * the OCSP CERTID of ctx->cur_ckchs. Returns 1 when done, 0 to come back
 * once the output buffer has room again.
 */
static int cli_io_handler_show_cert_detail(struct appctx *appctx)
{
	struct show_cert_ctx *ctx = appctx->svcctx;
	struct ckch_store *ckchs = ctx->cur_ckchs;
	struct buffer *out = alloc_trash_chunk();
	int retval = 0;

	if (!out)
		goto end_no_putchk;

	chunk_appendf(out, "Filename: ");
	/* a '*' prefix denotes the uncommitted store of an ongoing transaction */
	if (ckchs == ckchs_transaction.new_ckchs)
		chunk_appendf(out, "*");
	chunk_appendf(out, "%s\n", ckchs->path);

	chunk_appendf(out, "Status: ");
	if (ckchs->data->cert == NULL)
		chunk_appendf(out, "Empty\n");
	else if (LIST_ISEMPTY(&ckchs->ckch_inst))
		chunk_appendf(out, "Unused\n");
	else
		chunk_appendf(out, "Used\n");

	retval = show_cert_detail(ckchs->data->cert, ckchs->data->chain, out);
	if (retval < 0)
		goto end_no_putchk;
	else if (retval)
		goto end;

	ckch_store_show_ocsp_certid(ckchs, out);

end:
	if (applet_putchk(appctx, out) == -1)
		goto yield;

end_no_putchk:
	free_trash_chunk(out);
	return 1;
yield:
	free_trash_chunk(out);
	return 0; /* should come back */
}
+
+
/* IO handler of the details "show ssl cert <filename.ocsp>".
 * It uses a show_cert_ctx.
 * Returns 1 when done, 0 to come back after the output buffer has been
 * flushed. When the SSL library lacks OCSP support, a CLI error is emitted
 * instead.
 */
static int cli_io_handler_show_cert_ocsp_detail(struct appctx *appctx)
{
#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
	struct show_cert_ctx *ctx = appctx->svcctx;
	struct ckch_store *ckchs = ctx->cur_ckchs;
	struct buffer *out = alloc_trash_chunk();
	int from_transaction = ctx->transaction;

	if (!out)
		goto end_no_putchk;

	/* If we try to display an ongoing transaction's OCSP response, we
	 * need to dump the ckch's ocsp_response buffer directly.
	 * Otherwise, we must rebuild the certificate's certid in order to
	 * look for the current OCSP response in the tree. */
	if (from_transaction && ckchs->data->ocsp_response) {
		if (ssl_ocsp_response_print(ckchs->data->ocsp_response, out))
			goto end_no_putchk;
	}
	else {
		unsigned char key[OCSP_MAX_CERTID_ASN1_LENGTH] = {};
		unsigned int key_length = 0;

		if (ssl_ocsp_build_response_key(ckchs->data->ocsp_cid, (unsigned char*)key, &key_length) < 0)
			goto end_no_putchk;

		if (ssl_get_ocspresponse_detail(key, out))
			goto end_no_putchk;
	}

	if (applet_putchk(appctx, out) == -1)
		goto yield;

end_no_putchk:
	free_trash_chunk(out);
	return 1;
yield:
	free_trash_chunk(out);
	return 0; /* should come back */
#else
	return cli_err(appctx, "HAProxy was compiled against a version of OpenSSL that doesn't support OCSP stapling.\n");
#endif
}
+
+/* parsing function for 'show ssl cert [certfile]' */
+static int cli_parse_show_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_cert_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ struct ckch_store *ckchs;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return cli_err(appctx, "Can't allocate memory!\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't show!\nOperations on certificates are currently locked!\n");
+
+ /* check if there is a certificate to lookup */
+ if (*args[3]) {
+ int show_ocsp_detail = 0;
+ int from_transaction = 0;
+ char *end;
+
+ /* We manage the special case "certname.ocsp" through which we
+ * can show the details of an OCSP response. */
+ end = strrchr(args[3], '.');
+ if (end && strcmp(end+1, "ocsp") == 0) {
+ *end = '\0';
+ show_ocsp_detail = 1;
+ }
+
+ if (*args[3] == '*') {
+ from_transaction = 1;
+ if (!ckchs_transaction.new_ckchs)
+ goto error;
+
+ ckchs = ckchs_transaction.new_ckchs;
+
+ if (strcmp(args[3] + 1, ckchs->path) != 0)
+ goto error;
+
+ } else {
+ if ((ckchs = ckchs_lookup(args[3])) == NULL)
+ goto error;
+
+ }
+
+ ctx->cur_ckchs = ckchs;
+ /* use the IO handler that shows details */
+ if (show_ocsp_detail) {
+ ctx->transaction = from_transaction;
+ appctx->io_handler = cli_io_handler_show_cert_ocsp_detail;
+ }
+ else
+ appctx->io_handler = cli_io_handler_show_cert_detail;
+ }
+
+ return 0;
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_err(appctx, "Can't display the certificate: Not found or the certificate is a bundle!\n");
+}
+
/* release function of the `set ssl cert' command, free things and unlock the spinlock.
 * Also used as the release handler of `commit ssl cert': frees the
 * not-yet-committed store (if any) and the pending error message. */
static void cli_release_commit_cert(struct appctx *appctx)
{
	struct commit_cert_ctx *ctx = appctx->svcctx;

	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
	/* free every new sni_ctx and the new store, which are not in the trees so no spinlock there */
	if (ctx->new_ckchs)
		ckch_store_free(ctx->new_ckchs);
	ha_free(&ctx->err);
}
+
+
/*
 * Rebuild a new instance 'new_inst' based on an old instance 'ckchi' and a
 * specific ckch_store.
 * The new instance inherits the old one's crt-list filters, default flag,
 * server binding and crtlist_entry, and gets freshly prepared SSL_CTXs.
 * Returns 0 in case of success, 1 otherwise.
 */
int ckch_inst_rebuild(struct ckch_store *ckch_store, struct ckch_inst *ckchi,
		      struct ckch_inst **new_inst, char **err)
{
	int retval = 0;
	int errcode = 0;
	struct sni_ctx *sc0, *sc0s;
	char **sni_filter = NULL;
	int fcount = 0;

	/* carry over the SNI filters of the crt-list entry, if any */
	if (ckchi->crtlist_entry) {
		sni_filter = ckchi->crtlist_entry->filters;
		fcount = ckchi->crtlist_entry->fcount;
	}

	/* server-side and bind-side instances are created differently */
	if (ckchi->is_server_instance)
		errcode |= ckch_inst_new_load_srv_store(ckch_store->path, ckch_store, new_inst, err);
	else
		errcode |= ckch_inst_new_load_store(ckch_store->path, ckch_store, ckchi->bind_conf, ckchi->ssl_conf, sni_filter, fcount, new_inst, err);

	if (errcode & ERR_CODE)
		return 1;

	/* if the previous ckchi was used as the default */
	if (ckchi->is_default)
		(*new_inst)->is_default = 1;

	(*new_inst)->is_server_instance = ckchi->is_server_instance;
	(*new_inst)->server = ckchi->server;
	/* Create a new SSL_CTX and link it to the new instance. */
	if ((*new_inst)->is_server_instance) {
		retval = ssl_sock_prep_srv_ctx_and_inst(ckchi->server, (*new_inst)->ctx, (*new_inst));
		if (retval)
			return 1;
	}

	/* create the link to the crtlist_entry */
	(*new_inst)->crtlist_entry = ckchi->crtlist_entry;

	/* we need to initialize the SSL_CTX generated */
	/* this iterate on the newly generated SNIs in the new instance to prepare their SSL_CTX */
	list_for_each_entry_safe(sc0, sc0s, &(*new_inst)->sni_ctx, by_ckch_inst) {
		if (!sc0->order) { /* we initialized only the first SSL_CTX because it's the same in the other sni_ctx's */
			errcode |= ssl_sock_prep_ctx_and_inst(ckchi->bind_conf, ckchi->ssl_conf, sc0->ctx, *new_inst, err);
			if (errcode & ERR_CODE)
				return 1;
		}
	}

	return 0;
}
+
/*
 * Load all the new SNIs of a newly built ckch instance in the trees, or replace
 * a server's main ckch instance.
 * Must be called with the appropriate locks NOT held: it takes the server
 * SSL lock or the bind_conf SNI lock itself depending on the instance type.
 */
static void __ssl_sock_load_new_ckch_instance(struct ckch_inst *ckchi)
{
	/* The bind_conf will be null on server ckch_instances. */
	if (ckchi->is_server_instance) {
		int i;
		/* a lock is needed here since we have to free the SSL cache */
		HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &ckchi->server->ssl_ctx.lock);
		/* free the server current SSL_CTX */
		SSL_CTX_free(ckchi->server->ssl_ctx.ctx);
		/* Actual ssl context update: take a reference for the server
		 * so the ctx survives the instance */
		SSL_CTX_up_ref(ckchi->ctx);
		ckchi->server->ssl_ctx.ctx = ckchi->ctx;
		ckchi->server->ssl_ctx.inst = ckchi;

		/* flush the session cache of the server: cached sessions were
		 * negotiated with the previous certificate */
		for (i = 0; i < global.nbthread; i++) {
			ha_free(&ckchi->server->ssl_ctx.reused_sess[i].sni);
			ha_free(&ckchi->server->ssl_ctx.reused_sess[i].ptr);
		}
		HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &ckchi->server->ssl_ctx.lock);

	} else {
		HA_RWLOCK_WRLOCK(SNI_LOCK, &ckchi->bind_conf->sni_lock);
		ssl_sock_load_cert_sni(ckchi, ckchi->bind_conf);
		HA_RWLOCK_WRUNLOCK(SNI_LOCK, &ckchi->bind_conf->sni_lock);
	}
}
+
+/*
+ * Delete a ckch instance that was replaced after a CLI command.
+ */
+static void __ckch_inst_free_locked(struct ckch_inst *ckchi)
+{
+ if (ckchi->is_server_instance) {
+ /* no lock for servers */
+ ckch_inst_free(ckchi);
+ } else {
+ struct bind_conf __maybe_unused *bind_conf = ckchi->bind_conf;
+
+ HA_RWLOCK_WRLOCK(SNI_LOCK, &bind_conf->sni_lock);
+ ckch_inst_free(ckchi);
+ HA_RWLOCK_WRUNLOCK(SNI_LOCK, &bind_conf->sni_lock);
+ }
+}
+
/* Replace a ckch_store in the ckch tree and insert the whole dependencies,
* then free the previous dependencies and store.
* Used in the case of a certificate update.
*
* Every dependencies must allocated before using this function.
*
* This function can't fail as it only update pointers, and does not alloc anything.
*
* /!\ This function must be used under the ckch lock. /!\
*
* - Insert every dependencies (SNI, crtlist_entry, ckch_inst, etc)
* - Delete the old ckch_store from the tree
* - Insert the new ckch_store
* - Free the old dependencies and the old ckch_store
*/
void ckch_store_replace(struct ckch_store *old_ckchs, struct ckch_store *new_ckchs)
{
	struct crtlist_entry *entry;
	struct ckch_inst *ckchi, *ckchis;

	/* move the crt-list entries from the old store to the new one and
	 * re-key their ebpt nodes on the new store pointer */
	LIST_SPLICE(&new_ckchs->crtlist_entry, &old_ckchs->crtlist_entry);
	list_for_each_entry(entry, &new_ckchs->crtlist_entry, by_ckch_store) {
		ebpt_delete(&entry->node);
		/* change the ptr and reinsert the node */
		entry->node.key = new_ckchs;
		ebpt_insert(&entry->crtlist->entries, &entry->node);
	}
	/* insert the new ckch_insts in the crtlist_entry */
	list_for_each_entry(ckchi, &new_ckchs->ckch_inst, by_ckchs) {
		if (ckchi->crtlist_entry)
			LIST_INSERT(&ckchi->crtlist_entry->ckch_inst, &ckchi->by_crtlist_entry);
	}
	/* First, we insert every new SNIs in the trees, also replace the default_ctx */
	list_for_each_entry_safe(ckchi, ckchis, &new_ckchs->ckch_inst, by_ckchs) {
		__ssl_sock_load_new_ckch_instance(ckchi);
	}
	/* delete the old sni_ctx, the old ckch_insts and the ckch_store */
	list_for_each_entry_safe(ckchi, ckchis, &old_ckchs->ckch_inst, by_ckchs) {
		__ckch_inst_free_locked(ckchi);
	}

	/* note: freeing the old store removes its node from the tree, making
	 * room for the new one inserted just after */
	ckch_store_free(old_ckchs);
	ebst_insert(&ckchs_tree, &new_ckchs->node);
}
+
+
/*
 * This function tries to create the new ckch_inst and their SNIs.
 * It is a resumable state machine (ctx->state): INIT prints the header, GEN
 * rebuilds every instance (yielding every 10 to keep latency low), INSERT
 * atomically swaps the stores, SUCCESS/ERROR print the outcome, FIN clears
 * the transaction. Returns 1 when finished, 0 to be called again.
 *
 * /!\ don't forget to update __hlua_ckch_commit() if you changes things there. /!\
 */
static int cli_io_handler_commit_cert(struct appctx *appctx)
{
	struct commit_cert_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	int y = 0;
	struct ckch_store *old_ckchs, *new_ckchs = NULL;
	struct ckch_inst *ckchi;

	usermsgs_clr("CLI");
	/* FIXME: Don't watch the other side !*/
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		goto end;

	while (1) {
		switch (ctx->state) {
		case CERT_ST_INIT:
			/* This state just print the update message */
			chunk_printf(&trash, "Committing %s", ckchs_transaction.path);
			if (applet_putchk(appctx, &trash) == -1)
				goto yield;

			ctx->state = CERT_ST_GEN;
			__fallthrough;
		case CERT_ST_GEN:
			/*
			 * This state generates the ckch instances with their
			 * sni_ctxs and SSL_CTX.
			 *
			 * Since the SSL_CTX generation can be CPU consumer, we
			 * yield every 10 instances.
			 */

			old_ckchs = ctx->old_ckchs;
			new_ckchs = ctx->new_ckchs;

			/* get the next ckchi to regenerate */
			ckchi = ctx->next_ckchi;
			/* we didn't start yet, set it to the first elem */
			if (ckchi == NULL)
				ckchi = LIST_ELEM(old_ckchs->ckch_inst.n, typeof(ckchi), by_ckchs);

			/* walk through the old ckch_inst and creates new ckch_inst using the updated ckchs */
			list_for_each_entry_from(ckchi, &old_ckchs->ckch_inst, by_ckchs) {
				struct ckch_inst *new_inst;

				/* save the next ckchi to compute in case of yield */
				ctx->next_ckchi = ckchi;

				/* it takes a lot of CPU to creates SSL_CTXs, so we yield every 10 CKCH instances */
				if (y >= 10) {
					applet_have_more_data(appctx); /* let's come back later */
					goto yield;
				}

				/* display one dot per new instance */
				if (applet_putstr(appctx, ".") == -1)
					goto yield;

				ctx->err = NULL;
				if (ckch_inst_rebuild(new_ckchs, ckchi, &new_inst, &ctx->err)) {
					ctx->state = CERT_ST_ERROR;
					goto error;
				}

				/* link the new ckch_inst to the duplicate */
				LIST_APPEND(&new_ckchs->ckch_inst, &new_inst->by_ckchs);
				y++;
			}
			ctx->state = CERT_ST_INSERT;
			__fallthrough;
		case CERT_ST_INSERT:
			/* The generation is finished, we can insert everything */

			old_ckchs = ctx->old_ckchs;
			new_ckchs = ctx->new_ckchs;

			/* insert everything and remove the previous objects */
			ckch_store_replace(old_ckchs, new_ckchs);
			/* the stores are now owned by the trees; clear them so
			 * the release handler doesn't free them */
			ctx->new_ckchs = ctx->old_ckchs = NULL;
			ctx->state = CERT_ST_SUCCESS;
			__fallthrough;
		case CERT_ST_SUCCESS:
			chunk_printf(&trash, "\n%sSuccess!\n", usermsgs_str());
			if (applet_putchk(appctx, &trash) == -1)
				goto yield;
			ctx->state = CERT_ST_FIN;
			__fallthrough;
		case CERT_ST_FIN:
			/* we achieved the transaction, we can set everything to NULL */
			ckchs_transaction.new_ckchs = NULL;
			ckchs_transaction.old_ckchs = NULL;
			ckchs_transaction.path = NULL;
			goto end;

		case CERT_ST_ERROR:
 error:
			chunk_printf(&trash, "\n%s%sFailed!\n", usermsgs_str(), ctx->err);
			if (applet_putchk(appctx, &trash) == -1)
				goto yield;
			ctx->state = CERT_ST_FIN;
			break;
		}
	}
end:
	usermsgs_clr(NULL);
	/* success: call the release function and don't come back */
	return 1;

yield:
	usermsgs_clr(NULL);
	return 0; /* should come back */
}
+
+/*
+ * Parsing function of 'commit ssl cert'
+ */
+static int cli_parse_commit_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct commit_cert_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'commit ssl cert' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't commit the certificate!\nOperations on certificates are currently locked!\n");
+
+ if (!ckchs_transaction.path) {
+ memprintf(&err, "No ongoing transaction! !\n");
+ goto error;
+ }
+
+ if (strcmp(ckchs_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", ckchs_transaction.path, args[3]);
+ goto error;
+ }
+
+ /* if a certificate is here, a private key must be here too */
+ if (ckchs_transaction.new_ckchs->data->cert && !ckchs_transaction.new_ckchs->data->key) {
+ memprintf(&err, "The transaction must contain at least a certificate and a private key!\n");
+ goto error;
+ }
+
+ if (!X509_check_private_key(ckchs_transaction.new_ckchs->data->cert, ckchs_transaction.new_ckchs->data->key)) {
+ memprintf(&err, "inconsistencies between private key and certificate loaded '%s'.\n", ckchs_transaction.path);
+ goto error;
+ }
+
+ /* init the appctx structure */
+ ctx->state = CERT_ST_INIT;
+ ctx->next_ckchi = NULL;
+ ctx->new_ckchs = ckchs_transaction.new_ckchs;
+ ctx->old_ckchs = ckchs_transaction.old_ckchs;
+
+ /* we don't unlock there, it will be unlock after the IO handler, in the release handler */
+ return 0;
+
+error:
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "%sCan't commit %s!\n", err ? err : "", args[3]);
+
+ return cli_dynerr(appctx, err);
+}
+
+
+
+
+/*
+ * Parsing function of `set ssl cert`, it updates or creates a temporary ckch.
+ * It uses a set_cert_ctx context, and ckchs_transaction under a lock.
+ */
+static int cli_parse_set_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct ckch_store *new_ckchs = NULL;
+ struct ckch_store *old_ckchs = NULL;
+ char *err = NULL;
+ int i;
+ int errcode = 0;
+ char *end;
+ struct cert_exts *cert_ext = &cert_exts[0]; /* default one, PEM */
+ struct ckch_data *data;
+ struct buffer *buf;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3] || !payload)
+ return cli_err(appctx, "'set ssl cert' expects a filename and a certificate as a payload\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't update the certificate!\nOperations on certificates are currently locked!\n");
+
+ if ((buf = alloc_trash_chunk()) == NULL) {
+ memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ if (!chunk_strcpy(buf, args[3])) {
+ memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ /* check which type of file we want to update */
+ for (i = 0; cert_exts[i].ext != NULL; i++) {
+ end = strrchr(buf->area, '.');
+ if (end && *cert_exts[i].ext && (strcmp(end + 1, cert_exts[i].ext) == 0)) {
+ *end = '\0';
+ buf->data = strlen(buf->area);
+ cert_ext = &cert_exts[i];
+ break;
+ }
+ }
+
+ /* if there is an ongoing transaction */
+ if (ckchs_transaction.path) {
+ /* if there is an ongoing transaction, check if this is the same file */
+ if (strcmp(ckchs_transaction.path, buf->area) != 0) {
+ /* we didn't find the transaction, must try more cases below */
+
+ /* if the del-ext option is activated we should try to take a look at a ".crt" too. */
+ if (cert_ext->type != CERT_TYPE_PEM && global_ssl.extra_files_noext) {
+ if (!chunk_strcat(buf, ".crt")) {
+ memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ if (strcmp(ckchs_transaction.path, buf->area) != 0) {
+ /* remove .crt of the error message */
+ *(b_orig(buf) + b_data(buf) + strlen(".crt")) = '\0';
+ b_sub(buf, strlen(".crt"));
+
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", ckchs_transaction.path, buf->area);
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+ }
+ }
+
+ old_ckchs = ckchs_transaction.new_ckchs;
+
+ } else {
+
+ /* lookup for the certificate in the tree */
+ old_ckchs = ckchs_lookup(buf->area);
+
+ if (!old_ckchs) {
+ /* if the del-ext option is activated we should try to take a look at a ".crt" too. */
+ if (cert_ext->type != CERT_TYPE_PEM && global_ssl.extra_files_noext) {
+ if (!chunk_strcat(buf, ".crt")) {
+ memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+ old_ckchs = ckchs_lookup(buf->area);
+ }
+ }
+ }
+
+ if (!old_ckchs) {
+ memprintf(&err, "%sCan't replace a certificate which is not referenced by the configuration!\n",
+ err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ /* duplicate the ckch store */
+ new_ckchs = ckchs_dup(old_ckchs);
+ if (!new_ckchs) {
+ memprintf(&err, "%sCannot allocate memory!\n",
+ err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ /* Reset the OCSP CID */
+ if (cert_ext->type == CERT_TYPE_PEM || cert_ext->type == CERT_TYPE_KEY ||
+ cert_ext->type == CERT_TYPE_ISSUER) {
+ OCSP_CERTID_free(new_ckchs->data->ocsp_cid);
+ new_ckchs->data->ocsp_cid = NULL;
+ }
+
+ data = new_ckchs->data;
+
+ /* apply the change on the duplicate */
+ if (cert_ext->load(buf->area, payload, data, &err) != 0) {
+ memprintf(&err, "%sCan't load the payload\n", err ? err : "");
+ errcode |= ERR_ALERT | ERR_FATAL;
+ goto end;
+ }
+
+ /* we succeed, we can save the ckchs in the transaction */
+
+ /* if there wasn't a transaction, update the old ckchs */
+ if (!ckchs_transaction.old_ckchs) {
+ ckchs_transaction.old_ckchs = old_ckchs;
+ ckchs_transaction.path = old_ckchs->path;
+ err = memprintf(&err, "Transaction created for certificate %s!\n", ckchs_transaction.path);
+ } else {
+ err = memprintf(&err, "Transaction updated for certificate %s!\n", ckchs_transaction.path);
+
+ }
+
+ /* free the previous ckchs if there was a transaction */
+ ckch_store_free(ckchs_transaction.new_ckchs);
+
+ ckchs_transaction.new_ckchs = new_ckchs;
+
+
+ /* creates the SNI ctxs later in the IO handler */
+
+end:
+ free_trash_chunk(buf);
+
+ if (errcode & ERR_CODE) {
+ ckch_store_free(new_ckchs);
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
+ } else {
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+ }
+ /* TODO: handle the ERR_WARN which are not handled because of the io_handler */
+}
+
+/* parsing function of 'abort ssl cert' */
+static int cli_parse_abort_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'abort ssl cert' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't abort!\nOperations on certificates are currently locked!\n");
+
+ if (!ckchs_transaction.path) {
+ memprintf(&err, "No ongoing transaction!\n");
+ goto error;
+ }
+
+ if (strcmp(ckchs_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to abort a transaction for '%s'\n", ckchs_transaction.path, args[3]);
+ goto error;
+ }
+
+ /* Only free the ckchs there, because the SNI and instances were not generated yet */
+ ckch_store_free(ckchs_transaction.new_ckchs);
+ ckchs_transaction.new_ckchs = NULL;
+ ckchs_transaction.old_ckchs = NULL;
+ ckchs_transaction.path = NULL;
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ err = memprintf(&err, "Transaction aborted for certificate '%s'!\n", args[3]);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ return cli_dynerr(appctx, err);
+}
+
+/* parsing function of 'new ssl cert' */
+static int cli_parse_new_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct ckch_store *store;
+ char *err = NULL;
+ char *path;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'new ssl cert' expects a filename\n");
+
+ path = args[3];
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't create a certificate!\nOperations on certificates are currently locked!\n");
+
+ store = ckchs_lookup(path);
+ if (store != NULL) {
+ memprintf(&err, "Certificate '%s' already exists!\n", path);
+ store = NULL; /* we don't want to free it */
+ goto error;
+ }
+ /* we won't support multi-certificate bundle here */
+ store = ckch_store_new(path);
+ if (!store) {
+ memprintf(&err, "unable to allocate memory.\n");
+ goto error;
+ }
+
+ /* insert into the ckchs tree */
+ ebst_insert(&ckchs_tree, &store->node);
+ memprintf(&err, "New empty certificate store '%s'!\n", args[3]);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+error:
+ free(store);
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
+/* parsing function of 'del ssl cert' */
+static int cli_parse_del_cert(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct ckch_store *store;
+ char *err = NULL;
+ char *filename;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'del ssl cert' expects a certificate name\n");
+
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't delete the certificate!\nOperations on certificates are currently locked!\n");
+
+ filename = args[3];
+
+ if (ckchs_transaction.path && strcmp(ckchs_transaction.path, filename) == 0) {
+ memprintf(&err, "ongoing transaction for the certificate '%s'", filename);
+ goto error;
+ }
+
+ store = ckchs_lookup(filename);
+ if (store == NULL) {
+ memprintf(&err, "certificate '%s' doesn't exist!\n", filename);
+ goto error;
+ }
+ if (!LIST_ISEMPTY(&store->ckch_inst)) {
+ memprintf(&err, "certificate '%s' in use, can't be deleted!\n", filename);
+ goto error;
+ }
+
+ ebmb_delete(&store->node);
+ ckch_store_free(store);
+
+ memprintf(&err, "Certificate '%s' deleted!\n", filename);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ memprintf(&err, "Can't remove the certificate: %s\n", err ? err : "");
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
+
+
+/* parsing function of 'new ssl ca-file' */
+static int cli_parse_new_cafile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct cafile_entry *cafile_entry;
+ char *err = NULL;
+ char *path;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'new ssl ca-file' expects a filename\n");
+
+ path = args[3];
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't create a CA file!\nOperations on certificates are currently locked!\n");
+
+ cafile_entry = ssl_store_get_cafile_entry(path, 0);
+ if (cafile_entry) {
+ memprintf(&err, "CA file '%s' already exists!\n", path);
+ goto error;
+ }
+
+ cafile_entry = ssl_store_create_cafile_entry(path, NULL, CAFILE_CERT);
+ if (!cafile_entry) {
+ memprintf(&err, "%sCannot allocate memory!\n",
+ err ? err : "");
+ goto error;
+ }
+
+ /* Add the newly created cafile_entry to the tree so that
+ * any new ckch instance created from now can use it. */
+ if (ssl_store_add_uncommitted_cafile_entry(cafile_entry))
+ goto error;
+
+ memprintf(&err, "New CA file created '%s'!\n", path);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
/*
 * Parsing function of `set ssl ca-file` (also reached by `add ssl ca-file`,
 * detected through args[0]). It takes a CA file name plus a PEM payload and
 * stores the parsed CAs into the global cafile_transaction WITHOUT committing
 * anything: instances and SNI contexts are only rebuilt later by
 * 'commit ssl ca-file'. The CKCH lock is always released before returning.
 */
static int cli_parse_set_cafile(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct cafile_entry *old_cafile_entry = NULL;
	struct cafile_entry *new_cafile_entry = NULL;
	char *err = NULL;
	int errcode = 0;
	struct buffer *buf;
	int add_cmd = 0; /* 1 = "add ssl ca-file" (append), 0 = "set" (replace) */

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* this is "add ssl ca-file" */
	if (*args[0] == 'a')
		add_cmd = 1;

	if (!*args[3] || !payload)
		return cli_err(appctx, "'set ssl ca-file' expects a filename and CAs as a payload\n");

	/* The operations on the CKCH architecture are locked so we can
	 * manipulate ckch_store and ckch_inst */
	if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
		return cli_err(appctx, "Can't update the CA file!\nOperations on certificates are currently locked!\n");

	if ((buf = alloc_trash_chunk()) == NULL) {
		memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* work on a copy of the file name */
	if (!chunk_strcpy(buf, args[3])) {
		memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	old_cafile_entry = NULL;
	new_cafile_entry = NULL;

	/* if there is an ongoing transaction */
	if (cafile_transaction.path) {
		/* if there is an ongoing transaction, check if this is the same file */
		if (strcmp(cafile_transaction.path, buf->area) != 0) {
			memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", cafile_transaction.path, buf->area);
			errcode |= ERR_ALERT | ERR_FATAL;
			goto end;
		}
		old_cafile_entry = cafile_transaction.old_cafile_entry;
	} else {
		/* lookup for the certificate in the tree */
		old_cafile_entry = ssl_store_get_cafile_entry(buf->area, 0);
	}

	/* only existing (configured) CA files may be updated from the CLI */
	if (!old_cafile_entry) {
		memprintf(&err, "%sCan't replace a CA file which is not referenced by the configuration!\n",
			  err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* if the transaction is new, duplicate the old_ca_file_entry, otherwise duplicate the cafile in the current transaction */
	if (cafile_transaction.new_cafile_entry)
		new_cafile_entry = ssl_store_dup_cafile_entry(cafile_transaction.new_cafile_entry);
	else
		new_cafile_entry = ssl_store_dup_cafile_entry(old_cafile_entry);

	if (!new_cafile_entry) {
		memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* Fill the new entry with the new CAs. The add_cmd variable determine
	   if we flush the X509_STORE or not */
	if (ssl_store_load_ca_from_buf(new_cafile_entry, payload, add_cmd)) {
		memprintf(&err, "%sInvalid payload\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* we succeed, we can save the ca in the transaction */

	/* if there wasn't a transaction, update the old CA */
	if (!cafile_transaction.old_cafile_entry) {
		cafile_transaction.old_cafile_entry = old_cafile_entry;
		cafile_transaction.path = old_cafile_entry->path;
		err = memprintf(&err, "transaction created for CA %s!\n", cafile_transaction.path);
	} else {
		err = memprintf(&err, "transaction updated for CA %s!\n", cafile_transaction.path);
	}

	/* free the previous CA if there was a transaction */
	ssl_store_delete_cafile_entry(cafile_transaction.new_cafile_entry);

	cafile_transaction.new_cafile_entry = new_cafile_entry;

	/* creates the SNI ctxs later in the IO handler */

end:
	free_trash_chunk(buf);

	if (errcode & ERR_CODE) {
		/* on error the new entry was never stored in the transaction,
		 * so it can safely be released here */
		ssl_store_delete_cafile_entry(new_cafile_entry);
		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
		return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
	} else {

		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
		return cli_dynmsg(appctx, LOG_NOTICE, err);
	}
}
+
+
+/*
+ * Parsing function of 'commit ssl ca-file'.
+ * It uses a commit_cacrlfile_ctx that's also shared with "commit ssl crl-file".
+ */
+static int cli_parse_commit_cafile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct commit_cacrlfile_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'commit ssl ca-file' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't commit the CA file!\nOperations on certificates are currently locked!\n");
+
+ if (!cafile_transaction.path) {
+ memprintf(&err, "No ongoing transaction! !\n");
+ goto error;
+ }
+
+ if (strcmp(cafile_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", cafile_transaction.path, args[3]);
+ goto error;
+ }
+ /* init the appctx structure */
+ ctx->state = CACRL_ST_INIT;
+ ctx->next_ckchi_link = NULL;
+ ctx->old_entry = cafile_transaction.old_cafile_entry;
+ ctx->new_entry = cafile_transaction.new_cafile_entry;
+ ctx->cafile_type = CAFILE_CERT;
+
+ return 0;
+
+error:
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "%sCan't commit %s!\n", err ? err : "", args[3]);
+
+ return cli_dynerr(appctx, err);
+}
+
/*
 * This function tries to create new ckch instances and their SNIs using a newly
 * set certificate authority (CA file) or a newly set Certificate Revocation
 * List (CRL), depending on the command being called.
 *
 * IO handler shared by 'commit ssl ca-file' and 'commit ssl crl-file'. It is
 * entered with the CKCH lock still held by the parsing function, and runs a
 * state machine (ctx->state) so that it can yield while regenerating the
 * SSL_CTXs, which is CPU intensive. Returns 1 when done (the release callback
 * then runs and unlocks), or 0 to be called again later.
 */
static int cli_io_handler_commit_cafile_crlfile(struct appctx *appctx)
{
	struct commit_cacrlfile_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	int y = 0; /* instances rebuilt during this call, used to yield every 10 */
	struct cafile_entry *old_cafile_entry = ctx->old_entry;
	struct cafile_entry *new_cafile_entry = ctx->new_entry;
	struct ckch_inst_link *ckchi_link;
	char *path;

	/* FIXME: Don't watch the other side !*/
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
		goto end;

	/* The ctx was already validated by the ca-file/crl-file parsing
	 * function. Entries can only be NULL in CACRL_ST_SUCCESS or
	 * CACRL_ST_FIN states
	 */
	switch (ctx->cafile_type) {
	case CAFILE_CERT:
		path = cafile_transaction.path;
		break;
	case CAFILE_CRL:
		path = crlfile_transaction.path;
		break;
	default:
		path = NULL;
		goto error;
	}

	while (1) {
		switch (ctx->state) {
		case CACRL_ST_INIT:
			/* This state just print the update message */
			chunk_printf(&trash, "Committing %s", path);
			if (applet_putchk(appctx, &trash) == -1)
				goto yield;

			ctx->state = CACRL_ST_GEN;
			__fallthrough;
		case CACRL_ST_GEN:
			/*
			 * This state generates the ckch instances with their
			 * sni_ctxs and SSL_CTX.
			 *
			 * Since the SSL_CTX generation can be CPU consumer, we
			 * yield every 10 instances.
			 */

			/* get the next ckchi to regenerate */
			ckchi_link = ctx->next_ckchi_link;

			/* we didn't start yet, set it to the first elem */
			if (ckchi_link == NULL) {
				ckchi_link = LIST_ELEM(old_cafile_entry->ckch_inst_link.n, typeof(ckchi_link), list);
				/* Add the newly created cafile_entry to the tree so that
				 * any new ckch instance created from now can use it. */
				if (ssl_store_add_uncommitted_cafile_entry(new_cafile_entry)) {
					ctx->state = CACRL_ST_ERROR;
					goto error;
				}
			}

			list_for_each_entry_from(ckchi_link, &old_cafile_entry->ckch_inst_link, list) {
				struct ckch_inst *new_inst;

				/* save the next ckchi to compute */
				ctx->next_ckchi_link = ckchi_link;

				/* it takes a lot of CPU to creates SSL_CTXs, so we yield every 10 CKCH instances */
				if (y >= 10) {
					applet_have_more_data(appctx); /* let's come back later */
					goto yield;
				}

				/* display one dot per new instance */
				if (applet_putstr(appctx, ".") == -1)
					goto yield;

				/* Rebuild a new ckch instance that uses the same ckch_store
				 * than a reference ckchi instance but will use a new CA file. */
				ctx->err = NULL;
				if (ckch_inst_rebuild(ckchi_link->ckch_inst->ckch_store, ckchi_link->ckch_inst, &new_inst, &ctx->err)) {
					ctx->state = CACRL_ST_ERROR;
					goto error;
				}

				y++;
			}

			ctx->state = CACRL_ST_INSERT;
			__fallthrough;
		case CACRL_ST_INSERT:
			/* The generation is finished, we can insert everything */

			/* insert the new ckch_insts in the crtlist_entry */
			list_for_each_entry(ckchi_link, &new_cafile_entry->ckch_inst_link, list) {
				if (ckchi_link->ckch_inst->crtlist_entry)
					LIST_INSERT(&ckchi_link->ckch_inst->crtlist_entry->ckch_inst,
						    &ckchi_link->ckch_inst->by_crtlist_entry);
			}

			/* First, we insert every new SNIs in the trees, also replace the default_ctx */
			list_for_each_entry(ckchi_link, &new_cafile_entry->ckch_inst_link, list) {
				__ssl_sock_load_new_ckch_instance(ckchi_link->ckch_inst);
			}

			/* delete the old sni_ctx, the old ckch_insts
			 * and the ckch_store. ckch_inst_free() also
			 * manipulates the list so it's cleaner to loop
			 * until it's empty */
			while (!LIST_ISEMPTY(&old_cafile_entry->ckch_inst_link)) {
				ckchi_link = LIST_ELEM(old_cafile_entry->ckch_inst_link.n, typeof(ckchi_link), list);

				LIST_DEL_INIT(&ckchi_link->list); /* must reinit because ckch_inst checks the list */
				__ckch_inst_free_locked(ckchi_link->ckch_inst);
				free(ckchi_link);
			}

			/* Remove the old cafile entry from the tree */
			ebmb_delete(&old_cafile_entry->node);
			ssl_store_delete_cafile_entry(old_cafile_entry);

			/* entries are now committed: clear them so the release
			 * callback won't try to free the new one */
			ctx->old_entry = ctx->new_entry = NULL;
			ctx->state = CACRL_ST_SUCCESS;
			__fallthrough;
		case CACRL_ST_SUCCESS:
			if (applet_putstr(appctx, "\nSuccess!\n") == -1)
				goto yield;
			ctx->state = CACRL_ST_FIN;
			__fallthrough;
		case CACRL_ST_FIN:
			/* we achieved the transaction, we can set everything to NULL */
			switch (ctx->cafile_type) {
			case CAFILE_CERT:
				cafile_transaction.old_cafile_entry = NULL;
				cafile_transaction.new_cafile_entry = NULL;
				cafile_transaction.path = NULL;
				break;
			case CAFILE_CRL:
				crlfile_transaction.old_crlfile_entry = NULL;
				crlfile_transaction.new_crlfile_entry = NULL;
				crlfile_transaction.path = NULL;
				break;
			}
			goto end;

		case CACRL_ST_ERROR:
		  error:
			/* NOTE(review): ctx->err may still be NULL on some
			 * error paths (e.g. ssl_store_add_uncommitted_cafile_entry
			 * failure); "%s" on NULL is undefined — confirm upstream */
			chunk_printf(&trash, "\n%sFailed!\n", ctx->err);
			if (applet_putchk(appctx, &trash) == -1)
				goto yield;
			ctx->state = CACRL_ST_FIN;
			break;
		}
	}
end:
	/* success: call the release function and don't come back */
	return 1;
yield:
	return 0; /* should come back */
}
+
+
+/* parsing function of 'abort ssl ca-file' */
+static int cli_parse_abort_cafile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'abort ssl ca-file' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't abort!\nOperations on certificates are currently locked!\n");
+
+ if (!cafile_transaction.path) {
+ memprintf(&err, "No ongoing transaction!\n");
+ goto error;
+ }
+
+ if (strcmp(cafile_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to abort a transaction for '%s'\n", cafile_transaction.path, args[3]);
+ goto error;
+ }
+
+ /* Only free the uncommitted cafile_entry here, because the SNI and instances were not generated yet */
+ ssl_store_delete_cafile_entry(cafile_transaction.new_cafile_entry);
+ cafile_transaction.new_cafile_entry = NULL;
+ cafile_transaction.old_cafile_entry = NULL;
+ cafile_transaction.path = NULL;
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ err = memprintf(&err, "Transaction aborted for certificate '%s'!\n", args[3]);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ return cli_dynerr(appctx, err);
+}
+
+/* release function of the `commit ssl ca-file' command, free things and unlock the spinlock.
+ * It uses a commit_cacrlfile_ctx context.
+ */
+static void cli_release_commit_cafile(struct appctx *appctx)
+{
+ struct commit_cacrlfile_ctx *ctx = appctx->svcctx;
+ struct cafile_entry *new_cafile_entry = ctx->new_entry;
+
+ /* Remove the uncommitted cafile_entry from the tree. */
+ if (new_cafile_entry) {
+ ebmb_delete(&new_cafile_entry->node);
+ ssl_store_delete_cafile_entry(new_cafile_entry);
+ }
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ ha_free(&ctx->err);
+}
+
+
/* IO handler of details "show ssl ca-file <filename[:index]>".
 * It uses a show_cafile_ctx context, and the global
 * cafile_transaction.new_cafile_entry in read-only.
 * Dumps either every certificate of the CA file (ctx->show_all) or only the
 * one at ctx->ca_index. Yields when the output buffer is full, saving the
 * current index in the ctx so the next call resumes where it stopped.
 * Returns 1 when the dump is finished, 0 to be called again.
 */
static int cli_io_handler_show_cafile_detail(struct appctx *appctx)
{
	struct show_cafile_ctx *ctx = appctx->svcctx;
	struct cafile_entry *cafile_entry = ctx->cur_cafile_entry;
	struct buffer *out = alloc_trash_chunk();
	int i = 0;
	X509 *cert;
	STACK_OF(X509_OBJECT) *objs;
	int retval = 0;
	int ca_index = ctx->ca_index;
	int show_all = ctx->show_all;

	if (!out)
		goto end_no_putchk;

	/* a '*' prefix marks the uncommitted (transaction) entry */
	chunk_appendf(out, "Filename: ");
	if (cafile_entry == cafile_transaction.new_cafile_entry)
		chunk_appendf(out, "*");
	chunk_appendf(out, "%s\n", cafile_entry->path);

	chunk_appendf(out, "Status: ");
	if (!cafile_entry->ca_store)
		chunk_appendf(out, "Empty\n");
	else if (LIST_ISEMPTY(&cafile_entry->ckch_inst_link))
		chunk_appendf(out, "Unused\n");
	else
		chunk_appendf(out, "Used\n");

	/* nothing else to dump without a CA store */
	if (!cafile_entry->ca_store)
		goto end;

	objs = X509_STORE_get0_objects(cafile_entry->ca_store);
	for (i = ca_index; i < sk_X509_OBJECT_num(objs); i++) {

		cert = X509_OBJECT_get0_X509(sk_X509_OBJECT_value(objs, i));
		if (!cert)
			continue;

		/* file starts at line 1 */
		chunk_appendf(out, " \nCertificate #%d:\n", i+1);
		retval = show_cert_detail(cert, NULL, out);
		if (retval < 0)
			goto end_no_putchk;
		else if (retval)
			goto yield;

		if (applet_putchk(appctx, out) == -1)
			goto yield;

		if (!show_all) /* only need to dump one certificate */
			goto end;
	}

end:
	free_trash_chunk(out);
	return 1; /* end, don't come back */

end_no_putchk:
	free_trash_chunk(out);
	return 1;
yield:
	/* save the current state */
	ctx->ca_index = i;
	free_trash_chunk(out);
	return 0; /* should come back */
}
+
+
+/* parsing function for 'show ssl ca-file [cafile[:index]]'.
+ * It prepares a show_cafile_ctx context, and checks the global
+ * cafile_transaction under the ckch_lock (read only).
+ */
+static int cli_parse_show_cafile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_cafile_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ struct cafile_entry *cafile_entry;
+ int ca_index = 0;
+ char *colons;
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return cli_err(appctx, "Can't allocate memory!\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't show!\nOperations on certificates are currently locked!\n");
+
+ ctx->show_all = 1; /* show all certificates */
+ ctx->ca_index = 0;
+ /* check if there is a certificate to lookup */
+ if (*args[3]) {
+
+ /* Look for an optional CA index after the CA file name */
+ colons = strchr(args[3], ':');
+ if (colons) {
+ char *endptr;
+
+ ca_index = strtol(colons + 1, &endptr, 10);
+ /* Indexes start at 1 */
+ if (colons + 1 == endptr || *endptr != '\0' || ca_index <= 0) {
+ memprintf(&err, "wrong CA index after colons in '%s'!", args[3]);
+ goto error;
+ }
+ *colons = '\0';
+ ctx->ca_index = ca_index - 1; /* we start counting at 0 in the ca_store, but at 1 on the CLI */
+ ctx->show_all = 0; /* show only one certificate */
+ }
+
+ if (*args[3] == '*') {
+ if (!cafile_transaction.new_cafile_entry)
+ goto error;
+
+ cafile_entry = cafile_transaction.new_cafile_entry;
+
+ if (strcmp(args[3] + 1, cafile_entry->path) != 0)
+ goto error;
+
+ } else {
+ /* Get the "original" cafile_entry and not the
+ * uncommitted one if it exists. */
+ if ((cafile_entry = ssl_store_get_cafile_entry(args[3], 1)) == NULL || cafile_entry->type != CAFILE_CERT)
+ goto error;
+ }
+
+ ctx->cur_cafile_entry = cafile_entry;
+ /* use the IO handler that shows details */
+ appctx->io_handler = cli_io_handler_show_cafile_detail;
+ }
+
+ return 0;
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ if (err)
+ return cli_dynerr(appctx, err);
+ return cli_err(appctx, "Can't display the CA file : Not found!\n");
+}
+
+
/* release function of the 'show ssl ca-file' command: drops the CKCH
 * spinlock that cli_parse_show_cafile() acquired and kept across the
 * whole dump */
static void cli_release_show_cafile(struct appctx *appctx)
{
	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
}
+
+
+/* This function returns the number of certificates in a cafile_entry. */
+static int get_certificate_count(struct cafile_entry *cafile_entry)
+{
+ int cert_count = 0;
+ STACK_OF(X509_OBJECT) *objs;
+
+ if (cafile_entry && cafile_entry->ca_store) {
+ objs = X509_STORE_get0_objects(cafile_entry->ca_store);
+ if (objs)
+ cert_count = sk_X509_OBJECT_num(objs);
+ }
+ return cert_count;
+}
+
/* IO handler of "show ssl ca-file". The command taking a specific CA file name
 * is managed in cli_io_handler_show_cafile_detail.
 * It uses a show_cafile_ctx and the global cafile_transaction.new_cafile_entry
 * in read-only.
 * Walks the cafile_tree, printing one line per CAFILE_CERT entry, yielding
 * (return 0) when the output buffer fills, and resuming from the saved node.
 * Returns 1 when the whole tree has been dumped.
 */
static int cli_io_handler_show_cafile(struct appctx *appctx)
{
	struct show_cafile_ctx *ctx = appctx->svcctx;
	struct buffer *trash = alloc_trash_chunk();
	struct ebmb_node *node;
	struct cafile_entry *cafile_entry = NULL;

	if (trash == NULL)
		return 1;

	/* dump the transaction header once; ctx->old_cafile_entry is
	 * (re)used here as a "transaction line already emitted" marker */
	if (!ctx->old_cafile_entry && cafile_transaction.old_cafile_entry) {
		chunk_appendf(trash, "# transaction\n");
		chunk_appendf(trash, "*%s", cafile_transaction.old_cafile_entry->path);
		chunk_appendf(trash, " - %d certificate(s)\n", get_certificate_count(cafile_transaction.new_cafile_entry));
		if (applet_putchk(appctx, trash) == -1)
			goto yield;
		ctx->old_cafile_entry = cafile_transaction.new_cafile_entry;
	}

	/* First time in this io_handler. */
	if (!ctx->cur_cafile_entry) {
		chunk_appendf(trash, "# filename\n");
		node = ebmb_first(&cafile_tree);
	} else {
		/* We yielded during a previous call. */
		node = &ctx->cur_cafile_entry->node;
	}

	while (node) {
		cafile_entry = ebmb_entry(node, struct cafile_entry, node);
		/* only CA entries are listed here; CRL entries share the tree */
		if (cafile_entry->type == CAFILE_CERT) {
			chunk_appendf(trash, "%s", cafile_entry->path);

			chunk_appendf(trash, " - %d certificate(s)\n", get_certificate_count(cafile_entry));
		}

		node = ebmb_next(node);
		if (applet_putchk(appctx, trash) == -1)
			goto yield;
	}

	ctx->cur_cafile_entry = NULL;
	free_trash_chunk(trash);
	return 1;
yield:

	/* remember where we stopped so the next call can resume */
	free_trash_chunk(trash);
	ctx->cur_cafile_entry = cafile_entry;
	return 0; /* should come back */
}
+
+/* parsing function of 'del ssl ca-file' */
+static int cli_parse_del_cafile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct cafile_entry *cafile_entry;
+ char *err = NULL;
+ char *filename;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'del ssl ca-file' expects a CA file name\n");
+
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't delete the CA file!\nOperations on certificates are currently locked!\n");
+
+ filename = args[3];
+
+ if (cafile_transaction.path && strcmp(cafile_transaction.path, filename) == 0) {
+ memprintf(&err, "ongoing transaction for the CA file '%s'", filename);
+ goto error;
+ }
+
+ cafile_entry = ssl_store_get_cafile_entry(filename, 0);
+ if (!cafile_entry) {
+ memprintf(&err, "CA file '%s' doesn't exist!\n", filename);
+ goto error;
+ }
+
+ if (!LIST_ISEMPTY(&cafile_entry->ckch_inst_link)) {
+ memprintf(&err, "CA file '%s' in use, can't be deleted!\n", filename);
+ goto error;
+ }
+
+ /* Remove the cafile_entry from the tree */
+ ebmb_delete(&cafile_entry->node);
+ ssl_store_delete_cafile_entry(cafile_entry);
+
+ memprintf(&err, "CA file '%s' deleted!\n", filename);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ memprintf(&err, "Can't remove the CA file: %s\n", err ? err : "");
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
+/* parsing function of 'new ssl crl-file' */
+static int cli_parse_new_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct cafile_entry *cafile_entry;
+ char *err = NULL;
+ char *path;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'new ssl crl-file' expects a filename\n");
+
+ path = args[3];
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't create a CRL file!\nOperations on certificates are currently locked!\n");
+
+ cafile_entry = ssl_store_get_cafile_entry(path, 0);
+ if (cafile_entry) {
+ memprintf(&err, "CRL file '%s' already exists!\n", path);
+ goto error;
+ }
+
+ cafile_entry = ssl_store_create_cafile_entry(path, NULL, CAFILE_CRL);
+ if (!cafile_entry) {
+ memprintf(&err, "%sCannot allocate memory!\n", err ? err : "");
+ goto error;
+ }
+
+ /* Add the newly created cafile_entry to the tree so that
+ * any new ckch instance created from now can use it. */
+ if (ssl_store_add_uncommitted_cafile_entry(cafile_entry))
+ goto error;
+
+ memprintf(&err, "New CRL file created '%s'!\n", path);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
/* Parsing function of `set ssl crl-file`.
 * Takes a CRL file name plus a PEM payload and stores the parsed CRL into the
 * global crlfile_transaction WITHOUT committing anything: instances and SNI
 * contexts are only rebuilt later by 'commit ssl crl-file'. The CKCH lock is
 * always released before returning. */
static int cli_parse_set_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct cafile_entry *old_crlfile_entry = NULL;
	struct cafile_entry *new_crlfile_entry = NULL;
	char *err = NULL;
	int errcode = 0;
	struct buffer *buf;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	if (!*args[3] || !payload)
		return cli_err(appctx, "'set ssl crl-file' expects a filename and CRLs as a payload\n");

	/* The operations on the CKCH architecture are locked so we can
	 * manipulate ckch_store and ckch_inst */
	if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
		return cli_err(appctx, "Can't update the CRL file!\nOperations on certificates are currently locked!\n");

	if ((buf = alloc_trash_chunk()) == NULL) {
		memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* work on a copy of the file name */
	if (!chunk_strcpy(buf, args[3])) {
		memprintf(&err, "%sCan't allocate memory\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	old_crlfile_entry = NULL;
	new_crlfile_entry = NULL;

	/* if there is an ongoing transaction */
	if (crlfile_transaction.path) {
		/* if there is an ongoing transaction, check if this is the same file */
		if (strcmp(crlfile_transaction.path, buf->area) != 0) {
			memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", crlfile_transaction.path, buf->area);
			errcode |= ERR_ALERT | ERR_FATAL;
			goto end;
		}
		old_crlfile_entry = crlfile_transaction.old_crlfile_entry;
	}
	else {
		/* lookup for the certificate in the tree */
		old_crlfile_entry = ssl_store_get_cafile_entry(buf->area, 0);
	}

	/* only existing (configured) CRL files may be updated from the CLI */
	if (!old_crlfile_entry) {
		memprintf(&err, "%sCan't replace a CRL file which is not referenced by the configuration!\n",
			  err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* Create a new cafile_entry without adding it to the cafile tree. */
	new_crlfile_entry = ssl_store_create_cafile_entry(old_crlfile_entry->path, NULL, CAFILE_CRL);
	if (!new_crlfile_entry) {
		memprintf(&err, "%sCannot allocate memory!\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* Fill the new entry with the new CRL. */
	if (ssl_store_load_ca_from_buf(new_crlfile_entry, payload, 0)) {
		memprintf(&err, "%sInvalid payload\n", err ? err : "");
		errcode |= ERR_ALERT | ERR_FATAL;
		goto end;
	}

	/* we succeed, we can save the crl in the transaction */

	/* if there wasn't a transaction, update the old CRL */
	if (!crlfile_transaction.old_crlfile_entry) {
		crlfile_transaction.old_crlfile_entry = old_crlfile_entry;
		crlfile_transaction.path = old_crlfile_entry->path;
		err = memprintf(&err, "transaction created for CRL %s!\n", crlfile_transaction.path);
	} else {
		err = memprintf(&err, "transaction updated for CRL %s!\n", crlfile_transaction.path);
	}

	/* free the previous CRL file if there was a transaction */
	ssl_store_delete_cafile_entry(crlfile_transaction.new_crlfile_entry);

	crlfile_transaction.new_crlfile_entry = new_crlfile_entry;

	/* creates the SNI ctxs later in the IO handler */

end:
	free_trash_chunk(buf);

	if (errcode & ERR_CODE) {
		/* on error the new entry was never stored in the transaction,
		 * so it can safely be released here */
		ssl_store_delete_cafile_entry(new_crlfile_entry);
		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
		return cli_dynerr(appctx, memprintf(&err, "%sCan't update %s!\n", err ? err : "", args[3]));
	} else {

		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
		return cli_dynmsg(appctx, LOG_NOTICE, err);
	}
}
+
+/* Parsing function of 'commit ssl crl-file'.
+ * It uses a commit_cacrlfile_ctx that's also shared with "commit ssl ca-file".
+ */
+static int cli_parse_commit_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct commit_cacrlfile_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'commit ssl ca-file' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't commit the CRL file!\nOperations on certificates are currently locked!\n");
+
+ if (!crlfile_transaction.path) {
+ memprintf(&err, "No ongoing transaction! !\n");
+ goto error;
+ }
+
+ if (strcmp(crlfile_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to set '%s'\n", crlfile_transaction.path, args[3]);
+ goto error;
+ }
+ /* init the appctx structure */
+ ctx->state = CACRL_ST_INIT;
+ ctx->next_ckchi_link = NULL;
+ ctx->old_entry = crlfile_transaction.old_crlfile_entry;
+ ctx->new_entry = crlfile_transaction.new_crlfile_entry;
+ ctx->cafile_type = CAFILE_CRL;
+
+ return 0;
+
+error:
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "%sCan't commit %s!\n", err ? err : "", args[3]);
+
+ return cli_dynerr(appctx, err);
+}
+
+
+/* release function of the `commit ssl crl-file' command, free things and unlock the spinlock.
+ * it uses a commit_cacrlfile_ctx that's the same as for "commit ssl ca-file".
+ */
+static void cli_release_commit_crlfile(struct appctx *appctx)
+{
+ struct commit_cacrlfile_ctx *ctx = appctx->svcctx;
+ struct cafile_entry *new_crlfile_entry = ctx->new_entry;
+
+ /* Remove the uncommitted cafile_entry from the tree. */
+ if (new_crlfile_entry) {
+ ebmb_delete(&new_crlfile_entry->node);
+ ssl_store_delete_cafile_entry(new_crlfile_entry);
+ }
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ ha_free(&ctx->err);
+}
+
+/* parsing function of 'del ssl crl-file' */
+static int cli_parse_del_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct cafile_entry *cafile_entry;
+ char *err = NULL;
+ char *filename;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'del ssl crl-file' expects a CRL file name\n");
+
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't delete the CRL file!\nOperations on certificates are currently locked!\n");
+
+ filename = args[3];
+
+ if (crlfile_transaction.path && strcmp(crlfile_transaction.path, filename) == 0) {
+ memprintf(&err, "ongoing transaction for the CRL file '%s'", filename);
+ goto error;
+ }
+
+ cafile_entry = ssl_store_get_cafile_entry(filename, 0);
+ if (!cafile_entry) {
+ memprintf(&err, "CRL file '%s' doesn't exist!\n", filename);
+ goto error;
+ }
+ if (cafile_entry->type != CAFILE_CRL) {
+ memprintf(&err, "'del ssl crl-file' does not work on CA files!\n");
+ goto error;
+ }
+
+ if (!LIST_ISEMPTY(&cafile_entry->ckch_inst_link)) {
+ memprintf(&err, "CRL file '%s' in use, can't be deleted!\n", filename);
+ goto error;
+ }
+
+ /* Remove the cafile_entry from the tree */
+ ebmb_delete(&cafile_entry->node);
+ ssl_store_delete_cafile_entry(cafile_entry);
+
+ memprintf(&err, "CRL file '%s' deleted!\n", filename);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ memprintf(&err, "Can't remove the CRL file: %s\n", err ? err : "");
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ return cli_dynerr(appctx, err);
+}
+
+/* parsing function of 'abort ssl crl-file' */
+static int cli_parse_abort_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3])
+ return cli_err(appctx, "'abort ssl crl-file' expects a filename\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't abort!\nOperations on certificates are currently locked!\n");
+
+ if (!crlfile_transaction.path) {
+ memprintf(&err, "No ongoing transaction!\n");
+ goto error;
+ }
+
+ if (strcmp(crlfile_transaction.path, args[3]) != 0) {
+ memprintf(&err, "The ongoing transaction is about '%s' but you are trying to abort a transaction for '%s'\n", crlfile_transaction.path, args[3]);
+ goto error;
+ }
+
+ /* Only free the uncommitted cafile_entry here, because the SNI and instances were not generated yet */
+ ssl_store_delete_cafile_entry(crlfile_transaction.new_crlfile_entry);
+ crlfile_transaction.new_crlfile_entry = NULL;
+ crlfile_transaction.old_crlfile_entry = NULL;
+ crlfile_transaction.path = NULL;
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ err = memprintf(&err, "Transaction aborted for certificate '%s'!\n", args[3]);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+ return cli_dynerr(appctx, err);
+}
+
+
+/*
+ * Display a Certificate Resignation List's information.
+ * The information displayed is inspired by the output of 'openssl crl -in
+ * crl.pem -text'.
+ * Returns 0 in case of success.
+ */
+static int show_crl_detail(X509_CRL *crl, struct buffer *out)
+{
+ BIO *bio = NULL;
+ struct buffer *tmp = alloc_trash_chunk();
+ long version;
+ X509_NAME *issuer;
+ int write = -1;
+#ifndef USE_OPENSSL_WOLFSSL
+ STACK_OF(X509_REVOKED) *rev = NULL;
+ X509_REVOKED *rev_entry = NULL;
+ int i;
+#endif
+
+ if (!tmp)
+ return -1;
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ goto end;
+
+ /* Version (as displayed by 'openssl crl') */
+ version = X509_CRL_get_version(crl);
+ chunk_appendf(out, "Version %ld\n", version + 1);
+
+ /* Signature Algorithm */
+ chunk_appendf(out, "Signature Algorithm: %s\n", OBJ_nid2ln(X509_CRL_get_signature_nid(crl)));
+
+ /* Issuer */
+ chunk_appendf(out, "Issuer: ");
+ if ((issuer = X509_CRL_get_issuer(crl)) == NULL)
+ goto end;
+ if ((ssl_sock_get_dn_oneline(issuer, tmp)) == -1)
+ goto end;
+ *(tmp->area + tmp->data) = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+ /* Last Update */
+ chunk_appendf(out, "Last Update: ");
+ chunk_reset(tmp);
+ if (BIO_reset(bio) == -1)
+ goto end;
+ if (ASN1_TIME_print(bio, X509_CRL_get0_lastUpdate(crl)) == 0)
+ goto end;
+ write = BIO_read(bio, tmp->area, tmp->size-1);
+ tmp->area[write] = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+
+ /* Next Update */
+ chunk_appendf(out, "Next Update: ");
+ chunk_reset(tmp);
+ if (BIO_reset(bio) == -1)
+ goto end;
+ if (ASN1_TIME_print(bio, X509_CRL_get0_nextUpdate(crl)) == 0)
+ goto end;
+ write = BIO_read(bio, tmp->area, tmp->size-1);
+ tmp->area[write] = '\0';
+ chunk_appendf(out, "%s\n", tmp->area);
+
+#ifndef USE_OPENSSL_WOLFSSL
+ /* Revoked Certificates */
+ rev = X509_CRL_get_REVOKED(crl);
+ if (sk_X509_REVOKED_num(rev) > 0)
+ chunk_appendf(out, "Revoked Certificates:\n");
+ else
+ chunk_appendf(out, "No Revoked Certificates.\n");
+
+ for (i = 0; i < sk_X509_REVOKED_num(rev); i++) {
+ rev_entry = sk_X509_REVOKED_value(rev, i);
+
+ /* Serial Number and Revocation Date */
+ if (BIO_reset(bio) == -1)
+ goto end;
+ BIO_printf(bio , " Serial Number: ");
+ i2a_ASN1_INTEGER(bio, (ASN1_INTEGER*)X509_REVOKED_get0_serialNumber(rev_entry));
+ BIO_printf(bio, "\n Revocation Date: ");
+ if (ASN1_TIME_print(bio, X509_REVOKED_get0_revocationDate(rev_entry)) == 0)
+ goto end;
+ BIO_printf(bio, "\n");
+
+ write = BIO_read(bio, tmp->area, tmp->size-1);
+ tmp->area[write] = '\0';
+ chunk_appendf(out, "%s", tmp->area);
+ }
+#endif /* not USE_OPENSSL_WOLFSSL */
+
+end:
+ free_trash_chunk(tmp);
+ if (bio)
+ BIO_free(bio);
+
+ return 0;
+}
+
+/* IO handler of details "show ssl crl-file <filename[:index]>".
+ * It uses show_crlfile_ctx and the global
+ * crlfile_transaction.new_crlfile_entry in read-only.
+ */
+static int cli_io_handler_show_crlfile_detail(struct appctx *appctx)
+{
+ struct show_crlfile_ctx *ctx = appctx->svcctx;
+ struct cafile_entry *cafile_entry = ctx->cafile_entry;
+ struct buffer *out = alloc_trash_chunk();
+ int i;
+ X509_CRL *crl;
+ STACK_OF(X509_OBJECT) *objs;
+ int retval = 0;
+ int index = ctx->index;
+
+ if (!out)
+ goto end_no_putchk;
+
+ chunk_appendf(out, "Filename: ");
+ if (cafile_entry == crlfile_transaction.new_crlfile_entry)
+ chunk_appendf(out, "*");
+ chunk_appendf(out, "%s\n", cafile_entry->path);
+
+ chunk_appendf(out, "Status: ");
+ if (!cafile_entry->ca_store)
+ chunk_appendf(out, "Empty\n");
+ else if (LIST_ISEMPTY(&cafile_entry->ckch_inst_link))
+ chunk_appendf(out, "Unused\n");
+ else
+ chunk_appendf(out, "Used\n");
+
+ if (!cafile_entry->ca_store)
+ goto end;
+
+ objs = X509_STORE_get0_objects(cafile_entry->ca_store);
+ for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
+ crl = X509_OBJECT_get0_X509_CRL(sk_X509_OBJECT_value(objs, i));
+ if (!crl)
+ continue;
+
+ /* CRL indexes start at 1 on the CLI output. */
+ if (index && index-1 != i)
+ continue;
+
+ chunk_appendf(out, " \nCertificate Revocation List #%d:\n", i+1);
+ retval = show_crl_detail(crl, out);
+ if (retval < 0)
+ goto end_no_putchk;
+ else if (retval || index)
+ goto end;
+ }
+
+end:
+ if (applet_putchk(appctx, out) == -1)
+ goto yield;
+
+end_no_putchk:
+ free_trash_chunk(out);
+ return 1;
+yield:
+ free_trash_chunk(out);
+ return 0; /* should come back */
+}
+
+/* parsing function for 'show ssl crl-file [crlfile[:index]]'.
+ * It sets the context to a show_crlfile_ctx, and the global
+ * crlfile_transaction.new_crlfile_entry under the ckch_lock.
+ */
+static int cli_parse_show_crlfile(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_crlfile_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ struct cafile_entry *cafile_entry;
+ long index = 0;
+ char *colons;
+ char *err = NULL;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return cli_err(appctx, "Can't allocate memory!\n");
+
+ /* The operations on the CKCH architecture are locked so we can
+ * manipulate ckch_store and ckch_inst */
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't show!\nOperations on certificates are currently locked!\n");
+
+ /* check if there is a certificate to lookup */
+ if (*args[3]) {
+
+ /* Look for an optional index after the CRL file name */
+ colons = strchr(args[3], ':');
+ if (colons) {
+ char *endptr;
+
+ index = strtol(colons + 1, &endptr, 10);
+ /* Indexes start at 1 */
+ if (colons + 1 == endptr || *endptr != '\0' || index <= 0) {
+ memprintf(&err, "wrong CRL index after colons in '%s'!", args[3]);
+ goto error;
+ }
+ *colons = '\0';
+ }
+
+ if (*args[3] == '*') {
+ if (!crlfile_transaction.new_crlfile_entry)
+ goto error;
+
+ cafile_entry = crlfile_transaction.new_crlfile_entry;
+
+ if (strcmp(args[3] + 1, cafile_entry->path) != 0)
+ goto error;
+
+ } else {
+ /* Get the "original" cafile_entry and not the
+ * uncommitted one if it exists. */
+ if ((cafile_entry = ssl_store_get_cafile_entry(args[3], 1)) == NULL || cafile_entry->type != CAFILE_CRL)
+ goto error;
+ }
+
+ ctx->cafile_entry = cafile_entry;
+ ctx->index = index;
+ /* use the IO handler that shows details */
+ appctx->io_handler = cli_io_handler_show_crlfile_detail;
+ }
+
+ return 0;
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ if (err)
+ return cli_dynerr(appctx, err);
+ return cli_err(appctx, "Can't display the CRL file : Not found!\n");
+}
+
+/* IO handler of "show ssl crl-file". The command taking a specific CRL file name
+ * is managed in cli_io_handler_show_crlfile_detail. */
+static int cli_io_handler_show_crlfile(struct appctx *appctx)
+{
+ struct show_crlfile_ctx *ctx = appctx->svcctx;
+ struct buffer *trash = alloc_trash_chunk();
+ struct ebmb_node *node;
+ struct cafile_entry *cafile_entry = NULL;
+
+ if (trash == NULL)
+ return 1;
+
+ if (!ctx->old_crlfile_entry && crlfile_transaction.old_crlfile_entry) {
+ chunk_appendf(trash, "# transaction\n");
+ chunk_appendf(trash, "*%s\n", crlfile_transaction.old_crlfile_entry->path);
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+ ctx->old_crlfile_entry = crlfile_transaction.old_crlfile_entry;
+ }
+
+ /* First time in this io_handler. */
+ if (!ctx->cafile_entry) {
+ chunk_appendf(trash, "# filename\n");
+ node = ebmb_first(&cafile_tree);
+ } else {
+ /* We yielded during a previous call. */
+ node = &ctx->cafile_entry->node;
+ }
+
+ while (node) {
+ cafile_entry = ebmb_entry(node, struct cafile_entry, node);
+ if (cafile_entry->type == CAFILE_CRL) {
+ chunk_appendf(trash, "%s\n", cafile_entry->path);
+ }
+
+ node = ebmb_next(node);
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+ }
+
+ ctx->cafile_entry = NULL;
+ free_trash_chunk(trash);
+ return 1;
+yield:
+
+ free_trash_chunk(trash);
+ ctx->cafile_entry = cafile_entry;
+ return 0; /* should come back */
+}
+
+
+/* release function of the 'show ssl crl-file' command */
+static void cli_release_show_crlfile(struct appctx *appctx)
+{
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+}
+
+
+void ckch_deinit()
+{
+ struct eb_node *node, *next;
+ struct ckch_store *store;
+ struct ebmb_node *canode;
+
+ /* deinit the ckch stores */
+ node = eb_first(&ckchs_tree);
+ while (node) {
+ next = eb_next(node);
+ store = ebmb_entry(node, struct ckch_store, node);
+ ckch_store_free(store);
+ node = next;
+ }
+
+ /* deinit the ca-file store */
+ canode = ebmb_first(&cafile_tree);
+ while (canode) {
+ struct cafile_entry *entry = NULL;
+
+ entry = ebmb_entry(canode, struct cafile_entry, node);
+ canode = ebmb_next(canode);
+ ebmb_delete(&entry->node);
+ ssl_store_delete_cafile_entry(entry);
+ }
+}
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "new", "ssl", "cert", NULL }, "new ssl cert <certfile> : create a new certificate file to be used in a crt-list or a directory", cli_parse_new_cert, NULL, NULL },
+ { { "set", "ssl", "cert", NULL }, "set ssl cert <certfile> <payload> : replace a certificate file", cli_parse_set_cert, NULL, NULL },
+ { { "commit", "ssl", "cert", NULL }, "commit ssl cert <certfile> : commit a certificate file", cli_parse_commit_cert, cli_io_handler_commit_cert, cli_release_commit_cert },
+ { { "abort", "ssl", "cert", NULL }, "abort ssl cert <certfile> : abort a transaction for a certificate file", cli_parse_abort_cert, NULL, NULL },
+ { { "del", "ssl", "cert", NULL }, "del ssl cert <certfile> : delete an unused certificate file", cli_parse_del_cert, NULL, NULL },
+ { { "show", "ssl", "cert", NULL }, "show ssl cert [<certfile>] : display the SSL certificates used in memory, or the details of a file", cli_parse_show_cert, cli_io_handler_show_cert, cli_release_show_cert },
+
+ { { "new", "ssl", "ca-file", NULL }, "new ssl ca-file <cafile> : create a new CA file to be used in a crt-list", cli_parse_new_cafile, NULL, NULL },
+ { { "add", "ssl", "ca-file", NULL }, "add ssl ca-file <cafile> <payload> : add a certificate into the CA file", cli_parse_set_cafile, NULL, NULL },
+ { { "set", "ssl", "ca-file", NULL }, "set ssl ca-file <cafile> <payload> : replace a CA file", cli_parse_set_cafile, NULL, NULL },
+ { { "commit", "ssl", "ca-file", NULL }, "commit ssl ca-file <cafile> : commit a CA file", cli_parse_commit_cafile, cli_io_handler_commit_cafile_crlfile, cli_release_commit_cafile },
+ { { "abort", "ssl", "ca-file", NULL }, "abort ssl ca-file <cafile> : abort a transaction for a CA file", cli_parse_abort_cafile, NULL, NULL },
+ { { "del", "ssl", "ca-file", NULL }, "del ssl ca-file <cafile> : delete an unused CA file", cli_parse_del_cafile, NULL, NULL },
+ { { "show", "ssl", "ca-file", NULL }, "show ssl ca-file [<cafile>[:<index>]] : display the SSL CA files used in memory, or the details of a <cafile>, or a single certificate of index <index> of a CA file <cafile>", cli_parse_show_cafile, cli_io_handler_show_cafile, cli_release_show_cafile },
+
+ { { "new", "ssl", "crl-file", NULL }, "new ssl crlfile <crlfile> : create a new CRL file to be used in a crt-list", cli_parse_new_crlfile, NULL, NULL },
+ { { "set", "ssl", "crl-file", NULL }, "set ssl crl-file <crlfile> <payload> : replace a CRL file", cli_parse_set_crlfile, NULL, NULL },
+ { { "commit", "ssl", "crl-file", NULL },"commit ssl crl-file <crlfile> : commit a CRL file", cli_parse_commit_crlfile, cli_io_handler_commit_cafile_crlfile, cli_release_commit_crlfile },
+ { { "abort", "ssl", "crl-file", NULL }, "abort ssl crl-file <crlfile> : abort a transaction for a CRL file", cli_parse_abort_crlfile, NULL, NULL },
+ { { "del", "ssl", "crl-file", NULL }, "del ssl crl-file <crlfile> : delete an unused CRL file", cli_parse_del_crlfile, NULL, NULL },
+ { { "show", "ssl", "crl-file", NULL }, "show ssl crl-file [<crlfile[:<index>>]] : display the SSL CRL files used in memory, or the details of a <crlfile>, or a single CRL of index <index> of CRL file <crlfile>", cli_parse_show_crlfile, cli_io_handler_show_crlfile, cli_release_show_crlfile },
+ { { NULL }, NULL, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
diff --git a/src/ssl_crtlist.c b/src/ssl_crtlist.c
new file mode 100644
index 0000000..dcd9171
--- /dev/null
+++ b/src/ssl_crtlist.c
@@ -0,0 +1,1577 @@
+/*
+ *
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <dirent.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+
+#include <import/ebpttree.h>
+#include <import/ebsttree.h>
+
+#include <haproxy/applet.h>
+#include <haproxy/channel.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_crtlist.h>
+#include <haproxy/ssl_ocsp.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+
+/* CLI context for "show ssl crt-list" or "dump ssl crt-list" */
+struct show_crtlist_ctx {
+ struct ebmb_node *crtlist_node; /* ebmb_node for the current crtlist */
+ struct crtlist_entry *entry; /* current entry */
+ int mode; /* 'd' for dump, 's' for show */
+};
+
+/* CLI context for "add ssl crt-list" */
+struct add_crtlist_ctx {
+ struct crtlist *crtlist;
+ struct crtlist_entry *entry;
+ struct bind_conf_list *bind_conf_node;
+ char *err;
+ enum {
+ ADDCRT_ST_INIT = 0,
+ ADDCRT_ST_GEN,
+ ADDCRT_ST_INSERT,
+ ADDCRT_ST_SUCCESS,
+ ADDCRT_ST_ERROR,
+ ADDCRT_ST_FIN,
+ } state;
+};
+
+/* release ssl bind conf */
+void ssl_sock_free_ssl_conf(struct ssl_bind_conf *conf)
+{
+ if (conf) {
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ ha_free(&conf->npn_str);
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ ha_free(&conf->alpn_str);
+#endif
+ ha_free(&conf->ca_file);
+ ha_free(&conf->ca_verify_file);
+ ha_free(&conf->crl_file);
+ ha_free(&conf->ciphers);
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ ha_free(&conf->ciphersuites);
+#endif
+ ha_free(&conf->curves);
+ ha_free(&conf->ecdhe);
+#if defined(SSL_CTX_set1_sigalgs_list)
+ ha_free(&conf->sigalgs);
+#endif
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+ ha_free(&conf->client_sigalgs);
+#endif
+ }
+}
+
+/*
+ * Allocate and copy a ssl_bind_conf structure
+ */
+struct ssl_bind_conf *crtlist_dup_ssl_conf(struct ssl_bind_conf *src)
+{
+ struct ssl_bind_conf *dst;
+
+ if (!src)
+ return NULL;
+
+ dst = calloc(1, sizeof(*dst));
+ if (!dst)
+ return NULL;
+
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ if (src->npn_str) {
+ dst->npn_str = strdup(src->npn_str);
+ if (!dst->npn_str)
+ goto error;
+ }
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ if (src->alpn_str) {
+ dst->alpn_str = strdup(src->alpn_str);
+ if (!dst->alpn_str)
+ goto error;
+ }
+#endif
+ if (src->ca_file) {
+ dst->ca_file = strdup(src->ca_file);
+ if (!dst->ca_file)
+ goto error;
+ }
+ if (src->ca_verify_file) {
+ dst->ca_verify_file = strdup(src->ca_verify_file);
+ if (!dst->ca_verify_file)
+ goto error;
+ }
+ if (src->crl_file) {
+ dst->crl_file = strdup(src->crl_file);
+ if (!dst->crl_file)
+ goto error;
+ }
+ if (src->ciphers) {
+ dst->ciphers = strdup(src->ciphers);
+ if (!dst->ciphers)
+ goto error;
+ }
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (src->ciphersuites) {
+ dst->ciphersuites = strdup(src->ciphersuites);
+ if (!dst->ciphersuites)
+ goto error;
+ }
+#endif
+ if (src->curves) {
+ dst->curves = strdup(src->curves);
+ if (!dst->curves)
+ goto error;
+ }
+ if (src->ecdhe) {
+ dst->ecdhe = strdup(src->ecdhe);
+ if (!dst->ecdhe)
+ goto error;
+ }
+
+ dst->ssl_methods_cfg.flags = src->ssl_methods_cfg.flags;
+ dst->ssl_methods_cfg.min = src->ssl_methods_cfg.min;
+ dst->ssl_methods_cfg.max = src->ssl_methods_cfg.max;
+
+ dst->ssl_methods.flags = src->ssl_methods.flags;
+ dst->ssl_methods.min = src->ssl_methods.min;
+ dst->ssl_methods.max = src->ssl_methods.max;
+
+#if defined(SSL_CTX_set1_sigalgs_list)
+ if (src->sigalgs) {
+ dst->sigalgs = strdup(src->sigalgs);
+ if (!dst->sigalgs)
+ goto error;
+ }
+#endif
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+ if (src->client_sigalgs) {
+ dst->client_sigalgs = strdup(src->client_sigalgs);
+ if (!dst->client_sigalgs)
+ goto error;
+ }
+#endif
+ return dst;
+
+error:
+ ssl_sock_free_ssl_conf(dst);
+ free(dst);
+
+ return NULL;
+}
+
+/* free sni filters */
+void crtlist_free_filters(char **args)
+{
+ int i;
+
+ if (!args)
+ return;
+
+ for (i = 0; args[i]; i++)
+ free(args[i]);
+
+ free(args);
+}
+
+/* Alloc and duplicate a char ** array */
+char **crtlist_dup_filters(char **args, int fcount)
+{
+ char **dst;
+ int i;
+
+ if (fcount == 0)
+ return NULL;
+
+ dst = calloc(fcount + 1, sizeof(*dst));
+ if (!dst)
+ return NULL;
+
+ for (i = 0; i < fcount; i++) {
+ dst[i] = strdup(args[i]);
+ if (!dst[i])
+ goto error;
+ }
+ return dst;
+
+error:
+ crtlist_free_filters(dst);
+ return NULL;
+}
+
+/*
+ * Detach and free a crtlist_entry.
+ * Free the filters, the ssl_conf and call ckch_inst_free() for each ckch_inst
+ */
+void crtlist_entry_free(struct crtlist_entry *entry)
+{
+ struct ckch_inst *inst, *inst_s;
+
+ if (entry == NULL)
+ return;
+
+ ebpt_delete(&entry->node);
+ LIST_DELETE(&entry->by_crtlist);
+ LIST_DELETE(&entry->by_ckch_store);
+ crtlist_free_filters(entry->filters);
+ ssl_sock_free_ssl_conf(entry->ssl_conf);
+ free(entry->ssl_conf);
+ list_for_each_entry_safe(inst, inst_s, &entry->ckch_inst, by_crtlist_entry) {
+ ckch_inst_free(inst);
+ }
+ free(entry);
+}
+/*
+ * Duplicate a crt_list entry and its content (ssl_conf, filters/fcount)
+ * Return a pointer to the new entry
+ */
+struct crtlist_entry *crtlist_entry_dup(struct crtlist_entry *src)
+{
+ struct crtlist_entry *entry;
+
+ if (src == NULL)
+ return NULL;
+
+ entry = crtlist_entry_new();
+ if (entry == NULL)
+ return NULL;
+
+ if (src->filters) {
+ entry->filters = crtlist_dup_filters(src->filters, src->fcount);
+ if (!entry->filters)
+ goto error;
+ }
+ entry->fcount = src->fcount;
+ if (src->ssl_conf) {
+ entry->ssl_conf = crtlist_dup_ssl_conf(src->ssl_conf);
+ if (!entry->ssl_conf)
+ goto error;
+ }
+ entry->crtlist = src->crtlist;
+
+ return entry;
+
+error:
+
+ crtlist_free_filters(entry->filters);
+ ssl_sock_free_ssl_conf(entry->ssl_conf);
+ free(entry->ssl_conf);
+ free(entry);
+
+ return NULL;
+}
+
+/*
+ * Allocate and initialize a crtlist_entry
+ */
+struct crtlist_entry *crtlist_entry_new()
+{
+ struct crtlist_entry *entry;
+
+ entry = calloc(1, sizeof(*entry));
+ if (entry == NULL)
+ return NULL;
+
+ LIST_INIT(&entry->ckch_inst);
+
+ /* initialize the nodes so we can LIST_DELETE in any cases */
+ LIST_INIT(&entry->by_crtlist);
+ LIST_INIT(&entry->by_ckch_store);
+
+ return entry;
+}
+
+/* Free a crtlist, from the crt_entry to the content of the ssl_conf */
+void crtlist_free(struct crtlist *crtlist)
+{
+ struct crtlist_entry *entry, *s_entry;
+ struct bind_conf_list *bind_conf_node;
+
+ if (crtlist == NULL)
+ return;
+
+ bind_conf_node = crtlist->bind_conf;
+ while (bind_conf_node) {
+ struct bind_conf_list *next = bind_conf_node->next;
+ free(bind_conf_node);
+ bind_conf_node = next;
+ }
+
+ list_for_each_entry_safe(entry, s_entry, &crtlist->ord_entries, by_crtlist) {
+ crtlist_entry_free(entry);
+ }
+ ebmb_delete(&crtlist->node);
+ free(crtlist);
+}
+
+/* Alloc and initialize a struct crtlist
+ * <filename> is the key of the ebmb_node
+ * <unique> initialize the list of entries to be unique (1) or not (0)
+ */
+struct crtlist *crtlist_new(const char *filename, int unique)
+{
+ struct crtlist *newlist;
+
+ newlist = calloc(1, sizeof(*newlist) + strlen(filename) + 1);
+ if (newlist == NULL)
+ return NULL;
+
+ memcpy(newlist->node.key, filename, strlen(filename) + 1);
+ if (unique)
+ newlist->entries = EB_ROOT_UNIQUE;
+ else
+ newlist->entries = EB_ROOT;
+
+ LIST_INIT(&newlist->ord_entries);
+
+ return newlist;
+}
+
+/*
+ * Read a single crt-list line. /!\ alter the <line> string.
+ * Fill <crt_path> and <crtlist_entry>
+ * <crtlist_entry> must be allocated and freed by the caller
+ * <crtlist_entry->ssl_conf> is allocated by the function
+ * <crtlist_entry->filters> is allocated by the function
+ * <crt_path> is a ptr in <line>
+ * Return an error code
+ */
+int crtlist_parse_line(char *line, char **crt_path, struct crtlist_entry *entry, const char *file, int linenum, int from_cli, char **err)
+{
+ int cfgerr = 0;
+ int arg, newarg, cur_arg, i, ssl_b = 0, ssl_e = 0;
+ char *end;
+ char *args[MAX_CRT_ARGS + 1];
+ struct ssl_bind_conf *ssl_conf = NULL;
+
+ if (!line || !crt_path || !entry)
+ return ERR_ALERT | ERR_FATAL;
+
+ end = line + strlen(line);
+ if (end-line >= CRT_LINESIZE-1 && *(end-1) != '\n') {
+ /* Check if we reached the limit and the last char is not \n.
+ * Watch out for the last line without the terminating '\n'!
+ */
+ memprintf(err, "parsing [%s:%d]: line too long, limit is %d characters",
+ file, linenum, CRT_LINESIZE-1);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ arg = 0;
+ newarg = 1;
+ while (*line) {
+ if (isspace((unsigned char)*line)) {
+ newarg = 1;
+ *line = 0;
+ } else if (*line == '[') {
+ if (ssl_b) {
+ memprintf(err, "parsing [%s:%d]: too many '['", file, linenum);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ if (!arg) {
+ memprintf(err, "parsing [%s:%d]: file must start with a cert", file, linenum);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ ssl_b = arg;
+ newarg = 1;
+ *line = 0;
+ } else if (*line == ']') {
+ if (ssl_e) {
+ memprintf(err, "parsing [%s:%d]: too many ']'", file, linenum);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ if (!ssl_b) {
+ memprintf(err, "parsing [%s:%d]: missing '['", file, linenum);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ ssl_e = arg;
+ newarg = 1;
+ *line = 0;
+ } else if (newarg) {
+ if (arg == MAX_CRT_ARGS) {
+ memprintf(err, "parsing [%s:%d]: too many args ", file, linenum);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ newarg = 0;
+ args[arg++] = line;
+ }
+ line++;
+ }
+ args[arg++] = line;
+
+ /* empty line */
+ if (!*args[0]) {
+ cfgerr |= ERR_NONE;
+ goto error;
+ }
+
+ *crt_path = args[0];
+
+ if (ssl_b) {
+ if (ssl_b > 1) {
+ memprintf(err, "parsing [%s:%d]: malformated line, filters can't be between filename and options!", file, linenum);
+ cfgerr |= ERR_WARN;
+ }
+
+ ssl_conf = calloc(1, sizeof *ssl_conf);
+ if (!ssl_conf) {
+ memprintf(err, "not enough memory!");
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ }
+
+ cur_arg = ssl_b ? ssl_b : 1;
+ while (cur_arg < ssl_e) {
+ newarg = 0;
+ for (i = 0; ssl_crtlist_kws[i].kw != NULL; i++) {
+ if (strcmp(ssl_crtlist_kws[i].kw, args[cur_arg]) == 0) {
+ newarg = 1;
+ cfgerr |= ssl_crtlist_kws[i].parse(args, cur_arg, NULL, ssl_conf, from_cli, err);
+ if (cur_arg + 1 + ssl_crtlist_kws[i].skip > ssl_e) {
+ memprintf(err, "parsing [%s:%d]: ssl args out of '[]' for %s",
+ file, linenum, args[cur_arg]);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ cur_arg += 1 + ssl_crtlist_kws[i].skip;
+ break;
+ }
+ }
+ if (!cfgerr && !newarg) {
+ memprintf(err, "parsing [%s:%d]: unknown ssl keyword %s",
+ file, linenum, args[cur_arg]);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ }
+ entry->linenum = linenum;
+ entry->ssl_conf = ssl_conf;
+ entry->filters = crtlist_dup_filters(&args[cur_arg], arg - cur_arg - 1);
+ entry->fcount = arg - cur_arg - 1;
+
+ return cfgerr;
+
+error:
+ crtlist_free_filters(entry->filters);
+ entry->filters = NULL;
+ ssl_sock_free_ssl_conf(entry->ssl_conf);
+ ha_free(&entry->ssl_conf);
+ return cfgerr;
+}
+
+
+
+/* This function parses a crt-list file and stores it in a struct crtlist, each line is a crtlist_entry structure
+ * Fill the <crtlist> argument with a pointer to a new crtlist struct
+ *
+ * This function tries to open and store certificate files.
+ */
+int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *curproxy, struct crtlist **crtlist, char **err)
+{
+ struct crtlist *newlist;
+ struct crtlist_entry *entry = NULL;
+ char thisline[CRT_LINESIZE];
+ FILE *f;
+ struct stat buf;
+ int linenum = 0;
+ int cfgerr = 0;
+ int missing_lf = -1;
+
+ if ((f = fopen(file, "r")) == NULL) {
+ memprintf(err, "cannot open file '%s' : %s", file, strerror(errno));
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ newlist = crtlist_new(file, 0);
+ if (newlist == NULL) {
+ memprintf(err, "Not enough memory!");
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ while (fgets(thisline, sizeof(thisline), f) != NULL) {
+ char *end;
+ char *line = thisline;
+ char *crt_path;
+ char path[MAXPATHLEN+1];
+ struct ckch_store *ckchs;
+ int found = 0;
+
+ if (missing_lf != -1) {
+ memprintf(err, "parsing [%s:%d]: Stray NUL character at position %d.\n",
+ file, linenum, (missing_lf + 1));
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ missing_lf = -1;
+ break;
+ }
+
+ linenum++;
+ end = line + strlen(line);
+ if (end-line == sizeof(thisline)-1 && *(end-1) != '\n') {
+ /* Check if we reached the limit and the last char is not \n.
+ * Watch out for the last line without the terminating '\n'!
+ */
+ memprintf(err, "parsing [%s:%d]: line too long, limit is %d characters",
+ file, linenum, (int)sizeof(thisline)-1);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ break;
+ }
+
+ if (*line == '#' || *line == '\n' || *line == '\r')
+ continue;
+
+ if (end > line && *(end-1) == '\n') {
+ /* kill trailing LF */
+ *(end - 1) = 0;
+ }
+ else {
+ /* mark this line as truncated */
+ missing_lf = end - line;
+ }
+
+ entry = crtlist_entry_new();
+ if (entry == NULL) {
+ memprintf(err, "Not enough memory!");
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ cfgerr |= crtlist_parse_line(thisline, &crt_path, entry, file, linenum, 0, err);
+ if (cfgerr & ERR_CODE)
+ goto error;
+
+ /* empty line */
+ if (!crt_path || !*crt_path) {
+ crtlist_entry_free(entry);
+ entry = NULL;
+ continue;
+ }
+
+ if (*crt_path != '/' && global_ssl.crt_base) {
+ if ((strlen(global_ssl.crt_base) + 1 + strlen(crt_path)) > sizeof(path) ||
+ snprintf(path, sizeof(path), "%s/%s", global_ssl.crt_base, crt_path) > sizeof(path)) {
+ memprintf(err, "parsing [%s:%d]: '%s' : path too long",
+ file, linenum, crt_path);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ crt_path = path;
+ }
+
+ /* Look for a ckch_store or create one */
+ ckchs = ckchs_lookup(crt_path);
+ if (ckchs == NULL) {
+ if (stat(crt_path, &buf) == 0) {
+ found++;
+
+ ckchs = ckchs_load_cert_file(crt_path, err);
+ if (ckchs == NULL) {
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ entry->node.key = ckchs;
+ entry->crtlist = newlist;
+ if (entry->ssl_conf)
+ ckchs->data->ocsp_update_mode = entry->ssl_conf->ocsp_update;
+ ebpt_insert(&newlist->entries, &entry->node);
+ LIST_APPEND(&newlist->ord_entries, &entry->by_crtlist);
+ LIST_APPEND(&ckchs->crtlist_entry, &entry->by_ckch_store);
+
+ } else if (global_ssl.extra_files & SSL_GF_BUNDLE) {
+ /* If we didn't find the file, this could be a
+ bundle, since 2.3 we don't support multiple
+			   certificates in the same OpenSSL store, so we
+ emulate it by loading each file separately. To
+ do so we need to duplicate the entry in the
+ crt-list because it becomes independent */
+ char fp[MAXPATHLEN+1] = {0};
+ int n = 0;
+ struct crtlist_entry *entry_dup = entry; /* use the previous created entry */
+ for (n = 0; n < SSL_SOCK_NUM_KEYTYPES; n++) {
+ struct stat buf;
+ int ret;
+
+ ret = snprintf(fp, sizeof(fp), "%s.%s", crt_path, SSL_SOCK_KEYTYPE_NAMES[n]);
+ if (ret > sizeof(fp))
+ continue;
+
+ ckchs = ckchs_lookup(fp);
+ if (!ckchs) {
+ if (stat(fp, &buf) == 0) {
+ ckchs = ckchs_load_cert_file(fp, err);
+ if (!ckchs) {
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ } else {
+ continue; /* didn't find this extension, skip */
+ }
+ }
+ found++;
+ linenum++; /* we duplicate the line for this entry in the bundle */
+ if (!entry_dup) { /* if the entry was used, duplicate one */
+ linenum++;
+ entry_dup = crtlist_entry_dup(entry);
+ if (!entry_dup) {
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ entry_dup->linenum = linenum;
+ }
+
+ entry_dup->node.key = ckchs;
+ entry_dup->crtlist = newlist;
+
+ cfgerr |= ocsp_update_check_cfg_consistency(ckchs, entry, crt_path, err);
+ if (cfgerr & ERR_FATAL)
+ goto error;
+
+ if (entry->ssl_conf)
+ ckchs->data->ocsp_update_mode = entry->ssl_conf->ocsp_update;
+ ebpt_insert(&newlist->entries, &entry_dup->node);
+ LIST_APPEND(&newlist->ord_entries, &entry_dup->by_crtlist);
+ LIST_APPEND(&ckchs->crtlist_entry, &entry_dup->by_ckch_store);
+
+ entry_dup = NULL; /* the entry was used, we need a new one next round */
+ }
+#if HA_OPENSSL_VERSION_NUMBER < 0x10101000L
+ if (found) {
+ memprintf(err, "%sCan't load '%s'. Loading a multi certificates bundle requires OpenSSL >= 1.1.1\n",
+ err && *err ? *err : "", crt_path);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ }
+#endif
+ }
+ if (!found) {
+ memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n",
+ err && *err ? *err : "", crt_path, strerror(errno));
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ }
+
+ } else {
+ entry->node.key = ckchs;
+ entry->crtlist = newlist;
+
+ cfgerr |= ocsp_update_check_cfg_consistency(ckchs, entry, crt_path, err);
+ if (cfgerr & ERR_FATAL)
+ goto error;
+
+ if (entry->ssl_conf)
+ ckchs->data->ocsp_update_mode = entry->ssl_conf->ocsp_update;
+ ebpt_insert(&newlist->entries, &entry->node);
+ LIST_APPEND(&newlist->ord_entries, &entry->by_crtlist);
+ LIST_APPEND(&ckchs->crtlist_entry, &entry->by_ckch_store);
+ found++;
+ }
+ entry = NULL;
+ }
+
+ if (missing_lf != -1) {
+ memprintf(err, "parsing [%s:%d]: Missing LF on last line, file might have been truncated at position %d.\n",
+ file, linenum, (missing_lf + 1));
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ }
+
+ if (cfgerr & ERR_CODE)
+ goto error;
+
+ newlist->linecount = linenum;
+
+ fclose(f);
+ *crtlist = newlist;
+
+ return cfgerr;
+error:
+ crtlist_entry_free(entry);
+
+ fclose(f);
+ crtlist_free(newlist);
+ return cfgerr;
+}
+
+/* This function reads a directory and stores it in a struct crtlist, each file is a crtlist_entry structure
+ * Fill the <crtlist> argument with a pointer to a new crtlist struct
+ *
+ * This function tries to open and store certificate files.
+ *
+ * Returns a composition of ERR_* flags (0 on success). On error the
+ * partially built list is freed and <crtlist> is left untouched.
+ */
+int crtlist_load_cert_dir(char *path, struct bind_conf *bind_conf, struct crtlist **crtlist, char **err)
+{
+ struct crtlist *dir;
+ struct dirent **de_list;
+ int i, n;
+ struct stat buf;
+ char *end;
+ char fp[MAXPATHLEN+1];
+ int cfgerr = 0;
+ struct ckch_store *ckchs;
+
+ dir = crtlist_new(path, 1);
+ if (dir == NULL) {
+ memprintf(err, "not enough memory");
+ return ERR_ALERT | ERR_FATAL;
+ }
+
+ n = scandir(path, &de_list, 0, alphasort);
+ if (n < 0) {
+ memprintf(err, "%sunable to scan directory '%s' : %s.\n",
+ err && *err ? *err : "", path, strerror(errno));
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ }
+ else {
+ for (i = 0; i < n; i++) {
+ struct crtlist_entry *entry;
+ struct dirent *de = de_list[i];
+
+ /* skip hidden files and the auxiliary extensions which are
+ * loaded alongside a certificate, not as certificates */
+ end = strrchr(de->d_name, '.');
+ if (end && (de->d_name[0] == '.' ||
+ strcmp(end, ".issuer") == 0 || strcmp(end, ".ocsp") == 0 ||
+ strcmp(end, ".sctl") == 0 || strcmp(end, ".key") == 0))
+ goto ignore_entry;
+
+ /* detect truncation instead of silently stat'ing a
+ * truncated path (snprintf returns the would-be length) */
+ if (snprintf(fp, sizeof(fp), "%s/%s", path, de->d_name) >= (int)sizeof(fp)) {
+ memprintf(err, "%spath too long for SSL certificate file '%s/%s'.\n",
+ err && *err ? *err : "", path, de->d_name);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto ignore_entry;
+ }
+ if (stat(fp, &buf) != 0) {
+ memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n",
+ err && *err ? *err : "", fp, strerror(errno));
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto ignore_entry;
+ }
+ /* only regular files can hold a certificate */
+ if (!S_ISREG(buf.st_mode))
+ goto ignore_entry;
+
+ entry = crtlist_entry_new();
+ if (entry == NULL) {
+ memprintf(err, "not enough memory '%s'", fp);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto ignore_entry;
+ }
+
+ /* reuse an already loaded store when available */
+ ckchs = ckchs_lookup(fp);
+ if (ckchs == NULL)
+ ckchs = ckchs_load_cert_file(fp, err);
+ if (ckchs == NULL) {
+ free(entry);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ /* free the current and the remaining scandir entries,
+ * which would otherwise leak when aborting the loop */
+ for (; i < n; i++)
+ free(de_list[i]);
+ goto end;
+ }
+ entry->node.key = ckchs;
+ entry->crtlist = dir;
+ LIST_APPEND(&ckchs->crtlist_entry, &entry->by_ckch_store);
+ LIST_APPEND(&dir->ord_entries, &entry->by_crtlist);
+ ebpt_insert(&dir->entries, &entry->node);
+
+ignore_entry:
+ free(de);
+ }
+end:
+ free(de_list);
+ }
+
+ if (cfgerr & ERR_CODE) {
+ /* free the dir and entries on error */
+ crtlist_free(dir);
+ } else {
+ *crtlist = dir;
+ }
+ return cfgerr;
+}
+
+/*
+ * Take an ssl_bind_conf structure and append the configuration line used to
+ * create it in the buffer. The output is enclosed in square brackets, in the
+ * same format a crt-list file uses, so it can be shown back to the user.
+ */
+static void dump_crtlist_sslconf(struct buffer *buf, const struct ssl_bind_conf *conf)
+{
+ /* set once at least one keyword was emitted, so that every following
+ * keyword is preceded by a separating space */
+ int space = 0;
+
+ if (conf == NULL)
+ return;
+
+ chunk_appendf(buf, " [");
+#ifdef OPENSSL_NPN_NEGOTIATED
+ if (conf->npn_str) {
+ int len = conf->npn_len;
+ char *ptr = conf->npn_str;
+ int comma = 0;
+
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "npn ");
+ /* the NPN string is a sequence of length-prefixed protocol
+ * names; dump them as a comma-separated list */
+ while (len) {
+ unsigned short size;
+
+ size = *ptr;
+ ptr++;
+ if (comma)
+ chunk_memcat(buf, ",", 1);
+ chunk_memcat(buf, ptr, size);
+ ptr += size;
+ len -= size + 1;
+ comma = 1;
+ }
+ chunk_memcat(buf, "", 1); /* finish with a \0 */
+ space++;
+ }
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ if (conf->alpn_str) {
+ int len = conf->alpn_len;
+ char *ptr = conf->alpn_str;
+ int comma = 0;
+
+ if (space) chunk_appendf(buf, " ");
+ /* an empty ALPN string means the "no-alpn" keyword was used */
+ if (len)
+ chunk_appendf(buf, "alpn ");
+ else
+ chunk_appendf(buf, "no-alpn");
+ /* same length-prefixed wire format as NPN above */
+ while (len) {
+ unsigned short size;
+
+ size = *ptr;
+ ptr++;
+ if (comma)
+ chunk_memcat(buf, ",", 1);
+ chunk_memcat(buf, ptr, size);
+ ptr += size;
+ len -= size + 1;
+ comma = 1;
+ }
+ chunk_memcat(buf, "", 1); /* finish with a \0 */
+ space++;
+ }
+#endif
+ /* verify */
+ {
+ if (conf->verify == SSL_SOCK_VERIFY_NONE) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "verify none");
+ space++;
+ } else if (conf->verify == SSL_SOCK_VERIFY_OPTIONAL) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "verify optional");
+ space++;
+ } else if (conf->verify == SSL_SOCK_VERIFY_REQUIRED) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "verify required");
+ space++;
+ }
+ }
+
+ if (conf->no_ca_names) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "no-ca-names");
+ space++;
+ }
+
+ if (conf->early_data) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "allow-0rtt");
+ space++;
+ }
+ if (conf->ca_file) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ca-file %s", conf->ca_file);
+ space++;
+ }
+ if (conf->crl_file) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "crl-file %s", conf->crl_file);
+ space++;
+ }
+ if (conf->ciphers) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ciphers %s", conf->ciphers);
+ space++;
+ }
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ if (conf->ciphersuites) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ciphersuites %s", conf->ciphersuites);
+ space++;
+ }
+#endif
+ if (conf->curves) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "curves %s", conf->curves);
+ space++;
+ }
+ if (conf->ecdhe) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ecdhe %s", conf->ecdhe);
+ space++;
+ }
+
+ /* the crt-lists only support ssl-min-ver and ssl-max-ver */
+ if (conf->ssl_methods_cfg.min) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ssl-min-ver %s", methodVersions[conf->ssl_methods_cfg.min].name);
+ space++;
+ }
+
+ if (conf->ssl_methods_cfg.max) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ssl-max-ver %s", methodVersions[conf->ssl_methods_cfg.max].name);
+ space++;
+ }
+
+ /* only dump ocsp-update when it differs from the default */
+ if (conf->ocsp_update != SSL_SOCK_OCSP_UPDATE_DFLT) {
+ if (space) chunk_appendf(buf, " ");
+ chunk_appendf(buf, "ocsp-update %s",
+ conf->ocsp_update == SSL_SOCK_OCSP_UPDATE_OFF ? "off" : "on");
+ space++;
+ }
+
+ chunk_appendf(buf, "]");
+
+ return;
+}
+
+/* dump a list of filters */
+static void dump_crtlist_filters(struct buffer *buf, struct crtlist_entry *entry)
+{
+ int idx;
+
+ /* nothing to emit when the entry carries no filter */
+ if (entry->fcount == 0)
+ return;
+
+ for (idx = 0; idx < entry->fcount; idx++)
+ chunk_appendf(buf, " %s", entry->filters[idx]);
+}
+
+/************************** CLI functions ****************************/
+
+
+/* CLI IO handler for '(show|dump) ssl crt-list'.
+ * It uses show_crtlist_ctx for the context.
+ *
+ * Dumps the name of every known crt-list, one per line. Returns 1 once the
+ * dump is complete (or on allocation failure), or 0 to be called again when
+ * the output buffer is full, in which case the current tree node is saved
+ * in the context so the dump can resume from it.
+ */
+static int cli_io_handler_dump_crtlist(struct appctx *appctx)
+{
+ struct show_crtlist_ctx *ctx = appctx->svcctx;
+ struct buffer *trash = alloc_trash_chunk();
+ struct ebmb_node *lnode;
+
+ if (trash == NULL)
+ return 1;
+
+ /* dump the list of crt-lists */
+ lnode = ctx->crtlist_node;
+ if (lnode == NULL)
+ lnode = ebmb_first(&crtlists_tree);
+ while (lnode) {
+ chunk_appendf(trash, "%s\n", lnode->key);
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+ lnode = ebmb_next(lnode);
+ }
+ free_trash_chunk(trash);
+ return 1;
+yield:
+ /* output buffer full: remember where to restart from */
+ ctx->crtlist_node = lnode;
+ free_trash_chunk(trash);
+ return 0;
+}
+
+/* CLI IO handler for '(show|dump) ssl crt-list <filename>'
+ *
+ * Dumps every entry of the crt-list selected by the parser, each with its
+ * ssl_conf and filters; 'show' mode ('s') also appends the line number.
+ * Returns 1 when done, or 0 to yield, in which case the entry to resume
+ * from is saved in the context.
+ */
+static int cli_io_handler_dump_crtlist_entries(struct appctx *appctx)
+{
+ struct show_crtlist_ctx *ctx = appctx->svcctx;
+ struct buffer *trash = alloc_trash_chunk();
+ struct crtlist *crtlist;
+ struct crtlist_entry *entry;
+
+ if (trash == NULL)
+ return 1;
+
+ crtlist = ebmb_entry(ctx->crtlist_node, struct crtlist, node);
+
+ entry = ctx->entry;
+ if (entry == NULL) {
+ /* first call: start from the first ordered entry and emit the
+ * "# <filename>" header line */
+ entry = LIST_ELEM((crtlist->ord_entries).n, typeof(entry), by_crtlist);
+ chunk_appendf(trash, "# %s\n", crtlist->node.key);
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+ }
+
+ list_for_each_entry_from(entry, &crtlist->ord_entries, by_crtlist) {
+ struct ckch_store *store;
+ const char *filename;
+
+ store = entry->node.key;
+ filename = store->path;
+ chunk_appendf(trash, "%s", filename);
+ if (ctx->mode == 's') /* show */
+ chunk_appendf(trash, ":%d", entry->linenum);
+ dump_crtlist_sslconf(trash, entry->ssl_conf);
+ dump_crtlist_filters(trash, entry);
+ chunk_appendf(trash, "\n");
+
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+ }
+ free_trash_chunk(trash);
+ return 1;
+yield:
+ /* remember the entry to restart from on the next call */
+ ctx->entry = entry;
+ free_trash_chunk(trash);
+ return 0;
+}
+
+/* CLI argument parser for '(show|dump) ssl crt-list'
+ *
+ * "-n" selects 'show' mode ('s'), which also displays line numbers;
+ * otherwise 'dump' mode ('d') is used. When a filename is given, the IO
+ * handler is switched to the per-file entry dumper. Requires admin level.
+ * Returns 0 on success so the IO handler runs, 1 or a CLI error otherwise.
+ */
+static int cli_parse_dump_crtlist(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_crtlist_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ struct ebmb_node *lnode;
+ char *filename = NULL;
+ int mode;
+ char *end;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (*args[3] && strcmp(args[3], "-n") == 0) {
+ mode = 's';
+ filename = args[4];
+ } else {
+ mode = 'd';
+ filename = args[3];
+ }
+
+ if (mode == 's' && !*args[4])
+ return cli_err(appctx, "'show ssl crt-list -n' expects a filename or a directory\n");
+
+ if (filename && *filename) {
+
+
+ /* strip trailing slashes, including first one */
+ for (end = filename + strlen(filename) - 1; end >= filename && *end == '/'; end--)
+ *end = 0;
+
+ /* the crt-list must already be known to the tree */
+ lnode = ebst_lookup(&crtlists_tree, filename);
+ if (lnode == NULL)
+ return cli_err(appctx, "didn't find the specified filename\n");
+
+ ctx->crtlist_node = lnode;
+ appctx->io_handler = cli_io_handler_dump_crtlist_entries;
+ }
+ ctx->mode = mode;
+
+ return 0;
+}
+
+/* release function of the "add ssl crt-list' command, free things and unlock
+ * the spinlock. It uses the add_crtlist_ctx.
+ *
+ * A non-NULL ctx->entry means the insertion did not complete (the IO
+ * handler clears it on success), so the entry and every ckch instance
+ * generated from it are rolled back here before releasing the CKCH lock
+ * that was taken by the parsing function.
+ */
+static void cli_release_add_crtlist(struct appctx *appctx)
+{
+ struct add_crtlist_ctx *ctx = appctx->svcctx;
+ struct crtlist_entry *entry = ctx->entry;
+
+ if (entry) {
+ struct ckch_inst *inst, *inst_s;
+
+ /* upon error free the ckch_inst and everything inside */
+ ebpt_delete(&entry->node);
+ LIST_DELETE(&entry->by_crtlist);
+ LIST_DELETE(&entry->by_ckch_store);
+
+ list_for_each_entry_safe(inst, inst_s, &entry->ckch_inst, by_ckchs) {
+ ckch_inst_free(inst);
+ }
+ crtlist_free_filters(entry->filters);
+ ssl_sock_free_ssl_conf(entry->ssl_conf);
+ free(entry->ssl_conf);
+ free(entry);
+ }
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ ha_free(&ctx->err);
+}
+
+
+/* IO Handler for the "add ssl crt-list" command It adds a new entry in the
+ * crt-list and generates the ckch_insts for each bind_conf that uses this crt-list
+ *
+ * The logic is the same as the "commit ssl cert" command but without the
+ * freeing of the old structures, because there are none.
+ *
+ * It uses the add_crtlist_ctx for the context.
+ *
+ * This is a small state machine (INIT -> GEN -> INSERT -> SUCCESS) so the
+ * instance generation can yield when the output buffer is full or after a
+ * batch of generations, and resume from the bind_conf node saved in the
+ * context. Returns 1 when finished (release handler is then called), 0 to
+ * be called again.
+ */
+static int cli_io_handler_add_crtlist(struct appctx *appctx)
+{
+ struct add_crtlist_ctx *ctx = appctx->svcctx;
+ struct bind_conf_list *bind_conf_node;
+ struct stconn *sc = appctx_sc(appctx);
+ struct crtlist *crtlist = ctx->crtlist;
+ struct crtlist_entry *entry = ctx->entry;
+ struct ckch_store *store = entry->node.key;
+ struct ckch_inst *new_inst;
+ int i = 0;
+ int errcode = 0;
+
+ /* for each bind_conf which use the crt-list, a new ckch_inst must be
+ * created.
+ */
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE))
+ goto end;
+
+ switch (ctx->state) {
+ case ADDCRT_ST_INIT:
+ /* This state just print the update message */
+ chunk_printf(&trash, "Inserting certificate '%s' in crt-list '%s'", store->path, crtlist->node.key);
+ if (applet_putchk(appctx, &trash) == -1)
+ goto yield;
+ ctx->state = ADDCRT_ST_GEN;
+ __fallthrough;
+ case ADDCRT_ST_GEN:
+ bind_conf_node = ctx->bind_conf_node; /* get the previous ptr from the yield */
+ if (bind_conf_node == NULL)
+ bind_conf_node = crtlist->bind_conf;
+ for (; bind_conf_node; bind_conf_node = bind_conf_node->next) {
+ struct bind_conf *bind_conf = bind_conf_node->bind_conf;
+ struct sni_ctx *sni;
+
+ /* save the resume point before any chance to yield */
+ ctx->bind_conf_node = bind_conf_node;
+
+ /* yield every 10 generations */
+ if (i > 10) {
+ applet_have_more_data(appctx); /* let's come back later */
+ goto yield;
+ }
+
+ /* display one dot for each new instance */
+ if (applet_putstr(appctx, ".") == -1)
+ goto yield;
+
+ /* we don't support multi-cert bundles, only simple ones */
+ ctx->err = NULL;
+ errcode |= ckch_inst_new_load_store(store->path, store, bind_conf, entry->ssl_conf, entry->filters, entry->fcount, &new_inst, &ctx->err);
+ if (errcode & ERR_CODE) {
+ ctx->state = ADDCRT_ST_ERROR;
+ goto error;
+ }
+
+ /* we need to initialize the SSL_CTX generated */
+ /* this iterate on the newly generated SNIs in the new instance to prepare their SSL_CTX */
+ list_for_each_entry(sni, &new_inst->sni_ctx, by_ckch_inst) {
+ if (!sni->order) { /* we initialized only the first SSL_CTX because it's the same in the other sni_ctx's */
+ ctx->err = NULL;
+ errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, new_inst->ssl_conf, sni->ctx, sni->ckch_inst, &ctx->err);
+ if (errcode & ERR_CODE) {
+ ctx->state = ADDCRT_ST_ERROR;
+ goto error;
+ }
+ }
+ }
+
+ i++;
+ LIST_APPEND(&store->ckch_inst, &new_inst->by_ckchs);
+ LIST_APPEND(&entry->ckch_inst, &new_inst->by_crtlist_entry);
+ new_inst->crtlist_entry = entry;
+ }
+ ctx->state = ADDCRT_ST_INSERT;
+ __fallthrough;
+ case ADDCRT_ST_INSERT:
+ /* the insertion is called for every instance of the store, not
+ * only the one we generated.
+ * But the ssl_sock_load_cert_sni() skip the sni already
+ * inserted. Not every instance has a bind_conf, it could be
+ * the store of a server so we should be careful */
+
+ list_for_each_entry(new_inst, &store->ckch_inst, by_ckchs) {
+ if (!new_inst->bind_conf) /* this is a server instance */
+ continue;
+ HA_RWLOCK_WRLOCK(SNI_LOCK, &new_inst->bind_conf->sni_lock);
+ ssl_sock_load_cert_sni(new_inst, new_inst->bind_conf);
+ HA_RWLOCK_WRUNLOCK(SNI_LOCK, &new_inst->bind_conf->sni_lock);
+ }
+ entry->linenum = ++crtlist->linecount;
+ /* clearing ctx->entry tells the release handler the insertion
+ * succeeded, so nothing gets rolled back */
+ ctx->entry = NULL;
+ ctx->state = ADDCRT_ST_SUCCESS;
+ __fallthrough;
+ case ADDCRT_ST_SUCCESS:
+ chunk_reset(&trash);
+ chunk_appendf(&trash, "\n");
+ if (ctx->err)
+ chunk_appendf(&trash, "%s", ctx->err);
+ chunk_appendf(&trash, "Success!\n");
+ if (applet_putchk(appctx, &trash) == -1)
+ goto yield;
+ ctx->state = ADDCRT_ST_FIN;
+ break;
+
+ case ADDCRT_ST_ERROR:
+ error:
+ chunk_printf(&trash, "\n%sFailed!\n", ctx->err);
+ if (applet_putchk(appctx, &trash) == -1)
+ goto yield;
+ break;
+
+ default:
+ break;
+ }
+
+end:
+ /* success: call the release function and don't come back */
+ return 1;
+yield:
+ return 0; /* should come back */
+}
+
+
+/*
+ * Parse a "add ssl crt-list <crt-list> <certfile>" line.
+ * Filters and option must be passed through payload.
+ * It sets a struct add_crtlist_ctx.
+ *
+ * The CKCH spinlock is taken here and, on success, is only released by the
+ * release handler once the IO handler is done. Returns 0 on success so the
+ * IO handler runs, 1 or a dynamic CLI error otherwise.
+ */
+static int cli_parse_add_crtlist(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct add_crtlist_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ int cfgerr = 0;
+ struct ckch_store *store;
+ char *err = NULL;
+ char path[MAXPATHLEN+1];
+ char *crtlist_path;
+ char *cert_path = NULL;
+ struct ebmb_node *eb;
+ struct ebpt_node *inserted;
+ struct crtlist *crtlist;
+ struct crtlist_entry *entry = NULL;
+ char *end;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3] || (!payload && !*args[4]))
+ return cli_err(appctx, "'add ssl crtlist' expects a filename and a certificate name\n");
+
+ crtlist_path = args[3];
+
+ /* strip trailing slashes, including first one */
+ for (end = crtlist_path + strlen(crtlist_path) - 1; end >= crtlist_path && *end == '/'; end--)
+ *end = 0;
+
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Operations on certificates are currently locked!\n");
+
+ eb = ebst_lookup(&crtlists_tree, crtlist_path);
+ if (!eb) {
+ memprintf(&err, "crt-list '%s' does not exist!", crtlist_path);
+ goto error;
+ }
+ crtlist = ebmb_entry(eb, struct crtlist, node);
+
+ entry = crtlist_entry_new();
+ if (entry == NULL) {
+ memprintf(&err, "Not enough memory!");
+ goto error;
+ }
+
+ if (payload) {
+ char *lf;
+
+ /* only a single crt-list line may be passed as payload */
+ lf = strrchr(payload, '\n');
+ if (lf) {
+ memprintf(&err, "only one line of payload is supported!");
+ goto error;
+ }
+ /* cert_path is filled here */
+ cfgerr |= crtlist_parse_line(payload, &cert_path, entry, "CLI", 1, 1, &err);
+ if (cfgerr & ERR_CODE)
+ goto error;
+ } else {
+ cert_path = args[4];
+ }
+
+ if (!cert_path) {
+ memprintf(&err, "'add ssl crtlist' should contain the certificate name in the payload");
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+
+ /* a tagged right branch means the entries tree is EB_ROOT_UNIQUE,
+ * i.e. this crt-list was loaded from a directory */
+ if (eb_gettag(crtlist->entries.b[EB_RGHT])) {
+ char *slash;
+
+ slash = strrchr(cert_path, '/');
+ if (!slash) {
+ memprintf(&err, "'%s' is a directory, certificate path '%s' must contain the directory path", (char *)crtlist->node.key, cert_path);
+ goto error;
+ }
+ /* temporary replace / by 0 to do an strcmp */
+ *slash = '\0';
+ if (strcmp(cert_path, (char*)crtlist->node.key) != 0) {
+ *slash = '/';
+ memprintf(&err, "'%s' is a directory, certificate path '%s' must contain the directory path", (char *)crtlist->node.key, cert_path);
+ goto error;
+ }
+ *slash = '/';
+ }
+
+ /* relative path: prepend the configured crt-base */
+ if (*cert_path != '/' && global_ssl.crt_base) {
+ /* NOTE(review): a snprintf() return equal to sizeof(path) also
+ * means truncation, so '>=' would be stricter here; the strlen
+ * pre-check makes this mostly harmless — confirm and align */
+ if ((strlen(global_ssl.crt_base) + 1 + strlen(cert_path)) > sizeof(path) ||
+ snprintf(path, sizeof(path), "%s/%s", global_ssl.crt_base, cert_path) > sizeof(path)) {
+ memprintf(&err, "'%s' : path too long", cert_path);
+ cfgerr |= ERR_ALERT | ERR_FATAL;
+ goto error;
+ }
+ cert_path = path;
+ }
+
+ /* the certificate must already be loaded and non-empty */
+ store = ckchs_lookup(cert_path);
+ if (store == NULL) {
+ memprintf(&err, "certificate '%s' does not exist!", cert_path);
+ goto error;
+ }
+ if (store->data == NULL || store->data->cert == NULL) {
+ memprintf(&err, "certificate '%s' is empty!", cert_path);
+ goto error;
+ }
+
+ /* No need to check 'ocsp-update' inconsistency on a store that is not
+ * used yet (it was just added through the CLI for instance).
+ */
+ if (!LIST_ISEMPTY(&store->ckch_inst) &&
+ ocsp_update_check_cfg_consistency(store, entry, cert_path, &err))
+ goto error;
+
+ if (entry->ssl_conf)
+ store->data->ocsp_update_mode = entry->ssl_conf->ocsp_update;
+
+ /* check if it's possible to insert this new crtlist_entry */
+ entry->node.key = store;
+ inserted = ebpt_insert(&crtlist->entries, &entry->node);
+ if (inserted != &entry->node) {
+ memprintf(&err, "file already exists in this directory!");
+ goto error;
+ }
+
+ /* this is supposed to be a directory (EB_ROOT_UNIQUE), so no ssl_conf are allowed */
+ if ((entry->ssl_conf || entry->filters) && eb_gettag(crtlist->entries.b[EB_RGHT])) {
+ memprintf(&err, "this is a directory, SSL configuration and filters are not allowed");
+ goto error;
+ }
+
+ LIST_APPEND(&crtlist->ord_entries, &entry->by_crtlist);
+ entry->crtlist = crtlist;
+ LIST_APPEND(&store->crtlist_entry, &entry->by_ckch_store);
+
+ ctx->state = ADDCRT_ST_INIT;
+ ctx->crtlist = crtlist;
+ ctx->entry = entry;
+
+ /* unlock is done in the release handler */
+ return 0;
+
+error:
+ crtlist_entry_free(entry);
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "Can't edit the crt-list: %s\n", err ? err : "");
+ return cli_dynerr(appctx, err);
+}
+
+/* Parse a "del ssl crt-list <crt-list> <certfile>" line.
+ *
+ * An optional ":<linenum>" suffix on <certfile> selects one entry when the
+ * same certificate appears several times in the crt-list. Deletion is
+ * refused when one of the entry's instances is the default certificate of
+ * a frontend without strict-sni. Requires admin level; takes the CKCH lock
+ * for the whole operation.
+ */
+static int cli_parse_del_crtlist(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct ckch_store *store;
+ char *err = NULL;
+ char *crtlist_path, *cert_path;
+ struct ebmb_node *ebmb;
+ struct ebpt_node *ebpt;
+ struct crtlist *crtlist;
+ struct crtlist_entry *entry = NULL;
+ struct ckch_inst *inst, *inst_s;
+ int linenum = 0;
+ char *colons;
+ char *end;
+ int error_message_dumped = 0;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ if (!*args[3] || !*args[4])
+ return cli_err(appctx, "'del ssl crtlist' expects a filename and a certificate name\n");
+
+ if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock))
+ return cli_err(appctx, "Can't delete!\nOperations on certificates are currently locked!\n");
+
+ crtlist_path = args[3];
+ cert_path = args[4];
+
+ /* parse the optional line number following the colons */
+ colons = strchr(cert_path, ':');
+ if (colons) {
+ char *endptr;
+
+ linenum = strtol(colons + 1, &endptr, 10);
+ if (colons + 1 == endptr || *endptr != '\0') {
+ memprintf(&err, "wrong line number after colons in '%s'!", cert_path);
+ goto error;
+ }
+ *colons = '\0';
+ }
+
+ /* strip trailing slashes, including first one */
+ for (end = crtlist_path + strlen(crtlist_path) - 1; end >= crtlist_path && *end == '/'; end--)
+ *end = 0;
+
+ /* look for crtlist */
+ ebmb = ebst_lookup(&crtlists_tree, crtlist_path);
+ if (!ebmb) {
+ memprintf(&err, "crt-list '%s' does not exist!", crtlist_path);
+ goto error;
+ }
+ crtlist = ebmb_entry(ebmb, struct crtlist, node);
+
+ /* look for store */
+ store = ckchs_lookup(cert_path);
+ if (store == NULL) {
+ memprintf(&err, "certificate '%s' does not exist!", cert_path);
+ goto error;
+ }
+ if (store->data == NULL || store->data->cert == NULL) {
+ memprintf(&err, "certificate '%s' is empty!", cert_path);
+ goto error;
+ }
+
+ ebpt = ebpt_lookup(&crtlist->entries, store);
+ if (!ebpt) {
+ memprintf(&err, "certificate '%s' can't be found in crt-list '%s'!", cert_path, crtlist_path);
+ goto error;
+ }
+
+ /* list the line number of entries for errors in err, and select the right ebpt */
+ for (; ebpt; ebpt = ebpt_next_dup(ebpt)) {
+ struct crtlist_entry *tmp;
+
+ tmp = ebpt_entry(ebpt, struct crtlist_entry, node);
+ memprintf(&err, "%s%s%d", err ? err : "", err ? ", " : "", tmp->linenum);
+
+ /* select the entry we wanted */
+ if (linenum == 0 || tmp->linenum == linenum) {
+ if (!entry)
+ entry = tmp;
+ }
+ }
+
+ /* we didn't found the specified entry */
+ if (!entry) {
+ /* NOTE(review): 'err ? err : NULL' can feed NULL to a %s
+ * conversion — confirm memprintf tolerates it, or use "" */
+ memprintf(&err, "found a certificate '%s' but the line number is incorrect, please specify a correct line number preceded by colons (%s)!", cert_path, err ? err : NULL);
+ goto error;
+ }
+
+ /* we didn't specified a line number but there were several entries */
+ if (linenum == 0 && ebpt_next_dup(&entry->node)) {
+ memprintf(&err, "found the certificate '%s' in several entries, please specify a line number preceded by colons (%s)!", cert_path, err ? err : NULL);
+ goto error;
+ }
+
+ /* Iterate over all the instances in order to see if any of them is a
+ * default instance. If this is the case, the entry won't be suppressed. */
+ list_for_each_entry_safe(inst, inst_s, &entry->ckch_inst, by_crtlist_entry) {
+ if (inst->is_default && !inst->bind_conf->strict_sni) {
+ if (!error_message_dumped) {
+ memprintf(&err, "certificate '%s' cannot be deleted, it is used as default certificate by the following frontends:\n", cert_path);
+ error_message_dumped = 1;
+ }
+ memprintf(&err, "%s\t- %s:%d\n", err, inst->bind_conf->file, inst->bind_conf->line);
+ }
+ }
+ if (error_message_dumped)
+ goto error;
+
+ /* upon error free the ckch_inst and everything inside */
+
+ ebpt_delete(&entry->node);
+ LIST_DELETE(&entry->by_crtlist);
+ LIST_DELETE(&entry->by_ckch_store);
+
+ /* detach and free every instance generated from this entry, removing
+ * their SNIs under the owning bind_conf's SNI write lock */
+ list_for_each_entry_safe(inst, inst_s, &entry->ckch_inst, by_crtlist_entry) {
+ struct sni_ctx *sni, *sni_s;
+ struct ckch_inst_link_ref *link_ref, *link_ref_s;
+
+ HA_RWLOCK_WRLOCK(SNI_LOCK, &inst->bind_conf->sni_lock);
+ list_for_each_entry_safe(sni, sni_s, &inst->sni_ctx, by_ckch_inst) {
+ ebmb_delete(&sni->name);
+ LIST_DELETE(&sni->by_ckch_inst);
+ SSL_CTX_free(sni->ctx);
+ free(sni);
+ }
+ HA_RWLOCK_WRUNLOCK(SNI_LOCK, &inst->bind_conf->sni_lock);
+ LIST_DELETE(&inst->by_ckchs);
+ list_for_each_entry_safe(link_ref, link_ref_s, &inst->cafile_link_refs, list) {
+ LIST_DELETE(&link_ref->link->list);
+ LIST_DELETE(&link_ref->list);
+ free(link_ref);
+ }
+ ckch_inst_free(inst);
+ }
+
+ crtlist_free_filters(entry->filters);
+ ssl_sock_free_ssl_conf(entry->ssl_conf);
+ free(entry->ssl_conf);
+ free(entry);
+
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "Entry '%s' deleted in crtlist '%s'!\n", cert_path, crtlist_path);
+ return cli_dynmsg(appctx, LOG_NOTICE, err);
+
+error:
+ HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+ err = memprintf(&err, "Can't delete the entry: %s\n", err ? err : "");
+ return cli_dynerr(appctx, err);
+}
+
+
+/* unlink and free all crt-list and crt-list entries */
+void crtlist_deinit()
+{
+ struct eb_node *cur, *nxt;
+
+ /* grab the successor before freeing, as the free detaches the node */
+ cur = eb_first(&crtlists_tree);
+ while (cur) {
+ nxt = eb_next(cur);
+ crtlist_free(ebmb_entry(cur, struct crtlist, node));
+ cur = nxt;
+ }
+}
+
+
+/* register cli keywords: the "add"/"del"/"show ssl crt-list" commands and
+ * their parse/IO/release handlers defined above */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "add", "ssl", "crt-list", NULL }, "add ssl crt-list <list> <cert> [opts]* : add to crt-list file <list> a line <cert> or a payload", cli_parse_add_crtlist, cli_io_handler_add_crtlist, cli_release_add_crtlist },
+ { { "del", "ssl", "crt-list", NULL }, "del ssl crt-list <list> <cert[:line]> : delete a line <cert> from crt-list file <list>", cli_parse_del_crtlist, NULL, NULL },
+ { { "show", "ssl", "crt-list", NULL }, "show ssl crt-list [-n] [<list>] : show the list of crt-lists or the content of a crt-list file <list>", cli_parse_dump_crtlist, cli_io_handler_dump_crtlist, NULL },
+ { { NULL }, NULL, NULL, NULL } }
+};
+
+/* registration happens at boot time, before configuration parsing */
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
diff --git a/src/ssl_ocsp.c b/src/ssl_ocsp.c
new file mode 100644
index 0000000..1adddc4
--- /dev/null
+++ b/src/ssl_ocsp.c
@@ -0,0 +1,1986 @@
+
+/*
+ * SSL/TLS OCSP-related functions
+ *
+ * Copyright (C) 2022 HAProxy Technologies, Remi Tricot-Le Breton <rlebreton@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Acknowledgement:
+ * We'd like to specially thank the Stud project authors for a very clean
+ * and well documented code which helped us understand how the OpenSSL API
+ * ought to be used in non-blocking mode. This is one difficult part which
+ * is not easy to get from the OpenSSL doc, and reading the Stud code made
+ * it much more obvious than the examples in the OpenSSL package. Keep up
+ * the good works, guys !
+ *
+ * Stud is an extremely efficient and scalable SSL/TLS proxy which combines
+ * particularly well with haproxy. For more info about this project, visit :
+ * https://github.com/bumptech/stud
+ *
+ */
+
+/* Note: do NOT include openssl/xxx.h here, do it in openssl-compat.h */
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+
+#include <import/ebpttree.h>
+#include <import/ebsttree.h>
+#include <import/lru.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/base64.h>
+#include <haproxy/channel.h>
+#include <haproxy/chunk.h>
+#include <haproxy/cli.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/server.h>
+#include <haproxy/shctx.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_crtlist.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+#include <haproxy/xxhash.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/ssl_ocsp-t.h>
+#include <haproxy/http_client.h>
+
+
+/* ***** READ THIS before adding code here! *****
+ *
+ * Due to API incompatibilities between multiple OpenSSL versions and their
+ * derivatives, it's often tempting to add macros to (re-)define certain
+ * symbols. Please do not do this here, and do it in common/openssl-compat.h
+ * exclusively so that the whole code consistently uses the same macros.
+ *
+ * Whenever possible if a macro is missing in certain versions, it's better
+ * to conditionally define it in openssl-compat.h than using lots of ifdefs.
+ */
+
+#ifndef OPENSSL_NO_OCSP
+int ocsp_ex_index = -1;
+
+/* Map an EVP key type to its slot in the ocsp_cbk_arg m_ocsp[] array
+ * (DSA=0, EC=1, RSA=2). Returns -1 for any other key type.
+ */
+int ssl_sock_get_ocsp_arg_kt_index(int evp_keytype)
+{
+ if (evp_keytype == EVP_PKEY_RSA)
+ return 2;
+ if (evp_keytype == EVP_PKEY_DSA)
+ return 0;
+ if (evp_keytype == EVP_PKEY_EC)
+ return 1;
+
+ return -1;
+}
+
+/*
+ * Callback used to set OCSP status extension content in server hello.
+ * Registered via SSL_CTX_set_tlsext_status_cb; looks up the stapled OCSP
+ * response attached to the SSL_CTX's ex_data and, when it is present and not
+ * expired, copies it into an OPENSSL_malloc'd buffer handed to the library.
+ * Returns SSL_TLSEXT_ERR_OK when a response was set, SSL_TLSEXT_ERR_NOACK
+ * otherwise (no stapling performed, handshake continues).
+ */
+int ssl_sock_ocsp_stapling_cbk(SSL *ssl, void *arg)
+{
+ struct certificate_ocsp *ocsp;
+ struct ocsp_cbk_arg *ocsp_arg;
+ char *ssl_buf;
+ SSL_CTX *ctx;
+ EVP_PKEY *ssl_pkey;
+ int key_type;
+ int index;
+
+ ctx = SSL_get_SSL_CTX(ssl);
+ if (!ctx)
+ return SSL_TLSEXT_ERR_NOACK;
+
+ ocsp_arg = SSL_CTX_get_ex_data(ctx, ocsp_ex_index);
+ if (!ocsp_arg)
+ return SSL_TLSEXT_ERR_NOACK;
+
+ ssl_pkey = SSL_get_privatekey(ssl);
+ if (!ssl_pkey)
+ return SSL_TLSEXT_ERR_NOACK;
+
+ key_type = EVP_PKEY_base_id(ssl_pkey);
+
+ /* Single-cert context with a matching key type: direct hit. */
+ if (ocsp_arg->is_single && ocsp_arg->single_kt == key_type)
+ ocsp = ocsp_arg->s_ocsp;
+ else {
+ /* For multiple certs per context, we have to find the correct OCSP response based on
+ * the certificate type
+ */
+ index = ssl_sock_get_ocsp_arg_kt_index(key_type);
+
+ if (index < 0)
+ return SSL_TLSEXT_ERR_NOACK;
+
+ ocsp = ocsp_arg->m_ocsp[index];
+
+ }
+
+ /* Refuse to staple an empty or already-expired response. */
+ if (!ocsp ||
+ !ocsp->response.area ||
+ !ocsp->response.data ||
+ (ocsp->expire < date.tv_sec))
+ return SSL_TLSEXT_ERR_NOACK;
+
+ /* NOTE(review): per OpenSSL convention the buffer passed to
+ * SSL_set_tlsext_status_ocsp_resp is owned (and later freed) by the
+ * library, hence the OPENSSL_malloc'd copy — confirm against the
+ * minimum supported OpenSSL version. */
+ ssl_buf = OPENSSL_malloc(ocsp->response.data);
+ if (!ssl_buf)
+ return SSL_TLSEXT_ERR_NOACK;
+
+ memcpy(ssl_buf, ocsp->response.area, ocsp->response.data);
+ SSL_set_tlsext_status_ocsp_resp(ssl, (unsigned char*)ssl_buf, ocsp->response.data);
+
+ return SSL_TLSEXT_ERR_OK;
+}
+
+#endif /* !defined(OPENSSL_NO_OCSP) */
+
+
+#if (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP)
+
+/* Tree of certificate_ocsp entries, keyed by the ASN1-encoded OCSP
+ * certificate ID (unique keys). Protected by ocsp_tree_lock below. */
+struct eb_root cert_ocsp_tree = EB_ROOT_UNIQUE;
+
+/* Single lock protecting both cert_ocsp_tree and ocsp_update_tree. */
+__decl_thread(HA_SPINLOCK_T ocsp_tree_lock);
+
+struct eb_root ocsp_update_tree = EB_ROOT; /* updatable ocsp responses sorted by next_update in absolute time */
+
+/*
+ * Convert an OCSP_CERTID structure into a char buffer that can be used as a key
+ * in the OCSP response tree. It takes an <ocsp_cid> as parameter and builds a
+ * key of length <key_length> into the <certid> buffer. The key length cannot
+ * exceed OCSP_MAX_CERTID_ASN1_LENGTH bytes.
+ * Returns -1 if <key_length> is NULL, 1 if a key was successfully built, and
+ * 0 otherwise (missing <ocsp_cid>, encoding error, or encoded ID larger than
+ * OCSP_MAX_CERTID_ASN1_LENGTH), in which case <*key_length> is left at 0.
+ */
+int ssl_ocsp_build_response_key(OCSP_CERTID *ocsp_cid, unsigned char certid[OCSP_MAX_CERTID_ASN1_LENGTH], unsigned int *key_length)
+{
+ unsigned char *p = NULL;
+ int i;
+
+ if (!key_length)
+ return -1;
+
+ *key_length = 0;
+
+ if (!ocsp_cid)
+ return 0;
+
+ /* A first i2d pass with a NULL output pointer only computes the
+ * encoded length, so we can bound-check before writing. */
+ i = i2d_OCSP_CERTID(ocsp_cid, NULL);
+ if (!i || (i > OCSP_MAX_CERTID_ASN1_LENGTH))
+ return 0;
+
+ p = certid;
+ *key_length = i2d_OCSP_CERTID(ocsp_cid, &p);
+
+ return *key_length > 0;
+}
+
+/* This function starts to check if the OCSP response (in DER format) contained
+ * in chunk 'ocsp_response' is valid (else exits on error).
+ * If 'cid' is not NULL, it will be compared to the OCSP certificate ID
+ * contained in the OCSP Response and exits on error if no match.
+ * If it's a valid OCSP Response:
+ * If 'ocsp' is not NULL, the chunk is copied in the OCSP response's container
+ * pointed by 'ocsp'.
+ * If 'ocsp' is NULL, the function looks up into the OCSP response's
+ * containers tree (using as index the ASN1 form of the OCSP Certificate ID extracted
+ * from the response) and exits on error if not found. Finally, If an OCSP response is
+ * already present in the container, it will be overwritten.
+ *
+ * Note: OCSP response containing more than one OCSP Single response is not
+ * considered valid.
+ *
+ * Returns 0 on success, 1 in error case.
+ */
+int ssl_sock_load_ocsp_response(struct buffer *ocsp_response,
+ struct certificate_ocsp *ocsp,
+ OCSP_CERTID *cid, char **err)
+{
+ OCSP_RESPONSE *resp;
+ OCSP_BASICRESP *bs = NULL;
+ OCSP_SINGLERESP *sr;
+ OCSP_CERTID *id;
+ unsigned char *p = (unsigned char *) ocsp_response->area;
+ int rc , count_sr;
+ ASN1_GENERALIZEDTIME *revtime, *thisupd, *nextupd = NULL;
+ int reason;
+ int ret = 1;
+#ifdef HAVE_ASN1_TIME_TO_TM
+ struct tm nextupd_tm = {0};
+#endif
+
+ /* Parse the DER blob into an OCSP_RESPONSE structure. */
+ resp = d2i_OCSP_RESPONSE(NULL, (const unsigned char **)&p,
+ ocsp_response->data);
+ if (!resp) {
+ memprintf(err, "Unable to parse OCSP response");
+ goto out;
+ }
+
+ rc = OCSP_response_status(resp);
+ if (rc != OCSP_RESPONSE_STATUS_SUCCESSFUL) {
+ memprintf(err, "OCSP response status not successful");
+ goto out;
+ }
+
+ bs = OCSP_response_get1_basic(resp);
+ if (!bs) {
+ memprintf(err, "Failed to get basic response from OCSP Response");
+ goto out;
+ }
+
+ /* Only single-response payloads are accepted (see header comment). */
+ count_sr = OCSP_resp_count(bs);
+ if (count_sr > 1) {
+ memprintf(err, "OCSP response ignored because contains multiple single responses (%d)", count_sr);
+ goto out;
+ }
+
+ sr = OCSP_resp_get0(bs, 0);
+ if (!sr) {
+ memprintf(err, "Failed to get OCSP single response");
+ goto out;
+ }
+
+ id = (OCSP_CERTID*)OCSP_SINGLERESP_get0_id(sr);
+
+ /* "revoked" is accepted as well as "good": the response itself is
+ * valid and must be served to clients so they learn the revocation. */
+ rc = OCSP_single_get0_status(sr, &reason, &revtime, &thisupd, &nextupd);
+ if (rc != V_OCSP_CERTSTATUS_GOOD && rc != V_OCSP_CERTSTATUS_REVOKED) {
+ memprintf(err, "OCSP single response: certificate status is unknown");
+ goto out;
+ }
+
+ if (!nextupd) {
+ memprintf(err, "OCSP single response: missing nextupdate");
+ goto out;
+ }
+
+ rc = OCSP_check_validity(thisupd, nextupd, OCSP_MAX_RESPONSE_TIME_SKEW, -1);
+ if (!rc) {
+ memprintf(err, "OCSP single response: no longer valid.");
+ goto out;
+ }
+
+ if (cid) {
+ if (OCSP_id_cmp(id, cid)) {
+ memprintf(err, "OCSP single response: Certificate ID does not match certificate and issuer");
+ goto out;
+ }
+ }
+
+ if (!ocsp) {
+ /* No target container: locate it in the shared tree, indexed
+ * by the ASN1-encoded certificate ID of the single response. */
+ unsigned char key[OCSP_MAX_CERTID_ASN1_LENGTH];
+ unsigned char *p;
+
+ rc = i2d_OCSP_CERTID(id, NULL);
+ if (!rc) {
+ memprintf(err, "OCSP single response: Unable to encode Certificate ID");
+ goto out;
+ }
+
+ if (rc > OCSP_MAX_CERTID_ASN1_LENGTH) {
+ memprintf(err, "OCSP single response: Certificate ID too long");
+ goto out;
+ }
+
+ p = key;
+ memset(key, 0, OCSP_MAX_CERTID_ASN1_LENGTH);
+ i2d_OCSP_CERTID(id, &p);
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ ocsp = (struct certificate_ocsp *)ebmb_lookup(&cert_ocsp_tree, key, OCSP_MAX_CERTID_ASN1_LENGTH);
+ if (!ocsp) {
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+ memprintf(err, "OCSP single response: Certificate ID does not match any certificate or issuer");
+ goto out;
+ }
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+ /* NOTE(review): the tree lock is released before <ocsp> is
+ * written below; presumably the entry cannot vanish while we
+ * hold a reference or updates are serialized — confirm. */
+ }
+
+ /* According to comments on "chunk_dup", the
+ previous chunk buffer will be freed */
+ if (!chunk_dup(&ocsp->response, ocsp_response)) {
+ memprintf(err, "OCSP response: Memory allocation error");
+ goto out;
+ }
+
+ /* Record the expiration time, minus a safety skew margin. */
+#ifdef HAVE_ASN1_TIME_TO_TM
+ if (ASN1_TIME_to_tm(nextupd, &nextupd_tm) == 0) {
+ memprintf(err, "OCSP single response: Invalid \"Next Update\" time");
+ goto out;
+ }
+ ocsp->expire = my_timegm(&nextupd_tm) - OCSP_MAX_RESPONSE_TIME_SKEW;
+#else
+ ocsp->expire = asn1_generalizedtime_to_epoch(nextupd) - OCSP_MAX_RESPONSE_TIME_SKEW;
+ if (ocsp->expire < 0) {
+ memprintf(err, "OCSP single response: Invalid \"Next Update\" time");
+ goto out;
+ }
+#endif
+
+ ret = 0;
+out:
+ /* Drop any pending OpenSSL error so later calls start clean. */
+ ERR_clear_error();
+
+ if (bs)
+ OCSP_BASICRESP_free(bs);
+
+ if (resp)
+ OCSP_RESPONSE_free(resp);
+
+ return ret;
+}
+/*
+ * External function used to update the OCSP response in the OCSP response's
+ * containers tree. The chunk 'ocsp_response' must contain the OCSP response
+ * to update in DER format. The target entry is found by the certificate ID
+ * embedded in the response itself (NULL 'ocsp' and 'cid' arguments).
+ *
+ * Returns 0 on success, 1 in error case.
+ */
+int ssl_sock_update_ocsp_response(struct buffer *ocsp_response, char **err)
+{
+ return ssl_sock_load_ocsp_response(ocsp_response, NULL, NULL, err);
+}
+
+
+
+#if !defined OPENSSL_IS_BORINGSSL
+/*
+ * Decrease the refcount of the struct ocsp_response and frees it if it's not
+ * used anymore. Also removes it from the tree if free'd.
+ * Note: two refcounts coexist; refcount_store tracks owning ckch stores while
+ * refcount_instance tracks ckch instances (see ssl_sock_free_ocsp_instance).
+ * The store refcount must be the last one to drop.
+ */
+void ssl_sock_free_ocsp(struct certificate_ocsp *ocsp)
+{
+ if (!ocsp)
+ return;
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ ocsp->refcount_store--;
+ if (ocsp->refcount_store <= 0) {
+ /* No instance may still reference an entry whose stores are
+ * all gone; catching this here prevents a use-after-free. */
+ BUG_ON(ocsp->refcount_instance > 0);
+ ebmb_delete(&ocsp->key);
+ eb64_delete(&ocsp->next_update);
+ X509_free(ocsp->issuer);
+ ocsp->issuer = NULL;
+ sk_X509_pop_free(ocsp->chain, X509_free);
+ ocsp->chain = NULL;
+ chunk_destroy(&ocsp->response);
+ if (ocsp->uri) {
+ ha_free(&ocsp->uri->area);
+ ha_free(&ocsp->uri);
+ }
+
+ free(ocsp);
+ }
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+}
+
+/* Drop one instance reference on <ocsp>. Once the last instance is gone the
+ * entry leaves the auto-update tree; the structure itself is only released
+ * when the store refcount also reaches zero (see ssl_sock_free_ocsp).
+ */
+void ssl_sock_free_ocsp_instance(struct certificate_ocsp *ocsp)
+{
+ if (!ocsp)
+ return;
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ if (--ocsp->refcount_instance <= 0)
+ eb64_delete(&ocsp->next_update);
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+}
+
+
+/*
+ * This function dumps the details of an OCSP_CERTID. It is based on
+ * ocsp_certid_print in OpenSSL.
+ * Writes the issuer name hash, issuer key hash and serial number of <certid>
+ * to <bp>, indented by <indent> columns. Always returns 1.
+ */
+static inline int ocsp_certid_print(BIO *bp, OCSP_CERTID *certid, int indent)
+{
+ ASN1_OCTET_STRING *piNameHash = NULL;
+ ASN1_OCTET_STRING *piKeyHash = NULL;
+ ASN1_INTEGER *pSerial = NULL;
+
+ if (OCSP_id_get0_info(&piNameHash, NULL, &piKeyHash, &pSerial, certid)) {
+
+ BIO_printf(bp, "%*sCertificate ID:\n", indent, "");
+ indent += 2;
+ BIO_printf(bp, "%*sIssuer Name Hash: ", indent, "");
+#ifndef USE_OPENSSL_WOLFSSL
+ i2a_ASN1_STRING(bp, piNameHash, 0);
+#else
+ wolfSSL_ASN1_STRING_print(bp, piNameHash);
+#endif
+ BIO_printf(bp, "\n%*sIssuer Key Hash: ", indent, "");
+#ifndef USE_OPENSSL_WOLFSSL
+ i2a_ASN1_STRING(bp, piKeyHash, 0);
+#else
+ /* Bug fix: the wolfSSL branch used to print piNameHash again,
+ * dumping the name hash under the "Issuer Key Hash" label. */
+ wolfSSL_ASN1_STRING_print(bp, piKeyHash);
+#endif
+ BIO_printf(bp, "\n%*sSerial Number: ", indent, "");
+ i2a_ASN1_INTEGER(bp, pSerial);
+ }
+ return 1;
+}
+
+
+/* Output formats for "show ssl ocsp-response". */
+enum {
+ SHOW_OCSPRESP_FMT_DFLT,
+ SHOW_OCSPRESP_FMT_TEXT,
+ SHOW_OCSPRESP_FMT_B64
+};
+
+/* svcctx for the "show ssl ocsp-response" CLI applet. */
+struct show_ocspresp_cli_ctx {
+ struct certificate_ocsp *ocsp; /* entry being dumped */
+ int format; /* one of SHOW_OCSPRESP_FMT_* */
+};
+
+/*
+ * Dump the details about an OCSP response in DER format stored in
+ * <ocsp_response> into buffer <out>.
+ * Returns 0 in case of success.
+ */
+int ssl_ocsp_response_print(struct buffer *ocsp_response, struct buffer *out)
+{
+ BIO *bio = NULL;
+ int write = -1;
+ OCSP_RESPONSE *resp;
+ const unsigned char *p;
+ int retval = -1;
+
+ if (!ocsp_response)
+ return -1;
+
+ /* Render into a memory BIO first, then post-process the text. */
+ if ((bio = BIO_new(BIO_s_mem())) == NULL)
+ return -1;
+
+ p = (const unsigned char*)ocsp_response->area;
+
+ resp = d2i_OCSP_RESPONSE(NULL, &p, ocsp_response->data);
+ if (!resp) {
+ chunk_appendf(out, "Unable to parse OCSP response");
+ goto end;
+ }
+
+#ifndef USE_OPENSSL_WOLFSSL
+ if (OCSP_RESPONSE_print(bio, resp, 0) != 0) {
+#else
+ if (wolfSSL_d2i_OCSP_RESPONSE_bio(bio, &resp) != 0) {
+#endif
+ struct buffer *trash = get_trash_chunk();
+ struct ist ist_block = IST_NULL;
+ struct ist ist_double_lf = IST_NULL;
+ static struct ist double_lf = IST("\n\n");
+
+ /* Pull the whole textual dump out of the BIO (bounded by the
+ * trash chunk size). */
+ write = BIO_read(bio, trash->area, trash->size - 1);
+ if (write <= 0)
+ goto end;
+ trash->data = write;
+
+ /* Look for empty lines in the 'trash' buffer and add a space to
+ * the beginning to avoid having empty lines in the output
+ * (without changing the appearance of the information
+ * displayed).
+ */
+ ist_block = ist2(b_orig(trash), b_data(trash));
+
+ ist_double_lf = istist(ist_block, double_lf);
+
+ while (istlen(ist_double_lf)) {
+ /* istptr(ist_double_lf) points to the first \n of a
+ * \n\n pattern.
+ */
+ uint empty_line_offset = istptr(ist_double_lf) + 1 - istptr(ist_block);
+
+ /* Write up to the first '\n' of the "\n\n" pattern into
+ * the output buffer.
+ */
+ b_putblk(out, istptr(ist_block), empty_line_offset);
+ /* Add an extra space. */
+ b_putchr(out, ' ');
+
+ /* Keep looking for empty lines in the rest of the data. */
+ ist_block = istadv(ist_block, empty_line_offset);
+
+ ist_double_lf = istist(ist_block, double_lf);
+ }
+
+ /* Flush the remainder; success only if it fully fit in <out>. */
+ retval = (b_istput(out, ist_block) <= 0);
+ }
+
+end:
+ if (bio)
+ BIO_free(bio);
+
+ OCSP_RESPONSE_free(resp);
+
+ return retval;
+}
+
+/*
+ * Dump the contents of an OCSP response in DER format stored in
+ * <ocsp_response> into buffer <out> after converting it to base64.
+ * Returns 0 in case of success, 1 if the base64 conversion failed.
+ */
+static int ssl_ocsp_response_print_base64(struct buffer *ocsp_response, struct buffer *out)
+{
+ int b64len = 0;
+
+ b64len = a2base64(b_orig(ocsp_response), b_data(ocsp_response),
+ b_orig(out), b_size(out));
+
+ if (b64len < 0)
+ return 1;
+
+ out->data = b64len;
+
+ /* Add empty line.
+ * Bug fix: this used to append to <ocsp_response>, which would have
+ * corrupted the stored DER response instead of terminating the dump. */
+ chunk_appendf(out, "\n");
+
+ return 0;
+}
+
+/*
+ * Dump the details of the OCSP response of ID <ocsp_certid> into buffer <out>.
+ * Returns 0 in case of success, -1 if no entry matches <ocsp_certid>.
+ */
+int ssl_get_ocspresponse_detail(unsigned char *ocsp_certid, struct buffer *out)
+{
+ struct certificate_ocsp *entry;
+ int retval = -1;
+
+ /* Keep the lock for the whole lookup + dump so the response cannot
+ * be freed or replaced underneath us. */
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ entry = (struct certificate_ocsp *)ebmb_lookup(&cert_ocsp_tree, ocsp_certid, OCSP_MAX_CERTID_ASN1_LENGTH);
+ if (entry)
+ retval = ssl_ocsp_response_print(&entry->response, out);
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ return retval;
+}
+
+
+/* IO handler of details "show ssl ocsp-response <id>".
+ * The current entry is taken from appctx->svcctx.
+ * Renders the response either as text or base64 depending on the requested
+ * format, then pushes it to the CLI channel. Returns 1 when done, 0 to be
+ * called again when the output channel is full (yield).
+ */
+static int cli_io_handler_show_ocspresponse_detail(struct appctx *appctx)
+{
+ struct buffer *trash = get_trash_chunk();
+ struct show_ocspresp_cli_ctx *ctx = appctx->svcctx;
+ struct certificate_ocsp *ocsp = ctx->ocsp;
+ int retval = 0;
+
+ switch (ctx->format) {
+ case SHOW_OCSPRESP_FMT_DFLT:
+ case SHOW_OCSPRESP_FMT_TEXT:
+ retval = ssl_ocsp_response_print(&ocsp->response, trash);
+ break;
+ case SHOW_OCSPRESP_FMT_B64:
+ retval = ssl_ocsp_response_print_base64(&ocsp->response, trash);
+ break;
+ }
+
+ /* Formatting error: stop the applet without output. */
+ if (retval)
+ return 1;
+
+ /* applet_putchk() returns -1 when the channel buffer is full; yield
+ * and retry on the next call with the same svcctx. */
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+
+ appctx->svcctx = NULL;
+ return 1;
+
+yield:
+ return 0;
+}
+
+/* CRYPTO_EX_DATA free callback attached to the SSL_CTX's ocsp_ex_index slot.
+ * Releases the instance references held by the ocsp_cbk_arg and the argument
+ * structure itself. Only <ptr> is used; the other parameters are imposed by
+ * the OpenSSL ex_data callback prototype.
+ */
+void ssl_sock_ocsp_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
+{
+ struct ocsp_cbk_arg *cb_arg = ptr;
+ int i;
+
+ if (!cb_arg)
+ return;
+
+ if (cb_arg->is_single) {
+ ssl_sock_free_ocsp_instance(cb_arg->s_ocsp);
+ cb_arg->s_ocsp = NULL;
+ } else {
+ for (i = 0; i < SSL_SOCK_NUM_KEYTYPES; i++) {
+ ssl_sock_free_ocsp_instance(cb_arg->m_ocsp[i]);
+ cb_arg->m_ocsp[i] = NULL;
+ }
+ }
+
+ free(cb_arg);
+}
+
+/*
+ * Extract the first OCSP URI (if any) contained in <cert> and write it into
+ * <out>.
+ * Returns 0 in case of success, 1 otherwise.
+ * NOTE(review): <err> is dereferenced unconditionally on the error paths, so
+ * callers must pass a non-NULL <err> — confirm all call sites do.
+ */
+int ssl_ocsp_get_uri_from_cert(X509 *cert, struct buffer *out, char **err)
+{
+ STACK_OF(OPENSSL_STRING) *ocsp_uri_stk = NULL;
+ int ret = 1;
+
+ if (!cert || !out)
+ goto end;
+
+ /* X509_get1_ocsp() allocates the stack; released via
+ * X509_email_free() below (tolerates NULL). */
+ ocsp_uri_stk = X509_get1_ocsp(cert);
+ if (ocsp_uri_stk == NULL) {
+ memprintf(err, "%sNo OCSP URL stack!\n", *err ? *err : "");
+ goto end;
+ }
+
+ /* Only the first URI of the stack is kept. */
+ if (!chunk_strcpy(out, sk_OPENSSL_STRING_value(ocsp_uri_stk, 0))) {
+ memprintf(err, "%sOCSP URI too long!\n", *err ? *err : "");
+ goto end;
+ }
+ if (b_data(out) == 0) {
+ memprintf(err, "%sNo OCSP URL!\n", *err ? *err : "");
+ goto end;
+ }
+
+ ret = 0;
+
+end:
+ X509_email_free(ocsp_uri_stk);
+ return ret;
+}
+
+/*
+ * Create the url and request body that make a proper OCSP request for the
+ * <certid>. The <req_url> parameter should already hold the OCSP URI that was
+ * extracted from the corresponding certificate. Depending on the size of the
+ * certid we will either append data to the <req_url> to create a proper URL
+ * that will be sent with a GET command, or the <req_body> will be constructed
+ * in case of a POST.
+ * Returns 0 in case of success, -1 otherwise.
+ * NOTE(review): like its siblings, this dereferences <err> on error paths and
+ * therefore requires a non-NULL <err>.
+ */
+int ssl_ocsp_create_request_details(const OCSP_CERTID *certid, struct buffer *req_url,
+ struct buffer *req_body, char **err)
+{
+ int errcode = -1;
+ OCSP_REQUEST *ocsp;
+ /* Scratch buffer for the DER-encoded request (thread-local trash). */
+ struct buffer *bin_request = get_trash_chunk();
+ unsigned char *outbuf = (unsigned char*)b_orig(bin_request);
+
+ ocsp = OCSP_REQUEST_new();
+ if (ocsp == NULL) {
+ memprintf(err, "%sCan't create OCSP_REQUEST\n", *err ? *err : "");
+ goto end;
+ }
+
+ /* add0: ownership of the copied certid goes to the request. */
+ if (OCSP_request_add0_id(ocsp, (OCSP_CERTID*)certid) == NULL) {
+ memprintf(err, "%sOCSP_request_add0_id() error\n", *err ? *err : "");
+ goto end;
+ }
+
+ bin_request->data = i2d_OCSP_REQUEST(ocsp, &outbuf);
+ if (b_data(bin_request) <= 0) {
+ memprintf(err, "%si2d_OCSP_REQUEST() error\n", *err ? *err : "");
+ goto end;
+ }
+
+ /* HTTP based OCSP requests can use either the GET or the POST method to
+ * submit their requests. To enable HTTP caching, small requests (that
+ * after encoding are less than 255 bytes), MAY be submitted using GET.
+ * If HTTP caching is not important, or the request is greater than 255
+ * bytes, the request SHOULD be submitted using POST.
+ */
+ if (b_data(bin_request) + b_data(req_url) < 0xff) {
+ struct buffer *b64buf = get_trash_chunk();
+ char *ret = NULL;
+ int base64_ret = 0;
+
+ chunk_strcat(req_url, "/");
+
+ base64_ret = a2base64(b_orig(bin_request), b_data(bin_request),
+ b_orig(b64buf), b_size(b64buf));
+
+ if (base64_ret < 0) {
+ memprintf(err, "%sa2base64() error\n", *err ? *err : "");
+ goto end;
+ }
+
+ b64buf->data = base64_ret;
+
+ /* Percent-encode the base64 blob straight into the tail of
+ * <req_url>; success only if the whole chunk fit ('\0' end). */
+ ret = encode_chunk((char*)b_stop(req_url), b_orig(req_url) + b_size(req_url), '%',
+ query_encode_map, b64buf);
+ if (ret && *ret == '\0') {
+ req_url->data = ret - b_orig(req_url);
+ errcode = 0;
+ }
+ }
+ else {
+ /* Too large for GET: ship the raw DER request as POST body. */
+ chunk_cpy(req_body, bin_request);
+ errcode = 0;
+ }
+
+
+end:
+ OCSP_REQUEST_free(ocsp);
+
+ return errcode;
+}
+
+/*
+ * Parse an OCSP_RESPONSE contained in <respbuf> and check its validity in
+ * regard to the contents of <ckch> or the <issuer> certificate.
+ * Certificate_ocsp structure does not keep a reference to the corresponding
+ * ckch_store so outside of a CLI context (see "send ssl ocsp-response"
+ * command), we only have an easy access to the issuer's certificate whose
+ * reference is held in the structure.
+ * Return 0 in case of success, 1 otherwise.
+ */
+int ssl_ocsp_check_response(STACK_OF(X509) *chain, X509 *issuer,
+ struct buffer *respbuf, char **err)
+{
+ int ret = 1;
+ int n;
+ OCSP_RESPONSE *response = NULL;
+ OCSP_BASICRESP *basic = NULL;
+ X509_STORE *store = NULL;
+ const unsigned char *start = (const unsigned char*)b_orig(respbuf);
+
+ /* At least one trust anchor is mandatory to verify the signature. */
+ if (!chain && !issuer) {
+ memprintf(err, "check_ocsp_response needs a certificate validation chain or an issuer certificate");
+ goto end;
+ }
+
+ response = d2i_OCSP_RESPONSE(NULL, &start, b_data(respbuf));
+ if (!response) {
+ memprintf(err, "d2i_OCSP_RESPONSE() failed");
+ goto end;
+ }
+
+ n = OCSP_response_status(response);
+
+ if (n != OCSP_RESPONSE_STATUS_SUCCESSFUL) {
+ memprintf(err, "OCSP response not successful (%d: %s)",
+ n, OCSP_response_status_str(n));
+ goto end;
+ }
+
+ basic = OCSP_response_get1_basic(response);
+ if (basic == NULL) {
+ memprintf(err, "OCSP_response_get1_basic() failed");
+ goto end;
+ }
+
+ /* Create a temporary store in which we add the certificate's chain
+ * certificates. We assume that all those certificates can be trusted
+ * because they were provided by the user.
+ * The only ssl item that needs to be verified here is the OCSP
+ * response.
+ */
+ store = X509_STORE_new();
+ if (!store) {
+ memprintf(err, "X509_STORE_new() failed");
+ goto end;
+ }
+
+ if (chain) {
+ int i = 0;
+ for (i = 0; i < sk_X509_num(chain); i++) {
+ X509 *cert = sk_X509_value(chain, i);
+ X509_STORE_add_cert(store, cert);
+ }
+ }
+
+ if (issuer)
+ X509_STORE_add_cert(store, issuer);
+
+ /* OCSP_TRUSTOTHER: certs in <chain> are trusted signers, which skips
+ * the full chain verification against the store. */
+ if (OCSP_basic_verify(basic, chain, store, OCSP_TRUSTOTHER) != 1) {
+ memprintf(err, "OCSP_basic_verify() failed");
+ goto end;
+ }
+
+ ret = 0;
+
+end:
+ X509_STORE_free(store);
+ OCSP_RESPONSE_free(response);
+ OCSP_BASICRESP_free(basic);
+ return ret;
+}
+
+
+/*
+ * OCSP-UPDATE RELATED FUNCTIONS AND STRUCTURES
+ */
+
+/* Single global task driving the OCSP auto-update machinery. */
+struct task *ocsp_update_task __read_mostly = NULL;
+static struct proxy *httpclient_ocsp_update_px;
+
+/* Context shared between the update task and the httpclient callbacks;
+ * only one update is in flight at a time. */
+static struct ssl_ocsp_task_ctx {
+ struct certificate_ocsp *cur_ocsp; /* entry currently being updated */
+ struct httpclient *hc; /* in-flight OCSP HTTP request */
+ struct appctx *appctx; /* httpclient applet, used for logging */
+ int flags; /* HC_F_RES_* progress flags */
+ int update_status; /* OCSP_UPDT_* result of last attempt */
+} ssl_ocsp_task_ctx;
+
+/* Extra header sent with every OCSP POST request. */
+const struct http_hdr ocsp_request_hdrs[] = {
+ { IST("Content-Type"), IST("application/ocsp-request") },
+ { IST_NULL, IST_NULL }
+};
+
+/* Status codes of an OCSP update attempt; indexes into ocsp_update_errors. */
+enum {
+ OCSP_UPDT_UNKNOWN = 0,
+ OCSP_UPDT_OK = 1,
+ OCSP_UPDT_ERR_HTTP_STATUS = 2,
+ OCSP_UPDT_ERR_HTTP_HDR = 3,
+ OCSP_UPDT_ERR_CHECK = 4,
+ OCSP_UPDT_ERR_INSERT = 5,
+ OCSP_UPDT_ERR_LAST /* Must be last */
+};
+
+/* Human-readable form of the OCSP_UPDT_* codes above. */
+const struct ist ocsp_update_errors[] = {
+ [OCSP_UPDT_UNKNOWN] = IST("Unknown"),
+ [OCSP_UPDT_OK] = IST("Update successful"),
+ [OCSP_UPDT_ERR_HTTP_STATUS] = IST("HTTP error"),
+ [OCSP_UPDT_ERR_HTTP_HDR] = IST("Missing \"ocsp-response\" header"),
+ [OCSP_UPDT_ERR_CHECK] = IST("OCSP response check failure"),
+ [OCSP_UPDT_ERR_INSERT] = IST("Error during insertion")
+};
+
+static struct task *ssl_ocsp_update_responses(struct task *task, void *context, unsigned int state);
+
+/*
+ * Create the main OCSP update task that will iterate over the OCSP responses
+ * stored in ocsp_update_tree and send an OCSP request via the http_client
+ * applet to the corresponding OCSP responder. The task will then be in charge
+ * of processing the response, verifying it and resinserting it in the actual
+ * ocsp response tree if the response is valid.
+ * Idempotent: a second call is a no-op once the task exists.
+ * Returns 0 in case of success, -1 on allocation failure.
+ */
+int ssl_create_ocsp_update_task(char **err)
+{
+ struct task *t;
+
+ if (ocsp_update_task)
+ return 0; /* Already created */
+
+ t = task_new_anywhere();
+ if (!t) {
+ memprintf(err, "parsing : failed to allocate global ocsp update task.");
+ return -1;
+ }
+
+ t->process = ssl_ocsp_update_responses;
+ t->context = NULL;
+ ocsp_update_task = t;
+
+ return 0;
+}
+
+/* Post-check hook: arm the OCSP update task for immediate execution once the
+ * configuration has been fully checked. Always returns 0. */
+static int ssl_ocsp_task_schedule()
+{
+ if (ocsp_update_task)
+ task_schedule(ocsp_update_task, now_ms);
+
+ return 0;
+}
+REGISTER_POST_CHECK(ssl_ocsp_task_schedule);
+
+void ssl_sock_free_ocsp(struct certificate_ocsp *ocsp);
+
+/* Tear down the OCSP auto-update machinery: empty the update tree, destroy
+ * the global task, release the reference held on the entry being updated (if
+ * any) and stop a pending httpclient request. The certificate_ocsp entries
+ * themselves are not freed here; their store refcounts still own them. */
+void ssl_destroy_ocsp_update_task(void)
+{
+ struct eb64_node *node, *next;
+ if (!ocsp_update_task)
+ return;
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ /* Detach every entry from the update tree (successor grabbed before
+ * each delete). */
+ node = eb64_first(&ocsp_update_tree);
+ while (node) {
+ next = eb64_next(node);
+ eb64_delete(node);
+ node = next;
+ }
+
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ task_destroy(ocsp_update_task);
+ ocsp_update_task = NULL;
+
+ /* Drop the store reference taken when the current update started. */
+ ssl_sock_free_ocsp(ssl_ocsp_task_ctx.cur_ocsp);
+ ssl_ocsp_task_ctx.cur_ocsp = NULL;
+
+ if (ssl_ocsp_task_ctx.hc) {
+ httpclient_stop_and_destroy(ssl_ocsp_task_ctx.hc);
+ ssl_ocsp_task_ctx.hc = NULL;
+ }
+}
+
+/* Compute the next_update key of <ocsp>: the earliest of "now + delay_max"
+ * and "expire - margin", then clamped to at least "now + delay_min" when a
+ * response already exists (see comment below). */
+static inline void ssl_ocsp_set_next_update(struct certificate_ocsp *ocsp)
+{
+ /* Only apply the safety margin when expire is large enough that the
+ * subtraction cannot go negative. */
+ int update_margin = (ocsp->expire >= SSL_OCSP_UPDATE_MARGIN) ? SSL_OCSP_UPDATE_MARGIN : 0;
+
+ ocsp->next_update.key = MIN(date.tv_sec + global_ssl.ocsp_update.delay_max,
+ ocsp->expire - update_margin);
+
+ /* An already existing valid OCSP response that expires within less than
+ * SSL_OCSP_UPDATE_DELAY_MIN or has no 'Next Update' field should not be
+ * updated more than once every 5 minutes in order to avoid continuous
+ * update of the same response. */
+ if (b_data(&ocsp->response))
+ ocsp->next_update.key = MAX(ocsp->next_update.key,
+ date.tv_sec + global_ssl.ocsp_update.delay_min);
+}
+
+/*
+ * Insert a certificate_ocsp structure into the ocsp_update_tree tree, in which
+ * entries are sorted by absolute date of the next update. The next_update key
+ * will be the smallest out of the actual expire value of the response and
+ * now+1H. This arbitrary 1H value ensures that ocsp responses are updated
+ * periodically even when they have a long expire time, while not overloading
+ * the system too much (in theory). Likewise, a minimum 5 minutes interval is
+ * defined in order to avoid updating too often responses that have a really
+ * short expire time or even no 'Next Update' at all.
+ * Always returns 0.
+ */
+int ssl_ocsp_update_insert(struct certificate_ocsp *ocsp)
+{
+ /* Set next_update based on current time and the various OCSP
+ * minimum/maximum update times.
+ */
+ ssl_ocsp_set_next_update(ocsp);
+
+ /* Successful path: reset the consecutive-failure counter used by
+ * ssl_ocsp_update_insert_after_error(). */
+ ocsp->fail_count = 0;
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ ocsp->updating = 0;
+ /* An entry with update_once set to 1 was only supposed to be updated
+ * once, it does not need to be reinserted into the update tree.
+ */
+ if (!ocsp->update_once)
+ eb64_insert(&ocsp_update_tree, &ocsp->next_update);
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ return 0;
+}
+
+/*
+ * Reinsert an entry in the update tree. The entry's next update time can not
+ * occur before now+SSL_OCSP_HTTP_ERR_REPLAY.
+ * This is supposed to be used in case of http error (ocsp responder unreachable
+ * for instance). This ensures that the entry does not get reinserted at the
+ * beginning of the tree every time.
+ * Always returns 0.
+ */
+int ssl_ocsp_update_insert_after_error(struct certificate_ocsp *ocsp)
+{
+ int replay_delay = 0;
+ int fail_shift;
+
+ /*
+ * Set next_update based on current time and the various OCSP
+ * minimum/maximum update times.
+ */
+ ssl_ocsp_set_next_update(ocsp);
+
+ ++ocsp->fail_count;
+
+ /* Cap the shift exponent: fail_count grows without bound across
+ * consecutive failures and shifting by >= the width of int would be
+ * undefined behavior. The MIN() against delay_max below already makes
+ * any exponent beyond ~16 irrelevant. */
+ fail_shift = MIN(ocsp->fail_count, 16);
+
+ /*
+ * The replay delay will be increased for every consecutive update
+ * failure, up to the SSL_OCSP_UPDATE_DELAY_MAX delay. It will ensure
+ * that the replay delay will be one minute for the first failure and
+ * will be multiplied by 2 for every subsequent failures, while still
+ * being at most 1 hour (with the current default values).
+ */
+ replay_delay = MIN(SSL_OCSP_HTTP_ERR_REPLAY * (1 << fail_shift),
+ global_ssl.ocsp_update.delay_max);
+
+ if (ocsp->next_update.key < date.tv_sec + replay_delay)
+ ocsp->next_update.key = date.tv_sec + replay_delay;
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ ocsp->updating = 0;
+ /* An entry with update_once set to 1 was only supposed to be updated
+ * once, it does not need to be reinserted into the update tree.
+ */
+ if (!ocsp->update_once)
+ eb64_insert(&ocsp_update_tree, &ocsp->next_update);
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ return 0;
+}
+
+/* The four httpclient callbacks below mark the corresponding HC_F_RES_*
+ * progress flag in the shared ssl_ocsp_task_ctx and wake the update task
+ * (its caller) so it can process the status line, headers, body and end of
+ * the OCSP HTTP response respectively. */
+void ocsp_update_response_stline_cb(struct httpclient *hc)
+{
+ struct task *task = hc->caller;
+
+ if (!task)
+ return;
+
+ ssl_ocsp_task_ctx.flags |= HC_F_RES_STLINE;
+ task_wakeup(task, TASK_WOKEN_MSG);
+}
+
+void ocsp_update_response_headers_cb(struct httpclient *hc)
+{
+ struct task *task = hc->caller;
+
+ if (!task)
+ return;
+
+ ssl_ocsp_task_ctx.flags |= HC_F_RES_HDR;
+ task_wakeup(task, TASK_WOKEN_MSG);
+}
+
+void ocsp_update_response_body_cb(struct httpclient *hc)
+{
+ struct task *task = hc->caller;
+
+ if (!task)
+ return;
+
+ ssl_ocsp_task_ctx.flags |= HC_F_RES_BODY;
+ task_wakeup(task, TASK_WOKEN_MSG);
+}
+
+void ocsp_update_response_end_cb(struct httpclient *hc)
+{
+ struct task *task = hc->caller;
+
+ if (!task)
+ return;
+
+ ssl_ocsp_task_ctx.flags |= HC_F_RES_END;
+ task_wakeup(task, TASK_WOKEN_MSG);
+}
+
+
+/*
+ * Send a log line that will use the dedicated proxy's error_logformat string.
+ * It uses the sess_log function instead of app_log for instance in order to
+ * benefit from the "generic" items that can be added to a log format line such
+ * as the date and frontend name that can be found at the beginning of the
+ * ocspupdate_log_format line.
+ * No-op when no httpclient applet (and thus no session) is available.
+ */
+static void ssl_ocsp_send_log()
+{
+ if (!ssl_ocsp_task_ctx.appctx)
+ return;
+
+ sess_log(ssl_ocsp_task_ctx.appctx->sess);
+}
+
+/*
+ * This is the main function of the ocsp auto update mechanism. It has two
+ * distinct parts and the branching to one or the other is completely based on
+ * the fact that the cur_ocsp pointer of the ssl_ocsp_task_ctx member is set.
+ *
+ * If the pointer is not set, we need to look at the first item of the update
+ * tree and see if it needs to be updated. If it does not we simply wait until
+ * the time is right and let the task asleep. If it does need to be updated, we
+ * simply build and send the corresponding ocsp request thanks to the
+ * http_client. The task is then sent to sleep with an expire time set to
+ * infinity. The http_client will wake it back up once the response is received
+ * (or a timeout occurs). Just note that during this whole process the
+ * cetificate_ocsp object corresponding to the entry being updated is taken out
+ * of the update tree and only stored in the ssl_ocsp_task_ctx context.
+ *
+ * Once the task is waken up by the http_client, it branches on the response
+ * processing part of the function which basically checks that the response is
+ * valid and inserts it into the ocsp_response tree. The task then goes back to
+ * sleep until another entry needs to be updated.
+ */
+static struct task *ssl_ocsp_update_responses(struct task *task, void *context, unsigned int state)
+{
+	unsigned int next_wakeup = 0;
+	struct eb64_node *eb;
+	struct certificate_ocsp *ocsp;
+	struct httpclient *hc = NULL;
+	struct buffer *req_url = NULL;
+	struct buffer *req_body = NULL;
+	OCSP_CERTID *certid = NULL;
+	struct ssl_ocsp_task_ctx *ctx = &ssl_ocsp_task_ctx;
+
+	if (ctx->cur_ocsp) {
+		/* An update is in process */
+		ocsp = ctx->cur_ocsp;
+		hc = ctx->hc;
+		/* Check the HTTP status line first: anything but 200 aborts
+		 * the update for this entry. */
+		if (ctx->flags & HC_F_RES_STLINE) {
+			if (hc->res.status != 200) {
+				ctx->update_status = OCSP_UPDT_ERR_HTTP_STATUS;
+				goto http_error;
+			}
+			ctx->flags &= ~HC_F_RES_STLINE;
+		}
+
+		if (ctx->flags & HC_F_RES_HDR) {
+			struct http_hdr *hdr;
+			int found = 0;
+			/* Look for "Content-Type" header which should have
+			 * "application/ocsp-response" value. */
+			for (hdr = hc->res.hdrs; isttest(hdr->v); hdr++) {
+				if (isteqi(hdr->n, ist("Content-Type")) &&
+				    isteqi(hdr->v, ist("application/ocsp-response"))) {
+					found = 1;
+					break;
+				}
+			}
+			if (!found) {
+				ctx->update_status = OCSP_UPDT_ERR_HTTP_HDR;
+				goto http_error;
+			}
+			ctx->flags &= ~HC_F_RES_HDR;
+		}
+
+		/* If the HC_F_RES_BODY is set, we still need for the
+		 * HC_F_RES_END flag to be set as well in order to be sure that
+		 * the body is complete. */
+
+		/* we must close only if F_RES_END is the last flag */
+		if (ctx->flags & HC_F_RES_END) {
+
+			/* Process the body that must be complete since
+			 * HC_F_RES_END is set. */
+			if (ctx->flags & HC_F_RES_BODY) {
+				if (ssl_ocsp_check_response(ocsp->chain, ocsp->issuer, &hc->res.buf, NULL)) {
+					ctx->update_status = OCSP_UPDT_ERR_CHECK;
+					goto http_error;
+				}
+
+				if (ssl_sock_update_ocsp_response(&hc->res.buf, NULL) != 0) {
+					ctx->update_status = OCSP_UPDT_ERR_INSERT;
+					goto http_error;
+				}
+
+				ctx->flags &= ~HC_F_RES_BODY;
+			}
+
+			ctx->flags &= ~HC_F_RES_END;
+
+			/* Update successful: account for it and log it. */
+			++ocsp->num_success;
+			ocsp->last_update = date.tv_sec;
+			ctx->update_status = OCSP_UPDT_OK;
+			ocsp->last_update_status = ctx->update_status;
+
+			ssl_ocsp_send_log();
+
+			/* Reinsert the entry into the update list so that it can be updated later */
+			ssl_ocsp_update_insert(ocsp);
+			/* Release the reference kept on the updated ocsp response. */
+			ssl_sock_free_ocsp_instance(ctx->cur_ocsp);
+			ctx->cur_ocsp = NULL;
+
+			HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+			/* Set next_wakeup to the new first entry of the tree */
+			eb = eb64_first(&ocsp_update_tree);
+			if (eb) {
+				if (eb->key > date.tv_sec)
+					next_wakeup = (eb->key - date.tv_sec)*1000;
+				else
+					next_wakeup = 0;
+			}
+			HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+			goto leave;
+		}
+
+		/* We did not receive the HC_F_RES_END flag yet, wait for it
+		 * before trying to update a new ocsp response. */
+		goto wait;
+	} else {
+		/* Look for next entry that needs to be updated. */
+		const unsigned char *p = NULL;
+
+		HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+		eb = eb64_first(&ocsp_update_tree);
+		if (!eb) {
+			HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+			goto wait;
+		}
+
+		/* First entry not due yet: sleep until its update time. */
+		if (eb->key > date.tv_sec) {
+			next_wakeup = (eb->key - date.tv_sec)*1000;
+			HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+			goto leave;
+		}
+
+		ocsp = eb64_entry(eb, struct certificate_ocsp, next_update);
+
+		/* Take the current entry out of the update tree, it will be
+		 * reinserted after the response is processed. */
+		eb64_delete(&ocsp->next_update);
+
+		/* Keep a reference on the entry for the whole duration of the
+		 * update (released in the error path or after the response is
+		 * processed). */
+		ocsp->updating = 1;
+		ocsp->refcount_instance++;
+		ctx->cur_ocsp = ocsp;
+		ocsp->last_update_status = OCSP_UPDT_UNKNOWN;
+
+		HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+		req_url = alloc_trash_chunk();
+		if (!req_url) {
+			goto leave;
+		}
+		req_body = alloc_trash_chunk();
+		if (!req_body) {
+			goto leave;
+		}
+
+		p = ocsp->key_data;
+
+		/* Decode the certid serialized into the entry's key. */
+		d2i_OCSP_CERTID(&certid, &p, ocsp->key_length);
+		if (!certid)
+			goto leave;
+
+		/* Copy OCSP URI stored in ocsp structure into req_url */
+		chunk_cpy(req_url, ocsp->uri);
+
+		/* Create ocsp request */
+		if (ssl_ocsp_create_request_details(certid, req_url, req_body, NULL) != 0) {
+			goto leave;
+		}
+
+		/* Depending on the processing that occurred in
+		 * ssl_ocsp_create_request_details we could either have to send
+		 * a GET or a POST request. */
+		hc = httpclient_new_from_proxy(httpclient_ocsp_update_px, task,
+		                               b_data(req_body) ? HTTP_METH_POST : HTTP_METH_GET,
+		                               ist2(b_orig(req_url), b_data(req_url)));
+		if (!hc) {
+			goto leave;
+		}
+
+		if (httpclient_req_gen(hc, hc->req.url, hc->req.meth,
+		                       b_data(req_body) ? ocsp_request_hdrs : NULL,
+		                       b_data(req_body) ? ist2(b_orig(req_body), b_data(req_body)) : IST_NULL) != ERR_NONE) {
+			goto leave;
+		}
+
+		/* The callbacks will raise the HC_F_RES_* flags in ctx->flags
+		 * and wake this task back up as the response comes in. */
+		hc->ops.res_stline = ocsp_update_response_stline_cb;
+		hc->ops.res_headers = ocsp_update_response_headers_cb;
+		hc->ops.res_payload = ocsp_update_response_body_cb;
+		hc->ops.res_end = ocsp_update_response_end_cb;
+
+		if (!(ctx->appctx = httpclient_start(hc))) {
+			goto leave;
+		}
+
+		ctx->flags = 0;
+		ctx->hc = hc;
+
+		/* ctx->cur_ocsp staying set is what marks an update as being
+		 * in progress. NOTE(review): the previous comment mentioned
+		 * keeping "the lock", but the OCSP tree lock was released
+		 * above -- presumably the in-progress marker was meant. */
+		goto wait;
+	}
+
+leave:
+	/* Common exit: reached on success and on internal errors. If
+	 * cur_ocsp is still set here, something failed mid-update. */
+	if (ctx->cur_ocsp) {
+		/* Something went wrong, reinsert the entry in the tree. */
+		++ctx->cur_ocsp->num_failure;
+		ssl_ocsp_update_insert_after_error(ctx->cur_ocsp);
+		/* Release the reference kept on the updated ocsp response. */
+		ssl_sock_free_ocsp_instance(ctx->cur_ocsp);
+		ctx->cur_ocsp = NULL;
+	}
+	if (hc)
+		httpclient_stop_and_destroy(hc);
+	ctx->hc = NULL;
+	free_trash_chunk(req_url);
+	free_trash_chunk(req_body);
+	task->expire = tick_add(now_ms, next_wakeup);
+	return task;
+
+wait:
+	/* Sleep until the http_client (or a CLI command) wakes us up. */
+	free_trash_chunk(req_url);
+	free_trash_chunk(req_body);
+	task->expire = TICK_ETERNITY;
+	return task;
+
+http_error:
+	/* Only reachable from the response-processing branch, where req_url
+	 * and req_body were never allocated. */
+	ssl_ocsp_send_log();
+	/* Reinsert certificate into update list so that it can be updated later */
+	if (ocsp) {
+		++ocsp->num_failure;
+		ocsp->last_update_status = ctx->update_status;
+		ssl_ocsp_update_insert_after_error(ocsp);
+	}
+
+	if (hc)
+		httpclient_stop_and_destroy(hc);
+	/* Release the reference kept on the updated ocsp response. */
+	ssl_sock_free_ocsp_instance(ctx->cur_ocsp);
+	HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+	/* Set next_wakeup to the new first entry of the tree */
+	eb = eb64_first(&ocsp_update_tree);
+	if (eb) {
+		if (eb->key > date.tv_sec)
+			next_wakeup = (eb->key - date.tv_sec)*1000;
+		else
+			next_wakeup = 0;
+	}
+	HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+	ctx->cur_ocsp = NULL;
+	ctx->hc = NULL;
+	ctx->flags = 0;
+	task->expire = tick_add(now_ms, next_wakeup);
+	return task;
+}
+
+/* Log format used for the status line emitted by ssl_ocsp_send_log() after
+ * every update attempt; relies on the ssl_ocsp_* sample fetches below, which
+ * are only valid while an update is in progress. */
+char ocspupdate_log_format[] = "%ci:%cp [%tr] %ft %[ssl_ocsp_certname] %[ssl_ocsp_status] %{+Q}[ssl_ocsp_status_str] %[ssl_ocsp_fail_cnt] %[ssl_ocsp_success_cnt]";
+
+/*
+ * Initialize the proxy for the OCSP update HTTP client with 2 servers, one for
+ * raw HTTP, the other for HTTPS.
+ */
+static int ssl_ocsp_update_precheck()
+{
+	/* initialize the OCSP update dedicated httpclient */
+	httpclient_ocsp_update_px = httpclient_create_proxy("<OCSP-UPDATE>");
+	if (!httpclient_ocsp_update_px)
+		return 1;
+	/* NOTE(review): strdup() result is not checked; presumably acceptable
+	 * at startup -- confirm that a NULL logformat string is tolerated. */
+	httpclient_ocsp_update_px->conf.error_logformat_string = strdup(ocspupdate_log_format);
+	httpclient_ocsp_update_px->conf.logformat_string = httpclient_log_format;
+	/* Do not drop log lines on normal processing for this proxy. */
+	httpclient_ocsp_update_px->options2 |= PR_O2_NOLOGNORM;
+
+	return 0;
+}
+
+/* initialize the proxy and servers for the HTTP client */
+
+REGISTER_PRE_CHECK(ssl_ocsp_update_precheck);
+
+
+/* CLI parser for "update ssl ocsp-response <certfile>".
+ *
+ * Rebuilds the OCSP certid key from the named ckch_store, moves the matching
+ * certificate_ocsp entry to the front of the update tree (key 0) and wakes
+ * the update task so the refresh happens asynchronously.
+ * Returns 0 on success, or a CLI dynamic error otherwise.
+ */
+static int cli_parse_update_ocsp_response(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *err = NULL;
+	struct ckch_store *ckch_store = NULL;
+	struct certificate_ocsp *ocsp = NULL;
+	int update_once = 0;
+	unsigned char key[OCSP_MAX_CERTID_ASN1_LENGTH] = {};
+	unsigned char *p;
+
+	if (!*args[3]) {
+		memprintf(&err, "'update ssl ocsp-response' expects a filename\n");
+		return cli_dynerr(appctx, err);
+	}
+
+	/* The operations on the CKCH architecture are locked so we can
+	 * manipulate ckch_store and ckch_inst */
+	if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock)) {
+		memprintf(&err, "%sCan't update the certificate!\nOperations on certificates are currently locked!\n", err ? err : "");
+		goto end;
+	}
+
+	ckch_store = ckchs_lookup(args[3]);
+
+	if (!ckch_store) {
+		memprintf(&err, "%sUnknown certificate! 'update ssl ocsp-response' expects an already known certificate file name.\n", err ? err : "");
+		HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+		goto end;
+	}
+
+	/* Serialize the certid into the lookup key. */
+	p = key;
+	i2d_OCSP_CERTID(ckch_store->data->ocsp_cid, &p);
+
+	HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+
+
+	HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+	ocsp = (struct certificate_ocsp *)ebmb_lookup(&cert_ocsp_tree, key, OCSP_MAX_CERTID_ASN1_LENGTH);
+	if (!ocsp) {
+		memprintf(&err, "%s'update ssl ocsp-response' only works on certificates that already have a known OCSP response.\n", err ? err : "");
+		HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+		goto end;
+	}
+
+	/* No need to try to update this response, it is already being updated. */
+	if (!ocsp->updating) {
+		update_once = (ocsp->next_update.node.leaf_p == NULL);
+		eb64_delete(&ocsp->next_update);
+
+		/* Insert the entry at the beginning of the update tree.
+		 * We don't need to increase the reference counter on the
+		 * certificate_ocsp structure because we would not have a way to
+		 * decrease it afterwards since this update operation is asynchronous.
+		 * If the corresponding entry were to be destroyed before the update can
+		 * be performed, which is pretty unlikely, it would not be such a
+		 * problem because that would mean that the OCSP response is not
+		 * actually used.
+		 */
+		ocsp->next_update.key = 0;
+		eb64_insert(&ocsp_update_tree, &ocsp->next_update);
+		ocsp->update_once = update_once;
+	}
+
+	HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+	if (!ocsp_update_task)
+		ssl_create_ocsp_update_task(&err);
+
+	/* Task creation may have failed (e.g. allocation error), in which
+	 * case ocsp_update_task is still NULL. Bail out instead of passing a
+	 * NULL task to task_wakeup(). */
+	if (!ocsp_update_task)
+		goto end;
+
+	task_wakeup(ocsp_update_task, TASK_WOKEN_MSG);
+
+	free(err);
+
+	return 0;
+
+end:
+	return cli_dynerr(appctx, memprintf(&err, "%sCan't send ocsp request for %s!\n", err ? err : "", args[3]));
+}
+
+#endif /* !defined OPENSSL_IS_BORINGSSL */
+
+
+#endif /* (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) */
+
+
+/* CLI parser for "set ssl ocsp-response <resp|payload>".
+ * Accepts a base64-encoded DER OCSP response either as an argument or as a
+ * multi-line payload, strips CR/LF in place, decodes it into the trash
+ * buffer and stores it via ssl_sock_update_ocsp_response().
+ */
+static int cli_parse_set_ocspresponse(char **args, char *payload, struct appctx *appctx, void *private)
+{
+#if (defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP)
+	char *err = NULL;
+	int i, j, ret;
+
+	if (!payload)
+		payload = args[3];
+
+	/* Expect one parameter: the new response in base64 encoding */
+	if (!*payload)
+		return cli_err(appctx, "'set ssl ocsp-response' expects response in base64 encoding.\n");
+
+	/* remove \r and \n from the payload */
+	for (i = 0, j = 0; payload[i]; i++) {
+		if (payload[i] == '\r' || payload[i] == '\n')
+			continue;
+		payload[j++] = payload[i];
+	}
+	payload[j] = 0;
+
+	ret = base64dec(payload, j, trash.area, trash.size);
+	if (ret < 0)
+		return cli_err(appctx, "'set ssl ocsp-response' received invalid base64 encoded response.\n");
+
+	trash.data = ret;
+	if (ssl_sock_update_ocsp_response(&trash, &err)) {
+		if (err)
+			return cli_dynerr(appctx, memprintf(&err, "%s.\n", err));
+		else
+			return cli_err(appctx, "Failed to update OCSP response.\n");
+	}
+
+	return cli_msg(appctx, LOG_INFO, "OCSP Response updated!\n");
+#else
+	return cli_err(appctx, "HAProxy was compiled against a version of OpenSSL that doesn't support OCSP stapling.\n");
+#endif
+
+}
+
+/* parsing function for 'show ssl ocsp-response [id]'. If an entry is forced,
+ * it's set into appctx->svcctx.
+ */
+static int cli_parse_show_ocspresponse(char **args, char *payload, struct appctx *appctx, void *private)
+{
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+
+	/* NOTE(review): ctx->format is read before being written below, so
+	 * this presumably relies on applet_reserve_svcctx() returning
+	 * zero-initialized storage (SHOW_OCSPRESP_FMT_DFLT) -- confirm. */
+	struct show_ocspresp_cli_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+	int arg_idx = 3;
+
+	if (*args[3]) {
+		struct certificate_ocsp *ocsp = NULL;
+		char key[OCSP_MAX_CERTID_ASN1_LENGTH] = {};
+		int key_length = OCSP_MAX_CERTID_ASN1_LENGTH;
+		char *key_ptr = key;
+		unsigned char *p;
+		struct ckch_store *ckch_store = NULL;
+
+		/* Optional leading "text"/"base64" output format selector. */
+		if (strcmp(args[3], "text") == 0) {
+			ctx->format = SHOW_OCSPRESP_FMT_TEXT;
+			++arg_idx;
+		} else if (strcmp(args[3], "base64") == 0) {
+			ctx->format = SHOW_OCSPRESP_FMT_B64;
+			++arg_idx;
+		}
+
+		if (ctx->format != SHOW_OCSPRESP_FMT_DFLT && !*args[arg_idx])
+			return cli_err(appctx, "'show ssl ocsp-response [text|base64]' expects a valid certid.\n");
+
+		/* Try to convert parameter into an OCSP certid first, and consider it
+		 * as a filename if it fails. */
+		if (strlen(args[arg_idx]) > OCSP_MAX_CERTID_ASN1_LENGTH*2 ||
+		    !parse_binary(args[arg_idx], &key_ptr, &key_length, NULL)) {
+
+			key_ptr = key;
+			key_length = 0;
+
+			/* The operations on the CKCH architecture are locked so we can
+			 * manipulate ckch_store and ckch_inst */
+			if (HA_SPIN_TRYLOCK(CKCH_LOCK, &ckch_lock)) {
+				return cli_err(appctx, "Operations on certificates are currently locked!\n");
+			}
+
+			ckch_store = ckchs_lookup(args[arg_idx]);
+
+			if (ckch_store) {
+				/* Rebuild the certid key from the certificate. */
+				p = (unsigned char*)key;
+				key_length = i2d_OCSP_CERTID(ckch_store->data->ocsp_cid, &p);
+			}
+			HA_SPIN_UNLOCK(CKCH_LOCK, &ckch_lock);
+		}
+
+		if (key_length == 0) {
+			return cli_err(appctx, "'show ssl ocsp-response' expects a valid certid or certificate path.\n");
+		}
+
+		HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+		ocsp = (struct certificate_ocsp *)ebmb_lookup(&cert_ocsp_tree, key, OCSP_MAX_CERTID_ASN1_LENGTH);
+
+		if (!ocsp) {
+			HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+			return cli_err(appctx, "Certificate ID or path does not match any certificate.\n");
+		}
+		/* Keep a reference for the io handler; released by
+		 * cli_release_show_ocspresponse(). */
+		ocsp->refcount_instance++;
+		HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+		ctx->ocsp = ocsp;
+		appctx->io_handler = cli_io_handler_show_ocspresponse_detail;
+	}
+
+	return 0;
+
+#else
+	return cli_err(appctx, "HAProxy was compiled against a version of OpenSSL that doesn't support OCSP stapling.\n");
+#endif
+}
+
+/*
+ * IO handler of "show ssl ocsp-response". The command taking a specific ID
+ * is managed in cli_io_handler_show_ocspresponse_detail.
+ * The current entry is taken from appctx->svcctx.
+ */
+static int cli_io_handler_show_ocspresponse(struct appctx *appctx)
+{
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+	struct buffer *trash = alloc_trash_chunk();
+	struct buffer *tmp = NULL;
+	struct ebmb_node *node;
+	struct certificate_ocsp *ocsp = NULL;
+	BIO *bio = NULL;
+	int write = -1;
+	struct show_ocspresp_cli_ctx *ctx = appctx->svcctx;
+
+	if (trash == NULL)
+		return 1;
+
+	/* The tree lock is held for the whole dump and released on both the
+	 * end and yield paths below. */
+	HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+	tmp = alloc_trash_chunk();
+	if (!tmp)
+		goto end;
+
+	if ((bio = BIO_new(BIO_s_mem())) == NULL)
+		goto end;
+
+	/* Either resume from the entry saved at the previous yield, or start
+	 * from the first entry of the tree. */
+	if (!ctx->ocsp) {
+		chunk_appendf(trash, "# Certificate IDs\n");
+		node = ebmb_first(&cert_ocsp_tree);
+	} else {
+		node = &ctx->ocsp->key;
+	}
+
+	while (node) {
+		OCSP_CERTID *certid = NULL;
+		const unsigned char *p = NULL;
+		int i;
+
+		ocsp = ebmb_entry(node, struct certificate_ocsp, key);
+
+		/* Dump the key in hexadecimal */
+		chunk_appendf(trash, "Certificate ID key : ");
+		for (i = 0; i < ocsp->key_length; ++i) {
+			chunk_appendf(trash, "%02x", ocsp->key_data[i]);
+		}
+		chunk_appendf(trash, "\n");
+
+		/* Dump the certificate path */
+		chunk_appendf(trash, "Certificate path : %s\n", ocsp->path);
+
+		p = ocsp->key_data;
+
+		/* Decode the certificate ID (serialized into the key). */
+		d2i_OCSP_CERTID(&certid, &p, ocsp->key_length);
+		if (!certid)
+			goto end;
+
+		/* Dump the CERTID info */
+		ocsp_certid_print(bio, certid, 1);
+		OCSP_CERTID_free(certid);
+		write = BIO_read(bio, tmp->area, tmp->size-1);
+		/* strip trailing LFs */
+		while (write > 0 && tmp->area[write-1] == '\n')
+			write--;
+		tmp->area[write] = '\0';
+
+		chunk_appendf(trash, "%s\n", tmp->area);
+
+		node = ebmb_next(node);
+		if (applet_putchk(appctx, trash) == -1)
+			goto yield;
+	}
+
+end:
+	HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+	free_trash_chunk(trash);
+	free_trash_chunk(tmp);
+	BIO_free(bio);
+	return 1;
+
+yield:
+	free_trash_chunk(trash);
+	free_trash_chunk(tmp);
+	BIO_free(bio);
+
+	/* Keep a reference on the entry to dump on resume, released by
+	 * cli_release_show_ocspresponse(). NOTE(review): a reference taken by
+	 * an earlier yield does not appear to be dropped when ctx->ocsp is
+	 * overwritten here -- confirm whether repeated yields can leak one
+	 * refcount per intermediate entry. */
+	ocsp->refcount_instance++;
+	ctx->ocsp = ocsp;
+	HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+	return 0;
+#else
+	return cli_err(appctx, "HAProxy was compiled against a version of OpenSSL that doesn't support OCSP stapling.\n");
+#endif
+}
+
+/* Release handler of "show ssl ocsp-response": drops the reference kept on
+ * the current entry (parse time or last yield). */
+static void cli_release_show_ocspresponse(struct appctx *appctx)
+{
+	struct show_ocspresp_cli_ctx *ctx = appctx->svcctx;
+
+	if (ctx)
+		ssl_sock_free_ocsp(ctx->ocsp);
+}
+
+/* Check if the ckch_store and the entry does have the same configuration */
+int ocsp_update_check_cfg_consistency(struct ckch_store *store, struct crtlist_entry *entry, char *crt_path, char **err)
+{
+ int err_code = ERR_NONE;
+
+ if (store->data->ocsp_update_mode != SSL_SOCK_OCSP_UPDATE_DFLT || entry->ssl_conf) {
+ if ((!entry->ssl_conf && store->data->ocsp_update_mode == SSL_SOCK_OCSP_UPDATE_ON)
+ || (entry->ssl_conf && store->data->ocsp_update_mode != entry->ssl_conf->ocsp_update)) {
+ memprintf(err, "%sIncompatibilities found in OCSP update mode for certificate %s\n", err && *err ? *err : "", crt_path);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ }
+ }
+ return err_code;
+}
+
+/* Context of "show ssl ocsp-updates", stored in appctx->svcctx. */
+struct show_ocsp_updates_ctx {
+	struct certificate_ocsp *cur_ocsp;  /* last dumped entry, used to resume after a yield */
+};
+
+/*
+ * Parsing function for 'show ssl ocsp-updates [nb]'.
+ */
+static int cli_parse_show_ocsp_updates(char **args, char *payload, struct appctx *appctx, void *private)
+{
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+	/* Reserved for its side effect only: the io handler reads cur_ocsp
+	 * from appctx->svcctx. */
+	struct show_ocsp_updates_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+	/* The OCSP tree lock is deliberately kept across the whole dump; it
+	 * is released by cli_release_show_ocsp_updates(). */
+	HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+	return 0;
+#else
+	return cli_err(appctx, "HAProxy was compiled against a version of OpenSSL that doesn't support OCSP stapling.\n");
+#endif
+}
+
+/*
+ * Dump information about an ocsp response concerning ocsp auto update.
+ * It follows the following format :
+ * OCSP Certid | Path | Next Update | Last Update | Successes | Failures | Last Update Status | Last Update Status (str)
+ * Return 0 in case of success.
+ */
+static int dump_ocsp_update_info(struct certificate_ocsp *ocsp, struct buffer *out)
+{
+	struct tm tm = {};
+	char *ret;
+	int i;
+	time_t next_update;
+
+	/* Dump OCSP certid */
+	for (i = 0; i < ocsp->key_length; ++i) {
+		chunk_appendf(out, "%02x", ocsp->key_data[i]);
+	}
+
+	chunk_appendf(out, " | ");
+
+	/* Dump path */
+	chunk_appendf(out, "%s", ocsp->path);
+
+	chunk_appendf(out, " | ");
+
+	/* Dump next update time; an entry with key 0 (forced to the front of
+	 * the tree) is shown as due "now". */
+	if (ocsp->next_update.key != 0) {
+		next_update = ocsp->next_update.key;
+		get_localtime(ocsp->next_update.key, &tm);
+	} else {
+		next_update = date.tv_sec;
+		get_localtime(date.tv_sec, &tm);
+	}
+	/* localdate2str_log() writes directly past the current end of <out>,
+	 * so out->data must be adjusted manually from its return pointer. */
+	ret = localdate2str_log(b_orig(out)+b_data(out), next_update, &tm, b_size(out)-b_data(out));
+
+	if (ret == NULL)
+		return 1;
+
+	out->data = (ret - out->area);
+
+	chunk_appendf(out, " | ");
+
+	/* Dump last update time or "-" if no update occurred yet */
+	if (ocsp->last_update) {
+		get_localtime(ocsp->last_update, &tm);
+		ret = localdate2str_log(b_orig(out)+b_data(out), ocsp->last_update, &tm, b_size(out)-b_data(out));
+
+		if (ret == NULL)
+			return 1;
+
+		out->data = (ret - out->area);
+	} else
+		chunk_appendf(out, "-");
+
+	chunk_appendf(out, " | ");
+
+	/* Number of successful updates */
+	chunk_appendf(out, "%d", ocsp->num_success);
+
+	chunk_appendf(out, " | ");
+
+	/* Number of failed updates */
+	chunk_appendf(out, "%d", ocsp->num_failure);
+
+	chunk_appendf(out, " | ");
+
+	/* Last update status */
+	chunk_appendf(out, "%d", ocsp->last_update_status);
+
+	chunk_appendf(out, " | ");
+
+	/* Last update status str */
+	if (ocsp->last_update_status >= OCSP_UPDT_ERR_LAST)
+		chunk_appendf(out, "-");
+	else
+		chunk_appendf(out, "%s", istptr(ocsp_update_errors[ocsp->last_update_status]));
+
+	chunk_appendf(out, "\n");
+
+	return 0;
+}
+
+/* IO handler of "show ssl ocsp-updates". Dumps the entry currently being
+ * updated (if any) followed by all entries of the update tree. The OCSP tree
+ * lock was taken by the parse function and is held until the release
+ * handler runs, so the tree cannot change during yields. */
+static int cli_io_handler_show_ocsp_updates(struct appctx *appctx)
+{
+	struct show_ocsp_updates_ctx *ctx = appctx->svcctx;
+	struct eb64_node *node;
+	struct certificate_ocsp *ocsp = NULL;
+	struct buffer *trash = get_trash_chunk();
+
+	if (!ctx->cur_ocsp) {
+		/* First call: dump the header line. */
+		node = eb64_first(&ocsp_update_tree);
+		chunk_appendf(trash, "OCSP Certid | Path | Next Update | Last Update | Successes | Failures | Last Update Status | Last Update Status (str)\n");
+
+		/* Look for an entry currently being updated */
+		ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+		if (ocsp) {
+			if (dump_ocsp_update_info(ocsp, trash))
+				goto end;
+		}
+
+		if (applet_putchk(appctx, trash) == -1)
+			goto yield;
+
+	} else {
+		/* Resume from the entry saved at the previous yield. */
+		node = &((struct certificate_ocsp*)ctx->cur_ocsp)->next_update;
+	}
+
+	while (node) {
+		ocsp = eb64_entry(node, struct certificate_ocsp, next_update);
+
+		chunk_reset(trash);
+		if (dump_ocsp_update_info(ocsp, trash))
+			goto end;
+
+		if (applet_putchk(appctx, trash) == -1) {
+			ctx->cur_ocsp = ocsp;
+			goto yield;
+		}
+
+		node = eb64_next(node);
+	}
+
+end:
+	return 1;
+
+yield:
+	return 0; /* should come back */
+}
+
+/* Release handler of "show ssl ocsp-updates": drops the tree lock taken by
+ * cli_parse_show_ocsp_updates(). */
+static void cli_release_show_ocsp_updates(struct appctx *appctx)
+{
+	HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+}
+
+
+/* Sample fetch "ssl_ocsp_certid": hex dump of the certid of the OCSP entry
+ * currently being updated. Only valid during an update (see keyword list). */
+static int
+smp_fetch_ssl_ocsp_certid(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct buffer *data = get_trash_chunk();
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	dump_binary(data, (char *)ocsp->key_data, ocsp->key_length);
+
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str = *data;
+	return 1;
+}
+
+/* Sample fetch "ssl_ocsp_certname": path of the certificate whose OCSP
+ * response is currently being updated. */
+static int
+smp_fetch_ssl_ocsp_certname(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str.area = ocsp->path;
+	smp->data.u.str.data = strlen(ocsp->path);
+	return 1;
+}
+
+/* Sample fetch "ssl_ocsp_status": numeric status of the ongoing update
+ * (one of the OCSP_UPDT_* values). */
+static int
+smp_fetch_ssl_ocsp_status(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = ssl_ocsp_task_ctx.update_status;
+	return 1;
+}
+
+/* Sample fetch "ssl_ocsp_status_str": human-readable form of the ongoing
+ * update's status, taken from the ocsp_update_errors table. */
+static int
+smp_fetch_ssl_ocsp_status_str(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	/* Out-of-range statuses have no string representation. */
+	if (ssl_ocsp_task_ctx.update_status >= OCSP_UPDT_ERR_LAST)
+		return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str = ist2buf(ocsp_update_errors[ssl_ocsp_task_ctx.update_status]);
+
+	return 1;
+}
+
+/* Sample fetch "ssl_ocsp_fail_cnt": number of failed update attempts for the
+ * entry currently being updated. */
+static int
+smp_fetch_ssl_ocsp_fail_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = ocsp->num_failure;
+	return 1;
+}
+
+/* Sample fetch "ssl_ocsp_success_cnt": number of successful updates for the
+ * entry currently being updated. */
+static int
+smp_fetch_ssl_ocsp_success_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct certificate_ocsp *ocsp = ssl_ocsp_task_ctx.cur_ocsp;
+
+	if (!ocsp)
+		return 0;
+
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = ocsp->num_success;
+	return 1;
+}
+
+
+/* CLI keywords handled by this file; registered via INITCALL below. */
+static struct cli_kw_list cli_kws = {{ },{
+	{ { "set", "ssl", "ocsp-response", NULL }, "set ssl ocsp-response <resp|payload>    : update a certificate's OCSP Response from a base64-encode DER",                      cli_parse_set_ocspresponse, NULL },
+
+	{ { "show", "ssl", "ocsp-response", NULL },"show ssl ocsp-response [[text|base64] id]  : display the IDs of the OCSP responses used in memory, or the details of a single OCSP response (in text or base64 format)", cli_parse_show_ocspresponse, cli_io_handler_show_ocspresponse, cli_release_show_ocspresponse },
+	{ { "show", "ssl", "ocsp-updates", NULL }, "show ssl ocsp-updates                   : display information about the next 'nb' ocsp responses that will be updated automatically", cli_parse_show_ocsp_updates, cli_io_handler_show_ocsp_updates, cli_release_show_ocsp_updates },
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+	{ { "update", "ssl", "ocsp-response", NULL }, "update ssl ocsp-response <certfile>     : send ocsp request and update stored ocsp response",                                  cli_parse_update_ocsp_response, NULL, NULL },
+#endif
+	{ { NULL }, NULL, NULL, NULL }  /* mandatory list terminator */
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ *
+ * Those fetches only have a valid value during an OCSP update process so they
+ * can only be used in a log format of a log line built by the update process
+ * task itself.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+	{ "ssl_ocsp_certid",          smp_fetch_ssl_ocsp_certid,       0,             NULL,    SMP_T_STR, SMP_USE_L5SRV },
+	{ "ssl_ocsp_certname",        smp_fetch_ssl_ocsp_certname,     0,             NULL,    SMP_T_STR, SMP_USE_L5SRV },
+	{ "ssl_ocsp_status",          smp_fetch_ssl_ocsp_status,       0,             NULL,    SMP_T_SINT, SMP_USE_L5SRV },
+	{ "ssl_ocsp_status_str",      smp_fetch_ssl_ocsp_status_str,   0,             NULL,    SMP_T_STR, SMP_USE_L5SRV },
+	{ "ssl_ocsp_fail_cnt",        smp_fetch_ssl_ocsp_fail_cnt,     0,             NULL,    SMP_T_SINT, SMP_USE_L5SRV },
+	{ "ssl_ocsp_success_cnt",     smp_fetch_ssl_ocsp_success_cnt,  0,             NULL,    SMP_T_SINT, SMP_USE_L5SRV },
+	{ NULL, NULL, 0, 0, 0 },  /* mandatory list terminator */
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/ssl_sample.c b/src/ssl_sample.c
new file mode 100644
index 0000000..789637f
--- /dev/null
+++ b/src/ssl_sample.c
@@ -0,0 +1,2389 @@
+/*
+ * This file contains the sample fetches related to the SSL
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/base64.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/connection.h>
+#include <haproxy/obj_type.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/sample.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/stconn.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+
+
+/***** Below are some sample fetching functions for ACL/patterns *****/
+
+#if defined(HAVE_CRYPTO_memcmp)
+/* Compares bytestring with a variable containing a bytestring. Return value
+ * is `true` if both bytestrings are bytewise identical and `false` otherwise.
+ *
+ * Comparison will be performed in constant time if both bytestrings are of
+ * the same length. If the lengths differ execution time will not be constant.
+ */
+static int sample_conv_secure_memcmp(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct sample tmp;
+	int result;
+
+	smp_set_owner(&tmp, smp->px, smp->sess, smp->strm, smp->opt);
+	if (arg_p[0].type != ARGT_VAR)
+		return 0;
+
+	if (!sample_conv_var2smp(&arg_p[0].data.var, &tmp, SMP_T_BIN))
+		return 0;
+
+	/* Length mismatch: answer "false" immediately. This branch is the
+	 * documented non-constant-time case. */
+	if (smp->data.u.str.data != tmp.data.u.str.data) {
+		smp->data.u.sint = 0;
+		smp->data.type = SMP_T_BOOL;
+		return 1;
+	}
+
+	/* The following comparison is performed in constant time. */
+	result = CRYPTO_memcmp(smp->data.u.str.area, tmp.data.u.str.area, smp->data.u.str.data);
+
+	smp->data.u.sint = result == 0;
+	smp->data.type = SMP_T_BOOL;
+	return 1;
+}
+
+/* This function checks the "secure_memcmp" converter's arguments and extracts the
+ * variable name and its scope.
+ */
+static int smp_check_secure_memcmp(struct arg *args, struct sample_conv *conv,
+                                   const char *file, int line, char **err)
+{
+	if (!args[0].data.str.data) {
+		memprintf(err, "missing variable name");
+		return 0;
+	}
+
+	/* Try to decode a variable. */
+	if (vars_check_arg(&args[0], NULL))
+		return 1;
+
+	memprintf(err, "failed to register variable name '%s'",
+		  args[0].data.str.area);
+	return 0;
+}
+#endif // HAVE_secure_memcmp()
+
+/* Argument checker for the "sha2" converter: the optional argument must be
+ * one of the supported digest widths (224, 256, 384, 512). No argument means
+ * the converter's default (256). */
+static int smp_check_sha2(struct arg *args, struct sample_conv *conv,
+                          const char *file, int line, char **err)
+{
+	if (args[0].type == ARGT_STOP)
+		return 1;
+	if (args[0].type != ARGT_SINT) {
+		memprintf(err, "Invalid type '%s'", arg_type_names[args[0].type]);
+		return 0;
+	}
+
+	switch (args[0].data.sint) {
+		case 224:
+		case 256:
+		case 384:
+		case 512:
+			/* this is okay */
+			return 1;
+		default:
+			memprintf(err, "Unsupported number of bits: '%lld'", args[0].data.sint);
+			return 0;
+	}
+}
+
+/* "sha2" converter: replaces the input sample with its SHA-2 digest (width
+ * chosen by the optional argument, default 256 bits). The digest is written
+ * into the trash chunk and returned as a binary sample. */
+static int sample_conv_sha2(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct buffer *trash = get_trash_chunk();
+	int bits = 256;
+	EVP_MD_CTX *mdctx;
+	const EVP_MD *evp = NULL;
+	unsigned int digest_length = 0;
+	if (arg_p->data.sint)
+		bits = arg_p->data.sint;
+
+	switch (bits) {
+	case 224:
+		evp = EVP_sha224();
+		break;
+	case 256:
+		evp = EVP_sha256();
+		break;
+	case 384:
+		evp = EVP_sha384();
+		break;
+	case 512:
+		evp = EVP_sha512();
+		break;
+	default:
+		return 0;
+	}
+
+	mdctx = EVP_MD_CTX_new();
+	if (!mdctx)
+		return 0;
+	/* NOTE(review): the EVP_Digest* return values are not checked;
+	 * presumably they cannot fail for these built-in digests -- confirm. */
+	EVP_DigestInit_ex(mdctx, evp, NULL);
+	EVP_DigestUpdate(mdctx, smp->data.u.str.area, smp->data.u.str.data);
+	EVP_DigestFinal_ex(mdctx, (unsigned char*)trash->area, &digest_length);
+	trash->data = digest_length;
+
+	EVP_MD_CTX_free(mdctx);
+
+	smp->data.u.str = *trash;
+	smp->data.type = SMP_T_BIN;
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* This function checks an <arg> and fills it with a variable type if the
+ * <arg> string contains a valid variable name. If failed, the function
+ * tries to perform a base64 decode operation on the same string, and
+ * fills the <arg> with the decoded content.
+ *
+ * Validation is skipped if the <arg> string is empty.
+ *
+ * This function returns 0 if the variable lookup fails and the specified
+ * <arg> string is not a valid base64 encoded string, as well if
+ * unexpected argument type is specified or memory allocation error
+ * occurs. Otherwise it returns 1.
+ */
+static inline int sample_check_arg_base64(struct arg *arg, char **err)
+{
+	char *dec = NULL;
+	int dec_size;
+
+	if (arg->type != ARGT_STR) {
+		memprintf(err, "unexpected argument type");
+		return 0;
+	}
+
+	if (arg->data.str.data == 0) /* empty */
+		return 1;
+
+	/* Variable reference takes precedence over base64 decoding. */
+	if (vars_check_arg(arg, NULL))
+		return 1;
+
+	/* Valid base64 input is always a multiple of 4 characters. */
+	if (arg->data.str.data % 4) {
+		memprintf(err, "argument needs to be base64 encoded, and "
+			       "can either be a string or a variable");
+		return 0;
+	}
+
+	/* Exact decoded size: 3 bytes per 4 input chars, minus one byte per
+	 * trailing '=' padding character. */
+	dec_size = (arg->data.str.data / 4 * 3)
+		   - (arg->data.str.area[arg->data.str.data-1] == '=' ? 1 : 0)
+		   - (arg->data.str.area[arg->data.str.data-2] == '=' ? 1 : 0);
+
+	if ((dec = malloc(dec_size)) == NULL) {
+		memprintf(err, "memory allocation error");
+		return 0;
+	}
+
+	dec_size = base64dec(arg->data.str.area, arg->data.str.data, dec, dec_size);
+	if (dec_size < 0) {
+		memprintf(err, "argument needs to be base64 encoded, and "
+			       "can either be a string or a variable");
+		free(dec);
+		return 0;
+	}
+
+	/* base64 decoded: replace the argument string in place; the decoded
+	 * buffer is now owned by the argument. */
+	chunk_destroy(&arg->data.str);
+	arg->data.str.area = dec;
+	arg->data.str.data = dec_size;
+	return 1;
+}
+
+#ifdef EVP_CIPH_GCM_MODE
+/* Argument checker for the "aes_gcm_dec" converter.
+ * args: [0] key size in bits (128/192/256), [1] nonce, [2] key,
+ * [3] AEAD tag -- the last three either variable names or base64 strings,
+ * decoded in place by sample_check_arg_base64(). */
+static int check_aes_gcm(struct arg *args, struct sample_conv *conv,
+                          const char *file, int line, char **err)
+{
+	switch(args[0].data.sint) {
+	case 128:
+	case 192:
+	case 256:
+		break;
+	default:
+		memprintf(err, "key size must be 128, 192 or 256 (bits).");
+		return 0;
+	}
+
+	/* Try to decode variables. */
+	if (!sample_check_arg_base64(&args[1], err)) {
+		memprintf(err, "failed to parse nonce : %s", *err);
+		return 0;
+	}
+	if (!sample_check_arg_base64(&args[2], err)) {
+		memprintf(err, "failed to parse key : %s", *err);
+		return 0;
+	}
+	if (!sample_check_arg_base64(&args[3], err)) {
+		memprintf(err, "failed to parse aead_tag : %s", *err);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* AES-GCM decryption converter.
+ * Arguments: AES size in bits (128/192/256), nonce, key, aead tag; the last
+ * three are base64 encoded (variables are base64-decoded at runtime here).
+ * On success returns 1 with the plaintext in <smp> as SMP_T_BIN, else 0.
+ *
+ * Fixes vs previous version: the EVP_CIPHER_CTX was never freed (leaked on
+ * both the success and error paths), and &smp_trash->data (a size_t) was
+ * cast to int* for EVP_DecryptUpdate/Final, which is broken on big-endian
+ * and strictly undefined; a local int is now used instead.
+ */
+static int sample_conv_aes_gcm_dec(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	struct sample nonce, key, aead_tag;
+	struct buffer *smp_trash = NULL, *smp_trash_alloc = NULL;
+	EVP_CIPHER_CTX *ctx = NULL;
+	int dec_size, ret, len;
+
+	smp_trash_alloc = alloc_trash_chunk();
+	if (!smp_trash_alloc)
+		return 0;
+
+	/* smp copy (clamped to the trash chunk size) */
+	smp_trash_alloc->data = smp->data.u.str.data;
+	if (unlikely(smp_trash_alloc->data > smp_trash_alloc->size))
+		smp_trash_alloc->data = smp_trash_alloc->size;
+	memcpy(smp_trash_alloc->area, smp->data.u.str.area, smp_trash_alloc->data);
+
+	ctx = EVP_CIPHER_CTX_new();
+	if (!ctx)
+		goto err;
+
+	smp_trash = alloc_trash_chunk();
+	if (!smp_trash)
+		goto err;
+
+	smp_set_owner(&nonce, smp->px, smp->sess, smp->strm, smp->opt);
+	if (!sample_conv_var2smp_str(&arg_p[1], &nonce))
+		goto err;
+
+	if (arg_p[1].type == ARGT_VAR) {
+		/* variable content is still base64: decode it now */
+		dec_size = base64dec(nonce.data.u.str.area, nonce.data.u.str.data, smp_trash->area, smp_trash->size);
+		if (dec_size < 0)
+			goto err;
+		smp_trash->data = dec_size;
+		nonce.data.u.str = *smp_trash;
+	}
+
+	/* Set cipher type and mode */
+	switch(arg_p[0].data.sint) {
+	case 128:
+		EVP_DecryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, NULL, NULL);
+		break;
+	case 192:
+		EVP_DecryptInit_ex(ctx, EVP_aes_192_gcm(), NULL, NULL, NULL);
+		break;
+	case 256:
+		EVP_DecryptInit_ex(ctx, EVP_aes_256_gcm(), NULL, NULL, NULL);
+		break;
+	}
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_IVLEN, nonce.data.u.str.data, NULL);
+
+	/* Initialise IV */
+	if(!EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, (unsigned char *) nonce.data.u.str.area))
+		goto err;
+
+	smp_set_owner(&key, smp->px, smp->sess, smp->strm, smp->opt);
+	if (!sample_conv_var2smp_str(&arg_p[2], &key))
+		goto err;
+
+	if (arg_p[2].type == ARGT_VAR) {
+		dec_size = base64dec(key.data.u.str.area, key.data.u.str.data, smp_trash->area, smp_trash->size);
+		if (dec_size < 0)
+			goto err;
+		smp_trash->data = dec_size;
+		key.data.u.str = *smp_trash;
+	}
+
+	/* Initialise key */
+	if (!EVP_DecryptInit_ex(ctx, NULL, NULL, (unsigned char *) key.data.u.str.area, NULL))
+		goto err;
+
+	/* decrypt through a local int: EVP expects int*, smp_trash->data is size_t */
+	if (!EVP_DecryptUpdate(ctx, (unsigned char *) smp_trash->area, &len,
+	                       (unsigned char *) smp_trash_alloc->area, (int) smp_trash_alloc->data))
+		goto err;
+	smp_trash->data = len;
+
+	smp_set_owner(&aead_tag, smp->px, smp->sess, smp->strm, smp->opt);
+	if (!sample_conv_var2smp_str(&arg_p[3], &aead_tag))
+		goto err;
+
+	if (arg_p[3].type == ARGT_VAR) {
+		dec_size = base64dec(aead_tag.data.u.str.area, aead_tag.data.u.str.data, smp_trash_alloc->area, smp_trash_alloc->size);
+		if (dec_size < 0)
+			goto err;
+		smp_trash_alloc->data = dec_size;
+		aead_tag.data.u.str = *smp_trash_alloc;
+	}
+
+	dec_size = smp_trash->data;
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, aead_tag.data.u.str.data, (void *) aead_tag.data.u.str.area);
+	ret = EVP_DecryptFinal_ex(ctx, (unsigned char *) smp_trash->area + smp_trash->data, &len);
+
+	if (ret <= 0)
+		goto err;
+	smp_trash->data = len;
+
+	smp->data.u.str.data = dec_size + smp_trash->data;
+	smp->data.u.str.area = smp_trash->area;
+	smp->data.type = SMP_T_BIN;
+	smp_dup(smp);
+	EVP_CIPHER_CTX_free(ctx); /* was leaked before */
+	free_trash_chunk(smp_trash_alloc);
+	free_trash_chunk(smp_trash);
+	return 1;
+
+err:
+	EVP_CIPHER_CTX_free(ctx); /* NULL-safe */
+	free_trash_chunk(smp_trash_alloc);
+	free_trash_chunk(smp_trash);
+	return 0;
+}
+#endif
+
+/* Config-time check for the digest converter: args[0] must name a message
+ * digest known to OpenSSL. Returns 1 if valid, 0 with <err> set otherwise.
+ */
+static int check_crypto_digest(struct arg *args, struct sample_conv *conv,
+                               const char *file, int line, char **err)
+{
+	if (!EVP_get_digestbyname(args[0].data.str.area)) {
+		memprintf(err, "algorithm must be a valid OpenSSL message digest name.");
+		return 0;
+	}
+	return 1;
+}
+
+/* Hash the input sample with the digest named in args[0] (validated earlier
+ * by check_crypto_digest()) and return the raw digest as SMP_T_BIN.
+ * Returns 1 on success, 0 on allocation or EVP failure.
+ */
+static int sample_conv_crypto_digest(const struct arg *args, struct sample *smp, void *private)
+{
+	struct buffer *trash = get_trash_chunk();
+	unsigned char *md = (unsigned char*) trash->area;
+	unsigned int md_len = trash->size;
+	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
+	const EVP_MD *evp = EVP_get_digestbyname(args[0].data.str.area);
+
+	if (!ctx)
+		return 0;
+
+	if (!EVP_DigestInit_ex(ctx, evp, NULL) ||
+	    !EVP_DigestUpdate(ctx, smp->data.u.str.area, smp->data.u.str.data) ||
+	    !EVP_DigestFinal_ex(ctx, md, &md_len)) {
+		EVP_MD_CTX_free(ctx);
+		return 0;
+	}
+
+	EVP_MD_CTX_free(ctx);
+
+	/* digest was written directly into the trash area */
+	trash->data = md_len;
+	smp->data.u.str = *trash;
+	smp->data.type = SMP_T_BIN;
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* Take a numerical X509_V_ERR and return its constant name as a string
+ * sample; if the code is unknown, the number itself is emitted as a string.
+ * Always returns 1.
+ */
+static int sample_conv_x509_v_err(const struct arg *arg_p, struct sample *smp, void *private)
+{
+	const char *res = x509_v_err_int_to_str(smp->data.u.sint);
+
+	/* if the value was found return its string */
+	if (res) {
+		smp->data.u.str.area = (char *)res;
+		smp->data.u.str.data = strlen(res);
+		smp->data.type = SMP_T_STR;
+		smp->flags |= SMP_F_CONST;
+	} else {
+		struct buffer *smp_trash = get_trash_chunk();
+
+		/* if the conversion failed, output the number as string.
+		 * Use %lld: u.sint is signed, %llu was a mismatched specifier.
+		 */
+		chunk_printf(smp_trash, "%lld", smp->data.u.sint);
+
+		smp->data.u.str = *smp_trash;
+		smp->data.type = SMP_T_STR;
+		smp->flags &= ~SMP_F_CONST;
+	}
+	/* the unreachable trailing "return 0" was removed */
+	return 1;
+}
+
+/* Config-time check for the hmac converter: the digest name (args[0]) must
+ * be valid and the key (args[1]) must be base64 or a variable reference.
+ * Returns 1 on success, 0 with <err> set otherwise.
+ */
+static int check_crypto_hmac(struct arg *args, struct sample_conv *conv,
+                             const char *file, int line, char **err)
+{
+	if (!check_crypto_digest(args, conv, file, line, err))
+		return 0;
+
+	if (sample_check_arg_base64(&args[1], err))
+		return 1;
+
+	memprintf(err, "failed to parse key : %s", *err);
+	return 0;
+}
+
+/* Compute HMAC(digest=args[0], key=args[1]) over the input sample and return
+ * it as SMP_T_BIN. A key given as a variable holds base64 and is decoded at
+ * runtime. Returns 1 on success, 0 on allocation/decoding/HMAC failure.
+ */
+static int sample_conv_crypto_hmac(const struct arg *args, struct sample *smp, void *private)
+{
+	struct sample key;
+	struct buffer *trash = NULL, *key_trash = NULL;
+	unsigned char *md;
+	unsigned int md_len;
+	const EVP_MD *evp = EVP_get_digestbyname(args[0].data.str.area);
+	int dec_size;
+
+	smp_set_owner(&key, smp->px, smp->sess, smp->strm, smp->opt);
+	if (!sample_conv_var2smp_str(&args[1], &key))
+		return 0;
+
+	if (args[1].type == ARGT_VAR) {
+		/* variable content is still base64: decode into a private chunk */
+		key_trash = alloc_trash_chunk();
+		if (!key_trash)
+			goto err;
+
+		dec_size = base64dec(key.data.u.str.area, key.data.u.str.data, key_trash->area, key_trash->size);
+		if (dec_size < 0)
+			goto err;
+		key_trash->data = dec_size;
+		key.data.u.str = *key_trash;
+	}
+
+	trash = alloc_trash_chunk();
+	if (!trash)
+		goto err;
+
+	md = (unsigned char*) trash->area;
+	md_len = trash->size;
+	if (!HMAC(evp, key.data.u.str.area, key.data.u.str.data, (const unsigned char*) smp->data.u.str.area,
+	          smp->data.u.str.data, md, &md_len))
+		goto err;
+
+	/* free_trash_chunk() is NULL-safe: key_trash may not have been allocated */
+	free_trash_chunk(key_trash);
+
+	trash->data = md_len;
+	smp->data.u.str = *trash;
+	smp->data.type = SMP_T_BIN;
+	/* copy out before releasing the trash chunk backing the sample */
+	smp_dup(smp);
+	free_trash_chunk(trash);
+	return 1;
+
+err:
+	free_trash_chunk(key_trash);
+	free_trash_chunk(trash);
+	return 0;
+}
+
+/* boolean, returns true if the front connection has early (0-RTT) data.
+ * With BoringSSL the SSL object itself is queried; otherwise connection
+ * flags are used.
+ */
+static int
+smp_fetch_ssl_fc_has_early(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	SSL *ssl;
+	struct connection *conn;
+
+	conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	smp->flags = 0;
+	smp->data.type = SMP_T_BOOL;
+#ifdef OPENSSL_IS_BORINGSSL
+	{
+		smp->data.u.sint = (SSL_in_early_data(ssl) &&
+				    SSL_early_data_accepted(ssl));
+	}
+#else
+	/* early data seen, and handshake still pending or done over early data */
+	smp->data.u.sint = ((conn->flags & CO_FL_EARLY_DATA)  &&
+			    (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS))) ? 1 : 0;
+#endif
+	return 1;
+}
+
+/* boolean, returns true if a client certificate was presented (i.e. the
+ * verify step completed on this front connection). While the handshake is
+ * still in progress, SMP_F_MAY_CHANGE is reported instead.
+ */
+static int
+smp_fetch_ssl_fc_has_crt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn = objt_conn(smp->sess->origin);
+	struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+	if (!ctx)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = SSL_SOCK_ST_FL_VERIFY_DONE & ctx->xprt_st ? 1 : 0;
+
+	return 1;
+}
+
+/* string, returns a string of a formatted full dn \C=..\O=..\OU=.. \CN=.. of the
+ * client certificate's root CA (top of the verified chain).
+ * args[0]/args[1] select a single DN entry (name + occurrence), args[2] a
+ * custom format; with no args the one-line DN form is returned.
+ */
+#ifdef HAVE_SSL_get0_verified_chain
+static int
+smp_fetch_ssl_r_dn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	X509 *crt = NULL;
+	X509_NAME *name;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	/* handshake not done yet and no error: result may still change */
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	crt = ssl_sock_get_verified_chain_root(ssl);
+	if (!crt)
+		goto out;
+
+	name = X509_get_subject_name(crt);
+	if (!name)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (args[0].type == ARGT_STR && args[0].data.str.data > 0) {
+		int pos = 1;
+
+		if (args[1].type == ARGT_SINT)
+			pos = args[1].data.sint;
+
+		if (ssl_sock_get_dn_entry(name, &args[0].data.str, pos, smp_trash) <= 0)
+			goto out;
+	}
+	else if (args[2].type == ARGT_STR && args[2].data.str.data > 0) {
+		if (ssl_sock_get_dn_formatted(name, &args[2].data.str, smp_trash) <= 0)
+			goto out;
+	}
+	else if (ssl_sock_get_dn_oneline(name, smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str = *smp_trash;
+	ret = 1;
+out:
+	return ret;
+}
+#endif
+
+/* binary, returns a certificate in a binary chunk (der/raw).
+ * The 5th keyword char selects the certificate: 'c' or 's' -> peer cert
+ * (SSL_get_peer_certificate), otherwise the local one (SSL_get_certificate);
+ * 's' additionally means the backend (server-side) connection is used.
+ */
+static int
+smp_fetch_ssl_x_der(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+
+	X509 *crt = NULL;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+
+	if (!crt)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (ssl_sock_crt2der(crt, smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_BIN;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* binary, returns the whole peer certificate chain as concatenated DER in a
+ * binary chunk. Only the peer side is supported (the 5th keyword char must
+ * select the peer certificate); 's' selects the backend connection.
+ */
+static int
+smp_fetch_ssl_x_chain_der(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	struct buffer *smp_trash;
+	struct buffer *tmp_trash = NULL;
+	struct connection *conn;
+	STACK_OF(X509) *certs = NULL;
+	X509 *crt = NULL;
+	SSL *ssl;
+	int ret = 0;
+	int num_certs;
+	int i;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	if (!conn)
+		return 0;
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (!cert_peer)
+		return 0;
+
+	/* chain is owned by the SSL object: no freeing needed */
+	certs = SSL_get_peer_cert_chain(ssl);
+	if (!certs)
+		return 0;
+
+	num_certs = sk_X509_num(certs);
+	if (!num_certs)
+		goto out;
+	smp_trash = get_trash_chunk();
+	tmp_trash = alloc_trash_chunk();
+	if (!tmp_trash)
+		goto out;
+	/* DER-encode each cert into tmp_trash and append to the result */
+	for (i = 0; i < num_certs; i++) {
+		crt = sk_X509_value(certs, i);
+		if (ssl_sock_crt2der(crt, tmp_trash) <= 0)
+			goto out;
+		chunk_cat(smp_trash, tmp_trash);
+	}
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_BIN;
+	ret = 1;
+out:
+	if (tmp_trash)
+		free_trash_chunk(tmp_trash);
+	return ret;
+}
+
+/* binary, returns the serial of a certificate in a binary chunk.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_serial(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+
+	if (!crt)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (ssl_sock_get_serial(crt, smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_BIN;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* binary, returns the certificate's SHA-1 fingerprint (SHA-1 hash of the
+ * DER-encoded certificate) in a binary chunk.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ * Fix: the X509_digest() return value was previously ignored; on failure the
+ * sample would be returned with a zero-length (or garbage) fingerprint.
+ */
+static int
+smp_fetch_ssl_x_sha1(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	const EVP_MD *digest;
+	int ret = 0;
+	unsigned int len = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	digest = EVP_sha1();
+	/* X509_digest() returns 1 on success, 0 on failure */
+	if (X509_digest(crt, digest, (unsigned char *) smp_trash->area, &len) != 1)
+		goto out;
+	smp_trash->data = len;
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_BIN;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* string, returns the certificate's notAfter date in ASN1_UTCTIME format.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_notafter(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (ssl_sock_get_time(X509_getm_notAfter(crt), smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_STR;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* string, returns a formatted full dn \C=..\O=..\OU=.. \CN=.. of the
+ * certificate's issuer. args[0]/args[1] select a single DN entry (name +
+ * occurrence), args[2] a custom format; otherwise the one-line DN is used.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_i_dn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	X509_NAME *name;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		goto out;
+
+	name = X509_get_issuer_name(crt);
+	if (!name)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (args[0].type == ARGT_STR && args[0].data.str.data > 0) {
+		int pos = 1;
+
+		if (args[1].type == ARGT_SINT)
+			pos = args[1].data.sint;
+
+		if (ssl_sock_get_dn_entry(name, &args[0].data.str, pos, smp_trash) <= 0)
+			goto out;
+	}
+	else if (args[2].type == ARGT_STR && args[2].data.str.data > 0) {
+		if (ssl_sock_get_dn_formatted(name, &args[2].data.str, smp_trash) <= 0)
+			goto out;
+	}
+	else if (ssl_sock_get_dn_oneline(name, smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str = *smp_trash;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* string, returns the certificate's notBefore date in ASN1_UTCTIME format.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_notbefore(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (ssl_sock_get_time(X509_getm_notBefore(crt), smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str = *smp_trash;
+	smp->data.type = SMP_T_STR;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* string, returns a formatted full dn \C=..\O=..\OU=.. \CN=.. of the
+ * certificate's subject. args[0]/args[1] select a single DN entry (name +
+ * occurrence), args[2] a custom format; otherwise the one-line DN is used.
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_s_dn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt = NULL;
+	X509_NAME *name;
+	int ret = 0;
+	struct buffer *smp_trash;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		goto out;
+
+	name = X509_get_subject_name(crt);
+	if (!name)
+		goto out;
+
+	smp_trash = get_trash_chunk();
+	if (args[0].type == ARGT_STR && args[0].data.str.data > 0) {
+		int pos = 1;
+
+		if (args[1].type == ARGT_SINT)
+			pos = args[1].data.sint;
+
+		if (ssl_sock_get_dn_entry(name, &args[0].data.str, pos, smp_trash) <= 0)
+			goto out;
+	}
+	else if (args[2].type == ARGT_STR && args[2].data.str.data > 0) {
+		if (ssl_sock_get_dn_formatted(name, &args[2].data.str, smp_trash) <= 0)
+			goto out;
+	}
+	else if (ssl_sock_get_dn_oneline(name, smp_trash) <= 0)
+		goto out;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_STR;
+	smp->data.u.str = *smp_trash;
+	ret = 1;
+out:
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer && crt)
+		X509_free(crt);
+	return ret;
+}
+
+/* boolean, returns true if the current session uses a client certificate */
+static int
+smp_fetch_ssl_c_used(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	X509 *crt;
+	struct connection *conn;
+	SSL *ssl;
+
+	conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	/* SSL_get_peer_certificate returns a ptr on an allocated X509 struct:
+	 * only its presence matters here, so release it right away.
+	 */
+	crt = ssl_sock_get_peer_certificate(ssl);
+	if (crt) {
+		X509_free(crt);
+	}
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = (crt != NULL);
+	return 1;
+}
+
+/* integer, returns the certificate version (X509_get_version() is 0-based,
+ * hence the +1 to report the human-readable version).
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_version(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+
+	X509 *crt;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		return 0;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.sint = (unsigned int)(1 + X509_get_version(crt));
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer)
+		X509_free(crt);
+	smp->data.type = SMP_T_SINT;
+
+	return 1;
+}
+
+/* string, returns the certificate's signature algorithm (OpenSSL short name).
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_sig_alg(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt;
+	__OPENSSL_110_CONST__ ASN1_OBJECT *algorithm;
+	int nid;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		return 0;
+
+	X509_ALGOR_get0(&algorithm, NULL, NULL, X509_get0_tbs_sigalg(crt));
+	nid = OBJ_obj2nid(algorithm);
+
+	/* OBJ_nid2sn returns a static string: safe to keep as SMP_F_CONST */
+	smp->data.u.str.area = (char *)OBJ_nid2sn(nid);
+	if (!smp->data.u.str.area) {
+		/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+		if (cert_peer)
+			X509_free(crt);
+		return 0;
+	}
+
+	smp->data.type = SMP_T_STR;
+	smp->flags |= SMP_F_VOL_SESS | SMP_F_CONST;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+	/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+	if (cert_peer)
+		X509_free(crt);
+
+	return 1;
+}
+
+/* string, returns the certificate's public key algorithm (OpenSSL short name).
+ * The 5th keyword char selects peer ('c'/'s') vs local cert, and 's' selects
+ * the backend connection.
+ */
+static int
+smp_fetch_ssl_x_key_alg(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	int cert_peer = (kw[4] == 'c' || kw[4] == 's') ? 1 : 0;
+	int conn_server = (kw[4] == 's') ? 1 : 0;
+	X509 *crt;
+	ASN1_OBJECT *algorithm;
+	int nid;
+	struct connection *conn;
+	SSL *ssl;
+
+	if (conn_server)
+		conn = smp->strm ? sc_conn(smp->strm->scb) : NULL;
+	else
+		conn = objt_conn(smp->sess->origin);
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+		smp->flags |= SMP_F_MAY_CHANGE;
+		return 0;
+	}
+
+	if (cert_peer)
+		crt = ssl_sock_get_peer_certificate(ssl);
+	else
+		crt = SSL_get_certificate(ssl);
+	if (!crt)
+		return 0;
+
+	X509_PUBKEY_get0_param(&algorithm, NULL, NULL, NULL, X509_get_X509_PUBKEY(crt));
+	nid = OBJ_obj2nid(algorithm);
+
+	/* OBJ_nid2sn returns a static string: safe to keep as SMP_F_CONST */
+	smp->data.u.str.area = (char *)OBJ_nid2sn(nid);
+	if (!smp->data.u.str.area) {
+		/* SSL_get_peer_certificate increases the X509 refcount, so drop it */
+		if (cert_peer)
+			X509_free(crt);
+		return 0;
+	}
+
+	smp->data.type = SMP_T_STR;
+	smp->flags |= SMP_F_VOL_SESS | SMP_F_CONST;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+	if (cert_peer)
+		X509_free(crt);
+
+	return 1;
+}
+
+/* boolean, returns true if the front conn. transport layer is SSL.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = conn_is_ssl(conn);
+	return 1;
+}
+
+/* boolean, returns true if the client presented an SNI. Only available when
+ * the SSL library supports the TLS servername extension.
+ */
+static int
+smp_fetch_ssl_fc_has_sni(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+	struct connection *conn = objt_conn(smp->sess->origin);
+	SSL *ssl = ssl_sock_get_ssl_object(conn);
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = ssl && SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name) != NULL;
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+/* boolean, returns true if the client session has been resumed.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc_is_resumed(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	ssl = ssl_sock_get_ssl_object(conn);
+
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = ssl && SSL_session_reused(ssl);
+	return 1;
+}
+
+/*
+ * string, returns the EC curve used for key agreement on the
+ * front and backend connection.
+ *
+ * The function to get the curve name (SSL_get_negotiated_group) is only available
+ * in OpenSSLv3 onwards and not for previous versions.
+ *
+ * Fix: toupper() was called with a plain char argument, which is undefined
+ * behaviour for negative char values; the argument is now cast to
+ * unsigned char as required by the <ctype.h> contract.
+ */
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+static int
+smp_fetch_ssl_fc_ec(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+	int __maybe_unused nid;
+	char *curve_name;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	/*
+	 * SSL_get0_group_name is a function to get the curve name and is available from
+	 * OpenSSL v3.2 onwards. For OpenSSL >=3.0 and <3.2, we will continue to use
+	 * SSL_get_negotiated_group to get the curve name.
+	 */
+	#if (HA_OPENSSL_VERSION_NUMBER >= 0x3020000fL)
+	curve_name = (char *)SSL_get0_group_name(ssl);
+	if (curve_name == NULL)
+		return 0;
+	else {
+		/*
+		 * The curve name returned by SSL_get0_group_name is in lowercase whereas the
+		 * short name returned by SSL_get_negotiated_group + OBJ_nid2sn is in upper
+		 * case. Convert to upper case for a consistent output across versions.
+		 * NOTE(review): this writes through the const pointer returned by
+		 * SSL_get0_group_name (library-owned memory) - confirm this is intended.
+		 */
+		for (int i = 0; curve_name[i]; i++)
+			curve_name[i] = toupper((unsigned char)curve_name[i]);
+	}
+	#else
+	nid = SSL_get_negotiated_group(ssl);
+	if (!nid)
+		return 0;
+	curve_name = (char *)OBJ_nid2sn(nid);
+	if (curve_name == NULL)
+		return 0;
+	#endif
+
+	smp->data.u.str.area = curve_name;
+	if (!smp->data.u.str.area)
+		return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->flags |= SMP_F_VOL_SESS | SMP_F_CONST;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+	return 1;
+}
+#endif
+
+/* string, returns the cipher used if the front conn. transport layer is SSL.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc_cipher(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	smp->flags = 0;
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	/* static library string: marked SMP_F_CONST below */
+	smp->data.u.str.area = (char *)SSL_get_cipher_name(ssl);
+	if (!smp->data.u.str.area)
+		return 0;
+
+	smp->data.type = SMP_T_STR;
+	smp->flags |= SMP_F_VOL_SESS | SMP_F_CONST;
+	smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+	return 1;
+}
+
+/* integer, returns the algorithm's keysize if the front conn. transport
+ * layer is SSL.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc_alg_keysize(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+	int sint;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	smp->flags = 0;
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	/* SSL_get_cipher_bits stores the algorithm's key size in <sint> */
+	if (!SSL_get_cipher_bits(ssl, &sint))
+		return 0;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.sint = sint;
+	smp->data.type = SMP_T_SINT;
+
+	return 1;
+}
+
+/* integer, returns the keysize actually used if the front conn. transport
+ * layer is SSL.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc_use_keysize(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	smp->flags = 0;
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	/* return value (secret key bits) is used directly here */
+	smp->data.u.sint = (unsigned int)SSL_get_cipher_bits(ssl, NULL);
+	if (!smp->data.u.sint)
+		return 0;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.type = SMP_T_SINT;
+
+	return 1;
+}
+
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+/* string, returns the NPN protocol negotiated on the connection, if any.
+ * This function is also usable on a backend conn if the fetch keyword's 5th
+ * char is 'b'. For health checks, the check connection is used instead.
+ */
+static int
+smp_fetch_ssl_fc_npn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct connection *conn;
+	SSL *ssl;
+	unsigned int len = 0;
+
+	smp->flags = SMP_F_CONST;
+	smp->data.type = SMP_T_STR;
+
+	if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+		conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+	else
+		conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+			smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+	ssl = ssl_sock_get_ssl_object(conn);
+	if (!ssl)
+		return 0;
+
+	smp->flags = SMP_F_VOL_SESS;
+	smp->data.u.str.area = NULL;
+	/* points into SSL-owned memory; area stays NULL when nothing negotiated */
+	SSL_get0_next_proto_negotiated(ssl,
+	                               (const unsigned char **)&smp->data.u.str.area,
+	                               &len);
+
+	if (!smp->data.u.str.area)
+		return 0;
+
+	smp->data.u.str.data = len;
+	return 1;
+}
+#endif
+
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+/* string, returns the protocol negotiated via ALPN on the SSL connection,
+ * or fails if none was negotiated. Also usable on a backend conn when the
+ * fetch keyword's 5th char is 'b'. The returned area points into the SSL
+ * library's storage, hence SMP_F_CONST.
+ */
+static int
+smp_fetch_ssl_fc_alpn(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ SSL *ssl;
+ unsigned int len = 0;
+
+ smp->flags = SMP_F_VOL_SESS | SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ smp->data.u.str.area = NULL;
+ SSL_get0_alpn_selected(ssl,
+ (const unsigned char **)&smp->data.u.str.area,
+ &len);
+
+ if (!smp->data.u.str.area)
+ return 0;
+
+ smp->data.u.str.data = len;
+ return 1;
+}
+#endif
+
+/* string, returns the used protocol if front conn. transport layer is SSL.
+ * This function is also usable on backend conn if the fetch keyword 5th
+ * char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_protocol(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ SSL *ssl;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ smp->flags = 0;
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ /* SSL_get_version() returns a constant string owned by the library
+  * (e.g. "TLSv1.3"), so the sample is marked SMP_F_CONST and must not
+  * be modified in place.
+  */
+ smp->data.u.str.area = (char *)SSL_get_version(ssl);
+ if (!smp->data.u.str.area)
+ return 0;
+
+ smp->data.type = SMP_T_STR;
+ smp->flags = SMP_F_VOL_SESS | SMP_F_CONST;
+ smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+ return 1;
+}
+
+/* binary, returns the SSL session id (as returned by SSL_SESSION_get_id())
+ * if front conn. transport layer is SSL.
+ * This function is also usable on backend conn if the fetch keyword 5th
+ * char is 'b'.
+ */
+#if HA_OPENSSL_VERSION_NUMBER > 0x0090800fL
+static int
+smp_fetch_ssl_fc_session_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ SSL_SESSION *ssl_sess;
+ SSL *ssl;
+ unsigned int len = 0;
+
+ smp->flags = SMP_F_VOL_SESS | SMP_F_CONST;
+ smp->data.type = SMP_T_BIN;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ ssl_sess = SSL_get_session(ssl);
+ if (!ssl_sess)
+ return 0;
+
+ /* the id buffer belongs to the SSL_SESSION, hence SMP_F_CONST above */
+ smp->data.u.str.area = (char *)SSL_SESSION_get_id(ssl_sess, &len);
+ if (!smp->data.u.str.area || !len)
+ return 0;
+
+ smp->data.u.str.data = len;
+ return 1;
+}
+#endif
+
+
+#ifdef HAVE_SSL_EXTRACT_RANDOM
+/* binary, returns the client or server random of the SSL handshake.
+ * kw[7] is 'c' for the "ssl_xc_client_random" keywords, anything else
+ * selects the server random. Also usable on a backend conn when the
+ * fetch keyword's 5th char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_random(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct buffer *data;
+ SSL *ssl;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ /* copy into a trash chunk; SMP_F_VOL_TEST because the trash area is
+  * only valid for the duration of this evaluation.
+  */
+ data = get_trash_chunk();
+ if (kw[7] == 'c')
+ data->data = SSL_get_client_random(ssl,
+ (unsigned char *) data->area,
+ data->size);
+ else
+ data->data = SSL_get_server_random(ssl,
+ (unsigned char *) data->area,
+ data->size);
+ if (!data->data)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str = *data;
+
+ return 1;
+}
+
+/* binary, returns the master key of the current SSL session.
+ * Also usable on a backend conn when the fetch keyword's 5th char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_session_key(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ SSL_SESSION *ssl_sess;
+ struct buffer *data;
+ SSL *ssl;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ ssl_sess = SSL_get_session(ssl);
+ if (!ssl_sess)
+ return 0;
+
+ /* copy the master key into a trash chunk sized to its maximum */
+ data = get_trash_chunk();
+ data->data = SSL_SESSION_get_master_key(ssl_sess,
+ (unsigned char *) data->area,
+ data->size);
+ if (!data->data)
+ return 0;
+
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str = *data;
+
+ return 1;
+}
+#endif
+
+/* string, returns the SNI (server name) presented by the client on the
+ * front connection, either from the TLS extension or, failing that, from
+ * the copy stored in the SSL ex_data. Always fails when SNI support is
+ * not compiled in.
+ */
+static int
+smp_fetch_ssl_fc_sni(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ struct connection *conn;
+ SSL *ssl;
+
+ smp->flags = SMP_F_VOL_SESS | SMP_F_CONST;
+ smp->data.type = SMP_T_STR;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ smp->data.u.str.area = (char *)SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ if (!smp->data.u.str.area) {
+ /* We might have stored the SNI ourselves, look for it in the
+ * context's ex_data.
+ */
+ smp->data.u.str.area = SSL_get_ex_data(ssl, ssl_client_sni_index);
+
+ if (!smp->data.u.str.area)
+ return 0;
+ }
+
+ smp->data.u.str.data = strlen(smp->data.u.str.area);
+
+ return 1;
+#else
+ /* SNI not supported */
+ return 0;
+#endif
+}
+
+/* binary, returns tls client hello cipher list.
+ * Arguments: filter_option (0,1)
+ * With filter_option set, GREASE values are stripped and the result is
+ * copied into a trash chunk; otherwise the raw captured bytes are
+ * returned as a constant sample.
+ */
+static int
+smp_fetch_ssl_fc_cl_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *smp_trash;
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ /* only available when the client hello was captured (tune.ssl.capture) */
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ if (args[0].data.sint) {
+ smp_trash = get_trash_chunk();
+ exclude_tls_grease(capture->data + capture->ciphersuite_offset, capture->ciphersuite_len, smp_trash);
+ smp->data.u.str.area = smp_trash->area;
+ smp->data.u.str.data = smp_trash->data;
+ smp->flags = SMP_F_VOL_SESS;
+ }
+ else {
+ smp->data.u.str.area = capture->data + capture->ciphersuite_offset;
+ smp->data.u.str.data = capture->ciphersuite_len;
+ smp->flags = SMP_F_VOL_TEST | SMP_F_CONST;
+ }
+
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+/* binary, returns tls client hello cipher list as hexadecimal string.
+ * Arguments: filter_option (0,1)
+ * Simply hex-dumps the result of smp_fetch_ssl_fc_cl_bin() into a trash
+ * chunk.
+ */
+static int
+smp_fetch_ssl_fc_cl_hex(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *data;
+
+ if (!smp_fetch_ssl_fc_cl_bin(args, smp, kw, private))
+ return 0;
+
+ data = get_trash_chunk();
+ dump_binary(data, smp->data.u.str.area, smp->data.u.str.data);
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str = *data;
+ return 1;
+}
+
+/* integer, returns xxh64 hash of tls client hello cipher list.
+ * The hash was computed once at capture time and stored in the capture
+ * structure, so this is a simple lookup.
+ */
+static int
+smp_fetch_ssl_fc_cl_xxh64(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = capture->xxh64;
+ return 1;
+}
+
+/* integer, returns the SSL error code of the connection, 0 if none.
+ * Also usable on a backend conn when the fetch keyword's 5th char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_err(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_sock_ctx *ctx;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ if (!conn)
+ return 0;
+
+ /* handshake still in progress and no error yet: the value may still
+  * change, report it as such so the caller can retry later.
+  */
+ if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ ctx = conn_get_ssl_sock_ctx(conn);
+ if (!ctx)
+ return 0;
+
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = ctx->error_code;
+ return 1;
+}
+
+/* integer, returns the protocol version advertised in the captured TLS
+ * client hello. Requires the client hello capture to be enabled.
+ */
+static int
+smp_fetch_ssl_fc_protocol_hello_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = capture->protocol_version;
+ return 1;
+}
+
+/* string, returns the printable form of the connection's SSL error code
+ * (via ERR_error_string()), or fails when there is no error. Also usable
+ * on a backend conn when the fetch keyword's 5th char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_err_str(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_sock_ctx *ctx;
+ const char *err_code_str;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ if (!conn)
+ return 0;
+
+ /* handshake not finished and no error yet: value may still change */
+ if (conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ ctx = conn_get_ssl_sock_ctx(conn);
+ if (!ctx || !ctx->error_code)
+ return 0;
+
+ /* with a NULL buffer, ERR_error_string() uses a static buffer */
+ err_code_str = ERR_error_string(ctx->error_code, NULL);
+
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str.area = (char*)err_code_str;
+ smp->data.u.str.data = strlen(err_code_str);
+
+ return 1;
+}
+
+/* binary, returns tls client hello extensions list.
+ * Arguments: filter_option (0,1)
+ * With filter_option set, GREASE values are stripped into a trash chunk;
+ * otherwise the raw captured bytes are returned as a constant sample.
+ */
+static int
+smp_fetch_ssl_fc_ext_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *smp_trash;
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ if (args[0].data.sint) {
+ smp_trash = get_trash_chunk();
+ exclude_tls_grease(capture->data + capture->extensions_offset, capture->extensions_len, smp_trash);
+ smp->data.u.str.area = smp_trash->area;
+ smp->data.u.str.data = smp_trash->data;
+ smp->flags = SMP_F_VOL_SESS;
+ }
+ else {
+ smp->data.u.str.area = capture->data + capture->extensions_offset;
+ smp->data.u.str.data = capture->extensions_len;
+ smp->flags = SMP_F_VOL_TEST | SMP_F_CONST;
+ }
+
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+/* binary, returns tls client hello supported elliptic curves.
+ * Arguments: filter_option (0,1)
+ * With filter_option set, GREASE values are stripped into a trash chunk;
+ * otherwise the raw captured bytes are returned as a constant sample.
+ */
+static int
+smp_fetch_ssl_fc_ecl_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct buffer *smp_trash;
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ if (args[0].data.sint) {
+ smp_trash = get_trash_chunk();
+ exclude_tls_grease(capture->data + capture->ec_offset, capture->ec_len, smp_trash);
+ smp->data.u.str.area = smp_trash->area;
+ smp->data.u.str.data = smp_trash->data;
+ smp->flags = SMP_F_VOL_SESS;
+ }
+ else {
+ smp->data.u.str.area = capture->data + capture->ec_offset;
+ smp->data.u.str.data = capture->ec_len;
+ smp->flags = SMP_F_VOL_TEST | SMP_F_CONST;
+ }
+
+ smp->data.type = SMP_T_BIN;
+ return 1;
+}
+
+/* binary, returns tls client hello supported elliptic curve point formats.
+ * Raw captured bytes, returned as a constant sample (no GREASE filtering
+ * argument for this one).
+ */
+static int
+smp_fetch_ssl_fc_ecf_bin(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_capture *capture;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ capture = SSL_get_ex_data(ssl, ssl_capture_ptr_index);
+ if (!capture)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST | SMP_F_CONST;
+ smp->data.type = SMP_T_BIN;
+ smp->data.u.str.area = capture->data + capture->ec_formats_offset;
+ smp->data.u.str.data = capture->ec_formats_len;
+ return 1;
+}
+
+/* Dump the SSL keylog, it only works with "tune.ssl.keylog 1" */
+#ifdef HAVE_SSL_KEYLOG
+static int smp_fetch_ssl_x_keylog(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct ssl_keylog *keylog;
+ SSL *ssl;
+ char *src = NULL;
+ const char *sfx;
+
+ if (global_ssl.keylog <= 0)
+ return 0;
+
+ /* kw[4] == 'b' selects the backend connection ("ssl_bc_*") */
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ if (!conn)
+ return 0;
+
+ if (conn->flags & CO_FL_WAIT_XPRT) {
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ keylog = SSL_get_ex_data(ssl, ssl_keylog_index);
+ if (!keylog)
+ return 0;
+
+ /* skip the "ssl_fc_" / "ssl_bc_" prefix to get the secret name, then
+  * map it to the corresponding field of the stored keylog structure.
+  */
+ sfx = kw + strlen("ssl_xx_");
+
+ if (strcmp(sfx, "client_early_traffic_secret") == 0) {
+ src = keylog->client_early_traffic_secret;
+ } else if (strcmp(sfx, "client_handshake_traffic_secret") == 0) {
+ src = keylog->client_handshake_traffic_secret;
+ } else if (strcmp(sfx, "server_handshake_traffic_secret") == 0) {
+ src = keylog->server_handshake_traffic_secret;
+ } else if (strcmp(sfx, "client_traffic_secret_0") == 0) {
+ src = keylog->client_traffic_secret_0;
+ } else if (strcmp(sfx, "server_traffic_secret_0") == 0) {
+ src = keylog->server_traffic_secret_0;
+ } else if (strcmp(sfx, "exporter_secret") == 0) {
+ src = keylog->exporter_secret;
+ } else if (strcmp(sfx, "early_exporter_secret") == 0) {
+ src = keylog->early_exporter_secret;
+ }
+
+ if (!src || !*src)
+ return 0;
+
+ smp->data.u.str.area = src;
+ smp->data.type = SMP_T_STR;
+ smp->flags |= SMP_F_VOL_TEST | SMP_F_CONST;
+ smp->data.u.str.data = strlen(smp->data.u.str.area);
+ return 1;
+}
+#endif
+
+/* string, returns the captured client hello cipher list as a comma
+ * separated list of cipher names. Falls back to the xxh64 hash when the
+ * library cannot resolve cipher IDs to names.
+ */
+static int
+smp_fetch_ssl_fc_cl_str(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+#if defined(OPENSSL_IS_BORINGSSL) || defined(SSL_CTRL_GET_RAW_CIPHERLIST)
+ struct buffer *data;
+ int i;
+
+ if (!smp_fetch_ssl_fc_cl_bin(args, smp, kw, private))
+ return 0;
+
+ data = get_trash_chunk();
+ /* the captured list is a sequence of 2-byte big-endian cipher IDs */
+ for (i = 0; i + 1 < smp->data.u.str.data; i += 2) {
+ const char *str;
+ const SSL_CIPHER *cipher;
+ const unsigned char *bin = (const unsigned char *) smp->data.u.str.area + i;
+ uint16_t id = (bin[0] << 8) | bin[1];
+#if defined(OPENSSL_IS_BORINGSSL)
+ cipher = SSL_get_cipher_by_value(id);
+#else
+ /* safe: smp_fetch_ssl_fc_cl_bin() succeeded above, so the
+  * session origin is known to be a connection.
+  */
+ struct connection *conn = __objt_conn(smp->sess->origin);
+ SSL *ssl = ssl_sock_get_ssl_object(conn);
+ cipher = SSL_CIPHER_find(ssl, bin);
+#endif
+ str = SSL_CIPHER_get_name(cipher);
+ if (!str || strcmp(str, "(NONE)") == 0)
+ chunk_appendf(data, "%sUNKNOWN(%04x)", i == 0 ? "" : ",", id);
+ else
+ chunk_appendf(data, "%s%s", i == 0 ? "" : ",", str);
+ }
+ smp->data.type = SMP_T_STR;
+ smp->data.u.str = *data;
+ return 1;
+#else
+ return smp_fetch_ssl_fc_cl_xxh64(args, smp, kw, private);
+#endif
+}
+
+#if HA_OPENSSL_VERSION_NUMBER > 0x0090800fL
+/* binary, returns the TLS unique identifier of the connection, i.e. the
+ * "Finished" message (the peer's one for a full handshake, ours when the
+ * session was resumed). Also usable on a backend conn when the fetch
+ * keyword's 5th char is 'b'.
+ */
+static int
+smp_fetch_ssl_fc_unique_id(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ int finished_len;
+ struct buffer *finished_trash;
+ SSL *ssl;
+
+ if (obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ conn = (kw[4] == 'b') ? sc_conn(__objt_check(smp->sess->origin)->sc) : NULL;
+ else
+ conn = (kw[4] != 'b') ? objt_conn(smp->sess->origin) :
+ smp->strm ? sc_conn(smp->strm->scb) : NULL;
+
+ smp->flags = 0;
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ /* not available until the handshake has completed */
+ if (conn->flags & CO_FL_WAIT_XPRT) {
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ finished_trash = get_trash_chunk();
+ if (!SSL_session_reused(ssl))
+ finished_len = SSL_get_peer_finished(ssl,
+ finished_trash->area,
+ finished_trash->size);
+ else
+ finished_len = SSL_get_finished(ssl,
+ finished_trash->area,
+ finished_trash->size);
+
+ if (!finished_len)
+ return 0;
+
+ finished_trash->data = finished_len;
+ smp->flags = SMP_F_VOL_SESS;
+ smp->data.u.str = *finished_trash;
+ smp->data.type = SMP_T_BIN;
+
+ return 1;
+}
+#endif
+
+/* integer, returns the first verify error in CA chain of client certificate chain. */
+static int
+smp_fetch_ssl_c_ca_err(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn = objt_conn(smp->sess->origin);
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ /* handshake still running and no error yet: value may still change */
+ if (conn && conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ if (!ctx)
+ return 0;
+
+ /* the error is packed into xprt_st and extracted by this macro */
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = (unsigned long long int)SSL_SOCK_ST_TO_CA_ERROR(ctx->xprt_st);
+ smp->flags = SMP_F_VOL_SESS;
+
+ return 1;
+}
+
+/* integer, returns the depth of the first verify error in CA chain of client certificate chain. */
+static int
+smp_fetch_ssl_c_ca_err_depth(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn = objt_conn(smp->sess->origin);
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ /* handshake still running and no error yet: value may still change */
+ if (conn && conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ if (!ctx)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = (long long int)SSL_SOCK_ST_TO_CAEDEPTH(ctx->xprt_st);
+ smp->flags = SMP_F_VOL_SESS;
+
+ return 1;
+}
+
+/* integer, returns the first verify error on client certificate */
+static int
+smp_fetch_ssl_c_err(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn = objt_conn(smp->sess->origin);
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ /* handshake still running and no error yet: value may still change */
+ if (conn && conn->flags & CO_FL_WAIT_XPRT && !conn->err_code) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ if (!ctx)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = (long long int)SSL_SOCK_ST_TO_CRTERROR(ctx->xprt_st);
+ smp->flags = SMP_F_VOL_SESS;
+
+ return 1;
+}
+
+/* integer, returns the verify result on client cert */
+static int
+smp_fetch_ssl_c_verify(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ SSL *ssl;
+
+ conn = objt_conn(smp->sess->origin);
+ ssl = ssl_sock_get_ssl_object(conn);
+ if (!ssl)
+ return 0;
+
+ /* not meaningful until the handshake completes */
+ if (conn->flags & CO_FL_WAIT_XPRT) {
+ smp->flags = SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ /* X509_V_OK (0) or an X509_V_ERR_* code from the library */
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = (long long int)SSL_get_verify_result(ssl);
+ smp->flags = SMP_F_VOL_SESS;
+
+ return 1;
+}
+
+/* Argument validation functions */
+
+/* This function is used to validate the arguments passed to any "x_dn" ssl
+ * keywords. These keywords support specifying a third parameter that must be
+ * either empty or the value "rfc2253". Returns 0 on error, non-zero if OK.
+ * On error, *err_msg is set to an allocated message for the caller.
+ */
+int val_dnfmt(struct arg *arg, char **err_msg)
+{
+ if (arg && arg[2].type == ARGT_STR && arg[2].data.str.data > 0 && (strcmp(arg[2].data.str.area, "rfc2253") != 0)) {
+ memprintf(err_msg, "only rfc2253 or a blank value are currently supported as the format argument.");
+ return 0;
+ }
+ return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ *
+ * The "ssl_bc_*" keywords reuse the "ssl_fc_*" fetch functions: those
+ * functions inspect the 5th character of the keyword ('b') to select the
+ * backend connection instead of the frontend one.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ { "ssl_bc", smp_fetch_ssl_fc, 0, NULL, SMP_T_BOOL, SMP_USE_L5SRV },
+ { "ssl_bc_alg_keysize", smp_fetch_ssl_fc_alg_keysize, 0, NULL, SMP_T_SINT, SMP_USE_L5SRV },
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ { "ssl_bc_alpn", smp_fetch_ssl_fc_alpn, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+#endif
+ { "ssl_bc_cipher", smp_fetch_ssl_fc_cipher, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+ { "ssl_bc_curve", smp_fetch_ssl_fc_ec, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+#endif
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ { "ssl_bc_npn", smp_fetch_ssl_fc_npn, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+#endif
+ { "ssl_bc_is_resumed", smp_fetch_ssl_fc_is_resumed, 0, NULL, SMP_T_BOOL, SMP_USE_L5SRV },
+ { "ssl_bc_protocol", smp_fetch_ssl_fc_protocol, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+ { "ssl_bc_unique_id", smp_fetch_ssl_fc_unique_id, 0, NULL, SMP_T_BIN, SMP_USE_L5SRV },
+ { "ssl_bc_use_keysize", smp_fetch_ssl_fc_use_keysize, 0, NULL, SMP_T_SINT, SMP_USE_L5SRV },
+#if HA_OPENSSL_VERSION_NUMBER > 0x0090800fL
+ { "ssl_bc_session_id", smp_fetch_ssl_fc_session_id, 0, NULL, SMP_T_BIN, SMP_USE_L5SRV },
+#endif
+#ifdef HAVE_SSL_EXTRACT_RANDOM
+ { "ssl_bc_client_random", smp_fetch_ssl_fc_random, 0, NULL, SMP_T_BIN, SMP_USE_L5SRV },
+ { "ssl_bc_server_random", smp_fetch_ssl_fc_random, 0, NULL, SMP_T_BIN, SMP_USE_L5SRV },
+ { "ssl_bc_session_key", smp_fetch_ssl_fc_session_key, 0, NULL, SMP_T_BIN, SMP_USE_L5SRV },
+#endif
+ { "ssl_bc_err", smp_fetch_ssl_fc_err, 0, NULL, SMP_T_SINT, SMP_USE_L5SRV },
+ { "ssl_bc_err_str", smp_fetch_ssl_fc_err_str, 0, NULL, SMP_T_STR, SMP_USE_L5SRV },
+ { "ssl_c_ca_err", smp_fetch_ssl_c_ca_err, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_c_ca_err_depth", smp_fetch_ssl_c_ca_err_depth, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_c_der", smp_fetch_ssl_x_der, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_c_chain_der", smp_fetch_ssl_x_chain_der, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_c_err", smp_fetch_ssl_c_err, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_c_i_dn", smp_fetch_ssl_x_i_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_c_key_alg", smp_fetch_ssl_x_key_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_c_notafter", smp_fetch_ssl_x_notafter, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_c_notbefore", smp_fetch_ssl_x_notbefore, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#ifdef HAVE_SSL_get0_verified_chain
+ { "ssl_c_r_dn", smp_fetch_ssl_r_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+#endif
+ { "ssl_c_sig_alg", smp_fetch_ssl_x_sig_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_c_s_dn", smp_fetch_ssl_x_s_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_c_serial", smp_fetch_ssl_x_serial, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_c_sha1", smp_fetch_ssl_x_sha1, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_c_used", smp_fetch_ssl_c_used, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+ { "ssl_c_verify", smp_fetch_ssl_c_verify, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_c_version", smp_fetch_ssl_x_version, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_f_der", smp_fetch_ssl_x_der, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_f_i_dn", smp_fetch_ssl_x_i_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_key_alg", smp_fetch_ssl_x_key_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_notafter", smp_fetch_ssl_x_notafter, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_notbefore", smp_fetch_ssl_x_notbefore, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_sig_alg", smp_fetch_ssl_x_sig_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_s_dn", smp_fetch_ssl_x_s_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_f_serial", smp_fetch_ssl_x_serial, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_f_sha1", smp_fetch_ssl_x_sha1, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_f_version", smp_fetch_ssl_x_version, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_fc", smp_fetch_ssl_fc, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+ { "ssl_fc_alg_keysize", smp_fetch_ssl_fc_alg_keysize, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_fc_cipher", smp_fetch_ssl_fc_cipher, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+ { "ssl_fc_curve", smp_fetch_ssl_fc_ec, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#endif
+ { "ssl_fc_has_crt", smp_fetch_ssl_fc_has_crt, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+ { "ssl_fc_has_early", smp_fetch_ssl_fc_has_early, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+ { "ssl_fc_has_sni", smp_fetch_ssl_fc_has_sni, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+ { "ssl_fc_is_resumed", smp_fetch_ssl_fc_is_resumed, 0, NULL, SMP_T_BOOL, SMP_USE_L5CLI },
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+ { "ssl_fc_npn", smp_fetch_ssl_fc_npn, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+ { "ssl_fc_alpn", smp_fetch_ssl_fc_alpn, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#endif
+ { "ssl_fc_protocol", smp_fetch_ssl_fc_protocol, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#if HA_OPENSSL_VERSION_NUMBER > 0x0090800fL
+ { "ssl_fc_unique_id", smp_fetch_ssl_fc_unique_id, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+#endif
+ { "ssl_fc_use_keysize", smp_fetch_ssl_fc_use_keysize, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+#if HA_OPENSSL_VERSION_NUMBER > 0x0090800fL
+ { "ssl_fc_session_id", smp_fetch_ssl_fc_session_id, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+#endif
+#ifdef HAVE_SSL_EXTRACT_RANDOM
+ { "ssl_fc_client_random", smp_fetch_ssl_fc_random, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_fc_server_random", smp_fetch_ssl_fc_random, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_fc_session_key", smp_fetch_ssl_fc_session_key, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+#endif
+
+#ifdef HAVE_SSL_KEYLOG
+ { "ssl_fc_client_early_traffic_secret", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_client_handshake_traffic_secret", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_server_handshake_traffic_secret", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_client_traffic_secret_0", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_server_traffic_secret_0", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_exporter_secret", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_early_exporter_secret", smp_fetch_ssl_x_keylog, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+#endif
+
+ { "ssl_fc_sni", smp_fetch_ssl_fc_sni, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_cipherlist_bin", smp_fetch_ssl_fc_cl_bin, ARG1(0,SINT), NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_cipherlist_hex", smp_fetch_ssl_fc_cl_hex, ARG1(0,SINT), NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_fc_cipherlist_str", smp_fetch_ssl_fc_cl_str, ARG1(0,SINT), NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_cipherlist_xxh", smp_fetch_ssl_fc_cl_xxh64, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_fc_err", smp_fetch_ssl_fc_err, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_fc_err_str", smp_fetch_ssl_fc_err_str, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_protocol_hello_id",smp_fetch_ssl_fc_protocol_hello_id,0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { "ssl_fc_extlist_bin", smp_fetch_ssl_fc_ext_bin, ARG1(0,SINT), NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_eclist_bin", smp_fetch_ssl_fc_ecl_bin, ARG1(0,SINT), NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_fc_ecformats_bin", smp_fetch_ssl_fc_ecf_bin, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+
+/* SSL server certificate fetches */
+ { "ssl_s_der", smp_fetch_ssl_x_der, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_s_chain_der", smp_fetch_ssl_x_chain_der, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_s_key_alg", smp_fetch_ssl_x_key_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_notafter", smp_fetch_ssl_x_notafter, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_notbefore", smp_fetch_ssl_x_notbefore, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_sig_alg", smp_fetch_ssl_x_sig_alg, 0, NULL, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_s_dn", smp_fetch_ssl_x_s_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_i_dn", smp_fetch_ssl_x_i_dn, ARG3(0,STR,SINT,STR),val_dnfmt, SMP_T_STR, SMP_USE_L5CLI },
+ { "ssl_s_serial", smp_fetch_ssl_x_serial, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_s_sha1", smp_fetch_ssl_x_sha1, 0, NULL, SMP_T_BIN, SMP_USE_L5CLI },
+ { "ssl_s_version", smp_fetch_ssl_x_version, 0, NULL, SMP_T_SINT, SMP_USE_L5CLI },
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Crypto-related sample converters (hashing, HMAC, AES-GCM decryption,
+ * X509 verify-error stringification).
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+ { "sha2", sample_conv_sha2, ARG1(0, SINT), smp_check_sha2, SMP_T_BIN, SMP_T_BIN },
+#ifdef EVP_CIPH_GCM_MODE
+ { "aes_gcm_dec", sample_conv_aes_gcm_dec, ARG4(4,SINT,STR,STR,STR), check_aes_gcm, SMP_T_BIN, SMP_T_BIN },
+#endif
+ { "x509_v_err_str", sample_conv_x509_v_err, 0, NULL, SMP_T_SINT, SMP_T_STR },
+ { "digest", sample_conv_crypto_digest, ARG1(1,STR), check_crypto_digest, SMP_T_BIN, SMP_T_BIN },
+ { "hmac", sample_conv_crypto_hmac, ARG2(2,STR,STR), check_crypto_hmac, SMP_T_BIN, SMP_T_BIN },
+#if defined(HAVE_CRYPTO_memcmp)
+ { "secure_memcmp", sample_conv_secure_memcmp, ARG1(1,STR), smp_check_secure_memcmp, SMP_T_BIN, SMP_T_BOOL },
+#endif
+ { NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
+
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ * Maps ACL keyword variants to the "ssl_fc_sni" sample fetch with the
+ * appropriate pattern-matching method (suffix / regex).
+ */
+static struct acl_kw_list acl_kws = {ILH, {
+ { "ssl_fc_sni_end", "ssl_fc_sni", PAT_MATCH_END },
+ { "ssl_fc_sni_reg", "ssl_fc_sni", PAT_MATCH_REG },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, acl_register_keywords, &acl_kws);
diff --git a/src/ssl_sock.c b/src/ssl_sock.c
new file mode 100644
index 0000000..6fbabb4
--- /dev/null
+++ b/src/ssl_sock.c
@@ -0,0 +1,8100 @@
+
+/*
+ * SSL/TLS transport layer over SOCK_STREAM sockets
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Acknowledgement:
+ * We'd like to specially thank the Stud project authors for a very clean
+ * and well documented code which helped us understand how the OpenSSL API
+ * ought to be used in non-blocking mode. This is one difficult part which
+ * is not easy to get from the OpenSSL doc, and reading the Stud code made
+ * it much more obvious than the examples in the OpenSSL package. Keep up
+ * the good works, guys !
+ *
+ * Stud is an extremely efficient and scalable SSL/TLS proxy which combines
+ * particularly well with haproxy. For more info about this project, visit :
+ * https://github.com/bumptech/stud
+ *
+ */
+
+/* Note: do NOT include openssl/xxx.h here, do it in openssl-compat.h */
+#define _GNU_SOURCE
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+
+#include <import/ebpttree.h>
+#include <import/ebsttree.h>
+#include <import/lru.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/base64.h>
+#include <haproxy/channel.h>
+#include <haproxy/chunk.h>
+#include <haproxy/cli.h>
+#include <haproxy/connection.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/log.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/quic_openssl_compat.h>
+#include <haproxy/quic_tp.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/shctx.h>
+#include <haproxy/ssl_ckch.h>
+#include <haproxy/ssl_crtlist.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+#include <haproxy/xxhash.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/ssl_ocsp.h>
+
+
+/* ***** READ THIS before adding code here! *****
+ *
+ * Due to API incompatibilities between multiple OpenSSL versions and their
+ * derivatives, it's often tempting to add macros to (re-)define certain
+ * symbols. Please do not do this here, and do it in common/openssl-compat.h
+ * exclusively so that the whole code consistently uses the same macros.
+ *
+ * Whenever possible if a macro is missing in certain versions, it's better
+ * to conditionally define it in openssl-compat.h than using lots of ifdefs.
+ */
+
+int nb_engines = 0;
+
+static struct eb_root cert_issuer_tree = EB_ROOT; /* issuers tree from "issuers-chain-path" */
+
+struct global_ssl global_ssl = {
+#ifdef LISTEN_DEFAULT_CIPHERS
+ .listen_default_ciphers = LISTEN_DEFAULT_CIPHERS,
+#endif
+#ifdef CONNECT_DEFAULT_CIPHERS
+ .connect_default_ciphers = CONNECT_DEFAULT_CIPHERS,
+#endif
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+ .listen_default_ciphersuites = LISTEN_DEFAULT_CIPHERSUITES,
+ .connect_default_ciphersuites = CONNECT_DEFAULT_CIPHERSUITES,
+#endif
+ .listen_default_ssloptions = BC_SSL_O_NONE,
+ .connect_default_ssloptions = SRV_SSL_O_NONE,
+
+ .listen_default_sslmethods.flags = MC_SSL_O_ALL,
+ .listen_default_sslmethods.min = CONF_TLSV_NONE,
+ .listen_default_sslmethods.max = CONF_TLSV_NONE,
+ .connect_default_sslmethods.flags = MC_SSL_O_ALL,
+ .connect_default_sslmethods.min = CONF_TLSV_NONE,
+ .connect_default_sslmethods.max = CONF_TLSV_NONE,
+
+#ifdef DEFAULT_SSL_MAX_RECORD
+ .max_record = DEFAULT_SSL_MAX_RECORD,
+#endif
+ .hard_max_record = 0,
+ .default_dh_param = SSL_DEFAULT_DH_PARAM,
+ .ctx_cache = DEFAULT_SSL_CTX_CACHE,
+ .capture_buffer_size = 0,
+ .extra_files = SSL_GF_ALL,
+ .extra_files_noext = 0,
+#ifdef HAVE_SSL_KEYLOG
+ .keylog = 0,
+#endif
+#ifndef OPENSSL_NO_OCSP
+ .ocsp_update.delay_max = SSL_OCSP_UPDATE_DELAY_MAX,
+ .ocsp_update.delay_min = SSL_OCSP_UPDATE_DELAY_MIN,
+#endif
+};
+
+static BIO_METHOD *ha_meth;
+
+DECLARE_STATIC_POOL(ssl_sock_ctx_pool, "ssl_sock_ctx", sizeof(struct ssl_sock_ctx));
+
+DECLARE_STATIC_POOL(ssl_sock_client_sni_pool, "ssl_sock_client_sni", TLSEXT_MAXLEN_host_name + 1);
+
+/* ssl stats module */
+enum {
+ SSL_ST_SESS,
+ SSL_ST_REUSED_SESS,
+ SSL_ST_FAILED_HANDSHAKE,
+
+ SSL_ST_STATS_COUNT /* must be the last member of the enum */
+};
+
+static struct name_desc ssl_stats[] = {
+ [SSL_ST_SESS] = { .name = "ssl_sess",
+ .desc = "Total number of ssl sessions established" },
+ [SSL_ST_REUSED_SESS] = { .name = "ssl_reused_sess",
+ .desc = "Total number of ssl sessions reused" },
+ [SSL_ST_FAILED_HANDSHAKE] = { .name = "ssl_failed_handshake",
+ .desc = "Total number of failed handshake" },
+};
+
+static struct ssl_counters {
+ long long sess;
+ long long reused_sess;
+ long long failed_handshake;
+} ssl_counters;
+
+static void ssl_fill_stats(void *data, struct field *stats)
+{
+ struct ssl_counters *counters = data;
+
+ stats[SSL_ST_SESS] = mkf_u64(FN_COUNTER, counters->sess);
+ stats[SSL_ST_REUSED_SESS] = mkf_u64(FN_COUNTER, counters->reused_sess);
+ stats[SSL_ST_FAILED_HANDSHAKE] = mkf_u64(FN_COUNTER, counters->failed_handshake);
+}
+
+static struct stats_module ssl_stats_module = {
+ .name = "ssl",
+ .fill_stats = ssl_fill_stats,
+ .stats = ssl_stats,
+ .stats_count = SSL_ST_STATS_COUNT,
+ .counters = &ssl_counters,
+ .counters_size = sizeof(ssl_counters),
+ .domain_flags = MK_STATS_PROXY_DOMAIN(STATS_PX_CAP_FE|STATS_PX_CAP_LI|STATS_PX_CAP_BE|STATS_PX_CAP_SRV),
+ .clearable = 1,
+};
+
+INITCALL1(STG_REGISTER, stats_register_module, &ssl_stats_module);
+
+/* CLI context for "show tls-keys" */
+struct show_keys_ctx {
+ struct tls_keys_ref *next_ref; /* next reference to be dumped */
+ int names_only; /* non-zero = only show file names */
+ int next_index; /* next index to be dumped */
+ int dump_entries; /* dump entries also */
+ enum {
+ SHOW_KEYS_INIT = 0,
+ SHOW_KEYS_LIST,
+ SHOW_KEYS_DONE,
+ } state; /* phase of the current dump */
+};
+
+/* ssl_sock_io_cb is exported to see it resolved in "show fd" */
+struct task *ssl_sock_io_cb(struct task *, void *, unsigned int);
+static int ssl_sock_handshake(struct connection *conn, unsigned int flag);
+
+/* Methods to implement OpenSSL BIO */
+static int ha_ssl_write(BIO *h, const char *buf, int num)
+{
+ struct buffer tmpbuf;
+ struct ssl_sock_ctx *ctx;
+ uint flags;
+ int ret;
+
+ ctx = BIO_get_data(h);
+ tmpbuf.size = num;
+ tmpbuf.area = (void *)(uintptr_t)buf;
+ tmpbuf.data = num;
+ tmpbuf.head = 0;
+ flags = (ctx->xprt_st & SSL_SOCK_SEND_MORE) ? CO_SFL_MSG_MORE : 0;
+ ret = ctx->xprt->snd_buf(ctx->conn, ctx->xprt_ctx, &tmpbuf, num, flags);
+ BIO_clear_retry_flags(h);
+ if (ret == 0 && !(ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_WR_SH))) {
+ BIO_set_retry_write(h);
+ ret = -1;
+ }
+ return ret;
+}
+
+static int ha_ssl_gets(BIO *h, char *buf, int size)
+{
+
+ return 0;
+}
+
+static int ha_ssl_puts(BIO *h, const char *str)
+{
+
+ return ha_ssl_write(h, str, strlen(str));
+}
+
+static int ha_ssl_read(BIO *h, char *buf, int size)
+{
+ struct buffer tmpbuf;
+ struct ssl_sock_ctx *ctx;
+ int ret;
+
+ ctx = BIO_get_data(h);
+ tmpbuf.size = size;
+ tmpbuf.area = buf;
+ tmpbuf.data = 0;
+ tmpbuf.head = 0;
+ ret = ctx->xprt->rcv_buf(ctx->conn, ctx->xprt_ctx, &tmpbuf, size, 0);
+ BIO_clear_retry_flags(h);
+ if (ret == 0 && !(ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH))) {
+ BIO_set_retry_read(h);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static long ha_ssl_ctrl(BIO *h, int cmd, long arg1, void *arg2)
+{
+ int ret = 0;
+ switch (cmd) {
+ case BIO_CTRL_DUP:
+ case BIO_CTRL_FLUSH:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+static int ha_ssl_new(BIO *h)
+{
+ BIO_set_init(h, 1);
+ BIO_set_data(h, NULL);
+ BIO_clear_flags(h, ~0);
+ return 1;
+}
+
+static int ha_ssl_free(BIO *data)
+{
+
+ return 1;
+}
+
+
+#if defined(USE_THREAD) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+static HA_RWLOCK_T *ssl_rwlocks;
+
+
+unsigned long ssl_id_function(void)
+{
+ return (unsigned long)tid;
+}
+
+void ssl_locking_function(int mode, int n, const char * file, int line)
+{
+ if (mode & CRYPTO_LOCK) {
+ if (mode & CRYPTO_READ)
+ HA_RWLOCK_RDLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+ else
+ HA_RWLOCK_WRLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+ }
+ else {
+ if (mode & CRYPTO_READ)
+ HA_RWLOCK_RDUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+ else
+ HA_RWLOCK_WRUNLOCK(SSL_LOCK, &ssl_rwlocks[n]);
+ }
+}
+
+static int ssl_locking_init(void)
+{
+ int i;
+
+ ssl_rwlocks = malloc(sizeof(HA_RWLOCK_T)*CRYPTO_num_locks());
+ if (!ssl_rwlocks)
+ return -1;
+
+ for (i = 0 ; i < CRYPTO_num_locks() ; i++)
+ HA_RWLOCK_INIT(&ssl_rwlocks[i]);
+
+ CRYPTO_set_id_callback(ssl_id_function);
+ CRYPTO_set_locking_callback(ssl_locking_function);
+
+ return 0;
+}
+
+#endif
+
+__decl_thread(HA_SPINLOCK_T ckch_lock);
+
+
+
+/* mimic what X509_STORE_load_locations do with store_ctx */
+static int ssl_set_cert_crl_file(X509_STORE *store_ctx, char *path)
+{
+ X509_STORE *store = NULL;
+ struct cafile_entry *ca_e = ssl_store_get_cafile_entry(path, 0);
+ if (ca_e)
+ store = ca_e->ca_store;
+ if (store_ctx && store) {
+ int i;
+ X509_OBJECT *obj;
+ STACK_OF(X509_OBJECT) *objs = X509_STORE_get0_objects(store);
+ for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
+ obj = sk_X509_OBJECT_value(objs, i);
+ switch (X509_OBJECT_get_type(obj)) {
+ case X509_LU_X509:
+ X509_STORE_add_cert(store_ctx, X509_OBJECT_get0_X509(obj));
+ break;
+ case X509_LU_CRL:
+ X509_STORE_add_crl(store_ctx, X509_OBJECT_get0_X509_CRL(obj));
+ break;
+ default:
+ break;
+ }
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* SSL_CTX_load_verify_locations substitute, internally call X509_STORE_load_locations */
+static int ssl_set_verify_locations_file(SSL_CTX *ctx, char *path)
+{
+ X509_STORE *store_ctx = SSL_CTX_get_cert_store(ctx);
+ return ssl_set_cert_crl_file(store_ctx, path);
+}
+
+/*
+  Extract the CA_list from a CA_file already present in the tree.
+  Duplicate ca_names are tracked with an ebtree, which simplifies OpenSSL compatibility.
+  Return a shared ca_list: SSL_dup_CA_list must be used before setting it on an SSL_CTX.
+*/
+static STACK_OF(X509_NAME)* ssl_get_client_ca_file(char *path)
+{
+ struct ebmb_node *eb;
+ struct cafile_entry *ca_e;
+
+ eb = ebst_lookup(&cafile_tree, path);
+ if (!eb)
+ return NULL;
+ ca_e = ebmb_entry(eb, struct cafile_entry, node);
+
+ if (ca_e->ca_list == NULL) {
+ int i;
+ unsigned long key;
+ struct eb_root ca_name_tree = EB_ROOT;
+ struct eb64_node *node, *back;
+ struct {
+ struct eb64_node node;
+ X509_NAME *xname;
+ } *ca_name;
+ STACK_OF(X509_OBJECT) *objs;
+ STACK_OF(X509_NAME) *skn;
+ X509 *x;
+ X509_NAME *xn;
+
+ skn = sk_X509_NAME_new_null();
+ /* take x509 from cafile_tree */
+ objs = X509_STORE_get0_objects(ca_e->ca_store);
+ for (i = 0; i < sk_X509_OBJECT_num(objs); i++) {
+ x = X509_OBJECT_get0_X509(sk_X509_OBJECT_value(objs, i));
+ if (!x)
+ continue;
+ xn = X509_get_subject_name(x);
+ if (!xn)
+ continue;
+ /* Check for duplicates. */
+ key = X509_NAME_hash(xn);
+ for (node = eb64_lookup(&ca_name_tree, key), ca_name = NULL;
+ node && ca_name == NULL;
+ node = eb64_next(node)) {
+ ca_name = container_of(node, typeof(*ca_name), node);
+ if (X509_NAME_cmp(xn, ca_name->xname) != 0)
+ ca_name = NULL;
+ }
+ /* find a duplicate */
+ if (ca_name)
+ continue;
+ ca_name = calloc(1, sizeof *ca_name);
+ xn = X509_NAME_dup(xn);
+ if (!ca_name ||
+ !xn ||
+ !sk_X509_NAME_push(skn, xn)) {
+ free(ca_name);
+ X509_NAME_free(xn);
+ sk_X509_NAME_pop_free(skn, X509_NAME_free);
+ sk_X509_NAME_free(skn);
+ skn = NULL;
+ break;
+ }
+ ca_name->node.key = key;
+ ca_name->xname = xn;
+ eb64_insert(&ca_name_tree, &ca_name->node);
+ }
+ ca_e->ca_list = skn;
+ /* remove temporary ca_name tree */
+ node = eb64_first(&ca_name_tree);
+ while (node) {
+ ca_name = container_of(node, typeof(*ca_name), node);
+ back = eb64_next(node);
+ eb64_delete(node);
+ free(ca_name);
+ node = back;
+ }
+ }
+ return ca_e->ca_list;
+}
+
+struct pool_head *pool_head_ssl_capture __read_mostly = NULL;
+int ssl_capture_ptr_index = -1;
+int ssl_app_data_index = -1;
+#ifdef USE_QUIC
+int ssl_qc_app_data_index = -1;
+#endif /* USE_QUIC */
+
+#ifdef HAVE_SSL_KEYLOG
+int ssl_keylog_index = -1;
+struct pool_head *pool_head_ssl_keylog __read_mostly = NULL;
+struct pool_head *pool_head_ssl_keylog_str __read_mostly = NULL;
+#endif
+
+int ssl_client_crt_ref_index = -1;
+
+/* Used to store the client's SNI in case of ClientHello callback error */
+int ssl_client_sni_index = -1;
+
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+struct list tlskeys_reference = LIST_HEAD_INIT(tlskeys_reference);
+#endif
+
+#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
+unsigned int openssl_engines_initialized;
+struct list openssl_engines = LIST_HEAD_INIT(openssl_engines);
+struct ssl_engine_list {
+ struct list list;
+ ENGINE *e;
+};
+#endif
+
+#ifdef HAVE_SSL_PROVIDERS
+struct list openssl_providers = LIST_HEAD_INIT(openssl_providers);
+struct ssl_provider_list {
+ struct list list;
+ OSSL_PROVIDER *provider;
+};
+#endif
+
+#ifndef OPENSSL_NO_DH
+static int ssl_dh_ptr_index = -1;
+static HASSL_DH *global_dh = NULL;
+static HASSL_DH *local_dh_1024 = NULL;
+static HASSL_DH *local_dh_2048 = NULL;
+static HASSL_DH *local_dh_4096 = NULL;
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+static DH *ssl_get_tmp_dh_cbk(SSL *ssl, int export, int keylen);
+#else
+static void ssl_sock_set_tmp_dh_from_pkey(SSL_CTX *ctx, EVP_PKEY *pkey);
+#endif
+#endif /* OPENSSL_NO_DH */
+
+#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
+/* X509V3 Extensions that will be added on generated certificates */
+#define X509V3_EXT_SIZE 5
+static char *x509v3_ext_names[X509V3_EXT_SIZE] = {
+ "basicConstraints",
+ "nsComment",
+ "subjectKeyIdentifier",
+ "authorityKeyIdentifier",
+ "keyUsage",
+};
+static char *x509v3_ext_values[X509V3_EXT_SIZE] = {
+ "CA:FALSE",
+ "\"OpenSSL Generated Certificate\"",
+ "hash",
+ "keyid,issuer:always",
+ "nonRepudiation,digitalSignature,keyEncipherment"
+};
+/* LRU cache to store generated certificate */
+static struct lru64_head *ssl_ctx_lru_tree = NULL;
+static unsigned int ssl_ctx_lru_seed = 0;
+static unsigned int ssl_ctx_serial;
+__decl_rwlock(ssl_ctx_lru_rwlock);
+
+#endif // SSL_CTRL_SET_TLSEXT_HOSTNAME
+
+/* The order here matters for picking a default context,
+ * keep the most common keytype at the bottom of the list
+ */
+const char *SSL_SOCK_KEYTYPE_NAMES[] = {
+ "dsa",
+ "ecdsa",
+ "rsa"
+};
+
+static struct shared_context *ssl_shctx = NULL; /* ssl shared session cache */
+static struct eb_root *sh_ssl_sess_tree; /* ssl shared session tree */
+
+/* Dedicated callback functions for heartbeat and clienthello.
+ */
+#ifdef TLS1_RT_HEARTBEAT
+static void ssl_sock_parse_heartbeat(struct connection *conn, int write_p, int version,
+ int content_type, const void *buf, size_t len,
+ SSL *ssl);
+#endif
+static void ssl_sock_parse_clienthello(struct connection *conn, int write_p, int version,
+ int content_type, const void *buf, size_t len,
+ SSL *ssl);
+
+#ifdef HAVE_SSL_KEYLOG
+static void ssl_init_keylog(struct connection *conn, int write_p, int version,
+ int content_type, const void *buf, size_t len,
+ SSL *ssl);
+#endif
+
+/* List head of all registered SSL/TLS protocol message callbacks. */
+struct list ssl_sock_msg_callbacks = LIST_HEAD_INIT(ssl_sock_msg_callbacks);
+
+/* Registers the function <func> in order to be called on SSL/TLS protocol
+ * message processing. It will return 0 if the function <func> is not set
+ * or if it fails to allocate memory.
+ */
+int ssl_sock_register_msg_callback(ssl_sock_msg_callback_func func)
+{
+ struct ssl_sock_msg_callback *cbk;
+
+ if (!func)
+ return 0;
+
+ cbk = calloc(1, sizeof(*cbk));
+ if (!cbk) {
+ ha_alert("out of memory in ssl_sock_register_msg_callback().\n");
+ return 0;
+ }
+
+ cbk->func = func;
+
+ LIST_APPEND(&ssl_sock_msg_callbacks, &cbk->list);
+
+ return 1;
+}
+
+/* Used to register dedicated SSL/TLS protocol message callbacks.
+ */
+static int ssl_sock_register_msg_callbacks(void)
+{
+#ifdef TLS1_RT_HEARTBEAT
+ if (!ssl_sock_register_msg_callback(ssl_sock_parse_heartbeat))
+ return ERR_ABORT;
+#endif
+ if (global_ssl.capture_buffer_size > 0) {
+ if (!ssl_sock_register_msg_callback(ssl_sock_parse_clienthello))
+ return ERR_ABORT;
+ }
+#ifdef HAVE_SSL_KEYLOG
+ if (global_ssl.keylog > 0) {
+ if (!ssl_sock_register_msg_callback(ssl_init_keylog))
+ return ERR_ABORT;
+ }
+#endif
+#ifdef USE_QUIC_OPENSSL_COMPAT
+ if (!ssl_sock_register_msg_callback(quic_tls_compat_msg_callback))
+ return ERR_ABORT;
+#endif
+
+ return ERR_NONE;
+}
+
+/* Used to free all SSL/TLS protocol message callbacks that were
+ * registered by using ssl_sock_register_msg_callback().
+ */
+static void ssl_sock_unregister_msg_callbacks(void)
+{
+ struct ssl_sock_msg_callback *cbk, *cbkback;
+
+ list_for_each_entry_safe(cbk, cbkback, &ssl_sock_msg_callbacks, list) {
+ LIST_DELETE(&cbk->list);
+ free(cbk);
+ }
+}
+
+static struct ssl_sock_ctx *ssl_sock_get_ctx(struct connection *conn)
+{
+ if (!conn || conn->xprt != xprt_get(XPRT_SSL) || !conn->xprt_ctx)
+ return NULL;
+
+ return (struct ssl_sock_ctx *)conn->xprt_ctx;
+}
+
+SSL *ssl_sock_get_ssl_object(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ return ctx ? ctx->ssl : NULL;
+}
+/*
+ * This function gives the details of an SSL error. It is used only
+ * if the debug mode and the verbose mode are activated. It dumps all
+ * the SSL errors until the stack is empty.
+ */
+static forceinline void ssl_sock_dump_errors(struct connection *conn,
+ struct quic_conn *qc)
+{
+ unsigned long ret;
+
+ if (unlikely(global.mode & MODE_DEBUG)) {
+ while(1) {
+ const char *func = NULL;
+ ERR_peek_error_func(&func);
+
+ ret = ERR_get_error();
+ if (ret == 0)
+ return;
+ if (conn) {
+ fprintf(stderr, "fd[%#x] OpenSSL error[0x%lx] %s: %s\n",
+ conn_fd(conn), ret,
+ func, ERR_reason_error_string(ret));
+ }
+#ifdef USE_QUIC
+ else {
+ /* TODO: we are not sure <conn> is always initialized for QUIC connections */
+ fprintf(stderr, "qc @%p OpenSSL error[0x%lx] %s: %s\n", qc, ret,
+ func, ERR_reason_error_string(ret));
+ }
+#endif
+ }
+ }
+}
+
+
+#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
+int ssl_init_single_engine(const char *engine_id, const char *def_algorithms)
+{
+ int err_code = ERR_ABORT;
+ ENGINE *engine;
+ struct ssl_engine_list *el;
+
+ /* grab the structural reference to the engine */
+ engine = ENGINE_by_id(engine_id);
+ if (engine == NULL) {
+ ha_alert("ssl-engine %s: failed to get structural reference\n", engine_id);
+ goto fail_get;
+ }
+
+ if (!ENGINE_init(engine)) {
+ /* the engine couldn't initialise, release it */
+ ha_alert("ssl-engine %s: failed to initialize\n", engine_id);
+ goto fail_init;
+ }
+
+ if (ENGINE_set_default_string(engine, def_algorithms) == 0) {
+ ha_alert("ssl-engine %s: failed on ENGINE_set_default_string\n", engine_id);
+ goto fail_set_method;
+ }
+
+ el = calloc(1, sizeof(*el));
+ if (!el)
+ goto fail_alloc;
+ el->e = engine;
+ LIST_INSERT(&openssl_engines, &el->list);
+ nb_engines++;
+ if (global_ssl.async)
+ global.ssl_used_async_engines = nb_engines;
+ return 0;
+
+fail_alloc:
+fail_set_method:
+ /* release the functional reference from ENGINE_init() */
+ ENGINE_finish(engine);
+
+fail_init:
+ /* release the structural reference from ENGINE_by_id() */
+ ENGINE_free(engine);
+
+fail_get:
+ return err_code;
+}
+#endif
+
+#ifdef HAVE_SSL_PROVIDERS
+int ssl_init_provider(const char *provider_name)
+{
+ int err_code = ERR_ABORT;
+ struct ssl_provider_list *prov = NULL;
+
+ prov = calloc(1, sizeof(*prov));
+ if (!prov) {
+ ha_alert("ssl-provider %s: memory allocation failure\n", provider_name);
+ goto error;
+ }
+
+ if ((prov->provider = OSSL_PROVIDER_load(NULL, provider_name)) == NULL) {
+ ha_alert("ssl-provider %s: unknown provider\n", provider_name);
+ goto error;
+ }
+
+ LIST_INSERT(&openssl_providers, &prov->list);
+
+ return 0;
+
+error:
+ ha_free(&prov);
+ return err_code;
+}
+#endif /* HAVE_SSL_PROVIDERS */
+
+#ifdef SSL_MODE_ASYNC
+/*
+ * openssl async fd handler
+ */
+void ssl_async_fd_handler(int fd)
+{
+ struct ssl_sock_ctx *ctx = fdtab[fd].owner;
+
+	/* fd is an async engine fd, we must stop
+	 * polling this fd until it is requested
+ */
+ fd_stop_recv(fd);
+ fd_cant_recv(fd);
+
+ /* crypto engine is available, let's notify the associated
+ * connection that it can pursue its processing.
+ */
+ tasklet_wakeup(ctx->wait_event.tasklet);
+}
+
+/*
+ * openssl async delayed SSL_free handler
+ */
+void ssl_async_fd_free(int fd)
+{
+ SSL *ssl = fdtab[fd].owner;
+ OSSL_ASYNC_FD all_fd[32];
+ size_t num_all_fds = 0;
+ int i;
+
+	/* We assume that the async jobs for the same SSL *
+	 * are serialized. So if we are awoken it is
+ * because the running job has just finished
+ * and we can remove all async fds safely
+ */
+ SSL_get_all_async_fds(ssl, NULL, &num_all_fds);
+ if (num_all_fds > 32) {
+ send_log(NULL, LOG_EMERG, "haproxy: openssl returns too many async fds. It seems a bug. Process may crash\n");
+ return;
+ }
+
+ SSL_get_all_async_fds(ssl, all_fd, &num_all_fds);
+ for (i=0 ; i < num_all_fds ; i++) {
+ /* We want to remove the fd from the fdtab
+ * but we flag it to disown because the
+ * close is performed by the engine itself
+ */
+ fdtab[all_fd[i]].state |= FD_DISOWN;
+ fd_delete(all_fd[i]);
+ }
+
+ /* Now we can safely call SSL_free, no more pending job in engines */
+ SSL_free(ssl);
+ _HA_ATOMIC_DEC(&global.sslconns);
+ _HA_ATOMIC_DEC(&jobs);
+}
+/*
+ * function used to manage a returned SSL_ERROR_WANT_ASYNC
+ * and enable/disable polling for async fds
+ */
+static inline void ssl_async_process_fds(struct ssl_sock_ctx *ctx)
+{
+ OSSL_ASYNC_FD add_fd[32];
+ OSSL_ASYNC_FD del_fd[32];
+ SSL *ssl = ctx->ssl;
+ size_t num_add_fds = 0;
+ size_t num_del_fds = 0;
+ int i;
+
+ SSL_get_changed_async_fds(ssl, NULL, &num_add_fds, NULL,
+ &num_del_fds);
+ if (num_add_fds > 32 || num_del_fds > 32) {
+ send_log(NULL, LOG_EMERG, "haproxy: openssl returns too many async fds. It seems a bug. Process may crash\n");
+ return;
+ }
+
+ SSL_get_changed_async_fds(ssl, add_fd, &num_add_fds, del_fd, &num_del_fds);
+
+ /* We remove unused fds from the fdtab */
+ for (i=0 ; i < num_del_fds ; i++) {
+ /* We want to remove the fd from the fdtab
+ * but we flag it to disown because the
+ * close is performed by the engine itself
+ */
+ fdtab[del_fd[i]].state |= FD_DISOWN;
+ fd_delete(del_fd[i]);
+ }
+
+ /* We add new fds to the fdtab */
+ for (i=0 ; i < num_add_fds ; i++) {
+ fd_insert(add_fd[i], ctx, ssl_async_fd_handler, tgid, ti->ltid_bit);
+ }
+
+ num_add_fds = 0;
+ SSL_get_all_async_fds(ssl, NULL, &num_add_fds);
+ if (num_add_fds > 32) {
+ send_log(NULL, LOG_EMERG, "haproxy: openssl returns too many async fds. It seems a bug. Process may crash\n");
+ return;
+ }
+
+ /* We activate the polling for all known async fds */
+ SSL_get_all_async_fds(ssl, add_fd, &num_add_fds);
+ for (i=0 ; i < num_add_fds ; i++) {
+ fd_want_recv(add_fd[i]);
+ /* To ensure that the fd cache won't be used
+ * We'll prefer to catch a real RD event
+ * because handling an EAGAIN on this fd will
+ * result in a context switch and also
+ * some engines uses a fd in blocking mode.
+ */
+ fd_cant_recv(add_fd[i]);
+ }
+
+}
+#endif
+
+
+/*
+ * Initialize an HMAC context <hctx> using the <key> and <md> parameters.
+ * Returns -1 in case of error, 1 otherwise.
+ */
+static int ssl_hmac_init(MAC_CTX *hctx, unsigned char *key, int key_len, const EVP_MD *md)
+{
+#ifdef HAVE_OSSL_PARAM
+ OSSL_PARAM params[3];
+
+ params[0] = OSSL_PARAM_construct_octet_string(OSSL_MAC_PARAM_KEY, key, key_len);
+ params[1] = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, (char*)EVP_MD_name(md), 0);
+ params[2] = OSSL_PARAM_construct_end();
+ if (EVP_MAC_CTX_set_params(hctx, params) == 0)
+ return -1; /* error in mac initialisation */
+
+#else
+ HMAC_Init_ex(hctx, key, key_len, md, NULL);
+#endif
+ return 1;
+}
+
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+
+static int ssl_tlsext_ticket_key_cb(SSL *s, unsigned char key_name[16], unsigned char *iv, EVP_CIPHER_CTX *ectx, MAC_CTX *hctx, int enc)
+{
+ struct tls_keys_ref *ref = NULL;
+ union tls_sess_key *keys;
+ int head;
+ int i;
+ int ret = -1; /* error by default */
+ struct connection *conn = SSL_get_ex_data(s, ssl_app_data_index);
+#ifdef USE_QUIC
+ struct quic_conn *qc = SSL_get_ex_data(s, ssl_qc_app_data_index);
+#endif
+
+ if (conn)
+ ref = __objt_listener(conn->target)->bind_conf->keys_ref;
+#ifdef USE_QUIC
+ else if (qc)
+ ref = qc->li->bind_conf->keys_ref;
+#endif
+
+ if (!ref) {
+ /* must never happen */
+ ABORT_NOW();
+ }
+
+ HA_RWLOCK_RDLOCK(TLSKEYS_REF_LOCK, &ref->lock);
+
+ keys = ref->tlskeys;
+ head = ref->tls_ticket_enc_index;
+
+ if (enc) {
+ memcpy(key_name, keys[head].name, 16);
+
+ if(!RAND_pseudo_bytes(iv, EVP_MAX_IV_LENGTH))
+ goto end;
+
+ if (ref->key_size_bits == 128) {
+
+ if(!EVP_EncryptInit_ex(ectx, EVP_aes_128_cbc(), NULL, keys[head].key_128.aes_key, iv))
+ goto end;
+
+ if (ssl_hmac_init(hctx, keys[head].key_128.hmac_key, 16, TLS_TICKET_HASH_FUNCT()) < 0)
+ goto end;
+ ret = 1;
+ }
+ else if (ref->key_size_bits == 256 ) {
+
+ if(!EVP_EncryptInit_ex(ectx, EVP_aes_256_cbc(), NULL, keys[head].key_256.aes_key, iv))
+ goto end;
+
+ if (ssl_hmac_init(hctx, keys[head].key_256.hmac_key, 32, TLS_TICKET_HASH_FUNCT()) < 0)
+ goto end;
+ ret = 1;
+ }
+ } else {
+ for (i = 0; i < TLS_TICKETS_NO; i++) {
+ if (!memcmp(key_name, keys[(head + i) % TLS_TICKETS_NO].name, 16))
+ goto found;
+ }
+ ret = 0;
+ goto end;
+
+ found:
+ if (ref->key_size_bits == 128) {
+ if (ssl_hmac_init(hctx, keys[(head + i) % TLS_TICKETS_NO].key_128.hmac_key, 16, TLS_TICKET_HASH_FUNCT()) < 0)
+ goto end;
+ if(!EVP_DecryptInit_ex(ectx, EVP_aes_128_cbc(), NULL, keys[(head + i) % TLS_TICKETS_NO].key_128.aes_key, iv))
+ goto end;
+ /* 2 for key renewal, 1 if current key is still valid */
+ ret = i ? 2 : 1;
+ }
+ else if (ref->key_size_bits == 256) {
+ if (ssl_hmac_init(hctx, keys[(head + i) % TLS_TICKETS_NO].key_256.hmac_key, 32, TLS_TICKET_HASH_FUNCT()) < 0)
+ goto end;
+ if(!EVP_DecryptInit_ex(ectx, EVP_aes_256_cbc(), NULL, keys[(head + i) % TLS_TICKETS_NO].key_256.aes_key, iv))
+ goto end;
+ /* 2 for key renewal, 1 if current key is still valid */
+ ret = i ? 2 : 1;
+ }
+ }
+
+ end:
+ HA_RWLOCK_RDUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
+ return ret;
+}
+
+struct tls_keys_ref *tlskeys_ref_lookup(const char *filename)
+{
+ struct tls_keys_ref *ref;
+
+ list_for_each_entry(ref, &tlskeys_reference, list)
+ if (ref->filename && strcmp(filename, ref->filename) == 0)
+ return ref;
+ return NULL;
+}
+
+struct tls_keys_ref *tlskeys_ref_lookupid(int unique_id)
+{
+ struct tls_keys_ref *ref;
+
+ list_for_each_entry(ref, &tlskeys_reference, list)
+ if (ref->unique_id == unique_id)
+ return ref;
+ return NULL;
+}
+
+/* Update the key into ref: if keysize doesn't
+ * match existing ones, this function returns -1
+ * else it returns 0 on success.
+ */
+int ssl_sock_update_tlskey_ref(struct tls_keys_ref *ref,
+ struct buffer *tlskey)
+{
+ if (ref->key_size_bits == 128) {
+ if (tlskey->data != sizeof(struct tls_sess_key_128))
+ return -1;
+ }
+ else if (ref->key_size_bits == 256) {
+ if (tlskey->data != sizeof(struct tls_sess_key_256))
+ return -1;
+ }
+ else
+ return -1;
+
+ HA_RWLOCK_WRLOCK(TLSKEYS_REF_LOCK, &ref->lock);
+ memcpy((char *) (ref->tlskeys + ((ref->tls_ticket_enc_index + 2) % TLS_TICKETS_NO)),
+ tlskey->area, tlskey->data);
+ ref->tls_ticket_enc_index = (ref->tls_ticket_enc_index + 1) % TLS_TICKETS_NO;
+ HA_RWLOCK_WRUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
+
+ return 0;
+}
+
+int ssl_sock_update_tlskey(char *filename, struct buffer *tlskey, char **err)
+{
+ struct tls_keys_ref *ref = tlskeys_ref_lookup(filename);
+
+ if(!ref) {
+ memprintf(err, "Unable to locate the referenced filename: %s", filename);
+ return 1;
+ }
+ if (ssl_sock_update_tlskey_ref(ref, tlskey) < 0) {
+ memprintf(err, "Invalid key size");
+ return 1;
+ }
+
+ return 0;
+}
+
+/* This function finalizes the configuration parsing. It sets all the
+ * automatic ids. It's called just after the basic checks. It returns
+ * 0 on success otherwise ERR_*.
+ */
+static int tlskeys_finalize_config(void)
+{
+ int i = 0;
+ struct tls_keys_ref *ref, *ref2, *ref3;
+ struct list tkr = LIST_HEAD_INIT(tkr);
+
+ list_for_each_entry(ref, &tlskeys_reference, list) {
+ if (ref->unique_id == -1) {
+ /* Look for the first free id. */
+ while (1) {
+ list_for_each_entry(ref2, &tlskeys_reference, list) {
+ if (ref2->unique_id == i) {
+ i++;
+ break;
+ }
+ }
+ if (&ref2->list == &tlskeys_reference)
+ break;
+ }
+
+ /* Uses the unique id and increment it for the next entry. */
+ ref->unique_id = i;
+ i++;
+ }
+ }
+
+	/* This sorts the reference list by id. */
+ list_for_each_entry_safe(ref, ref2, &tlskeys_reference, list) {
+ LIST_DELETE(&ref->list);
+ list_for_each_entry(ref3, &tkr, list) {
+ if (ref->unique_id < ref3->unique_id) {
+ LIST_APPEND(&ref3->list, &ref->list);
+ break;
+ }
+ }
+ if (&ref3->list == &tkr)
+ LIST_APPEND(&tkr, &ref->list);
+ }
+
+ /* swap root */
+ LIST_SPLICE(&tlskeys_reference, &tkr);
+ return ERR_NONE;
+}
+#endif /* SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB */
+
+
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
+/*
+ * This function enables the handling of OCSP status extension on 'ctx' if a
+ * ocsp_response buffer was found in the cert_key_and_chain. To enable OCSP
+ * status extension, the issuer's certificate is mandatory. It should be
+ * present in ckch->ocsp_issuer.
+ *
+ * In addition, the ckch->ocsp_response buffer is loaded as a DER format of an
+ * OCSP response. If file is empty or content is not a valid OCSP response,
+ * OCSP status extension is enabled but OCSP response is ignored (a warning is
+ * displayed).
+ *
+ * Returns 1 if no ".ocsp" file found, 0 if OCSP status extension is
+ * successfully enabled, or -1 in other error case.
+ */
+static int ssl_sock_load_ocsp(const char *path, SSL_CTX *ctx, struct ckch_data *data, STACK_OF(X509) *chain)
+{
+ X509 *x, *issuer;
+ int i, ret = -1;
+ struct certificate_ocsp *ocsp = NULL, *iocsp;
+ char *warn = NULL;
+ unsigned char *p;
+#ifndef USE_OPENSSL_WOLFSSL
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
+ int (*callback) (SSL *, void *);
+#else
+ void (*callback) (void);
+#endif
+#else
+ tlsextStatusCb callback;
+#endif
+ struct buffer *ocsp_uri = get_trash_chunk();
+ char *err = NULL;
+ size_t path_len;
+ int inc_refcount_store = 0;
+
+ x = data->cert;
+ if (!x)
+ goto out;
+
+ ssl_ocsp_get_uri_from_cert(x, ocsp_uri, &err);
+ /* We should have an "OCSP URI" field in order for auto update to work. */
+ if (data->ocsp_update_mode == SSL_SOCK_OCSP_UPDATE_ON && b_data(ocsp_uri) == 0)
+ goto out;
+
+ /* In case of ocsp update mode set to 'on', this function might be
+ * called with no known ocsp response. If no ocsp uri can be found in
+ * the certificate, nothing needs to be done here. */
+ if (!data->ocsp_response && !data->ocsp_cid) {
+ if (data->ocsp_update_mode != SSL_SOCK_OCSP_UPDATE_ON || b_data(ocsp_uri) == 0) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ issuer = data->ocsp_issuer;
+	/* take issuer from chain over ocsp_issuer, as was done historically */
+ if (chain) {
+ /* check if one of the certificate of the chain is the issuer */
+ for (i = 0; i < sk_X509_num(chain); i++) {
+ X509 *ti = sk_X509_value(chain, i);
+ if (X509_check_issued(ti, x) == X509_V_OK) {
+ issuer = ti;
+ break;
+ }
+ }
+ }
+ if (!issuer)
+ goto out;
+
+ if (!data->ocsp_cid) {
+ data->ocsp_cid = OCSP_cert_to_id(0, x, issuer);
+ inc_refcount_store = 1;
+ }
+ if (!data->ocsp_cid)
+ goto out;
+
+ i = i2d_OCSP_CERTID(data->ocsp_cid, NULL);
+ if (!i || (i > OCSP_MAX_CERTID_ASN1_LENGTH))
+ goto out;
+
+ path_len = strlen(path);
+ ocsp = calloc(1, sizeof(*ocsp) + path_len + 1);
+ if (!ocsp)
+ goto out;
+
+ p = ocsp->key_data;
+ ocsp->key_length = i2d_OCSP_CERTID(data->ocsp_cid, &p);
+
+ HA_SPIN_LOCK(OCSP_LOCK, &ocsp_tree_lock);
+ iocsp = (struct certificate_ocsp *)ebmb_insert(&cert_ocsp_tree, &ocsp->key, OCSP_MAX_CERTID_ASN1_LENGTH);
+ if (iocsp == ocsp)
+ ocsp = NULL;
+
+#ifndef SSL_CTX_get_tlsext_status_cb
+# define SSL_CTX_get_tlsext_status_cb(ctx, cb) \
+ *cb = (void (*) (void))ctx->tlsext_status_cb;
+#endif
+ SSL_CTX_get_tlsext_status_cb(ctx, &callback);
+
+ if (inc_refcount_store)
+ iocsp->refcount_store++;
+
+ if (!callback) {
+ struct ocsp_cbk_arg *cb_arg;
+ EVP_PKEY *pkey;
+
+ cb_arg = calloc(1, sizeof(*cb_arg));
+ if (!cb_arg)
+ goto out;
+
+ cb_arg->is_single = 1;
+ cb_arg->s_ocsp = iocsp;
+ iocsp->refcount_instance++;
+
+ pkey = X509_get_pubkey(x);
+ cb_arg->single_kt = EVP_PKEY_base_id(pkey);
+ EVP_PKEY_free(pkey);
+
+ SSL_CTX_set_tlsext_status_cb(ctx, ssl_sock_ocsp_stapling_cbk);
+ SSL_CTX_set_ex_data(ctx, ocsp_ex_index, cb_arg); /* we use the ex_data instead of the cb_arg function here, so we can use the cleanup callback to free */
+
+ } else {
+ /*
+ * If the ctx has a status CB, then we have previously set an OCSP staple for this ctx
+ * Update that cb_arg with the new cert's staple
+ */
+ struct ocsp_cbk_arg *cb_arg;
+ struct certificate_ocsp *tmp_ocsp;
+ int index;
+ int key_type;
+ EVP_PKEY *pkey;
+
+ cb_arg = SSL_CTX_get_ex_data(ctx, ocsp_ex_index);
+
+ /*
+ * The following few lines will convert cb_arg from a single ocsp to multi ocsp
+ * the order of operations below matter, take care when changing it
+ */
+ tmp_ocsp = cb_arg->s_ocsp;
+ index = ssl_sock_get_ocsp_arg_kt_index(cb_arg->single_kt);
+ cb_arg->s_ocsp = NULL;
+ cb_arg->m_ocsp[index] = tmp_ocsp;
+ cb_arg->is_single = 0;
+ cb_arg->single_kt = 0;
+
+ pkey = X509_get_pubkey(x);
+ key_type = EVP_PKEY_base_id(pkey);
+ EVP_PKEY_free(pkey);
+
+ index = ssl_sock_get_ocsp_arg_kt_index(key_type);
+ if (index >= 0 && !cb_arg->m_ocsp[index]) {
+ cb_arg->m_ocsp[index] = iocsp;
+ iocsp->refcount_instance++;
+ }
+ }
+ HA_SPIN_UNLOCK(OCSP_LOCK, &ocsp_tree_lock);
+
+ ret = 0;
+
+ warn = NULL;
+ if (data->ocsp_response && ssl_sock_load_ocsp_response(data->ocsp_response, iocsp, data->ocsp_cid, &warn)) {
+ memprintf(&warn, "Loading: %s. Content will be ignored", warn ? warn : "failure");
+ ha_warning("%s.\n", warn);
+ }
+
+
+ /* Do not insert the same certificate_ocsp structure in the
+ * update tree more than once. */
+ if (!ocsp) {
+ /* Issuer certificate is not included in the certificate
+ * chain, it will have to be treated separately during
+ * ocsp response validation. */
+ if (issuer == data->ocsp_issuer) {
+ iocsp->issuer = issuer;
+ X509_up_ref(issuer);
+ }
+ if (data->chain)
+ iocsp->chain = X509_chain_up_ref(data->chain);
+
+ iocsp->uri = calloc(1, sizeof(*iocsp->uri));
+ if (!chunk_dup(iocsp->uri, ocsp_uri)) {
+ ha_free(&iocsp->uri);
+ goto out;
+ }
+
+ /* Note: if we arrive here, ocsp==NULL because iocsp==ocsp
+ * after the ebmb_insert(), which indicates that we've
+ * just inserted this new node and that it's the one for
+ * which we previously allocated enough room for path_len+1
+ * chars.
+ */
+ memcpy(iocsp->path, path, path_len + 1);
+
+ if (data->ocsp_update_mode == SSL_SOCK_OCSP_UPDATE_ON) {
+ ssl_ocsp_update_insert(iocsp);
+ /* If we are during init the update task is not
+ * scheduled yet so a wakeup won't do anything.
+ * Otherwise, if the OCSP was added through the CLI, we
+ * wake the task up to manage the case of a new entry
+ * that needs to be updated before the previous first
+ * entry.
+ */
+ if (ocsp_update_task)
+ task_wakeup(ocsp_update_task, TASK_WOKEN_MSG);
+ }
+ } else if (iocsp->uri && data->ocsp_update_mode == SSL_SOCK_OCSP_UPDATE_ON) {
+ /* This unlikely case can happen if a series of "del ssl
+ * crt-list" / "add ssl crt-list" commands are made on the CLI.
+ * In such a case, the OCSP response tree entry will be created
+ * prior to the activation of the ocsp auto update and in such a
+ * case we must "force" insertion in the auto update tree.
+ */
+ if (iocsp->next_update.node.leaf_p == NULL) {
+ ssl_ocsp_update_insert(iocsp);
+ /* If we are during init the update task is not
+ * scheduled yet so a wakeup won't do anything.
+ * Otherwise, if the OCSP was added through the CLI, we
+ * wake the task up to manage the case of a new entry
+ * that needs to be updated before the previous first
+ * entry.
+ */
+ if (ocsp_update_task)
+ task_wakeup(ocsp_update_task, TASK_WOKEN_MSG);
+ }
+ }
+
+out:
+ if (ret && data->ocsp_cid) {
+ OCSP_CERTID_free(data->ocsp_cid);
+ data->ocsp_cid = NULL;
+ }
+
+ if (!ret && data->ocsp_response) {
+ ha_free(&data->ocsp_response->area);
+ ha_free(&data->ocsp_response);
+ }
+
+ if (ocsp)
+ ssl_sock_free_ocsp(ocsp);
+
+ if (warn)
+ free(warn);
+
+ free(err);
+
+ return ret;
+}
+
+#endif
+
+#ifdef OPENSSL_IS_BORINGSSL
+/* BoringSSL flavour: the staple is stored directly on the SSL_CTX, no
+ * callback machinery is needed. BUG FIX: the body referenced the undeclared
+ * identifier 'ckch' while the parameter is named 'data'. */
+static int ssl_sock_load_ocsp(const char *path, SSL_CTX *ctx, struct ckch_data *data, STACK_OF(X509) *chain)
+{
+        return SSL_CTX_set_ocsp_response(ctx, (const uint8_t *)data->ocsp_response->area, data->ocsp_response->data);
+}
+#endif
+
+
+#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
+
+#define CT_EXTENSION_TYPE 18
+
+int sctl_ex_index = -1;
+
+/* "add" callback for the Certificate Transparency TLS extension: expose the
+ * SCT list buffer that was attached at configuration time to the TLS stack.
+ * Always succeeds. */
+int ssl_sock_sctl_add_cbk(SSL *ssl, unsigned ext_type, const unsigned char **out, size_t *outlen, int *al, void *add_arg)
+{
+        const struct buffer *sctl_buf = add_arg;
+
+        *outlen = sctl_buf->data;
+        *out = (unsigned char *)sctl_buf->area;
+
+        return 1;
+}
+
+/* "parse" callback for the Certificate Transparency TLS extension: the peer's
+ * payload is not inspected, always report success (return 1). */
+int ssl_sock_sctl_parse_cbk(SSL *s, unsigned int ext_type, const unsigned char *in, size_t inlen, int *al, void *parse_arg)
+{
+ return 1;
+}
+
+/* Register the add/parse callbacks for the Certificate Transparency custom
+ * extension on <ctx> and remember the SCT list in the context's ex_data so
+ * the cleanup callback can release it later.
+ * Returns 0 on success, -1 if the custom extension could not be registered. */
+static int ssl_sock_load_sctl(SSL_CTX *ctx, struct buffer *sctl)
+{
+        if (!SSL_CTX_add_server_custom_ext(ctx, CT_EXTENSION_TYPE,
+                                           ssl_sock_sctl_add_cbk, NULL, sctl,
+                                           ssl_sock_sctl_parse_cbk, NULL))
+                return -1;
+
+        SSL_CTX_set_ex_data(ctx, sctl_ex_index, sctl);
+        return 0;
+}
+
+#endif
+
+/* OpenSSL info callback attached to SSL objects. <where> describes the
+ * handshake event. It is used to reject client-initiated renegotiation
+ * (CVE-2009-3555) and to enlarge the write BIO buffer when a long
+ * certificate chain is being emitted. Works for both TCP and QUIC
+ * transports (the owning context is looked up from the relevant ex_data). */
+void ssl_sock_infocbk(const SSL *ssl, int where, int ret)
+{
+ struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
+#ifdef USE_QUIC
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+#endif /* USE_QUIC */
+ struct ssl_sock_ctx *ctx = NULL;
+
+ BIO *write_bio;
+ (void)ret; /* shut gcc stupid warning */
+
+ if (conn)
+ ctx = conn_get_ssl_sock_ctx(conn);
+#ifdef USE_QUIC
+ else if (qc)
+ ctx = qc->xprt_ctx;
+#endif /* USE_QUIC */
+
+ if (!ctx) {
+ /* must never happen */
+ ABORT_NOW();
+ return;
+ }
+
+#ifndef SSL_OP_NO_RENEGOTIATION
+ /* Please note that BoringSSL defines this macro to zero so don't
+ * change this to #if and do not assign a default value to this macro!
+ */
+ if (where & SSL_CB_HANDSHAKE_START) {
+ /* Disable renegotiation (CVE-2009-3555) */
+ if (conn && (conn->flags & (CO_FL_WAIT_L6_CONN | CO_FL_EARLY_SSL_HS | CO_FL_EARLY_DATA)) == 0) {
+ conn->flags |= CO_FL_ERROR;
+ conn->err_code = CO_ER_SSL_RENEG;
+ }
+ }
+#endif
+
+ if ((where & SSL_CB_ACCEPT_LOOP) == SSL_CB_ACCEPT_LOOP) {
+ if (!(ctx->xprt_st & SSL_SOCK_ST_FL_16K_WBFSIZE)) {
+ /* Long certificate chains optimization:
+ if write and read bios are different, we
+ consider that the buffering was activated,
+ so we raise the output buffer size from 4k
+ to 16k */
+ write_bio = SSL_get_wbio(ssl);
+ if (write_bio != SSL_get_rbio(ssl)) {
+ BIO_set_write_buffer_size(write_bio, 16384);
+ ctx->xprt_st |= SSL_SOCK_ST_FL_16K_WBFSIZE;
+ }
+ }
+ }
+}
+
+/* Callback called for each certificate of the chain during a verify.
+ * <ok> is set to 1 if preverify detected no error on the current certificate.
+ * Returns 0 to break the handshake, 1 otherwise. Errors can be selectively
+ * ignored through the bind line's ca/crt-ignore-err bitfields. */
+int ssl_sock_bind_verifycbk(int ok, X509_STORE_CTX *x_store)
+{
+ SSL *ssl;
+ struct connection *conn;
+ struct ssl_sock_ctx *ctx = NULL;
+ int err, depth;
+ X509 *client_crt;
+ STACK_OF(X509) *certs;
+ struct bind_conf *bind_conf = NULL;
+ struct quic_conn *qc = NULL;
+
+ ssl = X509_STORE_CTX_get_ex_data(x_store, SSL_get_ex_data_X509_STORE_CTX_idx());
+ conn = SSL_get_ex_data(ssl, ssl_app_data_index);
+ client_crt = SSL_get_ex_data(ssl, ssl_client_crt_ref_index);
+
+ if (conn) {
+ bind_conf = __objt_listener(conn->target)->bind_conf;
+ ctx = __conn_get_ssl_sock_ctx(conn);
+ }
+#ifdef USE_QUIC
+ else {
+ qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+ BUG_ON(!qc); /* Must never happen */
+ bind_conf = qc->li->bind_conf;
+ ctx = qc->xprt_ctx;
+ }
+#endif
+
+ BUG_ON(!ctx || !bind_conf);
+ ALREADY_CHECKED(ctx);
+ ALREADY_CHECKED(bind_conf);
+
+ ctx->xprt_st |= SSL_SOCK_ST_FL_VERIFY_DONE;
+
+ depth = X509_STORE_CTX_get_error_depth(x_store);
+ err = X509_STORE_CTX_get_error(x_store);
+
+ if (ok) /* no errors */
+ return ok;
+
+ /* Keep a reference to the client's certificate in order to be able to
+ * dump some fetches values in a log even when the verification process
+ * fails. */
+ if (depth == 0) {
+ /* release any reference stored by a previous invocation before
+ * installing the new one */
+ X509_free(client_crt);
+ client_crt = X509_STORE_CTX_get0_cert(x_store);
+ if (client_crt) {
+ X509_up_ref(client_crt);
+ SSL_set_ex_data(ssl, ssl_client_crt_ref_index, client_crt);
+ }
+ }
+ else {
+ /* An error occurred on a CA certificate of the certificate
+ * chain, we might never call this verify callback on the client
+ * certificate's depth (which is 0) so we try to store the
+ * reference right now. */
+ certs = X509_STORE_CTX_get1_chain(x_store);
+ if (certs) {
+ client_crt = sk_X509_value(certs, 0);
+ if (client_crt) {
+ X509_up_ref(client_crt);
+ SSL_set_ex_data(ssl, ssl_client_crt_ref_index, client_crt);
+ }
+ sk_X509_pop_free(certs, X509_free);
+ }
+ }
+
+ /* check if CA error needs to be ignored */
+ if (depth > 0) {
+ /* only the first CA error is recorded in the transport state */
+ if (!SSL_SOCK_ST_TO_CA_ERROR(ctx->xprt_st)) {
+ ctx->xprt_st |= SSL_SOCK_CA_ERROR_TO_ST(err);
+ ctx->xprt_st |= SSL_SOCK_CAEDEPTH_TO_ST(depth);
+ }
+
+ if (err <= SSL_MAX_VFY_ERROR_CODE &&
+ cert_ignerr_bitfield_get(bind_conf->ca_ignerr_bitfield, err))
+ goto err_ignored;
+
+ /* TODO: for QUIC connection, this error code is lost */
+ if (conn)
+ conn->err_code = CO_ER_SSL_CA_FAIL;
+ return 0;
+ }
+
+ if (!SSL_SOCK_ST_TO_CRTERROR(ctx->xprt_st))
+ ctx->xprt_st |= SSL_SOCK_CRTERROR_TO_ST(err);
+
+ /* check if certificate error needs to be ignored */
+ if (err <= SSL_MAX_VFY_ERROR_CODE &&
+ cert_ignerr_bitfield_get(bind_conf->crt_ignerr_bitfield, err))
+ goto err_ignored;
+
+ /* TODO: for QUIC connection, this error code is lost */
+ if (conn)
+ conn->err_code = CO_ER_SSL_CRT_FAIL;
+ return 0;
+
+ err_ignored:
+ ssl_sock_dump_errors(conn, qc);
+ ERR_clear_error();
+ return 1;
+}
+
+#ifdef TLS1_RT_HEARTBEAT
+/* Message callback inspecting received TLS heartbeat records in order to
+ * detect and kill Heartbleed (CVE-2014-0160) exploitation attempts: a
+ * request whose advertised payload length exceeds the record length. */
+static void ssl_sock_parse_heartbeat(struct connection *conn, int write_p, int version,
+ int content_type, const void *buf, size_t len,
+ SSL *ssl)
+{
+ /* test heartbeat received (write_p is set to 0
+ for a received record) */
+ if ((content_type == TLS1_RT_HEARTBEAT) && (write_p == 0)) {
+ struct ssl_sock_ctx *ctx = __conn_get_ssl_sock_ctx(conn);
+ const unsigned char *p = buf;
+ unsigned int payload;
+
+ ctx->xprt_st |= SSL_SOCK_RECV_HEARTBEAT;
+
+ /* Check if this is a CVE-2014-0160 exploitation attempt. */
+ if (*p != TLS1_HB_REQUEST)
+ return;
+
+ if (len < 1 + 2 + 16) /* 1 type + 2 size + 0 payload + 16 padding */
+ goto kill_it;
+
+ /* payload length is a 16-bit big-endian value at offset 1 */
+ payload = (p[1] * 256) + p[2];
+ if (3 + payload + 16 <= len)
+ return; /* OK no problem */
+ kill_it:
+ /* We have a clear heartbleed attack (CVE-2014-0160), the
+ * advertised payload is larger than the advertised packet
+ * length, so we have garbage in the buffer between the
+ * payload and the end of the buffer (p+len). We can't know
+ * if the SSL stack is patched, and we don't know if we can
+ * safely wipe out the area between p+3+len and payload.
+ * So instead, we prevent the response from being sent by
+ * setting the max_send_fragment to 0 and we report an SSL
+ * error, which will kill this connection. It will be reported
+ * above as SSL_ERROR_SSL while an other handshake failure with
+ * a heartbeat message will be reported as SSL_ERROR_SYSCALL.
+ */
+ ssl->max_send_fragment = 0;
+ SSLerr(SSL_F_TLS1_HEARTBEAT, SSL_R_SSL_HANDSHAKE_FAILURE);
+ }
+}
+#endif
+
+/* Message callback dissecting a received ClientHello in order to capture the
+ * protocol version, cipher suite list, elliptic curves and EC point formats
+ * into an ssl_capture structure attached to the SSL session (used by the
+ * ssl_fc_* capture sample fetches). Parsing stops silently on any
+ * malformed/truncated field; whatever was captured so far is stored. */
+static void ssl_sock_parse_clienthello(struct connection *conn, int write_p, int version,
+ int content_type, const void *buf, size_t len,
+ SSL *ssl)
+{
+ struct ssl_capture *capture;
+ uchar *msg;
+ uchar *end;
+ uchar *extensions_end;
+ uchar *ec_start = NULL;
+ uchar *ec_formats_start = NULL;
+ uchar *list_end;
+ ushort protocol_version;
+ ushort extension_id;
+ ushort ec_len = 0;
+ uchar ec_formats_len = 0;
+ int offset = 0;
+ int rec_len;
+
+ /* This function is called for "from client" and "to server"
+ * connections. The combination of write_p == 0 and content_type == 22
+ * is only available during "from client" connection.
+ */
+
+ /* "write_p" is set to 0 if the bytes are received messages,
+ * otherwise it is set to 1.
+ */
+ if (write_p != 0)
+ return;
+
+ /* content_type contains the type of message received or sent
+ * according with the SSL/TLS protocol spec. This message is
+ * encoded with one byte. The value 256 (two bytes) is used
+ * for designing the SSL/TLS record layer. According with the
+ * rfc6101, the expected message (other than 256) are:
+ * - change_cipher_spec(20)
+ * - alert(21)
+ * - handshake(22)
+ * - application_data(23)
+ * - (255)
+ * We are interested by the handshake and specially the client
+ * hello.
+ */
+ if (content_type != 22)
+ return;
+
+ /* The message length is at least 4 bytes, containing the
+ * message type and the message length.
+ */
+ if (len < 4)
+ return;
+
+ /* First byte of the handshake message is the type of
+ * message. The known types are:
+ * - hello_request(0)
+ * - client_hello(1)
+ * - server_hello(2)
+ * - certificate(11)
+ * - server_key_exchange (12)
+ * - certificate_request(13)
+ * - server_hello_done(14)
+ * We are interested by the client hello.
+ */
+ msg = (unsigned char *)buf;
+ if (msg[0] != 1)
+ return;
+
+ /* Next three bytes are the length of the message. The total length
+ * must be this decoded length + 4. If the length given as argument
+ * is not the same, we abort the protocol dissector.
+ */
+ rec_len = (msg[1] << 16) + (msg[2] << 8) + msg[3];
+ if (len < rec_len + 4)
+ return;
+ msg += 4;
+ end = msg + rec_len;
+ if (end < msg)
+ return;
+
+ /* Expect 2 bytes for protocol version
+ * (1 byte for major and 1 byte for minor)
+ */
+ if (msg + 2 > end)
+ return;
+ protocol_version = (msg[0] << 8) + msg[1];
+ msg += 2;
+
+ /* Expect the random, composed by 4 bytes for the unix time and
+ * 28 bytes for unix payload. So we jump 4 + 28.
+ */
+ msg += 4 + 28;
+ if (msg > end)
+ return;
+
+ /* Next, is session id:
+ * if present, we have to jump by length + 1 for the size information
+ * if not present, we have to jump by 1 only
+ * NOTE(review): when msg == end, msg[0] reads one byte past the
+ * handshake record (still within the TLS record when len > rec_len+4)
+ * -- verify against the record layer bounds.
+ */
+ if (msg[0] > 0)
+ msg += msg[0];
+ msg += 1;
+ if (msg > end)
+ return;
+
+ /* Next two bytes are the ciphersuite length. */
+ if (msg + 2 > end)
+ return;
+ rec_len = (msg[0] << 8) + msg[1];
+ msg += 2;
+ if (msg + rec_len > end || msg + rec_len < msg)
+ return;
+
+ capture = pool_zalloc(pool_head_ssl_capture);
+ if (!capture)
+ return;
+ /* Compute the xxh64 of the ciphersuite. */
+ capture->xxh64 = XXH64(msg, rec_len, 0);
+
+ /* Capture the ciphersuite, truncated to the configured buffer size. */
+ capture->ciphersuite_len = MIN(global_ssl.capture_buffer_size, rec_len);
+ capture->ciphersuite_offset = 0;
+ memcpy(capture->data, msg, capture->ciphersuite_len);
+ msg += rec_len;
+ offset += capture->ciphersuite_len;
+
+ /* Initialize other data */
+ capture->protocol_version = protocol_version;
+
+ /* Next, compression methods:
+ * if present, we have to jump by length + 1 for the size information
+ * if not present, we have to jump by 1 only
+ */
+ if (msg[0] > 0)
+ msg += msg[0];
+ msg += 1;
+ if (msg > end)
+ goto store_capture;
+
+ /* We reached extensions */
+ if (msg + 2 > end)
+ goto store_capture;
+ rec_len = (msg[0] << 8) + msg[1];
+ msg += 2;
+ if (msg + rec_len > end || msg + rec_len < msg)
+ goto store_capture;
+ extensions_end = msg + rec_len;
+ capture->extensions_offset = offset;
+
+ /* Parse each extension */
+ while (msg + 4 < extensions_end) {
+ /* Add 2 bytes of extension_id */
+ if (global_ssl.capture_buffer_size >= offset + 2) {
+ capture->data[offset++] = msg[0];
+ capture->data[offset++] = msg[1];
+ capture->extensions_len += 2;
+ }
+ else
+ break;
+ extension_id = (msg[0] << 8) + msg[1];
+ /* Length of the extension */
+ rec_len = (msg[2] << 8) + msg[3];
+
+ /* Expect 2 bytes extension id + 2 bytes extension size */
+ msg += 2 + 2;
+ if (msg + rec_len > extensions_end || msg + rec_len < msg)
+ goto store_capture;
+ /* TLS Extensions
+ * https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */
+ if (extension_id == 0x000a) {
+ /* Elliptic Curves:
+ * https://www.rfc-editor.org/rfc/rfc8422.html
+ * https://www.rfc-editor.org/rfc/rfc7919.html */
+ list_end = msg + rec_len;
+ if (msg + 2 > list_end)
+ goto store_capture;
+ rec_len = (msg[0] << 8) + msg[1];
+ msg += 2;
+
+ if (msg + rec_len > list_end || msg + rec_len < msg)
+ goto store_capture;
+ /* Store location/size of the list */
+ ec_start = msg;
+ ec_len = rec_len;
+ }
+ else if (extension_id == 0x000b) {
+ /* Elliptic Curves Point Formats:
+ * https://www.rfc-editor.org/rfc/rfc8422.html */
+ list_end = msg + rec_len;
+ if (msg + 1 > list_end)
+ goto store_capture;
+ rec_len = msg[0];
+ msg += 1;
+
+ if (msg + rec_len > list_end || msg + rec_len < msg)
+ goto store_capture;
+ /* Store location/size of the list */
+ ec_formats_start = msg;
+ ec_formats_len = rec_len;
+ }
+ msg += rec_len;
+ }
+
+ /* Copy the curve list into the capture buffer, truncated if needed. */
+ if (ec_start) {
+ rec_len = ec_len;
+ if (offset + rec_len > global_ssl.capture_buffer_size)
+ rec_len = global_ssl.capture_buffer_size - offset;
+ memcpy(capture->data + offset, ec_start, rec_len);
+ capture->ec_offset = offset;
+ capture->ec_len = rec_len;
+ offset += rec_len;
+ }
+ /* Same for the point format list. */
+ if (ec_formats_start) {
+ rec_len = ec_formats_len;
+ if (offset + rec_len > global_ssl.capture_buffer_size)
+ rec_len = global_ssl.capture_buffer_size - offset;
+ memcpy(capture->data + offset, ec_formats_start, rec_len);
+ capture->ec_formats_offset = offset;
+ capture->ec_formats_len = rec_len;
+ offset += rec_len;
+ }
+
+ store_capture:
+ SSL_set_ex_data(ssl, ssl_capture_ptr_index, capture);
+}
+
+
+#ifdef HAVE_SSL_KEYLOG
+/* Message callback: lazily attach an ssl_keylog storage area to the SSL
+ * session so the keylog callback can later fill it with the TLS secrets.
+ * Nothing to do when the session already carries one or on allocation
+ * failure. */
+static void ssl_init_keylog(struct connection *conn, int write_p, int version,
+                            int content_type, const void *buf, size_t len,
+                            SSL *ssl)
+{
+        struct ssl_keylog *store;
+
+        if (SSL_get_ex_data(ssl, ssl_keylog_index) != NULL)
+                return;
+
+        store = pool_zalloc(pool_head_ssl_keylog);
+        if (store == NULL)
+                return;
+
+        if (SSL_set_ex_data(ssl, ssl_keylog_index, store) == 0)
+                pool_free(pool_head_ssl_keylog, store);
+}
+#endif
+
+/* OpenSSL protocol message callback: dispatches every TLS record seen on
+ * <ssl> to all the handlers previously registered through
+ * ssl_sock_register_msg_callback(). */
+void ssl_sock_msgcbk(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)
+{
+        struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
+        struct ssl_sock_msg_callback *handler;
+
+        list_for_each_entry(handler, &ssl_sock_msg_callbacks, list)
+                handler->func(conn, write_p, version, content_type, buf, len, ssl);
+}
+
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+/* NPN selection callback for outgoing (server-side) connections: pick a
+ * protocol among the peer's list <in> using the list configured on the
+ * server. Returns SSL_TLSEXT_ERR_OK on agreement, SSL_TLSEXT_ERR_NOACK
+ * otherwise. */
+static int ssl_sock_srv_select_protos(SSL *s, unsigned char **out, unsigned char *outlen,
+                                      const unsigned char *in, unsigned int inlen,
+                                      void *arg)
+{
+        struct server *srv = arg;
+        int rc;
+
+        rc = SSL_select_next_proto(out, outlen, in, inlen,
+                                   (unsigned char *)srv->ssl_ctx.npn_str,
+                                   srv->ssl_ctx.npn_len);
+        return (rc == OPENSSL_NPN_NEGOTIATED) ? SSL_TLSEXT_ERR_OK : SSL_TLSEXT_ERR_NOACK;
+}
+#endif
+
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+/* NPN advertisement callback: expose the protocol list configured on the
+ * bind line so the client can pick one. Always succeeds.
+ */
+static int ssl_sock_advertise_npn_protos(SSL *s, const unsigned char **data,
+                                         unsigned int *len, void *arg)
+{
+        const struct ssl_bind_conf *conf = arg;
+
+        *len = conf->npn_len;
+        *data = (const unsigned char *)conf->npn_str;
+        return SSL_TLSEXT_ERR_OK;
+}
+#endif
+
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+/* ALPN selection callback on the server side: select a protocol among the
+ * client's list using the bind line's configured list. On failure (or, for
+ * QUIC, when the application layer cannot be set up) a no_application_protocol
+ * alert is armed for QUIC and SSL_TLSEXT_ERR_NOACK is returned.
+ */
+static int ssl_sock_advertise_alpn_protos(SSL *s, const unsigned char **out,
+ unsigned char *outlen,
+ const unsigned char *server,
+ unsigned int server_len, void *arg)
+{
+ struct ssl_bind_conf *conf = arg;
+#ifdef USE_QUIC
+ struct quic_conn *qc = SSL_get_ex_data(s, ssl_qc_app_data_index);
+#endif
+
+ if (SSL_select_next_proto((unsigned char**) out, outlen, (const unsigned char *)conf->alpn_str,
+ conf->alpn_len, server, server_len) != OPENSSL_NPN_NEGOTIATED) {
+#ifdef USE_QUIC
+ if (qc)
+ quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
+#endif
+ return SSL_TLSEXT_ERR_NOACK;
+ }
+
+#ifdef USE_QUIC
+ /* bind the QUIC application protocol handlers to the negotiated token */
+ if (qc && !quic_set_app_ops(qc, *out, *outlen)) {
+ quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
+ return SSL_TLSEXT_ERR_NOACK;
+ }
+#endif
+
+ return SSL_TLSEXT_ERR_OK;
+}
+#endif
+
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+#ifndef SSL_NO_GENERATE_CERTIFICATES
+
+/* Append a subjectAltName extension of the form "DNS:<servername>" to <cert>.
+ * Returns 0 on success, 1 on failure. */
+int ssl_sock_add_san_ext(X509V3_CTX* ctx, X509* cert, const char *servername) {
+        int ret = 1;
+        CONF *nconf = NULL;
+        X509_EXTENSION *ext = NULL;
+        struct buffer *dns_entry = get_trash_chunk();
+
+        nconf = NCONF_new(NULL);
+        if (!nconf)
+                goto end;
+
+        /* Build the extension value from the requested server name */
+        chunk_appendf(dns_entry, "DNS:%s", servername);
+        ext = X509V3_EXT_nconf_nid(nconf, ctx, NID_subject_alt_name, dns_entry->area);
+        if (!ext)
+                goto end;
+
+        /* -1 appends the extension at the end of the certificate's list */
+        if (X509_add_ext(cert, ext, -1))
+                ret = 0;
+
+end:
+        if (ext)
+                X509_EXTENSION_free(ext);
+        if (nconf)
+                NCONF_free(nconf);
+
+        return ret;
+}
+
+/* Create an X509 certificate for <servername>, signed by the bind line's
+ * "ca-sign" CA, and wrap it in a fresh SSL_CTX (dynamic certificate
+ * generation). The private key of the default certificate is reused so that
+ * only one key pair exists. Returns the SSL_CTX object or NULL if an error
+ * occurs. */
+static SSL_CTX *
+ssl_sock_do_create_cert(const char *servername, struct bind_conf *bind_conf, SSL *ssl)
+{
+ X509 *cacert = bind_conf->ca_sign_ckch->cert;
+ EVP_PKEY *capkey = bind_conf->ca_sign_ckch->key;
+ SSL_CTX *ssl_ctx = NULL;
+ X509 *newcrt = NULL;
+ EVP_PKEY *pkey = NULL;
+ SSL *tmp_ssl = NULL;
+ CONF *ctmp = NULL;
+ X509_NAME *name;
+ const EVP_MD *digest;
+ X509V3_CTX ctx;
+ unsigned int i;
+ int key_type;
+
+ /* Get the private key of the default certificate and use it */
+#ifdef HAVE_SSL_CTX_get0_privatekey
+ pkey = SSL_CTX_get0_privatekey(bind_conf->default_ctx);
+#else
+ tmp_ssl = SSL_new(bind_conf->default_ctx);
+ if (tmp_ssl)
+ pkey = SSL_get_privatekey(tmp_ssl);
+#endif
+ if (!pkey)
+ goto mkcert_error;
+
+ /* Create the certificate */
+ if (!(newcrt = X509_new()))
+ goto mkcert_error;
+
+ /* Set version number for the certificate (X509v3) and the serial
+ * number (taken from a process-wide atomic counter) */
+ if (X509_set_version(newcrt, 2L) != 1)
+ goto mkcert_error;
+ ASN1_INTEGER_set(X509_get_serialNumber(newcrt), _HA_ATOMIC_ADD_FETCH(&ssl_ctx_serial, 1));
+
+ /* Set duration for the certificate: valid from one day ago to one
+ * year from now */
+ if (!X509_gmtime_adj(X509_getm_notBefore(newcrt), (long)-60*60*24) ||
+ !X509_gmtime_adj(X509_getm_notAfter(newcrt),(long)60*60*24*365))
+ goto mkcert_error;
+
+ /* set public key in the certificate */
+ if (X509_set_pubkey(newcrt, pkey) != 1)
+ goto mkcert_error;
+
+ /* Set issuer name from the CA */
+ if (!(name = X509_get_subject_name(cacert)))
+ goto mkcert_error;
+ if (X509_set_issuer_name(newcrt, name) != 1)
+ goto mkcert_error;
+
+ /* Set the subject name using the same, but the CN */
+ name = X509_NAME_dup(name);
+ if (X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
+ (const unsigned char *)servername,
+ -1, -1, 0) != 1) {
+ X509_NAME_free(name);
+ goto mkcert_error;
+ }
+ if (X509_set_subject_name(newcrt, name) != 1) {
+ X509_NAME_free(name);
+ goto mkcert_error;
+ }
+ X509_NAME_free(name);
+
+ /* Add x509v3 extensions as specified */
+ ctmp = NCONF_new(NULL);
+ X509V3_set_ctx(&ctx, cacert, newcrt, NULL, NULL, 0);
+ for (i = 0; i < X509V3_EXT_SIZE; i++) {
+ X509_EXTENSION *ext;
+
+ if (!(ext = X509V3_EXT_nconf(ctmp, &ctx, x509v3_ext_names[i], x509v3_ext_values[i])))
+ goto mkcert_error;
+ if (!X509_add_ext(newcrt, ext, -1)) {
+ X509_EXTENSION_free(ext);
+ goto mkcert_error;
+ }
+ X509_EXTENSION_free(ext);
+ }
+
+ /* Add SAN extension */
+ if (ssl_sock_add_san_ext(&ctx, newcrt, servername)) {
+ goto mkcert_error;
+ }
+
+ /* Sign the certificate with the CA private key */
+
+ key_type = EVP_PKEY_base_id(capkey);
+
+ /* pick a digest matching the CA key type */
+ if (key_type == EVP_PKEY_DSA)
+ digest = EVP_sha1();
+ else if (key_type == EVP_PKEY_RSA)
+ digest = EVP_sha256();
+ else if (key_type == EVP_PKEY_EC)
+ digest = EVP_sha256();
+ else {
+#ifdef ASN1_PKEY_CTRL_DEFAULT_MD_NID
+ int nid;
+
+ if (EVP_PKEY_get_default_digest_nid(capkey, &nid) <= 0)
+ goto mkcert_error;
+ if (!(digest = EVP_get_digestbynid(nid)))
+ goto mkcert_error;
+#else
+ goto mkcert_error;
+#endif
+ }
+
+ if (!(X509_sign(newcrt, capkey, digest)))
+ goto mkcert_error;
+
+ /* Create and set the new SSL_CTX */
+ if (!(ssl_ctx = SSL_CTX_new(SSLv23_server_method())))
+ goto mkcert_error;
+ if (!SSL_CTX_use_PrivateKey(ssl_ctx, pkey))
+ goto mkcert_error;
+ if (!SSL_CTX_use_certificate(ssl_ctx, newcrt))
+ goto mkcert_error;
+ if (!SSL_CTX_check_private_key(ssl_ctx))
+ goto mkcert_error;
+
+ /* Build chaining the CA cert and the rest of the chain, keep these order */
+#if defined(SSL_CTX_add1_chain_cert)
+ if (!SSL_CTX_add1_chain_cert(ssl_ctx, bind_conf->ca_sign_ckch->cert)) {
+ goto mkcert_error;
+ }
+
+ if (bind_conf->ca_sign_ckch->chain) {
+ for (i = 0; i < sk_X509_num(bind_conf->ca_sign_ckch->chain); i++) {
+ X509 *chain_cert = sk_X509_value(bind_conf->ca_sign_ckch->chain, i);
+ if (!SSL_CTX_add1_chain_cert(ssl_ctx, chain_cert)) {
+ goto mkcert_error;
+ }
+ }
+ }
+#endif
+
+ /* the ctx now holds its own reference to the certificate */
+ if (newcrt) X509_free(newcrt);
+
+#ifndef OPENSSL_NO_DH
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+ SSL_CTX_set_tmp_dh_callback(ssl_ctx, ssl_get_tmp_dh_cbk);
+#else
+ ssl_sock_set_tmp_dh_from_pkey(ssl_ctx, pkey);
+#endif
+#endif
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
+#if defined(SSL_CTX_set1_curves_list)
+ {
+ const char *ecdhe = (bind_conf->ssl_conf.ecdhe ? bind_conf->ssl_conf.ecdhe : ECDHE_DEFAULT_CURVE);
+ /* NOTE(review): 'goto end' below still returns the ctx even when
+ * the curve list could not be applied -- confirm this best-effort
+ * behaviour is intended. */
+ if (!SSL_CTX_set1_curves_list(ssl_ctx, ecdhe))
+ goto end;
+ }
+#endif
+#else
+#if defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH)
+ {
+ const char *ecdhe = (bind_conf->ssl_conf.ecdhe ? bind_conf->ssl_conf.ecdhe : ECDHE_DEFAULT_CURVE);
+ EC_KEY *ecc;
+ int nid;
+
+ if ((nid = OBJ_sn2nid(ecdhe)) == NID_undef)
+ goto end;
+ if (!(ecc = EC_KEY_new_by_curve_name(nid)))
+ goto end;
+ SSL_CTX_set_tmp_ecdh(ssl_ctx, ecc);
+ EC_KEY_free(ecc);
+ }
+#endif /* defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH) */
+#endif /* HA_OPENSSL_VERSION_NUMBER >= 0x10101000L */
+ end:
+ return ssl_ctx;
+
+ mkcert_error:
+ if (ctmp) NCONF_free(ctmp);
+ if (tmp_ssl) SSL_free(tmp_ssl);
+ if (ssl_ctx) SSL_CTX_free(ssl_ctx);
+ if (newcrt) X509_free(newcrt);
+ return NULL;
+}
+
+
+/* Do a lookup for a certificate in the LRU cache used to store generated
+ * certificates and immediately assign it to the SSL session if not null.
+ * The write lock protects against concurrent eviction between the lookup
+ * and the assignment. Returns the cached SSL_CTX or NULL. */
+SSL_CTX *
+ssl_sock_assign_generated_cert(unsigned int key, struct bind_conf *bind_conf, SSL *ssl)
+{
+ struct lru64 *lru = NULL;
+
+ if (ssl_ctx_lru_tree) {
+ HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ lru = lru64_lookup(key, ssl_ctx_lru_tree, bind_conf->ca_sign_ckch->cert, 0);
+ if (lru && lru->domain) {
+ if (ssl)
+ SSL_set_SSL_CTX(ssl, (SSL_CTX *)lru->data);
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ return (SSL_CTX *)lru->data;
+ }
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ }
+ return NULL;
+}
+
+/* Same as <ssl_sock_assign_generated_cert> but without SSL session. This
+ * function is not thread-safe, it should only be used to check if a certificate
+ * exists in the lru cache (with no guarantee it will not be removed by another
+ * thread). It is kept for backward compatibility. */
+SSL_CTX *
+ssl_sock_get_generated_cert(unsigned int key, struct bind_conf *bind_conf)
+{
+ return ssl_sock_assign_generated_cert(key, bind_conf, NULL);
+}
+
+/* Set a certificate in the LRU cache used to store generated
+ * certificates. Any certificate previously cached under the same key is
+ * freed first. Return 0 on success, otherwise -1 (no cache configured or
+ * no LRU slot available). */
+int
+ssl_sock_set_generated_cert(SSL_CTX *ssl_ctx, unsigned int key, struct bind_conf *bind_conf)
+{
+ struct lru64 *lru = NULL;
+
+ if (ssl_ctx_lru_tree) {
+ HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ lru = lru64_get(key, ssl_ctx_lru_tree, bind_conf->ca_sign_ckch->cert, 0);
+ if (!lru) {
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ return -1;
+ }
+ if (lru->domain && lru->data)
+ lru->free((SSL_CTX *)lru->data);
+ lru64_commit(lru, ssl_ctx, bind_conf->ca_sign_ckch->cert, 0, (void (*)(void *))SSL_CTX_free);
+ HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+ return 0;
+ }
+ return -1;
+}
+
+/* Compute the LRU cache key of a generated certificate: an XXH32 hash of
+ * <data> (server name or destination address) seeded per-process. */
+unsigned int
+ssl_sock_generated_cert_key(const void *data, size_t len)
+{
+ return XXH32(data, len, ssl_ctx_lru_seed);
+}
+
+/* Generate a cert and immediately assign it to the SSL session so that the cert's
+ * refcount is maintained regardless of the cert's presence in the LRU cache.
+ * The certificate is cached in the LRU tree when one is configured, otherwise
+ * it only lives as long as the session. Always returns 1 (the previously
+ * trailing "return 0" was unreachable and has been removed).
+ */
+static int
+ssl_sock_generate_certificate(const char *servername, struct bind_conf *bind_conf, SSL *ssl)
+{
+        X509 *cacert = bind_conf->ca_sign_ckch->cert;
+        SSL_CTX *ssl_ctx = NULL;
+        struct lru64 *lru = NULL;
+        unsigned int key;
+
+        key = ssl_sock_generated_cert_key(servername, strlen(servername));
+        if (ssl_ctx_lru_tree) {
+                HA_RWLOCK_WRLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+                lru = lru64_get(key, ssl_ctx_lru_tree, cacert, 0);
+                if (lru && lru->domain)
+                        ssl_ctx = (SSL_CTX *)lru->data;
+                /* cache miss: create the certificate and store it in the slot */
+                if (!ssl_ctx && lru) {
+                        ssl_ctx = ssl_sock_do_create_cert(servername, bind_conf, ssl);
+                        lru64_commit(lru, ssl_ctx, cacert, 0, (void (*)(void *))SSL_CTX_free);
+                }
+                SSL_set_SSL_CTX(ssl, ssl_ctx);
+                HA_RWLOCK_WRUNLOCK(SSL_GEN_CERTS_LOCK, &ssl_ctx_lru_rwlock);
+        }
+        else {
+                ssl_ctx = ssl_sock_do_create_cert(servername, bind_conf, ssl);
+                SSL_set_SSL_CTX(ssl, ssl_ctx);
+                /* No LRU cache, this CTX will be released as soon as the session dies */
+                SSL_CTX_free(ssl_ctx);
+        }
+        return 1;
+}
+/* Try to assign an already-generated certificate, keyed on the connection's
+ * destination address, to <ssl>. Returns 1 when a cached certificate was
+ * assigned, 0 otherwise. */
+static int
+ssl_sock_generate_certificate_from_conn(struct bind_conf *bind_conf, SSL *ssl)
+{
+        struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
+        unsigned int cert_key;
+
+        if (!conn_get_dst(conn))
+                return 0;
+
+        cert_key = ssl_sock_generated_cert_key(conn->dst, get_addr_len(conn->dst));
+        return ssl_sock_assign_generated_cert(cert_key, bind_conf, ssl) ? 1 : 0;
+}
+#endif /* !defined SSL_NO_GENERATE_CERTIFICATES */
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+
+/* Protocol version selectors for OpenSSL < 1.1.0: each helper pins the
+ * context to a single protocol version by swapping the method, on the server
+ * or client side depending on <c>. Versions not compiled into the library
+ * (guarded by the SSL_OP_NO_* macros) compile to no-ops, as do the
+ * per-session (SSL *) variants which have no pre-1.1.0 equivalent. */
+static void ctx_set_SSLv3_func(SSL_CTX *ctx, set_context_func c)
+{
+#if SSL_OP_NO_SSLv3
+ c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, SSLv3_server_method())
+ : SSL_CTX_set_ssl_version(ctx, SSLv3_client_method());
+#endif
+}
+static void ctx_set_TLSv10_func(SSL_CTX *ctx, set_context_func c) {
+ c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_server_method())
+ : SSL_CTX_set_ssl_version(ctx, TLSv1_client_method());
+}
+static void ctx_set_TLSv11_func(SSL_CTX *ctx, set_context_func c) {
+#if SSL_OP_NO_TLSv1_1
+ c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_1_server_method())
+ : SSL_CTX_set_ssl_version(ctx, TLSv1_1_client_method());
+#endif
+}
+static void ctx_set_TLSv12_func(SSL_CTX *ctx, set_context_func c) {
+#if SSL_OP_NO_TLSv1_2
+ c == SET_SERVER ? SSL_CTX_set_ssl_version(ctx, TLSv1_2_server_method())
+ : SSL_CTX_set_ssl_version(ctx, TLSv1_2_client_method());
+#endif
+}
+/* TLSv1.2 is the last supported version in this context. */
+static void ctx_set_TLSv13_func(SSL_CTX *ctx, set_context_func c) {}
+/* Unusable in this context. */
+static void ssl_set_SSLv3_func(SSL *ssl, set_context_func c) {}
+static void ssl_set_TLSv10_func(SSL *ssl, set_context_func c) {}
+static void ssl_set_TLSv11_func(SSL *ssl, set_context_func c) {}
+static void ssl_set_TLSv12_func(SSL *ssl, set_context_func c) {}
+static void ssl_set_TLSv13_func(SSL *ssl, set_context_func c) {}
+#else /* openssl >= 1.1.0 */
+
+/* From OpenSSL 1.1.0 onward protocol bounds use the min/max proto API:
+ * SET_MAX pins the maximum allowed version, anything else pins the minimum.
+ * ctx_* variants act on an SSL_CTX, ssl_* variants on an individual SSL.
+ * The TLSv1.3 setters are no-ops before OpenSSL 1.1.1.
+ */
+static void ctx_set_SSLv3_func(SSL_CTX *ctx, set_context_func c) {
+ c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, SSL3_VERSION)
+ : SSL_CTX_set_min_proto_version(ctx, SSL3_VERSION);
+}
+static void ssl_set_SSLv3_func(SSL *ssl, set_context_func c) {
+ c == SET_MAX ? SSL_set_max_proto_version(ssl, SSL3_VERSION)
+ : SSL_set_min_proto_version(ssl, SSL3_VERSION);
+}
+static void ctx_set_TLSv10_func(SSL_CTX *ctx, set_context_func c) {
+ c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_VERSION)
+ : SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
+}
+static void ssl_set_TLSv10_func(SSL *ssl, set_context_func c) {
+ c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_VERSION)
+ : SSL_set_min_proto_version(ssl, TLS1_VERSION);
+}
+static void ctx_set_TLSv11_func(SSL_CTX *ctx, set_context_func c) {
+ c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_1_VERSION)
+ : SSL_CTX_set_min_proto_version(ctx, TLS1_1_VERSION);
+}
+static void ssl_set_TLSv11_func(SSL *ssl, set_context_func c) {
+ c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_1_VERSION)
+ : SSL_set_min_proto_version(ssl, TLS1_1_VERSION);
+}
+static void ctx_set_TLSv12_func(SSL_CTX *ctx, set_context_func c) {
+ c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION)
+ : SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION);
+}
+static void ssl_set_TLSv12_func(SSL *ssl, set_context_func c) {
+ c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_2_VERSION)
+ : SSL_set_min_proto_version(ssl, TLS1_2_VERSION);
+}
+static void ctx_set_TLSv13_func(SSL_CTX *ctx, set_context_func c) {
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
+ c == SET_MAX ? SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION)
+ : SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
+#endif
+}
+static void ssl_set_TLSv13_func(SSL *ssl, set_context_func c) {
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
+ c == SET_MAX ? SSL_set_max_proto_version(ssl, TLS1_3_VERSION)
+ : SSL_set_min_proto_version(ssl, TLS1_3_VERSION);
+#endif
+}
+#endif
+/* "NONE" entry of the methodVersions table: no version constraint to apply. */
+static void ctx_set_None_func(SSL_CTX *ctx, set_context_func c) { }
+static void ssl_set_None_func(SSL *ssl, set_context_func c) { }
+
+/* Per-version dispatch table, indexed by the CONF_* protocol constants:
+ * the SSL_OP_NO_* option bit, the MC_SSL_O_NO_* config mask, the SSL_CTX
+ * and per-SSL setter functions, and the display name of each version.
+ */
+struct methodVersions methodVersions[] = {
+ {0, 0, ctx_set_None_func, ssl_set_None_func, "NONE"}, /* CONF_TLSV_NONE */
+ {SSL_OP_NO_SSLv3, MC_SSL_O_NO_SSLV3, ctx_set_SSLv3_func, ssl_set_SSLv3_func, "SSLv3"}, /* CONF_SSLV3 */
+ {SSL_OP_NO_TLSv1, MC_SSL_O_NO_TLSV10, ctx_set_TLSv10_func, ssl_set_TLSv10_func, "TLSv1.0"}, /* CONF_TLSV10 */
+ {SSL_OP_NO_TLSv1_1, MC_SSL_O_NO_TLSV11, ctx_set_TLSv11_func, ssl_set_TLSv11_func, "TLSv1.1"}, /* CONF_TLSV11 */
+ {SSL_OP_NO_TLSv1_2, MC_SSL_O_NO_TLSV12, ctx_set_TLSv12_func, ssl_set_TLSv12_func, "TLSv1.2"}, /* CONF_TLSV12 */
+ {SSL_OP_NO_TLSv1_3, MC_SSL_O_NO_TLSV13, ctx_set_TLSv13_func, ssl_set_TLSv13_func, "TLSv1.3"}, /* CONF_TLSV13 */
+};
+
+/* Switch <ssl> over to SSL_CTX <ctx>: copy the verify mode (re-installing
+ * our verify callback) and a duplicate of the client CA list from <ctx>,
+ * then make <ctx> the active context for this SSL.
+ */
+static void ssl_sock_switchctx_set(SSL *ssl, SSL_CTX *ctx)
+{
+ SSL_set_verify(ssl, SSL_CTX_get_verify_mode(ctx), ssl_sock_bind_verifycbk);
+ SSL_set_client_CA_list(ssl, SSL_dup_CA_list(SSL_CTX_get_client_CA_list(ctx)));
+ SSL_set_SSL_CTX(ssl, ctx);
+}
+
+/*
+ * Return the right sni_ctx for a <bind_conf> and a chosen <servername> (must be in lowercase)
+ * RSA <have_rsa_sig> and ECDSA <have_ecdsa_sig> capabilities of the client can also be used.
+ *
+ * This function does a lookup in the bind_conf sni tree so the caller should lock its tree.
+ *
+ * NOTE: the tree lookups are performed on the global <trash.area> buffer, so
+ * the caller must have copied the lowercased servername into <trash> before
+ * calling (the <servername> argument itself is only scanned for the wildcard
+ * dot). Returns NULL when no usable sni_ctx matches.
+ */
+static __maybe_unused struct sni_ctx *ssl_sock_chose_sni_ctx(struct bind_conf *s, const char *servername,
+ int have_rsa_sig, int have_ecdsa_sig)
+{
+ struct ebmb_node *node, *n, *node_ecdsa = NULL, *node_rsa = NULL, *node_anonymous = NULL;
+ const char *wildp = NULL;
+ int i;
+
+ /* look for the first dot for wildcard search */
+ for (i = 0; servername[i] != '\0'; i++) {
+ if (servername[i] == '.') {
+ wildp = &servername[i];
+ break;
+ }
+ }
+
+ /* Look for an ECDSA, RSA and DSA certificate, first in the single
+ * name and if not found in the wildcard */
+ for (i = 0; i < 2; i++) {
+ if (i == 0) /* lookup in full qualified names */
+ node = ebst_lookup(&s->sni_ctx, trash.area);
+ else if (i == 1 && wildp) /* lookup in wildcards names */
+ node = ebst_lookup(&s->sni_w_ctx, wildp);
+ else
+ break;
+
+ for (n = node; n; n = ebmb_next_dup(n)) {
+
+ /* lookup a not neg filter */
+ if (!container_of(n, struct sni_ctx, name)->neg) {
+ struct sni_ctx *sni, *sni_tmp;
+ int skip = 0;
+
+ if (i == 1 && wildp) { /* wildcard */
+ /* If this is a wildcard, look for an exclusion on the same crt-list line */
+ sni = container_of(n, struct sni_ctx, name);
+ list_for_each_entry(sni_tmp, &sni->ckch_inst->sni_ctx, by_ckch_inst) {
+ if (sni_tmp->neg && (strcmp((const char *)sni_tmp->name.key, trash.area) == 0)) {
+ skip = 1;
+ break;
+ }
+ }
+ if (skip)
+ continue;
+ }
+
+ /* remember the first match of each key type among the dups */
+ switch(container_of(n, struct sni_ctx, name)->kinfo.sig) {
+ case TLSEXT_signature_ecdsa:
+ if (!node_ecdsa)
+ node_ecdsa = n;
+ break;
+ case TLSEXT_signature_rsa:
+ if (!node_rsa)
+ node_rsa = n;
+ break;
+ default: /* TLSEXT_signature_anonymous|dsa */
+ if (!node_anonymous)
+ node_anonymous = n;
+ break;
+ }
+ }
+ }
+ }
+ /* Once the certificates are found, select them depending on what is
+ * supported in the client and by key_signature priority order: ECDSA >
+ * RSA > DSA */
+ if (have_ecdsa_sig && node_ecdsa)
+ node = node_ecdsa;
+ else if (have_rsa_sig && node_rsa)
+ node = node_rsa;
+ else if (node_anonymous)
+ node = node_anonymous;
+ else if (node_ecdsa)
+ node = node_ecdsa; /* no ecdsa signature case (< TLSv1.2) */
+ else
+ node = node_rsa; /* no rsa signature case (far far away) */
+
+ if (node)
+ return container_of(node, struct sni_ctx, name);
+
+ return NULL;
+}
+
+#ifdef HAVE_SSL_CLIENT_HELLO_CB
+
+/* servername extension callback used together with the clientHello callback:
+ * acknowledge the SNI when one was supplied (or when cert generation is
+ * enabled on the bind_conf), otherwise report SSL_TLSEXT_ERR_NOACK.
+ */
+int ssl_sock_switchctx_err_cbk(SSL *ssl, int *al, void *priv)
+{
+ struct bind_conf *bc = priv;
+
+ (void)al; /* unused parameter */
+
+ if (!SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name) &&
+ !(bc->options & BC_O_GENERATE_CERTS))
+ return SSL_TLSEXT_ERR_NOACK;
+ return SSL_TLSEXT_ERR_OK;
+}
+
+/* clientHello callback: parses the raw clientHello (SNI, signature
+ * algorithms, cipher suites and, for QUIC, the transport parameters) and
+ * switches <ssl> to the matching SSL_CTX from the bind_conf's sni trees.
+ * Returns 1 (or ssl_select_cert_success semantics on BoringSSL) on success,
+ * and aborts the handshake otherwise (strict-sni with no match).
+ */
+#ifdef OPENSSL_IS_BORINGSSL
+int ssl_sock_switchctx_cbk(const struct ssl_early_callback_ctx *ctx)
+{
+ SSL *ssl = ctx->ssl;
+#else
+int ssl_sock_switchctx_cbk(SSL *ssl, int *al, void *arg)
+{
+#endif
+ struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
+#ifdef USE_QUIC
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+#endif /* USE_QUIC */
+ struct bind_conf *s = NULL;
+ const uint8_t *extension_data;
+ size_t extension_len;
+ int has_rsa_sig = 0, has_ecdsa_sig = 0;
+ struct sni_ctx *sni_ctx;
+ const char *servername;
+ size_t servername_len;
+ int allow_early = 0;
+ int i;
+
+ /* the bind_conf is reached either through the TCP connection or the
+ * QUIC connection, whichever is attached to this SSL */
+ if (conn)
+ s = __objt_listener(conn->target)->bind_conf;
+#ifdef USE_QUIC
+ else if (qc)
+ s = qc->li->bind_conf;
+#endif /* USE_QUIC */
+
+ if (!s) {
+ /* must never happen */
+ ABORT_NOW();
+ return 0;
+ }
+
+#ifdef USE_QUIC
+ if (qc) {
+ /* Look for the QUIC transport parameters. */
+#ifdef OPENSSL_IS_BORINGSSL
+ if (!SSL_early_callback_ctx_extension_get(ctx, qc->tps_tls_ext,
+ &extension_data, &extension_len))
+#else
+ if (!SSL_client_hello_get0_ext(ssl, qc->tps_tls_ext,
+ &extension_data, &extension_len))
+#endif
+ {
+ /* This is not redundant. If we only return 0 without setting
+ * <*al>, this has as side effect to generate another TLS alert
+ * which would be set after calling quic_set_tls_alert().
+ */
+ *al = SSL_AD_MISSING_EXTENSION;
+ quic_set_tls_alert(qc, SSL_AD_MISSING_EXTENSION);
+ return 0;
+ }
+
+ if (!quic_transport_params_store(qc, 0, extension_data,
+ extension_data + extension_len))
+ goto abort;
+
+ qc->flags |= QUIC_FL_CONN_TX_TP_RECEIVED;
+ }
+#endif /* USE_QUIC */
+
+ if (s->ssl_conf.early_data)
+ allow_early = 1;
+#ifdef OPENSSL_IS_BORINGSSL
+ if (SSL_early_callback_ctx_extension_get(ctx, TLSEXT_TYPE_server_name,
+ &extension_data, &extension_len)) {
+#else
+ if (SSL_client_hello_get0_ext(ssl, TLSEXT_TYPE_server_name, &extension_data, &extension_len)) {
+#endif
+ /*
+ * The server_name extension was given too much extensibility when it
+ * was written, so parsing the normal case is a bit complex.
+ */
+ size_t len;
+ if (extension_len <= 2)
+ goto abort;
+ /* Extract the length of the supplied list of names. */
+ len = (*extension_data++) << 8;
+ len |= *extension_data++;
+ if (len + 2 != extension_len)
+ goto abort;
+ /*
+ * The list in practice only has a single element, so we only consider
+ * the first one.
+ */
+ if (len == 0 || *extension_data++ != TLSEXT_NAMETYPE_host_name)
+ goto abort;
+ extension_len = len - 1;
+ /* Now we can finally pull out the byte array with the actual hostname. */
+ if (extension_len <= 2)
+ goto abort;
+ len = (*extension_data++) << 8;
+ len |= *extension_data++;
+ if (len == 0 || len + 2 > extension_len || len > TLSEXT_MAXLEN_host_name
+ || memchr(extension_data, 0, len) != NULL)
+ goto abort;
+ servername = (char *)extension_data;
+ servername_len = len;
+ } else {
+#if (!defined SSL_NO_GENERATE_CERTIFICATES)
+ if (s->options & BC_O_GENERATE_CERTS && ssl_sock_generate_certificate_from_conn(s, ssl)) {
+ goto allow_early;
+ }
+#endif
+ /* without SNI extension, is the default_ctx (need SSL_TLSEXT_ERR_NOACK) */
+ if (!s->strict_sni) {
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+ goto abort;
+ }
+
+ /* extract/check clientHello information */
+#ifdef OPENSSL_IS_BORINGSSL
+ if (SSL_early_callback_ctx_extension_get(ctx, TLSEXT_TYPE_signature_algorithms, &extension_data, &extension_len)) {
+#else
+ if (SSL_client_hello_get0_ext(ssl, TLSEXT_TYPE_signature_algorithms, &extension_data, &extension_len)) {
+#endif
+ uint8_t sign;
+ size_t len;
+ if (extension_len < 2)
+ goto abort;
+ len = (*extension_data++) << 8;
+ len |= *extension_data++;
+ if (len + 2 != extension_len)
+ goto abort;
+ if (len % 2 != 0)
+ goto abort;
+ /* walk the (hash, signature) pairs to learn the client's key types */
+ for (; len > 0; len -= 2) {
+ extension_data++; /* hash */
+ sign = *extension_data++;
+ switch (sign) {
+ case TLSEXT_signature_rsa:
+ has_rsa_sig = 1;
+ break;
+ case TLSEXT_signature_ecdsa:
+ has_ecdsa_sig = 1;
+ break;
+ default:
+ continue;
+ }
+ if (has_ecdsa_sig && has_rsa_sig)
+ break;
+ }
+ } else {
+ /* without TLSEXT_TYPE_signature_algorithms extension (< TLSv1.2) */
+ has_rsa_sig = 1;
+ }
+ if (has_ecdsa_sig) { /* in very rare case: has ecdsa sign but not a ECDSA cipher */
+ const SSL_CIPHER *cipher;
+ uint32_t cipher_id;
+ size_t len;
+ const uint8_t *cipher_suites;
+ has_ecdsa_sig = 0;
+#ifdef OPENSSL_IS_BORINGSSL
+ len = ctx->cipher_suites_len;
+ cipher_suites = ctx->cipher_suites;
+#else
+ len = SSL_client_hello_get0_ciphers(ssl, &cipher_suites);
+#endif
+ if (len % 2 != 0)
+ goto abort;
+ for (; len != 0; len -= 2, cipher_suites += 2) {
+#ifdef OPENSSL_IS_BORINGSSL
+ uint16_t cipher_suite = (cipher_suites[0] << 8) | cipher_suites[1];
+ cipher = SSL_get_cipher_by_value(cipher_suite);
+#else
+ cipher = SSL_CIPHER_find(ssl, cipher_suites);
+#endif
+ if (!cipher)
+ continue;
+
+ cipher_id = SSL_CIPHER_get_id(cipher);
+ /* skip the SCSV "fake" signaling ciphersuites because they are NID_auth_any (RFC 7507) */
+ if (cipher_id == SSL3_CK_SCSV || cipher_id == SSL3_CK_FALLBACK_SCSV)
+ continue;
+
+ if (SSL_CIPHER_get_auth_nid(cipher) == NID_auth_ecdsa
+ || SSL_CIPHER_get_auth_nid(cipher) == NID_auth_any) {
+ has_ecdsa_sig = 1;
+ break;
+ }
+ }
+ }
+
+ /* we need to transform this into a NULL-terminated string in lowercase */
+ /* NOTE(review): tolower() is called without an (unsigned char) cast here,
+ * unlike the other lowercasing loops in this file — confirm servername
+ * bytes cannot be negative chars on platforms with signed char. */
+ for (i = 0; i < trash.size && i < servername_len; i++)
+ trash.area[i] = tolower(servername[i]);
+ trash.area[i] = 0;
+ servername = trash.area;
+
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ sni_ctx = ssl_sock_chose_sni_ctx(s, servername, has_rsa_sig, has_ecdsa_sig);
+ if (sni_ctx) {
+ /* switch ctx */
+ struct ssl_bind_conf *conf = sni_ctx->conf;
+ ssl_sock_switchctx_set(ssl, sni_ctx->ctx);
+ if (conf) {
+ methodVersions[conf->ssl_methods.min].ssl_set_version(ssl, SET_MIN);
+ methodVersions[conf->ssl_methods.max].ssl_set_version(ssl, SET_MAX);
+ if (conf->early_data)
+ allow_early = 1;
+ }
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+#if (!defined SSL_NO_GENERATE_CERTIFICATES)
+ if (s->options & BC_O_GENERATE_CERTS && ssl_sock_generate_certificate(servername, s, ssl)) {
+ /* switch ctx done in ssl_sock_generate_certificate */
+ goto allow_early;
+ }
+#endif
+ if (!s->strict_sni) {
+ /* no certificate match, is the default_ctx */
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+
+ /* We are about to raise an handshake error so the servername extension
+ * callback will never be called and the SNI will never be stored in the
+ * SSL context. In order for the ssl_fc_sni sample fetch to still work
+ * in such a case, we store the SNI ourselves as an ex_data information
+ * in the SSL context.
+ */
+ {
+ char *client_sni = pool_alloc(ssl_sock_client_sni_pool);
+ if (client_sni) {
+ strncpy(client_sni, servername, TLSEXT_MAXLEN_host_name);
+ client_sni[TLSEXT_MAXLEN_host_name] = '\0';
+ SSL_set_ex_data(ssl, ssl_client_sni_index, client_sni);
+ }
+ }
+
+ /* other cases fallback on abort, if strict-sni is set but no node was found */
+
+ abort:
+ /* abort handshake (was SSL_TLSEXT_ERR_ALERT_FATAL) */
+ if (conn)
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+#ifdef OPENSSL_IS_BORINGSSL
+ return ssl_select_cert_error;
+#else
+ *al = SSL_AD_UNRECOGNIZED_NAME;
+ return 0;
+#endif
+
+allow_early:
+#ifdef OPENSSL_IS_BORINGSSL
+ if (allow_early)
+ SSL_set_early_data_enabled(ssl, 1);
+#else
+ if (!allow_early)
+ SSL_set_max_early_data(ssl, 0);
+#endif
+ return 1;
+}
+
+#else /* ! HAVE_SSL_CLIENT_HELLO_CB */
+
+/* Sets the SSL ctx of <ssl> to match the advertised server name. Returns a
+ * warning when no match is found, which implies the default (first) cert
+ * will keep being used.
+ *
+ * Fallback variant used when the library has no clientHello callback: it
+ * relies on the servername (SNI) callback instead, so only the SNI (and the
+ * QUIC transport parameters when built with USE_QUIC) is available here.
+ */
+int ssl_sock_switchctx_cbk(SSL *ssl, int *al, void *priv)
+{
+ const char *servername;
+ const char *wildp = NULL;
+ struct ebmb_node *node, *n;
+ struct bind_conf *s = priv;
+#ifdef USE_QUIC
+ const uint8_t *extension_data;
+ size_t extension_len;
+ struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
+#endif /* USE_QUIC */
+ int i;
+ (void)al; /* shut gcc stupid warning */
+
+#ifdef USE_QUIC
+ if (qc) {
+
+ /* Look for the QUIC transport parameters. */
+ SSL_get_peer_quic_transport_params(ssl, &extension_data, &extension_len);
+ if (extension_len == 0) {
+ /* This is not redundant. If we only return 0 without setting
+ * <*al>, this has as side effect to generate another TLS alert
+ * which would be set after calling quic_set_tls_alert().
+ */
+ *al = SSL_AD_MISSING_EXTENSION;
+ quic_set_tls_alert(qc, SSL_AD_MISSING_EXTENSION);
+ return SSL_TLSEXT_ERR_NOACK;
+ }
+
+ if (!quic_transport_params_store(qc, 0, extension_data,
+ extension_data + extension_len))
+ return SSL_TLSEXT_ERR_NOACK;
+
+ qc->flags |= QUIC_FL_CONN_TX_TP_RECEIVED;
+ }
+#endif /* USE_QUIC */
+
+ servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ if (!servername) {
+#if (!defined SSL_NO_GENERATE_CERTIFICATES)
+ if (s->options & BC_O_GENERATE_CERTS && ssl_sock_generate_certificate_from_conn(s, ssl))
+ return SSL_TLSEXT_ERR_OK;
+#endif
+ if (s->strict_sni)
+ return SSL_TLSEXT_ERR_ALERT_FATAL;
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ return SSL_TLSEXT_ERR_NOACK;
+ }
+
+ /* lowercase the servername into trash, spotting the wildcard dot */
+ for (i = 0; i < trash.size; i++) {
+ if (!servername[i])
+ break;
+ trash.area[i] = tolower((unsigned char)servername[i]);
+ if (!wildp && (trash.area[i] == '.'))
+ wildp = &trash.area[i];
+ }
+ trash.area[i] = 0;
+
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ node = NULL;
+ /* lookup in full qualified names */
+ for (n = ebst_lookup(&s->sni_ctx, trash.area); n; n = ebmb_next_dup(n)) {
+ /* lookup a not neg filter */
+ if (!container_of(n, struct sni_ctx, name)->neg) {
+ node = n;
+ break;
+ }
+ }
+ if (!node && wildp) {
+ /* lookup in wildcards names */
+ for (n = ebst_lookup(&s->sni_w_ctx, wildp); n; n = ebmb_next_dup(n)) {
+ /* lookup a not neg filter */
+ if (!container_of(n, struct sni_ctx, name)->neg) {
+ node = n;
+ break;
+ }
+ }
+ }
+ if (!node) {
+#if (!defined SSL_NO_GENERATE_CERTIFICATES)
+ if (s->options & BC_O_GENERATE_CERTS && ssl_sock_generate_certificate(servername, s, ssl)) {
+ /* switch ctx done in ssl_sock_generate_certificate */
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ return SSL_TLSEXT_ERR_OK;
+ }
+#endif
+ if (s->strict_sni) {
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ return SSL_TLSEXT_ERR_ALERT_FATAL;
+ }
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ return SSL_TLSEXT_ERR_OK;
+ }
+
+ /* switch ctx */
+ ssl_sock_switchctx_set(ssl, container_of(node, struct sni_ctx, name)->ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ return SSL_TLSEXT_ERR_OK;
+}
+#endif /* (!) OPENSSL_IS_BORINGSSL */
+#endif /* SSL_CTRL_SET_TLSEXT_HOSTNAME */
+
+#if 0 && defined(USE_OPENSSL_WOLFSSL)
+/* This implement the equivalent of the clientHello Callback but using the cert_cb.
+ * WolfSSL is able to extract the sigalgs and ciphers of the client by using the API
+ * provided in https://github.com/wolfSSL/wolfssl/pull/6963
+ *
+ * Not activated for now since the PR is not merged.
+ */
+static int ssl_sock_switchctx_wolfSSL_cbk(WOLFSSL* ssl, void* arg)
+{
+ struct bind_conf *s = arg;
+ int has_rsa_sig = 0, has_ecdsa_sig = 0;
+ const char *servername;
+ struct sni_ctx *sni_ctx;
+ int i;
+
+ if (!s) {
+ /* must never happen */
+ ABORT_NOW();
+ return 0;
+ }
+
+ servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
+ if (!servername) {
+ /* without SNI extension, is the default_ctx (need SSL_TLSEXT_ERR_NOACK) */
+ if (!s->strict_sni) {
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+ goto abort;
+ }
+
+ /* extract sigalgs and ciphers */
+ {
+ const byte* suites = NULL;
+ word16 suiteSz = 0;
+ const byte* hashSigAlgo = NULL;
+ word16 hashSigAlgoSz = 0;
+ word16 idx = 0;
+
+ wolfSSL_get_client_suites_sigalgs(ssl, &suites, &suiteSz, &hashSigAlgo, &hashSigAlgoSz);
+ if (suites == NULL || suiteSz == 0 || hashSigAlgo == NULL || hashSigAlgoSz == 0)
+ return 0;
+
+ /* before TLSv1.3, the ciphersuites tell us which auth types the
+ * client can verify */
+ if (SSL_version(ssl) != TLS1_3_VERSION) {
+ for (idx = 0; idx < suiteSz; idx += 2) {
+ WOLFSSL_CIPHERSUITE_INFO info;
+ info = wolfSSL_get_ciphersuite_info(suites[idx], suites[idx+1]);
+ if (info.rsaAuth)
+ has_rsa_sig = 1;
+ else if (info.eccAuth)
+ has_ecdsa_sig = 1;
+ }
+ }
+
+ if (hashSigAlgoSz > 0) {
+ /* sigalgs extension takes precedence over ciphersuites */
+ has_ecdsa_sig = 0;
+ has_rsa_sig = 0;
+ }
+ for (idx = 0; idx < hashSigAlgoSz; idx += 2) {
+ int hashAlgo;
+ int sigAlgo;
+
+ wolfSSL_get_sigalg_info(hashSigAlgo[idx+0], hashSigAlgo[idx+1], &hashAlgo, &sigAlgo);
+
+ if (sigAlgo == RSAk || sigAlgo == RSAPSSk)
+ has_rsa_sig = 1;
+ else if (sigAlgo == ECDSAk)
+ has_ecdsa_sig = 1;
+ }
+ }
+
+ /* we need to transform this into a NULL-terminated string in lowercase */
+ for (i = 0; i < trash.size && servername[i] != '\0'; i++)
+ trash.area[i] = tolower(servername[i]);
+ trash.area[i] = 0;
+ servername = trash.area;
+
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ sni_ctx = ssl_sock_chose_sni_ctx(s, servername, has_rsa_sig, has_ecdsa_sig);
+ if (sni_ctx) {
+ /* switch ctx */
+ struct ssl_bind_conf *conf = sni_ctx->conf;
+ ssl_sock_switchctx_set(ssl, sni_ctx->ctx);
+ if (conf) {
+ methodVersions[conf->ssl_methods.min].ssl_set_version(ssl, SET_MIN);
+ methodVersions[conf->ssl_methods.max].ssl_set_version(ssl, SET_MAX);
+ }
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ if (!s->strict_sni) {
+ /* no certificate match, is the default_ctx */
+ HA_RWLOCK_RDLOCK(SNI_LOCK, &s->sni_lock);
+ ssl_sock_switchctx_set(ssl, s->default_ctx);
+ HA_RWLOCK_RDUNLOCK(SNI_LOCK, &s->sni_lock);
+ goto allow_early;
+ }
+
+ /* We are about to raise an handshake error so the servername extension
+ * callback will never be called and the SNI will never be stored in the
+ * SSL context. In order for the ssl_fc_sni sample fetch to still work
+ * in such a case, we store the SNI ourselves as an ex_data information
+ * in the SSL context.
+ */
+ {
+ char *client_sni = pool_alloc(ssl_sock_client_sni_pool);
+ if (client_sni) {
+ strncpy(client_sni, servername, TLSEXT_MAXLEN_host_name);
+ client_sni[TLSEXT_MAXLEN_host_name] = '\0';
+ SSL_set_ex_data(ssl, ssl_client_sni_index, client_sni);
+ }
+ }
+
+ /* other cases fallback on abort, if strict-sni is set but no node was found */
+
+ abort:
+ /* abort handshake (was SSL_TLSEXT_ERR_ALERT_FATAL) */
+ return 0;
+
+allow_early:
+ return 1;
+}
+#endif
+
+#ifndef OPENSSL_NO_DH
+
+/* Build DH parameters from prime <p> and generator <g>. Ownership of <p>
+ * and <g> is always consumed, whatever the outcome. Returns a new HASSL_DH
+ * (an EVP_PKEY with OpenSSL >= 3.0, a DH otherwise) or NULL on failure.
+ */
+static inline HASSL_DH *ssl_new_dh_fromdata(BIGNUM *p, BIGNUM *g)
+{
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+ OSSL_PARAM_BLD *tmpl = NULL;
+ OSSL_PARAM *params = NULL;
+ EVP_PKEY_CTX *ctx = NULL;
+ EVP_PKEY *pkey = NULL;
+
+ /* push p and g into an OSSL_PARAM array for EVP_PKEY_fromdata() */
+ if ((tmpl = OSSL_PARAM_BLD_new()) == NULL
+ || !OSSL_PARAM_BLD_push_BN(tmpl, OSSL_PKEY_PARAM_FFC_P, p)
+ || !OSSL_PARAM_BLD_push_BN(tmpl, OSSL_PKEY_PARAM_FFC_G, g)
+ || (params = OSSL_PARAM_BLD_to_param(tmpl)) == NULL) {
+ goto end;
+ }
+ ctx = EVP_PKEY_CTX_new_from_name(NULL, "DH", NULL);
+ if (ctx == NULL
+ || !EVP_PKEY_fromdata_init(ctx)
+ || !EVP_PKEY_fromdata(ctx, &pkey, EVP_PKEY_KEY_PARAMETERS, params)) {
+ goto end;
+ }
+
+end:
+ /* on the error paths <pkey> is still NULL, so NULL is returned */
+ EVP_PKEY_CTX_free(ctx);
+ OSSL_PARAM_free(params);
+ OSSL_PARAM_BLD_free(tmpl);
+ BN_free(p);
+ BN_free(g);
+ return pkey;
+#else
+
+ HASSL_DH *dh = DH_new();
+
+ if (!dh)
+ return NULL;
+
+ /* DH_set0_pqg() takes ownership of p and g.
+ * NOTE(review): its return value is not checked — confirm it cannot
+ * fail with a non-NULL p and g. */
+ DH_set0_pqg(dh, p, NULL, g);
+
+ return dh;
+#endif
+}
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
+/* Return DH parameters for the well-known group identified by <nid>
+ * (e.g. NID_ffdhe2048). Returns NULL on failure. Only available from
+ * OpenSSL 1.1.1, which introduced the named FFDHE groups.
+ */
+static inline HASSL_DH *ssl_get_dh_by_nid(int nid)
+{
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+ OSSL_PARAM params[2];
+ EVP_PKEY *pkey = NULL;
+ EVP_PKEY_CTX *pctx = EVP_PKEY_CTX_new_from_name(NULL, "DH", NULL);
+ const char *named_group = NULL;
+
+ if (!pctx)
+ goto end;
+
+ /* map the NID back to its textual group name for the provider API */
+ named_group = OBJ_nid2ln(nid);
+
+ if (!named_group)
+ goto end;
+
+ params[0] = OSSL_PARAM_construct_utf8_string("group", (char*)named_group, 0);
+ params[1] = OSSL_PARAM_construct_end();
+
+ if (EVP_PKEY_keygen_init(pctx) && EVP_PKEY_CTX_set_params(pctx, params))
+ EVP_PKEY_generate(pctx, &pkey);
+
+end:
+ EVP_PKEY_CTX_free(pctx);
+ return pkey;
+#else
+
+ HASSL_DH *dh = NULL;
+ dh = DH_new_by_nid(nid);
+ return dh;
+#endif
+}
+#endif
+
+
+/* Return hard-coded 1024-bit DH parameters (generator 2). There is no named
+ * FFDHE group at this size, so the prime is always built from the static
+ * table below. Returns NULL on allocation failure.
+ */
+static HASSL_DH * ssl_get_dh_1024(void)
+{
+ static unsigned char dh1024_p[]={
+ 0xFA,0xF9,0x2A,0x22,0x2A,0xA7,0x7F,0xE1,0x67,0x4E,0x53,0xF7,
+ 0x56,0x13,0xC3,0xB1,0xE3,0x29,0x6B,0x66,0x31,0x6A,0x7F,0xB3,
+ 0xC2,0x68,0x6B,0xCB,0x1D,0x57,0x39,0x1D,0x1F,0xFF,0x1C,0xC9,
+ 0xA6,0xA4,0x98,0x82,0x31,0x5D,0x25,0xFF,0x8A,0xE0,0x73,0x96,
+ 0x81,0xC8,0x83,0x79,0xC1,0x5A,0x04,0xF8,0x37,0x0D,0xA8,0x3D,
+ 0xAE,0x74,0xBC,0xDB,0xB6,0xA4,0x75,0xD9,0x71,0x8A,0xA0,0x17,
+ 0x9E,0x2D,0xC8,0xA8,0xDF,0x2C,0x5F,0x82,0x95,0xF8,0x92,0x9B,
+ 0xA7,0x33,0x5F,0x89,0x71,0xC8,0x2D,0x6B,0x18,0x86,0xC4,0x94,
+ 0x22,0xA5,0x52,0x8D,0xF6,0xF6,0xD2,0x37,0x92,0x0F,0xA5,0xCC,
+ 0xDB,0x7B,0x1D,0x3D,0xA1,0x31,0xB7,0x80,0x8F,0x0B,0x67,0x5E,
+ 0x36,0xA5,0x60,0x0C,0xF1,0x95,0x33,0x8B,
+ };
+ static unsigned char dh1024_g[]={
+ 0x02,
+ };
+
+ BIGNUM *p;
+ BIGNUM *g;
+
+ HASSL_DH *dh = NULL;
+
+ p = BN_bin2bn(dh1024_p, sizeof dh1024_p, NULL);
+ g = BN_bin2bn(dh1024_g, sizeof dh1024_g, NULL);
+
+ /* ssl_new_dh_fromdata() consumes p and g on success and failure alike */
+ if (p && g)
+ dh = ssl_new_dh_fromdata(p, g);
+
+ return dh;
+}
+
+/* Return 2048-bit DH parameters: the standard FFDHE2048 group when the
+ * library supports named groups (OpenSSL >= 1.1.1), otherwise a hard-coded
+ * prime with generator 2. Returns NULL on failure.
+ */
+static HASSL_DH *ssl_get_dh_2048(void)
+{
+#if (HA_OPENSSL_VERSION_NUMBER < 0x10101000L)
+ static unsigned char dh2048_p[]={
+ 0xEC,0x86,0xF8,0x70,0xA0,0x33,0x16,0xEC,0x05,0x1A,0x73,0x59,
+ 0xCD,0x1F,0x8B,0xF8,0x29,0xE4,0xD2,0xCF,0x52,0xDD,0xC2,0x24,
+ 0x8D,0xB5,0x38,0x9A,0xFB,0x5C,0xA4,0xE4,0xB2,0xDA,0xCE,0x66,
+ 0x50,0x74,0xA6,0x85,0x4D,0x4B,0x1D,0x30,0xB8,0x2B,0xF3,0x10,
+ 0xE9,0xA7,0x2D,0x05,0x71,0xE7,0x81,0xDF,0x8B,0x59,0x52,0x3B,
+ 0x5F,0x43,0x0B,0x68,0xF1,0xDB,0x07,0xBE,0x08,0x6B,0x1B,0x23,
+ 0xEE,0x4D,0xCC,0x9E,0x0E,0x43,0xA0,0x1E,0xDF,0x43,0x8C,0xEC,
+ 0xBE,0xBE,0x90,0xB4,0x51,0x54,0xB9,0x2F,0x7B,0x64,0x76,0x4E,
+ 0x5D,0xD4,0x2E,0xAE,0xC2,0x9E,0xAE,0x51,0x43,0x59,0xC7,0x77,
+ 0x9C,0x50,0x3C,0x0E,0xED,0x73,0x04,0x5F,0xF1,0x4C,0x76,0x2A,
+ 0xD8,0xF8,0xCF,0xFC,0x34,0x40,0xD1,0xB4,0x42,0x61,0x84,0x66,
+ 0x42,0x39,0x04,0xF8,0x68,0xB2,0x62,0xD7,0x55,0xED,0x1B,0x74,
+ 0x75,0x91,0xE0,0xC5,0x69,0xC1,0x31,0x5C,0xDB,0x7B,0x44,0x2E,
+ 0xCE,0x84,0x58,0x0D,0x1E,0x66,0x0C,0xC8,0x44,0x9E,0xFD,0x40,
+ 0x08,0x67,0x5D,0xFB,0xA7,0x76,0x8F,0x00,0x11,0x87,0xE9,0x93,
+ 0xF9,0x7D,0xC4,0xBC,0x74,0x55,0x20,0xD4,0x4A,0x41,0x2F,0x43,
+ 0x42,0x1A,0xC1,0xF2,0x97,0x17,0x49,0x27,0x37,0x6B,0x2F,0x88,
+ 0x7E,0x1C,0xA0,0xA1,0x89,0x92,0x27,0xD9,0x56,0x5A,0x71,0xC1,
+ 0x56,0x37,0x7E,0x3A,0x9D,0x05,0xE7,0xEE,0x5D,0x8F,0x82,0x17,
+ 0xBC,0xE9,0xC2,0x93,0x30,0x82,0xF9,0xF4,0xC9,0xAE,0x49,0xDB,
+ 0xD0,0x54,0xB4,0xD9,0x75,0x4D,0xFA,0x06,0xB8,0xD6,0x38,0x41,
+ 0xB7,0x1F,0x77,0xF3,
+ };
+ static unsigned char dh2048_g[]={
+ 0x02,
+ };
+
+ BIGNUM *p;
+ BIGNUM *g;
+
+ HASSL_DH *dh = NULL;
+
+ p = BN_bin2bn(dh2048_p, sizeof dh2048_p, NULL);
+ g = BN_bin2bn(dh2048_g, sizeof dh2048_g, NULL);
+
+ /* ssl_new_dh_fromdata() consumes p and g on success and failure alike */
+ if (p && g)
+ dh = ssl_new_dh_fromdata(p, g);
+
+ return dh;
+#else
+ return ssl_get_dh_by_nid(NID_ffdhe2048);
+#endif
+}
+
+/* Return 4096-bit DH parameters: the standard FFDHE4096 group when the
+ * library supports named groups (OpenSSL >= 1.1.1), otherwise a hard-coded
+ * prime with generator 2. Returns NULL on failure.
+ */
+static HASSL_DH *ssl_get_dh_4096(void)
+{
+#if (HA_OPENSSL_VERSION_NUMBER < 0x10101000L)
+ static unsigned char dh4096_p[]={
+ 0xDE,0x16,0x94,0xCD,0x99,0x58,0x07,0xF1,0xF7,0x32,0x96,0x11,
+ 0x04,0x82,0xD4,0x84,0x72,0x80,0x99,0x06,0xCA,0xF0,0xA3,0x68,
+ 0x07,0xCE,0x64,0x50,0xE7,0x74,0x45,0x20,0x80,0x5E,0x4D,0xAD,
+ 0xA5,0xB6,0xED,0xFA,0x80,0x6C,0x3B,0x35,0xC4,0x9A,0x14,0x6B,
+ 0x32,0xBB,0xFD,0x1F,0x17,0x8E,0xB7,0x1F,0xD6,0xFA,0x3F,0x7B,
+ 0xEE,0x16,0xA5,0x62,0x33,0x0D,0xED,0xBC,0x4E,0x58,0xE5,0x47,
+ 0x4D,0xE9,0xAB,0x8E,0x38,0xD3,0x6E,0x90,0x57,0xE3,0x22,0x15,
+ 0x33,0xBD,0xF6,0x43,0x45,0xB5,0x10,0x0A,0xBE,0x2C,0xB4,0x35,
+ 0xB8,0x53,0x8D,0xAD,0xFB,0xA7,0x1F,0x85,0x58,0x41,0x7A,0x79,
+ 0x20,0x68,0xB3,0xE1,0x3D,0x08,0x76,0xBF,0x86,0x0D,0x49,0xE3,
+ 0x82,0x71,0x8C,0xB4,0x8D,0x81,0x84,0xD4,0xE7,0xBE,0x91,0xDC,
+ 0x26,0x39,0x48,0x0F,0x35,0xC4,0xCA,0x65,0xE3,0x40,0x93,0x52,
+ 0x76,0x58,0x7D,0xDD,0x51,0x75,0xDC,0x69,0x61,0xBF,0x47,0x2C,
+ 0x16,0x68,0x2D,0xC9,0x29,0xD3,0xE6,0xC0,0x99,0x48,0xA0,0x9A,
+ 0xC8,0x78,0xC0,0x6D,0x81,0x67,0x12,0x61,0x3F,0x71,0xBA,0x41,
+ 0x1F,0x6C,0x89,0x44,0x03,0xBA,0x3B,0x39,0x60,0xAA,0x28,0x55,
+ 0x59,0xAE,0xB8,0xFA,0xCB,0x6F,0xA5,0x1A,0xF7,0x2B,0xDD,0x52,
+ 0x8A,0x8B,0xE2,0x71,0xA6,0x5E,0x7E,0xD8,0x2E,0x18,0xE0,0x66,
+ 0xDF,0xDD,0x22,0x21,0x99,0x52,0x73,0xA6,0x33,0x20,0x65,0x0E,
+ 0x53,0xE7,0x6B,0x9B,0xC5,0xA3,0x2F,0x97,0x65,0x76,0xD3,0x47,
+ 0x23,0x77,0x12,0xB6,0x11,0x7B,0x24,0xED,0xF1,0xEF,0xC0,0xE2,
+ 0xA3,0x7E,0x67,0x05,0x3E,0x96,0x4D,0x45,0xC2,0x18,0xD1,0x73,
+ 0x9E,0x07,0xF3,0x81,0x6E,0x52,0x63,0xF6,0x20,0x76,0xB9,0x13,
+ 0xD2,0x65,0x30,0x18,0x16,0x09,0x16,0x9E,0x8F,0xF1,0xD2,0x10,
+ 0x5A,0xD3,0xD4,0xAF,0x16,0x61,0xDA,0x55,0x2E,0x18,0x5E,0x14,
+ 0x08,0x54,0x2E,0x2A,0x25,0xA2,0x1A,0x9B,0x8B,0x32,0xA9,0xFD,
+ 0xC2,0x48,0x96,0xE1,0x80,0xCA,0xE9,0x22,0x17,0xBB,0xCE,0x3E,
+ 0x9E,0xED,0xC7,0xF1,0x1F,0xEC,0x17,0x21,0xDC,0x7B,0x82,0x48,
+ 0x8E,0xBB,0x4B,0x9D,0x5B,0x04,0x04,0xDA,0xDB,0x39,0xDF,0x01,
+ 0x40,0xC3,0xAA,0x26,0x23,0x89,0x75,0xC6,0x0B,0xD0,0xA2,0x60,
+ 0x6A,0xF1,0xCC,0x65,0x18,0x98,0x1B,0x52,0xD2,0x74,0x61,0xCC,
+ 0xBD,0x60,0xAE,0xA3,0xA0,0x66,0x6A,0x16,0x34,0x92,0x3F,0x41,
+ 0x40,0x31,0x29,0xC0,0x2C,0x63,0xB2,0x07,0x8D,0xEB,0x94,0xB8,
+ 0xE8,0x47,0x92,0x52,0x93,0x6A,0x1B,0x7E,0x1A,0x61,0xB3,0x1B,
+ 0xF0,0xD6,0x72,0x9B,0xF1,0xB0,0xAF,0xBF,0x3E,0x65,0xEF,0x23,
+ 0x1D,0x6F,0xFF,0x70,0xCD,0x8A,0x4C,0x8A,0xA0,0x72,0x9D,0xBE,
+ 0xD4,0xBB,0x24,0x47,0x4A,0x68,0xB5,0xF5,0xC6,0xD5,0x7A,0xCD,
+ 0xCA,0x06,0x41,0x07,0xAD,0xC2,0x1E,0xE6,0x54,0xA7,0xAD,0x03,
+ 0xD9,0x12,0xC1,0x9C,0x13,0xB1,0xC9,0x0A,0x43,0x8E,0x1E,0x08,
+ 0xCE,0x50,0x82,0x73,0x5F,0xA7,0x55,0x1D,0xD9,0x59,0xAC,0xB5,
+ 0xEA,0x02,0x7F,0x6C,0x5B,0x74,0x96,0x98,0x67,0x24,0xA3,0x0F,
+ 0x15,0xFC,0xA9,0x7D,0x3E,0x67,0xD1,0x70,0xF8,0x97,0xF3,0x67,
+ 0xC5,0x8C,0x88,0x44,0x08,0x02,0xC7,0x2B,
+ };
+ static unsigned char dh4096_g[]={
+ 0x02,
+ };
+
+ BIGNUM *p;
+ BIGNUM *g;
+
+ HASSL_DH *dh = NULL;
+
+ p = BN_bin2bn(dh4096_p, sizeof dh4096_p, NULL);
+ g = BN_bin2bn(dh4096_g, sizeof dh4096_g, NULL);
+
+ /* ssl_new_dh_fromdata() consumes p and g on success and failure alike */
+ if (p && g)
+ dh = ssl_new_dh_fromdata(p, g);
+
+ return dh;
+#else
+ return ssl_get_dh_by_nid(NID_ffdhe4096);
+#endif
+}
+
+/* Select temporary DH parameters sized for private key <pkey>: the key's
+ * bit length (or global_ssl.default_dh_param for EC keys / no key), capped
+ * at global_ssl.default_dh_param, then rounded down to the nearest of the
+ * 4096/2048/1024 parameter sets, which are lazily created and cached in the
+ * local_dh_* globals. Returns the cached HASSL_DH (not a new reference),
+ * or NULL if creation failed.
+ */
+static HASSL_DH *ssl_get_tmp_dh(EVP_PKEY *pkey)
+{
+ HASSL_DH *dh = NULL;
+ int type;
+ int keylen = 0;
+
+ type = pkey ? EVP_PKEY_base_id(pkey) : EVP_PKEY_NONE;
+
+ if (type == EVP_PKEY_EC) {
+ keylen = global_ssl.default_dh_param;
+ }
+
+ /* The keylen supplied by OpenSSL can only be 512 or 1024.
+ See ssl3_send_server_key_exchange() in ssl/s3_srvr.c
+ */
+ if (type == EVP_PKEY_RSA || type == EVP_PKEY_DSA) {
+ keylen = EVP_PKEY_bits(pkey);
+ }
+
+ if (keylen > global_ssl.default_dh_param) {
+ keylen = global_ssl.default_dh_param;
+ }
+
+ /* NOTE(review): the lazy initialization of local_dh_* below is not
+ * obviously serialized here — confirm callers cannot race on first use. */
+ if (keylen >= 4096) {
+ if (!local_dh_4096)
+ local_dh_4096 = ssl_get_dh_4096();
+ dh = local_dh_4096;
+ }
+ else if (keylen >= 2048) {
+ if (!local_dh_2048)
+ local_dh_2048 = ssl_get_dh_2048();
+ dh = local_dh_2048;
+ }
+ else {
+ if (!local_dh_1024)
+ local_dh_1024 = ssl_get_dh_1024();
+ dh = local_dh_1024;
+ }
+
+ return dh;
+}
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+/* SSL_CTX_set_tmp_dh_callback() hook (pre-3.0 API): returns Diffie-Hellman
+ * parameters matching the connection's private key length, capped at
+ * global_ssl.default_dh_param. <export> and <keylen> are ignored.
+ */
+static HASSL_DH *ssl_get_tmp_dh_cbk(SSL *ssl, int export, int keylen)
+{
+ return ssl_get_tmp_dh(SSL_get_privatekey(ssl));
+}
+#endif
+
+/* Install DH parameters <dh> on SSL_CTX <ctx>, abstracting the API change
+ * of OpenSSL 3.0 (set0 consumes a reference, so one is taken beforehand and
+ * given back on failure). Returns non-zero on success, 0 on failure; the
+ * caller keeps its own reference on <dh> in both cases.
+ */
+static int ssl_sock_set_tmp_dh(SSL_CTX *ctx, HASSL_DH *dh)
+{
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+ return SSL_CTX_set_tmp_dh(ctx, dh);
+#else
+ int ok;
+
+ HASSL_DH_up_ref(dh);
+ ok = SSL_CTX_set0_tmp_dh_pkey(ctx, dh);
+ if (!ok)
+ HASSL_DH_free(dh);
+ return ok;
+#endif
+}
+
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+/* Pick the temporary DH parameters matching <pkey> and install them on
+ * <ctx>. Does nothing when <pkey> is NULL or no parameters are available.
+ * The extra reference taken on the shared DH object is released if the
+ * installation fails (set0 consumes it on success).
+ */
+static void ssl_sock_set_tmp_dh_from_pkey(SSL_CTX *ctx, EVP_PKEY *pkey)
+{
+ HASSL_DH *dh;
+
+ if (!pkey)
+ return;
+
+ dh = ssl_get_tmp_dh(pkey);
+ if (!dh)
+ return;
+
+ HASSL_DH_up_ref(dh);
+ if (!SSL_CTX_set0_tmp_dh_pkey(ctx, dh))
+ HASSL_DH_free(dh);
+}
+#endif
+
+/* Read PEM-encoded DH parameters from <bio>. Returns a new HASSL_DH that
+ * the caller must free, or NULL when none could be decoded.
+ */
+HASSL_DH *ssl_sock_get_dh_from_bio(BIO *bio)
+{
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x3000000fL)
+ HASSL_DH *dh = NULL;
+ OSSL_DECODER_CTX *dctx = NULL;
+ const char *format = "PEM";
+ const char *keytype = "DH";
+
+ dctx = OSSL_DECODER_CTX_new_for_pkey(&dh, format, NULL, keytype,
+ OSSL_KEYMGMT_SELECT_DOMAIN_PARAMETERS,
+ NULL, NULL);
+
+ if (dctx == NULL || OSSL_DECODER_CTX_get_num_decoders(dctx) == 0)
+ goto end;
+
+ /* The DH parameters might not be the first section found in the PEM
+ * file so we need to iterate over all of them until we find the right
+ * one.
+ */
+ while (!BIO_eof(bio) && !dh)
+ OSSL_DECODER_from_bio(dctx, bio);
+
+end:
+ OSSL_DECODER_CTX_free(dctx);
+ return dh;
+#else
+ HASSL_DH *dh = NULL;
+
+ dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
+
+ return dh;
+#endif
+}
+
+/* Load DH parameters from the PEM file <filename>.
+ * Returns the parameters on success and NULL on failure (BIO allocation
+ * error, unreadable file or no DH section present). The OpenSSL error
+ * stack is always cleared before returning so that a failure here cannot
+ * disturb later library calls.
+ */
+static HASSL_DH * ssl_sock_get_dh_from_file(const char *filename)
+{
+    HASSL_DH *dh = NULL;
+    BIO *in = BIO_new(BIO_s_file());
+
+    if (in != NULL) {
+        if (BIO_read_filename(in, filename) > 0)
+            dh = ssl_sock_get_dh_from_bio(in);
+        BIO_free(in);
+    }
+
+    ERR_clear_error();
+
+    return dh;
+}
+
+/* Load the global DH parameters (the "ssl-dh-param-file" directive) from
+ * PEM file <filename> into the process-wide <global_dh>.
+ * Returns 0 on success and -1 on failure.
+ */
+int ssl_sock_load_global_dh_param_from_file(const char *filename)
+{
+    global_dh = ssl_sock_get_dh_from_file(filename);
+
+    return global_dh ? 0 : -1;
+}
+#endif
+
+/* This function allocates a sni_ctx and adds it to the ckch_inst.
+ * <name> may start with '!' (negative filter) and/or '*' (wildcard); it is
+ * lowercased through the trash chunk before being stored. The new sni_ctx
+ * takes its own reference on <ctx>.
+ * Returns the next ordering value on success (a "!*" filter is a nop and
+ * returns <order> unchanged, as does an empty name), or -1 when the name
+ * does not fit in the trash buffer or on allocation failure.
+ */
+static int ckch_inst_add_cert_sni(SSL_CTX *ctx, struct ckch_inst *ckch_inst,
+                                  struct bind_conf *s, struct ssl_bind_conf *conf,
+                                  struct pkey_info kinfo, char *name, int order)
+{
+    struct sni_ctx *sc;
+    int wild = 0, neg = 0;
+
+    if (*name == '!') {
+        neg = 1;
+        name++;
+    }
+    if (*name == '*') {
+        wild = 1;
+        name++;
+    }
+    /* !* filter is a nop */
+    if (neg && wild)
+        return order;
+    if (*name) {
+        int j, len;
+        len = strlen(name);
+        /* lowercase the name into trash; the loop stops at trash.size so
+         * the check below detects names that are too long */
+        for (j = 0; j < len && j < trash.size; j++)
+            trash.area[j] = tolower((unsigned char)name[j]);
+        if (j >= trash.size)
+            return -1;
+        trash.area[j] = 0;
+
+        /* the name is stored inline after the struct (ebmb key) */
+        sc = malloc(sizeof(struct sni_ctx) + len + 1);
+        if (!sc)
+            return -1;
+        memcpy(sc->name.key, trash.area, len + 1);
+        SSL_CTX_up_ref(ctx);
+        sc->ctx = ctx;
+        sc->conf = conf;
+        sc->kinfo = kinfo;
+        sc->order = order++;
+        sc->neg = neg;
+        sc->wild = wild;
+        /* not yet inserted in any ebtree */
+        sc->name.node.leaf_p = NULL;
+        sc->ckch_inst = ckch_inst;
+        LIST_APPEND(&ckch_inst->sni_ctx, &sc->by_ckch_inst);
+    }
+    return order;
+}
+
+/*
+ * Insert the sni_ctxs that are listed in the ckch_inst, in the bind_conf's sni_ctx tree
+ * This function can't return an error.
+ *
+ * Duplicates (same ctx/conf/neg/wild as an already-inserted entry) are
+ * detected, unlinked and freed instead of being inserted twice. If the
+ * instance is the default one, its SSL_CTX also replaces the bind_conf's
+ * default_ctx (the previous one is released).
+ *
+ * *CAUTION*: The caller must lock the sni tree if called in multithreading mode
+ */
+void ssl_sock_load_cert_sni(struct ckch_inst *ckch_inst, struct bind_conf *bind_conf)
+{
+
+    struct sni_ctx *sc0, *sc0b, *sc1;
+    struct ebmb_node *node;
+
+    list_for_each_entry_safe(sc0, sc0b, &ckch_inst->sni_ctx, by_ckch_inst) {
+
+        /* ignore if sc0 was already inserted in a tree */
+        if (sc0->name.node.leaf_p)
+            continue;
+
+        /* Check for duplicates. */
+        if (sc0->wild)
+            node = ebst_lookup(&bind_conf->sni_w_ctx, (char *)sc0->name.key);
+        else
+            node = ebst_lookup(&bind_conf->sni_ctx, (char *)sc0->name.key);
+
+        for (; node; node = ebmb_next_dup(node)) {
+            sc1 = ebmb_entry(node, struct sni_ctx, name);
+            if (sc1->ctx == sc0->ctx && sc1->conf == sc0->conf
+                && sc1->neg == sc0->neg && sc1->wild == sc0->wild) {
+                /* it's a duplicate, we should remove and free it */
+                LIST_DELETE(&sc0->by_ckch_inst);
+                SSL_CTX_free(sc0->ctx);
+                /* ha_free() also sets sc0 to NULL, which the test
+                 * below relies on to skip the insertion */
+                ha_free(&sc0);
+                break;
+            }
+        }
+
+        /* if duplicate, ignore the insertion */
+        if (!sc0)
+            continue;
+
+        if (sc0->wild)
+            ebst_insert(&bind_conf->sni_w_ctx, &sc0->name);
+        else
+            ebst_insert(&bind_conf->sni_ctx, &sc0->name);
+    }
+
+    /* replace the default_ctx if required with the instance's ctx. */
+    if (ckch_inst->is_default) {
+        SSL_CTX_free(bind_conf->default_ctx);
+        SSL_CTX_up_ref(ckch_inst->ctx);
+        bind_conf->default_ctx = ckch_inst->ctx;
+        bind_conf->default_inst = ckch_inst;
+    }
+}
+
+/*
+ * tree used to store the ckchs (cert/key/chain stores), ordered by
+ * filename/bundle name
+ */
+struct eb_root ckchs_tree = EB_ROOT_UNIQUE;
+
+/* tree of crtlist (crt-list file or directory), ordered by file/dir name */
+struct eb_root crtlists_tree = EB_ROOT_UNIQUE;
+
+/* Loads Diffie-Hellman parameter from a ckchs to an SSL_CTX.
+ * If there is no DH parameter available in the ckchs, the global
+ * DH parameter is loaded into the SSL_CTX and if there is no
+ * DH parameter available in ckchs nor in global, the default
+ * DH parameters are applied on the SSL_CTX.
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if a reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ * The value 0 means there is no error nor warning and
+ * the operation succeed.
+ */
+#ifndef OPENSSL_NO_DH
+static int ssl_sock_load_dh_params(SSL_CTX *ctx, const struct ckch_data *data,
+                                   const char *path, char **err)
+{
+    int ret = 0;
+    HASSL_DH *dh = NULL;
+
+    if (data && data->dh) {
+        /* DH params were provided alongside the certificate */
+        dh = data->dh;
+        if (!ssl_sock_set_tmp_dh(ctx, dh)) {
+            /* memprintf() re-uses the previous message as prefix, so the
+             * two calls build a single two-part message */
+            memprintf(err, "%sunable to load the DH parameter specified in '%s'",
+                      err && *err ? *err : "", path);
+            memprintf(err, "%s, DH ciphers won't be available.\n",
+                      err && *err ? *err : "");
+            ret |= ERR_WARN;
+            goto end;
+        }
+
+        if (ssl_dh_ptr_index >= 0) {
+            /* store a pointer to the DH params to avoid complaining about
+               ssl-default-dh-param not being set for this SSL_CTX */
+            SSL_CTX_set_ex_data(ctx, ssl_dh_ptr_index, dh);
+        }
+    }
+    else if (global_dh) {
+        /* fall back to the global ssl-dh-param-file parameters */
+        if (!ssl_sock_set_tmp_dh(ctx, global_dh)) {
+            memprintf(err, "%sunable to use the global DH parameter for certificate '%s'",
+                      err && *err ? *err : "", path);
+            memprintf(err, "%s, DH ciphers won't be available.\n",
+                      err && *err ? *err : "");
+            ret |= ERR_WARN;
+            goto end;
+        }
+    }
+    else {
+        /* Clear openssl global errors stack */
+        ERR_clear_error();
+
+        /* We do not want DHE ciphers to be added to the cipher list
+         * unless there is an explicit global dh option in the conf.
+         */
+        if (global_ssl.default_dh_param) {
+            if (global_ssl.default_dh_param <= 1024) {
+                /* we are limited to DH parameter of 1024 bits anyway */
+                if (local_dh_1024 == NULL)
+                    local_dh_1024 = ssl_get_dh_1024();
+
+                if (local_dh_1024 == NULL) {
+                    memprintf(err, "%sunable to load default 1024 bits DH parameter for certificate '%s'.\n",
+                              err && *err ? *err : "", path);
+                    ret |= ERR_ALERT | ERR_FATAL;
+                    goto end;
+                }
+
+                if (!ssl_sock_set_tmp_dh(ctx, local_dh_1024)) {
+                    memprintf(err, "%sunable to load default 1024 bits DH parameter for certificate '%s'.\n",
+                              err && *err ? *err : "", path);
+                    memprintf(err, "%s, DH ciphers won't be available.\n",
+                              err && *err ? *err : "");
+                    ret |= ERR_WARN;
+                    goto end;
+                }
+            }
+            else {
+                /* bigger limit: size the DH params after each connection's
+                 * private key, either lazily via callback (< 3.0) or right
+                 * now from the cert's key (>= 3.0, no callback API) */
+#if (HA_OPENSSL_VERSION_NUMBER < 0x3000000fL)
+                SSL_CTX_set_tmp_dh_callback(ctx, ssl_get_tmp_dh_cbk);
+#else
+                ssl_sock_set_tmp_dh_from_pkey(ctx, data ? data->key : NULL);
+#endif
+            }
+        }
+    }
+
+end:
+    ERR_clear_error();
+    return ret;
+}
+#endif
+
+
+/* Load a certificate chain into an SSL context.
+ * The leaf certificate from <data> is installed in <ctx>, then the chain is
+ * taken either from <data> itself or from the globally loaded issuer chains,
+ * and is both returned through <find_chain> and installed in <ctx>.
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if the reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ * The caller is responsible of freeing the newly built or newly refcounted
+ * find_chain element.
+ * The value 0 means there is no error nor warning and
+ * the operation succeed.
+ */
+static int ssl_sock_load_cert_chain(const char *path, const struct ckch_data *data,
+                                    SSL_CTX *ctx, STACK_OF(X509) **find_chain, char **err)
+{
+    int errcode = 0;
+    int ret;
+
+    ERR_clear_error();
+
+    if (find_chain == NULL) {
+        errcode |= ERR_FATAL;
+        goto end;
+    }
+
+    if (!SSL_CTX_use_certificate(ctx, data->cert)) {
+        ret = ERR_get_error();
+        memprintf(err, "%sunable to load SSL certificate into SSL Context '%s': %s.\n",
+                  err && *err ? *err : "", path, ERR_reason_error_string(ret));
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto end;
+    }
+
+    if (data->chain) {
+        /* the PEM file provided its own chain */
+        *find_chain = X509_chain_up_ref(data->chain);
+    } else {
+        /* Find Certificate Chain in global */
+        struct issuer_chain *issuer;
+        issuer = ssl_get0_issuer_chain(data->cert);
+        if (issuer)
+            *find_chain = X509_chain_up_ref(issuer->chain);
+    }
+
+    if (!*find_chain) {
+        /* always put a null chain stack in the SSL_CTX so it does not
+         * try to build the chain from the verify store */
+        *find_chain = sk_X509_new_null();
+    }
+
+    /* Load all certs in the data into the ctx_chain for the ssl_ctx */
+#ifdef SSL_CTX_set1_chain
+    if (!SSL_CTX_set1_chain(ctx, *find_chain)) {
+        ret = ERR_get_error();
+        memprintf(err, "%sunable to load chain certificate into SSL Context '%s': %s.\n",
+                  err && *err ? *err : "", path, ERR_reason_error_string(ret));
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto end;
+    }
+#else
+    { /* legacy compat (< openssl 1.0.2) */
+        X509 *ca;
+        /* note: this consumes *find_chain's elements, ownership of each
+         * cert moves to the SSL_CTX on success */
+        while ((ca = sk_X509_shift(*find_chain)))
+            if (!SSL_CTX_add_extra_chain_cert(ctx, ca)) {
+                memprintf(err, "%sunable to load chain certificate into SSL Context '%s'.\n",
+                          err && *err ? *err : "", path);
+                X509_free(ca);
+                errcode |= ERR_ALERT | ERR_FATAL;
+                goto end;
+            }
+    }
+#endif
+
+#ifdef SSL_CTX_build_cert_chain
+    /* remove the Root CA from the SSL_CTX if the option is activated */
+    if (global_ssl.skip_self_issued_ca) {
+        if (!SSL_CTX_build_cert_chain(ctx, SSL_BUILD_CHAIN_FLAG_NO_ROOT|SSL_BUILD_CHAIN_FLAG_UNTRUSTED|SSL_BUILD_CHAIN_FLAG_IGNORE_ERROR)) {
+            memprintf(err, "%sunable to load chain certificate into SSL Context '%s'.\n",
+                      err && *err ? *err : "", path);
+            errcode |= ERR_ALERT | ERR_FATAL;
+            goto end;
+        }
+    }
+#endif
+
+end:
+    return errcode;
+}
+
+
+/* Loads the info in ckch into ctx: private key, certificate + chain, DH
+ * parameters, and optional SCT/OCSP extensions when built in.
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if the reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ * The value 0 means there is no error nor warning and
+ * the operation succeed.
+ */
+static int ssl_sock_put_ckch_into_ctx(const char *path, struct ckch_data *data, SSL_CTX *ctx, char **err)
+{
+    int errcode = 0;
+    STACK_OF(X509) *find_chain = NULL;
+
+    ERR_clear_error();
+
+    if (SSL_CTX_use_PrivateKey(ctx, data->key) <= 0) {
+        int ret;
+
+        ret = ERR_get_error();
+        memprintf(err, "%sunable to load SSL private key into SSL Context '%s': %s.\n",
+                  err && *err ? *err : "", path, ERR_reason_error_string(ret));
+        errcode |= ERR_ALERT | ERR_FATAL;
+        /* nothing else can succeed without the key, return right away
+         * (find_chain is still NULL so there is nothing to free) */
+        return errcode;
+    }
+
+    /* Load certificate chain */
+    errcode |= ssl_sock_load_cert_chain(path, data, ctx, &find_chain, err);
+    if (errcode & ERR_CODE)
+        goto end;
+
+#ifndef OPENSSL_NO_DH
+    /* store a NULL pointer to indicate we have not yet loaded
+       a custom DH param file */
+    if (ssl_dh_ptr_index >= 0) {
+        SSL_CTX_set_ex_data(ctx, ssl_dh_ptr_index, NULL);
+    }
+
+    errcode |= ssl_sock_load_dh_params(ctx, data, path, err);
+    if (errcode & ERR_CODE) {
+        memprintf(err, "%sunable to load DH parameters from file '%s'.\n",
+                  err && *err ? *err : "", path);
+        goto end;
+    }
+#endif
+
+#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
+    /* Signed Certificate Timestamp (RFC 6962) extension, if present */
+    if (sctl_ex_index >= 0 && data->sctl) {
+        if (ssl_sock_load_sctl(ctx, data->sctl) < 0) {
+            memprintf(err, "%s '%s.sctl' is present but cannot be read or parsed'.\n",
+                      err && *err ? *err : "", path);
+            errcode |= ERR_ALERT | ERR_FATAL;
+            goto end;
+        }
+    }
+#endif
+
+#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) || defined OPENSSL_IS_BORINGSSL)
+    /* Load OCSP Info into context
+     * If OCSP update mode is set to 'on', an entry will be created in the
+     * ocsp tree even if no ocsp_response was known during init, unless the
+     * frontend's conf disables ocsp update explicitly.
+     */
+    if (ssl_sock_load_ocsp(path, ctx, data, find_chain) < 0) {
+        if (data->ocsp_response)
+            memprintf(err, "%s '%s.ocsp' is present and activates OCSP but it is impossible to compute the OCSP certificate ID (maybe the issuer could not be found)'.\n",
+                      err && *err ? *err : "", path);
+        else
+            memprintf(err, "%s '%s' has an OCSP auto-update set to 'on' but an error occurred (maybe the OCSP URI or the issuer could not be found)'.\n",
+                      err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto end;
+    }
+#endif
+
+ end:
+    /* release the chain reference taken by ssl_sock_load_cert_chain() */
+    sk_X509_pop_free(find_chain, X509_free);
+    return errcode;
+}
+
+
+/* Loads the info of a ckch built out of a backend certificate into an SSL ctx
+ * (private key, certificate + chain, and a key/cert consistency check).
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if the reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ * The value 0 means there is no error nor warning and
+ * the operation succeed.
+ */
+static int ssl_sock_put_srv_ckch_into_ctx(const char *path, const struct ckch_data *data,
+                                          SSL_CTX *ctx, char **err)
+{
+    int errcode = 0;
+    STACK_OF(X509) *find_chain = NULL;
+
+    /* Load the private key */
+    if (SSL_CTX_use_PrivateKey(ctx, data->key) <= 0) {
+        /* NOTE(review): unlike ssl_sock_put_ckch_into_ctx(), a key load
+         * failure does not abort here; the chain is still loaded and the
+         * check_private_key below will also flag the mismatch — confirm
+         * this accumulation of errors is intentional. */
+        memprintf(err, "%sunable to load SSL private key into SSL Context '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+    }
+
+    /* Load certificate chain */
+    errcode |= ssl_sock_load_cert_chain(path, data, ctx, &find_chain, err);
+    if (errcode & ERR_CODE)
+        goto end;
+
+    /* make sure the installed key matches the installed certificate */
+    if (SSL_CTX_check_private_key(ctx) <= 0) {
+        memprintf(err, "%sinconsistencies between private key and certificate loaded from PEM file '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+    }
+
+end:
+    /* release the chain reference taken by ssl_sock_load_cert_chain() */
+    sk_X509_pop_free(find_chain, X509_free);
+    return errcode;
+}
+
+
+/*
+ * This function allocate a ckch_inst and create its snis.
+ * A new server-side SSL_CTX is built from <ckchs>, then one sni_ctx is
+ * created per explicit filter in <sni_filter> (when <fcount> > 0) or,
+ * otherwise, per SAN DNS entry and CN found in the certificate. The first
+ * instance loaded on a bind_conf also becomes its default context.
+ * On success the new instance is returned through <ckchi>.
+ *
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if the reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ */
+int ckch_inst_new_load_store(const char *path, struct ckch_store *ckchs, struct bind_conf *bind_conf,
+                             struct ssl_bind_conf *ssl_conf, char **sni_filter, int fcount, struct ckch_inst **ckchi, char **err)
+{
+    SSL_CTX *ctx;
+    int i;
+    int order = 0;
+    X509_NAME *xname;
+    char *str;
+    EVP_PKEY *pkey;
+    struct pkey_info kinfo = { .sig = TLSEXT_signature_anonymous, .bits = 0 };
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+    STACK_OF(GENERAL_NAME) *names;
+#endif
+    struct ckch_data *data;
+    struct ckch_inst *ckch_inst = NULL;
+    int errcode = 0;
+
+    *ckchi = NULL;
+
+    if (!ckchs || !ckchs->data)
+        return ERR_FATAL;
+
+    data = ckchs->data;
+
+    ctx = SSL_CTX_new(SSLv23_server_method());
+    if (!ctx) {
+        memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto error;
+    }
+
+    errcode |= ssl_sock_put_ckch_into_ctx(path, data, ctx, err);
+    if (errcode & ERR_CODE)
+        goto error;
+
+    ckch_inst = ckch_inst_new();
+    if (!ckch_inst) {
+        memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto error;
+    }
+
+    /* collect the key type/size, used for sni_ctx selection */
+    pkey = X509_get_pubkey(data->cert);
+    if (pkey) {
+        kinfo.bits = EVP_PKEY_bits(pkey);
+        switch(EVP_PKEY_base_id(pkey)) {
+        case EVP_PKEY_RSA:
+            kinfo.sig = TLSEXT_signature_rsa;
+            break;
+        case EVP_PKEY_EC:
+            kinfo.sig = TLSEXT_signature_ecdsa;
+            break;
+        case EVP_PKEY_DSA:
+            kinfo.sig = TLSEXT_signature_dsa;
+            break;
+        }
+        EVP_PKEY_free(pkey);
+    }
+
+    if (fcount) {
+        /* explicit SNI filters from the crt-list take precedence over the
+         * names carried by the certificate itself */
+        while (fcount--) {
+            order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, sni_filter[fcount], order);
+            if (order < 0) {
+                memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
+                errcode |= ERR_ALERT | ERR_FATAL;
+                goto error;
+            }
+        }
+    }
+    else {
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+        /* one sni_ctx per subjectAltName DNS entry */
+        names = X509_get_ext_d2i(data->cert, NID_subject_alt_name, NULL, NULL);
+        if (names) {
+            for (i = 0; i < sk_GENERAL_NAME_num(names); i++) {
+                GENERAL_NAME *name = sk_GENERAL_NAME_value(names, i);
+                if (name->type == GEN_DNS) {
+                    if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.dNSName) >= 0) {
+                        order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, str, order);
+                        OPENSSL_free(str);
+                        if (order < 0) {
+                            memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
+                            errcode |= ERR_ALERT | ERR_FATAL;
+                            goto error;
+                        }
+                    }
+                }
+            }
+            sk_GENERAL_NAME_pop_free(names, GENERAL_NAME_free);
+        }
+#endif /* SSL_CTRL_SET_TLSEXT_HOSTNAME */
+        /* and one per commonName entry of the subject */
+        xname = X509_get_subject_name(data->cert);
+        i = -1;
+        while ((i = X509_NAME_get_index_by_NID(xname, NID_commonName, i)) != -1) {
+            X509_NAME_ENTRY *entry = X509_NAME_get_entry(xname, i);
+            ASN1_STRING *value;
+
+            value = X509_NAME_ENTRY_get_data(entry);
+            if (ASN1_STRING_to_UTF8((unsigned char **)&str, value) >= 0) {
+                order = ckch_inst_add_cert_sni(ctx, ckch_inst, bind_conf, ssl_conf, kinfo, str, order);
+                OPENSSL_free(str);
+                if (order < 0) {
+                    memprintf(err, "%sunable to create a sni context.\n", err && *err ? *err : "");
+                    errcode |= ERR_ALERT | ERR_FATAL;
+                    goto error;
+                }
+            }
+        }
+    }
+    /* we must not free the SSL_CTX anymore below, since it's already in
+     * the tree, so it will be discovered and cleaned in time.
+     */
+
+#ifndef SSL_CTRL_SET_TLSEXT_HOSTNAME
+    if (bind_conf->default_ctx) {
+        memprintf(err, "%sthis version of openssl cannot load multiple SSL certificates.\n",
+                  err && *err ? *err : "");
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto error;
+    }
+#endif
+    if (!bind_conf->default_ctx) {
+        /* first certificate loaded on this bind_conf: it becomes the
+         * default one, with its own reference on the ctx */
+        bind_conf->default_ctx = ctx;
+        bind_conf->default_ssl_conf = ssl_conf;
+        ckch_inst->is_default = 1;
+        SSL_CTX_up_ref(ctx);
+        bind_conf->default_inst = ckch_inst;
+    }
+
+    /* Always keep a reference to the newly constructed SSL_CTX in the
+     * instance. This way if the instance has no SNIs, the SSL_CTX will
+     * still be linked. */
+    SSL_CTX_up_ref(ctx);
+    ckch_inst->ctx = ctx;
+
+    /* everything succeed, the ckch instance can be used */
+    ckch_inst->bind_conf = bind_conf;
+    ckch_inst->ssl_conf = ssl_conf;
+    ckch_inst->ckch_store = ckchs;
+
+    SSL_CTX_free(ctx); /* we need to free the ctx since we incremented the refcount where it's used */
+
+    *ckchi = ckch_inst;
+    return errcode;
+
+error:
+    /* free the allocated sni_ctxs */
+    if (ckch_inst) {
+        /* drop the extra reference taken when becoming the default ctx */
+        if (ckch_inst->is_default)
+            SSL_CTX_free(ctx);
+
+        ckch_inst_free(ckch_inst);
+        ckch_inst = NULL;
+    }
+    /* drop the initial SSL_CTX_new() reference */
+    SSL_CTX_free(ctx);
+
+    return errcode;
+}
+
+
+/*
+ * This function allocate a ckch_inst that will be used on the backend side
+ * (server line). A client-side SSL_CTX is built from <ckchs> and attached
+ * to the new instance, which is returned through <ckchi> on success.
+ *
+ * Returns a bitfield containing the flags:
+ *     ERR_FATAL in any fatal error case
+ *     ERR_ALERT if the reason of the error is available in err
+ *     ERR_WARN if a warning is available into err
+ */
+int ckch_inst_new_load_srv_store(const char *path, struct ckch_store *ckchs,
+                                 struct ckch_inst **ckchi, char **err)
+{
+    SSL_CTX *ctx;
+    struct ckch_data *data;
+    struct ckch_inst *ckch_inst = NULL;
+    int errcode = 0;
+
+    *ckchi = NULL;
+
+    if (!ckchs || !ckchs->data)
+        return ERR_FATAL;
+
+    data = ckchs->data;
+
+    /* backend side: client method */
+    ctx = SSL_CTX_new(SSLv23_client_method());
+    if (!ctx) {
+        memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto error;
+    }
+
+    errcode |= ssl_sock_put_srv_ckch_into_ctx(path, data, ctx, err);
+    if (errcode & ERR_CODE)
+        goto error;
+
+    ckch_inst = ckch_inst_new();
+    if (!ckch_inst) {
+        memprintf(err, "%sunable to allocate SSL context for cert '%s'.\n",
+                  err && *err ? *err : "", path);
+        errcode |= ERR_ALERT | ERR_FATAL;
+        goto error;
+    }
+
+    /* everything succeed, the ckch instance can be used */
+    ckch_inst->bind_conf = NULL;
+    ckch_inst->ssl_conf = NULL;
+    ckch_inst->ckch_store = ckchs;
+    /* the instance keeps the SSL_CTX_new() reference */
+    ckch_inst->ctx = ctx;
+    ckch_inst->is_server_instance = 1;
+
+    *ckchi = ckch_inst;
+    return errcode;
+
+error:
+    SSL_CTX_free(ctx);
+
+    return errcode;
+}
+
+/* Instantiate the ckch_store <ckchs> on <bind_conf>: build a ckch_inst,
+ * publish its SNIs in the bind_conf trees and link the instance to the
+ * store. Returns a set of ERR_* flags possibly with an error in <err>. */
+static int ssl_sock_load_ckchs(const char *path, struct ckch_store *ckchs,
+                               struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf,
+                               char **sni_filter, int fcount, struct ckch_inst **ckch_inst, char **err)
+{
+    int errcode;
+
+    /* build an instance out of the store that was found in the tree */
+    errcode = ckch_inst_new_load_store(path, ckchs, bind_conf, ssl_conf, sni_filter, fcount, ckch_inst, err);
+    if (errcode & ERR_CODE)
+        return errcode;
+
+    /* publish the instance's sni_ctxs into the bind_conf's SNI trees */
+    ssl_sock_load_cert_sni(*ckch_inst, bind_conf);
+
+    /* succeed, add the instance to the ckch_store's list of instance */
+    LIST_APPEND(&ckchs->ckch_inst, &((*ckch_inst)->by_ckchs));
+    return errcode;
+}
+
+/* This function generates a <struct ckch_inst *> for a <struct server *>, and
+ * fill the SSL_CTX of the server.
+ *
+ * Returns a set of ERR_* flags possibly with an error in <err>. */
+static int ssl_sock_load_srv_ckchs(const char *path, struct ckch_store *ckchs,
+                                   struct server *server, struct ckch_inst **ckch_inst, char **err)
+{
+    int errcode;
+
+    /* build a backend instance out of the store found in the tree */
+    errcode = ckch_inst_new_load_srv_store(path, ckchs, ckch_inst, err);
+    if (errcode & ERR_CODE)
+        return errcode;
+
+    (*ckch_inst)->server = server;
+    /* the server takes its own reference on the instance's SSL_CTX */
+    SSL_CTX_up_ref((*ckch_inst)->ctx);
+    server->ssl_ctx.ctx = (*ckch_inst)->ctx;
+
+    /* succeed, add the instance to the ckch_store's list of instance */
+    LIST_APPEND(&ckchs->ckch_inst, &((*ckch_inst)->by_ckchs));
+    return errcode;
+}
+
+
+
+
+/* Make sure openssl opens /dev/urandom before the chroot. The work is only
+ * done once. Zero is returned if the operation fails. No error is returned
+ * if the random is said as not implemented, because we expect that openssl
+ * will use another method once needed.
+ */
+int ssl_initialize_random(void)
+{
+    static int random_initialized = 0;
+    unsigned char rnd;
+
+    if (random_initialized)
+        return 1;
+
+    /* RAND_bytes() returns -1 when the method is not supported, which we
+     * deliberately treat as success (see comment above). */
+    if (RAND_bytes(&rnd, 1) != 0)
+        random_initialized = 1;
+
+    return random_initialized;
+}
+
+/* Load a crt-list file, this is done in 2 parts:
+ *  - store the content of the file in a crtlist structure with crtlist_entry structures
+ *  - generate the instances by iterating on entries in the crtlist struct
+ *
+ * When <dir> is non-zero, <file> is treated as a directory to scan instead
+ * of a crt-list file. An already-loaded crt-list is reused from the global
+ * tree. On error, the instances generated so far for this bind_conf are
+ * unwound.
+ *
+ * Nothing is locked there, this function is used in the configuration parser.
+ *
+ * Returns a set of ERR_* flags possibly with an error in <err>.
+ */
+int ssl_sock_load_cert_list_file(char *file, int dir, struct bind_conf *bind_conf, struct proxy *curproxy, char **err)
+{
+    struct crtlist *crtlist = NULL;
+    struct ebmb_node *eb;
+    struct crtlist_entry *entry = NULL;
+    struct bind_conf_list *bind_conf_node = NULL;
+    int cfgerr = 0;
+    char *end;
+
+    bind_conf_node = malloc(sizeof(*bind_conf_node));
+    if (!bind_conf_node) {
+        memprintf(err, "%sCan't alloc memory!\n", err && *err ? *err : "");
+        cfgerr |= ERR_FATAL | ERR_ALERT;
+        goto error;
+    }
+    bind_conf_node->next = NULL;
+    bind_conf_node->bind_conf = bind_conf;
+
+    /* strip trailing slashes, including first one */
+    for (end = file + strlen(file) - 1; end >= file && *end == '/'; end--)
+        *end = 0;
+
+    /* look for an existing crtlist or create one */
+    eb = ebst_lookup(&crtlists_tree, file);
+    if (eb) {
+        crtlist = ebmb_entry(eb, struct crtlist, node);
+    } else {
+        /* load a crt-list OR a directory */
+        if (dir)
+            cfgerr |= crtlist_load_cert_dir(file, bind_conf, &crtlist, err);
+        else
+            cfgerr |= crtlist_parse_file(file, bind_conf, curproxy, &crtlist, err);
+
+        if (!(cfgerr & ERR_CODE))
+            ebst_insert(&crtlists_tree, &crtlist->node);
+    }
+
+    if (cfgerr & ERR_CODE) {
+        cfgerr |= ERR_FATAL | ERR_ALERT;
+        goto error;
+    }
+
+    /* generates ckch instance from the crtlist_entry */
+    list_for_each_entry(entry, &crtlist->ord_entries, by_crtlist) {
+        struct ckch_store *store;
+        struct ckch_inst *ckch_inst = NULL;
+
+        store = entry->node.key;
+        cfgerr |= ssl_sock_load_ckchs(store->path, store, bind_conf, entry->ssl_conf, entry->filters, entry->fcount, &ckch_inst, err);
+        if (cfgerr & ERR_CODE) {
+            memprintf(err, "error processing line %d in file '%s' : %s", entry->linenum, file, *err);
+            goto error;
+        }
+        LIST_APPEND(&entry->ckch_inst, &ckch_inst->by_crtlist_entry);
+        ckch_inst->crtlist_entry = entry;
+    }
+
+    /* add the bind_conf to the list */
+    bind_conf_node->next = crtlist->bind_conf;
+    crtlist->bind_conf = bind_conf_node;
+
+    return cfgerr;
+error:
+    {
+        /* unwind: free the instances generated for this bind_conf by the
+         * entries processed before the failing one */
+        struct crtlist_entry *lastentry;
+        struct ckch_inst *inst, *s_inst;
+
+        lastentry = entry; /* which entry we tried to generate last */
+        if (lastentry) {
+            list_for_each_entry(entry, &crtlist->ord_entries, by_crtlist) {
+                if (entry == lastentry) /* last entry we tried to generate, no need to go further */
+                    break;
+
+                list_for_each_entry_safe(inst, s_inst, &entry->ckch_inst, by_crtlist_entry) {
+
+                    /* this was not generated for this bind_conf, skip */
+                    if (inst->bind_conf != bind_conf)
+                        continue;
+
+                    /* free the sni_ctx and instance */
+                    ckch_inst_free(inst);
+                }
+            }
+        }
+        /* free(NULL) is a no-op when the initial malloc failed */
+        free(bind_conf_node);
+    }
+    return cfgerr;
+}
+
+/* Loads certificate(s) for <bind_conf> from <path>, which may be a single
+ * PEM file, a directory (loaded like a crt-list), or, when SSL_GF_BUNDLE is
+ * enabled, the prefix of a multi-cert bundle (path.rsa / path.ecdsa / ...).
+ * An already-loaded ckch_store found in the tree is reused directly.
+ * Returns a set of ERR_* flags possibly with an error in <err>. */
+int ssl_sock_load_cert(char *path, struct bind_conf *bind_conf, char **err)
+{
+    struct stat buf;
+    int cfgerr = 0;
+    struct ckch_store *ckchs;
+    struct ckch_inst *ckch_inst = NULL;
+    int found = 0; /* did we find a file to load ? */
+
+    if ((ckchs = ckchs_lookup(path))) {
+        /* we found the ckchs in the tree, we can use it directly */
+        cfgerr |= ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err);
+        found++;
+    } else if (stat(path, &buf) == 0) {
+        found++;
+        if (S_ISDIR(buf.st_mode) == 0) {
+            /* regular file: load it as a standalone certificate */
+            ckchs = ckchs_load_cert_file(path, err);
+            if (!ckchs)
+                cfgerr |= ERR_ALERT | ERR_FATAL;
+            cfgerr |= ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err);
+        } else {
+            /* directory: load every certificate it contains */
+            cfgerr |= ssl_sock_load_cert_list_file(path, 1, bind_conf, bind_conf->frontend, err);
+        }
+    } else {
+        /* stat failed, could be a bundle */
+        if (global_ssl.extra_files & SSL_GF_BUNDLE) {
+            char fp[MAXPATHLEN+1] = {0};
+            int n = 0;
+
+            /* Load all possible certs and keys in separate ckch_store */
+            for (n = 0; n < SSL_SOCK_NUM_KEYTYPES; n++) {
+                struct stat st;
+                int ret;
+
+                ret = snprintf(fp, sizeof(fp), "%s.%s", path, SSL_SOCK_KEYTYPE_NAMES[n]);
+                /* snprintf() returns the length it would have needed: a
+                 * value >= sizeof(fp) means the path was truncated (the
+                 * previous '>' test let a path of exactly sizeof(fp)
+                 * characters slip through truncated). Negative means a
+                 * formatting error. */
+                if (ret < 0 || ret >= (int)sizeof(fp))
+                    continue;
+
+                if ((ckchs = ckchs_lookup(fp))) {
+                    cfgerr |= ssl_sock_load_ckchs(fp, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err);
+                    found++;
+                } else {
+                    if (stat(fp, &st) == 0) {
+                        found++;
+                        ckchs = ckchs_load_cert_file(fp, err);
+                        if (!ckchs)
+                            cfgerr |= ERR_ALERT | ERR_FATAL;
+                        cfgerr |= ssl_sock_load_ckchs(fp, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err);
+                    }
+                }
+            }
+#if HA_OPENSSL_VERSION_NUMBER < 0x10101000L
+            /* bundles are emulated with separate SSL_CTXs, which needs the
+             * client-hello callback available only from 1.1.1 */
+            if (found) {
+                memprintf(err, "%sCan't load '%s'. Loading a multi certificates bundle requires OpenSSL >= 1.1.1\n",
+                          err && *err ? *err : "", path);
+                cfgerr |= ERR_ALERT | ERR_FATAL;
+            }
+#endif
+        }
+    }
+    if (!found) {
+        memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n",
+                  err && *err ? *err : "", path, strerror(errno));
+        cfgerr |= ERR_ALERT | ERR_FATAL;
+    }
+
+    return cfgerr;
+}
+
+
+/* Create a full ssl context and ckch instance that will be used for a specific
+ * backend server (server configuration line). An existing ckch_store is
+ * reused from the tree; otherwise the file is loaded only when
+ * <create_if_none> is set (directories are never accepted on the backend
+ * side).
+ * Returns a set of ERR_* flags possibly with an error in <err>.
+ */
+int ssl_sock_load_srv_cert(char *path, struct server *server, int create_if_none, char **err)
+{
+    struct stat buf;
+    int cfgerr = 0;
+    struct ckch_store *ckchs;
+    int found = 0; /* did we found a file to load ? */
+
+    if ((ckchs = ckchs_lookup(path))) {
+        /* we found the ckchs in the tree, we can use it directly */
+        cfgerr |= ssl_sock_load_srv_ckchs(path, ckchs, server, &server->ssl_ctx.inst, err);
+        found++;
+    } else {
+        if (!create_if_none) {
+            memprintf(err, "%sunable to stat SSL certificate '%s'.\n",
+                      err && *err ? *err : "", path);
+            cfgerr |= ERR_ALERT | ERR_FATAL;
+            goto out;
+        }
+
+        if (stat(path, &buf) == 0) {
+            /* We do not manage directories on backend side. */
+            if (S_ISDIR(buf.st_mode) == 0) {
+                ++found;
+                ckchs = ckchs_load_cert_file(path, err);
+                /* note: a NULL ckchs is caught by ssl_sock_load_srv_ckchs()
+                 * which returns ERR_FATAL in that case */
+                if (!ckchs)
+                    cfgerr |= ERR_ALERT | ERR_FATAL;
+                cfgerr |= ssl_sock_load_srv_ckchs(path, ckchs, server, &server->ssl_ctx.inst, err);
+            }
+        }
+    }
+    if (!found) {
+        memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n",
+                  err && *err ? *err : "", path, strerror(errno));
+        cfgerr |= ERR_ALERT | ERR_FATAL;
+    }
+
+out:
+    return cfgerr;
+}
+
+/* Create an initial CTX used to start the SSL connection before switchctx.
+ * Resolves the configured ssl-min-ver/ssl-max-ver (or legacy no-xxx flags)
+ * against what this OpenSSL build supports, stores the effective range back
+ * into the bind_conf, sets the context options/modes and installs the
+ * SNI/clienthello switching callbacks. Returns the number of configuration
+ * errors encountered (0 on success). */
+static int
+ssl_sock_initial_ctx(struct bind_conf *bind_conf)
+{
+    SSL_CTX *ctx = NULL;
+    long options =
+        SSL_OP_ALL | /* all known workarounds for bugs */
+        SSL_OP_NO_SSLv2 |
+        SSL_OP_NO_COMPRESSION |
+        SSL_OP_SINGLE_DH_USE |
+        SSL_OP_SINGLE_ECDH_USE |
+        SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION |
+        SSL_OP_PRIORITIZE_CHACHA |
+        SSL_OP_CIPHER_SERVER_PREFERENCE;
+    long mode =
+        SSL_MODE_ENABLE_PARTIAL_WRITE |
+        SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
+        SSL_MODE_RELEASE_BUFFERS |
+        SSL_MODE_SMALL_BUFFERS;
+    struct tls_version_filter *conf_ssl_methods = &bind_conf->ssl_conf.ssl_methods;
+    int i, min, max, hole;
+    int flags = MC_SSL_O_ALL;
+    int cfgerr = 0;
+    const int default_min_ver = CONF_TLSV12;
+
+    /* NOTE(review): the SSL_CTX_new() result is not checked for NULL here
+     * before being stored and used below — confirm allocation failure is
+     * acceptable/handled at a higher level. */
+    ctx = SSL_CTX_new(SSLv23_server_method());
+    bind_conf->initial_ctx = ctx;
+
+    /* legacy no-sslv3/no-tlsv1x flags are ignored as soon as an explicit
+     * min/max is configured */
+    if (conf_ssl_methods->flags && (conf_ssl_methods->min || conf_ssl_methods->max))
+        ha_warning("Proxy '%s': no-sslv3/no-tlsv1x are ignored for bind '%s' at [%s:%d]. "
+                   "Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
+                   bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+    else
+        flags = conf_ssl_methods->flags;
+
+    min = conf_ssl_methods->min;
+    max = conf_ssl_methods->max;
+
+    /* default minimum is TLSV12, */
+    if (!min) {
+        if (!max || (max >= default_min_ver)) {
+            min = default_min_ver;
+        } else {
+            ha_warning("Proxy '%s': Ambiguous configuration for bind '%s' at [%s:%d]: the ssl-min-ver value is not configured and the ssl-max-ver value is lower than the default ssl-min-ver value (%s). "
+                       "Setting the ssl-min-ver to %s. Use 'ssl-min-ver' to fix this.\n",
+                       bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line, methodVersions[default_min_ver].name, methodVersions[max].name);
+            min = max;
+        }
+    }
+    /* Real min and max should be determinate with configuration and openssl's capabilities */
+    /* mark every version below min and above max as disabled in <flags> */
+    if (min)
+        flags |= (methodVersions[min].flag - 1);
+    if (max)
+        flags |= ~((methodVersions[max].flag << 1) - 1);
+    /* find min, max and holes */
+    min = max = CONF_TLSV_NONE;
+    hole = 0;
+    for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
+        /* version is in openssl && version not disable in configuration */
+        if (methodVersions[i].option && !(flags & methodVersions[i].flag)) {
+            if (min) {
+                /* an enabled version after a disabled one: the range is
+                 * not contiguous, warn once per hole */
+                if (hole) {
+                    ha_warning("Proxy '%s': SSL/TLS versions range not contiguous for bind '%s' at [%s:%d]. "
+                               "Hole find for %s. Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
+                               bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line,
+                               methodVersions[hole].name);
+                    hole = 0;
+                }
+                max = i;
+            }
+            else {
+                min = max = i;
+            }
+        }
+        else {
+            if (min)
+                hole = i;
+        }
+    if (!min) {
+        ha_alert("Proxy '%s': all SSL/TLS versions are disabled for bind '%s' at [%s:%d].\n",
+                 bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+        cfgerr += 1;
+    }
+    /* save real min/max in bind_conf */
+    conf_ssl_methods->min = min;
+    conf_ssl_methods->max = max;
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+    /* Keep force-xxx implementation as it is in older haproxy. It's a
+       precautionary measure to avoid any surprise with older openssl version. */
+    if (min == max)
+        methodVersions[min].ctx_set_version(ctx, SET_SERVER);
+    else
+        for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++) {
+            /* clear every version flags in case SSL_CTX_new()
+             * returns an SSL_CTX with disabled versions */
+            SSL_CTX_clear_options(ctx, methodVersions[i].option);
+
+            if (flags & methodVersions[i].flag)
+                options |= methodVersions[i].option;
+
+        }
+#else   /* openssl >= 1.1.0 */
+    /* set the max_version is required to cap TLS version or activate new TLS (v1.3) */
+    methodVersions[min].ctx_set_version(ctx, SET_MIN);
+    methodVersions[max].ctx_set_version(ctx, SET_MAX);
+#endif
+
+    if (bind_conf->ssl_options & BC_SSL_O_NO_TLS_TICKETS)
+        options |= SSL_OP_NO_TICKET;
+    if (bind_conf->ssl_options & BC_SSL_O_PREF_CLIE_CIPH)
+        options &= ~SSL_OP_CIPHER_SERVER_PREFERENCE;
+
+#ifdef SSL_OP_NO_RENEGOTIATION
+    options |= SSL_OP_NO_RENEGOTIATION;
+#endif
+
+    SSL_CTX_set_options(ctx, options);
+
+#ifdef SSL_MODE_ASYNC
+    if (global_ssl.async)
+        mode |= SSL_MODE_ASYNC;
+#endif
+    SSL_CTX_set_mode(ctx, mode);
+    if (global_ssl.life_time)
+        SSL_CTX_set_timeout(ctx, global_ssl.life_time);
+
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+# ifdef OPENSSL_IS_BORINGSSL
+    SSL_CTX_set_select_certificate_cb(ctx, ssl_sock_switchctx_cbk);
+    SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
+# elif defined(HAVE_SSL_CLIENT_HELLO_CB)
+#  if defined(SSL_OP_NO_ANTI_REPLAY)
+    /* early-data requires disabling the anti-replay protection */
+    if (bind_conf->ssl_conf.early_data)
+        SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
+#  endif /* !SSL_OP_NO_ANTI_REPLAY */
+    SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
+    SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
+# elif 0 && defined(USE_OPENSSL_WOLFSSL)
+    SSL_CTX_set_cert_cb(ctx, ssl_sock_switchctx_wolfSSL_cbk, bind_conf);
+# else
+    /* ! OPENSSL_IS_BORINGSSL && ! HAVE_SSL_CLIENT_HELLO_CB */
+    SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
+# endif
+    SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
+#endif /* ! SSL_CTRL_SET_TLSEXT_HOSTNAME */
+    return cfgerr;
+}
+
+
+static inline void sh_ssl_sess_free_blocks(struct shared_block *first, void *data)
+{
+ struct sh_ssl_sess_hdr *sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data;
+ if (first->len > 0)
+ sh_ssl_sess_tree_delete(sh_ssl_sess);
+}
+
+/* return first block from sh_ssl_sess */
+static inline struct shared_block *sh_ssl_sess_first_block(struct sh_ssl_sess_hdr *sh_ssl_sess)
+{
+ return (struct shared_block *)((unsigned char *)sh_ssl_sess - offsetof(struct shared_block, data));
+
+}
+
+/* store a session into the cache
+ * s_id : session id padded with zero to SSL_MAX_SSL_SESSION_ID_LENGTH
+ * data: asn1 encoded session
+ * data_len: asn1 encoded session length
+ * Returns 1 id session was stored (else 0)
+ */
+static int sh_ssl_sess_store(unsigned char *s_id, unsigned char *data, int data_len)
+{
+ struct shared_block *first;
+ struct sh_ssl_sess_hdr *sh_ssl_sess, *oldsh_ssl_sess;
+
+ first = shctx_row_reserve_hot(ssl_shctx, NULL, data_len + sizeof(struct sh_ssl_sess_hdr));
+ if (!first) {
+ /* Could not retrieve enough free blocks to store that session */
+ return 0;
+ }
+
+ shctx_wrlock(ssl_shctx);
+
+ /* STORE the key in the first elem */
+ sh_ssl_sess = (struct sh_ssl_sess_hdr *)first->data;
+ memcpy(sh_ssl_sess->key_data, s_id, SSL_MAX_SSL_SESSION_ID_LENGTH);
+ first->len = sizeof(struct sh_ssl_sess_hdr);
+
+ /* it returns the already existing node
+ or current node if none, never returns null */
+ oldsh_ssl_sess = sh_ssl_sess_tree_insert(sh_ssl_sess);
+ if (oldsh_ssl_sess != sh_ssl_sess) {
+ /* NOTE: Row couldn't be in use because we lock read & write function */
+ /* release the reserved row */
+ first->len = 0; /* the len must be liberated in order not to call the release callback on it */
+ shctx_row_reattach(ssl_shctx, first);
+ /* replace the previous session already in the tree */
+ sh_ssl_sess = oldsh_ssl_sess;
+ /* ignore the previous session data, only use the header */
+ first = sh_ssl_sess_first_block(sh_ssl_sess);
+ shctx_row_detach(ssl_shctx, first);
+ first->len = sizeof(struct sh_ssl_sess_hdr);
+ }
+
+ if (shctx_row_data_append(ssl_shctx, first, data, data_len) < 0) {
+ shctx_row_reattach(ssl_shctx, first);
+ return 0;
+ }
+
+ shctx_row_reattach(ssl_shctx, first);
+
+ shctx_wrunlock(ssl_shctx);
+
+ return 1;
+}
+
/* SSL callback used when a new session is created while connecting to a server.
 * It serializes the session into this thread's reuse slot of the target server
 * (unless SRV_SSL_O_NO_REUSE is set, in which case any stored session is
 * dropped), records the SNI it was negotiated with, and publishes/retracts the
 * slot via <last_ssl_sess_tid>. Always returns 0 so that OpenSSL does not keep
 * an extra reference on <sess>.
 */
static int ssl_sess_new_srv_cb(SSL *ssl, SSL_SESSION *sess)
{
	struct connection *conn = SSL_get_ex_data(ssl, ssl_app_data_index);
	struct server *s;
	uint old_tid;

	s = __objt_server(conn->target);

	/* RWLOCK: only read lock the SSL cache even when writing in it because there is
	 * one cache per thread, it only prevents to flush it from the CLI in
	 * another thread. However, we also write-lock our session element while
	 * updating it to make sure no other thread is reading it while we're copying
	 * or releasing it.
	 */

	if (!(s->ssl_ctx.options & SRV_SSL_O_NO_REUSE)) {
		int len;
		unsigned char *ptr;
		const char *sni;

		/* determine the required len to store this new session */
		len = i2d_SSL_SESSION(sess, NULL);
		sni = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
		HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);

		ptr = s->ssl_ctx.reused_sess[tid].ptr;

		/* we're updating the possibly shared session right now */
		HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);

		if (!ptr || s->ssl_ctx.reused_sess[tid].allocated_size < len) {
			/* insufficient storage, reallocate */
			len = (len + 7) & -8; /* round to the nearest 8 bytes */
			ptr = realloc(ptr, len);
			if (!ptr)
				free(s->ssl_ctx.reused_sess[tid].ptr);
			/* NOTE(review): on realloc failure <ptr> is NULL yet
			 * allocated_size is still set to <len> below; presumably
			 * harmless because every reader checks ptr first — confirm. */
			s->ssl_ctx.reused_sess[tid].ptr = ptr;
			s->ssl_ctx.reused_sess[tid].allocated_size = len;
		}

		if (ptr) {
			/* store the new session into ptr and advance it; save the
			 * resulting size. It's guaranteed to be equal to the returned
			 * len above, and the pointer to be advanced by as much.
			 */
			s->ssl_ctx.reused_sess[tid].size = i2d_SSL_SESSION(sess, &ptr);
		}

		/* done updating the session */

		/* Now we'll try to add or remove this entry as a valid one:
		 * - if no entry is set and we have one, let's share it
		 * - if our entry was set and we have no more, let's clear it
		 */
		old_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
		if (!s->ssl_ctx.reused_sess[tid].ptr && old_tid == tid + 1)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid
		else if (s->ssl_ctx.reused_sess[tid].ptr && !old_tid)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, tid + 1);

		if (s->ssl_ctx.reused_sess[tid].sni) {
			/* if the new sni is empty or isn't the same as the old one */
			if ((!sni) || strcmp(s->ssl_ctx.reused_sess[tid].sni, sni) != 0) {
				ha_free(&s->ssl_ctx.reused_sess[tid].sni);
				if (sni)
					s->ssl_ctx.reused_sess[tid].sni = strdup(sni);
			}
		} else if (sni) {
			/* if there wasn't an old sni but there is a new one */
			s->ssl_ctx.reused_sess[tid].sni = strdup(sni);
		}
		HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
		HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
	} else {
		/* session reuse disabled on this server: drop any stored session
		 * and retract our published slot if it was the advertised one */
		HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);

		if (s->ssl_ctx.reused_sess[tid].ptr) {
			HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
			ha_free(&s->ssl_ctx.reused_sess[tid].ptr);
			HA_RWLOCK_WRUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.reused_sess[tid].sess_lock);
		}

		old_tid = HA_ATOMIC_LOAD(&s->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
		if (old_tid == tid + 1)
			HA_ATOMIC_CAS(&s->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid

		HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
	}

	return 0;
}
+
+
/* SSL callback used on new session creation: ASN1-encode the session and
 * store it in the shared cache, keyed by its zero-padded session id.
 * The id and id-context are temporarily forced to zero length so they are
 * not duplicated inside the ASN1 payload, then restored before returning.
 * Returns 0 so OpenSSL does not keep an extra reference on <sess>.
 */
int sh_ssl_sess_new_cb(SSL *ssl, SSL_SESSION *sess)
{
	unsigned char encsess[SHSESS_MAX_DATA_LEN];           /* encoded session */
	unsigned char encid[SSL_MAX_SSL_SESSION_ID_LENGTH];   /* encoded id */
	unsigned char *p;
	int data_len;
	unsigned int sid_length;
	const unsigned char *sid_data;

	/* Session id is already stored in to key and session id is known
	 * so we don't store it to keep size.
	 * note: SSL_SESSION_set1_id is using
	 * a memcpy so we need to use a different pointer
	 * than sid_data or sid_ctx_data to avoid valgrind
	 * complaining.
	 */

	sid_data = SSL_SESSION_get_id(sess, &sid_length);

	/* copy value in an other buffer */
	memcpy(encid, sid_data, sid_length);

	/* pad with 0 */
	if (sid_length < SSL_MAX_SSL_SESSION_ID_LENGTH)
		memset(encid + sid_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH-sid_length);

	/* force length to zero to avoid ASN1 encoding */
	SSL_SESSION_set1_id(sess, encid, 0);

	/* force length to zero to avoid ASN1 encoding */
	SSL_SESSION_set1_id_context(sess, (const unsigned char *)SHCTX_APPNAME, 0);

	/* check if buffer is large enough for the ASN1 encoded session */
	data_len = i2d_SSL_SESSION(sess, NULL);
	if (data_len > SHSESS_MAX_DATA_LEN)
		goto err;

	p = encsess;

	/* process ASN1 session encoding before the lock */
	i2d_SSL_SESSION(sess, &p);


	/* store to cache */
	sh_ssl_sess_store(encid, encsess, data_len);
err:
	/* reset original length values */
	SSL_SESSION_set1_id(sess, encid, sid_length);
	SSL_SESSION_set1_id_context(sess, (const unsigned char *)SHCTX_APPNAME, strlen(SHCTX_APPNAME));

	return 0; /* do not increment session reference count */
}
+
/* SSL callback used on lookup an existing session cause none found in internal
 * cache. Looks up <key> (zero-padded to the tree key size) in the shared
 * cache, decodes the stored ASN1 blob and restores the original id/id-context.
 * Returns the decoded SSL_SESSION, or NULL on miss or decode failure.
 */
SSL_SESSION *sh_ssl_sess_get_cb(SSL *ssl, __OPENSSL_110_CONST__ unsigned char *key, int key_len, int *do_copy)
{
	struct sh_ssl_sess_hdr *sh_ssl_sess;
	unsigned char data[SHSESS_MAX_DATA_LEN], *p;
	unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
	SSL_SESSION *sess;
	struct shared_block *first;

	_HA_ATOMIC_INC(&global.shctx_lookups);

	/* allow the session to be freed automatically by openssl */
	*do_copy = 0;

	/* tree key is zeros padded sessionid */
	if (key_len < SSL_MAX_SSL_SESSION_ID_LENGTH) {
		memcpy(tmpkey, key, key_len);
		memset(tmpkey + key_len, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - key_len);
		key = tmpkey;
	}

	/* lock cache */
	shctx_wrlock(ssl_shctx);

	/* lookup for session */
	sh_ssl_sess = sh_ssl_sess_tree_lookup(key);
	if (!sh_ssl_sess) {
		/* no session found: unlock cache and exit */
		shctx_wrunlock(ssl_shctx);
		_HA_ATOMIC_INC(&global.shctx_misses);
		return NULL;
	}

	/* sh_ssl_sess (shared_block->data) is at the end of shared_block */
	first = sh_ssl_sess_first_block(sh_ssl_sess);

	/* copy the encoded session (everything past the header) out of the row */
	shctx_row_data_get(ssl_shctx, first, data, sizeof(struct sh_ssl_sess_hdr), first->len-sizeof(struct sh_ssl_sess_hdr));

	shctx_wrunlock(ssl_shctx);

	/* decode ASN1 session */
	p = data;
	/* NOTE(review): first->len is re-read here after the lock was released;
	 * looks safe only if the row cannot be recycled concurrently — confirm. */
	sess = d2i_SSL_SESSION(NULL, (const unsigned char **)&p, first->len-sizeof(struct sh_ssl_sess_hdr));
	/* Reset session id and session id context */
	if (sess) {
		SSL_SESSION_set1_id(sess, key, key_len);
		SSL_SESSION_set1_id_context(sess, (const unsigned char *)SHCTX_APPNAME, strlen(SHCTX_APPNAME));
	}

	return sess;
}
+
+
+/* SSL callback used to signal session is no more used in internal cache */
+void sh_ssl_sess_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess)
+{
+ struct sh_ssl_sess_hdr *sh_ssl_sess;
+ unsigned char tmpkey[SSL_MAX_SSL_SESSION_ID_LENGTH];
+ unsigned int sid_length;
+ const unsigned char *sid_data;
+ (void)ctx;
+
+ sid_data = SSL_SESSION_get_id(sess, &sid_length);
+ /* tree key is zeros padded sessionid */
+ if (sid_length < SSL_MAX_SSL_SESSION_ID_LENGTH) {
+ memcpy(tmpkey, sid_data, sid_length);
+ memset(tmpkey+sid_length, 0, SSL_MAX_SSL_SESSION_ID_LENGTH - sid_length);
+ sid_data = tmpkey;
+ }
+
+ shctx_wrlock(ssl_shctx);
+
+ /* lookup for session */
+ sh_ssl_sess = sh_ssl_sess_tree_lookup(sid_data);
+ if (sh_ssl_sess) {
+ /* free session */
+ sh_ssl_sess_tree_delete(sh_ssl_sess);
+ }
+
+ /* unlock cache */
+ shctx_wrunlock(ssl_shctx);
+}
+
+/* Set session cache mode to server and disable openssl internal cache.
+ * Set shared cache callbacks on an ssl context.
+ * Shared context MUST be firstly initialized */
+void ssl_set_shctx(SSL_CTX *ctx)
+{
+ SSL_CTX_set_session_id_context(ctx, (const unsigned char *)SHCTX_APPNAME, strlen(SHCTX_APPNAME));
+
+ if (!ssl_shctx) {
+ SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_OFF);
+ return;
+ }
+
+ SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER |
+ SSL_SESS_CACHE_NO_INTERNAL |
+ SSL_SESS_CACHE_NO_AUTO_CLEAR);
+
+ /* Set callbacks */
+ SSL_CTX_sess_set_new_cb(ctx, sh_ssl_sess_new_cb);
+ SSL_CTX_sess_set_get_cb(ctx, sh_ssl_sess_get_cb);
+ SSL_CTX_sess_set_remove_cb(ctx, sh_ssl_sess_remove_cb);
+}
+
+/*
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format
+ *
+ * The format is:
+ * * <Label> <space> <ClientRandom> <space> <Secret>
+ * We only need to copy the secret as there is a sample fetch for the ClientRandom
+ */
+
#ifdef HAVE_SSL_KEYLOG
/* OpenSSL keylog callback: parse one NSS Key Log Format line
 * ("<Label> <ClientRandom> <Secret>") and store a copy of the secret in the
 * slot of the per-connection ssl_keylog structure matching the label. Only
 * the secret is kept (the ClientRandom has its own sample fetch). The copy
 * is dropped when the label is unknown or the slot is already filled.
 */
void SSL_CTX_keylog(const SSL *ssl, const char *line)
{
	struct ssl_keylog *keylog;
	char *lastarg = NULL;
	char *dst = NULL;
	char **target = NULL;

#ifdef USE_QUIC_OPENSSL_COMPAT
	quic_tls_compat_keylog_callback(ssl, line);
#endif
	keylog = SSL_get_ex_data(ssl, ssl_keylog_index);
	if (!keylog)
		return;

	/* the secret is the last space-separated token on the line */
	lastarg = strrchr(line, ' ');
	if (lastarg == NULL)
		return;
	lastarg++;

	dst = pool_alloc(pool_head_ssl_keylog_str);
	if (!dst)
		return;

	strncpy(dst, lastarg, SSL_KEYLOG_MAX_SECRET_SIZE-1);
	dst[SSL_KEYLOG_MAX_SECRET_SIZE-1] = '\0';

	/* Select the destination slot from the label. Each strncmp uses the
	 * length of the very literal it compares against (the old code took
	 * strlen("CLIENT RANDOM ") while matching "CLIENT_RANDOM ", which only
	 * worked because both strings happen to be the same length). */
	if (strncmp(line, "CLIENT_RANDOM ", strlen("CLIENT_RANDOM ")) == 0)
		target = &keylog->client_random;
	else if (strncmp(line, "CLIENT_EARLY_TRAFFIC_SECRET ", strlen("CLIENT_EARLY_TRAFFIC_SECRET ")) == 0)
		target = &keylog->client_early_traffic_secret;
	else if (strncmp(line, "CLIENT_HANDSHAKE_TRAFFIC_SECRET ", strlen("CLIENT_HANDSHAKE_TRAFFIC_SECRET ")) == 0)
		target = &keylog->client_handshake_traffic_secret;
	else if (strncmp(line, "SERVER_HANDSHAKE_TRAFFIC_SECRET ", strlen("SERVER_HANDSHAKE_TRAFFIC_SECRET ")) == 0)
		target = &keylog->server_handshake_traffic_secret;
	else if (strncmp(line, "CLIENT_TRAFFIC_SECRET_0 ", strlen("CLIENT_TRAFFIC_SECRET_0 ")) == 0)
		target = &keylog->client_traffic_secret_0;
	else if (strncmp(line, "SERVER_TRAFFIC_SECRET_0 ", strlen("SERVER_TRAFFIC_SECRET_0 ")) == 0)
		target = &keylog->server_traffic_secret_0;
	else if (strncmp(line, "EARLY_EXPORTER_SECRET ", strlen("EARLY_EXPORTER_SECRET ")) == 0)
		target = &keylog->early_exporter_secret;
	else if (strncmp(line, "EXPORTER_SECRET ", strlen("EXPORTER_SECRET ")) == 0)
		target = &keylog->exporter_secret;
	else
		goto error; /* unknown label */

	if (*target)
		goto error; /* slot already filled: keep the first secret */
	*target = dst;

	return;

error:
	pool_free(pool_head_ssl_keylog_str, dst);

	return;
}
#endif
+
/*
 * This function applies the SSL configuration on a SSL_CTX.
 * <ssl_conf> holds optional per-certificate ("crt-list") overrides and may be
 * NULL, in which case only the bind-line defaults from <bind_conf> are used.
 * It returns a set of ERR_* flags (0 on success) and appends error messages
 * to the <err> buffer.
 */
static int ssl_sock_prepare_ctx(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf, SSL_CTX *ctx, char **err)
{
	struct proxy *curproxy = bind_conf->frontend;
	int cfgerr = 0;
	int verify = SSL_VERIFY_NONE;
	struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
	const char *conf_ciphers;
#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	const char *conf_ciphersuites;
#endif
	const char *conf_curves = NULL;
	X509_STORE *store = SSL_CTX_get_cert_store(ctx);
#if defined(SSL_CTX_set1_sigalgs_list)
	const char *conf_sigalgs = NULL;
#endif
#if defined(SSL_CTX_set1_client_sigalgs_list)
	const char *conf_client_sigalgs = NULL;
#endif

	if (ssl_conf) {
		struct tls_version_filter *conf_ssl_methods = &ssl_conf->ssl_methods;
		int i, min, max;
		int flags = MC_SSL_O_ALL;

		/* Real min and max should be determinate with configuration and openssl's capabilities */
		min = conf_ssl_methods->min ? conf_ssl_methods->min : bind_conf->ssl_conf.ssl_methods.min;
		max = conf_ssl_methods->max ? conf_ssl_methods->max : bind_conf->ssl_conf.ssl_methods.max;
		/* mark every version below <min> and above <max> as disabled in <flags> */
		if (min)
			flags |= (methodVersions[min].flag - 1);
		if (max)
			flags |= ~((methodVersions[max].flag << 1) - 1);
		/* recompute the effective boundaries from versions openssl supports
		 * that were not disabled above */
		min = max = CONF_TLSV_NONE;
		for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
			if (methodVersions[i].option && !(flags & methodVersions[i].flag)) {
				if (min)
					max = i;
				else
					min = max = i;
			}
		/* save real min/max */
		conf_ssl_methods->min = min;
		conf_ssl_methods->max = max;
		if (!min) {
			memprintf(err, "%sProxy '%s': all SSL/TLS versions are disabled for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", bind_conf->frontend->id, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
	}

	/* per-certificate "verify" overrides the bind-line setting */
	switch ((ssl_conf && ssl_conf->verify) ? ssl_conf->verify : bind_conf->ssl_conf.verify) {
	case SSL_SOCK_VERIFY_NONE:
		verify = SSL_VERIFY_NONE;
		break;
	case SSL_SOCK_VERIFY_OPTIONAL:
		verify = SSL_VERIFY_PEER;
		break;
	case SSL_SOCK_VERIFY_REQUIRED:
		verify = SSL_VERIFY_PEER|SSL_VERIFY_FAIL_IF_NO_PEER_CERT;
		break;
	}
	SSL_CTX_set_verify(ctx, verify, ssl_sock_bind_verifycbk);
	if (verify & SSL_VERIFY_PEER) {
		char *ca_file = (ssl_conf && ssl_conf->ca_file) ? ssl_conf->ca_file : bind_conf->ssl_conf.ca_file;
		char *ca_verify_file = (ssl_conf && ssl_conf->ca_verify_file) ? ssl_conf->ca_verify_file : bind_conf->ssl_conf.ca_verify_file;
		char *crl_file = (ssl_conf && ssl_conf->crl_file) ? ssl_conf->crl_file : bind_conf->ssl_conf.crl_file;
		if (ca_file || ca_verify_file) {
			/* set CAfile to verify */
			if (ca_file && !ssl_set_verify_locations_file(ctx, ca_file)) {
				memprintf(err, "%sProxy '%s': unable to set CA file '%s' for bind '%s' at [%s:%d].\n",
				          err && *err ? *err : "", curproxy->id, ca_file, bind_conf->arg, bind_conf->file, bind_conf->line);
				cfgerr |= ERR_ALERT | ERR_FATAL;
			}
			if (ca_verify_file && !ssl_set_verify_locations_file(ctx, ca_verify_file)) {
				memprintf(err, "%sProxy '%s': unable to set CA-no-names file '%s' for bind '%s' at [%s:%d].\n",
				          err && *err ? *err : "", curproxy->id, ca_verify_file, bind_conf->arg, bind_conf->file, bind_conf->line);
				cfgerr |= ERR_ALERT | ERR_FATAL;
			}
			if (ca_file && !((ssl_conf && ssl_conf->no_ca_names) || bind_conf->ssl_conf.no_ca_names)) {
				/* set CA names for client cert request, function returns void */
				SSL_CTX_set_client_CA_list(ctx, SSL_dup_CA_list(ssl_get_client_ca_file(ca_file)));
			}
#ifdef USE_OPENSSL_WOLFSSL
			/* WolfSSL activates CRL checks by default so we need to disable it */
			X509_STORE_set_flags(store, 0) ;
#endif
		}
		else {
			memprintf(err, "%sProxy '%s': verify is enabled but no CA file specified for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
#ifdef X509_V_FLAG_CRL_CHECK
		if (crl_file) {

			if (!ssl_set_cert_crl_file(store, crl_file)) {
				memprintf(err, "%sProxy '%s': unable to configure CRL file '%s' for bind '%s' at [%s:%d].\n",
				          err && *err ? *err : "", curproxy->id, crl_file, bind_conf->arg, bind_conf->file, bind_conf->line);
				cfgerr |= ERR_ALERT | ERR_FATAL;
			}
			else {
				X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
			}
		}
#endif
		ERR_clear_error();
	}
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
	/* install the TLS ticket key callback when a key file was configured */
	if(bind_conf->keys_ref) {
		if (!SSL_CTX_set_tlsext_ticket_key_evp_cb(ctx, ssl_tlsext_ticket_key_cb)) {
			memprintf(err, "%sProxy '%s': unable to set callback for TLS ticket validation for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
	}
#endif

	/* plug the shared session cache callbacks */
	ssl_set_shctx(ctx);
	conf_ciphers = (ssl_conf && ssl_conf->ciphers) ? ssl_conf->ciphers : bind_conf->ssl_conf.ciphers;
	if (conf_ciphers &&
	    !SSL_CTX_set_cipher_list(ctx, conf_ciphers)) {
		memprintf(err, "%sProxy '%s': unable to set SSL cipher list to '%s' for bind '%s' at [%s:%d].\n",
		          err && *err ? *err : "", curproxy->id, conf_ciphers, bind_conf->arg, bind_conf->file, bind_conf->line);
		cfgerr |= ERR_ALERT | ERR_FATAL;
	}

#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	conf_ciphersuites = (ssl_conf && ssl_conf->ciphersuites) ? ssl_conf->ciphersuites : bind_conf->ssl_conf.ciphersuites;
	if (conf_ciphersuites &&
	    !SSL_CTX_set_ciphersuites(ctx, conf_ciphersuites)) {
		memprintf(err, "%sProxy '%s': unable to set TLS 1.3 cipher suites to '%s' for bind '%s' at [%s:%d].\n",
		          err && *err ? *err : "", curproxy->id, conf_ciphersuites, bind_conf->arg, bind_conf->file, bind_conf->line);
		cfgerr |= ERR_ALERT | ERR_FATAL;
	}
#endif

#ifndef OPENSSL_NO_DH
	/* lazily build the default DH parameters shared by all contexts */
	if (!local_dh_1024)
		local_dh_1024 = ssl_get_dh_1024();
	if (!local_dh_2048)
		local_dh_2048 = ssl_get_dh_2048();
	if (!local_dh_4096)
		local_dh_4096 = ssl_get_dh_4096();
#endif /* OPENSSL_NO_DH */

	SSL_CTX_set_info_callback(ctx, ssl_sock_infocbk);
#ifdef SSL_CTRL_SET_MSG_CALLBACK
	SSL_CTX_set_msg_callback(ctx, ssl_sock_msgcbk);
#endif
#ifdef HAVE_SSL_KEYLOG
	/* only activate the keylog callback if it was required to prevent performance loss */
	if (global_ssl.keylog > 0)
		SSL_CTX_set_keylog_callback(ctx, SSL_CTX_keylog);
#endif

#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	/* NPN advertisement: per-certificate setting wins over the bind line */
	ssl_conf_cur = NULL;
	if (ssl_conf && ssl_conf->npn_str)
		ssl_conf_cur = ssl_conf;
	else if (bind_conf->ssl_conf.npn_str)
		ssl_conf_cur = &bind_conf->ssl_conf;
	if (ssl_conf_cur)
		SSL_CTX_set_next_protos_advertised_cb(ctx, ssl_sock_advertise_npn_protos, ssl_conf_cur);
#endif
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	/* ALPN selection: per-certificate setting wins over the bind line */
	ssl_conf_cur = NULL;
	if (ssl_conf && ssl_conf->alpn_str)
		ssl_conf_cur = ssl_conf;
	else if (bind_conf->ssl_conf.alpn_str)
		ssl_conf_cur = &bind_conf->ssl_conf;
	if (ssl_conf_cur && ssl_conf_cur->alpn_len)
		SSL_CTX_set_alpn_select_cb(ctx, ssl_sock_advertise_alpn_protos, ssl_conf_cur);
#endif
#if defined(SSL_CTX_set1_curves_list)
	conf_curves = (ssl_conf && ssl_conf->curves) ? ssl_conf->curves : bind_conf->ssl_conf.curves;
	if (conf_curves) {
		if (!SSL_CTX_set1_curves_list(ctx, conf_curves)) {
			memprintf(err, "%sProxy '%s': unable to set SSL curves list to '%s' for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, conf_curves, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
		(void)SSL_CTX_set_ecdh_auto(ctx, 1);
	}
#endif /* defined(SSL_CTX_set1_curves_list) */

	if (!conf_curves) {
		/* no "curves": fall back to the "ecdhe" keyword, whose handling
		 * depends on the openssl generation */
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
#if defined(SSL_CTX_set1_curves_list)
		const char *ecdhe = (ssl_conf && ssl_conf->ecdhe) ? ssl_conf->ecdhe :
			(bind_conf->ssl_conf.ecdhe ? bind_conf->ssl_conf.ecdhe :
			 NULL);

		if (ecdhe && SSL_CTX_set1_curves_list(ctx, ecdhe) == 0) {
			memprintf(err, "%sProxy '%s': unable to set elliptic named curve to '%s' for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, ecdhe, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
#endif /* defined(SSL_CTX_set1_curves_list) */
#else
#if defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH)
		int i;
		EC_KEY *ecdh;

		const char *ecdhe = (ssl_conf && ssl_conf->ecdhe) ? ssl_conf->ecdhe :
			(bind_conf->ssl_conf.ecdhe ? bind_conf->ssl_conf.ecdhe :
			 ECDHE_DEFAULT_CURVE);

		i = OBJ_sn2nid(ecdhe);
		if (!i || ((ecdh = EC_KEY_new_by_curve_name(i)) == NULL)) {
			memprintf(err, "%sProxy '%s': unable to set elliptic named curve to '%s' for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, ecdhe, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
		else {
			SSL_CTX_set_tmp_ecdh(ctx, ecdh);
			EC_KEY_free(ecdh);
		}
#endif /* defined(SSL_CTX_set_tmp_ecdh) && !defined(OPENSSL_NO_ECDH) */
#endif /* HA_OPENSSL_VERSION_NUMBER >= 0x10101000L */
	}

#if defined(SSL_CTX_set1_sigalgs_list)
	conf_sigalgs = (ssl_conf && ssl_conf->sigalgs) ? ssl_conf->sigalgs : bind_conf->ssl_conf.sigalgs;
	if (conf_sigalgs) {
		if (!SSL_CTX_set1_sigalgs_list(ctx, conf_sigalgs)) {
			memprintf(err, "%sProxy '%s': unable to set SSL Signature Algorithm list to '%s' for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, conf_sigalgs, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
	}
#endif

#if defined(SSL_CTX_set1_client_sigalgs_list)
	conf_client_sigalgs = (ssl_conf && ssl_conf->client_sigalgs) ? ssl_conf->client_sigalgs : bind_conf->ssl_conf.client_sigalgs;
	if (conf_client_sigalgs) {
		if (!SSL_CTX_set1_client_sigalgs_list(ctx, conf_client_sigalgs)) {
			memprintf(err, "%sProxy '%s': unable to set SSL Signature Algorithm list to '%s' for bind '%s' at [%s:%d].\n",
			          err && *err ? *err : "", curproxy->id, conf_client_sigalgs, bind_conf->arg, bind_conf->file, bind_conf->line);
			cfgerr |= ERR_ALERT | ERR_FATAL;
		}
	}
#endif

#ifdef USE_QUIC_OPENSSL_COMPAT
	if (!quic_tls_compat_init(bind_conf, ctx))
		cfgerr |= ERR_ALERT | ERR_FATAL;
#endif

	return cfgerr;
}
+
+
+/*
+ * Prepare the SSL_CTX based on the bind line configuration.
+ * Since the CA file loading is made depending on the verify option of the bind
+ * line, the link between the SSL_CTX and the CA file tree entry is made here.
+ * If we want to create a link between the CA file entry and the corresponding
+ * ckch instance (for CA file hot update), it needs to be done after
+ * ssl_sock_prepare_ctx.
+ * Returns 0 in case of success.
+ */
+int ssl_sock_prep_ctx_and_inst(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf,
+ SSL_CTX *ctx, struct ckch_inst *ckch_inst, char **err)
+{
+ int errcode = 0;
+
+ errcode |= ssl_sock_prepare_ctx(bind_conf, ssl_conf, ctx, err);
+ if (!errcode && ckch_inst)
+ ckch_inst_add_cafile_link(ckch_inst, bind_conf, ssl_conf, NULL);
+
+ return errcode;
+}
+
/* Check whether <hostname> matches <pattern> following the wildcard rules of
 * RFC 6125 section 6.4.3: at most one '*' is accepted, it must appear in the
 * leftmost label of the pattern, and it may be surrounded by literal
 * characters inside that label. All comparisons are case-insensitive.
 * Returns 1 on match, 0 otherwise.
 */
static int ssl_sock_srv_hostcheck(const char *pattern, const char *hostname)
{
	const char *wildcard = NULL;
	const char *pat_label_end;
	const char *host_label_end;
	size_t head_len, tail_len;

	/* an exact (case-insensitive) match needs no wildcard logic */
	if (strcasecmp(pattern, hostname) == 0)
		return 1;

	/* scan the leftmost label of the pattern, remembering a single '*' */
	for (pat_label_end = pattern; *pat_label_end != '.'; pat_label_end++) {
		if (*pat_label_end == '\0')
			return 0; /* single-label pattern cannot wildcard-match */
		if (*pat_label_end == '*') {
			if (wildcard)
				return 0; /* more than one wildcard is forbidden */
			wildcard = pat_label_end;
		}
	}

	/* not an exact match and no wildcard: no way to match */
	if (!wildcard)
		return 0;

	/* every label after the first must match verbatim */
	host_label_end = strchr(hostname, '.');
	if (!host_label_end
	    || strcasecmp(pat_label_end, host_label_end) != 0)
		return 0;

	/* the hostname's first label must be long enough to cover the
	 * pattern's literal characters around the wildcard */
	if (host_label_end - hostname < (pat_label_end - pattern) - 1)
		return 0;

	/* finally compare the literal parts before and after the '*' */
	head_len = wildcard - pattern;
	tail_len = pat_label_end - (wildcard + 1);
	if (head_len && strncasecmp(pattern, hostname, head_len) != 0)
		return 0;
	if (tail_len && strncasecmp(wildcard + 1, host_label_end - tail_len, tail_len) != 0)
		return 0;

	return 1;
}
+
/* X509 verification callback used on the backend side: once the chain has
 * been validated by OpenSSL (<ok> non-zero), check that the server leaf
 * certificate's SAN dNSName entries or subject CN match the expected name
 * (the SNI we sent, or the "verifyhost" setting). Returns the (possibly
 * downgraded) verification status; on mismatch it also records a
 * CO_ER_SSL_MISMATCH* error code on the connection.
 */
static int ssl_sock_srv_verifycbk(int ok, X509_STORE_CTX *ctx)
{
	SSL *ssl;
	struct connection *conn;
	struct ssl_sock_ctx *ssl_ctx;
	const char *servername;
	const char *sni;

	int depth;
	X509 *cert;
	STACK_OF(GENERAL_NAME) *alt_names;
	int i;
	X509_NAME *cert_subject;
	char *str;

	/* chain already rejected by openssl: nothing more to check */
	if (ok == 0)
		return ok;

	ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx());
	conn = SSL_get_ex_data(ssl, ssl_app_data_index);
	ssl_ctx = __conn_get_ssl_sock_ctx(conn);

	/* We're checking if the provided hostnames match the desired one. The
	 * desired hostname comes from the SNI we presented if any, or if not
	 * provided then it may have been explicitly stated using a "verifyhost"
	 * directive. If neither is set, we don't care about the name so the
	 * verification is OK.
	 */
	servername = SSL_get_servername(ssl_ctx->ssl, TLSEXT_NAMETYPE_host_name);
	sni = servername;
	if (!servername) {
		servername = __objt_server(conn->target)->ssl_ctx.verify_host;
		if (!servername)
			return ok;
	}

	/* We only need to verify the CN on the actual server cert,
	 * not the indirect CAs */
	depth = X509_STORE_CTX_get_error_depth(ctx);
	if (depth != 0)
		return ok;

	/* At this point, the cert is *not* OK unless we can find a
	 * hostname match */
	ok = 0;

	cert = X509_STORE_CTX_get_current_cert(ctx);
	/* It seems like this might happen if verify peer isn't set */
	if (!cert)
		return ok;

	/* first try every dNSName entry of the subjectAltName extension */
	alt_names = X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
	if (alt_names) {
		for (i = 0; !ok && i < sk_GENERAL_NAME_num(alt_names); i++) {
			GENERAL_NAME *name = sk_GENERAL_NAME_value(alt_names, i);
			if (name->type == GEN_DNS) {
#if HA_OPENSSL_VERSION_NUMBER < 0x00907000L
				if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.ia5) >= 0) {
#else
				if (ASN1_STRING_to_UTF8((unsigned char **)&str, name->d.dNSName) >= 0) {
#endif
					ok = ssl_sock_srv_hostcheck(str, servername);
					OPENSSL_free(str);
				}
			}
		}
		sk_GENERAL_NAME_pop_free(alt_names, GENERAL_NAME_free);
	}

	/* then fall back to every commonName entry of the subject */
	cert_subject = X509_get_subject_name(cert);
	i = -1;
	while (!ok && (i = X509_NAME_get_index_by_NID(cert_subject, NID_commonName, i)) != -1) {
		X509_NAME_ENTRY *entry = X509_NAME_get_entry(cert_subject, i);
		ASN1_STRING *value;
		value = X509_NAME_ENTRY_get_data(entry);
		if (ASN1_STRING_to_UTF8((unsigned char **)&str, value) >= 0) {
			ok = ssl_sock_srv_hostcheck(str, servername);
			OPENSSL_free(str);
		}
	}

	/* report the mismatch and indicate if SNI was used or not */
	if (!ok && !conn->err_code)
		conn->err_code = sni ? CO_ER_SSL_MISMATCH_SNI : CO_ER_SSL_MISMATCH;
	return ok;
}
+
+/* prepare ssl context from servers options. Returns an error count */
+int ssl_sock_prepare_srv_ctx(struct server *srv)
+{
+ int cfgerr = 0;
+ SSL_CTX *ctx;
+ /* Automatic memory computations need to know we use SSL there
+ * If this is an internal proxy, don't use it for the computation */
+ if (!(srv->proxy->cap & PR_CAP_INT))
+ global.ssl_used_backend = 1;
+
+ /* Initiate SSL context for current server */
+ if (!srv->ssl_ctx.reused_sess) {
+ if ((srv->ssl_ctx.reused_sess = calloc(1, global.nbthread*sizeof(*srv->ssl_ctx.reused_sess))) == NULL) {
+ ha_alert("out of memory.\n");
+ cfgerr++;
+ return cfgerr;
+ }
+ }
+ if (srv->use_ssl == 1)
+ srv->xprt = &ssl_sock;
+
+ if (srv->ssl_ctx.client_crt) {
+ const int create_if_none = srv->flags & SRV_F_DYNAMIC ? 0 : 1;
+ char *err = NULL;
+ int err_code = 0;
+
+ /* If there is a crt keyword there, the SSL_CTX will be created here. */
+ err_code = ssl_sock_load_srv_cert(srv->ssl_ctx.client_crt, srv, create_if_none, &err);
+ if (err_code != ERR_NONE) {
+ if ((err_code & ERR_WARN) && !(err_code & ERR_ALERT))
+ ha_warning("%s", err);
+ else
+ ha_alert("%s", err);
+
+ if (err_code & (ERR_FATAL|ERR_ABORT))
+ cfgerr++;
+ }
+ ha_free(&err);
+ }
+
+ ctx = srv->ssl_ctx.ctx;
+
+ /* The context will be uninitialized if there wasn't any "cert" option
+ * in the server line. */
+ if (!ctx) {
+ ctx = SSL_CTX_new(SSLv23_client_method());
+ if (!ctx) {
+ ha_alert("unable to allocate ssl context.\n");
+ cfgerr++;
+ return cfgerr;
+ }
+
+ srv->ssl_ctx.ctx = ctx;
+ }
+
+ cfgerr += ssl_sock_prep_srv_ctx_and_inst(srv, srv->ssl_ctx.ctx, srv->ssl_ctx.inst);
+
+ return cfgerr;
+}
+
+/* Initialize an SSL context that will be used on the backend side.
+ * Sets protocol version bounds, SSL options/modes, peer verification and
+ * CA/CRL loading, session caching, cipher lists, NPN/ALPN and the
+ * curve/signature-algorithm lists from the server's configuration.
+ * Returns an error count (0 on success); problems are reported with
+ * ha_alert()/ha_warning().
+ */
+static int ssl_sock_prepare_srv_ssl_ctx(const struct server *srv, SSL_CTX *ctx)
+{
+	struct proxy *curproxy = srv->proxy;
+	int cfgerr = 0;
+	long options =
+		SSL_OP_ALL | /* all known workarounds for bugs */
+		SSL_OP_NO_SSLv2 |
+		SSL_OP_NO_COMPRESSION;
+	long mode =
+		SSL_MODE_ENABLE_PARTIAL_WRITE |
+		SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
+		SSL_MODE_RELEASE_BUFFERS |
+		SSL_MODE_SMALL_BUFFERS;
+	int verify = SSL_VERIFY_NONE;
+	const struct tls_version_filter *conf_ssl_methods = &srv->ssl_ctx.methods;
+	int i, min, max, hole;
+	int flags = MC_SSL_O_ALL;
+#if defined(SSL_CTX_set1_sigalgs_list)
+	const char *conf_sigalgs = NULL;
+#endif
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+	const char *conf_client_sigalgs = NULL;
+#endif
+#if defined(SSL_CTX_set1_curves_list)
+	const char *conf_curves = NULL;
+#endif
+
+	/* explicit ssl-min-ver/ssl-max-ver take precedence over the legacy
+	 * no-sslv3/no-tlsvXX flags, which are then ignored with a warning */
+	if (conf_ssl_methods->flags && (conf_ssl_methods->min || conf_ssl_methods->max))
+		ha_warning("no-sslv3/no-tlsv1x are ignored for this server. "
+			   "Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n");
+	else
+		flags = conf_ssl_methods->flags;
+
+	/* Real min and max should be determinate with configuration and openssl's capabilities */
+	if (conf_ssl_methods->min)
+		flags |= (methodVersions[conf_ssl_methods->min].flag - 1);
+	if (conf_ssl_methods->max)
+		flags |= ~((methodVersions[conf_ssl_methods->max].flag << 1) - 1);
+
+	/* find min, max and holes: a "hole" is an enabled-version gap between
+	 * two enabled versions, which OpenSSL cannot express with min/max */
+	min = max = CONF_TLSV_NONE;
+	hole = 0;
+	for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
+		/* version is in openssl && version not disable in configuration */
+		if (methodVersions[i].option && !(flags & methodVersions[i].flag)) {
+			if (min) {
+				if (hole) {
+					ha_warning("%s '%s': SSL/TLS versions range not contiguous for server '%s'. "
+						   "Hole find for %s. Use only 'ssl-min-ver' and 'ssl-max-ver' to fix.\n",
+						   proxy_type_str(curproxy), curproxy->id, srv->id,
+						   methodVersions[hole].name);
+					hole = 0;
+				}
+				max = i;
+			}
+			else {
+				min = max = i;
+			}
+		}
+		else {
+			if (min)
+				hole = i;
+		}
+	if (!min) {
+		ha_alert("%s '%s': all SSL/TLS versions are disabled for server '%s'.\n",
+			 proxy_type_str(curproxy), curproxy->id, srv->id);
+		cfgerr += 1;
+	}
+
+#if (HA_OPENSSL_VERSION_NUMBER < 0x1010000fL)
+	/* Keep force-xxx implementation as it is in older haproxy. It's a
+	   precautionary measure to avoid any surprise with older openssl version. */
+	if (min == max)
+		methodVersions[min].ctx_set_version(ctx, SET_CLIENT);
+	else
+		for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
+			if (flags & methodVersions[i].flag)
+				options |= methodVersions[i].option;
+#else   /* openssl >= 1.1.0 */
+	/* set the max_version is required to cap TLS version or activate new TLS (v1.3) */
+	methodVersions[min].ctx_set_version(ctx, SET_MIN);
+	methodVersions[max].ctx_set_version(ctx, SET_MAX);
+#endif
+
+	if (srv->ssl_ctx.options & SRV_SSL_O_NO_TLS_TICKETS)
+		options |= SSL_OP_NO_TICKET;
+	SSL_CTX_set_options(ctx, options);
+
+#ifdef SSL_MODE_ASYNC
+	if (global_ssl.async)
+		mode |= SSL_MODE_ASYNC;
+#endif
+	SSL_CTX_set_mode(ctx, mode);
+
+	/* the per-server "verify" setting overrides the global default */
+	if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
+		verify = SSL_VERIFY_PEER;
+	switch (srv->ssl_ctx.verify) {
+	case SSL_SOCK_VERIFY_NONE:
+		verify = SSL_VERIFY_NONE;
+		break;
+	case SSL_SOCK_VERIFY_REQUIRED:
+		verify = SSL_VERIFY_PEER;
+		break;
+	}
+	/* the callback is also installed when only verify_host is set, so that
+	 * the hostname can still be checked without failing the handshake */
+	SSL_CTX_set_verify(ctx, verify,
+			   (srv->ssl_ctx.verify_host || (verify & SSL_VERIFY_PEER)) ? ssl_sock_srv_verifycbk : NULL);
+	if (verify & SSL_VERIFY_PEER) {
+		if (srv->ssl_ctx.ca_file) {
+			/* set CAfile to verify */
+			if (!ssl_set_verify_locations_file(ctx, srv->ssl_ctx.ca_file)) {
+				ha_alert("unable to set CA file '%s'.\n",
+					 srv->ssl_ctx.ca_file);
+				cfgerr++;
+			}
+		}
+		else {
+			if (global.ssl_server_verify == SSL_SERVER_VERIFY_REQUIRED)
+				ha_alert("verify is enabled by default but no CA file specified. If you're running on a LAN where you're certain to trust the server's certificate, please set an explicit 'verify none' statement on the 'server' line, or use 'ssl-server-verify none' in the global section to disable server-side verifications by default.\n");
+			else
+				ha_alert("verify is enabled but no CA file specified.\n");
+			cfgerr++;
+		}
+#ifdef X509_V_FLAG_CRL_CHECK
+		if (srv->ssl_ctx.crl_file) {
+			X509_STORE *store = SSL_CTX_get_cert_store(ctx);
+
+			if (!ssl_set_cert_crl_file(store, srv->ssl_ctx.crl_file)) {
+				ha_alert("unable to configure CRL file '%s'.\n",
+					 srv->ssl_ctx.crl_file);
+				cfgerr++;
+			}
+			else {
+				X509_STORE_set_flags(store, X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
+			}
+		}
+#endif
+	}
+
+	/* cache sessions on our side only; new sessions are exported by the
+	 * callback instead of being kept in OpenSSL's internal store */
+	SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_CLIENT | SSL_SESS_CACHE_NO_INTERNAL_STORE);
+	SSL_CTX_sess_set_new_cb(ctx, ssl_sess_new_srv_cb);
+	if (srv->ssl_ctx.ciphers &&
+	    !SSL_CTX_set_cipher_list(ctx, srv->ssl_ctx.ciphers)) {
+		ha_alert("unable to set SSL cipher list to '%s'.\n",
+			 srv->ssl_ctx.ciphers);
+		cfgerr++;
+	}
+
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+	if (srv->ssl_ctx.ciphersuites &&
+	    !SSL_CTX_set_ciphersuites(ctx, srv->ssl_ctx.ciphersuites)) {
+		ha_alert("unable to set TLS 1.3 cipher suites to '%s'.\n",
+			 srv->ssl_ctx.ciphersuites);
+		cfgerr++;
+	}
+#endif
+#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
+	if (srv->ssl_ctx.npn_str)
+		SSL_CTX_set_next_proto_select_cb(ctx, ssl_sock_srv_select_protos, (struct server*)srv);
+#endif
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+	if (srv->ssl_ctx.alpn_str && srv->ssl_ctx.alpn_len)
+		SSL_CTX_set_alpn_protos(ctx, (unsigned char *)srv->ssl_ctx.alpn_str, srv->ssl_ctx.alpn_len);
+#endif
+
+#if defined(SSL_CTX_set1_sigalgs_list)
+	conf_sigalgs = srv->ssl_ctx.sigalgs;
+	if (conf_sigalgs) {
+		if (!SSL_CTX_set1_sigalgs_list(ctx, conf_sigalgs)) {
+			ha_alert("Proxy '%s': unable to set SSL Signature Algorithm list to '%s' for server '%s'.\n",
+				 curproxy->id, conf_sigalgs, srv->id);
+			cfgerr++;
+		}
+	}
+#endif
+#if defined(SSL_CTX_set1_client_sigalgs_list)
+	conf_client_sigalgs = srv->ssl_ctx.client_sigalgs;
+	if (conf_client_sigalgs) {
+		if (!SSL_CTX_set1_client_sigalgs_list(ctx, conf_client_sigalgs)) {
+			ha_alert("Proxy '%s': unable to set SSL Client Signature Algorithm list to '%s' for server '%s'.\n",
+				 curproxy->id, conf_client_sigalgs, srv->id);
+			cfgerr++;
+		}
+	}
+#endif
+
+#if defined(SSL_CTX_set1_curves_list)
+	conf_curves = srv->ssl_ctx.curves;
+	if (conf_curves) {
+		if (!SSL_CTX_set1_curves_list(ctx, conf_curves)) {
+			ha_alert("Proxy '%s': unable to set SSL curves list to '%s' for server '%s'.\n",
+				 curproxy->id, conf_curves, srv->id);
+			cfgerr++;
+		}
+	}
+#endif /* defined(SSL_CTX_set1_curves_list) */
+
+	return cfgerr;
+}
+
+/*
+ * Prepare the SSL_CTX used to reach a server, based on the server line
+ * configuration. The CA file is loaded according to the server's "verify"
+ * option, which is why the link between the SSL_CTX and the CA file tree
+ * entry is established here, after ssl_sock_prepare_srv_ssl_ctx() ran.
+ * The link to the corresponding ckch instance (for CA file hot update) is
+ * only created when the context was prepared without any error.
+ * Returns an error count (0 on success).
+ */
+int ssl_sock_prep_srv_ctx_and_inst(const struct server *srv, SSL_CTX *ctx,
+                                   struct ckch_inst *ckch_inst)
+{
+	int errors = ssl_sock_prepare_srv_ssl_ctx(srv, ctx);
+
+	/* only register the CA file link once the ctx is known to be valid */
+	if (errors == 0 && ckch_inst)
+		ckch_inst_add_cafile_link(ckch_inst, NULL, NULL, srv);
+
+	return errors;
+}
+
+
+/*
+ * Create an initial CTX used to start the SSL connections.
+ * May be used by the QUIC xprt which makes usage of SSL sessions
+ * initialized from SSL_CTXs.
+ * Returns 0 if succeeded, or something >0 if not.
+ */
+static int ssl_initial_ctx(struct bind_conf *bind_conf)
+{
+#ifdef USE_QUIC
+	/* QUIC listeners need their own dedicated initial context */
+	if (bind_conf->xprt == xprt_get(XPRT_QUIC))
+		return ssl_quic_initial_ctx(bind_conf);
+#endif
+	return ssl_sock_initial_ctx(bind_conf);
+}
+
+/* Walks down the two trees in bind_conf and prepares all certs. The pointer may
+ * be NULL, in which case nothing is done. Returns the number of errors
+ * encountered.
+ */
+int ssl_sock_prepare_all_ctx(struct bind_conf *bind_conf)
+{
+	struct ebmb_node *node;
+	struct sni_ctx *sni;
+	int err = 0;       /* number of errors returned to the caller */
+	int errcode = 0;   /* accumulated ERR_* flags from the prep calls */
+	char *errmsg = NULL;
+
+	/* Automatic memory computations need to know we use SSL there */
+	global.ssl_used_frontend = 1;
+
+	/* Create initial_ctx used to start the ssl connection before do switchctx */
+	if (!bind_conf->initial_ctx) {
+		err += ssl_initial_ctx(bind_conf);
+		/* It should not be necessary to call this function, but it's
+		   necessary first to check and move all initialisation related
+		   to initial_ctx in ssl_initial_ctx. */
+		errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, NULL, bind_conf->initial_ctx, NULL, &errmsg);
+	}
+	if (bind_conf->default_ctx) {
+		errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, bind_conf->default_ssl_conf, bind_conf->default_ctx, bind_conf->default_inst, &errmsg);
+	}
+
+	/* walk the exact-name SNI tree */
+	node = ebmb_first(&bind_conf->sni_ctx);
+	while (node) {
+		sni = ebmb_entry(node, struct sni_ctx, name);
+		if (!sni->order && sni->ctx != bind_conf->default_ctx) {
+			/* only initialize the CTX on its first occurrence and
+			   if it is not the default_ctx */
+			errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, sni->conf, sni->ctx, sni->ckch_inst, &errmsg);
+		}
+		node = ebmb_next(node);
+	}
+
+	/* walk the wildcard SNI tree, same rules as above */
+	node = ebmb_first(&bind_conf->sni_w_ctx);
+	while (node) {
+		sni = ebmb_entry(node, struct sni_ctx, name);
+		if (!sni->order && sni->ctx != bind_conf->default_ctx) {
+			/* only initialize the CTX on its first occurrence and
+			   if it is not the default_ctx */
+			errcode |= ssl_sock_prep_ctx_and_inst(bind_conf, sni->conf, sni->ctx, sni->ckch_inst, &errmsg);
+		}
+		node = ebmb_next(node);
+	}
+
+	/* NOTE(review): errmsg reflects whatever the prep calls left in it;
+	 * presumably the callee accumulates/replaces it via memprintf —
+	 * confirm that earlier messages are not leaked by overwrites. */
+	if (errcode & ERR_WARN) {
+		ha_warning("%s", errmsg);
+	} else if (errcode & ERR_CODE) {
+		ha_alert("%s", errmsg);
+		err++;
+	}
+
+	free(errmsg);
+	return err;
+}
+
+/* Prepares all the contexts for a bind_conf and allocates the shared SSL
+ * context if needed. Returns < 0 on error, 0 on success. The warnings and
+ * alerts are directly emitted since the rest of the stack does it below.
+ */
+int ssl_sock_prepare_bind_conf(struct bind_conf *bind_conf)
+{
+	struct proxy *px = bind_conf->frontend;
+	int alloc_ctx;
+	int err;
+
+	/* a certificate without 'ssl' on the bind line is only a warning */
+	if (!(bind_conf->options & BC_O_USE_SSL)) {
+		if (bind_conf->default_ctx) {
+			ha_warning("Proxy '%s': A certificate was specified but SSL was not enabled on bind '%s' at [%s:%d] (use 'ssl').\n",
+				   px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+		}
+		return 0;
+	}
+	/* no certificate: tolerated with strict-sni or generated certs,
+	 * fatal otherwise */
+	if (!bind_conf->default_ctx) {
+		if (bind_conf->strict_sni && !(bind_conf->options & BC_O_GENERATE_CERTS)) {
+			ha_warning("Proxy '%s': no SSL certificate specified for bind '%s' at [%s:%d], ssl connections will fail (use 'crt').\n",
+				   px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+		}
+		else {
+			ha_alert("Proxy '%s': no SSL certificate specified for bind '%s' at [%s:%d] (use 'crt').\n",
+				 px->id, bind_conf->arg, bind_conf->file, bind_conf->line);
+			return -1;
+		}
+	}
+	/* lazily allocate the process-wide shared session cache on first use */
+	if (!ssl_shctx && global.tune.sslcachesize) {
+		alloc_ctx = shctx_init(&ssl_shctx, global.tune.sslcachesize,
+		                       sizeof(struct sh_ssl_sess_hdr) + SHSESS_BLOCK_MIN_SIZE, -1,
+		                       sizeof(*sh_ssl_sess_tree));
+		if (alloc_ctx <= 0) {
+			if (alloc_ctx == SHCTX_E_INIT_LOCK)
+				ha_alert("Unable to initialize the lock for the shared SSL session cache. You can retry using the global statement 'tune.ssl.force-private-cache' but it could increase CPU usage due to renegotiations if nbproc > 1.\n");
+			else
+				ha_alert("Unable to allocate SSL session cache.\n");
+			return -1;
+		}
+		/* free block callback */
+		ssl_shctx->free_block = sh_ssl_sess_free_blocks;
+		/* init the root tree within the extra space */
+		sh_ssl_sess_tree = (void *)ssl_shctx + sizeof(struct shared_context);
+		*sh_ssl_sess_tree = EB_ROOT_UNIQUE;
+	}
+	err = 0;
+	/* initialize all certificate contexts */
+	err += ssl_sock_prepare_all_ctx(bind_conf);
+
+	/* initialize CA variables if the certificates generation is enabled */
+	err += ssl_sock_load_ca(bind_conf);
+
+	return -err;
+}
+
+/* Release the SSL resources allocated for a server. Most of the fields
+ * freed here must also be allocated in srv_ssl_settings_cpy(). All freed
+ * pointers that remain reachable through <srv> are reset to NULL so the
+ * structure can be safely reused or freed again.
+ */
+void ssl_sock_free_srv_ctx(struct server *srv)
+{
+#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
+	ha_free(&srv->ssl_ctx.alpn_str);
+#endif
+#ifdef OPENSSL_NPN_NEGOTIATED
+	ha_free(&srv->ssl_ctx.npn_str);
+#endif
+	if (srv->ssl_ctx.reused_sess) {
+		int i;
+
+		/* one cached session slot per thread, each with its own SNI copy */
+		for (i = 0; i < global.nbthread; i++) {
+			ha_free(&srv->ssl_ctx.reused_sess[i].ptr);
+			ha_free(&srv->ssl_ctx.reused_sess[i].sni);
+		}
+		ha_free(&srv->ssl_ctx.reused_sess);
+	}
+
+	/* SSL_CTX_free() is a no-op on NULL, no need to test first */
+	SSL_CTX_free(srv->ssl_ctx.ctx);
+	srv->ssl_ctx.ctx = NULL;
+
+	ha_free(&srv->ssl_ctx.ca_file);
+	ha_free(&srv->ssl_ctx.crl_file);
+	ha_free(&srv->ssl_ctx.client_crt);
+	ha_free(&srv->ssl_ctx.verify_host);
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+	ha_free(&srv->sni_expr);
+	release_sample_expr(srv->ssl_ctx.sni);
+	srv->ssl_ctx.sni = NULL;
+#endif
+	ha_free(&srv->ssl_ctx.ciphers);
+#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
+	ha_free(&srv->ssl_ctx.ciphersuites);
+#endif
+	/* If there is a certificate we must unlink the ckch instance. Clear
+	 * the pointer afterwards: the original left it dangling while ->ctx
+	 * was nulled, which invited a use-after-free on double release.
+	 */
+	ckch_inst_free(srv->ssl_ctx.inst);
+	srv->ssl_ctx.inst = NULL;
+}
+
+/* Walks down the two SNI trees in <bind_conf> and frees all the certificate
+ * contexts found there. The pointer may be NULL, in which case nothing is
+ * done. The initial_ctx and default_ctx are released and nullified too.
+ */
+void ssl_sock_free_all_ctx(struct bind_conf *bind_conf)
+{
+	struct ebmb_node *node, *next;
+	struct sni_ctx *sni;
+
+	/* exact-name SNI tree: grab the successor before deleting each node */
+	for (node = ebmb_first(&bind_conf->sni_ctx); node; node = next) {
+		next = ebmb_next(node);
+		sni = ebmb_entry(node, struct sni_ctx, name);
+		ebmb_delete(node);
+		SSL_CTX_free(sni->ctx);
+		LIST_DELETE(&sni->by_ckch_inst);
+		free(sni);
+	}
+
+	/* wildcard SNI tree, same processing */
+	for (node = ebmb_first(&bind_conf->sni_w_ctx); node; node = next) {
+		next = ebmb_next(node);
+		sni = ebmb_entry(node, struct sni_ctx, name);
+		ebmb_delete(node);
+		SSL_CTX_free(sni->ctx);
+		LIST_DELETE(&sni->by_ckch_inst);
+		free(sni);
+	}
+
+	SSL_CTX_free(bind_conf->initial_ctx);
+	bind_conf->initial_ctx = NULL;
+	SSL_CTX_free(bind_conf->default_ctx);
+	bind_conf->default_ctx = NULL;
+	bind_conf->default_inst = NULL;
+	bind_conf->default_ssl_conf = NULL;
+}
+
+
+/* Releases the global certificate storage at shutdown; registered below as
+ * a post-deinit callback.
+ */
+void ssl_sock_deinit(void)
+{
+	crtlist_deinit(); /* must be free'd before the ckchs */
+	ckch_deinit();
+}
+REGISTER_POST_DEINIT(ssl_sock_deinit);
+
+/* Destroys all the contexts for a bind_conf. This is used during deinit(). */
+void ssl_sock_destroy_bind_conf(struct bind_conf *bind_conf)
+{
+	ssl_sock_free_ca(bind_conf);
+	ssl_sock_free_all_ctx(bind_conf);
+	ssl_sock_free_ssl_conf(&bind_conf->ssl_conf);
+
+	/* release the CA-signing material used for certificate generation */
+	free(bind_conf->ca_sign_file);
+	bind_conf->ca_sign_file = NULL;
+	free(bind_conf->ca_sign_pass);
+	bind_conf->ca_sign_pass = NULL;
+
+	/* drop our reference on the TLS ticket keys, freeing them on last use */
+	if (bind_conf->keys_ref) {
+		if (--bind_conf->keys_ref->refcount == 0) {
+			free(bind_conf->keys_ref->filename);
+			free(bind_conf->keys_ref->tlskeys);
+			LIST_DELETE(&bind_conf->keys_ref->list);
+			free(bind_conf->keys_ref);
+		}
+		bind_conf->keys_ref = NULL;
+	}
+}
+
+/* Load the CA cert file and private key used to generate certificates when
+ * 'generate-certificates' is enabled on the bind line. Returns an error
+ * count (0 on success). On failure the BC_O_GENERATE_CERTS option is
+ * cleared so the feature is disabled for this bind_conf.
+ */
+int
+ssl_sock_load_ca(struct bind_conf *bind_conf)
+{
+	struct proxy *px = bind_conf->frontend;
+	struct ckch_data *data = NULL;
+	int ret = 0;
+	char *err = NULL;
+
+	if (!(bind_conf->options & BC_O_GENERATE_CERTS))
+		return ret;
+
+#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
+	/* seed the LRU cache of generated contexts and the serial counter */
+	if (global_ssl.ctx_cache) {
+		ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache);
+	}
+	ssl_ctx_lru_seed = (unsigned int)time(NULL);
+	ssl_ctx_serial = now_ms;
+#endif
+
+	if (!bind_conf->ca_sign_file) {
+		ha_alert("Proxy '%s': cannot enable certificate generation, "
+			 "no CA certificate File configured at [%s:%d].\n",
+			 px->id, bind_conf->file, bind_conf->line);
+		goto failed;
+	}
+
+	/* Allocate cert structure */
+	data = calloc(1, sizeof(*data));
+	if (!data) {
+		ha_alert("Proxy '%s': Failed to read CA certificate file '%s' at [%s:%d]. Chain allocation failure\n",
+			 px->id, bind_conf->ca_sign_file, bind_conf->file, bind_conf->line);
+		goto failed;
+	}
+
+	/* Try to parse file */
+	if (ssl_sock_load_files_into_ckch(bind_conf->ca_sign_file, data, &err)) {
+		ha_alert("Proxy '%s': Failed to read CA certificate file '%s' at [%s:%d]. Chain loading failed: %s\n",
+			 px->id, bind_conf->ca_sign_file, bind_conf->file, bind_conf->line, err);
+		free(err);
+		goto failed;
+	}
+
+	/* Fail if missing cert or pkey */
+	if ((!data->cert) || (!data->key)) {
+		ha_alert("Proxy '%s': Failed to read CA certificate file '%s' at [%s:%d]. Chain missing certificate or private key\n",
+			 px->id, bind_conf->ca_sign_file, bind_conf->file, bind_conf->line);
+		goto failed;
+	}
+
+	/* Final assignment to bind: ownership of <data> moves to bind_conf */
+	bind_conf->ca_sign_ckch = data;
+	return ret;
+
+ failed:
+	/* common cleanup path: release the half-built ckch and disable
+	 * certificate generation on this bind_conf */
+	if (data) {
+		ssl_sock_free_cert_key_and_chain_contents(data);
+		free(data);
+	}
+
+	bind_conf->options &= ~BC_O_GENERATE_CERTS;
+	ret++;
+	return ret;
+}
+
+/* Release the CA certificate and private key used to generate certificates,
+ * if any were loaded for this bind_conf.
+ */
+void
+ssl_sock_free_ca(struct bind_conf *bind_conf)
+{
+	if (!bind_conf->ca_sign_ckch)
+		return;
+
+	ssl_sock_free_cert_key_and_chain_contents(bind_conf->ca_sign_ckch);
+	ha_free(&bind_conf->ca_sign_ckch);
+}
+
+/*
+ * Try to allocate the BIO and SSL session objects of <conn> connection with <bio> and
+ * <ssl> as addresses, <bio_meth> as BIO method and <ssl_ctx> as SSL context inherited settings.
+ * Connect the allocated BIO to the allocated SSL session. Also set <ctx> as address of custom
+ * data for the BIO and store <conn> as user data of the SSL session object.
+ * Each allocation is attempted twice: on the first failure the pools are
+ * garbage-collected with pool_gc() and the whole sequence is restarted once.
+ * This is the responsibility of the caller to check the validity of all the pointers passed
+ * as parameters to this function.
+ * Return 0 if succeeded, -1 if not. If failed, sets the ->err_code member of <conn> to
+ * CO_ER_SSL_NO_MEM.
+ */
+int ssl_bio_and_sess_init(struct connection *conn, SSL_CTX *ssl_ctx,
+                          SSL **ssl, BIO **bio, BIO_METHOD *bio_meth, void *ctx)
+{
+	int retry = 1;
+
+ retry:
+	/* Alloc a new SSL session. */
+	*ssl = SSL_new(ssl_ctx);
+	if (!*ssl) {
+		if (!retry--)
+			goto err;
+
+		pool_gc(NULL);
+		goto retry;
+	}
+
+	*bio = BIO_new(bio_meth);
+	if (!*bio) {
+		SSL_free(*ssl);
+		*ssl = NULL;
+		if (!retry--)
+			goto err;
+
+		pool_gc(NULL);
+		goto retry;
+	}
+
+	/* from here the SSL session owns the BIO: SSL_free() below also
+	 * releases it, so the BIO must not be freed separately */
+	BIO_set_data(*bio, ctx);
+	SSL_set_bio(*ssl, *bio, *bio);
+
+	/* set connection pointer. */
+	if (!SSL_set_ex_data(*ssl, ssl_app_data_index, conn)) {
+		SSL_free(*ssl);
+		*ssl = NULL;
+		if (!retry--)
+			goto err;
+
+		pool_gc(NULL);
+		goto retry;
+	}
+
+	return 0;
+
+ err:
+	conn->err_code = CO_ER_SSL_NO_MEM;
+	return -1;
+}
+
+/* This function is called once every XPRT layer has been initialized; it
+ * lets the underlying transport start, then wakes the I/O tasklet up so the
+ * SSL handshake can be attempted. Returns 0 on success, or the underlying
+ * transport's negative error code.
+ */
+static int ssl_sock_start(struct connection *conn, void *xprt_ctx)
+{
+	struct ssl_sock_ctx *ctx = xprt_ctx;
+	int ret = 0;
+
+	if (ctx->xprt->start)
+		ret = ctx->xprt->start(conn, ctx->xprt_ctx);
+	if (ret < 0)
+		return ret;
+
+	tasklet_wakeup(ctx->wait_event.tasklet);
+	return 0;
+}
+
+/* Similar to increment_actconn() but for SSL connections. Atomically
+ * increments the global SSL connection counter unless global.maxsslconn is
+ * set and already reached. Returns the new counter value on success, or 0
+ * when the limit is hit (the counter is then left untouched).
+ */
+int increment_sslconn(void)
+{
+	unsigned int count, next_sslconn;
+
+	do {
+		count = global.sslconns;
+		if (global.maxsslconn && count >= global.maxsslconn) {
+			/* maxsslconn reached, report failure */
+			next_sslconn = 0;
+			goto end;
+		}
+
+		/* try to increment sslconns */
+		next_sslconn = count + 1;
+	} while (!_HA_ATOMIC_CAS(&global.sslconns, &count, next_sslconn) && __ha_cpu_relax());
+
+ end:
+	return next_sslconn;
+}
+
+/*
+ * This function is called if SSL * context is not yet allocated. The function
+ * is designed to be called before any other data-layer operation and sets the
+ * handshake flag on the connection. It is safe to call it multiple times.
+ * For outgoing (server-side) connections it also tries to resume a cached
+ * TLS session, first from the current thread's slot, then from the thread
+ * recorded in last_ssl_sess_tid.
+ * It returns 0 on success and -1 in error case.
+ */
+static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
+{
+	struct ssl_sock_ctx *ctx;
+	int next_sslconn = 0;
+
+	/* already initialized */
+	if (*xprt_ctx)
+		return 0;
+
+	ctx = pool_alloc(ssl_sock_ctx_pool);
+	if (!ctx) {
+		conn->err_code = CO_ER_SSL_NO_MEM;
+		return -1;
+	}
+	ctx->wait_event.tasklet = tasklet_new();
+	if (!ctx->wait_event.tasklet) {
+		conn->err_code = CO_ER_SSL_NO_MEM;
+		pool_free(ssl_sock_ctx_pool, ctx);
+		return -1;
+	}
+	ctx->wait_event.tasklet->process = ssl_sock_io_cb;
+	ctx->wait_event.tasklet->context = ctx;
+	ctx->wait_event.tasklet->state |= TASK_HEAVY; // assign it to the bulk queue during handshake
+	ctx->wait_event.events = 0;
+	ctx->sent_early_data = 0;
+	ctx->early_buf = BUF_NULL;
+	ctx->conn = conn;
+	ctx->subs = NULL;
+	ctx->xprt_st = 0;
+	ctx->xprt_ctx = NULL;
+	ctx->error_code = 0;
+
+	/* enforce the global maxsslconn limit before going any further */
+	next_sslconn = increment_sslconn();
+	if (!next_sslconn) {
+		conn->err_code = CO_ER_SSL_TOO_MANY;
+		goto err;
+	}
+
+	/* Only work with sockets for now, this should be adapted when we'll
+	 * add QUIC support.
+	 */
+	ctx->xprt = xprt_get(XPRT_RAW);
+	if (ctx->xprt->init) {
+		/* NOTE(review): if a later step fails, the err path does not
+		 * appear to release ctx->xprt_ctx — confirm whether the raw
+		 * xprt's init allocates anything that needs closing here. */
+		if (ctx->xprt->init(conn, &ctx->xprt_ctx) != 0)
+			goto err;
+	}
+
+	/* If it is in client mode initiate SSL session
+	   in connect state otherwise accept state */
+	if (objt_server(conn->target)) {
+		struct server *srv = __objt_server(conn->target);
+
+		if (ssl_bio_and_sess_init(conn, srv->ssl_ctx.ctx,
+		                          &ctx->ssl, &ctx->bio, ha_meth, ctx) == -1)
+			goto err;
+
+		SSL_set_connect_state(ctx->ssl);
+		HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.lock);
+		if (srv->ssl_ctx.reused_sess[tid].ptr) {
+			/* let's recreate a session from (ptr,size) and assign
+			 * it to ctx->ssl. Its refcount will be updated by the
+			 * creation and by the assignment, so after assigning
+			 * it or failing to, we must always free it to decrement
+			 * the refcount.
+			 */
+			const unsigned char *ptr = srv->ssl_ctx.reused_sess[tid].ptr;
+			SSL_SESSION *sess = d2i_SSL_SESSION(NULL, &ptr, srv->ssl_ctx.reused_sess[tid].size);
+
+			if (sess && !SSL_set_session(ctx->ssl, sess)) {
+				/* our cached copy is unusable: invalidate it and
+				 * drop the hint if it pointed at this thread */
+				uint old_tid = HA_ATOMIC_LOAD(&srv->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
+				if (old_tid == tid + 1)
+					HA_ATOMIC_CAS(&srv->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid
+				SSL_SESSION_free(sess);
+				HA_RWLOCK_WRLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[tid].sess_lock);
+				ha_free(&srv->ssl_ctx.reused_sess[tid].ptr);
+				HA_RWLOCK_WRTORD(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[tid].sess_lock);
+				if (srv->ssl_ctx.reused_sess[tid].sni)
+					SSL_set_tlsext_host_name(ctx->ssl, srv->ssl_ctx.reused_sess[tid].sni);
+				HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[tid].sess_lock);
+			} else if (sess) {
+				/* already assigned, not needed anymore */
+				SSL_SESSION_free(sess);
+				HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[tid].sess_lock);
+				if (srv->ssl_ctx.reused_sess[tid].sni)
+					SSL_set_tlsext_host_name(ctx->ssl, srv->ssl_ctx.reused_sess[tid].sni);
+				HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[tid].sess_lock);
+			}
+		} else {
+			/* No session available yet, let's see if we can pick one
+			 * from another thread. If old_tid is non-null, it designates
+			 * the index of a recently updated thread that might still have
+			 * a usable session. All threads are collectively responsible
+			 * for resetting the index if it fails.
+			 */
+			const unsigned char *ptr;
+			SSL_SESSION *sess;
+			uint old_tid = HA_ATOMIC_LOAD(&srv->ssl_ctx.last_ssl_sess_tid); // 0=none, >0 = tid + 1
+
+			if (old_tid) {
+				HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[old_tid-1].sess_lock);
+
+				ptr = srv->ssl_ctx.reused_sess[old_tid-1].ptr;
+				if (ptr) {
+					sess = d2i_SSL_SESSION(NULL, &ptr, srv->ssl_ctx.reused_sess[old_tid-1].size);
+					if (sess) {
+						if (!SSL_set_session(ctx->ssl, sess))
+							HA_ATOMIC_CAS(&srv->ssl_ctx.last_ssl_sess_tid, &old_tid, 0); // no more valid
+						SSL_SESSION_free(sess);
+					}
+				}
+
+				if (srv->ssl_ctx.reused_sess[old_tid-1].sni)
+					SSL_set_tlsext_host_name(ctx->ssl, srv->ssl_ctx.reused_sess[old_tid-1].sni);
+
+				HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.reused_sess[old_tid-1].sess_lock);
+			}
+		}
+		HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &srv->ssl_ctx.lock);
+
+		/* leave init state and start handshake */
+		conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;
+
+		_HA_ATOMIC_INC(&global.totalsslconns);
+		*xprt_ctx = ctx;
+		return 0;
+	}
+	else if (objt_listener(conn->target)) {
+		struct bind_conf *bc = __objt_listener(conn->target)->bind_conf;
+
+		if (ssl_bio_and_sess_init(conn, bc->initial_ctx,
+		                          &ctx->ssl, &ctx->bio, ha_meth, ctx) == -1)
+			goto err;
+
+#ifdef SSL_READ_EARLY_DATA_SUCCESS
+		if (bc->ssl_conf.early_data) {
+			b_alloc(&ctx->early_buf);
+			SSL_set_max_early_data(ctx->ssl,
+			    /* Only allow early data if we managed to allocate
+			     * a buffer.
+			     */
+			    (!b_is_null(&ctx->early_buf)) ?
+			    global.tune.bufsize - global.tune.maxrewrite : 0);
+		}
+#endif
+
+		SSL_set_accept_state(ctx->ssl);
+
+		/* leave init state and start handshake */
+		conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;
+#ifdef SSL_READ_EARLY_DATA_SUCCESS
+		if (bc->ssl_conf.early_data)
+			conn->flags |= CO_FL_EARLY_SSL_HS;
+#endif
+
+		_HA_ATOMIC_INC(&global.totalsslconns);
+		*xprt_ctx = ctx;
+		return 0;
+	}
+	/* don't know how to handle such a target */
+	conn->err_code = CO_ER_SSL_NO_TARGET;
+err:
+	/* roll back the sslconns counter taken above, then release the ctx */
+	if (next_sslconn)
+		_HA_ATOMIC_DEC(&global.sslconns);
+	if (ctx && ctx->wait_event.tasklet)
+		tasklet_free(ctx->wait_event.tasklet);
+	pool_free(ssl_sock_ctx_pool, ctx);
+	return -1;
+}
+
+
+/* This is the callback which is used when an SSL handshake is pending. It
+ * updates the FD status if it wants some polling before being called again.
+ * It returns 0 if it fails in a fatal way or needs to poll to go further,
+ * otherwise it returns non-zero and removes itself from the connection's
+ * flags (the bit is provided in <flag> by the caller).
+ */
+static int ssl_sock_handshake(struct connection *conn, unsigned int flag)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+ int ret;
+ struct ssl_counters *counters = NULL;
+ struct ssl_counters *counters_px = NULL;
+ struct listener *li;
+ struct server *srv;
+ socklen_t lskerr;
+ int skerr;
+
+
+ if (!conn_ctrl_ready(conn))
+ return 0;
+
+ /* get counters */
+ switch (obj_type(conn->target)) {
+ case OBJ_TYPE_LISTENER:
+ li = __objt_listener(conn->target);
+ counters = EXTRA_COUNTERS_GET(li->extra_counters, &ssl_stats_module);
+ counters_px = EXTRA_COUNTERS_GET(li->bind_conf->frontend->extra_counters_fe,
+ &ssl_stats_module);
+ break;
+
+ case OBJ_TYPE_SERVER:
+ srv = __objt_server(conn->target);
+ counters = EXTRA_COUNTERS_GET(srv->extra_counters, &ssl_stats_module);
+ counters_px = EXTRA_COUNTERS_GET(srv->proxy->extra_counters_be,
+ &ssl_stats_module);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!ctx)
+ goto out_error;
+
+ /* don't start calculating a handshake on a dead connection */
+ if (conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))
+ goto out_error;
+
+ /* FIXME/WT: for now we don't have a clear way to inspect the connection
+ * status from the lower layers, so let's check the FD directly. Ideally
+ * the xprt layers should provide some status indicating their knowledge
+ * of shutdowns or error.
+ */
+ BUG_ON(conn->flags & CO_FL_FDLESS);
+
+ skerr = 0;
+ lskerr = sizeof(skerr);
+ if ((getsockopt(conn->handle.fd, SOL_SOCKET, SO_ERROR, &skerr, &lskerr) < 0) ||
+ skerr != 0)
+ goto out_error;
+
+#ifdef SSL_READ_EARLY_DATA_SUCCESS
+ /*
+ * Check if we have early data. If we do, we have to read them
+ * before SSL_do_handshake() is called, And there's no way to
+ * detect early data, except to try to read them
+ */
+ if (conn->flags & CO_FL_EARLY_SSL_HS) {
+ size_t read_data = 0;
+
+ while (1) {
+ ret = SSL_read_early_data(ctx->ssl,
+ b_tail(&ctx->early_buf), b_room(&ctx->early_buf),
+ &read_data);
+ if (ret == SSL_READ_EARLY_DATA_ERROR)
+ goto check_error;
+ if (read_data > 0) {
+ conn->flags |= CO_FL_EARLY_DATA;
+ b_add(&ctx->early_buf, read_data);
+ }
+ if (ret == SSL_READ_EARLY_DATA_FINISH) {
+ conn->flags &= ~CO_FL_EARLY_SSL_HS;
+ if (!b_data(&ctx->early_buf))
+ b_free(&ctx->early_buf);
+ break;
+ }
+ }
+ }
+#endif
+ /* If we use SSL_do_handshake to process a reneg initiated by
+ * the remote peer, it sometimes returns SSL_ERROR_SSL.
+ * Usually SSL_write and SSL_read are used and process implicitly
+ * the reneg handshake.
+ * Here we use SSL_peek as a workaround for reneg.
+ */
+ if (!(conn->flags & CO_FL_WAIT_L6_CONN) && SSL_renegotiate_pending(ctx->ssl)) {
+ char c;
+
+ ret = SSL_peek(ctx->ssl, &c, 1);
+ if (ret <= 0) {
+ /* handshake may have not been completed, let's find why */
+ ret = SSL_get_error(ctx->ssl, ret);
+
+ if (ret == SSL_ERROR_WANT_WRITE) {
+ /* SSL handshake needs to write, L4 connection may not be ready */
+ if (!(ctx->wait_event.events & SUB_RETRY_SEND))
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND, &ctx->wait_event);
+ return 0;
+ }
+ else if (ret == SSL_ERROR_WANT_READ) {
+ /* handshake may have been completed but we have
+ * no more data to read.
+ */
+ if (!SSL_renegotiate_pending(ctx->ssl)) {
+ ret = 1;
+ goto reneg_ok;
+ }
+ /* SSL handshake needs to read, L4 connection is ready */
+ if (!(ctx->wait_event.events & SUB_RETRY_RECV))
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
+ return 0;
+ }
+#ifdef SSL_MODE_ASYNC
+ else if (ret == SSL_ERROR_WANT_ASYNC) {
+ ssl_async_process_fds(ctx);
+ return 0;
+ }
+#endif
+ else if (ret == SSL_ERROR_SYSCALL) {
+ /* if errno is null, then connection was successfully established */
+ if (!errno && conn->flags & CO_FL_WAIT_L4_CONN)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ if (!conn->err_code) {
+#if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
+ /* do not handle empty handshakes in BoringSSL or LibreSSL */
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+#else
+ int empty_handshake;
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
+ /* use SSL_get_state() in OpenSSL >= 1.1.0; SSL_state() is broken */
+ OSSL_HANDSHAKE_STATE state = SSL_get_state((SSL *)ctx->ssl);
+ empty_handshake = state == TLS_ST_BEFORE;
+#else
+ /* access packet_length directly in OpenSSL <= 1.0.2; SSL_state() is broken */
+ empty_handshake = !ctx->ssl->packet_length;
+#endif
+ if (empty_handshake) {
+ if (!errno) {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_EMPTY;
+ }
+ else {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_ABORT;
+ }
+ }
+ else {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+ }
+#endif /* BoringSSL or LibreSSL */
+ }
+ goto out_error;
+ }
+ else {
+ /* Fail on all other handshake errors */
+ /* Note: OpenSSL may leave unread bytes in the socket's
+ * buffer, causing an RST to be emitted upon close() on
+ * TCP sockets. We first try to drain possibly pending
+ * data to avoid this as much as possible.
+ */
+ conn_ctrl_drain(conn);
+ if (!conn->err_code)
+ conn->err_code = (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT) ?
+ CO_ER_SSL_KILLED_HB : CO_ER_SSL_HANDSHAKE;
+ goto out_error;
+ }
+ }
+ /* read some data: consider handshake completed */
+ goto reneg_ok;
+ }
+ ret = SSL_do_handshake(ctx->ssl);
+check_error:
+ if (ret != 1) {
+ /* handshake did not complete, let's find why */
+ ret = SSL_get_error(ctx->ssl, ret);
+
+ if (!ctx->error_code)
+ ctx->error_code = ERR_peek_error();
+
+ if (ret == SSL_ERROR_WANT_WRITE) {
+ /* SSL handshake needs to write, L4 connection may not be ready */
+ if (!(ctx->wait_event.events & SUB_RETRY_SEND))
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND, &ctx->wait_event);
+ return 0;
+ }
+ else if (ret == SSL_ERROR_WANT_READ) {
+ /* SSL handshake needs to read, L4 connection is ready */
+ if (!(ctx->wait_event.events & SUB_RETRY_RECV))
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx,
+ SUB_RETRY_RECV, &ctx->wait_event);
+ return 0;
+ }
+#ifdef SSL_MODE_ASYNC
+ else if (ret == SSL_ERROR_WANT_ASYNC) {
+ ssl_async_process_fds(ctx);
+ return 0;
+ }
+#endif
+ else if (ret == SSL_ERROR_SYSCALL) {
+ /* if errno is null, then connection was successfully established */
+ if (!errno && conn->flags & CO_FL_WAIT_L4_CONN)
+ conn->flags &= ~CO_FL_WAIT_L4_CONN;
+ if (!conn->err_code) {
+#if defined(OPENSSL_IS_BORINGSSL) || defined(LIBRESSL_VERSION_NUMBER)
+ /* do not handle empty handshakes in BoringSSL or LibreSSL */
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+#else
+ int empty_handshake;
+#if (HA_OPENSSL_VERSION_NUMBER >= 0x1010000fL)
+ /* use SSL_get_state() in OpenSSL >= 1.1.0; SSL_state() is broken */
+ OSSL_HANDSHAKE_STATE state = SSL_get_state(ctx->ssl);
+ empty_handshake = state == TLS_ST_BEFORE;
+#else
+ /* access packet_length directly in OpenSSL <= 1.0.2; SSL_state() is broken */
+ empty_handshake = !ctx->ssl->packet_length;
+#endif
+ if (empty_handshake) {
+ if (!errno) {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_EMPTY;
+ }
+ else {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_ABORT;
+ }
+ }
+ else {
+ if (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT)
+ conn->err_code = CO_ER_SSL_HANDSHAKE_HB;
+ else
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+ }
+#endif /* BoringSSL or LibreSSL */
+ }
+ goto out_error;
+
+ } else if (ret == SSL_ERROR_ZERO_RETURN) {
+ /* The peer has closed the SSL session for writing by
+ * sending a close_notify alert */
+ conn_ctrl_drain(conn);
+ conn->err_code = CO_ER_SSL_EMPTY;
+ goto out_error;
+
+ }
+ else {
+ /* Fail on all other handshake errors */
+ /* Note: OpenSSL may leave unread bytes in the socket's
+ * buffer, causing an RST to be emitted upon close() on
+ * TCP sockets. We first try to drain possibly pending
+ * data to avoid this as much as possible.
+ */
+ conn_ctrl_drain(conn);
+ if (!conn->err_code)
+ conn->err_code = (ctx->xprt_st & SSL_SOCK_RECV_HEARTBEAT) ?
+ CO_ER_SSL_KILLED_HB : CO_ER_SSL_HANDSHAKE;
+ goto out_error;
+ }
+ }
+#ifdef SSL_READ_EARLY_DATA_SUCCESS
+ else {
+ /*
+ * If the server refused the early data, we have to send a
+ * 425 to the client, as we no longer have the data to sent
+ * them again.
+ */
+ if ((conn->flags & CO_FL_EARLY_DATA) && (objt_server(conn->target))) {
+ if (SSL_get_early_data_status(ctx->ssl) == SSL_EARLY_DATA_REJECTED) {
+ conn->err_code = CO_ER_SSL_EARLY_FAILED;
+ goto out_error;
+ }
+ }
+ }
+#endif
+
+
+reneg_ok:
+
+#ifdef SSL_MODE_ASYNC
+ /* ASYNC engine API doesn't support moving read/write
+ * buffers. So we disable ASYNC mode right after
+ * the handshake to avoid buffer overflow.
+ */
+ if (global_ssl.async)
+ SSL_clear_mode(ctx->ssl, SSL_MODE_ASYNC);
+#endif
+ /* Handshake succeeded */
+ if (!SSL_session_reused(ctx->ssl)) {
+ if (objt_server(conn->target)) {
+ update_freq_ctr(&global.ssl_be_keys_per_sec, 1);
+ if (global.ssl_be_keys_per_sec.curr_ctr > global.ssl_be_keys_max)
+ global.ssl_be_keys_max = global.ssl_be_keys_per_sec.curr_ctr;
+ }
+ else {
+ update_freq_ctr(&global.ssl_fe_keys_per_sec, 1);
+ if (global.ssl_fe_keys_per_sec.curr_ctr > global.ssl_fe_keys_max)
+ global.ssl_fe_keys_max = global.ssl_fe_keys_per_sec.curr_ctr;
+ }
+
+ if (counters) {
+ HA_ATOMIC_INC(&counters->sess);
+ HA_ATOMIC_INC(&counters_px->sess);
+ }
+ }
+ else if (counters) {
+ HA_ATOMIC_INC(&counters->reused_sess);
+ HA_ATOMIC_INC(&counters_px->reused_sess);
+ }
+
+ /* The connection is now established at both layers, it's time to leave */
+ conn->flags &= ~(flag | CO_FL_WAIT_L4_CONN | CO_FL_WAIT_L6_CONN);
+ return 1;
+
+ out_error:
+ /* Clear openssl global errors stack */
+ ssl_sock_dump_errors(conn, NULL);
+ ERR_clear_error();
+
+ /* free resumed session if exists */
+ if (objt_server(conn->target)) {
+ struct server *s = __objt_server(conn->target);
+ /* RWLOCK: only rdlock the SSL cache even when writing in it because there is
+ * one cache per thread, it only prevents to flush it from the CLI in
+ * another thread */
+
+ HA_RWLOCK_RDLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
+ if (s->ssl_ctx.reused_sess[tid].ptr)
+ ha_free(&s->ssl_ctx.reused_sess[tid].ptr);
+ HA_RWLOCK_RDUNLOCK(SSL_SERVER_LOCK, &s->ssl_ctx.lock);
+ }
+
+ if (counters) {
+ HA_ATOMIC_INC(&counters->failed_handshake);
+ HA_ATOMIC_INC(&counters_px->failed_handshake);
+ }
+
+ /* Fail on all other handshake errors */
+ conn->flags |= CO_FL_ERROR;
+ if (!conn->err_code)
+ conn->err_code = CO_ER_SSL_HANDSHAKE;
+ return 0;
+}
+
/* Called from the upper layer, to subscribe <es> to events <event_type>. The
 * event subscriber <es> is not allowed to change from a previous call as long
 * as at least one event is still subscribed. The <event_type> must only be a
 * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0,
 * unless the transport layer was already released.
 */
static int ssl_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	struct ssl_sock_ctx *ctx = xprt_ctx;

	/* transport layer already released: nothing to subscribe on */
	if (!ctx)
		return -1;

	/* only RECV/SEND bits are valid, and the subscriber must not change
	 * while at least one event remains armed.
	 */
	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(ctx->subs && ctx->subs != es);

	ctx->subs = es;
	es->events |= event_type;

	/* we may have to subscribe to lower layers for new events; while the
	 * handshake is still in progress the handshake code already drives
	 * the lower xprt, so only propagate afterwards.
	 */
	event_type &= ~ctx->wait_event.events;
	if (event_type && !(conn->flags & CO_FL_SSL_WAIT_HS))
		ctx->xprt->subscribe(conn, ctx->xprt_ctx, event_type, &ctx->wait_event);
	return 0;
}
+
/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
 * The <es> pointer is not allowed to differ from the one passed to the
 * subscribe() call. It always returns zero.
 */
static int ssl_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
{
	struct ssl_sock_ctx *ctx = xprt_ctx;

	/* same invariants as in ssl_subscribe() */
	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
	BUG_ON(ctx->subs && ctx->subs != es);

	es->events &= ~event_type;
	if (!es->events)
		ctx->subs = NULL;

	/* If we subscribed, and we're not doing the handshake,
	 * then we subscribed because the upper layer asked for it,
	 * as the upper layer is no longer interested, we can
	 * unsubscribe too.
	 */
	event_type &= ctx->wait_event.events;
	if (event_type && !(ctx->conn->flags & CO_FL_SSL_WAIT_HS))
		conn_unsubscribe(conn, ctx->xprt_ctx, event_type, &ctx->wait_event);

	return 0;
}
+
/* The connection has been taken over, so destroy the old tasklet and create
 * a new one. The original thread ID must be passed into orig_tid
 * It should be called with the takeover lock for the old thread held.
 * Returns 0 on success, and -1 on failure
 */
static int ssl_takeover(struct connection *conn, void *xprt_ctx, int orig_tid)
{
	struct ssl_sock_ctx *ctx = xprt_ctx;
	struct tasklet *tl = tasklet_new();

	if (!tl)
		return -1;

	/* Detach the old tasklet first, then wake it on its original thread:
	 * ssl_sock_io_cb() running there will observe the NULL context and
	 * free the tasklet itself.
	 */
	ctx->wait_event.tasklet->context = NULL;
	tasklet_wakeup_on(ctx->wait_event.tasklet, orig_tid);
	/* attach the freshly created tasklet to this context */
	ctx->wait_event.tasklet = tl;
	ctx->wait_event.tasklet->process = ssl_sock_io_cb;
	ctx->wait_event.tasklet->context = ctx;
	return 0;
}
+
+/* notify the next xprt that the connection is about to become idle and that it
+ * may be stolen at any time after the function returns and that any tasklet in
+ * the chain must be careful before dereferencing its context.
+ */
+static void ssl_set_idle(struct connection *conn, void *xprt_ctx)
+{
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+
+ if (!ctx || !ctx->wait_event.tasklet)
+ return;
+
+ HA_ATOMIC_OR(&ctx->wait_event.tasklet->state, TASK_F_USR1);
+ if (ctx->xprt)
+ xprt_set_idle(conn, ctx->xprt, ctx->xprt_ctx);
+}
+
+/* notify the next xprt that the connection is not idle anymore and that it may
+ * not be stolen before the next xprt_set_idle().
+ */
+static void ssl_set_used(struct connection *conn, void *xprt_ctx)
+{
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+
+ if (!ctx || !ctx->wait_event.tasklet)
+ return;
+
+ HA_ATOMIC_OR(&ctx->wait_event.tasklet->state, TASK_F_USR1);
+ if (ctx->xprt)
+ xprt_set_used(conn, ctx->xprt, ctx->xprt_ctx);
+}
+
+/* Use the provided XPRT as an underlying XPRT, and provide the old one.
+ * Returns 0 on success, and non-zero on failure.
+ */
+static int ssl_add_xprt(struct connection *conn, void *xprt_ctx, void *toadd_ctx, const struct xprt_ops *toadd_ops, void **oldxprt_ctx, const struct xprt_ops **oldxprt_ops)
+{
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+
+ if (oldxprt_ops != NULL)
+ *oldxprt_ops = ctx->xprt;
+ if (oldxprt_ctx != NULL)
+ *oldxprt_ctx = ctx->xprt_ctx;
+ ctx->xprt = toadd_ops;
+ ctx->xprt_ctx = toadd_ctx;
+ return 0;
+}
+
+/* Remove the specified xprt. If if it our underlying XPRT, remove it and
+ * return 0, otherwise just call the remove_xprt method from the underlying
+ * XPRT.
+ */
+static int ssl_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx)
+{
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+
+ if (ctx->xprt_ctx == toremove_ctx) {
+ ctx->xprt_ctx = newctx;
+ ctx->xprt = newops;
+ return 0;
+ }
+ return (ctx->xprt->remove_xprt(conn, ctx->xprt_ctx, toremove_ctx, newops, newctx));
+}
+
/* I/O tasklet callback for an SSL connection. <t> is really a tasklet and
 * <context> the ssl_sock_ctx. Drives the handshake when one is pending, then
 * notifies the upper layer (creating the mux first if needed). Returns the
 * tasklet, or NULL when the connection was taken over by another thread and
 * the tasklet had to be freed.
 */
struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned int state)
{
	struct tasklet *tl = (struct tasklet *)t;
	struct ssl_sock_ctx *ctx = context;
	struct connection *conn;
	int conn_in_list;
	int ret = 0;

	if (state & TASK_F_USR1) {
		/* the tasklet was idling on an idle connection, it might have
		 * been stolen, let's be careful!
		 */
		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		if (tl->context == NULL) {
			/* The connection was taken over (see ssl_takeover());
			 * this tasklet is orphaned, release it and stop.
			 */
			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
			tasklet_free(tl);
			return NULL;
		}
		conn = ctx->conn;
		conn_in_list = conn->flags & CO_FL_LIST_MASK;
		/* pull the conn out of the idle trees while we work on it so
		 * it cannot be stolen; it is re-inserted at "leave" below.
		 */
		if (conn_in_list)
			conn_delete_from_tree(conn);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	} else {
		conn = ctx->conn;
		conn_in_list = 0;
	}

	/* First if we're doing an handshake, try that */
	if (ctx->conn->flags & CO_FL_SSL_WAIT_HS) {
		ssl_sock_handshake(ctx->conn, CO_FL_SSL_WAIT_HS);
		if (!(ctx->conn->flags & CO_FL_SSL_WAIT_HS)) {
			/* handshake completed, leave the bulk queue */
			_HA_ATOMIC_AND(&tl->state, ~TASK_HEAVY);
		}
	}
	/* If we had an error, or the handshake is done and I/O is available,
	 * let the upper layer know.
	 * If no mux was set up yet, then call conn_create_mux()
	 * we can't be sure conn_fd_handler() will be called again.
	 */
	if ((ctx->conn->flags & CO_FL_ERROR) ||
	    !(ctx->conn->flags & CO_FL_SSL_WAIT_HS)) {
		int woke = 0;

		/* On error, wake any waiter */
		if (ctx->subs) {
			tasklet_wakeup(ctx->subs->tasklet);
			ctx->subs->events = 0;
			woke = 1;
			ctx->subs = NULL;
		}

		/* If we're the first xprt for the connection, let the
		 * upper layers know. If we have no mux, create it,
		 * and once we have a mux, call its wake method if we didn't
		 * woke a tasklet already.
		 */
		if (ctx->conn->xprt_ctx == ctx) {
			if (!ctx->conn->mux)
				ret = conn_create_mux(ctx->conn);
			if (ret >= 0 && !woke && ctx->conn->mux && ctx->conn->mux->wake)
				ret = ctx->conn->mux->wake(ctx->conn);
			goto leave;
		}
	}
#ifdef SSL_READ_EARLY_DATA_SUCCESS
	/* If we have early data and somebody wants to receive, let them */
	else if (b_data(&ctx->early_buf) && ctx->subs &&
	         ctx->subs->events & SUB_RETRY_RECV) {
		tasklet_wakeup(ctx->subs->tasklet);
		ctx->subs->events &= ~SUB_RETRY_RECV;
		if (!ctx->subs->events)
			ctx->subs = NULL;
	}
#endif
leave:
	/* put the connection back into the idle tree it was removed from,
	 * unless an error was reported by the mux wake callback.
	 */
	if (!ret && conn_in_list) {
		struct server *srv = objt_server(conn->target);

		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
		_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
	}
	return t;
}
+
/* Receive up to <count> bytes from connection <conn>'s socket and store them
 * into buffer <buf>. Only one call to recv() is performed, unless the
 * buffer wraps, in which case a second call may be performed. The connection's
 * flags are updated with whatever special event is detected (error, read0,
 * empty). The caller is responsible for taking care of those events and
 * avoiding the call if inappropriate. The function does not call the
 * connection's polling update function, so the caller is responsible for this.
 */
static size_t ssl_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
{
	struct ssl_sock_ctx *ctx = xprt_ctx;
	ssize_t ret;
	size_t try, done = 0;

	if (!ctx)
		goto out_error;

#ifdef SSL_READ_EARLY_DATA_SUCCESS
	/* serve buffered early data first, before any regular SSL_read() */
	if (b_data(&ctx->early_buf)) {
		try = b_contig_space(buf);
		if (try > b_data(&ctx->early_buf))
			try = b_data(&ctx->early_buf);
		memcpy(b_tail(buf), b_head(&ctx->early_buf), try);
		b_add(buf, try);
		b_del(&ctx->early_buf, try);
		/* release the early buffer once fully drained */
		if (b_data(&ctx->early_buf) == 0)
			b_free(&ctx->early_buf);
		return try;
	}
#endif

	if (conn->flags & (CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS))
		/* a handshake was requested */
		return 0;

	/* read the largest possible block. For this, we perform only one call
	 * to recv() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again. A new attempt is made on
	 * EINTR too.
	 */
	while (count > 0) {

		try = b_contig_space(buf);
		if (!try)
			break;

		if (try > count)
			try = count;

		ret = SSL_read(ctx->ssl, b_tail(buf), try);

		if (conn->flags & CO_FL_ERROR) {
			/* CO_FL_ERROR may be set by ssl_sock_infocbk */
			goto out_error;
		}
		if (ret > 0) {
			b_add(buf, ret);
			done += ret;
			count -= ret;
		}
		else {
			/* <= 0: translate to an SSL_ERROR_* reason */
			ret = SSL_get_error(ctx->ssl, ret);
			if (ret == SSL_ERROR_WANT_WRITE) {
				/* handshake is running, and it needs to enable write */
				conn->flags |= CO_FL_SSL_WAIT_HS;
				ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND, &ctx->wait_event);
#ifdef SSL_MODE_ASYNC
				/* Async mode can be re-enabled, because we're leaving data state.*/
				if (global_ssl.async)
					SSL_set_mode(ctx->ssl, SSL_MODE_ASYNC);
#endif
				break;
			}
			else if (ret == SSL_ERROR_WANT_READ) {
				if (SSL_renegotiate_pending(ctx->ssl)) {
					ctx->xprt->subscribe(conn, ctx->xprt_ctx,
					                     SUB_RETRY_RECV,
					                     &ctx->wait_event);
					/* handshake is running, and it may need to re-enable read */
					conn->flags |= CO_FL_SSL_WAIT_HS;
#ifdef SSL_MODE_ASYNC
					/* Async mode can be re-enabled, because we're leaving data state.*/
					if (global_ssl.async)
						SSL_set_mode(ctx->ssl, SSL_MODE_ASYNC);
#endif
					break;
				}
				/* plain "would block": stop reading for now */
				break;
			} else if (ret == SSL_ERROR_ZERO_RETURN)
				goto read0;
			else if (ret == SSL_ERROR_SSL) {
				/* deliberately shadows the outer <ctx> with a
				 * freshly looked-up one before recording the
				 * error code.
				 */
				struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
				if (ctx && !ctx->error_code)
					ctx->error_code = ERR_peek_error();
				conn->err_code = CO_ERR_SSL_FATAL;
			}
			/* For SSL_ERROR_SYSCALL, make sure to clear the error
			 * stack before shutting down the connection for
			 * reading. */
			if (ret == SSL_ERROR_SYSCALL && (!errno || errno == EAGAIN || errno == EWOULDBLOCK))
				goto clear_ssl_error;
			/* otherwise it's a real error */
			goto out_error;
		}
	}
 leave:
	return done;

 clear_ssl_error:
	/* Clear openssl global errors stack */
	ssl_sock_dump_errors(conn, NULL);
	ERR_clear_error();
 read0:
	conn_sock_read0(conn);
	goto leave;

 out_error:
	conn->flags |= CO_FL_ERROR;
	/* Clear openssl global errors stack */
	ssl_sock_dump_errors(conn, NULL);
	ERR_clear_error();
	goto leave;
}
+
+
/* Send up to <count> pending bytes from buffer <buf> to connection <conn>'s
 * socket. <flags> may contain some CO_SFL_* flags to hint the system about
 * other pending data for example, but this flag is ignored at the moment.
 * Only one call to send() is performed, unless the buffer wraps, in which case
 * a second call may be performed. The connection's flags are updated with
 * whatever special event is detected (error, empty). The caller is responsible
 * for taking care of those events and avoiding the call if inappropriate. The
 * function does not call the connection's polling update function, so the caller
 * is responsible for this. The buffer's output is not adjusted, it's up to the
 * caller to take care of this. It's up to the caller to update the buffer's
 * contents based on the return value.
 */
static size_t ssl_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
{
	struct ssl_sock_ctx *ctx = xprt_ctx;
	ssize_t ret;
	size_t try, done;

	done = 0;

	if (!ctx)
		goto out_error;

	if (conn->flags & (CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS | CO_FL_EARLY_SSL_HS))
		/* a handshake was requested */
		return 0;

	/* send the largest possible block. For this we perform only one call
	 * to send() unless the buffer wraps and we exactly fill the first hunk,
	 * in which case we accept to do it once again.
	 */
	while (count) {
#ifdef SSL_READ_EARLY_DATA_SUCCESS
		size_t written_data;
#endif

		try = b_contig_data(buf, done);
		if (try > count)
			try = count;

		/* hard cap on the TLS record size, if configured */
		if (global_ssl.hard_max_record && try > global_ssl.hard_max_record)
			try = global_ssl.hard_max_record;

		if (!(flags & CO_SFL_STREAMER) &&
		    !(ctx->xprt_st & SSL_SOCK_SEND_UNLIMITED) &&
		    global_ssl.max_record && try > global_ssl.max_record) {
			try = global_ssl.max_record;
		}
		else {
			/* we need to keep the information about the fact that
			 * we're not limiting the upcoming send(), because if it
			 * fails, we'll have to retry with at least as many data.
			 */
			ctx->xprt_st |= SSL_SOCK_SEND_UNLIMITED;
		}

		/* hint OpenSSL that more data will follow this record */
		if (try < count || flags & CO_SFL_MSG_MORE)
			ctx->xprt_st |= SSL_SOCK_SEND_MORE;
		else
			ctx->xprt_st &= ~SSL_SOCK_SEND_MORE;

#ifdef SSL_READ_EARLY_DATA_SUCCESS
		if (!SSL_is_init_finished(ctx->ssl) && conn_is_back(conn)) {
			unsigned int max_early;

			if (objt_listener(conn->target))
				max_early = SSL_get_max_early_data(ctx->ssl);
			else {
				if (SSL_get0_session(ctx->ssl))
					max_early = SSL_SESSION_get_max_early_data(SSL_get0_session(ctx->ssl));
				else
					max_early = 0;
			}

			/* clamp <try> so we never exceed the peer's early-data
			 * budget.
			 * NOTE(review): <try> is size_t, so "try <= 0" only
			 * matches 0; if sent_early_data already exceeded
			 * max_early the subtraction would wrap — verify the
			 * callers guarantee sent_early_data <= max_early.
			 */
			if (try + ctx->sent_early_data > max_early) {
				try -= (try + ctx->sent_early_data) - max_early;
				if (try <= 0) {
					conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;
					tasklet_wakeup(ctx->wait_event.tasklet);
					break;
				}
			}
			ret = SSL_write_early_data(ctx->ssl, b_peek(buf, done), try, &written_data);
			if (ret == 1) {
				ret = written_data;
				ctx->sent_early_data += ret;
				if (objt_server(conn->target)) {
					conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN | CO_FL_EARLY_DATA;
					/* Initiate the handshake, now */
					tasklet_wakeup(ctx->wait_event.tasklet);
				}

			}

		} else
#endif
			ret = SSL_write(ctx->ssl, b_peek(buf, done), try);

		if (conn->flags & CO_FL_ERROR) {
			/* CO_FL_ERROR may be set by ssl_sock_infocbk */
			goto out_error;
		}
		if (ret > 0) {
			/* A send succeeded, so we can consider ourself connected */
			conn->flags &= ~CO_FL_WAIT_L4L6;
			ctx->xprt_st &= ~SSL_SOCK_SEND_UNLIMITED;
			count -= ret;
			done += ret;
		}
		else {
			/* <= 0: translate to an SSL_ERROR_* reason */
			ret = SSL_get_error(ctx->ssl, ret);

			if (ret == SSL_ERROR_WANT_WRITE) {
				if (SSL_renegotiate_pending(ctx->ssl)) {
					/* handshake is running, and it may need to re-enable write */
					conn->flags |= CO_FL_SSL_WAIT_HS;
					ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND, &ctx->wait_event);
#ifdef SSL_MODE_ASYNC
					/* Async mode can be re-enabled, because we're leaving data state.*/
					if (global_ssl.async)
						SSL_set_mode(ctx->ssl, SSL_MODE_ASYNC);
#endif
					break;
				}

				/* plain "would block": stop writing for now */
				break;
			}
			else if (ret == SSL_ERROR_WANT_READ) {
				/* handshake is running, and it needs to enable read */
				conn->flags |= CO_FL_SSL_WAIT_HS;
				ctx->xprt->subscribe(conn, ctx->xprt_ctx,
				                     SUB_RETRY_RECV,
				                     &ctx->wait_event);
#ifdef SSL_MODE_ASYNC
				/* Async mode can be re-enabled, because we're leaving data state.*/
				if (global_ssl.async)
					SSL_set_mode(ctx->ssl, SSL_MODE_ASYNC);
#endif
				break;
			}
			else if (ret == SSL_ERROR_SSL || ret == SSL_ERROR_SYSCALL) {
				/* deliberately shadows the outer <ctx> with a
				 * freshly looked-up one before recording the
				 * error code.
				 */
				struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);

				if (ctx && !ctx->error_code)
					ctx->error_code = ERR_peek_error();
				conn->err_code = CO_ERR_SSL_FATAL;
			}
			goto out_error;
		}
	}
 leave:
	return done;

 out_error:
	/* Clear openssl global errors stack */
	ssl_sock_dump_errors(conn, NULL);
	ERR_clear_error();

	conn->flags |= CO_FL_ERROR;
	goto leave;
}
+
/* Release the SSL transport context attached to <conn>. Unsubscribes from the
 * lower layer, wakes any pending subscriber, then frees the SSL object and the
 * context itself. When an async engine job is still pending, the context is
 * kept alive and handed over to ssl_async_fd_free() instead.
 */
void ssl_sock_close(struct connection *conn, void *xprt_ctx) {

	struct ssl_sock_ctx *ctx = xprt_ctx;


	if (ctx) {
		if (ctx->wait_event.events != 0)
			ctx->xprt->unsubscribe(ctx->conn, ctx->xprt_ctx,
			                       ctx->wait_event.events,
			                       &ctx->wait_event);
		if (ctx->subs) {
			/* wake the subscriber so it notices the close */
			ctx->subs->events = 0;
			tasklet_wakeup(ctx->subs->tasklet);
		}

		if (ctx->xprt->close)
			ctx->xprt->close(conn, ctx->xprt_ctx);
#ifdef SSL_MODE_ASYNC
		if (global_ssl.async) {
			/* 32 matches the fixed size of all_fd[] below */
			OSSL_ASYNC_FD all_fd[32], afd;
			size_t num_all_fds = 0;
			int i;

			/* first call only queries the number of async fds */
			SSL_get_all_async_fds(ctx->ssl, NULL, &num_all_fds);
			if (num_all_fds > 32) {
				send_log(NULL, LOG_EMERG, "haproxy: openssl returns too many async fds. It seems a bug. Process may crash\n");
				return;
			}

			SSL_get_all_async_fds(ctx->ssl, all_fd, &num_all_fds);

			/* If an async job is pending, we must try to
			   to catch the end using polling before calling
			   SSL_free */
			if (num_all_fds && SSL_waiting_for_async(ctx->ssl)) {
				for (i=0 ; i < num_all_fds ; i++) {
					/* switch on an handler designed to
					 * handle the SSL_free
					 */
					afd = all_fd[i];
					fdtab[afd].iocb = ssl_async_fd_free;
					fdtab[afd].owner = ctx->ssl;
					fd_want_recv(afd);
					/* To ensure that the fd cache won't be used
					 * and we'll catch a real RD event.
					 */
					fd_cant_recv(afd);
				}
				/* the ctx itself is freed later by
				 * ssl_async_fd_free(); only release what we
				 * own here and account for the pending job.
				 */
				tasklet_free(ctx->wait_event.tasklet);
				pool_free(ssl_sock_ctx_pool, ctx);
				_HA_ATOMIC_INC(&jobs);
				return;
			}
			/* Else we can remove the fds from the fdtab
			 * and call SSL_free.
			 * note: we do a fd_stop_both and not a delete
			 * because the fd is owned by the engine.
			 * the engine is responsible to close
			 */
			for (i=0 ; i < num_all_fds ; i++) {
				/* We want to remove the fd from the fdtab
				 * but we flag it to disown because the
				 * close is performed by the engine itself
				 */
				fdtab[all_fd[i]].state |= FD_DISOWN;
				fd_delete(all_fd[i]);
			}
		}
#endif
		SSL_free(ctx->ssl);
		b_free(&ctx->early_buf);
		tasklet_free(ctx->wait_event.tasklet);
		pool_free(ssl_sock_ctx_pool, ctx);
		_HA_ATOMIC_DEC(&global.sslconns);
	}
}
+
+/* This function tries to perform a clean shutdown on an SSL connection, and in
+ * any case, flags the connection as reusable if no handshake was in progress.
+ */
+static void ssl_sock_shutw(struct connection *conn, void *xprt_ctx, int clean)
+{
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+
+ if (conn->flags & (CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS))
+ return;
+ if (!clean)
+ /* don't sent notify on SSL_shutdown */
+ SSL_set_quiet_shutdown(ctx->ssl, 1);
+ /* no handshake was in progress, try a clean ssl shutdown */
+ if (SSL_shutdown(ctx->ssl) <= 0) {
+ /* Clear openssl global errors stack */
+ ssl_sock_dump_errors(conn, NULL);
+ ERR_clear_error();
+ }
+}
+
+
+/* used for ppv2 pkey algo (can be used for logging) */
+int ssl_sock_get_pkey_algo(struct connection *conn, struct buffer *out)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+ X509 *crt;
+
+ if (!ctx)
+ return 0;
+ crt = SSL_get_certificate(ctx->ssl);
+ if (!crt)
+ return 0;
+
+ return cert_get_pkey_algo(crt, out);
+}
+
+/* used for ppv2 cert signature (can be used for logging) */
+const char *ssl_sock_get_cert_sig(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ __OPENSSL_110_CONST__ ASN1_OBJECT *algorithm;
+ X509 *crt;
+
+ if (!ctx)
+ return NULL;
+ crt = SSL_get_certificate(ctx->ssl);
+ if (!crt)
+ return NULL;
+ X509_ALGOR_get0(&algorithm, NULL, NULL, X509_get0_tbs_sigalg(crt));
+ return OBJ_nid2sn(OBJ_obj2nid(algorithm));
+}
+
+/* used for ppv2 authority */
+const char *ssl_sock_get_sni(struct connection *conn)
+{
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ if (!ctx)
+ return NULL;
+ return SSL_get_servername(ctx->ssl, TLSEXT_NAMETYPE_host_name);
+#else
+ return NULL;
+#endif
+}
+
+/* used for logging/ppv2, may be changed for a sample fetch later */
+const char *ssl_sock_get_cipher_name(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ if (!ctx)
+ return NULL;
+ return SSL_get_cipher_name(ctx->ssl);
+}
+
+/* used for logging/ppv2, may be changed for a sample fetch later */
+const char *ssl_sock_get_proto_version(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ if (!ctx)
+ return NULL;
+ return SSL_get_version(ctx->ssl);
+}
+
/* Advertise the ALPN protocol list <alpn>/<len> on the connection's SSL
 * session. A no-op when ALPN is not supported by the SSL library or when no
 * SSL context is attached to the connection.
 */
void ssl_sock_set_alpn(struct connection *conn, const unsigned char *alpn, int len)
{
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	struct ssl_sock_ctx *sctx = conn_get_ssl_sock_ctx(conn);

	if (sctx)
		SSL_set_alpn_protos(sctx->ssl, alpn, len);
#endif
}
+
/* Sets advertised SNI for outgoing connections. Please set <hostname> to NULL
 * to disable SNI.
 */
void ssl_sock_set_servername(struct connection *conn, const char *hostname)
{
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
	struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
	char *prev_name;

	if (!ctx)
		return;

	/* only makes sense before the handshake has started */
	BUG_ON(!(conn->flags & CO_FL_WAIT_L6_CONN));
	BUG_ON(!(conn->flags & CO_FL_SSL_WAIT_HS));

	/* if the SNI changes, we must destroy the reusable context so that a
	 * new connection will present a new SNI. compare with the SNI
	 * previously stored in the reused_sess. If the session was reused,
	 * the associated SNI (if any) has already been assigned to the SSL
	 * during ssl_sock_init() so SSL_get_servername() will properly
	 * retrieve the currently known hostname for the SSL.
	 */

	prev_name = (char *)SSL_get_servername(ctx->ssl, TLSEXT_NAMETYPE_host_name);
	/* NULL handling: strcmp() is only reached when both <hostname> and
	 * <prev_name> are non-NULL — the first clause catches a previously
	 * unset name, the second a NULL <hostname> — so this is safe.
	 */
	if ((!prev_name && hostname) ||
	    !hostname ||
	    strcmp(hostname, prev_name) != 0) {
		/* drop the cached session, then set (or clear) the SNI */
		SSL_set_session(ctx->ssl, NULL);
		SSL_set_tlsext_host_name(ctx->ssl, hostname);
	}
#endif
}
+
+/* Extract peer certificate's common name into the chunk dest
+ * Returns
+ * the len of the extracted common name
+ * or 0 if no CN found in DN
+ * or -1 on error case (i.e. no peer certificate)
+ */
+int ssl_sock_get_remote_common_name(struct connection *conn,
+ struct buffer *dest)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+ X509 *crt = NULL;
+ X509_NAME *name;
+ const char find_cn[] = "CN";
+ const struct buffer find_cn_chunk = {
+ .area = (char *)&find_cn,
+ .data = sizeof(find_cn)-1
+ };
+ int result = -1;
+
+ if (!ctx)
+ goto out;
+
+ /* SSL_get_peer_certificate, it increase X509 * ref count */
+ crt = SSL_get_peer_certificate(ctx->ssl);
+ if (!crt)
+ goto out;
+
+ name = X509_get_subject_name(crt);
+ if (!name)
+ goto out;
+
+ result = ssl_sock_get_dn_entry(name, &find_cn_chunk, 1, dest);
+out:
+ if (crt)
+ X509_free(crt);
+
+ return result;
+}
+
+/* returns 1 if client passed a certificate for this session, 0 if not */
+int ssl_sock_get_cert_used_sess(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+ X509 *crt = NULL;
+
+ if (!ctx)
+ return 0;
+
+ /* SSL_get_peer_certificate, it increase X509 * ref count */
+ crt = SSL_get_peer_certificate(ctx->ssl);
+ if (!crt)
+ return 0;
+
+ X509_free(crt);
+ return 1;
+}
+
+/* returns 1 if client passed a certificate for this connection, 0 if not */
+int ssl_sock_get_cert_used_conn(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ if (!ctx)
+ return 0;
+ return SSL_SOCK_ST_FL_VERIFY_DONE & ctx->xprt_st ? 1 : 0;
+}
+
+/* returns result from SSL verify */
+unsigned int ssl_sock_get_verify_result(struct connection *conn)
+{
+ struct ssl_sock_ctx *ctx = conn_get_ssl_sock_ctx(conn);
+
+ if (!ctx)
+ return (unsigned int)X509_V_ERR_APPLICATION_VERIFICATION;
+ return (unsigned int)SSL_get_verify_result(ctx->ssl);
+}
+
/* Returns the application layer protocol name in <str> and <len> when known.
 * Zero is returned if the protocol name was not found, otherwise non-zero is
 * returned. The string is allocated in the SSL context and doesn't have to be
 * freed by the caller. NPN is also checked if available since older versions
 * of openssl (1.0.1) which are more common in field only support this one.
 */
int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx, const char **str, int *len)
{
#if defined(TLSEXT_TYPE_application_layer_protocol_negotiation) || \
	defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	struct ssl_sock_ctx *ctx = xprt_ctx;
	if (!ctx)
		return 0;

	*str = NULL;

	/* prefer ALPN when the library supports it... */
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
	SSL_get0_alpn_selected(ctx->ssl, (const unsigned char **)str, (unsigned *)len);
	if (*str)
		return 1;
#endif
	/* ...and fall back to the older NPN extension otherwise */
#if defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
	SSL_get0_next_proto_negotiated(ctx->ssl, (const unsigned char **)str, (unsigned *)len);
	if (*str)
		return 1;
#endif
#endif
	return 0;
}
+
+/* "issuers-chain-path" load chain certificate in global */
+int ssl_load_global_issuer_from_BIO(BIO *in, char *fp, char **err)
+{
+ X509 *ca;
+ X509_NAME *name = NULL;
+ ASN1_OCTET_STRING *skid = NULL;
+ STACK_OF(X509) *chain = NULL;
+ struct issuer_chain *issuer;
+ struct eb64_node *node;
+ char *path;
+ u64 key;
+ int ret = 0;
+
+ while ((ca = PEM_read_bio_X509(in, NULL, NULL, NULL))) {
+ if (chain == NULL) {
+ chain = sk_X509_new_null();
+ skid = X509_get_ext_d2i(ca, NID_subject_key_identifier, NULL, NULL);
+ name = X509_get_subject_name(ca);
+ }
+ if (!sk_X509_push(chain, ca)) {
+ X509_free(ca);
+ goto end;
+ }
+ }
+ if (!chain) {
+ memprintf(err, "unable to load issuers-chain %s : pem certificate not found.\n", fp);
+ goto end;
+ }
+ if (!skid) {
+ memprintf(err, "unable to load issuers-chain %s : SubjectKeyIdentifier not found.\n", fp);
+ goto end;
+ }
+ if (!name) {
+ memprintf(err, "unable to load issuers-chain %s : SubjectName not found.\n", fp);
+ goto end;
+ }
+ key = XXH3(ASN1_STRING_get0_data(skid), ASN1_STRING_length(skid), 0);
+ for (node = eb64_lookup(&cert_issuer_tree, key); node; node = eb64_next(node)) {
+ issuer = container_of(node, typeof(*issuer), node);
+ if (!X509_NAME_cmp(name, X509_get_subject_name(sk_X509_value(issuer->chain, 0)))) {
+ memprintf(err, "duplicate issuers-chain %s: %s already in store\n", fp, issuer->path);
+ goto end;
+ }
+ }
+ issuer = calloc(1, sizeof *issuer);
+ path = strdup(fp);
+ if (!issuer || !path) {
+ free(issuer);
+ free(path);
+ goto end;
+ }
+ issuer->node.key = key;
+ issuer->path = path;
+ issuer->chain = chain;
+ chain = NULL;
+ eb64_insert(&cert_issuer_tree, &issuer->node);
+ ret = 1;
+ end:
+ if (skid)
+ ASN1_OCTET_STRING_free(skid);
+ if (chain)
+ sk_X509_pop_free(chain, X509_free);
+ return ret;
+}
+
+ struct issuer_chain* ssl_get0_issuer_chain(X509 *cert)
+{
+ AUTHORITY_KEYID *akid;
+ struct issuer_chain *issuer = NULL;
+
+ akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL);
+ if (akid && akid->keyid) {
+ struct eb64_node *node;
+ u64 hk;
+ hk = XXH3(ASN1_STRING_get0_data(akid->keyid), ASN1_STRING_length(akid->keyid), 0);
+ for (node = eb64_lookup(&cert_issuer_tree, hk); node; node = eb64_next(node)) {
+ struct issuer_chain *ti = container_of(node, typeof(*issuer), node);
+ if (X509_check_issued(sk_X509_value(ti->chain, 0), cert) == X509_V_OK) {
+ issuer = ti;
+ break;
+ }
+ }
+ }
+ AUTHORITY_KEYID_free(akid);
+ return issuer;
+}
+
+void ssl_free_global_issuers(void)
+{
+ struct eb64_node *node, *back;
+ struct issuer_chain *issuer;
+
+ node = eb64_first(&cert_issuer_tree);
+ while (node) {
+ issuer = container_of(node, typeof(*issuer), node);
+ back = eb64_next(node);
+ eb64_delete(node);
+ free(issuer->path);
+ sk_X509_pop_free(issuer->chain, X509_free);
+ free(issuer);
+ node = back;
+ }
+}
+
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
/* config-time check: ssl-mode-async cannot drive more than 32 engines */
static int ssl_check_async_engine_count(void) {
	if (global_ssl.async && (openssl_engines_initialized > 32)) {
		ha_alert("ssl-mode-async only supports a maximum of 32 engines.\n");
		return ERR_ABORT;
	}
	return ERR_NONE;
}
#endif
+
/* "show fd" helper to dump ssl internals. Warning: the output buffer is often
 * the common trash! It returns non-zero if the connection entry looks suspicious.
 */
static int ssl_sock_show_fd(struct buffer *buf, const struct connection *conn, const void *ctx)
{
	const struct ssl_sock_ctx *sctx = ctx;
	int ret = 0;

	if (!sctx)
		return ret;

	/* the ctx must point back to the connection being dumped */
	if (sctx->conn != conn) {
		chunk_appendf(&trash, " xctx.conn=%p(BOGUS)", sctx->conn);
		ret = 1;
	}
	chunk_appendf(&trash, " xctx.st=%d .err=%ld", sctx->xprt_st, sctx->error_code);

	if (sctx->xprt) {
		chunk_appendf(&trash, " .xprt=%s", sctx->xprt->name);
		if (sctx->xprt_ctx)
			chunk_appendf(&trash, " .xctx=%p", sctx->xprt_ctx);
	}

	chunk_appendf(&trash, " .wait.ev=%d", sctx->wait_event.events);

	/* as soon as a shutdown is reported the lower layer unregisters its
	 * subscriber, so the situations below are transient and rare enough to
	 * be reported as suspicious. In any case they shouldn't last.
	 * (bits 1/2 presumably map to SUB_RETRY_RECV/SUB_RETRY_SEND — confirm
	 * against the flag definitions.)
	 */
	if ((sctx->wait_event.events & 1) && (conn->flags & (CO_FL_SOCK_RD_SH|CO_FL_ERROR)))
		ret = 1;
	if ((sctx->wait_event.events & 2) && (conn->flags & (CO_FL_SOCK_WR_SH|CO_FL_ERROR)))
		ret = 1;

	chunk_appendf(&trash, " .subs=%p", sctx->subs);
	if (sctx->subs) {
		chunk_appendf(&trash, "(ev=%d tl=%p", sctx->subs->events, sctx->subs->tasklet);
		/* a subscriber tasklet with a huge call count looks like a loop */
		if (sctx->subs->tasklet->calls >= 1000000)
			ret = 1;
		chunk_appendf(&trash, " tl.calls=%d tl.ctx=%p tl.fct=",
		              sctx->subs->tasklet->calls,
		              sctx->subs->tasklet->context);
		resolve_sym_name(&trash, NULL, sctx->subs->tasklet->process);
		chunk_appendf(&trash, ")");
	}
	chunk_appendf(&trash, " .sent_early=%d", sctx->sent_early_data);
	chunk_appendf(&trash, " .early_in=%d", (int)sctx->early_buf.data);
	return ret;
}
+
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+/* This function is used with TLS ticket keys management. It permits to browse
+ * each reference. The variable <ref> must point to the current node's list
+ * element (which starts by the root), and <end> must point to the root node.
+ */
+static inline
+struct tls_keys_ref *tlskeys_list_get_next(struct list *ref, struct list *end)
+{
+ /* Get next list entry. */
+ ref = ref->n;
+
+ /* If the entry is the last of the list, return NULL. */
+ if (ref == end)
+ return NULL;
+
+ return LIST_ELEM(ref, struct tls_keys_ref *, list);
+}
+
/* Resolves a tls-keys reference given on the CLI: either "#<id>" for a
 * numeric unique id lookup, or a plain file name for a string lookup.
 * Returns the matching ref or NULL when not found or when the id is invalid.
 */
static inline
struct tls_keys_ref *tlskeys_ref_lookup_ref(const char *reference)
{
	int id;
	char *error;

	/* If the reference starts by a '#', this is a numeric id. */
	if (reference[0] == '#') {
		/* Try to convert the numeric id. Reject an empty or partially
		 * numeric id ("#", "#12x"): previously a lone "#" silently
		 * looked up id 0.
		 */
		id = strtol(reference + 1, &error, 10);
		if (error == reference + 1 || *error != '\0')
			return NULL;

		/* Perform the unique id lookup. */
		return tlskeys_ref_lookupid(id);
	}

	/* Perform the string lookup. */
	return tlskeys_ref_lookup(reference);
}
+#endif
+
+
+#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
+
/* dumps all tls keys. Relies on the show_keys_ctx context from the appctx.
 * Returns 0 when the output buffer is full (the handler will be called again
 * to resume from the saved <next_ref>/<next_index>), 1 when the dump is done.
 */
static int cli_io_handler_tlskeys_files(struct appctx *appctx)
{
	struct show_keys_ctx *ctx = appctx->svcctx;

	switch (ctx->state) {
	case SHOW_KEYS_INIT:
		/* Display the column headers. If the message cannot be sent,
		 * quit the function with returning 0. The function is called
		 * later and restart at the state "SHOW_KEYS_INIT".
		 */
		chunk_reset(&trash);

		if (ctx->dump_entries)
			chunk_appendf(&trash, "# id secret\n");
		else
			chunk_appendf(&trash, "# id (file)\n");

		if (applet_putchk(appctx, &trash) == -1)
			return 0;

		/* Now, we start the browsing of the references lists.
		 * Note that the following call to LIST_ELEM return bad pointer. The only
		 * available field of this pointer is <list>. It is used with the function
		 * tlskeys_list_get_next() for returning the first available entry
		 */
		if (ctx->next_ref == NULL)
			ctx->next_ref = tlskeys_list_get_next(&tlskeys_reference, &tlskeys_reference);

		ctx->state = SHOW_KEYS_LIST;
		__fallthrough;

	case SHOW_KEYS_LIST:
		while (ctx->next_ref) {
			struct tls_keys_ref *ref = ctx->next_ref;

			chunk_reset(&trash);
			/* prefix the header line with "# " only in entry-dump mode */
			if (ctx->dump_entries && ctx->next_index == 0)
				chunk_appendf(&trash, "# ");

			/* next_index == 0 means we start a new file: emit its header */
			if (ctx->next_index == 0)
				chunk_appendf(&trash, "%d (%s)\n", ref->unique_id, ref->filename);

			if (ctx->dump_entries) {
				int head;

				/* hold the read lock so keys cannot rotate mid-dump */
				HA_RWLOCK_RDLOCK(TLSKEYS_REF_LOCK, &ref->lock);
				head = ref->tls_ticket_enc_index;
				while (ctx->next_index < TLS_TICKETS_NO) {
					struct buffer *t2 = get_trash_chunk();

					chunk_reset(t2);
					/* should never fail here because we dump only a key in the t2 buffer */
					if (ref->key_size_bits == 128) {
						t2->data = a2base64((char *)(ref->tlskeys + (head + 2 + ctx->next_index) % TLS_TICKETS_NO),
						                   sizeof(struct tls_sess_key_128),
						                   t2->area, t2->size);
						chunk_appendf(&trash, "%d.%d %s\n", ref->unique_id, ctx->next_index,
							      t2->area);
					}
					else if (ref->key_size_bits == 256) {
						t2->data = a2base64((char *)(ref->tlskeys + (head + 2 + ctx->next_index) % TLS_TICKETS_NO),
						                   sizeof(struct tls_sess_key_256),
						                   t2->area, t2->size);
						chunk_appendf(&trash, "%d.%d %s\n", ref->unique_id, ctx->next_index,
							      t2->area);
					}
					else {
						/* This case should never happen */
						chunk_appendf(&trash, "%d.%d <unknown>\n", ref->unique_id, ctx->next_index);
					}

					if (applet_putchk(appctx, &trash) == -1) {
						/* let's try again later from this stream. We add ourselves into
						 * this stream's users so that it can remove us upon termination.
						 */
						HA_RWLOCK_RDUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
						return 0;
					}
					ctx->next_index++;
				}
				HA_RWLOCK_RDUNLOCK(TLSKEYS_REF_LOCK, &ref->lock);
				ctx->next_index = 0;
			}
			if (applet_putchk(appctx, &trash) == -1) {
				/* let's try again later from this stream. We add ourselves into
				 * this stream's users so that it can remove us upon termination.
				 */
				return 0;
			}

			if (ctx->names_only == 0) /* don't display everything if not necessary */
				break;

			/* get next list entry and check the end of the list */
			ctx->next_ref = tlskeys_list_get_next(&ref->list, &tlskeys_reference);
		}
		ctx->state = SHOW_KEYS_DONE;
		__fallthrough;

	default:
		return 1;
	}
	return 0;
}
+
+/* Prepares a "show_keys_ctx" and sets the appropriate io_handler if needed */
+static int cli_parse_show_tlskeys(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_keys_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ /* no parameter, shows only file list */
+ if (!*args[2]) {
+ ctx->names_only = 1;
+ return 0;
+ }
+
+ if (args[2][0] == '*') {
+ /* list every TLS ticket keys */
+ ctx->names_only = 1;
+ } else {
+ ctx->next_ref = tlskeys_ref_lookup_ref(args[2]);
+ if (!ctx->next_ref)
+ return cli_err(appctx, "'show tls-keys' unable to locate referenced filename\n");
+ }
+
+ ctx->dump_entries = 1;
+ return 0;
+}
+
/* Parser for "set ssl tls-key <id|file> <key>": replaces the next TLS ticket
 * key of the referenced file with the base64-decoded <key>.
 */
static int cli_parse_set_tlskeys(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct tls_keys_ref *ref;
	int ret;

	/* Expect two parameters: the filename and the new TLS key in base64 encoding */
	if (!*args[3] || !*args[4])
		return cli_err(appctx, "'set ssl tls-key' expects a filename and the new TLS key in base64 encoding.\n");

	/* resolve the ticket-keys file either by "#<id>" or by file name */
	ref = tlskeys_ref_lookup_ref(args[3]);
	if (!ref)
		return cli_err(appctx, "'set ssl tls-key' unable to locate referenced filename\n");

	/* decode the base64 payload into the shared trash buffer */
	ret = base64dec(args[4], strlen(args[4]), trash.area, trash.size);
	if (ret < 0)
		return cli_err(appctx, "'set ssl tls-key' received invalid base64 encoded TLS key.\n");

	trash.data = ret;
	/* the decoded key must match the reference's configured key size */
	if (ssl_sock_update_tlskey_ref(ref, &trash) < 0)
		return cli_err(appctx, "'set ssl tls-key' received a key of wrong size.\n");

	return cli_msg(appctx, LOG_INFO, "TLS ticket key updated!\n");
}
+#endif
+
+
#ifdef HAVE_SSL_PROVIDERS
/* Linked-list node holding one loaded OpenSSL provider name, used by the
 * "show ssl providers" CLI handler and the build-options dump.
 */
struct provider_name {
	const char *name;   /* provider name as returned by OSSL_PROVIDER_get0_name() */
	struct list list;   /* attachment point in the temporary name list */
};
+
+
+static int ssl_provider_get_name_cb(OSSL_PROVIDER *provider, void *cbdata)
+{
+ struct list *provider_names = cbdata;
+ struct provider_name *item = NULL;
+ const char *name = OSSL_PROVIDER_get0_name(provider);
+
+ if (!provider_names)
+ return 0;
+
+ item = calloc(1, sizeof(*item));
+
+ if (!item)
+ return 0;
+
+ item->name = name;
+ LIST_APPEND(provider_names, &item->list);
+
+ return 1;
+}
+
+static void ssl_provider_get_name_list(struct list *provider_names)
+{
+ if (!provider_names)
+ return;
+
+ OSSL_PROVIDER_do_all(NULL, ssl_provider_get_name_cb, provider_names);
+}
+
+static void ssl_provider_clear_name_list(struct list *provider_names)
+{
+ struct provider_name *item = NULL, *item_s = NULL;
+
+ if (provider_names) {
+ list_for_each_entry_safe(item, item_s, provider_names, list) {
+ LIST_DELETE(&item->list);
+ free(item);
+ }
+ }
+}
+
+static int cli_io_handler_show_providers(struct appctx *appctx)
+{
+ struct buffer *trash = get_trash_chunk();
+ struct list provider_names;
+ struct provider_name *name;
+
+ LIST_INIT(&provider_names);
+
+ chunk_appendf(trash, "Loaded providers : \n");
+
+ ssl_provider_get_name_list(&provider_names);
+
+ list_for_each_entry(name, &provider_names, list) {
+ chunk_appendf(trash, "\t- %s\n", name->name);
+ }
+
+ ssl_provider_clear_name_list(&provider_names);
+
+ if (applet_putchk(appctx, trash) == -1)
+ goto yield;
+
+ return 1;
+
+yield:
+ return 0;
+}
+#endif
+
+
/* register cli keywords: "show tls-keys", "set ssl tls-key" and
 * "show ssl providers", each compiled in only when the matching feature is
 * available. The list is terminated by a NULL entry.
 */
static struct cli_kw_list cli_kws = {{ },{
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
	{ { "show", "tls-keys", NULL }, "show tls-keys [id|*] : show tls keys references or dump tls ticket keys when id specified", cli_parse_show_tlskeys, cli_io_handler_tlskeys_files },
	{ { "set", "ssl", "tls-key", NULL }, "set ssl tls-key [id|file] <key> : set the next TLS key for the <id> or <file> listener to <key>", cli_parse_set_tlskeys, NULL },
#endif
#ifdef HAVE_SSL_PROVIDERS
	{ { "show", "ssl", "providers", NULL }, "show ssl providers : show loaded SSL providers", NULL, cli_io_handler_show_providers },
#endif
	{ { NULL }, NULL, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
/* transport-layer operations for SSL sockets */
struct xprt_ops ssl_sock = {
	.snd_buf  = ssl_sock_from_buf,
	.rcv_buf  = ssl_sock_to_buf,
	.subscribe = ssl_subscribe,
	.unsubscribe = ssl_unsubscribe,
	.remove_xprt = ssl_remove_xprt,
	.add_xprt = ssl_add_xprt,
	.rcv_pipe = NULL,            /* kernel splicing is not implemented for SSL */
	.snd_pipe = NULL,
	.shutr    = NULL,            /* read shutdown needs no SSL-specific action */
	.shutw    = ssl_sock_shutw,
	.close    = ssl_sock_close,
	.init     = ssl_sock_init,
	.start    = ssl_sock_start,
	.prepare_bind_conf = ssl_sock_prepare_bind_conf,
	.destroy_bind_conf = ssl_sock_destroy_bind_conf,
	.prepare_srv = ssl_sock_prepare_srv_ctx,
	.destroy_srv = ssl_sock_free_srv_ctx,
	.get_alpn = ssl_sock_get_alpn,
	.takeover = ssl_takeover,
	.set_idle = ssl_set_idle,
	.set_used = ssl_set_used,
	.get_ssl_sock_ctx = ssl_sock_get_ctx,
	.name     = "SSL",
	.show_fd  = ssl_sock_show_fd,
};
+
+enum act_return ssl_action_wait_for_hs(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct connection *conn;
+
+ conn = objt_conn(sess->origin);
+
+ if (conn) {
+ if (conn->flags & (CO_FL_EARLY_SSL_HS | CO_FL_SSL_WAIT_HS)) {
+ sc_ep_set(s->scf, SE_FL_WAIT_FOR_HS);
+ s->req.flags |= CF_READ_EVENT;
+ return ACT_RET_YIELD;
+ }
+ }
+ return (ACT_RET_CONT);
+}
+
+static enum act_parse_ret ssl_parse_wait_for_hs(const char **args, int *orig_arg, struct proxy *px, struct act_rule *rule, char **err)
+{
+ rule->action_ptr = ssl_action_wait_for_hs;
+
+ return ACT_RET_PRS_OK;
+}
+
/* "http-request" action keywords provided by this file */
static struct action_kw_list http_req_actions = {ILH, {
	{ "wait-for-handshake", ssl_parse_wait_for_hs },
	{ /* END */ }
}};

INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);
+
#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT

/* ex_data free callback: releases the SCT list chunk attached to an SSL_CTX. */
static void ssl_sock_sctl_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	if (!ptr)
		return;

	chunk_destroy(ptr);
	free(ptr);
}

#endif
+
+
/* ex_data free callback: returns a captured clienthello buffer to its pool.
 * pool_free() accepts NULL, so no guard is needed.
 */
static void ssl_sock_capture_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	pool_free(pool_head_ssl_capture, ptr);
}
+
#ifdef HAVE_SSL_KEYLOG
/* ex_data free callback: releases a keylog entry together with every secret
 * string it references.
 */
static void ssl_sock_keylog_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	struct ssl_keylog *kl = ptr;

	if (!kl)
		return;

	pool_free(pool_head_ssl_keylog_str, kl->client_random);
	pool_free(pool_head_ssl_keylog_str, kl->client_early_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->client_handshake_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->server_handshake_traffic_secret);
	pool_free(pool_head_ssl_keylog_str, kl->client_traffic_secret_0);
	pool_free(pool_head_ssl_keylog_str, kl->server_traffic_secret_0);
	pool_free(pool_head_ssl_keylog_str, kl->exporter_secret);
	pool_free(pool_head_ssl_keylog_str, kl->early_exporter_secret);

	pool_free(pool_head_ssl_keylog, kl);
}
#endif
+
+static void ssl_sock_clt_crt_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
+{
+ if (!ptr)
+ return;
+
+ X509_free((X509*)ptr);
+}
+
/* ex_data free callback: returns the stored client SNI string to its pool.
 * pool_free() accepts NULL, so no guard is needed.
 */
static void ssl_sock_clt_sni_free_func(void *parent, void *ptr, CRYPTO_EX_DATA *ad, int idx, long argl, void *argp)
{
	pool_free(ssl_sock_client_sni_pool, ptr);
}
+
/* One-time SSL stack initialization, run at STG_REGISTER through INITCALL:
 * registers the xprt, allocates the ex_data indexes, and wires the various
 * post-check/post-deinit handlers.
 */
static void __ssl_sock_init(void)
{
#if (!defined(OPENSSL_NO_COMP) && !defined(SSL_OP_NO_COMPRESSION))
	STACK_OF(SSL_COMP)* cm;
	int n;
#endif

	/* duplicate the default cipher strings so they can later be freed or
	 * replaced independently of the compiled-in constants */
	if (global_ssl.listen_default_ciphers)
		global_ssl.listen_default_ciphers = strdup(global_ssl.listen_default_ciphers);
	if (global_ssl.connect_default_ciphers)
		global_ssl.connect_default_ciphers = strdup(global_ssl.connect_default_ciphers);
#ifdef HAVE_SSL_CTX_SET_CIPHERSUITES
	if (global_ssl.listen_default_ciphersuites)
		global_ssl.listen_default_ciphersuites = strdup(global_ssl.listen_default_ciphersuites);
	if (global_ssl.connect_default_ciphersuites)
		global_ssl.connect_default_ciphersuites = strdup(global_ssl.connect_default_ciphersuites);
#endif

	xprt_register(XPRT_SSL, &ssl_sock);
#if HA_OPENSSL_VERSION_NUMBER < 0x10100000L
	SSL_library_init();
#endif
#if (!defined(OPENSSL_NO_COMP) && !defined(SSL_OP_NO_COMPRESSION))
	/* strip every compression method from the library's list, presumably
	 * to keep TLS compression disabled — confirm rationale (CRIME). */
	cm = SSL_COMP_get_compression_methods();
	n = sk_SSL_COMP_num(cm);
	while (n--) {
		(void) sk_SSL_COMP_pop(cm);
	}
#endif

#if defined(USE_THREAD) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	/* pre-1.1.0 OpenSSL needs explicit locking callbacks for threads */
	ssl_locking_init();
#endif
#ifdef HAVE_SSL_CTX_ADD_SERVER_CUSTOM_EXT
	sctl_ex_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_sctl_free_func);
#endif

#if ((defined SSL_CTRL_SET_TLSEXT_STATUS_REQ_CB && !defined OPENSSL_NO_OCSP) && !defined OPENSSL_IS_BORINGSSL)
	ocsp_ex_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_ocsp_free_func);
#endif

	/* per-SSL ex_data slots used throughout this file */
	ssl_app_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
	ssl_capture_ptr_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_capture_free_func);
#ifdef USE_QUIC
	ssl_qc_app_data_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL);
#endif /* USE_QUIC */
#ifdef HAVE_SSL_KEYLOG
	ssl_keylog_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_keylog_free_func);
#endif
	ssl_client_crt_ref_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_clt_crt_free_func);
	ssl_client_sni_index = SSL_get_ex_new_index(0, NULL, NULL, NULL, ssl_sock_clt_sni_free_func);
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
	ENGINE_load_builtin_engines();
	hap_register_post_check(ssl_check_async_engine_count);
#endif
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
	hap_register_post_check(tlskeys_finalize_config);
#endif

	global.ssl_session_max_cost = SSL_SESSION_MAX_COST;
	global.ssl_handshake_max_cost = SSL_HANDSHAKE_MAX_COST;

	hap_register_post_deinit(ssl_free_global_issuers);

#ifndef OPENSSL_NO_DH
	ssl_dh_ptr_index = SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL);
	hap_register_post_deinit(ssl_free_dh);
#endif
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
	hap_register_post_deinit(ssl_free_engines);
#endif
#ifdef HAVE_SSL_PROVIDERS
	hap_register_post_deinit(ssl_unload_providers);
#endif
#if HA_OPENSSL_VERSION_NUMBER < 0x3000000fL
	/* Load SSL string for the verbose & debug mode. */
	ERR_load_SSL_strings();
#endif
	/* custom BIO methods routing SSL I/O through haproxy's buffers */
	ha_meth = BIO_meth_new(0x666, "ha methods");
	if (ha_meth != NULL) {
		BIO_meth_set_write(ha_meth, ha_ssl_write);
		BIO_meth_set_read(ha_meth, ha_ssl_read);
		BIO_meth_set_ctrl(ha_meth, ha_ssl_ctrl);
		BIO_meth_set_create(ha_meth, ha_ssl_new);
		BIO_meth_set_destroy(ha_meth, ha_ssl_free);
		BIO_meth_set_puts(ha_meth, ha_ssl_puts);
		BIO_meth_set_gets(ha_meth, ha_ssl_gets);
	}

	HA_SPIN_INIT(&ckch_lock);

	HA_SPIN_INIT(&ocsp_tree_lock);

	/* Try to register dedicated SSL/TLS protocol message callbacks for
	 * heartbleed attack (CVE-2014-0160) and clienthello.
	 */
	hap_register_post_check(ssl_sock_register_msg_callbacks);

	/* Try to free all callbacks that were registered by using
	 * ssl_sock_register_msg_callback().
	 */
	hap_register_post_deinit(ssl_sock_unregister_msg_callbacks);
}
INITCALL0(STG_REGISTER, __ssl_sock_init);
+
+/* Compute and register the version string */
+static void ssl_register_build_options()
+{
+ char *ptr = NULL;
+ int i;
+
+ memprintf(&ptr, "Built with OpenSSL version : "
+#ifdef OPENSSL_IS_BORINGSSL
+ "BoringSSL");
+#else /* OPENSSL_IS_BORINGSSL */
+ OPENSSL_VERSION_TEXT
+ "\nRunning on OpenSSL version : %s%s",
+ OpenSSL_version(OPENSSL_VERSION),
+ ((OPENSSL_VERSION_NUMBER ^ OpenSSL_version_num()) >> 8) ? " (VERSIONS DIFFER!)" : "");
+#endif
+ memprintf(&ptr, "%s\nOpenSSL library supports TLS extensions : "
+#if HA_OPENSSL_VERSION_NUMBER < 0x00907000L
+ "no (library version too old)"
+#elif defined(OPENSSL_NO_TLSEXT)
+ "no (disabled via OPENSSL_NO_TLSEXT)"
+#else
+ "yes"
+#endif
+ "", ptr);
+
+ memprintf(&ptr, "%s\nOpenSSL library supports SNI : "
+#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
+ "yes"
+#else
+#ifdef OPENSSL_NO_TLSEXT
+ "no (because of OPENSSL_NO_TLSEXT)"
+#else
+ "no (version might be too old, 0.9.8f min needed)"
+#endif
+#endif
+ "", ptr);
+
+ memprintf(&ptr, "%s\nOpenSSL library supports :", ptr);
+ for (i = CONF_TLSV_MIN; i <= CONF_TLSV_MAX; i++)
+ if (methodVersions[i].option)
+ memprintf(&ptr, "%s %s", ptr, methodVersions[i].name);
+
+#ifdef HAVE_SSL_PROVIDERS
+ {
+ struct list provider_names;
+ struct provider_name *name;
+ LIST_INIT(&provider_names);
+ ssl_provider_get_name_list(&provider_names);
+
+ memprintf(&ptr, "%s\nOpenSSL providers loaded :", ptr);
+
+ list_for_each_entry(name, &provider_names, list) {
+ memprintf(&ptr, "%s %s", ptr, name->name);
+ }
+
+ ssl_provider_clear_name_list(&provider_names);
+ }
+#endif
+
+ hap_register_build_opts(ptr, 1);
+}
+
+INITCALL0(STG_REGISTER, ssl_register_build_options);
+
#if defined(USE_ENGINE) && !defined(OPENSSL_NO_ENGINE)
/* Post-deinit: finishes and releases every registered OpenSSL engine. */
void ssl_free_engines(void) {
	struct ssl_engine_list *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &openssl_engines, list) {
		ENGINE_finish(cur->e);
		ENGINE_free(cur->e);
		LIST_DELETE(&cur->list);
		free(cur);
	}
}
#endif
+
#ifdef HAVE_SSL_PROVIDERS
/* Post-deinit: unloads every explicitly loaded OpenSSL provider. */
void ssl_unload_providers(void) {
	struct ssl_provider_list *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &openssl_providers, list) {
		OSSL_PROVIDER_unload(cur->provider);
		LIST_DELETE(&cur->list);
		free(cur);
	}
}
#endif
+
+#ifndef OPENSSL_NO_DH
+void ssl_free_dh(void) {
+ if (local_dh_1024) {
+ HASSL_DH_free(local_dh_1024);
+ local_dh_1024 = NULL;
+ }
+ if (local_dh_2048) {
+ HASSL_DH_free(local_dh_2048);
+ local_dh_2048 = NULL;
+ }
+ if (local_dh_4096) {
+ HASSL_DH_free(local_dh_4096);
+ local_dh_4096 = NULL;
+ }
+ if (global_dh) {
+ HASSL_DH_free(global_dh);
+ global_dh = NULL;
+ }
+}
+#endif
+
/* Final SSL stack teardown, run as a post-deinit handler: releases the
 * generated-certificates LRU cache, the custom BIO methods, the OCSP update
 * task, and (on pre-1.1.0 OpenSSL) the library's global state.
 */
static void __ssl_sock_deinit(void)
{
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
	if (ssl_ctx_lru_tree) {
		lru64_destroy(ssl_ctx_lru_tree);
		HA_RWLOCK_DESTROY(&ssl_ctx_lru_rwlock);
	}
#endif

#if (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	/* pre-1.1.0 OpenSSL requires explicit global cleanup */
	ERR_remove_state(0);
	ERR_free_strings();

	EVP_cleanup();
#endif

#if (HA_OPENSSL_VERSION_NUMBER >= 0x00907000L) && (HA_OPENSSL_VERSION_NUMBER < 0x10100000L)
	CRYPTO_cleanup_all_ex_data();
#endif
	/* BIO_meth_free() accepts NULL, so no guard is needed */
	BIO_meth_free(ha_meth);

#if !defined OPENSSL_NO_OCSP
	ssl_destroy_ocsp_update_task();
#endif
}
REGISTER_POST_DEINIT(__ssl_sock_deinit);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/ssl_utils.c b/src/ssl_utils.c
new file mode 100644
index 0000000..4a85b89
--- /dev/null
+++ b/src/ssl_utils.c
@@ -0,0 +1,702 @@
+/*
+ * Utility functions for SSL:
+ * Mostly generic functions that retrieve information from certificates
+ *
+ * Copyright (C) 2012 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2020 HAProxy Technologies, William Lallemand <wlallemand@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <haproxy/api.h>
+#include <haproxy/buf-t.h>
+#include <haproxy/chunk.h>
+#include <haproxy/openssl-compat.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+
+/* fill a buffer with the algorithm and size of a public key */
+int cert_get_pkey_algo(X509 *crt, struct buffer *out)
+{
+ int bits = 0;
+ int sig = TLSEXT_signature_anonymous;
+ int len = -1;
+ EVP_PKEY *pkey;
+
+ pkey = X509_get_pubkey(crt);
+ if (pkey) {
+ bits = EVP_PKEY_bits(pkey);
+ switch(EVP_PKEY_base_id(pkey)) {
+ case EVP_PKEY_RSA:
+ sig = TLSEXT_signature_rsa;
+ break;
+ case EVP_PKEY_EC:
+ sig = TLSEXT_signature_ecdsa;
+ break;
+ case EVP_PKEY_DSA:
+ sig = TLSEXT_signature_dsa;
+ break;
+ }
+ EVP_PKEY_free(pkey);
+ }
+
+ switch(sig) {
+ case TLSEXT_signature_rsa:
+ len = chunk_printf(out, "RSA%d", bits);
+ break;
+ case TLSEXT_signature_ecdsa:
+ len = chunk_printf(out, "EC%d", bits);
+ break;
+ case TLSEXT_signature_dsa:
+ len = chunk_printf(out, "DSA%d", bits);
+ break;
+ default:
+ return 0;
+ }
+ if (len < 0)
+ return 0;
+ return 1;
+}
+
+/* Extract a serial from a cert, and copy it to a chunk.
+ * Returns 1 if serial is found and copied, 0 if no serial found and
+ * -1 if output is not large enough.
+ */
+int ssl_sock_get_serial(X509 *crt, struct buffer *out)
+{
+ ASN1_INTEGER *serial;
+
+ serial = X509_get_serialNumber(crt);
+ if (!serial)
+ return 0;
+
+ if (out->size < serial->length)
+ return -1;
+
+ memcpy(out->area, serial->data, serial->length);
+ out->data = serial->length;
+ return 1;
+}
+
+/* Extract a cert to der, and copy it to a chunk.
+ * Returns 1 if the cert is found and copied, 0 on der conversion failure
+ * and -1 if the output is not large enough.
+ */
+int ssl_sock_crt2der(X509 *crt, struct buffer *out)
+{
+ int len;
+ unsigned char *p = (unsigned char *) out->area;
+
+ len = i2d_X509(crt, NULL);
+ if (len <= 0)
+ return 1;
+
+ if (out->size < len)
+ return -1;
+
+ i2d_X509(crt, &p);
+ out->data = len;
+ return 1;
+}
+
+
+/* Copy Date in ASN1_UTCTIME format in struct buffer out.
+ * Returns 1 if serial is found and copied, 0 if no valid time found
+ * and -1 if output is not large enough.
+ */
+int ssl_sock_get_time(ASN1_TIME *tm, struct buffer *out)
+{
+ if (tm->type == V_ASN1_GENERALIZEDTIME) {
+ ASN1_GENERALIZEDTIME *gentm = (ASN1_GENERALIZEDTIME *)tm;
+
+ if (gentm->length < 12)
+ return 0;
+ if (gentm->data[0] != 0x32 || gentm->data[1] != 0x30)
+ return 0;
+ if (out->size < gentm->length-2)
+ return -1;
+
+ memcpy(out->area, gentm->data+2, gentm->length-2);
+ out->data = gentm->length-2;
+ return 1;
+ }
+ else if (tm->type == V_ASN1_UTCTIME) {
+ ASN1_UTCTIME *utctm = (ASN1_UTCTIME *)tm;
+
+ if (utctm->length < 10)
+ return 0;
+ if (utctm->data[0] >= 0x35)
+ return 0;
+ if (out->size < utctm->length)
+ return -1;
+
+ memcpy(out->area, utctm->data, utctm->length);
+ out->data = utctm->length;
+ return 1;
+ }
+
+ return 0;
+}
+
/* Extract an entry from a X509_NAME and copy its value to an output chunk.
 * <entry> is the short name of the wanted RDN (e.g. "CN"); <pos> selects the
 * <pos>-th occurrence, counted from the start when positive and from the end
 * when negative.
 * Returns 1 if entry found, 0 if entry not found, or -1 if output not large enough.
 */
int ssl_sock_get_dn_entry(X509_NAME *a, const struct buffer *entry, int pos,
			  struct buffer *out)
{
	X509_NAME_ENTRY *ne;
	ASN1_OBJECT *obj;
	ASN1_STRING *data;
	const unsigned char *data_ptr;
	int data_len;
	int i, j, n;
	int cur = 0;
	const char *s;
	char tmp[128];
	int name_count;

	name_count = X509_NAME_entry_count(a);

	out->data = 0;
	for (i = 0; i < name_count; i++) {
		/* negative <pos>: browse the entries backwards */
		if (pos < 0)
			j = (name_count-1) - i;
		else
			j = i;

		ne = X509_NAME_get_entry(a, j);
		obj = X509_NAME_ENTRY_get_object(ne);
		data = X509_NAME_ENTRY_get_data(ne);
		data_ptr = ASN1_STRING_get0_data(data);
		data_len = ASN1_STRING_length(data);
		n = OBJ_obj2nid(obj);
		/* unknown OID: fall back to its textual representation */
		if ((n == NID_undef) || ((s = OBJ_nid2sn(n)) == NULL)) {
			i2t_ASN1_OBJECT(tmp, sizeof(tmp), obj);
			s = tmp;
		}

		if (chunk_strcasecmp(entry, s) != 0)
			continue;

		/* count matches; negative counter matches negative <pos> */
		if (pos < 0)
			cur--;
		else
			cur++;

		if (cur != pos)
			continue;

		if (data_len > out->size)
			return -1;

		memcpy(out->area, data_ptr, data_len);
		out->data = data_len;
		return 1;
	}

	return 0;

}
+
+/*
+ * Extract the DN in the specified format from the X509_NAME and copy result to a chunk.
+ * Currently supports rfc2253 for returning LDAP V3 DNs.
+ * Returns 1 if dn entries exist, 0 if no dn entry was found.
+ */
+int ssl_sock_get_dn_formatted(X509_NAME *a, const struct buffer *format, struct buffer *out)
+{
+ BIO *bio = NULL;
+ int ret = 0;
+ int data_len = 0;
+
+ if (chunk_strcmp(format, "rfc2253") == 0) {
+ bio = BIO_new(BIO_s_mem());
+ if (bio == NULL)
+ goto out;
+
+ if (X509_NAME_print_ex(bio, a, 0, XN_FLAG_RFC2253) < 0)
+ goto out;
+
+ if ((data_len = BIO_read(bio, out->area, out->size)) <= 0)
+ goto out;
+
+ out->data = data_len;
+
+ ret = 1;
+ }
+out:
+ if (bio)
+ BIO_free(bio);
+ return ret;
+}
+
/* Extract and format full DN from a X509_NAME and copy result into a chunk,
 * producing the legacy "/SN=value/SN=value" one-line form.
 * Returns 1 if dn entries exits, 0 if no dn entry found or -1 if output is not large enough.
 */
int ssl_sock_get_dn_oneline(X509_NAME *a, struct buffer *out)
{
	X509_NAME_ENTRY *ne;
	ASN1_OBJECT *obj;
	ASN1_STRING *data;
	const unsigned char *data_ptr;
	int data_len;
	int i, n, ln;
	int l = 0;            /* running total of bytes written */
	const char *s;
	char *p;              /* write cursor inside out->area */
	char tmp[128];
	int name_count;


	name_count = X509_NAME_entry_count(a);

	out->data = 0;
	p = out->area;
	for (i = 0; i < name_count; i++) {
		ne = X509_NAME_get_entry(a, i);
		obj = X509_NAME_ENTRY_get_object(ne);
		data = X509_NAME_ENTRY_get_data(ne);
		data_ptr = ASN1_STRING_get0_data(data);
		data_len = ASN1_STRING_length(data);
		n = OBJ_obj2nid(obj);
		/* unknown OID: fall back to its textual representation */
		if ((n == NID_undef) || ((s = OBJ_nid2sn(n)) == NULL)) {
			i2t_ASN1_OBJECT(tmp, sizeof(tmp), obj);
			s = tmp;
		}
		ln = strlen(s);

		/* reserve room for "/<name>=<value>" before writing */
		l += 1 + ln + 1 + data_len;
		if (l > out->size)
			return -1;
		out->data = l;

		*(p++)='/';
		memcpy(p, s, ln);
		p += ln;
		*(p++)='=';
		memcpy(p, data_ptr, data_len);
		p += data_len;
	}

	if (!out->data)
		return 0;

	return 1;
}
+
+
+extern int ssl_client_crt_ref_index;
+
/*
 * This function fetches the peer certificate of an SSL connection.
 * It first tries the regular SSL_get_peer_certificate(); if no certificate
 * is returned it falls back to the client certificate reference that may
 * have been stored in the SSL structure's ex_data during the verification
 * process (slot <ssl_client_crt_ref_index>).
 * In both cases the returned X509 carries an extra reference: the caller
 * must release it with X509_free().
 * Returns NULL in case of failure.
 */
X509* ssl_sock_get_peer_certificate(SSL *ssl)
{
	X509* cert;

	cert = SSL_get_peer_certificate(ssl);
	/* Get the client certificate reference stored in the SSL
	 * structure's ex_data during the verification process. */
	if (!cert) {
		cert = SSL_get_ex_data(ssl, ssl_client_crt_ref_index);
		if (cert)
			X509_up_ref(cert);
	}

	return cert;
}
+
/*
 * This function fetches the X509* for the root CA of the client certificate
 * from the verified chain. We use SSL_get0_verified_chain and walk the stack
 * until we find a self-issued certificate (one that verifies as its own
 * issuer), which is taken as the root.
 * NOTE(review): if no self-issued certificate is present, the last
 * certificate of the chain is returned as a best effort.
 * The returned pointer is owned by the SSL session ("get0"): do not free it.
 *
 * Returns NULL in case of failure.
*/
#ifdef HAVE_SSL_get0_verified_chain
X509* ssl_sock_get_verified_chain_root(SSL *ssl)
{
	STACK_OF(X509) *chain = NULL;
	X509 *crt = NULL;
	int i;

	chain = SSL_get0_verified_chain(ssl);
	if (!chain)
		return NULL;

	for (i = 0; i < sk_X509_num(chain); i++) {
		crt = sk_X509_value(chain, i);

		/* self-issued: subject == issuer, treat as the root */
		if (X509_check_issued(crt, crt) == X509_V_OK)
			break;
	}

	return crt;
}
#endif
+
+/*
+ * Take an OpenSSL version in text format and return a numeric openssl version
+ * Return 0 if it failed to parse the version
+ *
+ * https://www.openssl.org/docs/man1.1.1/man3/OPENSSL_VERSION_NUMBER.html
+ *
+ * MNNFFPPS: major minor fix patch status
+ *
+ * The status nibble has one of the values 0 for development, 1 to e for betas
+ * 1 to 14, and f for release.
+ *
+ * for example
+ *
+ * 0x0090821f 0.9.8zh
+ * 0x1000215f 1.0.2u
+ * 0x30000000 3.0.0-alpha17
+ * 0x30000002 3.0.0-beta2
+ * 0x3000000e 3.0.0-beta14
+ * 0x3000000f 3.0.0
+ */
unsigned int openssl_version_parser(const char *version)
{
	unsigned int major = 0, minor = 0, fix = 0, patch = 0, status = 0;
	const char *p = version;
	char *stop;

	if (!p || !*p)
		return 0;

	/* numeric "major.minor.fix" prefix, each field range-checked against
	 * the width of its nibble/byte in MNNFFPPS */
	major = strtol(p, &stop, 10);
	if (*stop != '.' || major > 0xf)
		return 0;
	p = stop + 1;

	minor = strtol(p, &stop, 10);
	if (*stop != '.' || minor > 0xff)
		return 0;
	p = stop + 1;

	fix = strtol(p, &stop, 10);
	if (fix > 0xff)
		return 0;
	p = stop;

	if (*p == '\0') {
		/* bare "M.N.F": a release */
		status = 0xf;
	} else if (*p == '-') {
		/* pre-release suffix: only "-betaN" raises the status nibble,
		 * anything else ("-alphaN", "-dev", ...) stays at 0 (dev) */
		p++;
		if (strncmp(p, "beta", 4) == 0) {
			status = strtol(p + 4, &stop, 10);
			if (status > 14)
				return 0;
		}
	} else {
		/* letter suffix ("a".."zh"): cumulative patch counter where
		 * 'a' -> 1, 'z' -> 26, "zh" -> 33, etc. */
		patch = 1;
		while (*p) {
			patch += (*p & ~0x20) - 'A';
			p++;
		}
		status = 0xf;
	}

	return ((major & 0xf) << 28) | ((minor & 0xff) << 20)
	     | ((fix & 0xff) << 12) | ((patch & 0xff) << 4) | (status & 0xf);
}
+
+/* Exclude GREASE (RFC8701) values from input buffer */
+void exclude_tls_grease(char *input, int len, struct buffer *output)
+{
+ int ptr = 0;
+
+ while (ptr < len - 1) {
+ if (input[ptr] != input[ptr+1] || (input[ptr] & 0x0f) != 0x0a) {
+ if (output->data <= output->size - 2) {
+ memcpy(output->area + output->data, input + ptr, 2);
+ output->data += 2;
+ } else
+ break;
+ }
+ ptr += 2;
+ }
+ if (output->size - output->data > 0 && len - ptr > 0)
+ output->area[output->data++] = input[ptr];
+}
+
+/*
+ * The following generates an array <x509_v_codes> in which the X509_V_ERR_*
+ * codes are populated with there string equivalent. Depending on the version
+ * of the SSL library, some code does not exist, these will be populated as
+ * "-1" in the array.
+ *
+ * The list was taken from
+ * https://github.com/openssl/openssl/blob/master/include/openssl/x509_vfy.h.in
+ * and must be updated when new constant are introduced.
+ */
+
+#undef _Q
+#define _Q(x) (#x)
+#undef V
+#define V(x) { .code = -1, .value = _Q(x), .string = #x }
+
+static struct x509_v_codes {
+ int code; // numeric value resolved at startup by init_x509_v_err_tab(), or -1 if this SSL lib lacks the macro
+ const char *value; // macro expansion as a string: its numeric value when defined, else its own name (arg to _Q() is expanded before stringification, unlike #x in V())
+ const char *string; // unexpanded macro name, used as the lookup key
+} x509_v_codes[] = {
+ V(X509_V_OK),
+ V(X509_V_ERR_UNSPECIFIED),
+ V(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT),
+ V(X509_V_ERR_UNABLE_TO_GET_CRL),
+ V(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE),
+ V(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE),
+ V(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY),
+ V(X509_V_ERR_CERT_SIGNATURE_FAILURE),
+ V(X509_V_ERR_CRL_SIGNATURE_FAILURE),
+ V(X509_V_ERR_CERT_NOT_YET_VALID),
+ V(X509_V_ERR_CERT_HAS_EXPIRED),
+ V(X509_V_ERR_CRL_NOT_YET_VALID),
+ V(X509_V_ERR_CRL_HAS_EXPIRED),
+ V(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD),
+ V(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD),
+ V(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD),
+ V(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD),
+ V(X509_V_ERR_OUT_OF_MEM),
+ V(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT),
+ V(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN),
+ V(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY),
+ V(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE),
+ V(X509_V_ERR_CERT_CHAIN_TOO_LONG),
+ V(X509_V_ERR_CERT_REVOKED),
+ V(X509_V_ERR_NO_ISSUER_PUBLIC_KEY),
+ V(X509_V_ERR_PATH_LENGTH_EXCEEDED),
+ V(X509_V_ERR_INVALID_PURPOSE),
+ V(X509_V_ERR_CERT_UNTRUSTED),
+ V(X509_V_ERR_CERT_REJECTED),
+ V(X509_V_ERR_SUBJECT_ISSUER_MISMATCH),
+ V(X509_V_ERR_AKID_SKID_MISMATCH),
+ V(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH),
+ V(X509_V_ERR_KEYUSAGE_NO_CERTSIGN),
+ V(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER),
+ V(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION),
+ V(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN),
+ V(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION),
+ V(X509_V_ERR_INVALID_NON_CA),
+ V(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED),
+ V(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE),
+ V(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED),
+ V(X509_V_ERR_INVALID_EXTENSION),
+ V(X509_V_ERR_INVALID_POLICY_EXTENSION),
+ V(X509_V_ERR_NO_EXPLICIT_POLICY),
+ V(X509_V_ERR_DIFFERENT_CRL_SCOPE),
+ V(X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE),
+ V(X509_V_ERR_UNNESTED_RESOURCE),
+ V(X509_V_ERR_PERMITTED_VIOLATION),
+ V(X509_V_ERR_EXCLUDED_VIOLATION),
+ V(X509_V_ERR_SUBTREE_MINMAX),
+ V(X509_V_ERR_APPLICATION_VERIFICATION),
+ V(X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE),
+ V(X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX),
+ V(X509_V_ERR_UNSUPPORTED_NAME_SYNTAX),
+ V(X509_V_ERR_CRL_PATH_VALIDATION_ERROR),
+ V(X509_V_ERR_PATH_LOOP),
+ V(X509_V_ERR_SUITE_B_INVALID_VERSION),
+ V(X509_V_ERR_SUITE_B_INVALID_ALGORITHM),
+ V(X509_V_ERR_SUITE_B_INVALID_CURVE),
+ V(X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM),
+ V(X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED),
+ V(X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256),
+ V(X509_V_ERR_HOSTNAME_MISMATCH),
+ V(X509_V_ERR_EMAIL_MISMATCH),
+ V(X509_V_ERR_IP_ADDRESS_MISMATCH),
+ V(X509_V_ERR_DANE_NO_MATCH),
+ V(X509_V_ERR_EE_KEY_TOO_SMALL),
+ V(X509_V_ERR_CA_KEY_TOO_SMALL),
+ V(X509_V_ERR_CA_MD_TOO_WEAK),
+ V(X509_V_ERR_INVALID_CALL),
+ V(X509_V_ERR_STORE_LOOKUP),
+ V(X509_V_ERR_NO_VALID_SCTS),
+ V(X509_V_ERR_PROXY_SUBJECT_NAME_VIOLATION),
+ V(X509_V_ERR_OCSP_VERIFY_NEEDED),
+ V(X509_V_ERR_OCSP_VERIFY_FAILED),
+ V(X509_V_ERR_OCSP_CERT_UNKNOWN),
+ V(X509_V_ERR_UNSUPPORTED_SIGNATURE_ALGORITHM),
+ V(X509_V_ERR_SIGNATURE_ALGORITHM_MISMATCH),
+ V(X509_V_ERR_SIGNATURE_ALGORITHM_INCONSISTENCY),
+ V(X509_V_ERR_INVALID_CA),
+ V(X509_V_ERR_PATHLEN_INVALID_FOR_NON_CA),
+ V(X509_V_ERR_PATHLEN_WITHOUT_KU_KEY_CERT_SIGN),
+ V(X509_V_ERR_KU_KEY_CERT_SIGN_INVALID_FOR_NON_CA),
+ V(X509_V_ERR_ISSUER_NAME_EMPTY),
+ V(X509_V_ERR_SUBJECT_NAME_EMPTY),
+ V(X509_V_ERR_MISSING_AUTHORITY_KEY_IDENTIFIER),
+ V(X509_V_ERR_MISSING_SUBJECT_KEY_IDENTIFIER),
+ V(X509_V_ERR_EMPTY_SUBJECT_ALT_NAME),
+ V(X509_V_ERR_EMPTY_SUBJECT_SAN_NOT_CRITICAL),
+ V(X509_V_ERR_CA_BCONS_NOT_CRITICAL),
+ V(X509_V_ERR_AUTHORITY_KEY_IDENTIFIER_CRITICAL),
+ V(X509_V_ERR_SUBJECT_KEY_IDENTIFIER_CRITICAL),
+ V(X509_V_ERR_CA_CERT_MISSING_KEY_USAGE),
+ V(X509_V_ERR_EXTENSIONS_REQUIRE_VERSION_3),
+ V(X509_V_ERR_EC_KEY_EXPLICIT_PARAMS),
+ { 0, NULL, NULL }, // sentinel: NULL .string terminates every scan of this table
+};
+
+/*
+ * Return the X509_V_ERR code corresponding to the name of the constant.
+ * See https://github.com/openssl/openssl/blob/master/include/openssl/x509_vfy.h.in
+ * If not found, return -1
+ */
+/* Look up the numeric X509_V_ERR code whose constant name is <str>.
+ * Returns -1 when the name is unknown, or when the constant exists in the
+ * table but is not defined by the SSL library this was built against
+ * (its .code stayed at -1).
+ */
+int x509_v_err_str_to_int(const char *str)
+{
+ int i;
+
+ /* linear scan; the table is terminated by a NULL .string entry */
+ for (i = 0; x509_v_codes[i].string; i++) {
+ if (strcmp(str, x509_v_codes[i].string) == 0) {
+ return x509_v_codes[i].code;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Return the constant name corresponding to the X509_V_ERR code
+ * See https://github.com/openssl/openssl/blob/master/include/openssl/x509_vfy.h.in
+ * If not found, return NULL;
+ */
+/* Look up the constant name matching the numeric X509_V_ERR <code>.
+ * Returns NULL when no entry matches.
+ */
+const char *x509_v_err_int_to_str(int code)
+{
+ int i;
+
+ /* -1 means "undefined in this build" in the table, so it can match
+ * several entries; reject it up front instead of returning the first.
+ */
+ if (code == -1)
+ return NULL;
+
+ for (i = 0; x509_v_codes[i].string; i++) {
+ if (x509_v_codes[i].code == code) {
+ return x509_v_codes[i].string;
+ }
+ }
+ return NULL;
+}
+
+/* Resolve the numeric .code of every <x509_v_codes> entry at startup.
+ * The V() macro expanded .value to the constant's numeric value as a string
+ * when the SSL library defines it, or to the constant's own name when it does
+ * not; in the latter case .code is left at -1.
+ */
+void init_x509_v_err_tab(void)
+{
+ int i;
+
+ for (i = 0; x509_v_codes[i].string; i++) {
+ /* either the macro exists or it's equal to its own name */
+ if (strcmp(x509_v_codes[i].string, x509_v_codes[i].value) == 0)
+ continue;
+ x509_v_codes[i].code = atoi(x509_v_codes[i].value);
+ }
+}
+
+/* run once during the STG_REGISTER init stage */
+INITCALL0(STG_REGISTER, init_x509_v_err_tab);
+
+
+/*
+ * This function returns the number of seconds elapsed
+ * since the Epoch, 1970-01-01 00:00:00 +0000 (UTC) and the
+ * date presented in ASN1_GENERALIZEDTIME.
+ *
+ * In parsing error case, it returns -1.
+ */
+long asn1_generalizedtime_to_epoch(ASN1_GENERALIZEDTIME *d)
+{
+ long epoch;
+ char *p, *end;
+ /* cumulative days before the first of each month, non-leap year */
+ const unsigned short month_offset[12] = {
+ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334
+ };
+ unsigned long year, month;
+
+ if (!d || (d->type != V_ASN1_GENERALIZEDTIME)) return -1;
+
+ p = (char *)d->data;
+ end = p + d->length;
+
+ /* NOTE(review): only lengths and a few delimiters are validated; digit
+ * characters themselves are not checked (except for the optional seconds),
+ * so a malformed-but-well-sized field yields a bogus epoch, not -1 —
+ * confirm callers only pass library-validated ASN.1 times.
+ */
+ /* YYYY */
+ if (end - p < 4) return -1;
+ year = 1000 * (p[0] - '0') + 100 * (p[1] - '0') + 10 * (p[2] - '0') + p[3] - '0';
+ p += 4;
+ /* MM (01..12) */
+ if (end - p < 2) return -1;
+ month = 10 * (p[0] - '0') + p[1] - '0';
+ if (month < 1 || month > 12) return -1;
+ /* Compute the number of seconds since 1 jan 1970 and the beginning of current month
+ We consider leap years and the current month (<March or not) */
+ epoch = ( ((year - 1970) * 365)
+ + ((year - (month < 3)) / 4 - (year - (month < 3)) / 100 + (year - (month < 3)) / 400)
+ - ((1970 - 1) / 4 - (1970 - 1) / 100 + (1970 - 1) / 400)
+ + month_offset[month-1]
+ ) * 24 * 60 * 60;
+ p += 2;
+ if (end - p < 2) return -1;
+ /* Add the number of seconds of completed days of current month */
+ epoch += (10 * (p[0] - '0') + p[1] - '0' - 1) * 24 * 60 * 60;
+ p += 2;
+ if (end - p < 2) return -1;
+ /* Add the completed hours of the current day */
+ epoch += (10 * (p[0] - '0') + p[1] - '0') * 60 * 60;
+ p += 2;
+ if (end - p < 2) return -1;
+ /* Add the completed minutes of the current hour */
+ epoch += (10 * (p[0] - '0') + p[1] - '0') * 60;
+ p += 2;
+ if (p == end) return -1;
+ /* Test if there is available seconds */
+ if (p[0] < '0' || p[0] > '9')
+ goto nosec;
+ if (end - p < 2) return -1;
+ /* Add the seconds of the current minute */
+ epoch += 10 * (p[0] - '0') + p[1] - '0';
+ p += 2;
+ if (p == end) return -1;
+ /* Ignore seconds float part if present */
+ if (p[0] == '.') {
+ do {
+ if (++p == end) return -1;
+ } while (p[0] >= '0' && p[0] <= '9');
+ }
+
+nosec:
+ /* trailing timezone: 'Z' for UTC, or an explicit +hhmm/-hhmm offset */
+ if (p[0] == 'Z') {
+ if (end - p != 1) return -1;
+ return epoch;
+ }
+ else if (p[0] == '+') {
+ if (end - p != 5) return -1;
+ /* Apply timezone offset */
+ return epoch - ((10 * (p[1] - '0') + p[2] - '0') * 60 * 60 + (10 * (p[3] - '0') + p[4] - '0')) * 60;
+ }
+ else if (p[0] == '-') {
+ if (end - p != 5) return -1;
+ /* Apply timezone offset */
+ return epoch + ((10 * (p[1] - '0') + p[2] - '0') * 60 * 60 + (10 * (p[3] - '0') + p[4] - '0')) * 60;
+ }
+
+ /* missing or unrecognized timezone designator */
+ return -1;
+}
diff --git a/src/stats.c b/src/stats.c
new file mode 100644
index 0000000..0ed5758
--- /dev/null
+++ b/src/stats.c
@@ -0,0 +1,5521 @@
+/*
+ * Functions dedicated to statistics output and the stats socket
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2009 Krzysztof Piotr Oledzki <ole@ans.pl>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pwd.h>
+#include <grp.h>
+
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/applet.h>
+#include <haproxy/backend.h>
+#include <haproxy/base64.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/clock.h>
+#include <haproxy/compression.h>
+#include <haproxy/debug.h>
+#include <haproxy/errors.h>
+#include <haproxy/fd.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/list.h>
+#include <haproxy/listener.h>
+#include <haproxy/log.h>
+#include <haproxy/map-t.h>
+#include <haproxy/pattern-t.h>
+#include <haproxy/pipe.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/stats.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/ticks.h>
+#include <haproxy/time.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_auth-t.h>
+#include <haproxy/version.h>
+
+
+/* 4-character status codes shown on the stats admin page, indexed by the STAT_STATUS_* enum (each string is strictly 4 chars) */
+const char *stat_status_codes[STAT_STATUS_SIZE] = {
+ [STAT_STATUS_DENY] = "DENY",
+ [STAT_STATUS_DONE] = "DONE",
+ [STAT_STATUS_ERRP] = "ERRP",
+ [STAT_STATUS_EXCD] = "EXCD",
+ [STAT_STATUS_NONE] = "NONE",
+ [STAT_STATUS_PART] = "PART",
+ [STAT_STATUS_UNKN] = "UNKN",
+ [STAT_STATUS_IVAL] = "IVAL",
+};
+
+/* Field name/description for each INF_* position ("show info" output). Keep
+ * names stable: strings for new entries must be lower case or CamelCase,
+ * while the matching enum entries must be upper case.
+ */
+const struct name_desc info_fields[INF_TOTAL_FIELDS] = {
+ [INF_NAME] = { .name = "Name", .desc = "Product name" },
+ [INF_VERSION] = { .name = "Version", .desc = "Product version" },
+ [INF_RELEASE_DATE] = { .name = "Release_date", .desc = "Date of latest source code update" },
+ [INF_NBTHREAD] = { .name = "Nbthread", .desc = "Number of started threads (global.nbthread)" },
+ [INF_NBPROC] = { .name = "Nbproc", .desc = "Number of started worker processes (historical, always 1)" },
+ [INF_PROCESS_NUM] = { .name = "Process_num", .desc = "Relative worker process number (1)" },
+ [INF_PID] = { .name = "Pid", .desc = "This worker process identifier for the system" },
+ [INF_UPTIME] = { .name = "Uptime", .desc = "How long ago this worker process was started (days+hours+minutes+seconds)" },
+ [INF_UPTIME_SEC] = { .name = "Uptime_sec", .desc = "How long ago this worker process was started (seconds)" },
+ [INF_START_TIME_SEC] = { .name = "Start_time_sec", .desc = "Start time in seconds" },
+ [INF_MEMMAX_MB] = { .name = "Memmax_MB", .desc = "Worker process's hard limit on memory usage in MB (-m on command line)" },
+ [INF_MEMMAX_BYTES] = { .name = "Memmax_bytes", .desc = "Worker process's hard limit on memory usage in byes (-m on command line)" },
+ [INF_POOL_ALLOC_MB] = { .name = "PoolAlloc_MB", .desc = "Amount of memory allocated in pools (in MB)" },
+ [INF_POOL_ALLOC_BYTES] = { .name = "PoolAlloc_bytes", .desc = "Amount of memory allocated in pools (in bytes)" },
+ [INF_POOL_USED_MB] = { .name = "PoolUsed_MB", .desc = "Amount of pool memory currently used (in MB)" },
+ [INF_POOL_USED_BYTES] = { .name = "PoolUsed_bytes", .desc = "Amount of pool memory currently used (in bytes)" },
+ [INF_POOL_FAILED] = { .name = "PoolFailed", .desc = "Number of failed pool allocations since this worker was started" },
+ [INF_ULIMIT_N] = { .name = "Ulimit-n", .desc = "Hard limit on the number of per-process file descriptors" },
+ [INF_MAXSOCK] = { .name = "Maxsock", .desc = "Hard limit on the number of per-process sockets" },
+ [INF_MAXCONN] = { .name = "Maxconn", .desc = "Hard limit on the number of per-process connections (configured or imposed by Ulimit-n)" },
+ [INF_HARD_MAXCONN] = { .name = "Hard_maxconn", .desc = "Hard limit on the number of per-process connections (imposed by Memmax_MB or Ulimit-n)" },
+ [INF_CURR_CONN] = { .name = "CurrConns", .desc = "Current number of connections on this worker process" },
+ [INF_CUM_CONN] = { .name = "CumConns", .desc = "Total number of connections on this worker process since started" },
+ [INF_CUM_REQ] = { .name = "CumReq", .desc = "Total number of requests on this worker process since started" },
+ [INF_MAX_SSL_CONNS] = { .name = "MaxSslConns", .desc = "Hard limit on the number of per-process SSL endpoints (front+back), 0=unlimited" },
+ [INF_CURR_SSL_CONNS] = { .name = "CurrSslConns", .desc = "Current number of SSL endpoints on this worker process (front+back)" },
+ [INF_CUM_SSL_CONNS] = { .name = "CumSslConns", .desc = "Total number of SSL endpoints on this worker process since started (front+back)" },
+ [INF_MAXPIPES] = { .name = "Maxpipes", .desc = "Hard limit on the number of pipes for splicing, 0=unlimited" },
+ [INF_PIPES_USED] = { .name = "PipesUsed", .desc = "Current number of pipes in use in this worker process" },
+ [INF_PIPES_FREE] = { .name = "PipesFree", .desc = "Current number of allocated and available pipes in this worker process" },
+ [INF_CONN_RATE] = { .name = "ConnRate", .desc = "Number of front connections created on this worker process over the last second" },
+ [INF_CONN_RATE_LIMIT] = { .name = "ConnRateLimit", .desc = "Hard limit for ConnRate (global.maxconnrate)" },
+ [INF_MAX_CONN_RATE] = { .name = "MaxConnRate", .desc = "Highest ConnRate reached on this worker process since started (in connections per second)" },
+ [INF_SESS_RATE] = { .name = "SessRate", .desc = "Number of sessions created on this worker process over the last second" },
+ [INF_SESS_RATE_LIMIT] = { .name = "SessRateLimit", .desc = "Hard limit for SessRate (global.maxsessrate)" },
+ [INF_MAX_SESS_RATE] = { .name = "MaxSessRate", .desc = "Highest SessRate reached on this worker process since started (in sessions per second)" },
+ [INF_SSL_RATE] = { .name = "SslRate", .desc = "Number of SSL connections created on this worker process over the last second" },
+ [INF_SSL_RATE_LIMIT] = { .name = "SslRateLimit", .desc = "Hard limit for SslRate (global.maxsslrate)" },
+ [INF_MAX_SSL_RATE] = { .name = "MaxSslRate", .desc = "Highest SslRate reached on this worker process since started (in connections per second)" },
+ [INF_SSL_FRONTEND_KEY_RATE] = { .name = "SslFrontendKeyRate", .desc = "Number of SSL keys created on frontends in this worker process over the last second" },
+ [INF_SSL_FRONTEND_MAX_KEY_RATE] = { .name = "SslFrontendMaxKeyRate", .desc = "Highest SslFrontendKeyRate reached on this worker process since started (in SSL keys per second)" },
+ [INF_SSL_FRONTEND_SESSION_REUSE_PCT] = { .name = "SslFrontendSessionReuse_pct", .desc = "Percent of frontend SSL connections which did not require a new key" },
+ [INF_SSL_BACKEND_KEY_RATE] = { .name = "SslBackendKeyRate", .desc = "Number of SSL keys created on backends in this worker process over the last second" },
+ [INF_SSL_BACKEND_MAX_KEY_RATE] = { .name = "SslBackendMaxKeyRate", .desc = "Highest SslBackendKeyRate reached on this worker process since started (in SSL keys per second)" },
+ [INF_SSL_CACHE_LOOKUPS] = { .name = "SslCacheLookups", .desc = "Total number of SSL session ID lookups in the SSL session cache on this worker since started" },
+ [INF_SSL_CACHE_MISSES] = { .name = "SslCacheMisses", .desc = "Total number of SSL session ID lookups that didn't find a session in the SSL session cache on this worker since started" },
+ [INF_COMPRESS_BPS_IN] = { .name = "CompressBpsIn", .desc = "Number of bytes submitted to the HTTP compressor in this worker process over the last second" },
+ [INF_COMPRESS_BPS_OUT] = { .name = "CompressBpsOut", .desc = "Number of bytes emitted by the HTTP compressor in this worker process over the last second" },
+ [INF_COMPRESS_BPS_RATE_LIM] = { .name = "CompressBpsRateLim", .desc = "Limit of CompressBpsOut beyond which HTTP compression is automatically disabled" },
+ [INF_ZLIB_MEM_USAGE] = { .name = "ZlibMemUsage", .desc = "Amount of memory currently used by HTTP compression on the current worker process (in bytes)" },
+ [INF_MAX_ZLIB_MEM_USAGE] = { .name = "MaxZlibMemUsage", .desc = "Limit on the amount of memory used by HTTP compression above which it is automatically disabled (in bytes, see global.maxzlibmem)" },
+ [INF_TASKS] = { .name = "Tasks", .desc = "Total number of tasks in the current worker process (active + sleeping)" },
+ [INF_RUN_QUEUE] = { .name = "Run_queue", .desc = "Total number of active tasks+tasklets in the current worker process" },
+ [INF_IDLE_PCT] = { .name = "Idle_pct", .desc = "Percentage of last second spent waiting in the current worker thread" },
+ [INF_NODE] = { .name = "node", .desc = "Node name (global.node)" },
+ [INF_DESCRIPTION] = { .name = "description", .desc = "Node description (global.description)" },
+ [INF_STOPPING] = { .name = "Stopping", .desc = "1 if the worker process is currently stopping, otherwise zero" },
+ [INF_JOBS] = { .name = "Jobs", .desc = "Current number of active jobs on the current worker process (frontend connections, master connections, listeners)" },
+ [INF_UNSTOPPABLE_JOBS] = { .name = "Unstoppable Jobs", .desc = "Current number of unstoppable jobs on the current worker process (master connections)" },
+ [INF_LISTENERS] = { .name = "Listeners", .desc = "Current number of active listeners on the current worker process" },
+ [INF_ACTIVE_PEERS] = { .name = "ActivePeers", .desc = "Current number of verified active peers connections on the current worker process" },
+ [INF_CONNECTED_PEERS] = { .name = "ConnectedPeers", .desc = "Current number of peers having passed the connection step on the current worker process" },
+ [INF_DROPPED_LOGS] = { .name = "DroppedLogs", .desc = "Total number of dropped logs for current worker process since started" },
+ [INF_BUSY_POLLING] = { .name = "BusyPolling", .desc = "1 if busy-polling is currently in use on the worker process, otherwise zero (config.busy-polling)" },
+ [INF_FAILED_RESOLUTIONS] = { .name = "FailedResolutions", .desc = "Total number of failed DNS resolutions in current worker process since started" },
+ [INF_TOTAL_BYTES_OUT] = { .name = "TotalBytesOut", .desc = "Total number of bytes emitted by current worker process since started" },
+ [INF_TOTAL_SPLICED_BYTES_OUT] = { .name = "TotalSplicedBytesOut", .desc = "Total number of bytes emitted by current worker process through a kernel pipe since started" },
+ [INF_BYTES_OUT_RATE] = { .name = "BytesOutRate", .desc = "Number of bytes emitted by current worker process over the last second" },
+ [INF_DEBUG_COMMANDS_ISSUED] = { .name = "DebugCommandsIssued", .desc = "Number of debug commands issued on this process (anything > 0 is unsafe)" },
+ [INF_CUM_LOG_MSGS] = { .name = "CumRecvLogs", .desc = "Total number of log messages received by log-forwarding listeners on this worker process since started" },
+ [INF_BUILD_INFO] = { .name = "Build info", .desc = "Build info" },
+ [INF_TAINTED] = { .name = "Tainted", .desc = "Experimental features used" },
+ [INF_WARNINGS] = { .name = "TotalWarnings", .desc = "Total warnings issued" },
+ [INF_MAXCONN_REACHED] = { .name = "MaxconnReached", .desc = "Number of times an accepted connection resulted in Maxconn being reached" },
+ [INF_BOOTTIME_MS] = { .name = "BootTime_ms", .desc = "How long ago it took to parse and process the config before being ready (milliseconds)" },
+ [INF_NICED_TASKS] = { .name = "Niced_tasks", .desc = "Total number of active tasks+tasklets in the current worker process (Run_queue) that are niced" },
+};
+
+const struct name_desc stat_fields[ST_F_TOTAL_FIELDS] = {
+ [ST_F_PXNAME] = { .name = "pxname", .desc = "Proxy name" },
+ [ST_F_SVNAME] = { .name = "svname", .desc = "Server name" },
+ [ST_F_QCUR] = { .name = "qcur", .desc = "Number of current queued connections" },
+ [ST_F_QMAX] = { .name = "qmax", .desc = "Highest value of queued connections encountered since process started" },
+ [ST_F_SCUR] = { .name = "scur", .desc = "Number of current sessions on the frontend, backend or server" },
+ [ST_F_SMAX] = { .name = "smax", .desc = "Highest value of current sessions encountered since process started" },
+ [ST_F_SLIM] = { .name = "slim", .desc = "Frontend/listener/server's maxconn, backend's fullconn" },
+ [ST_F_STOT] = { .name = "stot", .desc = "Total number of sessions since process started" },
+ [ST_F_BIN] = { .name = "bin", .desc = "Total number of request bytes since process started" },
+ [ST_F_BOUT] = { .name = "bout", .desc = "Total number of response bytes since process started" },
+ [ST_F_DREQ] = { .name = "dreq", .desc = "Total number of denied requests since process started" },
+ [ST_F_DRESP] = { .name = "dresp", .desc = "Total number of denied responses since process started" },
+ [ST_F_EREQ] = { .name = "ereq", .desc = "Total number of invalid requests since process started" },
+ [ST_F_ECON] = { .name = "econ", .desc = "Total number of failed connections to server since the worker process started" },
+ [ST_F_ERESP] = { .name = "eresp", .desc = "Total number of invalid responses since the worker process started" },
+ [ST_F_WRETR] = { .name = "wretr", .desc = "Total number of server connection retries since the worker process started" },
+ [ST_F_WREDIS] = { .name = "wredis", .desc = "Total number of server redispatches due to connection failures since the worker process started" },
+ [ST_F_STATUS] = { .name = "status", .desc = "Frontend/listen status: OPEN/WAITING/FULL/STOP; backend: UP/DOWN; server: last check status" },
+ [ST_F_WEIGHT] = { .name = "weight", .desc = "Server's effective weight, or sum of active servers' effective weights for a backend" },
+ [ST_F_ACT] = { .name = "act", .desc = "Total number of active UP servers with a non-zero weight" },
+ [ST_F_BCK] = { .name = "bck", .desc = "Total number of backup UP servers with a non-zero weight" },
+ [ST_F_CHKFAIL] = { .name = "chkfail", .desc = "Total number of failed individual health checks per server/backend, since the worker process started" },
+ [ST_F_CHKDOWN] = { .name = "chkdown", .desc = "Total number of failed checks causing UP to DOWN server transitions, per server/backend, since the worker process started" },
+ [ST_F_LASTCHG] = { .name = "lastchg", .desc = "How long ago the last server state changed, in seconds" },
+ [ST_F_DOWNTIME] = { .name = "downtime", .desc = "Total time spent in DOWN state, for server or backend" },
+ [ST_F_QLIMIT] = { .name = "qlimit", .desc = "Limit on the number of connections in queue, for servers only (maxqueue argument)" },
+ [ST_F_PID] = { .name = "pid", .desc = "Relative worker process number (1)" },
+ [ST_F_IID] = { .name = "iid", .desc = "Frontend or Backend numeric identifier ('id' setting)" },
+ [ST_F_SID] = { .name = "sid", .desc = "Server numeric identifier ('id' setting)" },
+ [ST_F_THROTTLE] = { .name = "throttle", .desc = "Throttling ratio applied to a server's maxconn and weight during the slowstart period (0 to 100%)" },
+ [ST_F_LBTOT] = { .name = "lbtot", .desc = "Total number of requests routed by load balancing since the worker process started (ignores queue pop and stickiness)" },
+ [ST_F_TRACKED] = { .name = "tracked", .desc = "Name of the other server this server tracks for its state" },
+ [ST_F_TYPE] = { .name = "type", .desc = "Type of the object (Listener, Frontend, Backend, Server)" },
+ [ST_F_RATE] = { .name = "rate", .desc = "Total number of sessions processed by this object over the last second (sessions for listeners/frontends, requests for backends/servers)" },
+ [ST_F_RATE_LIM] = { .name = "rate_lim", .desc = "Limit on the number of sessions accepted in a second (frontend only, 'rate-limit sessions' setting)" },
+ [ST_F_RATE_MAX] = { .name = "rate_max", .desc = "Highest value of sessions per second observed since the worker process started" },
+ [ST_F_CHECK_STATUS] = { .name = "check_status", .desc = "Status report of the server's latest health check, prefixed with '*' if a check is currently in progress" },
+ [ST_F_CHECK_CODE] = { .name = "check_code", .desc = "HTTP/SMTP/LDAP status code reported by the latest server health check" },
+ [ST_F_CHECK_DURATION] = { .name = "check_duration", .desc = "Total duration of the latest server health check, in milliseconds" },
+ [ST_F_HRSP_1XX] = { .name = "hrsp_1xx", .desc = "Total number of HTTP responses with status 100-199 returned by this object since the worker process started" },
+ [ST_F_HRSP_2XX] = { .name = "hrsp_2xx", .desc = "Total number of HTTP responses with status 200-299 returned by this object since the worker process started" },
+ [ST_F_HRSP_3XX] = { .name = "hrsp_3xx", .desc = "Total number of HTTP responses with status 300-399 returned by this object since the worker process started" },
+ [ST_F_HRSP_4XX] = { .name = "hrsp_4xx", .desc = "Total number of HTTP responses with status 400-499 returned by this object since the worker process started" },
+ [ST_F_HRSP_5XX] = { .name = "hrsp_5xx", .desc = "Total number of HTTP responses with status 500-599 returned by this object since the worker process started" },
+ [ST_F_HRSP_OTHER] = { .name = "hrsp_other", .desc = "Total number of HTTP responses with status <100, >599 returned by this object since the worker process started (error -1 included)" },
+ [ST_F_HANAFAIL] = { .name = "hanafail", .desc = "Total number of failed checks caused by an 'on-error' directive after an 'observe' condition matched" },
+ [ST_F_REQ_RATE] = { .name = "req_rate", .desc = "Number of HTTP requests processed over the last second on this object" },
+ [ST_F_REQ_RATE_MAX] = { .name = "req_rate_max", .desc = "Highest value of http requests observed since the worker process started" },
+ [ST_F_REQ_TOT] = { .name = "req_tot", .desc = "Total number of HTTP requests processed by this object since the worker process started" },
+ [ST_F_CLI_ABRT] = { .name = "cli_abrt", .desc = "Total number of requests or connections aborted by the client since the worker process started" },
+ [ST_F_SRV_ABRT] = { .name = "srv_abrt", .desc = "Total number of requests or connections aborted by the server since the worker process started" },
+ [ST_F_COMP_IN] = { .name = "comp_in", .desc = "Total number of bytes submitted to the HTTP compressor for this object since the worker process started" },
+ [ST_F_COMP_OUT] = { .name = "comp_out", .desc = "Total number of bytes emitted by the HTTP compressor for this object since the worker process started" },
+ [ST_F_COMP_BYP] = { .name = "comp_byp", .desc = "Total number of bytes that bypassed HTTP compression for this object since the worker process started (CPU/memory/bandwidth limitation)" },
+ [ST_F_COMP_RSP] = { .name = "comp_rsp", .desc = "Total number of HTTP responses that were compressed for this object since the worker process started" },
+ [ST_F_LASTSESS] = { .name = "lastsess", .desc = "How long ago some traffic was seen on this object on this worker process, in seconds" },
+ [ST_F_LAST_CHK] = { .name = "last_chk", .desc = "Short description of the latest health check report for this server (see also check_desc)" },
+ [ST_F_LAST_AGT] = { .name = "last_agt", .desc = "Short description of the latest agent check report for this server (see also agent_desc)" },
+ [ST_F_QTIME] = { .name = "qtime", .desc = "Time spent in the queue, in milliseconds, averaged over the 1024 last requests (backend/server)" },
+ [ST_F_CTIME] = { .name = "ctime", .desc = "Time spent waiting for a connection to complete, in milliseconds, averaged over the 1024 last requests (backend/server)" },
+ [ST_F_RTIME] = { .name = "rtime", .desc = "Time spent waiting for a server response, in milliseconds, averaged over the 1024 last requests (backend/server)" },
+ [ST_F_TTIME] = { .name = "ttime", .desc = "Total request+response time (request+queue+connect+response+processing), in milliseconds, averaged over the 1024 last requests (backend/server)" },
+ [ST_F_AGENT_STATUS] = { .name = "agent_status", .desc = "Status report of the server's latest agent check, prefixed with '*' if a check is currently in progress" },
+ [ST_F_AGENT_CODE] = { .name = "agent_code", .desc = "Status code reported by the latest server agent check" },
+ [ST_F_AGENT_DURATION] = { .name = "agent_duration", .desc = "Total duration of the latest server agent check, in milliseconds" },
+ [ST_F_CHECK_DESC] = { .name = "check_desc", .desc = "Textual description of the latest health check report for this server" },
+ [ST_F_AGENT_DESC] = { .name = "agent_desc", .desc = "Textual description of the latest agent check report for this server" },
+ [ST_F_CHECK_RISE] = { .name = "check_rise", .desc = "Number of successful health checks before declaring a server UP (server 'rise' setting)" },
+ [ST_F_CHECK_FALL] = { .name = "check_fall", .desc = "Number of failed health checks before declaring a server DOWN (server 'fall' setting)" },
+ [ST_F_CHECK_HEALTH] = { .name = "check_health", .desc = "Current server health check level (0..fall-1=DOWN, fall..rise-1=UP)" },
+ [ST_F_AGENT_RISE] = { .name = "agent_rise", .desc = "Number of successful agent checks before declaring a server UP (server 'rise' setting)" },
+ [ST_F_AGENT_FALL] = { .name = "agent_fall", .desc = "Number of failed agent checks before declaring a server DOWN (server 'fall' setting)" },
+ [ST_F_AGENT_HEALTH] = { .name = "agent_health", .desc = "Current server agent check level (0..fall-1=DOWN, fall..rise-1=UP)" },
+ [ST_F_ADDR] = { .name = "addr", .desc = "Server's address:port, shown only if show-legends is set, or at levels oper/admin for the CLI" },
+ [ST_F_COOKIE] = { .name = "cookie", .desc = "Backend's cookie name or Server's cookie value, shown only if show-legends is set, or at levels oper/admin for the CLI" },
+ [ST_F_MODE] = { .name = "mode", .desc = "'mode' setting (tcp/http/health/cli)" },
+ [ST_F_ALGO] = { .name = "algo", .desc = "Backend's load balancing algorithm, shown only if show-legends is set, or at levels oper/admin for the CLI" },
+ [ST_F_CONN_RATE] = { .name = "conn_rate", .desc = "Number of new connections accepted over the last second on the frontend for this worker process" },
+ [ST_F_CONN_RATE_MAX] = { .name = "conn_rate_max", .desc = "Highest value of connections per second observed since the worker process started" },
+ [ST_F_CONN_TOT] = { .name = "conn_tot", .desc = "Total number of new connections accepted on this frontend since the worker process started" },
+ [ST_F_INTERCEPTED] = { .name = "intercepted", .desc = "Total number of HTTP requests intercepted on the frontend (redirects/stats/services) since the worker process started" },
+ [ST_F_DCON] = { .name = "dcon", .desc = "Total number of incoming connections blocked on a listener/frontend by a tcp-request connection rule since the worker process started" },
+ [ST_F_DSES] = { .name = "dses", .desc = "Total number of incoming sessions blocked on a listener/frontend by a tcp-request connection rule since the worker process started" },
+ [ST_F_WREW] = { .name = "wrew", .desc = "Total number of failed HTTP header rewrites since the worker process started" },
+ [ST_F_CONNECT] = { .name = "connect", .desc = "Total number of outgoing connection attempts on this backend/server since the worker process started" },
+ [ST_F_REUSE] = { .name = "reuse", .desc = "Total number of reused connection on this backend/server since the worker process started" },
+ [ST_F_CACHE_LOOKUPS] = { .name = "cache_lookups", .desc = "Total number of HTTP requests looked up in the cache on this frontend/backend since the worker process started" },
+ [ST_F_CACHE_HITS] = { .name = "cache_hits", .desc = "Total number of HTTP requests not found in the cache on this frontend/backend since the worker process started" },
+ [ST_F_SRV_ICUR] = { .name = "srv_icur", .desc = "Current number of idle connections available for reuse on this server" },
+ [ST_F_SRV_ILIM] = { .name = "src_ilim", .desc = "Limit on the number of available idle connections on this server (server 'pool_max_conn' directive)" },
+ [ST_F_QT_MAX] = { .name = "qtime_max", .desc = "Maximum observed time spent in the queue, in milliseconds (backend/server)" },
+ [ST_F_CT_MAX] = { .name = "ctime_max", .desc = "Maximum observed time spent waiting for a connection to complete, in milliseconds (backend/server)" },
+ [ST_F_RT_MAX] = { .name = "rtime_max", .desc = "Maximum observed time spent waiting for a server response, in milliseconds (backend/server)" },
+ [ST_F_TT_MAX] = { .name = "ttime_max", .desc = "Maximum observed total request+response time (request+queue+connect+response+processing), in milliseconds (backend/server)" },
+ [ST_F_EINT] = { .name = "eint", .desc = "Total number of internal errors since process started"},
+ [ST_F_IDLE_CONN_CUR] = { .name = "idle_conn_cur", .desc = "Current number of unsafe idle connections"},
+ [ST_F_SAFE_CONN_CUR] = { .name = "safe_conn_cur", .desc = "Current number of safe idle connections"},
+ [ST_F_USED_CONN_CUR] = { .name = "used_conn_cur", .desc = "Current number of connections in use"},
+ [ST_F_NEED_CONN_EST] = { .name = "need_conn_est", .desc = "Estimated needed number of connections"},
+ [ST_F_UWEIGHT] = { .name = "uweight", .desc = "Server's user weight, or sum of active servers' user weights for a backend" },
+ [ST_F_AGG_SRV_CHECK_STATUS] = { .name = "agg_server_check_status", .desc = "[DEPRECATED] Backend's aggregated gauge of servers' status" },
+ [ST_F_AGG_SRV_STATUS ] = { .name = "agg_server_status", .desc = "Backend's aggregated gauge of servers' status" },
+ [ST_F_AGG_CHECK_STATUS] = { .name = "agg_check_status", .desc = "Backend's aggregated gauge of servers' state check status" },
+ [ST_F_SRID] = { .name = "srid", .desc = "Server id revision, to prevent server id reuse mixups" },
+ [ST_F_SESS_OTHER] = { .name = "sess_other", .desc = "Total number of sessions other than HTTP since process started" },
+ [ST_F_H1SESS] = { .name = "h1sess", .desc = "Total number of HTTP/1 sessions since process started" },
+ [ST_F_H2SESS] = { .name = "h2sess", .desc = "Total number of HTTP/2 sessions since process started" },
+ [ST_F_H3SESS] = { .name = "h3sess", .desc = "Total number of HTTP/3 sessions since process started" },
+ [ST_F_REQ_OTHER] = { .name = "req_other", .desc = "Total number of sessions other than HTTP processed by this object since the worker process started" },
+ [ST_F_H1REQ] = { .name = "h1req", .desc = "Total number of HTTP/1 sessions processed by this object since the worker process started" },
+ [ST_F_H2REQ] = { .name = "h2req", .desc = "Total number of hTTP/2 sessions processed by this object since the worker process started" },
+ [ST_F_H3REQ] = { .name = "h3req", .desc = "Total number of HTTP/3 sessions processed by this object since the worker process started" },
+ [ST_F_PROTO] = { .name = "proto", .desc = "Protocol" },
+};
+
/* one line of "show info" output, rebuilt per thread */
THREAD_LOCAL struct field info[INF_TOTAL_FIELDS];

/* per-domain description of statistics fields (static and dynamic);
 * stat_count[] holds the number of entries in each stat_f[] table.
 */
static struct name_desc *stat_f[STATS_DOMAIN_COUNT];
static size_t stat_count[STATS_DOMAIN_COUNT];

/* one line of stats output per domain, rebuilt per thread */
THREAD_LOCAL struct field *stat_l[STATS_DOMAIN_COUNT];

/* list of all registered stats modules, one list head per domain */
static struct list stats_module_list[STATS_DOMAIN_COUNT] = {
	LIST_HEAD_INIT(stats_module_list[STATS_DOMAIN_PROXY]),
	LIST_HEAD_INIT(stats_module_list[STATS_DOMAIN_RESOLVERS]),
};

/* per-thread scratch area for module counters */
THREAD_LOCAL void *trash_counters;
/* per-thread output buffer used by all the dump functions below */
static THREAD_LOCAL struct buffer trash_chunk = BUF_NULL;
+
+static inline uint8_t stats_get_domain(uint32_t domain)
+{
+ return domain >> STATS_DOMAIN & STATS_DOMAIN_MASK;
+}
+
+static inline enum stats_domain_px_cap stats_px_get_cap(uint32_t domain)
+{
+ return domain >> STATS_PX_CAP & STATS_PX_CAP_MASK;
+}
+
+static void stats_dump_json_schema(struct buffer *out);
+
+int stats_putchk(struct appctx *appctx, struct htx *htx)
+{
+ struct stconn *sc = appctx_sc(appctx);
+ struct channel *chn = sc_ic(sc);
+ struct buffer *chk = &trash_chunk;
+
+ if (htx) {
+ if (chk->data >= channel_htx_recv_max(chn, htx)) {
+ sc_need_room(sc, chk->data);
+ return 0;
+ }
+ if (!htx_add_data_atonce(htx, ist2(chk->area, chk->data))) {
+ sc_need_room(sc, 0);
+ return 0;
+ }
+ channel_add_input(chn, chk->data);
+ chk->data = 0;
+ }
+ else {
+ if (applet_putchk(appctx, chk) == -1)
+ return 0;
+ }
+ return 1;
+}
+
/* Returns a pointer into the request URI of the stats applet where the
 * scope string begins. The request start-line is expected to still be
 * present at the head of the HTX message (enforced by BUG_ON below).
 * NOTE(review): ctx->scope_str is used here as a byte offset from the
 * start of the URI — confirm against where scope_str is computed.
 */
static const char *stats_scope_ptr(struct appctx *appctx, struct stconn *sc)
{
	struct show_stat_ctx *ctx = appctx->svcctx;
	struct channel *req = sc_oc(sc);
	struct htx *htx = htxbuf(&req->buf);
	struct htx_blk *blk;
	struct ist uri;

	/* the first block must be the request start-line */
	blk = htx_get_head_blk(htx);
	BUG_ON(!blk || htx_get_blk_type(blk) != HTX_BLK_REQ_SL);
	ALREADY_CHECKED(blk);
	uri = htx_sl_req_uri(htx_get_blk_ptr(htx, blk));
	return uri.ptr + ctx->scope_str;
}
+
+/*
+ * http_stats_io_handler()
+ * -> stats_dump_stat_to_buffer() // same as above, but used for CSV or HTML
+ * -> stats_dump_csv_header() // emits the CSV headers (same as above)
+ * -> stats_dump_json_header() // emits the JSON headers (same as above)
+ * -> stats_dump_html_head() // emits the HTML headers
+ * -> stats_dump_html_info() // emits the equivalent of "show info" at the top
+ * -> stats_dump_proxy_to_buffer() // same as above, valid for CSV and HTML
+ * -> stats_dump_html_px_hdr()
+ * -> stats_dump_fe_stats()
+ * -> stats_dump_li_stats()
+ * -> stats_dump_sv_stats()
+ * -> stats_dump_be_stats()
+ * -> stats_dump_html_px_end()
+ * -> stats_dump_html_end() // emits HTML trailer
+ * -> stats_dump_json_end() // emits JSON trailer
+ */
+
+
+/* Dumps the stats CSV header to the local trash buffer. The caller is
+ * responsible for clearing it if needed.
+ * NOTE: Some tools happen to rely on the field position instead of its name,
+ * so please only append new fields at the end, never in the middle.
+ */
+static void stats_dump_csv_header(enum stats_domain domain)
+{
+ int field;
+
+ chunk_appendf(&trash_chunk, "# ");
+ if (stat_f[domain]) {
+ for (field = 0; field < stat_count[domain]; ++field) {
+ chunk_appendf(&trash_chunk, "%s,", stat_f[domain][field].name);
+
+ /* print special delimiter on proxy stats to mark end of
+ static fields */
+ if (domain == STATS_DOMAIN_PROXY && field + 1 == ST_F_TOTAL_FIELDS)
+ chunk_appendf(&trash_chunk, "-,");
+ }
+ }
+
+ chunk_appendf(&trash_chunk, "\n");
+}
+
+/* Emits a stats field without any surrounding element and properly encoded to
+ * resist CSV output. Returns non-zero on success, 0 if the buffer is full.
+ */
+int stats_emit_raw_data_field(struct buffer *out, const struct field *f)
+{
+ switch (field_format(f, 0)) {
+ case FF_EMPTY: return 1;
+ case FF_S32: return chunk_appendf(out, "%d", f->u.s32);
+ case FF_U32: return chunk_appendf(out, "%u", f->u.u32);
+ case FF_S64: return chunk_appendf(out, "%lld", (long long)f->u.s64);
+ case FF_U64: return chunk_appendf(out, "%llu", (unsigned long long)f->u.u64);
+ case FF_FLT: {
+ size_t prev_data = out->data;
+ out->data = flt_trim(out->area, prev_data, chunk_appendf(out, "%f", f->u.flt));
+ return out->data;
+ }
+ case FF_STR: return csv_enc_append(field_str(f, 0), 1, 2, out) != NULL;
+ default: return chunk_appendf(out, "[INCORRECT_FIELD_TYPE_%08x]", f->type);
+ }
+}
+
/* Returns a printable string representation of field <f> for the HTML page.
 * Numeric values go through the U2H/F2H formatting helpers, strings are
 * returned as-is, and empty or unknown formats yield "".
 * NOTE(review): FF_S32/FF_S64 are also passed through U2H, which presumably
 * formats unsigned values — confirm rendering of negative numbers.
 */
const char *field_to_html_str(const struct field *f)
{
	switch (field_format(f, 0)) {
	case FF_S32: return U2H(f->u.s32);
	case FF_S64: return U2H(f->u.s64);
	case FF_U64: return U2H(f->u.u64);
	case FF_U32: return U2H(f->u.u32);
	case FF_FLT: return F2H(f->u.flt);
	case FF_STR: return field_str(f, 0);
	case FF_EMPTY:
	default:
		return "";
	}
}
+
/* Emits a stats field prefixed with its type. No CSV encoding is prepared, the
 * output is supposed to be used on its own line. Returns non-zero on success, 0
 * if the buffer is full.
 */
int stats_emit_typed_data_field(struct buffer *out, const struct field *f)
{
	switch (field_format(f, 0)) {
	case FF_EMPTY: return 1;
	case FF_S32: return chunk_appendf(out, "s32:%d", f->u.s32);
	case FF_U32: return chunk_appendf(out, "u32:%u", f->u.u32);
	case FF_S64: return chunk_appendf(out, "s64:%lld", (long long)f->u.s64);
	case FF_U64: return chunk_appendf(out, "u64:%llu", (unsigned long long)f->u.u64);
	case FF_FLT: {
		/* print then trim trailing zeroes from the "%f" output */
		size_t prev_data = out->data;
		out->data = flt_trim(out->area, prev_data, chunk_appendf(out, "flt:%f", f->u.flt));
		return out->data;
	}
	case FF_STR: return chunk_appendf(out, "str:%s", field_str(f, 0));
	default: return chunk_appendf(out, "%08x:?", f->type);
	}
}
+
+/* Limit JSON integer values to the range [-(2**53)+1, (2**53)-1] as per
+ * the recommendation for interoperable integers in section 6 of RFC 7159.
+ */
+#define JSON_INT_MAX ((1ULL << 53) - 1)
+#define JSON_INT_MIN (0 - JSON_INT_MAX)
+
+/* Emits a stats field value and its type in JSON.
+ * Returns non-zero on success, 0 on error.
+ */
+int stats_emit_json_data_field(struct buffer *out, const struct field *f)
+{
+ int old_len;
+ char buf[20];
+ const char *type, *value = buf, *quote = "";
+
+ switch (field_format(f, 0)) {
+ case FF_EMPTY: return 1;
+ case FF_S32: type = "\"s32\"";
+ snprintf(buf, sizeof(buf), "%d", f->u.s32);
+ break;
+ case FF_U32: type = "\"u32\"";
+ snprintf(buf, sizeof(buf), "%u", f->u.u32);
+ break;
+ case FF_S64: type = "\"s64\"";
+ if (f->u.s64 < JSON_INT_MIN || f->u.s64 > JSON_INT_MAX)
+ return 0;
+ type = "\"s64\"";
+ snprintf(buf, sizeof(buf), "%lld", (long long)f->u.s64);
+ break;
+ case FF_U64: if (f->u.u64 > JSON_INT_MAX)
+ return 0;
+ type = "\"u64\"";
+ snprintf(buf, sizeof(buf), "%llu",
+ (unsigned long long) f->u.u64);
+ break;
+ case FF_FLT: type = "\"flt\"";
+ flt_trim(buf, 0, snprintf(buf, sizeof(buf), "%f", f->u.flt));
+ break;
+ case FF_STR: type = "\"str\"";
+ value = field_str(f, 0);
+ quote = "\"";
+ break;
+ default: snprintf(buf, sizeof(buf), "%u", f->type);
+ type = buf;
+ value = "unknown";
+ quote = "\"";
+ break;
+ }
+
+ old_len = out->data;
+ chunk_appendf(out, ",\"value\":{\"type\":%s,\"value\":%s%s%s}",
+ type, quote, value, quote);
+ return !(old_len == out->data);
+}
+
+/* Emits an encoding of the field type on 3 characters followed by a delimiter.
+ * Returns non-zero on success, 0 if the buffer is full.
+ */
+int stats_emit_field_tags(struct buffer *out, const struct field *f,
+ char delim)
+{
+ char origin, nature, scope;
+
+ switch (field_origin(f, 0)) {
+ case FO_METRIC: origin = 'M'; break;
+ case FO_STATUS: origin = 'S'; break;
+ case FO_KEY: origin = 'K'; break;
+ case FO_CONFIG: origin = 'C'; break;
+ case FO_PRODUCT: origin = 'P'; break;
+ default: origin = '?'; break;
+ }
+
+ switch (field_nature(f, 0)) {
+ case FN_GAUGE: nature = 'G'; break;
+ case FN_LIMIT: nature = 'L'; break;
+ case FN_MIN: nature = 'm'; break;
+ case FN_MAX: nature = 'M'; break;
+ case FN_RATE: nature = 'R'; break;
+ case FN_COUNTER: nature = 'C'; break;
+ case FN_DURATION: nature = 'D'; break;
+ case FN_AGE: nature = 'A'; break;
+ case FN_TIME: nature = 'T'; break;
+ case FN_NAME: nature = 'N'; break;
+ case FN_OUTPUT: nature = 'O'; break;
+ case FN_AVG: nature = 'a'; break;
+ default: nature = '?'; break;
+ }
+
+ switch (field_scope(f, 0)) {
+ case FS_PROCESS: scope = 'P'; break;
+ case FS_SERVICE: scope = 'S'; break;
+ case FS_SYSTEM: scope = 's'; break;
+ case FS_CLUSTER: scope = 'C'; break;
+ default: scope = '?'; break;
+ }
+
+ return chunk_appendf(out, "%c%c%c%c", origin, nature, scope, delim);
+}
+
+/* Emits an encoding of the field type as JSON.
+ * Returns non-zero on success, 0 if the buffer is full.
+ */
+int stats_emit_json_field_tags(struct buffer *out, const struct field *f)
+{
+ const char *origin, *nature, *scope;
+ int old_len;
+
+ switch (field_origin(f, 0)) {
+ case FO_METRIC: origin = "Metric"; break;
+ case FO_STATUS: origin = "Status"; break;
+ case FO_KEY: origin = "Key"; break;
+ case FO_CONFIG: origin = "Config"; break;
+ case FO_PRODUCT: origin = "Product"; break;
+ default: origin = "Unknown"; break;
+ }
+
+ switch (field_nature(f, 0)) {
+ case FN_GAUGE: nature = "Gauge"; break;
+ case FN_LIMIT: nature = "Limit"; break;
+ case FN_MIN: nature = "Min"; break;
+ case FN_MAX: nature = "Max"; break;
+ case FN_RATE: nature = "Rate"; break;
+ case FN_COUNTER: nature = "Counter"; break;
+ case FN_DURATION: nature = "Duration"; break;
+ case FN_AGE: nature = "Age"; break;
+ case FN_TIME: nature = "Time"; break;
+ case FN_NAME: nature = "Name"; break;
+ case FN_OUTPUT: nature = "Output"; break;
+ case FN_AVG: nature = "Avg"; break;
+ default: nature = "Unknown"; break;
+ }
+
+ switch (field_scope(f, 0)) {
+ case FS_PROCESS: scope = "Process"; break;
+ case FS_SERVICE: scope = "Service"; break;
+ case FS_SYSTEM: scope = "System"; break;
+ case FS_CLUSTER: scope = "Cluster"; break;
+ default: scope = "Unknown"; break;
+ }
+
+ old_len = out->data;
+ chunk_appendf(out, "\"tags\":{"
+ "\"origin\":\"%s\","
+ "\"nature\":\"%s\","
+ "\"scope\":\"%s\""
+ "}", origin, nature, scope);
+ return !(old_len == out->data);
+}
+
+/* Dump all fields from <stats> into <out> using CSV format */
+static int stats_dump_fields_csv(struct buffer *out,
+ const struct field *stats, size_t stats_count,
+ struct show_stat_ctx *ctx)
+{
+ int domain = ctx->domain;
+ int field;
+
+ for (field = 0; field < stats_count; ++field) {
+ if (!stats_emit_raw_data_field(out, &stats[field]))
+ return 0;
+ if (!chunk_strcat(out, ","))
+ return 0;
+
+ /* print special delimiter on proxy stats to mark end of
+ static fields */
+ if (domain == STATS_DOMAIN_PROXY && field + 1 == ST_F_TOTAL_FIELDS) {
+ if (!chunk_strcat(out, "-,"))
+ return 0;
+ }
+ }
+
+ chunk_strcat(out, "\n");
+ return 1;
+}
+
/* Dump all fields from <stats> into <out> using a typed "field:desc:type:value" format.
 * For the proxy domain each line is prefixed with
 * "<objtype>.<iid>.<sid>.<pos>.<name>.<pid>:" where objtype is one of
 * F(rontend)/B(ackend)/L(istener)/S(erver); for resolvers the prefix is
 * "N.<pos>.<name>:". Field descriptions are appended when STAT_SHOW_FDESC
 * is set. Returns non-zero on success, 0 if the buffer is full.
 */
static int stats_dump_fields_typed(struct buffer *out,
                                   const struct field *stats,
                                   size_t stats_count,
                                   struct show_stat_ctx * ctx)
{
	int flags = ctx->flags;
	int domain = ctx->domain;
	int field;

	for (field = 0; field < stats_count; ++field) {
		/* skip fields with no value set */
		if (!stats[field].type)
			continue;

		switch (domain) {
		case STATS_DOMAIN_PROXY:
			/* the object type is derived from the well-known
			 * ST_F_TYPE field of the line being dumped
			 */
			chunk_appendf(out, "%c.%u.%u.%d.%s.%u:",
			              stats[ST_F_TYPE].u.u32 == STATS_TYPE_FE ? 'F' :
			              stats[ST_F_TYPE].u.u32 == STATS_TYPE_BE ? 'B' :
			              stats[ST_F_TYPE].u.u32 == STATS_TYPE_SO ? 'L' :
			              stats[ST_F_TYPE].u.u32 == STATS_TYPE_SV ? 'S' :
			              '?',
			              stats[ST_F_IID].u.u32, stats[ST_F_SID].u.u32,
			              field,
			              stat_f[domain][field].name,
			              stats[ST_F_PID].u.u32);
			break;

		case STATS_DOMAIN_RESOLVERS:
			chunk_appendf(out, "N.%d.%s:", field,
			              stat_f[domain][field].name);
			break;

		default:
			break;
		}

		if (!stats_emit_field_tags(out, &stats[field], ':'))
			return 0;
		if (!stats_emit_typed_data_field(out, &stats[field]))
			return 0;

		/* optional quoted field description */
		if (flags & STAT_SHOW_FDESC &&
		    !chunk_appendf(out, ":\"%s\"", stat_f[domain][field].desc)) {
			return 0;
		}

		if (!chunk_strcat(out, "\n"))
			return 0;
	}
	return 1;
}
+
/* Dump all fields from <info> into <out> using the "show info json" format.
 * The dump is resumable: ctx->field records the next field to emit, so a
 * non-zero value on entry means a previous call already started the array
 * and was interrupted by a full buffer. On success ctx->field is reset to 0.
 * Returns 1 on success (possibly partial, with ready data pushed), 0 on
 * hard error when not even a single entry fits in the buffer.
 */
static int stats_dump_json_info_fields(struct buffer *out,
                                       const struct field *info,
                                       struct show_stat_ctx *ctx)
{
	/* non-zero ctx->field means we are resuming an interrupted dump */
	int started = (ctx->field) ? 1 : 0;
	int ready_data = 0;

	if (!started && !chunk_strcat(out, "["))
		return 0;

	for (; ctx->field < INF_TOTAL_FIELDS; ctx->field++) {
		int old_len;
		int field = ctx->field;

		/* skip fields with no value set */
		if (!field_format(info, field))
			continue;

		if (started && !chunk_strcat(out, ","))
			goto err;
		started = 1;

		/* chunk_appendf() appends nothing when the buffer is full,
		 * so an unchanged length means failure
		 */
		old_len = out->data;
		chunk_appendf(out,
		              "{\"field\":{\"pos\":%d,\"name\":\"%s\"},"
		              "\"processNum\":%u,",
		              field, info_fields[field].name,
		              info[INF_PROCESS_NUM].u.u32);
		if (old_len == out->data)
			goto err;

		if (!stats_emit_json_field_tags(out, &info[field]))
			goto err;

		if (!stats_emit_json_data_field(out, &info[field]))
			goto err;

		if (!chunk_strcat(out, "}"))
			goto err;
		/* remember the end of the last fully emitted entry */
		ready_data = out->data;
	}

	if (!chunk_strcat(out, "]\n"))
		goto err;
	ctx->field = 0; /* we're done */
	return 1;

err:
	if (!ready_data) {
		/* not enough buffer space for a single entry.. */
		chunk_reset(out);
		chunk_appendf(out, "{\"errorStr\":\"output buffer too short\"}\n");
		return 0; /* hard error */
	}
	/* push ready data and wait for a new buffer to complete the dump */
	out->data = ready_data;
	return 1;
}
+
+static void stats_print_proxy_field_json(struct buffer *out,
+ const struct field *stat,
+ const char *name,
+ int pos,
+ uint32_t field_type,
+ uint32_t iid,
+ uint32_t sid,
+ uint32_t pid)
+{
+ const char *obj_type;
+ switch (field_type) {
+ case STATS_TYPE_FE: obj_type = "Frontend"; break;
+ case STATS_TYPE_BE: obj_type = "Backend"; break;
+ case STATS_TYPE_SO: obj_type = "Listener"; break;
+ case STATS_TYPE_SV: obj_type = "Server"; break;
+ default: obj_type = "Unknown"; break;
+ }
+
+ chunk_appendf(out,
+ "{"
+ "\"objType\":\"%s\","
+ "\"proxyId\":%u,"
+ "\"id\":%u,"
+ "\"field\":{\"pos\":%d,\"name\":\"%s\"},"
+ "\"processNum\":%u,",
+ obj_type, iid, sid, pos, name, pid);
+}
+
/* Opens a JSON object for one resolvers-domain stat field into <out>,
 * emitting the field position and name. The object is intentionally left
 * open: the caller appends tags and value, then the closing brace.
 * <stat> is currently unused and kept for symmetry with the proxy variant.
 */
static void stats_print_rslv_field_json(struct buffer *out,
                                        const struct field *stat,
                                        const char *name,
                                        int pos)
{
	chunk_appendf(out,
	              "{"
	              "\"field\":{\"pos\":%d,\"name\":\"%s\"},",
	              pos, name);
}
+
+
/* Dump all fields from <stats> into <out> as a JSON array of objects.
 * The dump is resumable: ctx->field records the next field to emit, so a
 * non-zero value on entry means a previous call already opened the array
 * and was interrupted by a full buffer. STAT_STARTED in ctx->flags means a
 * previous array was already emitted, so a separating comma is needed
 * first. On success ctx->field is reset to 0. Returns 1 on success
 * (possibly partial, with ready data pushed), 0 on hard error when not
 * even a single entry fits in the buffer.
 */
static int stats_dump_fields_json(struct buffer *out,
                                  const struct field *stats, size_t stats_count,
                                  struct show_stat_ctx *ctx)
{
	int flags = ctx->flags;
	int domain = ctx->domain;
	/* non-zero ctx->field means we are resuming an interrupted dump */
	int started = (ctx->field) ? 1 : 0;
	int ready_data = 0;

	if (!started && (flags & STAT_STARTED) && !chunk_strcat(out, ","))
		return 0;
	if (!started && !chunk_strcat(out, "["))
		return 0;

	for (; ctx->field < stats_count; ctx->field++) {
		int old_len;
		int field = ctx->field;

		/* skip fields with no value set */
		if (!stats[field].type)
			continue;

		if (started && !chunk_strcat(out, ","))
			goto err;
		started = 1;

		/* chunk_appendf() appends nothing when the buffer is full,
		 * so an unchanged length means failure
		 */
		old_len = out->data;
		if (domain == STATS_DOMAIN_PROXY) {
			stats_print_proxy_field_json(out, &stats[field],
			                             stat_f[domain][field].name,
			                             field,
			                             stats[ST_F_TYPE].u.u32,
			                             stats[ST_F_IID].u.u32,
			                             stats[ST_F_SID].u.u32,
			                             stats[ST_F_PID].u.u32);
		} else if (domain == STATS_DOMAIN_RESOLVERS) {
			stats_print_rslv_field_json(out, &stats[field],
			                            stat_f[domain][field].name,
			                            field);
		}

		if (old_len == out->data)
			goto err;

		if (!stats_emit_json_field_tags(out, &stats[field]))
			goto err;

		if (!stats_emit_json_data_field(out, &stats[field]))
			goto err;

		if (!chunk_strcat(out, "}"))
			goto err;
		/* remember the end of the last fully emitted entry */
		ready_data = out->data;
	}

	if (!chunk_strcat(out, "]"))
		goto err;

	ctx->field = 0; /* we're done */
	return 1;

err:
	if (!ready_data) {
		/* not enough buffer space for a single entry.. */
		chunk_reset(out);
		if (ctx->flags & STAT_STARTED)
			chunk_strcat(out, ",");
		chunk_appendf(out, "{\"errorStr\":\"output buffer too short\"}");
		return 0; /* hard error */
	}
	/* push ready data and wait for a new buffer to complete the dump */
	out->data = ready_data;
	return 1;
}
+
+/* Dump all fields from <stats> into <out> using the HTML format. A column is
+ * reserved for the checkbox is STAT_ADMIN is set in <flags>. Some extra info
+ * are provided if STAT_SHLGNDS is present in <flags>. The statistics from
+ * extra modules are displayed at the end of the lines if STAT_SHMODULES is
+ * present in <flags>.
+ */
+static int stats_dump_fields_html(struct buffer *out,
+ const struct field *stats,
+ struct show_stat_ctx *ctx)
+{
+ struct buffer src;
+ struct stats_module *mod;
+ int flags = ctx->flags;
+ int i = 0, j = 0;
+
+ if (stats[ST_F_TYPE].u.u32 == STATS_TYPE_FE) {
+ chunk_appendf(out,
+ /* name, queue */
+ "<tr class=\"frontend\">");
+
+ if (flags & STAT_ADMIN) {
+ /* Column sub-heading for Enable or Disable server */
+ chunk_appendf(out, "<td></td>");
+ }
+
+ chunk_appendf(out,
+ "<td class=ac>"
+ "<a name=\"%s/Frontend\"></a>"
+ "<a class=lfsb href=\"#%s/Frontend\">Frontend</a></td>"
+ "<td colspan=3></td>"
+ "",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_PXNAME));
+
+ chunk_appendf(out,
+ /* sessions rate : current */
+ "<td><u>%s<div class=tips><table class=det>"
+ "<tr><th>Current connection rate:</th><td>%s/s</td></tr>"
+ "<tr><th>Current session rate:</th><td>%s/s</td></tr>"
+ "",
+ U2H(stats[ST_F_RATE].u.u32),
+ U2H(stats[ST_F_CONN_RATE].u.u32),
+ U2H(stats[ST_F_RATE].u.u32));
+
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0)
+ chunk_appendf(out,
+ "<tr><th>Current request rate:</th><td>%s/s</td></tr>",
+ U2H(stats[ST_F_REQ_RATE].u.u32));
+
+ chunk_appendf(out,
+ "</table></div></u></td>"
+ /* sessions rate : max */
+ "<td><u>%s<div class=tips><table class=det>"
+ "<tr><th>Max connection rate:</th><td>%s/s</td></tr>"
+ "<tr><th>Max session rate:</th><td>%s/s</td></tr>"
+ "",
+ U2H(stats[ST_F_RATE_MAX].u.u32),
+ U2H(stats[ST_F_CONN_RATE_MAX].u.u32),
+ U2H(stats[ST_F_RATE_MAX].u.u32));
+
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0)
+ chunk_appendf(out,
+ "<tr><th>Max request rate:</th><td>%s/s</td></tr>",
+ U2H(stats[ST_F_REQ_RATE_MAX].u.u32));
+
+ chunk_appendf(out,
+ "</table></div></u></td>"
+ /* sessions rate : limit */
+ "<td>%s</td>",
+ LIM2A(stats[ST_F_RATE_LIM].u.u32, "-"));
+
+ chunk_appendf(out,
+ /* sessions: current, max, limit, total */
+ "<td>%s</td><td>%s</td><td>%s</td>"
+ "<td><u>%s<div class=tips><table class=det>"
+ "<tr><th>Cum. connections:</th><td>%s</td></tr>"
+ "<tr><th>Cum. sessions:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_SCUR].u.u32), U2H(stats[ST_F_SMAX].u.u32), U2H(stats[ST_F_SLIM].u.u32),
+ U2H(stats[ST_F_STOT].u.u64),
+ U2H(stats[ST_F_CONN_TOT].u.u64),
+ U2H(stats[ST_F_STOT].u.u64));
+
+ /* http response (via hover): 1xx, 2xx, 3xx, 4xx, 5xx, other */
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0) {
+ chunk_appendf(out,
+ "<tr><th>- HTTP/1 sessions:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP/2 sessions:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP/3 sessions:</th><td>%s</td></tr>"
+ "<tr><th>- other sessions:</th><td>%s</td></tr>"
+ "<tr><th>Cum. HTTP requests:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP/1 requests:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP/2 requests:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP/3 requests:</th><td>%s</td></tr>"
+ "<tr><th>- other requests:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_H1SESS].u.u64),
+ U2H(stats[ST_F_H2SESS].u.u64),
+ U2H(stats[ST_F_H3SESS].u.u64),
+ U2H(stats[ST_F_SESS_OTHER].u.u64),
+ U2H(stats[ST_F_REQ_TOT].u.u64),
+ U2H(stats[ST_F_H1REQ].u.u64),
+ U2H(stats[ST_F_H2REQ].u.u64),
+ U2H(stats[ST_F_H3REQ].u.u64),
+ U2H(stats[ST_F_REQ_OTHER].u.u64));
+
+ chunk_appendf(out,
+ "<tr><th>- HTTP 1xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 2xx responses:</th><td>%s</td></tr>"
+ "<tr><th>&nbsp;&nbsp;Compressed 2xx:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 3xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 4xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 5xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- other responses:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_HRSP_1XX].u.u64),
+ U2H(stats[ST_F_HRSP_2XX].u.u64),
+ U2H(stats[ST_F_COMP_RSP].u.u64),
+ stats[ST_F_HRSP_2XX].u.u64 ?
+ (int)(100 * stats[ST_F_COMP_RSP].u.u64 / stats[ST_F_HRSP_2XX].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_3XX].u.u64),
+ U2H(stats[ST_F_HRSP_4XX].u.u64),
+ U2H(stats[ST_F_HRSP_5XX].u.u64),
+ U2H(stats[ST_F_HRSP_OTHER].u.u64));
+
+ chunk_appendf(out,
+ "<tr><th>Intercepted requests:</th><td>%s</td></tr>"
+ "<tr><th>Cache lookups:</th><td>%s</td></tr>"
+ "<tr><th>Cache hits:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Failed hdr rewrites:</th><td>%s</td></tr>"
+ "<tr><th>Internal errors:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_INTERCEPTED].u.u64),
+ U2H(stats[ST_F_CACHE_LOOKUPS].u.u64),
+ U2H(stats[ST_F_CACHE_HITS].u.u64),
+ stats[ST_F_CACHE_LOOKUPS].u.u64 ?
+ (int)(100 * stats[ST_F_CACHE_HITS].u.u64 / stats[ST_F_CACHE_LOOKUPS].u.u64) : 0,
+ U2H(stats[ST_F_WREW].u.u64),
+ U2H(stats[ST_F_EINT].u.u64));
+ }
+
+ chunk_appendf(out,
+ "</table></div></u></td>"
+ /* sessions: lbtot, lastsess */
+ "<td></td><td></td>"
+ /* bytes : in */
+ "<td>%s</td>"
+ "",
+ U2H(stats[ST_F_BIN].u.u64));
+
+ chunk_appendf(out,
+ /* bytes:out + compression stats (via hover): comp_in, comp_out, comp_byp */
+ "<td>%s%s<div class=tips><table class=det>"
+ "<tr><th>Response bytes in:</th><td>%s</td></tr>"
+ "<tr><th>Compression in:</th><td>%s</td></tr>"
+ "<tr><th>Compression out:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Compression bypass:</th><td>%s</td></tr>"
+ "<tr><th>Total bytes saved:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "</table></div>%s</td>",
+ (stats[ST_F_COMP_IN].u.u64 || stats[ST_F_COMP_BYP].u.u64) ? "<u>":"",
+ U2H(stats[ST_F_BOUT].u.u64),
+ U2H(stats[ST_F_BOUT].u.u64),
+ U2H(stats[ST_F_COMP_IN].u.u64),
+ U2H(stats[ST_F_COMP_OUT].u.u64),
+ stats[ST_F_COMP_IN].u.u64 ? (int)(stats[ST_F_COMP_OUT].u.u64 * 100 / stats[ST_F_COMP_IN].u.u64) : 0,
+ U2H(stats[ST_F_COMP_BYP].u.u64),
+ U2H(stats[ST_F_COMP_IN].u.u64 - stats[ST_F_COMP_OUT].u.u64),
+ stats[ST_F_BOUT].u.u64 ? (int)((stats[ST_F_COMP_IN].u.u64 - stats[ST_F_COMP_OUT].u.u64) * 100 / stats[ST_F_BOUT].u.u64) : 0,
+ (stats[ST_F_COMP_IN].u.u64 || stats[ST_F_COMP_BYP].u.u64) ? "</u>":"");
+
+ chunk_appendf(out,
+ /* denied: req, resp */
+ "<td>%s</td><td>%s</td>"
+ /* errors : request, connect, response */
+ "<td>%s</td><td></td><td></td>"
+ /* warnings: retries, redispatches */
+ "<td></td><td></td>"
+ /* server status : reflect frontend status */
+ "<td class=ac>%s</td>"
+ /* rest of server: nothing */
+ "<td class=ac colspan=8></td>"
+ "",
+ U2H(stats[ST_F_DREQ].u.u64), U2H(stats[ST_F_DRESP].u.u64),
+ U2H(stats[ST_F_EREQ].u.u64),
+ field_str(stats, ST_F_STATUS));
+
+ if (flags & STAT_SHMODULES) {
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ chunk_appendf(out, "<td>");
+
+ if (stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE) {
+ chunk_appendf(out,
+ "<u>%s<div class=tips><table class=det>",
+ mod->name);
+ for (j = 0; j < mod->stats_count; ++j) {
+ chunk_appendf(out,
+ "<tr><th>%s</th><td>%s</td></tr>",
+ mod->stats[j].desc, field_to_html_str(&stats[ST_F_TOTAL_FIELDS + i]));
+ ++i;
+ }
+ chunk_appendf(out, "</table></div></u>");
+ } else {
+ i += mod->stats_count;
+ }
+
+ chunk_appendf(out, "</td>");
+ }
+ }
+
+ chunk_appendf(out, "</tr>");
+ }
+ else if (stats[ST_F_TYPE].u.u32 == STATS_TYPE_SO) {
+ chunk_appendf(out, "<tr class=socket>");
+ if (flags & STAT_ADMIN) {
+ /* Column sub-heading for Enable or Disable server */
+ chunk_appendf(out, "<td></td>");
+ }
+
+ chunk_appendf(out,
+ /* frontend name, listener name */
+ "<td class=ac><a name=\"%s/+%s\"></a>%s"
+ "<a class=lfsb href=\"#%s/+%s\">%s</a>"
+ "",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_SVNAME),
+ (flags & STAT_SHLGNDS)?"<u>":"",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_SVNAME), field_str(stats, ST_F_SVNAME));
+
+ if (flags & STAT_SHLGNDS) {
+ chunk_appendf(out, "<div class=tips>");
+
+ if (isdigit((unsigned char)*field_str(stats, ST_F_ADDR)))
+ chunk_appendf(out, "IPv4: %s, ", field_str(stats, ST_F_ADDR));
+ else if (*field_str(stats, ST_F_ADDR) == '[')
+ chunk_appendf(out, "IPv6: %s, ", field_str(stats, ST_F_ADDR));
+ else if (*field_str(stats, ST_F_ADDR))
+ chunk_appendf(out, "%s, ", field_str(stats, ST_F_ADDR));
+
+ chunk_appendf(out, "proto=%s, ", field_str(stats, ST_F_PROTO));
+
+ /* id */
+ chunk_appendf(out, "id: %d</div>", stats[ST_F_SID].u.u32);
+ }
+
+ chunk_appendf(out,
+ /* queue */
+ "%s</td><td colspan=3></td>"
+ /* sessions rate: current, max, limit */
+ "<td colspan=3>&nbsp;</td>"
+ /* sessions: current, max, limit, total, lbtot, lastsess */
+ "<td>%s</td><td>%s</td><td>%s</td>"
+ "<td>%s</td><td>&nbsp;</td><td>&nbsp;</td>"
+ /* bytes: in, out */
+ "<td>%s</td><td>%s</td>"
+ "",
+ (flags & STAT_SHLGNDS)?"</u>":"",
+ U2H(stats[ST_F_SCUR].u.u32), U2H(stats[ST_F_SMAX].u.u32), U2H(stats[ST_F_SLIM].u.u32),
+ U2H(stats[ST_F_STOT].u.u64), U2H(stats[ST_F_BIN].u.u64), U2H(stats[ST_F_BOUT].u.u64));
+
+ chunk_appendf(out,
+ /* denied: req, resp */
+ "<td>%s</td><td>%s</td>"
+ /* errors: request, connect, response */
+ "<td>%s</td><td></td><td></td>"
+ /* warnings: retries, redispatches */
+ "<td></td><td></td>"
+ /* server status: reflect listener status */
+ "<td class=ac>%s</td>"
+ /* rest of server: nothing */
+ "<td class=ac colspan=8></td>"
+ "",
+ U2H(stats[ST_F_DREQ].u.u64), U2H(stats[ST_F_DRESP].u.u64),
+ U2H(stats[ST_F_EREQ].u.u64),
+ field_str(stats, ST_F_STATUS));
+
+ if (flags & STAT_SHMODULES) {
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ chunk_appendf(out, "<td>");
+
+ if (stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI) {
+ chunk_appendf(out,
+ "<u>%s<div class=tips><table class=det>",
+ mod->name);
+ for (j = 0; j < mod->stats_count; ++j) {
+ chunk_appendf(out,
+ "<tr><th>%s</th><td>%s</td></tr>",
+ mod->stats[j].desc, field_to_html_str(&stats[ST_F_TOTAL_FIELDS + i]));
+ ++i;
+ }
+ chunk_appendf(out, "</table></div></u>");
+ } else {
+ i += mod->stats_count;
+ }
+
+ chunk_appendf(out, "</td>");
+ }
+ }
+
+ chunk_appendf(out, "</tr>");
+ }
+ else if (stats[ST_F_TYPE].u.u32 == STATS_TYPE_SV) {
+ const char *style;
+
+ /* determine the style to use depending on the server's state,
+ * its health and weight. There isn't a 1-to-1 mapping between
+ * state and styles for the cases where the server is (still)
+ * up. The reason is that we don't want to report nolb and
+ * drain with the same color.
+ */
+
+ if (strcmp(field_str(stats, ST_F_STATUS), "DOWN") == 0 ||
+ strcmp(field_str(stats, ST_F_STATUS), "DOWN (agent)") == 0) {
+ style = "down";
+ }
+ else if (strncmp(field_str(stats, ST_F_STATUS), "DOWN ", strlen("DOWN ")) == 0) {
+ style = "going_up";
+ }
+ else if (strcmp(field_str(stats, ST_F_STATUS), "DRAIN") == 0) {
+ style = "draining";
+ }
+ else if (strncmp(field_str(stats, ST_F_STATUS), "NOLB ", strlen("NOLB ")) == 0) {
+ style = "going_down";
+ }
+ else if (strcmp(field_str(stats, ST_F_STATUS), "NOLB") == 0) {
+ style = "nolb";
+ }
+ else if (strcmp(field_str(stats, ST_F_STATUS), "no check") == 0) {
+ style = "no_check";
+ }
+ else if (!stats[ST_F_CHKFAIL].type ||
+ stats[ST_F_CHECK_HEALTH].u.u32 == stats[ST_F_CHECK_RISE].u.u32 + stats[ST_F_CHECK_FALL].u.u32 - 1) {
+ /* no check or max health = UP */
+ if (stats[ST_F_WEIGHT].u.u32)
+ style = "up";
+ else
+ style = "draining";
+ }
+ else {
+ style = "going_down";
+ }
+
+ if (strncmp(field_str(stats, ST_F_STATUS), "MAINT", 5) == 0)
+ chunk_appendf(out, "<tr class=\"maintain\">");
+ else
+ chunk_appendf(out,
+ "<tr class=\"%s_%s\">",
+ (stats[ST_F_BCK].u.u32) ? "backup" : "active", style);
+
+
+ if (flags & STAT_ADMIN)
+ chunk_appendf(out,
+ "<td><input class='%s-checkbox' type=\"checkbox\" name=\"s\" value=\"%s\"></td>",
+ field_str(stats, ST_F_PXNAME),
+ field_str(stats, ST_F_SVNAME));
+
+ chunk_appendf(out,
+ "<td class=ac><a name=\"%s/%s\"></a>%s"
+ "<a class=lfsb href=\"#%s/%s\">%s</a>"
+ "",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_SVNAME),
+ (flags & STAT_SHLGNDS) ? "<u>" : "",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_SVNAME), field_str(stats, ST_F_SVNAME));
+
+ if (flags & STAT_SHLGNDS) {
+ chunk_appendf(out, "<div class=tips>");
+
+ if (isdigit((unsigned char)*field_str(stats, ST_F_ADDR)))
+ chunk_appendf(out, "IPv4: %s, ", field_str(stats, ST_F_ADDR));
+ else if (*field_str(stats, ST_F_ADDR) == '[')
+ chunk_appendf(out, "IPv6: %s, ", field_str(stats, ST_F_ADDR));
+ else if (*field_str(stats, ST_F_ADDR))
+ chunk_appendf(out, "%s, ", field_str(stats, ST_F_ADDR));
+
+ /* id */
+ chunk_appendf(out, "id: %d, rid: %d", stats[ST_F_SID].u.u32, stats[ST_F_SRID].u.u32);
+
+ /* cookie */
+ if (stats[ST_F_COOKIE].type) {
+ chunk_appendf(out, ", cookie: '");
+ chunk_initstr(&src, field_str(stats, ST_F_COOKIE));
+ chunk_htmlencode(out, &src);
+ chunk_appendf(out, "'");
+ }
+
+ chunk_appendf(out, "</div>");
+ }
+
+ chunk_appendf(out,
+ /* queue : current, max, limit */
+ "%s</td><td>%s</td><td>%s</td><td>%s</td>"
+ /* sessions rate : current, max, limit */
+ "<td>%s</td><td>%s</td><td></td>"
+ "",
+ (flags & STAT_SHLGNDS) ? "</u>" : "",
+ U2H(stats[ST_F_QCUR].u.u32), U2H(stats[ST_F_QMAX].u.u32), LIM2A(stats[ST_F_QLIMIT].u.u32, "-"),
+ U2H(stats[ST_F_RATE].u.u32), U2H(stats[ST_F_RATE_MAX].u.u32));
+
+ chunk_appendf(out,
+ /* sessions: current, max, limit, total */
+ "<td><u>%s<div class=tips>"
+ "<table class=det>"
+ "<tr><th>Current active connections:</th><td>%s</td></tr>"
+ "<tr><th>Current used connections:</th><td>%s</td></tr>"
+ "<tr><th>Current idle connections:</th><td>%s</td></tr>"
+ "<tr><th>- unsafe:</th><td>%s</td></tr>"
+ "<tr><th>- safe:</th><td>%s</td></tr>"
+ "<tr><th>Estimated need of connections:</th><td>%s</td></tr>"
+ "<tr><th>Active connections limit:</th><td>%s</td></tr>"
+ "<tr><th>Idle connections limit:</th><td>%s</td></tr>"
+ "</table></div></u>"
+ "</td><td>%s</td><td>%s</td>"
+ "<td><u>%s<div class=tips><table class=det>"
+ "<tr><th>Cum. sessions:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_SCUR].u.u32),
+ U2H(stats[ST_F_SCUR].u.u32),
+ U2H(stats[ST_F_USED_CONN_CUR].u.u32),
+ U2H(stats[ST_F_SRV_ICUR].u.u32),
+ U2H(stats[ST_F_IDLE_CONN_CUR].u.u32),
+ U2H(stats[ST_F_SAFE_CONN_CUR].u.u32),
+ U2H(stats[ST_F_NEED_CONN_EST].u.u32),
+
+ LIM2A(stats[ST_F_SLIM].u.u32, "-"),
+ stats[ST_F_SRV_ILIM].type ? U2H(stats[ST_F_SRV_ILIM].u.u32) : "-",
+ U2H(stats[ST_F_SMAX].u.u32), LIM2A(stats[ST_F_SLIM].u.u32, "-"),
+ U2H(stats[ST_F_STOT].u.u64),
+ U2H(stats[ST_F_STOT].u.u64));
+
+ /* http response (via hover): 1xx, 2xx, 3xx, 4xx, 5xx, other */
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0) {
+ chunk_appendf(out,
+ "<tr><th>New connections:</th><td>%s</td></tr>"
+ "<tr><th>Reused connections:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Cum. HTTP requests:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 1xx responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 2xx responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 3xx responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 4xx responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 5xx responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- other responses:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Failed hdr rewrites:</th><td>%s</td></tr>"
+ "<tr><th>Internal error:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_CONNECT].u.u64),
+ U2H(stats[ST_F_REUSE].u.u64),
+ (stats[ST_F_CONNECT].u.u64 + stats[ST_F_REUSE].u.u64) ?
+ (int)(100 * stats[ST_F_REUSE].u.u64 / (stats[ST_F_CONNECT].u.u64 + stats[ST_F_REUSE].u.u64)) : 0,
+ U2H(stats[ST_F_REQ_TOT].u.u64),
+ U2H(stats[ST_F_HRSP_1XX].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_1XX].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_2XX].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_2XX].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_3XX].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_3XX].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_4XX].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_4XX].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_5XX].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_5XX].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_OTHER].u.u64), stats[ST_F_REQ_TOT].u.u64 ?
+ (int)(100 * stats[ST_F_HRSP_OTHER].u.u64 / stats[ST_F_REQ_TOT].u.u64) : 0,
+ U2H(stats[ST_F_WREW].u.u64),
+ U2H(stats[ST_F_EINT].u.u64));
+ }
+
+ chunk_appendf(out, "<tr><th colspan=3>Max / Avg over last 1024 success. conn.</th></tr>");
+ chunk_appendf(out, "<tr><th>- Queue time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_QT_MAX].u.u32), U2H(stats[ST_F_QTIME].u.u32));
+ chunk_appendf(out, "<tr><th>- Connect time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_CT_MAX].u.u32), U2H(stats[ST_F_CTIME].u.u32));
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0)
+ chunk_appendf(out, "<tr><th>- Responses time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_RT_MAX].u.u32), U2H(stats[ST_F_RTIME].u.u32));
+ chunk_appendf(out, "<tr><th>- Total time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_TT_MAX].u.u32), U2H(stats[ST_F_TTIME].u.u32));
+
+ chunk_appendf(out,
+ "</table></div></u></td>"
+ /* sessions: lbtot, last */
+ "<td>%s</td><td>%s</td>",
+ U2H(stats[ST_F_LBTOT].u.u64),
+ human_time(stats[ST_F_LASTSESS].u.s32, 1));
+
+ chunk_appendf(out,
+ /* bytes : in, out */
+ "<td>%s</td><td>%s</td>"
+ /* denied: req, resp */
+ "<td></td><td>%s</td>"
+ /* errors : request, connect */
+ "<td></td><td>%s</td>"
+ /* errors : response */
+ "<td><u>%s<div class=tips>Connection resets during transfers: %lld client, %lld server</div></u></td>"
+ /* warnings: retries, redispatches */
+ "<td>%lld</td><td>%lld</td>"
+ "",
+ U2H(stats[ST_F_BIN].u.u64), U2H(stats[ST_F_BOUT].u.u64),
+ U2H(stats[ST_F_DRESP].u.u64),
+ U2H(stats[ST_F_ECON].u.u64),
+ U2H(stats[ST_F_ERESP].u.u64),
+ (long long)stats[ST_F_CLI_ABRT].u.u64,
+ (long long)stats[ST_F_SRV_ABRT].u.u64,
+ (long long)stats[ST_F_WRETR].u.u64,
+ (long long)stats[ST_F_WREDIS].u.u64);
+
+ /* status, last change */
+ chunk_appendf(out, "<td class=ac>");
+
+ /* FIXME!!!!
+ * LASTCHG should contain the last change for *this* server and must be computed
+ * properly above, as was done below, ie: this server if maint, otherwise ref server
+ * if tracking. Note that ref is either local or remote depending on tracking.
+ */
+
+
+ if (strncmp(field_str(stats, ST_F_STATUS), "MAINT", 5) == 0) {
+ chunk_appendf(out, "%s MAINT", human_time(stats[ST_F_LASTCHG].u.u32, 1));
+ }
+ else if (strcmp(field_str(stats, ST_F_STATUS), "no check") == 0) {
+ chunk_strcat(out, "<i>no check</i>");
+ }
+ else {
+ chunk_appendf(out, "%s %s", human_time(stats[ST_F_LASTCHG].u.u32, 1), field_str(stats, ST_F_STATUS));
+ if (strncmp(field_str(stats, ST_F_STATUS), "DOWN", 4) == 0) {
+ if (stats[ST_F_CHECK_HEALTH].u.u32)
+ chunk_strcat(out, " &uarr;");
+ }
+ else if (stats[ST_F_CHECK_HEALTH].u.u32 < stats[ST_F_CHECK_RISE].u.u32 + stats[ST_F_CHECK_FALL].u.u32 - 1)
+ chunk_strcat(out, " &darr;");
+ }
+
+ if (strncmp(field_str(stats, ST_F_STATUS), "DOWN", 4) == 0 &&
+ stats[ST_F_AGENT_STATUS].type && !stats[ST_F_AGENT_HEALTH].u.u32) {
+ chunk_appendf(out,
+ "</td><td class=ac><u> %s",
+ field_str(stats, ST_F_AGENT_STATUS));
+
+ if (stats[ST_F_AGENT_CODE].type)
+ chunk_appendf(out, "/%d", stats[ST_F_AGENT_CODE].u.u32);
+
+ if (stats[ST_F_AGENT_DURATION].type)
+ chunk_appendf(out, " in %lums", (long)stats[ST_F_AGENT_DURATION].u.u64);
+
+ chunk_appendf(out, "<div class=tips>%s", field_str(stats, ST_F_AGENT_DESC));
+
+ if (*field_str(stats, ST_F_LAST_AGT)) {
+ chunk_appendf(out, ": ");
+ chunk_initstr(&src, field_str(stats, ST_F_LAST_AGT));
+ chunk_htmlencode(out, &src);
+ }
+ chunk_appendf(out, "</div></u>");
+ }
+ else if (stats[ST_F_CHECK_STATUS].type) {
+ chunk_appendf(out,
+ "</td><td class=ac><u> %s",
+ field_str(stats, ST_F_CHECK_STATUS));
+
+ if (stats[ST_F_CHECK_CODE].type)
+ chunk_appendf(out, "/%d", stats[ST_F_CHECK_CODE].u.u32);
+
+ if (stats[ST_F_CHECK_DURATION].type)
+ chunk_appendf(out, " in %lums", (long)stats[ST_F_CHECK_DURATION].u.u64);
+
+ chunk_appendf(out, "<div class=tips>%s", field_str(stats, ST_F_CHECK_DESC));
+
+ if (*field_str(stats, ST_F_LAST_CHK)) {
+ chunk_appendf(out, ": ");
+ chunk_initstr(&src, field_str(stats, ST_F_LAST_CHK));
+ chunk_htmlencode(out, &src);
+ }
+ chunk_appendf(out, "</div></u>");
+ }
+ else
+ chunk_appendf(out, "</td><td>");
+
+ chunk_appendf(out,
+ /* weight / uweight */
+ "</td><td class=ac>%d/%d</td>"
+ /* act, bck */
+ "<td class=ac>%s</td><td class=ac>%s</td>"
+ "",
+ stats[ST_F_WEIGHT].u.u32, stats[ST_F_UWEIGHT].u.u32,
+ stats[ST_F_BCK].u.u32 ? "-" : "Y",
+ stats[ST_F_BCK].u.u32 ? "Y" : "-");
+
+ /* check failures: unique, fatal, down time */
+ if (strcmp(field_str(stats, ST_F_STATUS), "MAINT (resolution)") == 0) {
+ chunk_appendf(out, "<td class=ac colspan=3>resolution</td>");
+ }
+ else if (stats[ST_F_CHKFAIL].type) {
+ chunk_appendf(out, "<td><u>%lld", (long long)stats[ST_F_CHKFAIL].u.u64);
+
+ if (stats[ST_F_HANAFAIL].type)
+ chunk_appendf(out, "/%lld", (long long)stats[ST_F_HANAFAIL].u.u64);
+
+ chunk_appendf(out,
+ "<div class=tips>Failed Health Checks%s</div></u></td>"
+ "<td>%lld</td><td>%s</td>"
+ "",
+ stats[ST_F_HANAFAIL].type ? "/Health Analyses" : "",
+ (long long)stats[ST_F_CHKDOWN].u.u64, human_time(stats[ST_F_DOWNTIME].u.u32, 1));
+ }
+ else if (strcmp(field_str(stats, ST_F_STATUS), "MAINT") != 0 && field_format(stats, ST_F_TRACKED) == FF_STR) {
+ /* tracking a server (hence inherited maint would appear as "MAINT (via...)" */
+ chunk_appendf(out,
+ "<td class=ac colspan=3><a class=lfsb href=\"#%s\">via %s</a></td>",
+ field_str(stats, ST_F_TRACKED), field_str(stats, ST_F_TRACKED));
+ }
+ else
+ chunk_appendf(out, "<td colspan=3></td>");
+
+ /* throttle */
+ if (stats[ST_F_THROTTLE].type)
+ chunk_appendf(out, "<td class=ac>%d %%</td>\n", stats[ST_F_THROTTLE].u.u32);
+ else
+ chunk_appendf(out, "<td class=ac>-</td>");
+
+ if (flags & STAT_SHMODULES) {
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ chunk_appendf(out, "<td>");
+
+ if (stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV) {
+ chunk_appendf(out,
+ "<u>%s<div class=tips><table class=det>",
+ mod->name);
+ for (j = 0; j < mod->stats_count; ++j) {
+ chunk_appendf(out,
+ "<tr><th>%s</th><td>%s</td></tr>",
+ mod->stats[j].desc, field_to_html_str(&stats[ST_F_TOTAL_FIELDS + i]));
+ ++i;
+ }
+ chunk_appendf(out, "</table></div></u>");
+ } else {
+ i += mod->stats_count;
+ }
+
+ chunk_appendf(out, "</td>");
+ }
+ }
+
+ chunk_appendf(out, "</tr>\n");
+ }
+ else if (stats[ST_F_TYPE].u.u32 == STATS_TYPE_BE) {
+ chunk_appendf(out, "<tr class=\"backend\">");
+ if (flags & STAT_ADMIN) {
+ /* Column sub-heading for Enable or Disable server */
+ chunk_appendf(out, "<td></td>");
+ }
+ chunk_appendf(out,
+ "<td class=ac>"
+ /* name */
+ "%s<a name=\"%s/Backend\"></a>"
+ "<a class=lfsb href=\"#%s/Backend\">Backend</a>"
+ "",
+ (flags & STAT_SHLGNDS)?"<u>":"",
+ field_str(stats, ST_F_PXNAME), field_str(stats, ST_F_PXNAME));
+
+ if (flags & STAT_SHLGNDS) {
+ /* balancing */
+ chunk_appendf(out, "<div class=tips>balancing: %s",
+ field_str(stats, ST_F_ALGO));
+
+ /* cookie */
+ if (stats[ST_F_COOKIE].type) {
+ chunk_appendf(out, ", cookie: '");
+ chunk_initstr(&src, field_str(stats, ST_F_COOKIE));
+ chunk_htmlencode(out, &src);
+ chunk_appendf(out, "'");
+ }
+ chunk_appendf(out, "</div>");
+ }
+
+ chunk_appendf(out,
+ "%s</td>"
+ /* queue : current, max */
+ "<td>%s</td><td>%s</td><td></td>"
+ /* sessions rate : current, max, limit */
+ "<td>%s</td><td>%s</td><td></td>"
+ "",
+ (flags & STAT_SHLGNDS)?"</u>":"",
+ U2H(stats[ST_F_QCUR].u.u32), U2H(stats[ST_F_QMAX].u.u32),
+ U2H(stats[ST_F_RATE].u.u32), U2H(stats[ST_F_RATE_MAX].u.u32));
+
+ chunk_appendf(out,
+ /* sessions: current, max, limit, total */
+ "<td>%s</td><td>%s</td><td>%s</td>"
+ "<td><u>%s<div class=tips><table class=det>"
+ "<tr><th>Cum. sessions:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_SCUR].u.u32), U2H(stats[ST_F_SMAX].u.u32), U2H(stats[ST_F_SLIM].u.u32),
+ U2H(stats[ST_F_STOT].u.u64),
+ U2H(stats[ST_F_STOT].u.u64));
+
+ /* http response (via hover): 1xx, 2xx, 3xx, 4xx, 5xx, other */
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0) {
+ chunk_appendf(out,
+ "<tr><th>New connections:</th><td>%s</td></tr>"
+ "<tr><th>Reused connections:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Cum. HTTP requests:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 1xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 2xx responses:</th><td>%s</td></tr>"
+ "<tr><th>&nbsp;&nbsp;Compressed 2xx:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>- HTTP 3xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 4xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- HTTP 5xx responses:</th><td>%s</td></tr>"
+ "<tr><th>- other responses:</th><td>%s</td></tr>"
+ "<tr><th>Cache lookups:</th><td>%s</td></tr>"
+ "<tr><th>Cache hits:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Failed hdr rewrites:</th><td>%s</td></tr>"
+ "<tr><th>Internal errors:</th><td>%s</td></tr>"
+ "",
+ U2H(stats[ST_F_CONNECT].u.u64),
+ U2H(stats[ST_F_REUSE].u.u64),
+ (stats[ST_F_CONNECT].u.u64 + stats[ST_F_REUSE].u.u64) ?
+ (int)(100 * stats[ST_F_REUSE].u.u64 / (stats[ST_F_CONNECT].u.u64 + stats[ST_F_REUSE].u.u64)) : 0,
+ U2H(stats[ST_F_REQ_TOT].u.u64),
+ U2H(stats[ST_F_HRSP_1XX].u.u64),
+ U2H(stats[ST_F_HRSP_2XX].u.u64),
+ U2H(stats[ST_F_COMP_RSP].u.u64),
+ stats[ST_F_HRSP_2XX].u.u64 ?
+ (int)(100 * stats[ST_F_COMP_RSP].u.u64 / stats[ST_F_HRSP_2XX].u.u64) : 0,
+ U2H(stats[ST_F_HRSP_3XX].u.u64),
+ U2H(stats[ST_F_HRSP_4XX].u.u64),
+ U2H(stats[ST_F_HRSP_5XX].u.u64),
+ U2H(stats[ST_F_HRSP_OTHER].u.u64),
+ U2H(stats[ST_F_CACHE_LOOKUPS].u.u64),
+ U2H(stats[ST_F_CACHE_HITS].u.u64),
+ stats[ST_F_CACHE_LOOKUPS].u.u64 ?
+ (int)(100 * stats[ST_F_CACHE_HITS].u.u64 / stats[ST_F_CACHE_LOOKUPS].u.u64) : 0,
+ U2H(stats[ST_F_WREW].u.u64),
+ U2H(stats[ST_F_EINT].u.u64));
+ }
+
+ chunk_appendf(out, "<tr><th colspan=3>Max / Avg over last 1024 success. conn.</th></tr>");
+ chunk_appendf(out, "<tr><th>- Queue time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_QT_MAX].u.u32), U2H(stats[ST_F_QTIME].u.u32));
+ chunk_appendf(out, "<tr><th>- Connect time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_CT_MAX].u.u32), U2H(stats[ST_F_CTIME].u.u32));
+ if (strcmp(field_str(stats, ST_F_MODE), "http") == 0)
+ chunk_appendf(out, "<tr><th>- Responses time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_RT_MAX].u.u32), U2H(stats[ST_F_RTIME].u.u32));
+ chunk_appendf(out, "<tr><th>- Total time:</th><td>%s / %s</td><td>ms</td></tr>",
+ U2H(stats[ST_F_TT_MAX].u.u32), U2H(stats[ST_F_TTIME].u.u32));
+
+ chunk_appendf(out,
+ "</table></div></u></td>"
+ /* sessions: lbtot, last */
+ "<td>%s</td><td>%s</td>"
+ /* bytes: in */
+ "<td>%s</td>"
+ "",
+ U2H(stats[ST_F_LBTOT].u.u64),
+ human_time(stats[ST_F_LASTSESS].u.s32, 1),
+ U2H(stats[ST_F_BIN].u.u64));
+
+ chunk_appendf(out,
+ /* bytes:out + compression stats (via hover): comp_in, comp_out, comp_byp */
+ "<td>%s%s<div class=tips><table class=det>"
+ "<tr><th>Response bytes in:</th><td>%s</td></tr>"
+ "<tr><th>Compression in:</th><td>%s</td></tr>"
+ "<tr><th>Compression out:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "<tr><th>Compression bypass:</th><td>%s</td></tr>"
+ "<tr><th>Total bytes saved:</th><td>%s</td><td>(%d%%)</td></tr>"
+ "</table></div>%s</td>",
+ (stats[ST_F_COMP_IN].u.u64 || stats[ST_F_COMP_BYP].u.u64) ? "<u>":"",
+ U2H(stats[ST_F_BOUT].u.u64),
+ U2H(stats[ST_F_BOUT].u.u64),
+ U2H(stats[ST_F_COMP_IN].u.u64),
+ U2H(stats[ST_F_COMP_OUT].u.u64),
+ stats[ST_F_COMP_IN].u.u64 ? (int)(stats[ST_F_COMP_OUT].u.u64 * 100 / stats[ST_F_COMP_IN].u.u64) : 0,
+ U2H(stats[ST_F_COMP_BYP].u.u64),
+ U2H(stats[ST_F_COMP_IN].u.u64 - stats[ST_F_COMP_OUT].u.u64),
+ stats[ST_F_BOUT].u.u64 ? (int)((stats[ST_F_COMP_IN].u.u64 - stats[ST_F_COMP_OUT].u.u64) * 100 / stats[ST_F_BOUT].u.u64) : 0,
+ (stats[ST_F_COMP_IN].u.u64 || stats[ST_F_COMP_BYP].u.u64) ? "</u>":"");
+
+ chunk_appendf(out,
+ /* denied: req, resp */
+ "<td>%s</td><td>%s</td>"
+ /* errors : request, connect */
+ "<td></td><td>%s</td>"
+ /* errors : response */
+ "<td><u>%s<div class=tips>Connection resets during transfers: %lld client, %lld server</div></u></td>"
+ /* warnings: retries, redispatches */
+ "<td>%lld</td><td>%lld</td>"
+ /* backend status: reflect backend status (up/down): we display UP
+ * if the backend has known working servers or if it has no server at
+ * all (eg: for stats). Then we display the total weight, number of
+ * active and backups. */
+ "<td class=ac>%s %s</td><td class=ac>&nbsp;</td><td class=ac>%d/%d</td>"
+ "<td class=ac>%d</td><td class=ac>%d</td>"
+ "",
+ U2H(stats[ST_F_DREQ].u.u64), U2H(stats[ST_F_DRESP].u.u64),
+ U2H(stats[ST_F_ECON].u.u64),
+ U2H(stats[ST_F_ERESP].u.u64),
+ (long long)stats[ST_F_CLI_ABRT].u.u64,
+ (long long)stats[ST_F_SRV_ABRT].u.u64,
+ (long long)stats[ST_F_WRETR].u.u64, (long long)stats[ST_F_WREDIS].u.u64,
+ human_time(stats[ST_F_LASTCHG].u.u32, 1),
+ strcmp(field_str(stats, ST_F_STATUS), "DOWN") ? field_str(stats, ST_F_STATUS) : "<font color=\"red\"><b>DOWN</b></font>",
+ stats[ST_F_WEIGHT].u.u32, stats[ST_F_UWEIGHT].u.u32,
+ stats[ST_F_ACT].u.u32, stats[ST_F_BCK].u.u32);
+
+ chunk_appendf(out,
+ /* rest of backend: nothing, down transitions, total downtime, throttle */
+ "<td class=ac>&nbsp;</td><td>%d</td>"
+ "<td>%s</td>"
+ "<td></td>",
+ stats[ST_F_CHKDOWN].u.u32,
+ stats[ST_F_DOWNTIME].type ? human_time(stats[ST_F_DOWNTIME].u.u32, 1) : "&nbsp;");
+
+ if (flags & STAT_SHMODULES) {
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ chunk_appendf(out, "<td>");
+
+ if (stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE) {
+ chunk_appendf(out,
+ "<u>%s<div class=tips><table class=det>",
+ mod->name);
+ for (j = 0; j < mod->stats_count; ++j) {
+ chunk_appendf(out,
+ "<tr><th>%s</th><td>%s</td></tr>",
+ mod->stats[j].desc, field_to_html_str(&stats[ST_F_TOTAL_FIELDS + i]));
+ ++i;
+ }
+ chunk_appendf(out, "</table></div></u>");
+ } else {
+ i += mod->stats_count;
+ }
+
+ chunk_appendf(out, "</td>");
+ }
+ }
+
+ chunk_appendf(out, "</tr>");
+ }
+
+ return 1;
+}
+
+int stats_dump_one_line(const struct field *stats, size_t stats_count,
+ struct appctx *appctx)
+{
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ int ret;
+
+ if (ctx->flags & STAT_FMT_HTML)
+ ret = stats_dump_fields_html(&trash_chunk, stats, ctx);
+ else if (ctx->flags & STAT_FMT_TYPED)
+ ret = stats_dump_fields_typed(&trash_chunk, stats, stats_count, ctx);
+ else if (ctx->flags & STAT_FMT_JSON)
+ ret = stats_dump_fields_json(&trash_chunk, stats, stats_count, ctx);
+ else
+ ret = stats_dump_fields_csv(&trash_chunk, stats, stats_count, ctx);
+
+ return ret;
+}
+
/* Fill <stats> with the frontend statistics. <stats> is preallocated array of
 * length <len>. If <selected_field> is != NULL, only fill this one. The length
 * of the array must be at least ST_F_TOTAL_FIELDS. If this length is less than
 * this value, or if the selected field is not implemented for frontends, the
 * function returns 0, otherwise, it returns 1.
 */
int stats_fill_fe_stats(struct proxy *px, struct field *stats, int len,
                        enum stat_field *selected_field)
{
	/* either iterate over every field, or handle only the requested one */
	enum stat_field current_field = (selected_field != NULL ? *selected_field : 0);

	if (len < ST_F_TOTAL_FIELDS)
		return 0;

	for (; current_field < ST_F_TOTAL_FIELDS; current_field++) {
		struct field metric = { 0 };

		switch (current_field) {
		case ST_F_PXNAME:
			metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, px->id);
			break;
		case ST_F_SVNAME:
			/* frontends always report the pseudo-service name "FRONTEND" */
			metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, "FRONTEND");
			break;
		case ST_F_MODE:
			metric = mkf_str(FO_CONFIG|FS_SERVICE, proxy_mode_str(px->mode));
			break;
		case ST_F_SCUR:
			metric = mkf_u32(0, px->feconn);
			break;
		case ST_F_SMAX:
			metric = mkf_u32(FN_MAX, px->fe_counters.conn_max);
			break;
		case ST_F_SLIM:
			metric = mkf_u32(FO_CONFIG|FN_LIMIT, px->maxconn);
			break;
		case ST_F_STOT:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.cum_sess);
			break;
		case ST_F_BIN:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.bytes_in);
			break;
		case ST_F_BOUT:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.bytes_out);
			break;
		case ST_F_DREQ:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.denied_req);
			break;
		case ST_F_DRESP:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.denied_resp);
			break;
		case ST_F_EREQ:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.failed_req);
			break;
		case ST_F_DCON:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.denied_conn);
			break;
		case ST_F_DSES:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.denied_sess);
			break;
		case ST_F_STATUS: {
			const char *state;

			/* most restrictive state first: stopped, paused, then open */
			if (px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))
				state = "STOP";
			else if (px->flags & PR_FL_PAUSED)
				state = "PAUSED";
			else
				state = "OPEN";
			metric = mkf_str(FO_STATUS, state);
			break;
		}
		case ST_F_PID:
			metric = mkf_u32(FO_KEY, 1);
			break;
		case ST_F_IID:
			metric = mkf_u32(FO_KEY|FS_SERVICE, px->uuid);
			break;
		case ST_F_SID:
			/* frontends have no server id */
			metric = mkf_u32(FO_KEY|FS_SERVICE, 0);
			break;
		case ST_F_TYPE:
			metric = mkf_u32(FO_CONFIG|FS_SERVICE, STATS_TYPE_FE);
			break;
		case ST_F_RATE:
			metric = mkf_u32(FN_RATE, read_freq_ctr(&px->fe_sess_per_sec));
			break;
		case ST_F_RATE_LIM:
			metric = mkf_u32(FO_CONFIG|FN_LIMIT, px->fe_sps_lim);
			break;
		case ST_F_RATE_MAX:
			metric = mkf_u32(FN_MAX, px->fe_counters.sps_max);
			break;
		case ST_F_WREW:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.failed_rewrites);
			break;
		case ST_F_EINT:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.internal_errors);
			break;
		/* HTTP-only counters: when the proxy is not in HTTP mode the metric
		 * is left zero-initialized (empty field).
		 */
		case ST_F_HRSP_1XX:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[1]);
			break;
		case ST_F_HRSP_2XX:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[2]);
			break;
		case ST_F_HRSP_3XX:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[3]);
			break;
		case ST_F_HRSP_4XX:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[4]);
			break;
		case ST_F_HRSP_5XX:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[5]);
			break;
		case ST_F_HRSP_OTHER:
			/* slot 0 collects responses outside the 1xx..5xx classes */
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.rsp[0]);
			break;
		case ST_F_INTERCEPTED:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.intercepted_req);
			break;
		case ST_F_CACHE_LOOKUPS:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cache_lookups);
			break;
		case ST_F_CACHE_HITS:
			if (px->mode == PR_MODE_HTTP)
				metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cache_hits);
			break;
		case ST_F_REQ_RATE:
			metric = mkf_u32(FN_RATE, read_freq_ctr(&px->fe_req_per_sec));
			break;
		case ST_F_REQ_RATE_MAX:
			metric = mkf_u32(FN_MAX, px->fe_counters.p.http.rps_max);
			break;
		case ST_F_REQ_TOT: {
			/* total requests = sum of the per-version request counters */
			int i;
			uint64_t total_req;
			size_t nb_reqs =
				sizeof(px->fe_counters.p.http.cum_req) / sizeof(*px->fe_counters.p.http.cum_req);

			total_req = 0;
			for (i = 0; i < nb_reqs; i++)
				total_req += px->fe_counters.p.http.cum_req[i];
			metric = mkf_u64(FN_COUNTER, total_req);
			break;
		}
		case ST_F_COMP_IN:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.comp_in[COMP_DIR_RES]);
			break;
		case ST_F_COMP_OUT:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.comp_out[COMP_DIR_RES]);
			break;
		case ST_F_COMP_BYP:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.comp_byp[COMP_DIR_RES]);
			break;
		case ST_F_COMP_RSP:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.comp_rsp);
			break;
		case ST_F_CONN_RATE:
			metric = mkf_u32(FN_RATE, read_freq_ctr(&px->fe_conn_per_sec));
			break;
		case ST_F_CONN_RATE_MAX:
			metric = mkf_u32(FN_MAX, px->fe_counters.cps_max);
			break;
		case ST_F_CONN_TOT:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.cum_conn);
			break;
		case ST_F_SESS_OTHER: {
			/* "other" sessions = total sessions minus the per-version ones;
			 * the result is clamped at zero in case the subtraction
			 * transiently underflows.
			 */
			int i;
			uint64_t total_sess;
			size_t nb_sess =
				sizeof(px->fe_counters.cum_sess_ver) / sizeof(*px->fe_counters.cum_sess_ver);

			total_sess = px->fe_counters.cum_sess;
			for (i = 0; i < nb_sess; i++)
				total_sess -= px->fe_counters.cum_sess_ver[i];
			total_sess = (int64_t)total_sess < 0 ? 0 : total_sess;
			metric = mkf_u64(FN_COUNTER, total_sess);
			break;
		}
		case ST_F_H1SESS:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.cum_sess_ver[0]);
			break;
		case ST_F_H2SESS:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.cum_sess_ver[1]);
			break;
		case ST_F_H3SESS:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.cum_sess_ver[2]);
			break;
		case ST_F_REQ_OTHER:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cum_req[0]);
			break;
		case ST_F_H1REQ:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cum_req[1]);
			break;
		case ST_F_H2REQ:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cum_req[2]);
			break;
		case ST_F_H3REQ:
			metric = mkf_u64(FN_COUNTER, px->fe_counters.p.http.cum_req[3]);
			break;
		default:
			/* not used for frontends. If a specific metric
			 * is requested, return an error. Otherwise continue.
			 */
			if (selected_field != NULL)
				return 0;
			continue;
		}
		stats[current_field] = metric;
		/* in single-field mode, stop right after the requested field */
		if (selected_field != NULL)
			break;
	}
	return 1;
}
+
+/* Dumps a frontend's line to the local trash buffer for the current proxy <px>
+ * and uses the state from stream connector <sc>. The caller is responsible for
+ * clearing the local trash buffer if needed. Returns non-zero if it emits
+ * anything, zero otherwise.
+ */
+static int stats_dump_fe_stats(struct stconn *sc, struct proxy *px)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct field *stats = stat_l[STATS_DOMAIN_PROXY];
+ struct stats_module *mod;
+ size_t stats_count = ST_F_TOTAL_FIELDS;
+
+ if (!(px->cap & PR_CAP_FE))
+ return 0;
+
+ if ((ctx->flags & STAT_BOUND) && !(ctx->type & (1 << STATS_TYPE_FE)))
+ return 0;
+
+ memset(stats, 0, sizeof(struct field) * stat_count[STATS_DOMAIN_PROXY]);
+
+ if (!stats_fill_fe_stats(px, stats, ST_F_TOTAL_FIELDS, NULL))
+ return 0;
+
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ void *counters;
+
+ if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_FE)) {
+ stats_count += mod->stats_count;
+ continue;
+ }
+
+ counters = EXTRA_COUNTERS_GET(px->extra_counters_fe, mod);
+ mod->fill_stats(counters, stats + stats_count);
+ stats_count += mod->stats_count;
+ }
+
+ return stats_dump_one_line(stats, stats_count, appctx);
+}
+
/* Fill <stats> with the listener statistics. <stats> is preallocated array of
 * length <len>. The length of the array must be at least ST_F_TOTAL_FIELDS. If
 * this length is less than this value, the function returns 0, otherwise, it
 * returns 1. If selected_field is != NULL, only fill this one. <flags> can
 * take the value STAT_SHLGNDS.
 */
int stats_fill_li_stats(struct proxy *px, struct listener *l, int flags,
                        struct field *stats, int len, enum stat_field *selected_field)
{
	/* either iterate over every field, or handle only the requested one */
	enum stat_field current_field = (selected_field != NULL ? *selected_field : 0);
	struct buffer *out = get_trash_chunk();

	if (len < ST_F_TOTAL_FIELDS)
		return 0;

	/* a listener without counters cannot be reported */
	if (!l->counters)
		return 0;

	chunk_reset(out);

	for (; current_field < ST_F_TOTAL_FIELDS; current_field++) {
		struct field metric = { 0 };

		switch (current_field) {
		case ST_F_PXNAME:
			metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, px->id);
			break;
		case ST_F_SVNAME:
			metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, l->name);
			break;
		case ST_F_MODE:
			metric = mkf_str(FO_CONFIG|FS_SERVICE, proxy_mode_str(px->mode));
			break;
		case ST_F_SCUR:
			metric = mkf_u32(0, l->nbconn);
			break;
		case ST_F_SMAX:
			metric = mkf_u32(FN_MAX, l->counters->conn_max);
			break;
		case ST_F_SLIM:
			metric = mkf_u32(FO_CONFIG|FN_LIMIT, l->bind_conf->maxconn);
			break;
		case ST_F_STOT:
			metric = mkf_u64(FN_COUNTER, l->counters->cum_conn);
			break;
		case ST_F_BIN:
			metric = mkf_u64(FN_COUNTER, l->counters->bytes_in);
			break;
		case ST_F_BOUT:
			metric = mkf_u64(FN_COUNTER, l->counters->bytes_out);
			break;
		case ST_F_DREQ:
			metric = mkf_u64(FN_COUNTER, l->counters->denied_req);
			break;
		case ST_F_DRESP:
			metric = mkf_u64(FN_COUNTER, l->counters->denied_resp);
			break;
		case ST_F_EREQ:
			metric = mkf_u64(FN_COUNTER, l->counters->failed_req);
			break;
		case ST_F_DCON:
			metric = mkf_u64(FN_COUNTER, l->counters->denied_conn);
			break;
		case ST_F_DSES:
			metric = mkf_u64(FN_COUNTER, l->counters->denied_sess);
			break;
		case ST_F_STATUS:
			metric = mkf_str(FO_STATUS, li_status_st[get_li_status(l)]);
			break;
		case ST_F_PID:
			metric = mkf_u32(FO_KEY, 1);
			break;
		case ST_F_IID:
			metric = mkf_u32(FO_KEY|FS_SERVICE, px->uuid);
			break;
		case ST_F_SID:
			metric = mkf_u32(FO_KEY|FS_SERVICE, l->luid);
			break;
		case ST_F_TYPE:
			metric = mkf_u32(FO_CONFIG|FS_SERVICE, STATS_TYPE_SO);
			break;
		case ST_F_WREW:
			metric = mkf_u64(FN_COUNTER, l->counters->failed_rewrites);
			break;
		case ST_F_EINT:
			metric = mkf_u64(FN_COUNTER, l->counters->internal_errors);
			break;
		case ST_F_ADDR:
			/* the bind address is only reported with "show legends" set */
			if (flags & STAT_SHLGNDS) {
				char str[INET6_ADDRSTRLEN];
				int port;

				port = get_host_port(&l->rx.addr);
				switch (addr_to_str(&l->rx.addr, str, sizeof(str))) {
				case AF_INET:
					/* the string is built in the trash chunk; chunk_newstr()
					 * returns the start of the new string within it
					 */
					metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
					chunk_appendf(out, "%s:%d", str, port);
					break;
				case AF_INET6:
					metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
					chunk_appendf(out, "[%s]:%d", str, port);
					break;
				case AF_UNIX:
					metric = mkf_str(FO_CONFIG|FS_SERVICE, "unix");
					break;
				case -1:
					/* conversion failed: report strerror(errno) instead */
					metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
					chunk_strcat(out, strerror(errno));
					break;
				default: /* address family not supported */
					break;
				}
			}
			break;
		case ST_F_PROTO:
			metric = mkf_str(FO_STATUS, l->rx.proto->name);
			break;
		default:
			/* not used for listen. If a specific metric
			 * is requested, return an error. Otherwise continue.
			 */
			if (selected_field != NULL)
				return 0;
			continue;
		}
		stats[current_field] = metric;
		/* in single-field mode, stop right after the requested field */
		if (selected_field != NULL)
			break;
	}
	return 1;
}
+
+/* Dumps a line for listener <l> and proxy <px> to the local trash buffer and
+ * uses the state from stream connector <sc>. The caller is responsible for
+ * clearing the local trash buffer if needed. Returns non-zero if it emits
+ * anything, zero otherwise.
+ */
+static int stats_dump_li_stats(struct stconn *sc, struct proxy *px, struct listener *l)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct field *stats = stat_l[STATS_DOMAIN_PROXY];
+ struct stats_module *mod;
+ size_t stats_count = ST_F_TOTAL_FIELDS;
+
+ memset(stats, 0, sizeof(struct field) * stat_count[STATS_DOMAIN_PROXY]);
+
+ if (!stats_fill_li_stats(px, l, ctx->flags, stats,
+ ST_F_TOTAL_FIELDS, NULL))
+ return 0;
+
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ void *counters;
+
+ if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_LI)) {
+ stats_count += mod->stats_count;
+ continue;
+ }
+
+ counters = EXTRA_COUNTERS_GET(l->extra_counters, mod);
+ mod->fill_stats(counters, stats + stats_count);
+ stats_count += mod->stats_count;
+ }
+
+ return stats_dump_one_line(stats, stats_count, appctx);
+}
+
/* Aggregated server states for stats reporting. The values index the
 * srv_hlt_st[] string table below, so both must stay in sync.
 */
enum srv_stats_state {
	SRV_STATS_STATE_DOWN = 0,         /* down, health check failed */
	SRV_STATS_STATE_DOWN_AGENT,       /* down because of the agent check */
	SRV_STATS_STATE_GOING_UP,         /* down but some checks are succeeding */
	SRV_STATS_STATE_UP_GOING_DOWN,    /* up but some checks are failing */
	SRV_STATS_STATE_UP,               /* fully up */
	SRV_STATS_STATE_NOLB_GOING_DOWN,  /* stopping and some checks are failing */
	SRV_STATS_STATE_NOLB,             /* stopping, excluded from load balancing */
	SRV_STATS_STATE_DRAIN_GOING_DOWN, /* draining and some checks are failing */
	SRV_STATS_STATE_DRAIN,            /* administratively draining */
	SRV_STATS_STATE_DRAIN_AGENT,      /* draining with an enabled agent check */
	SRV_STATS_STATE_NO_CHECK,         /* up, no health check configured */

	SRV_STATS_STATE_COUNT, /* Must be last */
};
+
/* Human-readable server status strings, indexed by enum srv_stats_state.
 * Entries containing "%d/%d" are used as printf-style format strings by the
 * dump code (presumably filled with the check progress counters — see the
 * HTML/CSV dump paths for the actual arguments).
 */
static const char *srv_hlt_st[SRV_STATS_STATE_COUNT] = {
	[SRV_STATS_STATE_DOWN]			= "DOWN",
	[SRV_STATS_STATE_DOWN_AGENT]		= "DOWN (agent)",
	[SRV_STATS_STATE_GOING_UP]		= "DOWN %d/%d",
	[SRV_STATS_STATE_UP_GOING_DOWN]		= "UP %d/%d",
	[SRV_STATS_STATE_UP]			= "UP",
	[SRV_STATS_STATE_NOLB_GOING_DOWN]	= "NOLB %d/%d",
	[SRV_STATS_STATE_NOLB]			= "NOLB",
	[SRV_STATS_STATE_DRAIN_GOING_DOWN]	= "DRAIN %d/%d",
	[SRV_STATS_STATE_DRAIN]			= "DRAIN",
	[SRV_STATS_STATE_DRAIN_AGENT]		= "DRAIN (agent)",
	[SRV_STATS_STATE_NO_CHECK]		= "no check"
};
+
/* Compute the stats reporting state of server <sv>. <ref> is the server whose
 * checks actually matter: the end of the tracking chain when <sv> tracks
 * another server, or <sv> itself otherwise. The result is stored into <state>.
 */
static void stats_fill_sv_stats_computestate(struct server *sv, struct server *ref,
                                             enum srv_stats_state *state)
{
	if (sv->cur_state == SRV_ST_RUNNING || sv->cur_state == SRV_ST_STARTING) {
		/* running: "going down" when the reference's health is below the
		 * fully-confirmed threshold (rise + fall - 1)
		 */
		if ((ref->check.state & CHK_ST_ENABLED) &&
		    (ref->check.health < ref->check.rise + ref->check.fall - 1)) {
			*state = SRV_STATS_STATE_UP_GOING_DOWN;
		} else {
			*state = SRV_STATS_STATE_UP;
		}

		/* an administrative DRAIN overrides the UP/UP_GOING_DOWN states */
		if (sv->cur_admin & SRV_ADMF_DRAIN) {
			if (ref->agent.state & CHK_ST_ENABLED)
				*state = SRV_STATS_STATE_DRAIN_AGENT;
			else if (*state == SRV_STATS_STATE_UP_GOING_DOWN)
				*state = SRV_STATS_STATE_DRAIN_GOING_DOWN;
			else
				*state = SRV_STATS_STATE_DRAIN;
		}

		/* an UP server without an enabled check is reported "no check";
		 * this must stay after the DRAIN handling above
		 */
		if (*state == SRV_STATS_STATE_UP && !(ref->check.state & CHK_ST_ENABLED)) {
			*state = SRV_STATS_STATE_NO_CHECK;
		}
	}
	else if (sv->cur_state == SRV_ST_STOPPING) {
		/* stopping: plain NOLB when unchecked and untracked, or when the
		 * health is exactly at the fully-confirmed threshold; otherwise
		 * still transitioning
		 */
		if ((!(sv->check.state & CHK_ST_ENABLED) && !sv->track) ||
		    (ref->check.health == ref->check.rise + ref->check.fall - 1)) {
			*state = SRV_STATS_STATE_NOLB;
		} else {
			*state = SRV_STATS_STATE_NOLB_GOING_DOWN;
		}
	}
	else { /* stopped */
		/* agent failure takes precedence over the health check failure;
		 * a stopped server with passing checks is reported "going up"
		 */
		if ((ref->agent.state & CHK_ST_ENABLED) && !ref->agent.health) {
			*state = SRV_STATS_STATE_DOWN_AGENT;
		} else if ((ref->check.state & CHK_ST_ENABLED) && !ref->check.health) {
			*state = SRV_STATS_STATE_DOWN; /* DOWN */
		} else if ((ref->agent.state & CHK_ST_ENABLED) || (ref->check.state & CHK_ST_ENABLED)) {
			*state = SRV_STATS_STATE_GOING_UP;
		} else {
			*state = SRV_STATS_STATE_DOWN; /* DOWN, unchecked */
		}
	}
}
+
+/* Fill <stats> with the server statistics. <stats> is preallocated array of
+ * length <len>. If <selected_field> is != NULL, only fill this one. The length
+ * of the array must be at least ST_F_TOTAL_FIELDS. If this length is less than
+ * this value, or if the selected field is not implemented for servers, the
+ * function returns 0, otherwise, it returns 1. <flags> can take the value
+ * STAT_SHLGNDS.
+ */
+int stats_fill_sv_stats(struct proxy *px, struct server *sv, int flags,
+ struct field *stats, int len,
+ enum stat_field *selected_field)
+{
+ enum stat_field current_field = (selected_field != NULL ? *selected_field : 0);
+ struct server *via = sv->track ? sv->track : sv;
+ struct server *ref = via;
+ enum srv_stats_state state = 0;
+ char str[INET6_ADDRSTRLEN];
+ struct buffer *out = get_trash_chunk();
+ char *fld_status;
+ long long srv_samples_counter;
+ unsigned int srv_samples_window = TIME_STATS_SAMPLES;
+
+ if (len < ST_F_TOTAL_FIELDS)
+ return 0;
+
+ chunk_reset(out);
+
+ /* compute state for later use */
+ if (selected_field == NULL || *selected_field == ST_F_STATUS ||
+ *selected_field == ST_F_CHECK_RISE || *selected_field == ST_F_CHECK_FALL ||
+ *selected_field == ST_F_CHECK_HEALTH || *selected_field == ST_F_HANAFAIL) {
+ /* we have "via" which is the tracked server as described in the configuration,
+ * and "ref" which is the checked server and the end of the chain.
+ */
+ while (ref->track)
+ ref = ref->track;
+ stats_fill_sv_stats_computestate(sv, ref, &state);
+ }
+
+ /* compute time values for later use: shrink the averaging window to
+ * the observed sample count when fewer than TIME_STATS_SAMPLES samples
+ * were collected, so that averages remain meaningful on fresh servers.
+ */
+ if (selected_field == NULL || *selected_field == ST_F_QTIME ||
+ *selected_field == ST_F_CTIME || *selected_field == ST_F_RTIME ||
+ *selected_field == ST_F_TTIME) {
+ srv_samples_counter = (px->mode == PR_MODE_HTTP) ? sv->counters.p.http.cum_req : sv->counters.cum_lbconn;
+ if (srv_samples_counter < TIME_STATS_SAMPLES && srv_samples_counter > 0)
+ srv_samples_window = srv_samples_counter;
+ }
+
+ /* fill either all fields or just the selected one; a zeroed field
+ * (FF_EMPTY) is left for metrics that do not apply to this server.
+ */
+ for (; current_field < ST_F_TOTAL_FIELDS; current_field++) {
+ struct field metric = { 0 };
+
+ switch (current_field) {
+ case ST_F_PXNAME:
+ metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, px->id);
+ break;
+ case ST_F_SVNAME:
+ metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, sv->id);
+ break;
+ case ST_F_MODE:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, proxy_mode_str(px->mode));
+ break;
+ case ST_F_QCUR:
+ metric = mkf_u32(0, sv->queue.length);
+ break;
+ case ST_F_QMAX:
+ metric = mkf_u32(FN_MAX, sv->counters.nbpend_max);
+ break;
+ case ST_F_SCUR:
+ metric = mkf_u32(0, sv->cur_sess);
+ break;
+ case ST_F_SMAX:
+ metric = mkf_u32(FN_MAX, sv->counters.cur_sess_max);
+ break;
+ case ST_F_SLIM:
+ if (sv->maxconn)
+ metric = mkf_u32(FO_CONFIG|FN_LIMIT, sv->maxconn);
+ break;
+ case ST_F_SRV_ICUR:
+ metric = mkf_u32(0, sv->curr_idle_conns);
+ break;
+ case ST_F_SRV_ILIM:
+ if (sv->max_idle_conns != -1)
+ metric = mkf_u32(FO_CONFIG|FN_LIMIT, sv->max_idle_conns);
+ break;
+ case ST_F_STOT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.cum_sess);
+ break;
+ case ST_F_BIN:
+ metric = mkf_u64(FN_COUNTER, sv->counters.bytes_in);
+ break;
+ case ST_F_BOUT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.bytes_out);
+ break;
+ case ST_F_DRESP:
+ metric = mkf_u64(FN_COUNTER, sv->counters.denied_resp);
+ break;
+ case ST_F_ECON:
+ metric = mkf_u64(FN_COUNTER, sv->counters.failed_conns);
+ break;
+ case ST_F_ERESP:
+ metric = mkf_u64(FN_COUNTER, sv->counters.failed_resp);
+ break;
+ case ST_F_WRETR:
+ metric = mkf_u64(FN_COUNTER, sv->counters.retries);
+ break;
+ case ST_F_WREDIS:
+ metric = mkf_u64(FN_COUNTER, sv->counters.redispatches);
+ break;
+ case ST_F_WREW:
+ metric = mkf_u64(FN_COUNTER, sv->counters.failed_rewrites);
+ break;
+ case ST_F_EINT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.internal_errors);
+ break;
+ case ST_F_CONNECT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.connect);
+ break;
+ case ST_F_REUSE:
+ metric = mkf_u64(FN_COUNTER, sv->counters.reuse);
+ break;
+ case ST_F_IDLE_CONN_CUR:
+ metric = mkf_u32(0, sv->curr_idle_nb);
+ break;
+ case ST_F_SAFE_CONN_CUR:
+ metric = mkf_u32(0, sv->curr_safe_nb);
+ break;
+ case ST_F_USED_CONN_CUR:
+ metric = mkf_u32(0, sv->curr_used_conns);
+ break;
+ case ST_F_NEED_CONN_EST:
+ metric = mkf_u32(0, sv->est_need_conns);
+ break;
+ case ST_F_STATUS:
+ /* admin states (MAINT variants) take precedence over the
+ * computed health state; otherwise srv_hlt_st[state] may be
+ * a format string consuming the two trailing arguments.
+ */
+ fld_status = chunk_newstr(out);
+ if (sv->cur_admin & SRV_ADMF_RMAINT)
+ chunk_appendf(out, "MAINT (resolution)");
+ else if (sv->cur_admin & SRV_ADMF_IMAINT)
+ chunk_appendf(out, "MAINT (via %s/%s)", via->proxy->id, via->id);
+ else if (sv->cur_admin & SRV_ADMF_MAINT)
+ chunk_appendf(out, "MAINT");
+ else
+ chunk_appendf(out,
+ srv_hlt_st[state],
+ (ref->cur_state != SRV_ST_STOPPED) ? (ref->check.health - ref->check.rise + 1) : (ref->check.health),
+ (ref->cur_state != SRV_ST_STOPPED) ? (ref->check.fall) : (ref->check.rise));
+
+ metric = mkf_str(FO_STATUS, fld_status);
+ break;
+ case ST_F_LASTCHG:
+ metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - sv->last_change);
+ break;
+ case ST_F_WEIGHT:
+ metric = mkf_u32(FN_AVG, (sv->cur_eweight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv);
+ break;
+ case ST_F_UWEIGHT:
+ metric = mkf_u32(FN_AVG, sv->uweight);
+ break;
+ case ST_F_ACT:
+ metric = mkf_u32(FO_STATUS, (sv->flags & SRV_F_BACKUP) ? 0 : 1);
+ break;
+ case ST_F_BCK:
+ metric = mkf_u32(FO_STATUS, (sv->flags & SRV_F_BACKUP) ? 1 : 0);
+ break;
+ case ST_F_CHKFAIL:
+ if (sv->check.state & CHK_ST_ENABLED)
+ metric = mkf_u64(FN_COUNTER, sv->counters.failed_checks);
+ break;
+ case ST_F_CHKDOWN:
+ if (sv->check.state & CHK_ST_ENABLED)
+ metric = mkf_u64(FN_COUNTER, sv->counters.down_trans);
+ break;
+ case ST_F_DOWNTIME:
+ if (sv->check.state & CHK_ST_ENABLED)
+ metric = mkf_u32(FN_COUNTER, srv_downtime(sv));
+ break;
+ case ST_F_QLIMIT:
+ if (sv->maxqueue)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, sv->maxqueue);
+ break;
+ case ST_F_PID:
+ metric = mkf_u32(FO_KEY, 1);
+ break;
+ case ST_F_IID:
+ metric = mkf_u32(FO_KEY|FS_SERVICE, px->uuid);
+ break;
+ case ST_F_SID:
+ metric = mkf_u32(FO_KEY|FS_SERVICE, sv->puid);
+ break;
+ case ST_F_SRID:
+ metric = mkf_u32(FN_COUNTER, sv->rid);
+ break;
+ case ST_F_THROTTLE:
+ if (sv->cur_state == SRV_ST_STARTING && !server_is_draining(sv))
+ metric = mkf_u32(FN_AVG, server_throttle_rate(sv));
+ break;
+ case ST_F_LBTOT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.cum_lbconn);
+ break;
+ case ST_F_TRACKED:
+ if (sv->track) {
+ char *fld_track = chunk_newstr(out);
+ chunk_appendf(out, "%s/%s", sv->track->proxy->id, sv->track->id);
+ metric = mkf_str(FO_CONFIG|FN_NAME|FS_SERVICE, fld_track);
+ }
+ break;
+ case ST_F_TYPE:
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, STATS_TYPE_SV);
+ break;
+ case ST_F_RATE:
+ metric = mkf_u32(FN_RATE, read_freq_ctr(&sv->sess_per_sec));
+ break;
+ case ST_F_RATE_MAX:
+ metric = mkf_u32(FN_MAX, sv->counters.sps_max);
+ break;
+ case ST_F_CHECK_STATUS:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED) {
+ const char *fld_chksts;
+
+ fld_chksts = chunk_newstr(out);
+ chunk_strcat(out, "* "); // for check in progress
+ chunk_strcat(out, get_check_status_info(sv->check.status));
+ if (!(sv->check.state & CHK_ST_INPROGRESS))
+ fld_chksts += 2; // skip "* "
+ metric = mkf_str(FN_OUTPUT, fld_chksts);
+ }
+ break;
+ case ST_F_CHECK_CODE:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED &&
+ sv->check.status >= HCHK_STATUS_L57DATA)
+ metric = mkf_u32(FN_OUTPUT, sv->check.code);
+ break;
+ case ST_F_CHECK_DURATION:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED &&
+ sv->check.status >= HCHK_STATUS_CHECKED)
+ metric = mkf_u64(FN_DURATION, MAX(sv->check.duration, 0));
+ break;
+ case ST_F_CHECK_DESC:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_str(FN_OUTPUT, get_check_status_description(sv->check.status));
+ break;
+ case ST_F_LAST_CHK:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_str(FN_OUTPUT, sv->check.desc);
+ break;
+ case ST_F_CHECK_RISE:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, ref->check.rise);
+ break;
+ case ST_F_CHECK_FALL:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, ref->check.fall);
+ break;
+ case ST_F_CHECK_HEALTH:
+ if ((sv->check.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, ref->check.health);
+ break;
+ case ST_F_AGENT_STATUS:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED) {
+ const char *fld_chksts;
+
+ fld_chksts = chunk_newstr(out);
+ chunk_strcat(out, "* "); // for check in progress
+ chunk_strcat(out, get_check_status_info(sv->agent.status));
+ if (!(sv->agent.state & CHK_ST_INPROGRESS))
+ fld_chksts += 2; // skip "* "
+ metric = mkf_str(FN_OUTPUT, fld_chksts);
+ }
+ break;
+ case ST_F_AGENT_CODE:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED &&
+ (sv->agent.status >= HCHK_STATUS_L57DATA))
+ metric = mkf_u32(FN_OUTPUT, sv->agent.code);
+ break;
+ case ST_F_AGENT_DURATION:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u64(FN_DURATION, sv->agent.duration);
+ break;
+ case ST_F_AGENT_DESC:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_str(FN_OUTPUT, get_check_status_description(sv->agent.status));
+ break;
+ case ST_F_LAST_AGT:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_str(FN_OUTPUT, sv->agent.desc);
+ break;
+ case ST_F_AGENT_RISE:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, sv->agent.rise);
+ break;
+ case ST_F_AGENT_FALL:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, sv->agent.fall);
+ break;
+ case ST_F_AGENT_HEALTH:
+ if ((sv->agent.state & (CHK_ST_ENABLED|CHK_ST_PAUSED)) == CHK_ST_ENABLED)
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, sv->agent.health);
+ break;
+ case ST_F_REQ_TOT:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.cum_req);
+ break;
+ case ST_F_HRSP_1XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[1]);
+ break;
+ case ST_F_HRSP_2XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[2]);
+ break;
+ case ST_F_HRSP_3XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[3]);
+ break;
+ case ST_F_HRSP_4XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[4]);
+ break;
+ case ST_F_HRSP_5XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[5]);
+ break;
+ case ST_F_HRSP_OTHER:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, sv->counters.p.http.rsp[0]);
+ break;
+ case ST_F_HANAFAIL:
+ if (ref->observe)
+ metric = mkf_u64(FN_COUNTER, sv->counters.failed_hana);
+ break;
+ case ST_F_CLI_ABRT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.cli_aborts);
+ break;
+ case ST_F_SRV_ABRT:
+ metric = mkf_u64(FN_COUNTER, sv->counters.srv_aborts);
+ break;
+ case ST_F_LASTSESS:
+ metric = mkf_s32(FN_AGE, srv_lastsession(sv));
+ break;
+ case ST_F_QTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(sv->counters.q_time, srv_samples_window));
+ break;
+ case ST_F_CTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(sv->counters.c_time, srv_samples_window));
+ break;
+ case ST_F_RTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(sv->counters.d_time, srv_samples_window));
+ break;
+ case ST_F_TTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(sv->counters.t_time, srv_samples_window));
+ break;
+ case ST_F_QT_MAX:
+ metric = mkf_u32(FN_MAX, sv->counters.qtime_max);
+ break;
+ case ST_F_CT_MAX:
+ metric = mkf_u32(FN_MAX, sv->counters.ctime_max);
+ break;
+ case ST_F_RT_MAX:
+ metric = mkf_u32(FN_MAX, sv->counters.dtime_max);
+ break;
+ case ST_F_TT_MAX:
+ metric = mkf_u32(FN_MAX, sv->counters.ttime_max);
+ break;
+ case ST_F_ADDR:
+ if (flags & STAT_SHLGNDS) {
+ switch (addr_to_str(&sv->addr, str, sizeof(str))) {
+ case AF_INET:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
+ chunk_appendf(out, "%s:%d", str, sv->svc_port);
+ break;
+ case AF_INET6:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
+ chunk_appendf(out, "[%s]:%d", str, sv->svc_port);
+ break;
+ case AF_UNIX:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, "unix");
+ break;
+ case -1:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, chunk_newstr(out));
+ chunk_strcat(out, strerror(errno));
+ break;
+ default: /* address family not supported */
+ break;
+ }
+ }
+ break;
+ case ST_F_COOKIE:
+ if (flags & STAT_SHLGNDS && sv->cookie)
+ metric = mkf_str(FO_CONFIG|FN_NAME|FS_SERVICE, sv->cookie);
+ break;
+ default:
+ /* not used for servers. If a specific metric
+ * is requested, return an error. Otherwise continue.
+ */
+ if (selected_field != NULL)
+ return 0;
+ continue;
+ }
+ stats[current_field] = metric;
+ if (selected_field != NULL)
+ break;
+ }
+ return 1;
+}
+
+/* Dumps a line for server <sv> and proxy <px> to the local trash buffer and
+ * uses the state from stream connector <sc>. The caller is responsible for
+ * clearing the local trash buffer if needed. Returns non-zero if it emits
+ * anything, zero otherwise.
+ */
+static int stats_dump_sv_stats(struct stconn *sc, struct proxy *px, struct server *sv)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct stats_module *mod;
+ struct field *stats = stat_l[STATS_DOMAIN_PROXY];
+ size_t stats_count = ST_F_TOTAL_FIELDS;
+
+ /* clear the whole per-domain field array, including module slots */
+ memset(stats, 0, sizeof(struct field) * stat_count[STATS_DOMAIN_PROXY]);
+
+ if (!stats_fill_sv_stats(px, sv, ctx->flags, stats,
+ ST_F_TOTAL_FIELDS, NULL))
+ return 0;
+
+ /* append the extra fields of every stats module with server
+ * capability; modules without it still consume their slots so
+ * that field offsets stay stable across dump types.
+ */
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ void *counters;
+
+ if (stats_get_domain(mod->domain_flags) != STATS_DOMAIN_PROXY)
+ continue;
+
+ if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_SRV)) {
+ stats_count += mod->stats_count;
+ continue;
+ }
+
+ counters = EXTRA_COUNTERS_GET(sv->extra_counters, mod);
+ mod->fill_stats(counters, stats + stats_count);
+ stats_count += mod->stats_count;
+ }
+
+ return stats_dump_one_line(stats, stats_count, appctx);
+}
+
+/* Helper to compute aggregate server values for backend <px>:
+ * <nbup> receives the number of non-stopped servers, <nbsrv> the total
+ * server count, and <totuw> the summed user weight of the currently
+ * usable servers of the active tier (or of the LB fallback server when
+ * no active server remains).
+ */
+static void stats_fill_be_stats_computesrv(struct proxy *px, int *nbup, int *nbsrv, int *totuw)
+{
+ int nbup_tmp, nbsrv_tmp, totuw_tmp;
+ const struct server *srv;
+
+ nbup_tmp = nbsrv_tmp = totuw_tmp = 0;
+ for (srv = px->srv; srv; srv = srv->next) {
+ if (srv->cur_state != SRV_ST_STOPPED) {
+ nbup_tmp++;
+ /* only count weights of the tier in use: backup
+ * servers when no active server is up, active
+ * servers otherwise.
+ */
+ if (srv_currently_usable(srv) &&
+ (!px->srv_act ^ !(srv->flags & SRV_F_BACKUP)))
+ totuw_tmp += srv->uweight;
+ }
+ nbsrv_tmp++;
+ }
+
+ /* the LB fallback server, if any, overrides the computed weight */
+ HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
+ if (!px->srv_act && px->lbprm.fbck)
+ totuw_tmp = px->lbprm.fbck->uweight;
+ HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+
+ /* use tmp variable then assign result to make gcc happy */
+ *nbup = nbup_tmp;
+ *nbsrv = nbsrv_tmp;
+ *totuw = totuw_tmp;
+}
+
+/* Fill <stats> with the backend statistics. <stats> is preallocated array of
+ * length <len>. If <selected_field> is != NULL, only fill this one. The length
+ * of the array must be at least ST_F_TOTAL_FIELDS. If this length is less than
+ * this value, or if the selected field is not implemented for backends, the
+ * function returns 0, otherwise, it returns 1. <flags> can take the value
+ * STAT_SHLGNDS.
+ */
+int stats_fill_be_stats(struct proxy *px, int flags, struct field *stats, int len,
+ enum stat_field *selected_field)
+{
+ enum stat_field current_field = (selected_field != NULL ? *selected_field : 0);
+ long long be_samples_counter;
+ unsigned int be_samples_window = TIME_STATS_SAMPLES;
+ struct buffer *out = get_trash_chunk();
+ int nbup, nbsrv, totuw;
+ char *fld;
+
+ if (len < ST_F_TOTAL_FIELDS)
+ return 0;
+
+ nbup = nbsrv = totuw = 0;
+ /* some srv values compute for later if we either select all fields or
+ * need them for one of the mentioned ones */
+ if (selected_field == NULL || *selected_field == ST_F_STATUS ||
+ *selected_field == ST_F_UWEIGHT)
+ stats_fill_be_stats_computesrv(px, &nbup, &nbsrv, &totuw);
+
+ /* same here but specific to time fields: shrink the averaging window
+ * when fewer samples than TIME_STATS_SAMPLES were collected.
+ */
+ if (selected_field == NULL || *selected_field == ST_F_QTIME ||
+ *selected_field == ST_F_CTIME || *selected_field == ST_F_RTIME ||
+ *selected_field == ST_F_TTIME) {
+ be_samples_counter = (px->mode == PR_MODE_HTTP) ? px->be_counters.p.http.cum_req : px->be_counters.cum_lbconn;
+ if (be_samples_counter < TIME_STATS_SAMPLES && be_samples_counter > 0)
+ be_samples_window = be_samples_counter;
+ }
+
+ /* fill either all fields or just the selected one; a zeroed field
+ * (FF_EMPTY) is left for metrics that do not apply to backends.
+ */
+ for (; current_field < ST_F_TOTAL_FIELDS; current_field++) {
+ struct field metric = { 0 };
+
+ switch (current_field) {
+ case ST_F_PXNAME:
+ metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, px->id);
+ break;
+ case ST_F_SVNAME:
+ metric = mkf_str(FO_KEY|FN_NAME|FS_SERVICE, "BACKEND");
+ break;
+ case ST_F_MODE:
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, proxy_mode_str(px->mode));
+ break;
+ case ST_F_QCUR:
+ metric = mkf_u32(0, px->queue.length);
+ break;
+ case ST_F_QMAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.nbpend_max);
+ break;
+ case ST_F_SCUR:
+ metric = mkf_u32(0, px->beconn);
+ break;
+ case ST_F_SMAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.conn_max);
+ break;
+ case ST_F_SLIM:
+ metric = mkf_u32(FO_CONFIG|FN_LIMIT, px->fullconn);
+ break;
+ case ST_F_STOT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.cum_conn);
+ break;
+ case ST_F_BIN:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.bytes_in);
+ break;
+ case ST_F_BOUT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.bytes_out);
+ break;
+ case ST_F_DREQ:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.denied_req);
+ break;
+ case ST_F_DRESP:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.denied_resp);
+ break;
+ case ST_F_ECON:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.failed_conns);
+ break;
+ case ST_F_ERESP:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.failed_resp);
+ break;
+ case ST_F_WRETR:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.retries);
+ break;
+ case ST_F_WREDIS:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.redispatches);
+ break;
+ case ST_F_WREW:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.failed_rewrites);
+ break;
+ case ST_F_EINT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.internal_errors);
+ break;
+ case ST_F_CONNECT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.connect);
+ break;
+ case ST_F_REUSE:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.reuse);
+ break;
+ case ST_F_STATUS:
+ /* a backend with no server at all is reported UP; when
+ * servers are hidden, expose the up/total counts instead.
+ */
+ fld = chunk_newstr(out);
+ chunk_appendf(out, "%s", (px->lbprm.tot_weight > 0 || !px->srv) ? "UP" : "DOWN");
+ if (flags & (STAT_HIDE_MAINT|STAT_HIDE_DOWN))
+ chunk_appendf(out, " (%d/%d)", nbup, nbsrv);
+ metric = mkf_str(FO_STATUS, fld);
+ break;
+ case ST_F_AGG_SRV_CHECK_STATUS: // DEPRECATED
+ case ST_F_AGG_SRV_STATUS:
+ metric = mkf_u32(FN_GAUGE, 0);
+ break;
+ case ST_F_AGG_CHECK_STATUS:
+ metric = mkf_u32(FN_GAUGE, 0);
+ break;
+ case ST_F_WEIGHT:
+ metric = mkf_u32(FN_AVG, (px->lbprm.tot_weight * px->lbprm.wmult + px->lbprm.wdiv - 1) / px->lbprm.wdiv);
+ break;
+ case ST_F_UWEIGHT:
+ metric = mkf_u32(FN_AVG, totuw);
+ break;
+ case ST_F_ACT:
+ metric = mkf_u32(0, px->srv_act);
+ break;
+ case ST_F_BCK:
+ metric = mkf_u32(0, px->srv_bck);
+ break;
+ case ST_F_CHKDOWN:
+ metric = mkf_u64(FN_COUNTER, px->down_trans);
+ break;
+ case ST_F_LASTCHG:
+ metric = mkf_u32(FN_AGE, ns_to_sec(now_ns) - px->last_change);
+ break;
+ case ST_F_DOWNTIME:
+ if (px->srv)
+ metric = mkf_u32(FN_COUNTER, be_downtime(px));
+ break;
+ case ST_F_PID:
+ metric = mkf_u32(FO_KEY, 1);
+ break;
+ case ST_F_IID:
+ metric = mkf_u32(FO_KEY|FS_SERVICE, px->uuid);
+ break;
+ case ST_F_SID:
+ metric = mkf_u32(FO_KEY|FS_SERVICE, 0);
+ break;
+ case ST_F_LBTOT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.cum_lbconn);
+ break;
+ case ST_F_TYPE:
+ metric = mkf_u32(FO_CONFIG|FS_SERVICE, STATS_TYPE_BE);
+ break;
+ case ST_F_RATE:
+ metric = mkf_u32(0, read_freq_ctr(&px->be_sess_per_sec));
+ break;
+ case ST_F_RATE_MAX:
+ metric = mkf_u32(0, px->be_counters.sps_max);
+ break;
+ case ST_F_COOKIE:
+ if (flags & STAT_SHLGNDS && px->cookie_name)
+ metric = mkf_str(FO_CONFIG|FN_NAME|FS_SERVICE, px->cookie_name);
+ break;
+ case ST_F_ALGO:
+ if (flags & STAT_SHLGNDS)
+ metric = mkf_str(FO_CONFIG|FS_SERVICE, backend_lb_algo_str(px->lbprm.algo & BE_LB_ALGO));
+ break;
+ case ST_F_REQ_TOT:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.cum_req);
+ break;
+ case ST_F_HRSP_1XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[1]);
+ break;
+ case ST_F_HRSP_2XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[2]);
+ break;
+ case ST_F_HRSP_3XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[3]);
+ break;
+ case ST_F_HRSP_4XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[4]);
+ break;
+ case ST_F_HRSP_5XX:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[5]);
+ break;
+ case ST_F_HRSP_OTHER:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.rsp[0]);
+ break;
+ case ST_F_CACHE_LOOKUPS:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.cache_lookups);
+ break;
+ case ST_F_CACHE_HITS:
+ if (px->mode == PR_MODE_HTTP)
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.cache_hits);
+ break;
+ case ST_F_CLI_ABRT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.cli_aborts);
+ break;
+ case ST_F_SRV_ABRT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.srv_aborts);
+ break;
+ case ST_F_COMP_IN:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.comp_in[COMP_DIR_RES]);
+ break;
+ case ST_F_COMP_OUT:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.comp_out[COMP_DIR_RES]);
+ break;
+ case ST_F_COMP_BYP:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.comp_byp[COMP_DIR_RES]);
+ break;
+ case ST_F_COMP_RSP:
+ metric = mkf_u64(FN_COUNTER, px->be_counters.p.http.comp_rsp);
+ break;
+ case ST_F_LASTSESS:
+ metric = mkf_s32(FN_AGE, be_lastsession(px));
+ break;
+ case ST_F_QTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(px->be_counters.q_time, be_samples_window));
+ break;
+ case ST_F_CTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(px->be_counters.c_time, be_samples_window));
+ break;
+ case ST_F_RTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(px->be_counters.d_time, be_samples_window));
+ break;
+ case ST_F_TTIME:
+ metric = mkf_u32(FN_AVG, swrate_avg(px->be_counters.t_time, be_samples_window));
+ break;
+ case ST_F_QT_MAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.qtime_max);
+ break;
+ case ST_F_CT_MAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.ctime_max);
+ break;
+ case ST_F_RT_MAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.dtime_max);
+ break;
+ case ST_F_TT_MAX:
+ metric = mkf_u32(FN_MAX, px->be_counters.ttime_max);
+ break;
+ default:
+ /* not used for backends. If a specific metric
+ * is requested, return an error. Otherwise continue.
+ */
+ if (selected_field != NULL)
+ return 0;
+ continue;
+ }
+ stats[current_field] = metric;
+ if (selected_field != NULL)
+ break;
+ }
+ return 1;
+}
+
+/* Dumps a line for backend <px> to the local trash buffer and uses the
+ * state from stream connector <sc>. The caller is responsible for clearing
+ * the local trash buffer if needed. Returns non-zero if it emits anything,
+ * zero otherwise.
+ */
+static int stats_dump_be_stats(struct stconn *sc, struct proxy *px)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct field *stats = stat_l[STATS_DOMAIN_PROXY];
+ struct stats_module *mod;
+ size_t stats_count = ST_F_TOTAL_FIELDS;
+
+ /* nothing to dump for a pure frontend */
+ if (!(px->cap & PR_CAP_BE))
+ return 0;
+
+ /* honor the "scope" restriction of a bounded dump */
+ if ((ctx->flags & STAT_BOUND) && !(ctx->type & (1 << STATS_TYPE_BE)))
+ return 0;
+
+ memset(stats, 0, sizeof(struct field) * stat_count[STATS_DOMAIN_PROXY]);
+
+ if (!stats_fill_be_stats(px, ctx->flags, stats, ST_F_TOTAL_FIELDS, NULL))
+ return 0;
+
+ /* append the extra fields of every stats module with backend
+ * capability; modules without it still consume their slots so
+ * that field offsets stay stable across dump types.
+ */
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ struct extra_counters *counters;
+
+ if (stats_get_domain(mod->domain_flags) != STATS_DOMAIN_PROXY)
+ continue;
+
+ if (!(stats_px_get_cap(mod->domain_flags) & STATS_PX_CAP_BE)) {
+ stats_count += mod->stats_count;
+ continue;
+ }
+
+ counters = EXTRA_COUNTERS_GET(px->extra_counters_be, mod);
+ mod->fill_stats(counters, stats + stats_count);
+ stats_count += mod->stats_count;
+ }
+
+ return stats_dump_one_line(stats, stats_count, appctx);
+}
+
+/* Dumps the HTML table header for proxy <px> to the local trash buffer and
+ * uses the state from stream connector <sc>. The caller is responsible for
+ * clearing the local trash buffer if needed.
+ */
+static void stats_dump_html_px_hdr(struct stconn *sc, struct proxy *px)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ char scope_txt[STAT_SCOPE_TXT_MAXLEN + sizeof STAT_SCOPE_PATTERN];
+ struct stats_module *mod;
+ int stats_module_len = 0;
+
+ if (px->cap & PR_CAP_BE && px->srv && (ctx->flags & STAT_ADMIN)) {
+ /* A form to enable/disable this proxy servers */
+
+ /* scope_txt = search pattern + search query, ctx->scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
+ /* NOTE(review): scope_txt is built here but not visibly used
+ * below in this function — confirm whether it is still needed.
+ */
+ scope_txt[0] = 0;
+ if (ctx->scope_len) {
+ const char *scope_ptr = stats_scope_ptr(appctx, sc);
+
+ strlcpy2(scope_txt, STAT_SCOPE_PATTERN, sizeof(scope_txt));
+ memcpy(scope_txt + strlen(STAT_SCOPE_PATTERN), scope_ptr, ctx->scope_len);
+ scope_txt[strlen(STAT_SCOPE_PATTERN) + ctx->scope_len] = 0;
+ }
+
+ chunk_appendf(&trash_chunk,
+ "<form method=\"post\">");
+ }
+
+ /* print a new table */
+ chunk_appendf(&trash_chunk,
+ "<table class=\"tbl\" width=\"100%%\">\n"
+ "<tr class=\"titre\">"
+ "<th class=\"pxname\" width=\"10%%\">");
+
+ chunk_appendf(&trash_chunk,
+ "<a name=\"%s\"></a>%s"
+ "<a class=px href=\"#%s\">%s</a>",
+ px->id,
+ (ctx->flags & STAT_SHLGNDS) ? "<u>":"",
+ px->id, px->id);
+
+ if (ctx->flags & STAT_SHLGNDS) {
+ /* cap, mode, id */
+ chunk_appendf(&trash_chunk, "<div class=tips>cap: %s, mode: %s, id: %d",
+ proxy_cap_str(px->cap), proxy_mode_str(px->mode),
+ px->uuid);
+ chunk_appendf(&trash_chunk, "</div>");
+ }
+
+ chunk_appendf(&trash_chunk,
+ "%s</th>"
+ "<th class=\"%s\" width=\"90%%\">%s</th>"
+ "</tr>\n"
+ "</table>\n"
+ "<table class=\"tbl\" width=\"100%%\">\n"
+ "<tr class=\"titre\">",
+ (ctx->flags & STAT_SHLGNDS) ? "</u>":"",
+ px->desc ? "desc" : "empty", px->desc ? px->desc : "");
+
+ if (ctx->flags & STAT_ADMIN) {
+ /* Column heading for Enable or Disable server */
+ if ((px->cap & PR_CAP_BE) && px->srv)
+ chunk_appendf(&trash_chunk,
+ "<th rowspan=2 width=1><input type=\"checkbox\" "
+ "onclick=\"for(c in document.getElementsByClassName('%s-checkbox')) "
+ "document.getElementsByClassName('%s-checkbox').item(c).checked = this.checked\"></th>",
+ px->id,
+ px->id);
+ else
+ chunk_appendf(&trash_chunk, "<th rowspan=2></th>");
+ }
+
+ /* first header row: grouped column titles */
+ chunk_appendf(&trash_chunk,
+ "<th rowspan=2></th>"
+ "<th colspan=3>Queue</th>"
+ "<th colspan=3>Session rate</th><th colspan=6>Sessions</th>"
+ "<th colspan=2>Bytes</th><th colspan=2>Denied</th>"
+ "<th colspan=3>Errors</th><th colspan=2>Warnings</th>"
+ "<th colspan=9>Server</th>");
+
+ if (ctx->flags & STAT_SHMODULES) {
+ // calculate the count of module for colspan attribute
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ ++stats_module_len;
+ }
+ chunk_appendf(&trash_chunk, "<th colspan=%d>Extra modules</th>",
+ stats_module_len);
+ }
+
+ /* second header row: individual column titles */
+ chunk_appendf(&trash_chunk,
+ "</tr>\n"
+ "<tr class=\"titre\">"
+ "<th>Cur</th><th>Max</th><th>Limit</th>"
+ "<th>Cur</th><th>Max</th><th>Limit</th><th>Cur</th><th>Max</th>"
+ "<th>Limit</th><th>Total</th><th>LbTot</th><th>Last</th><th>In</th><th>Out</th>"
+ "<th>Req</th><th>Resp</th><th>Req</th><th>Conn</th>"
+ "<th>Resp</th><th>Retr</th><th>Redis</th>"
+ "<th>Status</th><th>LastChk</th><th>Wght</th><th>Act</th>"
+ "<th>Bck</th><th>Chk</th><th>Dwn</th><th>Dwntme</th>"
+ "<th>Thrtle</th>\n");
+
+ if (ctx->flags & STAT_SHMODULES) {
+ list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
+ chunk_appendf(&trash_chunk, "<th>%s</th>", mod->name);
+ }
+ }
+
+ chunk_appendf(&trash_chunk, "</tr>");
+}
+
+/* Dumps the HTML table trailer for proxy <px> to the local trash buffer and
+ * uses the state from stream connector <sc>. The caller is responsible for
+ * clearing the local trash buffer if needed.
+ */
+static void stats_dump_html_px_end(struct stconn *sc, struct proxy *px)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+
+ chunk_appendf(&trash_chunk, "</table>");
+
+ if ((px->cap & PR_CAP_BE) && px->srv && (ctx->flags & STAT_ADMIN)) {
+ /* close the form used to enable/disable this proxy servers */
+ chunk_appendf(&trash_chunk,
+ "Choose the action to perform on the checked servers : "
+ "<select name=action>"
+ "<option value=\"\"></option>"
+ "<option value=\"ready\">Set state to READY</option>"
+ "<option value=\"drain\">Set state to DRAIN</option>"
+ "<option value=\"maint\">Set state to MAINT</option>"
+ "<option value=\"dhlth\">Health: disable checks</option>"
+ "<option value=\"ehlth\">Health: enable checks</option>"
+ "<option value=\"hrunn\">Health: force UP</option>"
+ "<option value=\"hnolb\">Health: force NOLB</option>"
+ "<option value=\"hdown\">Health: force DOWN</option>"
+ "<option value=\"dagent\">Agent: disable checks</option>"
+ "<option value=\"eagent\">Agent: enable checks</option>"
+ "<option value=\"arunn\">Agent: force UP</option>"
+ "<option value=\"adown\">Agent: force DOWN</option>"
+ "<option value=\"shutdown\">Kill Sessions</option>"
+ "</select>"
+ "<input type=\"hidden\" name=\"b\" value=\"#%d\">"
+ "&nbsp;<input type=\"submit\" value=\"Apply\">"
+ "</form>",
+ px->uuid);
+ }
+
+ chunk_appendf(&trash_chunk, "<p>\n");
+}
+
+/*
+ * Dumps statistics for a proxy. The output is sent to the stream connector's
+ * input buffer. Returns 0 if it had to stop dumping data because of lack of
+ * buffer space, or non-zero if everything completed. This function is used
+ * both by the CLI and the HTTP entry points, and is able to dump the output
+ * in HTML or CSV formats. The dump is resumable: <ctx->px_st> records which
+ * part of the proxy (header, frontend, listeners, servers, backend, trailer)
+ * is being dumped and <ctx->field> which stat field, so that a later call
+ * restarts exactly where the previous one stopped on a full buffer.
+ */
+int stats_dump_proxy_to_buffer(struct stconn *sc, struct htx *htx,
+			       struct proxy *px)
+{
+	struct appctx *appctx = __sc_appctx(sc);
+	struct show_stat_ctx *ctx = appctx->svcctx;
+	struct channel *rep = sc_ic(sc);
+	struct server *sv, *svs;	/* server and server-state, server-state=server or server->track */
+	struct listener *l;
+	struct uri_auth *uri = NULL;
+	int current_field;
+	int px_st = ctx->px_st;	/* state on entry, used to detect a resumed server dump */
+
+	if (ctx->http_px)
+		uri = ctx->http_px->uri_auth;
+	chunk_reset(&trash_chunk);
+more:
+	current_field = ctx->field;
+
+	switch (ctx->px_st) {
+	case STAT_PX_ST_INIT:
+		/* we are on a new proxy */
+		if (uri && uri->scope) {
+			/* we have a limited scope, we have to check the proxy name */
+			struct stat_scope *scope;
+			int len;
+
+			len = strlen(px->id);
+			scope = uri->scope;
+
+			while (scope) {
+				/* match exact proxy name */
+				if (scope->px_len == len && !memcmp(px->id, scope->px_id, len))
+					break;
+
+				/* match '.' which means 'self' proxy */
+				if (strcmp(scope->px_id, ".") == 0 && px == ctx->http_px)
+					break;
+				scope = scope->next;
+			}
+
+			/* proxy name not found : don't dump anything */
+			if (scope == NULL)
+				return 1;
+		}
+
+		/* if the user has requested a limited output and the proxy
+		 * name does not match, skip it.
+		 */
+		if (ctx->scope_len) {
+			const char *scope_ptr = stats_scope_ptr(appctx, sc);
+
+			/* case-insensitive substring match against the proxy name */
+			if (strnistr(px->id, strlen(px->id), scope_ptr, ctx->scope_len) == NULL)
+				return 1;
+		}
+
+		/* a bound dump (e.g. "show stat <iid> ...") only covers the requested proxy id */
+		if ((ctx->flags & STAT_BOUND) &&
+		    (ctx->iid != -1) &&
+		    (px->uuid != ctx->iid))
+			return 1;
+
+		ctx->px_st = STAT_PX_ST_TH;
+		__fallthrough;
+
+	case STAT_PX_ST_TH:
+		/* table header, HTML only */
+		if (ctx->flags & STAT_FMT_HTML) {
+			stats_dump_html_px_hdr(sc, px);
+			if (!stats_putchk(appctx, htx))
+				goto full;
+		}
+
+		ctx->px_st = STAT_PX_ST_FE;
+		__fallthrough;
+
+	case STAT_PX_ST_FE:
+		/* print the frontend */
+		if (stats_dump_fe_stats(sc, px)) {
+			if (!stats_putchk(appctx, htx))
+				goto full;
+			ctx->flags |= STAT_STARTED;
+			/* non-zero field means the line itself was interrupted mid-dump */
+			if (ctx->field)
+				goto more;
+		}
+
+		current_field = 0;
+		ctx->obj2 = px->conf.listeners.n;
+		ctx->px_st = STAT_PX_ST_LI;
+		__fallthrough;
+
+	case STAT_PX_ST_LI:
+		/* obj2 points to listeners list as initialized above */
+		for (; ctx->obj2 != &px->conf.listeners; ctx->obj2 = l->by_fe.n) {
+			if (htx) {
+				if (htx_almost_full(htx)) {
+					sc_need_room(sc, htx->size / 2);
+					goto full;
+				}
+			}
+			else {
+				if (buffer_almost_full(&rep->buf)) {
+					sc_need_room(sc, b_size(&rep->buf) / 2);
+					goto full;
+				}
+			}
+
+			l = LIST_ELEM(ctx->obj2, struct listener *, by_fe);
+			if (!l->counters)
+				continue;
+
+			if (ctx->flags & STAT_BOUND) {
+				if (!(ctx->type & (1 << STATS_TYPE_SO)))
+					break;
+
+				if (ctx->sid != -1 && l->luid != ctx->sid)
+					continue;
+			}
+
+			/* print the frontend */
+			if (stats_dump_li_stats(sc, px, l)) {
+				if (!stats_putchk(appctx, htx))
+					goto full;
+				ctx->flags |= STAT_STARTED;
+				if (ctx->field)
+					goto more;
+			}
+			current_field = 0;
+		}
+
+		ctx->obj2 = px->srv; /* may be NULL */
+		ctx->px_st = STAT_PX_ST_SV;
+		__fallthrough;
+
+	case STAT_PX_ST_SV:
+		/* check for dump resumption */
+		if (px_st == STAT_PX_ST_SV) {
+			struct server *cur = ctx->obj2;
+
+			/* re-entrant dump */
+			BUG_ON(!cur);
+			if (cur->flags & SRV_F_DELETED) {
+				/* the server could have been marked as deleted
+				 * between two dumping attempts, skip it.
+				 */
+				cur = cur->next;
+			}
+			srv_drop(ctx->obj2); /* drop old srv taken on last dumping attempt */
+			ctx->obj2 = cur; /* could be NULL */
+			/* back to normal */
+		}
+
+		/* obj2 points to servers list as initialized above.
+		 *
+		 * A server may be removed during the stats dumping.
+		 * Temporarily increment its refcount to prevent its
+		 * anticipated cleaning. Call srv_drop() to release it.
+		 */
+		for (; ctx->obj2 != NULL;
+		       ctx->obj2 = srv_drop(sv)) {
+
+			sv = ctx->obj2;
+			srv_take(sv);
+
+			if (htx) {
+				if (htx_almost_full(htx)) {
+					sc_need_room(sc, htx->size / 2);
+					goto full;
+				}
+			}
+			else {
+				if (buffer_almost_full(&rep->buf)) {
+					sc_need_room(sc, b_size(&rep->buf) / 2);
+					goto full;
+				}
+			}
+
+			if (ctx->flags & STAT_BOUND) {
+				if (!(ctx->type & (1 << STATS_TYPE_SV))) {
+					/* release the ref taken above before leaving the loop */
+					srv_drop(sv);
+					break;
+				}
+
+				if (ctx->sid != -1 && sv->puid != ctx->sid)
+					continue;
+			}
+
+			/* do not report disabled servers */
+			if (ctx->flags & STAT_HIDE_MAINT &&
+			    sv->cur_admin & SRV_ADMF_MAINT) {
+				continue;
+			}
+
+			/* follow the tracking chain to the effectively checked server */
+			svs = sv;
+			while (svs->track)
+				svs = svs->track;
+
+			/* do not report servers which are DOWN and not changing state */
+			if ((ctx->flags & STAT_HIDE_DOWN) &&
+			    ((sv->cur_admin & SRV_ADMF_MAINT) || /* server is in maintenance */
+			     (sv->cur_state == SRV_ST_STOPPED && /* server is down */
+			      (!((svs->agent.state | svs->check.state) & CHK_ST_ENABLED) ||
+			       ((svs->agent.state & CHK_ST_ENABLED) && !svs->agent.health) ||
+			       ((svs->check.state & CHK_ST_ENABLED) && !svs->check.health))))) {
+				continue;
+			}
+
+			if (stats_dump_sv_stats(sc, px, sv)) {
+				if (!stats_putchk(appctx, htx))
+					goto full;
+				ctx->flags |= STAT_STARTED;
+				if (ctx->field)
+					goto more;
+			}
+			current_field = 0;
+		} /* for sv */
+
+		ctx->px_st = STAT_PX_ST_BE;
+		__fallthrough;
+
+	case STAT_PX_ST_BE:
+		/* print the backend */
+		if (stats_dump_be_stats(sc, px)) {
+			if (!stats_putchk(appctx, htx))
+				goto full;
+			ctx->flags |= STAT_STARTED;
+			if (ctx->field)
+				goto more;
+		}
+
+		current_field = 0;
+		ctx->px_st = STAT_PX_ST_END;
+		__fallthrough;
+
+	case STAT_PX_ST_END:
+		if (ctx->flags & STAT_FMT_HTML) {
+			stats_dump_html_px_end(sc, px);
+			if (!stats_putchk(appctx, htx))
+				goto full;
+		}
+
+		ctx->px_st = STAT_PX_ST_FIN;
+		__fallthrough;
+
+	case STAT_PX_ST_FIN:
+		return 1;
+
+	default:
+		/* unknown state, we should put an abort() here ! */
+		return 1;
+	}
+
+  full:
+	/* restore previous field */
+	ctx->field = current_field;
+	return 0;
+}
+
+/* Dumps the HTTP stats head block (doctype, <head>, full CSS including the
+ * dark color-scheme override) to the local trash buffer and uses the per-uri
+ * parameters from the parent proxy. The caller is responsible for clearing
+ * the local trash buffer if needed.
+ */
+static void stats_dump_html_head(struct appctx *appctx)
+{
+	struct show_stat_ctx *ctx = appctx->svcctx;
+	struct uri_auth *uri;
+
+	BUG_ON(!ctx->http_px);
+	uri = ctx->http_px->uri_auth;
+
+	/* WARNING! This must fit in the first buffer !!! */
+	chunk_appendf(&trash_chunk,
+	              "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"\n"
+	              "\"http://www.w3.org/TR/html4/loose.dtd\">\n"
+	              "<html><head><title>Statistics Report for " PRODUCT_NAME "%s%s</title>\n"
+	              "<link rel=\"icon\" href=\"data:,\">\n"
+	              "<meta http-equiv=\"content-type\" content=\"text/html; charset=iso-8859-1\">\n"
+	              "<style type=\"text/css\"><!--\n"
+	              "body {"
+	              " font-family: arial, helvetica, sans-serif;"
+	              " font-size: 12px;"
+	              " font-weight: normal;"
+	              " color: black;"
+	              " background: white;"
+	              "}\n"
+	              "th,td {"
+	              " font-size: 10px;"
+	              "}\n"
+	              "h1 {"
+	              " font-size: x-large;"
+	              " margin-bottom: 0.5em;"
+	              "}\n"
+	              "h2 {"
+	              " font-family: helvetica, arial;"
+	              " font-size: x-large;"
+	              " font-weight: bold;"
+	              " font-style: italic;"
+	              " color: #6020a0;"
+	              " margin-top: 0em;"
+	              " margin-bottom: 0em;"
+	              "}\n"
+	              "h3 {"
+	              " font-family: helvetica, arial;"
+	              " font-size: 16px;"
+	              " font-weight: bold;"
+	              " color: #b00040;"
+	              " background: #e8e8d0;"
+	              " margin-top: 0em;"
+	              " margin-bottom: 0em;"
+	              "}\n"
+	              "li {"
+	              " margin-top: 0.25em;"
+	              " margin-right: 2em;"
+	              "}\n"
+	              ".hr {margin-top: 0.25em;"
+	              " border-color: black;"
+	              " border-bottom-style: solid;"
+	              "}\n"
+	              ".titre	{background: #20D0D0;color: #000000; font-weight: bold; text-align: center;}\n"
+	              ".total	{background: #20D0D0;color: #ffff80;}\n"
+	              ".frontend	{background: #e8e8d0;}\n"
+	              ".socket	{background: #d0d0d0;}\n"
+	              ".backend	{background: #e8e8d0;}\n"
+	              ".active_down		{background: #ff9090;}\n"
+	              ".active_going_up		{background: #ffd020;}\n"
+	              ".active_going_down	{background: #ffffa0;}\n"
+	              ".active_up		{background: #c0ffc0;}\n"
+	              ".active_nolb		{background: #20a0ff;}\n"
+	              ".active_draining		{background: #20a0FF;}\n"
+	              ".active_no_check		{background: #e0e0e0;}\n"
+	              ".backup_down		{background: #ff9090;}\n"
+	              ".backup_going_up		{background: #ff80ff;}\n"
+	              ".backup_going_down	{background: #c060ff;}\n"
+	              ".backup_up		{background: #b0d0ff;}\n"
+	              ".backup_nolb		{background: #90b0e0;}\n"
+	              ".backup_draining		{background: #cc9900;}\n"
+	              ".backup_no_check		{background: #e0e0e0;}\n"
+	              ".maintain	{background: #c07820;}\n"
+	              ".rls      {letter-spacing: 0.2em; margin-right: 1px;}\n" /* right letter spacing (used for grouping digits) */
+	              "\n"
+	              "a.px:link {color: #ffff40; text-decoration: none;}"
+	              "a.px:visited {color: #ffff40; text-decoration: none;}"
+	              "a.px:hover {color: #ffffff; text-decoration: none;}"
+	              "a.lfsb:link {color: #000000; text-decoration: none;}"
+	              "a.lfsb:visited {color: #000000; text-decoration: none;}"
+	              "a.lfsb:hover {color: #505050; text-decoration: none;}"
+	              "\n"
+	              "table.tbl { border-collapse: collapse; border-style: none;}\n"
+	              "table.tbl td { text-align: right; border-width: 1px 1px 1px 1px; border-style: solid solid solid solid; padding: 2px 3px; border-color: gray; white-space: nowrap;}\n"
+	              "table.tbl td.ac { text-align: center;}\n"
+	              "table.tbl th { border-width: 1px; border-style: solid solid solid solid; border-color: gray;}\n"
+	              "table.tbl th.pxname { background: #b00040; color: #ffff40; font-weight: bold; border-style: solid solid none solid; padding: 2px 3px; white-space: nowrap;}\n"
+	              "table.tbl th.empty { border-style: none; empty-cells: hide; background: white;}\n"
+	              "table.tbl th.desc { background: white; border-style: solid solid none solid; text-align: left; padding: 2px 3px;}\n"
+	              "\n"
+	              "table.lgd { border-collapse: collapse; border-width: 1px; border-style: none none none solid; border-color: black;}\n"
+	              "table.lgd td { border-width: 1px; border-style: solid solid solid solid; border-color: gray; padding: 2px;}\n"
+	              "table.lgd td.noborder { border-style: none; padding: 2px; white-space: nowrap;}\n"
+	              "table.det { border-collapse: collapse; border-style: none; }\n"
+	              "table.det th { text-align: left; border-width: 0px; padding: 0px 1px 0px 0px; font-style:normal;font-size:11px;font-weight:bold;font-family: sans-serif;}\n"
+	              "table.det td { text-align: right; border-width: 0px; padding: 0px 0px 0px 4px; white-space: nowrap; font-style:normal;font-size:11px;font-weight:normal;}\n"
+	              "u {text-decoration:none; border-bottom: 1px dotted black;}\n"
+	              "div.tips {\n"
+	              " display:block;\n"
+	              " visibility:hidden;\n"
+	              " z-index:2147483647;\n"
+	              " position:absolute;\n"
+	              " padding:2px 4px 3px;\n"
+	              " background:#f0f060; color:#000000;\n"
+	              " border:1px solid #7040c0;\n"
+	              " white-space:nowrap;\n"
+	              " font-style:normal;font-size:11px;font-weight:normal;\n"
+	              " -moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;\n"
+	              " -moz-box-shadow:gray 2px 2px 3px;-webkit-box-shadow:gray 2px 2px 3px;box-shadow:gray 2px 2px 3px;\n"
+	              "}\n"
+	              "u:hover div.tips {visibility:visible;}\n"
+	              "@media (prefers-color-scheme: dark) {\n"
+	              " body { font-family: arial, helvetica, sans-serif; font-size: 12px; font-weight: normal; color: #e8e6e3; background: #131516;}\n"
+	              " h1 { color: #a265e0!important; }\n"
+	              " h2 { color: #a265e0; }\n"
+	              " h3 { color: #ff5190; background-color: #3e3e1f; }\n"
+	              " a { color: #3391ff; }\n"
+	              " input { background-color: #2f3437; }\n"
+	              " .hr { border-color: #8c8273; }\n"
+	              " .titre { background-color: #1aa6a6; color: #e8e6e3; }\n"
+	              " .frontend {background: #2f3437;}\n"
+	              " .socket {background: #2a2d2f;}\n"
+	              " .backend {background: #2f3437;}\n"
+	              " .active_down {background: #760000;}\n"
+	              " .active_going_up {background: #b99200;}\n"
+	              " .active_going_down {background: #6c6c00;}\n"
+	              " .active_up {background: #165900;}\n"
+	              " .active_nolb {background: #006ab9;}\n"
+	              " .active_draining {background: #006ab9;}\n"
+	              " .active_no_check {background: #2a2d2f;}\n"
+	              " .backup_down {background: #760000;}\n"
+	              " .backup_going_up {background: #7f007f;}\n"
+	              " .backup_going_down {background: #580092;}\n"
+	              " .backup_up {background: #2e3234;}\n"
+	              " .backup_nolb {background: #1e3c6a;}\n"
+	              " .backup_draining {background: #a37a00;}\n"
+	              " .backup_no_check {background: #2a2d2f;}\n"
+	              " .maintain {background: #9a601a;}\n"
+	              " a.px:link {color: #d8d83b; text-decoration: none;}\n"
+	              " a.px:visited {color: #d8d83b; text-decoration: none;}\n"
+	              " a.px:hover {color: #ffffff; text-decoration: none;}\n"
+	              " a.lfsb:link {color: #e8e6e3; text-decoration: none;}\n"
+	              " a.lfsb:visited {color: #e8e6e3; text-decoration: none;}\n"
+	              " a.lfsb:hover {color: #b5afa6; text-decoration: none;}\n"
+	              " table.tbl th.empty { background-color: #181a1b; }\n"
+	              " table.tbl th.desc { background: #181a1b; }\n"
+	              " table.tbl th.pxname { background-color: #8d0033; color: #ffff46; }\n"
+	              " table.tbl th { border-color: #808080; }\n"
+	              " table.tbl td { border-color: #808080; }\n"
+	              " u {text-decoration:none; border-bottom: 1px dotted #e8e6e3;}\n"
+	              " div.tips {\n"
+	              "  background:#8e8e0d;\n"
+	              "  color:#e8e6e3;\n"
+	              "  border-color: #4e2c86;\n"
+	              "  -moz-box-shadow: #60686c 2px 2px 3px;\n"
+	              "  -webkit-box-shadow: #60686c 2px 2px 3px;\n"
+	              "  box-shadow: #60686c 2px 2px 3px;\n"
+	              " }\n"
+	              "}\n"
+	              "-->\n"
+	              "</style></head>\n",
+	              /* title suffix: " on <node>" only when node display is requested */
+	              (ctx->flags & STAT_SHNODE) ? " on " : "",
+	              (ctx->flags & STAT_SHNODE) ? (uri && uri->node ? uri->node : global.node) : ""
+	              );
+}
+
+/* Dumps the HTML stats information block (process summary, color legend,
+ * display options and the optional admin-action status message) to the local
+ * trash buffer and uses the state from stream connector <sc> and per-uri
+ * parameter from the parent proxy. The caller is responsible for clearing
+ * the local trash buffer if needed.
+ */
+static void stats_dump_html_info(struct stconn *sc)
+{
+	struct appctx *appctx = __sc_appctx(sc);
+	struct show_stat_ctx *ctx = appctx->svcctx;
+	unsigned int up = ns_to_sec(now_ns - start_time_ns);	/* process uptime in seconds */
+	char scope_txt[STAT_SCOPE_TXT_MAXLEN + sizeof STAT_SCOPE_PATTERN];
+	const char *scope_ptr = stats_scope_ptr(appctx, sc);
+	struct uri_auth *uri;
+	unsigned long long bps;
+	int thr;
+
+	BUG_ON(!ctx->http_px);
+	uri = ctx->http_px->uri_auth;
+	/* sum the per-thread output rate counters (each counts 32-byte units) */
+	for (bps = thr = 0; thr < global.nbthread; thr++)
+		bps += 32ULL * read_freq_ctr(&ha_thread_ctx[thr].out_32bps);
+
+	/* Turn the bytes per second to bits per second and take care of the
+	 * usual ethernet overhead in order to help figure how far we are from
+	 * interface saturation since it's the only case which usually matters.
+	 * For this we count the total size of an Ethernet frame on the wire
+	 * including preamble and IFG (1538) for the largest TCP segment it
+	 * transports (1448 with TCP timestamps). This is not valid for smaller
+	 * packets (under-estimated), but it gives a reasonably accurate
+	 * estimation of how far we are from uplink saturation.
+	 */
+	bps = bps * 8 * 1538 / 1448;
+
+	/* WARNING! this has to fit the first packet too.
+	 * We are around 3.5 kB, add adding entries will
+	 * become tricky if we want to support 4kB buffers !
+	 */
+	chunk_appendf(&trash_chunk,
+	              "<body><h1><a href=\"" PRODUCT_URL "\" style=\"text-decoration: none;\">"
+	              PRODUCT_NAME "%s</a></h1>\n"
+	              "<h2>Statistics Report for pid %d%s%s%s%s</h2>\n"
+	              "<hr width=\"100%%\" class=\"hr\">\n"
+	              "<h3>&gt; General process information</h3>\n"
+	              "<table border=0><tr><td align=\"left\" nowrap width=\"1%%\">\n"
+	              "<p><b>pid = </b> %d (process #%d, nbproc = %d, nbthread = %d)<br>\n"
+	              "<b>uptime = </b> %dd %dh%02dm%02ds; warnings = %u<br>\n"
+	              "<b>system limits:</b> memmax = %s%s; ulimit-n = %d<br>\n"
+	              "<b>maxsock = </b> %d; <b>maxconn = </b> %d; <b>reached = </b> %llu; <b>maxpipes = </b> %d<br>\n"
+	              "current conns = %d; current pipes = %d/%d; conn rate = %d/sec; bit rate = %.3f %cbps<br>\n"
+	              "Running tasks: %d/%d (%d niced); idle = %d %%<br>\n"
+	              "</td><td align=\"center\" nowrap>\n"
+	              "<table class=\"lgd\"><tr>\n"
+	              "<td class=\"active_up\">&nbsp;</td><td class=\"noborder\">active UP </td>"
+	              "<td class=\"backup_up\">&nbsp;</td><td class=\"noborder\">backup UP </td>"
+	              "</tr><tr>\n"
+	              "<td class=\"active_going_down\"></td><td class=\"noborder\">active UP, going down </td>"
+	              "<td class=\"backup_going_down\"></td><td class=\"noborder\">backup UP, going down </td>"
+	              "</tr><tr>\n"
+	              "<td class=\"active_going_up\"></td><td class=\"noborder\">active DOWN, going up </td>"
+	              "<td class=\"backup_going_up\"></td><td class=\"noborder\">backup DOWN, going up </td>"
+	              "</tr><tr>\n"
+	              "<td class=\"active_down\"></td><td class=\"noborder\">active or backup DOWN &nbsp;</td>"
+	              "<td class=\"active_no_check\"></td><td class=\"noborder\">not checked </td>"
+	              "</tr><tr>\n"
+	              "<td class=\"maintain\"></td><td class=\"noborder\" colspan=\"3\">active or backup DOWN for maintenance (MAINT) &nbsp;</td>"
+	              "</tr><tr>\n"
+	              "<td class=\"active_draining\"></td><td class=\"noborder\" colspan=\"3\">active or backup SOFT STOPPED for maintenance &nbsp;</td>"
+	              "</tr></table>\n"
+	              "Note: \"NOLB\"/\"DRAIN\" = UP with load-balancing disabled."
+	              "</td>"
+	              "<td align=\"left\" valign=\"top\" nowrap width=\"1%%\">"
+	              "<b>Display option:</b><ul style=\"margin-top: 0.25em;\">"
+	              "",
+	              (ctx->flags & STAT_HIDEVER) ? "" : (stats_version_string),
+	              pid, (ctx->flags & STAT_SHNODE) ? " on " : "",
+	              (ctx->flags & STAT_SHNODE) ? (uri->node ? uri->node : global.node) : "",
+	              (ctx->flags & STAT_SHDESC) ? ": " : "",
+	              (ctx->flags & STAT_SHDESC) ? (uri->desc ? uri->desc : global.desc) : "",
+	              pid, 1, 1, global.nbthread,
+	              up / 86400, (up % 86400) / 3600,
+	              (up % 3600) / 60, (up % 60),
+	              HA_ATOMIC_LOAD(&tot_warnings),
+	              global.rlimit_memmax ? ultoa(global.rlimit_memmax) : "unlimited",
+	              global.rlimit_memmax ? " MB" : "",
+	              global.rlimit_nofile,
+	              global.maxsock, global.maxconn, HA_ATOMIC_LOAD(&maxconn_reached), global.maxpipes,
+	              actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
+	              bps >= 1000000000UL ? (bps / 1000000000.0) : bps >= 1000000UL ? (bps / 1000000.0) : (bps / 1000.0),
+	              bps >= 1000000000UL ? 'G' : bps >= 1000000UL ? 'M' : 'k',
+	              total_run_queues(), total_allocated_tasks(), total_niced_running_tasks(), clock_report_idle());
+
+	/* scope_txt = search query, ctx->scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
+	memcpy(scope_txt, scope_ptr, ctx->scope_len);
+	scope_txt[ctx->scope_len] = '\0';
+
+	chunk_appendf(&trash_chunk,
+		      "<li><form method=\"GET\">Scope : <input value=\"%s\" name=\"" STAT_SCOPE_INPUT_NAME "\" size=\"8\" maxlength=\"%d\" tabindex=\"1\"/></form>\n",
+		      (ctx->scope_len > 0) ? scope_txt : "",
+		      STAT_SCOPE_TXT_MAXLEN);
+
+	/* scope_txt = search pattern + search query, ctx->scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
+	scope_txt[0] = 0;
+	if (ctx->scope_len) {
+		strlcpy2(scope_txt, STAT_SCOPE_PATTERN, sizeof(scope_txt));
+		memcpy(scope_txt + strlen(STAT_SCOPE_PATTERN), scope_ptr, ctx->scope_len);
+		scope_txt[strlen(STAT_SCOPE_PATTERN) + ctx->scope_len] = 0;
+	}
+
+	/* toggle link between "show all" and "hide DOWN" while preserving the
+	 * other display options in the URI
+	 */
+	if (ctx->flags & STAT_HIDE_DOWN)
+		chunk_appendf(&trash_chunk,
+		              "<li><a href=\"%s%s%s%s\">Show all servers</a><br>\n",
+		              uri->uri_prefix,
+		              "",
+		              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+		              scope_txt);
+	else
+		chunk_appendf(&trash_chunk,
+		              "<li><a href=\"%s%s%s%s\">Hide 'DOWN' servers</a><br>\n",
+		              uri->uri_prefix,
+		              ";up",
+		              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+		              scope_txt);
+
+	if (uri->refresh > 0) {
+		if (ctx->flags & STAT_NO_REFRESH)
+			chunk_appendf(&trash_chunk,
+			              "<li><a href=\"%s%s%s%s\">Enable refresh</a><br>\n",
+			              uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              "",
+			              scope_txt);
+		else
+			chunk_appendf(&trash_chunk,
+			              "<li><a href=\"%s%s%s%s\">Disable refresh</a><br>\n",
+			              uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              ";norefresh",
+			              scope_txt);
+	}
+
+	chunk_appendf(&trash_chunk,
+	              "<li><a href=\"%s%s%s%s\">Refresh now</a><br>\n",
+	              uri->uri_prefix,
+	              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+	              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+	              scope_txt);
+
+	chunk_appendf(&trash_chunk,
+	              "<li><a href=\"%s;csv%s%s\">CSV export</a><br>\n",
+	              uri->uri_prefix,
+	              (uri->refresh > 0) ? ";norefresh" : "",
+	              scope_txt);
+
+	chunk_appendf(&trash_chunk,
+	              "<li><a href=\"%s;json%s%s\">JSON export</a> (<a href=\"%s;json-schema\">schema</a>)<br>\n",
+	              uri->uri_prefix,
+	              (uri->refresh > 0) ? ";norefresh" : "",
+	              scope_txt, uri->uri_prefix);
+
+	chunk_appendf(&trash_chunk,
+	              "</ul></td>"
+	              "<td align=\"left\" valign=\"top\" nowrap width=\"1%%\">"
+	              "<b>External resources:</b><ul style=\"margin-top: 0.25em;\">\n"
+	              "<li><a href=\"" PRODUCT_URL "\">Primary site</a><br>\n"
+	              "<li><a href=\"" PRODUCT_URL_UPD "\">Updates (v" PRODUCT_BRANCH ")</a><br>\n"
+	              "<li><a href=\"" PRODUCT_URL_DOC "\">Online manual</a><br>\n"
+	              "</ul>"
+	              "</td>"
+	              "</tr></table>\n"
+	              ""
+	              );
+
+	/* report the outcome of a previous POST admin action, if any */
+	if (ctx->st_code) {
+		switch (ctx->st_code) {
+		case STAT_STATUS_DONE:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_up>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "Action processed successfully."
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_NONE:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_going_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "Nothing has changed."
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_PART:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_going_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "Action partially processed.<br>"
+			              "Some server names are probably unknown or ambiguous (duplicated names in the backend)."
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_ERRP:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "Action not processed because of invalid parameters."
+			              "<ul>"
+			              "<li>The action is maybe unknown.</li>"
+			              "<li>Invalid key parameter (empty or too long).</li>"
+			              "<li>The backend name is probably unknown or ambiguous (duplicated names).</li>"
+			              "<li>Some server names are probably unknown or ambiguous (duplicated names in the backend).</li>"
+			              "</ul>"
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_EXCD:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "<b>Action not processed : the buffer couldn't store all the data.<br>"
+			              "You should retry with less servers at a time.</b>"
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_DENY:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "<b>Action denied.</b>"
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		case STAT_STATUS_IVAL:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_down>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "<b>Invalid requests (unsupported method or chunked encoded request).</b>"
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+			break;
+		default:
+			chunk_appendf(&trash_chunk,
+			              "<p><div class=active_no_check>"
+			              "<a class=lfsb href=\"%s%s%s%s\" title=\"Remove this message\">[X]</a> "
+			              "Unexpected result."
+			              "</div>\n", uri->uri_prefix,
+			              (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
+			              (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
+			              scope_txt);
+		}
+		chunk_appendf(&trash_chunk, "<p>\n");
+	}
+}
+
+/* Dumps the HTML stats trailer block to the local trash buffer. The caller is
+ * responsible for clearing the local trash buffer if needed.
+ */
+static void stats_dump_html_end(void)
+{
+	chunk_appendf(&trash_chunk, "</body></html>\n");
+}
+
+/* Dumps the stats JSON header (the opening bracket of the top-level array)
+ * to the local trash buffer. The caller is responsible for clearing it if
+ * needed.
+ */
+static void stats_dump_json_header(void)
+{
+	chunk_strcat(&trash_chunk, "[");
+}
+
+
+/* Dumps the JSON stats trailer block (the closing bracket of the top-level
+ * array) to the local trash buffer. The caller is responsible for clearing
+ * the local trash buffer if needed.
+ */
+static void stats_dump_json_end(void)
+{
+	chunk_strcat(&trash_chunk, "]\n");
+}
+
+/* Walks the proxies list and dumps the stats of each eligible proxy.
+ * <appctx.ctx.stats.obj1> points to the current proxy and <obj2> to the
+ * current server/listener inside it. Returns non-zero once the whole list
+ * has been dumped, or zero when it had to stop because the output buffer
+ * was full (the context keeps the position for the next call).
+ */
+static int stats_dump_proxies(struct stconn *sc,
+                              struct htx *htx)
+{
+	struct appctx *appctx = __sc_appctx(sc);
+	struct show_stat_ctx *ctx = appctx->svcctx;
+	struct channel *rep = sc_ic(sc);
+	struct proxy *cur_px;
+
+	/* resume from the proxy recorded in the context */
+	while (ctx->obj1 != NULL) {
+		/* check for available room before producing anything, and ask
+		 * for more if the output area is already almost full.
+		 */
+		if (htx && htx_almost_full(htx)) {
+			sc_need_room(sc, htx->size / 2);
+			goto full;
+		}
+		if (!htx && buffer_almost_full(&rep->buf)) {
+			sc_need_room(sc, b_size(&rep->buf) / 2);
+			goto full;
+		}
+
+		cur_px = ctx->obj1;
+
+		/* Only dump enabled, networked, non-internal proxies having a
+		 * valid uuid. Disabled proxies are skipped but kept in the
+		 * list, which allows retrieving stats from "old" proxies
+		 * after a reload.
+		 */
+		if (cur_px->uuid > 0 &&
+		    !(cur_px->flags & PR_FL_DISABLED) &&
+		    !(cur_px->cap & PR_CAP_INT) &&
+		    (cur_px->cap & (PR_CAP_FE | PR_CAP_BE))) {
+			if (stats_dump_proxy_to_buffer(sc, htx, cur_px) == 0)
+				return 0;
+		}
+
+		/* advance to the next proxy and reset the per-proxy state */
+		ctx->obj1 = cur_px->next;
+		ctx->px_st = STAT_PX_ST_INIT;
+		ctx->field = 0;
+	}
+
+	return 1;
+
+ full:
+	return 0;
+}
+
+/* This function dumps statistics onto the stream connector's read buffer in
+ * either CSV or HTML format. It returns 0 if it had to stop writing data and
+ * an I/O is needed, 1 if the dump is finished and the stream must be closed,
+ * or -1 in case of any error. This function is used by both the CLI and the
+ * HTTP handlers. The dump is resumable: <ctx->state> records the current
+ * phase (head, info, proxy/resolver list, trailer) across calls.
+ */
+static int stats_dump_stat_to_buffer(struct stconn *sc, struct htx *htx)
+{
+	struct appctx *appctx = __sc_appctx(sc);
+	struct show_stat_ctx *ctx = appctx->svcctx;
+	enum stats_domain domain = ctx->domain;
+
+	chunk_reset(&trash_chunk);
+
+	switch (ctx->state) {
+	case STAT_STATE_INIT:
+		ctx->state = STAT_STATE_HEAD; /* let's start producing data */
+		__fallthrough;
+
+	case STAT_STATE_HEAD:
+		/* emit the format-specific header (HTML head, JSON schema or
+		 * opening bracket, or CSV column names)
+		 */
+		if (ctx->flags & STAT_FMT_HTML)
+			stats_dump_html_head(appctx);
+		else if (ctx->flags & STAT_JSON_SCHM)
+			stats_dump_json_schema(&trash_chunk);
+		else if (ctx->flags & STAT_FMT_JSON)
+			stats_dump_json_header();
+		else if (!(ctx->flags & STAT_FMT_TYPED))
+			stats_dump_csv_header(ctx->domain);
+
+		if (!stats_putchk(appctx, htx))
+			goto full;
+
+		/* a schema-only request is complete after the header */
+		if (ctx->flags & STAT_JSON_SCHM) {
+			ctx->state = STAT_STATE_FIN;
+			return 1;
+		}
+		ctx->state = STAT_STATE_INFO;
+		__fallthrough;
+
+	case STAT_STATE_INFO:
+		if (ctx->flags & STAT_FMT_HTML) {
+			stats_dump_html_info(sc);
+			if (!stats_putchk(appctx, htx))
+				goto full;
+		}
+
+		/* set up the iteration start point for the proxy domain */
+		if (domain == STATS_DOMAIN_PROXY)
+			ctx->obj1 = proxies_list;
+
+		ctx->px_st = STAT_PX_ST_INIT;
+		ctx->field = 0;
+		ctx->state = STAT_STATE_LIST;
+		__fallthrough;
+
+	case STAT_STATE_LIST:
+		switch (domain) {
+		case STATS_DOMAIN_RESOLVERS:
+			if (!stats_dump_resolvers(sc, stat_l[domain],
+			                          stat_count[domain],
+			                          &stats_module_list[domain])) {
+				return 0;
+			}
+			break;
+
+		case STATS_DOMAIN_PROXY:
+		default:
+			/* dump proxies */
+			if (!stats_dump_proxies(sc, htx))
+				return 0;
+			break;
+		}
+
+		ctx->state = STAT_STATE_END;
+		__fallthrough;
+
+	case STAT_STATE_END:
+		/* emit the closing block for formats that have one */
+		if (ctx->flags & (STAT_FMT_HTML|STAT_FMT_JSON)) {
+			if (ctx->flags & STAT_FMT_HTML)
+				stats_dump_html_end();
+			else
+				stats_dump_json_end();
+			if (!stats_putchk(appctx, htx))
+				goto full;
+		}
+
+		ctx->state = STAT_STATE_FIN;
+		__fallthrough;
+
+	case STAT_STATE_FIN:
+		return 1;
+
+	default:
+		/* unknown state ! */
+		ctx->state = STAT_STATE_FIN;
+		return -1;
+	}
+
+  full:
+	return 0;
+
+}
+
+/* We reached the stats page through a POST request. The appctx is
+ * expected to have already been allocated by the caller.
+ * Parse the posted data and enable/disable servers if necessary.
+ * Returns 1 if request was parsed or zero if it needs more data.
+ */
+static int stats_process_http_post(struct stconn *sc)
+{
+ struct stream *s = __sc_strm(sc);
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+
+ struct proxy *px = NULL;
+ struct server *sv = NULL;
+
+ char key[LINESIZE];
+ int action = ST_ADM_ACTION_NONE;
+ int reprocess = 0;
+
+ int total_servers = 0;
+ int altered_servers = 0;
+
+ char *first_param, *cur_param, *next_param, *end_params;
+ char *st_cur_param = NULL;
+ char *st_next_param = NULL;
+
+ struct buffer *temp = get_trash_chunk();
+
+ struct htx *htx = htxbuf(&s->req.buf);
+ struct htx_blk *blk;
+
+ /* we need more data */
+ if (s->txn->req.msg_state < HTTP_MSG_DONE) {
+ /* check if we can receive more */
+ if (htx_free_data_space(htx) <= global.tune.maxrewrite) {
+ ctx->st_code = STAT_STATUS_EXCD;
+ goto out;
+ }
+ goto wait;
+ }
+
+ /* The request was fully received. Copy data */
+ blk = htx_get_head_blk(htx);
+ while (blk) {
+ enum htx_blk_type type = htx_get_blk_type(blk);
+
+ if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+ break;
+ if (type == HTX_BLK_DATA) {
+ struct ist v = htx_get_blk_value(htx, blk);
+
+ if (!chunk_memcat(temp, v.ptr, v.len)) {
+ ctx->st_code = STAT_STATUS_EXCD;
+ goto out;
+ }
+ }
+ blk = htx_get_next_blk(htx, blk);
+ }
+
+ first_param = temp->area;
+ end_params = temp->area + temp->data;
+ cur_param = next_param = end_params;
+ *end_params = '\0';
+
+ ctx->st_code = STAT_STATUS_NONE;
+
+ /*
+ * Parse the parameters in reverse order to only store the last value.
+ * From the html form, the backend and the action are at the end.
+ */
+ while (cur_param > first_param) {
+ char *value;
+ int poffset, plen;
+
+ cur_param--;
+
+ if ((*cur_param == '&') || (cur_param == first_param)) {
+ reprocess_servers:
+ /* Parse the key */
+ poffset = (cur_param != first_param ? 1 : 0);
+ plen = next_param - cur_param + (cur_param == first_param ? 1 : 0);
+ if ((plen > 0) && (plen <= sizeof(key))) {
+ strncpy(key, cur_param + poffset, plen);
+ key[plen - 1] = '\0';
+ } else {
+ ctx->st_code = STAT_STATUS_ERRP;
+ goto out;
+ }
+
+ /* Parse the value */
+ value = key;
+ while (*value != '\0' && *value != '=') {
+ value++;
+ }
+ if (*value == '=') {
+ /* Ok, a value is found, we can mark the end of the key */
+ *value++ = '\0';
+ }
+ if (url_decode(key, 1) < 0 || url_decode(value, 1) < 0)
+ break;
+
+ /* Now we can check the key to see what to do */
+ if (!px && (strcmp(key, "b") == 0)) {
+ if ((px = proxy_be_by_name(value)) == NULL) {
+ /* the backend name is unknown or ambiguous (duplicate names) */
+ ctx->st_code = STAT_STATUS_ERRP;
+ goto out;
+ }
+ }
+ else if (!action && (strcmp(key, "action") == 0)) {
+ if (strcmp(value, "ready") == 0) {
+ action = ST_ADM_ACTION_READY;
+ }
+ else if (strcmp(value, "drain") == 0) {
+ action = ST_ADM_ACTION_DRAIN;
+ }
+ else if (strcmp(value, "maint") == 0) {
+ action = ST_ADM_ACTION_MAINT;
+ }
+ else if (strcmp(value, "shutdown") == 0) {
+ action = ST_ADM_ACTION_SHUTDOWN;
+ }
+ else if (strcmp(value, "dhlth") == 0) {
+ action = ST_ADM_ACTION_DHLTH;
+ }
+ else if (strcmp(value, "ehlth") == 0) {
+ action = ST_ADM_ACTION_EHLTH;
+ }
+ else if (strcmp(value, "hrunn") == 0) {
+ action = ST_ADM_ACTION_HRUNN;
+ }
+ else if (strcmp(value, "hnolb") == 0) {
+ action = ST_ADM_ACTION_HNOLB;
+ }
+ else if (strcmp(value, "hdown") == 0) {
+ action = ST_ADM_ACTION_HDOWN;
+ }
+ else if (strcmp(value, "dagent") == 0) {
+ action = ST_ADM_ACTION_DAGENT;
+ }
+ else if (strcmp(value, "eagent") == 0) {
+ action = ST_ADM_ACTION_EAGENT;
+ }
+ else if (strcmp(value, "arunn") == 0) {
+ action = ST_ADM_ACTION_ARUNN;
+ }
+ else if (strcmp(value, "adown") == 0) {
+ action = ST_ADM_ACTION_ADOWN;
+ }
+ /* else these are the old supported methods */
+ else if (strcmp(value, "disable") == 0) {
+ action = ST_ADM_ACTION_DISABLE;
+ }
+ else if (strcmp(value, "enable") == 0) {
+ action = ST_ADM_ACTION_ENABLE;
+ }
+ else if (strcmp(value, "stop") == 0) {
+ action = ST_ADM_ACTION_STOP;
+ }
+ else if (strcmp(value, "start") == 0) {
+ action = ST_ADM_ACTION_START;
+ }
+ else {
+ ctx->st_code = STAT_STATUS_ERRP;
+ goto out;
+ }
+ }
+ else if (strcmp(key, "s") == 0) {
+ if (!(px && action)) {
+ /*
+ * Indicates that we'll need to reprocess the parameters
+ * as soon as backend and action are known
+ */
+ if (!reprocess) {
+ st_cur_param = cur_param;
+ st_next_param = next_param;
+ }
+ reprocess = 1;
+ }
+ else if ((sv = findserver(px, value)) != NULL) {
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ switch (action) {
+ case ST_ADM_ACTION_DISABLE:
+ if (!(sv->cur_admin & SRV_ADMF_FMAINT)) {
+ altered_servers++;
+ total_servers++;
+ srv_set_admin_flag(sv, SRV_ADMF_FMAINT, SRV_ADM_STCHGC_STATS_DISABLE);
+ }
+ break;
+ case ST_ADM_ACTION_ENABLE:
+ if (sv->cur_admin & SRV_ADMF_FMAINT) {
+ altered_servers++;
+ total_servers++;
+ srv_clr_admin_flag(sv, SRV_ADMF_FMAINT);
+ }
+ break;
+ case ST_ADM_ACTION_STOP:
+ if (!(sv->cur_admin & SRV_ADMF_FDRAIN)) {
+ srv_set_admin_flag(sv, SRV_ADMF_FDRAIN, SRV_ADM_STCHGC_STATS_STOP);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_START:
+ if (sv->cur_admin & SRV_ADMF_FDRAIN) {
+ srv_clr_admin_flag(sv, SRV_ADMF_FDRAIN);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_DHLTH:
+ if (sv->check.state & CHK_ST_CONFIGURED) {
+ sv->check.state &= ~CHK_ST_ENABLED;
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_EHLTH:
+ if (sv->check.state & CHK_ST_CONFIGURED) {
+ sv->check.state |= CHK_ST_ENABLED;
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_HRUNN:
+ if (!(sv->track)) {
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_STATS_WEB);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_HNOLB:
+ if (!(sv->track)) {
+ sv->check.health = sv->check.rise + sv->check.fall - 1;
+ srv_set_stopping(sv, SRV_OP_STCHGC_STATS_WEB);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_HDOWN:
+ if (!(sv->track)) {
+ sv->check.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_STATS_WEB);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_DAGENT:
+ if (sv->agent.state & CHK_ST_CONFIGURED) {
+ sv->agent.state &= ~CHK_ST_ENABLED;
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_EAGENT:
+ if (sv->agent.state & CHK_ST_CONFIGURED) {
+ sv->agent.state |= CHK_ST_ENABLED;
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_ARUNN:
+ if (sv->agent.state & CHK_ST_ENABLED) {
+ sv->agent.health = sv->agent.rise + sv->agent.fall - 1;
+ srv_set_running(sv, SRV_OP_STCHGC_STATS_WEB);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_ADOWN:
+ if (sv->agent.state & CHK_ST_ENABLED) {
+ sv->agent.health = 0;
+ srv_set_stopped(sv, SRV_OP_STCHGC_STATS_WEB);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ case ST_ADM_ACTION_READY:
+ srv_adm_set_ready(sv);
+ altered_servers++;
+ total_servers++;
+ break;
+ case ST_ADM_ACTION_DRAIN:
+ srv_adm_set_drain(sv);
+ altered_servers++;
+ total_servers++;
+ break;
+ case ST_ADM_ACTION_MAINT:
+ srv_adm_set_maint(sv);
+ altered_servers++;
+ total_servers++;
+ break;
+ case ST_ADM_ACTION_SHUTDOWN:
+ if (!(px->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+ srv_shutdown_streams(sv, SF_ERR_KILLED);
+ altered_servers++;
+ total_servers++;
+ }
+ break;
+ }
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ } else {
+ /* the server name is unknown or ambiguous (duplicate names) */
+ total_servers++;
+ }
+ }
+ if (reprocess && px && action) {
+ /* Now, we know the backend and the action chosen by the user.
+ * We can safely restart from the first server parameter
+ * to reprocess them
+ */
+ cur_param = st_cur_param;
+ next_param = st_next_param;
+ reprocess = 0;
+ goto reprocess_servers;
+ }
+
+ next_param = cur_param;
+ }
+ }
+
+ if (total_servers == 0) {
+ ctx->st_code = STAT_STATUS_NONE;
+ }
+ else if (altered_servers == 0) {
+ ctx->st_code = STAT_STATUS_ERRP;
+ }
+ else if (altered_servers == total_servers) {
+ ctx->st_code = STAT_STATUS_DONE;
+ }
+ else {
+ ctx->st_code = STAT_STATUS_PART;
+ }
+ out:
+ return 1;
+ wait:
+ ctx->st_code = STAT_STATUS_NONE;
+ return 0;
+}
+
+
+static int stats_send_http_headers(struct stconn *sc, struct htx *htx)
+{
+ struct stream *s = __sc_strm(sc);
+ struct uri_auth *uri;
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ struct htx_sl *sl;
+ unsigned int flags;
+
+ BUG_ON(!ctx->http_px);
+ uri = ctx->http_px->uri_auth;
+
+ flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_ENC|HTX_SL_F_XFER_LEN|HTX_SL_F_CHNK);
+ sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), ist("200"), ist("OK"));
+ if (!sl)
+ goto full;
+ sl->info.res.status = 200;
+
+ if (!htx_add_header(htx, ist("Cache-Control"), ist("no-cache")))
+ goto full;
+ if (ctx->flags & STAT_FMT_HTML) {
+ if (!htx_add_header(htx, ist("Content-Type"), ist("text/html")))
+ goto full;
+ }
+ else if (ctx->flags & (STAT_FMT_JSON|STAT_JSON_SCHM)) {
+ if (!htx_add_header(htx, ist("Content-Type"), ist("application/json")))
+ goto full;
+ }
+ else {
+ if (!htx_add_header(htx, ist("Content-Type"), ist("text/plain")))
+ goto full;
+ }
+
+ if (uri->refresh > 0 && !(ctx->flags & STAT_NO_REFRESH)) {
+ const char *refresh = U2A(uri->refresh);
+ if (!htx_add_header(htx, ist("Refresh"), ist(refresh)))
+ goto full;
+ }
+
+ if (ctx->flags & STAT_CHUNKED) {
+ if (!htx_add_header(htx, ist("Transfer-Encoding"), ist("chunked")))
+ goto full;
+ }
+
+ if (!htx_add_endof(htx, HTX_BLK_EOH))
+ goto full;
+
+ channel_add_input(&s->res, htx->data);
+ return 1;
+
+ full:
+ htx_reset(htx);
+ sc_need_room(sc, 0);
+ return 0;
+}
+
+
/* Builds a "303 See Other" response into <htx> after a stats POST was
 * processed, sending the client back to the stats page with a GET. The POST
 * result is carried in the "st" query parameter of the Location URI so the
 * page can display a status message, and the scope/up/norefresh options are
 * propagated. Returns 1 on success, or 0 when the htx message is full (it is
 * then reset and room is requested from the stream connector).
 */
static int stats_send_http_redirect(struct stconn *sc, struct htx *htx)
{
	char scope_txt[STAT_SCOPE_TXT_MAXLEN + sizeof STAT_SCOPE_PATTERN];
	struct stream *s = __sc_strm(sc);
	struct uri_auth *uri;
	struct appctx *appctx = __sc_appctx(sc);
	struct show_stat_ctx *ctx = appctx->svcctx;
	struct htx_sl *sl;
	unsigned int flags;

	BUG_ON(!ctx->http_px);
	uri = ctx->http_px->uri_auth;

	/* scope_txt = search pattern + search query, ctx->scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
	scope_txt[0] = 0;
	if (ctx->scope_len) {
		const char *scope_ptr = stats_scope_ptr(appctx, sc);

		strlcpy2(scope_txt, STAT_SCOPE_PATTERN, sizeof(scope_txt));
		memcpy(scope_txt + strlen(STAT_SCOPE_PATTERN), scope_ptr, ctx->scope_len);
		scope_txt[strlen(STAT_SCOPE_PATTERN) + ctx->scope_len] = 0;
	}

	/* We don't want to land on the posted stats page because a refresh will
	 * repost the data. We don't want this to happen by accident so we redirect
	 * the browser to the stats page with a GET.
	 */
	/* out-of-range or NULL status codes fall back to the "UNKN" string */
	chunk_printf(&trash, "%s;st=%s%s%s%s",
		     uri->uri_prefix,
		     ((ctx->st_code > STAT_STATUS_INIT) &&
		      (ctx->st_code < STAT_STATUS_SIZE) &&
		      stat_status_codes[ctx->st_code]) ?
		     stat_status_codes[ctx->st_code] :
		     stat_status_codes[STAT_STATUS_UNKN],
		     (ctx->flags & STAT_HIDE_DOWN) ? ";up" : "",
		     (ctx->flags & STAT_NO_REFRESH) ? ";norefresh" : "",
		     scope_txt);

	/* NOTE(review): both HTX_SL_F_CLEN and HTX_SL_F_CHNK are set here while
	 * an explicit "Content-Length: 0" header is emitted below; the CHNK flag
	 * looks contradictory for a content-length-delimited empty body — confirm
	 * whether it is intentional before changing it. */
	flags = (HTX_SL_F_IS_RESP|HTX_SL_F_VER_11|HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN|HTX_SL_F_CHNK);
	sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, ist("HTTP/1.1"), ist("303"), ist("See Other"));
	if (!sl)
		goto full;
	sl->info.res.status = 303;

	if (!htx_add_header(htx, ist("Cache-Control"), ist("no-cache")) ||
	    !htx_add_header(htx, ist("Content-Type"), ist("text/plain")) ||
	    !htx_add_header(htx, ist("Content-Length"), ist("0")) ||
	    !htx_add_header(htx, ist("Location"), ist2(trash.area, trash.data)))
		goto full;

	if (!htx_add_endof(htx, HTX_BLK_EOH))
		goto full;

	channel_add_input(&s->res, htx->data);
	return 1;

full:
	htx_reset(htx);
	sc_need_room(sc, 0);
	return 0;
}
+
/* This I/O handler runs as an applet embedded in a stream connector. It is
 * used to send HTTP stats over a TCP socket. The mechanism is very simple.
 * appctx->st0 contains the operation in progress (headers, dump, post,
 * redirect, done, end). States are processed in sequence in a single call
 * when possible. The handler automatically unregisters itself once transfer
 * is complete.
 */
static void http_stats_io_handler(struct appctx *appctx)
{
	struct show_stat_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct stream *s = __sc_strm(sc);
	struct channel *req = sc_oc(sc);
	struct channel *res = sc_ic(sc);
	struct htx *req_htx, *res_htx;

	/* only proxy stats are available via http */
	ctx->domain = STATS_DOMAIN_PROXY;

	res_htx = htx_from_buf(&res->buf);

	/* peer closed or errored: nothing more to produce, jump to the end */
	if (unlikely(se_fl_test(appctx->sedesc, (SE_FL_EOS|SE_FL_ERROR|SE_FL_SHR|SE_FL_SHW)))) {
		appctx->st0 = STAT_HTTP_END;
		goto out;
	}

	/* Check if the input buffer is available. */
	if (!b_size(&res->buf)) {
		sc_need_room(sc, 0);
		goto out;
	}

	/* all states are processed in sequence */
	if (appctx->st0 == STAT_HTTP_HEAD) {
		if (stats_send_http_headers(sc, res_htx)) {
			/* HEAD requests stop after the headers */
			if (s->txn->meth == HTTP_METH_HEAD)
				appctx->st0 = STAT_HTTP_DONE;
			else
				appctx->st0 = STAT_HTTP_DUMP;
		}
	}

	if (appctx->st0 == STAT_HTTP_DUMP) {
		/* the dump works on trash_chunk, bounded by the response
		 * buffer size rather than the trash size
		 */
		trash_chunk = b_make(trash.area, res->buf.size, 0, 0);
		/* adjust buffer size to take htx overhead into account,
		 * make sure to perform this call on an empty buffer
		 */
		trash_chunk.size = buf_room_for_htx_data(&trash_chunk);
		if (stats_dump_stat_to_buffer(sc, res_htx))
			appctx->st0 = STAT_HTTP_DONE;
	}

	if (appctx->st0 == STAT_HTTP_POST) {
		if (stats_process_http_post(sc))
			appctx->st0 = STAT_HTTP_LAST;
		else if (s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
			appctx->st0 = STAT_HTTP_DONE;
	}

	/* after a POST, redirect the client back to the stats page */
	if (appctx->st0 == STAT_HTTP_LAST) {
		if (stats_send_http_redirect(sc, res_htx))
			appctx->st0 = STAT_HTTP_DONE;
	}

	if (appctx->st0 == STAT_HTTP_DONE) {
		/* no more data are expected. If the response buffer is empty,
		 * be sure to add something (EOT block in this case) to have
		 * something to send. It is important to be sure the EOM flags
		 * will be handled by the endpoint.
		 */
		if (htx_is_empty(res_htx)) {
			if (!htx_add_endof(res_htx, HTX_BLK_EOT)) {
				sc_need_room(sc, sizeof(struct htx_blk) + 1);
				goto out;
			}
			channel_add_input(res, 1);
		}
		res_htx->flags |= HTX_FL_EOM;
		se_fl_set(appctx->sedesc, SE_FL_EOI);
		appctx->st0 = STAT_HTTP_END;
	}

	if (appctx->st0 == STAT_HTTP_END) {
		se_fl_set(appctx->sedesc, SE_FL_EOS);
		applet_will_consume(appctx);
	}

 out:
	/* we have left the request in the buffer for the case where we
	 * process a POST, and this automatically re-enables activity on
	 * read. It's better to indicate that we want to stop reading when
	 * we're sending, so that we know there's at most one direction
	 * deciding to wake the applet up. It saves it from looping when
	 * emitting large blocks into small TCP windows.
	 */
	htx_to_buf(res_htx, &res->buf);
	if (appctx->st0 == STAT_HTTP_END) {
		/* eat the whole request */
		if (co_data(req)) {
			req_htx = htx_from_buf(&req->buf);
			co_htx_skip(req, req_htx, co_data(req));
			htx_to_buf(req_htx, &req->buf);
		}
	}
	else if (co_data(res))
		applet_wont_consume(appctx);
}
+
+/* Dump all fields from <info> into <out> using the "show info" format (name: value) */
+static int stats_dump_info_fields(struct buffer *out,
+ const struct field *info,
+ struct show_stat_ctx *ctx)
+{
+ int flags = ctx->flags;
+ int field;
+
+ for (field = 0; field < INF_TOTAL_FIELDS; field++) {
+ if (!field_format(info, field))
+ continue;
+
+ if (!chunk_appendf(out, "%s: ", info_fields[field].name))
+ return 0;
+ if (!stats_emit_raw_data_field(out, &info[field]))
+ return 0;
+ if ((flags & STAT_SHOW_FDESC) && !chunk_appendf(out, ":\"%s\"", info_fields[field].desc))
+ return 0;
+ if (!chunk_strcat(out, "\n"))
+ return 0;
+ }
+ return 1;
+}
+
+/* Dump all fields from <info> into <out> using the "show info typed" format */
+static int stats_dump_typed_info_fields(struct buffer *out,
+ const struct field *info,
+ struct show_stat_ctx *ctx)
+{
+ int flags = ctx->flags;
+ int field;
+
+ for (field = 0; field < INF_TOTAL_FIELDS; field++) {
+ if (!field_format(info, field))
+ continue;
+
+ if (!chunk_appendf(out, "%d.%s.%u:", field, info_fields[field].name, info[INF_PROCESS_NUM].u.u32))
+ return 0;
+ if (!stats_emit_field_tags(out, &info[field], ':'))
+ return 0;
+ if (!stats_emit_typed_data_field(out, &info[field]))
+ return 0;
+ if ((flags & STAT_SHOW_FDESC) && !chunk_appendf(out, ":\"%s\"", info_fields[field].desc))
+ return 0;
+ if (!chunk_strcat(out, "\n"))
+ return 0;
+ }
+ return 1;
+}
+
/* Fill <info> with HAProxy global info. <info> is preallocated array of length
 * <len>. The length of the array must be INF_TOTAL_FIELDS. If this length is
 * less than this value, the function returns 0, otherwise, it returns 1. Some
 * fields' presence or precision may depend on some of the STAT_* flags present
 * in <flags> (STAT_USE_FLOAT selects float output for rates and durations).
 * String fields are built in the trash chunk, so the result must be consumed
 * before the trash is reused.
 */
int stats_fill_info(struct field *info, int len, uint flags)
{
	struct buffer *out = get_trash_chunk();
	uint64_t glob_out_bytes, glob_spl_bytes, glob_out_b32;
	uint up_sec, up_usec;
	ullong up;
	ulong boot;
	int thr;

#ifdef USE_OPENSSL
	double ssl_sess_rate = read_freq_ctr_flt(&global.ssl_per_sec);
	double ssl_key_rate = read_freq_ctr_flt(&global.ssl_fe_keys_per_sec);
	double ssl_reuse = 0;

	/* reuse ratio = share of sessions which did not need a new key */
	if (ssl_key_rate < ssl_sess_rate)
		ssl_reuse = 100.0 * (1.0 - ssl_key_rate / ssl_sess_rate);
#endif

	/* sum certain per-thread totals (mostly byte counts) */
	glob_out_bytes = glob_spl_bytes = glob_out_b32 = 0;
	for (thr = 0; thr < global.nbthread; thr++) {
		glob_out_bytes += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].out_bytes);
		glob_spl_bytes += HA_ATOMIC_LOAD(&ha_thread_ctx[thr].spliced_out_bytes);
		glob_out_b32 += read_freq_ctr(&ha_thread_ctx[thr].out_32bps);
	}
	glob_out_b32 *= 32; // values are 32-byte units

	/* uptime, with microsecond precision for the float variant */
	up = now_ns - start_time_ns;
	up_sec = ns_to_sec(up);
	up_usec = (up / 1000U) % 1000000U;

	/* time spent between start and readiness, in milliseconds */
	boot = tv_ms_remain(&start_date, &ready_date);

	if (len < INF_TOTAL_FIELDS)
		return 0;

	chunk_reset(out);
	memset(info, 0, sizeof(*info) * len);

	info[INF_NAME]                           = mkf_str(FO_PRODUCT|FN_OUTPUT|FS_SERVICE, PRODUCT_NAME);
	info[INF_VERSION]                        = mkf_str(FO_PRODUCT|FN_OUTPUT|FS_SERVICE, haproxy_version);
	/* NOTE(review): BUILD_INFO is filled with haproxy_version, not a
	 * separate build string — confirm this is intended. */
	info[INF_BUILD_INFO]                     = mkf_str(FO_PRODUCT|FN_OUTPUT|FS_SERVICE, haproxy_version);
	info[INF_RELEASE_DATE]                   = mkf_str(FO_PRODUCT|FN_OUTPUT|FS_SERVICE, haproxy_date);

	info[INF_NBTHREAD]                       = mkf_u32(FO_CONFIG|FS_SERVICE, global.nbthread);
	info[INF_NBPROC]                         = mkf_u32(FO_CONFIG|FS_SERVICE, 1);
	info[INF_PROCESS_NUM]                    = mkf_u32(FO_KEY, 1);
	info[INF_PID]                            = mkf_u32(FO_STATUS, pid);

	/* human-readable uptime string is built in the trash chunk */
	info[INF_UPTIME]                         = mkf_str(FN_DURATION, chunk_newstr(out));
	chunk_appendf(out, "%ud %uh%02um%02us", up_sec / 86400, (up_sec % 86400) / 3600, (up_sec % 3600) / 60, (up_sec % 60));

	info[INF_UPTIME_SEC]                     = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_DURATION, up_sec + up_usec / 1000000.0) : mkf_u32(FN_DURATION, up_sec);
	info[INF_START_TIME_SEC]                 = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_DURATION, start_date.tv_sec + start_date.tv_usec / 1000000.0) : mkf_u32(FN_DURATION, start_date.tv_sec);
	info[INF_MEMMAX_MB]                      = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_memmax);
	info[INF_MEMMAX_BYTES]                   = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_memmax * 1048576L);
	info[INF_POOL_ALLOC_MB]                  = mkf_u32(0, (unsigned)(pool_total_allocated() / 1048576L));
	info[INF_POOL_ALLOC_BYTES]               = mkf_u64(0, pool_total_allocated());
	info[INF_POOL_USED_MB]                   = mkf_u32(0, (unsigned)(pool_total_used() / 1048576L));
	info[INF_POOL_USED_BYTES]                = mkf_u64(0, pool_total_used());
	info[INF_POOL_FAILED]                    = mkf_u32(FN_COUNTER, pool_total_failures());
	info[INF_ULIMIT_N]                       = mkf_u32(FO_CONFIG|FN_LIMIT, global.rlimit_nofile);
	info[INF_MAXSOCK]                        = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxsock);
	info[INF_MAXCONN]                        = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxconn);
	info[INF_HARD_MAXCONN]                   = mkf_u32(FO_CONFIG|FN_LIMIT, global.hardmaxconn);
	info[INF_CURR_CONN]                      = mkf_u32(0, actconn);
	info[INF_CUM_CONN]                       = mkf_u32(FN_COUNTER, totalconn);
	info[INF_CUM_REQ]                        = mkf_u32(FN_COUNTER, global.req_count);
#ifdef USE_OPENSSL
	info[INF_MAX_SSL_CONNS]                  = mkf_u32(FN_MAX, global.maxsslconn);
	info[INF_CURR_SSL_CONNS]                 = mkf_u32(0, global.sslconns);
	info[INF_CUM_SSL_CONNS]                  = mkf_u32(FN_COUNTER, global.totalsslconns);
#endif
	info[INF_MAXPIPES]                       = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxpipes);
	info[INF_PIPES_USED]                     = mkf_u32(0, pipes_used);
	info[INF_PIPES_FREE]                     = mkf_u32(0, pipes_free);
	info[INF_CONN_RATE]                      = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, read_freq_ctr_flt(&global.conn_per_sec)) : mkf_u32(FN_RATE, read_freq_ctr(&global.conn_per_sec));
	info[INF_CONN_RATE_LIMIT]                = mkf_u32(FO_CONFIG|FN_LIMIT, global.cps_lim);
	info[INF_MAX_CONN_RATE]                  = mkf_u32(FN_MAX, global.cps_max);
	info[INF_SESS_RATE]                      = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, read_freq_ctr_flt(&global.sess_per_sec)) : mkf_u32(FN_RATE, read_freq_ctr(&global.sess_per_sec));
	info[INF_SESS_RATE_LIMIT]                = mkf_u32(FO_CONFIG|FN_LIMIT, global.sps_lim);
	info[INF_MAX_SESS_RATE]                  = mkf_u32(FN_RATE, global.sps_max);

#ifdef USE_OPENSSL
	info[INF_SSL_RATE]                       = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, ssl_sess_rate) : mkf_u32(FN_RATE, ssl_sess_rate);
	info[INF_SSL_RATE_LIMIT]                 = mkf_u32(FO_CONFIG|FN_LIMIT, global.ssl_lim);
	info[INF_MAX_SSL_RATE]                   = mkf_u32(FN_MAX, global.ssl_max);
	info[INF_SSL_FRONTEND_KEY_RATE]          = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, ssl_key_rate) : mkf_u32(0, ssl_key_rate);
	info[INF_SSL_FRONTEND_MAX_KEY_RATE]      = mkf_u32(FN_MAX, global.ssl_fe_keys_max);
	info[INF_SSL_FRONTEND_SESSION_REUSE_PCT] = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, ssl_reuse) : mkf_u32(0, ssl_reuse);
	info[INF_SSL_BACKEND_KEY_RATE]           = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, read_freq_ctr_flt(&global.ssl_be_keys_per_sec)) : mkf_u32(FN_RATE, read_freq_ctr(&global.ssl_be_keys_per_sec));
	info[INF_SSL_BACKEND_MAX_KEY_RATE]       = mkf_u32(FN_MAX, global.ssl_be_keys_max);
	info[INF_SSL_CACHE_LOOKUPS]              = mkf_u32(FN_COUNTER, global.shctx_lookups);
	info[INF_SSL_CACHE_MISSES]               = mkf_u32(FN_COUNTER, global.shctx_misses);
#endif
	info[INF_COMPRESS_BPS_IN]                = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, read_freq_ctr_flt(&global.comp_bps_in)) : mkf_u32(FN_RATE, read_freq_ctr(&global.comp_bps_in));
	info[INF_COMPRESS_BPS_OUT]               = (flags & STAT_USE_FLOAT) ? mkf_flt(FN_RATE, read_freq_ctr_flt(&global.comp_bps_out)) : mkf_u32(FN_RATE, read_freq_ctr(&global.comp_bps_out));
	info[INF_COMPRESS_BPS_RATE_LIM]          = mkf_u32(FO_CONFIG|FN_LIMIT, global.comp_rate_lim);
#ifdef USE_ZLIB
	info[INF_ZLIB_MEM_USAGE]                 = mkf_u32(0, zlib_used_memory);
	info[INF_MAX_ZLIB_MEM_USAGE]             = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
#endif
	info[INF_TASKS]                          = mkf_u32(0, total_allocated_tasks());
	info[INF_RUN_QUEUE]                      = mkf_u32(0, total_run_queues());
	info[INF_IDLE_PCT]                       = mkf_u32(FN_AVG, clock_report_idle());
	info[INF_NODE]                           = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
	if (global.desc)
		info[INF_DESCRIPTION]            = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.desc);
	info[INF_STOPPING]                       = mkf_u32(0, stopping);
	info[INF_JOBS]                           = mkf_u32(0, jobs);
	info[INF_UNSTOPPABLE_JOBS]               = mkf_u32(0, unstoppable_jobs);
	info[INF_LISTENERS]                      = mkf_u32(0, listeners);
	info[INF_ACTIVE_PEERS]                   = mkf_u32(0, active_peers);
	info[INF_CONNECTED_PEERS]                = mkf_u32(0, connected_peers);
	info[INF_DROPPED_LOGS]                   = mkf_u32(0, dropped_logs);
	info[INF_BUSY_POLLING]                   = mkf_u32(0, !!(global.tune.options & GTUNE_BUSY_POLLING));
	info[INF_FAILED_RESOLUTIONS]             = mkf_u32(0, resolv_failed_resolutions);
	info[INF_TOTAL_BYTES_OUT]                = mkf_u64(0, glob_out_bytes);
	info[INF_TOTAL_SPLICED_BYTES_OUT]        = mkf_u64(0, glob_spl_bytes);
	info[INF_BYTES_OUT_RATE]                 = mkf_u64(FN_RATE, glob_out_b32);
	info[INF_DEBUG_COMMANDS_ISSUED]          = mkf_u32(0, debug_commands_issued);
	info[INF_CUM_LOG_MSGS]                   = mkf_u32(FN_COUNTER, cum_log_messages);

	/* tainted flags rendered as a hex string in the trash chunk */
	info[INF_TAINTED]                        = mkf_str(FO_STATUS, chunk_newstr(out));
	chunk_appendf(out, "%#x", get_tainted());
	info[INF_WARNINGS]                       = mkf_u32(FN_COUNTER, HA_ATOMIC_LOAD(&tot_warnings));
	info[INF_MAXCONN_REACHED]                = mkf_u32(FN_COUNTER, HA_ATOMIC_LOAD(&maxconn_reached));
	info[INF_BOOTTIME_MS]                    = mkf_u32(FN_DURATION, boot);
	info[INF_NICED_TASKS]                    = mkf_u32(0, total_niced_running_tasks());

	return 1;
}
+
+/* This function dumps information onto the stream connector's read buffer.
+ * It returns 0 as long as it does not complete, non-zero upon completion.
+ * No state is used.
+ */
+static int stats_dump_info_to_buffer(struct stconn *sc)
+{
+ struct appctx *appctx = __sc_appctx(sc);
+ struct show_stat_ctx *ctx = appctx->svcctx;
+ int ret;
+ int current_field;
+
+ if (!stats_fill_info(info, INF_TOTAL_FIELDS, ctx->flags))
+ return 0;
+
+ chunk_reset(&trash_chunk);
+more:
+ current_field = ctx->field;
+
+ if (ctx->flags & STAT_FMT_TYPED)
+ ret = stats_dump_typed_info_fields(&trash_chunk, info, ctx);
+ else if (ctx->flags & STAT_FMT_JSON)
+ ret = stats_dump_json_info_fields(&trash_chunk, info, ctx);
+ else
+ ret = stats_dump_info_fields(&trash_chunk, info, ctx);
+
+ if (applet_putchk(appctx, &trash_chunk) == -1) {
+ /* restore previous field */
+ ctx->field = current_field;
+ return 0;
+ }
+ if (ret && ctx->field) {
+ /* partial dump */
+ goto more;
+ }
+ ctx->field = 0;
+ return 1;
+}
+
/* Appends to <out> the JSON schema (draft-04) describing the output of the
 * "show info json" and "show stat json" commands, in a single chunk. If
 * <out> is too short to hold the whole schema, it is reset and replaced by a
 * small JSON error object. A trailing newline is always appended.
 *
 * Integer values bounded to the range [-(2**53)+1, (2**53)-1] as
 * per the recommendation for interoperable integers in section 6 of RFC 7159.
 */
static void stats_dump_json_schema(struct buffer *out)
{

	int old_len = out->data;

	/* NOTE(review): a few quirks in the emitted schema — the "Stat" array
	 * items reuse the title "InfoItem", its "required" list names "value"
	 * while the property is declared as "typedValue", and "unknownValue"
	 * is defined but never referenced by the typedValue "oneOf" list.
	 * These may be long-standing output the consumers rely on; confirm
	 * before changing the string. */
	chunk_strcat(out,
		     "{"
		      "\"$schema\":\"http://json-schema.org/draft-04/schema#\","
		      "\"oneOf\":["
		       "{"
			"\"title\":\"Info\","
			"\"type\":\"array\","
			"\"items\":{"
			 "\"title\":\"InfoItem\","
			 "\"type\":\"object\","
			 "\"properties\":{"
			  "\"field\":{\"$ref\":\"#/definitions/field\"},"
			  "\"processNum\":{\"$ref\":\"#/definitions/processNum\"},"
			  "\"tags\":{\"$ref\":\"#/definitions/tags\"},"
			  "\"value\":{\"$ref\":\"#/definitions/typedValue\"}"
			 "},"
			 "\"required\":[\"field\",\"processNum\",\"tags\","
				       "\"value\"]"
			"}"
		       "},"
		       "{"
			"\"title\":\"Stat\","
			"\"type\":\"array\","
			"\"items\":{"
			 "\"title\":\"InfoItem\","
			 "\"type\":\"object\","
			 "\"properties\":{"
			  "\"objType\":{"
			   "\"enum\":[\"Frontend\",\"Backend\",\"Listener\","
				     "\"Server\",\"Unknown\"]"
			  "},"
			  "\"proxyId\":{"
			   "\"type\":\"integer\","
			   "\"minimum\":0"
			  "},"
			  "\"id\":{"
			   "\"type\":\"integer\","
			   "\"minimum\":0"
			  "},"
			  "\"field\":{\"$ref\":\"#/definitions/field\"},"
			  "\"processNum\":{\"$ref\":\"#/definitions/processNum\"},"
			  "\"tags\":{\"$ref\":\"#/definitions/tags\"},"
			  "\"typedValue\":{\"$ref\":\"#/definitions/typedValue\"}"
			 "},"
			 "\"required\":[\"objType\",\"proxyId\",\"id\","
				       "\"field\",\"processNum\",\"tags\","
				       "\"value\"]"
			"}"
		       "},"
		       "{"
			"\"title\":\"Error\","
			"\"type\":\"object\","
			"\"properties\":{"
			 "\"errorStr\":{"
			  "\"type\":\"string\""
			 "}"
			"},"
			"\"required\":[\"errorStr\"]"
		       "}"
		      "],"
		      "\"definitions\":{"
		       "\"field\":{"
			"\"type\":\"object\","
			"\"pos\":{"
			 "\"type\":\"integer\","
			 "\"minimum\":0"
			"},"
			"\"name\":{"
			 "\"type\":\"string\""
			"},"
			"\"required\":[\"pos\",\"name\"]"
		       "},"
		       "\"processNum\":{"
			"\"type\":\"integer\","
			"\"minimum\":1"
		       "},"
		       "\"tags\":{"
			"\"type\":\"object\","
			"\"origin\":{"
			 "\"type\":\"string\","
			 "\"enum\":[\"Metric\",\"Status\",\"Key\","
				   "\"Config\",\"Product\",\"Unknown\"]"
			"},"
			"\"nature\":{"
			 "\"type\":\"string\","
			 "\"enum\":[\"Gauge\",\"Limit\",\"Min\",\"Max\","
				   "\"Rate\",\"Counter\",\"Duration\","
				   "\"Age\",\"Time\",\"Name\",\"Output\","
				   "\"Avg\", \"Unknown\"]"
			"},"
			"\"scope\":{"
			 "\"type\":\"string\","
			 "\"enum\":[\"Cluster\",\"Process\",\"Service\","
				   "\"System\",\"Unknown\"]"
			"},"
			"\"required\":[\"origin\",\"nature\",\"scope\"]"
		       "},"
		       "\"typedValue\":{"
			"\"type\":\"object\","
			"\"oneOf\":["
			 "{\"$ref\":\"#/definitions/typedValue/definitions/s32Value\"},"
			 "{\"$ref\":\"#/definitions/typedValue/definitions/s64Value\"},"
			 "{\"$ref\":\"#/definitions/typedValue/definitions/u32Value\"},"
			 "{\"$ref\":\"#/definitions/typedValue/definitions/u64Value\"},"
			 "{\"$ref\":\"#/definitions/typedValue/definitions/strValue\"}"
			"],"
			"\"definitions\":{"
			 "\"s32Value\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"s32\"]"
			   "},"
			   "\"value\":{"
			    "\"type\":\"integer\","
			    "\"minimum\":-2147483648,"
			    "\"maximum\":2147483647"
			   "}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "},"
			 "\"s64Value\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"s64\"]"
			   "},"
			   "\"value\":{"
			    "\"type\":\"integer\","
			    "\"minimum\":-9007199254740991,"
			    "\"maximum\":9007199254740991"
			   "}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "},"
			 "\"u32Value\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"u32\"]"
			   "},"
			   "\"value\":{"
			    "\"type\":\"integer\","
			    "\"minimum\":0,"
			    "\"maximum\":4294967295"
			   "}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "},"
			 "\"u64Value\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"u64\"]"
			   "},"
			   "\"value\":{"
			    "\"type\":\"integer\","
			    "\"minimum\":0,"
			    "\"maximum\":9007199254740991"
			   "}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "},"
			 "\"strValue\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"str\"]"
			   "},"
			   "\"value\":{\"type\":\"string\"}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "},"
			 "\"unknownValue\":{"
			  "\"properties\":{"
			   "\"type\":{"
			    "\"type\":\"integer\","
			    "\"minimum\":0"
			   "},"
			   "\"value\":{"
			    "\"type\":\"string\","
			    "\"enum\":[\"unknown\"]"
			   "}"
			  "},"
			  "\"required\":[\"type\",\"value\"]"
			 "}"
			"}"
		       "}"
		      "}"
		     "}");

	/* nothing was appended: the chunk was too small, emit an error object */
	if (old_len == out->data) {
		chunk_reset(out);
		chunk_appendf(out,
			      "{\"errorStr\":\"output buffer too short\"}");
	}
	chunk_appendf(out, "\n");
}
+
+/* This function dumps the schema onto the stream connector's read buffer.
+ * It returns 0 as long as it does not complete, non-zero upon completion.
+ * No state is used.
+ */
+static int stats_dump_json_schema_to_buffer(struct appctx *appctx)
+{
+
+ chunk_reset(&trash_chunk);
+
+ stats_dump_json_schema(&trash_chunk);
+
+ if (applet_putchk(appctx, &trash_chunk) == -1)
+ return 0;
+
+ return 1;
+}
+
/* CLI parser/handler for "clear counters [all]". Without "all", only the
 * max-value counters of all proxies, servers and listeners are reset (OPER
 * level required). With "all", every counter is zeroed (ADMIN level
 * required). Extra counters from registered stats modules are re-initialized
 * when clearable or when "all" is used. Always returns 1 (the command is
 * fully handled at parse time, no I/O handler needed).
 */
static int cli_parse_clear_counters(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct proxy *px;
	struct server *sv;
	struct listener *li;
	struct stats_module *mod;
	int clrall = 0;

	if (strcmp(args[2], "all") == 0)
		clrall = 1;

	/* check permissions */
	if (!cli_has_level(appctx, ACCESS_LVL_OPER) ||
	    (clrall && !cli_has_level(appctx, ACCESS_LVL_ADMIN)))
		return 1;

	/* proxy front/back counters, then per-server and per-listener ones */
	for (px = proxies_list; px; px = px->next) {
		if (clrall) {
			memset(&px->be_counters, 0, sizeof(px->be_counters));
			memset(&px->fe_counters, 0, sizeof(px->fe_counters));
		}
		else {
			/* only reset the max-value watermarks */
			px->be_counters.conn_max = 0;
			px->be_counters.p.http.rps_max = 0;
			px->be_counters.sps_max = 0;
			px->be_counters.cps_max = 0;
			px->be_counters.nbpend_max = 0;
			px->be_counters.qtime_max = 0;
			px->be_counters.ctime_max = 0;
			px->be_counters.dtime_max = 0;
			px->be_counters.ttime_max = 0;

			px->fe_counters.conn_max = 0;
			px->fe_counters.p.http.rps_max = 0;
			px->fe_counters.sps_max = 0;
			px->fe_counters.cps_max = 0;
		}

		for (sv = px->srv; sv; sv = sv->next)
			if (clrall)
				memset(&sv->counters, 0, sizeof(sv->counters));
			else {
				sv->counters.cur_sess_max = 0;
				sv->counters.nbpend_max = 0;
				sv->counters.sps_max = 0;
				sv->counters.qtime_max = 0;
				sv->counters.ctime_max = 0;
				sv->counters.dtime_max = 0;
				sv->counters.ttime_max = 0;
			}

		list_for_each_entry(li, &px->conf.listeners, by_fe)
			if (li->counters) {
				if (clrall)
					memset(li->counters, 0, sizeof(*li->counters));
				else
					li->counters->conn_max = 0;
			}
	}

	/* process-wide max-rate watermarks */
	global.cps_max = 0;
	global.sps_max = 0;
	global.ssl_max = 0;
	global.ssl_fe_keys_max = 0;
	global.ssl_be_keys_max = 0;

	/* re-initialize the extra counters of each stats module for every
	 * matching proxy capability (FE/BE/server/listener)
	 */
	list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
		if (!mod->clearable && !clrall)
			continue;

		for (px = proxies_list; px; px = px->next) {
			enum stats_domain_px_cap mod_cap = stats_px_get_cap(mod->domain_flags);

			if (px->cap & PR_CAP_FE && mod_cap & STATS_PX_CAP_FE) {
				EXTRA_COUNTERS_INIT(px->extra_counters_fe,
				                    mod,
				                    mod->counters,
				                    mod->counters_size);
			}

			if (px->cap & PR_CAP_BE && mod_cap & STATS_PX_CAP_BE) {
				EXTRA_COUNTERS_INIT(px->extra_counters_be,
				                    mod,
				                    mod->counters,
				                    mod->counters_size);
			}

			if (mod_cap & STATS_PX_CAP_SRV) {
				for (sv = px->srv; sv; sv = sv->next) {
					EXTRA_COUNTERS_INIT(sv->extra_counters,
					                    mod,
					                    mod->counters,
					                    mod->counters_size);
				}
			}

			if (mod_cap & STATS_PX_CAP_LI) {
				list_for_each_entry(li, &px->conf.listeners, by_fe) {
					EXTRA_COUNTERS_INIT(li->extra_counters,
					                    mod,
					                    mod->counters,
					                    mod->counters_size);
				}
			}
		}
	}

	resolv_stats_clear_counters(clrall, &stats_module_list[STATS_DOMAIN_RESOLVERS]);

	memset(activity, 0, sizeof(activity));
	return 1;
}
+
+
+static int cli_parse_show_info(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_stat_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ int arg = 2;
+
+ ctx->scope_str = 0;
+ ctx->scope_len = 0;
+ ctx->flags = 0;
+ ctx->field = 0; /* explicit default value */
+
+ while (*args[arg]) {
+ if (strcmp(args[arg], "typed") == 0)
+ ctx->flags = (ctx->flags & ~STAT_FMT_MASK) | STAT_FMT_TYPED;
+ else if (strcmp(args[arg], "json") == 0)
+ ctx->flags = (ctx->flags & ~STAT_FMT_MASK) | STAT_FMT_JSON;
+ else if (strcmp(args[arg], "desc") == 0)
+ ctx->flags |= STAT_SHOW_FDESC;
+ else if (strcmp(args[arg], "float") == 0)
+ ctx->flags |= STAT_USE_FLOAT;
+ arg++;
+ }
+ return 0;
+}
+
+
/* CLI parser for "show stat [domain (proxy|resolvers)] [{<iid>|<name>}
 * <type> <sid>] [typed|json] [desc] [no-maint] [up]". Fills the applet's
 * show_stat context. When a proxy id/name plus type and server-id are given,
 * the dump is bound to that single entity. Returns 0 on success so the I/O
 * handler takes over, or the result of cli_err() on a parsing error.
 */
static int cli_parse_show_stat(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct show_stat_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
	int arg = 2;

	ctx->scope_str = 0;
	ctx->scope_len = 0;
	ctx->http_px = NULL; // not under http context
	ctx->flags = STAT_SHNODE | STAT_SHDESC;

	if ((strm_li(appctx_strm(appctx))->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER)
		ctx->flags |= STAT_SHLGNDS;

	/* proxy is the default domain */
	ctx->domain = STATS_DOMAIN_PROXY;
	if (strcmp(args[arg], "domain") == 0) {
		/* note: the <args> pointer itself is advanced so that <arg>
		 * keeps pointing at the next word to consume
		 */
		++args;

		if (strcmp(args[arg], "proxy") == 0) {
			++args;
		} else if (strcmp(args[arg], "resolvers") == 0) {
			ctx->domain = STATS_DOMAIN_RESOLVERS;
			++args;
		} else {
			return cli_err(appctx, "Invalid statistics domain.\n");
		}
	}

	/* optional "<iid|name> <type> <sid>" triplet restricting the dump */
	if (ctx->domain == STATS_DOMAIN_PROXY
	    && *args[arg] && *args[arg+1] && *args[arg+2]) {
		struct proxy *px;

		/* the first word may be a proxy name or a numeric id */
		px = proxy_find_by_name(args[arg], 0, 0);
		if (px)
			ctx->iid = px->uuid;
		else
			ctx->iid = atoi(args[arg]);

		if (!ctx->iid)
			return cli_err(appctx, "No such proxy.\n");

		ctx->flags |= STAT_BOUND;
		ctx->type = atoi(args[arg+1]);
		ctx->sid = atoi(args[arg+2]);
		arg += 3;
	}

	/* remaining words are output-format modifiers */
	while (*args[arg]) {
		if (strcmp(args[arg], "typed") == 0)
			ctx->flags = (ctx->flags & ~STAT_FMT_MASK) | STAT_FMT_TYPED;
		else if (strcmp(args[arg], "json") == 0)
			ctx->flags = (ctx->flags & ~STAT_FMT_MASK) | STAT_FMT_JSON;
		else if (strcmp(args[arg], "desc") == 0)
			ctx->flags |= STAT_SHOW_FDESC;
		else if (strcmp(args[arg], "no-maint") == 0)
			ctx->flags |= STAT_HIDE_MAINT;
		else if (strcmp(args[arg], "up") == 0)
			ctx->flags |= STAT_HIDE_DOWN;
		arg++;
	}

	return 0;
}
+
+static int cli_io_handler_dump_info(struct appctx *appctx)
+{
+ trash_chunk = b_make(trash.area, trash.size, 0, 0);
+ return stats_dump_info_to_buffer(appctx_sc(appctx));
+}
+
+/* This I/O handler runs as an applet embedded in a stream connector. It is
+ * used to send raw stats over a socket.
+ */
+static int cli_io_handler_dump_stat(struct appctx *appctx)
+{
+ trash_chunk = b_make(trash.area, trash.size, 0, 0);
+ return stats_dump_stat_to_buffer(appctx_sc(appctx), NULL);
+}
+
/* CLI I/O handler for "show schema json": emits the JSON schema describing
 * the "show info json"/"show stat json" output, using the whole trash area
 * as the dump buffer. Returns non-zero once complete.
 */
static int cli_io_handler_dump_json_schema(struct appctx *appctx)
{
	trash_chunk = b_make(trash.area, trash.size, 0, 0);
	return stats_dump_json_schema_to_buffer(appctx);
}
+
/* Registers, sizes, allocates and initializes the extra counters block
 * <counters> of object type <type> for every proxy-domain stats module whose
 * capabilities match <px_cap>. Returns 1 on success, 0 on allocation failure.
 * Note: the EXTRA_COUNTERS_REGISTER/ALLOC macros jump to the <alloc_failed>
 * label on failure, which is why it must exist here even though no explicit
 * goto is visible.
 */
int stats_allocate_proxy_counters_internal(struct extra_counters **counters,
                                           int type, int px_cap)
{
	struct stats_module *mod;

	EXTRA_COUNTERS_REGISTER(counters, type, alloc_failed);

	/* first pass: accumulate the size needed by each matching module */
	list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
		if (!(stats_px_get_cap(mod->domain_flags) & px_cap))
			continue;

		EXTRA_COUNTERS_ADD(mod, *counters, mod->counters, mod->counters_size);
	}

	EXTRA_COUNTERS_ALLOC(*counters, alloc_failed);

	/* second pass: copy each module's initial counter values in place */
	list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
		if (!(stats_px_get_cap(mod->domain_flags) & px_cap))
			continue;

		EXTRA_COUNTERS_INIT(*counters, mod, mod->counters, mod->counters_size);
	}

	return 1;

  alloc_failed:
	return 0;
}
+
+/* Initialize and allocate all extra counters for a proxy and its attached
+ * servers/listeners with all already registered stats module
+ */
+int stats_allocate_proxy_counters(struct proxy *px)
+{
+ struct server *sv;
+ struct listener *li;
+
+ if (px->cap & PR_CAP_FE) {
+ if (!stats_allocate_proxy_counters_internal(&px->extra_counters_fe,
+ COUNTERS_FE,
+ STATS_PX_CAP_FE)) {
+ return 0;
+ }
+ }
+
+ if (px->cap & PR_CAP_BE) {
+ if (!stats_allocate_proxy_counters_internal(&px->extra_counters_be,
+ COUNTERS_BE,
+ STATS_PX_CAP_BE)) {
+ return 0;
+ }
+ }
+
+ for (sv = px->srv; sv; sv = sv->next) {
+ if (!stats_allocate_proxy_counters_internal(&sv->extra_counters,
+ COUNTERS_SV,
+ STATS_PX_CAP_SRV)) {
+ return 0;
+ }
+ }
+
+ list_for_each_entry(li, &px->conf.listeners, by_fe) {
+ if (!stats_allocate_proxy_counters_internal(&li->extra_counters,
+ COUNTERS_LI,
+ STATS_PX_CAP_LI)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+void stats_register_module(struct stats_module *m)
+{
+ const uint8_t domain = stats_get_domain(m->domain_flags);
+
+ LIST_APPEND(&stats_module_list[domain], &m->list);
+ stat_count[domain] += m->stats_count;
+}
+
/* Config post-parser: builds the full field description table for the proxy
 * domain (the generic ST_F_* fields followed by each registered module's
 * fields, in registration order) and allocates the extra counters of every
 * proxy. Returns 0 on success or a combination of ERR_ALERT|ERR_FATAL.
 */
static int allocate_stats_px_postcheck(void)
{
	struct stats_module *mod;
	size_t i = ST_F_TOTAL_FIELDS;  /* write cursor, starts after generic fields */
	int err_code = 0;
	struct proxy *px;

	/* final count = generic fields + all module fields registered so far */
	stat_count[STATS_DOMAIN_PROXY] += ST_F_TOTAL_FIELDS;

	stat_f[STATS_DOMAIN_PROXY] = malloc(stat_count[STATS_DOMAIN_PROXY] * sizeof(struct name_desc));
	if (!stat_f[STATS_DOMAIN_PROXY]) {
		ha_alert("stats: cannot allocate all fields for proxy statistics\n");
		err_code |= ERR_ALERT | ERR_FATAL;
		return err_code;
	}

	/* generic fields first... */
	memcpy(stat_f[STATS_DOMAIN_PROXY], stat_fields,
	       ST_F_TOTAL_FIELDS * sizeof(struct name_desc));

	/* ...then each module's fields appended after them */
	list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_PROXY], list) {
		memcpy(stat_f[STATS_DOMAIN_PROXY] + i,
		       mod->stats,
		       mod->stats_count * sizeof(struct name_desc));
		i += mod->stats_count;
	}

	for (px = proxies_list; px; px = px->next) {
		if (!stats_allocate_proxy_counters(px)) {
			ha_alert("stats: cannot allocate all counters for proxy statistics\n");
			err_code |= ERR_ALERT | ERR_FATAL;
			return err_code;
		}
	}

	/* wait per-thread alloc to perform corresponding stat_l allocation */

	return err_code;
}
+
+REGISTER_CONFIG_POSTPARSER("allocate-stats-px", allocate_stats_px_postcheck);
+
/* Config post-parser: builds the field description table for the resolvers
 * domain (module fields only, no generic fields in this domain) and allocates
 * the resolver counters. Returns 0 on success or ERR_ALERT|ERR_FATAL.
 */
static int allocate_stats_rslv_postcheck(void)
{
	struct stats_module *mod;
	size_t i = 0;  /* write cursor into the field table */
	int err_code = 0;

	stat_f[STATS_DOMAIN_RESOLVERS] = malloc(stat_count[STATS_DOMAIN_RESOLVERS] * sizeof(struct name_desc));
	if (!stat_f[STATS_DOMAIN_RESOLVERS]) {
		ha_alert("stats: cannot allocate all fields for resolver statistics\n");
		err_code |= ERR_ALERT | ERR_FATAL;
		return err_code;
	}

	/* append each registered module's fields in registration order */
	list_for_each_entry(mod, &stats_module_list[STATS_DOMAIN_RESOLVERS], list) {
		memcpy(stat_f[STATS_DOMAIN_RESOLVERS] + i,
		       mod->stats,
		       mod->stats_count * sizeof(struct name_desc));
		i += mod->stats_count;
	}

	if (!resolv_allocate_counters(&stats_module_list[STATS_DOMAIN_RESOLVERS])) {
		ha_alert("stats: cannot allocate all counters for resolver statistics\n");
		err_code |= ERR_ALERT | ERR_FATAL;
		return err_code;
	}

	/* wait per-thread alloc to perform corresponding stat_l allocation */

	return err_code;
}
+
+REGISTER_CONFIG_POSTPARSER("allocate-stats-resolver", allocate_stats_rslv_postcheck);
+
+static int allocate_stat_lines_per_thread(void)
+{
+ int domains[] = { STATS_DOMAIN_PROXY, STATS_DOMAIN_RESOLVERS }, i;
+
+ for (i = 0; i < STATS_DOMAIN_COUNT; ++i) {
+ const int domain = domains[i];
+
+ stat_l[domain] = malloc(stat_count[domain] * sizeof(struct field));
+ if (!stat_l[domain])
+ return 0;
+ }
+ return 1;
+}
+
+REGISTER_PER_THREAD_ALLOC(allocate_stat_lines_per_thread);
+
+static int allocate_trash_counters(void)
+{
+ struct stats_module *mod;
+ int domains[] = { STATS_DOMAIN_PROXY, STATS_DOMAIN_RESOLVERS }, i;
+ size_t max_counters_size = 0;
+
+ /* calculate the greatest counters used by any stats modules */
+ for (i = 0; i < STATS_DOMAIN_COUNT; ++i) {
+ list_for_each_entry(mod, &stats_module_list[domains[i]], list) {
+ max_counters_size = mod->counters_size > max_counters_size ?
+ mod->counters_size : max_counters_size;
+ }
+ }
+
+ /* allocate the trash with the size of the greatest counters */
+ if (max_counters_size) {
+ trash_counters = malloc(max_counters_size);
+ if (!trash_counters) {
+ ha_alert("stats: cannot allocate trash counters for statistics\n");
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+REGISTER_PER_THREAD_ALLOC(allocate_trash_counters);
+
+static void deinit_stat_lines_per_thread(void)
+{
+ int domains[] = { STATS_DOMAIN_PROXY, STATS_DOMAIN_RESOLVERS }, i;
+
+ for (i = 0; i < STATS_DOMAIN_COUNT; ++i) {
+ const int domain = domains[i];
+
+ ha_free(&stat_l[domain]);
+ }
+}
+
+
+REGISTER_PER_THREAD_FREE(deinit_stat_lines_per_thread);
+
+static void deinit_stats(void)
+{
+ int domains[] = { STATS_DOMAIN_PROXY, STATS_DOMAIN_RESOLVERS }, i;
+
+ for (i = 0; i < STATS_DOMAIN_COUNT; ++i) {
+ const int domain = domains[i];
+
+ if (stat_f[domain])
+ free(stat_f[domain]);
+ }
+}
+
+REGISTER_POST_DEINIT(deinit_stats);
+
+static void free_trash_counters(void)
+{
+ if (trash_counters)
+ free(trash_counters);
+}
+
+REGISTER_PER_THREAD_FREE(free_trash_counters);
+
/* register cli keywords: each entry is { keyword tokens, usage string,
 * parse callback, I/O handler callback, release callback }
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "clear", "counters", NULL }, "clear counters [all] : clear max statistics counters (or all counters)", cli_parse_clear_counters, NULL, NULL },
	{ { "show", "info", NULL }, "show info [desc|json|typed|float]* : report information about the running process", cli_parse_show_info, cli_io_handler_dump_info, NULL },
	{ { "show", "stat", NULL }, "show stat [desc|json|no-maint|typed|up]*: report counters for each proxy and server", cli_parse_show_stat, cli_io_handler_dump_stat, NULL },
	{ { "show", "schema", "json", NULL }, "show schema json : report schema used for stats", NULL, cli_io_handler_dump_json_schema, NULL },
	{{},}  /* end marker */
}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
/* Applet serving the HTTP stats page; its I/O handler does all the work and
 * no per-instance context needs releasing.
 */
struct applet http_stats_applet = {
	.obj_type = OBJ_TYPE_APPLET,
	.name = "<STATS>", /* used for logging */
	.fct = http_stats_io_handler,
	.release = NULL,
};
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/stconn.c b/src/stconn.c
new file mode 100644
index 0000000..8e3ae7e
--- /dev/null
+++ b/src/stconn.c
@@ -0,0 +1,2050 @@
+/*
+ * stream connector management functions
+ *
+ * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/connection.h>
+#include <haproxy/check.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/pipe.h>
+#include <haproxy/pool.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/xref.h>
+
+DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
+DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));
+
+/* functions used by default on a detached stream connector */
+static void sc_app_abort(struct stconn *sc);
+static void sc_app_shut(struct stconn *sc);
+static void sc_app_chk_rcv(struct stconn *sc);
+static void sc_app_chk_snd(struct stconn *sc);
+
+/* functions used on a mux-based stream connector */
+static void sc_app_abort_conn(struct stconn *sc);
+static void sc_app_shut_conn(struct stconn *sc);
+static void sc_app_chk_rcv_conn(struct stconn *sc);
+static void sc_app_chk_snd_conn(struct stconn *sc);
+
+/* functions used on an applet-based stream connector */
+static void sc_app_abort_applet(struct stconn *sc);
+static void sc_app_shut_applet(struct stconn *sc);
+static void sc_app_chk_rcv_applet(struct stconn *sc);
+static void sc_app_chk_snd_applet(struct stconn *sc);
+
+static int sc_conn_process(struct stconn *sc);
+static int sc_conn_recv(struct stconn *sc);
+static int sc_conn_send(struct stconn *sc);
+static int sc_applet_process(struct stconn *sc);
+
/* stream connector operations for connections (mux-based endpoints) */
struct sc_app_ops sc_app_conn_ops = {
	.chk_rcv = sc_app_chk_rcv_conn,
	.chk_snd = sc_app_chk_snd_conn,
	.abort = sc_app_abort_conn,
	.shutdown= sc_app_shut_conn,
	.wake = sc_conn_process,
	.name = "STRM",
};

/* stream connector operations for embedded tasks (detached endpoints) */
struct sc_app_ops sc_app_embedded_ops = {
	.chk_rcv = sc_app_chk_rcv,
	.chk_snd = sc_app_chk_snd,
	.abort = sc_app_abort,
	.shutdown= sc_app_shut,
	.wake = NULL, /* may never be used */
	.name = "NONE", /* may never be used */
};

/* stream connector operations for applets */
struct sc_app_ops sc_app_applet_ops = {
	.chk_rcv = sc_app_chk_rcv_applet,
	.chk_snd = sc_app_chk_snd_applet,
	.abort = sc_app_abort_applet,
	.shutdown= sc_app_shut_applet,
	.wake = sc_applet_process,
	.name = "STRM",
};

/* stream connector for health checks on connections; only the wake callback
 * is needed, the check task drives everything else itself
 */
struct sc_app_ops sc_app_check_ops = {
	.chk_rcv = NULL,
	.chk_snd = NULL,
	.abort = NULL,
	.shutdown= NULL,
	.wake = wake_srv_chk,
	.name = "CHCK",
};
+
/* Initializes an endpoint descriptor to a fully idle state: no stream
 * endpoint, no connection, no stream connector, no xref peer, all flags
 * cleared and an empty I/O buffer.
 */
void sedesc_init(struct sedesc *sedesc)
{
	sedesc->se = NULL;
	sedesc->conn = NULL;
	sedesc->sc = NULL;
	sedesc->lra = TICK_ETERNITY;  /* presumably a read-activity timestamp -- confirm against struct sedesc */
	sedesc->fsb = TICK_ETERNITY;  /* presumably a send-blocked timestamp -- confirm against struct sedesc */
	sedesc->xref.peer = NULL;
	se_fl_setall(sedesc, SE_FL_NONE);

	sedesc->iobuf.pipe = NULL;
	sedesc->iobuf.buf = NULL;
	sedesc->iobuf.offset = sedesc->iobuf.data = 0;
	sedesc->iobuf.flags = IOBUF_FL_NONE;
}
+
+/* Tries to alloc an endpoint and initialize it. Returns NULL on failure. */
+struct sedesc *sedesc_new()
+{
+ struct sedesc *sedesc;
+
+ sedesc = pool_alloc(pool_head_sedesc);
+ if (unlikely(!sedesc))
+ return NULL;
+
+ sedesc_init(sedesc);
+ return sedesc;
+}
+
+/* Releases an endpoint. It is the caller responsibility to be sure it is safe
+ * and it is not shared with another entity
+ */
+void sedesc_free(struct sedesc *sedesc)
+{
+ if (sedesc) {
+ if (sedesc->iobuf.pipe)
+ put_pipe(sedesc->iobuf.pipe);
+ pool_free(pool_head_sedesc, sedesc);
+ }
+}
+
/* Tries to allocate a new stconn and initialize its main fields. On
 * failure, nothing is allocated and NULL is returned. It is an internal
 * function. The caller must, at least, set the SE_FL_ORPHAN or SE_FL_DETACHED
 * flag on the endpoint. If <sedesc> is NULL a fresh one is allocated and
 * owned by the new stconn.
 */
static struct stconn *sc_new(struct sedesc *sedesc)
{
	struct stconn *sc;

	sc = pool_alloc(pool_head_connstream);

	if (unlikely(!sc))
		goto alloc_error;

	sc->obj_type = OBJ_TYPE_SC;
	sc->flags = SC_FL_NONE;
	sc->state = SC_ST_INI;
	sc->ioto = TICK_ETERNITY;  /* no I/O timeout by default */
	sc->room_needed = 0;
	sc->app = NULL;
	sc->app_ops = NULL;
	sc->src = NULL;
	sc->dst = NULL;
	sc->wait_event.tasklet = NULL;
	sc->wait_event.events = 0;

	/* If there is no endpoint, allocate a new one now */
	if (!sedesc) {
		sedesc = sedesc_new();
		if (unlikely(!sedesc))
			goto alloc_error;
	}
	/* cross-link the stconn and its endpoint descriptor */
	sc->sedesc = sedesc;
	sedesc->sc = sc;

	return sc;

  alloc_error:
	/* NOTE(review): reached with sc == NULL when the first allocation
	 * failed; relies on pool_free() accepting NULL -- confirm
	 */
	pool_free(pool_head_connstream, sc);
	return NULL;
}
+
/* Creates a new stream connector and its associated stream from a mux. <sd> must
 * be defined. It returns NULL on error. On success, the new stream connector is
 * returned. In this case, SE_FL_ORPHAN flag is removed.
 */
struct stconn *sc_new_from_endp(struct sedesc *sd, struct session *sess, struct buffer *input)
{
	struct stconn *sc;

	sc = sc_new(sd);
	if (unlikely(!sc))
		return NULL;
	if (unlikely(!stream_new(sess, sc, input))) {
		/* stream creation failed: undo everything while leaving <sd>
		 * usable by its mux again
		 */
		sd->sc = NULL;
		if (sc->sedesc != sd) {
			/* none was provided so sc_new() allocated one */
			sedesc_free(sc->sedesc);
		}
		pool_free(pool_head_connstream, sc);
		se_fl_set(sd, SE_FL_ORPHAN);  /* back to mux-only ownership */
		return NULL;
	}
	se_fl_clr(sd, SE_FL_ORPHAN);
	return sc;
}
+
+/* Creates a new stream connector from an stream. There is no endpoint here, thus it
+ * will be created by sc_new(). So the SE_FL_DETACHED flag is set. It returns
+ * NULL on error. On success, the new stream connector is returned.
+ */
+struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags)
+{
+ struct stconn *sc;
+
+ sc = sc_new(NULL);
+ if (unlikely(!sc))
+ return NULL;
+ sc->flags |= flags;
+ sc_ep_set(sc, SE_FL_DETACHED);
+ sc->app = &strm->obj_type;
+ sc->app_ops = &sc_app_embedded_ops;
+ return sc;
+}
+
+/* Creates a new stream connector from an health-check. There is no endpoint here,
+ * thus it will be created by sc_new(). So the SE_FL_DETACHED flag is set. It
+ * returns NULL on error. On success, the new stream connector is returned.
+ */
+struct stconn *sc_new_from_check(struct check *check, unsigned int flags)
+{
+ struct stconn *sc;
+
+ sc = sc_new(NULL);
+ if (unlikely(!sc))
+ return NULL;
+ sc->flags |= flags;
+ sc_ep_set(sc, SE_FL_DETACHED);
+ sc->app = &check->obj_type;
+ sc->app_ops = &sc_app_check_ops;
+ return sc;
+}
+
/* Releases a stconn previously allocated by sc_new(), as well as its
 * endpoint, if it exists. This function is called internally or on error
 * path. The endpoint must be detached (SE_FL_DETACHED) if still present.
 */
void sc_free(struct stconn *sc)
{
	sockaddr_free(&sc->src);
	sockaddr_free(&sc->dst);
	if (sc->sedesc) {
		/* freeing an endpoint still owned by a mux/applet would be a bug */
		BUG_ON(!sc_ep_test(sc, SE_FL_DETACHED));
		sedesc_free(sc->sedesc);
	}
	tasklet_free(sc->wait_event.tasklet);
	pool_free(pool_head_connstream, sc);
}
+
+/* Conditionally removes a stream connector if it is detached and if there is no app
+ * layer defined. Except on error path, this one must be used. if release, the
+ * pointer on the SC is set to NULL.
+ */
+static void sc_free_cond(struct stconn **scp)
+{
+ struct stconn *sc = *scp;
+
+ if (!sc->app && (!sc->sedesc || sc_ep_test(sc, SE_FL_DETACHED))) {
+ sc_free(sc);
+ *scp = NULL;
+ }
+}
+
+
/* Attaches a stconn to a mux endpoint and sets the endpoint ctx. Returns
 * -1 on error and 0 on success. SE_FL_DETACHED flag is removed. This function is
 * called from a mux when it is attached to a stream or a health-check.
 */
int sc_attach_mux(struct stconn *sc, void *sd, void *ctx)
{
	struct connection *conn = ctx;
	struct sedesc *sedesc = sc->sedesc;

	if (sc_strm(sc)) {
		/* stream side: lazily create the I/O tasklet if needed */
		if (!sc->wait_event.tasklet) {
			sc->wait_event.tasklet = tasklet_new();
			if (!sc->wait_event.tasklet)
				return -1;
			sc->wait_event.tasklet->process = sc_conn_io_cb;
			sc->wait_event.tasklet->context = sc;
			sc->wait_event.events = 0;
		}

		sc->app_ops = &sc_app_conn_ops;
		/* cross-reference the two sides of the stream */
		xref_create(&sc->sedesc->xref, &sc_opposite(sc)->sedesc->xref);
	}
	else if (sc_check(sc)) {
		/* health-check side: same lazy tasklet, check-specific callback */
		if (!sc->wait_event.tasklet) {
			sc->wait_event.tasklet = tasklet_new();
			if (!sc->wait_event.tasklet)
				return -1;
			sc->wait_event.tasklet->process = srv_chk_io_cb;
			sc->wait_event.tasklet->context = sc;
			sc->wait_event.events = 0;
		}

		sc->app_ops = &sc_app_check_ops;
	}

	sedesc->se = sd;
	sedesc->conn = ctx;
	se_fl_set(sedesc, SE_FL_T_MUX);
	se_fl_clr(sedesc, SE_FL_DETACHED);
	if (!conn->ctx)
		conn->ctx = sc;
	return 0;
}
+
/* Attaches a stconn to an applet endpoint and sets the endpoint ctx. The
 * SE_FL_DETACHED flag is removed. This function is called by a stream when a
 * backend applet is registered. (Unlike sc_attach_mux(), nothing can fail
 * here, hence the void return.)
 */
static void sc_attach_applet(struct stconn *sc, void *sd)
{
	sc->sedesc->se = sd;
	sc_ep_set(sc, SE_FL_T_APPLET);
	sc_ep_clr(sc, SE_FL_DETACHED);
	if (sc_strm(sc)) {
		sc->app_ops = &sc_app_applet_ops;
		/* cross-reference the two sides of the stream */
		xref_create(&sc->sedesc->xref, &sc_opposite(sc)->sedesc->xref);
	}
}
+
/* Attaches a stconn to an app layer and sets the relevant
 * callbacks. Returns -1 on error and 0 on success. SE_FL_ORPHAN flag is
 * removed. This function is called by a stream when it is created to attach it
 * on the stream connector on the client side. The app ops installed depend on
 * the endpoint type already set on the stconn (mux, applet or none).
 */
int sc_attach_strm(struct stconn *sc, struct stream *strm)
{
	sc->app = &strm->obj_type;
	sc_ep_clr(sc, SE_FL_ORPHAN);
	sc_ep_report_read_activity(sc);
	if (sc_ep_test(sc, SE_FL_T_MUX)) {
		/* mux endpoint: needs an I/O tasklet */
		sc->wait_event.tasklet = tasklet_new();
		if (!sc->wait_event.tasklet)
			return -1;
		sc->wait_event.tasklet->process = sc_conn_io_cb;
		sc->wait_event.tasklet->context = sc;
		sc->wait_event.events = 0;

		sc->app_ops = &sc_app_conn_ops;
	}
	else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
		sc->app_ops = &sc_app_applet_ops;
	}
	else {
		sc->app_ops = &sc_app_embedded_ops;
	}
	return 0;
}
+
/* Detaches the stconn from the endpoint, if any. For a connection, if a
 * mux owns the connection ->detach() callback is called. Otherwise, it means
 * the stream connector owns the connection. In this case the connection is closed
 * and released. For an applet, the appctx is released. If still allocated, the
 * endpoint is reset and flagged as detached. If the app layer is also detached,
 * the stream connector is released.
 */
static void sc_detach_endp(struct stconn **scp)
{
	struct stconn *sc = *scp;
	struct xref *peer;

	if (!sc)
		return;


	/* Remove my link in the original objects. */
	peer = xref_get_peer_and_lock(&sc->sedesc->xref);
	if (peer)
		xref_disconnect(&sc->sedesc->xref, peer);

	if (sc_ep_test(sc, SE_FL_T_MUX)) {
		struct connection *conn = __sc_conn(sc);
		struct sedesc *sedesc = sc->sedesc;

		if (conn->mux) {
			/* ownership of <sedesc> moves to the mux: unlink it
			 * from the stconn before calling ->detach()
			 */
			if (sc->wait_event.events != 0)
				conn->mux->unsubscribe(sc, sc->wait_event.events, &sc->wait_event);
			se_fl_set(sedesc, SE_FL_ORPHAN);
			sedesc->sc = NULL;
			sc->sedesc = NULL;
			conn->mux->detach(sedesc);
		}
		else {
			/* It's too early to have a mux, let's just destroy
			 * the connection
			 */
			conn_stop_tracking(conn);
			conn_full_close(conn);
			if (conn->destroy_cb)
				conn->destroy_cb(conn);
			conn_free(conn);
		}
	}
	else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
		struct appctx *appctx = __sc_appctx(sc);

		sc_ep_set(sc, SE_FL_ORPHAN);
		sc->sedesc->sc = NULL;
		sc->sedesc = NULL;
		appctx_shut(appctx);
		appctx_free(appctx);
	}

	if (sc->sedesc) {
		/* the SD wasn't used and can be recycled */
		sc->sedesc->se = NULL;
		sc->sedesc->conn = NULL;
		sc->sedesc->flags = 0;
		sc_ep_set(sc, SE_FL_DETACHED);
	}

	/* FIXME: Reset SC for now but must be reviewed. SC flags are only
	 * connection related for now but this will evolve
	 */
	sc->flags &= SC_FL_ISBACK;
	if (sc_strm(sc))
		sc->app_ops = &sc_app_embedded_ops;
	else
		sc->app_ops = NULL;
	sc_free_cond(scp);
}
+
/* Detaches the stconn from the app layer. If there is no endpoint attached
 * to the stconn (or it is already detached), the stconn itself is then
 * released by sc_free_cond() and *scp is set to NULL.
 */
static void sc_detach_app(struct stconn **scp)
{
	struct stconn *sc = *scp;

	if (!sc)
		return;

	sc->app = NULL;
	sc->app_ops = NULL;
	sockaddr_free(&sc->src);
	sockaddr_free(&sc->dst);

	tasklet_free(sc->wait_event.tasklet);
	sc->wait_event.tasklet = NULL;
	sc->wait_event.events = 0;
	sc_free_cond(scp);
}
+
/* Destroy the stconn. It is detached from its endpoint and its
 * application. After this call, the stconn must be considered as released.
 * The BUG_ON_HOT verifies both detach steps actually freed it (the second
 * sc_free_cond() call sets the local pointer to NULL).
 */
void sc_destroy(struct stconn *sc)
{
	sc_detach_endp(&sc);
	sc_detach_app(&sc);
	BUG_ON_HOT(sc);
}
+
+/* Resets the stream connector endpoint. It happens when the app layer want to renew
+ * its endpoint. For a connection retry for instance. If a mux or an applet is
+ * attached, a new endpoint is created. Returns -1 on error and 0 on success.
+ */
+int sc_reset_endp(struct stconn *sc)
+{
+ struct sedesc *new_sd;
+
+ BUG_ON(!sc->app);
+
+ if (!__sc_endp(sc)) {
+ /* endpoint not attached or attached to a mux with no
+ * target. Thus the endpoint will not be release but just
+ * reset. The app is still attached, the sc will not be
+ * released.
+ */
+ sc_detach_endp(&sc);
+ return 0;
+ }
+
+ /* allocate the new endpoint first to be able to set error if it
+ * fails */
+ new_sd = sedesc_new();
+ if (!unlikely(new_sd))
+ return -1;
+
+ /* The app is still attached, the sc will not be released */
+ sc_detach_endp(&sc);
+ BUG_ON(!sc);
+ BUG_ON(sc->sedesc);
+ sc->sedesc = new_sd;
+ sc->sedesc->sc = sc;
+ sc_ep_set(sc, SE_FL_DETACHED);
+ return 0;
+}
+
+
/* Create an applet to handle a stream connector as a new appctx. The SC will
 * wake it up every time it is solicited. The appctx must be deleted by the task
 * handler using sc_detach_endp(), possibly from within the function itself.
 * It also pre-initializes the applet's context and returns it (or NULL in case
 * it could not be allocated).
 */
struct appctx *sc_applet_create(struct stconn *sc, struct applet *app)
{
	struct appctx *appctx;

	appctx = appctx_new_here(app, sc->sedesc);
	if (!appctx)
		return NULL;
	sc_attach_applet(sc, appctx);
	/* inherit the stream task's scheduling priority */
	appctx->t->nice = __sc_strm(sc)->task->nice;
	applet_need_more_data(appctx);
	appctx_wakeup(appctx);

	/* the applet is usable right away */
	sc->state = SC_ST_RDY;
	return appctx;
}
+
/* Conditionally forward the close to the write side. It returns 1 if it can be
 * forwarded. It is the caller's responsibility to forward the close to the
 * write side. Otherwise, 0 is returned. In this case, SC_FL_SHUT_WANTED flag
 * may be set on the consumer SC if we are only waiting for the outgoing data
 * to be flushed.
 */
static inline int sc_cond_forward_shut(struct stconn *sc)
{
	/* The close must not be forwarded: no read-side close happened yet,
	 * or half-closed connections are allowed (no SC_FL_NOHALF)
	 */
	if (!(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || !(sc->flags & SC_FL_NOHALF))
		return 0;

	if (co_data(sc_ic(sc)) && !(sc_ic(sc)->flags & CF_WRITE_TIMEOUT)) {
		/* the shutdown cannot be forwarded now because
		 * we should flush outgoing data first. But instruct the output
		 * channel it should be done ASAP.
		 */
		sc_schedule_shutdown(sc);
		return 0;
	}

	/* the close can be immediately forwarded to the write side */
	return 1;
}
+
+
+static inline int sc_is_fastfwd_supported(struct stconn *sc)
+{
+ return (!(global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD) &&
+ sc_ep_test(sc, SE_FL_MAY_FASTFWD_PROD) &&
+ sc_ep_test(sc_opposite(sc), SE_FL_MAY_FASTFWD_CONS) &&
+ sc_ic(sc)->to_forward);
+}
/*
 * This function performs a shutdown-read on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the read side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. If the stream connector has SC_FL_NOHALF, we also
 * forward the close to the write side. The owner task is woken up if it exists.
 */
static void sc_app_abort(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	/* already aborted/closed on the read side: nothing to do */
	if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
		return;

	sc->flags |= SC_FL_ABRT_DONE;
	ic->flags |= CF_READ_EVENT;

	if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc->flags & SC_FL_SHUT_DONE) {
		/* write side already shut: the SC is fully closed */
		sc->state = SC_ST_DIS;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
	else if (sc_cond_forward_shut(sc))
		return sc_app_shut(sc);

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(sc->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
}
+
/*
 * This function performs a shutdown-write on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the write side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. It does also close everything if the SC was marked as
 * being in error state. The owner task is woken up if it exists.
 */
static void sc_app_shut(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);
	struct channel *oc = sc_oc(sc);

	sc->flags &= ~SC_FL_SHUT_WANTED;
	if (sc->flags & SC_FL_SHUT_DONE)
		return;
	sc->flags |= SC_FL_SHUT_DONE;
	oc->flags |= CF_WRITE_EVENT;
	sc_set_hcto(sc);

	switch (sc->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_EOS|SC_FL_ABRT_DONE)) &&
		    !(ic->flags & CF_DONT_READ))
			return;

		__fallthrough;
	case SC_ST_CON:
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		/* Note that none of these states may happen with applets */
		sc->state = SC_ST_DIS;
		__fallthrough;
	default:
		sc->flags &= ~SC_FL_NOLINGER;
		sc->flags |= SC_FL_ABRT_DONE;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(sc->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
}
+
+/* default chk_rcv function for scheduled tasks */
+static void sc_app_chk_rcv(struct stconn *sc)
+{
+ if (sc_ep_have_ff_data(sc_opposite(sc))) {
+ /* stop reading */
+ sc_need_room(sc, -1);
+ }
+ else {
+ /* (re)start reading */
+ if (!(sc->flags & SC_FL_DONT_WAKE))
+ task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
+ }
+}
+
+/* default chk_snd function for scheduled tasks */
+static void sc_app_chk_snd(struct stconn *sc)
+{
+ struct channel *oc = sc_oc(sc);
+
+ if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUT_DONE)))
+ return;
+
+ if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || /* not waiting for data */
+ (!co_data(oc) && !sc_ep_have_ff_data(sc))) /* called with nothing to send ! */
+ return;
+
+ /* Otherwise there are remaining data to be sent in the buffer,
+ * so we tell the handler.
+ */
+ sc_ep_clr(sc, SE_FL_WAIT_DATA);
+ if (!(sc->flags & SC_FL_DONT_WAKE))
+ task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
+}
+
/*
 * This function performs a shutdown-read on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the read side or marks itself as closed. The buffer
 * flags are updated to reflect the new state. If the stream connector has
 * SC_FL_NOHALF, we also forward the close to the write side. If a control
 * layer is defined, then it is supposed to be a socket layer and file
 * descriptors are then shutdown or closed accordingly. The function
 * automatically disables polling if needed.
 */
static void sc_app_abort_conn(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	BUG_ON(!sc_conn(sc));

	/* already aborted/closed on the read side: nothing to do */
	if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
		return;
	sc->flags |= SC_FL_ABRT_DONE;
	ic->flags |= CF_READ_EVENT;

	if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc->flags & SC_FL_SHUT_DONE) {
		/* both directions are now closed: shut the connection down */
		sc_conn_shut(sc);
		sc->state = SC_ST_DIS;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
	else if (sc_cond_forward_shut(sc))
		return sc_app_shut_conn(sc);
}
+
/*
 * This function performs a shutdown-write on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the write side or marks itself as closed. The
 * buffer flags are updated to reflect the new state. It does also close
 * everything if the SC was marked as being in error state. If there is a
 * data-layer shutdown, it is called.
 */
static void sc_app_shut_conn(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);
	struct channel *oc = sc_oc(sc);

	BUG_ON(!sc_conn(sc));

	sc->flags &= ~SC_FL_SHUT_WANTED;
	if (sc->flags & SC_FL_SHUT_DONE)
		return;
	sc->flags |= SC_FL_SHUT_DONE;
	oc->flags |= CF_WRITE_EVENT;
	sc_set_hcto(sc);

	switch (sc->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (sc->flags & SC_FL_NOLINGER) {
			/* unclean data-layer shutdown, typically an aborted request
			 * or a forwarded shutdown from a client to a server due to
			 * option abortonclose. No need for the TLS layer to try to
			 * emit a shutdown message.
			 */
			sc_conn_shutw(sc, CO_SHW_SILENT);
		}
		else {
			/* clean data-layer shutdown. This only happens on the
			 * frontend side, or on the backend side when forwarding
			 * a client close in TCP mode or in HTTP TUNNEL mode
			 * while option abortonclose is set. We want the TLS
			 * layer to try to signal it to the peer before we close.
			 */
			sc_conn_shutw(sc, CO_SHW_NORMAL);

			/* read side still open: keep the connection half-closed */
			if (!(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(ic->flags & CF_DONT_READ))
				return;
		}

		__fallthrough;
	case SC_ST_CON:
		/* we may have to close a pending connection, and mark the
		 * response buffer as abort
		 */
		sc_conn_shut(sc);
		__fallthrough;
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		sc->state = SC_ST_DIS;
		__fallthrough;
	default:
		sc->flags &= ~SC_FL_NOLINGER;
		sc->flags |= SC_FL_ABRT_DONE;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
}
+
+/* This function is used for inter-stream connector calls. It is called by the
+ * consumer to inform the producer side that it may be interested in checking
+ * for free space in the buffer. Note that it intentionally does not update
+ * timeouts, so that we can still check them later at wake-up. This function is
+ * dedicated to connection-based stream connectors.
+ */
+static void sc_app_chk_rcv_conn(struct stconn *sc)
+{
+ BUG_ON(!sc_conn(sc));
+
+ /* (re)start reading */
+ if (sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
+ tasklet_wakeup(sc->wait_event.tasklet);
+}
+
+
/* This function is used for inter-stream connector calls. It is called by the
 * producer to inform the consumer side that it may be interested in checking
 * for data in the buffer. Note that it intentionally does not update timeouts,
 * so that we can still check them later at wake-up.
 */
static void sc_app_chk_snd_conn(struct stconn *sc)
{
	struct channel *oc = sc_oc(sc);

	BUG_ON(!sc_conn(sc));

	if (unlikely(!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST) ||
	    (sc->flags & SC_FL_SHUT_DONE)))
		return;

	if (unlikely(!co_data(oc) && !sc_ep_have_ff_data(sc))) /* called with nothing to send ! */
		return;

	if (!sc_ep_have_ff_data(sc) &&          /* data wants to be fast-forwarded ASAP */
	    !sc_ep_test(sc, SE_FL_WAIT_DATA))   /* not waiting for data */
		return;

	/* attempt an immediate send unless one is already pending */
	if (!(sc->wait_event.events & SUB_RETRY_SEND))
		sc_conn_send(sc);

	if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
		/* Write error on the file descriptor */
		BUG_ON(sc_ep_test(sc, SE_FL_EOS|SE_FL_ERROR|SE_FL_ERR_PENDING) == (SE_FL_EOS|SE_FL_ERR_PENDING));
		goto out_wakeup;
	}

	/* OK, so now we know that some data might have been sent, and that we may
	 * have to poll first. We have to do that too if the buffer is not empty.
	 */
	if (!co_data(oc)) {
		/* the connection is established but we can't write. Either the
		 * buffer is empty, or we just refrain from sending because the
		 * ->o limit was reached. Maybe we just wrote the last
		 * chunk and need to close.
		 */
		if ((oc->flags & CF_AUTO_CLOSE) &&
		    ((sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED) &&
		    sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST)) {
			sc_shutdown(sc);
			goto out_wakeup;
		}

		if ((sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == 0)
			sc_ep_set(sc, SE_FL_WAIT_DATA);
	}
	else {
		/* Otherwise there are remaining data to be sent in the buffer,
		 * which means we have to poll before doing so.
		 */
		sc_ep_clr(sc, SE_FL_WAIT_DATA);
	}

	/* in case of special condition (error, shutdown, end of write...), we
	 * have to notify the task.
	 */
	if (likely((sc->flags & SC_FL_SHUT_DONE) ||
	           ((oc->flags & CF_WRITE_EVENT) && sc->state < SC_ST_EST) ||
	           ((oc->flags & CF_WAKE_WRITE) &&
	            ((!co_data(oc) && !oc->to_forward) ||
	             !sc_state_in(sc->state, SC_SB_EST))))) {
	out_wakeup:
		if (!(sc->flags & SC_FL_DONT_WAKE))
			task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
	}
}
+
+/*
+ * This function performs a shutdown-read on a stream connector attached to an
+ * applet in a connected or init state (it does nothing for other states). It
+ * either shuts the read side or marks itself as closed. The buffer flags are
+ * updated to reflect the new state. If the stream connector has SC_FL_NOHALF,
+ * we also forward the close to the write side. The owner task is woken up if
+ * it exists.
+ */
+static void sc_app_abort_applet(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+
+ BUG_ON(!sc_appctx(sc));
+
+ /* abort or end-of-stream already recorded: nothing left to do */
+ if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return;
+ sc->flags |= SC_FL_ABRT_DONE;
+ ic->flags |= CF_READ_EVENT;
+
+ /* Note: on abort, we don't call the applet */
+
+ /* only act while connecting, ready or established */
+ if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
+ return;
+
+ if (sc->flags & SC_FL_SHUT_DONE) {
+ /* write side already shut: release the applet and disconnect */
+ appctx_shut(__sc_appctx(sc));
+ sc->state = SC_ST_DIS;
+ if (sc->flags & SC_FL_ISBACK)
+ __sc_strm(sc)->conn_exp = TICK_ETERNITY;
+ }
+ else if (sc_cond_forward_shut(sc))
+ /* e.g. SC_FL_NOHALF: forward the close to the write side */
+ return sc_app_shut_applet(sc);
+}
+
+/*
+ * This function performs a shutdown-write on a stream connector attached to an
+ * applet in a connected or init state (it does nothing for other states). It
+ * either shuts the write side or marks itself as closed. The buffer flags are
+ * updated to reflect the new state. It does also close everything if the
+ * stream connector was marked as being in error state. The owner task is
+ * woken up if it exists.
+ */
+static void sc_app_shut_applet(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+ struct channel *oc = sc_oc(sc);
+
+ BUG_ON(!sc_appctx(sc));
+
+ /* the shutdown is no longer pending once it is performed */
+ sc->flags &= ~SC_FL_SHUT_WANTED;
+ if (sc->flags & SC_FL_SHUT_DONE)
+ return;
+ sc->flags |= SC_FL_SHUT_DONE;
+ oc->flags |= CF_WRITE_EVENT;
+ sc_set_hcto(sc);
+
+ /* on shutw we always wake the applet up */
+ appctx_wakeup(__sc_appctx(sc));
+
+ switch (sc->state) {
+ case SC_ST_RDY:
+ case SC_ST_EST:
+ /* we have to shut before closing, otherwise some short messages
+ * may never leave the system, especially when there are remaining
+ * unread data in the socket input buffer, or when nolinger is set.
+ * However, if SC_FL_NOLINGER is explicitly set, we know there is
+ * no risk so we close both sides immediately.
+ */
+ if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_EOS|SC_FL_ABRT_DONE)) &&
+ !(ic->flags & CF_DONT_READ))
+ return;
+
+ __fallthrough;
+ case SC_ST_CON:
+ case SC_ST_CER:
+ case SC_ST_QUE:
+ case SC_ST_TAR:
+ /* Note that none of these states may happen with applets */
+ appctx_shut(__sc_appctx(sc));
+ sc->state = SC_ST_DIS;
+ __fallthrough;
+ default:
+ /* fully closed: also mark the read side as aborted */
+ sc->flags &= ~SC_FL_NOLINGER;
+ sc->flags |= SC_FL_ABRT_DONE;
+ if (sc->flags & SC_FL_ISBACK)
+ __sc_strm(sc)->conn_exp = TICK_ETERNITY;
+ }
+}
+
+/* chk_rcv function for applets: (re)starts reading by waking the applet up,
+ * unless the opposite side still holds fast-forwarded data to flush first.
+ */
+static void sc_app_chk_rcv_applet(struct stconn *sc)
+{
+ BUG_ON(!sc_appctx(sc));
+
+ if (!sc_ep_have_ff_data(sc_opposite(sc))) {
+ /* (re)start reading */
+ appctx_wakeup(__sc_appctx(sc));
+ }
+}
+
+/* chk_snd function for applets: wakes the applet up so that it can consume
+ * pending output data, but only when it is established, not already shut for
+ * writes, and actually waiting for data it is willing to consume.
+ */
+static void sc_app_chk_snd_applet(struct stconn *sc)
+{
+ struct channel *oc = sc_oc(sc);
+
+ BUG_ON(!sc_appctx(sc));
+
+ if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUT_DONE)))
+ return;
+
+ /* we only wake the applet up if it was waiting for some data and is ready to consume it */
+ if (!sc_ep_test(sc, SE_FL_WAIT_DATA|SE_FL_WONT_CONSUME))
+ return;
+
+ if (co_data(oc) || sc_ep_have_ff_data(sc)) {
+ /* (re)start sending */
+ appctx_wakeup(__sc_appctx(sc));
+ }
+}
+
+
+/* This function is designed to be called from within the stream handler to
+ * update the input channel's expiration timer and the stream connector's
+ * Rx flags based on the channel's flags. It needs to be called only once
+ * after the channel's flags have settled down, and before they are cleared,
+ * though it doesn't harm to call it as often as desired (it just slightly
+ * hurts performance). It must not be called from outside of the stream
+ * handler, as what it does will be used to compute the stream task's
+ * expiration.
+ */
+void sc_update_rx(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+
+ /* nothing to update once the read side is closed or aborted */
+ if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return;
+
+ /* Unblock the SC if it needs room and the free space is large enough (0
+ * means it can always be unblocked). Do not unblock it if -1 was
+ * specified.
+ */
+ if (!sc->room_needed || (sc->room_needed > 0 && channel_recv_max(ic) >= sc->room_needed))
+ sc_have_room(sc);
+
+ /* Read not closed, update FD status and timeout for reads */
+ if (ic->flags & CF_DONT_READ)
+ sc_wont_read(sc);
+ else
+ sc_will_read(sc);
+
+ sc_chk_rcv(sc);
+}
+
+/* This function is designed to be called from within the stream handler to
+ * update the output channel's expiration timer and the stream connector's
+ * Tx flags based on the channel's flags. It needs to be called only once
+ * after the channel's flags have settled down, and before they are cleared,
+ * though it doesn't harm to call it as often as desired (it just slightly
+ * hurts performance). It must not be called from outside of the stream
+ * handler, as what it does will be used to compute the stream task's
+ * expiration.
+ */
+void sc_update_tx(struct stconn *sc)
+{
+ struct channel *oc = sc_oc(sc);
+
+ /* nothing to update once the write side is shut */
+ if (sc->flags & SC_FL_SHUT_DONE)
+ return;
+
+ /* Write not closed, update FD status and timeout for writes */
+ if (!co_data(oc)) {
+ /* stop writing */
+ if (!sc_ep_test(sc, SE_FL_WAIT_DATA)) {
+ if ((sc->flags & SC_FL_SHUT_WANTED) == 0)
+ sc_ep_set(sc, SE_FL_WAIT_DATA);
+ }
+ return;
+ }
+
+ /* (re)start writing */
+ sc_ep_clr(sc, SE_FL_WAIT_DATA);
+}
+
+/* This function is the equivalent to sc_update() except that it's
+ * designed to be called from outside the stream handlers, typically the lower
+ * layers (applets, connections) after I/O completion. After updating the stream
+ * interface and timeouts, it will try to forward what can be forwarded, then to
+ * wake the associated task up if an important event requires special handling.
+ * It may update SE_FL_WAIT_DATA and/or SC_FL_NEED_ROOM, that the callers are
+ * encouraged to watch to take appropriate action.
+ * It should not be called from within the stream itself, sc_update()
+ * is designed for this. Please do not statify this function, it's often
+ * present in backtraces, it's useful to recognize it.
+ */
+void sc_notify(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+ struct channel *oc = sc_oc(sc);
+ struct stconn *sco = sc_opposite(sc);
+ struct task *task = sc_strm_task(sc);
+
+ /* process consumer side: complete a pending shutdown once the output
+ * buffer is empty and no transport-level handshake is in progress.
+ */
+ if (!co_data(oc)) {
+ struct connection *conn = sc_conn(sc);
+
+ if (((sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED) &&
+ (sc->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
+ sc_shutdown(sc);
+ }
+
+ /* indicate that we may be waiting for data from the output channel or
+ * we're about to close and can't expect more data if SC_FL_SHUT_WANTED is there.
+ */
+ if (!(sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
+ sc_ep_set(sc, SE_FL_WAIT_DATA);
+ else if ((sc->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED)
+ sc_ep_clr(sc, SE_FL_WAIT_DATA);
+
+ if (oc->flags & CF_DONT_READ)
+ sc_wont_read(sco);
+ else
+ sc_will_read(sco);
+
+ /* Notify the other side when we've injected data into the IC that
+ * needs to be forwarded. We can do fast-forwarding as soon as there
+ * are output data, but we avoid doing this if some of the data are
+ * not yet scheduled for being forwarded, because it is very likely
+ * that it will be done again immediately afterwards once the following
+ * data are parsed (eg: HTTP chunking). We only clear SC_FL_NEED_ROOM
+ * once we've emptied *some* of the output buffer, and not just when
+ * there is available room, because applets are often forced to stop
+ * before the buffer is full. We must not stop based on input data
+ * alone because an HTTP parser might need more data to complete the
+ * parsing.
+ */
+ if (sc_ep_have_ff_data(sc_opposite(sc)) ||
+ (co_data(ic) && sc_ep_test(sco, SE_FL_WAIT_DATA) &&
+ (!(sc->flags & SC_FL_SND_EXP_MORE) || channel_full(ic, co_data(ic)) || channel_input_data(ic) == 0))) {
+ int new_len, last_len;
+
+ last_len = co_data(ic) + sc_ep_ff_data(sco);
+ sc_chk_snd(sco);
+ new_len = co_data(ic) + sc_ep_ff_data(sco);
+
+ /* check if the consumer has freed some space either in the
+ * buffer or in the pipe.
+ */
+ if (!sc->room_needed || (new_len < last_len && (sc->room_needed < 0 || channel_recv_max(ic) >= sc->room_needed)))
+ sc_have_room(sc);
+ }
+
+ if (!(ic->flags & CF_DONT_READ))
+ sc_will_read(sc);
+
+ sc_chk_rcv(sc);
+ sc_chk_rcv(sco);
+
+ /* wake the task up only when needed */
+ if (/* changes on the production side that must be handled:
+ * - An error on receipt: SC_FL_ERROR
+ * - A read event: shutdown for reads (CF_READ_EVENT + EOS/ABRT_DONE)
+ * end of input (CF_READ_EVENT + SC_FL_EOI)
+ * data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
+ * read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
+ */
+ ((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || !ic->to_forward || sco->state != SC_ST_EST)) ||
+ (sc->flags & SC_FL_ERROR) ||
+
+ /* changes on the consumption side */
+ sc_ep_test(sc, SE_FL_ERR_PENDING) ||
+ ((oc->flags & CF_WRITE_EVENT) &&
+ ((sc->state < SC_ST_EST) ||
+ (sc->flags & SC_FL_SHUT_DONE) ||
+ (((oc->flags & CF_WAKE_WRITE) ||
+ (!(oc->flags & CF_AUTO_CLOSE) &&
+ !(sc->flags & (SC_FL_SHUT_WANTED|SC_FL_SHUT_DONE)))) &&
+ (sco->state != SC_ST_EST ||
+ (!co_data(oc) && !oc->to_forward)))))) {
+ task_wakeup(task, TASK_WOKEN_IO);
+ }
+ else {
+ /* Update expiration date for the task and requeue it if not already expired */
+ if (!tick_is_expired(task->expire, now_ms)) {
+ task->expire = tick_first(task->expire, sc_ep_rcv_ex(sc));
+ task->expire = tick_first(task->expire, sc_ep_snd_ex(sc));
+ task->expire = tick_first(task->expire, sc_ep_rcv_ex(sco));
+ task->expire = tick_first(task->expire, sc_ep_snd_ex(sco));
+ task->expire = tick_first(task->expire, ic->analyse_exp);
+ task->expire = tick_first(task->expire, oc->analyse_exp);
+ task->expire = tick_first(task->expire, __sc_strm(sc)->conn_exp);
+
+ /* WARNING: Don't forget to remove this BUG_ON before 2.9.0 */
+ BUG_ON(tick_is_expired(task->expire, now_ms));
+ task_queue(task);
+ }
+ }
+
+ /* SC_FL_RCV_ONCE is a one-shot flag: drop it after a read event */
+ if (ic->flags & CF_READ_EVENT)
+ sc->flags &= ~SC_FL_RCV_ONCE;
+}
+
+/*
+ * This function propagates an end-of-stream received on a socket-based connection.
+ * It updates the stream connector. If the stream connector has SC_FL_NOHALF,
+ * the close is also forwarded to the write side as an abort.
+ */
+static void sc_conn_eos(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+
+ BUG_ON(!sc_conn(sc));
+
+ /* end-of-stream or abort already recorded: nothing to do */
+ if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return;
+ sc->flags |= SC_FL_EOS;
+ ic->flags |= CF_READ_EVENT;
+ sc_ep_report_read_activity(sc);
+
+ if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
+ return;
+
+ if (sc->flags & SC_FL_SHUT_DONE)
+ goto do_close;
+
+ if (sc_cond_forward_shut(sc)) {
+ /* we want to immediately forward this close to the write side */
+ /* force flag on ssl to keep stream in cache */
+ sc_conn_shutw(sc, CO_SHW_SILENT);
+ goto do_close;
+ }
+
+ /* otherwise that's just a normal read shutdown */
+ return;
+
+ do_close:
+ /* OK we completely close the socket here just as if we went through sc_shut[rw]() */
+ sc_conn_shut(sc);
+
+ sc->flags &= ~SC_FL_SHUT_WANTED;
+ sc->flags |= SC_FL_SHUT_DONE;
+
+ sc->state = SC_ST_DIS;
+ if (sc->flags & SC_FL_ISBACK)
+ __sc_strm(sc)->conn_exp = TICK_ETERNITY;
+ return;
+}
+
+/*
+ * This is the callback which is called by the connection layer to receive data
+ * into the buffer from the connection. It iterates over the mux layer's
+ * rcv_buf function. Returns non-zero when new data, an EOI/EOS or an error
+ * was collected, zero otherwise. Please do not statify this function, it's
+ * often present in backtraces, it's useful to recognize it.
+ */
+int sc_conn_recv(struct stconn *sc)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct channel *ic = sc_ic(sc);
+ int ret, max, cur_read = 0;
+ int read_poll = MAX_READ_POLL_LOOPS;
+ int flags = 0;
+
+ /* If not established yet, do nothing. */
+ if (sc->state != SC_ST_EST)
+ return 0;
+
+ /* If another call to sc_conn_recv() failed, and we subscribed to
+ * recv events already, give up now.
+ */
+ if ((sc->wait_event.events & SUB_RETRY_RECV) || sc_waiting_room(sc))
+ return 0;
+
+ /* maybe we were called immediately after an asynchronous abort */
+ if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return 1;
+
+ /* we must wait because the mux is not installed yet */
+ if (!conn->mux)
+ return 0;
+
+ /* stop immediately on errors. Note that we DON'T want to stop on
+ * POLL_ERR, as the poller might report a write error while there
+ * are still data available in the recv buffer. This typically
+ * happens when we send too large a request to a backend server
+ * which rejects it before reading it all.
+ */
+ if (!sc_ep_test(sc, SE_FL_RCV_MORE)) {
+ if (!conn_xprt_ready(conn))
+ return 0;
+ if (sc_ep_test(sc, SE_FL_ERROR))
+ goto end_recv;
+ }
+
+ /* prepare to detect if the mux needs more room */
+ sc_ep_clr(sc, SE_FL_WANT_ROOM);
+
+ if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
+ global.tune.idle_timer &&
+ (unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
+ /* The buffer was empty and nothing was transferred for more
+ * than one second. This was caused by a pause and not by
+ * congestion. Reset any streaming mode to reduce latency.
+ */
+ ic->xfer_small = 0;
+ ic->xfer_large = 0;
+ ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
+ }
+
+#if defined(USE_LINUX_SPLICE)
+ /* Detect if the splicing is possible depending on the stream policy */
+ if ((global.tune.options & GTUNE_USE_SPLICE) &&
+ (ic->to_forward >= MIN_SPLICE_FORWARD) &&
+ ((!(sc->flags & SC_FL_ISBACK) && ((strm_fe(__sc_strm(sc))->options2|__sc_strm(sc)->be->options2) & PR_O2_SPLIC_REQ)) ||
+ ((sc->flags & SC_FL_ISBACK) && ((strm_fe(__sc_strm(sc))->options2|__sc_strm(sc)->be->options2) & PR_O2_SPLIC_RTR)) ||
+ ((ic->flags & CF_STREAMER_FAST) && ((strm_sess(__sc_strm(sc))->fe->options2|__sc_strm(sc)->be->options2) & PR_O2_SPLIC_AUT))))
+ flags |= CO_RFL_MAY_SPLICE;
+#endif
+
+ /* First, let's see if we may fast-forward data from a side to the other
+ * one without using the channel buffer.
+ */
+ if (sc_is_fastfwd_supported(sc)) {
+ if (channel_data(ic)) {
+ /* We're embarrassed, there are already data pending in
+ * the buffer and we don't want to have them at two
+ * locations at a time. Let's indicate we need some
+ * place and ask the consumer to hurry.
+ */
+ flags |= CO_RFL_BUF_FLUSH;
+ goto abort_fastfwd;
+ }
+ ret = conn->mux->fastfwd(sc, ic->to_forward, flags);
+ if (ret < 0)
+ goto abort_fastfwd;
+ else if (ret > 0) {
+ if (ic->to_forward != CHN_INFINITE_FORWARD)
+ ic->to_forward -= ret;
+ ic->total += ret;
+ cur_read += ret;
+ ic->flags |= CF_READ_EVENT;
+ }
+
+ if (sc_ep_test(sc, SE_FL_EOS | SE_FL_ERROR))
+ goto end_recv;
+
+ if (sc_ep_test(sc, SE_FL_WANT_ROOM))
+ sc_need_room(sc, -1);
+
+ if (sc_ep_test(sc, SE_FL_MAY_FASTFWD_PROD) && ic->to_forward)
+ goto done_recv;
+ }
+
+ abort_fastfwd:
+ /* now we'll need a input buffer for the stream */
+ if (!sc_alloc_ibuf(sc, &(__sc_strm(sc)->buffer_wait)))
+ goto end_recv;
+
+ /* For an HTX stream, if the buffer is stuck (no output data with some
+ * input data) and if the HTX message is fragmented or if its free space
+ * wraps, we force an HTX defragmentation. It is a way to have a
+ * contiguous free space and to let the mux copy as much data as
+ * possible.
+ *
+ * NOTE: A possible optim may be to let the mux decides if defrag is
+ * required or not, depending on amount of data to be xferred.
+ */
+ if (IS_HTX_STRM(__sc_strm(sc)) && !co_data(ic)) {
+ struct htx *htx = htxbuf(&ic->buf);
+
+ if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
+ htx_defrag(htx, NULL, 0);
+ }
+
+ /* Instruct the mux it must keep subscribing for read events */
+ if (!(sc->flags & SC_FL_ISBACK) && /* for frontend conns only */
+ (sc_opposite(sc)->state != SC_ST_INI) && /* before backend connection setup */
+ (__sc_strm(sc)->be->options & PR_O_ABRT_CLOSE)) /* if abortonclose option is set for the current backend */
+ flags |= CO_RFL_KEEP_RECV;
+
+ /* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
+ * was enabled, which implies that the recv buffer was not full. So we have a guarantee
+ * that if such an event is not handled above in splice, it will be handled here by
+ * recv().
+ */
+ while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
+ (!(conn->flags & CO_FL_HANDSHAKE) &&
+ (!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)))) {
+ int cur_flags = flags;
+
+ /* Compute transient CO_RFL_* flags */
+ if (co_data(ic)) {
+ cur_flags |= (CO_RFL_BUF_WET | CO_RFL_BUF_NOT_STUCK);
+ }
+
+ /* <max> may be null. This is the mux responsibility to set
+ * SE_FL_RCV_MORE on the SC if more space is needed.
+ */
+ max = channel_recv_max(ic);
+ ret = conn->mux->rcv_buf(sc, &ic->buf, max, cur_flags);
+
+ if (sc_ep_test(sc, SE_FL_WANT_ROOM)) {
+ /* SE_FL_WANT_ROOM must not be reported if the channel's
+ * buffer is empty.
+ */
+ BUG_ON(c_empty(ic));
+
+ sc_need_room(sc, channel_recv_max(ic) + 1);
+ /* Add READ_PARTIAL because some data are pending but
+ * cannot be xferred to the channel
+ */
+ ic->flags |= CF_READ_EVENT;
+ sc_ep_report_read_activity(sc);
+ }
+
+ if (ret <= 0) {
+ /* if we refrained from reading because we asked for a
+ * flush to satisfy rcv_pipe(), we must not subscribe
+ * and instead report that there's not enough room
+ * here to proceed.
+ */
+ if (flags & CO_RFL_BUF_FLUSH)
+ sc_need_room(sc, -1);
+ break;
+ }
+
+ cur_read += ret;
+
+ /* if we're allowed to directly forward data, we must update ->o */
+ if (ic->to_forward && !(sc_opposite(sc)->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))) {
+ unsigned long fwd = ret;
+ if (ic->to_forward != CHN_INFINITE_FORWARD) {
+ if (fwd > ic->to_forward)
+ fwd = ic->to_forward;
+ ic->to_forward -= fwd;
+ }
+ c_adv(ic, fwd);
+ }
+
+ ic->flags |= CF_READ_EVENT;
+ ic->total += ret;
+
+ /* End-of-input reached, we can leave. In this case, it is
+ * important to break the loop to not block the SC because of
+ * the channel's policies.This way, we are still able to receive
+ * shutdowns.
+ */
+ if (sc_ep_test(sc, SE_FL_EOI))
+ break;
+
+ if ((sc->flags & SC_FL_RCV_ONCE) || --read_poll <= 0) {
+ /* we don't expect to read more data */
+ sc_wont_read(sc);
+ break;
+ }
+
+ /* if too many bytes were missing from last read, it means that
+ * it's pointless trying to read again because the system does
+ * not have them in buffers.
+ */
+ if (ret < max) {
+ /* if a streamer has read few data, it may be because we
+ * have exhausted system buffers. It's not worth trying
+ * again.
+ */
+ if (ic->flags & CF_STREAMER) {
+ /* we're stopped by the channel's policy */
+ sc_wont_read(sc);
+ break;
+ }
+
+ /* if we read a large block smaller than what we requested,
+ * it's almost certain we'll never get anything more.
+ */
+ if (ret >= global.tune.recv_enough) {
+ /* we're stopped by the channel's policy */
+ sc_wont_read(sc);
+ break;
+ }
+ }
+
+ /* if we are waiting for more space, don't try to read more data
+ * right now.
+ */
+ if (sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
+ break;
+ } /* while !flags */
+
+ done_recv:
+ if (!cur_read)
+ se_have_no_more_data(sc->sedesc);
+ else {
+ /* heuristics to detect/maintain streaming mode based on how
+ * full the buffer gets on each read pass.
+ */
+ if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
+ (cur_read <= ic->buf.size / 2)) {
+ ic->xfer_large = 0;
+ ic->xfer_small++;
+ if (ic->xfer_small >= 3) {
+ /* we have read less than half of the buffer in
+ * one pass, and this happened at least 3 times.
+ * This is definitely not a streamer.
+ */
+ ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
+ }
+ else if (ic->xfer_small >= 2) {
+ /* if the buffer has been at least half full twice,
+ * we receive faster than we send, so at least it
+ * is not a "fast streamer".
+ */
+ ic->flags &= ~CF_STREAMER_FAST;
+ }
+ }
+ else if (!(ic->flags & CF_STREAMER_FAST) && (cur_read >= channel_data_limit(ic))) {
+ /* we read a full buffer at once */
+ ic->xfer_small = 0;
+ ic->xfer_large++;
+ if (ic->xfer_large >= 3) {
+ /* we call this buffer a fast streamer if it manages
+ * to be filled in one call 3 consecutive times.
+ */
+ ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
+ }
+ }
+ else {
+ ic->xfer_small = 0;
+ ic->xfer_large = 0;
+ }
+ ic->last_read = now_ms;
+ sc_ep_report_read_activity(sc);
+ }
+
+ end_recv:
+ ret = (cur_read != 0);
+
+ /* Report EOI on the channel if it was reached from the mux point of
+ * view. */
+ if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
+ sc_ep_report_read_activity(sc);
+ sc->flags |= SC_FL_EOI;
+ ic->flags |= CF_READ_EVENT;
+ ret = 1;
+ }
+
+ if (sc_ep_test(sc, SE_FL_EOS)) {
+ /* we received a shutdown */
+ if (ic->flags & CF_AUTO_CLOSE)
+ sc_schedule_shutdown(sc_opposite(sc));
+ sc_conn_eos(sc);
+ ret = 1;
+ }
+
+ if (sc_ep_test(sc, SE_FL_ERROR)) {
+ sc->flags |= SC_FL_ERROR;
+ ret = 1;
+ }
+ else if (!cur_read &&
+ !(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
+ !(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
+ /* Subscribe to receive events if we're blocking on I/O */
+ conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
+ se_have_no_more_data(sc->sedesc);
+ }
+ else {
+ se_have_more_data(sc->sedesc);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* This tries to perform a synchronous receive on the stream connector to
+ * try to collect last arrived data. In practice it's only implemented on
+ * stconns. Returns 0 if nothing was done, non-zero if new data or a
+ * shutdown were collected. This may result on some delayed receive calls
+ * to be programmed and performed later, though it doesn't provide any
+ * such guarantee.
+ */
+int sc_conn_sync_recv(struct stconn *sc)
+{
+ /* only attempt a receive when ready or established */
+ if (!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST))
+ return 0;
+
+ if (!sc_mux_ops(sc))
+ return 0; // only stconns are supported
+
+ if (sc->wait_event.events & SUB_RETRY_RECV)
+ return 0; // already subscribed
+
+ if (!sc_is_recv_allowed(sc))
+ return 0; // already failed
+
+ return sc_conn_recv(sc);
+}
+
+/*
+ * This function is called to send buffer data to a stream socket.
+ * It calls the mux layer's snd_buf function. It relies on the
+ * caller to commit polling changes. The caller should check conn->flags
+ * for errors. Returns non-zero when something was done (data sent, error
+ * detected or shutdown already performed). Please do not statify this
+ * function, it's often present in backtraces, it's useful to recognize it.
+ */
+int sc_conn_send(struct stconn *sc)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct stconn *sco = sc_opposite(sc);
+ struct stream *s = __sc_strm(sc);
+ struct channel *oc = sc_oc(sc);
+ int ret;
+ int did_send = 0;
+
+ if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
+ /* We're probably there because the tasklet was woken up,
+ * but process_stream() ran before, detected there was an
+ * error and put the SC back to SC_ST_TAR. There's still
+ * CO_FL_ERROR on the connection but we don't want to add
+ * SE_FL_ERROR back, so give up
+ */
+ if (sc->state < SC_ST_CON)
+ return 0;
+ BUG_ON(sc_ep_test(sc, SE_FL_EOS|SE_FL_ERROR|SE_FL_ERR_PENDING) == (SE_FL_EOS|SE_FL_ERR_PENDING));
+ return 1;
+ }
+
+ /* We're already waiting to be able to send, give up */
+ if (sc->wait_event.events & SUB_RETRY_SEND)
+ return 0;
+
+ /* we might have been called just after an asynchronous shutw */
+ if (sc->flags & SC_FL_SHUT_DONE)
+ return 1;
+
+ /* we must wait because the mux is not installed yet */
+ if (!conn->mux)
+ return 0;
+
+ /* first try to flush any data left in the fast-forward path */
+ if (sc_ep_have_ff_data(sc)) {
+ unsigned int send_flag = 0;
+
+ if ((!(sc->flags & (SC_FL_SND_ASAP|SC_FL_SND_NEVERWAIT)) &&
+ ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
+ (sc->flags & SC_FL_SND_EXP_MORE) ||
+ (IS_HTX_STRM(s) &&
+ (!(sco->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
+ ((oc->flags & CF_ISRESP) &&
+ (oc->flags & CF_AUTO_CLOSE) &&
+ (sc->flags & SC_FL_SHUT_WANTED)))
+ send_flag |= CO_SFL_MSG_MORE;
+
+ if (oc->flags & CF_STREAMER)
+ send_flag |= CO_SFL_STREAMER;
+
+ ret = conn->mux->resume_fastfwd(sc, send_flag);
+ if (ret > 0)
+ did_send = 1;
+
+ if (sc_ep_have_ff_data(sc))
+ goto end;
+ }
+
+ /* At this point, the pipe is empty, but we may still have data pending
+ * in the normal buffer.
+ */
+ if (co_data(oc)) {
+ /* when we're here, we already know that there is no spliced
+ * data left, and that there are sendable buffered data.
+ */
+
+ /* check if we want to inform the kernel that we're interested in
+ * sending more data after this call. We want this if :
+ * - we're about to close after this last send and want to merge
+ * the ongoing FIN with the last segment.
+ * - we know we can't send everything at once and must get back
+ * here because of unaligned data
+ * - there is still a finite amount of data to forward
+ * The test is arranged so that the most common case does only 2
+ * tests.
+ */
+ unsigned int send_flag = 0;
+
+ if ((!(sc->flags & (SC_FL_SND_ASAP|SC_FL_SND_NEVERWAIT)) &&
+ ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
+ (sc->flags & SC_FL_SND_EXP_MORE) ||
+ (IS_HTX_STRM(s) &&
+ (!(sco->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
+ ((oc->flags & CF_ISRESP) &&
+ (oc->flags & CF_AUTO_CLOSE) &&
+ (sc->flags & SC_FL_SHUT_WANTED)))
+ send_flag |= CO_SFL_MSG_MORE;
+
+ if (oc->flags & CF_STREAMER)
+ send_flag |= CO_SFL_STREAMER;
+
+ if (s->txn && s->txn->flags & TX_L7_RETRY && !b_data(&s->txn->l7_buffer)) {
+ /* If we want to be able to do L7 retries, copy
+ * the data we're about to send, so that we are able
+ * to resend them if needed
+ */
+ /* Try to allocate a buffer if we had none.
+ * If it fails, the next test will just
+ * disable the l7 retries by setting
+ * l7_conn_retries to 0.
+ */
+ if (s->txn->req.msg_state != HTTP_MSG_DONE)
+ s->txn->flags &= ~TX_L7_RETRY;
+ else {
+ if (b_alloc(&s->txn->l7_buffer) == NULL)
+ s->txn->flags &= ~TX_L7_RETRY;
+ else {
+ memcpy(b_orig(&s->txn->l7_buffer),
+ b_orig(&oc->buf),
+ b_size(&oc->buf));
+ s->txn->l7_buffer.head = co_data(oc);
+ b_add(&s->txn->l7_buffer, co_data(oc));
+ }
+
+ }
+ }
+
+ ret = conn->mux->snd_buf(sc, &oc->buf, co_data(oc), send_flag);
+ if (ret > 0) {
+ did_send = 1;
+ c_rew(oc, ret);
+ c_realign_if_empty(oc);
+
+ if (!co_data(oc)) {
+ /* Always clear both flags once everything has been sent, they're one-shot */
+ sc->flags &= ~(SC_FL_SND_ASAP|SC_FL_SND_EXP_MORE);
+ }
+ /* if some data remain in the buffer, it's only because the
+ * system buffers are full, we will try next time.
+ */
+ }
+ }
+
+ end:
+ if (did_send) {
+ oc->flags |= CF_WRITE_EVENT | CF_WROTE_DATA;
+ if (sc->state == SC_ST_CON)
+ sc->state = SC_ST_RDY;
+ }
+
+ /* unblock the opposite side if it was waiting for room and enough was freed */
+ if (!sco->room_needed || (did_send && (sco->room_needed < 0 || channel_recv_max(sc_oc(sc)) >= sco->room_needed)))
+ sc_have_room(sco);
+
+ if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
+ oc->flags |= CF_WRITE_EVENT;
+ BUG_ON(sc_ep_test(sc, SE_FL_EOS|SE_FL_ERROR|SE_FL_ERR_PENDING) == (SE_FL_EOS|SE_FL_ERR_PENDING));
+ if (sc_ep_test(sc, SE_FL_ERROR))
+ sc->flags |= SC_FL_ERROR;
+ return 1;
+ }
+
+ /* FIXME: Must be reviewed for FF */
+ if (!co_data(oc) && !sc_ep_have_ff_data(sc)) {
+ if (did_send)
+ sc_ep_report_send_activity(sc);
+ /* If fast-forwarding is blocked, unblock it now to check for
+ * receive on the other side
+ */
+ if (sc->sedesc->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
+ sc->sedesc->iobuf.flags &= ~IOBUF_FL_FF_BLOCKED;
+ sc_have_room(sco);
+ did_send = 1;
+ }
+ }
+ else {
+ /* We couldn't send all of our data, let the mux know we'd like to send more */
+ conn->mux->subscribe(sc, SUB_RETRY_SEND, &sc->wait_event);
+ if (sc_state_in(sc->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO))
+ sc_ep_report_blocked_send(sc, did_send);
+ }
+
+ return did_send;
+}
+
+/* perform a synchronous send() for the stream connector. The CF_WRITE_EVENT
+ * flag is cleared prior to the attempt, and will possibly be updated in case
+ * of success.
+ */
+void sc_conn_sync_send(struct stconn *sc)
+{
+ struct channel *oc = sc_oc(sc);
+
+ oc->flags &= ~CF_WRITE_EVENT;
+
+ /* write side already shut: nothing to send */
+ if (sc->flags & SC_FL_SHUT_DONE)
+ return;
+
+ /* nothing to send */
+ if (!co_data(oc))
+ return;
+
+ /* only send while connecting, ready or established */
+ if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
+ return;
+
+ /* only mux-based stconns are supported here */
+ if (!sc_mux_ops(sc))
+ return;
+
+ sc_conn_send(sc);
+}
+
+/* Called by I/O handlers after completion. It propagates
+ * connection flags to the stream connector, updates the stream (which may or
+ * may not take this opportunity to try to forward data), then update the
+ * connection's polling based on the channels and stream connector's final
+ * states. The function always returns 0. Please do not statify this function,
+ * it's often present in backtraces, it's useful to recognize it.
+ */
+int sc_conn_process(struct stconn *sc)
+{
+ struct connection *conn = __sc_conn(sc);
+ struct channel *ic = sc_ic(sc);
+ struct channel *oc = sc_oc(sc);
+
+ BUG_ON(!conn);
+
+ /* If we have data to send, try it now */
+ if ((co_data(oc) || sc_ep_have_ff_data(sc)) &&
+ !(sc->wait_event.events & SUB_RETRY_SEND))
+ sc_conn_send(sc);
+
+ /* First step, report to the stream connector what was detected at the
+ * connection layer : errors and connection establishment.
+ * Only add SC_FL_ERROR if we're connected, or we're attempting to
+ * connect, we may get there because we got woken up, but only run
+ * after process_stream() noticed there was an error, and decided
+ * to retry to connect, the connection may still have CO_FL_ERROR,
+ * and we don't want to add SC_FL_ERROR back
+ *
+ * Note: This test is only required because sc_conn_process is also the SI
+ * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
+ * care of it.
+ */
+
+ if (sc->state >= SC_ST_CON) {
+ if (sc_is_conn_error(sc))
+ sc->flags |= SC_FL_ERROR;
+ }
+
+ /* If we had early data, and the handshake ended, then
+ * we can remove the flag, and attempt to wake the task up,
+ * in the event there's an analyser waiting for the end of
+ * the handshake.
+ */
+ if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
+ sc_ep_test(sc, SE_FL_WAIT_FOR_HS)) {
+ sc_ep_clr(sc, SE_FL_WAIT_FOR_HS);
+ task_wakeup(sc_strm_task(sc), TASK_WOKEN_MSG);
+ }
+
+ /* transport is ready: promote a connecting SC to ready */
+ if (!sc_state_in(sc->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
+ (conn->flags & CO_FL_WAIT_XPRT) == 0) {
+ if (sc->flags & SC_FL_ISBACK)
+ __sc_strm(sc)->conn_exp = TICK_ETERNITY;
+ oc->flags |= CF_WRITE_EVENT;
+ if (sc->state == SC_ST_CON)
+ sc->state = SC_ST_RDY;
+ }
+
+ /* Report EOS on the channel if it was reached from the mux point of
+ * view.
+ *
+ * Note: This test is only required because sc_conn_process is also the SI
+ * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
+ * care of it.
+ */
+ if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_EOS)) {
+ /* we received a shutdown */
+ if (ic->flags & CF_AUTO_CLOSE)
+ sc_schedule_shutdown(sc_opposite(sc));
+ sc_conn_eos(sc);
+ }
+
+ /* Report EOI on the channel if it was reached from the mux point of
+ * view.
+ *
+ * Note: This test is only required because sc_conn_process is also the SI
+ * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
+ * care of it.
+ */
+ if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
+ sc->flags |= SC_FL_EOI;
+ ic->flags |= CF_READ_EVENT;
+ sc_ep_report_read_activity(sc);
+ }
+
+ if (sc_ep_test(sc, SE_FL_ERROR))
+ sc->flags |= SC_FL_ERROR;
+
+ /* Second step : update the stream connector and channels, try to forward any
+ * pending data, then possibly wake the stream up based on the new
+ * stream connector status.
+ */
+ sc_notify(sc);
+ stream_release_buffers(__sc_strm(sc));
+ return 0;
+}
+
+/* This is the ->process() function for any stream connector's wait_event task.
+ * It's assigned during the stream connector's initialization, for any type of
+ * stream connector. Thus it is always safe to perform a tasklet_wakeup() on a
+ * stream connector, as the presence of the SC is checked there.
+ */
+struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state)
+{
+ struct stconn *sc = ctx;
+ int ret = 0;
+
+ /* the SC may have been detached from its connection meanwhile */
+ if (!sc_conn(sc))
+ return t;
+
+ /* try to send first (pending output data, fast-forward data or a
+ * blocked fast-forward), then to receive, and only run the full
+ * process step when either side made progress.
+ */
+ if (!(sc->wait_event.events & SUB_RETRY_SEND) && (co_data(sc_oc(sc)) || sc_ep_have_ff_data(sc) || (sc->sedesc->iobuf.flags & IOBUF_FL_FF_BLOCKED)))
+ ret = sc_conn_send(sc);
+ if (!(sc->wait_event.events & SUB_RETRY_RECV))
+ ret |= sc_conn_recv(sc);
+ if (ret != 0)
+ sc_conn_process(sc);
+
+ stream_release_buffers(__sc_strm(sc));
+ return t;
+}
+
+/*
+ * This function propagates an end-of-stream received from an applet. It
+ * updates the stream connector. If it is already shut, the applet is
+ * released. Otherwise, we try to forward the shutdown, immediately or ASAP.
+ */
+static void sc_applet_eos(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+
+ BUG_ON(!sc_appctx(sc));
+
+ /* nothing to do if the EOS/abort was already seen on this side */
+ if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
+ return;
+ sc->flags |= SC_FL_EOS;
+ ic->flags |= CF_READ_EVENT;
+ sc_ep_report_read_activity(sc);
+
+ /* Note: on abort, we don't call the applet */
+
+ /* only established-ish states may have the shutdown forwarded */
+ if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
+ return;
+
+ if (sc->flags & SC_FL_SHUT_DONE) {
+ /* both directions are closed now: release the applet */
+ appctx_shut(__sc_appctx(sc));
+ sc->state = SC_ST_DIS;
+ if (sc->flags & SC_FL_ISBACK)
+ __sc_strm(sc)->conn_exp = TICK_ETERNITY;
+ }
+ else if (sc_cond_forward_shut(sc))
+ return sc_app_shut_applet(sc);
+}
+
+/* Callback to be used by applet handlers upon completion. It updates the stream
+ * (which may or may not take this opportunity to try to forward data), then
+ * may re-enable the applet's based on the channels and stream connector's final
+ * states. Please do not statify this function, it's often present in backtraces,
+ * it's useful to recognize it.
+ */
+int sc_applet_process(struct stconn *sc)
+{
+ struct channel *ic = sc_ic(sc);
+
+ BUG_ON(!sc_appctx(sc));
+
+ /* Report EOI on the channel if it was reached from the applet point of
+ * view. */
+ if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
+ sc_ep_report_read_activity(sc);
+ sc->flags |= SC_FL_EOI;
+ ic->flags |= CF_READ_EVENT;
+ }
+
+ if (sc_ep_test(sc, SE_FL_ERROR))
+ sc->flags |= SC_FL_ERROR;
+
+ if (sc_ep_test(sc, SE_FL_EOS)) {
+ /* we received a shutdown */
+ sc_applet_eos(sc);
+ }
+
+ /* an endpoint must not report EOI while still having pending data */
+ BUG_ON(sc_ep_test(sc, SE_FL_HAVE_NO_DATA|SE_FL_EOI) == SE_FL_EOI);
+
+ /* If the applet wants to write and the channel is closed, it's a
+ * broken pipe and it must be reported.
+ */
+ if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)))
+ sc_ep_set(sc, SE_FL_ERROR);
+
+ /* automatically mark the applet having data available if it reported
+ * being blocked by the channel.
+ */
+ if ((sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) ||
+ sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
+ applet_have_more_data(__sc_appctx(sc));
+
+ /* update the stream connector, channels, and possibly wake the stream up */
+ sc_notify(sc);
+ stream_release_buffers(__sc_strm(sc));
+
+ /* sc_notify may have passed through chk_snd and released some blocking
+ * flags. Process_stream will consider those flags to wake up the
+ * appctx but in the case the task is not in runqueue we may have to
+ * wakeup the appctx immediately.
+ */
+ if (sc_is_recv_allowed(sc) || sc_is_send_allowed(sc))
+ appctx_wakeup(__sc_appctx(sc));
+ return 0;
+}
+
+
+/* Prepares an endpoint upgrade. We don't know at this stage if the upgrade will
+ * succeed or not and if the stconn will be reused by the new endpoint. Thus,
+ * for now, only pretend the stconn is detached.
+ */
+void sc_conn_prepare_endp_upgrade(struct stconn *sc)
+{
+ BUG_ON(!sc_conn(sc) || !sc->app);
+ /* drop the mux-attachment flags and mark the SC as detached for now */
+ sc_ep_clr(sc, SE_FL_T_MUX);
+ sc_ep_set(sc, SE_FL_DETACHED);
+}
+
+/* Endpoint upgrade failed. Restore the stconn state (i.e. undo what
+ * sc_conn_prepare_endp_upgrade() did).
+ */
+void sc_conn_abort_endp_upgrade(struct stconn *sc)
+{
+ sc_ep_set(sc, SE_FL_T_MUX);
+ sc_ep_clr(sc, SE_FL_DETACHED);
+}
+
+/* Commit the endpoint upgrade. If stconn is attached, it means the new endpoint
+ * uses it. So we do nothing. Otherwise, the stconn will be destroyed with the
+ * overlying stream. So, it means we must commit the detach.
+*/
+void sc_conn_commit_endp_upgrade(struct stconn *sc)
+{
+ if (!sc_ep_test(sc, SE_FL_DETACHED))
+ return;
+ sc_detach_endp(&sc);
+ /* Because it was already set as detached, the sedesc must be preserved */
+ BUG_ON(!sc);
+ BUG_ON(!sc->sedesc);
+}
+
+/* return the frontend or backend mux stream ID.
+ */
+static int
+smp_fetch_sid(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn;
+ struct stconn *sc;
+ int64_t sid = 0;
+
+ if (!smp->strm)
+ return 0;
+
+ /* "f*" keywords work on the frontend SC, others on the backend one */
+ sc = (kw[0] == 'f' ? smp->strm->scf : smp->strm->scb);
+ conn = sc_conn(sc);
+
+ /* No connection */
+ if (!conn)
+ return 0;
+
+ /* No mux install, this may change */
+ if (!conn->mux) {
+ smp->flags |= SMP_F_MAY_CHANGE;
+ return 0;
+ }
+
+ /* Query the mux when sctl is implemented; without sctl, sid=0 is
+ * reported.
+ */
+ if (conn->mux->sctl) {
+ if (conn->mux->sctl(sc, MUX_SCTL_SID, &sid) == -1)
+ return 0;
+ }
+
+ smp->flags = SMP_F_VOL_TXN;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = sid;
+
+ return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: fetches that may return multiple types should be declared using the
+ * appropriate pseudo-type. If not available it must be declared as the lowest
+ * common denominator, the type that can be casted into all other ones.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ /* Both keywords use smp_fetch_sid() which always produces an integer
+ * (it sets smp->data.type = SMP_T_SINT), so both must be declared as
+ * SMP_T_SINT ("fs.id" was mistakenly declared as SMP_T_STR).
+ */
+ { "bs.id", smp_fetch_sid, 0, NULL, SMP_T_SINT, SMP_USE_L6REQ },
+ { "fs.id", smp_fetch_sid, 0, NULL, SMP_T_SINT, SMP_USE_L6RES },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
diff --git a/src/stick_table.c b/src/stick_table.c
new file mode 100644
index 0000000..6427568
--- /dev/null
+++ b/src/stick_table.c
@@ -0,0 +1,5658 @@
+/*
+ * Stick tables management functions.
+ *
+ * Copyright 2009-2010 EXCELIANCE, Emeric Brun <ebrun@exceliance.fr>
+ * Copyright (C) 2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <string.h>
+#include <errno.h>
+
+#include <import/ebmbtree.h>
+#include <import/ebsttree.h>
+#include <import/ebistree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli.h>
+#include <haproxy/dict.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/peers.h>
+#include <haproxy/pool.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+
+/* structure used to return a table key built from a sample */
+static THREAD_LOCAL struct stktable_key static_table_key;
+/* pointer to the "src" sample fetch; presumably resolved at init time by
+ * code outside this chunk — TODO confirm where it is assigned */
+static int (*smp_fetch_src)(const struct arg *, struct sample *, const char *, void *);
+/* shared pool for stick counters */
+struct pool_head *pool_head_stk_ctr __read_mostly = NULL;
+/* list of all declared stick-tables */
+struct stktable *stktables_list;
+/* tree of stick-tables indexed by name (see stktable_store_name()) */
+struct eb_root stktable_by_name = EB_ROOT;
+
+/* rounds <i> up to the next multiple of the pointer size */
+#define round_ptr_size(i) (((i) + (sizeof(void *) - 1)) &~ (sizeof(void *) - 1))
+
+/* This function inserts stktable <t> into the tree of known stick-table.
+ * The stick-table ID is used as the storing key so it must already have
+ * been initialized. Note that <t->id> is referenced, not copied, so it
+ * must remain valid for the lifetime of the tree entry.
+ */
+void stktable_store_name(struct stktable *t)
+{
+ t->name.key = t->id;
+ ebis_insert(&stktable_by_name, &t->name);
+}
+
+/* Looks up a stick-table by its name in the global name tree. Returns the
+ * table pointer if found, otherwise NULL. The extra strcmp() double-checks
+ * that the entry returned by the tree lookup is an exact name match.
+ */
+struct stktable *stktable_find_by_name(const char *name)
+{
+ struct ebpt_node *node;
+ struct stktable *t;
+
+ node = ebis_lookup(&stktable_by_name, name);
+ if (node) {
+ t = container_of(node, struct stktable, name);
+ if (strcmp(t->id, name) == 0)
+ return t;
+ }
+
+ return NULL;
+}
+
+/*
+ * Free an allocated sticky session <ts>, and decrease sticky sessions counter
+ * in table <t>. It's safe to call it under or out of a lock.
+ */
+void __stksess_free(struct stktable *t, struct stksess *ts)
+{
+ HA_ATOMIC_DEC(&t->current);
+ /* the data block is allocated *before* the stksess in the same pool
+ * object, so rewind the pointer before releasing it.
+ */
+ pool_free(t->pool, (void *)ts - round_ptr_size(t->data_size));
+}
+
+/*
+ * Free an allocated sticky session <ts>, and decrease sticky sessions counter
+ * in table <t>.
+ * This function locks the table
+ */
+void stksess_free(struct stktable *t, struct stksess *ts)
+{
+ void *data;
+ /* release the reference held on the server-key dictionary entry, if any */
+ data = stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
+ if (data) {
+ dict_entry_unref(&server_key_dict, stktable_data_cast(data, std_t_dict));
+ stktable_data_cast(data, std_t_dict) = NULL;
+ }
+ /* a read lock is sufficient here since __stksess_free() only performs
+ * an atomic counter update plus a pool_free().
+ */
+ HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->lock);
+ __stksess_free(t, ts);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->lock);
+}
+
+/*
+ * Kill an stksess (only if its ref_cnt is zero). This must be called under the
+ * write lock. Returns zero if it could not be deleted, non-zero otherwise.
+ */
+int __stksess_kill(struct stktable *t, struct stksess *ts)
+{
+ if (HA_ATOMIC_LOAD(&ts->ref_cnt))
+ return 0;
+
+ eb32_delete(&ts->exp);
+ /* if the entry is still queued in the update tree, remove it under the
+ * dedicated update lock.
+ */
+ if (ts->upd.node.leaf_p) {
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ eb32_delete(&ts->upd);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ }
+ ebmb_delete(&ts->key);
+ __stksess_free(t, ts);
+ return 1;
+}
+
+/*
+ * Decrease the refcount if decrefcnt is not 0, and try to kill the stksess.
+ * Returns non-zero if deleted, zero otherwise.
+ * This function locks the table
+ */
+int stksess_kill(struct stktable *t, struct stksess *ts, int decrefcnt)
+{
+ int ret;
+
+ /* if other users still hold a reference, just drop ours and leave */
+ if (decrefcnt && HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1) != 0)
+ return 0;
+
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+ ret = __stksess_kill(t, ts);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+ return ret;
+}
+
+/*
+ * Initialize or update the key in the sticky session <ts> present in table <t>
+ * from the value present in <key>. String keys are truncated to key_size-1
+ * bytes and always NUL-terminated; other types are copied verbatim.
+ */
+void stksess_setkey(struct stktable *t, struct stksess *ts, struct stktable_key *key)
+{
+ if (t->type != SMP_T_STR)
+ memcpy(ts->key.key, key->key, t->key_size);
+ else {
+ memcpy(ts->key.key, key->key, MIN(t->key_size - 1, key->key_len));
+ ts->key.key[MIN(t->key_size - 1, key->key_len)] = 0;
+ }
+}
+
+/* return a shard number for key <key> of len <len> present in table <t>. This
+ * takes into account the presence or absence of a peers section with shards
+ * and the number of shards, the table's hash_seed, and of course the key. The
+ * caller must pass a valid <key> and <len>. The shard number to be used by the
+ * entry is returned (from 1 to nb_shards, otherwise 0 for none).
+ */
+int stktable_get_key_shard(struct stktable *t, const void *key, size_t len)
+{
+ /* no peers section or no shards in the peers section */
+ if (!t->peers.p || !t->peers.p->nb_shards)
+ return 0;
+
+ /* hash the key with the table's seed and map it to 1..nb_shards */
+ return XXH64(key, len, t->hash_seed) % t->peers.p->nb_shards + 1;
+}
+
+/*
+ * Set the shard for <key> key of <ts> sticky session attached to <t> stick table.
+ * Use zero for stick-table without peers synchronisation.
+ */
+static void stksess_setkey_shard(struct stktable *t, struct stksess *ts,
+ struct stktable_key *key)
+{
+ size_t keylen;
+
+ /* string keys hash over their real length, fixed-size keys over the
+ * table's configured key size.
+ */
+ if (t->type == SMP_T_STR)
+ keylen = key->key_len;
+ else
+ keylen = t->key_size;
+
+ ts->shard = stktable_get_key_shard(t, key->key, keylen);
+}
+
+/*
+ * Init sticky session <ts> of table <t>. The data parts are cleared and <ts>
+ * is returned.
+ */
+static struct stksess *__stksess_init(struct stktable *t, struct stksess * ts)
+{
+ /* the data block sits just before the stksess in memory; wipe it */
+ memset((void *)ts - t->data_size, 0, t->data_size);
+ ts->ref_cnt = 0;
+ ts->shard = 0;
+ /* detach the node pointers so the entry is seen as unlinked */
+ ts->key.node.leaf_p = NULL;
+ ts->exp.node.leaf_p = NULL;
+ ts->upd.node.leaf_p = NULL;
+ ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
+ HA_RWLOCK_INIT(&ts->lock);
+ return ts;
+}
+
+/*
+ * Trash oldest <to_batch> sticky sessions from table <t>
+ * Returns number of trashed sticky sessions. It may actually trash less
+ * than expected if finding these requires too long a search time (e.g.
+ * most of them have ts->ref_cnt>0). Must be called with the table's write
+ * lock held (see stktable_trash_oldest() for the locked variant).
+ */
+int __stktable_trash_oldest(struct stktable *t, int to_batch)
+{
+ struct stksess *ts;
+ struct eb32_node *eb;
+ int max_search = to_batch * 2; // no more than 50% misses
+ int batched = 0;
+ int looped = 0;
+
+ /* start scanning from the oldest entries (wrapping-timer lookup) */
+ eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);
+
+ while (batched < to_batch) {
+
+ if (unlikely(!eb)) {
+ /* we might have reached the end of the tree, typically because
+ * <now_ms> is in the first half and we're first scanning the last
+ * half. Let's loop back to the beginning of the tree now if we
+ * have not yet visited it.
+ */
+ if (looped)
+ break;
+ looped = 1;
+ eb = eb32_first(&t->exps);
+ if (likely(!eb))
+ break;
+ }
+
+ /* bound the total search effort to avoid long stalls */
+ if (--max_search < 0)
+ break;
+
+ /* timer looks expired, detach it from the queue */
+ ts = eb32_entry(eb, struct stksess, exp);
+ eb = eb32_next(eb);
+
+ /* don't delete an entry which is currently referenced */
+ if (HA_ATOMIC_LOAD(&ts->ref_cnt) != 0)
+ continue;
+
+ eb32_delete(&ts->exp);
+
+ if (ts->expire != ts->exp.key) {
+ /* the entry was touched since it was queued: requeue it
+ * at its new expiration date instead of trashing it.
+ */
+ if (!tick_isset(ts->expire))
+ continue;
+
+ ts->exp.key = ts->expire;
+ eb32_insert(&t->exps, &ts->exp);
+
+ /* the update might have jumped beyond the next element,
+ * possibly causing a wrapping. We need to check whether
+ * the next element should be used instead. If the next
+ * element doesn't exist it means we're on the right
+ * side and have to check the first one then. If it
+ * exists and is closer, we must use it, otherwise we
+ * use the current one.
+ */
+ if (!eb)
+ eb = eb32_first(&t->exps);
+
+ if (!eb || tick_is_lt(ts->exp.key, eb->key))
+ eb = &ts->exp;
+
+ continue;
+ }
+
+ /* session expired, trash it */
+ ebmb_delete(&ts->key);
+ if (ts->upd.node.leaf_p) {
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ eb32_delete(&ts->upd);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ }
+ __stksess_free(t, ts);
+ batched++;
+ }
+
+ return batched;
+}
+
+/*
+ * Trash oldest <to_batch> sticky sessions from table <t>
+ * Returns number of trashed sticky sessions.
+ * This function locks the table (write lock, since entries are removed)
+ */
+int stktable_trash_oldest(struct stktable *t, int to_batch)
+{
+ int ret;
+
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+ ret = __stktable_trash_oldest(t, to_batch);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+ return ret;
+}
+/*
+ * Allocate and initialise a new sticky session.
+ * The new sticky session is returned or NULL in case of lack of memory.
+ * Sticky sessions should only be allocated this way, and must be freed using
+ * stksess_free(). Table <t>'s sticky session counter is increased. If <key>
+ * is not NULL, it is assigned to the new session. It must be called unlocked
+ * as it may rely on a lock to trash older entries.
+ */
+struct stksess *stksess_new(struct stktable *t, struct stktable_key *key)
+{
+ struct stksess *ts;
+ unsigned int current;
+
+ /* reserve our slot in the counter first so concurrent allocators
+ * correctly account for the table's capacity.
+ */
+ current = HA_ATOMIC_FETCH_ADD(&t->current, 1);
+
+ if (unlikely(current >= t->size)) {
+ /* the table was already full, we may have to purge entries */
+ if (t->nopurge || !stktable_trash_oldest(t, (t->size >> 8) + 1)) {
+ HA_ATOMIC_DEC(&t->current);
+ return NULL;
+ }
+ }
+
+ ts = pool_alloc(t->pool);
+ if (ts) {
+ /* the pool object starts with the data block; the stksess
+ * itself lives right after it.
+ */
+ ts = (void *)ts + round_ptr_size(t->data_size);
+ __stksess_init(t, ts);
+ if (key) {
+ stksess_setkey(t, ts, key);
+ stksess_setkey_shard(t, ts, key);
+ }
+ }
+
+ return ts;
+}
+
+/*
+ * Looks in table <t> for a sticky session matching key <key>.
+ * Returns pointer on requested sticky session or NULL if none was found.
+ * Must be called with the table lock held (see stktable_lookup_key()).
+ */
+struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *key)
+{
+ struct ebmb_node *eb;
+
+ /* string keys are compared up to key_size-1 bytes; other types use
+ * the full fixed key size.
+ */
+ if (t->type == SMP_T_STR)
+ eb = ebst_lookup_len(&t->keys, key->key, key->key_len+1 < t->key_size ? key->key_len : t->key_size-1);
+ else
+ eb = ebmb_lookup(&t->keys, key->key, t->key_size);
+
+ if (unlikely(!eb)) {
+ /* no session found */
+ return NULL;
+ }
+
+ return ebmb_entry(eb, struct stksess, key);
+}
+
+/*
+ * Looks in table <t> for a sticky session matching key <key>.
+ * Returns pointer on requested sticky session or NULL if none was found.
+ * The refcount of the found entry is increased and this function
+ * is protected using the table lock
+ */
+struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key)
+{
+ struct stksess *ts;
+
+ HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->lock);
+ ts = __stktable_lookup_key(t, key);
+ /* take a reference while still under the lock so the entry cannot be
+ * purged before the caller uses it.
+ */
+ if (ts)
+ HA_ATOMIC_INC(&ts->ref_cnt);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+ return ts;
+}
+
+/*
+ * Looks in table <t> for a sticky session with same key as <ts>.
+ * Returns pointer on requested sticky session or NULL if none was found.
+ * Must be called with the table lock held (see stktable_lookup()).
+ */
+struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts)
+{
+ struct ebmb_node *eb;
+
+ if (t->type == SMP_T_STR)
+ eb = ebst_lookup(&(t->keys), (char *)ts->key.key);
+ else
+ eb = ebmb_lookup(&(t->keys), ts->key.key, t->key_size);
+
+ if (unlikely(!eb))
+ return NULL;
+
+ return ebmb_entry(eb, struct stksess, key);
+}
+
+/*
+ * Looks in table <t> for a sticky session with same key as <ts>.
+ * Returns pointer on requested sticky session or NULL if none was found.
+ * The refcount of the found entry is increased and this function
+ * is protected using the table lock
+ */
+struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
+{
+ struct stksess *lts;
+
+ HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->lock);
+ lts = __stktable_lookup(t, ts);
+ /* grab a reference under the lock to protect against concurrent purge */
+ if (lts)
+ HA_ATOMIC_INC(&lts->ref_cnt);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+ return lts;
+}
+
+/* Update the expiration timer for <ts> but do not touch its expiration node.
+ * The table's expiration timer is updated if set.
+ * The node will be also inserted into the update tree if needed, at a position
+ * depending if the update is a local or coming from a remote node.
+ * If <decrefcnt> is set, the ts entry's ref_cnt will be decremented. The table's
+ * write lock may be taken.
+ */
+void stktable_touch_with_exp(struct stktable *t, struct stksess *ts, int local, int expire, int decrefcnt)
+{
+ struct eb32_node * eb;
+ int use_wrlock = 0;
+ int do_wakeup = 0;
+
+ if (expire != HA_ATOMIC_LOAD(&ts->expire)) {
+ /* we'll need to set the expiration and to wake up the expiration timer. */
+ HA_ATOMIC_STORE(&ts->expire, expire);
+ stktable_requeue_exp(t, ts);
+ }
+
+ /* If sync is enabled */
+ if (t->sync_task) {
+ try_lock_again:
+ /* We'll need to reliably check that the entry is in the tree.
+ * It's only inserted/deleted using a write lock so a read lock
+ * is sufficient to verify this. We may then need to upgrade it
+ * to perform an update (which is rare under load), and if the
+ * upgrade fails, we'll try again with a write lock directly.
+ */
+ if (use_wrlock)
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ else
+ HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->updt_lock);
+
+ if (local) {
+ /* Check if this entry is not in the tree or not
+ * scheduled for at least one peer.
+ */
+ if (!ts->upd.node.leaf_p
+ || (int)(t->commitupdate - ts->upd.key) >= 0
+ || (int)(ts->upd.key - t->localupdate) >= 0) {
+ /* Time to upgrade the read lock to write lock if needed */
+ if (!use_wrlock) {
+ if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &t->updt_lock) != 0) {
+ /* failed, try again */
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ use_wrlock = 1;
+ goto try_lock_again;
+ }
+ HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &t->updt_lock);
+ use_wrlock = 1;
+ }
+
+ /* here we're write-locked */
+
+ ts->upd.key = ++t->update;
+ t->localupdate = t->update;
+ eb32_delete(&ts->upd);
+ eb = eb32_insert(&t->updates, &ts->upd);
+ if (eb != &ts->upd) {
+ /* an older entry held this key: evict it and retry */
+ eb32_delete(eb);
+ eb32_insert(&t->updates, &ts->upd);
+ }
+ }
+ do_wakeup = 1;
+ }
+ else {
+ /* If this entry is not in the tree */
+
+ if (!ts->upd.node.leaf_p) {
+ /* Time to upgrade the read lock to write lock if needed */
+ if (!use_wrlock) {
+ if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &t->updt_lock) != 0) {
+ /* failed, try again */
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ use_wrlock = 1;
+ goto try_lock_again;
+ }
+ HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &t->updt_lock);
+ use_wrlock = 1;
+ }
+
+ /* here we're write-locked */
+
+ /* remote updates are offset by 2^31 — presumably to keep
+ * them apart from local ones in the key space; confirm
+ * against the peers protocol before changing.
+ */
+ ts->upd.key= (++t->update)+(2147483648U);
+ eb = eb32_insert(&t->updates, &ts->upd);
+ if (eb != &ts->upd) {
+ eb32_delete(eb);
+ eb32_insert(&t->updates, &ts->upd);
+ }
+ }
+ }
+
+ /* drop the lock now */
+ if (use_wrlock)
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ else
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ }
+
+ if (decrefcnt)
+ HA_ATOMIC_DEC(&ts->ref_cnt);
+
+ /* wake the peers sync task outside of any lock */
+ if (do_wakeup)
+ task_wakeup(t->sync_task, TASK_WOKEN_MSG);
+}
+
+/* Update the expiration timer for <ts> but do not touch its expiration node.
+ * The table's expiration timer is updated using the date of expiration coming from
+ * <t> stick-table configuration.
+ * The node will be also inserted into the update tree if needed, at a position
+ * considering the update is coming from a remote node (local=0, keep the
+ * entry's current expire date).
+ */
+void stktable_touch_remote(struct stktable *t, struct stksess *ts, int decrefcnt)
+{
+ stktable_touch_with_exp(t, ts, 0, ts->expire, decrefcnt);
+}
+
+/* Update the expiration timer for <ts> but do not touch its expiration node.
+ * The table's expiration timer is updated using the date of expiration coming from
+ * <t> stick-table configuration.
+ * The node will be also inserted into the update tree if needed, at a position
+ * considering the update was made locally (local=1, refresh the expire date
+ * from the table's configured expiration delay).
+ */
+void stktable_touch_local(struct stktable *t, struct stksess *ts, int decrefcnt)
+{
+ int expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
+
+ stktable_touch_with_exp(t, ts, 1, expire, decrefcnt);
+}
+/* Just decrease the ref_cnt of the current session. Does nothing if <ts> is NULL.
+ * NOTE(review): an earlier version of this comment mentioned taking the read
+ * lock, but no lock is taken here — the decrement is purely atomic. Confirm
+ * that all other ref_cnt writers now use atomic operations too.
+ */
+static void stktable_release(struct stktable *t, struct stksess *ts)
+{
+ if (!ts)
+ return;
+ HA_ATOMIC_DEC(&ts->ref_cnt);
+}
+
+/* Insert new sticky session <ts> in the table. It is assumed that it does not
+ * yet exist (the caller must check this). The table's timeout is updated if it
+ * is set. <ts> is returned if properly inserted, otherwise the one already
+ * present if any. Must be called with the table's write lock held.
+ */
+struct stksess *__stktable_store(struct stktable *t, struct stksess *ts)
+{
+ struct ebmb_node *eb;
+
+ eb = ebmb_insert(&t->keys, &ts->key, t->key_size);
+ if (likely(eb == &ts->key)) {
+ /* insertion succeeded: queue the entry in the expiry tree */
+ ts->exp.key = ts->expire;
+ eb32_insert(&t->exps, &ts->exp);
+ }
+ return ebmb_entry(eb, struct stksess, key); // most commonly this is <ts>
+}
+
+/* requeues the table's expiration task to take the recently added <ts> into
+ * account. This is performed atomically and doesn't require any lock.
+ * NOTE(review): despite the comment above, the CAS loop below runs under the
+ * table's write lock — presumably to avoid a race with the expiry task;
+ * confirm before simplifying.
+ */
+void stktable_requeue_exp(struct stktable *t, const struct stksess *ts)
+{
+ int old_exp, new_exp;
+ int expire = ts->expire;
+
+ /* nothing to do for tables with no expiration */
+ if (!t->expire)
+ return;
+
+ /* set the task's expire to the newest expiration date. */
+ old_exp = HA_ATOMIC_LOAD(&t->exp_task->expire);
+ new_exp = tick_first(expire, old_exp);
+
+ /* let's not go further if we're already up to date */
+ if (new_exp == old_exp)
+ return;
+
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+
+ while (new_exp != old_exp &&
+ !HA_ATOMIC_CAS(&t->exp_task->expire, &old_exp, new_exp)) {
+ __ha_cpu_relax();
+ new_exp = tick_first(expire, old_exp);
+ }
+
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+
+ task_queue(t->exp_task);
+}
+
+/* Returns a valid or initialized stksess for the specified stktable_key in the
+ * specified table, or NULL if the key was NULL, or if no entry was found nor
+ * could be created. The entry's expiration is updated. This function locks the
+ * table, and the refcount of the entry is increased.
+ */
+struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *key)
+{
+ struct stksess *ts, *ts2;
+
+ if (!key)
+ return NULL;
+
+ ts = stktable_lookup_key(table, key);
+ if (ts)
+ return ts;
+
+ /* No such entry exists, let's try to create a new one. this doesn't
+ * require locking yet.
+ */
+
+ ts = stksess_new(table, key);
+ if (!ts)
+ return NULL;
+
+ /* Now we're certain to have a ts. We need to store it. For this we'll
+ * need an exclusive access. We don't need an atomic upgrade, this is
+ * rare and an unlock+lock sequence will do the job fine. Given that
+ * this will not be atomic, the missing entry might appear in the mean
+ * time so we have to be careful that the one we try to insert is the
+ * one we find.
+ */
+
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
+
+ ts2 = __stktable_store(table, ts);
+
+ /* reference whichever entry actually ended up in the tree */
+ HA_ATOMIC_INC(&ts2->ref_cnt);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
+
+ if (unlikely(ts2 != ts)) {
+ /* another entry was added in the mean time, let's
+ * switch to it.
+ */
+ __stksess_free(table, ts);
+ ts = ts2;
+ }
+
+ stktable_requeue_exp(table, ts);
+ return ts;
+}
+
+/* Lookup for an entry with the same key and store the submitted
+ * stksess if not found. This function locks the table either shared or
+ * exclusively, and the refcount of the entry is increased.
+ */
+struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
+{
+ struct stksess *ts;
+
+ /* fast path: the entry already exists, return it referenced */
+ HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->lock);
+ ts = __stktable_lookup(table, nts);
+ if (ts) {
+ HA_ATOMIC_INC(&ts->ref_cnt);
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->lock);
+ return ts;
+ }
+ ts = nts;
+
+ /* let's increment it before switching to exclusive */
+ HA_ATOMIC_INC(&ts->ref_cnt);
+
+ if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->lock) != 0) {
+ /* upgrade to seek lock failed, let's drop and take */
+ HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->lock);
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
+ }
+ else
+ HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->lock);
+
+ /* now we're write-locked */
+
+ __stktable_store(table, ts);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
+
+ stktable_requeue_exp(table, ts);
+ return ts;
+}
+
+/*
+ * Task processing function to trash expired sticky sessions. A pointer to the
+ * task itself is returned since it never dies.
+ */
+struct task *process_table_expire(struct task *task, void *context, unsigned int state)
+{
+ struct stktable *t = context;
+ struct stksess *ts;
+ struct eb32_node *eb;
+ int updt_locked = 0;
+ int looped = 0;
+ int exp_next;
+
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
+ /* scan the expiry tree starting from the oldest possible timer */
+ eb = eb32_lookup_ge(&t->exps, now_ms - TIMER_LOOK_BACK);
+
+ while (1) {
+ if (unlikely(!eb)) {
+ /* we might have reached the end of the tree, typically because
+ * <now_ms> is in the first half and we're first scanning the last
+ * half. Let's loop back to the beginning of the tree now if we
+ * have not yet visited it.
+ */
+ if (looped)
+ break;
+ looped = 1;
+ eb = eb32_first(&t->exps);
+ if (likely(!eb))
+ break;
+ }
+
+ if (likely(tick_is_lt(now_ms, eb->key))) {
+ /* timer not expired yet, revisit it later */
+ exp_next = eb->key;
+ goto out_unlock;
+ }
+
+ /* timer looks expired, detach it from the queue */
+ ts = eb32_entry(eb, struct stksess, exp);
+ eb = eb32_next(eb);
+
+ /* don't delete an entry which is currently referenced */
+ if (HA_ATOMIC_LOAD(&ts->ref_cnt) != 0)
+ continue;
+
+ eb32_delete(&ts->exp);
+
+ if (!tick_is_expired(ts->expire, now_ms)) {
+ /* the entry was refreshed since queuing: requeue it at
+ * its new date unless it no longer expires at all.
+ */
+ if (!tick_isset(ts->expire))
+ continue;
+
+ ts->exp.key = ts->expire;
+ eb32_insert(&t->exps, &ts->exp);
+
+ /* the update might have jumped beyond the next element,
+ * possibly causing a wrapping. We need to check whether
+ * the next element should be used instead. If the next
+ * element doesn't exist it means we're on the right
+ * side and have to check the first one then. If it
+ * exists and is closer, we must use it, otherwise we
+ * use the current one.
+ */
+ if (!eb)
+ eb = eb32_first(&t->exps);
+
+ if (!eb || tick_is_lt(ts->exp.key, eb->key))
+ eb = &ts->exp;
+ continue;
+ }
+
+ /* session expired, trash it */
+ ebmb_delete(&ts->key);
+ if (ts->upd.node.leaf_p) {
+ /* take the update lock once and keep it for the rest of
+ * the scan; it is released in the common unlock path.
+ */
+ if (!updt_locked) {
+ updt_locked = 1;
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ }
+ eb32_delete(&ts->upd);
+ }
+ __stksess_free(t, ts);
+ }
+
+ /* We have found no task to expire in any tree */
+ exp_next = TICK_ETERNITY;
+
+out_unlock:
+ task->expire = exp_next;
+ if (updt_locked)
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->updt_lock);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+ return task;
+}
+
+/* Perform minimal stick table initialization. In case of error, the
+ * function will return 0 and <err_msg> will contain hints about the
+ * error and it is up to the caller to free it.
+ *
+ * Returns 1 on success
+ */
+int stktable_init(struct stktable *t, char **err_msg)
+{
+ int peers_retval = 0;
+
+ /* seed the shard hash with the table name */
+ t->hash_seed = XXH64(t->id, t->idlen, 0);
+
+ if (t->size) {
+ t->keys = EB_ROOT_UNIQUE;
+ memset(&t->exps, 0, sizeof(t->exps));
+ t->updates = EB_ROOT_UNIQUE;
+ HA_RWLOCK_INIT(&t->lock);
+
+ /* each pool object holds the data block, the stksess and the key */
+ t->pool = create_pool("sticktables", sizeof(struct stksess) + round_ptr_size(t->data_size) + t->key_size, MEM_F_SHARED);
+
+ if ( t->expire ) {
+ t->exp_task = task_new_anywhere();
+ if (!t->exp_task)
+ goto mem_error;
+ t->exp_task->process = process_table_expire;
+ t->exp_task->context = (void *)t;
+ }
+ /* register with the peers section unless it is disabled/stopped */
+ if (t->peers.p && t->peers.p->peers_fe && !(t->peers.p->peers_fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+ peers_retval = peers_register_table(t->peers.p, t);
+ }
+
+ if (t->pool == NULL || peers_retval)
+ goto mem_error;
+ }
+ if (t->write_to.name) {
+ struct stktable *table;
+
+ /* postresolve write_to table */
+ table = stktable_find_by_name(t->write_to.name);
+ if (!table) {
+ memprintf(err_msg, "write-to: table '%s' doesn't exist", t->write_to.name);
+ ha_free(&t->write_to.name); /* no longer need this */
+ return 0;
+ }
+ ha_free(&t->write_to.name); /* no longer need this */
+ if (table->write_to.ptr) {
+ memprintf(err_msg, "write-to: table '%s' is already used as a source table", table->id);
+ return 0;
+ }
+ if (table->type != t->type) {
+ memprintf(err_msg, "write-to: cannot mix table types ('%s' has '%s' type and '%s' has '%s' type)",
+ table->id, stktable_types[table->type].kw,
+ t->id, stktable_types[t->type].kw);
+ return 0;
+ }
+ if (table->key_size != t->key_size) {
+ memprintf(err_msg, "write-to: cannot mix key sizes ('%s' has '%ld' key_size and '%s' has '%ld' key_size)",
+ table->id, (long)table->key_size,
+ t->id, (long)t->key_size);
+ return 0;
+ }
+
+ t->write_to.t = table;
+ }
+ return 1;
+
+ mem_error:
+ memprintf(err_msg, "memory allocation error");
+ return 0;
+}
+
+/* Performs stick table cleanup: it's meant to be called after the table
+ * has been initialized with stktable_init(), else it will lead to undefined
+ * behavior.
+ *
+ * However it does not free the table pointer itself
+ */
+void stktable_deinit(struct stktable *t)
+{
+ if (!t)
+ return;
+ /* task_destroy() accepts a NULL task when no expiry task was created */
+ task_destroy(t->exp_task);
+ pool_destroy(t->pool);
+}
+
+/*
+ * Configuration keywords of known table types, indexed by sample type.
+ * Fields: keyword, flags, default key size in bytes. Types allowing a
+ * "len" argument carry STK_F_CUSTOM_KEYSIZE.
+ */
+struct stktable_type stktable_types[SMP_TYPES] = {
+ [SMP_T_SINT] = { "integer", 0, 4 },
+ [SMP_T_IPV4] = { "ip", 0, 4 },
+ [SMP_T_IPV6] = { "ipv6", 0, 16 },
+ [SMP_T_STR] = { "string", STK_F_CUSTOM_KEYSIZE, 32 },
+ [SMP_T_BIN] = { "binary", STK_F_CUSTOM_KEYSIZE, 32 }
+};
+
+/*
+ * Parse table type configuration.
+ * Returns 0 on successful parsing, else 1.
+ * <myidx> is set at next configuration <args> index.
+ */
+int stktable_parse_type(char **args, int *myidx, unsigned long *type, size_t *key_size, const char *file, int linenum)
+{
+ /* scan the known type keywords for a match with the current argument */
+ for (*type = 0; *type < SMP_TYPES; (*type)++) {
+ if (!stktable_types[*type].kw)
+ continue;
+ if (strcmp(args[*myidx], stktable_types[*type].kw) != 0)
+ continue;
+
+ *key_size = stktable_types[*type].default_size;
+ (*myidx)++;
+
+ /* optional "len <n>" argument for variable-size key types */
+ if (stktable_types[*type].flags & STK_F_CUSTOM_KEYSIZE) {
+ if (strcmp("len", args[*myidx]) == 0) {
+ char *stop;
+
+ (*myidx)++;
+ *key_size = strtol(args[*myidx], &stop, 10);
+ if (*stop != '\0' || !*key_size) {
+ ha_alert("parsing [%s:%d] : 'len' expects a positive integer argument.\n", file, linenum);
+ return 1;
+ }
+ if (*type == SMP_T_STR) {
+ /* null terminated string needs +1 for '\0'. */
+ (*key_size)++;
+ }
+ (*myidx)++;
+ }
+ }
+ return 0;
+ }
+ ha_alert("parsing [%s:%d] : %s: unknown type '%s'.\n", file, linenum, args[0], args[*myidx]);
+ return 1;
+}
+
/* Reserve some space for data type <type>. There are two optional arguments
 * at <sa> and <sa2> to configure this data type, and they can be NULL if
 * unused for a given type.
 * Returns PE_NONE (0) if OK or an error code among :
 *  - PE_ENUM_OOR if <type> does not exist
 *  - PE_EXIST if <type> is already registered
 *  - PE_ARG_NOT_USE if <sa>/<sa2> was provided but not expected
 *  - PE_ARG_MISSING if <sa>/<sa2> was expected but not provided
 *  - PE_ARG_VALUE_OOR if type is an array and <sa> is out of array size range.
 */
int stktable_alloc_data_type(struct stktable *t, int type, const char *sa, const char *sa2)
{
	if (type >= STKTABLE_DATA_TYPES)
		return PE_ENUM_OOR;

	if (t->data_ofs[type])
		/* already allocated */
		return PE_EXIST;

	/* scalar types store exactly one element */
	t->data_nbelem[type] = 1;
	if (stktable_data_types[type].is_array) {
		/* arrays take their element count on first argument */
		if (!sa)
			return PE_ARG_MISSING;
		t->data_nbelem[type] = atoi(sa);
		if (!t->data_nbelem[type] || (t->data_nbelem[type] > STKTABLE_MAX_DT_ARRAY_SIZE))
			return PE_ARG_VALUE_OOR;
		/* for arrays, the type-specific argument (if any) is the second one */
		sa = sa2;
	}

	switch (stktable_data_types[type].arg_type) {
	case ARG_T_NONE:
		if (sa)
			return PE_ARG_NOT_USED;
		break;
	case ARG_T_INT:
		if (!sa)
			return PE_ARG_MISSING;
		t->data_arg[type].i = atoi(sa);
		break;
	case ARG_T_DELAY:
		if (!sa)
			return PE_ARG_MISSING;
		/* parse_time_err() returns NULL on success, or a pointer to the
		 * first invalid character on failure.
		 */
		sa = parse_time_err(sa, &t->data_arg[type].u, TIME_UNIT_MS);
		if (sa)
			return PE_ARG_INVC; /* invalid char */
		break;
	}

	/* data are stored at negative offsets from the session pointer, so the
	 * new block is placed below everything allocated so far.
	 */
	t->data_size += t->data_nbelem[type] * stktable_type_size(stktable_data_types[type].std_type);
	t->data_ofs[type] = -t->data_size;
	return PE_NONE;
}
+
/*
 * Parse a line with <linenum> as number in <file> configuration file to configure
 * the stick-table with <t> as address and <id> as ID.
 * <peers> provides the "peers" section pointer only if this function is called
 * from a "peers" section.
 * <nid> is the stick-table name which is sent over the network. It must be equal
 * to <id> if this stick-table is parsed from a proxy section, and prefixed by <peers>
 * "peers" section name followed by a '/' character if parsed from a "peers" section.
 * This is the responsibility of the caller to check this.
 * Return an error status with ERR_* flags set if required, 0 if no error was encountered.
 */
int parse_stick_table(const char *file, int linenum, char **args,
                      struct stktable *t, char *id, char *nid, struct peers *peers)
{
	int err_code = 0;
	int idx = 1;
	unsigned int val;

	if (!id || !*id) {
		ha_alert("parsing [%s:%d] : %s: ID not provided.\n", file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_ABORT;
		goto out;
	}

	/* Store the "peers" section if this function is called from a "peers" section. */
	if (peers) {
		t->peers.p = peers;
		idx++;
	}

	t->id = id;
	t->idlen = strlen(id);
	t->nid = nid;
	t->type = (unsigned int)-1; /* not set yet; checked at the end of parsing */
	t->conf.file = file;
	t->conf.line = linenum;
	t->write_to.name = NULL;

	/* scan the remaining arguments; each keyword consumes itself plus its
	 * own arguments and leaves <idx> on the next keyword.
	 */
	while (*args[idx]) {
		const char *err;

		if (strcmp(args[idx], "size") == 0) {
			idx++;
			if (!*(args[idx])) {
				ha_alert("parsing [%s:%d] : %s: missing argument after '%s'.\n",
					 file, linenum, args[0], args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			if ((err = parse_size_err(args[idx], &t->size))) {
				ha_alert("parsing [%s:%d] : %s: unexpected character '%c' in argument of '%s'.\n",
					 file, linenum, args[0], *err, args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			idx++;
		}
		/* This argument does not exist in "peers" section. */
		else if (!peers && strcmp(args[idx], "peers") == 0) {
			idx++;
			if (!*(args[idx])) {
				ha_alert("parsing [%s:%d] : %s: missing argument after '%s'.\n",
					 file, linenum, args[0], args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			/* only the name is stored here; resolution to the actual
			 * peers section happens in a later pass.
			 */
			ha_free(&t->peers.name);
			t->peers.name = strdup(args[idx++]);
		}
		else if (strcmp(args[idx], "expire") == 0) {
			idx++;
			if (!*(args[idx])) {
				ha_alert("parsing [%s:%d] : %s: missing argument after '%s'.\n",
					 file, linenum, args[0], args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			err = parse_time_err(args[idx], &val, TIME_UNIT_MS);
			if (err == PARSE_TIME_OVER) {
				ha_alert("parsing [%s:%d]: %s: timer overflow in argument <%s> to <%s>, maximum value is 2147483647 ms (~24.8 days).\n",
					 file, linenum, args[0], args[idx], args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			else if (err == PARSE_TIME_UNDER) {
				ha_alert("parsing [%s:%d]: %s: timer underflow in argument <%s> to <%s>, minimum non-null value is 1 ms.\n",
					 file, linenum, args[0], args[idx], args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			else if (err) {
				ha_alert("parsing [%s:%d] : %s: unexpected character '%c' in argument of '%s'.\n",
					 file, linenum, args[0], *err, args[idx-1]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			t->expire = val;
			idx++;
		}
		else if (strcmp(args[idx], "nopurge") == 0) {
			t->nopurge = 1;
			idx++;
		}
		else if (strcmp(args[idx], "type") == 0) {
			idx++;
			if (stktable_parse_type(args, &idx, &t->type, &t->key_size, file, linenum) != 0) {
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			/* idx already points to next arg */
		}
		else if (strcmp(args[idx], "store") == 0) {
			int type, err;
			char *cw, *nw, *sa, *sa2;

			idx++;
			nw = args[idx];
			while (*nw) {
				/* the "store" keyword supports a comma-separated list.
				 * Each item may carry up to two parenthesized arguments,
				 * e.g. "gpc(3)" or "gpc_rate(3,10s)". The argument string
				 * is split IN PLACE: '(' , the inner ',' and ')' are
				 * overwritten with '\0' so that <cw>, <sa> and <sa2>
				 * each point to a NUL-terminated token.
				 */
				cw = nw;
				sa = NULL; /* store arg */
				sa2 = NULL;
				while (*nw && *nw != ',') {
					if (*nw == '(') {
						*nw = 0;
						sa = ++nw;
						while (*nw != ')') {
							if (!*nw) {
								ha_alert("parsing [%s:%d] : %s: missing closing parenthesis after store option '%s'.\n",
									 file, linenum, args[0], cw);
								err_code |= ERR_ALERT | ERR_FATAL;
								goto out;
							}
							if (*nw == ',') {
								*nw = '\0';
								sa2 = nw + 1;
							}
							nw++;
						}
						*nw = '\0';
					}
					nw++;
				}
				if (*nw)
					*nw++ = '\0';
				type = stktable_get_data_type(cw);
				if (type < 0) {
					ha_alert("parsing [%s:%d] : %s: unknown store option '%s'.\n",
						 file, linenum, args[0], cw);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}

				err = stktable_alloc_data_type(t, type, sa, sa2);
				switch (err) {
				case PE_NONE: break;
				case PE_EXIST:
					/* duplicate store options are tolerated with a warning */
					ha_warning("parsing [%s:%d]: %s: store option '%s' already enabled, ignored.\n",
						   file, linenum, args[0], cw);
					err_code |= ERR_WARN;
					break;

				case PE_ARG_MISSING:
					ha_alert("parsing [%s:%d] : %s: missing argument to store option '%s'.\n",
						 file, linenum, args[0], cw);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;

				case PE_ARG_NOT_USED:
					ha_alert("parsing [%s:%d] : %s: unexpected argument to store option '%s'.\n",
						 file, linenum, args[0], cw);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				case PE_ARG_VALUE_OOR:
					ha_alert("parsing [%s:%d] : %s: array size is out of allowed range (1-%d) for store option '%s'.\n",
						 file, linenum, args[0], STKTABLE_MAX_DT_ARRAY_SIZE, cw);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;

				default:
					ha_alert("parsing [%s:%d] : %s: error when processing store option '%s'.\n",
						 file, linenum, args[0], cw);
					err_code |= ERR_ALERT | ERR_FATAL;
					goto out;
				}
			}
			idx++;
			/* the legacy scalar counters and their array-based successors
			 * share the same fetch keywords, so mixing them is rejected.
			 */
			if (t->data_ofs[STKTABLE_DT_GPT] && t->data_ofs[STKTABLE_DT_GPT0]) {
				ha_alert("parsing [%s:%d] : %s: simultaneous usage of 'gpt' and 'gpt0' in a same table is not permitted as 'gpt' overrides 'gpt0'.\n",
					 file, linenum, args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			else if (t->data_ofs[STKTABLE_DT_GPC] && (t->data_ofs[STKTABLE_DT_GPC0] || t->data_ofs[STKTABLE_DT_GPC1])) {
				ha_alert("parsing [%s:%d] : %s: simultaneous usage of 'gpc' and 'gpc[0/1]' in a same table is not permitted as 'gpc' overrides 'gpc[0/1]'.\n",
					 file, linenum, args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
			else if (t->data_ofs[STKTABLE_DT_GPC_RATE] && (t->data_ofs[STKTABLE_DT_GPC0_RATE] || t->data_ofs[STKTABLE_DT_GPC1_RATE])) {
				ha_alert("parsing [%s:%d] : %s: simultaneous usage of 'gpc_rate' and 'gpc[0/1]_rate' in a same table is not permitted as 'gpc_rate' overrides 'gpc[0/1]_rate'.\n",
					 file, linenum, args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;
			}
		}
		else if (strcmp(args[idx], "srvkey") == 0) {
			char *keytype;
			idx++;
			keytype = args[idx];
			if (strcmp(keytype, "name") == 0) {
				t->server_key_type = STKTABLE_SRV_NAME;
			}
			else if (strcmp(keytype, "addr") == 0) {
				t->server_key_type = STKTABLE_SRV_ADDR;
			}
			else {
				ha_alert("parsing [%s:%d] : %s : unknown server key type '%s'.\n",
					 file, linenum, args[0], keytype);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;

			}
			idx++;
		}
		else if (strcmp(args[idx], "write-to") == 0) {
			char *write_to;

			idx++;
			write_to = args[idx];
			if (!write_to[0]) {
				ha_alert("parsing [%s:%d] : %s : write-to requires table name.\n",
					 file, linenum, args[0]);
				err_code |= ERR_ALERT | ERR_FATAL;
				goto out;

			}
			/* only the name is stored here; the target table is resolved
			 * in a post-parsing pass.
			 */
			ha_free(&t->write_to.name);
			t->write_to.name = strdup(write_to);
			idx++;
		}
		else {
			ha_alert("parsing [%s:%d] : %s: unknown argument '%s'.\n",
				 file, linenum, args[0], args[idx]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
	}

	/* "size" and "type" are the only mandatory settings */
	if (!t->size) {
		ha_alert("parsing [%s:%d] : %s: missing size.\n",
			 file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}

	if (t->type == (unsigned int)-1) {
		ha_alert("parsing [%s:%d] : %s: missing type.\n",
			 file, linenum, args[0]);
		err_code |= ERR_ALERT | ERR_FATAL;
		goto out;
	}

 out:
	return err_code;
}
+
/* Prepares a stktable_key from a sample <smp> to search into table <t>.
 * Note that the sample *is* modified and that the returned key may point
 * to it, so the sample must not be modified afterwards before the lookup.
 * Returns NULL if the sample could not be converted (eg: no matching type),
 * otherwise a pointer to the static stktable_key filled with what is needed
 * for the lookup.
 *
 * NOTE(review): static_table_key is a shared static object, so the returned
 * key is only valid until the next call from the same context — presumably
 * it is thread-local; confirm against its declaration.
 */
struct stktable_key *smp_to_stkey(struct sample *smp, struct stktable *t)
{
	/* Convert sample to the table's key type. */
	if (!sample_convert(smp, t->type))
		return NULL;

	/* Fill static_table_key. */
	switch (t->type) {

	case SMP_T_IPV4:
		static_table_key.key = &smp->data.u.ipv4;
		static_table_key.key_len = 4;
		break;

	case SMP_T_IPV6:
		static_table_key.key = &smp->data.u.ipv6;
		static_table_key.key_len = 16;
		break;

	case SMP_T_SINT:
		/* The stick table requires a 32-bit unsigned int while "sint"
		 * is a signed 64-bit int, so we truncate it in place and only
		 * use the first 4 bytes as the key.
		 */
		smp->data.u.sint = (unsigned int)smp->data.u.sint;
		static_table_key.key = &smp->data.u.sint;
		static_table_key.key_len = 4;
		break;

	case SMP_T_STR:
		/* make sure the string is NUL-terminated and owned */
		if (!smp_make_safe(smp))
			return NULL;
		static_table_key.key = smp->data.u.str.area;
		static_table_key.key_len = smp->data.u.str.data;
		break;

	case SMP_T_BIN:
		if (smp->data.u.str.data < t->key_size) {
			/* This type needs padding with 0 up to key_size. */
			if (!smp_make_rw(smp))
				return NULL;

			/* if the buffer is too small for the padded key, try to
			 * get a larger one by duplicating the sample, then give
			 * up if it is still too small.
			 */
			if (smp->data.u.str.size < t->key_size)
				if (!smp_dup(smp))
					return NULL;
			if (smp->data.u.str.size < t->key_size)
				return NULL;
			memset(smp->data.u.str.area + smp->data.u.str.data, 0,
			       t->key_size - smp->data.u.str.data);
			smp->data.u.str.data = t->key_size;
		}
		static_table_key.key = smp->data.u.str.area;
		static_table_key.key_len = smp->data.u.str.data;
		break;

	default: /* impossible case. */
		return NULL;
	}

	return &static_table_key;
}
+
+/*
+ * Process a fetch + format conversion as defined by the sample expression <expr>
+ * on request or response considering the <opt> parameter. Returns either NULL if
+ * no key could be extracted, or a pointer to the converted result stored in
+ * static_table_key in format <table_type>. If <smp> is not NULL, it will be reset
+ * and its flags will be initialized so that the caller gets a copy of the input
+ * sample, and knows why it was not accepted (eg: SMP_F_MAY_CHANGE is present
+ * without SMP_OPT_FINAL). The output will be usable like this :
+ *
+ * return MAY_CHANGE FINAL Meaning for the sample
+ * NULL 0 * Not present and will never be (eg: header)
+ * NULL 1 0 Not present or unstable, could change (eg: req_len)
+ * NULL 1 1 Not present, will not change anymore
+ * smp 0 * Present and will not change (eg: header)
+ * smp 1 0 not possible
+ * smp 1 1 Present, last known value (eg: request length)
+ */
+struct stktable_key *stktable_fetch_key(struct stktable *t, struct proxy *px, struct session *sess, struct stream *strm,
+ unsigned int opt, struct sample_expr *expr, struct sample *smp)
+{
+ if (smp)
+ memset(smp, 0, sizeof(*smp));
+
+ smp = sample_process(px, sess, strm, opt, expr, smp);
+ if (!smp)
+ return NULL;
+
+ if ((smp->flags & SMP_F_MAY_CHANGE) && !(opt & SMP_OPT_FINAL))
+ return NULL; /* we can only use stable samples */
+
+ return smp_to_stkey(smp, t);
+}
+
+/*
+ * Returns 1 if sample expression <expr> result can be converted to table key of
+ * type <table_type>, otherwise zero. Used in configuration check.
+ */
+int stktable_compatible_sample(struct sample_expr *expr, unsigned long table_type)
+{
+ int out_type;
+
+ if (table_type >= SMP_TYPES || !stktable_types[table_type].kw)
+ return 0;
+
+ out_type = smp_expr_output_type(expr);
+
+ /* Convert sample. */
+ if (!sample_casts[out_type][table_type])
+ return 0;
+
+ return 1;
+}
+
/* Extra data types processing : after the last one, some room may remain
 * before STKTABLE_DATA_TYPES that may be used to register extra data types
 * at run time (see stktable_register_data_store()).
 *
 * Each entry maps a "store" keyword to its storage type (STD_T_*), an
 * optional argument type (ARG_T_*, e.g. the period of a rate counter),
 * whether it is an array, and the as_is/is_local flags.
 */
struct stktable_data_type stktable_data_types[STKTABLE_DATA_TYPES] = {
	[STKTABLE_DT_SERVER_ID]     = { .name = "server_id",      .std_type = STD_T_SINT, .as_is = 1 },
	[STKTABLE_DT_GPT0]          = { .name = "gpt0",           .std_type = STD_T_UINT, .as_is = 1 },
	[STKTABLE_DT_GPC0]          = { .name = "gpc0",           .std_type = STD_T_UINT },
	[STKTABLE_DT_GPC0_RATE]     = { .name = "gpc0_rate",      .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_CONN_CNT]      = { .name = "conn_cnt",       .std_type = STD_T_UINT },
	[STKTABLE_DT_CONN_RATE]     = { .name = "conn_rate",      .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_CONN_CUR]      = { .name = "conn_cur",       .std_type = STD_T_UINT, .is_local = 1 },
	[STKTABLE_DT_SESS_CNT]      = { .name = "sess_cnt",       .std_type = STD_T_UINT },
	[STKTABLE_DT_SESS_RATE]     = { .name = "sess_rate",      .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_HTTP_REQ_CNT]  = { .name = "http_req_cnt",   .std_type = STD_T_UINT },
	[STKTABLE_DT_HTTP_REQ_RATE] = { .name = "http_req_rate",  .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_HTTP_ERR_CNT]  = { .name = "http_err_cnt",   .std_type = STD_T_UINT },
	[STKTABLE_DT_HTTP_ERR_RATE] = { .name = "http_err_rate",  .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_BYTES_IN_CNT]  = { .name = "bytes_in_cnt",   .std_type = STD_T_ULL },
	[STKTABLE_DT_BYTES_IN_RATE] = { .name = "bytes_in_rate",  .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_BYTES_OUT_CNT] = { .name = "bytes_out_cnt",  .std_type = STD_T_ULL },
	[STKTABLE_DT_BYTES_OUT_RATE]= { .name = "bytes_out_rate", .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_GPC1]          = { .name = "gpc1",           .std_type = STD_T_UINT },
	[STKTABLE_DT_GPC1_RATE]     = { .name = "gpc1_rate",      .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	[STKTABLE_DT_SERVER_KEY]    = { .name = "server_key",     .std_type = STD_T_DICT, .as_is = 1 },
	[STKTABLE_DT_HTTP_FAIL_CNT] = { .name = "http_fail_cnt",  .std_type = STD_T_UINT },
	[STKTABLE_DT_HTTP_FAIL_RATE]= { .name = "http_fail_rate", .std_type = STD_T_FRQP, .arg_type = ARG_T_DELAY },
	/* array-based generic tags/counters; they supersede gpt0/gpc0/gpc1 */
	[STKTABLE_DT_GPT]           = { .name = "gpt",            .std_type = STD_T_UINT, .is_array = 1, .as_is = 1 },
	[STKTABLE_DT_GPC]           = { .name = "gpc",            .std_type = STD_T_UINT, .is_array = 1 },
	[STKTABLE_DT_GPC_RATE]      = { .name = "gpc_rate",       .std_type = STD_T_FRQP, .is_array = 1, .arg_type = ARG_T_DELAY },
};
+
+/* Registers stick-table extra data type with index <idx>, name <name>, type
+ * <std_type> and arg type <arg_type>. If the index is negative, the next free
+ * index is automatically allocated. The allocated index is returned, or -1 if
+ * no free index was found or <name> was already registered. The <name> is used
+ * directly as a pointer, so if it's not stable, the caller must allocate it.
+ */
+int stktable_register_data_store(int idx, const char *name, int std_type, int arg_type)
+{
+ if (idx < 0) {
+ for (idx = 0; idx < STKTABLE_DATA_TYPES; idx++) {
+ if (!stktable_data_types[idx].name)
+ break;
+
+ if (strcmp(stktable_data_types[idx].name, name) == 0)
+ return -1;
+ }
+ }
+
+ if (idx >= STKTABLE_DATA_TYPES)
+ return -1;
+
+ if (stktable_data_types[idx].name != NULL)
+ return -1;
+
+ stktable_data_types[idx].name = name;
+ stktable_data_types[idx].std_type = std_type;
+ stktable_data_types[idx].arg_type = arg_type;
+ return idx;
+}
+
+/*
+ * Returns the data type number for the stktable_data_type whose name is <name>,
+ * or <0 if not found.
+ */
+int stktable_get_data_type(char *name)
+{
+ int type;
+
+ for (type = 0; type < STKTABLE_DATA_TYPES; type++) {
+ if (!stktable_data_types[type].name)
+ continue;
+ if (strcmp(name, stktable_data_types[type].name) == 0)
+ return type;
+ }
+ /* For backwards compatibility */
+ if (strcmp(name, "server_name") == 0)
+ return STKTABLE_DT_SERVER_KEY;
+ return -1;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns true if found, false otherwise. The input
+ * type is STR so that input samples are converted to string (since all types
+ * can be converted to strings), then the function casts the string again into
+ * the table's type. This is a double conversion, but in the future we might
+ * support automatic input types to perform the cast on the fly.
+ */
+static int sample_conv_in_table(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->data.type = SMP_T_BOOL;
+ smp->data.u.sint = !!ts;
+ smp->flags = SMP_F_VOL_TEST;
+ stktable_release(t, ts);
+ return 1;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the data rate received from clients in bytes/s
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_bytes_in_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_BYTES_IN_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the cumulated number of connections for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_conn_cnt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the number of concurrent connections for the
+ * key if the key is present in the table, otherwise zero, so that comparisons
+ * can be easily performed. If the inspected parameter is not stored in the
+ * table, <not found> is returned.
+ */
+static int sample_conv_table_conn_cur(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the rate of incoming connections from the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_conn_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_CONN_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the expiration delay for the key if the key is
+ * present in the table, otherwise the default value provided as second argument
+ * if any, if not (no default value), <not found> is returned.
+ */
+static int sample_conv_table_expire(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) { /* key not present */
+ if (arg_p[1].type == ARGT_STOP)
+ return 0;
+
+ /* default value */
+ smp->data.u.sint = arg_p[1].data.sint;
+ return 1;
+ }
+
+ smp->data.u.sint = tick_remain(now_ms, ts->expire);
+
+ stktable_release(t, ts);
+ return 1;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the time the key remains unused if the key is
+ * present in the table, otherwise the default value provided as second argument
+ * if any, if not (no default value), <not found> is returned.
+ */
+static int sample_conv_table_idle(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) { /* key not present */
+ if (arg_p[1].type == ARGT_STOP)
+ return 0;
+
+ /* default value */
+ smp->data.u.sint = arg_p[1].data.sint;
+ return 1;
+ }
+
+ smp->data.u.sint = tick_remain(tick_remain(now_ms, ts->expire), t->expire);
+
+ stktable_release(t, ts);
+ return 1;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the data rate sent to clients in bytes/s
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_bytes_out_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_BYTES_OUT_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg_p(1), and looks
+ * it up into this table. Returns the value of the GPT[arg_p(0)] tag for the key
+ * if the key is present in the table, otherwise false, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+ unsigned int idx;
+
+ idx = arg_p[0].data.sint;
+
+ t = arg_p[1].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPT, idx);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the value of the GPT0 tag for the key
+ * if the key is present in the table, otherwise false, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpt0(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_GPT0);
+ if (!ptr)
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPT, 0);
+
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg_p(1), and looks
+ * it up into this table. Returns the value of the GPC[arg_p(0)] counter for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpc(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+ unsigned int idx;
+
+ idx = arg_p[0].data.sint;
+
+ t = arg_p[1].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC, idx);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg_p(1), and looks
+ * it up into this table. Returns the event rate of the GPC[arg_p(0)] counter
+ * for the key if the key is present in the table, otherwise zero, so that
+ * comparisons can be easily performed. If the inspected parameter is not
+ * stored in the table, <not found> is returned.
+ */
+static int sample_conv_table_gpc_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+ unsigned int idx;
+
+ idx = arg_p[0].data.sint;
+
+ t = arg_p[1].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC_RATE, idx);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_GPC_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the value of the GPC0 counter for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpc0(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_GPC0);
+ if (!ptr) {
+ /* fallback on the gpc array */
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC, 0);
+ }
+
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr;
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the event rate of the GPC0 counter for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpc0_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ /* prefer the legacy dedicated gpc0_rate slot, then the gpc_rate array */
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_GPC0_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_GPC0_RATE].u);
+ else {
+ /* fallback on the gpc array */
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC_RATE, 0);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_GPC_RATE].u);
+ }
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the value of the GPC1 counter for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpc1(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_GPC1);
+ if (!ptr) {
+ /* fallback on the gpc array (index 1) */
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC, 1);
+ }
+
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: gpc1 not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the event rate of the GPC1 counter for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_gpc1_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ /* prefer the legacy dedicated gpc1_rate slot, then the gpc_rate array */
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_GPC1_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_GPC1_RATE].u);
+ else {
+ /* fallback on the gpc array (index 1) */
+ ptr = stktable_data_ptr_idx(t, ts, STKTABLE_DT_GPC_RATE, 1);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_GPC_RATE].u);
+ }
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the cumulated number of HTTP request errors
+ * for the key if the key is present in the table, otherwise zero, so that
+ * comparisons can be easily performed. If the inspected parameter is not stored
+ * in the table, <not found> is returned.
+ */
+static int sample_conv_table_http_err_cnt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the HTTP request error rate for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_http_err_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the cumulated number of HTTP response failures
+ * for the key if the key is present in the table, otherwise zero, so that
+ * comparisons can be easily performed. If the inspected parameter is not stored
+ * in the table, <not found> is returned.
+ */
+static int sample_conv_table_http_fail_cnt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_FAIL_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the HTTP response failure rate for the key
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_http_fail_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_FAIL_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_FAIL_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the cumulated number of HTTP requests for the
+ * key if the key is present in the table, otherwise zero, so that comparisons
+ * can be easily performed. If the inspected parameter is not stored in the
+ * table, <not found> is returned.
+ */
+static int sample_conv_table_http_req_cnt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the HTTP request rate for the key if the key
+ * is present in the table, otherwise zero, so that comparisons can be easily
+ * performed. If the inspected parameter is not stored in the table, <not found>
+ * is returned.
+ */
+static int sample_conv_table_http_req_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the volume of data received from clients in kbytes
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_kbytes_in(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_BYTES_IN_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_ull) >> 10; /* bytes -> kbytes */
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the volume of data sent to clients in kbytes
+ * if the key is present in the table, otherwise zero, so that comparisons can
+ * be easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_kbytes_out(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_BYTES_OUT_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_ull) >> 10; /* bytes -> kbytes */
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the server ID associated with the key if the
+ * key is present in the table, otherwise zero, so that comparisons can be
+ * easily performed. If the inspected parameter is not stored in the table,
+ * <not found> is returned.
+ */
+static int sample_conv_table_server_id(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_sint); /* server id is stored signed */
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: server-id not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the cumulated number of sessions for the
+ * key if the key is present in the table, otherwise zero, so that comparisons
+ * can be easily performed. If the inspected parameter is not stored in the
+ * table, <not found> is returned.
+ */
+static int sample_conv_table_sess_cnt(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_SESS_CNT);
+ if (ptr)
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: counter not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the session rate for the key if the key is
+ * present in the table, otherwise zero, so that comparisons can be easily
+ * performed. If the inspected parameter is not stored in the table, <not found>
+ * is returned.
+ */
+static int sample_conv_table_sess_rate(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+ void *ptr;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_SESS_RATE);
+ if (ptr)
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ t->data_arg[STKTABLE_DT_SESS_RATE].u);
+
+ stktable_release(t, ts);
+ return !!ptr; /* 0 = <not found>: rate not stored in this table */
+}
+
+/* Casts sample <smp> to the type of the table specified in arg(0), and looks
+ * it up into this table. Returns the amount of concurrent connections tracking
+ * the same key if the key is present in the table, otherwise zero, so that
+ * comparisons can be easily performed. If the inspected parameter is not
+ * stored in the table, <not found> is returned.
+ */
+static int sample_conv_table_trackers(const struct arg *arg_p, struct sample *smp, void *private)
+{
+ struct stktable *t;
+ struct stktable_key *key;
+ struct stksess *ts;
+
+ t = arg_p[0].data.t;
+
+ key = smp_to_stkey(smp, t);
+ if (!key)
+ return 0;
+
+ ts = stktable_lookup_key(t, key);
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+
+ if (!ts) /* key not present */
+ return 1;
+
+ /* NOTE(review): ref_cnt also counts the reference taken by our own
+ * lookup just above — confirm whether the reported value is expected
+ * to include it (possible off-by-one vs "concurrent trackers").
+ */
+ smp->data.u.sint = HA_ATOMIC_LOAD(&ts->ref_cnt);
+
+ stktable_release(t, ts);
+ return 1;
+}
+
+/* This function increments the gpc counter at index 'rule->arg.gpc.idx' of the
+ * array on the tracksc counter of index 'rule->arg.gpc.sc' stored into the
+ * <stream> or directly in the session <sess> if <stream> is set to NULL
+ *
+ * This function always returns ACT_RET_CONT and parameter flags is unused.
+ */
+static enum act_return action_inc_gpc(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct stksess *ts;
+ struct stkctr *stkctr;
+
+ /* Extract the stksess, return OK if no stksess available. */
+ /* NOTE(review): unlike action_inc_gpc1(), s->stkctr / sess->stkctr is
+ * dereferenced here without a NULL check — confirm the arrays cannot be
+ * NULL on this path (the parser rejects tune.stick-counters == 0).
+ */
+ if (s)
+ stkctr = &s->stkctr[rule->arg.gpc.sc];
+ else
+ stkctr = &sess->stkctr[rule->arg.gpc.sc];
+
+ ts = stkctr_entry(stkctr);
+ if (ts) {
+ void *ptr1, *ptr2;
+
+ /* First, update gpc_rate if it's tracked. Second, update its gpc if tracked. */
+ ptr1 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC_RATE, rule->arg.gpc.idx);
+ ptr2 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC, rule->arg.gpc.idx);
+
+ if (ptr1 || ptr2) {
+ /* take the write lock once for both updates */
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ if (ptr1)
+ update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u, 1);
+
+ if (ptr2)
+ stktable_data_cast(ptr2, std_t_uint)++;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+ }
+ return ACT_RET_CONT;
+}
+
+/* Same as action_inc_gpc() but for gpc0 only, with fallback on index 0 of the
+ * gpc array when the legacy dedicated gpc0/gpc0_rate slots are not stored. */
+static enum act_return action_inc_gpc0(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct stksess *ts;
+ struct stkctr *stkctr;
+ unsigned int period = 0;
+
+ /* Extract the stksess, return OK if no stksess available. */
+ /* NOTE(review): unlike action_inc_gpc1(), s->stkctr / sess->stkctr is
+ * dereferenced here without a NULL check — confirm it cannot be NULL.
+ */
+ if (s)
+ stkctr = &s->stkctr[rule->arg.gpc.sc];
+ else
+ stkctr = &sess->stkctr[rule->arg.gpc.sc];
+
+ ts = stkctr_entry(stkctr);
+ if (ts) {
+ void *ptr1, *ptr2;
+
+ /* First, update gpc0_rate if it's tracked. Second, update its gpc0 if tracked. */
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0_RATE);
+ if (ptr1) {
+ period = stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u;
+ }
+ else {
+ /* fallback on the gpc array */
+ ptr1 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC_RATE, 0);
+ if (ptr1)
+ period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+ }
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC0);
+ if (!ptr2) {
+ /* fallback on the gpc array */
+ ptr2 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC, 0);
+ }
+
+ if (ptr1 || ptr2) {
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ if (ptr1)
+ update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+ period, 1);
+
+ if (ptr2)
+ stktable_data_cast(ptr2, std_t_uint)++;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+ }
+ return ACT_RET_CONT;
+}
+
+/* Same as action_inc_gpc() but for gpc1 only, with fallback on index 1 of the
+ * gpc array when the legacy dedicated gpc1/gpc1_rate slots are not stored. */
+static enum act_return action_inc_gpc1(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ struct stksess *ts;
+ struct stkctr *stkctr = NULL;
+ unsigned int period = 0;
+
+ /* Extract the stksess, return OK if no stksess available. */
+ if (s && s->stkctr)
+ stkctr = &s->stkctr[rule->arg.gpc.sc];
+ else if (sess->stkctr)
+ stkctr = &sess->stkctr[rule->arg.gpc.sc];
+ else
+ return ACT_RET_CONT;
+
+ ts = stkctr_entry(stkctr);
+ if (ts) {
+ void *ptr1, *ptr2;
+
+ /* First, update gpc1_rate if it's tracked. Second, update its gpc1 if tracked. */
+ ptr1 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC1_RATE);
+ if (ptr1) {
+ period = stkctr->table->data_arg[STKTABLE_DT_GPC1_RATE].u;
+ }
+ else {
+ /* fallback on the gpc array */
+ ptr1 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC_RATE, 1);
+ if (ptr1)
+ period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+ }
+
+ ptr2 = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPC1);
+ if (!ptr2) {
+ /* fallback on the gpc array */
+ ptr2 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC, 1);
+ }
+
+ if (ptr1 || ptr2) {
+ /* take the write lock once for both updates */
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ if (ptr1)
+ update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+ period, 1);
+
+ if (ptr2)
+ stktable_data_cast(ptr2, std_t_uint)++;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* If data was modified, we need to touch to re-schedule sync */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+ }
+ return ACT_RET_CONT;
+}
+
+/* This function is a common parser for actions incrementing the GPC
+ * (General Purpose Counters). It understands the formats:
+ *
+ *   sc-inc-gpc(<gpc IDX>,<track ID>)
+ *   sc-inc-gpc0([<track ID>])
+ *   sc-inc-gpc1([<track ID>])
+ *
+ * It returns ACT_RET_PRS_ERR if fails and <err> is filled with an error
+ * message. Otherwise it returns ACT_RET_PRS_OK.
+ */
+static enum act_parse_ret parse_inc_gpc(const char **args, int *arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ const char *cmd_name = args[*arg-1];
+ char *error;
+
+ if (!global.tune.nb_stk_ctr) {
+ memprintf(err, "Cannot use '%s', stick-counters are disabled via tune.stick-counters", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* dispatch on what follows the common "sc-inc-gpc" prefix */
+ cmd_name += strlen("sc-inc-gpc");
+ if (*cmd_name == '(') {
+ cmd_name++; /* skip the '(' */
+ rule->arg.gpc.idx = strtoul(cmd_name, &error, 10); /* Convert the gpc index. */
+ if (*error != ',') {
+ memprintf(err, "Missing gpc ID '%s'. Expects sc-inc-gpc(<GPC ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+ else {
+ cmd_name = error + 1; /* skip the ',' */
+ rule->arg.gpc.sc = strtol(cmd_name, &error, 10); /* Convert the track ID. */
+ if (*error != ')') {
+ memprintf(err, "invalid stick table track ID '%s'. Expects sc-inc-gpc(<GPC ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (rule->arg.gpc.sc >= global.tune.nb_stk_ctr) {
+ memprintf(err, "invalid stick table track ID '%s'. The max allowed ID is %d (tune.stick-counters)",
+ args[*arg-1], global.tune.nb_stk_ctr-1);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ rule->action_ptr = action_inc_gpc;
+ }
+ else if (*cmd_name == '0' ||*cmd_name == '1') {
+ char c = *cmd_name;
+
+ cmd_name++;
+ if (*cmd_name == '\0') {
+ /* default stick table id. */
+ rule->arg.gpc.sc = 0;
+ } else {
+ /* parse the stick table id. */
+ if (*cmd_name != '(') {
+ memprintf(err, "invalid stick table track ID. Expects %s(<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+ cmd_name++; /* jump the '(' */
+ rule->arg.gpc.sc = strtol(cmd_name, &error, 10); /* Convert stick table id. */
+ if (*error != ')') {
+ memprintf(err, "invalid stick table track ID. Expects %s(<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (rule->arg.gpc.sc >= global.tune.nb_stk_ctr) {
+ memprintf(err, "invalid stick table track ID. The max allowed ID is %d (tune.stick-counters)",
+ global.tune.nb_stk_ctr-1);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ if (c == '1')
+ rule->action_ptr = action_inc_gpc1;
+ else
+ rule->action_ptr = action_inc_gpc0;
+ }
+ else {
+ /* neither '(' nor '0'/'1': unknown action form */
+ memprintf(err, "invalid gpc ID '%s'. Expects sc-inc-gpc(<GPC ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+ rule->action = ACT_CUSTOM;
+ return ACT_RET_PRS_OK;
+}
+
+/* This function sets the gpt at index 'rule->arg.gpt.idx' of the array on the
+ * tracksc counter of index 'rule->arg.gpt.sc' stored into the <stream> or
+ * directly in the session <sess> if <stream> is set to NULL. This gpt is
+ * set to the value computed by the expression 'rule->arg.gpt.expr' or if
+ * 'rule->arg.gpt.expr' is null directly to the value of 'rule->arg.gpt.value'.
+ *
+ * This function always returns ACT_RET_CONT and parameter flags is unused.
+ */
+static enum act_return action_set_gpt(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ void *ptr;
+ struct stksess *ts;
+ struct stkctr *stkctr = NULL;
+ unsigned int value = 0;
+ struct sample *smp;
+ int smp_opt_dir;
+
+ /* Extract the stksess, return OK if no stksess available. */
+ if (s && s->stkctr)
+ stkctr = &s->stkctr[rule->arg.gpt.sc];
+ else if (sess->stkctr)
+ stkctr = &sess->stkctr[rule->arg.gpt.sc];
+ else
+ return ACT_RET_CONT;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return ACT_RET_CONT;
+
+ /* Store the sample in the required sc, and ignore errors. */
+ ptr = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPT, rule->arg.gpt.idx);
+ if (ptr) {
+
+ if (!rule->arg.gpt.expr)
+ value = (unsigned int)(rule->arg.gpt.value);
+ else {
+ /* derive the sample direction from where the rule sits */
+ switch (rule->from) {
+ case ACT_F_TCP_REQ_CON: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_REQ_SES: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_REQ_CNT: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_RES_CNT: smp_opt_dir = SMP_OPT_DIR_RES; break;
+ case ACT_F_HTTP_REQ: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_HTTP_RES: smp_opt_dir = SMP_OPT_DIR_RES; break;
+ default:
+ send_log(px, LOG_ERR, "stick table: internal error while setting gpt%u.", rule->arg.gpt.idx);
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+ ha_alert("stick table: internal error while executing setting gpt%u.\n", rule->arg.gpt.idx);
+ return ACT_RET_CONT;
+ }
+
+ /* Fetch and cast the expression. */
+ smp = sample_fetch_as_type(px, sess, s, smp_opt_dir|SMP_OPT_FINAL, rule->arg.gpt.expr, SMP_T_SINT);
+ if (!smp) {
+ send_log(px, LOG_WARNING, "stick table: invalid expression or data type while setting gpt%u.", rule->arg.gpt.idx);
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+ ha_alert("stick table: invalid expression or data type while setting gpt%u.\n", rule->arg.gpt.idx);
+ return ACT_RET_CONT;
+ }
+ value = (unsigned int)(smp->data.u.sint);
+ }
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ stktable_data_cast(ptr, std_t_uint) = value;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* re-schedule a peers sync for the modified entry */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+
+ return ACT_RET_CONT;
+}
+
+/* Same as action_set_gpt() but for gpt0 only, with fallback on index 0 of the
+ * gpt array when the legacy dedicated gpt0 slot is not stored. Always returns
+ * ACT_RET_CONT.
+ */
+static enum act_return action_set_gpt0(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ void *ptr;
+ struct stksess *ts;
+ struct stkctr *stkctr = NULL;
+ unsigned int value = 0;
+ struct sample *smp;
+ int smp_opt_dir;
+
+ /* Extract the stksess, return OK if no stksess available. */
+ if (s && s->stkctr)
+ stkctr = &s->stkctr[rule->arg.gpt.sc];
+ else if (sess->stkctr)
+ stkctr = &sess->stkctr[rule->arg.gpt.sc];
+ else
+ return ACT_RET_CONT;
+
+ ts = stkctr_entry(stkctr);
+ if (!ts)
+ return ACT_RET_CONT;
+
+ /* Store the sample in the required sc, and ignore errors. */
+ ptr = stktable_data_ptr(stkctr->table, ts, STKTABLE_DT_GPT0);
+ if (!ptr)
+ ptr = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPT, 0); /* fallback on the gpt array */
+
+ if (ptr) {
+ if (!rule->arg.gpt.expr)
+ value = (unsigned int)(rule->arg.gpt.value);
+ else {
+ /* derive the sample direction from where the rule sits */
+ switch (rule->from) {
+ case ACT_F_TCP_REQ_CON: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_REQ_SES: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_REQ_CNT: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_RES_CNT: smp_opt_dir = SMP_OPT_DIR_RES; break;
+ case ACT_F_HTTP_REQ: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_HTTP_RES: smp_opt_dir = SMP_OPT_DIR_RES; break;
+ default:
+ send_log(px, LOG_ERR, "stick table: internal error while setting gpt0.");
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+ ha_alert("stick table: internal error while executing setting gpt0.\n");
+ return ACT_RET_CONT;
+ }
+
+ /* Fetch and cast the expression. */
+ smp = sample_fetch_as_type(px, sess, s, smp_opt_dir|SMP_OPT_FINAL, rule->arg.gpt.expr, SMP_T_SINT);
+ if (!smp) {
+ send_log(px, LOG_WARNING, "stick table: invalid expression or data type while setting gpt0.");
+ if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+ ha_alert("stick table: invalid expression or data type while setting gpt0.\n");
+ return ACT_RET_CONT;
+ }
+ value = (unsigned int)(smp->data.u.sint);
+ }
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ stktable_data_cast(ptr, std_t_uint) = value;
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ /* re-schedule a peers sync for the modified entry */
+ stktable_touch_local(stkctr->table, ts, 0);
+ }
+
+ return ACT_RET_CONT;
+}
+
+/* This function is a parser for the "sc-set-gpt" and "sc-set-gpt0" actions.
+ * It understands the formats:
+ *
+ *   sc-set-gpt(<gpt IDX>,<track ID>) <expression>
+ *   sc-set-gpt0(<track ID>) <expression>
+ *
+ * It returns ACT_RET_PRS_ERR if fails and <err> is filled with an error message.
+ * Otherwise, it returns ACT_RET_PRS_OK and the variable 'rule->arg.gpt.expr'
+ * is filled with the pointer to the expression to execute or NULL if the arg
+ * is directly an integer stored into 'rule->arg.gpt.value'.
+ */
+static enum act_parse_ret parse_set_gpt(const char **args, int *arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ const char *cmd_name = args[*arg-1];
+ char *error;
+ int smp_val;
+
+ if (!global.tune.nb_stk_ctr) {
+ memprintf(err, "Cannot use '%s', stick-counters are disabled via tune.stick-counters", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* dispatch on what follows the common "sc-set-gpt" prefix */
+ cmd_name += strlen("sc-set-gpt");
+ if (*cmd_name == '(') {
+ cmd_name++; /* skip the '(' */
+ rule->arg.gpt.idx = strtoul(cmd_name, &error, 10); /* Convert the gpt index. */
+ if (*error != ',') {
+ memprintf(err, "Missing gpt ID '%s'. Expects sc-set-gpt(<GPT ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+ else {
+ cmd_name = error + 1; /* skip the ',' */
+ rule->arg.gpt.sc = strtol(cmd_name, &error, 10); /* Convert the track ID. */
+ if (*error != ')') {
+ memprintf(err, "invalid stick table track ID '%s'. Expects sc-set-gpt(<GPT ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (rule->arg.gpt.sc >= global.tune.nb_stk_ctr) {
+ memprintf(err, "invalid stick table track ID '%s'. The max allowed ID is %d",
+ args[*arg-1], global.tune.nb_stk_ctr-1);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ rule->action_ptr = action_set_gpt;
+ }
+ else if (*cmd_name == '0') {
+ cmd_name++;
+ if (*cmd_name == '\0') {
+ /* default stick table id. */
+ rule->arg.gpt.sc = 0;
+ } else {
+ /* parse the stick table id. */
+ if (*cmd_name != '(') {
+ memprintf(err, "invalid stick table track ID '%s'. Expects sc-set-gpt0(<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+ cmd_name++; /* jump the '(' */
+ rule->arg.gpt.sc = strtol(cmd_name, &error, 10); /* Convert stick table id. */
+ if (*error != ')') {
+ memprintf(err, "invalid stick table track ID '%s'. Expects sc-set-gpt0(<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ if (rule->arg.gpt.sc >= global.tune.nb_stk_ctr) {
+ memprintf(err, "invalid stick table track ID '%s'. The max allowed ID is %d",
+ args[*arg-1], global.tune.nb_stk_ctr-1);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+ rule->action_ptr = action_set_gpt0;
+ }
+ else {
+ /* neither '(' nor '0': unknown action form */
+ memprintf(err, "invalid gpt ID '%s'. Expects sc-set-gpt(<GPT ID>,<Track ID>)", args[*arg-1]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* value may be either an integer or an expression */
+ rule->arg.gpt.expr = NULL;
+ rule->arg.gpt.value = strtol(args[*arg], &error, 10);
+ if (*error == '\0') {
+ /* valid integer, skip it */
+ (*arg)++;
+ } else {
+ rule->arg.gpt.expr = sample_parse_expr((char **)args, arg, px->conf.args.file,
+ px->conf.args.line, err, &px->conf.args, NULL);
+ if (!rule->arg.gpt.expr)
+ return ACT_RET_PRS_ERR;
+
+ /* check the fetch is usable from where the rule sits */
+ switch (rule->from) {
+ case ACT_F_TCP_REQ_CON: smp_val = SMP_VAL_FE_CON_ACC; break;
+ case ACT_F_TCP_REQ_SES: smp_val = SMP_VAL_FE_SES_ACC; break;
+ case ACT_F_TCP_REQ_CNT: smp_val = SMP_VAL_FE_REQ_CNT; break;
+ case ACT_F_TCP_RES_CNT: smp_val = SMP_VAL_BE_RES_CNT; break;
+ case ACT_F_HTTP_REQ: smp_val = SMP_VAL_FE_HRQ_HDR; break;
+ case ACT_F_HTTP_RES: smp_val = SMP_VAL_BE_HRS_HDR; break;
+ default:
+ memprintf(err, "internal error, unexpected rule->from=%d, please report this bug!", rule->from);
+ return ACT_RET_PRS_ERR;
+ }
+ if (!(rule->arg.gpt.expr->fetch->val & smp_val)) {
+ memprintf(err, "fetch method '%s' extracts information from '%s', none of which is available here", args[*arg-1],
+ sample_src_names(rule->arg.gpt.expr->fetch->use));
+ /* NOTE(review): plain free() of a sample expr may leak its
+ * arguments — confirm whether release_sample_expr() is needed.
+ */
+ free(rule->arg.gpt.expr);
+ return ACT_RET_PRS_ERR;
+ }
+ }
+
+ rule->action = ACT_CUSTOM;
+
+ return ACT_RET_PRS_OK;
+}
+
+/* This function updates the gpc at index 'rule->arg.gpc.idx' of the array on
+ * the tracksc counter of index 'rule->arg.gpc.sc' stored into the <stream> or
+ * directly in the session <sess> if <stream> is set to NULL. This gpc is
+ * incremented by the value computed by the expression 'rule->arg.gpc.expr',
+ * or, if 'rule->arg.gpc.expr' is null, directly by the value of
+ * 'rule->arg.gpc.value'. The associated gpc_rate, when stored, is updated by
+ * the same amount.
+ *
+ * This function always returns ACT_RET_CONT and parameter flags is unused.
+ */
+static enum act_return action_add_gpc(struct act_rule *rule, struct proxy *px,
+                                      struct session *sess, struct stream *s, int flags)
+{
+	void *ptr1, *ptr2;
+	struct stksess *ts;
+	struct stkctr *stkctr;
+	unsigned int value = 0;
+	struct sample *smp;
+	int smp_opt_dir;
+
+	/* Extract the stksess, return OK if no stksess available. */
+	if (s)
+		stkctr = &s->stkctr[rule->arg.gpc.sc];
+	else
+		stkctr = &sess->stkctr[rule->arg.gpc.sc];
+
+	ts = stkctr_entry(stkctr);
+	if (!ts)
+		return ACT_RET_CONT;
+
+	/* First, update gpc_rate if it's tracked. Second, update its gpc if tracked. */
+	ptr1 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC_RATE, rule->arg.gpc.idx);
+	ptr2 = stktable_data_ptr_idx(stkctr->table, ts, STKTABLE_DT_GPC, rule->arg.gpc.idx);
+
+	if (ptr1 || ptr2) {
+		if (!rule->arg.gpc.expr)
+			value = (unsigned int)(rule->arg.gpc.value);
+		else {
+			/* map the rule's origin to the direction the sample
+			 * expression must be fetched from.
+			 */
+			switch (rule->from) {
+			case ACT_F_TCP_REQ_CON: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+			case ACT_F_TCP_REQ_SES: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+			case ACT_F_TCP_REQ_CNT: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+			case ACT_F_TCP_RES_CNT: smp_opt_dir = SMP_OPT_DIR_RES; break;
+			case ACT_F_HTTP_REQ: smp_opt_dir = SMP_OPT_DIR_REQ; break;
+			case ACT_F_HTTP_RES: smp_opt_dir = SMP_OPT_DIR_RES; break;
+			default:
+				send_log(px, LOG_ERR, "stick table: internal error while setting gpc%u.", rule->arg.gpc.idx);
+				if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+					ha_alert("stick table: internal error while executing setting gpc%u.\n", rule->arg.gpc.idx);
+				return ACT_RET_CONT;
+			}
+
+			/* Fetch and cast the expression. */
+			smp = sample_fetch_as_type(px, sess, s, smp_opt_dir|SMP_OPT_FINAL, rule->arg.gpc.expr, SMP_T_SINT);
+			if (!smp) {
+				send_log(px, LOG_WARNING, "stick table: invalid expression or data type while setting gpc%u.", rule->arg.gpc.idx);
+				if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+					ha_alert("stick table: invalid expression or data type while setting gpc%u.\n", rule->arg.gpc.idx);
+				return ACT_RET_CONT;
+			}
+			value = (unsigned int)(smp->data.u.sint);
+		}
+
+		if (value) {
+			/* only update the value if non-null increment */
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+			if (ptr1)
+				update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+						       stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u, value);
+
+			if (ptr2)
+				stktable_data_cast(ptr2, std_t_uint) += value;
+
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+		}
+		/* always touch the table so that it doesn't expire */
+		stktable_touch_local(stkctr->table, ts, 0);
+	}
+
+	return ACT_RET_CONT;
+}
+
+/* This function is a parser for the "sc-add-gpc" action. It understands the
+ * format:
+ *
+ *   sc-add-gpc(<gpc IDX>,<track ID>) <expression>
+ *
+ * It returns ACT_RET_PRS_ERR if fails and <err> is filled with an error message.
+ * Otherwise, it returns ACT_RET_PRS_OK and the variable 'rule->arg.gpc.expr'
+ * is filled with the pointer to the expression to execute or NULL if the arg
+ * is directly an integer stored into 'rule->arg.gpc.value'.
+ */
+static enum act_parse_ret parse_add_gpc(const char **args, int *arg, struct proxy *px,
+                                        struct act_rule *rule, char **err)
+{
+	const char *cmd_name = args[*arg-1];
+	char *error;
+	int smp_val;
+
+	/* parse the "(<idx>,<sc>)" suffix of the action keyword itself */
+	cmd_name += strlen("sc-add-gpc");
+	if (*cmd_name != '(') {
+		memprintf(err, "Missing or invalid arguments for '%s'. Expects sc-add-gpc(<GPC ID>,<Track ID>)", args[*arg-1]);
+		return ACT_RET_PRS_ERR;
+	}
+	cmd_name++; /* skip the '(' */
+	rule->arg.gpc.idx = strtoul(cmd_name, &error, 10); /* Convert stick table id. */
+	if (*error != ',') {
+		memprintf(err, "Missing gpc ID. Expects %s(<GPC ID>,<Track ID>)", args[*arg-1])	;
+		return ACT_RET_PRS_ERR;
+	}
+	else {
+		cmd_name = error + 1; /* skip the ',' */
+		rule->arg.gpc.sc = strtol(cmd_name, &error, 10); /* Convert stick table id. */
+		if (*error != ')') {
+			memprintf(err, "invalid stick table track ID '%s'. Expects %s(<GPC ID>,<Track ID>)", cmd_name, args[*arg-1]);
+			return ACT_RET_PRS_ERR;
+		}
+
+		if (rule->arg.gpc.sc >= MAX_SESS_STKCTR) {
+			memprintf(err, "invalid stick table track ID '%s' for '%s'. The max allowed ID is %d",
+			          cmd_name, args[*arg-1], MAX_SESS_STKCTR-1);
+			return ACT_RET_PRS_ERR;
+		}
+	}
+	rule->action_ptr = action_add_gpc;
+
+	/* value may be either an integer or an expression */
+	rule->arg.gpc.expr = NULL;
+	rule->arg.gpc.value = strtol(args[*arg], &error, 10);
+	if (*error == '\0') {
+		/* valid integer, skip it */
+		(*arg)++;
+	} else {
+		rule->arg.gpc.expr = sample_parse_expr((char **)args, arg, px->conf.args.file,
+		                                       px->conf.args.line, err, &px->conf.args, NULL);
+		if (!rule->arg.gpc.expr)
+			return ACT_RET_PRS_ERR;
+
+		/* check that the fetch is usable in the rule's evaluation context */
+		switch (rule->from) {
+		case ACT_F_TCP_REQ_CON: smp_val = SMP_VAL_FE_CON_ACC; break;
+		case ACT_F_TCP_REQ_SES: smp_val = SMP_VAL_FE_SES_ACC; break;
+		case ACT_F_TCP_REQ_CNT: smp_val = SMP_VAL_FE_REQ_CNT; break;
+		case ACT_F_TCP_RES_CNT: smp_val = SMP_VAL_BE_RES_CNT; break;
+		case ACT_F_HTTP_REQ: smp_val = SMP_VAL_FE_HRQ_HDR; break;
+		case ACT_F_HTTP_RES: smp_val = SMP_VAL_BE_HRS_HDR; break;
+		default:
+			memprintf(err, "internal error, unexpected rule->from=%d, please report this bug!", rule->from);
+			return ACT_RET_PRS_ERR;
+		}
+
+		if (!(rule->arg.gpc.expr->fetch->val & smp_val)) {
+			memprintf(err, "fetch method '%s' extracts information from '%s', none of which is available here", args[*arg-1],
+			          sample_src_names(rule->arg.gpc.expr->fetch->use));
+			/* NOTE(review): a plain free() may leak the expression's
+			 * sub-arguments; release_sample_expr() looks more
+			 * appropriate here — confirm against the sample API.
+			 */
+			free(rule->arg.gpc.expr);
+			return ACT_RET_PRS_ERR;
+		}
+	}
+
+	rule->action = ACT_CUSTOM;
+
+	return ACT_RET_PRS_OK;
+}
+
+/* Report into a temp integer how many entries are currently in use in the
+ * stick-table received as the (mandatory, single) table-type argument.
+ */
+static int
+smp_fetch_table_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stktable *t = args->data.t;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = t->current;
+	return 1;
+}
+
+/* Report into a temp integer how many entries remain available (size minus
+ * used) in the stick-table received as the (mandatory, single) table-type
+ * argument.
+ */
+static int
+smp_fetch_table_avl(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = args->data.t->size - args->data.t->current;
+	return 1;
+}
+
+/* Returns a pointer to a stkctr depending on the fetch keyword name.
+ * It is designed to be called as sc[0-9]_* sc_* or src_* exclusively.
+ * sc[0-9]_* will return a pointer to the respective field in the
+ * stream <l4>. sc_* requires an UINT argument specifying the stick
+ * counter number. src_* will fill a locally allocated structure with
+ * the table and entry corresponding to what is specified with src_*.
+ * NULL may be returned if the designated stkctr is not tracked. For
+ * the sc_* and sc[0-9]_* forms, an optional table argument may be
+ * passed. When present, the currently tracked key is then looked up
+ * in the specified table instead of the current table. The purpose is
+ * to be able to convert multiple values per key (eg: have gpc0 from
+ * multiple tables). <strm> is allowed to be NULL, in which case only
+ * the session will be consulted.
+ */
+struct stkctr *
+smp_fetch_sc_stkctr(struct session *sess, struct stream *strm, const struct arg *args, const char *kw, struct stkctr *stkctr)
+{
+	struct stkctr *stkptr;
+	struct stksess *stksess;
+	/* 3rd char of the keyword discriminates the variant: a digit for
+	 * "sc[0-9]_*", '_' for "sc_*", anything else (>9) for "src_*".
+	 */
+	unsigned int num = kw[2] - '0';
+	int arg = 0;
+
+	if (num == '_' - '0') {
+		/* sc_* variant, args[0] = ctr# (mandatory) */
+		num = args[arg++].data.sint;
+	}
+	else if (num > 9) { /* src_* variant, args[0] = table */
+		struct stktable_key *key;
+		struct connection *conn = objt_conn(sess->origin);
+		struct sample smp;
+
+		if (!conn)
+			return NULL;
+
+		/* Fetch source address in a sample. */
+		smp.px = NULL;
+		smp.sess = sess;
+		smp.strm = strm;
+		if (!smp_fetch_src || !smp_fetch_src(empty_arg_list, &smp, "src", NULL))
+			return NULL;
+
+		/* Converts into key. */
+		key = smp_to_stkey(&smp, args->data.t);
+		if (!key)
+			return NULL;
+
+		/* fill the caller-provided stkctr; the entry may be NULL if
+		 * the source is not yet in the table.
+		 */
+		stkctr->table = args->data.t;
+		stkctr_set_entry(stkctr, stktable_lookup_key(stkctr->table, key));
+		return stkctr;
+	}
+
+	/* Here, <num> contains the counter number from 0 to 9 for
+	 * the sc[0-9]_ form, or even higher using sc_(num) if needed.
+	 * args[arg] is the first optional argument. We first lookup the
+	 * ctr form the stream, then from the session if it was not there.
+	 * But we must be sure the counter does not exceed global.tune.nb_stk_ctr.
+	 */
+	if (num >= global.tune.nb_stk_ctr)
+		return NULL;
+
+	stkptr = NULL;
+	if (strm && strm->stkctr)
+		stkptr = &strm->stkctr[num];
+	if (!strm || !stkptr || !stkctr_entry(stkptr)) {
+		/* fall back to the session's counters when the stream has none */
+		if (sess->stkctr)
+			stkptr = &sess->stkctr[num];
+		else
+			return NULL;
+		if (!stkctr_entry(stkptr))
+			return NULL;
+	}
+
+	stksess = stkctr_entry(stkptr);
+	if (!stksess)
+		return NULL;
+
+	if (unlikely(args[arg].type == ARGT_TAB)) {
+		/* an alternate table was specified, let's look up the same key there */
+		stkctr->table = args[arg].data.t;
+		stkctr_set_entry(stkctr, stktable_lookup(stkctr->table, stksess));
+		return stkctr;
+	}
+	return stkptr;
+}
+
+/* same as smp_fetch_sc_stkctr() but dedicated to src_* and can create
+ * the entry if it doesn't exist yet. This is needed for a few fetch
+ * functions which need to create an entry, such as src_inc_gpc* and
+ * src_clr_gpc*. args[0] must be the table argument; the caller-provided
+ * <stkctr> is filled and returned, or NULL if no connection/key exists.
+ */
+struct stkctr *
+smp_create_src_stkctr(struct session *sess, struct stream *strm, const struct arg *args, const char *kw, struct stkctr *stkctr)
+{
+	struct stktable_key *key;
+	struct connection *conn = objt_conn(sess->origin);
+	struct sample smp;
+
+	/* only the src_* keyword family may create entries here */
+	if (strncmp(kw, "src_", 4) != 0)
+		return NULL;
+
+	if (!conn)
+		return NULL;
+
+	/* Fetch source address in a sample. */
+	smp.px = NULL;
+	smp.sess = sess;
+	smp.strm = strm;
+	if (!smp_fetch_src || !smp_fetch_src(empty_arg_list, &smp, "src", NULL))
+		return NULL;
+
+	/* Converts into key. */
+	key = smp_to_stkey(&smp, args->data.t);
+	if (!key)
+		return NULL;
+
+	/* get-or-create: stktable_get_entry() allocates the entry if absent */
+	stkctr->table = args->data.t;
+	stkctr_set_entry(stkctr, stktable_get_entry(stkctr->table, key));
+	return stkctr;
+}
+
+/* Return a boolean telling whether the designated stream counter is
+ * currently being tracked. Supports being called as "sc[0-9]_tracked" only.
+ */
+static int
+smp_fetch_sc_tracked(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr local;
+	struct stkctr *ctr;
+
+	ctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &local);
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_BOOL;
+	smp->data.u.sint = (ctr != NULL);
+
+	/* a locally-filled stkctr holds a reference that must be dropped */
+	if (ctr == &local)
+		stktable_release(ctr->table, stkctr_entry(ctr));
+
+	return 1;
+}
+
+/* set <smp> to the General Purpose Tag of index set as first arg
+ * to value from the stream's tracked frontend counters or from the src.
+ * Supports being called as "sc_get_gpt(<gpt-idx>,<sc-idx>[,<table>])" or
+ * "src_get_gpt(<gpt-idx>[,<table>])" only. Value zero is returned if
+ * the key is new or gpt is not stored.
+ */
+static int
+smp_fetch_sc_get_gpt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int idx;
+
+	/* args[0] = gpt index; remaining args are for the stkctr lookup */
+	idx = args[0].data.sint;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (stkctr_entry(stkctr)) {
+		void *ptr;
+
+		ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPT, idx);
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the General Purpose Tag 0 value from the stream's tracked
+ * frontend counters or from the src.
+ * Supports being called as "sc[0-9]_get_gpt0" or "src_get_gpt0" only. Value
+ * zero is returned if the key is new.
+ */
+static int
+smp_fetch_sc_get_gpt0(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (stkctr_entry(stkctr)) {
+		void *ptr;
+
+		/* legacy gpt0 slot first, then fall back on index 0 of the gpt array */
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPT0);
+		if (!ptr)
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPT, 0);
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the GPC[args(0)]'s value from the stream's tracked
+ * frontend counters or from the src.
+ * Supports being called as "sc_get_gpc(<gpc-idx>,<sc-idx>[,<table>])" or
+ * "src_get_gpc(<gpc-idx>[,<table>])" only.
+ * Value zero is returned if the key is new or gpc is not stored.
+ */
+static int
+smp_fetch_sc_get_gpc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int idx;
+
+	/* args[0] = gpc index; remaining args are for the stkctr lookup */
+	idx = args[0].data.sint;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, idx);
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the General Purpose Counter 0 value from the stream's tracked
+ * frontend counters or from the src.
+ * Supports being called as "sc[0-9]_get_gpc0" or "src_get_gpc0" only. Value
+ * zero is returned if the key is new.
+ */
+static int
+smp_fetch_sc_get_gpc0(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
+		if (!ptr) {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 0);
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the General Purpose Counter 1 value from the stream's tracked
+ * frontend counters or from the src.
+ * Supports being called as "sc[0-9]_get_gpc1" or "src_get_gpc1" only. Value
+ * zero is returned if the key is new.
+ */
+static int
+smp_fetch_sc_get_gpc1(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC1);
+		if (!ptr) {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 1);
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the GPC[args(0)]'s event rate from the stream's
+ * tracked frontend counters or from the src.
+ * Supports being called as "sc_gpc_rate(<gpc-idx>,<sc-idx>[,<table])"
+ * or "src_gpc_rate(<gpc-idx>[,<table>])" only.
+ * Value zero is returned if the key is new or gpc_rate is not stored.
+ */
+static int
+smp_fetch_sc_gpc_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int idx;
+
+	/* args[0] = gpc index; remaining args are for the stkctr lookup */
+	idx = args[0].data.sint;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, idx);
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+		                                        stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the General Purpose Counter 0's event rate from the stream's
+ * tracked frontend counters or from the src.
+ * Supports being called as "sc[0-9]_gpc0_rate" or "src_gpc0_rate" only.
+ * Value zero is returned if the key is new.
+ */
+static int
+smp_fetch_sc_gpc0_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int period;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		/* the period must match whichever storage the value came from */
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
+		if (ptr) {
+			period = stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u;
+		}
+		else {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, 0);
+			if (ptr)
+				period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp), period);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* set <smp> to the General Purpose Counter 1's event rate from the stream's
+ * tracked frontend counters or from the src.
+ * Supports being called as "sc[0-9]_gpc1_rate" or "src_gpc1_rate" only.
+ * Value zero is returned if the key is new.
+ */
+static int
+smp_fetch_sc_gpc1_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int period;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+	if (stkctr_entry(stkctr) != NULL) {
+		void *ptr;
+
+		/* the period must match whichever storage the value came from */
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC1_RATE);
+		if (ptr) {
+			period = stkctr->table->data_arg[STKTABLE_DT_GPC1_RATE].u;
+		}
+		else {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, 1);
+			if (ptr)
+				period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp), period);
+
+		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* Increment the GPC[args(0)] value from the stream's tracked
+ * frontend counters and return it into temp integer.
+ * Supports being called as "sc_inc_gpc(<gpc-idx>,<sc-idx>[,<table>])"
+ * or "src_inc_gpc(<gpc-idx>[,<table>])" only. For the src_ form the
+ * entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_inc_gpc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int idx;
+
+	/* args[0] = gpc index; remaining args are for the stkctr lookup */
+	idx = args[0].data.sint;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		/* BUG FIX: the table argument of the src_ variant lives at
+		 * args + 1 (args[0] is the gpc index), exactly as for the
+		 * smp_fetch_sc_stkctr() call above. Passing <args> made the
+		 * integer index be read as the table pointer.
+		 */
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr1,*ptr2;
+
+
+		/* First, update the gpc_rate at <idx> if it's tracked. Second,
+		 * update the gpc itself if tracked. Returns the gpc's value,
+		 * otherwise the rate's curr_ctr.
+		 */
+		ptr1 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, idx);
+		ptr2 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, idx);
+		if (ptr1 || ptr2) {
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			if (ptr1) {
+				update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+						       stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u, 1);
+				smp->data.u.sint = (&stktable_data_cast(ptr1, std_t_frqp))->curr_ctr;
+			}
+
+			if (ptr2)
+				smp->data.u.sint = ++stktable_data_cast(ptr2, std_t_uint);
+
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			/* If data was modified, we need to touch to re-schedule sync */
+			stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+		}
+		else if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* Increment the General Purpose Counter 0 value from the stream's tracked
+ * frontend counters and return it into temp integer.
+ * Supports being called as "sc[0-9]_inc_gpc0" or "src_inc_gpc0" only.
+ * For the src_ form the entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_inc_gpc0(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int period = 0;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr1,*ptr2;
+
+
+		/* First, update gpc0_rate if it's tracked. Second, update its
+		 * gpc0 if tracked. Returns gpc0's value otherwise the curr_ctr.
+		 */
+		ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0_RATE);
+		if (ptr1) {
+			period = stkctr->table->data_arg[STKTABLE_DT_GPC0_RATE].u;
+		}
+		else {
+			/* fallback on the gpc array */
+			ptr1 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, 0);
+			if (ptr1)
+				period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+		}
+
+		ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
+		if (!ptr2) {
+			/* fallback on the gpc array */
+			ptr2 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 0);
+		}
+
+		if (ptr1 || ptr2) {
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			if (ptr1) {
+				update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+						       period, 1);
+				smp->data.u.sint = (&stktable_data_cast(ptr1, std_t_frqp))->curr_ctr;
+			}
+
+			if (ptr2)
+				smp->data.u.sint = ++stktable_data_cast(ptr2, std_t_uint);
+
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			/* If data was modified, we need to touch to re-schedule sync */
+			stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+		}
+		else if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* Increment the General Purpose Counter 1 value from the stream's tracked
+ * frontend counters and return it into temp integer.
+ * Supports being called as "sc[0-9]_inc_gpc1" or "src_inc_gpc1" only.
+ * For the src_ form the entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_inc_gpc1(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int period = 0;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr1,*ptr2;
+
+
+		/* First, update gpc1_rate if it's tracked. Second, update its
+		 * gpc1 if tracked. Returns gpc1's value otherwise the curr_ctr.
+		 */
+		ptr1 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC1_RATE);
+		if (ptr1) {
+			period = stkctr->table->data_arg[STKTABLE_DT_GPC1_RATE].u;
+		}
+		else {
+			/* fallback on the gpc array */
+			ptr1 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC_RATE, 1);
+			if (ptr1)
+				period = stkctr->table->data_arg[STKTABLE_DT_GPC_RATE].u;
+		}
+
+		ptr2 = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC1);
+		if (!ptr2) {
+			/* fallback on the gpc array */
+			ptr2 = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 1);
+		}
+
+		if (ptr1 || ptr2) {
+			HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			if (ptr1) {
+				update_freq_ctr_period(&stktable_data_cast(ptr1, std_t_frqp),
+						       period, 1);
+				smp->data.u.sint = (&stktable_data_cast(ptr1, std_t_frqp))->curr_ctr;
+			}
+
+			if (ptr2)
+				smp->data.u.sint = ++stktable_data_cast(ptr2, std_t_uint);
+
+			HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+			/* If data was modified, we need to touch to re-schedule sync */
+			stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+		}
+		else if (stkctr == &tmpstkctr)
+			stktable_release(stkctr->table, stkctr_entry(stkctr));
+	}
+	return 1;
+}
+
+/* Clear the GPC[args(0)] value from the stream's tracked
+ * frontend counters and return its previous value into temp integer.
+ * Supports being called as "sc_clr_gpc(<gpc-idx>,<sc-idx>[,<table>])"
+ * or "src_clr_gpc(<gpc-idx>[,<table>])" only. For the src_ form the
+ * entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_clr_gpc(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+	unsigned int idx;
+
+	/* args[0] = gpc index; remaining args are for the stkctr lookup */
+	idx = args[0].data.sint;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		/* BUG FIX: the table argument of the src_ variant lives at
+		 * args + 1 (args[0] is the gpc index), exactly as for the
+		 * smp_fetch_sc_stkctr() call above. Passing <args> made the
+		 * integer index be read as the table pointer.
+		 */
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args + 1, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr;
+
+		ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, idx);
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+		stktable_data_cast(ptr, std_t_uint) = 0;
+
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		/* If data was modified, we need to touch to re-schedule sync */
+		stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+	}
+	return 1;
+}
+
+/* Clear the General Purpose Counter 0 value from the stream's tracked
+ * frontend counters and return its previous value into temp integer.
+ * Supports being called as "sc[0-9]_clr_gpc0" or "src_clr_gpc0" only.
+ * For the src_ form the entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_clr_gpc0(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr;
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC0);
+		if (!ptr) {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 0);
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+		stktable_data_cast(ptr, std_t_uint) = 0;
+
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		/* If data was modified, we need to touch to re-schedule sync */
+		stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+	}
+	return 1;
+}
+
+/* Clear the General Purpose Counter 1 value from the stream's tracked
+ * frontend counters and return its previous value into temp integer.
+ * Supports being called as "sc[0-9]_clr_gpc1" or "src_clr_gpc1" only.
+ * For the src_ form the entry is created if it does not exist yet.
+ */
+static int
+smp_fetch_sc_clr_gpc1(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	struct stkctr tmpstkctr;
+	struct stkctr *stkctr;
+
+	stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+	if (!stkctr)
+		return 0;
+
+	smp->flags = SMP_F_VOL_TEST;
+	smp->data.type = SMP_T_SINT;
+	smp->data.u.sint = 0;
+
+	if (!stkctr_entry(stkctr))
+		stkctr = smp_create_src_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+
+	if (stkctr && stkctr_entry(stkctr)) {
+		void *ptr;
+
+		ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC1);
+		if (!ptr) {
+			/* fallback on the gpc array */
+			ptr = stktable_data_ptr_idx(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_GPC, 1);
+		}
+
+		if (!ptr) {
+			if (stkctr == &tmpstkctr)
+				stktable_release(stkctr->table, stkctr_entry(stkctr));
+			return 0; /* parameter not stored */
+		}
+
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+		stktable_data_cast(ptr, std_t_uint) = 0;
+
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+		/* If data was modified, we need to touch to re-schedule sync */
+		stktable_touch_local(stkctr->table, stkctr_entry(stkctr), (stkctr == &tmpstkctr) ? 1 : 0);
+	}
+	return 1;
+}
+
+/* set <smp> to the cumulated number of connections from the stream's tracked
+ * frontend counters. Supports being called as "sc[0-9]_conn_cnt" or
+ * "src_conn_cnt" only.
+ */
+static int
+smp_fetch_sc_conn_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+
+
+ }
+ return 1;
+}
+
+/* set <smp> to the connection rate from the stream's tracked frontend
+ * counters. Supports being called as "sc[0-9]_conn_rate" or "src_conn_rate"
+ * only.
+ */
+static int
+smp_fetch_sc_conn_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_CONN_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set temp integer to the number of connections from the stream's source address
+ * in the table pointed to by expr, after updating it.
+ * Accepts exactly 1 argument of type table.
+ */
+static int
+smp_fetch_src_updt_conn_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct connection *conn = objt_conn(smp->sess->origin);
+ struct stksess *ts;
+ struct stktable_key *key;
+ void *ptr;
+ struct stktable *t;
+
+ if (!conn)
+ return 0;
+
+ /* Fetch source address in a sample. */
+ if (!smp_fetch_src || !smp_fetch_src(empty_arg_list, smp, "src", NULL))
+ return 0;
+
+ /* Converts into key. */
+ key = smp_to_stkey(smp, args->data.t);
+ if (!key)
+ return 0;
+
+ t = args->data.t;
+
+ if ((ts = stktable_get_entry(t, key)) == NULL)
+ /* entry does not exist and could not be created */
+ return 0;
+
+ ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CNT);
+ if (!ptr) {
+ return 0; /* parameter not stored in this table */
+ }
+
+ smp->data.type = SMP_T_SINT;
+
+ HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+
+ smp->data.u.sint = ++stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+ smp->flags = SMP_F_VOL_TEST;
+
+ stktable_touch_local(t, ts, 1);
+
+ /* Touch was previously performed by stktable_update_key */
+ return 1;
+}
+
+/* set <smp> to the number of concurrent connections from the stream's tracked
+ * frontend counters. Supports being called as "sc[0-9]_conn_cur" or
+ * "src_conn_cur" only.
+ */
+static int
+smp_fetch_sc_conn_cur(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_CONN_CUR);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the cumulated number of streams from the stream's tracked
+ * frontend counters. Supports being called as "sc[0-9]_sess_cnt" or
+ * "src_sess_cnt" only.
+ */
+static int
+smp_fetch_sc_sess_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the stream rate from the stream's tracked frontend counters.
+ * Supports being called as "sc[0-9]_sess_rate" or "src_sess_rate" only.
+ */
+static int
+smp_fetch_sc_sess_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_SESS_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the cumulated number of HTTP requests from the stream's tracked
+ * frontend counters. Supports being called as "sc[0-9]_http_req_cnt" or
+ * "src_http_req_cnt" only.
+ */
+static int
+smp_fetch_sc_http_req_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_REQ_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the HTTP request rate from the stream's tracked frontend
+ * counters. Supports being called as "sc[0-9]_http_req_rate" or
+ * "src_http_req_rate" only.
+ */
+static int
+smp_fetch_sc_http_req_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_REQ_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the cumulated number of HTTP requests errors from the stream's
+ * tracked frontend counters. Supports being called as "sc[0-9]_http_err_cnt" or
+ * "src_http_err_cnt" only.
+ */
+static int
+smp_fetch_sc_http_err_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_ERR_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the HTTP request error rate from the stream's tracked frontend
+ * counters. Supports being called as "sc[0-9]_http_err_rate" or
+ * "src_http_err_rate" only.
+ */
+static int
+smp_fetch_sc_http_err_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_ERR_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the cumulated number of HTTP response failures from the stream's
+ * tracked frontend counters. Supports being called as "sc[0-9]_http_fail_cnt" or
+ * "src_http_fail_cnt" only.
+ */
+static int
+smp_fetch_sc_http_fail_cnt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_FAIL_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_uint);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the HTTP response failure rate from the stream's tracked frontend
+ * counters. Supports being called as "sc[0-9]_http_fail_rate" or
+ * "src_http_fail_rate" only.
+ */
+static int
+smp_fetch_sc_http_fail_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_HTTP_FAIL_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_HTTP_FAIL_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the number of kbytes received from clients, as found in the
+ * stream's tracked frontend counters. Supports being called as
+ * "sc[0-9]_kbytes_in" or "src_kbytes_in" only.
+ */
+static int
+smp_fetch_sc_kbytes_in(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_IN_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_ull) >> 10;
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the data rate received from clients in bytes/s, as found
+ * in the stream's tracked frontend counters. Supports being called as
+ * "sc[0-9]_bytes_in_rate" or "src_bytes_in_rate" only.
+ */
+static int
+smp_fetch_sc_bytes_in_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_IN_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_BYTES_IN_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the number of kbytes sent to clients, as found in the
+ * stream's tracked frontend counters. Supports being called as
+ * "sc[0-9]_kbytes_out" or "src_kbytes_out" only.
+ */
+static int
+smp_fetch_sc_kbytes_out(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_OUT_CNT);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = stktable_data_cast(ptr, std_t_ull) >> 10;
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the data rate sent to clients in bytes/s, as found in the
+ * stream's tracked frontend counters. Supports being called as
+ * "sc[0-9]_bytes_out_rate" or "src_bytes_out_rate" only.
+ */
+static int
+smp_fetch_sc_bytes_out_rate(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ smp->data.u.sint = 0;
+ if (stkctr_entry(stkctr) != NULL) {
+ void *ptr;
+
+ ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_BYTES_OUT_RATE);
+ if (!ptr) {
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ return 0; /* parameter not stored */
+ }
+
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ smp->data.u.sint = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ stkctr->table->data_arg[STKTABLE_DT_BYTES_OUT_RATE].u);
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &stkctr_entry(stkctr)->lock);
+
+ if (stkctr == &tmpstkctr)
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ return 1;
+}
+
+/* set <smp> to the number of active trackers on the SC entry in the stream's
+ * tracked frontend counters. Supports being called as "sc[0-9]_trackers" only.
+ */
+static int
+smp_fetch_sc_trackers(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct stkctr tmpstkctr;
+ struct stkctr *stkctr;
+
+ stkctr = smp_fetch_sc_stkctr(smp->sess, smp->strm, args, kw, &tmpstkctr);
+ if (!stkctr)
+ return 0;
+
+ smp->flags = SMP_F_VOL_TEST;
+ smp->data.type = SMP_T_SINT;
+ if (stkctr == &tmpstkctr) {
+ smp->data.u.sint = stkctr_entry(stkctr) ? (HA_ATOMIC_LOAD(&stkctr_entry(stkctr)->ref_cnt) - 1) : 0;
+ stktable_release(stkctr->table, stkctr_entry(stkctr));
+ }
+ else {
+ smp->data.u.sint = stkctr_entry(stkctr) ? HA_ATOMIC_LOAD(&stkctr_entry(stkctr)->ref_cnt) : 0;
+ }
+
+ return 1;
+}
+
+
/* The functions below are used to manipulate table contents from the CLI.
 * There are 3 main actions, "clear", "set" and "show". The code is shared
 * between all actions, and the action is encoded in the void *private in
 * the appctx as well as in the keyword registration, among one of the
 * following values.
 */

enum {
	STK_CLI_ACT_CLR,   /* "clear table": remove one entry or filtered entries */
	STK_CLI_ACT_SET,   /* "set table": create or update an entry's data */
	STK_CLI_ACT_SHOW,  /* "show table": dump table heads and/or entries */
};
+
+/* Dump the status of a table to a stream connector's
+ * read buffer. It returns 0 if the output buffer is full
+ * and needs to be called again, otherwise non-zero.
+ */
+static int table_dump_head_to_buffer(struct buffer *msg,
+ struct appctx *appctx,
+ struct stktable *t, struct stktable *target)
+{
+ struct stream *s = __sc_strm(appctx_sc(appctx));
+
+ chunk_appendf(msg, "# table: %s, type: %s, size:%d, used:%d\n",
+ t->id, stktable_types[t->type].kw, t->size, t->current);
+
+ /* any other information should be dumped here */
+
+ if (target && (strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) < ACCESS_LVL_OPER)
+ chunk_appendf(msg, "# contents not dumped due to insufficient privileges\n");
+
+ if (applet_putchk(appctx, msg) == -1)
+ return 0;
+
+ return 1;
+}
+
/* Dump a table entry to a stream connector's
 * read buffer. It returns 0 if the output buffer is full
 * and needs to be called again, otherwise non-zero.
 *
 * One line is produced per entry: the entry address, the key rendered
 * according to the table's key type, the refcount/expiration/shard fields,
 * then one "name=value" pair per data type stored in the table. The caller
 * is expected to hold the entry's lock (this function only reads).
 */
static int table_dump_entry_to_buffer(struct buffer *msg,
                                      struct appctx *appctx,
                                      struct stktable *t, struct stksess *entry)
{
	int dt;

	chunk_appendf(msg, "%p:", entry);

	/* render the key according to the table's declared key type */
	if (t->type == SMP_T_IPV4) {
		char addr[INET_ADDRSTRLEN];
		inet_ntop(AF_INET, (const void *)&entry->key.key, addr, sizeof(addr));
		chunk_appendf(msg, " key=%s", addr);
	}
	else if (t->type == SMP_T_IPV6) {
		char addr[INET6_ADDRSTRLEN];
		inet_ntop(AF_INET6, (const void *)&entry->key.key, addr, sizeof(addr));
		chunk_appendf(msg, " key=%s", addr);
	}
	else if (t->type == SMP_T_SINT) {
		chunk_appendf(msg, " key=%u", read_u32(entry->key.key));
	}
	else if (t->type == SMP_T_STR) {
		chunk_appendf(msg, " key=");
		dump_text(msg, (const char *)entry->key.key, t->key_size);
	}
	else {
		/* unknown/binary key types are hex-dumped */
		chunk_appendf(msg, " key=");
		dump_binary(msg, (const char *)entry->key.key, t->key_size);
	}

	/* ref_cnt - 1 hides the reference held by the dump itself */
	chunk_appendf(msg, " use=%d exp=%d shard=%d", HA_ATOMIC_LOAD(&entry->ref_cnt) - 1, tick_remain(now_ms, entry->expire), entry->shard);

	/* one "name=value" pair per data type configured on the table */
	for (dt = 0; dt < STKTABLE_DATA_TYPES; dt++) {
		void *ptr;

		if (t->data_ofs[dt] == 0)
			continue;
		if (stktable_data_types[dt].is_array) {
			char tmp[16] = {};
			const char *name_pfx = stktable_data_types[dt].name;
			const char *name_sfx = NULL;
			unsigned int idx = 0;
			int i = 0;

			/* split name to show index before first _ of the name
			 * for example: 'gpc3_rate' if array name is 'gpc_rate'.
			 */
			for (i = 0 ; i < (sizeof(tmp) - 1); i++) {
				if (!name_pfx[i])
					break;
				if (name_pfx[i] == '_') {
					name_pfx = &tmp[0];
					name_sfx = &stktable_data_types[dt].name[i];
					break;
				}
				tmp[i] = name_pfx[i];
			}

			/* dump each element until the array is exhausted */
			ptr = stktable_data_ptr_idx(t, entry, dt, idx);
			while (ptr) {
				if (stktable_data_types[dt].arg_type == ARG_T_DELAY)
					chunk_appendf(msg, " %s%u%s(%u)=", name_pfx, idx, name_sfx ? name_sfx : "", t->data_arg[dt].u);
				else
					chunk_appendf(msg, " %s%u%s=", name_pfx, idx, name_sfx ? name_sfx : "");
				switch (stktable_data_types[dt].std_type) {
				case STD_T_SINT:
					chunk_appendf(msg, "%d", stktable_data_cast(ptr, std_t_sint));
					break;
				case STD_T_UINT:
					chunk_appendf(msg, "%u", stktable_data_cast(ptr, std_t_uint));
					break;
				case STD_T_ULL:
					chunk_appendf(msg, "%llu", stktable_data_cast(ptr, std_t_ull));
					break;
				case STD_T_FRQP:
					chunk_appendf(msg, "%u",
						      read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
									   t->data_arg[dt].u));
					break;
				}
				ptr = stktable_data_ptr_idx(t, entry, dt, ++idx);
			}
			continue;
		}
		/* scalar (non-array) data type */
		if (stktable_data_types[dt].arg_type == ARG_T_DELAY)
			chunk_appendf(msg, " %s(%u)=", stktable_data_types[dt].name, t->data_arg[dt].u);
		else
			chunk_appendf(msg, " %s=", stktable_data_types[dt].name);

		ptr = stktable_data_ptr(t, entry, dt);
		switch (stktable_data_types[dt].std_type) {
		case STD_T_SINT:
			chunk_appendf(msg, "%d", stktable_data_cast(ptr, std_t_sint));
			break;
		case STD_T_UINT:
			chunk_appendf(msg, "%u", stktable_data_cast(ptr, std_t_uint));
			break;
		case STD_T_ULL:
			chunk_appendf(msg, "%llu", stktable_data_cast(ptr, std_t_ull));
			break;
		case STD_T_FRQP:
			chunk_appendf(msg, "%u",
				      read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
							   t->data_arg[dt].u));
			break;
		case STD_T_DICT: {
			struct dict_entry *de;
			de = stktable_data_cast(ptr, std_t_dict);
			chunk_appendf(msg, "%s", de ? (char *)de->value.key : "-");
			break;
		}
		}
	}
	chunk_appendf(msg, "\n");

	if (applet_putchk(appctx, msg) == -1)
		return 0;

	return 1;
}
+
/* appctx context used by the "show table" command (also shared by the
 * "clear table" and "set table" CLI actions, see STK_CLI_ACT_*).
 */
struct show_table_ctx {
	void *target;                               /* table we want to dump, or NULL for all */
	struct stktable *t;                         /* table being currently dumped (first if NULL) */
	struct stksess *entry;                      /* last entry we were trying to dump (or first if NULL) */
	long long value[STKTABLE_FILTER_LEN];       /* value to compare against */
	signed char data_type[STKTABLE_FILTER_LEN]; /* type of data to compare, or -1 if none */
	signed char data_op[STKTABLE_FILTER_LEN];   /* operator (STD_OP_*) when data_type set */
	enum {
		STATE_NEXT = 0,                     /* px points to next table, entry=NULL */
		STATE_DUMP,                         /* px points to curr table, entry is valid, refcount held */
		STATE_DONE,                         /* done dumping */
	} state;
	char action;                                /* action on the table : one of STK_CLI_ACT_* */
};
+
/* Processes a single table entry matching a specific key passed in argument.
 * returns 0 if wants to be called again, 1 if has ended processing.
 *
 * args[4] holds the key, args[5..] optional "data.<type> <value>" pairs for
 * the "set" action. The action (show/clear/set) comes from ctx->action.
 */
static int table_process_entry_per_key(struct appctx *appctx, char **args)
{
	struct show_table_ctx *ctx = appctx->svcctx;
	struct stktable *t = ctx->target;
	struct stksess *ts;
	struct sample key;
	long long value;
	int data_type;
	int cur_arg;
	void *ptr;
	struct freq_ctr *frqp;

	if (!*args[4])
		return cli_err(appctx, "Key value expected\n");

	/* start from the raw string form of the key as typed on the CLI */
	memset(&key, 0, sizeof(key));
	key.data.type = SMP_T_STR;
	key.data.u.str.area = args[4];
	key.data.u.str.data = strlen(args[4]);

	switch (t->type) {
	case SMP_T_IPV4:
	case SMP_T_IPV6:
		/* prefer input format over table type when parsing ip addresses,
		 * then let smp_to_stkey() do the conversion for us when needed
		 */
		BUG_ON(!sample_casts[key.data.type][SMP_T_ADDR]);
		if (!sample_casts[key.data.type][SMP_T_ADDR](&key))
			return cli_err(appctx, "Invalid key\n");
		break;
	case SMP_T_SINT:
	case SMP_T_STR:
		break;
	default:
		/* other key types are not addressable from the CLI */
		switch (ctx->action) {
		case STK_CLI_ACT_SHOW:
			return cli_err(appctx, "Showing keys from tables of type other than ip, ipv6, string and integer is not supported\n");
		case STK_CLI_ACT_CLR:
			return cli_err(appctx, "Removing keys from tables of type other than ip, ipv6, string and integer is not supported\n");
		case STK_CLI_ACT_SET:
			return cli_err(appctx, "Inserting keys into tables of type other than ip, ipv6, string and integer is not supported\n");
		default:
			return cli_err(appctx, "Unknown action\n");
		}
	}

	/* try to convert key according to table type
	 * (it will fill static_table_key on success)
	 */
	if (!smp_to_stkey(&key, t))
		return cli_err(appctx, "Invalid key\n");

	/* check permissions */
	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
		return 1;

	switch (ctx->action) {
	case STK_CLI_ACT_SHOW:
		ts = stktable_lookup_key(t, &static_table_key);
		if (!ts)
			return 1;
		chunk_reset(&trash);
		/* dump the table head, then the single entry, releasing the
		 * lookup reference on every exit path
		 */
		if (!table_dump_head_to_buffer(&trash, appctx, t, t)) {
			stktable_release(t, ts);
			return 0;
		}
		HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
		if (!table_dump_entry_to_buffer(&trash, appctx, t, ts)) {
			HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
			stktable_release(t, ts);
			return 0;
		}
		HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
		stktable_release(t, ts);
		break;

	case STK_CLI_ACT_CLR:
		ts = stktable_lookup_key(t, &static_table_key);
		if (!ts)
			return 1;

		if (!stksess_kill(t, ts, 1)) {
			/* don't delete an entry which is currently referenced */
			return cli_err(appctx, "Entry currently in use, cannot remove\n");
		}
		break;

	case STK_CLI_ACT_SET:
		/* create the entry if needed, then apply each data.<type>
		 * <value> pair under the entry's write lock. Note that on
		 * error paths the entry is still touched so that the
		 * reference taken by stktable_get_entry() is dropped.
		 */
		ts = stktable_get_entry(t, &static_table_key);
		if (!ts) {
			/* don't delete an entry which is currently referenced */
			return cli_err(appctx, "Unable to allocate a new entry\n");
		}
		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
		for (cur_arg = 5; *args[cur_arg]; cur_arg += 2) {
			if (strncmp(args[cur_arg], "data.", 5) != 0) {
				cli_err(appctx, "\"data.<type>\" followed by a value expected\n");
				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
				stktable_touch_local(t, ts, 1);
				return 1;
			}

			data_type = stktable_get_data_type(args[cur_arg] + 5);
			if (data_type < 0) {
				cli_err(appctx, "Unknown data type\n");
				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
				stktable_touch_local(t, ts, 1);
				return 1;
			}

			if (!t->data_ofs[data_type]) {
				cli_err(appctx, "Data type not stored in this table\n");
				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
				stktable_touch_local(t, ts, 1);
				return 1;
			}

			if (!*args[cur_arg+1] || strl2llrc(args[cur_arg+1], strlen(args[cur_arg+1]), &value) != 0) {
				cli_err(appctx, "Require a valid integer value to store\n");
				HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
				stktable_touch_local(t, ts, 1);
				return 1;
			}

			ptr = stktable_data_ptr(t, ts, data_type);

			switch (stktable_data_types[data_type].std_type) {
			case STD_T_SINT:
				stktable_data_cast(ptr, std_t_sint) = value;
				break;
			case STD_T_UINT:
				stktable_data_cast(ptr, std_t_uint) = value;
				break;
			case STD_T_ULL:
				stktable_data_cast(ptr, std_t_ull) = value;
				break;
			case STD_T_FRQP:
				/* We set both the current and previous values. That way
				 * the reported frequency is stable during all the period
				 * then slowly fades out. This allows external tools to
				 * push measures without having to update them too often.
				 */
				frqp = &stktable_data_cast(ptr, std_t_frqp);
				/* First bit is reserved for the freq_ctr lock
				   Note: here we're still protected by the stksess lock
				   so we don't need to update the freq_ctr
				   using its internal lock */
				frqp->curr_tick = now_ms & ~0x1;
				frqp->prev_ctr = 0;
				frqp->curr_ctr = value;
				break;
			}
		}
		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
		stktable_touch_local(t, ts, 1);
		break;

	default:
		return cli_err(appctx, "Unknown action\n");
	}
	return 1;
}
+
+/* Prepares the appctx fields with the data-based filters from the command line.
+ * args[3] is expected to start with "data." (checked by the caller); each filter
+ * then takes three words: "data.<type> <operator> <value>", and up to
+ * STKTABLE_FILTER_LEN filters are accepted.
+ * Returns 0 if the dump can proceed, 1 if it has ended processing (an error was
+ * emitted on the CLI).
+ */
+static int table_prepare_data_request(struct appctx *appctx, char **args)
+{
+ struct show_table_ctx *ctx = appctx->svcctx;
+ int i;
+ char *err = NULL;
+
+ /* data-based filtering only makes sense when dumping or clearing entries */
+ if (ctx->action != STK_CLI_ACT_SHOW && ctx->action != STK_CLI_ACT_CLR)
+ return cli_err(appctx, "content-based lookup is only supported with the \"show\" and \"clear\" actions\n");
+
+ for (i = 0; i < STKTABLE_FILTER_LEN; i++) {
+ if (i > 0 && !*args[3+3*i]) // number of filter entries can be less than STKTABLE_FILTER_LEN
+ break;
+ /* condition on stored data value: skip the "data." prefix (5 chars) */
+ ctx->data_type[i] = stktable_get_data_type(args[3+3*i] + 5);
+ if (ctx->data_type[i] < 0)
+ return cli_dynerr(appctx, memprintf(&err, "Filter entry #%i: Unknown data type\n", i + 1));
+
+ if (!((struct stktable *)ctx->target)->data_ofs[ctx->data_type[i]])
+ return cli_dynerr(appctx, memprintf(&err, "Filter entry #%i: Data type not stored in this table\n", i + 1));
+
+ ctx->data_op[i] = get_std_op(args[4+3*i]);
+ if (ctx->data_op[i] < 0)
+ return cli_dynerr(appctx, memprintf(&err, "Filter entry #%i: Requires an operator among \"eq\", \"ne\", \"le\", \"ge\", \"lt\", \"gt\"\n", i + 1));
+
+ if (!*args[5+3*i] || strl2llrc(args[5+3*i], strlen(args[5+3*i]), &ctx->value[i]) != 0)
+ return cli_dynerr(appctx, memprintf(&err, "Filter entry #%i: Requires a valid integer value to compare against\n", i + 1));
+ }
+
+ /* reject any trailing words once all filter slots are consumed */
+ if (*args[3+3*i]) {
+ return cli_dynerr(appctx, memprintf(&err, "Detected extra data in filter, %ith word of input, after '%s'\n", 3+3*i + 1, args[2+3*i]));
+ }
+
+ /* OK we're done, all the fields are set */
+ return 0;
+}
+
+/* Parser for "show/clear/set table": resolves the optional table name and
+ * dispatches to key- or data-based processing. Returns 0 if the I/O handler
+ * wants to be called, 1 if processing has ended (error or direct action).
+ */
+static int cli_parse_table_req(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_table_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+ int idx;
+
+ /* start from a clean context: no table selected, no entry, no filters */
+ ctx->target = NULL;
+ ctx->entry = NULL;
+ ctx->action = (long)private; // keyword argument, one of STK_CLI_ACT_*
+ for (idx = 0; idx < STKTABLE_FILTER_LEN; idx++)
+ ctx->data_type[idx] = -1;
+
+ if (!*args[2]) {
+ /* no table name given: only "show table" may walk the whole list */
+ ctx->t = stktables_list;
+ if (ctx->action != STK_CLI_ACT_SHOW)
+ goto err_args;
+ return 0;
+ }
+
+ ctx->t = ctx->target = stktable_find_by_name(args[2]);
+ if (!ctx->target)
+ return cli_err(appctx, "No such table\n");
+
+ if (strcmp(args[3], "key") == 0)
+ return table_process_entry_per_key(appctx, args);
+ if (strncmp(args[3], "data.", 5) == 0)
+ return table_prepare_data_request(appctx, args);
+ if (*args[3])
+ goto err_args;
+
+ return 0;
+
+err_args:
+ switch (ctx->action) {
+ case STK_CLI_ACT_SHOW:
+ return cli_err(appctx, "Optional argument only supports \"data.<store_data_type>\" <operator> <value> and key <key>\n");
+ case STK_CLI_ACT_CLR:
+ return cli_err(appctx, "Required arguments: <table> \"data.<store_data_type>\" <operator> <value> or <table> key <key>\n");
+ case STK_CLI_ACT_SET:
+ return cli_err(appctx, "Required arguments: <table> key <key> [data.<store_data_type> <value>]*\n");
+ default:
+ return cli_err(appctx, "Unknown action\n");
+ }
+}
+
+/* This function is used to deal with table operations (dump or clear depending
+ * on the action stored in ctx->action, set by cli_parse_table_req() from the
+ * keyword's private value). It returns 0 if the output buffer is
+ * full and it needs to be called again, otherwise non-zero.
+ */
+static int cli_io_handler_table(struct appctx *appctx)
+{
+ struct show_table_ctx *ctx = appctx->svcctx;
+ struct stconn *sc = appctx_sc(appctx);
+ struct stream *s = __sc_strm(sc);
+ struct ebmb_node *eb;
+ int skip_entry;
+ int show = ctx->action == STK_CLI_ACT_SHOW;
+
+ /*
+ * We have 3 possible states in ctx->state :
+ * - STATE_NEXT : the proxy pointer points to the next table to
+ * dump, the entry pointer is NULL ;
+ * - STATE_DUMP : the proxy pointer points to the current table
+ * and the entry pointer points to the next entry to be dumped,
+ * and the refcount on the next entry is held ;
+ * - STATE_DONE : nothing left to dump, the buffer may contain some
+ * data though.
+ */
+ /* FIXME: Don't watch the other side !*/
+ if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
+ /* in case of abort, remove any refcount we might have set on an entry */
+ if (ctx->state == STATE_DUMP) {
+ stksess_kill_if_expired(ctx->t, ctx->entry, 1);
+ }
+ return 1;
+ }
+
+ chunk_reset(&trash);
+
+ while (ctx->state != STATE_DONE) {
+ switch (ctx->state) {
+ case STATE_NEXT:
+ /* stop when the list is exhausted, or when a specific table
+ * was requested and we just moved past it */
+ if (!ctx->t ||
+ (ctx->target &&
+ ctx->t != ctx->target)) {
+ ctx->state = STATE_DONE;
+ break;
+ }
+
+ if (ctx->t->size) {
+ if (show && !table_dump_head_to_buffer(&trash, appctx, ctx->t, ctx->target))
+ return 0;
+
+ if (ctx->target &&
+ (strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
+ /* dump entries only if table explicitly requested */
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ eb = ebmb_first(&ctx->t->keys);
+ if (eb) {
+ /* hold a ref on the first entry so it survives
+ * once the table lock is released */
+ ctx->entry = ebmb_entry(eb, struct stksess, key);
+ HA_ATOMIC_INC(&ctx->entry->ref_cnt);
+ ctx->state = STATE_DUMP;
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ break;
+ }
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ }
+ }
+ ctx->t = ctx->t->next;
+ break;
+
+ case STATE_DUMP:
+ skip_entry = 0;
+
+ /* read-lock the entry while evaluating filters / dumping it */
+ HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ctx->entry->lock);
+
+ if (ctx->data_type[0] >= 0) {
+ /* we're filtering on some data contents */
+ void *ptr;
+ int dt, i;
+ signed char op;
+ long long data, value;
+
+
+ for (i = 0; i < STKTABLE_FILTER_LEN; i++) {
+ if (ctx->data_type[i] == -1)
+ break;
+ dt = ctx->data_type[i];
+ ptr = stktable_data_ptr(ctx->t,
+ ctx->entry,
+ dt);
+
+ /* normalize the stored value to a long long for comparison */
+ data = 0;
+ switch (stktable_data_types[dt].std_type) {
+ case STD_T_SINT:
+ data = stktable_data_cast(ptr, std_t_sint);
+ break;
+ case STD_T_UINT:
+ data = stktable_data_cast(ptr, std_t_uint);
+ break;
+ case STD_T_ULL:
+ data = stktable_data_cast(ptr, std_t_ull);
+ break;
+ case STD_T_FRQP:
+ data = read_freq_ctr_period(&stktable_data_cast(ptr, std_t_frqp),
+ ctx->t->data_arg[dt].u);
+ break;
+ }
+
+ op = ctx->data_op[i];
+ value = ctx->value[i];
+
+ /* skip the entry if the data does not match the test and the value */
+ if ((data < value &&
+ (op == STD_OP_EQ || op == STD_OP_GT || op == STD_OP_GE)) ||
+ (data == value &&
+ (op == STD_OP_NE || op == STD_OP_GT || op == STD_OP_LT)) ||
+ (data > value &&
+ (op == STD_OP_EQ || op == STD_OP_LT || op == STD_OP_LE))) {
+ skip_entry = 1;
+ break;
+ }
+ }
+ }
+
+ /* buffer full: keep the entry lock released but the refcount
+ * held so we resume from the same entry on the next call */
+ if (show && !skip_entry &&
+ !table_dump_entry_to_buffer(&trash, appctx, ctx->t, ctx->entry)) {
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ctx->entry->lock);
+ return 0;
+ }
+
+ HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ctx->entry->lock);
+
+ /* take the table lock to step to the next entry and drop our ref */
+ HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ HA_ATOMIC_DEC(&ctx->entry->ref_cnt);
+
+ eb = ebmb_next(&ctx->entry->key);
+ if (eb) {
+ struct stksess *old = ctx->entry;
+ ctx->entry = ebmb_entry(eb, struct stksess, key);
+ /* for "clear", matching entries (skip_entry==0) are killed.
+ * NOTE(review): this tests the ref_cnt of the *next* entry
+ * (ctx->entry) while killing <old>, whereas the tail path
+ * below tests the entry being killed -- looks suspicious,
+ * confirm against upstream history. */
+ if (show)
+ __stksess_kill_if_expired(ctx->t, old);
+ else if (!skip_entry && !ctx->entry->ref_cnt)
+ __stksess_kill(ctx->t, old);
+ HA_ATOMIC_INC(&ctx->entry->ref_cnt);
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+ break;
+ }
+
+
+ /* last entry of this table: dispose of it, then move on */
+ if (show)
+ __stksess_kill_if_expired(ctx->t, ctx->entry);
+ else if (!skip_entry && !HA_ATOMIC_LOAD(&ctx->entry->ref_cnt))
+ __stksess_kill(ctx->t, ctx->entry);
+
+ HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->lock);
+
+ ctx->t = ctx->t->next;
+ ctx->state = STATE_NEXT;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 1;
+}
+
+/* release handler for "show/clear table": if we were interrupted in the middle
+ * of a dump, remove the refcount we still hold on the current entry.
+ */
+static void cli_release_show_table(struct appctx *appctx)
+{
+ struct show_table_ctx *ctx = appctx->svcctx;
+
+ if (ctx->state != STATE_DUMP)
+ return;
+ stksess_kill_if_expired(ctx->t, ctx->entry, 1);
+}
+
+/* config parser for "tune.stick-counters": expects a non-negative integer in
+ * args[1] and stores it into global.tune.nb_stk_ctr. Returns 0 on success,
+ * -1 with a message in <err> on failure.
+ */
+static int stk_parse_stick_counters(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char *error;
+ int counters;
+
+ counters = strtol(args[1], &error, 10);
+ /* reject inputs where no digit was consumed (e.g. an empty string, which
+ * strtol would otherwise silently turn into 0) as well as trailing garbage */
+ if (error == args[1] || *error != 0) {
+ memprintf(err, "%s: '%s' is an invalid number", args[0], args[1]);
+ return -1;
+ }
+
+ if (counters < 0) {
+ memprintf(err, "%s: the number of stick-counters may not be negative (was %d)", args[0], counters);
+ return -1;
+ }
+
+ global.tune.nb_stk_ctr = counters;
+ return 0;
+}
+
+/* This function creates the stk_ctr pools after the configuration parsing. It
+ * returns 0 on success otherwise ERR_*. If nb_stk_ctr is 0, the pool remains
+ * NULL.
+ */
+static int stkt_create_stk_ctr_pool(void)
+{
+ size_t entry_size = sizeof(*((struct session*)0)->stkctr);
+
+ if (global.tune.nb_stk_ctr == 0)
+ return 0;
+
+ pool_head_stk_ctr = create_pool("stk_ctr", entry_size * global.tune.nb_stk_ctr, MEM_F_SHARED);
+ if (pool_head_stk_ctr != NULL)
+ return 0;
+
+ ha_alert("out of memory while creating the stick-counters pool.\n");
+ return ERR_ABORT;
+}
+
+/* late init: resolve the "src" sample fetch function and arrange for the
+ * stick-counters pool to be created once configuration parsing is done.
+ */
+static void stkt_late_init(void)
+{
+ struct sample_fetch *fetch;
+
+ fetch = find_sample_fetch("src", strlen("src"));
+ if (fetch != NULL)
+ smp_fetch_src = fetch->process;
+ hap_register_post_check(stkt_create_stk_ctr_pool);
+}
+
+INITCALL0(STG_INIT, stkt_late_init);
+
+/* register cli keywords.
+ * The trailing private pointer carries the STK_CLI_ACT_* action, which
+ * cli_parse_table_req() copies into ctx->action. "set" has a NULL release
+ * handler; NOTE(review): presumably because it never enters STATE_DUMP and
+ * thus holds no entry reference -- confirm.
+ */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "clear", "table", NULL }, "clear table <table> [<filter>]* : remove an entry from a table (filter: data/key)", cli_parse_table_req, cli_io_handler_table, cli_release_show_table, (void *)STK_CLI_ACT_CLR },
+ { { "set", "table", NULL }, "set table <table> key <k> [data.* <v>]* : update or create a table entry's data", cli_parse_table_req, cli_io_handler_table, NULL, (void *)STK_CLI_ACT_SET },
+ { { "show", "table", NULL }, "show table <table> [<filter>]* : report table usage stats or dump this table's contents (filter: data/key)", cli_parse_table_req, cli_io_handler_table, cli_release_show_table, (void *)STK_CLI_ACT_SHOW },
+ {{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/* The same six "sc-*" track-counter action keywords are registered for each of
+ * the seven rule sets below (see the register function named in each INITCALL).
+ * KWF_MATCH_PREFIX lets the keyword match as a prefix; NOTE(review): exact
+ * prefix-match semantics are defined by the action lookup code -- confirm there.
+ */
+/* "tcp-request connection" rules */
+static struct action_kw_list tcp_conn_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_conn_keywords_register, &tcp_conn_kws);
+
+/* "tcp-request session" rules */
+static struct action_kw_list tcp_sess_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_sess_keywords_register, &tcp_sess_kws);
+
+/* "tcp-request content" rules */
+static struct action_kw_list tcp_req_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_kws);
+
+/* "tcp-response content" rules */
+static struct action_kw_list tcp_res_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_kws);
+
+/* "http-request" rules */
+static struct action_kw_list http_req_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_kws);
+
+/* "http-response" rules */
+static struct action_kw_list http_res_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_kws);
+
+/* "http-after-response" rules */
+static struct action_kw_list http_after_res_kws = { { }, {
+ { "sc-add-gpc", parse_add_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc0", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-inc-gpc1", parse_inc_gpc, KWF_MATCH_PREFIX },
+ { "sc-set-gpt", parse_set_gpt, KWF_MATCH_PREFIX },
+ { "sc-set-gpt0", parse_set_gpt, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_after_res_keywords_register, &http_after_res_kws);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ */
+static struct sample_fetch_kw_list smp_fetch_keywords = {ILH, {
+ /* sc_* fetches take the tracked-counter index as a mandatory first
+ * argument (leading SINT in ARG2/ARG3), plus an optional table */
+ { "sc_bytes_in_rate", smp_fetch_sc_bytes_in_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_clr_gpc", smp_fetch_sc_clr_gpc, ARG3(2,SINT,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_clr_gpc0", smp_fetch_sc_clr_gpc0, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_clr_gpc1", smp_fetch_sc_clr_gpc1, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN },
+ { "sc_conn_cnt", smp_fetch_sc_conn_cnt, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_conn_cur", smp_fetch_sc_conn_cur, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_conn_rate", smp_fetch_sc_conn_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_get_gpt", smp_fetch_sc_get_gpt, ARG3(2,SINT,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_get_gpt0", smp_fetch_sc_get_gpt0, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_get_gpc", smp_fetch_sc_get_gpc, ARG3(2,SINT,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_get_gpc0", smp_fetch_sc_get_gpc0, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_get_gpc1", smp_fetch_sc_get_gpc1, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN },
+ { "sc_gpc_rate", smp_fetch_sc_gpc_rate, ARG3(2,SINT,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_gpc0_rate", smp_fetch_sc_gpc0_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_gpc1_rate", smp_fetch_sc_gpc1_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_err_cnt", smp_fetch_sc_http_err_cnt, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_err_rate", smp_fetch_sc_http_err_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_fail_cnt", smp_fetch_sc_http_fail_cnt, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_fail_rate", smp_fetch_sc_http_fail_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_req_cnt", smp_fetch_sc_http_req_cnt, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_http_req_rate", smp_fetch_sc_http_req_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_inc_gpc", smp_fetch_sc_inc_gpc, ARG3(2,SINT,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_inc_gpc0", smp_fetch_sc_inc_gpc0, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_inc_gpc1", smp_fetch_sc_inc_gpc1, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_kbytes_in", smp_fetch_sc_kbytes_in, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc_kbytes_out", smp_fetch_sc_kbytes_out, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc_sess_cnt", smp_fetch_sc_sess_cnt, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_sess_rate", smp_fetch_sc_sess_rate, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc_tracked", smp_fetch_sc_tracked, ARG2(1,SINT,TAB), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
+ { "sc_trackers", smp_fetch_sc_trackers, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ /* sc0_/sc1_/sc2_ fixed-index shortcuts reuse the same fetch functions
+ * and take only an optional table argument (ARG1(0,TAB)) */
+ { "sc0_bytes_in_rate", smp_fetch_sc_bytes_in_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_clr_gpc0", smp_fetch_sc_clr_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_clr_gpc1", smp_fetch_sc_clr_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_conn_cnt", smp_fetch_sc_conn_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_conn_cur", smp_fetch_sc_conn_cur, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_conn_rate", smp_fetch_sc_conn_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_get_gpt0", smp_fetch_sc_get_gpt0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_get_gpc0", smp_fetch_sc_get_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_get_gpc1", smp_fetch_sc_get_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_gpc0_rate", smp_fetch_sc_gpc0_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_gpc1_rate", smp_fetch_sc_gpc1_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_err_cnt", smp_fetch_sc_http_err_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_err_rate", smp_fetch_sc_http_err_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_fail_cnt", smp_fetch_sc_http_fail_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_fail_rate", smp_fetch_sc_http_fail_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_req_cnt", smp_fetch_sc_http_req_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_http_req_rate", smp_fetch_sc_http_req_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_inc_gpc0", smp_fetch_sc_inc_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_inc_gpc1", smp_fetch_sc_inc_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_kbytes_in", smp_fetch_sc_kbytes_in, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc0_kbytes_out", smp_fetch_sc_kbytes_out, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc0_sess_cnt", smp_fetch_sc_sess_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_sess_rate", smp_fetch_sc_sess_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc0_tracked", smp_fetch_sc_tracked, ARG1(0,TAB), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
+ { "sc0_trackers", smp_fetch_sc_trackers, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_bytes_in_rate", smp_fetch_sc_bytes_in_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ /* NOTE(review): "sc1_clr_gpc" takes ARG2(1,SINT,TAB) and has no sc0_/sc2_
+ * counterpart -- confirm this asymmetry is intentional */
+ { "sc1_clr_gpc", smp_fetch_sc_clr_gpc, ARG2(1,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_clr_gpc0", smp_fetch_sc_clr_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_clr_gpc1", smp_fetch_sc_clr_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_conn_cnt", smp_fetch_sc_conn_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_conn_cur", smp_fetch_sc_conn_cur, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_conn_rate", smp_fetch_sc_conn_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_get_gpt0", smp_fetch_sc_get_gpt0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_get_gpc0", smp_fetch_sc_get_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_get_gpc1", smp_fetch_sc_get_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_gpc0_rate", smp_fetch_sc_gpc0_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_gpc1_rate", smp_fetch_sc_gpc1_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_err_cnt", smp_fetch_sc_http_err_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_err_rate", smp_fetch_sc_http_err_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_fail_cnt", smp_fetch_sc_http_fail_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_fail_rate", smp_fetch_sc_http_fail_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_req_cnt", smp_fetch_sc_http_req_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_http_req_rate", smp_fetch_sc_http_req_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_inc_gpc0", smp_fetch_sc_inc_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_inc_gpc1", smp_fetch_sc_inc_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_kbytes_in", smp_fetch_sc_kbytes_in, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc1_kbytes_out", smp_fetch_sc_kbytes_out, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc1_sess_cnt", smp_fetch_sc_sess_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_sess_rate", smp_fetch_sc_sess_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc1_tracked", smp_fetch_sc_tracked, ARG1(0,TAB), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
+ { "sc1_trackers", smp_fetch_sc_trackers, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_bytes_in_rate", smp_fetch_sc_bytes_in_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_clr_gpc0", smp_fetch_sc_clr_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_clr_gpc1", smp_fetch_sc_clr_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_conn_cnt", smp_fetch_sc_conn_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_conn_cur", smp_fetch_sc_conn_cur, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_conn_rate", smp_fetch_sc_conn_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_get_gpt0", smp_fetch_sc_get_gpt0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_get_gpc0", smp_fetch_sc_get_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_get_gpc1", smp_fetch_sc_get_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_gpc0_rate", smp_fetch_sc_gpc0_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_gpc1_rate", smp_fetch_sc_gpc1_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_err_cnt", smp_fetch_sc_http_err_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_err_rate", smp_fetch_sc_http_err_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_fail_cnt", smp_fetch_sc_http_fail_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_fail_rate", smp_fetch_sc_http_fail_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_req_cnt", smp_fetch_sc_http_req_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_http_req_rate", smp_fetch_sc_http_req_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_inc_gpc0", smp_fetch_sc_inc_gpc0, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_inc_gpc1", smp_fetch_sc_inc_gpc1, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_kbytes_in", smp_fetch_sc_kbytes_in, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc2_kbytes_out", smp_fetch_sc_kbytes_out, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "sc2_sess_cnt", smp_fetch_sc_sess_cnt, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_sess_rate", smp_fetch_sc_sess_rate, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "sc2_tracked", smp_fetch_sc_tracked, ARG1(0,TAB), NULL, SMP_T_BOOL, SMP_USE_INTRN, },
+ { "sc2_trackers", smp_fetch_sc_trackers, ARG1(0,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ /* src_* reuse the same fetch functions with SMP_USE_L4CLI and a mandatory
+ * table; presumably keyed on the connection's source address -- confirm in
+ * the smp_fetch_sc_* implementation */
+ { "src_bytes_in_rate", smp_fetch_sc_bytes_in_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_bytes_out_rate", smp_fetch_sc_bytes_out_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_clr_gpc", smp_fetch_sc_clr_gpc, ARG2(2,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_clr_gpc0", smp_fetch_sc_clr_gpc0, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_clr_gpc1", smp_fetch_sc_clr_gpc1, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_conn_cnt", smp_fetch_sc_conn_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_conn_cur", smp_fetch_sc_conn_cur, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_conn_rate", smp_fetch_sc_conn_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_get_gpt" , smp_fetch_sc_get_gpt, ARG2(2,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_get_gpt0", smp_fetch_sc_get_gpt0, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_get_gpc", smp_fetch_sc_get_gpc, ARG2(2,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_get_gpc0", smp_fetch_sc_get_gpc0, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_get_gpc1", smp_fetch_sc_get_gpc1, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_gpc_rate", smp_fetch_sc_gpc_rate, ARG2(2,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_gpc0_rate", smp_fetch_sc_gpc0_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_gpc1_rate", smp_fetch_sc_gpc1_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_err_cnt", smp_fetch_sc_http_err_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_err_rate", smp_fetch_sc_http_err_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_fail_cnt", smp_fetch_sc_http_fail_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_fail_rate", smp_fetch_sc_http_fail_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_req_cnt", smp_fetch_sc_http_req_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_http_req_rate", smp_fetch_sc_http_req_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_inc_gpc", smp_fetch_sc_inc_gpc, ARG2(2,SINT,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_inc_gpc0", smp_fetch_sc_inc_gpc0, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_inc_gpc1", smp_fetch_sc_inc_gpc1, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_kbytes_in", smp_fetch_sc_kbytes_in, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_kbytes_out", smp_fetch_sc_kbytes_out, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_sess_cnt", smp_fetch_sc_sess_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_sess_rate", smp_fetch_sc_sess_rate, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "src_updt_conn_cnt", smp_fetch_src_updt_conn_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_L4CLI, },
+ { "table_avl", smp_fetch_table_avl, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { "table_cnt", smp_fetch_table_cnt, ARG1(1,TAB), NULL, SMP_T_SINT, SMP_USE_INTRN, },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_fetch_keywords);
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Converters taking a TAB argument operate on the designated stick-table;
+ * NOTE(review): presumably the input sample is used as the table key --
+ * confirm in the sample_conv_table_* implementations.
+ */
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+ { "in_table", sample_conv_in_table, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_BOOL },
+ { "table_bytes_in_rate", sample_conv_table_bytes_in_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_bytes_out_rate", sample_conv_table_bytes_out_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_conn_cnt", sample_conv_table_conn_cnt, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_conn_cur", sample_conv_table_conn_cur, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_conn_rate", sample_conv_table_conn_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_expire", sample_conv_table_expire, ARG2(1,TAB,SINT), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpt", sample_conv_table_gpt, ARG2(2,SINT,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpt0", sample_conv_table_gpt0, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc", sample_conv_table_gpc, ARG2(2,SINT,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc0", sample_conv_table_gpc0, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc1", sample_conv_table_gpc1, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc_rate", sample_conv_table_gpc_rate, ARG2(2,SINT,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc0_rate", sample_conv_table_gpc0_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_gpc1_rate", sample_conv_table_gpc1_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_err_cnt", sample_conv_table_http_err_cnt, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_err_rate", sample_conv_table_http_err_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_fail_cnt", sample_conv_table_http_fail_cnt, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_fail_rate", sample_conv_table_http_fail_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_req_cnt", sample_conv_table_http_req_cnt, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_http_req_rate", sample_conv_table_http_req_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_idle", sample_conv_table_idle, ARG2(1,TAB,SINT), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_kbytes_in", sample_conv_table_kbytes_in, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_kbytes_out", sample_conv_table_kbytes_out, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_server_id", sample_conv_table_server_id, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_sess_cnt", sample_conv_table_sess_cnt, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_sess_rate", sample_conv_table_sess_rate, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { "table_trackers", sample_conv_table_trackers, ARG1(1,TAB), NULL, SMP_T_ANY, SMP_T_SINT },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
+
+/* global configuration keyword "tune.stick-counters", parsed by
+ * stk_parse_stick_counters() into global.tune.nb_stk_ctr */
+static struct cfg_kw_list cfg_kws = {{ },{
+ { CFG_GLOBAL, "tune.stick-counters", stk_parse_stick_counters },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/stream.c b/src/stream.c
new file mode 100644
index 0000000..a3c0c93
--- /dev/null
+++ b/src/stream.c
@@ -0,0 +1,4045 @@
+/*
+ * Stream management functions.
+ *
+ * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <import/ebistree.h>
+
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/activity.h>
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/arg.h>
+#include <haproxy/backend.h>
+#include <haproxy/capture.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/connection.h>
+#include <haproxy/dict.h>
+#include <haproxy/dynbuf.h>
+#include <haproxy/fd.h>
+#include <haproxy/filters.h>
+#include <haproxy/freq_ctr.h>
+#include <haproxy/frontend.h>
+#include <haproxy/global.h>
+#include <haproxy/hlua.h>
+#include <haproxy/http_ana.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/htx.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/log.h>
+#include <haproxy/pipe.h>
+#include <haproxy/pool.h>
+#include <haproxy/proxy.h>
+#include <haproxy/queue.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sample.h>
+#include <haproxy/session.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream.h>
+#include <haproxy/task.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+#include <haproxy/vars.h>
+
+
+/* Memory pools for stream structures and for unique-id buffers. */
+DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
+DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);
+
+/* incremented by each "show sess" so that a delimiter can be drawn between
+ * streams created before and after the command was issued.
+ */
+unsigned stream_epoch = 0;
+
+/* List of all use-service keywords. */
+static struct list service_keywords = LIST_HEAD_INIT(service_keywords);
+
+
+/* trace source and events (callback defined further below) */
+static void strm_trace(enum trace_level level, uint64_t mask,
+                       const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4);
+
+/* The event representation is split like this :
+ *   strm  - stream
+ *   sc    - stream connector
+ *   http  - http analysis
+ *   tcp   - tcp analysis
+ *
+ * STRM_EV_* macros are defined in <proto/stream.h>
+ */
+static const struct trace_event strm_trace_events[] = {
+	{ .mask = STRM_EV_STRM_NEW,     .name = "strm_new",     .desc = "new stream" },
+	{ .mask = STRM_EV_STRM_FREE,    .name = "strm_free",    .desc = "release stream" },
+	{ .mask = STRM_EV_STRM_ERR,     .name = "strm_err",     .desc = "error during stream processing" },
+	{ .mask = STRM_EV_STRM_ANA,     .name = "strm_ana",     .desc = "stream analyzers" },
+	{ .mask = STRM_EV_STRM_PROC,    .name = "strm_proc",    .desc = "stream processing" },
+
+	{ .mask = STRM_EV_CS_ST,        .name = "sc_state",     .desc = "processing connector states" },
+
+	{ .mask = STRM_EV_HTTP_ANA,     .name = "http_ana",     .desc = "HTTP analyzers" },
+	{ .mask = STRM_EV_HTTP_ERR,     .name = "http_err",     .desc = "error during HTTP analyzis" },
+
+	{ .mask = STRM_EV_TCP_ANA,      .name = "tcp_ana",      .desc = "TCP analyzers" },
+	{ .mask = STRM_EV_TCP_ERR,      .name = "tcp_err",      .desc = "error during TCP analyzis" },
+
+	{ .mask = STRM_EV_FLT_ANA,      .name = "flt_ana",      .desc = "Filter analyzers" },
+	{ .mask = STRM_EV_FLT_ERR,      .name = "flt_err",      .desc = "error during filter analyzis" },
+	{}
+};
+
+/* "lockon" argument descriptions; arg1 is implicitly the stream itself. */
+static const struct name_desc strm_trace_lockon_args[4] = {
+	/* arg1 */ { /* already used by the stream */ },
+	/* arg2 */ { },
+	/* arg3 */ { },
+	/* arg4 */ { }
+};
+
+/* Verbosity levels for stream traces, from least to most detailed. */
+static const struct name_desc strm_trace_decoding[] = {
+#define STRM_VERB_CLEAN    1
+	{ .name="clean",    .desc="only user-friendly stuff, generally suitable for level \"user\"" },
+#define STRM_VERB_MINIMAL  2
+	{ .name="minimal",  .desc="report info on streams and connectors" },
+#define STRM_VERB_SIMPLE   3
+	{ .name="simple",   .desc="add info on request and response channels" },
+#define STRM_VERB_ADVANCED 4
+	{ .name="advanced", .desc="add info on channel's buffer for data and developer levels only" },
+#define STRM_VERB_COMPLETE 5
+	{ .name="complete", .desc="add info on channel's buffer" },
+	{ /* end */ }
+};
+
+/* Trace source for the "stream" subsystem, registered below. */
+struct trace_source trace_strm = {
+	.name = IST("stream"),
+	.desc = "Applicative stream",
+	.arg_def = TRC_ARG1_STRM,  // TRACE()'s first argument is always a stream
+	.default_cb = strm_trace,
+	.known_events = strm_trace_events,
+	.lockon_args = strm_trace_lockon_args,
+	.decoding = strm_trace_decoding,
+	.report_events = ~0,  // report everything by default
+};
+
+#define TRACE_SOURCE &trace_strm
+INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
+
+/* Trace callback for the "stream" source. The stream traces always expect
+ * that arg1, if non-null, is a stream (from which we can derive everything),
+ * that arg2, if non-null, is an http transaction, and that arg3, if non-null,
+ * is an http message. The amount of detail emitted grows with the source's
+ * verbosity and level; early returns below cut the output short at each
+ * verbosity threshold.
+ */
+static void strm_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct stream *s = a1;
+	const struct http_txn *txn = a2;
+	const struct http_msg *msg = a3;
+	struct task *task;
+	const struct channel *req, *res;
+	struct htx *htx;
+
+	if (!s || src->verbosity < STRM_VERB_CLEAN)
+		return;
+
+	task = s->task;
+	req  = &s->req;
+	res  = &s->res;
+	htx  = (msg ? htxbuf(&msg->chn->buf) : NULL);
+
+	/* General info about the stream (htx/tcp, id...) */
+	chunk_appendf(&trace_buf, " : [%u,%s]",
+		      s->uniq_id, ((s->flags & SF_HTX) ? "HTX" : "TCP"));
+	if (isttest(s->unique_id)) {
+		chunk_appendf(&trace_buf, " id=");
+		b_putist(&trace_buf, s->unique_id);
+	}
+
+	/* Front and back stream connector state */
+	chunk_appendf(&trace_buf, " SC=(%s,%s)",
+		      sc_state_str(s->scf->state), sc_state_str(s->scb->state));
+
+	/* If txn is defined, HTTP req/rep states */
+	if (txn)
+		chunk_appendf(&trace_buf, " HTTP=(%s,%s)",
+			      h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state));
+	if (msg)
+		chunk_appendf(&trace_buf, " %s", ((msg->chn->flags & CF_ISRESP) ? "RESPONSE" : "REQUEST"));
+
+	if (src->verbosity == STRM_VERB_CLEAN)
+		return;
+
+	/* If msg defined, display status-line if possible (verbosity > MINIMAL) */
+	if (src->verbosity > STRM_VERB_MINIMAL && htx && htx_nbblks(htx)) {
+		const struct htx_blk *blk = __htx_get_head_blk(htx);
+		const struct htx_sl  *sl  = htx_get_blk_ptr(htx, blk);
+		enum htx_blk_type    type = htx_get_blk_type(blk);
+
+		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL)
+			chunk_appendf(&trace_buf, " - \"%.*s %.*s %.*s\"",
+				      HTX_SL_P1_LEN(sl), HTX_SL_P1_PTR(sl),
+				      HTX_SL_P2_LEN(sl), HTX_SL_P2_PTR(sl),
+				      HTX_SL_P3_LEN(sl), HTX_SL_P3_PTR(sl));
+	}
+
+	/* Task and stream flags; expirations are converted to relative ms */
+	chunk_appendf(&trace_buf, " - t=%p t.exp=%d s=(%p,0x%08x,0x%x)",
+		      task, tick_isset(task->expire) ? TICKS_TO_MS(task->expire - now_ms) : TICK_ETERNITY, s, s->flags, s->conn_err_type);
+
+	/* If txn defined info about HTTP msgs, otherwise info about SI. */
+	if (txn) {
+		chunk_appendf(&trace_buf, " txn.flags=0x%08x, http.flags=(0x%08x,0x%08x) status=%d",
+			      txn->flags, txn->req.flags, txn->rsp.flags, txn->status);
+	}
+	else {
+		chunk_appendf(&trace_buf, " scf=(%p,%d,0x%08x,0x%x) scb=(%p,%d,0x%08x,0x%x) scf.exp(r,w)=(%d,%d) scb.exp(r,w)=(%d,%d) retries=%d",
+			      s->scf, s->scf->state, s->scf->flags, s->scf->sedesc->flags,
+			      s->scb, s->scb->state, s->scb->flags, s->scb->sedesc->flags,
+			      tick_isset(sc_ep_rcv_ex(s->scf)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scf) - now_ms) : TICK_ETERNITY,
+			      tick_isset(sc_ep_snd_ex(s->scf)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scf) - now_ms) : TICK_ETERNITY,
+			      tick_isset(sc_ep_rcv_ex(s->scb)) ? TICKS_TO_MS(sc_ep_rcv_ex(s->scb) - now_ms) : TICK_ETERNITY,
+			      tick_isset(sc_ep_snd_ex(s->scb)) ? TICKS_TO_MS(sc_ep_snd_ex(s->scb) - now_ms) : TICK_ETERNITY,
+			      s->conn_retries);
+	}
+
+	if (src->verbosity == STRM_VERB_MINIMAL)
+		return;
+
+
+	/* If txn defined, don't display all channel info */
+	if (src->verbosity == STRM_VERB_SIMPLE || txn) {
+		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .exp=%d)",
+			      req, req->flags, tick_isset(req->analyse_exp) ? TICKS_TO_MS(req->analyse_exp - now_ms) : TICK_ETERNITY);
+		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .exp=%d)",
+			      res, res->flags, tick_isset(res->analyse_exp) ? TICKS_TO_MS(res->analyse_exp - now_ms) : TICK_ETERNITY);
+	}
+	else {
+		chunk_appendf(&trace_buf, " req=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
+			      req, req->flags, req->analysers, req->analyse_exp,
+			      (long)req->output, req->total, req->to_forward);
+		chunk_appendf(&trace_buf, " res=(%p .fl=0x%08x .ana=0x%08x .exp=%u .o=%lu .tot=%llu .to_fwd=%u)",
+			      res, res->flags, res->analysers, res->analyse_exp,
+			      (long)res->output, res->total, res->to_forward);
+	}
+
+	if (src->verbosity == STRM_VERB_SIMPLE ||
+	    (src->verbosity == STRM_VERB_ADVANCED && src->level < TRACE_LEVEL_DATA))
+		return;
+
+	/* channels' buffer info */
+	if (s->flags & SF_HTX) {
+		struct htx *rqhtx = htxbuf(&req->buf);
+		struct htx *rphtx = htxbuf(&res->buf);
+
+		chunk_appendf(&trace_buf, " htx=(%u/%u#%u, %u/%u#%u)",
+			      rqhtx->data, rqhtx->size, htx_nbblks(rqhtx),
+			      rphtx->data, rphtx->size, htx_nbblks(rphtx));
+	}
+	else {
+		chunk_appendf(&trace_buf, " buf=(%u@%p+%u/%u, %u@%p+%u/%u)",
+			      (unsigned int)b_data(&req->buf), b_orig(&req->buf),
+			      (unsigned int)b_head_ofs(&req->buf), (unsigned int)b_size(&req->buf),
+			      (unsigned int)b_data(&res->buf), b_orig(&res->buf),
+			      (unsigned int)b_head_ofs(&res->buf), (unsigned int)b_size(&res->buf));
+	}
+
+	/* If msg defined, display htx info if defined (level > USER) */
+	if (src->level > TRACE_LEVEL_USER && htx && htx_nbblks(htx)) {
+		int full = 0;
+
+		/* Full htx info (level > STATE && verbosity > SIMPLE) */
+		if (src->level > TRACE_LEVEL_STATE) {
+			if (src->verbosity == STRM_VERB_COMPLETE)
+				full = 1;
+		}
+
+		chunk_memcat(&trace_buf, "\n\t", 2);
+		htx_dump(&trace_buf, htx, full);
+	}
+}
+
+/* Upgrade an existing stream for stream connector <sc>. Return < 0 on error
+ * (note: as written, the function currently always returns 0). This is only
+ * valid right after a TCP to H1 upgrade. The stream is "reactivated" by
+ * removing the SF_IGNORE flag, and the right mode must be set. On success,
+ * <input> buffer is transferred to the stream and thus points to BUF_NULL.
+ * On error, it is unchanged and it is the caller's responsibility to release
+ * it (this never happens for now).
+ */
+int stream_upgrade_from_sc(struct stconn *sc, struct buffer *input)
+{
+	struct stream *s = __sc_strm(sc);
+	const struct mux_ops *mux = sc_mux_ops(sc);
+
+	if (mux) {
+		if (mux->flags & MX_FL_HTX)
+			s->flags |= SF_HTX;
+	}
+
+	if (!b_is_null(input)) {
+		/* Xfer the input buffer to the request channel. <input> will
+		 * then point to BUF_NULL. From this point, it is the stream
+		 * responsibility to release it.
+		 */
+		s->req.buf = *input;
+		*input = BUF_NULL;
+		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
+		sc_ep_report_read_activity(s->scf);
+	}
+
+	s->req.flags |= CF_READ_EVENT; /* Always report a read event */
+	s->flags &= ~SF_IGNORE;
+
+	task_wakeup(s->task, TASK_WOKEN_INIT);
+	return 0;
+}
+
+/* Callback used to wake up a stream when an input buffer is available. The
+ * stream <s>'s stream connectors are checked for a failed buffer allocation
+ * as indicated by the presence of the SC_FL_NEED_BUFF flag and the lack of a
+ * buffer, and an input buffer is assigned there (at most one). The function
+ * returns 1 and wakes the stream up if a buffer was taken, otherwise zero.
+ * It's designed to be called from __offer_buffer().
+ */
+int stream_buf_available(void *arg)
+{
+	struct stream *s = arg;
+
+	/* front connector waiting for a request buffer takes priority */
+	if (!s->req.buf.size && !sc_ep_have_ff_data(s->scb) && s->scf->flags & SC_FL_NEED_BUFF &&
+	    b_alloc(&s->req.buf))
+		sc_have_buff(s->scf);
+	else if (!s->res.buf.size && !sc_ep_have_ff_data(s->scf) && s->scb->flags & SC_FL_NEED_BUFF &&
+		 b_alloc(&s->res.buf))
+		sc_have_buff(s->scb);
+	else
+		return 0;
+
+	task_wakeup(s->task, TASK_WOKEN_RES);
+	return 1;
+
+}
+
+/* This function is called from the session handler which detects the end of
+ * handshake, in order to complete initialization of a valid stream. It must be
+ * called with a completely initialized session. It returns the pointer to
+ * the newly created stream, or NULL in case of fatal error. The client-facing
+ * end point is assigned to <origin>, which must be valid. The stream's task
+ * is configured with a nice value inherited from the listener's nice if any.
+ * The task's context is set to the new stream, and its function is set to
+ * process_stream(). Target and analysers are null. <input> is used as input
+ * buffer for the request channel and may contain data. On success, it is
+ * transferred to the stream and <input> is set to BUF_NULL. On error, <input>
+ * buffer is unchanged and it is the caller's responsibility to release it.
+ */
+struct stream *stream_new(struct session *sess, struct stconn *sc, struct buffer *input)
+{
+	struct stream *s;
+	struct task *t;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_NEW);
+	if (unlikely((s = pool_alloc(pool_head_stream)) == NULL))
+		goto out_fail_alloc;
+
+	/* minimum stream initialization required for an embryonic stream is
+	 * fairly low. We need very little to execute L4 ACLs, then we need a
+	 * task to make the client-side connection live on its own.
+	 *  - flags
+	 *  - stick-entry tracking
+	 */
+	s->flags = 0;
+	s->logs.logwait = sess->fe->to_log;
+	s->logs.level = 0;
+	s->logs.request_ts = 0;
+	s->logs.t_queue = -1;
+	s->logs.t_connect = -1;
+	s->logs.t_data = -1;
+	s->logs.t_close = 0;
+	s->logs.bytes_in = s->logs.bytes_out = 0;
+	s->logs.prx_queue_pos = 0;  /* we get the number of pending conns before us */
+	s->logs.srv_queue_pos = 0; /* we will get this number soon */
+	s->obj_type = OBJ_TYPE_STREAM;
+
+	/* timestamps are inherited from the session that accepted the connection */
+	s->logs.accept_date = sess->accept_date;
+	s->logs.accept_ts = sess->accept_ts;
+	s->logs.t_handshake = sess->t_handshake;
+	s->logs.t_idle = sess->t_idle;
+
+	/* default logging function */
+	s->do_log = strm_log;
+
+	/* default error reporting function, may be changed by analysers */
+	s->srv_error = default_srv_error;
+
+	/* Initialise the current rule list pointer to NULL. We are sure that
+	 * any rulelist match the NULL pointer.
+	 */
+	s->current_rule_list = NULL;
+	s->current_rule = NULL;
+	s->rules_exp = TICK_ETERNITY;
+	s->last_rule_file = NULL;
+	s->last_rule_line = 0;
+
+	s->stkctr = NULL;
+	if (pool_head_stk_ctr) {
+		s->stkctr = pool_alloc(pool_head_stk_ctr);
+		if (!s->stkctr)
+			goto out_fail_alloc;
+
+		/* Copy SC counters for the stream. We don't touch refcounts because
+		 * any reference we have is inherited from the session. Since the stream
+		 * doesn't exist without the session, the session's existence guarantees
+		 * we don't lose the entry. During the store operation, the stream won't
+		 * touch these ones.
+		 */
+		memcpy(s->stkctr, sess->stkctr, sizeof(s->stkctr[0]) * global.tune.nb_stk_ctr);
+	}
+
+	s->sess = sess;
+
+	s->stream_epoch = _HA_ATOMIC_LOAD(&stream_epoch);
+	s->uniq_id = _HA_ATOMIC_FETCH_ADD(&global.req_count, 1);
+
+	/* OK, we're keeping the stream, so let's properly initialize the stream */
+	LIST_INIT(&s->back_refs);
+
+	LIST_INIT(&s->buffer_wait.list);
+	s->buffer_wait.target = s;
+	s->buffer_wait.wakeup_cb = stream_buf_available;
+
+	s->lat_time = s->cpu_time = 0;
+	s->call_rate.curr_tick = s->call_rate.curr_ctr = s->call_rate.prev_ctr = 0;
+	s->pcli_next_pid = 0;
+	s->pcli_flags = 0;
+	s->unique_id = IST_NULL;
+
+	if ((t = task_new_here()) == NULL)
+		goto out_fail_alloc;
+
+	s->task = t;
+	s->pending_events = 0;
+	s->conn_retries = 0;
+	s->conn_exp = TICK_ETERNITY;
+	s->conn_err_type = STRM_ET_NONE;
+	s->prev_conn_state = SC_ST_INI;
+	t->process = process_stream;
+	t->context = s;
+	t->expire = TICK_ETERNITY;
+	if (sess->listener)
+		t->nice = sess->listener->bind_conf->nice;
+
+	/* Note: initially, the stream's backend points to the frontend.
+	 * This changes later when switching rules are executed or
+	 * when the default backend is assigned.
+	 */
+	s->be  = sess->fe;
+	s->req_cap = NULL;
+	s->res_cap = NULL;
+
+	/* Initialize all the variables contexts even if not used.
+	 * This permits to prune these contexts without errors.
+	 *
+	 * We need to make sure that those lists are not re-initialized
+	 * by stream-dependent underlying code because we could lose
+	 * track of already defined variables, leading to data inconsistency
+	 * and memory leaks...
+	 *
+	 * For reference: we had a very old bug caused by vars_txn and
+	 * vars_reqres being accidentally re-initialized in http_create_txn()
+	 * (https://github.com/haproxy/haproxy/issues/1935)
+	 */
+	vars_init_head(&s->vars_txn,    SCOPE_TXN);
+	vars_init_head(&s->vars_reqres, SCOPE_REQ);
+
+	/* Set SF_HTX flag for HTTP frontends. */
+	if (sess->fe->mode == PR_MODE_HTTP)
+		s->flags |= SF_HTX;
+
+	s->scf = sc;
+	if (sc_attach_strm(s->scf, s) < 0)
+		goto out_fail_attach_scf;
+
+	s->scb = sc_new_from_strm(s, SC_FL_ISBACK);
+	if (!s->scb)
+		goto out_fail_alloc_scb;
+
+	sc_set_state(s->scf, SC_ST_EST);
+
+	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
+		s->scf->flags |= SC_FL_INDEP_STR;
+
+	if (likely(sess->fe->options2 & PR_O2_INDEPSTR))
+		s->scb->flags |= SC_FL_INDEP_STR;
+
+	if (sc_ep_test(sc, SE_FL_WEBSOCKET))
+		s->flags |= SF_WEBSOCKET;
+	if (sc_conn(sc)) {
+		const struct mux_ops *mux = sc_mux_ops(sc);
+
+		if (mux && mux->flags & MX_FL_HTX)
+			s->flags |= SF_HTX;
+	}
+
+	stream_init_srv_conn(s);
+	s->target = sess->fe->default_target;
+
+	s->pend_pos = NULL;
+	s->priority_class = 0;
+	s->priority_offset = 0;
+
+	/* init store persistence */
+	s->store_count = 0;
+
+	channel_init(&s->req);
+	s->req.flags |= CF_READ_EVENT; /* the producer is already connected */
+	s->req.analysers = sess->listener ? sess->listener->bind_conf->analysers : sess->fe->fe_req_ana;
+
+	if (IS_HTX_STRM(s)) {
+		/* Be sure to have HTTP analysers because in case of
+		 * "destructive" stream upgrade, they may be missing (e.g
+		 * TCP>H2)
+		 */
+		s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
+	}
+
+	if (!sess->fe->fe_req_ana) {
+		channel_auto_connect(&s->req);  /* don't wait to establish connection */
+		channel_auto_close(&s->req);    /* let the producer forward close requests */
+	}
+
+	s->scf->ioto = sess->fe->timeout.client;
+	s->req.analyse_exp = TICK_ETERNITY;
+
+	channel_init(&s->res);
+	s->res.flags |= CF_ISRESP;
+	s->res.analysers = 0;
+
+	if (sess->fe->options2 & PR_O2_NODELAY) {
+		s->scf->flags |= SC_FL_SND_NEVERWAIT;
+		s->scb->flags |= SC_FL_SND_NEVERWAIT;
+	}
+
+	s->scb->ioto = TICK_ETERNITY;
+	s->res.analyse_exp = TICK_ETERNITY;
+
+	s->txn = NULL;
+	s->hlua = NULL;
+
+	s->resolv_ctx.requester = NULL;
+	s->resolv_ctx.hostname_dn = NULL;
+	s->resolv_ctx.hostname_dn_len = 0;
+	s->resolv_ctx.parent = NULL;
+
+	s->tunnel_timeout = TICK_ETERNITY;
+
+	LIST_APPEND(&th_ctx->streams, &s->list);
+
+	if (flt_stream_init(s) < 0 || flt_stream_start(s) < 0)
+		goto out_fail_accept;
+
+	/* just in case the caller would have pre-disabled it */
+	se_will_consume(s->scf->sedesc);
+
+	if (sess->fe->accept && sess->fe->accept(s) < 0)
+		goto out_fail_accept;
+
+	if (!b_is_null(input)) {
+		/* Xfer the input buffer to the request channel. <input> will
+		 * then point to BUF_NULL. From this point, it is the stream
+		 * responsibility to release it.
+		 */
+		s->req.buf = *input;
+		*input = BUF_NULL;
+		s->req.total = (IS_HTX_STRM(s) ? htxbuf(&s->req.buf)->data : b_data(&s->req.buf));
+		sc_ep_report_read_activity(s->scf);
+	}
+
+	/* it is important not to call the wakeup function directly but to
+	 * pass through task_wakeup(), because this one knows how to apply
+	 * priorities to tasks. Using multi thread we must be sure that
+	 * stream is fully initialized before calling task_wakeup. So
+	 * the caller must handle the task_wakeup
+	 */
+	DBG_TRACE_LEAVE(STRM_EV_STRM_NEW, s);
+	task_wakeup(s->task, TASK_WOKEN_INIT);
+	return s;
+
+	/* Error unrolling */
+ out_fail_accept:
+	flt_stream_release(s, 0);
+	LIST_DELETE(&s->list);
+	sc_free(s->scb);
+ out_fail_alloc_scb:
+ out_fail_attach_scf:
+	task_destroy(t);
+ out_fail_alloc:
+	if (s)
+		pool_free(pool_head_stk_ctr, s->stkctr);
+	pool_free(pool_head_stream, s);
+	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_NEW|STRM_EV_STRM_ERR);
+	return NULL;
+}
+
+/*
+ * frees the context associated to a stream. It must have been removed first.
+ * Releases queue slots, server connection slots, channel buffers, the unique
+ * id, filters, the HTTP transaction, stored stick-table entries, resolver
+ * state, captures, variables and stick counters, then unlinks the stream from
+ * the thread's list and from any back-references before freeing the structure
+ * itself.
+ */
+void stream_free(struct stream *s)
+{
+	struct session *sess = strm_sess(s);
+	struct proxy *fe = sess->fe;
+	struct bref *bref, *back;
+	int i;
+
+	DBG_TRACE_POINT(STRM_EV_STRM_FREE, s);
+
+	/* detach the stream from its own task before even releasing it so
+	 * that walking over a task list never exhibits a dying stream.
+	 */
+	s->task->context = NULL;
+	__ha_barrier_store();
+
+	pendconn_free(s);
+
+	if (objt_server(s->target)) { /* there may be requests left pending in queue */
+		if (s->flags & SF_CURR_SESS) {
+			s->flags &= ~SF_CURR_SESS;
+			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
+		}
+		if (may_dequeue_tasks(__objt_server(s->target), s->be))
+			process_srv_queue(__objt_server(s->target));
+	}
+
+	if (unlikely(s->srv_conn)) {
+		/* the stream still has a reserved slot on a server, but
+		 * it should normally be only the same as the one above,
+		 * so this should not happen in fact.
+		 */
+		sess_change_server(s, NULL);
+	}
+
+	/* We may still be present in the buffer wait queue */
+	if (LIST_INLIST(&s->buffer_wait.list))
+		LIST_DEL_INIT(&s->buffer_wait.list);
+
+	if (s->req.buf.size || s->res.buf.size) {
+		int count = !!s->req.buf.size + !!s->res.buf.size;
+
+		b_free(&s->req.buf);
+		b_free(&s->res.buf);
+		offer_buffers(NULL, count);
+	}
+
+	pool_free(pool_head_uniqueid, s->unique_id.ptr);
+	s->unique_id = IST_NULL;
+
+	flt_stream_stop(s);
+	flt_stream_release(s, 0);
+
+	hlua_ctx_destroy(s->hlua);
+	s->hlua = NULL;
+	if (s->txn)
+		http_destroy_txn(s);
+
+	/* ensure the client-side transport layer is destroyed */
+	/* Be sure it is useless !! */
+	/* if (cli_cs) */
+	/* 	cs_close(cli_cs); */
+
+	/* release any stick-table entries kept for the "store" rules */
+	for (i = 0; i < s->store_count; i++) {
+		if (!s->store[i].ts)
+			continue;
+		stksess_free(s->store[i].table, s->store[i].ts);
+		s->store[i].ts = NULL;
+	}
+
+	if (s->resolv_ctx.requester) {
+		__decl_thread(struct resolvers *resolvers = s->resolv_ctx.parent->arg.resolv.resolvers);
+
+		HA_SPIN_LOCK(DNS_LOCK, &resolvers->lock);
+		ha_free(&s->resolv_ctx.hostname_dn);
+		s->resolv_ctx.hostname_dn_len = 0;
+		resolv_unlink_resolution(s->resolv_ctx.requester);
+		HA_SPIN_UNLOCK(DNS_LOCK, &resolvers->lock);
+
+		pool_free(resolv_requester_pool, s->resolv_ctx.requester);
+		s->resolv_ctx.requester = NULL;
+	}
+
+	/* release request and response header captures, if any */
+	if (fe) {
+		if (s->req_cap) {
+			struct cap_hdr *h;
+			for (h = fe->req_cap; h; h = h->next)
+				pool_free(h->pool, s->req_cap[h->index]);
+			pool_free(fe->req_cap_pool, s->req_cap);
+		}
+
+		if (s->res_cap) {
+			struct cap_hdr *h;
+			for (h = fe->rsp_cap; h; h = h->next)
+				pool_free(h->pool, s->res_cap[h->index]);
+			pool_free(fe->rsp_cap_pool, s->res_cap);
+		}
+	}
+
+	/* Cleanup all variable contexts. */
+	if (!LIST_ISEMPTY(&s->vars_txn.head))
+		vars_prune(&s->vars_txn, s->sess, s);
+	if (!LIST_ISEMPTY(&s->vars_reqres.head))
+		vars_prune(&s->vars_reqres, s->sess, s);
+
+	stream_store_counters(s);
+	pool_free(pool_head_stk_ctr, s->stkctr);
+
+	list_for_each_entry_safe(bref, back, &s->back_refs, users) {
+		/* we have to unlink all watchers. We must not relink them if
+		 * this stream was the last one in the list. This is safe to do
+		 * here because we're touching our thread's list so we know
+		 * that other streams are not active, and the watchers will
+		 * only touch their node under thread isolation.
+		 */
+		LIST_DEL_INIT(&bref->users);
+		if (s->list.n != &th_ctx->streams)
+			LIST_APPEND(&LIST_ELEM(s->list.n, struct stream *, list)->back_refs, &bref->users);
+		bref->ref = s->list.n;
+		__ha_barrier_store();
+	}
+	LIST_DELETE(&s->list);
+
+	sc_destroy(s->scb);
+	sc_destroy(s->scf);
+
+	pool_free(pool_head_stream, s);
+
+	/* We may want to free the maximum amount of pools if the proxy is stopping */
+	if (fe && unlikely(fe->flags & (PR_FL_DISABLED|PR_FL_STOPPED))) {
+		pool_flush(pool_head_buffer);
+		pool_flush(pool_head_http_txn);
+		pool_flush(pool_head_requri);
+		pool_flush(pool_head_capture);
+		pool_flush(pool_head_stream);
+		pool_flush(pool_head_session);
+		pool_flush(pool_head_connection);
+		pool_flush(pool_head_pendconn);
+		pool_flush(fe->req_cap_pool);
+		pool_flush(fe->rsp_cap_pool);
+	}
+}
+
+
+/* Allocates a work buffer for stream <s>. It is meant to be called inside
+ * process_stream(). It will only allocate the side needed for the function
+ * to work fine, which is the response buffer so that an error message may be
+ * built and returned. Response buffers may be allocated from the reserve, this
+ * is critical to ensure that a response may always flow and will never block a
+ * server from releasing a connection. Returns 0 in case of failure, non-zero
+ * otherwise.
+ */
+static int stream_alloc_work_buffer(struct stream *s)
+{
+	if (b_alloc(&s->res.buf))
+		return 1;
+	return 0;
+}
+
+/* releases unused buffers after processing. Typically used at the end of the
+ * update() functions. It will try to wake up as many tasks/applets as the
+ * number of buffers that it releases. In practice, most often streams are
+ * blocked on a single buffer, so it makes sense to try to wake two up when two
+ * buffers are released at once.
+ */
+void stream_release_buffers(struct stream *s)
+{
+	int offer = 0;
+
+	/* only release allocated-but-empty channel buffers */
+	if (c_size(&s->req) && c_empty(&s->req)) {
+		offer++;
+		b_free(&s->req.buf);
+	}
+	if (c_size(&s->res) && c_empty(&s->res)) {
+		offer++;
+		b_free(&s->res.buf);
+	}
+
+	/* if we're certain to have at least 1 buffer available, and there is
+	 * someone waiting, we can wake up a waiter and offer them.
+	 */
+	if (offer)
+		offer_buffers(s, offer);
+}
+
+/* Updates byte counters for stream <s>: the difference between the channels'
+ * totals and the last logged values is added to the frontend, backend, server
+ * and listener counters, and to any tracked stick counters (stream-level ones
+ * first, falling back to the session-level ones). The logged values are then
+ * resynchronized so the function may be called repeatedly.
+ */
+void stream_process_counters(struct stream *s)
+{
+	struct session *sess = s->sess;
+	unsigned long long bytes;
+	int i;
+
+	/* bytes received from the client since the last call */
+	bytes = s->req.total - s->logs.bytes_in;
+	s->logs.bytes_in = s->req.total;
+	if (bytes) {
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_in, bytes);
+		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_in, bytes);
+
+		if (objt_server(s->target))
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_in, bytes);
+
+		if (sess->listener && sess->listener->counters)
+			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_in, bytes);
+
+		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+			if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
+				stkctr_inc_bytes_in_ctr(&sess->stkctr[i], bytes);
+		}
+	}
+
+	/* bytes sent to the client since the last call */
+	bytes = s->res.total - s->logs.bytes_out;
+	s->logs.bytes_out = s->res.total;
+	if (bytes) {
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.bytes_out, bytes);
+		_HA_ATOMIC_ADD(&s->be->be_counters.bytes_out, bytes);
+
+		if (objt_server(s->target))
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.bytes_out, bytes);
+
+		if (sess->listener && sess->listener->counters)
+			_HA_ATOMIC_ADD(&sess->listener->counters->bytes_out, bytes);
+
+		for (i = 0; i < global.tune.nb_stk_ctr; i++) {
+			if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
+				stkctr_inc_bytes_out_ctr(&sess->stkctr[i], bytes);
+		}
+	}
+}
+
+/* Abort processing on both channels at the same time */
+void stream_abort(struct stream *s)
+{
+	channel_abort(&s->req);
+	channel_abort(&s->res);
+}
+
+/*
+ * Returns a message to the client ; the connection is shut down for read,
+ * and the request is cleared so that no server connection can be initiated.
+ * The buffer is marked for read shutdown on the other side to protect the
+ * message, and the buffer write is enabled. The message is contained in a
+ * "chunk". If it is null, then an empty message is used. The reply buffer does
+ * not need to be empty before this, and its contents will not be overwritten.
+ * The primary goal of this function is to return error messages to a client.
+ */
+void stream_retnclose(struct stream *s, const struct buffer *msg)
+{
+	struct channel *ic = &s->req;
+	struct channel *oc = &s->res;
+
+	/* abort the request side and wipe any pending response data */
+	channel_auto_read(ic);
+	channel_abort(ic);
+	channel_erase(ic);
+	channel_truncate(oc);
+
+	if (likely(msg && msg->data))
+		co_inject(oc, msg->area, msg->data);
+
+	channel_auto_read(oc);
+	channel_auto_close(oc);
+	sc_schedule_abort(s->scb);
+}
+
+/* Sets the timeout designated by <name> on stream <s> to <timeout> ticks.
+ * Used by "set-timeout" actions. Returns 1 if the timeout was applied,
+ * 0 if <name> is not a supported timeout.
+ */
+int stream_set_timeout(struct stream *s, enum act_timeout_name name, int timeout)
+{
+	switch (name) {
+	case ACT_TIMEOUT_CLIENT:
+		s->scf->ioto = timeout;
+		return 1;
+
+	case ACT_TIMEOUT_SERVER:
+		s->scb->ioto = timeout;
+		return 1;
+
+	case ACT_TIMEOUT_TUNNEL:
+		s->tunnel_timeout = timeout;
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * This function handles the transition between the SC_ST_CON state and the
+ * SC_ST_EST state. It must only be called after switching from SC_ST_CON (or
+ * SC_ST_INI or SC_ST_RDY) to SC_ST_EST, but only when a ->proto is defined.
+ * Note that it will switch the interface to SC_ST_DIS if we already have
+ * the SC_FL_ABRT_DONE flag, it means we were able to forward the request, and
+ * receive the response, before process_stream() had the opportunity to
+ * make the switch from SC_ST_CON to SC_ST_EST. When that happens, we want
+ * to go through back_establish() anyway, to make sure the analysers run.
+ * Timeouts are cleared. Errors are reported on the channel so that analysers
+ * can handle them.
+ */
+void back_establish(struct stream *s)
+{
+	struct connection *conn = sc_conn(s->scb);
+	struct channel *req = &s->req;
+	struct channel *rep = &s->res;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
+	/* First, centralize the timers information, and clear any irrelevant
+	 * timeout.
+	 */
+	s->logs.t_connect = ns_to_ms(now_ns - s->logs.accept_ts);
+	s->conn_exp = TICK_ETERNITY;
+	s->flags &= ~SF_CONN_EXP;
+
+	/* errors faced after sending data need to be reported */
+	if ((s->scb->flags & SC_FL_ERROR) && req->flags & CF_WROTE_DATA) {
+		s->req.flags |= CF_WRITE_EVENT;
+		s->res.flags |= CF_READ_EVENT;
+		s->conn_err_type = STRM_ET_DATA_ERR;
+		DBG_TRACE_STATE("read/write error", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
+	}
+
+	if (objt_server(s->target))
+		health_adjust(__objt_server(s->target), HANA_STATUS_L4_OK);
+
+	if (!IS_HTX_STRM(s)) { /* let's allow immediate data connection in this case */
+		/* if the user wants to log as soon as possible, without counting
+		 * bytes from the server, then this is the right moment. */
+		if (!LIST_ISEMPTY(&strm_fe(s)->logformat) && !(s->logs.logwait & LW_BYTES)) {
+			/* note: no pend_pos here, session is established */
+			s->logs.t_close = s->logs.t_connect; /* to get a valid end date */
+			s->do_log(s);
+		}
+	}
+	else {
+		s->scb->flags |= SC_FL_RCV_ONCE; /* a single read is enough to get response headers */
+	}
+
+	rep->analysers |= strm_fe(s)->fe_rsp_ana | s->be->be_rsp_ana;
+
+	se_have_more_data(s->scb->sedesc);
+	rep->flags |= CF_READ_EVENT; /* producer is now attached */
+	sc_ep_report_read_activity(s->scb);
+	if (conn) {
+		/* real connections have timeouts
+		 * if already defined, it means that a set-timeout rule has
+		 * been executed so do not overwrite them
+		 */
+		if (!tick_isset(s->scb->ioto))
+			s->scb->ioto = s->be->timeout.server;
+		if (!tick_isset(s->tunnel_timeout))
+			s->tunnel_timeout = s->be->timeout.tunnel;
+
+		/* The connection is now established, try to read data from the
+		 * underlying layer, and subscribe to recv events. We use a
+		 * delayed recv here to give a chance to the data to flow back
+		 * by the time we process other tasks.
+		 */
+		sc_chk_rcv(s->scb);
+	}
+	/* If we managed to get the whole response, and we don't have anything
+	 * left to send, or can't, switch to SC_ST_DIS now. */
+	if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || (s->scf->flags & SC_FL_SHUT_DONE)) {
+		s->scb->state = SC_ST_DIS;
+		DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
+	}
+
+	DBG_TRACE_LEAVE(STRM_EV_STRM_PROC|STRM_EV_CS_ST, s);
+}
+
+/* Set correct stream termination flags in case no analyser has done it. It
+ * also counts a failed request if the server state has not reached the request
+ * stage.
+ */
+void sess_set_term_flags(struct stream *s)
+{
+	/* only act when no analyser already reported a final state (SF_FINST_*) */
+	if (!(s->flags & SF_FINST_MASK)) {
+		if (s->scb->state == SC_ST_INI) {
+			/* anything before REQ in fact */
+			_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
+			if (strm_li(s) && strm_li(s)->counters)
+				_HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);
+
+			s->flags |= SF_FINST_R;
+		}
+		else if (s->scb->state == SC_ST_QUE)
+			/* stream ended while queued waiting for a server slot */
+			s->flags |= SF_FINST_Q;
+		else if (sc_state_in(s->scb->state, SC_SB_REQ|SC_SB_TAR|SC_SB_ASS|SC_SB_CON|SC_SB_CER|SC_SB_RDY))
+			/* ended while assigning/establishing the server connection */
+			s->flags |= SF_FINST_C;
+		else if (s->scb->state == SC_ST_EST || s->prev_conn_state == SC_ST_EST)
+			/* ended during the established (data) phase */
+			s->flags |= SF_FINST_D;
+		else
+			/* any later (closing) state */
+			s->flags |= SF_FINST_L;
+	}
+}
+
+/* This function parses the use-service action ruleset. It executes
+ * the associated ACL and sets an applet as a stream or txn final node.
+ * It returns ACT_RET_ERR if an error occurs, the proxy left in
+ * consistent state. It returns ACT_RET_STOP in the success case because
+ * use-service must be a terminal action. Returns ACT_RET_YIELD
+ * if the initialisation function requires more data.
+ */
+enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
+                                    struct session *sess, struct stream *s, int flags)
+
+{
+	struct appctx *appctx;
+
+	/* Initialises the applet if it is required. */
+	if (flags & ACT_OPT_FIRST) {
+		/* Register applet. This function schedules the applet. */
+		s->target = &rule->applet.obj_type;
+		appctx = sc_applet_create(s->scb, objt_applet(s->target));
+		if (unlikely(!appctx))
+			return ACT_RET_ERR;
+
+		/* Finish initialisation of the context. */
+		appctx->rule = rule;
+		if (appctx_init(appctx) == -1)
+			return ACT_RET_ERR;
+	}
+	else
+		/* not the first call: the applet was already created, reuse it */
+		appctx = __sc_appctx(s->scb);
+
+	if (rule->from != ACT_F_HTTP_REQ) {
+		if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
+			_HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);
+
+		/* The SF_ASSIGNED flag prevents any further server assignment. */
+		s->flags |= SF_ASSIGNED;
+	}
+
+	/* Now we can schedule the applet. */
+	applet_need_more_data(appctx);
+	appctx_wakeup(appctx);
+	return ACT_RET_STOP;
+}
+
+/* This stream analyser checks the switching rules and changes the backend
+ * if appropriate. The default_backend rule is also considered, then the
+ * target backend's forced persistence rules are also evaluated last if any.
+ * It returns 1 if the processing can continue on next analysers, or zero if it
+ * either needs more data or wants to immediately abort the request.
+ */
+static int process_switching_rules(struct stream *s, struct channel *req, int an_bit)
+{
+	struct persist_rule *prst_rule;
+	struct session *sess = s->sess;
+	struct proxy *fe = sess->fe;
+
+	/* this analyser runs only once per stream: unregister it right away */
+	req->analysers &= ~an_bit;
+	req->analyse_exp = TICK_ETERNITY;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
+
+	/* now check whether we have some switching rules for this request */
+	if (!(s->flags & SF_BE_ASSIGNED)) {
+		struct switching_rule *rule;
+
+		/* first matching rule wins */
+		list_for_each_entry(rule, &fe->switching_rules, list) {
+			int ret = 1;
+
+			if (rule->cond) {
+				ret = acl_exec_cond(rule->cond, fe, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+				ret = acl_pass(ret);
+				if (rule->cond->pol == ACL_COND_UNLESS)
+					ret = !ret;
+			}
+
+			if (ret) {
+				/* If the backend name is dynamic, try to resolve the name.
+				 * If we can't resolve the name, or if any error occurs, break
+				 * the loop and fallback to the default backend.
+				 */
+				struct proxy *backend = NULL;
+
+				if (rule->dynamic) {
+					struct buffer *tmp;
+
+					/* allocation failure aborts the whole request */
+					tmp = alloc_trash_chunk();
+					if (!tmp)
+						goto sw_failed;
+
+					if (build_logline(s, tmp->area, tmp->size, &rule->be.expr))
+						backend = proxy_be_by_name(tmp->area);
+
+					free_trash_chunk(tmp);
+					tmp = NULL;
+
+					if (!backend)
+						break;
+				}
+				else
+					backend = rule->be.backend;
+
+				if (!stream_set_backend(s, backend))
+					goto sw_failed;
+				break;
+			}
+		}
+
+		/* To ensure correct connection accounting on the backend, we
+		 * have to assign one if it was not set (eg: a listen). This
+		 * measure also takes care of correctly setting the default
+		 * backend if any. Don't do anything if an upgrade is already in
+		 * progress.
+		 */
+		if (!(s->flags & (SF_BE_ASSIGNED|SF_IGNORE)))
+			if (!stream_set_backend(s, fe->defbe.be ? fe->defbe.be : s->be))
+				goto sw_failed;
+
+		/* No backend assigned but no error reported. It happens when a
+		 * TCP stream is upgraded to HTTP/2.
+		 */
+		if ((s->flags & (SF_BE_ASSIGNED|SF_IGNORE)) == SF_IGNORE) {
+			DBG_TRACE_DEVEL("leaving with no backend because of a destructive upgrade", STRM_EV_STRM_ANA, s);
+			return 0;
+		}
+
+	}
+
+	/* we don't want to run the TCP or HTTP filters again if the backend has not changed */
+	if (fe == s->be) {
+		s->req.analysers &= ~AN_REQ_INSPECT_BE;
+		s->req.analysers &= ~AN_REQ_HTTP_PROCESS_BE;
+		s->req.analysers &= ~AN_REQ_FLT_START_BE;
+	}
+
+	/* as soon as we know the backend, we must check if we have a matching forced or ignored
+	 * persistence rule, and report that in the stream.
+	 */
+	list_for_each_entry(prst_rule, &s->be->persist_rules, list) {
+		int ret = 1;
+
+		if (prst_rule->cond) {
+			ret = acl_exec_cond(prst_rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (prst_rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			/* no rule, or the rule matches */
+			if (prst_rule->type == PERSIST_TYPE_FORCE) {
+				s->flags |= SF_FORCE_PRST;
+			} else {
+				s->flags |= SF_IGNORE_PRST;
+			}
+			break;
+		}
+	}
+
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+	return 1;
+
+ sw_failed:
+	/* immediately abort this request in case of allocation failure */
+	stream_abort(s);
+
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_RESOURCE;
+	if (!(s->flags & SF_FINST_MASK))
+		s->flags |= SF_FINST_R;
+
+	if (s->txn)
+		s->txn->status = 500;
+	s->req.analysers &= AN_REQ_FLT_END;
+	s->req.analyse_exp = TICK_ETERNITY;
+	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_STRM_ERR, s);
+	return 0;
+}
+
+/* This stream analyser works on a request. It applies all use-server rules on
+ * it then returns 1. The data must already be present in the buffer otherwise
+ * they won't match. It always returns 1.
+ */
+static int process_server_rules(struct stream *s, struct channel *req, int an_bit)
+{
+	struct proxy *px = s->be;
+	struct session *sess = s->sess;
+	struct server_rule *rule;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
+
+	/* skip everything if a server was already assigned (e.g. by persistence) */
+	if (!(s->flags & SF_ASSIGNED)) {
+		list_for_each_entry(rule, &px->server_rules, list) {
+			int ret;
+
+			ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+
+			if (ret) {
+				struct server *srv;
+
+				if (rule->dynamic) {
+					/* server name is a log-format expression to evaluate */
+					struct buffer *tmp = get_trash_chunk();
+
+					if (!build_logline(s, tmp->area, tmp->size, &rule->expr))
+						break;
+
+					srv = findserver(s->be, tmp->area);
+					if (!srv)
+						break;
+				}
+				else
+					srv = rule->srv.ptr;
+
+				/* a stopped server is still usable with "option persist"
+				 * or when a forced-persistence rule matched earlier.
+				 */
+				if ((srv->cur_state != SRV_ST_STOPPED) ||
+				    (px->options & PR_O_PERSIST) ||
+				    (s->flags & SF_FORCE_PRST)) {
+					s->flags |= SF_DIRECT | SF_ASSIGNED;
+					s->target = &srv->obj_type;
+					break;
+				}
+				/* if the server is not UP, let's go on with next rules
+				 * just in case another one is suited.
+				 */
+			}
+		}
+	}
+
+	req->analysers &= ~an_bit;
+	req->analyse_exp = TICK_ETERNITY;
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+	return 1;
+}
+
+/* Try to assign the server stored in stick-table entry <ts> of table <t> as
+ * the stream's target. The server is first looked up by its stored key (name
+ * or address depending on the table's server_key_type), then by its stored
+ * numeric ID as a fallback. The target is only set if the server is usable
+ * (not stopped, or persistence is forced/enabled).
+ */
+static inline void sticking_rule_find_target(struct stream *s,
+                                             struct stktable *t, struct stksess *ts)
+{
+	struct proxy *px = s->be;
+	struct eb32_node *node;
+	struct dict_entry *de;
+	void *ptr;
+	struct server *srv;
+
+	/* Look for the server name previously stored in <t> stick-table */
+	HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+	ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
+	de = stktable_data_cast(ptr, std_t_dict);
+	HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+	if (de) {
+		/* NOTE: this <node> intentionally shadows the eb32_node above;
+		 * key lookups use ebpt (pointer-keyed) trees.
+		 */
+		struct ebpt_node *node;
+
+		if (t->server_key_type == STKTABLE_SRV_NAME) {
+			node = ebis_lookup(&px->conf.used_server_name, de->value.key);
+			if (node) {
+				srv = container_of(node, struct server, conf.name);
+				goto found;
+			}
+		} else if (t->server_key_type == STKTABLE_SRV_ADDR) {
+			/* the address tree may change at runtime, hence the proxy lock */
+			HA_RWLOCK_RDLOCK(PROXY_LOCK, &px->lock);
+			node = ebis_lookup(&px->used_server_addr, de->value.key);
+			HA_RWLOCK_RDUNLOCK(PROXY_LOCK, &px->lock);
+			if (node) {
+				srv = container_of(node, struct server, addr_node);
+				goto found;
+			}
+		}
+	}
+
+	/* Look for the server ID */
+	HA_RWLOCK_RDLOCK(STK_SESS_LOCK, &ts->lock);
+	ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
+	node = eb32_lookup(&px->conf.used_server_id, stktable_data_cast(ptr, std_t_sint));
+	HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+	if (!node)
+		return;
+
+	srv = container_of(node, struct server, conf.id);
+ found:
+	/* only stick to a usable server (or when persistence is forced) */
+	if ((srv->cur_state != SRV_ST_STOPPED) ||
+	    (px->options & PR_O_PERSIST) || (s->flags & SF_FORCE_PRST)) {
+		s->flags |= SF_DIRECT | SF_ASSIGNED;
+		s->target = &srv->obj_type;
+	}
+}
+
+/* This stream analyser works on a request. It applies all sticking rules on
+ * it then returns 1. The data must already be present in the buffer otherwise
+ * they won't match. It always returns 1.
+ */
+static int process_sticking_rules(struct stream *s, struct channel *req, int an_bit)
+{
+	struct proxy *px = s->be;
+	struct session *sess = s->sess;
+	struct sticking_rule *rule;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
+
+	list_for_each_entry(rule, &px->sticking_rules, list) {
+		int ret = 1 ;
+		int i;
+
+		/* Only the first stick store-request of each table is applied
+		 * and other ones are ignored. The purpose is to allow complex
+		 * configurations which look for multiple entries by decreasing
+		 * order of precision and to stop at the first which matches.
+		 * An example could be a store of the IP address from an HTTP
+		 * header first, then from the source if not found.
+		 */
+		if (rule->flags & STK_IS_STORE) {
+			for (i = 0; i < s->store_count; i++) {
+				if (rule->table.t == s->store[i].table)
+					break;
+			}
+
+			/* an entry for this table is already pending, skip the rule */
+			if (i != s->store_count)
+				continue;
+		}
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			struct stktable_key *key;
+
+			key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->expr, NULL);
+			if (!key)
+				continue;
+
+			if (rule->flags & STK_IS_MATCH) {
+				struct stksess *ts;
+
+				if ((ts = stktable_lookup_key(rule->table.t, key)) != NULL) {
+					/* only pick the stored server if none was assigned yet */
+					if (!(s->flags & SF_ASSIGNED))
+						sticking_rule_find_target(s, rule->table.t, ts);
+					stktable_touch_local(rule->table.t, ts, 1);
+				}
+			}
+			if (rule->flags & STK_IS_STORE) {
+				if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
+					struct stksess *ts;
+
+					/* defer the actual store until the response (process_store_rules) */
+					ts = stksess_new(rule->table.t, key);
+					if (ts) {
+						s->store[s->store_count].table = rule->table.t;
+						s->store[s->store_count++].ts = ts;
+					}
+				}
+			}
+		}
+	}
+
+	req->analysers &= ~an_bit;
+	req->analyse_exp = TICK_ETERNITY;
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+	return 1;
+}
+
+/* This stream analyser works on a response. It applies all store rules on it
+ * then returns 1. The data must already be present in the buffer otherwise
+ * they won't match. It always returns 1.
+ */
+static int process_store_rules(struct stream *s, struct channel *rep, int an_bit)
+{
+	struct proxy *px = s->be;
+	struct session *sess = s->sess;
+	struct sticking_rule *rule;
+	int i;
+	int nbreq = s->store_count; /* entries queued by the request-side rules */
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA, s);
+
+	list_for_each_entry(rule, &px->storersp_rules, list) {
+		int ret = 1 ;
+
+		/* Only the first stick store-response of each table is applied
+		 * and other ones are ignored. The purpose is to allow complex
+		 * configurations which look for multiple entries by decreasing
+		 * order of precision and to stop at the first which matches.
+		 * An example could be a store of a set-cookie value, with a
+		 * fallback to a parameter found in a 302 redirect.
+		 *
+		 * The store-response rules are not allowed to override the
+		 * store-request rules for the same table, but they may coexist.
+		 * Thus we can have up to one store-request entry and one store-
+		 * response entry for the same table at any time.
+		 */
+		for (i = nbreq; i < s->store_count; i++) {
+			if (rule->table.t == s->store[i].table)
+				break;
+		}
+
+		/* skip existing entries for this table */
+		if (i < s->store_count)
+			continue;
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			struct stktable_key *key;
+
+			key = stktable_fetch_key(rule->table.t, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL, rule->expr, NULL);
+			if (!key)
+				continue;
+
+			if (s->store_count < (sizeof(s->store) / sizeof(s->store[0]))) {
+				struct stksess *ts;
+
+				ts = stksess_new(rule->table.t, key);
+				if (ts) {
+					s->store[s->store_count].table = rule->table.t;
+					s->store[s->store_count++].ts = ts;
+				}
+			}
+		}
+	}
+
+	/* process store request and store response: commit all queued entries
+	 * into their tables and record the chosen server's ID and key.
+	 */
+	for (i = 0; i < s->store_count; i++) {
+		struct stksess *ts;
+		void *ptr;
+		char *key;
+		struct dict_entry *de;
+		struct stktable *t = s->store[i].table;
+
+		/* nothing to store when no server was used or it opts out of stickiness */
+		if (!objt_server(s->target) || (__objt_server(s->target)->flags & SRV_F_NON_STICK)) {
+			stksess_free(s->store[i].table, s->store[i].ts);
+			s->store[i].ts = NULL;
+			continue;
+		}
+
+		ts = stktable_set_entry(t, s->store[i].ts);
+		if (ts != s->store[i].ts) {
+			/* the entry already existed, we can free ours */
+			stksess_free(t, s->store[i].ts);
+		}
+		s->store[i].ts = NULL;
+
+		if (t->server_key_type == STKTABLE_SRV_NAME)
+			key = __objt_server(s->target)->id;
+		else if (t->server_key_type == STKTABLE_SRV_ADDR)
+			key = __objt_server(s->target)->addr_node.key;
+		else
+			key = NULL;
+
+		HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
+		ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_ID);
+		stktable_data_cast(ptr, std_t_sint) = __objt_server(s->target)->puid;
+
+		if (key) {
+			de = dict_insert(&server_key_dict, key);
+			if (de) {
+				ptr = __stktable_data_ptr(t, ts, STKTABLE_DT_SERVER_KEY);
+				stktable_data_cast(ptr, std_t_dict) = de;
+			}
+		}
+
+		HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
+
+		stktable_touch_local(t, ts, 1);
+	}
+	s->store_count = 0; /* everything is stored */
+
+	rep->analysers &= ~an_bit;
+	rep->analyse_exp = TICK_ETERNITY;
+
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA, s);
+	return 1;
+}
+
+/* Set the stream to HTTP mode, if necessary. The minimal request HTTP analysers
+ * are set and the client mux is upgraded. It returns 1 if the stream processing
+ * may continue or 0 if it should be stopped. It happens on error or if the
+ * upgrade required a new stream. The mux protocol may be specified.
+ */
+int stream_set_http_mode(struct stream *s, const struct mux_proto_list *mux_proto)
+{
+	struct stconn *sc = s->scf;
+	struct connection *conn;
+
+	/* Already an HTTP stream */
+	if (IS_HTX_STRM(s))
+		return 1;
+
+	s->req.analysers |= AN_REQ_WAIT_HTTP|AN_REQ_HTTP_PROCESS_FE;
+
+	/* an HTTP transaction is required from now on */
+	if (unlikely(!s->txn && !http_create_txn(s)))
+		return 0;
+
+	conn = sc_conn(sc);
+	if (conn) {
+		se_have_more_data(s->scf->sedesc);
+		/* Make sure we're unsubscribed, the new
+		 * mux will probably want to subscribe to
+		 * the underlying XPRT
+		 */
+		if (s->scf->wait_event.events)
+			conn->mux->unsubscribe(sc, s->scf->wait_event.events, &(s->scf->wait_event));
+
+		/* current mux refuses protocol upgrades */
+		if (conn->mux->flags & MX_FL_NO_UPG)
+			return 0;
+
+		sc_conn_prepare_endp_upgrade(sc);
+		if (conn_upgrade_mux_fe(conn, sc, &s->req.buf,
+					(mux_proto ? mux_proto->token : ist("")),
+					PROTO_MODE_HTTP) == -1) {
+			sc_conn_abort_endp_upgrade(sc);
+			return 0;
+		}
+		sc_conn_commit_endp_upgrade(sc);
+
+		s->req.flags &= ~(CF_READ_EVENT|CF_AUTO_CONNECT);
+		s->req.total = 0;
+		s->flags |= SF_IGNORE;
+		if (sc_ep_test(sc, SE_FL_DETACHED)) {
+			/* If stream connector is detached, it means it was not
+			 * reused by the new mux. So destroy it, disable
+			 * logging, and abort the stream process. Thus the
+			 * stream will be silently destroyed. The new mux will
+			 * create new streams.
+			 */
+			s->logs.logwait = 0;
+			s->logs.level = 0;
+			stream_abort(s);
+			s->req.analysers &= AN_REQ_FLT_END;
+			s->req.analyse_exp = TICK_ETERNITY;
+		}
+	}
+
+	return 1;
+}
+
+
+/* Updates at once the channel flags, and timers of both stream connectors of a
+ * same stream, to complete the work after the analysers, then updates the data
+ * layer below. This will ensure that any synchronous update performed at the
+ * data layer will be reflected in the channel flags and/or stream connector.
+ * Note that this does not change the stream connector's current state, though
+ * it updates the previous state to the current one.
+ */
+void stream_update_both_sc(struct stream *s)
+{
+	struct stconn *scf = s->scf;
+	struct stconn *scb = s->scb;
+	struct channel *req = &s->req;
+	struct channel *res = &s->res;
+
+	/* read/write events were consumed by this processing round */
+	req->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
+	res->flags &= ~(CF_READ_EVENT|CF_WRITE_EVENT);
+
+	s->prev_conn_state = scb->state;
+
+	/* let's recompute both sides states */
+	if (sc_state_in(scf->state, SC_SB_RDY|SC_SB_EST))
+		sc_update(scf);
+
+	if (sc_state_in(scb->state, SC_SB_RDY|SC_SB_EST))
+		sc_update(scb);
+
+	/* stream connectors are processed outside of process_stream() and must be
+	 * handled at the latest moment.
+	 */
+	if (sc_appctx(scf)) {
+		if (sc_is_recv_allowed(scf) || sc_is_send_allowed(scf))
+			appctx_wakeup(__sc_appctx(scf));
+	}
+	if (sc_appctx(scb)) {
+		if (sc_is_recv_allowed(scb) || sc_is_send_allowed(scb))
+			appctx_wakeup(__sc_appctx(scb));
+	}
+}
+
+/* check SC and channel timeouts, and close the corresponding stream connectors
+ * for future reads or writes.
+ * Note: this will also concern upper layers but we do not touch any other
+ * flag. We must be careful and correctly detect state changes when calling
+ * them.
+ */
+static void stream_handle_timeouts(struct stream *s)
+{
+	stream_check_conn_timeout(s);
+
+	sc_check_timeouts(s->scf);
+	channel_check_timeout(&s->req);
+	sc_check_timeouts(s->scb);
+	channel_check_timeout(&s->res);
+
+	/* request write timeout: shut the server side down without lingering */
+	if (unlikely(!(s->scb->flags & SC_FL_SHUT_DONE) && (s->req.flags & CF_WRITE_TIMEOUT))) {
+		s->scb->flags |= SC_FL_NOLINGER;
+		sc_shutdown(s->scb);
+	}
+
+	/* request read timeout: abort the client side */
+	if (unlikely(!(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->req.flags & CF_READ_TIMEOUT))) {
+		if (s->scf->flags & SC_FL_NOHALF)
+			s->scf->flags |= SC_FL_NOLINGER;
+		sc_abort(s->scf);
+	}
+	/* response write timeout: shut the client side down without lingering */
+	if (unlikely(!(s->scf->flags & SC_FL_SHUT_DONE) && (s->res.flags & CF_WRITE_TIMEOUT))) {
+		s->scf->flags |= SC_FL_NOLINGER;
+		sc_shutdown(s->scf);
+	}
+
+	/* response read timeout: abort the server side */
+	if (unlikely(!(s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->res.flags & CF_READ_TIMEOUT))) {
+		if (s->scb->flags & SC_FL_NOHALF)
+			s->scb->flags |= SC_FL_NOLINGER;
+		sc_abort(s->scb);
+	}
+
+	/* let filters handle their own timeouts too */
+	if (HAS_FILTERS(s))
+		flt_stream_check_timeouts(s);
+}
+
+/* if the current task's wake_date was set, it's being profiled, thus we may
+ * report latencies and CPU usages in logs, so it's desirable to update the
+ * latency when entering process_stream().
+ */
+static void stream_cond_update_cpu_latency(struct stream *s)
+{
+	/* NOTE(review): unlike stream_cond_update_cpu_usage() there is no
+	 * !sched_wake_date guard here — presumably the caller or scheduler
+	 * guarantees a meaningful wake date; confirm before relying on it.
+	 */
+	uint32_t lat = th_ctx->sched_call_date - th_ctx->sched_wake_date;
+
+	s->lat_time += lat;
+}
+
+/* if the current task's wake_date was set, it's being profiled, thus we may
+ * report latencies and CPU usages in logs, so it's desirable to do that before
+ * logging in order to report accurate CPU usage. In this case we count that
+ * final part and reset the wake date so that the scheduler doesn't do it a
+ * second time, and by doing so we also avoid an extra call to clock_gettime().
+ * The CPU usage will be off by the little time needed to run over stream_free()
+ * but that's only marginal.
+ */
+static void stream_cond_update_cpu_usage(struct stream *s)
+{
+	uint32_t cpu;
+
+	/* stats are only registered for non-zero wake dates */
+	if (likely(!th_ctx->sched_wake_date))
+		return;
+
+	cpu = (uint32_t)now_mono_time() - th_ctx->sched_call_date;
+	s->cpu_time += cpu;
+	HA_ATOMIC_ADD(&th_ctx->sched_profile_entry->cpu_time, cpu);
+	th_ctx->sched_wake_date = 0; /* prevent the scheduler from counting it again */
+}
+
+/* this function is called directly by the scheduler for tasks whose
+ * ->process points to process_stream(), and is used to keep latencies
+ * and CPU usage measurements accurate.
+ */
+void stream_update_timings(struct task *t, uint64_t lat, uint64_t cpu)
+{
+	struct stream *s = t->context;
+	s->lat_time += lat;
+	s->cpu_time += cpu;
+}
+
+
+/* This macro is very specific to the function below. See the comments in
+ * process_stream() below to understand the logic and the tests.
+ */
+#define UPDATE_ANALYSERS(real, list, back, flag) {			\
+		list = (((list) & ~(flag)) | ~(back)) & (real);		\
+		back = real;						\
+		if (!(list))						\
+			break;						\
+		if (((list) ^ ((list) & ((list) - 1))) < (flag))	\
+			continue;					\
+	}
+
+/* These 2 following macros call an analyzer for the specified channel if the
+ * right flag is set. The first one is used for "filterable" analyzers. If a
+ * stream has some registered filters, pre and post analyze callbacks are
+ * called. The second are used for other analyzers (AN_REQ/RES_FLT_* and
+ * AN_REQ/RES_HTTP_XFER_BODY) */
+#define FLT_ANALYZE(strm, chn, fun, list, back, flag, ...)			\
+	{									\
+		if ((list) & (flag)) {						\
+			if (HAS_FILTERS(strm)) {				\
+				if (!flt_pre_analyze((strm), (chn), (flag)))	\
+					break;					\
+				if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
+					break;					\
+				if (!flt_post_analyze((strm), (chn), (flag)))	\
+					break;					\
+			}							\
+			else {							\
+				if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
+					break;					\
+			}							\
+			UPDATE_ANALYSERS((chn)->analysers, (list),		\
+					 (back), (flag));			\
+		}								\
+	}
+
+#define ANALYZE(strm, chn, fun, list, back, flag, ...)			\
+	{								\
+		if ((list) & (flag)) {					\
+			if (!fun((strm), (chn), (flag), ##__VA_ARGS__))	\
+				break;					\
+			UPDATE_ANALYSERS((chn)->analysers, (list),	\
+					 (back), (flag));		\
+		}							\
+	}
+
+/* Processes the client, server, request and response jobs of a stream task,
+ * then puts it back to the wait queue in a clean state, or cleans up its
+ * resources if it must be deleted. Returns in <next> the date the task wants
+ * to be woken up, or TICK_ETERNITY. In order not to call all functions for
+ * nothing too many times, the request and response buffers flags are monitored
+ * and each function is called only if at least another function has changed at
+ * least one flag it is interested in.
+ */
+struct task *process_stream(struct task *t, void *context, unsigned int state)
+{
+ struct server *srv;
+ struct stream *s = context;
+ struct session *sess = s->sess;
+ unsigned int scf_flags, scb_flags;
+ unsigned int rqf_last, rpf_last;
+ unsigned int rq_prod_last, rq_cons_last;
+ unsigned int rp_cons_last, rp_prod_last;
+ unsigned int req_ana_back, res_ana_back;
+ struct channel *req, *res;
+ struct stconn *scf, *scb;
+ unsigned int rate;
+
+ DBG_TRACE_ENTER(STRM_EV_STRM_PROC, s);
+
+ activity[tid].stream_calls++;
+ stream_cond_update_cpu_latency(s);
+
+ req = &s->req;
+ res = &s->res;
+
+ scf = s->scf;
+ scb = s->scb;
+
+ /* First, attempt to receive pending data from I/O layers */
+ sc_conn_sync_recv(scf);
+ sc_conn_sync_recv(scb);
+
+ /* Let's check if we're looping without making any progress, e.g. due
+ * to a bogus analyser or the fact that we're ignoring a read0. The
+ * call_rate counter only counts calls with no progress made.
+ */
+ if (!((req->flags | res->flags) & (CF_READ_EVENT|CF_WRITE_EVENT))) {
+ rate = update_freq_ctr(&s->call_rate, 1);
+ if (rate >= 100000 && s->call_rate.prev_ctr) // make sure to wait at least a full second
+ stream_dump_and_crash(&s->obj_type, read_freq_ctr(&s->call_rate));
+ }
+
+ /* this data may be no longer valid, clear it */
+ if (s->txn)
+ memset(&s->txn->auth, 0, sizeof(s->txn->auth));
+
+ /* This flag must explicitly be set every time */
+ req->flags &= ~CF_WAKE_WRITE;
+ res->flags &= ~CF_WAKE_WRITE;
+
+ /* Keep a copy of req/rep flags so that we can detect shutdowns */
+ rqf_last = req->flags & ~CF_MASK_ANALYSER;
+ rpf_last = res->flags & ~CF_MASK_ANALYSER;
+
+ /* we don't want the stream connector functions to recursively wake us up */
+ scf->flags |= SC_FL_DONT_WAKE;
+ scb->flags |= SC_FL_DONT_WAKE;
+
+ /* Keep a copy of SC flags */
+ scf_flags = scf->flags;
+ scb_flags = scb->flags;
+
+ /* update pending events */
+ s->pending_events |= (state & TASK_WOKEN_ANY);
+
+ /* 1a: Check for low level timeouts if needed. We just set a flag on
+ * stream connectors when their timeouts have expired.
+ */
+ if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
+ stream_handle_timeouts(s);
+
+ /* Once in a while we're woken up because the task expires. But
+ * this does not necessarily mean that a timeout has been reached.
+ * So let's not run a whole stream processing if only an expiration
+ * timeout needs to be refreshed.
+ */
+ if (!((scf->flags | scb->flags) & (SC_FL_ERROR|SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) &&
+ !((req->flags | res->flags) & (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
+ !(s->flags & SF_CONN_EXP) &&
+ ((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
+ scf->flags &= ~SC_FL_DONT_WAKE;
+ scb->flags &= ~SC_FL_DONT_WAKE;
+ goto update_exp_and_leave;
+ }
+ }
+
+ resync_stconns:
+ /* below we may emit error messages so we have to ensure that we have
+ * our buffers properly allocated. If the allocation failed, an error is
+ * triggered.
+ *
+ * NOTE: An error is returned because the mechanism to queue entities
+ * waiting for a buffer is totally broken for now. However, this
+ * part must be refactored. When it will be handled, this part
+ * must be be reviewed too.
+ */
+ if (!stream_alloc_work_buffer(s)) {
+ scf->flags |= SC_FL_ERROR;
+ s->conn_err_type = STRM_ET_CONN_RES;
+
+ scb->flags |= SC_FL_ERROR;
+ s->conn_err_type = STRM_ET_CONN_RES;
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_RESOURCE;
+ sess_set_term_flags(s);
+ }
+
+ /* 1b: check for low-level errors reported at the stream connector.
+ * First we check if it's a retryable error (in which case we don't
+ * want to tell the buffer). Otherwise we report the error one level
+ * upper by setting flags into the buffers. Note that the side towards
+ * the client cannot have connect (hence retryable) errors. Also, the
+ * connection setup code must be able to deal with any type of abort.
+ */
+ srv = objt_server(s->target);
+ if (unlikely(scf->flags & SC_FL_ERROR)) {
+ if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS)) {
+ sc_abort(scf);
+ sc_shutdown(scf);
+ //sc_report_error(scf); TODO: Be sure it is useless
+ if (!(req->analysers) && !(res->analysers)) {
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.cli_aborts);
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_CLICL;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= SF_FINST_D;
+ }
+ }
+ }
+
+ if (unlikely(scb->flags & SC_FL_ERROR)) {
+ if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
+ sc_abort(scb);
+ sc_shutdown(scb);
+ //sc_report_error(scb); TODO: Be sure it is useless
+ _HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.failed_resp);
+ if (!(req->analysers) && !(res->analysers)) {
+ _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.srv_aborts);
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= SF_ERR_SRVCL;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= SF_FINST_D;
+ }
+ }
+ /* note: maybe we should process connection errors here ? */
+ }
+
+ if (sc_state_in(scb->state, SC_SB_CON|SC_SB_RDY)) {
+ /* we were trying to establish a connection on the server side,
+ * maybe it succeeded, maybe it failed, maybe we timed out, ...
+ */
+ if (scb->state == SC_ST_RDY)
+ back_handle_st_rdy(s);
+ else if (s->scb->state == SC_ST_CON)
+ back_handle_st_con(s);
+
+ if (scb->state == SC_ST_CER)
+ back_handle_st_cer(s);
+ else if (scb->state == SC_ST_EST)
+ back_establish(s);
+
+ /* state is now one of SC_ST_CON (still in progress), SC_ST_EST
+ * (established), SC_ST_DIS (abort), SC_ST_CLO (last error),
+ * SC_ST_ASS/SC_ST_TAR/SC_ST_REQ for retryable errors.
+ */
+ }
+
+ rq_prod_last = scf->state;
+ rq_cons_last = scb->state;
+ rp_cons_last = scf->state;
+ rp_prod_last = scb->state;
+
+ /* Check for connection closure */
+ DBG_TRACE_POINT(STRM_EV_STRM_PROC, s);
+
+ /* nothing special to be done on client side */
+ if (unlikely(scf->state == SC_ST_DIS)) {
+ scf->state = SC_ST_CLO;
+
+ /* This is needed only when debugging is enabled, to indicate
+ * client-side close.
+ */
+ if (unlikely((global.mode & MODE_DEBUG) &&
+ (!(global.mode & MODE_QUIET) ||
+ (global.mode & MODE_VERBOSE)))) {
+ chunk_printf(&trash, "%08x:%s.clicls[%04x:%04x]\n",
+ s->uniq_id, s->be->id,
+ (unsigned short)conn_fd(sc_conn(scf)),
+ (unsigned short)conn_fd(sc_conn(scb)));
+ DISGUISE(write(1, trash.area, trash.data));
+ }
+ }
+
+ /* When a server-side connection is released, we have to count it and
+ * check for pending connections on this server.
+ */
+ if (unlikely(scb->state == SC_ST_DIS)) {
+ scb->state = SC_ST_CLO;
+ srv = objt_server(s->target);
+ if (srv) {
+ if (s->flags & SF_CURR_SESS) {
+ s->flags &= ~SF_CURR_SESS;
+ _HA_ATOMIC_DEC(&srv->cur_sess);
+ }
+ sess_change_server(s, NULL);
+ if (may_dequeue_tasks(srv, s->be))
+ process_srv_queue(srv);
+ }
+
+ /* This is needed only when debugging is enabled, to indicate
+ * server-side close.
+ */
+ if (unlikely((global.mode & MODE_DEBUG) &&
+ (!(global.mode & MODE_QUIET) ||
+ (global.mode & MODE_VERBOSE)))) {
+ if (s->prev_conn_state == SC_ST_EST) {
+ chunk_printf(&trash, "%08x:%s.srvcls[%04x:%04x]\n",
+ s->uniq_id, s->be->id,
+ (unsigned short)conn_fd(sc_conn(scf)),
+ (unsigned short)conn_fd(sc_conn(scb)));
+ DISGUISE(write(1, trash.area, trash.data));
+ }
+ }
+ }
+
+ /*
+ * Note: of the transient states (REQ, CER, DIS), only REQ may remain
+ * at this point.
+ */
+
+ resync_request:
+ /* Analyse request */
+ if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
+ ((scb->flags ^ scb_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
+ (req->analysers && (scb->flags & SC_FL_SHUT_DONE)) ||
+ scf->state != rq_prod_last ||
+ scb->state != rq_cons_last ||
+ s->pending_events & TASK_WOKEN_MSG) {
+ unsigned int scf_flags_ana = scf->flags;
+ unsigned int scb_flags_ana = scb->flags;
+
+ if (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
+ int max_loops = global.tune.maxpollevents;
+ unsigned int ana_list;
+ unsigned int ana_back;
+
+ /* it's up to the analysers to stop new connections,
+ * disable reading or closing. Note: if an analyser
+ * disables any of these bits, it is responsible for
+ * enabling them again when it disables itself, so
+ * that other analysers are called in similar conditions.
+ */
+ channel_auto_read(req);
+ channel_auto_connect(req);
+ channel_auto_close(req);
+
+ /* We will call all analysers for which a bit is set in
+ * req->analysers, following the bit order from LSB
+ * to MSB. The analysers must remove themselves from
+ * the list when not needed. Any analyser may return 0
+ * to break out of the loop, either because of missing
+ * data to take a decision, or because it decides to
+ * kill the stream. We loop at least once through each
+ * analyser, and we may loop again if other analysers
+ * are added in the middle.
+ *
+ * We build a list of analysers to run. We evaluate all
+ * of these analysers in the order of the lower bit to
+ * the higher bit. This ordering is very important.
+ * An analyser will often add/remove other analysers,
+ * including itself. Any changes to itself have no effect
+ * on the loop. If it removes any other analysers, we
+ * want those analysers not to be called anymore during
+ * this loop. If it adds an analyser that is located
+ * after itself, we want it to be scheduled for being
+ * processed during the loop. If it adds an analyser
+ * which is located before it, we want it to switch to
+ * it immediately, even if it has already been called
+ * once but removed since.
+ *
+ * In order to achieve this, we compare the analyser
+ * list after the call with a copy of it before the
+ * call. The work list is fed with analyser bits that
+ * appeared during the call. Then we compare previous
+ * work list with the new one, and check the bits that
+ * appeared. If the lowest of these bits is lower than
+ * the current bit, it means we have enabled a previous
+ * analyser and must immediately loop again.
+ */
+
+ ana_list = ana_back = req->analysers;
+ while (ana_list && max_loops--) {
+ /* Warning! ensure that analysers are always placed in ascending order! */
+ ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_FE);
+ FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_FE);
+ FLT_ANALYZE(s, req, http_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_HTTP);
+ FLT_ANALYZE(s, req, http_wait_for_request_body, ana_list, ana_back, AN_REQ_HTTP_BODY);
+ FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_FE, sess->fe);
+ FLT_ANALYZE(s, req, process_switching_rules, ana_list, ana_back, AN_REQ_SWITCHING_RULES);
+ ANALYZE (s, req, flt_start_analyze, ana_list, ana_back, AN_REQ_FLT_START_BE);
+ FLT_ANALYZE(s, req, tcp_inspect_request, ana_list, ana_back, AN_REQ_INSPECT_BE);
+ FLT_ANALYZE(s, req, http_process_req_common, ana_list, ana_back, AN_REQ_HTTP_PROCESS_BE, s->be);
+ FLT_ANALYZE(s, req, http_process_tarpit, ana_list, ana_back, AN_REQ_HTTP_TARPIT);
+ FLT_ANALYZE(s, req, process_server_rules, ana_list, ana_back, AN_REQ_SRV_RULES);
+ FLT_ANALYZE(s, req, http_process_request, ana_list, ana_back, AN_REQ_HTTP_INNER);
+ FLT_ANALYZE(s, req, tcp_persist_rdp_cookie, ana_list, ana_back, AN_REQ_PRST_RDP_COOKIE);
+ FLT_ANALYZE(s, req, process_sticking_rules, ana_list, ana_back, AN_REQ_STICKING_RULES);
+ ANALYZE (s, req, flt_analyze_http_headers, ana_list, ana_back, AN_REQ_FLT_HTTP_HDRS);
+ ANALYZE (s, req, http_request_forward_body, ana_list, ana_back, AN_REQ_HTTP_XFER_BODY);
+ ANALYZE (s, req, pcli_wait_for_request, ana_list, ana_back, AN_REQ_WAIT_CLI);
+ ANALYZE (s, req, flt_xfer_data, ana_list, ana_back, AN_REQ_FLT_XFER_DATA);
+ ANALYZE (s, req, flt_end_analyze, ana_list, ana_back, AN_REQ_FLT_END);
+ break;
+ }
+ }
+
+ rq_prod_last = scf->state;
+ rq_cons_last = scb->state;
+ req->flags &= ~CF_WAKE_ONCE;
+ rqf_last = req->flags;
+ scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
+ scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
+
+ if (((scf->flags ^ scf_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUT_DONE))
+ goto resync_request;
+ }
+
+ /* we'll monitor the request analysers while parsing the response,
+ * because some response analysers may indirectly enable new request
+ * analysers (eg: HTTP keep-alive).
+ */
+ req_ana_back = req->analysers;
+
+ resync_response:
+ /* Analyse response */
+
+ if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
+ ((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
+ (res->analysers && (scf->flags & SC_FL_SHUT_DONE)) ||
+ scf->state != rp_cons_last ||
+ scb->state != rp_prod_last ||
+ s->pending_events & TASK_WOKEN_MSG) {
+ unsigned int scb_flags_ana = scb->flags;
+ unsigned int scf_flags_ana = scf->flags;
+
+ if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) {
+ int max_loops = global.tune.maxpollevents;
+ unsigned int ana_list;
+ unsigned int ana_back;
+
+ /* it's up to the analysers to stop disable reading or
+ * closing. Note: if an analyser disables any of these
+ * bits, it is responsible for enabling them again when
+ * it disables itself, so that other analysers are called
+ * in similar conditions.
+ */
+ channel_auto_read(res);
+ channel_auto_close(res);
+
+ /* We will call all analysers for which a bit is set in
+ * res->analysers, following the bit order from LSB
+ * to MSB. The analysers must remove themselves from
+ * the list when not needed. Any analyser may return 0
+ * to break out of the loop, either because of missing
+ * data to take a decision, or because it decides to
+ * kill the stream. We loop at least once through each
+ * analyser, and we may loop again if other analysers
+ * are added in the middle.
+ */
+
+ ana_list = ana_back = res->analysers;
+ while (ana_list && max_loops--) {
+ /* Warning! ensure that analysers are always placed in ascending order! */
+ ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_FE);
+ ANALYZE (s, res, flt_start_analyze, ana_list, ana_back, AN_RES_FLT_START_BE);
+ FLT_ANALYZE(s, res, tcp_inspect_response, ana_list, ana_back, AN_RES_INSPECT);
+ FLT_ANALYZE(s, res, http_wait_for_response, ana_list, ana_back, AN_RES_WAIT_HTTP);
+ FLT_ANALYZE(s, res, process_store_rules, ana_list, ana_back, AN_RES_STORE_RULES);
+ FLT_ANALYZE(s, res, http_process_res_common, ana_list, ana_back, AN_RES_HTTP_PROCESS_BE, s->be);
+ ANALYZE (s, res, flt_analyze_http_headers, ana_list, ana_back, AN_RES_FLT_HTTP_HDRS);
+ ANALYZE (s, res, http_response_forward_body, ana_list, ana_back, AN_RES_HTTP_XFER_BODY);
+ ANALYZE (s, res, pcli_wait_for_response, ana_list, ana_back, AN_RES_WAIT_CLI);
+ ANALYZE (s, res, flt_xfer_data, ana_list, ana_back, AN_RES_FLT_XFER_DATA);
+ ANALYZE (s, res, flt_end_analyze, ana_list, ana_back, AN_RES_FLT_END);
+ break;
+ }
+ }
+
+ rp_cons_last = scf->state;
+ rp_prod_last = scb->state;
+ res->flags &= ~CF_WAKE_ONCE;
+ rpf_last = res->flags;
+ scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
+ scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
+
+ if (((scb->flags ^ scb_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUT_DONE))
+ goto resync_response;
+ }
+
+ /* we'll monitor the response analysers because some response analysers
+ * may be enabled/disabled later
+ */
+ res_ana_back = res->analysers;
+
+ /* maybe someone has added some request analysers, so we must check and loop */
+ if (req->analysers & ~req_ana_back)
+ goto resync_request;
+
+ if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
+ goto resync_request;
+
+ /* FIXME: here we should call protocol handlers which rely on
+ * both buffers.
+ */
+
+
+ /*
+ * Now we propagate unhandled errors to the stream. Normally
+ * we're just in a data phase here since it means we have not
+ * seen any analyser who could set an error status.
+ */
+ srv = objt_server(s->target);
+ if (unlikely(!(s->flags & SF_ERR_MASK))) {
+ if ((scf->flags & SC_FL_ERROR) || req->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
+ /* Report it if the client got an error or a read timeout expired */
+ req->analysers &= AN_REQ_FLT_END;
+ channel_auto_close(req);
+ if (scf->flags & SC_FL_ERROR) {
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.cli_aborts);
+ s->flags |= SF_ERR_CLICL;
+ }
+ else if (req->flags & CF_READ_TIMEOUT) {
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.cli_aborts);
+ s->flags |= SF_ERR_CLITO;
+ }
+ else {
+ _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.srv_aborts);
+ s->flags |= SF_ERR_SRVTO;
+ }
+ sess_set_term_flags(s);
+
+ /* Abort the request if a client error occurred while
+ * the backend stream connector is in the SC_ST_INI
+ * state. It is switched into the SC_ST_CLO state and
+ * the request channel is erased. */
+ if (scb->state == SC_ST_INI) {
+ s->scb->state = SC_ST_CLO;
+ channel_abort(req);
+ if (IS_HTX_STRM(s))
+ channel_htx_erase(req, htxbuf(&req->buf));
+ else
+ channel_erase(req);
+ }
+ }
+ else if ((scb->flags & SC_FL_ERROR) || res->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) {
+ /* Report it if the server got an error or a read timeout expired */
+ res->analysers &= AN_RES_FLT_END;
+ channel_auto_close(res);
+ if (scb->flags & SC_FL_ERROR) {
+ _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.srv_aborts);
+ s->flags |= SF_ERR_SRVCL;
+ }
+ else if (res->flags & CF_READ_TIMEOUT) {
+ _HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.srv_aborts);
+ s->flags |= SF_ERR_SRVTO;
+ }
+ else {
+ _HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
+ if (sess->listener && sess->listener->counters)
+ _HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
+ if (srv)
+ _HA_ATOMIC_INC(&srv->counters.cli_aborts);
+ s->flags |= SF_ERR_CLITO;
+ }
+ sess_set_term_flags(s);
+ }
+ }
+
+ /*
+ * Here we take care of forwarding unhandled data. This also includes
+ * connection establishments and shutdown requests.
+ */
+
+
+ /* If no one is interested in analysing data, it's time to forward
+ * everything. We configure the buffer to forward indefinitely.
+ * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
+ * recent call to channel_abort().
+ */
+ if (unlikely((!req->analysers || (req->analysers == AN_REQ_FLT_END && !(req->flags & CF_FLT_ANALYZE))) &&
+ !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_DONE) &&
+ (sc_state_in(scf->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO)) &&
+ (req->to_forward != CHN_INFINITE_FORWARD))) {
+ /* This buffer is freewheeling, there's no analyser
+ * attached to it. If any data are left in, we'll permit them to
+ * move.
+ */
+ channel_auto_read(req);
+ channel_auto_connect(req);
+ channel_auto_close(req);
+
+ if (IS_HTX_STRM(s)) {
+ struct htx *htx = htxbuf(&req->buf);
+
+ /* We'll let data flow between the producer (if still connected)
+ * to the consumer.
+ */
+ co_set_data(req, htx->data);
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
+ channel_htx_forward_forever(req, htx);
+ }
+ else {
+ /* We'll let data flow between the producer (if still connected)
+ * to the consumer (which might possibly not be connected yet).
+ */
+ c_adv(req, ci_data(req));
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
+ channel_forward_forever(req);
+ }
+ }
+
+ /* reflect what the L7 analysers have seen last */
+ rqf_last = req->flags;
+ scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
+ scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
+
+ /* it's possible that an upper layer has requested a connection setup or abort.
+ * There are 2 situations where we decide to establish a new connection :
+ * - there are data scheduled for emission in the buffer
+ * - the CF_AUTO_CONNECT flag is set (active connection)
+ */
+ if (scb->state == SC_ST_INI) {
+ if (!(scb->flags & SC_FL_SHUT_DONE)) {
+ if ((req->flags & CF_AUTO_CONNECT) || co_data(req)) {
+ /* If we have an appctx, there is no connect method, so we
+ * immediately switch to the connected state, otherwise we
+ * perform a connection request.
+ */
+ scb->state = SC_ST_REQ; /* new connection requested */
+ s->conn_retries = 0;
+ if ((s->be->retry_type &~ PR_RE_CONN_FAILED) &&
+ (s->be->mode == PR_MODE_HTTP) &&
+ !(s->txn->flags & TX_D_L7_RETRY))
+ s->txn->flags |= TX_L7_RETRY;
+
+ if (s->be->options & PR_O_ABRT_CLOSE) {
+ struct connection *conn = sc_conn(scf);
+
+ if (conn && conn->mux && conn->mux->ctl)
+ conn->mux->ctl(conn, MUX_CTL_SUBS_RECV, NULL);
+ }
+ }
+ }
+ else {
+ s->scb->state = SC_ST_CLO; /* shutw+ini = abort */
+ sc_schedule_shutdown(scb);
+ sc_schedule_abort(scb);
+ }
+ }
+
+
+ /* we may have a pending connection request, or a connection waiting
+ * for completion.
+ */
+ if (sc_state_in(scb->state, SC_SB_REQ|SC_SB_QUE|SC_SB_TAR|SC_SB_ASS)) {
+ /* prune the request variables and swap to the response variables. */
+ if (s->vars_reqres.scope != SCOPE_RES) {
+ if (!LIST_ISEMPTY(&s->vars_reqres.head))
+ vars_prune(&s->vars_reqres, s->sess, s);
+ vars_init_head(&s->vars_reqres, SCOPE_RES);
+ }
+
+ do {
+ /* nb: step 1 might switch from QUE to ASS, but we first want
+ * to give a chance to step 2 to perform a redirect if needed.
+ */
+ if (scb->state != SC_ST_REQ)
+ back_try_conn_req(s);
+ if (scb->state == SC_ST_REQ)
+ back_handle_st_req(s);
+
+ /* get a chance to complete an immediate connection setup */
+ if (scb->state == SC_ST_RDY)
+ goto resync_stconns;
+
+ /* applets directly go to the ESTABLISHED state. Similarly,
+ * servers experience the same fate when their connection
+ * is reused.
+ */
+ if (unlikely(scb->state == SC_ST_EST))
+ back_establish(s);
+
+ srv = objt_server(s->target);
+ if (scb->state == SC_ST_ASS && srv && srv->rdr_len && (s->flags & SF_REDIRECTABLE))
+ http_perform_server_redirect(s, scb);
+ } while (scb->state == SC_ST_ASS);
+ }
+
+ /* Let's see if we can send the pending request now */
+ sc_conn_sync_send(scb);
+
+ /*
+ * Now forward all shutdown requests between both sides of the request buffer
+ */
+
+ /* first, let's check if the request buffer needs to shutdown(write), which may
+ * happen either because the input is closed or because we want to force a close
+ * once the server has begun to respond. If a half-closed timeout is set, we adjust
+ * the other side's timeout as well. However this doesn't have effect during the
+ * connection setup unless the backend has abortonclose set.
+ */
+ if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
+ !(scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) &&
+ (scb->state != SC_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
+ sc_schedule_shutdown(scb);
+ }
+
+ /* shutdown(write) pending */
+ if (unlikely((scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
+ (!co_data(req) || (req->flags & CF_WRITE_TIMEOUT)))) {
+ if (scf->flags & SC_FL_ERROR)
+ scb->flags |= SC_FL_NOLINGER;
+ sc_shutdown(scb);
+ }
+
+ /* shutdown(write) done on server side, we must stop the client too */
+ if (unlikely((scb->flags & SC_FL_SHUT_DONE) && !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
+ !req->analysers)
+ sc_schedule_abort(scf);
+
+ /* shutdown(read) pending */
+ if (unlikely((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
+ if (scf->flags & SC_FL_NOHALF)
+ scf->flags |= SC_FL_NOLINGER;
+ sc_abort(scf);
+ }
+
+ /* Benchmarks have shown that it's optimal to do a full resync now */
+ if (scf->state == SC_ST_DIS ||
+ sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
+ ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
+ ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
+ goto resync_stconns;
+
+ /* otherwise we want to check if we need to resync the req buffer or not */
+ if (((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags) & SC_FL_SHUT_DONE))
+ goto resync_request;
+
+ /* perform output updates to the response buffer */
+
+ /* If no one is interested in analysing data, it's time to forward
+ * everything. We configure the buffer to forward indefinitely.
+ * Note that we're checking SC_FL_ABRT_WANTED as an indication of a possible
+ * recent call to channel_abort().
+ */
+ if (unlikely((!res->analysers || (res->analysers == AN_RES_FLT_END && !(res->flags & CF_FLT_ANALYZE))) &&
+ !(scf->flags & SC_FL_ABRT_WANTED) && !(scb->flags & SC_FL_SHUT_WANTED) &&
+ sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
+ (res->to_forward != CHN_INFINITE_FORWARD))) {
+ /* This buffer is freewheeling, there's no analyser
+ * attached to it. If any data are left in, we'll permit them to
+ * move.
+ */
+ channel_auto_read(res);
+ channel_auto_close(res);
+
+ if (IS_HTX_STRM(s)) {
+ struct htx *htx = htxbuf(&res->buf);
+
+ /* We'll let data flow between the producer (if still connected)
+ * to the consumer.
+ */
+ co_set_data(res, htx->data);
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
+ channel_htx_forward_forever(res, htx);
+ }
+ else {
+ /* We'll let data flow between the producer (if still connected)
+ * to the consumer.
+ */
+ c_adv(res, ci_data(res));
+ if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
+ !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
+ channel_forward_forever(res);
+ }
+
+ /* if we have no analyser anymore in any direction and have a
+ * tunnel timeout set, use it now. Note that we must respect
+ * the half-closed timeouts as well.
+ */
+ if (!req->analysers && s->tunnel_timeout) {
+ scf->ioto = scb->ioto = s->tunnel_timeout;
+
+ if (!IS_HTX_STRM(s)) {
+ if ((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(sess->fe->timeout.clientfin))
+ scf->ioto = sess->fe->timeout.clientfin;
+ if ((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(s->be->timeout.serverfin))
+ scb->ioto = s->be->timeout.serverfin;
+ }
+ }
+ }
+
+ /* reflect what the L7 analysers have seen last */
+ rpf_last = res->flags;
+ scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
+ scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
+
+ /* Let's see if we can send the pending response now */
+ sc_conn_sync_send(scf);
+
+ /*
+ * Now forward all shutdown requests between both sides of the buffer
+ */
+
+ /*
+ * FIXME: this is probably where we should produce error responses.
+ */
+
+ /* first, let's check if the response buffer needs to shutdown(write) */
+ if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
+ !(scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))) {
+ sc_schedule_shutdown(scf);
+ }
+
+ /* shutdown(write) pending */
+ if (unlikely((scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) == SC_FL_SHUT_WANTED &&
+ (!co_data(res) || (res->flags & CF_WRITE_TIMEOUT)))) {
+ sc_shutdown(scf);
+ }
+
+ /* shutdown(write) done on the client side, we must stop the server too */
+ if (unlikely((scf->flags & SC_FL_SHUT_DONE) && !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
+ !res->analysers)
+ sc_schedule_abort(scb);
+
+ /* shutdown(read) pending */
+ if (unlikely((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
+ if (scb->flags & SC_FL_NOHALF)
+ scb->flags |= SC_FL_NOLINGER;
+ sc_abort(scb);
+ }
+
+ if (scf->state == SC_ST_DIS ||
+ sc_state_in(scb->state, SC_SB_RDY|SC_SB_DIS) ||
+ ((scf->flags & SC_FL_ERROR) && scf->state != SC_ST_CLO) ||
+ ((scb->flags & SC_FL_ERROR) && scb->state != SC_ST_CLO))
+ goto resync_stconns;
+
+ if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
+ goto resync_request;
+
+ if (((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
+ ((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
+ (res->analysers ^ res_ana_back))
+ goto resync_response;
+
+ if ((((req->flags ^ rqf_last) | (res->flags ^ rpf_last)) & CF_MASK_ANALYSER) ||
+ (req->analysers ^ req_ana_back))
+ goto resync_request;
+
+ /* we're interested in getting wakeups again */
+ scf->flags &= ~SC_FL_DONT_WAKE;
+ scb->flags &= ~SC_FL_DONT_WAKE;
+
+ if (likely((scf->state != SC_ST_CLO) || !sc_state_in(scb->state, SC_SB_INI|SC_SB_CLO) ||
+ (req->analysers & AN_REQ_FLT_END) || (res->analysers & AN_RES_FLT_END))) {
+ if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) && !(s->flags & SF_IGNORE))
+ stream_process_counters(s);
+
+ stream_update_both_sc(s);
+
+ /* Reset pending events now */
+ s->pending_events = 0;
+
+ update_exp_and_leave:
+ /* Note: please ensure that if you branch here you disable SC_FL_DONT_WAKE */
+ if (!req->analysers)
+ req->analyse_exp = TICK_ETERNITY;
+ if (!res->analysers)
+ res->analyse_exp = TICK_ETERNITY;
+
+ if ((sess->fe->options & PR_O_CONTSTATS) && (s->flags & SF_BE_ASSIGNED) &&
+ (!tick_isset(req->analyse_exp) || tick_is_expired(req->analyse_exp, now_ms)))
+ req->analyse_exp = tick_add(now_ms, 5000);
+
+ t->expire = (tick_is_expired(t->expire, now_ms) ? 0 : t->expire);
+ t->expire = tick_first(t->expire, sc_ep_rcv_ex(scf));
+ t->expire = tick_first(t->expire, sc_ep_snd_ex(scf));
+ t->expire = tick_first(t->expire, sc_ep_rcv_ex(scb));
+ t->expire = tick_first(t->expire, sc_ep_snd_ex(scb));
+ t->expire = tick_first(t->expire, req->analyse_exp);
+ t->expire = tick_first(t->expire, res->analyse_exp);
+ t->expire = tick_first(t->expire, s->conn_exp);
+
+ if (unlikely(tick_is_expired(t->expire, now_ms))) {
+ /* Some events prevented the timeouts to be handled but nothing evolved.
+ So do it now and resyunc the stconns
+ */
+ stream_handle_timeouts(s);
+ goto resync_stconns;
+ }
+
+ s->pending_events &= ~(TASK_WOKEN_TIMER | TASK_WOKEN_RES);
+ stream_release_buffers(s);
+
+ DBG_TRACE_DEVEL("queuing", STRM_EV_STRM_PROC, s);
+ return t; /* nothing more to do */
+ }
+
+ DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);
+
+ if (s->flags & SF_BE_ASSIGNED)
+ _HA_ATOMIC_DEC(&s->be->beconn);
+
+ if (unlikely((global.mode & MODE_DEBUG) &&
+ (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
+ chunk_printf(&trash, "%08x:%s.closed[%04x:%04x]\n",
+ s->uniq_id, s->be->id,
+ (unsigned short)conn_fd(sc_conn(scf)),
+ (unsigned short)conn_fd(sc_conn(scb)));
+ DISGUISE(write(1, trash.area, trash.data));
+ }
+
+ if (!(s->flags & SF_IGNORE)) {
+ s->logs.t_close = ns_to_ms(now_ns - s->logs.accept_ts);
+
+ stream_process_counters(s);
+
+ if (s->txn && s->txn->status) {
+ int n;
+
+ n = s->txn->status / 100;
+ if (n < 1 || n > 5)
+ n = 0;
+
+ if (sess->fe->mode == PR_MODE_HTTP) {
+ _HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
+ }
+ if ((s->flags & SF_BE_ASSIGNED) &&
+ (s->be->mode == PR_MODE_HTTP)) {
+ _HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
+ _HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
+ }
+ }
+
+ /* let's do a final log if we need it */
+ if (!LIST_ISEMPTY(&sess->fe->logformat) && s->logs.logwait &&
+ !(s->flags & SF_MONITOR) &&
+ (!(sess->fe->options & PR_O_NULLNOLOG) || req->total)) {
+ /* we may need to know the position in the queue */
+ pendconn_free(s);
+
+ stream_cond_update_cpu_usage(s);
+ s->do_log(s);
+ }
+
+ /* update time stats for this stream */
+ stream_update_time_stats(s);
+ }
+
+ /* the task MUST not be in the run queue anymore */
+ stream_free(s);
+ task_destroy(t);
+ return NULL;
+}
+
+/* Update the stream's backend and server time stats */
+void stream_update_time_stats(struct stream *s)
+{
+ int t_request;
+ int t_queue;
+ int t_connect;
+ int t_data;
+ int t_close;
+ struct server *srv;
+ unsigned int samples_window;
+
+ t_request = 0;
+ t_queue = s->logs.t_queue;
+ t_connect = s->logs.t_connect;
+ t_close = s->logs.t_close;
+ t_data = s->logs.t_data;
+
+ if (s->be->mode != PR_MODE_HTTP)
+ t_data = t_connect;
+
+ if (t_connect < 0 || t_data < 0)
+ return;
+
+ if ((llong)(s->logs.request_ts - s->logs.accept_ts) >= 0)
+ t_request = ns_to_ms(s->logs.request_ts - s->logs.accept_ts);
+
+ t_data -= t_connect;
+ t_connect -= t_queue;
+ t_queue -= t_request;
+
+ srv = objt_server(s->target);
+ if (srv) {
+ samples_window = (((s->be->mode == PR_MODE_HTTP) ?
+ srv->counters.p.http.cum_req : srv->counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
+ swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
+ swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
+ swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
+ swrate_add_dynamic(&srv->counters.t_time, samples_window, t_close);
+ HA_ATOMIC_UPDATE_MAX(&srv->counters.qtime_max, t_queue);
+ HA_ATOMIC_UPDATE_MAX(&srv->counters.ctime_max, t_connect);
+ HA_ATOMIC_UPDATE_MAX(&srv->counters.dtime_max, t_data);
+ HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
+ }
+ samples_window = (((s->be->mode == PR_MODE_HTTP) ?
+ s->be->be_counters.p.http.cum_req : s->be->be_counters.cum_lbconn) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
+ swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
+ swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
+ swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);
+ swrate_add_dynamic(&s->be->be_counters.t_time, samples_window, t_close);
+ HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.qtime_max, t_queue);
+ HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ctime_max, t_connect);
+ HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.dtime_max, t_data);
+ HA_ATOMIC_UPDATE_MAX(&s->be->be_counters.ttime_max, t_close);
+}
+
+/*
+ * This function adjusts sess->srv_conn and maintains the previous and new
+ * server's served stream counts. Setting newsrv to NULL is enough to release
+ * current connection slot. This function also notifies any LB algo which might
+ * expect to be informed about any change in the number of active streams on a
+ * server.
+ */
+void sess_change_server(struct stream *strm, struct server *newsrv)
+{
+ struct server *oldsrv = strm->srv_conn;
+
+ if (oldsrv == newsrv)
+ return;
+
+ if (oldsrv) {
+ _HA_ATOMIC_DEC(&oldsrv->served);
+ _HA_ATOMIC_DEC(&oldsrv->proxy->served);
+ __ha_barrier_atomic_store();
+ if (oldsrv->proxy->lbprm.server_drop_conn)
+ oldsrv->proxy->lbprm.server_drop_conn(oldsrv);
+ stream_del_srv_conn(strm);
+ }
+
+ if (newsrv) {
+ _HA_ATOMIC_INC(&newsrv->served);
+ _HA_ATOMIC_INC(&newsrv->proxy->served);
+ __ha_barrier_atomic_store();
+ if (newsrv->proxy->lbprm.server_take_conn)
+ newsrv->proxy->lbprm.server_take_conn(newsrv);
+ stream_add_srv_conn(strm, newsrv);
+ }
+}
+
+/* Handle server-side errors for default protocols. It is called whenever a a
+ * connection setup is aborted or a request is aborted in queue. It sets the
+ * stream termination flags so that the caller does not have to worry about
+ * them. It's installed as ->srv_error for the server-side stream connector.
+ */
+void default_srv_error(struct stream *s, struct stconn *sc)
+{
+ int err_type = s->conn_err_type;
+ int err = 0, fin = 0;
+
+ if (err_type & STRM_ET_QUEUE_ABRT) {
+ err = SF_ERR_CLICL;
+ fin = SF_FINST_Q;
+ }
+ else if (err_type & STRM_ET_CONN_ABRT) {
+ err = SF_ERR_CLICL;
+ fin = SF_FINST_C;
+ }
+ else if (err_type & STRM_ET_QUEUE_TO) {
+ err = SF_ERR_SRVTO;
+ fin = SF_FINST_Q;
+ }
+ else if (err_type & STRM_ET_QUEUE_ERR) {
+ err = SF_ERR_SRVCL;
+ fin = SF_FINST_Q;
+ }
+ else if (err_type & STRM_ET_CONN_TO) {
+ err = SF_ERR_SRVTO;
+ fin = SF_FINST_C;
+ }
+ else if (err_type & STRM_ET_CONN_ERR) {
+ err = SF_ERR_SRVCL;
+ fin = SF_FINST_C;
+ }
+ else if (err_type & STRM_ET_CONN_RES) {
+ err = SF_ERR_RESOURCE;
+ fin = SF_FINST_C;
+ }
+ else /* STRM_ET_CONN_OTHER and others */ {
+ err = SF_ERR_INTERNAL;
+ fin = SF_FINST_C;
+ }
+
+ if (!(s->flags & SF_ERR_MASK))
+ s->flags |= err;
+ if (!(s->flags & SF_FINST_MASK))
+ s->flags |= fin;
+}
+
+/* kill a stream and set the termination flags to <why> (one of SF_ERR_*) */
+void stream_shutdown(struct stream *stream, int why)
+{
+ if (stream->scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED))
+ return;
+
+ sc_schedule_shutdown(stream->scb);
+ sc_schedule_abort(stream->scb);
+ stream->task->nice = 1024;
+ if (!(stream->flags & SF_ERR_MASK))
+ stream->flags |= why;
+ task_wakeup(stream->task, TASK_WOKEN_OTHER);
+}
+
/* Dumps the state of the stream attached to object <obj> (either a stream or
 * an appctx bound to one) after it was detected spinning at <rate> loops per
 * second, then deliberately aborts the process hoping to retrieve a core.
 * The report is emitted as an alert and sent to the log before crashing.
 */
void stream_dump_and_crash(enum obj_type *obj, int rate)
{
	struct stream *s;
	char *msg = NULL;
	const void *ptr;

	/* resolve <obj> to a stream: either directly, or via the appctx
	 * attached to it. If neither resolves, there is nothing to dump.
	 */
	ptr = s = objt_stream(obj);
	if (!s) {
		const struct appctx *appctx = objt_appctx(obj);
		if (!appctx)
			return;
		ptr = appctx;
		s = appctx_strm(appctx);
		if (!s)
			return;
	}

	/* build the full stream dump into the shared trash chunk */
	chunk_reset(&trash);
	chunk_printf(&trash, " ");
	strm_dump_to_buffer(&trash, s, " ", HA_ATOMIC_LOAD(&global.anon_key));

	if (ptr != s) { // that's an appctx
		const struct appctx *appctx = ptr;

		/* also report the offending applet and its handler, with the
		 * symbol names resolved to ease debugging from the report alone.
		 */
		chunk_appendf(&trash, " applet=%p(", appctx->applet);
		resolve_sym_name(&trash, NULL, appctx->applet);
		chunk_appendf(&trash, ")");

		chunk_appendf(&trash, " handler=%p(", appctx->applet->fct);
		resolve_sym_name(&trash, NULL, appctx->applet->fct);
		chunk_appendf(&trash, ")");
	}

	/* <msg> is never freed: ABORT_NOW() below does not return */
	memprintf(&msg,
	          "A bogus %s [%p] is spinning at %d calls per second and refuses to die, "
	          "aborting now! Please report this error to developers:\n"
	          "%s\n",
	          obj_type_name(obj), ptr, rate, trash.area);

	ha_alert("%s", msg);
	send_log(NULL, LOG_EMERG, "%s", msg);
	ABORT_NOW();
}
+
+/* initialize the require structures */
+static void init_stream()
+{
+ int thr;
+
+ for (thr = 0; thr < MAX_THREADS; thr++)
+ LIST_INIT(&ha_thread_ctx[thr].streams);
+}
+INITCALL0(STG_INIT, init_stream);
+
+/* Generates a unique ID based on the given <format>, stores it in the given <strm> and
+ * returns the unique ID.
+ *
+ * If this function fails to allocate memory IST_NULL is returned.
+ *
+ * If an ID is already stored within the stream nothing happens existing unique ID is
+ * returned.
+ */
+struct ist stream_generate_unique_id(struct stream *strm, struct list *format)
+{
+ if (isttest(strm->unique_id)) {
+ return strm->unique_id;
+ }
+ else {
+ char *unique_id;
+ int length;
+ if ((unique_id = pool_alloc(pool_head_uniqueid)) == NULL)
+ return IST_NULL;
+
+ length = build_logline(strm, unique_id, UNIQUEID_LEN, format);
+ strm->unique_id = ist2(unique_id, length);
+
+ return strm->unique_id;
+ }
+}
+
+/************************************************************************/
+/* All supported ACL keywords must be declared here. */
+/************************************************************************/
+static enum act_return stream_action_set_log_level(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ s->logs.level = (uintptr_t)rule->arg.act.p[0];
+ return ACT_RET_CONT;
+}
+
+
+/* Parse a "set-log-level" action. It takes the level value as argument. It
+ * returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret stream_parse_set_log_level(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int level;
+
+ if (!*args[*cur_arg]) {
+ bad_log_level:
+ memprintf(err, "expects exactly 1 argument (log level name or 'silent')");
+ return ACT_RET_PRS_ERR;
+ }
+ if (strcmp(args[*cur_arg], "silent") == 0)
+ level = -1;
+ else if ((level = get_log_level(args[*cur_arg]) + 1) == 0)
+ goto bad_log_level;
+
+ (*cur_arg)++;
+
+ /* Register processing function. */
+ rule->action_ptr = stream_action_set_log_level;
+ rule->action = ACT_CUSTOM;
+ rule->arg.act.p[0] = (void *)(uintptr_t)level;
+ return ACT_RET_PRS_OK;
+}
+
+static enum act_return stream_action_set_nice(struct act_rule *rule, struct proxy *px,
+ struct session *sess, struct stream *s, int flags)
+{
+ s->task->nice = (uintptr_t)rule->arg.act.p[0];
+ return ACT_RET_CONT;
+}
+
+
+/* Parse a "set-nice" action. It takes the nice value as argument. It returns
+ * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret stream_parse_set_nice(const char **args, int *cur_arg, struct proxy *px,
+ struct act_rule *rule, char **err)
+{
+ int nice;
+
+ if (!*args[*cur_arg]) {
+ bad_log_level:
+ memprintf(err, "expects exactly 1 argument (integer value)");
+ return ACT_RET_PRS_ERR;
+ }
+
+ nice = atoi(args[*cur_arg]);
+ if (nice < -1024)
+ nice = -1024;
+ else if (nice > 1024)
+ nice = 1024;
+
+ (*cur_arg)++;
+
+ /* Register processing function. */
+ rule->action_ptr = stream_action_set_nice;
+ rule->action = ACT_CUSTOM;
+ rule->arg.act.p[0] = (void *)(uintptr_t)nice;
+ return ACT_RET_PRS_OK;
+}
+
+
/* Action handler for "switch-mode": upgrades the stream to HTTP mode when it
 * is not already an HTX stream and the requested mode (arg.act.p[0]) is
 * PR_MODE_HTTP, using the MUX entry resolved into arg.act.p[1] by
 * check_tcp_switch_stream_mode(). Returns ACT_RET_ABRT after aborting the
 * stream if the upgrade fails, otherwise ACT_RET_STOP.
 */
static enum act_return tcp_action_switch_stream_mode(struct act_rule *rule, struct proxy *px,
                                                     struct session *sess, struct stream *s, int flags)
{
	/* both arguments were stored by the parse/check callbacks */
	enum pr_mode mode = (uintptr_t)rule->arg.act.p[0];
	const struct mux_proto_list *mux_proto = rule->arg.act.p[1];

	if (!IS_HTX_STRM(s) && mode == PR_MODE_HTTP) {
		if (!stream_set_http_mode(s, mux_proto)) {
			stream_abort(s);
			return ACT_RET_ABRT;
		}
	}
	return ACT_RET_STOP;
}
+
+
/* Config-check callback for the "switch-mode" action: verifies that a
 * frontend-side MUX compatible with the requested mode (arg.act.p[0]) exists,
 * and stores the resolved MUX entry back into rule->arg.act.p[1]. Returns 1
 * on success, 0 on error with <err> filled.
 */
static int check_tcp_switch_stream_mode(struct act_rule *rule, struct proxy *px, char **err)
{
	const struct mux_proto_list *mux_ent;
	const struct mux_proto_list *mux_proto = rule->arg.act.p[1];
	enum pr_mode pr_mode = (uintptr_t)rule->arg.act.p[0];
	enum proto_proxy_mode mode = conn_pr_mode_to_proto_mode(pr_mode);

	/* mark the proxy as subject to HTTP upgrades */
	if (pr_mode == PR_MODE_HTTP)
		px->options |= PR_O_HTTP_UPG;

	if (mux_proto) {
		/* an explicit protocol was requested: the best entry for this
		 * mode must exist and carry exactly the requested token.
		 */
		mux_ent = conn_get_best_mux_entry(mux_proto->token, PROTO_SIDE_FE, mode);
		if (!mux_ent || !isteq(mux_ent->token, mux_proto->token)) {
			memprintf(err, "MUX protocol '%.*s' is not compatible with the selected mode",
				  (int)mux_proto->token.len, mux_proto->token.ptr);
			return 0;
		}
	}
	else {
		/* no explicit protocol: pick the best available one for the mode */
		mux_ent = conn_get_best_mux_entry(IST_NULL, PROTO_SIDE_FE, mode);
		if (!mux_ent) {
			memprintf(err, "Unable to find compatible MUX protocol with the selected mode");
			return 0;
		}
	}

	/* Update the mux */
	rule->arg.act.p[1] = (void *)mux_ent;
	return 1;

}
+
/* Parse a "switch-mode" action: expects a target mode as first argument
 * ("http" only for now), optionally followed by "proto <name>" to force a
 * specific MUX protocol. Registers tcp_action_switch_stream_mode() as the
 * handler and check_tcp_switch_stream_mode() to validate the MUX at config
 * check time. Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
 */
static enum act_parse_ret stream_parse_switch_mode(const char **args, int *cur_arg,
                                                   struct proxy *px, struct act_rule *rule,
                                                   char **err)
{
	const struct mux_proto_list *mux_proto = NULL;
	struct ist proto;
	enum pr_mode mode;

	/* must have at least the mode */
	if (*(args[*cur_arg]) == 0) {
		memprintf(err, "'%s %s' expects a mode as argument.", args[0], args[*cur_arg-1]);
		return ACT_RET_PRS_ERR;
	}

	/* only frontends can upgrade an incoming stream's mode */
	if (!(px->cap & PR_CAP_FE)) {
		memprintf(err, "'%s %s' not allowed because %s '%s' has no frontend capability",
			  args[0], args[*cur_arg-1], proxy_type_str(px), px->id);
		return ACT_RET_PRS_ERR;
	}
	/* Check the requested mode. For now "tcp" is disabled because downgrade
	 * is not supported and PT is the only TCP mux.
	 */
	if (strcmp(args[*cur_arg], "http") == 0)
		mode = PR_MODE_HTTP;
	else {
		memprintf(err, "'%s %s' expects a valid mode (got '%s').", args[0], args[*cur_arg-1], args[*cur_arg]);
		return ACT_RET_PRS_ERR;
	}

	/* check the proto, if specified */
	if (*(args[*cur_arg+1]) && strcmp(args[*cur_arg+1], "proto") == 0) {
		if (*(args[*cur_arg+2]) == 0) {
			memprintf(err, "'%s %s': '%s' expects a protocol as argument.",
				  args[0], args[*cur_arg-1], args[*cur_arg+1]);
			return ACT_RET_PRS_ERR;
		}

		proto = ist(args[*cur_arg + 2]);
		mux_proto = get_mux_proto(proto);
		if (!mux_proto) {
			memprintf(err, "'%s %s': '%s' expects a valid MUX protocol, if specified (got '%s')",
				  args[0], args[*cur_arg-1], args[*cur_arg+1], args[*cur_arg+2]);
			return ACT_RET_PRS_ERR;
		}
		/* consume "proto <name>" */
		*cur_arg += 2;
	}

	(*cur_arg)++;

	/* Register processing function. */
	rule->action_ptr = tcp_action_switch_stream_mode;
	rule->check_ptr = check_tcp_switch_stream_mode;
	rule->action = ACT_CUSTOM;
	rule->arg.act.p[0] = (void *)(uintptr_t)mode;
	rule->arg.act.p[1] = (void *)mux_proto;
	return ACT_RET_PRS_OK;
}
+
+/* 0=OK, <0=Alert, >0=Warning */
+static enum act_parse_ret stream_parse_use_service(const char **args, int *cur_arg,
+ struct proxy *px, struct act_rule *rule,
+ char **err)
+{
+ struct action_kw *kw;
+
+ /* Check if the service name exists. */
+ if (*(args[*cur_arg]) == 0) {
+ memprintf(err, "'%s' expects a service name.", args[0]);
+ return ACT_RET_PRS_ERR;
+ }
+
+ /* lookup for keyword corresponding to a service. */
+ kw = action_lookup(&service_keywords, args[*cur_arg]);
+ if (!kw) {
+ memprintf(err, "'%s' unknown service name.", args[1]);
+ return ACT_RET_PRS_ERR;
+ }
+ (*cur_arg)++;
+
+ /* executes specific rule parser. */
+ rule->kw = kw;
+ if (kw->parse((const char **)args, cur_arg, px, rule, err) == ACT_RET_PRS_ERR)
+ return ACT_RET_PRS_ERR;
+
+ /* Register processing function. */
+ rule->action_ptr = process_use_service;
+ rule->action = ACT_CUSTOM;
+
+ return ACT_RET_PRS_OK;
+}
+
/* Appends the action keyword list <kw_list> to the chain of registered
 * service keywords, making its entries visible to service_find() and
 * stream_parse_use_service().
 */
void service_keywords_register(struct action_kw_list *kw_list)
{
	LIST_APPEND(&service_keywords, &kw_list->list);
}
+
/* Looks up service keyword <kw> among the registered service keywords and
 * returns the matching action_kw, or NULL when none matches.
 */
struct action_kw *service_find(const char *kw)
{
	return action_lookup(&service_keywords, kw);
}
+
+/* Lists the known services on <out>. If <out> is null, emit them on stdout one
+ * per line.
+ */
+void list_services(FILE *out)
+{
+ const struct action_kw *akwp, *akwn;
+ struct action_kw_list *kw_list;
+ int found = 0;
+ int i;
+
+ if (out)
+ fprintf(out, "Available services :");
+
+ for (akwn = akwp = NULL;; akwp = akwn) {
+ list_for_each_entry(kw_list, &service_keywords, list) {
+ for (i = 0; kw_list->kw[i].kw != NULL; i++) {
+ if (strordered(akwp ? akwp->kw : NULL,
+ kw_list->kw[i].kw,
+ akwn != akwp ? akwn->kw : NULL))
+ akwn = &kw_list->kw[i];
+ found = 1;
+ }
+ }
+ if (akwn == akwp)
+ break;
+ if (out)
+ fprintf(out, " %s", akwn->kw);
+ else
+ printf("%s\n", akwn->kw);
+ }
+ if (!found && out)
+ fprintf(out, " none\n");
+}
+
/* appctx context used by the "show sess" command; reserved via
 * applet_reserve_svcctx() in cli_parse_show_sess().
 */
/* flags used for show_sess_ctx.flags */
#define CLI_SHOWSESS_F_SUSP	0x00000001 /* show only suspicious streams */

struct show_sess_ctx {
	struct bref bref;	/* back-reference from the session being dumped */
	void *target;		/* session we want to dump, or NULL for all, or (void *)-1 to dump all matching ones */
	unsigned int thr;	/* the thread number being explored (0..MAX_THREADS-1) */
	unsigned int uid;	/* if non-null, the uniq_id of the session being dumped */
	unsigned int min_age;	/* minimum age of streams to dump (seconds), from "older <age>" */
	unsigned int flags;	/* CLI_SHOWSESS_* */
	int section;		/* section of the session being dumped */
	int pos;		/* last position of the current session's buffer */
};
+
+/* This function appends a complete dump of a stream state onto the buffer,
+ * possibly anonymizing using the specified anon_key. The caller is responsible
+ * for ensuring that enough room remains in the buffer to dump a complete
+ * stream at once. Each new output line will be prefixed with <pfx> if non-null,
+ * which is used to preserve indenting.
+ */
+void strm_dump_to_buffer(struct buffer *buf, const struct stream *strm, const char *pfx, uint32_t anon_key)
+{
+ struct stconn *scf, *scb;
+ struct tm tm;
+ extern const char *monthname[12];
+ char pn[INET6_ADDRSTRLEN];
+ struct connection *conn;
+ struct appctx *tmpctx;
+
+ pfx = pfx ? pfx : "";
+
+ get_localtime(strm->logs.accept_date.tv_sec, &tm);
+ chunk_appendf(buf,
+ "%p: [%02d/%s/%04d:%02d:%02d:%02d.%06d] id=%u proto=%s",
+ strm,
+ tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
+ tm.tm_hour, tm.tm_min, tm.tm_sec, (int)(strm->logs.accept_date.tv_usec),
+ strm->uniq_id,
+ strm_li(strm) ? strm_li(strm)->rx.proto->name : "?");
+
+ conn = objt_conn(strm_orig(strm));
+ switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
+ case AF_INET:
+ case AF_INET6:
+ chunk_appendf(buf, " source=%s:%d\n",
+ HA_ANON_STR(anon_key, pn), get_host_port(conn->src));
+ break;
+ case AF_UNIX:
+ chunk_appendf(buf, " source=unix:%d\n", strm_li(strm)->luid);
+ break;
+ default:
+ /* no more information to print right now */
+ chunk_appendf(buf, "\n");
+ break;
+ }
+
+ chunk_appendf(buf,
+ "%s flags=0x%x, conn_retries=%d, conn_exp=%s conn_et=0x%03x srv_conn=%p, pend_pos=%p waiting=%d epoch=%#x\n", pfx,
+ strm->flags, strm->conn_retries,
+ strm->conn_exp ?
+ tick_is_expired(strm->conn_exp, now_ms) ? "<PAST>" :
+ human_time(TICKS_TO_MS(strm->conn_exp - now_ms),
+ TICKS_TO_MS(1000)) : "<NEVER>",
+ strm->conn_err_type, strm->srv_conn, strm->pend_pos,
+ LIST_INLIST(&strm->buffer_wait.list), strm->stream_epoch);
+
+ chunk_appendf(buf,
+ "%s frontend=%s (id=%u mode=%s), listener=%s (id=%u)", pfx,
+ HA_ANON_STR(anon_key, strm_fe(strm)->id), strm_fe(strm)->uuid, proxy_mode_str(strm_fe(strm)->mode),
+ strm_li(strm) ? strm_li(strm)->name ? strm_li(strm)->name : "?" : "?",
+ strm_li(strm) ? strm_li(strm)->luid : 0);
+
+ switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
+ case AF_INET:
+ case AF_INET6:
+ chunk_appendf(buf, " addr=%s:%d\n",
+ HA_ANON_STR(anon_key, pn), get_host_port(conn->dst));
+ break;
+ case AF_UNIX:
+ chunk_appendf(buf, " addr=unix:%d\n", strm_li(strm)->luid);
+ break;
+ default:
+ /* no more information to print right now */
+ chunk_appendf(buf, "\n");
+ break;
+ }
+
+ if (strm->be->cap & PR_CAP_BE)
+ chunk_appendf(buf,
+ "%s backend=%s (id=%u mode=%s)", pfx,
+ HA_ANON_STR(anon_key, strm->be->id),
+ strm->be->uuid, proxy_mode_str(strm->be->mode));
+ else
+ chunk_appendf(buf, "%s backend=<NONE> (id=-1 mode=-)", pfx);
+
+ conn = sc_conn(strm->scb);
+ switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
+ case AF_INET:
+ case AF_INET6:
+ chunk_appendf(buf, " addr=%s:%d\n",
+ HA_ANON_STR(anon_key, pn), get_host_port(conn->src));
+ break;
+ case AF_UNIX:
+ chunk_appendf(buf, " addr=unix\n");
+ break;
+ default:
+ /* no more information to print right now */
+ chunk_appendf(buf, "\n");
+ break;
+ }
+
+ if (strm->be->cap & PR_CAP_BE)
+ chunk_appendf(buf,
+ "%s server=%s (id=%u)", pfx,
+ objt_server(strm->target) ? HA_ANON_STR(anon_key, __objt_server(strm->target)->id) : "<none>",
+ objt_server(strm->target) ? __objt_server(strm->target)->puid : 0);
+ else
+ chunk_appendf(buf, "%s server=<NONE> (id=-1)", pfx);
+
+ switch (conn && conn_get_dst(conn) ? addr_to_str(conn->dst, pn, sizeof(pn)) : AF_UNSPEC) {
+ case AF_INET:
+ case AF_INET6:
+ chunk_appendf(buf, " addr=%s:%d\n",
+ HA_ANON_STR(anon_key, pn), get_host_port(conn->dst));
+ break;
+ case AF_UNIX:
+ chunk_appendf(buf, " addr=unix\n");
+ break;
+ default:
+ /* no more information to print right now */
+ chunk_appendf(buf, "\n");
+ break;
+ }
+
+ chunk_appendf(buf,
+ "%s task=%p (state=0x%02x nice=%d calls=%u rate=%u exp=%s tid=%d(%d/%d)%s", pfx,
+ strm->task,
+ strm->task->state,
+ strm->task->nice, strm->task->calls, read_freq_ctr(&strm->call_rate),
+ strm->task->expire ?
+ tick_is_expired(strm->task->expire, now_ms) ? "<PAST>" :
+ human_time(TICKS_TO_MS(strm->task->expire - now_ms),
+ TICKS_TO_MS(1000)) : "<NEVER>",
+ strm->task->tid,
+ ha_thread_info[strm->task->tid].tgid,
+ ha_thread_info[strm->task->tid].ltid,
+ task_in_rq(strm->task) ? ", running" : "");
+
+ chunk_appendf(buf,
+ " age=%s)\n",
+ human_time(ns_to_sec(now_ns) - ns_to_sec(strm->logs.request_ts), 1));
+
+ if (strm->txn)
+ chunk_appendf(buf,
+ "%s txn=%p flags=0x%x meth=%d status=%d req.st=%s rsp.st=%s req.f=0x%02x rsp.f=0x%02x\n", pfx,
+ strm->txn, strm->txn->flags, strm->txn->meth, strm->txn->status,
+ h1_msg_state_str(strm->txn->req.msg_state), h1_msg_state_str(strm->txn->rsp.msg_state),
+ strm->txn->req.flags, strm->txn->rsp.flags);
+
+ scf = strm->scf;
+ chunk_appendf(buf, "%s scf=%p flags=0x%08x ioto=%s state=%s endp=%s,%p,0x%08x sub=%d", pfx,
+ scf, scf->flags, human_time(scf->ioto, TICKS_TO_MS(1000)), sc_state_str(scf->state),
+ (sc_ep_test(scf, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scf, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
+ scf->sedesc->se, sc_ep_get(scf), scf->wait_event.events);
+ chunk_appendf(buf, " rex=%s",
+ sc_ep_rcv_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " wex=%s",
+ sc_ep_snd_ex(scf) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scf) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " rto=%s",
+ tick_isset(scf->sedesc->lra) ? human_time(TICKS_TO_MS(tick_add(scf->sedesc->lra, scf->ioto) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " wto=%s\n",
+ tick_isset(scf->sedesc->fsb) ? human_time(TICKS_TO_MS(tick_add(scf->sedesc->fsb, scf->ioto) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+
+ chunk_appendf(&trash, "%s iobuf.flags=0x%08x .pipe=%d .buf=%u@%p+%u/%u\n", pfx,
+ scf->sedesc->iobuf.flags,
+ scf->sedesc->iobuf.pipe ? scf->sedesc->iobuf.pipe->data : 0,
+ scf->sedesc->iobuf.buf ? (unsigned int)b_data(scf->sedesc->iobuf.buf): 0,
+ scf->sedesc->iobuf.buf ? b_orig(scf->sedesc->iobuf.buf): NULL,
+ scf->sedesc->iobuf.buf ? (unsigned int)b_head_ofs(scf->sedesc->iobuf.buf): 0,
+ scf->sedesc->iobuf.buf ? (unsigned int)b_size(scf->sedesc->iobuf.buf): 0);
+
+ if ((conn = sc_conn(scf)) != NULL) {
+ if (conn->mux && conn->mux->show_sd) {
+ char muxpfx[100] = "";
+
+ snprintf(muxpfx, sizeof(muxpfx), "%s ", pfx);
+ chunk_appendf(buf, "%s ", pfx);
+ conn->mux->show_sd(buf, scf->sedesc, muxpfx);
+ chunk_appendf(buf, "\n");
+ }
+
+ chunk_appendf(buf,
+ "%s co0=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n", pfx,
+ conn,
+ conn_get_ctrl_name(conn),
+ conn_get_xprt_name(conn),
+ conn_get_mux_name(conn),
+ sc_get_data_name(scf),
+ obj_type_name(conn->target),
+ obj_base_ptr(conn->target));
+
+ chunk_appendf(buf,
+ "%s flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n", pfx,
+ conn->flags,
+ conn_fd(conn),
+ conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
+ conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
+ conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
+ }
+ else if ((tmpctx = sc_appctx(scf)) != NULL) {
+ chunk_appendf(buf,
+ "%s app0=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n", pfx,
+ tmpctx,
+ tmpctx->st0,
+ tmpctx->st1,
+ tmpctx->applet->name,
+ tmpctx->t->tid,
+ tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
+ }
+
+ scb = strm->scb;
+ chunk_appendf(buf, "%s scb=%p flags=0x%08x ioto=%s state=%s endp=%s,%p,0x%08x sub=%d", pfx,
+ scb, scb->flags, human_time(scb->ioto, TICKS_TO_MS(1000)), sc_state_str(scb->state),
+ (sc_ep_test(scb, SE_FL_T_MUX) ? "CONN" : (sc_ep_test(scb, SE_FL_T_APPLET) ? "APPCTX" : "NONE")),
+ scb->sedesc->se, sc_ep_get(scb), scb->wait_event.events);
+ chunk_appendf(buf, " rex=%s",
+ sc_ep_rcv_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_rcv_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " wex=%s",
+ sc_ep_snd_ex(scb) ? human_time(TICKS_TO_MS(sc_ep_snd_ex(scb) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " rto=%s",
+ tick_isset(scb->sedesc->lra) ? human_time(TICKS_TO_MS(tick_add(scb->sedesc->lra, scb->ioto) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+ chunk_appendf(buf, " wto=%s\n",
+ tick_isset(scb->sedesc->fsb) ? human_time(TICKS_TO_MS(tick_add(scb->sedesc->fsb, scb->ioto) - now_ms), TICKS_TO_MS(1000)) : "<NEVER>");
+
+ chunk_appendf(&trash, "%s iobuf.flags=0x%08x .pipe=%d .buf=%u@%p+%u/%u\n", pfx,
+ scb->sedesc->iobuf.flags,
+ scb->sedesc->iobuf.pipe ? scb->sedesc->iobuf.pipe->data : 0,
+ scb->sedesc->iobuf.buf ? (unsigned int)b_data(scb->sedesc->iobuf.buf): 0,
+ scb->sedesc->iobuf.buf ? b_orig(scb->sedesc->iobuf.buf): NULL,
+ scb->sedesc->iobuf.buf ? (unsigned int)b_head_ofs(scb->sedesc->iobuf.buf): 0,
+ scb->sedesc->iobuf.buf ? (unsigned int)b_size(scb->sedesc->iobuf.buf): 0);
+
+ if ((conn = sc_conn(scb)) != NULL) {
+ if (conn->mux && conn->mux->show_sd) {
+ char muxpfx[100] = "";
+
+ snprintf(muxpfx, sizeof(muxpfx), "%s ", pfx);
+ chunk_appendf(buf, "%s ", pfx);
+ conn->mux->show_sd(buf, scb->sedesc, muxpfx);
+ chunk_appendf(buf, "\n");
+ }
+
+ chunk_appendf(buf,
+ "%s co1=%p ctrl=%s xprt=%s mux=%s data=%s target=%s:%p\n", pfx,
+ conn,
+ conn_get_ctrl_name(conn),
+ conn_get_xprt_name(conn),
+ conn_get_mux_name(conn),
+ sc_get_data_name(scb),
+ obj_type_name(conn->target),
+ obj_base_ptr(conn->target));
+
+ chunk_appendf(buf,
+ "%s flags=0x%08x fd=%d fd.state=%02x updt=%d fd.tmask=0x%lx\n", pfx,
+ conn->flags,
+ conn_fd(conn),
+ conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].state : 0,
+ conn_fd(conn) >= 0 ? !!(fdtab[conn->handle.fd].update_mask & ti->ltid_bit) : 0,
+ conn_fd(conn) >= 0 ? fdtab[conn->handle.fd].thread_mask: 0);
+ }
+ else if ((tmpctx = sc_appctx(scb)) != NULL) {
+ chunk_appendf(buf,
+ "%s app1=%p st0=%d st1=%d applet=%s tid=%d nice=%d calls=%u rate=%u\n", pfx,
+ tmpctx,
+ tmpctx->st0,
+ tmpctx->st1,
+ tmpctx->applet->name,
+ tmpctx->t->tid,
+ tmpctx->t->nice, tmpctx->t->calls, read_freq_ctr(&tmpctx->call_rate));
+ }
+
+ if (HAS_FILTERS(strm)) {
+ const struct filter *flt;
+
+ chunk_appendf(buf, "%s filters={", pfx);
+ list_for_each_entry(flt, &strm->strm_flt.filters, list) {
+ if (flt->list.p != &strm->strm_flt.filters)
+ chunk_appendf(buf, ", ");
+ chunk_appendf(buf, "%p=\"%s\"", flt, FLT_ID(flt));
+ }
+ chunk_appendf(buf, "}\n");
+ }
+
+ chunk_appendf(buf,
+ "%s req=%p (f=0x%06x an=0x%x tofwd=%d total=%lld)\n"
+ "%s an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
+ pfx,
+ &strm->req,
+ strm->req.flags, strm->req.analysers,
+ strm->req.to_forward, strm->req.total,
+ pfx,
+ strm->req.analyse_exp ?
+ human_time(TICKS_TO_MS(strm->req.analyse_exp - now_ms),
+ TICKS_TO_MS(1000)) : "<NEVER>",
+ &strm->req.buf,
+ b_orig(&strm->req.buf), (unsigned int)co_data(&strm->req),
+ (unsigned int)ci_head_ofs(&strm->req), (unsigned int)ci_data(&strm->req),
+ (unsigned int)strm->req.buf.size);
+
+ if (IS_HTX_STRM(strm)) {
+ struct htx *htx = htxbuf(&strm->req.buf);
+
+ chunk_appendf(buf,
+ "%s htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n", pfx,
+ htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
+ (htx->tail >= htx->head) ? "NO" : "YES",
+ (unsigned long long)htx->extra);
+ }
+ if (HAS_FILTERS(strm) && strm->strm_flt.current[0]) {
+ const struct filter *flt = strm->strm_flt.current[0];
+
+ chunk_appendf(buf, "%s current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n", pfx,
+ flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
+ }
+
+ chunk_appendf(buf,
+ "%s res=%p (f=0x%06x an=0x%x tofwd=%d total=%lld)\n"
+ "%s an_exp=%s buf=%p data=%p o=%u p=%u i=%u size=%u\n",
+ pfx,
+ &strm->res,
+ strm->res.flags, strm->res.analysers,
+ strm->res.to_forward, strm->res.total,
+ pfx,
+ strm->res.analyse_exp ?
+ human_time(TICKS_TO_MS(strm->res.analyse_exp - now_ms),
+ TICKS_TO_MS(1000)) : "<NEVER>",
+ &strm->res.buf,
+ b_orig(&strm->res.buf), (unsigned int)co_data(&strm->res),
+ (unsigned int)ci_head_ofs(&strm->res), (unsigned int)ci_data(&strm->res),
+ (unsigned int)strm->res.buf.size);
+
+ if (IS_HTX_STRM(strm)) {
+ struct htx *htx = htxbuf(&strm->res.buf);
+
+ chunk_appendf(buf,
+ "%s htx=%p flags=0x%x size=%u data=%u used=%u wrap=%s extra=%llu\n", pfx,
+ htx, htx->flags, htx->size, htx->data, htx_nbblks(htx),
+ (htx->tail >= htx->head) ? "NO" : "YES",
+ (unsigned long long)htx->extra);
+ }
+
+ if (HAS_FILTERS(strm) && strm->strm_flt.current[1]) {
+ const struct filter *flt = strm->strm_flt.current[1];
+
+ chunk_appendf(buf, "%s current_filter=%p (id=\"%s\" flags=0x%x pre=0x%x post=0x%x) \n", pfx,
+ flt, flt->config->id, flt->flags, flt->pre_analyzers, flt->post_analyzers);
+ }
+
+ if (strm->current_rule_list && strm->current_rule) {
+ const struct act_rule *rule = strm->current_rule;
+ chunk_appendf(buf, "%s current_rule=\"%s\" [%s:%d]\n", pfx, rule->kw->kw, rule->conf.file, rule->conf.line);
+ }
+}
+
/* This function dumps a complete stream state onto the stream connector's
 * read buffer. The stream has to be set in strm. It returns 0 if the output
 * buffer is full and it needs to be called again, otherwise non-zero. It is
 * designed to be called from stats_dump_strm_to_buffer() below.
 */
static int stats_dump_full_strm_to_buffer(struct stconn *sc, struct stream *strm)
{
	struct appctx *appctx = __sc_appctx(sc);
	struct show_sess_ctx *ctx = appctx->svcctx;

	chunk_reset(&trash);

	if (ctx->section > 0 && ctx->uid != strm->uniq_id) {
		/* stream changed, no need to go any further */
		chunk_appendf(&trash, "  *** session terminated while we were watching it ***\n");
		if (applet_putchk(appctx, &trash) == -1)
			goto full;
		goto done;
	}

	switch (ctx->section) {
	case 0: /* main status of the stream */
		ctx->uid = strm->uniq_id;
		ctx->section = 1;
		__fallthrough;

	case 1:
		/* dump the whole stream state in one shot */
		strm_dump_to_buffer(&trash, strm, "", appctx->cli_anon_key);
		if (applet_putchk(appctx, &trash) == -1)
			goto full;

		/* use other states to dump the contents */
	}
	/* end of dump */
 done:
	/* reset the context so the next stream starts from scratch */
	ctx->uid = 0;
	ctx->section = 0;
	return 1;
 full:
	return 0;
}
+
+static int cli_parse_show_sess(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct show_sess_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ /* now all sessions by default */
+ ctx->target = NULL;
+ ctx->min_age = 0;
+ ctx->section = 0; /* start with stream status */
+ ctx->pos = 0;
+ ctx->thr = 0;
+
+ if (*args[2] && strcmp(args[2], "older") == 0) {
+ unsigned timeout;
+ const char *res;
+
+ if (!*args[3])
+ return cli_err(appctx, "Expects a minimum age (in seconds by default).\n");
+
+ res = parse_time_err(args[3], &timeout, TIME_UNIT_S);
+ if (res != 0)
+ return cli_err(appctx, "Invalid age.\n");
+
+ ctx->min_age = timeout;
+ ctx->target = (void *)-1; /* show all matching entries */
+ }
+ else if (*args[2] && strcmp(args[2], "susp") == 0) {
+ ctx->flags |= CLI_SHOWSESS_F_SUSP;
+ ctx->target = (void *)-1; /* show all matching entries */
+ }
+ else if (*args[2] && strcmp(args[2], "all") == 0)
+ ctx->target = (void *)-1;
+ else if (*args[2])
+ ctx->target = (void *)strtoul(args[2], NULL, 0);
+
+ /* The back-ref must be reset, it will be detected and set by
+ * the dump code upon first invocation.
+ */
+ LIST_INIT(&ctx->bref.users);
+
+ /* let's set our own stream's epoch to the current one and increment
+ * it so that we know which streams were already there before us.
+ */
+ appctx_strm(appctx)->stream_epoch = _HA_ATOMIC_FETCH_ADD(&stream_epoch, 1);
+ return 0;
+}
+
/* This function dumps all streams' states onto the stream connector's
 * read buffer. It returns 0 if the output buffer is full and it needs
 * to be called again, otherwise non-zero. It proceeds in an isolated
 * thread so there is no thread safety issue here.
 */
static int cli_io_handler_dump_sess(struct appctx *appctx)
{
	struct show_sess_ctx *ctx = appctx->svcctx;
	struct stconn *sc = appctx_sc(appctx);
	struct connection *conn;

	thread_isolate();

	if (ctx->thr >= global.nbthread) {
		/* already terminated */
		goto done;
	}

	/* FIXME: Don't watch the other side !*/
	if (unlikely(sc_opposite(sc)->flags & SC_FL_SHUT_DONE)) {
		/* If we're forced to shut down, we might have to remove our
		 * reference to the last stream being dumped.
		 */
		if (!LIST_ISEMPTY(&ctx->bref.users)) {
			LIST_DELETE(&ctx->bref.users);
			LIST_INIT(&ctx->bref.users);
		}
		goto done;
	}

	chunk_reset(&trash);

	/* first, let's detach the back-ref from a possible previous stream */
	if (!LIST_ISEMPTY(&ctx->bref.users)) {
		LIST_DELETE(&ctx->bref.users);
		LIST_INIT(&ctx->bref.users);
	} else if (!ctx->bref.ref) {
		/* first call, start with first stream */
		ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
	}

	/* and start from where we stopped */
	while (1) {
		char pn[INET6_ADDRSTRLEN];
		struct stream *curr_strm;
		int done= 0;

		/* end of this thread's list: move to the next thread */
		if (ctx->bref.ref == &ha_thread_ctx[ctx->thr].streams)
			done = 1;
		else {
			/* check if we've found a stream created after issuing the "show sess" */
			curr_strm = LIST_ELEM(ctx->bref.ref, struct stream *, list);
			if ((int)(curr_strm->stream_epoch - appctx_strm(appctx)->stream_epoch) > 0)
				done = 1;
		}

		if (done) {
			ctx->thr++;
			if (ctx->thr >= global.nbthread)
				break;
			ctx->bref.ref = ha_thread_ctx[ctx->thr].streams.n;
			continue;
		}

		/* "show sess older <age>": skip streams that are too young */
		if (ctx->min_age) {
			uint age = ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.request_ts);
			if (age < ctx->min_age)
				goto next_sess;
		}

		if (ctx->flags & CLI_SHOWSESS_F_SUSP) {
			/* only show suspicious streams. Non-suspicious ones have a valid
			 * expiration date in the future and a valid front endpoint.
			 */
			if (curr_strm->task->expire &&
			    !tick_is_expired(curr_strm->task->expire, now_ms) &&
			    curr_strm->scf && curr_strm->scf->sedesc && curr_strm->scf->sedesc->se)
				goto next_sess;
		}

		if (ctx->target) {
			/* dump a full state for the requested stream(s) only */
			if (ctx->target != (void *)-1 && ctx->target != curr_strm)
				goto next_sess;

			LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
			/* call the proper dump() function and return if we're missing space */
			if (!stats_dump_full_strm_to_buffer(sc, curr_strm))
				goto full;

			/* stream dump complete */
			LIST_DELETE(&ctx->bref.users);
			LIST_INIT(&ctx->bref.users);
			if (ctx->target != (void *)-1) {
				ctx->target = NULL;
				break;
			}
			else
				goto next_sess;
		}

		/* default mode: one summary line per stream */
		chunk_appendf(&trash,
			      "%p: proto=%s",
			      curr_strm,
			      strm_li(curr_strm) ? strm_li(curr_strm)->rx.proto->name : "?");

		conn = objt_conn(strm_orig(curr_strm));
		switch (conn && conn_get_src(conn) ? addr_to_str(conn->src, pn, sizeof(pn)) : AF_UNSPEC) {
		case AF_INET:
		case AF_INET6:
			chunk_appendf(&trash,
				      " src=%s:%d fe=%s be=%s srv=%s",
				      HA_ANON_CLI(pn),
				      get_host_port(conn->src),
				      HA_ANON_CLI(strm_fe(curr_strm)->id),
				      (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
				      objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
				      );
			break;
		case AF_UNIX:
			chunk_appendf(&trash,
				      " src=unix:%d fe=%s be=%s srv=%s",
				      strm_li(curr_strm)->luid,
				      HA_ANON_CLI(strm_fe(curr_strm)->id),
				      (curr_strm->be->cap & PR_CAP_BE) ? HA_ANON_CLI(curr_strm->be->id) : "<NONE>",
				      objt_server(curr_strm->target) ? HA_ANON_CLI(__objt_server(curr_strm->target)->id) : "<none>"
				      );
			break;
		}

		chunk_appendf(&trash,
			      " ts=%02x epoch=%#x age=%s calls=%u rate=%u cpu=%llu lat=%llu",
			      curr_strm->task->state, curr_strm->stream_epoch,
			      human_time(ns_to_sec(now_ns) - ns_to_sec(curr_strm->logs.request_ts), 1),
			      curr_strm->task->calls, read_freq_ctr(&curr_strm->call_rate),
			      (unsigned long long)curr_strm->cpu_time, (unsigned long long)curr_strm->lat_time);

		chunk_appendf(&trash,
			      " rq[f=%06xh,i=%u,an=%02xh",
			      curr_strm->req.flags,
			      (unsigned int)ci_data(&curr_strm->req),
			      curr_strm->req.analysers);

		chunk_appendf(&trash,
			      ",ax=%s]",
			      curr_strm->req.analyse_exp ?
			      human_time(TICKS_TO_MS(curr_strm->req.analyse_exp - now_ms),
					 TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			      " rp[f=%06xh,i=%u,an=%02xh",
			      curr_strm->res.flags,
			      (unsigned int)ci_data(&curr_strm->res),
			      curr_strm->res.analysers);
		chunk_appendf(&trash,
			      ",ax=%s]",
			      curr_strm->res.analyse_exp ?
			      human_time(TICKS_TO_MS(curr_strm->res.analyse_exp - now_ms),
					 TICKS_TO_MS(1000)) : "");

		conn = sc_conn(curr_strm->scf);
		chunk_appendf(&trash," scf=[%d,%1xh,fd=%d",
			      curr_strm->scf->state, curr_strm->scf->flags, conn_fd(conn));
		chunk_appendf(&trash, ",rex=%s",
			      sc_ep_rcv_ex(curr_strm->scf) ?
			      human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scf) - now_ms),
					 TICKS_TO_MS(1000)) : "");
		chunk_appendf(&trash,",wex=%s]",
			      sc_ep_snd_ex(curr_strm->scf) ?
			      human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scf) - now_ms),
					 TICKS_TO_MS(1000)) : "");

		conn = sc_conn(curr_strm->scb);
		chunk_appendf(&trash, " scb=[%d,%1xh,fd=%d",
			      curr_strm->scb->state, curr_strm->scb->flags, conn_fd(conn));
		chunk_appendf(&trash, ",rex=%s",
			      sc_ep_rcv_ex(curr_strm->scb) ?
			      human_time(TICKS_TO_MS(sc_ep_rcv_ex(curr_strm->scb) - now_ms),
					 TICKS_TO_MS(1000)) : "");
		chunk_appendf(&trash, ",wex=%s]",
			      sc_ep_snd_ex(curr_strm->scb) ?
			      human_time(TICKS_TO_MS(sc_ep_snd_ex(curr_strm->scb) - now_ms),
					 TICKS_TO_MS(1000)) : "");

		chunk_appendf(&trash,
			      " exp=%s rc=%d c_exp=%s",
			      curr_strm->task->expire ?
			      human_time(TICKS_TO_MS(curr_strm->task->expire - now_ms),
					 TICKS_TO_MS(1000)) : "",
			      curr_strm->conn_retries,
			      curr_strm->conn_exp ?
			      human_time(TICKS_TO_MS(curr_strm->conn_exp - now_ms),
					 TICKS_TO_MS(1000)) : "");
		if (task_in_rq(curr_strm->task))
			chunk_appendf(&trash, " run(nice=%d)", curr_strm->task->nice);

		chunk_appendf(&trash, "\n");

		if (applet_putchk(appctx, &trash) == -1) {
			/* let's try again later from this stream. We add ourselves into
			 * this stream's users so that it can remove us upon termination.
			 */
			LIST_APPEND(&curr_strm->back_refs, &ctx->bref.users);
			goto full;
		}

	next_sess:
		ctx->bref.ref = curr_strm->list.n;
	}

	if (ctx->target && ctx->target != (void *)-1) {
		/* specified stream not found */
		if (ctx->section > 0)
			chunk_appendf(&trash, "  *** session terminated while we were watching it ***\n");
		else
			chunk_appendf(&trash, "Session not found.\n");

		if (applet_putchk(appctx, &trash) == -1)
			goto full;

		ctx->target = NULL;
		ctx->uid = 0;
		goto done;
	}

 done:
	thread_release();
	return 1;
 full:
	thread_release();
	return 0;
}
+
/* Release handler for "show sess": if the dump did not complete (ctx->thr
 * still below the thread count), safely detaches the back-reference from
 * the stream that was being dumped.
 */
static void cli_release_show_sess(struct appctx *appctx)
{
	struct show_sess_ctx *ctx = appctx->svcctx;

	if (ctx->thr < global.nbthread) {
		/* a dump was aborted, either in error or timeout. We need to
		 * safely detach from the target stream's list. It's mandatory
		 * to lock because a stream on the target thread could be moving
		 * our node.
		 */
		thread_isolate();
		if (!LIST_ISEMPTY(&ctx->bref.users))
			LIST_DELETE(&ctx->bref.users);
		thread_release();
	}
}
+
/* Parses the "shutdown session" directive, it always returns 1 */
static int cli_parse_shutdown_session(char **args, char *payload, struct appctx *appctx, void *private)
{
	struct stream *strm, *ptr;
	int thr;

	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
		return 1;

	/* <ptr> is user-supplied and is never dereferenced: it is only
	 * compared against the addresses of live streams below.
	 */
	ptr = (void *)strtoul(args[2], NULL, 0);
	if (!ptr)
		return cli_err(appctx, "Session pointer expected (use 'show sess').\n");

	strm = NULL;

	thread_isolate();

	/* first, look for the requested stream in the stream table */
	for (thr = 0; strm != ptr && thr < global.nbthread; thr++) {
		/* on a match the inner break leaves strm == ptr, which also
		 * stops the outer loop and signals success below.
		 */
		list_for_each_entry(strm, &ha_thread_ctx[thr].streams, list) {
			if (strm == ptr) {
				stream_shutdown(strm, SF_ERR_KILLED);
				break;
			}
		}
	}

	thread_release();

	/* do we have the stream ? */
	if (strm != ptr)
		return cli_err(appctx, "No such session (use 'show sess').\n");

	return 1;
}
+
+/* Parses the "shutdown session server" directive, it always returns 1 */
+static int cli_parse_shutdown_sessions_server(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct server *sv;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
+ return 1;
+
+ sv = cli_find_server(appctx, args[3]);
+ if (!sv)
+ return 1;
+
+ /* kill all the stream that are on this server */
+ HA_SPIN_LOCK(SERVER_LOCK, &sv->lock);
+ srv_shutdown_streams(sv, SF_ERR_KILLED);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &sv->lock);
+ return 1;
+}
+
/* register cli keywords: "show sess", "shutdown session" and
 * "shutdown sessions server" map onto the parse/io/release handlers above.
 */
static struct cli_kw_list cli_kws = {{ },{
	{ { "show", "sess",  NULL },             "show sess [<id>|all|susp|older <age>]   : report the list of current sessions or dump this exact session",   cli_parse_show_sess, cli_io_handler_dump_sess, cli_release_show_sess },
	{ { "shutdown", "session",  NULL },      "shutdown session [id]                   : kill a specific session",                                           cli_parse_shutdown_session, NULL, NULL },
	{ { "shutdown", "sessions",  "server" }, "shutdown sessions server <bk>/<srv>     : kill sessions on a server",                                         cli_parse_shutdown_sessions_server, NULL, NULL },
	{{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/* "tcp-request content" action keyword registration. */
+static struct action_kw_list stream_tcp_req_keywords = { ILH, {
+	{ "set-log-level", stream_parse_set_log_level },
+	{ "set-nice",      stream_parse_set_nice },
+	{ "switch-mode",   stream_parse_switch_mode },
+	{ "use-service",   stream_parse_use_service },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &stream_tcp_req_keywords);
+
+/* "tcp-response content" action keyword registration. */
+static struct action_kw_list stream_tcp_res_keywords = { ILH, {
+	{ "set-log-level", stream_parse_set_log_level },
+	{ "set-nice",      stream_parse_set_nice },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &stream_tcp_res_keywords);
+
+/* "http-request" action keyword registration. */
+static struct action_kw_list stream_http_req_keywords = { ILH, {
+	{ "set-log-level", stream_parse_set_log_level },
+	{ "set-nice",      stream_parse_set_nice },
+	{ "use-service",   stream_parse_use_service },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &stream_http_req_keywords);
+
+/* "http-response" action keyword registration. */
+static struct action_kw_list stream_http_res_keywords = { ILH, {
+	{ "set-log-level", stream_parse_set_log_level },
+	{ "set-nice",      stream_parse_set_nice },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &stream_http_res_keywords);
+
+/* "http-after-response" action keyword registration. */
+static struct action_kw_list stream_http_after_res_actions = { ILH, {
+	{ "set-log-level", stream_parse_set_log_level },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_after_res_keywords_register, &stream_http_after_res_actions);
+
+/* Sample fetch returning the effective frontend (client-side) I/O timeout of
+ * the current stream, in milliseconds. Yields nothing when no stream exists.
+ */
+static int smp_fetch_cur_client_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	const struct stream *strm;
+
+	/* announce an integer valid for the whole transaction */
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+
+	strm = smp->strm;
+	if (strm == NULL)
+		return 0;
+
+	/* frontend stream-connector I/O timeout, converted to milliseconds */
+	smp->data.u.sint = TICKS_TO_MS(strm->scf->ioto);
+	return 1;
+}
+
+/* Sample fetch returning the effective backend (server-side) I/O timeout of
+ * the current stream, in milliseconds. Yields nothing when no stream exists.
+ */
+static int smp_fetch_cur_server_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	const struct stream *strm;
+
+	/* announce an integer valid for the whole transaction */
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+
+	strm = smp->strm;
+	if (strm == NULL)
+		return 0;
+
+	/* backend stream-connector I/O timeout, converted to milliseconds */
+	smp->data.u.sint = TICKS_TO_MS(strm->scb->ioto);
+	return 1;
+}
+
+/* Sample fetch returning the current stream's tunnel timeout in milliseconds.
+ * Yields nothing when no stream exists.
+ */
+static int smp_fetch_cur_tunnel_timeout(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	const struct stream *strm;
+
+	/* announce an integer valid for the whole transaction */
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+
+	strm = smp->strm;
+	if (strm == NULL)
+		return 0;
+
+	smp->data.u.sint = TICKS_TO_MS(strm->tunnel_timeout);
+	return 1;
+}
+
+/* Sample fetch returning the config file name of the last rule the stream
+ * finally stopped on. Yields nothing when no stream or no rule was recorded.
+ */
+static int smp_fetch_last_rule_file(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	const char *file;
+
+	smp->data.type = SMP_T_STR;
+	smp->flags = SMP_F_VOL_TXN;
+
+	file = smp->strm ? smp->strm->last_rule_file : NULL;
+	if (file == NULL)
+		return 0;
+
+	/* the string points to long-lived config storage, mark it constant */
+	smp->flags |= SMP_F_CONST;
+	smp->data.u.str.area = (char *)file;
+	smp->data.u.str.data = strlen(file);
+	return 1;
+}
+
+/* Sample fetch returning the config line number of the last rule the stream
+ * finally stopped on. Yields nothing when no stream or the line is zero.
+ */
+static int smp_fetch_last_rule_line(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	int line;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+
+	line = smp->strm ? smp->strm->last_rule_line : 0;
+	if (!line)
+		return 0;
+
+	smp->data.u.sint = line;
+	return 1;
+}
+
+/* Sample fetch returning the two-character session termination state (cause
+ * and finishing state, as found in the logs). Yields nothing without a stream.
+ */
+static int smp_fetch_sess_term_state(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	struct buffer *out = get_trash_chunk();
+	unsigned int flgs;
+
+	smp->flags = SMP_F_VOLATILE;
+	smp->data.type = SMP_T_STR;
+	if (!smp->strm)
+		return 0;
+
+	flgs = smp->strm->flags;
+	/* first char: termination cause, second char: state at termination */
+	out->area[out->data++] = sess_term_cond[(flgs & SF_ERR_MASK) >> SF_ERR_SHIFT];
+	out->area[out->data++] = sess_fin_state[(flgs & SF_FINST_MASK) >> SF_FINST_SHIFT];
+
+	smp->data.u.str = *out;
+	smp->data.type = SMP_T_STR;
+	/* the trash chunk is writable, so the sample must not be constant */
+	smp->flags &= ~SMP_F_CONST;
+	return 1;
+}
+
+/* Sample fetch returning the number of connection retries performed by the
+ * current stream. Yields nothing when no stream exists.
+ */
+static int smp_fetch_conn_retries(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	const struct stream *strm;
+
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+
+	strm = smp->strm;
+	if (strm == NULL)
+		return 0;
+
+	/* the counter may still evolve until the backend side is closed */
+	if (!sc_state_in(strm->scb->state, SC_SB_DIS|SC_SB_CLO))
+		smp->flags |= SMP_F_VOL_TEST;
+	smp->data.u.sint = strm->conn_retries;
+	return 1;
+}
+
+/* Sample fetch returning the stream's 32-bit unique identifier.
+ * Yields nothing when no stream exists.
+ */
+static int smp_fetch_id32(const struct arg *args, struct sample *smp, const char *km, void *private)
+{
+	smp->data.type = SMP_T_SINT;
+	smp->flags = SMP_F_VOL_TXN;
+	if (smp->strm == NULL)
+		return 0;
+	smp->data.u.sint = smp->strm->uniq_id;
+	return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Please take care of keeping this list alphabetically sorted.
+ */
+static struct sample_fetch_kw_list smp_kws = {ILH, {
+	{ "cur_client_timeout", smp_fetch_cur_client_timeout, 0, NULL, SMP_T_SINT, SMP_USE_FTEND, },
+	{ "cur_server_timeout", smp_fetch_cur_server_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
+	{ "cur_tunnel_timeout", smp_fetch_cur_tunnel_timeout, 0, NULL, SMP_T_SINT, SMP_USE_BKEND, },
+	{ "last_rule_file",     smp_fetch_last_rule_file,     0, NULL, SMP_T_STR,  SMP_USE_INTRN, },
+	{ "last_rule_line",     smp_fetch_last_rule_line,     0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "txn.conn_retries",   smp_fetch_conn_retries,       0, NULL, SMP_T_SINT, SMP_USE_L4SRV, },
+	{ "txn.id32",           smp_fetch_id32,               0, NULL, SMP_T_SINT, SMP_USE_INTRN, },
+	{ "txn.sess_term_state",smp_fetch_sess_term_state,    0, NULL, SMP_T_STR,  SMP_USE_INTRN, },
+	{ NULL, NULL, 0, 0, 0 },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &smp_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/task.c b/src/task.c
new file mode 100644
index 0000000..1ab5212
--- /dev/null
+++ b/src/task.c
@@ -0,0 +1,979 @@
+/*
+ * Task management functions.
+ *
+ * Copyright 2000-2009 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <string.h>
+
+#include <import/eb32tree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/activity.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/list.h>
+#include <haproxy/pool.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+
+/* process_stream() lives in stream.c but is referenced directly below by
+ * run_tasks_from_lists() which special-cases it.
+ */
+extern struct task *process_stream(struct task *t, void *context, unsigned int state);
+extern void stream_update_timings(struct task *t, uint64_t lat, uint64_t cpu);
+
+/* memory pools from which tasks and tasklets are allocated */
+DECLARE_POOL(pool_head_task,    "task",    sizeof(struct task));
+DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
+
+/* This is the memory pool containing all the signal structs. These
+ * struct are used to store each required signal between two tasks.
+ */
+DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
+
+/* The lock protecting all wait queues at once. For now we have no better
+ * alternative since a task may have to be removed from a queue and placed
+ * into another one. Storing the WQ index into the task doesn't seem to be
+ * sufficient either.
+ */
+__decl_aligned_rwlock(wq_lock);
+
+/* Flags the task <t> for immediate destruction and puts it into its first
+ * thread's shared tasklet list if not yet queued/running. This will bypass
+ * the priority scheduling and make the task show up as fast as possible in
+ * the other thread's queue. Note that this operation isn't idempotent and is
+ * not supposed to be run on the same task from multiple threads at once. It's
+ * the caller's responsibility to make sure it is the only one able to kill the
+ * task.
+ */
+void task_kill(struct task *t)
+{
+	unsigned int state = t->state;
+	unsigned int thr;
+
+	BUG_ON(state & TASK_KILLED);
+
+	/* CAS loop: <state> is refreshed by each failed CAS, so retrying is
+	 * sufficient until one of the two transitions below succeeds.
+	 */
+	while (1) {
+		while (state & (TASK_RUNNING | TASK_QUEUED)) {
+			/* task already in the queue and about to be executed,
+			 * or even currently running. Just add the flag and be
+			 * done with it, the process loop will detect it and kill
+			 * it. The CAS will fail if we arrive too late.
+			 */
+			if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
+				return;
+		}
+
+		/* We'll have to wake it up, but we must also secure it so that
+		 * it doesn't vanish under us. TASK_QUEUED guarantees nobody will
+		 * add past us.
+		 */
+		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED | TASK_KILLED)) {
+			/* Bypass the tree and go directly into the shared tasklet list.
+			 * Note: that's a task so it must be accounted for as such. Pick
+			 * the task's first thread for the job.
+			 */
+			thr = t->tid >= 0 ? t->tid : tid;
+
+			/* Beware: tasks that have never run don't have their ->list empty yet! */
+			MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
+			               list_to_mt_list(&((struct tasklet *)t)->list));
+			_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
+			_HA_ATOMIC_INC(&ha_thread_ctx[thr].tasks_in_list);
+			wake_thread(thr);
+			return;
+		}
+	}
+}
+
+/* Equivalent of task_kill for tasklets. Mark the tasklet <t> for destruction.
+ * It will be deleted on the next scheduler invocation. This function is
+ * thread-safe : a thread can kill a tasklet of another thread.
+ */
+void tasklet_kill(struct tasklet *t)
+{
+	unsigned int state = t->state;
+	unsigned int thr;
+
+	BUG_ON(state & TASK_KILLED);
+
+	/* same CAS-loop structure as task_kill() above, with TASK_IN_LIST
+	 * playing the role that TASK_QUEUED plays for plain tasks.
+	 */
+	while (1) {
+		while (state & (TASK_IN_LIST)) {
+			/* Tasklet already in the list ready to be executed. Add
+			 * the killed flag and wait for the process loop to
+			 * detect it.
+			 */
+			if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_KILLED))
+				return;
+		}
+
+		/* Mark the tasklet as killed and wake the thread to process it
+		 * as soon as possible.
+		 */
+		if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_IN_LIST | TASK_KILLED)) {
+			thr = t->tid >= 0 ? t->tid : tid;
+			MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list,
+			               list_to_mt_list(&t->list));
+			_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
+			wake_thread(thr);
+			return;
+		}
+	}
+}
+
+/* Do not call this one, please use tasklet_wakeup_on() instead, as this one is
+ * the slow path of tasklet_wakeup_on() which performs some preliminary checks
+ * and sets TASK_IN_LIST before calling this one. A negative <thr> designates
+ * the current thread.
+ */
+void __tasklet_wakeup_on(struct tasklet *tl, int thr)
+{
+	if (likely(thr < 0)) {
+		/* this tasklet runs on the caller thread: pick the local class
+		 * list matching the tasklet's nature, most specific first.
+		 */
+		if (tl->state & TASK_HEAVY) {
+			LIST_APPEND(&th_ctx->tasklets[TL_HEAVY], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_HEAVY;
+		}
+		else if (tl->state & TASK_SELF_WAKING) {
+			LIST_APPEND(&th_ctx->tasklets[TL_BULK], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if ((struct task *)tl == th_ctx->current) {
+			/* a tasklet waking itself up is demoted to the bulk
+			 * class and remembered as self-waking from now on.
+			 */
+			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
+			LIST_APPEND(&th_ctx->tasklets[TL_BULK], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if (th_ctx->current_queue < 0) {
+			/* woken up from outside the scheduler loop */
+			LIST_APPEND(&th_ctx->tasklets[TL_URGENT], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_URGENT;
+		}
+		else {
+			/* otherwise stay in the class currently being scanned */
+			LIST_APPEND(&th_ctx->tasklets[th_ctx->current_queue], &tl->list);
+			th_ctx->tl_class_mask |= 1 << th_ctx->current_queue;
+		}
+		_HA_ATOMIC_INC(&th_ctx->rq_total);
+	} else {
+		/* this tasklet runs on a specific thread. */
+		MT_LIST_APPEND(&ha_thread_ctx[thr].shared_tasklet_list, list_to_mt_list(&tl->list));
+		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
+		wake_thread(thr);
+	}
+}
+
+/* Do not call this one, please use tasklet_wakeup_after() instead, as this one
+ * is the slow path of tasklet_wakeup_after() which performs some preliminary
+ * checks and sets TASK_IN_LIST before calling this one. When <head> is not
+ * NULL the tasklet is inserted right after it, otherwise it is inserted at the
+ * head of the relevant class list. Returns a pointer usable as the next
+ * <head>. Must run on the tasklet's owner thread.
+ */
+struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl)
+{
+	BUG_ON(tl->tid >= 0 && tid != tl->tid);
+	/* this tasklet runs on the caller thread; the class selection below
+	 * mirrors __tasklet_wakeup_on() but uses LIST_INSERT (head insertion)
+	 * so that the caller's chain stays in order.
+	 */
+	if (!head) {
+		if (tl->state & TASK_HEAVY) {
+			LIST_INSERT(&th_ctx->tasklets[TL_HEAVY], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_HEAVY;
+		}
+		else if (tl->state & TASK_SELF_WAKING) {
+			LIST_INSERT(&th_ctx->tasklets[TL_BULK], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if ((struct task *)tl == th_ctx->current) {
+			_HA_ATOMIC_OR(&tl->state, TASK_SELF_WAKING);
+			LIST_INSERT(&th_ctx->tasklets[TL_BULK], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_BULK;
+		}
+		else if (th_ctx->current_queue < 0) {
+			LIST_INSERT(&th_ctx->tasklets[TL_URGENT], &tl->list);
+			th_ctx->tl_class_mask |= 1 << TL_URGENT;
+		}
+		else {
+			LIST_INSERT(&th_ctx->tasklets[th_ctx->current_queue], &tl->list);
+			th_ctx->tl_class_mask |= 1 << th_ctx->current_queue;
+		}
+	}
+	else {
+		LIST_APPEND(head, &tl->list);
+	}
+	_HA_ATOMIC_INC(&th_ctx->rq_total);
+	return &tl->list;
+}
+
+/* Puts the task <t> in run queue at a position depending on t->nice. <t> is
+ * returned. The nice value assigns boosts in 32th of the run queue size. A
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
+ * The task must not already be in the run queue. If unsure, use the safer
+ * task_wakeup() function.
+ */
+void __task_wakeup(struct task *t)
+{
+	struct eb_root *root = &th_ctx->rqueue;
+	int thr __maybe_unused = t->tid >= 0 ? t->tid : tid;
+
+#ifdef USE_THREAD
+	if (thr != tid) {
+		/* waking a task bound to another thread: use that thread's
+		 * shared run queue, under its lock. The lock is held until
+		 * after the eb32_insert() below.
+		 */
+		root = &ha_thread_ctx[thr].rqueue_shared;
+
+		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
+		HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
+
+		t->rq.key = _HA_ATOMIC_ADD_FETCH(&ha_thread_ctx[thr].rqueue_ticks, 1);
+		__ha_barrier_store();
+	} else
+#endif
+	{
+		/* local wakeup: the key is a monotonic insertion counter */
+		_HA_ATOMIC_INC(&th_ctx->rq_total);
+		t->rq.key = _HA_ATOMIC_ADD_FETCH(&th_ctx->rqueue_ticks, 1);
+	}
+
+	if (likely(t->nice)) {
+		int offset;
+
+		/* niced tasks shift their position by nice * runqueue_depth */
+		_HA_ATOMIC_INC(&tg_ctx->niced_tasks);
+		offset = t->nice * (int)global.tune.runqueue_depth;
+		t->rq.key += offset;
+	}
+
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
+		t->wake_date = now_mono_time();
+
+	eb32_insert(root, &t->rq);
+
+#ifdef USE_THREAD
+	if (thr != tid) {
+		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
+
+		/* If all threads that are supposed to handle this task are sleeping,
+		 * wake one.
+		 */
+		wake_thread(thr);
+	}
+#endif
+	return;
+}
+
+/*
+ * __task_queue()
+ *
+ * Inserts a task into wait queue <wq> at the position given by its expiration
+ * date. It does not matter if the task was already in the wait queue or not,
+ * as it will be unlinked. The task MUST NOT have an infinite expiration timer.
+ * Last, tasks must not be queued further than the end of the tree, which is
+ * between <now_ms> and <now_ms> + 2^31 ms (now+24days in 32bit).
+ *
+ * This function should not be used directly, it is meant to be called by the
+ * inline version of task_queue() which performs a few cheap preliminary tests
+ * before deciding to call __task_queue(). Moreover this function doesn't care
+ * at all about locking so the caller must be careful when deciding whether to
+ * lock or not around this call.
+ */
+void __task_queue(struct task *task, struct eb_root *wq)
+{
+#ifdef USE_THREAD
+	/* a task bound to one thread goes to the thread-local wait queue,
+	 * an unbound one (tid < 0) goes to the group's shared wait queue.
+	 */
+	BUG_ON((wq == &tg_ctx->timers && task->tid >= 0) ||
+	       (wq == &th_ctx->timers && task->tid < 0) ||
+	       (wq != &tg_ctx->timers && wq != &th_ctx->timers));
+#endif
+	/* if this happens the process is doomed anyway, so better catch it now
+	 * so that we have the caller in the stack.
+	 */
+	BUG_ON(task->expire == TICK_ETERNITY);
+
+	if (likely(task_in_wq(task)))
+		__task_unlink_wq(task);
+
+	/* the task is not in the queue now */
+	task->wq.key = task->expire;
+#ifdef DEBUG_CHECK_INVALID_EXPIRATION_DATES
+	if (tick_is_lt(task->wq.key, now_ms))
+		/* we're queuing too far away or in the past (most likely) */
+		return;
+#endif
+
+	eb32_insert(wq, &task->wq);
+}
+
+/*
+ * Extract all expired timers from the timer queue, and wakes up all
+ * associated tasks. Works in two passes: first the thread-local wait
+ * queue (lock-free), then the group's shared wait queue under wq_lock.
+ */
+void wake_expired_tasks()
+{
+	struct thread_ctx * const tt = th_ctx; // thread's tasks
+	int max_processed = global.tune.runqueue_depth;
+	struct task *task;
+	struct eb32_node *eb;
+	__decl_thread(int key);
+
+	/* first pass: thread-local timers, no locking needed */
+	while (1) {
+		if (max_processed-- <= 0)
+			goto leave;
+
+		eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
+		if (!eb) {
+			/* we might have reached the end of the tree, typically because
+			 * <now_ms> is in the first half and we're first scanning the last
+			 * half. Let's loop back to the beginning of the tree now.
+			 */
+			eb = eb32_first(&tt->timers);
+			if (likely(!eb))
+				break;
+		}
+
+		/* It is possible that this task was left at an earlier place in the
+		 * tree because a recent call to task_queue() has not moved it. This
+		 * happens when the new expiration date is later than the old one.
+		 * Since it is very unlikely that we reach a timeout anyway, it's a
+		 * lot cheaper to proceed like this because we almost never update
+		 * the tree. We may also find disabled expiration dates there. Since
+		 * we have detached the task from the tree, we simply call task_queue
+		 * to take care of this. Note that we might occasionally requeue it at
+		 * the same place, before <eb>, so we have to check if this happens,
+		 * and adjust <eb>, otherwise we may skip it which is not what we want.
+		 * We may also not requeue the task (and not point eb at it) if its
+		 * expiration time is not set. We also make sure we leave the real
+		 * expiration date for the next task in the queue so that when calling
+		 * next_timer_expiry() we're guaranteed to see the next real date and
+		 * not the next apparent date. This is in order to avoid useless
+		 * wakeups.
+		 */
+
+		task = eb32_entry(eb, struct task, wq);
+		if (tick_is_expired(task->expire, now_ms)) {
+			/* expired task, wake it up */
+			__task_unlink_wq(task);
+			_task_wakeup(task, TASK_WOKEN_TIMER, 0);
+		}
+		else if (task->expire != eb->key) {
+			/* task is not expired but its key doesn't match so let's
+			 * update it and skip to next apparently expired task.
+			 */
+			__task_unlink_wq(task);
+			if (tick_isset(task->expire))
+				__task_queue(task, &tt->timers);
+		}
+		else {
+			/* task not expired and correctly placed. It may not be eternal. */
+			BUG_ON(task->expire == TICK_ETERNITY);
+			break;
+		}
+	}
+
+#ifdef USE_THREAD
+	/* second pass: the group's shared timers, protected by wq_lock */
+	if (eb_is_empty(&tg_ctx->timers))
+		goto leave;
+
+	/* cheap read-locked peek first to avoid the seek lock when nothing
+	 * is about to expire.
+	 */
+	HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
+	eb = eb32_lookup_ge(&tg_ctx->timers, now_ms - TIMER_LOOK_BACK);
+	if (!eb) {
+		eb = eb32_first(&tg_ctx->timers);
+		if (likely(!eb)) {
+			HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
+			goto leave;
+		}
+	}
+	key = eb->key;
+
+	if (tick_is_lt(now_ms, key)) {
+		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
+		goto leave;
+	}
+
+	/* There's really something of interest here, let's visit the queue */
+
+	if (HA_RWLOCK_TRYRDTOSK(TASK_WQ_LOCK, &wq_lock)) {
+		/* if we failed to grab the lock it means another thread is
+		 * already doing the same here, so let it do the job.
+		 */
+		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
+		goto leave;
+	}
+
+	while (1) {
+  lookup_next:
+		if (max_processed-- <= 0)
+			break;
+		eb = eb32_lookup_ge(&tg_ctx->timers, now_ms - TIMER_LOOK_BACK);
+		if (!eb) {
+			/* we might have reached the end of the tree, typically because
+			 * <now_ms> is in the first half and we're first scanning the last
+			 * half. Let's loop back to the beginning of the tree now.
+			 */
+			eb = eb32_first(&tg_ctx->timers);
+			if (likely(!eb))
+				break;
+		}
+
+		task = eb32_entry(eb, struct task, wq);
+
+		/* Check for any competing run of the task (quite rare but may
+		 * involve a dangerous concurrent access on task->expire). In
+		 * order to protect against this, we'll take an exclusive access
+		 * on TASK_RUNNING before checking/touching task->expire. If the
+		 * task is already RUNNING on another thread, it will deal by
+		 * itself with the requeuing so we must not do anything and
+		 * simply quit the loop for now, because we cannot wait with the
+		 * WQ lock held as this would prevent the running thread from
+		 * requeuing the task. One annoying effect of holding RUNNING
+		 * here is that a concurrent task_wakeup() will refrain from
+		 * waking it up. This forces us to check for a wakeup after
+		 * releasing the flag.
+		 */
+		if (HA_ATOMIC_FETCH_OR(&task->state, TASK_RUNNING) & TASK_RUNNING)
+			break;
+
+		if (tick_is_expired(task->expire, now_ms)) {
+			/* expired task, wake it up: upgrade the seek lock to a
+			 * write lock just for the unlink, then downgrade back.
+			 */
+			HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
+			__task_unlink_wq(task);
+			HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
+			task_drop_running(task, TASK_WOKEN_TIMER);
+		}
+		else if (task->expire != eb->key) {
+			/* task is not expired but its key doesn't match so let's
+			 * update it and skip to next apparently expired task.
+			 */
+			HA_RWLOCK_SKTOWR(TASK_WQ_LOCK, &wq_lock);
+			__task_unlink_wq(task);
+			if (tick_isset(task->expire))
+				__task_queue(task, &tg_ctx->timers);
+			HA_RWLOCK_WRTOSK(TASK_WQ_LOCK, &wq_lock);
+			task_drop_running(task, 0);
+			goto lookup_next;
+		}
+		else {
+			/* task not expired and correctly placed. It may not be eternal. */
+			BUG_ON(task->expire == TICK_ETERNITY);
+			task_drop_running(task, 0);
+			break;
+		}
+	}
+
+	HA_RWLOCK_SKUNLOCK(TASK_WQ_LOCK, &wq_lock);
+#endif
+ leave:
+	return;
+}
+
+/* Checks the next timer for the current thread by looking into its own timer
+ * list and the global one. It may return TICK_ETERNITY if no timer is present.
+ * Note that the next timer might very well be slightly in the past.
+ */
+int next_timer_expiry()
+{
+	struct thread_ctx * const tt = th_ctx; // thread's tasks
+	struct eb32_node *eb;
+	int ret = TICK_ETERNITY;
+	__decl_thread(int key = TICK_ETERNITY);
+
+	/* first check in the thread-local timers */
+	eb = eb32_lookup_ge(&tt->timers, now_ms - TIMER_LOOK_BACK);
+	if (!eb) {
+		/* we might have reached the end of the tree, typically because
+		 * <now_ms> is in the first half and we're first scanning the last
+		 * half. Let's loop back to the beginning of the tree now.
+		 */
+		eb = eb32_first(&tt->timers);
+	}
+
+	if (eb)
+		ret = eb->key;
+
+#ifdef USE_THREAD
+	/* then combine with the group's shared timers; the key is copied out
+	 * under the read lock and merged with tick_first() afterwards.
+	 */
+	if (!eb_is_empty(&tg_ctx->timers)) {
+		HA_RWLOCK_RDLOCK(TASK_WQ_LOCK, &wq_lock);
+		eb = eb32_lookup_ge(&tg_ctx->timers, now_ms - TIMER_LOOK_BACK);
+		if (!eb)
+			eb = eb32_first(&tg_ctx->timers);
+		if (eb)
+			key = eb->key;
+		HA_RWLOCK_RDUNLOCK(TASK_WQ_LOCK, &wq_lock);
+		if (eb)
+			ret = tick_first(ret, key);
+	}
+#endif
+	return ret;
+}
+
+/* Walks over tasklet lists th_ctx->tasklets[0..TL_CLASSES-1] and run at most
+ * budget[TL_*] of them. Returns the number of entries effectively processed
+ * (tasks and tasklets merged). The count of tasks in the list for the current
+ * thread is adjusted.
+ */
+unsigned int run_tasks_from_lists(unsigned int budgets[])
+{
+	struct task *(*process)(struct task *t, void *ctx, unsigned int state);
+	struct list *tl_queues = th_ctx->tasklets;
+	struct task *t;
+	uint8_t budget_mask = (1 << TL_CLASSES) - 1;
+	struct sched_activity *profile_entry = NULL;
+	unsigned int done = 0;
+	unsigned int queue;
+	unsigned int state;
+	void *ctx;
+
+	for (queue = 0; queue < TL_CLASSES;) {
+		th_ctx->current_queue = queue;
+
+		/* global.tune.sched.low-latency is set */
+		if (global.tune.options & GTUNE_SCHED_LOW_LATENCY) {
+			if (unlikely(th_ctx->tl_class_mask & budget_mask & ((1 << queue) - 1))) {
+				/* a lower queue index has tasks again and still has a
+				 * budget to run them. Let's switch to it now.
+				 */
+				queue = (th_ctx->tl_class_mask & 1) ? 0 :
+					(th_ctx->tl_class_mask & 2) ? 1 : 2;
+				continue;
+			}
+
+			if (unlikely(queue > TL_URGENT &&
+				     budget_mask & (1 << TL_URGENT) &&
+				     !MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list))) {
+				/* an urgent tasklet arrived from another thread */
+				break;
+			}
+
+			if (unlikely(queue > TL_NORMAL &&
+				     budget_mask & (1 << TL_NORMAL) &&
+				     (!eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared)))) {
+				/* a task was woken up by a bulk tasklet or another thread */
+				break;
+			}
+		}
+
+		if (LIST_ISEMPTY(&tl_queues[queue])) {
+			/* queue exhausted: clear its class bit and move on */
+			th_ctx->tl_class_mask &= ~(1 << queue);
+			queue++;
+			continue;
+		}
+
+		if (!budgets[queue]) {
+			/* budget exhausted for this class during this call */
+			budget_mask &= ~(1 << queue);
+			queue++;
+			continue;
+		}
+
+		budgets[queue]--;
+		activity[tid].ctxsw++;
+
+		/* both tasks and tasklets are chained by their tasklet list
+		 * part, so the head element is always a tasklet pointer.
+		 */
+		t = (struct task *)LIST_ELEM(tl_queues[queue].n, struct tasklet *, list);
+		ctx = t->context;
+		process = t->process;
+		t->calls++;
+
+		/* non-zero wake_date means profiling was active at wakeup time;
+		 * latencies are accumulated in 32-bit wrapping arithmetic —
+		 * presumably sufficient for these deltas (TODO confirm).
+		 */
+		th_ctx->sched_wake_date = t->wake_date;
+		if (th_ctx->sched_wake_date) {
+			uint32_t now_ns = now_mono_time();
+			uint32_t lat = now_ns - th_ctx->sched_wake_date;
+
+			t->wake_date = 0;
+			th_ctx->sched_call_date = now_ns;
+			profile_entry = sched_activity_entry(sched_activity, t->process, t->caller);
+			th_ctx->sched_profile_entry = profile_entry;
+			HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
+			HA_ATOMIC_INC(&profile_entry->calls);
+		}
+		__ha_barrier_store();
+
+		th_ctx->current = t;
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
+
+		_HA_ATOMIC_DEC(&th_ctx->rq_total);
+		LIST_DEL_INIT(&((struct tasklet *)t)->list);
+		__ha_barrier_store();
+
+		if (t->state & TASK_F_TASKLET) {
+			/* this is a tasklet */
+			state = _HA_ATOMIC_FETCH_AND(&t->state, TASK_PERSISTENT);
+			__ha_barrier_atomic_store();
+
+			if (likely(!(state & TASK_KILLED))) {
+				process(t, ctx, state);
+			}
+			else {
+				/* killed tasklet: free it without running it */
+				done++;
+				th_ctx->current = NULL;
+				pool_free(pool_head_tasklet, t);
+				__ha_barrier_store();
+				continue;
+			}
+		} else {
+			/* This is a regular task */
+
+			/* We must be the exclusive owner of the TASK_RUNNING bit, and
+			 * have to be careful that the task is not being manipulated on
+			 * another thread finding it expired in wake_expired_tasks().
+			 * The TASK_RUNNING bit will be set during these operations,
+			 * they are extremely rare and do not last long so the best to
+			 * do here is to wait.
+			 */
+			state = _HA_ATOMIC_LOAD(&t->state);
+			do {
+				while (unlikely(state & TASK_RUNNING)) {
+					__ha_cpu_relax();
+					state = _HA_ATOMIC_LOAD(&t->state);
+				}
+			} while (!_HA_ATOMIC_CAS(&t->state, &state, (state & TASK_PERSISTENT) | TASK_RUNNING));
+
+			__ha_barrier_atomic_store();
+
+			_HA_ATOMIC_DEC(&ha_thread_ctx[tid].tasks_in_list);
+
+			/* Note for below: if TASK_KILLED arrived before we've read the state, we
+			 * directly free the task. Otherwise it will be seen after processing and
+			 * it's freed on the exit path.
+			 */
+			if (likely(!(state & TASK_KILLED) && process == process_stream))
+				t = process_stream(t, ctx, state);
+			else if (!(state & TASK_KILLED) && process != NULL)
+				t = process(t, ctx, state);
+			else {
+				task_unlink_wq(t);
+				__task_free(t);
+				th_ctx->current = NULL;
+				__ha_barrier_store();
+				/* We don't want max_processed to be decremented if
+				 * we're just freeing a destroyed task, we should only
+				 * do so if we really ran a task.
+				 */
+				continue;
+			}
+
+			/* If there is a pending state we have to wake up the task
+			 * immediately, else we defer it into wait queue
+			 */
+			if (t != NULL) {
+				state = _HA_ATOMIC_LOAD(&t->state);
+				if (unlikely(state & TASK_KILLED)) {
+					task_unlink_wq(t);
+					__task_free(t);
+				}
+				else {
+					task_queue(t);
+					task_drop_running(t, 0);
+				}
+			}
+		}
+
+		th_ctx->current = NULL;
+		__ha_barrier_store();
+
+		/* stats are only registered for non-zero wake dates */
+		if (unlikely(th_ctx->sched_wake_date))
+			HA_ATOMIC_ADD(&profile_entry->cpu_time, (uint32_t)(now_mono_time() - th_ctx->sched_call_date));
+		done++;
+	}
+	th_ctx->current_queue = -1;
+
+	return done;
+}
+
+/* The run queue is chronologically sorted in a tree. An insertion counter is
+ * used to assign a position to each task. This counter may be combined with
+ * other variables (eg: nice value) to set the final position in the tree. The
+ * counter may wrap without a problem, of course. We then limit the number of
+ * tasks processed to 200 in any case, so that general latency remains low and
+ * so that task positions have a chance to be considered. The function scans
+ * both the global and local run queues and picks the most urgent task between
+ * the two. We need to grab the global runqueue lock to touch it so it's taken
+ * on the very first access to the global run queue and is released as soon as
+ * it reaches the end.
+ *
+ * The function adjusts <next> if a new event is closer.
+ */
+void process_runnable_tasks()
+{
+ struct thread_ctx * const tt = th_ctx;
+ struct eb32_node *lrq; // next local run queue entry
+ struct eb32_node *grq; // next global run queue entry
+ struct task *t;
+ const unsigned int default_weights[TL_CLASSES] = {
+ [TL_URGENT] = 64, // ~50% of CPU bandwidth for I/O
+ [TL_NORMAL] = 48, // ~37% of CPU bandwidth for tasks
+ [TL_BULK] = 16, // ~13% of CPU bandwidth for self-wakers
+ [TL_HEAVY] = 1, // never more than 1 heavy task at once
+ };
+ unsigned int max[TL_CLASSES]; // max to be run per class
+ unsigned int max_total; // sum of max above
+ struct mt_list *tmp_list;
+ unsigned int queue;
+ int max_processed;
+ int lpicked, gpicked;
+ int heavy_queued = 0;
+ int budget;
+
+ _HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
+
+ if (!thread_has_tasks()) {
+ activity[tid].empty_rq++;
+ return;
+ }
+
+ max_processed = global.tune.runqueue_depth;
+
+ if (likely(tg_ctx->niced_tasks))
+ max_processed = (max_processed + 3) / 4;
+
+ if (max_processed < th_ctx->rq_total && th_ctx->rq_total <= 2*max_processed) {
+ /* If the run queue exceeds the budget by up to 50%, let's cut it
+ * into two identical halves to improve latency.
+ */
+ max_processed = th_ctx->rq_total / 2;
+ }
+
+ not_done_yet:
+ max[TL_URGENT] = max[TL_NORMAL] = max[TL_BULK] = 0;
+
+ /* urgent tasklets list gets a default weight of ~50% */
+ if ((tt->tl_class_mask & (1 << TL_URGENT)) ||
+ !MT_LIST_ISEMPTY(&tt->shared_tasklet_list))
+ max[TL_URGENT] = default_weights[TL_URGENT];
+
+ /* normal tasklets list gets a default weight of ~37% */
+ if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
+ !eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared))
+ max[TL_NORMAL] = default_weights[TL_NORMAL];
+
+ /* bulk tasklets list gets a default weight of ~13% */
+ if ((tt->tl_class_mask & (1 << TL_BULK)))
+ max[TL_BULK] = default_weights[TL_BULK];
+
+ /* heavy tasks are processed only once and never refilled in a
+ * call round. That budget is not lost either as we don't reset
+ * it unless consumed.
+ */
+ if (!heavy_queued) {
+ if ((tt->tl_class_mask & (1 << TL_HEAVY)))
+ max[TL_HEAVY] = default_weights[TL_HEAVY];
+ else
+ max[TL_HEAVY] = 0;
+ heavy_queued = 1;
+ }
+
+ /* Now compute a fair share of the weights. Total may slightly exceed
+ * 100% due to rounding, this is not a problem. Note that while in
+ * theory the sum cannot be NULL as we cannot get there without tasklets
+ * to process, in practice it seldom happens when multiple writers
+ * conflict and rollback on MT_LIST_TRY_APPEND(shared_tasklet_list), causing
+ * a first MT_LIST_ISEMPTY() to succeed for thread_has_task() and the
+ * one above to finally fail. This is extremely rare and not a problem.
+ */
+ max_total = max[TL_URGENT] + max[TL_NORMAL] + max[TL_BULK] + max[TL_HEAVY];
+ if (!max_total)
+ goto leave;
+
+ for (queue = 0; queue < TL_CLASSES; queue++)
+ max[queue] = ((unsigned)max_processed * max[queue] + max_total - 1) / max_total;
+
+ /* The heavy queue must never process more than very few tasks at once
+ * anyway. We set the limit to 1 if running on low_latency scheduling,
+ * given that we know that other values can have an impact on latency
+ * (~500us end-to-end connection achieved at 130kcps in SSL), 1 + one
+ * per 1024 tasks if there is at least one non-heavy task while still
+ * respecting the ratios above, or 1 + one per 128 tasks if only heavy
+ * tasks are present. This allows to drain excess SSL handshakes more
+ * efficiently if the queue becomes congested.
+ */
+ if (max[TL_HEAVY] > 1) {
+ if (global.tune.options & GTUNE_SCHED_LOW_LATENCY)
+ budget = 1;
+ else if (tt->tl_class_mask & ~(1 << TL_HEAVY))
+ budget = 1 + tt->rq_total / 1024;
+ else
+ budget = 1 + tt->rq_total / 128;
+
+ if (max[TL_HEAVY] > budget)
+ max[TL_HEAVY] = budget;
+ }
+
+ lrq = grq = NULL;
+
+ /* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
+ /* Note: the grq lock is always held when grq is not null */
+ lpicked = gpicked = 0;
+ budget = max[TL_NORMAL] - tt->tasks_in_list;
+ while (lpicked + gpicked < budget) {
+ if (!eb_is_empty(&th_ctx->rqueue_shared) && !grq) {
+#ifdef USE_THREAD
+ HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
+ grq = eb32_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK);
+ if (unlikely(!grq)) {
+ grq = eb32_first(&th_ctx->rqueue_shared);
+ if (!grq)
+ HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
+ }
+#endif
+ }
+
+ /* If a global task is available for this thread, it's in grq
+ * now and the global RQ is locked.
+ */
+
+ if (!lrq) {
+ lrq = eb32_lookup_ge(&tt->rqueue, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK);
+ if (unlikely(!lrq))
+ lrq = eb32_first(&tt->rqueue);
+ }
+
+ if (!lrq && !grq)
+ break;
+
+ if (likely(!grq || (lrq && (int)(lrq->key - grq->key) <= 0))) {
+ t = eb32_entry(lrq, struct task, rq);
+ lrq = eb32_next(lrq);
+ eb32_delete(&t->rq);
+ lpicked++;
+ }
+#ifdef USE_THREAD
+ else {
+ t = eb32_entry(grq, struct task, rq);
+ grq = eb32_next(grq);
+ eb32_delete(&t->rq);
+
+ if (unlikely(!grq)) {
+ grq = eb32_first(&th_ctx->rqueue_shared);
+ if (!grq)
+ HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
+ }
+ gpicked++;
+ }
+#endif
+ if (t->nice)
+ _HA_ATOMIC_DEC(&tg_ctx->niced_tasks);
+
+ /* Add it to the local task list */
+ LIST_APPEND(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
+ }
+
+ /* release the rqueue lock */
+ if (grq) {
+ HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
+ grq = NULL;
+ }
+
+ if (lpicked + gpicked) {
+ tt->tl_class_mask |= 1 << TL_NORMAL;
+ _HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
+ activity[tid].tasksw += lpicked + gpicked;
+ }
+
+ /* Merge the list of tasklets waken up by other threads to the
+ * main list.
+ */
+ tmp_list = MT_LIST_BEHEAD(&tt->shared_tasklet_list);
+ if (tmp_list) {
+ LIST_SPLICE_END_DETACHED(&tt->tasklets[TL_URGENT], (struct list *)tmp_list);
+ if (!LIST_ISEMPTY(&tt->tasklets[TL_URGENT]))
+ tt->tl_class_mask |= 1 << TL_URGENT;
+ }
+
+ /* execute tasklets in each queue */
+ max_processed -= run_tasks_from_lists(max);
+
+ /* some tasks may have woken other ones up */
+ if (max_processed > 0 && thread_has_tasks())
+ goto not_done_yet;
+
+ leave:
+ if (tt->tl_class_mask)
+ activity[tid].long_rq++;
+}
+
+/*
+ * Delete every task before running the master polling loop.
+ * Purges the shared run queue and the thread-group timers (threaded builds
+ * only), then each thread's local run queue and timer queue, so that no
+ * worker task survives into the master's own event loop.
+ */
+void mworker_cleantasks()
+{
+	struct task *t;
+	int i;
+	struct eb32_node *tmp_wq = NULL;
+	struct eb32_node *tmp_rq = NULL;
+
+#ifdef USE_THREAD
+	/* cleanup the global run queue */
+	tmp_rq = eb32_first(&th_ctx->rqueue_shared);
+	while (tmp_rq) {
+		t = eb32_entry(tmp_rq, struct task, rq);
+		/* advance before task_destroy() removes the node from the tree */
+		tmp_rq = eb32_next(tmp_rq);
+		task_destroy(t);
+	}
+	/* cleanup the timers queue */
+	tmp_wq = eb32_first(&tg_ctx->timers);
+	while (tmp_wq) {
+		t = eb32_entry(tmp_wq, struct task, wq);
+		tmp_wq = eb32_next(tmp_wq);
+		task_destroy(t);
+	}
+#endif
+	/* clean the per thread run queue */
+	for (i = 0; i < global.nbthread; i++) {
+		tmp_rq = eb32_first(&ha_thread_ctx[i].rqueue);
+		while (tmp_rq) {
+			t = eb32_entry(tmp_rq, struct task, rq);
+			tmp_rq = eb32_next(tmp_rq);
+			task_destroy(t);
+		}
+		/* cleanup the per thread timers queue */
+		tmp_wq = eb32_first(&ha_thread_ctx[i].timers);
+		while (tmp_wq) {
+			t = eb32_entry(tmp_wq, struct task, wq);
+			tmp_wq = eb32_next(tmp_wq);
+			task_destroy(t);
+		}
+	}
+}
+
+/* perform minimal initializations: reset each thread group's timer tree
+ * and each thread's tasklet queues. Registered as an INITCALL at the
+ * STG_PREPARE stage (see bottom of this file).
+ */
+static void init_task()
+{
+	int i, q;
+
+	/* zero the timer tree root of every thread group */
+	for (i = 0; i < MAX_TGROUPS; i++)
+		memset(&ha_tgroup_ctx[i].timers, 0, sizeof(ha_tgroup_ctx[i].timers));
+
+	for (i = 0; i < MAX_THREADS; i++) {
+		/* one tasklet list per scheduling class, plus the MT-safe
+		 * shared list other threads use to wake tasklets on this one
+		 */
+		for (q = 0; q < TL_CLASSES; q++)
+			LIST_INIT(&ha_thread_ctx[i].tasklets[q]);
+		MT_LIST_INIT(&ha_thread_ctx[i].shared_tasklet_list);
+	}
+}
+
+/* config parser for global "tune.sched.low-latency", accepts "on" or "off".
+ * Sets or clears GTUNE_SCHED_LOW_LATENCY in global.tune.options. Returns 0
+ * on success, -1 with a message in <err> on failure.
+ */
+static int cfg_parse_tune_sched_low_latency(char **args, int section_type, struct proxy *curpx,
+                                            const struct proxy *defpx, const char *file, int line,
+                                            char **err)
+{
+	if (too_many_args(1, args, err, NULL))
+		return -1;
+
+	if (strcmp(args[1], "on") == 0) {
+		global.tune.options |= GTUNE_SCHED_LOW_LATENCY;
+		return 0;
+	}
+
+	if (strcmp(args[1], "off") == 0) {
+		global.tune.options &= ~GTUNE_SCHED_LOW_LATENCY;
+		return 0;
+	}
+
+	memprintf(err, "'%s' expects either 'on' or 'off' but got '%s'.", args[0], args[1]);
+	return -1;
+}
+
+/* config keyword parsers: registered with the global config parser at the
+ * STG_REGISTER stage; init_task() runs earlier, at STG_PREPARE.
+ */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "tune.sched.low-latency", cfg_parse_tune_sched_low_latency },
+	{ 0, NULL, NULL }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+INITCALL0(STG_PREPARE, init_task);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/tcp_act.c b/src/tcp_act.c
new file mode 100644
index 0000000..8b44047
--- /dev/null
+++ b/src/tcp_act.c
@@ -0,0 +1,749 @@
+/*
+ * AF_INET/AF_INET6 SOCK_STREAM protocol layer (tcp)
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/action-t.h>
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/channel.h>
+#include <haproxy/connection.h>
+#include <haproxy/global.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/proto_tcp.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/server.h>
+#include <haproxy/session.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/tools.h>
+
+/* Executes the "attach-srv" action: flags the client connection for reversal
+ * towards the server resolved at check time (rule->arg.attach_srv.srv), and
+ * optionally tags it with a name computed from the "name" sample expression.
+ * Returns ACT_RET_ABRT when no connection is attached to the session,
+ * ACT_RET_ERR on allocation failure, ACT_RET_CONT otherwise.
+ */
+static enum act_return tcp_action_attach_srv(struct act_rule *rule, struct proxy *px,
+                                             struct session *sess, struct stream *s, int flags)
+{
+	struct server *srv = rule->arg.attach_srv.srv;
+	struct sample *name_smp;
+	struct connection *conn = objt_conn(sess->origin);
+
+	if (!conn)
+		return ACT_RET_ABRT;
+
+	conn_set_reverse(conn, &srv->obj_type);
+
+	if (rule->arg.attach_srv.name) {
+		name_smp = sample_fetch_as_type(sess->fe, sess, s,
+		                                SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
+		                                rule->arg.attach_srv.name, SMP_T_STR);
+		/* duplicate the sample's buffer into a connection-owned copy */
+		if (name_smp) {
+			struct buffer *buf = &name_smp->data.u.str;
+			char *area = malloc(b_data(buf));
+
+			/* malloc(0) may legitimately return NULL; only report an
+			 * allocation error when data really had to be copied.
+			 */
+			if (!area && b_data(buf))
+				return ACT_RET_ERR;
+
+			conn->reverse.name = b_make(area, b_data(buf), 0, 0);
+			b_ncat(&conn->reverse.name, buf, b_data(buf));
+		}
+	}
+
+	return ACT_RET_CONT;
+}
+
+/*
+ * Execute the "set-src" action. May be called from {tcp,http}request.
+ * It only changes the address and tries to preserve the original port. If the
+ * previous family was neither AF_INET nor AF_INET6, the port is set to zero.
+ */
+static enum act_return tcp_action_req_set_src(struct act_rule *rule, struct proxy *px,
+                                              struct session *sess, struct stream *s, int flags)
+{
+	struct connection *cli_conn;
+	struct sockaddr_storage *src;
+	struct sample *smp;
+
+	/* Select the source address to rewrite depending on which ruleset the
+	 * action is called from; silently do nothing when it is unavailable.
+	 */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON:
+		/* "tcp-request connection": address lives on the client connection */
+		cli_conn = objt_conn(sess->origin);
+		if (!cli_conn || !conn_get_src(cli_conn))
+			goto end;
+		src = cli_conn->src;
+		break;
+
+	case ACT_F_TCP_REQ_SES:
+		/* "tcp-request session": address lives on the session */
+		if (!sess_get_src(sess))
+			goto end;
+		src = sess->src;
+		break;
+
+	case ACT_F_TCP_REQ_CNT:
+	case ACT_F_HTTP_REQ:
+		/* content/HTTP rules: address lives on the front stream connector */
+		if (!sc_get_src(s->scf))
+			goto end;
+		src = s->scf->src;
+		break;
+
+	default:
+		goto end;
+	}
+
+	smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_ADDR);
+	if (smp) {
+		/* keep the original port; get_net_port() returns it in the same
+		 * representation as sin_port/sin6_port so no conversion is needed
+		 */
+		int port = get_net_port(src);
+
+		if (smp->data.type == SMP_T_IPV4) {
+			((struct sockaddr_in *)src)->sin_family = AF_INET;
+			((struct sockaddr_in *)src)->sin_addr.s_addr = smp->data.u.ipv4.s_addr;
+			((struct sockaddr_in *)src)->sin_port = port;
+		} else if (smp->data.type == SMP_T_IPV6) {
+			((struct sockaddr_in6 *)src)->sin6_family = AF_INET6;
+			memcpy(&((struct sockaddr_in6 *)src)->sin6_addr, &smp->data.u.ipv6, sizeof(struct in6_addr));
+			((struct sockaddr_in6 *)src)->sin6_port = port;
+		}
+		/* other sample types are ignored on purpose */
+	}
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/*
+ * Execute the "set-dst" action. May be called from {tcp,http}request.
+ * It only changes the address and tries to preserve the original port. If the
+ * previous family was neither AF_INET nor AF_INET6, the port is set to zero.
+ */
+static enum act_return tcp_action_req_set_dst(struct act_rule *rule, struct proxy *px,
+                                              struct session *sess, struct stream *s, int flags)
+{
+	struct connection *cli_conn;
+	struct sockaddr_storage *dst;
+	struct sample *smp;
+
+	/* Select the destination address to rewrite depending on which ruleset
+	 * the action is called from; silently do nothing when it is unavailable.
+	 */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON:
+		/* "tcp-request connection": address lives on the client connection */
+		cli_conn = objt_conn(sess->origin);
+		if (!cli_conn || !conn_get_dst(cli_conn))
+			goto end;
+		dst = cli_conn->dst;
+		break;
+
+	case ACT_F_TCP_REQ_SES:
+		/* "tcp-request session": address lives on the session */
+		if (!sess_get_dst(sess))
+			goto end;
+		dst = sess->dst;
+		break;
+
+	case ACT_F_TCP_REQ_CNT:
+	case ACT_F_HTTP_REQ:
+		/* content/HTTP rules: address lives on the front stream connector */
+		if (!sc_get_dst(s->scf))
+			goto end;
+		dst = s->scf->dst;
+		break;
+
+	default:
+		goto end;
+	}
+
+	smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_ADDR);
+	if (smp) {
+		/* keep the original port; get_net_port() returns it in the same
+		 * representation as sin_port/sin6_port so no conversion is needed
+		 */
+		int port = get_net_port(dst);
+
+		if (smp->data.type == SMP_T_IPV4) {
+			((struct sockaddr_in *)dst)->sin_family = AF_INET;
+			((struct sockaddr_in *)dst)->sin_addr.s_addr = smp->data.u.ipv4.s_addr;
+			((struct sockaddr_in *)dst)->sin_port = port;
+		} else if (smp->data.type == SMP_T_IPV6) {
+			((struct sockaddr_in6 *)dst)->sin6_family = AF_INET6;
+			memcpy(&((struct sockaddr_in6 *)dst)->sin6_addr, &smp->data.u.ipv6, sizeof(struct in6_addr));
+			((struct sockaddr_in6 *)dst)->sin6_port = port;
+		}
+		/* other sample types are ignored on purpose */
+	}
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/*
+ * Execute the "set-src-port" action. May be called from {tcp,http}request.
+ * The address family is tested before storing the port: IPv6 keeps its
+ * address, IPv4 keeps its address, and any other family is first forced to
+ * AF_INET "0.0.0.0" before the port is assigned.
+ */
+static enum act_return tcp_action_req_set_src_port(struct act_rule *rule, struct proxy *px,
+                                                   struct session *sess, struct stream *s, int flags)
+{
+	struct connection *conn;
+	struct sockaddr_storage *addr;
+	struct sample *smp;
+
+	/* locate the source address matching the calling ruleset */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON:
+		conn = objt_conn(sess->origin);
+		if (!conn || !conn_get_src(conn))
+			goto end;
+		addr = conn->src;
+		break;
+
+	case ACT_F_TCP_REQ_SES:
+		if (!sess_get_src(sess))
+			goto end;
+		addr = sess->src;
+		break;
+
+	case ACT_F_TCP_REQ_CNT:
+	case ACT_F_HTTP_REQ:
+		if (!sc_get_src(s->scf))
+			goto end;
+		addr = s->scf->src;
+		break;
+
+	default:
+		goto end;
+	}
+
+	smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_SINT);
+	if (!smp)
+		goto end;
+
+	if (addr->ss_family == AF_INET6) {
+		((struct sockaddr_in6 *)addr)->sin6_port = htons(smp->data.u.sint);
+		goto end;
+	}
+
+	if (addr->ss_family != AF_INET) {
+		/* unknown family: reset to an all-zero IPv4 address first */
+		addr->ss_family = AF_INET;
+		((struct sockaddr_in *)addr)->sin_addr.s_addr = 0;
+	}
+	((struct sockaddr_in *)addr)->sin_port = htons(smp->data.u.sint);
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/*
+ * Execute the "set-dst-port" action. May be called from {tcp,http}request.
+ * The address family is tested before storing the port: IPv6 keeps its
+ * address, IPv4 keeps its address, and any other family is first forced to
+ * AF_INET "0.0.0.0" before the port is assigned.
+ */
+static enum act_return tcp_action_req_set_dst_port(struct act_rule *rule, struct proxy *px,
+                                                   struct session *sess, struct stream *s, int flags)
+{
+	struct connection *conn;
+	struct sockaddr_storage *addr;
+	struct sample *smp;
+
+	/* locate the destination address matching the calling ruleset */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON:
+		conn = objt_conn(sess->origin);
+		if (!conn || !conn_get_dst(conn))
+			goto end;
+		addr = conn->dst;
+		break;
+
+	case ACT_F_TCP_REQ_SES:
+		if (!sess_get_dst(sess))
+			goto end;
+		addr = sess->dst;
+		break;
+
+	case ACT_F_TCP_REQ_CNT:
+	case ACT_F_HTTP_REQ:
+		if (!sc_get_dst(s->scf))
+			goto end;
+		addr = s->scf->dst;
+		break;
+
+	default:
+		goto end;
+	}
+
+	smp = sample_fetch_as_type(px, sess, s, SMP_OPT_DIR_REQ|SMP_OPT_FINAL, rule->arg.expr, SMP_T_SINT);
+	if (!smp)
+		goto end;
+
+	if (addr->ss_family == AF_INET6) {
+		((struct sockaddr_in6 *)addr)->sin6_port = htons(smp->data.u.sint);
+		goto end;
+	}
+
+	if (addr->ss_family != AF_INET) {
+		/* unknown family: reset to an all-zero IPv4 address first */
+		addr->ss_family = AF_INET;
+		((struct sockaddr_in *)addr)->sin_addr.s_addr = 0;
+	}
+	((struct sockaddr_in *)addr)->sin_port = htons(smp->data.u.sint);
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/* Executes the "silent-drop" action. May be called from {tcp,http}{request,response}.
+ * If rule->arg.act.p[0] is 0, TCP_REPAIR is tried first, with a fallback to
+ * sending a RST with TTL 1 towards the client. If it is [1-255], we will skip
+ * TCP_REPAIR and prepare the socket to send a RST with the requested TTL when
+ * the connection is killed by channel_abort(). Always returns ACT_RET_ABRT
+ * and accounts a denied request on the frontend/listener (and backend when
+ * a stream is assigned).
+ */
+static enum act_return tcp_exec_action_silent_drop(struct act_rule *rule, struct proxy *px,
+                                                   struct session *sess, struct stream *strm, int flags)
+{
+	struct connection *conn = objt_conn(sess->origin);
+	unsigned int ttl __maybe_unused = (uintptr_t)rule->arg.act.p[0];
+	char tcp_repair_enabled __maybe_unused;
+
+	/* ttl==0 means "no rst-ttl configured": try TCP_REPAIR first and fall
+	 * back to a TTL-1 RST; a non-zero ttl means "send a RST with this TTL".
+	 */
+	if (ttl == 0) {
+		tcp_repair_enabled = 1;
+		ttl = 1;
+	} else {
+		tcp_repair_enabled = 0;
+	}
+
+	if (!conn)
+		goto out;
+
+	if (!conn_ctrl_ready(conn))
+		goto out;
+
+#ifdef TCP_QUICKACK
+	/* drain is needed only to send the quick ACK */
+	conn_ctrl_drain(conn);
+
+	/* re-enable quickack if it was disabled to ack all data and avoid
+	 * retransmits from the client that might trigger a real reset.
+	 * NOTE(review): <one> is an external int used as the setsockopt value
+	 * — presumably a file/global constant set to 1; confirm in headers.
+	 */
+	setsockopt(conn->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
+#endif
+	/* lingering must absolutely be disabled so that we don't send a
+	 * shutdown(), this is critical to the TCP_REPAIR trick. When no stream
+	 * is present, returning with ERR will cause lingering to be disabled.
+	 */
+	if (strm)
+		strm->scf->flags |= SC_FL_NOLINGER;
+
+	/* no underlying FD (e.g. QUIC): socket-level tricks below do not apply */
+	if (conn->flags & CO_FL_FDLESS)
+		goto out;
+
+	/* We're on the client-facing side, we must force to disable lingering to
+	 * ensure we will use an RST exclusively and kill any pending data.
+	 */
+	HA_ATOMIC_OR(&fdtab[conn->handle.fd].state, FD_LINGER_RISK);
+
+#ifdef TCP_REPAIR
+	/* try to put socket in repair mode if sending a RST was not requested by
+	 * config. this often fails due to missing permissions (CAP_NET_ADMIN capability)
+	 */
+	if (tcp_repair_enabled && (setsockopt(conn->handle.fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)) == 0)) {
+		/* socket will be quiet now */
+		goto out;
+	}
+#endif
+
+	/* Either TCP_REPAIR is not defined, it failed (eg: permissions), or was
+	 * not executed because a RST with a specific TTL was requested to be sent.
+	 * Set the TTL of the client connection before the connection is killed
+	 * by channel_abort and a RST packet will be emitted by the TCP/IP stack.
+	 */
+#ifdef IP_TTL
+	if (conn->src && conn->src->ss_family == AF_INET)
+		setsockopt(conn->handle.fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
+#endif
+#ifdef IPV6_UNICAST_HOPS
+	if (conn->src && conn->src->ss_family == AF_INET6)
+		setsockopt(conn->handle.fd, IPPROTO_IPV6, IPV6_UNICAST_HOPS, &ttl, sizeof(ttl));
+#endif
+  out:
+	/* kill the stream if any */
+	if (strm) {
+		stream_abort(strm);
+		/* stop all remaining analysers except the flush-end ones */
+		strm->req.analysers &= AN_REQ_FLT_END;
+		strm->res.analysers &= AN_RES_FLT_END;
+		if (strm->flags & SF_BE_ASSIGNED)
+			_HA_ATOMIC_INC(&strm->be->be_counters.denied_req);
+		if (!(strm->flags & SF_ERR_MASK))
+			strm->flags |= SF_ERR_PRXCOND;
+		if (!(strm->flags & SF_FINST_MASK))
+			strm->flags |= SF_FINST_R;
+	}
+
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
+
+	return ACT_RET_ABRT;
+}
+
+
+#if defined(SO_MARK) || defined(SO_USER_COOKIE) || defined(SO_RTABLE)
+/* Executes the "set-mark" action: applies the mark value parsed into
+ * rule->arg.act.p[0] to the client connection, if any. Always continues.
+ */
+static enum act_return tcp_action_set_mark(struct act_rule *rule, struct proxy *px,
+                                           struct session *sess, struct stream *s, int flags)
+{
+	conn_set_mark(objt_conn(sess->origin), (uintptr_t)rule->arg.act.p[0]);
+	return ACT_RET_CONT;
+}
+#endif
+
+#ifdef IP_TOS
+/* Executes the "set-tos" action: applies the TOS value parsed into
+ * rule->arg.act.p[0] to the client connection, if any. Always continues.
+ */
+static enum act_return tcp_action_set_tos(struct act_rule *rule, struct proxy *px,
+                                          struct session *sess, struct stream *s, int flags)
+{
+	conn_set_tos(objt_conn(sess->origin), (uintptr_t)rule->arg.act.p[0]);
+	return ACT_RET_CONT;
+}
+#endif
+
+/*
+ * Release the arguments of an attach-srv rule: the copied server name
+ * string and the optional "name" sample expression.
+ */
+static void release_attach_srv_action(struct act_rule *rule)
+{
+	ha_free(&rule->arg.attach_srv.srvname);
+	release_sample_expr(rule->arg.attach_srv.name);
+}
+
+/*
+ * Release the sample expr when releasing a set src/dst action
+ * (shared by set-src, set-src-port, set-dst and set-dst-port).
+ */
+static void release_set_src_dst_action(struct act_rule *rule)
+{
+	release_sample_expr(rule->arg.expr);
+}
+
+/* Config-check callback for attach-srv: resolves the "<backend>/<server>"
+ * target stored in rule->arg.attach_srv.srvname and validates its
+ * consistency with the optional "name" expression. Returns 1 on success,
+ * 0 on error with a message allocated into <err>.
+ */
+static int tcp_check_attach_srv(struct act_rule *rule, struct proxy *px, char **err)
+{
+	struct proxy *be = NULL;
+	struct server *srv = NULL;
+	char *name = rule->arg.attach_srv.srvname;
+	struct ist be_name, sv_name;
+
+	if (px->mode != PR_MODE_HTTP) {
+		memprintf(err, "attach-srv rule requires HTTP proxy mode");
+		return 0;
+	}
+
+	/* split "<backend>/<server>": istsplit() returns the backend part and
+	 * leaves the server part in sv_name
+	 */
+	sv_name = ist(name);
+	be_name = istsplit(&sv_name, '/');
+	if (!istlen(sv_name)) {
+		memprintf(err, "attach-srv rule: invalid server name '%s'", name);
+		return 0;
+	}
+
+	/* NOTE(review): ist0() appears to NUL-terminate in place inside <name>'s
+	 * storage so the parts can be used as C strings — confirm against the
+	 * ist API before relying on <name> remaining intact afterwards.
+	 */
+	if (!(be = proxy_be_by_name(ist0(be_name)))) {
+		memprintf(err, "attach-srv rule: no such backend '%s/%s'", ist0(be_name), ist0(sv_name));
+		return 0;
+	}
+	if (!(srv = server_find_by_name(be, ist0(sv_name)))) {
+		memprintf(err, "attach-srv rule: no such server '%s/%s'", ist0(be_name), ist0(sv_name));
+		return 0;
+	}
+
+	/* the "name" argument must be given if and only if the server uses SSL
+	 * with an SNI expression, otherwise the pooled connection could never
+	 * be matched again
+	 */
+	if ((rule->arg.attach_srv.name && (!srv->use_ssl || !srv->sni_expr)) ||
+	    (!rule->arg.attach_srv.name && srv->use_ssl && srv->sni_expr)) {
+		memprintf(err, "attach-srv rule: connection will never be used; either specify name argument in conjunction with defined SSL SNI on targeted server or none of these");
+		return 0;
+	}
+
+	rule->arg.attach_srv.srv = srv;
+
+	return 1;
+}
+
+/* Parse an "attach-srv <backend>/<server> [name <expr>]" action. The server
+ * name is copied and resolved later by tcp_check_attach_srv(). Returns
+ * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR with a message in <err>
+ * otherwise. All rule arguments are freed on the error path.
+ */
+static enum act_parse_ret tcp_parse_attach_srv(const char **args, int *cur_arg, struct proxy *px,
+                                               struct act_rule *rule, char **err)
+{
+	char *srvname;
+	struct sample_expr *expr;
+
+	/* TODO duplicated code from check_kw_experimental() */
+	if (!experimental_directives_allowed) {
+		memprintf(err, "parsing [%s:%d] : '%s' action is experimental, must be allowed via a global 'expose-experimental-directives'",
+		          px->conf.args.file, px->conf.args.line, args[2]);
+		return ACT_RET_PRS_ERR;
+	}
+	mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+
+	rule->action = ACT_CUSTOM;
+	rule->action_ptr = tcp_action_attach_srv;
+	rule->release_ptr = release_attach_srv_action;
+	rule->check_ptr = tcp_check_attach_srv;
+	rule->arg.attach_srv.srvname = NULL;
+	rule->arg.attach_srv.name = NULL;
+
+	srvname = my_strndup(args[*cur_arg], strlen(args[*cur_arg]));
+	if (!srvname)
+		goto err;
+	rule->arg.attach_srv.srvname = srvname;
+
+	++(*cur_arg);
+
+	if (strcmp(args[*cur_arg], "name") == 0) {
+		if (!*args[*cur_arg + 1]) {
+			memprintf(err, "missing name value");
+			/* go through err so srvname is not leaked */
+			goto err;
+		}
+		++(*cur_arg);
+
+		expr = sample_parse_expr((char **)args, cur_arg, px->conf.args.file, px->conf.args.line,
+		                         err, &px->conf.args, NULL);
+		if (!expr)
+			goto err;
+
+		rule->arg.attach_srv.name = expr;
+	}
+
+	return ACT_RET_PRS_OK;
+
+ err:
+	ha_free(&rule->arg.attach_srv.srvname);
+	release_sample_expr(rule->arg.attach_srv.name);
+	return ACT_RET_PRS_ERR;
+}
+
+/* parse "set-{src,dst}[-port]" action: parses the sample expression, checks
+ * that its fetch is usable in the request path of this proxy, and wires the
+ * matching action callback. Returns ACT_RET_PRS_OK on success,
+ * ACT_RET_PRS_ERR with a message in <err> otherwise.
+ */
+static enum act_parse_ret tcp_parse_set_src_dst(const char **args, int *orig_arg, struct proxy *px,
+                                                struct act_rule *rule, char **err)
+{
+	int cur_arg;
+	struct sample_expr *expr;
+	unsigned int where;
+
+	cur_arg = *orig_arg;
+	expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line, err, &px->conf.args, NULL);
+	if (!expr)
+		return ACT_RET_PRS_ERR;
+
+	where = 0;
+	if (px->cap & PR_CAP_FE)
+		where |= SMP_VAL_FE_HRQ_HDR;
+	if (px->cap & PR_CAP_BE)
+		where |= SMP_VAL_BE_HRQ_HDR;
+
+	if (!(expr->fetch->val & where)) {
+		memprintf(err,
+			  "fetch method '%s' extracts information from '%s', none of which is available here",
+			  args[cur_arg-1], sample_src_names(expr->fetch->use));
+		/* release_sample_expr() instead of a bare free(): the expression
+		 * owns arguments and a conversion chain that must be freed too.
+		 */
+		release_sample_expr(expr);
+		return ACT_RET_PRS_ERR;
+	}
+	rule->arg.expr = expr;
+	rule->action = ACT_CUSTOM;
+
+	if (strcmp(args[*orig_arg - 1], "set-src") == 0) {
+		rule->action_ptr = tcp_action_req_set_src;
+	} else if (strcmp(args[*orig_arg - 1], "set-src-port") == 0) {
+		rule->action_ptr = tcp_action_req_set_src_port;
+	} else if (strcmp(args[*orig_arg - 1], "set-dst") == 0) {
+		rule->action_ptr = tcp_action_req_set_dst;
+	} else if (strcmp(args[*orig_arg - 1], "set-dst-port") == 0) {
+		rule->action_ptr = tcp_action_req_set_dst_port;
+	} else {
+		/* unknown keyword: release_ptr is not set yet, so free the
+		 * expression here to avoid leaking it
+		 */
+		release_sample_expr(expr);
+		rule->arg.expr = NULL;
+		return ACT_RET_PRS_ERR;
+	}
+
+	rule->release_ptr = release_set_src_dst_action;
+	(*orig_arg)++;
+
+	return ACT_RET_PRS_OK;
+}
+
+
+/* Parse a "set-mark" action. It takes the MARK value as argument. It returns
+ * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret tcp_parse_set_mark(const char **args, int *cur_arg, struct proxy *px,
+                                             struct act_rule *rule, char **err)
+{
+#if defined(SO_MARK) || defined(SO_USER_COOKIE) || defined(SO_RTABLE)
+	char *endp;
+	unsigned int mark;
+
+	if (!*args[*cur_arg]) {
+		memprintf(err, "expects exactly 1 argument (integer/hex value)");
+		return ACT_RET_PRS_ERR;
+	}
+	/* base 0: accept decimal, octal (0...) and hex (0x...) notations */
+	mark = strtoul(args[*cur_arg], &endp, 0);
+	if (endp && *endp != '\0') {
+		memprintf(err, "invalid character starting at '%s' (integer/hex value expected)", endp);
+		return ACT_RET_PRS_ERR;
+	}
+
+	(*cur_arg)++;
+
+	/* Register processing function. The mark is smuggled through the
+	 * generic pointer slot p[0]; tcp_action_set_mark() reads it back.
+	 */
+	rule->action_ptr = tcp_action_set_mark;
+	rule->action = ACT_CUSTOM;
+	rule->arg.act.p[0] = (void *)(uintptr_t)mark;
+	/* setting a mark requires network admin privileges at startup */
+	global.last_checks |= LSTCHK_NETADM;
+	return ACT_RET_PRS_OK;
+#else
+	memprintf(err, "not supported on this platform (SO_MARK|SO_USER_COOKIE|SO_RTABLE undefined)");
+	return ACT_RET_PRS_ERR;
+#endif
+}
+
+
+/* Parse a "set-tos" action. It takes the TOS value as argument. It returns
+ * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
+ */
+static enum act_parse_ret tcp_parse_set_tos(const char **args, int *cur_arg, struct proxy *px,
+                                            struct act_rule *rule, char **err)
+{
+#ifdef IP_TOS
+	char *endp;
+	int tos;
+
+	if (!*args[*cur_arg]) {
+		memprintf(err, "expects exactly 1 argument (integer/hex value)");
+		return ACT_RET_PRS_ERR;
+	}
+	/* base 0: accept decimal, octal (0...) and hex (0x...) notations */
+	tos = strtol(args[*cur_arg], &endp, 0);
+	if (endp && *endp != '\0') {
+		memprintf(err, "invalid character starting at '%s' (integer/hex value expected)", endp);
+		return ACT_RET_PRS_ERR;
+	}
+
+	(*cur_arg)++;
+
+	/* Register processing function. The TOS is smuggled through the
+	 * generic pointer slot p[0]; tcp_action_set_tos() reads it back.
+	 */
+	rule->action_ptr = tcp_action_set_tos;
+	rule->action = ACT_CUSTOM;
+	rule->arg.act.p[0] = (void *)(uintptr_t)tos;
+	return ACT_RET_PRS_OK;
+#else
+	memprintf(err, "not supported on this platform (IP_TOS undefined)");
+	return ACT_RET_PRS_ERR;
+#endif
+}
+
+/* Parse a "silent-drop" action. It may take 2 optional arguments to define a
+ * "rst-ttl" parameter (value in [1-255]). A zero stored in p[0] means "no
+ * rst-ttl configured" and lets tcp_exec_action_silent_drop() attempt
+ * TCP_REPAIR first. Returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on
+ * error.
+ */
+static enum act_parse_ret tcp_parse_silent_drop(const char **args, int *cur_arg, struct proxy *px,
+                                                struct act_rule *rule, char **err)
+{
+	unsigned int rst_ttl = 0;
+	char *endp;
+
+	rule->action = ACT_CUSTOM;
+	rule->action_ptr = tcp_exec_action_silent_drop;
+
+	if (strcmp(args[*cur_arg], "rst-ttl") == 0) {
+		/* no trailing '\n' in the messages below: memprintf() errors are
+		 * reported inline by the config parser, like everywhere else in
+		 * this file.
+		 */
+		if (!*args[*cur_arg + 1]) {
+			memprintf(err, "missing rst-ttl value");
+			return ACT_RET_PRS_ERR;
+		}
+
+		/* base 0: accept decimal, octal (0...) and hex (0x...) notations */
+		rst_ttl = (unsigned int)strtoul(args[*cur_arg + 1], &endp, 0);
+
+		if (endp && *endp != '\0') {
+			memprintf(err, "invalid character starting at '%s' (value 1-255 expected)",
+				  endp);
+			return ACT_RET_PRS_ERR;
+		}
+		if ((rst_ttl == 0) || (rst_ttl > 255)) {
+			memprintf(err, "valid rst-ttl values are [1-255]");
+			return ACT_RET_PRS_ERR;
+		}
+
+		*cur_arg += 2;
+	}
+
+	rule->arg.act.p[0] = (void *)(uintptr_t)rst_ttl;
+	return ACT_RET_PRS_OK;
+}
+
+
+/* action keywords for "tcp-request connection" rules */
+static struct action_kw_list tcp_req_conn_actions = {ILH, {
+	{ "set-dst" , tcp_parse_set_src_dst },
+	{ "set-dst-port", tcp_parse_set_src_dst },
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-src", tcp_parse_set_src_dst },
+	{ "set-src-port", tcp_parse_set_src_dst },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_conn_keywords_register, &tcp_req_conn_actions);
+
+/* action keywords for "tcp-request session" rules; this is the only ruleset
+ * accepting the experimental "attach-srv" action
+ */
+static struct action_kw_list tcp_req_sess_actions = {ILH, {
+	{ "attach-srv" , tcp_parse_attach_srv },
+	{ "set-dst" , tcp_parse_set_src_dst },
+	{ "set-dst-port", tcp_parse_set_src_dst },
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-src", tcp_parse_set_src_dst },
+	{ "set-src-port", tcp_parse_set_src_dst },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_sess_keywords_register, &tcp_req_sess_actions);
+
+/* action keywords for "tcp-request content" rules */
+static struct action_kw_list tcp_req_cont_actions = {ILH, {
+	{ "set-src", tcp_parse_set_src_dst },
+	{ "set-src-port", tcp_parse_set_src_dst },
+	{ "set-dst" , tcp_parse_set_src_dst },
+	{ "set-dst-port", tcp_parse_set_src_dst },
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_cont_actions);
+
+/* action keywords for "tcp-response content" rules (no address rewriting
+ * on the response path)
+ */
+static struct action_kw_list tcp_res_cont_actions = {ILH, {
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_cont_actions);
+
+/* action keywords for "http-request" rules */
+static struct action_kw_list http_req_actions = {ILH, {
+	{ "set-dst", tcp_parse_set_src_dst },
+	{ "set-dst-port", tcp_parse_set_src_dst },
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-src", tcp_parse_set_src_dst },
+	{ "set-src-port", tcp_parse_set_src_dst },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_actions);
+
+/* action keywords for "http-response" rules */
+static struct action_kw_list http_res_actions = {ILH, {
+	{ "set-mark", tcp_parse_set_mark },
+	{ "set-tos", tcp_parse_set_tos },
+	{ "silent-drop", tcp_parse_silent_drop },
+	{ /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/tcp_rules.c b/src/tcp_rules.c
new file mode 100644
index 0000000..9ce6c90
--- /dev/null
+++ b/src/tcp_rules.c
@@ -0,0 +1,1428 @@
+/*
+ * "tcp" rules processing
+ *
+ * Copyright 2000-2016 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <haproxy/acl.h>
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/arg-t.h>
+#include <haproxy/capture-t.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/channel.h>
+#include <haproxy/connection.h>
+#include <haproxy/global.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/proxy.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/stconn.h>
+#include <haproxy/stick_table.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+
+
+#define TRACE_SOURCE &trace_strm
+
+/* List head of all known action keywords for "tcp-request connection" */
+struct list tcp_req_conn_keywords = LIST_HEAD_INIT(tcp_req_conn_keywords);
+struct list tcp_req_sess_keywords = LIST_HEAD_INIT(tcp_req_sess_keywords);
+struct list tcp_req_cont_keywords = LIST_HEAD_INIT(tcp_req_cont_keywords);
+struct list tcp_res_cont_keywords = LIST_HEAD_INIT(tcp_res_cont_keywords);
+
+/*
+ * Register keywords.
+ */
+/* Appends <kw_list> to the global list of "tcp-request connection" action
+ * keywords. Called once per keyword list, typically via INITCALL at startup.
+ */
+void tcp_req_conn_keywords_register(struct action_kw_list *kw_list)
+{
+	LIST_APPEND(&tcp_req_conn_keywords, &kw_list->list);
+}
+
+/* Appends <kw_list> to the global list of "tcp-request session" action keywords. */
+void tcp_req_sess_keywords_register(struct action_kw_list *kw_list)
+{
+	LIST_APPEND(&tcp_req_sess_keywords, &kw_list->list);
+}
+
+/* Appends <kw_list> to the global list of "tcp-request content" action keywords. */
+void tcp_req_cont_keywords_register(struct action_kw_list *kw_list)
+{
+	LIST_APPEND(&tcp_req_cont_keywords, &kw_list->list);
+}
+
+/* Appends <kw_list> to the global list of "tcp-response content" action keywords. */
+void tcp_res_cont_keywords_register(struct action_kw_list *kw_list)
+{
+	LIST_APPEND(&tcp_res_cont_keywords, &kw_list->list);
+}
+
+/*
+ * Return the struct action_kw associated to a keyword.
+ */
+/* Looks up <kw> among registered "tcp-request connection" actions.
+ * Returns NULL when the keyword is unknown (callers test the result).
+ */
+struct action_kw *tcp_req_conn_action(const char *kw)
+{
+	return action_lookup(&tcp_req_conn_keywords, kw);
+}
+
+/* Looks up <kw> among registered "tcp-request session" actions; NULL if unknown. */
+struct action_kw *tcp_req_sess_action(const char *kw)
+{
+	return action_lookup(&tcp_req_sess_keywords, kw);
+}
+
+/* Looks up <kw> among registered "tcp-request content" actions; NULL if unknown. */
+struct action_kw *tcp_req_cont_action(const char *kw)
+{
+	return action_lookup(&tcp_req_cont_keywords, kw);
+}
+
+/* Looks up <kw> among registered "tcp-response content" actions; NULL if unknown. */
+struct action_kw *tcp_res_cont_action(const char *kw)
+{
+	return action_lookup(&tcp_res_cont_keywords, kw);
+}
+
+/* This function performs the TCP request analysis on the current request. It
+ * returns 1 if the processing can continue on next analysers, or zero if it
+ * needs more data, encounters an error, or wants to immediately abort the
+ * request. It relies on buffers flags, and updates s->req->analysers. The
+ * function may be called for frontend rules and backend rules. It only relies
+ * on the backend pointer so this works for both cases.
+ */
+int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
+{
+	struct list *def_rules, *rules;
+	struct session *sess = s->sess;
+	struct act_rule *rule;
+	int partial;
+	int act_opts = 0;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+
+	/* Rules inherited from a named defaults section are evaluated first
+	 * (see the "restart" loop below), but only when they were not already
+	 * evaluated for the frontend with the same defaults proxy.
+	 */
+	def_rules = ((s->be->defpx &&
+	              (sess->fe->mode == PR_MODE_TCP || sess->fe->mode == PR_MODE_HTTP) &&
+	              (an_bit == AN_REQ_INSPECT_FE || s->be->defpx != sess->fe->defpx)) ? &s->be->defpx->tcp_req.inspect_rules : NULL);
+	rules = &s->be->tcp_req.inspect_rules;
+
+	/* We don't know whether we have enough data, so must proceed
+	 * this way :
+	 * - iterate through all rules in their declaration order
+	 * - if one rule returns MISS, it means the inspect delay is
+	 *   not over yet, then return immediately, otherwise consider
+	 *   it as a non-match.
+	 * - if one rule returns OK, then return OK
+	 * - if one rule returns KO, then return KO
+	 */
+
+	/* SMP_OPT_FINAL tells sample fetches no more data will come; it is set
+	 * when the input is closed, the buffer is full, or the delay expired.
+	 */
+	if ((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || channel_full(req, global.tune.maxrewrite) ||
+	    sc_waiting_room(s->scf) ||
+	    !s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
+		partial = SMP_OPT_FINAL;
+		/* Action may yield while the inspect_delay is not expired and there is no read error */
+		if ((s->scf->flags & SC_FL_ERROR) || !s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+			act_opts |= ACT_OPT_FINAL;
+	}
+	else
+		partial = 0;
+
+	/* If the "current_rule_list" matches the executed rule list, we are in
+	 * resume condition. If a resume is needed it is always in the action
+	 * and never in the ACL or converters. In this case, we initialise the
+	 * current rule, and go to the action execution point.
+	 */
+	if (s->current_rule) {
+		rule = s->current_rule;
+		s->current_rule = NULL;
+		if ((def_rules && s->current_rule_list == def_rules) || s->current_rule_list == rules)
+			goto resume_execution;
+	}
+	s->current_rule_list = ((!def_rules || s->current_rule_list == def_rules) ? rules : def_rules);
+
+ restart:
+	list_for_each_entry(rule, s->current_rule_list, list) {
+		enum acl_test_res ret = ACL_TEST_PASS;
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_REQ | partial);
+			if (ret == ACL_TEST_MISS)
+				goto missing_data;
+
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			act_opts |= ACT_OPT_FIRST;
+resume_execution:
+			/* Always call the action function if defined */
+			if (rule->action_ptr) {
+				switch (rule->action_ptr(rule, s->be, s->sess, s, act_opts)) {
+				case ACT_RET_CONT:
+					break;
+				case ACT_RET_STOP:
+				case ACT_RET_DONE:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto end;
+				case ACT_RET_YIELD:
+					s->current_rule = rule;
+					if (act_opts & ACT_OPT_FINAL) {
+						send_log(s->be, LOG_WARNING,
+							 "Internal error: yield not allowed if the inspect-delay expired "
+							 "for the tcp-request content actions.");
+						goto internal;
+					}
+					goto missing_data;
+				case ACT_RET_DENY:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto deny;
+				case ACT_RET_ABRT:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto abort;
+				case ACT_RET_ERR:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto internal;
+				case ACT_RET_INV:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto invalid;
+				}
+				continue; /* eval the next rule */
+			}
+
+			/* If not action function defined, check for known actions */
+			if (rule->action == ACT_ACTION_ALLOW) {
+				s->last_rule_file = rule->conf.file;
+				s->last_rule_line = rule->conf.line;
+				goto end;
+			}
+			else if (rule->action == ACT_ACTION_DENY) {
+				s->last_rule_file = rule->conf.file;
+				s->last_rule_line = rule->conf.line;
+				goto deny;
+			}
+		}
+	}
+
+	/* after the defaults-section rules, evaluate the proxy's own rules */
+	if (def_rules && s->current_rule_list == def_rules) {
+		s->current_rule_list = rules;
+		goto restart;
+	}
+
+ end:
+	/* if we get there, it means we have no rule which matches, or
+	 * we have an explicit accept, so we apply the default accept.
+	 */
+	req->analysers &= ~an_bit;
+	s->current_rule = s->current_rule_list = NULL;
+	req->analyse_exp = s->rules_exp = TICK_ETERNITY;
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+	return 1;
+
+ missing_data:
+	channel_dont_connect(req);
+	/* just set the request timeout once at the beginning of the request */
+	if (!tick_isset(s->rules_exp) && s->be->tcp_req.inspect_delay)
+		s->rules_exp = tick_add(now_ms, s->be->tcp_req.inspect_delay);
+	req->analyse_exp = tick_first((tick_is_expired(req->analyse_exp, now_ms) ? 0 : req->analyse_exp), s->rules_exp);
+	DBG_TRACE_DEVEL("waiting for more data", STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+	return 0;
+
+ deny:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
+	goto reject;
+
+ internal:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_INTERNAL;
+	goto reject;
+
+ invalid:
+	_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
+	if (sess->listener && sess->listener->counters)
+		_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
+
+ reject:
+	sc_must_kill_conn(s->scf);
+	stream_abort(s);
+
+ abort:
+	req->analysers &= AN_REQ_FLT_END;
+	s->current_rule = s->current_rule_list = NULL;
+	req->analyse_exp = s->rules_exp = TICK_ETERNITY;
+
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	if (!(s->flags & SF_FINST_MASK))
+		s->flags |= SF_FINST_R;
+	DBG_TRACE_DEVEL("leaving on error|deny|abort", STRM_EV_STRM_ANA|STRM_EV_TCP_ANA|STRM_EV_TCP_ERR, s);
+	return 0;
+}
+
+/* This function performs the TCP response analysis on the current response. It
+ * returns 1 if the processing can continue on next analysers, or zero if it
+ * needs more data, encounters an error, or wants to immediately abort the
+ * response. It relies on buffers flags, and updates s->rep->analysers. The
+ * function may be called for backend rules.
+ */
+int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
+{
+	struct list *def_rules, *rules;
+	struct session *sess = s->sess;
+	struct act_rule *rule;
+	int partial;
+	int act_opts = 0;
+
+	DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+
+	/* rules inherited from a named defaults section are evaluated first */
+	def_rules = (s->be->defpx && (s->be->mode == PR_MODE_TCP || s->be->mode == PR_MODE_HTTP) ? &s->be->defpx->tcp_rep.inspect_rules : NULL);
+	rules = &s->be->tcp_rep.inspect_rules;
+
+	/* We don't know whether we have enough data, so must proceed
+	 * this way :
+	 * - iterate through all rules in their declaration order
+	 * - if one rule returns MISS, it means the inspect delay is
+	 *   not over yet, then return immediately, otherwise consider
+	 *   it as a non-match.
+	 * - if one rule returns OK, then return OK
+	 * - if one rule returns KO, then return KO
+	 */
+	/* SMP_OPT_FINAL tells sample fetches no more data will come; it is set
+	 * when the input is closed, the buffer is full, or the delay expired.
+	 */
+	if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || channel_full(rep, global.tune.maxrewrite) ||
+	    sc_waiting_room(s->scb) ||
+	    !s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
+		partial = SMP_OPT_FINAL;
+		/* Action may yield while the inspect_delay is not expired and there is no read error */
+		if ((s->scb->flags & SC_FL_ERROR) || !s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms))
+			act_opts |= ACT_OPT_FINAL;
+	}
+	else
+		partial = 0;
+
+	/* If the "current_rule_list" matches the executed rule list, we are in
+	 * resume condition. If a resume is needed it is always in the action
+	 * and never in the ACL or converters. In this case, we initialise the
+	 * current rule, and go to the action execution point.
+	 */
+	if (s->current_rule) {
+		rule = s->current_rule;
+		s->current_rule = NULL;
+		if ((def_rules && s->current_rule_list == def_rules) || s->current_rule_list == rules)
+			goto resume_execution;
+	}
+	s->current_rule_list = ((!def_rules || s->current_rule_list == def_rules) ? rules : def_rules);
+
+  restart:
+	list_for_each_entry(rule, s->current_rule_list, list) {
+		enum acl_test_res ret = ACL_TEST_PASS;
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, s->be, sess, s, SMP_OPT_DIR_RES | partial);
+			if (ret == ACL_TEST_MISS)
+				goto missing_data;
+
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			act_opts |= ACT_OPT_FIRST;
+resume_execution:
+			/* Always call the action function if defined */
+			if (rule->action_ptr) {
+				switch (rule->action_ptr(rule, s->be, s->sess, s, act_opts)) {
+				case ACT_RET_CONT:
+					break;
+				case ACT_RET_STOP:
+				case ACT_RET_DONE:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto end;
+				case ACT_RET_YIELD:
+					s->current_rule = rule;
+					if (act_opts & ACT_OPT_FINAL) {
+						send_log(s->be, LOG_WARNING,
+							 "Internal error: yield not allowed if the inspect-delay expired "
+							 "for the tcp-response content actions.");
+						goto internal;
+					}
+					channel_dont_close(rep);
+					goto missing_data;
+				case ACT_RET_DENY:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto deny;
+				case ACT_RET_ABRT:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto abort;
+				case ACT_RET_ERR:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto internal;
+				case ACT_RET_INV:
+					s->last_rule_file = rule->conf.file;
+					s->last_rule_line = rule->conf.line;
+					goto invalid;
+				}
+				continue; /* eval the next rule */
+			}
+
+			/* If not action function defined, check for known actions */
+			if (rule->action == ACT_ACTION_ALLOW) {
+				s->last_rule_file = rule->conf.file;
+				s->last_rule_line = rule->conf.line;
+				goto end;
+			}
+			else if (rule->action == ACT_ACTION_DENY) {
+				s->last_rule_file = rule->conf.file;
+				s->last_rule_line = rule->conf.line;
+				goto deny;
+			}
+			else if (rule->action == ACT_TCP_CLOSE) {
+				/* immediate active close of the server side */
+				s->scb->flags |= SC_FL_NOLINGER | SC_FL_NOHALF;
+				sc_must_kill_conn(s->scb);
+				sc_abort(s->scb);
+				sc_shutdown(s->scb);
+				s->last_rule_file = rule->conf.file;
+				s->last_rule_line = rule->conf.line;
+				goto end;
+			}
+		}
+	}
+
+	/* after the defaults-section rules, evaluate the proxy's own rules */
+	if (def_rules && s->current_rule_list == def_rules) {
+		s->current_rule_list = rules;
+		goto restart;
+	}
+
+ end:
+	/* if we get there, it means we have no rule which matches, or
+	 * we have an explicit accept, so we apply the default accept.
+	 */
+	rep->analysers &= ~an_bit;
+	s->current_rule = s->current_rule_list = NULL;
+	rep->analyse_exp = s->rules_exp = TICK_ETERNITY;
+	DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+	return 1;
+
+ missing_data:
+	/* just set the analyser timeout once at the beginning of the response */
+	if (!tick_isset(s->rules_exp) && s->be->tcp_rep.inspect_delay)
+		s->rules_exp = tick_add(now_ms, s->be->tcp_rep.inspect_delay);
+	rep->analyse_exp = tick_first((tick_is_expired(rep->analyse_exp, now_ms) ? 0 : rep->analyse_exp), s->rules_exp);
+	DBG_TRACE_DEVEL("waiting for more data", STRM_EV_STRM_ANA|STRM_EV_TCP_ANA, s);
+	return 0;
+
+ deny:
+	_HA_ATOMIC_INC(&s->sess->fe->fe_counters.denied_resp);
+	_HA_ATOMIC_INC(&s->be->be_counters.denied_resp);
+	if (s->sess->listener && s->sess->listener->counters)
+		_HA_ATOMIC_INC(&s->sess->listener->counters->denied_resp);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.denied_resp);
+	goto reject;
+
+ internal:
+	_HA_ATOMIC_INC(&s->sess->fe->fe_counters.internal_errors);
+	_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
+	if (s->sess->listener && s->sess->listener->counters)
+		_HA_ATOMIC_INC(&s->sess->listener->counters->internal_errors);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_INTERNAL;
+	goto reject;
+
+ invalid:
+	_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
+	if (objt_server(s->target))
+		_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
+
+ reject:
+	sc_must_kill_conn(s->scb);
+	stream_abort(s);
+
+ abort:
+	rep->analysers &= AN_RES_FLT_END;
+	s->current_rule = s->current_rule_list = NULL;
+	rep->analyse_exp = s->rules_exp = TICK_ETERNITY;
+
+	if (!(s->flags & SF_ERR_MASK))
+		s->flags |= SF_ERR_PRXCOND;
+	if (!(s->flags & SF_FINST_MASK))
+		s->flags |= SF_FINST_D;
+	DBG_TRACE_DEVEL("leaving on error", STRM_EV_STRM_ANA|STRM_EV_TCP_ANA|STRM_EV_TCP_ERR, s);
+	return 0;
+}
+
+
+/* This function performs the TCP layer4 analysis on the current request. It
+ * returns 0 if a reject rule matches, otherwise 1 if either an accept rule
+ * matches or if no more rule matches. It can only use rules which don't need
+ * any data. This only works on connection-based client-facing stream connectors.
+ */
+int tcp_exec_l4_rules(struct session *sess)
+{
+	struct proxy *px = sess->fe;
+	struct act_rule *rule;
+	struct connection *conn = objt_conn(sess->origin);
+	int result = 1;
+	enum acl_test_res ret;
+
+	/* no client connection (e.g. not a connection-based session): accept */
+	if (!conn)
+		return result;
+
+	/* rules from a named defaults section are evaluated first, then the
+	 * frontend's own rules (see the "restart" loop at the bottom)
+	 */
+	if (sess->fe->defpx && (sess->fe->mode == PR_MODE_TCP || sess->fe->mode == PR_MODE_HTTP))
+		px = sess->fe->defpx;
+
+  restart:
+	list_for_each_entry(rule, &px->tcp_req.l4_rules, list) {
+		ret = ACL_TEST_PASS;
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, sess->fe, sess, NULL, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			/* Always call the action function if defined */
+			if (rule->action_ptr) {
+				switch (rule->action_ptr(rule, sess->fe, sess, NULL, ACT_OPT_FINAL | ACT_OPT_FIRST)) {
+				case ACT_RET_YIELD:
+					/* yield is not allowed at this point. If this return code is
+					 * used it is a bug, so I prefer to abort the process.
+					 */
+					send_log(sess->fe, LOG_WARNING,
+						 "Internal error: yield not allowed with tcp-request connection actions.");
+					/* fall through */
+				case ACT_RET_STOP:
+				case ACT_RET_DONE:
+					goto end;
+				case ACT_RET_CONT:
+					break;
+				case ACT_RET_DENY:
+				case ACT_RET_ABRT:
+				case ACT_RET_ERR:
+				case ACT_RET_INV:
+					result = 0;
+					goto end;
+				}
+				continue; /* eval the next rule */
+			}
+
+			/* If not action function defined, check for known actions */
+			if (rule->action == ACT_ACTION_ALLOW) {
+				goto end;
+			}
+			else if (rule->action == ACT_ACTION_DENY) {
+				_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_conn);
+				if (sess->listener && sess->listener->counters)
+					_HA_ATOMIC_INC(&sess->listener->counters->denied_conn);
+
+				result = 0;
+				goto end;
+			}
+			else if (rule->action == ACT_TCP_EXPECT_PX) {
+				/* expect a PROXY protocol header; add the handshake layer if needed */
+				if (!(conn->flags & CO_FL_HANDSHAKE)) {
+					if (xprt_add_hs(conn) < 0) {
+						result = 0;
+						goto end;
+					}
+				}
+				conn->flags |= CO_FL_ACCEPT_PROXY;
+			}
+			else if (rule->action == ACT_TCP_EXPECT_CIP) {
+				/* expect a NetScaler CIP header; add the handshake layer if needed */
+				if (!(conn->flags & CO_FL_HANDSHAKE)) {
+					if (xprt_add_hs(conn) < 0) {
+						result = 0;
+						goto end;
+					}
+				}
+				conn->flags |= CO_FL_ACCEPT_CIP;
+			}
+		}
+	}
+
+	if (px != sess->fe) {
+		px = sess->fe;
+		goto restart;
+	}
+ end:
+	return result;
+}
+
+/* This function performs the TCP layer5 analysis on the current request. It
+ * returns 0 if a reject rule matches, otherwise 1 if either an accept rule
+ * matches or if no more rule matches. It can only use rules which don't need
+ * any data. This only works on session-based client-facing stream connectors.
+ * An example of valid use case is to track a stick-counter on the source
+ * address extracted from the proxy protocol.
+ */
+int tcp_exec_l5_rules(struct session *sess)
+{
+	struct proxy *px = sess->fe;
+	struct act_rule *rule;
+	int result = 1;
+	enum acl_test_res ret;
+
+	/* rules from a named defaults section are evaluated first, then the
+	 * frontend's own rules (see the "restart" loop at the bottom)
+	 */
+	if (sess->fe->defpx && (sess->fe->mode == PR_MODE_TCP || sess->fe->mode == PR_MODE_HTTP))
+		px = sess->fe->defpx;
+
+  restart:
+	list_for_each_entry(rule, &px->tcp_req.l5_rules, list) {
+		ret = ACL_TEST_PASS;
+
+		if (rule->cond) {
+			ret = acl_exec_cond(rule->cond, sess->fe, sess, NULL, SMP_OPT_DIR_REQ|SMP_OPT_FINAL);
+			ret = acl_pass(ret);
+			if (rule->cond->pol == ACL_COND_UNLESS)
+				ret = !ret;
+		}
+
+		if (ret) {
+			/* Always call the action function if defined */
+			if (rule->action_ptr) {
+				switch (rule->action_ptr(rule, sess->fe, sess, NULL, ACT_OPT_FINAL | ACT_OPT_FIRST)) {
+				case ACT_RET_YIELD:
+					/* yield is not allowed at this point. If this return code is
+					 * used it is a bug, so I prefer to abort the process.
+					 */
+					send_log(sess->fe, LOG_WARNING,
+						 "Internal error: yield not allowed with tcp-request session actions.");
+					/* fall through */
+				case ACT_RET_STOP:
+				case ACT_RET_DONE:
+					goto end;
+				case ACT_RET_CONT:
+					break;
+				case ACT_RET_DENY:
+				case ACT_RET_ABRT:
+				case ACT_RET_ERR:
+				case ACT_RET_INV:
+					result = 0;
+					goto end;
+				}
+				continue; /* eval the next rule */
+			}
+
+			/* If not action function defined, check for known actions */
+			if (rule->action == ACT_ACTION_ALLOW) {
+				goto end;
+			}
+			else if (rule->action == ACT_ACTION_DENY) {
+				_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_sess);
+				if (sess->listener && sess->listener->counters)
+					_HA_ATOMIC_INC(&sess->listener->counters->denied_sess);
+
+				result = 0;
+				goto end;
+			}
+		}
+	}
+
+	if (px != sess->fe) {
+		px = sess->fe;
+		goto restart;
+	}
+ end:
+	return result;
+}
+
+/* Parse a tcp-response rule starting at args[arg]. Fills <rule> and returns 0
+ * on success, or a negative value on failure with an explanation in <err>.
+ */
+static int tcp_parse_response_rule(char **args, int arg, int section_type,
+                                   struct proxy *curpx, const struct proxy *defpx,
+                                   struct act_rule *rule, char **err,
+                                   unsigned int where,
+                                   const char *file, int line)
+{
+	/* only allowed in backends or in named defaults sections */
+	if ((curpx == defpx && strlen(defpx->id) == 0) || !(curpx->cap & PR_CAP_BE)) {
+		memprintf(err, "%s %s is only allowed in 'backend' sections or 'defaults' section with a name",
+		          args[0], args[1]);
+		return -1;
+	}
+
+	if (strcmp(args[arg], "accept") == 0) {
+		arg++;
+		rule->action = ACT_ACTION_ALLOW;
+		rule->flags |= ACT_FLAG_FINAL;
+	}
+	else if (strcmp(args[arg], "reject") == 0) {
+		arg++;
+		rule->action = ACT_ACTION_DENY;
+		rule->flags |= ACT_FLAG_FINAL;
+	}
+	else if (strcmp(args[arg], "close") == 0) {
+		arg++;
+		rule->action = ACT_TCP_CLOSE;
+		rule->flags |= ACT_FLAG_FINAL;
+	}
+	else {
+		/* not a built-in action: look up registered custom keywords */
+		struct action_kw *kw;
+		kw = tcp_res_cont_action(args[arg]);
+		if (kw) {
+			arg++;
+			rule->kw = kw;
+			if (kw->parse((const char **)args, &arg, curpx, rule, err) == ACT_RET_PRS_ERR)
+				return -1;
+		} else {
+			const char *extra[] = { "accept", "reject", "close", NULL };
+			const char *best = action_suggest(args[arg], &tcp_res_cont_keywords, extra);
+
+			action_build_list(&tcp_res_cont_keywords, &trash);
+			memprintf(err,
+			          "'%s %s' expects 'accept', 'close', 'reject', %s in %s '%s' (got '%s').%s%s%s",
+			          args[0], args[1], trash.area,
+			          proxy_type_str(curpx), curpx->id, args[arg],
+			          best ? " Did you mean '" : "",
+			          best ? best : "",
+			          best ? "' maybe ?" : "");
+			return -1;
+		}
+	}
+
+	/* an optional 'if'/'unless' condition may terminate the rule */
+	if (strcmp(args[arg], "if") == 0 || strcmp(args[arg], "unless") == 0) {
+		if ((rule->cond = build_acl_cond(file, line, &curpx->acl, curpx, (const char **)args+arg, err)) == NULL) {
+			memprintf(err,
+			          "'%s %s %s' : error detected in %s '%s' while parsing '%s' condition : %s",
+			          args[0], args[1], args[2], proxy_type_str(curpx), curpx->id, args[arg], *err);
+			return -1;
+		}
+	}
+	else if (*args[arg]) {
+		memprintf(err,
+		          "'%s %s %s' only accepts 'if' or 'unless', in %s '%s' (got '%s')",
+		          args[0], args[1], args[2], proxy_type_str(curpx), curpx->id, args[arg]);
+		return -1;
+	}
+	return 0;
+}
+
+
+/* This function executes a track-sc* action. On success, it returns
+ * ACT_RET_CONT. If it must yield, it returns ACT_RET_YIELD. Otherwise
+ * ACT_RET_ERR is returned.
+ */
+static enum act_return tcp_action_track_sc(struct act_rule *rule, struct proxy *px,
+                                           struct session *sess, struct stream *s, int flags)
+{
+	struct stksess *ts;
+	struct stktable *t;
+	struct stktable_key *key;
+	struct sample smp;
+	int opt;
+
+	opt = SMP_OPT_DIR_REQ;
+	if (flags & ACT_FLAG_FINAL)
+		opt |= SMP_OPT_FINAL;
+
+	/* NOTE: rule->action holds the stick-counter number for track-sc rules */
+	t = rule->arg.trk_ctr.table.t;
+	if (rule->from == ACT_F_TCP_REQ_CNT) { /* L7 rules: use the stream */
+		/* this counter is already tracked: nothing to do */
+		if (stkctr_entry(&s->stkctr[rule->action]))
+			goto end;
+
+		key = stktable_fetch_key(t, s->be, sess, s, opt, rule->arg.trk_ctr.expr, &smp);
+
+		if ((smp.flags & SMP_F_MAY_CHANGE) && !(flags & ACT_FLAG_FINAL))
+			return ACT_RET_YIELD; /* key might appear later */
+
+		if (key && (ts = stktable_get_entry(t, key))) {
+			stream_track_stkctr(&s->stkctr[rule->action], t, ts);
+			stkctr_set_flags(&s->stkctr[rule->action], STKCTR_TRACK_CONTENT);
+			if (sess->fe != s->be)
+				stkctr_set_flags(&s->stkctr[rule->action], STKCTR_TRACK_BACKEND);
+		}
+	}
+	else {	/* L4/L5 rules: use the session */
+		if (stkctr_entry(&sess->stkctr[rule->action]))
+			goto end;
+
+		key = stktable_fetch_key(t, sess->fe, sess, NULL, opt, rule->arg.trk_ctr.expr, NULL);
+		if (key && (ts = stktable_get_entry(t, key)))
+			stream_track_stkctr(&sess->stkctr[rule->action], t, ts);
+	}
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/* This function executes a capture action. It executes a fetch expression,
+ * turns the result into a string and puts it in a capture slot. On success, it
+ * returns ACT_RET_CONT. If it must yield, it returns ACT_RET_YIELD. Otherwise
+ * ACT_RET_ERR is returned.
+ */
+static enum act_return tcp_action_capture(struct act_rule *rule, struct proxy *px,
+                                          struct session *sess, struct stream *s, int flags)
+{
+	struct sample *key;
+	struct cap_hdr *h = rule->arg.cap.hdr;
+	char **cap = s->req_cap;
+	int len, opt;
+
+	opt = ((rule->from == ACT_F_TCP_REQ_CNT) ? SMP_OPT_DIR_REQ : SMP_OPT_DIR_RES);
+	if (flags & ACT_FLAG_FINAL)
+		opt |= SMP_OPT_FINAL;
+
+	key = sample_fetch_as_type(s->be, sess, s, opt, rule->arg.cap.expr, SMP_T_STR);
+	if (!key)
+		goto end;
+
+	if ((key->flags & SMP_F_MAY_CHANGE) && !(flags & ACT_FLAG_FINAL))
+		return ACT_RET_YIELD; /* key might appear later */
+
+	if (cap[h->index] == NULL) {
+		cap[h->index] = pool_alloc(h->pool);
+		if (cap[h->index] == NULL) /* no more capture memory, ignore error */
+			goto end;
+	}
+
+	/* truncate the sample to the configured capture length */
+	len = key->data.u.str.data;
+	if (len > h->len)
+		len = h->len;
+
+	memcpy(cap[h->index], key->data.u.str.area, len);
+	cap[h->index][len] = 0;
+
+  end:
+	return ACT_RET_CONT;
+}
+
+/* Releases the sample expression attached to a "capture" rule. */
+static void release_tcp_capture(struct act_rule * rule)
+{
+	release_sample_expr(rule->arg.cap.expr);
+}
+
+
+/* Releases the sample expression attached to a "track-sc*" rule. */
+static void release_tcp_track_sc(struct act_rule * rule)
+{
+	release_sample_expr(rule->arg.trk_ctr.expr);
+}
+
+/* Parse a tcp-request rule starting at args[arg]. Fills <rule> and returns 0
+ * on success, or a negative value on failure with an explanation in <err>.
+ * <where> is a SMP_VAL_* bitmask describing the evaluation point (L4 conn
+ * accept, L5 session accept or L6 content), used both to validate fetch
+ * methods and to select the proper custom keyword registry.
+ */
+static int tcp_parse_request_rule(char **args, int arg, int section_type,
+                                  struct proxy *curpx, const struct proxy *defpx,
+                                  struct act_rule *rule, char **err,
+                                  unsigned int where, const char *file, int line)
+{
+	/* anonymous defaults sections cannot carry tcp-request rules */
+	if (curpx == defpx && strlen(defpx->id) == 0) {
+		memprintf(err, "%s %s is not allowed in anonymous 'defaults' sections",
+		          args[0], args[1]);
+		return -1;
+	}
+
+	if (strcmp(args[arg], "accept") == 0) {
+		arg++;
+		rule->action = ACT_ACTION_ALLOW;
+		rule->flags |= ACT_FLAG_FINAL;
+	}
+	else if (strcmp(args[arg], "reject") == 0) {
+		arg++;
+		rule->action = ACT_ACTION_DENY;
+		rule->flags |= ACT_FLAG_FINAL;
+	}
+	else if (strcmp(args[arg], "capture") == 0) {
+		struct sample_expr *expr;
+		struct cap_hdr *hdr;
+		int kw = arg;
+		int len = 0;
+
+		if (!(curpx->cap & PR_CAP_FE)) {
+			memprintf(err,
+			          "'%s %s %s' : proxy '%s' has no frontend capability",
+			          args[0], args[1], args[kw], curpx->id);
+			return -1;
+		}
+
+		if (!(where & SMP_VAL_FE_REQ_CNT)) {
+			memprintf(err,
+			          "'%s %s' is not allowed in '%s %s' rules in %s '%s'",
+			          args[arg], args[arg+1], args[0], args[1], proxy_type_str(curpx), curpx->id);
+			return -1;
+		}
+
+		arg++;
+
+		curpx->conf.args.ctx = ARGC_CAP;
+		expr = sample_parse_expr(args, &arg, file, line, err, &curpx->conf.args, NULL);
+		if (!expr) {
+			memprintf(err,
+			          "'%s %s %s' : %s",
+			          args[0], args[1], args[kw], *err);
+			return -1;
+		}
+
+		if (!(expr->fetch->val & where)) {
+			memprintf(err,
+			          "'%s %s %s' : fetch method '%s' extracts information from '%s', none of which is available here",
+			          args[0], args[1], args[kw], args[arg-1], sample_src_names(expr->fetch->use));
+			release_sample_expr(expr);
+			return -1;
+		}
+
+		if (strcmp(args[arg], "len") == 0) {
+			arg++;
+			if (!args[arg]) {
+				memprintf(err,
+				          "'%s %s %s' : missing length value",
+				          args[0], args[1], args[kw]);
+				release_sample_expr(expr);
+				return -1;
+			}
+			/* convert the length argument, it must be strictly positive */
+			len = atoi(args[arg]);
+			if (len <= 0) {
+				memprintf(err,
+				          "'%s %s %s' : length must be > 0",
+				          args[0], args[1], args[kw]);
+				release_sample_expr(expr);
+				return -1;
+			}
+			arg++;
+		}
+
+		if (!len) {
+			memprintf(err,
+			          "'%s %s %s' : a positive 'len' argument is mandatory",
+			          args[0], args[1], args[kw]);
+			/* BUGFIX: use release_sample_expr() like every other error
+			 * path above; a plain free() leaked the expression's
+			 * arguments and converter chain.
+			 */
+			release_sample_expr(expr);
+			return -1;
+		}
+
+		hdr = calloc(1, sizeof(*hdr));
+		if (!hdr) {
+			memprintf(err, "parsing [%s:%d] : out of memory", file, line);
+			release_sample_expr(expr);
+			return -1;
+		}
+		hdr->next = curpx->req_cap;
+		hdr->name = NULL; /* not a header capture */
+		hdr->namelen = 0;
+		hdr->len = len;
+		hdr->pool = create_pool("caphdr", hdr->len + 1, MEM_F_SHARED);
+		hdr->index = curpx->nb_req_cap++;
+
+		curpx->req_cap = hdr;
+		curpx->to_log |= LW_REQHDR;
+
+		/* check if we need to allocate an http_txn struct for HTTP parsing */
+		curpx->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
+
+		rule->arg.cap.expr = expr;
+		rule->arg.cap.hdr = hdr;
+		rule->action = ACT_CUSTOM;
+		rule->action_ptr = tcp_action_capture;
+		rule->check_ptr = check_capture;
+		rule->release_ptr = release_tcp_capture;
+	}
+	else if (strncmp(args[arg], "track-sc", 8) == 0) {
+		struct sample_expr *expr;
+		int kw = arg;
+		unsigned int tsc_num;
+		const char *tsc_num_str;
+
+		arg++;
+
+		/* parse the counter number following "track-sc" */
+		tsc_num_str = &args[kw][8];
+		if (cfg_parse_track_sc_num(&tsc_num, tsc_num_str, tsc_num_str + strlen(tsc_num_str), err) == -1) {
+			memprintf(err, "'%s %s %s' : %s", args[0], args[1], args[kw], *err);
+			return -1;
+		}
+
+		curpx->conf.args.ctx = ARGC_TRK;
+		expr = sample_parse_expr(args, &arg, file, line, err, &curpx->conf.args, NULL);
+		if (!expr) {
+			memprintf(err,
+			          "'%s %s %s' : %s",
+			          args[0], args[1], args[kw], *err);
+			return -1;
+		}
+
+		if (!(expr->fetch->val & where)) {
+			memprintf(err,
+			          "'%s %s %s' : fetch method '%s' extracts information from '%s', none of which is available here",
+			          args[0], args[1], args[kw], args[arg-1], sample_src_names(expr->fetch->use));
+			release_sample_expr(expr);
+			return -1;
+		}
+
+		/* check if we need to allocate an http_txn struct for HTTP parsing */
+		curpx->http_needed |= !!(expr->fetch->use & SMP_USE_HTTP_ANY);
+
+		if (strcmp(args[arg], "table") == 0) {
+			arg++;
+			if (!args[arg]) {
+				memprintf(err,
+				          "'%s %s %s' : missing table name",
+				          args[0], args[1], args[kw]);
+				release_sample_expr(expr);
+				return -1;
+			}
+			/* we copy the table name for now, it will be resolved later */
+			rule->arg.trk_ctr.table.n = strdup(args[arg]);
+			arg++;
+		}
+		/* for track-sc rules, rule->action carries the counter number */
+		rule->action = tsc_num;
+		rule->arg.trk_ctr.expr = expr;
+		rule->action_ptr = tcp_action_track_sc;
+		rule->check_ptr = check_trk_action;
+		rule->release_ptr = release_tcp_track_sc;
+	}
+	else if (strcmp(args[arg], "expect-proxy") == 0) {
+		if (strcmp(args[arg+1], "layer4") != 0) {
+			memprintf(err,
+			          "'%s %s %s' only supports 'layer4' in %s '%s' (got '%s')",
+			          args[0], args[1], args[arg], proxy_type_str(curpx), curpx->id, args[arg+1]);
+			return -1;
+		}
+
+		if (!(where & SMP_VAL_FE_CON_ACC)) {
+			memprintf(err,
+			          "'%s %s' is not allowed in '%s %s' rules in %s '%s'",
+			          args[arg], args[arg+1], args[0], args[1], proxy_type_str(curpx), curpx->id);
+			return -1;
+		}
+
+		arg += 2;
+		rule->action = ACT_TCP_EXPECT_PX;
+	}
+	else if (strcmp(args[arg], "expect-netscaler-cip") == 0) {
+		if (strcmp(args[arg+1], "layer4") != 0) {
+			memprintf(err,
+			          "'%s %s %s' only supports 'layer4' in %s '%s' (got '%s')",
+			          args[0], args[1], args[arg], proxy_type_str(curpx), curpx->id, args[arg+1]);
+			return -1;
+		}
+
+		if (!(where & SMP_VAL_FE_CON_ACC)) {
+			memprintf(err,
+			          "'%s %s' is not allowed in '%s %s' rules in %s '%s'",
+			          args[arg], args[arg+1], args[0], args[1], proxy_type_str(curpx), curpx->id);
+			return -1;
+		}
+
+		arg += 2;
+		rule->action = ACT_TCP_EXPECT_CIP;
+	}
+	else {
+		/* not a built-in action: look up the registry matching the
+		 * evaluation point (L4, L5 or L6)
+		 */
+		struct action_kw *kw;
+		if (where & SMP_VAL_FE_CON_ACC) {
+			/* L4 */
+			kw = tcp_req_conn_action(args[arg]);
+			rule->kw = kw;
+		} else if (where & SMP_VAL_FE_SES_ACC) {
+			/* L5 */
+			kw = tcp_req_sess_action(args[arg]);
+			rule->kw = kw;
+		} else {
+			/* L6 */
+			kw = tcp_req_cont_action(args[arg]);
+			rule->kw = kw;
+		}
+		if (kw) {
+			arg++;
+			if (kw->parse((const char **)args, &arg, curpx, rule, err) == ACT_RET_PRS_ERR)
+				return -1;
+		} else {
+			const char *extra[] = { "accept", "reject", "capture", "track-sc", "expect-proxy", "expect-netscaler-cip", NULL };
+			const char *best = NULL;
+
+
+			if (where & SMP_VAL_FE_CON_ACC) {
+				action_build_list(&tcp_req_conn_keywords, &trash);
+				best = action_suggest(args[arg], &tcp_req_conn_keywords, extra);
+			}
+			else if (where & SMP_VAL_FE_SES_ACC) {
+				action_build_list(&tcp_req_sess_keywords, &trash);
+				best = action_suggest(args[arg], &tcp_req_sess_keywords, extra);
+			}
+			else {
+				action_build_list(&tcp_req_cont_keywords, &trash);
+				best = action_suggest(args[arg], &tcp_req_cont_keywords, extra);
+			}
+
+			memprintf(err,
+			          "'%s %s' expects 'accept', 'reject', 'capture', 'expect-proxy', 'expect-netscaler-cip', 'track-sc0' ... 'track-sc%d', %s "
+			          "in %s '%s' (got '%s').%s%s%s\n",
+			          args[0], args[1], global.tune.nb_stk_ctr-1,
+			          trash.area, proxy_type_str(curpx),
+			          curpx->id, args[arg],
+			          best ? " Did you mean '" : "",
+			          best ? best : "",
+			          best ? "' maybe ?" : "");
+			return -1;
+		}
+	}
+
+	/* an optional 'if'/'unless' condition may terminate the rule */
+	if (strcmp(args[arg], "if") == 0 || strcmp(args[arg], "unless") == 0) {
+		if ((rule->cond = build_acl_cond(file, line, &curpx->acl, curpx, (const char **)args+arg, err)) == NULL) {
+			memprintf(err,
+			          "'%s %s %s' : error detected in %s '%s' while parsing '%s' condition : %s",
+			          args[0], args[1], args[2], proxy_type_str(curpx), curpx->id, args[arg], *err);
+			return -1;
+		}
+	}
+	else if (*args[arg]) {
+		memprintf(err,
+		          "'%s %s %s' only accepts 'if' or 'unless', in %s '%s' (got '%s')",
+		          args[0], args[1], args[2], proxy_type_str(curpx), curpx->id, args[arg]);
+		return -1;
+	}
+	return 0;
+}
+
+/* Parses a configuration line starting with the "tcp-response" keyword, either
+ * an "inspect-delay" setting or a "content" rule. Returns -1 on error (*err is
+ * set), 0 on success, or a positive number of warnings emitted. */
+static int tcp_parse_tcp_rep(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *ptr = NULL; /* parse_time_err() error position or special code */
+ unsigned int val;
+ int warn = 0;
+ int arg;
+ struct act_rule *rule;
+ unsigned int where; /* SMP_VAL_* bitmask of evaluation points for this rule */
+ const struct acl *acl;
+ const char *kw;
+
+ if (!*args[1]) {
+ memprintf(err, "missing argument for '%s' in %s '%s'",
+ args[0], proxy_type_str(curpx), curpx->id);
+ return -1;
+ }
+
+ if (strcmp(args[1], "inspect-delay") == 0) {
+ if ((curpx == defpx && strlen(defpx->id) == 0) || !(curpx->cap & PR_CAP_BE)) {
+ memprintf(err, "%s %s is only allowed in 'backend' sections or 'defaults' section with a name",
+ args[0], args[1]);
+ return -1;
+ }
+
+ if (!*args[2] || (ptr = parse_time_err(args[2], &val, TIME_UNIT_MS))) {
+ memprintf(err,
+ "'%s %s' expects a positive delay in milliseconds, in %s '%s'",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+
+ if (ptr == PARSE_TIME_OVER)
+ memprintf(err, "%s (timer overflow in '%s', maximum value is 2147483647 ms or ~24.8 days)", *err, args[2]);
+ else if (ptr == PARSE_TIME_UNDER)
+ memprintf(err, "%s (timer underflow in '%s', minimum non-null value is 1 ms)", *err, args[2]);
+ else if (ptr)
+ memprintf(err, "%s (unexpected character '%c')", *err, *ptr);
+ return -1;
+ }
+
+ if (curpx->tcp_rep.inspect_delay) {
+ memprintf(err, "ignoring %s %s (was already defined) in %s '%s'",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+ return 1; /* duplicated setting is ignored with a warning */
+ }
+ curpx->tcp_rep.inspect_delay = val;
+ return 0;
+ }
+
+ rule = new_act_rule(ACT_F_TCP_RES_CNT, file, line);
+ if (!rule) {
+ memprintf(err, "parsing [%s:%d] : out of memory", file, line);
+ return -1;
+ }
+ LIST_INIT(&rule->list);
+ arg = 1;
+ where = 0;
+
+ if (strcmp(args[1], "content") == 0) {
+ arg++;
+
+ if (curpx->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_RES_CNT;
+ if (curpx->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_RES_CNT;
+ if (tcp_parse_response_rule(args, arg, section_type, curpx, defpx, rule, err, where, file, line) < 0)
+ goto error;
+
+ acl = rule->cond ? acl_cond_conflicts(rule->cond, where) : NULL; /* detect ACLs that can never match here */
+ if (acl) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' will never match in '%s %s' because it only involves keywords that are incompatible with '%s'",
+ acl->name, args[0], args[1], sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl will never match in '%s %s' because it uses keyword '%s' which is incompatible with '%s'",
+ args[0], args[1],
+ LIST_ELEM(acl->expr.n, struct acl_expr *, list)->kw,
+ sample_ckp_names(where));
+
+ warn++;
+ }
+ else if (rule->cond && acl_cond_kw_conflicts(rule->cond, where, &acl, &kw)) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' involves keyword '%s' which is incompatible with '%s'",
+ acl->name, kw, sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl involves keyword '%s' which is incompatible with '%s'",
+ kw, sample_ckp_names(where));
+ warn++;
+ }
+
+ LIST_APPEND(&curpx->tcp_rep.inspect_rules, &rule->list); /* <rule> ownership transferred to the proxy */
+ }
+ else {
+ memprintf(err,
+ "'%s' expects 'inspect-delay' or 'content' in %s '%s' (got '%s')",
+ args[0], proxy_type_str(curpx), curpx->id, args[1]);
+ goto error;
+ }
+
+ return warn;
+ error:
+ free_act_rule(rule);
+ return -1;
+}
+
+
+/* Parses a configuration line starting with the "tcp-request" keyword, either
+ * an "inspect-delay" setting or a "connection"/"session"/"content" rule.
+ * Returns -1 on error (*err is set), 0 on success, or a positive number of
+ * warnings emitted. */
+static int tcp_parse_tcp_req(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ const char *ptr = NULL; /* parse_time_err() error position or special code */
+ unsigned int val;
+ int warn = 0;
+ int arg;
+ struct act_rule *rule;
+ unsigned int where; /* SMP_VAL_* bitmask of evaluation points for this rule */
+ const struct acl *acl;
+ const char *kw;
+
+ if (!*args[1]) {
+ if (curpx == defpx)
+ memprintf(err, "missing argument for '%s' in defaults section", args[0]);
+ else
+ memprintf(err, "missing argument for '%s' in %s '%s'",
+ args[0], proxy_type_str(curpx), curpx->id);
+ return -1;
+ }
+
+ if (strcmp(args[1], "inspect-delay") == 0) {
+ if (curpx == defpx && strlen(defpx->id) == 0) {
+ memprintf(err, "%s %s is not allowed in anonymous 'defaults' sections",
+ args[0], args[1]);
+ return -1;
+ }
+
+ if (!*args[2] || (ptr = parse_time_err(args[2], &val, TIME_UNIT_MS))) {
+ memprintf(err,
+ "'%s %s' expects a positive delay in milliseconds, in %s '%s'",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+
+ if (ptr == PARSE_TIME_OVER)
+ memprintf(err, "%s (timer overflow in '%s', maximum value is 2147483647 ms or ~24.8 days)", *err, args[2]);
+ else if (ptr == PARSE_TIME_UNDER)
+ memprintf(err, "%s (timer underflow in '%s', minimum non-null value is 1 ms)", *err, args[2]);
+ else if (ptr)
+ memprintf(err, "%s (unexpected character '%c')", *err, *ptr);
+ return -1;
+ }
+
+ if (curpx->tcp_req.inspect_delay) {
+ memprintf(err, "ignoring %s %s (was already defined) in %s '%s'",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+ return 1; /* duplicated setting is ignored with a warning */
+ }
+ curpx->tcp_req.inspect_delay = val;
+ return 0;
+ }
+
+ rule = new_act_rule(0, file, line); /* <from> is assigned below once the rule set is known */
+ if (!rule) {
+ memprintf(err, "parsing [%s:%d] : out of memory", file, line);
+ return -1;
+ }
+ LIST_INIT(&rule->list);
+ arg = 1;
+ where = 0;
+
+ if (strcmp(args[1], "content") == 0) {
+ arg++;
+
+ if (curpx->cap & PR_CAP_FE)
+ where |= SMP_VAL_FE_REQ_CNT;
+ if (curpx->cap & PR_CAP_BE)
+ where |= SMP_VAL_BE_REQ_CNT;
+ rule->from = ACT_F_TCP_REQ_CNT;
+ if (tcp_parse_request_rule(args, arg, section_type, curpx, defpx, rule, err, where, file, line) < 0)
+ goto error;
+
+ acl = rule->cond ? acl_cond_conflicts(rule->cond, where) : NULL; /* detect ACLs that can never match here */
+ if (acl) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' will never match in '%s %s' because it only involves keywords that are incompatible with '%s'",
+ acl->name, args[0], args[1], sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl will never match in '%s %s' because it uses keyword '%s' which is incompatible with '%s'",
+ args[0], args[1],
+ LIST_ELEM(acl->expr.n, struct acl_expr *, list)->kw,
+ sample_ckp_names(where));
+
+ warn++;
+ }
+ else if (rule->cond && acl_cond_kw_conflicts(rule->cond, where, &acl, &kw)) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' involves keyword '%s' which is incompatible with '%s'",
+ acl->name, kw, sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl involves keyword '%s' which is incompatible with '%s'",
+ kw, sample_ckp_names(where));
+ warn++;
+ }
+
+ /* the following function directly emits the warning */
+ warnif_misplaced_tcp_cont(curpx, file, line, args[0]);
+ LIST_APPEND(&curpx->tcp_req.inspect_rules, &rule->list); /* <rule> ownership transferred to the proxy */
+ }
+ else if (strcmp(args[1], "connection") == 0) {
+ arg++;
+
+ if (!(curpx->cap & PR_CAP_FE)) {
+ memprintf(err, "%s %s is not allowed because %s %s is not a frontend",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+ goto error;
+ }
+
+ where |= SMP_VAL_FE_CON_ACC;
+ rule->from = ACT_F_TCP_REQ_CON;
+ if (tcp_parse_request_rule(args, arg, section_type, curpx, defpx, rule, err, where, file, line) < 0)
+ goto error;
+
+ acl = rule->cond ? acl_cond_conflicts(rule->cond, where) : NULL; /* detect ACLs that can never match here */
+ if (acl) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' will never match in '%s %s' because it only involves keywords that are incompatible with '%s'",
+ acl->name, args[0], args[1], sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl will never match in '%s %s' because it uses keyword '%s' which is incompatible with '%s'",
+ args[0], args[1],
+ LIST_ELEM(acl->expr.n, struct acl_expr *, list)->kw,
+ sample_ckp_names(where));
+
+ warn++;
+ }
+ else if (rule->cond && acl_cond_kw_conflicts(rule->cond, where, &acl, &kw)) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' involves keyword '%s' which is incompatible with '%s'",
+ acl->name, kw, sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl involves keyword '%s' which is incompatible with '%s'",
+ kw, sample_ckp_names(where));
+ warn++;
+ }
+
+ /* the following function directly emits the warning */
+ warnif_misplaced_tcp_conn(curpx, file, line, args[0]);
+ LIST_APPEND(&curpx->tcp_req.l4_rules, &rule->list); /* <rule> ownership transferred to the proxy */
+ }
+ else if (strcmp(args[1], "session") == 0) {
+ arg++;
+
+ if (!(curpx->cap & PR_CAP_FE)) {
+ memprintf(err, "%s %s is not allowed because %s %s is not a frontend",
+ args[0], args[1], proxy_type_str(curpx), curpx->id);
+ goto error;
+ }
+
+ where |= SMP_VAL_FE_SES_ACC;
+ rule->from = ACT_F_TCP_REQ_SES;
+ if (tcp_parse_request_rule(args, arg, section_type, curpx, defpx, rule, err, where, file, line) < 0)
+ goto error;
+
+ acl = rule->cond ? acl_cond_conflicts(rule->cond, where) : NULL; /* detect ACLs that can never match here */
+ if (acl) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' will never match in '%s %s' because it only involves keywords that are incompatible with '%s'",
+ acl->name, args[0], args[1], sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl will never match in '%s %s' because it uses keyword '%s' which is incompatible with '%s'",
+ args[0], args[1],
+ LIST_ELEM(acl->expr.n, struct acl_expr *, list)->kw,
+ sample_ckp_names(where));
+ warn++;
+ }
+ else if (rule->cond && acl_cond_kw_conflicts(rule->cond, where, &acl, &kw)) {
+ if (acl->name && *acl->name)
+ memprintf(err,
+ "acl '%s' involves keyword '%s' which is incompatible with '%s'",
+ acl->name, kw, sample_ckp_names(where));
+ else
+ memprintf(err,
+ "anonymous acl involves keyword '%s' which is incompatible with '%s'",
+ kw, sample_ckp_names(where));
+ warn++;
+ }
+
+ /* the following function directly emits the warning */
+ warnif_misplaced_tcp_sess(curpx, file, line, args[0]);
+ LIST_APPEND(&curpx->tcp_req.l5_rules, &rule->list); /* <rule> ownership transferred to the proxy */
+ }
+ else {
+ if (curpx == defpx)
+ memprintf(err,
+ "'%s' expects 'inspect-delay', 'connection', or 'content' in defaults section (got '%s')",
+ args[0], args[1]);
+ else
+ memprintf(err,
+ "'%s' expects 'inspect-delay', 'connection', or 'content' in %s '%s' (got '%s')",
+ args[0], proxy_type_str(curpx), curpx->id, args[1]);
+ goto error;
+ }
+
+ return warn;
+ error:
+ free_act_rule(rule);
+ return -1;
+}
+
+static struct cfg_kw_list cfg_kws = {ILH, { /* "listen"-level config keywords handled by this file */
+ { CFG_LISTEN, "tcp-request", tcp_parse_tcp_req },
+ { CFG_LISTEN, "tcp-response", tcp_parse_tcp_rep },
+ { 0, NULL, NULL }, /* end of list */
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws); /* auto-registered at boot */
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/tcp_sample.c b/src/tcp_sample.c
new file mode 100644
index 0000000..9fbf920
--- /dev/null
+++ b/src/tcp_sample.c
@@ -0,0 +1,641 @@
+/*
+ * AF_INET/AF_INET6 SOCK_STREAM protocol layer (tcp)
+ *
+ * Copyright 2000-2013 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/* this is to have tcp_info defined on systems using musl
+ * library, such as Alpine Linux.
+ */
+#define _GNU_SOURCE
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/listener-t.h>
+#include <haproxy/namespace.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/sample.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/session.h>
+#include <haproxy/tools.h>
+
+/* Fetch the connection's source IPv4/IPv6 address ("src", "fc_src", "bc_src").
+ * Depending on the keyword, it may be the frontend or the backend connection.
+ * Returns 0 if the address is unknown or not of an IP family, 1 on success. */
+static int
+smp_fetch_src(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ const struct sockaddr_storage *src = NULL;
+
+ if (kw[0] == 'b') { /* bc_src */
+ struct connection *conn = ((obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ ? sc_conn(__objt_check(smp->sess->origin)->sc)
+ : (smp->strm ? sc_conn(smp->strm->scb): NULL));
+ if (conn && conn_get_src(conn))
+ src = conn_src(conn);
+ }
+ else if (kw[0] == 'f') { /* fc_src */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_src(conn))
+ src = conn_src(conn);
+ }
+ else /* src */
+ src = (smp->strm ? sc_src(smp->strm->scf) : sess_src(smp->sess));
+
+ if (!src)
+ return 0;
+
+ switch (src->ss_family) {
+ case AF_INET:
+ smp->data.u.ipv4 = ((struct sockaddr_in *)src)->sin_addr;
+ smp->data.type = SMP_T_IPV4;
+ break;
+ case AF_INET6:
+ smp->data.u.ipv6 = ((struct sockaddr_in6 *)src)->sin6_addr;
+ smp->data.type = SMP_T_IPV6;
+ break;
+ default:
+ return 0; /* non-IP address family (e.g. unix socket) */
+ }
+
+ smp->flags = 0;
+ return 1;
+}
+
+/* Set temp integer to the connection's source port ("src_port", "fc_src_port",
+ * "bc_src_port"). Depending on the keyword, it may be the frontend or the
+ * backend connection. Returns 0 if the port is unknown or null, 1 on success. */
+static int
+smp_fetch_sport(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ const struct sockaddr_storage *src = NULL;
+
+ if (kw[0] == 'b') { /* bc_src_port */
+ struct connection *conn = ((obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ ? sc_conn(__objt_check(smp->sess->origin)->sc)
+ : (smp->strm ? sc_conn(smp->strm->scb): NULL));
+ if (conn && conn_get_src(conn))
+ src = conn_src(conn);
+ }
+ else if (kw[0] == 'f') { /* fc_src_port */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_src(conn))
+ src = conn_src(conn);
+ }
+ else /* src_port */
+ src = (smp->strm ? sc_src(smp->strm->scf) : sess_src(smp->sess));
+
+ if (!src)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ if (!(smp->data.u.sint = get_host_port(src)))
+ return 0; /* a null port means it could not be retrieved */
+
+ smp->flags = 0;
+ return 1;
+}
+
+/* Fetch the connection's destination IPv4/IPv6 address ("dst", "fc_dst",
+ * "bc_dst"). Depending on the keyword, it may be the frontend or the backend
+ * connection. Returns 0 if the address is unknown or not IP, 1 on success. */
+static int
+smp_fetch_dst(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ const struct sockaddr_storage *dst = NULL;
+
+ if (kw[0] == 'b') { /* bc_dst */
+ struct connection *conn = ((obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ ? sc_conn(__objt_check(smp->sess->origin)->sc)
+ : (smp->strm ? sc_conn(smp->strm->scb): NULL));
+ if (conn && conn_get_dst(conn))
+ dst = conn_dst(conn);
+ }
+ else if (kw[0] == 'f') { /* fc_dst */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_dst(conn))
+ dst = conn_dst(conn);
+ }
+ else /* dst */
+ dst = (smp->strm ? sc_dst(smp->strm->scf) : sess_dst(smp->sess));
+
+ if (!dst)
+ return 0;
+
+ switch (dst->ss_family) {
+ case AF_INET:
+ smp->data.u.ipv4 = ((struct sockaddr_in *)dst)->sin_addr;
+ smp->data.type = SMP_T_IPV4;
+ break;
+ case AF_INET6:
+ smp->data.u.ipv6 = ((struct sockaddr_in6 *)dst)->sin6_addr;
+ smp->data.type = SMP_T_IPV6;
+ break;
+ default:
+ return 0; /* non-IP address family (e.g. unix socket) */
+ }
+
+ smp->flags = 0;
+ return 1;
+}
+
+/* Check if the destination address of the front connection is local to the
+ * system or if it was intercepted. Returns 0 when the address is unknown or
+ * when addr_is_local() could not decide (negative result). */
+int smp_fetch_dst_is_local(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct listener *li = smp->sess->listener;
+ const struct sockaddr_storage *dst = NULL;
+
+ if (kw[0] == 'f') { /* fc_dst_is_local */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_dst(conn))
+ dst = conn_dst(conn);
+ }
+ else /* dst_is_local */
+ dst = (smp->strm ? sc_dst(smp->strm->scf) : sess_dst(smp->sess));
+
+ if (!dst)
+ return 0;
+
+ smp->data.type = SMP_T_BOOL;
+ smp->flags = 0;
+ smp->data.u.sint = addr_is_local(li->rx.settings->netns, dst);
+ return smp->data.u.sint >= 0; /* -1 means undeterminable */
+}
+
+/* Check if the source address of the front connection is local to the system
+ * or not. Returns 0 when the address is unknown or when addr_is_local() could
+ * not decide (negative result). */
+int smp_fetch_src_is_local(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct listener *li = smp->sess->listener;
+ const struct sockaddr_storage *src = NULL;
+
+ if (kw[0] == 'f') { /* fc_src_is_local */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_src(conn))
+ src = conn_src(conn);
+ }
+ else /* src_is_local */
+ src = (smp->strm ? sc_src(smp->strm->scf) : sess_src(smp->sess));
+
+ if (!src)
+ return 0;
+
+ smp->data.type = SMP_T_BOOL;
+ smp->flags = 0;
+ smp->data.u.sint = addr_is_local(li->rx.settings->netns, src);
+ return smp->data.u.sint >= 0; /* -1 means undeterminable */
+}
+
+/* Set temp integer to the connection's destination port ("dst_port",
+ * "fc_dst_port", "bc_dst_port"). Depending on the keyword, it may be the
+ * frontend or the backend connection. Returns 0 if unknown or null. */
+static int
+smp_fetch_dport(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ const struct sockaddr_storage *dst = NULL;
+
+ if (kw[0] == 'b') { /* bc_dst_port */
+ struct connection *conn = ((obj_type(smp->sess->origin) == OBJ_TYPE_CHECK)
+ ? sc_conn(__objt_check(smp->sess->origin)->sc)
+ : (smp->strm ? sc_conn(smp->strm->scb): NULL));
+ if (conn && conn_get_dst(conn))
+ dst = conn_dst(conn);
+ }
+ else if (kw[0] == 'f') { /* fc_dst_port */
+ struct connection *conn = objt_conn(smp->sess->origin);
+
+ if (conn && conn_get_dst(conn))
+ dst = conn_dst(conn);
+ }
+ else /* dst_port */
+ dst = (smp->strm ? sc_dst(smp->strm->scf) : sess_dst(smp->sess));
+
+ if (!dst)
+ return 0;
+
+ smp->data.type = SMP_T_SINT;
+ if (!(smp->data.u.sint = get_host_port(dst)))
+ return 0; /* a null port means it could not be retrieved */
+
+ smp->flags = 0;
+ return 1;
+}
+
+#ifdef TCP_INFO
+
+
+/* Validates the arguments passed to "fc_*" fetch keywords returning a time
+ * value. These keywords support an optional string representing the unit of the
+ * result: "us" for microseconds and "ms" for milliseconds. The string argument
+ * is converted in place to an ARGT_SINT holding the TIME_UNIT_* code. Returns
+ * 0 on error and non-zero if OK.
+ */
+static int val_fc_time_value(struct arg *args, char **err)
+{
+ if (args[0].type == ARGT_STR) {
+ if (strcmp(args[0].data.str.area, "us") == 0) {
+ chunk_destroy(&args[0].data.str);
+ args[0].type = ARGT_SINT;
+ args[0].data.sint = TIME_UNIT_US;
+ }
+ else if (strcmp(args[0].data.str.area, "ms") == 0) {
+ chunk_destroy(&args[0].data.str);
+ args[0].type = ARGT_SINT;
+ args[0].data.sint = TIME_UNIT_MS;
+ }
+ else {
+ memprintf(err, "expects 'us' or 'ms', got '%s'",
+ args[0].data.str.area);
+ return 0;
+ }
+ }
+ else {
+ memprintf(err, "Unexpected arg type");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Validates the arguments passed to "fc_*" fetch keywords returning a
+ * counter. These keywords should be used without any argument, but because of a
+ * bug in previous versions, an optional string argument may be passed. In such
+ * case, the argument is ignored and a warning is emitted. Returns 0 on error
+ * and non-zero if OK.
+ */
+static int var_fc_counter(struct arg *args, char **err)
+{
+ if (args[0].type != ARGT_STOP) {
+ ha_warning("no argument supported for 'fc_*' sample expressions returning counters.\n");
+ if (args[0].type == ARGT_STR)
+ chunk_destroy(&args[0].data.str);
+ args[0].type = ARGT_STOP; /* drop the ignored argument */
+ }
+
+ return 1;
+}
+
+/* Returns some tcp_info data if it's available. "dir" must be set to 0 if
+ * the client connection is required, otherwise it is set to 1. "val" represents
+ * the required value.
+ * If the function fails it returns 0, otherwise it returns 1 and the sample is filled.
+ */
+static inline int get_tcp_info(const struct arg *args, struct sample *smp,
+ int dir, int val)
+{
+ struct connection *conn;
+ struct tcp_info info;
+ socklen_t optlen;
+
+ /* strm can be null. */
+ if (!smp->strm)
+ return 0;
+
+ /* get the object associated with the stream connector. The
+ * object can be something other than a connection. For
+ * example, it may be an appctx.
+ */
+ conn = (dir == 0 ? sc_conn(smp->strm->scf) : sc_conn(smp->strm->scb));
+ if (!conn)
+ return 0;
+
+ /* The fd may not be available for the tcp_info struct, and the
+ syscall can fail. */
+ optlen = sizeof(info);
+ if ((conn->flags & CO_FL_FDLESS) ||
+ getsockopt(conn->handle.fd, IPPROTO_TCP, TCP_INFO, &info, &optlen) == -1)
+ return 0;
+
+ /* extract the value. */
+ smp->data.type = SMP_T_SINT;
+ switch (val) {
+#if defined(__APPLE__)
+ case 0: smp->data.u.sint = info.tcpi_rttcur; break;
+ case 1: smp->data.u.sint = info.tcpi_rttvar; break;
+ case 2: smp->data.u.sint = info.tcpi_tfo_syn_data_acked; break;
+ case 4: smp->data.u.sint = info.tcpi_tfo_syn_loss; break;
+ case 5: smp->data.u.sint = info.tcpi_rto; break;
+#else
+ /* all other platforms supporting TCP_INFO have these ones */
+ case 0: smp->data.u.sint = info.tcpi_rtt; break;
+ case 1: smp->data.u.sint = info.tcpi_rttvar; break;
+# if defined(__linux__)
+ /* these ones are common to all Linux versions */
+ case 2: smp->data.u.sint = info.tcpi_unacked; break;
+ case 3: smp->data.u.sint = info.tcpi_sacked; break;
+ case 4: smp->data.u.sint = info.tcpi_lost; break;
+ case 5: smp->data.u.sint = info.tcpi_retrans; break;
+ case 6: smp->data.u.sint = info.tcpi_fackets; break;
+ case 7: smp->data.u.sint = info.tcpi_reordering; break;
+# elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+ /* these ones are found on FreeBSD, NetBSD and OpenBSD featuring TCP_INFO */
+ case 2: smp->data.u.sint = info.__tcpi_unacked; break;
+ case 3: smp->data.u.sint = info.__tcpi_sacked; break;
+ case 4: smp->data.u.sint = info.__tcpi_lost; break;
+ case 5: smp->data.u.sint = info.__tcpi_retrans; break;
+ case 6: smp->data.u.sint = info.__tcpi_fackets; break;
+ case 7: smp->data.u.sint = info.__tcpi_reordering; break;
+# endif
+#endif // apple
+ default: return 0;
+ }
+
+ return 1;
+}
+
+/* get the mean rtt of a client connection (ms by default, us if requested) */
+static int
+smp_fetch_fc_rtt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 0))
+ return 0;
+
+ /* By default or if explicitly specified, convert rtt to ms */
+ if (!args || args[0].type == ARGT_STOP || args[0].data.sint == TIME_UNIT_MS)
+ smp->data.u.sint = (smp->data.u.sint + 500) / 1000;
+
+ return 1;
+}
+
+/* get the variance of the rtt of a client connection (ms by default, us if requested) */
+static int
+smp_fetch_fc_rttvar(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 1))
+ return 0;
+
+ /* By default or if explicitly specified, convert rttvar to ms */
+ if (!args || args[0].type == ARGT_STOP || args[0].data.sint == TIME_UNIT_MS)
+ smp->data.u.sint = (smp->data.u.sint + 500) / 1000;
+
+ return 1;
+}
+
+/* get the mean rtt of a backend connection (ms by default, us if requested) */
+static int
+smp_fetch_bc_rtt(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 1, 0))
+ return 0;
+
+ /* By default or if explicitly specified, convert rtt to ms */
+ if (!args || args[0].type == ARGT_STOP || args[0].data.sint == TIME_UNIT_MS)
+ smp->data.u.sint = (smp->data.u.sint + 500) / 1000;
+
+ return 1;
+}
+
+/* get the variance of the rtt of a backend connection (ms by default, us if requested) */
+static int
+smp_fetch_bc_rttvar(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 1, 1))
+ return 0;
+
+ /* By default or if explicitly specified, convert rttvar to ms */
+ if (!args || args[0].type == ARGT_STOP || args[0].data.sint == TIME_UNIT_MS)
+ smp->data.u.sint = (smp->data.u.sint + 500) / 1000;
+
+ return 1;
+}
+
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+/* get the unacked counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_unacked(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 2))
+ return 0;
+ return 1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+/* get the sacked counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_sacked(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 3))
+ return 0;
+ return 1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+/* get the lost counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_lost(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 4))
+ return 0;
+ return 1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+/* get the retrans counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_retrans(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 5))
+ return 0;
+ return 1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+/* get the fackets counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_fackets(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 6))
+ return 0;
+ return 1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+/* get the reordering counter on a client connection (from TCP_INFO; 0 if unavailable) */
+static int
+smp_fetch_fc_reordering(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ if (!get_tcp_info(args, smp, 0, 7))
+ return 0;
+ return 1;
+}
+#endif
+#endif // TCP_INFO
+
+/* Validates the data unit argument passed to "accept_date" fetch. Argument 0 supports an
+ * optional string representing the unit of the result: "s" for seconds, "ms" for
+ * milliseconds and "us" for microseconds. The string is converted in place to an
+ * ARGT_SINT holding the TIME_UNIT_* code. Returns 0 on error and non-zero if OK.
+ */
+int smp_check_accept_date_unit(struct arg *args, char **err)
+{
+ if (args[0].type == ARGT_STR) {
+ long long int unit;
+
+ if (strcmp(args[0].data.str.area, "s") == 0) {
+ unit = TIME_UNIT_S;
+ }
+ else if (strcmp(args[0].data.str.area, "ms") == 0) {
+ unit = TIME_UNIT_MS;
+ }
+ else if (strcmp(args[0].data.str.area, "us") == 0) {
+ unit = TIME_UNIT_US;
+ }
+ else {
+ memprintf(err, "expects 's', 'ms' or 'us', got '%s'",
+ args[0].data.str.area);
+ return 0;
+ }
+
+ chunk_destroy(&args[0].data.str);
+ args[0].type = ARGT_SINT;
+ args[0].data.sint = unit;
+ }
+ else if (args[0].type != ARGT_STOP) {
+ memprintf(err, "Unexpected arg type");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* retrieve the accept or request date in epoch time, converts it to milliseconds
+ * or microseconds if asked to in optional args[0] unit param */
+static int
+smp_fetch_accept_date(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+ struct strm_logs *logs;
+ struct timeval tv;
+
+ if (!smp->strm)
+ return 0;
+
+ logs = &smp->strm->logs;
+
+ if (kw[0] == 'r') { /* request_date: accept date shifted by idle + handshake time */
+ tv_ms_add(&tv, &logs->accept_date, logs->t_idle >= 0 ? logs->t_idle + logs->t_handshake : 0);
+ } else { /* accept_date */
+ tv.tv_sec = logs->accept_date.tv_sec;
+ tv.tv_usec = logs->accept_date.tv_usec;
+ }
+
+ smp->data.u.sint = tv.tv_sec;
+
+ /* report in milliseconds */
+ if (args[0].type == ARGT_SINT && args[0].data.sint == TIME_UNIT_MS) {
+ smp->data.u.sint *= 1000;
+ smp->data.u.sint += tv.tv_usec / 1000;
+ }
+ /* report in microseconds */
+ else if (args[0].type == ARGT_SINT && args[0].data.sint == TIME_UNIT_US) {
+ smp->data.u.sint *= 1000000;
+ smp->data.u.sint += tv.tv_usec;
+ }
+
+ smp->data.type = SMP_T_SINT;
+ smp->flags |= SMP_F_VOL_TEST | SMP_F_MAY_CHANGE;
+ return 1;
+}
+
+/* Note: must not be declared <const> as its list will be overwritten.
+ * Note: fetches that may return multiple types should be declared using the
+ * appropriate pseudo-type. If not available it must be declared as the lowest
+ * common denominator, the type that can be cast into all other ones.
+ */
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+ /* timestamps */
+ { "accept_date", smp_fetch_accept_date, ARG1(0,STR), smp_check_accept_date_unit, SMP_T_SINT, SMP_USE_L4CLI },
+ { "request_date", smp_fetch_accept_date, ARG1(0,STR), smp_check_accept_date_unit, SMP_T_SINT, SMP_USE_HRQHP },
+
+ { "bc_dst", smp_fetch_dst, 0, NULL, SMP_T_ADDR, SMP_USE_L4SRV },
+ { "bc_dst_port", smp_fetch_dport, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+ { "bc_src", smp_fetch_src, 0, NULL, SMP_T_ADDR, SMP_USE_L4SRV },
+ { "bc_src_port", smp_fetch_sport, 0, NULL, SMP_T_SINT, SMP_USE_L4SRV },
+
+ { "dst", smp_fetch_dst, 0, NULL, SMP_T_ADDR, SMP_USE_L4CLI },
+ { "dst_is_local", smp_fetch_dst_is_local, 0, NULL, SMP_T_BOOL, SMP_USE_L4CLI },
+ { "dst_port", smp_fetch_dport, 0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+
+ { "fc_dst", smp_fetch_dst, 0, NULL, SMP_T_ADDR, SMP_USE_L4CLI },
+ { "fc_dst_is_local", smp_fetch_dst_is_local, 0, NULL, SMP_T_BOOL, SMP_USE_L4CLI },
+ { "fc_dst_port", smp_fetch_dport, 0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+
+ { "fc_src", smp_fetch_src, 0, NULL, SMP_T_ADDR, SMP_USE_L4CLI },
+ { "fc_src_is_local", smp_fetch_src_is_local, 0, NULL, SMP_T_BOOL, SMP_USE_L4CLI },
+ { "fc_src_port", smp_fetch_sport, 0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+
+ { "src", smp_fetch_src, 0, NULL, SMP_T_ADDR, SMP_USE_L4CLI },
+ { "src_is_local", smp_fetch_src_is_local, 0, NULL, SMP_T_BOOL, SMP_USE_L4CLI },
+ { "src_port", smp_fetch_sport, 0, NULL, SMP_T_SINT, SMP_USE_L4CLI },
+#ifdef TCP_INFO
+ { "fc_rtt", smp_fetch_fc_rtt, ARG1(0,STR), val_fc_time_value, SMP_T_SINT, SMP_USE_L4CLI },
+ { "fc_rttvar", smp_fetch_fc_rttvar, ARG1(0,STR), val_fc_time_value, SMP_T_SINT, SMP_USE_L4CLI },
+ { "bc_rtt", smp_fetch_bc_rtt, ARG1(0,STR), val_fc_time_value, SMP_T_SINT, SMP_USE_L4CLI },
+ { "bc_rttvar", smp_fetch_bc_rttvar, ARG1(0,STR), val_fc_time_value, SMP_T_SINT, SMP_USE_L4CLI },
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+ { "fc_unacked", smp_fetch_fc_unacked, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+ { "fc_sacked", smp_fetch_fc_sacked, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+ { "fc_retrans", smp_fetch_fc_retrans, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+ { "fc_fackets", smp_fetch_fc_fackets, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__APPLE__)
+ { "fc_lost", smp_fetch_fc_lost, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+ { "fc_reordering", smp_fetch_fc_reordering, ARG1(0,STR), var_fc_counter, SMP_T_SINT, SMP_USE_L4CLI },
+#endif
+#endif // TCP_INFO
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords); /* auto-registered at boot */
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/tcpcheck.c b/src/tcpcheck.c
new file mode 100644
index 0000000..d30ecb5
--- /dev/null
+++ b/src/tcpcheck.c
@@ -0,0 +1,5150 @@
+/*
+ * Health-checks functions.
+ *
+ * Copyright 2000-2009,2020 Willy Tarreau <w@1wt.eu>
+ * Copyright 2007-2010 Krzysztof Piotr Oledzki <ole@ans.pl>
+ * Copyright 2013 Baptiste Assmann <bedis9@gmail.com>
+ * Copyright 2020 Gaetan Rivet <grive@u256.net>
+ * Copyright 2020 Christopher Faulet <cfaulet@haproxy.com>
+ * Crown Copyright 2022 Defence Science and Technology Laboratory <dstlipgroup@dstl.gov.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <haproxy/action.h>
+#include <haproxy/api.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/chunk.h>
+#include <haproxy/connection.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/h1.h>
+#include <haproxy/http.h>
+#include <haproxy/http_htx.h>
+#include <haproxy/htx.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/protocol.h>
+#include <haproxy/proxy-t.h>
+#include <haproxy/regex.h>
+#include <haproxy/sample.h>
+#include <haproxy/server.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/stconn.h>
+#include <haproxy/task.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/ticks.h>
+#include <haproxy/tools.h>
+#include <haproxy/trace.h>
+#include <haproxy/vars.h>
+
+
+/* All TRACE_* statements in this file are emitted on the check trace source */
+#define TRACE_SOURCE &trace_check
+
+/* Global tree to share all tcp-checks */
+struct eb_root shared_tcpchecks = EB_ROOT;
+
+
+/* Memory pool used to allocate tcpcheck_rule objects */
+DECLARE_POOL(pool_head_tcpcheck_rule, "tcpcheck_rule", sizeof(struct tcpcheck_rule));
+
+/**************************************************************************/
+/*************** Init/deinit tcp-check rules and ruleset ******************/
+/**************************************************************************/
+/* Releases memory allocated for a log-format string: each logformat_node in
+ * <fmt> is unlinked, its sample expression and argument string are freed,
+ * then the node itself is released. <fmt> itself is left as an empty list.
+ */
+static void free_tcpcheck_fmt(struct list *fmt)
+{
+	struct logformat_node *lf, *lfb;
+
+	list_for_each_entry_safe(lf, lfb, fmt, list) {
+		LIST_DELETE(&lf->list);
+		release_sample_expr(lf->expr);
+		free(lf->arg);
+		free(lf);
+	}
+}
+
+/* Releases memory allocated for an HTTP header used in a tcp-check send rule.
+ * NULL-safe. The header value is a log-format list, hence released via
+ * free_tcpcheck_fmt().
+ */
+void free_tcpcheck_http_hdr(struct tcpcheck_http_hdr *hdr)
+{
+	if (!hdr)
+		return;
+
+	free_tcpcheck_fmt(&hdr->value);
+	istfree(&hdr->name);
+	free(hdr);
+}
+
+/* Releases memory allocated for an HTTP header list used in a tcp-check send
+ * rule: each header is unlinked and freed via free_tcpcheck_http_hdr().
+ */
+static void free_tcpcheck_http_hdrs(struct list *hdrs)
+{
+	struct tcpcheck_http_hdr *hdr, *bhdr;
+
+	list_for_each_entry_safe(hdr, bhdr, hdrs, list) {
+		LIST_DELETE(&hdr->list);
+		free_tcpcheck_http_hdr(hdr);
+	}
+}
+
+/* Releases memory allocated for a tcp-check. If in_pool is set, it means the
+ * tcp-check was allocated using a memory pool (it is used to instantiate email
+ * alerts). NULL-safe. Only the union member matching the rule's action (and,
+ * within it, the send/expect type) is released, since the payloads overlap.
+ */
+void free_tcpcheck(struct tcpcheck_rule *rule, int in_pool)
+{
+	if (!rule)
+		return;
+
+	free(rule->comment);
+	switch (rule->action) {
+	case TCPCHK_ACT_SEND:
+		switch (rule->send.type) {
+		case TCPCHK_SEND_STRING:
+		case TCPCHK_SEND_BINARY:
+			istfree(&rule->send.data);
+			break;
+		case TCPCHK_SEND_STRING_LF:
+		case TCPCHK_SEND_BINARY_LF:
+			free_tcpcheck_fmt(&rule->send.fmt);
+			break;
+		case TCPCHK_SEND_HTTP:
+			free(rule->send.http.meth.str.area);
+			/* uri and body are either plain strings or log-format
+			 * lists, depending on the *_FMT flags
+			 */
+			if (!(rule->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT))
+				istfree(&rule->send.http.uri);
+			else
+				free_tcpcheck_fmt(&rule->send.http.uri_fmt);
+			istfree(&rule->send.http.vsn);
+			free_tcpcheck_http_hdrs(&rule->send.http.hdrs);
+			if (!(rule->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT))
+				istfree(&rule->send.http.body);
+			else
+				free_tcpcheck_fmt(&rule->send.http.body_fmt);
+			break;
+		case TCPCHK_SEND_UNDEF:
+			break;
+		}
+		break;
+	case TCPCHK_ACT_EXPECT:
+		free_tcpcheck_fmt(&rule->expect.onerror_fmt);
+		free_tcpcheck_fmt(&rule->expect.onsuccess_fmt);
+		release_sample_expr(rule->expect.status_expr);
+		switch (rule->expect.type) {
+		case TCPCHK_EXPECT_HTTP_STATUS:
+			free(rule->expect.codes.codes);
+			break;
+		case TCPCHK_EXPECT_STRING:
+		case TCPCHK_EXPECT_BINARY:
+		case TCPCHK_EXPECT_HTTP_BODY:
+			istfree(&rule->expect.data);
+			break;
+		case TCPCHK_EXPECT_STRING_REGEX:
+		case TCPCHK_EXPECT_BINARY_REGEX:
+		case TCPCHK_EXPECT_HTTP_STATUS_REGEX:
+		case TCPCHK_EXPECT_HTTP_BODY_REGEX:
+			regex_free(rule->expect.regex);
+			break;
+		case TCPCHK_EXPECT_STRING_LF:
+		case TCPCHK_EXPECT_BINARY_LF:
+		case TCPCHK_EXPECT_HTTP_BODY_LF:
+			free_tcpcheck_fmt(&rule->expect.fmt);
+			break;
+		case TCPCHK_EXPECT_HTTP_HEADER:
+			/* header name/value are a regex, a log-format list or
+			 * a plain string, depending on the HNAME/HVAL flags
+			 */
+			if (rule->expect.flags & TCPCHK_EXPT_FL_HTTP_HNAME_REG)
+				regex_free(rule->expect.hdr.name_re);
+			else if (rule->expect.flags & TCPCHK_EXPT_FL_HTTP_HNAME_FMT)
+				free_tcpcheck_fmt(&rule->expect.hdr.name_fmt);
+			else
+				istfree(&rule->expect.hdr.name);
+
+			if (rule->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_REG)
+				regex_free(rule->expect.hdr.value_re);
+			else if (rule->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_FMT)
+				free_tcpcheck_fmt(&rule->expect.hdr.value_fmt);
+			else if (!(rule->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_NONE))
+				istfree(&rule->expect.hdr.value);
+			break;
+		case TCPCHK_EXPECT_CUSTOM:
+		case TCPCHK_EXPECT_UNDEF:
+			break;
+		}
+		break;
+	case TCPCHK_ACT_CONNECT:
+		free(rule->connect.sni);
+		free(rule->connect.alpn);
+		release_sample_expr(rule->connect.port_expr);
+		break;
+	case TCPCHK_ACT_COMMENT:
+		break;
+	case TCPCHK_ACT_ACTION_KW:
+		free(rule->action_kw.rule);
+		break;
+	}
+
+	if (in_pool)
+		pool_free(pool_head_tcpcheck_rule, rule);
+	else
+		free(rule);
+}
+
+/* Creates a tcp-check variable used in preset variables before executing a
+ * tcp-check ruleset. Only the name is duplicated here; the value is filled in
+ * by the caller. Returns the new variable or NULL on allocation failure.
+ */
+struct tcpcheck_var *create_tcpcheck_var(const struct ist name)
+{
+	struct tcpcheck_var *var = NULL;
+
+	var = calloc(1, sizeof(*var));
+	if (var == NULL)
+		return NULL;
+
+	var->name = istdup(name);
+	if (!isttest(var->name)) {
+		free(var);
+		return NULL;
+	}
+
+	LIST_INIT(&var->list);
+	return var;
+}
+
+/* Releases memory allocated for a preset tcp-check variable. NULL-safe. The
+ * value's backing storage is only released for the sample types that own one
+ * (strings, binaries, and "other" HTTP methods).
+ */
+void free_tcpcheck_var(struct tcpcheck_var *var)
+{
+	if (!var)
+		return;
+
+	istfree(&var->name);
+	if (var->data.type == SMP_T_STR || var->data.type == SMP_T_BIN)
+		free(var->data.u.str.area);
+	else if (var->data.type == SMP_T_METH && var->data.u.meth.meth == HTTP_METH_OTHER)
+		free(var->data.u.meth.str.area);
+	free(var);
+}
+
+/* Releases a list of preset tcp-check variables, leaving <vars> empty */
+void free_tcpcheck_vars(struct list *vars)
+{
+	struct tcpcheck_var *var, *back;
+
+	list_for_each_entry_safe(var, back, vars, list) {
+		LIST_DELETE(&var->list);
+		free_tcpcheck_var(var);
+	}
+}
+
+/* Duplicates a list of preset tcp-check variables from <src> into <dst>.
+ * String/binary/"other method" values get their own copy of the backing
+ * storage; other sample types are copied by value. Returns 1 on success, 0 on
+ * allocation failure. On failure, entries already appended to <dst> are left
+ * in place; the caller is expected to release them (free_tcpcheck_vars()).
+ */
+int dup_tcpcheck_vars(struct list *dst, const struct list *src)
+{
+	const struct tcpcheck_var *var;
+	struct tcpcheck_var *new = NULL;
+
+	list_for_each_entry(var, src, list) {
+		new = create_tcpcheck_var(var->name);
+		if (!new)
+			goto error;
+		new->data.type = var->data.type;
+		if (var->data.type == SMP_T_STR || var->data.type == SMP_T_BIN) {
+			if (chunk_dup(&new->data.u.str, &var->data.u.str) == NULL)
+				goto error;
+			if (var->data.type == SMP_T_STR)
+				new->data.u.str.area[new->data.u.str.data] = 0;
+		}
+		else if (var->data.type == SMP_T_METH && var->data.u.meth.meth == HTTP_METH_OTHER) {
+			if (chunk_dup(&new->data.u.str, &var->data.u.str) == NULL)
+				goto error;
+			new->data.u.str.area[new->data.u.str.data] = 0;
+			new->data.u.meth.meth = var->data.u.meth.meth;
+		}
+		else
+			new->data.u = var->data.u;
+		LIST_APPEND(dst, &new->list);
+	}
+	return 1;
+
+  error:
+	/* use free_tcpcheck_var() instead of a bare free(): it is NULL-safe
+	 * and also releases new->name, which create_tcpcheck_var() duplicated
+	 * and a plain free() would leak. <new> is never already in <dst> here.
+	 */
+	free_tcpcheck_var(new);
+	return 0;
+}
+
+/* Looks for a shared tcp-check ruleset given its name. Returns the ruleset or
+ * NULL if no entry with that exact name exists in the shared_tcpchecks tree.
+ */
+struct tcpcheck_ruleset *find_tcpcheck_ruleset(const char *name)
+{
+	struct tcpcheck_ruleset *rs;
+	struct ebpt_node *node;
+
+	node = ebis_lookup_len(&shared_tcpchecks, name, strlen(name));
+	if (node) {
+		rs = container_of(node, typeof(*rs), node);
+		return rs;
+	}
+	return NULL;
+}
+
+/* Creates a new shared tcp-check ruleset and insert it in shared_tcpchecks
+ * tree. The name is duplicated and used as the tree key. Returns the new
+ * ruleset or NULL on allocation failure. No duplicate-name check is performed
+ * here; callers presumably use find_tcpcheck_ruleset() first — to confirm.
+ */
+struct tcpcheck_ruleset *create_tcpcheck_ruleset(const char *name)
+{
+	struct tcpcheck_ruleset *rs;
+
+	rs = calloc(1, sizeof(*rs));
+	if (rs == NULL)
+		return NULL;
+
+	rs->node.key = strdup(name);
+	if (rs->node.key == NULL) {
+		free(rs);
+		return NULL;
+	}
+
+	LIST_INIT(&rs->rules);
+	ebis_insert(&shared_tcpchecks, &rs->node);
+	return rs;
+}
+
+/* Releases memory allocated by a tcp-check ruleset. NULL-safe. The ruleset is
+ * removed from the shared_tcpchecks tree, its key is freed and every rule it
+ * contains is released via free_tcpcheck().
+ */
+void free_tcpcheck_ruleset(struct tcpcheck_ruleset *rs)
+{
+	struct tcpcheck_rule *r, *rb;
+
+	if (!rs)
+		return;
+
+	ebpt_delete(&rs->node);
+	free(rs->node.key);
+	list_for_each_entry_safe(r, rb, &rs->rules, list) {
+		LIST_DELETE(&r->list);
+		free_tcpcheck(r, 0);
+	}
+	free(rs);
+}
+
+
+/**************************************************************************/
+/**************** Everything about tcp-checks execution *******************/
+/**************************************************************************/
+/* Returns the id of a step in a tcp-check ruleset. If <rule> is NULL, the
+ * check's current step is used; with no current step either, 1 (the first
+ * step) is returned. Ids are 1-based, except for an implicit leading connect
+ * rule which is reported as step 0.
+ */
+int tcpcheck_get_step_id(const struct check *check, const struct tcpcheck_rule *rule)
+{
+	if (!rule)
+		rule = check->current_step;
+
+	/* no last started step => first step */
+	if (!rule)
+		return 1;
+
+	/* the implicit first connect rule is numbered 0 */
+	if (rule->index == 0 &&
+	    rule->action == TCPCHK_ACT_CONNECT &&
+	    (rule->connect.options & TCPCHK_OPT_IMPLICIT))
+		return 0;
+
+	return rule->index + 1;
+}
+
+/* Returns the first non COMMENT/ACTION_KW tcp-check rule of the ruleset
+ * <rules> or NULL if none was found.
+ */
+struct tcpcheck_rule *get_first_tcpcheck_rule(const struct tcpcheck_rules *rules)
+{
+	struct tcpcheck_rule *r;
+
+	list_for_each_entry(r, rules->list, list) {
+		if (r->action != TCPCHK_ACT_COMMENT && r->action != TCPCHK_ACT_ACTION_KW)
+			return r;
+	}
+	return NULL;
+}
+
+/* Returns the last non COMMENT/ACTION_KW tcp-check rule of the ruleset
+ * <rules> or NULL if none was found (walks the list backwards).
+ */
+static struct tcpcheck_rule *get_last_tcpcheck_rule(struct tcpcheck_rules *rules)
+{
+	struct tcpcheck_rule *r;
+
+	list_for_each_entry_rev(r, rules->list, list) {
+		if (r->action != TCPCHK_ACT_COMMENT && r->action != TCPCHK_ACT_ACTION_KW)
+			return r;
+	}
+	return NULL;
+}
+
+/* Returns the non COMMENT/ACTION_KW tcp-check rule of the ruleset <rules>
+ * following <start>, or NULL if none was found. If <start> is NULL, it relies
+ * on get_first_tcpcheck_rule().
+ */
+static struct tcpcheck_rule *get_next_tcpcheck_rule(struct tcpcheck_rules *rules, struct tcpcheck_rule *start)
+{
+	struct tcpcheck_rule *r;
+
+	if (!start)
+		return get_first_tcpcheck_rule(rules);
+
+	r = LIST_NEXT(&start->list, typeof(r), list);
+	list_for_each_entry_from(r, rules->list, list) {
+		if (r->action != TCPCHK_ACT_COMMENT && r->action != TCPCHK_ACT_ACTION_KW)
+			return r;
+	}
+	return NULL;
+}
+
+
+/* Creates info message when a tcp-check healthcheck fails on an expect rule.
+ * The message is appended to <msg>. <match> indicates whether unwanted
+ * content matched (1) or expected content failed to match (0); <info> is an
+ * optional pre-built description that, when set, replaces the generic text.
+ */
+static void tcpcheck_expect_onerror_message(struct buffer *msg, struct check *check, struct tcpcheck_rule *rule,
+					    int match, struct ist info)
+{
+	struct sample *smp;
+	int is_empty;
+
+	/* Follow these steps to produce the info message:
+	 *     1. if info field is already provided, copy it
+	 *     2. if the expect rule provides an onerror log-format string,
+	 *        use it to produce the message
+	 *     3. the expect rule is part of a protocol check (http, redis, mysql...), do nothing
+	 *     4. Otherwise produce the generic tcp-check info message
+	 */
+	if (istlen(info)) {
+		chunk_istcat(msg, info);
+		goto comment;
+	}
+	else if (!LIST_ISEMPTY(&rule->expect.onerror_fmt)) {
+		msg->data += sess_build_logline(check->sess, NULL, b_tail(msg), b_room(msg), &rule->expect.onerror_fmt);
+		goto comment;
+	}
+
+	is_empty = (IS_HTX_SC(check->sc) ? htx_is_empty(htxbuf(&check->bi)) : !b_data(&check->bi));
+	if (is_empty) {
+		TRACE_ERROR("empty response", CHK_EV_RX_DATA|CHK_EV_RX_ERR, check);
+		chunk_printf(msg, "TCPCHK got an empty response at step %d",
+			     tcpcheck_get_step_id(check, rule));
+		goto comment;
+	}
+
+	if (check->type == PR_O2_TCPCHK_CHK &&
+	    (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) != TCPCHK_RULES_TCP_CHK) {
+		goto comment;
+	}
+
+	chunk_strcat(msg, (match ? "TCPCHK matched unwanted content" : "TCPCHK did not match content"));
+	switch (rule->expect.type) {
+	case TCPCHK_EXPECT_HTTP_STATUS:
+		chunk_appendf(msg, "(status codes) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_STRING:
+	case TCPCHK_EXPECT_HTTP_BODY:
+		chunk_appendf(msg, " '%.*s' at step %d", (unsigned int)istlen(rule->expect.data), istptr(rule->expect.data),
+			      tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_BINARY:
+		chunk_appendf(msg, " (binary) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_STRING_REGEX:
+	case TCPCHK_EXPECT_HTTP_STATUS_REGEX:
+	case TCPCHK_EXPECT_HTTP_BODY_REGEX:
+		chunk_appendf(msg, " (regex) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_BINARY_REGEX:
+		chunk_appendf(msg, " (binary regex) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_STRING_LF:
+	case TCPCHK_EXPECT_HTTP_BODY_LF:
+		chunk_appendf(msg, " (log-format string) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_BINARY_LF:
+		chunk_appendf(msg, " (log-format binary) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_CUSTOM:
+		chunk_appendf(msg, " (custom function) at step %d", tcpcheck_get_step_id(check, rule));
+		break;
+	case TCPCHK_EXPECT_HTTP_HEADER:
+		chunk_appendf(msg, " (header pattern) at step %d", tcpcheck_get_step_id(check, rule));
+		/* a missing break here previously fell through into the early
+		 * return below, skipping the comment/status handling for
+		 * header-pattern failures
+		 */
+		break;
+	case TCPCHK_EXPECT_UNDEF:
+		/* Should never happen. */
+		return;
+	}
+
+  comment:
+	/* If the failing expect rule provides a comment, it is concatenated to
+	 * the info message.
+	 */
+	if (rule->comment) {
+		chunk_strcat(msg, " comment: ");
+		chunk_strcat(msg, rule->comment);
+	}
+
+	/* Finally, the check status code is set if the failing expect rule
+	 * defines a status expression.
+	 */
+	if (rule->expect.status_expr) {
+		smp = sample_fetch_as_type(check->proxy, check->sess, NULL, SMP_OPT_DIR_RES | SMP_OPT_FINAL,
+					   rule->expect.status_expr, SMP_T_STR);
+
+		if (smp && sample_casts[smp->data.type][SMP_T_SINT] &&
+		    sample_casts[smp->data.type][SMP_T_SINT](smp))
+			check->code = smp->data.u.sint;
+	}
+
+	*(b_tail(msg)) = '\0';
+}
+
+/* Creates info message when a tcp-check healthcheck succeeds on an expect rule */
+static void tcpcheck_expect_onsuccess_message(struct buffer *msg, struct check *check, struct tcpcheck_rule *rule,
+					      struct ist info)
+{
+	struct sample *smp;
+
+	/* Follow these steps to produce the info message:
+	 *     1. if info field is already provided, copy it
+	 *     2. if the expect rule provides an onsuccess log-format string,
+	 *        use it to produce the message
+	 *     3. the expect rule is part of a protocol check (http, redis, mysql...), do nothing
+	 *     4. Otherwise produce the generic tcp-check info message
+	 *
+	 * NOTE(review): unlike the onerror variant, a provided <info> does
+	 * not short-circuit the following branches, so the onsuccess
+	 * log-format output may be appended after it — confirm intended.
+	 */
+	if (istlen(info))
+		chunk_istcat(msg, info);
+	if (!LIST_ISEMPTY(&rule->expect.onsuccess_fmt))
+		msg->data += sess_build_logline(check->sess, NULL, b_tail(msg), b_room(msg),
+						&rule->expect.onsuccess_fmt);
+	else if (check->type == PR_O2_TCPCHK_CHK &&
+		 (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK)
+		chunk_strcat(msg, "(tcp-check)");
+
+	/* Finally, the check status code is set if the expect rule defines a
+	 * status expression.
+	 */
+	if (rule->expect.status_expr) {
+		smp = sample_fetch_as_type(check->proxy, check->sess, NULL, SMP_OPT_DIR_RES | SMP_OPT_FINAL,
+					   rule->expect.status_expr, SMP_T_STR);
+
+		if (smp && sample_casts[smp->data.type][SMP_T_SINT] &&
+		    sample_casts[smp->data.type][SMP_T_SINT](smp))
+			check->code = smp->data.u.sint;
+	}
+
+	*(b_tail(msg)) = '\0';
+}
+
+/* Internal functions to parse and validate a MySQL packet in the context of an
+ * expect rule. It start to parse the input buffer at the offset <offset>. If
+ * <last_read> is set, no more data are expected.
+ * Returns TCPCHK_EVAL_WAIT for more data, TCPCHK_EVAL_CONTINUE on success or
+ * TCPCHK_EVAL_STOP on error (the check status is then set accordingly).
+ */
+static enum tcpcheck_eval_ret tcpcheck_mysql_expect_packet(struct check *check, struct tcpcheck_rule *rule,
+							   unsigned int offset, int last_read)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	enum healthcheck_status status;
+	struct buffer *msg = NULL;
+	struct ist desc = IST_NULL;
+	unsigned int err = 0, plen = 0;
+
+
+	TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+	/* 3 Bytes for the packet length and 1 byte for the sequence id */
+	if (b_data(&check->bi) < offset+4) {
+		if (!last_read)
+			goto wait_more_data;
+
+		/* invalid length or truncated response */
+		status = HCHK_STATUS_L7RSP;
+		goto error;
+	}
+
+	/* little-endian 24-bit payload length */
+	plen = ((unsigned char) *b_peek(&check->bi, offset)) +
+		(((unsigned char) *(b_peek(&check->bi, offset+1))) << 8) +
+		(((unsigned char) *(b_peek(&check->bi, offset+2))) << 16);
+
+	if (b_data(&check->bi) < offset+plen+4) {
+		if (!last_read)
+			goto wait_more_data;
+
+		/* invalid length or truncated response */
+		status = HCHK_STATUS_L7RSP;
+		goto error;
+	}
+
+	if (*b_peek(&check->bi, offset+4) == '\xff') {
+		/* MySQL Error packet always begin with field_count = 0xff */
+		status = HCHK_STATUS_L7STS;
+		err = ((unsigned char) *b_peek(&check->bi, offset+5)) +
+			(((unsigned char) *(b_peek(&check->bi, offset+6))) << 8);
+		desc = ist2(b_peek(&check->bi, offset+7), b_data(&check->bi) - offset - 7);
+		goto error;
+	}
+
+	if (get_next_tcpcheck_rule(check->tcpcheck_rules, rule) != NULL) {
+		/* Not the last rule, continue */
+		goto out;
+	}
+
+	/* We set the MySQL Version in description for information purpose
+	 * FIXME : it can be cool to use MySQL Version for other purpose,
+	 * like mark as down old MySQL server.
+	 * NOTE(review): offset 5 is hard-coded here (handshake version
+	 * string), i.e. it ignores <offset> — presumably only valid for the
+	 * initial handshake packet; confirm for the post-auth OK packet.
+	 */
+	status = ((rule->expect.ok_status != HCHK_STATUS_UNKNOWN) ? rule->expect.ok_status : HCHK_STATUS_L7OKD);
+	set_server_check_status(check, status, b_peek(&check->bi, 5));
+
+  out:
+	free_trash_chunk(msg);
+	TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  error:
+	ret = TCPCHK_EVAL_STOP;
+	check->code = err;
+	msg = alloc_trash_chunk();
+	if (msg)
+		tcpcheck_expect_onerror_message(msg, check, rule, 0, desc);
+	set_server_check_status(check, status, (msg ? b_head(msg) : NULL));
+	goto out;
+
+  wait_more_data:
+	TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Custom tcp-check expect function to parse and validate the MySQL initial
+ * handshake packet (found at offset 0 of the input buffer). Returns
+ * TCPCHK_EVAL_WAIT to wait for more data, TCPCHK_EVAL_CONTINUE to evaluate
+ * the next rule or TCPCHK_EVAL_STOP if an error occurred.
+ */
+enum tcpcheck_eval_ret tcpcheck_mysql_expect_iniths(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	return tcpcheck_mysql_expect_packet(check, rule, 0, last_read);
+}
+
+/* Custom tcp-check expect function to parse and validate the MySQL OK packet
+ * following the initial handshake. Returns TCPCHK_EVAL_WAIT to wait for more
+ * data, TCPCHK_EVAL_CONTINUE to evaluate the next rule or TCPCHK_EVAL_STOP if
+ * an error occurred.
+ */
+enum tcpcheck_eval_ret tcpcheck_mysql_expect_ok(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	unsigned int hslen = 0;
+
+	/* skip the handshake packet: 4-byte header + 24-bit LE payload length.
+	 * NOTE(review): the first 3 bytes are read before any b_data() length
+	 * check — presumably guaranteed by the preceding iniths rule; confirm.
+	 */
+	hslen = 4 + ((unsigned char) *b_head(&check->bi)) +
+		(((unsigned char) *(b_peek(&check->bi, 1))) << 8) +
+		(((unsigned char) *(b_peek(&check->bi, 2))) << 16);
+
+	return tcpcheck_mysql_expect_packet(check, rule, hslen, last_read);
+}
+
+/* Custom tcp-check expect function to parse and validate the LDAP bind response
+ * package packet. Returns TCPCHK_EVAL_WAIT to wait for more data,
+ * TCPCHK_EVAL_CONTINUE to evaluate the next rule or TCPCHK_EVAL_STOP if an
+ * error occurred.
+ */
+enum tcpcheck_eval_ret tcpcheck_ldap_expect_bindrsp(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	enum healthcheck_status status;
+	struct buffer *msg = NULL;
+	struct ist desc = IST_NULL;
+	char *ptr;
+	unsigned short nbytes = 0;
+	size_t msglen = 0;
+
+	TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+	/* Check if the server speaks LDAP (ASN.1/BER)
+	 * http://en.wikipedia.org/wiki/Basic_Encoding_Rules
+	 * http://tools.ietf.org/html/rfc4511
+	 * NOTE(review): byte 1 is read before any explicit minimum-length
+	 * check on the buffer — presumably the caller guarantees at least a
+	 * couple of bytes are present; confirm.
+	 */
+	ptr = b_head(&check->bi) + 1;
+
+	/* size of LDAPMessage */
+	if (*ptr & 0x80) {
+		/* For message size encoded on several bytes, we only handle
+		 * size encoded on 2 or 4 bytes. There is no reason to make this
+		 * part too complex because only Active Directory is known to
+		 * encode BindResponse length on 4 bytes.
+		 */
+		nbytes = (*ptr & 0x7f);
+		if (b_data(&check->bi) < 1 + nbytes)
+			goto too_short;
+		switch (nbytes) {
+		case 4: msglen = read_n32(ptr+1); break;
+		case 2: msglen = read_n16(ptr+1); break;
+		default:
+			status = HCHK_STATUS_L7RSP;
+			desc = ist("Not LDAPv3 protocol");
+			goto error;
+		}
+	}
+	else
+		msglen = *ptr;
+	ptr += 1 + nbytes;
+
+	if (b_data(&check->bi) < 2 + nbytes + msglen)
+		goto too_short;
+
+	/* http://tools.ietf.org/html/rfc4511#section-4.2.2
+	 *   messageID: 0x02 0x01 0x01: INTEGER 1
+	 *   protocolOp: 0x61: bindResponse
+	 */
+	if (memcmp(ptr, "\x02\x01\x01\x61", 4) != 0) {
+		status = HCHK_STATUS_L7RSP;
+		desc = ist("Not LDAPv3 protocol");
+		goto error;
+	}
+	ptr += 4;
+
+	/* skip size of bindResponse */
+	nbytes = 0;
+	if (*ptr & 0x80)
+		nbytes = (*ptr & 0x7f);
+	ptr += 1 + nbytes;
+
+	/* http://tools.ietf.org/html/rfc4511#section-4.1.9
+	 *   ldapResult: 0x0a 0x01: ENUMERATION
+	 */
+	if (memcmp(ptr, "\x0a\x01", 2) != 0) {
+		status = HCHK_STATUS_L7RSP;
+		desc = ist("Not LDAPv3 protocol");
+		goto error;
+	}
+	ptr += 2;
+
+	/* http://tools.ietf.org/html/rfc4511#section-4.1.9
+	 *   resultCode (0 means success)
+	 */
+	check->code = *ptr;
+	if (check->code) {
+		status = HCHK_STATUS_L7STS;
+		desc = ist("See RFC: http://tools.ietf.org/html/rfc4511#section-4.1.9");
+		goto error;
+	}
+
+	status = ((rule->expect.ok_status != HCHK_STATUS_UNKNOWN) ? rule->expect.ok_status : HCHK_STATUS_L7OKD);
+	set_server_check_status(check, status, "Success");
+
+  out:
+	free_trash_chunk(msg);
+	TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  error:
+	ret = TCPCHK_EVAL_STOP;
+	msg = alloc_trash_chunk();
+	if (msg)
+		tcpcheck_expect_onerror_message(msg, check, rule, 0, desc);
+	set_server_check_status(check, status, (msg ? b_head(msg) : NULL));
+	goto out;
+
+  too_short:
+	if (!last_read)
+		goto wait_more_data;
+	/* invalid length or truncated response */
+	status = HCHK_STATUS_L7RSP;
+	goto error;
+
+  wait_more_data:
+	TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Custom tcp-check expect function to parse and validate the SPOP hello agent
+ * frame. Returns TCPCHK_EVAL_WAIT to wait for more data, TCPCHK_EVAL_CONTINUE
+ * to evaluate the next rule or TCPCHK_EVAL_STOP if an error occurred.
+ */
+enum tcpcheck_eval_ret tcpcheck_spop_expect_agenthello(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	enum healthcheck_status status;
+	struct buffer *msg = NULL;
+	struct ist desc = IST_NULL;
+	unsigned int framesz;
+
+	TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+	/* frame starts with a 4-byte network-order frame length.
+	 * NOTE(review): these 4 bytes are copied before checking that at
+	 * least 4 bytes were received; and when <last_read> is set a frame
+	 * shorter than 4+framesz is still handed to the parser — presumably
+	 * safe given how the caller fills check->bi; confirm.
+	 */
+	memcpy(&framesz, b_head(&check->bi), 4);
+	framesz = ntohl(framesz);
+
+	if (!last_read && b_data(&check->bi) < (4+framesz))
+		goto wait_more_data;
+
+	memset(b_orig(&trash), 0, b_size(&trash));
+	if (spoe_handle_healthcheck_response(b_peek(&check->bi, 4), framesz, b_orig(&trash), HCHK_DESC_LEN) == -1) {
+		status = HCHK_STATUS_L7RSP;
+		desc = ist2(b_orig(&trash), strlen(b_orig(&trash)));
+		goto error;
+	}
+
+	status = ((rule->expect.ok_status != HCHK_STATUS_UNKNOWN) ? rule->expect.ok_status : HCHK_STATUS_L7OKD);
+	set_server_check_status(check, status, "SPOA server is ok");
+
+  out:
+	free_trash_chunk(msg);
+	TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  error:
+	ret = TCPCHK_EVAL_STOP;
+	msg = alloc_trash_chunk();
+	if (msg)
+		tcpcheck_expect_onerror_message(msg, check, rule, 0, desc);
+	set_server_check_status(check, status, (msg ? b_head(msg) : NULL));
+	goto out;
+
+  wait_more_data:
+	TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Custom tcp-check expect function to parse and validate the agent-check
+ * reply. Returns TCPCHK_EVAL_WAIT to wait for more data, TCPCHK_EVAL_CONTINUE
+ * to evaluate the next rule or TCPCHK_EVAL_STOP if an error occurred.
+ * Side effects: may change the server's health, admin state (ready/drain/
+ * maint), weight and maxconn according to the words found in the reply line.
+ */
+enum tcpcheck_eval_ret tcpcheck_agent_expect_reply(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_STOP;
+	enum healthcheck_status status = HCHK_STATUS_CHECKED;
+	const char *hs = NULL; /* health status */
+	const char *as = NULL; /* admin status */
+	const char *ps = NULL; /* performance status */
+	const char *sc = NULL; /* maxconn */
+	const char *err = NULL; /* first error to report */
+	const char *wrn = NULL; /* first warning to report */
+	char *cmd, *p;
+
+	TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+	/* We're getting an agent check response. The agent could
+	 * have been disabled in the mean time with a long check
+	 * still pending. It is important that we ignore the whole
+	 * response.
+	 */
+	if (!(check->state & CHK_ST_ENABLED))
+		goto out;
+
+	/* The agent supports strings made of a single line ended by the
+	 * first CR ('\r') or LF ('\n'). This line is composed of words
+	 * delimited by spaces (' '), tabs ('\t'), or commas (','). The
+	 * line may optionally contained a description of a state change
+	 * after a sharp ('#'), which is only considered if a health state
+	 * is announced.
+	 *
+	 * Words may be composed of :
+	 *   - a numeric weight suffixed by the percent character ('%').
+	 *   - a health status among "up", "down", "stopped", and "fail".
+	 *   - an admin status among "ready", "drain", "maint".
+	 *
+	 * These words may appear in any order. If multiple words of the
+	 * same category appear, the last one wins.
+	 */
+
+	/* locate the end of the first line */
+	p = b_head(&check->bi);
+	while (*p && *p != '\n' && *p != '\r')
+		p++;
+
+	if (!*p) {
+		if (!last_read)
+			goto wait_more_data;
+
+		/* at least inform the admin that the agent is mis-behaving */
+		set_server_check_status(check, check->status, "Ignoring incomplete line from agent");
+		goto out;
+	}
+
+	*p = 0;
+	cmd = b_head(&check->bi);
+
+	/* tokenize the line and classify each word */
+	while (*cmd) {
+		/* look for next word */
+		if (*cmd == ' ' || *cmd == '\t' || *cmd == ',') {
+			cmd++;
+			continue;
+		}
+
+		if (*cmd == '#') {
+			/* this is the beginning of a health status description,
+			 * skip the sharp and blanks.
+			 */
+			cmd++;
+			while (*cmd == '\t' || *cmd == ' ')
+				cmd++;
+			break;
+		}
+
+		/* find the end of the word so that we have a null-terminated
+		 * word between <cmd> and <p>.
+		 */
+		p = cmd + 1;
+		while (*p && *p != '\t' && *p != ' ' && *p != '\n' && *p != ',')
+			p++;
+		if (*p)
+			*p++ = 0;
+
+		/* first, health statuses */
+		if (strcasecmp(cmd, "up") == 0) {
+			check->health = check->rise + check->fall - 1;
+			status = HCHK_STATUS_L7OKD;
+			hs = cmd;
+		}
+		else if (strcasecmp(cmd, "down") == 0) {
+			check->health = 0;
+			status = HCHK_STATUS_L7STS;
+			hs = cmd;
+		}
+		else if (strcasecmp(cmd, "stopped") == 0) {
+			check->health = 0;
+			status = HCHK_STATUS_L7STS;
+			hs = cmd;
+		}
+		else if (strcasecmp(cmd, "fail") == 0) {
+			check->health = 0;
+			status = HCHK_STATUS_L7STS;
+			hs = cmd;
+		}
+		/* admin statuses */
+		else if (strcasecmp(cmd, "ready") == 0) {
+			as = cmd;
+		}
+		else if (strcasecmp(cmd, "drain") == 0) {
+			as = cmd;
+		}
+		else if (strcasecmp(cmd, "maint") == 0) {
+			as = cmd;
+		}
+		/* try to parse a weight here and keep the last one */
+		else if (isdigit((unsigned char)*cmd) && strchr(cmd, '%') != NULL) {
+			ps = cmd;
+		}
+		/* try to parse a maxconn here */
+		else if (strncasecmp(cmd, "maxconn:", strlen("maxconn:")) == 0) {
+			sc = cmd;
+		}
+		else {
+			/* keep a copy of the first error */
+			if (!err)
+				err = cmd;
+		}
+		/* skip to next word */
+		cmd = p;
+	}
+	/* here, cmd points either to \0 or to the beginning of a
+	 * description. Skip possible leading spaces.
+	 */
+	while (*cmd == ' ' || *cmd == '\n')
+		cmd++;
+
+	/* First, update the admin status so that we avoid sending other
+	 * possibly useless warnings and can also update the health if
+	 * present after going back up.
+	 */
+	if (as) {
+		if (strcasecmp(as, "drain") == 0) {
+			TRACE_DEVEL("set server into DRAIN mode", CHK_EV_TCPCHK_EXP, check);
+			srv_adm_set_drain(check->server);
+		}
+		else if (strcasecmp(as, "maint") == 0) {
+			TRACE_DEVEL("set server into MAINT mode", CHK_EV_TCPCHK_EXP, check);
+			srv_adm_set_maint(check->server);
+		}
+		else {
+			TRACE_DEVEL("set server into READY mode", CHK_EV_TCPCHK_EXP, check);
+			srv_adm_set_ready(check->server);
+		}
+	}
+
+	/* now change weights */
+	if (ps) {
+		const char *msg;
+
+		TRACE_DEVEL("change server weight", CHK_EV_TCPCHK_EXP, check);
+		msg = server_parse_weight_change_request(check->server, ps);
+		if (!wrn || !*wrn)
+			wrn = msg;
+	}
+
+	if (sc) {
+		const char *msg;
+
+		sc += strlen("maxconn:");
+
+		TRACE_DEVEL("change server maxconn", CHK_EV_TCPCHK_EXP, check);
+		/* This is safe to call server_parse_maxconn_change_request
+		 * because the server lock is held during the check.
+		 */
+		msg = server_parse_maxconn_change_request(check->server, sc);
+		if (!wrn || !*wrn)
+			wrn = msg;
+	}
+
+	/* and finally health status */
+	if (hs) {
+		/* We'll report some of the warnings and errors we have
+		 * here. Down reports are critical, we leave them untouched.
+		 * Lack of report, or report of 'UP' leaves the room for
+		 * ERR first, then WARN.
+		 */
+		const char *msg = cmd;
+		struct buffer *t;
+
+		if (!*msg || status == HCHK_STATUS_L7OKD) {
+			if (err && *err)
+				msg = err;
+			else if (wrn && *wrn)
+				msg = wrn;
+		}
+
+		t = get_trash_chunk();
+		chunk_printf(t, "via agent : %s%s%s%s",
+			     hs, *msg ? " (" : "",
+			     msg, *msg ? ")" : "");
+		TRACE_DEVEL("update server health status", CHK_EV_TCPCHK_EXP, check);
+		set_server_check_status(check, status, t->area);
+	}
+	else if (err && *err) {
+		/* No status change but we'd like to report something odd.
+		 * Just report the current state and copy the message.
+		 */
+		TRACE_DEVEL("agent reports an error", CHK_EV_TCPCHK_EXP, check);
+		chunk_printf(&trash, "agent reports an error : %s", err);
+		set_server_check_status(check, status/*check->status*/, trash.area);
+	}
+	else if (wrn && *wrn) {
+		/* No status change but we'd like to report something odd.
+		 * Just report the current state and copy the message.
+		 */
+		TRACE_DEVEL("agent reports a warning", CHK_EV_TCPCHK_EXP, check);
+		chunk_printf(&trash, "agent warns : %s", wrn);
+		set_server_check_status(check, status/*check->status*/, trash.area);
+	}
+	else {
+		TRACE_DEVEL("update server health status", CHK_EV_TCPCHK_EXP, check);
+		set_server_check_status(check, status, NULL);
+	}
+
+  out:
+	TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  wait_more_data:
+	TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Evaluates a TCPCHK_ACT_CONNECT rule. Returns TCPCHK_EVAL_WAIT to wait the
+ * connection establishment, TCPCHK_EVAL_CONTINUE to evaluate the next rule or
+ * TCPCHK_EVAL_STOP if an error occurred.
+ *
+ * If a connection already exists, it only waits for its establishment (or
+ * tries to receive data if the next rule is not a send). Otherwise it builds a
+ * brand new connection: destination address and port resolution, transport
+ * layer selection (SSL/raw/server default), optional SOCKS4 or PROXY protocol
+ * prologue, and possibly an early mux installation. On failure, the <status>
+ * error code is mapped to a check status via the fail_check switch below.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_connect(struct check *check, struct tcpcheck_rule *rule)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	struct tcpcheck_connect *connect = &rule->connect;
+	struct proxy *proxy = check->proxy;
+	struct server *s = check->server;
+	struct task *t = check->task;
+	struct connection *conn = sc_conn(check->sc);
+	struct protocol *proto;
+	struct xprt_ops *xprt;
+	struct tcpcheck_rule *next;
+	int status, port;
+
+	TRACE_ENTER(CHK_EV_TCPCHK_CONN, check);
+
+	next = get_next_tcpcheck_rule(check->tcpcheck_rules, rule);
+
+	/* current connection already created, check if it is established or not */
+	if (conn) {
+		if (conn->flags & CO_FL_WAIT_XPRT) {
+			/* We are still waiting for the connection establishment */
+			if (next && next->action == TCPCHK_ACT_SEND) {
+				if (!(check->sc->wait_event.events & SUB_RETRY_SEND))
+					conn->mux->subscribe(check->sc, SUB_RETRY_SEND, &check->sc->wait_event);
+				ret = TCPCHK_EVAL_WAIT;
+				TRACE_DEVEL("not connected yet", CHK_EV_TCPCHK_CONN, check);
+			}
+			else
+				ret = tcpcheck_eval_recv(check, rule);
+		}
+		goto out;
+	}
+
+	/* Note: here check->sc = sc = conn = NULL */
+
+	/* Always release input and output buffer when a new connect is evaluated */
+	check_release_buf(check, &check->bi);
+	check_release_buf(check, &check->bo);
+
+	/* No connection, prepare a new one */
+	conn = conn_new((s ? &s->obj_type : &proxy->obj_type));
+	if (!conn) {
+		chunk_printf(&trash, "TCPCHK error allocating connection at step %d",
+			     tcpcheck_get_step_id(check, rule));
+		if (rule->comment)
+			chunk_appendf(&trash, " comment: '%s'", rule->comment);
+		set_server_check_status(check, HCHK_STATUS_SOCKERR, trash.area);
+		ret = TCPCHK_EVAL_STOP;
+		TRACE_ERROR("stconn allocation error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+		goto out;
+	}
+	if (sc_attach_mux(check->sc, NULL, conn) < 0) {
+		TRACE_ERROR("mux attach error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+		conn_free(conn);
+		conn = NULL;
+		status = SF_ERR_RESOURCE;
+		goto fail_check;
+	}
+	conn->ctx = check->sc;
+	conn_set_owner(conn, check->sess, NULL);
+
+	/* no client address */
+	if (!sockaddr_alloc(&conn->dst, NULL, 0)) {
+		TRACE_ERROR("sockaddr allocation error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+		status = SF_ERR_RESOURCE;
+		goto fail_check;
+	}
+
+	/* connect to the connect rule addr if specified, otherwise the check
+	 * addr if specified on the server. otherwise, use the server addr (it
+	 * MUST exist at this step).
+	 */
+	*conn->dst = (is_addr(&connect->addr)
+		      ? connect->addr
+		      : (is_addr(&check->addr) ? check->addr : s->addr));
+	proto = protocol_lookup(conn->dst->ss_family, PROTO_TYPE_STREAM, 0);
+
+	/* Destination port lookup order: connect rule port, then the port
+	 * sample expression, then the connect rule address, then the check
+	 * port, then the check address, and finally the server service port.
+	 */
+	port = 0;
+	if (connect->port)
+		port = connect->port;
+	if (!port && connect->port_expr) {
+		struct sample *smp;
+
+		smp = sample_fetch_as_type(check->proxy, check->sess, NULL,
+					   SMP_OPT_DIR_REQ | SMP_OPT_FINAL,
+					   connect->port_expr, SMP_T_SINT);
+		if (smp)
+			port = smp->data.u.sint;
+	}
+	if (!port && is_inet_addr(&connect->addr))
+		port = get_host_port(&connect->addr);
+	if (!port && check->port)
+		port = check->port;
+	if (!port && is_inet_addr(&check->addr))
+		port = get_host_port(&check->addr);
+	if (!port) {
+		/* The server MUST exist here */
+		port = s->svc_port;
+	}
+	set_host_port(conn->dst, port);
+	TRACE_DEVEL("set port", CHK_EV_TCPCHK_CONN, check, 0, 0, (size_t[]){port});
+
+	xprt = ((connect->options & TCPCHK_OPT_SSL)
+		? xprt_get(XPRT_SSL)
+		: ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) ? check->xprt : xprt_get(XPRT_RAW)));
+
+	if (conn_prepare(conn, proto, xprt) < 0) {
+		TRACE_ERROR("xprt allocation error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+		status = SF_ERR_RESOURCE;
+		goto fail_check;
+	}
+
+	if ((connect->options & TCPCHK_OPT_SOCKS4) && s && (s->flags & SRV_F_SOCKS4_PROXY)) {
+		conn->send_proxy_ofs = 1;
+		conn->flags |= CO_FL_SOCKS4;
+		TRACE_DEVEL("configure SOCKS4 proxy", CHK_EV_TCPCHK_CONN, check);
+	}
+	else if ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && s && s->check.via_socks4 && (s->flags & SRV_F_SOCKS4_PROXY)) {
+		conn->send_proxy_ofs = 1;
+		conn->flags |= CO_FL_SOCKS4;
+		TRACE_DEVEL("configure SOCKS4 proxy", CHK_EV_TCPCHK_CONN, check);
+	}
+
+	if (connect->options & TCPCHK_OPT_SEND_PROXY) {
+		conn->send_proxy_ofs = 1;
+		conn->flags |= CO_FL_SEND_PROXY;
+		TRACE_DEVEL("configure PROXY protocol", CHK_EV_TCPCHK_CONN, check);
+	}
+	else if ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && s && s->check.send_proxy && !(check->state & CHK_ST_AGENT)) {
+		conn->send_proxy_ofs = 1;
+		conn->flags |= CO_FL_SEND_PROXY;
+		TRACE_DEVEL("configure PROXY protocol", CHK_EV_TCPCHK_CONN, check);
+	}
+
+	status = SF_ERR_INTERNAL;
+	if (proto && proto->connect) {
+		int flags = 0;
+
+		if (!next)
+			flags |= CONNECT_DELACK_ALWAYS;
+		if (connect->options & TCPCHK_OPT_HAS_DATA)
+			flags |= (CONNECT_HAS_DATA|CONNECT_DELACK_ALWAYS);
+		status = proto->connect(conn, flags);
+	}
+
+	if (status != SF_ERR_NONE)
+		goto fail_check;
+
+	conn_set_private(conn);
+	conn->ctx = check->sc;
+
+#ifdef USE_OPENSSL
+	/* SNI/ALPN: connect rule settings first, then the server's check
+	 * settings when the default connect options are inherited.
+	 */
+	if (connect->sni)
+		ssl_sock_set_servername(conn, connect->sni);
+	else if ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && s && s->check.sni)
+		ssl_sock_set_servername(conn, s->check.sni);
+
+	if (connect->alpn)
+		ssl_sock_set_alpn(conn, (unsigned char *)connect->alpn, connect->alpn_len);
+	else if ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && s && s->check.alpn_str)
+		ssl_sock_set_alpn(conn, (unsigned char *)s->check.alpn_str, s->check.alpn_len);
+#endif
+
+	if (conn_ctrl_ready(conn) && (connect->options & TCPCHK_OPT_LINGER) && !(conn->flags & CO_FL_FDLESS)) {
+		/* Some servers don't like reset on close */
+		HA_ATOMIC_AND(&fdtab[conn->handle.fd].state, ~FD_LINGER_RISK);
+	}
+
+	if (conn_ctrl_ready(conn) && (conn->flags & (CO_FL_SEND_PROXY | CO_FL_SOCKS4))) {
+		if (xprt_add_hs(conn) < 0)
+			status = SF_ERR_RESOURCE;
+	}
+
+	if (conn_xprt_start(conn) < 0) {
+		status = SF_ERR_RESOURCE;
+		goto fail_check;
+	}
+
+	/* The mux may be initialized now if there isn't server attached to the
+	 * check (email alerts) or if there is a mux proto specified or if there
+	 * is no alpn.
+	 */
+	if (!s || ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && check->mux_proto) ||
+	    connect->mux_proto || (!connect->alpn && !check->alpn_str)) {
+		const struct mux_ops *mux_ops;
+
+		TRACE_DEVEL("try to install mux now", CHK_EV_TCPCHK_CONN, check);
+		if (connect->mux_proto)
+			mux_ops = connect->mux_proto->mux;
+		else if ((connect->options & TCPCHK_OPT_DEFAULT_CONNECT) && check->mux_proto)
+			mux_ops = check->mux_proto->mux;
+		else {
+			int mode = ((check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK
+				    ? PROTO_MODE_HTTP
+				    : PROTO_MODE_TCP);
+
+			mux_ops = conn_get_best_mux(conn, IST_NULL, PROTO_SIDE_BE, mode);
+		}
+		if (mux_ops && conn_install_mux(conn, mux_ops, check->sc, proxy, check->sess) < 0) {
+			TRACE_ERROR("failed to install mux", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+			status = SF_ERR_INTERNAL;
+			goto fail_check;
+		}
+	}
+
+  fail_check:
+	/* It can return one of :
+	 *  - SF_ERR_NONE if everything's OK
+	 *  - SF_ERR_SRVTO if there are no more servers
+	 *  - SF_ERR_SRVCL if the connection was refused by the server
+	 *  - SF_ERR_PRXCOND if the connection has been limited by the proxy (maxconn)
+	 *  - SF_ERR_RESOURCE if a system resource is lacking (eg: fd limits, ports, ...)
+	 *  - SF_ERR_INTERNAL for any other purely internal errors
+	 * Additionally, in the case of SF_ERR_RESOURCE, an emergency log will be emitted.
+	 * Note that we try to prevent the network stack from sending the ACK during the
+	 * connect() when a pure TCP check is used (without PROXY protocol).
+	 */
+	switch (status) {
+	case SF_ERR_NONE:
+		/* we allow up to min(inter, timeout.connect) for a connection
+		 * to establish but only when timeout.check is set as it may be
+		 * to short for a full check otherwise
+		 */
+		t->expire = tick_add(now_ms, MS_TO_TICKS(check->inter));
+
+		if (proxy->timeout.check && proxy->timeout.connect) {
+			int t_con = tick_add(now_ms, proxy->timeout.connect);
+			t->expire = tick_first(t->expire, t_con);
+		}
+		break;
+	case SF_ERR_SRVTO: /* ETIMEDOUT */
+	case SF_ERR_SRVCL: /* ECONNREFUSED, ENETUNREACH, ... */
+	case SF_ERR_PRXCOND:
+	case SF_ERR_RESOURCE:
+	case SF_ERR_INTERNAL:
+		TRACE_ERROR("report connection error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check, 0, 0, (size_t[]){status});
+		chk_report_conn_err(check, errno, 0);
+		ret = TCPCHK_EVAL_STOP;
+		goto out;
+	}
+
+	/* don't do anything until the connection is established */
+	if (conn->flags & CO_FL_WAIT_XPRT) {
+		if (conn->mux) {
+			if (next && next->action == TCPCHK_ACT_SEND)
+				conn->mux->subscribe(check->sc, SUB_RETRY_SEND, &check->sc->wait_event);
+			else
+				conn->mux->subscribe(check->sc, SUB_RETRY_RECV, &check->sc->wait_event);
+		}
+		ret = TCPCHK_EVAL_WAIT;
+		TRACE_DEVEL("not connected yet", CHK_EV_TCPCHK_CONN, check);
+		goto out;
+	}
+
+  out:
+	if (conn && check->result == CHK_RES_FAILED) {
+		conn->flags |= CO_FL_ERROR;
+		TRACE_ERROR("connect failed, report connection error", CHK_EV_TCPCHK_CONN|CHK_EV_TCPCHK_ERR, check);
+	}
+
+	if (ret == TCPCHK_EVAL_CONTINUE && check->proxy->timeout.check)
+		check->task->expire = tick_add_ifset(now_ms, check->proxy->timeout.check);
+
+	TRACE_LEAVE(CHK_EV_TCPCHK_CONN, check, 0, 0, (size_t[]){ret});
+	return ret;
+}
+
+/* Evaluates a TCPCHK_ACT_SEND rule. Returns TCPCHK_EVAL_WAIT if outgoing data
+ * were not fully sent, TCPCHK_EVAL_CONTINUE to evaluate the next rule or
+ * TCPCHK_EVAL_STOP if an error occurred.
+ *
+ * Depending on the rule type, the payload is a verbatim string/binary block,
+ * the evaluation of a log-format string (possibly parsed as a hex string for
+ * the binary variant), or a full HTTP request assembled in HTX format. The
+ * output buffer is filled first, then flushed through the mux at the do_send
+ * label; when the mux cannot take everything, the function subscribes for
+ * send events and returns TCPCHK_EVAL_WAIT so it is re-entered later with the
+ * remaining data still pending in <check->bo>.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_rule *rule)
+{
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	struct tcpcheck_send *send = &rule->send;
+	struct stconn *sc = check->sc;
+	struct connection *conn = __sc_conn(sc);
+	struct buffer *tmp = NULL;
+	struct htx *htx = NULL;
+	int connection_hdr = 0; /* set when the rule supplies its own Connection header */
+
+	TRACE_ENTER(CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
+
+	/* An output buffer allocation was already requested and is still
+	 * pending: wait until it completes.
+	 */
+	if (check->state & CHK_ST_OUT_ALLOC) {
+		ret = TCPCHK_EVAL_WAIT;
+		TRACE_STATE("waiting for output buffer allocation", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TX_BLK, check);
+		goto out;
+	}
+
+	if (!check_get_buf(check, &check->bo)) {
+		check->state |= CHK_ST_OUT_ALLOC;
+		ret = TCPCHK_EVAL_WAIT;
+		TRACE_STATE("waiting for output buffer allocation", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TX_BLK, check);
+		goto out;
+	}
+
+	/* Data already pending in the output buffer, send them now */
+	if ((IS_HTX_CONN(conn) && !htx_is_empty(htxbuf(&check->bo))) || (!IS_HTX_CONN(conn) && b_data(&check->bo))) {
+		TRACE_DEVEL("Data still pending, try to send it now", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
+		goto do_send;
+	}
+
+	/* Always release input buffer when a new send is evaluated */
+	check_release_buf(check, &check->bi);
+
+	/* Fill the output buffer according to the send rule type */
+	switch (send->type) {
+	case TCPCHK_SEND_STRING:
+	case TCPCHK_SEND_BINARY:
+		if (istlen(send->data) >= b_size(&check->bo)) {
+			chunk_printf(&trash, "tcp-check send : string too large (%u) for buffer size (%u) at step %d",
+				     (unsigned int)istlen(send->data), (unsigned int)b_size(&check->bo),
+				     tcpcheck_get_step_id(check, rule));
+			set_server_check_status(check, HCHK_STATUS_L7RSP, trash.area);
+			ret = TCPCHK_EVAL_STOP;
+			goto out;
+		}
+		b_putist(&check->bo, send->data);
+		break;
+	case TCPCHK_SEND_STRING_LF:
+		/* Evaluate the log-format string straight into the output
+		 * buffer; an empty result just skips the send.
+		 */
+		check->bo.data = sess_build_logline(check->sess, NULL, b_orig(&check->bo), b_size(&check->bo), &rule->send.fmt);
+		if (!b_data(&check->bo))
+			goto out;
+		break;
+	case TCPCHK_SEND_BINARY_LF: {
+		int len = b_size(&check->bo);
+
+		/* Evaluate the log-format string into a temporary chunk, then
+		 * decode the resulting hex string into the output buffer.
+		 */
+		tmp = alloc_trash_chunk();
+		if (!tmp)
+			goto error_lf;
+		tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &rule->send.fmt);
+		if (!b_data(tmp))
+			goto out;
+		tmp->area[tmp->data] = '\0';
+		if (parse_binary(b_orig(tmp), &check->bo.area, &len, NULL) == 0)
+			goto error_lf;
+		check->bo.data = len;
+		break;
+	}
+	case TCPCHK_SEND_HTTP: {
+		struct htx_sl *sl;
+		struct ist meth, uri, vsn, clen, body;
+		unsigned int slflags = 0;
+
+		tmp = alloc_trash_chunk();
+		if (!tmp)
+			goto error_htx;
+
+		/* Build the start-line: method, URI (static or log-format)
+		 * and version, defaulting to "/" and "HTTP/1.0".
+		 */
+		meth = ((send->http.meth.meth == HTTP_METH_OTHER)
+			? ist2(send->http.meth.str.area, send->http.meth.str.data)
+			: http_known_methods[send->http.meth.meth]);
+		if (send->http.flags & TCPCHK_SND_HTTP_FL_URI_FMT) {
+			tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &send->http.uri_fmt);
+			uri = (b_data(tmp) ? ist2(b_orig(tmp), b_data(tmp)) : ist("/"));
+		}
+		else
+			uri = (isttest(send->http.uri) ? send->http.uri : ist("/"));
+		vsn = (isttest(send->http.vsn) ? send->http.vsn : ist("HTTP/1.0"));
+
+		/* Flag HTTP/1.1 and above ("HTTP/2", "HTTP/1.1", "HTTP/x.y" with x>1) */
+		if ((istlen(vsn) == 6 && *(vsn.ptr+5) == '2') ||
+		    (istlen(vsn) == 8 && (*(vsn.ptr+5) > '1' || (*(vsn.ptr+5) == '1' && *(vsn.ptr+7) >= '1'))))
+			slflags |= HTX_SL_F_VER_11;
+		slflags |= (HTX_SL_F_XFER_LEN|HTX_SL_F_CLEN);
+		if (!(send->http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) && !isttest(send->http.body))
+			slflags |= HTX_SL_F_BODYLESS;
+
+		htx = htx_from_buf(&check->bo);
+		sl = htx_add_stline(htx, HTX_BLK_REQ_SL, slflags, meth, uri, vsn);
+		if (!sl)
+			goto error_htx;
+		sl->info.req.meth = send->http.meth.meth;
+		if (!http_update_host(htx, sl, uri))
+			goto error_htx;
+
+		/* Append configured headers, evaluating their log-format
+		 * values; empty values drop the header. A user-supplied Host
+		 * header updates the authority, and a user-supplied
+		 * Connection header suppresses the default "Connection: close".
+		 */
+		if (!LIST_ISEMPTY(&send->http.hdrs)) {
+			struct tcpcheck_http_hdr *hdr;
+			struct ist hdr_value;
+
+			list_for_each_entry(hdr, &send->http.hdrs, list) {
+				chunk_reset(tmp);
+				tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &hdr->value);
+				if (!b_data(tmp))
+					continue;
+				hdr_value = ist2(b_orig(tmp), b_data(tmp));
+				if (!htx_add_header(htx, hdr->name, hdr_value))
+					goto error_htx;
+				if ((sl->flags & HTX_SL_F_HAS_AUTHORITY) && isteqi(hdr->name, ist("host"))) {
+					if (!http_update_authority(htx, sl, hdr_value))
+						goto error_htx;
+				}
+				if (isteqi(hdr->name, ist("connection")))
+					connection_hdr = 1;
+			}
+
+		}
+		if (check->proxy->options2 & PR_O2_CHK_SNDST) {
+			chunk_reset(tmp);
+			httpchk_build_status_header(check->server, tmp);
+			if (!htx_add_header(htx, ist("X-Haproxy-Server-State"), ist2(b_orig(tmp), b_data(tmp))))
+				goto error_htx;
+		}
+
+
+		if (send->http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) {
+			chunk_reset(tmp);
+			tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &send->http.body_fmt);
+			body = ist2(b_orig(tmp), b_data(tmp));
+		}
+		else
+			body = send->http.body;
+
+		if (!connection_hdr && !htx_add_header(htx, ist("Connection"), ist("close")))
+			goto error_htx;
+
+		/* Add a Content-length header for methods that may carry a
+		 * body, or whenever a body is actually present.
+		 */
+		if ((send->http.meth.meth != HTTP_METH_OPTIONS &&
+		     send->http.meth.meth != HTTP_METH_GET &&
+		     send->http.meth.meth != HTTP_METH_HEAD &&
+		     send->http.meth.meth != HTTP_METH_DELETE) || istlen(body)) {
+			clen = ist((!istlen(body) ? "0" : ultoa(istlen(body))));
+			if (!htx_add_header(htx, ist("Content-length"), clen))
+				goto error_htx;
+		}
+
+		if (!htx_add_endof(htx, HTX_BLK_EOH) ||
+		    (istlen(body) && !htx_add_data_atonce(htx, body)))
+			goto error_htx;
+
+		/* no more data are expected */
+		htx->flags |= HTX_FL_EOM;
+		htx_to_buf(htx, &check->bo);
+		break;
+	}
+	case TCPCHK_SEND_UNDEF:
+		/* Should never happen. */
+		ret = TCPCHK_EVAL_STOP;
+		goto out;
+	};
+
+  do_send:
+	TRACE_DATA("send data", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
+	if (conn->mux->snd_buf(sc, &check->bo,
+			       (IS_HTX_CONN(conn) ? (htxbuf(&check->bo))->data: b_data(&check->bo)), 0) <= 0) {
+		if ((conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR)) {
+			ret = TCPCHK_EVAL_STOP;
+			TRACE_DEVEL("connection error during send", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TX_ERR, check);
+			goto out;
+		}
+	}
+	/* Partial send: subscribe for send readiness and come back later */
+	if ((IS_HTX_CONN(conn) && !htx_is_empty(htxbuf(&check->bo))) || (!IS_HTX_CONN(conn) && b_data(&check->bo))) {
+		conn->mux->subscribe(sc, SUB_RETRY_SEND, &sc->wait_event);
+		ret = TCPCHK_EVAL_WAIT;
+		TRACE_DEVEL("data not fully sent, wait", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
+		goto out;
+	}
+
+  out:
+	free_trash_chunk(tmp);
+	/* Release the output buffer once it is fully drained or on error */
+	if (!b_data(&check->bo) || ret == TCPCHK_EVAL_STOP)
+		check_release_buf(check, &check->bo);
+
+	TRACE_LEAVE(CHK_EV_TCPCHK_SND, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  error_htx:
+	if (htx) {
+		htx_reset(htx);
+		htx_to_buf(htx, &check->bo);
+	}
+	chunk_printf(&trash, "tcp-check send : failed to build HTTP request at step %d",
+		     tcpcheck_get_step_id(check, rule));
+	TRACE_ERROR("failed to build HTTP request", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TCPCHK_ERR, check);
+	set_server_check_status(check, HCHK_STATUS_L7RSP, trash.area);
+	ret = TCPCHK_EVAL_STOP;
+	goto out;
+
+  error_lf:
+	chunk_printf(&trash, "tcp-check send : failed to build log-format string at step %d",
+		     tcpcheck_get_step_id(check, rule));
+	TRACE_ERROR("failed to build log-format string", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TCPCHK_ERR, check);
+	set_server_check_status(check, HCHK_STATUS_L7RSP, trash.area);
+	ret = TCPCHK_EVAL_STOP;
+	goto out;
+
+}
+
+/* Try to receive data before evaluating a tcp-check expect rule. Returns
+ * TCPCHK_EVAL_WAIT if it is already subscribed on receive events or if nothing
+ * was received, TCPCHK_EVAL_CONTINUE to evaluate the expect rule or
+ * TCPCHK_EVAL_STOP if an error occurred.
+ *
+ * Data are accumulated into <check->bi> (allocated on demand); the buffer is
+ * released on exit when it is empty or on a fatal error. Network errors are
+ * only reported when no data at all were collected, so that a server closing
+ * with an RST right after its response does not invalidate that response.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_rule *rule)
+{
+	struct stconn *sc = check->sc;
+	struct connection *conn = __sc_conn(sc);
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	size_t max, read, cur_read = 0;
+	int is_empty;
+	int read_poll = MAX_READ_POLL_LOOPS;
+
+	TRACE_ENTER(CHK_EV_RX_DATA, check);
+
+	/* Already subscribed for receive events: nothing to do until woken up */
+	if (sc->wait_event.events & SUB_RETRY_RECV) {
+		TRACE_DEVEL("waiting for response", CHK_EV_RX_DATA, check);
+		goto wait_more_data;
+	}
+
+	if (sc_ep_test(sc, SE_FL_EOS))
+		goto end_recv;
+
+	/* An input buffer allocation was already requested and is still
+	 * pending: wait until it completes.
+	 */
+	if (check->state & CHK_ST_IN_ALLOC) {
+		TRACE_STATE("waiting for input buffer allocation", CHK_EV_RX_DATA|CHK_EV_RX_BLK, check);
+		goto wait_more_data;
+	}
+
+	if (!check_get_buf(check, &check->bi)) {
+		check->state |= CHK_ST_IN_ALLOC;
+		TRACE_STATE("waiting for input buffer allocation", CHK_EV_RX_DATA|CHK_EV_RX_BLK, check);
+		goto wait_more_data;
+	}
+
+	/* errors on the connection and the stream connector were already checked */
+
+	/* prepare to detect if the mux needs more room */
+	sc_ep_clr(sc, SE_FL_WANT_ROOM);
+
+	/* Read as much as possible, bounded by the available buffer room,
+	 * the read polling budget and global.tune.recv_enough.
+	 */
+	while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
+	       (!(conn->flags & CO_FL_ERROR) && !sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS))) {
+		max = (IS_HTX_SC(sc) ?  htx_free_space(htxbuf(&check->bi)) : b_room(&check->bi));
+		read = conn->mux->rcv_buf(sc, &check->bi, max, 0);
+		cur_read += read;
+		if (!read ||
+		    sc_ep_test(sc, SE_FL_WANT_ROOM) ||
+		    (--read_poll <= 0) ||
+		    (read < max && read >= global.tune.recv_enough))
+			break;
+	}
+
+  end_recv:
+	is_empty = (IS_HTX_SC(sc) ? htx_is_empty(htxbuf(&check->bi)) : !b_data(&check->bi));
+	if (is_empty && ((conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR))) {
+		/* Report network errors only if we got no other data. Otherwise
+		 * we'll let the upper layers decide whether the response is OK
+		 * or not. It is very common that an RST sent by the server is
+		 * reported as an error just after the last data chunk.
+		 */
+		TRACE_ERROR("connection error during recv", CHK_EV_RX_DATA|CHK_EV_RX_ERR, check);
+		goto stop;
+	}
+	else if (!cur_read && !sc_ep_test(sc, SE_FL_WANT_ROOM | SE_FL_ERROR | SE_FL_EOS)) {
+		/* Nothing received and nothing blocking: subscribe and wait */
+		conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
+		TRACE_DEVEL("waiting for response", CHK_EV_RX_DATA, check);
+		goto wait_more_data;
+	}
+	TRACE_DATA("data received", CHK_EV_RX_DATA, check, 0, 0, (size_t[]){cur_read});
+
+  out:
+	if (!b_data(&check->bi) || ret == TCPCHK_EVAL_STOP)
+		check_release_buf(check, &check->bi);
+
+	TRACE_LEAVE(CHK_EV_RX_DATA, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  stop:
+	ret = TCPCHK_EVAL_STOP;
+	goto out;
+
+  wait_more_data:
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Evaluates an HTTP TCPCHK_ACT_EXPECT rule. If <last_read> is set, no more data
+ * are expected. Returns TCPCHK_EVAL_WAIT to wait for more data,
+ * TCPCHK_EVAL_CONTINUE to evaluate the next rule or TCPCHK_EVAL_STOP if an
+ * error occurred.
+ *
+ * The response is parsed from the HTX buffer. Depending on the expect rule
+ * type, the status code, the start-line, a header name/value pair or the
+ * response body is matched, honoring the optional inversion flag
+ * (TCPCHK_EXPT_FL_INV). On failure, the check status is derived from the
+ * rule's err_status and an onerror message is built when configured.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_expect_http(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+	struct htx *htx = htxbuf(&check->bi);
+	struct htx_sl *sl;
+	struct htx_blk *blk;
+	enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+	struct tcpcheck_expect *expect = &rule->expect;
+	struct buffer *msg = NULL, *tmp = NULL, *nbuf = NULL, *vbuf = NULL;
+	enum healthcheck_status status = HCHK_STATUS_L7RSP;
+	struct ist desc = IST_NULL;
+	int i, match, inverse;
+
+	TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+	/* A full message or a full buffer means no more data will come */
+	last_read |= (!htx_free_data_space(htx) || (htx->flags & HTX_FL_EOM));
+
+	if (htx->flags & HTX_FL_PARSING_ERROR) {
+		TRACE_ERROR("invalid response", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+		status = HCHK_STATUS_L7RSP;
+		goto error;
+	}
+
+	if (htx_is_empty(htx)) {
+		if (last_read) {
+			TRACE_ERROR("empty response received", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+			status = HCHK_STATUS_L7RSP;
+			goto error;
+		}
+		TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+		goto wait_more_data;
+	}
+
+	sl = http_get_stline(htx);
+	check->code = sl->info.res.status;
+
+	if (check->server &&
+	    (check->server->proxy->options & PR_O_DISABLE404) &&
+	    (check->server->next_state != SRV_ST_STOPPED) &&
+	    (check->code == 404)) {
+		/* 404 may be accepted as "stopping" only if the server was up */
+		TRACE_STATE("404 response & disable-404", CHK_EV_TCPCHK_EXP, check);
+		goto out;
+	}
+
+	inverse = !!(expect->flags & TCPCHK_EXPT_FL_INV);
+	/* Make GCC happy ; initialize match to a failure state. */
+	match = inverse;
+	status = expect->err_status;
+
+	switch (expect->type) {
+	case TCPCHK_EXPECT_HTTP_STATUS:
+		/* Match the status code against the configured ranges */
+		match = 0;
+		for (i = 0; i < expect->codes.num; i++) {
+			if (sl->info.res.status >= expect->codes.codes[i][0] &&
+			    sl->info.res.status <= expect->codes.codes[i][1]) {
+				match = 1;
+				break;
+			}
+		}
+
+		/* Set status and description in case of error */
+		status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7STS);
+		if (LIST_ISEMPTY(&expect->onerror_fmt))
+			desc = htx_sl_res_reason(sl);
+		break;
+	case TCPCHK_EXPECT_HTTP_STATUS_REGEX:
+		match = regex_exec2(expect->regex, HTX_SL_RES_CPTR(sl), HTX_SL_RES_CLEN(sl));
+
+		/* Set status and description in case of error */
+		status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7STS);
+		if (LIST_ISEMPTY(&expect->onerror_fmt))
+			desc = htx_sl_res_reason(sl);
+		break;
+
+	case TCPCHK_EXPECT_HTTP_HEADER: {
+		struct http_hdr_ctx ctx;
+		struct ist npat, vpat, value;
+		int full = (expect->flags & (TCPCHK_EXPT_FL_HTTP_HVAL_NONE|TCPCHK_EXPT_FL_HTTP_HVAL_FULL));
+
+		/* Resolve the header name pattern, evaluating a log-format
+		 * string when configured (empty results are an error).
+		 */
+		if (expect->flags & TCPCHK_EXPT_FL_HTTP_HNAME_FMT) {
+			nbuf = alloc_trash_chunk();
+			if (!nbuf) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("Failed to allocate buffer to eval log-format string");
+				TRACE_ERROR("buffer allocation failure", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+			nbuf->data = sess_build_logline(check->sess, NULL, b_orig(nbuf), b_size(nbuf), &expect->hdr.name_fmt);
+			if (!b_data(nbuf)) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("log-format string evaluated to an empty string");
+				TRACE_ERROR("invalid log-format string (hdr name)", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+			npat = ist2(b_orig(nbuf), b_data(nbuf));
+		}
+		else if (!(expect->flags & TCPCHK_EXPT_FL_HTTP_HNAME_REG))
+			npat = expect->hdr.name;
+
+		/* Resolve the header value pattern the same way */
+		if (expect->flags & TCPCHK_EXPT_FL_HTTP_HVAL_FMT) {
+			vbuf = alloc_trash_chunk();
+			if (!vbuf) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("Failed to allocate buffer to eval log-format string");
+				TRACE_ERROR("buffer allocation failure", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+			vbuf->data = sess_build_logline(check->sess, NULL, b_orig(vbuf), b_size(vbuf), &expect->hdr.value_fmt);
+			if (!b_data(vbuf)) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("log-format string evaluated to an empty string");
+				TRACE_ERROR("invalid log-format string (hdr value)", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+			vpat = ist2(b_orig(vbuf), b_data(vbuf));
+		}
+		else if (!(expect->flags & TCPCHK_EXPT_FL_HTTP_HVAL_REG))
+			vpat = expect->hdr.value;
+
+		/* Walk all headers matching the name pattern and test each
+		 * candidate's value until one matches.
+		 */
+		match = 0;
+		ctx.blk = NULL;
+		while (1) {
+			switch (expect->flags & TCPCHK_EXPT_FL_HTTP_HNAME_TYPE) {
+			case TCPCHK_EXPT_FL_HTTP_HNAME_STR:
+				if (!http_find_str_header(htx, npat, &ctx, full))
+					goto end_of_match;
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HNAME_BEG:
+				if (!http_find_pfx_header(htx, npat, &ctx, full))
+					goto end_of_match;
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HNAME_END:
+				if (!http_find_sfx_header(htx, npat, &ctx, full))
+					goto end_of_match;
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HNAME_SUB:
+				if (!http_find_sub_header(htx, npat, &ctx, full))
+					goto end_of_match;
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HNAME_REG:
+				if (!http_match_header(htx, expect->hdr.name_re, &ctx, full))
+					goto end_of_match;
+				break;
+			default:
+				/* should never happen */
+				goto end_of_match;
+			}
+
+			/* A header has matched the name pattern, let's test its
+			 * value now (always defined from there). If there is no
+			 * value pattern, it is a good match.
+			 */
+
+			if (expect->flags & TCPCHK_EXPT_FL_HTTP_HVAL_NONE) {
+				match = 1;
+				goto end_of_match;
+			}
+
+			value = ctx.value;
+			switch (expect->flags & TCPCHK_EXPT_FL_HTTP_HVAL_TYPE) {
+			case TCPCHK_EXPT_FL_HTTP_HVAL_STR:
+				if (isteq(value, vpat)) {
+					match = 1;
+					goto end_of_match;
+				}
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HVAL_BEG:
+				if (istlen(value) < istlen(vpat))
+					break;
+				value = ist2(istptr(value), istlen(vpat));
+				if (isteq(value, vpat)) {
+					match = 1;
+					goto end_of_match;
+				}
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HVAL_END:
+				if (istlen(value) < istlen(vpat))
+					break;
+				value = ist2(istend(value) - istlen(vpat), istlen(vpat));
+				if (isteq(value, vpat)) {
+					match = 1;
+					goto end_of_match;
+				}
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HVAL_SUB:
+				if (isttest(istist(value, vpat))) {
+					match = 1;
+					goto end_of_match;
+				}
+				break;
+			case TCPCHK_EXPT_FL_HTTP_HVAL_REG:
+				if (regex_exec2(expect->hdr.value_re, istptr(value), istlen(value))) {
+					match = 1;
+					goto end_of_match;
+				}
+				break;
+			}
+		}
+
+	  end_of_match:
+		status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7STS);
+		if (LIST_ISEMPTY(&expect->onerror_fmt))
+			desc = htx_sl_res_reason(sl);
+		break;
+	}
+
+	case TCPCHK_EXPECT_HTTP_BODY:
+	case TCPCHK_EXPECT_HTTP_BODY_REGEX:
+	case TCPCHK_EXPECT_HTTP_BODY_LF:
+		/* Collect all DATA blocks into the trash chunk before matching */
+		match = 0;
+		chunk_reset(&trash);
+		for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
+			enum htx_blk_type type = htx_get_blk_type(blk);
+
+			if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+				break;
+			if (type == HTX_BLK_DATA) {
+				if (!chunk_istcat(&trash, htx_get_blk_value(htx, blk)))
+					break;
+			}
+		}
+
+		if (!b_data(&trash)) {
+			if (!last_read) {
+				TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+				goto wait_more_data;
+			}
+			status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7RSP);
+			if (LIST_ISEMPTY(&expect->onerror_fmt))
+				desc = ist("HTTP content check could not find a response body");
+			TRACE_ERROR("no response body found while expected", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+			goto error;
+		}
+
+		if (expect->type == TCPCHK_EXPECT_HTTP_BODY_LF) {
+			tmp = alloc_trash_chunk();
+			if (!tmp) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("Failed to allocate buffer to eval log-format string");
+				TRACE_ERROR("buffer allocation failure", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+			tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &expect->fmt);
+			if (!b_data(tmp)) {
+				status = HCHK_STATUS_L7RSP;
+				desc = ist("log-format string evaluated to an empty string");
+				TRACE_ERROR("invalid log-format string", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+				goto error;
+			}
+		}
+
+		/* Not enough data yet to conclude: wait for more */
+		if (!last_read &&
+		    ((expect->type == TCPCHK_EXPECT_HTTP_BODY && b_data(&trash) < istlen(expect->data)) ||
+		     ((expect->type == TCPCHK_EXPECT_HTTP_BODY_LF && b_data(&trash) < b_data(tmp))) ||
+		     (expect->min_recv > 0 && b_data(&trash) < expect->min_recv))) {
+			ret = TCPCHK_EVAL_WAIT;
+			goto out;
+		}
+
+		if (expect->type == TCPCHK_EXPECT_HTTP_BODY)
+			match = my_memmem(b_orig(&trash), b_data(&trash), istptr(expect->data), istlen(expect->data)) != NULL;
+		else if (expect->type == TCPCHK_EXPECT_HTTP_BODY_LF)
+			match = my_memmem(b_orig(&trash), b_data(&trash), b_orig(tmp), b_data(tmp)) != NULL;
+		else
+			match = regex_exec2(expect->regex, b_orig(&trash), b_data(&trash));
+
+		/* Wait for more data on mismatch only if no minimum is defined (-1),
+		 * otherwise the absence of match is already conclusive.
+		 */
+		if (!match && !last_read && (expect->min_recv == -1)) {
+			ret = TCPCHK_EVAL_WAIT;
+			TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+			goto out;
+		}
+
+		/* Set status and description in case of error */
+		status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7RSP);
+		if (LIST_ISEMPTY(&expect->onerror_fmt))
+			desc = (inverse
+				? ist("HTTP check matched unwanted content")
+				: ist("HTTP content check did not match"));
+		break;
+
+
+	default:
+		/* should never happen */
+		status = ((status != HCHK_STATUS_UNKNOWN) ? status : HCHK_STATUS_L7RSP);
+		goto error;
+	}
+
+	if (!(match ^ inverse)) {
+		TRACE_STATE("expect rule failed", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+		goto error;
+	}
+
+	TRACE_STATE("expect rule succeeded", CHK_EV_TCPCHK_EXP, check);
+
+  out:
+	free_trash_chunk(tmp);
+	free_trash_chunk(nbuf);
+	free_trash_chunk(vbuf);
+	free_trash_chunk(msg);
+	TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+	return ret;
+
+  error:
+	TRACE_STATE("expect rule failed", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+	ret = TCPCHK_EVAL_STOP;
+	msg = alloc_trash_chunk();
+	if (msg)
+		tcpcheck_expect_onerror_message(msg, check, rule, 0, desc);
+	set_server_check_status(check, status, (msg ? b_head(msg) : NULL));
+	goto out;
+
+  wait_more_data:
+	ret = TCPCHK_EVAL_WAIT;
+	goto out;
+}
+
+/* Evaluates a TCP TCPCHK_ACT_EXPECT rule. Returns TCPCHK_EVAL_WAIT to wait for
+ * more data, TCPCHK_EVAL_CONTINUE to evaluate the next rule or TCPCHK_EVAL_STOP
+ * if an error occurred.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_expect(struct check *check, struct tcpcheck_rule *rule, int last_read)
+{
+ enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+ struct tcpcheck_expect *expect = &rule->expect;
+ struct buffer *msg = NULL, *tmp = NULL;
+ struct ist desc = IST_NULL;
+ enum healthcheck_status status;
+ int match, inverse;
+
+ TRACE_ENTER(CHK_EV_TCPCHK_EXP, check);
+
+ last_read |= b_full(&check->bi);
+
+ /* The current expect might need more data than the previous one, check again
+ * that the minimum amount data required to match is respected.
+ */
+ if (!last_read) {
+ if ((expect->type == TCPCHK_EXPECT_STRING || expect->type == TCPCHK_EXPECT_BINARY) &&
+ (b_data(&check->bi) < istlen(expect->data))) {
+ ret = TCPCHK_EVAL_WAIT;
+ TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+ goto out;
+ }
+ if (expect->min_recv > 0 && (b_data(&check->bi) < expect->min_recv)) {
+ ret = TCPCHK_EVAL_WAIT;
+ TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+ goto out;
+ }
+ }
+
+ inverse = !!(expect->flags & TCPCHK_EXPT_FL_INV);
+ /* Make GCC happy ; initialize match to a failure state. */
+ match = inverse;
+ status = ((expect->err_status != HCHK_STATUS_UNKNOWN) ? expect->err_status : HCHK_STATUS_L7RSP);
+
+ switch (expect->type) {
+ case TCPCHK_EXPECT_STRING:
+ case TCPCHK_EXPECT_BINARY:
+ match = my_memmem(b_head(&check->bi), b_data(&check->bi), istptr(expect->data), istlen(expect->data)) != NULL;
+ break;
+ case TCPCHK_EXPECT_STRING_REGEX:
+ match = regex_exec2(expect->regex, b_head(&check->bi), MIN(b_data(&check->bi), b_size(&check->bi)-1));
+ break;
+
+ case TCPCHK_EXPECT_BINARY_REGEX:
+ chunk_reset(&trash);
+ dump_binary(&trash, b_head(&check->bi), b_data(&check->bi));
+ match = regex_exec2(expect->regex, b_head(&trash), MIN(b_data(&trash), b_size(&trash)-1));
+ break;
+
+ case TCPCHK_EXPECT_STRING_LF:
+ case TCPCHK_EXPECT_BINARY_LF:
+ match = 0;
+ tmp = alloc_trash_chunk();
+ if (!tmp) {
+ status = HCHK_STATUS_L7RSP;
+ desc = ist("Failed to allocate buffer to eval format string");
+ TRACE_ERROR("buffer allocation failure", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+ goto error;
+ }
+ tmp->data = sess_build_logline(check->sess, NULL, b_orig(tmp), b_size(tmp), &expect->fmt);
+ if (!b_data(tmp)) {
+ status = HCHK_STATUS_L7RSP;
+ desc = ist("log-format string evaluated to an empty string");
+ TRACE_ERROR("invalid log-format string", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+ goto error;
+ }
+ if (expect->type == TCPCHK_EXPECT_BINARY_LF) {
+ int len = tmp->data;
+ if (parse_binary(b_orig(tmp), &tmp->area, &len, NULL) == 0) {
+ status = HCHK_STATUS_L7RSP;
+ desc = ist("Failed to parse hexastring resulting of eval of a log-format string");
+ TRACE_ERROR("invalid binary log-format string", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+ goto error;
+ }
+ tmp->data = len;
+ }
+ if (b_data(&check->bi) < tmp->data) {
+ if (!last_read) {
+ ret = TCPCHK_EVAL_WAIT;
+ TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+ goto out;
+ }
+ break;
+ }
+ match = my_memmem(b_head(&check->bi), b_data(&check->bi), b_orig(tmp), b_data(tmp)) != NULL;
+ break;
+
+ case TCPCHK_EXPECT_CUSTOM:
+ /* Don't eval custom function if the buffer is empty. It means
+ * custom functions can't expect an empty response. If this
+ * change, don't forget to change this test and update all
+ * custom functions.
+ */
+ if (!b_data(&check->bi))
+ break;
+ if (expect->custom)
+ ret = expect->custom(check, rule, last_read);
+ goto out;
+ default:
+ /* Should never happen. */
+ ret = TCPCHK_EVAL_STOP;
+ goto out;
+ }
+
+
+ /* Wait for more data on mismatch only if no minimum is defined (-1),
+ * otherwise the absence of match is already conclusive.
+ */
+ if (!match && !last_read && (expect->min_recv == -1)) {
+ ret = TCPCHK_EVAL_WAIT;
+ TRACE_DEVEL("waiting for more data", CHK_EV_TCPCHK_EXP, check);
+ goto out;
+ }
+
+ /* Result as expected, next rule. */
+ if (match ^ inverse) {
+ TRACE_STATE("expect rule succeeded", CHK_EV_TCPCHK_EXP, check);
+ goto out;
+ }
+
+ error:
+ /* From this point on, we matched something we did not want, this is an error state. */
+ TRACE_STATE("expect rule failed", CHK_EV_TCPCHK_EXP|CHK_EV_TCPCHK_ERR, check);
+ ret = TCPCHK_EVAL_STOP;
+ msg = alloc_trash_chunk();
+ if (msg)
+ tcpcheck_expect_onerror_message(msg, check, rule, match, desc);
+ set_server_check_status(check, status, (msg ? b_head(msg) : NULL));
+ free_trash_chunk(msg);
+
+ out:
+ free_trash_chunk(tmp);
+ TRACE_LEAVE(CHK_EV_TCPCHK_EXP, check, 0, 0, (size_t[]){ret});
+ return ret;
+}
+
+/* Evaluates a TCPCHK_ACT_ACTION_KW rule. Returns TCPCHK_EVAL_CONTINUE to
+ * evaluate the next rule or TCPCHK_EVAL_STOP if an error occurred. It never
+ * waits.
+ */
+enum tcpcheck_eval_ret tcpcheck_eval_action_kw(struct check *check, struct tcpcheck_rule *rule)
+{
+ enum tcpcheck_eval_ret ret = TCPCHK_EVAL_CONTINUE;
+ struct act_rule *act_rule;
+ enum act_return act_ret;
+
+ act_rule =rule->action_kw.rule;
+ act_ret = act_rule->action_ptr(act_rule, check->proxy, check->sess, NULL, 0);
+ if (act_ret != ACT_RET_CONT) {
+ chunk_printf(&trash, "TCPCHK ACTION unexpected result at step %d\n",
+ tcpcheck_get_step_id(check, rule));
+ set_server_check_status(check, HCHK_STATUS_L7RSP, trash.area);
+ ret = TCPCHK_EVAL_STOP;
+ }
+
+ return ret;
+}
+
/* Executes a tcp-check ruleset. Note that this is called both from the
 * connection's wake() callback and from the check scheduling task. It returns
 * 0 on normal cases, or <0 if a close() has happened on an existing connection,
 * presenting the risk of an fd replacement.
 *
 * Please do NOT place any return statement in this function and only leave
 * via the out_end_tcpcheck label after setting retcode.
 */
int tcpcheck_main(struct check *check)
{
	struct tcpcheck_rule *rule;
	struct stconn *sc = check->sc;
	struct connection *conn = sc_conn(sc);
	/* must_read: a recv must be performed before the next expect rule is
	 * evaluated. last_read: the transport reported an error or a shutdown,
	 * so no more data will ever arrive for the current exchange.
	 */
	int must_read = 1, last_read = 0;
	int retcode = 0;
	enum tcpcheck_eval_ret eval_ret;

	/* here, we know that the check is complete or that it failed */
	if (check->result != CHK_RES_UNKNOWN)
		goto out;

	TRACE_ENTER(CHK_EV_TCPCHK_EVAL, check);

	/* Note: the stream connector and the connection may only be undefined before
	 * the first rule evaluation (it is always a connect rule) or when the
	 * stream connector allocation failed on a connect rule, during sc allocation.
	 */

	/* 1- check for connection error, if any */
	if ((conn && conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR))
		goto out_end_tcpcheck;

	/* 2- check if a rule must be resumed. It happens if check->current_step
	 * is defined. */
	else if (check->current_step) {
		rule = check->current_step;
		TRACE_PROTO("resume rule evaluation", CHK_EV_TCPCHK_EVAL, check, 0, 0, (size_t[]){ tcpcheck_get_step_id(check, rule)});
	}

	/* 3- It is the first evaluation. We must create a session and preset
	 * tcp-check variables */
	else {
		struct tcpcheck_var *var;

		/* First evaluation, create a session */
		check->sess = session_new(&checks_fe, NULL, &check->obj_type);
		if (!check->sess) {
			chunk_printf(&trash, "TCPCHK error allocating check session");
			TRACE_ERROR("session allocation failure", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ERR, check);
			set_server_check_status(check, HCHK_STATUS_SOCKERR, trash.area);
			goto out_end_tcpcheck;
		}
		vars_init_head(&check->vars, SCOPE_CHECK);
		rule = LIST_NEXT(check->tcpcheck_rules->list, typeof(rule), list);

		/* Preset tcp-check variables declared in the ruleset so that
		 * expect/send rules may reference them from the first step.
		 */
		list_for_each_entry(var, &check->tcpcheck_rules->preset_vars, list) {
			struct sample smp;

			memset(&smp, 0, sizeof(smp));
			smp_set_owner(&smp, check->proxy, check->sess, NULL, SMP_OPT_FINAL);
			smp.data = var->data;
			vars_set_by_name_ifexist(istptr(var->name), istlen(var->name), &smp);
		}
		TRACE_PROTO("start rules evaluation", CHK_EV_TCPCHK_EVAL, check);
	}

	/* Now evaluate the tcp-check rules */

	list_for_each_entry_from(rule, check->tcpcheck_rules->list, list) {
		check->code = 0;
		switch (rule->action) {
		case TCPCHK_ACT_CONNECT:
			/* Not the first connection, release it first */
			if (sc_conn(sc) && check->current_step != rule) {
				check->state |= CHK_ST_CLOSE_CONN;
				/* a close() will happen on an existing connection:
				 * report it to the caller (fd replacement risk).
				 */
				retcode = -1;
			}

			check->current_step = rule;

			/* We are still waiting the connection gets closed */
			if (check->state & CHK_ST_CLOSE_CONN) {
				TRACE_DEVEL("wait previous connection closure", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_CONN, check);
				eval_ret = TCPCHK_EVAL_WAIT;
				break;
			}

			TRACE_PROTO("eval connect rule", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_CONN, check);
			eval_ret = tcpcheck_eval_connect(check, rule);

			/* Refresh connection */
			conn = sc_conn(sc);
			last_read = 0;
			/* only force a read if nothing is already pending in the
			 * input buffer.
			 */
			must_read = (IS_HTX_SC(sc) ? htx_is_empty(htxbuf(&check->bi)) : !b_data(&check->bi));
			break;
		case TCPCHK_ACT_SEND:
			check->current_step = rule;
			TRACE_PROTO("eval send rule", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_SND, check);
			eval_ret = tcpcheck_eval_send(check, rule);
			/* a response is expected after a send */
			must_read = 1;
			break;
		case TCPCHK_ACT_EXPECT:
			check->current_step = rule;
			TRACE_PROTO("eval expect rule", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_EXP, check);
			if (must_read) {
				eval_ret = tcpcheck_eval_recv(check, rule);
				if (eval_ret == TCPCHK_EVAL_STOP)
					goto out_end_tcpcheck;
				else if (eval_ret == TCPCHK_EVAL_WAIT)
					goto out;
				last_read = ((conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS));
				must_read = 0;
			}

			eval_ret = ((check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK
				    ? tcpcheck_eval_expect_http(check, rule, last_read)
				    : tcpcheck_eval_expect(check, rule, last_read));

			if (eval_ret == TCPCHK_EVAL_WAIT) {
				/* rewind to the head of the expect chain so the
				 * whole group is re-evaluated on the next wakeup.
				 */
				check->current_step = rule->expect.head;
				if (!(sc->wait_event.events & SUB_RETRY_RECV))
					conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
			}
			break;
		case TCPCHK_ACT_ACTION_KW:
			/* Don't update the current step */
			TRACE_PROTO("eval action kw rule", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ACT, check);
			eval_ret = tcpcheck_eval_action_kw(check, rule);
			break;
		default:
			/* Otherwise, just go to the next one and don't update
			 * the current step
			 */
			eval_ret = TCPCHK_EVAL_CONTINUE;
			break;
		}

		switch (eval_ret) {
		case TCPCHK_EVAL_CONTINUE:
			break;
		case TCPCHK_EVAL_WAIT:
			goto out;
		case TCPCHK_EVAL_STOP:
			goto out_end_tcpcheck;
		}
	}

	/* All rules were evaluated: report success depending on the last step */
	if (check->current_step) {
		rule = check->current_step;

		TRACE_DEVEL("eval tcp-check result", CHK_EV_TCPCHK_EVAL, check);

		if (rule->action == TCPCHK_ACT_EXPECT) {
			struct buffer *msg;
			enum healthcheck_status status;

			/* "http-check disable-on-404": a 404 on a still-running
			 * server is a conditional success (drain mode).
			 */
			if (check->server &&
			    (check->server->proxy->options & PR_O_DISABLE404) &&
			    (check->server->next_state != SRV_ST_STOPPED) &&
			    (check->code == 404)) {
				set_server_check_status(check, HCHK_STATUS_L7OKCD, NULL);
				TRACE_PROTO("tcp-check conditionally passed (disable-404)", CHK_EV_TCPCHK_EVAL, check);
				goto out_end_tcpcheck;
			}

			msg = alloc_trash_chunk();
			if (msg)
				tcpcheck_expect_onsuccess_message(msg, check, rule, IST_NULL);
			status = ((rule->expect.ok_status != HCHK_STATUS_UNKNOWN) ? rule->expect.ok_status : HCHK_STATUS_L7OKD);
			set_server_check_status(check, status, (msg ? b_head(msg) : "(tcp-check)"));
			free_trash_chunk(msg);
		}
		else if (rule->action == TCPCHK_ACT_CONNECT) {
			const char *msg = ((rule->connect.options & TCPCHK_OPT_IMPLICIT) ? NULL : "(tcp-check)");
			enum healthcheck_status status = HCHK_STATUS_L4OK;
#ifdef USE_OPENSSL
			if (conn_is_ssl(conn))
				status = HCHK_STATUS_L6OK;
#endif
			set_server_check_status(check, status, msg);
		}
		else
			set_server_check_status(check, HCHK_STATUS_L7OKD, "(tcp-check)");
	}
	else {
		set_server_check_status(check, HCHK_STATUS_L7OKD, "(tcp-check)");
	}
	TRACE_PROTO("tcp-check passed", CHK_EV_TCPCHK_EVAL, check);

  out_end_tcpcheck:
	if ((conn && conn->flags & CO_FL_ERROR) || sc_ep_test(sc, SE_FL_ERROR)) {
		TRACE_ERROR("report connection error", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ERR, check);
		chk_report_conn_err(check, errno, 0);
	}

	/* the tcpcheck is finished, release in/out buffer now */
	check_release_buf(check, &check->bi);
	check_release_buf(check, &check->bo);

  out:
	TRACE_LEAVE(CHK_EV_HCHK_RUN, check);
	return retcode;
}
+
/* Registers a list of custom tcp-check action keywords into the global
 * registry. <kw_list> is linked, not copied, so it must remain valid for the
 * whole process lifetime (it is typically a static constructor-time object).
 */
void tcp_check_keywords_register(struct action_kw_list *kw_list)
{
	LIST_APPEND(&tcp_check_keywords.list, &kw_list->list);
}
+
/**************************************************************************/
/******************* Internals to parse tcp-check rules *******************/
/**************************************************************************/
/* Global registry of custom tcp-check action keywords, filled by
 * tcp_check_keywords_register() and consulted while parsing rulesets.
 */
struct action_kw_list tcp_check_keywords = {
	.list = LIST_HEAD_INIT(tcp_check_keywords.list),
};
+
+/* Creates a tcp-check rule resulting from parsing a custom keyword. NULL is
+ * returned on error.
+ */
+struct tcpcheck_rule *parse_tcpcheck_action(char **args, int cur_arg, struct proxy *px,
+ struct list *rules, struct action_kw *kw,
+ const char *file, int line, char **errmsg)
+{
+ struct tcpcheck_rule *chk = NULL;
+ struct act_rule *actrule = NULL;
+
+ actrule = new_act_rule(ACT_F_TCP_CHK, file, line);
+ if (!actrule) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ actrule->kw = kw;
+
+ cur_arg++;
+ if (kw->parse((const char **)args, &cur_arg, px, actrule, errmsg) == ACT_RET_PRS_ERR) {
+ memprintf(errmsg, "'%s' : %s", kw->kw, *errmsg);
+ goto error;
+ }
+
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->action = TCPCHK_ACT_ACTION_KW;
+ chk->action_kw.rule = actrule;
+ return chk;
+
+ error:
+ free(actrule);
+ return NULL;
+}
+
/* Parses and creates a tcp-check connect or an http-check connect rule. NULL is
 * returned on error. Recognized options: default, addr, port, proto, comment,
 * send-proxy, via-socks4, linger and, with OpenSSL, ssl, sni and alpn. On
 * success, ownership of every allocated field is transferred to the returned
 * rule.
 */
struct tcpcheck_rule *parse_tcpcheck_connect(char **args, int cur_arg, struct proxy *px, struct list *rules,
                                             const char *file, int line, char **errmsg)
{
	struct tcpcheck_rule *chk = NULL;
	struct sockaddr_storage *sk = NULL;
	char *comment = NULL, *sni = NULL, *alpn = NULL;
	struct sample_expr *port_expr = NULL;
	const struct mux_proto_list *mux_proto = NULL;
	unsigned short conn_opts = 0;
	long port = 0;
	int alpn_len = 0;

	/* An explicit connect is only allowed as the first "real" step of the
	 * ruleset: only comments, action keywords and implicit option-driven
	 * sends may precede it.
	 */
	list_for_each_entry(chk, rules, list) {
		if (chk->action == TCPCHK_ACT_CONNECT)
			break;
		if (chk->action == TCPCHK_ACT_COMMENT ||
		    chk->action == TCPCHK_ACT_ACTION_KW ||
		    (chk->action == TCPCHK_ACT_SEND && (chk->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT)))
			continue;

		memprintf(errmsg, "first step MUST also be a 'connect', "
			  "optionally preceded by a 'set-var', an 'unset-var' or a 'comment', "
			  "when there is a 'connect' step in the tcp-check ruleset");
		goto error;
	}

	cur_arg++;
	while (*(args[cur_arg])) {
		if (strcmp(args[cur_arg], "default") == 0)
			conn_opts |= TCPCHK_OPT_DEFAULT_CONNECT;
		else if (strcmp(args[cur_arg], "addr") == 0) {
			int port1, port2;

			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects <ipv4|ipv6> as argument.", args[cur_arg]);
				goto error;
			}

			/* resolve the address; an optional port in the string
			 * is accepted (PA_O_PORT_OK).
			 */
			sk = str2sa_range(args[cur_arg+1], NULL, &port1, &port2, NULL, NULL, NULL,
			                  errmsg, NULL, NULL, PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
			if (!sk) {
				memprintf(errmsg, "'%s' : %s.", args[cur_arg], *errmsg);
				goto error;
			}

			cur_arg++;
		}
		else if (strcmp(args[cur_arg], "port") == 0) {
			const char *p, *end;

			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a port number or a sample expression as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;

			/* "port" may be given several times: the last one wins,
			 * so drop any previously parsed expression first.
			 */
			port = 0;
			release_sample_expr(port_expr);
			p = args[cur_arg]; end = p + strlen(p);
			port = read_uint(&p, end);
			if (p != end) {
				/* not a plain integer: parse it as a sample
				 * expression evaluated at check time.
				 */
				int idx = 0;

				px->conf.args.ctx = ARGC_SRV;
				port_expr = sample_parse_expr((char *[]){args[cur_arg], NULL}, &idx,
				                              file, line, errmsg, &px->conf.args, NULL);

				if (!port_expr) {
					memprintf(errmsg, "error detected while parsing port expression : %s", *errmsg);
					goto error;
				}
				if (!(port_expr->fetch->val & SMP_VAL_BE_CHK_RUL)) {
					memprintf(errmsg, "error detected while parsing port expression : "
						  " fetch method '%s' extracts information from '%s', "
						  "none of which is available here.\n",
						  args[cur_arg], sample_src_names(port_expr->fetch->use));
					goto error;
				}
				px->http_needed |= !!(port_expr->fetch->use & SMP_USE_HTTP_ANY);
			}
			else if (port > 65535 || port < 1) {
				memprintf(errmsg, "expects a valid TCP port (from range 1 to 65535) or a sample expression, got %s.",
					  args[cur_arg]);
				goto error;
			}
		}
		else if (strcmp(args[cur_arg], "proto") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a MUX protocol as argument.", args[cur_arg]);
				goto error;
			}
			mux_proto = get_mux_proto(ist(args[cur_arg + 1]));
			if (!mux_proto) {
				memprintf(errmsg, "'%s' : unknown MUX protocol '%s'.", args[cur_arg], args[cur_arg+1]);
				goto error;
			}

			/* the MUX mode must match the ruleset family
			 * (tcp-check => TCP mux, http-check => HTTP mux).
			 */
			if (strcmp(args[0], "tcp-check") == 0 && mux_proto->mode != PROTO_MODE_TCP) {
				memprintf(errmsg, "'%s' : invalid MUX protocol '%s' for tcp-check", args[cur_arg], args[cur_arg+1]);
				goto error;
			}
			else if (strcmp(args[0], "http-check") == 0 && mux_proto->mode != PROTO_MODE_HTTP) {
				memprintf(errmsg, "'%s' : invalid MUX protocol '%s' for http-check", args[cur_arg], args[cur_arg+1]);
				goto error;
			}

			cur_arg++;
		}
		else if (strcmp(args[cur_arg], "comment") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;
			free(comment);
			comment = strdup(args[cur_arg]);
			if (!comment) {
				memprintf(errmsg, "out of memory");
				goto error;
			}
		}
		else if (strcmp(args[cur_arg], "send-proxy") == 0)
			conn_opts |= TCPCHK_OPT_SEND_PROXY;
		else if (strcmp(args[cur_arg], "via-socks4") == 0)
			conn_opts |= TCPCHK_OPT_SOCKS4;
		else if (strcmp(args[cur_arg], "linger") == 0)
			conn_opts |= TCPCHK_OPT_LINGER;
#ifdef USE_OPENSSL
		else if (strcmp(args[cur_arg], "ssl") == 0) {
			px->options |= PR_O_TCPCHK_SSL;
			conn_opts |= TCPCHK_OPT_SSL;
		}
		else if (strcmp(args[cur_arg], "sni") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;
			free(sni);
			sni = strdup(args[cur_arg]);
			if (!sni) {
				memprintf(errmsg, "out of memory");
				goto error;
			}
		}
		else if (strcmp(args[cur_arg], "alpn") == 0) {
#ifdef TLSEXT_TYPE_application_layer_protocol_negotiation
			free(alpn);
			if (ssl_sock_parse_alpn(args[cur_arg + 1], &alpn, &alpn_len, errmsg)) {
				memprintf(errmsg, "'%s' : %s", args[cur_arg], *errmsg);
				goto error;
			}
			cur_arg++;
#else
			memprintf(errmsg, "'%s' : library does not support TLS ALPN extension.", args[cur_arg]);
			goto error;
#endif
		}
#endif /* USE_OPENSSL */

		else {
			memprintf(errmsg, "expects 'comment', 'port', 'addr', 'send-proxy'"
#ifdef USE_OPENSSL
				  ", 'ssl', 'sni', 'alpn'"
#endif /* USE_OPENSSL */
				  " or 'via-socks4', 'linger', 'default' but got '%s' as argument.",
				  args[cur_arg]);
			goto error;
		}
		cur_arg++;
	}

	chk = calloc(1, sizeof(*chk));
	if (!chk) {
		memprintf(errmsg, "out of memory");
		goto error;
	}
	chk->action = TCPCHK_ACT_CONNECT;
	chk->comment = comment;
	chk->connect.port = port;
	chk->connect.options = conn_opts;
	chk->connect.sni = sni;
	chk->connect.alpn = alpn;
	chk->connect.alpn_len= alpn_len;
	chk->connect.port_expr= port_expr;
	chk->connect.mux_proto= mux_proto;
	/* str2sa_range() returns a static/thread-local storage: copy it */
	if (sk)
		chk->connect.addr = *sk;
	return chk;

  error:
	free(alpn);
	free(sni);
	free(comment);
	release_sample_expr(port_expr);
	return NULL;
}
+
+/* Parses and creates a tcp-check send rule. NULL is returned on error */
+struct tcpcheck_rule *parse_tcpcheck_send(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg)
+{
+ struct tcpcheck_rule *chk = NULL;
+ char *comment = NULL, *data = NULL;
+ enum tcpcheck_send_type type = TCPCHK_SEND_UNDEF;
+
+ if (strcmp(args[cur_arg], "send-binary-lf") == 0)
+ type = TCPCHK_SEND_BINARY_LF;
+ else if (strcmp(args[cur_arg], "send-binary") == 0)
+ type = TCPCHK_SEND_BINARY;
+ else if (strcmp(args[cur_arg], "send-lf") == 0)
+ type = TCPCHK_SEND_STRING_LF;
+ else if (strcmp(args[cur_arg], "send") == 0)
+ type = TCPCHK_SEND_STRING;
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a %s as argument",
+ (type == TCPCHK_SEND_BINARY ? "binary string": "string"), args[cur_arg]);
+ goto error;
+ }
+
+ data = args[cur_arg+1];
+
+ cur_arg += 2;
+ while (*(args[cur_arg])) {
+ if (strcmp(args[cur_arg], "comment") == 0) {
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ free(comment);
+ comment = strdup(args[cur_arg]);
+ if (!comment) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+ else {
+ memprintf(errmsg, "expects 'comment' but got '%s' as argument.",
+ args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ }
+
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->action = TCPCHK_ACT_SEND;
+ chk->comment = comment;
+ chk->send.type = type;
+
+ switch (chk->send.type) {
+ case TCPCHK_SEND_STRING:
+ chk->send.data = ist(strdup(data));
+ if (!isttest(chk->send.data)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ break;
+ case TCPCHK_SEND_BINARY: {
+ int len = chk->send.data.len;
+ if (parse_binary(data, &chk->send.data.ptr, &len, errmsg) == 0) {
+ memprintf(errmsg, "'%s' invalid binary string (%s).\n", data, *errmsg);
+ goto error;
+ }
+ chk->send.data.len = len;
+ break;
+ }
+ case TCPCHK_SEND_STRING_LF:
+ case TCPCHK_SEND_BINARY_LF:
+ LIST_INIT(&chk->send.fmt);
+ px->conf.args.ctx = ARGC_SRV;
+ if (!parse_logformat_string(data, px, &chk->send.fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", data, *errmsg);
+ goto error;
+ }
+ break;
+ case TCPCHK_SEND_HTTP:
+ case TCPCHK_SEND_UNDEF:
+ goto error;
+ }
+
+ return chk;
+
+ error:
+ free(chk);
+ free(comment);
+ return NULL;
+}
+
/* Parses and creates a http-check send rule. NULL is returned on error.
 * Recognized options: meth, uri/uri-lf, ver, hdr (repeatable), body/body-lf
 * and comment. Header values are always stored as log-format expressions.
 */
struct tcpcheck_rule *parse_tcpcheck_send_http(char **args, int cur_arg, struct proxy *px, struct list *rules,
                                               const char *file, int line, char **errmsg)
{
	struct tcpcheck_rule *chk = NULL;
	struct tcpcheck_http_hdr *hdr = NULL;
	/* VLA holding the raw header name/value pairs until the rule is built.
	 * NOTE(review): <i> is not checked against global.tune.max_http_hdr
	 * while parsing, so a config with more "hdr" entries than the tunable
	 * would overflow this array — confirm an upper layer bounds the count.
	 */
	struct http_hdr hdrs[global.tune.max_http_hdr];
	char *meth = NULL, *uri = NULL, *vsn = NULL;
	char *body = NULL, *comment = NULL;
	unsigned int flags = 0;
	int i = 0, host_hdr = -1;

	cur_arg++;
	while (*(args[cur_arg])) {
		if (strcmp(args[cur_arg], "meth") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;
			meth = args[cur_arg];
		}
		else if (strcmp(args[cur_arg], "uri") == 0 || strcmp(args[cur_arg], "uri-lf") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			/* last "uri"/"uri-lf" wins, so reset the flag each time */
			flags &= ~TCPCHK_SND_HTTP_FL_URI_FMT;
			if (strcmp(args[cur_arg], "uri-lf") == 0)
				flags |= TCPCHK_SND_HTTP_FL_URI_FMT;
			cur_arg++;
			uri = args[cur_arg];
		}
		else if (strcmp(args[cur_arg], "ver") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;
			vsn = args[cur_arg];
		}
		else if (strcmp(args[cur_arg], "hdr") == 0) {
			if (!*args[cur_arg+1] || !*args[cur_arg+2]) {
				memprintf(errmsg, "'%s' expects <name> and <value> as arguments", args[cur_arg]);
				goto error;
			}

			/* Host may be set only once; Content-Length and
			 * Transfer-Encoding are computed by the check engine
			 * and silently ignored here.
			 */
			if (strcasecmp(args[cur_arg+1], "host") == 0) {
				if (host_hdr >= 0) {
					memprintf(errmsg, "'%s' header already defined (previous value is '%s')",
						  args[cur_arg+1], istptr(hdrs[host_hdr].v));
					goto error;
				}
				host_hdr = i;
			}
			else if (strcasecmp(args[cur_arg+1], "content-length") == 0 ||
				 strcasecmp(args[cur_arg+1], "transfer-encoding") == 0)
				goto skip_hdr;

			hdrs[i].n = ist(args[cur_arg + 1]);
			hdrs[i].v = ist(args[cur_arg + 2]);
			i++;
		  skip_hdr:
			cur_arg += 2;
		}
		else if (strcmp(args[cur_arg], "body") == 0 || strcmp(args[cur_arg], "body-lf") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			/* last "body"/"body-lf" wins, so reset the flag each time */
			flags &= ~TCPCHK_SND_HTTP_FL_BODY_FMT;
			if (strcmp(args[cur_arg], "body-lf") == 0)
				flags |= TCPCHK_SND_HTTP_FL_BODY_FMT;
			cur_arg++;
			body = args[cur_arg];
		}
		else if (strcmp(args[cur_arg], "comment") == 0) {
			if (!*(args[cur_arg+1])) {
				memprintf(errmsg, "'%s' expects a string as argument.", args[cur_arg]);
				goto error;
			}
			cur_arg++;
			free(comment);
			comment = strdup(args[cur_arg]);
			if (!comment) {
				memprintf(errmsg, "out of memory");
				goto error;
			}
		}
		else {
			memprintf(errmsg, "expects 'comment', 'meth', 'uri', 'uri-lf', 'ver', 'hdr', 'body' or 'body-lf'"
				  " but got '%s' as argument.", args[cur_arg]);
			goto error;
		}
		cur_arg++;
	}

	/* terminate the header list with a null entry */
	hdrs[i].n = hdrs[i].v = IST_NULL;

	chk = calloc(1, sizeof(*chk));
	if (!chk) {
		memprintf(errmsg, "out of memory");
		goto error;
	}
	chk->action = TCPCHK_ACT_SEND;
	/* ownership of <comment> moves to the rule; clear the local so the
	 * error path cannot free it twice.
	 */
	chk->comment = comment; comment = NULL;
	chk->send.type = TCPCHK_SEND_HTTP;
	chk->send.http.flags = flags;
	LIST_INIT(&chk->send.http.hdrs);

	if (meth) {
		chk->send.http.meth.meth = find_http_meth(meth, strlen(meth));
		chk->send.http.meth.str.area = strdup(meth);
		chk->send.http.meth.str.data = strlen(meth);
		if (!chk->send.http.meth.str.area) {
			memprintf(errmsg, "out of memory");
			goto error;
		}
	}
	if (uri) {
		if (chk->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT) {
			LIST_INIT(&chk->send.http.uri_fmt);
			px->conf.args.ctx = ARGC_SRV;
			if (!parse_logformat_string(uri, px, &chk->send.http.uri_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
				memprintf(errmsg, "'%s' invalid log-format string (%s).\n", uri, *errmsg);
				goto error;
			}
		}
		else {
			chk->send.http.uri = ist(strdup(uri));
			if (!isttest(chk->send.http.uri)) {
				memprintf(errmsg, "out of memory");
				goto error;
			}
		}
	}
	if (vsn) {
		chk->send.http.vsn = ist(strdup(vsn));
		if (!isttest(chk->send.http.vsn)) {
			memprintf(errmsg, "out of memory");
			goto error;
		}
	}
	/* convert the collected raw headers into log-format header nodes */
	for (i = 0; istlen(hdrs[i].n); i++) {
		hdr = calloc(1, sizeof(*hdr));
		if (!hdr) {
			memprintf(errmsg, "out of memory");
			goto error;
		}
		LIST_INIT(&hdr->value);
		hdr->name = istdup(hdrs[i].n);
		if (!isttest(hdr->name)) {
			memprintf(errmsg, "out of memory");
			goto error;
		}

		/* values point into args[] which are NUL-terminated C strings,
		 * so ist0() is safe here.
		 */
		ist0(hdrs[i].v);
		if (!parse_logformat_string(istptr(hdrs[i].v), px, &hdr->value, 0, SMP_VAL_BE_CHK_RUL, errmsg))
			goto error;
		LIST_APPEND(&chk->send.http.hdrs, &hdr->list);
		hdr = NULL;
	}

	if (body) {
		if (chk->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) {
			LIST_INIT(&chk->send.http.body_fmt);
			px->conf.args.ctx = ARGC_SRV;
			if (!parse_logformat_string(body, px, &chk->send.http.body_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
				memprintf(errmsg, "'%s' invalid log-format string (%s).\n", body, *errmsg);
				goto error;
			}
		}
		else {
			chk->send.http.body = ist(strdup(body));
			if (!isttest(chk->send.http.body)) {
				memprintf(errmsg, "out of memory");
				goto error;
			}
		}
	}

	return chk;

  error:
	free_tcpcheck_http_hdr(hdr);
	free_tcpcheck(chk, 0);
	free(comment);
	return NULL;
}
+
+/* Parses and creates a http-check comment rule. NULL is returned on error */
+struct tcpcheck_rule *parse_tcpcheck_comment(char **args, int cur_arg, struct proxy *px, struct list *rules,
+ const char *file, int line, char **errmsg)
+{
+ struct tcpcheck_rule *chk = NULL;
+ char *comment = NULL;
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "expects a string as argument");
+ goto error;
+ }
+ cur_arg++;
+ comment = strdup(args[cur_arg]);
+ if (!comment) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->action = TCPCHK_ACT_COMMENT;
+ chk->comment = comment;
+ return chk;
+
+ error:
+ free(comment);
+ return NULL;
+}
+
+/* Parses and creates a tcp-check or an http-check expect rule. NULL is returned
+ * on error. <proto> is set to the right protocol flags (covered by the
+ * TCPCHK_RULES_PROTO_CHK mask).
+ */
+struct tcpcheck_rule *parse_tcpcheck_expect(char **args, int cur_arg, struct proxy *px,
+ struct list *rules, unsigned int proto,
+ const char *file, int line, char **errmsg)
+{
+ struct tcpcheck_rule *prev_check, *chk = NULL;
+ struct sample_expr *status_expr = NULL;
+ char *on_success_msg, *on_error_msg, *comment, *pattern, *npat, *vpat;
+ enum tcpcheck_expect_type type = TCPCHK_EXPECT_UNDEF;
+ enum healthcheck_status ok_st = HCHK_STATUS_UNKNOWN;
+ enum healthcheck_status err_st = HCHK_STATUS_UNKNOWN;
+ enum healthcheck_status tout_st = HCHK_STATUS_UNKNOWN;
+ unsigned int flags = 0;
+ long min_recv = -1;
+ int inverse = 0;
+
+ on_success_msg = on_error_msg = comment = pattern = npat = vpat = NULL;
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "expects at least a matching pattern as arguments");
+ goto error;
+ }
+
+ cur_arg++;
+ while (*(args[cur_arg])) {
+ int in_pattern = 0;
+
+ rescan:
+ if (strcmp(args[cur_arg], "min-recv") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a integer as argument", args[cur_arg]);
+ goto error;
+ }
+ /* Use an signed integer here because of bufsize */
+ cur_arg++;
+ min_recv = atol(args[cur_arg]);
+ if (min_recv < -1 || min_recv > INT_MAX) {
+ memprintf(errmsg, "'%s' expects -1 or an integer from 0 to INT_MAX" , args[cur_arg-1]);
+ goto error;
+ }
+ }
+ else if (*(args[cur_arg]) == '!') {
+ in_pattern = 1;
+ while (*(args[cur_arg]) == '!') {
+ inverse = !inverse;
+ args[cur_arg]++;
+ }
+ if (!*(args[cur_arg]))
+ cur_arg++;
+ goto rescan;
+ }
+ else if (strcmp(args[cur_arg], "string") == 0 || strcmp(args[cur_arg], "rstring") == 0) {
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ if (proto != TCPCHK_RULES_HTTP_CHK)
+ type = ((*(args[cur_arg]) == 's') ? TCPCHK_EXPECT_STRING : TCPCHK_EXPECT_STRING_REGEX);
+ else
+ type = ((*(args[cur_arg]) == 's') ? TCPCHK_EXPECT_HTTP_BODY : TCPCHK_EXPECT_HTTP_BODY_REGEX);
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a <pattern> as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ pattern = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "binary") == 0 || strcmp(args[cur_arg], "rbinary") == 0) {
+ if (proto == TCPCHK_RULES_HTTP_CHK)
+ goto bad_http_kw;
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ type = ((*(args[cur_arg]) == 'b') ? TCPCHK_EXPECT_BINARY : TCPCHK_EXPECT_BINARY_REGEX);
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a <pattern> as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ pattern = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "string-lf") == 0 || strcmp(args[cur_arg], "binary-lf") == 0) {
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ if (proto != TCPCHK_RULES_HTTP_CHK)
+ type = ((*(args[cur_arg]) == 's') ? TCPCHK_EXPECT_STRING_LF : TCPCHK_EXPECT_BINARY_LF);
+ else {
+ if (*(args[cur_arg]) != 's')
+ goto bad_http_kw;
+ type = TCPCHK_EXPECT_HTTP_BODY_LF;
+ }
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a <pattern> as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ pattern = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "status") == 0 || strcmp(args[cur_arg], "rstatus") == 0) {
+ if (proto != TCPCHK_RULES_HTTP_CHK)
+ goto bad_tcp_kw;
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ type = ((*(args[cur_arg]) == 's') ? TCPCHK_EXPECT_HTTP_STATUS : TCPCHK_EXPECT_HTTP_STATUS_REGEX);
+
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a <pattern> as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ pattern = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "custom") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ type = TCPCHK_EXPECT_CUSTOM;
+ }
+ else if (strcmp(args[cur_arg], "hdr") == 0 || strcmp(args[cur_arg], "fhdr") == 0) {
+ int orig_arg = cur_arg;
+
+ if (proto != TCPCHK_RULES_HTTP_CHK)
+ goto bad_tcp_kw;
+ if (type != TCPCHK_EXPECT_UNDEF) {
+ memprintf(errmsg, "only on pattern expected");
+ goto error;
+ }
+ type = TCPCHK_EXPECT_HTTP_HEADER;
+
+ if (strcmp(args[cur_arg], "fhdr") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_FULL;
+
+ /* Parse the name pattern, mandatory */
+ if (!*(args[cur_arg+1]) || !*(args[cur_arg+2]) ||
+ (strcmp(args[cur_arg+1], "name") != 0 && strcmp(args[cur_arg+1], "name-lf") != 0)) {
+ memprintf(errmsg, "'%s' expects at the name keyword as first argument followed by a pattern",
+ args[orig_arg]);
+ goto error;
+ }
+
+ if (strcmp(args[cur_arg+1], "name-lf") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_FMT;
+
+ cur_arg += 2;
+ if (strcmp(args[cur_arg], "-m") == 0) {
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' : '%s' expects at a matching pattern ('str', 'beg', 'end', 'sub' or 'reg')",
+ args[orig_arg], args[cur_arg]);
+ goto error;
+ }
+ if (strcmp(args[cur_arg+1], "str") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_STR;
+ else if (strcmp(args[cur_arg+1], "beg") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_BEG;
+ else if (strcmp(args[cur_arg+1], "end") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_END;
+ else if (strcmp(args[cur_arg+1], "sub") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_SUB;
+ else if (strcmp(args[cur_arg+1], "reg") == 0) {
+ if (flags & TCPCHK_EXPT_FL_HTTP_HNAME_FMT) {
+ memprintf(errmsg, "'%s': log-format string is not supported with a regex matching method",
+ args[orig_arg]);
+ goto error;
+ }
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_REG;
+ }
+ else {
+ memprintf(errmsg, "'%s' : '%s' only supports 'str', 'beg', 'end', 'sub' or 'reg' (got '%s')",
+ args[orig_arg], args[cur_arg], args[cur_arg+1]);
+ goto error;
+ }
+ cur_arg += 2;
+ }
+ else
+ flags |= TCPCHK_EXPT_FL_HTTP_HNAME_STR;
+ npat = args[cur_arg];
+
+ if (!*(args[cur_arg+1]) ||
+ (strcmp(args[cur_arg+1], "value") != 0 && strcmp(args[cur_arg+1], "value-lf") != 0)) {
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_NONE;
+ goto next;
+ }
+ if (strcmp(args[cur_arg+1], "value-lf") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_FMT;
+
+ /* Parse the value pattern, optional */
+ if (strcmp(args[cur_arg+2], "-m") == 0) {
+ cur_arg += 2;
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' : '%s' expects at a matching pattern ('str', 'beg', 'end', 'sub' or 'reg')",
+ args[orig_arg], args[cur_arg]);
+ goto error;
+ }
+ if (strcmp(args[cur_arg+1], "str") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_STR;
+ else if (strcmp(args[cur_arg+1], "beg") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_BEG;
+ else if (strcmp(args[cur_arg+1], "end") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_END;
+ else if (strcmp(args[cur_arg+1], "sub") == 0)
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_SUB;
+ else if (strcmp(args[cur_arg+1], "reg") == 0) {
+ if (flags & TCPCHK_EXPT_FL_HTTP_HVAL_FMT) {
+ memprintf(errmsg, "'%s': log-format string is not supported with a regex matching method",
+ args[orig_arg]);
+ goto error;
+ }
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_REG;
+ }
+ else {
+ memprintf(errmsg, "'%s' : '%s' only supports 'str', 'beg', 'end', 'sub' or 'reg' (got '%s')",
+ args[orig_arg], args[cur_arg], args[cur_arg+1]);
+ goto error;
+ }
+ }
+ else
+ flags |= TCPCHK_EXPT_FL_HTTP_HVAL_STR;
+
+ if (!*(args[cur_arg+2])) {
+ memprintf(errmsg, "'%s' expect a pattern with the value keyword", args[orig_arg]);
+ goto error;
+ }
+ vpat = args[cur_arg+2];
+ cur_arg += 2;
+ }
+ else if (strcmp(args[cur_arg], "comment") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ free(comment);
+ comment = strdup(args[cur_arg]);
+ if (!comment) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+ else if (strcmp(args[cur_arg], "on-success") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ on_success_msg = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "on-error") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ cur_arg++;
+ on_error_msg = args[cur_arg];
+ }
+ else if (strcmp(args[cur_arg], "ok-status") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ if (strcasecmp(args[cur_arg+1], "L7OK") == 0)
+ ok_st = HCHK_STATUS_L7OKD;
+ else if (strcasecmp(args[cur_arg+1], "L7OKC") == 0)
+ ok_st = HCHK_STATUS_L7OKCD;
+ else if (strcasecmp(args[cur_arg+1], "L6OK") == 0)
+ ok_st = HCHK_STATUS_L6OK;
+ else if (strcasecmp(args[cur_arg+1], "L4OK") == 0)
+ ok_st = HCHK_STATUS_L4OK;
+ else {
+ memprintf(errmsg, "'%s' only supports 'L4OK', 'L6OK', 'L7OK' or 'L7OKC' status (got '%s').",
+ args[cur_arg], args[cur_arg+1]);
+ goto error;
+ }
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "error-status") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ if (strcasecmp(args[cur_arg+1], "L7RSP") == 0)
+ err_st = HCHK_STATUS_L7RSP;
+ else if (strcasecmp(args[cur_arg+1], "L7STS") == 0)
+ err_st = HCHK_STATUS_L7STS;
+ else if (strcasecmp(args[cur_arg+1], "L7OKC") == 0)
+ err_st = HCHK_STATUS_L7OKCD;
+ else if (strcasecmp(args[cur_arg+1], "L6RSP") == 0)
+ err_st = HCHK_STATUS_L6RSP;
+ else if (strcasecmp(args[cur_arg+1], "L4CON") == 0)
+ err_st = HCHK_STATUS_L4CON;
+ else {
+ memprintf(errmsg, "'%s' only supports 'L4CON', 'L6RSP', 'L7RSP' or 'L7STS' status (got '%s').",
+ args[cur_arg], args[cur_arg+1]);
+ goto error;
+ }
+ cur_arg++;
+ }
+ else if (strcmp(args[cur_arg], "status-code") == 0) {
+ int idx = 0;
+
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects an expression as argument", args[cur_arg]);
+ goto error;
+ }
+
+ cur_arg++;
+ release_sample_expr(status_expr);
+ px->conf.args.ctx = ARGC_SRV;
+ status_expr = sample_parse_expr((char *[]){args[cur_arg], NULL}, &idx,
+ file, line, errmsg, &px->conf.args, NULL);
+ if (!status_expr) {
+ memprintf(errmsg, "error detected while parsing status-code expression : %s", *errmsg);
+ goto error;
+ }
+ if (!(status_expr->fetch->val & SMP_VAL_BE_CHK_RUL)) {
+ memprintf(errmsg, "error detected while parsing status-code expression : "
+ " fetch method '%s' extracts information from '%s', "
+ "none of which is available here.\n",
+ args[cur_arg], sample_src_names(status_expr->fetch->use));
+ goto error;
+ }
+ px->http_needed |= !!(status_expr->fetch->use & SMP_USE_HTTP_ANY);
+ }
+ else if (strcmp(args[cur_arg], "tout-status") == 0) {
+ if (in_pattern) {
+ memprintf(errmsg, "[!] not supported with '%s'", args[cur_arg]);
+ goto error;
+ }
+ if (!*(args[cur_arg+1])) {
+ memprintf(errmsg, "'%s' expects a string as argument", args[cur_arg]);
+ goto error;
+ }
+ if (strcasecmp(args[cur_arg+1], "L7TOUT") == 0)
+ tout_st = HCHK_STATUS_L7TOUT;
+ else if (strcasecmp(args[cur_arg+1], "L6TOUT") == 0)
+ tout_st = HCHK_STATUS_L6TOUT;
+ else if (strcasecmp(args[cur_arg+1], "L4TOUT") == 0)
+ tout_st = HCHK_STATUS_L4TOUT;
+ else {
+ memprintf(errmsg, "'%s' only supports 'L4TOUT', 'L6TOUT' or 'L7TOUT' status (got '%s').",
+ args[cur_arg], args[cur_arg+1]);
+ goto error;
+ }
+ cur_arg++;
+ }
+ else {
+ if (proto == TCPCHK_RULES_HTTP_CHK) {
+ bad_http_kw:
+ memprintf(errmsg, "'only supports min-recv, [!]string', '[!]rstring', '[!]string-lf', '[!]status', "
+ "'[!]rstatus', [!]hdr, [!]fhdr or comment but got '%s' as argument.", args[cur_arg]);
+ }
+ else {
+ bad_tcp_kw:
+ memprintf(errmsg, "'only supports min-recv, '[!]binary', '[!]string', '[!]rstring', '[!]string-lf'"
+ "'[!]rbinary', '[!]binary-lf' or comment but got '%s' as argument.", args[cur_arg]);
+ }
+ goto error;
+ }
+ next:
+ cur_arg++;
+ }
+
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->action = TCPCHK_ACT_EXPECT;
+ LIST_INIT(&chk->expect.onerror_fmt);
+ LIST_INIT(&chk->expect.onsuccess_fmt);
+ chk->comment = comment; comment = NULL;
+ chk->expect.type = type;
+ chk->expect.min_recv = min_recv;
+ chk->expect.flags = flags | (inverse ? TCPCHK_EXPT_FL_INV : 0);
+ chk->expect.ok_status = ok_st;
+ chk->expect.err_status = err_st;
+ chk->expect.tout_status = tout_st;
+ chk->expect.status_expr = status_expr; status_expr = NULL;
+
+ if (on_success_msg) {
+ px->conf.args.ctx = ARGC_SRV;
+ if (!parse_logformat_string(on_success_msg, px, &chk->expect.onsuccess_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", on_success_msg, *errmsg);
+ goto error;
+ }
+ }
+ if (on_error_msg) {
+ px->conf.args.ctx = ARGC_SRV;
+ if (!parse_logformat_string(on_error_msg, px, &chk->expect.onerror_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", on_error_msg, *errmsg);
+ goto error;
+ }
+ }
+
+ switch (chk->expect.type) {
+ case TCPCHK_EXPECT_HTTP_STATUS: {
+ const char *p = pattern;
+ unsigned int c1,c2;
+
+ chk->expect.codes.codes = NULL;
+ chk->expect.codes.num = 0;
+ while (1) {
+ c1 = c2 = read_uint(&p, pattern + strlen(pattern));
+ if (*p == '-') {
+ p++;
+ c2 = read_uint(&p, pattern + strlen(pattern));
+ }
+ if (c1 > c2) {
+ memprintf(errmsg, "invalid range of status codes '%s'", pattern);
+ goto error;
+ }
+
+ chk->expect.codes.num++;
+ chk->expect.codes.codes = my_realloc2(chk->expect.codes.codes,
+ chk->expect.codes.num * sizeof(*chk->expect.codes.codes));
+ if (!chk->expect.codes.codes) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->expect.codes.codes[chk->expect.codes.num-1][0] = c1;
+ chk->expect.codes.codes[chk->expect.codes.num-1][1] = c2;
+
+ if (*p == '\0')
+ break;
+ if (*p != ',') {
+ memprintf(errmsg, "invalid character '%c' in the list of status codes", *p);
+ goto error;
+ }
+ p++;
+ }
+ break;
+ }
+ case TCPCHK_EXPECT_STRING:
+ case TCPCHK_EXPECT_HTTP_BODY:
+ chk->expect.data = ist(strdup(pattern));
+ if (!isttest(chk->expect.data)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ break;
+ case TCPCHK_EXPECT_BINARY: {
+ int len = chk->expect.data.len;
+
+ if (parse_binary(pattern, &chk->expect.data.ptr, &len, errmsg) == 0) {
+ memprintf(errmsg, "invalid binary string (%s)", *errmsg);
+ goto error;
+ }
+ chk->expect.data.len = len;
+ break;
+ }
+ case TCPCHK_EXPECT_STRING_REGEX:
+ case TCPCHK_EXPECT_BINARY_REGEX:
+ case TCPCHK_EXPECT_HTTP_STATUS_REGEX:
+ case TCPCHK_EXPECT_HTTP_BODY_REGEX:
+ chk->expect.regex = regex_comp(pattern, 1, 0, errmsg);
+ if (!chk->expect.regex)
+ goto error;
+ break;
+
+ case TCPCHK_EXPECT_STRING_LF:
+ case TCPCHK_EXPECT_BINARY_LF:
+ case TCPCHK_EXPECT_HTTP_BODY_LF:
+ LIST_INIT(&chk->expect.fmt);
+ px->conf.args.ctx = ARGC_SRV;
+ if (!parse_logformat_string(pattern, px, &chk->expect.fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", pattern, *errmsg);
+ goto error;
+ }
+ break;
+
+ case TCPCHK_EXPECT_HTTP_HEADER:
+ if (!npat) {
+ memprintf(errmsg, "unexpected error, undefined header name pattern");
+ goto error;
+ }
+ if (chk->expect.flags & TCPCHK_EXPT_FL_HTTP_HNAME_REG) {
+ chk->expect.hdr.name_re = regex_comp(npat, 0, 0, errmsg);
+ if (!chk->expect.hdr.name_re)
+ goto error;
+ }
+ else if (chk->expect.flags & TCPCHK_EXPT_FL_HTTP_HNAME_FMT) {
+ px->conf.args.ctx = ARGC_SRV;
+ LIST_INIT(&chk->expect.hdr.name_fmt);
+ if (!parse_logformat_string(npat, px, &chk->expect.hdr.name_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", npat, *errmsg);
+ goto error;
+ }
+ }
+ else {
+ chk->expect.hdr.name = ist(strdup(npat));
+ if (!isttest(chk->expect.hdr.name)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+
+ if (chk->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_NONE) {
+ chk->expect.hdr.value = IST_NULL;
+ break;
+ }
+
+ if (!vpat) {
+ memprintf(errmsg, "unexpected error, undefined header value pattern");
+ goto error;
+ }
+ else if (chk->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_REG) {
+ chk->expect.hdr.value_re = regex_comp(vpat, 1, 0, errmsg);
+ if (!chk->expect.hdr.value_re)
+ goto error;
+ }
+ else if (chk->expect.flags & TCPCHK_EXPT_FL_HTTP_HVAL_FMT) {
+ px->conf.args.ctx = ARGC_SRV;
+ LIST_INIT(&chk->expect.hdr.value_fmt);
+ if (!parse_logformat_string(vpat, px, &chk->expect.hdr.value_fmt, 0, SMP_VAL_BE_CHK_RUL, errmsg)) {
+ memprintf(errmsg, "'%s' invalid log-format string (%s).\n", npat, *errmsg);
+ goto error;
+ }
+ }
+ else {
+ chk->expect.hdr.value = ist(strdup(vpat));
+ if (!isttest(chk->expect.hdr.value)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+
+ break;
+ case TCPCHK_EXPECT_CUSTOM:
+ chk->expect.custom = NULL; /* Must be defined by the caller ! */
+ break;
+ case TCPCHK_EXPECT_UNDEF:
+ memprintf(errmsg, "pattern not found");
+ goto error;
+ }
+
+ /* All tcp-check expect points back to the first inverse expect rule in
+ * a chain of one or more expect rule, potentially itself.
+ */
+ chk->expect.head = chk;
+ list_for_each_entry_rev(prev_check, rules, list) {
+ if (prev_check->action == TCPCHK_ACT_EXPECT) {
+ if (prev_check->expect.flags & TCPCHK_EXPT_FL_INV)
+ chk->expect.head = prev_check;
+ continue;
+ }
+ if (prev_check->action != TCPCHK_ACT_COMMENT && prev_check->action != TCPCHK_ACT_ACTION_KW)
+ break;
+ }
+ return chk;
+
+ error:
+ free_tcpcheck(chk, 0);
+ free(comment);
+ release_sample_expr(status_expr);
+ return NULL;
+}
+
+/* Overwrites fields of the old http send rule with those of the new one. When
+ * replaced, old values are freed and replaced by the new ones. New values are
+ * not copied but transferred: ownership moves from <new> to <old>. At the end
+ * <new> should be empty and can be safely released. This function never fails.
+ */
+void tcpcheck_overwrite_send_http_rule(struct tcpcheck_rule *old, struct tcpcheck_rule *new)
+{
+ struct logformat_node *lf, *lfb;
+ struct tcpcheck_http_hdr *hdr, *bhdr;
+
+
+ /* method: free the old string buffer and steal the new one */
+ if (new->send.http.meth.str.area) {
+ free(old->send.http.meth.str.area);
+ old->send.http.meth.meth = new->send.http.meth.meth;
+ old->send.http.meth.str.area = new->send.http.meth.str.area;
+ old->send.http.meth.str.data = new->send.http.meth.str.data;
+ new->send.http.meth.str = BUF_NULL;
+ }
+
+ /* URI: the plain-string (uri) and log-format (uri_fmt) variants share a
+ * union, so the old one must be released according to its own flag
+ * before the new one is transferred.
+ */
+ if (!(new->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT) && isttest(new->send.http.uri)) {
+ if (!(old->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT))
+ istfree(&old->send.http.uri);
+ else
+ free_tcpcheck_fmt(&old->send.http.uri_fmt);
+ old->send.http.flags &= ~TCPCHK_SND_HTTP_FL_URI_FMT;
+ old->send.http.uri = new->send.http.uri;
+ new->send.http.uri = IST_NULL;
+ }
+ else if ((new->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT) && !LIST_ISEMPTY(&new->send.http.uri_fmt)) {
+ if (!(old->send.http.flags & TCPCHK_SND_HTTP_FL_URI_FMT))
+ istfree(&old->send.http.uri);
+ else
+ free_tcpcheck_fmt(&old->send.http.uri_fmt);
+ old->send.http.flags |= TCPCHK_SND_HTTP_FL_URI_FMT;
+ LIST_INIT(&old->send.http.uri_fmt);
+ /* move the log-format nodes one by one from <new> to <old> */
+ list_for_each_entry_safe(lf, lfb, &new->send.http.uri_fmt, list) {
+ LIST_DELETE(&lf->list);
+ LIST_APPEND(&old->send.http.uri_fmt, &lf->list);
+ }
+ }
+
+ /* HTTP version string */
+ if (isttest(new->send.http.vsn)) {
+ istfree(&old->send.http.vsn);
+ old->send.http.vsn = new->send.http.vsn;
+ new->send.http.vsn = IST_NULL;
+ }
+
+ /* extra headers: drop the old list entirely and move the new one */
+ if (!LIST_ISEMPTY(&new->send.http.hdrs)) {
+ free_tcpcheck_http_hdrs(&old->send.http.hdrs);
+ list_for_each_entry_safe(hdr, bhdr, &new->send.http.hdrs, list) {
+ LIST_DELETE(&hdr->list);
+ LIST_APPEND(&old->send.http.hdrs, &hdr->list);
+ }
+ }
+
+ /* body: same plain-string vs log-format handling as the URI above */
+ if (!(new->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) && isttest(new->send.http.body)) {
+ if (!(old->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT))
+ istfree(&old->send.http.body);
+ else
+ free_tcpcheck_fmt(&old->send.http.body_fmt);
+ old->send.http.flags &= ~TCPCHK_SND_HTTP_FL_BODY_FMT;
+ old->send.http.body = new->send.http.body;
+ new->send.http.body = IST_NULL;
+ }
+ else if ((new->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT) && !LIST_ISEMPTY(&new->send.http.body_fmt)) {
+ if (!(old->send.http.flags & TCPCHK_SND_HTTP_FL_BODY_FMT))
+ istfree(&old->send.http.body);
+ else
+ free_tcpcheck_fmt(&old->send.http.body_fmt);
+ old->send.http.flags |= TCPCHK_SND_HTTP_FL_BODY_FMT;
+ LIST_INIT(&old->send.http.body_fmt);
+ list_for_each_entry_safe(lf, lfb, &new->send.http.body_fmt, list) {
+ LIST_DELETE(&lf->list);
+ LIST_APPEND(&old->send.http.body_fmt, &lf->list);
+ }
+ }
+}
+
+/* Internal function used to add an http-check rule in a list during the config
+ * parsing step. Depending on its type, and the previously inserted rules, a
+ * specific action may be performed or an error may be reported. This function
+ * returns 1 on success and 0 on error and <errmsg> is filled with the error
+ * message.
+ */
+int tcpcheck_add_http_rule(struct tcpcheck_rule *chk, struct tcpcheck_rules *rules, char **errmsg)
+{
+ struct tcpcheck_rule *r;
+
+ /* the implicit send rule coming from an "option httpchk" line must be
+ * merged with the first explicit http-check send rule, if
+ * any. Depending on the declaration order some tests are required.
+ *
+ * Some tests are also required for other kinds of http-check rules to be
+ * sure the ruleset remains valid.
+ */
+
+ if (chk->action == TCPCHK_ACT_SEND && (chk->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT)) {
+ /* Tries to add an implicit http-check send rule from an "option httpchk" line.
+ * First, the first rule is retrieved, skipping the first CONNECT, if any, and
+ * following tests are performed :
+ *
+ * 1- If there is no such rule or if it is not a send rule, the implicit send
+ * rule is pushed in front of the ruleset
+ *
+ * 2- If it is another implicit send rule, it is replaced with the new one.
+ *
+ * 3- Otherwise, it means it is an explicit send rule. In this case we merge
+ * both, overwriting the old send rule (the explicit one) with info of the
+ * new send rule (the implicit one).
+ */
+ r = get_first_tcpcheck_rule(rules);
+ if (r && r->action == TCPCHK_ACT_CONNECT)
+ r = get_next_tcpcheck_rule(rules, r);
+ if (!r || r->action != TCPCHK_ACT_SEND)
+ LIST_INSERT(rules->list, &chk->list);
+ else if (r->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT) {
+ LIST_DELETE(&r->list);
+ free_tcpcheck(r, 0);
+ LIST_INSERT(rules->list, &chk->list);
+ }
+ else {
+ tcpcheck_overwrite_send_http_rule(r, chk);
+ free_tcpcheck(chk, 0);
+ }
+ }
+ else {
+ /* Tries to add an explicit http-check rule. First of all we check the type of the
+ * last inserted rule to be sure it is valid. Then for send rule, we try to merge it
+ * with an existing implicit send rule, if any. At the end, if there is no error,
+ * the rule is appended to the list.
+ */
+
+ r = get_last_tcpcheck_rule(rules);
+ if (!r || (r->action == TCPCHK_ACT_SEND && (r->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT)))
+ /* no error */;
+ else if (r->action != TCPCHK_ACT_CONNECT && chk->action == TCPCHK_ACT_SEND) {
+ memprintf(errmsg, "unable to add http-check send rule at step %d (missing connect rule).",
+ chk->index+1);
+ return 0;
+ }
+ else if (r->action != TCPCHK_ACT_SEND && r->action != TCPCHK_ACT_EXPECT && chk->action == TCPCHK_ACT_EXPECT) {
+ memprintf(errmsg, "unable to add http-check expect rule at step %d (missing send rule).",
+ chk->index+1);
+ return 0;
+ }
+ else if (r->action != TCPCHK_ACT_EXPECT && chk->action == TCPCHK_ACT_CONNECT) {
+ memprintf(errmsg, "unable to add http-check connect rule at step %d (missing expect rule).",
+ chk->index+1);
+ return 0;
+ }
+
+ /* merge an explicit send rule into a leading implicit one, if any;
+ * the merged rule loses its FROM_OPT flag and takes <chk>'s place.
+ */
+ if (chk->action == TCPCHK_ACT_SEND) {
+ r = get_first_tcpcheck_rule(rules);
+ if (r && r->action == TCPCHK_ACT_SEND && (r->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT)) {
+ tcpcheck_overwrite_send_http_rule(r, chk);
+ free_tcpcheck(chk, 0);
+ LIST_DELETE(&r->list);
+ r->send.http.flags &= ~TCPCHK_SND_HTTP_FROM_OPT;
+ chk = r;
+ }
+ }
+ LIST_APPEND(rules->list, &chk->list);
+ }
+ return 1;
+}
+
+/* Check tcp-check health-check configuration for the proxy <px>. Performs the
+ * post-parsing fixups: reordering the implicit "option httpchk" send rule,
+ * adding implicit expect/connect rules when missing, and folding comment
+ * rules into the following rules. Returns an ERR_* code (ERR_NONE on success).
+ */
+static int check_proxy_tcpcheck(struct proxy *px)
+{
+ struct tcpcheck_rule *chk, *back;
+ char *comment = NULL, *errmsg = NULL;
+ enum tcpcheck_rule_type prev_action = TCPCHK_ACT_COMMENT;
+ int ret = ERR_NONE;
+
+ /* nothing to check for non-backends or proxies not using tcp-check */
+ if (!(px->cap & PR_CAP_BE) || (px->options2 & PR_O2_CHK_ANY) != PR_O2_TCPCHK_CHK) {
+ deinit_proxy_tcpcheck(px);
+ goto out;
+ }
+
+ ha_free(&px->check_command);
+ ha_free(&px->check_path);
+
+ if (!px->tcpcheck_rules.list) {
+ ha_alert("proxy '%s' : tcp-check configured but no ruleset defined.\n", px->id);
+ ret |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+
+ /* HTTP ruleset only : */
+ if ((px->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK) {
+ struct tcpcheck_rule *next;
+
+ /* move remaining implicit send rule from "option httpchk" line to the right place.
+ * If such rule exists, it must be the first one. In this case, the rule is moved
+ * after the first connect rule, if any. Otherwise, nothing is done.
+ */
+ chk = get_first_tcpcheck_rule(&px->tcpcheck_rules);
+ if (chk && chk->action == TCPCHK_ACT_SEND && (chk->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT)) {
+ next = get_next_tcpcheck_rule(&px->tcpcheck_rules, chk);
+ if (next && next->action == TCPCHK_ACT_CONNECT) {
+ LIST_DELETE(&chk->list);
+ LIST_INSERT(&next->list, &chk->list);
+ chk->index = next->index + 1;
+ }
+ }
+
+ /* add implicit expect rule if the last one is a send. It is inherited from previous
+ * versions where the http expect rule was optional. Now it is possible to chain
+ * send/expect rules but the last expect may still be implicit.
+ */
+ chk = get_last_tcpcheck_rule(&px->tcpcheck_rules);
+ if (chk && chk->action == TCPCHK_ACT_SEND) {
+ next = parse_tcpcheck_expect((char *[]){"http-check", "expect", "status", "200-399", ""},
+ 1, px, px->tcpcheck_rules.list, TCPCHK_RULES_HTTP_CHK,
+ px->conf.file, px->conf.line, &errmsg);
+ if (!next) {
+ ha_alert("proxy '%s': unable to add implicit http-check expect rule "
+ "(%s).\n", px->id, errmsg);
+ free(errmsg);
+ ret |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ LIST_APPEND(px->tcpcheck_rules.list, &next->list);
+ next->index = chk->index + 1;
+ }
+ }
+
+ /* For all ruleset: */
+
+ /* If there is no connect rule preceding all send / expect rules, an
+ * implicit one is inserted before all others.
+ */
+ chk = get_first_tcpcheck_rule(&px->tcpcheck_rules);
+ if (!chk || chk->action != TCPCHK_ACT_CONNECT) {
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ ha_alert("proxy '%s': unable to add implicit tcp-check connect rule "
+ "(out of memory).\n", px->id);
+ ret |= ERR_ALERT | ERR_FATAL;
+ goto out;
+ }
+ chk->action = TCPCHK_ACT_CONNECT;
+ chk->connect.options = (TCPCHK_OPT_DEFAULT_CONNECT|TCPCHK_OPT_IMPLICIT);
+ LIST_INSERT(px->tcpcheck_rules.list, &chk->list);
+ }
+
+ /* Remove all comment rules. To do so, when such a rule is found, the
+ * comment is assigned to the following rule(s).
+ */
+ list_for_each_entry_safe(chk, back, px->tcpcheck_rules.list, list) {
+ struct tcpcheck_rule *next;
+
+ /* a pending comment only applies to the immediately following run
+ * of rules; drop it when the action type changes.
+ */
+ if (chk->action != prev_action && prev_action != TCPCHK_ACT_COMMENT)
+ ha_free(&comment);
+
+ prev_action = chk->action;
+ switch (chk->action) {
+ case TCPCHK_ACT_COMMENT:
+ /* take ownership of the comment string; plain free() is
+ * enough since the rule no longer owns it.
+ */
+ free(comment);
+ comment = chk->comment;
+ LIST_DELETE(&chk->list);
+ free(chk);
+ break;
+ case TCPCHK_ACT_CONNECT:
+ if (!chk->comment && comment)
+ chk->comment = strdup(comment);
+ next = get_next_tcpcheck_rule(&px->tcpcheck_rules, chk);
+ if (next && next->action == TCPCHK_ACT_SEND)
+ chk->connect.options |= TCPCHK_OPT_HAS_DATA;
+ __fallthrough;
+ case TCPCHK_ACT_ACTION_KW:
+ ha_free(&comment);
+ break;
+ case TCPCHK_ACT_SEND:
+ case TCPCHK_ACT_EXPECT:
+ if (!chk->comment && comment)
+ chk->comment = strdup(comment);
+ break;
+ }
+ }
+ ha_free(&comment);
+
+ out:
+ return ret;
+}
+
+/* Releases the preset tcp-check variables of proxy <px> and detaches it from
+ * its rule list. The rules themselves are not freed here: the list may point
+ * into a shared ruleset, released separately (see deinit_tcpchecks()).
+ */
+void deinit_proxy_tcpcheck(struct proxy *px)
+{
+ free_tcpcheck_vars(&px->tcpcheck_rules.preset_vars);
+ px->tcpcheck_rules.flags = 0;
+ px->tcpcheck_rules.list = NULL;
+}
+
+/* Releases all tcp-check rulesets registered in the shared_tcpchecks tree:
+ * each node is removed from the tree, its key and all of its rules are freed,
+ * then the ruleset itself is released.
+ */
+static void deinit_tcpchecks()
+{
+ struct tcpcheck_ruleset *rs;
+ struct tcpcheck_rule *r, *rb;
+ struct ebpt_node *node, *next;
+
+ node = ebpt_first(&shared_tcpchecks);
+ while (node) {
+ /* fetch the successor before deleting the current node */
+ next = ebpt_next(node);
+ ebpt_delete(node);
+ free(node->key);
+ rs = container_of(node, typeof(*rs), node);
+ list_for_each_entry_safe(r, rb, &rs->rules, list) {
+ LIST_DELETE(&r->list);
+ free_tcpcheck(r, 0);
+ }
+ free(rs);
+ node = next;
+ }
+}
+
+/* Creates an expect rule matching the literal string <str> and appends it to
+ * the rule list <rules>. Used by protocol check helpers to build implicit
+ * rulesets. Returns 1 on success and 0 on allocation failure.
+ */
+int add_tcpcheck_expect_str(struct tcpcheck_rules *rules, const char *str)
+{
+ struct tcpcheck_rule *tcpcheck, *prev_check;
+ struct tcpcheck_expect *expect;
+
+ if ((tcpcheck = pool_zalloc(pool_head_tcpcheck_rule)) == NULL)
+ return 0;
+ tcpcheck->action = TCPCHK_ACT_EXPECT;
+
+ expect = &tcpcheck->expect;
+ expect->type = TCPCHK_EXPECT_STRING;
+ LIST_INIT(&expect->onerror_fmt);
+ LIST_INIT(&expect->onsuccess_fmt);
+ expect->ok_status = HCHK_STATUS_L7OKD;
+ expect->err_status = HCHK_STATUS_L7RSP;
+ expect->tout_status = HCHK_STATUS_L7TOUT;
+ expect->data = ist(strdup(str));
+ if (!isttest(expect->data)) {
+ /* strdup() failed: release the rule and report the error */
+ pool_free(pool_head_tcpcheck_rule, tcpcheck);
+ return 0;
+ }
+
+ /* All tcp-check expect points back to the first inverse expect rule
+ * in a chain of one or more expect rule, potentially itself.
+ */
+ tcpcheck->expect.head = tcpcheck;
+ list_for_each_entry_rev(prev_check, rules->list, list) {
+ if (prev_check->action == TCPCHK_ACT_EXPECT) {
+ if (prev_check->expect.flags & TCPCHK_EXPT_FL_INV)
+ tcpcheck->expect.head = prev_check;
+ continue;
+ }
+ if (prev_check->action != TCPCHK_ACT_COMMENT && prev_check->action != TCPCHK_ACT_ACTION_KW)
+ break;
+ }
+ LIST_APPEND(rules->list, &tcpcheck->list);
+ return 1;
+}
+
+/* Creates a send rule whose payload is the concatenation of the NULL-terminated
+ * array of strings <strs>, and appends it to the rule list <rules>. Returns 1
+ * on success and 0 on allocation failure.
+ */
+int add_tcpcheck_send_strs(struct tcpcheck_rules *rules, const char * const *strs)
+{
+ struct tcpcheck_rule *tcpcheck;
+ struct tcpcheck_send *send;
+ const char *in;
+ char *dst;
+ int i;
+
+ if ((tcpcheck = pool_zalloc(pool_head_tcpcheck_rule)) == NULL)
+ return 0;
+ tcpcheck->action = TCPCHK_ACT_SEND;
+
+ send = &tcpcheck->send;
+ send->type = TCPCHK_SEND_STRING;
+
+ /* first pass: compute the total payload length */
+ for (i = 0; strs[i]; i++)
+ send->data.len += strlen(strs[i]);
+
+ send->data.ptr = malloc(istlen(send->data) + 1);
+ if (!isttest(send->data)) {
+ pool_free(pool_head_tcpcheck_rule, tcpcheck);
+ return 0;
+ }
+
+ /* second pass: copy each string; the inner loop's assignment-in-condition
+ * stops at each source's trailing NUL without copying it.
+ */
+ dst = istptr(send->data);
+ for (i = 0; strs[i]; i++)
+ for (in = strs[i]; (*dst = *in++); dst++);
+ *dst = 0;
+
+ LIST_APPEND(rules->list, &tcpcheck->list);
+ return 1;
+}
+
+/* Parses the "tcp-check" proxy keyword. Returns -1 on error, 1 when a warning
+ * was emitted (non-backend proxy or warning message in <errmsg>), 0 on success.
+ */
+static int proxy_parse_tcpcheck(char **args, int section, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **errmsg)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rule *chk = NULL;
+ int index, cur_arg, ret = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[0], NULL))
+ ret = 1;
+
+ /* Deduce the ruleset name from the proxy info */
+ chunk_printf(&trash, "*tcp-check-%s_%s-%d",
+ ((curpx == defpx) ? "defaults" : curpx->id),
+ curpx->conf.file, curpx->conf.line);
+
+ rs = find_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ rs = create_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ memprintf(errmsg, "out of memory.\n");
+ goto error;
+ }
+ }
+
+ /* the new rule's index continues after the last rule of the ruleset */
+ index = 0;
+ if (!LIST_ISEMPTY(&rs->rules)) {
+ chk = LIST_PREV(&rs->rules, typeof(chk), list);
+ index = chk->index + 1;
+ chk = NULL;
+ }
+
+ /* dispatch on the tcp-check sub-keyword */
+ cur_arg = 1;
+ if (strcmp(args[cur_arg], "connect") == 0)
+ chk = parse_tcpcheck_connect(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else if (strcmp(args[cur_arg], "send") == 0 || strcmp(args[cur_arg], "send-binary") == 0 ||
+ strcmp(args[cur_arg], "send-lf") == 0 || strcmp(args[cur_arg], "send-binary-lf") == 0)
+ chk = parse_tcpcheck_send(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else if (strcmp(args[cur_arg], "expect") == 0)
+ chk = parse_tcpcheck_expect(args, cur_arg, curpx, &rs->rules, 0, file, line, errmsg);
+ else if (strcmp(args[cur_arg], "comment") == 0)
+ chk = parse_tcpcheck_comment(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else {
+ /* fall back to registered tcp-check action keywords */
+ struct action_kw *kw = action_kw_tcp_check_lookup(args[cur_arg]);
+
+ if (!kw) {
+ action_kw_tcp_check_build_list(&trash);
+ memprintf(errmsg, "'%s' only supports 'comment', 'connect', 'send', 'send-binary', 'expect'"
+ "%s%s. but got '%s'",
+ args[0], (*trash.area ? ", " : ""), trash.area, args[1]);
+ goto error;
+ }
+ chk = parse_tcpcheck_action(args, cur_arg, curpx, &rs->rules, kw, file, line, errmsg);
+ }
+
+ if (!chk) {
+ memprintf(errmsg, "'%s %s' : %s.", args[0], args[1], *errmsg);
+ goto error;
+ }
+ ret = (ret || (*errmsg != NULL)); /* Handle warning */
+
+ /* No error: add the tcp-check rule in the list */
+ chk->index = index;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ if ((curpx->options2 & PR_O2_CHK_ANY) == PR_O2_TCPCHK_CHK &&
+ (curpx->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK) {
+ /* Use this ruleset if the proxy already has tcp-check enabled */
+ curpx->tcpcheck_rules.list = &rs->rules;
+ curpx->tcpcheck_rules.flags &= ~TCPCHK_RULES_UNUSED_TCP_RS;
+ }
+ else {
+ /* mark this ruleset as unused for now */
+ curpx->tcpcheck_rules.flags |= TCPCHK_RULES_UNUSED_TCP_RS;
+ }
+
+ return ret;
+
+ error:
+ free_tcpcheck(chk, 0);
+ free_tcpcheck_ruleset(rs);
+ return -1;
+}
+
+/* Parses the "http-check" proxy keyword. Returns -1 on error, 1 when a warning
+ * was emitted, 0 on success.
+ */
+static int proxy_parse_httpcheck(char **args, int section, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **errmsg)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rule *chk = NULL;
+ int index, cur_arg, ret = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[0], NULL))
+ ret = 1;
+
+ /* the two option-like sub-keywords only set proxy flags and create no rule */
+ cur_arg = 1;
+ if (strcmp(args[cur_arg], "disable-on-404") == 0) {
+ /* enable a graceful server shutdown on an HTTP 404 response */
+ curpx->options |= PR_O_DISABLE404;
+ if (too_many_args(1, args, errmsg, NULL))
+ goto error;
+ goto out;
+ }
+ else if (strcmp(args[cur_arg], "send-state") == 0) {
+ /* enable emission of the apparent state of a server in HTTP checks */
+ curpx->options2 |= PR_O2_CHK_SNDST;
+ if (too_many_args(1, args, errmsg, NULL))
+ goto error;
+ goto out;
+ }
+
+ /* Deduce the ruleset name from the proxy info */
+ chunk_printf(&trash, "*http-check-%s_%s-%d",
+ ((curpx == defpx) ? "defaults" : curpx->id),
+ curpx->conf.file, curpx->conf.line);
+
+ rs = find_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ rs = create_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ memprintf(errmsg, "out of memory.\n");
+ goto error;
+ }
+ }
+
+ /* next index, unless the last rule is the implicit "option httpchk"
+ * send rule, which may later be merged with this one.
+ */
+ index = 0;
+ if (!LIST_ISEMPTY(&rs->rules)) {
+ chk = LIST_PREV(&rs->rules, typeof(chk), list);
+ if (chk->action != TCPCHK_ACT_SEND || !(chk->send.http.flags & TCPCHK_SND_HTTP_FROM_OPT))
+ index = chk->index + 1;
+ chk = NULL;
+ }
+
+ /* dispatch on the http-check sub-keyword */
+ if (strcmp(args[cur_arg], "connect") == 0)
+ chk = parse_tcpcheck_connect(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else if (strcmp(args[cur_arg], "send") == 0)
+ chk = parse_tcpcheck_send_http(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else if (strcmp(args[cur_arg], "expect") == 0)
+ chk = parse_tcpcheck_expect(args, cur_arg, curpx, &rs->rules, TCPCHK_RULES_HTTP_CHK,
+ file, line, errmsg);
+ else if (strcmp(args[cur_arg], "comment") == 0)
+ chk = parse_tcpcheck_comment(args, cur_arg, curpx, &rs->rules, file, line, errmsg);
+ else {
+ struct action_kw *kw = action_kw_tcp_check_lookup(args[cur_arg]);
+
+ if (!kw) {
+ action_kw_tcp_check_build_list(&trash);
+ memprintf(errmsg, "'%s' only supports 'disable-on-404', 'send-state', 'comment', 'connect',"
+ " 'send', 'expect'%s%s. but got '%s'",
+ args[0], (*trash.area ? ", " : ""), trash.area, args[1]);
+ goto error;
+ }
+ chk = parse_tcpcheck_action(args, cur_arg, curpx, &rs->rules, kw, file, line, errmsg);
+ }
+
+ if (!chk) {
+ memprintf(errmsg, "'%s %s' : %s.", args[0], args[1], *errmsg);
+ goto error;
+ }
+ ret = (*errmsg != NULL); /* Handle warning */
+
+ chk->index = index;
+ if ((curpx->options2 & PR_O2_CHK_ANY) == PR_O2_TCPCHK_CHK &&
+ (curpx->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK) {
+ /* Use this ruleset if the proxy already has http-check enabled */
+ curpx->tcpcheck_rules.list = &rs->rules;
+ curpx->tcpcheck_rules.flags &= ~TCPCHK_RULES_UNUSED_HTTP_RS;
+ if (!tcpcheck_add_http_rule(chk, &curpx->tcpcheck_rules, errmsg)) {
+ memprintf(errmsg, "'%s %s' : %s.", args[0], args[1], *errmsg);
+ curpx->tcpcheck_rules.list = NULL;
+ goto error;
+ }
+ }
+ else {
+ /* mark this ruleset as unused for now */
+ curpx->tcpcheck_rules.flags |= TCPCHK_RULES_UNUSED_HTTP_RS;
+ LIST_APPEND(&rs->rules, &chk->list);
+ }
+
+ out:
+ return ret;
+
+ error:
+ free_tcpcheck(chk, 0);
+ free_tcpcheck_ruleset(rs);
+ return -1;
+}
+
+/* Parses the "option redis-check" proxy keyword. Builds (or reuses) the shared
+ * "*redis-check" ruleset: a PING send rule followed by a +PONG expect rule.
+ * Returns an ERR_* code (0 on success).
+ */
+int proxy_parse_redis_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+ const char *file, int line)
+{
+ static char *redis_req = "*1\r\n$4\r\nPING\r\n";
+ static char *redis_res = "+PONG\r\n";
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ char *errmsg = NULL;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy's check type to tcp-check based checking */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ /* the ruleset is shared: build it only once, on first use */
+ rs = find_tcpcheck_ruleset("*redis-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*redis-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send", redis_req, ""},
+ 1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "string", redis_res,
+ "error-status", "L7STS",
+ "on-error", "%[res.payload(0,0),cut_crlf]",
+ "on-success", "Redis server is ok",
+ ""},
+ 1, curpx, &rs->rules, TCPCHK_RULES_REDIS_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_REDIS_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Parses the "option ssl-hello-chk" proxy keyword.
+ * Enables tcp-check based health checks on <curpx> and attaches the shared
+ * "*ssl-hello-check" ruleset: send an SSLv3 CLIENT HELLO, then expect the
+ * first byte to be a handshake (0x16) or alert (0x15) record. Returns a
+ * combination of ERR_* codes.
+ */
+int proxy_parse_ssl_hello_chk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+                  const char *file, int line)
+{
+ /* This is the SSLv3 CLIENT HELLO packet used in conjunction with the
+  * ssl-hello-chk option to ensure that the remote server speaks SSL.
+  *
+  * Check RFC 2246 (TLSv1.0) sections A.3 and A.4 for details.
+  */
+ static char sslv3_client_hello[] = {
+ "16"                        /* ContentType         : 0x16 = Handshake          */
+ "0300"                      /* ProtocolVersion     : 0x0300 = SSLv3            */
+ "0079"                      /* ContentLength       : 0x79 bytes after this one */
+ "01"                        /* HandshakeType       : 0x01 = CLIENT HELLO       */
+ "000075"                    /* HandshakeLength     : 0x75 bytes after this one */
+ "0300"                      /* Hello Version       : 0x0300 = v3               */
+ "%[date(),htonl,hex]"       /* Unix GMT Time (s)   : filled with <now> (@0x0B) */
+ "%[str(HAPROXYSSLCHK\nHAPROXYSSLCHK\n),hex]" /* Random : must be exactly 28 bytes */
+ "00"                        /* Session ID length   : empty (no session ID)     */
+ "004E"                      /* Cipher Suite Length : 78 bytes after this one   */
+ "0001" "0002" "0003" "0004" /* 39 most common ciphers :  */
+ "0005" "0006" "0007" "0008" /* 0x01...0x1B, 0x2F...0x3A  */
+ "0009" "000A" "000B" "000C" /* This covers RSA/DH,       */
+ "000D" "000E" "000F" "0010" /* various bit lengths,      */
+ "0011" "0012" "0013" "0014" /* SHA1/MD5, DES/3DES/AES... */
+ "0015" "0016" "0017" "0018"
+ "0019" "001A" "001B" "002F"
+ "0030" "0031" "0032" "0033"
+ "0034" "0035" "0036" "0037"
+ "0038" "0039" "003A"
+ "01"                       /* Compression Length  : 0x01 = 1 byte for types   */
+ "00"                       /* Compression Type    : 0x00 = NULL compression   */
+ };
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ char *errmsg = NULL;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ /* the ruleset is shared between all proxies using this option */
+ rs = find_tcpcheck_ruleset("*ssl-hello-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*ssl-hello-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-binary-lf", sslv3_client_hello, ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ /* 0x15 = alert record, 0x16 = handshake record: both prove SSL speaks here */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rbinary", "^1[56]",
+                "min-recv", "5", "ok-status", "L6OK",
+                "error-status", "L6RSP", "tout-status", "L6TOUT",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SSL3_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_SSL3_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parses the "option smtpchk" proxy keyword.
+ * Enables tcp-check based health checks on <curpx> and attaches the shared
+ * "*smtp-check" ruleset: connect, expect a 3-digit banner, expect a 2xx,
+ * send the configured EHLO/HELO command (preset in the per-proxy variable
+ * "check.smtp_cmd"), expect a 2xx, then send QUIT and expect a final 2xx.
+ * Optional arguments "EHLO <host>"/"HELO <host>" select the command;
+ * otherwise "HELO localhost" is used. Returns a combination of ERR_* codes.
+ */
+int proxy_parse_smtpchk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+            const char *file, int line)
+{
+ static char *smtp_req = "%[var(check.smtp_cmd)]\r\n";
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ struct tcpcheck_var *var = NULL;
+ char *cmd = NULL, *errmsg = NULL;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(2, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ cur_arg += 2;
+ if (*args[cur_arg] && *args[cur_arg+1] &&
+     (strcmp(args[cur_arg], "EHLO") == 0 || strcmp(args[cur_arg], "HELO") == 0)) {
+ /* <EHLO|HELO> + space (1) + <host> + null byte (1) */
+ size_t len = strlen(args[cur_arg]) + 1 + strlen(args[cur_arg+1]) + 1;
+ cmd = calloc(1, len);
+ if (cmd)
+ snprintf(cmd, len, "%s %s", args[cur_arg], args[cur_arg+1]);
+ }
+ else {
+ /* this just hits the default for now, but you could potentially expand it to allow for other stuff
+    though, it's unlikely you'd want to send anything other than an EHLO or HELO */
+ cmd = strdup("HELO localhost");
+ }
+
+ /* the command becomes a preset variable expanded by the send-lf rule below */
+ var = create_tcpcheck_var(ist("check.smtp_cmd"));
+ if (cmd == NULL || var == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ var->data.type = SMP_T_STR;
+ var->data.u.str.area = cmd;
+ var->data.u.str.data = strlen(cmd);
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+ cmd = NULL; /* ownership transferred to the variable */
+ var = NULL;
+
+ /* the ruleset is shared between all proxies using this option */
+ rs = find_tcpcheck_ruleset("*smtp-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*smtp-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_connect((char *[]){"tcp-check", "connect", "default", "linger", ""},
+     1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^[0-9]{3}[ \r]",
+                "min-recv", "4",
+                "error-status", "L7RSP",
+                "on-error", "%[res.payload(0,0),cut_crlf]",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SMTP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^2[0-9]{2}[ \r]",
+                "min-recv", "4",
+                "error-status", "L7STS",
+                "on-error", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                "status-code", "res.payload(0,3)",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SMTP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 2;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-lf", smtp_req, ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 3;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ /* multi-line replies (2xx-...) are accepted before the final "2xx " line */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^(2[0-9]{2}-[^\r]*\r\n)*2[0-9]{2}[ \r]",
+                "error-status", "L7STS",
+                "on-error", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                "on-success", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                "status-code", "res.payload(0,3)",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SMTP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 4;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ /* Send an SMTP QUIT to ensure clean disconnect (issue 1812), and expect a 2xx response code */
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send", "QUIT\r\n", ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 5;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rstring", "^2[0-9]{2}[- \r]",
+                "min-recv", "4",
+                "error-status", "L7STS",
+                "on-error", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                "on-success", "%[res.payload(4,0),ltrim(' '),cut_crlf]",
+                "status-code", "res.payload(0,3)",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SMTP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 6;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_SMTP_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free(cmd);
+ free(var);
+ free_tcpcheck_vars(&rules->preset_vars);
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parses the "option pgsql-check" proxy keyword.
+ * Requires a "user <username>" argument. Presets the "check.username" and
+ * "check.plen" variables, then attaches the shared "*pgsql-check" ruleset:
+ * connect, send a v3.0 StartupMessage, reject an ErrorResponse ('E'), and
+ * accept an AuthenticationRequest ('R' = 0x52) with a known auth code.
+ * Returns a combination of ERR_* codes.
+ */
+int proxy_parse_pgsql_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+                const char *file, int line)
+{
+ static char pgsql_req[] = {
+ "%[var(check.plen),htonl,hex]" /* The packet length*/
+ "00030000"                     /* the version 3.0 */
+ "7573657200"                   /* "user" key */
+ "%[var(check.username),hex]00" /* the username */
+ "00"
+ };
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ struct tcpcheck_var *var = NULL;
+ char *user = NULL, *errmsg = NULL;
+ size_t packetlen = 0;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(2, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ cur_arg += 2;
+ if (!*args[cur_arg] || !*args[cur_arg+1]) {
+ ha_alert("parsing [%s:%d] : '%s %s' expects 'user <username>' as argument.\n",
+  file, line, args[0], args[1]);
+ goto error;
+ }
+ if (strcmp(args[cur_arg], "user") == 0) {
+ /* 15 = len(4) + version(4) + "user\0"(5) + trailing '\0'(1) + final '\0'(1) */
+ packetlen = 15 + strlen(args[cur_arg+1]);
+ user = strdup(args[cur_arg+1]);
+
+ var = create_tcpcheck_var(ist("check.username"));
+ if (user == NULL || var == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ var->data.type = SMP_T_STR;
+ var->data.u.str.area = user;
+ var->data.u.str.data = strlen(user);
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+ user = NULL; /* ownership transferred to the variable */
+ var = NULL;
+
+ var = create_tcpcheck_var(ist("check.plen"));
+ if (var == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ var->data.type = SMP_T_SINT;
+ var->data.u.sint = packetlen;
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+ var = NULL;
+ }
+ else {
+ ha_alert("parsing [%s:%d] : '%s %s' only supports optional values: 'user'.\n",
+  file, line, args[0], args[1]);
+ goto error;
+ }
+
+ /* the ruleset is shared between all proxies using this option */
+ rs = find_tcpcheck_ruleset("*pgsql-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*pgsql-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_connect((char *[]){"tcp-check", "connect", "default", "linger", ""},
+     1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-binary-lf", pgsql_req, ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ /* an 'E' (ErrorResponse) first byte means the server rejected the startup */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "!rstring", "^E",
+                "min-recv", "5",
+                "error-status", "L7RSP",
+                "on-error", "%[res.payload(6,0)]",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_PGSQL_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 2;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rbinary", "^52000000[A-Z0-9]{2}000000(00|02|03|04|05|06|07|09|0A)",
+                "min-recv", "9",
+                "error-status", "L7STS",
+                "on-success", "PostgreSQL server is ok",
+                "on-error", "PostgreSQL unknown error",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_PGSQL_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 3;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_PGSQL_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free(user);
+ free(var);
+ free_tcpcheck_vars(&rules->preset_vars);
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Parses the "option mysql-check" proxy keyword.
+ * Without arguments, only a connect + initial-handshake check is installed.
+ * With "user <name> [post-41|pre-41]", a client authentication packet
+ * followed by COM_QUIT is also sent (packet header and username preset in
+ * the "check.header"/"check.username" variables) and the server's OK reply
+ * is verified. The ruleset name depends on the selected protocol version.
+ * Returns a combination of ERR_* codes.
+ */
+int proxy_parse_mysql_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+                const char *file, int line)
+{
+ /* This is an example of a MySQL >=4.0 client Authentication packet kindly provided by Cyril Bonte.
+  * const char mysql40_client_auth_pkt[] = {
+  * 	"\x0e\x00\x00"	// packet length
+  * 	"\x01"		// packet number
+  * 	"\x00\x00"	// client capabilities
+  * 	"\x00\x00\x01"	// max packet
+  * 	"haproxy\x00"	// username (null terminated string)
+  * 	"\x00"		// filler (always 0x00)
+  * 	"\x01\x00\x00"	// packet length
+  * 	"\x00"		// packet number
+  * 	"\x01"		// COM_QUIT command
+  * };
+  */
+ static char mysql40_rsname[] = "*mysql40-check";
+ static char mysql40_req[] = {
+ "%[var(check.header),hex]"     /* 3 bytes for the packet length and 1 byte for the sequence ID */
+ "0080"                         /* client capabilities */
+ "000001"                       /* max packet */
+ "%[var(check.username),hex]00" /* the username */
+ "00"                           /* filler (always 0x00) */
+ "010000"                       /* packet length*/
+ "00"                           /* sequence ID */
+ "01"                           /* COM_QUIT command */
+ };
+
+ /* This is an example of a MySQL >=4.1  client Authentication packet provided by Nenad Merdanovic.
+  * const char mysql41_client_auth_pkt[] = {
+  * 	"\x0e\x00\x00\"		// packet length
+  * 	"\x01"			// packet number
+  * 	"\x00\x00\x00\x00"	// client capabilities
+  * 	"\x00\x00\x00\x01"	// max packet
+  *	"\x21"			// character set (UTF-8)
+  * 	char[23]		// All zeroes
+  * 	"haproxy\x00"		// username (null terminated string)
+  * 	"\x00"			// filler (always 0x00)
+  * 	"\x01\x00\x00"		// packet length
+  * 	"\x00"			// packet number
+  * 	"\x01"			// COM_QUIT command
+  * };
+  */
+ static char mysql41_rsname[] = "*mysql41-check";
+ static char mysql41_req[] = {
+ "%[var(check.header),hex]"     /* 3 bytes for the packet length and 1 byte for the sequence ID */
+ "00820000"                     /* client capabilities */
+ "00800001"                     /* max packet */
+ "21"                           /* character set (UTF-8) */
+ "000000000000000000000000"     /* 23 bytes, all zeroes */
+ "0000000000000000000000"
+ "%[var(check.username),hex]00" /* the username */
+ "00"                           /* filler (always 0x00) */
+ "010000"                       /* packet length*/
+ "00"                           /* sequence ID */
+ "01"                           /* COM_QUIT command */
+ };
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ struct tcpcheck_var *var = NULL;
+ char *mysql_rsname = "*mysql-check";
+ char *mysql_req = NULL, *hdr = NULL, *user = NULL, *errmsg = NULL;
+ int index = 0, err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(3, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ cur_arg += 2;
+ if (*args[cur_arg]) {
+ int packetlen, userlen;
+
+ if (strcmp(args[cur_arg], "user") != 0) {
+ ha_alert("parsing [%s:%d] : '%s %s' only supports optional values: 'user' (got '%s').\n",
+  file, line, args[0], args[1], args[cur_arg]);
+ goto error;
+ }
+
+ if (*(args[cur_arg+1]) == 0) {
+ ha_alert("parsing [%s:%d] : '%s %s %s' expects <username> as argument.\n",
+  file, line, args[0], args[1], args[cur_arg]);
+ goto error;
+ }
+
+ hdr     = calloc(4, sizeof(*hdr));
+ user    = strdup(args[cur_arg+1]);
+ userlen = strlen(args[cur_arg+1]);
+
+ if (hdr == NULL || user == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ /* default to the post-4.1 handshake when no version is given */
+ if (!*args[cur_arg+2] || strcmp(args[cur_arg+2], "post-41") == 0) {
+ packetlen = userlen + 7 + 27;
+ mysql_req = mysql41_req;
+ mysql_rsname  = mysql41_rsname;
+ }
+ else if (strcmp(args[cur_arg+2], "pre-41") == 0) {
+ packetlen = userlen + 7;
+ mysql_req = mysql40_req;
+ mysql_rsname  = mysql40_rsname;
+ }
+ else  {
+ ha_alert("parsing [%s:%d] : keyword '%s' only supports 'post-41' and 'pre-41' (got '%s').\n",
+  file, line, args[cur_arg], args[cur_arg+2]);
+ goto error;
+ }
+
+ /* hdr[0..2]: packet length in little-endian, hdr[3]: sequence ID = 1 */
+ hdr[0] = (unsigned char)(packetlen & 0xff);
+ hdr[1] = (unsigned char)((packetlen >> 8) & 0xff);
+ hdr[2] = (unsigned char)((packetlen >> 16) & 0xff);
+ hdr[3] = 1;
+
+ var = create_tcpcheck_var(ist("check.header"));
+ if (var == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ var->data.type = SMP_T_STR;
+ var->data.u.str.area = hdr;
+ var->data.u.str.data = 4;
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+ hdr = NULL; /* ownership transferred to the variable */
+ var = NULL;
+
+ var = create_tcpcheck_var(ist("check.username"));
+ if (var == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ var->data.type = SMP_T_STR;
+ var->data.u.str.area = user;
+ var->data.u.str.data = strlen(user);
+ LIST_INIT(&var->list);
+ LIST_APPEND(&rules->preset_vars, &var->list);
+ user = NULL; /* ownership transferred to the variable */
+ var = NULL;
+ }
+
+ /* the ruleset (per protocol version) is shared between proxies */
+ rs = find_tcpcheck_ruleset(mysql_rsname);
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset(mysql_rsname);
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_connect((char *[]){"tcp-check", "connect", "default", "linger", ""},
+     1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = index++;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ if (mysql_req) {
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-binary-lf", mysql_req, ""},
+       1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = index++;
+ LIST_APPEND(&rs->rules, &chk->list);
+ }
+
+ /* custom handler validating the server's initial handshake packet */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "custom", ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_MYSQL_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->expect.custom = tcpcheck_mysql_expect_iniths;
+ chk->index = index++;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ if (mysql_req) {
+ /* custom handler validating the OK reply to the auth packet */
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "custom", ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_MYSQL_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->expect.custom = tcpcheck_mysql_expect_ok;
+ chk->index = index++;
+ LIST_APPEND(&rs->rules, &chk->list);
+ }
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_MYSQL_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free(hdr);
+ free(user);
+ free(var);
+ free_tcpcheck_vars(&rules->preset_vars);
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parses the "option ldap-check" proxy keyword.
+ * Enables tcp-check based health checks on <curpx> and attaches the shared
+ * "*ldap-check" ruleset: send a hard-coded LDAPv3 anonymous BindRequest,
+ * expect a BER SEQUENCE (0x30) back, then validate the BindResponse with a
+ * custom handler. Returns a combination of ERR_* codes.
+ */
+int proxy_parse_ldap_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+               const char *file, int line)
+{
+ static char *ldap_req = "300C020101600702010304008000";
+
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ char *errmsg = NULL;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+ /* the ruleset is shared between all proxies using this option */
+ rs = find_tcpcheck_ruleset("*ldap-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*ldap-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-binary", ldap_req, ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "rbinary", "^30",
+                "min-recv", "14",
+                "on-error", "Not LDAPv3 protocol",
+                ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_LDAP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "custom", ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_LDAP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->expect.custom = tcpcheck_ldap_expect_bindrsp;
+ chk->index = 2;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_LDAP_CHK;
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parses the "option spop-check" proxy keyword.
+ * Enables tcp-check based health checks on <curpx> and attaches the shared
+ * "*spop-check" ruleset: send the SPOE HELLO healthcheck frame (built by
+ * spoe_prepare_healthcheck_request() and hex-encoded via the trash buffer),
+ * then validate the AGENT-HELLO reply with a custom handler. Returns a
+ * combination of ERR_* codes.
+ */
+int proxy_parse_spop_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+               const char *file, int line)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ char *spop_req = NULL;
+ char *errmsg  = NULL;
+ int spop_len = 0, err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags = 0;
+
+
+ /* the ruleset is shared between all proxies using this option */
+ rs = find_tcpcheck_ruleset("*spop-check");
+ if (rs)
+ goto ruleset_found;
+
+ rs = create_tcpcheck_ruleset("*spop-check");
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+
+ if (spoe_prepare_healthcheck_request(&spop_req, &spop_len) == -1) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ /* hex-encode the binary frame into the trash buffer for "send-binary" */
+ chunk_reset(&trash);
+ dump_binary(&trash, spop_req, spop_len);
+ trash.area[trash.data] = '\0';
+
+ chk = parse_tcpcheck_send((char *[]){"tcp-check", "send-binary", b_head(&trash), ""},
+      1, curpx, &rs->rules, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->index = 0;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ chk = parse_tcpcheck_expect((char *[]){"tcp-check", "expect", "custom", "min-recv", "4", ""},
+       1, curpx, &rs->rules, TCPCHK_RULES_SPOP_CHK, file, line, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : %s\n", file, line, errmsg);
+ goto error;
+ }
+ chk->expect.custom = tcpcheck_spop_expect_agenthello;
+ chk->index = 1;
+ LIST_APPEND(&rs->rules, &chk->list);
+
+ ruleset_found:
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_SPOP_CHK;
+
+ out:
+ free(spop_req);
+ free(errmsg);
+ return err_code;
+
+ error:
+ free_tcpcheck_ruleset(rs);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+
+/* Builds the HTTP send rule for "option httpchk" from the optional
+ * <method> <uri> <version> arguments at args[cur_arg..cur_arg+2]. With a
+ * single argument, it is taken as the URI; the method defaults to OPTIONS.
+ * Trailing headers/body appended after the version are rejected in favor of
+ * the 'http-check send' directive. Returns the new rule or NULL with
+ * <errmsg> filled on error.
+ */
+static struct tcpcheck_rule *proxy_parse_httpchk_req(char **args, int cur_arg, struct proxy *px, char **errmsg)
+{
+ struct tcpcheck_rule *chk = NULL;
+ struct tcpcheck_http_hdr *hdr = NULL;
+ char *meth = NULL, *uri = NULL, *vsn = NULL;
+ char *hdrs, *body;
+
+ /* a "\r\n" inside the version string means headers (or a body after
+  * "\r\n\r\n") were smuggled in; this legacy trick is not supported */
+ hdrs = (*args[cur_arg+2] ? strstr(args[cur_arg+2], "\r\n") : NULL);
+ body = (*args[cur_arg+2] ? strstr(args[cur_arg+2], "\r\n\r\n") : NULL);
+ if (hdrs || body) {
+ memprintf(errmsg, "hiding headers or body at the end of the version string is unsupported."
+   "Use 'http-check send' directive instead.");
+ goto error;
+ }
+
+ chk = calloc(1, sizeof(*chk));
+ if (!chk) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ chk->action    = TCPCHK_ACT_SEND;
+ chk->send.type = TCPCHK_SEND_HTTP;
+ chk->send.http.flags |= TCPCHK_SND_HTTP_FROM_OPT;
+ chk->send.http.meth.meth = HTTP_METH_OPTIONS;
+ LIST_INIT(&chk->send.http.hdrs);
+
+ /* Copy the method, uri and version */
+ if (*args[cur_arg]) {
+ if (!*args[cur_arg+1])
+ uri = args[cur_arg];
+ else
+ meth = args[cur_arg];
+ }
+ if (*args[cur_arg+1])
+ uri = args[cur_arg+1];
+ if (*args[cur_arg+2])
+ vsn = args[cur_arg+2];
+
+ if (meth) {
+ chk->send.http.meth.meth = find_http_meth(meth, strlen(meth));
+ chk->send.http.meth.str.area = strdup(meth);
+ chk->send.http.meth.str.data = strlen(meth);
+ if (!chk->send.http.meth.str.area) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+ if (uri) {
+ chk->send.http.uri = ist(strdup(uri));
+ if (!isttest(chk->send.http.uri)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+ if (vsn) {
+ chk->send.http.vsn = ist(strdup(vsn));
+ if (!isttest(chk->send.http.vsn)) {
+ memprintf(errmsg, "out of memory");
+ goto error;
+ }
+ }
+
+ return chk;
+
+  error:
+ free_tcpcheck_http_hdr(hdr);
+ free_tcpcheck(chk, 0);
+ return NULL;
+}
+
+/* Parses the "option httpchk" proxy keyword.
+ * Builds an HTTP send rule from the optional method/uri/version arguments,
+ * then attaches it to a per-proxy http-check ruleset whose name is derived
+ * from the proxy id and config location. Returns a combination of ERR_*
+ * codes.
+ */
+int proxy_parse_httpchk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+            const char *file, int line)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ struct tcpcheck_rule *chk;
+ char *errmsg = NULL;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(3, 1, file, line, args, &err_code))
+ goto out;
+
+ chk = proxy_parse_httpchk_req(args, cur_arg+2, curpx, &errmsg);
+ if (!chk) {
+ ha_alert("parsing [%s:%d] : '%s %s' : %s.\n", file, line, args[0], args[1], errmsg);
+ goto error;
+ }
+ /* a non-NULL errmsg with a valid rule carries a warning only */
+ if (errmsg) {
+ ha_warning("parsing [%s:%d]: '%s %s' : %s\n", file, line, args[0], args[1], errmsg);
+ err_code |= ERR_WARN;
+ ha_free(&errmsg);
+ }
+
+ /* NOTE(review): this label appears unreferenced within this function —
+  * confirm whether a 'goto no_request' path was removed or is elsewhere. */
+  no_request:
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = NULL;
+ rules->flags |= TCPCHK_SND_HTTP_FROM_OPT;
+
+ /* Deduce the ruleset name from the proxy info */
+ chunk_printf(&trash, "*http-check-%s_%s-%d",
+      ((curpx == defpx) ? "defaults" : curpx->id),
+      curpx->conf.file, curpx->conf.line);
+
+ rs = find_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ rs = create_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ }
+
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_HTTP_CHK;
+ if (!tcpcheck_add_http_rule(chk, rules, &errmsg)) {
+ ha_alert("parsing [%s:%d] : '%s %s' : %s.\n", file, line, args[0], args[1], errmsg);
+ rules->list = NULL;
+ goto error;
+ }
+
+ out:
+ free(errmsg);
+ return err_code;
+
+ error:
+ free_tcpcheck_ruleset(rs);
+ free_tcpcheck(chk, 0);
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Parses the "option tcp-check" proxy keyword.
+ * Binds the proxy to a tcp-check ruleset: reuses the one already attached,
+ * a pending unused one, or the defaults section's ruleset; otherwise a new
+ * per-proxy ruleset named from the proxy id and config location is created.
+ * Returns a combination of ERR_* codes.
+ */
+int proxy_parse_tcp_check_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
+              const char *file, int line)
+{
+ struct tcpcheck_ruleset *rs = NULL;
+ struct tcpcheck_rules *rules = &curpx->tcpcheck_rules;
+ int err_code = 0;
+
+ if (warnifnotcap(curpx, PR_CAP_BE, file, line, args[cur_arg+1], NULL))
+ err_code |= ERR_WARN;
+
+ if (alertif_too_many_args_idx(0, 1, file, line, args, &err_code))
+ goto out;
+
+ /* switch the proxy to tcp-check driven health checks */
+ curpx->options2 &= ~PR_O2_CHK_ANY;
+ curpx->options2 |= PR_O2_TCPCHK_CHK;
+
+ if ((rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK) {
+ /* If a tcp-check ruleset is already set, do nothing */
+ if (rules->list)
+ goto out;
+
+ /* If a tcp-check ruleset is waiting to be used for the current proxy,
+  * get it.
+  */
+ if (rules->flags & TCPCHK_RULES_UNUSED_TCP_RS)
+ goto curpx_ruleset;
+
+ /* Otherwise, try to get the tcp-check ruleset of the default proxy */
+ chunk_printf(&trash, "*tcp-check-defaults_%s-%d", defpx->conf.file, defpx->conf.line);
+ rs = find_tcpcheck_ruleset(b_orig(&trash));
+ if (rs)
+ goto ruleset_found;
+ }
+
+  curpx_ruleset:
+ /* Deduce the ruleset name from the proxy info */
+ chunk_printf(&trash, "*tcp-check-%s_%s-%d",
+      ((curpx == defpx) ? "defaults" : curpx->id),
+      curpx->conf.file, curpx->conf.line);
+
+ rs = find_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ rs = create_tcpcheck_ruleset(b_orig(&trash));
+ if (rs == NULL) {
+ ha_alert("parsing [%s:%d] : out of memory.\n", file, line);
+ goto error;
+ }
+ }
+
+  ruleset_found:
+ free_tcpcheck_vars(&rules->preset_vars);
+ rules->list = &rs->rules;
+ rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
+ rules->flags |= TCPCHK_RULES_TCP_CHK;
+
+  out:
+ return err_code;
+
+  error:
+ err_code |= ERR_ALERT | ERR_FATAL;
+ goto out;
+}
+
+/* Config keywords handled in "listen"/"backend" sections */
+static struct cfg_kw_list cfg_kws = {ILH, {
+ { CFG_LISTEN, "http-check",     proxy_parse_httpcheck },
+ { CFG_LISTEN, "tcp-check",      proxy_parse_tcpcheck },
+ { 0, NULL, NULL },
+}};
+
+/* post-parsing validation, per-proxy and global cleanup hooks */
+REGISTER_POST_PROXY_CHECK(check_proxy_tcpcheck);
+REGISTER_PROXY_DEINIT(deinit_proxy_tcpcheck);
+REGISTER_POST_DEINIT(deinit_tcpchecks);
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/thread.c b/src/thread.c
new file mode 100644
index 0000000..ab4342d
--- /dev/null
+++ b/src/thread.c
@@ -0,0 +1,1864 @@
+/*
+ * functions about threads.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <signal.h>
+#include <unistd.h>
+#ifdef _POSIX_PRIORITY_SCHEDULING
+#include <sched.h>
+#endif
+
+#ifdef USE_THREAD
+# include <pthread.h>
+#endif
+
+#ifdef USE_CPU_AFFINITY
+# include <sched.h>
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+# include <sys/param.h>
+# ifdef __FreeBSD__
+# include <sys/cpuset.h>
+# endif
+# include <pthread_np.h>
+# endif
+# ifdef __APPLE__
+# include <mach/mach_types.h>
+# include <mach/thread_act.h>
+# include <mach/thread_policy.h>
+# endif
+# include <haproxy/cpuset.h>
+#endif
+
+#include <haproxy/cfgparse.h>
+#include <haproxy/clock.h>
+#include <haproxy/fd.h>
+#include <haproxy/global.h>
+#include <haproxy/log.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+
+struct tgroup_info ha_tgroup_info[MAX_TGROUPS] = { };
+THREAD_LOCAL const struct tgroup_info *tg = &ha_tgroup_info[0];
+
+struct thread_info ha_thread_info[MAX_THREADS] = { };
+THREAD_LOCAL const struct thread_info *ti = &ha_thread_info[0];
+
+struct tgroup_ctx ha_tgroup_ctx[MAX_TGROUPS] = { };
+THREAD_LOCAL struct tgroup_ctx *tg_ctx = &ha_tgroup_ctx[0];
+
+struct thread_ctx ha_thread_ctx[MAX_THREADS] = { };
+THREAD_LOCAL struct thread_ctx *th_ctx = &ha_thread_ctx[0];
+
+#ifdef USE_THREAD
+
+volatile unsigned long all_tgroups_mask __read_mostly = 1; // nbtgroup 1 assumed by default
+volatile unsigned int rdv_requests = 0; // total number of threads requesting RDV
+volatile unsigned int isolated_thread = ~0; // ID of the isolated thread, or ~0 when none
+THREAD_LOCAL unsigned int tgid = 1; // thread ID starts at 1
+THREAD_LOCAL unsigned int tid = 0;
+int thread_cpus_enabled_at_boot = 1;
+static pthread_t ha_pthread[MAX_THREADS] = { };
+
+/* Marks the thread as harmless until the last thread using the rendez-vous
+ * point quits. Given that we can wait for a long time, sched_yield() is
+ * used when available to offer the CPU resources to competing threads if
+ * needed.
+ */
+void thread_harmless_till_end()
+{
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
+ while (_HA_ATOMIC_LOAD(&rdv_requests) != 0) {
+ ha_thread_relax();
+ }
+}
+
+/* Isolates the current thread : request the ability to work while all other
+ * threads are harmless, as defined by thread_harmless_now() (i.e. they're not
+ * going to touch any visible memory area). Only returns once all of them are
+ * harmless, with the current thread's bit in &tg_ctx->threads_harmless cleared.
+ * Needs to be completed using thread_release().
+ */
+void thread_isolate()
+{
+ uint tgrp, thr;
+
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
+ __ha_barrier_atomic_store();
+ _HA_ATOMIC_INC(&rdv_requests);
+
+ /* wait for all threads to become harmless. They cannot change their
+ * mind once seen thanks to rdv_requests above, unless they pass in
+ * front of us. For this reason we proceed in 4 steps:
+ * 1) wait for all threads to declare themselves harmless
+ * 2) try to grab the isolated_thread exclusivity
+ * 3) verify again that all threads are harmless, since another one
+ * that was isolating between 1 and 2 could have dropped its
+ * harmless state there.
+ * 4) drop harmless flag (which also has the benefit of leaving
+ * all other threads wait on reads instead of writes.
+ */
+ while (1) {
+ for (tgrp = 0; tgrp < global.nbtgroups; tgrp++) {
+ do {
+ ulong te = _HA_ATOMIC_LOAD(&ha_tgroup_info[tgrp].threads_enabled);
+ ulong th = _HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp].threads_harmless);
+
+ if ((th & te) == te)
+ break;
+ ha_thread_relax();
+ } while (1);
+ }
+
+ /* all other ones are harmless. isolated_thread will contain
+ * ~0U if no other one competes, !=tid if another one got it,
+ * tid if the current thread already grabbed it on the previous
+ * round.
+ */
+ thr = _HA_ATOMIC_LOAD(&isolated_thread);
+ if (thr == tid)
+ break; // we won and we're certain everyone is harmless
+
+ /* try to win the race against others */
+ if (thr != ~0U || !_HA_ATOMIC_CAS(&isolated_thread, &thr, tid))
+ ha_thread_relax();
+ }
+
+ /* the thread is no longer harmless as it runs */
+ _HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
+
+ /* the thread is isolated until it calls thread_release() which will
+ * 1) reset isolated_thread to ~0;
+ * 2) decrement rdv_requests.
+ */
+}
+
+/* Isolates the current thread : request the ability to work while all other
+ * threads are idle, as defined by thread_idle_now(). It only returns once
+ * all of them are both harmless and idle, with the current thread's bit in
+ * &tg_ctx->threads_harmless and idle_mask cleared. Needs to be completed using
+ * thread_release(). By doing so the thread also engages in being safe against
+ * any actions that other threads might be about to start under the same
+ * conditions. This specifically targets destruction of any internal structure,
+ * which implies that the current thread may not hold references to any object.
+ *
+ * Note that a concurrent thread_isolate() will usually win against
+ * thread_isolate_full() as it doesn't consider the idle_mask, allowing it to
+ * get back to the poller or any other fully idle location, that will
+ * ultimately release this one.
+ */
+void thread_isolate_full()
+{
+ uint tgrp, thr;
+
+ _HA_ATOMIC_OR(&tg_ctx->threads_idle, ti->ltid_bit);
+ _HA_ATOMIC_OR(&tg_ctx->threads_harmless, ti->ltid_bit);
+ __ha_barrier_atomic_store();
+ _HA_ATOMIC_INC(&rdv_requests);
+
+ /* wait for all threads to become harmless. They cannot change their
+ * mind once seen thanks to rdv_requests above, unless they pass in
+ * front of us. For this reason we proceed in 4 steps:
+ * 1) wait for all threads to declare themselves harmless
+ * 2) try to grab the isolated_thread exclusivity
+ * 3) verify again that all threads are harmless, since another one
+ * that was isolating between 1 and 2 could have dropped its
+ * harmless state there.
+ * 4) drop harmless flag (which also has the benefit of leaving
+ * all other threads wait on reads instead of writes.
+ */
+ while (1) {
+ for (tgrp = 0; tgrp < global.nbtgroups; tgrp++) {
+ do {
+ ulong te = _HA_ATOMIC_LOAD(&ha_tgroup_info[tgrp].threads_enabled);
+ ulong th = _HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp].threads_harmless);
+ ulong id = _HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp].threads_idle);
+
+ if ((th & id & te) == te)
+ break;
+ ha_thread_relax();
+ } while (1);
+ }
+
+ /* all other ones are harmless and idle. isolated_thread will
+ * contain ~0U if no other one competes, !=tid if another one
+ * got it, tid if the current thread already grabbed it on the
+ * previous round.
+ */
+ thr = _HA_ATOMIC_LOAD(&isolated_thread);
+ if (thr == tid)
+ break; // we won and we're certain everyone is harmless
+
+ if (thr != ~0U || !_HA_ATOMIC_CAS(&isolated_thread, &thr, tid))
+ ha_thread_relax();
+ }
+
+ /* we're not idle nor harmless anymore at this point. Other threads
+ * waiting on this condition will need to wait until out next pass to
+ * the poller, or our next call to thread_isolate_full().
+ */
+ _HA_ATOMIC_AND(&tg_ctx->threads_idle, ~ti->ltid_bit);
+ _HA_ATOMIC_AND(&tg_ctx->threads_harmless, ~ti->ltid_bit);
+
+ /* the thread is isolated until it calls thread_release() which will
+ * 1) reset isolated_thread to ~0;
+ * 2) decrement rdv_requests.
+ */
+}
+
+/* Cancels the effect of thread_isolate() by resetting the ID of the isolated
+ * thread and decrementing the number of RDV requesters. This immediately allows
+ * other threads to expect to be executed, though they will first have to wait
+ * for this thread to become harmless again (possibly by reaching the poller
+ * again).
+ */
+void thread_release()
+{
+ HA_ATOMIC_STORE(&isolated_thread, ~0U);
+ HA_ATOMIC_DEC(&rdv_requests);
+}
+
+/* Sets up threads, signals and masks, and starts threads 2 and above.
+ * Does nothing when threads are disabled.
+ */
+void setup_extra_threads(void *(*handler)(void *))
+{
+ sigset_t blocked_sig, old_sig;
+ int i;
+
+ /* ensure the signals will be blocked in every thread */
+ sigfillset(&blocked_sig);
+ sigdelset(&blocked_sig, SIGPROF);
+ sigdelset(&blocked_sig, SIGBUS);
+ sigdelset(&blocked_sig, SIGFPE);
+ sigdelset(&blocked_sig, SIGILL);
+ sigdelset(&blocked_sig, SIGSEGV);
+ pthread_sigmask(SIG_SETMASK, &blocked_sig, &old_sig);
+
+ /* Create nbthread-1 thread. The first thread is the current process */
+ ha_pthread[0] = pthread_self();
+ for (i = 1; i < global.nbthread; i++)
+ pthread_create(&ha_pthread[i], NULL, handler, &ha_thread_info[i]);
+}
+
+/* waits for all threads to terminate. Does nothing when threads are
+ * disabled.
+ */
+void wait_for_threads_completion()
+{
+ int i;
+
+ /* Wait the end of other threads */
+ for (i = 1; i < global.nbthread; i++)
+ pthread_join(ha_pthread[i], NULL);
+
+#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+ show_lock_stats();
+#endif
+}
+
+/* Tries to set the current thread's CPU affinity according to the cpu_map */
+void set_thread_cpu_affinity()
+{
+#if defined(USE_CPU_AFFINITY)
+ /* no affinity setting for the master process */
+ if (master)
+ return;
+
+ /* Now the CPU affinity for all threads */
+ if (ha_cpuset_count(&cpu_map[tgid - 1].thread[ti->ltid])) {/* only do this if the thread has a THREAD map */
+# if defined(__APPLE__)
+ /* Note: this API is limited to the first 32/64 CPUs */
+ unsigned long set = cpu_map[tgid - 1].thread[ti->ltid].cpuset;
+ int j;
+
+ while ((j = ffsl(set)) > 0) {
+ thread_affinity_policy_data_t cpu_set = { j - 1 };
+ thread_port_t mthread;
+
+ mthread = pthread_mach_thread_np(ha_pthread[tid]);
+ thread_policy_set(mthread, THREAD_AFFINITY_POLICY, (thread_policy_t)&cpu_set, 1);
+ set &= ~(1UL << (j - 1));
+ }
+# else
+ struct hap_cpuset *set = &cpu_map[tgid - 1].thread[ti->ltid];
+
+ pthread_setaffinity_np(ha_pthread[tid], sizeof(set->cpuset), &set->cpuset);
+# endif
+ }
+#endif /* USE_CPU_AFFINITY */
+}
+
+/* Retrieves the opaque pthread_t of thread <thr> cast to an unsigned long long
+ * since POSIX took great care of not specifying its representation, making it
+ * hard to export for post-mortem analysis. For this reason we copy it into a
+ * union and will use the smallest scalar type at least as large as its size,
+ * which will keep endianness and alignment for all regular sizes. As a last
+ * resort we end up with a long long aligned to the first bytes in memory, which
+ * will be endian-dependent if pthread_t is larger than a long long (not seen
+ * yet).
+ */
+unsigned long long ha_get_pthread_id(unsigned int thr)
+{
+	union {
+		pthread_t t;
+		unsigned long long ll;
+		unsigned int i;
+		unsigned short s;
+		unsigned char c;
+	} u = { 0 };
+
+	u.t = ha_pthread[thr];
+
+	/* pick the smallest scalar fully covering pthread_t's storage */
+	if (sizeof(u.t) <= sizeof(u.c))
+		return u.c;
+	else if (sizeof(u.t) <= sizeof(u.s))
+		return u.s;
+	else if (sizeof(u.t) <= sizeof(u.i))
+		return u.i;
+	return u.ll;
+}
+
+/* send signal <sig> to thread <thr> */
+void ha_tkill(unsigned int thr, int sig)
+{
+ pthread_kill(ha_pthread[thr], sig);
+}
+
+/* send signal <sig> to all threads. The calling thread is signaled last in
+ * order to allow all threads to synchronize in the handler.
+ */
+void ha_tkillall(int sig)
+{
+ unsigned int thr;
+
+ for (thr = 0; thr < global.nbthread; thr++) {
+ if (!(ha_thread_info[thr].tg->threads_enabled & ha_thread_info[thr].ltid_bit))
+ continue;
+ if (thr == tid)
+ continue;
+ pthread_kill(ha_pthread[thr], sig);
+ }
+ raise(sig);
+}
+
+/* Briefly relinquishes the CPU to competing threads: uses sched_yield()
+ * where POSIX priority scheduling is available, otherwise falls back to a
+ * CPU relax instruction.
+ */
+void ha_thread_relax(void)
+{
+#ifdef _POSIX_PRIORITY_SCHEDULING
+	sched_yield();
+#else
+	pl_cpu_relax();
+#endif
+}
+
+/* these calls are used as callbacks at init time when debugging is on */
+void ha_spin_init(HA_SPINLOCK_T *l)
+{
+ HA_SPIN_INIT(l);
+}
+
+/* these calls are used as callbacks at init time when debugging is on */
+void ha_rwlock_init(HA_RWLOCK_T *l)
+{
+ HA_RWLOCK_INIT(l);
+}
+
+/* returns the number of CPUs the current process is enabled to run on,
+ * regardless of any MAX_THREADS limitation. Falls back to 1 when the
+ * affinity cannot be queried on this platform; the result is always >= 1.
+ */
+static int thread_cpus_enabled()
+{
+	int ret = 1;
+
+#ifdef USE_CPU_AFFINITY
+#if defined(__linux__) && defined(CPU_COUNT)
+	cpu_set_t mask;
+
+	if (sched_getaffinity(0, sizeof(mask), &mask) == 0)
+		ret = CPU_COUNT(&mask);
+#elif defined(__FreeBSD__) && defined(USE_CPU_AFFINITY)
+	cpuset_t cpuset;
+	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
+	                       sizeof(cpuset), &cpuset) == 0)
+		ret = CPU_COUNT(&cpuset);
+#elif defined(__APPLE__)
+	ret = (int)sysconf(_SC_NPROCESSORS_ONLN);
+#endif
+#endif
+	ret = MAX(ret, 1);
+	return ret;
+}
+
+/* Returns 1 if the cpu set is currently restricted for the process else 0.
+ * Currently only implemented for the Linux platform.
+ */
+int thread_cpu_mask_forced()
+{
+#if defined(__linux__)
+ const int cpus_avail = sysconf(_SC_NPROCESSORS_ONLN);
+ return cpus_avail != thread_cpus_enabled();
+#else
+ return 0;
+#endif
+}
+
+/* Below come the lock-debugging functions */
+
+#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+
+struct lock_stat lock_stats[LOCK_LABELS];
+
+/* this is only used below */
+static const char *lock_label(enum lock_label label)
+{
+ switch (label) {
+ case TASK_RQ_LOCK: return "TASK_RQ";
+ case TASK_WQ_LOCK: return "TASK_WQ";
+ case LISTENER_LOCK: return "LISTENER";
+ case PROXY_LOCK: return "PROXY";
+ case SERVER_LOCK: return "SERVER";
+ case LBPRM_LOCK: return "LBPRM";
+ case SIGNALS_LOCK: return "SIGNALS";
+ case STK_TABLE_LOCK: return "STK_TABLE";
+ case STK_SESS_LOCK: return "STK_SESS";
+ case APPLETS_LOCK: return "APPLETS";
+ case PEER_LOCK: return "PEER";
+ case SHCTX_LOCK: return "SHCTX";
+ case SSL_LOCK: return "SSL";
+ case SSL_GEN_CERTS_LOCK: return "SSL_GEN_CERTS";
+ case PATREF_LOCK: return "PATREF";
+ case PATEXP_LOCK: return "PATEXP";
+ case VARS_LOCK: return "VARS";
+ case COMP_POOL_LOCK: return "COMP_POOL";
+ case LUA_LOCK: return "LUA";
+ case NOTIF_LOCK: return "NOTIF";
+ case SPOE_APPLET_LOCK: return "SPOE_APPLET";
+ case DNS_LOCK: return "DNS";
+ case PID_LIST_LOCK: return "PID_LIST";
+ case EMAIL_ALERTS_LOCK: return "EMAIL_ALERTS";
+ case PIPES_LOCK: return "PIPES";
+ case TLSKEYS_REF_LOCK: return "TLSKEYS_REF";
+ case AUTH_LOCK: return "AUTH";
+ case RING_LOCK: return "RING";
+ case DICT_LOCK: return "DICT";
+ case PROTO_LOCK: return "PROTO";
+ case QUEUE_LOCK: return "QUEUE";
+ case CKCH_LOCK: return "CKCH";
+ case SNI_LOCK: return "SNI";
+ case SSL_SERVER_LOCK: return "SSL_SERVER";
+ case SFT_LOCK: return "SFT";
+ case IDLE_CONNS_LOCK: return "IDLE_CONNS";
+ case OCSP_LOCK: return "OCSP";
+ case QC_CID_LOCK: return "QC_CID";
+ case CACHE_LOCK: return "CACHE";
+ case OTHER_LOCK: return "OTHER";
+ case DEBUG1_LOCK: return "DEBUG1";
+ case DEBUG2_LOCK: return "DEBUG2";
+ case DEBUG3_LOCK: return "DEBUG3";
+ case DEBUG4_LOCK: return "DEBUG4";
+ case DEBUG5_LOCK: return "DEBUG5";
+ case LOCK_LABELS: break; /* keep compiler happy */
+ };
+ /* only way to come here is consecutive to an internal bug */
+ abort();
+}
+
+void show_lock_stats()
+{
+ int lbl;
+
+ for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
+ if (!lock_stats[lbl].num_write_locked &&
+ !lock_stats[lbl].num_seek_locked &&
+ !lock_stats[lbl].num_read_locked) {
+ fprintf(stderr,
+ "Stats about Lock %s: not used\n",
+ lock_label(lbl));
+ continue;
+ }
+
+ fprintf(stderr,
+ "Stats about Lock %s: \n",
+ lock_label(lbl));
+
+ if (lock_stats[lbl].num_write_locked)
+ fprintf(stderr,
+ "\t # write lock : %llu\n"
+ "\t # write unlock: %llu (%lld)\n"
+ "\t # wait time for write : %.3f msec\n"
+ "\t # wait time for write/lock: %.3f nsec\n",
+ (ullong)lock_stats[lbl].num_write_locked,
+ (ullong)lock_stats[lbl].num_write_unlocked,
+ (llong)(lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked),
+ (double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
+ lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0);
+
+ if (lock_stats[lbl].num_seek_locked)
+ fprintf(stderr,
+ "\t # seek lock : %llu\n"
+ "\t # seek unlock : %llu (%lld)\n"
+ "\t # wait time for seek : %.3f msec\n"
+ "\t # wait time for seek/lock : %.3f nsec\n",
+ (ullong)lock_stats[lbl].num_seek_locked,
+ (ullong)lock_stats[lbl].num_seek_unlocked,
+ (llong)(lock_stats[lbl].num_seek_unlocked - lock_stats[lbl].num_seek_locked),
+ (double)lock_stats[lbl].nsec_wait_for_seek / 1000000.0,
+ lock_stats[lbl].num_seek_locked ? ((double)lock_stats[lbl].nsec_wait_for_seek / (double)lock_stats[lbl].num_seek_locked) : 0);
+
+ if (lock_stats[lbl].num_read_locked)
+ fprintf(stderr,
+ "\t # read lock : %llu\n"
+ "\t # read unlock : %llu (%lld)\n"
+ "\t # wait time for read : %.3f msec\n"
+ "\t # wait time for read/lock : %.3f nsec\n",
+ (ullong)lock_stats[lbl].num_read_locked,
+ (ullong)lock_stats[lbl].num_read_unlocked,
+ (llong)(lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked),
+ (double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
+ lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
+ }
+}
+
+/* Initializes the debug-instrumented rwlock <l>: clears all the bookkeeping
+ * state then initializes the underlying lock.
+ */
+void __ha_rwlock_init(struct ha_rwlock *l)
+{
+	memset(l, 0, sizeof(struct ha_rwlock));
+	__RWLOCK_INIT(&l->lock);
+}
+
+/* Destroys the underlying lock of <l> then clears the whole structure so
+ * that a late use has a better chance of being noticed.
+ */
+void __ha_rwlock_destroy(struct ha_rwlock *l)
+{
+	__RWLOCK_DESTROY(&l->lock);
+	memset(l, 0, sizeof(struct ha_rwlock));
+}
+
+
+void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_writers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_WRLOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
+
+ st->cur_writer = tbit;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_writers, ~tbit);
+}
+
+int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+ int r;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ /* We set waiting writer because trywrlock could wait for readers to quit */
+ HA_ATOMIC_OR(&st->wait_writers, tbit);
+
+ start_time = now_mono_time();
+ r = __RWLOCK_TRYWRLOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+ if (unlikely(r)) {
+ HA_ATOMIC_AND(&st->wait_writers, ~tbit);
+ return r;
+ }
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
+
+ st->cur_writer = tbit;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_writers, ~tbit);
+
+ return 0;
+}
+
+void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+
+ if (unlikely(!(st->cur_writer & tbit))) {
+ /* the thread is not owning the lock for write */
+ abort();
+ }
+
+ st->cur_writer = 0;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ __RWLOCK_WRUNLOCK(&l->lock);
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
+}
+
+void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_readers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_RDLOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+ HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
+
+ HA_ATOMIC_OR(&st->cur_readers, tbit);
+
+ HA_ATOMIC_AND(&st->wait_readers, ~tbit);
+}
+
+int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ int r;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ /* try read should never wait */
+ r = __RWLOCK_TRYRDLOCK(&l->lock);
+ if (unlikely(r))
+ return r;
+ HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
+
+ HA_ATOMIC_OR(&st->cur_readers, tbit);
+
+ return 0;
+}
+
+void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+
+ if (unlikely(!(st->cur_readers & tbit))) {
+ /* the thread is not owning the lock for read */
+ abort();
+ }
+
+ HA_ATOMIC_AND(&st->cur_readers, ~tbit);
+
+ __RWLOCK_RDUNLOCK(&l->lock);
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_read_unlocked);
+}
+
+void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_seeker) & tbit)
+ abort();
+
+ if (!(st->cur_writer & tbit))
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_readers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_WRTORD(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
+
+ HA_ATOMIC_OR(&st->cur_readers, tbit);
+ HA_ATOMIC_AND(&st->cur_writer, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_readers, ~tbit);
+}
+
+void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_seeker) & tbit)
+ abort();
+
+ if (!(st->cur_writer & tbit))
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_seekers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_WRTOSK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
+
+ HA_ATOMIC_OR(&st->cur_seeker, tbit);
+ HA_ATOMIC_AND(&st->cur_writer, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_seekers, ~tbit);
+}
+
+void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_seekers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_SKLOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
+
+ HA_ATOMIC_OR(&st->cur_seeker, tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_seekers, ~tbit);
+}
+
+void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_writer) & tbit)
+ abort();
+
+ if (!(st->cur_seeker & tbit))
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_writers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_SKTOWR(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
+
+ HA_ATOMIC_OR(&st->cur_writer, tbit);
+ HA_ATOMIC_AND(&st->cur_seeker, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_writers, ~tbit);
+}
+
+void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if ((st->cur_readers | st->cur_writer) & tbit)
+ abort();
+
+ if (!(st->cur_seeker & tbit))
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_readers, tbit);
+
+ start_time = now_mono_time();
+ __RWLOCK_SKTORD(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
+
+ HA_ATOMIC_OR(&st->cur_readers, tbit);
+ HA_ATOMIC_AND(&st->cur_seeker, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->wait_readers, ~tbit);
+}
+
+void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ if (!(st->cur_seeker & tbit))
+ abort();
+
+ HA_ATOMIC_AND(&st->cur_seeker, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ __RWLOCK_SKUNLOCK(&l->lock);
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_seek_unlocked);
+}
+
+int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+ int r;
+
+ if ((st->cur_readers | st->cur_seeker | st->cur_writer) & tbit)
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_seekers, tbit);
+
+ start_time = now_mono_time();
+ r = __RWLOCK_TRYSKLOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+
+ if (likely(!r)) {
+ /* got the lock ! */
+ HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
+ HA_ATOMIC_OR(&st->cur_seeker, tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+ }
+
+ HA_ATOMIC_AND(&st->wait_seekers, ~tbit);
+ return r;
+}
+
+int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_rwlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+ int r;
+
+ if ((st->cur_writer | st->cur_seeker) & tbit)
+ abort();
+
+ if (!(st->cur_readers & tbit))
+ abort();
+
+ HA_ATOMIC_OR(&st->wait_seekers, tbit);
+
+ start_time = now_mono_time();
+ r = __RWLOCK_TRYRDTOSK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
+
+ if (likely(!r)) {
+ /* got the lock ! */
+ HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
+ HA_ATOMIC_OR(&st->cur_seeker, tbit);
+ HA_ATOMIC_AND(&st->cur_readers, ~tbit);
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+ }
+
+ HA_ATOMIC_AND(&st->wait_seekers, ~tbit);
+ return r;
+}
+
+/* Initializes the debug-instrumented spinlock <l>: clears all the
+ * bookkeeping state then initializes the underlying lock.
+ */
+void __spin_init(struct ha_spinlock *l)
+{
+	memset(l, 0, sizeof(struct ha_spinlock));
+	__SPIN_INIT(&l->lock);
+}
+
+/* Destroys the underlying lock of <l> then clears the whole structure so
+ * that a late use has a better chance of being noticed.
+ */
+void __spin_destroy(struct ha_spinlock *l)
+{
+	__SPIN_DESTROY(&l->lock);
+	memset(l, 0, sizeof(struct ha_spinlock));
+}
+
+void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_spinlock_state *st = &l->info.st[tgid-1];
+ uint64_t start_time;
+
+ if (unlikely(st->owner & tbit)) {
+ /* the thread is already owning the lock */
+ abort();
+ }
+
+ HA_ATOMIC_OR(&st->waiters, tbit);
+
+ start_time = now_mono_time();
+ __SPIN_LOCK(&l->lock);
+ HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
+
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
+
+
+ st->owner = tbit;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ HA_ATOMIC_AND(&st->waiters, ~tbit);
+}
+
+int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_spinlock_state *st = &l->info.st[tgid-1];
+ int r;
+
+ if (unlikely(st->owner & tbit)) {
+ /* the thread is already owning the lock */
+ abort();
+ }
+
+ /* try read should never wait */
+ r = __SPIN_TRYLOCK(&l->lock);
+ if (unlikely(r))
+ return r;
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
+
+ st->owner = tbit;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ return 0;
+}
+
+void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
+ const char *func, const char *file, int line)
+{
+ ulong tbit = (ti && ti->ltid_bit) ? ti->ltid_bit : 1;
+ struct ha_spinlock_state *st = &l->info.st[tgid-1];
+
+ if (unlikely(!(st->owner & tbit))) {
+ /* the thread is not owning the lock */
+ abort();
+ }
+
+ st->owner = 0;
+ l->info.last_location.function = func;
+ l->info.last_location.file = file;
+ l->info.last_location.line = line;
+
+ __SPIN_UNLOCK(&l->lock);
+ HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
+}
+
+#endif // defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+
+
+#if defined(USE_PTHREAD_EMULATION)
+
+/* pthread rwlock emulation using plocks (to avoid expensive futexes).
+ * these are a direct mapping on Progressive Locks, with the exception that
+ * since there's a common unlock operation in pthreads, we need to know if
+ * we need to unlock for reads or writes, so we set the topmost bit to 1 when
+ * a write lock is acquired to indicate that a write unlock needs to be
+ * performed. It's not a problem since this bit will never be used given that
+ * haproxy won't support as many threads as the plocks.
+ *
+ * The storage is the pthread_rwlock_t cast as an ulong
+ */
+
+/* Emulated pthread_rwlock_init(): resets the underlying plock word to the
+ * unlocked state. The <attr> argument is ignored. Always returns 0.
+ */
+int pthread_rwlock_init(pthread_rwlock_t *restrict rwlock, const pthread_rwlockattr_t *restrict attr)
+{
+	ulong *lock = (ulong *)rwlock;
+
+	*lock = 0;
+	return 0;
+}
+
+/* Emulated pthread_rwlock_destroy(): resets the plock word. Always returns 0. */
+int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
+{
+	ulong *lock = (ulong *)rwlock;
+
+	*lock = 0;
+	return 0;
+}
+
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
+{
+ pl_lorw_rdlock((unsigned long *)rwlock);
+ return 0;
+}
+
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
+{
+ return !!pl_cmpxchg((unsigned long *)rwlock, 0, PLOCK_LORW_SHR_BASE);
+}
+
+int pthread_rwlock_timedrdlock(pthread_rwlock_t *restrict rwlock, const struct timespec *restrict abstime)
+{
+ return pthread_rwlock_tryrdlock(rwlock);
+}
+
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
+{
+ pl_lorw_wrlock((unsigned long *)rwlock);
+ return 0;
+}
+
+int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
+{
+ return !!pl_cmpxchg((unsigned long *)rwlock, 0, PLOCK_LORW_EXC_BASE);
+}
+
+int pthread_rwlock_timedwrlock(pthread_rwlock_t *restrict rwlock, const struct timespec *restrict abstime)
+{
+ return pthread_rwlock_trywrlock(rwlock);
+}
+
+int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+{
+ pl_lorw_unlock((unsigned long *)rwlock);
+ return 0;
+}
+#endif // defined(USE_PTHREAD_EMULATION)
+
+/* Depending on the platform and how libpthread was built, pthread_exit() may
+ * involve some code in libgcc_s that would be loaded on exit for the first
 * time, causing aborts if the process is chrooted. It's harmless but very
+ * dirty. There isn't much we can do to make sure libgcc_s is loaded only if
+ * needed, so what we do here is that during early boot we create a dummy
+ * thread that immediately exits. This will lead to libgcc_s being loaded
+ * during boot on the platforms where it's required.
+ */
/* Thread entry point used by preload_libgcc_s(): it terminates the thread
 * right away through pthread_exit() so that the thread-exit path (and any
 * library it lazily loads) gets exercised once at boot.
 */
static void *dummy_thread_function(void *data)
{
	(void)data; /* unused */
	pthread_exit(NULL);
	return NULL; /* never reached, keeps compilers happy */
}
+
+static inline void preload_libgcc_s(void)
+{
+ pthread_t dummy_thread;
+ if (pthread_create(&dummy_thread, NULL, dummy_thread_function, NULL) == 0)
+ pthread_join(dummy_thread, NULL);
+}
+
/* Boot-time initializer of the threading subsystem: preloads libgcc_s (see
 * preload_libgcc_s()), computes the number of CPUs usable at boot (bounded by
 * MAX_THREADS), registers the build options string, and resets the lock
 * statistics when lock debugging is enabled.
 */
static void __thread_init(void)
{
	char *ptr = NULL;

	preload_libgcc_s();

	thread_cpus_enabled_at_boot = thread_cpus_enabled();
	thread_cpus_enabled_at_boot = MIN(thread_cpus_enabled_at_boot, MAX_THREADS);

	memprintf(&ptr, "Built with multi-threading support (MAX_TGROUPS=%d, MAX_THREADS=%d, default=%d).",
		  MAX_TGROUPS, MAX_THREADS, thread_cpus_enabled_at_boot);
	hap_register_build_opts(ptr, 1);

#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
	memset(lock_stats, 0, sizeof(lock_stats));
#endif
}
INITCALL0(STG_PREPARE, __thread_init);
+
+#else
+
/* send signal <sig> to thread <thr> (send to process in fact). In this
 * thread-less build the process is the only "thread", so <thr> is ignored
 * and the signal is simply raised locally.
 */
void ha_tkill(unsigned int thr, int sig)
{
	raise(sig);
}
+
/* send signal <sig> to all threads (send to process in fact). In this
 * thread-less build there is only the process itself, so the signal is
 * simply raised locally.
 */
void ha_tkillall(int sig)
{
	raise(sig);
}
+
/* Relax the calling thread: yield the CPU when the platform advertises
 * priority scheduling support, otherwise do nothing (this is the
 * single-threaded build, there is nobody else to wait for anyway).
 */
void ha_thread_relax(void)
{
#ifdef _POSIX_PRIORITY_SCHEDULING
	sched_yield();
#endif
}
+
+REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).");
+
+#endif // USE_THREAD
+
+
/* Returns non-zero on anomaly (bound vs unbound), and emits a warning in this
 * case. The anomaly is a mix of threads bound to a CPU via cpu-map with
 * threads left unbound, since the unbound ones then randomly compete with the
 * bound ones for their CPUs.
 */
int thread_detect_binding_discrepancies(void)
{
#if defined(USE_CPU_AFFINITY)
	uint th, tg, id;
	uint tot_b = 0, tot_u = 0;  /* totals of bound / unbound threads */
	int first_b = -1;           /* first bound thread, -1 if none */
	int first_u = -1;           /* first unbound thread, -1 if none */

	for (th = 0; th < global.nbthread; th++) {
		tg = ha_thread_info[th].tgid;
		id = ha_thread_info[th].ltid;

		/* an empty per-thread cpu-map set means the thread is unbound */
		if (ha_cpuset_count(&cpu_map[tg - 1].thread[id]) == 0) {
			tot_u++;
			if (first_u < 0)
				first_u = th;
		} else {
			tot_b++;
			if (first_b < 0)
				first_b = th;
		}
	}

	/* only the mixed bound/unbound situation is reported */
	if (tot_u > 0 && tot_b > 0) {
		ha_warning("Found %u thread(s) mapped to a CPU and %u thread(s) not mapped to any CPU. "
			   "This will result in some threads being randomly assigned to the same CPU, "
			   "which will occasionally cause severe performance degradation. First thread "
			   "bound is %d and first thread not bound is %d. Please either bind all threads "
			   "or none (maybe some cpu-map directives are missing?).\n",
			   tot_b, tot_u, first_b, first_u);
		return 1;
	}
#endif
	return 0;
}
+
/* Returns non-zero on anomaly (more threads than CPUs), and emits a warning in
 * this case. It checks against configured cpu-map if any, otherwise against
 * the number of CPUs at boot if known. It's better to run it only after
 * thread_detect_binding_discrepancies() so that mixed cases can be eliminated.
 */
int thread_detect_more_than_cpus(void)
{
#if defined(USE_CPU_AFFINITY)
	struct hap_cpuset cpuset_map, cpuset_boot, cpuset_all;
	uint th, tg, id;
	int bound;
	int tot_map, tot_all;

	/* build the union of all CPUs referenced by cpu-map directives and
	 * count how many threads are explicitly bound.
	 */
	ha_cpuset_zero(&cpuset_boot);
	ha_cpuset_zero(&cpuset_map);
	ha_cpuset_zero(&cpuset_all);
	bound = 0;
	for (th = 0; th < global.nbthread; th++) {
		tg = ha_thread_info[th].tgid;
		id = ha_thread_info[th].ltid;
		if (ha_cpuset_count(&cpu_map[tg - 1].thread[id])) {
			ha_cpuset_or(&cpuset_map, &cpu_map[tg - 1].thread[id]);
			bound++;
		}
	}

	/* threads without a cpu-map may run on any CPU detected at boot, so
	 * complete the "all" set with those when not all threads are bound.
	 */
	ha_cpuset_assign(&cpuset_all, &cpuset_map);
	if (bound != global.nbthread) {
		if (ha_cpuset_detect_bound(&cpuset_boot))
			ha_cpuset_or(&cpuset_all, &cpuset_boot);
	}

	tot_map = ha_cpuset_count(&cpuset_map);
	tot_all = ha_cpuset_count(&cpuset_all);

	if (tot_map && bound > tot_map) {
		ha_warning("This configuration binds %d threads to a total of %d CPUs via cpu-map "
			   "directives. This means that some threads will compete for the same CPU, "
			   "which will cause severe performance degradation. Please fix either the "
			   "'cpu-map' directives or set the global 'nbthread' value accordingly.\n",
			   bound, tot_map);
		return 1;
	}
	else if (tot_all && global.nbthread > tot_all) {
		/* fixed the previously garbled message which was missing the
		 * verb ("Please either the 'cpu-map' directives ...").
		 */
		ha_warning("This configuration enables %d threads running on a total of %d CPUs. "
			   "This means that some threads will compete for the same CPU, which will cause "
			   "severe performance degradation. Please either fix the 'cpu-map' directives "
			   "to adjust the CPUs to use, or fix the global 'nbthread' value.\n",
			   global.nbthread, tot_all);
		return 1;
	}
#endif
	return 0;
}
+
+
/* scans the configured thread mapping and establishes the final one. Returns <0
 * on failure, >=0 on success. Threads explicitly assigned by "thread-group"
 * directives are kept; the remaining ones are packed into the remaining empty
 * groups as evenly as possible while staying contiguous.
 */
int thread_map_to_groups()
{
	int t, g, ut, ug;
	int q, r;
	ulong m __maybe_unused;

	ut = ug = 0; // unassigned threads & groups

	/* count threads not yet assigned to any group */
	for (t = 0; t < global.nbthread; t++) {
		if (!ha_thread_info[t].tg)
			ut++;
	}

	/* count empty groups and set each group's tgid bit */
	for (g = 0; g < global.nbtgroups; g++) {
		if (!ha_tgroup_info[g].count)
			ug++;
		ha_tgroup_info[g].tgid_bit = 1UL << g;
	}

	if (ug > ut) {
		ha_alert("More unassigned thread-groups (%d) than threads (%d). Please reduce thread-groups\n", ug, ut);
		return -1;
	}

	/* look for first unassigned thread */
	for (t = 0; t < global.nbthread && ha_thread_info[t].tg; t++)
		;

	/* assign threads to empty groups */
	for (g = 0; ug && ut; ) {
		/* due to sparse thread assignment we can end up with more threads
		 * per group on last assigned groups than former ones, so we must
		 * always try to pack the maximum remaining ones together first.
		 */
		q = ut / ug;
		r = ut % ug;
		if ((q + !!r) > MAX_THREADS_PER_GROUP) {
			ha_alert("Too many remaining unassigned threads (%d) for thread groups (%d). Please increase thread-groups or make sure to keep thread numbers contiguous\n", ut, ug);
			return -1;
		}

		/* thread <t> is the next unassigned one. Let's look for next
		 * unassigned group, we know there are some left
		 */
		while (ut >= ug && ha_tgroup_info[g].count)
			g++;

		/* group g is unassigned, try to fill it with consecutive threads */
		while (ut && ut >= ug && ha_tgroup_info[g].count < q + !!r &&
		       (!ha_tgroup_info[g].count || t == ha_tgroup_info[g].base + ha_tgroup_info[g].count)) {

			if (!ha_tgroup_info[g].count) {
				/* assign new group */
				ha_tgroup_info[g].base = t;
				ug--;
			}

			ha_tgroup_info[g].count++;
			ha_thread_info[t].tgid = g + 1;
			ha_thread_info[t].tg = &ha_tgroup_info[g];
			ha_thread_info[t].tg_ctx = &ha_tgroup_ctx[g];

			ut--;
			/* switch to next unassigned thread */
			while (++t < global.nbthread && ha_thread_info[t].tg)
				;
		}
	}

	if (ut) {
		ha_alert("Remaining unassigned threads found (%d) because all groups are in use. Please increase 'thread-groups', reduce 'nbthreads' or remove or extend 'thread-group' enumerations.\n", ut);
		return -1;
	}

	/* finalize per-thread IDs: global tid, group-local ltid and its bit */
	for (t = 0; t < global.nbthread; t++) {
		ha_thread_info[t].tid = t;
		ha_thread_info[t].ltid = t - ha_thread_info[t].tg->base;
		ha_thread_info[t].ltid_bit = 1UL << ha_thread_info[t].ltid;
	}

	m = 0;
	for (g = 0; g < global.nbtgroups; g++) {
		ha_tgroup_info[g].threads_enabled = nbits(ha_tgroup_info[g].count);
		/* for now, additional threads are not started, so we should
		 * consider them as harmless and idle.
		 * This will get automatically updated when such threads are
		 * started in run_thread_poll_loop()
		 * Without this, thread_isolate() and thread_isolate_full()
		 * will fail to work as long as secondary threads did not enter
		 * the polling loop at least once.
		 */
		ha_tgroup_ctx[g].threads_harmless = ha_tgroup_info[g].threads_enabled;
		ha_tgroup_ctx[g].threads_idle = ha_tgroup_info[g].threads_enabled;
		if (!ha_tgroup_info[g].count)
			continue;
		m |= 1UL << g;

	}

#ifdef USE_THREAD
	all_tgroups_mask = m;
#endif
	return 0;
}
+
/* Converts a configuration thread set based on either absolute or relative
 * thread numbers into a global group+mask. This is essentially for use with
 * the "thread" directive on "bind" lines, where "thread 4-6,10-12" might be
 * turned to "2/1-3,4/1-3". It cannot be used before the thread mapping above
 * was completed and the thread group numbers configured. The thread_set is
 * replaced by the resolved group-based one. It is possible to force a single
 * default group for unspecified sets instead of enabling all groups by passing
 * this group's non-zero value to defgrp.
 *
 * Returns <0 on failure, >=0 on success.
 */
int thread_resolve_group_mask(struct thread_set *ts, int defgrp, char **err)
{
	struct thread_set new_ts = { };
	ulong mask, imask;
	uint g;

	if (!ts->grps) {
		/* unspecified group, IDs are global */
		if (thread_set_is_empty(ts)) {
			/* all threads of all groups, unless defgrp is set and
			 * we then set it as the only group.
			 */
			for (g = defgrp ? defgrp-1 : 0; g < (defgrp ? defgrp : global.nbtgroups); g++) {
				new_ts.rel[g] = ha_tgroup_info[g].threads_enabled;
				if (new_ts.rel[g])
					new_ts.grps |= 1UL << g;
			}
		} else {
			/* some absolute threads are set, we must remap them to
			 * relative ones. Each group cannot have more than
			 * LONGBITS threads, thus it spans at most two absolute
			 * blocks.
			 */
			for (g = 0; g < global.nbtgroups; g++) {
				/* the group's first thread sits in absolute
				 * block <block> at bit <base>.
				 */
				uint block = ha_tgroup_info[g].base / LONGBITS;
				uint base = ha_tgroup_info[g].base % LONGBITS;

				mask = ts->abs[block] >> base;
				/* when the group straddles two blocks, pull the
				 * missing high bits from the next block.
				 */
				if (base &&
				    (block + 1) < sizeof(ts->abs) / sizeof(ts->abs[0]) &&
				    ha_tgroup_info[g].count > (LONGBITS - base))
					mask |= ts->abs[block + 1] << (LONGBITS - base);
				mask &= nbits(ha_tgroup_info[g].count);
				mask &= ha_tgroup_info[g].threads_enabled;

				/* now the mask exactly matches the threads to be enabled
				 * in this group.
				 */
				new_ts.rel[g] |= mask;
				if (new_ts.rel[g])
					new_ts.grps |= 1UL << g;
			}
		}
	} else {
		/* groups were specified */
		for (g = 0; g < MAX_TGROUPS; g++) {
			imask = ts->rel[g];
			if (!imask)
				continue;

			if (g >= global.nbtgroups) {
				memprintf(err, "'thread' directive references non-existing thread group %u", g+1);
				return -1;
			}

			/* some relative threads are set. Keep only existing ones for this group */
			mask = nbits(ha_tgroup_info[g].count);

			if (!(mask & imask)) {
				/* no intersection between the thread group's
				 * threads and the bind line's.
				 */
#ifdef THREAD_AUTO_ADJUST_GROUPS
				unsigned long new_mask = 0;

				while (imask) {
					new_mask |= imask & mask;
					imask >>= ha_tgroup_info[g].count;
				}
				imask = new_mask;
#else
				memprintf(err, "'thread' directive only references threads not belonging to group %u", g+1);
				return -1;
#endif
			}

			new_ts.rel[g] = imask & mask;
			if (new_ts.rel[g])
				new_ts.grps |= 1UL << g;
		}
	}

	/* update the thread_set, refusing a resolution yielding no thread */
	if (!thread_set_nth_group(&new_ts, 0)) {
		memprintf(err, "'thread' directive only references non-existing threads");
		return -1;
	}

	*ts = new_ts;
	return 0;
}
+
+/* Parse a string representing a thread set in one of the following forms:
+ *
+ * - { "all" | "odd" | "even" | <abs_num> [ "-" <abs_num> ] }[,...]
+ * => these are (lists of) absolute thread numbers
+ *
+ * - <tgnum> "/" { "all" | "odd" | "even" | <rel_num> [ "-" <rel_num> ][,...]
+ * => these are (lists of) per-group relative thread numbers. All numbers
+ * must be lower than or equal to LONGBITS. When multiple list elements
+ * are provided, each of them must contain the thread group number.
+ *
+ * Minimum value for a thread or group number is always 1. Maximum value for an
+ * absolute thread number is MAX_THREADS, maximum value for a relative thread
+ * number is MAX_THREADS_PER_GROUP, an maximum value for a thread group is
+ * MAX_TGROUPS. "all", "even" and "odd" will be bound by MAX_THREADS and/or
+ * MAX_THREADS_PER_GROUP in any case. In ranges, a missing digit before "-"
+ * is implicitly 1, and a missing digit after "-" is implicitly the highest of
+ * its class. As such "-" is equivalent to "all", allowing to build strings
+ * such as "${MIN}-${MAX}" where both MIN and MAX are optional.
+ *
+ * It is not valid to mix absolute and relative numbers. As such:
+ * - all valid (all absolute threads)
+ * - 12-19,24-31 valid (abs threads 12 to 19 and 24 to 31)
+ * - 1/all valid (all 32 or 64 threads of group 1)
+ * - 1/1-4,1/8-10,2/1 valid
+ * - 1/1-4,8-10 invalid (mixes relatve "1/1-4" with absolute "8-10")
+ * - 1-4,8-10,2/1 invalid (mixes absolute "1-4,8-10" with relative "2/1")
+ * - 1/odd-4 invalid (mixes range with boundary)
+ *
+ * The target thread set is *completed* with supported threads, which means
+ * that it's the caller's responsibility for pre-initializing it. If the target
+ * thread set is NULL, it's not updated and the function only verifies that the
+ * input parses.
+ *
+ * On success, it returns 0. otherwise it returns non-zero with an error
+ * message in <err>.
+ */
+int parse_thread_set(const char *arg, struct thread_set *ts, char **err)
+{
+ const char *set;
+ const char *sep;
+ int v, min, max, tg;
+ int is_rel;
+
+ /* search for the first delimiter (',', '-' or '/') to decide whether
+ * we're facing an absolute or relative form. The relative form always
+ * starts with a number followed by a slash.
+ */
+ for (sep = arg; isdigit((uchar)*sep); sep++)
+ ;
+
+ is_rel = (/*sep > arg &&*/ *sep == '/'); /* relative form */
+
+ /* from there we have to cut the thread spec around commas */
+
+ set = arg;
+ tg = 0;
+ while (*set) {
+ /* note: we can't use strtol() here because "-3" would parse as
+ * (-3) while we want to stop before the "-", so we find the
+ * separator ourselves and rely on atoi() whose value we may
+ * ignore depending where the separator is.
+ */
+ for (sep = set; isdigit((uchar)*sep); sep++)
+ ;
+
+ if (sep != set && *sep && *sep != '/' && *sep != '-' && *sep != ',') {
+ memprintf(err, "invalid character '%c' in thread set specification: '%s'.", *sep, set);
+ return -1;
+ }
+
+ v = (sep != set) ? atoi(set) : 0;
+
+ /* Now we know that the string is made of an optional series of digits
+ * optionally followed by one of the delimiters above, or that it
+ * starts with a different character.
+ */
+
+ /* first, let's search for the thread group (digits before '/') */
+
+ if (tg || !is_rel) {
+ /* thread group already specified or not expected if absolute spec */
+ if (*sep == '/') {
+ if (tg)
+ memprintf(err, "redundant thread group specification '%s' for group %d", set, tg);
+ else
+ memprintf(err, "group-relative thread specification '%s' is not permitted after a absolute thread range.", set);
+ return -1;
+ }
+ } else {
+ /* this is a group-relative spec, first field is the group number */
+ if (sep == set && *sep == '/') {
+ memprintf(err, "thread group number expected before '%s'.", set);
+ return -1;
+ }
+
+ if (*sep != '/') {
+ memprintf(err, "absolute thread specification '%s' is not permitted after a group-relative thread range.", set);
+ return -1;
+ }
+
+ if (v < 1 || v > MAX_TGROUPS) {
+ memprintf(err, "invalid thread group number '%d', permitted range is 1..%d in '%s'.", v, MAX_TGROUPS, set);
+ return -1;
+ }
+
+ tg = v;
+
+ /* skip group number and go on with set,sep,v as if
+ * there was no group number.
+ */
+ set = sep + 1;
+ continue;
+ }
+
+ /* Now 'set' starts at the min thread number, whose value is in v if any,
+ * and preset the max to it, unless the range is filled at once via "all"
+ * (stored as 1:0), "odd" (stored as) 1:-1, or "even" (stored as 1:-2).
+ * 'sep' points to the next non-digit which may be set itself e.g. for
+ * "all" etc or "-xx".
+ */
+
+ if (!*set) {
+ /* empty set sets no restriction */
+ min = 1;
+ max = is_rel ? MAX_THREADS_PER_GROUP : MAX_THREADS;
+ }
+ else {
+ if (sep != set && *sep && *sep != '-' && *sep != ',') {
+ // Only delimiters are permitted around digits.
+ memprintf(err, "invalid character '%c' in thread set specification: '%s'.", *sep, set);
+ return -1;
+ }
+
+ /* for non-digits, find next delim */
+ for (; *sep && *sep != '-' && *sep != ','; sep++)
+ ;
+
+ min = max = 1;
+ if (sep != set) {
+ /* non-empty first thread */
+ if (isteq(ist2(set, sep-set), ist("all")))
+ max = 0;
+ else if (isteq(ist2(set, sep-set), ist("odd")))
+ max = -1;
+ else if (isteq(ist2(set, sep-set), ist("even")))
+ max = -2;
+ else if (v)
+ min = max = v;
+ else
+ max = min = 0; // throw an error below
+ }
+
+ if (min < 1 || min > MAX_THREADS || (is_rel && min > MAX_THREADS_PER_GROUP)) {
+ memprintf(err, "invalid first thread number '%s', permitted range is 1..%d, or 'all', 'odd', 'even'.",
+ set, is_rel ? MAX_THREADS_PER_GROUP : MAX_THREADS);
+ return -1;
+ }
+
+ /* is this a range ? */
+ if (*sep == '-') {
+ if (min != max) {
+ memprintf(err, "extraneous range after 'all', 'odd' or 'even': '%s'.", set);
+ return -1;
+ }
+
+ /* this is a seemingly valid range, there may be another number */
+ for (set = ++sep; isdigit((uchar)*sep); sep++)
+ ;
+ v = atoi(set);
+
+ if (sep == set) { // no digit: to the max
+ max = is_rel ? MAX_THREADS_PER_GROUP : MAX_THREADS;
+ if (*sep && *sep != ',')
+ max = 0; // throw an error below
+ } else
+ max = v;
+
+ if (max < 1 || max > MAX_THREADS || (is_rel && max > MAX_THREADS_PER_GROUP)) {
+ memprintf(err, "invalid last thread number '%s', permitted range is 1..%d.",
+ set, is_rel ? MAX_THREADS_PER_GROUP : MAX_THREADS);
+ return -1;
+ }
+ }
+
+ /* here sep points to the first non-digit after the thread spec,
+ * must be a valid delimiter.
+ */
+ if (*sep && *sep != ',') {
+ memprintf(err, "invalid character '%c' after thread set specification: '%s'.", *sep, set);
+ return -1;
+ }
+ }
+
+ /* store values */
+ if (ts) {
+ if (is_rel) {
+ /* group-relative thread numbers */
+ ts->grps |= 1UL << (tg - 1);
+
+ if (max >= min) {
+ for (v = min; v <= max; v++)
+ ts->rel[tg - 1] |= 1UL << (v - 1);
+ } else {
+ memset(&ts->rel[tg - 1],
+ (max == 0) ? 0xff /* all */ : (max == -1) ? 0x55 /* odd */: 0xaa /* even */,
+ sizeof(ts->rel[tg - 1]));
+ }
+ } else {
+ /* absolute thread numbers */
+ if (max >= min) {
+ for (v = min; v <= max; v++)
+ ts->abs[(v - 1) / LONGBITS] |= 1UL << ((v - 1) % LONGBITS);
+ } else {
+ memset(&ts->abs,
+ (max == 0) ? 0xff /* all */ : (max == -1) ? 0x55 /* odd */: 0xaa /* even */,
+ sizeof(ts->abs));
+ }
+ }
+ }
+
+ set = *sep ? sep + 1 : sep;
+ tg = 0;
+ }
+ return 0;
+}
+
/* Parse the "nbthread" global directive, which takes an integer argument that
 * contains the desired number of threads. Returns 0 on success, -1 with an
 * error message in <err> on failure.
 */
static int cfg_parse_nbthread(char **args, int section_type, struct proxy *curpx,
                              const struct proxy *defpx, const char *file, int line,
                              char **err)
{
	long nbthread;
	char *errptr;

	if (too_many_args(1, args, err, NULL))
		return -1;

	/* threading must be configured before any non-global section */
	if (non_global_section_parsed == 1) {
		memprintf(err, "'%s' not allowed if a non-global section was previously defined. This parameter must be declared in the first global section", args[0]);
		return -1;
	}

	nbthread = strtol(args[1], &errptr, 10);
	if (!*args[1] || *errptr) {
		memprintf(err, "'%s' passed a missing or unparsable integer value in '%s'", args[0], args[1]);
		return -1;
	}

#ifndef USE_THREAD
	/* without thread support only the degenerate value 1 is accepted */
	if (nbthread != 1) {
		memprintf(err, "'%s' specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD", args[0]);
		return -1;
	}
#else
	if (nbthread < 1 || nbthread > MAX_THREADS) {
		memprintf(err, "'%s' value must be between 1 and %d (was %ld)", args[0], MAX_THREADS, nbthread);
		return -1;
	}
#endif

	/* a redefinition is only reported in diagnostic mode, not rejected */
	HA_DIAG_WARNING_COND(global.nbthread,
	                     "parsing [%s:%d] : '%s' is already defined and will be overridden.\n",
	                     file, line, args[0]);

	global.nbthread = nbthread;
	return 0;
}
+
+/* Parse the "thread-group" global directive, which takes an integer argument
+ * that designates a thread group, and a list of threads to put into that group.
+ */
+static int cfg_parse_thread_group(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ char *errptr;
+ long tnum, tend, tgroup;
+ int arg, tot;
+
+ if (non_global_section_parsed == 1) {
+ memprintf(err, "'%s' not allowed if a non-global section was previously defined. This parameter must be declared in the first global section", args[0]);
+ return -1;
+ }
+
+ tgroup = strtol(args[1], &errptr, 10);
+ if (!*args[1] || *errptr) {
+ memprintf(err, "'%s' passed a missing or unparsable integer value in '%s'", args[0], args[1]);
+ return -1;
+ }
+
+ if (tgroup < 1 || tgroup > MAX_TGROUPS) {
+ memprintf(err, "'%s' thread-group number must be between 1 and %d (was %ld)", args[0], MAX_TGROUPS, tgroup);
+ return -1;
+ }
+
+ /* look for a preliminary definition of any thread pointing to this
+ * group, and remove them.
+ */
+ if (ha_tgroup_info[tgroup-1].count) {
+ ha_warning("parsing [%s:%d] : '%s %ld' was already defined and will be overridden.\n",
+ file, line, args[0], tgroup);
+
+ for (tnum = ha_tgroup_info[tgroup-1].base;
+ tnum < ha_tgroup_info[tgroup-1].base + ha_tgroup_info[tgroup-1].count;
+ tnum++) {
+ if (ha_thread_info[tnum-1].tg == &ha_tgroup_info[tgroup-1]) {
+ ha_thread_info[tnum-1].tg = NULL;
+ ha_thread_info[tnum-1].tgid = 0;
+ ha_thread_info[tnum-1].tg_ctx = NULL;
+ }
+ }
+ ha_tgroup_info[tgroup-1].count = ha_tgroup_info[tgroup-1].base = 0;
+ }
+
+ tot = 0;
+ for (arg = 2; args[arg] && *args[arg]; arg++) {
+ tend = tnum = strtol(args[arg], &errptr, 10);
+
+ if (*errptr == '-')
+ tend = strtol(errptr + 1, &errptr, 10);
+
+ if (*errptr || tnum < 1 || tend < 1 || tnum > MAX_THREADS || tend > MAX_THREADS) {
+ memprintf(err, "'%s %ld' passed an unparsable or invalid thread number '%s' (valid range is 1 to %d)", args[0], tgroup, args[arg], MAX_THREADS);
+ return -1;
+ }
+
+ for(; tnum <= tend; tnum++) {
+ if (ha_thread_info[tnum-1].tg == &ha_tgroup_info[tgroup-1]) {
+ ha_warning("parsing [%s:%d] : '%s %ld': thread %ld assigned more than once on the same line.\n",
+ file, line, args[0], tgroup, tnum);
+ } else if (ha_thread_info[tnum-1].tg) {
+ ha_warning("parsing [%s:%d] : '%s %ld': thread %ld was previously assigned to thread group %ld and will be overridden.\n",
+ file, line, args[0], tgroup, tnum,
+ (long)(ha_thread_info[tnum-1].tg - &ha_tgroup_info[0] + 1));
+ }
+
+ if (!ha_tgroup_info[tgroup-1].count) {
+ ha_tgroup_info[tgroup-1].base = tnum-1;
+ ha_tgroup_info[tgroup-1].count = 1;
+ }
+ else if (tnum >= ha_tgroup_info[tgroup-1].base + ha_tgroup_info[tgroup-1].count) {
+ ha_tgroup_info[tgroup-1].count = tnum - ha_tgroup_info[tgroup-1].base;
+ }
+ else if (tnum < ha_tgroup_info[tgroup-1].base) {
+ ha_tgroup_info[tgroup-1].count += ha_tgroup_info[tgroup-1].base - tnum-1;
+ ha_tgroup_info[tgroup-1].base = tnum - 1;
+ }
+
+ ha_thread_info[tnum-1].tgid = tgroup;
+ ha_thread_info[tnum-1].tg = &ha_tgroup_info[tgroup-1];
+ ha_thread_info[tnum-1].tg_ctx = &ha_tgroup_ctx[tgroup-1];
+ tot++;
+ }
+ }
+
+ if (ha_tgroup_info[tgroup-1].count > tot) {
+ memprintf(err, "'%s %ld' assigned sparse threads, only contiguous supported", args[0], tgroup);
+ return -1;
+ }
+
+ if (ha_tgroup_info[tgroup-1].count > MAX_THREADS_PER_GROUP) {
+ memprintf(err, "'%s %ld' assigned too many threads (%d, max=%d)", args[0], tgroup, tot, MAX_THREADS_PER_GROUP);
+ return -1;
+ }
+
+ return 0;
+}
+
/* Parse the "thread-groups" global directive, which takes an integer argument
 * that contains the desired number of thread groups. Returns 0 on success,
 * -1 with an error message in <err> on failure.
 */
static int cfg_parse_thread_groups(char **args, int section_type, struct proxy *curpx,
                                   const struct proxy *defpx, const char *file, int line,
                                   char **err)
{
	long nbtgroups;
	char *errptr;

	if (too_many_args(1, args, err, NULL))
		return -1;

	/* threading must be configured before any non-global section */
	if (non_global_section_parsed == 1) {
		memprintf(err, "'%s' not allowed if a non-global section was previously defined. This parameter must be declared in the first global section", args[0]);
		return -1;
	}

	nbtgroups = strtol(args[1], &errptr, 10);
	if (!*args[1] || *errptr) {
		memprintf(err, "'%s' passed a missing or unparsable integer value in '%s'", args[0], args[1]);
		return -1;
	}

#ifndef USE_THREAD
	/* without thread support only the degenerate value 1 is accepted */
	if (nbtgroups != 1) {
		memprintf(err, "'%s' specified with a value other than 1 while HAProxy is not compiled with threads support. Please check build options for USE_THREAD", args[0]);
		return -1;
	}
#else
	if (nbtgroups < 1 || nbtgroups > MAX_TGROUPS) {
		memprintf(err, "'%s' value must be between 1 and %d (was %ld)", args[0], MAX_TGROUPS, nbtgroups);
		return -1;
	}
#endif

	/* a redefinition is only reported in diagnostic mode, not rejected */
	HA_DIAG_WARNING_COND(global.nbtgroups,
	                     "parsing [%s:%d] : '%s' is already defined and will be overridden.\n",
	                     file, line, args[0]);

	global.nbtgroups = nbtgroups;
	return 0;
}
+
/* config keyword parsers: registered at the STG_REGISTER boot stage so that
 * the global section recognizes the threading directives parsed above.
 */
static struct cfg_kw_list cfg_kws = {ILH, {
	{ CFG_GLOBAL, "nbthread", cfg_parse_nbthread, 0 },
	{ CFG_GLOBAL, "thread-group", cfg_parse_thread_group, 0 },
	{ CFG_GLOBAL, "thread-groups", cfg_parse_thread_groups, 0 },
	{ 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
diff --git a/src/time.c b/src/time.c
new file mode 100644
index 0000000..280b522
--- /dev/null
+++ b/src/time.c
@@ -0,0 +1,147 @@
+/*
+ * Time calculation functions.
+ *
+ * Copyright 2000-2011 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <sys/time.h>
+
+#include <haproxy/api.h>
+#include <haproxy/time.h>
+
+
/*
 * adds <ms> ms to <from>, stores the result into <tv> and returns a pointer
 * to <tv>. <tv> and <from> may point to the same structure.
 */
struct timeval *_tv_ms_add(struct timeval *tv, const struct timeval *from, int ms)
{
	time_t sec = from->tv_sec + ms / 1000;
	long usec = from->tv_usec + (ms % 1000) * 1000;

	/* propagate the carry from microseconds into seconds */
	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}
	tv->tv_sec = sec;
	tv->tv_usec = usec;
	return tv;
}
+
/*
 * compares <tv1> and <tv2> modulo 1ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2
 * Must not be used when either argument is eternity. Use tv_ms_cmp2() for that.
 * This is the out-of-line counterpart of __tv_ms_cmp().
 */
int _tv_ms_cmp(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_ms_cmp(tv1, tv2);
}
+
/*
 * compares <tv1> and <tv2> modulo 1 ms: returns 0 if equal, -1 if tv1 < tv2, 1 if tv1 > tv2,
 * assuming that TV_ETERNITY is greater than everything.
 * This is the out-of-line counterpart of __tv_ms_cmp2().
 */
int _tv_ms_cmp2(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_ms_cmp2(tv1, tv2);
}
+
/*
 * compares <tv1> and <tv2> modulo 1 ms: returns 1 if tv1 <= tv2, 0 if tv1 > tv2,
 * assuming that TV_ETERNITY is greater than everything. Returns 0 if tv1 is
 * TV_ETERNITY, and always assumes that tv2 != TV_ETERNITY. Designed to replace
 * occurrences of (tv_ms_cmp2(tv,now) <= 0).
 * This is the out-of-line counterpart of __tv_ms_le2().
 */
int _tv_ms_le2(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_ms_le2(tv1, tv2);
}
+
/*
 * returns the remaining time in ms between tv1=now and event=tv2.
 * If tv2 is already in the past, 0 is returned.
 * Must not be used when either argument is eternity.
 * This is the out-of-line counterpart of __tv_ms_remain().
 */
unsigned long _tv_ms_remain(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_ms_remain(tv1, tv2);
}
+
/*
 * returns the remaining time in ms between tv1=now and event=tv2.
 * If tv2 is already in the past, 0 is returned.
 * Returns TIME_ETERNITY if tv2 is eternity, which is checked here before
 * delegating to __tv_ms_remain() (which cannot handle it).
 */
unsigned long _tv_ms_remain2(const struct timeval *tv1, const struct timeval *tv2)
{
	if (tv_iseternity(tv2))
		return TIME_ETERNITY;

	return __tv_ms_remain(tv1, tv2);
}
+
/*
 * Returns the time in ms elapsed between tv1 and tv2, assuming that tv1<=tv2.
 * Must not be used when either argument is eternity.
 * This is the out-of-line counterpart of __tv_ms_elapsed().
 */
unsigned long _tv_ms_elapsed(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_ms_elapsed(tv1, tv2);
}
+
/*
 * adds <inc> to <from>, stores the result into <tv> and returns a pointer
 * to <tv>. This is the out-of-line counterpart of __tv_add().
 */
struct timeval *_tv_add(struct timeval *tv, const struct timeval *from, const struct timeval *inc)
{
	return __tv_add(tv, from, inc);
}
+
/*
 * If <inc> is set, then add it to <from> and set the result to <tv>, then
 * return 1, otherwise return 0. It is meant to be used in if conditions.
 * This is the out-of-line counterpart of __tv_add_ifset().
 */
int _tv_add_ifset(struct timeval *tv, const struct timeval *from, const struct timeval *inc)
{
	return __tv_add_ifset(tv, from, inc);
}
+
/*
 * Computes the remaining time between tv1=now and event=tv2. If tv2 is
 * already in the past, 0 is returned. The result is stored into tv.
 * This is the out-of-line counterpart of __tv_remain().
 */
struct timeval *_tv_remain(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv)
{
	return __tv_remain(tv1, tv2, tv);
}
+
/*
 * Computes the remaining time between tv1=now and event=tv2. If tv2 is
 * already in the past, 0 is returned. The result is stored into tv. Returns
 * ETERNITY if tv2 is eternity.
 * This is the out-of-line counterpart of __tv_remain2().
 */
struct timeval *_tv_remain2(const struct timeval *tv1, const struct timeval *tv2, struct timeval *tv)
{
	return __tv_remain2(tv1, tv2, tv);
}
+
/* tv_isle: compares <tv1> and <tv2> : returns 1 if tv1 <= tv2, otherwise 0.
 * This is the out-of-line counterpart of __tv_isle(). */
int _tv_isle(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_isle(tv1, tv2);
}
+
/* tv_isgt: compares <tv1> and <tv2> : returns 1 if tv1 > tv2, otherwise 0.
 * This is the out-of-line counterpart of __tv_isgt(). */
int _tv_isgt(const struct timeval *tv1, const struct timeval *tv2)
{
	return __tv_isgt(tv1, tv2);
}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/tools.c b/src/tools.c
new file mode 100644
index 0000000..b2814b5
--- /dev/null
+++ b/src/tools.c
@@ -0,0 +1,6348 @@
+/*
+ * General purpose functions.
+ *
+ * Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#if (defined(__ELF__) && !defined(__linux__)) || defined(USE_DL)
+#define _GNU_SOURCE
+#include <dlfcn.h>
+#include <link.h>
+#endif
+
+#if defined(__FreeBSD__)
+#include <elf.h>
+#include <dlfcn.h>
+extern void *__elf_aux_vector;
+#endif
+
+#if defined(__NetBSD__)
+#include <sys/exec_elf.h>
+#include <dlfcn.h>
+#endif
+
+#include <ctype.h>
+#include <errno.h>
+#include <netdb.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#if defined(__linux__) && defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#include <sys/auxv.h>
+#endif
+
+#include <import/eb32sctree.h>
+#include <import/eb32tree.h>
+#include <import/ebmbtree.h>
+
+#include <haproxy/api.h>
+#include <haproxy/applet.h>
+#include <haproxy/chunk.h>
+#include <haproxy/dgram.h>
+#include <haproxy/global.h>
+#include <haproxy/hlua.h>
+#include <haproxy/listener.h>
+#include <haproxy/namespace.h>
+#include <haproxy/net_helper.h>
+#include <haproxy/protocol.h>
+#include <haproxy/quic_sock.h>
+#include <haproxy/resolvers.h>
+#include <haproxy/sc_strm.h>
+#include <haproxy/sock.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/ssl_utils.h>
+#include <haproxy/stconn.h>
+#include <haproxy/task.h>
+#include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
+
+/* This macro makes the enclosing function return 0 when the test __x is
+ * false. Many of the following parsing functions must abort processing
+ * in that case, so this macro keeps their code light.
+ */
+#define RET0_UNLESS(__x) do { if (!(__x)) return 0; } while (0)
+
+/* Number of rotating slots in the hash_word[] table below */
+#define NB_L_HASH_WORD 15
+
+/* return the hash of a string and length for a given key. All keys are valid.
+ * The result is the low 24 bits of the XXH32 hash of the input.
+ */
+#define HA_ANON(key, str, len) (XXH32(str, len, key) & 0xFFFFFF)
+
+/* enough to store NB_ITOA_STR integers of :
+ * 2^64-1 = 18446744073709551615 or
+ * -2^63 = -9223372036854775808
+ *
+ * The HTML version needs room for adding the 25 characters
+ * '<span class="rls"></span>' around digits at positions 3N+1 in order
+ * to add spacing at up to 6 positions : 18 446 744 073 709 551 615
+ */
+THREAD_LOCAL char itoa_str[NB_ITOA_STR][171];
+THREAD_LOCAL int itoa_idx = 0; /* index of next itoa_str to use */
+
+/* sometimes we'll need to quote strings (eg: in stats), and we don't expect
+ * to quote strings larger than a max configuration line. The extra byte
+ * holds the terminating zero. Slots are used in round-robin via quoted_idx.
+ */
+THREAD_LOCAL char quoted_str[NB_QSTR][QSTR_SIZE + 1];
+THREAD_LOCAL int quoted_idx = 0;
+
+/* thread-local PRNG state. It's modified to start from a different sequence
+ * on all threads upon startup. It must not be used or anything beyond getting
+ * statistical values as it's 100% predictable.
+ */
+THREAD_LOCAL unsigned int statistical_prng_state = 2463534242U;
+
+/* set to true if this is a static build */
+int build_is_static = 0;
+
+/* A global static table to store hashed words, used in rotation with
+ * index_hash pointing to the next slot to fill.
+ */
+static THREAD_LOCAL char hash_word[NB_L_HASH_WORD][20];
+static THREAD_LOCAL int index_hash = 0;
+
+/*
+ * Writes the decimal ASCII representation of <n> into <dst> which is at
+ * most <size> bytes long.
+ *
+ * Returns a pointer to the trailing '\0', or NULL if there is not enough
+ * space in dst.
+ */
+char *ulltoa(unsigned long long n, char *dst, size_t size)
+{
+	unsigned long long scan = n;
+	int last = 0;
+	char *end;
+
+	/* <last> becomes the index of the least significant digit's slot,
+	 * i.e. number of digits minus one (a lone 0 still takes one digit).
+	 */
+	while (scan >= 10ULL) {
+		scan /= 10ULL;
+		last++;
+	}
+
+	if (last + 2 > size) // (last + 1) digits + '\0'
+		return NULL; // too long
+
+	end = dst + last + 1;
+	*end = '\0';
+	while (last >= 0) {
+		dst[last--] = n % 10ULL + '0';
+		n /= 10ULL;
+	}
+	return end;
+}
+
+/*
+ * Writes the decimal ASCII representation of <n> into <dst> which is at
+ * most <size> bytes long.
+ *
+ * Returns a pointer to the trailing '\0', or NULL if there is not enough
+ * space in dst.
+ */
+char *ultoa_o(unsigned long n, char *dst, size_t size)
+{
+	unsigned long scan = n;
+	int last = 0;
+	char *end;
+
+	/* <last> becomes the index of the least significant digit's slot,
+	 * i.e. number of digits minus one. This works for any word size
+	 * without enumerating the ranges.
+	 */
+	while (scan >= 10UL) {
+		scan /= 10UL;
+		last++;
+	}
+
+	if (last + 2 > size) // (last + 1) digits + '\0'
+		return NULL; // too long
+
+	end = dst + last + 1;
+	*end = '\0';
+	while (last >= 0) {
+		dst[last--] = n % 10UL + '0';
+		n /= 10UL;
+	}
+	return end;
+}
+
+/*
+ * signed long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *ltoa_o(long int n, char *dst, size_t size)
+{
+	char *pos = dst;
+
+	if (n < 0) {
+		if (size < 3)
+			return NULL; // min size is '-' + digit + '\0' but another test in ultoa_o
+		*pos = '-';
+		pos++;
+		/* negate through unsigned arithmetic: plain -n is undefined
+		 * behaviour (signed overflow) when n == LONG_MIN.
+		 */
+		dst = ultoa_o(-(unsigned long)n, pos, size - 1);
+	} else {
+		dst = ultoa_o(n, dst, size);
+	}
+	return dst;
+}
+
+/*
+ * signed long long ASCII representation
+ *
+ * return the last char '\0' or NULL if no enough
+ * space in dst
+ */
+char *lltoa(long long n, char *dst, size_t size)
+{
+	char *pos = dst;
+
+	if (n < 0) {
+		if (size < 3)
+			return NULL; // min size is '-' + digit + '\0' but another test in ulltoa
+		*pos = '-';
+		pos++;
+		/* negate through unsigned arithmetic: plain -n is undefined
+		 * behaviour (signed overflow) when n == LLONG_MIN.
+		 */
+		dst = ulltoa(-(unsigned long long)n, pos, size - 1);
+	} else {
+		dst = ulltoa(n, dst, size);
+	}
+	return dst;
+}
+
+/*
+ * Writes the ASCII representation of unsigned <n> into <dst>, left-padded
+ * with '0' so that exactly <size>-1 characters (plus the trailing '\0')
+ * are produced. Returns a pointer to the trailing '\0', or NULL when the
+ * number does not fit in <size>.
+ */
+char *utoa_pad(unsigned int n, char *dst, size_t size)
+{
+	unsigned int scan = n;
+	int pos = 0;
+	char *end;
+
+	/* <pos> = number of digits minus one */
+	while (scan >= 10U) {
+		scan /= 10U;
+		pos++;
+	}
+
+	if (pos + 2 > size) // digits + '\0'
+		return NULL; // too long
+
+	/* fill the whole field right-to-left; higher slots naturally
+	 * receive '0' once <n> is exhausted, producing the padding.
+	 */
+	pos = size - 2; // padding - '\0'
+	end = dst + pos + 1;
+	*end = '\0';
+	while (pos >= 0) {
+		dst[pos--] = n % 10U + '0';
+		n /= 10U;
+	}
+	return end;
+}
+
+/*
+ * copies at most <size-1> chars from <src> to <dst>. Last char is always
+ * set to 0, unless <size> is 0. The number of chars copied is returned
+ * (excluding the terminating zero).
+ */
+int strlcpy2(char *dst, const char *src, int size)
+{
+	int copied = 0;
+
+	if (!size)
+		return 0;
+
+	while (copied < size - 1 && src[copied]) {
+		dst[copied] = src[copied];
+		copied++;
+	}
+	dst[copied] = 0;
+	return copied;
+}
+
+/*
+ * Renders the decimal ASCII representation of <n> at the end of <buffer>
+ * (of <size> bytes) and returns a pointer to the first digit. Digits are
+ * emitted right-to-left; output is silently truncated on the left if the
+ * buffer is too small.
+ */
+char *ultoa_r(unsigned long n, char *buffer, int size)
+{
+	char *p = buffer + size - 1;
+
+	*p = '\0';
+	do {
+		p--;
+		*p = '0' + n % 10;
+		n /= 10;
+	} while (n && p > buffer);
+	return p;
+}
+
+/*
+ * This function simply returns a locally allocated string containing
+ * the ascii representation for number 'in' in decimal. Digits are
+ * written right-to-left from the end of <buffer>; the returned pointer
+ * lies inside <buffer>. If the buffer is too small, leftmost digits
+ * (and possibly the '-' sign) are silently dropped.
+ */
+char *lltoa_r(long long int in, char *buffer, int size)
+{
+	char *pos;
+	int neg = 0;
+	unsigned long long int n;
+
+	pos = buffer + size - 1;
+	*pos-- = '\0';
+
+	if (in < 0) {
+		neg = 1;
+		/* negate through unsigned arithmetic: plain -in is undefined
+		 * behaviour (signed overflow) when in == LLONG_MIN.
+		 */
+		n = -(unsigned long long)in;
+	}
+	else
+		n = in;
+
+	do {
+		*pos-- = '0' + n % 10;
+		n /= 10;
+	} while (n && pos >= buffer);
+	if (neg && pos > buffer)
+		*pos-- = '-';
+	return pos + 1;
+}
+
+/*
+ * This function simply returns a locally allocated string containing
+ * the ascii representation for signed number 'n' in decimal. The digits
+ * are rendered by ultoa_r() at the end of <buffer>; for negative values
+ * the '-' sign is placed just before the first digit.
+ */
+char *sltoa_r(long n, char *buffer, int size)
+{
+	char *pos;
+
+	if (n >= 0)
+		return ultoa_r(n, buffer, size);
+
+	/* negate through unsigned arithmetic: plain -n is undefined
+	 * behaviour (signed overflow) when n == LONG_MIN.
+	 */
+	pos = ultoa_r(-(unsigned long)n, buffer + 1, size - 1) - 1;
+	*pos = '-';
+	return pos;
+}
+
+/*
+ * This function simply returns a locally allocated string containing
+ * the ascii representation for number 'n' in decimal, formatted for
+ * HTML output with tags to create visual grouping by 3 digits. The
+ * output needs to support at least 171 characters.
+ */
+const char *ulltoh_r(unsigned long long n, char *buffer, int size)
+{
+	char *start;
+	int digit = 0;
+
+	/* the string is built right-to-left from the end of the buffer;
+	 * each write is guarded by a room check against <buffer>.
+	 */
+	start = buffer + size;
+	*--start = '\0';
+
+	do {
+		/* emitted before every group of 3 digits (right-to-left, so
+		 * it follows them in the final output)
+		 */
+		if (digit == 3 && start >= buffer + 7)
+			memcpy(start -= 7, "</span>", 7);
+
+		if (start >= buffer + 1) {
+			*--start = '0' + n % 10;
+			n /= 10;
+		}
+
+		if (digit == 3 && start >= buffer + 18)
+			memcpy(start -= 18, "<span class=\"rls\">", 18);
+
+		/* <digit> cycles 1..3 so a span is opened/closed every 3 digits */
+		if (digit++ == 3)
+			digit = 1;
+	} while (n && start > buffer);
+	return start;
+}
+
+/*
+ * This function simply returns a locally allocated string containing the ascii
+ * representation for number 'n' in decimal, unless n is 0 in which case it
+ * returns the alternate string (or an empty string if the alternate string is
+ * NULL). It use is intended for limits reported in reports, where it's
+ * desirable not to display anything if there is no limit. Warning! it shares
+ * the same vector as ultoa_r().
+ */
+const char *limit_r(unsigned long n, char *buffer, int size, const char *alt)
+{
+	if (n)
+		return ultoa_r(n, buffer, size);
+	if (alt)
+		return alt;
+	return "";
+}
+
+/* Trims the first "%f" float in a string to its minimum number of digits after
+ * the decimal point by trimming trailing zeroes, even dropping the decimal
+ * point if not needed. The string is in <buffer> of length <len>, and the
+ * number is expected to start at or after position <num_start> (the first
+ * point appearing there is considered). A NUL character is always placed at
+ * the end if some trimming occurs. The new buffer length is returned.
+ */
+size_t flt_trim(char *buffer, size_t num_start, size_t len)
+{
+	char *end = buffer + len;
+	char *p = buffer + num_start;
+	char *cut;
+
+	/* find the first '.' at or after <num_start>; nothing to do if none */
+	for (;;) {
+		if (p >= end)
+			return len;
+		cut = p++;
+		if (*cut == '.')
+			break;
+	}
+
+	/* move <cut> just past the last character after the point which is
+	 * not a '0'; if none is found it stays on the point itself.
+	 */
+	while (p < end) {
+		if (*p != '0')
+			cut = p + 1;
+		p++;
+	}
+
+	if (cut < end)
+		*cut = 0;
+
+	return cut - buffer;
+}
+
+/*
+ * This function simply returns a locally allocated string containing
+ * the ascii representation for number 'n' in decimal with useless trailing
+ * zeroes trimmed.
+ */
+char *ftoa_r(double n, char *buffer, int size)
+{
+	int len = snprintf(buffer, size, "%f", n);
+
+	flt_trim(buffer, 0, len);
+	return buffer;
+}
+
+/* returns a locally allocated string containing the quoted encoding of the
+ * input string. The output may be truncated to QSTR_SIZE chars, but it is
+ * guaranteed that the string will always be properly terminated. Quotes are
+ * encoded by doubling them as is commonly done in CSV files. QSTR_SIZE must
+ * always be at least 4 chars.
+ */
+const char *qstr(const char *str)
+{
+	char *ret = quoted_str[quoted_idx];
+	char *p, *end;
+
+	if (++quoted_idx >= NB_QSTR)
+		quoted_idx = 0;
+
+	p = ret;
+	end = ret + QSTR_SIZE;
+
+	*p++ = '"';
+
+	/* always keep 3 chars to support passing "" and the ending " */
+	while (*str && p < end - 3) {
+		if (*str == '"') {
+			*p++ = '"';
+			*p++ = '"';
+		}
+		else
+			*p++ = *str;
+		str++;
+	}
+	*p++ = '"';
+	/* explicitly terminate: the slots rotate and may still hold a longer
+	 * string from a previous call, so without this the documented
+	 * termination guarantee would be broken. The backing array is
+	 * QSTR_SIZE+1 bytes and p <= ret+QSTR_SIZE-1 here, so this fits.
+	 */
+	*p = 0;
+	return ret;
+}
+
+/*
+ * Returns non-zero if character <s> is a hex digit (0-9, a-f, A-F), else zero.
+ *
+ * It looks like this one would be a good candidate for inlining, but this is
+ * not interesting because it is short and often called multiple times within
+ * the same function.
+ */
+int ishex(char s)
+{
+	return (s >= '0' && s <= '9') ||
+	       (s >= 'A' && s <= 'F') ||
+	       (s >= 'a' && s <= 'f');
+}
+
+/* rounds <i> down to the closest value having max 2 digits */
+unsigned int round_2dig(unsigned int i)
+{
+	unsigned int scale;
+
+	/* drop digits until at most two remain, remembering the magnitude */
+	for (scale = 1; i >= 100; scale *= 10)
+		i /= 10;
+
+	return i * scale;
+}
+
+/*
+ * Checks <name> for invalid characters. Valid chars are [A-Za-z0-9_:.-]. If an
+ * invalid character is found, a pointer to it is returned. If everything is
+ * fine, NULL is returned. An empty string is reported as invalid (pointer to
+ * its NUL byte).
+ */
+const char *invalid_char(const char *name)
+{
+	const char *p = name;
+
+	if (!*p)
+		return p;
+
+	for (; *p; p++) {
+		unsigned char c = (unsigned char)*p;
+
+		if (isalnum(c))
+			continue;
+		if (c == '.' || c == ':' || c == '_' || c == '-')
+			continue;
+		return p;
+	}
+	return NULL;
+}
+
+/*
+ * Checks <name> for invalid characters. Valid chars are [_.-] and those
+ * accepted by <f> function.
+ * If an invalid character is found, a pointer to it is returned.
+ * If everything is fine, NULL is returned. An empty string is reported
+ * as invalid (pointer to its NUL byte).
+ */
+static inline const char *__invalid_char(const char *name, int (*f)(int)) {
+
+	const char *p = name;
+
+	if (!*p)
+		return p;
+
+	for (; *p; p++) {
+		if (f((unsigned char)*p))
+			continue;
+		if (*p == '.' || *p == '_' || *p == '-')
+			continue;
+		return p;
+	}
+
+	return NULL;
+}
+
+/*
+ * Checks <name> for invalid characters. Valid chars are alphanumerics
+ * [A-Za-z0-9] (per isalnum()) plus [_.-].
+ * If an invalid character is found, a pointer to it is returned.
+ * If everything is fine, NULL is returned.
+ */
+const char *invalid_domainchar(const char *name) {
+	return __invalid_char(name, isalnum);
+}
+
+/*
+ * Checks <name> for invalid characters. The comment historically claimed
+ * [A-Za-z_.-], but since this uses isalnum() the effective accepted set is
+ * [A-Za-z0-9_.-], identical to invalid_domainchar().
+ * NOTE(review): confirm whether digits are intentionally accepted here.
+ * If an invalid character is found, a pointer to it is returned.
+ * If everything is fine, NULL is returned.
+ */
+const char *invalid_prefix_char(const char *name) {
+	return __invalid_char(name, isalnum);
+}
+
+/*
+ * converts <str> to a struct sockaddr_storage* provided by the caller. The
+ * caller must have zeroed <sa> first, and may have set sa->ss_family to force
+ * parse a specific address format. If the ss_family is 0 or AF_UNSPEC, then
+ * the function tries to guess the address family from the syntax. If the
+ * family is forced and the format doesn't match, an error is returned. The
+ * string is assumed to contain only an address, no port. The address can be a
+ * dotted IPv4 address, an IPv6 address, a host name, or empty or "*" to
+ * indicate INADDR_ANY. NULL is returned if the host part cannot be resolved.
+ * The return address will only have the address family and the address set,
+ * all other fields remain zero. The string is not supposed to be modified.
+ * The IPv6 '::' address is IN6ADDR_ANY. If <resolve> is non-zero, the hostname
+ * is resolved, otherwise only IP addresses are resolved, and anything else
+ * returns NULL. If the address contains a port, this one is preserved.
+ */
+struct sockaddr_storage *str2ip2(const char *str, struct sockaddr_storage *sa, int resolve)
+{
+	struct hostent *he;
+	/* max IPv6 length, including brackets and terminating NULL */
+	char tmpip[48];
+	int port = get_host_port(sa);
+
+	/* check IPv6 with square brackets: strip them and work on a local
+	 * copy in <tmpip> since <str> must not be modified.
+	 */
+	if (str[0] == '[') {
+		size_t iplength = strlen(str);
+
+		if (iplength < 4) {
+			/* minimal size is 4 when using brackets "[::]" */
+			goto fail;
+		}
+		else if (iplength >= sizeof(tmpip)) {
+			/* IPv6 literal can not be larger than tmpip */
+			goto fail;
+		}
+		else {
+			if (str[iplength - 1] != ']') {
+				/* if address started with bracket, it should end with bracket */
+				goto fail;
+			}
+			else {
+				memcpy(tmpip, str + 1, iplength - 2);
+				tmpip[iplength - 2] = '\0';
+				str = tmpip;
+			}
+		}
+	}
+
+	/* Any IPv6 address */
+	if (str[0] == ':' && str[1] == ':' && !str[2]) {
+		if (!sa->ss_family || sa->ss_family == AF_UNSPEC)
+			sa->ss_family = AF_INET6;
+		else if (sa->ss_family != AF_INET6)
+			goto fail;
+		set_host_port(sa, port);
+		return sa;
+	}
+
+	/* Any address for the family, defaults to IPv4 */
+	if (!str[0] || (str[0] == '*' && !str[1])) {
+		if (!sa->ss_family || sa->ss_family == AF_UNSPEC)
+			sa->ss_family = AF_INET;
+		set_host_port(sa, port);
+		return sa;
+	}
+
+	/* check for IPv6 first */
+	if ((!sa->ss_family || sa->ss_family == AF_UNSPEC || sa->ss_family == AF_INET6) &&
+	    inet_pton(AF_INET6, str, &((struct sockaddr_in6 *)sa)->sin6_addr)) {
+		sa->ss_family = AF_INET6;
+		set_host_port(sa, port);
+		return sa;
+	}
+
+	/* then check for IPv4 */
+	if ((!sa->ss_family || sa->ss_family == AF_UNSPEC || sa->ss_family == AF_INET) &&
+	    inet_pton(AF_INET, str, &((struct sockaddr_in *)sa)->sin_addr)) {
+		sa->ss_family = AF_INET;
+		set_host_port(sa, port);
+		return sa;
+	}
+
+	/* not a literal address: from here on a name resolution is needed */
+	if (!resolve)
+		return NULL;
+
+	if (!resolv_hostname_validation(str, NULL))
+		return NULL;
+
+#ifdef USE_GETADDRINFO
+	if (global.tune.options & GTUNE_USE_GAI) {
+		struct addrinfo hints, *result;
+		int success = 0;
+
+		/* note: this zeroes the <result> pointer itself so that the
+		 * freeaddrinfo() guard below is safe even when getaddrinfo()
+		 * fails without touching it.
+		 */
+		memset(&result, 0, sizeof(result));
+		memset(&hints, 0, sizeof(hints));
+		hints.ai_family = sa->ss_family ? sa->ss_family : AF_UNSPEC;
+		hints.ai_socktype = SOCK_DGRAM;
+		hints.ai_flags = 0;
+		hints.ai_protocol = 0;
+
+		if (getaddrinfo(str, NULL, &hints, &result) == 0) {
+			/* the forced family (if any) must match what was resolved */
+			if (!sa->ss_family || sa->ss_family == AF_UNSPEC)
+				sa->ss_family = result->ai_family;
+			else if (sa->ss_family != result->ai_family) {
+				freeaddrinfo(result);
+				goto fail;
+			}
+
+			switch (result->ai_family) {
+			case AF_INET:
+				memcpy((struct sockaddr_in *)sa, result->ai_addr, result->ai_addrlen);
+				set_host_port(sa, port);
+				success = 1;
+				break;
+			case AF_INET6:
+				memcpy((struct sockaddr_in6 *)sa, result->ai_addr, result->ai_addrlen);
+				set_host_port(sa, port);
+				success = 1;
+				break;
+			}
+		}
+
+		if (result)
+			freeaddrinfo(result);
+
+		/* if getaddrinfo() worked, we're done; otherwise fall back to
+		 * gethostbyname() below.
+		 */
+		if (success)
+			return sa;
+	}
+#endif
+	/* try to resolve an IPv4/IPv6 hostname */
+	he = gethostbyname(str);
+	if (he) {
+		if (!sa->ss_family || sa->ss_family == AF_UNSPEC)
+			sa->ss_family = he->h_addrtype;
+		else if (sa->ss_family != he->h_addrtype)
+			goto fail;
+
+		switch (sa->ss_family) {
+		case AF_INET:
+			((struct sockaddr_in *)sa)->sin_addr = *(struct in_addr *) *(he->h_addr_list);
+			set_host_port(sa, port);
+			return sa;
+		case AF_INET6:
+			((struct sockaddr_in6 *)sa)->sin6_addr = *(struct in6_addr *) *(he->h_addr_list);
+			set_host_port(sa, port);
+			return sa;
+		}
+	}
+
+	/* unsupported address family */
+ fail:
+	return NULL;
+}
+
+/*
+ * Converts <str> to a locally allocated struct sockaddr_storage *, and a port
+ * range or offset consisting in two integers that the caller will have to
+ * check to find the relevant input format. The following format are supported :
+ *
+ * String format | address | port | low | high
+ * addr | <addr> | 0 | 0 | 0
+ * addr: | <addr> | 0 | 0 | 0
+ * addr:port | <addr> | <port> | <port> | <port>
+ * addr:pl-ph | <addr> | <pl> | <pl> | <ph>
+ * addr:+port | <addr> | <port> | 0 | <port>
+ * addr:-port | <addr> |-<port> | <port> | 0
+ *
+ * The detection of a port range or increment by the caller is made by
+ * comparing <low> and <high>. If both are equal, then port 0 means no port
+ * was specified. The caller may pass NULL for <low> and <high> if it is not
+ * interested in retrieving port ranges.
+ *
+ * Note that <addr> above may also be :
+ * - empty ("") => family will be AF_INET and address will be INADDR_ANY
+ * - "*" => family will be AF_INET and address will be INADDR_ANY
+ * - "::" => family will be AF_INET6 and address will be IN6ADDR_ANY
+ * - a host name => family and address will depend on host name resolving.
+ *
+ * A prefix may be passed in before the address above to force the family :
+ * - "ipv4@" => force address to resolve as IPv4 and fail if not possible.
+ * - "ipv6@" => force address to resolve as IPv6 and fail if not possible.
+ * - "unix@" => force address to be a path to a UNIX socket even if the
+ * path does not start with a '/'
+ * - 'abns@' -> force address to belong to the abstract namespace (Linux
+ * only). These sockets are just like Unix sockets but without
+ * the need for an underlying file system. The address is a
+ * string. Technically it's like a Unix socket with a zero in
+ * the first byte of the address.
+ * - "fd@" => an integer must follow, and is a file descriptor number.
+ *
+ * IPv6 addresses can be declared with or without square brackets. When using
+ * square brackets for IPv6 addresses, the port separator (colon) is optional.
+ * If not using square brackets, and in order to avoid any ambiguity with
+ * IPv6 addresses, the last colon ':' is mandatory even when no port is specified.
+ * NULL is returned if the address cannot be parsed. The <low> and <high> ports
+ * are always initialized if non-null, even for non-IP families.
+ *
+ * If <pfx> is non-null, it is used as a string prefix before any path-based
+ * address (typically the path to a unix socket).
+ *
+ * if <fqdn> is non-null, it will be filled with :
+ * - a pointer to the FQDN of the server name to resolve if there's one, and
+ * that the caller will have to free(),
+ * - NULL if there was an explicit address that doesn't require resolution.
+ *
+ * Hostnames are only resolved if <opts> has PA_O_RESOLVE. Otherwise <fqdn> is
+ * still honored so it is possible for the caller to know whether a resolution
+ * failed by clearing this flag and checking if <fqdn> was filled, indicating
+ * the need for a resolution.
+ *
+ * When a file descriptor is passed, its value is put into the s_addr part of
+ * the address when cast to sockaddr_in and the address family is
+ * AF_CUST_EXISTING_FD.
+ *
+ * The matching protocol will be set into <proto> if non-null.
+ * The address protocol and transport types hints which are directly resolved
+ * will be set into <sa_type> if not NULL.
+ *
+ * Any known file descriptor is also assigned to <fd> if non-null, otherwise it
+ * is forced to -1.
+ */
+struct sockaddr_storage *str2sa_range(const char *str, int *port, int *low, int *high, int *fd,
+ struct protocol **proto, struct net_addr_type *sa_type,
+ char **err, const char *pfx, char **fqdn, unsigned int opts)
+{
+ static THREAD_LOCAL struct sockaddr_storage ss;
+ struct sockaddr_storage *ret = NULL;
+ struct protocol *new_proto = NULL;
+ char *back, *str2;
+ char *port1, *port2;
+ int portl, porth, porta;
+ int abstract = 0;
+ int new_fd = -1;
+ enum proto_type proto_type = 0; // to shut gcc warning
+ int ctrl_type = 0; // to shut gcc warning
+
+ portl = porth = porta = 0;
+ if (fqdn)
+ *fqdn = NULL;
+
+ str2 = back = env_expand(strdup(str));
+ if (str2 == NULL) {
+ memprintf(err, "out of memory in '%s'", __FUNCTION__);
+ goto out;
+ }
+
+ if (!*str2) {
+ memprintf(err, "'%s' resolves to an empty address (environment variable missing?)", str);
+ goto out;
+ }
+
+ memset(&ss, 0, sizeof(ss));
+
+ /* prepare the default socket types */
+ if ((opts & (PA_O_STREAM|PA_O_DGRAM)) == PA_O_DGRAM ||
+ ((opts & (PA_O_STREAM|PA_O_DGRAM)) == (PA_O_DGRAM|PA_O_STREAM) && (opts & PA_O_DEFAULT_DGRAM))) {
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ } else {
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+
+ if (strncmp(str2, "stream+", 7) == 0) {
+ str2 += 7;
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "dgram+", 6) == 0) {
+ str2 += 6;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ }
+ else if (strncmp(str2, "quic+", 5) == 0) {
+ str2 += 5;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_STREAM;
+ }
+
+ if (strncmp(str2, "unix@", 5) == 0) {
+ str2 += 5;
+ abstract = 0;
+ ss.ss_family = AF_UNIX;
+ }
+ else if (strncmp(str2, "uxdg@", 5) == 0) {
+ str2 += 5;
+ abstract = 0;
+ ss.ss_family = AF_UNIX;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ }
+ else if (strncmp(str2, "uxst@", 5) == 0) {
+ str2 += 5;
+ abstract = 0;
+ ss.ss_family = AF_UNIX;
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "abns@", 5) == 0) {
+ str2 += 5;
+ abstract = 1;
+ ss.ss_family = AF_UNIX;
+ }
+ else if (strncmp(str2, "ip@", 3) == 0) {
+ str2 += 3;
+ ss.ss_family = AF_UNSPEC;
+ }
+ else if (strncmp(str2, "ipv4@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET;
+ }
+ else if (strncmp(str2, "ipv6@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET6;
+ }
+ else if (strncmp(str2, "tcp4@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET;
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "udp4@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ }
+ else if (strncmp(str2, "tcp6@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET6;
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "udp6@", 5) == 0) {
+ str2 += 5;
+ ss.ss_family = AF_INET6;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ }
+ else if (strncmp(str2, "tcp@", 4) == 0) {
+ str2 += 4;
+ ss.ss_family = AF_UNSPEC;
+ proto_type = PROTO_TYPE_STREAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "udp@", 4) == 0) {
+ str2 += 4;
+ ss.ss_family = AF_UNSPEC;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_DGRAM;
+ }
+ else if (strncmp(str2, "quic4@", 6) == 0) {
+ str2 += 6;
+ ss.ss_family = AF_INET;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "quic6@", 6) == 0) {
+ str2 += 6;
+ ss.ss_family = AF_INET6;
+ proto_type = PROTO_TYPE_DGRAM;
+ ctrl_type = SOCK_STREAM;
+ }
+ else if (strncmp(str2, "fd@", 3) == 0) {
+ str2 += 3;
+ ss.ss_family = AF_CUST_EXISTING_FD;
+ }
+ else if (strncmp(str2, "sockpair@", 9) == 0) {
+ str2 += 9;
+ ss.ss_family = AF_CUST_SOCKPAIR;
+ }
+ else if (strncmp(str2, "rhttp@", 3) == 0) {
+ /* TODO duplicated code from check_kw_experimental() */
+ if (!experimental_directives_allowed) {
+ memprintf(err, "Address '%s' is experimental, must be allowed via a global 'expose-experimental-directives'", str2);
+ goto out;
+ }
+ mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
+
+ str2 += 4;
+ ss.ss_family = AF_CUST_RHTTP_SRV;
+ }
+ else if (*str2 == '/') {
+ ss.ss_family = AF_UNIX;
+ }
+ else
+ ss.ss_family = AF_UNSPEC;
+
+ if (ss.ss_family == AF_CUST_SOCKPAIR) {
+ struct sockaddr_storage ss2;
+ socklen_t addr_len;
+ char *endptr;
+
+ new_fd = strtol(str2, &endptr, 10);
+ if (!*str2 || new_fd < 0 || *endptr) {
+ memprintf(err, "file descriptor '%s' is not a valid integer in '%s'", str2, str);
+ goto out;
+ }
+
+ /* just verify that it's a socket */
+ addr_len = sizeof(ss2);
+ if (getsockname(new_fd, (struct sockaddr *)&ss2, &addr_len) == -1) {
+ memprintf(err, "cannot use file descriptor '%d' : %s.", new_fd, strerror(errno));
+ goto out;
+ }
+
+ ((struct sockaddr_in *)&ss)->sin_addr.s_addr = new_fd;
+ ((struct sockaddr_in *)&ss)->sin_port = 0;
+ }
+ else if (ss.ss_family == AF_CUST_EXISTING_FD) {
+ char *endptr;
+
+ new_fd = strtol(str2, &endptr, 10);
+ if (!*str2 || new_fd < 0 || *endptr) {
+ memprintf(err, "file descriptor '%s' is not a valid integer in '%s'", str2, str);
+ goto out;
+ }
+
+ if (opts & PA_O_SOCKET_FD) {
+ socklen_t addr_len;
+ int type;
+
+ addr_len = sizeof(ss);
+ if (getsockname(new_fd, (struct sockaddr *)&ss, &addr_len) == -1) {
+ memprintf(err, "cannot use file descriptor '%d' : %s.", new_fd, strerror(errno));
+ goto out;
+ }
+
+ addr_len = sizeof(type);
+ if (getsockopt(new_fd, SOL_SOCKET, SO_TYPE, &type, &addr_len) != 0 ||
+ (type == SOCK_STREAM) != (proto_type == PROTO_TYPE_STREAM)) {
+ memprintf(err, "socket on file descriptor '%d' is of the wrong type.", new_fd);
+ goto out;
+ }
+
+ porta = portl = porth = get_host_port(&ss);
+ } else if (opts & PA_O_RAW_FD) {
+ ((struct sockaddr_in *)&ss)->sin_addr.s_addr = new_fd;
+ ((struct sockaddr_in *)&ss)->sin_port = 0;
+ } else {
+ memprintf(err, "a file descriptor is not acceptable here in '%s'", str);
+ goto out;
+ }
+ }
+ else if (ss.ss_family == AF_UNIX) {
+ struct sockaddr_un *un = (struct sockaddr_un *)&ss;
+ int prefix_path_len;
+ int max_path_len;
+ int adr_len;
+
+ /* complete unix socket path name during startup or soft-restart is
+ * <unix_bind_prefix><path>.<pid>.<bak|tmp>
+ */
+ prefix_path_len = (pfx && !abstract) ? strlen(pfx) : 0;
+ max_path_len = (sizeof(un->sun_path) - 1) -
+ (abstract ? 0 : prefix_path_len + 1 + 5 + 1 + 3);
+
+ adr_len = strlen(str2);
+ if (adr_len > max_path_len) {
+ memprintf(err, "socket path '%s' too long (max %d)", str, max_path_len);
+ goto out;
+ }
+
+ /* when abstract==1, we skip the first zero and copy all bytes except the trailing zero */
+ memset(un->sun_path, 0, sizeof(un->sun_path));
+ if (prefix_path_len)
+ memcpy(un->sun_path, pfx, prefix_path_len);
+ memcpy(un->sun_path + prefix_path_len + abstract, str2, adr_len + 1 - abstract);
+ }
+ else if (ss.ss_family == AF_CUST_RHTTP_SRV) {
+ /* Nothing to do here. */
+ }
+ else { /* IPv4 and IPv6 */
+ char *end = str2 + strlen(str2);
+ char *chr;
+
+ /* search for : or ] whatever comes first */
+ for (chr = end-1; chr > str2; chr--) {
+ if (*chr == ']' || *chr == ':')
+ break;
+ }
+
+ if (*chr == ':') {
+ /* Found a colon before a closing-bracket, must be a port separator.
+ * This guarantee backward compatibility.
+ */
+ if (!(opts & PA_O_PORT_OK)) {
+ memprintf(err, "port specification not permitted here in '%s'", str);
+ goto out;
+ }
+ *chr++ = '\0';
+ port1 = chr;
+ }
+ else {
+ /* Either no colon and no closing-bracket
+ * or directly ending with a closing-bracket.
+ * However, no port.
+ */
+ if (opts & PA_O_PORT_MAND) {
+ memprintf(err, "missing port specification in '%s'", str);
+ goto out;
+ }
+ port1 = "";
+ }
+
+ if (isdigit((unsigned char)*port1)) { /* single port or range */
+ char *endptr;
+
+ port2 = strchr(port1, '-');
+ if (port2) {
+ if (!(opts & PA_O_PORT_RANGE)) {
+ memprintf(err, "port range not permitted here in '%s'", str);
+ goto out;
+ }
+ *port2++ = '\0';
+ }
+ else
+ port2 = port1;
+ portl = strtol(port1, &endptr, 10);
+ if (*endptr != '\0') {
+ memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *endptr, port1, str);
+ goto out;
+ }
+ porth = strtol(port2, &endptr, 10);
+ if (*endptr != '\0') {
+ memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *endptr, port2, str);
+ goto out;
+ }
+
+ if (portl < !!(opts & PA_O_PORT_MAND) || portl > 65535) {
+ memprintf(err, "invalid port '%s'", port1);
+ goto out;
+ }
+
+ if (porth < !!(opts & PA_O_PORT_MAND) || porth > 65535) {
+ memprintf(err, "invalid port '%s'", port2);
+ goto out;
+ }
+
+ if (portl > porth) {
+ memprintf(err, "invalid port range '%d-%d'", portl, porth);
+ goto out;
+ }
+
+ porta = portl;
+ }
+ else if (*port1 == '-') { /* negative offset */
+ char *endptr;
+
+ if (!(opts & PA_O_PORT_OFS)) {
+ memprintf(err, "port offset not permitted here in '%s'", str);
+ goto out;
+ }
+ portl = strtol(port1 + 1, &endptr, 10);
+ if (*endptr != '\0') {
+ memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *endptr, port1 + 1, str);
+ goto out;
+ }
+ porta = -portl;
+ }
+ else if (*port1 == '+') { /* positive offset */
+ char *endptr;
+
+ if (!(opts & PA_O_PORT_OFS)) {
+ memprintf(err, "port offset not permitted here in '%s'", str);
+ goto out;
+ }
+ porth = strtol(port1 + 1, &endptr, 10);
+ if (*endptr != '\0') {
+ memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *endptr, port1 + 1, str);
+ goto out;
+ }
+ porta = porth;
+ }
+ else if (*port1) { /* other any unexpected char */
+ memprintf(err, "invalid character '%c' in port number '%s' in '%s'", *port1, port1, str);
+ goto out;
+ }
+ else if (opts & PA_O_PORT_MAND) {
+ memprintf(err, "missing port specification in '%s'", str);
+ goto out;
+ }
+
+ /* first try to parse the IP without resolving. If it fails, it
+ * tells us we need to keep a copy of the FQDN to resolve later
+ * and to enable DNS. In this case we can proceed if <fqdn> is
+ * set or if PA_O_RESOLVE is set, otherwise it's an error.
+ */
+ if (str2ip2(str2, &ss, 0) == NULL) {
+ if ((!(opts & PA_O_RESOLVE) && !fqdn) ||
+ ((opts & PA_O_RESOLVE) && str2ip2(str2, &ss, 1) == NULL)) {
+ memprintf(err, "invalid address: '%s' in '%s'", str2, str);
+ goto out;
+ }
+
+ if (fqdn) {
+ if (str2 != back)
+ memmove(back, str2, strlen(str2) + 1);
+ *fqdn = back;
+ back = NULL;
+ }
+ }
+ set_host_port(&ss, porta);
+ }
+
+ if (ctrl_type == SOCK_STREAM && !(opts & PA_O_STREAM)) {
+ memprintf(err, "stream-type address not acceptable in '%s'", str);
+ goto out;
+ }
+ else if (ctrl_type == SOCK_DGRAM && !(opts & PA_O_DGRAM)) {
+ memprintf(err, "dgram-type address not acceptable in '%s'", str);
+ goto out;
+ }
+
+ if (proto || (opts & PA_O_CONNECT)) {
+ /* Note: if the caller asks for a proto, we must find one,
+ * except if we inherit from a raw FD (family == AF_CUST_EXISTING_FD)
+ * orif we return with an fqdn that will resolve later,
+ * in which case the address is not known yet (this is only
+ * for servers actually).
+ */
+ new_proto = protocol_lookup(ss.ss_family,
+ proto_type,
+ ctrl_type == SOCK_DGRAM);
+
+ if (!new_proto && (!fqdn || !*fqdn) && (ss.ss_family != AF_CUST_EXISTING_FD)) {
+ memprintf(err, "unsupported %s protocol for %s family %d address '%s'%s",
+ (ctrl_type == SOCK_DGRAM) ? "datagram" : "stream",
+ (proto_type == PROTO_TYPE_DGRAM) ? "datagram" : "stream",
+ ss.ss_family,
+ str,
+#ifndef USE_QUIC
+ (ctrl_type == SOCK_STREAM && proto_type == PROTO_TYPE_DGRAM)
+ ? "; QUIC is not compiled in if this is what you were looking for."
+ : ""
+#else
+ ""
+#endif
+ );
+ goto out;
+ }
+
+ if ((opts & PA_O_CONNECT) && new_proto && !new_proto->connect) {
+ memprintf(err, "connect() not supported for this protocol family %d used by address '%s'", ss.ss_family, str);
+ goto out;
+ }
+ }
+
+ ret = &ss;
+ out:
+ if (port)
+ *port = porta;
+ if (low)
+ *low = portl;
+ if (high)
+ *high = porth;
+ if (fd)
+ *fd = new_fd;
+ if (proto)
+ *proto = new_proto;
+ if (sa_type) {
+ sa_type->proto_type = proto_type;
+ sa_type->xprt_type = (ctrl_type == SOCK_DGRAM) ? PROTO_TYPE_DGRAM : PROTO_TYPE_STREAM;
+ }
+ free(back);
+ return ret;
+}
+
+/* converts <addr> and <port> into a string representation of the address and port. This is sort
+ * of an inverse of str2sa_range, with some restrictions. The supported families are AF_INET,
+ * AF_INET6, AF_UNIX, and AF_CUST_SOCKPAIR. If the family is unsopported NULL is returned.
+ * If map_ports is true, then the sign of the port is included in the output, to indicate it is
+ * relative to the incoming port. AF_INET and AF_INET6 will be in the form "<addr>:<port>".
+ * AF_UNIX will either be just the path (if using a pathname) or "abns@<path>" if it is abstract.
+ * AF_CUST_SOCKPAIR will be of the form "sockpair@<fd>".
+ *
+ * The returned char* is allocated, and it is the responsibility of the caller to free it.
+ */
+char * sa2str(const struct sockaddr_storage *addr, int port, int map_ports)
+{
+ char buffer[INET6_ADDRSTRLEN];
+ char *out = NULL;
+ const void *ptr;
+ const char *path;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ ptr = &((struct sockaddr_in *)addr)->sin_addr;
+ break;
+ case AF_INET6:
+ ptr = &((struct sockaddr_in6 *)addr)->sin6_addr;
+ break;
+ case AF_UNIX:
+ path = ((struct sockaddr_un *)addr)->sun_path;
+ if (path[0] == '\0') {
+ const int max_length = sizeof(struct sockaddr_un) - offsetof(struct sockaddr_un, sun_path) - 1;
+ return memprintf(&out, "abns@%.*s", max_length, path+1);
+ } else {
+ return strdup(path);
+ }
+ case AF_CUST_SOCKPAIR:
+ return memprintf(&out, "sockpair@%d", ((struct sockaddr_in *)addr)->sin_addr.s_addr);
+ default:
+ return NULL;
+ }
+ if (inet_ntop(addr->ss_family, ptr, buffer, sizeof(buffer)) == NULL) {
+ BUG_ON(errno == ENOSPC);
+ return NULL;
+ }
+ if (map_ports)
+ return memprintf(&out, "%s:%+d", buffer, port);
+ else
+ return memprintf(&out, "%s:%d", buffer, port);
+}
+
+
/* converts <str> to a struct in_addr containing a network mask. It can be
 * passed in dotted form (255.255.255.0) or in CIDR form (24). It returns 1
 * if the conversion succeeds otherwise zero.
 */
int str2mask(const char *str, struct in_addr *mask)
{
	if (strchr(str, '.')) {
		/* dotted notation: let inet_pton() do the work */
		return inet_pton(AF_INET, str, mask) != 0;
	}
	else {
		/* CIDR prefix length */
		char *endp;
		unsigned long bits = strtol(str, &endp, 10);

		if (!*str || (endp && *endp) || (unsigned)bits > 32)
			return 0;

		len2mask4(bits, mask);
		return 1;
	}
}
+
/* converts <str> to a struct in6_addr containing a network mask. It can be
 * passed in quadruplet form (ffff:ffff::) or in CIDR form (64). It returns 1
 * if the conversion succeeds otherwise zero.
 */
int str2mask6(const char *str, struct in6_addr *mask)
{
	if (strchr(str, ':')) {
		/* quadruplet notation: let inet_pton() do the work */
		return inet_pton(AF_INET6, str, mask) != 0;
	}
	else {
		/* CIDR prefix length */
		char *endp;
		unsigned long bits = strtol(str, &endp, 10);

		if (!*str || (endp && *endp) || (unsigned)bits > 128)
			return 0;

		len2mask6(bits, mask);
		return 1;
	}
}
+
/* Converts a CIDR prefix length <cidr> (0..32) into the equivalent dotted
 * mask stored in <mask> (network byte order). Returns 1 on success, 0 if
 * <cidr> is out of range.
 */
int cidr2dotted(int cidr, struct in_addr *mask) {

	unsigned long bits;

	if (cidr < 0 || cidr > 32)
		return 0;

	/* guard cidr==0 separately: shifting by 32 would be undefined */
	bits = cidr ? ~0UL << (32 - cidr) : 0UL;
	mask->s_addr = htonl(bits);
	return 1;
}
+
/* Convert an IPv4 mask from bit length form to in_addr form (network byte
 * order). Out-of-range lengths are clamped to 0 or 32. This function never
 * fails.
 */
void len2mask4(int len, struct in_addr *addr)
{
	in_addr_t m;

	if (len <= 0)
		m = 0x00000000;
	else if (len >= 32)
		m = 0xffffffff;		/* all-ones: byte order irrelevant */
	else
		m = htonl(0xffffffff << (32 - len));

	addr->s_addr = m;
}
+
/* Convert an IPv6 mask from bit length form to in6_addr form, filling the
 * four 32-bit words from most to least significant. Relies on len2mask4()
 * clamping out-of-range lengths. This function never fails.
 */
void len2mask6(int len, struct in6_addr *addr)
{
	int word;

	for (word = 0; word < 4; word++)
		len2mask4(len - 32 * word, (struct in_addr *)&addr->s6_addr[4 * word]);
}
+
/*
 * converts <str> to two struct in_addr* which must be pre-allocated.
 * The format is "addr[/mask]", where "addr" cannot be empty, and mask
 * is optional and either in the dotted or CIDR notation.
 * Note: "addr" can also be a hostname, in which case it is resolved with
 * gethostbyname() when <resolve> is non-zero. Returns 1 if OK, 0 if error.
 */
int str2net(const char *str, int resolve, struct in_addr *addr, struct in_addr *mask)
{
	char *c, *s;
	int ret_val;

	/* work on a writable copy since the '/' separator is cut out */
	s = strdup(str);
	if (!s)
		return 0;

	memset(mask, 0, sizeof(*mask));
	memset(addr, 0, sizeof(*addr));

	if ((c = strrchr(s, '/')) != NULL) {
		*c++ = '\0';
		/* c points to the mask */
		if (!str2mask(c, mask))
			goto out_err;
	}
	else {
		/* no mask given: host mask */
		mask->s_addr = ~0U;
	}
	if (!inet_pton(AF_INET, s, addr)) {
		struct hostent *he;

		if (!resolve)
			goto out_err;

		if ((he = gethostbyname(s)) == NULL) {
			goto out_err;
		}
		else
			*addr = *(struct in_addr *) *(he->h_addr_list);
	}

	ret_val = 1;
 out_free:
	free(s);
	return ret_val;
 out_err:
	ret_val = 0;
	goto out_free;
}
+
+
+/*
+ * converts <str> to two struct in6_addr* which must be pre-allocated.
+ * The format is "addr[/mask]", where "addr" cannot be empty, and mask
+ * is an optional number of bits (128 being the default).
+ * Returns 1 if OK, 0 if error.
+ */
+int str62net(const char *str, struct in6_addr *addr, unsigned char *mask)
+{
+ char *c, *s;
+ int ret_val = 0;
+ char *err;
+ unsigned long len = 128;
+
+ s = strdup(str);
+ if (!s)
+ return 0;
+
+ memset(mask, 0, sizeof(*mask));
+ memset(addr, 0, sizeof(*addr));
+
+ if ((c = strrchr(s, '/')) != NULL) {
+ *c++ = '\0'; /* c points to the mask */
+ if (!*c)
+ goto out_free;
+
+ len = strtoul(c, &err, 10);
+ if ((err && *err) || (unsigned)len > 128)
+ goto out_free;
+ }
+ *mask = len; /* OK we have a valid mask in <len> */
+
+ if (!inet_pton(AF_INET6, s, addr))
+ goto out_free;
+
+ ret_val = 1;
+ out_free:
+ free(s);
+ return ret_val;
+}
+
+
/*
 * Parse IPv4 address found in url. Return the number of bytes parsed. It
 * expects exactly 4 numbers between 0 and 255 delimited by dots, and returns
 * zero in case of mismatch. Parsing stops at the first character that is
 * neither a digit nor a dot (e.g. ':' or '/'), so it can be used on a
 * larger URL.
 */
int url2ipv4(const char *addr, struct in_addr *dst)
{
	const char *start = addr;
	unsigned char oct[4];
	unsigned char *cur = oct;
	int in_digits = 0;		/* currently inside an octet */
	int noct = 0;			/* number of octets started */

	*cur = 0;
	while (*addr) {
		int c = *addr;
		unsigned char d = c - '0';	/* >9 for any non-digit */

		if (d > 9 && c != '.')
			break;
		addr++;
		if (d <= 9) {
			unsigned int v = *cur * 10 + d;

			if (v > 255)
				return 0;
			*cur = v;
			if (!in_digits) {
				if (++noct > 4)
					return 0;
				in_digits = 1;
			}
		}
		else if (in_digits) {
			/* a '.' terminating a complete octet */
			if (noct == 4)
				return 0;
			*++cur = 0;
			in_digits = 0;
		}
		else {
			/* a '.' with no preceding digit */
			return 0;
		}
	}

	if (noct < 4)
		return 0;

	/* oct[] already holds the bytes in network order */
	memcpy(&dst->s_addr, oct, 4);
	return addr - start;
}
+
/*
 * Resolve destination server from URL. Convert <str> to a sockaddr_storage.
 * <out> contain the code of the detected scheme, the start and length of
 * the hostname. Actually only http and https are supported. <out> can be NULL.
 * This function returns the consumed length. It is useful if you parse complete
 * url like http://host:port/path, because the consumed length corresponds to
 * the first character of the path. If the conversion fails, it returns -1.
 *
 * This function tries to resolve the DNS name if haproxy is in starting mode.
 * So, this function may be used during the configuration parsing.
 *
 * NOTE(review): the function relies on the global <trash> buffer as scratch
 * space, and on <url> pointing into a NUL-terminated string of at least
 * <ulen> bytes (the IPv6 branch may read the byte at url[ulen] when no ']'
 * is found before the end) — confirm callers guarantee this.
 */
int url2sa(const char *url, int ulen, struct sockaddr_storage *addr, struct split_url *out)
{
	const char *curr = url, *cp = url;
	const char *end;
	int ret, url_code = 0;
	unsigned long long int http_code = 0;
	int default_port;
	struct hostent *he;
	char *p;

	/* Firstly, try to find :// pattern. 0x3a2f2f is the three bytes
	 * ':', '/', '/' packed into the low 24 bits of <url_code>, which acts
	 * as a 3-byte sliding window over the input.
	 */
	while (curr < url+ulen && url_code != 0x3a2f2f) {
		url_code = ((url_code & 0xffff) << 8);
		url_code += (unsigned char)*curr++;
	}

	/* Secondly, if :// pattern is found, verify parsed stuff
	 * before pattern is matching our http pattern.
	 * If so parse ip address and port in uri.
	 *
	 * WARNING: Current code doesn't support dynamic async dns resolver.
	 */
	if (url_code != 0x3a2f2f)
		return -1;

	/* Copy scheme, and turn to lower case. The scheme (at most 8 bytes
	 * before the "://") is packed into <http_code>, then OR-ing each byte
	 * with 0x20 lower-cases ASCII letters so the comparison below is
	 * case-insensitive.
	 */
	while (cp < curr - 3)
		http_code = (http_code << 8) + *cp++;
	http_code |= 0x2020202020202020ULL;			/* Turn everything to lower case */

	/* HTTP or HTTPS url matching: the constants are "http" and "https"
	 * right-aligned in 8 space-padded (0x20) bytes.
	 */
	if (http_code == 0x2020202068747470ULL) {
		default_port = 80;
		if (out)
			out->scheme = SCH_HTTP;
	}
	else if (http_code == 0x2020206874747073ULL) {
		default_port = 443;
		if (out)
			out->scheme = SCH_HTTPS;
	}
	else
		return -1;

	/* If the next char is '[', the host address is IPv6. */
	if (*curr == '[') {
		curr++;

		/* Check trash size */
		if (trash.size < ulen)
			return -1;

		/* Look for ']' and copy the address in a trash buffer. */
		p = trash.area;
		for (end = curr;
		     end < url + ulen && *end != ']';
		     end++, p++)
			*p = *end;
		if (*end != ']')
			return -1;
		*p = '\0';

		/* Update out. */
		if (out) {
			out->host = curr;
			out->host_len = end - curr;
		}

		/* Try IPv6 decoding. */
		if (!inet_pton(AF_INET6, trash.area, &((struct sockaddr_in6 *)addr)->sin6_addr))
			return -1;
		end++;

		/* Decode port. */
		if (end < url + ulen && *end == ':') {
			end++;
			default_port = read_uint(&end, url + ulen);
		}
		((struct sockaddr_in6 *)addr)->sin6_port = htons(default_port);
		((struct sockaddr_in6 *)addr)->sin6_family = AF_INET6;
		return end - url;
	}
	else {
		/* we need to copy the string into the trash because url2ipv4
		 * needs a \0 at the end of the string */
		if (trash.size < ulen)
			return -1;

		memcpy(trash.area, curr, ulen - (curr - url));
		trash.area[ulen - (curr - url)] = '\0';

		/* We are looking for IP address. If you want to parse and
		 * resolve hostname found in url, you can use str2sa_range(), but
		 * be warned this can slow down global daemon performances
		 * while handling lagging dns responses.
		 */
		ret = url2ipv4(trash.area, &((struct sockaddr_in *)addr)->sin_addr);
		if (ret) {
			/* Update out. */
			if (out) {
				out->host = curr;
				out->host_len = ret;
			}

			curr += ret;

			/* Decode port. */
			if (curr < url + ulen && *curr == ':') {
				curr++;
				default_port = read_uint(&curr, url + ulen);
			}
			((struct sockaddr_in *)addr)->sin_port = htons(default_port);

			/* Set family. */
			((struct sockaddr_in *)addr)->sin_family = AF_INET;
			return curr - url;
		}
		else if (global.mode & MODE_STARTING) {
			/* The IPv4 and IPv6 decoding fails, maybe the url contain name. Try to execute
			 * synchronous DNS request only if HAProxy is in the start state.
			 */

			/* look for : or / or end */
			for (end = curr;
			     end < url + ulen && *end != '/' && *end != ':';
			     end++);
			memcpy(trash.area, curr, end - curr);
			trash.area[end - curr] = '\0';

			/* try to resolve an IPv4/IPv6 hostname */
			he = gethostbyname(trash.area);
			if (!he)
				return -1;

			/* Update out. */
			if (out) {
				out->host = curr;
				out->host_len = end - curr;
			}

			/* Decode port. */
			if (end < url + ulen && *end == ':') {
				end++;
				default_port = read_uint(&end, url + ulen);
			}

			/* Copy IP address, set port and family. */
			switch (he->h_addrtype) {
			case AF_INET:
				((struct sockaddr_in *)addr)->sin_addr = *(struct in_addr *) *(he->h_addr_list);
				((struct sockaddr_in *)addr)->sin_port = htons(default_port);
				((struct sockaddr_in *)addr)->sin_family = AF_INET;
				return end - url;

			case AF_INET6:
				((struct sockaddr_in6 *)addr)->sin6_addr = *(struct in6_addr *) *(he->h_addr_list);
				((struct sockaddr_in6 *)addr)->sin6_port = htons(default_port);
				((struct sockaddr_in6 *)addr)->sin6_family = AF_INET6;
				return end - url;
			}
		}
	}
	return -1;
}
+
/* Tries to convert a sockaddr_storage address to text form. Upon success, the
 * address family is returned so that it's easy for the caller to adapt to the
 * output format. Zero is returned if the address family is not supported. -1
 * is returned upon error, with errno set. AF_INET, AF_INET6 and AF_UNIX are
 * supported; AF_UNIX always produces the fixed string "unix".
 */
int addr_to_str(const struct sockaddr_storage *addr, char *str, int size)
{
	const void *src;

	/* need at least room for "unix" plus the trailing zero */
	if (size < 5)
		return 0;
	*str = '\0';

	switch (addr->ss_family) {
	case AF_UNIX:
		memcpy(str, "unix", 5);
		return addr->ss_family;
	case AF_INET:
		src = &((struct sockaddr_in *)addr)->sin_addr;
		break;
	case AF_INET6:
		src = &((struct sockaddr_in6 *)addr)->sin6_addr;
		break;
	default:
		return 0;
	}

	return inet_ntop(addr->ss_family, src, str, size) ? addr->ss_family : -1;
}
+
/* Tries to convert a sockaddr_storage port to text form. Upon success, the
 * address family is returned so that it's easy for the caller to adapt to the
 * output format. Zero is returned if the address family is not supported. -1
 * is returned upon error, with errno set. AF_INET, AF_INET6 and AF_UNIX are
 * supported; AF_UNIX always produces the fixed string "unix".
 */
int port_to_str(const struct sockaddr_storage *addr, char *str, int size)
{
	uint16_t net_port;

	/* need at least room for "65535" plus the trailing zero */
	if (size < 6)
		return 0;
	*str = '\0';

	switch (addr->ss_family) {
	case AF_UNIX:
		memcpy(str, "unix", 5);
		return addr->ss_family;
	case AF_INET:
		net_port = ((struct sockaddr_in *)addr)->sin_port;
		break;
	case AF_INET6:
		net_port = ((struct sockaddr_in6 *)addr)->sin6_port;
		break;
	default:
		return 0;
	}

	snprintf(str, size, "%u", ntohs(net_port));
	return addr->ss_family;
}
+
/* check if the given address is local to the system or not. It will return
 * -1 when it's not possible to know, 0 when the address is not local, 1 when
 * it is. We don't want to iterate over all interfaces for this (and it is not
 * portable). So instead we try to bind in UDP to this address on a free non
 * privileged port and to connect to the same address, port 0 (connect doesn't
 * care). If it succeeds, we own the address. Note that non-inet addresses are
 * considered local since they're most likely AF_UNIX.
 */
int addr_is_local(const struct netns_entry *ns,
                  const struct sockaddr_storage *orig)
{
	struct sockaddr_storage tmp;
	int ret = -1;
	int sock;

	/* non-inet families are reported as local */
	if (!is_inet_addr(orig))
		return 1;

	/* work on a copy with the port cleared so that bind() picks a free one */
	memcpy(&tmp, orig, sizeof(tmp));
	set_host_port(&tmp, 0);

	sock = my_socketat(ns, tmp.ss_family, SOCK_DGRAM, IPPROTO_UDP);
	if (sock < 0)
		return -1;

	if (bind(sock, (struct sockaddr *)&tmp, get_addr_len(&tmp)) == 0) {
		if (connect(sock, (struct sockaddr *)&tmp, get_addr_len(&tmp)) == -1)
			ret = 0; // fail, non-local address
		else
			ret = 1; // success, local address
	}
	else if (errno == EADDRNOTAVAIL) {
		ret = 0; // definitely not local :-)
	}
	close(sock);

	return ret;
}
+
/* Encodes string <string> by replacing every character tagged in <map> with
 * <escape> followed by the two-digit uppercase hex value of its ASCII code.
 * The result is written between <start> (included) and <stop> (excluded),
 * and is always terminated by a '\0' before <stop> when room permits. The
 * position of the '\0' is returned. If bytes are missing, the output is
 * silently truncated. If <stop> <= <start>, nothing (not even the '\0') is
 * written and <start> is returned. The input must be zero-terminated.
 */
const char hextab[16] = "0123456789ABCDEF";
char *encode_string(char *start, char *stop,
                    const char escape, const long *map,
                    const char *string)
{
	if (start >= stop)
		return start;

	stop--; /* keep room for the trailing '\0' */
	for (; start < stop && *string != '\0'; string++) {
		unsigned char c = (unsigned char)*string;

		if (!ha_bit_test(c, map)) {
			*start++ = *string;
			continue;
		}
		/* need room for the full 3-char escape sequence */
		if (start + 3 >= stop)
			break;
		*start++ = escape;
		*start++ = hextab[(c >> 4) & 15];
		*start++ = hextab[c & 15];
	}
	*start = '\0';
	return start;
}
+
+/*
+ * Same behavior as encode_string() above, except that it encodes chunk
+ * <chunk> instead of a string.
+ */
+char *encode_chunk(char *start, char *stop,
+ const char escape, const long *map,
+ const struct buffer *chunk)
+{
+ char *str = chunk->area;
+ char *end = chunk->area + chunk->data;
+
+ if (start < stop) {
+ stop--; /* reserve one byte for the final '\0' */
+ while (start < stop && str < end) {
+ if (!ha_bit_test((unsigned char)(*str), map))
+ *start++ = *str;
+ else {
+ if (start + 3 >= stop)
+ break;
+ *start++ = escape;
+ *start++ = hextab[(*str >> 4) & 15];
+ *start++ = hextab[*str & 15];
+ }
+ str++;
+ }
+ *start = '\0';
+ }
+ return start;
+}
+
/*
 * Prefixes every character tagged in <map> with the <escape> character. The
 * input <string> is processed until <string_stop> is reached or a NUL byte is
 * encountered. The result is stored between <start> (included) and <stop>
 * (excluded). This function always tries to terminate the resulting string
 * with a '\0' before <stop>, and returns its position when the conversion
 * completes.
 */
char *escape_string(char *start, char *stop,
                    const char escape, const long *map,
                    const char *string, const char *string_stop)
{
	if (start >= stop)
		return start;

	stop--; /* keep room for the trailing '\0' */
	for (; start < stop && string < string_stop && *string != '\0'; string++) {
		if (ha_bit_test((unsigned char)*string, map)) {
			/* need room for both the escape and the character */
			if (start + 2 >= stop)
				break;
			*start++ = escape;
		}
		*start++ = *string;
	}
	*start = '\0';
	return start;
}
+
/* Check a string for using it in a CSV output format. If the string contains
 * one of the following four char <">, <,>, CR or LF, the string is
 * encapsulated between <"> and the <"> are escaped by a <""> sequence.
 * <str> is the input string to be escaped. The function assumes that
 * the input string is null-terminated.
 *
 * If <quote> is 0, the result is returned escaped but without double quote.
 * It is useful if the escaped string is used between double quotes in the
 * format.
 *
 *    printf("..., \"%s\", ...\r\n", csv_enc(str, 0, 0, &trash));
 *
 * If <quote> is 1, the converter puts the quotes only if any reserved character
 * is present. If <quote> is 2, the converter always puts the quotes.
 *
 * If <oneline> is not 0, CRs are skipped and LFs are replaced by spaces.
 * This re-formats multi-line strings to only one line. The purpose is to
 * allow a line by line parsing but also to keep the output compliant with
 * the CLI which uses LF to define the end of the response.
 *
 * If <oneline> is 2, in addition to the previous action, trailing spaces are
 * removed.
 *
 * <output> is a struct buffer used for storing the output string.
 *
 * The function returns the converted string on its output. If an error
 * occurs, the function returns an empty string. This type of output is useful
 * for using the function directly as printf() argument.
 *
 * If the output buffer is too short to contain the input string, the result
 * is truncated.
 *
 * This function appends the encoding to the existing output chunk, and it
 * guarantees that it starts immediately at the first available character of
 * the chunk. Please use csv_enc() instead if you want to replace the output
 * chunk.
 */
const char *csv_enc_append(const char *str, int quote, int oneline, struct buffer *output)
{
	char *end = output->area + output->size;
	char *out = output->area + output->data;	/* first free byte: start of the result */
	char *ptr = out;

	if (quote == 1) {
		/* automatic quoting: first verify if we'll have to quote the string */
		if (!strpbrk(str, "\n\r,\""))
			quote = 0;
	}

	if (quote)
		*ptr++ = '"';

	while (*str && ptr < end - 2) { /* -2 for reserving space for <"> and \0. */
		if (oneline) {
			if (*str == '\n' ) {
				/* replace LF by a space */
				*ptr++ = ' ';
				str++;
				continue;
			}
			else if (*str == '\r' ) {
				/* skip CR */
				str++;
				continue;
			}
		}
		*ptr = *str;
		if (*str == '"') {
			/* a quote is emitted doubled (""); if the second copy
			 * doesn't fit, back out the first one and truncate here.
			 */
			ptr++;
			if (ptr >= end - 2) {
				ptr--;
				break;
			}
			*ptr = '"';
		}
		ptr++;
		str++;
	}

	if (oneline == 2) {
		/* remove trailing spaces */
		while (ptr > out && *(ptr - 1) == ' ')
			ptr--;
	}

	if (quote)
		*ptr++ = '"';

	*ptr = '\0';
	output->data = ptr - output->area;	/* commit the appended bytes */
	return out;
}
+
+/* Decode an URL-encoded string in-place. The resulting string might
+ * be shorter. If some forbidden characters are found, the conversion is
+ * aborted, the string is truncated before the issue and a negative value is
+ * returned, otherwise the operation returns the length of the decoded string.
+ * If the 'in_form' argument is non-nul the string is assumed to be part of
+ * an "application/x-www-form-urlencoded" encoded string, and the '+' will be
+ * turned to a space. If it's zero, this will only be done after a question
+ * mark ('?').
+ */
+int url_decode(char *string, int in_form)
+{
+ char *in, *out;
+ int ret = -1;
+
+ in = string;
+ out = string;
+ while (*in) {
+ switch (*in) {
+ case '+' :
+ *out++ = in_form ? ' ' : *in;
+ break;
+ case '%' :
+ if (!ishex(in[1]) || !ishex(in[2]))
+ goto end;
+ *out++ = (hex2i(in[1]) << 4) + hex2i(in[2]);
+ in += 2;
+ break;
+ case '?':
+ in_form = 1;
+ __fallthrough;
+ default:
+ *out++ = *in;
+ break;
+ }
+ in++;
+ }
+ ret = out - string; /* success */
+ end:
+ *out = 0;
+ return ret;
+}
+
/* The five functions below are thin exported (out-of-line) wrappers around
 * the corresponding inline parsers (__str2ui() etc., defined in the header,
 * not visible here), so that the parsers remain callable through function
 * pointers and from code that only sees the prototypes.
 */

/* out-of-line wrapper around __str2ui() */
unsigned int str2ui(const char *s)
{
	return __str2ui(s);
}

/* out-of-line wrapper around __str2uic() */
unsigned int str2uic(const char *s)
{
	return __str2uic(s);
}

/* out-of-line wrapper around __strl2ui() (length-delimited input) */
unsigned int strl2ui(const char *s, int len)
{
	return __strl2ui(s, len);
}

/* out-of-line wrapper around __strl2uic() (length-delimited input) */
unsigned int strl2uic(const char *s, int len)
{
	return __strl2uic(s, len);
}

/* out-of-line wrapper around __read_uint(); see read_uint64() below for the
 * documented semantics of this parser family.
 */
unsigned int read_uint(const char **s, const char *end)
{
	return __read_uint(s, end);
}
+
/* This function reads an unsigned integer from the string pointed to by <s>
 * and returns it. The <s> pointer is adjusted to point to the first unread
 * char. The function automatically stops at <end>. If the number overflows,
 * the 2^64-1 value is returned and the remaining digits are still consumed.
 */
unsigned long long int read_uint64(const char **s, const char *end)
{
	const char *p = *s;
	unsigned long long int res = 0;
	int saturated = 0;

	while (p < end) {
		unsigned int d = *p - '0';	/* >9 for any non-digit */

		if (d > 9)
			break;

		if (!saturated) {
			/* would res * 10 + d overflow ? */
			if (res > ULLONG_MAX / 10 ||
			    (res == ULLONG_MAX / 10 && d > (unsigned int)(ULLONG_MAX % 10))) {
				res = ULLONG_MAX;
				saturated = 1;
			}
			else
				res = res * 10 + d;
		}
		/* keep eating digits even once saturated */
		p++;
	}

	*s = p;
	return res;
}
+
/* This function reads an integer from the string pointed to by <s> and returns
 * it. The <s> pointer is adjusted to point to the first unread char. The
 * function automatically stops at <end>. Values larger than 2^63-1 saturate
 * to 2^63-1, and values below -2^63 saturate to -2^63.
 */
long long int read_int64(const char **s, const char *end)
{
	unsigned long long int u;
	int negative = (**s == '-');

	/* skip an optional sign */
	if (negative || **s == '+')
		(*s)++;

	/* convert the magnitude as an unsigned number */
	u = read_uint64(s, end);

	if (negative) {
		if (u > 0x8000000000000000ULL)
			return LLONG_MIN;
		/* unsigned negation, then implicit conversion, so that
		 * -2^63 is representable without signed overflow
		 */
		return -u;
	}
	if (u > 0x7fffffffffffffffULL)
		return LLONG_MAX;
	return u;
}
+
/* This one is 7 times faster than strtol() on athlon with checks.
 * It returns the value of the number composed of all valid digits read,
 * and can process negative numbers too. Parsing stops at the first byte
 * that is not an ASCII digit (the unsigned comparison also rejects bytes
 * below '0', which the previous '>9' test silently folded into the result).
 */
int strl2ic(const char *s, int len)
{
	int i = 0;
	int j, k;

	if (len > 0) {
		if (*s != '-') {
			/* positive number */
			while (len-- > 0) {
				j = (*s++) - '0';
				k = i * 10;
				if ((unsigned int)j > 9)
					break;
				i = k + j;
			}
		} else {
			/* negative number */
			s++;
			while (--len > 0) {
				j = (*s++) - '0';
				k = i * 10;
				if ((unsigned int)j > 9)
					break;
				i = k - j;
			}
		}
	}
	return i;
}
+
+
/* This function reads exactly <len> chars from <s> and converts them to a
 * signed integer which it stores into <ret>. It accurately detects any error
 * (truncated string, invalid chars, overflows). It is meant to be used in
 * applications designed for hostile environments. It returns zero when the
 * number has successfully been converted, non-zero otherwise. When an error
 * is returned, the <ret> value is left untouched. It is yet 5 to 40 times
 * faster than strtol().
 */
int strl2irc(const char *s, int len, int *ret)
{
	int i = 0;
	int j;

	if (!len)
		return 1;

	if (*s != '-') {
		/* positive number */
		while (len-- > 0) {
			j = (*s++) - '0';
			/* unsigned compare also rejects bytes below '0' */
			if ((unsigned int)j > 9) return 1; /* invalid char */
			if (i > INT_MAX / 10) return 1;    /* multiply would overflow */
			i = i * 10;
			if (i > INT_MAX - j) return 1;     /* addition would overflow (no UB) */
			i = i + j;
		}
	} else {
		/* negative number */
		s++;
		while (--len > 0) {
			j = (*s++) - '0';
			if ((unsigned int)j > 9) return 1; /* invalid char */
			if (i < INT_MIN / 10) return 1;    /* multiply would overflow */
			i = i * 10;
			if (i < INT_MIN + j) return 1;     /* subtraction would overflow (no UB) */
			i = i - j;
		}
	}
	*ret = i;
	return 0;
}
+
+
/* This function reads exactly <len> chars from <s> and converts them to a
 * signed integer which it stores into <ret>. It accurately detects any error
 * (truncated string, invalid chars, overflows). It is meant to be used in
 * applications designed for hostile environments. It returns zero when the
 * number has successfully been converted, non-zero otherwise. When an error
 * is returned, the <ret> value is left untouched. It is about 3 times slower
 * than strl2irc().
 */

int strl2llrc(const char *s, int len, long long *ret)
{
	long long i = 0;
	int j;

	if (!len)
		return 1;

	if (*s != '-') {
		/* positive number */
		while (len-- > 0) {
			j = (*s++) - '0';
			/* unsigned compare also rejects bytes below '0' */
			if ((unsigned int)j > 9) return 1;   /* invalid char */
			if (i > LLONG_MAX / 10LL) return 1;  /* multiply would overflow */
			i = i * 10LL;
			if (i > LLONG_MAX - j) return 1;     /* addition would overflow (no UB) */
			i = i + j;
		}
	} else {
		/* negative number */
		s++;
		while (--len > 0) {
			j = (*s++) - '0';
			if ((unsigned int)j > 9) return 1;   /* invalid char */
			if (i < LLONG_MIN / 10LL) return 1;  /* multiply would overflow */
			i = i * 10LL;
			if (i < LLONG_MIN + j) return 1;     /* subtraction would overflow (no UB) */
			i = i - j;
		}
	}
	*ret = i;
	return 0;
}
+
/* This function is used with pat_parse_dotted_ver(). It converts a string
 * composed of two numbers separated by a dot. Each part must fit in 16 bits
 * because internally they will be represented as a 32-bit quantity stored in
 * a 64-bit integer. It returns zero when the number has successfully been
 * converted, non-zero otherwise. When an error is returned, the <ret> value
 * is left untouched.
 *
 *    "1.3"         -> 0x0000000000010003
 *    "65535.65535" -> 0x00000000ffffffff
 */
int strl2llrc_dotted(const char *text, int len, long long *ret)
{
	const char *end = &text[len];
	const char *dot = text;
	long long hi, lo = 0;

	/* locate the (optional) dot */
	while (dot < end && *dot != '.')
		dot++;

	/* major part, limited to 16 bits */
	if (strl2llrc(text, dot - text, &hi) != 0 || hi >= 65536)
		return 1;

	/* minor part (defaults to 0), limited to 16 bits */
	if (dot < end && strl2llrc(dot + 1, end - (dot + 1), &lo) != 0)
		return 1;
	if (lo >= 65536)
		return 1;

	/* pack both halves into the result */
	*ret = (hi << 16) | (lo & 0xffff);
	return 0;
}
+
/* This function parses a time value optionally followed by a unit suffix among
 * "d", "h", "m", "s", "ms" or "us". It converts the value into the unit
 * expected by the caller. The computation does its best to avoid overflows.
 * The value is returned in <ret> if everything is fine, and a NULL is returned
 * by the function. In case of error, a pointer to the error is returned and
 * <ret> is left untouched. Values are automatically rounded up when needed.
 * Values resulting in values larger than or equal to 2^31 after conversion are
 * reported as an overflow as value PARSE_TIME_OVER. Non-null values resulting
 * in an underflow are reported as an underflow as value PARSE_TIME_UNDER.
 */
const char *parse_time_err(const char *text, unsigned *ret, unsigned unit_flags)
{
	unsigned long long imult, idiv; /* input scaling : suffix unit -> seconds */
	unsigned long long omult, odiv; /* output scaling : seconds -> caller's unit */
	unsigned long long value, result;
	const char *str = text; /* kept only for the warning message below */

	if (!isdigit((unsigned char)*text))
		return text;

	omult = odiv = 1;

	/* select the scaling between seconds and the unit the caller expects */
	switch (unit_flags & TIME_UNIT_MASK) {
	case TIME_UNIT_US: omult = 1000000; break;
	case TIME_UNIT_MS: omult = 1000; break;
	case TIME_UNIT_S: break;
	case TIME_UNIT_MIN: odiv = 60; break;
	case TIME_UNIT_HOUR: odiv = 3600; break;
	case TIME_UNIT_DAY: odiv = 86400; break;
	default: break;
	}

	value = 0;

	/* accumulate decimal digits. NOTE(review): no overflow check here;
	 * presumably bounded in practice by the 2^31 test below — confirm for
	 * pathologically long digit strings.
	 */
	while (1) {
		unsigned int j;

		j = *text - '0';
		if (j > 9)
			break;
		text++;
		value *= 10;
		value += j;
	}

	imult = idiv = 1;
	switch (*text) {
	case '\0': /* no unit = default unit */
		imult = omult = idiv = odiv = 1;
		goto end;
	case 's': /* second = unscaled unit */
		break;
	case 'u': /* microsecond : "us" */
		if (text[1] == 's') {
			idiv = 1000000;
			text++;
			break;
		}
		return text;
	case 'm': /* millisecond : "ms" or minute: "m" */
		if (text[1] == 's') {
			idiv = 1000;
			text++;
		} else
			imult = 60;
		break;
	case 'h': /* hour : "h" */
		imult = 3600;
		break;
	case 'd': /* day : "d" */
		imult = 86400;
		break;
	default:
		return text;
	}
	/* trailing garbage after the unit only warns for now (future error) */
	if (*(++text) != '\0') {
		ha_warning("unexpected character '%c' after the timer value '%s', only "
			   "(us=microseconds,ms=milliseconds,s=seconds,m=minutes,h=hours,d=days) are supported."
			   " This will be reported as an error in next versions.\n", *text, str);
	}

 end:
	/* cancel common factors between multipliers and divisors to reduce
	 * the magnitude of the intermediate products below.
	 */
	if (omult % idiv == 0) { omult /= idiv; idiv = 1; }
	if (idiv % omult == 0) { idiv /= omult; omult = 1; }
	if (imult % odiv == 0) { imult /= odiv; odiv = 1; }
	if (odiv % imult == 0) { odiv /= imult; imult = 1; }

	/* scale and round up to the nearest output unit */
	result = (value * (imult * omult) + (idiv * odiv - 1)) / (idiv * odiv);
	if (result >= 0x80000000)
		return PARSE_TIME_OVER;
	if (!result && value)
		return PARSE_TIME_UNDER;
	*ret = result;
	return NULL;
}
+
+/* this function converts the string starting at <text> to an unsigned int
+ * stored in <ret>. If an error is detected, the pointer to the unexpected
+ * character is returned. If the conversion is successful, NULL is returned.
+ */
+const char *parse_size_err(const char *text, unsigned *ret) {
+ unsigned value = 0;
+
+ if (!isdigit((unsigned char)*text))
+ return text;
+
+ while (1) {
+ unsigned int j;
+
+ j = *text - '0';
+ if (j > 9)
+ break;
+ if (value > ~0U / 10)
+ return text;
+ value *= 10;
+ if (value > (value + j))
+ return text;
+ value += j;
+ text++;
+ }
+
+ switch (*text) {
+ case '\0':
+ break;
+ case 'K':
+ case 'k':
+ if (value > ~0U >> 10)
+ return text;
+ value = value << 10;
+ break;
+ case 'M':
+ case 'm':
+ if (value > ~0U >> 20)
+ return text;
+ value = value << 20;
+ break;
+ case 'G':
+ case 'g':
+ if (value > ~0U >> 30)
+ return text;
+ value = value << 30;
+ break;
+ default:
+ return text;
+ }
+
+ if (*text != '\0' && *++text != '\0')
+ return text;
+
+ *ret = value;
+ return NULL;
+}
+
/*
 * Parse binary string written in hexadecimal (source) and store the decoded
 * result into binstr and set binstrlen to the length of binstr. Memory for
 * binstr is allocated by the function. In case of error, returns 0 with an
 * error message in err. In success case, it returns the consumed length.
 */
int parse_binary(const char *source, char **binstr, int *binstrlen, char **err)
{
	int len;
	const char *p = source;
	int i,j;
	int alloc; /* non-zero when <*binstr> was allocated here (freed on error) */

	len = strlen(source);
	if (len % 2) {
		memprintf(err, "an even number of hex digit is expected");
		return 0;
	}

	len = len >> 1; /* output length: one byte per pair of hex digits */

	if (!*binstr) {
		/* NOTE(review): with an empty <source>, calloc(0, ...) may
		 * legitimately return NULL and be reported as OOM — confirm
		 * callers never pass an empty string here.
		 */
		*binstr = calloc(len, sizeof(**binstr));
		if (!*binstr) {
			memprintf(err, "out of memory while loading string pattern");
			return 0;
		}
		alloc = 1;
	}
	else {
		/* caller-provided buffer: must be large enough for <len> bytes */
		if (*binstrlen < len) {
			memprintf(err, "no space available in the buffer. expect %d, provides %d",
			          len, *binstrlen);
			return 0;
		}
		alloc = 0;
	}
	*binstrlen = len;

	/* decode two hex digits into each output byte */
	i = j = 0;
	while (j < len) {
		if (!ishex(p[i++]))
			goto bad_input;
		if (!ishex(p[i++]))
			goto bad_input;
		(*binstr)[j++] = (hex2i(p[i-2]) << 4) + hex2i(p[i-1]);
	}
	return len << 1;

bad_input:
	memprintf(err, "an hex digit is expected (found '%c')", p[i-1]);
	if (alloc)
		ha_free(binstr);
	return 0;
}
+
/* copies at most <n> characters from <src> into a freshly allocated string
 * which is always '\0'-terminated. Returns NULL on allocation failure.
 */
char *my_strndup(const char *src, int n)
{
	const char *nul;
	size_t cap, len;
	char *out;

	/* length is the distance to the first NUL, capped to <n> */
	cap = (n > 0) ? (size_t)n : 0;
	nul = cap ? memchr(src, '\0', cap) : NULL;
	len = nul ? (size_t)(nul - src) : cap;

	out = malloc(len + 1);
	if (!out)
		return NULL;

	memcpy(out, src, len);
	out[len] = '\0';
	return out;
}
+
/*
 * search needle in haystack
 * returns the pointer if found, returns NULL otherwise
 */
const void *my_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen)
{
	const unsigned char *start;
	const unsigned char *pos;
	unsigned char first;

	if (!haystack || !needle || haystacklen < needlelen)
		return NULL;

	start = haystack;
	first = *(const unsigned char *)needle;

	/* jump from one candidate first-byte match to the next */
	for (pos = start; ; pos++) {
		pos = memchr(pos, first, haystacklen - (size_t)(pos - start));
		if (!pos)
			return NULL;
		if (haystacklen - (size_t)(pos - start) < needlelen)
			return NULL;
		if (memcmp(pos, needle, needlelen) == 0)
			return pos;
	}
}
+
/* get length of the initial segment consisting entirely of bytes in <accept> */
size_t my_memspn(const void *str, size_t len, const void *accept, size_t acceptlen)
{
	/* Fix: the previous code dereferenced <str> as an int, reading
	 * sizeof(int) bytes per position (out-of-bounds near the end of the
	 * buffer, and picking the wrong byte on big-endian machines). A
	 * single byte must be read and compared.
	 */
	const unsigned char *p = str;
	size_t ret = 0;

	while (ret < len && memchr(accept, p[ret], acceptlen))
		ret++;
	return ret;
}
+
/* get length of the initial segment consisting entirely of bytes not in <reject> */
size_t my_memcspn(const void *str, size_t len, const void *reject, size_t rejectlen)
{
	/* Fix: the previous code dereferenced <str> as an int, reading
	 * sizeof(int) bytes per position (out-of-bounds near the end of the
	 * buffer, and picking the wrong byte on big-endian machines). A
	 * single byte must be read and compared. Also fixed the "rejcet"
	 * typo in the comment above.
	 */
	const unsigned char *p = str;
	size_t ret = 0;

	while (ret < len && !memchr(reject, p[ret], rejectlen))
		ret++;
	return ret;
}
+
/* This function returns the first unused key greater than or equal to <key> in
 * ID tree <root>. Zero is returned if no place is found.
 */
unsigned int get_next_id(struct eb_root *root, unsigned int key)
{
	struct eb32_node *used;

	do {
		/* find the first allocated key >= <key>; any gap before it
		 * means <key> itself is free.
		 */
		used = eb32_lookup_ge(root, key);
		if (!used || used->key > key)
			return key; /* key is available */
		key++;
	} while (key); /* stops when key wraps to 0 : tree is full */
	return key;
}
+
/* dump the full tree to <file> in DOT format for debugging purposes. Will
 * optionally highlight node <subj> if found, depending on operation <op> :
 *    0 : nothing
 *   >0 : insertion, node/leaf are surrounded in red
 *   <0 : removal, node/leaf are dashed with no background
 * Will optionally add "desc" as a label on the graph if set and non-null.
 */
void eb32sc_to_file(FILE *file, struct eb_root *root, const struct eb32sc_node *subj, int op, const char *desc)
{
	struct eb32sc_node *node;
	unsigned long scope = -1; /* all bits set: visit every scope */

	fprintf(file, "digraph ebtree {\n");

	if (desc && *desc) {
		fprintf(file,
			"  fontname=\"fixed\";\n"
			"  fontsize=8;\n"
			"  label=\"%s\";\n", desc);
	}

	fprintf(file,
		"  node [fontname=\"fixed\" fontsize=8 shape=\"box\" style=\"filled\" color=\"black\" fillcolor=\"white\"];\n"
		"  edge [fontname=\"fixed\" fontsize=8 style=\"solid\" color=\"magenta\" dir=\"forward\"];\n"
		"  \"%lx_n\" [label=\"root\\n%lx\"]\n", (long)eb_root_to_node(root), (long)root
		);

	/* the root only uses its left branch */
	fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"L\"];\n",
		(long)eb_root_to_node(root),
		(long)eb_root_to_node(eb_clrtag(root->b[0])),
		eb_gettag(root->b[0]) == EB_LEAF ? 'l' : 'n');

	node = eb32sc_first(root, scope);
	while (node) {
		if (node->node.node_p) {
			/* node part is used: emit the internal node and its
			 * up-link plus both branches.
			 */
			fprintf(file, "  \"%lx_n\" [label=\"%lx\\nkey=%u\\nscope=%lx\\nbit=%d\" fillcolor=\"lightskyblue1\" %s];\n",
				(long)node, (long)node, node->key, node->node_s, node->node.bit,
				(node == subj) ? (op < 0 ? "color=\"red\" style=\"dashed\"" : op > 0 ? "color=\"red\"" : "") : "");

			fprintf(file, "  \"%lx_n\" -> \"%lx_n\" [taillabel=\"%c\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.node_p)),
				eb_gettag(node->node.node_p) ? 'R' : 'L');

			fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"L\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.branches.b[0])),
				eb_gettag(node->node.branches.b[0]) == EB_LEAF ? 'l' : 'n');

			fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"R\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.branches.b[1])),
				eb_gettag(node->node.branches.b[1]) == EB_LEAF ? 'l' : 'n');
		}

		/* the leaf part is always dumped */
		fprintf(file, "  \"%lx_l\" [label=\"%lx\\nkey=%u\\nscope=%lx\\npfx=%u\" fillcolor=\"yellow\" %s];\n",
			(long)node, (long)node, node->key, node->leaf_s, node->node.pfx,
			(node == subj) ? (op < 0 ? "color=\"red\" style=\"dashed\"" : op > 0 ? "color=\"red\"" : "") : "");

		fprintf(file, "  \"%lx_l\" -> \"%lx_n\" [taillabel=\"%c\"];\n",
			(long)node,
			(long)eb_root_to_node(eb_clrtag(node->node.leaf_p)),
			eb_gettag(node->node.leaf_p) ? 'R' : 'L');
		node = eb32sc_next(node, scope);
	}
	fprintf(file, "}\n");
}
+
/* dump the full tree to <file> in DOT format for debugging purposes. Will
 * optionally highlight node <subj> if found, depending on operation <op> :
 *    0 : nothing
 *   >0 : insertion, node/leaf are surrounded in red
 *   <0 : removal, node/leaf are dashed with no background
 * Will optionally add "desc" as a label on the graph if set and non-null. The
 * key is printed as a u32 hex value. A full-sized hex dump would be better but
 * is left to be implemented.
 */
void ebmb_to_file(FILE *file, struct eb_root *root, const struct ebmb_node *subj, int op, const char *desc)
{
	struct ebmb_node *node;

	fprintf(file, "digraph ebtree {\n");

	if (desc && *desc) {
		fprintf(file,
			"  fontname=\"fixed\";\n"
			"  fontsize=8;\n"
			"  label=\"%s\";\n", desc);
	}

	fprintf(file,
		"  node [fontname=\"fixed\" fontsize=8 shape=\"box\" style=\"filled\" color=\"black\" fillcolor=\"white\"];\n"
		"  edge [fontname=\"fixed\" fontsize=8 style=\"solid\" color=\"magenta\" dir=\"forward\"];\n"
		"  \"%lx_n\" [label=\"root\\n%lx\"]\n", (long)eb_root_to_node(root), (long)root
		);

	/* the root only uses its left branch */
	fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"L\"];\n",
		(long)eb_root_to_node(root),
		(long)eb_root_to_node(eb_clrtag(root->b[0])),
		eb_gettag(root->b[0]) == EB_LEAF ? 'l' : 'n');

	node = ebmb_first(root);
	while (node) {
		if (node->node.node_p) {
			/* node part is used: emit the internal node and its
			 * up-link plus both branches.
			 */
			fprintf(file, "  \"%lx_n\" [label=\"%lx\\nkey=%#x\\nbit=%d\" fillcolor=\"lightskyblue1\" %s];\n",
				(long)node, (long)node, read_u32(node->key), node->node.bit,
				(node == subj) ? (op < 0 ? "color=\"red\" style=\"dashed\"" : op > 0 ? "color=\"red\"" : "") : "");

			fprintf(file, "  \"%lx_n\" -> \"%lx_n\" [taillabel=\"%c\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.node_p)),
				eb_gettag(node->node.node_p) ? 'R' : 'L');

			fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"L\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.branches.b[0])),
				eb_gettag(node->node.branches.b[0]) == EB_LEAF ? 'l' : 'n');

			fprintf(file, "  \"%lx_n\" -> \"%lx_%c\" [taillabel=\"R\"];\n",
				(long)node,
				(long)eb_root_to_node(eb_clrtag(node->node.branches.b[1])),
				eb_gettag(node->node.branches.b[1]) == EB_LEAF ? 'l' : 'n');
		}

		/* the leaf part is always dumped */
		fprintf(file, "  \"%lx_l\" [label=\"%lx\\nkey=%#x\\npfx=%u\" fillcolor=\"yellow\" %s];\n",
			(long)node, (long)node, read_u32(node->key), node->node.pfx,
			(node == subj) ? (op < 0 ? "color=\"red\" style=\"dashed\"" : op > 0 ? "color=\"red\"" : "") : "");

		fprintf(file, "  \"%lx_l\" -> \"%lx_n\" [taillabel=\"%c\"];\n",
			(long)node,
			(long)eb_root_to_node(eb_clrtag(node->node.leaf_p)),
			eb_gettag(node->node.leaf_p) ? 'R' : 'L');
		node = ebmb_next(node);
	}
	fprintf(file, "}\n");
}
+
/* This function compares a sample word possibly followed by blanks to another
 * clean word. The compare is case-insensitive. 1 is returned if both are equal,
 * otherwise zero. This intends to be used when checking HTTP headers for some
 * values. Note that it validates a word followed only by blanks but does not
 * validate a word followed by blanks then other chars.
 */
int word_match(const char *sample, int slen, const char *word, int wlen)
{
	int i;

	if (slen < wlen)
		return 0;

	/* compare the first <wlen> chars, tolerating a case difference only
	 * (the two chars may differ solely by the 0x20 case bit).
	 */
	for (i = 0; i < wlen; i++) {
		char delta = sample[i] ^ word[i];

		if (delta && delta != ('A' ^ 'a'))
			return 0;
	}

	/* the remainder of <sample> must contain only blanks */
	for (i = wlen; i < slen; i++) {
		if (sample[i] != ' ' && sample[i] != '\t')
			return 0;
	}
	return 1;
}
+
/* Converts any text-formatted IPv4 address to a host-order IPv4 address. It
 * is particularly fast because it avoids expensive operations such as
 * multiplies, which are optimized away at the end. It requires a properly
 * formatted address though (3 points).
 */
unsigned int inetaddr_host(const char *text)
{
	const unsigned int ascii_zero = ('0' << 24) | ('0' << 16) | ('0' << 8) | '0';
	register unsigned int dig100, dig10, dig1;
	int s; /* shift for the octet being parsed: 24, 16, 8 then 0 */
	const char *p, *d;

	/* each digX accumulator holds one ASCII digit per octet position;
	 * the '0' bias of all four positions is removed in one go at the end.
	 */
	dig1 = dig10 = dig100 = ascii_zero;
	s = 24;

	p = text;
	while (1) {
		if (((unsigned)(*p - '0')) <= 9) {
			p++;
			continue;
		}

		/* here, we have a complete byte between <text> and <p> (exclusive) */
		if (p == text)
			goto end;

		/* store units, tens and hundreds digits from right to left;
		 * stop as soon as the start of the octet is reached.
		 */
		d = p - 1;
		dig1 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig10 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig100 |= (unsigned int)(*d << s);
	end:
		if (!s || *p != '.')
			break;

		s -= 8;
		text = ++p; /* skip the dot, start the next octet */
	}

	/* remove the ASCII '0' bias then combine: value = 100*h + 10*t + u */
	dig100 -= ascii_zero;
	dig10  -= ascii_zero;
	dig1   -= ascii_zero;
	return ((dig100 * 10) + dig10) * 10 + dig1;
}
+
/*
 * Idem except the first unparsed character has to be passed in <stop>.
 */
unsigned int inetaddr_host_lim(const char *text, const char *stop)
{
	const unsigned int ascii_zero = ('0' << 24) | ('0' << 16) | ('0' << 8) | '0';
	register unsigned int dig100, dig10, dig1;
	int s; /* shift for the octet being parsed: 24, 16, 8 then 0 */
	const char *p, *d;

	dig1 = dig10 = dig100 = ascii_zero;
	s = 24;

	p = text;
	while (1) {
		/* same as inetaddr_host() but never reads at/past <stop> */
		if (((unsigned)(*p - '0')) <= 9 && p < stop) {
			p++;
			continue;
		}

		/* here, we have a complete byte between <text> and <p> (exclusive) */
		if (p == text)
			goto end;

		d = p - 1;
		dig1 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig10 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig100 |= (unsigned int)(*d << s);
	end:
		if (!s || p == stop || *p != '.')
			break;

		s -= 8;
		text = ++p;
	}

	/* remove the ASCII '0' bias then combine: value = 100*h + 10*t + u */
	dig100 -= ascii_zero;
	dig10  -= ascii_zero;
	dig1   -= ascii_zero;
	return ((dig100 * 10) + dig10) * 10 + dig1;
}
+
/*
 * Idem except the pointer to first unparsed byte is returned into <ret> which
 * must not be NULL.
 */
unsigned int inetaddr_host_lim_ret(char *text, char *stop, char **ret)
{
	const unsigned int ascii_zero = ('0' << 24) | ('0' << 16) | ('0' << 8) | '0';
	register unsigned int dig100, dig10, dig1;
	int s; /* shift for the octet being parsed: 24, 16, 8 then 0 */
	char *p, *d;

	dig1 = dig10 = dig100 = ascii_zero;
	s = 24;

	p = text;
	while (1) {
		/* same as inetaddr_host_lim() but also reports where it stopped */
		if (((unsigned)(*p - '0')) <= 9 && p < stop) {
			p++;
			continue;
		}

		/* here, we have a complete byte between <text> and <p> (exclusive) */
		if (p == text)
			goto end;

		d = p - 1;
		dig1 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig10 |= (unsigned int)(*d << s);
		if (d == text)
			goto end;

		d--;
		dig100 |= (unsigned int)(*d << s);
	end:
		if (!s || p == stop || *p != '.')
			break;

		s -= 8;
		text = ++p;
	}

	*ret = p; /* first unparsed byte */
	/* remove the ASCII '0' bias then combine: value = 100*h + 10*t + u */
	dig100 -= ascii_zero;
	dig10  -= ascii_zero;
	dig1   -= ascii_zero;
	return ((dig100 * 10) + dig10) * 10 + dig1;
}
+
/* Convert a fixed-length string to an IP address. Returns 0 in case of error,
 * or the number of chars read in case of success. Maybe this could be replaced
 * by one of the functions above. Also, apparently this function does not support
 * hosts above 255 and requires exactly 4 octets.
 * The destination is only modified on success.
 */
int buf2ip(const char *buf, size_t len, struct in_addr *dst)
{
	unsigned char octets[4] = { 0 };
	int nb_octets = 0;  /* number of octets started so far */
	int in_number = 0;  /* non-zero while inside a digit run */
	int idx = 0;        /* index of the octet being filled */
	size_t pos = 0;

	while (pos < len) {
		unsigned char c = buf[pos];

		if (c >= '0' && c <= '9') {
			unsigned int v = octets[idx] * 10 + (c - '0');

			if (v > 255)
				return 0;
			octets[idx] = v;
			if (!in_number) {
				if (++nb_octets > 4)
					return 0;
				in_number = 1;
			}
		}
		else if (c == '.') {
			/* a dot must follow digits and not exceed 4 octets */
			if (!in_number || nb_octets == 4)
				return 0;
			octets[++idx] = 0;
			in_number = 0;
		}
		else
			break; /* first unparsable char ends the address */
		pos++;
	}

	if (nb_octets < 4)
		return 0;

	memcpy(&dst->s_addr, octets, 4);
	return pos;
}
+
/* This function converts the string in <buf> of the len <len> to
 * struct in6_addr <dst> which must be allocated by the caller.
 * This function returns 1 in success case, otherwise zero.
 * The destination is only modified on success.
 */
int buf2ip6(const char *buf, size_t len, struct in6_addr *dst)
{
	struct in6_addr parsed;
	char tmp[INET6_ADDRSTRLEN + 1];

	if (len > INET6_ADDRSTRLEN)
		return 0;

	/* inet_pton() needs a NUL-terminated copy of the input */
	memcpy(tmp, buf, len);
	tmp[len] = '\0';

	if (!inet_pton(AF_INET6, tmp, &parsed))
		return 0;

	*dst = parsed;
	return 1;
}
+
+/* To be used to quote config arg positions. Returns the short string at <ptr>
+ * surrounded by simple quotes if <ptr> is valid and non-empty, or "end of line"
+ * if ptr is NULL or empty. The string is locally allocated.
+ */
+const char *quote_arg(const char *ptr)
+{
+ static THREAD_LOCAL char val[32];
+ int i;
+
+ if (!ptr || !*ptr)
+ return "end of line";
+ val[0] = '\'';
+ for (i = 1; i < sizeof(val) - 2 && *ptr; i++)
+ val[i] = *ptr++;
+ val[i++] = '\'';
+ val[i] = '\0';
+ return val;
+}
+
+/* returns an operator among STD_OP_* for string <str> or < 0 if unknown */
+int get_std_op(const char *str)
+{
+ int ret = -1;
+
+ if (*str == 'e' && str[1] == 'q')
+ ret = STD_OP_EQ;
+ else if (*str == 'n' && str[1] == 'e')
+ ret = STD_OP_NE;
+ else if (*str == 'l') {
+ if (str[1] == 'e') ret = STD_OP_LE;
+ else if (str[1] == 't') ret = STD_OP_LT;
+ }
+ else if (*str == 'g') {
+ if (str[1] == 'e') ret = STD_OP_GE;
+ else if (str[1] == 't') ret = STD_OP_GT;
+ }
+
+ if (ret == -1 || str[2] != '\0')
+ return -1;
+ return ret;
+}
+
/* hash a 32-bit integer to another 32-bit integer. This is the out-of-line
 * exported wrapper around the inline __full_hash().
 */
unsigned int full_hash(unsigned int a)
{
	return __full_hash(a);
}
+
/* Return the bit position in mask <m> of the nth bit set of rank <r>, between
 * 0 and LONGBITS-1 included, starting from the left. For example ranks 0,1,2,3
 * for mask 0x55 will be 6, 4, 2 and 0 respectively. This algorithm is based on
 * a popcount variant and is described here :
 *   https://graphics.stanford.edu/~seander/bithacks.html
 */
unsigned int mask_find_rank_bit(unsigned int r, unsigned long m)
{
	unsigned long a, b, c, d;
	unsigned int s;
	unsigned int t;

	/* a..d hold partial popcounts per 2-, 4-, 8- and 16-bit groups */
	a =  m - ((m >> 1) & ~0UL/3);
	b = (a & ~0UL/5) + ((a >> 2) & ~0UL/5);
	c = (b + (b >> 4)) & ~0UL/0x11;
	d = (c + (c >> 8)) & ~0UL/0x101;

	r++; // make r be 1..64

	/* binary search down the popcount pyramid: at each step, compare the
	 * popcount of the lower half with the remaining rank and adjust <s>
	 * (candidate position) and <r> branchlessly.
	 */
	t = 0;
	s = LONGBITS;
	if (s > 32) {
		unsigned long d2 = (d >> 16) >> 16;
		t = d2 + (d2 >> 16);
		s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
	}

	t  = (d >> (s - 16)) & 0xff;
	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
	t  = (c >> (s - 8)) & 0xf;
	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
	t  = (b >> (s - 4)) & 0x7;
	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
	t  = (a >> (s - 2)) & 0x3;
	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
	t  = (m >> (s - 1)) & 0x1;
	s -= ((t - r) & 256) >> 8;

	return s - 1; /* convert 1-based position to bit index */
}
+
/* Same as mask_find_rank_bit() above but makes use of pre-computed bitmaps
 * based on <m>, in <a..d>. These ones must be updated whenever <m> changes
 * using mask_prep_rank_map() below.
 */
unsigned int mask_find_rank_bit_fast(unsigned int r, unsigned long m,
                                     unsigned long a, unsigned long b,
                                     unsigned long c, unsigned long d)
{
	unsigned int s;
	unsigned int t;

	r++; // make r be 1..64

	/* binary search down the precomputed popcount pyramid, exactly as in
	 * mask_find_rank_bit() but without recomputing a..d.
	 */
	t = 0;
	s = LONGBITS;
	if (s > 32) {
		unsigned long d2 = (d >> 16) >> 16;
		t = d2 + (d2 >> 16);
		s -= ((t - r) & 256) >> 3; r -= (t & ((t - r) >> 8));
	}

	t  = (d >> (s - 16)) & 0xff;
	s -= ((t - r) & 256) >> 4; r -= (t & ((t - r) >> 8));
	t  = (c >> (s - 8)) & 0xf;
	s -= ((t - r) & 256) >> 5; r -= (t & ((t - r) >> 8));
	t  = (b >> (s - 4)) & 0x7;
	s -= ((t - r) & 256) >> 6; r -= (t & ((t - r) >> 8));
	t  = (a >> (s - 2)) & 0x3;
	s -= ((t - r) & 256) >> 7; r -= (t & ((t - r) >> 8));
	t  = (m >> (s - 1)) & 0x1;
	s -= ((t - r) & 256) >> 8;

	return s - 1; /* convert 1-based position to bit index */
}
+
/* Prepare the bitmaps used by the fast implementation of the find_rank_bit()
 * above: partial popcounts of <m> per 2-, 4-, 8- and 16-bit groups, stored
 * into <a>, <b>, <c> and <d> respectively.
 */
void mask_prep_rank_map(unsigned long m,
                        unsigned long *a, unsigned long *b,
                        unsigned long *c, unsigned long *d)
{
	unsigned long pairs, nibbles, bytes;

	pairs   = m - ((m >> 1) & ~0UL/3);                     /* per 2 bits */
	nibbles = (pairs & ~0UL/5) + ((pairs >> 2) & ~0UL/5);  /* per 4 bits */
	bytes   = (nibbles + (nibbles >> 4)) & ~0UL/0x11;      /* per 8 bits */

	*a = pairs;
	*b = nibbles;
	*c = bytes;
	*d = (bytes + (bytes >> 8)) & ~0UL/0x101;              /* per 16 bits */
}
+
/* Returns the position of one bit set in <v>, starting at position <bit>, and
 * searching in other halves if not found. This is intended to be used to
 * report the position of one bit set among several based on a counter or a
 * random generator while preserving a relatively good distribution so that
 * values made of holes in the middle do not see one of the bits around the
 * hole being returned much more often than the other one. It can be seen as a
 * disturbed ffsl() where the initial search starts at bit <bit>. The look up
 * is performed in O(logN) time for N bit words, yielding a bit among 64 in
 * about 16 cycles. Its usage differs from the rank find function in that the
 * bit passed doesn't need to be limited to the value's popcount, making the
 * function easier to use for random picking, and twice as fast. Passing value
 * 0 for <v> makes no sense and -1 is returned in this case.
 */
int one_among_mask(unsigned long v, int bit)
{
	/* note, these masks may be produced by ~0UL/((1UL<<scale)+1) but
	 * that's more expensive.
	 */
	static const unsigned long halves[] = {
		(unsigned long)0x5555555555555555ULL,
		(unsigned long)0x3333333333333333ULL,
		(unsigned long)0x0F0F0F0F0F0F0F0FULL,
		(unsigned long)0x00FF00FF00FF00FFULL,
		(unsigned long)0x0000FFFF0000FFFFULL,
		(unsigned long)0x00000000FFFFFFFFULL
	};
	unsigned long halfword = ~0UL; /* shrinking window of candidate bits */
	int scope = 0;                 /* low bits of <bit> fixed so far */
	int mirror;                    /* <bit> with the current scale bit flipped */
	int scale;

	if (!v)
		return -1;

	/* we check if the exact bit is set or if it's present in a mirror
	 * position based on the current scale we're checking, in which case
	 * it's returned with its current (or mirrored) value. Otherwise we'll
	 * make sure there's at least one bit in the half we're in, and will
	 * scale down to a smaller scope and try again, until we find the
	 * closest bit.
	 */
	for (scale = (sizeof(long) > 4) ? 5 : 4; scale >= 0; scale--) {
		halfword >>= (1UL << scale); /* halve the candidate window */
		scope |= (1UL << scale);
		mirror = bit ^ (1UL << scale);
		if (v & ((1UL << bit) | (1UL << mirror)))
			return (v & (1UL << bit)) ? bit : mirror;

		/* no direct hit: jump to the mirror half when the current
		 * half contains no set bit at all.
		 */
		if (!((v >> (bit & scope)) & halves[scale] & halfword))
			bit = mirror;
	}
	return bit;
}
+
/* Return non-zero if IPv4 address is part of the network,
 * otherwise zero. Note that <addr> may not necessarily be aligned
 * while the two other ones must.
 */
int in_net_ipv4(const void *addr, const struct in_addr *mask, const struct in_addr *net)
{
	struct in_addr aligned;

	/* <addr> may be unaligned: copy it into an aligned local first */
	memcpy(&aligned, addr, sizeof(aligned));
	return (aligned.s_addr & mask->s_addr) == (net->s_addr & mask->s_addr);
}
+
/* Return non-zero if IPv6 address is part of the network,
 * otherwise zero. Note that <addr> may not necessarily be aligned
 * while the two other ones must.
 */
int in_net_ipv6(const void *addr, const struct in6_addr *mask, const struct in6_addr *net)
{
	struct in6_addr aligned;
	const int *ap, *mp, *np;
	unsigned int word;

	/* <addr> may be unaligned: copy it into an aligned local first */
	memcpy(&aligned, addr, sizeof(aligned));
	ap = (const int *)&aligned;
	mp = (const int *)mask;
	np = (const int *)net;

	/* compare word by word under the mask */
	for (word = 0; word < sizeof(struct in6_addr) / sizeof(int); word++) {
		if ((ap[word] & mp[word]) != (np[word] & mp[word]))
			return 0;
	}
	return 1;
}
+
/* Map IPv4 address on IPv6 address, as specified in RFC4291
 * "IPv4-Mapped IPv6 Address" (using the :ffff: prefix)
 *
 * Input and output may overlap.
 */
void v4tov6(struct in6_addr *sin6_addr, struct in_addr *sin_addr)
{
	/* read the v4 address first since the two areas may overlap */
	uint32_t v4 = sin_addr->s_addr;

	memset(sin6_addr->s6_addr, 0, 10);
	write_u16(&sin6_addr->s6_addr[10], htons(0xFFFF));
	write_u32(&sin6_addr->s6_addr[12], v4);
}
+
/* Try to convert IPv6 address to IPv4 address thanks to the
 * following mapping methods:
 *  - RFC4291 IPv4-Mapped IPv6 Address (preferred method)
 *    -> ::ffff:ip:v4
 *  - RFC4291 IPv4-Compatible IPv6 Address (deprecated, RFC3513 legacy for
 *    "IPv6 Addresses with Embedded IPv4 Addresses)
 *    -> ::0000:ip:v4
 *  - 6to4 (defined in RFC3056 proposal, seems deprecated nowadays)
 *    -> 2002:ip:v4::
 * Return true if conversion is possible and false otherwise.
 */
int v6tov4(struct in_addr *sin_addr, struct in6_addr *sin6_addr)
{
	const unsigned char *raw = sin6_addr->s6_addr;

	if (read_u64(&raw[0]) == 0 &&
	    (read_u32(&raw[8]) == htonl(0xFFFF) || read_u32(&raw[8]) == 0)) {
		/* RFC4291 ipv4 mapped or compatible ipv6 address */
		sin_addr->s_addr = read_u32(&raw[12]);
		return 1;
	}

	if (read_u16(&raw[0]) == htons(0x2002)) {
		/* RFC3056 6to4 address: v4 address is in bytes 2..5 */
		sin_addr->s_addr = htonl((ntohs(read_u16(&raw[2])) << 16) +
		                         ntohs(read_u16(&raw[4])));
		return 1;
	}

	return 0; /* unrecognized input */
}
+
/* compare two struct sockaddr_storage, including port if <check_port> is true,
 * and return:
 *  0 (true)  if the addr is the same in both
 *  1 (false) if the addr is not the same in both
 * -1 (unable) if one of the addr is not AF_INET*
 */
int ipcmp(const struct sockaddr_storage *ss1, const struct sockaddr_storage *ss2, int check_port)
{
	int same_addr;

	/* both addresses must belong to a supported family */
	if ((ss1->ss_family != AF_INET && ss1->ss_family != AF_INET6) ||
	    (ss2->ss_family != AF_INET && ss2->ss_family != AF_INET6))
		return -1;

	if (ss1->ss_family != ss2->ss_family)
		return 1;

	if (ss1->ss_family == AF_INET)
		same_addr = memcmp(&((const struct sockaddr_in *)ss1)->sin_addr,
		                   &((const struct sockaddr_in *)ss2)->sin_addr,
		                   sizeof(struct in_addr)) == 0;
	else
		same_addr = memcmp(&((const struct sockaddr_in6 *)ss1)->sin6_addr,
		                   &((const struct sockaddr_in6 *)ss2)->sin6_addr,
		                   sizeof(struct in6_addr)) == 0;

	if (!same_addr)
		return 1;
	if (check_port && get_net_port(ss1) != get_net_port(ss2))
		return 1;
	return 0;
}
+
+/* compare a struct sockaddr_storage to a struct net_addr and return :
+ * 0 (true) if <addr> is matching <net>
+ * 1 (false) if <addr> is not matching <net>
+ * -1 (unable) if <addr> or <net> is not AF_INET*
+ */
+int ipcmp2net(const struct sockaddr_storage *addr, const struct net_addr *net)
+{
+ if ((addr->ss_family != AF_INET) && (addr->ss_family != AF_INET6))
+ return -1;
+
+ if ((net->family != AF_INET) && (net->family != AF_INET6))
+ return -1;
+
+ if (addr->ss_family != net->family)
+ return 1;
+
+ if (addr->ss_family == AF_INET &&
+ (((struct sockaddr_in *)addr)->sin_addr.s_addr & net->addr.v4.mask.s_addr) == net->addr.v4.ip.s_addr)
+ return 0;
+ else {
+ const struct in6_addr *addr6 = &(((const struct sockaddr_in6*)addr)->sin6_addr);
+ const struct in6_addr *nip6 = &net->addr.v6.ip;
+ const struct in6_addr *nmask6 = &net->addr.v6.mask;
+
+ if ((read_u32(&addr6->s6_addr[0]) & read_u32(&nmask6->s6_addr[0])) == read_u32(&nip6->s6_addr[0]) &&
+ (read_u32(&addr6->s6_addr[4]) & read_u32(&nmask6->s6_addr[4])) == read_u32(&nip6->s6_addr[4]) &&
+ (read_u32(&addr6->s6_addr[8]) & read_u32(&nmask6->s6_addr[8])) == read_u32(&nip6->s6_addr[8]) &&
+ (read_u32(&addr6->s6_addr[12]) & read_u32(&nmask6->s6_addr[12])) == read_u32(&nip6->s6_addr[12]))
+ return 0;
+ }
+
+ return 1;
+}
+
/* copy IP address from <source> into <dest>
 * The caller must allocate and clear <dest> before calling.
 * The source must be in either AF_INET or AF_INET6 family, or the destination
 * address will be undefined. If the destination address used to hold a port,
 * it is preserved, so that this function can be used to switch to another
 * address family with no risk. Returns a pointer to the destination.
 */
struct sockaddr_storage *ipcpy(const struct sockaddr_storage *source, struct sockaddr_storage *dest)
{
	int prev_port;

	/* save the current port so it survives the family switch below */
	prev_port = get_net_port(dest);
	memset(dest, 0, sizeof(*dest));
	dest->ss_family = source->ss_family;

	/* copy new addr and apply it */
	switch (source->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)dest)->sin_addr.s_addr = ((struct sockaddr_in *)source)->sin_addr.s_addr;
		((struct sockaddr_in *)dest)->sin_port = prev_port;
		break;
	case AF_INET6:
		memcpy(((struct sockaddr_in6 *)dest)->sin6_addr.s6_addr, ((struct sockaddr_in6 *)source)->sin6_addr.s6_addr, sizeof(struct in6_addr));
		((struct sockaddr_in6 *)dest)->sin6_port = prev_port;
		break;
	}

	return dest;
}
+
/* Convert a duration <t> (in ticks, divided by <hz_div> to get seconds) into
 * a short human-readable form such as "3d12h", "2h05m" or "42s", printing at
 * most the two most significant units. Returns a pointer to a static buffer
 * ("?" on invalid input), so the result must be used before the next call.
 */
char *human_time(int t, short hz_div) {
	static char rv[sizeof("24855d23h")+1];	// longest of "23h59m" and "59m59s"
	char *p = rv;
	char *end = rv + sizeof(rv);
	int cnt=2;				// print two numbers

	if (unlikely(t < 0 || hz_div <= 0)) {
		snprintf(p, end - p, "?");
		return rv;
	}

	if (unlikely(hz_div > 1))
		t /= hz_div; /* scale ticks down to seconds */

	if (t >= DAY) {
		p += snprintf(p, end - p, "%dd", t / DAY);
		cnt--;
	}

	if (cnt && t % DAY / HOUR) {
		p += snprintf(p, end - p, "%dh", t % DAY / HOUR);
		cnt--;
	}

	if (cnt && t % HOUR / MINUTE) {
		p += snprintf(p, end - p, "%dm", t % HOUR / MINUTE);
		cnt--;
	}

	if ((cnt && t % MINUTE) || !t) // also display '0s'
		p += snprintf(p, end - p, "%ds", t % MINUTE / SEC);

	return rv;
}
+
/* three-letter English month names, indexed by tm_mon (0..11), used by the
 * log date formatting functions below.
 */
const char *monthname[12] = {
	"Jan", "Feb", "Mar", "Apr", "May", "Jun",
	"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
};
+
/* date2str_log: write a date in the format :
 * 	sprintf(str, "%02d/%s/%04d:%02d:%02d:%02d.%03d",
 *		tm.tm_mday, monthname[tm.tm_mon], tm.tm_year+1900,
 *		tm.tm_hour, tm.tm_min, tm.tm_sec, (int)date.tv_usec/1000);
 *
 * without using sprintf. return a pointer to the last char written (\0) or
 * NULL if there isn't enough space.
 */
char *date2str_log(char *dst, const struct tm *tm, const struct timeval *date, size_t size)
{

	if (size < 25) /* the size is fixed: 24 chars + \0 */
		return NULL;

	/* each utoa_pad() call emits a zero-padded field and returns the
	 * position after it, or NULL if the value did not fit.
	 */
	dst = utoa_pad((unsigned int)tm->tm_mday, dst, 3); // day
	if (!dst)
		return NULL;
	*dst++ = '/';

	memcpy(dst, monthname[tm->tm_mon], 3); // month
	dst += 3;
	*dst++ = '/';

	dst = utoa_pad((unsigned int)tm->tm_year+1900, dst, 5); // year
	if (!dst)
		return NULL;
	*dst++ = ':';

	dst = utoa_pad((unsigned int)tm->tm_hour, dst, 3); // hour
	if (!dst)
		return NULL;
	*dst++ = ':';

	dst = utoa_pad((unsigned int)tm->tm_min, dst, 3); // minutes
	if (!dst)
		return NULL;
	*dst++ = ':';

	dst = utoa_pad((unsigned int)tm->tm_sec, dst, 3); // secondes
	if (!dst)
		return NULL;
	*dst++ = '.';

	dst = utoa_pad((unsigned int)(date->tv_usec/1000)%1000, dst, 4); // milliseconds
	if (!dst)
		return NULL;
	*dst = '\0';

	return dst;
}
+
/* Base year used to compute leap years */
#define TM_YEAR_BASE 1900

/* Return the difference in seconds between two times (leap seconds are ignored).
 * Retrieved from glibc 2.18 source code.
 */
static int my_tm_diff(const struct tm *a, const struct tm *b)
{
	/* Compute intervening leap days correctly even if year is negative.
	 * Take care to avoid int overflow in leap day calculations,
	 * but it's OK to assume that A and B are close to each other.
	 */
	int quad_a = (a->tm_year >> 2) + (TM_YEAR_BASE >> 2) - !(a->tm_year & 3);
	int quad_b = (b->tm_year >> 2) + (TM_YEAR_BASE >> 2) - !(b->tm_year & 3);
	int cent_a = quad_a / 25 - (quad_a % 25 < 0);
	int cent_b = quad_b / 25 - (quad_b % 25 < 0);
	int quadcent_a = cent_a >> 2;
	int quadcent_b = cent_b >> 2;
	int leap_days = (quad_a - quad_b) - (cent_a - cent_b) + (quadcent_a - quadcent_b);
	int years = a->tm_year - b->tm_year;
	int days  = 365 * years + leap_days + (a->tm_yday - b->tm_yday);
	int hours = 24 * days + (a->tm_hour - b->tm_hour);
	int mins  = 60 * hours + (a->tm_min - b->tm_min);

	return 60 * mins + (a->tm_sec - b->tm_sec);
}
+
/* Return the GMT offset for a specific local time.
 * Both t and tm must represent the same time.
 * The string returned has the same format as returned by strftime(... "%z", tm),
 * e.g. "+0200". Offsets are kept in an internal per-thread cache for better
 * performances: one entry per DST state, computed on first use and reused
 * forever afterwards.
 * NOTE(review): this assumes the timezone offset for a given DST state never
 * changes during the process' lifetime — confirm if TZ may be reloaded.
 */
const char *get_gmt_offset(time_t t, struct tm *tm)
{
	/* Cache offsets from GMT (depending on whether DST is active or not) */
	static THREAD_LOCAL char gmt_offsets[2][5+1] = { "", "" };

	char *gmt_offset;
	struct tm tm_gmt;
	int diff;
	int isdst = tm->tm_isdst;

	/* Pretend DST not active if its status is unknown */
	if (isdst < 0)
		isdst = 0;

	/* Fetch the offset and initialize it if needed */
	gmt_offset = gmt_offsets[isdst & 0x01];
	if (unlikely(!*gmt_offset)) {
		/* cache miss: compute the offset by comparing the local
		 * broken-down time with the GMT one for the same timestamp.
		 */
		get_gmtime(t, &tm_gmt);
		diff = my_tm_diff(tm, &tm_gmt);
		if (diff < 0) {
			diff = -diff;
			*gmt_offset = '-';
		} else {
			*gmt_offset = '+';
		}
		diff %= 86400U;
		diff /= 60; /* Convert to minutes */
		snprintf(gmt_offset+1, 4+1, "%02d%02d", diff/60, diff%60);
	}

	return gmt_offset;
}
+
+/* gmt2str_log: write a date in the format :
+ * "%02d/%s/%04d:%02d:%02d:%02d +0000" without using snprintf
+ * return a pointer to the last char written (\0) or
+ * NULL if there isn't enough space.
+ */
+char *gmt2str_log(char *dst, struct tm *tm, size_t size)
+{
+ if (size < 27) /* the size is fixed: 26 chars + \0 */
+ return NULL;
+
+ dst = utoa_pad((unsigned int)tm->tm_mday, dst, 3); // day
+ if (!dst)
+ return NULL;
+ *dst++ = '/';
+
+ memcpy(dst, monthname[tm->tm_mon], 3); // month
+ dst += 3;
+ *dst++ = '/';
+
+ dst = utoa_pad((unsigned int)tm->tm_year+1900, dst, 5); // year
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_hour, dst, 3); // hour
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_min, dst, 3); // minutes
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_sec, dst, 3); // secondes
+ if (!dst)
+ return NULL;
+ *dst++ = ' ';
+ *dst++ = '+';
+ *dst++ = '0';
+ *dst++ = '0';
+ *dst++ = '0';
+ *dst++ = '0';
+ *dst = '\0';
+
+ return dst;
+}
+
+/* localdate2str_log: write a date in the format :
+ * "%02d/%s/%04d:%02d:%02d:%02d +0000(local timezone)" without using snprintf
+ * Both t and tm must represent the same time.
+ * return a pointer to the last char written (\0) or
+ * NULL if there isn't enough space.
+ */
+char *localdate2str_log(char *dst, time_t t, struct tm *tm, size_t size)
+{
+ const char *gmt_offset;
+ if (size < 27) /* the size is fixed: 26 chars + \0 */
+ return NULL;
+
+ gmt_offset = get_gmt_offset(t, tm);
+
+ dst = utoa_pad((unsigned int)tm->tm_mday, dst, 3); // day
+ if (!dst)
+ return NULL;
+ *dst++ = '/';
+
+ memcpy(dst, monthname[tm->tm_mon], 3); // month
+ dst += 3;
+ *dst++ = '/';
+
+ dst = utoa_pad((unsigned int)tm->tm_year+1900, dst, 5); // year
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_hour, dst, 3); // hour
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_min, dst, 3); // minutes
+ if (!dst)
+ return NULL;
+ *dst++ = ':';
+
+ dst = utoa_pad((unsigned int)tm->tm_sec, dst, 3); // secondes
+ if (!dst)
+ return NULL;
+ *dst++ = ' ';
+
+ memcpy(dst, gmt_offset, 5); // Offset from local time to GMT
+ dst += 5;
+ *dst = '\0';
+
+ return dst;
+}
+
/* Returns the number of seconds since 01/01/1970 0:0:0 GMT for GMT date <tm>.
 * It is meant as a portable replacement for timegm() for use with valid inputs.
 * Returns undefined results for invalid dates (eg: months out of range 0..11).
 * Relies on the external days_since_zero() helper for whole elapsed years.
 */
time_t my_timegm(const struct tm *tm)
{
	/* Each month has 28, 29, 30 or 31 days, or 28+N. The date in the year
	 * is thus (current month - 1)*28 + cumulated_N[month] to count the
	 * sum of the extra N days for elapsed months. The sum of all these N
	 * days doesn't exceed 30 for a complete year (366-12*28) so it fits
	 * in a 5-bit word. This means that with 60 bits we can represent a
	 * matrix of all these values at once, which is fast and efficient to
	 * access. The extra February day for leap years is not counted here.
	 *
	 * Jan : none = 0 (0)
	 * Feb : Jan = 3 (3)
	 * Mar : Jan..Feb = 3 (3 + 0)
	 * Apr : Jan..Mar = 6 (3 + 0 + 3)
	 * May : Jan..Apr = 8 (3 + 0 + 3 + 2)
	 * Jun : Jan..May = 11 (3 + 0 + 3 + 2 + 3)
	 * Jul : Jan..Jun = 13 (3 + 0 + 3 + 2 + 3 + 2)
	 * Aug : Jan..Jul = 16 (3 + 0 + 3 + 2 + 3 + 2 + 3)
	 * Sep : Jan..Aug = 19 (3 + 0 + 3 + 2 + 3 + 2 + 3 + 3)
	 * Oct : Jan..Sep = 21 (3 + 0 + 3 + 2 + 3 + 2 + 3 + 3 + 2)
	 * Nov : Jan..Oct = 24 (3 + 0 + 3 + 2 + 3 + 2 + 3 + 3 + 2 + 3)
	 * Dec : Jan..Nov = 26 (3 + 0 + 3 + 2 + 3 + 2 + 3 + 3 + 2 + 3 + 2)
	 */
	uint64_t extra =
		( 0ULL << 0*5) + ( 3ULL << 1*5) + ( 3ULL << 2*5) + /* Jan, Feb, Mar, */
		( 6ULL << 3*5) + ( 8ULL << 4*5) + (11ULL << 5*5) + /* Apr, May, Jun, */
		(13ULL << 6*5) + (16ULL << 7*5) + (19ULL << 8*5) + /* Jul, Aug, Sep, */
		(21ULL << 9*5) + (24ULL << 10*5) + (26ULL << 11*5); /* Oct, Nov, Dec, */

	unsigned int y = tm->tm_year + 1900;
	unsigned int m = tm->tm_mon;
	unsigned long days = 0;

	/* days since 1/1/1970 for full years */
	days += days_since_zero(y) - days_since_zero(1970);

	/* days for full months in the current year */
	days += 28 * m + ((extra >> (m * 5)) & 0x1f);

	/* count + 1 after March for leap years. A leap year is a year multiple
	 * of 4, unless it's multiple of 100 without being multiple of 400. 2000
	 * is leap, 1900 isn't, 1904 is.
	 */
	if ((m > 1) && !(y & 3) && ((y % 100) || !(y % 400)))
		days++;

	days += tm->tm_mday - 1;
	return days * 86400ULL + tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec;
}
+
/* Consumes one expected character. Returns true and advances <date> and
 * decrements <len> if the next character is <c>, otherwise returns false
 * and leaves both untouched.
 */
static inline int parse_expect_char(const char **date, int *len, char c)
{
	if (*len >= 1 && **date == c) {
		(*date)++;
		(*len)--;
		return 1;
	}
	return 0;
}
+
/* This function expects a string <str> of len <l>. It returns true and updates
 * <date> and <len> if the string matches, otherwise it returns false and
 * leaves them untouched. <str> is const-qualified since it is only read,
 * which also avoids discarding const when callers pass string literals.
 */
static inline int parse_strcmp(const char **date, int *len, const char *str, int l)
{
	if (*len < l || strncmp(*date, str, l) != 0)
		return 0;
	(*len) -= l;
	(*date) += l;
	return 1;
}
+
/* This macro converts a 3-chars name into an integer. The parameter names do
 * not start with underscores: identifiers beginning with a double underscore
 * are reserved for the implementation in C.
 */
#define STR2I3(ch1, ch2, ch3) ((ch1) * 65536 + (ch2) * 256 + (ch3))

/* day-name = %x4D.6F.6E ; "Mon", case-sensitive
 *          / %x54.75.65 ; "Tue", case-sensitive
 *          / %x57.65.64 ; "Wed", case-sensitive
 *          / %x54.68.75 ; "Thu", case-sensitive
 *          / %x46.72.69 ; "Fri", case-sensitive
 *          / %x53.61.74 ; "Sat", case-sensitive
 *          / %x53.75.6E ; "Sun", case-sensitive
 *
 * Consumes the 3-letter day name and sets tm->tm_wday (1..7, Monday = 1).
 * Returns 1 on success and advances <date>/<len>, 0 otherwise.
 */
static inline int parse_http_dayname(const char **date, int *len, struct tm *tm)
{
	if (*len < 3)
		return 0;
	switch (STR2I3((*date)[0], (*date)[1], (*date)[2])) {
	case STR2I3('M','o','n'): tm->tm_wday = 1; break;
	case STR2I3('T','u','e'): tm->tm_wday = 2; break;
	case STR2I3('W','e','d'): tm->tm_wday = 3; break;
	case STR2I3('T','h','u'): tm->tm_wday = 4; break;
	case STR2I3('F','r','i'): tm->tm_wday = 5; break;
	case STR2I3('S','a','t'): tm->tm_wday = 6; break;
	case STR2I3('S','u','n'): tm->tm_wday = 7; break;
	default: return 0;
	}
	*len -= 3;
	*date += 3;
	return 1;
}
+
/* month = %x4A.61.6E ; "Jan", case-sensitive
 *       / %x46.65.62 ; "Feb", case-sensitive
 *       / %x4D.61.72 ; "Mar", case-sensitive
 *       / %x41.70.72 ; "Apr", case-sensitive
 *       / %x4D.61.79 ; "May", case-sensitive
 *       / %x4A.75.6E ; "Jun", case-sensitive
 *       / %x4A.75.6C ; "Jul", case-sensitive
 *       / %x41.75.67 ; "Aug", case-sensitive
 *       / %x53.65.70 ; "Sep", case-sensitive
 *       / %x4F.63.74 ; "Oct", case-sensitive
 *       / %x4E.6F.76 ; "Nov", case-sensitive
 *       / %x44.65.63 ; "Dec", case-sensitive
 *
 * Consumes the 3-letter month name and sets tm->tm_mon (0..11).
 * Returns 1 on success and advances <date>/<len>, 0 otherwise.
 */
static inline int parse_http_monthname(const char **date, int *len, struct tm *tm)
{
	static const char months[12][4] = {
		"Jan", "Feb", "Mar", "Apr", "May", "Jun",
		"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
	};
	int m;

	if (*len < 3)
		return 0;

	for (m = 0; m < 12; m++) {
		if (memcmp(*date, months[m], 3) == 0) {
			tm->tm_mon = m;
			*len -= 3;
			*date += 3;
			return 1;
		}
	}
	return 0;
}
+
/* day-name-l = %x4D.6F.6E.64.61.79 ; "Monday", case-sensitive
 *            / %x54.75.65.73.64.61.79 ; "Tuesday", case-sensitive
 *            / %x57.65.64.6E.65.73.64.61.79 ; "Wednesday", case-sensitive
 *            / %x54.68.75.72.73.64.61.79 ; "Thursday", case-sensitive
 *            / %x46.72.69.64.61.79 ; "Friday", case-sensitive
 *            / %x53.61.74.75.72.64.61.79 ; "Saturday", case-sensitive
 *            / %x53.75.6E.64.61.79 ; "Sunday", case-sensitive
 *
 * Consumes the full day name and sets tm->tm_wday (1..7, Monday = 1).
 * Returns 1 on success and advances <date>/<len>, 0 otherwise.
 */
static inline int parse_http_ldayname(const char **date, int *len, struct tm *tm)
{
	if (*len < 6) /* "Monday", "Friday" and "Sunday" are the shortest */
		return 0;

	if (parse_strcmp(date, len, "Monday", 6)) {
		tm->tm_wday = 1;
		return 1;
	}
	if (parse_strcmp(date, len, "Tuesday", 7)) {
		tm->tm_wday = 2;
		return 1;
	}
	if (parse_strcmp(date, len, "Wednesday", 9)) {
		tm->tm_wday = 3;
		return 1;
	}
	if (parse_strcmp(date, len, "Thursday", 8)) {
		tm->tm_wday = 4;
		return 1;
	}
	if (parse_strcmp(date, len, "Friday", 6)) {
		tm->tm_wday = 5;
		return 1;
	}
	if (parse_strcmp(date, len, "Saturday", 8)) {
		tm->tm_wday = 6;
		return 1;
	}
	if (parse_strcmp(date, len, "Sunday", 6)) {
		tm->tm_wday = 7;
		return 1;
	}
	return 0;
}
+
/* This function parses exactly 1 digit and returns the numeric value in
 * "digit". Returns 1 and advances <date>/<len> on success, 0 otherwise.
 */
static inline int parse_digit(const char **date, int *len, int *digit)
{
	char c;

	if (*len <= 0)
		return 0;
	c = **date;
	if (c < '0' || c > '9')
		return 0;
	*digit = c - '0';
	(*date)++;
	(*len)--;
	return 1;
}
+
/* This function parses exactly 2 digits and returns the numeric value in
 * "digit". On failure, "digit" may hold a partial result (tens only), as
 * callers only use it after a successful return.
 */
static inline int parse_2digit(const char **date, int *len, int *digit)
{
	int v;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit = v * 10;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit += v;

	return 1;
}
+
/* This function parses exactly 4 digits and returns the numeric value in
 * "digit". On failure, "digit" may hold a partial result, as callers only
 * use it after a successful return.
 */
static inline int parse_4digit(const char **date, int *len, int *digit)
{
	int v;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit = v * 1000;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit += v * 100;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit += v * 10;

	if (!parse_digit(date, len, &v))
		return 0;
	*digit += v;

	return 1;
}
+
/* time-of-day = hour ":" minute ":" second
 *             ; 00:00:00 - 23:59:60 (leap second)
 *
 * hour = 2DIGIT
 * minute = 2DIGIT
 * second = 2DIGIT
 *
 * Fills tm_hour/tm_min/tm_sec. Returns 1 on success, 0 otherwise. Note that
 * ranges are not validated here.
 */
static inline int parse_http_time(const char **date, int *len, struct tm *tm)
{
	if (!parse_2digit(date, len, &tm->tm_hour))  /* hour 2DIGIT */
		return 0;
	if (!parse_expect_char(date, len, ':'))      /* expect ":" */
		return 0;
	if (!parse_2digit(date, len, &tm->tm_min))   /* min 2DIGIT */
		return 0;
	if (!parse_expect_char(date, len, ':'))      /* expect ":" */
		return 0;
	if (!parse_2digit(date, len, &tm->tm_sec))   /* sec 2DIGIT */
		return 0;
	return 1;
}
+
/* From RFC7231
 * https://tools.ietf.org/html/rfc7231#section-7.1.1.1
 *
 * IMF-fixdate = day-name "," SP date1 SP time-of-day SP GMT
 * ; fixed length/zone/capitalization subset of the format
 * ; see Section 3.3 of [RFC5322]
 *
 *
 * date1 = day SP month SP year
 * ; e.g., 02 Jun 1982
 *
 * day = 2DIGIT
 * year = 4DIGIT
 *
 * GMT = %x47.4D.54 ; "GMT", case-sensitive
 *
 * time-of-day = hour ":" minute ":" second
 * ; 00:00:00 - 23:59:60 (leap second)
 *
 * hour = 2DIGIT
 * minute = 2DIGIT
 * second = 2DIGIT
 *
 * DIGIT = decimal 0-9
 *
 * Returns 1 and fills <tm> (GMT time, tm_isdst = -1) on success, 0 on error.
 */
int parse_imf_date(const char *date, int len, struct tm *tm)
{
	/* tm_gmtoff, if present, ought to be zero'ed */
	memset(tm, 0, sizeof(*tm));

	if (!parse_http_dayname(&date, &len, tm))     /* day-name */
		return 0;
	if (!parse_expect_char(&date, &len, ','))     /* expect "," */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_2digit(&date, &len, &tm->tm_mday)) /* day 2DIGIT */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_http_monthname(&date, &len, tm))   /* Month */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_4digit(&date, &len, &tm->tm_year)) /* year = 4DIGIT */
		return 0;
	tm->tm_year -= 1900;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_http_time(&date, &len, tm))        /* Parse time. */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_strcmp(&date, &len, "GMT", 3))     /* "GMT", case-sensitive */
		return 0;
	tm->tm_isdst = -1;
	return 1;
}
+
/* From RFC7231
 * https://tools.ietf.org/html/rfc7231#section-7.1.1.1
 *
 * rfc850-date  = day-name-l "," SP date2 SP time-of-day SP GMT
 * date2        = day "-" month "-" 2DIGIT
 *              ; e.g., 02-Jun-82
 *
 * day          = 2DIGIT
 *
 * Returns 1 and fills <tm> (GMT time, tm_isdst = -1) on success, 0 on error.
 * Also tolerates a 4-digit year even though the grammar only allows 2 digits.
 */
int parse_rfc850_date(const char *date, int len, struct tm *tm)
{
	int year;

	/* tm_gmtoff, if present, ought to be zero'ed */
	memset(tm, 0, sizeof(*tm));

	RET0_UNLESS(parse_http_ldayname(&date, &len, tm)); /* Read the day name */
	RET0_UNLESS(parse_expect_char(&date, &len, ',')); /* expect "," */
	RET0_UNLESS(parse_expect_char(&date, &len, ' ')); /* expect SP */
	RET0_UNLESS(parse_2digit(&date, &len, &tm->tm_mday)); /* day 2DIGIT */
	RET0_UNLESS(parse_expect_char(&date, &len, '-')); /* expect "-" */
	RET0_UNLESS(parse_http_monthname(&date, &len, tm)); /* Month */
	RET0_UNLESS(parse_expect_char(&date, &len, '-')); /* expect "-" */

	/* year = 2DIGIT
	 *
	 * Recipients of a timestamp value in rfc850-(*date) format, which uses a
	 * two-digit year, MUST interpret a timestamp that appears to be more
	 * than 50 years in the future as representing the most recent year in
	 * the past that had the same last two digits.
	 */
	RET0_UNLESS(parse_2digit(&date, &len, &tm->tm_year));

	/* expect SP */
	if (!parse_expect_char(&date, &len, ' ')) {
		/* Maybe we have the date with 4 digits: read 2 more. */
		RET0_UNLESS(parse_2digit(&date, &len, &year));
		tm->tm_year = (tm->tm_year * 100 + year) - 1900;
		/* expect SP */
		RET0_UNLESS(parse_expect_char(&date, &len, ' '));
	} else {
		/* Two-digit year with 60 as the pivot: years 00..60 map to
		 * 2000..2060 (+100 on tm_year since it counts from 1900),
		 * years 61..99 stay in 1961..1999 (nothing to add).
		 */
		if (tm->tm_year <= 60)
			tm->tm_year += 100;
	}

	RET0_UNLESS(parse_http_time(&date, &len, tm)); /* Parse time. */
	RET0_UNLESS(parse_expect_char(&date, &len, ' ')); /* expect SP */
	RET0_UNLESS(parse_strcmp(&date, &len, "GMT", 3)); /* GMT = %x47.4D.54 ; "GMT", case-sensitive */
	tm->tm_isdst = -1;

	return 1;
}
+
/* From RFC7231
 * https://tools.ietf.org/html/rfc7231#section-7.1.1.1
 *
 * asctime-date = day-name SP date3 SP time-of-day SP year
 * date3        = month SP ( 2DIGIT / ( SP 1DIGIT ))
 *              ; e.g., Jun  2
 *
 * HTTP-date is case sensitive. A sender MUST NOT generate additional
 * whitespace in an HTTP-date beyond that specifically included as SP in
 * the grammar.
 *
 * Returns 1 and fills <tm> (GMT time, tm_isdst = -1) on success, 0 on error.
 */
int parse_asctime_date(const char *date, int len, struct tm *tm)
{
	/* tm_gmtoff, if present, ought to be zero'ed */
	memset(tm, 0, sizeof(*tm));

	if (!parse_http_dayname(&date, &len, tm))     /* day-name */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_http_monthname(&date, &len, tm))   /* expect month */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;

	/* expect SP and 1DIGIT or 2DIGIT */
	if (parse_expect_char(&date, &len, ' ')) {
		if (!parse_digit(&date, &len, &tm->tm_mday))
			return 0;
	} else {
		if (!parse_2digit(&date, &len, &tm->tm_mday))
			return 0;
	}

	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_http_time(&date, &len, tm))        /* Parse time. */
		return 0;
	if (!parse_expect_char(&date, &len, ' '))     /* expect SP */
		return 0;
	if (!parse_4digit(&date, &len, &tm->tm_year)) /* year = 4DIGIT */
		return 0;
	tm->tm_year -= 1900;
	tm->tm_isdst = -1;
	return 1;
}
+
/* From RFC7231
 * https://tools.ietf.org/html/rfc7231#section-7.1.1.1
 *
 * HTTP-date = IMF-fixdate / obs-date
 * obs-date = rfc850-date / asctime-date
 *
 * Parses an HTTP date in the RFC format and its accepted alternatives.
 * <date> is the string containing the date, <len> is the length of the
 * string. <tm> is filled with the parsed time. The result must be
 * considered as GMT. Returns 1 on success, 0 on failure.
 */
int parse_http_date(const char *date, int len, struct tm *tm)
{
	return parse_imf_date(date, len, tm) ||
	       parse_rfc850_date(date, len, tm) ||
	       parse_asctime_date(date, len, tm);
}
+
/* print the time <ns> in a short form (exactly 7 chars) at the end of buffer
 * <out>. "-" is printed if the value is zero, "inf" if larger than 1000 years.
 * It returns the new buffer length, or 0 if it doesn't fit. The value will be
 * surrounded by <pfx> and <sfx> respectively if not NULL.
 */
int print_time_short(struct buffer *out, const char *pfx, uint64_t ns, const char *sfx)
{
	double val = ns; // 52 bits of mantissa keep ns accuracy over 52 days
	const char *unit;

	/* NULL prefixes/suffixes are printed as empty strings */
	if (!pfx)
		pfx = "";
	if (!sfx)
		sfx = "";

	/* pick the largest unit keeping <val> below the next threshold; each
	 * unit string is padded so value+unit always spans exactly 7 chars.
	 */
	do {
		unit = " - ";  if (val <= 0.0) break;
		unit = "ns";   if (val < 1000.0) break;
		unit = "us";   val /= 1000.0; if (val < 1000.0) break;
		unit = "ms";   val /= 1000.0; if (val < 1000.0) break;
		unit = "s ";   val /= 1000.0; if (val < 60.0) break;
		unit = "m ";   val /= 60.0;   if (val < 60.0) break;
		unit = "h ";   val /= 60.0;   if (val < 24.0) break;
		unit = "d ";   val /= 24.0;   if (val < 365.0) break;
		unit = "yr";   val /= 365.0;  if (val < 1000.0) break;
		unit = " inf ";	val = 0.0; break;
	} while (0);

	/* scale the number of displayed decimals so the numeric part always
	 * takes 5 characters (e.g. "1.234", "12.34", "123.4").
	 */
	if (val <= 0.0)
		return chunk_appendf(out, "%s%7s%s", pfx, unit, sfx);
	else if (val < 10.0)
		return chunk_appendf(out, "%s%1.3f%s%s", pfx, val, unit, sfx);
	else if (val < 100.0)
		return chunk_appendf(out, "%s%2.2f%s%s", pfx, val, unit, sfx);
	else
		return chunk_appendf(out, "%s%3.1f%s%s", pfx, val, unit, sfx);
}
+
/* Dynamically allocates a string of the proper length to hold the formatted
 * output. NULL is returned on error. The caller is responsible for freeing the
 * memory area using free(). The resulting string is returned in <out> if the
 * pointer is not NULL. A previous version of <out> might be used to build the
 * new string, and it will be freed before returning if it is not NULL, which
 * makes it possible to build complex strings from iterative calls without
 * having to care about freeing intermediate values, as in the example below :
 *
 *    memprintf(&err, "invalid argument: '%s'", arg);
 *    ...
 *    memprintf(&err, "parser said : <%s>\n", *err);
 *    ...
 *    free(*err);
 *
 * This means that <err> must be initialized to NULL before first invocation.
 * The return value also holds the allocated string, which eases error checking
 * and immediate consumption. If the output pointer is not used, NULL must be
 * passed instead and it will be ignored. The returned message will then also
 * be NULL so that the caller does not have to bother with freeing anything.
 *
 * It is also convenient to use it without any free except the last one :
 *    err = NULL;
 *    if (!fct1(&err)) report(*err);
 *    if (!fct2(&err)) report(*err);
 *    if (!fct3(&err)) report(*err);
 *    free(*err);
 *
 * memprintf relies on memvprintf. This last version can be called from any
 * function with variadic arguments.
 */
char *memvprintf(char **out, const char *format, va_list orig_args)
{
	va_list args;
	char *ret = NULL;
	int allocated = 0;
	int needed = 0;

	if (!out)
		return NULL;

	do {
		char buf1;

		/* vsnprintf() will return the required length even when the
		 * target buffer is NULL. We do this in a loop just in case
		 * intermediate evaluations get wrong.
		 */
		va_copy(args, orig_args);
		needed = vsnprintf(ret ? ret : &buf1, allocated, format, args);
		va_end(args);
		if (needed < allocated) {
			/* Note: on Solaris 8, the first iteration always
			 * returns -1 if allocated is zero, so we force a
			 * retry.
			 */
			if (!allocated)
				needed = 0;
			else
				break;
		}

		allocated = needed + 1;
		ret = my_realloc2(ret, allocated);
	} while (ret);

	if (needed < 0) {
		/* an error was encountered */
		ha_free(&ret);
	}

	/* <out> was already checked non-NULL above, so the previous value can
	 * be freed unconditionally (free(NULL) is a no-op anyway).
	 */
	free(*out);
	*out = ret;

	return ret;
}
+
/* printf-like front-end to memvprintf(): formats the variadic arguments into
 * a newly allocated string stored in <out> (any previous value is freed).
 * Returns the allocated string, or NULL on error or when <out> is NULL. See
 * memvprintf() for the detailed contract.
 */
char *memprintf(char **out, const char *format, ...)
{
	va_list args;
	char *ret = NULL;

	va_start(args, format);
	ret = memvprintf(out, format, args);
	va_end(args);

	return ret;
}
+
/* Used to add <level> spaces before each line of <out>, unless there is only one line.
 * The input argument is automatically freed and reassigned. The result will have to be
 * freed by the caller. It also supports being passed a NULL which results in the same
 * output.
 * Example of use :
 *   parse(cmd, &err); (callee: memprintf(&err, ...))
 *   fprintf(stderr, "Parser said: %s\n", indent_msg(&err, 9));
 *   free(err);
 */
char *indent_msg(char **out, int level)
{
	char *ret, *in, *p;
	int needed = 0;
	int lf = 0;
	int lastlf = 0;
	int len;

	if (!out || !*out)
		return NULL;

	/* count the LFs and remember the offset of the last one */
	in = *out - 1;
	while ((in = strchr(in + 1, '\n')) != NULL) {
		lastlf = in - *out;
		lf++;
	}

	if (!lf) /* single line, no LF, return it as-is */
		return *out;

	len = strlen(*out);

	if (lf == 1 && lastlf == len - 1) {
		/* single line, LF at end, strip it and return as-is */
		(*out)[lastlf] = 0;
		return *out;
	}

	/* OK now we have at least one LF, we need to process the whole string
	 * as a multi-line string. What we'll do :
	 * - prefix with an LF if there is none
	 * - add <level> spaces before each line
	 * This means at most ( 1 + level + (len-lf) + lf*<1+level) ) =
	 * 1 + level + len + lf * level = 1 + level * (lf + 1) + len.
	 */

	needed = 1 + level * (lf + 1) + len + 1;
	p = ret = malloc(needed);
	if (!ret) /* on allocation failure, leave the message untouched */
		return *out;

	in = *out;

	/* skip initial LFs */
	while (*in == '\n')
		in++;

	/* copy each line, prefixed with LF and <level> spaces, and without the trailing LF */
	while (*in) {
		*p++ = '\n';
		memset(p, ' ', level);
		p += level;
		do {
			*p++ = *in++;
		} while (*in && *in != '\n');
		if (*in)
			in++;
	}
	*p = 0;

	free(*out);
	*out = ret;

	return ret;
}
+
/* makes a copy of message <in> into <out>, with each line prefixed with <pfx>
 * and end of lines replaced with <eol> if not 0. The first line to indent has
 * to be indicated in <first> (starts at zero), so that it is possible to skip
 * indenting the first line if it has to be appended after an existing message.
 * Empty strings are never indented, and NULL strings are considered empty both
 * for <in> and <pfx>. It returns non-zero if an EOL was appended as the last
 * character, zero otherwise.
 */
int append_prefixed_str(struct buffer *out, const char *in, const char *pfx, char eol, int first)
{
	int bol, lf;
	int pfxlen = pfx ? strlen(pfx) : 0;

	if (!in)
		return 0;

	bol = 1; /* we start at the beginning of a line */
	lf = 0;
	while (*in) {
		/* emit the prefix at each beginning of line, except for the
		 * first <first> lines which are skipped.
		 */
		if (bol && pfxlen) {
			if (first > 0)
				first--;
			else
				b_putblk(out, pfx, pfxlen);
			bol = 0;
		}

		lf = (*in == '\n');
		bol |= lf; /* an LF means the next char starts a new line */
		b_putchr(out, (lf && eol) ? eol : *in);
		in++;
	}
	return lf;
}
+
/* removes environment variable <name> from the environment as found in
 * environ. This is only provided as an alternative for systems without
 * unsetenv() (old Solaris and AIX versions). THIS IS NOT THREAD SAFE.
 * The principle is to scan environ for each occurrence of variable name
 * <name> and to replace the matching pointers with the last pointer of
 * the array (since variables are not ordered).
 * It always returns 0 (success).
 */
int my_unsetenv(const char *name)
{
	extern char **environ;
	char **p = environ;
	int vars;
	int next;
	int len;

	len = strlen(name);
	/* count the entries */
	for (vars = 0; p[vars]; vars++)
		;
	next = 0;
	while (next < vars) {
		/* an entry matches when it starts with "<name>=" */
		if (strncmp(p[next], name, len) != 0 || p[next][len] != '=') {
			next++;
			continue;
		}
		/* replace the match with the last entry and shrink the array;
		 * do not advance <next> since the moved entry may match too.
		 */
		if (next < vars - 1)
			p[next] = p[vars - 1];
		p[--vars] = NULL;
	}
	return 0;
}
+
/* Convert occurrences of environment variables in the input string to their
 * corresponding value. A variable is identified as a series of alphanumeric
 * characters or underscores following a '$' sign. The <in> string must be
 * free()able. NULL returns NULL. The resulting string might be reallocated if
 * some expansion is made. Variable names may also be enclosed into braces if
 * needed (eg: to concatenate alphanum characters).
 * NOTE(review): my_realloc2() results are not checked here; an allocation
 * failure would be dereferenced below — confirm whether OOM aborts elsewhere.
 */
char *env_expand(char *in)
{
	char *txt_beg;  /* start of the text segment not yet copied */
	char *out;      /* output string, grown as expansions are made */
	char *txt_end;  /* position of the next '$' (or end of string) */
	char *var_beg;  /* first char of the variable name */
	char *var_end;  /* first char after the variable name */
	char *value;    /* value returned by getenv() */
	char *next;     /* resume position after the variable reference */
	int out_len;
	int val_len;

	if (!in)
		return in;

	value = out = NULL;
	out_len = 0;

	txt_beg = in;
	do {
		/* look for next '$' sign in <in> */
		for (txt_end = txt_beg; *txt_end && *txt_end != '$'; txt_end++);

		if (!*txt_end && !out) /* end and no expansion performed */
			return in;

		val_len = 0;
		next = txt_end;
		if (*txt_end == '$') {
			char save;

			var_beg = txt_end + 1;
			if (*var_beg == '{')
				var_beg++;

			var_end = var_beg;
			while (isalnum((unsigned char)*var_end) || *var_end == '_') {
				var_end++;
			}

			next = var_end;
			/* skip the closing brace only if an opening one was seen */
			if (*var_end == '}' && (var_beg > txt_end + 1))
				next++;

			/* get value of the variable name at this location:
			 * temporarily NUL-terminate the name in place.
			 */
			save = *var_end;
			*var_end = '\0';
			value = getenv(var_beg);
			*var_end = save;
			val_len = value ? strlen(value) : 0;
		}

		/* append the literal text before '$', then the variable value */
		out = my_realloc2(out, out_len + (txt_end - txt_beg) + val_len + 1);
		if (txt_end > txt_beg) {
			memcpy(out + out_len, txt_beg, txt_end - txt_beg);
			out_len += txt_end - txt_beg;
		}
		if (val_len) {
			memcpy(out + out_len, value, val_len);
			out_len += val_len;
		}
		out[out_len] = 0;
		txt_beg = next;
	} while (*txt_beg);

	/* here we know that <out> was allocated and that we don't need <in> anymore */
	free(in);
	return out;
}
+
+
/* same as strstr() but case-insensitive and with limit length.
 * <str1>/<len_str1> is the haystack, <str2>/<len_str2> the needle. Returns a
 * pointer into <str1> at the first match, or NULL. An empty needle matches at
 * the start of <str1>.
 */
const char *strnistr(const char *str1, int len_str1, const char *str2, int len_str2)
{
	char *pptr, *sptr, *start;
	unsigned int slen, plen;
	unsigned int tmp1, tmp2;

	if (str1 == NULL || len_str1 == 0) // search pattern into an empty string => search is not found
		return NULL;

	if (str2 == NULL || len_str2 == 0) // pattern is empty => every str1 match
		return str1;

	if (len_str1 < len_str2) // pattern is longer than string => search is not found
		return NULL;

	for (tmp1 = 0, start = (char *)str1, pptr = (char *)str2, slen = len_str1, plen = len_str2; slen >= plen; start++, slen--) {
		/* skip ahead to the next position matching the needle's first char */
		while (toupper((unsigned char)*start) != toupper((unsigned char)*str2)) {
			start++;
			slen--;
			tmp1++;

			if (tmp1 >= len_str1)
				return NULL;

			/* if pattern longer than string */
			if (slen < plen)
				return NULL;
		}

		sptr = start;
		pptr = (char *)str2;

		/* compare the remaining needle chars at this position */
		tmp2 = 0;
		while (toupper((unsigned char)*sptr) == toupper((unsigned char)*pptr)) {
			sptr++;
			pptr++;
			tmp2++;

			if (*pptr == '\0' || tmp2 == len_str2) /* end of pattern found */
				return start;
			/* NOTE(review): <tmp2> counts matched chars but is compared
			 * against <len_str1>, the full haystack length, not the
			 * remaining length from <start> — confirm intended bound.
			 */
			if (*sptr == '\0' || tmp2 == len_str1) /* end of string found and the pattern is not fully found */
				return NULL;
		}
	}
	return NULL;
}
+
/* Returns true if s1 < s2 < s3 otherwise zero. Both s1 and s3 may be NULL and
 * in this case only non-null strings are compared. This allows to pass initial
 * values in iterators and in sort functions.
 */
int strordered(const char *s1, const char *s2, const char *s3)
{
	if (s1 && strcmp(s1, s2) >= 0)
		return 0;
	if (s3 && strcmp(s2, s3) >= 0)
		return 0;
	return 1;
}
+
/* This function reads the next valid utf8 char.
 * <s> is the byte array to be decoded, <len> is its length.
 * The function returns the decoded char encoded like this:
 * The 4 msb are the return code (UTF8_CODE_*), the 4 lsb
 * are the length read. The decoded character is stored in <c>.
 */
unsigned char utf8_next(const char *s, int len, unsigned int *c)
{
	const unsigned char *p = (unsigned char *)s;
	int dec;  /* number of continuation bytes still expected */
	unsigned char code = UTF8_CODE_OK;

	if (len < 1)
		return UTF8_CODE_OK;

	/* Check the type of UTF8 sequence
	 *
	 * 0... ....  0x00 <= x <= 0x7f : 1 byte: ascii char
	 * 10.. ....  0x80 <= x <= 0xbf : invalid sequence
	 * 110. ....  0xc0 <= x <= 0xdf : 2 bytes
	 * 1110 ....  0xe0 <= x <= 0xef : 3 bytes
	 * 1111 0...  0xf0 <= x <= 0xf7 : 4 bytes
	 * 1111 10..  0xf8 <= x <= 0xfb : 5 bytes
	 * 1111 110.  0xfc <= x <= 0xfd : 6 bytes
	 * 1111 111.  0xfe <= x <= 0xff : invalid sequence
	 */
	switch (*p) {
	case 0x00 ... 0x7f:
		*c = *p;
		return UTF8_CODE_OK | 1;

	case 0x80 ... 0xbf:
		*c = *p;
		return UTF8_CODE_BADSEQ | 1;

	case 0xc0 ... 0xdf:
		if (len < 2) {
			*c = *p;
			return UTF8_CODE_BADSEQ | 1;
		}
		*c = *p & 0x1f;
		dec = 1;
		break;

	case 0xe0 ... 0xef:
		if (len < 3) {
			*c = *p;
			return UTF8_CODE_BADSEQ | 1;
		}
		*c = *p & 0x0f;
		dec = 2;
		break;

	case 0xf0 ... 0xf7:
		if (len < 4) {
			*c = *p;
			return UTF8_CODE_BADSEQ | 1;
		}
		*c = *p & 0x07;
		dec = 3;
		break;

	case 0xf8 ... 0xfb:
		if (len < 5) {
			*c = *p;
			return UTF8_CODE_BADSEQ | 1;
		}
		*c = *p & 0x03;
		dec = 4;
		break;

	case 0xfc ... 0xfd:
		if (len < 6) {
			*c = *p;
			return UTF8_CODE_BADSEQ | 1;
		}
		*c = *p & 0x01;
		dec = 5;
		break;

	case 0xfe ... 0xff:
	default:
		*c = *p;
		return UTF8_CODE_BADSEQ | 1;
	}

	p++;

	/* consume the continuation bytes, 6 payload bits each */
	while (dec > 0) {

		/* need 0x10 for the 2 first bits */
		if ( ( *p & 0xc0 ) != 0x80 )
			return UTF8_CODE_BADSEQ | ((p-(unsigned char *)s)&0xffff);

		/* add data at char */
		*c = ( *c << 6 ) | ( *p & 0x3f );

		dec--;
		p++;
	}

	/* Check overlong encoding.
	 * 1 byte  : 5 + 6         : 11 : 0x80    ... 0x7ff
	 * 2 bytes : 4 + 6 + 6     : 16 : 0x800   ... 0xffff
	 * 3 bytes : 3 + 6 + 6 + 6 : 21 : 0x10000 ... 0x1fffff
	 */
	if (( *c <= 0x7f && (p-(unsigned char *)s) > 1) ||
	    (*c >= 0x80 && *c <= 0x7ff && (p-(unsigned char *)s) > 2) ||
	    (*c >= 0x800 && *c <= 0xffff && (p-(unsigned char *)s) > 3) ||
	    (*c >= 0x10000 && *c <= 0x1fffff && (p-(unsigned char *)s) > 4))
		code |= UTF8_CODE_OVERLONG;

	/* Check invalid UTF8 range (surrogates and non-characters). */
	if ((*c >= 0xd800 && *c <= 0xdfff) ||
	    (*c >= 0xfffe && *c <= 0xffff))
		code |= UTF8_CODE_INVRANGE;

	return code | ((p-(unsigned char *)s)&0x0f);
}
+
+/* append a copy of string <str> (in a wordlist) at the end of the list <li>
+ * On failure : return 0 and <err> filled with an error message.
+ * The caller is responsible for freeing the <err> and <str> copy
+ * memory area using free()
+ */
+int list_append_word(struct list *li, const char *str, char **err)
+{
+ struct wordlist *wl;
+
+ wl = calloc(1, sizeof(*wl));
+ if (!wl) {
+ memprintf(err, "out of memory");
+ goto fail_wl;
+ }
+
+ wl->s = strdup(str);
+ if (!wl->s) {
+ memprintf(err, "out of memory");
+ goto fail_wl_s;
+ }
+
+ LIST_APPEND(li, &wl->list);
+
+ return 1;
+
+fail_wl_s:
+ free(wl->s);
+fail_wl:
+ free(wl);
+ return 0;
+}
+
/* indicates if a memory location may safely be read or not. The trick consists
 * in performing a harmless syscall using this location as an input and letting
 * the operating system report whether it's OK or not. For this we have the
 * stat() syscall, which will return EFAULT when the memory location supposed
 * to contain the file name is not readable. If it is readable it will then
 * either return 0 if the area contains an existing file name, or -1 with
 * another code. This must not be abused, and some audit systems might detect
 * this as abnormal activity. It's used only for unsafe dumps.
 */
int may_access(const void *ptr)
{
	struct stat buf;

	/* only EFAULT indicates an unreadable location; any other outcome
	 * (success or a different errno such as ENOENT) proves readability.
	 */
	if (stat(ptr, &buf) != 0 && errno == EFAULT)
		return 0;
	return 1;
}
+
+/* print a string of text buffer to <out>. The format is :
+ * Non-printable chars \t, \n, \r and \e are * encoded in C format.
+ * Other non-printable chars are encoded "\xHH". Space, '\', and '=' are also escaped.
+ * Print stopped if null char or <bsize> is reached, or if no more place in the chunk.
+ */
+int dump_text(struct buffer *out, const char *buf, int bsize)
+{
+ unsigned char c;
+ size_t ptr = 0;
+
+ while (ptr < bsize && buf[ptr]) {
+ c = buf[ptr];
+ if (isprint((unsigned char)c) && isascii((unsigned char)c) && c != '\\' && c != ' ' && c != '=') {
+ if (out->data > out->size - 1)
+ break;
+ out->area[out->data++] = c;
+ }
+ else if (c == '\t' || c == '\n' || c == '\r' || c == '\e' || c == '\\' || c == ' ' || c == '=') {
+ if (out->data > out->size - 2)
+ break;
+ out->area[out->data++] = '\\';
+ switch (c) {
+ case ' ': c = ' '; break;
+ case '\t': c = 't'; break;
+ case '\n': c = 'n'; break;
+ case '\r': c = 'r'; break;
+ case '\e': c = 'e'; break;
+ case '\\': c = '\\'; break;
+ case '=': c = '='; break;
+ }
+ out->area[out->data++] = c;
+ }
+ else {
+ if (out->data > out->size - 4)
+ break;
+ out->area[out->data++] = '\\';
+ out->area[out->data++] = 'x';
+ out->area[out->data++] = hextab[(c >> 4) & 0xF];
+ out->area[out->data++] = hextab[c & 0xF];
+ }
+ ptr++;
+ }
+
+ return ptr;
+}
+
+/* print a buffer in hexa.
+ * Print stopped if <bsize> is reached, or if no more place in the chunk.
+ */
+int dump_binary(struct buffer *out, const char *buf, int bsize)
+{
+ unsigned char c;
+ int ptr = 0;
+
+ while (ptr < bsize) {
+ c = buf[ptr];
+
+ if (out->data > out->size - 2)
+ break;
+ out->area[out->data++] = hextab[(c >> 4) & 0xF];
+ out->area[out->data++] = hextab[c & 0xF];
+
+ ptr++;
+ }
+ return ptr;
+}
+
/* Appends into buffer <out> a hex dump of memory area <buf> for <len> bytes,
 * prepending each line with prefix <pfx>. The output is *not* initialized.
 * The output will not wrap pas the buffer's end so it is more optimal if the
 * caller makes sure the buffer is aligned first. A trailing zero will always
 * be appended (and not counted) if there is room for it. The caller must make
 * sure that the area is dumpable first. If <unsafe> is non-null, the memory
 * locations are checked first for being readable.
 */
void dump_hex(struct buffer *out, const char *pfx, const void *buf, int len, int unsafe)
{
	const unsigned char *d = buf;
	int i, j, start;

	/* round the dump start down to a 16-byte boundary; <start> is the
	 * offset of <buf> within that first line.
	 */
	d = (const unsigned char *)(((unsigned long)buf) & -16);
	start = ((unsigned long)buf) & 15;

	for (i = 0; i < start + len; i += 16) {
		chunk_appendf(out, (sizeof(void *) == 4) ? "%s%8p: " : "%s%16p: ", pfx, d + i);

		// 0: unchecked, 1: checked safe, 2: danger
		unsafe = !!unsafe;
		if (unsafe && !may_access(d + i))
			unsafe = 2;

		/* hex columns: padding bytes outside the requested area are
		 * shown as "''", unreadable ones as "**".
		 */
		for (j = 0; j < 16; j++) {
			if ((i + j < start) || (i + j >= start + len))
				chunk_strcat(out, "'' ");
			else if (unsafe > 1)
				chunk_strcat(out, "** ");
			else
				chunk_appendf(out, "%02x ", d[i + j]);

			if (j == 7)
				chunk_strcat(out, "- ");
		}
		chunk_strcat(out, "  ");
		/* ASCII view: non-printable bytes appear as '.' */
		for (j = 0; j < 16; j++) {
			if ((i + j < start) || (i + j >= start + len))
				chunk_strcat(out, "'");
			else if (unsafe > 1)
				chunk_strcat(out, "*");
			else if (isprint((unsigned char)d[i + j]))
				chunk_appendf(out, "%c", d[i + j]);
			else
				chunk_strcat(out, ".");
		}
		chunk_strcat(out, "\n");
	}
}
+
/* dumps <pfx> followed by <n> bytes from <addr> in hex form into buffer <buf>
 * enclosed in brackets after the address itself, formatted on 14 chars
 * including the "0x" prefix. This is meant to be used as a prefix for code
 * areas. For example:
 *   "0x7f10b6557690 [48 c7 c0 0f 00 00 00 0f]"
 * It relies on may_access() to know if the bytes are dumpable, otherwise "--"
 * is emitted. A NULL <pfx> will be considered empty.
 */
void dump_addr_and_bytes(struct buffer *buf, const char *pfx, const void *addr, int n)
{
	int ok = 0;
	int i;

	chunk_appendf(buf, "%s%#14lx [", pfx ? pfx : "", (long)addr);

	for (i = 0; i < n; i++) {
		/* re-check accessibility when bit 12 of the address flips,
		 * i.e. when crossing a 4kB page boundary.
		 */
		if (i == 0 || (((long)(addr + i) ^ (long)(addr)) & 4096))
			ok = may_access(addr + i);
		if (ok)
			chunk_appendf(buf, "%02x%s", ((uint8_t*)addr)[i], (i<n-1) ? " " : "]");
		else
			chunk_appendf(buf, "--%s", (i<n-1) ? " " : "]");
	}

	/* the closing bracket is normally emitted with the last byte; make
	 * sure it is also present when <n> is zero or negative, so that the
	 * output is always well-formed.
	 */
	if (n <= 0)
		chunk_appendf(buf, "]");
}
+
/* print a line of text buffer (limited to 70 bytes) to <out>. The format is :
 * <2 spaces> <offset=5 digits> <space or plus> <space> <70 chars max> <\n>
 * which is 60 chars per line. Non-printable chars \t, \n, \r and \e are
 * encoded in C format. Other non-printable chars are encoded "\xHH". Original
 * lines are respected within the limit of 70 output chars. Lines that are
 * continuation of a previous truncated line begin with "+" instead of " "
 * after the offset. The new pointer is returned.
 */
int dump_text_line(struct buffer *out, const char *buf, int bsize, int len,
                   int *line, int ptr)
{
	int end;
	unsigned char c;

	/* reserve room for a full output line up front; give up if the
	 * buffer cannot hold it.
	 */
	end = out->data + 80;
	if (end > out->size)
		return ptr;

	chunk_appendf(out, "  %05d%c ", ptr, (ptr == *line) ? ' ' : '+');

	while (ptr < len && ptr < bsize) {
		c = buf[ptr];
		if (isprint((unsigned char)c) && isascii((unsigned char)c) && c != '\\') {
			/* plain printable char: 1 byte plus trailing '\n' */
			if (out->data > end - 2)
				break;
			out->area[out->data++] = c;
		} else if (c == '\t' || c == '\n' || c == '\r' || c == '\e' || c == '\\') {
			/* C-style escape: 2 bytes plus trailing '\n' */
			if (out->data > end - 3)
				break;
			out->area[out->data++] = '\\';
			switch (c) {
			case '\t': c = 't'; break;
			case '\n': c = 'n'; break;
			case '\r': c = 'r'; break;
			case '\e': c = 'e'; break;
			case '\\': c = '\\'; break;
			}
			out->area[out->data++] = c;
		} else {
			/* hex escape "\xHH": 4 bytes plus trailing '\n' */
			if (out->data > end - 5)
				break;
			out->area[out->data++] = '\\';
			out->area[out->data++] = 'x';
			out->area[out->data++] = hextab[(c >> 4) & 0xF];
			out->area[out->data++] = hextab[c & 0xF];
		}
		if (buf[ptr++] == '\n') {
			/* we had a line break, let's return now */
			out->area[out->data++] = '\n';
			*line = ptr;
			return ptr;
		}
	}
	/* we have an incomplete line, we return it as-is */
	out->area[out->data++] = '\n';
	return ptr;
}
+
+/* displays a <len> long memory block at <buf>, assuming first byte of <buf>
+ * has address <baseaddr>. String <pfx> may be placed as a prefix in front of
+ * each line. It may be NULL if unused. The output is emitted to file <out>.
+ */
+void debug_hexdump(FILE *out, const char *pfx, const char *buf,
+ unsigned int baseaddr, int len)
+{
+ unsigned int i;
+ int b, j;
+
+ for (i = 0; i < (len + (baseaddr & 15)); i += 16) {
+ b = i - (baseaddr & 15);
+ fprintf(out, "%s%08x: ", pfx ? pfx : "", i + (baseaddr & ~15));
+ for (j = 0; j < 8; j++) {
+ if (b + j >= 0 && b + j < len)
+ fprintf(out, "%02x ", (unsigned char)buf[b + j]);
+ else
+ fprintf(out, " ");
+ }
+
+ if (b + j >= 0 && b + j < len)
+ fputc('-', out);
+ else
+ fputc(' ', out);
+
+ for (j = 8; j < 16; j++) {
+ if (b + j >= 0 && b + j < len)
+ fprintf(out, " %02x", (unsigned char)buf[b + j]);
+ else
+ fprintf(out, " ");
+ }
+
+ fprintf(out, " ");
+ for (j = 0; j < 16; j++) {
+ if (b + j >= 0 && b + j < len) {
+ if (isprint((unsigned char)buf[b + j]))
+ fputc((unsigned char)buf[b + j], out);
+ else
+ fputc('.', out);
+ }
+ else
+ fputc(' ', out);
+ }
+ fputc('\n', out);
+ }
+}
+
/* Tries to report the executable path name on platforms supporting this. If
 * not found or not possible, returns NULL.
 */
const char *get_exec_path()
{
	const char *ret = NULL;

#if defined(__linux__) && defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
	/* glibc >= 2.16 exposes the ELF auxiliary vector via getauxval() */
	long execfn = getauxval(AT_EXECFN);

	/* NOTE(review): getauxval() returns 0 when the entry is missing; the
	 * extra comparison against ENOENT treats a small positive value as an
	 * error code rather than a pointer — confirm the intended contract.
	 */
	if (execfn && execfn != ENOENT)
		ret = (const char *)execfn;
#elif defined(__FreeBSD__)
	/* walk the process' ELF auxiliary vector by hand */
	Elf_Auxinfo *auxv;
	for (auxv = __elf_aux_vector; auxv->a_type != AT_NULL; ++auxv) {
		if (auxv->a_type == AT_EXECPATH) {
			ret = (const char *)auxv->a_un.a_ptr;
			break;
		}
	}
#elif defined(__NetBSD__)
	/* same principle with NetBSD's dynamic linker interface */
	AuxInfo *auxv;
	for (auxv = _dlauxinfo(); auxv->a_type != AT_NULL; ++auxv) {
		if (auxv->a_type == AT_SUN_EXECNAME) {
			ret = (const char *)auxv->a_v;
			break;
		}
	}
#elif defined(__sun)
	/* Solaris provides a dedicated call for this */
	ret = getexecname();
#endif
	return ret;
}
+
+#if (defined(__ELF__) && !defined(__linux__)) || defined(USE_DL)
/* calls dladdr() or dladdr1() on <addr> and <dli>. If dladdr1 is available,
 * also returns the symbol size in <size>, otherwise returns 0 there.
 * Returns the underlying dladdr()/dladdr1() result: non-zero on success,
 * zero when the address could not be matched to any loaded object.
 */
static int dladdr_and_size(const void *addr, Dl_info *dli, size_t *size)
{
	int ret;
#if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 3)) // most detailed one
	const ElfW(Sym) *sym __attribute__((may_alias));

	/* RTLD_DL_SYMENT makes dladdr1() also report the matching ELF symbol
	 * entry, from which the symbol's size is taken.
	 */
	ret = dladdr1(addr, dli, (void **)&sym, RTLD_DL_SYMENT);
	if (ret)
		*size = sym ? sym->st_size : 0;
#else
#if defined(__sun)
	/* Solaris' dladdr() prototype takes a non-const pointer */
	ret = dladdr((void *)addr, dli);
#else
	ret = dladdr(addr, dli);
#endif
	*size = 0;
#endif
	return ret;
}
+
+/* Sets build_is_static to true if we detect a static build. Some older glibcs
+ * tend to crash inside dlsym() in static builds, but tests show that at least
+ * dladdr() still works (and will fail to resolve anything of course). Thus we
+ * try to determine if we're on a static build to avoid calling dlsym() in this
+ * case.
+ */
+void check_if_static_build()
+{
+ Dl_info dli = { };
+ size_t size = 0;
+
+ /* Now let's try to be smarter */
+ if (!dladdr_and_size(&main, &dli, &size))
+ build_is_static = 1;
+ else
+ build_is_static = 0;
+}
+
+INITCALL0(STG_PREPARE, check_if_static_build);
+
+/* Tries to retrieve the address of the first occurrence symbol <name>.
+ * Note that NULL in return is not always an error as a symbol may have that
+ * address in special situations.
+ */
+void *get_sym_curr_addr(const char *name)
+{
+ void *ptr = NULL;
+
+#ifdef RTLD_DEFAULT
+ if (!build_is_static)
+ ptr = dlsym(RTLD_DEFAULT, name);
+#endif
+ return ptr;
+}
+
+
+/* Tries to retrieve the address of the next occurrence of symbol <name>
+ * Note that NULL in return is not always an error as a symbol may have that
+ * address in special situations.
+ */
+void *get_sym_next_addr(const char *name)
+{
+ void *ptr = NULL;
+
+#ifdef RTLD_NEXT
+ if (!build_is_static)
+ ptr = dlsym(RTLD_NEXT, name);
+#endif
+ return ptr;
+}
+
+#else /* elf & linux & dl */
+
+/* no possible resolving on other platforms at the moment */
+void *get_sym_curr_addr(const char *name)
+{
+ return NULL;
+}
+
+void *get_sym_next_addr(const char *name)
+{
+ return NULL;
+}
+
+#endif /* elf & linux & dl */
+
/* Tries to append to buffer <buf> some indications about the symbol at address
 * <addr> using the following form:
 *   lib:+0xoffset              (unresolvable address from lib's base)
 *   main+0xoffset              (unresolvable address from main (+/-))
 *   lib:main+0xoffset          (unresolvable lib address from main (+/-))
 *   name                       (resolved exact exec address)
 *   lib:name                   (resolved exact lib address)
 *   name+0xoffset/0xsize       (resolved address within exec symbol)
 *   lib:name+0xoffset/0xsize   (resolved address within lib symbol)
 *
 * The file name (lib or executable) is limited to what lies between the last
 * '/' and the first following '.'. An optional prefix <pfx> is prepended before
 * the output if not null. The file is not dumped when it's the same as the one
 * that contains the "main" symbol, or when __ELF__ && USE_DL are not set.
 *
 * The symbol's base address is returned, or NULL when unresolved, in order to
 * allow the caller to match it against known ones.
 */
const void *resolve_sym_name(struct buffer *buf, const char *pfx, const void *addr)
{
	/* well-known handler entry points, matched first so that they resolve
	 * even on platforms without dladdr() support.
	 */
	const struct {
		const void *func;
		const char *name;
	} fcts[] = {
		{ .func = process_stream, .name = "process_stream" },
		{ .func = task_run_applet, .name = "task_run_applet" },
		{ .func = sc_conn_io_cb, .name = "sc_conn_io_cb" },
		{ .func = sock_conn_iocb, .name = "sock_conn_iocb" },
		{ .func = dgram_fd_handler, .name = "dgram_fd_handler" },
		{ .func = listener_accept, .name = "listener_accept" },
		{ .func = manage_global_listener_queue, .name = "manage_global_listener_queue" },
		{ .func = poller_pipe_io_handler, .name = "poller_pipe_io_handler" },
		{ .func = mworker_accept_wrapper, .name = "mworker_accept_wrapper" },
		{ .func = session_expire_embryonic, .name = "session_expire_embryonic" },
#ifdef USE_THREAD
		{ .func = accept_queue_process, .name = "accept_queue_process" },
#endif
#ifdef USE_LUA
		{ .func = hlua_process_task, .name = "hlua_process_task" },
#endif
#ifdef SSL_MODE_ASYNC
		{ .func = ssl_async_fd_free, .name = "ssl_async_fd_free" },
		{ .func = ssl_async_fd_handler, .name = "ssl_async_fd_handler" },
#endif
#ifdef USE_QUIC
		{ .func = quic_conn_sock_fd_iocb, .name = "quic_conn_sock_fd_iocb" },
#endif
	};

#if (defined(__ELF__) && !defined(__linux__)) || defined(USE_DL)
	Dl_info dli, dli_main;
	size_t size;
	const char *fname, *p;
#endif
	int i;

	if (pfx)
		chunk_appendf(buf, "%s", pfx);

	for (i = 0; i < sizeof(fcts) / sizeof(fcts[0]); i++) {
		if (addr == fcts[i].func) {
			chunk_appendf(buf, "%s", fcts[i].name);
			return addr;
		}
	}

#if (defined(__ELF__) && !defined(__linux__)) || defined(USE_DL)
	/* Now let's try to be smarter */
	if (!dladdr_and_size(addr, &dli, &size))
		goto unknown;

	/* 1. prefix the library name if it's not the same object as the one
	 * that contains the main function. The name is picked between last '/'
	 * and first following '.'.
	 */
	if (!dladdr(main, &dli_main))
		dli_main.dli_fbase = NULL;

	if (dli_main.dli_fbase != dli.dli_fbase) {
		/* keep only the basename, truncated at its first dot */
		fname = dli.dli_fname;
		p = strrchr(fname, '/');
		if (p++)
			fname = p;
		p = strchr(fname, '.');
		if (!p)
			p = fname + strlen(fname);

		chunk_appendf(buf, "%.*s:", (int)(long)(p - fname), fname);
	}

	/* 2. symbol name */
	if (dli.dli_sname) {
		/* known, dump it and return symbol's address (exact or relative) */
		chunk_appendf(buf, "%s", dli.dli_sname);
		if (addr != dli.dli_saddr) {
			chunk_appendf(buf, "+%#lx", (long)(addr - dli.dli_saddr));
			if (size)
				chunk_appendf(buf, "/%#lx", (long)size);
		}
		return dli.dli_saddr;
	}
	else if (dli_main.dli_fbase != dli.dli_fbase) {
		/* unresolved symbol from a known library, report relative offset */
		chunk_appendf(buf, "+%#lx", (long)(addr - dli.dli_fbase));
		return NULL;
	}
#endif /* __ELF__ && !__linux__ || USE_DL */
 unknown:
	/* unresolved symbol from the main file, report relative offset to main */
	if ((void*)addr < (void*)main)
		chunk_appendf(buf, "main-%#lx", (long)((void*)main - addr));
	else
		chunk_appendf(buf, "main+%#lx", (long)(addr - (void*)main));
	return NULL;
}
+
+/* On systems where this is supported, let's provide a possibility to enumerate
+ * the list of object files. The output is appended to a buffer initialized by
+ * the caller, with one name per line. A trailing zero is always emitted if data
+ * are written. Only real objects are dumped (executable and .so libs). The
+ * function returns non-zero if it dumps anything. These functions do not make
+ * use of the trash so that it is possible for the caller to call them with the
+ * trash on input. The output format may be platform-specific but at least one
+ * version must emit raw object file names when argument is zero.
+ */
+#if defined(HA_HAVE_DUMP_LIBS)
+# if defined(HA_HAVE_DL_ITERATE_PHDR)
+/* the private <data> we pass below is a dump context initialized like this */
+struct dl_dump_ctx {
+ struct buffer *buf;
+ int with_addr;
+};
+
+static int dl_dump_libs_cb(struct dl_phdr_info *info, size_t size, void *data)
+{
+ struct dl_dump_ctx *ctx = data;
+ const char *fname;
+ size_t p1, p2, beg, end;
+ int idx;
+
+ if (!info || !info->dlpi_name)
+ goto leave;
+
+ if (!*info->dlpi_name)
+ fname = get_exec_path();
+ else if (strchr(info->dlpi_name, '/'))
+ fname = info->dlpi_name;
+ else
+ /* else it's a VDSO or similar and we're not interested */
+ goto leave;
+
+ if (!ctx->with_addr)
+ goto dump_name;
+
+ /* virtual addresses are relative to the load address and are per
+ * pseudo-header, so we have to scan them all to find the furthest
+ * one from the beginning. In this case we only dump entries if
+ * they have at least one section.
+ */
+ beg = ~0; end = 0;
+ for (idx = 0; idx < info->dlpi_phnum; idx++) {
+ if (!info->dlpi_phdr[idx].p_memsz)
+ continue;
+ p1 = info->dlpi_phdr[idx].p_vaddr;
+ if (p1 < beg)
+ beg = p1;
+ p2 = p1 + info->dlpi_phdr[idx].p_memsz - 1;
+ if (p2 > end)
+ end = p2;
+ }
+
+ if (!idx)
+ goto leave;
+
+ chunk_appendf(ctx->buf, "0x%012llx-0x%012llx (0x%07llx) ",
+ (ullong)info->dlpi_addr + beg,
+ (ullong)info->dlpi_addr + end,
+ (ullong)(end - beg + 1));
+ dump_name:
+ chunk_appendf(ctx->buf, "%s\n", fname);
+ leave:
+ return 0;
+}
+
+/* dumps lib names and optionally address ranges */
+int dump_libs(struct buffer *output, int with_addr)
+{
+ struct dl_dump_ctx ctx = { .buf = output, .with_addr = with_addr };
+ size_t old_data = output->data;
+
+ dl_iterate_phdr(dl_dump_libs_cb, &ctx);
+ return output->data != old_data;
+}
+# else // no DL_ITERATE_PHDR
+# error "No dump_libs() function for this platform"
+# endif
+#else // no HA_HAVE_DUMP_LIBS
+
/* unsupported platform: never dumps anything and reports it */
int dump_libs(struct buffer *output, int with_addr)
{
	(void)output;    /* unused on this platform */
	(void)with_addr; /* unused on this platform */
	return 0;
}
+
+#endif // HA_HAVE_DUMP_LIBS
+
+/*
+ * Allocate an array of unsigned int with <nums> as address from <str> string
+ * made of integer separated by dot characters.
+ *
+ * First, initializes the value with <sz> as address to 0 and initializes the
+ * array with <nums> as address to NULL. Then allocates the array with <nums> as
+ * address updating <sz> pointed value to the size of this array.
+ *
+ * Returns 1 if succeeded, 0 if not.
+ */
+int parse_dotted_uints(const char *str, unsigned int **nums, size_t *sz)
+{
+ unsigned int *n;
+ const char *s, *end;
+
+ s = str;
+ *sz = 0;
+ end = str + strlen(str);
+ *nums = n = NULL;
+
+ while (1) {
+ unsigned int r;
+
+ if (s >= end)
+ break;
+
+ r = read_uint(&s, end);
+ /* Expected characters after having read an uint: '\0' or '.',
+ * if '.', must not be terminal.
+ */
+ if (*s != '\0'&& (*s++ != '.' || s == end)) {
+ free(n);
+ return 0;
+ }
+
+ n = my_realloc2(n, (*sz + 1) * sizeof *n);
+ if (!n)
+ return 0;
+
+ n[(*sz)++] = r;
+ }
+ *nums = n;
+
+ return 1;
+}
+
+
/* returns the number of bytes needed to encode <v> as a varint. An inline
 * version exists for use with constants (__varint_bytes()).
 */
int varint_bytes(uint64_t v)
{
	int bytes;

	/* values below 240 fit in the first byte alone */
	if (v < 240)
		return 1;

	/* the first byte consumes 4 bits of the remainder, every following
	 * byte consumes 7 more bits until the remainder fits.
	 */
	v = (v - 240) >> 4;
	for (bytes = 2; v >= 128; bytes++)
		v = (v - 128) >> 7;

	return bytes;
}
+
+
/* Random number generator state, see below */
static uint64_t ha_random_state[2] ALIGNED(2*sizeof(uint64_t));

/* This is a thread-safe implementation of xoroshiro128** described below:
 * http://prng.di.unimi.it/
 * It features a 2^128 long sequence, returns 64 high-quality bits on each call,
 * supports fast jumps and passes all common quality tests. It is thread-safe,
 * uses a double-cas on 64-bit architectures supporting it, and falls back to a
 * local lock on other ones.
 */
uint64_t ha_random64()
{
	uint64_t old[2] ALIGNED(2*sizeof(uint64_t));
	uint64_t new[2] ALIGNED(2*sizeof(uint64_t));

#if defined(USE_THREAD) && (!defined(HA_CAS_IS_8B) || !defined(HA_HAVE_CAS_DW))
	/* no 2x64-bit double-CAS available: serialize the state update */
	static HA_SPINLOCK_T rand_lock;

	HA_SPIN_LOCK(OTHER_LOCK, &rand_lock);
#endif

	old[0] = ha_random_state[0];
	old[1] = ha_random_state[1];

#if defined(USE_THREAD) && defined(HA_CAS_IS_8B) && defined(HA_HAVE_CAS_DW)
	/* lock-free path: retry the transition until the double-word CAS
	 * succeeds, i.e. nobody else advanced the state in between.
	 */
	do {
#endif
		new[1] = old[0] ^ old[1];
		new[0] = rotl64(old[0], 24) ^ new[1] ^ (new[1] << 16); // a, b
		new[1] = rotl64(new[1], 37); // c

#if defined(USE_THREAD) && defined(HA_CAS_IS_8B) && defined(HA_HAVE_CAS_DW)
	} while (unlikely(!_HA_ATOMIC_DWCAS(ha_random_state, old, new)));
#else
	ha_random_state[0] = new[0];
	ha_random_state[1] = new[1];
#if defined(USE_THREAD)
	HA_SPIN_UNLOCK(OTHER_LOCK, &rand_lock);
#endif
#endif
	/* the "**" scrambler, computed from the state read before the update */
	return rotl64(old[0] * 5, 7) * 9;
}
+
+/* seeds the random state using up to <len> bytes from <seed>, starting with
+ * the first non-zero byte.
+ */
+void ha_random_seed(const unsigned char *seed, size_t len)
+{
+ size_t pos;
+
+ /* the seed must not be all zeroes, so we pre-fill it with alternating
+ * bits and overwrite part of them with the block starting at the first
+ * non-zero byte from the seed.
+ */
+ memset(ha_random_state, 0x55, sizeof(ha_random_state));
+
+ for (pos = 0; pos < len; pos++)
+ if (seed[pos] != 0)
+ break;
+
+ if (pos == len)
+ return;
+
+ seed += pos;
+ len -= pos;
+
+ if (len > sizeof(ha_random_state))
+ len = sizeof(ha_random_state);
+
+ memcpy(ha_random_state, seed, len);
+}
+
/* This causes a jump to (dist * 2^96) places in the pseudo-random sequence,
 * and is equivalent to calling ha_random64() as many times. It is used to
 * provide non-overlapping sequences of 2^96 numbers (~7*10^28) to up to 2^32
 * different generators (i.e. different processes after a fork). The <dist>
 * argument is the distance to jump to and is used in a loop so it rather not
 * be too large if the processing time is a concern.
 *
 * BEWARE: this function is NOT thread-safe and must not be called during
 * concurrent accesses to ha_random64().
 */
void ha_random_jump96(uint32_t dist)
{
	while (dist--) {
		uint64_t s0 = 0;
		uint64_t s1 = 0;
		int b;

		/* first 64-bit constant of the generator's jump polynomial:
		 * accumulate the states selected by its set bits while
		 * advancing the generator one step per bit.
		 */
		for (b = 0; b < 64; b++) {
			if ((0xd2a98b26625eee7bULL >> b) & 1) {
				s0 ^= ha_random_state[0];
				s1 ^= ha_random_state[1];
			}
			ha_random64();
		}

		/* second 64-bit constant of the jump polynomial */
		for (b = 0; b < 64; b++) {
			if ((0xdddf9b1090aa7ac1ULL >> b) & 1) {
				s0 ^= ha_random_state[0];
				s1 ^= ha_random_state[1];
			}
			ha_random64();
		}
		/* install the jumped state */
		ha_random_state[0] = s0;
		ha_random_state[1] = s1;
	}
}
+
/* Generates an RFC4122 UUID into chunk <output> which must be at least 37
 * bytes large.
 */
void ha_generate_uuid(struct buffer *output)
{
	uint64_t draw1 = ha_random64();
	uint64_t draw2 = ha_random64();
	uint32_t rnd[4];

	/* split the two 64-bit draws into four 32-bit words */
	rnd[0] = draw1;
	rnd[1] = draw1 >> 32;
	rnd[2] = draw2;
	rnd[3] = draw2 >> 32;

	chunk_printf(output, "%8.8x-%4.4x-%4.4x-%4.4x-%12.12llx",
	             rnd[0],
	             rnd[1] & 0xFFFF,
	             ((rnd[1] >> 16u) & 0xFFF) | 0x4000,  // highest 4 bits indicate the uuid version
	             (rnd[2] & 0x3FFF) | 0x8000,  // the highest 2 bits indicate the UUID variant (10),
	             (long long)((rnd[2] >> 14u) | ((uint64_t) rnd[3] << 18u)) & 0xFFFFFFFFFFFFull);
}
+
+
/* only used by parse_line() below. It supports writing in place provided that
 * <in> is updated to the next location before calling it. In that case, the
 * char at <in> may be overwritten.
 * Relies on parse_line()'s locals: <out>/<outpos> (output buffer and write
 * position), <outmax> (output capacity), <in> (current input pointer), and
 * <opts>/<err> (parse options and error flags). On overlap or overflow the
 * char is not stored but <outpos> still advances, so the caller can report
 * the required output size.
 */
#define EMIT_CHAR(x)						\
	do {							\
		char __c = (char)(x);				\
		if ((opts & PARSE_OPT_INPLACE) && out+outpos > in)	\
			err |= PARSE_ERR_OVERLAP;		\
		if (outpos >= outmax)				\
			err |= PARSE_ERR_TOOLARGE;		\
		if (!err)					\
			out[outpos] = __c;			\
		outpos++;					\
	} while (0)
+
+/* Parse <in>, copy it into <out> split into isolated words whose pointers
+ * are put in <args>. If more than <outlen> bytes have to be emitted, the
+ * extraneous ones are not emitted but <outlen> is updated so that the caller
+ * knows how much to realloc. Similarly, <args> are not updated beyond <nbargs>
+ * but the returned <nbargs> indicates how many were found. All trailing args
+ * up to <nbargs> point to the trailing zero, and as long as <nbargs> is > 0,
+ * it is guaranteed that at least one arg will point to the zero. It is safe
+ * to call it with a NULL <args> if <nbargs> is 0.
+ *
+ * <out> may overlap with <in> provided that it never goes further, in which
+ * case the parser will accept to perform in-place parsing and unquoting/
+ * unescaping but only if environment variables do not lead to expansion that
+ * causes overlapping, otherwise the input string being destroyed, the error
+ * will not be recoverable. Note that even during out-of-place <in> will
+ * experience temporary modifications in-place for variable resolution and must
+ * be writable, and will also receive zeroes to delimit words when using
+ * in-place copy. Parsing options <opts> taken from PARSE_OPT_*. Return value
+ * is zero on success otherwise a bitwise-or of PARSE_ERR_*. Upon error, the
+ * starting point of the first invalid character sequence or unmatched
+ * quote/brace is reported in <errptr> if not NULL. When using in-place parsing
+ * error reporting might be difficult since zeroes will have been inserted into
+ * the string. One solution for the caller may consist in replacing all args
+ * delimiters with spaces in this case.
+ */
uint32_t parse_line(char *in, char *out, size_t *outlen, char **args, int *nbargs, uint32_t opts, const char **errptr)
{
	char *quote = NULL;           /* position of the currently open quote, for error reporting */
	char *brace = NULL;           /* position of the currently open '${', for error reporting */
	char *word_expand = NULL;     /* position of a "[*]" expansion marker, if any */
	unsigned char hex1, hex2;
	size_t outmax = *outlen;
	int argsmax = *nbargs - 1;
	size_t outpos = 0;
	int squote = 0;               /* inside single quotes */
	int dquote = 0;               /* inside double quotes */
	int arg = 0;
	uint32_t err = 0;

	*nbargs = 0;
	*outlen = 0;

	/* argsmax may be -1 here, protecting args[] from any write */
	if (arg < argsmax)
		args[arg] = out;

	/* main scan loop: consume <in> one char at a time, emitting into <out> */
	while (1) {
		if (*in >= '-' && *in != '\\') {
			/* speedup: directly send all regular chars starting
			 * with '-', '.', '/', alnum etc...
			 */
			EMIT_CHAR(*in++);
			continue;
		}
		else if (*in == '\0' || *in == '\n' || *in == '\r') {
			/* end of line */
			break;
		}
		else if (*in == '#' && (opts & PARSE_OPT_SHARP) && !squote && !dquote) {
			/* comment */
			break;
		}
		else if (*in == '"' && !squote && (opts & PARSE_OPT_DQUOTE)) { /* double quote outside single quotes */
			if (dquote) {
				dquote = 0;
				quote = NULL;
			}
			else {
				dquote = 1;
				quote = in;
			}
			in++;
			continue;
		}
		else if (*in == '\'' && !dquote && (opts & PARSE_OPT_SQUOTE)) { /* single quote outside double quotes */
			if (squote) {
				squote = 0;
				quote = NULL;
			}
			else {
				squote = 1;
				quote = in;
			}
			in++;
			continue;
		}
		else if (*in == '\\' && !squote && (opts & PARSE_OPT_BKSLASH)) {
			/* first, we'll replace \\, \<space>, \#, \r, \n, \t, \xXX with their
			 * C equivalent value but only when they have a special meaning and within
			 * double quotes for some of them. Other combinations left unchanged (eg: \1).
			 */
			char tosend = *in;

			switch (in[1]) {
			case ' ':
			case '\\':
				tosend = in[1];
				in++;
				break;

			case 't':
				tosend = '\t';
				in++;
				break;

			case 'n':
				tosend = '\n';
				in++;
				break;

			case 'r':
				tosend = '\r';
				in++;
				break;

			case '#':
				/* escaping of "#" only if comments are supported */
				if (opts & PARSE_OPT_SHARP)
					in++;
				tosend = *in;
				break;

			case '\'':
				/* escaping of "'" only outside single quotes and only if single quotes are supported */
				if (opts & PARSE_OPT_SQUOTE && !squote)
					in++;
				tosend = *in;
				break;

			case '"':
				/* escaping of '"' only outside single quotes and only if double quotes are supported */
				if (opts & PARSE_OPT_DQUOTE && !squote)
					in++;
				tosend = *in;
				break;

			case '$':
				/* escaping of '$' only inside double quotes and only if env supported */
				if (opts & PARSE_OPT_ENV && dquote)
					in++;
				tosend = *in;
				break;

			case 'x':
				if (!ishex(in[2]) || !ishex(in[3])) {
					/* invalid or incomplete hex sequence */
					err |= PARSE_ERR_HEX;
					if (errptr)
						*errptr = in;
					goto leave;
				}
				/* decode two hex digits; the subtraction trick maps
				 * 'A'-'F' onto 10-15 after the '0' offset removal.
				 */
				hex1 = toupper((unsigned char)in[2]) - '0';
				hex2 = toupper((unsigned char)in[3]) - '0';
				if (hex1 > 9) hex1 -= 'A' - '9' - 1;
				if (hex2 > 9) hex2 -= 'A' - '9' - 1;
				tosend = (hex1 << 4) + hex2;
				in += 3;
				break;

			default:
				/* other combinations are not escape sequences */
				break;
			}

			in++;
			EMIT_CHAR(tosend);
		}
		else if (isspace((unsigned char)*in) && !squote && !dquote) {
			/* a non-escaped space is an argument separator */
			while (isspace((unsigned char)*in))
				in++;
			EMIT_CHAR(0);
			arg++;
			if (arg < argsmax)
				args[arg] = out + outpos;
			else
				err |= PARSE_ERR_TOOMANY;
		}
		else if (*in == '$' && (opts & PARSE_OPT_ENV) && (dquote || !(opts & PARSE_OPT_DQUOTE))) {
			/* environment variables are evaluated anywhere, or only
			 * inside double quotes if they are supported.
			 */
			char *var_name;
			char save_char;
			const char *value;

			in++;

			if (*in == '{')
				brace = in++;

			if (!isalpha((unsigned char)*in) && *in != '_' && *in != '.') {
				/* unacceptable character in variable name */
				err |= PARSE_ERR_VARNAME;
				if (errptr)
					*errptr = in;
				goto leave;
			}

			var_name = in;
			if (*in == '.')
				in++;
			while (isalnum((unsigned char)*in) || *in == '_')
				in++;

			/* temporarily NUL-terminate the variable name for the
			 * lookups below, then restore the saved char.
			 */
			save_char = *in;
			*in = '\0';
			if (unlikely(*var_name == '.')) {
				/* internal pseudo-variables */
				if (strcmp(var_name, ".LINE") == 0)
					value = ultoa(global.cfg_curr_line);
				else if (strcmp(var_name, ".FILE") == 0)
					value = global.cfg_curr_file;
				else if (strcmp(var_name, ".SECTION") == 0)
					value = global.cfg_curr_section;
				else {
					/* unsupported internal variable name */
					err |= PARSE_ERR_VARNAME;
					if (errptr)
						*errptr = var_name;
					goto leave;
				}
			} else {
				value = getenv(var_name);
			}
			*in = save_char;

			/* support for '[*]' sequence to force word expansion,
			 * only available inside braces */
			if (*in == '[' && brace && (opts & PARSE_OPT_WORD_EXPAND)) {
				word_expand = in++;

				if (*in++ != '*' || *in++ != ']') {
					err |= PARSE_ERR_WRONG_EXPAND;
					if (errptr)
						*errptr = word_expand;
					goto leave;
				}
			}

			if (brace) {
				if (*in == '-') {
					/* default value starts just after the '-' */
					if (!value)
						value = in + 1;

					while (*in && *in != '}')
						in++;
					if (!*in)
						goto no_brace;
					*in = 0; // terminate the default value
				}
				else if (*in != '}') {
				no_brace:
					/* unmatched brace */
					err |= PARSE_ERR_BRACE;
					if (errptr)
						*errptr = brace;
					goto leave;
				}

				/* brace found, skip it */
				in++;
				brace = NULL;
			}

			if (value) {
				while (*value) {
					/* expand as individual parameters on a space character */
					if (word_expand && isspace((unsigned char)*value)) {
						EMIT_CHAR(0);
						++arg;
						if (arg < argsmax)
							args[arg] = out + outpos;
						else
							err |= PARSE_ERR_TOOMANY;

						/* skip consecutive spaces */
						while (isspace((unsigned char)*++value))
							;
					} else {
						EMIT_CHAR(*value++);
					}
				}
			}
			else {
				/* An unmatched environment variable was parsed.
				 * Let's skip the trailing double-quote character
				 * and spaces.
				 */
				if (likely(*var_name != '.') && *in == '"') {
					in++;
					while (isspace((unsigned char)*in))
						in++;
					if (dquote) {
						dquote = 0;
						quote = NULL;
					}
				}
			}
			word_expand = NULL;
		}
		else {
			/* any other regular char */
			EMIT_CHAR(*in++);
		}
	}

	/* end of output string */
	EMIT_CHAR(0);

	/* Don't add an empty arg after trailing spaces. Note that args[arg]
	 * may contain some distances relative to NULL if <out> was NULL, or
	 * pointers beyond the end of <out> in case <outlen> is too short, thus
	 * we must not dereference it.
	 */
	if (arg < argsmax && args[arg] != out + outpos - 1)
		arg++;

	if (quote) {
		/* unmatched quote */
		err |= PARSE_ERR_QUOTE;
		if (errptr)
			*errptr = quote;
		goto leave;
	}
 leave:
	*nbargs = arg;
	*outlen = outpos;

	/* empty all trailing args by making them point to the trailing zero,
	 * at least the last one in any case.
	 */
	if (arg > argsmax)
		arg = argsmax;

	while (arg >= 0 && arg <= argsmax)
		args[arg++] = out + outpos - 1;

	return err;
}
#undef EMIT_CHAR
+
+/* Use <path_fmt> and following arguments as a printf format to build up the
+ * name of a file, whose first line will be read into the trash buffer. The
+ * trailing CR and LF if any are stripped. On success, it sets trash.data to
+ * the number of resulting bytes in the trash and returns this value. Otherwise
+ * on failure it returns -1 if it could not build the path, -2 on file access
+ * access error (e.g. permissions), or -3 on file read error. The trash is
+ * always reset before proceeding. Too large lines are truncated to the size
+ * of the trash.
+ */
+ssize_t read_line_to_trash(const char *path_fmt, ...)
+{
+ va_list args;
+ FILE *file;
+ ssize_t ret;
+
+ /* the path is first built into the trash itself */
+ chunk_reset(&trash);
+
+ va_start(args, path_fmt);
+ ret = vsnprintf(trash.area, trash.size, path_fmt, args);
+ va_end(args);
+
+ /* path truncated: report failure to build the path.
+ * NOTE(review): a vsnprintf encoding error (ret < 0) would slip past
+ * this test and reach fopen() with whatever is in the trash -- confirm
+ * this cannot happen with the formats in use.
+ */
+ if (ret >= trash.size)
+ return -1;
+
+ file = fopen(trash.area, "r");
+ if (!file)
+ return -2;
+
+ /* the trash is reused to receive the first line of the file */
+ ret = -3;
+ chunk_reset(&trash);
+ if (fgets(trash.area, trash.size, file)) {
+ trash.data = strlen(trash.area);
+ /* strip all trailing CR/LF characters */
+ while (trash.data &&
+ (trash.area[trash.data - 1] == '\r' ||
+ trash.area[trash.data - 1] == '\n'))
+ trash.data--;
+ trash.area[trash.data] = 0;
+ ret = trash.data; // success
+ }
+
+ fclose(file);
+ return ret;
+}
+
+/* This is used to sanitize an input line that's about to be used for error reporting.
+ * It will adjust <line> to print approximately <width> chars around <pos>, trying to
+ * preserve the beginning, with leading or trailing "..." when the line is truncated.
+ * Non-printable characters in the output are replaced with '?'. It returns the new
+ * offset of <pos> in the modified line. <width> must be at least 6 to support two
+ * "..." otherwise the result is undefined. The line itself must have at least 7
+ * chars allocated for the same reason.
+ */
+size_t sanitize_for_printing(char *line, size_t pos, size_t width)
+{
+ size_t shift = 0;
+ char *out = line;
+ char *in = line;
+ char *end = line + width;
+
+ if (pos >= width) {
+ /* if we have to shift, we'll be out of context, so let's
+ * try to put <pos> at the center of width.
+ */
+ shift = pos - width / 2;
+ /* skip 3 extra input chars to make room for the leading "..." */
+ in += shift + 3;
+ end = out + width - 3;
+ out[0] = out[1] = out[2] = '.';
+ out += 3;
+ }
+
+ /* copy printable chars as-is, fold any whitespace to a plain space
+ * and replace other non-printable chars with '?'.
+ */
+ while (out < end && *in) {
+ if (isspace((unsigned char)*in))
+ *out++ = ' ';
+ else if (isprint((unsigned char)*in))
+ *out++ = *in;
+ else
+ *out++ = '?';
+ in++;
+ }
+
+ /* NOTE(review): <end> is only lowered below line+width when a shift
+ * occurred above, so the trailing "..." is appended after any shift,
+ * even if the remaining input actually fit -- confirm this is the
+ * intended rendering.
+ */
+ if (end < line + width) {
+ out[0] = out[1] = out[2] = '.';
+ out += 3;
+ }
+
+ /* terminate the output and return the position relative to the kept part */
+ *out++ = 0;
+ return pos - shift;
+}
+
+/* Update array <fp> with the fingerprint of word <word> by counting the
+ * transitions between characters. <fp> is a 1024-entries array indexed as
+ * 32*from+to. Positions for 'from' and 'to' are:
+ * 1..26=letter, 27=digit, 28=other/begin/end.
+ * Row "from=0" is used to mark the character's presence. Others unused.
+ */
+void update_word_fingerprint(uint8_t *fp, const char *word)
+{
+ const char *p;
+ int from, to;
+ int c;
+
+ from = 28; // begin
+ for (p = word; *p; p++) {
+ /* characters are folded to lower case before classification */
+ c = tolower(*p);
+ switch(c) {
+ case 'a'...'z': to = c - 'a' + 1; break;
+ /* NOTE(review): unreachable since c went through tolower()
+ * above; kept for safety, harmless.
+ */
+ case 'A'...'Z': to = tolower(c) - 'a' + 1; break;
+ case '0'...'9': to = 27; break;
+ default: to = 28; break;
+ }
+ /* row "from=0" (fp[to]) only marks the character's presence */
+ fp[to] = 1;
+ /* count the transition from the previous class to this one */
+ fp[32 * from + to]++;
+ from = to;
+ }
+ to = 28; // end
+ fp[32 * from + to]++;
+}
+
+/* This function hashes a word; scramble is the anonymizing key. It returns
+ * the hashed word when the key (scramble) != 0, else returns the word itself.
+ * The result is stored in one of NB_L_HASH_WORD static rotating buffers, so
+ * a returned pointer only remains valid for the next NB_L_HASH_WORD-1 calls.
+ */
+const char *hash_anon(uint32_t scramble, const char *string2hash, const char *prefix, const char *suffix)
+{
+ /* advance in the ring of NB_L_HASH_WORD static output buffers */
+ index_hash++;
+ if (index_hash == NB_L_HASH_WORD)
+ index_hash = 0;
+
+ /* don't hash empty strings (empty or a single space) */
+ if (!string2hash[0] || (string2hash[0] == ' ' && string2hash[1] == 0))
+ return string2hash;
+
+ if (scramble != 0) {
+ /* emit "<prefix><6-hex-digit hash><suffix>" into the current slot */
+ snprintf(hash_word[index_hash], sizeof(hash_word[index_hash]), "%s%06x%s",
+ prefix, HA_ANON(scramble, string2hash, strlen(string2hash)), suffix);
+ return hash_word[index_hash];
+ }
+ else
+ return string2hash;
+}
+
+/* This function hashes (or not) the IP address string <ipstring>; scramble is
+ * the anonymizing key. It returns the hashed IP with its port, or ipstring
+ * itself when there is nothing to hash. Pass hasport=0 to indicate that
+ * ipstring carries no port, any other value otherwise. Without a port, a
+ * simple hash or ipstring is returned.
+ */
+const char *hash_ipanon(uint32_t scramble, char *ipstring, int hasport)
+{
+ char *errmsg = NULL;
+ struct sockaddr_storage *sa;
+ struct sockaddr_storage ss;
+ char addr[46];
+ int port;
+
+ /* advance in the ring of NB_L_HASH_WORD static output buffers */
+ index_hash++;
+ if (index_hash == NB_L_HASH_WORD) {
+ index_hash = 0;
+ }
+
+ /* no key => nothing to anonymize */
+ if (scramble == 0) {
+ return ipstring;
+ }
+ /* well-known non-address identifiers are returned verbatim */
+ if (strcmp(ipstring, "localhost") == 0 ||
+ strcmp(ipstring, "stdout") == 0 ||
+ strcmp(ipstring, "stderr") == 0 ||
+ strncmp(ipstring, "fd@", 3) == 0 ||
+ strncmp(ipstring, "sockpair@", 9) == 0) {
+ return ipstring;
+ }
+ else {
+ if (hasport == 0) {
+ /* address without port: parse it directly; if it does
+ * not parse as an IP, hash the raw string instead.
+ */
+ memset(&ss, 0, sizeof(ss));
+ if (str2ip2(ipstring, &ss, 1) == NULL) {
+ return HA_ANON_STR(scramble, ipstring);
+ }
+ sa = &ss;
+ }
+ else {
+ /* address possibly followed by a port/range: use the
+ * full parser; unparsable input is hashed as a string.
+ */
+ sa = str2sa_range(ipstring, NULL, NULL, NULL, NULL, NULL, NULL, &errmsg, NULL, NULL,
+ PA_O_PORT_OK | PA_O_STREAM | PA_O_DGRAM | PA_O_XPRT | PA_O_CONNECT |
+ PA_O_PORT_RANGE | PA_O_PORT_OFS | PA_O_RESOLVE);
+ if (sa == NULL) {
+ return HA_ANON_STR(scramble, ipstring);
+ }
+ }
+ addr_to_str(sa, addr, sizeof(addr));
+ port = get_host_port(sa);
+
+ switch(sa->ss_family) {
+ case AF_INET:
+ /* loopback, broadcast and wildcard addresses are not secret */
+ if (strncmp(addr, "127", 3) == 0 || strncmp(addr, "255", 3) == 0 || strncmp(addr, "0", 1) == 0) {
+ return ipstring;
+ }
+ else {
+ /* the port is kept in clear, only the address is hashed */
+ if (port != 0) {
+ snprintf(hash_word[index_hash], sizeof(hash_word[index_hash]), "IPV4(%06x):%d", HA_ANON(scramble, addr, strlen(addr)), port);
+ return hash_word[index_hash];
+ }
+ else {
+ snprintf(hash_word[index_hash], sizeof(hash_word[index_hash]), "IPV4(%06x)", HA_ANON(scramble, addr, strlen(addr)));
+ return hash_word[index_hash];
+ }
+ }
+ break;
+
+ case AF_INET6:
+ /* the IPv6 loopback is not secret either */
+ if (strcmp(addr, "::1") == 0) {
+ return ipstring;
+ }
+ else {
+ if (port != 0) {
+ snprintf(hash_word[index_hash], sizeof(hash_word[index_hash]), "IPV6(%06x):%d", HA_ANON(scramble, addr, strlen(addr)), port);
+ return hash_word[index_hash];
+ }
+ else {
+ snprintf(hash_word[index_hash], sizeof(hash_word[index_hash]), "IPV6(%06x)", HA_ANON(scramble, addr, strlen(addr)));
+ return hash_word[index_hash];
+ }
+ }
+ break;
+
+ case AF_UNIX:
+ /* unix socket paths are hashed as plain strings */
+ return HA_ANON_STR(scramble, ipstring);
+ break;
+
+ default:
+ /* unknown family: leave untouched */
+ return ipstring;
+ break;
+ };
+ }
+ /* not reached in practice; kept as a safe fallback */
+ return ipstring;
+}
+
+/* Initialize array <fp> with the fingerprint of word <word> by counting the
+ * transitions between characters. <fp> is a 1024-entries array indexed as
+ * 32*from+to. Positions for 'from' and 'to' are:
+ * 1..26=letter, 27=digit, 28=other/begin/end. Others unused.
+ */
+void make_word_fingerprint(uint8_t *fp, const char *word)
+{
+ /* clear the 1024-entry transition table then count <word>'s transitions */
+ memset(fp, 0, 1024);
+ update_word_fingerprint(fp, word);
+}
+
+/* Return the distance between two word fingerprints created by function
+ * make_word_fingerprint(). It's a positive integer calculated as the sum of
+ * the differences between each location.
+ */
+int word_fingerprint_distance(const uint8_t *fp1, const uint8_t *fp2)
+{
+ int i, k, dist = 0;
+
+ /* sum of absolute per-cell differences over the whole table
+ * (L1 / Manhattan distance between the two fingerprints).
+ */
+ for (i = 0; i < 1024; i++) {
+ k = (int)fp1[i] - (int)fp2[i];
+ dist += abs(k);
+ }
+ return dist;
+}
+
+/*
+ * This function compares the loaded openssl version with a string <version>.
+ * It uses the same return codes as compare_current_version:
+ *
+ * -1 : the version in argument is older than the current openssl version
+ * 0 : the version in argument is the same as the current openssl version
+ * 1 : the version in argument is newer than the current openssl version
+ *
+ * Or some errors:
+ * -2 : openssl is not available on this process
+ * -3 : the version in argument is not parsable
+ */
+int openssl_compare_current_version(const char *version)
+{
+#ifdef USE_OPENSSL
+ int numversion;
+
+ /* parse "maj.min.patch" style input into the numeric OPENSSL format;
+ * 0 means the string could not be parsed.
+ */
+ numversion = openssl_version_parser(version);
+ if (numversion == 0)
+ return -3;
+
+ /* compare against the build-time OPENSSL_VERSION_NUMBER */
+ if (numversion < OPENSSL_VERSION_NUMBER)
+ return -1;
+ else if (numversion > OPENSSL_VERSION_NUMBER)
+ return 1;
+ else
+ return 0;
+#else
+ /* no SSL support compiled in */
+ return -2;
+#endif
+}
+
+/*
+ * This function compares the loaded openssl name with a string <name>
+ * This function returns 0 if the OpenSSL name starts like the passed parameter,
+ * 1 otherwise.
+ */
+int openssl_compare_current_name(const char *name)
+{
+#ifdef USE_OPENSSL
+ int name_len = 0;
+ const char *openssl_version = OpenSSL_version(OPENSSL_VERSION);
+
+ if (name) {
+ name_len = strlen(name);
+ /* prefix comparison, only meaningful when <name> is not longer
+ * than the reported version string.
+ * NOTE(review): this returns strncmp()'s raw result, which is
+ * 0 on a prefix match but not necessarily exactly 1 otherwise,
+ * despite the header comment -- callers should only test for 0.
+ */
+ if (strlen(name) <= strlen(openssl_version))
+ return strncmp(openssl_version, name, name_len);
+ }
+#endif
+ /* no match, no name, or no SSL support */
+ return 1;
+}
+
+#if defined(RTLD_DEFAULT) || defined(RTLD_NEXT)
+/* redefine dlopen() so that we can detect unexpected replacement of some
+ * critical symbols, typically init/alloc/free functions coming from alternate
+ * libraries. When called, a tainted flag is set (TAINTED_SHARED_LIBS).
+ * It's important to understand that the dynamic linker will present the
+ * first loaded of each symbol to all libs, so that if haproxy is linked
+ * with a new lib that uses a static inline or a #define to replace an old
+ * function, and a dependency was linked against an older version of that
+ * lib that had a function there, that lib would use all of the newer
+ * versions of the functions that are already loaded in haproxy, except
+ * for that unique function which would continue to be the old one. This
+ * creates all sort of problems when init code allocates smaller structs
+ * than required for example but uses new functions on them, etc. Thus what
+ * we do here is to try to detect API consistency: we take a fingerprint of
+ * a number of known functions, and verify that if they change in a loaded
+ * library, either there all appeared or all disappeared, but not partially.
+ * We can check up to 64 symbols that belong to individual groups that are
+ * checked together.
+ */
+void *dlopen(const char *filename, int flags)
+{
+ /* pointer to the real dlopen(), resolved once on first use */
+ static void *(*_dlopen)(const char *filename, int flags);
+ struct {
+ const char *name; /* symbol to watch */
+ uint64_t bit, grp; /* its bit, and the mask of its whole group */
+ void *curr, *next; /* current and next resolutions of the symbol */
+ } check_syms[] = {
+ /* openssl's libcrypto checks: group bits 0x1f */
+ { .name="OPENSSL_init", .bit = 0x0000000000000001, .grp = 0x000000000000001f, }, // openssl 1.0 / 1.1 / 3.0
+ { .name="OPENSSL_init_crypto", .bit = 0x0000000000000002, .grp = 0x000000000000001f, }, // openssl 1.1 / 3.0
+ { .name="ENGINE_init", .bit = 0x0000000000000004, .grp = 0x000000000000001f, }, // openssl 1.x / 3.x with engine
+ { .name="EVP_CIPHER_CTX_init", .bit = 0x0000000000000008, .grp = 0x000000000000001f, }, // openssl 1.0
+ { .name="HMAC_Init", .bit = 0x0000000000000010, .grp = 0x000000000000001f, }, // openssl 1.x
+
+ /* openssl's libssl checks: group bits 0x3e0 */
+ { .name="OPENSSL_init_ssl", .bit = 0x0000000000000020, .grp = 0x00000000000003e0, }, // openssl 1.1 / 3.0
+ { .name="SSL_library_init", .bit = 0x0000000000000040, .grp = 0x00000000000003e0, }, // openssl 1.x
+ { .name="SSL_is_quic", .bit = 0x0000000000000080, .grp = 0x00000000000003e0, }, // quictls
+ { .name="SSL_CTX_new_ex", .bit = 0x0000000000000100, .grp = 0x00000000000003e0, }, // openssl 3.x
+ { .name="SSL_CTX_get0_security_ex_data", .bit = 0x0000000000000200, .grp = 0x00000000000003e0, }, // openssl 1.x / 3.x
+
+ /* insert only above, 0 must be the last one */
+ { 0 },
+ };
+ const char *trace;
+ uint64_t own_fp, lib_fp; // symbols fingerprints
+ void *addr;
+ void *ret;
+ int sym = 0;
+
+ if (!_dlopen) {
+ /* find the next occurrence of "dlopen", i.e. the libc's one;
+ * refuse to proceed if it cannot be found or resolves to
+ * ourselves (would cause infinite recursion).
+ */
+ _dlopen = get_sym_next_addr("dlopen");
+ if (!_dlopen || _dlopen == dlopen) {
+ _dlopen = NULL;
+ return NULL;
+ }
+ }
+
+ /* save a few pointers to critical symbols. We keep a copy of both the
+ * current and the next value, because we might already have replaced
+ * some of them in an inconsistent way (i.e. not all), and we're only
+ * interested in verifying that a loaded library doesn't come with a
+ * completely different definition that would be incompatible. We'll
+ * keep a fingerprint of our own symbols.
+ */
+ own_fp = 0;
+ for (sym = 0; check_syms[sym].name; sym++) {
+ check_syms[sym].curr = get_sym_curr_addr(check_syms[sym].name);
+ check_syms[sym].next = get_sym_next_addr(check_syms[sym].name);
+ if (check_syms[sym].curr || check_syms[sym].next)
+ own_fp |= check_syms[sym].bit;
+ }
+
+ /* now open the requested lib */
+ ret = _dlopen(filename, flags);
+ if (!ret)
+ return ret;
+
+ /* any successful dlopen() taints the process */
+ mark_tainted(TAINTED_SHARED_LIBS);
+
+ /* and check that critical symbols didn't change */
+ lib_fp = 0;
+ for (sym = 0; check_syms[sym].name; sym++) {
+ addr = dlsym(ret, check_syms[sym].name);
+ if (addr)
+ lib_fp |= check_syms[sym].bit;
+ }
+
+ if (lib_fp != own_fp) {
+ /* let's check what changed: */
+ uint64_t mask = 0;
+
+ for (sym = 0; check_syms[sym].name; sym++) {
+ mask = check_syms[sym].grp;
+
+ /* new group of symbols. If they all appeared together
+ * their use will be consistent. If none appears, it's
+ * just that the lib doesn't use them. If some appear
+ * or disappear, it means the lib relies on a different
+ * dependency and will end up with a mix.
+ */
+ if (!(own_fp & mask) || !(lib_fp & mask) ||
+ (own_fp & mask) == (lib_fp & mask))
+ continue;
+
+ /* let's report a symbol that really changes */
+ if (!((own_fp ^ lib_fp) & check_syms[sym].bit))
+ continue;
+
+ /* OK it's clear that this symbol was redefined */
+ mark_tainted(TAINTED_REDEFINITION);
+
+ /* try to point the user to the Lua call (if any) that
+ * triggered this dlopen(), to ease troubleshooting.
+ */
+ trace = hlua_show_current_location("\n ");
+ ha_warning("dlopen(): shared library '%s' brings a different and inconsistent definition of symbol '%s'. The process cannot be trusted anymore!%s%s\n",
+ filename, check_syms[sym].name,
+ trace ? " Suspected call location: \n " : "",
+ trace ? trace : "");
+ }
+ }
+
+ return ret;
+}
+#endif
+
+static int init_tools_per_thread()
+{
+ /* Let's make each thread start from a different position */
+ statistical_prng_state += tid * MAX_THREADS;
+ /* the xorshift PRNG must never be seeded with zero or it stays stuck */
+ if (!statistical_prng_state)
+ statistical_prng_state++;
+ return 1;
+}
+REGISTER_PER_THREAD_INIT(init_tools_per_thread);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/trace.c b/src/trace.c
new file mode 100644
index 0000000..a233c0d
--- /dev/null
+++ b/src/trace.c
@@ -0,0 +1,997 @@
+/*
+ * Runtime tracing API
+ *
+ * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <import/ist.h>
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/cli.h>
+#include <haproxy/errors.h>
+#include <haproxy/istbuf.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/sink.h>
+#include <haproxy/trace.h>
+
+struct list trace_sources = LIST_HEAD_INIT(trace_sources);
+THREAD_LOCAL struct buffer trace_buf = { };
+
+/* allocates the trace buffers. Returns 0 in case of failure. It is safe to
+ * call this function multiple times if the size changes.
+ */
+static int alloc_trace_buffers_per_thread()
+{
+ /* (re)allocate this thread's trace buffer to the configured bufsize;
+ * my_realloc2() makes repeated calls safe when the size changes.
+ */
+ chunk_init(&trace_buf, my_realloc2(trace_buf.area, global.tune.bufsize), global.tune.bufsize);
+ return !!trace_buf.area;
+}
+
+static void free_trace_buffers_per_thread()
+{
+ /* release this thread's trace buffer */
+ chunk_destroy(&trace_buf);
+}
+
+REGISTER_PER_THREAD_ALLOC(alloc_trace_buffers_per_thread);
+REGISTER_PER_THREAD_FREE(free_trace_buffers_per_thread);
+
+/* pick the lowest non-null argument with a non-null arg_def mask */
+/* each byte of <arg_def> selects one of the four arguments: a1 in bits 0-7,
+ * a2 in bits 8-15, a3 in bits 16-23, a4 in bits 24-31; the first selected
+ * non-null argument wins.
+ */
+static inline const void *trace_pick_arg(uint32_t arg_def, const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ if (arg_def & 0x0000FFFF) {
+ if ((arg_def & 0x000000FF) && a1)
+ return a1;
+ if ((arg_def & 0x0000FF00) && a2)
+ return a2;
+ }
+
+ if (arg_def & 0xFFFF0000) {
+ if ((arg_def & 0x00FF0000) && a3)
+ return a3;
+ if ((arg_def & 0xFF000000) && a4)
+ return a4;
+ }
+
+ /* no eligible argument */
+ return NULL;
+}
+
+/* Reports whether the trace is enabled for the specified arguments, needs to enable
+ * or disable tracking. It gets the same API as __trace() except for <cb> and <msg>
+ * which are not used and were dropped, and plockptr which is an optional pointer to
+ * the lockptr to be updated (or NULL) for tracking. The function returns:
+ * 0 if the trace is not enabled for the module or these values
+ * <0 if the trace matches some locking criteria but don't have the proper level.
+ * In this case the interested caller might have to consider disabling tracking.
+ * >0 if the trace is enabled for the given criteria.
+ * In all cases, <plockptr> will only be set if non-null and if a locking criterion
+ * matched. It will be up to the caller to enable tracking if desired. A casual
+ * tester not interested in adjusting tracking (i.e. calling the function before
+ * deciding so prepare a buffer to be dumped) will only need to pass 0 for plockptr
+ * and check if the result is >0.
+ */
+int __trace_enabled(enum trace_level level, uint64_t mask, struct trace_source *src,
+ const struct ist where, const char *func,
+ const void *a1, const void *a2, const void *a3, const void *a4,
+ const void **plockptr)
+{
+ const struct listener *li = NULL;
+ const struct proxy *fe = NULL;
+ const struct proxy *be = NULL;
+ const struct server *srv = NULL;
+ const struct session *sess = NULL;
+ const struct stream *strm = NULL;
+ const struct connection *conn = NULL;
+ const struct check *check = NULL;
+ const struct quic_conn *qc = NULL;
+ const struct appctx *appctx = NULL;
+ const void *lockon_ptr = NULL;
+
+ /* fast path: source entirely stopped */
+ if (likely(src->state == TRACE_STATE_STOPPED))
+ return 0;
+
+ /* check that at least one action is interested by this event */
+ if (((src->report_events | src->start_events | src->pause_events | src->stop_events) & mask) == 0)
+ return 0;
+
+ /* retrieve available information from the caller's arguments */
+ if (src->arg_def & TRC_ARGS_CONN)
+ conn = trace_pick_arg(src->arg_def & TRC_ARGS_CONN, a1, a2, a3, a4);
+
+ if (src->arg_def & TRC_ARGS_SESS)
+ sess = trace_pick_arg(src->arg_def & TRC_ARGS_SESS, a1, a2, a3, a4);
+
+ if (src->arg_def & TRC_ARGS_STRM)
+ strm = trace_pick_arg(src->arg_def & TRC_ARGS_STRM, a1, a2, a3, a4);
+
+ if (src->arg_def & TRC_ARGS_CHK)
+ check = trace_pick_arg(src->arg_def & TRC_ARGS_CHK, a1, a2, a3, a4);
+
+ if (src->arg_def & TRC_ARGS_QCON)
+ qc = trace_pick_arg(src->arg_def & TRC_ARGS_QCON, a1, a2, a3, a4);
+
+ if (src->arg_def & TRC_ARGS_APPCTX)
+ appctx = trace_pick_arg(src->arg_def & TRC_ARGS_APPCTX, a1, a2, a3, a4);
+
+ /* derive the session from whichever higher-level object we got */
+ if (!sess && strm)
+ sess = strm->sess;
+ else if (!sess && conn && LIST_INLIST(&conn->session_list))
+ sess = conn->owner;
+ else if (!sess && check)
+ sess = check->sess;
+ else if (!sess && appctx)
+ sess = appctx->sess;
+
+ /* then the frontend side (listener/frontend) from the session */
+ if (sess) {
+ fe = sess->fe;
+ li = sess->listener;
+ }
+
+ if (!li && conn)
+ li = objt_listener(conn->target);
+
+ if (li && !fe)
+ fe = li->bind_conf->frontend;
+
+ /* and the backend side (server/backend) from the stream or check */
+ if (strm) {
+ be = strm->be;
+ srv = strm->srv_conn;
+ }
+ if (check) {
+ srv = check->server;
+ be = (srv ? srv->proxy : NULL);
+ }
+
+ if (!srv && conn)
+ srv = objt_server(conn->target);
+
+ if (srv && !be)
+ be = srv->proxy;
+
+ if (!be && conn)
+ be = objt_proxy(conn->target);
+
+ /* TODO: add handling of filters here, return if no match (not even update states) */
+
+ /* check if we need to start the trace now */
+ if (src->state == TRACE_STATE_WAITING) {
+ if ((src->start_events & mask) == 0)
+ return 0;
+
+ /* TODO: add update of lockon+lockon_ptr here */
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_RUNNING);
+ }
+
+ /* we may want to lock on a particular object */
+ if (src->lockon != TRACE_LOCKON_NOTHING) {
+ switch (src->lockon) {
+ case TRACE_LOCKON_BACKEND: lockon_ptr = be; break;
+ case TRACE_LOCKON_CONNECTION: lockon_ptr = conn; break;
+ case TRACE_LOCKON_FRONTEND: lockon_ptr = fe; break;
+ case TRACE_LOCKON_LISTENER: lockon_ptr = li; break;
+ case TRACE_LOCKON_SERVER: lockon_ptr = srv; break;
+ case TRACE_LOCKON_SESSION: lockon_ptr = sess; break;
+ case TRACE_LOCKON_STREAM: lockon_ptr = strm; break;
+ case TRACE_LOCKON_CHECK: lockon_ptr = check; break;
+ case TRACE_LOCKON_THREAD: lockon_ptr = ti; break;
+ case TRACE_LOCKON_QCON: lockon_ptr = qc; break;
+ case TRACE_LOCKON_APPCTX: lockon_ptr = appctx; break;
+ case TRACE_LOCKON_ARG1: lockon_ptr = a1; break;
+ case TRACE_LOCKON_ARG2: lockon_ptr = a2; break;
+ case TRACE_LOCKON_ARG3: lockon_ptr = a3; break;
+ case TRACE_LOCKON_ARG4: lockon_ptr = a4; break;
+ default: break; // silence stupid gcc -Wswitch
+ }
+
+ /* already locked on a different object: not for us */
+ if (src->lockon_ptr && src->lockon_ptr != lockon_ptr)
+ return 0;
+
+ /* NOTE(review): <plockptr> is dereferenced here, yet the header
+ * comment above says casual callers may pass 0 -- confirm that
+ * such callers can never reach this point with a lockon
+ * criterion configured.
+ */
+ if (*plockptr && !src->lockon_ptr && lockon_ptr && src->state == TRACE_STATE_RUNNING)
+ *plockptr = lockon_ptr;
+ }
+
+ /* here the trace is running and is tracking a desired item */
+ if ((src->report_events & mask) == 0 || level > src->level) {
+ /* tracking did match, and might have to be disabled */
+ return -1;
+ }
+
+ /* OK trace still enabled */
+ return 1;
+}
+
+/* write a message for the given trace source */
+void __trace(enum trace_level level, uint64_t mask, struct trace_source *src,
+ const struct ist where, const char *func,
+ const void *a1, const void *a2, const void *a3, const void *a4,
+ void (*cb)(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4),
+ const struct ist msg)
+{
+ const void *lockon_ptr;
+ struct ist ist_func = ist(func);
+ char tnum[4];
+ struct ist line[12];
+ int words = 0;
+ int ret;
+
+ /* first check whether the trace applies, and possibly pick up a new
+ * object to lock on.
+ */
+ lockon_ptr = NULL;
+ ret = __trace_enabled(level, mask, src, where, func, a1, a2, a3, a4, &lockon_ptr);
+ if (lockon_ptr)
+ HA_ATOMIC_STORE(&src->lockon_ptr, lockon_ptr);
+
+ if (ret <= 0) {
+ if (ret < 0) // may have to disable tracking
+ goto end;
+ return;
+ }
+
+ /* log the logging location truncated to 10 chars from the right so that
+ * the line number and the end of the file name are there.
+ */
+ line[words++] = ist("[");
+ /* two decimal digits of thread id (NOTE(review): assumes tid < 100) */
+ tnum[0] = '0' + tid / 10;
+ tnum[1] = '0' + tid % 10;
+ tnum[2] = '|';
+ tnum[3] = 0;
+ line[words++] = ist(tnum);
+ line[words++] = src->name;
+ line[words++] = ist("|");
+ line[words++] = ist2("012345" + level, 1); // "0" to "5"
+ line[words++] = ist("|");
+ /* keep only the last 13 chars of the "file:line" location */
+ line[words] = where;
+ if (line[words].len > 13) {
+ line[words].ptr += (line[words].len - 13);
+ line[words].len = 13;
+ }
+ words++;
+ line[words++] = ist("] ");
+
+ if (isttest(ist_func)) {
+ line[words++] = ist_func;
+ line[words++] = ist("(): ");
+ }
+
+ /* fall back to the source's default decoding callback if none given */
+ if (!cb)
+ cb = src->default_cb;
+
+ if (cb && src->verbosity) {
+ /* decode function passed, we want to pre-fill the
+ * buffer with the message and let the decode function
+ * do its job, possibly even overwriting it.
+ */
+ b_reset(&trace_buf);
+ b_istput(&trace_buf, msg);
+ cb(level, mask, src, where, ist_func, a1, a2, a3, a4);
+ line[words] = ist2(trace_buf.area, trace_buf.data);
+ words++;
+ }
+ else {
+ /* Note that here we could decide to print some args whose type
+ * is known, when verbosity is above the quiet level, and even
+ * to print the name and values of those which are declared for
+ * lock-on.
+ */
+ line[words++] = msg;
+ }
+
+ if (src->sink)
+ sink_write(src->sink, LOG_HEADER_NONE, 0, line, words);
+
+ end:
+ /* check if we need to stop the trace now */
+ if ((src->stop_events & mask) != 0) {
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_STOPPED);
+ }
+ else if ((src->pause_events & mask) != 0) {
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_WAITING);
+ }
+}
+
+/* this callback may be used when no output modification is desired */
+void trace_no_cb(enum trace_level level, uint64_t mask, const struct trace_source *src,
+ const struct ist where, const struct ist func,
+ const void *a1, const void *a2, const void *a3, const void *a4)
+{
+ /* do nothing: the pre-filled message is emitted unmodified */
+}
+
+/* registers trace source <source>. Modifies the list element!
+ * The {start,pause,stop,report} events are not changed so the source may
+ * preset them.
+ */
+void trace_register_source(struct trace_source *source)
+{
+ /* reset runtime fields to safe defaults; the event masks are left
+ * untouched so the source may preset them before registering.
+ */
+ source->lockon = TRACE_LOCKON_NOTHING;
+ source->level = TRACE_LEVEL_USER;
+ source->verbosity = 1;
+ source->sink = NULL;
+ source->state = TRACE_STATE_STOPPED;
+ source->lockon_ptr = NULL;
+ /* note: this modifies source->source_link */
+ LIST_APPEND(&trace_sources, &source->source_link);
+}
+
+/* looks up a registered trace source by name; returns NULL if not found */
+struct trace_source *trace_find_source(const char *name)
+{
+ struct trace_source *src;
+ const struct ist iname = ist(name);
+
+ list_for_each_entry(src, &trace_sources, source_link)
+ if (isteq(src->name, iname))
+ return src;
+ return NULL;
+}
+
+/* looks up event <name> in the zero-mask-terminated array <ev>; returns NULL
+ * if not found or if <ev> is NULL.
+ */
+const struct trace_event *trace_find_event(const struct trace_event *ev, const char *name)
+{
+ for (; ev && ev->mask; ev++)
+ if (strcmp(ev->name, name) == 0)
+ return ev;
+ return NULL;
+}
+
+/* Returns the level value or a negative error code. */
+static int trace_parse_level(const char *level)
+{
+ /* NULL or unknown names are rejected with -1 */
+ if (!level)
+ return -1;
+
+ if (strcmp(level, "error") == 0)
+ return TRACE_LEVEL_ERROR;
+ else if (strcmp(level, "user") == 0)
+ return TRACE_LEVEL_USER;
+ else if (strcmp(level, "proto") == 0)
+ return TRACE_LEVEL_PROTO;
+ else if (strcmp(level, "state") == 0)
+ return TRACE_LEVEL_STATE;
+ else if (strcmp(level, "data") == 0)
+ return TRACE_LEVEL_DATA;
+ else if (strcmp(level, "developer") == 0)
+ return TRACE_LEVEL_DEVELOPER;
+ else
+ return -1;
+}
+
+/* Returns the verbosity value or a negative error code. */
+static int trace_source_parse_verbosity(struct trace_source *src,
+ const char *verbosity)
+{
+ const struct name_desc *nd;
+ int ret;
+
+ /* verbosity 0 is always "quiet", whatever the source */
+ if (strcmp(verbosity, "quiet") == 0) {
+ ret = 0;
+ goto end;
+ }
+
+ /* Only "quiet" is defined for all sources. Other identifiers are
+ * specific to trace source.
+ */
+ BUG_ON(!src);
+
+ if (!src->decoding || !src->decoding[0].name) {
+ /* source without decoding levels: only "default" (=1) exists */
+ if (strcmp(verbosity, "default") != 0)
+ return -1;
+
+ ret = 1;
+ }
+ else {
+ /* look the name up in the source's decoding table */
+ for (nd = src->decoding; nd->name && nd->desc; nd++)
+ if (strcmp(verbosity, nd->name) == 0)
+ break;
+
+ if (!nd->name || !nd->desc)
+ return -1;
+
+ /* +1 because verbosity 0 is reserved for "quiet" */
+ ret = nd - src->decoding + 1;
+ }
+
+ end:
+ return ret;
+}
+
+/* Parse a "trace" statement. Returns a severity as a LOG_* level and a status
+ * message that may be delivered to the user, in <msg>. The message will be
+ * nulled first and msg must be an allocated pointer. A null status message output
+ * indicates no error. Be careful not to use the return value as a boolean, as
+ * LOG_* values are not ordered as one could imagine (LOG_EMERG is zero). The
+ * function may/will use the trash buffer as the storage for the response
+ * message so that the caller never needs to release anything.
+ */
+static int trace_parse_statement(char **args, char **msg)
+{
+ struct trace_source *src;
+ uint64_t *ev_ptr = NULL;
+
+ /* no error by default */
+ *msg = NULL;
+
+ if (!*args[1]) {
+ /* no arg => report the list of supported sources as a warning */
+ chunk_printf(&trash,
+ "Supported trace sources and states (.=stopped, w=waiting, R=running) :\n"
+ " [.] 0 : not a source, will immediately stop all traces\n"
+ );
+
+ list_for_each_entry(src, &trace_sources, source_link)
+ chunk_appendf(&trash, " [%c] %-10s : %s\n", trace_state_char(src->state), src->name.ptr, src->desc);
+
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+
+ if (strcmp(args[1], "0") == 0) {
+ /* emergency stop of all traces */
+ list_for_each_entry(src, &trace_sources, source_link)
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_STOPPED);
+ *msg = strdup("All traces now stopped");
+ return LOG_NOTICE;
+ }
+
+ src = trace_find_source(args[1]);
+ if (!src) {
+ memprintf(msg, "No such trace source '%s'", args[1]);
+ return LOG_ERR;
+ }
+
+ if (!*args[2]) {
+ *msg = "Supported commands:\n"
+ " event : list/enable/disable source-specific event reporting\n"
+ //" filter : list/enable/disable generic filters\n"
+ " level : list/set trace reporting level\n"
+ " lock : automatic lock on thread/connection/stream/...\n"
+ " pause : pause and automatically restart after a specific event\n"
+ " sink : list/set event sinks\n"
+ " start : start immediately or after a specific event\n"
+ " stop : stop immediately or after a specific event\n"
+ " verbosity : list/set trace output verbosity\n";
+ *msg = strdup(*msg);
+ return LOG_WARNING;
+ }
+ else if ((strcmp(args[2], "event") == 0 && (ev_ptr = &src->report_events)) ||
+ (strcmp(args[2], "pause") == 0 && (ev_ptr = &src->pause_events)) ||
+ (strcmp(args[2], "start") == 0 && (ev_ptr = &src->start_events)) ||
+ (strcmp(args[2], "stop") == 0 && (ev_ptr = &src->stop_events))) {
+ const struct trace_event *ev;
+ const char *name = args[3];
+ int neg = 0;
+ int i;
+
+ /* skip prefix '!', '-', '+' and remind negation */
+ while (*name) {
+ if (*name == '!' || *name == '-')
+ neg = 1;
+ else if (*name == '+')
+ neg = 0;
+ else
+ break;
+ name++;
+ }
+
+ if (!*name) {
+ chunk_printf(&trash, "Supported events for source %s (+=enabled, -=disabled):\n", src->name.ptr);
+ if (ev_ptr != &src->report_events)
+ chunk_appendf(&trash, " - now : don't wait for events, immediately change the state\n");
+ chunk_appendf(&trash, " - none : disable all event types\n");
+ chunk_appendf(&trash, " - any : enable all event types\n");
+ for (i = 0; src->known_events && src->known_events[i].mask; i++) {
+ chunk_appendf(&trash, " %c %-12s : %s\n",
+ trace_event_char(*ev_ptr, src->known_events[i].mask),
+ src->known_events[i].name, src->known_events[i].desc);
+ }
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+
+ if (strcmp(name, "now") == 0 && ev_ptr != &src->report_events) {
+ HA_ATOMIC_STORE(ev_ptr, 0);
+ if (ev_ptr == &src->pause_events) {
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_WAITING);
+ }
+ else if (ev_ptr == &src->start_events) {
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_RUNNING);
+ }
+ else if (ev_ptr == &src->stop_events) {
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ HA_ATOMIC_STORE(&src->state, TRACE_STATE_STOPPED);
+ }
+ return 0;
+ }
+
+ if (strcmp(name, "none") == 0)
+ HA_ATOMIC_STORE(ev_ptr, 0);
+ else if (strcmp(name, "any") == 0)
+ HA_ATOMIC_STORE(ev_ptr, ~0);
+ else {
+ ev = trace_find_event(src->known_events, name);
+ if (!ev) {
+ memprintf(msg, "No such trace event '%s'", name);
+ return LOG_ERR;
+ }
+
+ if (!neg)
+ HA_ATOMIC_OR(ev_ptr, ev->mask);
+ else
+ HA_ATOMIC_AND(ev_ptr, ~ev->mask);
+ }
+ }
+ else if (strcmp(args[2], "sink") == 0) {
+ const char *name = args[3];
+ struct sink *sink;
+
+ if (!*name) {
+ chunk_printf(&trash, "Supported sinks for source %s (*=current):\n", src->name.ptr);
+ chunk_appendf(&trash, " %c none : no sink\n", src->sink ? ' ' : '*');
+ list_for_each_entry(sink, &sink_list, sink_list) {
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ src->sink == sink ? '*' : ' ',
+ sink->name, sink->desc);
+ }
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+
+ if (strcmp(name, "none") == 0)
+ sink = NULL;
+ else {
+ sink = sink_find(name);
+ if (!sink) {
+ memprintf(msg, "No such trace sink '%s'", name);
+ return LOG_ERR;
+ }
+ }
+
+ HA_ATOMIC_STORE(&src->sink, sink);
+ }
+ else if (strcmp(args[2], "level") == 0) {
+ const char *name = args[3];
+ int level;
+
+ if (!*name) {
+ chunk_printf(&trash, "Supported trace levels for source %s:\n", src->name.ptr);
+ chunk_appendf(&trash, " %c error : report errors\n",
+ src->level == TRACE_LEVEL_ERROR ? '*' : ' ');
+ chunk_appendf(&trash, " %c user : also information useful to the end user\n",
+ src->level == TRACE_LEVEL_USER ? '*' : ' ');
+ chunk_appendf(&trash, " %c proto : also protocol-level updates\n",
+ src->level == TRACE_LEVEL_PROTO ? '*' : ' ');
+ chunk_appendf(&trash, " %c state : also report internal state changes\n",
+ src->level == TRACE_LEVEL_STATE ? '*' : ' ');
+ chunk_appendf(&trash, " %c data : also report data transfers\n",
+ src->level == TRACE_LEVEL_DATA ? '*' : ' ');
+ chunk_appendf(&trash, " %c developer : also report information useful only to the developer\n",
+ src->level == TRACE_LEVEL_DEVELOPER ? '*' : ' ');
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+
+ level = trace_parse_level(name);
+ if (level < 0) {
+ memprintf(msg, "No such trace level '%s'", name);
+ return LOG_ERR;
+ }
+
+ HA_ATOMIC_STORE(&src->level, level);
+ }
+ else if (strcmp(args[2], "lock") == 0) {
+ const char *name = args[3];
+
+ if (!*name) {
+ chunk_printf(&trash, "Supported lock-on criteria for source %s:\n", src->name.ptr);
+ if (src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_STRM))
+ chunk_appendf(&trash, " %c backend : lock on the backend that started the trace\n",
+ src->lockon == TRACE_LOCKON_BACKEND ? '*' : ' ');
+
+ if (src->arg_def & TRC_ARGS_CHK)
+ chunk_appendf(&trash, " %c check : lock on the check that started the trace\n",
+ src->lockon == TRACE_LOCKON_CHECK ? '*' : ' ');
+
+ if (src->arg_def & TRC_ARGS_CONN)
+ chunk_appendf(&trash, " %c connection : lock on the connection that started the trace\n",
+ src->lockon == TRACE_LOCKON_CONNECTION ? '*' : ' ');
+
+ if (src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM))
+ chunk_appendf(&trash, " %c frontend : lock on the frontend that started the trace\n",
+ src->lockon == TRACE_LOCKON_FRONTEND ? '*' : ' ');
+
+ if (src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM))
+ chunk_appendf(&trash, " %c listener : lock on the listener that started the trace\n",
+ src->lockon == TRACE_LOCKON_LISTENER ? '*' : ' ');
+
+ chunk_appendf(&trash, " %c nothing : do not lock on anything\n",
+ src->lockon == TRACE_LOCKON_NOTHING ? '*' : ' ');
+
+ if (src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_STRM))
+ chunk_appendf(&trash, " %c server : lock on the server that started the trace\n",
+ src->lockon == TRACE_LOCKON_SERVER ? '*' : ' ');
+
+ if (src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM))
+ chunk_appendf(&trash, " %c session : lock on the session that started the trace\n",
+ src->lockon == TRACE_LOCKON_SESSION ? '*' : ' ');
+
+ if (src->arg_def & TRC_ARGS_STRM)
+ chunk_appendf(&trash, " %c stream : lock on the stream that started the trace\n",
+ src->lockon == TRACE_LOCKON_STREAM ? '*' : ' ');
+
+ if (src->arg_def & TRC_ARGS_APPCTX)
+ chunk_appendf(&trash, " %c applet : lock on the applet that started the trace\n",
+ src->lockon == TRACE_LOCKON_APPCTX ? '*' : ' ');
+
+ chunk_appendf(&trash, " %c thread : lock on the thread that started the trace\n",
+ src->lockon == TRACE_LOCKON_THREAD ? '*' : ' ');
+
+ if (src->lockon_args && src->lockon_args[0].name)
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ src->lockon == TRACE_LOCKON_ARG1 ? '*' : ' ',
+ src->lockon_args[0].name, src->lockon_args[0].desc);
+
+ if (src->lockon_args && src->lockon_args[1].name)
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ src->lockon == TRACE_LOCKON_ARG2 ? '*' : ' ',
+ src->lockon_args[1].name, src->lockon_args[1].desc);
+
+ if (src->lockon_args && src->lockon_args[2].name)
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ src->lockon == TRACE_LOCKON_ARG3 ? '*' : ' ',
+ src->lockon_args[2].name, src->lockon_args[2].desc);
+
+ if (src->lockon_args && src->lockon_args[3].name)
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ src->lockon == TRACE_LOCKON_ARG4 ? '*' : ' ',
+ src->lockon_args[3].name, src->lockon_args[3].desc);
+
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+ else if ((src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_STRM)) && strcmp(name, "backend") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_BACKEND);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & TRC_ARGS_CHK) && strcmp(name, "check") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_CHECK);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & TRC_ARGS_CONN) && strcmp(name, "connection") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_CONNECTION);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM)) && strcmp(name, "frontend") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_FRONTEND);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM)) && strcmp(name, "listener") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_LISTENER);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (strcmp(name, "nothing") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_NOTHING);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_STRM)) && strcmp(name, "server") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_SERVER);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & (TRC_ARGS_CONN|TRC_ARGS_SESS|TRC_ARGS_STRM)) && strcmp(name, "session") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_SESSION);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & TRC_ARGS_STRM) && strcmp(name, "stream") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_STREAM);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if ((src->arg_def & TRC_ARGS_APPCTX) && strcmp(name, "appctx") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_APPCTX);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (strcmp(name, "thread") == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_THREAD);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (src->lockon_args && src->lockon_args[0].name && strcmp(name, src->lockon_args[0].name) == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_ARG1);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (src->lockon_args && src->lockon_args[1].name && strcmp(name, src->lockon_args[1].name) == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_ARG2);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (src->lockon_args && src->lockon_args[2].name && strcmp(name, src->lockon_args[2].name) == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_ARG3);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else if (src->lockon_args && src->lockon_args[3].name && strcmp(name, src->lockon_args[3].name) == 0) {
+ HA_ATOMIC_STORE(&src->lockon, TRACE_LOCKON_ARG4);
+ HA_ATOMIC_STORE(&src->lockon_ptr, NULL);
+ }
+ else {
+ memprintf(msg, "Unsupported lock-on criterion '%s'", name);
+ return LOG_ERR;
+ }
+ }
+ else if (strcmp(args[2], "verbosity") == 0) {
+ const char *name = args[3];
+ const struct name_desc *nd;
+ int verbosity;
+
+ if (!*name) {
+ chunk_printf(&trash, "Supported trace verbosities for source %s:\n", src->name.ptr);
+ chunk_appendf(&trash, " %c quiet : only report basic information with no decoding\n",
+ src->verbosity == 0 ? '*' : ' ');
+ if (!src->decoding || !src->decoding[0].name) {
+ chunk_appendf(&trash, " %c default : report extra information when available\n",
+ src->verbosity > 0 ? '*' : ' ');
+ } else {
+ for (nd = src->decoding; nd->name && nd->desc; nd++)
+ chunk_appendf(&trash, " %c %-10s : %s\n",
+ nd == (src->decoding + src->verbosity - 1) ? '*' : ' ',
+ nd->name, nd->desc);
+ }
+ trash.area[trash.data] = 0;
+ *msg = strdup(trash.area);
+ return LOG_WARNING;
+ }
+
+ verbosity = trace_source_parse_verbosity(src, name);
+ if (verbosity < 0) {
+ memprintf(msg, "No such verbosity level '%s'", name);
+ return LOG_ERR;
+ }
+
+ HA_ATOMIC_STORE(&src->verbosity, verbosity);
+ }
+ else {
+ memprintf(msg, "Unknown trace keyword '%s'", args[2]);
+ return LOG_ERR;
+ }
+ return 0;
+
+}
+
+/* Apply command-line ("-dt") trace settings to source <src>: force the sink
+ * to stderr, use <level> (TRACE_LEVEL_ERROR if negative, i.e. unspecified)
+ * and <verbosity> (1 if negative), then start the source immediately.
+ */
+void _trace_parse_cmd(struct trace_source *src, int level, int verbosity)
+{
+	src->sink = sink_find("stderr");
+	/* negative values mean "not specified on the command line" */
+	src->level = level >= 0 ? level : TRACE_LEVEL_ERROR;
+	src->verbosity = verbosity >= 0 ? verbosity : 1;
+	src->state = TRACE_STATE_RUNNING;
+}
+
+/* Parse a process argument specified via "-dt".
+ *
+ * <arg> is a comma-separated list of "name:level:verbosity" tuples where each
+ * field may be empty. An empty (or absent) source name applies the tuple to
+ * every registered source. A NULL <arg> enables all sources at error level.
+ * NOTE(review): relies on strtok()'s static state, so this is only safe
+ * during single-threaded startup — confirm no runtime caller exists.
+ *
+ * Returns 0 on success else non-zero.
+ */
+int trace_parse_cmd(char *arg, char **errmsg)
+{
+	char *str;
+
+	if (!arg) {
+		/* No trace specification, activate all sources on error level. */
+		struct trace_source *src = NULL;
+
+		list_for_each_entry(src, &trace_sources, source_link)
+			_trace_parse_cmd(src, -1, -1);
+		return 0;
+	}
+
+	while ((str = strtok(arg, ","))) {
+		struct trace_source *src = NULL;
+		char *field, *name;
+		char *sep;
+		int level = -1, verbosity = -1;
+
+		/* 1. name */
+		name = str;
+		sep = strchr(str, ':');
+		if (sep) {
+			str = sep + 1;
+			*sep = '\0';
+		}
+		else {
+			str = NULL;
+		}
+
+		/* an empty name leaves src NULL, meaning "all sources" */
+		if (strlen(name)) {
+			src = trace_find_source(name);
+			if (!src) {
+				memprintf(errmsg, "unknown trace source '%s'", name);
+				return 1;
+			}
+		}
+
+		if (!str || !strlen(str))
+			goto parse;
+
+		/* 2. level */
+		field = str;
+		sep = strchr(str, ':');
+		if (sep) {
+			str = sep + 1;
+			*sep = '\0';
+		}
+		else {
+			str = NULL;
+		}
+
+		if (strlen(field)) {
+			level = trace_parse_level(field);
+			if (level < 0) {
+				memprintf(errmsg, "no such level '%s'", field);
+				return 1;
+			}
+		}
+
+		if (!str || !strlen(str))
+			goto parse;
+
+		/* 3. verbosity */
+		field = str;
+		if (strchr(field, ':')) {
+			memprintf(errmsg, "too many double-colon separator");
+			return 1;
+		}
+
+		/* verbosity names are per-source, except "quiet" which is
+		 * common to all of them
+		 */
+		if (!src && strcmp(field, "quiet") != 0) {
+			memprintf(errmsg, "trace source must be specified for verbosity other than 'quiet'");
+			return 1;
+		}
+
+		verbosity = trace_source_parse_verbosity(src, field);
+		if (verbosity < 0) {
+			memprintf(errmsg, "no such verbosity '%s' for source '%s'", field, name);
+			return 1;
+		}
+
+	parse:
+		/* apply to the named source, or to every source if none given */
+		if (src) {
+			_trace_parse_cmd(src, level, verbosity);
+		}
+		else {
+			list_for_each_entry(src, &trace_sources, source_link)
+				_trace_parse_cmd(src, level, verbosity);
+		}
+
+		/* Reset arg to NULL for strtok. */
+		arg = NULL;
+	}
+
+	return 0;
+}
+
+/* parse a "trace" statement in the "global" section, returns 1 if a message
+ * is returned, otherwise zero. Messages of severity >= LOG_WARNING are only
+ * printed, while more severe ones are handed back to the caller via <err>.
+ */
+static int cfg_parse_trace(char **args, int section_type, struct proxy *curpx,
+                           const struct proxy *defpx, const char *file, int line,
+                           char **err)
+{
+	char *msg;
+	int severity;
+
+	severity = trace_parse_statement(args, &msg);
+	if (msg) {
+		if (severity >= LOG_NOTICE)
+			ha_notice("parsing [%s:%d] : '%s': %s\n", file, line, args[0], msg);
+		else if (severity >= LOG_WARNING)
+			ha_warning("parsing [%s:%d] : '%s': %s\n", file, line, args[0], msg);
+		else {
+			/* let the caller free the message */
+			*err = msg;
+			return -1;
+		}
+		ha_free(&msg);
+	}
+	return 0;
+}
+
+/* parse the "trace" CLI command, returns 1 if a message is returned,
+ * otherwise zero. Requires operator level; the actual work is delegated to
+ * trace_parse_statement() and its message (if any) is emitted with its
+ * severity through cli_dynmsg(), which takes ownership of <msg>.
+ */
+static int cli_parse_trace(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *msg;
+	int severity;
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	severity = trace_parse_statement(args, &msg);
+	if (msg)
+		return cli_dynmsg(appctx, severity, msg);
+
+	/* total success */
+	return 0;
+}
+
+/* parse the "show trace" CLI command, returns 1 if a message is returned,
+ * otherwise zero. Without an argument it lists all registered sources and
+ * their state; with a source name (operator level required) it dumps that
+ * source's sink and per-event report/start/stop/pause flags.
+ */
+static int cli_parse_show_trace(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	struct trace_source *src;
+	const struct sink *sink;
+	int i;
+
+	args++; // make args[1] the 1st arg
+
+	if (!*args[1]) {
+		/* no arg => report the list of supported sources */
+		chunk_printf(&trash,
+		             "Supported trace sources and states (.=stopped, w=waiting, R=running) :\n"
+		             );
+
+		list_for_each_entry(src, &trace_sources, source_link) {
+			sink = src->sink;
+			chunk_appendf(&trash, " [%c] %-10s -> %s [drp %u] [%s]\n",
+			              trace_state_char(src->state), src->name.ptr,
+			              sink ? sink->name : "none",
+			              sink ? sink->ctx.dropped : 0,
+			              src->desc);
+		}
+
+		/* NUL-terminate the trash chunk before handing it out as a string */
+		trash.area[trash.data] = 0;
+		return cli_msg(appctx, LOG_INFO, trash.area);
+	}
+
+	if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+		return 1;
+
+	src = trace_find_source(args[1]);
+	if (!src)
+		return cli_err(appctx, "No such trace source");
+
+	sink = src->sink;
+	chunk_printf(&trash, "Trace status for %s:\n", src->name.ptr);
+	chunk_appendf(&trash, " - sink: %s [%u dropped]\n",
+	              sink ? sink->name : "none", sink ? sink->ctx.dropped : 0);
+
+	/* one line per known event with its state in each of the 4 masks */
+	chunk_appendf(&trash, " - event name : report start stop pause\n");
+	for (i = 0; src->known_events && src->known_events[i].mask; i++) {
+		chunk_appendf(&trash, " %-12s : %c %c %c %c\n",
+		              src->known_events[i].name,
+		              trace_event_char(src->report_events, src->known_events[i].mask),
+		              trace_event_char(src->start_events, src->known_events[i].mask),
+		              trace_event_char(src->stop_events, src->known_events[i].mask),
+		              trace_event_char(src->pause_events, src->known_events[i].mask));
+	}
+
+	trash.area[trash.data] = 0;
+	return cli_msg(appctx, LOG_WARNING, trash.area);
+}
+
+/* register the "trace" and "show trace" CLI keywords */
+static struct cli_kw_list cli_kws = {{ },{
+	{ { "trace", NULL }, "trace [<module>|0] [cmd [args...]] : manage live tracing (empty to list, 0 to stop all)", cli_parse_trace, NULL, NULL },
+	{ { "show", "trace", NULL }, "show trace [<module>] : show live tracing state", cli_parse_show_trace, NULL, NULL },
+	{{},}
+}};
+
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
+
+/* register the experimental "trace" keyword for the "global" config section */
+static struct cfg_kw_list cfg_kws = {ILH, {
+	{ CFG_GLOBAL, "trace", cfg_parse_trace, KWF_EXPERIMENTAL },
+	{ /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/uri_auth.c b/src/uri_auth.c
new file mode 100644
index 0000000..db7e6c6
--- /dev/null
+++ b/src/uri_auth.c
@@ -0,0 +1,318 @@
+/*
+ * URI-based user authentication using the HTTP basic method.
+ *
+ * Copyright 2006-2007 Willy Tarreau <w@1wt.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <haproxy/api.h>
+#include <haproxy/base64.h>
+#include <haproxy/errors.h>
+#include <haproxy/list.h>
+#include <haproxy/stats-t.h>
+#include <haproxy/uri_auth.h>
+
+
+/*
+ * Initializes a basic uri_auth structure header and returns a pointer to it.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure; in that case a freshly allocated
+ * structure is released and *root is left untouched.
+ */
+struct uri_auth *stats_check_init_uri_auth(struct uri_auth **root)
+{
+	struct uri_auth *u;
+
+	if (!root || !*root) {
+		if ((u = calloc(1, sizeof (*u))) == NULL)
+			goto out_u;
+
+		LIST_INIT(&u->http_req_rules);
+		LIST_INIT(&u->admin_rules);
+	} else
+		u = *root;
+
+	/* install the default stats URI if none was configured yet */
+	if (!u->uri_prefix) {
+		u->uri_len = strlen(STATS_DEFAULT_URI);
+		if ((u->uri_prefix = strdup(STATS_DEFAULT_URI)) == NULL)
+			goto out_uri;
+	}
+
+	if (root && !*root)
+		*root = u;
+
+	return u;
+
+ out_uri:
+	/* only free <u> when it was allocated by this call */
+	if (!root || !*root)
+		free(u);
+ out_u:
+	return NULL;
+}
+
+/*
+ * Returns a default uri_auth with <uri> set as the uri_prefix.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure; <uri> is duplicated, the caller keeps
+ * ownership of its copy.
+ */
+struct uri_auth *stats_set_uri(struct uri_auth **root, char *uri)
+{
+	struct uri_auth *u;
+	char *uri_copy;
+	int uri_len;
+
+	/* duplicate first so a failed init cannot leave a half-updated struct */
+	uri_len = strlen(uri);
+	if ((uri_copy = strdup(uri)) == NULL)
+		goto out_uri;
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		goto out_u;
+
+	/* replace the default prefix installed by the init helper */
+	free(u->uri_prefix);
+	u->uri_prefix = uri_copy;
+	u->uri_len = uri_len;
+	return u;
+
+ out_u:
+	free(uri_copy);
+ out_uri:
+	return NULL;
+}
+
+/*
+ * Returns a default uri_auth with <realm> set as the realm.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure; <realm> is duplicated.
+ */
+struct uri_auth *stats_set_realm(struct uri_auth **root, char *realm)
+{
+	struct uri_auth *u;
+	char *realm_copy;
+
+	if ((realm_copy = strdup(realm)) == NULL)
+		goto out_realm;
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		goto out_u;
+
+	/* replace any previously configured realm */
+	free(u->auth_realm);
+	u->auth_realm = realm_copy;
+	return u;
+
+ out_u:
+	free(realm_copy);
+ out_realm:
+	return NULL;
+}
+
+/*
+ * Returns a default uri_auth with STAT_SHNODE flag enabled and
+ * <node> set as the name if it is not empty.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_set_node(struct uri_auth **root, char *name)
+{
+	struct uri_auth *u;
+	char *node_copy = NULL;
+
+	/* an empty name only sets the flag, keeping any existing node name */
+	if (name && *name) {
+		node_copy = strdup(name);
+		if (node_copy == NULL)
+			goto out_realm;
+	}
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		goto out_u;
+
+	if (!stats_set_flag(root, STAT_SHNODE))
+		goto out_u;
+
+	if (node_copy) {
+		free(u->node);
+		u->node = node_copy;
+	}
+
+	return u;
+
+ out_u:
+	free(node_copy);
+ out_realm:
+	return NULL;
+}
+
+/*
+ * Returns a default uri_auth with STAT_SHDESC flag enabled and
+ * <description> set as the desc if it is not empty.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_set_desc(struct uri_auth **root, char *desc)
+{
+	struct uri_auth *u;
+	char *desc_copy = NULL;
+
+	/* an empty description only sets the flag, keeping any existing desc */
+	if (desc && *desc) {
+		desc_copy = strdup(desc);
+		if (desc_copy == NULL)
+			goto out_realm;
+	}
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		goto out_u;
+
+	if (!stats_set_flag(root, STAT_SHDESC))
+		goto out_u;
+
+	if (desc_copy) {
+		free(u->desc);
+		u->desc = desc_copy;
+	}
+
+	return u;
+
+ out_u:
+	free(desc_copy);
+ out_realm:
+	return NULL;
+}
+
+/*
+ * Returns a default uri_auth with the <refresh> refresh interval.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_set_refresh(struct uri_auth **root, int interval)
+{
+	struct uri_auth *u;
+
+	if ((u = stats_check_init_uri_auth(root)) != NULL)
+		u->refresh = interval;
+	return u;
+}
+
+/*
+ * Returns a default uri_auth with the <flag> set.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_set_flag(struct uri_auth **root, int flag)
+{
+	struct uri_auth *u;
+
+	if ((u = stats_check_init_uri_auth(root)) != NULL)
+		u->flags |= flag;
+	return u;
+}
+
+/*
+ * Returns a default uri_auth with a <user:passwd> entry added to the list of
+ * authorized users. If a matching entry is found, no update will be performed.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Note: <user> is modified in place (the ':' separator is replaced by '\0').
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_add_auth(struct uri_auth **root, char *user)
+{
+	struct uri_auth *u;
+	struct auth_users *newuser;
+	char *pass;
+
+	/* split "user:pass" in place; a missing ':' means an empty password */
+	pass = strchr(user, ':');
+	if (pass)
+		*pass++ = '\0';
+	else
+		pass = "";
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		return NULL;
+
+	/* lazily create the internal userlist shared by all stats users */
+	if (!u->userlist)
+		u->userlist = calloc(1, sizeof(*u->userlist));
+
+	if (!u->userlist)
+		return NULL;
+
+	if (!u->userlist->name)
+		u->userlist->name = strdup(".internal-stats-userlist");
+
+	if (!u->userlist->name)
+		return NULL;
+
+	/* refuse duplicates but keep the first entry and report success */
+	for (newuser = u->userlist->users; newuser; newuser = newuser->next)
+		if (strcmp(newuser->user, user) == 0) {
+			ha_warning("uri auth: ignoring duplicated user '%s'.\n",
+				   user);
+			return u;
+		}
+
+	newuser = calloc(1, sizeof(*newuser));
+	if (!newuser)
+		return NULL;
+
+	newuser->user = strdup(user);
+	if (!newuser->user) {
+		free(newuser);
+		return NULL;
+	}
+
+	newuser->pass = strdup(pass);
+	if (!newuser->pass) {
+		free(newuser->user);
+		free(newuser);
+		return NULL;
+	}
+
+	/* NOTE(review): presumably flags the password as stored in clear
+	 * text — confirm against auth_users documentation.
+	 */
+	newuser->flags |= AU_O_INSECURE;
+	newuser->next = u->userlist->users;
+	u->userlist->users = newuser;
+
+	return u;
+}
+
+/*
+ * Returns a default uri_auth with a <scope> entry added to the list of
+ * allowed scopes. If a matching entry is found, no update will be performed.
+ * Uses the pointer provided if not NULL and not initialized.
+ * Returns NULL on allocation failure.
+ */
+struct uri_auth *stats_add_scope(struct uri_auth **root, char *scope)
+{
+	struct uri_auth *u;
+	char *new_name;
+	struct stat_scope *old_scope, **scope_list;
+
+	if ((u = stats_check_init_uri_auth(root)) == NULL)
+		goto out;
+
+	/* walk the existing list to find a duplicate or its insertion point */
+	scope_list = &u->scope;
+	while ((old_scope = *scope_list)) {
+		if (strcmp(old_scope->px_id, scope) == 0)
+			break;
+		scope_list = &old_scope->next;
+	}
+
+	if (!old_scope) {
+		if ((new_name = strdup(scope)) == NULL)
+			goto out_u;
+
+		if ((old_scope = calloc(1, sizeof(*old_scope))) == NULL)
+			goto out_name;
+
+		old_scope->px_id = new_name;
+		old_scope->px_len = strlen(new_name);
+		*scope_list = old_scope;
+	}
+	return u;
+
+ out_name:
+	free(new_name);
+ out_u:
+	/* NOTE(review): this frees <u> even when it may already be referenced
+	 * through *root, potentially leaving a dangling pointer — confirm that
+	 * callers treat a NULL return as fatal and never reuse *root after.
+	 */
+	free(u);
+ out:
+	return NULL;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/uri_normalizer.c b/src/uri_normalizer.c
new file mode 100644
index 0000000..bc793f2
--- /dev/null
+++ b/src/uri_normalizer.c
@@ -0,0 +1,467 @@
+/*
+ * HTTP request URI normalization.
+ *
+ * Copyright 2021 Tim Duesterhus <tim@bastelstu.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <import/ist.h>
+
+#include <haproxy/api.h>
+#include <haproxy/buf.h>
+#include <haproxy/chunk.h>
+#include <haproxy/tools.h>
+#include <haproxy/uri_normalizer.h>
+
+/* Encodes '#' as '%23' in <input>, writing the result into <dst>.
+ * Returns URI_NORMALIZER_ERR_NONE on success, URI_NORMALIZER_ERR_ALLOC when
+ * the destination buffer is too small for the encoded output.
+ */
+enum uri_normalizer_err uri_normalizer_fragment_encode(const struct ist input, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist output = *dst;
+
+	struct ist scanner = input;
+
+	while (istlen(scanner)) {
+		const struct ist before_hash = istsplit(&scanner, '#');
+
+		if (istcat(&output, before_hash, size) < 0) {
+			err = URI_NORMALIZER_ERR_ALLOC;
+			goto fail;
+		}
+
+		/* a '#' was consumed by istsplit() iff the segment does not
+		 * end where the remaining input ends: emit its encoding
+		 */
+		if (istend(before_hash) != istend(scanner)) {
+			if (istcat(&output, ist("%23"), size) < 0) {
+				err = URI_NORMALIZER_ERR_ALLOC;
+				goto fail;
+			}
+		}
+	}
+
+	*dst = output;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Returns 1 if the given character is part of the 'unreserved' set in the
+ * RFC 3986 ABNF.
+ * Returns 0 if not.
+ */
+static int is_unreserved_character(unsigned char c)
+{
+	/* note: 'A'...'Z' case ranges are a GCC/Clang extension */
+	switch (c) {
+	case 'A'...'Z': /* ALPHA */
+	case 'a'...'z': /* ALPHA */
+	case '0'...'9': /* DIGIT */
+	case '-':
+	case '.':
+	case '_':
+	case '~':
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Decodes percent encoded characters that are part of the 'unreserved' set.
+ *
+ * RFC 3986, section 2.3:
+ * > URIs that differ in the replacement of an unreserved character with
+ * > its corresponding percent-encoded US-ASCII octet are equivalent [...]
+ * > when found in a URI, should be decoded to their corresponding unreserved
+ * > characters by URI normalizers.
+ *
+ * If `strict` is set to 0 then percent characters that are not followed by a
+ * hexadecimal digit are returned as-is without performing any decoding.
+ * If `strict` is set to 1 then `URI_NORMALIZER_ERR_INVALID_INPUT` is returned
+ * for invalid sequences.
+ * Returns URI_NORMALIZER_ERR_NONE on success and the result in <dst>.
+ */
+enum uri_normalizer_err uri_normalizer_percent_decode_unreserved(const struct ist input, int strict, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist output = *dst;
+
+	struct ist scanner = input;
+
+	/* The output will either be shortened or have the same length. */
+	if (size < istlen(input)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	while (istlen(scanner)) {
+		const char current = istshift(&scanner);
+
+		if (current == '%') {
+			/* a valid escape needs two hex digits after the '%' */
+			if (istlen(scanner) >= 2) {
+				if (ishex(istptr(scanner)[0]) && ishex(istptr(scanner)[1])) {
+					char hex1, hex2, c;
+
+					hex1 = istshift(&scanner);
+					hex2 = istshift(&scanner);
+					c = (hex2i(hex1) << 4) + hex2i(hex2);
+
+					/* decode only 'unreserved' octets; keep
+					 * every other escape verbatim
+					 */
+					if (is_unreserved_character(c)) {
+						output = __istappend(output, c);
+					}
+					else {
+						output = __istappend(output, current);
+						output = __istappend(output, hex1);
+						output = __istappend(output, hex2);
+					}
+
+					continue;
+				}
+			}
+
+			/* malformed escape: reject in strict mode, pass
+			 * through otherwise
+			 */
+			if (strict) {
+				err = URI_NORMALIZER_ERR_INVALID_INPUT;
+				goto fail;
+			}
+			else {
+				output = __istappend(output, current);
+			}
+		}
+		else {
+			output = __istappend(output, current);
+		}
+	}
+
+	*dst = output;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Uppercases letters used in percent encoding.
+ *
+ * If `strict` is set to 0 then percent characters that are not followed by a
+ * hexadecimal digit are returned as-is without modifying the following letters.
+ * If `strict` is set to 1 then `URI_NORMALIZER_ERR_INVALID_INPUT` is returned
+ * for invalid sequences.
+ * Returns URI_NORMALIZER_ERR_NONE on success and the result in <dst>.
+ */
+enum uri_normalizer_err uri_normalizer_percent_upper(const struct ist input, int strict, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist output = *dst;
+
+	struct ist scanner = input;
+
+	/* The output will have the same length. */
+	if (size < istlen(input)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	while (istlen(scanner)) {
+		const char current = istshift(&scanner);
+
+		if (current == '%') {
+			/* only rewrite well-formed "%XX" escapes */
+			if (istlen(scanner) >= 2) {
+				if (ishex(istptr(scanner)[0]) && ishex(istptr(scanner)[1])) {
+					output = __istappend(output, current);
+					output = __istappend(output, toupper(istshift(&scanner)));
+					output = __istappend(output, toupper(istshift(&scanner)));
+					continue;
+				}
+			}
+
+			/* malformed escape: reject in strict mode, copy as-is
+			 * otherwise
+			 */
+			if (strict) {
+				err = URI_NORMALIZER_ERR_INVALID_INPUT;
+				goto fail;
+			}
+			else {
+				output = __istappend(output, current);
+			}
+		}
+		else {
+			output = __istappend(output, current);
+		}
+	}
+
+	*dst = output;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Removes `/./` from the given path, writing the result into <dst>.
+ * Returns URI_NORMALIZER_ERR_NONE on success, URI_NORMALIZER_ERR_ALLOC when
+ * the destination buffer is too small.
+ */
+enum uri_normalizer_err uri_normalizer_path_dot(const struct ist path, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist newpath = *dst;
+
+	struct ist scanner = path;
+
+	/* The path will either be shortened or have the same length. */
+	if (size < istlen(path)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	/* copy each '/'-separated segment, dropping the "." ones */
+	while (istlen(scanner) > 0) {
+		const struct ist segment = istsplit(&scanner, '/');
+
+		if (!isteq(segment, ist("."))) {
+			if (istcat(&newpath, segment, size) < 0) {
+				/* This is impossible, because we checked the size of the destination buffer. */
+				my_unreachable();
+				err = URI_NORMALIZER_ERR_INTERNAL_ERROR;
+				goto fail;
+			}
+
+			/* re-add the '/' that istsplit() consumed, unless at
+			 * the end of the input
+			 */
+			if (istend(segment) != istend(scanner))
+				newpath = __istappend(newpath, '/');
+		}
+	}
+
+	*dst = newpath;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Merges `/../` with preceding path segments.
+ *
+ * If `full` is set to `0` then `/../` will be printed at the start of the resulting
+ * path if the number of `/../` exceeds the number of other segments. If `full` is
+ * set to `1` these will not be printed.
+ *
+ * The path is scanned from right to left, building the output backwards from
+ * the tail of the destination buffer: each `/../` increments <up>, and each
+ * regular segment is either skipped (consuming one <up>) or prepended.
+ * Returns URI_NORMALIZER_ERR_NONE on success and the result in <dst>.
+ */
+enum uri_normalizer_err uri_normalizer_path_dotdot(const struct ist path, int full, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	char * const tail = istptr(*dst) + size;
+	char *head = tail;
+
+	ssize_t offset = istlen(path) - 1;
+
+	/* number of `/../` seen but not yet consumed by a skipped segment */
+	int up = 0;
+
+	/* The path will either be shortened or have the same length. */
+	if (size < istlen(path)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	/* Handle `/..` at the end of the path without a trailing slash. */
+	if (offset >= 2 && istmatch(istadv(path, offset - 2), ist("/.."))) {
+		up++;
+		offset -= 2;
+	}
+
+	while (offset >= 0) {
+		if (offset >= 3 && istmatch(istadv(path, offset - 3), ist("/../"))) {
+			up++;
+			offset -= 3;
+			continue;
+		}
+
+		if (up > 0) {
+			/* Skip the slash. */
+			offset--;
+
+			/* First check whether we already reached the start of the path,
+			 * before popping the current `/../`.
+			 */
+			if (offset >= 0) {
+				up--;
+
+				/* Skip the current path segment. */
+				while (offset >= 0 && istptr(path)[offset] != '/')
+					offset--;
+			}
+		}
+		else {
+			/* Prepend the slash. */
+			*(--head) = istptr(path)[offset];
+			offset--;
+
+			/* Prepend the current path segment. */
+			while (offset >= 0 && istptr(path)[offset] != '/') {
+				*(--head) = istptr(path)[offset];
+				offset--;
+			}
+		}
+	}
+
+	if (up > 0) {
+		/* Prepend a trailing slash. */
+		*(--head) = '/';
+
+		if (!full) {
+			/* Prepend unconsumed `/..`. */
+			do {
+				*(--head) = '.';
+				*(--head) = '.';
+				*(--head) = '/';
+				up--;
+			} while (up > 0);
+		}
+	}
+
+	*dst = ist2(head, tail - head);
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Merges adjacent slashes in the given path, writing the result into <dst>.
+ * Returns URI_NORMALIZER_ERR_NONE on success, URI_NORMALIZER_ERR_ALLOC when
+ * the destination buffer is too small.
+ */
+enum uri_normalizer_err uri_normalizer_path_merge_slashes(const struct ist path, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist newpath = *dst;
+
+	struct ist scanner = path;
+
+	/* The path will either be shortened or have the same length. */
+	if (size < istlen(path)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	while (istlen(scanner) > 0) {
+		const char current = istshift(&scanner);
+
+		/* after a '/', swallow all immediately following slashes */
+		if (current == '/') {
+			while (istlen(scanner) > 0 && *istptr(scanner) == '/')
+				scanner = istnext(scanner);
+		}
+
+		newpath = __istappend(newpath, current);
+	}
+
+	*dst = newpath;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/* Compares two query parameters by name. Query parameters are ordered
+ * as with memcmp. Shorter parameter names are ordered lower. Identical
+ * parameter names are compared by their pointer to maintain a stable
+ * sort.
+ * qsort() comparator: <a> and <b> point to struct ist elements.
+ */
+static int query_param_cmp(const void *a, const void *b)
+{
+	const struct ist param_a = *(struct ist*)a;
+	const struct ist param_b = *(struct ist*)b;
+	/* compare only the part before '=' (the parameter name) */
+	const struct ist param_a_name = iststop(param_a, '=');
+	const struct ist param_b_name = iststop(param_b, '=');
+
+	int cmp = istdiff(param_a_name, param_b_name);
+
+	if (cmp != 0)
+		return cmp;
+
+	/* The contents are identical: Compare the pointer. */
+	if (istptr(param_a) < istptr(param_b))
+		return -1;
+
+	if (istptr(param_a) > istptr(param_b))
+		return 1;
+
+	return 0;
+}
+
+/* Sorts the parameters within the given query string (expected to start with
+ * '?'), using <delim> as the parameter separator, and writes the result into
+ * <dst>. The trash chunk is borrowed as scratch storage for the parameter
+ * array, so this must not be mixed with other users of the trash buffer.
+ * Returns URI_NORMALIZER_ERR_NONE on success, URI_NORMALIZER_ERR_ALLOC when
+ * either buffer is too small.
+ */
+enum uri_normalizer_err uri_normalizer_query_sort(const struct ist query, const char delim, struct ist *dst)
+{
+	enum uri_normalizer_err err;
+
+	const size_t size = istclear(dst);
+	struct ist newquery = *dst;
+
+	struct ist scanner = query;
+
+	const struct buffer *trash = get_trash_chunk();
+	struct ist *params = (struct ist *)b_orig(trash);
+	const size_t max_param = b_size(trash) / sizeof(*params);
+	size_t param_count = 0;
+
+	size_t i;
+
+	/* The query will have the same length. */
+	if (size < istlen(query)) {
+		err = URI_NORMALIZER_ERR_ALLOC;
+		goto fail;
+	}
+
+	/* Handle the leading '?'. */
+	newquery = __istappend(newquery, istshift(&scanner));
+
+	/* collect the parameters as ist views into the original query */
+	while (istlen(scanner) > 0) {
+		const struct ist param = istsplit(&scanner, delim);
+
+		if (param_count + 1 > max_param) {
+			err = URI_NORMALIZER_ERR_ALLOC;
+			goto fail;
+		}
+
+		params[param_count] = param;
+		param_count++;
+	}
+
+	/* stable by construction: ties are broken on the original pointer */
+	qsort(params, param_count, sizeof(*params), query_param_cmp);
+
+	for (i = 0; i < param_count; i++) {
+		if (i > 0)
+			newquery = __istappend(newquery, delim);
+
+		if (istcat(&newquery, params[i], size) < 0) {
+			/* This is impossible, because we checked the size of the destination buffer. */
+			my_unreachable();
+			err = URI_NORMALIZER_ERR_INTERNAL_ERROR;
+			goto fail;
+		}
+	}
+
+	*dst = newquery;
+
+	return URI_NORMALIZER_ERR_NONE;
+
+ fail:
+
+	return err;
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/src/vars.c b/src/vars.c
new file mode 100644
index 0000000..7ec753e
--- /dev/null
+++ b/src/vars.c
@@ -0,0 +1,1454 @@
+#include <ctype.h>
+
+#include <haproxy/api.h>
+#include <haproxy/arg.h>
+#include <haproxy/buf.h>
+#include <haproxy/cfgparse.h>
+#include <haproxy/check.h>
+#include <haproxy/cli.h>
+#include <haproxy/global.h>
+#include <haproxy/http.h>
+#include <haproxy/http_rules.h>
+#include <haproxy/list.h>
+#include <haproxy/log.h>
+#include <haproxy/sample.h>
+#include <haproxy/session.h>
+#include <haproxy/stream-t.h>
+#include <haproxy/tcp_rules.h>
+#include <haproxy/tcpcheck.h>
+#include <haproxy/tools.h>
+#include <haproxy/vars.h>
+#include <haproxy/xxhash.h>
+
+
+/* Pool from which all struct var entries are allocated. */
+DECLARE_STATIC_POOL(var_pool, "vars", sizeof(struct var));
+
+/* list of variables for the process scope. */
+struct vars proc_vars THREAD_ALIGNED(64);
+
+/* Per-scope size limits. A value of zero disables the corresponding
+ * accounting checks in var_accounting_add()/var_accounting_diff().
+ */
+static unsigned int var_global_limit = 0;
+static unsigned int var_proc_limit = 0;
+static unsigned int var_sess_limit = 0;
+static unsigned int var_txn_limit = 0;
+static unsigned int var_reqres_limit = 0;
+static unsigned int var_check_limit = 0;
+/* seed fed to XXH3() when hashing variable names (see vars_hash_name()) */
+static uint64_t var_name_hash_seed = 0;
+
+/* Structure and array matching set-var conditions to their respective flag
+ * value.
+ */
+struct var_set_condition {
+	const char *cond_str; /* condition keyword as written in the configuration */
+	uint flag;            /* corresponding VF_COND_* flag */
+};
+
+/* NULL-terminated mapping used by vars_parse_cond_param(). */
+static struct var_set_condition conditions_array[] = {
+	{ "ifexists", VF_COND_IFEXISTS },
+	{ "ifnotexists", VF_COND_IFNOTEXISTS },
+	{ "ifempty", VF_COND_IFEMPTY },
+	{ "ifnotempty", VF_COND_IFNOTEMPTY },
+	{ "ifset", VF_COND_IFSET },
+	{ "ifnotset", VF_COND_IFNOTSET },
+	{ "ifgt", VF_COND_IFGT },
+	{ "iflt", VF_COND_IFLT },
+	{ NULL, 0 }
+};
+
+/* Resolves the variable list matching <scope> for the given session/stream
+ * pair, or returns NULL when the required owner is not available (e.g. no
+ * stream for a txn-scoped lookup, no check attached to the session).
+ */
+static inline struct vars *get_vars(struct session *sess, struct stream *strm, enum vars_scope scope)
+{
+	if (scope == SCOPE_PROC)
+		return &proc_vars;
+
+	if (scope == SCOPE_SESS)
+		return sess ? &sess->vars : NULL;
+
+	if (scope == SCOPE_CHECK) {
+		struct check *check = sess ? objt_check(sess->origin) : NULL;
+
+		return check ? &check->vars : NULL;
+	}
+
+	if (scope == SCOPE_TXN)
+		return strm ? &strm->vars_txn : NULL;
+
+	/* SCOPE_REQ, SCOPE_RES and any other value share the reqres list */
+	return strm ? &strm->vars_reqres : NULL;
+}
+
+/* This function adds or removes <size> bytes (negative to credit back) from
+ * the accounting of <vars> and of every enclosing scope up to the process
+ * scope. The inner pointers may be null when setting the outer ones only.
+ * Note the control flow: the switch deliberately falls through from the
+ * innermost scope to the outermost one, and SCOPE_TXN jumps over the
+ * SCOPE_CHECK accounting via the scope_sess label.
+ */
+void var_accounting_diff(struct vars *vars, struct session *sess, struct stream *strm, int size)
+{
+	switch (vars->scope) {
+	case SCOPE_REQ:
+	case SCOPE_RES:
+		if (var_reqres_limit && strm)
+			_HA_ATOMIC_ADD(&strm->vars_reqres.size, size);
+		__fallthrough;
+	case SCOPE_TXN:
+		if (var_txn_limit && strm)
+			_HA_ATOMIC_ADD(&strm->vars_txn.size, size);
+		/* bypass the check scope accounting */
+		goto scope_sess;
+	case SCOPE_CHECK:
+		if (var_check_limit) {
+			struct check *check = objt_check(sess->origin);
+
+			if (check)
+				_HA_ATOMIC_ADD(&check->vars.size, size);
+		}
+scope_sess:
+		__fallthrough;
+	case SCOPE_SESS:
+		if (var_sess_limit)
+			_HA_ATOMIC_ADD(&sess->vars.size, size);
+		__fallthrough;
+	case SCOPE_PROC:
+		/* the proc scope collects the accounting of all other scopes */
+		if (var_proc_limit || var_global_limit)
+			_HA_ATOMIC_ADD(&proc_vars.size, size);
+	}
+}
+
+/* This function returns 1 if the <size> is available in the var
+ * pool <vars>, otherwise returns 0. If the space is available,
+ * the size is reserved via var_accounting_diff(). The inner pointers may be
+ * null when setting the outer ones only. The accounting uses either <sess>
+ * or <strm> depending on the scope. <strm> may be NULL when no stream is
+ * known and only the session exists (eg: tcp-request connection).
+ * Like var_accounting_diff(), the switch cascades from the innermost scope
+ * outwards, with SCOPE_TXN skipping SCOPE_CHECK through the scope_sess label,
+ * so that every enclosing limit is checked before reserving.
+ */
+static int var_accounting_add(struct vars *vars, struct session *sess, struct stream *strm, int size)
+{
+	switch (vars->scope) {
+	case SCOPE_REQ:
+	case SCOPE_RES:
+		if (var_reqres_limit && strm && strm->vars_reqres.size + size > var_reqres_limit)
+			return 0;
+		__fallthrough;
+	case SCOPE_TXN:
+		if (var_txn_limit && strm && strm->vars_txn.size + size > var_txn_limit)
+			return 0;
+		goto scope_sess;
+	case SCOPE_CHECK: {
+		struct check *check = objt_check(sess->origin);
+
+		if (var_check_limit && check && check->vars.size + size > var_check_limit)
+			return 0;
+	}
+scope_sess:
+		__fallthrough;
+	case SCOPE_SESS:
+		if (var_sess_limit && sess->vars.size + size > var_sess_limit)
+			return 0;
+		__fallthrough;
+	case SCOPE_PROC:
+		/* note: scope proc collects all others and is currently identical to the
+		 * global limit.
+		 */
+		if (var_proc_limit && proc_vars.size + size > var_proc_limit)
+			return 0;
+		if (var_global_limit && proc_vars.size + size > var_global_limit)
+			return 0;
+	}
+	var_accounting_diff(vars, sess, strm, size);
+	return 1;
+}
+
+/* Releases the storage held by variable <var>. String/binary contents (and
+ * HTTP_METH_OTHER method strings) are freed and the sample type is reset to
+ * SMP_T_ANY. Unless the variable carries VF_PERMANENT (and <force> is zero),
+ * the entry itself is unlinked from its list and returned to the pool.
+ * Returns the total number of bytes freed.
+ */
+unsigned int var_clear(struct var *var, int force)
+{
+	unsigned int released = 0;
+	int type = var->data.type;
+
+	if (type == SMP_T_STR || type == SMP_T_BIN) {
+		released += var->data.u.str.data;
+		ha_free(&var->data.u.str.area);
+	}
+	else if (type == SMP_T_METH && var->data.u.meth.meth == HTTP_METH_OTHER) {
+		released += var->data.u.meth.str.data;
+		ha_free(&var->data.u.meth.str.area);
+	}
+
+	/* mark the sample as unset */
+	var->data.type = SMP_T_ANY;
+
+	if (force || !(var->flags & VF_PERMANENT)) {
+		LIST_DELETE(&var->l);
+		pool_free(var_pool, var);
+		released += sizeof(struct var);
+	}
+	return released;
+}
+
+/* Forcefully releases every variable stored in <vars> and updates the
+ * memory accounting accordingly. <sess> and <strm> are only used for the
+ * accounting of the enclosing scopes.
+ */
+void vars_prune(struct vars *vars, struct session *sess, struct stream *strm)
+{
+	struct var *cur, *back;
+	unsigned int released = 0;
+
+	vars_wrlock(vars);
+	list_for_each_entry_safe(cur, back, &vars->head, l)
+		released += var_clear(cur, 1);
+	vars_wrunlock(vars);
+
+	var_accounting_diff(vars, sess, strm, -released);
+}
+
+/* Forcefully releases every session-scoped variable in the list starting at
+ * <vars>, then directly deduces the freed size from the session and process
+ * accounting (no stream is involved at this point).
+ */
+void vars_prune_per_sess(struct vars *vars)
+{
+	struct var *cur, *back;
+	unsigned int released = 0;
+
+	vars_wrlock(vars);
+	list_for_each_entry_safe(cur, back, &vars->head, l)
+		released += var_clear(cur, 1);
+	vars_wrunlock(vars);
+
+	if (var_sess_limit)
+		_HA_ATOMIC_SUB(&vars->size, released);
+	if (var_proc_limit || var_global_limit)
+		_HA_ATOMIC_SUB(&proc_vars.size, released);
+}
+
+/* Initializes the variable list head <vars> for scope <scope>: empty list,
+ * zero accounted size, unlocked rwlock.
+ */
+void vars_init_head(struct vars *vars, enum vars_scope scope)
+{
+	vars->scope = scope;
+	vars->size = 0;
+	LIST_INIT(&vars->head);
+	HA_RWLOCK_INIT(&vars->rwlock);
+}
+
+/* This function returns a hash value and a scope for a variable name of a
+ * specified length. It makes sure that the scope is valid. It returns non-zero
+ * on success, 0 on failure (with <err> filled when non-NULL). Neither hash
+ * nor scope may be NULL. The scope prefix (e.g. "txn.") is stripped before
+ * hashing so only the bare name is hashed.
+ */
+static int vars_hash_name(const char *name, int len, enum vars_scope *scope,
+                          uint64_t *hash, char **err)
+{
+	/* recognized scope prefixes; the name must be longer than the prefix
+	 * so that an empty bare name is rejected.
+	 */
+	static const struct {
+		const char *prefix;
+		int len;
+		enum vars_scope scope;
+	} scopes[] = {
+		{ "proc.",  5, SCOPE_PROC  },
+		{ "sess.",  5, SCOPE_SESS  },
+		{ "txn.",   4, SCOPE_TXN   },
+		{ "req.",   4, SCOPE_REQ   },
+		{ "res.",   4, SCOPE_RES   },
+		{ "check.", 6, SCOPE_CHECK },
+	};
+	const char *tmp;
+	size_t i;
+
+	/* Check length. */
+	if (len == 0) {
+		memprintf(err, "Empty variable name cannot be accepted");
+		return 0;
+	}
+
+	/* Check scope. */
+	for (i = 0; i < sizeof(scopes) / sizeof(scopes[0]); i++) {
+		if (len > scopes[i].len && strncmp(name, scopes[i].prefix, scopes[i].len) == 0) {
+			name += scopes[i].len;
+			len  -= scopes[i].len;
+			*scope = scopes[i].scope;
+			break;
+		}
+	}
+	if (i == sizeof(scopes) / sizeof(scopes[0])) {
+		memprintf(err, "invalid variable name '%.*s'. A variable name must start with its scope. "
+		          "The scope can be 'proc', 'sess', 'txn', 'req', 'res' or 'check'", len, name);
+		return 0;
+	}
+
+	/* Check variable name syntax. */
+	for (tmp = name; tmp < name + len; tmp++) {
+		if (!isalnum((unsigned char)*tmp) && *tmp != '_' && *tmp != '.') {
+			memprintf(err, "invalid syntax at char '%s'", tmp);
+			return 0;
+		}
+	}
+
+	*hash = XXH3(name, len, var_name_hash_seed);
+	return 1;
+}
+
+/* Looks up the variable whose hash matches <name_hash> in list <vars> and
+ * returns it, or NULL when not found. A plain linked list walk is enough
+ * since only a few tens of variables are expected per scope. The caller is
+ * responsible for ensuring that <vars> is properly locked.
+ */
+static struct var *var_get(struct vars *vars, uint64_t name_hash)
+{
+	struct var *cur;
+
+	list_for_each_entry(cur, &vars->head, l) {
+		if (cur->name_hash == name_hash)
+			return cur;
+	}
+	return NULL;
+}
+
+/* Sample fetch retrieving the variable described by args[0] into <smp>. An
+ * optional string in args[1] serves as a default value when the variable is
+ * unset. Returns 0 if it fails, else returns 1.
+ */
+static int smp_fetch_var(const struct arg *args, struct sample *smp, const char *kw, void *private)
+{
+	const struct buffer *def;
+
+	def = (args[1].type == ARGT_STR) ? &args[1].data.str : NULL;
+	return vars_get_by_desc(&args[0].data.var, smp, def);
+}
+
+/*
+ * Frees the dynamic storage attached to <var> for types that own memory
+ * (strings, binaries, and "other" HTTP methods) and credits the freed size
+ * back to the accounting. Used just before a variable is overwritten with a
+ * new sample's content. <var_type> is the variable's previous type.
+ */
+static inline void var_clear_buffer(struct sample *smp, struct vars *vars, struct var *var, int var_type)
+{
+	if (var_type == SMP_T_STR || var_type == SMP_T_BIN) {
+		var_accounting_diff(vars, smp->sess, smp->strm,
+		                    -var->data.u.str.data);
+		ha_free(&var->data.u.str.area);
+	}
+	else if (var_type == SMP_T_METH && var->data.u.meth.meth == HTTP_METH_OTHER) {
+		var_accounting_diff(vars, smp->sess, smp->strm,
+		                    -var->data.u.meth.str.data);
+		ha_free(&var->data.u.meth.str.area);
+	}
+}
+
+/* This function tries to create a variable whose name hash is <name_hash> in
+ * scope <scope> and store sample <smp> as its value.
+ *
+ * The stream and session are extracted from <smp>, whose stream may be NULL
+ * when scope is SCOPE_SESS. In case there wouldn't be enough memory to store
+ * the sample while the variable was already created, it would be changed to
+ * a bool (which is memory-less).
+ *
+ * Flags is a bitfield that may contain one of the following flags:
+ *   - VF_CREATEONLY: do nothing if the variable already exists (success).
+ *   - VF_PERMANENT: this flag will be passed to the variable upon creation
+ *
+ *   - VF_COND_IFEXISTS: only set variable if it already exists
+ *   - VF_COND_IFNOTEXISTS: only set variable if it did not exist yet
+ *   - VF_COND_IFEMPTY: only set variable if sample is empty
+ *   - VF_COND_IFNOTEMPTY: only set variable if sample is not empty
+ *   - VF_COND_IFSET: only set variable if its type is not SMP_TYPE_ANY
+ *   - VF_COND_IFNOTSET: only set variable if its type is ANY
+ *   - VF_COND_IFGT: only set variable if its value is greater than the sample's
+ *   - VF_COND_IFLT: only set variable if its value is less than the sample's
+ *
+ * It returns 0 on failure, non-zero on success. Note that when a condition
+ * is not met the function still returns 0 (ret is never set to 1 on those
+ * early exits).
+ */
+static int var_set(uint64_t name_hash, enum vars_scope scope, struct sample *smp, uint flags)
+{
+	struct vars *vars;
+	struct var *var;
+	int ret = 0;
+	int previous_type = SMP_T_ANY;
+
+	vars = get_vars(smp->sess, smp->strm, scope);
+	if (!vars || vars->scope != scope)
+		return 0;
+
+	vars_wrlock(vars);
+
+	/* Look for existing variable name. */
+	var = var_get(vars, name_hash);
+
+	if (var) {
+		if (flags & VF_CREATEONLY) {
+			/* already exists: nothing to do, report success */
+			ret = 1;
+			goto unlock;
+		}
+
+		if (flags & VF_COND_IFNOTEXISTS)
+			goto unlock;
+	} else {
+		if (flags & VF_COND_IFEXISTS)
+			goto unlock;
+
+		/* Check memory available. */
+		if (!var_accounting_add(vars, smp->sess, smp->strm, sizeof(struct var)))
+			goto unlock;
+
+		/* Create new entry. */
+		var = pool_alloc(var_pool);
+		if (!var)
+			goto unlock;
+		LIST_APPEND(&vars->head, &var->l);
+		var->name_hash = name_hash;
+		var->flags = flags & VF_PERMANENT;
+		var->data.type = SMP_T_ANY;
+	}
+
+	/* A variable of type SMP_T_ANY is considered as unset (either created
+	 * and never set or unset-var was called on it).
+	 */
+	if ((flags & VF_COND_IFSET && var->data.type == SMP_T_ANY) ||
+	    (flags & VF_COND_IFNOTSET && var->data.type != SMP_T_ANY))
+		goto unlock;
+
+	/* Set type. previous_type is kept so the change can be rolled back
+	 * when a condition below rejects the new value.
+	 */
+	previous_type = var->data.type;
+	var->data.type = smp->data.type;
+
+	if (flags & VF_COND_IFEMPTY) {
+		switch(smp->data.type) {
+		case SMP_T_ANY:
+		case SMP_T_STR:
+		case SMP_T_BIN:
+			/* The actual test on the contents of the sample will be
+			 * performed later.
+			 */
+			break;
+		default:
+			/* The sample cannot be empty since it has a scalar type. */
+			var->data.type = previous_type;
+			goto unlock;
+		}
+	}
+
+	/* Copy data. If the data needs memory, the function can fail. */
+	switch (var->data.type) {
+	case SMP_T_BOOL:
+		var_clear_buffer(smp, vars, var, previous_type);
+		var->data.u.sint = smp->data.u.sint;
+		break;
+	case SMP_T_SINT:
+		/* ifgt/iflt only apply when the previous value was also an int */
+		if (previous_type == var->data.type) {
+			if (((flags & VF_COND_IFGT) && !(var->data.u.sint > smp->data.u.sint)) ||
+			    ((flags & VF_COND_IFLT) && !(var->data.u.sint < smp->data.u.sint)))
+				goto unlock;
+		}
+		var_clear_buffer(smp, vars, var, previous_type);
+		var->data.u.sint = smp->data.u.sint;
+		break;
+	case SMP_T_IPV4:
+		var_clear_buffer(smp, vars, var, previous_type);
+		var->data.u.ipv4 = smp->data.u.ipv4;
+		break;
+	case SMP_T_IPV6:
+		var_clear_buffer(smp, vars, var, previous_type);
+		var->data.u.ipv6 = smp->data.u.ipv6;
+		break;
+	case SMP_T_STR:
+	case SMP_T_BIN:
+		if ((flags & VF_COND_IFNOTEMPTY && !smp->data.u.str.data) ||
+		    (flags & VF_COND_IFEMPTY && smp->data.u.str.data)) {
+			var->data.type = previous_type;
+			goto unlock;
+		}
+		var_clear_buffer(smp, vars, var, previous_type);
+		if (!var_accounting_add(vars, smp->sess, smp->strm, smp->data.u.str.data)) {
+			var->data.type = SMP_T_BOOL; /* This type doesn't use additional memory. */
+			goto unlock;
+		}
+
+		var->data.u.str.area = malloc(smp->data.u.str.data);
+		if (!var->data.u.str.area) {
+			var_accounting_diff(vars, smp->sess, smp->strm,
+			                    -smp->data.u.str.data);
+			var->data.type = SMP_T_BOOL; /* This type doesn't use additional memory. */
+			goto unlock;
+		}
+		var->data.u.str.data = smp->data.u.str.data;
+		memcpy(var->data.u.str.area, smp->data.u.str.area,
+		       var->data.u.str.data);
+		break;
+	case SMP_T_METH:
+		var_clear_buffer(smp, vars, var, previous_type);
+		var->data.u.meth.meth = smp->data.u.meth.meth;
+		/* only HTTP_METH_OTHER carries a method string to duplicate */
+		if (smp->data.u.meth.meth != HTTP_METH_OTHER)
+			break;
+
+		if (!var_accounting_add(vars, smp->sess, smp->strm, smp->data.u.meth.str.data)) {
+			var->data.type = SMP_T_BOOL; /* This type doesn't use additional memory. */
+			goto unlock;
+		}
+
+		var->data.u.meth.str.area = malloc(smp->data.u.meth.str.data);
+		if (!var->data.u.meth.str.area) {
+			var_accounting_diff(vars, smp->sess, smp->strm,
+			                    -smp->data.u.meth.str.data);
+			var->data.type = SMP_T_BOOL; /* This type doesn't use additional memory. */
+			goto unlock;
+		}
+		var->data.u.meth.str.data = smp->data.u.meth.str.data;
+		var->data.u.meth.str.size = smp->data.u.meth.str.data;
+		memcpy(var->data.u.meth.str.area, smp->data.u.meth.str.area,
+		       var->data.u.meth.str.data);
+		break;
+	}
+
+	/* OK, now done */
+	ret = 1;
+ unlock:
+	vars_wrunlock(vars);
+	return ret;
+}
+
+/* Deletes the variable matching name hash <name_hash> and scope <scope> for
+ * the session and stream found in <smp>. Note that stream may be null for
+ * SCOPE_SESS. Returns 0 if the scope was not found, otherwise 1 (even when
+ * the variable itself did not exist).
+ */
+static int var_unset(uint64_t name_hash, enum vars_scope scope, struct sample *smp)
+{
+	struct vars *vars;
+	struct var *var;
+
+	vars = get_vars(smp->sess, smp->strm, scope);
+	if (!vars || vars->scope != scope)
+		return 0;
+
+	vars_wrlock(vars);
+	var = var_get(vars, name_hash);
+	if (var) {
+		unsigned int released = var_clear(var, 0);
+
+		var_accounting_diff(vars, smp->sess, smp->strm, -released);
+	}
+	vars_wrunlock(vars);
+	return 1;
+}
+
+
+/*
+ * Translates a set-var condition keyword <cond> into its numerical flag and
+ * ORs it into <cond_bitmap>. When the keyword is unknown and <err> is
+ * non-NULL, an error message is produced.
+ * Returns 1 in case of success, 0 otherwise.
+ */
+static int vars_parse_cond_param(const struct buffer *cond, uint *cond_bitmap, char **err)
+{
+	const struct var_set_condition *elt;
+
+	/* The conditions array is NULL terminated. */
+	for (elt = conditions_array; elt->cond_str; elt++) {
+		if (chunk_strcmp(cond, elt->cond_str) == 0) {
+			*cond_bitmap |= elt->flag;
+			return 1;
+		}
+	}
+
+	if (err)
+		memprintf(err, "unknown condition \"%.*s\"", (int)cond->data, cond->area);
+	return 0;
+}
+
+/* Converter storing sample <smp> into the variable described by args[0],
+ * honouring the optional condition keywords found in the following string
+ * arguments. Returns 0 if it fails, else returns 1.
+ */
+static int smp_conv_store(const struct arg *args, struct sample *smp, void *private)
+{
+	uint cond_flags = 0;
+	int idx;
+
+	for (idx = 1; args[idx].type == ARGT_STR; idx++) {
+		if (!vars_parse_cond_param(&args[idx].data.str, &cond_flags, NULL))
+			break;
+	}
+
+	return var_set(args[0].data.var.name_hash, args[0].data.var.scope, smp, cond_flags);
+}
+
+/* Converter unsetting the variable described by args[0]. Returns 0 if it
+ * fails, else returns 1.
+ */
+static int smp_conv_clear(const struct arg *args, struct sample *smp, void *private)
+{
+	const struct var_desc *desc = &args[0].data.var;
+
+	return var_unset(desc->name_hash, desc->scope, smp);
+}
+
+/* Validates argument <arg> as a variable name and converts it in place into
+ * an ARGT_VAR argument holding the name hash and scope, so later lookups are
+ * cheap comparisons. The argument must be a string. 'proc' scoped variables
+ * are pre-created here so they exist for the whole process lifetime.
+ * Returns 1 on success, otherwise returns 0 and fills <err>.
+ */
+int vars_check_arg(struct arg *arg, char **err)
+{
+	struct sample empty_smp = { };
+	enum vars_scope scope;
+	uint64_t hash;
+
+	/* Check arg type. */
+	if (arg->type != ARGT_STR) {
+		memprintf(err, "unexpected argument type");
+		return 0;
+	}
+
+	/* Register new variable name. */
+	if (!vars_hash_name(arg->data.str.area, arg->data.str.data, &scope, &hash, err))
+		return 0;
+
+	/* proc-scoped variables are created up front and kept permanently */
+	if (scope == SCOPE_PROC && !var_set(hash, scope, &empty_smp, VF_CREATEONLY|VF_PERMANENT))
+		return 0;
+
+	/* the string is no longer needed, switch the arg to its descriptor */
+	chunk_destroy(&arg->data.str);
+	arg->type = ARGT_VAR;
+	arg->data.var.name_hash = hash;
+	arg->data.var.scope = scope;
+	return 1;
+}
+
+/* Stores sample <smp> into the variable named <name> (length <len>). For the
+ * 'proc' scope the variable must already be defined; other scopes may create
+ * it on the fly. Returns zero on failure (including an undefined 'proc'
+ * variable) and non-zero otherwise.
+ */
+int vars_set_by_name_ifexist(const char *name, size_t len, struct sample *smp)
+{
+	enum vars_scope scope;
+	uint64_t hash;
+	uint flags = 0;
+
+	/* Resolve name and scope. */
+	if (!vars_hash_name(name, len, &scope, &hash, NULL))
+		return 0;
+
+	/* only proc-scope variables are forbidden from being created here */
+	if (scope == SCOPE_PROC)
+		flags = VF_COND_IFEXISTS;
+
+	return var_set(hash, scope, smp, flags);
+}
+
+
+/* Stores sample <smp> into the variable named <name> (length <len>),
+ * creating it if needed. Returns zero on failure and non-zero otherwise.
+ */
+int vars_set_by_name(const char *name, size_t len, struct sample *smp)
+{
+	uint64_t hash;
+	enum vars_scope scope;
+
+	/* Resolve name and scope. */
+	if (!vars_hash_name(name, len, &scope, &hash, NULL))
+		return 0;
+	return var_set(hash, scope, smp, 0);
+}
+
+/* Unsets the variable named <name> (length <len>) if it was already defined.
+ * Returns zero on failure and non-zero otherwise.
+ */
+int vars_unset_by_name_ifexist(const char *name, size_t len, struct sample *smp)
+{
+	uint64_t hash;
+	enum vars_scope scope;
+
+	/* Resolve name and scope. */
+	if (!vars_hash_name(name, len, &scope, &hash, NULL))
+		return 0;
+	return var_unset(hash, scope, smp);
+}
+
+
+/* This retrieves variable whose hash matches <name_hash> from variables <vars>,
+ * and if found and not empty, duplicates the result into sample <smp>.
+ * smp_dup() is used in order to release the variables lock ASAP (so a pre-
+ * allocated chunk is obtained via get_trash_chunk()). The variables' lock is
+ * used for reads.
+ *
+ * The function returns 0 if the variable was not found and no default
+ * value was provided in <def>, otherwise 1 with the sample filled.
+ * Default values are always returned as strings.
+ */
+static int var_to_smp(struct vars *vars, uint64_t name_hash, struct sample *smp, const struct buffer *def)
+{
+	struct var *var;
+
+	/* Get the variable entry. */
+	vars_rdlock(vars);
+	var = var_get(vars, name_hash);
+	/* a type of 0 (SMP_T_ANY) means the variable exists but is unset */
+	if (!var || !var->data.type) {
+		if (!def) {
+			vars_rdunlock(vars);
+			return 0;
+		}
+
+		/* not found but we have a default value */
+		smp->data.type = SMP_T_STR;
+		smp->data.u.str = *def;
+	}
+	else
+		smp->data = var->data;
+
+	/* Copy sample before releasing the lock, since var's storage may be
+	 * freed by a concurrent writer afterwards.
+	 */
+	smp_dup(smp);
+
+	vars_rdunlock(vars);
+	return 1;
+}
+
+/* Fills sample <smp> with the content of the variable named <name> (length
+ * <len>).
+ *
+ * Keep in mind that the sample content is duplicated by using smp_dup() and
+ * therefore lives in a pre-allocated trash chunk as returned by
+ * get_trash_chunk().
+ *
+ * If the variable is not valid in this scope, 0 is always returned.
+ * If the variable is valid but not found, either the default value <def> is
+ * returned if not NULL, or zero is returned.
+ *
+ * Returns 1 if the sample is filled, otherwise it returns 0.
+ */
+int vars_get_by_name(const char *name, size_t len, struct sample *smp, const struct buffer *def)
+{
+	enum vars_scope scope;
+	struct vars *vars;
+	uint64_t hash;
+
+	/* resolve the scope prefix and hash the bare name */
+	if (!vars_hash_name(name, len, &scope, &hash, NULL))
+		return 0;
+
+	/* pick the variable list matching the scope */
+	vars = get_vars(smp->sess, smp->strm, scope);
+	if (!vars || vars->scope != scope)
+		return 0;
+
+	return var_to_smp(vars, hash, smp, def);
+}
+
+/* Fills sample <smp> with the content of the variable described by
+ * <var_desc>.
+ *
+ * Keep in mind that the sample content is duplicated by using smp_dup() and
+ * therefore lives in a pre-allocated trash chunk as returned by
+ * get_trash_chunk().
+ *
+ * If the variable is not valid in this scope, 0 is always returned.
+ * If the variable is valid but not found, either the default value <def> is
+ * returned if not NULL, or zero is returned.
+ *
+ * Returns 1 if the sample is filled, otherwise it returns 0.
+ */
+int vars_get_by_desc(const struct var_desc *var_desc, struct sample *smp, const struct buffer *def)
+{
+	struct vars *vars = get_vars(smp->sess, smp->strm, var_desc->scope);
+
+	/* the scope may not be reachable at this point of processing */
+	if (!vars || vars->scope != var_desc->scope)
+		return 0;
+
+	return var_to_smp(vars, var_desc->name_hash, smp, def);
+}
+
+/* Executes a set-var / set-var-fmt action: evaluates either the rule's
+ * log-format string or its sample expression, then stores the resulting
+ * sample into the target variable with the rule's conditions.
+ * Always returns ACT_RET_CONT even if an error occurs.
+ */
+static enum act_return action_store(struct act_rule *rule, struct proxy *px,
+                                    struct session *sess, struct stream *s, int flags)
+{
+	struct buffer *fmtstr = NULL;
+	struct sample smp;
+	int dir;
+
+	/* derive the sample direction from the rule's origin */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON: dir = SMP_OPT_DIR_REQ; break;
+	case ACT_F_TCP_REQ_SES: dir = SMP_OPT_DIR_REQ; break;
+	case ACT_F_TCP_REQ_CNT: dir = SMP_OPT_DIR_REQ; break;
+	case ACT_F_TCP_RES_CNT: dir = SMP_OPT_DIR_RES; break;
+	case ACT_F_HTTP_REQ:    dir = SMP_OPT_DIR_REQ; break;
+	case ACT_F_HTTP_RES:    dir = SMP_OPT_DIR_RES; break;
+	case ACT_F_TCP_CHK:     dir = SMP_OPT_DIR_REQ; break;
+	case ACT_F_CFG_PARSER:  dir = SMP_OPT_DIR_REQ; break; /* not used anyway */
+	case ACT_F_CLI_PARSER:  dir = SMP_OPT_DIR_REQ; break; /* not used anyway */
+	default:
+		send_log(px, LOG_ERR, "Vars: internal error while execute action store.");
+		if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+			ha_alert("Vars: internal error while execute action store.\n");
+		return ACT_RET_CONT;
+	}
+
+	/* Process the expression. */
+	memset(&smp, 0, sizeof(smp));
+
+	if (!LIST_ISEMPTY(&rule->arg.vars.fmt)) {
+		/* a format-string is used */
+
+		fmtstr = alloc_trash_chunk();
+		if (!fmtstr) {
+			send_log(px, LOG_ERR, "Vars: memory allocation failure while processing store rule.");
+			if (!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE))
+				ha_alert("Vars: memory allocation failure while processing store rule.\n");
+			return ACT_RET_CONT;
+		}
+
+		/* execute the log-format expression */
+		fmtstr->data = sess_build_logline(sess, s, fmtstr->area, fmtstr->size, &rule->arg.vars.fmt);
+
+		/* convert it to a sample of type string as it's what the vars
+		 * API consumes; the single var_set() call below performs the
+		 * actual storage for both branches.
+		 */
+		smp_set_owner(&smp, px, sess, s, 0);
+		smp.data.type = SMP_T_STR;
+		smp.data.u.str = *fmtstr;
+	}
+	else {
+		/* an expression is used */
+		if (!sample_process(px, sess, s, dir|SMP_OPT_FINAL,
+		                    rule->arg.vars.expr, &smp))
+			return ACT_RET_CONT;
+	}
+
+	/* Store the sample, and ignore errors. */
+	var_set(rule->arg.vars.name_hash, rule->arg.vars.scope, &smp, rule->arg.vars.conditions);
+	free_trash_chunk(fmtstr);
+	return ACT_RET_CONT;
+}
+
+/* Action handler clearing a variable through the sample context. Always
+ * returns ACT_RET_CONT even if an error occurs.
+ */
+static enum act_return action_clear(struct act_rule *rule, struct proxy *px,
+                                    struct session *sess, struct stream *s, int flags)
+{
+	struct sample smp = { };
+
+	smp_set_owner(&smp, px, sess, s, SMP_OPT_FINAL);
+
+	/* errors from var_unset() are deliberately ignored */
+	var_unset(rule->arg.vars.name_hash, rule->arg.vars.scope, &smp);
+	return ACT_RET_CONT;
+}
+
+/* Releases the resources attached to a set-var/set-var-fmt/unset-var rule:
+ * every node of the log-format list and the optional sample expression.
+ */
+static void release_store_rule(struct act_rule *rule)
+{
+	struct logformat_node *node, *next;
+
+	list_for_each_entry_safe(node, next, &rule->arg.vars.fmt, list) {
+		LIST_DELETE(&node->list);
+		release_sample_expr(node->expr);
+		free(node->arg);
+		free(node);
+	}
+
+	release_sample_expr(rule->arg.vars.expr);
+}
+
+/* These two functions check a variable name argument and replace the
+ * configuration string with its global descriptor (hash + scope), making
+ * later comparisons cheap. They return non-zero on success, zero on failure.
+ *
+ * The first one checks a sample-fetch argument, the second one a converter
+ * argument list.
+ */
+static int smp_check_var(struct arg *args, char **err)
+{
+	return vars_check_arg(&args[0], err);
+}
+
+static int conv_check_var(struct arg *args, struct sample_conv *conv,
+                          const char *file, int line, char **err_msg)
+{
+	uint cond_flags = 0;
+	int idx = 1;
+	int ok = vars_check_arg(&args[0], err_msg);
+
+	/* validate the trailing condition keywords; the resulting flags are
+	 * discarded here, the conditions are parsed again at run time by
+	 * smp_conv_store().
+	 */
+	while (ok && args[idx].type == ARGT_STR)
+		ok = vars_parse_cond_param(&args[idx++].data.str, &cond_flags, err_msg);
+
+	return ok;
+}
+
+/* This function is a common parser for using variables. It understands
+ * the format:
+ *
+ *   set-var-fmt(<variable-name>[,<cond> ...]) <format-string>
+ *   set-var(<variable-name>[,<cond> ...]) <expression>
+ *   unset-var(<variable-name>)
+ *
+ * It returns ACT_RET_PRS_ERR if it fails and <err> is filled with an error
+ * message. Otherwise it returns ACT_RET_PRS_OK and the variable <expr>
+ * is filled with the pointer to the expression to execute. The proxy is
+ * only used to retrieve the ->conf entries.
+ */
+static enum act_parse_ret parse_store(const char **args, int *arg, struct proxy *px,
+                                      struct act_rule *rule, char **err)
+{
+	const char *var_name = args[*arg-1];
+	int var_len;
+	const char *kw_name;
+	int flags = 0, set_var = 0; /* 0=unset-var, 1=set-var, 2=set-var-fmt */
+	struct sample empty_smp = { };
+	struct ist condition = IST_NULL;
+	struct ist var = IST_NULL;
+	struct ist varname_ist = IST_NULL;
+
+	/* determine the action from the keyword prefix */
+	if (strncmp(var_name, "set-var-fmt", 11) == 0) {
+		var_name += 11;
+		set_var = 2;
+	}
+	else if (strncmp(var_name, "set-var", 7) == 0) {
+		var_name += 7;
+		set_var = 1;
+	}
+	else if (strncmp(var_name, "unset-var", 9) == 0) {
+		var_name += 9;
+		set_var = 0;
+	}
+
+	if (*var_name != '(') {
+		memprintf(err, "invalid or incomplete action '%s'. Expects 'set-var(<var-name>)', 'set-var-fmt(<var-name>)' or 'unset-var(<var-name>)'",
+		          args[*arg-1]);
+		return ACT_RET_PRS_ERR;
+	}
+	var_name++; /* jump the '(' */
+	var_len = strlen(var_name);
+	var_len--; /* remove the ')' */
+	if (var_name[var_len] != ')') {
+		memprintf(err, "incomplete argument after action '%s'. Expects 'set-var(<var-name>)', 'set-var-fmt(<var-name>)' or 'unset-var(<var-name>)'",
+		          args[*arg-1]);
+		return ACT_RET_PRS_ERR;
+	}
+
+	/* Parse the optional conditions following the variable name. */
+	var = ist2(var_name, var_len);
+	varname_ist = istsplit(&var, ',');
+	var_len = istlen(varname_ist);
+
+	condition = istsplit(&var, ',');
+
+	if (istlen(condition) && set_var == 0) {
+		memprintf(err, "unset-var does not expect parameters after the variable name. Only \"set-var\" and \"set-var-fmt\" manage conditions");
+		return ACT_RET_PRS_ERR;
+	}
+
+	while (istlen(condition)) {
+		struct buffer cond = {};
+
+		chunk_initlen(&cond, istptr(condition), 0, istlen(condition));
+		if (vars_parse_cond_param(&cond, &rule->arg.vars.conditions, err) == 0)
+			return ACT_RET_PRS_ERR;
+
+		condition = istsplit(&var, ',');
+	}
+
+	LIST_INIT(&rule->arg.vars.fmt);
+	if (!vars_hash_name(var_name, var_len, &rule->arg.vars.scope, &rule->arg.vars.name_hash, err))
+		return ACT_RET_PRS_ERR;
+
+	/* proc-scoped variables are created up front and kept permanently; a
+	 * creation failure must be reported as a parsing error (0 would be
+	 * ACT_RET_PRS_OK), with <err> filled for the caller.
+	 */
+	if (rule->arg.vars.scope == SCOPE_PROC &&
+	    !var_set(rule->arg.vars.name_hash, rule->arg.vars.scope, &empty_smp, VF_CREATEONLY|VF_PERMANENT)) {
+		memprintf(err, "failed to register the 'proc' scope variable for action '%s'", args[*arg-1]);
+		return ACT_RET_PRS_ERR;
+	}
+
+	/* There is no fetch method when variable is unset. Just set the right
+	 * action and return. */
+	if (!set_var) {
+		rule->action = ACT_CUSTOM;
+		rule->action_ptr = action_clear;
+		rule->release_ptr = release_store_rule;
+		return ACT_RET_PRS_OK;
+	}
+
+	kw_name = args[*arg-1];
+
+	/* select the fetch validity flags and args context matching the rule
+	 * location.
+	 */
+	switch (rule->from) {
+	case ACT_F_TCP_REQ_CON:
+		flags = SMP_VAL_FE_CON_ACC;
+		px->conf.args.ctx = ARGC_TCO;
+		break;
+	case ACT_F_TCP_REQ_SES:
+		flags = SMP_VAL_FE_SES_ACC;
+		px->conf.args.ctx = ARGC_TSE;
+		break;
+	case ACT_F_TCP_REQ_CNT:
+		if (px->cap & PR_CAP_FE)
+			flags |= SMP_VAL_FE_REQ_CNT;
+		if (px->cap & PR_CAP_BE)
+			flags |= SMP_VAL_BE_REQ_CNT;
+		px->conf.args.ctx = ARGC_TRQ;
+		break;
+	case ACT_F_TCP_RES_CNT:
+		if (px->cap & PR_CAP_FE)
+			flags |= SMP_VAL_FE_RES_CNT;
+		if (px->cap & PR_CAP_BE)
+			flags |= SMP_VAL_BE_RES_CNT;
+		px->conf.args.ctx = ARGC_TRS;
+		break;
+	case ACT_F_HTTP_REQ:
+		if (px->cap & PR_CAP_FE)
+			flags |= SMP_VAL_FE_HRQ_HDR;
+		if (px->cap & PR_CAP_BE)
+			flags |= SMP_VAL_BE_HRQ_HDR;
+		px->conf.args.ctx = ARGC_HRQ;
+		break;
+	case ACT_F_HTTP_RES:
+		if (px->cap & PR_CAP_FE)
+			flags |= SMP_VAL_FE_HRS_HDR;
+		if (px->cap & PR_CAP_BE)
+			flags |= SMP_VAL_BE_HRS_HDR;
+		px->conf.args.ctx = ARGC_HRS;
+		break;
+	case ACT_F_TCP_CHK:
+		flags = SMP_VAL_BE_CHK_RUL;
+		px->conf.args.ctx = ARGC_TCK;
+		break;
+	case ACT_F_CFG_PARSER:
+		flags = SMP_VAL_CFG_PARSER;
+		px->conf.args.ctx = ARGC_CFG;
+		break;
+	case ACT_F_CLI_PARSER:
+		flags = SMP_VAL_CLI_PARSER;
+		px->conf.args.ctx = ARGC_CLI;
+		break;
+	default:
+		memprintf(err,
+		          "internal error, unexpected rule->from=%d, please report this bug!",
+		          rule->from);
+		return ACT_RET_PRS_ERR;
+	}
+
+	if (set_var == 2) { /* set-var-fmt */
+		if (!parse_logformat_string(args[*arg], px, &rule->arg.vars.fmt, 0, flags, err))
+			return ACT_RET_PRS_ERR;
+
+		(*arg)++;
+
+		/* for late error reporting */
+		free(px->conf.lfs_file);
+		px->conf.lfs_file = strdup(px->conf.args.file);
+		px->conf.lfs_line = px->conf.args.line;
+	} else {
+		/* set-var */
+		rule->arg.vars.expr = sample_parse_expr((char **)args, arg, px->conf.args.file,
+		                                        px->conf.args.line, err, &px->conf.args, NULL);
+		if (!rule->arg.vars.expr)
+			return ACT_RET_PRS_ERR;
+
+		if (!(rule->arg.vars.expr->fetch->val & flags)) {
+			memprintf(err,
+			          "fetch method '%s' extracts information from '%s', none of which is available here",
+			          kw_name, sample_src_names(rule->arg.vars.expr->fetch->use));
+			/* use the expression destructor so nested args are freed
+			 * too (a bare free() would leak them), and clear the
+			 * pointer to avoid any later double release.
+			 */
+			release_sample_expr(rule->arg.vars.expr);
+			rule->arg.vars.expr = NULL;
+			return ACT_RET_PRS_ERR;
+		}
+	}
+
+	rule->action = ACT_CUSTOM;
+	rule->action_ptr = action_store;
+	rule->release_ptr = release_store_rule;
+	return ACT_RET_PRS_OK;
+}
+
+
+/* parses a global "set-var" directive. It will create a temporary rule and
+ * expression that are parsed, processed, and released on the fly so that we
+ * respect the real set-var syntax. These directives take the following format:
+ * set-var <name> <expression>
+ * set-var-fmt <name> <fmt>
+ * Note that parse_store() expects "set-var(name) <expression>" so we have to
+ * temporarily replace the keyword here.
+ */
+static int vars_parse_global_set_var(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ struct proxy px = {
+ .id = "CFG",
+ .conf.args = { .file = file, .line = line, },
+ };
+ struct act_rule rule = {
+ .arg.vars.scope = SCOPE_PROC,
+ .from = ACT_F_CFG_PARSER,
+ .conf = { .file = (char *)file, .line = line, },
+ };
+ enum obj_type objt = OBJ_TYPE_NONE;
+ struct session *sess = NULL;
+ enum act_parse_ret p_ret;
+ char *old_arg1;
+ char *tmp_arg1;
+ int arg = 2; // variable name
+ int ret = -1;
+ int use_fmt = 0;
+
+ LIST_INIT(&px.conf.args.list);
+
+ use_fmt = strcmp(args[0], "set-var-fmt") == 0;
+
+ if (!*args[1] || !*args[2]) {
+ if (use_fmt)
+ memprintf(err, "'%s' requires a process-wide variable name ('proc.<name>') and a format string.", args[0]);
+ else
+ memprintf(err, "'%s' requires a process-wide variable name ('proc.<name>') and a sample expression.", args[0]);
+ goto end;
+ }
+
+ tmp_arg1 = NULL;
+ if (!memprintf(&tmp_arg1, "set-var%s(%s)", use_fmt ? "-fmt" : "", args[1]))
+ goto end;
+
+ /* parse_store() will always return a message in <err> on error */
+ old_arg1 = args[1]; args[1] = tmp_arg1;
+ p_ret = parse_store((const char **)args, &arg, &px, &rule, err);
+ free(args[1]); args[1] = old_arg1;
+
+ if (p_ret != ACT_RET_PRS_OK)
+ goto end;
+
+ if (rule.arg.vars.scope != SCOPE_PROC) {
+ memprintf(err, "'%s': cannot set variable '%s', only scope 'proc' is permitted in the global section.", args[0], args[1]);
+ goto end;
+ }
+
+ if (smp_resolve_args(&px, err) != 0) {
+ release_sample_expr(rule.arg.vars.expr);
+ indent_msg(err, 2);
+ goto end;
+ }
+
+ if (use_fmt && !(sess = session_new(&px, NULL, &objt))) {
+ release_sample_expr(rule.arg.vars.expr);
+ memprintf(err, "'%s': out of memory when trying to set variable '%s' in the global section.", args[0], args[1]);
+ goto end;
+ }
+
+ action_store(&rule, &px, sess, NULL, 0);
+ release_sample_expr(rule.arg.vars.expr);
+ if (sess)
+ session_free(sess);
+
+ ret = 0;
+ end:
+ return ret;
+}
+
+/* parse CLI's "get var <name>" */
+static int vars_parse_cli_get_var(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct vars *vars;
+ struct sample smp = { };
+ int i;
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ if (!*args[2])
+ return cli_err(appctx, "Missing process-wide variable identifier.\n");
+
+ vars = get_vars(NULL, NULL, SCOPE_PROC);
+ if (!vars || vars->scope != SCOPE_PROC)
+ return 0;
+
+ if (!vars_get_by_name(args[2], strlen(args[2]), &smp, NULL))
+ return cli_err(appctx, "Variable not found.\n");
+
+ /* the sample returned by vars_get_by_name() is allocated into a trash
+ * chunk so we have no constraint to manipulate it.
+ */
+ chunk_printf(&trash, "%s: type=%s value=", args[2], smp_to_type[smp.data.type]);
+
+ if (!sample_casts[smp.data.type][SMP_T_STR] ||
+ !sample_casts[smp.data.type][SMP_T_STR](&smp)) {
+ chunk_appendf(&trash, "(undisplayable)\n");
+ } else {
+			/* Display the displayable chars. */
+ b_putchr(&trash, '<');
+ for (i = 0; i < smp.data.u.str.data; i++) {
+ if (isprint((unsigned char)smp.data.u.str.area[i]))
+ b_putchr(&trash, smp.data.u.str.area[i]);
+ else
+ b_putchr(&trash, '.');
+ }
+ b_putchr(&trash, '>');
+ b_putchr(&trash, '\n');
+ b_putchr(&trash, 0);
+ }
+ return cli_msg(appctx, LOG_INFO, trash.area);
+}
+
+/* parse CLI's "set var <name>". It accepts:
+ * - set var <name> <expression>
+ * - set var <name> expr <expression>
+ * - set var <name> fmt <format>
+ */
+static int vars_parse_cli_set_var(char **args, char *payload, struct appctx *appctx, void *private)
+{
+ struct proxy px = {
+ .id = "CLI",
+ .conf.args = { .file = "CLI", .line = 0, },
+ };
+ struct act_rule rule = {
+ .arg.vars.scope = SCOPE_PROC,
+ .from = ACT_F_CLI_PARSER,
+ .conf = { .file = "CLI", .line = 0, },
+ };
+ enum obj_type objt = OBJ_TYPE_NONE;
+ struct session *sess = NULL;
+ enum act_parse_ret p_ret;
+ const char *tmp_args[3];
+ int tmp_arg;
+ char *tmp_act;
+ char *err = NULL;
+ int nberr;
+ int use_fmt = 0;
+
+ LIST_INIT(&px.conf.args.list);
+
+ if (!cli_has_level(appctx, ACCESS_LVL_OPER))
+ return 1;
+
+ if (!*args[2])
+ return cli_err(appctx, "Missing process-wide variable identifier.\n");
+
+ if (!*args[3])
+ return cli_err(appctx, "Missing either 'expr', 'fmt' or expression.\n");
+
+ if (*args[4]) {
+ /* this is the long format */
+ if (strcmp(args[3], "fmt") == 0)
+ use_fmt = 1;
+ else if (strcmp(args[3], "expr") != 0) {
+ memprintf(&err, "'%s %s': arg type must be either 'expr' or 'fmt' but got '%s'.", args[0], args[1], args[3]);
+ goto fail;
+ }
+ }
+
+ tmp_act = NULL;
+ if (!memprintf(&tmp_act, "set-var%s(%s)", use_fmt ? "-fmt" : "", args[2])) {
+ memprintf(&err, "memory allocation error.");
+ goto fail;
+ }
+
+ /* parse_store() will always return a message in <err> on error */
+ tmp_args[0] = tmp_act;
+ tmp_args[1] = (*args[4]) ? args[4] : args[3];
+ tmp_args[2] = "";
+ tmp_arg = 1; // must point to the first arg after the action
+ p_ret = parse_store(tmp_args, &tmp_arg, &px, &rule, &err);
+ free(tmp_act);
+
+ if (p_ret != ACT_RET_PRS_OK)
+ goto fail;
+
+ if (rule.arg.vars.scope != SCOPE_PROC) {
+ memprintf(&err, "'%s %s': cannot set variable '%s', only scope 'proc' is permitted here.", args[0], args[1], args[2]);
+ goto fail;
+ }
+
+ err = NULL;
+ nberr = smp_resolve_args(&px, &err);
+ if (nberr) {
+ release_sample_expr(rule.arg.vars.expr);
+ indent_msg(&err, 2);
+ goto fail;
+ }
+
+ if (use_fmt && !(sess = session_new(&px, NULL, &objt))) {
+ release_sample_expr(rule.arg.vars.expr);
+ memprintf(&err, "memory allocation error.");
+ goto fail;
+ }
+
+ action_store(&rule, &px, sess, NULL, 0);
+ release_sample_expr(rule.arg.vars.expr);
+ if (sess)
+ session_free(sess);
+
+ appctx->st0 = CLI_ST_PROMPT;
+ return 0;
+ fail:
+ return cli_dynerr(appctx, err);
+}
+
+static int vars_max_size(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err, unsigned int *limit)
+{
+ char *error;
+
+ *limit = strtol(args[1], &error, 10);
+ if (*error != 0) {
+ memprintf(err, "%s: '%s' is an invalid size", args[0], args[1]);
+ return -1;
+ }
+ return 0;
+}
+
+static int vars_max_size_global(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_global_limit);
+}
+
+static int vars_max_size_proc(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_proc_limit);
+}
+
+static int vars_max_size_sess(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_sess_limit);
+}
+
+static int vars_max_size_txn(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_txn_limit);
+}
+
+static int vars_max_size_reqres(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_reqres_limit);
+}
+
+static int vars_max_size_check(char **args, int section_type, struct proxy *curpx,
+ const struct proxy *defpx, const char *file, int line,
+ char **err)
+{
+ return vars_max_size(args, section_type, curpx, defpx, file, line, err, &var_check_limit);
+}
+
+/* early boot initialization */
+static void vars_init()
+{
+ var_name_hash_seed = ha_random64();
+ /* Initialize process vars */
+ vars_init_head(&proc_vars, SCOPE_PROC);
+}
+
+INITCALL0(STG_PREPARE, vars_init);
+
+static struct sample_fetch_kw_list sample_fetch_keywords = {ILH, {
+
+ { "var", smp_fetch_var, ARG2(1,STR,STR), smp_check_var, SMP_T_ANY, SMP_USE_CONST },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_fetches, &sample_fetch_keywords);
+
+static struct sample_conv_kw_list sample_conv_kws = {ILH, {
+ { "set-var", smp_conv_store, ARG5(1,STR,STR,STR,STR,STR), conv_check_var, SMP_T_ANY, SMP_T_ANY },
+ { "unset-var", smp_conv_clear, ARG1(1,STR), conv_check_var, SMP_T_ANY, SMP_T_ANY },
+ { /* END */ },
+}};
+
+INITCALL1(STG_REGISTER, sample_register_convs, &sample_conv_kws);
+
+static struct action_kw_list tcp_req_conn_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_conn_keywords_register, &tcp_req_conn_kws);
+
+static struct action_kw_list tcp_req_sess_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_sess_keywords_register, &tcp_req_sess_kws);
+
+static struct action_kw_list tcp_req_cont_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_req_cont_keywords_register, &tcp_req_cont_kws);
+
+static struct action_kw_list tcp_res_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_res_cont_keywords_register, &tcp_res_kws);
+
+static struct action_kw_list tcp_check_kws = {ILH, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, tcp_check_keywords_register, &tcp_check_kws);
+
+static struct action_kw_list http_req_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_req_keywords_register, &http_req_kws);
+
+static struct action_kw_list http_res_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_kws);
+
+static struct action_kw_list http_after_res_kws = { { }, {
+ { "set-var-fmt", parse_store, KWF_MATCH_PREFIX },
+ { "set-var", parse_store, KWF_MATCH_PREFIX },
+ { "unset-var", parse_store, KWF_MATCH_PREFIX },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, http_after_res_keywords_register, &http_after_res_kws);
+
+static struct cfg_kw_list cfg_kws = {{ },{
+ { CFG_GLOBAL, "set-var", vars_parse_global_set_var },
+ { CFG_GLOBAL, "set-var-fmt", vars_parse_global_set_var },
+ { CFG_GLOBAL, "tune.vars.global-max-size", vars_max_size_global },
+ { CFG_GLOBAL, "tune.vars.proc-max-size", vars_max_size_proc },
+ { CFG_GLOBAL, "tune.vars.sess-max-size", vars_max_size_sess },
+ { CFG_GLOBAL, "tune.vars.txn-max-size", vars_max_size_txn },
+ { CFG_GLOBAL, "tune.vars.reqres-max-size", vars_max_size_reqres },
+ { CFG_GLOBAL, "tune.vars.check-max-size", vars_max_size_check },
+ { /* END */ }
+}};
+
+INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
+
+
+/* register cli keywords */
+static struct cli_kw_list cli_kws = {{ },{
+ { { "get", "var", NULL }, "get var <name> : retrieve contents of a process-wide variable", vars_parse_cli_get_var, NULL },
+ { { "set", "var", NULL }, "set var <name> [fmt|expr] {<fmt>|<expr>}: set variable from an expression or a format", vars_parse_cli_set_var, NULL, NULL, NULL, ACCESS_EXPERIMENTAL },
+ { { NULL }, NULL, NULL, NULL }
+}};
+INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
diff --git a/src/version.c b/src/version.c
new file mode 100644
index 0000000..e7bb748
--- /dev/null
+++ b/src/version.c
@@ -0,0 +1,28 @@
+/*
+ * Version reporting : all user-visible version information should come from
+ * this file so that rebuilding only this one is enough to report the latest
+ * code version.
+ */
+
+#include <haproxy/global.h>
+#include <haproxy/version.h>
+
+/* These ones are made variables and not constants so that they are stored into
+ * the data region and prominently appear in core files.
+ */
+char haproxy_version_here[] = "HAProxy version follows";
+char haproxy_version[] = HAPROXY_VERSION;
+char haproxy_date[] = HAPROXY_DATE;
+char stats_version_string[] = STATS_VERSION_STRING;
+
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#define SANITIZE_STRING " with address sanitizer"
+#else
+#define SANITIZE_STRING ""
+#endif
+
+#if defined(__clang_version__)
+REGISTER_BUILD_OPTS("Built with clang compiler version " __clang_version__ "" SANITIZE_STRING);
+#elif defined(__VERSION__)
+REGISTER_BUILD_OPTS("Built with gcc compiler version " __VERSION__ "" SANITIZE_STRING);
+#endif
diff --git a/src/wdt.c b/src/wdt.c
new file mode 100644
index 0000000..865bb7b
--- /dev/null
+++ b/src/wdt.c
@@ -0,0 +1,193 @@
+/*
+ * Thread lockup detection
+ *
+ * Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <signal.h>
+#include <time.h>
+
+#include <haproxy/api.h>
+#include <haproxy/clock.h>
+#include <haproxy/debug.h>
+#include <haproxy/errors.h>
+#include <haproxy/global.h>
+#include <haproxy/signal-t.h>
+#include <haproxy/thread.h>
+#include <haproxy/tools.h>
+
+
+/*
+ * It relies on timer_create() and timer_settime() which are only available in
+ * this case.
+ */
+#if defined(USE_RT) && defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) && defined(_POSIX_THREAD_CPUTIME)
+
+/* define a dummy value to designate "no timer". Use only 32 bits. */
+#ifndef TIMER_INVALID
+#define TIMER_INVALID ((timer_t)(unsigned long)(0xfffffffful))
+#endif
+
+static timer_t per_thread_wd_timer[MAX_THREADS];
+
+/* Setup (or ping) the watchdog timer for thread <thr>. Returns non-zero on
+ * success, zero on failure. It interrupts once per second of CPU time. It
+ * happens that timers based on the CPU time are not automatically re-armed
+ * so we only use the value and leave the interval unset.
+ */
+int wdt_ping(int thr)
+{
+ struct itimerspec its;
+
+ its.it_value.tv_sec = 1; its.it_value.tv_nsec = 0;
+ its.it_interval.tv_sec = 0; its.it_interval.tv_nsec = 0;
+ return timer_settime(per_thread_wd_timer[thr], 0, &its, NULL) == 0;
+}
+
+/* This is the WDTSIG signal handler */
+void wdt_handler(int sig, siginfo_t *si, void *arg)
+{
+ unsigned long long n, p;
+ ulong thr_bit;
+ int thr, tgrp;
+
+ switch (si->si_code) {
+ case SI_TIMER:
+ /* A thread's timer fired, the thread ID is in si_int. We have
+ * no guarantee that the thread handling this signal is in any
+ * way related to the one triggering it, so we need to retrieve
+ * the thread number from there. Note: this thread might
+ * continue to execute in parallel.
+ */
+ thr = si->si_value.sival_int;
+
+ /* cannot happen unless an unknown timer tries to play with our
+ * nerves. Let's die for now if this happens.
+ */
+ if (thr < 0 || thr >= global.nbthread)
+ break;
+
+ tgrp = ha_thread_info[thr].tgid;
+ thr_bit = ha_thread_info[thr].ltid_bit;
+ p = ha_thread_ctx[thr].prev_cpu_time;
+ n = now_cpu_time_thread(thr);
+
+ /* not yet reached the deadline of 1 sec,
+ * or p wasn't initialized yet
+ */
+ if (!p || n - p < 1000000000UL)
+ goto update_and_leave;
+
+ if ((_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].flags) & TH_FL_SLEEPING) ||
+ (_HA_ATOMIC_LOAD(&ha_tgroup_ctx[tgrp-1].threads_harmless) & thr_bit)) {
+ /* This thread is currently doing exactly nothing
+ * waiting in the poll loop (unlikely but possible),
+ * waiting for all other threads to join the rendez-vous
+ * point (common), or waiting for another thread to
+ * finish an isolated operation (unlikely but possible).
+ */
+ goto update_and_leave;
+ }
+
+ /* So the thread indeed appears locked up. In order to be
+ * certain that we're not witnessing an exceptional spike of
+ * CPU usage due to a configuration issue (like running tens
+ * of thousands of tasks in a single loop), we'll check if the
+ * scheduler is still alive by setting the TH_FL_STUCK flag
+ * that the scheduler clears when switching to the next task.
+ * If it's already set, then it's our second call with no
+ * progress and the thread is dead.
+ */
+ if (!(_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].flags) & TH_FL_STUCK)) {
+ _HA_ATOMIC_OR(&ha_thread_ctx[thr].flags, TH_FL_STUCK);
+ goto update_and_leave;
+ }
+
+		/* No doubt now, there's no hope to recover, die loudly! */
+ break;
+
+#if defined(USE_THREAD) && defined(SI_TKILL) /* Linux uses this */
+
+ case SI_TKILL:
+ /* we got a pthread_kill, stop on it */
+ thr = tid;
+ break;
+
+#elif defined(USE_THREAD) && defined(SI_LWP) /* FreeBSD uses this */
+
+ case SI_LWP:
+ /* we got a pthread_kill, stop on it */
+ thr = tid;
+ break;
+
+#endif
+ default:
+ /* unhandled other conditions */
+ return;
+ }
+
+ /* By default we terminate. If we're not on the victim thread, better
+ * bounce the signal there so that we produce a cleaner stack trace
+ * with the other thread interrupted exactly where it was running and
+ * the current one not involved in this.
+ */
+#ifdef USE_THREAD
+ if (thr != tid)
+ ha_tkill(thr, sig);
+ else
+#endif
+ ha_panic();
+ return;
+
+ update_and_leave:
+ wdt_ping(thr);
+}
+
+int init_wdt_per_thread()
+{
+ if (!clock_setup_signal_timer(&per_thread_wd_timer[tid], WDTSIG, tid))
+ goto fail1;
+
+ if (!wdt_ping(tid))
+ goto fail2;
+
+ return 1;
+
+ fail2:
+ timer_delete(per_thread_wd_timer[tid]);
+ fail1:
+ per_thread_wd_timer[tid] = TIMER_INVALID;
+ ha_warning("Failed to setup watchdog timer for thread %u, disabling lockup detection.\n", tid);
+ return 1;
+}
+
+void deinit_wdt_per_thread()
+{
+ if (per_thread_wd_timer[tid] != TIMER_INVALID)
+ timer_delete(per_thread_wd_timer[tid]);
+}
+
+/* registers the watchdog signal handler and returns 0. This sets up the signal
+ * handler for WDTSIG, so it must be called once per process.
+ */
+int init_wdt()
+{
+ struct sigaction sa;
+
+ sa.sa_handler = NULL;
+ sa.sa_sigaction = wdt_handler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ sigaction(WDTSIG, &sa, NULL);
+ return ERR_NONE;
+}
+
+REGISTER_POST_CHECK(init_wdt);
+REGISTER_PER_THREAD_INIT(init_wdt_per_thread);
+REGISTER_PER_THREAD_DEINIT(deinit_wdt_per_thread);
+#endif
diff --git a/src/xprt_handshake.c b/src/xprt_handshake.c
new file mode 100644
index 0000000..33f7750
--- /dev/null
+++ b/src/xprt_handshake.c
@@ -0,0 +1,299 @@
+/*
+ * Pseudo-xprt to handle any handshake except the SSL handshake
+ *
+ * Copyright 2019 HAProxy Technologies, Olivier Houchard <ohouchard@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/connection.h>
+
+struct xprt_handshake_ctx {
+ struct connection *conn;
+ struct wait_event *subs;
+ struct wait_event wait_event;
+ const struct xprt_ops *xprt;
+ void *xprt_ctx;
+};
+
+DECLARE_STATIC_POOL(xprt_handshake_ctx_pool, "xprt_handshake_ctx", sizeof(struct xprt_handshake_ctx));
+
+/* This XPRT doesn't take care of sending or receiving data, once its handshake
+ * is done, it just removes itself
+ */
+static size_t xprt_handshake_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
+{
+ return 0;
+}
+
+static size_t xprt_handshake_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
+{
+ return 0;
+}
+
+/* xprt_handshake_io_cb is exported to see it resolved in "show fd" */
+struct task *xprt_handshake_io_cb(struct task *t, void *bctx, unsigned int state)
+{
+ struct xprt_handshake_ctx *ctx = bctx;
+ struct connection *conn = ctx->conn;
+
+ if (conn->flags & CO_FL_SOCKS4_SEND)
+ if (!conn_send_socks4_proxy_request(conn)) {
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND,
+ &ctx->wait_event);
+
+ goto out;
+ }
+
+ if (conn->flags & CO_FL_SOCKS4_RECV)
+ if (!conn_recv_socks4_proxy_response(conn)) {
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_RECV,
+ &ctx->wait_event);
+ goto out;
+ }
+
+ if (conn->flags & CO_FL_ACCEPT_CIP)
+ if (!conn_recv_netscaler_cip(conn, CO_FL_ACCEPT_CIP)) {
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_RECV,
+ &ctx->wait_event);
+ goto out;
+ }
+
+ if (conn->flags & CO_FL_ACCEPT_PROXY)
+ if (!conn_recv_proxy(conn, CO_FL_ACCEPT_PROXY)) {
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_RECV,
+ &ctx->wait_event);
+ goto out;
+ }
+
+ if (conn->flags & CO_FL_SEND_PROXY)
+ if (!conn_send_proxy(conn, CO_FL_SEND_PROXY)) {
+ ctx->xprt->subscribe(conn, ctx->xprt_ctx, SUB_RETRY_SEND,
+ &ctx->wait_event);
+ goto out;
+ }
+
+out:
+	/* Wake the stream if we're done with the handshake, or we have a
+	 * connection error.
+	 */
+ if ((conn->flags & CO_FL_ERROR) ||
+ !(conn->flags & CO_FL_HANDSHAKE)) {
+ int ret = 0;
+ int woke = 0;
+ int was_conn_ctx = 0;
+
+ /* On error, wake any waiter */
+ if (ctx->subs) {
+ tasklet_wakeup(ctx->subs->tasklet);
+ ctx->subs->events = 0;
+ woke = 1;
+ ctx->subs = NULL;
+ }
+
+ /* Remove ourself from the xprt chain */
+ if (ctx->wait_event.events != 0)
+ ctx->xprt->unsubscribe(ctx->conn,
+ ctx->xprt_ctx,
+ ctx->wait_event.events,
+ &ctx->wait_event);
+ if (conn->xprt_ctx == ctx) {
+ conn->xprt_ctx = ctx->xprt_ctx;
+ conn->xprt = ctx->xprt;
+ was_conn_ctx = 1;
+ } else
+ conn->xprt->remove_xprt(conn, conn->xprt_ctx, ctx,
+ ctx->xprt, ctx->xprt_ctx);
+ /* If we're the first xprt for the connection, let the
+ * upper layers know. If no mux was set up yet, then call
+ * conn_create_mux, and if we have a mux, and it has a wake
+ * method, call it too.
+ */
+ if (was_conn_ctx) {
+ if (!ctx->conn->mux)
+ ret = conn_create_mux(ctx->conn);
+ if (ret >= 0 && !woke && ctx->conn->mux && ctx->conn->mux->wake)
+ ret = ctx->conn->mux->wake(ctx->conn);
+ }
+ tasklet_free(ctx->wait_event.tasklet);
+ pool_free(xprt_handshake_ctx_pool, ctx);
+ t = NULL;
+ }
+ return t;
+}
+
+static int xprt_handshake_start(struct connection *conn, void *xprt_ctx)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ if (ctx->xprt->start) {
+ int ret;
+
+ ret = ctx->xprt->start(conn, ctx->xprt_ctx);
+ if (ret < 0)
+ return ret;
+ }
+ tasklet_wakeup(ctx->wait_event.tasklet);
+
+ return 0;
+}
+
+static int xprt_handshake_init(struct connection *conn, void **xprt_ctx)
+{
+ struct xprt_handshake_ctx *ctx;
+ /* already initialized */
+ if (*xprt_ctx)
+ return 0;
+
+ ctx = pool_alloc(xprt_handshake_ctx_pool);
+ if (!ctx) {
+ conn->err_code = CO_ER_SSL_NO_MEM;
+ return -1;
+ }
+ ctx->conn = conn;
+ ctx->wait_event.tasklet = tasklet_new();
+ if (!ctx->wait_event.tasklet) {
+ conn->err_code = CO_ER_SSL_NO_MEM;
+ pool_free(xprt_handshake_ctx_pool, ctx);
+ return -1;
+ }
+ ctx->wait_event.tasklet->process = xprt_handshake_io_cb;
+ ctx->wait_event.tasklet->context = ctx;
+ ctx->wait_event.events = 0;
+
+ ctx->xprt = NULL;
+ ctx->xprt_ctx = NULL;
+ ctx->subs = NULL;
+ *xprt_ctx = ctx;
+
+ return 0;
+}
+
+static void xprt_handshake_close(struct connection *conn, void *xprt_ctx)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ if (ctx) {
+ if (ctx->wait_event.events != 0)
+ ctx->xprt->unsubscribe(ctx->conn, ctx->xprt_ctx,
+ ctx->wait_event.events,
+ &ctx->wait_event);
+ if (ctx->subs) {
+ ctx->subs->events = 0;
+ tasklet_wakeup(ctx->subs->tasklet);
+ }
+
+ if (ctx->xprt && ctx->xprt->close)
+ ctx->xprt->close(conn, ctx->xprt_ctx);
+ /* Remove any handshake flag, and if we were the connection
+ * xprt, get back to XPRT_RAW. If we're here because we
+		 * failed an outgoing connection, it will be retried using
+ * the same struct connection, and as xprt_handshake is a bit
+ * magic, because it requires a call to add_xprt(), it's better
+ * to fallback to the original XPRT to re-initiate the
+ * connection.
+ */
+ conn->flags &= ~CO_FL_HANDSHAKE;
+ if (conn->xprt == xprt_get(XPRT_HANDSHAKE))
+ conn->xprt = xprt_get(XPRT_RAW);
+ tasklet_free(ctx->wait_event.tasklet);
+ pool_free(xprt_handshake_ctx_pool, ctx);
+ }
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int xprt_handshake_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(ctx->subs && ctx->subs != es);
+
+ ctx->subs = es;
+ es->events |= event_type;
+ return 0;
+
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int xprt_handshake_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(ctx->subs && ctx->subs != es);
+
+ es->events &= ~event_type;
+ if (!es->events)
+ ctx->subs = NULL;
+
+ return 0;
+}
+
+/* Use the provided XPRT as an underlying XPRT, and provide the old one.
+ * Returns 0 on success, and non-zero on failure.
+ */
+static int xprt_handshake_add_xprt(struct connection *conn, void *xprt_ctx, void *toadd_ctx, const struct xprt_ops *toadd_ops, void **oldxprt_ctx, const struct xprt_ops **oldxprt_ops)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ if (oldxprt_ops)
+ *oldxprt_ops = ctx->xprt;
+ if (oldxprt_ctx)
+ *oldxprt_ctx = ctx->xprt_ctx;
+ ctx->xprt = toadd_ops;
+ ctx->xprt_ctx = toadd_ctx;
+
+ return 0;
+}
+
+/* Remove the specified xprt. If it is our underlying XPRT, remove it and
+ * return 0, otherwise just call the remove_xprt method from the underlying
+ * XPRT.
+ */
+static int xprt_handshake_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremove_ctx, const struct xprt_ops *newops, void *newctx)
+{
+ struct xprt_handshake_ctx *ctx = xprt_ctx;
+
+ if (ctx->xprt_ctx == toremove_ctx) {
+ ctx->xprt_ctx = newctx;
+ ctx->xprt = newops;
+ return 0;
+ }
+ return (ctx->xprt->remove_xprt(conn, ctx->xprt_ctx, toremove_ctx, newops, newctx));
+}
+
+struct xprt_ops xprt_handshake = {
+ .snd_buf = xprt_handshake_from_buf,
+ .rcv_buf = xprt_handshake_to_buf,
+ .subscribe = xprt_handshake_subscribe,
+ .unsubscribe = xprt_handshake_unsubscribe,
+ .remove_xprt = xprt_handshake_remove_xprt,
+ .add_xprt = xprt_handshake_add_xprt,
+ .init = xprt_handshake_init,
+ .start = xprt_handshake_start,
+ .close= xprt_handshake_close,
+ .rcv_pipe = NULL,
+ .snd_pipe = NULL,
+ .shutr = NULL,
+ .shutw = NULL,
+ .name = "HS",
+};
+
+static void __xprt_handshake_init(void)
+{
+ xprt_register(XPRT_HANDSHAKE, &xprt_handshake);
+}
+
+INITCALL0(STG_REGISTER, __xprt_handshake_init);
diff --git a/src/xprt_quic.c b/src/xprt_quic.c
new file mode 100644
index 0000000..eda113c
--- /dev/null
+++ b/src/xprt_quic.c
@@ -0,0 +1,175 @@
+/*
+ * QUIC xprt layer. Act as an abstraction between quic_conn and MUX layers.
+ *
+ * Copyright 2020 HAProxy Technologies, Frederic Lecaille <flecaille@haproxy.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <haproxy/api.h>
+#include <haproxy/connection.h>
+#include <haproxy/quic_conn.h>
+#include <haproxy/ssl_sock.h>
+#include <haproxy/quic_trace.h>
+#include <haproxy/trace.h>
+
+static void quic_close(struct connection *conn, void *xprt_ctx)
+{
+ struct ssl_sock_ctx *conn_ctx = xprt_ctx;
+ struct quic_conn *qc = conn_ctx->qc;
+
+ TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
+
+ /* Next application data can be dropped. */
+ qc->mux_state = QC_MUX_RELEASED;
+
+ /* If the quic-conn timer has already expired or if already in "connection close"
+ * state, free the quic-conn.
+ */
+ if (qc->flags & (QUIC_FL_CONN_EXP_TIMER|QUIC_FL_CONN_CLOSING)) {
+ quic_conn_release(qc);
+ qc = NULL;
+ goto leave;
+ }
+
+ /* Schedule a CONNECTION_CLOSE emission. If process stopping is in
+ * progress, quic-conn idle-timer will be scheduled immediately after
+ * its emission to ensure an immediate connection closing.
+ */
+ qc_check_close_on_released_mux(qc);
+ leave:
+ TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
+}
+
+/* Called from the upper layer, to subscribe <es> to events <event_type>. The
+ * event subscriber <es> is not allowed to change from a previous call as long
+ * as at least one event is still subscribed. The <event_type> must only be a
+ * combination of SUB_RETRY_RECV and SUB_RETRY_SEND. It always returns 0.
+ */
+static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ struct quic_conn *qc = conn->handle.qc;
+
+ TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
+
+ BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
+ BUG_ON(qc->subs && qc->subs != es);
+
+ es->events |= event_type;
+ qc->subs = es;
+
+ /* TODO implement a check_events to detect if subscriber should be
+ * woken up immediately ?
+ */
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
+
+ if (event_type & SUB_RETRY_SEND)
+ TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
+
+ TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
+
+ return 0;
+}
+
+/* Called from the upper layer, to unsubscribe <es> from events <event_type>.
+ * The <es> pointer is not allowed to differ from the one passed to the
+ * subscribe() call. It always returns zero.
+ */
+static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
+{
+ struct quic_conn *qc = conn->handle.qc;
+
+ TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
+
+ if (event_type & SUB_RETRY_RECV)
+ TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
+ if (event_type & SUB_RETRY_SEND)
+ TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
+
+ es->events &= ~event_type;
+ if (!es->events)
+ qc->subs = NULL;
+
+ /* TODO implement ignore_events similar to conn_unsubscribe() ? */
+
+ TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
+
+ return 0;
+}
+
+/* Store in <xprt_ctx> the context attached to <conn>.
+ * Returns always 0.
+ */
+static int qc_conn_init(struct connection *conn, void **xprt_ctx)
+{
+ struct quic_conn *qc = conn->handle.qc;
+
+ TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+ /* Ensure thread connection migration is finalized ASAP. */
+ if (qc->flags & QUIC_FL_CONN_AFFINITY_CHANGED)
+ qc_finalize_affinity_rebind(qc);
+
+ /* do not store the context if already set */
+ if (*xprt_ctx)
+ goto out;
+
+ *xprt_ctx = qc->xprt_ctx;
+
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+
+ return 0;
+}
+
+/* Start the QUIC transport layer */
+static int qc_xprt_start(struct connection *conn, void *ctx)
+{
+ int ret = 0;
+ struct quic_conn *qc;
+
+ qc = conn->handle.qc;
+ TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
+
+ /* mux-quic can now be considered ready. */
+ qc->mux_state = QC_MUX_READY;
+
+ ret = 1;
+ out:
+ TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
+ return ret;
+}
+
+static struct ssl_sock_ctx *qc_get_ssl_sock_ctx(struct connection *conn)
+{
+ if (!conn || conn->xprt != xprt_get(XPRT_QUIC) || !conn->handle.qc || !conn->xprt_ctx)
+ return NULL;
+
+ return conn->handle.qc->xprt_ctx;
+}
+
+/* transport-layer operations for QUIC connections. */
+static struct xprt_ops ssl_quic = {
+ .close = quic_close,
+ .subscribe = quic_conn_subscribe,
+ .unsubscribe = quic_conn_unsubscribe,
+ .init = qc_conn_init,
+ .start = qc_xprt_start,
+ .prepare_bind_conf = ssl_sock_prepare_bind_conf,
+ .destroy_bind_conf = ssl_sock_destroy_bind_conf,
+ .get_alpn = ssl_sock_get_alpn,
+ .get_ssl_sock_ctx = qc_get_ssl_sock_ctx,
+ .name = "QUIC",
+};
+
+static void __quic_conn_init(void)
+{
+ xprt_register(XPRT_QUIC, &ssl_quic);
+}
+INITCALL0(STG_REGISTER, __quic_conn_init);
diff --git a/tests/conf/basic-check.cfg b/tests/conf/basic-check.cfg
new file mode 100644
index 0000000..226d7a1
--- /dev/null
+++ b/tests/conf/basic-check.cfg
@@ -0,0 +1,34 @@
+global
+ maxconn 500
+ external-check
+ stats socket /tmp/sock1 mode 666 level admin
+
+defaults
+ timeout client 5s
+ timeout server 5s
+ timeout connect 5s
+ mode http
+
+listen stats
+ bind :8888
+ stats uri /
+
+backend tcp9000
+ option httpchk
+ server srv 127.0.0.1:9000 check inter 1s
+
+backend tcp9001
+ option httpchk
+ server srv 127.0.0.1:9001 check inter 1s ssl verify none
+
+backend tcp9006
+ option httpchk
+ server srv 10.0.0.0:9006 check inter 1s verify none
+
+backend ssh
+ server blah 127.0.0.1:22 check inter 1s
+
+frontend dummy
+ #mode http
+ bind :9000-9004
+
diff --git a/tests/conf/ext-check.cfg b/tests/conf/ext-check.cfg
new file mode 100644
index 0000000..f368928
--- /dev/null
+++ b/tests/conf/ext-check.cfg
@@ -0,0 +1,26 @@
+global
+ maxconn 500
+ external-check
+ stats socket /tmp/sock1 mode 666 level admin
+
+defaults
+ timeout client 5s
+ timeout server 5s
+ timeout connect 5s
+ mode http
+
+listen stats
+ bind :8888
+ stats uri /
+
+listen up
+ bind :8001
+ option external-check
+ external-check command /bin/true
+ server srv 127.0.0.1:8000 check inter 1000
+
+listen down
+ bind :8002
+ option external-check
+ external-check command /bin/false
+ server srv 127.0.0.1:8000 check inter 1000
diff --git a/tests/conf/ports.cfg b/tests/conf/ports.cfg
new file mode 100644
index 0000000..5779348
--- /dev/null
+++ b/tests/conf/ports.cfg
@@ -0,0 +1,74 @@
+# This is used to validate the address/port parser using "haproxy -c -f $file".
+# Some errors will be returned, they are expected to match the documented ones.
+
+frontend f1
+ log 127.0.0.1 local0
+ log 127.0.0.1:10000 local0
+ log 127.0.0.1:10001-10010 local0 # port range not permitted here in '127.0.0.1:10001-10010'
+ log 127.0.0.1:+10011 local0 # port offset not permitted here in '127.0.0.1:+10011'
+ log 127.0.0.1:-10012 local0 # port offset not permitted here in '127.0.0.1:-10012'
+
+ bind : # missing port specification in ':'
+ bind :11001
+ bind :::11002
+ bind :::11003-11010
+ bind :::+11011 # port offset not permitted here in ':::+11011'
+ bind :::-11012 # port offset not permitted here in ':::-11012'
+ bind udp@:11013 # dgram-type socket not acceptable in 'udp@:11013'
+
+frontend f2
+ bind :::0 # invalid port '0'
+ bind :::0-11 # invalid port '0'
+ bind :::65016- # invalid port ''
+ bind :::65016-1024 # invalid port range '65016-1024'
+ bind :::65016--1024 # invalid port '-1024'
+ bind :::66016-1024 # invalid port '66016'
+
+backend b2
+ source :12001
+ source :::12002
+ source :::12003-12010 # port range not permitted here in ':::12003-12010'
+ source :::+12011 # port offset not permitted here in ':::+12011'
+ source :::-12012 # port offset not permitted here in ':::-12012'
+
+backend b3
+ server s1 :
+ server s2 localhost:13001
+ server s3 :13002
+ server s4 :+13003
+ server s5 :-13004
+ server s6 :13005-13010 # port range not permitted here in ':13005-13010'
+
+backend b4
+ server s1 : addr 0.0.0.1:14001 # addr: port specification not permitted here
+
+backend b5
+ server s1 : source localhost:15000
+ server s1 : source 0.0.0.1:15001
+ server s2 : source 0.0.0.1:+15002 # port offset not permitted here in '0.0.0.1:+15002'
+ server s3 : source 0.0.0.1:-15003 # port offset not permitted here in '0.0.0.1:-15003'
+ server s4 : source 0.0.0.1:15004-15010
+
+backend b6
+ server s1 : source 0.0.0.0 usesrc localhost:16000
+ server s1 : source 0.0.0.0 usesrc 0.0.0.1:16001
+ server s2 : source 0.0.0.0 usesrc 0.0.0.1:+16002 # port offset not permitted here in '0.0.0.1:+16002'
+ server s3 : source 0.0.0.0 usesrc 0.0.0.1:-16003 # port offset not permitted here in '0.0.0.1:-16003'
+ server s4 : source 0.0.0.0 usesrc 0.0.0.1:16004-16010 # port range not permitted here in '0.0.0.1:16004-16010'
+
+backend b7
+ server s1 : socks4 0.0.0.1 # missing port specification in '0.0.0.1'
+ server s2 : socks4 localhost:18000
+ server s2 : socks4 0.0.0.1:18001
+ server s3 : socks4 0.0.0.1:+18002 # port offset not permitted here in '0.0.0.1:+18002'
+ server s4 : socks4 0.0.0.1:-18003 # port offset not permitted here in '0.0.0.1:-18003'
+ server s5 : socks4 0.0.0.1:18004-18010 # port range not permitted here in '0.0.0.1:18004-18010'
+
+backend b8
+ tcp-check connect addr 0.0.0.1
+ tcp-check connect addr 0.0.0.1:
+ tcp-check connect addr localhost:19000
+ tcp-check connect addr 0.0.0.1:19001
+ tcp-check connect addr 0.0.0.1:+19002 # port offset not permitted here in '0.0.0.1:+19002'
+ tcp-check connect addr 0.0.0.1:-19003 # port offset not permitted here in '0.0.0.1:-19003'
+ tcp-check connect addr 0.0.0.1:19004-19005 # port range not permitted here in '0.0.0.1:19004-19005'
diff --git a/tests/conf/setstatus.lua b/tests/conf/setstatus.lua
new file mode 100644
index 0000000..e2eafe1
--- /dev/null
+++ b/tests/conf/setstatus.lua
@@ -0,0 +1,26 @@
+-- http-response actions
+core.register_action("set-status-418-defaultreason", {"http-res"}, function(txn)
+ txn.http:res_set_status(418)
+end)
+core.register_action("set-status-418-customreason", {"http-res"}, function(txn)
+ txn.http:res_set_status(418, "I'm a coffeepot")
+end)
+
+-- http services
+core.register_service("http418-default", "http", function(applet)
+ local response = "Hello World !"
+ applet:set_status(418)
+ applet:add_header("content-length", string.len(response))
+ applet:add_header("content-type", "text/plain")
+ applet:start_response()
+ applet:send(response)
+end)
+
+core.register_service("http418-coffeepot", "http", function(applet)
+ local response = "Hello World !"
+ applet:set_status(418, "I'm a coffeepot")
+ applet:add_header("content-length", string.len(response))
+ applet:add_header("content-type", "text/plain")
+ applet:start_response()
+ applet:send(response)
+end)
diff --git a/tests/conf/tcp-check.cfg b/tests/conf/tcp-check.cfg
new file mode 100644
index 0000000..56aba53
--- /dev/null
+++ b/tests/conf/tcp-check.cfg
@@ -0,0 +1,74 @@
+global
+ maxconn 500
+ external-check
+ stats socket /tmp/sock1 mode 666 level admin
+
+defaults
+ timeout client 5s
+ timeout server 5s
+ timeout connect 5s
+ mode http
+
+listen stats
+ bind :8888
+ stats uri /
+
+listen multi-tcp-check
+ bind :80
+ mode tcp
+ option tcp-check
+ tcp-check connect port 9001
+ tcp-check connect port 9002
+
+ server server1 127.0.0.1: check
+ server server2 127.0.0.2: check
+ server server3 127.0.0.3: check
+
+backend tcp9000
+ option tcp-check
+ server srv 127.0.0.1:9000 check inter 1s
+
+backend tcp9001
+ option tcp-check
+ tcp-check comment "this is a comment"
+ server srv 127.0.0.1:9001 check inter 1s
+
+backend tcp9002
+ option tcp-check
+ tcp-check connect port 9002
+ server srv 127.0.0.1:9002 check inter 1s
+
+backend tcp9003
+ option tcp-check
+ tcp-check comment "this is a comment"
+ tcp-check connect port 9003
+ server srv 127.0.0.1:9003 check inter 1s
+
+backend tcp9004
+ option tcp-check
+ tcp-check connect port 9004
+ tcp-check send-binary 474554 # "GET"
+ tcp-check send-binary 202F20 # " / "
+ tcp-check send-binary 485454 # "HTT"
+ tcp-check send-binary 502F31 # "P/1"
+ tcp-check send-binary 2E300D # ".0\r"
+ tcp-check send-binary 0A0D0A # "\n\r\n"
+ tcp-check expect rstring HTTP
+ server srv 127.0.0.1:9004 check inter 1s
+
+backend tcp9005
+ option tcp-check
+ tcp-check connect port 9005 ssl
+ server srv 127.0.0.1:9005 check inter 1s verify none
+
+backend ssh
+ option tcp-check
+ tcp-check connect port 22
+ tcp-check expect rstring SSH
+ tcp-check send "oops, sorry"
+ server blah 127.0.0.1:22 check inter 1s
+
+frontend dummy
+ mode http
+ bind :9000-9005
+
diff --git a/tests/conf/test-acl-args.cfg b/tests/conf/test-acl-args.cfg
new file mode 100644
index 0000000..26b909f
--- /dev/null
+++ b/tests/conf/test-acl-args.cfg
@@ -0,0 +1,36 @@
+# This config file aims to trigger all error detection cases in the ACL
+# expression parser related to the fetch arguments.
+
+# silence some warnings
+defaults
+ mode http
+ timeout client 1s
+ timeout server 1s
+ timeout connect 1s
+
+frontend 1
+ bind :10000
+
+ # missing fetch method in ACL expression '(arg)'.
+ http-request deny if { (arg) }
+
+ # unknown fetch method 'blah' in ACL expression 'blah(arg)'.
+ http-request deny if { blah(arg) }
+
+ # missing closing ')' after arguments to fetch keyword 'req.hdr' in ACL expression 'req.hdr('.
+ http-request deny if { req.hdr( }
+
+ # cannot be triggered : "returns type of fetch method '%s' is unknown"
+
+ # fetch method 'always_true' : no argument supported, but got 'arg' in ACL expression 'always_true(arg)'.
+ http-request deny if { always_true(arg) }
+
+ # fetch method 'req.hdr' : failed to parse 'a' as type 'signed integer' at position 2 in ACL expression 'req.hdr(a,a)'.
+ http-request deny if { req.hdr(a,a) }
+
+ # in argument to 'payload_lv', payload length must be > 0.
+ http-request deny if { payload_lv(0,0) }
+
+ # ACL keyword 'payload_lv' : expected type 'unsigned integer' at position 1, but got nothing.
+ http-request deny if { payload_lv }
+
diff --git a/tests/conf/test-address-syntax.cfg b/tests/conf/test-address-syntax.cfg
new file mode 100644
index 0000000..820572c
--- /dev/null
+++ b/tests/conf/test-address-syntax.cfg
@@ -0,0 +1,84 @@
+global
+ maxconn 200
+ #debug
+ #daemon
+
+defaults
+ mode http
+ timeout connect 50s
+ timeout client 50s
+ timeout server 50s
+
+frontend b11 :11000,:11001
+
+frontend b12 :12000-12009,:12020-12029
+
+#frontend b13 ::13000,::13001
+
+frontend b14 :::14000,:::14001
+
+frontend b15 *:15000,*:15001
+
+frontend b16 0.0.0.0:16000,0.0.0.0:16001
+
+listen l21
+ bind :21000,:21001
+
+listen l22
+ bind :22000-22009,:22020-22029
+
+#listen l23
+# bind ::23000,::23001
+
+listen l24
+ bind :::24000,:::24001
+
+listen l25
+ bind *:25000,*:25001
+
+listen l26
+ bind 0.0.0.0:26000,0.0.0.0:26001
+
+listen l35 :35000
+ server s1 :80
+ #server s2 ::80
+ server s3 :::80
+ server s4 *:80
+ server s5 0.0.0.0:80
+ server s5 0::0:80
+
+listen l36 :36000
+ server s1 1.1.1.1:80
+ server s2 1::1:80
+ server s3 ::1.1.1.1:80
+ server s4 localhost:80
+# server s5 localhost6:80
+
+listen l37 :37000
+ server s1 1.1.1.1
+ server s2 1::1:
+ server s3 ::1.1.1.1:
+ server s4 localhost
+# server s5 localhost6
+
+listen l38 :38000
+ server s1 1.1.1.1:+1
+ server s2 1::1:+1
+ server s3 ::1.1.1.1:+1
+ server s4 localhost:+1
+# server s5 localhost6:+1
+
+listen l39 :39000
+ server s1 1.1.1.1 check addr 2.2.2.2
+ server s2 1::1: check addr 2::2:
+ server s3 ::1.1.1.1: check addr ::2.2.2.2:
+ server s4 ::1.1.1.1: check addr localhost
+# server s5 ::1.1.1.1: check addr localhost6
+
+listen l40 :40000
+ server s1 1.1.1.1 source 0.0.0.0
+ server s2 1.1.1.1 source :1-10
+ server s3 1.1.1.1 source :::1-10
+ server s3 1.1.1.1 source 0::0:1-10
+ server s3 1.1.1.1 source ::0.0.0.0:1-10
+
diff --git a/tests/conf/test-backlog.cfg b/tests/conf/test-backlog.cfg
new file mode 100644
index 0000000..bc4a71e
--- /dev/null
+++ b/tests/conf/test-backlog.cfg
@@ -0,0 +1,22 @@
+# This is a test configuration.
+# It is used to check that the backlog queue works as expected.
+
+global
+ maxconn 200
+ stats timeout 3s
+
+frontend backlog_def
+ mode http
+ timeout client 15s
+ maxconn 100
+ bind :8000
+ option httpclose
+
+frontend backlog_max
+ mode http
+ timeout client 15s
+ maxconn 100
+ backlog 100000
+ bind :8001
+ option httpclose
+
diff --git a/tests/conf/test-check-expect.cfg b/tests/conf/test-check-expect.cfg
new file mode 100644
index 0000000..8a799df
--- /dev/null
+++ b/tests/conf/test-check-expect.cfg
@@ -0,0 +1,87 @@
+# This is a test configuration.
+# It is used to exercise the various http-check expect features. It queries
+# a local web server for an object which is named the same as the keyword.
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 600 level admin
+ stats timeout 3000
+ stats maxconn 2000
+
+defaults
+ mode http
+ retries 1
+ option redispatch
+ timeout connect 1000
+ timeout client 5000
+ timeout server 5000
+ maxconn 400
+ option http-server-close
+
+listen stats
+ bind :8080
+ stats uri /
+
+backend chk-exp-status-nolb
+ # note: 404 should not produce an error here, just a soft-stop
+ balance roundrobin
+ option httpchk GET /status
+ http-check disable-on-404
+ http-check expect status 200
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-nexp-status-nolb
+ balance roundrobin
+ option httpchk GET /status
+ http-check disable-on-404
+ http-check expect ! status 200
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-exp-status
+ balance roundrobin
+ option httpchk GET /status
+ http-check expect status 200
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-nexp-status
+ balance roundrobin
+ option httpchk GET /status
+ http-check expect ! status 200
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-exp-rstatus
+ balance roundrobin
+ option httpchk GET /rstatus
+ http-check expect rstatus ^2[0-9][0-9]
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-nexp-rstatus
+ balance roundrobin
+ option httpchk GET /rstatus
+ http-check expect ! rstatus ^2[0-9][0-9]
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-exp-string
+ balance roundrobin
+ option httpchk GET /string
+ http-check expect string this\ is\ ok
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-nexp-string
+ balance roundrobin
+ option httpchk GET /string
+ http-check expect ! string this\ is\ ok
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-exp-rstring
+ balance roundrobin
+ option httpchk GET /rstring
+ http-check expect rstring this\ is\ ok
+ server s1 127.0.0.1:80 check inter 1000
+
+backend chk-nexp-rstring
+ balance roundrobin
+ option httpchk GET /rstring
+ http-check expect ! rstring this\ is\ ok
+ server s1 127.0.0.1:80 check inter 1000
+
diff --git a/tests/conf/test-connection.cfg b/tests/conf/test-connection.cfg
new file mode 100644
index 0000000..a81e2f3
--- /dev/null
+++ b/tests/conf/test-connection.cfg
@@ -0,0 +1,34 @@
+# This is a test configuration.
+# It is used to check the various connection modes
+
+global
+ maxconn 100
+
+defaults
+ mode http
+ timeout client 10000
+ timeout server 10000
+ timeout connect 10000
+ balance roundrobin
+
+listen httpclose
+ option httpclose
+ bind :8001
+ server srv 127.0.0.1:8080
+ http-request add-header X-request mode=httpclose
+ http-response add-header X-response mode=httpclose
+
+listen server-close
+ option http-server-close
+ bind :8002
+ server srv 127.0.0.1:8080
+ http-request add-header X-request mode=server-close
+ http-response add-header X-response mode=server-close
+
+listen httpclose_server-close
+ option httpclose
+ option http-server-close
+ bind :8003
+ server srv 127.0.0.1:8080
+ http-request add-header X-request mode=httpclose+server-close
+ http-response add-header X-response mode=httpclose+server-close
diff --git a/tests/conf/test-cookie-indirect.cfg b/tests/conf/test-cookie-indirect.cfg
new file mode 100644
index 0000000..2ee8658
--- /dev/null
+++ b/tests/conf/test-cookie-indirect.cfg
@@ -0,0 +1,47 @@
+# Test configuration. It listens on port 8000, forwards to
+# local ports 8001/8002 as two distinct servers, and relies
+# on a server running on local port 8080 to handle the request.
+
+# Example of request that must be handled (taken from RFC2965 and mangled
+# a bit) :
+# POST /acme/process HTTP/1.1
+# Cookie: $Version="1";
+# Customer="WILE_E_COYOTE"; $Path="/acme";
+# SID= s2 ; $Path="/";
+# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
+# Shipping="FedEx"; $Path="/acme"
+#
+#
+#
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 777 level admin
+ stats timeout 1d
+
+defaults
+ mode http
+ option http-server-close
+ timeout client 30s
+ timeout server 30s
+ timeout connect 5s
+
+listen test
+ log 127.0.0.1 local0
+ option httplog
+ bind :8000
+ cookie SID insert indirect
+ server s1 127.0.0.1:8001 cookie s1
+ server s2 127.0.0.1:8002 cookie s2
+ capture cookie toto= len 10
+
+listen s1
+ bind 127.0.0.1:8001
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s1
+
+listen s2
+ bind 127.0.0.1:8002
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s2
+
diff --git a/tests/conf/test-cookie-insert.cfg b/tests/conf/test-cookie-insert.cfg
new file mode 100644
index 0000000..fc0f2af
--- /dev/null
+++ b/tests/conf/test-cookie-insert.cfg
@@ -0,0 +1,35 @@
+# Test configuration. It listens on port 8000, forwards to
+# local ports 8001/8002 as two distinct servers, and relies
+# on a server running on local port 8080 to handle the request.
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 777 level admin
+ stats timeout 1d
+
+defaults
+ mode http
+ option http-server-close
+ timeout client 30s
+ timeout server 30s
+ timeout connect 5s
+
+listen test
+ log 127.0.0.1 local0
+ option httplog
+ bind :8000
+ cookie SID insert
+ server s1 127.0.0.1:8001 cookie s1
+ server s2 127.0.0.1:8002 cookie s2
+ capture cookie toto= len 10
+
+listen s1
+ bind 127.0.0.1:8001
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s1
+
+listen s2
+ bind 127.0.0.1:8002
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s2
+
diff --git a/tests/conf/test-cookie-passive.cfg b/tests/conf/test-cookie-passive.cfg
new file mode 100644
index 0000000..a35dcb3
--- /dev/null
+++ b/tests/conf/test-cookie-passive.cfg
@@ -0,0 +1,35 @@
+# Test configuration. It listens on port 8000, forwards to
+# local ports 8001/8002 as two distinct servers, and relies
+# on a server running on local port 8080 to handle the request.
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 777 level admin
+ stats timeout 1d
+
+defaults
+ mode http
+ option http-server-close
+ timeout client 30s
+ timeout server 30s
+ timeout connect 5s
+
+listen test
+ log 127.0.0.1 local0
+ option httplog
+ bind :8000
+ cookie SID
+ server s1 127.0.0.1:8001 cookie s1
+ server s2 127.0.0.1:8002 cookie s2
+ capture cookie toto= len 10
+
+listen s1
+ bind 127.0.0.1:8001
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s1
+
+listen s2
+ bind 127.0.0.1:8002
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s2
+
diff --git a/tests/conf/test-cookie-prefix.cfg b/tests/conf/test-cookie-prefix.cfg
new file mode 100644
index 0000000..d4d591e
--- /dev/null
+++ b/tests/conf/test-cookie-prefix.cfg
@@ -0,0 +1,35 @@
+# Test configuration. It listens on port 8000, forwards to
+# local ports 8001/8002 as two distinct servers, and relies
+# on a server running on local port 8080 to handle the request.
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 777 level admin
+ stats timeout 1d
+
+defaults
+ mode http
+ option http-server-close
+ timeout client 30s
+ timeout server 30s
+ timeout connect 5s
+
+listen test
+ log 127.0.0.1 local0
+ option httplog
+ bind :8000
+ cookie SID prefix
+ server s1 127.0.0.1:8001 cookie s1
+ server s2 127.0.0.1:8002 cookie s2
+ capture cookie toto= len 10
+
+listen s1
+ bind 127.0.0.1:8001
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s1
+
+listen s2
+ bind 127.0.0.1:8002
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s2
+
diff --git a/tests/conf/test-cookie-rewrite.cfg b/tests/conf/test-cookie-rewrite.cfg
new file mode 100644
index 0000000..0e5e1bd
--- /dev/null
+++ b/tests/conf/test-cookie-rewrite.cfg
@@ -0,0 +1,35 @@
+# Test configuration. It listens on port 8000, forwards to
+# local ports 8001/8002 as two distinct servers, and relies
+# on a server running on local port 8080 to handle the request.
+
+global
+ maxconn 500
+ stats socket /tmp/sock1 mode 777 level admin
+ stats timeout 1d
+
+defaults
+ mode http
+ option http-server-close
+ timeout client 30s
+ timeout server 30s
+ timeout connect 5s
+
+listen test
+ log 127.0.0.1 local0
+ option httplog
+ bind :8000
+ cookie SID rewrite
+ server s1 127.0.0.1:8001 cookie s1
+ server s2 127.0.0.1:8002 cookie s2
+ capture cookie toto= len 10
+
+listen s1
+ bind 127.0.0.1:8001
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s1
+
+listen s2
+ bind 127.0.0.1:8002
+ server srv 127.0.0.1:8080
+ http-request add-header x-haproxy-used s2
+
diff --git a/tests/conf/test-disable-404.cfg b/tests/conf/test-disable-404.cfg
new file mode 100644
index 0000000..05015d3
--- /dev/null
+++ b/tests/conf/test-disable-404.cfg
@@ -0,0 +1,61 @@
+# This is a test configuration.
+# It makes use of a farm built from 4 active servers and 4 backup servers,
+# all listening to different IP addresses on port 80. Health-checks are
+# TCP only on port 81 so that iptables rules permit easy selection of which
+# servers are enabled or disabled. It checks for the file /alive, and disables
+# the server if the response is 404.
+#
+# Create statistics counters this way :
+#
+# iptables -N http
+# iptables -A OUTPUT -p tcp --syn --dport 80 -j http
+# for i in $(seq 1 8); do iptables -A http -d 127.0.0.$i; done
+# iptables -A http -d 127.0.0.0/24
+#
+# Consult the statistics using iptables this way:
+#
+# iptables --line-numbers -nxvL http
+# iptables -Z http
+#
+# Block individual servers like this :
+# iptables -I INPUT -p tcp --dport 81 -d 127.0.0.1 -j DROP
+#
+# Enable each server like this :
+# touch $SRV_ROOT/alive
+#
+# Disable each server like this :
+# rm -f $SRV_ROOT/alive
+#
+
+global
+ maxconn 1000
+ stats socket /tmp/sock1 mode 600
+ stats timeout 3000
+ stats maxconn 2000
+
+listen sample1
+ mode http
+ retries 1
+ option redispatch
+ timeout connect 1000
+ timeout client 5000
+ timeout server 5000
+ maxconn 40000
+ bind :8080
+ cookie SRV insert indirect nocache
+ #balance source
+ balance roundrobin
+ option allbackups
+ server act1 127.0.0.1:80 cookie a1 weight 10 check port 81 inter 1000 fall 4
+ server act2 127.0.0.2:80 cookie a2 weight 20 check port 81 inter 1000 fall 4
+ server act3 127.0.0.3:80 cookie a3 weight 30 check port 81 inter 1000 fall 4
+ server act4 127.0.0.4:80 cookie a4 weight 40 check port 81 inter 1000 fall 4
+ server bck1 127.0.0.5:80 cookie b1 weight 10 check port 81 inter 1000 fall 4 backup
+ server bck2 127.0.0.6:80 cookie b2 weight 20 check port 81 inter 1000 fall 4 backup
+ server bck3 127.0.0.7:80 cookie b3 weight 30 check port 81 inter 1000 fall 4 backup
+ server bck4 127.0.0.8:80 cookie b4 weight 40 check port 81 inter 1000 fall 4 backup
+ option httpclose
+ stats uri /stats
+ stats refresh 5
+ option httpchk GET /alive
+ http-check disable-on-404
diff --git a/tests/conf/test-fsm.cfg b/tests/conf/test-fsm.cfg
new file mode 100644
index 0000000..4ea3efd
--- /dev/null
+++ b/tests/conf/test-fsm.cfg
@@ -0,0 +1,346 @@
+# This is a test configuration.
+# It exercises some critical state machine transitions. Start it in debug
+# mode with syslogd listening to UDP socket, facility local0.
+
+########### test#0001: process_cli(), read or write error
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat /dev/zero) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001 >/dev/null
+# action : kill client during transfer (Ctrl-C)
+# result : both sides must close, and logs must report "CD" flags with
+# valid timers and counters. (note: high CPU usage expected)
+# example: 0/0/3/0/5293 200 76420076 - - CD--
+
+########### test#0002: process_cli(), read closed on client side first
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat /dev/zero) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001
+# action : end client output during transfer (Ctrl-D)
+# result : client exits, log is emitted immediately, server must be terminated
+# by hand. Logs indicate "----" with correct timers.
+# example: 0/0/3/0/5293 200 76420076 - - ----
+# note : test#0003 is triggered immediately after this test
+
+########### test3: process_cli(), read closed on server side first
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001
+# action : end server output during transfer (Ctrl-D)
+# result : server exits, log is emitted immediately, client must be terminated
+# by hand. Logs indicate "----" with correct timers.
+# example: 0/0/3/0/5293 200 76420076 - - ----
+# note : test#0002 is triggered immediately after this test
+
+########### test4: process_cli(), read timeout on client side
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001
+# action : wait at least 7 seconds and check the logs
+# result : log is emitted immediately, client and server must be terminated
+# by hand. Logs indicate "cD--" with correct timers.
+# example: 0/0/1/0/7006 200 19 - - cD--
+# note : test#0003 is triggered immediately after this test
+
+########### test5: ability to restart read after a request buffer full
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n"; cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat 8k.txt;cat) | nc 127.1 8001
+# action : enter data on the client, press enter, then Ctrl-D
+# result : data transferred to the server, client exits, log is emitted
+# immediately, server must be terminated by hand. Logs indicate
+# "----" with correct timers.
+# example: 0/0/0/0/3772 200 19 - - ----
+
+########### test6: ability to restart read after a request buffer partially full
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n"; cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat 7k.txt;cat) | nc 127.1 8001
+# action : enter data on the client, press enter, then Ctrl-D
+# result : data transferred to the server, client exits, log is emitted
+# immediately, server must be terminated by hand. Logs indicate
+# "----" with correct timers.
+# example: 0/0/0/0/3772 200 19 - - ----
+
+########### test7: ability to restart read after a response buffer full
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat 8k.txt; cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001
+# action : enter data on the server, press enter, then Ctrl-D
+# result : data transferred to the client, server exits, log is emitted
+# immediately, client must be terminated by hand. Logs indicate
+# "----" with correct timers.
+# example: 0/0/0/0/3087 200 8242 - - ----
+
+########### test8: ability to restart read after a response buffer partially full
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat 7k.txt; cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8001
+# action : enter data on the server, press enter, then Ctrl-D
+# result : data transferred to the client, server exits, log is emitted
+# immediately, client must be terminated by hand. Logs indicate
+# "----" with correct timers.
+# example: 0/0/0/0/5412 200 7213 - - ----
+
+########### test9: process_cli(), read timeout on empty request
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = nc 127.1 8001
+# action : wait at least 5 seconds and check the logs
+# result : client returns 408, log is emitted immediately, server must be
+# terminated by hand. Logs indicate "cR--" with correct timers
+# and "<BADREQ>"
+# example: -1/-1/-1/-1/5000 408 212 - - cR--
+
+########### test10: process_cli(), read timeout on client headers
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\nTest: test\r\n";cat) | nc 127.1 8001
+# action : wait at least 5 seconds and check the logs
+# result : client returns 408, log is emitted immediately, both must be
+# terminated by hand. Logs indicate "cR--" with correct timers
+# and "<BADREQ>"
+# example: -1/-1/-1/-1/5004 408 212 - - cR--
+
+########### test11: process_cli(), read abort on empty request
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = echo -n | nc 127.1 8001
+# action : just check the logs after the client immediately returns
+# result : client returns 400, log is emitted immediately, server must be
+# terminated by hand. Logs indicate "CR--" with correct timers
+# and "<BADREQ>"
+# example: -1/-1/-1/-1/0 400 187 - - CR--
+
+########### test12: process_cli(), read abort on client headers
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\n\r\n";cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\nTest: test\r\n") | nc 127.1 8001
+# action : just check the logs after the client immediately returns
+# result : client returns 400, log is emitted immediately, server must be
+# terminated by hand. Logs indicate "CR--" with correct timers
+# and "<BADREQ>"
+# example: -1/-1/-1/-1/0 400 187 - - CR--
+
+########### test13: process_srv(), read timeout on empty response
+# setup :
+# server = nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n"; cat) | nc 127.1 8001
+# action : wait 9 seconds and check response
+# result : client exits with 504, log is emitted immediately, client must be
+# terminated by hand. Logs indicate "sH--" with correct timers.
+# example: 0/0/0/-1/8002 504 194 - - sH--
+
+########### test14: process_srv(), closed client during response timeout
+# setup :
+# server = nc6 --half-close -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n"; sleep 1) | nc 127.1 8001
+# action : wait 9 seconds and check response
+# result : client exits with 504, log is emitted immediately, server exits
+# immediately. Logs indicate "sH--" with correct timers, which
+# is 8s regardless of the "sleep 1".
+# example: 0/0/0/-1/8002 504 194 - - sH--
+
+########### test15: process_srv(), client close not causing server close
+# setup :
+# server = nc6 -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n"; sleep 1) | nc 127.1 8001
+# action : wait 9 second and check response
+# result : client exits with 504, log is emitted immediately, server exits
+# immediately. Logs indicate "sH--" with correct timers, which
+# is 8s regardless of the "sleep 1".
+# example: 0/0/0/-1/8002 504 194 - - sH--
+
+########### test16: process_srv(), read timeout on server headers
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n";cat) | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n"; cat) | nc 127.1 8001
+# action : wait 8 seconds and check response
+# result : client exits with 504, log is emitted immediately, both must be
+# terminated by hand. Logs indicate "sH--" with correct timers.
+# example: 0/0/0/-1/8004 504 223 - - sH--
+
+########### test17: process_srv(), connection time-out
+# setup :
+# config = retries 1
+# server = none
+# client = (printf "GET / HTTP/1.0\r\n\r\n";cat) | nc 127.1 8002
+# action : wait at least 12 seconds and check the logs
+# result : client returns 503 and must be terminated by hand. Log is emitted
+# immediately. Logs indicate "sC--" with correct timers.
+# example: 0/0/-1/-1/12001 503 212 - - sC--
+
+########### test18: process_srv(), client close during connection time-out
+# setup :
+# config = retries 1
+# server = none
+# client = (printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 127.1 8002
+# action : wait at least 12 seconds and check the logs
+# result : client returns 503 and automatically closes. Log is emitted
+# immediately. Logs indicate "sC--" with correct timers.
+# example: 0/0/-1/-1/12001 503 212 - - sC--
+
+########### test19: process_srv(), immediate server close after empty response
+# setup :
+# server = echo -n | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n") | nc 127.1 8001
+# action : just check logs after immediate return.
+# result : client and server exit with 502, log is emitted immediately. Logs
+# indicate "SH--" with correct timers.
+# example: 0/0/0/-1/0 502 204 - - SH--
+
+########### test20: process_srv(), immediate server close after incomplete headers
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n") | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n") | nc 127.1 8001
+# action : just check logs after immediate return.
+# result : client and server exit with 502, log is emitted immediately. Logs
+# indicate "SH--" with correct timers.
+# example: 0/0/0/-1/0 502 233 - - SH--
+
+########### test21: process_srv(), immediate server close after complete headers
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n\r\n") | nc -lp4000
+# client = (printf "GET / HTTP/1.0\r\n\r\n") | nc 127.1 8001
+# action : just check logs after immediate return.
+# result : client and server exit with 200, log is emitted immediately. Logs
+# indicate "----" with correct timers.
+# example: 0/0/0/0/0 200 31 - - ----
+
+########### test22: process_srv(), timeout on request body
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n\r\n") | nc -lp4000
+# client = (printf "POST / HTTP/1.0\r\nContent-length: 20\r\n\r\n";cat) | nc 127.1 8001
+# action : wait 7s for the request body to timeout.
+# result : The server receives the request and responds immediately with 200.
+# Log is emitted after the timeout occurs. Logs indicate "cD--" with correct timers.
+# example: 1/0/0/0/7004 200 31 - - cD--
+
+########### test23: process_srv(), client close on request body
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n\r\n") | nc -lp4000
+# client = (printf "POST / HTTP/1.0\r\nContent-length: 20\r\n\r\n";cat) | nc 127.1 8001
+# action : wait 2s then press Ctrl-C on the client
+# result : The server immediately aborts and the logs are emitted immediately with a 400.
+# Logs indicate "CD--" with correct timers.
+# example: 1/0/0/0/1696 400 31 - - CD--
+
+########### test24: process_srv(), server close on request body
+# setup :
+# server = (printf "HTTP/1.0 200 OK\r\nTest: test\r\n\r\n") | nc -lp4000
+# client = (printf "POST / HTTP/1.0\r\nContent-length: 20\r\n\r\n";cat) | nc 127.1 8001
+# action : wait 2s then press Ctrl-C on the server and press enter a few times on the client
+# result : The logs are emitted immediately with a 200 (server's incomplete response).
+# Logs indicate "SD--" with correct timers. Client must be terminated by hand.
+# example: 1/0/0/0/2186 200 31 - - SD--
+
+########### test25: process_srv(), client timeout on request body when url_param is used
+# setup :
+# server = none
+# client = (printf "POST / HTTP/1.0\r\nContent-length: 20\r\n\r\n";cat) | nc 127.1 8003
+# action : wait 5s for the request body to timeout.
+# result : The client receives a 408 and closes. The log is emitted immediately.
+# Logs indicate "cD--" with correct timers.
+# example: 0/-1/-1/-1/5003 408 212 - - cD--
+
+########### test26: process_srv(), client abort on request body when url_param is used
+# setup :
+# server = none
+# client = (printf "POST / HTTP/1.0\r\nContent-length: 20\r\n\r\n";cat) | nc 127.1 8003
+# action : wait 2s then press Ctrl-C on the client
+# result : The logs are emitted immediately with a 400.
+# Logs indicate "CD--" with correct timers.
+# example: 594/-1/-1/-1/594 400 187 - - CD--
+
+
+
+global
+ maxconn 100
+ log 127.0.0.1 local0
+
+# connect to port 8000 to consult statistics
+listen stats
+ timeout client 5s
+ mode http
+ bind :8000
+ balance
+ stats uri /stat
+
+# connect port 8001 to localhost:4000
+listen frt8001
+ log global
+ bind :8001
+ mode http
+ option httplog
+ maxconn 100
+
+ timeout http-request 5s
+ timeout connect 6s
+ timeout client 7s
+ timeout server 8s
+ timeout queue 9s
+
+ balance roundrobin
+ server srv4000 127.0.0.1:4000
+
+# connect port 8002 to nowhere
+listen frt8002
+ log global
+ bind :8002
+ mode http
+ option httplog
+ maxconn 100
+
+ timeout http-request 5s
+ timeout connect 6s
+ timeout client 7s
+ timeout server 8s
+ timeout queue 9s
+ retries 1
+
+ balance url_param foo check_post
+ server srv4000 192.168.255.255:4000
+
+# connect port 8003 to localhost:4000 with url_param
+listen frt8003
+ log global
+ bind :8003
+ mode http
+ option httplog
+ maxconn 100
+
+ timeout http-request 5s
+ timeout connect 6s
+ timeout client 7s
+ timeout server 8s
+ timeout queue 9s
+
+ balance url_param foo check_post
+ server srv4000 127.0.0.1:4000
+
+
+# listen frt8002
+# log global
+# bind :8002
+# mode http
+# option httplog
+# maxconn 100
+#
+# timeout http-request 5s
+# timeout connect 6s
+# timeout client 7s
+# timeout server 8s
+# timeout queue 9s
+#
+# #tcp-request inspect-delay 4s
+# acl white_list src 127.0.0.2
+#
+# tcp-request content accept if white_list
+# tcp-request content reject if !REQ_CONTENT
+#
+# balance url_param foo check_post
+# #balance url_param foo #check_post
+# server srv4000 127.0.0.1:4000
+#
+# # control activity this way
+# stats uri /stat
+#
diff --git a/tests/conf/test-fwlc.cfg b/tests/conf/test-fwlc.cfg
new file mode 100644
index 0000000..4368294
--- /dev/null
+++ b/tests/conf/test-fwlc.cfg
@@ -0,0 +1,61 @@
+# This is a test configuration.
+# It makes use of a farm built from 4 active servers and 4 backup servers,
+# all listening to different IP addresses on port 80. Health-checks are
+# TCP only on port 81 so that iptables rules permit easy selection of which
+# servers are enabled or disabled.
+#
+# Create statistics counters this way :
+#
+# iptables -N http
+# iptables -A OUTPUT -p tcp --syn --dport 80 -j http
+# for i in $(seq 1 8); do iptables -A http -d 127.0.0.$i; done
+# iptables -A http -d 127.0.0.0/24
+#
+# Consult the statistics using iptables this way:
+#
+# iptables --line-numbers -nxvL http
+# iptables -Z http
+#
+#
+# Block individual servers like this :
+# iptables -I INPUT -p tcp --dport 81 -d 127.0.0.1 -j DROP
+#
+
+global
+ maxconn 1000
+ stats socket /tmp/sock1 mode 600
+ stats timeout 3000
+ stats maxconn 2000
+
+listen sample1
+ mode tcp
+ retries 1
+ option redispatch
+ timeout connect 1000
+ timeout client 120000
+ timeout server 120000
+ maxconn 40000
+ bind :8080
+ balance leastconn
+ option allbackups
+ server act1 127.0.0.1:80 weight 10 maxconn 200 check inter 1000 fall 1
+ server act2 127.0.0.2:80 weight 20 maxconn 200 check inter 1000 fall 1
+ server act3 127.0.0.3:80 weight 30 maxconn 200 check inter 1000 fall 1
+ server act4 127.0.0.4:80 weight 40 maxconn 200 check inter 1000 fall 1
+ server bck1 127.0.0.5:80 weight 10 check inter 1000 fall 1 backup
+ server bck2 127.0.0.6:80 weight 20 check inter 1000 fall 1 backup
+ server bck3 127.0.0.7:80 weight 30 check inter 1000 fall 1 backup
+ server bck4 127.0.0.8:80 weight 40 check inter 1000 fall 1 backup
+ option httpclose
+
+listen sample2
+ mode http
+ timeout connect 1000
+ timeout client 50000
+ timeout server 50000
+ maxconn 40000
+ bind :8081
+ balance leastconn
+ option httpclose
+ stats uri /stats
+ stats refresh 5
diff --git a/tests/conf/test-fwrr.cfg b/tests/conf/test-fwrr.cfg
new file mode 100644
index 0000000..ed283ca
--- /dev/null
+++ b/tests/conf/test-fwrr.cfg
@@ -0,0 +1,51 @@
+# This is a test configuration.
+# It makes use of a farm built from 4 active servers and 4 backup servers,
+# all listening to different IP addresses on port 80. Health-checks are
+# TCP only on port 81 so that iptables rules permit easy selection of which
+# servers are enabled or disabled.
+#
+# Create statistics counters this way :
+#
+# iptables -N http
+# iptables -A OUTPUT -p tcp --syn --dport 80 -j http
+# for i in $(seq 1 8); do iptables -A http -d 127.0.0.$i; done
+# iptables -A http -d 127.0.0.0/24
+#
+# Consult the statistics using iptables this way:
+#
+# iptables --line-numbers -nxvL http
+# iptables -Z http
+#
+#
+# Block individual servers like this :
+# iptables -I INPUT -p tcp --dport 81 -d 127.0.0.1 -j DROP
+#
+
+global
+ maxconn 1000
+ stats socket /tmp/sock1 mode 600
+ stats timeout 3000
+ stats maxconn 2000
+
+listen sample1
+ mode http
+ retries 1
+ option redispatch
+ timeout connect 1000
+ timeout client 5000
+ timeout server 5000
+ maxconn 40000
+ bind :8080
+ balance roundrobin
+ option allbackups
+ server act1 127.0.0.1:80 weight 10 check port 81 inter 1000 fall 1
+ server act2 127.0.0.2:80 weight 20 check port 81 inter 1000 fall 1
+ server act3 127.0.0.3:80 weight 30 check port 81 inter 1000 fall 1
+ server act4 127.0.0.4:80 weight 40 check port 81 inter 1000 fall 1
+ server bck1 127.0.0.5:80 weight 10 check port 81 inter 1000 fall 1 backup
+ server bck2 127.0.0.6:80 weight 20 check port 81 inter 1000 fall 1 backup
+ server bck3 127.0.0.7:80 weight 30 check port 81 inter 1000 fall 1 backup
+ server bck4 127.0.0.8:80 weight 40 check port 81 inter 1000 fall 1 backup
+ option httpclose
+ stats uri /stats
+ stats refresh 5
diff --git a/tests/conf/test-handshakes-chk.cfg b/tests/conf/test-handshakes-chk.cfg
new file mode 100644
index 0000000..51aa7fb
--- /dev/null
+++ b/tests/conf/test-handshakes-chk.cfg
@@ -0,0 +1,148 @@
+# This config tries to involve the various possible combinations of connection
+# handshakes, on the accept side and on the connect side. It also produces logs
+# indicating the handshake time.
+#
+# May be tested with tcploop as the server, both for TCP and HTTP mode :
+# - accept new connection
+# - pause 100ms
+# - send what looks like an HTTP response
+# - wait 500ms and close
+#
+# Starting log server (mainly to check timers) :
+# $ socat udp-recvfrom:5514,fork -
+#
+# Starting server :
+# $ tcploop 8000 L N A W P100 S:"HTTP/1.0 200 OK\r\nConnection: close\r\n\r\n" P500
+#
+# Testing all combinations with server-speaks-first (tcp) :
+# $ nc 0 8007
+#
+# Testing all combinations with client-speaks-first (tcp) :
+# $ (printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8007
+#
+# Testing all combinations with client-speaks-first after pause (tcp) :
+# $ (usleep 0.05 ; printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8007
+#
+# Testing all combinations with client-speaks-first (http) :
+# $ (printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8017
+#
+# Testing all combinations with client-speaks-first after pause (http) :
+# $ (usleep 0.05 ; printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8017
+#
+# Same tests must be redone after surrounding connect() in tcp_connect_server()
+# with fcntl(fd, F_SETFL, 0) and fcntl(fd, F_SETFL, O_NONBLOCK) for synchronous
+# connect().
+
+global
+ stats socket /tmp/sock1 level admin
+ stats timeout 1h
+ ssl-server-verify none
+ tune.ssl.default-dh-param 2048
+ log 127.0.0.1:5514 local0 debug debug
+
+#################################################################
+## stats instance (8181)
+#################################################################
+
+listen stats
+ bind :8181
+ timeout client 5s
+ timeout server 4s
+ timeout connect 3s
+ mode http
+ stats uri /
+
+#################################################################
+## TCP instances connect to port 8000 and listen to 8001..8007
+#################################################################
+defaults TCP
+ timeout client 5s
+ timeout server 4s
+ timeout connect 3s
+ log global
+ log-format "%ci:%cp %ft %b/%s h=%Th/w=%Tw/c=%Tc/t=%Tt %ST %B %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs"
+
+# connects to port local 8000
+listen tcp-none-in-none-out
+ bind :8001
+ server s 127.0.0.1:8000 check
+
+# takes ssl+pp on input, nothing on output
+listen tcp-sslpp-in-none-out
+ bind :8002 ssl crt rsa2048.pem accept-proxy
+ server s 127.0.0.1:8001 check
+
+# takes nothing on input, sends ssl+pp
+listen tcp-none-in-sslpp-out
+ bind :8003
+ server s 127.0.0.1:8002 check ssl send-proxy-v2
+
+# takes pp on input, nothing on output
+listen tcp-pp-in-none-out
+ bind :8004 accept-proxy
+ server s 127.0.0.1:8003 check
+
+# takes nothing on input, sends pp
+listen tcp-none-in-pp-out
+ bind :8005
+ server s 127.0.0.1:8004 check send-proxy-v2
+
+# takes ssl on input, sends nothing
+listen tcp-ssl-in-none-out
+ bind :8006 ssl crt rsa2048.pem
+ server s 127.0.0.1:8005 check
+
+# takes nothing on input, sends ssl
+listen tcp-none-in-ssl-out
+ bind :8007
+ server s 127.0.0.1:8006 check ssl
+
+
+#################################################################
+## HTTP instances also connect to port 8000 but they listen to
+## 8011..8017
+#################################################################
+
+defaults HTTP
+ timeout client 5s
+ timeout server 4s
+ timeout connect 3s
+ mode http
+ log global
+ log-format "%ci:%cp [%tr] %ft %b/%s h=%Th/i=%Ti/R=%TR/w=%Tw/c=%Tc/r=%Tr/a=%Ta/t=%Tt %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+
+# connects to port local 8000
+listen http-none-in-none-out
+ bind :8011
+ server s 127.0.0.1:8000 check
+
+# takes ssl+pp on input, nothing on output
+listen http-sslpp-in-none-out
+ bind :8012 ssl crt rsa2048.pem accept-proxy
+ server s 127.0.0.1:8011 check
+
+# takes nothing on input, sends ssl+pp
+listen http-none-in-sslpp-out
+ bind :8013
+ server s 127.0.0.1:8012 check ssl send-proxy-v2
+
+# takes pp on input, nothing on output
+listen http-pp-in-none-out
+ bind :8014 accept-proxy
+ server s 127.0.0.1:8013 check
+
+# takes nothing on input, sends pp
+listen http-none-in-pp-out
+ bind :8015
+ server s 127.0.0.1:8014 check send-proxy-v2
+
+# takes ssl on input, sends nothing
+listen http-ssl-in-none-out
+ bind :8016 ssl crt rsa2048.pem
+ server s 127.0.0.1:8015 check
+
+# takes nothing on input, sends ssl
+listen http-none-in-ssl-out
+ bind :8017
+ server s 127.0.0.1:8016 check ssl
+
diff --git a/tests/conf/test-handshakes.cfg b/tests/conf/test-handshakes.cfg
new file mode 100644
index 0000000..247b38b
--- /dev/null
+++ b/tests/conf/test-handshakes.cfg
@@ -0,0 +1,135 @@
+# This config tries to involve the various possible combinations of connection
+# handshakes, on the accept side and on the connect side. It also produces logs
+# indicating the handshake time.
+#
+# May be tested with tcploop as the server, both for TCP and HTTP mode :
+# - accept new connection
+# - pause 100ms
+# - send what looks like an HTTP response
+# - wait 500ms and close
+#
+# Starting log server (mainly to check timers) :
+# $ socat udp-recvfrom:5514,fork -
+#
+# Starting server :
+# $ tcploop 8000 L N A W P100 S:"HTTP/1.0 200 OK\r\nConnection: close\r\n\r\n" P500
+#
+# Testing all combinations with server-speaks-first (tcp) :
+# $ nc 0 8007
+#
+# Testing all combinations with client-speaks-first (tcp) :
+# $ (printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8007
+#
+# Testing all combinations with client-speaks-first after pause (tcp) :
+# $ (usleep 0.05 ; printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8007
+#
+# Testing all combinations with client-speaks-first (http) :
+# $ (printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8017
+#
+# Testing all combinations with client-speaks-first after pause (http) :
+# $ (usleep 0.05 ; printf "GET / HTTP/1.0\r\n\r\n";sleep 1) | nc 0 8017
+#
+# Same tests must be redone after surrounding connect() in tcp_connect_server()
+# with fcntl(fd, F_SETFL, 0) and fcntl(fd, F_SETFL, O_NONBLOCK) for synchronous
+# connect().
+
+global
+ stats socket /tmp/sock1 level admin
+ stats timeout 1h
+ ssl-server-verify none
+ tune.ssl.default-dh-param 2048
+ log 127.0.0.1:5514 local0 debug debug
+
+#################################################################
+## TCP instances connect to port 8000 and listen to 8001..8007
+#################################################################
+defaults TCP
+ timeout client 5s
+ timeout server 4s
+ timeout connect 3s
+ log global
+ log-format "%ci:%cp %ft %b/%s h=%Th/w=%Tw/c=%Tc/t=%Tt %ST %B %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs"
+
+# connects to port local 8000
+listen tcp-none-in-none-out
+ bind :8001
+ server s 127.0.0.1:8000
+
+# takes ssl+pp on input, nothing on output
+listen tcp-sslpp-in-none-out
+ bind :8002 ssl crt rsa2048.pem accept-proxy
+ server s 127.0.0.1:8001
+
+# takes nothing on input, sends ssl+pp
+listen tcp-none-in-sslpp-out
+ bind :8003
+ server s 127.0.0.1:8002 ssl send-proxy-v2
+
+# takes pp on input, nothing on output
+listen tcp-pp-in-none-out
+ bind :8004 accept-proxy
+ server s 127.0.0.1:8003
+
+# takes nothing on input, sends pp
+listen tcp-none-in-pp-out
+ bind :8005
+ server s 127.0.0.1:8004 send-proxy-v2
+
+# takes ssl on input, sends nothing
+listen tcp-ssl-in-none-out
+ bind :8006 ssl crt rsa2048.pem
+ server s 127.0.0.1:8005
+
+# takes nothing on input, sends ssl
+listen tcp-none-in-ssl-out
+ bind :8007
+ server s 127.0.0.1:8006 ssl
+
+
+#################################################################
+## HTTP instances also connect to port 8000 but they listen to
+## 8011..8017
+#################################################################
+
+defaults HTTP
+ timeout client 5s
+ timeout server 4s
+ timeout connect 3s
+ mode http
+ log global
+ log-format "%ci:%cp [%tr] %ft %b/%s h=%Th/i=%Ti/R=%TR/w=%Tw/c=%Tc/r=%Tr/a=%Ta/t=%Tt %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+
+# connects to port local 8000
+listen http-none-in-none-out
+ bind :8011
+ server s 127.0.0.1:8000
+
+# takes ssl+pp on input, nothing on output
+listen http-sslpp-in-none-out
+ bind :8012 ssl crt rsa2048.pem accept-proxy
+ server s 127.0.0.1:8011
+
+# takes nothing on input, sends ssl+pp
+listen http-none-in-sslpp-out
+ bind :8013
+ server s 127.0.0.1:8012 ssl send-proxy-v2
+
+# takes pp on input, nothing on output
+listen http-pp-in-none-out
+ bind :8014 accept-proxy
+ server s 127.0.0.1:8013
+
+# takes nothing on input, sends pp
+listen http-none-in-pp-out
+ bind :8015
+ server s 127.0.0.1:8014 send-proxy-v2
+
+# takes ssl on input, sends nothing
+listen http-ssl-in-none-out
+ bind :8016 ssl crt rsa2048.pem
+ server s 127.0.0.1:8015
+
+# takes nothing on input, sends ssl
+listen http-none-in-ssl-out
+ bind :8017
+ server s 127.0.0.1:8016 ssl
diff --git a/tests/conf/test-http-send-name-hdr.cfg b/tests/conf/test-http-send-name-hdr.cfg
new file mode 100644
index 0000000..f424dfe
--- /dev/null
+++ b/tests/conf/test-http-send-name-hdr.cfg
@@ -0,0 +1,33 @@
+# Test Rewriting Host header
+global
+ maxconn 100
+
+defaults
+ mode http
+ timeout client 10000
+ timeout server 10000
+ timeout connect 10000
+ balance roundrobin
+
+listen send-name-silo-id
+ bind :8001
+
+ # Set the test conditions: Add a new header
+ http-send-name-header X-Silo-Id
+ server srv-silo1 127.0.0.1:8080
+
+ # Add headers containing the correct values for test verification
+ http-request add-header X-test-server-name-header X-Silo-Id
+ http-request add-header X-test-server-name-value srv-silo1
+
+listen send-name-host
+ bind :8002
+
+ # Set the test conditions: Replace an existing header
+ http-send-name-header host
+ server srv-host 127.0.0.1:8080
+
+ # Add headers containing the correct values for test verification
+ http-request add-header X-test-server-name-header Host
+ http-request add-header X-test-server-name-value srv-host
+
diff --git a/tests/conf/test-http-set-status-lua.cfg b/tests/conf/test-http-set-status-lua.cfg
new file mode 100644
index 0000000..485a1ee
--- /dev/null
+++ b/tests/conf/test-http-set-status-lua.cfg
@@ -0,0 +1,31 @@
+global
+ maxconn 100
+ lua-load setstatus.lua
+
+defaults
+ mode http
+ timeout client 10000
+ timeout server 10000
+ timeout connect 10000
+
+# Expect HTTP/1.1 418 I'm a teapot
+listen lua-service-set-status-defaultreason
+ bind :8003
+ http-request use-service lua.http418-default
+
+# Expect HTTP/1.1 418 I'm a coffeepot
+listen lua-service-set-status-customreason
+ bind :8004
+ http-request use-service lua.http418-coffeepot
+
+# Expect HTTP/1.1 418 I'm a teapot
+listen lua-action-set-status-defaultreason
+ bind :8005
+ http-response lua.set-status-418-defaultreason
+ server host 127.0.0.1:8080
+
+# Expect HTTP/1.1 418 I'm a coffeepot
+listen lua-action-set-status-customreason
+ bind :8006
+ http-response lua.set-status-418-customreason
+ server host 127.0.0.1:8080
diff --git a/tests/conf/test-http-set-status.cfg b/tests/conf/test-http-set-status.cfg
new file mode 100644
index 0000000..0c66b16
--- /dev/null
+++ b/tests/conf/test-http-set-status.cfg
@@ -0,0 +1,32 @@
+global
+ maxconn 100
+
+defaults
+ mode http
+ timeout client 10000
+ timeout server 10000
+ timeout connect 10000
+
+# Expect HTTP/1.1 418 I'm a teapot
+listen http-response-set-status-defaultreason
+ bind :8001
+ server host 127.0.0.1:8080
+ http-response set-status 418
+
+# Expect HTTP/1.1 418 I'm a coffeepot
+listen http-response-set-status-customreason
+ bind :8002
+ server host 127.0.0.1:8080
+ http-response set-status 418 reason "I'm a coffeepot"
+
+# Expect config parse fail
+#listen parse-fail-string
+# bind :8002
+# server host 127.0.0.1:8080
+# http-response set-status 418 reason
+
+# Expect config parse fail
+#listen parse-fail-keyword
+# bind :8002
+# server host 127.0.0.1:8080
+# http-response set-status 418 "Missing reason keyword"
diff --git a/tests/conf/test-inspect-smtp.cfg b/tests/conf/test-inspect-smtp.cfg
new file mode 100644
index 0000000..5cfc864
--- /dev/null
+++ b/tests/conf/test-inspect-smtp.cfg
@@ -0,0 +1,44 @@
+# This is a test configuration. It listens on port 8025, waits for an incoming
+# connection, and applies the following rules :
+# - if the address is in the white list, then accept it and forward the
+# connection to the server (local port 25)
+# - if the address is in the black list, then immediately drop it
+# - otherwise, wait up to 35 seconds. If the client talks during this time,
+# drop the connection.
+# - then accept the connection if it passes all the tests.
+#
+# Note that the rules are evaluated at every new chunk of data read, and at
+# delay expiration. Rules which apply to incomplete data don't match as long
+# as the timer has not expired.
+
+listen block-fake-mailers
+ log 127.0.0.1:514 local0
+ option tcplog
+
+ mode tcp
+ bind :8025
+ timeout client 60s
+ timeout server 60s
+ timeout queue 60s
+ timeout connect 5s
+
+ tcp-request inspect-delay 35s
+
+ acl white_list src 127.0.0.2
+ acl black_fast src 127.0.0.3 # those ones are immediately rejected
+ acl black_slow src 127.0.0.4 # those ones are rejected after a delay
+
+ tcp-request content accept if white_list
+ tcp-request content reject if black_fast
+ tcp-request content reject if black_slow WAIT_END
+ tcp-request content reject if REQ_CONTENT
+ # note that it is possible to wait for the end of the analysis period
+ # before rejecting undesired contents
+ # tcp-request content reject if REQ_CONTENT WAIT_END
+
+# # on Linux+transparent proxy patch, it's useful to reuse the client's IP
+ # source 0.0.0.0 usesrc clientip
+
+ balance roundrobin
+ server mail 127.0.0.1:25
+
diff --git a/tests/conf/test-inspect-ssl.cfg b/tests/conf/test-inspect-ssl.cfg
new file mode 100644
index 0000000..582d1a2
--- /dev/null
+++ b/tests/conf/test-inspect-ssl.cfg
@@ -0,0 +1,37 @@
+# This is a test configuration. It listens on port 8443, waits for an incoming
+# connection, and applies the following rules :
+# - if the address is in the white list, then accept it and forward the
+# connection to the server (local port 443)
+# - if the address is in the black list, then immediately drop it
+# - otherwise, wait up to 3 seconds for valid SSL data to come in. If those
+# data are identified as SSL, the connection is immediately accepted, and
+# if they are definitely identified as non-SSL, the connection is rejected,
+# which will happen upon timeout if they still don't match SSL.
+
+listen block-non-ssl
+ log 127.0.0.1:514 local0
+ option tcplog
+
+ mode tcp
+ bind :8443
+ timeout client 6s
+ timeout server 6s
+ timeout connect 6s
+
+ tcp-request inspect-delay 4s
+
+ acl white_list src 127.0.0.2
+ acl black_list src 127.0.0.3
+
+ # note: SSLv2 is not used anymore, SSLv3.1 is TLSv1.
+ acl obsolete_ssl req_ssl_ver lt 3
+ acl correct_ssl req_ssl_ver 3.0-3.1
+ acl invalid_ssl req_ssl_ver gt 3.1
+
+ tcp-request content accept if white_list
+ tcp-request content reject if black_list
+ tcp-request content reject if !correct_ssl
+
+ balance roundrobin
+ server srv1 127.0.0.1:443
+
diff --git a/tests/conf/test-map-ports.cfg b/tests/conf/test-map-ports.cfg
new file mode 100644
index 0000000..15a19b1
--- /dev/null
+++ b/tests/conf/test-map-ports.cfg
@@ -0,0 +1,31 @@
+# This is a test configuration.
+# It presents 4 instances using fixed and relative port assignments from
+# ports 8001 to 8004. TCP only is used, and the destination address is not
+# relevant (use netstat -an).
+
+global
+ maxconn 100
+
+defaults
+ mode tcp
+ timeout client 15000
+ timeout server 15000
+ timeout connect 15000
+ balance roundrobin
+
+listen fixed
+ bind :8001
+ server s1 1.1.1.1:8001
+
+listen same
+ bind :8002
+ server s2 1.1.1.2
+
+listen plus1000
+ bind :8003
+ server s3 1.1.1.3:+1000
+
+listen minus1000
+ bind :8004
+ server s4 1.1.1.4:-1000
+
diff --git a/tests/conf/test-param-hash.cfg b/tests/conf/test-param-hash.cfg
new file mode 100644
index 0000000..f976a12
--- /dev/null
+++ b/tests/conf/test-param-hash.cfg
@@ -0,0 +1,23 @@
+# This is a test configuration.
+# It exercises the "url_param" balance algorithm. It looks for
+# a URL parameter named "foo".
+
+global
+ maxconn 100
+ log 127.0.0.1 local0
+
+listen vip1
+ log global
+ bind :8000
+ mode http
+ maxconn 100
+ timeout client 5000
+ timeout connect 5000
+ timeout server 5000
+ balance url_param foo
+ server srv1 127.0.0.1:80
+ server srv2 127.0.0.1:80
+
+ # control activity this way
+ stats uri /stat
+
diff --git a/tests/conf/test-pollers.cfg b/tests/conf/test-pollers.cfg
new file mode 100644
index 0000000..1771715
--- /dev/null
+++ b/tests/conf/test-pollers.cfg
@@ -0,0 +1,15 @@
+# This is the smallest possible configuration. It does not
+# bind to any port, and is enough to check the polling
+# system in use without disturbing any running process.
+#
+# To be used that way: haproxy -V -f test-pollers.cfg
+
+global
+ #nosepoll
+ #noepoll
+ #nopoll
+
+# fake backend to pass the config checks
+backend dummy
+ balance
+
diff --git a/tests/conf/test-redirect.cfg b/tests/conf/test-redirect.cfg
new file mode 100644
index 0000000..582a069
--- /dev/null
+++ b/tests/conf/test-redirect.cfg
@@ -0,0 +1,49 @@
+# This is a test configuration.
+# It is used to check the redirect keyword.
+
+global
+ maxconn 400
+ stats timeout 3s
+
+listen sample1
+ mode http
+ retries 1
+ option redispatch
+ timeout client 1m
+ timeout connect 5s
+ timeout server 1m
+ maxconn 400
+ bind :8000
+
+ acl url_test1 url_reg test1
+ acl url_test2 url_reg test2
+ acl url_test3 url_reg test3
+ acl url_test4 url_reg test4
+
+ acl seen hdr_sub(cookie) SEEN=1
+
+ redirect location /abs/test code 301 if url_test1
+ redirect prefix /pfx/test code 302 if url_test2
+ redirect prefix /pfx/test code 303 drop-query if url_test3
+
+ redirect prefix / code 302 set-cookie SEEN=1 if url_test4 !seen
+ redirect location / code 302 clear-cookie SEEN= if url_test4 seen
+
+ ### unconditional redirection
+ #redirect location https://example.com/ if TRUE
+
+ ### parser must detect invalid syntaxes below
+ #redirect
+ #redirect blah
+ #redirect location
+ #redirect location /abs/test
+ #redirect location /abs/test code
+ #redirect location /abs/test code 300
+ #redirect location /abs/test code 301
+ #redirect location /abs/test code 304
+
+ balance roundrobin
+ server act1 127.0.0.1:80 weight 10
+ option httpclose
+ stats uri /stats
+ stats refresh 5000ms
diff --git a/tests/conf/test-sample-fetch-args.cfg b/tests/conf/test-sample-fetch-args.cfg
new file mode 100644
index 0000000..cb0be09
--- /dev/null
+++ b/tests/conf/test-sample-fetch-args.cfg
@@ -0,0 +1,36 @@
+# This config file aims to trigger all error detection cases in the sample
+# fetch expression parser related to the fetch arguments.
+
+# silence some warnings
+defaults
+ mode http
+ timeout client 1s
+ timeout server 1s
+ timeout connect 1s
+
+frontend 1
+ bind :10000
+
+ # missing fetch method
+ http-request add-header name %[(arg)]
+
+ # unknown fetch method 'blah'
+ http-request add-header name %[blah(arg)]
+
+ # missing closing ')' after arguments to fetch keyword 'req.hdr'
+ http-request add-header name %[req.hdr(]
+
+ # cannot be triggered : "returns type of fetch method '%s' is unknown"
+
+ # fetch method 'always_true' : no argument supported, but got 'arg'
+ http-request add-header name %[always_true(arg)]
+
+ # fetch method 'req.hdr' : failed to parse 'a' as 'signed integer' at position 2
+ http-request add-header name %[req.hdr(a,a)]
+
+ # invalid args in fetch method 'payload_lv' : payload length must be > 0
+ http-request add-header name %[payload_lv(0,0)]
+
+ # fetch method 'payload_lv' : expected type 'unsigned integer' at position 1, but got nothing
+ http-request add-header name %[payload_lv]
+
diff --git a/tests/conf/test-sample-fetch-conv.cfg b/tests/conf/test-sample-fetch-conv.cfg
new file mode 100644
index 0000000..50eda5d
--- /dev/null
+++ b/tests/conf/test-sample-fetch-conv.cfg
@@ -0,0 +1,42 @@
+# This config file aims to trigger all error detection cases in the sample
+# fetch expression parser related to the sample converters.
+
+# silence some warnings
+defaults
+ mode http
+ timeout client 1s
+ timeout server 1s
+ timeout connect 1s
+
+frontend 1
+ bind :10000
+
+ # report "missing comma after fetch keyword %s"
+ http-request add-header name %[hdr(arg))]
+
+ # report "missing comma after conv keyword %s"
+ http-request add-header name %[hdr(arg),ipmask(2))]
+
+ # report "unknown conv method '%s'"
+ http-request add-header name %[hdr(arg),blah]
+
+ # report "syntax error: missing ')' after conv keyword '%s'"
+ http-request add-header name %[hdr(arg),ipmask(2]
+
+ # no way to report "returns type of conv method '%s' is unknown"
+
+ # "conv method '%s' cannot be applied"
+ http-request add-header name %[wait_end,ipmask(2)]
+
+ # "conv method '%s' does not support any args"
+ http-request add-header name %[hdr(arg),upper()]
+
+ # "invalid arg %d in conv method '%s' : %s"
+ http-request add-header name %[hdr(arg),ipmask(a)]
+
+ # "invalid args in conv method '%s' : %s"
+ http-request add-header name %[hdr(arg),map()]
+
+ # "missing args for conv method '%s'"
+ http-request add-header name %[hdr(arg),ipmask]
+
diff --git a/tests/conf/test-sql.cfg b/tests/conf/test-sql.cfg
new file mode 100644
index 0000000..16eb5ec
--- /dev/null
+++ b/tests/conf/test-sql.cfg
@@ -0,0 +1,29 @@
+# This is a test configuration.
+# It requires a mysql server running on local port 3306.
+
+global
+ maxconn 500
+
+defaults
+ timeout connect 1000
+ timeout client 5000
+ timeout server 5000
+ retries 1
+ option redispatch
+
+listen stats
+ bind :8080
+ mode http
+ stats enable
+ stats uri /stats
+
+listen mysql_1
+ bind :3307
+ mode tcp
+ balance roundrobin
+ option mysql-check user haproxy
+ server srv1 127.0.0.1:3306 check port 3306 inter 1000 fall 1
+# server srv2 127.0.0.2:3306 check port 3306 inter 1000 fall 1
+# server srv3 127.0.0.3:3306 check port 3306 inter 1000 fall 1
+# server srv4 127.0.0.4:3306 check port 3306 inter 1000 fall 1
+
diff --git a/tests/conf/test-srv-verify.cfg b/tests/conf/test-srv-verify.cfg
new file mode 100644
index 0000000..e3ccc83
--- /dev/null
+++ b/tests/conf/test-srv-verify.cfg
@@ -0,0 +1,57 @@
+global
+ maxconn 490
+ stats socket /tmp/sock1 mode 666 level admin
+ stats timeout 10m
+ ssl-server-verify none
+ tune.ssl.default-dh-param 1024
+ log /dev/log local0 debug info
+
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ timeout connect 5s
+ timeout http-keep-alive 15s
+ timeout http-request 15s
+ timeout queue 30s
+ timeout tarpit 1m
+ timeout tunnel 300s
+ timeout client 30s
+ timeout server 60s
+
+listen 1
+ bind :8001
+
+ # passes checks and traffic (no hostname check)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem
+
+ # passes checks and traffic (localhost is what the server presents)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem verifyhost localhost
+
+ # fails checks and traffic (foo not matched on the server)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem verifyhost foo
+
+ # passes checks and traffic (verify none ignores the host)
+ # server ssl 127.0.0.1:8443 ssl verify none check inter 500 ca-file rsa2048.pem verifyhost foo
+
+ # passes checks and traffic (localhost is fine)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem sni str(localhost) verifyhost localhost
+
+ # passes checks and traffic (verifyhost overrides sni)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem sni str(foo) verifyhost localhost
+
+ # passes checks and traffic (localhost always valid)
+ # server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem sni str(localhost)
+
+ # passes checks, and traffic without host or with "host: localhost" and fails other hosts.
+ server ssl 127.0.0.1:8443 ssl verify required check inter 500 ca-file rsa2048.pem sni req.hdr(host)
+
+ # just for tests
+ #server clear 127.0.0.1:8480
+
+listen 2
+ bind :8480
+ bind :8443 ssl crt rsa2048.pem
+ stats uri /
+
diff --git a/tests/conf/test-stats.cfg b/tests/conf/test-stats.cfg
new file mode 100644
index 0000000..99c9f94
--- /dev/null
+++ b/tests/conf/test-stats.cfg
@@ -0,0 +1,5045 @@
+# This is a test configuration.
+# It will provide a stats page on port 8181, with 5000 servers checked on
+# the loopback interface at random intervals. The stats page will reload
+# every 5 seconds. The goal of this test is to check that the scheduler
+# behaves correctly. In order to test it, simply fire tcpdump and select one
+# of the servers to ensure that the interval is regular and exactly
+# equal to the <inter> parameter. Eg:
+#
+# sudo tcpdump -tttnpi lo "tcp[13]&2!=0" and host 127.0.0.2
+
+
+global
+ maxconn 100
+ #daemon
+ #nbproc 2
+ stats socket /tmp/sock1 mode 666 level admin
+ stats timeout 2m
+
+listen Stats_fnt
+ mode http
+ bind :8181
+ timeout client 5000
+ timeout connect 5000
+ timeout server 5000
+ maxconn 100
+ option httpclose
+ balance roundrobin
+ stats uri /
+ stats refresh 5
+ stats scope Thousand_HTTP
+
+frontend dummy
+ bind :8080
+ mode http
+ redirect location /
+
+# servers created with the following command :
+# for i in $(seq 0 4999); do
+# printf "\tserver 127.0.%d.%03d 127.0.%d.%d:8080 weight 10 inter $[RANDOM%10000] fall 2 rise 1\n" \
+# $[i/250] $[i%250+1] $[i/250] $[i%250+1]
+# done
+backend Thousand_HTTP
+ timeout connect 5000
+ timeout server 5000
+ balance roundrobin
+ server 127.0.0.001 127.0.0.1:8080 check weight 10 inter 2233 fall 2 rise 1
+ server 127.0.0.002 127.0.0.2:8080 check weight 10 inter 9145 fall 2 rise 1
+ server 127.0.0.003 127.0.0.3:8080 check weight 10 inter 2096 fall 2 rise 1
+ server 127.0.0.004 127.0.0.4:8080 check weight 10 inter 3403 fall 2 rise 1
+ server 127.0.0.005 127.0.0.5:8080 check weight 10 inter 523 fall 2 rise 1
+ server 127.0.0.006 127.0.0.6:8080 check weight 10 inter 9793 fall 2 rise 1
+ server 127.0.0.007 127.0.0.7:8080 check weight 10 inter 8684 fall 2 rise 1
+ server 127.0.0.008 127.0.0.8:8080 check weight 10 inter 1193 fall 2 rise 1
+ server 127.0.0.009 127.0.0.9:8080 check weight 10 inter 8578 fall 2 rise 1
+ server 127.0.0.010 127.0.0.10:8080 check weight 10 inter 7588 fall 2 rise 1
+ server 127.0.0.011 127.0.0.11:8080 check weight 10 inter 4659 fall 2 rise 1
+ server 127.0.0.012 127.0.0.12:8080 check weight 10 inter 7292 fall 2 rise 1
+ server 127.0.0.013 127.0.0.13:8080 check weight 10 inter 5239 fall 2 rise 1
+ server 127.0.0.014 127.0.0.14:8080 check weight 10 inter 73 fall 2 rise 1
+ server 127.0.0.015 127.0.0.15:8080 check weight 10 inter 1041 fall 2 rise 1
+ server 127.0.0.016 127.0.0.16:8080 check weight 10 inter 4418 fall 2 rise 1
+ server 127.0.0.017 127.0.0.17:8080 check weight 10 inter 4117 fall 2 rise 1
+ server 127.0.0.018 127.0.0.18:8080 check weight 10 inter 3695 fall 2 rise 1
+ server 127.0.0.019 127.0.0.19:8080 check weight 10 inter 9827 fall 2 rise 1
+ server 127.0.0.020 127.0.0.20:8080 check weight 10 inter 9312 fall 2 rise 1
+ server 127.0.0.021 127.0.0.21:8080 check weight 10 inter 3288 fall 2 rise 1
+ server 127.0.0.022 127.0.0.22:8080 check weight 10 inter 9492 fall 2 rise 1
+ server 127.0.0.023 127.0.0.23:8080 check weight 10 inter 280 fall 2 rise 1
+ server 127.0.0.024 127.0.0.24:8080 check weight 10 inter 9957 fall 2 rise 1
+ server 127.0.0.025 127.0.0.25:8080 check weight 10 inter 395 fall 2 rise 1
+ server 127.0.0.026 127.0.0.26:8080 check weight 10 inter 5815 fall 2 rise 1
+ server 127.0.0.027 127.0.0.27:8080 check weight 10 inter 4711 fall 2 rise 1
+ server 127.0.0.028 127.0.0.28:8080 check weight 10 inter 7141 fall 2 rise 1
+ server 127.0.0.029 127.0.0.29:8080 check weight 10 inter 6846 fall 2 rise 1
+ server 127.0.0.030 127.0.0.30:8080 check weight 10 inter 1838 fall 2 rise 1
+ server 127.0.0.031 127.0.0.31:8080 check weight 10 inter 9104 fall 2 rise 1
+ server 127.0.0.032 127.0.0.32:8080 check weight 10 inter 5954 fall 2 rise 1
+ server 127.0.0.033 127.0.0.33:8080 check weight 10 inter 138 fall 2 rise 1
+ server 127.0.0.034 127.0.0.34:8080 check weight 10 inter 6054 fall 2 rise 1
+ server 127.0.0.035 127.0.0.35:8080 check weight 10 inter 2324 fall 2 rise 1
+ server 127.0.0.036 127.0.0.36:8080 check weight 10 inter 6426 fall 2 rise 1
+ server 127.0.0.037 127.0.0.37:8080 check weight 10 inter 5476 fall 2 rise 1
+ server 127.0.0.038 127.0.0.38:8080 check weight 10 inter 5214 fall 2 rise 1
+ server 127.0.0.039 127.0.0.39:8080 check weight 10 inter 2292 fall 2 rise 1
+ server 127.0.0.040 127.0.0.40:8080 check weight 10 inter 9059 fall 2 rise 1
+ server 127.0.0.041 127.0.0.41:8080 check weight 10 inter 9746 fall 2 rise 1
+ server 127.0.0.042 127.0.0.42:8080 check weight 10 inter 5571 fall 2 rise 1
+ server 127.0.0.043 127.0.0.43:8080 check weight 10 inter 1355 fall 2 rise 1
+ server 127.0.0.044 127.0.0.44:8080 check weight 10 inter 5527 fall 2 rise 1
+ server 127.0.0.045 127.0.0.45:8080 check weight 10 inter 1777 fall 2 rise 1
+ server 127.0.0.046 127.0.0.46:8080 check weight 10 inter 8682 fall 2 rise 1
+ server 127.0.0.047 127.0.0.47:8080 check weight 10 inter 9850 fall 2 rise 1
+ server 127.0.0.048 127.0.0.48:8080 check weight 10 inter 752 fall 2 rise 1
+ server 127.0.0.049 127.0.0.49:8080 check weight 10 inter 8546 fall 2 rise 1
+ server 127.0.0.050 127.0.0.50:8080 check weight 10 inter 3007 fall 2 rise 1
+ server 127.0.0.051 127.0.0.51:8080 check weight 10 inter 2695 fall 2 rise 1
+ server 127.0.0.052 127.0.0.52:8080 check weight 10 inter 641 fall 2 rise 1
+ server 127.0.0.053 127.0.0.53:8080 check weight 10 inter 5304 fall 2 rise 1
+ server 127.0.0.054 127.0.0.54:8080 check weight 10 inter 5247 fall 2 rise 1
+ server 127.0.0.055 127.0.0.55:8080 check weight 10 inter 7540 fall 2 rise 1
+ server 127.0.0.056 127.0.0.56:8080 check weight 10 inter 6170 fall 2 rise 1
+ server 127.0.0.057 127.0.0.57:8080 check weight 10 inter 8003 fall 2 rise 1
+ server 127.0.0.058 127.0.0.58:8080 check weight 10 inter 2664 fall 2 rise 1
+ server 127.0.0.059 127.0.0.59:8080 check weight 10 inter 2721 fall 2 rise 1
+ server 127.0.0.060 127.0.0.60:8080 check weight 10 inter 665 fall 2 rise 1
+ server 127.0.0.061 127.0.0.61:8080 check weight 10 inter 9402 fall 2 rise 1
+ server 127.0.0.062 127.0.0.62:8080 check weight 10 inter 6924 fall 2 rise 1
+ server 127.0.0.063 127.0.0.63:8080 check weight 10 inter 1217 fall 2 rise 1
+ server 127.0.0.064 127.0.0.64:8080 check weight 10 inter 5633 fall 2 rise 1
+ server 127.0.0.065 127.0.0.65:8080 check weight 10 inter 1706 fall 2 rise 1
+ server 127.0.0.066 127.0.0.66:8080 check weight 10 inter 4743 fall 2 rise 1
+ server 127.0.0.067 127.0.0.67:8080 check weight 10 inter 7165 fall 2 rise 1
+ server 127.0.0.068 127.0.0.68:8080 check weight 10 inter 4124 fall 2 rise 1
+ server 127.0.0.069 127.0.0.69:8080 check weight 10 inter 6481 fall 2 rise 1
+ server 127.0.0.070 127.0.0.70:8080 check weight 10 inter 5687 fall 2 rise 1
+ server 127.0.0.071 127.0.0.71:8080 check weight 10 inter 8539 fall 2 rise 1
+ server 127.0.0.072 127.0.0.72:8080 check weight 10 inter 8047 fall 2 rise 1
+ server 127.0.0.073 127.0.0.73:8080 check weight 10 inter 9784 fall 2 rise 1
+ server 127.0.0.074 127.0.0.74:8080 check weight 10 inter 2564 fall 2 rise 1
+ server 127.0.0.075 127.0.0.75:8080 check weight 10 inter 8972 fall 2 rise 1
+ server 127.0.0.076 127.0.0.76:8080 check weight 10 inter 4146 fall 2 rise 1
+ server 127.0.0.077 127.0.0.77:8080 check weight 10 inter 1572 fall 2 rise 1
+ server 127.0.0.078 127.0.0.78:8080 check weight 10 inter 6947 fall 2 rise 1
+ server 127.0.0.079 127.0.0.79:8080 check weight 10 inter 9128 fall 2 rise 1
+ server 127.0.0.080 127.0.0.80:8080 check weight 10 inter 2540 fall 2 rise 1
+ server 127.0.0.081 127.0.0.81:8080 check weight 10 inter 1148 fall 2 rise 1
+ server 127.0.0.082 127.0.0.82:8080 check weight 10 inter 4607 fall 2 rise 1
+ server 127.0.0.083 127.0.0.83:8080 check weight 10 inter 8442 fall 2 rise 1
+ server 127.0.0.084 127.0.0.84:8080 check weight 10 inter 593 fall 2 rise 1
+ server 127.0.0.085 127.0.0.85:8080 check weight 10 inter 6648 fall 2 rise 1
+ server 127.0.0.086 127.0.0.86:8080 check weight 10 inter 2946 fall 2 rise 1
+ server 127.0.0.087 127.0.0.87:8080 check weight 10 inter 4699 fall 2 rise 1
+ server 127.0.0.088 127.0.0.88:8080 check weight 10 inter 8139 fall 2 rise 1
+ server 127.0.0.089 127.0.0.89:8080 check weight 10 inter 333 fall 2 rise 1
+ server 127.0.0.090 127.0.0.90:8080 check weight 10 inter 6311 fall 2 rise 1
+ server 127.0.0.091 127.0.0.91:8080 check weight 10 inter 127 fall 2 rise 1
+ server 127.0.0.092 127.0.0.92:8080 check weight 10 inter 7913 fall 2 rise 1
+ server 127.0.0.093 127.0.0.93:8080 check weight 10 inter 4907 fall 2 rise 1
+ server 127.0.0.094 127.0.0.94:8080 check weight 10 inter 4639 fall 2 rise 1
+ server 127.0.0.095 127.0.0.95:8080 check weight 10 inter 4866 fall 2 rise 1
+ server 127.0.0.096 127.0.0.96:8080 check weight 10 inter 9978 fall 2 rise 1
+ server 127.0.0.097 127.0.0.97:8080 check weight 10 inter 2626 fall 2 rise 1
+ server 127.0.0.098 127.0.0.98:8080 check weight 10 inter 3925 fall 2 rise 1
+ server 127.0.0.099 127.0.0.99:8080 check weight 10 inter 1393 fall 2 rise 1
+ server 127.0.0.100 127.0.0.100:8080 check weight 10 inter 4616 fall 2 rise 1
+ server 127.0.0.101 127.0.0.101:8080 check weight 10 inter 744 fall 2 rise 1
+ server 127.0.0.102 127.0.0.102:8080 check weight 10 inter 1362 fall 2 rise 1
+ server 127.0.0.103 127.0.0.103:8080 check weight 10 inter 5240 fall 2 rise 1
+ server 127.0.0.104 127.0.0.104:8080 check weight 10 inter 788 fall 2 rise 1
+ server 127.0.0.105 127.0.0.105:8080 check weight 10 inter 1645 fall 2 rise 1
+ server 127.0.0.106 127.0.0.106:8080 check weight 10 inter 879 fall 2 rise 1
+ server 127.0.0.107 127.0.0.107:8080 check weight 10 inter 47 fall 2 rise 1
+ server 127.0.0.108 127.0.0.108:8080 check weight 10 inter 4629 fall 2 rise 1
+ server 127.0.0.109 127.0.0.109:8080 check weight 10 inter 312 fall 2 rise 1
+ server 127.0.0.110 127.0.0.110:8080 check weight 10 inter 1759 fall 2 rise 1
+ server 127.0.0.111 127.0.0.111:8080 check weight 10 inter 5043 fall 2 rise 1
+ server 127.0.0.112 127.0.0.112:8080 check weight 10 inter 8912 fall 2 rise 1
+ server 127.0.0.113 127.0.0.113:8080 check weight 10 inter 7337 fall 2 rise 1
+ server 127.0.0.114 127.0.0.114:8080 check weight 10 inter 6216 fall 2 rise 1
+ server 127.0.0.115 127.0.0.115:8080 check weight 10 inter 4709 fall 2 rise 1
+ server 127.0.0.116 127.0.0.116:8080 check weight 10 inter 1351 fall 2 rise 1
+ server 127.0.0.117 127.0.0.117:8080 check weight 10 inter 526 fall 2 rise 1
+ server 127.0.0.118 127.0.0.118:8080 check weight 10 inter 9093 fall 2 rise 1
+ server 127.0.0.119 127.0.0.119:8080 check weight 10 inter 2549 fall 2 rise 1
+ server 127.0.0.120 127.0.0.120:8080 check weight 10 inter 1601 fall 2 rise 1
+ server 127.0.0.121 127.0.0.121:8080 check weight 10 inter 3682 fall 2 rise 1
+ server 127.0.0.122 127.0.0.122:8080 check weight 10 inter 1693 fall 2 rise 1
+ server 127.0.0.123 127.0.0.123:8080 check weight 10 inter 4252 fall 2 rise 1
+ server 127.0.0.124 127.0.0.124:8080 check weight 10 inter 813 fall 2 rise 1
+ server 127.0.0.125 127.0.0.125:8080 check weight 10 inter 6968 fall 2 rise 1
+ server 127.0.0.126 127.0.0.126:8080 check weight 10 inter 7377 fall 2 rise 1
+ server 127.0.0.127 127.0.0.127:8080 check weight 10 inter 4508 fall 2 rise 1
+ server 127.0.0.128 127.0.0.128:8080 check weight 10 inter 3142 fall 2 rise 1
+ server 127.0.0.129 127.0.0.129:8080 check weight 10 inter 811 fall 2 rise 1
+ server 127.0.0.130 127.0.0.130:8080 check weight 10 inter 3095 fall 2 rise 1
+ server 127.0.0.131 127.0.0.131:8080 check weight 10 inter 1049 fall 2 rise 1
+ server 127.0.0.132 127.0.0.132:8080 check weight 10 inter 1381 fall 2 rise 1
+ server 127.0.0.133 127.0.0.133:8080 check weight 10 inter 5779 fall 2 rise 1
+ server 127.0.0.134 127.0.0.134:8080 check weight 10 inter 9561 fall 2 rise 1
+ server 127.0.0.135 127.0.0.135:8080 check weight 10 inter 3396 fall 2 rise 1
+ server 127.0.0.136 127.0.0.136:8080 check weight 10 inter 8890 fall 2 rise 1
+ server 127.0.0.137 127.0.0.137:8080 check weight 10 inter 9431 fall 2 rise 1
+ server 127.0.0.138 127.0.0.138:8080 check weight 10 inter 9150 fall 2 rise 1
+ server 127.0.0.139 127.0.0.139:8080 check weight 10 inter 3915 fall 2 rise 1
+ server 127.0.0.140 127.0.0.140:8080 check weight 10 inter 3816 fall 2 rise 1
+ server 127.0.0.141 127.0.0.141:8080 check weight 10 inter 1222 fall 2 rise 1
+ server 127.0.0.142 127.0.0.142:8080 check weight 10 inter 5476 fall 2 rise 1
+ server 127.0.0.143 127.0.0.143:8080 check weight 10 inter 1410 fall 2 rise 1
+ server 127.0.0.144 127.0.0.144:8080 check weight 10 inter 3507 fall 2 rise 1
+ server 127.0.0.145 127.0.0.145:8080 check weight 10 inter 7523 fall 2 rise 1
+ server 127.0.0.146 127.0.0.146:8080 check weight 10 inter 1873 fall 2 rise 1
+ server 127.0.0.147 127.0.0.147:8080 check weight 10 inter 3167 fall 2 rise 1
+ server 127.0.0.148 127.0.0.148:8080 check weight 10 inter 2154 fall 2 rise 1
+ server 127.0.0.149 127.0.0.149:8080 check weight 10 inter 3219 fall 2 rise 1
+ server 127.0.0.150 127.0.0.150:8080 check weight 10 inter 4591 fall 2 rise 1
+ server 127.0.0.151 127.0.0.151:8080 check weight 10 inter 6761 fall 2 rise 1
+ server 127.0.0.152 127.0.0.152:8080 check weight 10 inter 4037 fall 2 rise 1
+ server 127.0.0.153 127.0.0.153:8080 check weight 10 inter 2728 fall 2 rise 1
+ server 127.0.0.154 127.0.0.154:8080 check weight 10 inter 2833 fall 2 rise 1
+ server 127.0.0.155 127.0.0.155:8080 check weight 10 inter 3678 fall 2 rise 1
+ server 127.0.0.156 127.0.0.156:8080 check weight 10 inter 3885 fall 2 rise 1
+ server 127.0.0.157 127.0.0.157:8080 check weight 10 inter 9500 fall 2 rise 1
+ server 127.0.0.158 127.0.0.158:8080 check weight 10 inter 1927 fall 2 rise 1
+ server 127.0.0.159 127.0.0.159:8080 check weight 10 inter 2247 fall 2 rise 1
+ server 127.0.0.160 127.0.0.160:8080 check weight 10 inter 8253 fall 2 rise 1
+ server 127.0.0.161 127.0.0.161:8080 check weight 10 inter 860 fall 2 rise 1
+ server 127.0.0.162 127.0.0.162:8080 check weight 10 inter 2534 fall 2 rise 1
+ server 127.0.0.163 127.0.0.163:8080 check weight 10 inter 668 fall 2 rise 1
+ server 127.0.0.164 127.0.0.164:8080 check weight 10 inter 4957 fall 2 rise 1
+ server 127.0.0.165 127.0.0.165:8080 check weight 10 inter 9401 fall 2 rise 1
+ server 127.0.0.166 127.0.0.166:8080 check weight 10 inter 7540 fall 2 rise 1
+ server 127.0.0.167 127.0.0.167:8080 check weight 10 inter 3351 fall 2 rise 1
+ server 127.0.0.168 127.0.0.168:8080 check weight 10 inter 5705 fall 2 rise 1
+ server 127.0.0.169 127.0.0.169:8080 check weight 10 inter 9472 fall 2 rise 1
+ server 127.0.0.170 127.0.0.170:8080 check weight 10 inter 5718 fall 2 rise 1
+ server 127.0.0.171 127.0.0.171:8080 check weight 10 inter 8409 fall 2 rise 1
+ server 127.0.0.172 127.0.0.172:8080 check weight 10 inter 1139 fall 2 rise 1
+ server 127.0.0.173 127.0.0.173:8080 check weight 10 inter 8901 fall 2 rise 1
+ server 127.0.0.174 127.0.0.174:8080 check weight 10 inter 4858 fall 2 rise 1
+ server 127.0.0.175 127.0.0.175:8080 check weight 10 inter 1390 fall 2 rise 1
+ server 127.0.0.176 127.0.0.176:8080 check weight 10 inter 8941 fall 2 rise 1
+ server 127.0.0.177 127.0.0.177:8080 check weight 10 inter 6033 fall 2 rise 1
+ server 127.0.0.178 127.0.0.178:8080 check weight 10 inter 867 fall 2 rise 1
+ server 127.0.0.179 127.0.0.179:8080 check weight 10 inter 6754 fall 2 rise 1
+ server 127.0.0.180 127.0.0.180:8080 check weight 10 inter 5908 fall 2 rise 1
+ server 127.0.0.181 127.0.0.181:8080 check weight 10 inter 6848 fall 2 rise 1
+ server 127.0.0.182 127.0.0.182:8080 check weight 10 inter 4744 fall 2 rise 1
+ server 127.0.0.183 127.0.0.183:8080 check weight 10 inter 656 fall 2 rise 1
+ server 127.0.0.184 127.0.0.184:8080 check weight 10 inter 4669 fall 2 rise 1
+ server 127.0.0.185 127.0.0.185:8080 check weight 10 inter 9913 fall 2 rise 1
+ server 127.0.0.186 127.0.0.186:8080 check weight 10 inter 1772 fall 2 rise 1
+ server 127.0.0.187 127.0.0.187:8080 check weight 10 inter 2718 fall 2 rise 1
+ server 127.0.0.188 127.0.0.188:8080 check weight 10 inter 1473 fall 2 rise 1
+ server 127.0.0.189 127.0.0.189:8080 check weight 10 inter 3948 fall 2 rise 1
+ server 127.0.0.190 127.0.0.190:8080 check weight 10 inter 5019 fall 2 rise 1
+ server 127.0.0.191 127.0.0.191:8080 check weight 10 inter 602 fall 2 rise 1
+ server 127.0.0.192 127.0.0.192:8080 check weight 10 inter 4648 fall 2 rise 1
+ server 127.0.0.193 127.0.0.193:8080 check weight 10 inter 2365 fall 2 rise 1
+ server 127.0.0.194 127.0.0.194:8080 check weight 10 inter 8841 fall 2 rise 1
+ server 127.0.0.195 127.0.0.195:8080 check weight 10 inter 6051 fall 2 rise 1
+ server 127.0.0.196 127.0.0.196:8080 check weight 10 inter 6774 fall 2 rise 1
+ server 127.0.0.197 127.0.0.197:8080 check weight 10 inter 5267 fall 2 rise 1
+ server 127.0.0.198 127.0.0.198:8080 check weight 10 inter 1418 fall 2 rise 1
+ server 127.0.0.199 127.0.0.199:8080 check weight 10 inter 8632 fall 2 rise 1
+ server 127.0.0.200 127.0.0.200:8080 check weight 10 inter 792 fall 2 rise 1
+ server 127.0.0.201 127.0.0.201:8080 check weight 10 inter 9247 fall 2 rise 1
+ server 127.0.0.202 127.0.0.202:8080 check weight 10 inter 6321 fall 2 rise 1
+ server 127.0.0.203 127.0.0.203:8080 check weight 10 inter 7089 fall 2 rise 1
+ server 127.0.0.204 127.0.0.204:8080 check weight 10 inter 4157 fall 2 rise 1
+ server 127.0.0.205 127.0.0.205:8080 check weight 10 inter 9948 fall 2 rise 1
+ server 127.0.0.206 127.0.0.206:8080 check weight 10 inter 8297 fall 2 rise 1
+ server 127.0.0.207 127.0.0.207:8080 check weight 10 inter 8559 fall 2 rise 1
+ server 127.0.0.208 127.0.0.208:8080 check weight 10 inter 9573 fall 2 rise 1
+ server 127.0.0.209 127.0.0.209:8080 check weight 10 inter 3883 fall 2 rise 1
+ server 127.0.0.210 127.0.0.210:8080 check weight 10 inter 8806 fall 2 rise 1
+ server 127.0.0.211 127.0.0.211:8080 check weight 10 inter 4132 fall 2 rise 1
+ server 127.0.0.212 127.0.0.212:8080 check weight 10 inter 9803 fall 2 rise 1
+ server 127.0.0.213 127.0.0.213:8080 check weight 10 inter 9372 fall 2 rise 1
+ server 127.0.0.214 127.0.0.214:8080 check weight 10 inter 7561 fall 2 rise 1
+ server 127.0.0.215 127.0.0.215:8080 check weight 10 inter 6897 fall 2 rise 1
+ server 127.0.0.216 127.0.0.216:8080 check weight 10 inter 6162 fall 2 rise 1
+ server 127.0.0.217 127.0.0.217:8080 check weight 10 inter 8828 fall 2 rise 1
+ server 127.0.0.218 127.0.0.218:8080 check weight 10 inter 8565 fall 2 rise 1
+ server 127.0.0.219 127.0.0.219:8080 check weight 10 inter 2484 fall 2 rise 1
+ server 127.0.0.220 127.0.0.220:8080 check weight 10 inter 6048 fall 2 rise 1
+ server 127.0.0.221 127.0.0.221:8080 check weight 10 inter 3137 fall 2 rise 1
+ server 127.0.0.222 127.0.0.222:8080 check weight 10 inter 2244 fall 2 rise 1
+ server 127.0.0.223 127.0.0.223:8080 check weight 10 inter 1438 fall 2 rise 1
+ server 127.0.0.224 127.0.0.224:8080 check weight 10 inter 8941 fall 2 rise 1
+ server 127.0.0.225 127.0.0.225:8080 check weight 10 inter 3302 fall 2 rise 1
+ server 127.0.0.226 127.0.0.226:8080 check weight 10 inter 3864 fall 2 rise 1
+ server 127.0.0.227 127.0.0.227:8080 check weight 10 inter 7030 fall 2 rise 1
+ server 127.0.0.228 127.0.0.228:8080 check weight 10 inter 3626 fall 2 rise 1
+ server 127.0.0.229 127.0.0.229:8080 check weight 10 inter 103 fall 2 rise 1
+ server 127.0.0.230 127.0.0.230:8080 check weight 10 inter 5556 fall 2 rise 1
+ server 127.0.0.231 127.0.0.231:8080 check weight 10 inter 2112 fall 2 rise 1
+ server 127.0.0.232 127.0.0.232:8080 check weight 10 inter 5457 fall 2 rise 1
+ server 127.0.0.233 127.0.0.233:8080 check weight 10 inter 1228 fall 2 rise 1
+ server 127.0.0.234 127.0.0.234:8080 check weight 10 inter 3638 fall 2 rise 1
+ server 127.0.0.235 127.0.0.235:8080 check weight 10 inter 315 fall 2 rise 1
+ server 127.0.0.236 127.0.0.236:8080 check weight 10 inter 7488 fall 2 rise 1
+ server 127.0.0.237 127.0.0.237:8080 check weight 10 inter 2341 fall 2 rise 1
+ server 127.0.0.238 127.0.0.238:8080 check weight 10 inter 4123 fall 2 rise 1
+ server 127.0.0.239 127.0.0.239:8080 check weight 10 inter 1372 fall 2 rise 1
+ server 127.0.0.240 127.0.0.240:8080 check weight 10 inter 3205 fall 2 rise 1
+ server 127.0.0.241 127.0.0.241:8080 check weight 10 inter 1546 fall 2 rise 1
+ server 127.0.0.242 127.0.0.242:8080 check weight 10 inter 2080 fall 2 rise 1
+ server 127.0.0.243 127.0.0.243:8080 check weight 10 inter 765 fall 2 rise 1
+ server 127.0.0.244 127.0.0.244:8080 check weight 10 inter 1928 fall 2 rise 1
+ server 127.0.0.245 127.0.0.245:8080 check weight 10 inter 751 fall 2 rise 1
+ server 127.0.0.246 127.0.0.246:8080 check weight 10 inter 2681 fall 2 rise 1
+ server 127.0.0.247 127.0.0.247:8080 check weight 10 inter 1334 fall 2 rise 1
+ server 127.0.0.248 127.0.0.248:8080 check weight 10 inter 156 fall 2 rise 1
+ server 127.0.0.249 127.0.0.249:8080 check weight 10 inter 2521 fall 2 rise 1
+ server 127.0.0.250 127.0.0.250:8080 check weight 10 inter 9588 fall 2 rise 1
+ server 127.0.1.001 127.0.1.1:8080 check weight 10 inter 9815 fall 2 rise 1
+ server 127.0.1.002 127.0.1.2:8080 check weight 10 inter 2676 fall 2 rise 1
+ server 127.0.1.003 127.0.1.3:8080 check weight 10 inter 4660 fall 2 rise 1
+ server 127.0.1.004 127.0.1.4:8080 check weight 10 inter 4667 fall 2 rise 1
+ server 127.0.1.005 127.0.1.5:8080 check weight 10 inter 4267 fall 2 rise 1
+ server 127.0.1.006 127.0.1.6:8080 check weight 10 inter 5653 fall 2 rise 1
+ server 127.0.1.007 127.0.1.7:8080 check weight 10 inter 1104 fall 2 rise 1
+ server 127.0.1.008 127.0.1.8:8080 check weight 10 inter 8540 fall 2 rise 1
+ server 127.0.1.009 127.0.1.9:8080 check weight 10 inter 6397 fall 2 rise 1
+ server 127.0.1.010 127.0.1.10:8080 check weight 10 inter 2127 fall 2 rise 1
+ server 127.0.1.011 127.0.1.11:8080 check weight 10 inter 8175 fall 2 rise 1
+ server 127.0.1.012 127.0.1.12:8080 check weight 10 inter 3177 fall 2 rise 1
+ server 127.0.1.013 127.0.1.13:8080 check weight 10 inter 1542 fall 2 rise 1
+ server 127.0.1.014 127.0.1.14:8080 check weight 10 inter 4442 fall 2 rise 1
+ server 127.0.1.015 127.0.1.15:8080 check weight 10 inter 4575 fall 2 rise 1
+ server 127.0.1.016 127.0.1.16:8080 check weight 10 inter 2206 fall 2 rise 1
+ server 127.0.1.017 127.0.1.17:8080 check weight 10 inter 4174 fall 2 rise 1
+ server 127.0.1.018 127.0.1.18:8080 check weight 10 inter 6643 fall 2 rise 1
+ server 127.0.1.019 127.0.1.19:8080 check weight 10 inter 1592 fall 2 rise 1
+ server 127.0.1.020 127.0.1.20:8080 check weight 10 inter 597 fall 2 rise 1
+ server 127.0.1.021 127.0.1.21:8080 check weight 10 inter 7630 fall 2 rise 1
+ server 127.0.1.022 127.0.1.22:8080 check weight 10 inter 9379 fall 2 rise 1
+ server 127.0.1.023 127.0.1.23:8080 check weight 10 inter 1716 fall 2 rise 1
+ server 127.0.1.024 127.0.1.24:8080 check weight 10 inter 7867 fall 2 rise 1
+ server 127.0.1.025 127.0.1.25:8080 check weight 10 inter 5079 fall 2 rise 1
+ server 127.0.1.026 127.0.1.26:8080 check weight 10 inter 196 fall 2 rise 1
+ server 127.0.1.027 127.0.1.27:8080 check weight 10 inter 5089 fall 2 rise 1
+ server 127.0.1.028 127.0.1.28:8080 check weight 10 inter 9681 fall 2 rise 1
+ server 127.0.1.029 127.0.1.29:8080 check weight 10 inter 9157 fall 2 rise 1
+ server 127.0.1.030 127.0.1.30:8080 check weight 10 inter 2803 fall 2 rise 1
+ server 127.0.1.031 127.0.1.31:8080 check weight 10 inter 6265 fall 2 rise 1
+ server 127.0.1.032 127.0.1.32:8080 check weight 10 inter 2002 fall 2 rise 1
+ server 127.0.1.033 127.0.1.33:8080 check weight 10 inter 48 fall 2 rise 1
+ server 127.0.1.034 127.0.1.34:8080 check weight 10 inter 4547 fall 2 rise 1
+ server 127.0.1.035 127.0.1.35:8080 check weight 10 inter 4716 fall 2 rise 1
+ server 127.0.1.036 127.0.1.36:8080 check weight 10 inter 2215 fall 2 rise 1
+ server 127.0.1.037 127.0.1.37:8080 check weight 10 inter 6249 fall 2 rise 1
+ server 127.0.1.038 127.0.1.38:8080 check weight 10 inter 9352 fall 2 rise 1
+ server 127.0.1.039 127.0.1.39:8080 check weight 10 inter 5426 fall 2 rise 1
+ server 127.0.1.040 127.0.1.40:8080 check weight 10 inter 6285 fall 2 rise 1
+ server 127.0.1.041 127.0.1.41:8080 check weight 10 inter 3743 fall 2 rise 1
+ server 127.0.1.042 127.0.1.42:8080 check weight 10 inter 1486 fall 2 rise 1
+ server 127.0.1.043 127.0.1.43:8080 check weight 10 inter 5122 fall 2 rise 1
+ server 127.0.1.044 127.0.1.44:8080 check weight 10 inter 1905 fall 2 rise 1
+ server 127.0.1.045 127.0.1.45:8080 check weight 10 inter 2324 fall 2 rise 1
+ server 127.0.1.046 127.0.1.46:8080 check weight 10 inter 9773 fall 2 rise 1
+ server 127.0.1.047 127.0.1.47:8080 check weight 10 inter 2369 fall 2 rise 1
+ server 127.0.1.048 127.0.1.48:8080 check weight 10 inter 4576 fall 2 rise 1
+ server 127.0.1.049 127.0.1.49:8080 check weight 10 inter 1555 fall 2 rise 1
+ server 127.0.1.050 127.0.1.50:8080 check weight 10 inter 4189 fall 2 rise 1
+ server 127.0.1.051 127.0.1.51:8080 check weight 10 inter 1819 fall 2 rise 1
+ server 127.0.1.052 127.0.1.52:8080 check weight 10 inter 7617 fall 2 rise 1
+ server 127.0.1.053 127.0.1.53:8080 check weight 10 inter 5549 fall 2 rise 1
+ server 127.0.1.054 127.0.1.54:8080 check weight 10 inter 6617 fall 2 rise 1
+ server 127.0.1.055 127.0.1.55:8080 check weight 10 inter 2707 fall 2 rise 1
+ server 127.0.1.056 127.0.1.56:8080 check weight 10 inter 4127 fall 2 rise 1
+ server 127.0.1.057 127.0.1.57:8080 check weight 10 inter 9528 fall 2 rise 1
+ server 127.0.1.058 127.0.1.58:8080 check weight 10 inter 645 fall 2 rise 1
+ server 127.0.1.059 127.0.1.59:8080 check weight 10 inter 4027 fall 2 rise 1
+ server 127.0.1.060 127.0.1.60:8080 check weight 10 inter 2536 fall 2 rise 1
+ server 127.0.1.061 127.0.1.61:8080 check weight 10 inter 3207 fall 2 rise 1
+ server 127.0.1.062 127.0.1.62:8080 check weight 10 inter 6463 fall 2 rise 1
+ server 127.0.1.063 127.0.1.63:8080 check weight 10 inter 6482 fall 2 rise 1
+ server 127.0.1.064 127.0.1.64:8080 check weight 10 inter 3751 fall 2 rise 1
+ server 127.0.1.065 127.0.1.65:8080 check weight 10 inter 5319 fall 2 rise 1
+ server 127.0.1.066 127.0.1.66:8080 check weight 10 inter 9143 fall 2 rise 1
+ server 127.0.1.067 127.0.1.67:8080 check weight 10 inter 9809 fall 2 rise 1
+ server 127.0.1.068 127.0.1.68:8080 check weight 10 inter 7521 fall 2 rise 1
+ server 127.0.1.069 127.0.1.69:8080 check weight 10 inter 3887 fall 2 rise 1
+ server 127.0.1.070 127.0.1.70:8080 check weight 10 inter 2510 fall 2 rise 1
+ server 127.0.1.071 127.0.1.71:8080 check weight 10 inter 2611 fall 2 rise 1
+ server 127.0.1.072 127.0.1.72:8080 check weight 10 inter 2369 fall 2 rise 1
+ server 127.0.1.073 127.0.1.73:8080 check weight 10 inter 7157 fall 2 rise 1
+ server 127.0.1.074 127.0.1.74:8080 check weight 10 inter 9039 fall 2 rise 1
+ server 127.0.1.075 127.0.1.75:8080 check weight 10 inter 1352 fall 2 rise 1
+ server 127.0.1.076 127.0.1.76:8080 check weight 10 inter 2069 fall 2 rise 1
+ server 127.0.1.077 127.0.1.77:8080 check weight 10 inter 7504 fall 2 rise 1
+ server 127.0.1.078 127.0.1.78:8080 check weight 10 inter 4144 fall 2 rise 1
+ server 127.0.1.079 127.0.1.79:8080 check weight 10 inter 7144 fall 2 rise 1
+ server 127.0.1.080 127.0.1.80:8080 check weight 10 inter 1317 fall 2 rise 1
+ server 127.0.1.081 127.0.1.81:8080 check weight 10 inter 5537 fall 2 rise 1
+ server 127.0.1.082 127.0.1.82:8080 check weight 10 inter 6359 fall 2 rise 1
+ server 127.0.1.083 127.0.1.83:8080 check weight 10 inter 1912 fall 2 rise 1
+ server 127.0.1.084 127.0.1.84:8080 check weight 10 inter 2246 fall 2 rise 1
+ server 127.0.1.085 127.0.1.85:8080 check weight 10 inter 993 fall 2 rise 1
+ server 127.0.1.086 127.0.1.86:8080 check weight 10 inter 5181 fall 2 rise 1
+ server 127.0.1.087 127.0.1.87:8080 check weight 10 inter 3358 fall 2 rise 1
+ server 127.0.1.088 127.0.1.88:8080 check weight 10 inter 2371 fall 2 rise 1
+ server 127.0.1.089 127.0.1.89:8080 check weight 10 inter 7833 fall 2 rise 1
+ server 127.0.1.090 127.0.1.90:8080 check weight 10 inter 1908 fall 2 rise 1
+ server 127.0.1.091 127.0.1.91:8080 check weight 10 inter 2276 fall 2 rise 1
+ server 127.0.1.092 127.0.1.92:8080 check weight 10 inter 9622 fall 2 rise 1
+ server 127.0.1.093 127.0.1.93:8080 check weight 10 inter 1203 fall 2 rise 1
+ server 127.0.1.094 127.0.1.94:8080 check weight 10 inter 2472 fall 2 rise 1
+ server 127.0.1.095 127.0.1.95:8080 check weight 10 inter 9054 fall 2 rise 1
+ server 127.0.1.096 127.0.1.96:8080 check weight 10 inter 6330 fall 2 rise 1
+ server 127.0.1.097 127.0.1.97:8080 check weight 10 inter 724 fall 2 rise 1
+ server 127.0.1.098 127.0.1.98:8080 check weight 10 inter 374 fall 2 rise 1
+ server 127.0.1.099 127.0.1.99:8080 check weight 10 inter 9515 fall 2 rise 1
+ server 127.0.1.100 127.0.1.100:8080 check weight 10 inter 2080 fall 2 rise 1
+ server 127.0.1.101 127.0.1.101:8080 check weight 10 inter 6870 fall 2 rise 1
+ server 127.0.1.102 127.0.1.102:8080 check weight 10 inter 4878 fall 2 rise 1
+ server 127.0.1.103 127.0.1.103:8080 check weight 10 inter 158 fall 2 rise 1
+ server 127.0.1.104 127.0.1.104:8080 check weight 10 inter 1779 fall 2 rise 1
+ server 127.0.1.105 127.0.1.105:8080 check weight 10 inter 454 fall 2 rise 1
+ server 127.0.1.106 127.0.1.106:8080 check weight 10 inter 4714 fall 2 rise 1
+ server 127.0.1.107 127.0.1.107:8080 check weight 10 inter 3113 fall 2 rise 1
+ server 127.0.1.108 127.0.1.108:8080 check weight 10 inter 1164 fall 2 rise 1
+ server 127.0.1.109 127.0.1.109:8080 check weight 10 inter 9475 fall 2 rise 1
+ server 127.0.1.110 127.0.1.110:8080 check weight 10 inter 4764 fall 2 rise 1
+ server 127.0.1.111 127.0.1.111:8080 check weight 10 inter 894 fall 2 rise 1
+ server 127.0.1.112 127.0.1.112:8080 check weight 10 inter 2084 fall 2 rise 1
+ server 127.0.1.113 127.0.1.113:8080 check weight 10 inter 8769 fall 2 rise 1
+ server 127.0.1.114 127.0.1.114:8080 check weight 10 inter 3674 fall 2 rise 1
+ server 127.0.1.115 127.0.1.115:8080 check weight 10 inter 9365 fall 2 rise 1
+ server 127.0.1.116 127.0.1.116:8080 check weight 10 inter 1485 fall 2 rise 1
+ server 127.0.1.117 127.0.1.117:8080 check weight 10 inter 1937 fall 2 rise 1
+ server 127.0.1.118 127.0.1.118:8080 check weight 10 inter 8776 fall 2 rise 1
+ server 127.0.1.119 127.0.1.119:8080 check weight 10 inter 6429 fall 2 rise 1
+ server 127.0.1.120 127.0.1.120:8080 check weight 10 inter 4896 fall 2 rise 1
+ server 127.0.1.121 127.0.1.121:8080 check weight 10 inter 2209 fall 2 rise 1
+ server 127.0.1.122 127.0.1.122:8080 check weight 10 inter 2441 fall 2 rise 1
+ server 127.0.1.123 127.0.1.123:8080 check weight 10 inter 387 fall 2 rise 1
+ server 127.0.1.124 127.0.1.124:8080 check weight 10 inter 2020 fall 2 rise 1
+ server 127.0.1.125 127.0.1.125:8080 check weight 10 inter 8514 fall 2 rise 1
+ server 127.0.1.126 127.0.1.126:8080 check weight 10 inter 8373 fall 2 rise 1
+ server 127.0.1.127 127.0.1.127:8080 check weight 10 inter 2084 fall 2 rise 1
+ server 127.0.1.128 127.0.1.128:8080 check weight 10 inter 9251 fall 2 rise 1
+ server 127.0.1.129 127.0.1.129:8080 check weight 10 inter 926 fall 2 rise 1
+ server 127.0.1.130 127.0.1.130:8080 check weight 10 inter 3673 fall 2 rise 1
+ server 127.0.1.131 127.0.1.131:8080 check weight 10 inter 3714 fall 2 rise 1
+ server 127.0.1.132 127.0.1.132:8080 check weight 10 inter 5629 fall 2 rise 1
+ server 127.0.1.133 127.0.1.133:8080 check weight 10 inter 1461 fall 2 rise 1
+ server 127.0.1.134 127.0.1.134:8080 check weight 10 inter 2418 fall 2 rise 1
+ server 127.0.1.135 127.0.1.135:8080 check weight 10 inter 6088 fall 2 rise 1
+ server 127.0.1.136 127.0.1.136:8080 check weight 10 inter 1352 fall 2 rise 1
+ server 127.0.1.137 127.0.1.137:8080 check weight 10 inter 7020 fall 2 rise 1
+ server 127.0.1.138 127.0.1.138:8080 check weight 10 inter 1031 fall 2 rise 1
+ server 127.0.1.139 127.0.1.139:8080 check weight 10 inter 2494 fall 2 rise 1
+ server 127.0.1.140 127.0.1.140:8080 check weight 10 inter 2783 fall 2 rise 1
+ server 127.0.1.141 127.0.1.141:8080 check weight 10 inter 8276 fall 2 rise 1
+ server 127.0.1.142 127.0.1.142:8080 check weight 10 inter 7818 fall 2 rise 1
+ server 127.0.1.143 127.0.1.143:8080 check weight 10 inter 2298 fall 2 rise 1
+ server 127.0.1.144 127.0.1.144:8080 check weight 10 inter 1782 fall 2 rise 1
+ server 127.0.1.145 127.0.1.145:8080 check weight 10 inter 2395 fall 2 rise 1
+ server 127.0.1.146 127.0.1.146:8080 check weight 10 inter 7548 fall 2 rise 1
+ server 127.0.1.147 127.0.1.147:8080 check weight 10 inter 1980 fall 2 rise 1
+ server 127.0.1.148 127.0.1.148:8080 check weight 10 inter 3966 fall 2 rise 1
+ server 127.0.1.149 127.0.1.149:8080 check weight 10 inter 1238 fall 2 rise 1
+ server 127.0.1.150 127.0.1.150:8080 check weight 10 inter 5619 fall 2 rise 1
+ server 127.0.1.151 127.0.1.151:8080 check weight 10 inter 8600 fall 2 rise 1
+ server 127.0.1.152 127.0.1.152:8080 check weight 10 inter 4780 fall 2 rise 1
+ server 127.0.1.153 127.0.1.153:8080 check weight 10 inter 6137 fall 2 rise 1
+ server 127.0.1.154 127.0.1.154:8080 check weight 10 inter 8828 fall 2 rise 1
+ server 127.0.1.155 127.0.1.155:8080 check weight 10 inter 3682 fall 2 rise 1
+ server 127.0.1.156 127.0.1.156:8080 check weight 10 inter 2442 fall 2 rise 1
+ server 127.0.1.157 127.0.1.157:8080 check weight 10 inter 2620 fall 2 rise 1
+ server 127.0.1.158 127.0.1.158:8080 check weight 10 inter 6224 fall 2 rise 1
+ server 127.0.1.159 127.0.1.159:8080 check weight 10 inter 4828 fall 2 rise 1
+ server 127.0.1.160 127.0.1.160:8080 check weight 10 inter 2811 fall 2 rise 1
+ server 127.0.1.161 127.0.1.161:8080 check weight 10 inter 782 fall 2 rise 1
+ server 127.0.1.162 127.0.1.162:8080 check weight 10 inter 905 fall 2 rise 1
+ server 127.0.1.163 127.0.1.163:8080 check weight 10 inter 895 fall 2 rise 1
+ server 127.0.1.164 127.0.1.164:8080 check weight 10 inter 3999 fall 2 rise 1
+ server 127.0.1.165 127.0.1.165:8080 check weight 10 inter 7109 fall 2 rise 1
+ server 127.0.1.166 127.0.1.166:8080 check weight 10 inter 2833 fall 2 rise 1
+ server 127.0.1.167 127.0.1.167:8080 check weight 10 inter 1274 fall 2 rise 1
+ server 127.0.1.168 127.0.1.168:8080 check weight 10 inter 3179 fall 2 rise 1
+ server 127.0.1.169 127.0.1.169:8080 check weight 10 inter 429 fall 2 rise 1
+ server 127.0.1.170 127.0.1.170:8080 check weight 10 inter 1407 fall 2 rise 1
+ server 127.0.1.171 127.0.1.171:8080 check weight 10 inter 9117 fall 2 rise 1
+ server 127.0.1.172 127.0.1.172:8080 check weight 10 inter 1525 fall 2 rise 1
+ server 127.0.1.173 127.0.1.173:8080 check weight 10 inter 4364 fall 2 rise 1
+ server 127.0.1.174 127.0.1.174:8080 check weight 10 inter 5696 fall 2 rise 1
+ server 127.0.1.175 127.0.1.175:8080 check weight 10 inter 9492 fall 2 rise 1
+ server 127.0.1.176 127.0.1.176:8080 check weight 10 inter 6098 fall 2 rise 1
+ server 127.0.1.177 127.0.1.177:8080 check weight 10 inter 519 fall 2 rise 1
+ server 127.0.1.178 127.0.1.178:8080 check weight 10 inter 6454 fall 2 rise 1
+ server 127.0.1.179 127.0.1.179:8080 check weight 10 inter 3397 fall 2 rise 1
+ server 127.0.1.180 127.0.1.180:8080 check weight 10 inter 8255 fall 2 rise 1
+ server 127.0.1.181 127.0.1.181:8080 check weight 10 inter 3863 fall 2 rise 1
+ server 127.0.1.182 127.0.1.182:8080 check weight 10 inter 132 fall 2 rise 1
+ server 127.0.1.183 127.0.1.183:8080 check weight 10 inter 8776 fall 2 rise 1
+ server 127.0.1.184 127.0.1.184:8080 check weight 10 inter 5890 fall 2 rise 1
+ server 127.0.1.185 127.0.1.185:8080 check weight 10 inter 4361 fall 2 rise 1
+ server 127.0.1.186 127.0.1.186:8080 check weight 10 inter 3013 fall 2 rise 1
+ server 127.0.1.187 127.0.1.187:8080 check weight 10 inter 8857 fall 2 rise 1
+ server 127.0.1.188 127.0.1.188:8080 check weight 10 inter 767 fall 2 rise 1
+ server 127.0.1.189 127.0.1.189:8080 check weight 10 inter 5880 fall 2 rise 1
+ server 127.0.1.190 127.0.1.190:8080 check weight 10 inter 1520 fall 2 rise 1
+ server 127.0.1.191 127.0.1.191:8080 check weight 10 inter 8766 fall 2 rise 1
+ server 127.0.1.192 127.0.1.192:8080 check weight 10 inter 6393 fall 2 rise 1
+ server 127.0.1.193 127.0.1.193:8080 check weight 10 inter 4715 fall 2 rise 1
+ server 127.0.1.194 127.0.1.194:8080 check weight 10 inter 5452 fall 2 rise 1
+ server 127.0.1.195 127.0.1.195:8080 check weight 10 inter 9849 fall 2 rise 1
+ server 127.0.1.196 127.0.1.196:8080 check weight 10 inter 2958 fall 2 rise 1
+ server 127.0.1.197 127.0.1.197:8080 check weight 10 inter 5374 fall 2 rise 1
+ server 127.0.1.198 127.0.1.198:8080 check weight 10 inter 7266 fall 2 rise 1
+ server 127.0.1.199 127.0.1.199:8080 check weight 10 inter 7117 fall 2 rise 1
+ server 127.0.1.200 127.0.1.200:8080 check weight 10 inter 8433 fall 2 rise 1
+ server 127.0.1.201 127.0.1.201:8080 check weight 10 inter 1057 fall 2 rise 1
+ server 127.0.1.202 127.0.1.202:8080 check weight 10 inter 775 fall 2 rise 1
+ server 127.0.1.203 127.0.1.203:8080 check weight 10 inter 8448 fall 2 rise 1
+ server 127.0.1.204 127.0.1.204:8080 check weight 10 inter 5318 fall 2 rise 1
+ server 127.0.1.205 127.0.1.205:8080 check weight 10 inter 7539 fall 2 rise 1
+ server 127.0.1.206 127.0.1.206:8080 check weight 10 inter 2535 fall 2 rise 1
+ server 127.0.1.207 127.0.1.207:8080 check weight 10 inter 4533 fall 2 rise 1
+ server 127.0.1.208 127.0.1.208:8080 check weight 10 inter 2576 fall 2 rise 1
+ server 127.0.1.209 127.0.1.209:8080 check weight 10 inter 6812 fall 2 rise 1
+ server 127.0.1.210 127.0.1.210:8080 check weight 10 inter 2529 fall 2 rise 1
+ server 127.0.1.211 127.0.1.211:8080 check weight 10 inter 326 fall 2 rise 1
+ server 127.0.1.212 127.0.1.212:8080 check weight 10 inter 90 fall 2 rise 1
+ server 127.0.1.213 127.0.1.213:8080 check weight 10 inter 7965 fall 2 rise 1
+ server 127.0.1.214 127.0.1.214:8080 check weight 10 inter 2948 fall 2 rise 1
+ server 127.0.1.215 127.0.1.215:8080 check weight 10 inter 9780 fall 2 rise 1
+ server 127.0.1.216 127.0.1.216:8080 check weight 10 inter 8408 fall 2 rise 1
+ server 127.0.1.217 127.0.1.217:8080 check weight 10 inter 121 fall 2 rise 1
+ server 127.0.1.218 127.0.1.218:8080 check weight 10 inter 9532 fall 2 rise 1
+ server 127.0.1.219 127.0.1.219:8080 check weight 10 inter 2910 fall 2 rise 1
+ server 127.0.1.220 127.0.1.220:8080 check weight 10 inter 6811 fall 2 rise 1
+ server 127.0.1.221 127.0.1.221:8080 check weight 10 inter 8303 fall 2 rise 1
+ server 127.0.1.222 127.0.1.222:8080 check weight 10 inter 4268 fall 2 rise 1
+ server 127.0.1.223 127.0.1.223:8080 check weight 10 inter 4835 fall 2 rise 1
+ server 127.0.1.224 127.0.1.224:8080 check weight 10 inter 1862 fall 2 rise 1
+ server 127.0.1.225 127.0.1.225:8080 check weight 10 inter 7342 fall 2 rise 1
+ server 127.0.1.226 127.0.1.226:8080 check weight 10 inter 7131 fall 2 rise 1
+ server 127.0.1.227 127.0.1.227:8080 check weight 10 inter 6904 fall 2 rise 1
+ server 127.0.1.228 127.0.1.228:8080 check weight 10 inter 5443 fall 2 rise 1
+ server 127.0.1.229 127.0.1.229:8080 check weight 10 inter 2696 fall 2 rise 1
+ server 127.0.1.230 127.0.1.230:8080 check weight 10 inter 1375 fall 2 rise 1
+ server 127.0.1.231 127.0.1.231:8080 check weight 10 inter 6168 fall 2 rise 1
+ server 127.0.1.232 127.0.1.232:8080 check weight 10 inter 8004 fall 2 rise 1
+ server 127.0.1.233 127.0.1.233:8080 check weight 10 inter 546 fall 2 rise 1
+ server 127.0.1.234 127.0.1.234:8080 check weight 10 inter 506 fall 2 rise 1
+ server 127.0.1.235 127.0.1.235:8080 check weight 10 inter 9022 fall 2 rise 1
+ server 127.0.1.236 127.0.1.236:8080 check weight 10 inter 331 fall 2 rise 1
+ server 127.0.1.237 127.0.1.237:8080 check weight 10 inter 2481 fall 2 rise 1
+ server 127.0.1.238 127.0.1.238:8080 check weight 10 inter 8679 fall 2 rise 1
+ server 127.0.1.239 127.0.1.239:8080 check weight 10 inter 1699 fall 2 rise 1
+ server 127.0.1.240 127.0.1.240:8080 check weight 10 inter 5705 fall 2 rise 1
+ server 127.0.1.241 127.0.1.241:8080 check weight 10 inter 675 fall 2 rise 1
+ server 127.0.1.242 127.0.1.242:8080 check weight 10 inter 4963 fall 2 rise 1
+ server 127.0.1.243 127.0.1.243:8080 check weight 10 inter 4249 fall 2 rise 1
+ server 127.0.1.244 127.0.1.244:8080 check weight 10 inter 2375 fall 2 rise 1
+ server 127.0.1.245 127.0.1.245:8080 check weight 10 inter 3808 fall 2 rise 1
+ server 127.0.1.246 127.0.1.246:8080 check weight 10 inter 1979 fall 2 rise 1
+ server 127.0.1.247 127.0.1.247:8080 check weight 10 inter 6659 fall 2 rise 1
+ server 127.0.1.248 127.0.1.248:8080 check weight 10 inter 534 fall 2 rise 1
+ server 127.0.1.249 127.0.1.249:8080 check weight 10 inter 5152 fall 2 rise 1
+ server 127.0.1.250 127.0.1.250:8080 check weight 10 inter 5513 fall 2 rise 1
+ server 127.0.2.001 127.0.2.1:8080 check weight 10 inter 3147 fall 2 rise 1
+ server 127.0.2.002 127.0.2.2:8080 check weight 10 inter 2022 fall 2 rise 1
+ server 127.0.2.003 127.0.2.3:8080 check weight 10 inter 2009 fall 2 rise 1
+ server 127.0.2.004 127.0.2.4:8080 check weight 10 inter 686 fall 2 rise 1
+ server 127.0.2.005 127.0.2.5:8080 check weight 10 inter 7889 fall 2 rise 1
+ server 127.0.2.006 127.0.2.6:8080 check weight 10 inter 7401 fall 2 rise 1
+ server 127.0.2.007 127.0.2.7:8080 check weight 10 inter 2847 fall 2 rise 1
+ server 127.0.2.008 127.0.2.8:8080 check weight 10 inter 2814 fall 2 rise 1
+ server 127.0.2.009 127.0.2.9:8080 check weight 10 inter 9764 fall 2 rise 1
+ server 127.0.2.010 127.0.2.10:8080 check weight 10 inter 1557 fall 2 rise 1
+ server 127.0.2.011 127.0.2.11:8080 check weight 10 inter 7626 fall 2 rise 1
+ server 127.0.2.012 127.0.2.12:8080 check weight 10 inter 7023 fall 2 rise 1
+ server 127.0.2.013 127.0.2.13:8080 check weight 10 inter 7666 fall 2 rise 1
+ server 127.0.2.014 127.0.2.14:8080 check weight 10 inter 4636 fall 2 rise 1
+ server 127.0.2.015 127.0.2.15:8080 check weight 10 inter 3494 fall 2 rise 1
+ server 127.0.2.016 127.0.2.16:8080 check weight 10 inter 1791 fall 2 rise 1
+ server 127.0.2.017 127.0.2.17:8080 check weight 10 inter 2447 fall 2 rise 1
+ server 127.0.2.018 127.0.2.18:8080 check weight 10 inter 8828 fall 2 rise 1
+ server 127.0.2.019 127.0.2.19:8080 check weight 10 inter 1517 fall 2 rise 1
+ server 127.0.2.020 127.0.2.20:8080 check weight 10 inter 6216 fall 2 rise 1
+ server 127.0.2.021 127.0.2.21:8080 check weight 10 inter 9192 fall 2 rise 1
+ server 127.0.2.022 127.0.2.22:8080 check weight 10 inter 1828 fall 2 rise 1
+ server 127.0.2.023 127.0.2.23:8080 check weight 10 inter 2772 fall 2 rise 1
+ server 127.0.2.024 127.0.2.24:8080 check weight 10 inter 5541 fall 2 rise 1
+ server 127.0.2.025 127.0.2.25:8080 check weight 10 inter 8020 fall 2 rise 1
+ server 127.0.2.026 127.0.2.26:8080 check weight 10 inter 6877 fall 2 rise 1
+ server 127.0.2.027 127.0.2.27:8080 check weight 10 inter 1000 fall 2 rise 1
+ server 127.0.2.028 127.0.2.28:8080 check weight 10 inter 5809 fall 2 rise 1
+ server 127.0.2.029 127.0.2.29:8080 check weight 10 inter 1152 fall 2 rise 1
+ server 127.0.2.030 127.0.2.30:8080 check weight 10 inter 8484 fall 2 rise 1
+ server 127.0.2.031 127.0.2.31:8080 check weight 10 inter 5223 fall 2 rise 1
+ server 127.0.2.032 127.0.2.32:8080 check weight 10 inter 3444 fall 2 rise 1
+ server 127.0.2.033 127.0.2.33:8080 check weight 10 inter 2710 fall 2 rise 1
+ server 127.0.2.034 127.0.2.34:8080 check weight 10 inter 7786 fall 2 rise 1
+ server 127.0.2.035 127.0.2.35:8080 check weight 10 inter 2302 fall 2 rise 1
+ server 127.0.2.036 127.0.2.36:8080 check weight 10 inter 8732 fall 2 rise 1
+ server 127.0.2.037 127.0.2.37:8080 check weight 10 inter 9474 fall 2 rise 1
+ server 127.0.2.038 127.0.2.38:8080 check weight 10 inter 4746 fall 2 rise 1
+ server 127.0.2.039 127.0.2.39:8080 check weight 10 inter 1142 fall 2 rise 1
+ server 127.0.2.040 127.0.2.40:8080 check weight 10 inter 1965 fall 2 rise 1
+ server 127.0.2.041 127.0.2.41:8080 check weight 10 inter 3669 fall 2 rise 1
+ server 127.0.2.042 127.0.2.42:8080 check weight 10 inter 8573 fall 2 rise 1
+ server 127.0.2.043 127.0.2.43:8080 check weight 10 inter 1901 fall 2 rise 1
+ server 127.0.2.044 127.0.2.44:8080 check weight 10 inter 5049 fall 2 rise 1
+ server 127.0.2.045 127.0.2.45:8080 check weight 10 inter 5846 fall 2 rise 1
+ server 127.0.2.046 127.0.2.46:8080 check weight 10 inter 9087 fall 2 rise 1
+ server 127.0.2.047 127.0.2.47:8080 check weight 10 inter 1301 fall 2 rise 1
+ server 127.0.2.048 127.0.2.48:8080 check weight 10 inter 2878 fall 2 rise 1
+ server 127.0.2.049 127.0.2.49:8080 check weight 10 inter 636 fall 2 rise 1
+ server 127.0.2.050 127.0.2.50:8080 check weight 10 inter 8543 fall 2 rise 1
+ server 127.0.2.051 127.0.2.51:8080 check weight 10 inter 7392 fall 2 rise 1
+ server 127.0.2.052 127.0.2.52:8080 check weight 10 inter 6210 fall 2 rise 1
+ server 127.0.2.053 127.0.2.53:8080 check weight 10 inter 4667 fall 2 rise 1
+ server 127.0.2.054 127.0.2.54:8080 check weight 10 inter 5306 fall 2 rise 1
+ server 127.0.2.055 127.0.2.55:8080 check weight 10 inter 5030 fall 2 rise 1
+ server 127.0.2.056 127.0.2.56:8080 check weight 10 inter 9711 fall 2 rise 1
+ server 127.0.2.057 127.0.2.57:8080 check weight 10 inter 4801 fall 2 rise 1
+ server 127.0.2.058 127.0.2.58:8080 check weight 10 inter 3605 fall 2 rise 1
+ server 127.0.2.059 127.0.2.59:8080 check weight 10 inter 5100 fall 2 rise 1
+ server 127.0.2.060 127.0.2.60:8080 check weight 10 inter 7534 fall 2 rise 1
+ server 127.0.2.061 127.0.2.61:8080 check weight 10 inter 8912 fall 2 rise 1
+ server 127.0.2.062 127.0.2.62:8080 check weight 10 inter 4795 fall 2 rise 1
+ server 127.0.2.063 127.0.2.63:8080 check weight 10 inter 7365 fall 2 rise 1
+ server 127.0.2.064 127.0.2.64:8080 check weight 10 inter 8404 fall 2 rise 1
+ server 127.0.2.065 127.0.2.65:8080 check weight 10 inter 7817 fall 2 rise 1
+ server 127.0.2.066 127.0.2.66:8080 check weight 10 inter 2652 fall 2 rise 1
+ server 127.0.2.067 127.0.2.67:8080 check weight 10 inter 1061 fall 2 rise 1
+ server 127.0.2.068 127.0.2.68:8080 check weight 10 inter 7040 fall 2 rise 1
+ server 127.0.2.069 127.0.2.69:8080 check weight 10 inter 3357 fall 2 rise 1
+ server 127.0.2.070 127.0.2.70:8080 check weight 10 inter 882 fall 2 rise 1
+ server 127.0.2.071 127.0.2.71:8080 check weight 10 inter 634 fall 2 rise 1
+ server 127.0.2.072 127.0.2.72:8080 check weight 10 inter 2176 fall 2 rise 1
+ server 127.0.2.073 127.0.2.73:8080 check weight 10 inter 6932 fall 2 rise 1
+ server 127.0.2.074 127.0.2.74:8080 check weight 10 inter 5394 fall 2 rise 1
+ server 127.0.2.075 127.0.2.75:8080 check weight 10 inter 6601 fall 2 rise 1
+ server 127.0.2.076 127.0.2.76:8080 check weight 10 inter 8038 fall 2 rise 1
+ server 127.0.2.077 127.0.2.77:8080 check weight 10 inter 6089 fall 2 rise 1
+ server 127.0.2.078 127.0.2.78:8080 check weight 10 inter 2904 fall 2 rise 1
+ server 127.0.2.079 127.0.2.79:8080 check weight 10 inter 6633 fall 2 rise 1
+ server 127.0.2.080 127.0.2.80:8080 check weight 10 inter 5679 fall 2 rise 1
+ server 127.0.2.081 127.0.2.81:8080 check weight 10 inter 1339 fall 2 rise 1
+ server 127.0.2.082 127.0.2.82:8080 check weight 10 inter 6079 fall 2 rise 1
+ server 127.0.2.083 127.0.2.83:8080 check weight 10 inter 6657 fall 2 rise 1
+ server 127.0.2.084 127.0.2.84:8080 check weight 10 inter 5933 fall 2 rise 1
+ server 127.0.2.085 127.0.2.85:8080 check weight 10 inter 8004 fall 2 rise 1
+ server 127.0.2.086 127.0.2.86:8080 check weight 10 inter 4050 fall 2 rise 1
+ server 127.0.2.087 127.0.2.87:8080 check weight 10 inter 7875 fall 2 rise 1
+ server 127.0.2.088 127.0.2.88:8080 check weight 10 inter 4441 fall 2 rise 1
+ server 127.0.2.089 127.0.2.89:8080 check weight 10 inter 823 fall 2 rise 1
+ server 127.0.2.090 127.0.2.90:8080 check weight 10 inter 8661 fall 2 rise 1
+ server 127.0.2.091 127.0.2.91:8080 check weight 10 inter 1013 fall 2 rise 1
+ server 127.0.2.092 127.0.2.92:8080 check weight 10 inter 6458 fall 2 rise 1
+ server 127.0.2.093 127.0.2.93:8080 check weight 10 inter 7820 fall 2 rise 1
+ server 127.0.2.094 127.0.2.94:8080 check weight 10 inter 5555 fall 2 rise 1
+ server 127.0.2.095 127.0.2.95:8080 check weight 10 inter 1571 fall 2 rise 1
+ server 127.0.2.096 127.0.2.96:8080 check weight 10 inter 835 fall 2 rise 1
+ server 127.0.2.097 127.0.2.97:8080 check weight 10 inter 4988 fall 2 rise 1
+ server 127.0.2.098 127.0.2.98:8080 check weight 10 inter 6806 fall 2 rise 1
+ server 127.0.2.099 127.0.2.99:8080 check weight 10 inter 7815 fall 2 rise 1
+ server 127.0.2.100 127.0.2.100:8080 check weight 10 inter 4288 fall 2 rise 1
+ server 127.0.2.101 127.0.2.101:8080 check weight 10 inter 9995 fall 2 rise 1
+ server 127.0.2.102 127.0.2.102:8080 check weight 10 inter 1881 fall 2 rise 1
+ server 127.0.2.103 127.0.2.103:8080 check weight 10 inter 6532 fall 2 rise 1
+ server 127.0.2.104 127.0.2.104:8080 check weight 10 inter 3631 fall 2 rise 1
+ server 127.0.2.105 127.0.2.105:8080 check weight 10 inter 3703 fall 2 rise 1
+ server 127.0.2.106 127.0.2.106:8080 check weight 10 inter 861 fall 2 rise 1
+ server 127.0.2.107 127.0.2.107:8080 check weight 10 inter 7685 fall 2 rise 1
+ server 127.0.2.108 127.0.2.108:8080 check weight 10 inter 2015 fall 2 rise 1
+ server 127.0.2.109 127.0.2.109:8080 check weight 10 inter 469 fall 2 rise 1
+ server 127.0.2.110 127.0.2.110:8080 check weight 10 inter 8412 fall 2 rise 1
+ server 127.0.2.111 127.0.2.111:8080 check weight 10 inter 5112 fall 2 rise 1
+ server 127.0.2.112 127.0.2.112:8080 check weight 10 inter 1929 fall 2 rise 1
+ server 127.0.2.113 127.0.2.113:8080 check weight 10 inter 2614 fall 2 rise 1
+ server 127.0.2.114 127.0.2.114:8080 check weight 10 inter 738 fall 2 rise 1
+ server 127.0.2.115 127.0.2.115:8080 check weight 10 inter 9049 fall 2 rise 1
+ server 127.0.2.116 127.0.2.116:8080 check weight 10 inter 4401 fall 2 rise 1
+ server 127.0.2.117 127.0.2.117:8080 check weight 10 inter 7468 fall 2 rise 1
+ server 127.0.2.118 127.0.2.118:8080 check weight 10 inter 3989 fall 2 rise 1
+ server 127.0.2.119 127.0.2.119:8080 check weight 10 inter 2992 fall 2 rise 1
+ server 127.0.2.120 127.0.2.120:8080 check weight 10 inter 362 fall 2 rise 1
+ server 127.0.2.121 127.0.2.121:8080 check weight 10 inter 2623 fall 2 rise 1
+ server 127.0.2.122 127.0.2.122:8080 check weight 10 inter 856 fall 2 rise 1
+ server 127.0.2.123 127.0.2.123:8080 check weight 10 inter 2987 fall 2 rise 1
+ server 127.0.2.124 127.0.2.124:8080 check weight 10 inter 860 fall 2 rise 1
+ server 127.0.2.125 127.0.2.125:8080 check weight 10 inter 2445 fall 2 rise 1
+ server 127.0.2.126 127.0.2.126:8080 check weight 10 inter 2099 fall 2 rise 1
+ server 127.0.2.127 127.0.2.127:8080 check weight 10 inter 169 fall 2 rise 1
+ server 127.0.2.128 127.0.2.128:8080 check weight 10 inter 9304 fall 2 rise 1
+ server 127.0.2.129 127.0.2.129:8080 check weight 10 inter 3813 fall 2 rise 1
+ server 127.0.2.130 127.0.2.130:8080 check weight 10 inter 3136 fall 2 rise 1
+ server 127.0.2.131 127.0.2.131:8080 check weight 10 inter 9739 fall 2 rise 1
+ server 127.0.2.132 127.0.2.132:8080 check weight 10 inter 9829 fall 2 rise 1
+ server 127.0.2.133 127.0.2.133:8080 check weight 10 inter 3762 fall 2 rise 1
+ server 127.0.2.134 127.0.2.134:8080 check weight 10 inter 9061 fall 2 rise 1
+ server 127.0.2.135 127.0.2.135:8080 check weight 10 inter 9771 fall 2 rise 1
+ server 127.0.2.136 127.0.2.136:8080 check weight 10 inter 9106 fall 2 rise 1
+ server 127.0.2.137 127.0.2.137:8080 check weight 10 inter 8440 fall 2 rise 1
+ server 127.0.2.138 127.0.2.138:8080 check weight 10 inter 6517 fall 2 rise 1
+ server 127.0.2.139 127.0.2.139:8080 check weight 10 inter 1530 fall 2 rise 1
+ server 127.0.2.140 127.0.2.140:8080 check weight 10 inter 282 fall 2 rise 1
+ server 127.0.2.141 127.0.2.141:8080 check weight 10 inter 8816 fall 2 rise 1
+ server 127.0.2.142 127.0.2.142:8080 check weight 10 inter 4262 fall 2 rise 1
+ server 127.0.2.143 127.0.2.143:8080 check weight 10 inter 1931 fall 2 rise 1
+ server 127.0.2.144 127.0.2.144:8080 check weight 10 inter 1494 fall 2 rise 1
+ server 127.0.2.145 127.0.2.145:8080 check weight 10 inter 2820 fall 2 rise 1
+ server 127.0.2.146 127.0.2.146:8080 check weight 10 inter 6224 fall 2 rise 1
+ server 127.0.2.147 127.0.2.147:8080 check weight 10 inter 9184 fall 2 rise 1
+ server 127.0.2.148 127.0.2.148:8080 check weight 10 inter 9605 fall 2 rise 1
+ server 127.0.2.149 127.0.2.149:8080 check weight 10 inter 6313 fall 2 rise 1
+ server 127.0.2.150 127.0.2.150:8080 check weight 10 inter 4602 fall 2 rise 1
+ server 127.0.2.151 127.0.2.151:8080 check weight 10 inter 568 fall 2 rise 1
+ server 127.0.2.152 127.0.2.152:8080 check weight 10 inter 5164 fall 2 rise 1
+ server 127.0.2.153 127.0.2.153:8080 check weight 10 inter 2575 fall 2 rise 1
+ server 127.0.2.154 127.0.2.154:8080 check weight 10 inter 6164 fall 2 rise 1
+ server 127.0.2.155 127.0.2.155:8080 check weight 10 inter 3989 fall 2 rise 1
+ server 127.0.2.156 127.0.2.156:8080 check weight 10 inter 3534 fall 2 rise 1
+ server 127.0.2.157 127.0.2.157:8080 check weight 10 inter 1274 fall 2 rise 1
+ server 127.0.2.158 127.0.2.158:8080 check weight 10 inter 2083 fall 2 rise 1
+ server 127.0.2.159 127.0.2.159:8080 check weight 10 inter 2911 fall 2 rise 1
+ server 127.0.2.160 127.0.2.160:8080 check weight 10 inter 6666 fall 2 rise 1
+ server 127.0.2.161 127.0.2.161:8080 check weight 10 inter 9725 fall 2 rise 1
+ server 127.0.2.162 127.0.2.162:8080 check weight 10 inter 6161 fall 2 rise 1
+ server 127.0.2.163 127.0.2.163:8080 check weight 10 inter 6123 fall 2 rise 1
+ server 127.0.2.164 127.0.2.164:8080 check weight 10 inter 296 fall 2 rise 1
+ server 127.0.2.165 127.0.2.165:8080 check weight 10 inter 4028 fall 2 rise 1
+ server 127.0.2.166 127.0.2.166:8080 check weight 10 inter 7297 fall 2 rise 1
+ server 127.0.2.167 127.0.2.167:8080 check weight 10 inter 2553 fall 2 rise 1
+ server 127.0.2.168 127.0.2.168:8080 check weight 10 inter 9505 fall 2 rise 1
+ server 127.0.2.169 127.0.2.169:8080 check weight 10 inter 5902 fall 2 rise 1
+ server 127.0.2.170 127.0.2.170:8080 check weight 10 inter 7235 fall 2 rise 1
+ server 127.0.2.171 127.0.2.171:8080 check weight 10 inter 6624 fall 2 rise 1
+ server 127.0.2.172 127.0.2.172:8080 check weight 10 inter 6816 fall 2 rise 1
+ server 127.0.2.173 127.0.2.173:8080 check weight 10 inter 6580 fall 2 rise 1
+ server 127.0.2.174 127.0.2.174:8080 check weight 10 inter 4347 fall 2 rise 1
+ server 127.0.2.175 127.0.2.175:8080 check weight 10 inter 2473 fall 2 rise 1
+ server 127.0.2.176 127.0.2.176:8080 check weight 10 inter 1292 fall 2 rise 1
+ server 127.0.2.177 127.0.2.177:8080 check weight 10 inter 4461 fall 2 rise 1
+ server 127.0.2.178 127.0.2.178:8080 check weight 10 inter 641 fall 2 rise 1
+ server 127.0.2.179 127.0.2.179:8080 check weight 10 inter 3790 fall 2 rise 1
+ server 127.0.2.180 127.0.2.180:8080 check weight 10 inter 5747 fall 2 rise 1
+ server 127.0.2.181 127.0.2.181:8080 check weight 10 inter 1717 fall 2 rise 1
+ server 127.0.2.182 127.0.2.182:8080 check weight 10 inter 8921 fall 2 rise 1
+ server 127.0.2.183 127.0.2.183:8080 check weight 10 inter 7585 fall 2 rise 1
+ server 127.0.2.184 127.0.2.184:8080 check weight 10 inter 198 fall 2 rise 1
+ server 127.0.2.185 127.0.2.185:8080 check weight 10 inter 6129 fall 2 rise 1
+ server 127.0.2.186 127.0.2.186:8080 check weight 10 inter 2193 fall 2 rise 1
+ server 127.0.2.187 127.0.2.187:8080 check weight 10 inter 3564 fall 2 rise 1
+ server 127.0.2.188 127.0.2.188:8080 check weight 10 inter 5175 fall 2 rise 1
+ server 127.0.2.189 127.0.2.189:8080 check weight 10 inter 555 fall 2 rise 1
+ server 127.0.2.190 127.0.2.190:8080 check weight 10 inter 1180 fall 2 rise 1
+ server 127.0.2.191 127.0.2.191:8080 check weight 10 inter 9116 fall 2 rise 1
+ server 127.0.2.192 127.0.2.192:8080 check weight 10 inter 9443 fall 2 rise 1
+ server 127.0.2.193 127.0.2.193:8080 check weight 10 inter 5693 fall 2 rise 1
+ server 127.0.2.194 127.0.2.194:8080 check weight 10 inter 337 fall 2 rise 1
+ server 127.0.2.195 127.0.2.195:8080 check weight 10 inter 6669 fall 2 rise 1
+ server 127.0.2.196 127.0.2.196:8080 check weight 10 inter 223 fall 2 rise 1
+ server 127.0.2.197 127.0.2.197:8080 check weight 10 inter 1311 fall 2 rise 1
+ server 127.0.2.198 127.0.2.198:8080 check weight 10 inter 5010 fall 2 rise 1
+ server 127.0.2.199 127.0.2.199:8080 check weight 10 inter 5572 fall 2 rise 1
+ server 127.0.2.200 127.0.2.200:8080 check weight 10 inter 5555 fall 2 rise 1
+ server 127.0.2.201 127.0.2.201:8080 check weight 10 inter 9458 fall 2 rise 1
+ server 127.0.2.202 127.0.2.202:8080 check weight 10 inter 6125 fall 2 rise 1
+ server 127.0.2.203 127.0.2.203:8080 check weight 10 inter 6334 fall 2 rise 1
+ server 127.0.2.204 127.0.2.204:8080 check weight 10 inter 2873 fall 2 rise 1
+ server 127.0.2.205 127.0.2.205:8080 check weight 10 inter 4200 fall 2 rise 1
+ server 127.0.2.206 127.0.2.206:8080 check weight 10 inter 1653 fall 2 rise 1
+ server 127.0.2.207 127.0.2.207:8080 check weight 10 inter 4459 fall 2 rise 1
+ server 127.0.2.208 127.0.2.208:8080 check weight 10 inter 3605 fall 2 rise 1
+ server 127.0.2.209 127.0.2.209:8080 check weight 10 inter 9273 fall 2 rise 1
+ server 127.0.2.210 127.0.2.210:8080 check weight 10 inter 2030 fall 2 rise 1
+ server 127.0.2.211 127.0.2.211:8080 check weight 10 inter 10 fall 2 rise 1
+ server 127.0.2.212 127.0.2.212:8080 check weight 10 inter 1538 fall 2 rise 1
+ server 127.0.2.213 127.0.2.213:8080 check weight 10 inter 5847 fall 2 rise 1
+ server 127.0.2.214 127.0.2.214:8080 check weight 10 inter 7995 fall 2 rise 1
+ server 127.0.2.215 127.0.2.215:8080 check weight 10 inter 1220 fall 2 rise 1
+ server 127.0.2.216 127.0.2.216:8080 check weight 10 inter 1104 fall 2 rise 1
+ server 127.0.2.217 127.0.2.217:8080 check weight 10 inter 9036 fall 2 rise 1
+ server 127.0.2.218 127.0.2.218:8080 check weight 10 inter 3719 fall 2 rise 1
+ server 127.0.2.219 127.0.2.219:8080 check weight 10 inter 6136 fall 2 rise 1
+ server 127.0.2.220 127.0.2.220:8080 check weight 10 inter 9294 fall 2 rise 1
+ server 127.0.2.221 127.0.2.221:8080 check weight 10 inter 4920 fall 2 rise 1
+ server 127.0.2.222 127.0.2.222:8080 check weight 10 inter 4149 fall 2 rise 1
+ server 127.0.2.223 127.0.2.223:8080 check weight 10 inter 3833 fall 2 rise 1
+ server 127.0.2.224 127.0.2.224:8080 check weight 10 inter 3977 fall 2 rise 1
+ server 127.0.2.225 127.0.2.225:8080 check weight 10 inter 524 fall 2 rise 1
+ server 127.0.2.226 127.0.2.226:8080 check weight 10 inter 7289 fall 2 rise 1
+ server 127.0.2.227 127.0.2.227:8080 check weight 10 inter 425 fall 2 rise 1
+ server 127.0.2.228 127.0.2.228:8080 check weight 10 inter 4195 fall 2 rise 1
+ server 127.0.2.229 127.0.2.229:8080 check weight 10 inter 5589 fall 2 rise 1
+ server 127.0.2.230 127.0.2.230:8080 check weight 10 inter 8642 fall 2 rise 1
+ server 127.0.2.231 127.0.2.231:8080 check weight 10 inter 788 fall 2 rise 1
+ server 127.0.2.232 127.0.2.232:8080 check weight 10 inter 6114 fall 2 rise 1
+ server 127.0.2.233 127.0.2.233:8080 check weight 10 inter 2778 fall 2 rise 1
+ server 127.0.2.234 127.0.2.234:8080 check weight 10 inter 5166 fall 2 rise 1
+ server 127.0.2.235 127.0.2.235:8080 check weight 10 inter 8909 fall 2 rise 1
+ server 127.0.2.236 127.0.2.236:8080 check weight 10 inter 381 fall 2 rise 1
+ server 127.0.2.237 127.0.2.237:8080 check weight 10 inter 8805 fall 2 rise 1
+ server 127.0.2.238 127.0.2.238:8080 check weight 10 inter 8876 fall 2 rise 1
+ server 127.0.2.239 127.0.2.239:8080 check weight 10 inter 3033 fall 2 rise 1
+ server 127.0.2.240 127.0.2.240:8080 check weight 10 inter 4375 fall 2 rise 1
+ server 127.0.2.241 127.0.2.241:8080 check weight 10 inter 7601 fall 2 rise 1
+ server 127.0.2.242 127.0.2.242:8080 check weight 10 inter 2831 fall 2 rise 1
+ server 127.0.2.243 127.0.2.243:8080 check weight 10 inter 4333 fall 2 rise 1
+ server 127.0.2.244 127.0.2.244:8080 check weight 10 inter 9129 fall 2 rise 1
+ server 127.0.2.245 127.0.2.245:8080 check weight 10 inter 9255 fall 2 rise 1
+ server 127.0.2.246 127.0.2.246:8080 check weight 10 inter 6423 fall 2 rise 1
+ server 127.0.2.247 127.0.2.247:8080 check weight 10 inter 5448 fall 2 rise 1
+ server 127.0.2.248 127.0.2.248:8080 check weight 10 inter 8881 fall 2 rise 1
+ server 127.0.2.249 127.0.2.249:8080 check weight 10 inter 120 fall 2 rise 1
+ server 127.0.2.250 127.0.2.250:8080 check weight 10 inter 992 fall 2 rise 1
+ server 127.0.3.001 127.0.3.1:8080 check weight 10 inter 6544 fall 2 rise 1
+ server 127.0.3.002 127.0.3.2:8080 check weight 10 inter 4541 fall 2 rise 1
+ server 127.0.3.003 127.0.3.3:8080 check weight 10 inter 8457 fall 2 rise 1
+ server 127.0.3.004 127.0.3.4:8080 check weight 10 inter 5463 fall 2 rise 1
+ server 127.0.3.005 127.0.3.5:8080 check weight 10 inter 3374 fall 2 rise 1
+ server 127.0.3.006 127.0.3.6:8080 check weight 10 inter 9206 fall 2 rise 1
+ server 127.0.3.007 127.0.3.7:8080 check weight 10 inter 1634 fall 2 rise 1
+ server 127.0.3.008 127.0.3.8:8080 check weight 10 inter 272 fall 2 rise 1
+ server 127.0.3.009 127.0.3.9:8080 check weight 10 inter 4089 fall 2 rise 1
+ server 127.0.3.010 127.0.3.10:8080 check weight 10 inter 236 fall 2 rise 1
+ server 127.0.3.011 127.0.3.11:8080 check weight 10 inter 6294 fall 2 rise 1
+ server 127.0.3.012 127.0.3.12:8080 check weight 10 inter 953 fall 2 rise 1
+ server 127.0.3.013 127.0.3.13:8080 check weight 10 inter 4195 fall 2 rise 1
+ server 127.0.3.014 127.0.3.14:8080 check weight 10 inter 7093 fall 2 rise 1
+ server 127.0.3.015 127.0.3.15:8080 check weight 10 inter 2608 fall 2 rise 1
+ server 127.0.3.016 127.0.3.16:8080 check weight 10 inter 9036 fall 2 rise 1
+ server 127.0.3.017 127.0.3.17:8080 check weight 10 inter 245 fall 2 rise 1
+ server 127.0.3.018 127.0.3.18:8080 check weight 10 inter 5780 fall 2 rise 1
+ server 127.0.3.019 127.0.3.19:8080 check weight 10 inter 4209 fall 2 rise 1
+ server 127.0.3.020 127.0.3.20:8080 check weight 10 inter 3335 fall 2 rise 1
+ server 127.0.3.021 127.0.3.21:8080 check weight 10 inter 2907 fall 2 rise 1
+ server 127.0.3.022 127.0.3.22:8080 check weight 10 inter 1067 fall 2 rise 1
+ server 127.0.3.023 127.0.3.23:8080 check weight 10 inter 5627 fall 2 rise 1
+ server 127.0.3.024 127.0.3.24:8080 check weight 10 inter 2651 fall 2 rise 1
+ server 127.0.3.025 127.0.3.25:8080 check weight 10 inter 3663 fall 2 rise 1
+ server 127.0.3.026 127.0.3.26:8080 check weight 10 inter 2418 fall 2 rise 1
+ server 127.0.3.027 127.0.3.27:8080 check weight 10 inter 4717 fall 2 rise 1
+ server 127.0.3.028 127.0.3.28:8080 check weight 10 inter 9590 fall 2 rise 1
+ server 127.0.3.029 127.0.3.29:8080 check weight 10 inter 2742 fall 2 rise 1
+ server 127.0.3.030 127.0.3.30:8080 check weight 10 inter 5427 fall 2 rise 1
+ server 127.0.3.031 127.0.3.31:8080 check weight 10 inter 4045 fall 2 rise 1
+ server 127.0.3.032 127.0.3.32:8080 check weight 10 inter 7585 fall 2 rise 1
+ server 127.0.3.033 127.0.3.33:8080 check weight 10 inter 8973 fall 2 rise 1
+ server 127.0.3.034 127.0.3.34:8080 check weight 10 inter 9610 fall 2 rise 1
+ server 127.0.3.035 127.0.3.35:8080 check weight 10 inter 4711 fall 2 rise 1
+ server 127.0.3.036 127.0.3.36:8080 check weight 10 inter 1449 fall 2 rise 1
+ server 127.0.3.037 127.0.3.37:8080 check weight 10 inter 5314 fall 2 rise 1
+ server 127.0.3.038 127.0.3.38:8080 check weight 10 inter 9424 fall 2 rise 1
+ server 127.0.3.039 127.0.3.39:8080 check weight 10 inter 8439 fall 2 rise 1
+ server 127.0.3.040 127.0.3.40:8080 check weight 10 inter 2479 fall 2 rise 1
+ server 127.0.3.041 127.0.3.41:8080 check weight 10 inter 2003 fall 2 rise 1
+ server 127.0.3.042 127.0.3.42:8080 check weight 10 inter 8114 fall 2 rise 1
+ server 127.0.3.043 127.0.3.43:8080 check weight 10 inter 2312 fall 2 rise 1
+ server 127.0.3.044 127.0.3.44:8080 check weight 10 inter 9950 fall 2 rise 1
+ server 127.0.3.045 127.0.3.45:8080 check weight 10 inter 7511 fall 2 rise 1
+ server 127.0.3.046 127.0.3.46:8080 check weight 10 inter 5299 fall 2 rise 1
+ server 127.0.3.047 127.0.3.47:8080 check weight 10 inter 3226 fall 2 rise 1
+ server 127.0.3.048 127.0.3.48:8080 check weight 10 inter 4047 fall 2 rise 1
+ server 127.0.3.049 127.0.3.49:8080 check weight 10 inter 8349 fall 2 rise 1
+ server 127.0.3.050 127.0.3.50:8080 check weight 10 inter 4486 fall 2 rise 1
+ server 127.0.3.051 127.0.3.51:8080 check weight 10 inter 918 fall 2 rise 1
+ server 127.0.3.052 127.0.3.52:8080 check weight 10 inter 7598 fall 2 rise 1
+ server 127.0.3.053 127.0.3.53:8080 check weight 10 inter 7288 fall 2 rise 1
+ server 127.0.3.054 127.0.3.54:8080 check weight 10 inter 9274 fall 2 rise 1
+ server 127.0.3.055 127.0.3.55:8080 check weight 10 inter 7073 fall 2 rise 1
+ server 127.0.3.056 127.0.3.56:8080 check weight 10 inter 8266 fall 2 rise 1
+ server 127.0.3.057 127.0.3.57:8080 check weight 10 inter 7015 fall 2 rise 1
+ server 127.0.3.058 127.0.3.58:8080 check weight 10 inter 4275 fall 2 rise 1
+ server 127.0.3.059 127.0.3.59:8080 check weight 10 inter 4465 fall 2 rise 1
+ server 127.0.3.060 127.0.3.60:8080 check weight 10 inter 6431 fall 2 rise 1
+ server 127.0.3.061 127.0.3.61:8080 check weight 10 inter 2374 fall 2 rise 1
+ server 127.0.3.062 127.0.3.62:8080 check weight 10 inter 2460 fall 2 rise 1
+ server 127.0.3.063 127.0.3.63:8080 check weight 10 inter 5779 fall 2 rise 1
+ server 127.0.3.064 127.0.3.64:8080 check weight 10 inter 5316 fall 2 rise 1
+ server 127.0.3.065 127.0.3.65:8080 check weight 10 inter 8023 fall 2 rise 1
+ server 127.0.3.066 127.0.3.66:8080 check weight 10 inter 9407 fall 2 rise 1
+ server 127.0.3.067 127.0.3.67:8080 check weight 10 inter 3913 fall 2 rise 1
+ server 127.0.3.068 127.0.3.68:8080 check weight 10 inter 3684 fall 2 rise 1
+ server 127.0.3.069 127.0.3.69:8080 check weight 10 inter 5726 fall 2 rise 1
+ server 127.0.3.070 127.0.3.70:8080 check weight 10 inter 4288 fall 2 rise 1
+ server 127.0.3.071 127.0.3.71:8080 check weight 10 inter 8964 fall 2 rise 1
+ server 127.0.3.072 127.0.3.72:8080 check weight 10 inter 4269 fall 2 rise 1
+ server 127.0.3.073 127.0.3.73:8080 check weight 10 inter 623 fall 2 rise 1
+ server 127.0.3.074 127.0.3.74:8080 check weight 10 inter 1037 fall 2 rise 1
+ server 127.0.3.075 127.0.3.75:8080 check weight 10 inter 3685 fall 2 rise 1
+ server 127.0.3.076 127.0.3.76:8080 check weight 10 inter 2729 fall 2 rise 1
+ server 127.0.3.077 127.0.3.77:8080 check weight 10 inter 3720 fall 2 rise 1
+ server 127.0.3.078 127.0.3.78:8080 check weight 10 inter 134 fall 2 rise 1
+ server 127.0.3.079 127.0.3.79:8080 check weight 10 inter 290 fall 2 rise 1
+ server 127.0.3.080 127.0.3.80:8080 check weight 10 inter 6448 fall 2 rise 1
+ server 127.0.3.081 127.0.3.81:8080 check weight 10 inter 8878 fall 2 rise 1
+ server 127.0.3.082 127.0.3.82:8080 check weight 10 inter 8123 fall 2 rise 1
+ server 127.0.3.083 127.0.3.83:8080 check weight 10 inter 1659 fall 2 rise 1
+ server 127.0.3.084 127.0.3.84:8080 check weight 10 inter 5018 fall 2 rise 1
+ server 127.0.3.085 127.0.3.85:8080 check weight 10 inter 7881 fall 2 rise 1
+ server 127.0.3.086 127.0.3.86:8080 check weight 10 inter 5482 fall 2 rise 1
+ server 127.0.3.087 127.0.3.87:8080 check weight 10 inter 1498 fall 2 rise 1
+ server 127.0.3.088 127.0.3.88:8080 check weight 10 inter 853 fall 2 rise 1
+ server 127.0.3.089 127.0.3.89:8080 check weight 10 inter 8286 fall 2 rise 1
+ server 127.0.3.090 127.0.3.90:8080 check weight 10 inter 9317 fall 2 rise 1
+ server 127.0.3.091 127.0.3.91:8080 check weight 10 inter 4652 fall 2 rise 1
+ server 127.0.3.092 127.0.3.92:8080 check weight 10 inter 3899 fall 2 rise 1
+ server 127.0.3.093 127.0.3.93:8080 check weight 10 inter 1040 fall 2 rise 1
+ server 127.0.3.094 127.0.3.94:8080 check weight 10 inter 1526 fall 2 rise 1
+ server 127.0.3.095 127.0.3.95:8080 check weight 10 inter 595 fall 2 rise 1
+ server 127.0.3.096 127.0.3.96:8080 check weight 10 inter 2096 fall 2 rise 1
+ server 127.0.3.097 127.0.3.97:8080 check weight 10 inter 406 fall 2 rise 1
+ server 127.0.3.098 127.0.3.98:8080 check weight 10 inter 2274 fall 2 rise 1
+ server 127.0.3.099 127.0.3.99:8080 check weight 10 inter 8519 fall 2 rise 1
+ server 127.0.3.100 127.0.3.100:8080 check weight 10 inter 63 fall 2 rise 1
+ server 127.0.3.101 127.0.3.101:8080 check weight 10 inter 4714 fall 2 rise 1
+ server 127.0.3.102 127.0.3.102:8080 check weight 10 inter 8814 fall 2 rise 1
+ server 127.0.3.103 127.0.3.103:8080 check weight 10 inter 1536 fall 2 rise 1
+ server 127.0.3.104 127.0.3.104:8080 check weight 10 inter 3170 fall 2 rise 1
+ server 127.0.3.105 127.0.3.105:8080 check weight 10 inter 1287 fall 2 rise 1
+ server 127.0.3.106 127.0.3.106:8080 check weight 10 inter 9493 fall 2 rise 1
+ server 127.0.3.107 127.0.3.107:8080 check weight 10 inter 5443 fall 2 rise 1
+ server 127.0.3.108 127.0.3.108:8080 check weight 10 inter 8867 fall 2 rise 1
+ server 127.0.3.109 127.0.3.109:8080 check weight 10 inter 7072 fall 2 rise 1
+ server 127.0.3.110 127.0.3.110:8080 check weight 10 inter 6356 fall 2 rise 1
+ server 127.0.3.111 127.0.3.111:8080 check weight 10 inter 9760 fall 2 rise 1
+ server 127.0.3.112 127.0.3.112:8080 check weight 10 inter 6886 fall 2 rise 1
+ server 127.0.3.113 127.0.3.113:8080 check weight 10 inter 2368 fall 2 rise 1
+ server 127.0.3.114 127.0.3.114:8080 check weight 10 inter 7291 fall 2 rise 1
+ server 127.0.3.115 127.0.3.115:8080 check weight 10 inter 3080 fall 2 rise 1
+ server 127.0.3.116 127.0.3.116:8080 check weight 10 inter 2498 fall 2 rise 1
+ server 127.0.3.117 127.0.3.117:8080 check weight 10 inter 4885 fall 2 rise 1
+ server 127.0.3.118 127.0.3.118:8080 check weight 10 inter 468 fall 2 rise 1
+ server 127.0.3.119 127.0.3.119:8080 check weight 10 inter 8767 fall 2 rise 1
+ server 127.0.3.120 127.0.3.120:8080 check weight 10 inter 1523 fall 2 rise 1
+ server 127.0.3.121 127.0.3.121:8080 check weight 10 inter 9020 fall 2 rise 1
+ server 127.0.3.122 127.0.3.122:8080 check weight 10 inter 2833 fall 2 rise 1
+ server 127.0.3.123 127.0.3.123:8080 check weight 10 inter 885 fall 2 rise 1
+ server 127.0.3.124 127.0.3.124:8080 check weight 10 inter 2497 fall 2 rise 1
+ server 127.0.3.125 127.0.3.125:8080 check weight 10 inter 5003 fall 2 rise 1
+ server 127.0.3.126 127.0.3.126:8080 check weight 10 inter 5063 fall 2 rise 1
+ server 127.0.3.127 127.0.3.127:8080 check weight 10 inter 5140 fall 2 rise 1
+ server 127.0.3.128 127.0.3.128:8080 check weight 10 inter 1310 fall 2 rise 1
+ server 127.0.3.129 127.0.3.129:8080 check weight 10 inter 481 fall 2 rise 1
+ server 127.0.3.130 127.0.3.130:8080 check weight 10 inter 1069 fall 2 rise 1
+ server 127.0.3.131 127.0.3.131:8080 check weight 10 inter 2137 fall 2 rise 1
+ server 127.0.3.132 127.0.3.132:8080 check weight 10 inter 6498 fall 2 rise 1
+ server 127.0.3.133 127.0.3.133:8080 check weight 10 inter 8110 fall 2 rise 1
+ server 127.0.3.134 127.0.3.134:8080 check weight 10 inter 7842 fall 2 rise 1
+ server 127.0.3.135 127.0.3.135:8080 check weight 10 inter 5858 fall 2 rise 1
+ server 127.0.3.136 127.0.3.136:8080 check weight 10 inter 8343 fall 2 rise 1
+ server 127.0.3.137 127.0.3.137:8080 check weight 10 inter 2977 fall 2 rise 1
+ server 127.0.3.138 127.0.3.138:8080 check weight 10 inter 4641 fall 2 rise 1
+ server 127.0.3.139 127.0.3.139:8080 check weight 10 inter 6540 fall 2 rise 1
+ server 127.0.3.140 127.0.3.140:8080 check weight 10 inter 9796 fall 2 rise 1
+ server 127.0.3.141 127.0.3.141:8080 check weight 10 inter 3077 fall 2 rise 1
+ server 127.0.3.142 127.0.3.142:8080 check weight 10 inter 5782 fall 2 rise 1
+ server 127.0.3.143 127.0.3.143:8080 check weight 10 inter 7898 fall 2 rise 1
+ server 127.0.3.144 127.0.3.144:8080 check weight 10 inter 3177 fall 2 rise 1
+ server 127.0.3.145 127.0.3.145:8080 check weight 10 inter 7003 fall 2 rise 1
+ server 127.0.3.146 127.0.3.146:8080 check weight 10 inter 7102 fall 2 rise 1
+ server 127.0.3.147 127.0.3.147:8080 check weight 10 inter 1285 fall 2 rise 1
+ server 127.0.3.148 127.0.3.148:8080 check weight 10 inter 495 fall 2 rise 1
+ server 127.0.3.149 127.0.3.149:8080 check weight 10 inter 5302 fall 2 rise 1
+ server 127.0.3.150 127.0.3.150:8080 check weight 10 inter 3136 fall 2 rise 1
+ server 127.0.3.151 127.0.3.151:8080 check weight 10 inter 4582 fall 2 rise 1
+ server 127.0.3.152 127.0.3.152:8080 check weight 10 inter 5789 fall 2 rise 1
+ server 127.0.3.153 127.0.3.153:8080 check weight 10 inter 2872 fall 2 rise 1
+ server 127.0.3.154 127.0.3.154:8080 check weight 10 inter 7389 fall 2 rise 1
+ server 127.0.3.155 127.0.3.155:8080 check weight 10 inter 996 fall 2 rise 1
+ server 127.0.3.156 127.0.3.156:8080 check weight 10 inter 6282 fall 2 rise 1
+ server 127.0.3.157 127.0.3.157:8080 check weight 10 inter 9024 fall 2 rise 1
+ server 127.0.3.158 127.0.3.158:8080 check weight 10 inter 8104 fall 2 rise 1
+ server 127.0.3.159 127.0.3.159:8080 check weight 10 inter 8062 fall 2 rise 1
+ server 127.0.3.160 127.0.3.160:8080 check weight 10 inter 7974 fall 2 rise 1
+ server 127.0.3.161 127.0.3.161:8080 check weight 10 inter 7489 fall 2 rise 1
+ server 127.0.3.162 127.0.3.162:8080 check weight 10 inter 390 fall 2 rise 1
+ server 127.0.3.163 127.0.3.163:8080 check weight 10 inter 599 fall 2 rise 1
+ server 127.0.3.164 127.0.3.164:8080 check weight 10 inter 9686 fall 2 rise 1
+ server 127.0.3.165 127.0.3.165:8080 check weight 10 inter 3009 fall 2 rise 1
+ server 127.0.3.166 127.0.3.166:8080 check weight 10 inter 4340 fall 2 rise 1
+ server 127.0.3.167 127.0.3.167:8080 check weight 10 inter 868 fall 2 rise 1
+ server 127.0.3.168 127.0.3.168:8080 check weight 10 inter 1651 fall 2 rise 1
+ server 127.0.3.169 127.0.3.169:8080 check weight 10 inter 2326 fall 2 rise 1
+ server 127.0.3.170 127.0.3.170:8080 check weight 10 inter 2155 fall 2 rise 1
+ server 127.0.3.171 127.0.3.171:8080 check weight 10 inter 5383 fall 2 rise 1
+ server 127.0.3.172 127.0.3.172:8080 check weight 10 inter 2194 fall 2 rise 1
+ server 127.0.3.173 127.0.3.173:8080 check weight 10 inter 5744 fall 2 rise 1
+ server 127.0.3.174 127.0.3.174:8080 check weight 10 inter 1125 fall 2 rise 1
+ server 127.0.3.175 127.0.3.175:8080 check weight 10 inter 1274 fall 2 rise 1
+ server 127.0.3.176 127.0.3.176:8080 check weight 10 inter 306 fall 2 rise 1
+ server 127.0.3.177 127.0.3.177:8080 check weight 10 inter 3078 fall 2 rise 1
+ server 127.0.3.178 127.0.3.178:8080 check weight 10 inter 6108 fall 2 rise 1
+ server 127.0.3.179 127.0.3.179:8080 check weight 10 inter 9066 fall 2 rise 1
+ server 127.0.3.180 127.0.3.180:8080 check weight 10 inter 1944 fall 2 rise 1
+ server 127.0.3.181 127.0.3.181:8080 check weight 10 inter 9090 fall 2 rise 1
+ server 127.0.3.182 127.0.3.182:8080 check weight 10 inter 7750 fall 2 rise 1
+ server 127.0.3.183 127.0.3.183:8080 check weight 10 inter 7721 fall 2 rise 1
+ server 127.0.3.184 127.0.3.184:8080 check weight 10 inter 6330 fall 2 rise 1
+ server 127.0.3.185 127.0.3.185:8080 check weight 10 inter 6684 fall 2 rise 1
+ server 127.0.3.186 127.0.3.186:8080 check weight 10 inter 1762 fall 2 rise 1
+ server 127.0.3.187 127.0.3.187:8080 check weight 10 inter 1201 fall 2 rise 1
+ server 127.0.3.188 127.0.3.188:8080 check weight 10 inter 7325 fall 2 rise 1
+ server 127.0.3.189 127.0.3.189:8080 check weight 10 inter 9127 fall 2 rise 1
+ server 127.0.3.190 127.0.3.190:8080 check weight 10 inter 5506 fall 2 rise 1
+ server 127.0.3.191 127.0.3.191:8080 check weight 10 inter 6617 fall 2 rise 1
+ server 127.0.3.192 127.0.3.192:8080 check weight 10 inter 7038 fall 2 rise 1
+ server 127.0.3.193 127.0.3.193:8080 check weight 10 inter 2781 fall 2 rise 1
+ server 127.0.3.194 127.0.3.194:8080 check weight 10 inter 8278 fall 2 rise 1
+ server 127.0.3.195 127.0.3.195:8080 check weight 10 inter 6425 fall 2 rise 1
+ server 127.0.3.196 127.0.3.196:8080 check weight 10 inter 2643 fall 2 rise 1
+ server 127.0.3.197 127.0.3.197:8080 check weight 10 inter 540 fall 2 rise 1
+ server 127.0.3.198 127.0.3.198:8080 check weight 10 inter 1100 fall 2 rise 1
+ server 127.0.3.199 127.0.3.199:8080 check weight 10 inter 6878 fall 2 rise 1
+ server 127.0.3.200 127.0.3.200:8080 check weight 10 inter 9359 fall 2 rise 1
+ server 127.0.3.201 127.0.3.201:8080 check weight 10 inter 5309 fall 2 rise 1
+ server 127.0.3.202 127.0.3.202:8080 check weight 10 inter 6682 fall 2 rise 1
+ server 127.0.3.203 127.0.3.203:8080 check weight 10 inter 4301 fall 2 rise 1
+ server 127.0.3.204 127.0.3.204:8080 check weight 10 inter 3528 fall 2 rise 1
+ server 127.0.3.205 127.0.3.205:8080 check weight 10 inter 7576 fall 2 rise 1
+ server 127.0.3.206 127.0.3.206:8080 check weight 10 inter 7687 fall 2 rise 1
+ server 127.0.3.207 127.0.3.207:8080 check weight 10 inter 7830 fall 2 rise 1
+ server 127.0.3.208 127.0.3.208:8080 check weight 10 inter 425 fall 2 rise 1
+ server 127.0.3.209 127.0.3.209:8080 check weight 10 inter 3002 fall 2 rise 1
+ server 127.0.3.210 127.0.3.210:8080 check weight 10 inter 4604 fall 2 rise 1
+ server 127.0.3.211 127.0.3.211:8080 check weight 10 inter 5904 fall 2 rise 1
+ server 127.0.3.212 127.0.3.212:8080 check weight 10 inter 4405 fall 2 rise 1
+ server 127.0.3.213 127.0.3.213:8080 check weight 10 inter 3009 fall 2 rise 1
+ server 127.0.3.214 127.0.3.214:8080 check weight 10 inter 1166 fall 2 rise 1
+ server 127.0.3.215 127.0.3.215:8080 check weight 10 inter 7262 fall 2 rise 1
+ server 127.0.3.216 127.0.3.216:8080 check weight 10 inter 226 fall 2 rise 1
+ server 127.0.3.217 127.0.3.217:8080 check weight 10 inter 1101 fall 2 rise 1
+ server 127.0.3.218 127.0.3.218:8080 check weight 10 inter 3704 fall 2 rise 1
+ server 127.0.3.219 127.0.3.219:8080 check weight 10 inter 2709 fall 2 rise 1
+ server 127.0.3.220 127.0.3.220:8080 check weight 10 inter 1250 fall 2 rise 1
+ server 127.0.3.221 127.0.3.221:8080 check weight 10 inter 7063 fall 2 rise 1
+ server 127.0.3.222 127.0.3.222:8080 check weight 10 inter 251 fall 2 rise 1
+ server 127.0.3.223 127.0.3.223:8080 check weight 10 inter 1133 fall 2 rise 1
+ server 127.0.3.224 127.0.3.224:8080 check weight 10 inter 2320 fall 2 rise 1
+ server 127.0.3.225 127.0.3.225:8080 check weight 10 inter 5358 fall 2 rise 1
+ server 127.0.3.226 127.0.3.226:8080 check weight 10 inter 1749 fall 2 rise 1
+ server 127.0.3.227 127.0.3.227:8080 check weight 10 inter 8824 fall 2 rise 1
+ server 127.0.3.228 127.0.3.228:8080 check weight 10 inter 9167 fall 2 rise 1
+ server 127.0.3.229 127.0.3.229:8080 check weight 10 inter 326 fall 2 rise 1
+ server 127.0.3.230 127.0.3.230:8080 check weight 10 inter 6274 fall 2 rise 1
+ server 127.0.3.231 127.0.3.231:8080 check weight 10 inter 7270 fall 2 rise 1
+ server 127.0.3.232 127.0.3.232:8080 check weight 10 inter 2131 fall 2 rise 1
+ server 127.0.3.233 127.0.3.233:8080 check weight 10 inter 5551 fall 2 rise 1
+ server 127.0.3.234 127.0.3.234:8080 check weight 10 inter 1845 fall 2 rise 1
+ server 127.0.3.235 127.0.3.235:8080 check weight 10 inter 612 fall 2 rise 1
+ server 127.0.3.236 127.0.3.236:8080 check weight 10 inter 4811 fall 2 rise 1
+ server 127.0.3.237 127.0.3.237:8080 check weight 10 inter 5110 fall 2 rise 1
+ server 127.0.3.238 127.0.3.238:8080 check weight 10 inter 3365 fall 2 rise 1
+ server 127.0.3.239 127.0.3.239:8080 check weight 10 inter 280 fall 2 rise 1
+ server 127.0.3.240 127.0.3.240:8080 check weight 10 inter 85 fall 2 rise 1
+ server 127.0.3.241 127.0.3.241:8080 check weight 10 inter 5134 fall 2 rise 1
+ server 127.0.3.242 127.0.3.242:8080 check weight 10 inter 9096 fall 2 rise 1
+ server 127.0.3.243 127.0.3.243:8080 check weight 10 inter 3502 fall 2 rise 1
+ server 127.0.3.244 127.0.3.244:8080 check weight 10 inter 5151 fall 2 rise 1
+ server 127.0.3.245 127.0.3.245:8080 check weight 10 inter 6781 fall 2 rise 1
+ server 127.0.3.246 127.0.3.246:8080 check weight 10 inter 5600 fall 2 rise 1
+ server 127.0.3.247 127.0.3.247:8080 check weight 10 inter 5359 fall 2 rise 1
+ server 127.0.3.248 127.0.3.248:8080 check weight 10 inter 4494 fall 2 rise 1
+ server 127.0.3.249 127.0.3.249:8080 check weight 10 inter 2726 fall 2 rise 1
+ server 127.0.3.250 127.0.3.250:8080 check weight 10 inter 2711 fall 2 rise 1
+ server 127.0.4.001 127.0.4.1:8080 check weight 10 inter 6182 fall 2 rise 1
+ server 127.0.4.002 127.0.4.2:8080 check weight 10 inter 4466 fall 2 rise 1
+ server 127.0.4.003 127.0.4.3:8080 check weight 10 inter 1385 fall 2 rise 1
+ server 127.0.4.004 127.0.4.4:8080 check weight 10 inter 684 fall 2 rise 1
+ server 127.0.4.005 127.0.4.5:8080 check weight 10 inter 7779 fall 2 rise 1
+ server 127.0.4.006 127.0.4.6:8080 check weight 10 inter 5876 fall 2 rise 1
+ server 127.0.4.007 127.0.4.7:8080 check weight 10 inter 1868 fall 2 rise 1
+ server 127.0.4.008 127.0.4.8:8080 check weight 10 inter 9563 fall 2 rise 1
+ server 127.0.4.009 127.0.4.9:8080 check weight 10 inter 6459 fall 2 rise 1
+ server 127.0.4.010 127.0.4.10:8080 check weight 10 inter 5541 fall 2 rise 1
+ server 127.0.4.011 127.0.4.11:8080 check weight 10 inter 7720 fall 2 rise 1
+ server 127.0.4.012 127.0.4.12:8080 check weight 10 inter 6142 fall 2 rise 1
+ server 127.0.4.013 127.0.4.13:8080 check weight 10 inter 5734 fall 2 rise 1
+ server 127.0.4.014 127.0.4.14:8080 check weight 10 inter 565 fall 2 rise 1
+ server 127.0.4.015 127.0.4.15:8080 check weight 10 inter 4324 fall 2 rise 1
+ server 127.0.4.016 127.0.4.16:8080 check weight 10 inter 4710 fall 2 rise 1
+ server 127.0.4.017 127.0.4.17:8080 check weight 10 inter 5558 fall 2 rise 1
+ server 127.0.4.018 127.0.4.18:8080 check weight 10 inter 613 fall 2 rise 1
+ server 127.0.4.019 127.0.4.19:8080 check weight 10 inter 6146 fall 2 rise 1
+ server 127.0.4.020 127.0.4.20:8080 check weight 10 inter 4183 fall 2 rise 1
+ server 127.0.4.021 127.0.4.21:8080 check weight 10 inter 9943 fall 2 rise 1
+ server 127.0.4.022 127.0.4.22:8080 check weight 10 inter 238 fall 2 rise 1
+ server 127.0.4.023 127.0.4.23:8080 check weight 10 inter 1644 fall 2 rise 1
+ server 127.0.4.024 127.0.4.24:8080 check weight 10 inter 598 fall 2 rise 1
+ server 127.0.4.025 127.0.4.25:8080 check weight 10 inter 7484 fall 2 rise 1
+ server 127.0.4.026 127.0.4.26:8080 check weight 10 inter 3310 fall 2 rise 1
+ server 127.0.4.027 127.0.4.27:8080 check weight 10 inter 9692 fall 2 rise 1
+ server 127.0.4.028 127.0.4.28:8080 check weight 10 inter 5860 fall 2 rise 1
+ server 127.0.4.029 127.0.4.29:8080 check weight 10 inter 7236 fall 2 rise 1
+ server 127.0.4.030 127.0.4.30:8080 check weight 10 inter 550 fall 2 rise 1
+ server 127.0.4.031 127.0.4.31:8080 check weight 10 inter 4733 fall 2 rise 1
+ server 127.0.4.032 127.0.4.32:8080 check weight 10 inter 1719 fall 2 rise 1
+ server 127.0.4.033 127.0.4.33:8080 check weight 10 inter 3438 fall 2 rise 1
+ server 127.0.4.034 127.0.4.34:8080 check weight 10 inter 4164 fall 2 rise 1
+ server 127.0.4.035 127.0.4.35:8080 check weight 10 inter 6373 fall 2 rise 1
+ server 127.0.4.036 127.0.4.36:8080 check weight 10 inter 2205 fall 2 rise 1
+ server 127.0.4.037 127.0.4.37:8080 check weight 10 inter 4721 fall 2 rise 1
+ server 127.0.4.038 127.0.4.38:8080 check weight 10 inter 8625 fall 2 rise 1
+ server 127.0.4.039 127.0.4.39:8080 check weight 10 inter 1071 fall 2 rise 1
+ server 127.0.4.040 127.0.4.40:8080 check weight 10 inter 9456 fall 2 rise 1
+ server 127.0.4.041 127.0.4.41:8080 check weight 10 inter 4682 fall 2 rise 1
+ server 127.0.4.042 127.0.4.42:8080 check weight 10 inter 2666 fall 2 rise 1
+ server 127.0.4.043 127.0.4.43:8080 check weight 10 inter 2106 fall 2 rise 1
+ server 127.0.4.044 127.0.4.44:8080 check weight 10 inter 6808 fall 2 rise 1
+ server 127.0.4.045 127.0.4.45:8080 check weight 10 inter 9028 fall 2 rise 1
+ server 127.0.4.046 127.0.4.46:8080 check weight 10 inter 6895 fall 2 rise 1
+ server 127.0.4.047 127.0.4.47:8080 check weight 10 inter 7780 fall 2 rise 1
+ server 127.0.4.048 127.0.4.48:8080 check weight 10 inter 2387 fall 2 rise 1
+ server 127.0.4.049 127.0.4.49:8080 check weight 10 inter 4298 fall 2 rise 1
+ server 127.0.4.050 127.0.4.50:8080 check weight 10 inter 3518 fall 2 rise 1
+ server 127.0.4.051 127.0.4.51:8080 check weight 10 inter 4212 fall 2 rise 1
+ server 127.0.4.052 127.0.4.52:8080 check weight 10 inter 3221 fall 2 rise 1
+ server 127.0.4.053 127.0.4.53:8080 check weight 10 inter 5676 fall 2 rise 1
+ server 127.0.4.054 127.0.4.54:8080 check weight 10 inter 77 fall 2 rise 1
+ server 127.0.4.055 127.0.4.55:8080 check weight 10 inter 5114 fall 2 rise 1
+ server 127.0.4.056 127.0.4.56:8080 check weight 10 inter 800 fall 2 rise 1
+ server 127.0.4.057 127.0.4.57:8080 check weight 10 inter 2930 fall 2 rise 1
+ server 127.0.4.058 127.0.4.58:8080 check weight 10 inter 489 fall 2 rise 1
+ server 127.0.4.059 127.0.4.59:8080 check weight 10 inter 1222 fall 2 rise 1
+ server 127.0.4.060 127.0.4.60:8080 check weight 10 inter 9345 fall 2 rise 1
+ server 127.0.4.061 127.0.4.61:8080 check weight 10 inter 1187 fall 2 rise 1
+ server 127.0.4.062 127.0.4.62:8080 check weight 10 inter 9745 fall 2 rise 1
+ server 127.0.4.063 127.0.4.63:8080 check weight 10 inter 3483 fall 2 rise 1
+ server 127.0.4.064 127.0.4.64:8080 check weight 10 inter 5408 fall 2 rise 1
+ server 127.0.4.065 127.0.4.65:8080 check weight 10 inter 5748 fall 2 rise 1
+ server 127.0.4.066 127.0.4.66:8080 check weight 10 inter 3857 fall 2 rise 1
+ server 127.0.4.067 127.0.4.67:8080 check weight 10 inter 7570 fall 2 rise 1
+ server 127.0.4.068 127.0.4.68:8080 check weight 10 inter 1110 fall 2 rise 1
+ server 127.0.4.069 127.0.4.69:8080 check weight 10 inter 4273 fall 2 rise 1
+ server 127.0.4.070 127.0.4.70:8080 check weight 10 inter 8656 fall 2 rise 1
+ server 127.0.4.071 127.0.4.71:8080 check weight 10 inter 6606 fall 2 rise 1
+ server 127.0.4.072 127.0.4.72:8080 check weight 10 inter 5372 fall 2 rise 1
+ server 127.0.4.073 127.0.4.73:8080 check weight 10 inter 5533 fall 2 rise 1
+ server 127.0.4.074 127.0.4.74:8080 check weight 10 inter 6696 fall 2 rise 1
+ server 127.0.4.075 127.0.4.75:8080 check weight 10 inter 9668 fall 2 rise 1
+ server 127.0.4.076 127.0.4.76:8080 check weight 10 inter 9736 fall 2 rise 1
+ server 127.0.4.077 127.0.4.77:8080 check weight 10 inter 4218 fall 2 rise 1
+ server 127.0.4.078 127.0.4.78:8080 check weight 10 inter 3289 fall 2 rise 1
+ server 127.0.4.079 127.0.4.79:8080 check weight 10 inter 1414 fall 2 rise 1
+ server 127.0.4.080 127.0.4.80:8080 check weight 10 inter 7126 fall 2 rise 1
+ server 127.0.4.081 127.0.4.81:8080 check weight 10 inter 6984 fall 2 rise 1
+ server 127.0.4.082 127.0.4.82:8080 check weight 10 inter 9661 fall 2 rise 1
+ server 127.0.4.083 127.0.4.83:8080 check weight 10 inter 4099 fall 2 rise 1
+ server 127.0.4.084 127.0.4.84:8080 check weight 10 inter 4455 fall 2 rise 1
+ server 127.0.4.085 127.0.4.85:8080 check weight 10 inter 2173 fall 2 rise 1
+ server 127.0.4.086 127.0.4.86:8080 check weight 10 inter 9353 fall 2 rise 1
+ server 127.0.4.087 127.0.4.87:8080 check weight 10 inter 3008 fall 2 rise 1
+ server 127.0.4.088 127.0.4.88:8080 check weight 10 inter 8668 fall 2 rise 1
+ server 127.0.4.089 127.0.4.89:8080 check weight 10 inter 4376 fall 2 rise 1
+ server 127.0.4.090 127.0.4.90:8080 check weight 10 inter 7384 fall 2 rise 1
+ server 127.0.4.091 127.0.4.91:8080 check weight 10 inter 421 fall 2 rise 1
+ server 127.0.4.092 127.0.4.92:8080 check weight 10 inter 1585 fall 2 rise 1
+ server 127.0.4.093 127.0.4.93:8080 check weight 10 inter 8582 fall 2 rise 1
+ server 127.0.4.094 127.0.4.94:8080 check weight 10 inter 5063 fall 2 rise 1
+ server 127.0.4.095 127.0.4.95:8080 check weight 10 inter 1912 fall 2 rise 1
+ server 127.0.4.096 127.0.4.96:8080 check weight 10 inter 202 fall 2 rise 1
+ server 127.0.4.097 127.0.4.97:8080 check weight 10 inter 9376 fall 2 rise 1
+ server 127.0.4.098 127.0.4.98:8080 check weight 10 inter 9536 fall 2 rise 1
+ server 127.0.4.099 127.0.4.99:8080 check weight 10 inter 1785 fall 2 rise 1
+ server 127.0.4.100 127.0.4.100:8080 check weight 10 inter 6287 fall 2 rise 1
+ server 127.0.4.101 127.0.4.101:8080 check weight 10 inter 6283 fall 2 rise 1
+ server 127.0.4.102 127.0.4.102:8080 check weight 10 inter 1223 fall 2 rise 1
+ server 127.0.4.103 127.0.4.103:8080 check weight 10 inter 9762 fall 2 rise 1
+ server 127.0.4.104 127.0.4.104:8080 check weight 10 inter 8535 fall 2 rise 1
+ server 127.0.4.105 127.0.4.105:8080 check weight 10 inter 3277 fall 2 rise 1
+ server 127.0.4.106 127.0.4.106:8080 check weight 10 inter 1799 fall 2 rise 1
+ server 127.0.4.107 127.0.4.107:8080 check weight 10 inter 1245 fall 2 rise 1
+ server 127.0.4.108 127.0.4.108:8080 check weight 10 inter 8981 fall 2 rise 1
+ server 127.0.4.109 127.0.4.109:8080 check weight 10 inter 6976 fall 2 rise 1
+ server 127.0.4.110 127.0.4.110:8080 check weight 10 inter 1850 fall 2 rise 1
+ server 127.0.4.111 127.0.4.111:8080 check weight 10 inter 3907 fall 2 rise 1
+ server 127.0.4.112 127.0.4.112:8080 check weight 10 inter 1221 fall 2 rise 1
+ server 127.0.4.113 127.0.4.113:8080 check weight 10 inter 7688 fall 2 rise 1
+ server 127.0.4.114 127.0.4.114:8080 check weight 10 inter 3194 fall 2 rise 1
+ server 127.0.4.115 127.0.4.115:8080 check weight 10 inter 4632 fall 2 rise 1
+ server 127.0.4.116 127.0.4.116:8080 check weight 10 inter 1685 fall 2 rise 1
+ server 127.0.4.117 127.0.4.117:8080 check weight 10 inter 9972 fall 2 rise 1
+ server 127.0.4.118 127.0.4.118:8080 check weight 10 inter 3290 fall 2 rise 1
+ server 127.0.4.119 127.0.4.119:8080 check weight 10 inter 3208 fall 2 rise 1
+ server 127.0.4.120 127.0.4.120:8080 check weight 10 inter 9379 fall 2 rise 1
+ server 127.0.4.121 127.0.4.121:8080 check weight 10 inter 7511 fall 2 rise 1
+ server 127.0.4.122 127.0.4.122:8080 check weight 10 inter 2708 fall 2 rise 1
+ server 127.0.4.123 127.0.4.123:8080 check weight 10 inter 2063 fall 2 rise 1
+ server 127.0.4.124 127.0.4.124:8080 check weight 10 inter 7931 fall 2 rise 1
+ server 127.0.4.125 127.0.4.125:8080 check weight 10 inter 6627 fall 2 rise 1
+ server 127.0.4.126 127.0.4.126:8080 check weight 10 inter 9425 fall 2 rise 1
+ server 127.0.4.127 127.0.4.127:8080 check weight 10 inter 6139 fall 2 rise 1
+ server 127.0.4.128 127.0.4.128:8080 check weight 10 inter 5965 fall 2 rise 1
+ server 127.0.4.129 127.0.4.129:8080 check weight 10 inter 6202 fall 2 rise 1
+ server 127.0.4.130 127.0.4.130:8080 check weight 10 inter 9050 fall 2 rise 1
+ server 127.0.4.131 127.0.4.131:8080 check weight 10 inter 4322 fall 2 rise 1
+ server 127.0.4.132 127.0.4.132:8080 check weight 10 inter 1984 fall 2 rise 1
+ server 127.0.4.133 127.0.4.133:8080 check weight 10 inter 3669 fall 2 rise 1
+ server 127.0.4.134 127.0.4.134:8080 check weight 10 inter 8753 fall 2 rise 1
+ server 127.0.4.135 127.0.4.135:8080 check weight 10 inter 6707 fall 2 rise 1
+ server 127.0.4.136 127.0.4.136:8080 check weight 10 inter 5305 fall 2 rise 1
+ server 127.0.4.137 127.0.4.137:8080 check weight 10 inter 3328 fall 2 rise 1
+ server 127.0.4.138 127.0.4.138:8080 check weight 10 inter 1230 fall 2 rise 1
+ server 127.0.4.139 127.0.4.139:8080 check weight 10 inter 5550 fall 2 rise 1
+ server 127.0.4.140 127.0.4.140:8080 check weight 10 inter 6729 fall 2 rise 1
+ server 127.0.4.141 127.0.4.141:8080 check weight 10 inter 3276 fall 2 rise 1
+ server 127.0.4.142 127.0.4.142:8080 check weight 10 inter 6314 fall 2 rise 1
+ server 127.0.4.143 127.0.4.143:8080 check weight 10 inter 3282 fall 2 rise 1
+ server 127.0.4.144 127.0.4.144:8080 check weight 10 inter 5945 fall 2 rise 1
+ server 127.0.4.145 127.0.4.145:8080 check weight 10 inter 7170 fall 2 rise 1
+ server 127.0.4.146 127.0.4.146:8080 check weight 10 inter 126 fall 2 rise 1
+ server 127.0.4.147 127.0.4.147:8080 check weight 10 inter 363 fall 2 rise 1
+ server 127.0.4.148 127.0.4.148:8080 check weight 10 inter 6839 fall 2 rise 1
+ server 127.0.4.149 127.0.4.149:8080 check weight 10 inter 4376 fall 2 rise 1
+ server 127.0.4.150 127.0.4.150:8080 check weight 10 inter 4282 fall 2 rise 1
+ server 127.0.4.151 127.0.4.151:8080 check weight 10 inter 7400 fall 2 rise 1
+ server 127.0.4.152 127.0.4.152:8080 check weight 10 inter 4318 fall 2 rise 1
+ server 127.0.4.153 127.0.4.153:8080 check weight 10 inter 3013 fall 2 rise 1
+ server 127.0.4.154 127.0.4.154:8080 check weight 10 inter 420 fall 2 rise 1
+ server 127.0.4.155 127.0.4.155:8080 check weight 10 inter 9421 fall 2 rise 1
+ server 127.0.4.156 127.0.4.156:8080 check weight 10 inter 1862 fall 2 rise 1
+ server 127.0.4.157 127.0.4.157:8080 check weight 10 inter 7300 fall 2 rise 1
+ server 127.0.4.158 127.0.4.158:8080 check weight 10 inter 7382 fall 2 rise 1
+ server 127.0.4.159 127.0.4.159:8080 check weight 10 inter 4397 fall 2 rise 1
+ server 127.0.4.160 127.0.4.160:8080 check weight 10 inter 9841 fall 2 rise 1
+ server 127.0.4.161 127.0.4.161:8080 check weight 10 inter 5867 fall 2 rise 1
+ server 127.0.4.162 127.0.4.162:8080 check weight 10 inter 2726 fall 2 rise 1
+ server 127.0.4.163 127.0.4.163:8080 check weight 10 inter 1749 fall 2 rise 1
+ server 127.0.4.164 127.0.4.164:8080 check weight 10 inter 9505 fall 2 rise 1
+ server 127.0.4.165 127.0.4.165:8080 check weight 10 inter 2424 fall 2 rise 1
+ server 127.0.4.166 127.0.4.166:8080 check weight 10 inter 836 fall 2 rise 1
+ server 127.0.4.167 127.0.4.167:8080 check weight 10 inter 8488 fall 2 rise 1
+ server 127.0.4.168 127.0.4.168:8080 check weight 10 inter 4857 fall 2 rise 1
+ server 127.0.4.169 127.0.4.169:8080 check weight 10 inter 8864 fall 2 rise 1
+ server 127.0.4.170 127.0.4.170:8080 check weight 10 inter 6261 fall 2 rise 1
+ server 127.0.4.171 127.0.4.171:8080 check weight 10 inter 8719 fall 2 rise 1
+ server 127.0.4.172 127.0.4.172:8080 check weight 10 inter 4986 fall 2 rise 1
+ server 127.0.4.173 127.0.4.173:8080 check weight 10 inter 9399 fall 2 rise 1
+ server 127.0.4.174 127.0.4.174:8080 check weight 10 inter 7586 fall 2 rise 1
+ server 127.0.4.175 127.0.4.175:8080 check weight 10 inter 749 fall 2 rise 1
+ server 127.0.4.176 127.0.4.176:8080 check weight 10 inter 474 fall 2 rise 1
+ server 127.0.4.177 127.0.4.177:8080 check weight 10 inter 5647 fall 2 rise 1
+ server 127.0.4.178 127.0.4.178:8080 check weight 10 inter 3408 fall 2 rise 1
+ server 127.0.4.179 127.0.4.179:8080 check weight 10 inter 2643 fall 2 rise 1
+ server 127.0.4.180 127.0.4.180:8080 check weight 10 inter 8900 fall 2 rise 1
+ server 127.0.4.181 127.0.4.181:8080 check weight 10 inter 2065 fall 2 rise 1
+ server 127.0.4.182 127.0.4.182:8080 check weight 10 inter 1886 fall 2 rise 1
+ server 127.0.4.183 127.0.4.183:8080 check weight 10 inter 3224 fall 2 rise 1
+ server 127.0.4.184 127.0.4.184:8080 check weight 10 inter 6611 fall 2 rise 1
+ server 127.0.4.185 127.0.4.185:8080 check weight 10 inter 8252 fall 2 rise 1
+ server 127.0.4.186 127.0.4.186:8080 check weight 10 inter 1873 fall 2 rise 1
+ server 127.0.4.187 127.0.4.187:8080 check weight 10 inter 9798 fall 2 rise 1
+ server 127.0.4.188 127.0.4.188:8080 check weight 10 inter 9452 fall 2 rise 1
+ server 127.0.4.189 127.0.4.189:8080 check weight 10 inter 3952 fall 2 rise 1
+ server 127.0.4.190 127.0.4.190:8080 check weight 10 inter 8959 fall 2 rise 1
+ server 127.0.4.191 127.0.4.191:8080 check weight 10 inter 2565 fall 2 rise 1
+ server 127.0.4.192 127.0.4.192:8080 check weight 10 inter 2413 fall 2 rise 1
+ server 127.0.4.193 127.0.4.193:8080 check weight 10 inter 4698 fall 2 rise 1
+ server 127.0.4.194 127.0.4.194:8080 check weight 10 inter 2748 fall 2 rise 1
+ server 127.0.4.195 127.0.4.195:8080 check weight 10 inter 4665 fall 2 rise 1
+ server 127.0.4.196 127.0.4.196:8080 check weight 10 inter 1049 fall 2 rise 1
+ server 127.0.4.197 127.0.4.197:8080 check weight 10 inter 4381 fall 2 rise 1
+ server 127.0.4.198 127.0.4.198:8080 check weight 10 inter 1464 fall 2 rise 1
+ server 127.0.4.199 127.0.4.199:8080 check weight 10 inter 1034 fall 2 rise 1
+ server 127.0.4.200 127.0.4.200:8080 check weight 10 inter 9809 fall 2 rise 1
+ server 127.0.4.201 127.0.4.201:8080 check weight 10 inter 8676 fall 2 rise 1
+ server 127.0.4.202 127.0.4.202:8080 check weight 10 inter 6181 fall 2 rise 1
+ server 127.0.4.203 127.0.4.203:8080 check weight 10 inter 919 fall 2 rise 1
+ server 127.0.4.204 127.0.4.204:8080 check weight 10 inter 1122 fall 2 rise 1
+ server 127.0.4.205 127.0.4.205:8080 check weight 10 inter 7466 fall 2 rise 1
+ server 127.0.4.206 127.0.4.206:8080 check weight 10 inter 3738 fall 2 rise 1
+ server 127.0.4.207 127.0.4.207:8080 check weight 10 inter 1322 fall 2 rise 1
+ server 127.0.4.208 127.0.4.208:8080 check weight 10 inter 9569 fall 2 rise 1
+ server 127.0.4.209 127.0.4.209:8080 check weight 10 inter 2790 fall 2 rise 1
+ server 127.0.4.210 127.0.4.210:8080 check weight 10 inter 7849 fall 2 rise 1
+ server 127.0.4.211 127.0.4.211:8080 check weight 10 inter 3017 fall 2 rise 1
+ server 127.0.4.212 127.0.4.212:8080 check weight 10 inter 4982 fall 2 rise 1
+ server 127.0.4.213 127.0.4.213:8080 check weight 10 inter 4487 fall 2 rise 1
+ server 127.0.4.214 127.0.4.214:8080 check weight 10 inter 1760 fall 2 rise 1
+ server 127.0.4.215 127.0.4.215:8080 check weight 10 inter 434 fall 2 rise 1
+ server 127.0.4.216 127.0.4.216:8080 check weight 10 inter 1131 fall 2 rise 1
+ server 127.0.4.217 127.0.4.217:8080 check weight 10 inter 1747 fall 2 rise 1
+ server 127.0.4.218 127.0.4.218:8080 check weight 10 inter 2594 fall 2 rise 1
+ server 127.0.4.219 127.0.4.219:8080 check weight 10 inter 8995 fall 2 rise 1
+ server 127.0.4.220 127.0.4.220:8080 check weight 10 inter 9363 fall 2 rise 1
+ server 127.0.4.221 127.0.4.221:8080 check weight 10 inter 7472 fall 2 rise 1
+ server 127.0.4.222 127.0.4.222:8080 check weight 10 inter 3043 fall 2 rise 1
+ server 127.0.4.223 127.0.4.223:8080 check weight 10 inter 3101 fall 2 rise 1
+ server 127.0.4.224 127.0.4.224:8080 check weight 10 inter 475 fall 2 rise 1
+ server 127.0.4.225 127.0.4.225:8080 check weight 10 inter 174 fall 2 rise 1
+ server 127.0.4.226 127.0.4.226:8080 check weight 10 inter 8245 fall 2 rise 1
+ server 127.0.4.227 127.0.4.227:8080 check weight 10 inter 9399 fall 2 rise 1
+ server 127.0.4.228 127.0.4.228:8080 check weight 10 inter 1410 fall 2 rise 1
+ server 127.0.4.229 127.0.4.229:8080 check weight 10 inter 8907 fall 2 rise 1
+ server 127.0.4.230 127.0.4.230:8080 check weight 10 inter 4565 fall 2 rise 1
+ server 127.0.4.231 127.0.4.231:8080 check weight 10 inter 5153 fall 2 rise 1
+ server 127.0.4.232 127.0.4.232:8080 check weight 10 inter 6216 fall 2 rise 1
+ server 127.0.4.233 127.0.4.233:8080 check weight 10 inter 6548 fall 2 rise 1
+ server 127.0.4.234 127.0.4.234:8080 check weight 10 inter 9365 fall 2 rise 1
+ server 127.0.4.235 127.0.4.235:8080 check weight 10 inter 9119 fall 2 rise 1
+ server 127.0.4.236 127.0.4.236:8080 check weight 10 inter 5095 fall 2 rise 1
+ server 127.0.4.237 127.0.4.237:8080 check weight 10 inter 2668 fall 2 rise 1
+ server 127.0.4.238 127.0.4.238:8080 check weight 10 inter 8 fall 2 rise 1
+ server 127.0.4.239 127.0.4.239:8080 check weight 10 inter 8737 fall 2 rise 1
+ server 127.0.4.240 127.0.4.240:8080 check weight 10 inter 356 fall 2 rise 1
+ server 127.0.4.241 127.0.4.241:8080 check weight 10 inter 4959 fall 2 rise 1
+ server 127.0.4.242 127.0.4.242:8080 check weight 10 inter 6272 fall 2 rise 1
+ server 127.0.4.243 127.0.4.243:8080 check weight 10 inter 2597 fall 2 rise 1
+ server 127.0.4.244 127.0.4.244:8080 check weight 10 inter 324 fall 2 rise 1
+ server 127.0.4.245 127.0.4.245:8080 check weight 10 inter 1699 fall 2 rise 1
+ server 127.0.4.246 127.0.4.246:8080 check weight 10 inter 3335 fall 2 rise 1
+ server 127.0.4.247 127.0.4.247:8080 check weight 10 inter 892 fall 2 rise 1
+ server 127.0.4.248 127.0.4.248:8080 check weight 10 inter 6190 fall 2 rise 1
+ server 127.0.4.249 127.0.4.249:8080 check weight 10 inter 2547 fall 2 rise 1
+ server 127.0.4.250 127.0.4.250:8080 check weight 10 inter 2735 fall 2 rise 1
+ server 127.0.5.001 127.0.5.1:8080 check weight 10 inter 4076 fall 2 rise 1
+ server 127.0.5.002 127.0.5.2:8080 check weight 10 inter 2853 fall 2 rise 1
+ server 127.0.5.003 127.0.5.3:8080 check weight 10 inter 1818 fall 2 rise 1
+ server 127.0.5.004 127.0.5.4:8080 check weight 10 inter 1226 fall 2 rise 1
+ server 127.0.5.005 127.0.5.5:8080 check weight 10 inter 7179 fall 2 rise 1
+ server 127.0.5.006 127.0.5.6:8080 check weight 10 inter 5330 fall 2 rise 1
+ server 127.0.5.007 127.0.5.7:8080 check weight 10 inter 4770 fall 2 rise 1
+ server 127.0.5.008 127.0.5.8:8080 check weight 10 inter 5737 fall 2 rise 1
+ server 127.0.5.009 127.0.5.9:8080 check weight 10 inter 774 fall 2 rise 1
+ server 127.0.5.010 127.0.5.10:8080 check weight 10 inter 739 fall 2 rise 1
+ server 127.0.5.011 127.0.5.11:8080 check weight 10 inter 1210 fall 2 rise 1
+ server 127.0.5.012 127.0.5.12:8080 check weight 10 inter 5701 fall 2 rise 1
+ server 127.0.5.013 127.0.5.13:8080 check weight 10 inter 9300 fall 2 rise 1
+ server 127.0.5.014 127.0.5.14:8080 check weight 10 inter 181 fall 2 rise 1
+ server 127.0.5.015 127.0.5.15:8080 check weight 10 inter 5721 fall 2 rise 1
+ server 127.0.5.016 127.0.5.16:8080 check weight 10 inter 9438 fall 2 rise 1
+ server 127.0.5.017 127.0.5.17:8080 check weight 10 inter 7711 fall 2 rise 1
+ server 127.0.5.018 127.0.5.18:8080 check weight 10 inter 4995 fall 2 rise 1
+ server 127.0.5.019 127.0.5.19:8080 check weight 10 inter 7733 fall 2 rise 1
+ server 127.0.5.020 127.0.5.20:8080 check weight 10 inter 8807 fall 2 rise 1
+ server 127.0.5.021 127.0.5.21:8080 check weight 10 inter 7772 fall 2 rise 1
+ server 127.0.5.022 127.0.5.22:8080 check weight 10 inter 12 fall 2 rise 1
+ server 127.0.5.023 127.0.5.23:8080 check weight 10 inter 2434 fall 2 rise 1
+ server 127.0.5.024 127.0.5.24:8080 check weight 10 inter 5054 fall 2 rise 1
+ server 127.0.5.025 127.0.5.25:8080 check weight 10 inter 8222 fall 2 rise 1
+ server 127.0.5.026 127.0.5.26:8080 check weight 10 inter 9989 fall 2 rise 1
+ server 127.0.5.027 127.0.5.27:8080 check weight 10 inter 6827 fall 2 rise 1
+ server 127.0.5.028 127.0.5.28:8080 check weight 10 inter 6604 fall 2 rise 1
+ server 127.0.5.029 127.0.5.29:8080 check weight 10 inter 1342 fall 2 rise 1
+ server 127.0.5.030 127.0.5.30:8080 check weight 10 inter 4612 fall 2 rise 1
+ server 127.0.5.031 127.0.5.31:8080 check weight 10 inter 2546 fall 2 rise 1
+ server 127.0.5.032 127.0.5.32:8080 check weight 10 inter 7697 fall 2 rise 1
+ server 127.0.5.033 127.0.5.33:8080 check weight 10 inter 601 fall 2 rise 1
+ server 127.0.5.034 127.0.5.34:8080 check weight 10 inter 2072 fall 2 rise 1
+ server 127.0.5.035 127.0.5.35:8080 check weight 10 inter 9560 fall 2 rise 1
+ server 127.0.5.036 127.0.5.36:8080 check weight 10 inter 1197 fall 2 rise 1
+ server 127.0.5.037 127.0.5.37:8080 check weight 10 inter 9783 fall 2 rise 1
+ server 127.0.5.038 127.0.5.38:8080 check weight 10 inter 4489 fall 2 rise 1
+ server 127.0.5.039 127.0.5.39:8080 check weight 10 inter 4875 fall 2 rise 1
+ server 127.0.5.040 127.0.5.40:8080 check weight 10 inter 1454 fall 2 rise 1
+ server 127.0.5.041 127.0.5.41:8080 check weight 10 inter 415 fall 2 rise 1
+ server 127.0.5.042 127.0.5.42:8080 check weight 10 inter 9011 fall 2 rise 1
+ server 127.0.5.043 127.0.5.43:8080 check weight 10 inter 9570 fall 2 rise 1
+ server 127.0.5.044 127.0.5.44:8080 check weight 10 inter 3133 fall 2 rise 1
+ server 127.0.5.045 127.0.5.45:8080 check weight 10 inter 4045 fall 2 rise 1
+ server 127.0.5.046 127.0.5.46:8080 check weight 10 inter 5714 fall 2 rise 1
+ server 127.0.5.047 127.0.5.47:8080 check weight 10 inter 585 fall 2 rise 1
+ server 127.0.5.048 127.0.5.48:8080 check weight 10 inter 807 fall 2 rise 1
+ server 127.0.5.049 127.0.5.49:8080 check weight 10 inter 3422 fall 2 rise 1
+ server 127.0.5.050 127.0.5.50:8080 check weight 10 inter 1500 fall 2 rise 1
+ server 127.0.5.051 127.0.5.51:8080 check weight 10 inter 6765 fall 2 rise 1
+ server 127.0.5.052 127.0.5.52:8080 check weight 10 inter 6940 fall 2 rise 1
+ server 127.0.5.053 127.0.5.53:8080 check weight 10 inter 9153 fall 2 rise 1
+ server 127.0.5.054 127.0.5.54:8080 check weight 10 inter 1921 fall 2 rise 1
+ server 127.0.5.055 127.0.5.55:8080 check weight 10 inter 487 fall 2 rise 1
+ server 127.0.5.056 127.0.5.56:8080 check weight 10 inter 1354 fall 2 rise 1
+ server 127.0.5.057 127.0.5.57:8080 check weight 10 inter 765 fall 2 rise 1
+ server 127.0.5.058 127.0.5.58:8080 check weight 10 inter 1635 fall 2 rise 1
+ server 127.0.5.059 127.0.5.59:8080 check weight 10 inter 537 fall 2 rise 1
+ server 127.0.5.060 127.0.5.60:8080 check weight 10 inter 7030 fall 2 rise 1
+ server 127.0.5.061 127.0.5.61:8080 check weight 10 inter 4482 fall 2 rise 1
+ server 127.0.5.062 127.0.5.62:8080 check weight 10 inter 2661 fall 2 rise 1
+ server 127.0.5.063 127.0.5.63:8080 check weight 10 inter 4170 fall 2 rise 1
+ server 127.0.5.064 127.0.5.64:8080 check weight 10 inter 6431 fall 2 rise 1
+ server 127.0.5.065 127.0.5.65:8080 check weight 10 inter 8161 fall 2 rise 1
+ server 127.0.5.066 127.0.5.66:8080 check weight 10 inter 6229 fall 2 rise 1
+ server 127.0.5.067 127.0.5.67:8080 check weight 10 inter 5265 fall 2 rise 1
+ server 127.0.5.068 127.0.5.68:8080 check weight 10 inter 1651 fall 2 rise 1
+ server 127.0.5.069 127.0.5.69:8080 check weight 10 inter 9083 fall 2 rise 1
+ server 127.0.5.070 127.0.5.70:8080 check weight 10 inter 9978 fall 2 rise 1
+ server 127.0.5.071 127.0.5.71:8080 check weight 10 inter 6338 fall 2 rise 1
+ server 127.0.5.072 127.0.5.72:8080 check weight 10 inter 719 fall 2 rise 1
+ server 127.0.5.073 127.0.5.73:8080 check weight 10 inter 2906 fall 2 rise 1
+ server 127.0.5.074 127.0.5.74:8080 check weight 10 inter 4332 fall 2 rise 1
+ server 127.0.5.075 127.0.5.75:8080 check weight 10 inter 2572 fall 2 rise 1
+ server 127.0.5.076 127.0.5.76:8080 check weight 10 inter 9527 fall 2 rise 1
+ server 127.0.5.077 127.0.5.77:8080 check weight 10 inter 7601 fall 2 rise 1
+ server 127.0.5.078 127.0.5.78:8080 check weight 10 inter 6869 fall 2 rise 1
+ server 127.0.5.079 127.0.5.79:8080 check weight 10 inter 526 fall 2 rise 1
+ server 127.0.5.080 127.0.5.80:8080 check weight 10 inter 9737 fall 2 rise 1
+ server 127.0.5.081 127.0.5.81:8080 check weight 10 inter 5941 fall 2 rise 1
+ server 127.0.5.082 127.0.5.82:8080 check weight 10 inter 7883 fall 2 rise 1
+ server 127.0.5.083 127.0.5.83:8080 check weight 10 inter 8637 fall 2 rise 1
+ server 127.0.5.084 127.0.5.84:8080 check weight 10 inter 8385 fall 2 rise 1
+ server 127.0.5.085 127.0.5.85:8080 check weight 10 inter 5720 fall 2 rise 1
+ server 127.0.5.086 127.0.5.86:8080 check weight 10 inter 1211 fall 2 rise 1
+ server 127.0.5.087 127.0.5.87:8080 check weight 10 inter 5543 fall 2 rise 1
+ server 127.0.5.088 127.0.5.88:8080 check weight 10 inter 348 fall 2 rise 1
+ server 127.0.5.089 127.0.5.89:8080 check weight 10 inter 4058 fall 2 rise 1
+ server 127.0.5.090 127.0.5.90:8080 check weight 10 inter 1044 fall 2 rise 1
+ server 127.0.5.091 127.0.5.91:8080 check weight 10 inter 4868 fall 2 rise 1
+ server 127.0.5.092 127.0.5.92:8080 check weight 10 inter 7246 fall 2 rise 1
+ server 127.0.5.093 127.0.5.93:8080 check weight 10 inter 4046 fall 2 rise 1
+ server 127.0.5.094 127.0.5.94:8080 check weight 10 inter 8345 fall 2 rise 1
+ server 127.0.5.095 127.0.5.95:8080 check weight 10 inter 994 fall 2 rise 1
+ server 127.0.5.096 127.0.5.96:8080 check weight 10 inter 5906 fall 2 rise 1
+ server 127.0.5.097 127.0.5.97:8080 check weight 10 inter 9309 fall 2 rise 1
+ server 127.0.5.098 127.0.5.98:8080 check weight 10 inter 2660 fall 2 rise 1
+ server 127.0.5.099 127.0.5.99:8080 check weight 10 inter 413 fall 2 rise 1
+ server 127.0.5.100 127.0.5.100:8080 check weight 10 inter 4613 fall 2 rise 1
+ server 127.0.5.101 127.0.5.101:8080 check weight 10 inter 2589 fall 2 rise 1
+ server 127.0.5.102 127.0.5.102:8080 check weight 10 inter 4491 fall 2 rise 1
+ server 127.0.5.103 127.0.5.103:8080 check weight 10 inter 1696 fall 2 rise 1
+ server 127.0.5.104 127.0.5.104:8080 check weight 10 inter 6865 fall 2 rise 1
+ server 127.0.5.105 127.0.5.105:8080 check weight 10 inter 2399 fall 2 rise 1
+ server 127.0.5.106 127.0.5.106:8080 check weight 10 inter 4628 fall 2 rise 1
+ server 127.0.5.107 127.0.5.107:8080 check weight 10 inter 4638 fall 2 rise 1
+ server 127.0.5.108 127.0.5.108:8080 check weight 10 inter 6410 fall 2 rise 1
+ server 127.0.5.109 127.0.5.109:8080 check weight 10 inter 3067 fall 2 rise 1
+ server 127.0.5.110 127.0.5.110:8080 check weight 10 inter 1143 fall 2 rise 1
+ server 127.0.5.111 127.0.5.111:8080 check weight 10 inter 8847 fall 2 rise 1
+ server 127.0.5.112 127.0.5.112:8080 check weight 10 inter 2203 fall 2 rise 1
+ server 127.0.5.113 127.0.5.113:8080 check weight 10 inter 8988 fall 2 rise 1
+ server 127.0.5.114 127.0.5.114:8080 check weight 10 inter 8 fall 2 rise 1
+ server 127.0.5.115 127.0.5.115:8080 check weight 10 inter 828 fall 2 rise 1
+ server 127.0.5.116 127.0.5.116:8080 check weight 10 inter 4286 fall 2 rise 1
+ server 127.0.5.117 127.0.5.117:8080 check weight 10 inter 5195 fall 2 rise 1
+ server 127.0.5.118 127.0.5.118:8080 check weight 10 inter 913 fall 2 rise 1
+ server 127.0.5.119 127.0.5.119:8080 check weight 10 inter 6249 fall 2 rise 1
+ server 127.0.5.120 127.0.5.120:8080 check weight 10 inter 3533 fall 2 rise 1
+ server 127.0.5.121 127.0.5.121:8080 check weight 10 inter 8732 fall 2 rise 1
+ server 127.0.5.122 127.0.5.122:8080 check weight 10 inter 1058 fall 2 rise 1
+ server 127.0.5.123 127.0.5.123:8080 check weight 10 inter 3860 fall 2 rise 1
+ server 127.0.5.124 127.0.5.124:8080 check weight 10 inter 8747 fall 2 rise 1
+ server 127.0.5.125 127.0.5.125:8080 check weight 10 inter 6955 fall 2 rise 1
+ server 127.0.5.126 127.0.5.126:8080 check weight 10 inter 8648 fall 2 rise 1
+ server 127.0.5.127 127.0.5.127:8080 check weight 10 inter 6050 fall 2 rise 1
+ server 127.0.5.128 127.0.5.128:8080 check weight 10 inter 9409 fall 2 rise 1
+ server 127.0.5.129 127.0.5.129:8080 check weight 10 inter 6164 fall 2 rise 1
+ server 127.0.5.130 127.0.5.130:8080 check weight 10 inter 8526 fall 2 rise 1
+ server 127.0.5.131 127.0.5.131:8080 check weight 10 inter 2948 fall 2 rise 1
+ server 127.0.5.132 127.0.5.132:8080 check weight 10 inter 7578 fall 2 rise 1
+ server 127.0.5.133 127.0.5.133:8080 check weight 10 inter 8229 fall 2 rise 1
+ server 127.0.5.134 127.0.5.134:8080 check weight 10 inter 2469 fall 2 rise 1
+ server 127.0.5.135 127.0.5.135:8080 check weight 10 inter 2940 fall 2 rise 1
+ server 127.0.5.136 127.0.5.136:8080 check weight 10 inter 4014 fall 2 rise 1
+ server 127.0.5.137 127.0.5.137:8080 check weight 10 inter 3237 fall 2 rise 1
+ server 127.0.5.138 127.0.5.138:8080 check weight 10 inter 8724 fall 2 rise 1
+ server 127.0.5.139 127.0.5.139:8080 check weight 10 inter 3266 fall 2 rise 1
+ server 127.0.5.140 127.0.5.140:8080 check weight 10 inter 6319 fall 2 rise 1
+ server 127.0.5.141 127.0.5.141:8080 check weight 10 inter 1185 fall 2 rise 1
+ server 127.0.5.142 127.0.5.142:8080 check weight 10 inter 9473 fall 2 rise 1
+ server 127.0.5.143 127.0.5.143:8080 check weight 10 inter 5540 fall 2 rise 1
+ server 127.0.5.144 127.0.5.144:8080 check weight 10 inter 7733 fall 2 rise 1
+ server 127.0.5.145 127.0.5.145:8080 check weight 10 inter 7546 fall 2 rise 1
+ server 127.0.5.146 127.0.5.146:8080 check weight 10 inter 9099 fall 2 rise 1
+ server 127.0.5.147 127.0.5.147:8080 check weight 10 inter 1121 fall 2 rise 1
+ server 127.0.5.148 127.0.5.148:8080 check weight 10 inter 2954 fall 2 rise 1
+ server 127.0.5.149 127.0.5.149:8080 check weight 10 inter 176 fall 2 rise 1
+ server 127.0.5.150 127.0.5.150:8080 check weight 10 inter 9645 fall 2 rise 1
+ server 127.0.5.151 127.0.5.151:8080 check weight 10 inter 7941 fall 2 rise 1
+ server 127.0.5.152 127.0.5.152:8080 check weight 10 inter 420 fall 2 rise 1
+ server 127.0.5.153 127.0.5.153:8080 check weight 10 inter 6684 fall 2 rise 1
+ server 127.0.5.154 127.0.5.154:8080 check weight 10 inter 8179 fall 2 rise 1
+ server 127.0.5.155 127.0.5.155:8080 check weight 10 inter 162 fall 2 rise 1
+ server 127.0.5.156 127.0.5.156:8080 check weight 10 inter 8502 fall 2 rise 1
+ server 127.0.5.157 127.0.5.157:8080 check weight 10 inter 5008 fall 2 rise 1
+ server 127.0.5.158 127.0.5.158:8080 check weight 10 inter 2297 fall 2 rise 1
+ server 127.0.5.159 127.0.5.159:8080 check weight 10 inter 6290 fall 2 rise 1
+ server 127.0.5.160 127.0.5.160:8080 check weight 10 inter 8245 fall 2 rise 1
+ server 127.0.5.161 127.0.5.161:8080 check weight 10 inter 6576 fall 2 rise 1
+ server 127.0.5.162 127.0.5.162:8080 check weight 10 inter 9113 fall 2 rise 1
+ server 127.0.5.163 127.0.5.163:8080 check weight 10 inter 494 fall 2 rise 1
+ server 127.0.5.164 127.0.5.164:8080 check weight 10 inter 9675 fall 2 rise 1
+ server 127.0.5.165 127.0.5.165:8080 check weight 10 inter 3277 fall 2 rise 1
+ server 127.0.5.166 127.0.5.166:8080 check weight 10 inter 2864 fall 2 rise 1
+ server 127.0.5.167 127.0.5.167:8080 check weight 10 inter 8750 fall 2 rise 1
+ server 127.0.5.168 127.0.5.168:8080 check weight 10 inter 2606 fall 2 rise 1
+ server 127.0.5.169 127.0.5.169:8080 check weight 10 inter 6565 fall 2 rise 1
+ server 127.0.5.170 127.0.5.170:8080 check weight 10 inter 2501 fall 2 rise 1
+ server 127.0.5.171 127.0.5.171:8080 check weight 10 inter 3487 fall 2 rise 1
+ server 127.0.5.172 127.0.5.172:8080 check weight 10 inter 5118 fall 2 rise 1
+ server 127.0.5.173 127.0.5.173:8080 check weight 10 inter 9276 fall 2 rise 1
+ server 127.0.5.174 127.0.5.174:8080 check weight 10 inter 2841 fall 2 rise 1
+ server 127.0.5.175 127.0.5.175:8080 check weight 10 inter 54 fall 2 rise 1
+ server 127.0.5.176 127.0.5.176:8080 check weight 10 inter 4944 fall 2 rise 1
+ server 127.0.5.177 127.0.5.177:8080 check weight 10 inter 9560 fall 2 rise 1
+ server 127.0.5.178 127.0.5.178:8080 check weight 10 inter 4508 fall 2 rise 1
+ server 127.0.5.179 127.0.5.179:8080 check weight 10 inter 7606 fall 2 rise 1
+ server 127.0.5.180 127.0.5.180:8080 check weight 10 inter 9869 fall 2 rise 1
+ server 127.0.5.181 127.0.5.181:8080 check weight 10 inter 912 fall 2 rise 1
+ server 127.0.5.182 127.0.5.182:8080 check weight 10 inter 3926 fall 2 rise 1
+ server 127.0.5.183 127.0.5.183:8080 check weight 10 inter 2643 fall 2 rise 1
+ server 127.0.5.184 127.0.5.184:8080 check weight 10 inter 6842 fall 2 rise 1
+ server 127.0.5.185 127.0.5.185:8080 check weight 10 inter 5538 fall 2 rise 1
+ server 127.0.5.186 127.0.5.186:8080 check weight 10 inter 8736 fall 2 rise 1
+ server 127.0.5.187 127.0.5.187:8080 check weight 10 inter 5028 fall 2 rise 1
+ server 127.0.5.188 127.0.5.188:8080 check weight 10 inter 2342 fall 2 rise 1
+ server 127.0.5.189 127.0.5.189:8080 check weight 10 inter 4887 fall 2 rise 1
+ server 127.0.5.190 127.0.5.190:8080 check weight 10 inter 4230 fall 2 rise 1
+ server 127.0.5.191 127.0.5.191:8080 check weight 10 inter 6426 fall 2 rise 1
+ server 127.0.5.192 127.0.5.192:8080 check weight 10 inter 37 fall 2 rise 1
+ server 127.0.5.193 127.0.5.193:8080 check weight 10 inter 6887 fall 2 rise 1
+ server 127.0.5.194 127.0.5.194:8080 check weight 10 inter 2302 fall 2 rise 1
+ server 127.0.5.195 127.0.5.195:8080 check weight 10 inter 9908 fall 2 rise 1
+ server 127.0.5.196 127.0.5.196:8080 check weight 10 inter 9967 fall 2 rise 1
+ server 127.0.5.197 127.0.5.197:8080 check weight 10 inter 955 fall 2 rise 1
+ server 127.0.5.198 127.0.5.198:8080 check weight 10 inter 8069 fall 2 rise 1
+ server 127.0.5.199 127.0.5.199:8080 check weight 10 inter 4494 fall 2 rise 1
+ server 127.0.5.200 127.0.5.200:8080 check weight 10 inter 5560 fall 2 rise 1
+ server 127.0.5.201 127.0.5.201:8080 check weight 10 inter 2023 fall 2 rise 1
+ server 127.0.5.202 127.0.5.202:8080 check weight 10 inter 2733 fall 2 rise 1
+ server 127.0.5.203 127.0.5.203:8080 check weight 10 inter 1709 fall 2 rise 1
+ server 127.0.5.204 127.0.5.204:8080 check weight 10 inter 1373 fall 2 rise 1
+ server 127.0.5.205 127.0.5.205:8080 check weight 10 inter 611 fall 2 rise 1
+ server 127.0.5.206 127.0.5.206:8080 check weight 10 inter 5672 fall 2 rise 1
+ server 127.0.5.207 127.0.5.207:8080 check weight 10 inter 4059 fall 2 rise 1
+ server 127.0.5.208 127.0.5.208:8080 check weight 10 inter 7699 fall 2 rise 1
+ server 127.0.5.209 127.0.5.209:8080 check weight 10 inter 2542 fall 2 rise 1
+ server 127.0.5.210 127.0.5.210:8080 check weight 10 inter 5411 fall 2 rise 1
+ server 127.0.5.211 127.0.5.211:8080 check weight 10 inter 6979 fall 2 rise 1
+ server 127.0.5.212 127.0.5.212:8080 check weight 10 inter 2144 fall 2 rise 1
+ server 127.0.5.213 127.0.5.213:8080 check weight 10 inter 9762 fall 2 rise 1
+ server 127.0.5.214 127.0.5.214:8080 check weight 10 inter 9252 fall 2 rise 1
+ server 127.0.5.215 127.0.5.215:8080 check weight 10 inter 9067 fall 2 rise 1
+ server 127.0.5.216 127.0.5.216:8080 check weight 10 inter 3878 fall 2 rise 1
+ server 127.0.5.217 127.0.5.217:8080 check weight 10 inter 4182 fall 2 rise 1
+ server 127.0.5.218 127.0.5.218:8080 check weight 10 inter 8882 fall 2 rise 1
+ server 127.0.5.219 127.0.5.219:8080 check weight 10 inter 8948 fall 2 rise 1
+ server 127.0.5.220 127.0.5.220:8080 check weight 10 inter 1571 fall 2 rise 1
+ server 127.0.5.221 127.0.5.221:8080 check weight 10 inter 5381 fall 2 rise 1
+ server 127.0.5.222 127.0.5.222:8080 check weight 10 inter 8358 fall 2 rise 1
+ server 127.0.5.223 127.0.5.223:8080 check weight 10 inter 6787 fall 2 rise 1
+ server 127.0.5.224 127.0.5.224:8080 check weight 10 inter 7658 fall 2 rise 1
+ server 127.0.5.225 127.0.5.225:8080 check weight 10 inter 7473 fall 2 rise 1
+ server 127.0.5.226 127.0.5.226:8080 check weight 10 inter 4101 fall 2 rise 1
+ server 127.0.5.227 127.0.5.227:8080 check weight 10 inter 6653 fall 2 rise 1
+ server 127.0.5.228 127.0.5.228:8080 check weight 10 inter 1919 fall 2 rise 1
+ server 127.0.5.229 127.0.5.229:8080 check weight 10 inter 5528 fall 2 rise 1
+ server 127.0.5.230 127.0.5.230:8080 check weight 10 inter 2218 fall 2 rise 1
+ server 127.0.5.231 127.0.5.231:8080 check weight 10 inter 4996 fall 2 rise 1
+ server 127.0.5.232 127.0.5.232:8080 check weight 10 inter 4884 fall 2 rise 1
+ server 127.0.5.233 127.0.5.233:8080 check weight 10 inter 7746 fall 2 rise 1
+ server 127.0.5.234 127.0.5.234:8080 check weight 10 inter 485 fall 2 rise 1
+ server 127.0.5.235 127.0.5.235:8080 check weight 10 inter 9108 fall 2 rise 1
+ server 127.0.5.236 127.0.5.236:8080 check weight 10 inter 822 fall 2 rise 1
+ server 127.0.5.237 127.0.5.237:8080 check weight 10 inter 7808 fall 2 rise 1
+ server 127.0.5.238 127.0.5.238:8080 check weight 10 inter 1366 fall 2 rise 1
+ server 127.0.5.239 127.0.5.239:8080 check weight 10 inter 7613 fall 2 rise 1
+ server 127.0.5.240 127.0.5.240:8080 check weight 10 inter 3415 fall 2 rise 1
+ server 127.0.5.241 127.0.5.241:8080 check weight 10 inter 2500 fall 2 rise 1
+ server 127.0.5.242 127.0.5.242:8080 check weight 10 inter 6344 fall 2 rise 1
+ server 127.0.5.243 127.0.5.243:8080 check weight 10 inter 6238 fall 2 rise 1
+ server 127.0.5.244 127.0.5.244:8080 check weight 10 inter 9594 fall 2 rise 1
+ server 127.0.5.245 127.0.5.245:8080 check weight 10 inter 3967 fall 2 rise 1
+ server 127.0.5.246 127.0.5.246:8080 check weight 10 inter 8398 fall 2 rise 1
+ server 127.0.5.247 127.0.5.247:8080 check weight 10 inter 5381 fall 2 rise 1
+ server 127.0.5.248 127.0.5.248:8080 check weight 10 inter 1697 fall 2 rise 1
+ server 127.0.5.249 127.0.5.249:8080 check weight 10 inter 2767 fall 2 rise 1
+ server 127.0.5.250 127.0.5.250:8080 check weight 10 inter 2895 fall 2 rise 1
+ server 127.0.6.001 127.0.6.1:8080 check weight 10 inter 9321 fall 2 rise 1
+ server 127.0.6.002 127.0.6.2:8080 check weight 10 inter 3590 fall 2 rise 1
+ server 127.0.6.003 127.0.6.3:8080 check weight 10 inter 5701 fall 2 rise 1
+ server 127.0.6.004 127.0.6.4:8080 check weight 10 inter 7568 fall 2 rise 1
+ server 127.0.6.005 127.0.6.5:8080 check weight 10 inter 5460 fall 2 rise 1
+ server 127.0.6.006 127.0.6.6:8080 check weight 10 inter 299 fall 2 rise 1
+ server 127.0.6.007 127.0.6.7:8080 check weight 10 inter 4440 fall 2 rise 1
+ server 127.0.6.008 127.0.6.8:8080 check weight 10 inter 4805 fall 2 rise 1
+ server 127.0.6.009 127.0.6.9:8080 check weight 10 inter 2033 fall 2 rise 1
+ server 127.0.6.010 127.0.6.10:8080 check weight 10 inter 6162 fall 2 rise 1
+ server 127.0.6.011 127.0.6.11:8080 check weight 10 inter 7211 fall 2 rise 1
+ server 127.0.6.012 127.0.6.12:8080 check weight 10 inter 2888 fall 2 rise 1
+ server 127.0.6.013 127.0.6.13:8080 check weight 10 inter 4537 fall 2 rise 1
+ server 127.0.6.014 127.0.6.14:8080 check weight 10 inter 6139 fall 2 rise 1
+ server 127.0.6.015 127.0.6.15:8080 check weight 10 inter 5903 fall 2 rise 1
+ server 127.0.6.016 127.0.6.16:8080 check weight 10 inter 7717 fall 2 rise 1
+ server 127.0.6.017 127.0.6.17:8080 check weight 10 inter 5469 fall 2 rise 1
+ server 127.0.6.018 127.0.6.18:8080 check weight 10 inter 6369 fall 2 rise 1
+ server 127.0.6.019 127.0.6.19:8080 check weight 10 inter 1129 fall 2 rise 1
+ server 127.0.6.020 127.0.6.20:8080 check weight 10 inter 2299 fall 2 rise 1
+ server 127.0.6.021 127.0.6.21:8080 check weight 10 inter 2995 fall 2 rise 1
+ server 127.0.6.022 127.0.6.22:8080 check weight 10 inter 7523 fall 2 rise 1
+ server 127.0.6.023 127.0.6.23:8080 check weight 10 inter 5633 fall 2 rise 1
+ server 127.0.6.024 127.0.6.24:8080 check weight 10 inter 4435 fall 2 rise 1
+ server 127.0.6.025 127.0.6.25:8080 check weight 10 inter 9921 fall 2 rise 1
+ server 127.0.6.026 127.0.6.26:8080 check weight 10 inter 6436 fall 2 rise 1
+ server 127.0.6.027 127.0.6.27:8080 check weight 10 inter 1183 fall 2 rise 1
+ server 127.0.6.028 127.0.6.28:8080 check weight 10 inter 2046 fall 2 rise 1
+ server 127.0.6.029 127.0.6.29:8080 check weight 10 inter 1978 fall 2 rise 1
+ server 127.0.6.030 127.0.6.30:8080 check weight 10 inter 2946 fall 2 rise 1
+ server 127.0.6.031 127.0.6.31:8080 check weight 10 inter 2710 fall 2 rise 1
+ server 127.0.6.032 127.0.6.32:8080 check weight 10 inter 416 fall 2 rise 1
+ server 127.0.6.033 127.0.6.33:8080 check weight 10 inter 2779 fall 2 rise 1
+ server 127.0.6.034 127.0.6.34:8080 check weight 10 inter 1270 fall 2 rise 1
+ server 127.0.6.035 127.0.6.35:8080 check weight 10 inter 7165 fall 2 rise 1
+ server 127.0.6.036 127.0.6.36:8080 check weight 10 inter 6494 fall 2 rise 1
+ server 127.0.6.037 127.0.6.37:8080 check weight 10 inter 1685 fall 2 rise 1
+ server 127.0.6.038 127.0.6.38:8080 check weight 10 inter 5168 fall 2 rise 1
+ server 127.0.6.039 127.0.6.39:8080 check weight 10 inter 690 fall 2 rise 1
+ server 127.0.6.040 127.0.6.40:8080 check weight 10 inter 8376 fall 2 rise 1
+ server 127.0.6.041 127.0.6.41:8080 check weight 10 inter 2120 fall 2 rise 1
+ server 127.0.6.042 127.0.6.42:8080 check weight 10 inter 7728 fall 2 rise 1
+ server 127.0.6.043 127.0.6.43:8080 check weight 10 inter 5565 fall 2 rise 1
+ server 127.0.6.044 127.0.6.44:8080 check weight 10 inter 4935 fall 2 rise 1
+ server 127.0.6.045 127.0.6.45:8080 check weight 10 inter 1316 fall 2 rise 1
+ server 127.0.6.046 127.0.6.46:8080 check weight 10 inter 132 fall 2 rise 1
+ server 127.0.6.047 127.0.6.47:8080 check weight 10 inter 9926 fall 2 rise 1
+ server 127.0.6.048 127.0.6.48:8080 check weight 10 inter 4517 fall 2 rise 1
+ server 127.0.6.049 127.0.6.49:8080 check weight 10 inter 8111 fall 2 rise 1
+ server 127.0.6.050 127.0.6.50:8080 check weight 10 inter 2549 fall 2 rise 1
+ server 127.0.6.051 127.0.6.51:8080 check weight 10 inter 2950 fall 2 rise 1
+ server 127.0.6.052 127.0.6.52:8080 check weight 10 inter 2591 fall 2 rise 1
+ server 127.0.6.053 127.0.6.53:8080 check weight 10 inter 7477 fall 2 rise 1
+ server 127.0.6.054 127.0.6.54:8080 check weight 10 inter 1776 fall 2 rise 1
+ server 127.0.6.055 127.0.6.55:8080 check weight 10 inter 3246 fall 2 rise 1
+ server 127.0.6.056 127.0.6.56:8080 check weight 10 inter 2172 fall 2 rise 1
+ server 127.0.6.057 127.0.6.57:8080 check weight 10 inter 3938 fall 2 rise 1
+ server 127.0.6.058 127.0.6.58:8080 check weight 10 inter 1285 fall 2 rise 1
+ server 127.0.6.059 127.0.6.59:8080 check weight 10 inter 1178 fall 2 rise 1
+ server 127.0.6.060 127.0.6.60:8080 check weight 10 inter 922 fall 2 rise 1
+ server 127.0.6.061 127.0.6.61:8080 check weight 10 inter 401 fall 2 rise 1
+ server 127.0.6.062 127.0.6.62:8080 check weight 10 inter 9361 fall 2 rise 1
+ server 127.0.6.063 127.0.6.63:8080 check weight 10 inter 6690 fall 2 rise 1
+ server 127.0.6.064 127.0.6.64:8080 check weight 10 inter 6445 fall 2 rise 1
+ server 127.0.6.065 127.0.6.65:8080 check weight 10 inter 2867 fall 2 rise 1
+ server 127.0.6.066 127.0.6.66:8080 check weight 10 inter 6348 fall 2 rise 1
+ server 127.0.6.067 127.0.6.67:8080 check weight 10 inter 5974 fall 2 rise 1
+ server 127.0.6.068 127.0.6.68:8080 check weight 10 inter 3206 fall 2 rise 1
+ server 127.0.6.069 127.0.6.69:8080 check weight 10 inter 6926 fall 2 rise 1
+ server 127.0.6.070 127.0.6.70:8080 check weight 10 inter 5794 fall 2 rise 1
+ server 127.0.6.071 127.0.6.71:8080 check weight 10 inter 2087 fall 2 rise 1
+ server 127.0.6.072 127.0.6.72:8080 check weight 10 inter 8597 fall 2 rise 1
+ server 127.0.6.073 127.0.6.73:8080 check weight 10 inter 4362 fall 2 rise 1
+ server 127.0.6.074 127.0.6.74:8080 check weight 10 inter 4354 fall 2 rise 1
+ server 127.0.6.075 127.0.6.75:8080 check weight 10 inter 3334 fall 2 rise 1
+ server 127.0.6.076 127.0.6.76:8080 check weight 10 inter 6655 fall 2 rise 1
+ server 127.0.6.077 127.0.6.77:8080 check weight 10 inter 221 fall 2 rise 1
+ server 127.0.6.078 127.0.6.78:8080 check weight 10 inter 8456 fall 2 rise 1
+ server 127.0.6.079 127.0.6.79:8080 check weight 10 inter 3438 fall 2 rise 1
+ server 127.0.6.080 127.0.6.80:8080 check weight 10 inter 5261 fall 2 rise 1
+ server 127.0.6.081 127.0.6.81:8080 check weight 10 inter 194 fall 2 rise 1
+ server 127.0.6.082 127.0.6.82:8080 check weight 10 inter 3835 fall 2 rise 1
+ server 127.0.6.083 127.0.6.83:8080 check weight 10 inter 8832 fall 2 rise 1
+ server 127.0.6.084 127.0.6.84:8080 check weight 10 inter 9802 fall 2 rise 1
+ server 127.0.6.085 127.0.6.85:8080 check weight 10 inter 1179 fall 2 rise 1
+ server 127.0.6.086 127.0.6.86:8080 check weight 10 inter 8709 fall 2 rise 1
+ server 127.0.6.087 127.0.6.87:8080 check weight 10 inter 9602 fall 2 rise 1
+ server 127.0.6.088 127.0.6.88:8080 check weight 10 inter 4635 fall 2 rise 1
+ server 127.0.6.089 127.0.6.89:8080 check weight 10 inter 4507 fall 2 rise 1
+ server 127.0.6.090 127.0.6.90:8080 check weight 10 inter 2694 fall 2 rise 1
+ server 127.0.6.091 127.0.6.91:8080 check weight 10 inter 8599 fall 2 rise 1
+ server 127.0.6.092 127.0.6.92:8080 check weight 10 inter 6428 fall 2 rise 1
+ server 127.0.6.093 127.0.6.93:8080 check weight 10 inter 1650 fall 2 rise 1
+ server 127.0.6.094 127.0.6.94:8080 check weight 10 inter 695 fall 2 rise 1
+ server 127.0.6.095 127.0.6.95:8080 check weight 10 inter 3116 fall 2 rise 1
+ server 127.0.6.096 127.0.6.96:8080 check weight 10 inter 270 fall 2 rise 1
+ server 127.0.6.097 127.0.6.97:8080 check weight 10 inter 5127 fall 2 rise 1
+ server 127.0.6.098 127.0.6.98:8080 check weight 10 inter 6031 fall 2 rise 1
+ server 127.0.6.099 127.0.6.99:8080 check weight 10 inter 8744 fall 2 rise 1
+ server 127.0.6.100 127.0.6.100:8080 check weight 10 inter 290 fall 2 rise 1
+ server 127.0.6.101 127.0.6.101:8080 check weight 10 inter 6567 fall 2 rise 1
+ server 127.0.6.102 127.0.6.102:8080 check weight 10 inter 3720 fall 2 rise 1
+ server 127.0.6.103 127.0.6.103:8080 check weight 10 inter 4065 fall 2 rise 1
+ server 127.0.6.104 127.0.6.104:8080 check weight 10 inter 1844 fall 2 rise 1
+ server 127.0.6.105 127.0.6.105:8080 check weight 10 inter 976 fall 2 rise 1
+ server 127.0.6.106 127.0.6.106:8080 check weight 10 inter 2639 fall 2 rise 1
+ server 127.0.6.107 127.0.6.107:8080 check weight 10 inter 7070 fall 2 rise 1
+ server 127.0.6.108 127.0.6.108:8080 check weight 10 inter 8088 fall 2 rise 1
+ server 127.0.6.109 127.0.6.109:8080 check weight 10 inter 3900 fall 2 rise 1
+ server 127.0.6.110 127.0.6.110:8080 check weight 10 inter 1790 fall 2 rise 1
+ server 127.0.6.111 127.0.6.111:8080 check weight 10 inter 5168 fall 2 rise 1
+ server 127.0.6.112 127.0.6.112:8080 check weight 10 inter 2453 fall 2 rise 1
+ server 127.0.6.113 127.0.6.113:8080 check weight 10 inter 7950 fall 2 rise 1
+ server 127.0.6.114 127.0.6.114:8080 check weight 10 inter 1865 fall 2 rise 1
+ server 127.0.6.115 127.0.6.115:8080 check weight 10 inter 8143 fall 2 rise 1
+ server 127.0.6.116 127.0.6.116:8080 check weight 10 inter 6004 fall 2 rise 1
+ server 127.0.6.117 127.0.6.117:8080 check weight 10 inter 7517 fall 2 rise 1
+ server 127.0.6.118 127.0.6.118:8080 check weight 10 inter 8875 fall 2 rise 1
+ server 127.0.6.119 127.0.6.119:8080 check weight 10 inter 7462 fall 2 rise 1
+ server 127.0.6.120 127.0.6.120:8080 check weight 10 inter 2263 fall 2 rise 1
+ server 127.0.6.121 127.0.6.121:8080 check weight 10 inter 5380 fall 2 rise 1
+ server 127.0.6.122 127.0.6.122:8080 check weight 10 inter 3614 fall 2 rise 1
+ server 127.0.6.123 127.0.6.123:8080 check weight 10 inter 207 fall 2 rise 1
+ server 127.0.6.124 127.0.6.124:8080 check weight 10 inter 4474 fall 2 rise 1
+ server 127.0.6.125 127.0.6.125:8080 check weight 10 inter 6181 fall 2 rise 1
+ server 127.0.6.126 127.0.6.126:8080 check weight 10 inter 252 fall 2 rise 1
+ server 127.0.6.127 127.0.6.127:8080 check weight 10 inter 1660 fall 2 rise 1
+ server 127.0.6.128 127.0.6.128:8080 check weight 10 inter 7611 fall 2 rise 1
+ server 127.0.6.129 127.0.6.129:8080 check weight 10 inter 945 fall 2 rise 1
+ server 127.0.6.130 127.0.6.130:8080 check weight 10 inter 9160 fall 2 rise 1
+ server 127.0.6.131 127.0.6.131:8080 check weight 10 inter 8666 fall 2 rise 1
+ server 127.0.6.132 127.0.6.132:8080 check weight 10 inter 6536 fall 2 rise 1
+ server 127.0.6.133 127.0.6.133:8080 check weight 10 inter 8217 fall 2 rise 1
+ server 127.0.6.134 127.0.6.134:8080 check weight 10 inter 1275 fall 2 rise 1
+ server 127.0.6.135 127.0.6.135:8080 check weight 10 inter 2246 fall 2 rise 1
+ server 127.0.6.136 127.0.6.136:8080 check weight 10 inter 2620 fall 2 rise 1
+ server 127.0.6.137 127.0.6.137:8080 check weight 10 inter 5553 fall 2 rise 1
+ server 127.0.6.138 127.0.6.138:8080 check weight 10 inter 1888 fall 2 rise 1
+ server 127.0.6.139 127.0.6.139:8080 check weight 10 inter 9276 fall 2 rise 1
+ server 127.0.6.140 127.0.6.140:8080 check weight 10 inter 8249 fall 2 rise 1
+ server 127.0.6.141 127.0.6.141:8080 check weight 10 inter 617 fall 2 rise 1
+ server 127.0.6.142 127.0.6.142:8080 check weight 10 inter 8831 fall 2 rise 1
+ server 127.0.6.143 127.0.6.143:8080 check weight 10 inter 1268 fall 2 rise 1
+ server 127.0.6.144 127.0.6.144:8080 check weight 10 inter 9493 fall 2 rise 1
+ server 127.0.6.145 127.0.6.145:8080 check weight 10 inter 683 fall 2 rise 1
+ server 127.0.6.146 127.0.6.146:8080 check weight 10 inter 2682 fall 2 rise 1
+ server 127.0.6.147 127.0.6.147:8080 check weight 10 inter 3435 fall 2 rise 1
+ server 127.0.6.148 127.0.6.148:8080 check weight 10 inter 9477 fall 2 rise 1
+ server 127.0.6.149 127.0.6.149:8080 check weight 10 inter 8292 fall 2 rise 1
+ server 127.0.6.150 127.0.6.150:8080 check weight 10 inter 9145 fall 2 rise 1
+ server 127.0.6.151 127.0.6.151:8080 check weight 10 inter 850 fall 2 rise 1
+ server 127.0.6.152 127.0.6.152:8080 check weight 10 inter 3626 fall 2 rise 1
+ server 127.0.6.153 127.0.6.153:8080 check weight 10 inter 8915 fall 2 rise 1
+ server 127.0.6.154 127.0.6.154:8080 check weight 10 inter 4165 fall 2 rise 1
+ server 127.0.6.155 127.0.6.155:8080 check weight 10 inter 5945 fall 2 rise 1
+ server 127.0.6.156 127.0.6.156:8080 check weight 10 inter 4413 fall 2 rise 1
+ server 127.0.6.157 127.0.6.157:8080 check weight 10 inter 1139 fall 2 rise 1
+ server 127.0.6.158 127.0.6.158:8080 check weight 10 inter 2119 fall 2 rise 1
+ server 127.0.6.159 127.0.6.159:8080 check weight 10 inter 8794 fall 2 rise 1
+ server 127.0.6.160 127.0.6.160:8080 check weight 10 inter 1243 fall 2 rise 1
+ server 127.0.6.161 127.0.6.161:8080 check weight 10 inter 312 fall 2 rise 1
+ server 127.0.6.162 127.0.6.162:8080 check weight 10 inter 6206 fall 2 rise 1
+ server 127.0.6.163 127.0.6.163:8080 check weight 10 inter 9349 fall 2 rise 1
+ server 127.0.6.164 127.0.6.164:8080 check weight 10 inter 2481 fall 2 rise 1
+ server 127.0.6.165 127.0.6.165:8080 check weight 10 inter 3402 fall 2 rise 1
+ server 127.0.6.166 127.0.6.166:8080 check weight 10 inter 4338 fall 2 rise 1
+ server 127.0.6.167 127.0.6.167:8080 check weight 10 inter 6287 fall 2 rise 1
+ server 127.0.6.168 127.0.6.168:8080 check weight 10 inter 5255 fall 2 rise 1
+ server 127.0.6.169 127.0.6.169:8080 check weight 10 inter 6453 fall 2 rise 1
+ server 127.0.6.170 127.0.6.170:8080 check weight 10 inter 5804 fall 2 rise 1
+ server 127.0.6.171 127.0.6.171:8080 check weight 10 inter 1801 fall 2 rise 1
+ server 127.0.6.172 127.0.6.172:8080 check weight 10 inter 5132 fall 2 rise 1
+ server 127.0.6.173 127.0.6.173:8080 check weight 10 inter 3629 fall 2 rise 1
+ server 127.0.6.174 127.0.6.174:8080 check weight 10 inter 6290 fall 2 rise 1
+ server 127.0.6.175 127.0.6.175:8080 check weight 10 inter 7457 fall 2 rise 1
+ server 127.0.6.176 127.0.6.176:8080 check weight 10 inter 1301 fall 2 rise 1
+ server 127.0.6.177 127.0.6.177:8080 check weight 10 inter 770 fall 2 rise 1
+ server 127.0.6.178 127.0.6.178:8080 check weight 10 inter 9492 fall 2 rise 1
+ server 127.0.6.179 127.0.6.179:8080 check weight 10 inter 2139 fall 2 rise 1
+ server 127.0.6.180 127.0.6.180:8080 check weight 10 inter 245 fall 2 rise 1
+ server 127.0.6.181 127.0.6.181:8080 check weight 10 inter 4759 fall 2 rise 1
+ server 127.0.6.182 127.0.6.182:8080 check weight 10 inter 4407 fall 2 rise 1
+ server 127.0.6.183 127.0.6.183:8080 check weight 10 inter 590 fall 2 rise 1
+ server 127.0.6.184 127.0.6.184:8080 check weight 10 inter 4346 fall 2 rise 1
+ server 127.0.6.185 127.0.6.185:8080 check weight 10 inter 7777 fall 2 rise 1
+ server 127.0.6.186 127.0.6.186:8080 check weight 10 inter 1636 fall 2 rise 1
+ server 127.0.6.187 127.0.6.187:8080 check weight 10 inter 7871 fall 2 rise 1
+ server 127.0.6.188 127.0.6.188:8080 check weight 10 inter 2877 fall 2 rise 1
+ server 127.0.6.189 127.0.6.189:8080 check weight 10 inter 354 fall 2 rise 1
+ server 127.0.6.190 127.0.6.190:8080 check weight 10 inter 1168 fall 2 rise 1
+ server 127.0.6.191 127.0.6.191:8080 check weight 10 inter 3949 fall 2 rise 1
+ server 127.0.6.192 127.0.6.192:8080 check weight 10 inter 6536 fall 2 rise 1
+ server 127.0.6.193 127.0.6.193:8080 check weight 10 inter 2452 fall 2 rise 1
+ server 127.0.6.194 127.0.6.194:8080 check weight 10 inter 3114 fall 2 rise 1
+ server 127.0.6.195 127.0.6.195:8080 check weight 10 inter 2049 fall 2 rise 1
+ server 127.0.6.196 127.0.6.196:8080 check weight 10 inter 7638 fall 2 rise 1
+ server 127.0.6.197 127.0.6.197:8080 check weight 10 inter 3108 fall 2 rise 1
+ server 127.0.6.198 127.0.6.198:8080 check weight 10 inter 2342 fall 2 rise 1
+ server 127.0.6.199 127.0.6.199:8080 check weight 10 inter 9875 fall 2 rise 1
+ server 127.0.6.200 127.0.6.200:8080 check weight 10 inter 4971 fall 2 rise 1
+ server 127.0.6.201 127.0.6.201:8080 check weight 10 inter 7876 fall 2 rise 1
+ server 127.0.6.202 127.0.6.202:8080 check weight 10 inter 2492 fall 2 rise 1
+ server 127.0.6.203 127.0.6.203:8080 check weight 10 inter 4141 fall 2 rise 1
+ server 127.0.6.204 127.0.6.204:8080 check weight 10 inter 168 fall 2 rise 1
+ server 127.0.6.205 127.0.6.205:8080 check weight 10 inter 110 fall 2 rise 1
+ server 127.0.6.206 127.0.6.206:8080 check weight 10 inter 4431 fall 2 rise 1
+ server 127.0.6.207 127.0.6.207:8080 check weight 10 inter 5184 fall 2 rise 1
+ server 127.0.6.208 127.0.6.208:8080 check weight 10 inter 3693 fall 2 rise 1
+ server 127.0.6.209 127.0.6.209:8080 check weight 10 inter 889 fall 2 rise 1
+ server 127.0.6.210 127.0.6.210:8080 check weight 10 inter 5439 fall 2 rise 1
+ server 127.0.6.211 127.0.6.211:8080 check weight 10 inter 1032 fall 2 rise 1
+ server 127.0.6.212 127.0.6.212:8080 check weight 10 inter 7307 fall 2 rise 1
+ server 127.0.6.213 127.0.6.213:8080 check weight 10 inter 4862 fall 2 rise 1
+ server 127.0.6.214 127.0.6.214:8080 check weight 10 inter 3102 fall 2 rise 1
+ server 127.0.6.215 127.0.6.215:8080 check weight 10 inter 7697 fall 2 rise 1
+ server 127.0.6.216 127.0.6.216:8080 check weight 10 inter 5409 fall 2 rise 1
+ server 127.0.6.217 127.0.6.217:8080 check weight 10 inter 7703 fall 2 rise 1
+ server 127.0.6.218 127.0.6.218:8080 check weight 10 inter 7714 fall 2 rise 1
+ server 127.0.6.219 127.0.6.219:8080 check weight 10 inter 7383 fall 2 rise 1
+ server 127.0.6.220 127.0.6.220:8080 check weight 10 inter 7171 fall 2 rise 1
+ server 127.0.6.221 127.0.6.221:8080 check weight 10 inter 9884 fall 2 rise 1
+ server 127.0.6.222 127.0.6.222:8080 check weight 10 inter 288 fall 2 rise 1
+ server 127.0.6.223 127.0.6.223:8080 check weight 10 inter 590 fall 2 rise 1
+ server 127.0.6.224 127.0.6.224:8080 check weight 10 inter 9753 fall 2 rise 1
+ server 127.0.6.225 127.0.6.225:8080 check weight 10 inter 6269 fall 2 rise 1
+ server 127.0.6.226 127.0.6.226:8080 check weight 10 inter 7458 fall 2 rise 1
+ server 127.0.6.227 127.0.6.227:8080 check weight 10 inter 133 fall 2 rise 1
+ server 127.0.6.228 127.0.6.228:8080 check weight 10 inter 2189 fall 2 rise 1
+ server 127.0.6.229 127.0.6.229:8080 check weight 10 inter 7774 fall 2 rise 1
+ server 127.0.6.230 127.0.6.230:8080 check weight 10 inter 9966 fall 2 rise 1
+ server 127.0.6.231 127.0.6.231:8080 check weight 10 inter 5195 fall 2 rise 1
+ server 127.0.6.232 127.0.6.232:8080 check weight 10 inter 1282 fall 2 rise 1
+ server 127.0.6.233 127.0.6.233:8080 check weight 10 inter 2630 fall 2 rise 1
+ server 127.0.6.234 127.0.6.234:8080 check weight 10 inter 7223 fall 2 rise 1
+ server 127.0.6.235 127.0.6.235:8080 check weight 10 inter 7903 fall 2 rise 1
+ server 127.0.6.236 127.0.6.236:8080 check weight 10 inter 4836 fall 2 rise 1
+ server 127.0.6.237 127.0.6.237:8080 check weight 10 inter 2230 fall 2 rise 1
+ server 127.0.6.238 127.0.6.238:8080 check weight 10 inter 9839 fall 2 rise 1
+ server 127.0.6.239 127.0.6.239:8080 check weight 10 inter 4393 fall 2 rise 1
+ server 127.0.6.240 127.0.6.240:8080 check weight 10 inter 612 fall 2 rise 1
+ server 127.0.6.241 127.0.6.241:8080 check weight 10 inter 6794 fall 2 rise 1
+ server 127.0.6.242 127.0.6.242:8080 check weight 10 inter 5299 fall 2 rise 1
+ server 127.0.6.243 127.0.6.243:8080 check weight 10 inter 2842 fall 2 rise 1
+ server 127.0.6.244 127.0.6.244:8080 check weight 10 inter 3105 fall 2 rise 1
+ server 127.0.6.245 127.0.6.245:8080 check weight 10 inter 1537 fall 2 rise 1
+ server 127.0.6.246 127.0.6.246:8080 check weight 10 inter 4453 fall 2 rise 1
+ server 127.0.6.247 127.0.6.247:8080 check weight 10 inter 9988 fall 2 rise 1
+ server 127.0.6.248 127.0.6.248:8080 check weight 10 inter 5925 fall 2 rise 1
+ server 127.0.6.249 127.0.6.249:8080 check weight 10 inter 268 fall 2 rise 1
+ server 127.0.6.250 127.0.6.250:8080 check weight 10 inter 1256 fall 2 rise 1
+ server 127.0.7.001 127.0.7.1:8080 check weight 10 inter 9065 fall 2 rise 1
+ server 127.0.7.002 127.0.7.2:8080 check weight 10 inter 9109 fall 2 rise 1
+ server 127.0.7.003 127.0.7.3:8080 check weight 10 inter 6936 fall 2 rise 1
+ server 127.0.7.004 127.0.7.4:8080 check weight 10 inter 4222 fall 2 rise 1
+ server 127.0.7.005 127.0.7.5:8080 check weight 10 inter 7909 fall 2 rise 1
+ server 127.0.7.006 127.0.7.6:8080 check weight 10 inter 4214 fall 2 rise 1
+ server 127.0.7.007 127.0.7.7:8080 check weight 10 inter 9899 fall 2 rise 1
+ server 127.0.7.008 127.0.7.8:8080 check weight 10 inter 6753 fall 2 rise 1
+ server 127.0.7.009 127.0.7.9:8080 check weight 10 inter 4618 fall 2 rise 1
+ server 127.0.7.010 127.0.7.10:8080 check weight 10 inter 9135 fall 2 rise 1
+ server 127.0.7.011 127.0.7.11:8080 check weight 10 inter 1761 fall 2 rise 1
+ server 127.0.7.012 127.0.7.12:8080 check weight 10 inter 8210 fall 2 rise 1
+ server 127.0.7.013 127.0.7.13:8080 check weight 10 inter 159 fall 2 rise 1
+ server 127.0.7.014 127.0.7.14:8080 check weight 10 inter 4594 fall 2 rise 1
+ server 127.0.7.015 127.0.7.15:8080 check weight 10 inter 2756 fall 2 rise 1
+ server 127.0.7.016 127.0.7.16:8080 check weight 10 inter 7973 fall 2 rise 1
+ server 127.0.7.017 127.0.7.17:8080 check weight 10 inter 5112 fall 2 rise 1
+ server 127.0.7.018 127.0.7.18:8080 check weight 10 inter 1290 fall 2 rise 1
+ server 127.0.7.019 127.0.7.19:8080 check weight 10 inter 7772 fall 2 rise 1
+ server 127.0.7.020 127.0.7.20:8080 check weight 10 inter 1578 fall 2 rise 1
+ server 127.0.7.021 127.0.7.21:8080 check weight 10 inter 9060 fall 2 rise 1
+ server 127.0.7.022 127.0.7.22:8080 check weight 10 inter 292 fall 2 rise 1
+ server 127.0.7.023 127.0.7.23:8080 check weight 10 inter 76 fall 2 rise 1
+ server 127.0.7.024 127.0.7.24:8080 check weight 10 inter 1018 fall 2 rise 1
+ server 127.0.7.025 127.0.7.25:8080 check weight 10 inter 6873 fall 2 rise 1
+ server 127.0.7.026 127.0.7.26:8080 check weight 10 inter 4589 fall 2 rise 1
+ server 127.0.7.027 127.0.7.27:8080 check weight 10 inter 6104 fall 2 rise 1
+ server 127.0.7.028 127.0.7.28:8080 check weight 10 inter 1237 fall 2 rise 1
+ server 127.0.7.029 127.0.7.29:8080 check weight 10 inter 3775 fall 2 rise 1
+ server 127.0.7.030 127.0.7.30:8080 check weight 10 inter 4832 fall 2 rise 1
+ server 127.0.7.031 127.0.7.31:8080 check weight 10 inter 1816 fall 2 rise 1
+ server 127.0.7.032 127.0.7.32:8080 check weight 10 inter 3821 fall 2 rise 1
+ server 127.0.7.033 127.0.7.33:8080 check weight 10 inter 2383 fall 2 rise 1
+ server 127.0.7.034 127.0.7.34:8080 check weight 10 inter 6716 fall 2 rise 1
+ server 127.0.7.035 127.0.7.35:8080 check weight 10 inter 3710 fall 2 rise 1
+ server 127.0.7.036 127.0.7.36:8080 check weight 10 inter 7426 fall 2 rise 1
+ server 127.0.7.037 127.0.7.37:8080 check weight 10 inter 1897 fall 2 rise 1
+ server 127.0.7.038 127.0.7.38:8080 check weight 10 inter 4134 fall 2 rise 1
+ server 127.0.7.039 127.0.7.39:8080 check weight 10 inter 2855 fall 2 rise 1
+ server 127.0.7.040 127.0.7.40:8080 check weight 10 inter 7373 fall 2 rise 1
+ server 127.0.7.041 127.0.7.41:8080 check weight 10 inter 5864 fall 2 rise 1
+ server 127.0.7.042 127.0.7.42:8080 check weight 10 inter 2148 fall 2 rise 1
+ server 127.0.7.043 127.0.7.43:8080 check weight 10 inter 1651 fall 2 rise 1
+ server 127.0.7.044 127.0.7.44:8080 check weight 10 inter 1627 fall 2 rise 1
+ server 127.0.7.045 127.0.7.45:8080 check weight 10 inter 9479 fall 2 rise 1
+ server 127.0.7.046 127.0.7.46:8080 check weight 10 inter 7909 fall 2 rise 1
+ server 127.0.7.047 127.0.7.47:8080 check weight 10 inter 9972 fall 2 rise 1
+ server 127.0.7.048 127.0.7.48:8080 check weight 10 inter 8815 fall 2 rise 1
+ server 127.0.7.049 127.0.7.49:8080 check weight 10 inter 7136 fall 2 rise 1
+ server 127.0.7.050 127.0.7.50:8080 check weight 10 inter 3425 fall 2 rise 1
+ server 127.0.7.051 127.0.7.51:8080 check weight 10 inter 2281 fall 2 rise 1
+ server 127.0.7.052 127.0.7.52:8080 check weight 10 inter 566 fall 2 rise 1
+ server 127.0.7.053 127.0.7.53:8080 check weight 10 inter 1656 fall 2 rise 1
+ server 127.0.7.054 127.0.7.54:8080 check weight 10 inter 1859 fall 2 rise 1
+ server 127.0.7.055 127.0.7.55:8080 check weight 10 inter 2631 fall 2 rise 1
+ server 127.0.7.056 127.0.7.56:8080 check weight 10 inter 7241 fall 2 rise 1
+ server 127.0.7.057 127.0.7.57:8080 check weight 10 inter 76 fall 2 rise 1
+ server 127.0.7.058 127.0.7.58:8080 check weight 10 inter 7689 fall 2 rise 1
+ server 127.0.7.059 127.0.7.59:8080 check weight 10 inter 2108 fall 2 rise 1
+ server 127.0.7.060 127.0.7.60:8080 check weight 10 inter 1477 fall 2 rise 1
+ server 127.0.7.061 127.0.7.61:8080 check weight 10 inter 9401 fall 2 rise 1
+ server 127.0.7.062 127.0.7.62:8080 check weight 10 inter 9192 fall 2 rise 1
+ server 127.0.7.063 127.0.7.63:8080 check weight 10 inter 2835 fall 2 rise 1
+ server 127.0.7.064 127.0.7.64:8080 check weight 10 inter 3066 fall 2 rise 1
+ server 127.0.7.065 127.0.7.65:8080 check weight 10 inter 3375 fall 2 rise 1
+ server 127.0.7.066 127.0.7.66:8080 check weight 10 inter 9760 fall 2 rise 1
+ server 127.0.7.067 127.0.7.67:8080 check weight 10 inter 3559 fall 2 rise 1
+ server 127.0.7.068 127.0.7.68:8080 check weight 10 inter 6213 fall 2 rise 1
+ server 127.0.7.069 127.0.7.69:8080 check weight 10 inter 7043 fall 2 rise 1
+ server 127.0.7.070 127.0.7.70:8080 check weight 10 inter 787 fall 2 rise 1
+ server 127.0.7.071 127.0.7.71:8080 check weight 10 inter 2184 fall 2 rise 1
+ server 127.0.7.072 127.0.7.72:8080 check weight 10 inter 3934 fall 2 rise 1
+ server 127.0.7.073 127.0.7.73:8080 check weight 10 inter 4647 fall 2 rise 1
+ server 127.0.7.074 127.0.7.74:8080 check weight 10 inter 1108 fall 2 rise 1
+ server 127.0.7.075 127.0.7.75:8080 check weight 10 inter 7950 fall 2 rise 1
+ server 127.0.7.076 127.0.7.76:8080 check weight 10 inter 17 fall 2 rise 1
+ server 127.0.7.077 127.0.7.77:8080 check weight 10 inter 826 fall 2 rise 1
+ server 127.0.7.078 127.0.7.78:8080 check weight 10 inter 9585 fall 2 rise 1
+ server 127.0.7.079 127.0.7.79:8080 check weight 10 inter 5232 fall 2 rise 1
+ server 127.0.7.080 127.0.7.80:8080 check weight 10 inter 9293 fall 2 rise 1
+ server 127.0.7.081 127.0.7.81:8080 check weight 10 inter 8526 fall 2 rise 1
+ server 127.0.7.082 127.0.7.82:8080 check weight 10 inter 1344 fall 2 rise 1
+ server 127.0.7.083 127.0.7.83:8080 check weight 10 inter 5219 fall 2 rise 1
+ server 127.0.7.084 127.0.7.84:8080 check weight 10 inter 206 fall 2 rise 1
+ server 127.0.7.085 127.0.7.85:8080 check weight 10 inter 8686 fall 2 rise 1
+ server 127.0.7.086 127.0.7.86:8080 check weight 10 inter 9803 fall 2 rise 1
+ server 127.0.7.087 127.0.7.87:8080 check weight 10 inter 9428 fall 2 rise 1
+ server 127.0.7.088 127.0.7.88:8080 check weight 10 inter 7482 fall 2 rise 1
+ server 127.0.7.089 127.0.7.89:8080 check weight 10 inter 4284 fall 2 rise 1
+ server 127.0.7.090 127.0.7.90:8080 check weight 10 inter 4539 fall 2 rise 1
+ server 127.0.7.091 127.0.7.91:8080 check weight 10 inter 2548 fall 2 rise 1
+ server 127.0.7.092 127.0.7.92:8080 check weight 10 inter 9518 fall 2 rise 1
+ server 127.0.7.093 127.0.7.93:8080 check weight 10 inter 171 fall 2 rise 1
+ server 127.0.7.094 127.0.7.94:8080 check weight 10 inter 5894 fall 2 rise 1
+ server 127.0.7.095 127.0.7.95:8080 check weight 10 inter 8582 fall 2 rise 1
+ server 127.0.7.096 127.0.7.96:8080 check weight 10 inter 2118 fall 2 rise 1
+ server 127.0.7.097 127.0.7.97:8080 check weight 10 inter 4033 fall 2 rise 1
+ server 127.0.7.098 127.0.7.98:8080 check weight 10 inter 1449 fall 2 rise 1
+ server 127.0.7.099 127.0.7.99:8080 check weight 10 inter 2057 fall 2 rise 1
+ server 127.0.7.100 127.0.7.100:8080 check weight 10 inter 2143 fall 2 rise 1
+ server 127.0.7.101 127.0.7.101:8080 check weight 10 inter 3563 fall 2 rise 1
+ server 127.0.7.102 127.0.7.102:8080 check weight 10 inter 3571 fall 2 rise 1
+ server 127.0.7.103 127.0.7.103:8080 check weight 10 inter 8006 fall 2 rise 1
+ server 127.0.7.104 127.0.7.104:8080 check weight 10 inter 7149 fall 2 rise 1
+ server 127.0.7.105 127.0.7.105:8080 check weight 10 inter 9390 fall 2 rise 1
+ server 127.0.7.106 127.0.7.106:8080 check weight 10 inter 3175 fall 2 rise 1
+ server 127.0.7.107 127.0.7.107:8080 check weight 10 inter 617 fall 2 rise 1
+ server 127.0.7.108 127.0.7.108:8080 check weight 10 inter 9499 fall 2 rise 1
+ server 127.0.7.109 127.0.7.109:8080 check weight 10 inter 4209 fall 2 rise 1
+ server 127.0.7.110 127.0.7.110:8080 check weight 10 inter 7807 fall 2 rise 1
+ server 127.0.7.111 127.0.7.111:8080 check weight 10 inter 8095 fall 2 rise 1
+ server 127.0.7.112 127.0.7.112:8080 check weight 10 inter 581 fall 2 rise 1
+ server 127.0.7.113 127.0.7.113:8080 check weight 10 inter 411 fall 2 rise 1
+ server 127.0.7.114 127.0.7.114:8080 check weight 10 inter 9664 fall 2 rise 1
+ server 127.0.7.115 127.0.7.115:8080 check weight 10 inter 8276 fall 2 rise 1
+ server 127.0.7.116 127.0.7.116:8080 check weight 10 inter 2983 fall 2 rise 1
+ server 127.0.7.117 127.0.7.117:8080 check weight 10 inter 2382 fall 2 rise 1
+ server 127.0.7.118 127.0.7.118:8080 check weight 10 inter 8434 fall 2 rise 1
+ server 127.0.7.119 127.0.7.119:8080 check weight 10 inter 8505 fall 2 rise 1
+ server 127.0.7.120 127.0.7.120:8080 check weight 10 inter 5188 fall 2 rise 1
+ server 127.0.7.121 127.0.7.121:8080 check weight 10 inter 8976 fall 2 rise 1
+ server 127.0.7.122 127.0.7.122:8080 check weight 10 inter 9021 fall 2 rise 1
+ server 127.0.7.123 127.0.7.123:8080 check weight 10 inter 1576 fall 2 rise 1
+ server 127.0.7.124 127.0.7.124:8080 check weight 10 inter 6023 fall 2 rise 1
+ server 127.0.7.125 127.0.7.125:8080 check weight 10 inter 3421 fall 2 rise 1
+ server 127.0.7.126 127.0.7.126:8080 check weight 10 inter 9954 fall 2 rise 1
+ server 127.0.7.127 127.0.7.127:8080 check weight 10 inter 151 fall 2 rise 1
+ server 127.0.7.128 127.0.7.128:8080 check weight 10 inter 5738 fall 2 rise 1
+ server 127.0.7.129 127.0.7.129:8080 check weight 10 inter 8463 fall 2 rise 1
+ server 127.0.7.130 127.0.7.130:8080 check weight 10 inter 2270 fall 2 rise 1
+ server 127.0.7.131 127.0.7.131:8080 check weight 10 inter 4238 fall 2 rise 1
+ server 127.0.7.132 127.0.7.132:8080 check weight 10 inter 7559 fall 2 rise 1
+ server 127.0.7.133 127.0.7.133:8080 check weight 10 inter 5704 fall 2 rise 1
+ server 127.0.7.134 127.0.7.134:8080 check weight 10 inter 7248 fall 2 rise 1
+ server 127.0.7.135 127.0.7.135:8080 check weight 10 inter 9049 fall 2 rise 1
+ server 127.0.7.136 127.0.7.136:8080 check weight 10 inter 7665 fall 2 rise 1
+ server 127.0.7.137 127.0.7.137:8080 check weight 10 inter 5093 fall 2 rise 1
+ server 127.0.7.138 127.0.7.138:8080 check weight 10 inter 8948 fall 2 rise 1
+ server 127.0.7.139 127.0.7.139:8080 check weight 10 inter 6300 fall 2 rise 1
+ server 127.0.7.140 127.0.7.140:8080 check weight 10 inter 4209 fall 2 rise 1
+ server 127.0.7.141 127.0.7.141:8080 check weight 10 inter 4291 fall 2 rise 1
+ server 127.0.7.142 127.0.7.142:8080 check weight 10 inter 8887 fall 2 rise 1
+ server 127.0.7.143 127.0.7.143:8080 check weight 10 inter 8426 fall 2 rise 1
+ server 127.0.7.144 127.0.7.144:8080 check weight 10 inter 3794 fall 2 rise 1
+ server 127.0.7.145 127.0.7.145:8080 check weight 10 inter 7103 fall 2 rise 1
+ server 127.0.7.146 127.0.7.146:8080 check weight 10 inter 9210 fall 2 rise 1
+ server 127.0.7.147 127.0.7.147:8080 check weight 10 inter 4291 fall 2 rise 1
+ server 127.0.7.148 127.0.7.148:8080 check weight 10 inter 6635 fall 2 rise 1
+ server 127.0.7.149 127.0.7.149:8080 check weight 10 inter 3471 fall 2 rise 1
+ server 127.0.7.150 127.0.7.150:8080 check weight 10 inter 4658 fall 2 rise 1
+ server 127.0.7.151 127.0.7.151:8080 check weight 10 inter 3744 fall 2 rise 1
+ server 127.0.7.152 127.0.7.152:8080 check weight 10 inter 976 fall 2 rise 1
+ server 127.0.7.153 127.0.7.153:8080 check weight 10 inter 5745 fall 2 rise 1
+ server 127.0.7.154 127.0.7.154:8080 check weight 10 inter 1221 fall 2 rise 1
+ server 127.0.7.155 127.0.7.155:8080 check weight 10 inter 3872 fall 2 rise 1
+ server 127.0.7.156 127.0.7.156:8080 check weight 10 inter 1368 fall 2 rise 1
+ server 127.0.7.157 127.0.7.157:8080 check weight 10 inter 519 fall 2 rise 1
+ server 127.0.7.158 127.0.7.158:8080 check weight 10 inter 6212 fall 2 rise 1
+ server 127.0.7.159 127.0.7.159:8080 check weight 10 inter 6176 fall 2 rise 1
+ server 127.0.7.160 127.0.7.160:8080 check weight 10 inter 3085 fall 2 rise 1
+ server 127.0.7.161 127.0.7.161:8080 check weight 10 inter 111 fall 2 rise 1
+ server 127.0.7.162 127.0.7.162:8080 check weight 10 inter 6153 fall 2 rise 1
+ server 127.0.7.163 127.0.7.163:8080 check weight 10 inter 3517 fall 2 rise 1
+ server 127.0.7.164 127.0.7.164:8080 check weight 10 inter 1125 fall 2 rise 1
+ server 127.0.7.165 127.0.7.165:8080 check weight 10 inter 1748 fall 2 rise 1
+ server 127.0.7.166 127.0.7.166:8080 check weight 10 inter 6400 fall 2 rise 1
+ server 127.0.7.167 127.0.7.167:8080 check weight 10 inter 8809 fall 2 rise 1
+ server 127.0.7.168 127.0.7.168:8080 check weight 10 inter 8834 fall 2 rise 1
+ server 127.0.7.169 127.0.7.169:8080 check weight 10 inter 2710 fall 2 rise 1
+ server 127.0.7.170 127.0.7.170:8080 check weight 10 inter 773 fall 2 rise 1
+ server 127.0.7.171 127.0.7.171:8080 check weight 10 inter 3871 fall 2 rise 1
+ server 127.0.7.172 127.0.7.172:8080 check weight 10 inter 7372 fall 2 rise 1
+ server 127.0.7.173 127.0.7.173:8080 check weight 10 inter 842 fall 2 rise 1
+ server 127.0.7.174 127.0.7.174:8080 check weight 10 inter 1729 fall 2 rise 1
+ server 127.0.7.175 127.0.7.175:8080 check weight 10 inter 4695 fall 2 rise 1
+ server 127.0.7.176 127.0.7.176:8080 check weight 10 inter 4441 fall 2 rise 1
+ server 127.0.7.177 127.0.7.177:8080 check weight 10 inter 9489 fall 2 rise 1
+ server 127.0.7.178 127.0.7.178:8080 check weight 10 inter 7012 fall 2 rise 1
+ server 127.0.7.179 127.0.7.179:8080 check weight 10 inter 5560 fall 2 rise 1
+ server 127.0.7.180 127.0.7.180:8080 check weight 10 inter 2450 fall 2 rise 1
+ server 127.0.7.181 127.0.7.181:8080 check weight 10 inter 2250 fall 2 rise 1
+ server 127.0.7.182 127.0.7.182:8080 check weight 10 inter 3505 fall 2 rise 1
+ server 127.0.7.183 127.0.7.183:8080 check weight 10 inter 3792 fall 2 rise 1
+ server 127.0.7.184 127.0.7.184:8080 check weight 10 inter 9862 fall 2 rise 1
+ server 127.0.7.185 127.0.7.185:8080 check weight 10 inter 3831 fall 2 rise 1
+ server 127.0.7.186 127.0.7.186:8080 check weight 10 inter 5740 fall 2 rise 1
+ server 127.0.7.187 127.0.7.187:8080 check weight 10 inter 6964 fall 2 rise 1
+ server 127.0.7.188 127.0.7.188:8080 check weight 10 inter 554 fall 2 rise 1
+ server 127.0.7.189 127.0.7.189:8080 check weight 10 inter 8560 fall 2 rise 1
+ server 127.0.7.190 127.0.7.190:8080 check weight 10 inter 4868 fall 2 rise 1
+ server 127.0.7.191 127.0.7.191:8080 check weight 10 inter 6664 fall 2 rise 1
+ server 127.0.7.192 127.0.7.192:8080 check weight 10 inter 8791 fall 2 rise 1
+ server 127.0.7.193 127.0.7.193:8080 check weight 10 inter 8378 fall 2 rise 1
+ server 127.0.7.194 127.0.7.194:8080 check weight 10 inter 8769 fall 2 rise 1
+ server 127.0.7.195 127.0.7.195:8080 check weight 10 inter 3838 fall 2 rise 1
+ server 127.0.7.196 127.0.7.196:8080 check weight 10 inter 9522 fall 2 rise 1
+ server 127.0.7.197 127.0.7.197:8080 check weight 10 inter 164 fall 2 rise 1
+ server 127.0.7.198 127.0.7.198:8080 check weight 10 inter 972 fall 2 rise 1
+ server 127.0.7.199 127.0.7.199:8080 check weight 10 inter 9391 fall 2 rise 1
+ server 127.0.7.200 127.0.7.200:8080 check weight 10 inter 105 fall 2 rise 1
+ server 127.0.7.201 127.0.7.201:8080 check weight 10 inter 2535 fall 2 rise 1
+ server 127.0.7.202 127.0.7.202:8080 check weight 10 inter 7201 fall 2 rise 1
+ server 127.0.7.203 127.0.7.203:8080 check weight 10 inter 3354 fall 2 rise 1
+ server 127.0.7.204 127.0.7.204:8080 check weight 10 inter 4691 fall 2 rise 1
+ server 127.0.7.205 127.0.7.205:8080 check weight 10 inter 1052 fall 2 rise 1
+ server 127.0.7.206 127.0.7.206:8080 check weight 10 inter 7829 fall 2 rise 1
+ server 127.0.7.207 127.0.7.207:8080 check weight 10 inter 144 fall 2 rise 1
+ server 127.0.7.208 127.0.7.208:8080 check weight 10 inter 2436 fall 2 rise 1
+ server 127.0.7.209 127.0.7.209:8080 check weight 10 inter 4410 fall 2 rise 1
+ server 127.0.7.210 127.0.7.210:8080 check weight 10 inter 4615 fall 2 rise 1
+ server 127.0.7.211 127.0.7.211:8080 check weight 10 inter 9371 fall 2 rise 1
+ server 127.0.7.212 127.0.7.212:8080 check weight 10 inter 8035 fall 2 rise 1
+ server 127.0.7.213 127.0.7.213:8080 check weight 10 inter 9206 fall 2 rise 1
+ server 127.0.7.214 127.0.7.214:8080 check weight 10 inter 5368 fall 2 rise 1
+ server 127.0.7.215 127.0.7.215:8080 check weight 10 inter 1441 fall 2 rise 1
+ server 127.0.7.216 127.0.7.216:8080 check weight 10 inter 6796 fall 2 rise 1
+ server 127.0.7.217 127.0.7.217:8080 check weight 10 inter 7354 fall 2 rise 1
+ server 127.0.7.218 127.0.7.218:8080 check weight 10 inter 7850 fall 2 rise 1
+ server 127.0.7.219 127.0.7.219:8080 check weight 10 inter 6141 fall 2 rise 1
+ server 127.0.7.220 127.0.7.220:8080 check weight 10 inter 1060 fall 2 rise 1
+ server 127.0.7.221 127.0.7.221:8080 check weight 10 inter 3137 fall 2 rise 1
+ server 127.0.7.222 127.0.7.222:8080 check weight 10 inter 4250 fall 2 rise 1
+ server 127.0.7.223 127.0.7.223:8080 check weight 10 inter 9160 fall 2 rise 1
+ server 127.0.7.224 127.0.7.224:8080 check weight 10 inter 6353 fall 2 rise 1
+ server 127.0.7.225 127.0.7.225:8080 check weight 10 inter 6471 fall 2 rise 1
+ server 127.0.7.226 127.0.7.226:8080 check weight 10 inter 9230 fall 2 rise 1
+ server 127.0.7.227 127.0.7.227:8080 check weight 10 inter 7529 fall 2 rise 1
+ server 127.0.7.228 127.0.7.228:8080 check weight 10 inter 213 fall 2 rise 1
+ server 127.0.7.229 127.0.7.229:8080 check weight 10 inter 1763 fall 2 rise 1
+ server 127.0.7.230 127.0.7.230:8080 check weight 10 inter 6268 fall 2 rise 1
+ server 127.0.7.231 127.0.7.231:8080 check weight 10 inter 5970 fall 2 rise 1
+ server 127.0.7.232 127.0.7.232:8080 check weight 10 inter 3630 fall 2 rise 1
+ server 127.0.7.233 127.0.7.233:8080 check weight 10 inter 9745 fall 2 rise 1
+ server 127.0.7.234 127.0.7.234:8080 check weight 10 inter 1296 fall 2 rise 1
+ server 127.0.7.235 127.0.7.235:8080 check weight 10 inter 9766 fall 2 rise 1
+ server 127.0.7.236 127.0.7.236:8080 check weight 10 inter 5422 fall 2 rise 1
+ server 127.0.7.237 127.0.7.237:8080 check weight 10 inter 4449 fall 2 rise 1
+ server 127.0.7.238 127.0.7.238:8080 check weight 10 inter 6811 fall 2 rise 1
+ server 127.0.7.239 127.0.7.239:8080 check weight 10 inter 9387 fall 2 rise 1
+ server 127.0.7.240 127.0.7.240:8080 check weight 10 inter 395 fall 2 rise 1
+ server 127.0.7.241 127.0.7.241:8080 check weight 10 inter 2514 fall 2 rise 1
+ server 127.0.7.242 127.0.7.242:8080 check weight 10 inter 9771 fall 2 rise 1
+ server 127.0.7.243 127.0.7.243:8080 check weight 10 inter 8628 fall 2 rise 1
+ server 127.0.7.244 127.0.7.244:8080 check weight 10 inter 3478 fall 2 rise 1
+ server 127.0.7.245 127.0.7.245:8080 check weight 10 inter 8861 fall 2 rise 1
+ server 127.0.7.246 127.0.7.246:8080 check weight 10 inter 7695 fall 2 rise 1
+ server 127.0.7.247 127.0.7.247:8080 check weight 10 inter 1484 fall 2 rise 1
+ server 127.0.7.248 127.0.7.248:8080 check weight 10 inter 8364 fall 2 rise 1
+ server 127.0.7.249 127.0.7.249:8080 check weight 10 inter 5313 fall 2 rise 1
+ server 127.0.7.250 127.0.7.250:8080 check weight 10 inter 7335 fall 2 rise 1
+ server 127.0.8.001 127.0.8.1:8080 check weight 10 inter 609 fall 2 rise 1
+ server 127.0.8.002 127.0.8.2:8080 check weight 10 inter 3919 fall 2 rise 1
+ server 127.0.8.003 127.0.8.3:8080 check weight 10 inter 3873 fall 2 rise 1
+ server 127.0.8.004 127.0.8.4:8080 check weight 10 inter 9742 fall 2 rise 1
+ server 127.0.8.005 127.0.8.5:8080 check weight 10 inter 4278 fall 2 rise 1
+ server 127.0.8.006 127.0.8.6:8080 check weight 10 inter 6434 fall 2 rise 1
+ server 127.0.8.007 127.0.8.7:8080 check weight 10 inter 3551 fall 2 rise 1
+ server 127.0.8.008 127.0.8.8:8080 check weight 10 inter 3927 fall 2 rise 1
+ server 127.0.8.009 127.0.8.9:8080 check weight 10 inter 9143 fall 2 rise 1
+ server 127.0.8.010 127.0.8.10:8080 check weight 10 inter 9750 fall 2 rise 1
+ server 127.0.8.011 127.0.8.11:8080 check weight 10 inter 7543 fall 2 rise 1
+ server 127.0.8.012 127.0.8.12:8080 check weight 10 inter 2953 fall 2 rise 1
+ server 127.0.8.013 127.0.8.13:8080 check weight 10 inter 1258 fall 2 rise 1
+ server 127.0.8.014 127.0.8.14:8080 check weight 10 inter 8728 fall 2 rise 1
+ server 127.0.8.015 127.0.8.15:8080 check weight 10 inter 4396 fall 2 rise 1
+ server 127.0.8.016 127.0.8.16:8080 check weight 10 inter 6793 fall 2 rise 1
+ server 127.0.8.017 127.0.8.17:8080 check weight 10 inter 9274 fall 2 rise 1
+ server 127.0.8.018 127.0.8.18:8080 check weight 10 inter 7910 fall 2 rise 1
+ server 127.0.8.019 127.0.8.19:8080 check weight 10 inter 755 fall 2 rise 1
+ server 127.0.8.020 127.0.8.20:8080 check weight 10 inter 891 fall 2 rise 1
+ server 127.0.8.021 127.0.8.21:8080 check weight 10 inter 1810 fall 2 rise 1
+ server 127.0.8.022 127.0.8.22:8080 check weight 10 inter 2565 fall 2 rise 1
+ server 127.0.8.023 127.0.8.23:8080 check weight 10 inter 401 fall 2 rise 1
+ server 127.0.8.024 127.0.8.24:8080 check weight 10 inter 9208 fall 2 rise 1
+ server 127.0.8.025 127.0.8.25:8080 check weight 10 inter 3996 fall 2 rise 1
+ server 127.0.8.026 127.0.8.26:8080 check weight 10 inter 724 fall 2 rise 1
+ server 127.0.8.027 127.0.8.27:8080 check weight 10 inter 9765 fall 2 rise 1
+ server 127.0.8.028 127.0.8.28:8080 check weight 10 inter 1599 fall 2 rise 1
+ server 127.0.8.029 127.0.8.29:8080 check weight 10 inter 7319 fall 2 rise 1
+ server 127.0.8.030 127.0.8.30:8080 check weight 10 inter 1086 fall 2 rise 1
+ server 127.0.8.031 127.0.8.31:8080 check weight 10 inter 1589 fall 2 rise 1
+ server 127.0.8.032 127.0.8.32:8080 check weight 10 inter 2955 fall 2 rise 1
+ server 127.0.8.033 127.0.8.33:8080 check weight 10 inter 9034 fall 2 rise 1
+ server 127.0.8.034 127.0.8.34:8080 check weight 10 inter 946 fall 2 rise 1
+ server 127.0.8.035 127.0.8.35:8080 check weight 10 inter 4425 fall 2 rise 1
+ server 127.0.8.036 127.0.8.36:8080 check weight 10 inter 7624 fall 2 rise 1
+ server 127.0.8.037 127.0.8.37:8080 check weight 10 inter 8476 fall 2 rise 1
+ server 127.0.8.038 127.0.8.38:8080 check weight 10 inter 2403 fall 2 rise 1
+ server 127.0.8.039 127.0.8.39:8080 check weight 10 inter 5354 fall 2 rise 1
+ server 127.0.8.040 127.0.8.40:8080 check weight 10 inter 2694 fall 2 rise 1
+ server 127.0.8.041 127.0.8.41:8080 check weight 10 inter 7150 fall 2 rise 1
+ server 127.0.8.042 127.0.8.42:8080 check weight 10 inter 7167 fall 2 rise 1
+ server 127.0.8.043 127.0.8.43:8080 check weight 10 inter 5305 fall 2 rise 1
+ server 127.0.8.044 127.0.8.44:8080 check weight 10 inter 4281 fall 2 rise 1
+ server 127.0.8.045 127.0.8.45:8080 check weight 10 inter 3807 fall 2 rise 1
+ server 127.0.8.046 127.0.8.46:8080 check weight 10 inter 6351 fall 2 rise 1
+ server 127.0.8.047 127.0.8.47:8080 check weight 10 inter 6959 fall 2 rise 1
+ server 127.0.8.048 127.0.8.48:8080 check weight 10 inter 4710 fall 2 rise 1
+ server 127.0.8.049 127.0.8.49:8080 check weight 10 inter 7502 fall 2 rise 1
+ server 127.0.8.050 127.0.8.50:8080 check weight 10 inter 211 fall 2 rise 1
+ server 127.0.8.051 127.0.8.51:8080 check weight 10 inter 6488 fall 2 rise 1
+ server 127.0.8.052 127.0.8.52:8080 check weight 10 inter 9341 fall 2 rise 1
+ server 127.0.8.053 127.0.8.53:8080 check weight 10 inter 2637 fall 2 rise 1
+ server 127.0.8.054 127.0.8.54:8080 check weight 10 inter 7594 fall 2 rise 1
+ server 127.0.8.055 127.0.8.55:8080 check weight 10 inter 4014 fall 2 rise 1
+ server 127.0.8.056 127.0.8.56:8080 check weight 10 inter 8630 fall 2 rise 1
+ server 127.0.8.057 127.0.8.57:8080 check weight 10 inter 58 fall 2 rise 1
+ server 127.0.8.058 127.0.8.58:8080 check weight 10 inter 1428 fall 2 rise 1
+ server 127.0.8.059 127.0.8.59:8080 check weight 10 inter 3430 fall 2 rise 1
+ server 127.0.8.060 127.0.8.60:8080 check weight 10 inter 2478 fall 2 rise 1
+ server 127.0.8.061 127.0.8.61:8080 check weight 10 inter 5420 fall 2 rise 1
+ server 127.0.8.062 127.0.8.62:8080 check weight 10 inter 8649 fall 2 rise 1
+ server 127.0.8.063 127.0.8.63:8080 check weight 10 inter 4941 fall 2 rise 1
+ server 127.0.8.064 127.0.8.64:8080 check weight 10 inter 5214 fall 2 rise 1
+ server 127.0.8.065 127.0.8.65:8080 check weight 10 inter 6767 fall 2 rise 1
+ server 127.0.8.066 127.0.8.66:8080 check weight 10 inter 3046 fall 2 rise 1
+ server 127.0.8.067 127.0.8.67:8080 check weight 10 inter 8641 fall 2 rise 1
+ server 127.0.8.068 127.0.8.68:8080 check weight 10 inter 4000 fall 2 rise 1
+ server 127.0.8.069 127.0.8.69:8080 check weight 10 inter 5744 fall 2 rise 1
+ server 127.0.8.070 127.0.8.70:8080 check weight 10 inter 3723 fall 2 rise 1
+ server 127.0.8.071 127.0.8.71:8080 check weight 10 inter 8031 fall 2 rise 1
+ server 127.0.8.072 127.0.8.72:8080 check weight 10 inter 7105 fall 2 rise 1
+ server 127.0.8.073 127.0.8.73:8080 check weight 10 inter 2248 fall 2 rise 1
+ server 127.0.8.074 127.0.8.74:8080 check weight 10 inter 5045 fall 2 rise 1
+ server 127.0.8.075 127.0.8.75:8080 check weight 10 inter 8576 fall 2 rise 1
+ server 127.0.8.076 127.0.8.76:8080 check weight 10 inter 6101 fall 2 rise 1
+ server 127.0.8.077 127.0.8.77:8080 check weight 10 inter 5241 fall 2 rise 1
+ server 127.0.8.078 127.0.8.78:8080 check weight 10 inter 2924 fall 2 rise 1
+ server 127.0.8.079 127.0.8.79:8080 check weight 10 inter 19 fall 2 rise 1
+ server 127.0.8.080 127.0.8.80:8080 check weight 10 inter 2269 fall 2 rise 1
+ server 127.0.8.081 127.0.8.81:8080 check weight 10 inter 2298 fall 2 rise 1
+ server 127.0.8.082 127.0.8.82:8080 check weight 10 inter 2235 fall 2 rise 1
+ server 127.0.8.083 127.0.8.83:8080 check weight 10 inter 3896 fall 2 rise 1
+ server 127.0.8.084 127.0.8.84:8080 check weight 10 inter 7865 fall 2 rise 1
+ server 127.0.8.085 127.0.8.85:8080 check weight 10 inter 162 fall 2 rise 1
+ server 127.0.8.086 127.0.8.86:8080 check weight 10 inter 9603 fall 2 rise 1
+ server 127.0.8.087 127.0.8.87:8080 check weight 10 inter 6946 fall 2 rise 1
+ server 127.0.8.088 127.0.8.88:8080 check weight 10 inter 2717 fall 2 rise 1
+ server 127.0.8.089 127.0.8.89:8080 check weight 10 inter 854 fall 2 rise 1
+ server 127.0.8.090 127.0.8.90:8080 check weight 10 inter 1871 fall 2 rise 1
+ server 127.0.8.091 127.0.8.91:8080 check weight 10 inter 938 fall 2 rise 1
+ server 127.0.8.092 127.0.8.92:8080 check weight 10 inter 6629 fall 2 rise 1
+ server 127.0.8.093 127.0.8.93:8080 check weight 10 inter 2562 fall 2 rise 1
+ server 127.0.8.094 127.0.8.94:8080 check weight 10 inter 9142 fall 2 rise 1
+ server 127.0.8.095 127.0.8.95:8080 check weight 10 inter 1779 fall 2 rise 1
+ server 127.0.8.096 127.0.8.96:8080 check weight 10 inter 5288 fall 2 rise 1
+ server 127.0.8.097 127.0.8.97:8080 check weight 10 inter 8617 fall 2 rise 1
+ server 127.0.8.098 127.0.8.98:8080 check weight 10 inter 834 fall 2 rise 1
+ server 127.0.8.099 127.0.8.99:8080 check weight 10 inter 9937 fall 2 rise 1
+ server 127.0.8.100 127.0.8.100:8080 check weight 10 inter 334 fall 2 rise 1
+ server 127.0.8.101 127.0.8.101:8080 check weight 10 inter 5916 fall 2 rise 1
+ server 127.0.8.102 127.0.8.102:8080 check weight 10 inter 5330 fall 2 rise 1
+ server 127.0.8.103 127.0.8.103:8080 check weight 10 inter 9848 fall 2 rise 1
+ server 127.0.8.104 127.0.8.104:8080 check weight 10 inter 4945 fall 2 rise 1
+ server 127.0.8.105 127.0.8.105:8080 check weight 10 inter 4588 fall 2 rise 1
+ server 127.0.8.106 127.0.8.106:8080 check weight 10 inter 2290 fall 2 rise 1
+ server 127.0.8.107 127.0.8.107:8080 check weight 10 inter 4918 fall 2 rise 1
+ server 127.0.8.108 127.0.8.108:8080 check weight 10 inter 6853 fall 2 rise 1
+ server 127.0.8.109 127.0.8.109:8080 check weight 10 inter 3025 fall 2 rise 1
+ server 127.0.8.110 127.0.8.110:8080 check weight 10 inter 966 fall 2 rise 1
+ server 127.0.8.111 127.0.8.111:8080 check weight 10 inter 5503 fall 2 rise 1
+ server 127.0.8.112 127.0.8.112:8080 check weight 10 inter 135 fall 2 rise 1
+ server 127.0.8.113 127.0.8.113:8080 check weight 10 inter 2662 fall 2 rise 1
+ server 127.0.8.114 127.0.8.114:8080 check weight 10 inter 6377 fall 2 rise 1
+ server 127.0.8.115 127.0.8.115:8080 check weight 10 inter 2029 fall 2 rise 1
+ server 127.0.8.116 127.0.8.116:8080 check weight 10 inter 6454 fall 2 rise 1
+ server 127.0.8.117 127.0.8.117:8080 check weight 10 inter 8522 fall 2 rise 1
+ server 127.0.8.118 127.0.8.118:8080 check weight 10 inter 1576 fall 2 rise 1
+ server 127.0.8.119 127.0.8.119:8080 check weight 10 inter 2132 fall 2 rise 1
+ server 127.0.8.120 127.0.8.120:8080 check weight 10 inter 5125 fall 2 rise 1
+ server 127.0.8.121 127.0.8.121:8080 check weight 10 inter 4344 fall 2 rise 1
+ server 127.0.8.122 127.0.8.122:8080 check weight 10 inter 812 fall 2 rise 1
+ server 127.0.8.123 127.0.8.123:8080 check weight 10 inter 3559 fall 2 rise 1
+ server 127.0.8.124 127.0.8.124:8080 check weight 10 inter 2940 fall 2 rise 1
+ server 127.0.8.125 127.0.8.125:8080 check weight 10 inter 8593 fall 2 rise 1
+ server 127.0.8.126 127.0.8.126:8080 check weight 10 inter 587 fall 2 rise 1
+ server 127.0.8.127 127.0.8.127:8080 check weight 10 inter 3389 fall 2 rise 1
+ server 127.0.8.128 127.0.8.128:8080 check weight 10 inter 8865 fall 2 rise 1
+ server 127.0.8.129 127.0.8.129:8080 check weight 10 inter 3310 fall 2 rise 1
+ server 127.0.8.130 127.0.8.130:8080 check weight 10 inter 4015 fall 2 rise 1
+ server 127.0.8.131 127.0.8.131:8080 check weight 10 inter 4688 fall 2 rise 1
+ server 127.0.8.132 127.0.8.132:8080 check weight 10 inter 3449 fall 2 rise 1
+ server 127.0.8.133 127.0.8.133:8080 check weight 10 inter 4663 fall 2 rise 1
+ server 127.0.8.134 127.0.8.134:8080 check weight 10 inter 6561 fall 2 rise 1
+ server 127.0.8.135 127.0.8.135:8080 check weight 10 inter 9483 fall 2 rise 1
+ server 127.0.8.136 127.0.8.136:8080 check weight 10 inter 8511 fall 2 rise 1
+ server 127.0.8.137 127.0.8.137:8080 check weight 10 inter 2707 fall 2 rise 1
+ server 127.0.8.138 127.0.8.138:8080 check weight 10 inter 3053 fall 2 rise 1
+ server 127.0.8.139 127.0.8.139:8080 check weight 10 inter 385 fall 2 rise 1
+ server 127.0.8.140 127.0.8.140:8080 check weight 10 inter 5872 fall 2 rise 1
+ server 127.0.8.141 127.0.8.141:8080 check weight 10 inter 2157 fall 2 rise 1
+ server 127.0.8.142 127.0.8.142:8080 check weight 10 inter 5701 fall 2 rise 1
+ server 127.0.8.143 127.0.8.143:8080 check weight 10 inter 4061 fall 2 rise 1
+ server 127.0.8.144 127.0.8.144:8080 check weight 10 inter 9021 fall 2 rise 1
+ server 127.0.8.145 127.0.8.145:8080 check weight 10 inter 2587 fall 2 rise 1
+ server 127.0.8.146 127.0.8.146:8080 check weight 10 inter 6883 fall 2 rise 1
+ server 127.0.8.147 127.0.8.147:8080 check weight 10 inter 125 fall 2 rise 1
+ server 127.0.8.148 127.0.8.148:8080 check weight 10 inter 3230 fall 2 rise 1
+ server 127.0.8.149 127.0.8.149:8080 check weight 10 inter 9390 fall 2 rise 1
+ server 127.0.8.150 127.0.8.150:8080 check weight 10 inter 6431 fall 2 rise 1
+ server 127.0.8.151 127.0.8.151:8080 check weight 10 inter 8462 fall 2 rise 1
+ server 127.0.8.152 127.0.8.152:8080 check weight 10 inter 1254 fall 2 rise 1
+ server 127.0.8.153 127.0.8.153:8080 check weight 10 inter 712 fall 2 rise 1
+ server 127.0.8.154 127.0.8.154:8080 check weight 10 inter 3332 fall 2 rise 1
+ server 127.0.8.155 127.0.8.155:8080 check weight 10 inter 9366 fall 2 rise 1
+ server 127.0.8.156 127.0.8.156:8080 check weight 10 inter 2890 fall 2 rise 1
+ server 127.0.8.157 127.0.8.157:8080 check weight 10 inter 6433 fall 2 rise 1
+ server 127.0.8.158 127.0.8.158:8080 check weight 10 inter 2643 fall 2 rise 1
+ server 127.0.8.159 127.0.8.159:8080 check weight 10 inter 3171 fall 2 rise 1
+ server 127.0.8.160 127.0.8.160:8080 check weight 10 inter 3377 fall 2 rise 1
+ server 127.0.8.161 127.0.8.161:8080 check weight 10 inter 1800 fall 2 rise 1
+ server 127.0.8.162 127.0.8.162:8080 check weight 10 inter 3076 fall 2 rise 1
+ server 127.0.8.163 127.0.8.163:8080 check weight 10 inter 6071 fall 2 rise 1
+ server 127.0.8.164 127.0.8.164:8080 check weight 10 inter 9995 fall 2 rise 1
+ server 127.0.8.165 127.0.8.165:8080 check weight 10 inter 7962 fall 2 rise 1
+ server 127.0.8.166 127.0.8.166:8080 check weight 10 inter 1152 fall 2 rise 1
+ server 127.0.8.167 127.0.8.167:8080 check weight 10 inter 7728 fall 2 rise 1
+ server 127.0.8.168 127.0.8.168:8080 check weight 10 inter 769 fall 2 rise 1
+ server 127.0.8.169 127.0.8.169:8080 check weight 10 inter 2899 fall 2 rise 1
+ server 127.0.8.170 127.0.8.170:8080 check weight 10 inter 575 fall 2 rise 1
+ server 127.0.8.171 127.0.8.171:8080 check weight 10 inter 2297 fall 2 rise 1
+ server 127.0.8.172 127.0.8.172:8080 check weight 10 inter 2320 fall 2 rise 1
+ server 127.0.8.173 127.0.8.173:8080 check weight 10 inter 711 fall 2 rise 1
+ server 127.0.8.174 127.0.8.174:8080 check weight 10 inter 9522 fall 2 rise 1
+ server 127.0.8.175 127.0.8.175:8080 check weight 10 inter 7379 fall 2 rise 1
+ server 127.0.8.176 127.0.8.176:8080 check weight 10 inter 309 fall 2 rise 1
+ server 127.0.8.177 127.0.8.177:8080 check weight 10 inter 9984 fall 2 rise 1
+ server 127.0.8.178 127.0.8.178:8080 check weight 10 inter 3249 fall 2 rise 1
+ server 127.0.8.179 127.0.8.179:8080 check weight 10 inter 9761 fall 2 rise 1
+ server 127.0.8.180 127.0.8.180:8080 check weight 10 inter 1671 fall 2 rise 1
+ server 127.0.8.181 127.0.8.181:8080 check weight 10 inter 277 fall 2 rise 1
+ server 127.0.8.182 127.0.8.182:8080 check weight 10 inter 8722 fall 2 rise 1
+ server 127.0.8.183 127.0.8.183:8080 check weight 10 inter 8629 fall 2 rise 1
+ server 127.0.8.184 127.0.8.184:8080 check weight 10 inter 9944 fall 2 rise 1
+ server 127.0.8.185 127.0.8.185:8080 check weight 10 inter 6831 fall 2 rise 1
+ server 127.0.8.186 127.0.8.186:8080 check weight 10 inter 9758 fall 2 rise 1
+ server 127.0.8.187 127.0.8.187:8080 check weight 10 inter 7694 fall 2 rise 1
+ server 127.0.8.188 127.0.8.188:8080 check weight 10 inter 553 fall 2 rise 1
+ server 127.0.8.189 127.0.8.189:8080 check weight 10 inter 7611 fall 2 rise 1
+ server 127.0.8.190 127.0.8.190:8080 check weight 10 inter 4900 fall 2 rise 1
+ server 127.0.8.191 127.0.8.191:8080 check weight 10 inter 2175 fall 2 rise 1
+ server 127.0.8.192 127.0.8.192:8080 check weight 10 inter 9696 fall 2 rise 1
+ server 127.0.8.193 127.0.8.193:8080 check weight 10 inter 1725 fall 2 rise 1
+ server 127.0.8.194 127.0.8.194:8080 check weight 10 inter 9289 fall 2 rise 1
+ server 127.0.8.195 127.0.8.195:8080 check weight 10 inter 5758 fall 2 rise 1
+ server 127.0.8.196 127.0.8.196:8080 check weight 10 inter 4746 fall 2 rise 1
+ server 127.0.8.197 127.0.8.197:8080 check weight 10 inter 9323 fall 2 rise 1
+ server 127.0.8.198 127.0.8.198:8080 check weight 10 inter 2774 fall 2 rise 1
+ server 127.0.8.199 127.0.8.199:8080 check weight 10 inter 7488 fall 2 rise 1
+ server 127.0.8.200 127.0.8.200:8080 check weight 10 inter 1968 fall 2 rise 1
+ server 127.0.8.201 127.0.8.201:8080 check weight 10 inter 5382 fall 2 rise 1
+ server 127.0.8.202 127.0.8.202:8080 check weight 10 inter 7807 fall 2 rise 1
+ server 127.0.8.203 127.0.8.203:8080 check weight 10 inter 9239 fall 2 rise 1
+ server 127.0.8.204 127.0.8.204:8080 check weight 10 inter 5180 fall 2 rise 1
+ server 127.0.8.205 127.0.8.205:8080 check weight 10 inter 5366 fall 2 rise 1
+ server 127.0.8.206 127.0.8.206:8080 check weight 10 inter 6453 fall 2 rise 1
+ server 127.0.8.207 127.0.8.207:8080 check weight 10 inter 3098 fall 2 rise 1
+ server 127.0.8.208 127.0.8.208:8080 check weight 10 inter 4360 fall 2 rise 1
+ server 127.0.8.209 127.0.8.209:8080 check weight 10 inter 6684 fall 2 rise 1
+ server 127.0.8.210 127.0.8.210:8080 check weight 10 inter 2987 fall 2 rise 1
+ server 127.0.8.211 127.0.8.211:8080 check weight 10 inter 2704 fall 2 rise 1
+ server 127.0.8.212 127.0.8.212:8080 check weight 10 inter 2315 fall 2 rise 1
+ server 127.0.8.213 127.0.8.213:8080 check weight 10 inter 8999 fall 2 rise 1
+ server 127.0.8.214 127.0.8.214:8080 check weight 10 inter 8473 fall 2 rise 1
+ server 127.0.8.215 127.0.8.215:8080 check weight 10 inter 1284 fall 2 rise 1
+ server 127.0.8.216 127.0.8.216:8080 check weight 10 inter 7314 fall 2 rise 1
+ server 127.0.8.217 127.0.8.217:8080 check weight 10 inter 6259 fall 2 rise 1
+ server 127.0.8.218 127.0.8.218:8080 check weight 10 inter 9506 fall 2 rise 1
+ server 127.0.8.219 127.0.8.219:8080 check weight 10 inter 3608 fall 2 rise 1
+ server 127.0.8.220 127.0.8.220:8080 check weight 10 inter 896 fall 2 rise 1
+ server 127.0.8.221 127.0.8.221:8080 check weight 10 inter 5653 fall 2 rise 1
+ server 127.0.8.222 127.0.8.222:8080 check weight 10 inter 1350 fall 2 rise 1
+ server 127.0.8.223 127.0.8.223:8080 check weight 10 inter 9095 fall 2 rise 1
+ server 127.0.8.224 127.0.8.224:8080 check weight 10 inter 437 fall 2 rise 1
+ server 127.0.8.225 127.0.8.225:8080 check weight 10 inter 4647 fall 2 rise 1
+ server 127.0.8.226 127.0.8.226:8080 check weight 10 inter 4711 fall 2 rise 1
+ server 127.0.8.227 127.0.8.227:8080 check weight 10 inter 1148 fall 2 rise 1
+ server 127.0.8.228 127.0.8.228:8080 check weight 10 inter 7840 fall 2 rise 1
+ server 127.0.8.229 127.0.8.229:8080 check weight 10 inter 3636 fall 2 rise 1
+ server 127.0.8.230 127.0.8.230:8080 check weight 10 inter 6731 fall 2 rise 1
+ server 127.0.8.231 127.0.8.231:8080 check weight 10 inter 2083 fall 2 rise 1
+ server 127.0.8.232 127.0.8.232:8080 check weight 10 inter 8564 fall 2 rise 1
+ server 127.0.8.233 127.0.8.233:8080 check weight 10 inter 4290 fall 2 rise 1
+ server 127.0.8.234 127.0.8.234:8080 check weight 10 inter 2326 fall 2 rise 1
+ server 127.0.8.235 127.0.8.235:8080 check weight 10 inter 2755 fall 2 rise 1
+ server 127.0.8.236 127.0.8.236:8080 check weight 10 inter 7100 fall 2 rise 1
+ server 127.0.8.237 127.0.8.237:8080 check weight 10 inter 7571 fall 2 rise 1
+ server 127.0.8.238 127.0.8.238:8080 check weight 10 inter 453 fall 2 rise 1
+ server 127.0.8.239 127.0.8.239:8080 check weight 10 inter 969 fall 2 rise 1
+ server 127.0.8.240 127.0.8.240:8080 check weight 10 inter 8814 fall 2 rise 1
+ server 127.0.8.241 127.0.8.241:8080 check weight 10 inter 2282 fall 2 rise 1
+ server 127.0.8.242 127.0.8.242:8080 check weight 10 inter 5466 fall 2 rise 1
+ server 127.0.8.243 127.0.8.243:8080 check weight 10 inter 7523 fall 2 rise 1
+ server 127.0.8.244 127.0.8.244:8080 check weight 10 inter 9360 fall 2 rise 1
+ server 127.0.8.245 127.0.8.245:8080 check weight 10 inter 1981 fall 2 rise 1
+ server 127.0.8.246 127.0.8.246:8080 check weight 10 inter 4571 fall 2 rise 1
+ server 127.0.8.247 127.0.8.247:8080 check weight 10 inter 4417 fall 2 rise 1
+ server 127.0.8.248 127.0.8.248:8080 check weight 10 inter 158 fall 2 rise 1
+ server 127.0.8.249 127.0.8.249:8080 check weight 10 inter 9246 fall 2 rise 1
+ server 127.0.8.250 127.0.8.250:8080 check weight 10 inter 7241 fall 2 rise 1
+ server 127.0.9.001 127.0.9.1:8080 check weight 10 inter 3436 fall 2 rise 1
+ server 127.0.9.002 127.0.9.2:8080 check weight 10 inter 3175 fall 2 rise 1
+ server 127.0.9.003 127.0.9.3:8080 check weight 10 inter 2697 fall 2 rise 1
+ server 127.0.9.004 127.0.9.4:8080 check weight 10 inter 5921 fall 2 rise 1
+ server 127.0.9.005 127.0.9.5:8080 check weight 10 inter 1971 fall 2 rise 1
+ server 127.0.9.006 127.0.9.6:8080 check weight 10 inter 9962 fall 2 rise 1
+ server 127.0.9.007 127.0.9.7:8080 check weight 10 inter 7118 fall 2 rise 1
+ server 127.0.9.008 127.0.9.8:8080 check weight 10 inter 2181 fall 2 rise 1
+ server 127.0.9.009 127.0.9.9:8080 check weight 10 inter 9210 fall 2 rise 1
+ server 127.0.9.010 127.0.9.10:8080 check weight 10 inter 6467 fall 2 rise 1
+ server 127.0.9.011 127.0.9.11:8080 check weight 10 inter 3324 fall 2 rise 1
+ server 127.0.9.012 127.0.9.12:8080 check weight 10 inter 1032 fall 2 rise 1
+ server 127.0.9.013 127.0.9.13:8080 check weight 10 inter 6945 fall 2 rise 1
+ server 127.0.9.014 127.0.9.14:8080 check weight 10 inter 3222 fall 2 rise 1
+ server 127.0.9.015 127.0.9.15:8080 check weight 10 inter 1521 fall 2 rise 1
+ server 127.0.9.016 127.0.9.16:8080 check weight 10 inter 6955 fall 2 rise 1
+ server 127.0.9.017 127.0.9.17:8080 check weight 10 inter 6725 fall 2 rise 1
+ server 127.0.9.018 127.0.9.18:8080 check weight 10 inter 5015 fall 2 rise 1
+ server 127.0.9.019 127.0.9.19:8080 check weight 10 inter 7380 fall 2 rise 1
+ server 127.0.9.020 127.0.9.20:8080 check weight 10 inter 5418 fall 2 rise 1
+ server 127.0.9.021 127.0.9.21:8080 check weight 10 inter 89 fall 2 rise 1
+ server 127.0.9.022 127.0.9.22:8080 check weight 10 inter 9215 fall 2 rise 1
+ server 127.0.9.023 127.0.9.23:8080 check weight 10 inter 1983 fall 2 rise 1
+ server 127.0.9.024 127.0.9.24:8080 check weight 10 inter 2534 fall 2 rise 1
+ server 127.0.9.025 127.0.9.25:8080 check weight 10 inter 1282 fall 2 rise 1
+ server 127.0.9.026 127.0.9.26:8080 check weight 10 inter 6832 fall 2 rise 1
+ server 127.0.9.027 127.0.9.27:8080 check weight 10 inter 3181 fall 2 rise 1
+ server 127.0.9.028 127.0.9.28:8080 check weight 10 inter 3073 fall 2 rise 1
+ server 127.0.9.029 127.0.9.29:8080 check weight 10 inter 901 fall 2 rise 1
+ server 127.0.9.030 127.0.9.30:8080 check weight 10 inter 9778 fall 2 rise 1
+ server 127.0.9.031 127.0.9.31:8080 check weight 10 inter 8263 fall 2 rise 1
+ server 127.0.9.032 127.0.9.32:8080 check weight 10 inter 3947 fall 2 rise 1
+ server 127.0.9.033 127.0.9.33:8080 check weight 10 inter 7538 fall 2 rise 1
+ server 127.0.9.034 127.0.9.34:8080 check weight 10 inter 7541 fall 2 rise 1
+ server 127.0.9.035 127.0.9.35:8080 check weight 10 inter 9486 fall 2 rise 1
+ server 127.0.9.036 127.0.9.36:8080 check weight 10 inter 6943 fall 2 rise 1
+ server 127.0.9.037 127.0.9.37:8080 check weight 10 inter 2029 fall 2 rise 1
+ server 127.0.9.038 127.0.9.38:8080 check weight 10 inter 3302 fall 2 rise 1
+ server 127.0.9.039 127.0.9.39:8080 check weight 10 inter 9608 fall 2 rise 1
+ server 127.0.9.040 127.0.9.40:8080 check weight 10 inter 860 fall 2 rise 1
+ server 127.0.9.041 127.0.9.41:8080 check weight 10 inter 4336 fall 2 rise 1
+ server 127.0.9.042 127.0.9.42:8080 check weight 10 inter 8717 fall 2 rise 1
+ server 127.0.9.043 127.0.9.43:8080 check weight 10 inter 3883 fall 2 rise 1
+ server 127.0.9.044 127.0.9.44:8080 check weight 10 inter 2548 fall 2 rise 1
+ server 127.0.9.045 127.0.9.45:8080 check weight 10 inter 5153 fall 2 rise 1
+ server 127.0.9.046 127.0.9.46:8080 check weight 10 inter 3116 fall 2 rise 1
+ server 127.0.9.047 127.0.9.47:8080 check weight 10 inter 4587 fall 2 rise 1
+ server 127.0.9.048 127.0.9.48:8080 check weight 10 inter 5303 fall 2 rise 1
+ server 127.0.9.049 127.0.9.49:8080 check weight 10 inter 5220 fall 2 rise 1
+ server 127.0.9.050 127.0.9.50:8080 check weight 10 inter 9527 fall 2 rise 1
+ server 127.0.9.051 127.0.9.51:8080 check weight 10 inter 7923 fall 2 rise 1
+ server 127.0.9.052 127.0.9.52:8080 check weight 10 inter 4109 fall 2 rise 1
+ server 127.0.9.053 127.0.9.53:8080 check weight 10 inter 5506 fall 2 rise 1
+ server 127.0.9.054 127.0.9.54:8080 check weight 10 inter 5619 fall 2 rise 1
+ server 127.0.9.055 127.0.9.55:8080 check weight 10 inter 8756 fall 2 rise 1
+ server 127.0.9.056 127.0.9.56:8080 check weight 10 inter 9590 fall 2 rise 1
+ server 127.0.9.057 127.0.9.57:8080 check weight 10 inter 6773 fall 2 rise 1
+ server 127.0.9.058 127.0.9.58:8080 check weight 10 inter 8577 fall 2 rise 1
+ server 127.0.9.059 127.0.9.59:8080 check weight 10 inter 9633 fall 2 rise 1
+ server 127.0.9.060 127.0.9.60:8080 check weight 10 inter 2737 fall 2 rise 1
+ server 127.0.9.061 127.0.9.61:8080 check weight 10 inter 8488 fall 2 rise 1
+ server 127.0.9.062 127.0.9.62:8080 check weight 10 inter 3688 fall 2 rise 1
+ server 127.0.9.063 127.0.9.63:8080 check weight 10 inter 6935 fall 2 rise 1
+ server 127.0.9.064 127.0.9.64:8080 check weight 10 inter 1391 fall 2 rise 1
+ server 127.0.9.065 127.0.9.65:8080 check weight 10 inter 2001 fall 2 rise 1
+ server 127.0.9.066 127.0.9.66:8080 check weight 10 inter 9508 fall 2 rise 1
+ server 127.0.9.067 127.0.9.67:8080 check weight 10 inter 5997 fall 2 rise 1
+ server 127.0.9.068 127.0.9.68:8080 check weight 10 inter 9909 fall 2 rise 1
+ server 127.0.9.069 127.0.9.69:8080 check weight 10 inter 9835 fall 2 rise 1
+ server 127.0.9.070 127.0.9.70:8080 check weight 10 inter 5536 fall 2 rise 1
+ server 127.0.9.071 127.0.9.71:8080 check weight 10 inter 6511 fall 2 rise 1
+ server 127.0.9.072 127.0.9.72:8080 check weight 10 inter 5155 fall 2 rise 1
+ server 127.0.9.073 127.0.9.73:8080 check weight 10 inter 3253 fall 2 rise 1
+ server 127.0.9.074 127.0.9.74:8080 check weight 10 inter 2500 fall 2 rise 1
+ server 127.0.9.075 127.0.9.75:8080 check weight 10 inter 9417 fall 2 rise 1
+ server 127.0.9.076 127.0.9.76:8080 check weight 10 inter 3624 fall 2 rise 1
+ server 127.0.9.077 127.0.9.77:8080 check weight 10 inter 1900 fall 2 rise 1
+ server 127.0.9.078 127.0.9.78:8080 check weight 10 inter 6095 fall 2 rise 1
+ server 127.0.9.079 127.0.9.79:8080 check weight 10 inter 1526 fall 2 rise 1
+ server 127.0.9.080 127.0.9.80:8080 check weight 10 inter 4529 fall 2 rise 1
+ server 127.0.9.081 127.0.9.81:8080 check weight 10 inter 6346 fall 2 rise 1
+ server 127.0.9.082 127.0.9.82:8080 check weight 10 inter 2755 fall 2 rise 1
+ server 127.0.9.083 127.0.9.83:8080 check weight 10 inter 9863 fall 2 rise 1
+ server 127.0.9.084 127.0.9.84:8080 check weight 10 inter 1989 fall 2 rise 1
+ server 127.0.9.085 127.0.9.85:8080 check weight 10 inter 1611 fall 2 rise 1
+ server 127.0.9.086 127.0.9.86:8080 check weight 10 inter 2594 fall 2 rise 1
+ server 127.0.9.087 127.0.9.87:8080 check weight 10 inter 9490 fall 2 rise 1
+ server 127.0.9.088 127.0.9.88:8080 check weight 10 inter 8354 fall 2 rise 1
+ server 127.0.9.089 127.0.9.89:8080 check weight 10 inter 3587 fall 2 rise 1
+ server 127.0.9.090 127.0.9.90:8080 check weight 10 inter 9340 fall 2 rise 1
+ server 127.0.9.091 127.0.9.91:8080 check weight 10 inter 1440 fall 2 rise 1
+ server 127.0.9.092 127.0.9.92:8080 check weight 10 inter 3958 fall 2 rise 1
+ server 127.0.9.093 127.0.9.93:8080 check weight 10 inter 8113 fall 2 rise 1
+ server 127.0.9.094 127.0.9.94:8080 check weight 10 inter 1142 fall 2 rise 1
+ server 127.0.9.095 127.0.9.95:8080 check weight 10 inter 6644 fall 2 rise 1
+ server 127.0.9.096 127.0.9.96:8080 check weight 10 inter 2125 fall 2 rise 1
+ server 127.0.9.097 127.0.9.97:8080 check weight 10 inter 6946 fall 2 rise 1
+ server 127.0.9.098 127.0.9.98:8080 check weight 10 inter 1642 fall 2 rise 1
+ server 127.0.9.099 127.0.9.99:8080 check weight 10 inter 6348 fall 2 rise 1
+ server 127.0.9.100 127.0.9.100:8080 check weight 10 inter 4717 fall 2 rise 1
+ server 127.0.9.101 127.0.9.101:8080 check weight 10 inter 838 fall 2 rise 1
+ server 127.0.9.102 127.0.9.102:8080 check weight 10 inter 2417 fall 2 rise 1
+ server 127.0.9.103 127.0.9.103:8080 check weight 10 inter 4441 fall 2 rise 1
+ server 127.0.9.104 127.0.9.104:8080 check weight 10 inter 5795 fall 2 rise 1
+ server 127.0.9.105 127.0.9.105:8080 check weight 10 inter 8418 fall 2 rise 1
+ server 127.0.9.106 127.0.9.106:8080 check weight 10 inter 3857 fall 2 rise 1
+ server 127.0.9.107 127.0.9.107:8080 check weight 10 inter 8799 fall 2 rise 1
+ server 127.0.9.108 127.0.9.108:8080 check weight 10 inter 8235 fall 2 rise 1
+ server 127.0.9.109 127.0.9.109:8080 check weight 10 inter 5562 fall 2 rise 1
+ server 127.0.9.110 127.0.9.110:8080 check weight 10 inter 54 fall 2 rise 1
+ server 127.0.9.111 127.0.9.111:8080 check weight 10 inter 3228 fall 2 rise 1
+ server 127.0.9.112 127.0.9.112:8080 check weight 10 inter 2577 fall 2 rise 1
+ server 127.0.9.113 127.0.9.113:8080 check weight 10 inter 2236 fall 2 rise 1
+ server 127.0.9.114 127.0.9.114:8080 check weight 10 inter 6018 fall 2 rise 1
+ server 127.0.9.115 127.0.9.115:8080 check weight 10 inter 2472 fall 2 rise 1
+ server 127.0.9.116 127.0.9.116:8080 check weight 10 inter 6155 fall 2 rise 1
+ server 127.0.9.117 127.0.9.117:8080 check weight 10 inter 2141 fall 2 rise 1
+ server 127.0.9.118 127.0.9.118:8080 check weight 10 inter 2612 fall 2 rise 1
+ server 127.0.9.119 127.0.9.119:8080 check weight 10 inter 7760 fall 2 rise 1
+ server 127.0.9.120 127.0.9.120:8080 check weight 10 inter 5795 fall 2 rise 1
+ server 127.0.9.121 127.0.9.121:8080 check weight 10 inter 9813 fall 2 rise 1
+ server 127.0.9.122 127.0.9.122:8080 check weight 10 inter 3690 fall 2 rise 1
+ server 127.0.9.123 127.0.9.123:8080 check weight 10 inter 4546 fall 2 rise 1
+ server 127.0.9.124 127.0.9.124:8080 check weight 10 inter 1526 fall 2 rise 1
+ server 127.0.9.125 127.0.9.125:8080 check weight 10 inter 4522 fall 2 rise 1
+ server 127.0.9.126 127.0.9.126:8080 check weight 10 inter 7606 fall 2 rise 1
+ server 127.0.9.127 127.0.9.127:8080 check weight 10 inter 4392 fall 2 rise 1
+ server 127.0.9.128 127.0.9.128:8080 check weight 10 inter 9485 fall 2 rise 1
+ server 127.0.9.129 127.0.9.129:8080 check weight 10 inter 988 fall 2 rise 1
+ server 127.0.9.130 127.0.9.130:8080 check weight 10 inter 7409 fall 2 rise 1
+ server 127.0.9.131 127.0.9.131:8080 check weight 10 inter 4965 fall 2 rise 1
+ server 127.0.9.132 127.0.9.132:8080 check weight 10 inter 8430 fall 2 rise 1
+ server 127.0.9.133 127.0.9.133:8080 check weight 10 inter 8133 fall 2 rise 1
+ server 127.0.9.134 127.0.9.134:8080 check weight 10 inter 1442 fall 2 rise 1
+ server 127.0.9.135 127.0.9.135:8080 check weight 10 inter 2240 fall 2 rise 1
+ server 127.0.9.136 127.0.9.136:8080 check weight 10 inter 1523 fall 2 rise 1
+ server 127.0.9.137 127.0.9.137:8080 check weight 10 inter 8015 fall 2 rise 1
+ server 127.0.9.138 127.0.9.138:8080 check weight 10 inter 1420 fall 2 rise 1
+ server 127.0.9.139 127.0.9.139:8080 check weight 10 inter 6739 fall 2 rise 1
+ server 127.0.9.140 127.0.9.140:8080 check weight 10 inter 4757 fall 2 rise 1
+ server 127.0.9.141 127.0.9.141:8080 check weight 10 inter 8323 fall 2 rise 1
+ server 127.0.9.142 127.0.9.142:8080 check weight 10 inter 795 fall 2 rise 1
+ server 127.0.9.143 127.0.9.143:8080 check weight 10 inter 3875 fall 2 rise 1
+ server 127.0.9.144 127.0.9.144:8080 check weight 10 inter 944 fall 2 rise 1
+ server 127.0.9.145 127.0.9.145:8080 check weight 10 inter 757 fall 2 rise 1
+ server 127.0.9.146 127.0.9.146:8080 check weight 10 inter 2157 fall 2 rise 1
+ server 127.0.9.147 127.0.9.147:8080 check weight 10 inter 5326 fall 2 rise 1
+ server 127.0.9.148 127.0.9.148:8080 check weight 10 inter 6406 fall 2 rise 1
+ server 127.0.9.149 127.0.9.149:8080 check weight 10 inter 2320 fall 2 rise 1
+ server 127.0.9.150 127.0.9.150:8080 check weight 10 inter 5425 fall 2 rise 1
+ server 127.0.9.151 127.0.9.151:8080 check weight 10 inter 9366 fall 2 rise 1
+ server 127.0.9.152 127.0.9.152:8080 check weight 10 inter 5200 fall 2 rise 1
+ server 127.0.9.153 127.0.9.153:8080 check weight 10 inter 2035 fall 2 rise 1
+ server 127.0.9.154 127.0.9.154:8080 check weight 10 inter 1553 fall 2 rise 1
+ server 127.0.9.155 127.0.9.155:8080 check weight 10 inter 2431 fall 2 rise 1
+ server 127.0.9.156 127.0.9.156:8080 check weight 10 inter 2937 fall 2 rise 1
+ server 127.0.9.157 127.0.9.157:8080 check weight 10 inter 5640 fall 2 rise 1
+ server 127.0.9.158 127.0.9.158:8080 check weight 10 inter 7518 fall 2 rise 1
+ server 127.0.9.159 127.0.9.159:8080 check weight 10 inter 4937 fall 2 rise 1
+ server 127.0.9.160 127.0.9.160:8080 check weight 10 inter 4825 fall 2 rise 1
+ server 127.0.9.161 127.0.9.161:8080 check weight 10 inter 1241 fall 2 rise 1
+ server 127.0.9.162 127.0.9.162:8080 check weight 10 inter 95 fall 2 rise 1
+ server 127.0.9.163 127.0.9.163:8080 check weight 10 inter 6574 fall 2 rise 1
+ server 127.0.9.164 127.0.9.164:8080 check weight 10 inter 817 fall 2 rise 1
+ server 127.0.9.165 127.0.9.165:8080 check weight 10 inter 2465 fall 2 rise 1
+ server 127.0.9.166 127.0.9.166:8080 check weight 10 inter 9081 fall 2 rise 1
+ server 127.0.9.167 127.0.9.167:8080 check weight 10 inter 7131 fall 2 rise 1
+ server 127.0.9.168 127.0.9.168:8080 check weight 10 inter 7403 fall 2 rise 1
+ server 127.0.9.169 127.0.9.169:8080 check weight 10 inter 9795 fall 2 rise 1
+ server 127.0.9.170 127.0.9.170:8080 check weight 10 inter 6413 fall 2 rise 1
+ server 127.0.9.171 127.0.9.171:8080 check weight 10 inter 1023 fall 2 rise 1
+ server 127.0.9.172 127.0.9.172:8080 check weight 10 inter 1503 fall 2 rise 1
+ server 127.0.9.173 127.0.9.173:8080 check weight 10 inter 2782 fall 2 rise 1
+ server 127.0.9.174 127.0.9.174:8080 check weight 10 inter 4165 fall 2 rise 1
+ server 127.0.9.175 127.0.9.175:8080 check weight 10 inter 6037 fall 2 rise 1
+ server 127.0.9.176 127.0.9.176:8080 check weight 10 inter 6373 fall 2 rise 1
+ server 127.0.9.177 127.0.9.177:8080 check weight 10 inter 8267 fall 2 rise 1
+ server 127.0.9.178 127.0.9.178:8080 check weight 10 inter 4909 fall 2 rise 1
+ server 127.0.9.179 127.0.9.179:8080 check weight 10 inter 8305 fall 2 rise 1
+ server 127.0.9.180 127.0.9.180:8080 check weight 10 inter 4175 fall 2 rise 1
+ server 127.0.9.181 127.0.9.181:8080 check weight 10 inter 2876 fall 2 rise 1
+ server 127.0.9.182 127.0.9.182:8080 check weight 10 inter 9761 fall 2 rise 1
+ server 127.0.9.183 127.0.9.183:8080 check weight 10 inter 1260 fall 2 rise 1
+ server 127.0.9.184 127.0.9.184:8080 check weight 10 inter 7874 fall 2 rise 1
+ server 127.0.9.185 127.0.9.185:8080 check weight 10 inter 8100 fall 2 rise 1
+ server 127.0.9.186 127.0.9.186:8080 check weight 10 inter 986 fall 2 rise 1
+ server 127.0.9.187 127.0.9.187:8080 check weight 10 inter 9951 fall 2 rise 1
+ server 127.0.9.188 127.0.9.188:8080 check weight 10 inter 88 fall 2 rise 1
+ server 127.0.9.189 127.0.9.189:8080 check weight 10 inter 1510 fall 2 rise 1
+ server 127.0.9.190 127.0.9.190:8080 check weight 10 inter 9831 fall 2 rise 1
+ server 127.0.9.191 127.0.9.191:8080 check weight 10 inter 6961 fall 2 rise 1
+ server 127.0.9.192 127.0.9.192:8080 check weight 10 inter 1350 fall 2 rise 1
+ server 127.0.9.193 127.0.9.193:8080 check weight 10 inter 6078 fall 2 rise 1
+ server 127.0.9.194 127.0.9.194:8080 check weight 10 inter 8349 fall 2 rise 1
+ server 127.0.9.195 127.0.9.195:8080 check weight 10 inter 8977 fall 2 rise 1
+ server 127.0.9.196 127.0.9.196:8080 check weight 10 inter 7742 fall 2 rise 1
+ server 127.0.9.197 127.0.9.197:8080 check weight 10 inter 3922 fall 2 rise 1
+ server 127.0.9.198 127.0.9.198:8080 check weight 10 inter 3551 fall 2 rise 1
+ server 127.0.9.199 127.0.9.199:8080 check weight 10 inter 8563 fall 2 rise 1
+ server 127.0.9.200 127.0.9.200:8080 check weight 10 inter 6516 fall 2 rise 1
+ server 127.0.9.201 127.0.9.201:8080 check weight 10 inter 4471 fall 2 rise 1
+ server 127.0.9.202 127.0.9.202:8080 check weight 10 inter 2652 fall 2 rise 1
+ server 127.0.9.203 127.0.9.203:8080 check weight 10 inter 5274 fall 2 rise 1
+ server 127.0.9.204 127.0.9.204:8080 check weight 10 inter 3953 fall 2 rise 1
+ server 127.0.9.205 127.0.9.205:8080 check weight 10 inter 1955 fall 2 rise 1
+ server 127.0.9.206 127.0.9.206:8080 check weight 10 inter 6684 fall 2 rise 1
+ server 127.0.9.207 127.0.9.207:8080 check weight 10 inter 1737 fall 2 rise 1
+ server 127.0.9.208 127.0.9.208:8080 check weight 10 inter 7465 fall 2 rise 1
+ server 127.0.9.209 127.0.9.209:8080 check weight 10 inter 5159 fall 2 rise 1
+ server 127.0.9.210 127.0.9.210:8080 check weight 10 inter 600 fall 2 rise 1
+ server 127.0.9.211 127.0.9.211:8080 check weight 10 inter 9898 fall 2 rise 1
+ server 127.0.9.212 127.0.9.212:8080 check weight 10 inter 829 fall 2 rise 1
+ server 127.0.9.213 127.0.9.213:8080 check weight 10 inter 8792 fall 2 rise 1
+ server 127.0.9.214 127.0.9.214:8080 check weight 10 inter 5020 fall 2 rise 1
+ server 127.0.9.215 127.0.9.215:8080 check weight 10 inter 5386 fall 2 rise 1
+ server 127.0.9.216 127.0.9.216:8080 check weight 10 inter 8672 fall 2 rise 1
+ server 127.0.9.217 127.0.9.217:8080 check weight 10 inter 7121 fall 2 rise 1
+ server 127.0.9.218 127.0.9.218:8080 check weight 10 inter 1565 fall 2 rise 1
+ server 127.0.9.219 127.0.9.219:8080 check weight 10 inter 3963 fall 2 rise 1
+ server 127.0.9.220 127.0.9.220:8080 check weight 10 inter 3738 fall 2 rise 1
+ server 127.0.9.221 127.0.9.221:8080 check weight 10 inter 9818 fall 2 rise 1
+ server 127.0.9.222 127.0.9.222:8080 check weight 10 inter 1401 fall 2 rise 1
+ server 127.0.9.223 127.0.9.223:8080 check weight 10 inter 3064 fall 2 rise 1
+ server 127.0.9.224 127.0.9.224:8080 check weight 10 inter 4750 fall 2 rise 1
+ server 127.0.9.225 127.0.9.225:8080 check weight 10 inter 678 fall 2 rise 1
+ server 127.0.9.226 127.0.9.226:8080 check weight 10 inter 6180 fall 2 rise 1
+ server 127.0.9.227 127.0.9.227:8080 check weight 10 inter 3045 fall 2 rise 1
+ server 127.0.9.228 127.0.9.228:8080 check weight 10 inter 7771 fall 2 rise 1
+ server 127.0.9.229 127.0.9.229:8080 check weight 10 inter 5776 fall 2 rise 1
+ server 127.0.9.230 127.0.9.230:8080 check weight 10 inter 9276 fall 2 rise 1
+ server 127.0.9.231 127.0.9.231:8080 check weight 10 inter 5438 fall 2 rise 1
+ server 127.0.9.232 127.0.9.232:8080 check weight 10 inter 9957 fall 2 rise 1
+ server 127.0.9.233 127.0.9.233:8080 check weight 10 inter 6787 fall 2 rise 1
+ server 127.0.9.234 127.0.9.234:8080 check weight 10 inter 2081 fall 2 rise 1
+ server 127.0.9.235 127.0.9.235:8080 check weight 10 inter 653 fall 2 rise 1
+ server 127.0.9.236 127.0.9.236:8080 check weight 10 inter 4753 fall 2 rise 1
+ server 127.0.9.237 127.0.9.237:8080 check weight 10 inter 8204 fall 2 rise 1
+ server 127.0.9.238 127.0.9.238:8080 check weight 10 inter 3847 fall 2 rise 1
+ server 127.0.9.239 127.0.9.239:8080 check weight 10 inter 2456 fall 2 rise 1
+ server 127.0.9.240 127.0.9.240:8080 check weight 10 inter 2531 fall 2 rise 1
+ server 127.0.9.241 127.0.9.241:8080 check weight 10 inter 3088 fall 2 rise 1
+ server 127.0.9.242 127.0.9.242:8080 check weight 10 inter 5383 fall 2 rise 1
+ server 127.0.9.243 127.0.9.243:8080 check weight 10 inter 3360 fall 2 rise 1
+ server 127.0.9.244 127.0.9.244:8080 check weight 10 inter 9368 fall 2 rise 1
+ server 127.0.9.245 127.0.9.245:8080 check weight 10 inter 6093 fall 2 rise 1
+ server 127.0.9.246 127.0.9.246:8080 check weight 10 inter 649 fall 2 rise 1
+ server 127.0.9.247 127.0.9.247:8080 check weight 10 inter 4841 fall 2 rise 1
+ server 127.0.9.248 127.0.9.248:8080 check weight 10 inter 3235 fall 2 rise 1
+ server 127.0.9.249 127.0.9.249:8080 check weight 10 inter 6705 fall 2 rise 1
+ server 127.0.9.250 127.0.9.250:8080 check weight 10 inter 4834 fall 2 rise 1
+ server 127.0.10.001 127.0.10.1:8080 check weight 10 inter 2698 fall 2 rise 1
+ server 127.0.10.002 127.0.10.2:8080 check weight 10 inter 5655 fall 2 rise 1
+ server 127.0.10.003 127.0.10.3:8080 check weight 10 inter 365 fall 2 rise 1
+ server 127.0.10.004 127.0.10.4:8080 check weight 10 inter 1901 fall 2 rise 1
+ server 127.0.10.005 127.0.10.5:8080 check weight 10 inter 3604 fall 2 rise 1
+ server 127.0.10.006 127.0.10.6:8080 check weight 10 inter 4264 fall 2 rise 1
+ server 127.0.10.007 127.0.10.7:8080 check weight 10 inter 407 fall 2 rise 1
+ server 127.0.10.008 127.0.10.8:8080 check weight 10 inter 7418 fall 2 rise 1
+ server 127.0.10.009 127.0.10.9:8080 check weight 10 inter 5491 fall 2 rise 1
+ server 127.0.10.010 127.0.10.10:8080 check weight 10 inter 9410 fall 2 rise 1
+ server 127.0.10.011 127.0.10.11:8080 check weight 10 inter 7874 fall 2 rise 1
+ server 127.0.10.012 127.0.10.12:8080 check weight 10 inter 6889 fall 2 rise 1
+ server 127.0.10.013 127.0.10.13:8080 check weight 10 inter 3350 fall 2 rise 1
+ server 127.0.10.014 127.0.10.14:8080 check weight 10 inter 679 fall 2 rise 1
+ server 127.0.10.015 127.0.10.15:8080 check weight 10 inter 9983 fall 2 rise 1
+ server 127.0.10.016 127.0.10.16:8080 check weight 10 inter 9236 fall 2 rise 1
+ server 127.0.10.017 127.0.10.17:8080 check weight 10 inter 4158 fall 2 rise 1
+ server 127.0.10.018 127.0.10.18:8080 check weight 10 inter 5418 fall 2 rise 1
+ server 127.0.10.019 127.0.10.19:8080 check weight 10 inter 1535 fall 2 rise 1
+ server 127.0.10.020 127.0.10.20:8080 check weight 10 inter 8432 fall 2 rise 1
+ server 127.0.10.021 127.0.10.21:8080 check weight 10 inter 6360 fall 2 rise 1
+ server 127.0.10.022 127.0.10.22:8080 check weight 10 inter 1740 fall 2 rise 1
+ server 127.0.10.023 127.0.10.23:8080 check weight 10 inter 2207 fall 2 rise 1
+ server 127.0.10.024 127.0.10.24:8080 check weight 10 inter 6306 fall 2 rise 1
+ server 127.0.10.025 127.0.10.25:8080 check weight 10 inter 6090 fall 2 rise 1
+ server 127.0.10.026 127.0.10.26:8080 check weight 10 inter 2729 fall 2 rise 1
+ server 127.0.10.027 127.0.10.27:8080 check weight 10 inter 3986 fall 2 rise 1
+ server 127.0.10.028 127.0.10.28:8080 check weight 10 inter 6770 fall 2 rise 1
+ server 127.0.10.029 127.0.10.29:8080 check weight 10 inter 4944 fall 2 rise 1
+ server 127.0.10.030 127.0.10.30:8080 check weight 10 inter 754 fall 2 rise 1
+ server 127.0.10.031 127.0.10.31:8080 check weight 10 inter 8494 fall 2 rise 1
+ server 127.0.10.032 127.0.10.32:8080 check weight 10 inter 1089 fall 2 rise 1
+ server 127.0.10.033 127.0.10.33:8080 check weight 10 inter 1924 fall 2 rise 1
+ server 127.0.10.034 127.0.10.34:8080 check weight 10 inter 936 fall 2 rise 1
+ server 127.0.10.035 127.0.10.35:8080 check weight 10 inter 598 fall 2 rise 1
+ server 127.0.10.036 127.0.10.36:8080 check weight 10 inter 1673 fall 2 rise 1
+ server 127.0.10.037 127.0.10.37:8080 check weight 10 inter 7327 fall 2 rise 1
+ server 127.0.10.038 127.0.10.38:8080 check weight 10 inter 5453 fall 2 rise 1
+ server 127.0.10.039 127.0.10.39:8080 check weight 10 inter 7434 fall 2 rise 1
+ server 127.0.10.040 127.0.10.40:8080 check weight 10 inter 6792 fall 2 rise 1
+ server 127.0.10.041 127.0.10.41:8080 check weight 10 inter 3329 fall 2 rise 1
+ server 127.0.10.042 127.0.10.42:8080 check weight 10 inter 27 fall 2 rise 1
+ server 127.0.10.043 127.0.10.43:8080 check weight 10 inter 3249 fall 2 rise 1
+ server 127.0.10.044 127.0.10.44:8080 check weight 10 inter 1532 fall 2 rise 1
+ server 127.0.10.045 127.0.10.45:8080 check weight 10 inter 1810 fall 2 rise 1
+ server 127.0.10.046 127.0.10.46:8080 check weight 10 inter 1345 fall 2 rise 1
+ server 127.0.10.047 127.0.10.47:8080 check weight 10 inter 8562 fall 2 rise 1
+ server 127.0.10.048 127.0.10.48:8080 check weight 10 inter 1245 fall 2 rise 1
+ server 127.0.10.049 127.0.10.49:8080 check weight 10 inter 5863 fall 2 rise 1
+ server 127.0.10.050 127.0.10.50:8080 check weight 10 inter 5697 fall 2 rise 1
+ server 127.0.10.051 127.0.10.51:8080 check weight 10 inter 1301 fall 2 rise 1
+ server 127.0.10.052 127.0.10.52:8080 check weight 10 inter 3886 fall 2 rise 1
+ server 127.0.10.053 127.0.10.53:8080 check weight 10 inter 1075 fall 2 rise 1
+ server 127.0.10.054 127.0.10.54:8080 check weight 10 inter 507 fall 2 rise 1
+ server 127.0.10.055 127.0.10.55:8080 check weight 10 inter 1164 fall 2 rise 1
+ server 127.0.10.056 127.0.10.56:8080 check weight 10 inter 3009 fall 2 rise 1
+ server 127.0.10.057 127.0.10.57:8080 check weight 10 inter 7491 fall 2 rise 1
+ server 127.0.10.058 127.0.10.58:8080 check weight 10 inter 3783 fall 2 rise 1
+ server 127.0.10.059 127.0.10.59:8080 check weight 10 inter 1025 fall 2 rise 1
+ server 127.0.10.060 127.0.10.60:8080 check weight 10 inter 781 fall 2 rise 1
+ server 127.0.10.061 127.0.10.61:8080 check weight 10 inter 7703 fall 2 rise 1
+ server 127.0.10.062 127.0.10.62:8080 check weight 10 inter 8437 fall 2 rise 1
+ server 127.0.10.063 127.0.10.63:8080 check weight 10 inter 7086 fall 2 rise 1
+ server 127.0.10.064 127.0.10.64:8080 check weight 10 inter 1521 fall 2 rise 1
+ server 127.0.10.065 127.0.10.65:8080 check weight 10 inter 3249 fall 2 rise 1
+ server 127.0.10.066 127.0.10.66:8080 check weight 10 inter 149 fall 2 rise 1
+ server 127.0.10.067 127.0.10.67:8080 check weight 10 inter 78 fall 2 rise 1
+ server 127.0.10.068 127.0.10.68:8080 check weight 10 inter 38 fall 2 rise 1
+ server 127.0.10.069 127.0.10.69:8080 check weight 10 inter 5199 fall 2 rise 1
+ server 127.0.10.070 127.0.10.70:8080 check weight 10 inter 6357 fall 2 rise 1
+ server 127.0.10.071 127.0.10.71:8080 check weight 10 inter 9655 fall 2 rise 1
+ server 127.0.10.072 127.0.10.72:8080 check weight 10 inter 8854 fall 2 rise 1
+ server 127.0.10.073 127.0.10.73:8080 check weight 10 inter 6649 fall 2 rise 1
+ server 127.0.10.074 127.0.10.74:8080 check weight 10 inter 6925 fall 2 rise 1
+ server 127.0.10.075 127.0.10.75:8080 check weight 10 inter 1508 fall 2 rise 1
+ server 127.0.10.076 127.0.10.76:8080 check weight 10 inter 7326 fall 2 rise 1
+ server 127.0.10.077 127.0.10.77:8080 check weight 10 inter 8090 fall 2 rise 1
+ server 127.0.10.078 127.0.10.78:8080 check weight 10 inter 3708 fall 2 rise 1
+ server 127.0.10.079 127.0.10.79:8080 check weight 10 inter 5525 fall 2 rise 1
+ server 127.0.10.080 127.0.10.80:8080 check weight 10 inter 596 fall 2 rise 1
+ server 127.0.10.081 127.0.10.81:8080 check weight 10 inter 8910 fall 2 rise 1
+ server 127.0.10.082 127.0.10.82:8080 check weight 10 inter 1441 fall 2 rise 1
+ server 127.0.10.083 127.0.10.83:8080 check weight 10 inter 2805 fall 2 rise 1
+ server 127.0.10.084 127.0.10.84:8080 check weight 10 inter 8168 fall 2 rise 1
+ server 127.0.10.085 127.0.10.85:8080 check weight 10 inter 5376 fall 2 rise 1
+ server 127.0.10.086 127.0.10.86:8080 check weight 10 inter 569 fall 2 rise 1
+ server 127.0.10.087 127.0.10.87:8080 check weight 10 inter 2639 fall 2 rise 1
+ server 127.0.10.088 127.0.10.88:8080 check weight 10 inter 2188 fall 2 rise 1
+ server 127.0.10.089 127.0.10.89:8080 check weight 10 inter 5568 fall 2 rise 1
+ server 127.0.10.090 127.0.10.90:8080 check weight 10 inter 4267 fall 2 rise 1
+ server 127.0.10.091 127.0.10.91:8080 check weight 10 inter 1711 fall 2 rise 1
+ server 127.0.10.092 127.0.10.92:8080 check weight 10 inter 7988 fall 2 rise 1
+ server 127.0.10.093 127.0.10.93:8080 check weight 10 inter 1814 fall 2 rise 1
+ server 127.0.10.094 127.0.10.94:8080 check weight 10 inter 7045 fall 2 rise 1
+ server 127.0.10.095 127.0.10.95:8080 check weight 10 inter 6650 fall 2 rise 1
+ server 127.0.10.096 127.0.10.96:8080 check weight 10 inter 9916 fall 2 rise 1
+ server 127.0.10.097 127.0.10.97:8080 check weight 10 inter 8585 fall 2 rise 1
+ server 127.0.10.098 127.0.10.98:8080 check weight 10 inter 8260 fall 2 rise 1
+ server 127.0.10.099 127.0.10.99:8080 check weight 10 inter 1549 fall 2 rise 1
+ server 127.0.10.100 127.0.10.100:8080 check weight 10 inter 8060 fall 2 rise 1
+ server 127.0.10.101 127.0.10.101:8080 check weight 10 inter 2110 fall 2 rise 1
+ server 127.0.10.102 127.0.10.102:8080 check weight 10 inter 4102 fall 2 rise 1
+ server 127.0.10.103 127.0.10.103:8080 check weight 10 inter 1766 fall 2 rise 1
+ server 127.0.10.104 127.0.10.104:8080 check weight 10 inter 748 fall 2 rise 1
+ server 127.0.10.105 127.0.10.105:8080 check weight 10 inter 8610 fall 2 rise 1
+ server 127.0.10.106 127.0.10.106:8080 check weight 10 inter 7345 fall 2 rise 1
+ server 127.0.10.107 127.0.10.107:8080 check weight 10 inter 8213 fall 2 rise 1
+ server 127.0.10.108 127.0.10.108:8080 check weight 10 inter 4599 fall 2 rise 1
+ server 127.0.10.109 127.0.10.109:8080 check weight 10 inter 2310 fall 2 rise 1
+ server 127.0.10.110 127.0.10.110:8080 check weight 10 inter 3071 fall 2 rise 1
+ server 127.0.10.111 127.0.10.111:8080 check weight 10 inter 7903 fall 2 rise 1
+ server 127.0.10.112 127.0.10.112:8080 check weight 10 inter 2369 fall 2 rise 1
+ server 127.0.10.113 127.0.10.113:8080 check weight 10 inter 3981 fall 2 rise 1
+ server 127.0.10.114 127.0.10.114:8080 check weight 10 inter 3967 fall 2 rise 1
+ server 127.0.10.115 127.0.10.115:8080 check weight 10 inter 697 fall 2 rise 1
+ server 127.0.10.116 127.0.10.116:8080 check weight 10 inter 2679 fall 2 rise 1
+ server 127.0.10.117 127.0.10.117:8080 check weight 10 inter 5799 fall 2 rise 1
+ server 127.0.10.118 127.0.10.118:8080 check weight 10 inter 1677 fall 2 rise 1
+ server 127.0.10.119 127.0.10.119:8080 check weight 10 inter 5039 fall 2 rise 1
+ server 127.0.10.120 127.0.10.120:8080 check weight 10 inter 6971 fall 2 rise 1
+ server 127.0.10.121 127.0.10.121:8080 check weight 10 inter 8634 fall 2 rise 1
+ server 127.0.10.122 127.0.10.122:8080 check weight 10 inter 8620 fall 2 rise 1
+ server 127.0.10.123 127.0.10.123:8080 check weight 10 inter 1591 fall 2 rise 1
+ server 127.0.10.124 127.0.10.124:8080 check weight 10 inter 3741 fall 2 rise 1
+ server 127.0.10.125 127.0.10.125:8080 check weight 10 inter 5429 fall 2 rise 1
+ server 127.0.10.126 127.0.10.126:8080 check weight 10 inter 889 fall 2 rise 1
+ server 127.0.10.127 127.0.10.127:8080 check weight 10 inter 7081 fall 2 rise 1
+ server 127.0.10.128 127.0.10.128:8080 check weight 10 inter 5417 fall 2 rise 1
+ server 127.0.10.129 127.0.10.129:8080 check weight 10 inter 8453 fall 2 rise 1
+ server 127.0.10.130 127.0.10.130:8080 check weight 10 inter 9104 fall 2 rise 1
+ server 127.0.10.131 127.0.10.131:8080 check weight 10 inter 7099 fall 2 rise 1
+ server 127.0.10.132 127.0.10.132:8080 check weight 10 inter 5778 fall 2 rise 1
+ server 127.0.10.133 127.0.10.133:8080 check weight 10 inter 708 fall 2 rise 1
+ server 127.0.10.134 127.0.10.134:8080 check weight 10 inter 6602 fall 2 rise 1
+ server 127.0.10.135 127.0.10.135:8080 check weight 10 inter 4190 fall 2 rise 1
+ server 127.0.10.136 127.0.10.136:8080 check weight 10 inter 4978 fall 2 rise 1
+ server 127.0.10.137 127.0.10.137:8080 check weight 10 inter 2676 fall 2 rise 1
+ server 127.0.10.138 127.0.10.138:8080 check weight 10 inter 157 fall 2 rise 1
+ server 127.0.10.139 127.0.10.139:8080 check weight 10 inter 5498 fall 2 rise 1
+ server 127.0.10.140 127.0.10.140:8080 check weight 10 inter 9886 fall 2 rise 1
+ server 127.0.10.141 127.0.10.141:8080 check weight 10 inter 7887 fall 2 rise 1
+ server 127.0.10.142 127.0.10.142:8080 check weight 10 inter 7218 fall 2 rise 1
+ server 127.0.10.143 127.0.10.143:8080 check weight 10 inter 8458 fall 2 rise 1
+ server 127.0.10.144 127.0.10.144:8080 check weight 10 inter 9771 fall 2 rise 1
+ server 127.0.10.145 127.0.10.145:8080 check weight 10 inter 363 fall 2 rise 1
+ server 127.0.10.146 127.0.10.146:8080 check weight 10 inter 3461 fall 2 rise 1
+ server 127.0.10.147 127.0.10.147:8080 check weight 10 inter 1734 fall 2 rise 1
+ server 127.0.10.148 127.0.10.148:8080 check weight 10 inter 3332 fall 2 rise 1
+ server 127.0.10.149 127.0.10.149:8080 check weight 10 inter 32 fall 2 rise 1
+ server 127.0.10.150 127.0.10.150:8080 check weight 10 inter 1599 fall 2 rise 1
+ server 127.0.10.151 127.0.10.151:8080 check weight 10 inter 805 fall 2 rise 1
+ server 127.0.10.152 127.0.10.152:8080 check weight 10 inter 2006 fall 2 rise 1
+ server 127.0.10.153 127.0.10.153:8080 check weight 10 inter 8074 fall 2 rise 1
+ server 127.0.10.154 127.0.10.154:8080 check weight 10 inter 5683 fall 2 rise 1
+ server 127.0.10.155 127.0.10.155:8080 check weight 10 inter 9391 fall 2 rise 1
+ server 127.0.10.156 127.0.10.156:8080 check weight 10 inter 4065 fall 2 rise 1
+ server 127.0.10.157 127.0.10.157:8080 check weight 10 inter 3389 fall 2 rise 1
+ server 127.0.10.158 127.0.10.158:8080 check weight 10 inter 2298 fall 2 rise 1
+ server 127.0.10.159 127.0.10.159:8080 check weight 10 inter 4004 fall 2 rise 1
+ server 127.0.10.160 127.0.10.160:8080 check weight 10 inter 2527 fall 2 rise 1
+ server 127.0.10.161 127.0.10.161:8080 check weight 10 inter 5372 fall 2 rise 1
+ server 127.0.10.162 127.0.10.162:8080 check weight 10 inter 4750 fall 2 rise 1
+ server 127.0.10.163 127.0.10.163:8080 check weight 10 inter 927 fall 2 rise 1
+ server 127.0.10.164 127.0.10.164:8080 check weight 10 inter 9442 fall 2 rise 1
+ server 127.0.10.165 127.0.10.165:8080 check weight 10 inter 6487 fall 2 rise 1
+ server 127.0.10.166 127.0.10.166:8080 check weight 10 inter 2553 fall 2 rise 1
+ server 127.0.10.167 127.0.10.167:8080 check weight 10 inter 5847 fall 2 rise 1
+ server 127.0.10.168 127.0.10.168:8080 check weight 10 inter 2176 fall 2 rise 1
+ server 127.0.10.169 127.0.10.169:8080 check weight 10 inter 919 fall 2 rise 1
+ server 127.0.10.170 127.0.10.170:8080 check weight 10 inter 1404 fall 2 rise 1
+ server 127.0.10.171 127.0.10.171:8080 check weight 10 inter 5915 fall 2 rise 1
+ server 127.0.10.172 127.0.10.172:8080 check weight 10 inter 7005 fall 2 rise 1
+ server 127.0.10.173 127.0.10.173:8080 check weight 10 inter 6619 fall 2 rise 1
+ server 127.0.10.174 127.0.10.174:8080 check weight 10 inter 3022 fall 2 rise 1
+ server 127.0.10.175 127.0.10.175:8080 check weight 10 inter 1211 fall 2 rise 1
+ server 127.0.10.176 127.0.10.176:8080 check weight 10 inter 5836 fall 2 rise 1
+ server 127.0.10.177 127.0.10.177:8080 check weight 10 inter 417 fall 2 rise 1
+ server 127.0.10.178 127.0.10.178:8080 check weight 10 inter 43 fall 2 rise 1
+ server 127.0.10.179 127.0.10.179:8080 check weight 10 inter 3095 fall 2 rise 1
+ server 127.0.10.180 127.0.10.180:8080 check weight 10 inter 2245 fall 2 rise 1
+ server 127.0.10.181 127.0.10.181:8080 check weight 10 inter 7987 fall 2 rise 1
+ server 127.0.10.182 127.0.10.182:8080 check weight 10 inter 9960 fall 2 rise 1
+ server 127.0.10.183 127.0.10.183:8080 check weight 10 inter 3640 fall 2 rise 1
+ server 127.0.10.184 127.0.10.184:8080 check weight 10 inter 69 fall 2 rise 1
+ server 127.0.10.185 127.0.10.185:8080 check weight 10 inter 1111 fall 2 rise 1
+ server 127.0.10.186 127.0.10.186:8080 check weight 10 inter 1463 fall 2 rise 1
+ server 127.0.10.187 127.0.10.187:8080 check weight 10 inter 1488 fall 2 rise 1
+ server 127.0.10.188 127.0.10.188:8080 check weight 10 inter 2265 fall 2 rise 1
+ server 127.0.10.189 127.0.10.189:8080 check weight 10 inter 2757 fall 2 rise 1
+ server 127.0.10.190 127.0.10.190:8080 check weight 10 inter 7151 fall 2 rise 1
+ server 127.0.10.191 127.0.10.191:8080 check weight 10 inter 2611 fall 2 rise 1
+ server 127.0.10.192 127.0.10.192:8080 check weight 10 inter 5576 fall 2 rise 1
+ server 127.0.10.193 127.0.10.193:8080 check weight 10 inter 8774 fall 2 rise 1
+ server 127.0.10.194 127.0.10.194:8080 check weight 10 inter 8905 fall 2 rise 1
+ server 127.0.10.195 127.0.10.195:8080 check weight 10 inter 7650 fall 2 rise 1
+ server 127.0.10.196 127.0.10.196:8080 check weight 10 inter 9811 fall 2 rise 1
+ server 127.0.10.197 127.0.10.197:8080 check weight 10 inter 5472 fall 2 rise 1
+ server 127.0.10.198 127.0.10.198:8080 check weight 10 inter 5898 fall 2 rise 1
+ server 127.0.10.199 127.0.10.199:8080 check weight 10 inter 2459 fall 2 rise 1
+ server 127.0.10.200 127.0.10.200:8080 check weight 10 inter 30 fall 2 rise 1
+ server 127.0.10.201 127.0.10.201:8080 check weight 10 inter 180 fall 2 rise 1
+ server 127.0.10.202 127.0.10.202:8080 check weight 10 inter 4292 fall 2 rise 1
+ server 127.0.10.203 127.0.10.203:8080 check weight 10 inter 4130 fall 2 rise 1
+ server 127.0.10.204 127.0.10.204:8080 check weight 10 inter 443 fall 2 rise 1
+ server 127.0.10.205 127.0.10.205:8080 check weight 10 inter 9764 fall 2 rise 1
+ server 127.0.10.206 127.0.10.206:8080 check weight 10 inter 8139 fall 2 rise 1
+ server 127.0.10.207 127.0.10.207:8080 check weight 10 inter 6989 fall 2 rise 1
+ server 127.0.10.208 127.0.10.208:8080 check weight 10 inter 1723 fall 2 rise 1
+ server 127.0.10.209 127.0.10.209:8080 check weight 10 inter 6805 fall 2 rise 1
+ server 127.0.10.210 127.0.10.210:8080 check weight 10 inter 3768 fall 2 rise 1
+ server 127.0.10.211 127.0.10.211:8080 check weight 10 inter 9602 fall 2 rise 1
+ server 127.0.10.212 127.0.10.212:8080 check weight 10 inter 515 fall 2 rise 1
+ server 127.0.10.213 127.0.10.213:8080 check weight 10 inter 729 fall 2 rise 1
+ server 127.0.10.214 127.0.10.214:8080 check weight 10 inter 8863 fall 2 rise 1
+ server 127.0.10.215 127.0.10.215:8080 check weight 10 inter 2978 fall 2 rise 1
+ server 127.0.10.216 127.0.10.216:8080 check weight 10 inter 7600 fall 2 rise 1
+ server 127.0.10.217 127.0.10.217:8080 check weight 10 inter 2505 fall 2 rise 1
+ server 127.0.10.218 127.0.10.218:8080 check weight 10 inter 6368 fall 2 rise 1
+ server 127.0.10.219 127.0.10.219:8080 check weight 10 inter 6370 fall 2 rise 1
+ server 127.0.10.220 127.0.10.220:8080 check weight 10 inter 2315 fall 2 rise 1
+ server 127.0.10.221 127.0.10.221:8080 check weight 10 inter 6213 fall 2 rise 1
+ server 127.0.10.222 127.0.10.222:8080 check weight 10 inter 2113 fall 2 rise 1
+ server 127.0.10.223 127.0.10.223:8080 check weight 10 inter 1053 fall 2 rise 1
+ server 127.0.10.224 127.0.10.224:8080 check weight 10 inter 4250 fall 2 rise 1
+ server 127.0.10.225 127.0.10.225:8080 check weight 10 inter 7323 fall 2 rise 1
+ server 127.0.10.226 127.0.10.226:8080 check weight 10 inter 9670 fall 2 rise 1
+ server 127.0.10.227 127.0.10.227:8080 check weight 10 inter 6764 fall 2 rise 1
+ server 127.0.10.228 127.0.10.228:8080 check weight 10 inter 8317 fall 2 rise 1
+ server 127.0.10.229 127.0.10.229:8080 check weight 10 inter 4840 fall 2 rise 1
+ server 127.0.10.230 127.0.10.230:8080 check weight 10 inter 2438 fall 2 rise 1
+ server 127.0.10.231 127.0.10.231:8080 check weight 10 inter 2704 fall 2 rise 1
+ server 127.0.10.232 127.0.10.232:8080 check weight 10 inter 7123 fall 2 rise 1
+ server 127.0.10.233 127.0.10.233:8080 check weight 10 inter 8674 fall 2 rise 1
+ server 127.0.10.234 127.0.10.234:8080 check weight 10 inter 1965 fall 2 rise 1
+ server 127.0.10.235 127.0.10.235:8080 check weight 10 inter 9685 fall 2 rise 1
+ server 127.0.10.236 127.0.10.236:8080 check weight 10 inter 1968 fall 2 rise 1
+ server 127.0.10.237 127.0.10.237:8080 check weight 10 inter 8033 fall 2 rise 1
+ server 127.0.10.238 127.0.10.238:8080 check weight 10 inter 5472 fall 2 rise 1
+ server 127.0.10.239 127.0.10.239:8080 check weight 10 inter 9574 fall 2 rise 1
+ server 127.0.10.240 127.0.10.240:8080 check weight 10 inter 2879 fall 2 rise 1
+ server 127.0.10.241 127.0.10.241:8080 check weight 10 inter 5265 fall 2 rise 1
+ server 127.0.10.242 127.0.10.242:8080 check weight 10 inter 3556 fall 2 rise 1
+ server 127.0.10.243 127.0.10.243:8080 check weight 10 inter 2656 fall 2 rise 1
+ server 127.0.10.244 127.0.10.244:8080 check weight 10 inter 982 fall 2 rise 1
+ server 127.0.10.245 127.0.10.245:8080 check weight 10 inter 4313 fall 2 rise 1
+ server 127.0.10.246 127.0.10.246:8080 check weight 10 inter 6733 fall 2 rise 1
+ server 127.0.10.247 127.0.10.247:8080 check weight 10 inter 5049 fall 2 rise 1
+ server 127.0.10.248 127.0.10.248:8080 check weight 10 inter 9791 fall 2 rise 1
+ server 127.0.10.249 127.0.10.249:8080 check weight 10 inter 8392 fall 2 rise 1
+ server 127.0.10.250 127.0.10.250:8080 check weight 10 inter 743 fall 2 rise 1
+ server 127.0.11.001 127.0.11.1:8080 check weight 10 inter 7405 fall 2 rise 1
+ server 127.0.11.002 127.0.11.2:8080 check weight 10 inter 5472 fall 2 rise 1
+ server 127.0.11.003 127.0.11.3:8080 check weight 10 inter 9733 fall 2 rise 1
+ server 127.0.11.004 127.0.11.4:8080 check weight 10 inter 4631 fall 2 rise 1
+ server 127.0.11.005 127.0.11.5:8080 check weight 10 inter 1515 fall 2 rise 1
+ server 127.0.11.006 127.0.11.6:8080 check weight 10 inter 6367 fall 2 rise 1
+ server 127.0.11.007 127.0.11.7:8080 check weight 10 inter 1059 fall 2 rise 1
+ server 127.0.11.008 127.0.11.8:8080 check weight 10 inter 2319 fall 2 rise 1
+ server 127.0.11.009 127.0.11.9:8080 check weight 10 inter 2644 fall 2 rise 1
+ server 127.0.11.010 127.0.11.10:8080 check weight 10 inter 3671 fall 2 rise 1
+ server 127.0.11.011 127.0.11.11:8080 check weight 10 inter 3990 fall 2 rise 1
+ server 127.0.11.012 127.0.11.12:8080 check weight 10 inter 3220 fall 2 rise 1
+ server 127.0.11.013 127.0.11.13:8080 check weight 10 inter 9295 fall 2 rise 1
+ server 127.0.11.014 127.0.11.14:8080 check weight 10 inter 1865 fall 2 rise 1
+ server 127.0.11.015 127.0.11.15:8080 check weight 10 inter 9384 fall 2 rise 1
+ server 127.0.11.016 127.0.11.16:8080 check weight 10 inter 1967 fall 2 rise 1
+ server 127.0.11.017 127.0.11.17:8080 check weight 10 inter 2540 fall 2 rise 1
+ server 127.0.11.018 127.0.11.18:8080 check weight 10 inter 4019 fall 2 rise 1
+ server 127.0.11.019 127.0.11.19:8080 check weight 10 inter 6058 fall 2 rise 1
+ server 127.0.11.020 127.0.11.20:8080 check weight 10 inter 9781 fall 2 rise 1
+ server 127.0.11.021 127.0.11.21:8080 check weight 10 inter 4015 fall 2 rise 1
+ server 127.0.11.022 127.0.11.22:8080 check weight 10 inter 3953 fall 2 rise 1
+ server 127.0.11.023 127.0.11.23:8080 check weight 10 inter 1836 fall 2 rise 1
+ server 127.0.11.024 127.0.11.24:8080 check weight 10 inter 3800 fall 2 rise 1
+ server 127.0.11.025 127.0.11.25:8080 check weight 10 inter 6841 fall 2 rise 1
+ server 127.0.11.026 127.0.11.26:8080 check weight 10 inter 2694 fall 2 rise 1
+ server 127.0.11.027 127.0.11.27:8080 check weight 10 inter 8715 fall 2 rise 1
+ server 127.0.11.028 127.0.11.28:8080 check weight 10 inter 9 fall 2 rise 1
+ server 127.0.11.029 127.0.11.29:8080 check weight 10 inter 6277 fall 2 rise 1
+ server 127.0.11.030 127.0.11.30:8080 check weight 10 inter 2729 fall 2 rise 1
+ server 127.0.11.031 127.0.11.31:8080 check weight 10 inter 4141 fall 2 rise 1
+ server 127.0.11.032 127.0.11.32:8080 check weight 10 inter 9332 fall 2 rise 1
+ server 127.0.11.033 127.0.11.33:8080 check weight 10 inter 2840 fall 2 rise 1
+ server 127.0.11.034 127.0.11.34:8080 check weight 10 inter 896 fall 2 rise 1
+ server 127.0.11.035 127.0.11.35:8080 check weight 10 inter 7677 fall 2 rise 1
+ server 127.0.11.036 127.0.11.36:8080 check weight 10 inter 5705 fall 2 rise 1
+ server 127.0.11.037 127.0.11.37:8080 check weight 10 inter 4677 fall 2 rise 1
+ server 127.0.11.038 127.0.11.38:8080 check weight 10 inter 6797 fall 2 rise 1
+ server 127.0.11.039 127.0.11.39:8080 check weight 10 inter 7559 fall 2 rise 1
+ server 127.0.11.040 127.0.11.40:8080 check weight 10 inter 2060 fall 2 rise 1
+ server 127.0.11.041 127.0.11.41:8080 check weight 10 inter 1779 fall 2 rise 1
+ server 127.0.11.042 127.0.11.42:8080 check weight 10 inter 795 fall 2 rise 1
+ server 127.0.11.043 127.0.11.43:8080 check weight 10 inter 6242 fall 2 rise 1
+ server 127.0.11.044 127.0.11.44:8080 check weight 10 inter 5612 fall 2 rise 1
+ server 127.0.11.045 127.0.11.45:8080 check weight 10 inter 6207 fall 2 rise 1
+ server 127.0.11.046 127.0.11.46:8080 check weight 10 inter 5255 fall 2 rise 1
+ server 127.0.11.047 127.0.11.47:8080 check weight 10 inter 6672 fall 2 rise 1
+ server 127.0.11.048 127.0.11.48:8080 check weight 10 inter 3476 fall 2 rise 1
+ server 127.0.11.049 127.0.11.49:8080 check weight 10 inter 2001 fall 2 rise 1
+ server 127.0.11.050 127.0.11.50:8080 check weight 10 inter 6318 fall 2 rise 1
+ server 127.0.11.051 127.0.11.51:8080 check weight 10 inter 3464 fall 2 rise 1
+ server 127.0.11.052 127.0.11.52:8080 check weight 10 inter 1075 fall 2 rise 1
+ server 127.0.11.053 127.0.11.53:8080 check weight 10 inter 6624 fall 2 rise 1
+ server 127.0.11.054 127.0.11.54:8080 check weight 10 inter 1114 fall 2 rise 1
+ server 127.0.11.055 127.0.11.55:8080 check weight 10 inter 3934 fall 2 rise 1
+ server 127.0.11.056 127.0.11.56:8080 check weight 10 inter 4987 fall 2 rise 1
+ server 127.0.11.057 127.0.11.57:8080 check weight 10 inter 8852 fall 2 rise 1
+ server 127.0.11.058 127.0.11.58:8080 check weight 10 inter 6444 fall 2 rise 1
+ server 127.0.11.059 127.0.11.59:8080 check weight 10 inter 599 fall 2 rise 1
+ server 127.0.11.060 127.0.11.60:8080 check weight 10 inter 1451 fall 2 rise 1
+ server 127.0.11.061 127.0.11.61:8080 check weight 10 inter 5424 fall 2 rise 1
+ server 127.0.11.062 127.0.11.62:8080 check weight 10 inter 3400 fall 2 rise 1
+ server 127.0.11.063 127.0.11.63:8080 check weight 10 inter 2030 fall 2 rise 1
+ server 127.0.11.064 127.0.11.64:8080 check weight 10 inter 1278 fall 2 rise 1
+ server 127.0.11.065 127.0.11.65:8080 check weight 10 inter 5803 fall 2 rise 1
+ server 127.0.11.066 127.0.11.66:8080 check weight 10 inter 563 fall 2 rise 1
+ server 127.0.11.067 127.0.11.67:8080 check weight 10 inter 3861 fall 2 rise 1
+ server 127.0.11.068 127.0.11.68:8080 check weight 10 inter 7120 fall 2 rise 1
+ server 127.0.11.069 127.0.11.69:8080 check weight 10 inter 2483 fall 2 rise 1
+ server 127.0.11.070 127.0.11.70:8080 check weight 10 inter 1310 fall 2 rise 1
+ server 127.0.11.071 127.0.11.71:8080 check weight 10 inter 5773 fall 2 rise 1
+ server 127.0.11.072 127.0.11.72:8080 check weight 10 inter 8500 fall 2 rise 1
+ server 127.0.11.073 127.0.11.73:8080 check weight 10 inter 3840 fall 2 rise 1
+ server 127.0.11.074 127.0.11.74:8080 check weight 10 inter 815 fall 2 rise 1
+ server 127.0.11.075 127.0.11.75:8080 check weight 10 inter 1604 fall 2 rise 1
+ server 127.0.11.076 127.0.11.76:8080 check weight 10 inter 9463 fall 2 rise 1
+ server 127.0.11.077 127.0.11.77:8080 check weight 10 inter 8682 fall 2 rise 1
+ server 127.0.11.078 127.0.11.78:8080 check weight 10 inter 6420 fall 2 rise 1
+ server 127.0.11.079 127.0.11.79:8080 check weight 10 inter 9666 fall 2 rise 1
+ server 127.0.11.080 127.0.11.80:8080 check weight 10 inter 8369 fall 2 rise 1
+ server 127.0.11.081 127.0.11.81:8080 check weight 10 inter 7786 fall 2 rise 1
+ server 127.0.11.082 127.0.11.82:8080 check weight 10 inter 9255 fall 2 rise 1
+ server 127.0.11.083 127.0.11.83:8080 check weight 10 inter 3793 fall 2 rise 1
+ server 127.0.11.084 127.0.11.84:8080 check weight 10 inter 7377 fall 2 rise 1
+ server 127.0.11.085 127.0.11.85:8080 check weight 10 inter 4976 fall 2 rise 1
+ server 127.0.11.086 127.0.11.86:8080 check weight 10 inter 1424 fall 2 rise 1
+ server 127.0.11.087 127.0.11.87:8080 check weight 10 inter 975 fall 2 rise 1
+ server 127.0.11.088 127.0.11.88:8080 check weight 10 inter 5845 fall 2 rise 1
+ server 127.0.11.089 127.0.11.89:8080 check weight 10 inter 7498 fall 2 rise 1
+ server 127.0.11.090 127.0.11.90:8080 check weight 10 inter 6461 fall 2 rise 1
+ server 127.0.11.091 127.0.11.91:8080 check weight 10 inter 1621 fall 2 rise 1
+ server 127.0.11.092 127.0.11.92:8080 check weight 10 inter 9068 fall 2 rise 1
+ server 127.0.11.093 127.0.11.93:8080 check weight 10 inter 1440 fall 2 rise 1
+ server 127.0.11.094 127.0.11.94:8080 check weight 10 inter 306 fall 2 rise 1
+ server 127.0.11.095 127.0.11.95:8080 check weight 10 inter 1877 fall 2 rise 1
+ server 127.0.11.096 127.0.11.96:8080 check weight 10 inter 1197 fall 2 rise 1
+ server 127.0.11.097 127.0.11.97:8080 check weight 10 inter 9840 fall 2 rise 1
+ server 127.0.11.098 127.0.11.98:8080 check weight 10 inter 4353 fall 2 rise 1
+ server 127.0.11.099 127.0.11.99:8080 check weight 10 inter 8790 fall 2 rise 1
+ server 127.0.11.100 127.0.11.100:8080 check weight 10 inter 4212 fall 2 rise 1
+ server 127.0.11.101 127.0.11.101:8080 check weight 10 inter 908 fall 2 rise 1
+ server 127.0.11.102 127.0.11.102:8080 check weight 10 inter 5496 fall 2 rise 1
+ server 127.0.11.103 127.0.11.103:8080 check weight 10 inter 6655 fall 2 rise 1
+ server 127.0.11.104 127.0.11.104:8080 check weight 10 inter 5647 fall 2 rise 1
+ server 127.0.11.105 127.0.11.105:8080 check weight 10 inter 5077 fall 2 rise 1
+ server 127.0.11.106 127.0.11.106:8080 check weight 10 inter 9373 fall 2 rise 1
+ server 127.0.11.107 127.0.11.107:8080 check weight 10 inter 5077 fall 2 rise 1
+ server 127.0.11.108 127.0.11.108:8080 check weight 10 inter 6476 fall 2 rise 1
+ server 127.0.11.109 127.0.11.109:8080 check weight 10 inter 7685 fall 2 rise 1
+ server 127.0.11.110 127.0.11.110:8080 check weight 10 inter 9850 fall 2 rise 1
+ server 127.0.11.111 127.0.11.111:8080 check weight 10 inter 6100 fall 2 rise 1
+ server 127.0.11.112 127.0.11.112:8080 check weight 10 inter 5696 fall 2 rise 1
+ server 127.0.11.113 127.0.11.113:8080 check weight 10 inter 1728 fall 2 rise 1
+ server 127.0.11.114 127.0.11.114:8080 check weight 10 inter 7736 fall 2 rise 1
+ server 127.0.11.115 127.0.11.115:8080 check weight 10 inter 4511 fall 2 rise 1
+ server 127.0.11.116 127.0.11.116:8080 check weight 10 inter 5799 fall 2 rise 1
+ server 127.0.11.117 127.0.11.117:8080 check weight 10 inter 7913 fall 2 rise 1
+ server 127.0.11.118 127.0.11.118:8080 check weight 10 inter 1772 fall 2 rise 1
+ server 127.0.11.119 127.0.11.119:8080 check weight 10 inter 3752 fall 2 rise 1
+ server 127.0.11.120 127.0.11.120:8080 check weight 10 inter 1200 fall 2 rise 1
+ server 127.0.11.121 127.0.11.121:8080 check weight 10 inter 4771 fall 2 rise 1
+ server 127.0.11.122 127.0.11.122:8080 check weight 10 inter 1492 fall 2 rise 1
+ server 127.0.11.123 127.0.11.123:8080 check weight 10 inter 9054 fall 2 rise 1
+ server 127.0.11.124 127.0.11.124:8080 check weight 10 inter 9956 fall 2 rise 1
+ server 127.0.11.125 127.0.11.125:8080 check weight 10 inter 2846 fall 2 rise 1
+ server 127.0.11.126 127.0.11.126:8080 check weight 10 inter 2849 fall 2 rise 1
+ server 127.0.11.127 127.0.11.127:8080 check weight 10 inter 8265 fall 2 rise 1
+ server 127.0.11.128 127.0.11.128:8080 check weight 10 inter 7034 fall 2 rise 1
+ server 127.0.11.129 127.0.11.129:8080 check weight 10 inter 7837 fall 2 rise 1
+ server 127.0.11.130 127.0.11.130:8080 check weight 10 inter 9426 fall 2 rise 1
+ server 127.0.11.131 127.0.11.131:8080 check weight 10 inter 5191 fall 2 rise 1
+ server 127.0.11.132 127.0.11.132:8080 check weight 10 inter 7563 fall 2 rise 1
+ server 127.0.11.133 127.0.11.133:8080 check weight 10 inter 6121 fall 2 rise 1
+ server 127.0.11.134 127.0.11.134:8080 check weight 10 inter 308 fall 2 rise 1
+ server 127.0.11.135 127.0.11.135:8080 check weight 10 inter 9764 fall 2 rise 1
+ server 127.0.11.136 127.0.11.136:8080 check weight 10 inter 214 fall 2 rise 1
+ server 127.0.11.137 127.0.11.137:8080 check weight 10 inter 2152 fall 2 rise 1
+ server 127.0.11.138 127.0.11.138:8080 check weight 10 inter 5547 fall 2 rise 1
+ server 127.0.11.139 127.0.11.139:8080 check weight 10 inter 43 fall 2 rise 1
+ server 127.0.11.140 127.0.11.140:8080 check weight 10 inter 6372 fall 2 rise 1
+ server 127.0.11.141 127.0.11.141:8080 check weight 10 inter 5451 fall 2 rise 1
+ server 127.0.11.142 127.0.11.142:8080 check weight 10 inter 4319 fall 2 rise 1
+ server 127.0.11.143 127.0.11.143:8080 check weight 10 inter 1248 fall 2 rise 1
+ server 127.0.11.144 127.0.11.144:8080 check weight 10 inter 6136 fall 2 rise 1
+ server 127.0.11.145 127.0.11.145:8080 check weight 10 inter 538 fall 2 rise 1
+ server 127.0.11.146 127.0.11.146:8080 check weight 10 inter 2871 fall 2 rise 1
+ server 127.0.11.147 127.0.11.147:8080 check weight 10 inter 5305 fall 2 rise 1
+ server 127.0.11.148 127.0.11.148:8080 check weight 10 inter 4238 fall 2 rise 1
+ server 127.0.11.149 127.0.11.149:8080 check weight 10 inter 2768 fall 2 rise 1
+ server 127.0.11.150 127.0.11.150:8080 check weight 10 inter 4406 fall 2 rise 1
+ server 127.0.11.151 127.0.11.151:8080 check weight 10 inter 591 fall 2 rise 1
+ server 127.0.11.152 127.0.11.152:8080 check weight 10 inter 1345 fall 2 rise 1
+ server 127.0.11.153 127.0.11.153:8080 check weight 10 inter 2006 fall 2 rise 1
+ server 127.0.11.154 127.0.11.154:8080 check weight 10 inter 9256 fall 2 rise 1
+ server 127.0.11.155 127.0.11.155:8080 check weight 10 inter 697 fall 2 rise 1
+ server 127.0.11.156 127.0.11.156:8080 check weight 10 inter 3690 fall 2 rise 1
+ server 127.0.11.157 127.0.11.157:8080 check weight 10 inter 8353 fall 2 rise 1
+ server 127.0.11.158 127.0.11.158:8080 check weight 10 inter 8555 fall 2 rise 1
+ server 127.0.11.159 127.0.11.159:8080 check weight 10 inter 2691 fall 2 rise 1
+ server 127.0.11.160 127.0.11.160:8080 check weight 10 inter 2204 fall 2 rise 1
+ server 127.0.11.161 127.0.11.161:8080 check weight 10 inter 331 fall 2 rise 1
+ server 127.0.11.162 127.0.11.162:8080 check weight 10 inter 846 fall 2 rise 1
+ server 127.0.11.163 127.0.11.163:8080 check weight 10 inter 2480 fall 2 rise 1
+ server 127.0.11.164 127.0.11.164:8080 check weight 10 inter 6848 fall 2 rise 1
+ server 127.0.11.165 127.0.11.165:8080 check weight 10 inter 3858 fall 2 rise 1
+ server 127.0.11.166 127.0.11.166:8080 check weight 10 inter 9290 fall 2 rise 1
+ server 127.0.11.167 127.0.11.167:8080 check weight 10 inter 7732 fall 2 rise 1
+ server 127.0.11.168 127.0.11.168:8080 check weight 10 inter 7776 fall 2 rise 1
+ server 127.0.11.169 127.0.11.169:8080 check weight 10 inter 3842 fall 2 rise 1
+ server 127.0.11.170 127.0.11.170:8080 check weight 10 inter 162 fall 2 rise 1
+ server 127.0.11.171 127.0.11.171:8080 check weight 10 inter 7648 fall 2 rise 1
+ server 127.0.11.172 127.0.11.172:8080 check weight 10 inter 4118 fall 2 rise 1
+ server 127.0.11.173 127.0.11.173:8080 check weight 10 inter 8242 fall 2 rise 1
+ server 127.0.11.174 127.0.11.174:8080 check weight 10 inter 7804 fall 2 rise 1
+ server 127.0.11.175 127.0.11.175:8080 check weight 10 inter 1757 fall 2 rise 1
+ server 127.0.11.176 127.0.11.176:8080 check weight 10 inter 306 fall 2 rise 1
+ server 127.0.11.177 127.0.11.177:8080 check weight 10 inter 5809 fall 2 rise 1
+ server 127.0.11.178 127.0.11.178:8080 check weight 10 inter 285 fall 2 rise 1
+ server 127.0.11.179 127.0.11.179:8080 check weight 10 inter 5319 fall 2 rise 1
+ server 127.0.11.180 127.0.11.180:8080 check weight 10 inter 5838 fall 2 rise 1
+ server 127.0.11.181 127.0.11.181:8080 check weight 10 inter 5376 fall 2 rise 1
+ server 127.0.11.182 127.0.11.182:8080 check weight 10 inter 9702 fall 2 rise 1
+ server 127.0.11.183 127.0.11.183:8080 check weight 10 inter 5662 fall 2 rise 1
+ server 127.0.11.184 127.0.11.184:8080 check weight 10 inter 5442 fall 2 rise 1
+ server 127.0.11.185 127.0.11.185:8080 check weight 10 inter 8189 fall 2 rise 1
+ server 127.0.11.186 127.0.11.186:8080 check weight 10 inter 913 fall 2 rise 1
+ server 127.0.11.187 127.0.11.187:8080 check weight 10 inter 5502 fall 2 rise 1
+ server 127.0.11.188 127.0.11.188:8080 check weight 10 inter 1702 fall 2 rise 1
+ server 127.0.11.189 127.0.11.189:8080 check weight 10 inter 3474 fall 2 rise 1
+ server 127.0.11.190 127.0.11.190:8080 check weight 10 inter 6474 fall 2 rise 1
+ server 127.0.11.191 127.0.11.191:8080 check weight 10 inter 1417 fall 2 rise 1
+ server 127.0.11.192 127.0.11.192:8080 check weight 10 inter 7291 fall 2 rise 1
+ server 127.0.11.193 127.0.11.193:8080 check weight 10 inter 5505 fall 2 rise 1
+ server 127.0.11.194 127.0.11.194:8080 check weight 10 inter 9261 fall 2 rise 1
+ server 127.0.11.195 127.0.11.195:8080 check weight 10 inter 1225 fall 2 rise 1
+ server 127.0.11.196 127.0.11.196:8080 check weight 10 inter 6984 fall 2 rise 1
+ server 127.0.11.197 127.0.11.197:8080 check weight 10 inter 5582 fall 2 rise 1
+ server 127.0.11.198 127.0.11.198:8080 check weight 10 inter 4114 fall 2 rise 1
+ server 127.0.11.199 127.0.11.199:8080 check weight 10 inter 3496 fall 2 rise 1
+ server 127.0.11.200 127.0.11.200:8080 check weight 10 inter 3846 fall 2 rise 1
+ server 127.0.11.201 127.0.11.201:8080 check weight 10 inter 6570 fall 2 rise 1
+ server 127.0.11.202 127.0.11.202:8080 check weight 10 inter 5787 fall 2 rise 1
+ server 127.0.11.203 127.0.11.203:8080 check weight 10 inter 7419 fall 2 rise 1
+ server 127.0.11.204 127.0.11.204:8080 check weight 10 inter 9306 fall 2 rise 1
+ server 127.0.11.205 127.0.11.205:8080 check weight 10 inter 819 fall 2 rise 1
+ server 127.0.11.206 127.0.11.206:8080 check weight 10 inter 7034 fall 2 rise 1
+ server 127.0.11.207 127.0.11.207:8080 check weight 10 inter 3507 fall 2 rise 1
+ server 127.0.11.208 127.0.11.208:8080 check weight 10 inter 3222 fall 2 rise 1
+ server 127.0.11.209 127.0.11.209:8080 check weight 10 inter 9549 fall 2 rise 1
+ server 127.0.11.210 127.0.11.210:8080 check weight 10 inter 1811 fall 2 rise 1
+ server 127.0.11.211 127.0.11.211:8080 check weight 10 inter 4849 fall 2 rise 1
+ server 127.0.11.212 127.0.11.212:8080 check weight 10 inter 62 fall 2 rise 1
+ server 127.0.11.213 127.0.11.213:8080 check weight 10 inter 2831 fall 2 rise 1
+ server 127.0.11.214 127.0.11.214:8080 check weight 10 inter 7861 fall 2 rise 1
+ server 127.0.11.215 127.0.11.215:8080 check weight 10 inter 3436 fall 2 rise 1
+ server 127.0.11.216 127.0.11.216:8080 check weight 10 inter 5354 fall 2 rise 1
+ server 127.0.11.217 127.0.11.217:8080 check weight 10 inter 7181 fall 2 rise 1
+ server 127.0.11.218 127.0.11.218:8080 check weight 10 inter 7671 fall 2 rise 1
+ server 127.0.11.219 127.0.11.219:8080 check weight 10 inter 9109 fall 2 rise 1
+ server 127.0.11.220 127.0.11.220:8080 check weight 10 inter 673 fall 2 rise 1
+ server 127.0.11.221 127.0.11.221:8080 check weight 10 inter 4985 fall 2 rise 1
+ server 127.0.11.222 127.0.11.222:8080 check weight 10 inter 4852 fall 2 rise 1
+ server 127.0.11.223 127.0.11.223:8080 check weight 10 inter 1012 fall 2 rise 1
+ server 127.0.11.224 127.0.11.224:8080 check weight 10 inter 4512 fall 2 rise 1
+ server 127.0.11.225 127.0.11.225:8080 check weight 10 inter 888 fall 2 rise 1
+ server 127.0.11.226 127.0.11.226:8080 check weight 10 inter 7686 fall 2 rise 1
+ server 127.0.11.227 127.0.11.227:8080 check weight 10 inter 6987 fall 2 rise 1
+ server 127.0.11.228 127.0.11.228:8080 check weight 10 inter 9213 fall 2 rise 1
+ server 127.0.11.229 127.0.11.229:8080 check weight 10 inter 9558 fall 2 rise 1
+ server 127.0.11.230 127.0.11.230:8080 check weight 10 inter 6276 fall 2 rise 1
+ server 127.0.11.231 127.0.11.231:8080 check weight 10 inter 8598 fall 2 rise 1
+ server 127.0.11.232 127.0.11.232:8080 check weight 10 inter 6639 fall 2 rise 1
+ server 127.0.11.233 127.0.11.233:8080 check weight 10 inter 4409 fall 2 rise 1
+ server 127.0.11.234 127.0.11.234:8080 check weight 10 inter 1196 fall 2 rise 1
+ server 127.0.11.235 127.0.11.235:8080 check weight 10 inter 1109 fall 2 rise 1
+ server 127.0.11.236 127.0.11.236:8080 check weight 10 inter 8473 fall 2 rise 1
+ server 127.0.11.237 127.0.11.237:8080 check weight 10 inter 6438 fall 2 rise 1
+ server 127.0.11.238 127.0.11.238:8080 check weight 10 inter 2399 fall 2 rise 1
+ server 127.0.11.239 127.0.11.239:8080 check weight 10 inter 9290 fall 2 rise 1
+ server 127.0.11.240 127.0.11.240:8080 check weight 10 inter 9834 fall 2 rise 1
+ server 127.0.11.241 127.0.11.241:8080 check weight 10 inter 3110 fall 2 rise 1
+ server 127.0.11.242 127.0.11.242:8080 check weight 10 inter 9951 fall 2 rise 1
+ server 127.0.11.243 127.0.11.243:8080 check weight 10 inter 1952 fall 2 rise 1
+ server 127.0.11.244 127.0.11.244:8080 check weight 10 inter 7158 fall 2 rise 1
+ server 127.0.11.245 127.0.11.245:8080 check weight 10 inter 9030 fall 2 rise 1
+ server 127.0.11.246 127.0.11.246:8080 check weight 10 inter 1673 fall 2 rise 1
+ server 127.0.11.247 127.0.11.247:8080 check weight 10 inter 4225 fall 2 rise 1
+ server 127.0.11.248 127.0.11.248:8080 check weight 10 inter 4578 fall 2 rise 1
+ server 127.0.11.249 127.0.11.249:8080 check weight 10 inter 4961 fall 2 rise 1
+ server 127.0.11.250 127.0.11.250:8080 check weight 10 inter 8641 fall 2 rise 1
+ server 127.0.12.001 127.0.12.1:8080 check weight 10 inter 6614 fall 2 rise 1
+ server 127.0.12.002 127.0.12.2:8080 check weight 10 inter 8066 fall 2 rise 1
+ server 127.0.12.003 127.0.12.3:8080 check weight 10 inter 8157 fall 2 rise 1
+ server 127.0.12.004 127.0.12.4:8080 check weight 10 inter 3458 fall 2 rise 1
+ server 127.0.12.005 127.0.12.5:8080 check weight 10 inter 6653 fall 2 rise 1
+ server 127.0.12.006 127.0.12.6:8080 check weight 10 inter 6019 fall 2 rise 1
+ server 127.0.12.007 127.0.12.7:8080 check weight 10 inter 4424 fall 2 rise 1
+ server 127.0.12.008 127.0.12.8:8080 check weight 10 inter 6419 fall 2 rise 1
+ server 127.0.12.009 127.0.12.9:8080 check weight 10 inter 2012 fall 2 rise 1
+ server 127.0.12.010 127.0.12.10:8080 check weight 10 inter 5559 fall 2 rise 1
+ server 127.0.12.011 127.0.12.11:8080 check weight 10 inter 1473 fall 2 rise 1
+ server 127.0.12.012 127.0.12.12:8080 check weight 10 inter 2632 fall 2 rise 1
+ server 127.0.12.013 127.0.12.13:8080 check weight 10 inter 6584 fall 2 rise 1
+ server 127.0.12.014 127.0.12.14:8080 check weight 10 inter 7699 fall 2 rise 1
+ server 127.0.12.015 127.0.12.15:8080 check weight 10 inter 775 fall 2 rise 1
+ server 127.0.12.016 127.0.12.16:8080 check weight 10 inter 8606 fall 2 rise 1
+ server 127.0.12.017 127.0.12.17:8080 check weight 10 inter 2692 fall 2 rise 1
+ server 127.0.12.018 127.0.12.18:8080 check weight 10 inter 11 fall 2 rise 1
+ server 127.0.12.019 127.0.12.19:8080 check weight 10 inter 2386 fall 2 rise 1
+ server 127.0.12.020 127.0.12.20:8080 check weight 10 inter 9652 fall 2 rise 1
+ server 127.0.12.021 127.0.12.21:8080 check weight 10 inter 6666 fall 2 rise 1
+ server 127.0.12.022 127.0.12.22:8080 check weight 10 inter 6728 fall 2 rise 1
+ server 127.0.12.023 127.0.12.23:8080 check weight 10 inter 8259 fall 2 rise 1
+ server 127.0.12.024 127.0.12.24:8080 check weight 10 inter 26 fall 2 rise 1
+ server 127.0.12.025 127.0.12.25:8080 check weight 10 inter 299 fall 2 rise 1
+ server 127.0.12.026 127.0.12.26:8080 check weight 10 inter 927 fall 2 rise 1
+ server 127.0.12.027 127.0.12.27:8080 check weight 10 inter 8060 fall 2 rise 1
+ server 127.0.12.028 127.0.12.28:8080 check weight 10 inter 9290 fall 2 rise 1
+ server 127.0.12.029 127.0.12.29:8080 check weight 10 inter 8646 fall 2 rise 1
+ server 127.0.12.030 127.0.12.30:8080 check weight 10 inter 6546 fall 2 rise 1
+ server 127.0.12.031 127.0.12.31:8080 check weight 10 inter 7919 fall 2 rise 1
+ server 127.0.12.032 127.0.12.32:8080 check weight 10 inter 4681 fall 2 rise 1
+ server 127.0.12.033 127.0.12.33:8080 check weight 10 inter 2569 fall 2 rise 1
+ server 127.0.12.034 127.0.12.34:8080 check weight 10 inter 7666 fall 2 rise 1
+ server 127.0.12.035 127.0.12.35:8080 check weight 10 inter 3992 fall 2 rise 1
+ server 127.0.12.036 127.0.12.36:8080 check weight 10 inter 2956 fall 2 rise 1
+ server 127.0.12.037 127.0.12.37:8080 check weight 10 inter 6587 fall 2 rise 1
+ server 127.0.12.038 127.0.12.38:8080 check weight 10 inter 8363 fall 2 rise 1
+ server 127.0.12.039 127.0.12.39:8080 check weight 10 inter 8937 fall 2 rise 1
+ server 127.0.12.040 127.0.12.40:8080 check weight 10 inter 7498 fall 2 rise 1
+ server 127.0.12.041 127.0.12.41:8080 check weight 10 inter 2470 fall 2 rise 1
+ server 127.0.12.042 127.0.12.42:8080 check weight 10 inter 6061 fall 2 rise 1
+ server 127.0.12.043 127.0.12.43:8080 check weight 10 inter 2121 fall 2 rise 1
+ server 127.0.12.044 127.0.12.44:8080 check weight 10 inter 5398 fall 2 rise 1
+ server 127.0.12.045 127.0.12.45:8080 check weight 10 inter 5141 fall 2 rise 1
+ server 127.0.12.046 127.0.12.46:8080 check weight 10 inter 9579 fall 2 rise 1
+ server 127.0.12.047 127.0.12.47:8080 check weight 10 inter 9500 fall 2 rise 1
+ server 127.0.12.048 127.0.12.48:8080 check weight 10 inter 281 fall 2 rise 1
+ server 127.0.12.049 127.0.12.49:8080 check weight 10 inter 1761 fall 2 rise 1
+ server 127.0.12.050 127.0.12.50:8080 check weight 10 inter 326 fall 2 rise 1
+ server 127.0.12.051 127.0.12.51:8080 check weight 10 inter 1680 fall 2 rise 1
+ server 127.0.12.052 127.0.12.52:8080 check weight 10 inter 8833 fall 2 rise 1
+ server 127.0.12.053 127.0.12.53:8080 check weight 10 inter 6268 fall 2 rise 1
+ server 127.0.12.054 127.0.12.54:8080 check weight 10 inter 3192 fall 2 rise 1
+ server 127.0.12.055 127.0.12.55:8080 check weight 10 inter 6109 fall 2 rise 1
+ server 127.0.12.056 127.0.12.56:8080 check weight 10 inter 8028 fall 2 rise 1
+ server 127.0.12.057 127.0.12.57:8080 check weight 10 inter 4089 fall 2 rise 1
+ server 127.0.12.058 127.0.12.58:8080 check weight 10 inter 3184 fall 2 rise 1
+ server 127.0.12.059 127.0.12.59:8080 check weight 10 inter 810 fall 2 rise 1
+ server 127.0.12.060 127.0.12.60:8080 check weight 10 inter 9762 fall 2 rise 1
+ server 127.0.12.061 127.0.12.61:8080 check weight 10 inter 7617 fall 2 rise 1
+ server 127.0.12.062 127.0.12.62:8080 check weight 10 inter 9088 fall 2 rise 1
+ server 127.0.12.063 127.0.12.63:8080 check weight 10 inter 4560 fall 2 rise 1
+ server 127.0.12.064 127.0.12.64:8080 check weight 10 inter 1157 fall 2 rise 1
+ server 127.0.12.065 127.0.12.65:8080 check weight 10 inter 983 fall 2 rise 1
+ server 127.0.12.066 127.0.12.66:8080 check weight 10 inter 8504 fall 2 rise 1
+ server 127.0.12.067 127.0.12.67:8080 check weight 10 inter 287 fall 2 rise 1
+ server 127.0.12.068 127.0.12.68:8080 check weight 10 inter 6602 fall 2 rise 1
+ server 127.0.12.069 127.0.12.69:8080 check weight 10 inter 6598 fall 2 rise 1
+ server 127.0.12.070 127.0.12.70:8080 check weight 10 inter 8032 fall 2 rise 1
+ server 127.0.12.071 127.0.12.71:8080 check weight 10 inter 2130 fall 2 rise 1
+ server 127.0.12.072 127.0.12.72:8080 check weight 10 inter 6405 fall 2 rise 1
+ server 127.0.12.073 127.0.12.73:8080 check weight 10 inter 9520 fall 2 rise 1
+ server 127.0.12.074 127.0.12.74:8080 check weight 10 inter 7079 fall 2 rise 1
+ server 127.0.12.075 127.0.12.75:8080 check weight 10 inter 5252 fall 2 rise 1
+ server 127.0.12.076 127.0.12.76:8080 check weight 10 inter 9382 fall 2 rise 1
+ server 127.0.12.077 127.0.12.77:8080 check weight 10 inter 6725 fall 2 rise 1
+ server 127.0.12.078 127.0.12.78:8080 check weight 10 inter 8159 fall 2 rise 1
+ server 127.0.12.079 127.0.12.79:8080 check weight 10 inter 6527 fall 2 rise 1
+ server 127.0.12.080 127.0.12.80:8080 check weight 10 inter 1925 fall 2 rise 1
+ server 127.0.12.081 127.0.12.81:8080 check weight 10 inter 1205 fall 2 rise 1
+ server 127.0.12.082 127.0.12.82:8080 check weight 10 inter 9381 fall 2 rise 1
+ server 127.0.12.083 127.0.12.83:8080 check weight 10 inter 8600 fall 2 rise 1
+ server 127.0.12.084 127.0.12.84:8080 check weight 10 inter 878 fall 2 rise 1
+ server 127.0.12.085 127.0.12.85:8080 check weight 10 inter 4566 fall 2 rise 1
+ server 127.0.12.086 127.0.12.86:8080 check weight 10 inter 145 fall 2 rise 1
+ server 127.0.12.087 127.0.12.87:8080 check weight 10 inter 8188 fall 2 rise 1
+ server 127.0.12.088 127.0.12.88:8080 check weight 10 inter 1693 fall 2 rise 1
+ server 127.0.12.089 127.0.12.89:8080 check weight 10 inter 5908 fall 2 rise 1
+ server 127.0.12.090 127.0.12.90:8080 check weight 10 inter 9298 fall 2 rise 1
+ server 127.0.12.091 127.0.12.91:8080 check weight 10 inter 6664 fall 2 rise 1
+ server 127.0.12.092 127.0.12.92:8080 check weight 10 inter 3656 fall 2 rise 1
+ server 127.0.12.093 127.0.12.93:8080 check weight 10 inter 3436 fall 2 rise 1
+ server 127.0.12.094 127.0.12.94:8080 check weight 10 inter 9974 fall 2 rise 1
+ server 127.0.12.095 127.0.12.95:8080 check weight 10 inter 1035 fall 2 rise 1
+ server 127.0.12.096 127.0.12.96:8080 check weight 10 inter 6879 fall 2 rise 1
+ server 127.0.12.097 127.0.12.97:8080 check weight 10 inter 7015 fall 2 rise 1
+ server 127.0.12.098 127.0.12.98:8080 check weight 10 inter 5932 fall 2 rise 1
+ server 127.0.12.099 127.0.12.99:8080 check weight 10 inter 3341 fall 2 rise 1
+ server 127.0.12.100 127.0.12.100:8080 check weight 10 inter 244 fall 2 rise 1
+ server 127.0.12.101 127.0.12.101:8080 check weight 10 inter 8311 fall 2 rise 1
+ server 127.0.12.102 127.0.12.102:8080 check weight 10 inter 3147 fall 2 rise 1
+ server 127.0.12.103 127.0.12.103:8080 check weight 10 inter 1052 fall 2 rise 1
+ server 127.0.12.104 127.0.12.104:8080 check weight 10 inter 362 fall 2 rise 1
+ server 127.0.12.105 127.0.12.105:8080 check weight 10 inter 5474 fall 2 rise 1
+ server 127.0.12.106 127.0.12.106:8080 check weight 10 inter 2990 fall 2 rise 1
+ server 127.0.12.107 127.0.12.107:8080 check weight 10 inter 347 fall 2 rise 1
+ server 127.0.12.108 127.0.12.108:8080 check weight 10 inter 1984 fall 2 rise 1
+ server 127.0.12.109 127.0.12.109:8080 check weight 10 inter 2401 fall 2 rise 1
+ server 127.0.12.110 127.0.12.110:8080 check weight 10 inter 4790 fall 2 rise 1
+ server 127.0.12.111 127.0.12.111:8080 check weight 10 inter 2681 fall 2 rise 1
+ server 127.0.12.112 127.0.12.112:8080 check weight 10 inter 8218 fall 2 rise 1
+ server 127.0.12.113 127.0.12.113:8080 check weight 10 inter 9527 fall 2 rise 1
+ server 127.0.12.114 127.0.12.114:8080 check weight 10 inter 5149 fall 2 rise 1
+ server 127.0.12.115 127.0.12.115:8080 check weight 10 inter 1457 fall 2 rise 1
+ server 127.0.12.116 127.0.12.116:8080 check weight 10 inter 2083 fall 2 rise 1
+ server 127.0.12.117 127.0.12.117:8080 check weight 10 inter 1106 fall 2 rise 1
+ server 127.0.12.118 127.0.12.118:8080 check weight 10 inter 5676 fall 2 rise 1
+ server 127.0.12.119 127.0.12.119:8080 check weight 10 inter 8135 fall 2 rise 1
+ server 127.0.12.120 127.0.12.120:8080 check weight 10 inter 7733 fall 2 rise 1
+ server 127.0.12.121 127.0.12.121:8080 check weight 10 inter 7796 fall 2 rise 1
+ server 127.0.12.122 127.0.12.122:8080 check weight 10 inter 5420 fall 2 rise 1
+ server 127.0.12.123 127.0.12.123:8080 check weight 10 inter 3502 fall 2 rise 1
+ server 127.0.12.124 127.0.12.124:8080 check weight 10 inter 7972 fall 2 rise 1
+ server 127.0.12.125 127.0.12.125:8080 check weight 10 inter 3166 fall 2 rise 1
+ server 127.0.12.126 127.0.12.126:8080 check weight 10 inter 8604 fall 2 rise 1
+ server 127.0.12.127 127.0.12.127:8080 check weight 10 inter 5609 fall 2 rise 1
+ server 127.0.12.128 127.0.12.128:8080 check weight 10 inter 9629 fall 2 rise 1
+ server 127.0.12.129 127.0.12.129:8080 check weight 10 inter 8049 fall 2 rise 1
+ server 127.0.12.130 127.0.12.130:8080 check weight 10 inter 6087 fall 2 rise 1
+ server 127.0.12.131 127.0.12.131:8080 check weight 10 inter 7944 fall 2 rise 1
+ server 127.0.12.132 127.0.12.132:8080 check weight 10 inter 5090 fall 2 rise 1
+ server 127.0.12.133 127.0.12.133:8080 check weight 10 inter 9188 fall 2 rise 1
+ server 127.0.12.134 127.0.12.134:8080 check weight 10 inter 1763 fall 2 rise 1
+ server 127.0.12.135 127.0.12.135:8080 check weight 10 inter 8703 fall 2 rise 1
+ server 127.0.12.136 127.0.12.136:8080 check weight 10 inter 5571 fall 2 rise 1
+ server 127.0.12.137 127.0.12.137:8080 check weight 10 inter 2100 fall 2 rise 1
+ server 127.0.12.138 127.0.12.138:8080 check weight 10 inter 1722 fall 2 rise 1
+ server 127.0.12.139 127.0.12.139:8080 check weight 10 inter 6453 fall 2 rise 1
+ server 127.0.12.140 127.0.12.140:8080 check weight 10 inter 1499 fall 2 rise 1
+ server 127.0.12.141 127.0.12.141:8080 check weight 10 inter 767 fall 2 rise 1
+ server 127.0.12.142 127.0.12.142:8080 check weight 10 inter 7992 fall 2 rise 1
+ server 127.0.12.143 127.0.12.143:8080 check weight 10 inter 9201 fall 2 rise 1
+ server 127.0.12.144 127.0.12.144:8080 check weight 10 inter 2816 fall 2 rise 1
+ server 127.0.12.145 127.0.12.145:8080 check weight 10 inter 223 fall 2 rise 1
+ server 127.0.12.146 127.0.12.146:8080 check weight 10 inter 9161 fall 2 rise 1
+ server 127.0.12.147 127.0.12.147:8080 check weight 10 inter 7060 fall 2 rise 1
+ server 127.0.12.148 127.0.12.148:8080 check weight 10 inter 1336 fall 2 rise 1
+ server 127.0.12.149 127.0.12.149:8080 check weight 10 inter 5735 fall 2 rise 1
+ server 127.0.12.150 127.0.12.150:8080 check weight 10 inter 7807 fall 2 rise 1
+ server 127.0.12.151 127.0.12.151:8080 check weight 10 inter 7239 fall 2 rise 1
+ server 127.0.12.152 127.0.12.152:8080 check weight 10 inter 9068 fall 2 rise 1
+ server 127.0.12.153 127.0.12.153:8080 check weight 10 inter 4015 fall 2 rise 1
+ server 127.0.12.154 127.0.12.154:8080 check weight 10 inter 8486 fall 2 rise 1
+ server 127.0.12.155 127.0.12.155:8080 check weight 10 inter 3235 fall 2 rise 1
+ server 127.0.12.156 127.0.12.156:8080 check weight 10 inter 3997 fall 2 rise 1
+ server 127.0.12.157 127.0.12.157:8080 check weight 10 inter 9711 fall 2 rise 1
+ server 127.0.12.158 127.0.12.158:8080 check weight 10 inter 4313 fall 2 rise 1
+ server 127.0.12.159 127.0.12.159:8080 check weight 10 inter 4194 fall 2 rise 1
+ server 127.0.12.160 127.0.12.160:8080 check weight 10 inter 1705 fall 2 rise 1
+ server 127.0.12.161 127.0.12.161:8080 check weight 10 inter 8157 fall 2 rise 1
+ server 127.0.12.162 127.0.12.162:8080 check weight 10 inter 5889 fall 2 rise 1
+ server 127.0.12.163 127.0.12.163:8080 check weight 10 inter 153 fall 2 rise 1
+ server 127.0.12.164 127.0.12.164:8080 check weight 10 inter 476 fall 2 rise 1
+ server 127.0.12.165 127.0.12.165:8080 check weight 10 inter 1463 fall 2 rise 1
+ server 127.0.12.166 127.0.12.166:8080 check weight 10 inter 1872 fall 2 rise 1
+ server 127.0.12.167 127.0.12.167:8080 check weight 10 inter 7426 fall 2 rise 1
+ server 127.0.12.168 127.0.12.168:8080 check weight 10 inter 4438 fall 2 rise 1
+ server 127.0.12.169 127.0.12.169:8080 check weight 10 inter 623 fall 2 rise 1
+ server 127.0.12.170 127.0.12.170:8080 check weight 10 inter 1985 fall 2 rise 1
+ server 127.0.12.171 127.0.12.171:8080 check weight 10 inter 8347 fall 2 rise 1
+ server 127.0.12.172 127.0.12.172:8080 check weight 10 inter 513 fall 2 rise 1
+ server 127.0.12.173 127.0.12.173:8080 check weight 10 inter 9033 fall 2 rise 1
+ server 127.0.12.174 127.0.12.174:8080 check weight 10 inter 2382 fall 2 rise 1
+ server 127.0.12.175 127.0.12.175:8080 check weight 10 inter 6672 fall 2 rise 1
+ server 127.0.12.176 127.0.12.176:8080 check weight 10 inter 8351 fall 2 rise 1
+ server 127.0.12.177 127.0.12.177:8080 check weight 10 inter 7942 fall 2 rise 1
+ server 127.0.12.178 127.0.12.178:8080 check weight 10 inter 3726 fall 2 rise 1
+ server 127.0.12.179 127.0.12.179:8080 check weight 10 inter 713 fall 2 rise 1
+ server 127.0.12.180 127.0.12.180:8080 check weight 10 inter 117 fall 2 rise 1
+ server 127.0.12.181 127.0.12.181:8080 check weight 10 inter 8606 fall 2 rise 1
+ server 127.0.12.182 127.0.12.182:8080 check weight 10 inter 6197 fall 2 rise 1
+ server 127.0.12.183 127.0.12.183:8080 check weight 10 inter 1668 fall 2 rise 1
+ server 127.0.12.184 127.0.12.184:8080 check weight 10 inter 361 fall 2 rise 1
+ server 127.0.12.185 127.0.12.185:8080 check weight 10 inter 2751 fall 2 rise 1
+ server 127.0.12.186 127.0.12.186:8080 check weight 10 inter 8985 fall 2 rise 1
+ server 127.0.12.187 127.0.12.187:8080 check weight 10 inter 9040 fall 2 rise 1
+ server 127.0.12.188 127.0.12.188:8080 check weight 10 inter 1149 fall 2 rise 1
+ server 127.0.12.189 127.0.12.189:8080 check weight 10 inter 9048 fall 2 rise 1
+ server 127.0.12.190 127.0.12.190:8080 check weight 10 inter 3605 fall 2 rise 1
+ server 127.0.12.191 127.0.12.191:8080 check weight 10 inter 350 fall 2 rise 1
+ server 127.0.12.192 127.0.12.192:8080 check weight 10 inter 8841 fall 2 rise 1
+ server 127.0.12.193 127.0.12.193:8080 check weight 10 inter 868 fall 2 rise 1
+ server 127.0.12.194 127.0.12.194:8080 check weight 10 inter 3040 fall 2 rise 1
+ server 127.0.12.195 127.0.12.195:8080 check weight 10 inter 56 fall 2 rise 1
+ server 127.0.12.196 127.0.12.196:8080 check weight 10 inter 26 fall 2 rise 1
+ server 127.0.12.197 127.0.12.197:8080 check weight 10 inter 8743 fall 2 rise 1
+ server 127.0.12.198 127.0.12.198:8080 check weight 10 inter 7563 fall 2 rise 1
+ server 127.0.12.199 127.0.12.199:8080 check weight 10 inter 4446 fall 2 rise 1
+ server 127.0.12.200 127.0.12.200:8080 check weight 10 inter 1116 fall 2 rise 1
+ server 127.0.12.201 127.0.12.201:8080 check weight 10 inter 8954 fall 2 rise 1
+ server 127.0.12.202 127.0.12.202:8080 check weight 10 inter 6045 fall 2 rise 1
+ server 127.0.12.203 127.0.12.203:8080 check weight 10 inter 2069 fall 2 rise 1
+ server 127.0.12.204 127.0.12.204:8080 check weight 10 inter 2505 fall 2 rise 1
+ server 127.0.12.205 127.0.12.205:8080 check weight 10 inter 1942 fall 2 rise 1
+ server 127.0.12.206 127.0.12.206:8080 check weight 10 inter 3583 fall 2 rise 1
+ server 127.0.12.207 127.0.12.207:8080 check weight 10 inter 3326 fall 2 rise 1
+ server 127.0.12.208 127.0.12.208:8080 check weight 10 inter 9198 fall 2 rise 1
+ server 127.0.12.209 127.0.12.209:8080 check weight 10 inter 9554 fall 2 rise 1
+ server 127.0.12.210 127.0.12.210:8080 check weight 10 inter 4710 fall 2 rise 1
+ server 127.0.12.211 127.0.12.211:8080 check weight 10 inter 8984 fall 2 rise 1
+ server 127.0.12.212 127.0.12.212:8080 check weight 10 inter 2498 fall 2 rise 1
+ server 127.0.12.213 127.0.12.213:8080 check weight 10 inter 1245 fall 2 rise 1
+ server 127.0.12.214 127.0.12.214:8080 check weight 10 inter 436 fall 2 rise 1
+ server 127.0.12.215 127.0.12.215:8080 check weight 10 inter 2469 fall 2 rise 1
+ server 127.0.12.216 127.0.12.216:8080 check weight 10 inter 5254 fall 2 rise 1
+ server 127.0.12.217 127.0.12.217:8080 check weight 10 inter 8875 fall 2 rise 1
+ server 127.0.12.218 127.0.12.218:8080 check weight 10 inter 957 fall 2 rise 1
+ server 127.0.12.219 127.0.12.219:8080 check weight 10 inter 4285 fall 2 rise 1
+ server 127.0.12.220 127.0.12.220:8080 check weight 10 inter 6971 fall 2 rise 1
+ server 127.0.12.221 127.0.12.221:8080 check weight 10 inter 223 fall 2 rise 1
+ server 127.0.12.222 127.0.12.222:8080 check weight 10 inter 4617 fall 2 rise 1
+ server 127.0.12.223 127.0.12.223:8080 check weight 10 inter 4212 fall 2 rise 1
+ server 127.0.12.224 127.0.12.224:8080 check weight 10 inter 8518 fall 2 rise 1
+ server 127.0.12.225 127.0.12.225:8080 check weight 10 inter 6396 fall 2 rise 1
+ server 127.0.12.226 127.0.12.226:8080 check weight 10 inter 1567 fall 2 rise 1
+ server 127.0.12.227 127.0.12.227:8080 check weight 10 inter 700 fall 2 rise 1
+ server 127.0.12.228 127.0.12.228:8080 check weight 10 inter 8260 fall 2 rise 1
+ server 127.0.12.229 127.0.12.229:8080 check weight 10 inter 2172 fall 2 rise 1
+ server 127.0.12.230 127.0.12.230:8080 check weight 10 inter 8396 fall 2 rise 1
+ server 127.0.12.231 127.0.12.231:8080 check weight 10 inter 1867 fall 2 rise 1
+ server 127.0.12.232 127.0.12.232:8080 check weight 10 inter 1502 fall 2 rise 1
+ server 127.0.12.233 127.0.12.233:8080 check weight 10 inter 1693 fall 2 rise 1
+ server 127.0.12.234 127.0.12.234:8080 check weight 10 inter 1414 fall 2 rise 1
+ server 127.0.12.235 127.0.12.235:8080 check weight 10 inter 2154 fall 2 rise 1
+ server 127.0.12.236 127.0.12.236:8080 check weight 10 inter 778 fall 2 rise 1
+ server 127.0.12.237 127.0.12.237:8080 check weight 10 inter 78 fall 2 rise 1
+ server 127.0.12.238 127.0.12.238:8080 check weight 10 inter 3316 fall 2 rise 1
+ server 127.0.12.239 127.0.12.239:8080 check weight 10 inter 5043 fall 2 rise 1
+ server 127.0.12.240 127.0.12.240:8080 check weight 10 inter 3175 fall 2 rise 1
+ server 127.0.12.241 127.0.12.241:8080 check weight 10 inter 1389 fall 2 rise 1
+ server 127.0.12.242 127.0.12.242:8080 check weight 10 inter 8761 fall 2 rise 1
+ server 127.0.12.243 127.0.12.243:8080 check weight 10 inter 9704 fall 2 rise 1
+ server 127.0.12.244 127.0.12.244:8080 check weight 10 inter 7911 fall 2 rise 1
+ server 127.0.12.245 127.0.12.245:8080 check weight 10 inter 9950 fall 2 rise 1
+ server 127.0.12.246 127.0.12.246:8080 check weight 10 inter 7284 fall 2 rise 1
+ server 127.0.12.247 127.0.12.247:8080 check weight 10 inter 5571 fall 2 rise 1
+ server 127.0.12.248 127.0.12.248:8080 check weight 10 inter 9130 fall 2 rise 1
+ server 127.0.12.249 127.0.12.249:8080 check weight 10 inter 9483 fall 2 rise 1
+ server 127.0.12.250 127.0.12.250:8080 check weight 10 inter 921 fall 2 rise 1
+ server 127.0.13.001 127.0.13.1:8080 check weight 10 inter 7441 fall 2 rise 1
+ server 127.0.13.002 127.0.13.2:8080 check weight 10 inter 6062 fall 2 rise 1
+ server 127.0.13.003 127.0.13.3:8080 check weight 10 inter 2590 fall 2 rise 1
+ server 127.0.13.004 127.0.13.4:8080 check weight 10 inter 957 fall 2 rise 1
+ server 127.0.13.005 127.0.13.5:8080 check weight 10 inter 4637 fall 2 rise 1
+ server 127.0.13.006 127.0.13.6:8080 check weight 10 inter 7192 fall 2 rise 1
+ server 127.0.13.007 127.0.13.7:8080 check weight 10 inter 4415 fall 2 rise 1
+ server 127.0.13.008 127.0.13.8:8080 check weight 10 inter 7971 fall 2 rise 1
+ server 127.0.13.009 127.0.13.9:8080 check weight 10 inter 1934 fall 2 rise 1
+ server 127.0.13.010 127.0.13.10:8080 check weight 10 inter 5057 fall 2 rise 1
+ server 127.0.13.011 127.0.13.11:8080 check weight 10 inter 5966 fall 2 rise 1
+ server 127.0.13.012 127.0.13.12:8080 check weight 10 inter 8330 fall 2 rise 1
+ server 127.0.13.013 127.0.13.13:8080 check weight 10 inter 2208 fall 2 rise 1
+ server 127.0.13.014 127.0.13.14:8080 check weight 10 inter 4930 fall 2 rise 1
+ server 127.0.13.015 127.0.13.15:8080 check weight 10 inter 5664 fall 2 rise 1
+ server 127.0.13.016 127.0.13.16:8080 check weight 10 inter 4691 fall 2 rise 1
+ server 127.0.13.017 127.0.13.17:8080 check weight 10 inter 1635 fall 2 rise 1
+ server 127.0.13.018 127.0.13.18:8080 check weight 10 inter 9533 fall 2 rise 1
+ server 127.0.13.019 127.0.13.19:8080 check weight 10 inter 4330 fall 2 rise 1
+ server 127.0.13.020 127.0.13.20:8080 check weight 10 inter 468 fall 2 rise 1
+ server 127.0.13.021 127.0.13.21:8080 check weight 10 inter 9814 fall 2 rise 1
+ server 127.0.13.022 127.0.13.22:8080 check weight 10 inter 5377 fall 2 rise 1
+ server 127.0.13.023 127.0.13.23:8080 check weight 10 inter 6462 fall 2 rise 1
+ server 127.0.13.024 127.0.13.24:8080 check weight 10 inter 542 fall 2 rise 1
+ server 127.0.13.025 127.0.13.25:8080 check weight 10 inter 9202 fall 2 rise 1
+ server 127.0.13.026 127.0.13.26:8080 check weight 10 inter 6683 fall 2 rise 1
+ server 127.0.13.027 127.0.13.27:8080 check weight 10 inter 4088 fall 2 rise 1
+ server 127.0.13.028 127.0.13.28:8080 check weight 10 inter 5133 fall 2 rise 1
+ server 127.0.13.029 127.0.13.29:8080 check weight 10 inter 1782 fall 2 rise 1
+ server 127.0.13.030 127.0.13.30:8080 check weight 10 inter 812 fall 2 rise 1
+ server 127.0.13.031 127.0.13.31:8080 check weight 10 inter 8359 fall 2 rise 1
+ server 127.0.13.032 127.0.13.32:8080 check weight 10 inter 21 fall 2 rise 1
+ server 127.0.13.033 127.0.13.33:8080 check weight 10 inter 4405 fall 2 rise 1
+ server 127.0.13.034 127.0.13.34:8080 check weight 10 inter 1151 fall 2 rise 1
+ server 127.0.13.035 127.0.13.35:8080 check weight 10 inter 1340 fall 2 rise 1
+ server 127.0.13.036 127.0.13.36:8080 check weight 10 inter 2327 fall 2 rise 1
+ server 127.0.13.037 127.0.13.37:8080 check weight 10 inter 1521 fall 2 rise 1
+ server 127.0.13.038 127.0.13.38:8080 check weight 10 inter 7527 fall 2 rise 1
+ server 127.0.13.039 127.0.13.39:8080 check weight 10 inter 5748 fall 2 rise 1
+ server 127.0.13.040 127.0.13.40:8080 check weight 10 inter 614 fall 2 rise 1
+ server 127.0.13.041 127.0.13.41:8080 check weight 10 inter 2102 fall 2 rise 1
+ server 127.0.13.042 127.0.13.42:8080 check weight 10 inter 123 fall 2 rise 1
+ server 127.0.13.043 127.0.13.43:8080 check weight 10 inter 182 fall 2 rise 1
+ server 127.0.13.044 127.0.13.44:8080 check weight 10 inter 8493 fall 2 rise 1
+ server 127.0.13.045 127.0.13.45:8080 check weight 10 inter 7334 fall 2 rise 1
+ server 127.0.13.046 127.0.13.46:8080 check weight 10 inter 2481 fall 2 rise 1
+ server 127.0.13.047 127.0.13.47:8080 check weight 10 inter 1611 fall 2 rise 1
+ server 127.0.13.048 127.0.13.48:8080 check weight 10 inter 493 fall 2 rise 1
+ server 127.0.13.049 127.0.13.49:8080 check weight 10 inter 3112 fall 2 rise 1
+ server 127.0.13.050 127.0.13.50:8080 check weight 10 inter 27 fall 2 rise 1
+ server 127.0.13.051 127.0.13.51:8080 check weight 10 inter 1424 fall 2 rise 1
+ server 127.0.13.052 127.0.13.52:8080 check weight 10 inter 9399 fall 2 rise 1
+ server 127.0.13.053 127.0.13.53:8080 check weight 10 inter 512 fall 2 rise 1
+ server 127.0.13.054 127.0.13.54:8080 check weight 10 inter 1571 fall 2 rise 1
+ server 127.0.13.055 127.0.13.55:8080 check weight 10 inter 1709 fall 2 rise 1
+ server 127.0.13.056 127.0.13.56:8080 check weight 10 inter 1037 fall 2 rise 1
+ server 127.0.13.057 127.0.13.57:8080 check weight 10 inter 7837 fall 2 rise 1
+ server 127.0.13.058 127.0.13.58:8080 check weight 10 inter 664 fall 2 rise 1
+ server 127.0.13.059 127.0.13.59:8080 check weight 10 inter 4403 fall 2 rise 1
+ server 127.0.13.060 127.0.13.60:8080 check weight 10 inter 7849 fall 2 rise 1
+ server 127.0.13.061 127.0.13.61:8080 check weight 10 inter 9557 fall 2 rise 1
+ server 127.0.13.062 127.0.13.62:8080 check weight 10 inter 8102 fall 2 rise 1
+ server 127.0.13.063 127.0.13.63:8080 check weight 10 inter 3207 fall 2 rise 1
+ server 127.0.13.064 127.0.13.64:8080 check weight 10 inter 9114 fall 2 rise 1
+ server 127.0.13.065 127.0.13.65:8080 check weight 10 inter 641 fall 2 rise 1
+ server 127.0.13.066 127.0.13.66:8080 check weight 10 inter 7559 fall 2 rise 1
+ server 127.0.13.067 127.0.13.67:8080 check weight 10 inter 1969 fall 2 rise 1
+ server 127.0.13.068 127.0.13.68:8080 check weight 10 inter 3906 fall 2 rise 1
+ server 127.0.13.069 127.0.13.69:8080 check weight 10 inter 711 fall 2 rise 1
+ server 127.0.13.070 127.0.13.70:8080 check weight 10 inter 6699 fall 2 rise 1
+ server 127.0.13.071 127.0.13.71:8080 check weight 10 inter 8979 fall 2 rise 1
+ server 127.0.13.072 127.0.13.72:8080 check weight 10 inter 336 fall 2 rise 1
+ server 127.0.13.073 127.0.13.73:8080 check weight 10 inter 5275 fall 2 rise 1
+ server 127.0.13.074 127.0.13.74:8080 check weight 10 inter 8521 fall 2 rise 1
+ server 127.0.13.075 127.0.13.75:8080 check weight 10 inter 7946 fall 2 rise 1
+ server 127.0.13.076 127.0.13.76:8080 check weight 10 inter 4414 fall 2 rise 1
+ server 127.0.13.077 127.0.13.77:8080 check weight 10 inter 6757 fall 2 rise 1
+ server 127.0.13.078 127.0.13.78:8080 check weight 10 inter 4211 fall 2 rise 1
+ server 127.0.13.079 127.0.13.79:8080 check weight 10 inter 6966 fall 2 rise 1
+ server 127.0.13.080 127.0.13.80:8080 check weight 10 inter 4219 fall 2 rise 1
+ server 127.0.13.081 127.0.13.81:8080 check weight 10 inter 2145 fall 2 rise 1
+ server 127.0.13.082 127.0.13.82:8080 check weight 10 inter 8874 fall 2 rise 1
+ server 127.0.13.083 127.0.13.83:8080 check weight 10 inter 8394 fall 2 rise 1
+ server 127.0.13.084 127.0.13.84:8080 check weight 10 inter 566 fall 2 rise 1
+ server 127.0.13.085 127.0.13.85:8080 check weight 10 inter 425 fall 2 rise 1
+ server 127.0.13.086 127.0.13.86:8080 check weight 10 inter 2039 fall 2 rise 1
+ server 127.0.13.087 127.0.13.87:8080 check weight 10 inter 3522 fall 2 rise 1
+ server 127.0.13.088 127.0.13.88:8080 check weight 10 inter 2015 fall 2 rise 1
+ server 127.0.13.089 127.0.13.89:8080 check weight 10 inter 1698 fall 2 rise 1
+ server 127.0.13.090 127.0.13.90:8080 check weight 10 inter 8132 fall 2 rise 1
+ server 127.0.13.091 127.0.13.91:8080 check weight 10 inter 3639 fall 2 rise 1
+ server 127.0.13.092 127.0.13.92:8080 check weight 10 inter 5389 fall 2 rise 1
+ server 127.0.13.093 127.0.13.93:8080 check weight 10 inter 9465 fall 2 rise 1
+ server 127.0.13.094 127.0.13.94:8080 check weight 10 inter 6078 fall 2 rise 1
+ server 127.0.13.095 127.0.13.95:8080 check weight 10 inter 3956 fall 2 rise 1
+ server 127.0.13.096 127.0.13.96:8080 check weight 10 inter 5375 fall 2 rise 1
+ server 127.0.13.097 127.0.13.97:8080 check weight 10 inter 7340 fall 2 rise 1
+ server 127.0.13.098 127.0.13.98:8080 check weight 10 inter 3196 fall 2 rise 1
+ server 127.0.13.099 127.0.13.99:8080 check weight 10 inter 3045 fall 2 rise 1
+ server 127.0.13.100 127.0.13.100:8080 check weight 10 inter 9731 fall 2 rise 1
+ server 127.0.13.101 127.0.13.101:8080 check weight 10 inter 168 fall 2 rise 1
+ server 127.0.13.102 127.0.13.102:8080 check weight 10 inter 5324 fall 2 rise 1
+ server 127.0.13.103 127.0.13.103:8080 check weight 10 inter 3845 fall 2 rise 1
+ server 127.0.13.104 127.0.13.104:8080 check weight 10 inter 7064 fall 2 rise 1
+ server 127.0.13.105 127.0.13.105:8080 check weight 10 inter 2326 fall 2 rise 1
+ server 127.0.13.106 127.0.13.106:8080 check weight 10 inter 674 fall 2 rise 1
+ server 127.0.13.107 127.0.13.107:8080 check weight 10 inter 2692 fall 2 rise 1
+ server 127.0.13.108 127.0.13.108:8080 check weight 10 inter 1556 fall 2 rise 1
+ server 127.0.13.109 127.0.13.109:8080 check weight 10 inter 4868 fall 2 rise 1
+ server 127.0.13.110 127.0.13.110:8080 check weight 10 inter 1920 fall 2 rise 1
+ server 127.0.13.111 127.0.13.111:8080 check weight 10 inter 8618 fall 2 rise 1
+ server 127.0.13.112 127.0.13.112:8080 check weight 10 inter 4821 fall 2 rise 1
+ server 127.0.13.113 127.0.13.113:8080 check weight 10 inter 2789 fall 2 rise 1
+ server 127.0.13.114 127.0.13.114:8080 check weight 10 inter 8051 fall 2 rise 1
+ server 127.0.13.115 127.0.13.115:8080 check weight 10 inter 8111 fall 2 rise 1
+ server 127.0.13.116 127.0.13.116:8080 check weight 10 inter 4360 fall 2 rise 1
+ server 127.0.13.117 127.0.13.117:8080 check weight 10 inter 9416 fall 2 rise 1
+ server 127.0.13.118 127.0.13.118:8080 check weight 10 inter 3267 fall 2 rise 1
+ server 127.0.13.119 127.0.13.119:8080 check weight 10 inter 518 fall 2 rise 1
+ server 127.0.13.120 127.0.13.120:8080 check weight 10 inter 6027 fall 2 rise 1
+ server 127.0.13.121 127.0.13.121:8080 check weight 10 inter 8137 fall 2 rise 1
+ server 127.0.13.122 127.0.13.122:8080 check weight 10 inter 1657 fall 2 rise 1
+ server 127.0.13.123 127.0.13.123:8080 check weight 10 inter 5916 fall 2 rise 1
+ server 127.0.13.124 127.0.13.124:8080 check weight 10 inter 3187 fall 2 rise 1
+ server 127.0.13.125 127.0.13.125:8080 check weight 10 inter 9722 fall 2 rise 1
+ server 127.0.13.126 127.0.13.126:8080 check weight 10 inter 494 fall 2 rise 1
+ server 127.0.13.127 127.0.13.127:8080 check weight 10 inter 7221 fall 2 rise 1
+ server 127.0.13.128 127.0.13.128:8080 check weight 10 inter 2300 fall 2 rise 1
+ server 127.0.13.129 127.0.13.129:8080 check weight 10 inter 4191 fall 2 rise 1
+ server 127.0.13.130 127.0.13.130:8080 check weight 10 inter 6871 fall 2 rise 1
+ server 127.0.13.131 127.0.13.131:8080 check weight 10 inter 1745 fall 2 rise 1
+ server 127.0.13.132 127.0.13.132:8080 check weight 10 inter 9316 fall 2 rise 1
+ server 127.0.13.133 127.0.13.133:8080 check weight 10 inter 7196 fall 2 rise 1
+ server 127.0.13.134 127.0.13.134:8080 check weight 10 inter 5762 fall 2 rise 1
+ server 127.0.13.135 127.0.13.135:8080 check weight 10 inter 9536 fall 2 rise 1
+ server 127.0.13.136 127.0.13.136:8080 check weight 10 inter 5975 fall 2 rise 1
+ server 127.0.13.137 127.0.13.137:8080 check weight 10 inter 2623 fall 2 rise 1
+ server 127.0.13.138 127.0.13.138:8080 check weight 10 inter 9600 fall 2 rise 1
+ server 127.0.13.139 127.0.13.139:8080 check weight 10 inter 5387 fall 2 rise 1
+ server 127.0.13.140 127.0.13.140:8080 check weight 10 inter 7016 fall 2 rise 1
+ server 127.0.13.141 127.0.13.141:8080 check weight 10 inter 3709 fall 2 rise 1
+ server 127.0.13.142 127.0.13.142:8080 check weight 10 inter 8080 fall 2 rise 1
+ server 127.0.13.143 127.0.13.143:8080 check weight 10 inter 2910 fall 2 rise 1
+ server 127.0.13.144 127.0.13.144:8080 check weight 10 inter 2036 fall 2 rise 1
+ server 127.0.13.145 127.0.13.145:8080 check weight 10 inter 3133 fall 2 rise 1
+ server 127.0.13.146 127.0.13.146:8080 check weight 10 inter 4895 fall 2 rise 1
+ server 127.0.13.147 127.0.13.147:8080 check weight 10 inter 3751 fall 2 rise 1
+ server 127.0.13.148 127.0.13.148:8080 check weight 10 inter 1110 fall 2 rise 1
+ server 127.0.13.149 127.0.13.149:8080 check weight 10 inter 534 fall 2 rise 1
+ server 127.0.13.150 127.0.13.150:8080 check weight 10 inter 3489 fall 2 rise 1
+ server 127.0.13.151 127.0.13.151:8080 check weight 10 inter 5231 fall 2 rise 1
+ server 127.0.13.152 127.0.13.152:8080 check weight 10 inter 3642 fall 2 rise 1
+ server 127.0.13.153 127.0.13.153:8080 check weight 10 inter 8538 fall 2 rise 1
+ server 127.0.13.154 127.0.13.154:8080 check weight 10 inter 1933 fall 2 rise 1
+ server 127.0.13.155 127.0.13.155:8080 check weight 10 inter 123 fall 2 rise 1
+ server 127.0.13.156 127.0.13.156:8080 check weight 10 inter 8306 fall 2 rise 1
+ server 127.0.13.157 127.0.13.157:8080 check weight 10 inter 3423 fall 2 rise 1
+ server 127.0.13.158 127.0.13.158:8080 check weight 10 inter 8844 fall 2 rise 1
+ server 127.0.13.159 127.0.13.159:8080 check weight 10 inter 3537 fall 2 rise 1
+ server 127.0.13.160 127.0.13.160:8080 check weight 10 inter 8634 fall 2 rise 1
+ server 127.0.13.161 127.0.13.161:8080 check weight 10 inter 4914 fall 2 rise 1
+ server 127.0.13.162 127.0.13.162:8080 check weight 10 inter 1680 fall 2 rise 1
+ server 127.0.13.163 127.0.13.163:8080 check weight 10 inter 7243 fall 2 rise 1
+ server 127.0.13.164 127.0.13.164:8080 check weight 10 inter 8269 fall 2 rise 1
+ server 127.0.13.165 127.0.13.165:8080 check weight 10 inter 5050 fall 2 rise 1
+ server 127.0.13.166 127.0.13.166:8080 check weight 10 inter 2117 fall 2 rise 1
+ server 127.0.13.167 127.0.13.167:8080 check weight 10 inter 1934 fall 2 rise 1
+ server 127.0.13.168 127.0.13.168:8080 check weight 10 inter 8563 fall 2 rise 1
+ server 127.0.13.169 127.0.13.169:8080 check weight 10 inter 1261 fall 2 rise 1
+ server 127.0.13.170 127.0.13.170:8080 check weight 10 inter 905 fall 2 rise 1
+ server 127.0.13.171 127.0.13.171:8080 check weight 10 inter 3911 fall 2 rise 1
+ server 127.0.13.172 127.0.13.172:8080 check weight 10 inter 7825 fall 2 rise 1
+ server 127.0.13.173 127.0.13.173:8080 check weight 10 inter 7429 fall 2 rise 1
+ server 127.0.13.174 127.0.13.174:8080 check weight 10 inter 5210 fall 2 rise 1
+ server 127.0.13.175 127.0.13.175:8080 check weight 10 inter 2874 fall 2 rise 1
+ server 127.0.13.176 127.0.13.176:8080 check weight 10 inter 9151 fall 2 rise 1
+ server 127.0.13.177 127.0.13.177:8080 check weight 10 inter 2529 fall 2 rise 1
+ server 127.0.13.178 127.0.13.178:8080 check weight 10 inter 9332 fall 2 rise 1
+ server 127.0.13.179 127.0.13.179:8080 check weight 10 inter 1562 fall 2 rise 1
+ server 127.0.13.180 127.0.13.180:8080 check weight 10 inter 2775 fall 2 rise 1
+ server 127.0.13.181 127.0.13.181:8080 check weight 10 inter 1707 fall 2 rise 1
+ server 127.0.13.182 127.0.13.182:8080 check weight 10 inter 9911 fall 2 rise 1
+ server 127.0.13.183 127.0.13.183:8080 check weight 10 inter 9655 fall 2 rise 1
+ server 127.0.13.184 127.0.13.184:8080 check weight 10 inter 4515 fall 2 rise 1
+ server 127.0.13.185 127.0.13.185:8080 check weight 10 inter 12 fall 2 rise 1
+ server 127.0.13.186 127.0.13.186:8080 check weight 10 inter 5018 fall 2 rise 1
+ server 127.0.13.187 127.0.13.187:8080 check weight 10 inter 6523 fall 2 rise 1
+ server 127.0.13.188 127.0.13.188:8080 check weight 10 inter 2211 fall 2 rise 1
+ server 127.0.13.189 127.0.13.189:8080 check weight 10 inter 8546 fall 2 rise 1
+ server 127.0.13.190 127.0.13.190:8080 check weight 10 inter 7601 fall 2 rise 1
+ server 127.0.13.191 127.0.13.191:8080 check weight 10 inter 8435 fall 2 rise 1
+ server 127.0.13.192 127.0.13.192:8080 check weight 10 inter 3905 fall 2 rise 1
+ server 127.0.13.193 127.0.13.193:8080 check weight 10 inter 4492 fall 2 rise 1
+ server 127.0.13.194 127.0.13.194:8080 check weight 10 inter 8848 fall 2 rise 1
+ server 127.0.13.195 127.0.13.195:8080 check weight 10 inter 93 fall 2 rise 1
+ server 127.0.13.196 127.0.13.196:8080 check weight 10 inter 7669 fall 2 rise 1
+ server 127.0.13.197 127.0.13.197:8080 check weight 10 inter 6331 fall 2 rise 1
+ server 127.0.13.198 127.0.13.198:8080 check weight 10 inter 1933 fall 2 rise 1
+ server 127.0.13.199 127.0.13.199:8080 check weight 10 inter 9142 fall 2 rise 1
+ server 127.0.13.200 127.0.13.200:8080 check weight 10 inter 7111 fall 2 rise 1
+ server 127.0.13.201 127.0.13.201:8080 check weight 10 inter 2905 fall 2 rise 1
+ server 127.0.13.202 127.0.13.202:8080 check weight 10 inter 1094 fall 2 rise 1
+ server 127.0.13.203 127.0.13.203:8080 check weight 10 inter 5377 fall 2 rise 1
+ server 127.0.13.204 127.0.13.204:8080 check weight 10 inter 5416 fall 2 rise 1
+ server 127.0.13.205 127.0.13.205:8080 check weight 10 inter 7831 fall 2 rise 1
+ server 127.0.13.206 127.0.13.206:8080 check weight 10 inter 9272 fall 2 rise 1
+ server 127.0.13.207 127.0.13.207:8080 check weight 10 inter 8230 fall 2 rise 1
+ server 127.0.13.208 127.0.13.208:8080 check weight 10 inter 7471 fall 2 rise 1
+ server 127.0.13.209 127.0.13.209:8080 check weight 10 inter 8824 fall 2 rise 1
+ server 127.0.13.210 127.0.13.210:8080 check weight 10 inter 3883 fall 2 rise 1
+ server 127.0.13.211 127.0.13.211:8080 check weight 10 inter 864 fall 2 rise 1
+ server 127.0.13.212 127.0.13.212:8080 check weight 10 inter 1252 fall 2 rise 1
+ server 127.0.13.213 127.0.13.213:8080 check weight 10 inter 206 fall 2 rise 1
+ server 127.0.13.214 127.0.13.214:8080 check weight 10 inter 414 fall 2 rise 1
+ server 127.0.13.215 127.0.13.215:8080 check weight 10 inter 6933 fall 2 rise 1
+ server 127.0.13.216 127.0.13.216:8080 check weight 10 inter 782 fall 2 rise 1
+ server 127.0.13.217 127.0.13.217:8080 check weight 10 inter 3704 fall 2 rise 1
+ server 127.0.13.218 127.0.13.218:8080 check weight 10 inter 9558 fall 2 rise 1
+ server 127.0.13.219 127.0.13.219:8080 check weight 10 inter 2916 fall 2 rise 1
+ server 127.0.13.220 127.0.13.220:8080 check weight 10 inter 1838 fall 2 rise 1
+ server 127.0.13.221 127.0.13.221:8080 check weight 10 inter 7944 fall 2 rise 1
+ server 127.0.13.222 127.0.13.222:8080 check weight 10 inter 4982 fall 2 rise 1
+ server 127.0.13.223 127.0.13.223:8080 check weight 10 inter 8592 fall 2 rise 1
+ server 127.0.13.224 127.0.13.224:8080 check weight 10 inter 3960 fall 2 rise 1
+ server 127.0.13.225 127.0.13.225:8080 check weight 10 inter 3638 fall 2 rise 1
+ server 127.0.13.226 127.0.13.226:8080 check weight 10 inter 493 fall 2 rise 1
+ server 127.0.13.227 127.0.13.227:8080 check weight 10 inter 3918 fall 2 rise 1
+ server 127.0.13.228 127.0.13.228:8080 check weight 10 inter 228 fall 2 rise 1
+ server 127.0.13.229 127.0.13.229:8080 check weight 10 inter 8519 fall 2 rise 1
+ server 127.0.13.230 127.0.13.230:8080 check weight 10 inter 1730 fall 2 rise 1
+ server 127.0.13.231 127.0.13.231:8080 check weight 10 inter 416 fall 2 rise 1
+ server 127.0.13.232 127.0.13.232:8080 check weight 10 inter 1912 fall 2 rise 1
+ server 127.0.13.233 127.0.13.233:8080 check weight 10 inter 2411 fall 2 rise 1
+ server 127.0.13.234 127.0.13.234:8080 check weight 10 inter 3728 fall 2 rise 1
+ server 127.0.13.235 127.0.13.235:8080 check weight 10 inter 5503 fall 2 rise 1
+ server 127.0.13.236 127.0.13.236:8080 check weight 10 inter 3924 fall 2 rise 1
+ server 127.0.13.237 127.0.13.237:8080 check weight 10 inter 1593 fall 2 rise 1
+ server 127.0.13.238 127.0.13.238:8080 check weight 10 inter 2433 fall 2 rise 1
+ server 127.0.13.239 127.0.13.239:8080 check weight 10 inter 8155 fall 2 rise 1
+ server 127.0.13.240 127.0.13.240:8080 check weight 10 inter 1851 fall 2 rise 1
+ server 127.0.13.241 127.0.13.241:8080 check weight 10 inter 8363 fall 2 rise 1
+ server 127.0.13.242 127.0.13.242:8080 check weight 10 inter 9196 fall 2 rise 1
+ server 127.0.13.243 127.0.13.243:8080 check weight 10 inter 1282 fall 2 rise 1
+ server 127.0.13.244 127.0.13.244:8080 check weight 10 inter 5604 fall 2 rise 1
+ server 127.0.13.245 127.0.13.245:8080 check weight 10 inter 953 fall 2 rise 1
+ server 127.0.13.246 127.0.13.246:8080 check weight 10 inter 8462 fall 2 rise 1
+ server 127.0.13.247 127.0.13.247:8080 check weight 10 inter 737 fall 2 rise 1
+ server 127.0.13.248 127.0.13.248:8080 check weight 10 inter 6436 fall 2 rise 1
+ server 127.0.13.249 127.0.13.249:8080 check weight 10 inter 9254 fall 2 rise 1
+ server 127.0.13.250 127.0.13.250:8080 check weight 10 inter 9562 fall 2 rise 1
+ server 127.0.14.001 127.0.14.1:8080 check weight 10 inter 7406 fall 2 rise 1
+ server 127.0.14.002 127.0.14.2:8080 check weight 10 inter 217 fall 2 rise 1
+ server 127.0.14.003 127.0.14.3:8080 check weight 10 inter 2124 fall 2 rise 1
+ server 127.0.14.004 127.0.14.4:8080 check weight 10 inter 2641 fall 2 rise 1
+ server 127.0.14.005 127.0.14.5:8080 check weight 10 inter 2913 fall 2 rise 1
+ server 127.0.14.006 127.0.14.6:8080 check weight 10 inter 2663 fall 2 rise 1
+ server 127.0.14.007 127.0.14.7:8080 check weight 10 inter 7096 fall 2 rise 1
+ server 127.0.14.008 127.0.14.8:8080 check weight 10 inter 256 fall 2 rise 1
+ server 127.0.14.009 127.0.14.9:8080 check weight 10 inter 9878 fall 2 rise 1
+ server 127.0.14.010 127.0.14.10:8080 check weight 10 inter 8595 fall 2 rise 1
+ server 127.0.14.011 127.0.14.11:8080 check weight 10 inter 3974 fall 2 rise 1
+ server 127.0.14.012 127.0.14.12:8080 check weight 10 inter 8075 fall 2 rise 1
+ server 127.0.14.013 127.0.14.13:8080 check weight 10 inter 7236 fall 2 rise 1
+ server 127.0.14.014 127.0.14.14:8080 check weight 10 inter 9585 fall 2 rise 1
+ server 127.0.14.015 127.0.14.15:8080 check weight 10 inter 4203 fall 2 rise 1
+ server 127.0.14.016 127.0.14.16:8080 check weight 10 inter 493 fall 2 rise 1
+ server 127.0.14.017 127.0.14.17:8080 check weight 10 inter 3083 fall 2 rise 1
+ server 127.0.14.018 127.0.14.18:8080 check weight 10 inter 6349 fall 2 rise 1
+ server 127.0.14.019 127.0.14.19:8080 check weight 10 inter 7508 fall 2 rise 1
+ server 127.0.14.020 127.0.14.20:8080 check weight 10 inter 1373 fall 2 rise 1
+ server 127.0.14.021 127.0.14.21:8080 check weight 10 inter 8511 fall 2 rise 1
+ server 127.0.14.022 127.0.14.22:8080 check weight 10 inter 5163 fall 2 rise 1
+ server 127.0.14.023 127.0.14.23:8080 check weight 10 inter 1521 fall 2 rise 1
+ server 127.0.14.024 127.0.14.24:8080 check weight 10 inter 2975 fall 2 rise 1
+ server 127.0.14.025 127.0.14.25:8080 check weight 10 inter 1047 fall 2 rise 1
+ server 127.0.14.026 127.0.14.26:8080 check weight 10 inter 9296 fall 2 rise 1
+ server 127.0.14.027 127.0.14.27:8080 check weight 10 inter 9281 fall 2 rise 1
+ server 127.0.14.028 127.0.14.28:8080 check weight 10 inter 9887 fall 2 rise 1
+ server 127.0.14.029 127.0.14.29:8080 check weight 10 inter 356 fall 2 rise 1
+ server 127.0.14.030 127.0.14.30:8080 check weight 10 inter 717 fall 2 rise 1
+ server 127.0.14.031 127.0.14.31:8080 check weight 10 inter 4797 fall 2 rise 1
+ server 127.0.14.032 127.0.14.32:8080 check weight 10 inter 8862 fall 2 rise 1
+ server 127.0.14.033 127.0.14.33:8080 check weight 10 inter 9473 fall 2 rise 1
+ server 127.0.14.034 127.0.14.34:8080 check weight 10 inter 3086 fall 2 rise 1
+ server 127.0.14.035 127.0.14.35:8080 check weight 10 inter 932 fall 2 rise 1
+ server 127.0.14.036 127.0.14.36:8080 check weight 10 inter 9515 fall 2 rise 1
+ server 127.0.14.037 127.0.14.37:8080 check weight 10 inter 6991 fall 2 rise 1
+ server 127.0.14.038 127.0.14.38:8080 check weight 10 inter 199 fall 2 rise 1
+ server 127.0.14.039 127.0.14.39:8080 check weight 10 inter 8106 fall 2 rise 1
+ server 127.0.14.040 127.0.14.40:8080 check weight 10 inter 5440 fall 2 rise 1
+ server 127.0.14.041 127.0.14.41:8080 check weight 10 inter 2206 fall 2 rise 1
+ server 127.0.14.042 127.0.14.42:8080 check weight 10 inter 7049 fall 2 rise 1
+ server 127.0.14.043 127.0.14.43:8080 check weight 10 inter 2703 fall 2 rise 1
+ server 127.0.14.044 127.0.14.44:8080 check weight 10 inter 9906 fall 2 rise 1
+ server 127.0.14.045 127.0.14.45:8080 check weight 10 inter 7771 fall 2 rise 1
+ server 127.0.14.046 127.0.14.46:8080 check weight 10 inter 4759 fall 2 rise 1
+ server 127.0.14.047 127.0.14.47:8080 check weight 10 inter 2896 fall 2 rise 1
+ server 127.0.14.048 127.0.14.48:8080 check weight 10 inter 134 fall 2 rise 1
+ server 127.0.14.049 127.0.14.49:8080 check weight 10 inter 2748 fall 2 rise 1
+ server 127.0.14.050 127.0.14.50:8080 check weight 10 inter 5628 fall 2 rise 1
+ server 127.0.14.051 127.0.14.51:8080 check weight 10 inter 4784 fall 2 rise 1
+ server 127.0.14.052 127.0.14.52:8080 check weight 10 inter 8979 fall 2 rise 1
+ server 127.0.14.053 127.0.14.53:8080 check weight 10 inter 4620 fall 2 rise 1
+ server 127.0.14.054 127.0.14.54:8080 check weight 10 inter 9368 fall 2 rise 1
+ server 127.0.14.055 127.0.14.55:8080 check weight 10 inter 9940 fall 2 rise 1
+ server 127.0.14.056 127.0.14.56:8080 check weight 10 inter 6588 fall 2 rise 1
+ server 127.0.14.057 127.0.14.57:8080 check weight 10 inter 8695 fall 2 rise 1
+ server 127.0.14.058 127.0.14.58:8080 check weight 10 inter 191 fall 2 rise 1
+ server 127.0.14.059 127.0.14.59:8080 check weight 10 inter 2641 fall 2 rise 1
+ server 127.0.14.060 127.0.14.60:8080 check weight 10 inter 7802 fall 2 rise 1
+ server 127.0.14.061 127.0.14.61:8080 check weight 10 inter 68 fall 2 rise 1
+ server 127.0.14.062 127.0.14.62:8080 check weight 10 inter 880 fall 2 rise 1
+ server 127.0.14.063 127.0.14.63:8080 check weight 10 inter 9734 fall 2 rise 1
+ server 127.0.14.064 127.0.14.64:8080 check weight 10 inter 2186 fall 2 rise 1
+ server 127.0.14.065 127.0.14.65:8080 check weight 10 inter 3366 fall 2 rise 1
+ server 127.0.14.066 127.0.14.66:8080 check weight 10 inter 581 fall 2 rise 1
+ server 127.0.14.067 127.0.14.67:8080 check weight 10 inter 169 fall 2 rise 1
+ server 127.0.14.068 127.0.14.68:8080 check weight 10 inter 2444 fall 2 rise 1
+ server 127.0.14.069 127.0.14.69:8080 check weight 10 inter 3105 fall 2 rise 1
+ server 127.0.14.070 127.0.14.70:8080 check weight 10 inter 9231 fall 2 rise 1
+ server 127.0.14.071 127.0.14.71:8080 check weight 10 inter 1171 fall 2 rise 1
+ server 127.0.14.072 127.0.14.72:8080 check weight 10 inter 3175 fall 2 rise 1
+ server 127.0.14.073 127.0.14.73:8080 check weight 10 inter 972 fall 2 rise 1
+ server 127.0.14.074 127.0.14.74:8080 check weight 10 inter 2575 fall 2 rise 1
+ server 127.0.14.075 127.0.14.75:8080 check weight 10 inter 2363 fall 2 rise 1
+ server 127.0.14.076 127.0.14.76:8080 check weight 10 inter 8973 fall 2 rise 1
+ server 127.0.14.077 127.0.14.77:8080 check weight 10 inter 9643 fall 2 rise 1
+ server 127.0.14.078 127.0.14.78:8080 check weight 10 inter 2852 fall 2 rise 1
+ server 127.0.14.079 127.0.14.79:8080 check weight 10 inter 2675 fall 2 rise 1
+ server 127.0.14.080 127.0.14.80:8080 check weight 10 inter 5059 fall 2 rise 1
+ server 127.0.14.081 127.0.14.81:8080 check weight 10 inter 1130 fall 2 rise 1
+ server 127.0.14.082 127.0.14.82:8080 check weight 10 inter 1864 fall 2 rise 1
+ server 127.0.14.083 127.0.14.83:8080 check weight 10 inter 1972 fall 2 rise 1
+ server 127.0.14.084 127.0.14.84:8080 check weight 10 inter 9885 fall 2 rise 1
+ server 127.0.14.085 127.0.14.85:8080 check weight 10 inter 5721 fall 2 rise 1
+ server 127.0.14.086 127.0.14.86:8080 check weight 10 inter 1674 fall 2 rise 1
+ server 127.0.14.087 127.0.14.87:8080 check weight 10 inter 5450 fall 2 rise 1
+ server 127.0.14.088 127.0.14.88:8080 check weight 10 inter 5690 fall 2 rise 1
+ server 127.0.14.089 127.0.14.89:8080 check weight 10 inter 3066 fall 2 rise 1
+ server 127.0.14.090 127.0.14.90:8080 check weight 10 inter 8857 fall 2 rise 1
+ server 127.0.14.091 127.0.14.91:8080 check weight 10 inter 7823 fall 2 rise 1
+ server 127.0.14.092 127.0.14.92:8080 check weight 10 inter 4293 fall 2 rise 1
+ server 127.0.14.093 127.0.14.93:8080 check weight 10 inter 843 fall 2 rise 1
+ server 127.0.14.094 127.0.14.94:8080 check weight 10 inter 3269 fall 2 rise 1
+ server 127.0.14.095 127.0.14.95:8080 check weight 10 inter 7497 fall 2 rise 1
+ server 127.0.14.096 127.0.14.96:8080 check weight 10 inter 1319 fall 2 rise 1
+ server 127.0.14.097 127.0.14.97:8080 check weight 10 inter 9172 fall 2 rise 1
+ server 127.0.14.098 127.0.14.98:8080 check weight 10 inter 5541 fall 2 rise 1
+ server 127.0.14.099 127.0.14.99:8080 check weight 10 inter 9443 fall 2 rise 1
+ server 127.0.14.100 127.0.14.100:8080 check weight 10 inter 5548 fall 2 rise 1
+ server 127.0.14.101 127.0.14.101:8080 check weight 10 inter 223 fall 2 rise 1
+ server 127.0.14.102 127.0.14.102:8080 check weight 10 inter 4280 fall 2 rise 1
+ server 127.0.14.103 127.0.14.103:8080 check weight 10 inter 9292 fall 2 rise 1
+ server 127.0.14.104 127.0.14.104:8080 check weight 10 inter 8716 fall 2 rise 1
+ server 127.0.14.105 127.0.14.105:8080 check weight 10 inter 2090 fall 2 rise 1
+ server 127.0.14.106 127.0.14.106:8080 check weight 10 inter 3788 fall 2 rise 1
+ server 127.0.14.107 127.0.14.107:8080 check weight 10 inter 7185 fall 2 rise 1
+ server 127.0.14.108 127.0.14.108:8080 check weight 10 inter 470 fall 2 rise 1
+ server 127.0.14.109 127.0.14.109:8080 check weight 10 inter 4398 fall 2 rise 1
+ server 127.0.14.110 127.0.14.110:8080 check weight 10 inter 8368 fall 2 rise 1
+ server 127.0.14.111 127.0.14.111:8080 check weight 10 inter 3451 fall 2 rise 1
+ server 127.0.14.112 127.0.14.112:8080 check weight 10 inter 4732 fall 2 rise 1
+ server 127.0.14.113 127.0.14.113:8080 check weight 10 inter 9005 fall 2 rise 1
+ server 127.0.14.114 127.0.14.114:8080 check weight 10 inter 9562 fall 2 rise 1
+ server 127.0.14.115 127.0.14.115:8080 check weight 10 inter 809 fall 2 rise 1
+ server 127.0.14.116 127.0.14.116:8080 check weight 10 inter 2161 fall 2 rise 1
+ server 127.0.14.117 127.0.14.117:8080 check weight 10 inter 510 fall 2 rise 1
+ server 127.0.14.118 127.0.14.118:8080 check weight 10 inter 2840 fall 2 rise 1
+ server 127.0.14.119 127.0.14.119:8080 check weight 10 inter 4988 fall 2 rise 1
+ server 127.0.14.120 127.0.14.120:8080 check weight 10 inter 9603 fall 2 rise 1
+ server 127.0.14.121 127.0.14.121:8080 check weight 10 inter 8840 fall 2 rise 1
+ server 127.0.14.122 127.0.14.122:8080 check weight 10 inter 4727 fall 2 rise 1
+ server 127.0.14.123 127.0.14.123:8080 check weight 10 inter 7572 fall 2 rise 1
+ server 127.0.14.124 127.0.14.124:8080 check weight 10 inter 424 fall 2 rise 1
+ server 127.0.14.125 127.0.14.125:8080 check weight 10 inter 336 fall 2 rise 1
+ server 127.0.14.126 127.0.14.126:8080 check weight 10 inter 2245 fall 2 rise 1
+ server 127.0.14.127 127.0.14.127:8080 check weight 10 inter 13 fall 2 rise 1
+ server 127.0.14.128 127.0.14.128:8080 check weight 10 inter 6171 fall 2 rise 1
+ server 127.0.14.129 127.0.14.129:8080 check weight 10 inter 50 fall 2 rise 1
+ server 127.0.14.130 127.0.14.130:8080 check weight 10 inter 4135 fall 2 rise 1
+ server 127.0.14.131 127.0.14.131:8080 check weight 10 inter 8777 fall 2 rise 1
+ server 127.0.14.132 127.0.14.132:8080 check weight 10 inter 1923 fall 2 rise 1
+ server 127.0.14.133 127.0.14.133:8080 check weight 10 inter 2305 fall 2 rise 1
+ server 127.0.14.134 127.0.14.134:8080 check weight 10 inter 7881 fall 2 rise 1
+ server 127.0.14.135 127.0.14.135:8080 check weight 10 inter 9500 fall 2 rise 1
+ server 127.0.14.136 127.0.14.136:8080 check weight 10 inter 4375 fall 2 rise 1
+ server 127.0.14.137 127.0.14.137:8080 check weight 10 inter 6239 fall 2 rise 1
+ server 127.0.14.138 127.0.14.138:8080 check weight 10 inter 9241 fall 2 rise 1
+ server 127.0.14.139 127.0.14.139:8080 check weight 10 inter 7354 fall 2 rise 1
+ server 127.0.14.140 127.0.14.140:8080 check weight 10 inter 5795 fall 2 rise 1
+ server 127.0.14.141 127.0.14.141:8080 check weight 10 inter 8571 fall 2 rise 1
+ server 127.0.14.142 127.0.14.142:8080 check weight 10 inter 8290 fall 2 rise 1
+ server 127.0.14.143 127.0.14.143:8080 check weight 10 inter 6401 fall 2 rise 1
+ server 127.0.14.144 127.0.14.144:8080 check weight 10 inter 6745 fall 2 rise 1
+ server 127.0.14.145 127.0.14.145:8080 check weight 10 inter 4686 fall 2 rise 1
+ server 127.0.14.146 127.0.14.146:8080 check weight 10 inter 393 fall 2 rise 1
+ server 127.0.14.147 127.0.14.147:8080 check weight 10 inter 2374 fall 2 rise 1
+ server 127.0.14.148 127.0.14.148:8080 check weight 10 inter 5624 fall 2 rise 1
+ server 127.0.14.149 127.0.14.149:8080 check weight 10 inter 2676 fall 2 rise 1
+ server 127.0.14.150 127.0.14.150:8080 check weight 10 inter 4410 fall 2 rise 1
+ server 127.0.14.151 127.0.14.151:8080 check weight 10 inter 5681 fall 2 rise 1
+ server 127.0.14.152 127.0.14.152:8080 check weight 10 inter 1934 fall 2 rise 1
+ server 127.0.14.153 127.0.14.153:8080 check weight 10 inter 8234 fall 2 rise 1
+ server 127.0.14.154 127.0.14.154:8080 check weight 10 inter 2704 fall 2 rise 1
+ server 127.0.14.155 127.0.14.155:8080 check weight 10 inter 1208 fall 2 rise 1
+ server 127.0.14.156 127.0.14.156:8080 check weight 10 inter 6237 fall 2 rise 1
+ server 127.0.14.157 127.0.14.157:8080 check weight 10 inter 2891 fall 2 rise 1
+ server 127.0.14.158 127.0.14.158:8080 check weight 10 inter 5719 fall 2 rise 1
+ server 127.0.14.159 127.0.14.159:8080 check weight 10 inter 2171 fall 2 rise 1
+ server 127.0.14.160 127.0.14.160:8080 check weight 10 inter 2014 fall 2 rise 1
+ server 127.0.14.161 127.0.14.161:8080 check weight 10 inter 2235 fall 2 rise 1
+ server 127.0.14.162 127.0.14.162:8080 check weight 10 inter 3537 fall 2 rise 1
+ server 127.0.14.163 127.0.14.163:8080 check weight 10 inter 8612 fall 2 rise 1
+ server 127.0.14.164 127.0.14.164:8080 check weight 10 inter 873 fall 2 rise 1
+ server 127.0.14.165 127.0.14.165:8080 check weight 10 inter 8928 fall 2 rise 1
+ server 127.0.14.166 127.0.14.166:8080 check weight 10 inter 7818 fall 2 rise 1
+ server 127.0.14.167 127.0.14.167:8080 check weight 10 inter 4557 fall 2 rise 1
+ server 127.0.14.168 127.0.14.168:8080 check weight 10 inter 3359 fall 2 rise 1
+ server 127.0.14.169 127.0.14.169:8080 check weight 10 inter 2707 fall 2 rise 1
+ server 127.0.14.170 127.0.14.170:8080 check weight 10 inter 9117 fall 2 rise 1
+ server 127.0.14.171 127.0.14.171:8080 check weight 10 inter 9630 fall 2 rise 1
+ server 127.0.14.172 127.0.14.172:8080 check weight 10 inter 3630 fall 2 rise 1
+ server 127.0.14.173 127.0.14.173:8080 check weight 10 inter 4315 fall 2 rise 1
+ server 127.0.14.174 127.0.14.174:8080 check weight 10 inter 3154 fall 2 rise 1
+ server 127.0.14.175 127.0.14.175:8080 check weight 10 inter 1197 fall 2 rise 1
+ server 127.0.14.176 127.0.14.176:8080 check weight 10 inter 1282 fall 2 rise 1
+ server 127.0.14.177 127.0.14.177:8080 check weight 10 inter 2324 fall 2 rise 1
+ server 127.0.14.178 127.0.14.178:8080 check weight 10 inter 8734 fall 2 rise 1
+ server 127.0.14.179 127.0.14.179:8080 check weight 10 inter 5397 fall 2 rise 1
+ server 127.0.14.180 127.0.14.180:8080 check weight 10 inter 4603 fall 2 rise 1
+ server 127.0.14.181 127.0.14.181:8080 check weight 10 inter 1521 fall 2 rise 1
+ server 127.0.14.182 127.0.14.182:8080 check weight 10 inter 1481 fall 2 rise 1
+ server 127.0.14.183 127.0.14.183:8080 check weight 10 inter 5922 fall 2 rise 1
+ server 127.0.14.184 127.0.14.184:8080 check weight 10 inter 4419 fall 2 rise 1
+ server 127.0.14.185 127.0.14.185:8080 check weight 10 inter 4839 fall 2 rise 1
+ server 127.0.14.186 127.0.14.186:8080 check weight 10 inter 9325 fall 2 rise 1
+ server 127.0.14.187 127.0.14.187:8080 check weight 10 inter 8466 fall 2 rise 1
+ server 127.0.14.188 127.0.14.188:8080 check weight 10 inter 7979 fall 2 rise 1
+ server 127.0.14.189 127.0.14.189:8080 check weight 10 inter 2309 fall 2 rise 1
+ server 127.0.14.190 127.0.14.190:8080 check weight 10 inter 2387 fall 2 rise 1
+ server 127.0.14.191 127.0.14.191:8080 check weight 10 inter 3963 fall 2 rise 1
+ server 127.0.14.192 127.0.14.192:8080 check weight 10 inter 1178 fall 2 rise 1
+ server 127.0.14.193 127.0.14.193:8080 check weight 10 inter 5028 fall 2 rise 1
+ server 127.0.14.194 127.0.14.194:8080 check weight 10 inter 7022 fall 2 rise 1
+ server 127.0.14.195 127.0.14.195:8080 check weight 10 inter 3196 fall 2 rise 1
+ server 127.0.14.196 127.0.14.196:8080 check weight 10 inter 3446 fall 2 rise 1
+ server 127.0.14.197 127.0.14.197:8080 check weight 10 inter 7426 fall 2 rise 1
+ server 127.0.14.198 127.0.14.198:8080 check weight 10 inter 964 fall 2 rise 1
+ server 127.0.14.199 127.0.14.199:8080 check weight 10 inter 4020 fall 2 rise 1
+ server 127.0.14.200 127.0.14.200:8080 check weight 10 inter 8702 fall 2 rise 1
+ server 127.0.14.201 127.0.14.201:8080 check weight 10 inter 3855 fall 2 rise 1
+ server 127.0.14.202 127.0.14.202:8080 check weight 10 inter 6304 fall 2 rise 1
+ server 127.0.14.203 127.0.14.203:8080 check weight 10 inter 1189 fall 2 rise 1
+ server 127.0.14.204 127.0.14.204:8080 check weight 10 inter 557 fall 2 rise 1
+ server 127.0.14.205 127.0.14.205:8080 check weight 10 inter 7076 fall 2 rise 1
+ server 127.0.14.206 127.0.14.206:8080 check weight 10 inter 9817 fall 2 rise 1
+ server 127.0.14.207 127.0.14.207:8080 check weight 10 inter 4311 fall 2 rise 1
+ server 127.0.14.208 127.0.14.208:8080 check weight 10 inter 3887 fall 2 rise 1
+ server 127.0.14.209 127.0.14.209:8080 check weight 10 inter 838 fall 2 rise 1
+ server 127.0.14.210 127.0.14.210:8080 check weight 10 inter 2288 fall 2 rise 1
+ server 127.0.14.211 127.0.14.211:8080 check weight 10 inter 716 fall 2 rise 1
+ server 127.0.14.212 127.0.14.212:8080 check weight 10 inter 6864 fall 2 rise 1
+ server 127.0.14.213 127.0.14.213:8080 check weight 10 inter 1343 fall 2 rise 1
+ server 127.0.14.214 127.0.14.214:8080 check weight 10 inter 9932 fall 2 rise 1
+ server 127.0.14.215 127.0.14.215:8080 check weight 10 inter 8135 fall 2 rise 1
+ server 127.0.14.216 127.0.14.216:8080 check weight 10 inter 9689 fall 2 rise 1
+ server 127.0.14.217 127.0.14.217:8080 check weight 10 inter 9783 fall 2 rise 1
+ server 127.0.14.218 127.0.14.218:8080 check weight 10 inter 5525 fall 2 rise 1
+ server 127.0.14.219 127.0.14.219:8080 check weight 10 inter 1195 fall 2 rise 1
+ server 127.0.14.220 127.0.14.220:8080 check weight 10 inter 874 fall 2 rise 1
+ server 127.0.14.221 127.0.14.221:8080 check weight 10 inter 1844 fall 2 rise 1
+ server 127.0.14.222 127.0.14.222:8080 check weight 10 inter 6194 fall 2 rise 1
+ server 127.0.14.223 127.0.14.223:8080 check weight 10 inter 36 fall 2 rise 1
+ server 127.0.14.224 127.0.14.224:8080 check weight 10 inter 9654 fall 2 rise 1
+ server 127.0.14.225 127.0.14.225:8080 check weight 10 inter 806 fall 2 rise 1
+ server 127.0.14.226 127.0.14.226:8080 check weight 10 inter 564 fall 2 rise 1
+ server 127.0.14.227 127.0.14.227:8080 check weight 10 inter 2728 fall 2 rise 1
+ server 127.0.14.228 127.0.14.228:8080 check weight 10 inter 4129 fall 2 rise 1
+ server 127.0.14.229 127.0.14.229:8080 check weight 10 inter 9057 fall 2 rise 1
+ server 127.0.14.230 127.0.14.230:8080 check weight 10 inter 7743 fall 2 rise 1
+ server 127.0.14.231 127.0.14.231:8080 check weight 10 inter 7644 fall 2 rise 1
+ server 127.0.14.232 127.0.14.232:8080 check weight 10 inter 8795 fall 2 rise 1
+ server 127.0.14.233 127.0.14.233:8080 check weight 10 inter 9577 fall 2 rise 1
+ server 127.0.14.234 127.0.14.234:8080 check weight 10 inter 8091 fall 2 rise 1
+ server 127.0.14.235 127.0.14.235:8080 check weight 10 inter 4086 fall 2 rise 1
+ server 127.0.14.236 127.0.14.236:8080 check weight 10 inter 5977 fall 2 rise 1
+ server 127.0.14.237 127.0.14.237:8080 check weight 10 inter 5152 fall 2 rise 1
+ server 127.0.14.238 127.0.14.238:8080 check weight 10 inter 3150 fall 2 rise 1
+ server 127.0.14.239 127.0.14.239:8080 check weight 10 inter 2406 fall 2 rise 1
+ server 127.0.14.240 127.0.14.240:8080 check weight 10 inter 9927 fall 2 rise 1
+ server 127.0.14.241 127.0.14.241:8080 check weight 10 inter 6063 fall 2 rise 1
+ server 127.0.14.242 127.0.14.242:8080 check weight 10 inter 9767 fall 2 rise 1
+ server 127.0.14.243 127.0.14.243:8080 check weight 10 inter 9587 fall 2 rise 1
+ server 127.0.14.244 127.0.14.244:8080 check weight 10 inter 9152 fall 2 rise 1
+ server 127.0.14.245 127.0.14.245:8080 check weight 10 inter 5893 fall 2 rise 1
+ server 127.0.14.246 127.0.14.246:8080 check weight 10 inter 3659 fall 2 rise 1
+ server 127.0.14.247 127.0.14.247:8080 check weight 10 inter 3239 fall 2 rise 1
+ server 127.0.14.248 127.0.14.248:8080 check weight 10 inter 1898 fall 2 rise 1
+ server 127.0.14.249 127.0.14.249:8080 check weight 10 inter 7266 fall 2 rise 1
+ server 127.0.14.250 127.0.14.250:8080 check weight 10 inter 2177 fall 2 rise 1
+ server 127.0.15.001 127.0.15.1:8080 check weight 10 inter 1658 fall 2 rise 1
+ server 127.0.15.002 127.0.15.2:8080 check weight 10 inter 4658 fall 2 rise 1
+ server 127.0.15.003 127.0.15.3:8080 check weight 10 inter 5906 fall 2 rise 1
+ server 127.0.15.004 127.0.15.4:8080 check weight 10 inter 2940 fall 2 rise 1
+ server 127.0.15.005 127.0.15.5:8080 check weight 10 inter 7376 fall 2 rise 1
+ server 127.0.15.006 127.0.15.6:8080 check weight 10 inter 490 fall 2 rise 1
+ server 127.0.15.007 127.0.15.7:8080 check weight 10 inter 9482 fall 2 rise 1
+ server 127.0.15.008 127.0.15.8:8080 check weight 10 inter 4539 fall 2 rise 1
+ server 127.0.15.009 127.0.15.9:8080 check weight 10 inter 6753 fall 2 rise 1
+ server 127.0.15.010 127.0.15.10:8080 check weight 10 inter 1924 fall 2 rise 1
+ server 127.0.15.011 127.0.15.11:8080 check weight 10 inter 4532 fall 2 rise 1
+ server 127.0.15.012 127.0.15.12:8080 check weight 10 inter 8289 fall 2 rise 1
+ server 127.0.15.013 127.0.15.13:8080 check weight 10 inter 7515 fall 2 rise 1
+ server 127.0.15.014 127.0.15.14:8080 check weight 10 inter 4621 fall 2 rise 1
+ server 127.0.15.015 127.0.15.15:8080 check weight 10 inter 5611 fall 2 rise 1
+ server 127.0.15.016 127.0.15.16:8080 check weight 10 inter 3397 fall 2 rise 1
+ server 127.0.15.017 127.0.15.17:8080 check weight 10 inter 2490 fall 2 rise 1
+ server 127.0.15.018 127.0.15.18:8080 check weight 10 inter 4426 fall 2 rise 1
+ server 127.0.15.019 127.0.15.19:8080 check weight 10 inter 4597 fall 2 rise 1
+ server 127.0.15.020 127.0.15.20:8080 check weight 10 inter 8810 fall 2 rise 1
+ server 127.0.15.021 127.0.15.21:8080 check weight 10 inter 8947 fall 2 rise 1
+ server 127.0.15.022 127.0.15.22:8080 check weight 10 inter 2900 fall 2 rise 1
+ server 127.0.15.023 127.0.15.23:8080 check weight 10 inter 3060 fall 2 rise 1
+ server 127.0.15.024 127.0.15.24:8080 check weight 10 inter 3212 fall 2 rise 1
+ server 127.0.15.025 127.0.15.25:8080 check weight 10 inter 2612 fall 2 rise 1
+ server 127.0.15.026 127.0.15.26:8080 check weight 10 inter 7187 fall 2 rise 1
+ server 127.0.15.027 127.0.15.27:8080 check weight 10 inter 75 fall 2 rise 1
+ server 127.0.15.028 127.0.15.28:8080 check weight 10 inter 5647 fall 2 rise 1
+ server 127.0.15.029 127.0.15.29:8080 check weight 10 inter 3419 fall 2 rise 1
+ server 127.0.15.030 127.0.15.30:8080 check weight 10 inter 1628 fall 2 rise 1
+ server 127.0.15.031 127.0.15.31:8080 check weight 10 inter 4560 fall 2 rise 1
+ server 127.0.15.032 127.0.15.32:8080 check weight 10 inter 4552 fall 2 rise 1
+ server 127.0.15.033 127.0.15.33:8080 check weight 10 inter 1465 fall 2 rise 1
+ server 127.0.15.034 127.0.15.34:8080 check weight 10 inter 5274 fall 2 rise 1
+ server 127.0.15.035 127.0.15.35:8080 check weight 10 inter 6662 fall 2 rise 1
+ server 127.0.15.036 127.0.15.36:8080 check weight 10 inter 3672 fall 2 rise 1
+ server 127.0.15.037 127.0.15.37:8080 check weight 10 inter 5924 fall 2 rise 1
+ server 127.0.15.038 127.0.15.38:8080 check weight 10 inter 4862 fall 2 rise 1
+ server 127.0.15.039 127.0.15.39:8080 check weight 10 inter 9575 fall 2 rise 1
+ server 127.0.15.040 127.0.15.40:8080 check weight 10 inter 3723 fall 2 rise 1
+ server 127.0.15.041 127.0.15.41:8080 check weight 10 inter 2463 fall 2 rise 1
+ server 127.0.15.042 127.0.15.42:8080 check weight 10 inter 2038 fall 2 rise 1
+ server 127.0.15.043 127.0.15.43:8080 check weight 10 inter 1251 fall 2 rise 1
+ server 127.0.15.044 127.0.15.44:8080 check weight 10 inter 7365 fall 2 rise 1
+ server 127.0.15.045 127.0.15.45:8080 check weight 10 inter 3146 fall 2 rise 1
+ server 127.0.15.046 127.0.15.46:8080 check weight 10 inter 8061 fall 2 rise 1
+ server 127.0.15.047 127.0.15.47:8080 check weight 10 inter 3532 fall 2 rise 1
+ server 127.0.15.048 127.0.15.48:8080 check weight 10 inter 9383 fall 2 rise 1
+ server 127.0.15.049 127.0.15.49:8080 check weight 10 inter 985 fall 2 rise 1
+ server 127.0.15.050 127.0.15.50:8080 check weight 10 inter 7720 fall 2 rise 1
+ server 127.0.15.051 127.0.15.51:8080 check weight 10 inter 8157 fall 2 rise 1
+ server 127.0.15.052 127.0.15.52:8080 check weight 10 inter 6004 fall 2 rise 1
+ server 127.0.15.053 127.0.15.53:8080 check weight 10 inter 8662 fall 2 rise 1
+ server 127.0.15.054 127.0.15.54:8080 check weight 10 inter 1661 fall 2 rise 1
+ server 127.0.15.055 127.0.15.55:8080 check weight 10 inter 8270 fall 2 rise 1
+ server 127.0.15.056 127.0.15.56:8080 check weight 10 inter 5671 fall 2 rise 1
+ server 127.0.15.057 127.0.15.57:8080 check weight 10 inter 3573 fall 2 rise 1
+ server 127.0.15.058 127.0.15.58:8080 check weight 10 inter 8433 fall 2 rise 1
+ server 127.0.15.059 127.0.15.59:8080 check weight 10 inter 1017 fall 2 rise 1
+ server 127.0.15.060 127.0.15.60:8080 check weight 10 inter 1360 fall 2 rise 1
+ server 127.0.15.061 127.0.15.61:8080 check weight 10 inter 9355 fall 2 rise 1
+ server 127.0.15.062 127.0.15.62:8080 check weight 10 inter 984 fall 2 rise 1
+ server 127.0.15.063 127.0.15.63:8080 check weight 10 inter 2009 fall 2 rise 1
+ server 127.0.15.064 127.0.15.64:8080 check weight 10 inter 7858 fall 2 rise 1
+ server 127.0.15.065 127.0.15.65:8080 check weight 10 inter 9870 fall 2 rise 1
+ server 127.0.15.066 127.0.15.66:8080 check weight 10 inter 4876 fall 2 rise 1
+ server 127.0.15.067 127.0.15.67:8080 check weight 10 inter 4664 fall 2 rise 1
+ server 127.0.15.068 127.0.15.68:8080 check weight 10 inter 5758 fall 2 rise 1
+ server 127.0.15.069 127.0.15.69:8080 check weight 10 inter 9659 fall 2 rise 1
+ server 127.0.15.070 127.0.15.70:8080 check weight 10 inter 6537 fall 2 rise 1
+ server 127.0.15.071 127.0.15.71:8080 check weight 10 inter 2677 fall 2 rise 1
+ server 127.0.15.072 127.0.15.72:8080 check weight 10 inter 2830 fall 2 rise 1
+ server 127.0.15.073 127.0.15.73:8080 check weight 10 inter 1929 fall 2 rise 1
+ server 127.0.15.074 127.0.15.74:8080 check weight 10 inter 4190 fall 2 rise 1
+ server 127.0.15.075 127.0.15.75:8080 check weight 10 inter 6071 fall 2 rise 1
+ server 127.0.15.076 127.0.15.76:8080 check weight 10 inter 4976 fall 2 rise 1
+ server 127.0.15.077 127.0.15.77:8080 check weight 10 inter 9153 fall 2 rise 1
+ server 127.0.15.078 127.0.15.78:8080 check weight 10 inter 5284 fall 2 rise 1
+ server 127.0.15.079 127.0.15.79:8080 check weight 10 inter 5220 fall 2 rise 1
+ server 127.0.15.080 127.0.15.80:8080 check weight 10 inter 3768 fall 2 rise 1
+ server 127.0.15.081 127.0.15.81:8080 check weight 10 inter 3498 fall 2 rise 1
+ server 127.0.15.082 127.0.15.82:8080 check weight 10 inter 6651 fall 2 rise 1
+ server 127.0.15.083 127.0.15.83:8080 check weight 10 inter 2496 fall 2 rise 1
+ server 127.0.15.084 127.0.15.84:8080 check weight 10 inter 4725 fall 2 rise 1
+ server 127.0.15.085 127.0.15.85:8080 check weight 10 inter 4774 fall 2 rise 1
+ server 127.0.15.086 127.0.15.86:8080 check weight 10 inter 2121 fall 2 rise 1
+ server 127.0.15.087 127.0.15.87:8080 check weight 10 inter 2902 fall 2 rise 1
+ server 127.0.15.088 127.0.15.88:8080 check weight 10 inter 751 fall 2 rise 1
+ server 127.0.15.089 127.0.15.89:8080 check weight 10 inter 2831 fall 2 rise 1
+ server 127.0.15.090 127.0.15.90:8080 check weight 10 inter 6562 fall 2 rise 1
+ server 127.0.15.091 127.0.15.91:8080 check weight 10 inter 8072 fall 2 rise 1
+ server 127.0.15.092 127.0.15.92:8080 check weight 10 inter 8117 fall 2 rise 1
+ server 127.0.15.093 127.0.15.93:8080 check weight 10 inter 223 fall 2 rise 1
+ server 127.0.15.094 127.0.15.94:8080 check weight 10 inter 8503 fall 2 rise 1
+ server 127.0.15.095 127.0.15.95:8080 check weight 10 inter 262 fall 2 rise 1
+ server 127.0.15.096 127.0.15.96:8080 check weight 10 inter 8936 fall 2 rise 1
+ server 127.0.15.097 127.0.15.97:8080 check weight 10 inter 1009 fall 2 rise 1
+ server 127.0.15.098 127.0.15.98:8080 check weight 10 inter 7358 fall 2 rise 1
+ server 127.0.15.099 127.0.15.99:8080 check weight 10 inter 5456 fall 2 rise 1
+ server 127.0.15.100 127.0.15.100:8080 check weight 10 inter 9047 fall 2 rise 1
+ server 127.0.15.101 127.0.15.101:8080 check weight 10 inter 1051 fall 2 rise 1
+ server 127.0.15.102 127.0.15.102:8080 check weight 10 inter 9109 fall 2 rise 1
+ server 127.0.15.103 127.0.15.103:8080 check weight 10 inter 684 fall 2 rise 1
+ server 127.0.15.104 127.0.15.104:8080 check weight 10 inter 969 fall 2 rise 1
+ server 127.0.15.105 127.0.15.105:8080 check weight 10 inter 2104 fall 2 rise 1
+ server 127.0.15.106 127.0.15.106:8080 check weight 10 inter 8774 fall 2 rise 1
+ server 127.0.15.107 127.0.15.107:8080 check weight 10 inter 9766 fall 2 rise 1
+ server 127.0.15.108 127.0.15.108:8080 check weight 10 inter 1917 fall 2 rise 1
+ server 127.0.15.109 127.0.15.109:8080 check weight 10 inter 4808 fall 2 rise 1
+ server 127.0.15.110 127.0.15.110:8080 check weight 10 inter 3352 fall 2 rise 1
+ server 127.0.15.111 127.0.15.111:8080 check weight 10 inter 2859 fall 2 rise 1
+ server 127.0.15.112 127.0.15.112:8080 check weight 10 inter 8049 fall 2 rise 1
+ server 127.0.15.113 127.0.15.113:8080 check weight 10 inter 4374 fall 2 rise 1
+ server 127.0.15.114 127.0.15.114:8080 check weight 10 inter 5451 fall 2 rise 1
+ server 127.0.15.115 127.0.15.115:8080 check weight 10 inter 964 fall 2 rise 1
+ server 127.0.15.116 127.0.15.116:8080 check weight 10 inter 8516 fall 2 rise 1
+ server 127.0.15.117 127.0.15.117:8080 check weight 10 inter 6236 fall 2 rise 1
+ server 127.0.15.118 127.0.15.118:8080 check weight 10 inter 1090 fall 2 rise 1
+ server 127.0.15.119 127.0.15.119:8080 check weight 10 inter 2307 fall 2 rise 1
+ server 127.0.15.120 127.0.15.120:8080 check weight 10 inter 3068 fall 2 rise 1
+ server 127.0.15.121 127.0.15.121:8080 check weight 10 inter 3929 fall 2 rise 1
+ server 127.0.15.122 127.0.15.122:8080 check weight 10 inter 6990 fall 2 rise 1
+ server 127.0.15.123 127.0.15.123:8080 check weight 10 inter 6206 fall 2 rise 1
+ server 127.0.15.124 127.0.15.124:8080 check weight 10 inter 2950 fall 2 rise 1
+ server 127.0.15.125 127.0.15.125:8080 check weight 10 inter 1998 fall 2 rise 1
+ server 127.0.15.126 127.0.15.126:8080 check weight 10 inter 9775 fall 2 rise 1
+ server 127.0.15.127 127.0.15.127:8080 check weight 10 inter 1342 fall 2 rise 1
+ server 127.0.15.128 127.0.15.128:8080 check weight 10 inter 7330 fall 2 rise 1
+ server 127.0.15.129 127.0.15.129:8080 check weight 10 inter 3900 fall 2 rise 1
+ server 127.0.15.130 127.0.15.130:8080 check weight 10 inter 8362 fall 2 rise 1
+ server 127.0.15.131 127.0.15.131:8080 check weight 10 inter 8502 fall 2 rise 1
+ server 127.0.15.132 127.0.15.132:8080 check weight 10 inter 5242 fall 2 rise 1
+ server 127.0.15.133 127.0.15.133:8080 check weight 10 inter 3837 fall 2 rise 1
+ server 127.0.15.134 127.0.15.134:8080 check weight 10 inter 7064 fall 2 rise 1
+ server 127.0.15.135 127.0.15.135:8080 check weight 10 inter 5797 fall 2 rise 1
+ server 127.0.15.136 127.0.15.136:8080 check weight 10 inter 7190 fall 2 rise 1
+ server 127.0.15.137 127.0.15.137:8080 check weight 10 inter 7588 fall 2 rise 1
+ server 127.0.15.138 127.0.15.138:8080 check weight 10 inter 692 fall 2 rise 1
+ server 127.0.15.139 127.0.15.139:8080 check weight 10 inter 2570 fall 2 rise 1
+ server 127.0.15.140 127.0.15.140:8080 check weight 10 inter 2529 fall 2 rise 1
+ server 127.0.15.141 127.0.15.141:8080 check weight 10 inter 1718 fall 2 rise 1
+ server 127.0.15.142 127.0.15.142:8080 check weight 10 inter 9154 fall 2 rise 1
+ server 127.0.15.143 127.0.15.143:8080 check weight 10 inter 4474 fall 2 rise 1
+ server 127.0.15.144 127.0.15.144:8080 check weight 10 inter 4281 fall 2 rise 1
+ server 127.0.15.145 127.0.15.145:8080 check weight 10 inter 69 fall 2 rise 1
+ server 127.0.15.146 127.0.15.146:8080 check weight 10 inter 5632 fall 2 rise 1
+ server 127.0.15.147 127.0.15.147:8080 check weight 10 inter 1907 fall 2 rise 1
+ server 127.0.15.148 127.0.15.148:8080 check weight 10 inter 6169 fall 2 rise 1
+ server 127.0.15.149 127.0.15.149:8080 check weight 10 inter 1568 fall 2 rise 1
+ server 127.0.15.150 127.0.15.150:8080 check weight 10 inter 734 fall 2 rise 1
+ server 127.0.15.151 127.0.15.151:8080 check weight 10 inter 3837 fall 2 rise 1
+ server 127.0.15.152 127.0.15.152:8080 check weight 10 inter 6103 fall 2 rise 1
+ server 127.0.15.153 127.0.15.153:8080 check weight 10 inter 9756 fall 2 rise 1
+ server 127.0.15.154 127.0.15.154:8080 check weight 10 inter 4109 fall 2 rise 1
+ server 127.0.15.155 127.0.15.155:8080 check weight 10 inter 917 fall 2 rise 1
+ server 127.0.15.156 127.0.15.156:8080 check weight 10 inter 1980 fall 2 rise 1
+ server 127.0.15.157 127.0.15.157:8080 check weight 10 inter 3617 fall 2 rise 1
+ server 127.0.15.158 127.0.15.158:8080 check weight 10 inter 217 fall 2 rise 1
+ server 127.0.15.159 127.0.15.159:8080 check weight 10 inter 6505 fall 2 rise 1
+ server 127.0.15.160 127.0.15.160:8080 check weight 10 inter 4214 fall 2 rise 1
+ server 127.0.15.161 127.0.15.161:8080 check weight 10 inter 7973 fall 2 rise 1
+ server 127.0.15.162 127.0.15.162:8080 check weight 10 inter 4198 fall 2 rise 1
+ server 127.0.15.163 127.0.15.163:8080 check weight 10 inter 2723 fall 2 rise 1
+ server 127.0.15.164 127.0.15.164:8080 check weight 10 inter 4064 fall 2 rise 1
+ server 127.0.15.165 127.0.15.165:8080 check weight 10 inter 249 fall 2 rise 1
+ server 127.0.15.166 127.0.15.166:8080 check weight 10 inter 4716 fall 2 rise 1
+ server 127.0.15.167 127.0.15.167:8080 check weight 10 inter 1590 fall 2 rise 1
+ server 127.0.15.168 127.0.15.168:8080 check weight 10 inter 2172 fall 2 rise 1
+ server 127.0.15.169 127.0.15.169:8080 check weight 10 inter 2325 fall 2 rise 1
+ server 127.0.15.170 127.0.15.170:8080 check weight 10 inter 3825 fall 2 rise 1
+ server 127.0.15.171 127.0.15.171:8080 check weight 10 inter 6938 fall 2 rise 1
+ server 127.0.15.172 127.0.15.172:8080 check weight 10 inter 4710 fall 2 rise 1
+ server 127.0.15.173 127.0.15.173:8080 check weight 10 inter 7219 fall 2 rise 1
+ server 127.0.15.174 127.0.15.174:8080 check weight 10 inter 7707 fall 2 rise 1
+ server 127.0.15.175 127.0.15.175:8080 check weight 10 inter 1159 fall 2 rise 1
+ server 127.0.15.176 127.0.15.176:8080 check weight 10 inter 2761 fall 2 rise 1
+ server 127.0.15.177 127.0.15.177:8080 check weight 10 inter 4686 fall 2 rise 1
+ server 127.0.15.178 127.0.15.178:8080 check weight 10 inter 2146 fall 2 rise 1
+ server 127.0.15.179 127.0.15.179:8080 check weight 10 inter 7587 fall 2 rise 1
+ server 127.0.15.180 127.0.15.180:8080 check weight 10 inter 2797 fall 2 rise 1
+ server 127.0.15.181 127.0.15.181:8080 check weight 10 inter 2250 fall 2 rise 1
+ server 127.0.15.182 127.0.15.182:8080 check weight 10 inter 7511 fall 2 rise 1
+ server 127.0.15.183 127.0.15.183:8080 check weight 10 inter 6220 fall 2 rise 1
+ server 127.0.15.184 127.0.15.184:8080 check weight 10 inter 503 fall 2 rise 1
+ server 127.0.15.185 127.0.15.185:8080 check weight 10 inter 848 fall 2 rise 1
+ server 127.0.15.186 127.0.15.186:8080 check weight 10 inter 8519 fall 2 rise 1
+ server 127.0.15.187 127.0.15.187:8080 check weight 10 inter 6084 fall 2 rise 1
+ server 127.0.15.188 127.0.15.188:8080 check weight 10 inter 4878 fall 2 rise 1
+ server 127.0.15.189 127.0.15.189:8080 check weight 10 inter 816 fall 2 rise 1
+ server 127.0.15.190 127.0.15.190:8080 check weight 10 inter 1677 fall 2 rise 1
+ server 127.0.15.191 127.0.15.191:8080 check weight 10 inter 3919 fall 2 rise 1
+ server 127.0.15.192 127.0.15.192:8080 check weight 10 inter 703 fall 2 rise 1
+ server 127.0.15.193 127.0.15.193:8080 check weight 10 inter 9236 fall 2 rise 1
+ server 127.0.15.194 127.0.15.194:8080 check weight 10 inter 1227 fall 2 rise 1
+ server 127.0.15.195 127.0.15.195:8080 check weight 10 inter 958 fall 2 rise 1
+ server 127.0.15.196 127.0.15.196:8080 check weight 10 inter 480 fall 2 rise 1
+ server 127.0.15.197 127.0.15.197:8080 check weight 10 inter 6550 fall 2 rise 1
+ server 127.0.15.198 127.0.15.198:8080 check weight 10 inter 4503 fall 2 rise 1
+ server 127.0.15.199 127.0.15.199:8080 check weight 10 inter 6409 fall 2 rise 1
+ server 127.0.15.200 127.0.15.200:8080 check weight 10 inter 6532 fall 2 rise 1
+ server 127.0.15.201 127.0.15.201:8080 check weight 10 inter 674 fall 2 rise 1
+ server 127.0.15.202 127.0.15.202:8080 check weight 10 inter 1030 fall 2 rise 1
+ server 127.0.15.203 127.0.15.203:8080 check weight 10 inter 6478 fall 2 rise 1
+ server 127.0.15.204 127.0.15.204:8080 check weight 10 inter 1604 fall 2 rise 1
+ server 127.0.15.205 127.0.15.205:8080 check weight 10 inter 7145 fall 2 rise 1
+ server 127.0.15.206 127.0.15.206:8080 check weight 10 inter 3849 fall 2 rise 1
+ server 127.0.15.207 127.0.15.207:8080 check weight 10 inter 7084 fall 2 rise 1
+ server 127.0.15.208 127.0.15.208:8080 check weight 10 inter 5416 fall 2 rise 1
+ server 127.0.15.209 127.0.15.209:8080 check weight 10 inter 7210 fall 2 rise 1
+ server 127.0.15.210 127.0.15.210:8080 check weight 10 inter 8921 fall 2 rise 1
+ server 127.0.15.211 127.0.15.211:8080 check weight 10 inter 6958 fall 2 rise 1
+ server 127.0.15.212 127.0.15.212:8080 check weight 10 inter 9831 fall 2 rise 1
+ server 127.0.15.213 127.0.15.213:8080 check weight 10 inter 3793 fall 2 rise 1
+ server 127.0.15.214 127.0.15.214:8080 check weight 10 inter 1539 fall 2 rise 1
+ server 127.0.15.215 127.0.15.215:8080 check weight 10 inter 184 fall 2 rise 1
+ server 127.0.15.216 127.0.15.216:8080 check weight 10 inter 1316 fall 2 rise 1
+ server 127.0.15.217 127.0.15.217:8080 check weight 10 inter 2621 fall 2 rise 1
+ server 127.0.15.218 127.0.15.218:8080 check weight 10 inter 2564 fall 2 rise 1
+ server 127.0.15.219 127.0.15.219:8080 check weight 10 inter 7811 fall 2 rise 1
+ server 127.0.15.220 127.0.15.220:8080 check weight 10 inter 9333 fall 2 rise 1
+ server 127.0.15.221 127.0.15.221:8080 check weight 10 inter 2290 fall 2 rise 1
+ server 127.0.15.222 127.0.15.222:8080 check weight 10 inter 291 fall 2 rise 1
+ server 127.0.15.223 127.0.15.223:8080 check weight 10 inter 3753 fall 2 rise 1
+ server 127.0.15.224 127.0.15.224:8080 check weight 10 inter 9538 fall 2 rise 1
+ server 127.0.15.225 127.0.15.225:8080 check weight 10 inter 6883 fall 2 rise 1
+ server 127.0.15.226 127.0.15.226:8080 check weight 10 inter 3714 fall 2 rise 1
+ server 127.0.15.227 127.0.15.227:8080 check weight 10 inter 1039 fall 2 rise 1
+ server 127.0.15.228 127.0.15.228:8080 check weight 10 inter 4082 fall 2 rise 1
+ server 127.0.15.229 127.0.15.229:8080 check weight 10 inter 7501 fall 2 rise 1
+ server 127.0.15.230 127.0.15.230:8080 check weight 10 inter 8690 fall 2 rise 1
+ server 127.0.15.231 127.0.15.231:8080 check weight 10 inter 4437 fall 2 rise 1
+ server 127.0.15.232 127.0.15.232:8080 check weight 10 inter 8054 fall 2 rise 1
+ server 127.0.15.233 127.0.15.233:8080 check weight 10 inter 1876 fall 2 rise 1
+ server 127.0.15.234 127.0.15.234:8080 check weight 10 inter 8601 fall 2 rise 1
+ server 127.0.15.235 127.0.15.235:8080 check weight 10 inter 9791 fall 2 rise 1
+ server 127.0.15.236 127.0.15.236:8080 check weight 10 inter 5441 fall 2 rise 1
+ server 127.0.15.237 127.0.15.237:8080 check weight 10 inter 1358 fall 2 rise 1
+ server 127.0.15.238 127.0.15.238:8080 check weight 10 inter 549 fall 2 rise 1
+ server 127.0.15.239 127.0.15.239:8080 check weight 10 inter 3952 fall 2 rise 1
+ server 127.0.15.240 127.0.15.240:8080 check weight 10 inter 1342 fall 2 rise 1
+ server 127.0.15.241 127.0.15.241:8080 check weight 10 inter 5969 fall 2 rise 1
+ server 127.0.15.242 127.0.15.242:8080 check weight 10 inter 1629 fall 2 rise 1
+ server 127.0.15.243 127.0.15.243:8080 check weight 10 inter 5659 fall 2 rise 1
+ server 127.0.15.244 127.0.15.244:8080 check weight 10 inter 1184 fall 2 rise 1
+ server 127.0.15.245 127.0.15.245:8080 check weight 10 inter 7437 fall 2 rise 1
+ server 127.0.15.246 127.0.15.246:8080 check weight 10 inter 2763 fall 2 rise 1
+ server 127.0.15.247 127.0.15.247:8080 check weight 10 inter 3370 fall 2 rise 1
+ server 127.0.15.248 127.0.15.248:8080 check weight 10 inter 9063 fall 2 rise 1
+ server 127.0.15.249 127.0.15.249:8080 check weight 10 inter 8138 fall 2 rise 1
+ server 127.0.15.250 127.0.15.250:8080 check weight 10 inter 2028 fall 2 rise 1
+ server 127.0.16.001 127.0.16.1:8080 check weight 10 inter 1659 fall 2 rise 1
+ server 127.0.16.002 127.0.16.2:8080 check weight 10 inter 1048 fall 2 rise 1
+ server 127.0.16.003 127.0.16.3:8080 check weight 10 inter 9072 fall 2 rise 1
+ server 127.0.16.004 127.0.16.4:8080 check weight 10 inter 323 fall 2 rise 1
+ server 127.0.16.005 127.0.16.5:8080 check weight 10 inter 7549 fall 2 rise 1
+ server 127.0.16.006 127.0.16.6:8080 check weight 10 inter 3401 fall 2 rise 1
+ server 127.0.16.007 127.0.16.7:8080 check weight 10 inter 9448 fall 2 rise 1
+ server 127.0.16.008 127.0.16.8:8080 check weight 10 inter 5969 fall 2 rise 1
+ server 127.0.16.009 127.0.16.9:8080 check weight 10 inter 1950 fall 2 rise 1
+ server 127.0.16.010 127.0.16.10:8080 check weight 10 inter 1407 fall 2 rise 1
+ server 127.0.16.011 127.0.16.11:8080 check weight 10 inter 6357 fall 2 rise 1
+ server 127.0.16.012 127.0.16.12:8080 check weight 10 inter 2134 fall 2 rise 1
+ server 127.0.16.013 127.0.16.13:8080 check weight 10 inter 162 fall 2 rise 1
+ server 127.0.16.014 127.0.16.14:8080 check weight 10 inter 6152 fall 2 rise 1
+ server 127.0.16.015 127.0.16.15:8080 check weight 10 inter 7282 fall 2 rise 1
+ server 127.0.16.016 127.0.16.16:8080 check weight 10 inter 7347 fall 2 rise 1
+ server 127.0.16.017 127.0.16.17:8080 check weight 10 inter 3859 fall 2 rise 1
+ server 127.0.16.018 127.0.16.18:8080 check weight 10 inter 6472 fall 2 rise 1
+ server 127.0.16.019 127.0.16.19:8080 check weight 10 inter 987 fall 2 rise 1
+ server 127.0.16.020 127.0.16.20:8080 check weight 10 inter 599 fall 2 rise 1
+ server 127.0.16.021 127.0.16.21:8080 check weight 10 inter 4843 fall 2 rise 1
+ server 127.0.16.022 127.0.16.22:8080 check weight 10 inter 4036 fall 2 rise 1
+ server 127.0.16.023 127.0.16.23:8080 check weight 10 inter 5243 fall 2 rise 1
+ server 127.0.16.024 127.0.16.24:8080 check weight 10 inter 2149 fall 2 rise 1
+ server 127.0.16.025 127.0.16.25:8080 check weight 10 inter 1555 fall 2 rise 1
+ server 127.0.16.026 127.0.16.26:8080 check weight 10 inter 4319 fall 2 rise 1
+ server 127.0.16.027 127.0.16.27:8080 check weight 10 inter 1224 fall 2 rise 1
+ server 127.0.16.028 127.0.16.28:8080 check weight 10 inter 3720 fall 2 rise 1
+ server 127.0.16.029 127.0.16.29:8080 check weight 10 inter 5417 fall 2 rise 1
+ server 127.0.16.030 127.0.16.30:8080 check weight 10 inter 360 fall 2 rise 1
+ server 127.0.16.031 127.0.16.31:8080 check weight 10 inter 2576 fall 2 rise 1
+ server 127.0.16.032 127.0.16.32:8080 check weight 10 inter 2742 fall 2 rise 1
+ server 127.0.16.033 127.0.16.33:8080 check weight 10 inter 9874 fall 2 rise 1
+ server 127.0.16.034 127.0.16.34:8080 check weight 10 inter 2048 fall 2 rise 1
+ server 127.0.16.035 127.0.16.35:8080 check weight 10 inter 6849 fall 2 rise 1
+ server 127.0.16.036 127.0.16.36:8080 check weight 10 inter 8629 fall 2 rise 1
+ server 127.0.16.037 127.0.16.37:8080 check weight 10 inter 8853 fall 2 rise 1
+ server 127.0.16.038 127.0.16.38:8080 check weight 10 inter 4908 fall 2 rise 1
+ server 127.0.16.039 127.0.16.39:8080 check weight 10 inter 5884 fall 2 rise 1
+ server 127.0.16.040 127.0.16.40:8080 check weight 10 inter 209 fall 2 rise 1
+ server 127.0.16.041 127.0.16.41:8080 check weight 10 inter 2132 fall 2 rise 1
+ server 127.0.16.042 127.0.16.42:8080 check weight 10 inter 847 fall 2 rise 1
+ server 127.0.16.043 127.0.16.43:8080 check weight 10 inter 7640 fall 2 rise 1
+ server 127.0.16.044 127.0.16.44:8080 check weight 10 inter 585 fall 2 rise 1
+ server 127.0.16.045 127.0.16.45:8080 check weight 10 inter 6680 fall 2 rise 1
+ server 127.0.16.046 127.0.16.46:8080 check weight 10 inter 2948 fall 2 rise 1
+ server 127.0.16.047 127.0.16.47:8080 check weight 10 inter 1429 fall 2 rise 1
+ server 127.0.16.048 127.0.16.48:8080 check weight 10 inter 9820 fall 2 rise 1
+ server 127.0.16.049 127.0.16.49:8080 check weight 10 inter 660 fall 2 rise 1
+ server 127.0.16.050 127.0.16.50:8080 check weight 10 inter 2959 fall 2 rise 1
+ server 127.0.16.051 127.0.16.51:8080 check weight 10 inter 7351 fall 2 rise 1
+ server 127.0.16.052 127.0.16.52:8080 check weight 10 inter 4919 fall 2 rise 1
+ server 127.0.16.053 127.0.16.53:8080 check weight 10 inter 7606 fall 2 rise 1
+ server 127.0.16.054 127.0.16.54:8080 check weight 10 inter 9697 fall 2 rise 1
+ server 127.0.16.055 127.0.16.55:8080 check weight 10 inter 8655 fall 2 rise 1
+ server 127.0.16.056 127.0.16.56:8080 check weight 10 inter 6875 fall 2 rise 1
+ server 127.0.16.057 127.0.16.57:8080 check weight 10 inter 9196 fall 2 rise 1
+ server 127.0.16.058 127.0.16.58:8080 check weight 10 inter 3021 fall 2 rise 1
+ server 127.0.16.059 127.0.16.59:8080 check weight 10 inter 1688 fall 2 rise 1
+ server 127.0.16.060 127.0.16.60:8080 check weight 10 inter 608 fall 2 rise 1
+ server 127.0.16.061 127.0.16.61:8080 check weight 10 inter 1805 fall 2 rise 1
+ server 127.0.16.062 127.0.16.62:8080 check weight 10 inter 1758 fall 2 rise 1
+ server 127.0.16.063 127.0.16.63:8080 check weight 10 inter 5990 fall 2 rise 1
+ server 127.0.16.064 127.0.16.64:8080 check weight 10 inter 1212 fall 2 rise 1
+ server 127.0.16.065 127.0.16.65:8080 check weight 10 inter 8626 fall 2 rise 1
+ server 127.0.16.066 127.0.16.66:8080 check weight 10 inter 5140 fall 2 rise 1
+ server 127.0.16.067 127.0.16.67:8080 check weight 10 inter 3647 fall 2 rise 1
+ server 127.0.16.068 127.0.16.68:8080 check weight 10 inter 1889 fall 2 rise 1
+ server 127.0.16.069 127.0.16.69:8080 check weight 10 inter 5352 fall 2 rise 1
+ server 127.0.16.070 127.0.16.70:8080 check weight 10 inter 5538 fall 2 rise 1
+ server 127.0.16.071 127.0.16.71:8080 check weight 10 inter 4486 fall 2 rise 1
+ server 127.0.16.072 127.0.16.72:8080 check weight 10 inter 1466 fall 2 rise 1
+ server 127.0.16.073 127.0.16.73:8080 check weight 10 inter 2247 fall 2 rise 1
+ server 127.0.16.074 127.0.16.74:8080 check weight 10 inter 1577 fall 2 rise 1
+ server 127.0.16.075 127.0.16.75:8080 check weight 10 inter 7132 fall 2 rise 1
+ server 127.0.16.076 127.0.16.76:8080 check weight 10 inter 9620 fall 2 rise 1
+ server 127.0.16.077 127.0.16.77:8080 check weight 10 inter 4624 fall 2 rise 1
+ server 127.0.16.078 127.0.16.78:8080 check weight 10 inter 4962 fall 2 rise 1
+ server 127.0.16.079 127.0.16.79:8080 check weight 10 inter 2661 fall 2 rise 1
+ server 127.0.16.080 127.0.16.80:8080 check weight 10 inter 2012 fall 2 rise 1
+ server 127.0.16.081 127.0.16.81:8080 check weight 10 inter 1625 fall 2 rise 1
+ server 127.0.16.082 127.0.16.82:8080 check weight 10 inter 9486 fall 2 rise 1
+ server 127.0.16.083 127.0.16.83:8080 check weight 10 inter 6875 fall 2 rise 1
+ server 127.0.16.084 127.0.16.84:8080 check weight 10 inter 7020 fall 2 rise 1
+ server 127.0.16.085 127.0.16.85:8080 check weight 10 inter 8695 fall 2 rise 1
+ server 127.0.16.086 127.0.16.86:8080 check weight 10 inter 6158 fall 2 rise 1
+ server 127.0.16.087 127.0.16.87:8080 check weight 10 inter 1942 fall 2 rise 1
+ server 127.0.16.088 127.0.16.88:8080 check weight 10 inter 101 fall 2 rise 1
+ server 127.0.16.089 127.0.16.89:8080 check weight 10 inter 7056 fall 2 rise 1
+ server 127.0.16.090 127.0.16.90:8080 check weight 10 inter 1857 fall 2 rise 1
+ server 127.0.16.091 127.0.16.91:8080 check weight 10 inter 1094 fall 2 rise 1
+ server 127.0.16.092 127.0.16.92:8080 check weight 10 inter 8380 fall 2 rise 1
+ server 127.0.16.093 127.0.16.93:8080 check weight 10 inter 1086 fall 2 rise 1
+ server 127.0.16.094 127.0.16.94:8080 check weight 10 inter 2513 fall 2 rise 1
+ server 127.0.16.095 127.0.16.95:8080 check weight 10 inter 8853 fall 2 rise 1
+ server 127.0.16.096 127.0.16.96:8080 check weight 10 inter 2916 fall 2 rise 1
+ server 127.0.16.097 127.0.16.97:8080 check weight 10 inter 3538 fall 2 rise 1
+ server 127.0.16.098 127.0.16.98:8080 check weight 10 inter 6684 fall 2 rise 1
+ server 127.0.16.099 127.0.16.99:8080 check weight 10 inter 3216 fall 2 rise 1
+ server 127.0.16.100 127.0.16.100:8080 check weight 10 inter 3215 fall 2 rise 1
+ server 127.0.16.101 127.0.16.101:8080 check weight 10 inter 2270 fall 2 rise 1
+ server 127.0.16.102 127.0.16.102:8080 check weight 10 inter 2243 fall 2 rise 1
+ server 127.0.16.103 127.0.16.103:8080 check weight 10 inter 6735 fall 2 rise 1
+ server 127.0.16.104 127.0.16.104:8080 check weight 10 inter 7140 fall 2 rise 1
+ server 127.0.16.105 127.0.16.105:8080 check weight 10 inter 6881 fall 2 rise 1
+ server 127.0.16.106 127.0.16.106:8080 check weight 10 inter 8021 fall 2 rise 1
+ server 127.0.16.107 127.0.16.107:8080 check weight 10 inter 9114 fall 2 rise 1
+ server 127.0.16.108 127.0.16.108:8080 check weight 10 inter 7407 fall 2 rise 1
+ server 127.0.16.109 127.0.16.109:8080 check weight 10 inter 7697 fall 2 rise 1
+ server 127.0.16.110 127.0.16.110:8080 check weight 10 inter 3113 fall 2 rise 1
+ server 127.0.16.111 127.0.16.111:8080 check weight 10 inter 5274 fall 2 rise 1
+ server 127.0.16.112 127.0.16.112:8080 check weight 10 inter 1659 fall 2 rise 1
+ server 127.0.16.113 127.0.16.113:8080 check weight 10 inter 6570 fall 2 rise 1
+ server 127.0.16.114 127.0.16.114:8080 check weight 10 inter 4189 fall 2 rise 1
+ server 127.0.16.115 127.0.16.115:8080 check weight 10 inter 6175 fall 2 rise 1
+ server 127.0.16.116 127.0.16.116:8080 check weight 10 inter 3008 fall 2 rise 1
+ server 127.0.16.117 127.0.16.117:8080 check weight 10 inter 4872 fall 2 rise 1
+ server 127.0.16.118 127.0.16.118:8080 check weight 10 inter 5650 fall 2 rise 1
+ server 127.0.16.119 127.0.16.119:8080 check weight 10 inter 4023 fall 2 rise 1
+ server 127.0.16.120 127.0.16.120:8080 check weight 10 inter 1709 fall 2 rise 1
+ server 127.0.16.121 127.0.16.121:8080 check weight 10 inter 3062 fall 2 rise 1
+ server 127.0.16.122 127.0.16.122:8080 check weight 10 inter 644 fall 2 rise 1
+ server 127.0.16.123 127.0.16.123:8080 check weight 10 inter 2969 fall 2 rise 1
+ server 127.0.16.124 127.0.16.124:8080 check weight 10 inter 8420 fall 2 rise 1
+ server 127.0.16.125 127.0.16.125:8080 check weight 10 inter 4884 fall 2 rise 1
+ server 127.0.16.126 127.0.16.126:8080 check weight 10 inter 746 fall 2 rise 1
+ server 127.0.16.127 127.0.16.127:8080 check weight 10 inter 981 fall 2 rise 1
+ server 127.0.16.128 127.0.16.128:8080 check weight 10 inter 7848 fall 2 rise 1
+ server 127.0.16.129 127.0.16.129:8080 check weight 10 inter 2458 fall 2 rise 1
+ server 127.0.16.130 127.0.16.130:8080 check weight 10 inter 1057 fall 2 rise 1
+ server 127.0.16.131 127.0.16.131:8080 check weight 10 inter 573 fall 2 rise 1
+ server 127.0.16.132 127.0.16.132:8080 check weight 10 inter 4471 fall 2 rise 1
+ server 127.0.16.133 127.0.16.133:8080 check weight 10 inter 7904 fall 2 rise 1
+ server 127.0.16.134 127.0.16.134:8080 check weight 10 inter 8072 fall 2 rise 1
+ server 127.0.16.135 127.0.16.135:8080 check weight 10 inter 3455 fall 2 rise 1
+ server 127.0.16.136 127.0.16.136:8080 check weight 10 inter 7447 fall 2 rise 1
+ server 127.0.16.137 127.0.16.137:8080 check weight 10 inter 4537 fall 2 rise 1
+ server 127.0.16.138 127.0.16.138:8080 check weight 10 inter 924 fall 2 rise 1
+ server 127.0.16.139 127.0.16.139:8080 check weight 10 inter 1897 fall 2 rise 1
+ server 127.0.16.140 127.0.16.140:8080 check weight 10 inter 242 fall 2 rise 1
+ server 127.0.16.141 127.0.16.141:8080 check weight 10 inter 5443 fall 2 rise 1
+ server 127.0.16.142 127.0.16.142:8080 check weight 10 inter 5490 fall 2 rise 1
+ server 127.0.16.143 127.0.16.143:8080 check weight 10 inter 2907 fall 2 rise 1
+ server 127.0.16.144 127.0.16.144:8080 check weight 10 inter 2705 fall 2 rise 1
+ server 127.0.16.145 127.0.16.145:8080 check weight 10 inter 3071 fall 2 rise 1
+ server 127.0.16.146 127.0.16.146:8080 check weight 10 inter 454 fall 2 rise 1
+ server 127.0.16.147 127.0.16.147:8080 check weight 10 inter 3131 fall 2 rise 1
+ server 127.0.16.148 127.0.16.148:8080 check weight 10 inter 4346 fall 2 rise 1
+ server 127.0.16.149 127.0.16.149:8080 check weight 10 inter 3200 fall 2 rise 1
+ server 127.0.16.150 127.0.16.150:8080 check weight 10 inter 3109 fall 2 rise 1
+ server 127.0.16.151 127.0.16.151:8080 check weight 10 inter 3163 fall 2 rise 1
+ server 127.0.16.152 127.0.16.152:8080 check weight 10 inter 1177 fall 2 rise 1
+ server 127.0.16.153 127.0.16.153:8080 check weight 10 inter 7366 fall 2 rise 1
+ server 127.0.16.154 127.0.16.154:8080 check weight 10 inter 5516 fall 2 rise 1
+ server 127.0.16.155 127.0.16.155:8080 check weight 10 inter 411 fall 2 rise 1
+ server 127.0.16.156 127.0.16.156:8080 check weight 10 inter 1936 fall 2 rise 1
+ server 127.0.16.157 127.0.16.157:8080 check weight 10 inter 7896 fall 2 rise 1
+ server 127.0.16.158 127.0.16.158:8080 check weight 10 inter 4512 fall 2 rise 1
+ server 127.0.16.159 127.0.16.159:8080 check weight 10 inter 2606 fall 2 rise 1
+ server 127.0.16.160 127.0.16.160:8080 check weight 10 inter 9438 fall 2 rise 1
+ server 127.0.16.161 127.0.16.161:8080 check weight 10 inter 7155 fall 2 rise 1
+ server 127.0.16.162 127.0.16.162:8080 check weight 10 inter 6186 fall 2 rise 1
+ server 127.0.16.163 127.0.16.163:8080 check weight 10 inter 9229 fall 2 rise 1
+ server 127.0.16.164 127.0.16.164:8080 check weight 10 inter 3952 fall 2 rise 1
+ server 127.0.16.165 127.0.16.165:8080 check weight 10 inter 852 fall 2 rise 1
+ server 127.0.16.166 127.0.16.166:8080 check weight 10 inter 1544 fall 2 rise 1
+ server 127.0.16.167 127.0.16.167:8080 check weight 10 inter 350 fall 2 rise 1
+ server 127.0.16.168 127.0.16.168:8080 check weight 10 inter 6043 fall 2 rise 1
+ server 127.0.16.169 127.0.16.169:8080 check weight 10 inter 1479 fall 2 rise 1
+ server 127.0.16.170 127.0.16.170:8080 check weight 10 inter 1813 fall 2 rise 1
+ server 127.0.16.171 127.0.16.171:8080 check weight 10 inter 2290 fall 2 rise 1
+ server 127.0.16.172 127.0.16.172:8080 check weight 10 inter 1477 fall 2 rise 1
+ server 127.0.16.173 127.0.16.173:8080 check weight 10 inter 7710 fall 2 rise 1
+ server 127.0.16.174 127.0.16.174:8080 check weight 10 inter 115 fall 2 rise 1
+ server 127.0.16.175 127.0.16.175:8080 check weight 10 inter 4081 fall 2 rise 1
+ server 127.0.16.176 127.0.16.176:8080 check weight 10 inter 3606 fall 2 rise 1
+ server 127.0.16.177 127.0.16.177:8080 check weight 10 inter 2624 fall 2 rise 1
+ server 127.0.16.178 127.0.16.178:8080 check weight 10 inter 5214 fall 2 rise 1
+ server 127.0.16.179 127.0.16.179:8080 check weight 10 inter 2422 fall 2 rise 1
+ server 127.0.16.180 127.0.16.180:8080 check weight 10 inter 7858 fall 2 rise 1
+ server 127.0.16.181 127.0.16.181:8080 check weight 10 inter 2119 fall 2 rise 1
+ server 127.0.16.182 127.0.16.182:8080 check weight 10 inter 7872 fall 2 rise 1
+ server 127.0.16.183 127.0.16.183:8080 check weight 10 inter 5274 fall 2 rise 1
+ server 127.0.16.184 127.0.16.184:8080 check weight 10 inter 6339 fall 2 rise 1
+ server 127.0.16.185 127.0.16.185:8080 check weight 10 inter 1272 fall 2 rise 1
+ server 127.0.16.186 127.0.16.186:8080 check weight 10 inter 6164 fall 2 rise 1
+ server 127.0.16.187 127.0.16.187:8080 check weight 10 inter 3938 fall 2 rise 1
+ server 127.0.16.188 127.0.16.188:8080 check weight 10 inter 9960 fall 2 rise 1
+ server 127.0.16.189 127.0.16.189:8080 check weight 10 inter 7888 fall 2 rise 1
+ server 127.0.16.190 127.0.16.190:8080 check weight 10 inter 9035 fall 2 rise 1
+ server 127.0.16.191 127.0.16.191:8080 check weight 10 inter 3303 fall 2 rise 1
+ server 127.0.16.192 127.0.16.192:8080 check weight 10 inter 95 fall 2 rise 1
+ server 127.0.16.193 127.0.16.193:8080 check weight 10 inter 6083 fall 2 rise 1
+ server 127.0.16.194 127.0.16.194:8080 check weight 10 inter 8016 fall 2 rise 1
+ server 127.0.16.195 127.0.16.195:8080 check weight 10 inter 3961 fall 2 rise 1
+ server 127.0.16.196 127.0.16.196:8080 check weight 10 inter 9780 fall 2 rise 1
+ server 127.0.16.197 127.0.16.197:8080 check weight 10 inter 8324 fall 2 rise 1
+ server 127.0.16.198 127.0.16.198:8080 check weight 10 inter 2812 fall 2 rise 1
+ server 127.0.16.199 127.0.16.199:8080 check weight 10 inter 3539 fall 2 rise 1
+ server 127.0.16.200 127.0.16.200:8080 check weight 10 inter 2792 fall 2 rise 1
+ server 127.0.16.201 127.0.16.201:8080 check weight 10 inter 2355 fall 2 rise 1
+ server 127.0.16.202 127.0.16.202:8080 check weight 10 inter 3000 fall 2 rise 1
+ server 127.0.16.203 127.0.16.203:8080 check weight 10 inter 5597 fall 2 rise 1
+ server 127.0.16.204 127.0.16.204:8080 check weight 10 inter 896 fall 2 rise 1
+ server 127.0.16.205 127.0.16.205:8080 check weight 10 inter 186 fall 2 rise 1
+ server 127.0.16.206 127.0.16.206:8080 check weight 10 inter 6647 fall 2 rise 1
+ server 127.0.16.207 127.0.16.207:8080 check weight 10 inter 7732 fall 2 rise 1
+ server 127.0.16.208 127.0.16.208:8080 check weight 10 inter 6258 fall 2 rise 1
+ server 127.0.16.209 127.0.16.209:8080 check weight 10 inter 646 fall 2 rise 1
+ server 127.0.16.210 127.0.16.210:8080 check weight 10 inter 1727 fall 2 rise 1
+ server 127.0.16.211 127.0.16.211:8080 check weight 10 inter 8921 fall 2 rise 1
+ server 127.0.16.212 127.0.16.212:8080 check weight 10 inter 2959 fall 2 rise 1
+ server 127.0.16.213 127.0.16.213:8080 check weight 10 inter 2070 fall 2 rise 1
+ server 127.0.16.214 127.0.16.214:8080 check weight 10 inter 907 fall 2 rise 1
+ server 127.0.16.215 127.0.16.215:8080 check weight 10 inter 1148 fall 2 rise 1
+ server 127.0.16.216 127.0.16.216:8080 check weight 10 inter 161 fall 2 rise 1
+ server 127.0.16.217 127.0.16.217:8080 check weight 10 inter 1076 fall 2 rise 1
+ server 127.0.16.218 127.0.16.218:8080 check weight 10 inter 290 fall 2 rise 1
+ server 127.0.16.219 127.0.16.219:8080 check weight 10 inter 3638 fall 2 rise 1
+ server 127.0.16.220 127.0.16.220:8080 check weight 10 inter 7188 fall 2 rise 1
+ server 127.0.16.221 127.0.16.221:8080 check weight 10 inter 2934 fall 2 rise 1
+ server 127.0.16.222 127.0.16.222:8080 check weight 10 inter 1172 fall 2 rise 1
+ server 127.0.16.223 127.0.16.223:8080 check weight 10 inter 5833 fall 2 rise 1
+ server 127.0.16.224 127.0.16.224:8080 check weight 10 inter 9508 fall 2 rise 1
+ server 127.0.16.225 127.0.16.225:8080 check weight 10 inter 9924 fall 2 rise 1
+ server 127.0.16.226 127.0.16.226:8080 check weight 10 inter 8810 fall 2 rise 1
+ server 127.0.16.227 127.0.16.227:8080 check weight 10 inter 3577 fall 2 rise 1
+ server 127.0.16.228 127.0.16.228:8080 check weight 10 inter 7130 fall 2 rise 1
+ server 127.0.16.229 127.0.16.229:8080 check weight 10 inter 5063 fall 2 rise 1
+ server 127.0.16.230 127.0.16.230:8080 check weight 10 inter 1963 fall 2 rise 1
+ server 127.0.16.231 127.0.16.231:8080 check weight 10 inter 1255 fall 2 rise 1
+ server 127.0.16.232 127.0.16.232:8080 check weight 10 inter 4837 fall 2 rise 1
+ server 127.0.16.233 127.0.16.233:8080 check weight 10 inter 6806 fall 2 rise 1
+ server 127.0.16.234 127.0.16.234:8080 check weight 10 inter 350 fall 2 rise 1
+ server 127.0.16.235 127.0.16.235:8080 check weight 10 inter 5617 fall 2 rise 1
+ server 127.0.16.236 127.0.16.236:8080 check weight 10 inter 9802 fall 2 rise 1
+ server 127.0.16.237 127.0.16.237:8080 check weight 10 inter 6096 fall 2 rise 1
+ server 127.0.16.238 127.0.16.238:8080 check weight 10 inter 7444 fall 2 rise 1
+ server 127.0.16.239 127.0.16.239:8080 check weight 10 inter 2139 fall 2 rise 1
+ server 127.0.16.240 127.0.16.240:8080 check weight 10 inter 4300 fall 2 rise 1
+ server 127.0.16.241 127.0.16.241:8080 check weight 10 inter 312 fall 2 rise 1
+ server 127.0.16.242 127.0.16.242:8080 check weight 10 inter 8496 fall 2 rise 1
+ server 127.0.16.243 127.0.16.243:8080 check weight 10 inter 7068 fall 2 rise 1
+ server 127.0.16.244 127.0.16.244:8080 check weight 10 inter 1659 fall 2 rise 1
+ server 127.0.16.245 127.0.16.245:8080 check weight 10 inter 2099 fall 2 rise 1
+ server 127.0.16.246 127.0.16.246:8080 check weight 10 inter 8653 fall 2 rise 1
+ server 127.0.16.247 127.0.16.247:8080 check weight 10 inter 6456 fall 2 rise 1
+ server 127.0.16.248 127.0.16.248:8080 check weight 10 inter 124 fall 2 rise 1
+ server 127.0.16.249 127.0.16.249:8080 check weight 10 inter 4227 fall 2 rise 1
+ server 127.0.16.250 127.0.16.250:8080 check weight 10 inter 9150 fall 2 rise 1
+ server 127.0.17.001 127.0.17.1:8080 check weight 10 inter 2561 fall 2 rise 1
+ server 127.0.17.002 127.0.17.2:8080 check weight 10 inter 2603 fall 2 rise 1
+ server 127.0.17.003 127.0.17.3:8080 check weight 10 inter 9713 fall 2 rise 1
+ server 127.0.17.004 127.0.17.4:8080 check weight 10 inter 7715 fall 2 rise 1
+ server 127.0.17.005 127.0.17.5:8080 check weight 10 inter 9532 fall 2 rise 1
+ server 127.0.17.006 127.0.17.6:8080 check weight 10 inter 5267 fall 2 rise 1
+ server 127.0.17.007 127.0.17.7:8080 check weight 10 inter 3277 fall 2 rise 1
+ server 127.0.17.008 127.0.17.8:8080 check weight 10 inter 1617 fall 2 rise 1
+ server 127.0.17.009 127.0.17.9:8080 check weight 10 inter 2612 fall 2 rise 1
+ server 127.0.17.010 127.0.17.10:8080 check weight 10 inter 7609 fall 2 rise 1
+ server 127.0.17.011 127.0.17.11:8080 check weight 10 inter 1653 fall 2 rise 1
+ server 127.0.17.012 127.0.17.12:8080 check weight 10 inter 3486 fall 2 rise 1
+ server 127.0.17.013 127.0.17.13:8080 check weight 10 inter 3842 fall 2 rise 1
+ server 127.0.17.014 127.0.17.14:8080 check weight 10 inter 2764 fall 2 rise 1
+ server 127.0.17.015 127.0.17.15:8080 check weight 10 inter 5623 fall 2 rise 1
+ server 127.0.17.016 127.0.17.16:8080 check weight 10 inter 511 fall 2 rise 1
+ server 127.0.17.017 127.0.17.17:8080 check weight 10 inter 5719 fall 2 rise 1
+ server 127.0.17.018 127.0.17.18:8080 check weight 10 inter 7629 fall 2 rise 1
+ server 127.0.17.019 127.0.17.19:8080 check weight 10 inter 7270 fall 2 rise 1
+ server 127.0.17.020 127.0.17.20:8080 check weight 10 inter 3730 fall 2 rise 1
+ server 127.0.17.021 127.0.17.21:8080 check weight 10 inter 3231 fall 2 rise 1
+ server 127.0.17.022 127.0.17.22:8080 check weight 10 inter 350 fall 2 rise 1
+ server 127.0.17.023 127.0.17.23:8080 check weight 10 inter 414 fall 2 rise 1
+ server 127.0.17.024 127.0.17.24:8080 check weight 10 inter 346 fall 2 rise 1
+ server 127.0.17.025 127.0.17.25:8080 check weight 10 inter 7030 fall 2 rise 1
+ server 127.0.17.026 127.0.17.26:8080 check weight 10 inter 1324 fall 2 rise 1
+ server 127.0.17.027 127.0.17.27:8080 check weight 10 inter 4328 fall 2 rise 1
+ server 127.0.17.028 127.0.17.28:8080 check weight 10 inter 6414 fall 2 rise 1
+ server 127.0.17.029 127.0.17.29:8080 check weight 10 inter 1751 fall 2 rise 1
+ server 127.0.17.030 127.0.17.30:8080 check weight 10 inter 2684 fall 2 rise 1
+ server 127.0.17.031 127.0.17.31:8080 check weight 10 inter 396 fall 2 rise 1
+ server 127.0.17.032 127.0.17.32:8080 check weight 10 inter 2017 fall 2 rise 1
+ server 127.0.17.033 127.0.17.33:8080 check weight 10 inter 3321 fall 2 rise 1
+ server 127.0.17.034 127.0.17.34:8080 check weight 10 inter 8855 fall 2 rise 1
+ server 127.0.17.035 127.0.17.35:8080 check weight 10 inter 9674 fall 2 rise 1
+ server 127.0.17.036 127.0.17.36:8080 check weight 10 inter 8159 fall 2 rise 1
+ server 127.0.17.037 127.0.17.37:8080 check weight 10 inter 4151 fall 2 rise 1
+ server 127.0.17.038 127.0.17.38:8080 check weight 10 inter 2245 fall 2 rise 1
+ server 127.0.17.039 127.0.17.39:8080 check weight 10 inter 198 fall 2 rise 1
+ server 127.0.17.040 127.0.17.40:8080 check weight 10 inter 268 fall 2 rise 1
+ server 127.0.17.041 127.0.17.41:8080 check weight 10 inter 4099 fall 2 rise 1
+ server 127.0.17.042 127.0.17.42:8080 check weight 10 inter 9883 fall 2 rise 1
+ server 127.0.17.043 127.0.17.43:8080 check weight 10 inter 1750 fall 2 rise 1
+ server 127.0.17.044 127.0.17.44:8080 check weight 10 inter 9036 fall 2 rise 1
+ server 127.0.17.045 127.0.17.45:8080 check weight 10 inter 2059 fall 2 rise 1
+ server 127.0.17.046 127.0.17.46:8080 check weight 10 inter 3364 fall 2 rise 1
+ server 127.0.17.047 127.0.17.47:8080 check weight 10 inter 1821 fall 2 rise 1
+ server 127.0.17.048 127.0.17.48:8080 check weight 10 inter 8146 fall 2 rise 1
+ server 127.0.17.049 127.0.17.49:8080 check weight 10 inter 6695 fall 2 rise 1
+ server 127.0.17.050 127.0.17.50:8080 check weight 10 inter 1936 fall 2 rise 1
+ server 127.0.17.051 127.0.17.51:8080 check weight 10 inter 3078 fall 2 rise 1
+ server 127.0.17.052 127.0.17.52:8080 check weight 10 inter 2201 fall 2 rise 1
+ server 127.0.17.053 127.0.17.53:8080 check weight 10 inter 4584 fall 2 rise 1
+ server 127.0.17.054 127.0.17.54:8080 check weight 10 inter 5999 fall 2 rise 1
+ server 127.0.17.055 127.0.17.55:8080 check weight 10 inter 7504 fall 2 rise 1
+ server 127.0.17.056 127.0.17.56:8080 check weight 10 inter 7349 fall 2 rise 1
+ server 127.0.17.057 127.0.17.57:8080 check weight 10 inter 240 fall 2 rise 1
+ server 127.0.17.058 127.0.17.58:8080 check weight 10 inter 1916 fall 2 rise 1
+ server 127.0.17.059 127.0.17.59:8080 check weight 10 inter 3948 fall 2 rise 1
+ server 127.0.17.060 127.0.17.60:8080 check weight 10 inter 9431 fall 2 rise 1
+ server 127.0.17.061 127.0.17.61:8080 check weight 10 inter 5233 fall 2 rise 1
+ server 127.0.17.062 127.0.17.62:8080 check weight 10 inter 505 fall 2 rise 1
+ server 127.0.17.063 127.0.17.63:8080 check weight 10 inter 407 fall 2 rise 1
+ server 127.0.17.064 127.0.17.64:8080 check weight 10 inter 1714 fall 2 rise 1
+ server 127.0.17.065 127.0.17.65:8080 check weight 10 inter 1494 fall 2 rise 1
+ server 127.0.17.066 127.0.17.66:8080 check weight 10 inter 111 fall 2 rise 1
+ server 127.0.17.067 127.0.17.67:8080 check weight 10 inter 2871 fall 2 rise 1
+ server 127.0.17.068 127.0.17.68:8080 check weight 10 inter 5328 fall 2 rise 1
+ server 127.0.17.069 127.0.17.69:8080 check weight 10 inter 4324 fall 2 rise 1
+ server 127.0.17.070 127.0.17.70:8080 check weight 10 inter 8754 fall 2 rise 1
+ server 127.0.17.071 127.0.17.71:8080 check weight 10 inter 1431 fall 2 rise 1
+ server 127.0.17.072 127.0.17.72:8080 check weight 10 inter 6611 fall 2 rise 1
+ server 127.0.17.073 127.0.17.73:8080 check weight 10 inter 7870 fall 2 rise 1
+ server 127.0.17.074 127.0.17.74:8080 check weight 10 inter 5206 fall 2 rise 1
+ server 127.0.17.075 127.0.17.75:8080 check weight 10 inter 6547 fall 2 rise 1
+ server 127.0.17.076 127.0.17.76:8080 check weight 10 inter 4670 fall 2 rise 1
+ server 127.0.17.077 127.0.17.77:8080 check weight 10 inter 1547 fall 2 rise 1
+ server 127.0.17.078 127.0.17.78:8080 check weight 10 inter 708 fall 2 rise 1
+ server 127.0.17.079 127.0.17.79:8080 check weight 10 inter 5107 fall 2 rise 1
+ server 127.0.17.080 127.0.17.80:8080 check weight 10 inter 4239 fall 2 rise 1
+ server 127.0.17.081 127.0.17.81:8080 check weight 10 inter 1673 fall 2 rise 1
+ server 127.0.17.082 127.0.17.82:8080 check weight 10 inter 106 fall 2 rise 1
+ server 127.0.17.083 127.0.17.83:8080 check weight 10 inter 8727 fall 2 rise 1
+ server 127.0.17.084 127.0.17.84:8080 check weight 10 inter 5650 fall 2 rise 1
+ server 127.0.17.085 127.0.17.85:8080 check weight 10 inter 1885 fall 2 rise 1
+ server 127.0.17.086 127.0.17.86:8080 check weight 10 inter 1814 fall 2 rise 1
+ server 127.0.17.087 127.0.17.87:8080 check weight 10 inter 9286 fall 2 rise 1
+ server 127.0.17.088 127.0.17.88:8080 check weight 10 inter 1431 fall 2 rise 1
+ server 127.0.17.089 127.0.17.89:8080 check weight 10 inter 6626 fall 2 rise 1
+ server 127.0.17.090 127.0.17.90:8080 check weight 10 inter 4289 fall 2 rise 1
+ server 127.0.17.091 127.0.17.91:8080 check weight 10 inter 6455 fall 2 rise 1
+ server 127.0.17.092 127.0.17.92:8080 check weight 10 inter 111 fall 2 rise 1
+ server 127.0.17.093 127.0.17.93:8080 check weight 10 inter 7213 fall 2 rise 1
+ server 127.0.17.094 127.0.17.94:8080 check weight 10 inter 3531 fall 2 rise 1
+ server 127.0.17.095 127.0.17.95:8080 check weight 10 inter 5612 fall 2 rise 1
+ server 127.0.17.096 127.0.17.96:8080 check weight 10 inter 7385 fall 2 rise 1
+ server 127.0.17.097 127.0.17.97:8080 check weight 10 inter 1154 fall 2 rise 1
+ server 127.0.17.098 127.0.17.98:8080 check weight 10 inter 6970 fall 2 rise 1
+ server 127.0.17.099 127.0.17.99:8080 check weight 10 inter 9666 fall 2 rise 1
+ server 127.0.17.100 127.0.17.100:8080 check weight 10 inter 3055 fall 2 rise 1
+ server 127.0.17.101 127.0.17.101:8080 check weight 10 inter 2328 fall 2 rise 1
+ server 127.0.17.102 127.0.17.102:8080 check weight 10 inter 7618 fall 2 rise 1
+ server 127.0.17.103 127.0.17.103:8080 check weight 10 inter 7177 fall 2 rise 1
+ server 127.0.17.104 127.0.17.104:8080 check weight 10 inter 8815 fall 2 rise 1
+ server 127.0.17.105 127.0.17.105:8080 check weight 10 inter 9485 fall 2 rise 1
+ server 127.0.17.106 127.0.17.106:8080 check weight 10 inter 2963 fall 2 rise 1
+ server 127.0.17.107 127.0.17.107:8080 check weight 10 inter 1379 fall 2 rise 1
+ server 127.0.17.108 127.0.17.108:8080 check weight 10 inter 9146 fall 2 rise 1
+ server 127.0.17.109 127.0.17.109:8080 check weight 10 inter 2826 fall 2 rise 1
+ server 127.0.17.110 127.0.17.110:8080 check weight 10 inter 9542 fall 2 rise 1
+ server 127.0.17.111 127.0.17.111:8080 check weight 10 inter 4638 fall 2 rise 1
+ server 127.0.17.112 127.0.17.112:8080 check weight 10 inter 559 fall 2 rise 1
+ server 127.0.17.113 127.0.17.113:8080 check weight 10 inter 6740 fall 2 rise 1
+ server 127.0.17.114 127.0.17.114:8080 check weight 10 inter 2117 fall 2 rise 1
+ server 127.0.17.115 127.0.17.115:8080 check weight 10 inter 5265 fall 2 rise 1
+ server 127.0.17.116 127.0.17.116:8080 check weight 10 inter 5109 fall 2 rise 1
+ server 127.0.17.117 127.0.17.117:8080 check weight 10 inter 8471 fall 2 rise 1
+ server 127.0.17.118 127.0.17.118:8080 check weight 10 inter 7341 fall 2 rise 1
+ server 127.0.17.119 127.0.17.119:8080 check weight 10 inter 5689 fall 2 rise 1
+ server 127.0.17.120 127.0.17.120:8080 check weight 10 inter 4234 fall 2 rise 1
+ server 127.0.17.121 127.0.17.121:8080 check weight 10 inter 147 fall 2 rise 1
+ server 127.0.17.122 127.0.17.122:8080 check weight 10 inter 6356 fall 2 rise 1
+ server 127.0.17.123 127.0.17.123:8080 check weight 10 inter 417 fall 2 rise 1
+ server 127.0.17.124 127.0.17.124:8080 check weight 10 inter 5263 fall 2 rise 1
+ server 127.0.17.125 127.0.17.125:8080 check weight 10 inter 8420 fall 2 rise 1
+ server 127.0.17.126 127.0.17.126:8080 check weight 10 inter 9610 fall 2 rise 1
+ server 127.0.17.127 127.0.17.127:8080 check weight 10 inter 2979 fall 2 rise 1
+ server 127.0.17.128 127.0.17.128:8080 check weight 10 inter 3423 fall 2 rise 1
+ server 127.0.17.129 127.0.17.129:8080 check weight 10 inter 5939 fall 2 rise 1
+ server 127.0.17.130 127.0.17.130:8080 check weight 10 inter 9858 fall 2 rise 1
+ server 127.0.17.131 127.0.17.131:8080 check weight 10 inter 3489 fall 2 rise 1
+ server 127.0.17.132 127.0.17.132:8080 check weight 10 inter 8499 fall 2 rise 1
+ server 127.0.17.133 127.0.17.133:8080 check weight 10 inter 7022 fall 2 rise 1
+ server 127.0.17.134 127.0.17.134:8080 check weight 10 inter 7869 fall 2 rise 1
+ server 127.0.17.135 127.0.17.135:8080 check weight 10 inter 1554 fall 2 rise 1
+ server 127.0.17.136 127.0.17.136:8080 check weight 10 inter 5832 fall 2 rise 1
+ server 127.0.17.137 127.0.17.137:8080 check weight 10 inter 5767 fall 2 rise 1
+ server 127.0.17.138 127.0.17.138:8080 check weight 10 inter 5131 fall 2 rise 1
+ server 127.0.17.139 127.0.17.139:8080 check weight 10 inter 814 fall 2 rise 1
+ server 127.0.17.140 127.0.17.140:8080 check weight 10 inter 91 fall 2 rise 1
+ server 127.0.17.141 127.0.17.141:8080 check weight 10 inter 5759 fall 2 rise 1
+ server 127.0.17.142 127.0.17.142:8080 check weight 10 inter 9120 fall 2 rise 1
+ server 127.0.17.143 127.0.17.143:8080 check weight 10 inter 3495 fall 2 rise 1
+ server 127.0.17.144 127.0.17.144:8080 check weight 10 inter 9657 fall 2 rise 1
+ server 127.0.17.145 127.0.17.145:8080 check weight 10 inter 9249 fall 2 rise 1
+ server 127.0.17.146 127.0.17.146:8080 check weight 10 inter 8839 fall 2 rise 1
+ server 127.0.17.147 127.0.17.147:8080 check weight 10 inter 8509 fall 2 rise 1
+ server 127.0.17.148 127.0.17.148:8080 check weight 10 inter 7705 fall 2 rise 1
+ server 127.0.17.149 127.0.17.149:8080 check weight 10 inter 9790 fall 2 rise 1
+ server 127.0.17.150 127.0.17.150:8080 check weight 10 inter 1098 fall 2 rise 1
+ server 127.0.17.151 127.0.17.151:8080 check weight 10 inter 3328 fall 2 rise 1
+ server 127.0.17.152 127.0.17.152:8080 check weight 10 inter 9254 fall 2 rise 1
+ server 127.0.17.153 127.0.17.153:8080 check weight 10 inter 3260 fall 2 rise 1
+ server 127.0.17.154 127.0.17.154:8080 check weight 10 inter 1389 fall 2 rise 1
+ server 127.0.17.155 127.0.17.155:8080 check weight 10 inter 7475 fall 2 rise 1
+ server 127.0.17.156 127.0.17.156:8080 check weight 10 inter 1630 fall 2 rise 1
+ server 127.0.17.157 127.0.17.157:8080 check weight 10 inter 6158 fall 2 rise 1
+ server 127.0.17.158 127.0.17.158:8080 check weight 10 inter 1102 fall 2 rise 1
+ server 127.0.17.159 127.0.17.159:8080 check weight 10 inter 1701 fall 2 rise 1
+ server 127.0.17.160 127.0.17.160:8080 check weight 10 inter 7772 fall 2 rise 1
+ server 127.0.17.161 127.0.17.161:8080 check weight 10 inter 2449 fall 2 rise 1
+ server 127.0.17.162 127.0.17.162:8080 check weight 10 inter 6253 fall 2 rise 1
+ server 127.0.17.163 127.0.17.163:8080 check weight 10 inter 3614 fall 2 rise 1
+ server 127.0.17.164 127.0.17.164:8080 check weight 10 inter 7061 fall 2 rise 1
+ server 127.0.17.165 127.0.17.165:8080 check weight 10 inter 5980 fall 2 rise 1
+ server 127.0.17.166 127.0.17.166:8080 check weight 10 inter 4747 fall 2 rise 1
+ server 127.0.17.167 127.0.17.167:8080 check weight 10 inter 6444 fall 2 rise 1
+ server 127.0.17.168 127.0.17.168:8080 check weight 10 inter 7396 fall 2 rise 1
+ server 127.0.17.169 127.0.17.169:8080 check weight 10 inter 4805 fall 2 rise 1
+ server 127.0.17.170 127.0.17.170:8080 check weight 10 inter 6276 fall 2 rise 1
+ server 127.0.17.171 127.0.17.171:8080 check weight 10 inter 796 fall 2 rise 1
+ server 127.0.17.172 127.0.17.172:8080 check weight 10 inter 5063 fall 2 rise 1
+ server 127.0.17.173 127.0.17.173:8080 check weight 10 inter 6163 fall 2 rise 1
+ server 127.0.17.174 127.0.17.174:8080 check weight 10 inter 4907 fall 2 rise 1
+ server 127.0.17.175 127.0.17.175:8080 check weight 10 inter 6983 fall 2 rise 1
+ server 127.0.17.176 127.0.17.176:8080 check weight 10 inter 4870 fall 2 rise 1
+ server 127.0.17.177 127.0.17.177:8080 check weight 10 inter 583 fall 2 rise 1
+ server 127.0.17.178 127.0.17.178:8080 check weight 10 inter 8202 fall 2 rise 1
+ server 127.0.17.179 127.0.17.179:8080 check weight 10 inter 2881 fall 2 rise 1
+ server 127.0.17.180 127.0.17.180:8080 check weight 10 inter 502 fall 2 rise 1
+ server 127.0.17.181 127.0.17.181:8080 check weight 10 inter 1706 fall 2 rise 1
+ server 127.0.17.182 127.0.17.182:8080 check weight 10 inter 3351 fall 2 rise 1
+ server 127.0.17.183 127.0.17.183:8080 check weight 10 inter 5507 fall 2 rise 1
+ server 127.0.17.184 127.0.17.184:8080 check weight 10 inter 5234 fall 2 rise 1
+ server 127.0.17.185 127.0.17.185:8080 check weight 10 inter 9685 fall 2 rise 1
+ server 127.0.17.186 127.0.17.186:8080 check weight 10 inter 5253 fall 2 rise 1
+ server 127.0.17.187 127.0.17.187:8080 check weight 10 inter 5107 fall 2 rise 1
+ server 127.0.17.188 127.0.17.188:8080 check weight 10 inter 7588 fall 2 rise 1
+ server 127.0.17.189 127.0.17.189:8080 check weight 10 inter 914 fall 2 rise 1
+ server 127.0.17.190 127.0.17.190:8080 check weight 10 inter 2111 fall 2 rise 1
+ server 127.0.17.191 127.0.17.191:8080 check weight 10 inter 425 fall 2 rise 1
+ server 127.0.17.192 127.0.17.192:8080 check weight 10 inter 9159 fall 2 rise 1
+ server 127.0.17.193 127.0.17.193:8080 check weight 10 inter 8550 fall 2 rise 1
+ server 127.0.17.194 127.0.17.194:8080 check weight 10 inter 69 fall 2 rise 1
+ server 127.0.17.195 127.0.17.195:8080 check weight 10 inter 9617 fall 2 rise 1
+ server 127.0.17.196 127.0.17.196:8080 check weight 10 inter 236 fall 2 rise 1
+ server 127.0.17.197 127.0.17.197:8080 check weight 10 inter 7819 fall 2 rise 1
+ server 127.0.17.198 127.0.17.198:8080 check weight 10 inter 8004 fall 2 rise 1
+ server 127.0.17.199 127.0.17.199:8080 check weight 10 inter 6574 fall 2 rise 1
+ server 127.0.17.200 127.0.17.200:8080 check weight 10 inter 4030 fall 2 rise 1
+ server 127.0.17.201 127.0.17.201:8080 check weight 10 inter 3183 fall 2 rise 1
+ server 127.0.17.202 127.0.17.202:8080 check weight 10 inter 9095 fall 2 rise 1
+ server 127.0.17.203 127.0.17.203:8080 check weight 10 inter 7573 fall 2 rise 1
+ server 127.0.17.204 127.0.17.204:8080 check weight 10 inter 1558 fall 2 rise 1
+ server 127.0.17.205 127.0.17.205:8080 check weight 10 inter 6430 fall 2 rise 1
+ server 127.0.17.206 127.0.17.206:8080 check weight 10 inter 4112 fall 2 rise 1
+ server 127.0.17.207 127.0.17.207:8080 check weight 10 inter 1557 fall 2 rise 1
+ server 127.0.17.208 127.0.17.208:8080 check weight 10 inter 317 fall 2 rise 1
+ server 127.0.17.209 127.0.17.209:8080 check weight 10 inter 7856 fall 2 rise 1
+ server 127.0.17.210 127.0.17.210:8080 check weight 10 inter 6260 fall 2 rise 1
+ server 127.0.17.211 127.0.17.211:8080 check weight 10 inter 8647 fall 2 rise 1
+ server 127.0.17.212 127.0.17.212:8080 check weight 10 inter 3269 fall 2 rise 1
+ server 127.0.17.213 127.0.17.213:8080 check weight 10 inter 9426 fall 2 rise 1
+ server 127.0.17.214 127.0.17.214:8080 check weight 10 inter 5034 fall 2 rise 1
+ server 127.0.17.215 127.0.17.215:8080 check weight 10 inter 2218 fall 2 rise 1
+ server 127.0.17.216 127.0.17.216:8080 check weight 10 inter 4470 fall 2 rise 1
+ server 127.0.17.217 127.0.17.217:8080 check weight 10 inter 6101 fall 2 rise 1
+ server 127.0.17.218 127.0.17.218:8080 check weight 10 inter 1938 fall 2 rise 1
+ server 127.0.17.219 127.0.17.219:8080 check weight 10 inter 1099 fall 2 rise 1
+ server 127.0.17.220 127.0.17.220:8080 check weight 10 inter 1450 fall 2 rise 1
+ server 127.0.17.221 127.0.17.221:8080 check weight 10 inter 8057 fall 2 rise 1
+ server 127.0.17.222 127.0.17.222:8080 check weight 10 inter 181 fall 2 rise 1
+ server 127.0.17.223 127.0.17.223:8080 check weight 10 inter 1719 fall 2 rise 1
+ server 127.0.17.224 127.0.17.224:8080 check weight 10 inter 9864 fall 2 rise 1
+ server 127.0.17.225 127.0.17.225:8080 check weight 10 inter 3368 fall 2 rise 1
+ server 127.0.17.226 127.0.17.226:8080 check weight 10 inter 2272 fall 2 rise 1
+ server 127.0.17.227 127.0.17.227:8080 check weight 10 inter 1376 fall 2 rise 1
+ server 127.0.17.228 127.0.17.228:8080 check weight 10 inter 2225 fall 2 rise 1
+ server 127.0.17.229 127.0.17.229:8080 check weight 10 inter 9874 fall 2 rise 1
+ server 127.0.17.230 127.0.17.230:8080 check weight 10 inter 3600 fall 2 rise 1
+ server 127.0.17.231 127.0.17.231:8080 check weight 10 inter 9968 fall 2 rise 1
+ server 127.0.17.232 127.0.17.232:8080 check weight 10 inter 9803 fall 2 rise 1
+ server 127.0.17.233 127.0.17.233:8080 check weight 10 inter 1516 fall 2 rise 1
+ server 127.0.17.234 127.0.17.234:8080 check weight 10 inter 3340 fall 2 rise 1
+ server 127.0.17.235 127.0.17.235:8080 check weight 10 inter 465 fall 2 rise 1
+ server 127.0.17.236 127.0.17.236:8080 check weight 10 inter 8705 fall 2 rise 1
+ server 127.0.17.237 127.0.17.237:8080 check weight 10 inter 9367 fall 2 rise 1
+ server 127.0.17.238 127.0.17.238:8080 check weight 10 inter 2917 fall 2 rise 1
+ server 127.0.17.239 127.0.17.239:8080 check weight 10 inter 1433 fall 2 rise 1
+ server 127.0.17.240 127.0.17.240:8080 check weight 10 inter 903 fall 2 rise 1
+ server 127.0.17.241 127.0.17.241:8080 check weight 10 inter 9442 fall 2 rise 1
+ server 127.0.17.242 127.0.17.242:8080 check weight 10 inter 8895 fall 2 rise 1
+ server 127.0.17.243 127.0.17.243:8080 check weight 10 inter 8070 fall 2 rise 1
+ server 127.0.17.244 127.0.17.244:8080 check weight 10 inter 4638 fall 2 rise 1
+ server 127.0.17.245 127.0.17.245:8080 check weight 10 inter 8575 fall 2 rise 1
+ server 127.0.17.246 127.0.17.246:8080 check weight 10 inter 7132 fall 2 rise 1
+ server 127.0.17.247 127.0.17.247:8080 check weight 10 inter 2909 fall 2 rise 1
+ server 127.0.17.248 127.0.17.248:8080 check weight 10 inter 43 fall 2 rise 1
+ server 127.0.17.249 127.0.17.249:8080 check weight 10 inter 1525 fall 2 rise 1
+ server 127.0.17.250 127.0.17.250:8080 check weight 10 inter 8029 fall 2 rise 1
+ server 127.0.18.001 127.0.18.1:8080 check weight 10 inter 3538 fall 2 rise 1
+ server 127.0.18.002 127.0.18.2:8080 check weight 10 inter 5928 fall 2 rise 1
+ server 127.0.18.003 127.0.18.3:8080 check weight 10 inter 5067 fall 2 rise 1
+ server 127.0.18.004 127.0.18.4:8080 check weight 10 inter 4599 fall 2 rise 1
+ server 127.0.18.005 127.0.18.5:8080 check weight 10 inter 375 fall 2 rise 1
+ server 127.0.18.006 127.0.18.6:8080 check weight 10 inter 2952 fall 2 rise 1
+ server 127.0.18.007 127.0.18.7:8080 check weight 10 inter 64 fall 2 rise 1
+ server 127.0.18.008 127.0.18.8:8080 check weight 10 inter 3655 fall 2 rise 1
+ server 127.0.18.009 127.0.18.9:8080 check weight 10 inter 4613 fall 2 rise 1
+ server 127.0.18.010 127.0.18.10:8080 check weight 10 inter 7163 fall 2 rise 1
+ server 127.0.18.011 127.0.18.11:8080 check weight 10 inter 522 fall 2 rise 1
+ server 127.0.18.012 127.0.18.12:8080 check weight 10 inter 9238 fall 2 rise 1
+ server 127.0.18.013 127.0.18.13:8080 check weight 10 inter 7499 fall 2 rise 1
+ server 127.0.18.014 127.0.18.14:8080 check weight 10 inter 6805 fall 2 rise 1
+ server 127.0.18.015 127.0.18.15:8080 check weight 10 inter 6149 fall 2 rise 1
+ server 127.0.18.016 127.0.18.16:8080 check weight 10 inter 2427 fall 2 rise 1
+ server 127.0.18.017 127.0.18.17:8080 check weight 10 inter 1865 fall 2 rise 1
+ server 127.0.18.018 127.0.18.18:8080 check weight 10 inter 2799 fall 2 rise 1
+ server 127.0.18.019 127.0.18.19:8080 check weight 10 inter 7127 fall 2 rise 1
+ server 127.0.18.020 127.0.18.20:8080 check weight 10 inter 3715 fall 2 rise 1
+ server 127.0.18.021 127.0.18.21:8080 check weight 10 inter 1979 fall 2 rise 1
+ server 127.0.18.022 127.0.18.22:8080 check weight 10 inter 619 fall 2 rise 1
+ server 127.0.18.023 127.0.18.23:8080 check weight 10 inter 7749 fall 2 rise 1
+ server 127.0.18.024 127.0.18.24:8080 check weight 10 inter 2881 fall 2 rise 1
+ server 127.0.18.025 127.0.18.25:8080 check weight 10 inter 7558 fall 2 rise 1
+ server 127.0.18.026 127.0.18.26:8080 check weight 10 inter 3175 fall 2 rise 1
+ server 127.0.18.027 127.0.18.27:8080 check weight 10 inter 4120 fall 2 rise 1
+ server 127.0.18.028 127.0.18.28:8080 check weight 10 inter 4069 fall 2 rise 1
+ server 127.0.18.029 127.0.18.29:8080 check weight 10 inter 9253 fall 2 rise 1
+ server 127.0.18.030 127.0.18.30:8080 check weight 10 inter 5817 fall 2 rise 1
+ server 127.0.18.031 127.0.18.31:8080 check weight 10 inter 701 fall 2 rise 1
+ server 127.0.18.032 127.0.18.32:8080 check weight 10 inter 2702 fall 2 rise 1
+ server 127.0.18.033 127.0.18.33:8080 check weight 10 inter 1888 fall 2 rise 1
+ server 127.0.18.034 127.0.18.34:8080 check weight 10 inter 3668 fall 2 rise 1
+ server 127.0.18.035 127.0.18.35:8080 check weight 10 inter 1535 fall 2 rise 1
+ server 127.0.18.036 127.0.18.36:8080 check weight 10 inter 6399 fall 2 rise 1
+ server 127.0.18.037 127.0.18.37:8080 check weight 10 inter 5105 fall 2 rise 1
+ server 127.0.18.038 127.0.18.38:8080 check weight 10 inter 5038 fall 2 rise 1
+ server 127.0.18.039 127.0.18.39:8080 check weight 10 inter 4757 fall 2 rise 1
+ server 127.0.18.040 127.0.18.40:8080 check weight 10 inter 8674 fall 2 rise 1
+ server 127.0.18.041 127.0.18.41:8080 check weight 10 inter 8118 fall 2 rise 1
+ server 127.0.18.042 127.0.18.42:8080 check weight 10 inter 5282 fall 2 rise 1
+ server 127.0.18.043 127.0.18.43:8080 check weight 10 inter 9627 fall 2 rise 1
+ server 127.0.18.044 127.0.18.44:8080 check weight 10 inter 3296 fall 2 rise 1
+ server 127.0.18.045 127.0.18.45:8080 check weight 10 inter 565 fall 2 rise 1
+ server 127.0.18.046 127.0.18.46:8080 check weight 10 inter 3444 fall 2 rise 1
+ server 127.0.18.047 127.0.18.47:8080 check weight 10 inter 4690 fall 2 rise 1
+ server 127.0.18.048 127.0.18.48:8080 check weight 10 inter 4060 fall 2 rise 1
+ server 127.0.18.049 127.0.18.49:8080 check weight 10 inter 7236 fall 2 rise 1
+ server 127.0.18.050 127.0.18.50:8080 check weight 10 inter 72 fall 2 rise 1
+ server 127.0.18.051 127.0.18.51:8080 check weight 10 inter 7814 fall 2 rise 1
+ server 127.0.18.052 127.0.18.52:8080 check weight 10 inter 7874 fall 2 rise 1
+ server 127.0.18.053 127.0.18.53:8080 check weight 10 inter 5251 fall 2 rise 1
+ server 127.0.18.054 127.0.18.54:8080 check weight 10 inter 8705 fall 2 rise 1
+ server 127.0.18.055 127.0.18.55:8080 check weight 10 inter 915 fall 2 rise 1
+ server 127.0.18.056 127.0.18.56:8080 check weight 10 inter 2051 fall 2 rise 1
+ server 127.0.18.057 127.0.18.57:8080 check weight 10 inter 8288 fall 2 rise 1
+ server 127.0.18.058 127.0.18.58:8080 check weight 10 inter 1850 fall 2 rise 1
+ server 127.0.18.059 127.0.18.59:8080 check weight 10 inter 6983 fall 2 rise 1
+ server 127.0.18.060 127.0.18.60:8080 check weight 10 inter 6477 fall 2 rise 1
+ server 127.0.18.061 127.0.18.61:8080 check weight 10 inter 1304 fall 2 rise 1
+ server 127.0.18.062 127.0.18.62:8080 check weight 10 inter 1974 fall 2 rise 1
+ server 127.0.18.063 127.0.18.63:8080 check weight 10 inter 1333 fall 2 rise 1
+ server 127.0.18.064 127.0.18.64:8080 check weight 10 inter 7725 fall 2 rise 1
+ server 127.0.18.065 127.0.18.65:8080 check weight 10 inter 4846 fall 2 rise 1
+ server 127.0.18.066 127.0.18.66:8080 check weight 10 inter 4489 fall 2 rise 1
+ server 127.0.18.067 127.0.18.67:8080 check weight 10 inter 3007 fall 2 rise 1
+ server 127.0.18.068 127.0.18.68:8080 check weight 10 inter 52 fall 2 rise 1
+ server 127.0.18.069 127.0.18.69:8080 check weight 10 inter 6845 fall 2 rise 1
+ server 127.0.18.070 127.0.18.70:8080 check weight 10 inter 7987 fall 2 rise 1
+ server 127.0.18.071 127.0.18.71:8080 check weight 10 inter 7600 fall 2 rise 1
+ server 127.0.18.072 127.0.18.72:8080 check weight 10 inter 1610 fall 2 rise 1
+ server 127.0.18.073 127.0.18.73:8080 check weight 10 inter 3757 fall 2 rise 1
+ server 127.0.18.074 127.0.18.74:8080 check weight 10 inter 2287 fall 2 rise 1
+ server 127.0.18.075 127.0.18.75:8080 check weight 10 inter 8093 fall 2 rise 1
+ server 127.0.18.076 127.0.18.76:8080 check weight 10 inter 4196 fall 2 rise 1
+ server 127.0.18.077 127.0.18.77:8080 check weight 10 inter 4488 fall 2 rise 1
+ server 127.0.18.078 127.0.18.78:8080 check weight 10 inter 8911 fall 2 rise 1
+ server 127.0.18.079 127.0.18.79:8080 check weight 10 inter 8778 fall 2 rise 1
+ server 127.0.18.080 127.0.18.80:8080 check weight 10 inter 4071 fall 2 rise 1
+ server 127.0.18.081 127.0.18.81:8080 check weight 10 inter 5114 fall 2 rise 1
+ server 127.0.18.082 127.0.18.82:8080 check weight 10 inter 8389 fall 2 rise 1
+ server 127.0.18.083 127.0.18.83:8080 check weight 10 inter 572 fall 2 rise 1
+ server 127.0.18.084 127.0.18.84:8080 check weight 10 inter 8506 fall 2 rise 1
+ server 127.0.18.085 127.0.18.85:8080 check weight 10 inter 5530 fall 2 rise 1
+ server 127.0.18.086 127.0.18.86:8080 check weight 10 inter 2418 fall 2 rise 1
+ server 127.0.18.087 127.0.18.87:8080 check weight 10 inter 8338 fall 2 rise 1
+ server 127.0.18.088 127.0.18.88:8080 check weight 10 inter 1133 fall 2 rise 1
+ server 127.0.18.089 127.0.18.89:8080 check weight 10 inter 42 fall 2 rise 1
+ server 127.0.18.090 127.0.18.90:8080 check weight 10 inter 3567 fall 2 rise 1
+ server 127.0.18.091 127.0.18.91:8080 check weight 10 inter 9121 fall 2 rise 1
+ server 127.0.18.092 127.0.18.92:8080 check weight 10 inter 9500 fall 2 rise 1
+ server 127.0.18.093 127.0.18.93:8080 check weight 10 inter 4112 fall 2 rise 1
+ server 127.0.18.094 127.0.18.94:8080 check weight 10 inter 3949 fall 2 rise 1
+ server 127.0.18.095 127.0.18.95:8080 check weight 10 inter 692 fall 2 rise 1
+ server 127.0.18.096 127.0.18.96:8080 check weight 10 inter 5761 fall 2 rise 1
+ server 127.0.18.097 127.0.18.97:8080 check weight 10 inter 8018 fall 2 rise 1
+ server 127.0.18.098 127.0.18.98:8080 check weight 10 inter 7068 fall 2 rise 1
+ server 127.0.18.099 127.0.18.99:8080 check weight 10 inter 1600 fall 2 rise 1
+ server 127.0.18.100 127.0.18.100:8080 check weight 10 inter 7309 fall 2 rise 1
+ server 127.0.18.101 127.0.18.101:8080 check weight 10 inter 452 fall 2 rise 1
+ server 127.0.18.102 127.0.18.102:8080 check weight 10 inter 9982 fall 2 rise 1
+ server 127.0.18.103 127.0.18.103:8080 check weight 10 inter 9 fall 2 rise 1
+ server 127.0.18.104 127.0.18.104:8080 check weight 10 inter 6565 fall 2 rise 1
+ server 127.0.18.105 127.0.18.105:8080 check weight 10 inter 3697 fall 2 rise 1
+ server 127.0.18.106 127.0.18.106:8080 check weight 10 inter 5513 fall 2 rise 1
+ server 127.0.18.107 127.0.18.107:8080 check weight 10 inter 7 fall 2 rise 1
+ server 127.0.18.108 127.0.18.108:8080 check weight 10 inter 3804 fall 2 rise 1
+ server 127.0.18.109 127.0.18.109:8080 check weight 10 inter 5739 fall 2 rise 1
+ server 127.0.18.110 127.0.18.110:8080 check weight 10 inter 7646 fall 2 rise 1
+ server 127.0.18.111 127.0.18.111:8080 check weight 10 inter 5782 fall 2 rise 1
+ server 127.0.18.112 127.0.18.112:8080 check weight 10 inter 6420 fall 2 rise 1
+ server 127.0.18.113 127.0.18.113:8080 check weight 10 inter 4339 fall 2 rise 1
+ server 127.0.18.114 127.0.18.114:8080 check weight 10 inter 8318 fall 2 rise 1
+ server 127.0.18.115 127.0.18.115:8080 check weight 10 inter 6032 fall 2 rise 1
+ server 127.0.18.116 127.0.18.116:8080 check weight 10 inter 7699 fall 2 rise 1
+ server 127.0.18.117 127.0.18.117:8080 check weight 10 inter 3783 fall 2 rise 1
+ server 127.0.18.118 127.0.18.118:8080 check weight 10 inter 869 fall 2 rise 1
+ server 127.0.18.119 127.0.18.119:8080 check weight 10 inter 3450 fall 2 rise 1
+ server 127.0.18.120 127.0.18.120:8080 check weight 10 inter 1911 fall 2 rise 1
+ server 127.0.18.121 127.0.18.121:8080 check weight 10 inter 142 fall 2 rise 1
+ server 127.0.18.122 127.0.18.122:8080 check weight 10 inter 2861 fall 2 rise 1
+ server 127.0.18.123 127.0.18.123:8080 check weight 10 inter 1217 fall 2 rise 1
+ server 127.0.18.124 127.0.18.124:8080 check weight 10 inter 7258 fall 2 rise 1
+ server 127.0.18.125 127.0.18.125:8080 check weight 10 inter 9412 fall 2 rise 1
+ server 127.0.18.126 127.0.18.126:8080 check weight 10 inter 3491 fall 2 rise 1
+ server 127.0.18.127 127.0.18.127:8080 check weight 10 inter 4547 fall 2 rise 1
+ server 127.0.18.128 127.0.18.128:8080 check weight 10 inter 9300 fall 2 rise 1
+ server 127.0.18.129 127.0.18.129:8080 check weight 10 inter 9014 fall 2 rise 1
+ server 127.0.18.130 127.0.18.130:8080 check weight 10 inter 5560 fall 2 rise 1
+ server 127.0.18.131 127.0.18.131:8080 check weight 10 inter 4317 fall 2 rise 1
+ server 127.0.18.132 127.0.18.132:8080 check weight 10 inter 7828 fall 2 rise 1
+ server 127.0.18.133 127.0.18.133:8080 check weight 10 inter 237 fall 2 rise 1
+ server 127.0.18.134 127.0.18.134:8080 check weight 10 inter 2759 fall 2 rise 1
+ server 127.0.18.135 127.0.18.135:8080 check weight 10 inter 4664 fall 2 rise 1
+ server 127.0.18.136 127.0.18.136:8080 check weight 10 inter 1308 fall 2 rise 1
+ server 127.0.18.137 127.0.18.137:8080 check weight 10 inter 5947 fall 2 rise 1
+ server 127.0.18.138 127.0.18.138:8080 check weight 10 inter 2233 fall 2 rise 1
+ server 127.0.18.139 127.0.18.139:8080 check weight 10 inter 1570 fall 2 rise 1
+ server 127.0.18.140 127.0.18.140:8080 check weight 10 inter 190 fall 2 rise 1
+ server 127.0.18.141 127.0.18.141:8080 check weight 10 inter 8550 fall 2 rise 1
+ server 127.0.18.142 127.0.18.142:8080 check weight 10 inter 410 fall 2 rise 1
+ server 127.0.18.143 127.0.18.143:8080 check weight 10 inter 7102 fall 2 rise 1
+ server 127.0.18.144 127.0.18.144:8080 check weight 10 inter 4890 fall 2 rise 1
+ server 127.0.18.145 127.0.18.145:8080 check weight 10 inter 9591 fall 2 rise 1
+ server 127.0.18.146 127.0.18.146:8080 check weight 10 inter 9289 fall 2 rise 1
+ server 127.0.18.147 127.0.18.147:8080 check weight 10 inter 3099 fall 2 rise 1
+ server 127.0.18.148 127.0.18.148:8080 check weight 10 inter 6517 fall 2 rise 1
+ server 127.0.18.149 127.0.18.149:8080 check weight 10 inter 4898 fall 2 rise 1
+ server 127.0.18.150 127.0.18.150:8080 check weight 10 inter 2883 fall 2 rise 1
+ server 127.0.18.151 127.0.18.151:8080 check weight 10 inter 5413 fall 2 rise 1
+ server 127.0.18.152 127.0.18.152:8080 check weight 10 inter 1529 fall 2 rise 1
+ server 127.0.18.153 127.0.18.153:8080 check weight 10 inter 7283 fall 2 rise 1
+ server 127.0.18.154 127.0.18.154:8080 check weight 10 inter 6972 fall 2 rise 1
+ server 127.0.18.155 127.0.18.155:8080 check weight 10 inter 525 fall 2 rise 1
+ server 127.0.18.156 127.0.18.156:8080 check weight 10 inter 7311 fall 2 rise 1
+ server 127.0.18.157 127.0.18.157:8080 check weight 10 inter 5628 fall 2 rise 1
+ server 127.0.18.158 127.0.18.158:8080 check weight 10 inter 370 fall 2 rise 1
+ server 127.0.18.159 127.0.18.159:8080 check weight 10 inter 7161 fall 2 rise 1
+ server 127.0.18.160 127.0.18.160:8080 check weight 10 inter 653 fall 2 rise 1
+ server 127.0.18.161 127.0.18.161:8080 check weight 10 inter 4049 fall 2 rise 1
+ server 127.0.18.162 127.0.18.162:8080 check weight 10 inter 2978 fall 2 rise 1
+ server 127.0.18.163 127.0.18.163:8080 check weight 10 inter 8572 fall 2 rise 1
+ server 127.0.18.164 127.0.18.164:8080 check weight 10 inter 8543 fall 2 rise 1
+ server 127.0.18.165 127.0.18.165:8080 check weight 10 inter 7200 fall 2 rise 1
+ server 127.0.18.166 127.0.18.166:8080 check weight 10 inter 3303 fall 2 rise 1
+ server 127.0.18.167 127.0.18.167:8080 check weight 10 inter 9972 fall 2 rise 1
+ server 127.0.18.168 127.0.18.168:8080 check weight 10 inter 5127 fall 2 rise 1
+ server 127.0.18.169 127.0.18.169:8080 check weight 10 inter 6049 fall 2 rise 1
+ server 127.0.18.170 127.0.18.170:8080 check weight 10 inter 4118 fall 2 rise 1
+ server 127.0.18.171 127.0.18.171:8080 check weight 10 inter 6630 fall 2 rise 1
+ server 127.0.18.172 127.0.18.172:8080 check weight 10 inter 1476 fall 2 rise 1
+ server 127.0.18.173 127.0.18.173:8080 check weight 10 inter 5538 fall 2 rise 1
+ server 127.0.18.174 127.0.18.174:8080 check weight 10 inter 4283 fall 2 rise 1
+ server 127.0.18.175 127.0.18.175:8080 check weight 10 inter 9865 fall 2 rise 1
+ server 127.0.18.176 127.0.18.176:8080 check weight 10 inter 3091 fall 2 rise 1
+ server 127.0.18.177 127.0.18.177:8080 check weight 10 inter 5855 fall 2 rise 1
+ server 127.0.18.178 127.0.18.178:8080 check weight 10 inter 6209 fall 2 rise 1
+ server 127.0.18.179 127.0.18.179:8080 check weight 10 inter 2164 fall 2 rise 1
+ server 127.0.18.180 127.0.18.180:8080 check weight 10 inter 9464 fall 2 rise 1
+ server 127.0.18.181 127.0.18.181:8080 check weight 10 inter 5987 fall 2 rise 1
+ server 127.0.18.182 127.0.18.182:8080 check weight 10 inter 1907 fall 2 rise 1
+ server 127.0.18.183 127.0.18.183:8080 check weight 10 inter 4649 fall 2 rise 1
+ server 127.0.18.184 127.0.18.184:8080 check weight 10 inter 8027 fall 2 rise 1
+ server 127.0.18.185 127.0.18.185:8080 check weight 10 inter 9393 fall 2 rise 1
+ server 127.0.18.186 127.0.18.186:8080 check weight 10 inter 4773 fall 2 rise 1
+ server 127.0.18.187 127.0.18.187:8080 check weight 10 inter 1950 fall 2 rise 1
+ server 127.0.18.188 127.0.18.188:8080 check weight 10 inter 1731 fall 2 rise 1
+ server 127.0.18.189 127.0.18.189:8080 check weight 10 inter 5105 fall 2 rise 1
+ server 127.0.18.190 127.0.18.190:8080 check weight 10 inter 674 fall 2 rise 1
+ server 127.0.18.191 127.0.18.191:8080 check weight 10 inter 1296 fall 2 rise 1
+ server 127.0.18.192 127.0.18.192:8080 check weight 10 inter 260 fall 2 rise 1
+ server 127.0.18.193 127.0.18.193:8080 check weight 10 inter 2411 fall 2 rise 1
+ server 127.0.18.194 127.0.18.194:8080 check weight 10 inter 7812 fall 2 rise 1
+ server 127.0.18.195 127.0.18.195:8080 check weight 10 inter 3894 fall 2 rise 1
+ server 127.0.18.196 127.0.18.196:8080 check weight 10 inter 2599 fall 2 rise 1
+ server 127.0.18.197 127.0.18.197:8080 check weight 10 inter 9029 fall 2 rise 1
+ server 127.0.18.198 127.0.18.198:8080 check weight 10 inter 4917 fall 2 rise 1
+ server 127.0.18.199 127.0.18.199:8080 check weight 10 inter 5606 fall 2 rise 1
+ server 127.0.18.200 127.0.18.200:8080 check weight 10 inter 3741 fall 2 rise 1
+ server 127.0.18.201 127.0.18.201:8080 check weight 10 inter 925 fall 2 rise 1
+ server 127.0.18.202 127.0.18.202:8080 check weight 10 inter 9 fall 2 rise 1
+ server 127.0.18.203 127.0.18.203:8080 check weight 10 inter 3914 fall 2 rise 1
+ server 127.0.18.204 127.0.18.204:8080 check weight 10 inter 8148 fall 2 rise 1
+ server 127.0.18.205 127.0.18.205:8080 check weight 10 inter 7077 fall 2 rise 1
+ server 127.0.18.206 127.0.18.206:8080 check weight 10 inter 1591 fall 2 rise 1
+ server 127.0.18.207 127.0.18.207:8080 check weight 10 inter 5232 fall 2 rise 1
+ server 127.0.18.208 127.0.18.208:8080 check weight 10 inter 1062 fall 2 rise 1
+ server 127.0.18.209 127.0.18.209:8080 check weight 10 inter 3954 fall 2 rise 1
+ server 127.0.18.210 127.0.18.210:8080 check weight 10 inter 2843 fall 2 rise 1
+ server 127.0.18.211 127.0.18.211:8080 check weight 10 inter 4661 fall 2 rise 1
+ server 127.0.18.212 127.0.18.212:8080 check weight 10 inter 1092 fall 2 rise 1
+ server 127.0.18.213 127.0.18.213:8080 check weight 10 inter 850 fall 2 rise 1
+ server 127.0.18.214 127.0.18.214:8080 check weight 10 inter 8334 fall 2 rise 1
+ server 127.0.18.215 127.0.18.215:8080 check weight 10 inter 5615 fall 2 rise 1
+ server 127.0.18.216 127.0.18.216:8080 check weight 10 inter 6501 fall 2 rise 1
+ server 127.0.18.217 127.0.18.217:8080 check weight 10 inter 1312 fall 2 rise 1
+ server 127.0.18.218 127.0.18.218:8080 check weight 10 inter 5072 fall 2 rise 1
+ server 127.0.18.219 127.0.18.219:8080 check weight 10 inter 5276 fall 2 rise 1
+ server 127.0.18.220 127.0.18.220:8080 check weight 10 inter 8798 fall 2 rise 1
+ server 127.0.18.221 127.0.18.221:8080 check weight 10 inter 7946 fall 2 rise 1
+ server 127.0.18.222 127.0.18.222:8080 check weight 10 inter 5738 fall 2 rise 1
+ server 127.0.18.223 127.0.18.223:8080 check weight 10 inter 3742 fall 2 rise 1
+ server 127.0.18.224 127.0.18.224:8080 check weight 10 inter 6688 fall 2 rise 1
+ server 127.0.18.225 127.0.18.225:8080 check weight 10 inter 3693 fall 2 rise 1
+ server 127.0.18.226 127.0.18.226:8080 check weight 10 inter 7414 fall 2 rise 1
+ server 127.0.18.227 127.0.18.227:8080 check weight 10 inter 227 fall 2 rise 1
+ server 127.0.18.228 127.0.18.228:8080 check weight 10 inter 2117 fall 2 rise 1
+ server 127.0.18.229 127.0.18.229:8080 check weight 10 inter 8405 fall 2 rise 1
+ server 127.0.18.230 127.0.18.230:8080 check weight 10 inter 4458 fall 2 rise 1
+ server 127.0.18.231 127.0.18.231:8080 check weight 10 inter 8038 fall 2 rise 1
+ server 127.0.18.232 127.0.18.232:8080 check weight 10 inter 7160 fall 2 rise 1
+ server 127.0.18.233 127.0.18.233:8080 check weight 10 inter 2262 fall 2 rise 1
+ server 127.0.18.234 127.0.18.234:8080 check weight 10 inter 8681 fall 2 rise 1
+ server 127.0.18.235 127.0.18.235:8080 check weight 10 inter 4263 fall 2 rise 1
+ server 127.0.18.236 127.0.18.236:8080 check weight 10 inter 6279 fall 2 rise 1
+ server 127.0.18.237 127.0.18.237:8080 check weight 10 inter 4697 fall 2 rise 1
+ server 127.0.18.238 127.0.18.238:8080 check weight 10 inter 5449 fall 2 rise 1
+ server 127.0.18.239 127.0.18.239:8080 check weight 10 inter 5627 fall 2 rise 1
+ server 127.0.18.240 127.0.18.240:8080 check weight 10 inter 7594 fall 2 rise 1
+ server 127.0.18.241 127.0.18.241:8080 check weight 10 inter 2247 fall 2 rise 1
+ server 127.0.18.242 127.0.18.242:8080 check weight 10 inter 2898 fall 2 rise 1
+ server 127.0.18.243 127.0.18.243:8080 check weight 10 inter 739 fall 2 rise 1
+ server 127.0.18.244 127.0.18.244:8080 check weight 10 inter 1089 fall 2 rise 1
+ server 127.0.18.245 127.0.18.245:8080 check weight 10 inter 9976 fall 2 rise 1
+ server 127.0.18.246 127.0.18.246:8080 check weight 10 inter 7179 fall 2 rise 1
+ server 127.0.18.247 127.0.18.247:8080 check weight 10 inter 5726 fall 2 rise 1
+ server 127.0.18.248 127.0.18.248:8080 check weight 10 inter 7407 fall 2 rise 1
+ server 127.0.18.249 127.0.18.249:8080 check weight 10 inter 9881 fall 2 rise 1
+ server 127.0.18.250 127.0.18.250:8080 check weight 10 inter 8308 fall 2 rise 1
+ server 127.0.19.001 127.0.19.1:8080 check weight 10 inter 702 fall 2 rise 1
+ server 127.0.19.002 127.0.19.2:8080 check weight 10 inter 5768 fall 2 rise 1
+ server 127.0.19.003 127.0.19.3:8080 check weight 10 inter 2638 fall 2 rise 1
+ server 127.0.19.004 127.0.19.4:8080 check weight 10 inter 3218 fall 2 rise 1
+ server 127.0.19.005 127.0.19.5:8080 check weight 10 inter 2555 fall 2 rise 1
+ server 127.0.19.006 127.0.19.6:8080 check weight 10 inter 5563 fall 2 rise 1
+ server 127.0.19.007 127.0.19.7:8080 check weight 10 inter 557 fall 2 rise 1
+ server 127.0.19.008 127.0.19.8:8080 check weight 10 inter 9071 fall 2 rise 1
+ server 127.0.19.009 127.0.19.9:8080 check weight 10 inter 1322 fall 2 rise 1
+ server 127.0.19.010 127.0.19.10:8080 check weight 10 inter 3727 fall 2 rise 1
+ server 127.0.19.011 127.0.19.11:8080 check weight 10 inter 3622 fall 2 rise 1
+ server 127.0.19.012 127.0.19.12:8080 check weight 10 inter 4030 fall 2 rise 1
+ server 127.0.19.013 127.0.19.13:8080 check weight 10 inter 6699 fall 2 rise 1
+ server 127.0.19.014 127.0.19.14:8080 check weight 10 inter 9055 fall 2 rise 1
+ server 127.0.19.015 127.0.19.15:8080 check weight 10 inter 356 fall 2 rise 1
+ server 127.0.19.016 127.0.19.16:8080 check weight 10 inter 7774 fall 2 rise 1
+ server 127.0.19.017 127.0.19.17:8080 check weight 10 inter 8933 fall 2 rise 1
+ server 127.0.19.018 127.0.19.18:8080 check weight 10 inter 9846 fall 2 rise 1
+ server 127.0.19.019 127.0.19.19:8080 check weight 10 inter 9381 fall 2 rise 1
+ server 127.0.19.020 127.0.19.20:8080 check weight 10 inter 2518 fall 2 rise 1
+ server 127.0.19.021 127.0.19.21:8080 check weight 10 inter 1969 fall 2 rise 1
+ server 127.0.19.022 127.0.19.22:8080 check weight 10 inter 7913 fall 2 rise 1
+ server 127.0.19.023 127.0.19.23:8080 check weight 10 inter 473 fall 2 rise 1
+ server 127.0.19.024 127.0.19.24:8080 check weight 10 inter 11 fall 2 rise 1
+ server 127.0.19.025 127.0.19.25:8080 check weight 10 inter 1593 fall 2 rise 1
+ server 127.0.19.026 127.0.19.26:8080 check weight 10 inter 4726 fall 2 rise 1
+ server 127.0.19.027 127.0.19.27:8080 check weight 10 inter 539 fall 2 rise 1
+ server 127.0.19.028 127.0.19.28:8080 check weight 10 inter 5938 fall 2 rise 1
+ server 127.0.19.029 127.0.19.29:8080 check weight 10 inter 1967 fall 2 rise 1
+ server 127.0.19.030 127.0.19.30:8080 check weight 10 inter 6560 fall 2 rise 1
+ server 127.0.19.031 127.0.19.31:8080 check weight 10 inter 3314 fall 2 rise 1
+ server 127.0.19.032 127.0.19.32:8080 check weight 10 inter 393 fall 2 rise 1
+ server 127.0.19.033 127.0.19.33:8080 check weight 10 inter 5005 fall 2 rise 1
+ server 127.0.19.034 127.0.19.34:8080 check weight 10 inter 4408 fall 2 rise 1
+ server 127.0.19.035 127.0.19.35:8080 check weight 10 inter 9895 fall 2 rise 1
+ server 127.0.19.036 127.0.19.36:8080 check weight 10 inter 1555 fall 2 rise 1
+ server 127.0.19.037 127.0.19.37:8080 check weight 10 inter 8752 fall 2 rise 1
+ server 127.0.19.038 127.0.19.38:8080 check weight 10 inter 635 fall 2 rise 1
+ server 127.0.19.039 127.0.19.39:8080 check weight 10 inter 1509 fall 2 rise 1
+ server 127.0.19.040 127.0.19.40:8080 check weight 10 inter 7239 fall 2 rise 1
+ server 127.0.19.041 127.0.19.41:8080 check weight 10 inter 9652 fall 2 rise 1
+ server 127.0.19.042 127.0.19.42:8080 check weight 10 inter 1966 fall 2 rise 1
+ server 127.0.19.043 127.0.19.43:8080 check weight 10 inter 3506 fall 2 rise 1
+ server 127.0.19.044 127.0.19.44:8080 check weight 10 inter 2055 fall 2 rise 1
+ server 127.0.19.045 127.0.19.45:8080 check weight 10 inter 5054 fall 2 rise 1
+ server 127.0.19.046 127.0.19.46:8080 check weight 10 inter 4618 fall 2 rise 1
+ server 127.0.19.047 127.0.19.47:8080 check weight 10 inter 3039 fall 2 rise 1
+ server 127.0.19.048 127.0.19.48:8080 check weight 10 inter 7633 fall 2 rise 1
+ server 127.0.19.049 127.0.19.49:8080 check weight 10 inter 1291 fall 2 rise 1
+ server 127.0.19.050 127.0.19.50:8080 check weight 10 inter 7335 fall 2 rise 1
+ server 127.0.19.051 127.0.19.51:8080 check weight 10 inter 6033 fall 2 rise 1
+ server 127.0.19.052 127.0.19.52:8080 check weight 10 inter 3016 fall 2 rise 1
+ server 127.0.19.053 127.0.19.53:8080 check weight 10 inter 908 fall 2 rise 1
+ server 127.0.19.054 127.0.19.54:8080 check weight 10 inter 4297 fall 2 rise 1
+ server 127.0.19.055 127.0.19.55:8080 check weight 10 inter 3133 fall 2 rise 1
+ server 127.0.19.056 127.0.19.56:8080 check weight 10 inter 3920 fall 2 rise 1
+ server 127.0.19.057 127.0.19.57:8080 check weight 10 inter 7133 fall 2 rise 1
+ server 127.0.19.058 127.0.19.58:8080 check weight 10 inter 9645 fall 2 rise 1
+ server 127.0.19.059 127.0.19.59:8080 check weight 10 inter 4709 fall 2 rise 1
+ server 127.0.19.060 127.0.19.60:8080 check weight 10 inter 1196 fall 2 rise 1
+ server 127.0.19.061 127.0.19.61:8080 check weight 10 inter 4282 fall 2 rise 1
+ server 127.0.19.062 127.0.19.62:8080 check weight 10 inter 2370 fall 2 rise 1
+ server 127.0.19.063 127.0.19.63:8080 check weight 10 inter 4597 fall 2 rise 1
+ server 127.0.19.064 127.0.19.64:8080 check weight 10 inter 567 fall 2 rise 1
+ server 127.0.19.065 127.0.19.65:8080 check weight 10 inter 5377 fall 2 rise 1
+ server 127.0.19.066 127.0.19.66:8080 check weight 10 inter 7160 fall 2 rise 1
+ server 127.0.19.067 127.0.19.67:8080 check weight 10 inter 4349 fall 2 rise 1
+ server 127.0.19.068 127.0.19.68:8080 check weight 10 inter 9972 fall 2 rise 1
+ server 127.0.19.069 127.0.19.69:8080 check weight 10 inter 2917 fall 2 rise 1
+ server 127.0.19.070 127.0.19.70:8080 check weight 10 inter 8250 fall 2 rise 1
+ server 127.0.19.071 127.0.19.71:8080 check weight 10 inter 2588 fall 2 rise 1
+ server 127.0.19.072 127.0.19.72:8080 check weight 10 inter 8007 fall 2 rise 1
+ server 127.0.19.073 127.0.19.73:8080 check weight 10 inter 4265 fall 2 rise 1
+ server 127.0.19.074 127.0.19.74:8080 check weight 10 inter 1418 fall 2 rise 1
+ server 127.0.19.075 127.0.19.75:8080 check weight 10 inter 8032 fall 2 rise 1
+ server 127.0.19.076 127.0.19.76:8080 check weight 10 inter 4975 fall 2 rise 1
+ server 127.0.19.077 127.0.19.77:8080 check weight 10 inter 5062 fall 2 rise 1
+ server 127.0.19.078 127.0.19.78:8080 check weight 10 inter 8240 fall 2 rise 1
+ server 127.0.19.079 127.0.19.79:8080 check weight 10 inter 6752 fall 2 rise 1
+ server 127.0.19.080 127.0.19.80:8080 check weight 10 inter 3002 fall 2 rise 1
+ server 127.0.19.081 127.0.19.81:8080 check weight 10 inter 9586 fall 2 rise 1
+ server 127.0.19.082 127.0.19.82:8080 check weight 10 inter 6211 fall 2 rise 1
+ server 127.0.19.083 127.0.19.83:8080 check weight 10 inter 7317 fall 2 rise 1
+ server 127.0.19.084 127.0.19.84:8080 check weight 10 inter 7943 fall 2 rise 1
+ server 127.0.19.085 127.0.19.85:8080 check weight 10 inter 1425 fall 2 rise 1
+ server 127.0.19.086 127.0.19.86:8080 check weight 10 inter 9924 fall 2 rise 1
+ server 127.0.19.087 127.0.19.87:8080 check weight 10 inter 572 fall 2 rise 1
+ server 127.0.19.088 127.0.19.88:8080 check weight 10 inter 2601 fall 2 rise 1
+ server 127.0.19.089 127.0.19.89:8080 check weight 10 inter 3848 fall 2 rise 1
+ server 127.0.19.090 127.0.19.90:8080 check weight 10 inter 6301 fall 2 rise 1
+ server 127.0.19.091 127.0.19.91:8080 check weight 10 inter 5805 fall 2 rise 1
+ server 127.0.19.092 127.0.19.92:8080 check weight 10 inter 2016 fall 2 rise 1
+ server 127.0.19.093 127.0.19.93:8080 check weight 10 inter 6646 fall 2 rise 1
+ server 127.0.19.094 127.0.19.94:8080 check weight 10 inter 1372 fall 2 rise 1
+ server 127.0.19.095 127.0.19.95:8080 check weight 10 inter 5321 fall 2 rise 1
+ server 127.0.19.096 127.0.19.96:8080 check weight 10 inter 1847 fall 2 rise 1
+ server 127.0.19.097 127.0.19.97:8080 check weight 10 inter 6658 fall 2 rise 1
+ server 127.0.19.098 127.0.19.98:8080 check weight 10 inter 974 fall 2 rise 1
+ server 127.0.19.099 127.0.19.99:8080 check weight 10 inter 8390 fall 2 rise 1
+ server 127.0.19.100 127.0.19.100:8080 check weight 10 inter 5710 fall 2 rise 1
+ server 127.0.19.101 127.0.19.101:8080 check weight 10 inter 8594 fall 2 rise 1
+ server 127.0.19.102 127.0.19.102:8080 check weight 10 inter 7342 fall 2 rise 1
+ server 127.0.19.103 127.0.19.103:8080 check weight 10 inter 2906 fall 2 rise 1
+ server 127.0.19.104 127.0.19.104:8080 check weight 10 inter 166 fall 2 rise 1
+ server 127.0.19.105 127.0.19.105:8080 check weight 10 inter 1856 fall 2 rise 1
+ server 127.0.19.106 127.0.19.106:8080 check weight 10 inter 7339 fall 2 rise 1
+ server 127.0.19.107 127.0.19.107:8080 check weight 10 inter 3864 fall 2 rise 1
+ server 127.0.19.108 127.0.19.108:8080 check weight 10 inter 5550 fall 2 rise 1
+ server 127.0.19.109 127.0.19.109:8080 check weight 10 inter 7181 fall 2 rise 1
+ server 127.0.19.110 127.0.19.110:8080 check weight 10 inter 3067 fall 2 rise 1
+ server 127.0.19.111 127.0.19.111:8080 check weight 10 inter 5469 fall 2 rise 1
+ server 127.0.19.112 127.0.19.112:8080 check weight 10 inter 5792 fall 2 rise 1
+ server 127.0.19.113 127.0.19.113:8080 check weight 10 inter 6946 fall 2 rise 1
+ server 127.0.19.114 127.0.19.114:8080 check weight 10 inter 7378 fall 2 rise 1
+ server 127.0.19.115 127.0.19.115:8080 check weight 10 inter 4282 fall 2 rise 1
+ server 127.0.19.116 127.0.19.116:8080 check weight 10 inter 9082 fall 2 rise 1
+ server 127.0.19.117 127.0.19.117:8080 check weight 10 inter 7163 fall 2 rise 1
+ server 127.0.19.118 127.0.19.118:8080 check weight 10 inter 2256 fall 2 rise 1
+ server 127.0.19.119 127.0.19.119:8080 check weight 10 inter 6207 fall 2 rise 1
+ server 127.0.19.120 127.0.19.120:8080 check weight 10 inter 5661 fall 2 rise 1
+ server 127.0.19.121 127.0.19.121:8080 check weight 10 inter 8642 fall 2 rise 1
+ server 127.0.19.122 127.0.19.122:8080 check weight 10 inter 3053 fall 2 rise 1
+ server 127.0.19.123 127.0.19.123:8080 check weight 10 inter 1068 fall 2 rise 1
+ server 127.0.19.124 127.0.19.124:8080 check weight 10 inter 9319 fall 2 rise 1
+ server 127.0.19.125 127.0.19.125:8080 check weight 10 inter 8765 fall 2 rise 1
+ server 127.0.19.126 127.0.19.126:8080 check weight 10 inter 7157 fall 2 rise 1
+ server 127.0.19.127 127.0.19.127:8080 check weight 10 inter 151 fall 2 rise 1
+ server 127.0.19.128 127.0.19.128:8080 check weight 10 inter 5041 fall 2 rise 1
+ server 127.0.19.129 127.0.19.129:8080 check weight 10 inter 5799 fall 2 rise 1
+ server 127.0.19.130 127.0.19.130:8080 check weight 10 inter 1780 fall 2 rise 1
+ server 127.0.19.131 127.0.19.131:8080 check weight 10 inter 7260 fall 2 rise 1
+ server 127.0.19.132 127.0.19.132:8080 check weight 10 inter 5096 fall 2 rise 1
+ server 127.0.19.133 127.0.19.133:8080 check weight 10 inter 6127 fall 2 rise 1
+ server 127.0.19.134 127.0.19.134:8080 check weight 10 inter 4976 fall 2 rise 1
+ server 127.0.19.135 127.0.19.135:8080 check weight 10 inter 2951 fall 2 rise 1
+ server 127.0.19.136 127.0.19.136:8080 check weight 10 inter 6908 fall 2 rise 1
+ server 127.0.19.137 127.0.19.137:8080 check weight 10 inter 4002 fall 2 rise 1
+ server 127.0.19.138 127.0.19.138:8080 check weight 10 inter 2265 fall 2 rise 1
+ server 127.0.19.139 127.0.19.139:8080 check weight 10 inter 7123 fall 2 rise 1
+ server 127.0.19.140 127.0.19.140:8080 check weight 10 inter 1787 fall 2 rise 1
+ server 127.0.19.141 127.0.19.141:8080 check weight 10 inter 8665 fall 2 rise 1
+ server 127.0.19.142 127.0.19.142:8080 check weight 10 inter 8674 fall 2 rise 1
+ server 127.0.19.143 127.0.19.143:8080 check weight 10 inter 6240 fall 2 rise 1
+ server 127.0.19.144 127.0.19.144:8080 check weight 10 inter 5083 fall 2 rise 1
+ server 127.0.19.145 127.0.19.145:8080 check weight 10 inter 6914 fall 2 rise 1
+ server 127.0.19.146 127.0.19.146:8080 check weight 10 inter 9020 fall 2 rise 1
+ server 127.0.19.147 127.0.19.147:8080 check weight 10 inter 7430 fall 2 rise 1
+ server 127.0.19.148 127.0.19.148:8080 check weight 10 inter 6233 fall 2 rise 1
+ server 127.0.19.149 127.0.19.149:8080 check weight 10 inter 4096 fall 2 rise 1
+ server 127.0.19.150 127.0.19.150:8080 check weight 10 inter 9654 fall 2 rise 1
+ server 127.0.19.151 127.0.19.151:8080 check weight 10 inter 6525 fall 2 rise 1
+ server 127.0.19.152 127.0.19.152:8080 check weight 10 inter 7108 fall 2 rise 1
+ server 127.0.19.153 127.0.19.153:8080 check weight 10 inter 7762 fall 2 rise 1
+ server 127.0.19.154 127.0.19.154:8080 check weight 10 inter 5543 fall 2 rise 1
+ server 127.0.19.155 127.0.19.155:8080 check weight 10 inter 8264 fall 2 rise 1
+ server 127.0.19.156 127.0.19.156:8080 check weight 10 inter 6504 fall 2 rise 1
+ server 127.0.19.157 127.0.19.157:8080 check weight 10 inter 9909 fall 2 rise 1
+ server 127.0.19.158 127.0.19.158:8080 check weight 10 inter 4215 fall 2 rise 1
+ server 127.0.19.159 127.0.19.159:8080 check weight 10 inter 1285 fall 2 rise 1
+ server 127.0.19.160 127.0.19.160:8080 check weight 10 inter 1964 fall 2 rise 1
+ server 127.0.19.161 127.0.19.161:8080 check weight 10 inter 1864 fall 2 rise 1
+ server 127.0.19.162 127.0.19.162:8080 check weight 10 inter 1712 fall 2 rise 1
+ server 127.0.19.163 127.0.19.163:8080 check weight 10 inter 9493 fall 2 rise 1
+ server 127.0.19.164 127.0.19.164:8080 check weight 10 inter 2475 fall 2 rise 1
+ server 127.0.19.165 127.0.19.165:8080 check weight 10 inter 1587 fall 2 rise 1
+ server 127.0.19.166 127.0.19.166:8080 check weight 10 inter 808 fall 2 rise 1
+ server 127.0.19.167 127.0.19.167:8080 check weight 10 inter 7980 fall 2 rise 1
+ server 127.0.19.168 127.0.19.168:8080 check weight 10 inter 8595 fall 2 rise 1
+ server 127.0.19.169 127.0.19.169:8080 check weight 10 inter 2695 fall 2 rise 1
+ server 127.0.19.170 127.0.19.170:8080 check weight 10 inter 9404 fall 2 rise 1
+ server 127.0.19.171 127.0.19.171:8080 check weight 10 inter 3383 fall 2 rise 1
+ server 127.0.19.172 127.0.19.172:8080 check weight 10 inter 782 fall 2 rise 1
+ server 127.0.19.173 127.0.19.173:8080 check weight 10 inter 2499 fall 2 rise 1
+ server 127.0.19.174 127.0.19.174:8080 check weight 10 inter 1692 fall 2 rise 1
+ server 127.0.19.175 127.0.19.175:8080 check weight 10 inter 4720 fall 2 rise 1
+ server 127.0.19.176 127.0.19.176:8080 check weight 10 inter 7124 fall 2 rise 1
+ server 127.0.19.177 127.0.19.177:8080 check weight 10 inter 2380 fall 2 rise 1
+ server 127.0.19.178 127.0.19.178:8080 check weight 10 inter 9993 fall 2 rise 1
+ server 127.0.19.179 127.0.19.179:8080 check weight 10 inter 2259 fall 2 rise 1
+ server 127.0.19.180 127.0.19.180:8080 check weight 10 inter 5517 fall 2 rise 1
+ server 127.0.19.181 127.0.19.181:8080 check weight 10 inter 6697 fall 2 rise 1
+ server 127.0.19.182 127.0.19.182:8080 check weight 10 inter 3070 fall 2 rise 1
+ server 127.0.19.183 127.0.19.183:8080 check weight 10 inter 1086 fall 2 rise 1
+ server 127.0.19.184 127.0.19.184:8080 check weight 10 inter 887 fall 2 rise 1
+ server 127.0.19.185 127.0.19.185:8080 check weight 10 inter 642 fall 2 rise 1
+ server 127.0.19.186 127.0.19.186:8080 check weight 10 inter 5617 fall 2 rise 1
+ server 127.0.19.187 127.0.19.187:8080 check weight 10 inter 779 fall 2 rise 1
+ server 127.0.19.188 127.0.19.188:8080 check weight 10 inter 6060 fall 2 rise 1
+ server 127.0.19.189 127.0.19.189:8080 check weight 10 inter 6774 fall 2 rise 1
+ server 127.0.19.190 127.0.19.190:8080 check weight 10 inter 9624 fall 2 rise 1
+ server 127.0.19.191 127.0.19.191:8080 check weight 10 inter 6765 fall 2 rise 1
+ server 127.0.19.192 127.0.19.192:8080 check weight 10 inter 6064 fall 2 rise 1
+ server 127.0.19.193 127.0.19.193:8080 check weight 10 inter 4335 fall 2 rise 1
+ server 127.0.19.194 127.0.19.194:8080 check weight 10 inter 1884 fall 2 rise 1
+ server 127.0.19.195 127.0.19.195:8080 check weight 10 inter 6475 fall 2 rise 1
+ server 127.0.19.196 127.0.19.196:8080 check weight 10 inter 206 fall 2 rise 1
+ server 127.0.19.197 127.0.19.197:8080 check weight 10 inter 9542 fall 2 rise 1
+ server 127.0.19.198 127.0.19.198:8080 check weight 10 inter 4319 fall 2 rise 1
+ server 127.0.19.199 127.0.19.199:8080 check weight 10 inter 9087 fall 2 rise 1
+ server 127.0.19.200 127.0.19.200:8080 check weight 10 inter 9984 fall 2 rise 1
+ server 127.0.19.201 127.0.19.201:8080 check weight 10 inter 6502 fall 2 rise 1
+ server 127.0.19.202 127.0.19.202:8080 check weight 10 inter 6476 fall 2 rise 1
+ server 127.0.19.203 127.0.19.203:8080 check weight 10 inter 4910 fall 2 rise 1
+ server 127.0.19.204 127.0.19.204:8080 check weight 10 inter 4880 fall 2 rise 1
+ server 127.0.19.205 127.0.19.205:8080 check weight 10 inter 162 fall 2 rise 1
+ server 127.0.19.206 127.0.19.206:8080 check weight 10 inter 4114 fall 2 rise 1
+ server 127.0.19.207 127.0.19.207:8080 check weight 10 inter 4101 fall 2 rise 1
+ server 127.0.19.208 127.0.19.208:8080 check weight 10 inter 9794 fall 2 rise 1
+ server 127.0.19.209 127.0.19.209:8080 check weight 10 inter 1109 fall 2 rise 1
+ server 127.0.19.210 127.0.19.210:8080 check weight 10 inter 6432 fall 2 rise 1
+ server 127.0.19.211 127.0.19.211:8080 check weight 10 inter 9880 fall 2 rise 1
+ server 127.0.19.212 127.0.19.212:8080 check weight 10 inter 3837 fall 2 rise 1
+ server 127.0.19.213 127.0.19.213:8080 check weight 10 inter 5166 fall 2 rise 1
+ server 127.0.19.214 127.0.19.214:8080 check weight 10 inter 8816 fall 2 rise 1
+ server 127.0.19.215 127.0.19.215:8080 check weight 10 inter 9754 fall 2 rise 1
+ server 127.0.19.216 127.0.19.216:8080 check weight 10 inter 5341 fall 2 rise 1
+ server 127.0.19.217 127.0.19.217:8080 check weight 10 inter 3287 fall 2 rise 1
+ server 127.0.19.218 127.0.19.218:8080 check weight 10 inter 4100 fall 2 rise 1
+ server 127.0.19.219 127.0.19.219:8080 check weight 10 inter 4220 fall 2 rise 1
+ server 127.0.19.220 127.0.19.220:8080 check weight 10 inter 5722 fall 2 rise 1
+ server 127.0.19.221 127.0.19.221:8080 check weight 10 inter 1161 fall 2 rise 1
+ server 127.0.19.222 127.0.19.222:8080 check weight 10 inter 7522 fall 2 rise 1
+ server 127.0.19.223 127.0.19.223:8080 check weight 10 inter 469 fall 2 rise 1
+ server 127.0.19.224 127.0.19.224:8080 check weight 10 inter 2422 fall 2 rise 1
+ server 127.0.19.225 127.0.19.225:8080 check weight 10 inter 7570 fall 2 rise 1
+ server 127.0.19.226 127.0.19.226:8080 check weight 10 inter 7918 fall 2 rise 1
+ server 127.0.19.227 127.0.19.227:8080 check weight 10 inter 117 fall 2 rise 1
+ server 127.0.19.228 127.0.19.228:8080 check weight 10 inter 6969 fall 2 rise 1
+ server 127.0.19.229 127.0.19.229:8080 check weight 10 inter 1362 fall 2 rise 1
+ server 127.0.19.230 127.0.19.230:8080 check weight 10 inter 3115 fall 2 rise 1
+ server 127.0.19.231 127.0.19.231:8080 check weight 10 inter 4906 fall 2 rise 1
+ server 127.0.19.232 127.0.19.232:8080 check weight 10 inter 237 fall 2 rise 1
+ server 127.0.19.233 127.0.19.233:8080 check weight 10 inter 4408 fall 2 rise 1
+ server 127.0.19.234 127.0.19.234:8080 check weight 10 inter 177 fall 2 rise 1
+ server 127.0.19.235 127.0.19.235:8080 check weight 10 inter 655 fall 2 rise 1
+ server 127.0.19.236 127.0.19.236:8080 check weight 10 inter 9977 fall 2 rise 1
+ server 127.0.19.237 127.0.19.237:8080 check weight 10 inter 7167 fall 2 rise 1
+ server 127.0.19.238 127.0.19.238:8080 check weight 10 inter 3293 fall 2 rise 1
+ server 127.0.19.239 127.0.19.239:8080 check weight 10 inter 3416 fall 2 rise 1
+ server 127.0.19.240 127.0.19.240:8080 check weight 10 inter 909 fall 2 rise 1
+ server 127.0.19.241 127.0.19.241:8080 check weight 10 inter 6824 fall 2 rise 1
+ server 127.0.19.242 127.0.19.242:8080 check weight 10 inter 683 fall 2 rise 1
+ server 127.0.19.243 127.0.19.243:8080 check weight 10 inter 9165 fall 2 rise 1
+ server 127.0.19.244 127.0.19.244:8080 check weight 10 inter 1650 fall 2 rise 1
+ server 127.0.19.245 127.0.19.245:8080 check weight 10 inter 966 fall 2 rise 1
+ server 127.0.19.246 127.0.19.246:8080 check weight 10 inter 3028 fall 2 rise 1
+ server 127.0.19.247 127.0.19.247:8080 check weight 10 inter 5466 fall 2 rise 1
+ server 127.0.19.248 127.0.19.248:8080 check weight 10 inter 5983 fall 2 rise 1
+ server 127.0.19.249 127.0.19.249:8080 check weight 10 inter 4892 fall 2 rise 1
+ server 127.0.19.250 127.0.19.250:8080 check weight 10 inter 792 fall 2 rise 1
diff --git a/tests/conf/test-str2sa.cfg b/tests/conf/test-str2sa.cfg
new file mode 100644
index 0000000..52ee44a
--- /dev/null
+++ b/tests/conf/test-str2sa.cfg
@@ -0,0 +1,60 @@
+global
+ stats socket /tmp/sock1 mode 666 level admin
+ stats timeout 2d
+ #log 127.0.0.1:1000 local0 # good
+ #log 127.0.0.1 local0 # good
+ #log 127.0.0.1:1001-1002 local0
+ #log 127.0.0.1:-1003 local0
+ #log 127.0.0.1:+1004 local0
+
+defaults
+ timeout client 5s
+ timeout server 5s
+ timeout connect 5s
+ #log 127.0.0.1:1000 local0 # good
+ #log 127.0.0.1 local0 # good
+ #log 127.0.0.1:1001-1002 local0
+ #log 127.0.0.1:-1003 local0
+ #log 127.0.0.1:+1004 local0
+
+listen p
+ mode http
+ bind :8001
+ bind *:8002
+ bind :::8003
+ bind 127.0.0.1:8004
+ #bind ::127.0.0.1:8005
+ bind :::8006
+ bind 127.0.0.1:8007-8009
+ #bind 127.0.0.1:8010-
+ #bind 127.0.0.1:-8011
+ #bind 127.0.0.1:+8012
+
+ stats uri /stat
+ #dispatch 192.168.0.176:8005 # good
+ #dispatch 192.168.0.176
+ #dispatch 192.168.0.176:8001-8002
+ #dispatch 192.168.0.176:-8003
+ #dispatch 192.168.0.176:+8004
+
+ server s1 192.168.0.176:80 check addr 192.168.0.176:8000 source 192.168.0.1:10000-59999 check
+
+ #server s1 192.168.0.176:80 addr 192.168.0.176:-8000 source 192.168.0.1:10000-59999 check
+ #server s1 192.168.0.176:80 addr 192.168.0.176:+8000 source 192.168.0.1:10000-59999 check
+ #server s1 192.168.0.176:80 addr 192.168.0.176:8000-8001 source 192.168.0.1:10000-59999 check
+
+ #source 192.168.0.1:8000 # good
+ #source 192.168.0.1:-8000
+ #source 192.168.0.1:+8000
+ #source 192.168.0.1:8000-8001
+
+ #source 192.168.0.1:8000-8001
+ #source 192.168.0.1 usesrc 192.168.0.1:8000-8001
+
+peers truc
+ #peer machin1 127.0.0.1 # good
+ #peer machin2 127.0.0.2:1000-2000
+ #peer machin2 127.0.0.3:-2000
+ #peer machin2 127.0.0.4:+2000
+ #peer machin2 127.0.0.5:2000
+
diff --git a/tests/conf/test-time.cfg b/tests/conf/test-time.cfg
new file mode 100644
index 0000000..d4e3970
--- /dev/null
+++ b/tests/conf/test-time.cfg
@@ -0,0 +1,24 @@
+# This is a test configuration.
+# It is used to check that time units are correctly parsed.
+
+global
+ maxconn 1000
+ stats timeout 3s
+
+listen sample1
+ mode http
+ retries 1
+ option redispatch
+ timeout connect 5s
+ timeout client 15m
+ timeout server 15m
+ maxconn 40000
+ bind :8080
+ balance roundrobin
+ option allbackups
+ server act1 127.0.0.1:80 weight 10 check port 81 inter 500ms fall 1
+ server act2 127.0.0.2:80 weight 20 check port 81 inter 500ms fall 1
+ server act3 127.0.0.3:80 weight 30 check port 81 inter 500ms fall 1
+ option httpclose
+ stats uri /stats
+ stats refresh 5000ms
diff --git a/tests/conf/test-timeout.cfg b/tests/conf/test-timeout.cfg
new file mode 100644
index 0000000..ebaea7d
--- /dev/null
+++ b/tests/conf/test-timeout.cfg
@@ -0,0 +1,27 @@
+# This is a test configuration.
+# It is used to check that time units are correctly parsed.
+
+global
+ maxconn 1000
+ stats timeout 3s
+
+listen sample1
+ mode http
+ retries 1
+ option redispatch
+ timeout client 15m
+ timeout http-request 6s
+ timeout tarpit 20s
+ timeout queue 60s
+ timeout connect 5s
+ timeout server 15m
+ maxconn 40000
+ bind :8000
+ balance roundrobin
+ option allbackups
+ server act1 127.0.0.1:80 weight 10 check port 81 inter 500ms fall 1
+ server act2 127.0.0.2:80 weight 20 check port 81 inter 500ms fall 1
+ server act3 127.0.0.3:80 weight 30 check port 81 inter 500ms fall 1
+ option httpclose
+ stats uri /stats
+ stats refresh 5000ms
diff --git a/tests/conf/test-url-hash.cfg b/tests/conf/test-url-hash.cfg
new file mode 100644
index 0000000..53cbf4b
--- /dev/null
+++ b/tests/conf/test-url-hash.cfg
@@ -0,0 +1,40 @@
+# This is a test configuration.
+# It exercises the "url_param" balance algorithm. It looks for
+# an URL parameter named "foo".
+
+global
+ maxconn 100
+ log 127.0.0.1 local0
+
+listen vip1
+ log global
+ option httplog
+ bind :8000
+ mode http
+ maxconn 100
+ timeout client 5000
+ timeout connect 5000
+ timeout server 5000
+ balance url_param foo
+ server srv1 127.0.0.1:80
+ server srv2 127.0.0.1:80
+
+ # control activity this way
+ stats uri /stat
+
+listen vip2
+ log global
+ option httplog
+ bind :8001
+ mode http
+ maxconn 100
+ timeout client 5000
+ timeout connect 5000
+ timeout server 5000
+ balance url_param foo check_post
+ server srv1 127.0.0.1:80
+ server srv2 127.0.0.1:80
+
+ # control activity this way
+ stats uri /stat
+
diff --git a/tests/conf/test-valid-names.cfg b/tests/conf/test-valid-names.cfg
new file mode 100644
index 0000000..b339d1c
--- /dev/null
+++ b/tests/conf/test-valid-names.cfg
@@ -0,0 +1,37 @@
+# This is a test configuration.
+# It checks instances, servers and acl names.
+
+listen valid_listen1
+ bind :8000
+ timeout client 5000
+ timeout connect 5000
+ timeout server 5000
+ balance roundrobin
+ server srv1 127.0.0.1:80
+
+frontend www.valid-frontend.net:80
+ bind :8001
+ timeout client 5000
+ acl host_www.valid-frontend.net:80 hdr(host) www.valid-frontend.net
+
+backend Valid_BK-1
+ timeout connect 5000
+ timeout server 5000
+ balance roundrobin
+ server bk1_srv-1:80 127.0.0.1:80
+
+frontend www.test-frontend.net:8002/invalid
+ bind :8002
+ timeout client 5000
+
+frontend ft1_acl
+ bind :8003
+ timeout client 5000
+ acl invalid!name url /
+
+backend bk2_srv
+ timeout connect 5000
+ timeout server 5000
+ balance roundrobin
+ server bk2/srv-1 127.0.0.1:80
+
diff --git a/tests/exp/blocksig.c b/tests/exp/blocksig.c
new file mode 100644
index 0000000..13b55e7
--- /dev/null
+++ b/tests/exp/blocksig.c
@@ -0,0 +1,16 @@
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+int main(int argc, char **argv)
+{
+ sigset_t new_sig, old_sig;
+
+ sigfillset(&new_sig);
+ sigprocmask(SIG_SETMASK, &new_sig, &old_sig);
+ printf("old_sig: %16Lx\n", *(unsigned long long*)&old_sig);
+ printf("new_sig: %16Lx\n", *(unsigned long long*)&new_sig);
+
+ argc--; argv++;
+ return argc ? execvp(*argv, argv) : 0;
+}
diff --git a/tests/exp/filltab25.c b/tests/exp/filltab25.c
new file mode 100644
index 0000000..1197143
--- /dev/null
+++ b/tests/exp/filltab25.c
@@ -0,0 +1,399 @@
+/*
+ * experimental weighted round robin scheduler - (c) 2007 willy tarreau.
+ *
+ * This filling algorithm is excellent at spreading the servers, as it also
+ * takes care of keeping the most uniform distance between occurrences of each
+ * server, by maximizing this distance. It reduces the number of variables
+ * and expensive operations.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <import/eb32tree.h>
+
+struct srv {
+ struct eb32_node node;
+ struct eb_root *tree; // we want to know where the server is
+ int num;
+ int w; /* weight */
+ int next, last;
+ int rem;
+} *srv;
+
+/* those trees represent a sliding window of 3 time frames */
+struct eb_root tree_0 = EB_ROOT;
+struct eb_root tree_1 = EB_ROOT;
+struct eb_root tree_2 = EB_ROOT;
+
+struct eb_root *init_tree; /* receives positions 0..sw-1 */
+struct eb_root *next_tree; /* receives positions >= 2sw */
+
+int nsrv; /* # of servers */
+int nsw, sw; /* sum of weights */
+int p; /* current position, between sw..2sw-1 */
+
+/* queue a server in the weights tree */
+void queue_by_weight(struct eb_root *root, struct srv *s) {
+ s->node.key = 255 - s->w;
+ eb32_insert(root, &s->node);
+ s->tree = root;
+}
+
+/* queue a server in the weight tree <root>, except if its weight is 0 */
+void queue_by_weight_0(struct eb_root *root, struct srv *s) {
+ if (s->w) {
+ s->node.key = 255 - s->w;
+ eb32_insert(root, &s->node);
+ s->tree = root;
+ } else {
+ s->tree = NULL;
+ }
+}
+
+static inline void dequeue_srv(struct srv *s) {
+ eb32_delete(&s->node);
+}
+
+/* queues a server into the correct tree depending on ->next */
+void put_srv(struct srv *s) {
+ if (s->w <= 0 ||
+ s->next >= 2*sw || /* delay everything which does not fit into the window */
+ s->next >= sw+nsw) { /* and everything which does not fit into the theoretical new window */
+ /* put into next tree */
+ s->next -= sw; // readjust next in case we could finally take this back to current.
+ queue_by_weight_0(next_tree, s);
+ } else {
+ // The overflow problem is caused by the scale we want to apply to user weight
+ // to turn it into effective weight. Since this is only used to provide a smooth
+ // slowstart on very low weights (1), it is a pure waste. Thus, we just have to
+ // apply a small scaling factor and warn the user that slowstart is not very smooth
+ // on low weights.
+ // The max key is about ((scale*maxw)*(scale*maxw)*nbsrv)/ratio (where the ratio is
+ // the arbitrary divide we perform in the examples above). Assuming that ratio==scale,
+ // this translates to maxkey=scale*maxw^2*nbsrv, so
+ // max_nbsrv=2^32/255^2/scale ~= 66051/scale
+ // Using a scale of 16 is enough to support 4000 servers without overflow, providing
+ // 6% steps during slowstart.
+
+ s->node.key = 256 * s->next + (16*255 + s->rem - s->w) / 16;
+
+ /* check for overflows */
+ if ((int)s->node.key < 0)
+ printf(" OV: srv=%p w=%d rem=%d next=%d key=%d", s, s->w, s->rem, s->next, s->node.key);
+ eb32_insert(&tree_0, &s->node);
+ s->tree = &tree_0;
+ }
+}
+
+/* prepares a server when extracting it from the init tree */
+static inline void get_srv_init(struct srv *s) {
+ s->next = s->rem = 0;
+}
+
+/* prepares a server when extracting it from the next tree */
+static inline void get_srv_next(struct srv *s) {
+ s->next += sw;
+}
+
+/* prepares a server when extracting it from the next tree */
+static inline void get_srv_down(struct srv *s) {
+ s->next = p;
+}
+
+/* prepares a server when extracting it from its tree */
+void get_srv(struct srv *s) {
+ if (s->tree == init_tree) {
+ get_srv_init(s);
+ }
+ else if (s->tree == next_tree) {
+ get_srv_next(s);
+ }
+ else if (s->tree == NULL) {
+ get_srv_down(s);
+ }
+}
+
+
+/* return next server from the current tree, or a server from the init tree
+ * if appropriate. If both trees are empty, return NULL.
+ */
+struct srv *get_next_server() {
+ struct eb32_node *node;
+ struct srv *s;
+
+ node = eb32_first(&tree_0);
+ s = eb32_entry(node, struct srv, node);
+
+ if (!node || s->next > p) {
+ /* either we have no server left, or we have a hole */
+ struct eb32_node *node2;
+ node2 = eb32_first(init_tree);
+ if (node2) {
+ node = node2;
+ s = eb32_entry(node, struct srv, node);
+ get_srv_init(s);
+ if (s->w == 0)
+ node = NULL;
+ s->node.key = 0; // do not display random values
+ }
+ }
+ if (node)
+ return s;
+ else
+ return NULL;
+}
+
+void update_position(struct srv *s) {
+ //if (s->tree == init_tree) {
+ if (!s->next) {
+ // first time ever for this server
+ s->last = p;
+ s->next = p + nsw / s->w;
+ s->rem += nsw % s->w;
+
+ if (s->rem >= s->w) {
+ s->rem -= s->w;
+ s->next++;
+ }
+ } else {
+ s->last = s->next; // or p ?
+ //s->next += sw / s->w;
+ //s->rem += sw % s->w;
+ s->next += nsw / s->w;
+ s->rem += nsw % s->w;
+
+ if (s->rem >= s->w) {
+ s->rem -= s->w;
+ s->next++;
+ }
+ }
+}
+
+
+/* switches trees init_tree and next_tree. init_tree should be empty when
+ * this happens, and next_tree filled with servers sorted by weights.
+ */
+void switch_trees() {
+ struct eb_root *swap;
+ swap = init_tree;
+ init_tree = next_tree;
+ next_tree = swap;
+ sw = nsw;
+ p = sw;
+}
+
+main(int argc, char **argv) {
+ int conns;
+ int i;
+
+ struct srv *s;
+
+ argc--; argv++;
+ nsrv = argc;
+
+ if (!nsrv)
+ exit(1);
+
+ srv = calloc(nsrv, sizeof(struct srv));
+
+ sw = 0;
+ for (i = 0; i < nsrv; i++) {
+ s = &srv[i];
+ s->num = i;
+ s->w = atol(argv[i]);
+ sw += s->w;
+ }
+
+ nsw = sw;
+
+ init_tree = &tree_1;
+ next_tree = &tree_2;
+
+ /* and insert all the servers in the PREV tree */
+ /* note that it is required to insert them according to
+ * the reverse order of their weights.
+ */
+ printf("---------------:");
+ for (i = 0; i < nsrv; i++) {
+ s = &srv[i];
+ queue_by_weight_0(init_tree, s);
+ printf("%2d", s->w);
+ }
+ printf("\n");
+
+ p = sw; // time base of current tree
+ conns = 0;
+ while (1) {
+ struct eb32_node *node;
+
+ printf("%08d|%06d: ", conns, p);
+
+ /* if we have en empty tree, let's first try to collect weights
+ * which might have changed.
+ */
+ if (!sw) {
+ if (nsw) {
+ sw = nsw;
+ p = sw;
+ /* do not switch trees, otherwise new servers (from init)
+ * would end up in next.
+ */
+ //switch_trees();
+ //printf("bla\n");
+ }
+ else
+ goto next_iteration;
+ }
+
+ s = get_next_server();
+ if (!s) {
+ printf("----------- switch (empty) -- sw=%d -> %d ---------\n", sw, nsw);
+ switch_trees();
+ s = get_next_server();
+ printf("%08d|%06d: ", conns, p);
+
+ if (!s)
+ goto next_iteration;
+ }
+ else if (s->next >= 2*sw) {
+ printf("ARGGGGG! s[%d].next=%d, max=%d\n", s->num, s->next, 2*sw-1);
+ }
+
+ /* now we have THE server we want to put at this position */
+ for (i = 0; i < s->num; i++) {
+ if (srv[i].w > 0)
+ printf(". ");
+ else
+ printf("_ ");
+ }
+ printf("# ");
+ for (i = s->num + 1; i < nsrv; i++) {
+ if (srv[i].w > 0)
+ printf(". ");
+ else
+ printf("_ ");
+ }
+ printf(" : ");
+
+ printf("s=%02d v=%04d w=%03d n=%03d r=%03d ",
+ s->num, s->node.key, s->w, s->next, s->rem);
+
+ update_position(s);
+ printf(" | next=%03d, rem=%03d ", s->next, s->rem);
+
+ if (s->next >= sw * 2) {
+ dequeue_srv(s);
+ //queue_by_weight(next_tree, s);
+ put_srv(s);
+ printf(" => next (w=%d, n=%d) ", s->w, s->next);
+ }
+ else {
+ printf(" => curr ");
+
+ //s->node.key = s->next;
+ /* we want to ensure that in case of conflicts, servers with
+ * the highest weights will get served first. Also, we still
+ * have the remainder to see where the entry expected to be
+ * inserted.
+ */
+ //s->node.key = 256 * s->next + 255 - s->w;
+ //s->node.key = sw * s->next + sw / s->w;
+ //s->node.key = sw * s->next + s->rem; /// seems best (check with filltab15) !
+
+ //s->node.key = (2 * sw * s->next) + s->rem + sw / s->w;
+
+ /* FIXME: must be optimized */
+ dequeue_srv(s);
+ put_srv(s);
+ //eb32i_insert(&tree_0, &s->node);
+ //s->tree = &tree_0;
+ }
+
+ next_iteration:
+ p++;
+ conns++;
+ if (/*conns == 30*/ /**/random()%100 == 0/**/) {
+ int w = /*20*//**/random()%4096/**/;
+ int num = /*1*//**/random()%nsrv/**/;
+ struct srv *s = &srv[num];
+
+ nsw = nsw - s->w + w;
+ //sw=nsw;
+
+ if (s->tree == init_tree) {
+ printf(" -- chgwght1(%d): %d->%d, n=%d --", s->num, s->w, w, s->next);
+ printf("(init)");
+ s->w = w;
+ dequeue_srv(s);
+ queue_by_weight_0(s->tree, s);
+ }
+ else if (s->tree == NULL) {
+ printf(" -- chgwght2(%d): %d->%d, n=%d --", s->num, s->w, w, s->next);
+ printf("(down)");
+ s->w = w;
+ dequeue_srv(s);
+ //queue_by_weight_0(init_tree, s);
+ get_srv(s);
+ s->next = p + (nsw + sw - p) / s->w;
+ put_srv(s);
+ }
+ else {
+ int oldnext;
+
+ /* the server is either active or in the next queue */
+ get_srv(s);
+ printf(" -- chgwght3(%d): %d->%d, n=%d, sw=%d, nsw=%d --", s->num, s->w, w, s->next, sw, nsw);
+
+ oldnext = s->next;
+ s->w = w;
+
+ /* we must measure how far we are from the end of the current window
+ * and try to fit their as many entries as should theoretically be.
+ */
+
+ //s->w = s->w * (2*sw - p) / sw;
+ if (s->w > 0) {
+ int step = (nsw /*+ sw - p*/) / s->w;
+ s->next = s->last + step;
+ s->rem = 0;
+ if (s->next > oldnext) {
+ s->next = oldnext;
+ printf(" aaaaaaa ");
+ }
+
+ if (s->next < p + 2) {
+ s->next = p + step;
+ printf(" bbbbbb ");
+ }
+ } else {
+ printf(" push -- ");
+ /* push it into the next tree */
+ s->w = 0;
+ s->next = p + sw;
+ }
+
+
+ dequeue_srv(s);
+ printf(" n=%d", s->next);
+ put_srv(s);
+ }
+ }
+
+ printf("\n");
+
+ if (0 && conns % 50000 == 0) {
+ printf("-------- %-5d : changing all weights ----\n", conns);
+
+ for (i = 0; i < nsrv; i++) {
+ int w = i + 1;
+ s = &srv[i];
+ nsw = nsw - s->w + w;
+ s->w = w;
+ dequeue_srv(s);
+ queue_by_weight_0(next_tree, s); // or init_tree ?
+ }
+ }
+
+ }
+}
+
diff --git a/tests/exp/hash_results.txt b/tests/exp/hash_results.txt
new file mode 100644
index 0000000..0f14ec9
--- /dev/null
+++ b/tests/exp/hash_results.txt
@@ -0,0 +1,218 @@
+Test: ./test_hashes | sort -k 3 -r
+
+Note: haproxy_server_hash should be avoided as it's just a 32 bit XOR.
+
+Athlon @ 1533 MHz, gcc-3.4 -march=i686 :
+ haproxy_server_hash : 18477000 run/sec
+ SuperFastHash : 6983511 run/sec
+ hash_djbx33 : 4164334 run/sec
+ bernstein : 3371838 run/sec
+ kr_hash : 3257684 run/sec
+ sax_hash : 3027567 run/sec
+ fnv_hash : 2818374 run/sec
+ haproxy_uri_hash : 2108346 run/sec
+ oat_hash : 2106181 run/sec
+ hashword : 1936973 run/sec
+ hashpjw : 1803475 run/sec
+ fnv_32a_str : 1499198 run/sec
+
+Pentium-M @1700 MHz, gcc-3.4 -march=i686 :
+ haproxy_server_hash : 15471737 run/sec
+ SuperFastHash : 8155706 run/sec
+ hash_djbx33 : 4520191 run/sec
+ bernstein : 3956142 run/sec
+ kr_hash : 3725125 run/sec
+ fnv_hash : 3155413 run/sec
+ sax_hash : 2688323 run/sec
+ oat_hash : 2452789 run/sec
+ haproxy_uri_hash : 2010853 run/sec
+ hashword : 1831441 run/sec
+ hashpjw : 1737000 run/sec
+ fnv_32a_str : 1643737 run/sec
+
+Athlon @ 1533 MHz, gcc-4.1 -march=i686 :
+ haproxy_server_hash : 13592089 run/sec
+ SuperFastHash2 : 8687957 run/sec
+ SuperFastHash : 7361242 run/sec
+ hash_djbx33 : 5741546 run/sec
+ bernstein : 3368909 run/sec
+ sax_hash : 3339880 run/sec
+ kr_hash : 3277230 run/sec
+ fnv_hash : 2832402 run/sec
+ hashword : 2500317 run/sec
+ haproxy_uri_hash : 2433241 run/sec
+ oat_hash : 2403118 run/sec
+ hashpjw : 1881229 run/sec
+ fnv_32a_str : 1815709 run/sec
+
+Pentium-M @1700 MHz, gcc-4.1 -march=i686 :
+ haproxy_server_hash : 14128788 run/sec
+ SuperFastHash2 : 8157119 run/sec
+ SuperFastHash : 7481027 run/sec
+ hash_djbx33 : 5660711 run/sec
+ bernstein : 3961493 run/sec
+ fnv_hash : 3590727 run/sec
+ kr_hash : 3389393 run/sec
+ sax_hash : 2667227 run/sec
+ oat_hash : 2348211 run/sec
+ hashword : 2278856 run/sec
+ haproxy_uri_hash : 2098022 run/sec
+ hashpjw : 1846583 run/sec
+ fnv_32a_str : 1661219 run/sec
+
+Pentium-M @600 MHz, gcc-4.1 -march=i686 :
+ haproxy_server_hash : 5318468 run/sec
+ SuperFastHash2 : 3126165 run/sec
+ SuperFastHash : 2729981 run/sec
+ hash_djbx33 : 2042181 run/sec
+ bernstein : 1422927 run/sec
+ fnv_hash : 1287736 run/sec
+ kr_hash : 1217924 run/sec
+ sax_hash : 949694 run/sec
+ oat_hash : 837279 run/sec
+ hashword : 812868 run/sec
+ haproxy_uri_hash : 747611 run/sec
+ hashpjw : 659890 run/sec
+ fnv_32a_str : 590895 run/sec
+
+athlon @ 1.5 GHz, gcc-2.95 -march=i686 :
+ haproxy_server_hash : 13592864 run/sec
+ SuperFastHash : 6931251 run/sec
+ bernstein : 4105179 run/sec
+ hash_djbx33 : 3920059 run/sec
+ kr_hash : 2985794 run/sec
+ fnv_hash : 2815457 run/sec
+ sax_hash : 2791358 run/sec
+ haproxy_uri_hash : 2786663 run/sec
+ oat_hash : 2237859 run/sec
+ hashword : 1985740 run/sec
+ hashpjw : 1757733 run/sec
+ fnv_32a_str : 1697299 run/sec
+
+Pentium-M @ 600 MHz, gcc-2.95 -march=i686 :
+ SuperFastHash : 2934387 run/sec
+ haproxy_server_hash : 2864668 run/sec
+ hash_djbx33 : 1498043 run/sec
+ bernstein : 1414993 run/sec
+ kr_hash : 1297907 run/sec
+ fnv_hash : 1260343 run/sec
+ sax_hash : 924764 run/sec
+ oat_hash : 854545 run/sec
+ haproxy_uri_hash : 790040 run/sec
+ hashword : 693501 run/sec
+ hashpjw : 647346 run/sec
+ fnv_32a_str : 579691 run/sec
+
+Pentium-M @ 1700 MHz, gcc-2.95 -march=i686 :
+ SuperFastHash : 8006127 run/sec
+ haproxy_server_hash : 7834162 run/sec
+ hash_djbx33 : 4186025 run/sec
+ bernstein : 3941492 run/sec
+ kr_hash : 3630713 run/sec
+ fnv_hash : 3507488 run/sec
+ sax_hash : 2528128 run/sec
+ oat_hash : 2395188 run/sec
+ haproxy_uri_hash : 2158924 run/sec
+ hashword : 1910992 run/sec
+ hashpjw : 1819894 run/sec
+ fnv_32a_str : 1629844 run/sec
+
+UltraSparc @ 400 MHz, gcc-3.4.3 :
+ haproxy_server_hash : 5573220 run/sec
+ SuperFastHash : 1372714 run/sec
+ bernstein : 1361733 run/sec
+ hash_djbx33 : 1090373 run/sec
+ sax_hash : 872499 run/sec
+ oat_hash : 730354 run/sec
+ kr_hash : 645431 run/sec
+ haproxy_uri_hash : 541157 run/sec
+ fnv_32a_str : 442608 run/sec
+ hashpjw : 434858 run/sec
+ fnv_hash : 401945 run/sec
+ hashword : 340594 run/sec
+
+UltraSparc @ 400 MHz, gcc-3.4.3 -mcpu=v9 :
+ haproxy_server_hash : 5671183 run/sec
+ bernstein : 1437122 run/sec
+ hash_djbx33 : 1376294 run/sec
+ SuperFastHash : 1306634 run/sec
+ sax_hash : 873650 run/sec
+ kr_hash : 801439 run/sec
+ oat_hash : 729920 run/sec
+ haproxy_uri_hash : 545341 run/sec
+ hashpjw : 472190 run/sec
+ fnv_32a_str : 443668 run/sec
+ hashword : 357295 run/sec
+ fnv_hash : 208823 run/sec
+
+
+Alpha EV6 @ 466 MHz, gcc-3.3 :
+ haproxy_server_hash : 2495928 run/sec
+ SuperFastHash : 2037208 run/sec
+ hash_djbx33 : 1625092 run/sec
+ kr_hash : 1532206 run/sec
+ bernstein : 1256746 run/sec
+ haproxy_uri_hash : 999106 run/sec
+ oat_hash : 841943 run/sec
+ sax_hash : 737447 run/sec
+ hashpjw : 676170 run/sec
+ fnv_hash : 644054 run/sec
+ fnv_32a_str : 638526 run/sec
+ hashword : 421777 run/sec
+
+VIA EPIA @ 533 MHz, gcc-2.95 -march=i586 :
+ haproxy_server_hash : 1391374 run/sec
+ SuperFastHash : 912397 run/sec
+ hash_djbx33 : 589868 run/sec
+ kr_hash : 453706 run/sec
+ bernstein : 437318 run/sec
+ sax_hash : 334456 run/sec
+ hashpjw : 316670 run/sec
+ hashword : 315476 run/sec
+ haproxy_uri_hash : 311112 run/sec
+ oat_hash : 259127 run/sec
+ fnv_32a_str : 229485 run/sec
+ fnv_hash : 151620 run/sec
+
+VIA EPIA @ 533 MHz, gcc-3.4 -march=i586 :
+ haproxy_server_hash : 1660407 run/sec
+ SuperFastHash : 791981 run/sec
+ hash_djbx33 : 680498 run/sec
+ kr_hash : 384076 run/sec
+ bernstein : 377247 run/sec
+ sax_hash : 355183 run/sec
+ hashpjw : 298879 run/sec
+ haproxy_uri_hash : 296748 run/sec
+ oat_hash : 283932 run/sec
+ hashword : 269429 run/sec
+ fnv_32a_str : 204776 run/sec
+ fnv_hash : 155301 run/sec
+
+Pentium @ 133 MHz, gcc-3.4 -march=i586 :
+ haproxy_server_hash : 930788 run/sec
+ SuperFastHash : 344988 run/sec
+ hash_djbx33 : 278996 run/sec
+ bernstein : 211545 run/sec
+ sax_hash : 185225 run/sec
+ kr_hash : 156603 run/sec
+ oat_hash : 135163 run/sec
+ hashword : 128518 run/sec
+ fnv_hash : 107024 run/sec
+ haproxy_uri_hash : 105523 run/sec
+ fnv_32a_str : 99913 run/sec
+ hashpjw : 97860 run/sec
+
+VAX VLC4000 @30 MHz, gcc-2.95 :
+ haproxy_server_hash : 13208 run/sec
+ hash_djbx33 : 12963 run/sec
+ fnv_hash : 12150 run/sec
+ SuperFastHash : 12037 run/sec
+ bernstein : 11765 run/sec
+ kr_hash : 11111 run/sec
+ sax_hash : 7273 run/sec
+ hashword : 7143 run/sec
+ oat_hash : 6931 run/sec
+ hashpjw : 6667 run/sec
+ haproxy_uri_hash : 5714 run/sec
+ fnv_32a_str : 4800 run/sec
+
diff --git a/tests/exp/hashing-results.txt b/tests/exp/hashing-results.txt
new file mode 100644
index 0000000..d2dbf59
--- /dev/null
+++ b/tests/exp/hashing-results.txt
@@ -0,0 +1,314 @@
+These are the results of tests conducted to determine the efficacy of hashing
+algorithms and avalanche application in haproxy. All results below were
+generated using version 1.5. See the document on hashing under internal docs
+for a detailed description of the tests, the methodology, and interpretation
+of the results.
+
+The following was the set up used
+
+(a) hash-type consistent/map-based
+(b) avalanche on/off
+(c) balance host(hdr)
+(d) 3 criteria for inputs
+ - ~ 10K requests, including duplicates
+ - ~ 46K requests, unique requests from 1 MM requests were obtained
+ - ~ 250K requests, including duplicates
+(e) 17 servers in backend, all servers were assigned the same weight
+
+The results can be interpreted across 3 dimensions corresponding to input
+criteria (a)/(b) and (d) above
+
+== 10 K requests ==
+
+=== Consistent with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 576 637 592
+2 552 608 600
+3 539 559 551
+4 578 586 493
+5 534 555 549
+6 614 607 576
+7 519 556 554
+8 591 565 607
+9 529 604 575
+10 642 550 678
+11 537 591 506
+12 568 571 567
+13 589 606 572
+14 648 568 711
+15 645 557 603
+16 583 627 591
+17 699 596 618
+-----------------------------
+Std Dev 48.95 26.29 51.75
+
+=== Consistent without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 612 627 579
+2 631 607 563
+3 585 605 604
+4 594 502 518
+5 583 526 602
+6 589 594 555
+7 591 602 511
+8 518 540 623
+9 550 519 523
+10 600 637 647
+11 568 536 550
+12 552 605 645
+13 547 556 564
+14 615 674 635
+15 642 624 618
+16 575 585 609
+17 591 604 597
+-----------------------------
+Std Dev 30.71 45.97 42.52
+
+=== Map based without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 602 560 598
+2 576 583 583
+3 579 624 593
+4 608 587 551
+5 579 549 588
+6 582 560 590
+7 553 616 562
+8 568 600 551
+9 594 607 620
+10 574 611 635
+11 578 607 603
+12 563 581 547
+13 604 531 572
+14 621 606 618
+15 600 561 602
+16 555 570 585
+17 607 590 545
+-----------------------------
+Std Dev 19.24 25.56 26.29
+
+=== Map based with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+Servers SDBM DJB2 WT6
+1 548 641 597
+2 612 563 655
+3 596 536 595
+4 609 574 537
+5 586 610 570
+6 600 568 562
+7 589 573 578
+8 584 549 573
+9 561 636 603
+10 607 553 603
+11 554 602 616
+12 560 577 568
+13 597 534 570
+14 597 647 570
+15 563 581 647
+16 575 647 565
+17 605 552 534
+-----------------------------
+Std Dev 20.23 37.47 32.16
+
+== Uniques in 1 MM ==
+
+=== Consistent with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 2891 2963 2947
+2 2802 2849 2771
+3 2824 2854 2904
+4 2704 2740 2763
+5 2664 2699 2646
+6 2902 2876 2935
+7 2829 2745 2730
+8 2648 2768 2800
+9 2710 2741 2689
+10 3070 3111 3106
+11 2733 2638 2589
+12 2828 2866 2885
+13 2876 2961 2870
+14 3090 2997 3044
+15 2871 2879 2827
+16 2881 2727 2921
+17 2936 2845 2832
+-----------------------------
+Std Dev 121.66 118.59 131.61
+
+=== Consistent without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 2879 2930 2863
+2 2835 2856 2853
+3 2875 2741 2899
+4 2720 2718 2761
+5 2703 2754 2689
+6 2848 2901 2925
+7 2829 2756 2838
+8 2761 2779 2805
+9 2719 2671 2746
+10 3015 3176 3079
+11 2620 2661 2656
+12 2879 2773 2713
+13 2829 2844 2925
+14 3064 2951 3041
+15 2898 2928 2877
+16 2880 2867 2791
+17 2905 2953 2798
+-----------------------------
+Std Dev 107.65 125.2 111.34
+
+=== Map based without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 2863 2837 2923
+2 2966 2829 2847
+3 2865 2803 2808
+4 2682 2816 2787
+5 2847 2782 2815
+6 2910 2862 2862
+7 2821 2784 2793
+8 2837 2834 2796
+9 2857 2891 2859
+10 2829 2906 2873
+11 2742 2851 2841
+12 2790 2837 2870
+13 2765 2902 2794
+14 2870 2732 2900
+15 2898 2891 2759
+16 2877 2860 2863
+17 2840 2842 2869
+-----------------------------
+Std Dev 64.65 45.16 43.38
+
+=== Map based with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 2816 2859 2895
+2 2841 2739 2789
+3 2846 2903 2888
+4 2817 2878 2812
+5 2750 2794 2852
+6 2816 2917 2847
+7 2792 2782 2786
+8 2800 2814 2868
+9 2854 2883 2842
+10 2770 2854 2855
+11 2851 2854 2837
+12 2910 2846 2776
+13 2904 2792 2882
+14 2984 2767 2854
+15 2766 2863 2823
+16 2902 2797 2907
+17 2840 2917 2746
+-----------------------------
+Std Dev 58.39 52.16 43.72
+
+== 250K requests ==
+
+=== Consistent with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 14182 12996 20924
+2 14881 18376 8901
+3 13537 17935 13639
+4 11031 12582 19758
+5 15429 10084 12112
+6 18712 12574 14052
+7 14271 11257 14538
+8 12048 18582 16653
+9 10570 10283 13949
+10 11683 13081 23530
+11 9288 14828 10818
+12 10775 13607 19844
+13 10036 19138 15413
+14 31903 15222 11824
+15 21276 11963 10405
+16 17233 23116 11316
+17 11437 12668 10616
+-----------------------------
+Std Dev 5355.95 3512.39 4096.65
+
+=== Consistent without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 12411 17833 11831
+2 14213 11165 14833
+3 11431 10241 11671
+4 14080 13913 20224
+5 10886 12101 14272
+6 15168 12470 14641
+7 18802 12211 10164
+8 18678 11852 12421
+9 17468 10865 17655
+10 19801 28493 13221
+11 10885 20201 13507
+12 20419 11660 14078
+13 12591 18616 13906
+14 12798 18200 24152
+15 13338 10532 14111
+16 11715 10478 14759
+17 13608 17461 12846
+-----------------------------
+Std Dev 3113.33 4749.97 3256.04
+
+=== Map based without avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 14660 12483 11472
+2 11118 11552 12146
+3 15407 19952 11032
+4 15444 12218 14572
+5 22091 11434 13738
+6 18273 17587 21337
+7 10527 16784 15118
+8 13013 12010 17195
+9 15754 9886 14611
+10 13758 11613 14844
+11 19564 16453 17403
+12 9692 17246 14469
+13 13905 11885 20024
+14 19401 15350 10611
+15 11889 25485 11172
+16 13846 13928 12109
+17 9950 12426 16439
+-----------------------------
+Std Dev 3481.45 3847.74 3031.93
+
+=== Map based with avalanche ===
+
+Servers SDBM DJB2 WT6
+-----------------------------
+1 15546 11454 12871
+2 15388 11464 17587
+3 11767 15527 14785
+4 15843 13214 11420
+5 11129 12192 15083
+6 15647 17875 11051
+7 18723 13629 23006
+8 10938 11295 11223
+9 12653 17202 23347
+10 10108 12867 14178
+11 12116 11190 20523
+12 14982 12341 11881
+13 13221 13929 11828
+14 17642 19621 15320
+15 12410 26171 11721
+16 25075 14764 13544
+17 15104 13557 8924
+-----------------------------
+Std Dev 3521.83 3742.21 4101.2
diff --git a/tests/exp/io_limits.txt b/tests/exp/io_limits.txt
new file mode 100644
index 0000000..03399bc
--- /dev/null
+++ b/tests/exp/io_limits.txt
@@ -0,0 +1,116 @@
+---------- epoll without limits --------
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 47.19 2.671077 56 48093 22397 recv
+ 47.15 2.668840 106 25060 4858 send
+ 2.19 0.124020 10 12150 epoll_ctl
+ 1.96 0.110904 286 388 epoll_wait
+ 0.56 0.031565 47 670 close
+ 0.19 0.010481 28 380 350 connect
+ 0.15 0.008650 25 350 socket
+ 0.14 0.008204 26 320 shutdown
+ 0.14 0.007655 22 355 35 accept
+ 0.12 0.006871 10 670 setsockopt
+ 0.11 0.006194 9 670 fcntl64
+ 0.07 0.004148 12 355 brk
+ 0.04 0.002055 5 389 gettimeofday
+------ ----------- ----------- --------- --------- ----------------
+100.00 5.660664 89850 27640 total
+
+
+---------- sepoll without limit --------
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 49.43 2.770682 97 28486 3861 send
+ 46.48 2.605336 53 49317 23434 recv
+ 2.00 0.111916 206 542 epoll_wait
+ 0.65 0.036325 12 3030 epoll_ctl
+ 0.45 0.025282 38 670 close
+ 0.24 0.013247 34 388 358 connect
+ 0.17 0.009544 27 350 socket
+ 0.16 0.008734 27 320 shutdown
+ 0.11 0.006432 18 357 37 accept
+ 0.10 0.005699 9 670 setsockopt
+ 0.08 0.004724 7 670 fcntl64
+ 0.08 0.004568 6 767 gettimeofday
+ 0.06 0.003127 9 356 brk
+------ ----------- ----------- --------- --------- ----------------
+100.00 5.605616 85923 27690 total
+
+
+---------- sepoll with send limit only --------
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 49.21 2.779349 109 25417 418 send
+ 46.94 2.651058 54 49150 23368 recv
+ 1.77 0.099863 264 378 epoll_wait
+ 0.57 0.032141 14 2351 epoll_ctl
+ 0.46 0.025822 39 670 close
+ 0.25 0.014300 37 387 357 connect
+ 0.19 0.010530 30 350 socket
+ 0.15 0.008656 27 320 shutdown
+ 0.14 0.008008 23 354 34 accept
+ 0.11 0.006051 9 670 setsockopt
+ 0.10 0.005461 8 670 fcntl64
+ 0.07 0.003842 6 604 gettimeofday
+ 0.06 0.003120 9 358 brk
+------ ----------- ----------- --------- --------- ----------------
+100.00 5.648201 81679 24177 total
+
+
+---------- sepoll with send + recv limits --------
+Process 3173 attached - interrupt to quit
+Process 3173 detached
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 49.09 2.802918 105 26771 596 send
+ 47.72 2.724651 89 30761 728 recv
+ 1.12 0.063952 55 1169 epoll_wait
+ 0.47 0.026810 40 676 close
+ 0.44 0.025358 11 2329 epoll_ctl
+ 0.21 0.012255 30 403 367 connect
+ 0.20 0.011135 35 320 shutdown
+ 0.18 0.010313 29 356 socket
+ 0.15 0.008614 6 1351 gettimeofday
+ 0.13 0.007678 21 360 40 accept
+ 0.13 0.007218 11 676 setsockopt
+ 0.10 0.005559 8 676 fcntl64
+ 0.05 0.002882 9 327 brk
+------ ----------- ----------- --------- --------- ----------------
+100.00 5.709343 66175 1731 total
+
+---------- epoll with send+recv limits -----------
+Process 3271 attached - interrupt to quit
+Process 3271 detached
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 46.96 2.742476 124 22193 send
+ 46.55 2.718027 98 27730 recv
+ 2.58 0.150701 11 13331 epoll_ctl
+ 2.30 0.134350 135 998 epoll_wait
+ 0.52 0.030520 45 673 close
+ 0.23 0.013422 42 320 shutdown
+ 0.19 0.011282 29 386 353 connect
+ 0.19 0.011063 31 353 socket
+ 0.12 0.007039 20 359 39 accept
+ 0.11 0.006629 10 673 fcntl64
+ 0.10 0.005920 9 673 setsockopt
+ 0.09 0.005157 5 999 gettimeofday
+ 0.05 0.002885 9 335 brk
+------ ----------- ----------- --------- --------- ----------------
+100.00 5.839471 69023 392 total
+
+
+Conclusion
+----------
+epoll = 89850 syscalls
+sepoll = 85923 syscalls
+epoll+limits = 69023 syscalls
+sepoll+limits = 66175 syscalls
+
+=> limits reduce the number of syscalls by 23%
+=> sepoll reduces the number of syscalls by 4%
+=> sepoll reduces the number of epoll_ctl by 83%
+=> limits reduce the number of epoll_ctl by 24%
+=> limits increase the number of epoll_wait by 115%
+
diff --git a/tests/exp/ip-hash.c b/tests/exp/ip-hash.c
new file mode 100644
index 0000000..8bb2d48
--- /dev/null
+++ b/tests/exp/ip-hash.c
@@ -0,0 +1,202 @@
+/*
+ * Integer hashing tests. These functions work with 32-bit integers, so are
+ * perfectly suited for IPv4 addresses. A few tests show that they may also
+ * be chained for larger keys (eg: IPv6), this way :
+ * f(x[0-3]) = f(f(f(f(x[0])^x[1])^x[2])^x[3])
+ *
+ * See also bob jenkin's site for more info on hashing, and check perfect
+ * hashing for constants (eg: header names).
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <arpa/inet.h>
+#include <math.h>
+
+#define NSERV 8
+#define MAXLINE 1000
+
+
+int counts_id[NSERV][NSERV];
+uint32_t hash_id( uint32_t a)
+{
+ return a;
+}
+
+/* Full-avalanche integer hashing function from Thomas Wang, suitable for use
+ * with a modulo. See below, worth a read !
+ * http://www.concentric.net/~Ttwang/tech/inthash.htm
+ *
+ * See also tests performed by Bob Jenkins (says it's faster than his) :
+ * http://burtleburtle.net/bob/hash/integer.html
+ *
+ * This function is small and fast. It does not seem as smooth as bj6 though.
+ * About 0x40 bytes, 6 shifts.
+ */
+int counts_tw1[NSERV][NSERV];
+uint32_t hash_tw1(uint32_t a)
+{
+ a += ~(a<<15);
+ a ^= (a>>10);
+ a += (a<<3);
+ a ^= (a>>6);
+ a += ~(a<<11);
+ a ^= (a>>16);
+ return a;
+}
+
+/* Thomas Wang's mix function. The multiply is optimized away by the compiler
+ * on most platforms.
+ * It is about equivalent to the one above.
+ */
+int counts_tw2[NSERV][NSERV];
+uint32_t hash_tw2(uint32_t a)
+{
+ a = ~a + (a << 15);
+ a = a ^ (a >> 12);
+ a = a + (a << 2);
+ a = a ^ (a >> 4);
+ a = a * 2057;
+ a = a ^ (a >> 16);
+ return a;
+}
+
+/* Thomas Wang's multiplicative hash function. About 0x30 bytes, and it is
+ * extremely fast on recent processors with a fast multiply. However, it
+ * must not be used on low bits only, as multiples of 0x00100010 only return
+ * even values !
+ */
+int counts_tw3[NSERV][NSERV];
+uint32_t hash_tw3(uint32_t a)
+{
+ a = (a ^ 61) ^ (a >> 16);
+ a = a + (a << 3);
+ a = a ^ (a >> 4);
+ a = a * 0x27d4eb2d;
+ a = a ^ (a >> 15);
+ return a;
+}
+
+
+/* Full-avalanche integer hashing function from Bob Jenkins, suitable for use
+ * with a modulo. It has a very smooth distribution.
+ * http://burtleburtle.net/bob/hash/integer.html
+ * About 0x50 bytes, 6 shifts.
+ */
+int counts_bj6[NSERV][NSERV];
+int counts_bj6x[NSERV][NSERV];
+uint32_t hash_bj6(uint32_t a)
+{
+ a = (a+0x7ed55d16) + (a<<12);
+ a = (a^0xc761c23c) ^ (a>>19);
+ a = (a+0x165667b1) + (a<<5);
+ a = (a+0xd3a2646c) ^ (a<<9);
+ a = (a+0xfd7046c5) + (a<<3);
+ a = (a^0xb55a4f09) ^ (a>>16);
+ return a;
+}
+
+/* Similar function with one more shift and no magic number. It is slightly
+ * slower but provides the overall smoothest distribution.
+ * About 0x40 bytes, 7 shifts.
+ */
+int counts_bj7[NSERV][NSERV];
+int counts_bj7x[NSERV][NSERV];
+uint32_t hash_bj7(uint32_t a)
+{
+ a -= (a<<6);
+ a ^= (a>>17);
+ a -= (a<<9);
+ a ^= (a<<4);
+ a -= (a<<3);
+ a ^= (a<<10);
+ a ^= (a>>15);
+ return a;
+}
+
+
+void count_hash_results(unsigned long hash, int counts[NSERV][NSERV]) {
+ int srv, nsrv;
+
+ for (nsrv = 0; nsrv < NSERV; nsrv++) {
+ srv = hash % (nsrv + 1);
+ counts[nsrv][srv]++;
+ }
+}
+
+void dump_hash_results(char *name, int counts[NSERV][NSERV]) {
+ int srv, nsrv;
+ double err, total_err, max_err;
+
+ printf("%s:\n", name);
+ for (nsrv = 0; nsrv < NSERV; nsrv++) {
+ total_err = 0.0;
+ max_err = 0.0;
+ printf("%02d srv: ", nsrv+1);
+ for (srv = 0; srv <= nsrv; srv++) {
+ err = 100.0*(counts[nsrv][srv] - (double)counts[0][0]/(nsrv+1)) / (double)counts[0][0];
+ //printf("%6d ", counts[nsrv][srv]);
+ printf("% 3.1f%%%c ", err,
+ counts[nsrv][srv]?' ':'*'); /* display '*' when a server is never selected */
+ err = fabs(err);
+ total_err += err;
+ if (err > max_err)
+ max_err = err;
+ }
+ total_err /= (double)(nsrv+1);
+ for (srv = nsrv+1; srv < NSERV; srv++)
+ printf(" ");
+ printf(" avg_err=%3.1f, max_err=%3.1f\n", total_err, max_err);
+ }
+ printf("\n");
+}
+
+int main() {
+ int nr;
+ unsigned int address = 0;
+ unsigned int mask = ~0;
+
+ memset(counts_id, 0, sizeof(counts_id));
+ memset(counts_tw1, 0, sizeof(counts_tw1));
+ memset(counts_tw2, 0, sizeof(counts_tw2));
+ memset(counts_tw3, 0, sizeof(counts_tw3));
+ memset(counts_bj6, 0, sizeof(counts_bj6));
+ memset(counts_bj7, 0, sizeof(counts_bj7));
+
+ address = 0x10000000;
+ mask = 0xffffff00; // user mask to apply to addresses
+ for (nr = 0; nr < 0x10; nr++) {
+ //address += ~nr; // semi-random addresses.
+ //address += 1;
+ address += 0x00000100;
+ //address += 0x11111111;
+ //address += 7;
+ //address += 8;
+ //address += 256;
+ //address += 65536;
+ //address += 131072;
+ //address += 0x00100010; // this increment kills tw3 !
+ count_hash_results(hash_id (address & mask), counts_id); // 0.69s / 100M
+ count_hash_results(hash_tw1(address & mask), counts_tw1); // 1.04s / 100M
+ count_hash_results(hash_tw2(address & mask), counts_tw2); // 1.13s / 100M
+ count_hash_results(hash_tw3(address & mask), counts_tw3); // 1.01s / 100M
+ count_hash_results(hash_bj6(address & mask), counts_bj6); // 1.07s / 100M
+ count_hash_results(hash_bj7(address & mask), counts_bj7); // 1.20s / 100M
+ /* adding the original address after the hash reduces the error
+ * rate in presence of very small data sets (eg: 16 source
+ * addresses for 8 servers). In this case, bj7 is very good.
+ */
+ count_hash_results(hash_bj6(address & mask)+(address&mask), counts_bj6x); // 1.07s / 100M
+ count_hash_results(hash_bj7(address & mask)+(address&mask), counts_bj7x); // 1.20s / 100M
+ }
+
+ dump_hash_results("hash_id", counts_id);
+ dump_hash_results("hash_tw1", counts_tw1);
+ dump_hash_results("hash_tw2", counts_tw2);
+ dump_hash_results("hash_tw3", counts_tw3);
+ dump_hash_results("hash_bj6", counts_bj6);
+ dump_hash_results("hash_bj6x", counts_bj6x);
+ dump_hash_results("hash_bj7", counts_bj7);
+ dump_hash_results("hash_bj7x", counts_bj7x);
+ return 0;
+}
diff --git a/tests/exp/test_hashes.c b/tests/exp/test_hashes.c
new file mode 100644
index 0000000..39cb965
--- /dev/null
+++ b/tests/exp/test_hashes.c
@@ -0,0 +1,559 @@
+/*
+ This file only shows how many operations a hash is able to handle.
+ It doesn't show the distribution or collisions.
+
+ gcc -Wall -O3 -o test_hashes test_hashes.c
+ ./test_hashes |sort -k 3 -r
+ */
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdio.h>
+//#include <stdint.h>
+
+
+static struct timeval timeval_current(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv;
+}
+
+static double timeval_elapsed(struct timeval *tv)
+{
+ struct timeval tv2 = timeval_current();
+ return (tv2.tv_sec - tv->tv_sec) +
+ (tv2.tv_usec - tv->tv_usec)*1.0e-6;
+}
+
+#define HAPROXY_BACKENDS 4
+
+unsigned long haproxy_uri_hash(char *uri, int uri_len){
+
+ unsigned long hash = 0;
+ int c;
+
+ while (uri_len--) {
+ c = *uri++;
+ if (c == '?')
+ break;
+ hash = c + (hash << 6) + (hash << 16) - hash;
+ }
+
+ return hash%HAPROXY_BACKENDS; /* I assume 4 active backends */
+} /* end haproxy_hash() */
+
+/*
+ * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
+ */
+unsigned sax_hash ( void *key, int len )
+{
+ unsigned char *p = key;
+ unsigned h = 0;
+ int i;
+
+ for ( i = 0; i < len; i++ )
+ h ^= ( h << 5 ) + ( h >> 2 ) + p[i];
+
+ return h;
+}
+
+#include <arpa/inet.h>
+/* len 4 for ipv4 and 16 for ipv6 */
+unsigned int haproxy_server_hash(const char *addr, int len){
+ unsigned int h, l;
+ l = h = 0;
+
+ while ((l + sizeof (int)) <= len) {
+ h ^= ntohl(*(unsigned int *)(&addr[l]));
+ l += sizeof (int);
+ }
+ return h %= HAPROXY_BACKENDS;
+}/* end haproxy_server_hash() */
+
+
+int hashpjw(const void *key) {
+
+ const char *ptr;
+ unsigned int val;
+ /*********************************************************************
+ * *
+ * Hash the key by performing a number of bit operations on it. *
+ * *
+ *********************************************************************/
+
+ val = 0;
+ ptr = key;
+
+ while (*ptr != '\0') {
+
+ int tmp;
+
+ val = (val << 4) + (*ptr);
+
+ if((tmp = (val & 0xf0000000))) {
+ val = val ^ (tmp >> 24);
+ val = val ^ tmp;
+ }
+ ptr++;
+ }/* end while */
+
+ return val;
+}/* end hashpjw */
+
+static unsigned long
+hash_djbx33(
+ register unsigned char *key,
+ register size_t len)
+{
+ register unsigned long hash = 5381;
+
+ /* the hash unrolled eight times */
+ for (; len >= 8; len -= 8) {
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ hash = ((hash << 5) + hash) + *key++;
+ }
+ switch (len) {
+ case 7: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 6: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 5: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 4: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 3: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 2: hash = ((hash << 5) + hash) + *key++; /* fallthrough... */
+ case 1: hash = ((hash << 5) + hash) + *key++; break;
+ default: /* case 0: */ break;
+ }
+ return hash;
+}
+
+typedef unsigned long int ub4; /* unsigned 4-byte quantities */
+typedef unsigned char ub1; /* unsigned 1-byte quantities */
+
+ub4 bernstein(ub1 *key, ub4 len, ub4 level){
+ ub4 hash = level;
+ ub4 i;
+ for (i=0; i<len; ++i) hash = 33*hash + key[i];
+ return hash;
+}
+
+/*
+ * http://www.azillionmonkeys.com/qed/hash.html
+ */
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+
+uint32_t SuperFastHash (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ hash += get16bits (data);
+ tmp = (get16bits (data+2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= data[sizeof (uint16_t)] << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += *data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
+
+/*
+ * This variant is about 15% faster.
+ */
+uint32_t SuperFastHash2 (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ register uint32_t next;
+ next = get16bits(data+2);
+ hash += get16bits(data);
+ tmp = (next << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= data[sizeof (uint16_t)] << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += *data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
+
+/*
+ * 32 bit FNV-0 hash type
+ */
+typedef unsigned long Fnv32_t;
+
+/*
+ * fnv_32a_str - perform a 32 bit Fowler/Noll/Vo FNV-1a hash on a string
+ *
+ * input:
+ * str - string to hash
+ * hval - previous hash value or 0 if first call
+ *
+ * returns:
+ * 32 bit hash as a static hash type
+ *
+ * NOTE: To use the recommended 32 bit FNV-1a hash, use FNV1_32A_INIT as the
+ * hval arg on the first call to either fnv_32a_buf() or fnv_32a_str().
+ */
+Fnv32_t
+fnv_32a_str(char *str, Fnv32_t hval)
+{
+ unsigned char *s = (unsigned char *)str; /* unsigned string */
+
+ /*
+ * FNV-1a hash each octet in the buffer
+ */
+ while (*s) {
+
+ /* xor the bottom with the current octet */
+ hval ^= (Fnv32_t)*s++;
+
+/* #define NO_FNV_GCC_OPTIMIZATION */
+ /* multiply by the 32 bit FNV magic prime mod 2^32 */
+#if defined(NO_FNV_GCC_OPTIMIZATION)
+ /*
+ * 32 bit magic FNV-1a prime
+ */
+#define FNV_32_PRIME ((Fnv32_t)0x01000193)
+ hval *= FNV_32_PRIME;
+#else
+ hval += (hval<<1) + (hval<<4) + (hval<<7) + (hval<<8) + (hval<<24);
+#endif
+ }
+
+ /* return our new hash value */
+ return hval;
+}
+
+/*
+ * from lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ */
+
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+/*
+--------------------------------------------------------------------
+ This works on all machines. To be useful, it requires
+ -- that the key be an array of uint32_t's, and
+ -- that the length be the number of uint32_t's in the key
+
+ The function hashword() is identical to hashlittle() on little-endian
+ machines, and identical to hashbig() on big-endian machines,
+ except that the length has to be measured in uint32_ts rather than in
+ bytes. hashlittle() is more complicated than hashword() only because
+ hashlittle() has to dance around fitting the key bytes into registers.
+--------------------------------------------------------------------
+*/
+uint32_t hashword(
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ return c;
+}
+
+/* from K&R book site 139 */
+#define HASHSIZE 101
+
+unsigned kr_hash(char *s){
+ unsigned hashval;
+
+ for(hashval = 0; *s != '\0';s++)
+ hashval = *s + 31 * hashval;
+
+ return hashval % HASHSIZE;
+
+} /* end kr_hash() */
+
+unsigned fnv_hash ( void *key, int len )
+{
+ unsigned char *p = key;
+ unsigned h = 2166136261;
+ int i;
+
+ for ( i = 0; i < len; i++ )
+ h = ( h * 16777619 ) ^ p[i];
+
+ return h;
+}
+
+unsigned oat_hash ( void *key, int len )
+{
+ unsigned char *p = key;
+ unsigned h = 0;
+ int i;
+
+ for ( i = 0; i < len; i++ ) {
+ h += p[i];
+ h += ( h << 10 );
+ h ^= ( h >> 6 );
+ }
+
+ h += ( h << 3 );
+ h ^= ( h >> 11 );
+ h += ( h << 15 );
+
+ return h;
+}
+
+unsigned wt_hash ( void *key, int len )
+{
+ unsigned char *p = key;
+ unsigned h = 0x783c965aUL;
+ unsigned step = 16;
+
+ for (; len > 0; len--) {
+ h ^= *p * 9;
+ p++;
+ h = (h << step) | (h >> (32-step));
+ step ^= h;
+ step &= 0x1F;
+ }
+
+ return h;
+}
+
+
+#define run_test(fct, args) { \
+ unsigned long loop, count; \
+ volatile unsigned long result; \
+ double delta; \
+ struct timeval tv; \
+ fprintf(stderr, "Starting %s\n", #fct); \
+ tv = timeval_current(); \
+ count = 0; \
+ do { \
+ delta = timeval_elapsed(&tv); \
+ for (loop = 0; loop < 1000; loop++) { \
+ result = fct args; \
+ count++; \
+ } \
+ } while (delta < 1.0); \
+ fprintf(stdout, "%-20s : %10.0f run/sec\n", #fct, count/delta); \
+ fflush(stdout); \
+}
+
+int main(){
+
+ char **start;
+ int len;
+
+ char *urls[] = {
+ "http://www.microsoft.com/shared/core/1/webservice/navigation.asmx/DisplayDownlevelNavHtml",
+ NULL
+ };
+
+ start = urls;
+ len = strlen(*urls);
+
+ run_test(wt_hash, (*urls, len));
+ run_test(SuperFastHash2, (*urls, len));
+ run_test(SuperFastHash, (*urls, len));
+ run_test(haproxy_uri_hash, (*urls, len));
+ run_test(haproxy_server_hash, (*urls, len));
+ run_test(hashpjw, (*urls));
+ run_test(hash_djbx33, ((unsigned char *)*urls, len));
+ run_test(bernstein, ((unsigned char *)*urls, len, 4));
+ run_test(fnv_32a_str, (*urls, 0));
+ run_test(hashword, ((const uint32_t *)*urls,strlen(*urls),0));
+ run_test(kr_hash, (*urls));
+ run_test(sax_hash, (*urls, len));
+ run_test(fnv_hash, (*urls, len));
+ run_test(oat_hash, (*urls, len));
+
+ return 0;
+
+}/* end main() */
diff --git a/tests/exp/testinet.c b/tests/exp/testinet.c
new file mode 100644
index 0000000..0eb87e9
--- /dev/null
+++ b/tests/exp/testinet.c
@@ -0,0 +1,28 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <sys/resource.h>
+#include <time.h>
+#include <regex.h>
+#include <syslog.h>
+
+
+int main() {
+ printf("sizeof sockaddr=%d\n", sizeof(struct sockaddr));
+ printf("sizeof sockaddr_in=%d\n", sizeof(struct sockaddr_in));
+ printf("sizeof sockaddr_in6=%d\n", sizeof(struct sockaddr_in6));
+ return 0;
+}
diff --git a/tests/exp/uri_hash.c b/tests/exp/uri_hash.c
new file mode 100644
index 0000000..0b5d755
--- /dev/null
+++ b/tests/exp/uri_hash.c
@@ -0,0 +1,377 @@
+#include <stdio.h>
+#include <string.h>
+#include <arpa/inet.h>
+
+#define NSERV 10
+#define MAXLINE 1000
+
+char line[MAXLINE];
+
+int counts_gd1[NSERV][NSERV];
+static unsigned long hash_gd1(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++))
+ hash = c + (hash << 6) + (hash << 16) - hash;
+
+ return hash;
+}
+
+int counts_gd2[NSERV][NSERV];
+static unsigned long hash_gd2(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++)) {
+ if (c == '?' || c == '\n')
+ break;
+ hash = c + (hash << 6) + (hash << 16) - hash;
+ }
+
+ return hash;
+}
+
+
+int counts_gd3[NSERV][NSERV];
+static unsigned long hash_gd3(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++)) {
+ if (c == '?' || c == '\n')
+ break;
+ hash = c - (hash << 3) + (hash << 15) - hash;
+ }
+
+ return hash;
+}
+
+
+int counts_gd4[NSERV][NSERV];
+static unsigned long hash_gd4(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++)) {
+ if (c == '?' || c == '\n')
+ break;
+ hash = hash + (hash << 6) - (hash << 15) - c;
+ }
+
+ return hash;
+}
+
+
+int counts_gd5[NSERV][NSERV];
+static unsigned long hash_gd5(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++)) {
+ if (c == '?' || c == '\n')
+ break;
+ hash = hash + (hash << 2) - (hash << 19) - c;
+ }
+
+ return hash;
+}
+
+
+int counts_gd6[NSERV][NSERV];
+static unsigned long hash_gd6(char *uri)
+{
+ unsigned long hash = 0;
+ int c;
+
+ while ((c = *uri++)) {
+ if (c == '?' || c == '\n')
+ break;
+ hash = hash + (hash << 2) - (hash << 22) - c;
+ }
+
+ return hash;
+}
+
+
+int counts_wt1[NSERV][NSERV];
+static unsigned long hash_wt1(int hsize, char *string) {
+ int bits;
+ unsigned long data, val;
+
+ bits = val = data = 0;
+ while (*string) {
+ if (*string == '?' || *string == '\n')
+ break;
+ data |= ((unsigned long)(unsigned char)*string) << bits;
+ bits += 8;
+ while (bits >= hsize) {
+ val ^= data - (val >> hsize);
+ bits -= hsize;
+ data >>= hsize;
+ }
+ string++;
+ }
+ val ^= data;
+ while (val > ((1 << hsize) - 1)) {
+ val = (val & ((1 << hsize) - 1)) ^ (val >> hsize);
+ }
+ return val;
+}
+
+/*
+ * efficient hash : no duplicate on the first 65536 values of 2 bytes.
+ * less than 0.1% duplicates for the first 1.6 M values of 3 bytes.
+ */
+int counts_wt2[NSERV][NSERV];
+typedef unsigned int u_int32_t;
+
+static inline u_int32_t shl32(u_int32_t i, int count) {
+ if (count == 32)
+ return 0;
+ return i << count;
+}
+
+static inline u_int32_t shr32(u_int32_t i, int count) {
+ if (count == 32)
+ return 0;
+ return i >> count;
+}
+
+static unsigned int rev32(unsigned int c) {
+ c = ((c & 0x0000FFFF) << 16)| ((c & 0xFFFF0000) >> 16);
+ c = ((c & 0x00FF00FF) << 8) | ((c & 0xFF00FF00) >> 8);
+ c = ((c & 0x0F0F0F0F) << 4) | ((c & 0xF0F0F0F0) >> 4);
+ c = ((c & 0x33333333) << 2) | ((c & 0xCCCCCCCC) >> 2);
+ c = ((c & 0x55555555) << 1) | ((c & 0xAAAAAAAA) >> 1);
+ return c;
+}
+
+int hash_wt2(const char *src, int len) {
+ unsigned int i = 0x3C964BA5; /* as many ones as zeroes */
+ unsigned int j;
+ unsigned int ih, il;
+ int bit;
+
+ while (len--) {
+ j = (unsigned char)*src++;
+ if (j == '?' || j == '\n')
+ break;
+ bit = rev32(j - i);
+ bit = bit - (bit >> 3) + (bit >> 16) - j;
+
+ bit &= 0x1f;
+ ih = shr32(i, bit);
+ il = i & (shl32(1, bit) - 1);
+ i = shl32(il, 32-bit) - ih - ~j;
+ }
+ return i;
+}
+
+
+//typedef unsigned int uint32_t;
+//typedef unsigned short uint8_t;
+//typedef unsigned char uint16_t;
+
+/*
+ * http://www.azillionmonkeys.com/qed/hash.html
+ */
+#undef get16bits
+#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
+ || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
+#define get16bits(d) (*((const uint16_t *) (d)))
+#endif
+
+#if !defined (get16bits)
+#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
+ +(uint32_t)(((const uint8_t *)(d))[0]) )
+#endif
+
+/*
+ * This function has a hole of 11 unused bits in bytes 2 and 3 of each block of
+ * 32 bits.
+ */
+int counts_SuperFastHash[NSERV][NSERV];
+
+uint32_t SuperFastHash (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ hash += get16bits (data);
+ tmp = (get16bits (data+2) << 11) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= data[sizeof (uint16_t)] << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += *data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
+
+/*
+ * This variant uses all bits from the input block, and is about 15% faster.
+ */
+int counts_SuperFastHash2[NSERV][NSERV];
+uint32_t SuperFastHash2 (const char * data, int len) {
+uint32_t hash = len, tmp;
+int rem;
+
+ if (len <= 0 || data == NULL) return 0;
+
+ rem = len & 3;
+ len >>= 2;
+
+ /* Main loop */
+ for (;len > 0; len--) {
+ register uint32_t next;
+ next = get16bits(data+2);
+ hash += get16bits(data);
+ tmp = ((next << 11) | (next >> 21)) ^ hash;
+ hash = (hash << 16) ^ tmp;
+ data += 2*sizeof (uint16_t);
+ hash += hash >> 11;
+ }
+
+ /* Handle end cases */
+ switch (rem) {
+ case 3: hash += get16bits (data);
+ hash ^= hash << 16;
+ hash ^= data[sizeof (uint16_t)] << 18;
+ hash += hash >> 11;
+ break;
+ case 2: hash += get16bits (data);
+ hash ^= hash << 11;
+ hash += hash >> 17;
+ break;
+ case 1: hash += *data;
+ hash ^= hash << 10;
+ hash += hash >> 1;
+ }
+
+ /* Force "avalanching" of final 127 bits */
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+
+ return hash;
+}
+
+/* len 4 for ipv4 and 16 for ipv6 */
+int counts_srv[NSERV][NSERV];
+unsigned int haproxy_server_hash(const char *addr, int len){
+ unsigned int h, l;
+ l = h = 0;
+
+ while ((l + sizeof (int)) <= len) {
+ h ^= ntohl(*(unsigned int *)(&addr[l]));
+ l += sizeof (int);
+ }
+ return h;
+}/* end haproxy_server_hash() */
+
+
+
+void count_hash_results(unsigned long hash, int counts[NSERV][NSERV]) {
+ int srv, nsrv;
+
+ for (nsrv = 0; nsrv < NSERV; nsrv++) {
+ srv = hash % (nsrv + 1);
+ counts[nsrv][srv]++;
+ }
+}
+
+void dump_hash_results(char *name, int counts[NSERV][NSERV]) {
+ int srv, nsrv;
+
+ printf("%s:\n", name);
+ for (nsrv = 0; nsrv < NSERV; nsrv++) {
+ printf("%02d srv: ", nsrv+1);
+ for (srv = 0; srv <= nsrv; srv++) {
+ //printf("%6d ", counts[nsrv][srv]);
+ //printf("%3.1f ", (100.0*counts[nsrv][srv]) / (double)counts[0][0]);
+ printf("%3.1f ", 100.0*(counts[nsrv][srv] - (double)counts[0][0]/(nsrv+1)) / (double)counts[0][0]);
+ }
+ printf("\n");
+ }
+ printf("\n");
+}
+
+int main() {
+ memset(counts_gd1, 0, sizeof(counts_gd1));
+ memset(counts_gd2, 0, sizeof(counts_gd2));
+ memset(counts_gd3, 0, sizeof(counts_gd3));
+ memset(counts_gd4, 0, sizeof(counts_gd4));
+ memset(counts_gd5, 0, sizeof(counts_gd5));
+ memset(counts_gd6, 0, sizeof(counts_gd6));
+ memset(counts_wt1, 0, sizeof(counts_wt1));
+ memset(counts_wt2, 0, sizeof(counts_wt2));
+ memset(counts_srv, 0, sizeof(counts_srv));
+ memset(counts_SuperFastHash, 0, sizeof(counts_SuperFastHash));
+ memset(counts_SuperFastHash2, 0, sizeof(counts_SuperFastHash2));
+
+ while (fgets(line, MAXLINE, stdin) != NULL) {
+ count_hash_results(hash_gd1(line), counts_gd1);
+ count_hash_results(hash_gd2(line), counts_gd2);
+ count_hash_results(hash_gd3(line), counts_gd3);
+ count_hash_results(hash_gd4(line), counts_gd4);
+ count_hash_results(hash_gd5(line), counts_gd5);
+ count_hash_results(hash_gd6(line), counts_gd6);
+ count_hash_results(hash_wt1(31, line), counts_wt1);
+ count_hash_results(hash_wt2(line, strlen(line)), counts_wt2);
+ count_hash_results(haproxy_server_hash(line, strlen(line)), counts_srv);
+ count_hash_results(SuperFastHash(line, strlen(line)), counts_SuperFastHash);
+ count_hash_results(SuperFastHash2(line, strlen(line)), counts_SuperFastHash2);
+ }
+
+ dump_hash_results("hash_gd1", counts_gd1);
+ dump_hash_results("hash_gd2", counts_gd2);
+ dump_hash_results("hash_gd3", counts_gd3);
+ dump_hash_results("hash_gd4", counts_gd4);
+ dump_hash_results("hash_gd5", counts_gd5);
+ dump_hash_results("hash_gd6", counts_gd6);
+ dump_hash_results("hash_wt1", counts_wt1);
+ dump_hash_results("hash_wt2", counts_wt2);
+ dump_hash_results("haproxy_server_hash", counts_srv);
+ dump_hash_results("SuperFastHash", counts_SuperFastHash);
+ dump_hash_results("SuperFastHash2", counts_SuperFastHash2);
+
+ return 0;
+}
diff --git a/tests/unit/ist.c b/tests/unit/ist.c
new file mode 100644
index 0000000..43b3438
--- /dev/null
+++ b/tests/unit/ist.c
@@ -0,0 +1,700 @@
+/* ist.c: test code for ist.h
+ *
+ * Build with :
+ * gcc -Iinclude -Wall -W -fomit-frame-pointer -Os tests/ist.c
+ * gcc -Iinclude -Wall -W -fomit-frame-pointer -O1 tests/ist.c
+ * gcc -Iinclude -Wall -W -fomit-frame-pointer -O2 tests/ist.c
+ * gcc -Iinclude -Wall -W -fomit-frame-pointer -O3 tests/ist.c
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <import/ist.h>
+
+
+// pre-extracted from ist.h using the following expression :
+// sed -n '/^static inline/s:^\([^ ]\+\) \([^ ]\+\) \(.*[* ]\)\([^* ]\+(\)\(.*\):\3f_\4\5 { return \4); }\nstatic int test_\4)\n{\n\treturn 0;\n}\n:p' include/common/ist.h
+// sed -n '/^static inline/s:^\([^ ]\+\) \([^ ]\+\) \(.*[* ]\)\([^* ]\+(\)\(.*\):\tif (test_\4)) printf("\4)\\n");:p' include/common/ist.h
+// sed -n '/^static inline/s:^\([^ ]\+\) \([^ ]\+\) \(.*[* ]\)\([^* ]\+(\)\(.*\):\tprintf("%4d \4)\\n", test_\4));:p' include/common/ist.h
+
+struct ist f_ist(const void *str) { return ist(str); }
+static int test_ist()
+{
+ if (ist("foo").ptr == NULL)
+ return __LINE__;
+ if (ist("foo").len != 3)
+ return __LINE__;
+ if (strncmp(ist("foo").ptr, "foo", 3) != 0)
+ return 3;
+ return 0;
+}
+
+struct ist f_ist2(const void *ptr, size_t len) { return ist2(ptr, len); }
+int test_ist2()
+{
+ if (ist2("foo", 3).ptr == NULL)
+ return __LINE__;
+ if (ist2("foo", 3).len != 3)
+ return __LINE__;
+ if (strncmp(ist2("foo", 3).ptr, "foo", 3) != 0)
+ return __LINE__;
+ return 0;
+}
+
+size_t f_istlen(struct ist ist) { return istlen(ist); }
+int test_istlen()
+{
+ if (istlen(ist("foo")) != 3)
+ return __LINE__;
+ if (istlen(ist("")) != 0)
+ return __LINE__;
+ if (istlen(ist(NULL)) != 0)
+ return __LINE__;
+ return 0;
+}
+
+struct ist f_istnext(struct ist ist) { return istnext(ist); }
+int test_istnext()
+{
+ if (istlen(istnext(ist("foo"))) != 2)
+ return __LINE__;
+ if (strncmp(istnext(ist("foo")).ptr, "oo", 2) != 0)
+ return __LINE__;
+ if (istnext(istnext(istnext(istnext(ist("foo"))))).len != 0)
+ return __LINE__;
+ return 0;
+}
+
+struct ist f_istpad(void *buf, const struct ist ist) { return istpad(buf, ist); }
+int test_istpad()
+{
+ char buf[5] = "xxxxx";
+
+ if (strncmp(istpad(buf, ist("foo")).ptr, "foo", 3) != 0)
+ return __LINE__;
+ if (strncmp(buf, "foo", 3) != 0)
+ return __LINE__;
+ if (buf[3] != 0 || buf[4] != 'x')
+ return __LINE__;
+ return 0;
+}
+
+struct ist f_isttrim(struct ist ist, size_t size) { return isttrim(ist, size); }
+int test_isttrim()
+{
+ if (isttrim(ist("foo"), 5).ptr == NULL)
+ return __LINE__;
+
+ if (isttrim(ist("foo"), 5).len != 3)
+ return __LINE__;
+
+ if (strncmp(isttrim(ist("foo"), 5).ptr, "foo", 3) != 0)
+ return __LINE__;
+
+ if (isttrim(ist("foo"), 2).ptr == NULL)
+ return __LINE__;
+
+ if (isttrim(ist("foo"), 2).len != 2)
+ return __LINE__;
+
+ if (strncmp(isttrim(ist("foo"), 2).ptr, "fo", 2) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+struct ist f_istzero(struct ist ist, size_t size) { return istzero(ist, size); }
+int test_istzero()
+{
+ char buf[5] = "xxxxx";
+
+ if (istzero(ist2(buf, 5), 10).ptr != buf)
+ return __LINE__;
+
+ if (istzero(ist2(buf, 5), 10).len != 5)
+ return __LINE__;
+
+ if (istzero(ist2(buf, 5), 5).len != 4)
+ return __LINE__;
+
+ if (buf[4] != 0)
+ return __LINE__;
+
+ if (istzero(ist2(buf, 5), 0).len != 0)
+ return __LINE__;
+
+ if (buf[0] == 0)
+ return __LINE__;
+
+ return 0;
+}
+
+int f_istdiff(const struct ist ist1, const struct ist ist2) { return istdiff(ist1, ist2); }
+int test_istdiff()
+{
+ if (istdiff(ist(""), ist("")) != 0)
+ return __LINE__;
+
+ if (istdiff(ist("bar"), ist("bar")) != 0)
+ return __LINE__;
+
+ if (istdiff(ist("foo"), ist("")) <= 0)
+ return __LINE__;
+
+ if (istdiff(ist(""), ist("bar")) >= 0)
+ return __LINE__;
+
+ if (istdiff(ist("foo"), ist("bar")) <= 0)
+ return __LINE__;
+
+ if (istdiff(ist("fo"), ist("bar")) <= 0)
+ return __LINE__;
+
+ if (istdiff(ist("bar"), ist("foo")) >= 0)
+ return __LINE__;
+
+ if (istdiff(ist("bar"), ist("fo")) >= 0)
+ return __LINE__;
+
+ return 0;
+}
+
+int f_istmatch(const struct ist ist1, const struct ist ist2) { return istmatch(ist1, ist2); }
+int test_istmatch()
+{
+ if (istmatch(ist(""), ist("")) == 0)
+ return __LINE__;
+
+ if (istmatch(ist("bar"), ist("bar")) == 0)
+ return __LINE__;
+
+ if (istmatch(ist("foo"), ist("")) == 0)
+ return __LINE__;
+
+ if (istmatch(ist(""), ist("bar")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("foo"), ist("bar")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("fo"), ist("bar")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("bar"), ist("foo")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("bar"), ist("fo")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("foo"), ist("foobar")) != 0)
+ return __LINE__;
+
+ if (istmatch(ist("foobar"), ist("foo")) == 0)
+ return __LINE__;
+
+ if (istmatch(ist("foobar"), ist("bar")) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+int f_istnmatch(struct ist ist1, struct ist ist2, size_t count) { return istnmatch(ist1, ist2, count); }
+int test_istnmatch()
+{
+ if (istnmatch(ist(""), ist(""), 1) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist(""), ist(""), 0) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("bar"), ist("bar"), 4) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("bar"), ist("bar"), 2) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("bar"), ist("baz"), 2) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist(""), 1) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist(""), 0) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist(""), ist("bar"), 3) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist(""), ist("bar"), 0) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist("bar"), 4) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist("bar"), 0) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("fo"), ist("bar"), 2) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("bar"), ist("foo"), 3) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("bar"), ist("fo"), 2) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist("foobar"), 4) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foo"), ist("foobar"), 3) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foobar"), ist("fooz"), 4) != 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foobar"), ist("fooz"), 3) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foobar"), ist("fooz"), 2) == 0)
+ return __LINE__;
+
+ if (istnmatch(ist("foobar"), ist("bar"), 3) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+int f_isteq(const struct ist ist1, const struct ist ist2) { return isteq(ist1, ist2); }
+int test_isteq()
+{
+ if (isteq(ist(""), ist("")) == 0)
+ return __LINE__;
+
+ if (isteq(ist("bar"), ist("bar")) == 0)
+ return __LINE__;
+
+ if (isteq(ist("foo"), ist("")) != 0)
+ return __LINE__;
+
+ if (isteq(ist(""), ist("bar")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("foo"), ist("bar")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("fo"), ist("bar")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("bar"), ist("foo")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("bar"), ist("fo")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("foo"), ist("foobar")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("foobar"), ist("foo")) != 0)
+ return __LINE__;
+
+ if (isteq(ist("foobar"), ist("bar")) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+int f_istneq(struct ist ist1, struct ist ist2, size_t count) { return istneq(ist1, ist2, count); }
+int test_istneq()
+{
+ if (istneq(ist(""), ist(""), 1) == 0)
+ return __LINE__;
+
+ if (istneq(ist(""), ist(""), 0) == 0)
+ return __LINE__;
+
+ if (istneq(ist("bar"), ist("bar"), 4) == 0)
+ return __LINE__;
+
+ if (istneq(ist("bar"), ist("bar"), 2) == 0)
+ return __LINE__;
+
+ if (istneq(ist("bar"), ist("baz"), 2) == 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist(""), 1) != 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist(""), 0) == 0)
+ return __LINE__;
+
+ if (istneq(ist(""), ist("bar"), 3) != 0)
+ return __LINE__;
+
+ if (istneq(ist(""), ist("bar"), 0) == 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist("bar"), 4) != 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist("bar"), 0) == 0)
+ return __LINE__;
+
+ if (istneq(ist("fo"), ist("bar"), 2) != 0)
+ return __LINE__;
+
+ if (istneq(ist("bar"), ist("foo"), 3) != 0)
+ return __LINE__;
+
+ if (istneq(ist("bar"), ist("fo"), 2) != 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist("foobar"), 4) != 0)
+ return __LINE__;
+
+ if (istneq(ist("foo"), ist("foobar"), 3) == 0)
+ return __LINE__;
+
+ if (istneq(ist("foobar"), ist("fooz"), 4) != 0)
+ return __LINE__;
+
+ if (istneq(ist("foobar"), ist("fooz"), 3) == 0)
+ return __LINE__;
+
+ if (istneq(ist("foobar"), ist("fooz"), 2) == 0)
+ return __LINE__;
+
+ if (istneq(ist("foobar"), ist("bar"), 3) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+ssize_t f_istcpy(struct ist *dst, const struct ist src, size_t count) { return istcpy(dst, src, count); }
+int test_istcpy()
+{
+ char buf[100] = "foobar";
+ struct ist dst = ist(buf);
+
+ if (istcpy(&dst, ist("FOO"), sizeof(buf)) != 3)
+ return __LINE__;
+
+ if (dst.len != 3)
+ return __LINE__;
+
+ if (strcmp(buf, "FOObar") != 0)
+ return __LINE__;
+
+ if (istcpy(&dst, ist("foo"), 2) != -1)
+ return __LINE__;
+
+ if (strcmp(buf, "foObar") != 0)
+ return __LINE__;
+
+ if (istcpy(&dst, ist("foo"), 3) != 3)
+ return __LINE__;
+
+ if (strcmp(buf, "foobar") != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+ssize_t f_istscpy(struct ist *dst, const struct ist src, size_t count) { return istscpy(dst, src, count); }
+int test_istscpy()
+{
+ char buf[100] = "foobar";
+ struct ist dst = ist(buf);
+
+ if (istscpy(&dst, ist("FOO"), sizeof(buf)) != 3)
+ return __LINE__;
+
+ if (dst.len != 3)
+ return __LINE__;
+
+ if (memcmp(buf, "FOO\0ar", 6) != 0)
+ return __LINE__;
+
+ if (istscpy(&dst, ist("foo"), 3) != -1)
+ return __LINE__;
+
+ if (memcmp(buf, "fo\0\0ar", 6) != 0)
+ return __LINE__;
+
+ if (istscpy(&dst, ist("foo"), 3) != -1)
+ return __LINE__;
+
+ if (istscpy(&dst, ist("foo"), 4) != 3)
+ return __LINE__;
+
+ if (memcmp(buf, "foo\0ar", 6) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+ssize_t f_istcat(struct ist *dst, const struct ist src, size_t count) { return istcat(dst, src, count); }
+int test_istcat()
+{
+ char buf[11] = "foobar";
+ struct ist dst = ist(buf);
+
+ if (istcat(&dst, ist("FOO"), sizeof(buf)) != 9)
+ return __LINE__;
+
+ if (strcmp(buf, "foobarFOO") != 0)
+ return __LINE__;
+
+ if (istcat(&dst, ist("foo"), 10) != -1)
+ return __LINE__;
+
+ if (dst.len != 10)
+ return __LINE__;
+
+ if (strncmp(buf, "foobarFOOf", 10) != 0)
+ return __LINE__;
+
+ if (istcat(&dst, ist("foo"), 3) != -1)
+ return __LINE__;
+
+ if (dst.len != 10)
+ return __LINE__;
+
+ if (strncmp(buf, "foobar", 6) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+ssize_t f_istscat(struct ist *dst, const struct ist src, size_t count) { return istscat(dst, src, count); }
+int test_istscat()
+{
+ char buf[11] = "foobar";
+ struct ist dst = ist(buf);
+
+ if (istscat(&dst, ist("FOO"), sizeof(buf)) != 9)
+ return __LINE__;
+
+ if (strcmp(buf, "foobarFOO") != 0)
+ return __LINE__;
+
+ if (istscat(&dst, ist("foo"), sizeof(buf)) != -1)
+ return __LINE__;
+
+ if (dst.len != 10)
+ return __LINE__;
+
+ if (strncmp(buf, "foobarFOOf", 10) != 0)
+ return __LINE__;
+
+ if (istscat(&dst, ist("foo"), 3) != -1)
+ return __LINE__;
+
+ if (dst.len != 10)
+ return __LINE__;
+
+ if (strncmp(buf, "foobar", 6) != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+char *f_istchr(const struct ist ist, char chr) { return istchr(ist, chr); }
+int test_istchr()
+{
+ struct ist foobar = ist("foobar");
+
+ if (istchr(foobar, 'f') != foobar.ptr)
+ return __LINE__;
+
+ if (istchr(foobar, 'o') != foobar.ptr + 1)
+ return __LINE__;
+
+ if (istchr(foobar, 'r') != foobar.ptr + 5)
+ return __LINE__;
+
+ if (istchr(foobar, 'X') != NULL)
+ return __LINE__;
+
+ if (istchr(foobar, 0) != NULL)
+ return __LINE__;
+
+ return 0;
+}
+
+struct ist f_istfind(struct ist ist, char chr) { return istfind(ist, chr); }
+int test_istfind()
+{
+ struct ist foobar = ist("foobar");
+
+ if (istfind(foobar, 'f').ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istfind(foobar, 'f').len != 6)
+ return __LINE__;
+
+ if (istfind(foobar, 'o').ptr != foobar.ptr + 1)
+ return __LINE__;
+
+ if (istfind(foobar, 'o').len != 5)
+ return __LINE__;
+
+ if (istfind(foobar, 'r').ptr != foobar.ptr + 5)
+ return __LINE__;
+
+ if (istfind(foobar, 'r').len != 1)
+ return __LINE__;
+
+ if (istfind(foobar, 'X').ptr != foobar.ptr + foobar.len)
+ return __LINE__;
+
+ if (istfind(foobar, 'X').len != 0)
+ return __LINE__;
+
+ if (istfind(foobar, 0).ptr != foobar.ptr + foobar.len)
+ return __LINE__;
+
+ if (istfind(foobar, 0).len != 0)
+ return __LINE__;
+
+ return 0;
+}
+
+struct ist f_istskip(struct ist ist, char chr) { return istskip(ist, chr); }
+int test_istskip()
+{
+ struct ist foobar = ist("foobar");
+ struct ist r = ist("r");
+
+ if (istskip(foobar, 'X').ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istskip(foobar, 'X').len != foobar.len)
+ return __LINE__;
+
+ if (istskip(foobar, 'o').ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istskip(foobar, 'o').len != foobar.len)
+ return __LINE__;
+
+ if (istskip(foobar, 'f').ptr != foobar.ptr + 1)
+ return __LINE__;
+
+ if (istskip(foobar, 'f').len != foobar.len - 1)
+ return __LINE__;
+
+ if (istskip(r, 'r').ptr != r.ptr + 1)
+ return __LINE__;
+
+ if (istskip(r, 'r').len != r.len - 1)
+ return __LINE__;
+
+ if (istskip(foobar, 'X').ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istskip(foobar, 'X').len != foobar.len)
+ return __LINE__;
+
+ if (istskip(r, 0).ptr != r.ptr)
+ return __LINE__;
+
+ if (istskip(r, 0).len != r.len)
+ return __LINE__;
+
+ return 0;
+}
+
+struct ist f_istist(struct ist ist, const struct ist pat) { return istist(ist, pat); }
+int test_istist()
+{
+ struct ist foobar = ist("foobar");
+
+ if (istist(foobar, ist("f")).ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istist(foobar, ist("f")).len != foobar.len)
+ return __LINE__;
+
+ if (istist(foobar, ist("foob")).ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istist(foobar, ist("foob")).len != foobar.len)
+ return __LINE__;
+
+ if (istist(foobar, ist("foobar")).ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istist(foobar, ist("foobar")).len != foobar.len)
+ return __LINE__;
+
+ if (istist(foobar, ist("o")).ptr != foobar.ptr + 1)
+ return __LINE__;
+
+ if (istist(foobar, ist("o")).len != foobar.len - 1)
+ return __LINE__;
+
+ if (istist(foobar, ist("ooba")).ptr != foobar.ptr + 1)
+ return __LINE__;
+
+ if (istist(foobar, ist("ooba")).len != foobar.len - 1)
+ return __LINE__;
+
+ if (istist(foobar, ist("r")).ptr != foobar.ptr + 5)
+ return __LINE__;
+
+ if (istist(foobar, ist("r")).len != foobar.len - 5)
+ return __LINE__;
+
+ if (istist(foobar, ist("X")).ptr != NULL)
+ return __LINE__;
+
+ if (istist(foobar, ist("X")).len != 0)
+ return __LINE__;
+
+ if (istist(foobar, ist("oobaX")).ptr != NULL)
+ return __LINE__;
+
+ if (istist(foobar, ist("oobaX")).len != 0)
+ return __LINE__;
+
+ if (istist(foobar, ist("oobarX")).ptr != NULL)
+ return __LINE__;
+
+ if (istist(foobar, ist("oobarX")).len != 0)
+ return __LINE__;
+
+ if (istist(foobar, ist("")).ptr != foobar.ptr)
+ return __LINE__;
+
+ if (istist(foobar, ist("")).len != foobar.len)
+ return __LINE__;
+
+ return 0;
+}
+
+
+int main(void)
+{
+ printf("%4d ist()\n", test_ist());
+ printf("%4d ist2()\n", test_ist2());
+ printf("%4d istlen()\n", test_istlen());
+ printf("%4d istnext()\n", test_istnext());
+ printf("%4d istpad()\n", test_istpad());
+ printf("%4d isttrim()\n", test_isttrim());
+ printf("%4d istzero()\n", test_istzero());
+ printf("%4d istdiff()\n", test_istdiff());
+ printf("%4d istmatch()\n", test_istmatch());
+ printf("%4d istnmatch()\n", test_istnmatch());
+ printf("%4d isteq()\n", test_isteq());
+ printf("%4d istneq()\n", test_istneq());
+ printf("%4d istcpy()\n", test_istcpy());
+ printf("%4d istscpy()\n", test_istscpy());
+ printf("%4d istcat()\n", test_istcat());
+ printf("%4d istscat()\n", test_istscat());
+ printf("%4d istchr()\n", test_istchr());
+ printf("%4d istfind()\n", test_istfind());
+ printf("%4d istskip()\n", test_istskip());
+ printf("%4d istist()\n", test_istist());
+
+ return 0;
+}
diff --git a/tests/unit/test-1-among.c b/tests/unit/test-1-among.c
new file mode 100644
index 0000000..bd19192
--- /dev/null
+++ b/tests/unit/test-1-among.c
@@ -0,0 +1,105 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+
+/* Xorshift-style pseudo-random generator used to feed the distribution
+ * tests; it is NOT suitable for cryptographic use. The shift triplet is
+ * picked according to the word size: 13/17/5 for 32-bit longs, 13/7/17
+ * for 64-bit ones. Starting from the non-zero seed below, the state never
+ * becomes zero, hence the "cannot be zero" note at the call site.
+ */
+static inline unsigned long statistical_prng()
+{
+	static unsigned long statistical_prng_state = 2463534242U; /* non-zero seed */
+	unsigned long x = statistical_prng_state;
+
+	if (sizeof(long) <= 4) {
+		x ^= x << 13;
+		x ^= x >> 17;
+		x ^= x << 5;
+	} else {
+		x ^= x << 13;
+		x ^= x >> 7;
+		x ^= x << 17;
+	}
+	return statistical_prng_state = x; /* store and return the new state */
+}
+
+/* returns the position of one bit set in <v>, starting at position <bit>, and
+ * searching in other halves if not found. This is intended to be used to
+ * report the position of one bit set among several based on a counter or a
+ * random generator while preserving a relatively good distribution so that
+ * values made of holes in the middle do not see one of the bits around the
+ * hole being returned much more often than the other one. It can be seen as a
+ * disturbed ffsl() where the initial search starts at bit <bit>. The look up
+ * is performed in O(logN) time for N bit words, yielding a bit among 64 in
+ * about 16 cycles. Passing value 0 for <v> makes no sense and -1 is returned
+ * in this case.
+ */
+int one_among(unsigned long v, int bit)
+{
+	/* note, these masks may be produced by ~0UL/((1UL<<scale)+1) but
+	 * that's more expensive.
+	 */
+	static const unsigned long halves[] = {
+		(unsigned long)0x5555555555555555ULL, /* scale 0: every other bit */
+		(unsigned long)0x3333333333333333ULL, /* scale 1: pairs */
+		(unsigned long)0x0F0F0F0F0F0F0F0FULL, /* scale 2: nibbles */
+		(unsigned long)0x00FF00FF00FF00FFULL, /* scale 3: bytes */
+		(unsigned long)0x0000FFFF0000FFFFULL, /* scale 4: 16-bit halves */
+		(unsigned long)0x00000000FFFFFFFFULL  /* scale 5: 32-bit halves */
+	};
+	unsigned long halfword = ~0UL; /* progressively shrunk to the current half's width */
+	int scope = 0;                 /* low-order bits of <bit> considered significant so far */
+	int mirror;                    /* <bit>'s counterpart in the opposite half at this scale */
+	int scale;
+
+	if (!v)
+		return -1; /* no bit set: caller error, documented above */
+
+	/* we check if the exact bit is set or if it's present in a mirror
+	 * position based on the current scale we're checking, in which case
+	 * it's returned with its current (or mirrored) value. Otherwise we'll
+	 * make sure there's at least one bit in the half we're in, and will
+	 * scale down to a smaller scope and try again, until we find the
+	 * closest bit.
+	 */
+	for (scale = (sizeof(long) > 4) ? 5 : 4; scale >= 0; scale--) {
+		halfword >>= (1UL << scale);
+		scope |= (1UL << scale);
+		mirror = bit ^ (1UL << scale); /* flip the bit selecting the half */
+		if (v & ((1UL << bit) | (1UL << mirror)))
+			return (v & (1UL << bit)) ? bit : mirror;
+
+		/* no hit at this scale: jump to the mirror half if ours is empty */
+		if (!((v >> (bit & scope)) & halves[scale] & halfword))
+			bit = mirror;
+	}
+	return bit;
+}
+
+/* Usage:
+ *   no argument          : endless random self-test, flags any returned bit
+ *                          not present in the mask.
+ *   <mask>               : prints one_among(mask, bit) for every bit.
+ *   <mask> <bit>         : prints one_among(mask, bit) for this bit only.
+ */
+int main(int argc, char **argv)
+{
+	unsigned long mask;
+	int bit;
+
+	if (argc < 2) {
+		unsigned long long tests = 0;
+		int ret;
+
+		while (1) {
+			mask = statistical_prng(); // note: cannot be zero
+			bit = statistical_prng() % (sizeof(long) * 8);
+			ret = one_among(mask, bit);
+			if (ret < 0 || !((mask >> ret) & 1))
+				printf("###ERR### mask=%#lx bit=%d ret=%d\n", mask, bit, ret);
+			if (!(tests & 0xffffff))
+				printf("count=%llu mask=%lx bit=%d ret=%d\n", tests, mask, bit, ret);
+			tests++;
+		}
+	}
+
+	/* base 0 accepts decimal, octal and 0x-prefixed hexadecimal masks */
+	mask = strtoul(argv[1], NULL, 0);
+
+	if (argc < 3) {
+		for (bit = 0; bit < (int)(8 * sizeof(long)); bit++)
+			printf("v %#lx bit %d best %d\n", mask, bit, one_among(mask, bit));
+	} else {
+		bit = atoi(argv[2]);
+		printf("v %#lx bit %d best %d\n", mask, bit, one_among(mask, bit));
+	}
+	return 0;
+}
diff --git a/tests/unit/test-arg.c b/tests/unit/test-arg.c
new file mode 100644
index 0000000..1871f8c
--- /dev/null
+++ b/tests/unit/test-arg.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <haproxy/arg.h>
+
+/* Parses the comma-separated argument list in argv[1] against an argument
+ * type mask (argv[2], optional) using make_arg_list(), then dumps either the
+ * parsed arguments or the reported error. Returns 0 on success, 1 on usage
+ * or parsing error.
+ */
+int main(int argc, char **argv)
+{
+	int nbargs, err_arg, mask;
+	struct arg *argp;
+	char *err_msg = NULL;
+	const char *err_ptr = NULL;
+
+	if (argc < 2) {
+		printf("Usage: %s arg_list [arg_mask]\n"
+		       "   mask defaults to 0x86543290\n"
+		       "   eg: %s 10k,+20,Host,1.2.3.4,24,::5.6.7.8,120s\n", *argv, *argv);
+		return 1;
+	}
+
+	mask = ARG7(0,SIZE,SINT,STR,IPV4,MSK4,IPV6,TIME);
+	if (argc >= 3)
+		mask = atoll(argv[2]);
+
+	printf("Using mask=0x%08x\n", mask);
+	nbargs = make_arg_list(argv[1], strlen(argv[1]), mask,
+	                       &argp, &err_msg, &err_ptr, &err_arg, NULL);
+
+	printf("nbargs=%d\n", nbargs);
+	if (nbargs < 0) {
+		/* make_arg_list() allocated err_msg; err_ptr points into argv[1] */
+		printf("err_msg=%s\n", err_msg); free(err_msg);
+		printf("err_ptr=%s (str+%d)\n", err_ptr, (int)(err_ptr - argv[1]));
+		printf("err_arg=%d\n", err_arg);
+		return 1;
+	}
+
+	if (nbargs > 0) {
+		int arg;
+
+		for (arg = 0; arg < nbargs; arg++)
+			printf("arg %d: type=%d, int=0x%08x\n",
+			       arg, argp[arg].type, *(int*)&argp[arg].data.sint);
+	}
+	return 0;
+}
diff --git a/tests/unit/test-inherited-fd.py b/tests/unit/test-inherited-fd.py
new file mode 100644
index 0000000..b4b076c
--- /dev/null
+++ b/tests/unit/test-inherited-fd.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+"""
+Python wrapper example to test the fd@ function,
+You have to bind on fd@${NEWFD} in your haproxy configuration
+
+The configuration parsing should still work upon a reload with the master-worker
+mode.
+
+"""
+
+import socket, subprocess, fcntl
+
+s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+flags = fcntl.fcntl(s.fileno(), fcntl.F_GETFD)
+flags &= ~fcntl.FD_CLOEXEC
+fcntl.fcntl(s.fileno(), fcntl.F_SETFD, flags)
+
+s.bind((socket.gethostname(), 5555))
+s.listen(1)
+FD = s.fileno()
+
+subprocess.Popen('NEWFD={} ./haproxy -W -f haproxy.cfg'.format(FD), shell=True, close_fds=False)
diff --git a/tests/unit/test-list.c b/tests/unit/test-list.c
new file mode 100644
index 0000000..9e6ac38
--- /dev/null
+++ b/tests/unit/test-list.c
@@ -0,0 +1,98 @@
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#define USE_THREAD
+#include <haproxy/list.h>
+
+/* Stress test the mt_lists.
+ * Compile from the tests/unit directory with :
+ * cc -I../../include test-list.c -pthread -O2 -o test-list
+ * The only argument it takes is the number of threads to be used.
+ * ./test-list 4
+ */
+
+struct mt_list pouet_list = MT_LIST_HEAD_INIT(pouet_list);
+#define MAX_ACTION 5000000
+
+__thread unsigned int tid;
+struct pouet_lol {
+ struct mt_list list_elt;
+};
+
+/* Worker: performs MAX_ACTION random operations (head insert, tail append,
+ * pop+free, safe walk with random deletions) on the shared <pouet_list>.
+ * <pouet> carries the thread index, stored in the thread-local <tid>.
+ * Always returns NULL (the result is not inspected by the joiner).
+ */
+void *thread(void *pouet)
+{
+	tid = (uintptr_t)pouet;
+
+	for (int i = 0; i < MAX_ACTION; i++) {
+		struct pouet_lol *lol;
+		struct mt_list *elt1, elt2;
+
+		switch (random() % 4) {
+		case 0:
+			/* insert a fresh element at the head */
+			lol = malloc(sizeof(*lol));
+			if (!lol)
+				break;
+			MT_LIST_INIT(&lol->list_elt);
+			MT_LIST_TRY_INSERT(&pouet_list, &lol->list_elt);
+			break;
+		case 1:
+			/* append a fresh element at the tail */
+			lol = malloc(sizeof(*lol));
+			if (!lol)
+				break;
+			MT_LIST_INIT(&lol->list_elt);
+			MT_LIST_TRY_APPEND(&pouet_list, &lol->list_elt);
+			break;
+		case 2:
+			/* pop and free the first element, if any (free(NULL) is a no-op) */
+			lol = MT_LIST_POP(&pouet_list, struct pouet_lol *, list_elt);
+			free(lol);
+			break;
+		case 3:
+			/* walk the list, randomly deleting entries and randomly stopping */
+			mt_list_for_each_entry_safe(lol, &pouet_list, list_elt, elt1, elt2) {
+				if (random() % 2) {
+					MT_LIST_DELETE_SAFE(elt1);
+					free(lol);
+				}
+				if (random() % 2)
+					break;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	return NULL;
+}
+
+/* Spawns <argv[1]> worker threads hammering the shared list, then waits for
+ * them all. Returns 0 on success, 1 on usage or resource errors.
+ */
+int main(int argc, char *argv[])
+{
+	int nb;
+	pthread_t *pth;
+
+	srandom(time(NULL));
+	if (argc != 2) {
+		printf("Usage: %s <nb_threads>\n", argv[0]);
+		exit(1);
+	}
+	nb = atoi(argv[1]);
+#if 0
+	if (nb < 2) {
+		printf("Need at least 2 threads.\n");
+		exit(1);
+	}
+#endif
+	pth = malloc(nb * sizeof(*pth));
+	if (pth == NULL) {
+		printf("Failed to allocate the thread array.\n");
+		exit(1);
+	}
+	for (int i = 0; i < nb; i++) {
+		if (pthread_create(&pth[i], NULL, thread, (void *)(uintptr_t)i) != 0) {
+			printf("Failed to create thread %d.\n", i);
+			exit(1);
+		}
+	}
+	for (int i = 0; i < nb; i++)
+		pthread_join(pth[i], NULL);
+	return 0;
+}
diff --git a/tests/unit/test-sockpair.py b/tests/unit/test-sockpair.py
new file mode 100644
index 0000000..922c6d0
--- /dev/null
+++ b/tests/unit/test-sockpair.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+"""
+Python wrapper example to test socketpair protocol
+./test-socketpair.py test.cfg
+
+use sockpair@${FD1} and sockpair@${FD2} in your configuration file
+
+"""
+
+import socket, os, sys
+
+s = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
+os.set_inheritable(s[0].fileno(), 1)
+os.set_inheritable(s[1].fileno(), 1)
+
+FD1 = s[0].fileno()
+FD2 = s[1].fileno()
+
+print("FD1={} FD2={}".format(FD1, FD2))
+
+os.environ["FD1"] = str(FD1)
+os.environ["FD2"] = str(FD2)
+
+cmd = ["./haproxy",
+ "-f",
+ "{}".format(sys.argv[1])
+]
+os.execve(cmd[0], cmd, os.environ)